Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_configfs.c4
-rw-r--r--drivers/acpi/acpi_dbg.c2
-rw-r--r--drivers/acpi/acpi_lpat.c2
-rw-r--r--drivers/acpi/acpi_lpss.c4
-rw-r--r--drivers/acpi/acpica/aclocal.h4
-rw-r--r--drivers/acpi/acpica/dbexec.c2
-rw-r--r--drivers/acpi/acpica/dbnames.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c4
-rw-r--r--drivers/acpi/acpica/exnames.c6
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c4
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c8
-rw-r--r--drivers/acpi/acpica/nsobject.c4
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c4
-rw-r--r--drivers/acpi/acpica/nsutils.c14
-rw-r--r--drivers/acpi/acpica/nsxfname.c4
-rw-r--r--drivers/acpi/acpica/psargs.c8
-rw-r--r--drivers/acpi/acpica/rsxface.c8
-rw-r--r--drivers/acpi/acpica/tbdata.c3
-rw-r--r--drivers/acpi/acpica/tbfind.c20
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c10
-rw-r--r--drivers/acpi/acpica/tbutils.c6
-rw-r--r--drivers/acpi/acpica/tbxface.c4
-rw-r--r--drivers/acpi/acpica/tbxfload.c15
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c4
-rw-r--r--drivers/acpi/acpica/utmisc.c8
-rw-r--r--drivers/acpi/acpica/utpredef.c4
-rw-r--r--drivers/acpi/acpica/utstring.c6
-rw-r--r--drivers/acpi/arm64/iort.c161
-rw-r--r--drivers/acpi/button.c5
-rw-r--r--drivers/acpi/cppc_acpi.c34
-rw-r--r--drivers/acpi/device_pm.c5
-rw-r--r--drivers/acpi/device_sysfs.c6
-rw-r--r--drivers/acpi/dptf/dptf_power.c3
-rw-r--r--drivers/acpi/event.c4
-rw-r--r--drivers/acpi/hmat/Kconfig11
-rw-r--r--drivers/acpi/hmat/Makefile1
-rw-r--r--drivers/acpi/hmat/hmat.c666
-rw-r--r--drivers/acpi/nfit/core.c12
-rw-r--r--drivers/acpi/nfit/intel.c10
-rw-r--r--drivers/acpi/numa.c16
-rw-r--r--drivers/acpi/pci_mcfg.c12
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/power.c4
-rw-r--r--drivers/acpi/pptt.c48
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/property.c9
-rw-r--r--drivers/acpi/scan.c25
-rw-r--r--drivers/acpi/sleep.c4
-rw-r--r--drivers/acpi/spcr.c2
-rw-r--r--drivers/acpi/sysfs.c14
-rw-r--r--drivers/acpi/tables.c98
-rw-r--r--drivers/acpi/utils.c16
-rw-r--r--drivers/acpi/video_detect.c10
-rw-r--r--drivers/android/binder.c12
-rw-r--r--drivers/android/binder_alloc.c8
-rw-r--r--drivers/ata/ahci_qoriq.c55
-rw-r--r--drivers/ata/pata_ep93xx.c2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/base/Kconfig10
-rw-r--r--drivers/base/arch_topology.c36
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c5
-rw-r--r--drivers/base/firmware_loader/Kconfig1
-rw-r--r--drivers/base/firmware_loader/builtin/.gitignore1
-rw-r--r--drivers/base/firmware_loader/fallback.c6
-rw-r--r--drivers/base/memory.c26
-rw-r--r--drivers/base/node.c352
-rw-r--r--drivers/base/platform.c12
-rw-r--r--drivers/base/power/clock_ops.c3
-rw-r--r--drivers/base/power/common.c4
-rw-r--r--drivers/base/power/domain.c130
-rw-r--r--drivers/base/power/domain_governor.c71
-rw-r--r--drivers/base/power/generic_ops.c4
-rw-r--r--drivers/base/power/main.c80
-rw-r--r--drivers/base/power/qos.c6
-rw-r--r--drivers/base/power/runtime.c4
-rw-r--r--drivers/base/power/sysfs.c6
-rw-r--r--drivers/base/power/trace.c2
-rw-r--r--drivers/base/power/wakeirq.c15
-rw-r--r--drivers/base/power/wakeup.c10
-rw-r--r--drivers/base/property.c75
-rw-r--r--drivers/base/regmap/internal.h5
-rw-r--r--drivers/base/regmap/regcache-flat.c18
-rw-r--r--drivers/base/regmap/regcache-lzo.c18
-rw-r--r--drivers/base/regmap/regcache-rbtree.c18
-rw-r--r--drivers/base/regmap/regcache.c18
-rw-r--r--drivers/base/regmap/regmap-ac97.c22
-rw-r--r--drivers/base/regmap/regmap-debugfs.c48
-rw-r--r--drivers/base/regmap/regmap-i2c.c18
-rw-r--r--drivers/base/regmap/regmap-irq.c21
-rw-r--r--drivers/base/regmap/regmap-mmio.c22
-rw-r--r--drivers/base/regmap/regmap-spi.c18
-rw-r--r--drivers/base/regmap/regmap-spmi.c29
-rw-r--r--drivers/base/regmap/regmap-w1.c16
-rw-r--r--drivers/base/regmap/regmap.c27
-rw-r--r--drivers/base/syscore.c12
-rw-r--r--drivers/base/test/Makefile1
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/block/ataflop.c1
-rw-r--r--drivers/block/brd.c7
-rw-r--r--drivers/block/drbd/drbd_int.h7
-rw-r--r--drivers/block/drbd/drbd_nl.c8
-rw-r--r--drivers/block/drbd/drbd_nla.c3
-rw-r--r--drivers/block/drbd/drbd_receiver.c7
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/floppy.c11
-rw-r--r--drivers/block/loop.c35
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c89
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h17
-rw-r--r--drivers/block/nbd.c38
-rw-r--r--drivers/block/null_blk_main.c5
-rw-r--r--drivers/block/paride/pcd.c15
-rw-r--r--drivers/block/paride/pd.c1
-rw-r--r--drivers/block/paride/pf.c13
-rw-r--r--drivers/block/pktcdvd.c1
-rw-r--r--drivers/block/ps3disk.c4
-rw-r--r--drivers/block/rsxx/core.c1
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/virtio_blk.c5
-rw-r--r--drivers/block/xsysace.c3
-rw-r--r--drivers/block/zram/zram_drv.c5
-rw-r--r--drivers/bluetooth/Kconfig15
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/btbcm.c10
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c36
-rw-r--r--drivers/bluetooth/btmtksdio.c1101
-rw-r--r--drivers/bluetooth/btmtkuart.c2
-rw-r--r--drivers/bluetooth/btqca.c7
-rw-r--r--drivers/bluetooth/btqca.h13
-rw-r--r--drivers/bluetooth/btsdio.c15
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c20
-rw-r--r--drivers/bluetooth/hci_h5.c2
-rw-r--r--drivers/bluetooth/hci_qca.c91
-rw-r--r--drivers/bus/ti-sysc.c661
-rw-r--r--drivers/cdrom/gdrom.c1
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/ds1620.c2
-rw-r--r--drivers/char/dtlk.c3
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hw_random/omap-rng.c1
-rw-r--r--drivers/char/hw_random/stm32-rng.c9
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c3
-rw-r--r--drivers/char/ipmi/ipmi_dmi.c3
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c23
-rw-r--r--drivers/char/ipmi/ipmi_plat_data.c27
-rw-r--r--drivers/char/ipmi/ipmi_plat_data.h3
-rw-r--r--drivers/char/ipmi/ipmi_si_hardcode.c3
-rw-r--r--drivers/char/ipmi/ipmi_si_hotmod.c1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c6
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c11
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c2
-rw-r--r--drivers/char/pcmcia/scr24x_cs.c2
-rw-r--r--drivers/char/random.c199
-rw-r--r--drivers/char/tb0219.c2
-rw-r--r--drivers/char/tpm/Kconfig1
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c4
-rw-r--r--drivers/char/tpm/tpm-dev-common.c9
-rw-r--r--drivers/char/tpm/tpm-interface.c14
-rw-r--r--drivers/char/virtio_console.c3
-rw-r--r--drivers/clk/Kconfig11
-rw-r--r--drivers/clk/Makefile4
-rw-r--r--drivers/clk/actions/owl-common.h2
-rw-r--r--drivers/clk/actions/owl-composite.h2
-rw-r--r--drivers/clk/actions/owl-divider.h2
-rw-r--r--drivers/clk/actions/owl-factor.h2
-rw-r--r--drivers/clk/actions/owl-fixed-factor.h2
-rw-r--r--drivers/clk/actions/owl-gate.h2
-rw-r--r--drivers/clk/actions/owl-mux.h2
-rw-r--r--drivers/clk/actions/owl-pll.h2
-rw-r--r--drivers/clk/actions/owl-reset.h2
-rw-r--r--drivers/clk/analogbits/Kconfig2
-rw-r--r--drivers/clk/analogbits/Makefile3
-rw-r--r--drivers/clk/analogbits/wrpll-cln28hpc.c364
-rw-r--r--drivers/clk/at91/Makefile2
-rw-r--r--drivers/clk/at91/at91sam9260.c14
-rw-r--r--drivers/clk/at91/at91sam9rl.c2
-rw-r--r--drivers/clk/at91/at91sam9x5.c11
-rw-r--r--drivers/clk/at91/clk-generated.c48
-rw-r--r--drivers/clk/at91/clk-master.c8
-rw-r--r--drivers/clk/at91/clk-peripheral.c46
-rw-r--r--drivers/clk/at91/clk-programmable.c57
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c330
-rw-r--r--drivers/clk/at91/clk-usb.c33
-rw-r--r--drivers/clk/at91/dt-compat.c12
-rw-r--r--drivers/clk/at91/pmc.h27
-rw-r--r--drivers/clk/at91/sam9x60.c307
-rw-r--r--drivers/clk/at91/sama5d2.c22
-rw-r--r--drivers/clk/at91/sama5d4.c10
-rw-r--r--drivers/clk/at91/sckc.c134
-rw-r--r--drivers/clk/clk-aspeed.c42
-rw-r--r--drivers/clk/clk-composite.c2
-rw-r--r--drivers/clk/clk-divider.c26
-rw-r--r--drivers/clk/clk-fixed-factor.c57
-rw-r--r--drivers/clk/clk-fixed-rate.c2
-rw-r--r--drivers/clk/clk-fractional-divider.c24
-rw-r--r--drivers/clk/clk-gate.c24
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-highbank.c23
-rw-r--r--drivers/clk/clk-lochnagar.c336
-rw-r--r--drivers/clk/clk-milbeaut.c663
-rw-r--r--drivers/clk/clk-multiplier.c22
-rw-r--r--drivers/clk/clk-mux.c24
-rw-r--r--drivers/clk/clk-pwm.c2
-rw-r--r--drivers/clk/clk-qoriq.c77
-rw-r--r--drivers/clk/clk-stm32f4.c307
-rw-r--r--drivers/clk/clk-stm32mp1.c3
-rw-r--r--drivers/clk/clk-xgene.c6
-rw-r--r--drivers/clk/clk.c392
-rw-r--r--drivers/clk/clk.h2
-rw-r--r--drivers/clk/clkdev.c30
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c4
-rw-r--r--drivers/clk/davinci/pll.h2
-rw-r--r--drivers/clk/davinci/psc.h2
-rw-r--r--drivers/clk/hisilicon/clk-hi3660.c6
-rw-r--r--drivers/clk/hisilicon/clk-hisi-phase.c4
-rw-r--r--drivers/clk/imx/Makefile2
-rw-r--r--drivers/clk/imx/clk-divider-gate.c20
-rw-r--r--drivers/clk/imx/clk-imx5.c (renamed from drivers/clk/imx/clk-imx51-imx53.c)59
-rw-r--r--drivers/clk/imx/clk-imx6sll.c18
-rw-r--r--drivers/clk/imx/clk-imx7d.c4
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c1
-rw-r--r--drivers/clk/imx/clk-imx8mq.c1
-rw-r--r--drivers/clk/imx/clk-pfdv2.c10
-rw-r--r--drivers/clk/imx/clk-pll14xx.c8
-rw-r--r--drivers/clk/imx/clk-pllv3.c31
-rw-r--r--drivers/clk/imx/clk-pllv4.c72
-rw-r--r--drivers/clk/imx/clk-sccg-pll.c12
-rw-r--r--drivers/clk/imx/clk.h6
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c6
-rw-r--r--drivers/clk/mediatek/Kconfig83
-rw-r--r--drivers/clk/mediatek/Makefile16
-rw-r--r--drivers/clk/mediatek/clk-gate.c3
-rw-r--r--drivers/clk/mediatek/clk-gate.h14
-rw-r--r--drivers/clk/mediatek/clk-mt8183-audio.c105
-rw-r--r--drivers/clk/mediatek/clk-mt8183-cam.c63
-rw-r--r--drivers/clk/mediatek/clk-mt8183-img.c63
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu0.c56
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu1.c56
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_adl.c54
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_conn.c123
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mfgcfg.c54
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mm.c111
-rw-r--r--drivers/clk/mediatek/clk-mt8183-vdec.c67
-rw-r--r--drivers/clk/mediatek/clk-mt8183-venc.c59
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c1284
-rw-r--r--drivers/clk/mediatek/clk-mt8516.c815
-rw-r--r--drivers/clk/mediatek/clk-mtk.h3
-rw-r--r--drivers/clk/mediatek/clk-mux.c223
-rw-r--r--drivers/clk/mediatek/clk-mux.h89
-rw-r--r--drivers/clk/mediatek/clk-pll.c87
-rw-r--r--drivers/clk/meson/axg-audio.c1219
-rw-r--r--drivers/clk/meson/axg-audio.h16
-rw-r--r--drivers/clk/meson/clk-pll.c28
-rw-r--r--drivers/clk/meson/clk-pll.h1
-rw-r--r--drivers/clk/meson/g12a-aoclk.h2
-rw-r--r--drivers/clk/meson/g12a.c637
-rw-r--r--drivers/clk/meson/g12a.h31
-rw-r--r--drivers/clk/meson/gxbb.c2
-rw-r--r--drivers/clk/meson/meson8b.c734
-rw-r--r--drivers/clk/meson/meson8b.h27
-rw-r--r--drivers/clk/meson/vid-pll-div.c4
-rw-r--r--drivers/clk/mmp/clk-gate.c2
-rw-r--r--drivers/clk/mvebu/common.c2
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c4
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c6
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c24
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c7
-rw-r--r--drivers/clk/qcom/Kconfig6
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/clk-branch.c6
-rw-r--r--drivers/clk/qcom/clk-branch.h1
-rw-r--r--drivers/clk/qcom/clk-regmap-mux-div.h2
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c2
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c90
-rw-r--r--drivers/clk/qcom/turingcc-qcs404.c170
-rw-r--r--drivers/clk/renesas/r7s9210-cpg-mssr.c3
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c18
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c7
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c41
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c35
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c33
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c25
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c1
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h4
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c71
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h9
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h4
-rw-r--r--drivers/clk/rockchip/clk-ddr.c2
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c6
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c36
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c18
-rw-r--r--drivers/clk/rockchip/clk.c9
-rw-r--r--drivers/clk/rockchip/clk.h23
-rw-r--r--drivers/clk/samsung/clk-exynos5410.c1
-rw-r--r--drivers/clk/sifive/Kconfig18
-rw-r--r--drivers/clk/sifive/Makefile1
-rw-r--r--drivers/clk/sifive/fu540-prci.c626
-rw-r--r--drivers/clk/sprd/common.h2
-rw-r--r--drivers/clk/sprd/composite.h2
-rw-r--r--drivers/clk/sprd/div.h2
-rw-r--r--drivers/clk/sprd/gate.h2
-rw-r--r--drivers/clk/sprd/mux.h2
-rw-r--r--drivers/clk/sprd/pll.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c3
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c19
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c3
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c24
-rw-r--r--drivers/clk/sunxi/Kconfig43
-rw-r--r--drivers/clk/sunxi/Makefile49
-rw-r--r--drivers/clk/tegra/clk-divider.c3
-rw-r--r--drivers/clk/tegra/clk-emc.c57
-rw-r--r--drivers/clk/tegra/clk-pll.c54
-rw-r--r--drivers/clk/tegra/clk-super.c2
-rw-r--r--drivers/clk/tegra/clk-tegra124.c7
-rw-r--r--drivers/clk/tegra/clk-tegra210.c6
-rw-r--r--drivers/clk/ti/clk-7xx-compat.c6
-rw-r--r--drivers/clk/ti/clk-7xx.c6
-rw-r--r--drivers/clk/ti/clkctrl.c17
-rw-r--r--drivers/clk/ti/clock.h8
-rw-r--r--drivers/clk/ux500/clk-sysctrl.c3
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c14
-rw-r--r--drivers/clk/zynq/clkc.c6
-rw-r--r--drivers/clk/zynq/pll.c18
-rw-r--r--drivers/clk/zynqmp/clk-mux-zynqmp.c1
-rw-r--r--drivers/clk/zynqmp/clk-zynqmp.h6
-rw-r--r--drivers/clk/zynqmp/clkc.c180
-rw-r--r--drivers/clk/zynqmp/divider.c17
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c143
-rw-r--r--drivers/clocksource/timer-fsl-ftm.c15
-rw-r--r--drivers/clocksource/timer-ixp4xx.c282
-rw-r--r--drivers/clocksource/timer-oxnas-rps.c2
-rw-r--r--drivers/clocksource/timer-ti-dm.c29
-rw-r--r--drivers/counter/104-quad-8.c1367
-rw-r--r--drivers/counter/Kconfig60
-rw-r--r--drivers/counter/Makefile10
-rw-r--r--drivers/counter/counter.c1567
-rw-r--r--drivers/counter/ftm-quaddec.c356
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c754
-rw-r--r--drivers/counter/stm32-timer-cnt.c390
-rw-r--r--drivers/cpufreq/Kconfig4
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c19
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c2
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c22
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq.c242
-rw-r--r--drivers/cpufreq/cpufreq_governor.c2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c15
-rw-r--r--drivers/cpufreq/freq_table.c3
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c70
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c19
-rw-r--r--drivers/cpufreq/maple-cpufreq.c6
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernow-k8.c2
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c1
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c2
-rw-r--r--drivers/cpufreq/speedstep-centrino.c2
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c2
-rw-r--r--drivers/cpuidle/cpuidle.c19
-rw-r--r--drivers/crypto/Kconfig9
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c24
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c48
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h3
-rw-r--r--drivers/crypto/atmel-tdes.c106
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c2
-rw-r--r--drivers/crypto/bcm/cipher.c22
-rw-r--r--drivers/crypto/bcm/spu.c3
-rw-r--r--drivers/crypto/bcm/util.c1
-rw-r--r--drivers/crypto/caam/caamalg.c84
-rw-r--r--drivers/crypto/caam/caamalg_qi.c73
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c251
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h2
-rw-r--r--drivers/crypto/caam/caamhash.c13
-rw-r--r--drivers/crypto/caam/caampkc.c2
-rw-r--r--drivers/crypto/caam/ctrl.c20
-rw-r--r--drivers/crypto/caam/error.c4
-rw-r--r--drivers/crypto/caam/intern.h4
-rw-r--r--drivers/crypto/caam/jr.c31
-rw-r--r--drivers/crypto/caam/qi.c4
-rw-r--r--drivers/crypto/caam/regs.h3
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c2
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_mbox.c17
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c337
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c65
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_req.h46
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c8
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c8
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c21
-rw-r--r--drivers/crypto/ccp/ccp-crypto-rsa.c8
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c2
-rw-r--r--drivers/crypto/ccp/psp-dev.c69
-rw-r--r--drivers/crypto/ccree/Makefile1
-rw-r--r--drivers/crypto/ccree/cc_aead.c118
-rw-r--r--drivers/crypto/ccree/cc_aead.h3
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c341
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.h2
-rw-r--r--drivers/crypto/ccree/cc_cipher.c585
-rw-r--r--drivers/crypto/ccree/cc_cipher.h3
-rw-r--r--drivers/crypto/ccree/cc_crypto_ctx.h10
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c44
-rw-r--r--drivers/crypto/ccree/cc_debugfs.h2
-rw-r--r--drivers/crypto/ccree/cc_driver.c120
-rw-r--r--drivers/crypto/ccree/cc_driver.h36
-rw-r--r--drivers/crypto/ccree/cc_fips.c29
-rw-r--r--drivers/crypto/ccree/cc_fips.h4
-rw-r--r--drivers/crypto/ccree/cc_hash.c64
-rw-r--r--drivers/crypto/ccree/cc_hash.h2
-rw-r--r--drivers/crypto/ccree/cc_host_regs.h123
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h35
-rw-r--r--drivers/crypto/ccree/cc_ivgen.c11
-rw-r--r--drivers/crypto/ccree/cc_ivgen.h2
-rw-r--r--drivers/crypto/ccree/cc_kernel_regs.h2
-rw-r--r--drivers/crypto/ccree/cc_lli_defs.h4
-rw-r--r--drivers/crypto/ccree/cc_pm.c11
-rw-r--r--drivers/crypto/ccree/cc_pm.h2
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c116
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.h2
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.c7
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.h2
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c19
-rw-r--r--drivers/crypto/chelsio/chcr_core.c4
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c3
-rw-r--r--drivers/crypto/hifn_795x.c31
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c12
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c11
-rw-r--r--drivers/crypto/ixp4xx_crypto.c68
-rw-r--r--drivers/crypto/marvell/cipher.c11
-rw-r--r--drivers/crypto/marvell/hash.c3
-rw-r--r--drivers/crypto/mediatek/mtk-sha.c3
-rw-r--r--drivers/crypto/mxc-scc.c767
-rw-r--r--drivers/crypto/mxs-dcp.c14
-rw-r--r--drivers/crypto/n2_core.c15
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c6
-rw-r--r--drivers/crypto/nx/nx-842.c3
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c12
-rw-r--r--drivers/crypto/nx/nx-sha256.c6
-rw-r--r--drivers/crypto/nx/nx-sha512.c6
-rw-r--r--drivers/crypto/omap-des.c29
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/padlock-sha.c5
-rw-r--r--drivers/crypto/picoxcell_crypto.c35
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c2
-rw-r--r--drivers/crypto/qce/ablkcipher.c22
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c61
-rw-r--r--drivers/crypto/s5p-sss.c1
-rw-r--r--drivers/crypto/sahara.c6
-rw-r--r--drivers/crypto/stm32/Kconfig1
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c74
-rw-r--r--drivers/crypto/stm32/stm32-hash.c4
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c78
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c19
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-hash.c5
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss.h2
-rw-r--r--drivers/crypto/talitos.c108
-rw-r--r--drivers/crypto/ux500/cryp/Makefile6
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c86
-rw-r--r--drivers/crypto/vmx/aes.c14
-rw-r--r--drivers/crypto/vmx/aes_cbc.c14
-rw-r--r--drivers/crypto/vmx/aes_ctr.c10
-rw-r--r--drivers/crypto/vmx/aes_xts.c14
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.pl4
-rw-r--r--drivers/crypto/vmx/ghash.c10
-rw-r--r--drivers/crypto/vmx/vmx.c4
-rw-r--r--drivers/dax/Kconfig3
-rw-r--r--drivers/dax/device.c6
-rw-r--r--drivers/dax/pmem/core.c6
-rw-r--r--drivers/dax/super.c7
-rw-r--r--drivers/devfreq/devfreq-event.c2
-rw-r--r--drivers/devfreq/devfreq.c90
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c2
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c25
-rw-r--r--drivers/devfreq/exynos-bus.c8
-rw-r--r--drivers/devfreq/rk3399_dmc.c73
-rw-r--r--drivers/devfreq/tegra-devfreq.c7
-rw-r--r--drivers/dma-buf/Kconfig1
-rw-r--r--drivers/dma-buf/Makefile3
-rw-r--r--drivers/dma-buf/dma-fence-chain.c242
-rw-r--r--drivers/dma-buf/dma-fence.c1
-rw-r--r--drivers/dma-buf/reservation.c8
-rw-r--r--drivers/dma-buf/sw_sync.c2
-rw-r--r--drivers/dma-buf/sync_file.c3
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/amba-pl08x.c22
-rw-r--r--drivers/dma/at_xdmac.c67
-rw-r--r--drivers/dma/bcm-sba-raid.c3
-rw-r--r--drivers/dma/bcm2835-dma.c3
-rw-r--r--drivers/dma/dma-axi-dmac.c116
-rw-r--r--drivers/dma/fsl-edma-common.h2
-rw-r--r--drivers/dma/fsl-edma.c6
-rw-r--r--drivers/dma/idma64.c15
-rw-r--r--drivers/dma/idma64.h2
-rw-r--r--drivers/dma/imx-sdma.c15
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c2
-rw-r--r--drivers/dma/nbpfaxi.c4
-rw-r--r--drivers/dma/pl330.c61
-rw-r--r--drivers/dma/sh/rcar-dmac.c34
-rw-r--r--drivers/dma/stm32-dma.c103
-rw-r--r--drivers/dma/tegra210-adma.c269
-rw-r--r--drivers/dma/txx9dmac.c3
-rw-r--r--drivers/dma/xgene-dma.c6
-rw-r--r--drivers/edac/altera_edac.c262
-rw-r--r--drivers/edac/altera_edac.h69
-rw-r--r--drivers/edac/amd64_edac.c135
-rw-r--r--drivers/edac/amd64_edac.h11
-rw-r--r--drivers/edac/i10nm_base.c52
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/skx_base.c50
-rw-r--r--drivers/edac/skx_common.c57
-rw-r--r--drivers/edac/skx_common.h8
-rw-r--r--drivers/extcon/Kconfig11
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/devres.c2
-rw-r--r--drivers/extcon/extcon-arizona.c10
-rw-r--r--drivers/extcon/extcon-axp288.c9
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c81
-rw-r--r--drivers/extcon/extcon-intel-mrfld.c284
-rw-r--r--drivers/extcon/extcon-intel.h20
-rw-r--r--drivers/firewire/core-iso.c15
-rw-r--r--drivers/firewire/nosy.c2
-rw-r--r--drivers/firewire/ohci.c1
-rw-r--r--drivers/firmware/Kconfig31
-rw-r--r--drivers/firmware/Makefile4
-rw-r--r--drivers/firmware/arm_sdei.c3
-rw-r--r--drivers/firmware/dmi_scan.c28
-rw-r--r--drivers/firmware/efi/arm-runtime.c6
-rw-r--r--drivers/firmware/efi/libstub/Makefile20
-rw-r--r--drivers/firmware/google/vpd.c4
-rw-r--r--drivers/firmware/iscsi_ibft.c2
-rw-r--r--drivers/firmware/psci/Kconfig13
-rw-r--r--drivers/firmware/psci/Makefile4
-rw-r--r--drivers/firmware/psci/psci.c (renamed from drivers/firmware/psci.c)110
-rw-r--r--drivers/firmware/psci/psci_checker.c (renamed from drivers/firmware/psci_checker.c)0
-rw-r--r--drivers/firmware/trusted_foundations.c176
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c2
-rw-r--r--drivers/gnss/core.c2
-rw-r--r--drivers/gnss/ubx.c1
-rw-r--r--drivers/gpio/Kconfig104
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-74x164.c22
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c4
-rw-r--r--drivers/gpio/gpio-amdpt.c8
-rw-r--r--drivers/gpio/gpio-aspeed.c4
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-cadence.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c7
-rw-r--r--drivers/gpio/gpio-dwapb.c4
-rw-r--r--drivers/gpio/gpio-eic-sprd.c1
-rw-r--r--drivers/gpio/gpio-ftgpio010.c4
-rw-r--r--drivers/gpio/gpio-hlwd.c4
-rw-r--r--drivers/gpio/gpio-iop.c4
-rw-r--r--drivers/gpio/gpio-ixp4xx.c474
-rw-r--r--drivers/gpio/gpio-janz-ttl.c4
-rw-r--r--drivers/gpio/gpio-loongson1.c4
-rw-r--r--drivers/gpio/gpio-lpc18xx.c5
-rw-r--r--drivers/gpio/gpio-max77650.c190
-rw-r--r--drivers/gpio/gpio-mb86s7x.c4
-rw-r--r--drivers/gpio/gpio-merrifield.c18
-rw-r--r--drivers/gpio/gpio-mlxbf.c152
-rw-r--r--drivers/gpio/gpio-mmio.c99
-rw-r--r--drivers/gpio/gpio-mt7621.c3
-rw-r--r--drivers/gpio/gpio-mvebu.c7
-rw-r--r--drivers/gpio/gpio-mxc.c4
-rw-r--r--drivers/gpio/gpio-octeon.c4
-rw-r--r--drivers/gpio/gpio-omap.c644
-rw-r--r--drivers/gpio/gpio-pca953x.c25
-rw-r--r--drivers/gpio/gpio-pxa.c12
-rw-r--r--drivers/gpio/gpio-rcar.c5
-rw-r--r--drivers/gpio/gpio-sch.c5
-rw-r--r--drivers/gpio/gpio-spear-spics.c4
-rw-r--r--drivers/gpio/gpio-sprd.c4
-rw-r--r--drivers/gpio/gpio-sta2x11.c5
-rw-r--r--drivers/gpio/gpio-stp-xway.c4
-rw-r--r--drivers/gpio/gpio-tb10x.c4
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-timberdale.c4
-rw-r--r--drivers/gpio/gpio-ts4800.c4
-rw-r--r--drivers/gpio/gpio-uniphier.c4
-rw-r--r--drivers/gpio/gpio-vf610.c92
-rw-r--r--drivers/gpio/gpio-xgene-sb.c4
-rw-r--r--drivers/gpio/gpio-xlp.c7
-rw-r--r--drivers/gpio/gpio-zx.c4
-rw-r--r--drivers/gpio/gpio-zynq.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c115
-rw-r--r--drivers/gpio/gpiolib-devprop.c2
-rw-r--r--drivers/gpio/gpiolib-of.c24
-rw-r--r--drivers/gpio/gpiolib.c55
-rw-r--r--drivers/gpio/gpiolib.h22
-rw-r--r--drivers/gpu/drm/Kconfig22
-rw-r--r--drivers/gpu/drm/Makefile18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c531
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c318
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c1482
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h294
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c977
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c270
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c172
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c207
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c652
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c238
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c117
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1824
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c53
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c194
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c220
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c183
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c202
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c383
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c195
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h137
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c155
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c213
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c174
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c15
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h98
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h16
-rw-r--r--drivers/gpu/drm/amd/include/linux/chash.h366
-rw-r--r--drivers/gpu/drm/amd/lib/Kconfig28
-rw-r--r--drivers/gpu/drm/amd/lib/Makefile32
-rw-r--r--drivers/gpu/drm/amd/lib/chash.c638
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c1253
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c127
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c66
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c39
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c39
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c73
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h770
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu10.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h89
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h128
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h147
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c1977
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c2413
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.h129
-rw-r--r--drivers/gpu/drm/arm/display/include/malidp_product.h12
-rw-r--r--drivers/gpu/drm/arm/display/include/malidp_utils.h31
-rw-r--r--drivers/gpu/drm/arm/display/komeda/Makefile8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_component.c685
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c431
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h50
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h530
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c407
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c118
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.h95
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c9
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h9
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c77
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h26
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c113
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h129
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c610
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c139
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c220
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c48
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h6
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c249
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h31
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c10
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c271
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h20
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c6
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig14
-rw-r--r--drivers/gpu/drm/aspeed/Makefile3
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx.h104
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c241
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c269
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_out.c42
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h4
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c7
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c10
-rw-r--r--drivers/gpu/drm/bochs/bochs.h9
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c194
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c10
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c6
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c50
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c140
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig2
-rw-r--r--drivers/gpu/drm/cirrus/Makefile3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c657
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c161
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c315
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c328
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c621
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c10
-rw-r--r--drivers/gpu/drm/drm_atomic.c45
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c24
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c4
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c71
-rw-r--r--drivers/gpu/drm/drm_auth.c21
-rw-r--r--drivers/gpu/drm/drm_bufs.c8
-rw-r--r--drivers/gpu/drm/drm_client.c11
-rw-r--r--drivers/gpu/drm/drm_connector.c97
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/drm_drv.c223
-rw-r--r--drivers/gpu/drm/drm_dsc.c269
-rw-r--r--drivers/gpu/drm/drm_edid.c105
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c302
-rw-r--r--drivers/gpu/drm/drm_file.c26
-rw-r--r--drivers/gpu/drm/drm_format_helper.c324
-rw-r--r--drivers/gpu/drm/drm_fourcc.c27
-rw-r--r--drivers/gpu/drm/drm_gem.c320
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c625
-rw-r--r--drivers/gpu/drm/drm_internal.h10
-rw-r--r--drivers/gpu/drm/drm_ioc32.c13
-rw-r--r--drivers/gpu/drm/drm_ioctl.c86
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c2
-rw-r--r--drivers/gpu/drm/drm_lease.c13
-rw-r--r--drivers/gpu/drm/drm_legacy.h87
-rw-r--r--drivers/gpu/drm/drm_legacy_misc.c82
-rw-r--r--drivers/gpu/drm/drm_lock.c19
-rw-r--r--drivers/gpu/drm/drm_memory.c26
-rw-r--r--drivers/gpu/drm/drm_mm.c25
-rw-r--r--drivers/gpu/drm/drm_mode_config.c5
-rw-r--r--drivers/gpu/drm/drm_mode_object.c5
-rw-r--r--drivers/gpu/drm/drm_modes.c12
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c13
-rw-r--r--drivers/gpu/drm/drm_plane.c8
-rw-r--r--drivers/gpu/drm/drm_prime.c1
-rw-r--r--drivers/gpu/drm/drm_print.c28
-rw-r--r--drivers/gpu/drm/drm_syncobj.c462
-rw-r--r--drivers/gpu/drm/drm_vm.c6
-rw-r--r--drivers/gpu/drm/drm_writeback.c73
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c40
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c22
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c26
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c30
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c97
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c48
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c51
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c72
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c49
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c75
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c43
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c7
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c7
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c12
-rw-r--r--drivers/gpu/drm/i915/.gitignore1
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Makefile8
-rw-r--r--drivers/gpu/drm/i915/Makefile.header-test47
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c74
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c30
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c36
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h18
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h17
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c189
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c248
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h31
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c46
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_active.c23
-rw-r--r--drivers/gpu/drm/i915/i915_active.h16
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c12
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c178
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c622
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h408
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c782
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c1097
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h260
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context_types.h175
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c156
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c141
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.c42
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c8
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c125
-rw-r--r--drivers/gpu/drm/i915/i915_globals.h35
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c183
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h51
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c665
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c262
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c114
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c67
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h42
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h2
-rw-r--r--drivers/gpu/drm/i915/i915_query.c39
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h571
-rw-r--r--drivers/gpu/drm/i915/i915_request.c554
-rw-r--r--drivers/gpu/drm/i915/i915_request.h87
-rw-r--r--drivers/gpu/drm/i915/i915_reset.c621
-rw-r--r--drivers/gpu/drm/i915/i915_reset.h16
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c112
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h95
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h72
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c4
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c43
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h16
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.c301
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.h89
-rw-r--r--drivers/gpu/drm/i915/i915_timeline_types.h70
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h106
-rw-r--r--drivers/gpu/drm/i915/i915_user_extensions.c61
-rw-r--r--drivers/gpu/drm/i915/i915_user_extensions.h20
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h31
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c11
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c62
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h3
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c99
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c6
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c59
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.h40
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c95
-rw-r--r--drivers/gpu/drm/i915/intel_audio.h24
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c133
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c92
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c382
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.h46
-rw-r--r--drivers/gpu/drm/i915/intel_color.c1131
-rw-r--r--drivers/gpu/drm/i915/intel_color.h17
-rw-r--r--drivers/gpu/drm/i915/intel_combo_phy.c3
-rw-r--r--drivers/gpu/drm/i915/intel_connector.c19
-rw-r--r--drivers/gpu/drm/i915/intel_connector.h35
-rw-r--r--drivers/gpu/drm/i915/intel_context.c270
-rw-r--r--drivers/gpu/drm/i915/intel_context.h87
-rw-r--r--drivers/gpu/drm/i915/intel_context_types.h77
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/intel_crt.h21
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c5
-rw-r--r--drivers/gpu/drm/i915/intel_csr.h17
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c350
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.h53
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c136
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h46
-rw-r--r--drivers/gpu/drm/i915/intel_display.c819
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c658
-rw-r--r--drivers/gpu/drm/i915/intel_dp.h122
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c154
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c770
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h5
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h676
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c24
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c10
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.h13
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c491
-rw-r--r--drivers/gpu/drm/i915/intel_engine_types.h546
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c10
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.h42
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c245
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.h53
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c1
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c5
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.h10
-rw-r--r--drivers/gpu/drm/i915/intel_gpu_commands.h9
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c45
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h4
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ads.c3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.c99
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.h3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c5
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c134
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c26
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.c1179
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.h33
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c810
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.h51
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_huc_fw.c27
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c904
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h35
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c19
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.h38
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c101
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.h22
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c14
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c3
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c150
-rw-r--r--drivers/gpu/drm/i915/intel_panel.h65
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c243
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.h35
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c555
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h71
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c318
-rw-r--r--drivers/gpu/drm/i915/intel_psr.h40
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c435
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h650
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c124
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c169
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.h23
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c12
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c260
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.h55
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c5
-rw-r--r--drivers/gpu/drm/i915/intel_tv.h13
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c25
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c996
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h286
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h3
-rw-r--r--drivers/gpu/drm/i915/intel_vdsc.c133
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.c187
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.h19
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds_types.h27
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c457
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c21
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c37
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_timeline.c120
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c301
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_lrc.c446
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c166
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_workarounds.c423
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c145
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c54
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.h7
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.h2
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi.c108
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi_pll.c4
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c4
-rw-r--r--drivers/gpu/drm/lima/Kconfig13
-rw-r--r--drivers/gpu/drm/lima/Makefile21
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.c47
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.h14
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.c98
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.h30
-rw-r--r--drivers/gpu/drm/lima/lima_device.c385
-rw-r--r--drivers/gpu/drm/lima/lima_device.h131
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.c58
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.h18
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c376
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h45
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c349
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h25
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.c47
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.h13
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c283
-rw-r--r--drivers/gpu/drm/lima/lima_gp.h16
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.c80
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.h14
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c142
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h16
-rw-r--r--drivers/gpu/drm/lima/lima_object.c122
-rw-r--r--drivers/gpu/drm/lima/lima_object.h36
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.c60
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.h12
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c427
-rw-r--r--drivers/gpu/drm/lima/lima_pp.h19
-rw-r--r--drivers/gpu/drm/lima/lima_regs.h298
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c362
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h102
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c282
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h62
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c46
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_phy.c35
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_phy.h5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c49
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c23
-rw-r--r--drivers/gpu/drm/meson/Makefile2
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.c73
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.h51
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c353
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c83
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h5
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c163
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.h32
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c18
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c21
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h247
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c123
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c11
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c25
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c85
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c51
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c10
-rw-r--r--drivers/gpu/drm/msm/Kconfig5
-rw-r--r--drivers/gpu/drm/msm/Makefile9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c109
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c216
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c63
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c141
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c69
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c177
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c4
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c15
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h10
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c72
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h10
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c52
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c13
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c41
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig17
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c45
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c330
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c45
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c39
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c170
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c40
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c221
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c140
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c61
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c55
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c58
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c48
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c144
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c24
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c64
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c110
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c60
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h76
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c68
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c229
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c181
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c13
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c236
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c211
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/panel/Kconfig31
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c6
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c272
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c6
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c1
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c20
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c3
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c387
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c258
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c3
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c84
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c12
-rw-r--r--drivers/gpu/drm/panfrost/Kconfig14
-rw-r--r--drivers/gpu/drm/panfrost/Makefile12
-rw-r--r--drivers/gpu/drm/panfrost/TODO27
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c219
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c257
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h125
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c475
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h309
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c95
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h29
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c367
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.h19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_issues.h176
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c564
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h51
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c386
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h298
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c7
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c12
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c17
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig4
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c64
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c54
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c37
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c122
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.h17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c243
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.h39
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c19
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig8
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c876
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.h229
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c17
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c20
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c3
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c12
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/stm/Kconfig2
-rw-r--r--drivers/gpu/drm/stm/drv.c35
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c28
-rw-r--r--drivers/gpu/drm/stm/ltdc.c24
-rw-r--r--drivers/gpu/drm/stm/ltdc.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c63
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c40
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c29
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c74
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c179
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h11
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c12
-rw-r--r--drivers/gpu/drm/tegra/sor.c21
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/tinydrm/core/Makefile2
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c183
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c160
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c24
-rw-r--r--drivers/gpu/drm/tinydrm/hx8357d.c59
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c87
-rw-r--r--drivers/gpu/drm/tinydrm/ili9341.c59
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c67
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c185
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c147
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c148
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c59
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c15
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c13
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c58
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h8
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c20
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c29
-rw-r--r--drivers/gpu/drm/v3d/Kconfig1
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c314
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c65
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h37
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c110
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c67
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h2
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c25
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig (renamed from drivers/staging/vboxvideo/Kconfig)1
-rw-r--r--drivers/gpu/drm/vboxvideo/Makefile (renamed from drivers/staging/vboxvideo/Makefile)0
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_base.c (renamed from drivers/staging/vboxvideo/hgsmi_base.c)0
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h (renamed from drivers/staging/vboxvideo/hgsmi_ch_setup.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_channels.h (renamed from drivers/staging/vboxvideo/hgsmi_channels.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_defs.h (renamed from drivers/staging/vboxvideo/hgsmi_defs.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/modesetting.c (renamed from drivers/staging/vboxvideo/modesetting.c)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c (renamed from drivers/staging/vboxvideo/vbox_drv.c)25
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h (renamed from drivers/staging/vboxvideo/vbox_drv.h)9
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_fb.c (renamed from drivers/staging/vboxvideo/vbox_fb.c)8
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_hgsmi.c (renamed from drivers/staging/vboxvideo/vbox_hgsmi.c)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c (renamed from drivers/staging/vboxvideo/vbox_irq.c)10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c (renamed from drivers/staging/vboxvideo/vbox_main.c)6
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c (renamed from drivers/staging/vboxvideo/vbox_mode.c)24
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_prime.c (renamed from drivers/staging/vboxvideo/vbox_prime.c)10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c (renamed from drivers/staging/vboxvideo/vbox_ttm.c)12
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h (renamed from drivers/staging/vboxvideo/vboxvideo.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_guest.h (renamed from drivers/staging/vboxvideo/vboxvideo_guest.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h (renamed from drivers/staging/vboxvideo/vboxvideo_vbe.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vbva_base.c (renamed from drivers/staging/vboxvideo/vbva_base.c)0
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c69
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c107
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c90
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c39
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c42
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h77
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c175
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c162
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c180
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c123
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c59
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h51
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c23
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c240
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c83
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c27
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h49
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c35
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c107
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c74
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c25
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c102
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c36
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c98
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c59
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1505
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c80
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h7
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c18
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-dp.c12
-rw-r--r--drivers/hid/Kconfig28
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-core.c59
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-input.c101
-rw-r--r--drivers/hid/hid-lg.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c1142
-rw-r--r--drivers/hid/hid-logitech-hidpp.c747
-rw-r--r--drivers/hid/hid-macally.c45
-rw-r--r--drivers/hid/hid-picolcd_core.c18
-rw-r--r--drivers/hid/hid-quirks.c17
-rw-r--r--drivers/hid/hid-sensor-custom.c12
-rw-r--r--drivers/hid/hid-steam.c26
-rw-r--r--drivers/hid/hid-u2fzero.c374
-rw-r--r--drivers/hid/hid-uclogic-params.c4
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig15
-rw-r--r--drivers/hid/intel-ish-hid/Makefile3
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c1085
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c168
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c49
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h14
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c96
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h37
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c60
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h24
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h31
-rw-r--r--drivers/hid/uhid.c2
-rw-r--r--drivers/hv/channel_mgmt.c3
-rw-r--r--drivers/hv/hv.c1
-rw-r--r--drivers/hv/hyperv_vmbus.h3
-rw-r--r--drivers/hv/ring_buffer.c22
-rw-r--r--drivers/hv/vmbus_drv.c166
-rw-r--r--drivers/hwmon/Kconfig20
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/ad7414.c2
-rw-r--r--drivers/hwmon/adc128d818.c2
-rw-r--r--drivers/hwmon/adm1025.c98
-rw-r--r--drivers/hwmon/adm1026.c416
-rw-r--r--drivers/hwmon/adm1029.c41
-rw-r--r--drivers/hwmon/adm1031.c201
-rw-r--r--drivers/hwmon/adm9240.c135
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/hwmon/ads7828.c4
-rw-r--r--drivers/hwmon/adt7411.c48
-rw-r--r--drivers/hwmon/adt7475.c2
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c6
-rw-r--r--drivers/hwmon/f71805f.c15
-rw-r--r--drivers/hwmon/fschmd.c2
-rw-r--r--drivers/hwmon/gpio-fan.c25
-rw-r--r--drivers/hwmon/hih6130.c2
-rw-r--r--drivers/hwmon/hwmon.c5
-rw-r--r--drivers/hwmon/iio_hwmon.c27
-rw-r--r--drivers/hwmon/ina209.c2
-rw-r--r--drivers/hwmon/ina2xx.c2
-rw-r--r--drivers/hwmon/ina3221.c176
-rw-r--r--drivers/hwmon/jc42.c18
-rw-r--r--drivers/hwmon/jz4740-hwmon.c4
-rw-r--r--drivers/hwmon/lm63.c2
-rw-r--r--drivers/hwmon/lm75.c45
-rw-r--r--drivers/hwmon/lm78.c114
-rw-r--r--drivers/hwmon/lm85.c342
-rw-r--r--drivers/hwmon/lm87.c165
-rw-r--r--drivers/hwmon/lm90.c15
-rw-r--r--drivers/hwmon/lm95241.c34
-rw-r--r--drivers/hwmon/lm95245.c49
-rw-r--r--drivers/hwmon/lochnagar-hwmon.c412
-rw-r--r--drivers/hwmon/ltc4151.c2
-rw-r--r--drivers/hwmon/ltc4245.c73
-rw-r--r--drivers/hwmon/ltq-cputemp.c26
-rw-r--r--drivers/hwmon/max197.c2
-rw-r--r--drivers/hwmon/max31790.c58
-rw-r--r--drivers/hwmon/max6621.c44
-rw-r--r--drivers/hwmon/max6650.c90
-rw-r--r--drivers/hwmon/max6697.c2
-rw-r--r--drivers/hwmon/menf21bmc_hwmon.c43
-rw-r--r--drivers/hwmon/mlxreg-fan.c152
-rw-r--r--drivers/hwmon/nct7904.c128
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c76
-rw-r--r--drivers/hwmon/ntc_thermistor.c24
-rw-r--r--drivers/hwmon/occ/Kconfig17
-rw-r--r--drivers/hwmon/occ/Makefile6
-rw-r--r--drivers/hwmon/occ/common.c17
-rw-r--r--drivers/hwmon/occ/common.h3
-rw-r--r--drivers/hwmon/occ/sysfs.c29
-rw-r--r--drivers/hwmon/pc87427.c14
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/ir38064.c65
-rw-r--r--drivers/hwmon/pmbus/isl68137.c169
-rw-r--r--drivers/hwmon/pmbus/lm25066.c17
-rw-r--r--drivers/hwmon/pmbus/pmbus.h18
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c129
-rw-r--r--drivers/hwmon/pmbus/tps53679.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c2
-rw-r--r--drivers/hwmon/pwm-fan.c169
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c13
-rw-r--r--drivers/hwmon/s3c-hwmon.c4
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/sis5595.c92
-rw-r--r--drivers/hwmon/smsc47b397.c13
-rw-r--r--drivers/hwmon/smsc47m1.c106
-rw-r--r--drivers/hwmon/smsc47m192.c146
-rw-r--r--drivers/hwmon/stts751.c2
-rw-r--r--drivers/hwmon/thmc50.c83
-rw-r--r--drivers/hwmon/tmp102.c28
-rw-r--r--drivers/hwmon/tmp103.c2
-rw-r--r--drivers/hwmon/tmp108.c29
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/via686a.c148
-rw-r--r--drivers/hwmon/vt1211.c15
-rw-r--r--drivers/hwmon/vt8231.c166
-rw-r--r--drivers/hwmon/w83627hf.c299
-rw-r--r--drivers/hwmon/w83773g.c32
-rw-r--r--drivers/hwmon/w83793.c2
-rw-r--r--drivers/hwtracing/coresight/Kconfig9
-rw-r--r--drivers/hwtracing/coresight/Makefile1
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c255
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c97
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c114
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c116
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c238
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c82
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c266
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c17
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h12
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c18
-rw-r--r--drivers/hwtracing/coresight/coresight.c29
-rw-r--r--drivers/hwtracing/intel_th/acpi.c10
-rw-r--r--drivers/hwtracing/intel_th/core.c139
-rw-r--r--drivers/hwtracing/intel_th/gth.c125
-rw-r--r--drivers/hwtracing/intel_th/gth.h19
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h30
-rw-r--r--drivers/hwtracing/intel_th/msu.c407
-rw-r--r--drivers/hwtracing/intel_th/msu.h10
-rw-r--r--drivers/hwtracing/intel_th/pci.c37
-rw-r--r--drivers/hwtracing/stm/core.c9
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c22
-rw-r--r--drivers/i2c/busses/Kconfig25
-rw-r--r--drivers/i2c/busses/Makefile5
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c483
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c367
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2.h219
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c376
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c (renamed from drivers/i2c/busses/i2c-at91.c)480
-rw-r--r--drivers/i2c/busses/i2c-at91-slave.c143
-rw-r--r--drivers/i2c/busses/i2c-at91.h174
-rw-r--r--drivers/i2c/busses/i2c-axxia.c57
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c764
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c18
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c5
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c11
-rw-r--r--drivers/i2c/busses/i2c-imx.c8
-rw-r--r--drivers/i2c/busses/i2c-isch.c1
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c255
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c3
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c7
-rw-r--r--drivers/i2c/busses/i2c-ocores.c16
-rw-r--r--drivers/i2c/busses/i2c-omap.c76
-rw-r--r--drivers/i2c/busses/i2c-piix4.c15
-rw-r--r--drivers/i2c/busses/i2c-rcar.c30
-rw-r--r--drivers/i2c/busses/i2c-riic.c43
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c4
-rw-r--r--drivers/i2c/busses/i2c-stu300.c25
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c25
-rw-r--r--drivers/i2c/i2c-core-base.c29
-rw-r--r--drivers/i2c/i2c-core-smbus.c29
-rw-r--r--drivers/i2c/i2c-core.h36
-rw-r--r--drivers/i2c/i2c-mux.c6
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c6
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c8
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c106
-rw-r--r--drivers/i3c/master.c10
-rw-r--r--drivers/i3c/master/dw-i3c-master.c12
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-cd_ioctl.c5
-rw-r--r--drivers/ide/ide-gd.c6
-rw-r--r--drivers/ide/ide-probe.c3
-rw-r--r--drivers/ide/tx4939ide.c2
-rw-r--r--drivers/iio/Kconfig26
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/Kconfig50
-rw-r--r--drivers/iio/accel/bma180.c18
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c23
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c40
-rw-r--r--drivers/iio/accel/kxcjk-1013.c15
-rw-r--r--drivers/iio/accel/kxsd9.c4
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/accel/st_accel.h2
-rw-r--r--drivers/iio/accel/st_accel_core.c78
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/adc/Kconfig48
-rw-r--r--drivers/iio/adc/Makefile2
-rw-r--r--drivers/iio/adc/ad7124.c2
-rw-r--r--drivers/iio/adc/ad7606.c120
-rw-r--r--drivers/iio/adc/ad7606.h25
-rw-r--r--drivers/iio/adc/ad7606_spi.c2
-rw-r--r--drivers/iio/adc/ad7780.c (renamed from drivers/staging/iio/adc/ad7780.c)179
-rw-r--r--drivers/iio/adc/ad7923.c24
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c17
-rw-r--r--drivers/iio/adc/at91_adc.c28
-rw-r--r--drivers/iio/adc/imx7d_adc.c175
-rw-r--r--drivers/iio/adc/ingenic-adc.c4
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c60
-rw-r--r--drivers/iio/adc/meson_saradc.c8
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c1
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c628
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c180
-rw-r--r--drivers/iio/adc/stmpe-adc.c5
-rw-r--r--drivers/iio/adc/ti-ads7950.c219
-rw-r--r--drivers/iio/adc/ti-ads8344.c204
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c3
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c10
-rw-r--r--drivers/iio/chemical/Kconfig26
-rw-r--r--drivers/iio/chemical/bme680.h6
-rw-r--r--drivers/iio/chemical/bme680_core.c54
-rw-r--r--drivers/iio/chemical/bme680_i2c.c21
-rw-r--r--drivers/iio/chemical/bme680_spi.c115
-rw-r--r--drivers/iio/chemical/pms7003.c5
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c19
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c22
-rw-r--r--drivers/iio/common/ms_sensors/Kconfig2
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio.c2
-rw-r--r--drivers/iio/counter/104-quad-8.c631
-rw-r--r--drivers/iio/counter/Kconfig34
-rw-r--r--drivers/iio/counter/Makefile8
-rw-r--r--drivers/iio/counter/stm32-lptimer-cnt.c382
-rw-r--r--drivers/iio/dac/ad5064.c15
-rw-r--r--drivers/iio/dac/ad5758.c55
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/dac/ti-dac5571.c2
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c5
-rw-r--r--drivers/iio/frequency/ad9523.c16
-rw-r--r--drivers/iio/gyro/Kconfig22
-rw-r--r--drivers/iio/gyro/Makefile3
-rw-r--r--drivers/iio/gyro/bmg160_core.c27
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c9
-rw-r--r--drivers/iio/gyro/fxas21002c.h150
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c1004
-rw-r--r--drivers/iio/gyro/fxas21002c_i2c.c69
-rw-r--r--drivers/iio/gyro/fxas21002c_spi.c70
-rw-r--r--drivers/iio/gyro/itg3200_core.c20
-rw-r--r--drivers/iio/gyro/mpu3050-core.c13
-rw-r--r--drivers/iio/humidity/Kconfig20
-rw-r--r--drivers/iio/imu/Makefile2
-rw-r--r--drivers/iio/imu/adis16400.c (renamed from drivers/iio/imu/adis16400_core.c)232
-rw-r--r--drivers/iio/imu/adis16400.h215
-rw-r--r--drivers/iio/imu/adis16400_buffer.c101
-rw-r--r--drivers/iio/imu/adis16480.c435
-rw-r--r--drivers/iio/imu/adis_buffer.c40
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c10
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c12
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c157
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c15
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c15
-rw-r--r--drivers/iio/industrialio-buffer.c25
-rw-r--r--drivers/iio/industrialio-core.c50
-rw-r--r--drivers/iio/industrialio-trigger.c5
-rw-r--r--drivers/iio/inkern.c22
-rw-r--r--drivers/iio/light/Kconfig274
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c12
-rw-r--r--drivers/iio/light/vcnl4000.c77
-rw-r--r--drivers/iio/magnetometer/ak8974.c5
-rw-r--r--drivers/iio/magnetometer/ak8975.c13
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c21
-rw-r--r--drivers/iio/magnetometer/hmc5843.h1
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c20
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c7
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c7
-rw-r--r--drivers/iio/potentiometer/Kconfig34
-rw-r--r--drivers/iio/potentiostat/lmp91000.c14
-rw-r--r--drivers/iio/pressure/bmp280-core.c6
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c11
-rw-r--r--drivers/iio/proximity/Kconfig23
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/as3935.c50
-rw-r--r--drivers/iio/proximity/mb1232.c272
-rw-r--r--drivers/iio/proximity/srf04.c38
-rw-r--r--drivers/iio/temperature/Kconfig24
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/max31856.c356
-rw-r--r--drivers/iio/trigger/iio-trig-loop.c2
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/addr.c25
-rw-r--r--drivers/infiniband/core/cache.c145
-rw-r--r--drivers/infiniband/core/cm.c94
-rw-r--r--drivers/infiniband/core/cm_msgs.h22
-rw-r--r--drivers/infiniband/core/cma.c83
-rw-r--r--drivers/infiniband/core/core_priv.h18
-rw-r--r--drivers/infiniband/core/cq.c21
-rw-r--r--drivers/infiniband/core/device.c632
-rw-r--r--drivers/infiniband/core/iwcm.c35
-rw-r--r--drivers/infiniband/core/iwpm_util.c8
-rw-r--r--drivers/infiniband/core/mad.c87
-rw-r--r--drivers/infiniband/core/mad_priv.h4
-rw-r--r--drivers/infiniband/core/multicast.c1
-rw-r--r--drivers/infiniband/core/nldev.c160
-rw-r--r--drivers/infiniband/core/rdma_core.c200
-rw-r--r--drivers/infiniband/core/rdma_core.h11
-rw-r--r--drivers/infiniband/core/sa_query.c52
-rw-r--r--drivers/infiniband/core/sysfs.c93
-rw-r--r--drivers/infiniband/core/ucm.c37
-rw-r--r--drivers/infiniband/core/ucma.c2
-rw-r--r--drivers/infiniband/core/umem.c182
-rw-r--r--drivers/infiniband/core/umem_odp.c25
-rw-r--r--drivers/infiniband/core/user_mad.c24
-rw-r--r--drivers/infiniband/core/uverbs.h8
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c99
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c29
-rw-r--r--drivers/infiniband/core/uverbs_main.c124
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c52
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c6
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c12
-rw-r--r--drivers/infiniband/core/uverbs_std_types_dm.c10
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c6
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c9
-rw-r--r--drivers/infiniband/core/verbs.c233
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig12
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c194
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h36
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c39
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h13
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c43
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h10
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c56
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h38
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c18
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c97
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c69
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c23
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c210
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c18
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h96
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c25
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c77
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c77
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c8
-rw-r--r--drivers/infiniband/hw/efa/Kconfig15
-rw-r--r--drivers/infiniband/hw/efa/Makefile9
-rw-r--r--drivers/infiniband/hw/efa/efa.h163
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h794
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h136
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c1160
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h144
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c692
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h270
-rw-r--r--drivers/infiniband/hw/efa/efa_common_defs.h18
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c533
-rw-r--r--drivers/infiniband/hw/efa/efa_regs_defs.h113
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c1825
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c77
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h3
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h3
-rw-r--r--drivers/infiniband/hw/hfi1/common.h2
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c82
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c19
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c3
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h8
-rw-r--r--drivers/infiniband/hw/hfi1/init.c59
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.h6
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c1
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c6
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c27
-rw-r--r--drivers/infiniband/hw/hfi1/rc.h8
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c305
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.h2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h4
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tid.h12
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c15
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h1
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c19
-rw-r--r--drivers/infiniband/hw/hns/Makefile4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c36
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c68
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h52
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c398
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c320
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c55
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c126
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c52
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c21
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_osdep.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c123
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h3
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c103
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c36
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c40
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c33
-rw-r--r--drivers/infiniband/hw/mlx4/main.c13
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h41
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c7
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c56
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c59
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c33
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c159
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h8
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c47
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c45
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c99
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c109
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h13
-rw-r--r--drivers/infiniband/hw/mlx5/main.c692
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h118
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c53
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c135
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c175
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c76
-rw-r--r--drivers/infiniband/hw/mlx5/srq.h7
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c35
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c179
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c23
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c168
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c37
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c23
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c25
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c128
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h24
-rw-r--r--drivers/infiniband/hw/qedr/main.c57
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h11
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c10
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c11
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c141
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h27
-rw-r--r--drivers/infiniband/hw/qib/qib.h4
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c20
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c56
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c15
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h12
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c12
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c17
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c30
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c43
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c56
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h27
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c38
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h8
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.h3
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c16
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.h6
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c26
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h7
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.h5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c27
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/rc.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c49
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.h7
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_qp.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_rc.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c14
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c46
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c14
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c90
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/iser/Kconfig4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c7
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c6
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c60
-rw-r--r--drivers/input/evdev.c9
-rw-r--r--drivers/input/joydev.c2
-rw-r--r--drivers/input/keyboard/Kconfig15
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c8
-rw-r--r--drivers/input/keyboard/qt1050.c598
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c36
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c38
-rw-r--r--drivers/input/misc/Kconfig21
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/gpio-vibra.c207
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c20
-rw-r--r--drivers/input/misc/max77650-onkey.c121
-rw-r--r--drivers/input/misc/uinput.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c25
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/rmi4/rmi_driver.c6
-rw-r--r--drivers/input/rmi4/rmi_f11.c2
-rw-r--r--drivers/input/rmi4/rmi_f54.c21
-rw-r--r--drivers/input/serio/Kconfig1
-rw-r--r--drivers/input/serio/hyperv-keyboard.c2
-rw-r--r--drivers/input/serio/i8042.c3
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/touchscreen/Kconfig10
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c23
-rw-r--r--drivers/input/touchscreen/goodix.c54
-rw-r--r--drivers/input/touchscreen/iqs5xx.c1133
-rw-r--r--drivers/interconnect/core.c13
-rw-r--r--drivers/iommu/Kconfig25
-rw-r--r--drivers/iommu/amd_iommu.c54
-rw-r--r--drivers/iommu/amd_iommu_init.c10
-rw-r--r--drivers/iommu/amd_iommu_types.h6
-rw-r--r--drivers/iommu/arm-smmu-regs.h2
-rw-r--r--drivers/iommu/arm-smmu-v3.c355
-rw-r--r--drivers/iommu/arm-smmu.c11
-rw-r--r--drivers/iommu/dma-iommu.c47
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel-iommu.c586
-rw-r--r--drivers/iommu/intel-pasid.c4
-rw-r--r--drivers/iommu/intel-svm.c19
-rw-r--r--drivers/iommu/intel_irq_remapping.c9
-rw-r--r--drivers/iommu/io-pgtable-arm.c91
-rw-r--r--drivers/iommu/io-pgtable.c1
-rw-r--r--drivers/iommu/iommu.c211
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu.c8
-rw-r--r--drivers/iommu/tegra-smmu.c41
-rw-r--r--drivers/irqchip/Kconfig6
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-ath79-misc.c11
-rw-r--r--drivers/irqchip/irq-gic-v2m.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c6
-rw-r--r--drivers/irqchip/irq-gic-v3.c10
-rw-r--r--drivers/irqchip/irq-gic.c4
-rw-r--r--drivers/irqchip/irq-ixp4xx.c403
-rw-r--r--drivers/irqchip/irq-ls1x.c1
-rw-r--r--drivers/isdn/capi/capi.c2
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c9
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c6
-rw-r--r--drivers/isdn/hisax/config.c6
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c9
-rw-r--r--drivers/isdn/mISDN/socket.c4
-rw-r--r--drivers/leds/Kconfig39
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c1
-rw-r--r--drivers/leds/led-core.c5
-rw-r--r--drivers/leds/leds-as3645a.c93
-rw-r--r--drivers/leds/leds-blinkm.c1
-rw-r--r--drivers/leds/leds-lm3532.c683
-rw-r--r--drivers/leds/leds-lt3593.c64
-rw-r--r--drivers/leds/leds-max77650.c147
-rw-r--r--drivers/leds/leds-pca955x.c57
-rw-r--r--drivers/leds/leds-pca963x.c66
-rw-r--r--drivers/leds/uleds.c2
-rw-r--r--drivers/lightnvm/pblk-read.c50
-rw-r--r--drivers/mailbox/Kconfig10
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/armada-37xx-rwtm-mailbox.c225
-rw-r--r--drivers/mailbox/imx-mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c2
-rw-r--r--drivers/mailbox/stm32-ipcc.c13
-rw-r--r--drivers/md/bcache/alloc.c5
-rw-r--r--drivers/md/bcache/btree.c12
-rw-r--r--drivers/md/bcache/journal.c42
-rw-r--r--drivers/md/bcache/request.c41
-rw-r--r--drivers/md/bcache/request.h2
-rw-r--r--drivers/md/bcache/super.c84
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/util.h26
-rw-r--r--drivers/md/dm-bufio.c15
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-crypt.c6
-rw-r--r--drivers/md/dm-exception-store.h28
-rw-r--r--drivers/md/dm-init.c2
-rw-r--r--drivers/md/dm-integrity.c26
-rw-r--r--drivers/md/dm-rq.c11
-rw-r--r--drivers/md/dm-table.c39
-rw-r--r--drivers/md/dm.c30
-rw-r--r--drivers/md/md-bitmap.c8
-rw-r--r--drivers/md/md.c199
-rw-r--r--drivers/md/md.h25
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c19
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/Kconfig20
-rw-r--r--drivers/media/Makefile6
-rw-r--r--drivers/media/cec/Kconfig4
-rw-r--r--drivers/media/cec/cec-core.c1
-rw-r--r--drivers/media/cec/cec-notifier.c30
-rw-r--r--drivers/media/common/cx2341x.c151
-rw-r--r--drivers/media/common/siano/Kconfig4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c53
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c6
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c22
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c24
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c10
-rw-r--r--drivers/media/dvb-core/dvbdev.c1
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c3
-rw-r--r--drivers/media/dvb-frontends/dib8000.c4
-rw-r--r--drivers/media/dvb-frontends/dib9000.c6
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c30
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c11
-rw-r--r--drivers/media/dvb-frontends/si2165.c9
-rw-r--r--drivers/media/dvb-frontends/ts2020.c3
-rw-r--r--drivers/media/i2c/Kconfig271
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/cx25840/Kconfig2
-rw-r--r--drivers/media/i2c/et8ek8/Kconfig2
-rw-r--r--drivers/media/i2c/imx214.c10
-rw-r--r--drivers/media/i2c/m5mols/Kconfig2
-rw-r--r--drivers/media/i2c/ov2659.c8
-rw-r--r--drivers/media/i2c/ov6650.c43
-rw-r--r--drivers/media/i2c/ov7670.c32
-rw-r--r--drivers/media/i2c/ov7740.c28
-rw-r--r--drivers/media/i2c/smiapp/Kconfig2
-rw-r--r--drivers/media/i2c/st-mipid02.c1033
-rw-r--r--drivers/media/media-dev-allocator.c135
-rw-r--r--drivers/media/media-devnode.c4
-rw-r--r--drivers/media/media-entity.c33
-rw-r--r--drivers/media/media-request.c20
-rw-r--r--drivers/media/mmc/siano/Kconfig2
-rw-r--r--drivers/media/pci/bt8xx/Kconfig2
-rw-r--r--drivers/media/pci/bt8xx/dst.c3
-rw-r--r--drivers/media/pci/bt8xx/dst_common.h2
-rw-r--r--drivers/media/pci/cobalt/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c2
-rw-r--r--drivers/media/pci/cx18/Kconfig4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/pci/cx23885/Kconfig4
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c5
-rw-r--r--drivers/media/pci/cx25821/Kconfig4
-rw-r--r--drivers/media/pci/cx88/Kconfig10
-rw-r--r--drivers/media/pci/ddbridge/Kconfig4
-rw-r--r--drivers/media/pci/dt3155/Kconfig2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c8
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c1
-rw-r--r--drivers/media/pci/ivtv/Kconfig10
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c2
-rw-r--r--drivers/media/pci/mantis/mantis_i2c.c2
-rw-r--r--drivers/media/pci/meye/Kconfig2
-rw-r--r--drivers/media/pci/netup_unidvb/Kconfig2
-rw-r--r--drivers/media/pci/ngene/Kconfig2
-rw-r--r--drivers/media/pci/saa7134/Kconfig10
-rw-r--r--drivers/media/pci/saa7134/saa7134-go7007.c2
-rw-r--r--drivers/media/pci/saa7146/Kconfig6
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c5
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c5
-rw-r--r--drivers/media/pci/saa7164/Kconfig2
-rw-r--r--drivers/media/pci/solo6x10/Kconfig2
-rw-r--r--drivers/media/pci/tw5864/Kconfig2
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c4
-rw-r--r--drivers/media/pci/tw68/Kconfig2
-rw-r--r--drivers/media/platform/Kconfig90
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c2
-rw-r--r--drivers/media/platform/aspeed-video.c33
-rw-r--r--drivers/media/platform/atmel/Kconfig2
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h2
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c906
-rw-r--r--drivers/media/platform/coda/coda-bit.c3
-rw-r--r--drivers/media/platform/coda/coda-common.c120
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c1
-rw-r--r--drivers/media/platform/davinci/isif.c9
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c3
-rw-r--r--drivers/media/platform/davinci/vpif_display.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/imx-pxp.c4
-rw-r--r--drivers/media/platform/marvell-ccic/Kconfig4
-rw-r--r--drivers/media/platform/meson/Makefile1
-rw-r--r--drivers/media/platform/meson/ao-cec-g12a.c779
-rw-r--r--drivers/media/platform/meson/ao-cec.c16
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c75
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h16
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c10
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c27
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c4
-rw-r--r--drivers/media/platform/omap/Kconfig2
-rw-r--r--drivers/media/platform/pxa_camera.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h4
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig3
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c47
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c120
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c2
-rw-r--r--drivers/media/platform/rcar_drif.c8
-rw-r--r--drivers/media/platform/rcar_fdp1.c28
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c16
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c6
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c1
-rw-r--r--drivers/media/platform/sh_veu.c6
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Kconfig2
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c21
-rw-r--r--drivers/media/platform/sti/delta/delta-ipc.c6
-rw-r--r--drivers/media/platform/stm32/stm32-cec.c11
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c60
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c4
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c14
-rw-r--r--drivers/media/platform/ti-vpe/cal.c12
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c6
-rw-r--r--drivers/media/platform/vicodec/codec-fwht.c121
-rw-r--r--drivers/media/platform/vicodec/codec-fwht.h12
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.c431
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.h7
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c762
-rw-r--r--drivers/media/platform/video-mux.c5
-rw-r--r--drivers/media/platform/vim2m.c69
-rw-r--r--drivers/media/platform/vimc/Kconfig2
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c93
-rw-r--r--drivers/media/platform/vimc/vimc-common.c313
-rw-r--r--drivers/media/platform/vimc/vimc-common.h60
-rw-r--r--drivers/media/platform/vimc/vimc-core.c2
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c98
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c78
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c70
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c40
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.h22
-rw-r--r--drivers/media/platform/vivid/Kconfig6
-rw-r--r--drivers/media/platform/vivid/vivid-core.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_brx.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c84
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h6
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c94
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c3
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgo.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgt.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c62
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h6
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h1
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_uif.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c16
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c83
-rw-r--r--drivers/media/platform/xilinx/Kconfig6
-rw-r--r--drivers/media/radio/Kconfig54
-rw-r--r--drivers/media/radio/si470x/Kconfig6
-rw-r--r--drivers/media/radio/si4713/Kconfig6
-rw-r--r--drivers/media/radio/si4713/si4713.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c14
-rw-r--r--drivers/media/rc/Kconfig80
-rw-r--r--drivers/media/rc/bpf-lirc.c6
-rw-r--r--drivers/media/rc/ir-rcmm-decoder.c1
-rw-r--r--drivers/media/rc/keymaps/Kconfig2
-rw-r--r--drivers/media/rc/keymaps/rc-xbox-dvd.c2
-rw-r--r--drivers/media/rc/lirc_dev.c2
-rw-r--r--drivers/media/rc/rc-main.c2
-rw-r--r--drivers/media/rc/serial_ir.c9
-rw-r--r--drivers/media/rc/xbox_remote.c6
-rw-r--r--drivers/media/spi/Kconfig2
-rw-r--r--drivers/media/usb/airspy/Kconfig2
-rw-r--r--drivers/media/usb/au0828/Kconfig8
-rw-r--r--drivers/media/usb/au0828/au0828-core.c196
-rw-r--r--drivers/media/usb/au0828/au0828-video.c20
-rw-r--r--drivers/media/usb/au0828/au0828.h6
-rw-r--r--drivers/media/usb/cpia2/Kconfig2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c3
-rw-r--r--drivers/media/usb/cx231xx/Kconfig8
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c104
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h12
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c18
-rw-r--r--drivers/media/usb/em28xx/Kconfig8
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c2
-rw-r--r--drivers/media/usb/go7007/Kconfig8
-rw-r--r--drivers/media/usb/go7007/go7007-fw.c4
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c16
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/gspca/Kconfig2
-rw-r--r--drivers/media/usb/gspca/gspca.c12
-rw-r--r--drivers/media/usb/hackrf/Kconfig2
-rw-r--r--drivers/media/usb/hdpvr/Kconfig2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c10
-rw-r--r--drivers/media/usb/pulse8-cec/Kconfig2
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c4
-rw-r--r--drivers/media/usb/pvrusb2/Kconfig8
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.h1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/pwc/Kconfig4
-rw-r--r--drivers/media/usb/pwc/pwc-ctrl.c17
-rw-r--r--drivers/media/usb/rainshadow-cec/Kconfig2
-rw-r--r--drivers/media/usb/siano/Kconfig2
-rw-r--r--drivers/media/usb/stk1160/Kconfig2
-rw-r--r--drivers/media/usb/stkwebcam/Kconfig2
-rw-r--r--drivers/media/usb/tm6000/Kconfig4
-rw-r--r--drivers/media/usb/usbtv/Kconfig2
-rw-r--r--drivers/media/usb/usbvision/Kconfig2
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c3
-rw-r--r--drivers/media/usb/uvc/Kconfig4
-rw-r--r--drivers/media/usb/zr364xx/Kconfig2
-rw-r--r--drivers/media/v4l2-core/Kconfig8
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c186
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c22
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c6
-rw-r--r--drivers/memory/Makefile7
-rw-r--r--drivers/memory/Makefile.asm-offsets4
-rw-r--r--drivers/memory/atmel-ebi.c37
-rw-r--r--drivers/memstick/host/jmb38x_ms.c9
-rw-r--r--drivers/memstick/host/tifm_ms.c5
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptctl.c2
-rw-r--r--drivers/message/fusion/mptsas.c36
-rw-r--r--drivers/message/fusion/mptscsih.c4
-rw-r--r--drivers/message/fusion/mptspi.c5
-rw-r--r--drivers/mfd/Kconfig101
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/ab8500-debugfs.c2
-rw-r--r--drivers/mfd/altera-sysmgr.c211
-rw-r--r--drivers/mfd/atmel-hlcdc.c1
-rw-r--r--drivers/mfd/axp20x-i2c.c2
-rw-r--r--drivers/mfd/axp20x.c16
-rw-r--r--drivers/mfd/cros_ec.c39
-rw-r--r--drivers/mfd/cros_ec_dev.c36
-rw-r--r--drivers/mfd/cs47l35-tables.c2
-rw-r--r--drivers/mfd/cs47l90-tables.c2
-rw-r--r--drivers/mfd/da9063-core.c28
-rw-r--r--drivers/mfd/da9063-i2c.c10
-rw-r--r--drivers/mfd/da9063-irq.c10
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel-lpss.c7
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c10
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c1
-rw-r--r--drivers/mfd/max77620.c87
-rw-r--r--drivers/mfd/max77650.c232
-rw-r--r--drivers/mfd/mfd-core.c13
-rw-r--r--drivers/mfd/omap-usb-tll.c1
-rw-r--r--drivers/mfd/rk808.c9
-rw-r--r--drivers/mfd/sec-core.c59
-rw-r--r--drivers/mfd/sec-irq.c3
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c42
-rw-r--r--drivers/mfd/ssbi.c6
-rw-r--r--drivers/mfd/stmfx.c545
-rw-r--r--drivers/mfd/sun6i-prcm.c3
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/t7l66xb.c12
-rw-r--r--drivers/mfd/tc6387xb.c12
-rw-r--r--drivers/mfd/tc6393xb.c23
-rw-r--r--drivers/mfd/ti-lmu.c11
-rw-r--r--drivers/mfd/tps65912-spi.c1
-rw-r--r--drivers/mfd/twl-core.c23
-rw-r--r--drivers/mfd/twl6040.c13
-rw-r--r--drivers/mfd/wm831x-core.c2
-rw-r--r--drivers/mfd/wm8400-core.c6
-rw-r--r--drivers/misc/cardreader/rts5260.c11
-rw-r--r--drivers/misc/cxl/fault.c2
-rw-r--r--drivers/misc/fastrpc.c242
-rw-r--r--drivers/misc/genwqe/card_debugfs.c4
-rw-r--r--drivers/misc/genwqe/card_utils.c2
-rw-r--r--drivers/misc/habanalabs/Makefile2
-rw-r--r--drivers/misc/habanalabs/command_buffer.c13
-rw-r--r--drivers/misc/habanalabs/command_submission.c22
-rw-r--r--drivers/misc/habanalabs/context.c4
-rw-r--r--drivers/misc/habanalabs/debugfs.c96
-rw-r--r--drivers/misc/habanalabs/device.c93
-rw-r--r--drivers/misc/habanalabs/firmware_if.c322
-rw-r--r--drivers/misc/habanalabs/goya/Makefile3
-rw-r--r--drivers/misc/habanalabs/goya/goya.c1218
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h81
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c628
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c15
-rw-r--r--drivers/misc/habanalabs/habanalabs.h220
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c9
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c139
-rw-r--r--drivers/misc/habanalabs/hw_queue.c46
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h2
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h12
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h3
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h306
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/goya.h4
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_async_events.h9
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_coresight.h199
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_fw_if.h2
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h3
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h16
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h23
-rw-r--r--drivers/misc/habanalabs/irq.c14
-rw-r--r--drivers/misc/habanalabs/memory.c197
-rw-r--r--drivers/misc/habanalabs/mmu.c600
-rw-r--r--drivers/misc/habanalabs/pci.c408
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/kgdbts.c4
-rw-r--r--drivers/misc/mei/Kconfig12
-rw-r--r--drivers/misc/mei/Makefile2
-rw-r--r--drivers/misc/mei/bus-fixup.c14
-rw-r--r--drivers/misc/mei/bus.c13
-rw-r--r--drivers/misc/mei/client.c16
-rw-r--r--drivers/misc/mei/client.h14
-rw-r--r--drivers/misc/mei/debugfs.c15
-rw-r--r--drivers/misc/mei/dma-ring.c2
-rw-r--r--drivers/misc/mei/hbm.c15
-rw-r--r--drivers/misc/mei/hbm.h14
-rw-r--r--drivers/misc/mei/hdcp/Kconfig13
-rw-r--r--drivers/misc/mei/hdcp/Makefile2
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c2
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h68
-rw-r--r--drivers/misc/mei/hw-me.c17
-rw-r--r--drivers/misc/mei/hw-me.h16
-rw-r--r--drivers/misc/mei/hw-txe-regs.h63
-rw-r--r--drivers/misc/mei/hw-txe.c14
-rw-r--r--drivers/misc/mei/hw-txe.h14
-rw-r--r--drivers/misc/mei/hw.h14
-rw-r--r--drivers/misc/mei/init.c34
-rw-r--r--drivers/misc/mei/interrupt.c15
-rw-r--r--drivers/misc/mei/main.c80
-rw-r--r--drivers/misc/mei/mei-trace.c14
-rw-r--r--drivers/misc/mei/mei-trace.h14
-rw-r--r--drivers/misc/mei/mei_dev.h17
-rw-r--r--drivers/misc/mei/pci-me.c15
-rw-r--r--drivers/misc/mei/pci-txe.c14
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/ocxl/Makefile3
-rw-r--r--drivers/misc/ocxl/afu_irq.c102
-rw-r--r--drivers/misc/ocxl/config.c13
-rw-r--r--drivers/misc/ocxl/context.c31
-rw-r--r--drivers/misc/ocxl/core.c574
-rw-r--r--drivers/misc/ocxl/file.c182
-rw-r--r--drivers/misc/ocxl/link.c42
-rw-r--r--drivers/misc/ocxl/mmio.c234
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h94
-rw-r--r--drivers/misc/ocxl/pci.c565
-rw-r--r--drivers/misc/ocxl/sysfs.c54
-rw-r--r--drivers/misc/ocxl/trace.h12
-rw-r--r--drivers/misc/pci_endpoint_test.c18
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c1
-rw-r--r--drivers/misc/tifm_7xx1.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c6
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/mmc_ops.c16
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c38
-rw-r--r--drivers/mmc/core/queue.c1
-rw-r--r--drivers/mmc/core/quirks.h2
-rw-r--r--drivers/mmc/core/sd.c8
-rw-r--r--drivers/mmc/host/Kconfig46
-rw-r--r--drivers/mmc/host/alcor.c107
-rw-r--r--drivers/mmc/host/cqhci.c2
-rw-r--r--drivers/mmc/host/cqhci.h4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c419
-rw-r--r--drivers/mmc/host/mmc_spi.c98
-rw-r--r--drivers/mmc/host/mmci.c82
-rw-r--r--drivers/mmc/host/mmci.h32
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.c17
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.h30
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c18
-rw-r--r--drivers/mmc/host/mtk-sd.c97
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mmc/host/of_mmc_spi.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c4
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi.h2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c12
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c11
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c41
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c5
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c47
-rw-r--r--drivers/mmc/host/sdhci-omap.c40
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c98
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c362
-rw-r--r--drivers/mmc/host/sdhci.c177
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sdhci_am654.c22
-rw-r--r--drivers/mmc/host/tifm_sd.c3
-rw-r--r--drivers/mmc/host/tmio_mmc.h6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c6
-rw-r--r--drivers/mmc/host/usdhi6rol0.c9
-rw-r--r--drivers/mmc/host/via-sdmmc.c10
-rw-r--r--drivers/mtd/Kconfig20
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/afs.c266
-rw-r--r--drivers/mtd/bcm63xxpart.c163
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c8
-rw-r--r--drivers/mtd/chips/cfi_util.c6
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/devices/phram.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c1
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/physmap-core.c2
-rw-r--r--drivers/mtd/maps/physmap-gemini.c2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/uclinux.c8
-rw-r--r--drivers/mtd/mtdpart.c2
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/core.c34
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c5
-rw-r--r--drivers/mtd/nand/onenand/onenand_bbt.c3
-rw-r--r--drivers/mtd/nand/raw/Kconfig393
-rw-r--r--drivers/mtd/nand/raw/Makefile9
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c127
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c5
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.h6
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c7
-rw-r--r--drivers/mtd/nand/raw/denali.c1152
-rw-r--r--drivers/mtd/nand/raw/denali.h117
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c98
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c38
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c7
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c201
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c4
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c19
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h1
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/Kconfig50
-rw-r--r--drivers/mtd/nand/raw/ingenic/Makefile7
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c166
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.h83
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand.c530
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4725b_bch.c295
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4740_ecc.c197
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4740_nand.c (renamed from drivers/mtd/nand/raw/jz4740_nand.c)7
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4780_bch.c (renamed from drivers/mtd/nand/raw/jz4780_bch.c)182
-rw-r--r--drivers/mtd/nand/raw/internals.h3
-rw-r--r--drivers/mtd/nand/raw/jz4780_bch.h43
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c415
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c63
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c30
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c4
-rw-r--r--drivers/mtd/nand/raw/nand_amd.c19
-rw-r--r--drivers/mtd/nand/raw/nand_base.c324
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c73
-rw-r--r--drivers/mtd/nand/raw/nand_esmt.c19
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c94
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c27
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c2
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c16
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c35
-rw-r--r--drivers/mtd/nand/raw/nand_samsung.c46
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c21
-rw-r--r--drivers/mtd/nand/raw/nandsim.c144
-rw-r--r--drivers/mtd/nand/raw/nuc900_nand.c3
-rw-r--r--drivers/mtd/nand/raw/omap2.c4
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c8
-rw-r--r--drivers/mtd/nand/raw/r852.c2
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c13
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c90
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c8
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c1
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c5
-rw-r--r--drivers/mtd/nand/spi/core.c169
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c8
-rw-r--r--drivers/mtd/nand/spi/macronix.c4
-rw-r--r--drivers/mtd/nand/spi/micron.c2
-rw-r--r--drivers/mtd/nand/spi/toshiba.c12
-rw-r--r--drivers/mtd/nand/spi/winbond.c4
-rw-r--r--drivers/mtd/parsers/Kconfig27
-rw-r--r--drivers/mtd/parsers/Makefile2
-rw-r--r--drivers/mtd/parsers/afs.c410
-rw-r--r--drivers/mtd/parsers/parser_imagetag.c222
-rw-r--r--drivers/mtd/sm_ftl.c12
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c1
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c8
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c10
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c16
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/appletalk/ipddp.c6
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/bonding/bond_netlink.c8
-rw-r--r--drivers/net/bonding/bond_options.c7
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/dsa/Kconfig4
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c34
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/bcm_sf2.c9
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/lantiq_gswip.c812
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c1
-rw-r--r--drivers/net/dsa/mt7530.c20
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c217
-rw-r--r--drivers/net/dsa/mv88e6060.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c287
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h11
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.c158
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.h59
-rw-r--r--drivers/net/dsa/sja1105/Kconfig17
-rw-r--r--drivers/net/dsa/sja1105/Makefile9
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h159
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c601
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c532
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.h43
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c419
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c1679
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c591
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c987
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h253
-rw-r--r--drivers/net/dummy.c15
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c3
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c12
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c35
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c6
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c130
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c41
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c121
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c48
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c188
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h34
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c56
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h37
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c36
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c5
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c3
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h2
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c71
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h10
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c81
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c343
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h263
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c25
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c13
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c19
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c89
-rw-r--r--drivers/net/ethernet/cavium/Kconfig1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c1
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c42
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c30
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c41
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c107
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c10
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c9
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/ethoc.c2
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c88
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h25
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c97
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c32
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c55
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h49
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c953
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c207
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c110
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c1017
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c910
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h71
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c73
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c52
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c148
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c2
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c207
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h22
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c9
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h46
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c355
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c481
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c85
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c441
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c63
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c171
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c3
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h110
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h192
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c335
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c1392
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h179
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c551
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h61
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c366
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c768
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c824
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c273
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c524
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c711
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c720
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h28
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c71
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h68
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h17
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c839
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c456
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c3
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c19
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h52
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c704
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h138
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c253
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c32
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c104
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c306
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c224
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c485
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c233
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c258
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c494
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c389
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c29
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c4
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c541
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c24
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c4
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile4
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/cls.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/ctrl.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c236
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h33
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h17
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.c220
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.h83
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c206
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h25
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c155
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h103
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c36
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c117
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c618
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c366
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h23
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c133
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c131
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c62
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h8
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c91
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c96
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c32
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c17
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c844
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c29
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c5
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c9
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/sfc/falcon/io.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c4
-rw-r--r--drivers/net/ethernet/sfc/io.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c1
-rw-r--r--drivers/net/ethernet/sfc/tx.c12
-rw-r--r--drivers/net/ethernet/silan/sc92031.c15
-rw-r--r--drivers/net/ethernet/socionext/netsec.c11
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c10
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig19
-rw-r--r--drivers/net/ethernet/ti/Makefile11
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c9
-rw-r--r--drivers/net/ethernet/ti/cpsw.c1544
-rw-r--r--drivers/net/ethernet/ti/cpsw.h9
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c55
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h12
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c719
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c132
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h429
-rw-r--r--drivers/net/ethernet/ti/cpsw_sl.c328
-rw-r--r--drivers/net/ethernet/ti/cpsw_sl.h73
-rw-r--r--drivers/net/ethernet/ti/cpts.c14
-rw-r--r--drivers/net/ethernet/ti/cpts.h14
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c37
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h13
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c32
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c45
-rw-r--r--drivers/net/ethernet/ti/netcp.h10
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c12
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c10
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c9
-rw-r--r--drivers/net/ethernet/ti/netcp_xgbepcsr.c9
-rw-r--r--drivers/net/ethernet/via/via-rhine.c3
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c8
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c15
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig5
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h26
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c531
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c53
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c44
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c14
-rw-r--r--drivers/net/geneve.c3
-rw-r--r--drivers/net/gtp.c7
-rw-r--r--drivers/net/hippi/rrunner.c4
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c19
-rw-r--r--drivers/net/hyperv/netvsc_drv.c42
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c31
-rw-r--r--drivers/net/ieee802154/mcr20a.c6
-rw-r--r--drivers/net/loopback.c14
-rw-r--r--drivers/net/macsec.c78
-rw-r--r--drivers/net/macvlan.c52
-rw-r--r--drivers/net/net_failover.c8
-rw-r--r--drivers/net/netdevsim/Makefile6
-rw-r--r--drivers/net/netdevsim/bpf.c107
-rw-r--r--drivers/net/netdevsim/bus.c341
-rw-r--r--drivers/net/netdevsim/dev.c447
-rw-r--r--drivers/net/netdevsim/devlink.c295
-rw-r--r--drivers/net/netdevsim/fib.c102
-rw-r--r--drivers/net/netdevsim/ipsec.c3
-rw-r--r--drivers/net/netdevsim/netdev.c428
-rw-r--r--drivers/net/netdevsim/netdevsim.h145
-rw-r--r--drivers/net/netdevsim/sdev.c69
-rw-r--r--drivers/net/phy/Kconfig19
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/aquantia_main.c526
-rw-r--r--drivers/net/phy/asix.c2
-rw-r--r--drivers/net/phy/at803x.c32
-rw-r--r--drivers/net/phy/bcm-cygnus.c149
-rw-r--r--drivers/net/phy/bcm-phy-lib.c52
-rw-r--r--drivers/net/phy/bcm-phy-lib.h20
-rw-r--r--drivers/net/phy/bcm63xx.c4
-rw-r--r--drivers/net/phy/bcm7xxx.c82
-rw-r--r--drivers/net/phy/broadcom.c34
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c8
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/dp83822.c2
-rw-r--r--drivers/net/phy/dp83848.c2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/intel-xway.c20
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c148
-rw-r--r--drivers/net/phy/marvell10g.c15
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c7
-rw-r--r--drivers/net/phy/mdio-mux-meson-g12a.c380
-rw-r--r--drivers/net/phy/mdio_bus.c33
-rw-r--r--drivers/net/phy/mdio_device.c13
-rw-r--r--drivers/net/phy/meson-gxl.c19
-rw-r--r--drivers/net/phy/micrel.c72
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/mscc.c479
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy-c45.c37
-rw-r--r--drivers/net/phy/phy-core.c272
-rw-r--r--drivers/net/phy/phy.c47
-rw-r--r--drivers/net/phy/phy_device.c208
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek.c106
-rw-r--r--drivers/net/phy/rockchip.c33
-rw-r--r--drivers/net/phy/smsc.c12
-rw-r--r--drivers/net/phy/spi_ks8995.c9
-rw-r--r--drivers/net/phy/ste10Xp.c4
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c34
-rw-r--r--drivers/net/ppp/ppp_mppe.c1
-rw-r--r--drivers/net/sb1000.c9
-rw-r--r--drivers/net/slip/slhc.c2
-rw-r--r--drivers/net/team/team.c67
-rw-r--r--drivers/net/thunderbolt.c3
-rw-r--r--drivers/net/tun.c37
-rw-r--r--drivers/net/usb/aqc111.c31
-rw-r--r--drivers/net/usb/cdc_mbim.c1
-rw-r--r--drivers/net/usb/ipheth.c60
-rw-r--r--drivers/net/usb/qmi_wwan.c76
-rw-r--r--drivers/net/usb/r8152.c53
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/veth.c14
-rw-r--r--drivers/net/virtio_net.c21
-rw-r--r--drivers/net/vrf.c23
-rw-r--r--drivers/net/vxlan.c1
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wimax/i2400m/control.c1
-rw-r--r--drivers/net/wimax/i2400m/tx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c39
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c38
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c78
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c35
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c74
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h47
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c24
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h91
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c7
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lp.c13
-rw-r--r--drivers/net/wireless/broadcom/b43/sysfs.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/ilt.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c20
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/phy.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/pio.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c4
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c36
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c58
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c68
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-debug.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h181
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c639
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c303
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c12
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c1
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c94
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h69
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c8
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c164
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h119
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c205
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c98
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c229
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c775
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h300
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c499
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c1655
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h520
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h195
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c150
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h203
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c49
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h44
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c286
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c199
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c266
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c188
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c148
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c379
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h25
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c86
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c379
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c91
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/debug.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c16
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c32
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c23
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c31
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h87
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c117
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h19
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c628
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c124
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c28
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c22
-rw-r--r--drivers/net/wireless/ray_cs.c8
-rw-r--r--drivers/net/wireless/realtek/Kconfig1
-rw-r--r--drivers/net/wireless/realtek/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig54
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile22
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c637
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h52
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c160
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.h26
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c633
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h222
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h211
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c965
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h35
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c481
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c1211
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h1104
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c1211
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h237
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c1727
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h134
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c166
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h421
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c391
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h67
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1594
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h170
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.c20783
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c1890
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h186
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c11753
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.h17
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c151
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h41
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.c120
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.h39
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c367
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h89
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c72
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h34
-rw-r--r--drivers/net/wireless/rndis_wlan.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c199
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c30
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c232
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c129
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c96
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h63
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h44
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h21
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h26
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h5
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h3
-rw-r--r--drivers/net/wireless/st/cw1200/main.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/xen-netback/common.h18
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/xenbus.c17
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/nfc/mei_phy.c18
-rw-r--r--drivers/nfc/microread/mei.c17
-rw-r--r--drivers/nfc/pn533/pn533.c2
-rw-r--r--drivers/nfc/pn544/mei.c15
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c1
-rw-r--r--drivers/nfc/st21nfca/dep.c2
-rw-r--r--drivers/nfc/st95hf/core.c11
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c7
-rw-r--r--drivers/ntb/test/ntb_perf.c3
-rw-r--r--drivers/nvdimm/btt_devs.c18
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c2
-rw-r--r--drivers/nvdimm/label.c29
-rw-r--r--drivers/nvdimm/namespace_devs.c20
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvdimm/pfn_devs.c4
-rw-r--r--drivers/nvdimm/pmem.c8
-rw-r--r--drivers/nvdimm/security.c118
-rw-r--r--drivers/nvme/host/core.c48
-rw-r--r--drivers/nvme/host/fabrics.c1
-rw-r--r--drivers/nvme/host/fc.c22
-rw-r--r--drivers/nvme/host/multipath.c10
-rw-r--r--drivers/nvme/host/nvme.h3
-rw-r--r--drivers/nvme/host/pci.c300
-rw-r--r--drivers/nvme/host/rdma.c10
-rw-r--r--drivers/nvme/host/tcp.c21
-rw-r--r--drivers/nvme/target/Kconfig1
-rw-r--r--drivers/nvme/target/admin-cmd.c5
-rw-r--r--drivers/nvme/target/configfs.c4
-rw-r--r--drivers/nvme/target/core.c38
-rw-r--r--drivers/nvme/target/discovery.c77
-rw-r--r--drivers/nvme/target/fabrics-cmd.c16
-rw-r--r--drivers/nvme/target/fc.c9
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c6
-rw-r--r--drivers/nvme/target/io-cmd-file.c7
-rw-r--r--drivers/nvme/target/loop.c22
-rw-r--r--drivers/nvme/target/nvmet.h5
-rw-r--r--drivers/nvme/target/rdma.c21
-rw-r--r--drivers/nvme/target/tcp.c38
-rw-r--r--drivers/nvmem/Kconfig24
-rw-r--r--drivers/nvmem/Makefile5
-rw-r--r--drivers/nvmem/core.c316
-rw-r--r--drivers/nvmem/imx-iim.c4
-rw-r--r--drivers/nvmem/imx-ocotp.c11
-rw-r--r--drivers/nvmem/mxs-ocotp.c4
-rw-r--r--drivers/nvmem/nvmem-sysfs.c256
-rw-r--r--drivers/nvmem/nvmem.h62
-rw-r--r--drivers/nvmem/stm32-romem.c202
-rw-r--r--drivers/nvmem/sunxi_sid.c115
-rw-r--r--drivers/of/address.c40
-rw-r--r--drivers/of/base.c5
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/of_net.c41
-rw-r--r--drivers/of/of_reserved_mem.c22
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/of/unittest.c13
-rw-r--r--drivers/opp/core.c54
-rw-r--r--drivers/parisc/iosapic.c6
-rw-r--r--drivers/parisc/led.c3
-rw-r--r--drivers/parport/ieee1284.c2
-rw-r--r--drivers/parport/parport_cs.c5
-rw-r--r--drivers/parport/parport_ip32.c18
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/bus.c5
-rw-r--r--drivers/pci/controller/Kconfig1
-rw-r--r--drivers/pci/controller/dwc/Kconfig29
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c3
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c144
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c926
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c93
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c55
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c157
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c64
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h26
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c11
-rw-r--r--drivers/pci/controller/pci-aardvark.c13
-rw-r--r--drivers/pci/controller/pci-host-generic.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c23
-rw-r--r--drivers/pci/controller/pci-tegra.c37
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c2
-rw-r--r--drivers/pci/controller/pcie-iproc.c98
-rw-r--r--drivers/pci/controller/pcie-mediatek.c51
-rw-r--r--drivers/pci/controller/pcie-rcar.c85
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c1
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c9
-rw-r--r--drivers/pci/controller/pcie-xilinx.c12
-rw-r--r--drivers/pci/controller/vmd.c7
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c10
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c10
-rw-r--r--drivers/pci/hotplug/pciehp.h31
-rw-r--r--drivers/pci/hotplug/pciehp_core.c18
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c6
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c17
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c2
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c4
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c3
-rw-r--r--drivers/pci/msi.c6
-rw-r--r--drivers/pci/of.c58
-rw-r--r--drivers/pci/p2pdma.c38
-rw-r--r--drivers/pci/pci-acpi.c183
-rw-r--r--drivers/pci/pci-driver.c14
-rw-r--r--drivers/pci/pci-stub.c10
-rw-r--r--drivers/pci/pci-sysfs.c3
-rw-r--r--drivers/pci/pci.c363
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/Kconfig8
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer.c30
-rw-r--r--drivers/pci/pcie/aer_inject.c20
-rw-r--r--drivers/pci/pcie/aspm.c47
-rw-r--r--drivers/pci/pcie/bw_notification.c14
-rw-r--r--drivers/pci/pcie/dpc.c37
-rw-r--r--drivers/pci/pcie/pme.c10
-rw-r--r--drivers/pci/pcie/portdrv.h4
-rw-r--r--drivers/pci/pcie/portdrv_core.c3
-rw-r--r--drivers/pci/probe.c230
-rw-r--r--drivers/pci/proc.c1
-rw-r--r--drivers/pci/quirks.c98
-rw-r--r--drivers/pci/search.c10
-rw-r--r--drivers/pci/setup-bus.c526
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pci/switch/switchtec.c44
-rw-r--r--drivers/pci/xen-pcifront.c9
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm-cci.c21
-rw-r--r--drivers/perf/arm-ccn.c25
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c865
-rw-r--r--drivers/phy/allwinner/Kconfig9
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c4
-rw-r--r--drivers/phy/amlogic/Kconfig22
-rw-r--r--drivers/phy/amlogic/Makefile2
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c341
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c413
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb2.c11
-rw-r--r--drivers/phy/broadcom/Kconfig11
-rw-r--r--drivers/phy/broadcom/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c394
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c12
-rw-r--r--drivers/phy/hisilicon/Kconfig10
-rw-r--r--drivers/phy/hisilicon/Makefile1
-rw-r--r--drivers/phy/hisilicon/phy-hi3660-usb3.c233
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c1
-rw-r--r--drivers/phy/mediatek/Kconfig10
-rw-r--r--drivers/phy/mediatek/Makefile1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c10
-rw-r--r--drivers/phy/mediatek/phy-mtk-ufs.c245
-rw-r--r--drivers/phy/motorola/Kconfig2
-rw-r--r--drivers/phy/mscc/phy-ocelot-serdes.c240
-rw-r--r--drivers/phy/phy-core.c11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c222
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h12
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c25
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c25
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c59
-rw-r--r--drivers/phy/renesas/Kconfig2
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c130
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c232
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c30
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c4
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3hs.c10
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3ss.c10
-rw-r--r--drivers/phy/tegra/Makefile1
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c899
-rw-r--r--drivers/phy/tegra/xusb.c67
-rw-r--r--drivers/phy/tegra/xusb.h35
-rw-r--r--drivers/phy/ti/Kconfig14
-rw-r--r--drivers/phy/ti/Makefile1
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c658
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c362
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c35
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/cirrus/Kconfig10
-rw-r--r--drivers/pinctrl/cirrus/Makefile2
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c1235
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c25
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-scu.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c18
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c66
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h11
-rw-r--r--drivers/pinctrl/mediatek/Kconfig7
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8183.c50
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8516.c362
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c49
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h11
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8516.h1182
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c21
-rw-r--r--drivers/pinctrl/pinctrl-amd.c4
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c19
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c2
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c965
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c113
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c2
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c18
-rw-r--r--drivers/pinctrl/pinctrl-st.c15
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c819
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c1
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig204
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile15
-rw-r--r--drivers/pinctrl/sh-pfc/core.c130
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c8
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-emev2.c67
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a73a4.c64
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c56
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77470.c136
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c101
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c235
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c132
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c156
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7792.c134
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c127
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c222
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c201
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c225
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c294
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77970.c123
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77980.c135
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c214
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c120
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c152
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c232
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c252
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c52
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c144
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c220
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c200
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c204
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c140
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c244
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c136
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c80
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c32
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h68
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c105
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h14
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp157.c1089
-rw-r--r--drivers/pinctrl/sunxi/Kconfig57
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c96
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h18
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c1
-rw-r--r--drivers/platform/chrome/Kconfig24
-rw-r--r--drivers/platform/chrome/Makefile7
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c2
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c76
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c21
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c258
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c80
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c124
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h51
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c262
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c89
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c53
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/platform/mellanox/Kconfig12
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo-regs.h63
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c1281
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/alienware-wmi.c19
-rw-r--r--drivers/platform/x86/asus-wmi.c37
-rw-r--r--drivers/platform/x86/dell-laptop.c6
-rw-r--r--drivers/platform/x86/dell-rbtn.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c321
-rw-r--r--drivers/platform/x86/intel_mrfld_pwrbtn.c107
-rw-r--r--drivers/platform/x86/intel_pmc_core.c172
-rw-r--r--drivers/platform/x86/intel_pmc_core.h7
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c46
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c8
-rw-r--r--drivers/platform/x86/mlx-platform.c228
-rw-r--r--drivers/platform/x86/pmc_atom.c21
-rw-r--r--drivers/platform/x86/sony-laptop.c8
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c146
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c51
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c25
-rw-r--r--drivers/power/reset/syscon-reboot.c19
-rw-r--r--drivers/power/supply/Kconfig36
-rw-r--r--drivers/power/supply/Makefile5
-rw-r--r--drivers/power/supply/ab8500_bmdata.c1
-rw-r--r--drivers/power/supply/axp20x_usb_power.c179
-rw-r--r--drivers/power/supply/axp288_charger.c4
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c20
-rw-r--r--drivers/power/supply/bq27xxx_battery.c3
-rw-r--r--drivers/power/supply/charger-manager.c3
-rw-r--r--drivers/power/supply/cpcap-battery.c45
-rw-r--r--drivers/power/supply/cpcap-charger.c5
-rw-r--r--drivers/power/supply/goldfish_battery.c2
-rw-r--r--drivers/power/supply/gpio-charger.c57
-rw-r--r--drivers/power/supply/ingenic-battery.c184
-rw-r--r--drivers/power/supply/lt3651-charger.c (renamed from drivers/power/supply/ltc3651-charger.c)123
-rw-r--r--drivers/power/supply/max14656_charger_detector.c27
-rw-r--r--drivers/power/supply/max77650-charger.c368
-rw-r--r--drivers/power/supply/olpc_battery.c171
-rw-r--r--drivers/power/supply/power_supply_core.c38
-rw-r--r--drivers/power/supply/power_supply_sysfs.c12
-rw-r--r--drivers/power/supply/ucs1002_power.c646
-rw-r--r--drivers/pps/clients/pps-gpio.c153
-rw-r--r--drivers/ptp/ptp_qoriq.c3
-rw-r--r--drivers/pwm/Kconfig16
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c11
-rw-r--r--drivers/pwm/pwm-berlin.c1
-rw-r--r--drivers/pwm/pwm-ep93xx.c2
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c44
-rw-r--r--drivers/pwm/pwm-img.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c449
-rw-r--r--drivers/pwm/pwm-imx27.c4
-rw-r--r--drivers/pwm/pwm-meson.c66
-rw-r--r--drivers/pwm/pwm-pca9685.c1
-rw-r--r--drivers/pwm/pwm-samsung.c5
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/pwm/sysfs.c16
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/rapidio/rio_cm.c8
-rw-r--r--drivers/ras/cec.c4
-rw-r--r--drivers/regulator/88pm800.c18
-rw-r--r--drivers/regulator/88pm8607.c43
-rw-r--r--drivers/regulator/Kconfig11
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/ab3100.c45
-rw-r--r--drivers/regulator/ab8500-ext.c49
-rw-r--r--drivers/regulator/ab8500.c20
-rw-r--r--drivers/regulator/act8865-regulator.c147
-rw-r--r--drivers/regulator/anatop-regulator.c63
-rw-r--r--drivers/regulator/arizona-ldo1.c19
-rw-r--r--drivers/regulator/arizona-micsupp.c19
-rw-r--r--drivers/regulator/as3711-regulator.c37
-rw-r--r--drivers/regulator/as3722-regulator.c287
-rw-r--r--drivers/regulator/axp20x-regulator.c23
-rw-r--r--drivers/regulator/bcm590xx-regulator.c105
-rw-r--r--drivers/regulator/bd718x7-regulator.c4
-rw-r--r--drivers/regulator/core.c30
-rw-r--r--drivers/regulator/cpcap-regulator.c15
-rw-r--r--drivers/regulator/da903x.c16
-rw-r--r--drivers/regulator/da9052-regulator.c55
-rw-r--r--drivers/regulator/da9055-regulator.c89
-rw-r--r--drivers/regulator/da9062-regulator.c146
-rw-r--r--drivers/regulator/da9063-regulator.c134
-rw-r--r--drivers/regulator/da9210-regulator.c23
-rw-r--r--drivers/regulator/da9210-regulator.h17
-rw-r--r--drivers/regulator/da9211-regulator.c24
-rw-r--r--drivers/regulator/da9211-regulator.h11
-rw-r--r--drivers/regulator/db8500-prcmu.c143
-rw-r--r--drivers/regulator/dbx500-prcmu.h4
-rw-r--r--drivers/regulator/fan53555.c60
-rw-r--r--drivers/regulator/gpio-regulator.c22
-rw-r--r--drivers/regulator/hi6421-regulator.c232
-rw-r--r--drivers/regulator/hi6421v530-regulator.c26
-rw-r--r--drivers/regulator/hi655x-regulator.c37
-rw-r--r--drivers/regulator/lm363x-regulator.c8
-rw-r--r--drivers/regulator/lp8755.c15
-rw-r--r--drivers/regulator/lp87565-regulator.c49
-rw-r--r--drivers/regulator/ltc3589.c269
-rw-r--r--drivers/regulator/ltc3676.c10
-rw-r--r--drivers/regulator/max14577-regulator.c55
-rw-r--r--drivers/regulator/max77620-regulator.c2
-rw-r--r--drivers/regulator/max77650-regulator.c2
-rw-r--r--drivers/regulator/max8925-regulator.c76
-rw-r--r--drivers/regulator/max8998.c300
-rw-r--r--drivers/regulator/mcp16502.c67
-rw-r--r--drivers/regulator/mt6311-regulator.c17
-rw-r--r--drivers/regulator/mt6311-regulator.h10
-rw-r--r--drivers/regulator/mt6323-regulator.c32
-rw-r--r--drivers/regulator/mt6380-regulator.c25
-rw-r--r--drivers/regulator/mt6397-regulator.c33
-rw-r--r--drivers/regulator/of_regulator.c5
-rw-r--r--drivers/regulator/palmas-regulator.c12
-rw-r--r--drivers/regulator/pv88060-regulator.c22
-rw-r--r--drivers/regulator/pv88060-regulator.h11
-rw-r--r--drivers/regulator/pv88080-regulator.c22
-rw-r--r--drivers/regulator/pv88080-regulator.h11
-rw-r--r--drivers/regulator/pv88090-regulator.c22
-rw-r--r--drivers/regulator/pv88090-regulator.h11
-rw-r--r--drivers/regulator/rc5t583-regulator.c25
-rw-r--r--drivers/regulator/rn5t618-regulator.c8
-rw-r--r--drivers/regulator/s2mpa01.c41
-rw-r--r--drivers/regulator/sc2731-regulator.c2
-rw-r--r--drivers/regulator/sky81452-regulator.c26
-rw-r--r--drivers/regulator/stm32-pwr.c186
-rw-r--r--drivers/regulator/sy8106a-regulator.c40
-rw-r--r--drivers/regulator/tps6507x-regulator.c113
-rw-r--r--drivers/regulator/tps65086-regulator.c4
-rw-r--r--drivers/regulator/tps65132-regulator.c29
-rw-r--r--drivers/regulator/tps65217-regulator.c9
-rw-r--r--drivers/regulator/tps65218-regulator.c56
-rw-r--r--drivers/regulator/tps6524x-regulator.c11
-rw-r--r--drivers/regulator/tps80031-regulator.c48
-rw-r--r--drivers/regulator/twl-regulator.c6
-rw-r--r--drivers/regulator/vctrl-regulator.c4
-rw-r--r--drivers/regulator/vexpress-regulator.c72
-rw-r--r--drivers/regulator/wm831x-dcdc.c23
-rw-r--r--drivers/regulator/wm831x-isink.c66
-rw-r--r--drivers/regulator/wm831x-ldo.c21
-rw-r--r--drivers/regulator/wm8350-regulator.c102
-rw-r--r--drivers/regulator/wm8400-regulator.c39
-rw-r--r--drivers/regulator/wm8994-regulator.c19
-rw-r--r--drivers/reset/reset-meson-audio-arb.c1
-rw-r--r--drivers/rtc/Kconfig23
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/class.c21
-rw-r--r--drivers/rtc/dev.c20
-rw-r--r--drivers/rtc/hctosys.c10
-rw-r--r--drivers/rtc/interface.c107
-rw-r--r--drivers/rtc/lib.c30
-rw-r--r--drivers/rtc/nvmem.c7
-rw-r--r--drivers/rtc/proc.c21
-rw-r--r--drivers/rtc/rtc-88pm80x.c14
-rw-r--r--drivers/rtc/rtc-88pm860x.c2
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c189
-rw-r--r--drivers/rtc/rtc-ab3100.c24
-rw-r--r--drivers/rtc/rtc-abx80x.c43
-rw-r--r--drivers/rtc/rtc-aspeed.c136
-rw-r--r--drivers/rtc/rtc-at91sam9.c108
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c2
-rw-r--r--drivers/rtc/rtc-coh901331.c37
-rw-r--r--drivers/rtc/rtc-cros-ec.c4
-rw-r--r--drivers/rtc/rtc-da9063.c34
-rw-r--r--drivers/rtc/rtc-digicolor.c25
-rw-r--r--drivers/rtc/rtc-dm355evm.c24
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-ds1672.c127
-rw-r--r--drivers/rtc/rtc-ds1685.c262
-rw-r--r--drivers/rtc/rtc-ds2404.c73
-rw-r--r--drivers/rtc/rtc-ds3232.c40
-rw-r--r--drivers/rtc/rtc-ep93xx.c70
-rw-r--r--drivers/rtc/rtc-goldfish.c50
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c3
-rw-r--r--drivers/rtc/rtc-imxdi.c50
-rw-r--r--drivers/rtc/rtc-jz4740.c95
-rw-r--r--drivers/rtc/rtc-lpc32xx.c59
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-mc13xxx.c25
-rw-r--r--drivers/rtc/rtc-mt6397.c9
-rw-r--r--drivers/rtc/rtc-mv.c33
-rw-r--r--drivers/rtc/rtc-mxc.c86
-rw-r--r--drivers/rtc/rtc-mxc_v2.c29
-rw-r--r--drivers/rtc/rtc-omap.c32
-rw-r--r--drivers/rtc/rtc-opal.c2
-rw-r--r--drivers/rtc/rtc-pcap.c28
-rw-r--r--drivers/rtc/rtc-pcf85063.c446
-rw-r--r--drivers/rtc/rtc-pcf85363.c20
-rw-r--r--drivers/rtc/rtc-ps3.c30
-rw-r--r--drivers/rtc/rtc-pxa.c3
-rw-r--r--drivers/rtc/rtc-rk808.c6
-rw-r--r--drivers/rtc/rtc-rx6110.c9
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-sh.c32
-rw-r--r--drivers/rtc/rtc-sirfsoc.c2
-rw-r--r--drivers/rtc/rtc-snvs.c48
-rw-r--r--drivers/rtc/rtc-stm32.c9
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c34
-rw-r--r--drivers/rtc/rtc-sun4v.c21
-rw-r--r--drivers/rtc/rtc-tegra.c47
-rw-r--r--drivers/rtc/rtc-test.c11
-rw-r--r--drivers/rtc/rtc-tx4939.c17
-rw-r--r--drivers/rtc/rtc-wilco-ec.c63
-rw-r--r--drivers/rtc/rtc-wm831x.c69
-rw-r--r--drivers/rtc/rtc-wm8350.c12
-rw-r--r--drivers/rtc/rtc-x1205.c7
-rw-r--r--drivers/rtc/rtc-xgene.c61
-rw-r--r--drivers/rtc/rtc-zynqmp.c13
-rw-r--r--drivers/rtc/sysfs.c23
-rw-r--r--drivers/rtc/systohc.c13
-rw-r--r--drivers/s390/block/dasd_eckd.c6
-rw-r--r--drivers/s390/char/con3270.c2
-rw-r--r--drivers/s390/char/fs3270.c5
-rw-r--r--drivers/s390/char/raw3270.c3
-rw-r--r--drivers/s390/char/raw3270.h4
-rw-r--r--drivers/s390/char/sclp.c14
-rw-r--r--drivers/s390/char/sclp.h10
-rw-r--r--drivers/s390/char/sclp_early.c5
-rw-r--r--drivers/s390/char/sclp_early_core.c20
-rw-r--r--drivers/s390/char/sclp_sdias.c74
-rw-r--r--drivers/s390/char/tape_char.c2
-rw-r--r--drivers/s390/char/tty3270.c3
-rw-r--r--drivers/s390/char/zcore.c24
-rw-r--r--drivers/s390/cio/Makefile3
-rw-r--r--drivers/s390/cio/airq.c41
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/cio.h4
-rw-r--r--drivers/s390/cio/ioasm.c1
-rw-r--r--drivers/s390/cio/qdio.h6
-rw-r--r--drivers/s390/cio/qdio_debug.c9
-rw-r--r--drivers/s390/cio/qdio_main.c211
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c88
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c21
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h2
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c81
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c143
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c227
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h48
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/crypto/ap_queue.c2
-rw-r--r--drivers/s390/crypto/pkey_api.c3
-rw-r--r--drivers/s390/crypto/zcrypt_api.c6
-rw-r--r--drivers/s390/net/ctcm_main.c1
-rw-r--r--drivers/s390/net/ism.h29
-rw-r--r--drivers/s390/net/ism_drv.c20
-rw-r--r--drivers/s390/net/qeth_core.h131
-rw-r--r--drivers/s390/net/qeth_core_main.c919
-rw-r--r--drivers/s390/net/qeth_core_mpc.h2
-rw-r--r--drivers/s390/net/qeth_core_sys.c10
-rw-r--r--drivers/s390/net/qeth_ethtool.c17
-rw-r--r--drivers/s390/net/qeth_l2_main.c99
-rw-r--r--drivers/s390/net/qeth_l3_main.c263
-rw-r--r--drivers/s390/net/qeth_l3_sys.c26
-rw-r--r--drivers/s390/virtio/virtio_ccw.c54
-rw-r--r--drivers/sbus/char/oradax.c4
-rw-r--r--drivers/scsi/NCR5380.c11
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx2
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c1
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h1
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c14
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c10
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c1
-rw-r--r--drivers/scsi/atp870u.c7
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c1
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1
-rw-r--r--drivers/scsi/bfa/bfa.h3
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c6
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c2
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/scsi/csiostor/csio_isr.c28
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c5
-rw-r--r--drivers/scsi/csiostor/csio_wr.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c14
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c22
-rw-r--r--drivers/scsi/dpt_i2o.c12
-rw-r--r--drivers/scsi/esp_scsi.c2
-rw-r--r--drivers/scsi/gdth.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c104
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c21
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c49
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c473
-rw-r--r--drivers/scsi/hpsa.c27
-rw-r--r--drivers/scsi/imm.c33
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c83
-rw-r--r--drivers/scsi/libsas/sas_init.c42
-rw-r--r--drivers/scsi/libsas/sas_phy.c7
-rw-r--r--drivers/scsi/libsas/sas_port.c24
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c243
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c123
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c486
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h42
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c137
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c64
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c350
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h54
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mpt3sas/Kconfig1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c179
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h22
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c3
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c3
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/mvumi.c6
-rw-r--r--drivers/scsi/osst.c6
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c37
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c55
-rw-r--r--drivers/scsi/ppa.c1
-rw-r--r--drivers/scsi/qedf/qedf.h57
-rw-r--r--drivers/scsi/qedf/qedf_dbg.c32
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c57
-rw-r--r--drivers/scsi/qedf/qedf_els.c82
-rw-r--r--drivers/scsi/qedf/qedf_fip.c95
-rw-r--r--drivers/scsi/qedf/qedf_io.c753
-rw-r--r--drivers/scsi/qedf/qedf_main.c281
-rw-r--r--drivers/scsi/qedf/qedf_version.h6
-rw-r--r--drivers/scsi/qedi/qedi_fw.c5
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c12
-rw-r--r--drivers/scsi/qedi/qedi_main.c7
-rw-r--r--drivers/scsi/qla1280.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c329
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c84
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c192
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h267
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_dsd.h30
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h98
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h107
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c139
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1490
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h69
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c360
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c144
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c336
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c111
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c115
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c38
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c607
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c973
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c201
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h33
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c447
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h76
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c58
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/qlogicfas408.c4
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_dh.c1
-rw-r--r--drivers/scsi/scsi_error.c1
-rw-r--r--drivers/scsi/scsi_lib.c36
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_transport_fc.c119
-rw-r--r--drivers/scsi/sd.c33
-rw-r--r--drivers/scsi/smartpqi/Makefile1
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h15
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c51
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c15
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c15
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h15
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/scsi/storvsc_drv.c15
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_nvram.c1
-rw-r--r--drivers/scsi/ufs/Kconfig15
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c74
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c113
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c368
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h53
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c216
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h4
-rw-r--r--drivers/scsi/ufs/ufs.h1
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c112
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.h21
-rw-r--r--drivers/scsi/ufs/ufshcd.c41
-rw-r--r--drivers/scsi/ufs/unipro.h2
-rw-r--r--drivers/scsi/virtio_scsi.c3
-rw-r--r--drivers/sh/intc/userimask.c2
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c4
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile3
-rw-r--r--drivers/soc/aspeed/Kconfig15
-rw-r--r--drivers/soc/aspeed/Makefile1
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c444
-rw-r--r--drivers/soc/imx/gpc.c13
-rw-r--r--drivers/soc/ixp4xx/Kconfig16
-rw-r--r--drivers/soc/ixp4xx/Makefile2
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c762
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c488
-rw-r--r--drivers/soc/sunxi/Kconfig1
-rw-r--r--drivers/soundwire/Kconfig4
-rw-r--r--drivers/soundwire/bus.c152
-rw-r--r--drivers/soundwire/bus.h16
-rw-r--r--drivers/soundwire/bus_type.c4
-rw-r--r--drivers/soundwire/cadence_master.c100
-rw-r--r--drivers/soundwire/cadence_master.h22
-rw-r--r--drivers/soundwire/intel.c138
-rw-r--r--drivers/soundwire/intel.h4
-rw-r--r--drivers/soundwire/intel_init.c15
-rw-r--r--drivers/soundwire/mipi_disco.c122
-rw-r--r--drivers/soundwire/slave.c10
-rw-r--r--drivers/soundwire/stream.c285
-rw-r--r--drivers/spi/Kconfig16
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c8
-rw-r--r--drivers/spi/spi-at91-usart.c8
-rw-r--r--drivers/spi/spi-bcm2835.c39
-rw-r--r--drivers/spi/spi-bcm2835aux.c205
-rw-r--r--drivers/spi/spi-bitbang.c66
-rw-r--r--drivers/spi/spi-dw-mmio.c12
-rw-r--r--drivers/spi/spi-ep93xx.c33
-rw-r--r--drivers/spi/spi-fsl-lib.h2
-rw-r--r--drivers/spi/spi-fsl-lpspi.c573
-rw-r--r--drivers/spi/spi-fsl-qspi.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c81
-rw-r--r--drivers/spi/spi-gpio.c227
-rw-r--r--drivers/spi/spi-imx.c9
-rw-r--r--drivers/spi/spi-mem.c8
-rw-r--r--drivers/spi/spi-mt7621.c (renamed from drivers/staging/mt7621-spi/spi-mt7621.c)90
-rw-r--r--drivers/spi/spi-mxic.c6
-rw-r--r--drivers/spi/spi-orion.c4
-rw-r--r--drivers/spi/spi-pic32.c2
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c4
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c5
-rw-r--r--drivers/spi/spi-pxa2xx.c45
-rw-r--r--drivers/spi/spi-rockchip.c1
-rw-r--r--drivers/spi/spi-rspi.c119
-rw-r--r--drivers/spi/spi-sh-msiof.c224
-rw-r--r--drivers/spi/spi-stm32-qspi.c229
-rw-r--r--drivers/spi/spi-stm32.c5
-rw-r--r--drivers/spi/spi-tegra114.c310
-rw-r--r--drivers/spi/spi-tegra20-slink.c12
-rw-r--r--drivers/spi/spi-topcliff-pch.c15
-rw-r--r--drivers/spi/spi-zynq-qspi.c761
-rw-r--r--drivers/spi/spi.c76
-rw-r--r--drivers/spi/spidev.c6
-rw-r--r--drivers/ssb/bridge_pcmcia_80211.c9
-rw-r--r--drivers/ssb/pci.c1
-rw-r--r--drivers/ssb/pcmcia.c4
-rw-r--r--drivers/staging/Kconfig18
-rw-r--r--drivers/staging/Makefile6
-rw-r--r--drivers/staging/android/Kconfig3
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/ion/Kconfig1
-rw-r--r--drivers/staging/android/vsoc.c3
-rw-r--r--drivers/staging/axis-fifo/Kconfig8
-rw-r--r--drivers/staging/axis-fifo/Makefile1
-rw-r--r--drivers/staging/board/Kconfig1
-rw-r--r--drivers/staging/board/Makefile1
-rw-r--r--drivers/staging/clocking-wizard/Kconfig1
-rw-r--r--drivers/staging/clocking-wizard/Makefile1
-rw-r--r--drivers/staging/comedi/Kconfig255
-rw-r--r--drivers/staging/comedi/comedi_buf.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c32
-rw-r--r--drivers/staging/comedi/drivers.c11
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c2
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.c17
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.h3
-rw-r--r--drivers/staging/comedi/drivers/das08.c4
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c2
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c5
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c12
-rw-r--r--drivers/staging/comedi/drivers/mite.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c37
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_routing/tools/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c15
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c14
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c1
-rw-r--r--drivers/staging/comedi/drivers/s626.c2
-rw-r--r--drivers/staging/comedi/drivers/tests/ni_routes_test.c2
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c2
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c2
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c2
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c8
-rw-r--r--drivers/staging/comedi/kcomedilib/Makefile1
-rw-r--r--drivers/staging/emxx_udc/Kconfig1
-rw-r--r--drivers/staging/emxx_udc/Makefile1
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c57
-rw-r--r--drivers/staging/erofs/Documentation/filesystems/erofs.txt1
-rw-r--r--drivers/staging/erofs/data.c23
-rw-r--r--drivers/staging/erofs/inode.c18
-rw-r--r--drivers/staging/erofs/internal.h19
-rw-r--r--drivers/staging/erofs/namei.c3
-rw-r--r--drivers/staging/erofs/super.c56
-rw-r--r--drivers/staging/erofs/unzip_pagevec.h6
-rw-r--r--drivers/staging/erofs/unzip_vle.c99
-rw-r--r--drivers/staging/erofs/utils.c4
-rw-r--r--drivers/staging/erofs/xattr.c50
-rw-r--r--drivers/staging/fbtft/Kconfig1
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c4
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c4
-rw-r--r--drivers/staging/fbtft/fb_watterott.c15
-rw-r--r--drivers/staging/fbtft/fbtft-io.c12
-rw-r--r--drivers/staging/fbtft/fbtft.h1
-rw-r--r--drivers/staging/fbtft/fbtft_device.c2
-rw-r--r--drivers/staging/fbtft/flexfb.c7
-rw-r--r--drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev31
-rw-r--r--drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev62
-rw-r--r--drivers/staging/fieldbus/Documentation/fieldbus_dev.txt66
-rw-r--r--drivers/staging/fieldbus/Kconfig18
-rw-r--r--drivers/staging/fieldbus/Makefile7
-rw-r--r--drivers/staging/fieldbus/TODO5
-rw-r--r--drivers/staging/fieldbus/anybuss/Kconfig39
-rw-r--r--drivers/staging/fieldbus/anybuss/Makefile10
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h102
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-controller.h47
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c399
-rw-r--r--drivers/staging/fieldbus/anybuss/hms-profinet.c228
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c1458
-rw-r--r--drivers/staging/fieldbus/dev_core.c351
-rw-r--r--drivers/staging/fieldbus/fieldbus_dev.h108
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig1
-rw-r--r--drivers/staging/fsl-dpaa2/Makefile1
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c4
-rw-r--r--drivers/staging/fwserial/Kconfig1
-rw-r--r--drivers/staging/fwserial/Makefile1
-rw-r--r--drivers/staging/fwserial/fwserial.c5
-rw-r--r--drivers/staging/gasket/Kconfig1
-rw-r--r--drivers/staging/gasket/Makefile1
-rw-r--r--drivers/staging/gasket/apex_driver.c8
-rw-r--r--drivers/staging/gasket/gasket_interrupt.c6
-rw-r--r--drivers/staging/gasket/gasket_page_table.c9
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c9
-rw-r--r--drivers/staging/gasket/gasket_sysfs.h4
-rw-r--r--drivers/staging/gdm724x/Kconfig1
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c1
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c7
-rw-r--r--drivers/staging/gdm724x/hci_packet.h2
-rw-r--r--drivers/staging/goldfish/Kconfig1
-rw-r--r--drivers/staging/goldfish/Makefile1
-rw-r--r--drivers/staging/greybus/Kconfig1
-rw-r--r--drivers/staging/greybus/audio_codec.h122
-rw-r--r--drivers/staging/greybus/audio_manager.c3
-rw-r--r--drivers/staging/greybus/bundle.c2
-rw-r--r--drivers/staging/greybus/hid.c1
-rw-r--r--drivers/staging/greybus/power_supply.c4
-rw-r--r--drivers/staging/greybus/sdio.c8
-rw-r--r--drivers/staging/gs_fpgaboot/Kconfig2
-rw-r--r--drivers/staging/gs_fpgaboot/Makefile1
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad593330
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/accel/Kconfig1
-rw-r--r--drivers/staging/iio/accel/Makefile1
-rw-r--r--drivers/staging/iio/accel/adis16203.c3
-rw-r--r--drivers/staging/iio/accel/adis16240.c3
-rw-r--r--drivers/staging/iio/adc/Kconfig14
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c244
-rw-r--r--drivers/staging/iio/adc/ad7192.h12
-rw-r--r--drivers/staging/iio/adc/ad7280a.c112
-rw-r--r--drivers/staging/iio/adc/ad7280a.h3
-rw-r--r--drivers/staging/iio/adc/ad7816.c5
-rw-r--r--drivers/staging/iio/addac/Kconfig1
-rw-r--r--drivers/staging/iio/addac/Makefile1
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c3
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c3
-rw-r--r--drivers/staging/iio/addac/adt7316.c7
-rw-r--r--drivers/staging/iio/addac/adt7316.h3
-rw-r--r--drivers/staging/iio/cdc/Kconfig1
-rw-r--r--drivers/staging/iio/cdc/Makefile1
-rw-r--r--drivers/staging/iio/cdc/ad7150.c3
-rw-r--r--drivers/staging/iio/cdc/ad7746.c3
-rw-r--r--drivers/staging/iio/cdc/ad7746.h3
-rw-r--r--drivers/staging/iio/frequency/Kconfig1
-rw-r--r--drivers/staging/iio/frequency/Makefile1
-rw-r--r--drivers/staging/iio/frequency/ad9832.c54
-rw-r--r--drivers/staging/iio/frequency/ad9832.h4
-rw-r--r--drivers/staging/iio/frequency/ad9834.c5
-rw-r--r--drivers/staging/iio/frequency/ad9834.h3
-rw-r--r--drivers/staging/iio/frequency/dds.h3
-rw-r--r--drivers/staging/iio/impedance-analyzer/Kconfig3
-rw-r--r--drivers/staging/iio/impedance-analyzer/Makefile1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c58
-rw-r--r--drivers/staging/iio/meter/Kconfig1
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c3
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c3
-rw-r--r--drivers/staging/iio/meter/ade7854.c5
-rw-r--r--drivers/staging/iio/resolver/Kconfig1
-rw-r--r--drivers/staging/iio/resolver/Makefile1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c6
-rw-r--r--drivers/staging/kpc2000/Kconfig57
-rw-r--r--drivers/staging/kpc2000/Makefile6
-rw-r--r--drivers/staging/kpc2000/TODO8
-rw-r--r--drivers/staging/kpc2000/kpc.h23
-rw-r--r--drivers/staging/kpc2000/kpc2000/Makefile4
-rw-r--r--drivers/staging/kpc2000/kpc2000/cell_probe.c471
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c437
-rw-r--r--drivers/staging/kpc2000/kpc2000/dma_common_defs.h43
-rw-r--r--drivers/staging/kpc2000/kpc2000/fileops.c131
-rw-r--r--drivers/staging/kpc2000/kpc2000/kp2000_module.c54
-rw-r--r--drivers/staging/kpc2000/kpc2000/pcie.h112
-rw-r--r--drivers/staging/kpc2000/kpc2000/uapi.h22
-rw-r--r--drivers/staging/kpc2000/kpc_dma/Makefile6
-rw-r--r--drivers/staging/kpc2000/kpc_dma/dma.c264
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c420
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c248
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h220
-rw-r--r--drivers/staging/kpc2000/kpc_dma/uapi.h11
-rw-r--r--drivers/staging/kpc2000/kpc_i2c/Makefile4
-rw-r--r--drivers/staging/kpc2000/kpc_i2c/fileops.c181
-rw-r--r--drivers/staging/kpc2000/kpc_i2c/i2c_driver.c699
-rw-r--r--drivers/staging/kpc2000/kpc_spi/Makefile4
-rw-r--r--drivers/staging/kpc2000/kpc_spi/spi_driver.c507
-rw-r--r--drivers/staging/kpc2000/kpc_spi/spi_parts.h48
-rw-r--r--drivers/staging/ks7010/Kconfig1
-rw-r--r--drivers/staging/ks7010/Makefile1
-rw-r--r--drivers/staging/ks7010/ks_hostif.c17
-rw-r--r--drivers/staging/media/Kconfig5
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/bcm2048/Kconfig3
-rw-r--r--drivers/staging/media/bcm2048/Makefile1
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig3
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile1
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c24
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c41
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c20
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c6
-rw-r--r--drivers/staging/media/imx/Kconfig5
-rw-r--r--drivers/staging/media/imx/imx-ic-common.c10
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c6
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c6
-rw-r--r--drivers/staging/media/imx/imx-ic.h6
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c11
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c24
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c4
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c34
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c6
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c38
-rw-r--r--drivers/staging/media/imx/imx-media-of.c79
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c6
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c18
-rw-r--r--drivers/staging/media/imx/imx-media.h13
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c10
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c6
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c26
-rw-r--r--drivers/staging/media/ipu3/Kconfig3
-rw-r--r--drivers/staging/media/ipu3/Makefile1
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c40
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c126
-rw-r--r--drivers/staging/media/ipu3/ipu3.c33
-rw-r--r--drivers/staging/media/mt9t031/Kconfig5
-rw-r--r--drivers/staging/media/mt9t031/Makefile1
-rw-r--r--drivers/staging/media/mt9t031/TODO5
-rw-r--r--drivers/staging/media/omap4iss/Kconfig2
-rw-r--r--drivers/staging/media/rockchip/vpu/Kconfig2
-rw-r--r--drivers/staging/media/rockchip/vpu/Makefile1
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c3
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c12
-rw-r--r--drivers/staging/media/soc_camera/Kconfig1
-rw-r--r--drivers/staging/media/soc_camera/TODO4
-rw-r--r--drivers/staging/media/soc_camera/imx074.c6
-rw-r--r--drivers/staging/media/soc_camera/mt9t031.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_camera.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_mediabus.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_mt9v022.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_ov5642.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_ov9740.c6
-rw-r--r--drivers/staging/media/sunxi/Kconfig1
-rw-r--r--drivers/staging/media/sunxi/Makefile1
-rw-r--r--drivers/staging/media/sunxi/cedrus/Kconfig1
-rw-r--r--drivers/staging/media/sunxi/cedrus/Makefile1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c1
-rw-r--r--drivers/staging/media/tegra-vde/Kconfig1
-rw-r--r--drivers/staging/media/tegra-vde/Makefile1
-rw-r--r--drivers/staging/media/tegra-vde/tegra-vde.c5
-rw-r--r--drivers/staging/media/tegra-vde/uapi.h11
-rw-r--r--drivers/staging/media/zoran/Kconfig75
-rw-r--r--drivers/staging/media/zoran/Makefile7
-rw-r--r--drivers/staging/media/zoran/TODO4
-rw-r--r--drivers/staging/media/zoran/videocodec.c391
-rw-r--r--drivers/staging/media/zoran/videocodec.h349
-rw-r--r--drivers/staging/media/zoran/zoran.h402
-rw-r--r--drivers/staging/media/zoran/zoran_card.c1524
-rw-r--r--drivers/staging/media/zoran/zoran_card.h50
-rw-r--r--drivers/staging/media/zoran/zoran_device.c1619
-rw-r--r--drivers/staging/media/zoran/zoran_device.h91
-rw-r--r--drivers/staging/media/zoran/zoran_driver.c2850
-rw-r--r--drivers/staging/media/zoran/zoran_procfs.c221
-rw-r--r--drivers/staging/media/zoran/zoran_procfs.h32
-rw-r--r--drivers/staging/media/zoran/zr36016.c516
-rw-r--r--drivers/staging/media/zoran/zr36016.h107
-rw-r--r--drivers/staging/media/zoran/zr36050.c896
-rw-r--r--drivers/staging/media/zoran/zr36050.h179
-rw-r--r--drivers/staging/media/zoran/zr36057.h164
-rw-r--r--drivers/staging/media/zoran/zr36060.c1006
-rw-r--r--drivers/staging/media/zoran/zr36060.h216
-rw-r--r--drivers/staging/most/Documentation/ABI/configfs-most.txt204
-rw-r--r--drivers/staging/most/Documentation/driver_usage.txt131
-rw-r--r--drivers/staging/most/Kconfig3
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/cdev/Kconfig1
-rw-r--r--drivers/staging/most/cdev/Makefile1
-rw-r--r--drivers/staging/most/cdev/cdev.c10
-rw-r--r--drivers/staging/most/configfs.c676
-rw-r--r--drivers/staging/most/core.c305
-rw-r--r--drivers/staging/most/core.h20
-rw-r--r--drivers/staging/most/dim2/Kconfig1
-rw-r--r--drivers/staging/most/dim2/Makefile1
-rw-r--r--drivers/staging/most/dim2/errors.h2
-rw-r--r--drivers/staging/most/dim2/hal.h2
-rw-r--r--drivers/staging/most/dim2/reg.h2
-rw-r--r--drivers/staging/most/dim2/sysfs.h2
-rw-r--r--drivers/staging/most/i2c/Kconfig3
-rw-r--r--drivers/staging/most/i2c/Makefile1
-rw-r--r--drivers/staging/most/net/Kconfig3
-rw-r--r--drivers/staging/most/net/Makefile1
-rw-r--r--drivers/staging/most/net/net.c3
-rw-r--r--drivers/staging/most/sound/Kconfig3
-rw-r--r--drivers/staging/most/sound/Makefile1
-rw-r--r--drivers/staging/most/sound/sound.c61
-rw-r--r--drivers/staging/most/usb/Kconfig3
-rw-r--r--drivers/staging/most/usb/Makefile1
-rw-r--r--drivers/staging/most/usb/usb.c2
-rw-r--r--drivers/staging/most/video/Kconfig3
-rw-r--r--drivers/staging/most/video/Makefile1
-rw-r--r--drivers/staging/most/video/video.c3
-rw-r--r--drivers/staging/mt7621-dma/Kconfig1
-rw-r--r--drivers/staging/mt7621-dma/Makefile1
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c7
-rw-r--r--drivers/staging/mt7621-dts/Kconfig1
-rw-r--r--drivers/staging/mt7621-dts/Makefile1
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi23
-rw-r--r--drivers/staging/mt7621-mmc/Kconfig16
-rw-r--r--drivers/staging/mt7621-mmc/Makefile42
-rw-r--r--drivers/staging/mt7621-mmc/TODO8
-rw-r--r--drivers/staging/mt7621-mmc/board.h63
-rw-r--r--drivers/staging/mt7621-mmc/dbg.c304
-rw-r--r--drivers/staging/mt7621-mmc/dbg.h101
-rw-r--r--drivers/staging/mt7621-mmc/mt6575_sd.h488
-rw-r--r--drivers/staging/mt7621-mmc/sd.c1855
-rw-r--r--drivers/staging/mt7621-pci-phy/Kconfig1
-rw-r--r--drivers/staging/mt7621-pci-phy/Makefile1
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt44
-rw-r--r--drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c284
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/mt7621-pci/Makefile1
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c2
-rw-r--r--drivers/staging/mt7621-pinctrl/Kconfig1
-rw-r--r--drivers/staging/mt7621-pinctrl/Makefile1
-rw-r--r--drivers/staging/mt7621-spi/Kconfig6
-rw-r--r--drivers/staging/mt7621-spi/Makefile1
-rw-r--r--drivers/staging/mt7621-spi/TODO5
-rw-r--r--drivers/staging/netlogic/Kconfig1
-rw-r--r--drivers/staging/netlogic/Makefile1
-rw-r--r--drivers/staging/netlogic/xlr_net.c11
-rw-r--r--drivers/staging/nvec/Kconfig1
-rw-r--r--drivers/staging/octeon-usb/Kconfig1
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c4
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h8
-rw-r--r--drivers/staging/octeon/Kconfig1
-rw-r--r--drivers/staging/octeon/TODO9
-rw-r--r--drivers/staging/octeon/ethernet-tx.c20
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/olpc_dcon/Kconfig2
-rw-r--r--drivers/staging/olpc_dcon/Makefile1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h5
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c7
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c56
-rw-r--r--drivers/staging/pi433/Kconfig1
-rw-r--r--drivers/staging/pi433/Makefile1
-rw-r--r--drivers/staging/pi433/pi433_if.c40
-rw-r--r--drivers/staging/pi433/rf69.c49
-rw-r--r--drivers/staging/ralink-gdma/Kconfig1
-rw-r--r--drivers/staging/ralink-gdma/Makefile1
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c105
-rw-r--r--drivers/staging/rtl8188eu/Kconfig1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c28
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c70
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_hwconfig.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c10
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h4
-rw-r--r--drivers/staging/rtl8188eu/include/phydm_reg.h (renamed from drivers/staging/rtlwifi/phydm/phydm_features.h)12
-rw-r--r--drivers/staging/rtl8188eu/include/phydm_regdefine11n.h53
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h130
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c3
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c1
-rw-r--r--drivers/staging/rtl8192e/Kconfig10
-rw-r--r--drivers/staging/rtl8192e/dot11d.c9
-rw-r--r--drivers/staging/rtl8192e/license339
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c44
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h22
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c37
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h20
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h21
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c21
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.h17
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BA.h16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c16
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c7
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h22
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c21
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c20
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c22
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c19
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h40
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c10
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c11
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c76
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c41
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c40
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c114
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c26
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c16
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c38
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h2
-rw-r--r--drivers/staging/rtl8712/Kconfig11
-rw-r--r--drivers/staging/rtl8712/drv_types.h6
-rw-r--r--drivers/staging/rtl8712/hal_init.c3
-rw-r--r--drivers/staging/rtl8712/ieee80211.c3
-rw-r--r--drivers/staging/rtl8712/ieee80211.h3
-rw-r--r--drivers/staging/rtl8712/os_intfs.c8
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c6
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c6
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_io.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c13
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c28
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c12
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.c23
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c18
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c14
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c12
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c26
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c51
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c33
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c79
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c59
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c18
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c11
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c32
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c20
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h20
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c13
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c9
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c22
-rw-r--r--drivers/staging/rtl8723bs/include/cmd_osdep.h6
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h10
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types_sdio.h4
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h15
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h31
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_pwrctrl.h30
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h86
-rw-r--r--drivers/staging/rtl8723bs/include/wlan_bssdef.h56
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c57
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c22
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c29
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c30
-rw-r--r--drivers/staging/rtlwifi/Kconfig12
-rw-r--r--drivers/staging/rtlwifi/Makefile70
-rw-r--r--drivers/staging/rtlwifi/TODO11
-rw-r--r--drivers/staging/rtlwifi/base.c2815
-rw-r--r--drivers/staging/rtlwifi/base.h175
-rw-r--r--drivers/staging/rtlwifi/btcoexist/Makefile8
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbt_precomp.h74
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c5233
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h433
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c5210
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h487
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c54
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h24
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c1837
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h791
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.c517
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.h64
-rw-r--r--drivers/staging/rtlwifi/cam.c315
-rw-r--r--drivers/staging/rtlwifi/cam.h39
-rw-r--r--drivers/staging/rtlwifi/core.c1996
-rw-r--r--drivers/staging/rtlwifi/core.h71
-rw-r--r--drivers/staging/rtlwifi/debug.c624
-rw-r--r--drivers/staging/rtlwifi/debug.h223
-rw-r--r--drivers/staging/rtlwifi/efuse.c1329
-rw-r--r--drivers/staging/rtlwifi/efuse.h109
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_2_platform.h41
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h121
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c95
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c552
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h29
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c332
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h33
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c312
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h42
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c173
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h31
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c174
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h34
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c403
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h27
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h160
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c5970
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h385
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c318
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h60
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c963
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h73
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c543
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h62
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c4465
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h310
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.c415
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.h70
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit2.h13396
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h12092
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_info.h111
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h173
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h504
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h104
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h43
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h392
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h1000
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h105
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg2.h1121
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h717
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h37
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h37
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h107
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h122
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h51
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h107
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h112
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h433
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h495
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_type.h1923
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_usb_reg.h17
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c1373
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.h83
-rw-r--r--drivers/staging/rtlwifi/pci.c2496
-rw-r--r--drivers/staging/rtlwifi/pci.h319
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.c954
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.h74
-rw-r--r--drivers/staging/rtlwifi/phydm/mp_precomp.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c1975
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.h935
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.c189
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.h46
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.c930
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.h108
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c616
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h85
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.c72
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.h290
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_beamforming.h37
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.c447
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.h72
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.c332
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.h49
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.c2888
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.h164
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dfs.h48
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.c1521
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.h230
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h26
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c118
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h39
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c91
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h53
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c128
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h33
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.c1848
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.h487
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.c307
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.h183
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_iqk.h65
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.c217
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.h31
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c319
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h35
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c633
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h282
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_pre_define.h602
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_precomp.h74
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.c406
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.h56
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.c1196
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.h258
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_reg.h140
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h83
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h202
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_types.h119
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c1956
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h43
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c211
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h27
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c4730
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h118
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c340
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h34
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c1804
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h73
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c1399
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h37
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c157
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h43
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c214
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h19
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h23
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c865
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.h34
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h56
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h28
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h27
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h25
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h30
-rw-r--r--drivers/staging/rtlwifi/ps.c996
-rw-r--r--drivers/staging/rtlwifi/ps.h39
-rw-r--r--drivers/staging/rtlwifi/pwrseqcmd.h83
-rw-r--r--drivers/staging/rtlwifi/rc.c309
-rw-r--r--drivers/staging/rtlwifi/rc.h38
-rw-r--r--drivers/staging/rtlwifi/regd.c458
-rw-r--r--drivers/staging/rtlwifi/regd.h52
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/Makefile7
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/def.h71
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c964
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.h187
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.c2430
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.h55
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.c116
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.h23
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.c2223
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.h134
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/reg.h1642
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.c470
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.h21
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.c1004
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.h154
-rw-r--r--drivers/staging/rtlwifi/stats.c249
-rw-r--r--drivers/staging/rtlwifi/stats.h31
-rw-r--r--drivers/staging/rtlwifi/wifi.h3362
-rw-r--r--drivers/staging/rts5208/Kconfig1
-rw-r--r--drivers/staging/rts5208/Makefile1
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h2
-rw-r--r--drivers/staging/sm750fb/Kconfig1
-rw-r--r--drivers/staging/sm750fb/Makefile1
-rw-r--r--drivers/staging/sm750fb/ddk750.h1
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c27
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c10
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c40
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c114
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.h1
-rw-r--r--drivers/staging/sm750fb/sm750.c7
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c32
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/speakup/Kconfig1
-rw-r--r--drivers/staging/speakup/kobjects.c14
-rw-r--r--drivers/staging/speakup/main.c1
-rw-r--r--drivers/staging/speakup/selection.c212
-rw-r--r--drivers/staging/speakup/speakup.h1
-rw-r--r--drivers/staging/speakup/speakup_decpc.c2
-rw-r--r--drivers/staging/speakup/speakup_keypc.c6
-rw-r--r--drivers/staging/speakup/spk_ttyio.c2
-rw-r--r--drivers/staging/unisys/Kconfig1
-rw-r--r--drivers/staging/unisys/Makefile1
-rw-r--r--drivers/staging/unisys/include/iochannel.h2
-rw-r--r--drivers/staging/unisys/visorhba/Kconfig13
-rw-r--r--drivers/staging/unisys/visorhba/Makefile1
-rw-r--r--drivers/staging/unisys/visorinput/Kconfig15
-rw-r--r--drivers/staging/unisys/visorinput/Makefile1
-rw-r--r--drivers/staging/unisys/visornic/Kconfig15
-rw-r--r--drivers/staging/unisys/visornic/Makefile1
-rw-r--r--drivers/staging/vboxvideo/TODO10
-rw-r--r--drivers/staging/vc04_services/Kconfig1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Makefile1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c12
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig1
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c55
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c75
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h38
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_cfg.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h176
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c37
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c56
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h32
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c50
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c32
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h34
-rw-r--r--drivers/staging/vme/Makefile1
-rw-r--r--drivers/staging/vme/devices/Kconfig1
-rw-r--r--drivers/staging/vme/devices/Makefile1
-rw-r--r--drivers/staging/vt6655/Kconfig1
-rw-r--r--drivers/staging/vt6655/card.h6
-rw-r--r--drivers/staging/vt6655/channel.h2
-rw-r--r--drivers/staging/vt6655/desc.h2
-rw-r--r--drivers/staging/vt6655/device.h2
-rw-r--r--drivers/staging/vt6655/device_cfg.h2
-rw-r--r--drivers/staging/vt6655/dpc.h2
-rw-r--r--drivers/staging/vt6655/key.h2
-rw-r--r--drivers/staging/vt6655/mac.c25
-rw-r--r--drivers/staging/vt6655/mac.h9
-rw-r--r--drivers/staging/vt6655/power.c7
-rw-r--r--drivers/staging/vt6655/power.h2
-rw-r--r--drivers/staging/vt6655/rf.c35
-rw-r--r--drivers/staging/vt6655/rf.h2
-rw-r--r--drivers/staging/vt6655/rxtx.c18
-rw-r--r--drivers/staging/vt6655/rxtx.h2
-rw-r--r--drivers/staging/vt6655/srom.h2
-rw-r--r--drivers/staging/vt6655/tmacro.h2
-rw-r--r--drivers/staging/vt6655/upc.h4
-rw-r--r--drivers/staging/vt6656/Kconfig1
-rw-r--r--drivers/staging/vt6656/baseband.h2
-rw-r--r--drivers/staging/vt6656/card.h2
-rw-r--r--drivers/staging/vt6656/channel.h2
-rw-r--r--drivers/staging/vt6656/desc.h2
-rw-r--r--drivers/staging/vt6656/device.h2
-rw-r--r--drivers/staging/vt6656/dpc.h2
-rw-r--r--drivers/staging/vt6656/firmware.h2
-rw-r--r--drivers/staging/vt6656/int.h2
-rw-r--r--drivers/staging/vt6656/key.h2
-rw-r--r--drivers/staging/vt6656/power.h2
-rw-r--r--drivers/staging/vt6656/rf.h2
-rw-r--r--drivers/staging/vt6656/rxtx.h2
-rw-r--r--drivers/staging/vt6656/usbpipe.h2
-rw-r--r--drivers/staging/vt6656/wcmd.h2
-rw-r--r--drivers/staging/wilc1000/Kconfig2
-rw-r--r--drivers/staging/wilc1000/host_interface.c71
-rw-r--r--drivers/staging/wilc1000/host_interface.h14
-rw-r--r--drivers/staging/wilc1000/wilc_netdev.c2
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c13
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c72
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c8
-rw-r--r--drivers/staging/wlan-ng/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c118
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c9
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c5
-rw-r--r--drivers/target/target_core_alua.c6
-rw-r--r--drivers/target/target_core_configfs.c163
-rw-r--r--drivers/target/target_core_device.c4
-rw-r--r--drivers/target/target_core_pr.c33
-rw-r--r--drivers/target/target_core_pr.h1
-rw-r--r--drivers/target/target_core_tmr.c2
-rw-r--r--drivers/target/target_core_user.c9
-rw-r--r--drivers/target/target_core_xcopy.c92
-rw-r--r--drivers/tee/tee_shm.c2
-rw-r--r--drivers/thermal/Kconfig11
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c8
-rw-r--r--drivers/thermal/cpu_cooling.c30
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c2
-rw-r--r--drivers/thermal/of-thermal.c3
-rw-r--r--drivers/thermal/qcom/Makefile4
-rw-r--r--drivers/thermal/qcom/tsens-8916.c105
-rw-r--r--drivers/thermal/qcom/tsens-8960.c84
-rw-r--r--drivers/thermal/qcom/tsens-common.c159
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c (renamed from drivers/thermal/qcom/tsens-8974.c)166
-rw-r--r--drivers/thermal/qcom/tsens-v1.c193
-rw-r--r--drivers/thermal/qcom/tsens-v2.c111
-rw-r--r--drivers/thermal/qcom/tsens.c100
-rw-r--r--drivers/thermal/qcom/tsens.h291
-rw-r--r--drivers/thermal/qoriq_thermal.c5
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c51
-rw-r--r--drivers/thermal/rcar_thermal.c11
-rw-r--r--drivers/thermal/rockchip_thermal.c74
-rw-r--r--drivers/thermal/st/Kconfig22
-rw-r--r--drivers/thermal/st/stm_thermal.c6
-rw-r--r--drivers/thermal/tegra/Kconfig4
-rw-r--r--drivers/thermal/tegra/soctherm.c961
-rw-r--r--drivers/thermal/tegra/soctherm.h16
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c7
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c7
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c15
-rw-r--r--drivers/thermal/thermal-generic-adc.c9
-rw-r--r--drivers/thermal/thermal_core.c49
-rw-r--r--drivers/thermal/thermal_mmio.c129
-rw-r--r--drivers/thunderbolt/Makefile4
-rw-r--r--drivers/thunderbolt/cap.c85
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/domain.c1
-rw-r--r--drivers/thunderbolt/icm.c65
-rw-r--r--drivers/thunderbolt/lc.c179
-rw-r--r--drivers/thunderbolt/nhi.c3
-rw-r--r--drivers/thunderbolt/path.c420
-rw-r--r--drivers/thunderbolt/property.c16
-rw-r--r--drivers/thunderbolt/switch.c557
-rw-r--r--drivers/thunderbolt/tb.c608
-rw-r--r--drivers/thunderbolt/tb.h227
-rw-r--r--drivers/thunderbolt/tb_msgs.h11
-rw-r--r--drivers/thunderbolt/tb_regs.h50
-rw-r--r--drivers/thunderbolt/tunnel.c691
-rw-r--r--drivers/thunderbolt/tunnel.h78
-rw-r--r--drivers/thunderbolt/tunnel_pci.c226
-rw-r--r--drivers/thunderbolt/tunnel_pci.h31
-rw-r--r--drivers/thunderbolt/xdomain.c170
-rw-r--r--drivers/tty/Kconfig22
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/hvc/Kconfig3
-rw-r--r--drivers/tty/ipwireless/Makefile1
-rw-r--r--drivers/tty/ipwireless/main.c8
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/rocket.c16
-rw-r--r--drivers/tty/rocket.h1
-rw-r--r--drivers/tty/serdev/Kconfig1
-rw-r--r--drivers/tty/serdev/Makefile1
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c7
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c5
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c1
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c162
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig54
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/cpm_uart/Makefile1
-rw-r--r--drivers/tty/serial/jsm/Makefile1
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/milbeaut_usio.c614
-rw-r--r--drivers/tty/serial/sc16is7xx.c38
-rw-r--r--drivers/tty/serial/serial_core.c30
-rw-r--r--drivers/tty/serial/serial_txx9.c1
-rw-r--r--drivers/tty/serial/sh-sci.c6
-rw-r--r--drivers/tty/serial/sifive.c1056
-rw-r--r--drivers/tty/serial/sn_console.c1
-rw-r--r--drivers/tty/serial/sprd_serial.c501
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c12
-rw-r--r--drivers/tty/sysrq.c12
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/tty/tty_jobctrl.c4
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/tty/ttynull.c109
-rw-r--r--drivers/tty/vcc.c1
-rw-r--r--drivers/tty/vt/.gitignore1
-rw-r--r--drivers/tty/vt/consolemap.c8
-rw-r--r--drivers/tty/vt/cp437.uni1
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped1
-rw-r--r--drivers/tty/vt/defkeymap.map1
-rw-r--r--drivers/tty/vt/keyboard.c35
-rw-r--r--drivers/tty/vt/selection.c46
-rw-r--r--drivers/tty/vt/vc_screen.c2
-rw-r--r--drivers/tty/vt/vt.c12
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c9
-rw-r--r--drivers/usb/class/cdc-acm.c63
-rw-r--r--drivers/usb/class/cdc-acm.h3
-rw-r--r--drivers/usb/common/common.c16
-rw-r--r--drivers/usb/core/driver.c13
-rw-r--r--drivers/usb/core/hcd.c44
-rw-r--r--drivers/usb/core/hub.c25
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/dwc2/core.c199
-rw-r--r--drivers/usb/dwc2/core.h14
-rw-r--r--drivers/usb/dwc2/core_intr.c12
-rw-r--r--drivers/usb/dwc2/gadget.c101
-rw-r--r--drivers/usb/dwc2/hcd.c326
-rw-r--r--drivers/usb/dwc2/hw.h8
-rw-r--r--drivers/usb/dwc2/params.c35
-rw-r--r--drivers/usb/dwc2/platform.c20
-rw-r--r--drivers/usb/dwc3/Kconfig16
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c18
-rw-r--r--drivers/usb/dwc3/core.h3
-rw-r--r--drivers/usb/dwc3/debug.h3
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c604
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c95
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/early/xhci-dbc.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c3
-rw-r--r--drivers/usb/gadget/function/f_ncm.c57
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c6
-rw-r--r--drivers/usb/gadget/function/u_ncm.h3
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c6
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c84
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h1
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c35
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c167
-rw-r--r--drivers/usb/gadget/udc/net2272.c3
-rw-r--r--drivers/usb/gadget/udc/net2280.c5
-rw-r--r--drivers/usb/host/fhci-sched.c10
-rw-r--r--drivers/usb/host/ohci-da8xx.c42
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c2
-rw-r--r--drivers/usb/host/xhci-hub.c44
-rw-r--r--drivers/usb/host/xhci-mtk.c19
-rw-r--r--drivers/usb/host/xhci-plat.c39
-rw-r--r--drivers/usb/host/xhci-ring.c24
-rw-r--r--drivers/usb/host/xhci-tegra.c68
-rw-r--r--drivers/usb/host/xhci-trace.h30
-rw-r--r--drivers/usb/host/xhci.c40
-rw-r--r--drivers/usb/host/xhci.h46
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c4
-rw-r--r--drivers/usb/misc/Kconfig1
-rw-r--r--drivers/usb/misc/ldusb.c2
-rw-r--r--drivers/usb/misc/usb251xb.c135
-rw-r--r--drivers/usb/misc/usb3503.c48
-rw-r--r--drivers/usb/misc/yurex.c1
-rw-r--r--drivers/usb/mtu3/Makefile11
-rw-r--r--drivers/usb/mtu3/mtu3.h57
-rw-r--r--drivers/usb/mtu3/mtu3_core.c27
-rw-r--r--drivers/usb/mtu3/mtu3_debug.h50
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c539
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c156
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h4
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c20
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c4
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h48
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c47
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c118
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h1
-rw-r--r--drivers/usb/mtu3/mtu3_trace.c23
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h279
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/jz4740.c19
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_dsps.c6
-rw-r--r--drivers/usb/musb/omap2430.c6
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/usb/serial/ark3116.c11
-rw-r--r--drivers/usb/serial/cypress_m8.c49
-rw-r--r--drivers/usb/serial/digi_acceleport.c41
-rw-r--r--drivers/usb/serial/f81232.c198
-rw-r--r--drivers/usb/serial/generic.c76
-rw-r--r--drivers/usb/serial/io_edgeport.c37
-rw-r--r--drivers/usb/serial/iuu_phoenix.c4
-rw-r--r--drivers/usb/serial/oti6858.c5
-rw-r--r--drivers/usb/serial/pl2303.c58
-rw-r--r--drivers/usb/serial/spcp8x5.c5
-rw-r--r--drivers/usb/serial/usb-serial.c11
-rw-r--r--drivers/usb/storage/realtek_cr.c13
-rw-r--r--drivers/usb/storage/scsiglue.c26
-rw-r--r--drivers/usb/storage/sierra_ms.c4
-rw-r--r--drivers/usb/storage/uas.c35
-rw-r--r--drivers/usb/typec/altmodes/Kconfig10
-rw-r--r--drivers/usb/typec/altmodes/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c14
-rw-r--r--drivers/usb/typec/altmodes/displayport.h8
-rw-r--r--drivers/usb/typec/altmodes/nvidia.c44
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c3
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c438
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c10
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c32
-rw-r--r--drivers/usb/typec/tcpm/wcove.c39
-rw-r--r--drivers/usb/typec/ucsi/Makefile15
-rw-r--r--drivers/usb/typec/ucsi/displayport.c315
-rw-r--r--drivers/usb/typec/ucsi/trace.c12
-rw-r--r--drivers/usb/typec/ucsi/trace.h26
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c404
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h118
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c884
-rw-r--r--drivers/usb/usbip/stub_rx.c18
-rw-r--r--drivers/usb/usbip/usbip_common.h7
-rw-r--r--drivers/usb/usbip/vhci_hcd.c9
-rw-r--r--drivers/vfio/Kconfig1
-rw-r--r--drivers/vfio/mdev/mdev_core.c36
-rw-r--r--drivers/vfio/mdev/mdev_private.h2
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c27
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c29
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c5
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c12
-rw-r--r--drivers/vfio/vfio.c59
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c5
-rw-r--r--drivers/vfio/vfio_iommu_type1.c156
-rw-r--r--drivers/vhost/scsi.c1
-rw-r--r--drivers/vhost/vhost.c8
-rw-r--r--drivers/video/backlight/Kconfig35
-rw-r--r--drivers/video/backlight/lm3630a_bl.c153
-rw-r--r--drivers/video/backlight/pwm_bl.c15
-rw-r--r--drivers/video/fbdev/Kconfig309
-rw-r--r--drivers/video/fbdev/Makefile2
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.c251
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.h24
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c567
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.h17
-rw-r--r--drivers/video/fbdev/amba-clcd.c98
-rw-r--r--drivers/video/fbdev/atafb.c67
-rw-r--r--drivers/video/fbdev/atafb_iplan2p2.c23
-rw-r--r--drivers/video/fbdev/atafb_iplan2p4.c23
-rw-r--r--drivers/video/fbdev/atafb_iplan2p8.c23
-rw-r--r--drivers/video/fbdev/atafb_mfb.c23
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c116
-rw-r--r--drivers/video/fbdev/core/fbcmap.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c29
-rw-r--r--drivers/video/fbdev/core/modedb.c3
-rw-r--r--drivers/video/fbdev/da8xx-fb.c13
-rw-r--r--drivers/video/fbdev/efifb.c3
-rw-r--r--drivers/video/fbdev/fb-puv3.c2
-rw-r--r--drivers/video/fbdev/gbefb.c24
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/imsttfb.c5
-rw-r--r--drivers/video/fbdev/macfb.c29
-rw-r--r--drivers/video/fbdev/mmp/Kconfig6
-rw-r--r--drivers/video/fbdev/mxsfb.c14
-rw-r--r--drivers/video/fbdev/nuc900fb.c2
-rw-r--r--drivers/video/fbdev/omap/Kconfig20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/Kconfig18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Kconfig40
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/Kconfig6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c6
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/fbdev/s3c2410fb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c6
-rw-r--r--drivers/video/fbdev/sm712.h12
-rw-r--r--drivers/video/fbdev/sm712fb.c243
-rw-r--r--drivers/video/fbdev/udlfb.c114
-rw-r--r--drivers/video/fbdev/uvesafb.c16
-rw-r--r--drivers/video/fbdev/vesafb.c4
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c31
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c31
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/virtio/virtio_ring.c30
-rw-r--r--drivers/w1/masters/ds2482.c18
-rw-r--r--drivers/w1/masters/ds2490.c6
-rw-r--r--drivers/w1/slaves/w1_ds2408.c76
-rw-r--r--drivers/w1/w1_io.c3
-rw-r--r--drivers/watchdog/Kconfig161
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/acquirewdt.c2
-rw-r--r--drivers/watchdog/advantechwdt.c2
-rw-r--r--drivers/watchdog/alim1535_wdt.c2
-rw-r--r--drivers/watchdog/alim7101_wdt.c4
-rw-r--r--drivers/watchdog/ar7_wdt.c2
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c43
-rw-r--r--drivers/watchdog/asm9260_wdt.c77
-rw-r--r--drivers/watchdog/aspeed_wdt.c25
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c2
-rw-r--r--drivers/watchdog/at91sam9_wdt.c4
-rw-r--r--drivers/watchdog/ath79_wdt.c6
-rw-r--r--drivers/watchdog/atlas7_wdt.c65
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c2
-rw-r--r--drivers/watchdog/bcm7038_wdt.c42
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c18
-rw-r--r--drivers/watchdog/bd70528_wdt.c290
-rw-r--r--drivers/watchdog/cadence_wdt.c90
-rw-r--r--drivers/watchdog/coh901327_wdt.c28
-rw-r--r--drivers/watchdog/cpu5wdt.c2
-rw-r--r--drivers/watchdog/cpwd.c2
-rw-r--r--drivers/watchdog/da9052_wdt.c13
-rw-r--r--drivers/watchdog/da9055_wdt.c12
-rw-r--r--drivers/watchdog/da9062_wdt.c20
-rw-r--r--drivers/watchdog/da9063_wdt.c21
-rw-r--r--drivers/watchdog/davinci_wdt.c45
-rw-r--r--drivers/watchdog/digicolor_wdt.c4
-rw-r--r--drivers/watchdog/dw_wdt.c4
-rw-r--r--drivers/watchdog/ebc-c384_wdt.c5
-rw-r--r--drivers/watchdog/ep93xx_wdt.c17
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c20
-rw-r--r--drivers/watchdog/ftwdt010_wdt.c6
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c2
-rw-r--r--drivers/watchdog/gpio_wdt.c16
-rw-r--r--drivers/watchdog/hpwdt.c3
-rw-r--r--drivers/watchdog/i6300esb.c9
-rw-r--r--drivers/watchdog/iTCO_wdt.c13
-rw-r--r--drivers/watchdog/ib700wdt.c2
-rw-r--r--drivers/watchdog/ibmasr.c2
-rw-r--r--drivers/watchdog/imgpdc_wdt.c95
-rw-r--r--drivers/watchdog/imx2_wdt.c8
-rw-r--r--drivers/watchdog/imx_sc_wdt.c175
-rw-r--r--drivers/watchdog/indydog.c2
-rw-r--r--drivers/watchdog/intel-mid_wdt.c22
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c22
-rw-r--r--drivers/watchdog/iop_wdt.c2
-rw-r--r--drivers/watchdog/it8712f_wdt.c2
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c11
-rw-r--r--drivers/watchdog/jz4740_wdt.c17
-rw-r--r--drivers/watchdog/kempld_wdt.c28
-rw-r--r--drivers/watchdog/ks8695_wdt.c2
-rw-r--r--drivers/watchdog/lantiq_wdt.c4
-rw-r--r--drivers/watchdog/loongson1_wdt.c52
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c47
-rw-r--r--drivers/watchdog/m54xx_wdt.c2
-rw-r--r--drivers/watchdog/machzwd.c5
-rw-r--r--drivers/watchdog/max63xx_wdt.c24
-rw-r--r--drivers/watchdog/max77620_wdt.c23
-rw-r--r--drivers/watchdog/mena21_wdt.c28
-rw-r--r--drivers/watchdog/menf21bmc_wdt.c33
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c49
-rw-r--r--drivers/watchdog/meson_wdt.c19
-rw-r--r--drivers/watchdog/mixcomwd.c2
-rw-r--r--drivers/watchdog/mlx_wdt.c14
-rw-r--r--drivers/watchdog/moxart_wdt.c20
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c20
-rw-r--r--drivers/watchdog/mt7621_wdt.c12
-rw-r--r--drivers/watchdog/mtk_wdt.c37
-rw-r--r--drivers/watchdog/mtx-1_wdt.c2
-rw-r--r--drivers/watchdog/mv64x60_wdt.c2
-rw-r--r--drivers/watchdog/ni903x_wdt.c4
-rw-r--r--drivers/watchdog/nic7018_wdt.c5
-rw-r--r--drivers/watchdog/npcm_wdt.c10
-rw-r--r--drivers/watchdog/nuc900_wdt.c6
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c62
-rw-r--r--drivers/watchdog/omap_wdt.c4
-rw-r--r--drivers/watchdog/orion_wdt.c14
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pcwd.c4
-rw-r--r--drivers/watchdog/pcwd_pci.c4
-rw-r--r--drivers/watchdog/pcwd_usb.c4
-rw-r--r--drivers/watchdog/pic32-dmt.c50
-rw-r--r--drivers/watchdog/pic32-wdt.c62
-rw-r--r--drivers/watchdog/pika_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c21
-rw-r--r--drivers/watchdog/pnx4008_wdt.c45
-rw-r--r--drivers/watchdog/pnx833x_wdt.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c55
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--drivers/watchdog/rdc321x_wdt.c2
-rw-r--r--drivers/watchdog/renesas_wdt.c9
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/rn5t618_wdt.c9
-rw-r--r--drivers/watchdog/rt2880_wdt.c32
-rw-r--r--drivers/watchdog/rtd119x_wdt.c47
-rw-r--r--drivers/watchdog/rza_wdt.c25
-rw-r--r--drivers/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/watchdog/sa1100_wdt.c2
-rw-r--r--drivers/watchdog/sama5d4_wdt.c39
-rw-r--r--drivers/watchdog/sb_wdog.c6
-rw-r--r--drivers/watchdog/sbc60xxwdt.c2
-rw-r--r--drivers/watchdog/sbc7240_wdt.c2
-rw-r--r--drivers/watchdog/sbc8360.c2
-rw-r--r--drivers/watchdog/sbc_epx_c3.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/watchdog/sbsa_gwdt.c30
-rw-r--r--drivers/watchdog/sc1200wdt.c2
-rw-r--r--drivers/watchdog/sc520_wdt.c2
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/scx200_wdt.c2
-rw-r--r--drivers/watchdog/shwdt.c4
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c28
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c4
-rw-r--r--drivers/watchdog/sprd_wdt.c42
-rw-r--r--drivers/watchdog/st_lpc_wdt.c53
-rw-r--r--drivers/watchdog/stm32_iwdg.c150
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c16
-rw-r--r--drivers/watchdog/stpmic1_wdt.c13
-rw-r--r--drivers/watchdog/sunxi_wdt.c19
-rw-r--r--drivers/watchdog/tangox_wdt.c41
-rw-r--r--drivers/watchdog/tegra_wdt.c30
-rw-r--r--drivers/watchdog/tqmx86_wdt.c14
-rw-r--r--drivers/watchdog/ts4800_wdt.c33
-rw-r--r--drivers/watchdog/ts72xx_wdt.c18
-rw-r--r--drivers/watchdog/twl4030_wdt.c22
-rw-r--r--drivers/watchdog/txx9wdt.c4
-rw-r--r--drivers/watchdog/uniphier_wdt.c2
-rw-r--r--drivers/watchdog/ux500_wdt.c17
-rw-r--r--drivers/watchdog/w83877f_wdt.c2
-rw-r--r--drivers/watchdog/w83977f_wdt.c2
-rw-r--r--drivers/watchdog/wafer5823wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c42
-rw-r--r--drivers/watchdog/watchdog_dev.c2
-rw-r--r--drivers/watchdog/wdat_wdt.c29
-rw-r--r--drivers/watchdog/wdrtas.c4
-rw-r--r--drivers/watchdog/wdt.c4
-rw-r--r--drivers/watchdog/wdt285.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/watchdog/wdt_pci.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c19
-rw-r--r--drivers/watchdog/xen_wdt.c18
-rw-r--r--drivers/watchdog/ziirave_wdt.c6
-rw-r--r--drivers/watchdog/zx2967_wdt.c37
-rw-r--r--drivers/xen/biomerge.c5
-rw-r--r--drivers/xen/events/events_base.c1
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/gntdev.c19
-rw-r--r--drivers/xen/privcmd-buf.c11
-rw-r--r--drivers/xen/swiotlb-xen.c196
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c6
5656 files changed, 297096 insertions, 233264 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 45f9decb9848..e8231663f201 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -230,4 +230,6 @@ source "drivers/slimbus/Kconfig"
source "drivers/interconnect/Kconfig"
+source "drivers/counter/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index c61cde554340..28b030d7988d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -187,3 +187,4 @@ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_INTERCONNECT) += interconnect/
+obj-$(CONFIG_COUNTER) += counter/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4e015c77e48e..283ee94224c6 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -475,6 +475,7 @@ config ACPI_REDUCED_HARDWARE_ONLY
If you are unsure what to do, do not enable this option.
source "drivers/acpi/nfit/Kconfig"
+source "drivers/acpi/hmat/Kconfig"
source "drivers/acpi/apei/Kconfig"
source "drivers/acpi/dptf/Kconfig"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bb857421c2e8..5d361e4e3405 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
+obj-$(CONFIG_ACPI_HMAT) += hmat/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 81bfc6197293..f92033661239 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -109,7 +109,7 @@ static ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
if (!h)
return -EINVAL;
- return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature);
+ return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->signature);
}
static ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
@@ -170,7 +170,7 @@ static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg,
if (!h)
return -EINVAL;
- return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id);
+ return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->asl_compiler_id);
}
static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 4a434c23a196..d18246a2a65e 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -390,7 +390,7 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_thread(void *unsed)
+static int acpi_aml_thread(void *unused)
{
acpi_osd_exec_callback function = NULL;
void *context;
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
index 2cd9f738812b..43f1b99c86ca 100644
--- a/drivers/acpi/acpi_lpat.c
+++ b/drivers/acpi/acpi_lpat.c
@@ -22,7 +22,7 @@
* LPAT conversion table
*
* @lpat_table: the temperature_raw mapping table structure
- * @raw: the raw value, used as a key to get the temerature from the
+ * @raw: the raw value, used as a key to get the temperature from the
* above mapping table
*
* A positive converted temperature value will be returned on success,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 1e2a10a06b9d..cf768608437e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1142,8 +1142,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
.thaw_noirq = acpi_subsys_thaw_noirq,
.poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_lpss_suspend_late,
- .poweroff_noirq = acpi_subsys_suspend_noirq,
- .restore_noirq = acpi_subsys_resume_noirq,
+ .poweroff_noirq = acpi_lpss_suspend_noirq,
+ .restore_noirq = acpi_lpss_resume_noirq,
.restore_early = acpi_lpss_resume_early,
#endif
.runtime_suspend = acpi_lpss_runtime_suspend,
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index a2dfbf6b004e..13d513b81589 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index bb43305cb215..4027eaab18a4 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -453,7 +453,7 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
/* Dump a _PLD buffer if present */
- if (ACPI_COMPARE_NAME
+ if (ACPI_COMPARE_NAMESEG
((ACPI_CAST_PTR
(struct acpi_namespace_node,
acpi_gbl_db_method_info.method)->name.ascii),
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 004d34d9369b..63fe30e86807 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -354,7 +354,7 @@ acpi_status acpi_db_find_name_in_namespace(char *name_arg)
char acpi_name[5] = "____";
char *acpi_name_ptr = acpi_name;
- if (strlen(name_arg) > ACPI_NAME_SIZE) {
+ if (strlen(name_arg) > ACPI_NAMESEG_SIZE) {
acpi_os_printf("Name must be no longer than 4 characters\n");
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a4a24ffe5fae..4ebd23700bbc 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -200,7 +200,7 @@ acpi_ds_initialize_objects(u32 table_index,
/* DSDT is always the first AML table */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"\nInitializing Namespace objects:\n"));
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index c92d2f6ebe01..b04f982e59fa 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -292,7 +292,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
acpi_status status;
u32 gpe_number;
u8 temp_gpe_number;
- char name[ACPI_NAME_SIZE + 1];
+ char name[ACPI_NAMESEG_SIZE + 1];
u8 type;
ACPI_FUNCTION_TRACE(ev_match_gpe_method);
@@ -310,7 +310,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
* 1) Extract the method name and null terminate it
*/
ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
- name[ACPI_NAME_SIZE] = 0;
+ name[ACPI_NAMESEG_SIZE] = 0;
/* 2) Name must begin with an underscore */
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index bd68d66e89f0..6b76be5212a4 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -53,10 +53,10 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
/* Special case for root */
- size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+ size_needed = 1 + (ACPI_NAMESEG_SIZE * num_name_segs) + 2 + 1;
} else {
size_needed =
- prefix_count + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+ prefix_count + (ACPI_NAMESEG_SIZE * num_name_segs) + 2 + 1;
}
/*
@@ -141,7 +141,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
}
for (index = 0;
- (index < ACPI_NAME_SIZE)
+ (index < ACPI_NAMESEG_SIZE)
&& (acpi_ut_valid_name_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
}
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 75192b958544..7b855603f81a 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -683,7 +683,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Point to next name segment and make this node current */
- path += ACPI_NAME_SIZE;
+ path += ACPI_NAMESEG_SIZE;
current_node = this_node;
}
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 5470213b8e64..6eb63db72249 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -74,6 +74,10 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
ACPI_FUNCTION_NAME(ns_delete_node);
+ if (!node) {
+ return_VOID;
+ }
+
/* Detach an object if there is one */
acpi_ns_detach_object(node);
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 15070bd0c28a..1b12c172e115 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -70,7 +70,7 @@ void acpi_ns_print_pathname(u32 num_segments, const char *pathname)
acpi_os_printf("?");
}
- pathname += ACPI_NAME_SIZE;
+ pathname += ACPI_NAMESEG_SIZE;
num_segments--;
if (num_segments) {
acpi_os_printf(".");
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 19fb8dda870f..53e5d00d3a5e 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -478,7 +478,7 @@ acpi_ns_find_ini_methods(acpi_handle obj_handle,
/* We are only looking for methods named _INI */
- if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
+ if (!ACPI_COMPARE_NAMESEG(node->name.ascii, METHOD_NAME__INI)) {
return (AE_OK);
}
@@ -641,7 +641,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
* Note: We know there is an _INI within this subtree, but it may not be
* under this particular device, it may be lower in the branch.
*/
- if (!ACPI_COMPARE_NAME(device_node->name.ascii, "_SB_") ||
+ if (!ACPI_COMPARE_NAMESEG(device_node->name.ascii, "_SB_") ||
device_node->parent != acpi_gbl_root_node) {
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, device_node,
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 289c15bb8c6a..370bbc867745 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -108,8 +108,8 @@ acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer)
/* Just copy the ACPI name from the Node and zero terminate it */
node_name = acpi_ut_get_node_name(node);
- ACPI_MOVE_NAME(buffer->pointer, node_name);
- ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
+ ACPI_COPY_NAMESEG(buffer->pointer, node_name);
+ ((char *)buffer->pointer)[ACPI_NAMESEG_SIZE] = 0;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%4.4s\n", (char *)buffer->pointer));
return_ACPI_STATUS(AE_OK);
@@ -198,7 +198,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing)
{
u32 length = 0, i;
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u8 do_no_trailing;
char c, *left, *right;
struct acpi_namespace_node *next_node;
@@ -446,7 +446,7 @@ static void acpi_ns_normalize_pathname(char *original_path)
/* Do one nameseg at a time */
- for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+ for (i = 0; (i < ACPI_NAMESEG_SIZE) && *input_path; i++) {
if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
*new_path = *input_path;
new_path++;
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 8638f43cfc3d..79d86da1c892 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
}
}
+ if (obj_desc->common.type == ACPI_TYPE_REGION) {
+ acpi_ut_remove_address_range(obj_desc->region.space_id, node);
+ }
+
/* Clear the Node entry in all cases */
node->object = NULL;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index c0b4f7bedfab..f16cf5e4742c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -203,7 +203,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
/* Found OSDT table, enable the namespace override feature */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) &&
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) &&
pass_number == ACPI_IMODE_LOAD_PASS1) {
walk_state->namespace_override = TRUE;
}
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 0aacfa48e20d..be86fea8e4d4 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -316,7 +316,7 @@ static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
this_name = acpi_object_repair_info;
while (this_name->object_converter) {
- if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
+ if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {
/* Check if we can actually repair this name/type combination */
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index d5804a6d1d65..8d776256b213 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
acpi_repair_function repair_function;
} acpi_repair_info;
@@ -188,7 +188,7 @@ static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
this_name = acpi_ns_repairable_names;
while (this_name->repair_function) {
- if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
+ if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {
return (this_name);
}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index e5cef1edf49f..6bc90d46db5c 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -178,7 +178,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
}
}
- info->length = (ACPI_NAME_SIZE * info->num_segments) +
+ info->length = (ACPI_NAMESEG_SIZE * info->num_segments) +
4 + info->num_carats;
info->next_external_char = next_external_char;
@@ -249,7 +249,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Build the name (minus path separators) */
for (; num_segments; num_segments--) {
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (ACPI_IS_PATH_SEPARATOR(*external_name) ||
(*external_name == 0)) {
@@ -274,7 +274,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Move on the next segment */
external_name++;
- result += ACPI_NAME_SIZE;
+ result += ACPI_NAMESEG_SIZE;
}
/* Terminate the string */
@@ -489,12 +489,12 @@ acpi_ns_externalize_name(u32 internal_name_length,
/* Copy and validate the 4-char name segment */
- ACPI_MOVE_NAME(&(*converted_name)[j],
- &internal_name[names_index]);
+ ACPI_COPY_NAMESEG(&(*converted_name)[j],
+ &internal_name[names_index]);
acpi_ut_repair_name(&(*converted_name)[j]);
- j += ACPI_NAME_SIZE;
- names_index += ACPI_NAME_SIZE;
+ j += ACPI_NAMESEG_SIZE;
+ names_index += ACPI_NAMESEG_SIZE;
}
}
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index de2d3135d6a9..55b4a5b3331f 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -495,8 +495,8 @@ acpi_status acpi_install_method(u8 *buffer)
/* Table must be a DSDT or SSDT */
- if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
- !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+ if (!ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) &&
+ !ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT)) {
return (AE_BAD_HEADER);
}
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 9d9d442cd999..e62c7897fdf1 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -150,21 +150,21 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
/* Two name segments */
- end += 1 + (2 * ACPI_NAME_SIZE);
+ end += 1 + (2 * ACPI_NAMESEG_SIZE);
break;
case AML_MULTI_NAME_PREFIX:
/* Multiple name segments, 4 chars each, count in next byte */
- end += 2 + (*(end + 1) * ACPI_NAME_SIZE);
+ end += 2 + (*(end + 1) * ACPI_NAMESEG_SIZE);
break;
default:
/* Single name segment */
- end += ACPI_NAME_SIZE;
+ end += ACPI_NAMESEG_SIZE;
break;
}
@@ -522,7 +522,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
ACPI_MOVE_32_TO_32(&name, parser_state->aml);
acpi_ps_set_name(field, name);
- parser_state->aml += ACPI_NAME_SIZE;
+ parser_state->aml += ACPI_NAMESEG_SIZE;
ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 1d6f136e4068..c62be3d91712 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -603,10 +603,10 @@ acpi_walk_resources(acpi_handle device_handle,
/* Parameter validation */
if (!device_handle || !user_function || !name ||
- (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) {
+ (!ACPI_COMPARE_NAMESEG(name, METHOD_NAME__CRS) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__PRS) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__AEI) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__DMA))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 0cecd0039acf..933f81316ad2 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -480,7 +480,8 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
/* If a particular signature is expected (DSDT/FACS), it must match */
- if (signature && !ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
+ if (signature &&
+ !ACPI_COMPARE_NAMESEG(&table_desc->signature, signature)) {
ACPI_BIOS_ERROR((AE_INFO,
"Invalid signature 0x%X for ACPI table, expected [%s]",
table_desc->signature.integer, signature));
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 951bd8e1c50a..b2abb40023a6 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -56,7 +56,7 @@ acpi_tb_find_table(char *signature,
/* Normalize the input strings */
memset(&header, 0, sizeof(struct acpi_table_header));
- ACPI_MOVE_NAME(header.signature, signature);
+ ACPI_COPY_NAMESEG(header.signature, signature);
strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
@@ -65,7 +65,7 @@ acpi_tb_find_table(char *signature,
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (memcmp(&(acpi_gbl_root_table_list.tables[i].signature),
- header.signature, ACPI_NAME_SIZE)) {
+ header.signature, ACPI_NAMESEG_SIZE)) {
/* Not the requested table */
@@ -94,14 +94,14 @@ acpi_tb_find_table(char *signature,
if (!memcmp
(acpi_gbl_root_table_list.tables[i].pointer->signature,
- header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
- ||
- !memcmp
- (acpi_gbl_root_table_list.
- tables[i].pointer->
- oem_id,
- header.oem_id,
- ACPI_OEM_ID_SIZE))
+ header.signature, ACPI_NAMESEG_SIZE) && (!oem_id[0]
+ ||
+ !memcmp
+ (acpi_gbl_root_table_list.
+ tables[i].
+ pointer->oem_id,
+ header.oem_id,
+ ACPI_OEM_ID_SIZE))
&& (!oem_table_id[0]
|| !memcmp(acpi_gbl_root_table_list.tables[i].pointer->
oem_table_id, header.oem_table_id,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index be6642bf6366..ef1ffd36ab3f 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -120,7 +120,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
*/
if (!reload &&
acpi_gbl_disable_ssdt_table_install &&
- ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
+ ACPI_COMPARE_NAMESEG(&new_table_desc.signature, ACPI_SIG_SSDT)) {
ACPI_INFO(("Ignoring installation of %4.4s at %8.8X%8.8X",
new_table_desc.signature.ascii,
ACPI_FORMAT_UINT64(address)));
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 9b5df95d881b..4764f849cb78 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -69,10 +69,10 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
memcpy(out_header, header, sizeof(struct acpi_table_header));
- acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE);
+ acpi_tb_fix_string(out_header->signature, ACPI_NAMESEG_SIZE);
acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(out_header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
- acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAME_SIZE);
+ acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAMESEG_SIZE);
}
/*******************************************************************************
@@ -94,7 +94,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
- if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
+ if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -158,8 +158,8 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
* They are the odd tables, have no standard ACPI header and no checksum
*/
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_S3PT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_FACS)) {
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 2469e01310e2..c5f0b8ec70cc 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -332,9 +332,9 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
&table_index);
if (ACPI_SUCCESS(status) &&
- ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
- tables[table_index].signature,
- ACPI_SIG_FADT)) {
+ ACPI_COMPARE_NAMESEG(&acpi_gbl_root_table_list.
+ tables[table_index].signature,
+ ACPI_SIG_FADT)) {
acpi_gbl_fadt_index = table_index;
acpi_tb_parse_fadt();
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 36592888f0e7..1640685bf4ae 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -230,7 +230,7 @@ acpi_get_table_header(char *signature,
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
- if (!ACPI_COMPARE_NAME
+ if (!ACPI_COMPARE_NAMESEG
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
continue;
@@ -323,7 +323,7 @@ acpi_get_table(char *signature,
i++) {
table_desc = &acpi_gbl_root_table_list.tables[i];
- if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
+ if (!ACPI_COMPARE_NAMESEG(&table_desc->signature, signature)) {
continue;
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 1a2592cc3245..4f30f06a6f78 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -118,7 +118,7 @@ acpi_status acpi_tb_load_namespace(void)
table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index];
if (!acpi_gbl_root_table_list.current_table_count ||
- !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) ||
+ !ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_DSDT) ||
ACPI_FAILURE(acpi_tb_validate_table(table))) {
status = AE_NO_ACPI_TABLES;
goto unlock_and_exit;
@@ -170,11 +170,12 @@ acpi_status acpi_tb_load_namespace(void)
table = &acpi_gbl_root_table_list.tables[i];
if (!table->address ||
- (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
- && !ACPI_COMPARE_NAME(table->signature.ascii,
- ACPI_SIG_PSDT)
- && !ACPI_COMPARE_NAME(table->signature.ascii,
- ACPI_SIG_OSDT))
+ (!ACPI_COMPARE_NAMESEG
+ (table->signature.ascii, ACPI_SIG_SSDT)
+ && !ACPI_COMPARE_NAMESEG(table->signature.ascii,
+ ACPI_SIG_PSDT)
+ && !ACPI_COMPARE_NAMESEG(table->signature.ascii,
+ ACPI_SIG_OSDT))
|| ACPI_FAILURE(acpi_tb_validate_table(table))) {
continue;
}
@@ -364,7 +365,7 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
* only these types can contain AML and thus are the only types
* that can create namespace objects.
*/
- if (ACPI_COMPARE_NAME
+ if (ACPI_COMPARE_NAMESEG
(acpi_gbl_root_table_list.tables[i].signature.ascii,
ACPI_SIG_DSDT)) {
status = AE_TYPE;
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 79d7426fd7bf..f6cd7d4f698b 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -30,7 +30,7 @@ u8 acpi_ut_valid_nameseg(char *name)
/* Validate each character in the signature */
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (!acpi_ut_valid_name_char(name[i], i)) {
return (FALSE);
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index ad9f77eb554f..65beaa237669 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -78,7 +78,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"IPMI", /* 0x07 */
"GeneralPurposeIo", /* 0x08 */
"GenericSerialBus", /* 0x09 */
- "PCC" /* 0x0A */
+ "PlatformCommChannel" /* 0x0A */
};
const char *acpi_ut_get_region_name(u8 space_id)
@@ -239,7 +239,7 @@ const char *acpi_ut_get_node_name(void *object)
{
struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
- /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
+ /* Must return a string of exactly 4 characters == ACPI_NAMESEG_SIZE */
if (!object) {
return ("NULL");
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index afaadc73196b..8638efacdbf4 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -59,10 +59,10 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
/* These are the only tables that contain executable AML */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) ||
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_PSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) ||
ACPI_IS_OEM_SIG(table->signature)) {
return (TRUE);
}
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index a9f08f43c685..1b0f68f5ed8c 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -84,7 +84,7 @@ const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name)
this_name = acpi_gbl_predefined_methods;
while (this_name->info.name[0]) {
- if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+ if (ACPI_COMPARE_NAMESEG(name, this_name->info.name)) {
return (this_name);
}
@@ -201,7 +201,7 @@ const union acpi_predefined_info *acpi_ut_match_resource_name(char *name)
this_name = acpi_gbl_resource_names;
while (this_name->info.name[0]) {
- if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+ if (ACPI_COMPARE_NAMESEG(name, this_name->info.name)) {
return (this_name);
}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 5bef0b059406..c39b5483045d 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -141,15 +141,15 @@ void acpi_ut_repair_name(char *name)
* Special case for the root node. This can happen if we get an
* error during the execution of module-level code.
*/
- if (ACPI_COMPARE_NAME(name, ACPI_ROOT_PATHNAME)) {
+ if (ACPI_COMPARE_NAMESEG(name, ACPI_ROOT_PATHNAME)) {
return;
}
- ACPI_MOVE_NAME(&original_name, name);
+ ACPI_COPY_NAMESEG(&original_name, name);
/* Check each character in the name */
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (acpi_ut_valid_name_char(name[i], i)) {
continue;
}
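As an aside for readers following the ACPI_NAME_SIZE/ACPI_COMPARE_NAME to ACPI_NAMESEG_SIZE/ACPI_COMPARE_NAMESEG rename in the hunks above: a minimal user-space sketch of what a name-segment comparison amounts to, assuming the usual fixed 4-character ACPI name segments (the real macros live in the ACPICA headers, which are not part of this diff):

#include <stdio.h>
#include <string.h>

#define NAMESEG_SIZE 4	/* fixed-length ACPI name segment, e.g. "_SB_" */

/* Hypothetical helper for illustration; not the ACPICA macro itself. */
static int nameseg_equal(const char *a, const char *b)
{
	return memcmp(a, b, NAMESEG_SIZE) == 0;
}

int main(void)
{
	printf("%d\n", nameseg_equal("DSDT", "DSDT"));	/* prints 1 */
	printf("%d\n", nameseg_equal("DSDT", "SSDT"));	/* prints 0 */
	return 0;
}

The rename itself is mechanical; only the terminology moves from "name" to "nameseg".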
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e48894e002ba..9058cb084b91 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -356,7 +356,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
- node->type == ACPI_IORT_NODE_SMMU_V3) {
+ node->type == ACPI_IORT_NODE_SMMU_V3 ||
+ node->type == ACPI_IORT_NODE_PMCG) {
*id_out = map->output_base;
return parent;
}
@@ -394,6 +395,8 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
}
return smmu->id_mapping_index;
+ case ACPI_IORT_NODE_PMCG:
+ return 0;
default:
return -EINVAL;
}
@@ -1028,6 +1031,14 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
+static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
+{
+ struct acpi_iort_root_complex *pci_rc;
+
+ pci_rc = (struct acpi_iort_root_complex *)node->node_data;
+ return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
+}
+
/**
* iort_iommu_configure - Set-up IOMMU configuration for a device.
*
@@ -1063,6 +1074,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
info.node = node;
err = pci_for_each_dma_alias(to_pci_dev(dev),
iort_pci_iommu_init, &info);
+
+ if (!err && iort_pci_rc_supports_ats(node))
+ dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
int i = 0;
@@ -1218,32 +1232,47 @@ static void __init arm_smmu_v3_init_resources(struct resource *res,
}
}
-static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
+static void __init arm_smmu_v3_dma_configure(struct device *dev,
+ struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
+ enum dev_dma_attr attr;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
- return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
+ attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+ /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ /* Configure DMA for the page table walker */
+ acpi_dma_configure(dev, attr);
}
#if defined(CONFIG_ACPI_NUMA)
/*
* set numa proximity domain for smmuv3 device
*/
-static void __init arm_smmu_v3_set_proximity(struct device *dev,
+static int __init arm_smmu_v3_set_proximity(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
- set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
+ int node = acpi_map_pxm_to_node(smmu->pxm);
+
+ if (node != NUMA_NO_NODE && !node_online(node))
+ return -EINVAL;
+
+ set_dev_node(dev, node);
pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
smmu->base_address,
smmu->pxm);
}
+ return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
@@ -1301,30 +1330,96 @@ static void __init arm_smmu_init_resources(struct resource *res,
}
}
-static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
+static void __init arm_smmu_dma_configure(struct device *dev,
+ struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
+ enum dev_dma_attr attr;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
- return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
+ attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+ /* We expect the dma masks to be equivalent for SMMU set-ups */
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ /* Configure DMA for the page table walker */
+ acpi_dma_configure(dev, attr);
+}
+
+static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
+{
+ struct acpi_iort_pmcg *pmcg;
+
+ /* Retrieve PMCG specific data */
+ pmcg = (struct acpi_iort_pmcg *)node->node_data;
+
+ /*
+ * There are always 2 memory resources.
+ * If the overflow_gsiv is present then add that for a total of 3.
+ */
+ return pmcg->overflow_gsiv ? 3 : 2;
+}
+
+static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_pmcg *pmcg;
+
+ /* Retrieve PMCG specific data */
+ pmcg = (struct acpi_iort_pmcg *)node->node_data;
+
+ res[0].start = pmcg->page0_base_address;
+ res[0].end = pmcg->page0_base_address + SZ_4K - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = pmcg->page1_base_address;
+ res[1].end = pmcg->page1_base_address + SZ_4K - 1;
+ res[1].flags = IORESOURCE_MEM;
+
+ if (pmcg->overflow_gsiv)
+ acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
+ ACPI_EDGE_SENSITIVE, &res[2]);
+}
+
+static struct acpi_platform_list pmcg_plat_info[] __initdata = {
+ /* HiSilicon Hip08 Platform */
+ {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+ { }
+};
+
+static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
+{
+ u32 model;
+ int idx;
+
+ idx = acpi_match_platform_list(pmcg_plat_info);
+ if (idx >= 0)
+ model = pmcg_plat_info[idx].data;
+ else
+ model = IORT_SMMU_V3_PMCG_GENERIC;
+
+ return platform_device_add_data(pdev, &model, sizeof(model));
}
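A hedged consumer-side sketch (not taken from any driver in this diff): the u32 model value attached through platform_device_add_data() above can later be read back from the device's platform data, roughly like this:

#include <linux/platform_device.h>

/* Hypothetical helper for illustration only. */
static u32 pmcg_model(struct platform_device *pdev)
{
	const u32 *model = dev_get_platdata(&pdev->dev);

	return model ? *model : 0;	/* 0 as an assumed fallback */
}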
struct iort_dev_config {
const char *name;
int (*dev_init)(struct acpi_iort_node *node);
- bool (*dev_is_coherent)(struct acpi_iort_node *node);
+ void (*dev_dma_configure)(struct device *dev,
+ struct acpi_iort_node *node);
int (*dev_count_resources)(struct acpi_iort_node *node);
void (*dev_init_resources)(struct resource *res,
struct acpi_iort_node *node);
- void (*dev_set_proximity)(struct device *dev,
+ int (*dev_set_proximity)(struct device *dev,
struct acpi_iort_node *node);
+ int (*dev_add_platdata)(struct platform_device *pdev);
};
static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
- .dev_is_coherent = arm_smmu_v3_is_coherent,
+ .dev_dma_configure = arm_smmu_v3_dma_configure,
.dev_count_resources = arm_smmu_v3_count_resources,
.dev_init_resources = arm_smmu_v3_init_resources,
.dev_set_proximity = arm_smmu_v3_set_proximity,
@@ -1332,9 +1427,16 @@ static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
.name = "arm-smmu",
- .dev_is_coherent = arm_smmu_is_coherent,
+ .dev_dma_configure = arm_smmu_dma_configure,
.dev_count_resources = arm_smmu_count_resources,
- .dev_init_resources = arm_smmu_init_resources
+ .dev_init_resources = arm_smmu_init_resources,
+};
+
+static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
+ .name = "arm-smmu-v3-pmcg",
+ .dev_count_resources = arm_smmu_v3_pmcg_count_resources,
+ .dev_init_resources = arm_smmu_v3_pmcg_init_resources,
+ .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};
static __init const struct iort_dev_config *iort_get_dev_cfg(
@@ -1345,6 +1447,8 @@ static __init const struct iort_dev_config *iort_get_dev_cfg(
return &iort_arm_smmu_v3_cfg;
case ACPI_IORT_NODE_SMMU:
return &iort_arm_smmu_cfg;
+ case ACPI_IORT_NODE_PMCG:
+ return &iort_arm_smmu_v3_pmcg_cfg;
default:
return NULL;
}
@@ -1362,15 +1466,17 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
struct fwnode_handle *fwnode;
struct platform_device *pdev;
struct resource *r;
- enum dev_dma_attr attr;
int ret, count;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
- if (ops->dev_set_proximity)
- ops->dev_set_proximity(&pdev->dev, node);
+ if (ops->dev_set_proximity) {
+ ret = ops->dev_set_proximity(&pdev->dev, node);
+ if (ret)
+ goto dev_put;
+ }
count = ops->dev_count_resources(node);
@@ -1393,19 +1499,19 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
goto dev_put;
/*
- * Add a copy of IORT node pointer to platform_data to
- * be used to retrieve IORT data information.
+ * Platform devices based on PMCG nodes uses platform_data to
+ * Platform devices based on PMCG nodes use platform_data to
+ * pass the hardware model info to the driver. For others, add
+ * a copy of IORT node pointer to platform_data to be used to
+ * retrieve IORT data information.
*/
- ret = platform_device_add_data(pdev, &node, sizeof(node));
+ if (ops->dev_add_platdata)
+ ret = ops->dev_add_platdata(pdev);
+ else
+ ret = platform_device_add_data(pdev, &node, sizeof(node));
+
if (ret)
goto dev_put;
- /*
- * We expect the dma masks to be equivalent for
- * all SMMUs set-ups
- */
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-
fwnode = iort_get_fwnode(node);
if (!fwnode) {
@@ -1415,11 +1521,8 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
pdev->dev.fwnode = fwnode;
- attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
- DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
-
- /* Configure DMA for the page table walker */
- acpi_dma_configure(&pdev->dev, attr);
+ if (ops->dev_dma_configure)
+ ops->dev_dma_configure(&pdev->dev, node);
iort_set_device_domain(&pdev->dev, node);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a19ff3977ac4..623998a8d722 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -456,8 +456,11 @@ static int acpi_button_resume(struct device *dev)
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
- if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users)
+ if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) {
+ button->last_state = !!acpi_lid_evaluate_state(device);
+ button->last_time = ktime_get();
acpi_lid_initialize_state(device);
+ }
return 0;
}
#endif
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d4244e7d0e38..653642a4cbdd 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -81,9 +81,9 @@ struct cppc_pcc_data {
int refcount;
};
-/* Array to represent the PCC channel per subspace id */
+/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
-/* The cpu_pcc_subspace_idx containsper CPU subspace id */
+/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
@@ -436,7 +436,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
return -ENOMEM;
/*
- * Now that we have _PSD data from all CPUs, lets setup P-state
+ * Now that we have _PSD data from all CPUs, let's setup P-state
* domain info.
*/
for_each_possible_cpu(i) {
@@ -588,7 +588,7 @@ static int register_pcc_channel(int pcc_ss_idx)
return -ENOMEM;
}
- /* Set flag so that we dont come here for each CPU. */
+ /* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
@@ -613,7 +613,7 @@ bool __weak cpc_ffh_supported(void)
*
* Check and allocate the cppc_pcc_data memory.
* In some processor configurations it is possible that same subspace
- * is shared between multiple CPU's. This is seen especially in CPU's
+ * is shared between multiple CPUs. This is seen especially in CPUs
* with hardware multi-threading support.
*
* Return: 0 for success, errno for failure
@@ -711,7 +711,7 @@ static bool is_cppc_supported(int revision, int num_ent)
/**
* acpi_cppc_processor_probe - Search for per CPU _CPC objects.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
*
* Return: 0 for success or negative value for err.
*/
@@ -728,7 +728,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
acpi_status status;
int ret = -EFAULT;
- /* Parse the ACPI _CPC table for this cpu. */
+ /* Parse the ACPI _CPC table for this CPU. */
status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status)) {
@@ -840,7 +840,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (ret)
goto out_free;
- /* Register PCC channel once for all PCC subspace id. */
+ /* Register PCC channel once for all PCC subspace ID. */
if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
ret = register_pcc_channel(pcc_subspace_id);
if (ret)
@@ -860,7 +860,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- /* Plug PSD data into this CPUs CPC descriptor. */
+ /* Plug PSD data into this CPU's CPC descriptor. */
per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
/**
* acpi_cppc_processor_exit - Cleanup CPC structs.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
*
* Return: Void
*/
@@ -931,7 +931,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
/**
* cpc_read_ffh() - Read FFH register
- * @cpunum: cpu number to read
+ * @cpunum: CPU number to read
* @reg: cppc register information
* @val: place holder for return value
*
@@ -946,7 +946,7 @@ int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
/**
* cpc_write_ffh() - Write FFH register
- * @cpunum: cpu number to write
+ * @cpunum: CPU number to write
* @reg: cppc register information
* @val: value to write
*
@@ -1093,7 +1093,7 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
/**
- * cppc_get_perf_caps - Get a CPUs performance capabilities.
+ * cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
*
@@ -1183,7 +1183,7 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
- * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
+ * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
* @cpunum: CPU from which to read counters.
* @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
*
@@ -1210,7 +1210,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
/*
- * If refernce perf register is not supported then we should
+ * If reference perf register is not supported then we should
* use the nominal perf value
*/
if (!CPC_SUPPORTED(ref_perf_reg))
@@ -1263,7 +1263,7 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
/**
- * cppc_set_perf - Set a CPUs performance controls.
+ * cppc_set_perf - Set a CPU's performance controls.
* @cpu: CPU for which to set performance controls.
* @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
*
@@ -1344,7 +1344,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* executing the Phase-II.
* 2. Some other CPU has beaten this CPU to successfully execute the
* write_trylock and has already acquired the write_lock. We know for a
- * fact it(other CPU acquiring the write_lock) couldn't have happened
+ * fact it (other CPU acquiring the write_lock) couldn't have happened
* before this CPU's Phase-I as we held the read_lock.
* 3. Some other CPU executing pcc CMD_READ has stolen the
* down_write, in which case, send_pcc_cmd will check for pending
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 824ae985ad93..b859d75eaf9f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -414,7 +414,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (adev->wakeup.flags.notifier_present) {
pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
if (adev->wakeup.context.func) {
- acpi_handle_debug(handle, "Running %pF for %s\n",
+ acpi_handle_debug(handle, "Running %pS for %s\n",
adev->wakeup.context.func,
dev_name(adev->wakeup.context.dev));
adev->wakeup.context.func(&adev->wakeup.context);
@@ -728,6 +728,9 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev,
goto out;
}
+ acpi_handle_debug(adev->handle, "GPE%2X enabled for wakeup\n",
+ (unsigned int)wakeup->gpe_number);
+
inc:
wakeup->enable_count++;
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 8940054d6250..78c2653bf020 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -428,8 +428,10 @@ static ssize_t acpi_device_adr_show(struct device *dev,
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- return sprintf(buf, "0x%08x\n",
- (unsigned int)(acpi_dev->pnp.bus_address));
+ if (acpi_dev->pnp.bus_address > U32_MAX)
+ return sprintf(buf, "0x%016llx\n", acpi_dev->pnp.bus_address);
+ else
+ return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
}
static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index e1c242568341..0c081390930a 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -31,8 +31,7 @@ static ssize_t name##_show(struct device *dev,\
struct device_attribute *attr,\
char *buf)\
{\
- struct platform_device *pdev = to_platform_device(dev);\
- struct acpi_device *acpi_dev = platform_get_drvdata(pdev);\
+ struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
unsigned long long val;\
acpi_status status;\
\
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 5a127f3f2d5c..47f21599f2ab 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -131,8 +131,8 @@ int acpi_bus_generate_netlink_event(const char *device_class,
event = nla_data(attr);
memset(event, 0, sizeof(struct acpi_genl_event));
- strcpy(event->device_class, device_class);
- strcpy(event->bus_id, bus_id);
+ strscpy(event->device_class, device_class, sizeof(event->device_class));
+ strscpy(event->bus_id, bus_id, sizeof(event->bus_id));
event->type = type;
event->data = data;
diff --git a/drivers/acpi/hmat/Kconfig b/drivers/acpi/hmat/Kconfig
new file mode 100644
index 000000000000..95a29964dbea
--- /dev/null
+++ b/drivers/acpi/hmat/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+config ACPI_HMAT
+ bool "ACPI Heterogeneous Memory Attribute Table Support"
+ depends on ACPI_NUMA
+ select HMEM_REPORTING
+ help
+	  If set, this option causes the kernel to parse and report the
+	  platform's ACPI HMAT (Heterogeneous Memory Attributes Table),
+	  register memory initiators with their targets, and export
+	  performance attributes through the node's sysfs device if
+	  provided.
diff --git a/drivers/acpi/hmat/Makefile b/drivers/acpi/hmat/Makefile
new file mode 100644
index 000000000000..e909051d3d00
--- /dev/null
+++ b/drivers/acpi/hmat/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ACPI_HMAT) := hmat.o
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
new file mode 100644
index 000000000000..96b7d39a97c6
--- /dev/null
+++ b/drivers/acpi/hmat/hmat.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Intel Corporation.
+ *
+ * Heterogeneous Memory Attributes Table (HMAT) representation
+ *
+ * This program parses and reports the platform's HMAT tables, and registers
+ * the applicable attributes with the node's interfaces.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/node.h>
+#include <linux/sysfs.h>
+
+static __initdata u8 hmat_revision;
+
+static __initdata LIST_HEAD(targets);
+static __initdata LIST_HEAD(initiators);
+static __initdata LIST_HEAD(localities);
+
+/*
+ * The defined enum order is used to prioritize attributes to break ties when
+ * selecting the best performing node.
+ */
+enum locality_types {
+ WRITE_LATENCY,
+ READ_LATENCY,
+ WRITE_BANDWIDTH,
+ READ_BANDWIDTH,
+};
+
+static struct memory_locality *localities_types[4];
+
+struct memory_target {
+ struct list_head node;
+ unsigned int memory_pxm;
+ unsigned int processor_pxm;
+ struct node_hmem_attrs hmem_attrs;
+};
+
+struct memory_initiator {
+ struct list_head node;
+ unsigned int processor_pxm;
+};
+
+struct memory_locality {
+ struct list_head node;
+ struct acpi_hmat_locality *hmat_loc;
+};
+
+static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
+{
+ struct memory_initiator *initiator;
+
+ list_for_each_entry(initiator, &initiators, node)
+ if (initiator->processor_pxm == cpu_pxm)
+ return initiator;
+ return NULL;
+}
+
+static __init struct memory_target *find_mem_target(unsigned int mem_pxm)
+{
+ struct memory_target *target;
+
+ list_for_each_entry(target, &targets, node)
+ if (target->memory_pxm == mem_pxm)
+ return target;
+ return NULL;
+}
+
+static __init void alloc_memory_initiator(unsigned int cpu_pxm)
+{
+ struct memory_initiator *initiator;
+
+ if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
+ return;
+
+ initiator = find_mem_initiator(cpu_pxm);
+ if (initiator)
+ return;
+
+ initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
+ if (!initiator)
+ return;
+
+ initiator->processor_pxm = cpu_pxm;
+ list_add_tail(&initiator->node, &initiators);
+}
+
+static __init void alloc_memory_target(unsigned int mem_pxm)
+{
+ struct memory_target *target;
+
+ if (pxm_to_node(mem_pxm) == NUMA_NO_NODE)
+ return;
+
+ target = find_mem_target(mem_pxm);
+ if (target)
+ return;
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target)
+ return;
+
+ target->memory_pxm = mem_pxm;
+ target->processor_pxm = PXM_INVAL;
+ list_add_tail(&target->node, &targets);
+}
+
+static __init const char *hmat_data_type(u8 type)
+{
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ return "Access Latency";
+ case ACPI_HMAT_READ_LATENCY:
+ return "Read Latency";
+ case ACPI_HMAT_WRITE_LATENCY:
+ return "Write Latency";
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ return "Access Bandwidth";
+ case ACPI_HMAT_READ_BANDWIDTH:
+ return "Read Bandwidth";
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ return "Write Bandwidth";
+ default:
+ return "Reserved";
+ }
+}
+
+static __init const char *hmat_data_type_suffix(u8 type)
+{
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ return " nsec";
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ case ACPI_HMAT_READ_BANDWIDTH:
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ return " MB/s";
+ default:
+ return "";
+ }
+}
+
+static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
+{
+ u32 value;
+
+ /*
+ * Check for invalid and overflow values
+ */
+ if (entry == 0xffff || !entry)
+ return 0;
+ else if (base > (UINT_MAX / (entry)))
+ return 0;
+
+ /*
+ * Divide by the base unit for version 1, convert latency from
+ * picoseconds to nanoseconds if revision 2.
+ */
+ value = entry * base;
+ if (hmat_revision == 1) {
+ if (value < 10)
+ return 0;
+ value = DIV_ROUND_UP(value, 10);
+ } else if (hmat_revision == 2) {
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ value = DIV_ROUND_UP(value, 1000);
+ break;
+ default:
+ break;
+ }
+ }
+ return value;
+}
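A quick worked example of the normalization above, using made-up numbers rather than values from any real table: under revision 2, a latency entry of 5 with an entry_base_unit of 100 gives 500 picoseconds, which rounds up to 1 nanosecond; the same product under revision 1 is divided by 10 instead. A standalone sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long entry = 5;	/* raw HMAT entry (assumed) */
	unsigned long long base = 100;	/* entry_base_unit (assumed) */
	unsigned long long value = entry * base;	/* 500 */

	printf("rev 2 latency: %llu ns\n", DIV_ROUND_UP(value, 1000));	/* 1 */
	printf("rev 1 value:   %llu\n", DIV_ROUND_UP(value, 10));	/* 50 */
	return 0;
}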
+
+static __init void hmat_update_target_access(struct memory_target *target,
+ u8 type, u32 value)
+{
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ target->hmem_attrs.read_latency = value;
+ target->hmem_attrs.write_latency = value;
+ break;
+ case ACPI_HMAT_READ_LATENCY:
+ target->hmem_attrs.read_latency = value;
+ break;
+ case ACPI_HMAT_WRITE_LATENCY:
+ target->hmem_attrs.write_latency = value;
+ break;
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ target->hmem_attrs.read_bandwidth = value;
+ target->hmem_attrs.write_bandwidth = value;
+ break;
+ case ACPI_HMAT_READ_BANDWIDTH:
+ target->hmem_attrs.read_bandwidth = value;
+ break;
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ target->hmem_attrs.write_bandwidth = value;
+ break;
+ default:
+ break;
+ }
+}
+
+static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
+{
+ struct memory_locality *loc;
+
+ loc = kzalloc(sizeof(*loc), GFP_KERNEL);
+ if (!loc) {
+ pr_notice_once("Failed to allocate HMAT locality\n");
+ return;
+ }
+
+ loc->hmat_loc = hmat_loc;
+ list_add_tail(&loc->node, &localities);
+
+ switch (hmat_loc->data_type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ localities_types[READ_LATENCY] = loc;
+ localities_types[WRITE_LATENCY] = loc;
+ break;
+ case ACPI_HMAT_READ_LATENCY:
+ localities_types[READ_LATENCY] = loc;
+ break;
+ case ACPI_HMAT_WRITE_LATENCY:
+ localities_types[WRITE_LATENCY] = loc;
+ break;
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ localities_types[READ_BANDWIDTH] = loc;
+ localities_types[WRITE_BANDWIDTH] = loc;
+ break;
+ case ACPI_HMAT_READ_BANDWIDTH:
+ localities_types[READ_BANDWIDTH] = loc;
+ break;
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ localities_types[WRITE_BANDWIDTH] = loc;
+ break;
+ default:
+ break;
+ }
+}
+
+static __init int hmat_parse_locality(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_hmat_locality *hmat_loc = (void *)header;
+ struct memory_target *target;
+ unsigned int init, targ, total_size, ipds, tpds;
+ u32 *inits, *targs, value;
+ u16 *entries;
+ u8 type, mem_hier;
+
+ if (hmat_loc->header.length < sizeof(*hmat_loc)) {
+ pr_notice("HMAT: Unexpected locality header length: %d\n",
+ hmat_loc->header.length);
+ return -EINVAL;
+ }
+
+ type = hmat_loc->data_type;
+ mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
+ ipds = hmat_loc->number_of_initiator_Pds;
+ tpds = hmat_loc->number_of_target_Pds;
+ total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
+ sizeof(*inits) * ipds + sizeof(*targs) * tpds;
+ if (hmat_loc->header.length < total_size) {
+ pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
+ hmat_loc->header.length, total_size);
+ return -EINVAL;
+ }
+
+ pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
+ hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+ hmat_loc->entry_base_unit);
+
+ inits = (u32 *)(hmat_loc + 1);
+ targs = inits + ipds;
+ entries = (u16 *)(targs + tpds);
+ for (init = 0; init < ipds; init++) {
+ alloc_memory_initiator(inits[init]);
+ for (targ = 0; targ < tpds; targ++) {
+ value = hmat_normalize(entries[init * tpds + targ],
+ hmat_loc->entry_base_unit,
+ type);
+ pr_info(" Initiator-Target[%d-%d]:%d%s\n",
+ inits[init], targs[targ], value,
+ hmat_data_type_suffix(type));
+
+ if (mem_hier == ACPI_HMAT_MEMORY) {
+ target = find_mem_target(targs[targ]);
+ if (target && target->processor_pxm == inits[init])
+ hmat_update_target_access(target, type, value);
+ }
+ }
+ }
+
+ if (mem_hier == ACPI_HMAT_MEMORY)
+ hmat_add_locality(hmat_loc);
+
+ return 0;
+}
+
+static __init int hmat_parse_cache(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_hmat_cache *cache = (void *)header;
+ struct node_cache_attrs cache_attrs;
+ u32 attrs;
+
+ if (cache->header.length < sizeof(*cache)) {
+ pr_notice("HMAT: Unexpected cache header length: %d\n",
+ cache->header.length);
+ return -EINVAL;
+ }
+
+ attrs = cache->cache_attributes;
+ pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ cache->memory_PD, cache->cache_size, attrs,
+ cache->number_of_SMBIOShandles);
+
+ cache_attrs.size = cache->cache_size;
+ cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+ cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
+
+ switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
+ case ACPI_HMAT_CA_DIRECT_MAPPED:
+ cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ break;
+ case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
+ cache_attrs.indexing = NODE_CACHE_INDEXED;
+ break;
+ case ACPI_HMAT_CA_NONE:
+ default:
+ cache_attrs.indexing = NODE_CACHE_OTHER;
+ break;
+ }
+
+ switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
+ case ACPI_HMAT_CP_WB:
+ cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+ break;
+ case ACPI_HMAT_CP_WT:
+ cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+ break;
+ case ACPI_HMAT_CP_NONE:
+ default:
+ cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+ break;
+ }
+
+ node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
+ return 0;
+}
+
+static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_hmat_proximity_domain *p = (void *)header;
+ struct memory_target *target = NULL;
+
+ if (p->header.length != sizeof(*p)) {
+ pr_notice("HMAT: Unexpected address range header length: %d\n",
+ p->header.length);
+ return -EINVAL;
+ }
+
+ if (hmat_revision == 1)
+ pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ p->reserved3, p->reserved4, p->flags, p->processor_PD,
+ p->memory_PD);
+ else
+ pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ p->flags, p->processor_PD, p->memory_PD);
+
+ if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
+ target = find_mem_target(p->memory_PD);
+ if (!target) {
+ pr_debug("HMAT: Memory Domain missing from SRAT\n");
+ return -EINVAL;
+ }
+ }
+ if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
+ int p_node = pxm_to_node(p->processor_PD);
+
+ if (p_node == NUMA_NO_NODE) {
+ pr_debug("HMAT: Invalid Processor Domain\n");
+ return -EINVAL;
+ }
+ target->processor_pxm = p_node;
+ }
+
+ return 0;
+}
+
+static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_hmat_structure *hdr = (void *)header;
+
+ if (!hdr)
+ return -EINVAL;
+
+ switch (hdr->type) {
+ case ACPI_HMAT_TYPE_PROXIMITY:
+ return hmat_parse_proximity_domain(header, end);
+ case ACPI_HMAT_TYPE_LOCALITY:
+ return hmat_parse_locality(header, end);
+ case ACPI_HMAT_TYPE_CACHE:
+ return hmat_parse_cache(header, end);
+ default:
+ return -EINVAL;
+ }
+}
+
+static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_srat_mem_affinity *ma = (void *)header;
+
+ if (!ma)
+ return -EINVAL;
+ if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
+ return 0;
+ alloc_memory_target(ma->proximity_domain);
+ return 0;
+}
+
+static __init u32 hmat_initiator_perf(struct memory_target *target,
+ struct memory_initiator *initiator,
+ struct acpi_hmat_locality *hmat_loc)
+{
+ unsigned int ipds, tpds, i, idx = 0, tdx = 0;
+ u32 *inits, *targs;
+ u16 *entries;
+
+ ipds = hmat_loc->number_of_initiator_Pds;
+ tpds = hmat_loc->number_of_target_Pds;
+ inits = (u32 *)(hmat_loc + 1);
+ targs = inits + ipds;
+ entries = (u16 *)(targs + tpds);
+
+ for (i = 0; i < ipds; i++) {
+ if (inits[i] == initiator->processor_pxm) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (i == ipds)
+ return 0;
+
+ for (i = 0; i < tpds; i++) {
+ if (targs[i] == target->memory_pxm) {
+ tdx = i;
+ break;
+ }
+ }
+ if (i == tpds)
+ return 0;
+
+ return hmat_normalize(entries[idx * tpds + tdx],
+ hmat_loc->entry_base_unit,
+ hmat_loc->data_type);
+}
+
+static __init bool hmat_update_best(u8 type, u32 value, u32 *best)
+{
+ bool updated = false;
+
+ if (!value)
+ return false;
+
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ if (!*best || *best > value) {
+ *best = value;
+ updated = true;
+ }
+ break;
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ case ACPI_HMAT_READ_BANDWIDTH:
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ if (!*best || *best < value) {
+ *best = value;
+ updated = true;
+ }
+ break;
+ }
+
+ return updated;
+}
+
+static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct memory_initiator *ia;
+ struct memory_initiator *ib;
+ unsigned long *p_nodes = priv;
+
+ ia = list_entry(a, struct memory_initiator, node);
+ ib = list_entry(b, struct memory_initiator, node);
+
+ set_bit(ia->processor_pxm, p_nodes);
+ set_bit(ib->processor_pxm, p_nodes);
+
+ return ia->processor_pxm - ib->processor_pxm;
+}
+
+static __init void hmat_register_target_initiators(struct memory_target *target)
+{
+ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+ struct memory_initiator *initiator;
+ unsigned int mem_nid, cpu_nid;
+ struct memory_locality *loc = NULL;
+ u32 best = 0;
+ int i;
+
+ mem_nid = pxm_to_node(target->memory_pxm);
+ /*
+ * If the Address Range Structure provides a local processor pxm, link
+ * only that one. Otherwise, find the best performance attributes and
+ * register all initiators that match.
+ */
+ if (target->processor_pxm != PXM_INVAL) {
+ cpu_nid = pxm_to_node(target->processor_pxm);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+ return;
+ }
+
+ if (list_empty(&localities))
+ return;
+
+ /*
+ * We need the initiator list sorted so we can use bitmap_clear for
+ * previously set initiators when we find a better memory accessor.
+ * We'll also use the sorting to prime the candidate nodes with known
+ * initiators.
+ */
+ bitmap_zero(p_nodes, MAX_NUMNODES);
+ list_sort(p_nodes, &initiators, initiator_cmp);
+ for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
+ loc = localities_types[i];
+ if (!loc)
+ continue;
+
+ best = 0;
+ list_for_each_entry(initiator, &initiators, node) {
+ u32 value;
+
+ if (!test_bit(initiator->processor_pxm, p_nodes))
+ continue;
+
+ value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
+ if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
+ bitmap_clear(p_nodes, 0, initiator->processor_pxm);
+ if (value != best)
+ clear_bit(initiator->processor_pxm, p_nodes);
+ }
+ if (best)
+ hmat_update_target_access(target, loc->hmat_loc->data_type, best);
+ }
+
+ for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
+ cpu_nid = pxm_to_node(i);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+ }
+}
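To make the selection loop above concrete (a sketch with assumed inputs, not the kernel routine): for a single attribute type, only initiators whose normalized value ties the best one stay set in p_nodes and get linked to the target. A simplified stand-alone version of that "best plus ties" idea:

#include <stdio.h>

/* Illustration only: keep the initiators whose non-zero latency equals the
 * lowest value seen, roughly mirroring the outcome of the loop above. */
static void keep_best_latency(const unsigned int lat[], int n, int keep[])
{
	unsigned int best = 0;
	int i;

	for (i = 0; i < n; i++)
		if (lat[i] && (!best || lat[i] < best))
			best = lat[i];

	for (i = 0; i < n; i++)
		keep[i] = best && lat[i] == best;
}

int main(void)
{
	unsigned int lat[] = { 10, 10, 20 };	/* assumed latencies in ns */
	int keep[3], i;

	keep_best_latency(lat, 3, keep);
	for (i = 0; i < 3; i++)
		printf("initiator %d: %s\n", i, keep[i] ? "kept" : "dropped");
	return 0;
}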
+
+static __init void hmat_register_target_perf(struct memory_target *target)
+{
+ unsigned mem_nid = pxm_to_node(target->memory_pxm);
+ node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
+}
+
+static __init void hmat_register_targets(void)
+{
+ struct memory_target *target;
+
+ list_for_each_entry(target, &targets, node) {
+ hmat_register_target_initiators(target);
+ hmat_register_target_perf(target);
+ }
+}
+
+static __init void hmat_free_structures(void)
+{
+ struct memory_target *target, *tnext;
+ struct memory_locality *loc, *lnext;
+ struct memory_initiator *initiator, *inext;
+
+ list_for_each_entry_safe(target, tnext, &targets, node) {
+ list_del(&target->node);
+ kfree(target);
+ }
+
+ list_for_each_entry_safe(initiator, inext, &initiators, node) {
+ list_del(&initiator->node);
+ kfree(initiator);
+ }
+
+ list_for_each_entry_safe(loc, lnext, &localities, node) {
+ list_del(&loc->node);
+ kfree(loc);
+ }
+}
+
+static __init int hmat_init(void)
+{
+ struct acpi_table_header *tbl;
+ enum acpi_hmat_type i;
+ acpi_status status;
+
+ if (srat_disabled())
+ return 0;
+
+ status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ if (acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ srat_parse_mem_affinity, 0) < 0)
+ goto out_put;
+ acpi_put_table(tbl);
+
+ status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
+ if (ACPI_FAILURE(status))
+ goto out_put;
+
+ hmat_revision = tbl->revision;
+ switch (hmat_revision) {
+ case 1:
+ case 2:
+ break;
+ default:
+ pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
+ goto out_put;
+ }
+
+ for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
+ if (acpi_table_parse_entries(ACPI_SIG_HMAT,
+ sizeof(struct acpi_table_hmat), i,
+ hmat_parse_subtable, 0) < 0) {
+ pr_notice("Ignoring HMAT: Invalid table");
+ goto out_put;
+ }
+ }
+ hmat_register_targets();
+out_put:
+ hmat_free_structures();
+ acpi_put_table(tbl);
+ return 0;
+}
+subsys_initcall(hmat_init);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 5a389a4f4f65..f1ed0befe303 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
goto out;
}
+ dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
+ cmd_name, out_obj->buffer.length);
+ print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+ out_obj->buffer.pointer,
+ min_t(u32, 128, out_obj->buffer.length), true);
+
if (call_pkg) {
call_pkg->nd_fw_size = out_obj->buffer.length;
memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return 0;
}
- dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
- cmd_name, out_obj->buffer.length);
- print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
- out_obj->buffer.pointer,
- min_t(u32, 128, out_obj->buffer.length), true);
-
for (i = 0, offset = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
(u32 *) out_obj->buffer.pointer,
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index f70de71f79d6..cddd0fcf622c 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
if (!test_bit(cmd, &nfit_mem->dsm_mask))
return -ENOTTY;
- if (old_data)
- memcpy(nd_cmd.cmd.old_pass, old_data->data,
- sizeof(nd_cmd.cmd.old_pass));
+ memcpy(nd_cmd.cmd.old_pass, old_data->data,
+ sizeof(nd_cmd.cmd.old_pass));
memcpy(nd_cmd.cmd.new_pass, new_data->data,
sizeof(nd_cmd.cmd.new_pass));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
/* flush all cache before we erase DIMM */
nvdimm_invalidate_cache();
- if (nkey)
- memcpy(nd_cmd.cmd.passphrase, nkey->data,
- sizeof(nd_cmd.cmd.passphrase));
+ memcpy(nd_cmd.cmd.passphrase, nkey->data,
+ sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
if (rc < 0)
return rc;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 867f6e3f2b4f..30995834ad70 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -339,7 +339,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
}
static int __init
-acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
+acpi_parse_x2apic_affinity(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_x2apic_cpu_affinity *processor_affinity;
@@ -348,7 +348,7 @@ acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
if (!processor_affinity)
return -EINVAL;
- acpi_table_print_srat_entry(header);
+ acpi_table_print_srat_entry(&header->common);
/* let architecture-dependent part to do it */
acpi_numa_x2apic_affinity_init(processor_affinity);
@@ -357,7 +357,7 @@ acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
}
static int __init
-acpi_parse_processor_affinity(struct acpi_subtable_header *header,
+acpi_parse_processor_affinity(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_cpu_affinity *processor_affinity;
@@ -366,7 +366,7 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
if (!processor_affinity)
return -EINVAL;
- acpi_table_print_srat_entry(header);
+ acpi_table_print_srat_entry(&header->common);
/* let architecture-dependent part to do it */
acpi_numa_processor_affinity_init(processor_affinity);
@@ -375,7 +375,7 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
}
static int __init
-acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
+acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_gicc_affinity *processor_affinity;
@@ -384,7 +384,7 @@ acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
if (!processor_affinity)
return -EINVAL;
- acpi_table_print_srat_entry(header);
+ acpi_table_print_srat_entry(&header->common);
/* let architecture-dependent part to do it */
acpi_numa_gicc_affinity_init(processor_affinity);
@@ -395,7 +395,7 @@ acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
static int __initdata parsed_numa_memblks;
static int __init
-acpi_parse_memory_affinity(struct acpi_subtable_header * header,
+acpi_parse_memory_affinity(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_srat_mem_affinity *memory_affinity;
@@ -404,7 +404,7 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
if (!memory_affinity)
return -EINVAL;
- acpi_table_print_srat_entry(header);
+ acpi_table_print_srat_entry(&header->common);
/* let architecture-dependent part to do it */
if (!acpi_numa_memory_affinity_init(memory_affinity))
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index a4e8432fc2fb..b42be067fb83 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -52,6 +52,18 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#define AL_ECAM(table_id, rev, seg, ops) \
+ { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
+
+ AL_ECAM("GRAVITON", 0, 0, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 1, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 2, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 3, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 4, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 5, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 6, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 7, &al_pcie_ops),
+
#define QCOM_ECAM32(seg) \
{ "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 707aafc7c2aa..c36781a9b493 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -145,6 +145,7 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = {
{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
{ OSC_PCI_MSI_SUPPORT, "MSI" },
+ { OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" },
};
static struct pci_osc_bit_struct pci_osc_control_bit[] = {
@@ -446,6 +447,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
* PCI domains, so we indicate this in _OSC support capabilities.
*/
support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ support |= OSC_PCI_HPX_TYPE_3_SUPPORT;
if (pci_ext_cfg_avail())
support |= OSC_PCI_EXT_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled())
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 665e93ca0b40..87db3e124725 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -535,12 +535,12 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
/*
* Try to execute _DSW first.
*
- * Three agruments are needed for the _DSW object:
+ * Three arguments are needed for the _DSW object:
* Argument 0: enable/disable the wake capabilities
* Argument 1: target system state
* Argument 2: target device state
* When _DSW object is called to disable the wake capabilities, maybe
- * the first argument is filled. The values of the other two agruments
+ * the first argument is filled. The values of the other two arguments
* are meaningless.
*/
in_arg[0].type = ACPI_TYPE_INTEGER;
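For context, the three-argument _DSW evaluation described in the comment is assembled along these lines. This is a simplified sketch of the surrounding acpi_device_sleep_wake() logic; 'enable', 'sleep_state' and 'dev_state' are stand-in variable names.

	union acpi_object in_arg[3];
	struct acpi_object_list arg_list = { 3, in_arg };
	acpi_status status;

	in_arg[0].type = ACPI_TYPE_INTEGER;
	in_arg[0].integer.value = enable;	/* Argument 0: enable/disable wake */
	in_arg[1].type = ACPI_TYPE_INTEGER;
	in_arg[1].integer.value = sleep_state;	/* Argument 1: target system state */
	in_arg[2].type = ACPI_TYPE_INTEGER;
	in_arg[2].integer.value = dev_state;	/* Argument 2: target device state */

	status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL);
	if (status == AE_NOT_FOUND)
		/* no _DSW: fall back to the legacy _PSW object */;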
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 065c4fc245d1..b72e6afaa8fb 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -164,7 +164,7 @@ static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *t
}
/**
- * acpi_count_levels() - Given a PPTT table, and a cpu node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
*
@@ -235,7 +235,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
/**
* acpi_find_processor_node() - Given a PPTT table find the requested processor
* @table_hdr: Pointer to the head of the PPTT table
- * @acpi_cpu_id: cpu we are searching for
+ * @acpi_cpu_id: CPU we are searching for
*
* Find the subtable entry describing the provided processor.
* This is done by iterating the PPTT table looking for processor nodes
@@ -456,21 +456,21 @@ static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_ta
static void acpi_pptt_warn_missing(void)
{
- pr_warn_once("No PPTT table found, cpu and cache topology may be inaccurate\n");
+ pr_warn_once("No PPTT table found, CPU and cache topology may be inaccurate\n");
}
/**
* topology_get_acpi_cpu_tag() - Find a unique topology value for a feature
* @table: Pointer to the head of the PPTT table
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
* @level: A level that terminates the search
* @flag: A flag which terminates the search
*
- * Get a unique value given a cpu, and a topology level, that can be
+ * Get a unique value given a CPU, and a topology level, that can be
* matched to determine which cpus share common topological features
* at that level.
*
- * Return: Unique value, or -ENOENT if unable to locate cpu
+ * Return: Unique value, or -ENOENT if unable to locate CPU
*/
static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
unsigned int cpu, int level, int flag)
@@ -510,7 +510,7 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
return -ENOENT;
}
retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
- pr_debug("Topology Setup ACPI cpu %d, level %d ret = %d\n",
+ pr_debug("Topology Setup ACPI CPU %d, level %d ret = %d\n",
cpu, level, retval);
acpi_put_table(table);
@@ -519,9 +519,9 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
/**
* acpi_find_last_cache_level() - Determines the number of cache levels for a PE
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
*
- * Given a logical cpu number, returns the number of levels of cache represented
+ * Given a logical CPU number, returns the number of levels of cache represented
* in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
* indicating we didn't find any cache levels.
*
@@ -534,7 +534,7 @@ int acpi_find_last_cache_level(unsigned int cpu)
int number_of_levels = 0;
acpi_status status;
- pr_debug("Cache Setup find last level cpu=%d\n", cpu);
+ pr_debug("Cache Setup find last level CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
@@ -551,14 +551,14 @@ int acpi_find_last_cache_level(unsigned int cpu)
/**
* cache_setup_acpi() - Override CPU cache topology with data from the PPTT
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
*
* Updates the global cache info provided by cpu_get_cacheinfo()
* when there are valid properties in the acpi_pptt_cache nodes. A
* successful parse may not result in any updates if none of the
- * cache levels have any valid flags set. Futher, a unique value is
+ * cache levels have any valid flags set. Further, a unique value is
* associated with each known CPU cache entry. This unique value
- * can be used to determine whether caches are shared between cpus.
+ * can be used to determine whether caches are shared between CPUs.
*
* Return: -ENOENT on failure to find table, or 0 on success
*/
@@ -567,7 +567,7 @@ int cache_setup_acpi(unsigned int cpu)
struct acpi_table_header *table;
acpi_status status;
- pr_debug("Cache Setup ACPI cpu %d\n", cpu);
+ pr_debug("Cache Setup ACPI CPU %d\n", cpu);
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
if (ACPI_FAILURE(status)) {
@@ -582,8 +582,8 @@ int cache_setup_acpi(unsigned int cpu)
}
/**
- * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
+ * @cpu: Kernel logical CPU number
* @level: The topological level for which we would like a unique ID
*
* Determine a topology unique ID for each thread/core/cluster/mc_grouping
@@ -596,7 +596,7 @@ int cache_setup_acpi(unsigned int cpu)
* other levels beyond this use a generated value to uniquely identify
* a topological feature.
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents a unique topological feature.
*/
int find_acpi_cpu_topology(unsigned int cpu, int level)
@@ -606,12 +606,12 @@ int find_acpi_cpu_topology(unsigned int cpu, int level)
/**
* find_acpi_cpu_cache_topology() - Determine a unique cache topology value
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
* @level: The cache level for which we would like a unique ID
*
* Determine a unique ID for each unified cache in the system
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents a unique topological feature.
*/
int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
@@ -643,17 +643,17 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
/**
- * find_acpi_cpu_topology_package() - Determine a unique cpu package value
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology_package() - Determine a unique CPU package value
+ * @cpu: Kernel logical CPU number
*
- * Determine a topology unique package ID for the given cpu.
+ * Determine a topology unique package ID for the given CPU.
* This ID can then be used to group peers, which will have matching ids.
*
* The search terminates when either a level is found with the PHYSICAL_PACKAGE
* flag set or we reach a root node.
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
- * Otherwise returns a value which represents the package for this cpu.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
+ * Otherwise returns a value which represents the package for this CPU.
*/
int find_acpi_cpu_topology_package(unsigned int cpu)
{
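Since the package IDs documented above are only meaningful when compared against each other, a typical caller groups CPUs by equal return values. A small illustrative helper (the function name is invented):

static bool example_cpus_share_package(unsigned int a, unsigned int b)
{
	int pkg_a = find_acpi_cpu_topology_package(a);
	int pkg_b = find_acpi_cpu_topology_package(b);

	/* -ENOENT means there is no PPTT, or the CPU was not found in it */
	if (pkg_a < 0 || pkg_b < 0)
		return false;

	return pkg_a == pkg_b;
}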
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a303fd0e108c..c73d3a62799a 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
acpi_processor_ppc_ost(pr->handle, 0);
}
if (ret >= 0)
- cpufreq_update_policy(pr->id);
+ cpufreq_update_limits(pr->id);
}
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 77abe0ec4043..9d460a859be0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -44,6 +44,7 @@ static const guid_t prp_guids[] = {
0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
};
+/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
@@ -1031,6 +1032,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
const struct acpi_data_node *data = to_acpi_data_node(fwnode);
struct acpi_data_node *dn;
+ /*
+ * We can have a combination of device and data nodes, e.g. with
+ * hierarchical _DSD properties. Make sure the adev pointer is
+ * restored before going through data nodes, otherwise we will
+ * be looking for data_nodes below the last device found instead
+ * of the common fwnode shared by device_nodes and data_nodes.
+ */
+ adev = to_acpi_device_node(fwnode);
if (adev)
head = &adev->data.subnodes;
else if (data)
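The restored adev lookup above is what keeps generic fwnode child iteration working when ACPI device nodes and _DSD data nodes are mixed in one hierarchy. A consumer-side sketch, assuming a driver whose children carry a hypothetical "reg" _DSD property:

	struct fwnode_handle *child;
	u32 reg;

	device_for_each_child_node(dev, child) {
		/* child may be a device node or a hierarchical _DSD data node */
		if (fwnode_property_read_u32(child, "reg", &reg))
			continue;
		dev_dbg(dev, "child node has reg %u\n", reg);
	}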
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 446c959a8f08..566270d0e91a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -763,18 +763,16 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
}
EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
-static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
- struct acpi_device_wakeup *wakeup)
+static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
{
+ acpi_handle handle = dev->handle;
+ struct acpi_device_wakeup *wakeup = &dev->wakeup;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
union acpi_object *element = NULL;
acpi_status status;
int err = -ENODATA;
- if (!wakeup)
- return -EINVAL;
-
INIT_LIST_HEAD(&wakeup->resources);
/* _PRW */
@@ -848,9 +846,9 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
static bool acpi_wakeup_gpe_init(struct acpi_device *device)
{
static const struct acpi_device_id button_device_ids[] = {
- {"PNP0C0C", 0},
- {"PNP0C0D", 0},
- {"PNP0C0E", 0},
+ {"PNP0C0C", 0}, /* Power button */
+ {"PNP0C0D", 0}, /* Lid */
+ {"PNP0C0E", 0}, /* Sleep button */
{"", 0},
};
struct acpi_device_wakeup *wakeup = &device->wakeup;
@@ -883,8 +881,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
if (!acpi_has_method(device->handle, "_PRW"))
return;
- err = acpi_bus_extract_wakeup_device_power_package(device->handle,
- &device->wakeup);
+ err = acpi_bus_extract_wakeup_device_power_package(device);
if (err) {
dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
return;
@@ -895,7 +892,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
/*
* Call _PSW/_DSW object to disable its ability to wake the sleeping
* system for the ACPI device with the _PRW object.
- * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
+ * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
* So it is necessary to call _DSW object first. Only when it is not
 * present will the _PSW object be used.
*/
@@ -2241,10 +2238,10 @@ static struct acpi_probe_entry *ape;
static int acpi_probe_count;
static DEFINE_MUTEX(acpi_probe_mutex);
-static int __init acpi_match_madt(struct acpi_subtable_header *header,
+static int __init acpi_match_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
- if (!ape->subtable_valid || ape->subtable_valid(header, ape))
+ if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape))
if (!ape->probe_subtbl(header, end))
acpi_probe_count++;
@@ -2260,7 +2257,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
mutex_lock(&acpi_probe_mutex);
for (ape = ap_head; nr; ape++, nr--) {
- if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
+ if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
acpi_table_parse_madt(ape->type, acpi_match_madt, 0);
count += acpi_probe_count;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 403c4ff15349..e52f1238d2d6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -977,6 +977,8 @@ static int acpi_s2idle_prepare(void)
if (acpi_sci_irq_valid())
enable_irq_wake(acpi_sci_irq);
+ acpi_enable_wakeup_devices(ACPI_STATE_S0);
+
/* Change the configuration of GPEs to avoid spurious wakeup. */
acpi_enable_all_wakeup_gpes();
acpi_os_wait_events_complete();
@@ -1027,6 +1029,8 @@ static void acpi_s2idle_restore(void)
{
acpi_enable_all_runtime_gpes();
+ acpi_disable_wakeup_devices(ACPI_STATE_S0);
+
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index c336784d0bcb..b34d05e365b7 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(qdf2400_e44_present);
/*
* Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
- * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
+ * Detect them by examining the OEM fields in the SPCR header, similar to PCI
* quirk detection in pci_mcfg.c.
*/
static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index fa76f5e41b5c..75948a3f1a20 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -327,9 +327,9 @@ static struct kobject *hotplug_kobj;
struct acpi_table_attr {
struct bin_attribute attr;
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
int instance;
- char filename[ACPI_NAME_SIZE+ACPI_INST_SIZE];
+ char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
struct list_head node;
};
@@ -368,10 +368,10 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
char instance_str[ACPI_INST_SIZE];
sysfs_attr_init(&table_attr->attr.attr);
- ACPI_MOVE_NAME(table_attr->name, table_header->signature);
+ ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
list_for_each_entry(attr, &acpi_table_attr_list, node) {
- if (ACPI_COMPARE_NAME(table_attr->name, attr->name))
+ if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
if (table_attr->instance < attr->instance)
table_attr->instance = attr->instance;
}
@@ -382,8 +382,8 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
return -ERANGE;
}
- ACPI_MOVE_NAME(table_attr->filename, table_header->signature);
- table_attr->filename[ACPI_NAME_SIZE] = '\0';
+ ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
+ table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
if (table_attr->instance > 1 || (table_attr->instance == 1 &&
!acpi_get_table
(table_header->signature, 2, &header))) {
@@ -484,7 +484,7 @@ static int acpi_table_data_init(struct acpi_table_header *th)
int i;
for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
- if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
+ if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
if (!data_attr)
return -ENOMEM;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8fccbe49612a..3b5d04fd5e3e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -49,6 +49,16 @@ static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
static int acpi_apic_instance __initdata;
+enum acpi_subtable_type {
+ ACPI_SUBTABLE_COMMON,
+ ACPI_SUBTABLE_HMAT,
+};
+
+struct acpi_subtable_entry {
+ union acpi_subtable_headers *hdr;
+ enum acpi_subtable_type type;
+};
+
/*
* Disable table checksum verification for the early stage due to the size
* limitation of the current x86 early mapping implementation.
@@ -217,6 +227,50 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
}
+static unsigned long __init
+acpi_get_entry_type(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.type;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.type;
+ }
+ return 0;
+}
+
+static unsigned long __init
+acpi_get_entry_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.length;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.length;
+ }
+ return 0;
+}
+
+static unsigned long __init
+acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return sizeof(entry->hdr->common);
+ case ACPI_SUBTABLE_HMAT:
+ return sizeof(entry->hdr->hmat);
+ }
+ return 0;
+}
+
+static enum acpi_subtable_type __init
+acpi_get_subtable_type(char *id)
+{
+ if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
+ return ACPI_SUBTABLE_HMAT;
+ return ACPI_SUBTABLE_COMMON;
+}
+
/**
* acpi_parse_entries_array - for each proc_num find a suitable subtable
*
@@ -240,14 +294,13 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
-static int __init
-acpi_parse_entries_array(char *id, unsigned long table_size,
+static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
struct acpi_table_header *table_header,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
{
- struct acpi_subtable_header *entry;
- unsigned long table_end;
+ struct acpi_subtable_entry entry;
+ unsigned long table_end, subtable_len, entry_len;
int count = 0;
int errs = 0;
int i;
@@ -270,19 +323,20 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
/* Parse all entries looking for a match. */
- entry = (struct acpi_subtable_header *)
+ entry.type = acpi_get_subtable_type(id);
+ entry.hdr = (union acpi_subtable_headers *)
((unsigned long)table_header + table_size);
+ subtable_len = acpi_get_subtable_header_length(&entry);
- while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
- table_end) {
+ while (((unsigned long)entry.hdr) + subtable_len < table_end) {
if (max_entries && count >= max_entries)
break;
for (i = 0; i < proc_num; i++) {
- if (entry->type != proc[i].id)
+ if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
if (!proc[i].handler ||
- (!errs && proc[i].handler(entry, table_end))) {
+ (!errs && proc[i].handler(entry.hdr, table_end))) {
errs++;
continue;
}
@@ -297,13 +351,14 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
* If entry->length is 0, break from this loop to avoid
* infinite loop.
*/
- if (entry->length == 0) {
+ entry_len = acpi_get_entry_length(&entry);
+ if (entry_len == 0) {
pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
return -EINVAL;
}
- entry = (struct acpi_subtable_header *)
- ((unsigned long)entry + entry->length);
+ entry.hdr = (union acpi_subtable_headers *)
+ ((unsigned long)entry.hdr + entry_len);
}
if (max_entries && count > max_entries) {
@@ -314,8 +369,7 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
return errs ? -EINVAL : count;
}
-int __init
-acpi_table_parse_entries_array(char *id,
+int __init acpi_table_parse_entries_array(char *id,
unsigned long table_size,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
@@ -346,8 +400,7 @@ acpi_table_parse_entries_array(char *id,
return count;
}
-int __init
-acpi_table_parse_entries(char *id,
+int __init acpi_table_parse_entries(char *id,
unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
@@ -362,8 +415,7 @@ acpi_table_parse_entries(char *id,
max_entries);
}
-int __init
-acpi_table_parse_madt(enum acpi_madt_type id,
+int __init acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler, unsigned int max_entries)
{
return acpi_table_parse_entries(ACPI_SIG_MADT,
@@ -670,8 +722,8 @@ static void __init acpi_table_initrd_scan(void)
table_length = table->length;
/* Skip RSDT/XSDT which should only be used for override */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_XSDT)) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
goto next_table;
}
@@ -725,8 +777,7 @@ static void *amlcode __attribute__ ((weakref("AmlCode")));
static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code")));
#endif
-acpi_status
-acpi_os_table_override(struct acpi_table_header *existing_table,
+acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
struct acpi_table_header **new_table)
{
if (!existing_table || !new_table)
@@ -788,7 +839,6 @@ static int __init acpi_parse_apic_instance(char *str)
return 0;
}
-
early_param("acpi_apic_instance", acpi_parse_apic_instance);
static int __init acpi_force_table_verification_setup(char *s)
@@ -797,7 +847,6 @@ static int __init acpi_force_table_verification_setup(char *s)
return 0;
}
-
early_param("acpi_force_table_verification", acpi_force_table_verification_setup);
static int __init acpi_force_32bit_fadt_addr(char *s)
@@ -807,5 +856,4 @@ static int __init acpi_force_32bit_fadt_addr(char *s)
return 0;
}
-
early_param("acpi_force_32bit_fadt_addr", acpi_force_32bit_fadt_addr);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index c4b06cc075f9..89363b245489 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -739,6 +739,7 @@ EXPORT_SYMBOL(acpi_dev_found);
struct acpi_dev_match_info {
const char *dev_name;
+ struct acpi_device *adev;
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
@@ -759,6 +760,7 @@ static int acpi_dev_match_cb(struct device *dev, void *data)
return 0;
match->dev_name = acpi_dev_name(adev);
+ match->adev = adev;
if (match->hrv == -1)
return 1;
@@ -806,18 +808,20 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
EXPORT_SYMBOL(acpi_dev_present);
/**
- * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * acpi_dev_get_first_match_dev - Return the first match of ACPI device
* @hid: Hardware ID of the device.
* @uid: Unique ID of the device, pass NULL to not check _UID
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
- * Return device name if a matching device was present
+ * Return the first match of ACPI device if a matching device was present
* at the moment of invocation, or NULL otherwise.
*
+ * The caller is responsible for calling put_device() on the returned device.
+ *
* See additional information in acpi_dev_present() as well.
*/
-const char *
-acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
struct acpi_dev_match_info match = {};
struct device *dev;
@@ -827,9 +831,9 @@ acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
- return dev ? match.dev_name : NULL;
+ return dev ? match.adev : NULL;
}
-EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+EXPORT_SYMBOL(acpi_dev_get_first_match_dev);
/*
 * acpi_backlight= handling, this is done here rather than in video_detect.c
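Because acpi_dev_get_first_match_dev() now hands back a reference-counted struct acpi_device rather than a name, callers own a reference and must drop it. A hedged usage sketch (the "ABCD1234" HID is a placeholder):

	struct acpi_device *adev;

	adev = acpi_dev_get_first_match_dev("ABCD1234", NULL, -1);
	if (adev) {
		pr_info("matched ACPI device %s\n", acpi_dev_name(adev));
		put_device(&adev->dev);	/* drop the reference taken by the lookup */
	}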
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 43587ac680e4..31014c7d3793 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -112,7 +112,7 @@ static int video_detect_force_none(const struct dmi_system_id *d)
static const struct dmi_system_id video_detect_dmi_table[] = {
/* On Samsung X360, the BIOS will set a flag (VDRV) if generic
* ACPI backlight device is used. This flag will definitively break
- * the backlight interface (even the vendor interface) untill next
+ * the backlight interface (even the vendor interface) until next
* reboot. It's why we should prevent video.ko from being used here
* and we can't rely on a later call to acpi_video_unregister().
*/
@@ -141,6 +141,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Sony VPCEH3U1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
+ },
+ },
/*
* These models have a working acpi_video backlight control, and using
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 4b9c7ca492e6..6f0712f0767c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3121,6 +3121,7 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node && target_node->txn_security_ctx) {
u32 secid;
+ size_t added_size;
security_task_getsecid(proc->tsk, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
@@ -3130,7 +3131,15 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(secctx_sz, sizeof(u64));
+ extra_buffers_size += added_size;
+ if (extra_buffers_size < added_size) {
+ /* integer overflow of extra_buffers_size */
+ return_error = BR_FAILED_REPLY;
+ return_error_param = EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_extra_size;
+ }
}
trace_binder_transaction(reply, t, target_node);
@@ -3480,6 +3489,7 @@ err_copy_data_failed:
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
+err_bad_extra_size:
if (secctx)
security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
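The added check is the usual unsigned-overflow idiom: after the addition, a wrapped result is strictly smaller than either operand. A self-contained illustration of the same pattern, independent of binder:

/* Returns false instead of letting 'total' silently wrap around. */
static bool example_accumulate_size(size_t *total, size_t add)
{
	size_t sum = *total + add;

	if (sum < add)		/* unsigned wrap-around detected */
		return false;

	*total = sum;
	return true;
}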
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 195f120c4e8c..bb929eb87116 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -931,8 +931,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!down_write_trylock(&mm->mmap_sem))
- goto err_down_write_mmap_sem_failed;
+ if (!down_read_trylock(&mm->mmap_sem))
+ goto err_down_read_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
list_lru_isolate(lru, item);
@@ -945,7 +945,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- up_write(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
@@ -959,7 +959,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
-err_down_write_mmap_sem_failed:
+err_down_read_mmap_sem_failed:
mmput_async(mm);
err_mmget:
err_page_already_freed:
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index ce59253ec158..ea1175f7f147 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -53,11 +53,13 @@
enum ahci_qoriq_type {
AHCI_LS1021A,
+ AHCI_LS1028A,
AHCI_LS1043A,
AHCI_LS2080A,
AHCI_LS1046A,
AHCI_LS1088A,
AHCI_LS2088A,
+ AHCI_LX2160A,
};
struct ahci_qoriq_priv {
@@ -67,13 +69,17 @@ struct ahci_qoriq_priv {
bool is_dmacoherent;
};
+static bool ecc_initialized;
+
static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
+ { .compatible = "fsl,ls1028a-ahci", .data = (void *)AHCI_LS1028A},
{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
{ .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
{ .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
{ .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
+ { .compatible = "fsl,lx2160a-ahci", .data = (void *)AHCI_LX2160A},
{},
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -165,9 +171,10 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
switch (qpriv->type) {
case AHCI_LS1021A:
- if (!qpriv->ecc_addr)
+ if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
- writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
+ else if (qpriv->ecc_addr && !ecc_initialized)
+ writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(LS1021A_PORT_PHY2, reg_base + PORT_PHY2);
writel(LS1021A_PORT_PHY3, reg_base + PORT_PHY3);
@@ -180,10 +187,12 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
break;
case AHCI_LS1043A:
- if (!qpriv->ecc_addr)
+ if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
- writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
- qpriv->ecc_addr);
+ else if (qpriv->ecc_addr && !ecc_initialized)
+ writel(readl(qpriv->ecc_addr) |
+ ECC_DIS_ARMV8_CH2,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
@@ -202,10 +211,12 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
break;
case AHCI_LS1046A:
- if (!qpriv->ecc_addr)
+ if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
- writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
- qpriv->ecc_addr);
+ else if (qpriv->ecc_addr && !ecc_initialized)
+ writel(readl(qpriv->ecc_addr) |
+ ECC_DIS_ARMV8_CH2,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
@@ -214,11 +225,15 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
+ case AHCI_LS1028A:
case AHCI_LS1088A:
- if (!qpriv->ecc_addr)
+ case AHCI_LX2160A:
+ if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
- writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
- qpriv->ecc_addr);
+ else if (qpriv->ecc_addr && !ecc_initialized)
+ writel(readl(qpriv->ecc_addr) |
+ ECC_DIS_LS1088A,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
@@ -237,6 +252,7 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
break;
}
+ ecc_initialized = true;
return 0;
}
@@ -264,13 +280,18 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "sata-ecc");
- if (res) {
- qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(qoriq_priv->ecc_addr))
- return PTR_ERR(qoriq_priv->ecc_addr);
+ if (unlikely(!ecc_initialized)) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "sata-ecc");
+ if (res) {
+ qoriq_priv->ecc_addr =
+ devm_ioremap_resource(dev, res);
+ if (IS_ERR(qoriq_priv->ecc_addr))
+ return PTR_ERR(qoriq_priv->ecc_addr);
+ }
}
+
qoriq_priv->is_dmacoherent = of_dma_is_coherent(np);
rc = ahci_platform_enable_resources(hpriv);
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index cc6d06c1b2c7..db271b705529 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -44,7 +44,7 @@
#include <linux/ktime.h>
#include <linux/platform_data/dma-ep93xx.h>
-#include <mach/platform.h>
+#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
#define DRV_VERSION "1.0"
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 11e1663bdc4d..b2c06da4f62e 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1646,7 +1646,7 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
}
if (status & ISR_TBRQ_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Data tramsitted!\n");
+ fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n");
process_txdone_queue (dev, &dev->tx_relq);
}
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 82532c299bb5..5278c57dce73 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2826,8 +2826,8 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
case 0x6:
{
ia_cmds.status = 0;
- printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
- printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
+ printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
+ printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
}
break;
case 0x8:
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 059700ea3521..dc404492381d 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -3,7 +3,6 @@ menu "Generic Driver Options"
config UEVENT_HELPER
bool "Support for uevent helper"
- default y
help
The uevent helper program is forked by the kernel for
every uevent.
@@ -149,6 +148,14 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+config HMEM_REPORTING
+ bool
+ default n
+ depends on NUMA
+ help
+ Enable reporting for heterogeneous memory access attributes under
+ their non-uniform memory nodes.
+
source "drivers/base/test/Kconfig"
config SYS_HYPERVISOR
@@ -174,7 +181,6 @@ source "drivers/base/regmap/Kconfig"
config DMA_SHARED_BUFFER
bool
default n
- select ANON_INODES
select IRQ_WORK
help
This option enables the framework for buffer-sharing between
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index edfcf8d982e4..1739d7e1952a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -7,7 +7,6 @@
*/
#include <linux/acpi.h>
-#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
@@ -31,7 +30,6 @@ void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
per_cpu(freq_scale, i) = scale;
}
-static DEFINE_MUTEX(cpu_scale_mutex);
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
@@ -51,37 +49,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
-static ssize_t cpu_capacity_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int this_cpu = cpu->dev.id;
- int i;
- unsigned long new_capacity;
- ssize_t ret;
-
- if (!count)
- return 0;
-
- ret = kstrtoul(buf, 0, &new_capacity);
- if (ret)
- return ret;
- if (new_capacity > SCHED_CAPACITY_SCALE)
- return -EINVAL;
-
- mutex_lock(&cpu_scale_mutex);
- for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
- topology_set_cpu_scale(i, new_capacity);
- mutex_unlock(&cpu_scale_mutex);
-
- schedule_work(&update_topology_flags_work);
-
- return count;
-}
-
-static DEVICE_ATTR_RW(cpu_capacity);
+static DEVICE_ATTR_RO(cpu_capacity);
static int register_cpu_capacity_sysctl(void)
{
@@ -141,7 +109,6 @@ void topology_normalize_cpu_scale(void)
return;
pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
- mutex_lock(&cpu_scale_mutex);
for_each_possible_cpu(cpu) {
pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
cpu, raw_capacity[cpu]);
@@ -151,7 +118,6 @@ void topology_normalize_cpu_scale(void)
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
cpu, topology_get_cpu_scale(NULL, cpu));
}
- mutex_unlock(&cpu_scale_mutex);
}
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4aeaa0c92bda..fd7511e04e62 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1999,6 +1999,11 @@ static int device_private_init(struct device *dev)
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use put_device() to give up your
* reference instead.
+ *
+ * Rule of thumb is: if device_add() succeeds, you should call
+ * device_del() when you want to get rid of it. If device_add() has
+ * *not* succeeded, use *only* put_device() to drop the reference
+ * count.
*/
int device_add(struct device *dev)
{
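The rule of thumb spelled out above translates into the following caller-side pattern (sketch only; 'ret' and the surrounding function are assumed):

	ret = device_add(dev);
	if (ret) {
		/* device_add() failed: never free dev directly, just drop the ref */
		put_device(dev);
		return ret;
	}
	/* ... device in use ... */
	device_del(dev);	/* device_add() succeeded, so tear down with device_del() */
	put_device(dev);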
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 668139cfa664..cc37511de866 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -548,11 +548,18 @@ ssize_t __weak cpu_show_l1tf(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -560,6 +567,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
+ &dev_attr_mds.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a823f469e53f..0df9b4461766 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -490,7 +490,7 @@ re_probe:
if (dev->bus->dma_configure) {
ret = dev->bus->dma_configure(dev);
if (ret)
- goto dma_failed;
+ goto probe_failed;
}
if (driver_sysfs_add(dev)) {
@@ -546,14 +546,13 @@ re_probe:
goto done;
probe_failed:
- arch_teardown_dma_ops(dev);
-dma_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
device_links_no_driver(dev);
devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index eb15d976a9ea..38f2da6f5c2b 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Firmware loader"
config FW_LOADER
diff --git a/drivers/base/firmware_loader/builtin/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore
index 9c8bdb9fdcc3..166f76b43049 100644
--- a/drivers/base/firmware_loader/builtin/.gitignore
+++ b/drivers/base/firmware_loader/builtin/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
*.gen.S
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index b5c865fe263b..f962488546b6 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -674,8 +674,8 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
*
* This function is called if direct lookup for the firmware failed, it enables
* a fallback mechanism through userspace by exposing a sysfs loading
- * interface. Userspace is in charge of loading the firmware through the syfs
- * loading interface. This syfs fallback mechanism may be disabled completely
+ * interface. Userspace is in charge of loading the firmware through the sysfs
+ * loading interface. This sysfs fallback mechanism may be disabled completely
* on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
 * If this is false we check if the internal API caller set the @FW_OPT_NOFALLBACK
* flag, if so it would also disable the fallback mechanism. A system may want
@@ -693,7 +693,7 @@ int firmware_fallback_sysfs(struct firmware *fw, const char *name,
return ret;
if (!(opt_flags & FW_OPT_NO_WARN))
- dev_warn(device, "Falling back to syfs fallback for: %s\n",
+ dev_warn(device, "Falling back to sysfs fallback for: %s\n",
name);
else
dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index cb8347500ce2..f180427e48f4 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -231,13 +231,14 @@ static bool pages_correctly_probed(unsigned long start_pfn)
* OK to have direct references to sparsemem variables in here.
*/
static int
-memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
+memory_block_action(unsigned long start_section_nr, unsigned long action,
+ int online_type)
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
int ret;
- start_pfn = section_nr_to_pfn(phys_index);
+ start_pfn = section_nr_to_pfn(start_section_nr);
switch (action) {
case MEM_ONLINE:
@@ -251,7 +252,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
break;
default:
WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
- "%ld\n", __func__, phys_index, action, action);
+ "%ld\n", __func__, start_section_nr, action, action);
ret = -EINVAL;
}
@@ -506,7 +507,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
ret = lock_device_hotplug_sysfs();
if (ret)
- goto out;
+ return ret;
nid = memory_add_physaddr_to_nid(phys_addr);
ret = __add_memory(nid, phys_addr,
@@ -733,16 +734,18 @@ unregister_memory(struct memory_block *memory)
{
BUG_ON(memory->dev.bus != &memory_subsys);
- /* drop the ref. we got in remove_memory_section() */
+ /* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
}
-static int remove_memory_section(unsigned long node_id,
- struct mem_section *section, int phys_device)
+void unregister_memory_section(struct mem_section *section)
{
struct memory_block *mem;
+ if (WARN_ON_ONCE(!present_section(section)))
+ return;
+
mutex_lock(&mem_sysfs_mutex);
/*
@@ -763,15 +766,6 @@ static int remove_memory_section(unsigned long node_id,
out_unlock:
mutex_unlock(&mem_sysfs_mutex);
- return 0;
-}
-
-int unregister_memory_section(struct mem_section *section)
-{
- if (!present_section(section))
- return -EINVAL;
-
- return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 86d6cd92ce3d..8598fcbd2a17 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -17,6 +17,7 @@
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
+#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
@@ -59,6 +60,302 @@ static inline ssize_t node_read_cpulist(struct device *dev,
static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+/**
+ * struct node_access_nodes - Access class device to hold user visible
+ * relationships to other nodes.
+ * @dev: Device for this memory access class
+ * @list_node: List element in the node's access list
+ * @access: The access class rank
+ */
+struct node_access_nodes {
+ struct device dev;
+ struct list_head list_node;
+ unsigned access;
+#ifdef CONFIG_HMEM_REPORTING
+ struct node_hmem_attrs hmem_attrs;
+#endif
+};
+#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
+
+static struct attribute *node_init_access_node_attrs[] = {
+ NULL,
+};
+
+static struct attribute *node_targ_access_node_attrs[] = {
+ NULL,
+};
+
+static const struct attribute_group initiators = {
+ .name = "initiators",
+ .attrs = node_init_access_node_attrs,
+};
+
+static const struct attribute_group targets = {
+ .name = "targets",
+ .attrs = node_targ_access_node_attrs,
+};
+
+static const struct attribute_group *node_access_node_groups[] = {
+ &initiators,
+ &targets,
+ NULL,
+};
+
+static void node_remove_accesses(struct node *node)
+{
+ struct node_access_nodes *c, *cnext;
+
+ list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
+ list_del(&c->list_node);
+ device_unregister(&c->dev);
+ }
+}
+
+static void node_access_release(struct device *dev)
+{
+ kfree(to_access_nodes(dev));
+}
+
+static struct node_access_nodes *node_init_node_access(struct node *node,
+ unsigned access)
+{
+ struct node_access_nodes *access_node;
+ struct device *dev;
+
+ list_for_each_entry(access_node, &node->access_list, list_node)
+ if (access_node->access == access)
+ return access_node;
+
+ access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
+ if (!access_node)
+ return NULL;
+
+ access_node->access = access;
+ dev = &access_node->dev;
+ dev->parent = &node->dev;
+ dev->release = node_access_release;
+ dev->groups = node_access_node_groups;
+ if (dev_set_name(dev, "access%u", access))
+ goto free;
+
+ if (device_register(dev))
+ goto free_name;
+
+ pm_runtime_no_callbacks(dev);
+ list_add_tail(&access_node->list_node, &node->access_list);
+ return access_node;
+free_name:
+ kfree_const(dev->kobj.name);
+free:
+ kfree(access_node);
+ return NULL;
+}
+
+#ifdef CONFIG_HMEM_REPORTING
+#define ACCESS_ATTR(name) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, "%u\n", to_access_nodes(dev)->hmem_attrs.name); \
+} \
+static DEVICE_ATTR_RO(name);
+
+ACCESS_ATTR(read_bandwidth)
+ACCESS_ATTR(read_latency)
+ACCESS_ATTR(write_bandwidth)
+ACCESS_ATTR(write_latency)
+
+static struct attribute *access_attrs[] = {
+ &dev_attr_read_bandwidth.attr,
+ &dev_attr_read_latency.attr,
+ &dev_attr_write_bandwidth.attr,
+ &dev_attr_write_latency.attr,
+ NULL,
+};
+
+/**
+ * node_set_perf_attrs - Set the performance values for given access class
+ * @nid: Node identifier to be set
+ * @hmem_attrs: Heterogeneous memory performance attributes
+ * @access: The access class for the given attributes
+ */
+void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+ unsigned access)
+{
+ struct node_access_nodes *c;
+ struct node *node;
+ int i;
+
+ if (WARN_ON_ONCE(!node_online(nid)))
+ return;
+
+ node = node_devices[nid];
+ c = node_init_node_access(node, access);
+ if (!c)
+ return;
+
+ c->hmem_attrs = *hmem_attrs;
+ for (i = 0; access_attrs[i] != NULL; i++) {
+ if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
+ "initiators")) {
+ pr_info("failed to add performance attribute to node %d\n",
+ nid);
+ break;
+ }
+ }
+}
+
+/**
+ * struct node_cache_info - Internal tracking for memory node caches
+ * @dev: Device representing the cache level
+ * @node: List element for tracking in the node
+ * @cache_attrs: Attributes for this cache level
+ */
+struct node_cache_info {
+ struct device dev;
+ struct list_head node;
+ struct node_cache_attrs cache_attrs;
+};
+#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
+
+#define CACHE_ATTR(name, fmt) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, fmt "\n", to_cache_info(dev)->cache_attrs.name);\
+} \
+static DEVICE_ATTR_RO(name);
+
+CACHE_ATTR(size, "%llu")
+CACHE_ATTR(line_size, "%u")
+CACHE_ATTR(indexing, "%u")
+CACHE_ATTR(write_policy, "%u")
+
+static struct attribute *cache_attrs[] = {
+ &dev_attr_indexing.attr,
+ &dev_attr_size.attr,
+ &dev_attr_line_size.attr,
+ &dev_attr_write_policy.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cache);
+
+static void node_cache_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static void node_cacheinfo_release(struct device *dev)
+{
+ struct node_cache_info *info = to_cache_info(dev);
+ kfree(info);
+}
+
+static void node_init_cache_dev(struct node *node)
+{
+ struct device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
+
+ dev->parent = &node->dev;
+ dev->release = node_cache_release;
+ if (dev_set_name(dev, "memory_side_cache"))
+ goto free_dev;
+
+ if (device_register(dev))
+ goto free_name;
+
+ pm_runtime_no_callbacks(dev);
+ node->cache_dev = dev;
+ return;
+free_name:
+ kfree_const(dev->kobj.name);
+free_dev:
+ kfree(dev);
+}
+
+/**
+ * node_add_cache() - add cache attribute to a memory node
+ * @nid: Node identifier that has new cache attributes
+ * @cache_attrs: Attributes for the cache being added
+ */
+void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
+{
+ struct node_cache_info *info;
+ struct device *dev;
+ struct node *node;
+
+ if (!node_online(nid) || !node_devices[nid])
+ return;
+
+ node = node_devices[nid];
+ list_for_each_entry(info, &node->cache_attrs, node) {
+ if (info->cache_attrs.level == cache_attrs->level) {
+ dev_warn(&node->dev,
+ "attempt to add duplicate cache level:%d\n",
+ cache_attrs->level);
+ return;
+ }
+ }
+
+ if (!node->cache_dev)
+ node_init_cache_dev(node);
+ if (!node->cache_dev)
+ return;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dev = &info->dev;
+ dev->parent = node->cache_dev;
+ dev->release = node_cacheinfo_release;
+ dev->groups = cache_groups;
+ if (dev_set_name(dev, "index%d", cache_attrs->level))
+ goto free_cache;
+
+ info->cache_attrs = *cache_attrs;
+ if (device_register(dev)) {
+ dev_warn(&node->dev, "failed to add cache level:%d\n",
+ cache_attrs->level);
+ goto free_name;
+ }
+ pm_runtime_no_callbacks(dev);
+ list_add_tail(&info->node, &node->cache_attrs);
+ return;
+free_name:
+ kfree_const(dev->kobj.name);
+free_cache:
+ kfree(info);
+}
+
+static void node_remove_caches(struct node *node)
+{
+ struct node_cache_info *info, *next;
+
+ if (!node->cache_dev)
+ return;
+
+ list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
+ list_del(&info->node);
+ device_unregister(&info->dev);
+ }
+ device_unregister(node->cache_dev);
+}
+
+static void node_init_caches(unsigned int nid)
+{
+ INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
+}
+#else
+static void node_init_caches(unsigned int nid) { }
+static void node_remove_caches(struct node *node) { }
+#endif
+
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -340,7 +637,8 @@ static int register_node(struct node *node, int num)
void unregister_node(struct node *node)
{
hugetlb_unregister_node(node); /* no-op, if memoryless node */
-
+ node_remove_accesses(node);
+ node_remove_caches(node);
device_unregister(&node->dev);
}
@@ -372,6 +670,56 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
kobject_name(&node_devices[nid]->dev.kobj));
}
+/**
+ * register_memory_node_under_compute_node - link memory node to its compute
+ * node for a given access class.
+ * @mem_nid: Memory node number
+ * @cpu_nid: CPU node number
+ * @access: Access class to register
+ *
+ * Description:
+ * For use with platforms that may have separate memory and compute nodes.
+ * This function will export node relationships linking which memory
+ * initiator nodes can access memory targets at a given ranked access
+ * class.
+ */
+int register_memory_node_under_compute_node(unsigned int mem_nid,
+ unsigned int cpu_nid,
+ unsigned access)
+{
+ struct node *init_node, *targ_node;
+ struct node_access_nodes *initiator, *target;
+ int ret;
+
+ if (!node_online(cpu_nid) || !node_online(mem_nid))
+ return -ENODEV;
+
+ init_node = node_devices[cpu_nid];
+ targ_node = node_devices[mem_nid];
+ initiator = node_init_node_access(init_node, access);
+ target = node_init_node_access(targ_node, access);
+ if (!initiator || !target)
+ return -ENOMEM;
+
+ ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
+ &targ_node->dev.kobj,
+ dev_name(&targ_node->dev));
+ if (ret)
+ return ret;
+
+ ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
+ &init_node->dev.kobj,
+ dev_name(&init_node->dev));
+ if (ret)
+ goto err;
+
+ return 0;
+ err:
+ sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
+ dev_name(&targ_node->dev));
+ return ret;
+}
+
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
struct device *obj;
@@ -580,8 +928,10 @@ int __register_one_node(int nid)
register_cpu_under_node(cpu, nid);
}
+ INIT_LIST_HEAD(&node_devices[nid]->access_list);
/* initialize work queue for memory hot plug */
init_node_hugetlb_work(nid);
+ node_init_caches(nid);
return error;
}
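Putting the node.c helpers together with the HMAT code earlier in this diff: a provider registers the initiator/target link for an access class and then attaches the performance numbers, roughly as below. The bandwidth/latency values are placeholders; the call pattern mirrors hmat_register_target_perf().

static void example_register_target(unsigned int mem_nid, unsigned int cpu_nid)
{
	struct node_hmem_attrs attrs = {
		.read_bandwidth	 = 4000,	/* placeholder, MB/s */
		.write_bandwidth = 4000,
		.read_latency	 = 100,		/* placeholder, ns */
		.write_latency	 = 100,
	};

	/* access class 0: the best-performing initiators for this target */
	if (register_memory_node_under_compute_node(mem_nid, cpu_nid, 0))
		return;

	node_set_perf_attrs(mem_nid, &attrs, 0);
}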
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index dab0a5abc391..4d1729853d1a 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
* device
*
* @pdev: platform device to use both for memory resource lookup as well as
- * resource managemend
+ * resource management
* @index: resource index
*/
#ifdef CONFIG_HAS_IOMEM
@@ -438,10 +438,12 @@ int platform_device_add(struct platform_device *pdev)
p = &ioport_resource;
}
- if (p && insert_resource(p, r)) {
- dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
- ret = -EBUSY;
- goto failed;
+ if (p) {
+ ret = insert_resource(p, r);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
+ goto failed;
+ }
}
}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 365ad751ce0f..59d19dd64928 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
*
* Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
#include <linux/kernel.h>
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 22aedb28aad7..8db98a1f83dc 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/common.c - Common device power management code.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 96a6dc9d305c..33c30c1e6a30 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/domain.c - Common code related to device power domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#define pr_fmt(fmt) "PM: " fmt
#include <linux/delay.h>
@@ -22,6 +20,7 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
+#include <linux/cpu.h>
#include "power.h"
@@ -128,6 +127,8 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
+#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -391,11 +392,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
if (unlikely(!genpd->set_performance_state))
return -EINVAL;
- if (unlikely(!dev->power.subsys_data ||
- !dev->power.subsys_data->domain_data)) {
- WARN_ON(1);
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
return -EINVAL;
- }
genpd_lock(genpd);
@@ -517,7 +516,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
* (1) The domain is configured as always on.
* (2) When the domain has a subdomain being powered on.
*/
- if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
+ if (genpd_is_always_on(genpd) ||
+ genpd_is_rpm_always_on(genpd) ||
+ atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
@@ -1396,8 +1397,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#endif /* CONFIG_PM_SLEEP */
-static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
- struct gpd_timing_data *td)
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
int ret;
@@ -1412,9 +1412,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
goto err_put;
}
- if (td)
- gpd_data->td = *td;
-
gpd_data->base.dev = dev;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
@@ -1454,8 +1451,57 @@ static void genpd_free_dev_data(struct device *dev,
dev_pm_put_subsys_data(dev);
}
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+ int cpu, bool set, unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return;
+
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ struct generic_pm_domain *master = link->master;
+
+ genpd_lock_nested(master, depth + 1);
+ genpd_update_cpumask(master, cpu, set, depth + 1);
+ genpd_unlock(master);
+ }
+
+ if (set)
+ cpumask_set_cpu(cpu, genpd->cpus);
+ else
+ cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, true, 0);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, false, 0);
+}
+
+static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
+{
+ int cpu;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (get_cpu_device(cpu) == dev)
+ return cpu;
+ }
+
+ return -1;
+}
+
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
- struct gpd_timing_data *td)
+ struct device *base_dev)
{
struct generic_pm_domain_data *gpd_data;
int ret;
@@ -1465,16 +1511,19 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data = genpd_alloc_dev_data(dev, td);
+ gpd_data = genpd_alloc_dev_data(dev);
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
+ gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
+
ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
if (ret)
goto out;
genpd_lock(genpd);
+ genpd_set_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
@@ -1502,7 +1551,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
int ret;
mutex_lock(&gpd_list_lock);
- ret = genpd_add_device(genpd, dev, NULL);
+ ret = genpd_add_device(genpd, dev, dev);
mutex_unlock(&gpd_list_lock);
return ret;
@@ -1532,6 +1581,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
genpd->device_count--;
genpd->max_off_time_changed = true;
+ genpd_clear_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, NULL);
list_del_init(&pdd->list_node);
@@ -1686,6 +1736,12 @@ out:
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+static void genpd_free_default_power_state(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ kfree(states);
+}
+
static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
struct genpd_power_state *state;
@@ -1696,7 +1752,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
genpd->states = state;
genpd->state_count = 1;
- genpd->free = state;
+ genpd->free_states = genpd_free_default_power_state;
return 0;
}
@@ -1759,14 +1815,22 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
}
/* Always-on domains must be powered on at initialization. */
- if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
+ if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
+ !genpd_status_on(genpd))
return -EINVAL;
+ if (genpd_is_cpu_domain(genpd) &&
+ !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
/* Use only one "off" state if there were no states declared */
if (genpd->state_count == 0) {
ret = genpd_set_default_power_state(genpd);
- if (ret)
+ if (ret) {
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
return ret;
+ }
} else if (!gov && genpd->state_count > 1) {
pr_warn("%s: no governor for states\n", genpd->name);
}
@@ -1812,7 +1876,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
cancel_work_sync(&genpd->power_off_work);
- kfree(genpd->free);
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ if (genpd->free_states)
+ genpd->free_states(genpd->states, genpd->state_count);
+
pr_debug("%s: removed %s\n", __func__, genpd->name);
return 0;
@@ -2190,7 +2258,7 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
goto out;
}
- ret = genpd_add_device(genpd, dev, NULL);
+ ret = genpd_add_device(genpd, dev, dev);
out:
mutex_unlock(&gpd_list_lock);
@@ -2274,6 +2342,7 @@ EXPORT_SYMBOL_GPL(of_genpd_remove_last);
static void genpd_release_dev(struct device *dev)
{
+ of_node_put(dev->of_node);
kfree(dev);
}
@@ -2335,14 +2404,14 @@ static void genpd_dev_pm_sync(struct device *dev)
genpd_queue_power_off_work(pd);
}
-static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
+static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
unsigned int index, bool power_on)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
int ret;
- ret = of_parse_phandle_with_args(np, "power-domains",
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", index, &pd_args);
if (ret < 0)
return ret;
@@ -2354,12 +2423,12 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return driver_deferred_probe_check_state(dev);
+ return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
- ret = genpd_add_device(pd, dev, NULL);
+ ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
if (ret < 0) {
@@ -2410,7 +2479,7 @@ int genpd_dev_pm_attach(struct device *dev)
"#power-domain-cells") != 1)
return 0;
- return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
+ return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
@@ -2440,10 +2509,10 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
if (!dev->of_node)
return NULL;
- /* Deal only with devices using multiple PM domains. */
+ /* Verify that the index is within a valid range. */
num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
- if (num_domains < 2 || index >= num_domains)
+ if (index >= num_domains)
return NULL;
/* Allocate and register device on the genpd bus. */
@@ -2454,15 +2523,16 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
virt_dev->bus = &genpd_bus_type;
virt_dev->release = genpd_release_dev;
+ virt_dev->of_node = of_node_get(dev->of_node);
ret = device_register(virt_dev);
if (ret) {
- kfree(virt_dev);
+ put_device(virt_dev);
return ERR_PTR(ret);
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
+ ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
if (ret < 1) {
device_unregister(virt_dev);
return ret ? ERR_PTR(ret) : NULL;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 4d07e38a8247..3838045c9277 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -1,15 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/domain_governor.c - Governors for device PM domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
static int dev_update_qos_constraint(struct device *dev, void *data)
{
@@ -210,8 +211,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
- if (!genpd->max_off_time_changed)
+ if (!genpd->max_off_time_changed) {
+ genpd->state_idx = genpd->cached_power_down_state_idx;
return genpd->cached_power_down_ok;
+ }
/*
* We have to invalidate the cached results for the masters, so
@@ -236,6 +239,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
genpd->state_idx--;
}
+ genpd->cached_power_down_state_idx = genpd->state_idx;
return genpd->cached_power_down_ok;
}
@@ -244,6 +248,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
return false;
}
+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct cpuidle_device *dev;
+ ktime_t domain_wakeup, next_hrtimer;
+ s64 idle_duration_ns;
+ int cpu, i;
+
+ /* Validate dev PM QoS constraints. */
+ if (!default_power_down_ok(pd))
+ return false;
+
+ if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+ return true;
+
+ /*
+ * Find the next wakeup for any of the online CPUs within the PM domain
+	 * and its subdomains. Note that we only need genpd->cpus, as it already
+ * contains a mask of all CPUs from subdomains.
+ */
+ domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+ for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (dev) {
+ next_hrtimer = READ_ONCE(dev->next_hrtimer);
+ if (ktime_before(next_hrtimer, domain_wakeup))
+ domain_wakeup = next_hrtimer;
+ }
+ }
+
+	/* The minimum idle duration is from now until the next wakeup. */
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ if (idle_duration_ns <= 0)
+ return false;
+
+ /*
+	 * Find the deepest idle state whose residency value is satisfied,
+	 * taking into account the power off latency for the state.
+ * Start at the state picked by the dev PM QoS constraint validation.
+ */
+ i = genpd->state_idx;
+ do {
+ if (idle_duration_ns >= (genpd->states[i].residency_ns +
+ genpd->states[i].power_off_latency_ns)) {
+ genpd->state_idx = i;
+ return true;
+ }
+ } while (--i >= 0);
+
+ return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+ .suspend_ok = default_suspend_ok,
+ .power_down_ok = cpu_power_down_ok,
+};
+#endif
+
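A minimal sketch of how a platform might pair a domain with the new CPU-aware governor, assuming a hypothetical domain definition; pm_genpd_init() and the GENPD_FLAG_CPU_DOMAIN flag introduced in this series are what the governor is designed to work with:

/* Hypothetical CPU PM domain wired to the CPU-aware governor above. */
static struct generic_pm_domain my_cpu_pd = {
	.name = "my_cpu_pd",
	.flags = GENPD_FLAG_CPU_DOMAIN,
};

static int example_register_cpu_domain(void)
{
	/* Register the domain as initially powered on, governed by pm_domain_cpu_gov. */
	return pm_genpd_init(&my_cpu_pd, &pm_domain_cpu_gov, false);
}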
struct dev_power_governor simple_qos_governor = {
.suspend_ok = default_suspend_ok,
.power_down_ok = default_power_down_ok,
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index b2ed606265a8..4fa525668cb7 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
*
* Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f80d298de3fa..dcfc0a36c8f7 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/main.c - Where the driver meets power management.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
- * This file is released under the GPLv2
- *
- *
* The driver model core calls device_pm_add() when a device is registered.
* This will initialize the embedded device_pm_info object in the device
* and add it to the list of power-controlled devices. sysfs entries for
@@ -207,7 +205,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -225,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
rettime = ktime_get();
nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
- dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
+ dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
(unsigned long long)nsecs >> 10);
}
@@ -478,7 +476,7 @@ struct dpm_watchdog {
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that the PM watchdog depends on.
*
* Called when a driver has timed out suspending or resuming.
* There's not much we can do here to recover so panic() to
@@ -706,6 +704,19 @@ static bool is_async(struct device *dev)
&& !pm_trace_is_enabled();
}
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(func, dev);
+ return true;
+ }
+
+ return false;
+}
+
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = (struct device *)data;
@@ -732,13 +743,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume_noirq, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ dpm_async_fn(dev, async_resume_noirq);
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
@@ -889,13 +895,8 @@ void dpm_resume_early(pm_message_t state)
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume_early, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ dpm_async_fn(dev, async_resume_early);
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
@@ -1053,13 +1054,8 @@ void dpm_resume(pm_message_t state)
pm_transition = state;
async_error = 0;
- list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ dpm_async_fn(dev, async_resume);
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
@@ -1373,13 +1369,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
static int device_suspend_noirq(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend_noirq, dev);
+ if (dpm_async_fn(dev, async_suspend_noirq))
return 0;
- }
+
return __device_suspend_noirq(dev, pm_transition, false);
}
@@ -1576,13 +1568,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
static int device_suspend_late(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend_late, dev);
+ if (dpm_async_fn(dev, async_suspend_late))
return 0;
- }
return __device_suspend_late(dev, pm_transition, false);
}
@@ -1747,6 +1734,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || dev->power.wakeup_path)
+ dev->power.direct_complete = false;
+
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
@@ -1842,13 +1833,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
static int device_suspend(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend, dev);
+ if (dpm_async_fn(dev, async_suspend))
return 0;
- }
return __device_suspend(dev, pm_transition, false);
}
@@ -2063,14 +2049,14 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
if (ret)
- pr_err("%s(): %pF returns %d\n", function, fn, ret);
+ pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
* device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
* @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
*/
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index f80e402ef778..6c91f8df1d59 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Devices PM QoS constraints management
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
* This module exposes the interface to kernel space for specifying
* per-device PM QoS dependencies. It provides infrastructure for registration
* of:
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 977db40378b0..952a1e7057c7 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 1226e441ddfe..1b9c281cbe41 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -1,7 +1,5 @@
-/*
- * drivers/base/power/sysfs.c - sysfs entries for device PM
- */
-
+// SPDX-License-Identifier: GPL-2.0
+/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 2bd9d2c744ca..977d27bd1a22 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/trace.c
*
@@ -6,7 +7,6 @@
* Trace facility for suspend/resume problems, when none of the
* devices may be working.
*/
-
#define pr_fmt(fmt) "PM: " fmt
#include <linux/pm-trace.h>
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index b8fa5c0f2d13..5ce77d1ef9fc 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -1,16 +1,5 @@
-/*
- * wakeirq.c - Device wakeirq helper functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
+// SPDX-License-Identifier: GPL-2.0
+/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index bb1ae175fae1..5b2b6a05a4f3 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/wakeup.c - System wakeup events framework
*
* Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
-
#define pr_fmt(fmt) "PM: " fmt
#include <linux/device.h>
@@ -804,7 +802,7 @@ void pm_print_active_wakeup_sources(void)
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
- pr_debug("active wakeup source: %s\n", ws->name);
+ pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -815,7 +813,7 @@ void pm_print_active_wakeup_sources(void)
}
if (!active && last_activity_ws)
- pr_debug("last active wakeup source: %s\n",
+ pm_pr_dbg("last active wakeup source: %s\n",
last_activity_ws->name);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -845,7 +843,7 @@ bool pm_wakeup_pending(void)
raw_spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
- pr_debug("Wakeup pending, aborting suspend\n");
+ pm_pr_dbg("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 8b91ab380d14..348b37e64944 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -984,6 +984,81 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
/**
+ * fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers
+ * @fwnode: parent fwnode_handle containing the graph
+ * @port: identifier of the port node
+ * @endpoint: identifier of the endpoint node under the port node
+ * @flags: fwnode lookup flags
+ *
+ * Return the fwnode handle of the local endpoint corresponding to the port and
+ * endpoint IDs, or NULL if not found.
+ *
+ * If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint
+ * has not been found, look for the closest endpoint ID greater than the
+ * specified one and return the endpoint that corresponds to it, if present.
+ *
+ * Do not return endpoints that belong to disabled devices, unless
+ * FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
+ *
+ * The returned endpoint needs to be released by calling fwnode_handle_put() on
+ * it when it is not needed any more.
+ */
+struct fwnode_handle *
+fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+ u32 port, u32 endpoint, unsigned long flags)
+{
+ struct fwnode_handle *ep = NULL, *best_ep = NULL;
+ unsigned int best_ep_id = 0;
+ bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT;
+ bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED);
+
+ while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep))) {
+ struct fwnode_endpoint fwnode_ep = { 0 };
+ int ret;
+
+ if (enabled_only) {
+ struct fwnode_handle *dev_node;
+ bool available;
+
+ dev_node = fwnode_graph_get_remote_port_parent(ep);
+ available = fwnode_device_is_available(dev_node);
+ fwnode_handle_put(dev_node);
+ if (!available)
+ continue;
+ }
+
+ ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep);
+ if (ret < 0)
+ continue;
+
+ if (fwnode_ep.port != port)
+ continue;
+
+ if (fwnode_ep.id == endpoint)
+ return ep;
+
+ if (!endpoint_next)
+ continue;
+
+ /*
+ * If the endpoint that has just been found is not the first
+ * matching one and the ID of the one found previously is closer
+ * to the requested endpoint ID, skip it.
+ */
+ if (fwnode_ep.id < endpoint ||
+ (best_ep && best_ep_id < fwnode_ep.id))
+ continue;
+
+ fwnode_handle_put(best_ep);
+ best_ep = fwnode_handle_get(ep);
+ best_ep_id = fwnode_ep.id;
+ }
+
+ return best_ep;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
+
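A minimal usage sketch, assuming the caller already holds a device's fwnode and that a port 0 exists in its graph; the wrapper name and the port/endpoint IDs are illustrative only:

/* Look up endpoint 0 on port 0, or the next higher endpoint ID on that port. */
static void example_find_endpoint(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *ep;

	ep = fwnode_graph_get_endpoint_by_id(fwnode, 0, 0,
					     FWNODE_GRAPH_ENDPOINT_NEXT);
	if (!ep)
		return;

	/* ... use the endpoint ... */

	fwnode_handle_put(ep);
}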
+/**
* fwnode_graph_parse_endpoint - parse common endpoint node properties
* @fwnode: pointer to endpoint fwnode_handle
* @endpoint: pointer to the fwnode endpoint data structure
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index a98fced9bff8..3d80c4b43f72 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Register map access API internal header
*
* Copyright 2011 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _REGMAP_INTERNAL_H
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index bc6cd88b8cc6..b7e4b2464102 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - flat caching support
- *
- * Copyright 2012 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - flat caching support
+//
+// Copyright 2012 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/seq_file.h>
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 4ff311374c4a..fc14e8b9344f 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - LZO caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - LZO caching support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/lzo.h>
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 9cbb4b0cd01b..cfa29dc89bbf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - rbtree caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - rbtree caching support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/debugfs.h>
#include <linux/device.h>
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 773560348337..a93cafd7be4f 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/bsearch.h>
#include <linux/device.h>
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index c03ebfd4c731..b9f76bdf74a9 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -1,20 +1,8 @@
-/*
- * Register map access API - AC'97 support
- *
- * Copyright 2013 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - AC'97 support
+//
+// Copyright 2013 Linaro Ltd. All rights reserved.
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 19eb454f26c3..263f82516ff4 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - debugfs
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - debugfs
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/slab.h>
#include <linux/mutex.h>
@@ -195,6 +191,28 @@ static inline void regmap_calc_tot_len(struct regmap *map,
}
}
+static int regmap_next_readable_reg(struct regmap *map, int reg)
+{
+ struct regmap_debugfs_off_cache *c;
+ int ret = -EINVAL;
+
+ if (regmap_printable(map, reg + map->reg_stride)) {
+ ret = reg + map->reg_stride;
+ } else {
+ mutex_lock(&map->cache_lock);
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ if (reg > c->max_reg)
+ continue;
+ if (reg < c->base_reg) {
+ ret = c->base_reg;
+ break;
+ }
+ }
+ mutex_unlock(&map->cache_lock);
+ }
+ return ret;
+}
+
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
unsigned int to, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -218,12 +236,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
/* Work out which register we're starting at */
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
- for (i = start_reg; i <= to; i += map->reg_stride) {
- if (!regmap_readable(map, i) && !regmap_cached(map, i))
- continue;
-
- if (regmap_precious(map, i))
- continue;
+ for (i = start_reg; i >= 0 && i <= to;
+ i = regmap_next_readable_reg(map, i)) {
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 056acde5e7d3..ac9b31c57967 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - I2C support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - I2C support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/regmap.h>
#include <linux/i2c.h>
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5059748afd4c..c9dc70ceca5f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -1,14 +1,10 @@
-/*
- * regmap based irq_chip
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// regmap based irq_chip
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/export.h>
@@ -761,9 +757,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (chip->num_type_reg && !chip->type_in_mask) {
for (i = 0; i < chip->num_type_reg; ++i) {
- if (!d->type_buf_def[i])
- continue;
-
reg = chip->type_base +
(i * map->reg_stride * d->type_reg_stride);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 8741fb5f8f54..af967d8f975e 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -1,20 +1,8 @@
-/*
- * Register map access API - MMIO support
- *
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - MMIO support
+//
+// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index c7150dd264d5..c1894e93c378 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - SPI support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPI support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 0bfb8ed244d5..cdf12d2aa3a1 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -1,22 +1,13 @@
-/*
- * Register map access API - SPMI support
- *
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * Based on regmap-i2c.c:
- * Copyright 2011 Wolfson Microelectronics plc
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPMI support
+//
+// Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+//
+// Based on regmap-i2c.c:
+// Copyright 2011 Wolfson Microelectronics plc
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
#include <linux/regmap.h>
#include <linux/spmi.h>
#include <linux/module.h>
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index e6c64b0be5b2..3a7d30b8c3ac 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,13 +1,9 @@
-/*
- * Register map access API - W1 (1-Wire) support
- *
- * Copyright (c) 2017 Radioavionica Corporation
- * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - W1 (1-Wire) support
+//
+// Copyright (c) 2017 Radioavionica Corporation
+// Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
#include <linux/regmap.h>
#include <linux/module.h>
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4f822e087def..f1025452bb39 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/slab.h>
@@ -1493,11 +1489,10 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
WARN_ON(!map->bus);
/* Check for unwritable registers before we start */
- if (map->writeable_reg)
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!map->writeable_reg(map->dev,
- reg + regmap_get_offset(map, i)))
- return -EINVAL;
+ for (i = 0; i < val_len / map->format.val_bytes; i++)
+ if (!regmap_writeable(map,
+ reg + regmap_get_offset(map, i)))
+ return -EINVAL;
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 6e076f359dcc..0d346a307140 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -62,19 +62,19 @@ int syscore_suspend(void)
list_for_each_entry_reverse(ops, &syscore_ops_list, node)
if (ops->suspend) {
if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->suspend);
+ pr_info("PM: Calling %pS\n", ops->suspend);
ret = ops->suspend();
if (ret)
goto err_out;
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pF\n", ops->suspend);
+ "Interrupts enabled after %pS\n", ops->suspend);
}
trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
- pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
+ pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
list_for_each_entry_continue(ops, &syscore_ops_list, node)
if (ops->resume)
@@ -100,10 +100,10 @@ void syscore_resume(void)
list_for_each_entry(ops, &syscore_ops_list, node)
if (ops->resume) {
if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->resume);
+ pr_info("PM: Calling %pS\n", ops->resume);
ops->resume();
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pF\n", ops->resume);
+ "Interrupts enabled after %pS\n", ops->resume);
}
trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
@@ -122,7 +122,7 @@ void syscore_shutdown(void)
list_for_each_entry_reverse(ops, &syscore_ops_list, node)
if (ops->shutdown) {
if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->shutdown);
+ pr_info("PM: Calling %pS\n", ops->shutdown);
ops->shutdown();
}
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 90477c5fd9f9..0f1f7277a013 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0903e0803ec8..92b930cb3b72 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1829,6 +1829,7 @@ static int __init fd_probe_drives(void)
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disk->disk_name, "fd%d", drive);
disk->private_data = &unit[drive];
set_capacity(disk, 880*2);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index b0dbbdfeb33e..c7b5c4671f05 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -2028,6 +2028,7 @@ static int __init atari_floppy_init (void)
unit[i].disk->first_minor = i;
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
+ unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
unit[i].disk->private_data = &unit[i];
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c18586fccb6f..17defbf4f332 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
/*
* Must use NOIO because we don't want to recurse back into the
* block or filesystem layers from page reclaim.
- *
- * Cannot support DAX and highmem, because our ->direct_access
- * routine for DAX must return memory that is always addressable.
- * If DAX was reworked to use pfns and kmap throughout, this
- * restriction might be able to be lifted.
*/
- gfp_flags = GFP_NOIO | __GFP_ZERO;
+ gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
page = alloc_page(gfp_flags);
if (!page)
return NULL;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 000a2f4c0e92..549c64df9708 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1317,10 +1317,6 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_FIXED_BM \
((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
-#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
-#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
-#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
-#else
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
@@ -1333,7 +1329,6 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif
-#endif
/* Estimate max bio size as 256 * PAGE_SIZE,
* so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
@@ -1778,7 +1773,7 @@ static inline void __drbd_chk_io_error_(struct drbd_device *device,
_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
+ /* fall through - for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
/* Remember whether we saw a READ or WRITE error.
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index f2471172a961..1cb5a0b85fd9 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -114,7 +114,7 @@ static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
if (!info || !info[0])
return 0;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
if (!nla)
return err;
@@ -135,7 +135,7 @@ static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
int err = -EMSGSIZE;
int len;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
if (!nla)
return err;
@@ -3269,7 +3269,7 @@ static int nla_put_drbd_cfg_context(struct sk_buff *skb,
struct drbd_device *device)
{
struct nlattr *nla;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
if (!nla)
goto nla_put_failure;
if (device &&
@@ -3837,7 +3837,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
if (err)
goto nla_put_failure;
- nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
if (!nla)
goto nla_put_failure;
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c
index 8e261cb5198b..6a09b0b98018 100644
--- a/drivers/block/drbd/drbd_nla.c
+++ b/drivers/block/drbd/drbd_nla.c
@@ -35,7 +35,8 @@ int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
err = drbd_nla_check_mandatory(maxtype, nla);
if (!err)
- err = nla_parse_nested(tb, maxtype, nla, policy, NULL);
+ err = nla_parse_nested_deprecated(tb, maxtype, nla, policy,
+ NULL);
return err;
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7ad88d91a09..6a727df02889 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3094,7 +3094,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
rv = 1;
break;
}
- /* Else fall through to one of the other strategies... */
+ /* Else fall through - to one of the other strategies... */
case ASB_DISCARD_OLDER_PRI:
if (self == 0 && peer == 1) {
rv = 1;
@@ -3119,7 +3119,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
- /* else: fall through */
+ /* else, fall through */
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
@@ -5443,7 +5443,6 @@ static int drbd_do_auth(struct drbd_connection *connection)
rcu_read_unlock();
desc->tfm = connection->cram_hmac_tfm;
- desc->flags = 0;
rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
@@ -6116,7 +6115,7 @@ int drbd_ack_receiver(struct drbd_thread *thi)
err = cmd->fn(connection, &pi);
if (err) {
- drbd_err(connection, "%pf failed\n", cmd->fn);
+ drbd_err(connection, "%ps failed\n", cmd->fn);
goto reconnect;
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 643a04af213b..3809c7e6be8c 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -866,7 +866,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
} /* else: FIXME can this happen? */
break;
}
- /* else, fall through to BARRIER_ACKED */
+ /* else, fall through - to BARRIER_ACKED */
case BARRIER_ACKED:
/* barrier ack for READ requests does not make sense */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 268ef0c5d4ab..6781bcf3ec26 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -304,7 +304,6 @@ void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req,
void *src;
desc->tfm = tfm;
- desc->flags = 0;
crypto_shash_init(desc);
@@ -332,7 +331,6 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
struct bvec_iter iter;
desc->tfm = tfm;
- desc->flags = 0;
crypto_shash_init(desc);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 95f608d1a098..b8998abd86a5 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1693,7 +1693,7 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
/* we don't even know which FDC is the culprit */
pr_info("DOR0=%x\n", fdc_state[0].dor);
pr_info("floppy interrupt on bizarre fdc %d\n", fdc);
- pr_info("handler=%pf\n", handler);
+ pr_info("handler=%ps\n", handler);
is_alive(__func__, "bizarre fdc");
return IRQ_NONE;
}
@@ -1752,7 +1752,7 @@ static void reset_interrupt(void)
debugt(__func__, "");
result(); /* get the status ready for set_fdc */
if (FDCS->reset) {
- pr_info("reset set in interrupt, calling %pf\n", cont->error);
+ pr_info("reset set in interrupt, calling %ps\n", cont->error);
cont->error(); /* a reset just after a reset. BAD! */
}
cont->redo();
@@ -1793,7 +1793,7 @@ static void show_floppy(void)
pr_info("\n");
pr_info("floppy driver state\n");
pr_info("-------------------\n");
- pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%pf\n",
+ pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%ps\n",
jiffies, interruptjiffies, jiffies - interruptjiffies,
lasthandler);
@@ -1812,9 +1812,9 @@ static void show_floppy(void)
pr_info("status=%x\n", fd_inb(FD_STATUS));
pr_info("fdc_busy=%lu\n", fdc_busy);
if (do_floppy)
- pr_info("do_floppy=%pf\n", do_floppy);
+ pr_info("do_floppy=%ps\n", do_floppy);
if (work_pending(&floppy_work))
- pr_info("floppy_work.func=%pf\n", floppy_work.func);
+ pr_info("floppy_work.func=%ps\n", floppy_work.func);
if (delayed_work_pending(&fd_timer))
pr_info("delayed work.function=%p expires=%ld\n",
fd_timer.work.func,
@@ -4540,6 +4540,7 @@ static int __init do_floppy_init(void)
disks[drive]->major = FLOPPY_MAJOR;
disks[drive]->first_minor = TOMINOR(drive);
disks[drive]->fops = &floppy_fops;
+ disks[drive]->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disks[drive]->disk_name, "fd%d", drive);
timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bf1c61cab8eb..102d79575895 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -264,12 +264,20 @@ lo_do_transfer(struct loop_device *lo, int cmd,
return ret;
}
+static inline void loop_iov_iter_bvec(struct iov_iter *i,
+ unsigned int direction, const struct bio_vec *bvec,
+ unsigned long nr_segs, size_t count)
+{
+ iov_iter_bvec(i, direction, bvec, nr_segs, count);
+ i->type |= ITER_BVEC_FLAG_NO_REF;
+}
+
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
ssize_t bw;
- iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
+ loop_iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
@@ -347,7 +355,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
+ loop_iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;
@@ -388,7 +396,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
- iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
+ loop_iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) {
ret = len;
@@ -555,7 +563,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
atomic_set(&cmd->ref, 2);
- iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
+ loop_iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
@@ -900,6 +908,24 @@ static int loop_prepare_queue(struct loop_device *lo)
return 0;
}
+static void loop_update_rotational(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct inode *file_inode = file->f_mapping->host;
+ struct block_device *file_bdev = file_inode->i_sb->s_bdev;
+ struct request_queue *q = lo->lo_queue;
+ bool nonrot = true;
+
+ /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
+ if (file_bdev)
+ nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
+
+ if (nonrot)
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+}
+
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
@@ -963,6 +989,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
+ loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 83302ecdc8db..f0105d118056 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1192,14 +1192,6 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
else
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
-#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
- /* Demux ID.DRAT & ID.RZAT to determine trim support */
- if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
- port->dd->trim_supp = true;
- else
-#endif
- port->dd->trim_supp = false;
-
/* Set the identify buffer as valid. */
port->identify_valid = 1;
@@ -1387,77 +1379,6 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
}
/*
- * Trim unused sectors
- *
- * @dd pointer to driver_data structure
- * @lba starting lba
- * @len # of 512b sectors to trim
- */
-static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba,
- unsigned int len)
-{
- u64 tlba, tlen, sect_left;
- struct mtip_trim_entry *buf;
- dma_addr_t dma_addr;
- struct host_to_dev_fis fis;
- blk_status_t ret = BLK_STS_OK;
- int i;
-
- if (!len || dd->trim_supp == false)
- return BLK_STS_IOERR;
-
- /* Trim request too big */
- WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
-
- /* Trim request not aligned on 4k boundary */
- WARN_ON(len % 8 != 0);
-
- /* Warn if vu_trim structure is too big */
- WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
-
- /* Allocate a DMA buffer for the trim structure */
- buf = dma_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
- GFP_KERNEL);
- if (!buf)
- return BLK_STS_RESOURCE;
- memset(buf, 0, ATA_SECT_SIZE);
-
- for (i = 0, sect_left = len, tlba = lba;
- i < MTIP_MAX_TRIM_ENTRIES && sect_left;
- i++) {
- tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
- MTIP_MAX_TRIM_ENTRY_LEN :
- sect_left);
- buf[i].lba = cpu_to_le32(tlba);
- buf[i].range = cpu_to_le16(tlen);
- tlba += tlen;
- sect_left -= tlen;
- }
- WARN_ON(sect_left != 0);
-
- /* Build the fis */
- memset(&fis, 0, sizeof(struct host_to_dev_fis));
- fis.type = 0x27;
- fis.opts = 1 << 7;
- fis.command = 0xfb;
- fis.features = 0x60;
- fis.sect_count = 1;
- fis.device = ATA_DEVICE_OBS;
-
- if (mtip_exec_internal_command(dd->port,
- &fis,
- 5,
- dma_addr,
- ATA_SECT_SIZE,
- 0,
- MTIP_TRIM_TIMEOUT_MS) < 0)
- ret = BLK_STS_IOERR;
-
- dma_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
- return ret;
-}
-
-/*
* Get the drive capacity.
*
* @dd Pointer to the device data structure.
@@ -3590,8 +3511,6 @@ static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- if (req_op(rq) == REQ_OP_DISCARD)
- return mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
mtip_hw_submit_io(dd, rq, cmd, hctx);
return BLK_STS_OK;
}
@@ -3769,14 +3688,6 @@ skip_create_disk:
blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
- /* Signal trim support */
- if (dd->trim_supp == true) {
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, dd->queue);
- dd->queue->limits.discard_granularity = 4096;
- blk_queue_max_discard_sectors(dd->queue,
- MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
- }
-
/* Set the capacity of the device in 512 byte sectors. */
if (!(mtip_hw_get_capacity(dd, &capacity))) {
dev_warn(&dd->pdev->dev,
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index abce25f27f57..91c1cb5b1532 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -193,21 +193,6 @@ struct mtip_work {
mtip_workq_sdbfx(w->port, group, w->completed); \
}
-#define MTIP_TRIM_TIMEOUT_MS 240000
-#define MTIP_MAX_TRIM_ENTRIES 8
-#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
-
-struct mtip_trim_entry {
- __le32 lba; /* starting lba of region */
- __le16 rsvd; /* unused */
- __le16 range; /* # of 512b blocks to trim */
-} __packed;
-
-struct mtip_trim {
- /* Array of regions to trim */
- struct mtip_trim_entry entry[MTIP_MAX_TRIM_ENTRIES];
-} __packed;
-
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
/*
@@ -474,8 +459,6 @@ struct driver_data {
struct dentry *dfs_node;
- bool trim_supp; /* flag indicating trim support */
-
bool sr;
int numa_node; /* NUMA support */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 90ba9f4c03f3..053958a8a2ba 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -44,6 +44,9 @@
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/nbd.h>
+
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;
@@ -510,6 +513,10 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
if (sent) {
if (sent >= sizeof(request)) {
skip = sent - sizeof(request);
+
+ /* initialize handle for tracing purposes */
+ handle = nbd_cmd_handle(cmd);
+
goto send_pages;
}
iov_iter_advance(&from, sent);
@@ -526,11 +533,14 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
handle = nbd_cmd_handle(cmd);
memcpy(request.handle, &handle, sizeof(handle));
+ trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
+
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
+ trace_nbd_header_sent(req, handle);
if (result <= 0) {
if (was_interrupted(result)) {
/* If we havne't sent anything we can just return BUSY,
@@ -603,6 +613,7 @@ send_pages:
bio = next;
}
out:
+ trace_nbd_payload_sent(req, handle);
nsock->pending = NULL;
nsock->sent = 0;
return 0;
@@ -650,6 +661,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
tag, req);
return ERR_PTR(-ENOENT);
}
+ trace_nbd_header_received(req, handle);
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
@@ -703,6 +715,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
}
}
out:
+ trace_nbd_payload_received(req, handle);
mutex_unlock(&cmd->lock);
return ret ? ERR_PTR(ret) : cmd;
}
@@ -1797,8 +1810,10 @@ again:
ret = -EINVAL;
goto out;
}
- ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
- nbd_sock_policy, info->extack);
+ ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
+ attr,
+ nbd_sock_policy,
+ info->extack);
if (ret != 0) {
printk(KERN_ERR "nbd: error processing sock list\n");
ret = -EINVAL;
@@ -1968,8 +1983,10 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
ret = -EINVAL;
goto out;
}
- ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
- nbd_sock_policy, info->extack);
+ ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
+ attr,
+ nbd_sock_policy,
+ info->extack);
if (ret != 0) {
printk(KERN_ERR "nbd: error processing sock list\n");
ret = -EINVAL;
@@ -1999,22 +2016,22 @@ out:
static const struct genl_ops nbd_connect_genl_ops[] = {
{
.cmd = NBD_CMD_CONNECT,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_connect,
},
{
.cmd = NBD_CMD_DISCONNECT,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_disconnect,
},
{
.cmd = NBD_CMD_RECONFIGURE,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_reconfigure,
},
{
.cmd = NBD_CMD_STATUS,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_status,
},
};
@@ -2031,6 +2048,7 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.ops = nbd_connect_genl_ops,
.n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
.maxattr = NBD_ATTR_MAX,
+ .policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};
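For context, the hunks above move nbd's netlink attribute policy from each genl_ops entry to the genl_family itself, which the generic netlink core then validates centrally. A minimal sketch of that pattern with illustrative names (demo_*, DEMO_*); it is not part of this patch:

#include <linux/kernel.h>
#include <net/genetlink.h>

enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_NAME,
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

/* One policy for the whole family instead of one per operation. */
static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_NAME] = { .type = NLA_NUL_STRING },
};

static int demo_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_ops demo_ops[] = {
	{
		.cmd  = 1,
		.doit = demo_doit,
	},
};

static struct genl_family demo_family = {
	.name    = "demo",
	.version = 1,
	.maxattr = DEMO_ATTR_MAX,
	.policy  = demo_policy,	/* validated by the genetlink core */
	.ops     = demo_ops,
	.n_ops   = ARRAY_SIZE(demo_ops),
};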
@@ -2050,7 +2068,7 @@ static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
*/
if (refcount_read(&nbd->config_refs))
connected = 1;
- dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
+ dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
if (!dev_opt)
return -EMSGSIZE;
ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
@@ -2098,7 +2116,7 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
+ dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
if (index == -1) {
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
if (ret) {
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 417a9f15c116..d7ac09c092f2 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1748,6 +1748,11 @@ static int __init null_init(void)
return -EINVAL;
}
+ if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+ pr_err("null_blk: invalid home_node value\n");
+ g_home_node = NUMA_NO_NODE;
+ }
+
if (g_queue_mode == NULL_Q_RQ) {
pr_err("null_blk: legacy IO path no longer available\n");
return -EINVAL;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 377a694dc228..001dbdcbf355 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -314,6 +314,7 @@ static void pcd_init_units(void)
disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
1, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
+ put_disk(disk);
disk->queue = NULL;
continue;
}
@@ -342,6 +343,7 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
@@ -750,6 +752,8 @@ static int pcd_detect(void)
printk("%s: No CD-ROM drive found\n", name);
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
blk_cleanup_queue(cd->disk->queue);
cd->disk->queue = NULL;
blk_mq_free_tag_set(&cd->tag_set);
@@ -1010,8 +1014,14 @@ static int __init pcd_init(void)
pcd_probe_capabilities();
if (register_blkdev(major, name)) {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
+
+ blk_cleanup_queue(cd->disk->queue);
+ blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
+ }
return -EBUSY;
}
@@ -1032,6 +1042,9 @@ static void __exit pcd_exit(void)
int unit;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
+
if (cd->present) {
del_gendisk(cd->disk);
pi_release(cd->pi);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 0ff9b12d0e35..6f9ad3fc716f 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -897,6 +897,7 @@ static void pd_probe_drive(struct pd_unit *disk)
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
+ p->events = DISK_EVENT_MEDIA_CHANGE;
disk->gd = p;
p->private_data = disk;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 103b617cdc31..1e9c50a7256c 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -319,6 +319,7 @@ static void __init pf_init_units(void)
disk->first_minor = unit;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
}
@@ -762,6 +763,8 @@ static int pf_detect(void)
printk("%s: No ATAPI disk detected\n", name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+ if (!pf->disk)
+ continue;
blk_cleanup_queue(pf->disk->queue);
pf->disk->queue = NULL;
blk_mq_free_tag_set(&pf->tag_set);
@@ -1029,8 +1032,13 @@ static int __init pf_init(void)
pf_busy = 0;
if (register_blkdev(major, name)) {
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+ if (!pf->disk)
+ continue;
+ blk_cleanup_queue(pf->disk->queue);
+ blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
+ }
return -EBUSY;
}
@@ -1051,6 +1059,9 @@ static void __exit pf_exit(void)
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+ if (!pf->disk)
+ continue;
+
if (pf->present)
del_gendisk(pf->disk);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f5a71023f76c..024060165afa 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2761,7 +2761,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
- disk->async_events = pd->bdev->bd_disk->async_events;
add_disk(disk);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 4e1d9b31f60c..cc61c5ce3ad5 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -102,7 +102,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
- dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+ dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %llu\n",
__func__, __LINE__, i, bio_sectors(iter.bio),
iter.bio->bi_iter.bi_sector);
@@ -496,7 +496,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
dev->regions[dev->region_idx].size*priv->blocking_factor);
dev_info(&dev->sbd.core,
- "%s is a %s (%llu MiB total, %lu MiB for OtherOS)\n",
+ "%s is a %s (%llu MiB total, %llu MiB for OtherOS)\n",
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 0cf4509d575c..898d522e8338 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -439,6 +439,7 @@ static void card_state_change(struct rsxx_cardinfo *card,
* Fall through so the DMA devices can be attached and
* the user can attempt to pull off their data.
*/
+ /* fall through */
case CARD_STATE_GOOD:
st = rsxx_get_card_size8(card, &card->size8);
if (st)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 3fa6fcc34790..67b5ec281c6d 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -862,6 +862,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->first_minor = drive;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
+ swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
add_disk(swd->unit[drive].disk);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1e2ae90d7715..cf42729c788e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1216,6 +1216,7 @@ static int swim3_attach(struct macio_dev *mdev,
disk->first_minor = floppy_count;
disk->fops = &floppy_fops;
disk->private_data = fs;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4bc083b7c9b5..f1d90cd3dc47 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
+ num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
@@ -691,7 +693,8 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
- return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
+ return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
+ vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 87ccef4bd69e..464c9092bc8b 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1032,6 +1032,7 @@ static int ace_setup(struct ace_device *ace)
ace->gd->major = ace_major;
ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
ace->gd->fops = &ace_fops;
+ ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
ace->gd->queue = ace->queue;
ace->gd->private_data = ace;
snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
@@ -1090,6 +1091,8 @@ static int ace_setup(struct ace_device *ace)
return 0;
err_read:
+ /* prevent double queue cleanup */
+ ace->gd->queue = NULL;
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 399cad7daae7..d58a359a6622 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -774,18 +774,18 @@ struct zram_work {
struct zram *zram;
unsigned long entry;
struct bio *bio;
+ struct bio_vec bvec;
};
#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
- struct bio_vec bvec;
struct zram_work *zw = container_of(work, struct zram_work, work);
struct zram *zram = zw->zram;
unsigned long entry = zw->entry;
struct bio *bio = zw->bio;
- read_from_bdev_async(zram, &bvec, entry, bio);
+ read_from_bdev_async(zram, &zw->bvec, entry, bio);
}
/*
@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
{
struct zram_work work;
+ work.bvec = *bvec;
work.zram = zram;
work.entry = entry;
work.bio = bio;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 7b2e76e7f22f..b9c34ff9a0d3 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -336,7 +336,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8997.
+ Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8987/8997.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -350,7 +350,7 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997
chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
@@ -379,6 +379,17 @@ config BT_WILINK
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_MTKSDIO
+ tristate "MediaTek HCI SDIO driver"
+ depends on MMC
+ help
+ MediaTek Bluetooth HCI SDIO driver.
+ This driver is required if you want to use MediaTek Bluetooth
+ with SDIO interface.
+
+ Say Y here to compile support for MediaTek Bluetooth SDIO devices
+ into the kernel or say M to compile it as module (btmtksdio).
+
config BT_MTKUART
tristate "MediaTek HCI UART driver"
depends on SERIAL_DEV_BUS
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b7e393cfc1e3..34887b9b3a85 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_MTKSDIO) += btmtksdio.o
obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index d5d6e6e5da3b..8e17963ab65a 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -34,9 +34,11 @@
#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
#define BDADDR_BCM20702A1 (&(bdaddr_t) {{0x00, 0x00, 0xa0, 0x02, 0x70, 0x20}})
+#define BDADDR_BCM2076B1 (&(bdaddr_t) {{0x79, 0x56, 0x00, 0xa0, 0x76, 0x20}})
#define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
+#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
@@ -69,6 +71,9 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
* The address 20:70:02:A0:00:00 indicates a BCM20702A1 controller
* with no configured address.
*
+ * The address 20:76:A0:00:56:79 indicates a BCM2076B1 controller
+ * with no configured address.
+ *
* The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
* with waiting for configuration state.
*
@@ -80,9 +85,11 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
*/
if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM2076B1) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
- !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) {
+ !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
&bda->bdaddr);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -333,6 +340,7 @@ struct bcm_subver_table {
static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
+ { 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 047b75ce1deb..0f3a020703ab 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -235,6 +235,29 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8977 = {
.fw_dump_end = 0xf8,
};
+static const struct btmrvl_sdio_card_reg btmrvl_reg_8987 = {
+ .cfg = 0x00,
+ .host_int_mask = 0x08,
+ .host_intstatus = 0x0c,
+ .card_status = 0x5c,
+ .sq_read_base_addr_a0 = 0xf8,
+ .sq_read_base_addr_a1 = 0xf9,
+ .card_revision = 0xc8,
+ .card_fw_status0 = 0xe8,
+ .card_fw_status1 = 0xe9,
+ .card_rx_len = 0xea,
+ .card_rx_unit = 0xeb,
+ .io_port_0 = 0xe4,
+ .io_port_1 = 0xe5,
+ .io_port_2 = 0xe6,
+ .int_read_to_clear = true,
+ .host_int_rsr = 0x04,
+ .card_misc_cfg = 0xd8,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+};
+
static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
.cfg = 0x00,
.host_int_mask = 0x08,
@@ -312,6 +335,15 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
.supports_fw_dump = true,
};
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8987_uapsta.bin",
+ .reg = &btmrvl_reg_8987,
+ .support_pscan_win_report = true,
+ .sd_blksz_fw_dl = 256,
+ .supports_fw_dump = true,
+};
+
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sd8997_uapsta.bin",
@@ -343,6 +375,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8977 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9146),
.driver_data = (unsigned long)&btmrvl_sdio_sd8977 },
+ /* Marvell SD8987 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x914A),
+ .driver_data = (unsigned long)&btmrvl_sdio_sd8987 },
/* Marvell SD8997 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
@@ -1797,4 +1832,5 @@ MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
new file mode 100644
index 000000000000..813338288453
--- /dev/null
+++ b/drivers/bluetooth/btmtksdio.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 MediaTek Inc.
+
+/*
+ * Bluetooth support for MediaTek SDIO devices
+ *
+ * This file is written based on btsdio.c and btmtkuart.c.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/skbuff.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "h4_recv.h"
+
+#define VERSION "0.1"
+
+#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
+#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+
+#define MTKBTSDIO_AUTOSUSPEND_DELAY 8000
+
+static bool enable_autosuspend;
+
+struct btmtksdio_data {
+ const char *fwname;
+};
+
+static const struct btmtksdio_data mt7663_data = {
+ .fwname = FIRMWARE_MT7663,
+};
+
+static const struct btmtksdio_data mt7668_data = {
+ .fwname = FIRMWARE_MT7668,
+};
+
+static const struct sdio_device_id btmtksdio_table[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7663),
+ .driver_data = (kernel_ulong_t)&mt7663_data },
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7668),
+ .driver_data = (kernel_ulong_t)&mt7668_data },
+ { } /* Terminating entry */
+};
+
+#define MTK_REG_CHLPCR 0x4 /* W1S */
+#define C_INT_EN_SET BIT(0)
+#define C_INT_EN_CLR BIT(1)
+#define C_FW_OWN_REQ_SET BIT(8) /* For write */
+#define C_COM_DRV_OWN BIT(8) /* For read */
+#define C_FW_OWN_REQ_CLR BIT(9)
+
+#define MTK_REG_CSDIOCSR 0x8
+#define SDIO_RE_INIT_EN BIT(0)
+#define SDIO_INT_CTL BIT(2)
+
+#define MTK_REG_CHCR 0xc
+#define C_INT_CLR_CTRL BIT(1)
+
+/* CHISR has the same bit field definition as CHIER */
+#define MTK_REG_CHISR 0x10
+#define MTK_REG_CHIER 0x14
+#define FW_OWN_BACK_INT BIT(0)
+#define RX_DONE_INT BIT(1)
+#define TX_EMPTY BIT(2)
+#define TX_FIFO_OVERFLOW BIT(8)
+#define RX_PKT_LEN GENMASK(31, 16)
+
+#define MTK_REG_CTDR 0x18
+
+#define MTK_REG_CRDR 0x1c
+
+#define MTK_SDIO_BLOCK_SIZE 256
+
+#define BTMTKSDIO_TX_WAIT_VND_EVT 1
+
+enum {
+ MTK_WMT_PATCH_DWNLD = 0x1,
+ MTK_WMT_TEST = 0x2,
+ MTK_WMT_WAKEUP = 0x3,
+ MTK_WMT_HIF = 0x4,
+ MTK_WMT_FUNC_CTRL = 0x6,
+ MTK_WMT_RST = 0x7,
+ MTK_WMT_SEMAPHORE = 0x17,
+};
+
+enum {
+ BTMTK_WMT_INVALID,
+ BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_DONE,
+ BTMTK_WMT_ON_UNDONE,
+ BTMTK_WMT_ON_DONE,
+ BTMTK_WMT_ON_PROGRESS,
+};
+
+struct mtkbtsdio_hdr {
+ __le16 len;
+ __le16 reserved;
+ u8 bt_type;
+} __packed;
+
+struct mtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct mtk_hci_wmt_cmd {
+ struct mtk_wmt_hdr hdr;
+ u8 data[256];
+} __packed;
+
+struct btmtk_hci_wmt_evt {
+ struct hci_event_hdr hhdr;
+ struct mtk_wmt_hdr whdr;
+} __packed;
+
+struct btmtk_hci_wmt_evt_funcc {
+ struct btmtk_hci_wmt_evt hwhdr;
+ __be16 status;
+} __packed;
+
+struct btmtk_tci_sleep {
+ u8 mode;
+ __le16 duration;
+ __le16 host_duration;
+ u8 host_wakeup_pin;
+ u8 time_compensation;
+} __packed;
+
+struct btmtk_hci_wmt_params {
+ u8 op;
+ u8 flag;
+ u16 dlen;
+ const void *data;
+ u32 *status;
+};
+
+struct btmtksdio_dev {
+ struct hci_dev *hdev;
+ struct sdio_func *func;
+ struct device *dev;
+
+ struct work_struct tx_work;
+ unsigned long tx_state;
+ struct sk_buff_head txq;
+
+ struct sk_buff *evt_skb;
+
+ const struct btmtksdio_data *data;
+};
+
+static int mtk_hci_wmt_sync(struct hci_dev *hdev,
+ struct btmtk_hci_wmt_params *wmt_params)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ u32 hlen, status = BTMTK_WMT_INVALID;
+ struct btmtk_hci_wmt_evt *wmt_evt;
+ struct mtk_hci_wmt_cmd wc;
+ struct mtk_wmt_hdr *hdr;
+ int err;
+
+ hlen = sizeof(*hdr) + wmt_params->dlen;
+ if (hlen > 255)
+ return -EINVAL;
+
+ hdr = (struct mtk_wmt_hdr *)&wc;
+ hdr->dir = 1;
+ hdr->op = wmt_params->op;
+ hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
+ hdr->flag = wmt_params->flag;
+ memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+
+ set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ if (err < 0) {
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will not have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT
+ * state to be cleared. The driver specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT,
+ TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Execution of wmt command interrupted");
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return -ETIMEDOUT;
+ }
+
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+ bt_dev_err(hdev, "Wrong op received %d expected %d",
+ wmt_evt->whdr.op, hdr->op);
+ err = -EIO;
+ goto err_free_skb;
+ }
+
+ switch (wmt_evt->whdr.op) {
+ case MTK_WMT_SEMAPHORE:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_UNDONE;
+ else
+ status = BTMTK_WMT_PATCH_DONE;
+ break;
+ case MTK_WMT_FUNC_CTRL:
+ wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
+ if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
+ status = BTMTK_WMT_ON_DONE;
+ else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
+ status = BTMTK_WMT_ON_PROGRESS;
+ else
+ status = BTMTK_WMT_ON_UNDONE;
+ break;
+ }
+
+ if (wmt_params->status)
+ *wmt_params->status = status;
+
+err_free_skb:
+ kfree_skb(bdev->evt_skb);
+ bdev->evt_skb = NULL;
+
+ return err;
+}
+
+static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
+ struct sk_buff *skb)
+{
+ struct mtkbtsdio_hdr *sdio_hdr;
+ int err;
+
+ /* Make sure there is enough room for the SDIO header */
+ if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) {
+ err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0,
+ GFP_ATOMIC);
+ if (err < 0)
+ return err;
+ }
+
+ /* Prepend MediaTek SDIO Specific Header */
+ skb_push(skb, sizeof(*sdio_hdr));
+
+ sdio_hdr = (void *)skb->data;
+ sdio_hdr->len = cpu_to_le16(skb->len);
+ sdio_hdr->reserved = cpu_to_le16(0);
+ sdio_hdr->bt_type = hci_skb_pkt_type(skb);
+
+ err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
+ round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
+ if (err < 0)
+ goto err_skb_pull;
+
+ bdev->hdev->stat.byte_tx += skb->len;
+
+ kfree_skb(skb);
+
+ return 0;
+
+err_skb_pull:
+ skb_pull(skb, sizeof(*sdio_hdr));
+
+ return err;
+}
+
+static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
+{
+ return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
+}
+
+static void btmtksdio_tx_work(struct work_struct *work)
+{
+ struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
+ tx_work);
+ struct sk_buff *skb;
+ int err;
+
+ pm_runtime_get_sync(bdev->dev);
+
+ sdio_claim_host(bdev->func);
+
+ while ((skb = skb_dequeue(&bdev->txq))) {
+ err = btmtksdio_tx_packet(bdev, skb);
+ if (err < 0) {
+ bdev->hdev->stat.err_tx++;
+ skb_queue_head(&bdev->txq, skb);
+ break;
+ }
+ }
+
+ sdio_release_host(bdev->func);
+
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+}
+
+static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ int err;
+
+ /* Fix up the vendor event id with 0xff for vendor specific instead
+ * of 0xe4 so that events sent via the monitoring socket can be
+ * parsed properly.
+ */
+ if (hdr->evt == 0xe4)
+ hdr->evt = HCI_EV_VENDOR;
+
+ /* When someone is waiting for the WMT event, the skb is cloned and
+ * the event is then processed from the clone.
+ */
+ if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) {
+ bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
+ if (!bdev->evt_skb) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ err = hci_recv_frame(hdev, skb);
+ if (err < 0)
+ goto err_free_skb;
+
+ if (hdr->evt == HCI_EV_VENDOR) {
+ if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
+ &bdev->tx_state)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT);
+ }
+ }
+
+ return 0;
+
+err_free_skb:
+ kfree_skb(bdev->evt_skb);
+ bdev->evt_skb = NULL;
+
+err_out:
+ return err;
+}
+
+static const struct h4_recv_pkt mtk_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = btmtksdio_recv_event },
+};
+
+static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
+{
+ const struct h4_recv_pkt *pkts = mtk_recv_pkts;
+ int pkts_count = ARRAY_SIZE(mtk_recv_pkts);
+ struct mtkbtsdio_hdr *sdio_hdr;
+ int err, i, pad_size;
+ struct sk_buff *skb;
+ u16 dlen;
+
+ if (rx_size < sizeof(*sdio_hdr))
+ return -EILSEQ;
+
+ /* An SDIO packet contains exactly one Bluetooth packet */
+ skb = bt_skb_alloc(rx_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, rx_size);
+
+ err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
+ if (err < 0)
+ goto err_kfree_skb;
+
+ sdio_hdr = (void *)skb->data;
+
+ /* Default the error to -EILSEQ simply to keep the error path
+ * cleaner.
+ */
+ err = -EILSEQ;
+
+ if (rx_size != le16_to_cpu(sdio_hdr->len)) {
+ bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched ");
+ goto err_kfree_skb;
+ }
+
+ hci_skb_pkt_type(skb) = sdio_hdr->bt_type;
+
+ /* Remove MediaTek SDIO header */
+ skb_pull(skb, sizeof(*sdio_hdr));
+
+ /* We have to dig into the packet to get the payload size and thus the
+ * number of padding bytes at the tail; these padding bytes must be
+ * removed before the packet is indicated to the core layer.
+ */
+ for (i = 0; i < pkts_count; i++) {
+ if (sdio_hdr->bt_type == (&pkts[i])->type)
+ break;
+ }
+
+ if (i >= pkts_count) {
+ bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x",
+ sdio_hdr->bt_type);
+ goto err_kfree_skb;
+ }
+
+ /* Remaining bytes cannot hold a header */
+ if (skb->len < (&pkts[i])->hlen) {
+ bt_dev_err(bdev->hdev, "The size of bt header is mismatched");
+ goto err_kfree_skb;
+ }
+
+ switch ((&pkts[i])->lsize) {
+ case 1:
+ dlen = skb->data[(&pkts[i])->loff];
+ break;
+ case 2:
+ dlen = get_unaligned_le16(skb->data +
+ (&pkts[i])->loff);
+ break;
+ default:
+ goto err_kfree_skb;
+ }
+
+ pad_size = skb->len - (&pkts[i])->hlen - dlen;
+
+ /* Remaining bytes cannot hold a payload */
+ if (pad_size < 0) {
+ bt_dev_err(bdev->hdev, "The size of bt payload is mismatched");
+ goto err_kfree_skb;
+ }
+
+ /* Remove padding bytes */
+ skb_trim(skb, skb->len - pad_size);
+
+ /* Complete frame */
+ (&pkts[i])->recv(bdev->hdev, skb);
+
+ bdev->hdev->stat.byte_rx += rx_size;
+
+ return 0;
+
+err_kfree_skb:
+ kfree_skb(skb);
+
+ return err;
+}
+
+static void btmtksdio_interrupt(struct sdio_func *func)
+{
+ struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
+ u32 int_status;
+ u16 rx_size;
+
+ /* The host must get ownership from the device before accessing any
+ * register. However, if the SDIO host is not released, a deadlock can
+ * occur in a circular wait between the SDIO IRQ work and the PM
+ * runtime work. So we have to explicitly release the SDIO host here
+ * and claim it again after the PM runtime work is all done.
+ */
+ sdio_release_host(bdev->func);
+
+ pm_runtime_get_sync(bdev->dev);
+
+ sdio_claim_host(bdev->func);
+
+ /* Disable interrupt */
+ sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
+
+ int_status = sdio_readl(func, MTK_REG_CHISR, NULL);
+
+ /* Ack the interrupt as soon as possible, before any operation on the
+ * hardware.
+ *
+ * Note that we don't ack any status during operations to avoid a race
+ * condition between the host and the device: it would be possible to
+ * mistakenly ack RX_DONE for the next packet, so that no further
+ * interrupt is raised even though data is still pending in the
+ * hardware FIFO.
+ */
+ sdio_writel(func, int_status, MTK_REG_CHISR, NULL);
+
+ if (unlikely(!int_status))
+ bt_dev_err(bdev->hdev, "CHISR is 0");
+
+ if (int_status & FW_OWN_BACK_INT)
+ bt_dev_dbg(bdev->hdev, "Get fw own back");
+
+ if (int_status & TX_EMPTY)
+ schedule_work(&bdev->tx_work);
+ else if (unlikely(int_status & TX_FIFO_OVERFLOW))
+ bt_dev_warn(bdev->hdev, "Tx fifo overflow");
+
+ if (int_status & RX_DONE_INT) {
+ rx_size = (int_status & RX_PKT_LEN) >> 16;
+
+ if (btmtksdio_rx_packet(bdev, rx_size) < 0)
+ bdev->hdev->stat.err_rx++;
+ }
+
+ /* Enable interrupt */
+ sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);
+
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+}
+
+static int btmtksdio_open(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ int err;
+ u32 status;
+
+ sdio_claim_host(bdev->func);
+
+ err = sdio_enable_func(bdev->func);
+ if (err < 0)
+ goto err_release_host;
+
+ /* Get ownership from the device */
+ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ status & C_COM_DRV_OWN, 2000, 1000000);
+ if (err < 0) {
+ bt_dev_err(bdev->hdev, "Cannot get ownership from device");
+ goto err_disable_func;
+ }
+
+ /* Disable interrupt & mask out all interrupt sources */
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = sdio_claim_irq(bdev->func, btmtksdio_interrupt);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* SDIO CMD 5 allows the SDIO device to go back to idle state, and
+ * synchronous interrupt is supported in SDIO 4-bit mode.
+ */
+ sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
+ MTK_REG_CSDIOCSR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Setup write-1-clear for CHISR register */
+ sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Setup interrupt sources */
+ sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW,
+ MTK_REG_CHIER, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Enable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ sdio_release_host(bdev->func);
+
+ return 0;
+
+err_release_irq:
+ sdio_release_irq(bdev->func);
+
+err_disable_func:
+ sdio_disable_func(bdev->func);
+
+err_release_host:
+ sdio_release_host(bdev->func);
+
+ return err;
+}
+
+static int btmtksdio_close(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ u32 status;
+ int err;
+
+ sdio_claim_host(bdev->func);
+
+ /* Disable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
+
+ sdio_release_irq(bdev->func);
+
+ /* Return ownership to the device */
+ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ !(status & C_COM_DRV_OWN), 2000, 1000000);
+ if (err < 0)
+ bt_dev_err(bdev->hdev, "Cannot return ownership to device");
+
+ sdio_disable_func(bdev->func);
+
+ sdio_release_host(bdev->func);
+
+ return 0;
+}
+
+static int btmtksdio_flush(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+
+ skb_queue_purge(&bdev->txq);
+
+ cancel_work_sync(&bdev->tx_work);
+
+ return 0;
+}
+
+static int btmtksdio_func_query(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ int status, err;
+ u8 param = 0;
+
+ /* Query whether the function is enabled */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 4;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query function status (%d)", err);
+ return err;
+ }
+
+ return status;
+}
+
+static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+ u8 flag;
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+ /* The patch header is 30 bytes and should be skipped */
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto free_fw;
+ }
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ wmt_params.op = MTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (fw_size > 0) {
+ dlen = min_t(int, 250, fw_size);
+
+ /* Tell device the position in sequence */
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto free_fw;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+ wmt_params.op = MTK_WMT_RST;
+ wmt_params.flag = 4;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+ /* Activate the function the firmware provides */
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ goto free_fw;
+ }
+
+ /* Wait a few moments for firmware activation to complete */
+ usleep_range(10000, 12000);
+
+free_fw:
+ release_firmware(fw);
+ return err;
+}
+
+static int btmtksdio_setup(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ ktime_t calltime, delta, rettime;
+ struct btmtk_tci_sleep tci_sleep;
+ unsigned long long duration;
+ struct sk_buff *skb;
+ int err, status;
+ u8 param = 0x1;
+
+ calltime = ktime_get();
+
+ /* Query whether the firmware is already downloaded */
+ wmt_params.op = MTK_WMT_SEMAPHORE;
+ wmt_params.flag = 1;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
+ return err;
+ }
+
+ if (status == BTMTK_WMT_PATCH_DONE) {
+ bt_dev_info(hdev, "Firmware already downloaded");
+ goto ignore_setup_fw;
+ }
+
+ /* Set up the firmware that the device requires */
+ err = mtk_setup_firmware(hdev, bdev->data->fwname);
+ if (err < 0)
+ return err;
+
+ignore_setup_fw:
+ /* Query whether the device is already enabled */
+ err = readx_poll_timeout(btmtksdio_func_query, hdev, status,
+ status < 0 || status != BTMTK_WMT_ON_PROGRESS,
+ 2000, 5000000);
+ /* -ETIMEDOUT happens */
+ if (err < 0)
+ return err;
+
+ /* The other errors happen in btmtksdio_func_query */
+ if (status < 0)
+ return status;
+
+ if (status == BTMTK_WMT_ON_DONE) {
+ bt_dev_info(hdev, "function already on");
+ goto ignore_func_on;
+ }
+
+ /* Enable Bluetooth protocol */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ignore_func_on:
+ /* Apply the low power environment setup */
+ tci_sleep.mode = 0x5;
+ tci_sleep.duration = cpu_to_le16(0x640);
+ tci_sleep.host_duration = cpu_to_le16(0x640);
+ tci_sleep.host_wakeup_pin = 0;
+ tci_sleep.time_compensation = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ pm_runtime_set_autosuspend_delay(bdev->dev,
+ MTKBTSDIO_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(bdev->dev);
+
+ err = pm_runtime_set_active(bdev->dev);
+ if (err < 0)
+ return err;
+
+ /* Runtime autosuspend is forbidden by default; it can be allowed via
+ * the enable_autosuspend module parameter or the PM runtime entry
+ * under sysfs.
+ */
+ pm_runtime_forbid(bdev->dev);
+ pm_runtime_enable(bdev->dev);
+
+ if (enable_autosuspend)
+ pm_runtime_allow(bdev->dev);
+
+ bt_dev_info(hdev, "Device setup in %llu usecs", duration);
+
+ return 0;
+}
+
+static int btmtksdio_shutdown(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ u8 param = 0x0;
+ int err;
+
+ /* Get the runtime PM state back to be consistent with the state
+ * left by btmtksdio_setup.
+ */
+ pm_runtime_get_sync(bdev->dev);
+
+ /* Disable the device */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ pm_runtime_put_noidle(bdev->dev);
+ pm_runtime_disable(bdev->dev);
+
+ return 0;
+}
+
+static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+
+ default:
+ return -EILSEQ;
+ }
+
+ skb_queue_tail(&bdev->txq, skb);
+
+ schedule_work(&bdev->tx_work);
+
+ return 0;
+}
+
+static int btmtksdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct btmtksdio_dev *bdev;
+ struct hci_dev *hdev;
+ int err;
+
+ bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ bdev->data = (void *)id->driver_data;
+ if (!bdev->data)
+ return -ENODEV;
+
+ bdev->dev = &func->dev;
+ bdev->func = func;
+
+ INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
+ skb_queue_head_init(&bdev->txq);
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(&func->dev, "Can't allocate HCI device\n");
+ return -ENOMEM;
+ }
+
+ bdev->hdev = hdev;
+
+ hdev->bus = HCI_SDIO;
+ hci_set_drvdata(hdev, bdev);
+
+ hdev->open = btmtksdio_open;
+ hdev->close = btmtksdio_close;
+ hdev->flush = btmtksdio_flush;
+ hdev->setup = btmtksdio_setup;
+ hdev->shutdown = btmtksdio_shutdown;
+ hdev->send = btmtksdio_send_frame;
+ SET_HCIDEV_DEV(hdev, &func->dev);
+
+ hdev->manufacturer = 70;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+
+ err = hci_register_dev(hdev);
+ if (err < 0) {
+ dev_err(&func->dev, "Can't register HCI device\n");
+ hci_free_dev(hdev);
+ return err;
+ }
+
+ sdio_set_drvdata(func, bdev);
+
+ /* pm_runtime_enable is done only after the firmware has been
+ * downloaded because the core layer may already have enabled
+ * runtime PM for this func, e.g. when host->caps has
+ * MMC_CAP_POWER_OFF_CARD set.
+ */
+ if (pm_runtime_enabled(bdev->dev))
+ pm_runtime_disable(bdev->dev);
+
+ /* As the explanation in drivers/mmc/core/sdio_bus.c tells us:
+ * Unbound SDIO functions are always suspended.
+ * During probe, the function is set active and the usage count
+ * is incremented. If the driver supports runtime PM,
+ * it should call pm_runtime_put_noidle() in its probe routine and
+ * pm_runtime_get_noresume() in its remove routine.
+ *
+ * So, call pm_runtime_put_noidle() here!
+ */
+ pm_runtime_put_noidle(bdev->dev);
+
+ return 0;
+}
+
+static void btmtksdio_remove(struct sdio_func *func)
+{
+ struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
+ struct hci_dev *hdev;
+
+ if (!bdev)
+ return;
+
+ /* Be consistent with the state in btmtksdio_probe */
+ pm_runtime_get_noresume(bdev->dev);
+
+ hdev = bdev->hdev;
+
+ sdio_set_drvdata(func, NULL);
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+}
+
+#ifdef CONFIG_PM
+static int btmtksdio_runtime_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmtksdio_dev *bdev;
+ u32 status;
+ int err;
+
+ bdev = sdio_get_drvdata(func);
+ if (!bdev)
+ return 0;
+
+ sdio_claim_host(bdev->func);
+
+ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto out;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ !(status & C_COM_DRV_OWN), 2000, 1000000);
+out:
+ bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);
+
+ sdio_release_host(bdev->func);
+
+ return err;
+}
+
+static int btmtksdio_runtime_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmtksdio_dev *bdev;
+ u32 status;
+ int err;
+
+ bdev = sdio_get_drvdata(func);
+ if (!bdev)
+ return 0;
+
+ sdio_claim_host(bdev->func);
+
+ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto out;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ status & C_COM_DRV_OWN, 2000, 1000000);
+out:
+ bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);
+
+ sdio_release_host(bdev->func);
+
+ return err;
+}
+
+static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend,
+ btmtksdio_runtime_resume, NULL);
+#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
+#else /* CONFIG_PM */
+#define BTMTKSDIO_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct sdio_driver btmtksdio_driver = {
+ .name = "btmtksdio",
+ .probe = btmtksdio_probe,
+ .remove = btmtksdio_remove,
+ .id_table = btmtksdio_table,
+ .drv = {
+ .owner = THIS_MODULE,
+ .pm = BTMTKSDIO_PM_OPS,
+ }
+};
+
+module_sdio_driver(btmtksdio_driver);
+
+module_param(enable_autosuspend, bool, 0644);
+MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default");
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7663);
+MODULE_FIRMWARE(FIRMWARE_MT7668);
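The new driver's synchronous WMT command path above pairs set_bit()/wait_on_bit_timeout() on the sender side with test_and_clear_bit()/wake_up_bit() in the vendor-event receive path. A condensed sketch of that handshake with illustrative names (demo_*, DEMO_WAIT_EVT); it only approximates the flow above and is not part of the patch:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define DEMO_WAIT_EVT	0

static unsigned long demo_state;

/* Command side: mark the wait bit, transmit, then sleep until it clears. */
static int demo_send_cmd_sync(void)
{
	set_bit(DEMO_WAIT_EVT, &demo_state);

	/* ... transmit the command to the controller here ... */

	return wait_on_bit_timeout(&demo_state, DEMO_WAIT_EVT,
				   TASK_INTERRUPTIBLE,
				   msecs_to_jiffies(1000));
}

/* Event side: clear the wait bit and wake up the sleeping sender. */
static void demo_rx_vendor_event(void)
{
	if (test_and_clear_bit(DEMO_WAIT_EVT, &demo_state)) {
		/* Order the atomic clear before the wake-up. */
		smp_mb__after_atomic();
		wake_up_bit(&demo_state, DEMO_WAIT_EVT);
	}
}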
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index b0b680dd69f4..f5dbeec8e274 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -661,7 +661,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
{
struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_params wmt_params;
- u32 baudrate;
+ __le32 baudrate;
u8 param;
int err;
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 612268574fc7..cc12eecd9e4d 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -336,7 +336,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
{
struct rome_config config;
int err;
- u8 rom_ver;
+ u8 rom_ver = 0;
bt_dev_dbg(hdev, "QCA setup on UART");
@@ -344,7 +344,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- if (soc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(soc_type)) {
/* Firmware files to download are based on ROM version.
* ROM version is derived from last two bytes of soc_ver.
*/
@@ -365,7 +365,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
- if (soc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
else
@@ -410,6 +410,7 @@ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr);
+
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index c72c56ea7480..4c4fe2b5b7b7 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -41,7 +41,7 @@
#define QCA_WCN3990_POWERON_PULSE 0xFC
#define QCA_WCN3990_POWEROFF_PULSE 0xC0
-enum qca_bardrate {
+enum qca_baudrate {
QCA_BAUDRATE_115200 = 0,
QCA_BAUDRATE_57600,
QCA_BAUDRATE_38400,
@@ -132,7 +132,8 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
- QCA_WCN3990
+ QCA_WCN3990,
+ QCA_WCN3998,
};
#if IS_ENABLED(CONFIG_BT_QCA)
@@ -142,6 +143,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver);
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+{
+ return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
+}
#else
static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
@@ -165,4 +170,8 @@ static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return -EOPNOTSUPP;
}
+static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+{
+ return false;
+}
#endif
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 282d1af1d3ba..4cfa9abe03c8 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -376,20 +376,7 @@ static struct sdio_driver btsdio_driver = {
.id_table = btsdio_table,
};
-static int __init btsdio_init(void)
-{
- BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION);
-
- return sdio_register_driver(&btsdio_driver);
-}
-
-static void __exit btsdio_exit(void)
-{
- sdio_unregister_driver(&btsdio_driver);
-}
-
-module_init(btsdio_init);
-module_exit(btsdio_exit);
+module_sdio_driver(btsdio_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ded198328f21..7db48ae65cd2 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
return 0;
}
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
0, "OOB Wake-on-BT", data);
if (ret) {
@@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
}
data->oob_wake_irq = irq;
- disable_irq(irq);
bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
return 0;
}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index ddbe518c3e5b..b5d31d583d60 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -228,9 +228,15 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
int err;
if (powered && !dev->res_enabled) {
- err = regulator_bulk_enable(BCM_NUM_SUPPLIES, dev->supplies);
- if (err)
- return err;
+ /* Intel Macs use bcm_apple_get_resources() and don't
+ * have regulator supplies configured.
+ */
+ if (dev->supplies[0].supply) {
+ err = regulator_bulk_enable(BCM_NUM_SUPPLIES,
+ dev->supplies);
+ if (err)
+ return err;
+ }
/* LPO clock needs to be 32.768 kHz */
err = clk_set_rate(dev->lpo_clk, 32768);
@@ -259,7 +265,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
if (!powered && dev->res_enabled) {
clk_disable_unprepare(dev->txco_clk);
clk_disable_unprepare(dev->lpo_clk);
- regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies);
+
+ /* Intel Macs use bcm_apple_get_resources() and don't
+ * have regulator supplies configured.
+ */
+ if (dev->supplies[0].supply)
+ regulator_bulk_disable(BCM_NUM_SUPPLIES,
+ dev->supplies);
}
/* wait for device to power on and come out of reset */
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 069d1c8fde73..3f02ae560120 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -536,7 +536,7 @@ static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
skb_put_data(h5->rx_skb, byte, 1);
h5->rx_pending--;
- BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
+ BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}
static void h5_reset_rx(struct h5 *h5)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 237aea34b69f..57322c42bb2d 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -54,9 +54,6 @@
#define HCI_IBS_WAKE_ACK 0xFC
#define HCI_MAX_IBS_SIZE 10
-/* Controller states */
-#define STATE_IN_BAND_SLEEP_ENABLED 1
-
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
#define IBS_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
@@ -67,6 +64,10 @@
/* Controller debug log header */
#define QCA_DEBUG_HANDLE 0x2EDC
+enum qca_flags {
+ QCA_IBS_ENABLED,
+};
+
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
HCI_IBS_TX_ASLEEP,
@@ -174,6 +175,21 @@ static int qca_power_setup(struct hci_uart *hu, bool on);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
+static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
+{
+ enum qca_btsoc_type soc_type;
+
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ soc_type = qsd->btsoc_type;
+ } else {
+ soc_type = QCA_ROME;
+ }
+
+ return soc_type;
+}
+
static void __serial_clock_on(struct tty_struct *tty)
{
/* TODO: Some chipset requires to enable UART clock on client
@@ -506,8 +522,10 @@ static int qca_open(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type != QCA_WCN3990) {
+ if (!qca_is_wcn399x(qcadev->btsoc_type)) {
gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ /* Controller needs time to boot up. */
+ msleep(150);
} else {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
@@ -612,7 +630,7 @@ static int qca_close(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qcadev->btsoc_type))
qca_power_shutdown(hu);
else
gpiod_set_value_cansleep(qcadev->bt_en, 0);
@@ -775,7 +793,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
/* Don't go to sleep in middle of patch download or
* Out-Of-Band(GPIOs control) sleep is selected.
*/
- if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) {
skb_queue_tail(&qca->txq, skb);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
return 0;
@@ -963,7 +981,6 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
- struct qca_serdev *qcadev;
struct sk_buff *skb;
u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
@@ -985,18 +1002,17 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
skb_queue_tail(&qca->txq, skb);
hci_uart_tx_wakeup(hu);
- qcadev = serdev_device_get_drvdata(hu->serdev);
-
/* Wait for the baudrate change request to be sent */
while (!skb_queue_empty(&qca->txq))
usleep_range(100, 200);
- serdev_device_wait_until_sent(hu->serdev,
+ if (hu->serdev)
+ serdev_device_wait_until_sent(hu->serdev,
msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
/* Give the controller time to process the request */
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qca_soc_type(hu)))
msleep(10);
else
msleep(300);
@@ -1072,10 +1088,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
- struct qca_serdev *qcadev;
-
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(qca_soc_type(hu))) {
if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
@@ -1091,7 +1104,6 @@ static int qca_check_speeds(struct hci_uart *hu)
static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
unsigned int speed, qca_baudrate;
- struct qca_serdev *qcadev;
int ret = 0;
if (speed_type == QCA_INIT_SPEED) {
@@ -1099,6 +1111,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
if (speed)
host_set_baudrate(hu, speed);
} else {
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
+
speed = qca_get_speed(hu, QCA_OPER_SPEED);
if (!speed)
return 0;
@@ -1106,8 +1120,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
/* Disable flow control for wcn3990 to deassert RTS while
* changing the baudrate of chip and host.
*/
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hci_uart_set_flow_control(hu, true);
qca_baudrate = qca_get_baudrate_value(speed);
@@ -1119,7 +1132,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
host_set_baudrate(hu, speed);
error:
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hci_uart_set_flow_control(hu, false);
}
@@ -1181,20 +1194,18 @@ static int qca_setup(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
- struct qca_serdev *qcadev;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
int ret;
int soc_ver = 0;
- qcadev = serdev_device_get_drvdata(hu->serdev);
-
ret = qca_check_speeds(hu);
if (ret)
return ret;
/* Patch downloading has to be done without IBS mode */
- clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
- if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(soc_type)) {
bt_dev_info(hdev, "setting up wcn3990");
/* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
@@ -1225,7 +1236,7 @@ static int qca_setup(struct hci_uart *hu)
qca_baudrate = qca_get_baudrate_value(speed);
}
- if (qcadev->btsoc_type != QCA_WCN3990) {
+ if (!qca_is_wcn399x(soc_type)) {
/* Get QCA version information */
ret = qca_read_soc_version(hdev, &soc_ver);
if (ret)
@@ -1234,9 +1245,9 @@ static int qca_setup(struct hci_uart *hu)
bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
/* Setup patch / NVM configurations */
- ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
+ ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver);
if (!ret) {
- set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ set_bit(QCA_IBS_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
@@ -1250,7 +1261,7 @@ static int qca_setup(struct hci_uart *hu)
}
/* Setup bdaddr */
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hu->hdev->set_bdaddr = qca_set_bdaddr;
else
hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
@@ -1273,7 +1284,7 @@ static struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
-static const struct qca_vreg_data qca_soc_data = {
+static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
{ "vddio", 1800000, 1900000, 15000 },
@@ -1284,6 +1295,17 @@ static const struct qca_vreg_data qca_soc_data = {
.num_vregs = 4,
};
+static const struct qca_vreg_data qca_soc_data_wcn3998 = {
+ .soc_type = QCA_WCN3998,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 1800000, 1900000, 10000 },
+ { "vddxo", 1800000, 1900000, 80000 },
+ { "vddrf", 1300000, 1352000, 300000 },
+ { "vddch0", 3300000, 3300000, 450000 },
+ },
+ .num_vregs = 4,
+};
+
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_data *qca = hu->priv;
@@ -1294,7 +1316,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
* data in skb's.
*/
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
- clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
qca_flush(hu);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
@@ -1417,8 +1439,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = of_device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
- if (data && data->soc_type == QCA_WCN3990) {
- qcadev->btsoc_type = QCA_WCN3990;
+ if (data && qca_is_wcn399x(data->soc_type)) {
+ qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
GFP_KERNEL);
@@ -1482,7 +1504,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
{
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qcadev->btsoc_type))
qca_power_shutdown(&qcadev->serdev_hu);
else
clk_disable_unprepare(qcadev->susclk);
@@ -1492,7 +1514,8 @@ static void qca_serdev_remove(struct serdev_device *serdev)
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
- { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data},
+ { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
+ { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
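
Most of the hci_qca hunks replace direct QCA_WCN3990 comparisons with qca_is_wcn399x(), and the new qca_soc_type() helper falls back to QCA_ROME when there is no serdev (the line-discipline case). The qca_is_wcn399x() definition is not part of this diff; presumably it sits next to the qca_btsoc_type enum and looks roughly like:

static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
	return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
}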
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index d299ec79e4c3..308475ed4b32 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -47,7 +47,10 @@ enum sysc_clocks {
SYSC_MAX_CLOCKS,
};
-static const char * const clock_names[SYSC_ICK + 1] = { "fck", "ick", };
+static const char * const clock_names[SYSC_MAX_CLOCKS] = {
+ "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
+ "opt5", "opt6", "opt7",
+};
#define SYSC_IDLEMODE_MASK 3
#define SYSC_CLOCKACTIVITY_MASK 3
@@ -75,6 +78,7 @@ struct sysc {
u32 module_size;
void __iomem *module_va;
int offsets[SYSC_MAX_REGS];
+ struct ti_sysc_module_data *mdata;
struct clk **clocks;
const char **clock_roles;
int nr_clocks;
@@ -94,7 +98,7 @@ struct sysc {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
-void sysc_write(struct sysc *ddata, int offset, u32 value)
+static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
writel_relaxed(value, ddata->module_va + offset);
}
@@ -128,6 +132,81 @@ static u32 sysc_read_revision(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+static int sysc_add_named_clock_from_child(struct sysc *ddata,
+ const char *name,
+ const char *optfck_name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct device_node *child;
+ struct clk_lookup *cl;
+ struct clk *clock;
+ const char *n;
+
+ if (name)
+ n = name;
+ else
+ n = optfck_name;
+
+ /* Does the clock alias already exist? */
+ clock = of_clk_get_by_name(np, n);
+ if (!IS_ERR(clock)) {
+ clk_put(clock);
+
+ return 0;
+ }
+
+ child = of_get_next_available_child(np, NULL);
+ if (!child)
+ return -ENODEV;
+
+ clock = devm_get_clk_from_child(ddata->dev, child, name);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ /*
+ * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
+ * limit for clk_get(). If cl ever needs to be freed, it should be done
+ * with clkdev_drop().
+ */
+ cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->con_id = n;
+ cl->dev_id = dev_name(ddata->dev);
+ cl->clk = clock;
+ clkdev_add(cl);
+
+ clk_put(clock);
+
+ return 0;
+}
+
+static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
+{
+ const char *optfck_name;
+ int error, index;
+
+ if (ddata->nr_clocks < SYSC_OPTFCK0)
+ index = SYSC_OPTFCK0;
+ else
+ index = ddata->nr_clocks;
+
+ if (name)
+ optfck_name = name;
+ else
+ optfck_name = clock_names[index];
+
+ error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
+ if (error)
+ return error;
+
+ ddata->clock_roles[index] = optfck_name;
+ ddata->nr_clocks++;
+
+ return 0;
+}
+
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
int error, i, index = -ENODEV;
@@ -199,6 +278,12 @@ static int sysc_get_clocks(struct sysc *ddata)
if (ddata->nr_clocks < 1)
return 0;
+ if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
+ error = sysc_init_ext_opt_clock(ddata, NULL);
+ if (error)
+ return error;
+ }
+
if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
dev_err(ddata->dev, "too many clocks for %pOF\n", np);
@@ -231,39 +316,125 @@ static int sysc_get_clocks(struct sysc *ddata)
return 0;
}
+static int sysc_enable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+}
+
+static int sysc_enable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return 0;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ clk_disable(clock);
+ }
+}
+
/**
- * sysc_init_resets - reset module on init
+ * sysc_init_resets - init rstctrl reset line if configured
* @ddata: device driver data
*
- * A module can have both OCP softreset control and external rstctrl.
- * If more complicated rstctrl resets are needed, please handle these
- * directly from the child device driver and map only the module reset
- * for the parent interconnect target module device.
- *
- * Automatic reset of the module on init can be skipped with the
- * "ti,no-reset-on-init" device tree property.
+ * See sysc_rstctrl_reset_deassert().
*/
static int sysc_init_resets(struct sysc *ddata)
{
- int error;
-
ddata->rsts =
devm_reset_control_array_get_optional_exclusive(ddata->dev);
if (IS_ERR(ddata->rsts))
return PTR_ERR(ddata->rsts);
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
- goto deassert;
-
- error = reset_control_assert(ddata->rsts);
- if (error)
- return error;
-
-deassert:
- error = reset_control_deassert(ddata->rsts);
- if (error)
- return error;
-
return 0;
}
@@ -622,91 +793,239 @@ static void sysc_show_registers(struct sysc *ddata)
buf);
}
-static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
+
+static int sysc_enable_module(struct device *dev)
{
- struct ti_sysc_platform_data *pdata;
struct sysc *ddata;
- int error = 0, i;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
- if (!ddata->enabled)
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ goto set_midle;
+
+ best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+
+set_midle:
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
return 0;
- if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ best_mode = fls(ddata->cfg.midlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return -EINVAL;
+ }
- if (!pdata->idle_module)
- return -ENODEV;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- error = pdata->idle_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not idle: %i\n",
- __func__, error);
+ return 0;
+}
+
+static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
+{
+ if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
+ *best_mode = SYSC_IDLE_SMART_WKUP;
+ else if (idlemodes & BIT(SYSC_IDLE_SMART))
+ *best_mode = SYSC_IDLE_SMART;
+ else if (idlemodes & SYSC_IDLE_FORCE)
+ *best_mode = SYSC_IDLE_FORCE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sysc_disable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int ret;
- goto idled;
+ ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_sidle;
+
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return ret;
}
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+set_sidle:
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ return 0;
- clk_disable(ddata->clocks[i]);
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return ret;
}
-idled:
- ddata->enabled = false;
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- return error;
+ return 0;
}
-static int __maybe_unused sysc_runtime_resume(struct device *dev)
+static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
+ struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->idle_module)
+ return -ENODEV;
+
+ error = pdata->idle_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not idle: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->enable_module)
+ return -ENODEV;
+
+ error = pdata->enable_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not enable: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
struct sysc *ddata;
- int error = 0, i;
+ int error = 0;
ddata = dev_get_drvdata(dev);
- if (ddata->enabled)
+ if (!ddata->enabled)
return 0;
if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ error = sysc_runtime_suspend_legacy(dev, ddata);
+ if (error)
+ return error;
+ } else {
+ error = sysc_disable_module(dev);
+ if (error)
+ return error;
+ }
- if (!pdata->enable_module)
- return -ENODEV;
+ sysc_disable_main_clocks(ddata);
- error = pdata->enable_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not enable: %i\n",
- __func__, error);
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
- goto awake;
- }
+ ddata->enabled = false;
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ return error;
+}
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
- error = clk_enable(ddata->clocks[i]);
+ if (ddata->enabled)
+ return 0;
+
+ if (sysc_opt_clks_needed(ddata)) {
+ error = sysc_enable_opt_clocks(ddata);
if (error)
return error;
}
-awake:
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_resume_legacy(dev, ddata);
+ if (error)
+ goto err_main_clocks;
+ } else {
+ error = sysc_enable_module(dev);
+ if (error)
+ goto err_main_clocks;
+ }
+
ddata->enabled = true;
+ return 0;
+
+err_main_clocks:
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+
return error;
}
@@ -788,12 +1107,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
+ SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
+ SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -805,6 +1129,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
0xffff00f0, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0x00001401, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
@@ -853,6 +1178,42 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
#endif
};
+/*
+ * Early quirks based on module base and register offsets only that are
+ * needed before the module revision can be read
+ */
+static void sysc_init_early_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (!q->base)
+ continue;
+
+ if (q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset >= 0 &&
+ q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset >= 0 &&
+ q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset >= 0 &&
+ q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+}
+
+/* Quirks that also consider the revision register value */
static void sysc_init_revision_quirks(struct sysc *ddata)
{
const struct sysc_revision_quirk *q;
@@ -885,6 +1246,55 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/*
+ * Note that pdata->init_module() typically does a reset first. After
+ * pdata->init_module() is done, PM runtime can be used for the interconnect
+ * target module.
+ */
+static int sysc_legacy_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ int error;
+
+ if (!ddata->legacy_mode || !pdata || !pdata->init_module)
+ return 0;
+
+ error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
+ if (error == -EEXIST)
+ error = 0;
+
+ return error;
+}
+
+/**
+ * sysc_rstctrl_reset_deassert - deassert rstctrl reset
+ * @ddata: device driver data
+ * @reset: reset before deassert
+ *
+ * A module can have both OCP softreset control and external rstctrl.
+ * If more complicated rstctrl resets are needed, please handle these
+ * directly from the child device driver and map only the module reset
+ * for the parent interconnect target module device.
+ *
+ * Automatic reset of the module on init can be skipped with the
+ * "ti,no-reset-on-init" device tree property.
+ */
+static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
+{
+ int error;
+
+ if (!ddata->rsts)
+ return 0;
+
+ if (reset) {
+ error = reset_control_assert(ddata->rsts);
+ if (error)
+ return error;
+ }
+
+ return reset_control_deassert(ddata->rsts);
+}
+
static int sysc_reset(struct sysc *ddata)
{
int offset = ddata->offsets[SYSC_SYSCONFIG];
@@ -915,38 +1325,58 @@ static int sysc_reset(struct sysc *ddata)
100, MAX_MODULE_SOFTRESET_WAIT);
}
-/* At this point the module is configured enough to read the revision */
+/*
+ * At this point the module is configured enough to read the revision but
+ * module may not be completely configured yet to use PM runtime. Enable
+ * all clocks directly during init to configure the quirks needed for PM
+ * runtime based on the revision register.
+ */
static int sysc_init_module(struct sysc *ddata)
{
- int error;
+ int error = 0;
+ bool manage_clocks = true;
+ bool reset = true;
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE_ON_INIT) {
- ddata->revision = sysc_read_revision(ddata);
- goto rev_quirks;
- }
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ reset = false;
- error = pm_runtime_get_sync(ddata->dev);
- if (error < 0) {
- pm_runtime_put_noidle(ddata->dev);
+ error = sysc_rstctrl_reset_deassert(ddata, reset);
+ if (error)
+ return error;
- return 0;
- }
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))
+ manage_clocks = false;
- error = sysc_reset(ddata);
- if (error) {
- dev_err(ddata->dev, "Reset failed with %d\n", error);
- pm_runtime_put_sync(ddata->dev);
+ if (manage_clocks) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ return error;
- return error;
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
}
ddata->revision = sysc_read_revision(ddata);
- pm_runtime_put_sync(ddata->dev);
-
-rev_quirks:
sysc_init_revision_quirks(ddata);
- return 0;
+ error = sysc_legacy_init(ddata);
+ if (error)
+ goto err_main_clocks;
+
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
+
+err_main_clocks:
+ if (manage_clocks)
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (manage_clocks)
+ sysc_disable_opt_clocks(ddata);
+
+ return error;
}
static int sysc_init_sysc_mask(struct sysc *ddata)
@@ -1208,7 +1638,7 @@ static int sysc_child_resume_noirq(struct device *dev)
}
#endif
-struct dev_pm_domain sysc_child_pm_domain = {
+static struct dev_pm_domain sysc_child_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
sysc_child_runtime_resume,
@@ -1281,6 +1711,8 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = {
.mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
{ .name = "ti,no-reset-on-init",
.mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
+ { .name = "ti,no-idle",
+ .mask = SYSC_QUIRK_NO_IDLE, },
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -1331,6 +1763,9 @@ static void sysc_unprepare(struct sysc *ddata)
{
int i;
+ if (!ddata->clocks)
+ return;
+
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
if (!IS_ERR_OR_NULL(ddata->clocks[i]))
clk_unprepare(ddata->clocks[i]);
@@ -1576,28 +2011,26 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
- struct ti_sysc_module_data mdata;
- int error = 0;
+ struct ti_sysc_module_data *mdata;
if (!pdata || !ddata->legacy_mode)
return 0;
- mdata.name = ddata->legacy_mode;
- mdata.module_pa = ddata->module_pa;
- mdata.module_size = ddata->module_size;
- mdata.offsets = ddata->offsets;
- mdata.nr_offsets = SYSC_MAX_REGS;
- mdata.cap = ddata->cap;
- mdata.cfg = &ddata->cfg;
+ mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
- if (!pdata->init_module)
- return -ENODEV;
+ mdata->name = ddata->legacy_mode;
+ mdata->module_pa = ddata->module_pa;
+ mdata->module_size = ddata->module_size;
+ mdata->offsets = ddata->offsets;
+ mdata->nr_offsets = SYSC_MAX_REGS;
+ mdata->cap = ddata->cap;
+ mdata->cfg = &ddata->cfg;
- error = pdata->init_module(ddata->dev, &mdata, &ddata->cookie);
- if (error == -EEXIST)
- error = 0;
+ ddata->mdata = mdata;
- return error;
+ return 0;
}
static int sysc_init_match(struct sysc *ddata)
@@ -1651,10 +2084,6 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
- error = sysc_get_clocks(ddata);
- if (error)
- return error;
-
error = sysc_map_and_check_registers(ddata);
if (error)
goto unprepare;
@@ -1675,15 +2104,21 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
+ sysc_init_early_quirks(ddata);
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
error = sysc_init_resets(ddata);
if (error)
return error;
- pm_runtime_enable(ddata->dev);
error = sysc_init_module(ddata);
if (error)
goto unprepare;
+ pm_runtime_enable(ddata->dev);
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
pm_runtime_put_noidle(ddata->dev);
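
Taken together, the ti-sysc changes move quirk detection and clock handling ahead of the first module access: early quirks are derived from the module address, clocks (including any external optional clock alias) are acquired next, the rstctrl reset is deasserted from sysc_init_module(), and PM runtime is enabled only after the module has been brought up once by hand. A condensed sketch of the resulting probe ordering, omitting the error labels and unrelated steps (this is a summary, not the full sysc_probe()):

static int ex_sysc_probe_order(struct sysc *ddata)
{
	int error;

	sysc_init_early_quirks(ddata);	 /* quirks known before clocks/reset */

	error = sysc_get_clocks(ddata);	 /* may add the EXT_OPT_CLOCK alias */
	if (error)
		return error;

	error = sysc_init_resets(ddata); /* rstctrl lookup only, no reset yet */
	if (error)
		return error;

	error = sysc_init_module(ddata); /* clocks on, revision, legacy init, reset */
	if (error)
		return error;

	pm_runtime_enable(ddata->dev);	 /* only once the module is usable */
	return 0;
}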
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index f8b7345fe1cb..5cf3bade0d57 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -786,6 +786,7 @@ static int probe_gdrom(struct platform_device *devptr)
goto probe_fail_cdrom_register;
}
gd.disk->fops = &gdrom_bdops;
+ gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
/* latch on to the interrupt */
err = gdrom_set_interrupt_handlers();
if (err)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72866a004f07..466ebd84ad17 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
- depends on TTY
+ depends on TTY && BROKEN
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index a5ecf6dae02e..373f549525fe 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -212,7 +212,7 @@ static void ds1620_read_state(struct therm *therm)
static int ds1620_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static ssize_t
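
Several character drivers in this series (ds1620, dtlk, ipmi_watchdog, cm4000_cs, scr24x_cs, tb0219) switch from nonseekable_open() to stream_open(). The latter marks the file as FMODE_STREAM, so read() and write() stop serializing on the file position entirely instead of merely disallowing lseek(). The conversion is a one-liner, sketched here with a placeholder device name:

#include <linux/fs.h>

static int exdev_open(struct inode *inode, struct file *file)
{
	/* No meaningful f_pos: skip the position locking, forbid lseek(). */
	return stream_open(inode, file);
}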
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index f882460b5a44..4fed8fafa0f0 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -298,12 +298,11 @@ static int dtlk_open(struct inode *inode, struct file *file)
{
TRACE_TEXT("(dtlk_open");
- nonseekable_open(inode, file);
switch (iminor(inode)) {
case DTLK_MINOR:
if (dtlk_busy)
return -EBUSY;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
default:
return -ENXIO;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d0ad85900b79..3a1e6b3ccd10 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -973,6 +973,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
if (ACPI_SUCCESS(status)) {
hdp->hd_phys_address = addr.address.minimum;
hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
+ if (!hdp->hd_address)
+ return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b65ff6962899..e9b6ac61fb7f 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
priv->rng.read = omap_rng_do_read;
priv->rng.init = omap_rng_init;
priv->rng.cleanup = omap_rng_cleanup;
+ priv->rng.quality = 900;
priv->rng.priv = (unsigned long)priv;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 042860d97b15..0ef5b6a3f560 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -161,6 +161,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
#endif
priv->rng.read = stm32_rng_read,
priv->rng.priv = (unsigned long) dev;
+ priv->rng.quality = 900;
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
@@ -169,6 +170,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
return devm_hwrng_register(dev, &priv->rng);
}
+static int stm32_rng_remove(struct platform_device *ofdev)
+{
+ pm_runtime_disable(&ofdev->dev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int stm32_rng_runtime_suspend(struct device *dev)
{
@@ -210,6 +218,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
+ .remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 99c9f581a1f3..f7b1c004a12b 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -29,7 +29,6 @@ struct ipmi_file_private
struct ipmi_user *user;
spinlock_t recv_msg_lock;
struct list_head recv_msgs;
- struct file *file;
struct fasync_struct *fasync_queue;
wait_queue_head_t wait;
struct mutex recv_mutex;
@@ -95,8 +94,6 @@ static int ipmi_open(struct inode *inode, struct file *file)
if (!priv)
return -ENOMEM;
- priv->file = file;
-
rv = ipmi_create_user(if_num,
&ipmi_hndlrs,
priv,
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index ff0b199be472..f38e651dd1b5 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -47,9 +47,11 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
memset(&p, 0, sizeof(p));
name = "dmi-ipmi-si";
+ p.iftype = IPMI_PLAT_IF_SI;
switch (type) {
case IPMI_DMI_TYPE_SSIF:
name = "dmi-ipmi-ssif";
+ p.iftype = IPMI_PLAT_IF_SSIF;
p.type = SI_TYPE_INVALID;
break;
case IPMI_DMI_TYPE_BT:
@@ -66,7 +68,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
return;
}
- memset(&p, 0, sizeof(p));
p.addr = base_addr;
p.space = space;
p.regspacing = offset;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e8ba67834746..1dc10740fc0f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -214,6 +214,9 @@ struct ipmi_user {
/* Does this interface receive IPMI events? */
bool gets_events;
+
+ /* Free must run in process context for RCU cleanup. */
+ struct work_struct remove_work;
};
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
@@ -632,7 +635,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-struct srcu_struct ipmi_interfaces_srcu;
+static struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
return rv;
}
+static void free_user_work(struct work_struct *work)
+{
+ struct ipmi_user *user = container_of(work, struct ipmi_user,
+ remove_work);
+
+ cleanup_srcu_struct(&user->release_barrier);
+ kfree(user);
+}
+
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
@@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int if_num,
goto out_kfree;
found:
+ INIT_WORK(&new_user->remove_work, free_user_work);
+
rv = init_srcu_struct(&new_user->release_barrier);
if (rv)
goto out_kfree;
@@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
static void free_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
- cleanup_srcu_struct(&user->release_barrier);
- kfree(user);
+
+ /* SRCU cleanup must happen in task context. */
+ schedule_work(&user->remove_work);
}
static void _ipmi_destroy_user(struct ipmi_user *user)
@@ -5164,7 +5179,7 @@ static void __exit cleanup_ipmi(void)
* avoids problems with race conditions removing the timer
* here.
*/
- atomic_inc(&stop_operation);
+ atomic_set(&stop_operation, 1);
del_timer_sync(&ipmi_timer);
initialized = false;
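
The ipmi_msghandler change defers user teardown to a work item because cleanup_srcu_struct() can sleep, while the final kref_put() may happen in a context where sleeping is not allowed. The pattern, reduced to a placeholder structure:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>

struct ex_user {
	struct kref refcount;
	struct srcu_struct release_barrier;
	struct work_struct remove_work;	/* INIT_WORK()ed at creation time */
};

static void ex_free_work(struct work_struct *work)
{
	struct ex_user *user = container_of(work, struct ex_user, remove_work);

	/* cleanup_srcu_struct() may sleep, so run it from process context. */
	cleanup_srcu_struct(&user->release_barrier);
	kfree(user);
}

static void ex_release(struct kref *ref)
{
	struct ex_user *user = container_of(ref, struct ex_user, refcount);

	/* Called from kref_put(); punt the heavy lifting to a worker. */
	schedule_work(&user->remove_work);
}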
diff --git a/drivers/char/ipmi/ipmi_plat_data.c b/drivers/char/ipmi/ipmi_plat_data.c
index 8f0ca2a848eb..28471ff2a3a3 100644
--- a/drivers/char/ipmi/ipmi_plat_data.c
+++ b/drivers/char/ipmi/ipmi_plat_data.c
@@ -12,7 +12,7 @@ struct platform_device *ipmi_platform_add(const char *name, unsigned int inst,
struct ipmi_plat_data *p)
{
struct platform_device *pdev;
- unsigned int num_r = 1, size, pidx = 0;
+ unsigned int num_r = 1, size = 0, pidx = 0;
struct resource r[4];
struct property_entry pr[6];
u32 flags;
@@ -21,19 +21,22 @@ struct platform_device *ipmi_platform_add(const char *name, unsigned int inst,
memset(pr, 0, sizeof(pr));
memset(r, 0, sizeof(r));
- if (p->type == SI_BT)
- size = 3;
- else if (p->type == SI_TYPE_INVALID)
- size = 0;
- else
- size = 2;
+ if (p->iftype == IPMI_PLAT_IF_SI) {
+ if (p->type == SI_BT)
+ size = 3;
+ else if (p->type != SI_TYPE_INVALID)
+ size = 2;
+
+ if (p->regsize == 0)
+ p->regsize = DEFAULT_REGSIZE;
+ if (p->regspacing == 0)
+ p->regspacing = p->regsize;
- if (p->regsize == 0)
- p->regsize = DEFAULT_REGSIZE;
- if (p->regspacing == 0)
- p->regspacing = p->regsize;
+ pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type);
+ } else if (p->iftype == IPMI_PLAT_IF_SSIF) {
+ pr[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", p->addr);
+ }
- pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type);
if (p->slave_addr)
pr[pidx++] = PROPERTY_ENTRY_U8("slave-addr", p->slave_addr);
pr[pidx++] = PROPERTY_ENTRY_U8("addr-source", p->addr_source);
diff --git a/drivers/char/ipmi/ipmi_plat_data.h b/drivers/char/ipmi/ipmi_plat_data.h
index 567cfcec8ada..9ba744ea9571 100644
--- a/drivers/char/ipmi/ipmi_plat_data.h
+++ b/drivers/char/ipmi/ipmi_plat_data.h
@@ -6,7 +6,10 @@
#include <linux/ipmi.h>
+enum ipmi_plat_interface_type { IPMI_PLAT_IF_SI, IPMI_PLAT_IF_SSIF };
+
struct ipmi_plat_data {
+ enum ipmi_plat_interface_type iftype;
unsigned int type; /* si_type for si, SI_INVALID for others */
unsigned int space; /* addr_space for si, intf# for ssif. */
unsigned long addr;
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
index 01946cad3d13..f6ece7569504 100644
--- a/drivers/char/ipmi/ipmi_si_hardcode.c
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -83,6 +83,7 @@ static void __init ipmi_hardcode_init_one(const char *si_type_str,
memset(&p, 0, sizeof(p));
+ p.iftype = IPMI_PLAT_IF_SI;
if (!si_type_str || !*si_type_str || strcmp(si_type_str, "kcs") == 0) {
p.type = SI_KCS;
} else if (strcmp(si_type_str, "smic") == 0) {
@@ -118,6 +119,8 @@ void __init ipmi_hardcode_init(void)
char *str;
char *si_type[SI_MAX_PARMS];
+ memset(si_type, 0, sizeof(si_type));
+
/* Parse out the si_type string into its components. */
str = si_type_str;
if (*str != '\0') {
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
index 03140f6cdf6f..42a925f8cf69 100644
--- a/drivers/char/ipmi/ipmi_si_hotmod.c
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -108,6 +108,7 @@ static int parse_hotmod_str(const char *curr, enum hotmod_op *op,
int rv;
unsigned int ival;
+ h->iftype = IPMI_PLAT_IF_SI;
rv = parse_str(hotmod_ops, &ival, "operation", &curr);
if (rv)
return rv;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b1732882b97e..f124a2d2bb9f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1931,7 +1931,6 @@ static int try_smi_init(struct smi_info *new_smi)
{
int rv = 0;
int i;
- char *init_name = NULL;
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
@@ -2073,7 +2072,6 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->io.io_cleanup = NULL;
}
- kfree(init_name);
return rv;
}
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 54c7ded2a1ff..f2a91c4d8cab 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -188,12 +188,10 @@ static int platform_ipmi_probe(struct platform_device *pdev)
return -EINVAL;
rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
- if (rv) {
- dev_warn(&pdev->dev, "device has no slave-addr property\n");
+ if (rv)
io.slave_addr = 0x20;
- } else {
+ else
io.slave_addr = slave_addr;
- }
io.irq = platform_get_irq(pdev, 0);
if (io.irq > 0)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 8b5aec5430f1..cf8156d6bc07 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -727,12 +727,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
- } else if (blocknum != ssif_info->multi_pos) {
+ } else if (blocknum + 1 != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
* but multi_pos starts at one, so the +1.
*/
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received message out of sequence, expected %u, got %u\n",
+ ssif_info->multi_pos - 1, blocknum);
result = -EIO;
} else {
ssif_inc_stat(ssif_info, received_message_parts);
@@ -1991,7 +1995,7 @@ static int dmi_ipmi_probe(struct platform_device *pdev)
rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
if (rv)
- dev_warn(&pdev->dev, "device has no slave-addr property");
+ slave_addr = 0x20;
return new_ssif_client(i2c_addr, NULL, 0,
slave_addr, SI_SMBIOS, &pdev->dev);
@@ -2107,7 +2111,8 @@ static void cleanup_ipmi_ssif(void)
kfree(ssif_i2c_driver.address_list);
- platform_driver_unregister(&ipmi_driver);
+ if (ssif_trydmi)
+ platform_driver_unregister(&ipmi_driver);
free_ssif_clients();
}
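
A worked example of the sequencing check fixed above: the block number carried in the data restarts at zero for the second block of a multi-part response, while multi_pos starts at one, so when block n arrives in order multi_pos already holds n + 1. The in-sequence condition is therefore blocknum + 1 == multi_pos, which is why the abort path now tests blocknum + 1 != multi_pos and the new debug message reports the expected value as multi_pos - 1.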
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 2924a4bc4a32..74c6d1f34132 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -837,7 +837,7 @@ static int ipmi_open(struct inode *ino, struct file *filep)
* first heartbeat.
*/
ipmi_start_timer_on_heartbeat = 1;
- return nonseekable_open(ino, filep);
+ return stream_open(ino, filep);
default:
return (-ENODEV);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 7a4eb86aedac..15bf585af5d3 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1682,7 +1682,7 @@ static int cmm_open(struct inode *inode, struct file *filp)
link->open = 1; /* only one open per device */
DEBUGP(2, dev, "<- cmm_open\n");
- ret = nonseekable_open(inode, filp);
+ ret = stream_open(inode, filp);
out:
mutex_unlock(&cmm_mutex);
return ret;
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
index f6b43d9350f0..04b39c3596cc 100644
--- a/drivers/char/pcmcia/scr24x_cs.c
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -92,7 +92,7 @@ static int scr24x_open(struct inode *inode, struct file *filp)
kref_get(&dev->refcnt);
filp->private_data = dev;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int scr24x_release(struct inode *inode, struct file *filp)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38c6d1af6d1c..a42b3d764da8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -101,15 +101,13 @@
* Exported interfaces ---- output
* ===============================
*
- * There are three exported interfaces; the first is one designed to
- * be used from within the kernel:
+ * There are four exported interfaces; two for use within the kernel,
+ * and two for use from userspace.
*
- * void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer.
+ * Exported interfaces ---- userspace output
+ * -----------------------------------------
*
- * The two other interfaces are two character devices /dev/random and
+ * The userspace interfaces are two character devices /dev/random and
* /dev/urandom. /dev/random is suitable for use when very high
* quality randomness is desired (for example, for key generation or
* one-time pads), as it will only return a maximum of the number of
@@ -122,6 +120,77 @@
* this will result in random numbers that are merely cryptographically
* strong. For many applications, however, this is acceptable.
*
+ * Exported interfaces ---- kernel output
+ * --------------------------------------
+ *
+ * The primary kernel interface is
+ *
+ * void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place it in the requested buffer. This is equivalent to a
+ * read from /dev/urandom.
+ *
+ * For less critical applications, there are the functions:
+ *
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
+ *
+ * These are produced by a cryptographic RNG seeded from get_random_bytes,
+ * and so do not deplete the entropy pool as much. These are recommended
+ * for most in-kernel operations *if the result is going to be stored in
+ * the kernel*.
+ *
+ * Specifically, the get_random_int() family do not attempt to do
+ * "anti-backtracking". If you capture the state of the kernel (e.g.
+ * by snapshotting the VM), you can figure out previous get_random_int()
+ * return values. But if the value is stored in the kernel anyway,
+ * this is not a problem.
+ *
+ * It *is* safe to expose get_random_int() output to attackers (e.g. as
+ * network cookies); given outputs 1..n, it's not feasible to predict
+ * outputs 0 or n+1. The only concern is an attacker who breaks into
+ * the kernel later; the get_random_int() engine is not reseeded as
+ * often as the get_random_bytes() one.
+ *
+ * get_random_bytes() is needed for keys that need to stay secret after
+ * they are erased from the kernel. For example, any key that will
+ * be wrapped and stored encrypted. And session encryption keys: we'd
+ * like to know that after the session is closed and the keys erased,
+ * the plaintext is unrecoverable to someone who recorded the ciphertext.
+ *
+ * But for network ports/cookies, stack canaries, PRNG seeds, address
+ * space layout randomization, session *authentication* keys, or other
+ * applications where the sensitive data is stored in the kernel in
+ * plaintext for as long as it's sensitive, the get_random_int() family
+ * is just fine.
+ *
+ * Consider ASLR. We want to keep the address space secret from an
+ * outside attacker while the process is running, but once the address
+ * space is torn down, it's of no use to an attacker any more. And it's
+ * stored in kernel data structures as long as it's alive, so worrying
+ * about an attacker's ability to extrapolate it from the get_random_int()
+ * CRNG is silly.
+ *
+ * Even some cryptographic keys are safe to generate with get_random_int().
+ * In particular, keys for SipHash are generally fine. Here, knowledge
+ * of the key authorizes you to do something to a kernel object (inject
+ * packets to a network connection, or flood a hash table), and the
+ * key is stored with the object being protected. Once it goes away,
+ * we no longer care if anyone knows the key.
+ *
+ * prandom_u32()
+ * -------------
+ *
+ * For even weaker applications, see the pseudorandom generator
+ * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
+ * numbers aren't security-critical at all, these are *far* cheaper.
+ * Useful for self-tests, random error simulation, randomized backoffs,
+ * and any other application where you trust that nobody is trying to
+ * maliciously mess with you by guessing the "random" numbers.
+ *
* Exported interfaces ---- input
* ==============================
*
@@ -295,7 +364,7 @@
* To allow fractional bits to be tracked, the entropy_count field is
* denominated in units of 1/8th bits.
*
- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
+ * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
* credit_entropy_bits() needs to be 64 bits wide.
*/
#define ENTROPY_SHIFT 3
@@ -359,9 +428,9 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* polynomial which improves the resulting TGFSR polynomial to be
* irreducible, which we have made here.
*/
-static struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+static const struct poolinfo {
+ int poolbitshift, poolwords, poolbytes, poolfracbits;
+#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
@@ -415,7 +484,7 @@ struct crng_state {
spinlock_t lock;
};
-struct crng_state primary_crng = {
+static struct crng_state primary_crng = {
.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};
@@ -470,7 +539,6 @@ struct entropy_store {
unsigned short add_ptr;
unsigned short input_rotate;
int entropy_count;
- int entropy_total;
unsigned int initialized:1;
unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
@@ -643,7 +711,7 @@ static void process_random_ready_list(void)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
- int entropy_count, orig;
+ int entropy_count, orig, has_initialized = 0;
const int pool_size = r->poolinfo->poolfracbits;
int nfrac = nbits << ENTROPY_SHIFT;
@@ -698,23 +766,25 @@ retry:
entropy_count = 0;
} else if (entropy_count > pool_size)
entropy_count = pool_size;
+ if ((r == &blocking_pool) && !r->initialized &&
+ (entropy_count >> ENTROPY_SHIFT) > 128)
+ has_initialized = 1;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
- r->entropy_total += nbits;
- if (!r->initialized && r->entropy_total > 128) {
+ if (has_initialized)
r->initialized = 1;
- r->entropy_total = 0;
- }
trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT,
- r->entropy_total, _RET_IP_);
+ entropy_count >> ENTROPY_SHIFT, _RET_IP_);
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+ struct entropy_store *other = &blocking_pool;
- if (crng_init < 2 && entropy_bits >= 128) {
+ if (crng_init < 2) {
+ if (entropy_bits < 128)
+ return;
crng_reseed(&primary_crng, r);
entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
}
@@ -725,20 +795,14 @@ retry:
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
- /* If the input pool is getting full, send some
- * entropy to the blocking pool until it is 75% full.
+ /* If the input pool is getting full, and the blocking
+ * pool has room, send some entropy to the blocking
+ * pool.
*/
- if (entropy_bits > random_write_wakeup_bits &&
- r->initialized &&
- r->entropy_total >= 2*random_read_wakeup_bits) {
- struct entropy_store *other = &blocking_pool;
-
- if (other->entropy_count <=
- 3 * other->poolinfo->poolfracbits / 4) {
- schedule_work(&other->push_work);
- r->entropy_total = 0;
- }
- }
+ if (!work_pending(&other->push_work) &&
+ (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
+ (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
+ schedule_work(&other->push_work);
}
}
@@ -777,6 +841,7 @@ static struct crng_state **crng_node_pool __read_mostly;
#endif
static void invalidate_batched_entropy(void);
+static void numa_crng_init(void);
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
@@ -805,7 +870,9 @@ static void crng_initialize(struct crng_state *crng)
}
crng->state[i] ^= rv;
}
- if (trust_cpu && arch_init) {
+ if (trust_cpu && arch_init && crng == &primary_crng) {
+ invalidate_batched_entropy();
+ numa_crng_init();
crng_init = 2;
pr_notice("random: crng done (trusting CPU's manufacturer)\n");
}
@@ -1553,6 +1620,11 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
int large_request = (nbytes > 256);
trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+ if (!r->initialized && r->pull) {
+ xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
+ if (!r->initialized)
+ return 0;
+ }
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
@@ -1783,7 +1855,7 @@ EXPORT_SYMBOL(get_random_bytes_arch);
* data into the pool to prepare it for use. The pool is not cleared
* as that can only decrease the entropy in the pool.
*/
-static void init_std_data(struct entropy_store *r)
+static void __init init_std_data(struct entropy_store *r)
{
int i;
ktime_t now = ktime_get_real();
@@ -1810,7 +1882,7 @@ static void init_std_data(struct entropy_store *r)
* take care not to overwrite the precious per platform data
* we were given.
*/
-static int rand_initialize(void)
+int __init rand_initialize(void)
{
init_std_data(&input_pool);
init_std_data(&blocking_pool);
@@ -1822,7 +1894,6 @@ static int rand_initialize(void)
}
return 0;
}
-early_initcall(rand_initialize);
#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
@@ -2211,8 +2282,8 @@ struct batched_entropy {
u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
};
unsigned int position;
+ spinlock_t batch_lock;
};
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
/*
* Get a random word for internal kernel use only. The quality of the random
@@ -2222,12 +2293,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
*/
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
u64 get_random_u64(void)
{
u64 ret;
- bool use_lock;
- unsigned long flags = 0;
+ unsigned long flags;
struct batched_entropy *batch;
static void *previous;
@@ -2242,28 +2315,25 @@ u64 get_random_u64(void)
warn_unseeded_randomness(&previous);
- use_lock = READ_ONCE(crng_init) < 2;
- batch = &get_cpu_var(batched_entropy_u64);
- if (use_lock)
- read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+ spin_lock_irqsave(&batch->batch_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
- if (use_lock)
- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
- put_cpu_var(batched_entropy_u64);
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
return ret;
}
EXPORT_SYMBOL(get_random_u64);
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
u32 get_random_u32(void)
{
u32 ret;
- bool use_lock;
- unsigned long flags = 0;
+ unsigned long flags;
struct batched_entropy *batch;
static void *previous;
@@ -2272,18 +2342,14 @@ u32 get_random_u32(void)
warn_unseeded_randomness(&previous);
- use_lock = READ_ONCE(crng_init) < 2;
- batch = &get_cpu_var(batched_entropy_u32);
- if (use_lock)
- read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ batch = raw_cpu_ptr(&batched_entropy_u32);
+ spin_lock_irqsave(&batch->batch_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
- if (use_lock)
- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
- put_cpu_var(batched_entropy_u32);
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
return ret;
}
EXPORT_SYMBOL(get_random_u32);
@@ -2297,12 +2363,19 @@ static void invalidate_batched_entropy(void)
int cpu;
unsigned long flags;
- write_lock_irqsave(&batched_entropy_reset_lock, flags);
for_each_possible_cpu (cpu) {
- per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
- per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+ struct batched_entropy *batched_entropy;
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+ batched_entropy->position = 0;
+ spin_unlock(&batched_entropy->batch_lock);
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+ spin_lock(&batched_entropy->batch_lock);
+ batched_entropy->position = 0;
+ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
}
- write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
}
/**
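
The new documentation block in random.c draws a line between get_random_bytes() for secrets that must stay secret after they are erased, and the cheaper get_random_u32()/get_random_u64() family for values that live in the kernel alongside whatever they protect. A hedged illustration of that split, with placeholder structure and function names:

#include <linux/random.h>
#include <linux/siphash.h>

struct ex_session {
	u8 wrap_key[32];		/* long-lived secret, must resist backtracking */
	siphash_key_t cookie_key;	/* stored next to the objects it protects */
};

static void ex_session_init(struct ex_session *s)
{
	/* Key that must stay secret after erasure: use get_random_bytes(). */
	get_random_bytes(s->wrap_key, sizeof(s->wrap_key));

	/*
	 * Hash-flood protection key kept in kernel memory for its whole
	 * lifetime: the batched get_random_u64() interface is sufficient.
	 */
	s->cookie_key.key[0] = get_random_u64();
	s->cookie_key.key[1] = get_random_u64();
}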
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 7c19d9b22785..e8614ea843e2 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -243,7 +243,7 @@ static int tanbac_tb0219_open(struct inode *inode, struct file *file)
case 16 ... 23:
case 32 ... 39:
case 48 ... 55:
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
default:
break;
}
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 536e55d3919f..f3e4bc490cf0 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -157,7 +157,6 @@ config TCG_CRB
config TCG_VTPM_PROXY
tristate "VTPM Proxy Interface"
depends on TCG_TPM
- select ANON_INODES
---help---
This driver proxies for an emulated TPM (vTPM) running in userspace.
A device /dev/vtpmx is provided that creates a device pair
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index d8b77133a83a..f824563fc28d 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -37,8 +37,8 @@
*
* Returns size of the event. If it is an invalid event, returns 0.
*/
-static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
- struct tcg_pcr_event *event_header)
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+ struct tcg_pcr_event *event_header)
{
struct tcg_efi_specid_event_head *efispecid;
struct tcg_event_field *event_field;
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 8856cce5a23b..817ae09a369e 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
__poll_t mask = 0;
poll_wait(file, &priv->async_wait, wait);
+ mutex_lock(&priv->buffer_mutex);
- if (!priv->response_read || priv->response_length)
+ /*
+	 * The response_length indicates whether there is still a response
+	 * (or part of it) to be consumed. Partial reads decrease it
+	 * by the number of bytes read, and a write resets it to zero.
+ */
+ if (priv->response_length)
mask = EPOLLIN | EPOLLRDNORM;
else
mask = EPOLLOUT | EPOLLWRNORM;
+ mutex_unlock(&priv->buffer_mutex);
return mask;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 83ece5639f86..ae1030c9b086 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev)
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
return 0;
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- mutex_lock(&chip->tpm_mutex);
- if (!tpm_chip_start(chip)) {
+ if (!tpm_chip_start(chip)) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_STATE);
- tpm_chip_stop(chip);
- }
- mutex_unlock(&chip->tpm_mutex);
- } else {
- rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+ else
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
+ tpm_chip_stop(chip);
}
return rc;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fbeb71953526..05dbfdb9f4af 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -75,7 +75,7 @@ struct ports_driver_data {
/* All the console devices handled by this driver */
struct list_head consoles;
};
-static struct ports_driver_data pdrvdata;
+static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
static DEFINE_SPINLOCK(pdrvdata_lock);
static DECLARE_COMPLETION(early_console_added);
@@ -1394,6 +1394,7 @@ static int add_port(struct ports_device *portdev, u32 id)
port->async_queue = NULL;
port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+ port->cons.vtermno = 0;
port->host_connected = port->guest_connected = false;
port->stats = (struct port_stats) { 0 };
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e705aab9e38b..fc1e0cf44995 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config CLKDEV_LOOKUP
bool
@@ -219,6 +220,13 @@ config COMMON_CLK_XGENE
---help---
Support for the APM X-Gene SoC reference, PLL, and device clocks.
+config COMMON_CLK_LOCHNAGAR
+ tristate "Cirrus Logic Lochnagar clock driver"
+ depends on MFD_LOCHNAGAR
+ help
+ This driver supports the clocking features of the Cirrus Logic
+ Lochnagar audio development board.
+
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
@@ -297,6 +305,7 @@ config COMMON_CLK_FIXED_MMIO
Support for Memory Mapped IO Fixed clocks
source "drivers/clk/actions/Kconfig"
+source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/imgtec/Kconfig"
@@ -309,7 +318,9 @@ source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
source "drivers/clk/renesas/Kconfig"
source "drivers/clk/samsung/Kconfig"
+source "drivers/clk/sifive/Kconfig"
source "drivers/clk/sprd/Kconfig"
+source "drivers/clk/sunxi/Kconfig"
source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 1db133652f0c..9ef4305d55e0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
+obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
+obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
@@ -64,6 +66,7 @@ obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
# please keep this section sorted lexicographically by directory path name
obj-y += actions/
+obj-y += analogbits/
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
obj-$(CONFIG_ARCH_ARTPEC) += axis/
obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
@@ -93,6 +96,7 @@ obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
+obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/clk/actions/owl-common.h b/drivers/clk/actions/owl-common.h
index 5a866a8b913d..c000a431471e 100644
--- a/drivers/clk/actions/owl-common.h
+++ b/drivers/clk/actions/owl-common.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL common clock driver
//
diff --git a/drivers/clk/actions/owl-composite.h b/drivers/clk/actions/owl-composite.h
index b410ed5bf308..bca38bf8f218 100644
--- a/drivers/clk/actions/owl-composite.h
+++ b/drivers/clk/actions/owl-composite.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL composite clock driver
//
diff --git a/drivers/clk/actions/owl-divider.h b/drivers/clk/actions/owl-divider.h
index 92d3e3d23967..083be6d80954 100644
--- a/drivers/clk/actions/owl-divider.h
+++ b/drivers/clk/actions/owl-divider.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL divider clock driver
//
diff --git a/drivers/clk/actions/owl-factor.h b/drivers/clk/actions/owl-factor.h
index f1a7ffe896e1..04b89cbfdccb 100644
--- a/drivers/clk/actions/owl-factor.h
+++ b/drivers/clk/actions/owl-factor.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL factor clock driver
//
diff --git a/drivers/clk/actions/owl-fixed-factor.h b/drivers/clk/actions/owl-fixed-factor.h
index cc9fe36c0964..3dfd7fd7d292 100644
--- a/drivers/clk/actions/owl-fixed-factor.h
+++ b/drivers/clk/actions/owl-fixed-factor.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL fixed factor clock driver
//
diff --git a/drivers/clk/actions/owl-gate.h b/drivers/clk/actions/owl-gate.h
index c2d61ceebce2..c2f161c93fda 100644
--- a/drivers/clk/actions/owl-gate.h
+++ b/drivers/clk/actions/owl-gate.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL gate clock driver
//
diff --git a/drivers/clk/actions/owl-mux.h b/drivers/clk/actions/owl-mux.h
index 834284c8c3ae..53b9ab665294 100644
--- a/drivers/clk/actions/owl-mux.h
+++ b/drivers/clk/actions/owl-mux.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL mux clock driver
//
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
index 6fb0d45bb088..78e5fc360b03 100644
--- a/drivers/clk/actions/owl-pll.h
+++ b/drivers/clk/actions/owl-pll.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL pll clock driver
//
diff --git a/drivers/clk/actions/owl-reset.h b/drivers/clk/actions/owl-reset.h
index 10f5774979a6..a947ffcb5a02 100644
--- a/drivers/clk/actions/owl-reset.h
+++ b/drivers/clk/actions/owl-reset.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
//
// Actions Semi Owl SoCs Reset Management Unit driver
//
diff --git a/drivers/clk/analogbits/Kconfig b/drivers/clk/analogbits/Kconfig
new file mode 100644
index 000000000000..b5fd60c7f136
--- /dev/null
+++ b/drivers/clk/analogbits/Kconfig
@@ -0,0 +1,2 @@
+config CLK_ANALOGBITS_WRPLL_CLN28HPC
+ bool
diff --git a/drivers/clk/analogbits/Makefile b/drivers/clk/analogbits/Makefile
new file mode 100644
index 000000000000..bf017447451e
--- /dev/null
+++ b/drivers/clk/analogbits/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC) += wrpll-cln28hpc.o
diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
new file mode 100644
index 000000000000..776ead319ae9
--- /dev/null
+++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ *
+ * This library supports configuration parsing and reprogramming of
+ * the CLN28HPC variant of the Analog Bits Wide Range PLL. The
+ * intention is for this library to be reusable for any device that
+ * integrates this PLL; thus the register structure and programming
+ * details are expected to be provided by a separate IP block driver.
+ *
+ * The bulk of this code is primarily useful for clock configurations
+ * that must operate at arbitrary rates, as opposed to clock configurations
+ * that are restricted by software or manufacturer guidance to a small,
+ * pre-determined set of performance points.
+ *
+ * References:
+ * - Analog Bits "Wide Range PLL Datasheet", version 2015.10.01
+ * - SiFive FU540-C000 Manual v1p0, Chapter 7 "Clocking and Reset"
+ * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
+#include <linux/clk/analogbits-wrpll-cln28hpc.h>
+
+/* MIN_INPUT_FREQ: minimum input clock frequency, in Hz (Fref_min) */
+#define MIN_INPUT_FREQ 7000000
+
+/* MAX_INPUT_FREQ: maximum input clock frequency, in Hz (Fref_max) */
+#define MAX_INPUT_FREQ 600000000
+
+/* MIN_POST_DIVR_FREQ: minimum post-divider reference frequency, in Hz */
+#define MIN_POST_DIVR_FREQ 7000000
+
+/* MAX_POST_DIVR_FREQ: maximum post-divider reference frequency, in Hz */
+#define MAX_POST_DIVR_FREQ 200000000
+
+/* MIN_VCO_FREQ: minimum VCO frequency, in Hz (Fvco_min) */
+#define MIN_VCO_FREQ 2400000000UL
+
+/* MAX_VCO_FREQ: maximum VCO frequency, in Hz (Fvco_max) */
+#define MAX_VCO_FREQ 4800000000ULL
+
+/* MAX_DIVQ_DIVISOR: maximum output divisor. Selected by DIVQ = 6 */
+#define MAX_DIVQ_DIVISOR 64
+
+/* MAX_DIVR_DIVISOR: maximum reference divisor. Selected by DIVR = 63 */
+#define MAX_DIVR_DIVISOR 64
+
+/* MAX_LOCK_US: maximum PLL lock time, in microseconds (tLOCK_max) */
+#define MAX_LOCK_US 70
+
+/*
+ * ROUND_SHIFT: number of bits to shift to avoid precision loss in the rounding
+ * algorithm
+ */
+#define ROUND_SHIFT 20
+
+/*
+ * Private functions
+ */
+
+/**
+ * __wrpll_calc_filter_range() - determine PLL loop filter bandwidth
+ * @post_divr_freq: input clock rate after the R divider
+ *
+ * Select the value to be presented to the PLL RANGE input signals, based
+ * on the input clock frequency after the post-R-divider @post_divr_freq.
+ * This code follows the recommendations in the PLL datasheet for filter
+ * range selection.
+ *
+ * Return: The RANGE value to be presented to the PLL configuration inputs,
+ * or a negative return code upon error.
+ */
+static int __wrpll_calc_filter_range(unsigned long post_divr_freq)
+{
+ if (post_divr_freq < MIN_POST_DIVR_FREQ ||
+ post_divr_freq > MAX_POST_DIVR_FREQ) {
+ WARN(1, "%s: post-divider reference freq out of range: %lu",
+ __func__, post_divr_freq);
+ return -ERANGE;
+ }
+
+ switch (post_divr_freq) {
+ case 0 ... 10999999:
+ return 1;
+ case 11000000 ... 17999999:
+ return 2;
+ case 18000000 ... 29999999:
+ return 3;
+ case 30000000 ... 49999999:
+ return 4;
+ case 50000000 ... 79999999:
+ return 5;
+ case 80000000 ... 129999999:
+ return 6;
+ }
+
+ return 7;
+}
+
+/**
+ * __wrpll_calc_fbdiv() - return feedback fixed divide value
+ * @c: ptr to a struct wrpll_cfg record to read from
+ *
+ * The internal feedback path includes a fixed by-two divider; the
+ * external feedback path does not. Return the appropriate divider
+ * value (2 or 1) depending on whether internal or external feedback
+ * is enabled. This code doesn't test for invalid configurations
+ * (e.g. both or neither of WRPLL_FLAGS_*_FEEDBACK are set); it relies
+ * on the caller to do so.
+ *
+ * Context: Any context. Caller must protect the memory pointed to by
+ * @c from simultaneous modification.
+ *
+ * Return: 2 if internal feedback is enabled or 1 if external feedback
+ * is enabled.
+ */
+static u8 __wrpll_calc_fbdiv(const struct wrpll_cfg *c)
+{
+ return (c->flags & WRPLL_FLAGS_INT_FEEDBACK_MASK) ? 2 : 1;
+}
+
+/**
+ * __wrpll_calc_divq() - determine DIVQ based on target PLL output clock rate
+ * @target_rate: target PLL output clock rate
+ * @vco_rate: pointer to a u64 to store the computed VCO rate into
+ *
+ * Determine a reasonable value for the PLL Q post-divider, based on the
+ * target output rate @target_rate for the PLL. Along with returning the
+ * computed Q divider value as the return value, this function stores the
+ * desired target VCO rate into the variable pointed to by @vco_rate.
+ *
+ * Context: Any context. Caller must protect the memory pointed to by
+ * @vco_rate from simultaneous access or modification.
+ *
+ * Return: a positive integer DIVQ value to be programmed into the hardware
+ * upon success, or 0 upon error (since 0 is an invalid DIVQ value)
+ */
+static u8 __wrpll_calc_divq(u32 target_rate, u64 *vco_rate)
+{
+ u64 s;
+ u8 divq = 0;
+
+ if (!vco_rate) {
+ WARN_ON(1);
+ goto wcd_out;
+ }
+
+ s = div_u64(MAX_VCO_FREQ, target_rate);
+ if (s <= 1) {
+ divq = 1;
+ *vco_rate = MAX_VCO_FREQ;
+ } else if (s > MAX_DIVQ_DIVISOR) {
+ divq = ilog2(MAX_DIVQ_DIVISOR);
+ *vco_rate = MIN_VCO_FREQ;
+ } else {
+ divq = ilog2(s);
+ *vco_rate = (u64)target_rate << divq;
+ }
+
+wcd_out:
+ return divq;
+}
+
+/**
+ * __wrpll_update_parent_rate() - update PLL data when parent rate changes
+ * @c: ptr to a struct wrpll_cfg record to write PLL data to
+ * @parent_rate: PLL input refclk rate (pre-R-divider)
+ *
+ * Pre-compute some data used by the PLL configuration algorithm when
+ * the PLL's reference clock rate changes. The intention is to avoid
+ * computation when the parent rate remains constant - expected to be
+ * the common case.
+ *
+ * Returns: 0 upon success or -ERANGE if the reference clock rate is
+ * out of range.
+ */
+static int __wrpll_update_parent_rate(struct wrpll_cfg *c,
+ unsigned long parent_rate)
+{
+ u8 max_r_for_parent;
+
+ if (parent_rate > MAX_INPUT_FREQ || parent_rate < MIN_POST_DIVR_FREQ)
+ return -ERANGE;
+
+ c->parent_rate = parent_rate;
+ max_r_for_parent = div_u64(parent_rate, MIN_POST_DIVR_FREQ);
+ c->max_r = min_t(u8, MAX_DIVR_DIVISOR, max_r_for_parent);
+
+ c->init_r = DIV_ROUND_UP_ULL(parent_rate, MAX_POST_DIVR_FREQ);
+
+ return 0;
+}
+
+/**
+ * wrpll_configure_for_rate() - compute PLL configuration for a target rate
+ * @c: ptr to a struct wrpll_cfg record to write into
+ * @target_rate: target PLL output clock rate (post-Q-divider)
+ * @parent_rate: PLL input refclk rate (pre-R-divider)
+ *
+ * Compute the appropriate PLL signal configuration values and store
+ * in PLL context @c. PLL reprogramming is not glitchless, so the
+ * caller should switch any downstream logic to a different clock
+ * source or clock-gate it before presenting these values to the PLL
+ * configuration signals.
+ *
+ * The caller must pass this function a pre-initialized struct
+ * wrpll_cfg record: either initialized to zero (with the
+ * exception of the .name and .flags fields) or read from the PLL.
+ *
+ * Context: Any context. Caller must protect the memory pointed to by @c
+ * from simultaneous access or modification.
+ *
+ * Return: 0 upon success; anything else upon failure.
+ */
+int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
+ unsigned long parent_rate)
+{
+ unsigned long ratio;
+ u64 target_vco_rate, delta, best_delta, f_pre_div, vco, vco_pre;
+ u32 best_f, f, post_divr_freq;
+ u8 fbdiv, divq, best_r, r;
+ int range;
+
+ if (c->flags == 0) {
+ WARN(1, "%s called with uninitialized PLL config", __func__);
+ return -EINVAL;
+ }
+
+ /* Initialize rounding data if it hasn't been initialized already */
+ if (parent_rate != c->parent_rate) {
+ if (__wrpll_update_parent_rate(c, parent_rate)) {
+ pr_err("%s: PLL input rate is out of range\n",
+ __func__);
+ return -ERANGE;
+ }
+ }
+
+ c->flags &= ~WRPLL_FLAGS_RESET_MASK;
+
+ /* Put the PLL into bypass if the user requests the parent clock rate */
+ if (target_rate == parent_rate) {
+ c->flags |= WRPLL_FLAGS_BYPASS_MASK;
+ return 0;
+ }
+
+ c->flags &= ~WRPLL_FLAGS_BYPASS_MASK;
+
+ /* Calculate the Q shift and target VCO rate */
+ divq = __wrpll_calc_divq(target_rate, &target_vco_rate);
+ if (!divq)
+ return -1;
+ c->divq = divq;
+
+ /* Precalculate the pre-Q divider target ratio */
+ ratio = div64_u64((target_vco_rate << ROUND_SHIFT), parent_rate);
+
+ fbdiv = __wrpll_calc_fbdiv(c);
+ best_r = 0;
+ best_f = 0;
+ best_delta = MAX_VCO_FREQ;
+
+ /*
+ * Consider all values for R which land within
+ * [MIN_POST_DIVR_FREQ, MAX_POST_DIVR_FREQ]; prefer smaller R
+ */
+ for (r = c->init_r; r <= c->max_r; ++r) {
+ f_pre_div = ratio * r;
+ f = (f_pre_div + (1 << ROUND_SHIFT)) >> ROUND_SHIFT;
+ f >>= (fbdiv - 1);
+
+ post_divr_freq = div_u64(parent_rate, r);
+ vco_pre = fbdiv * post_divr_freq;
+ vco = vco_pre * f;
+
+ /* Ensure rounding didn't take us out of range */
+ if (vco > target_vco_rate) {
+ --f;
+ vco = vco_pre * f;
+ } else if (vco < MIN_VCO_FREQ) {
+ ++f;
+ vco = vco_pre * f;
+ }
+
+ delta = abs(target_rate - vco);
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_r = r;
+ best_f = f;
+ }
+ }
+
+ c->divr = best_r - 1;
+ c->divf = best_f - 1;
+
+ post_divr_freq = div_u64(parent_rate, best_r);
+
+ /* Pick the best PLL jitter filter */
+ range = __wrpll_calc_filter_range(post_divr_freq);
+ if (range < 0)
+ return range;
+ c->range = range;
+
+ return 0;
+}
+
+/**
+ * wrpll_calc_output_rate() - calculate the PLL's target output rate
+ * @c: ptr to a struct wrpll_cfg record to read from
+ * @parent_rate: PLL refclk rate
+ *
+ * Given a pointer to the PLL's current input configuration @c and the
+ * PLL's input reference clock rate @parent_rate (before the R
+ * pre-divider), calculate the PLL's output clock rate (after the Q
+ * post-divider).
+ *
+ * Context: Any context. Caller must protect the memory pointed to by @c
+ * from simultaneous modification.
+ *
+ * Return: the PLL's output clock rate, in Hz. The return value from
+ * this function is intended to be convenient to pass directly
+ * to the Linux clock framework; thus there is no explicit
+ * error return value.
+ */
+unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c,
+ unsigned long parent_rate)
+{
+ u8 fbdiv;
+ u64 n;
+
+ if (c->flags & WRPLL_FLAGS_EXT_FEEDBACK_MASK) {
+ WARN(1, "external feedback mode not yet supported");
+ return ULONG_MAX;
+ }
+
+ fbdiv = __wrpll_calc_fbdiv(c);
+ n = parent_rate * fbdiv * (c->divf + 1);
+ n = div_u64(n, c->divr + 1);
+ n >>= c->divq;
+
+ return n;
+}
+
+/**
+ * wrpll_calc_max_lock_us() - return the time for the PLL to lock
+ * @c: ptr to a struct wrpll_cfg record to read from
+ *
+ * Return the minimum amount of time (in microseconds) that the caller
+ * must wait after reprogramming the PLL to ensure that it is locked
+ * to the input frequency and stable. The lock time likely depends on the
+ * DIVR value; this is still under discussion with the manufacturer.
+ *
+ * Return: the minimum amount of time the caller must wait for the PLL
+ * to lock (in microseconds)
+ */
+unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c)
+{
+ return MAX_LOCK_US;
+}
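The wrpll_* entry points above are meant to be driven by an IP-block clock driver that owns the PLL's control registers (the register layout is deliberately not part of this library). A minimal consumer sketch follows; it is not taken from this series, the example_* names are hypothetical, and the actual register programming step is only indicated by a comment.

#include <linux/clk/analogbits-wrpll-cln28hpc.h>
#include <linux/delay.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Internal feedback: the only mode wrpll_calc_output_rate() handles above */
static struct wrpll_cfg example_pll_cfg = {
	.flags = WRPLL_FLAGS_INT_FEEDBACK_MASK,
};

static int example_set_pll_rate(u32 target_rate, unsigned long ref_rate)
{
	int ret;

	/* Fill in .divr/.divf/.divq/.range for the requested output rate */
	ret = wrpll_configure_for_rate(&example_pll_cfg, target_rate, ref_rate);
	if (ret)
		return ret;

	/*
	 * Gate or re-parent downstream logic, then write
	 * example_pll_cfg.divr/.divf/.divq/.range into the PLL registers
	 * (IP-block specific, not shown here).
	 */

	/* Wait the worst-case lock time before trusting the output */
	udelay(wrpll_calc_max_lock_us(&example_pll_cfg));

	pr_debug("PLL output now %lu Hz\n",
		 wrpll_calc_output_rate(&example_pll_cfg, ref_rate));

	return 0;
}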
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index c75df1cad60e..3732241352ce 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
+obj-$(CONFIG_HAVE_AT91_SAM9X60_PLL) += clk-sam9x60-pll.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o
+obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index b1af5a395423..0aabe49aed09 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -41,7 +41,7 @@ static u8 sam9260_plla_out[] = { 0, 2 };
static u16 sam9260_plla_icpll[] = { 1, 1 };
-static struct clk_range sam9260_plla_outputs[] = {
+static const struct clk_range sam9260_plla_outputs[] = {
{ .min = 80000000, .max = 160000000 },
{ .min = 150000000, .max = 240000000 },
};
@@ -58,7 +58,7 @@ static u8 sam9260_pllb_out[] = { 1 };
static u16 sam9260_pllb_icpll[] = { 1 };
-static struct clk_range sam9260_pllb_outputs[] = {
+static const struct clk_range sam9260_pllb_outputs[] = {
{ .min = 70000000, .max = 130000000 },
};
@@ -128,7 +128,7 @@ static u8 sam9g20_plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
static u16 sam9g20_plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
-static struct clk_range sam9g20_plla_outputs[] = {
+static const struct clk_range sam9g20_plla_outputs[] = {
{ .min = 745000000, .max = 800000000 },
{ .min = 695000000, .max = 750000000 },
{ .min = 645000000, .max = 700000000 },
@@ -151,7 +151,7 @@ static u8 sam9g20_pllb_out[] = { 0 };
static u16 sam9g20_pllb_icpll[] = { 0 };
-static struct clk_range sam9g20_pllb_outputs[] = {
+static const struct clk_range sam9g20_pllb_outputs[] = {
{ .min = 30000000, .max = 100000000 },
};
@@ -182,7 +182,7 @@ static const struct clk_master_characteristics sam9261_mck_characteristics = {
.divisors = { 1, 2, 4, 0 },
};
-static struct clk_range sam9261_plla_outputs[] = {
+static const struct clk_range sam9261_plla_outputs[] = {
{ .min = 80000000, .max = 200000000 },
{ .min = 190000000, .max = 240000000 },
};
@@ -199,7 +199,7 @@ static u8 sam9261_pllb_out[] = { 1 };
static u16 sam9261_pllb_icpll[] = { 1 };
-static struct clk_range sam9261_pllb_outputs[] = {
+static const struct clk_range sam9261_pllb_outputs[] = {
{ .min = 70000000, .max = 130000000 },
};
@@ -262,7 +262,7 @@ static const struct clk_master_characteristics sam9263_mck_characteristics = {
.divisors = { 1, 2, 4, 0 },
};
-static struct clk_range sam9263_pll_outputs[] = {
+static const struct clk_range sam9263_pll_outputs[] = {
{ .min = 80000000, .max = 200000000 },
{ .min = 190000000, .max = 240000000 },
};
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 5aeef68b4bdd..0ac34cdaa106 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -14,7 +14,7 @@ static const struct clk_master_characteristics sam9rl_mck_characteristics = {
static u8 sam9rl_plla_out[] = { 0, 2 };
-static struct clk_range sam9rl_plla_outputs[] = {
+static const struct clk_range sam9rl_plla_outputs[] = {
{ .min = 80000000, .max = 200000000 },
{ .min = 190000000, .max = 240000000 },
};
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 3487e03d4bc6..0855f3a80cc7 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -17,7 +17,7 @@ static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
-static struct clk_range plla_outputs[] = {
+static const struct clk_range plla_outputs[] = {
{ .min = 745000000, .max = 800000000 },
{ .min = 695000000, .max = 750000000 },
{ .min = 645000000, .max = 700000000 },
@@ -49,6 +49,13 @@ static const struct {
{ .n = "pck1", .p = "prog1", .id = 9 },
};
+static const struct clk_pcr_layout at91sam9x5_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(5, 0),
+ .div_mask = GENMASK(17, 16),
+};
+
struct pck {
char *n;
u8 id;
@@ -242,6 +249,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
for (i = 0; i < ARRAY_SIZE(at91sam9x5_periphck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &at91sam9x5_pcr_layout,
at91sam9x5_periphck[i].n,
"masterck",
at91sam9x5_periphck[i].id,
@@ -254,6 +262,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
for (i = 0; extra_pcks[i].id; i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &at91sam9x5_pcr_layout,
extra_pcks[i].n,
"masterck",
extra_pcks[i].id,
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 66e7f7baf958..5f18847965c1 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
@@ -31,6 +32,7 @@ struct clk_generated {
spinlock_t *lock;
u32 id;
u32 gckdiv;
+ const struct clk_pcr_layout *layout;
u8 parent_id;
bool audio_pll_allowed;
};
@@ -47,14 +49,14 @@ static int clk_generated_enable(struct clk_hw *hw)
__func__, gck->gckdiv, gck->parent_id);
spin_lock_irqsave(gck->lock, flags);
- regmap_write(gck->regmap, AT91_PMC_PCR,
- (gck->id & AT91_PMC_PCR_PID_MASK));
- regmap_update_bits(gck->regmap, AT91_PMC_PCR,
- AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
- AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
- AT91_PMC_PCR_GCKCSS(gck->parent_id) |
- AT91_PMC_PCR_CMD |
- AT91_PMC_PCR_GCKDIV(gck->gckdiv) |
+ regmap_write(gck->regmap, gck->layout->offset,
+ (gck->id & gck->layout->pid_mask));
+ regmap_update_bits(gck->regmap, gck->layout->offset,
+ AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
+ gck->layout->cmd | AT91_PMC_PCR_GCKEN,
+ field_prep(gck->layout->gckcss_mask, gck->parent_id) |
+ gck->layout->cmd |
+ FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
AT91_PMC_PCR_GCKEN);
spin_unlock_irqrestore(gck->lock, flags);
return 0;
@@ -66,11 +68,11 @@ static void clk_generated_disable(struct clk_hw *hw)
unsigned long flags;
spin_lock_irqsave(gck->lock, flags);
- regmap_write(gck->regmap, AT91_PMC_PCR,
- (gck->id & AT91_PMC_PCR_PID_MASK));
- regmap_update_bits(gck->regmap, AT91_PMC_PCR,
- AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
- AT91_PMC_PCR_CMD);
+ regmap_write(gck->regmap, gck->layout->offset,
+ (gck->id & gck->layout->pid_mask));
+ regmap_update_bits(gck->regmap, gck->layout->offset,
+ gck->layout->cmd | AT91_PMC_PCR_GCKEN,
+ gck->layout->cmd);
spin_unlock_irqrestore(gck->lock, flags);
}
@@ -81,9 +83,9 @@ static int clk_generated_is_enabled(struct clk_hw *hw)
unsigned int status;
spin_lock_irqsave(gck->lock, flags);
- regmap_write(gck->regmap, AT91_PMC_PCR,
- (gck->id & AT91_PMC_PCR_PID_MASK));
- regmap_read(gck->regmap, AT91_PMC_PCR, &status);
+ regmap_write(gck->regmap, gck->layout->offset,
+ (gck->id & gck->layout->pid_mask));
+ regmap_read(gck->regmap, gck->layout->offset, &status);
spin_unlock_irqrestore(gck->lock, flags);
return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
@@ -259,19 +261,18 @@ static void clk_generated_startup(struct clk_generated *gck)
unsigned long flags;
spin_lock_irqsave(gck->lock, flags);
- regmap_write(gck->regmap, AT91_PMC_PCR,
- (gck->id & AT91_PMC_PCR_PID_MASK));
- regmap_read(gck->regmap, AT91_PMC_PCR, &tmp);
+ regmap_write(gck->regmap, gck->layout->offset,
+ (gck->id & gck->layout->pid_mask));
+ regmap_read(gck->regmap, gck->layout->offset, &tmp);
spin_unlock_irqrestore(gck->lock, flags);
- gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
- >> AT91_PMC_PCR_GCKCSS_OFFSET;
- gck->gckdiv = (tmp & AT91_PMC_PCR_GCKDIV_MASK)
- >> AT91_PMC_PCR_GCKDIV_OFFSET;
+ gck->parent_id = field_get(gck->layout->gckcss_mask, tmp);
+ gck->gckdiv = FIELD_GET(AT91_PMC_PCR_GCKDIV_MASK, tmp);
}
struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
const char *name, const char **parent_names,
u8 num_parents, u8 id, bool pll_audio,
const struct clk_range *range)
@@ -298,6 +299,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
gck->lock = lock;
gck->range = *range;
gck->audio_pll_allowed = pll_audio;
+ gck->layout = layout;
clk_generated_startup(gck);
hw = &gck->hw;
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index eb53b4a8fab6..12b5bf4cc7bb 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -29,6 +29,7 @@ struct clk_master {
struct regmap *regmap;
const struct clk_master_layout *layout;
const struct clk_master_characteristics *characteristics;
+ u32 mckr;
};
static inline bool clk_master_ready(struct regmap *regmap)
@@ -69,7 +70,7 @@ static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
master->characteristics;
unsigned int mckr;
- regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
mckr &= layout->mask;
pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK;
@@ -95,7 +96,7 @@ static u8 clk_master_get_parent(struct clk_hw *hw)
struct clk_master *master = to_clk_master(hw);
unsigned int mckr;
- regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
return mckr & AT91_PMC_CSS;
}
@@ -147,13 +148,14 @@ at91_clk_register_master(struct regmap *regmap,
return hw;
}
-
const struct clk_master_layout at91rm9200_master_layout = {
.mask = 0x31F,
.pres_shift = 2,
+ .offset = AT91_PMC_MCKR,
};
const struct clk_master_layout at91sam9x5_master_layout = {
.mask = 0x373,
.pres_shift = 4,
+ .offset = AT91_PMC_MCKR,
};
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 65c1defa78e4..6b7748b9588a 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
@@ -23,9 +24,6 @@ DEFINE_SPINLOCK(pmc_pcr_lock);
#define PERIPHERAL_ID_MAX 31
#define PERIPHERAL_MASK(id) (1 << ((id) & PERIPHERAL_ID_MAX))
-#define PERIPHERAL_RSHIFT_MASK 0x3
-#define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK)
-
#define PERIPHERAL_MAX_SHIFT 3
struct clk_peripheral {
@@ -43,6 +41,7 @@ struct clk_sam9x5_peripheral {
spinlock_t *lock;
u32 id;
u32 div;
+ const struct clk_pcr_layout *layout;
bool auto_div;
};
@@ -169,13 +168,13 @@ static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
return 0;
spin_lock_irqsave(periph->lock, flags);
- regmap_write(periph->regmap, AT91_PMC_PCR,
- (periph->id & AT91_PMC_PCR_PID_MASK));
- regmap_update_bits(periph->regmap, AT91_PMC_PCR,
- AT91_PMC_PCR_DIV_MASK | AT91_PMC_PCR_CMD |
+ regmap_write(periph->regmap, periph->layout->offset,
+ (periph->id & periph->layout->pid_mask));
+ regmap_update_bits(periph->regmap, periph->layout->offset,
+ periph->layout->div_mask | periph->layout->cmd |
AT91_PMC_PCR_EN,
- AT91_PMC_PCR_DIV(periph->div) |
- AT91_PMC_PCR_CMD |
+ field_prep(periph->layout->div_mask, periph->div) |
+ periph->layout->cmd |
AT91_PMC_PCR_EN);
spin_unlock_irqrestore(periph->lock, flags);
@@ -191,11 +190,11 @@ static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
return;
spin_lock_irqsave(periph->lock, flags);
- regmap_write(periph->regmap, AT91_PMC_PCR,
- (periph->id & AT91_PMC_PCR_PID_MASK));
- regmap_update_bits(periph->regmap, AT91_PMC_PCR,
- AT91_PMC_PCR_EN | AT91_PMC_PCR_CMD,
- AT91_PMC_PCR_CMD);
+ regmap_write(periph->regmap, periph->layout->offset,
+ (periph->id & periph->layout->pid_mask));
+ regmap_update_bits(periph->regmap, periph->layout->offset,
+ AT91_PMC_PCR_EN | periph->layout->cmd,
+ periph->layout->cmd);
spin_unlock_irqrestore(periph->lock, flags);
}
@@ -209,9 +208,9 @@ static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
return 1;
spin_lock_irqsave(periph->lock, flags);
- regmap_write(periph->regmap, AT91_PMC_PCR,
- (periph->id & AT91_PMC_PCR_PID_MASK));
- regmap_read(periph->regmap, AT91_PMC_PCR, &status);
+ regmap_write(periph->regmap, periph->layout->offset,
+ (periph->id & periph->layout->pid_mask));
+ regmap_read(periph->regmap, periph->layout->offset, &status);
spin_unlock_irqrestore(periph->lock, flags);
return status & AT91_PMC_PCR_EN ? 1 : 0;
@@ -229,13 +228,13 @@ clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
return parent_rate;
spin_lock_irqsave(periph->lock, flags);
- regmap_write(periph->regmap, AT91_PMC_PCR,
- (periph->id & AT91_PMC_PCR_PID_MASK));
- regmap_read(periph->regmap, AT91_PMC_PCR, &status);
+ regmap_write(periph->regmap, periph->layout->offset,
+ (periph->id & periph->layout->pid_mask));
+ regmap_read(periph->regmap, periph->layout->offset, &status);
spin_unlock_irqrestore(periph->lock, flags);
if (status & AT91_PMC_PCR_EN) {
- periph->div = PERIPHERAL_RSHIFT(status);
+ periph->div = field_get(periph->layout->div_mask, status);
periph->auto_div = false;
} else {
clk_sam9x5_peripheral_autodiv(periph);
@@ -328,6 +327,7 @@ static const struct clk_ops sam9x5_peripheral_ops = {
struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
u32 id, const struct clk_range *range)
{
@@ -354,7 +354,9 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
periph->div = 0;
periph->regmap = regmap;
periph->lock = lock;
- periph->auto_div = true;
+ if (layout->div_mask)
+ periph->auto_div = true;
+ periph->layout = layout;
periph->range = *range;
hw = &periph->hw;
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 89d6f3736dbf..f8edbb65eda3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -20,8 +20,7 @@
#define PROG_ID_MAX 7
#define PROG_STATUS_MASK(id) (1 << ((id) + 8))
-#define PROG_PRES_MASK 0x7
-#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
+#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & layout->pres_mask)
#define PROG_MAX_RM9200_CSS 3
struct clk_programmable {
@@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_programmable *prog = to_clk_programmable(hw);
+ const struct clk_programmable_layout *layout = prog->layout;
unsigned int pckr;
+ unsigned long rate;
regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
- return parent_rate >> PROG_PRES(prog->layout, pckr);
+ if (layout->is_pres_direct)
+ rate = parent_rate / (PROG_PRES(layout, pckr) + 1);
+ else
+ rate = parent_rate >> PROG_PRES(layout, pckr);
+
+ return rate;
}
static int clk_programmable_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ const struct clk_programmable_layout *layout = prog->layout;
struct clk_hw *parent;
long best_rate = -EINVAL;
unsigned long parent_rate;
- unsigned long tmp_rate;
+ unsigned long tmp_rate = 0;
int shift;
int i;
@@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw,
continue;
parent_rate = clk_hw_get_rate(parent);
- for (shift = 0; shift < PROG_PRES_MASK; shift++) {
- tmp_rate = parent_rate >> shift;
- if (tmp_rate <= req->rate)
- break;
+ if (layout->is_pres_direct) {
+ for (shift = 0; shift <= layout->pres_mask; shift++) {
+ tmp_rate = parent_rate / (shift + 1);
+ if (tmp_rate <= req->rate)
+ break;
+ }
+ } else {
+ for (shift = 0; shift < layout->pres_mask; shift++) {
+ tmp_rate = parent_rate >> shift;
+ if (tmp_rate <= req->rate)
+ break;
+ }
}
if (tmp_rate > req->rate)
@@ -137,16 +153,23 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
if (!div)
return -EINVAL;
- shift = fls(div) - 1;
+ if (layout->is_pres_direct) {
+ shift = div - 1;
- if (div != (1 << shift))
- return -EINVAL;
+ if (shift > layout->pres_mask)
+ return -EINVAL;
+ } else {
+ shift = fls(div) - 1;
- if (shift >= PROG_PRES_MASK)
- return -EINVAL;
+ if (div != (1 << shift))
+ return -EINVAL;
+
+ if (shift >= layout->pres_mask)
+ return -EINVAL;
+ }
regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
- PROG_PRES_MASK << layout->pres_shift,
+ layout->pres_mask << layout->pres_shift,
shift << layout->pres_shift);
return 0;
@@ -202,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap,
}
const struct clk_programmable_layout at91rm9200_programmable_layout = {
+ .pres_mask = 0x7,
.pres_shift = 2,
.css_mask = 0x3,
.have_slck_mck = 0,
+ .is_pres_direct = 0,
};
const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+ .pres_mask = 0x7,
.pres_shift = 2,
.css_mask = 0x3,
.have_slck_mck = 1,
+ .is_pres_direct = 0,
};
const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+ .pres_mask = 0x7,
.pres_shift = 4,
.css_mask = 0x7,
.have_slck_mck = 0,
+ .is_pres_direct = 0,
};
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
new file mode 100644
index 000000000000..34b817825b22
--- /dev/null
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Microchip Technology Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pmc.h"
+
+#define PMC_PLL_CTRL0 0xc
+#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0)
+#define PMC_PLL_CTRL0_ENPLL BIT(28)
+#define PMC_PLL_CTRL0_ENPLLCK BIT(29)
+#define PMC_PLL_CTRL0_ENLOCK BIT(31)
+
+#define PMC_PLL_CTRL1 0x10
+#define PMC_PLL_CTRL1_FRACR_MSK GENMASK(21, 0)
+#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24)
+
+#define PMC_PLL_ACR 0x18
+#define PMC_PLL_ACR_DEFAULT 0x1b040010UL
+#define PMC_PLL_ACR_UTMIVR BIT(12)
+#define PMC_PLL_ACR_UTMIBG BIT(13)
+#define PMC_PLL_ACR_LOOP_FILTER_MSK GENMASK(31, 24)
+
+#define PMC_PLL_UPDT 0x1c
+#define PMC_PLL_UPDT_UPDATE BIT(8)
+
+#define PMC_PLL_ISR0 0xec
+
+#define PLL_DIV_MAX (FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, UINT_MAX) + 1)
+#define UPLL_DIV 2
+#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
+
+#define PLL_MAX_ID 1
+
+struct sam9x60_pll {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ spinlock_t *lock;
+ const struct clk_pll_characteristics *characteristics;
+ u32 frac;
+ u8 id;
+ u8 div;
+ u16 mul;
+};
+
+#define to_sam9x60_pll(hw) container_of(hw, struct sam9x60_pll, hw)
+
+static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
+{
+ unsigned int status;
+
+ regmap_read(regmap, PMC_PLL_ISR0, &status);
+
+ return !!(status & BIT(id));
+}
+
+static int sam9x60_pll_prepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+ struct regmap *regmap = pll->regmap;
+ unsigned long flags;
+ u8 div;
+ u16 mul;
+ u32 val;
+
+ spin_lock_irqsave(pll->lock, flags);
+ regmap_write(regmap, PMC_PLL_UPDT, pll->id);
+
+ regmap_read(regmap, PMC_PLL_CTRL0, &val);
+ div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
+
+ regmap_read(regmap, PMC_PLL_CTRL1, &val);
+ mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val);
+
+ if (sam9x60_pll_ready(regmap, pll->id) &&
+ (div == pll->div && mul == pll->mul)) {
+ spin_unlock_irqrestore(pll->lock, flags);
+ return 0;
+ }
+
+ /* Recommended value for PMC_PLL_ACR */
+ val = PMC_PLL_ACR_DEFAULT;
+ regmap_write(regmap, PMC_PLL_ACR, val);
+
+ regmap_write(regmap, PMC_PLL_CTRL1,
+ FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, pll->mul));
+
+ if (pll->characteristics->upll) {
+ /* Enable the UTMI internal bandgap */
+ val |= PMC_PLL_ACR_UTMIBG;
+ regmap_write(regmap, PMC_PLL_ACR, val);
+
+ udelay(10);
+
+ /* Enable the UTMI internal regulator */
+ val |= PMC_PLL_ACR_UTMIVR;
+ regmap_write(regmap, PMC_PLL_ACR, val);
+
+ udelay(10);
+ }
+
+ regmap_update_bits(regmap, PMC_PLL_UPDT,
+ PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+
+ regmap_write(regmap, PMC_PLL_CTRL0,
+ PMC_PLL_CTRL0_ENLOCK | PMC_PLL_CTRL0_ENPLL |
+ PMC_PLL_CTRL0_ENPLLCK | pll->div);
+
+ regmap_update_bits(regmap, PMC_PLL_UPDT,
+ PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+
+ while (!sam9x60_pll_ready(regmap, pll->id))
+ cpu_relax();
+
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return 0;
+}
+
+static int sam9x60_pll_is_prepared(struct clk_hw *hw)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+
+ return sam9x60_pll_ready(pll->regmap, pll->id);
+}
+
+static void sam9x60_pll_unprepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(pll->lock, flags);
+
+ regmap_write(pll->regmap, PMC_PLL_UPDT, pll->id);
+
+ regmap_update_bits(pll->regmap, PMC_PLL_CTRL0,
+ PMC_PLL_CTRL0_ENPLLCK, 0);
+
+ regmap_update_bits(pll->regmap, PMC_PLL_UPDT,
+ PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+
+ regmap_update_bits(pll->regmap, PMC_PLL_CTRL0, PMC_PLL_CTRL0_ENPLL, 0);
+
+ if (pll->characteristics->upll)
+ regmap_update_bits(pll->regmap, PMC_PLL_ACR,
+ PMC_PLL_ACR_UTMIBG | PMC_PLL_ACR_UTMIVR, 0);
+
+ regmap_update_bits(pll->regmap, PMC_PLL_UPDT,
+ PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static unsigned long sam9x60_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+
+ return (parent_rate * (pll->mul + 1)) / (pll->div + 1);
+}
+
+static long sam9x60_pll_get_best_div_mul(struct sam9x60_pll *pll,
+ unsigned long rate,
+ unsigned long parent_rate,
+ bool update)
+{
+ const struct clk_pll_characteristics *characteristics =
+ pll->characteristics;
+ unsigned long bestremainder = ULONG_MAX;
+ unsigned long maxdiv, mindiv, tmpdiv;
+ long bestrate = -ERANGE;
+ unsigned long bestdiv = 0;
+ unsigned long bestmul = 0;
+ unsigned long bestfrac = 0;
+
+ if (rate < characteristics->output[0].min ||
+ rate > characteristics->output[0].max)
+ return -ERANGE;
+
+ if (!pll->characteristics->upll) {
+ mindiv = parent_rate / rate;
+ if (mindiv < 2)
+ mindiv = 2;
+
+ maxdiv = DIV_ROUND_UP(parent_rate * PLL_MUL_MAX, rate);
+ if (maxdiv > PLL_DIV_MAX)
+ maxdiv = PLL_DIV_MAX;
+ } else {
+ mindiv = maxdiv = UPLL_DIV;
+ }
+
+ for (tmpdiv = mindiv; tmpdiv <= maxdiv; tmpdiv++) {
+ unsigned long remainder;
+ unsigned long tmprate;
+ unsigned long tmpmul;
+ unsigned long tmpfrac = 0;
+
+ /*
+ * Calculate the multiplier associated with the current
+ * divider that provides the closest rate to the requested one.
+ */
+ tmpmul = mult_frac(rate, tmpdiv, parent_rate);
+ tmprate = mult_frac(parent_rate, tmpmul, tmpdiv);
+ remainder = rate - tmprate;
+
+ if (remainder) {
+ tmpfrac = DIV_ROUND_CLOSEST_ULL((u64)remainder * tmpdiv * (1 << 22),
+ parent_rate);
+
+ tmprate += DIV_ROUND_CLOSEST_ULL((u64)tmpfrac * parent_rate,
+ tmpdiv * (1 << 22));
+
+ if (tmprate > rate)
+ remainder = tmprate - rate;
+ else
+ remainder = rate - tmprate;
+ }
+
+ /*
+ * Compare the remainder with the best remainder found until
+ * now and elect a new best multiplier/divider pair if the
+ * current remainder is smaller than the best one.
+ */
+ if (remainder < bestremainder) {
+ bestremainder = remainder;
+ bestdiv = tmpdiv;
+ bestmul = tmpmul;
+ bestrate = tmprate;
+ bestfrac = tmpfrac;
+ }
+
+ /* We've found a perfect match! */
+ if (!remainder)
+ break;
+ }
+
+ /* Check if bestrate is a valid output rate */
+ if (bestrate < characteristics->output[0].min ||
+ bestrate > characteristics->output[0].max)
+ return -ERANGE;
+
+ if (update) {
+ pll->div = bestdiv - 1;
+ pll->mul = bestmul - 1;
+ pll->frac = bestfrac;
+ }
+
+ return bestrate;
+}
+
+static long sam9x60_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+
+ return sam9x60_pll_get_best_div_mul(pll, rate, *parent_rate, false);
+}
+
+static int sam9x60_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+
+ return sam9x60_pll_get_best_div_mul(pll, rate, parent_rate, true);
+}
+
+static const struct clk_ops pll_ops = {
+ .prepare = sam9x60_pll_prepare,
+ .unprepare = sam9x60_pll_unprepare,
+ .is_prepared = sam9x60_pll_is_prepared,
+ .recalc_rate = sam9x60_pll_recalc_rate,
+ .round_rate = sam9x60_pll_round_rate,
+ .set_rate = sam9x60_pll_set_rate,
+};
+
+struct clk_hw * __init
+sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name, u8 id,
+ const struct clk_pll_characteristics *characteristics)
+{
+ struct sam9x60_pll *pll;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ unsigned int pllr;
+ int ret;
+
+ if (id > PLL_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ pll->id = id;
+ pll->hw.init = &init;
+ pll->characteristics = characteristics;
+ pll->regmap = regmap;
+ pll->lock = lock;
+
+ regmap_write(regmap, PMC_PLL_UPDT, id);
+ regmap_read(regmap, PMC_PLL_CTRL0, &pllr);
+ pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, pllr);
+ regmap_read(regmap, PMC_PLL_CTRL1, &pllr);
+ pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, pllr);
+
+ hw = &pll->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(pll);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
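As a worked illustration of the div/mul/frac search in sam9x60_pll_get_best_div_mul() above (the 24 MHz reference and 498 MHz request are illustrative values, not taken from this series): with parent_rate = 24 MHz, rate = 498 MHz and tmpdiv = 2, tmpmul = mult_frac(498 MHz, 2, 24 MHz) = 41 and tmprate = mult_frac(24 MHz, 41, 2) = 492 MHz, leaving a 6 MHz remainder. The fractional part tmpfrac = DIV_ROUND_CLOSEST_ULL(6 MHz * 2 * 2^22, 24 MHz) = 2097152 (half of 2^22) adds the missing 6 MHz back, the remainder drops to zero and the loop stops with bestdiv = 2, bestmul = 41, bestfrac = 2097152, stored as pll->div = 1, pll->mul = 40, pll->frac = 2097152; by the search's own rate model that is 24 MHz * (41 + 0.5) / 2 = 498 MHz.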
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 79ee1c760f2a..ebc37ee33518 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -23,9 +23,13 @@
#define RM9200_USB_DIV_SHIFT 28
#define RM9200_USB_DIV_TAB_SIZE 4
+#define SAM9X5_USBS_MASK GENMASK(0, 0)
+#define SAM9X60_USBS_MASK GENMASK(1, 0)
+
struct at91sam9x5_clk_usb {
struct clk_hw hw;
struct regmap *regmap;
+ u32 usbs_mask;
};
#define to_at91sam9x5_clk_usb(hw) \
@@ -111,8 +115,7 @@ static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
if (index > 1)
return -EINVAL;
- regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS,
- index ? AT91_PMC_USBS : 0);
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, usb->usbs_mask, index);
return 0;
}
@@ -124,7 +127,7 @@ static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
- return usbr & AT91_PMC_USBS;
+ return usbr & usb->usbs_mask;
}
static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -190,9 +193,10 @@ static const struct clk_ops at91sam9n12_usb_ops = {
.set_rate = at91sam9x5_clk_usb_set_rate,
};
-struct clk_hw * __init
-at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
- const char **parent_names, u8 num_parents)
+static struct clk_hw * __init
+_at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents,
+ u32 usbs_mask)
{
struct at91sam9x5_clk_usb *usb;
struct clk_hw *hw;
@@ -212,6 +216,7 @@ at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
usb->hw.init = &init;
usb->regmap = regmap;
+ usb->usbs_mask = usbs_mask;
hw = &usb->hw;
ret = clk_hw_register(NULL, &usb->hw);
@@ -224,6 +229,22 @@ at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
}
struct clk_hw * __init
+at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ return _at91sam9x5_clk_register_usb(regmap, name, parent_names,
+ num_parents, SAM9X5_USBS_MASK);
+}
+
+struct clk_hw * __init
+sam9x60_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ return _at91sam9x5_clk_register_usb(regmap, name, parent_names,
+ num_parents, SAM9X60_USBS_MASK);
+}
+
+struct clk_hw * __init
at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name)
{
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index b95bb4e2a927..aa1754eac59f 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -93,6 +93,14 @@ CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup,
of_sama5d2_clk_audio_pll_pmc_setup);
#endif /* CONFIG_HAVE_AT91_AUDIO_PLL */
+static const struct clk_pcr_layout dt_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(5, 0),
+ .div_mask = GENMASK(17, 16),
+ .gckcss_mask = GENMASK(10, 8),
+};
+
#ifdef CONFIG_HAVE_AT91_GENERATED_CLK
#define GENERATED_SOURCE_MAX 6
@@ -146,7 +154,8 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
id == GCK_ID_CLASSD))
pll_audio = true;
- hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &dt_pcr_layout, name,
parent_names, num_parents,
id, pll_audio, &range);
if (IS_ERR(hw))
@@ -448,6 +457,7 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
hw = at91_clk_register_sam9x5_peripheral(regmap,
&pmc_pcr_lock,
+ &dt_pcr_layout,
name,
parent_name,
id, &range);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 672a79bda88c..2311204948be 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -38,6 +38,7 @@ struct clk_range {
#define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
struct clk_master_layout {
+ u32 offset;
u32 mask;
u8 pres_shift;
};
@@ -65,21 +66,35 @@ extern const struct clk_pll_layout sama5d3_pll_layout;
struct clk_pll_characteristics {
struct clk_range input;
int num_output;
- struct clk_range *output;
+ const struct clk_range *output;
u16 *icpll;
u8 *out;
+ u8 upll : 1;
};
struct clk_programmable_layout {
+ u8 pres_mask;
u8 pres_shift;
u8 css_mask;
u8 have_slck_mck;
+ u8 is_pres_direct;
};
extern const struct clk_programmable_layout at91rm9200_programmable_layout;
extern const struct clk_programmable_layout at91sam9g45_programmable_layout;
extern const struct clk_programmable_layout at91sam9x5_programmable_layout;
+struct clk_pcr_layout {
+ u32 offset;
+ u32 cmd;
+ u32 div_mask;
+ u32 gckcss_mask;
+ u32 pid_mask;
+};
+
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
@@ -105,6 +120,7 @@ at91_clk_register_audio_pll_pmc(struct regmap *regmap, const char *name,
struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
const char *name, const char **parent_names,
u8 num_parents, u8 id, bool pll_audio,
const struct clk_range *range);
@@ -143,6 +159,7 @@ at91_clk_register_peripheral(struct regmap *regmap, const char *name,
const char *parent_name, u32 id);
struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
u32 id, const struct clk_range *range);
@@ -156,6 +173,11 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name,
const char *parent_name);
struct clk_hw * __init
+sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name, u8 id,
+ const struct clk_pll_characteristics *characteristics);
+
+struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents, u8 id,
const struct clk_programmable_layout *layout);
@@ -181,6 +203,9 @@ struct clk_hw * __init
at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name);
struct clk_hw * __init
+sam9x60_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents);
+struct clk_hw * __init
at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name, const u32 *divisors);
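The field_get()/field_prep() helpers added to pmc.h mirror FIELD_GET()/FIELD_PREP() but accept the run-time masks carried in struct clk_pcr_layout rather than compile-time constants. For example, with the GENMASK(17, 16) div_mask used by the dt-compat and at91sam9x5 layouts, field_prep(GENMASK(17, 16), 2) yields 0x20000 and field_get(GENMASK(17, 16), 0x20000) yields 2 back; the ffs(mask) - 1 term supplies the shift that the removed AT91_PMC_PCR_DIV()/PERIPHERAL_RSHIFT() macros used to hard-code.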
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
new file mode 100644
index 000000000000..9790ddfa5b3c
--- /dev/null
+++ b/drivers/clk/at91/sam9x60.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 140000000, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3 },
+ .have_div3_pres = 1,
+};
+
+static const struct clk_master_layout sam9x60_master_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+static const struct clk_range plla_outputs[] = {
+ { .min = 300000000, .max = 600000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 12000000, .max = 48000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 300000000, .max = 500000000 },
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 12000000, .max = 48000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .upll = true,
+};
+
+static const struct clk_programmable_layout sam9x60_programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+static const struct clk_pcr_layout sam9x60_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} sam9x60_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "qspick", .p = "masterck", .id = 19 },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+} sam9x60_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "flex0_clk", .id = 5, },
+ { .n = "flex1_clk", .id = 6, },
+ { .n = "flex2_clk", .id = 7, },
+ { .n = "flex3_clk", .id = 8, },
+ { .n = "flex6_clk", .id = 9, },
+ { .n = "flex7_clk", .id = 10, },
+ { .n = "flex8_clk", .id = 11, },
+ { .n = "sdmmc0_clk", .id = 12, },
+ { .n = "flex4_clk", .id = 13, },
+ { .n = "flex5_clk", .id = 14, },
+ { .n = "flex9_clk", .id = 15, },
+ { .n = "flex10_clk", .id = 16, },
+ { .n = "tcb0_clk", .id = 17, },
+ { .n = "pwm_clk", .id = 18, },
+ { .n = "adc_clk", .id = 19, },
+ { .n = "dma0_clk", .id = 20, },
+ { .n = "matrix_clk", .id = 21, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "udphs_clk", .id = 23, },
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "lcd_clk", .id = 25, },
+ { .n = "sdmmc1_clk", .id = 26, },
+ { .n = "macb1_clk", .id = 27, },
+ { .n = "ssc_clk", .id = 28, },
+ { .n = "can0_clk", .id = 29, },
+ { .n = "can1_clk", .id = 30, },
+ { .n = "flex11_clk", .id = 32, },
+ { .n = "flex12_clk", .id = 33, },
+ { .n = "i2s_clk", .id = 34, },
+ { .n = "qspi_clk", .id = 35, },
+ { .n = "gfx2d_clk", .id = 36, },
+ { .n = "pit64b_clk", .id = 37, },
+ { .n = "trng_clk", .id = 38, },
+ { .n = "aes_clk", .id = 39, },
+ { .n = "tdes_clk", .id = 40, },
+ { .n = "sha_clk", .id = 41, },
+ { .n = "classd_clk", .id = 42, },
+ { .n = "isi_clk", .id = 43, },
+ { .n = "pioD_clk", .id = 44, },
+ { .n = "tcb1_clk", .id = 45, },
+ { .n = "dbgu_clk", .id = 47, },
+ { .n = "mpddr_clk", .id = 49, },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+ struct clk_range r;
+ bool pll;
+} sam9x60_gck[] = {
+ { .n = "flex0_gclk", .id = 5, },
+ { .n = "flex1_gclk", .id = 6, },
+ { .n = "flex2_gclk", .id = 7, },
+ { .n = "flex3_gclk", .id = 8, },
+ { .n = "flex6_gclk", .id = 9, },
+ { .n = "flex7_gclk", .id = 10, },
+ { .n = "flex8_gclk", .id = 11, },
+ { .n = "sdmmc0_gclk", .id = 12, .r = { .min = 0, .max = 105000000 }, },
+ { .n = "flex4_gclk", .id = 13, },
+ { .n = "flex5_gclk", .id = 14, },
+ { .n = "flex9_gclk", .id = 15, },
+ { .n = "flex10_gclk", .id = 16, },
+ { .n = "tcb0_gclk", .id = 17, },
+ { .n = "adc_gclk", .id = 19, },
+ { .n = "lcd_gclk", .id = 25, .r = { .min = 0, .max = 140000000 }, },
+ { .n = "sdmmc1_gclk", .id = 26, .r = { .min = 0, .max = 105000000 }, },
+ { .n = "flex11_gclk", .id = 32, },
+ { .n = "flex12_gclk", .id = 33, },
+ { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 },
+ .pll = true, },
+ { .n = "pit64b_gclk", .id = 37, },
+ { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 },
+ .pll = true, },
+ { .n = "tcb1_gclk", .id = 45, },
+ { .n = "dbgu_gclk", .id = 47, },
+};
+
+static void __init sam9x60_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *td_slck_name, *md_slck_name, *mainxtal_name;
+ struct pmc_data *sam9x60_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "td_slck");
+ if (i < 0)
+ return;
+
+ td_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "md_slck");
+ if (i < 0)
+ return;
+
+ md_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sam9x60_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(sam9x60_systemck),
+ nck(sam9x60_periphck),
+ nck(sam9x60_gck));
+ if (!sam9x60_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 24000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->chws[PMC_MAIN] = hw;
+
+ hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "pllack",
+ "mainck", 0, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "upllck",
+ "main_osc", 1, &upll_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ hw = at91_clk_register_master(regmap, "masterck", 3, parent_names,
+ &sam9x60_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "pllack";
+ parent_names[1] = "upllck";
+ parent_names[2] = "mainck";
+ parent_names[3] = "mainck";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 4);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "masterck";
+ parent_names[4] = "pllack";
+ parent_names[5] = "upllck";
+ for (i = 0; i < 8; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 6, i,
+ &sam9x60_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x60_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sam9x60_systemck[i].n,
+ sam9x60_systemck[i].p,
+ sam9x60_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->shws[sam9x60_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x60_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sam9x60_pcr_layout,
+ sam9x60_periphck[i].n,
+ "masterck",
+ sam9x60_periphck[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->phws[sam9x60_periphck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x60_gck); i++) {
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sam9x60_pcr_layout,
+ sam9x60_gck[i].n,
+ parent_names, 6,
+ sam9x60_gck[i].id,
+ sam9x60_gck[i].pll,
+ &sam9x60_gck[i].r);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->ghws[sam9x60_gck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sam9x60_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(sam9x60_pmc);
+}
+/* Some of these clocks are used by a clocksource, so register them early */
+CLK_OF_DECLARE(sam9x60_pmc, "microchip,sam9x60-pmc", sam9x60_pmc_setup);
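For illustration only (not part of the patch): with the first sam9x60_periphck entry, { .n = "pioA_clk", .id = 2 }, the peripheral-clock loop above collapses to the call below, which shows how the new sam9x60_pcr_layout is threaded through every registration.

	hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
						 &sam9x60_pcr_layout,
						 "pioA_clk", "masterck",
						 2, &range);
	if (IS_ERR(hw))
		goto err_free;

	sam9x60_pmc->phws[2] = hw;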
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 1f70cb164b06..6509d0934804 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -16,7 +16,7 @@ static u8 plla_out[] = { 0 };
static u16 plla_icpll[] = { 0 };
-static struct clk_range plla_outputs[] = {
+static const struct clk_range plla_outputs[] = {
{ .min = 600000000, .max = 1200000000 },
};
@@ -28,6 +28,13 @@ static const struct clk_pll_characteristics plla_characteristics = {
.out = plla_out,
};
+static const struct clk_pcr_layout sama5d2_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .gckcss_mask = GENMASK(10, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
static const struct {
char *n;
char *p;
@@ -125,6 +132,14 @@ static const struct {
.pll = true },
};
+static const struct clk_programmable_layout sama5d2_programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 4,
+ .css_mask = 0x7,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
static void __init sama5d2_pmc_setup(struct device_node *np)
{
struct clk_range range = CLK_RANGE(0, 0);
@@ -249,7 +264,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 6, i,
- &at91sam9x5_programmable_layout);
+ &sama5d2_programmable_layout);
if (IS_ERR(hw))
goto err_free;
}
@@ -266,6 +281,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d2_periphck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d2_pcr_layout,
sama5d2_periphck[i].n,
"masterck",
sama5d2_periphck[i].id,
@@ -278,6 +294,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d2_periph32ck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d2_pcr_layout,
sama5d2_periph32ck[i].n,
"h32mxck",
sama5d2_periph32ck[i].id,
@@ -296,6 +313,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sama5d2_pcr_layout,
sama5d2_gck[i].n,
parent_names, 6,
sama5d2_gck[i].id,
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index b645a9d59cdb..25b156d4e645 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -16,7 +16,7 @@ static u8 plla_out[] = { 0 };
static u16 plla_icpll[] = { 0 };
-static struct clk_range plla_outputs[] = {
+static const struct clk_range plla_outputs[] = {
{ .min = 600000000, .max = 1200000000 },
};
@@ -28,6 +28,12 @@ static const struct clk_pll_characteristics plla_characteristics = {
.out = plla_out,
};
+static const struct clk_pcr_layout sama5d4_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(6, 0),
+};
+
static const struct {
char *n;
char *p;
@@ -232,6 +238,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d4_periphck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d4_pcr_layout,
sama5d4_periphck[i].n,
"masterck",
sama5d4_periphck[i].id,
@@ -244,6 +251,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d4_periph32ck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d4_pcr_layout,
sama5d4_periph32ck[i].n,
"h32mxck",
sama5d4_periph32ck[i].id,
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index ab6ecefc49ad..e76b1d64e905 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -152,28 +152,6 @@ at91_clk_register_slow_osc(void __iomem *sckcr,
return hw;
}
-static void __init
-of_at91sam9x5_clk_slow_osc_setup(struct device_node *np, void __iomem *sckcr)
-{
- struct clk_hw *hw;
- const char *parent_name;
- const char *name = np->name;
- u32 startup;
- bool bypass;
-
- parent_name = of_clk_get_parent_name(np, 0);
- of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "atmel,startup-time-usec", &startup);
- bypass = of_property_read_bool(np, "atmel,osc-bypass");
-
- hw = at91_clk_register_slow_osc(sckcr, name, parent_name, startup,
- bypass);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-
static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -266,28 +244,6 @@ at91_clk_register_slow_rc_osc(void __iomem *sckcr,
return hw;
}
-static void __init
-of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np, void __iomem *sckcr)
-{
- struct clk_hw *hw;
- u32 frequency = 0;
- u32 accuracy = 0;
- u32 startup = 0;
- const char *name = np->name;
-
- of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "clock-frequency", &frequency);
- of_property_read_u32(np, "clock-accuracy", &accuracy);
- of_property_read_u32(np, "atmel,startup-time-usec", &startup);
-
- hw = at91_clk_register_slow_rc_osc(sckcr, name, frequency, accuracy,
- startup);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-
static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
@@ -365,68 +321,72 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
return hw;
}
-static void __init
-of_at91sam9x5_clk_slow_setup(struct device_node *np, void __iomem *sckcr)
+static void __init at91sam9x5_sckc_register(struct device_node *np,
+ unsigned int rc_osc_startup_us)
{
+ const char *parent_names[2] = { "slow_rc_osc", "slow_osc" };
+ void __iomem *regbase = of_iomap(np, 0);
+ struct device_node *child = NULL;
+ const char *xtal_name;
struct clk_hw *hw;
- const char *parent_names[2];
- unsigned int num_parents;
- const char *name = np->name;
+ bool bypass;
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > 2)
+ if (!regbase)
+ return;
+
+ hw = at91_clk_register_slow_rc_osc(regbase, parent_names[0], 32768,
+ 50000000, rc_osc_startup_us);
+ if (IS_ERR(hw))
return;
- of_clk_parent_fill(np, parent_names, num_parents);
+ xtal_name = of_clk_get_parent_name(np, 0);
+ if (!xtal_name) {
+ /* DT backward compatibility */
+ child = of_get_compatible_child(np, "atmel,at91sam9x5-clk-slow-osc");
+ if (!child)
+ return;
+
+ xtal_name = of_clk_get_parent_name(child, 0);
+ bypass = of_property_read_bool(child, "atmel,osc-bypass");
+
+ child = of_get_compatible_child(np, "atmel,at91sam9x5-clk-slow");
+ } else {
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+ }
+
+ if (!xtal_name)
+ return;
- of_property_read_string(np, "clock-output-names", &name);
+ hw = at91_clk_register_slow_osc(regbase, parent_names[1], xtal_name,
+ 1200000, bypass);
+ if (IS_ERR(hw))
+ return;
- hw = at91_clk_register_sam9x5_slow(sckcr, name, parent_names,
- num_parents);
+ hw = at91_clk_register_sam9x5_slow(regbase, "slowck", parent_names, 2);
if (IS_ERR(hw))
return;
of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-static const struct of_device_id sckc_clk_ids[] __initconst = {
- /* Slow clock */
- {
- .compatible = "atmel,at91sam9x5-clk-slow-osc",
- .data = of_at91sam9x5_clk_slow_osc_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-slow-rc-osc",
- .data = of_at91sam9x5_clk_slow_rc_osc_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-slow",
- .data = of_at91sam9x5_clk_slow_setup,
- },
- { /*sentinel*/ }
-};
+ /* DT backward compatibility */
+ if (child)
+ of_clk_add_hw_provider(child, of_clk_hw_simple_get, hw);
+}
static void __init of_at91sam9x5_sckc_setup(struct device_node *np)
{
- struct device_node *childnp;
- void (*clk_setup)(struct device_node *, void __iomem *);
- const struct of_device_id *clk_id;
- void __iomem *regbase = of_iomap(np, 0);
-
- if (!regbase)
- return;
-
- for_each_child_of_node(np, childnp) {
- clk_id = of_match_node(sckc_clk_ids, childnp);
- if (!clk_id)
- continue;
- clk_setup = clk_id->data;
- clk_setup(childnp, regbase);
- }
+ at91sam9x5_sckc_register(np, 75);
}
CLK_OF_DECLARE(at91sam9x5_clk_sckc, "atmel,at91sam9x5-sckc",
of_at91sam9x5_sckc_setup);
+static void __init of_sama5d3_sckc_setup(struct device_node *np)
+{
+ at91sam9x5_sckc_register(np, 500);
+}
+CLK_OF_DECLARE(sama5d3_clk_sckc, "atmel,sama5d3-sckc",
+ of_sama5d3_sckc_setup);
+
static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
{
struct clk_sama5d4_slow_osc *osc = to_clk_sama5d4_slow_osc(hw);
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 596136793fc4..42b4df6ba249 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -87,10 +87,10 @@ struct aspeed_clk_gate {
/* TODO: ask Aspeed about the actual parent data */
static const struct aspeed_gate_data aspeed_gates[] = {
/* clk rst name parent flags */
- [ASPEED_CLK_GATE_ECLK] = { 0, -1, "eclk-gate", "eclk", 0 }, /* Video Engine */
+ [ASPEED_CLK_GATE_ECLK] = { 0, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
- [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
+ [ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */
[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
@@ -113,6 +113,24 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_LHCCLK] = { 28, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
};
+static const char * const eclk_parent_names[] = {
+ "mpll",
+ "hpll",
+ "dpll",
+};
+
+static const struct clk_div_table ast2500_eclk_div_table[] = {
+ { 0x0, 2 },
+ { 0x1, 2 },
+ { 0x2, 3 },
+ { 0x3, 4 },
+ { 0x4, 5 },
+ { 0x5, 6 },
+ { 0x6, 7 },
+ { 0x7, 8 },
+ { 0 }
+};
+
static const struct clk_div_table ast2500_mac_div_table[] = {
{ 0x0, 4 }, /* Yep, really. Aspeed confirmed this is correct */
{ 0x1, 4 },
@@ -192,18 +210,21 @@ static struct clk_hw *aspeed_ast2500_calc_pll(const char *name, u32 val)
struct aspeed_clk_soc_data {
const struct clk_div_table *div_table;
+ const struct clk_div_table *eclk_div_table;
const struct clk_div_table *mac_div_table;
struct clk_hw *(*calc_pll)(const char *name, u32 val);
};
static const struct aspeed_clk_soc_data ast2500_data = {
.div_table = ast2500_div_table,
+ .eclk_div_table = ast2500_eclk_div_table,
.mac_div_table = ast2500_mac_div_table,
.calc_pll = aspeed_ast2500_calc_pll,
};
static const struct aspeed_clk_soc_data ast2400_data = {
.div_table = ast2400_div_table,
+ .eclk_div_table = ast2400_div_table,
.mac_div_table = ast2400_div_table,
.calc_pll = aspeed_ast2400_calc_pll,
};
@@ -522,6 +543,22 @@ static int aspeed_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_24M] = hw;
+ hw = clk_hw_register_mux(dev, "eclk-mux", eclk_parent_names,
+ ARRAY_SIZE(eclk_parent_names), 0,
+ scu_base + ASPEED_CLK_SELECTION, 2, 0x3, 0,
+ &aspeed_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_ECLK_MUX] = hw;
+
+ hw = clk_hw_register_divider_table(dev, "eclk", "eclk-mux", 0,
+ scu_base + ASPEED_CLK_SELECTION, 28,
+ 3, 0, soc_data->eclk_div_table,
+ &aspeed_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_ECLK] = hw;
+
/*
 * TODO: There are a number of clocks that are not included in this driver
* as more information is required:
@@ -531,7 +568,6 @@ static int aspeed_clk_probe(struct platform_device *pdev)
* RGMII
* RMII
* UART[1..5] clock source mux
- * Video Engine (ECLK) mux and clock divider
*/
for (i = 0; i < ARRAY_SIZE(aspeed_gates); i++) {
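For illustration only (not part of the patch): the additions above fill in the previously missing video-engine path, so the ECLK-related clocks now form roughly this chain, with the existing "eclk-gate" entry in aspeed_gates[] parented on the new divider.

	/*
	 * Sketch of the resulting video-engine clock chain:
	 *
	 *   mpll / hpll / dpll --> "eclk-mux" --> "eclk" (divider) --> "eclk-gate"
	 */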
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 46604214bba0..b06038b8f658 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -218,7 +218,7 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
hw = &composite->hw;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index e5a17265cfaf..3f9ff78c4a2a 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -25,6 +25,22 @@
* parent - fixed parent. No clk_set_parent support
*/
+static inline u32 clk_div_readl(struct clk_divider *divider)
+{
+ if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
+ return ioread32be(divider->reg);
+
+ return readl(divider->reg);
+}
+
+static inline void clk_div_writel(struct clk_divider *divider, u32 val)
+{
+ if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
+ iowrite32be(val, divider->reg);
+ else
+ writel(val, divider->reg);
+}
+
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
u8 width)
{
@@ -135,7 +151,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
struct clk_divider *divider = to_clk_divider(hw);
unsigned int val;
- val = clk_readl(divider->reg) >> divider->shift;
+ val = clk_div_readl(divider) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
@@ -370,7 +386,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
u32 val;
- val = clk_readl(divider->reg) >> divider->shift;
+ val = clk_div_readl(divider) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_ro_round_rate(hw, rate, prate, divider->table,
@@ -420,11 +436,11 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = clk_div_mask(divider->width) << (divider->shift + 16);
} else {
- val = clk_readl(divider->reg);
+ val = clk_div_readl(divider);
val &= ~(clk_div_mask(divider->width) << divider->shift);
}
val |= (u32)value << divider->shift;
- clk_writel(val, divider->reg);
+ clk_div_writel(divider, val);
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
@@ -475,7 +491,7 @@ static struct clk_hw *_register_divider(struct device *dev, const char *name,
init.ops = &clk_divider_ro_ops;
else
init.ops = &clk_divider_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
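For illustration only (not part of the patch): the new clk_div_readl()/clk_div_writel() helpers pick readl()/writel() or the big-endian ioread32be()/iowrite32be() accessors based on the new CLK_DIVIDER_BIG_ENDIAN flag, and the same pattern is repeated further down for the fractional-divider, gate, multiplier and mux basic types. A platform with a big-endian clock controller would opt in roughly as sketched here (all names are hypothetical):

#include <linux/clk-provider.h>

/* Hypothetical: 4-bit divider at bits [3:0] of a big-endian register */
static struct clk_hw *foo_register_div(struct device *dev, void __iomem *reg,
				       spinlock_t *lock)
{
	return clk_hw_register_divider(dev, "foo_div", "foo_parent", 0,
				       reg, 0, 4, CLK_DIVIDER_BIG_ENDIAN,
				       lock);
}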
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 241b3f8c61a9..8b343e59dc61 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -64,12 +64,14 @@ const struct clk_ops clk_fixed_factor_ops = {
};
EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
-struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned int mult, unsigned int div)
+static struct clk_hw *
+__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
+ const char *name, const char *parent_name, int index,
+ unsigned long flags, unsigned int mult, unsigned int div)
{
struct clk_fixed_factor *fix;
- struct clk_init_data init;
+ struct clk_init_data init = { };
+ struct clk_parent_data pdata = { .index = index };
struct clk_hw *hw;
int ret;
@@ -84,12 +86,18 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
init.name = name;
init.ops = &clk_fixed_factor_ops;
- init.flags = flags | CLK_IS_BASIC;
- init.parent_names = &parent_name;
+ init.flags = flags;
+ if (parent_name)
+ init.parent_names = &parent_name;
+ else
+ init.parent_data = &pdata;
init.num_parents = 1;
hw = &fix->hw;
- ret = clk_hw_register(dev, hw);
+ if (dev)
+ ret = clk_hw_register(dev, hw);
+ else
+ ret = of_clk_hw_register(np, hw);
if (ret) {
kfree(fix);
hw = ERR_PTR(ret);
@@ -97,6 +105,14 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
return hw;
}
+
+struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ flags, mult, div);
+}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
@@ -143,11 +159,10 @@ static const struct of_device_id set_rate_parent_matches[] = {
{ /* Sentinel */ },
};
-static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
+static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *clk_name = node->name;
- const char *parent_name;
unsigned long flags = 0;
u32 div, mult;
int ret;
@@ -165,30 +180,28 @@ static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
}
of_property_read_string(node, "clock-output-names", &clk_name);
- parent_name = of_clk_get_parent_name(node, 0);
if (of_match_node(set_rate_parent_matches, node))
flags |= CLK_SET_RATE_PARENT;
- clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
- mult, div);
- if (IS_ERR(clk)) {
+ hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, 0,
+ flags, mult, div);
+ if (IS_ERR(hw)) {
/*
- * If parent clock is not registered, registration would fail.
* Clear OF_POPULATED flag so that clock registration can be
* attempted again from probe function.
*/
of_node_clear_flag(node, OF_POPULATED);
- return clk;
+ return ERR_CAST(hw);
}
- ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
if (ret) {
- clk_unregister(clk);
+ clk_hw_unregister_fixed_factor(hw);
return ERR_PTR(ret);
}
- return clk;
+ return hw;
}
/**
@@ -203,17 +216,17 @@ CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
static int of_fixed_factor_clk_remove(struct platform_device *pdev)
{
- struct clk *clk = platform_get_drvdata(pdev);
+ struct clk_hw *clk = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
- clk_unregister_fixed_factor(clk);
+ clk_hw_unregister_fixed_factor(clk);
return 0;
}
static int of_fixed_factor_clk_probe(struct platform_device *pdev)
{
- struct clk *clk;
+ struct clk_hw *clk;
/*
* This function is not executed when of_fixed_factor_clk_setup
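For illustration only (not part of the patch): the DT setup path now describes its parent through struct clk_parent_data with a DT index instead of resolving a parent name via of_clk_get_parent_name(), and registers through of_clk_hw_register() when no struct device is available. A minimal sketch of the same idea for an arbitrary clock (names are hypothetical):

#include <linux/clk-provider.h>

/* Hypothetical: parent is whatever the node's first "clocks" entry points to */
static const struct clk_parent_data foo_parent = { .index = 0 };

static const struct clk_init_data foo_init = {
	.name		= "foo_clk",
	.ops		= &clk_fixed_factor_ops,
	.parent_data	= &foo_parent,
	.num_parents	= 1,
};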
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 00ef4f5e53fe..a7e4aef7a376 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -68,7 +68,7 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
init.name = name;
init.ops = &clk_fixed_rate_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index fdfe2e423d15..d81f1d2e9129 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -13,6 +13,22 @@
#include <linux/slab.h>
#include <linux/rational.h>
+static inline u32 clk_fd_readl(struct clk_fractional_divider *fd)
+{
+ if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
+ return ioread32be(fd->reg);
+
+ return readl(fd->reg);
+}
+
+static inline void clk_fd_writel(struct clk_fractional_divider *fd, u32 val)
+{
+ if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
+ iowrite32be(val, fd->reg);
+ else
+ writel(val, fd->reg);
+}
+
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -27,7 +43,7 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
else
__acquire(fd->lock);
- val = clk_readl(fd->reg);
+ val = clk_fd_readl(fd);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
@@ -115,10 +131,10 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
else
__acquire(fd->lock);
- val = clk_readl(fd->reg);
+ val = clk_fd_readl(fd);
val &= ~(fd->mmask | fd->nmask);
val |= (m << fd->mshift) | (n << fd->nshift);
- clk_writel(val, fd->reg);
+ clk_fd_writel(fd, val);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
@@ -151,7 +167,7 @@ struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
init.name = name;
init.ops = &clk_fractional_divider_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index f05823cd9b21..1b99fc962745 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -23,6 +23,22 @@
* parent - fixed parent. No clk_set_parent support
*/
+static inline u32 clk_gate_readl(struct clk_gate *gate)
+{
+ if (gate->flags & CLK_GATE_BIG_ENDIAN)
+ return ioread32be(gate->reg);
+
+ return readl(gate->reg);
+}
+
+static inline void clk_gate_writel(struct clk_gate *gate, u32 val)
+{
+ if (gate->flags & CLK_GATE_BIG_ENDIAN)
+ iowrite32be(val, gate->reg);
+ else
+ writel(val, gate->reg);
+}
+
/*
* It works on following logic:
*
@@ -55,7 +71,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
if (set)
reg |= BIT(gate->bit_idx);
} else {
- reg = clk_readl(gate->reg);
+ reg = clk_gate_readl(gate);
if (set)
reg |= BIT(gate->bit_idx);
@@ -63,7 +79,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
reg &= ~BIT(gate->bit_idx);
}
- clk_writel(reg, gate->reg);
+ clk_gate_writel(gate, reg);
if (gate->lock)
spin_unlock_irqrestore(gate->lock, flags);
@@ -88,7 +104,7 @@ int clk_gate_is_enabled(struct clk_hw *hw)
u32 reg;
struct clk_gate *gate = to_clk_gate(hw);
- reg = clk_readl(gate->reg);
+ reg = clk_gate_readl(gate);
/* if a set bit disables this clk, flip it before masking */
if (gate->flags & CLK_GATE_SET_TO_DISABLE)
@@ -142,7 +158,7 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_gate_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index c2f07f0d077c..9d930edd6516 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -137,7 +137,7 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
init.name = name;
init.ops = clk_gpio_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 8e4581004695..bd328b0eb243 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -272,7 +271,7 @@ static const struct clk_ops periclk_ops = {
.set_rate = clk_periclk_set_rate,
};
-static __init struct clk *hb_clk_init(struct device_node *node, const struct clk_ops *ops)
+static void __init hb_clk_init(struct device_node *node, const struct clk_ops *ops, unsigned long clkflags)
{
u32 reg;
struct hb_clk *hb_clk;
@@ -284,11 +283,11 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
rc = of_property_read_u32(node, "reg", &reg);
if (WARN_ON(rc))
- return NULL;
+ return;
hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL);
if (WARN_ON(!hb_clk))
- return NULL;
+ return;
/* Map system registers */
srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
@@ -301,7 +300,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
init.name = clk_name;
init.ops = ops;
- init.flags = 0;
+ init.flags = clkflags;
parent_name = of_clk_get_parent_name(node, 0);
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -311,33 +310,31 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
rc = clk_hw_register(NULL, &hb_clk->hw);
if (WARN_ON(rc)) {
kfree(hb_clk);
- return NULL;
+ return;
}
- rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
- return hb_clk->hw.clk;
+ of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
}
static void __init hb_pll_init(struct device_node *node)
{
- hb_clk_init(node, &clk_pll_ops);
+ hb_clk_init(node, &clk_pll_ops, 0);
}
CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);
static void __init hb_a9periph_init(struct device_node *node)
{
- hb_clk_init(node, &a9periphclk_ops);
+ hb_clk_init(node, &a9periphclk_ops, 0);
}
CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);
static void __init hb_a9bus_init(struct device_node *node)
{
- struct clk *clk = hb_clk_init(node, &a9bclk_ops);
- clk_prepare_enable(clk);
+ hb_clk_init(node, &a9bclk_ops, CLK_IS_CRITICAL);
}
CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);
static void __init hb_emmc_init(struct device_node *node)
{
- hb_clk_init(node, &periclk_ops);
+ hb_clk_init(node, &periclk_ops, 0);
}
CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);
diff --git a/drivers/clk/clk-lochnagar.c b/drivers/clk/clk-lochnagar.c
new file mode 100644
index 000000000000..a2f31e58ee48
--- /dev/null
+++ b/drivers/clk/clk-lochnagar.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lochnagar clock control
+ *
+ * Copyright (c) 2017-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lochnagar.h>
+#include <linux/mfd/lochnagar1_regs.h>
+#include <linux/mfd/lochnagar2_regs.h>
+
+#include <dt-bindings/clk/lochnagar.h>
+
+#define LOCHNAGAR_NUM_CLOCKS (LOCHNAGAR_SPDIF_CLKOUT + 1)
+
+struct lochnagar_clk {
+ const char * const name;
+ struct clk_hw hw;
+
+ struct lochnagar_clk_priv *priv;
+
+ u16 cfg_reg;
+ u16 ena_mask;
+
+ u16 src_reg;
+ u16 src_mask;
+};
+
+struct lochnagar_clk_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ enum lochnagar_type type;
+
+ const char **parents;
+ unsigned int nparents;
+
+ struct lochnagar_clk lclks[LOCHNAGAR_NUM_CLOCKS];
+};
+
+static const char * const lochnagar1_clk_parents[] = {
+ "ln-none",
+ "ln-spdif-mclk",
+ "ln-psia1-mclk",
+ "ln-psia2-mclk",
+ "ln-cdc-clkout",
+ "ln-dsp-clkout",
+ "ln-pmic-32k",
+ "ln-gf-mclk1",
+ "ln-gf-mclk3",
+ "ln-gf-mclk2",
+ "ln-gf-mclk4",
+};
+
+static const char * const lochnagar2_clk_parents[] = {
+ "ln-none",
+ "ln-cdc-clkout",
+ "ln-dsp-clkout",
+ "ln-pmic-32k",
+ "ln-spdif-mclk",
+ "ln-clk-12m",
+ "ln-clk-11m",
+ "ln-clk-24m",
+ "ln-clk-22m",
+ "ln-clk-8m",
+ "ln-usb-clk-24m",
+ "ln-gf-mclk1",
+ "ln-gf-mclk3",
+ "ln-gf-mclk2",
+ "ln-psia1-mclk",
+ "ln-psia2-mclk",
+ "ln-spdif-clkout",
+ "ln-adat-mclk",
+ "ln-usb-clk-12m",
+};
+
+#define LN1_CLK(ID, NAME, REG) \
+ [LOCHNAGAR_##ID] = { \
+ .name = NAME, \
+ .cfg_reg = LOCHNAGAR1_##REG, \
+ .ena_mask = LOCHNAGAR1_##ID##_ENA_MASK, \
+ .src_reg = LOCHNAGAR1_##ID##_SEL, \
+ .src_mask = LOCHNAGAR1_SRC_MASK, \
+ }
+
+#define LN2_CLK(ID, NAME) \
+ [LOCHNAGAR_##ID] = { \
+ .name = NAME, \
+ .cfg_reg = LOCHNAGAR2_##ID##_CTRL, \
+ .src_reg = LOCHNAGAR2_##ID##_CTRL, \
+ .ena_mask = LOCHNAGAR2_CLK_ENA_MASK, \
+ .src_mask = LOCHNAGAR2_CLK_SRC_MASK, \
+ }
+
+static const struct lochnagar_clk lochnagar1_clks[LOCHNAGAR_NUM_CLOCKS] = {
+ LN1_CLK(CDC_MCLK1, "ln-cdc-mclk1", CDC_AIF_CTRL2),
+ LN1_CLK(CDC_MCLK2, "ln-cdc-mclk2", CDC_AIF_CTRL2),
+ LN1_CLK(DSP_CLKIN, "ln-dsp-clkin", DSP_AIF),
+ LN1_CLK(GF_CLKOUT1, "ln-gf-clkout1", GF_AIF1),
+};
+
+static const struct lochnagar_clk lochnagar2_clks[LOCHNAGAR_NUM_CLOCKS] = {
+ LN2_CLK(CDC_MCLK1, "ln-cdc-mclk1"),
+ LN2_CLK(CDC_MCLK2, "ln-cdc-mclk2"),
+ LN2_CLK(DSP_CLKIN, "ln-dsp-clkin"),
+ LN2_CLK(GF_CLKOUT1, "ln-gf-clkout1"),
+ LN2_CLK(GF_CLKOUT2, "ln-gf-clkout2"),
+ LN2_CLK(PSIA1_MCLK, "ln-psia1-mclk"),
+ LN2_CLK(PSIA2_MCLK, "ln-psia2-mclk"),
+ LN2_CLK(SPDIF_MCLK, "ln-spdif-mclk"),
+ LN2_CLK(ADAT_MCLK, "ln-adat-mclk"),
+ LN2_CLK(SOUNDCARD_MCLK, "ln-soundcard-mclk"),
+};
+
+static inline struct lochnagar_clk *lochnagar_hw_to_lclk(struct clk_hw *hw)
+{
+ return container_of(hw, struct lochnagar_clk, hw);
+}
+
+static int lochnagar_clk_prepare(struct clk_hw *hw)
+{
+ struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
+ struct lochnagar_clk_priv *priv = lclk->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, lclk->cfg_reg,
+ lclk->ena_mask, lclk->ena_mask);
+ if (ret < 0)
+ dev_dbg(priv->dev, "Failed to prepare %s: %d\n",
+ lclk->name, ret);
+
+ return ret;
+}
+
+static void lochnagar_clk_unprepare(struct clk_hw *hw)
+{
+ struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
+ struct lochnagar_clk_priv *priv = lclk->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, lclk->cfg_reg, lclk->ena_mask, 0);
+ if (ret < 0)
+ dev_dbg(priv->dev, "Failed to unprepare %s: %d\n",
+ lclk->name, ret);
+}
+
+static int lochnagar_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
+ struct lochnagar_clk_priv *priv = lclk->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, lclk->src_reg, lclk->src_mask, index);
+ if (ret < 0)
+ dev_dbg(priv->dev, "Failed to reparent %s: %d\n",
+ lclk->name, ret);
+
+ return ret;
+}
+
+static u8 lochnagar_clk_get_parent(struct clk_hw *hw)
+{
+ struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
+ struct lochnagar_clk_priv *priv = lclk->priv;
+ struct regmap *regmap = priv->regmap;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, lclk->src_reg, &val);
+ if (ret < 0) {
+ dev_dbg(priv->dev, "Failed to read parent of %s: %d\n",
+ lclk->name, ret);
+ return priv->nparents;
+ }
+
+ val &= lclk->src_mask;
+
+ return val;
+}
+
+static const struct clk_ops lochnagar_clk_ops = {
+ .prepare = lochnagar_clk_prepare,
+ .unprepare = lochnagar_clk_unprepare,
+ .set_parent = lochnagar_clk_set_parent,
+ .get_parent = lochnagar_clk_get_parent,
+};
+
+static int lochnagar_init_parents(struct lochnagar_clk_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+ int i, j;
+
+ switch (priv->type) {
+ case LOCHNAGAR1:
+ memcpy(priv->lclks, lochnagar1_clks, sizeof(lochnagar1_clks));
+
+ priv->nparents = ARRAY_SIZE(lochnagar1_clk_parents);
+ priv->parents = devm_kmemdup(priv->dev, lochnagar1_clk_parents,
+ sizeof(lochnagar1_clk_parents),
+ GFP_KERNEL);
+ break;
+ case LOCHNAGAR2:
+ memcpy(priv->lclks, lochnagar2_clks, sizeof(lochnagar2_clks));
+
+ priv->nparents = ARRAY_SIZE(lochnagar2_clk_parents);
+ priv->parents = devm_kmemdup(priv->dev, lochnagar2_clk_parents,
+ sizeof(lochnagar2_clk_parents),
+ GFP_KERNEL);
+ break;
+ default:
+ dev_err(priv->dev, "Unknown Lochnagar type: %d\n", priv->type);
+ return -EINVAL;
+ }
+
+ if (!priv->parents)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->nparents; i++) {
+ j = of_property_match_string(np, "clock-names",
+ priv->parents[i]);
+ if (j >= 0)
+ priv->parents[i] = of_clk_get_parent_name(np, j);
+ }
+
+ return 0;
+}
+
+static struct clk_hw *
+lochnagar_of_clk_hw_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct lochnagar_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ARRAY_SIZE(priv->lclks)) {
+ dev_err(priv->dev, "Invalid index %u\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &priv->lclks[idx].hw;
+}
+
+static int lochnagar_init_clks(struct lochnagar_clk_priv *priv)
+{
+ struct clk_init_data clk_init = {
+ .ops = &lochnagar_clk_ops,
+ .parent_names = priv->parents,
+ .num_parents = priv->nparents,
+ };
+ struct lochnagar_clk *lclk;
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->lclks); i++) {
+ lclk = &priv->lclks[i];
+
+ if (!lclk->name)
+ continue;
+
+ clk_init.name = lclk->name;
+
+ lclk->priv = priv;
+ lclk->hw.init = &clk_init;
+
+ ret = devm_clk_hw_register(priv->dev, &lclk->hw);
+ if (ret) {
+ dev_err(priv->dev, "Failed to register %s: %d\n",
+ lclk->name, ret);
+ return ret;
+ }
+ }
+
+ ret = devm_of_clk_add_hw_provider(priv->dev, lochnagar_of_clk_hw_get,
+ priv);
+ if (ret < 0)
+ dev_err(priv->dev, "Failed to register provider: %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id lochnagar_of_match[] = {
+ { .compatible = "cirrus,lochnagar1-clk", .data = (void *)LOCHNAGAR1 },
+ { .compatible = "cirrus,lochnagar2-clk", .data = (void *)LOCHNAGAR2 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lochnagar_of_match);
+
+static int lochnagar_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct lochnagar_clk_priv *priv;
+ const struct of_device_id *of_id;
+ int ret;
+
+ of_id = of_match_device(lochnagar_of_match, dev);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ priv->type = (enum lochnagar_type)of_id->data;
+
+ ret = lochnagar_init_parents(priv);
+ if (ret)
+ return ret;
+
+ return lochnagar_init_clks(priv);
+}
+
+static struct platform_driver lochnagar_clk_driver = {
+ .driver = {
+ .name = "lochnagar-clk",
+ .of_match_table = lochnagar_of_match,
+ },
+ .probe = lochnagar_clk_probe,
+};
+module_platform_driver(lochnagar_clk_driver);
+
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Clock driver for Cirrus Logic Lochnagar Board");
+MODULE_LICENSE("GPL v2");
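For illustration only (not part of the patch): one entry of the LN2_CLK() table above expands to roughly the initializer below; the register and mask macros come from linux/mfd/lochnagar2_regs.h.

	/* LN2_CLK(CDC_MCLK1, "ln-cdc-mclk1") expands to approximately: */
	[LOCHNAGAR_CDC_MCLK1] = {
		.name		= "ln-cdc-mclk1",
		.cfg_reg	= LOCHNAGAR2_CDC_MCLK1_CTRL,
		.src_reg	= LOCHNAGAR2_CDC_MCLK1_CTRL,
		.ena_mask	= LOCHNAGAR2_CLK_ENA_MASK,
		.src_mask	= LOCHNAGAR2_CLK_SRC_MASK,
	},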
diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c
new file mode 100644
index 000000000000..5fc78faf820c
--- /dev/null
+++ b/drivers/clk/clk-milbeaut.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Socionext Inc.
+ * Copyright (C) 2016 Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define M10V_CLKSEL1 0x0
+#define CLKSEL(n) (((n) - 1) * 4 + M10V_CLKSEL1)
+
+#define M10V_PLL1 "pll1"
+#define M10V_PLL1DIV2 "pll1-2"
+#define M10V_PLL2 "pll2"
+#define M10V_PLL2DIV2 "pll2-2"
+#define M10V_PLL6 "pll6"
+#define M10V_PLL6DIV2 "pll6-2"
+#define M10V_PLL6DIV3 "pll6-3"
+#define M10V_PLL7 "pll7"
+#define M10V_PLL7DIV2 "pll7-2"
+#define M10V_PLL7DIV5 "pll7-5"
+#define M10V_PLL9 "pll9"
+#define M10V_PLL10 "pll10"
+#define M10V_PLL10DIV2 "pll10-2"
+#define M10V_PLL11 "pll11"
+
+#define M10V_SPI_PARENT0 "spi-parent0"
+#define M10V_SPI_PARENT1 "spi-parent1"
+#define M10V_SPI_PARENT2 "spi-parent2"
+#define M10V_UHS1CLK2_PARENT0 "uhs1clk2-parent0"
+#define M10V_UHS1CLK2_PARENT1 "uhs1clk2-parent1"
+#define M10V_UHS1CLK2_PARENT2 "uhs1clk2-parent2"
+#define M10V_UHS1CLK1_PARENT0 "uhs1clk1-parent0"
+#define M10V_UHS1CLK1_PARENT1 "uhs1clk1-parent1"
+#define M10V_NFCLK_PARENT0 "nfclk-parent0"
+#define M10V_NFCLK_PARENT1 "nfclk-parent1"
+#define M10V_NFCLK_PARENT2 "nfclk-parent2"
+#define M10V_NFCLK_PARENT3 "nfclk-parent3"
+#define M10V_NFCLK_PARENT4 "nfclk-parent4"
+#define M10V_NFCLK_PARENT5 "nfclk-parent5"
+
+#define M10V_DCHREQ 1
+#define M10V_UPOLL_RATE 1
+#define M10V_UTIMEOUT 250
+
+#define M10V_EMMCCLK_ID 0
+#define M10V_ACLK_ID 1
+#define M10V_HCLK_ID 2
+#define M10V_PCLK_ID 3
+#define M10V_RCLK_ID 4
+#define M10V_SPICLK_ID 5
+#define M10V_NFCLK_ID 6
+#define M10V_UHS1CLK2_ID 7
+#define M10V_NUM_CLKS 8
+
+#define to_m10v_div(_hw) container_of(_hw, struct m10v_clk_divider, hw)
+
+static struct clk_hw_onecell_data *m10v_clk_data;
+
+static DEFINE_SPINLOCK(m10v_crglock);
+
+struct m10v_clk_div_factors {
+ const char *name;
+ const char *parent_name;
+ u32 offset;
+ u8 shift;
+ u8 width;
+ const struct clk_div_table *table;
+ unsigned long div_flags;
+ int onecell_idx;
+};
+
+struct m10v_clk_div_fixed_data {
+ const char *name;
+ const char *parent_name;
+ u8 div;
+ u8 mult;
+ int onecell_idx;
+};
+
+struct m10v_clk_mux_factors {
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ u32 offset;
+ u8 shift;
+ u8 mask;
+ u32 *table;
+ unsigned long mux_flags;
+ int onecell_idx;
+};
+
+static const struct clk_div_table emmcclk_table[] = {
+ { .val = 0, .div = 8 },
+ { .val = 1, .div = 9 },
+ { .val = 2, .div = 10 },
+ { .val = 3, .div = 15 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table mclk400_table[] = {
+ { .val = 1, .div = 2 },
+ { .val = 3, .div = 4 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table mclk200_table[] = {
+ { .val = 3, .div = 4 },
+ { .val = 7, .div = 8 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table aclk400_table[] = {
+ { .val = 1, .div = 2 },
+ { .val = 3, .div = 4 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table aclk300_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 3 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table aclk_table[] = {
+ { .val = 3, .div = 4 },
+ { .val = 7, .div = 8 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table aclkexs_table[] = {
+ { .val = 3, .div = 4 },
+ { .val = 4, .div = 5 },
+ { .val = 5, .div = 6 },
+ { .val = 7, .div = 8 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table hclk_table[] = {
+ { .val = 7, .div = 8 },
+ { .val = 15, .div = 16 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table hclkbmh_table[] = {
+ { .val = 3, .div = 4 },
+ { .val = 7, .div = 8 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table pclk_table[] = {
+ { .val = 15, .div = 16 },
+ { .val = 31, .div = 32 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table rclk_table[] = {
+ { .val = 0, .div = 8 },
+ { .val = 1, .div = 16 },
+ { .val = 2, .div = 24 },
+ { .val = 3, .div = 32 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table uhs1clk0_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 3 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { .val = 4, .div = 16 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table uhs2clk_table[] = {
+ { .val = 0, .div = 9 },
+ { .val = 1, .div = 10 },
+ { .val = 2, .div = 11 },
+ { .val = 3, .div = 12 },
+ { .val = 4, .div = 13 },
+ { .val = 5, .div = 14 },
+ { .val = 6, .div = 16 },
+ { .val = 7, .div = 18 },
+ { .div = 0 },
+};
+
+static u32 spi_mux_table[] = {0, 1, 2};
+static const char * const spi_mux_names[] = {
+ M10V_SPI_PARENT0, M10V_SPI_PARENT1, M10V_SPI_PARENT2
+};
+
+static u32 uhs1clk2_mux_table[] = {2, 3, 4, 8};
+static const char * const uhs1clk2_mux_names[] = {
+ M10V_UHS1CLK2_PARENT0, M10V_UHS1CLK2_PARENT1,
+ M10V_UHS1CLK2_PARENT2, M10V_PLL6DIV2
+};
+
+static u32 uhs1clk1_mux_table[] = {3, 4, 8};
+static const char * const uhs1clk1_mux_names[] = {
+ M10V_UHS1CLK1_PARENT0, M10V_UHS1CLK1_PARENT1, M10V_PLL6DIV2
+};
+
+static u32 nfclk_mux_table[] = {0, 1, 2, 3, 4, 8};
+static const char * const nfclk_mux_names[] = {
+ M10V_NFCLK_PARENT0, M10V_NFCLK_PARENT1, M10V_NFCLK_PARENT2,
+ M10V_NFCLK_PARENT3, M10V_NFCLK_PARENT4, M10V_NFCLK_PARENT5
+};
+
+static const struct m10v_clk_div_fixed_data m10v_pll_fixed_data[] = {
+ {M10V_PLL1, NULL, 1, 40, -1},
+ {M10V_PLL2, NULL, 1, 30, -1},
+ {M10V_PLL6, NULL, 1, 35, -1},
+ {M10V_PLL7, NULL, 1, 40, -1},
+ {M10V_PLL9, NULL, 1, 33, -1},
+ {M10V_PLL10, NULL, 5, 108, -1},
+ {M10V_PLL10DIV2, M10V_PLL10, 2, 1, -1},
+ {M10V_PLL11, NULL, 2, 75, -1},
+};
+
+static const struct m10v_clk_div_fixed_data m10v_div_fixed_data[] = {
+ {"usb2", NULL, 2, 1, -1},
+ {"pcisuppclk", NULL, 20, 1, -1},
+ {M10V_PLL1DIV2, M10V_PLL1, 2, 1, -1},
+ {M10V_PLL2DIV2, M10V_PLL2, 2, 1, -1},
+ {M10V_PLL6DIV2, M10V_PLL6, 2, 1, -1},
+ {M10V_PLL6DIV3, M10V_PLL6, 3, 1, -1},
+ {M10V_PLL7DIV2, M10V_PLL7, 2, 1, -1},
+ {M10V_PLL7DIV5, M10V_PLL7, 5, 1, -1},
+ {"ca7wd", M10V_PLL2DIV2, 12, 1, -1},
+ {"pclkca7wd", M10V_PLL1DIV2, 16, 1, -1},
+ {M10V_SPI_PARENT0, M10V_PLL10DIV2, 2, 1, -1},
+ {M10V_SPI_PARENT1, M10V_PLL10DIV2, 4, 1, -1},
+ {M10V_SPI_PARENT2, M10V_PLL7DIV2, 8, 1, -1},
+ {M10V_UHS1CLK2_PARENT0, M10V_PLL7, 4, 1, -1},
+ {M10V_UHS1CLK2_PARENT1, M10V_PLL7, 8, 1, -1},
+ {M10V_UHS1CLK2_PARENT2, M10V_PLL7, 16, 1, -1},
+ {M10V_UHS1CLK1_PARENT0, M10V_PLL7, 8, 1, -1},
+ {M10V_UHS1CLK1_PARENT1, M10V_PLL7, 16, 1, -1},
+ {M10V_NFCLK_PARENT0, M10V_PLL7DIV2, 8, 1, -1},
+ {M10V_NFCLK_PARENT1, M10V_PLL7DIV2, 10, 1, -1},
+ {M10V_NFCLK_PARENT2, M10V_PLL7DIV2, 13, 1, -1},
+ {M10V_NFCLK_PARENT3, M10V_PLL7DIV2, 16, 1, -1},
+ {M10V_NFCLK_PARENT4, M10V_PLL7DIV2, 40, 1, -1},
+ {M10V_NFCLK_PARENT5, M10V_PLL7DIV5, 10, 1, -1},
+};
+
+static const struct m10v_clk_div_factors m10v_div_factor_data[] = {
+ {"emmc", M10V_PLL11, CLKSEL(1), 28, 3, emmcclk_table, 0,
+ M10V_EMMCCLK_ID},
+ {"mclk400", M10V_PLL1DIV2, CLKSEL(10), 7, 3, mclk400_table, 0, -1},
+ {"mclk200", M10V_PLL1DIV2, CLKSEL(10), 3, 4, mclk200_table, 0, -1},
+ {"aclk400", M10V_PLL1DIV2, CLKSEL(10), 0, 3, aclk400_table, 0, -1},
+ {"aclk300", M10V_PLL2DIV2, CLKSEL(12), 0, 2, aclk300_table, 0, -1},
+ {"aclk", M10V_PLL1DIV2, CLKSEL(9), 20, 4, aclk_table, 0, M10V_ACLK_ID},
+ {"aclkexs", M10V_PLL1DIV2, CLKSEL(9), 16, 4, aclkexs_table, 0, -1},
+ {"hclk", M10V_PLL1DIV2, CLKSEL(9), 7, 5, hclk_table, 0, M10V_HCLK_ID},
+ {"hclkbmh", M10V_PLL1DIV2, CLKSEL(9), 12, 4, hclkbmh_table, 0, -1},
+ {"pclk", M10V_PLL1DIV2, CLKSEL(9), 0, 7, pclk_table, 0, M10V_PCLK_ID},
+ {"uhs1clk0", M10V_PLL7, CLKSEL(1), 3, 5, uhs1clk0_table, 0, -1},
+ {"uhs2clk", M10V_PLL6DIV3, CLKSEL(1), 18, 4, uhs2clk_table, 0, -1},
+};
+
+static const struct m10v_clk_mux_factors m10v_mux_factor_data[] = {
+ {"spi", spi_mux_names, ARRAY_SIZE(spi_mux_names),
+ CLKSEL(8), 3, 7, spi_mux_table, 0, M10V_SPICLK_ID},
+ {"uhs1clk2", uhs1clk2_mux_names, ARRAY_SIZE(uhs1clk2_mux_names),
+ CLKSEL(1), 13, 31, uhs1clk2_mux_table, 0, M10V_UHS1CLK2_ID},
+ {"uhs1clk1", uhs1clk1_mux_names, ARRAY_SIZE(uhs1clk1_mux_names),
+ CLKSEL(1), 8, 31, uhs1clk1_mux_table, 0, -1},
+ {"nfclk", nfclk_mux_names, ARRAY_SIZE(nfclk_mux_names),
+ CLKSEL(1), 22, 127, nfclk_mux_table, 0, M10V_NFCLK_ID},
+};
+
+static u8 m10v_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+
+ val = readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+
+ return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
+}
+
+static int m10v_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+ unsigned long flags = 0;
+ u32 reg;
+ u32 write_en = BIT(fls(mux->mask) - 1);
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ reg = readl(mux->reg);
+ reg &= ~(mux->mask << mux->shift);
+
+ val = (val | write_en) << mux->shift;
+ reg |= val;
+ writel(reg, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return 0;
+}
+
+static const struct clk_ops m10v_mux_ops = {
+ .get_parent = m10v_mux_get_parent,
+ .set_parent = m10v_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk_hw *m10v_clk_hw_register_mux(struct device *dev,
+ const char *name, const char * const *parent_names,
+ u8 num_parents, unsigned long flags, void __iomem *reg,
+ u8 shift, u32 mask, u8 clk_mux_flags, u32 *table,
+ spinlock_t *lock)
+{
+ struct clk_mux *mux;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &m10v_mux_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = mask;
+ mux->flags = clk_mux_flags;
+ mux->lock = lock;
+ mux->table = table;
+ mux->hw.init = &init;
+
+ hw = &mux->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(mux);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+
+}
+
+struct m10v_clk_divider {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+ spinlock_t *lock;
+ void __iomem *write_valid_reg;
+};
+
+static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct m10v_clk_divider *divider = to_m10v_div(hw);
+ unsigned int val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct m10v_clk_divider *divider = to_m10v_div(hw);
+
+ /* if read only, just return current value */
+ if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_ro_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags,
+ val);
+ }
+
+ return divider_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags);
+}
+
+static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct m10v_clk_divider *divider = to_m10v_div(hw);
+ int value;
+ unsigned long flags = 0;
+ u32 val;
+ u32 write_en = BIT(divider->width - 1);
+
+ value = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ val = readl(divider->reg);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
+
+ val |= ((u32)value | write_en) << divider->shift;
+ writel(val, divider->reg);
+
+ if (divider->write_valid_reg) {
+ writel(M10V_DCHREQ, divider->write_valid_reg);
+ if (readl_poll_timeout(divider->write_valid_reg, val,
+ !val, M10V_UPOLL_RATE, M10V_UTIMEOUT))
+ pr_err("%s:%s couldn't stabilize\n",
+ __func__, divider->hw.init->name);
+ }
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return 0;
+}
+
+static const struct clk_ops m10v_clk_divider_ops = {
+ .recalc_rate = m10v_clk_divider_recalc_rate,
+ .round_rate = m10v_clk_divider_round_rate,
+ .set_rate = m10v_clk_divider_set_rate,
+};
+
+static struct clk_hw *m10v_clk_hw_register_divider(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock, void __iomem *write_valid_reg)
+{
+ struct m10v_clk_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &m10v_clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+ div->write_valid_reg = write_valid_reg;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static void m10v_reg_div_pre(const struct m10v_clk_div_factors *factors,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base)
+{
+ struct clk_hw *hw;
+ void __iomem *write_valid_reg;
+
+	/*
+	 * Dividers in CLKSEL(9) and CLKSEL(10) need an additional write to
+	 * CLKSEL(11) before the new value takes effect.
+	 */
+ if ((factors->offset == CLKSEL(9)) || (factors->offset == CLKSEL(10)))
+ write_valid_reg = base + CLKSEL(11);
+ else
+ write_valid_reg = NULL;
+
+ hw = m10v_clk_hw_register_divider(NULL, factors->name,
+ factors->parent_name,
+ CLK_SET_RATE_PARENT,
+ base + factors->offset,
+ factors->shift,
+ factors->width, factors->div_flags,
+ factors->table,
+ &m10v_crglock, write_valid_reg);
+
+ if (factors->onecell_idx >= 0)
+ clk_data->hws[factors->onecell_idx] = hw;
+}
+
+static void m10v_reg_fixed_pre(const struct m10v_clk_div_fixed_data *factors,
+ struct clk_hw_onecell_data *clk_data,
+ const char *parent_name)
+{
+ struct clk_hw *hw;
+ const char *pn = factors->parent_name ?
+ factors->parent_name : parent_name;
+
+ hw = clk_hw_register_fixed_factor(NULL, factors->name, pn, 0,
+ factors->mult, factors->div);
+
+ if (factors->onecell_idx >= 0)
+ clk_data->hws[factors->onecell_idx] = hw;
+}
+
+static void m10v_reg_mux_pre(const struct m10v_clk_mux_factors *factors,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base)
+{
+ struct clk_hw *hw;
+
+ hw = m10v_clk_hw_register_mux(NULL, factors->name,
+ factors->parent_names,
+ factors->num_parents,
+ CLK_SET_RATE_PARENT,
+ base + factors->offset, factors->shift,
+ factors->mask, factors->mux_flags,
+ factors->table, &m10v_crglock);
+
+ if (factors->onecell_idx >= 0)
+ clk_data->hws[factors->onecell_idx] = hw;
+}
+
+static int m10v_clk_probe(struct platform_device *pdev)
+{
+ int id;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+ const char *parent_name;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ for (id = 0; id < ARRAY_SIZE(m10v_div_factor_data); ++id)
+ m10v_reg_div_pre(&m10v_div_factor_data[id],
+ m10v_clk_data, base);
+
+ for (id = 0; id < ARRAY_SIZE(m10v_div_fixed_data); ++id)
+ m10v_reg_fixed_pre(&m10v_div_fixed_data[id],
+ m10v_clk_data, parent_name);
+
+ for (id = 0; id < ARRAY_SIZE(m10v_mux_factor_data); ++id)
+ m10v_reg_mux_pre(&m10v_mux_factor_data[id],
+ m10v_clk_data, base);
+
+ for (id = 0; id < M10V_NUM_CLKS; id++) {
+ if (IS_ERR(m10v_clk_data->hws[id]))
+ return PTR_ERR(m10v_clk_data->hws[id]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id m10v_clk_dt_ids[] = {
+ { .compatible = "socionext,milbeaut-m10v-ccu", },
+ { }
+};
+
+static struct platform_driver m10v_clk_driver = {
+ .probe = m10v_clk_probe,
+ .driver = {
+ .name = "m10v-ccu",
+ .of_match_table = m10v_clk_dt_ids,
+ },
+};
+builtin_platform_driver(m10v_clk_driver);
+
+static void __init m10v_cc_init(struct device_node *np)
+{
+ int id;
+ void __iomem *base;
+ const char *parent_name;
+ struct clk_hw *hw;
+
+ m10v_clk_data = kzalloc(struct_size(m10v_clk_data, hws,
+ M10V_NUM_CLKS),
+ GFP_KERNEL);
+
+ if (!m10v_clk_data)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ kfree(m10v_clk_data);
+ return;
+ }
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name) {
+ kfree(m10v_clk_data);
+ iounmap(base);
+ return;
+ }
+
+ /*
+ * This way all clocks fetched before the platform device probes,
+ * except those we assign here for early use, will be deferred.
+ */
+ for (id = 0; id < M10V_NUM_CLKS; id++)
+ m10v_clk_data->hws[id] = ERR_PTR(-EPROBE_DEFER);
+
+	/*
+	 * The PLLs are configured by the bootloader, so this driver registers
+	 * them as fixed-factor clocks.
+	 */
+ for (id = 0; id < ARRAY_SIZE(m10v_pll_fixed_data); ++id)
+ m10v_reg_fixed_pre(&m10v_pll_fixed_data[id],
+ m10v_clk_data, parent_name);
+
+	/*
+	 * The timer consumes "rclk", so it must be registered here rather
+	 * than deferred to the platform driver probe.
+	 */
+ hw = m10v_clk_hw_register_divider(NULL, "rclk", M10V_PLL10DIV2, 0,
+ base + CLKSEL(1), 0, 3, 0, rclk_table,
+ &m10v_crglock, NULL);
+ m10v_clk_data->hws[M10V_RCLK_ID] = hw;
+
+ m10v_clk_data->num = M10V_NUM_CLKS;
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, m10v_clk_data);
+}
+CLK_OF_DECLARE_DRIVER(m10v_cc, "socionext,milbeaut-m10v-ccu", m10v_cc_init);
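For illustration only (not part of the patch): in m10v_clk_divider_set_rate() and m10v_mux_set_parent() above, the code ORs the top bit of the field (BIT(width - 1), or the highest bit of the mux mask) into the value before writing, treating that bit as a write-enable strobe. A worked example for the "rclk" divider registered in m10v_cc_init() (shift 0, width 3):

	/* rclk: table value 1 selects divide-by-16 */
	u32 write_en = BIT(3 - 1);	/* 0x4 */
	u32 field = 1 | write_en;	/* 0x5 is programmed into bits [2:0] */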
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index 3c86f859c199..94470b4eadf4 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -11,6 +11,22 @@
#include <linux/of.h>
#include <linux/slab.h>
+static inline u32 clk_mult_readl(struct clk_multiplier *mult)
+{
+ if (mult->flags & CLK_MULTIPLIER_BIG_ENDIAN)
+ return ioread32be(mult->reg);
+
+ return readl(mult->reg);
+}
+
+static inline void clk_mult_writel(struct clk_multiplier *mult, u32 val)
+{
+ if (mult->flags & CLK_MULTIPLIER_BIG_ENDIAN)
+ iowrite32be(val, mult->reg);
+ else
+ writel(val, mult->reg);
+}
+
static unsigned long __get_mult(struct clk_multiplier *mult,
unsigned long rate,
unsigned long parent_rate)
@@ -27,7 +43,7 @@ static unsigned long clk_multiplier_recalc_rate(struct clk_hw *hw,
struct clk_multiplier *mult = to_clk_multiplier(hw);
unsigned long val;
- val = clk_readl(mult->reg) >> mult->shift;
+ val = clk_mult_readl(mult) >> mult->shift;
val &= GENMASK(mult->width - 1, 0);
if (!val && mult->flags & CLK_MULTIPLIER_ZERO_BYPASS)
@@ -118,10 +134,10 @@ static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
else
__acquire(mult->lock);
- val = clk_readl(mult->reg);
+ val = clk_mult_readl(mult);
val &= ~GENMASK(mult->width + mult->shift - 1, mult->shift);
val |= factor << mult->shift;
- clk_writel(val, mult->reg);
+ clk_mult_writel(mult, val);
if (mult->lock)
spin_unlock_irqrestore(mult->lock, flags);
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 2ad2df2e8909..66e91f740508 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -23,6 +23,22 @@
* parent - parent is adjustable through clk_set_parent
*/
+static inline u32 clk_mux_readl(struct clk_mux *mux)
+{
+ if (mux->flags & CLK_MUX_BIG_ENDIAN)
+ return ioread32be(mux->reg);
+
+ return readl(mux->reg);
+}
+
+static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
+{
+ if (mux->flags & CLK_MUX_BIG_ENDIAN)
+ iowrite32be(val, mux->reg);
+ else
+ writel(val, mux->reg);
+}
+
int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
unsigned int val)
{
@@ -73,7 +89,7 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
struct clk_mux *mux = to_clk_mux(hw);
u32 val;
- val = clk_readl(mux->reg) >> mux->shift;
+ val = clk_mux_readl(mux) >> mux->shift;
val &= mux->mask;
return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
@@ -94,12 +110,12 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
if (mux->flags & CLK_MUX_HIWORD_MASK) {
reg = mux->mask << (mux->shift + 16);
} else {
- reg = clk_readl(mux->reg);
+ reg = clk_mux_readl(mux);
reg &= ~(mux->mask << mux->shift);
}
val = val << mux->shift;
reg |= val;
- clk_writel(reg, mux->reg);
+ clk_mux_writel(mux, reg);
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
@@ -159,7 +175,7 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
init.ops = &clk_mux_ro_ops;
else
init.ops = &clk_mux_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 8cb9d117fdbf..02b472a1f9b0 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -101,7 +101,7 @@ static int clk_pwm_probe(struct platform_device *pdev)
init.name = clk_name;
init.ops = &clk_pwm_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
init.num_parents = 0;
clk_pwm->pwm = pwm;
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 1212a9be7e80..4739a47ec8bd 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -34,6 +34,7 @@
#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1 4
#define CGB_PLL2 5
+#define MAX_PLL_DIV 16
struct clockgen_pll_div {
struct clk *clk;
@@ -41,7 +42,7 @@ struct clockgen_pll_div {
};
struct clockgen_pll {
- struct clockgen_pll_div div[8];
+ struct clockgen_pll_div div[MAX_PLL_DIV];
};
#define CLKSEL_VALID 1
@@ -79,7 +80,7 @@ struct clockgen_chipinfo {
const struct clockgen_muxinfo *cmux_groups[2];
const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
void (*init_periph)(struct clockgen *cg);
- int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
+ int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */
u32 pll_mask; /* 1 << n bit set if PLL n is valid */
u32 flags; /* CG_xxx */
};
@@ -245,6 +246,58 @@ static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
},
};
+static const struct clockgen_muxinfo ls1028a_hwa1 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1028a_hwa2 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1028a_hwa3 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1028a_hwa4 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ },
+};
+
static const struct clockgen_muxinfo ls1043a_hwa1 = {
{
{},
@@ -508,6 +561,21 @@ static const struct clockgen_chipinfo chipinfo[] = {
.pll_mask = 0x03,
},
{
+ .compat = "fsl,ls1028a-clockgen",
+ .cmux_groups = {
+ &clockgen2_cmux_cga12
+ },
+ .hwaccel = {
+ &ls1028a_hwa1, &ls1028a_hwa2,
+ &ls1028a_hwa3, &ls1028a_hwa4
+ },
+ .cmux_to_group = {
+ 0, 0, 0, 0, -1
+ },
+ .pll_mask = 0x07,
+ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
+ },
+ {
.compat = "fsl,ls1043a-clockgen",
.init_periph = t2080_init_periph,
.cmux_groups = {
@@ -601,7 +669,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
&p4080_cmux_grp1, &p4080_cmux_grp2
},
.cmux_to_group = {
- 0, 0, 0, 0, 1, 1, 1, 1
+ 0, 0, 0, 0, 1, 1, 1, 1, -1
},
.pll_mask = 0x1f,
},
@@ -1128,7 +1196,7 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
int ret;
/*
- * For platform PLL, there are 8 divider clocks.
+ * For platform PLL, there are MAX_PLL_DIV divider clocks.
* For core PLL, there are 4 divider clocks at most.
*/
if (idx != PLATFORM_PLL && i >= 4)
@@ -1423,6 +1491,7 @@ CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index cdaa567c8042..fdac33a9be2f 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -300,6 +300,85 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
+static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
+ { STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 20, "dtcmram", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" },
+
+ { STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 1, "jpeg", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" },
+ { STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" },
+
+ { STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div",
+ CLK_IGNORE_UNUSED },
+ { STM32F4_RCC_AHB3ENR, 1, "qspi", "ahb_div",
+ CLK_IGNORE_UNUSED },
+
+ { STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 10, "rtcapb", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 13, "can3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 16, "spdifrx", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 27, "cec", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" },
+
+ { STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 7, "sdmmc2", "sdmux2" },
+ { STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 11, "sdmmc1", "sdmux1" },
+ { STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
+};
+
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
@@ -318,6 +397,10 @@ static const u64 stm32f746_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
0x0000000000000003ull,
0x04f77f833e01c9ffull };
+static const u64 stm32f769_gate_map[MAX_GATE_MAP] = { 0x000000f37ef417ffull,
+ 0x0000000000000003ull,
+ 0x44f77f833e01edffull };
+
static const u64 *stm32f4_gate_map;
static struct clk_hw **clks;
@@ -1048,6 +1131,10 @@ static const char *rtc_parents[4] = {
"no-clock", "lse", "lsi", "hse-rtc"
};
+static const char *pll_src = "pll-src";
+
+static const char *pllsrc_parent[2] = { "hsi", NULL };
+
static const char *dsi_parent[2] = { NULL, "pll-r" };
static const char *lcd_parent[1] = { "pllsai-r-div" };
@@ -1072,6 +1159,9 @@ static const char *uart_parents2[4] = { "apb1_div", "sys", "hsi", "lse" };
static const char *i2c_parents[4] = { "apb1_div", "sys", "hsi", "no-clock" };
+static const char * const dfsdm1_src[] = { "apb2_div", "sys" };
+static const char * const adsfdm1_parent[] = { "sai1_clk", "sai2_clk" };
+
struct stm32_aux_clk {
int idx;
const char *name;
@@ -1313,6 +1403,177 @@ static const struct stm32_aux_clk stm32f746_aux_clk[] = {
},
};
+static const struct stm32_aux_clk stm32f769_aux_clk[] = {
+ {
+ CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent),
+ NO_MUX, 0, 0,
+ STM32F4_RCC_APB2ENR, 26,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents),
+ STM32F4_RCC_CFGR, 23, 1,
+ NO_GATE, 0,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_SAI1, "sai1_clk", sai_parents, ARRAY_SIZE(sai_parents),
+ STM32F4_RCC_DCKCFGR, 20, 3,
+ STM32F4_RCC_APB2ENR, 22,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_SAI2, "sai2_clk", sai_parents, ARRAY_SIZE(sai_parents),
+ STM32F4_RCC_DCKCFGR, 22, 3,
+ STM32F4_RCC_APB2ENR, 23,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ NO_IDX, "pll48", pll48_parents, ARRAY_SIZE(pll48_parents),
+ STM32F7_RCC_DCKCFGR2, 27, 1,
+ NO_GATE, 0,
+ 0
+ },
+ {
+ NO_IDX, "sdmux1", sdmux_parents, ARRAY_SIZE(sdmux_parents),
+ STM32F7_RCC_DCKCFGR2, 28, 1,
+ NO_GATE, 0,
+ 0
+ },
+ {
+ NO_IDX, "sdmux2", sdmux_parents, ARRAY_SIZE(sdmux_parents),
+ STM32F7_RCC_DCKCFGR2, 29, 1,
+ NO_GATE, 0,
+ 0
+ },
+ {
+ CLK_HDMI_CEC, "hdmi-cec",
+ hdmi_parents, ARRAY_SIZE(hdmi_parents),
+ STM32F7_RCC_DCKCFGR2, 26, 1,
+ NO_GATE, 0,
+ 0
+ },
+ {
+ CLK_SPDIF, "spdif-rx",
+ spdif_parent, ARRAY_SIZE(spdif_parent),
+ STM32F7_RCC_DCKCFGR2, 22, 3,
+ STM32F4_RCC_APB2ENR, 23,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_USART1, "usart1",
+ uart_parents1, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 0, 3,
+ STM32F4_RCC_APB2ENR, 4,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_USART2, "usart2",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 2, 3,
+ STM32F4_RCC_APB1ENR, 17,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_USART3, "usart3",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 4, 3,
+ STM32F4_RCC_APB1ENR, 18,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_UART4, "uart4",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 6, 3,
+ STM32F4_RCC_APB1ENR, 19,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_UART5, "uart5",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 8, 3,
+ STM32F4_RCC_APB1ENR, 20,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_USART6, "usart6",
+ uart_parents1, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 10, 3,
+ STM32F4_RCC_APB2ENR, 5,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_UART7, "uart7",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 12, 3,
+ STM32F4_RCC_APB1ENR, 30,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_UART8, "uart8",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 14, 3,
+ STM32F4_RCC_APB1ENR, 31,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_I2C1, "i2c1",
+ i2c_parents, ARRAY_SIZE(i2c_parents),
+ STM32F7_RCC_DCKCFGR2, 16, 3,
+ STM32F4_RCC_APB1ENR, 21,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_I2C2, "i2c2",
+ i2c_parents, ARRAY_SIZE(i2c_parents),
+ STM32F7_RCC_DCKCFGR2, 18, 3,
+ STM32F4_RCC_APB1ENR, 22,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_I2C3, "i2c3",
+ i2c_parents, ARRAY_SIZE(i2c_parents),
+ STM32F7_RCC_DCKCFGR2, 20, 3,
+ STM32F4_RCC_APB1ENR, 23,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_I2C4, "i2c4",
+ i2c_parents, ARRAY_SIZE(i2c_parents),
+ STM32F7_RCC_DCKCFGR2, 22, 3,
+ STM32F4_RCC_APB1ENR, 24,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_LPTIMER, "lptim1",
+ lptim_parent, ARRAY_SIZE(lptim_parent),
+ STM32F7_RCC_DCKCFGR2, 24, 3,
+ STM32F4_RCC_APB1ENR, 9,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_F769_DSI, "dsi",
+ dsi_parent, ARRAY_SIZE(dsi_parent),
+ STM32F7_RCC_DCKCFGR2, 0, 1,
+ STM32F4_RCC_APB2ENR, 27,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_DFSDM1, "dfsdm1",
+ dfsdm1_src, ARRAY_SIZE(dfsdm1_src),
+ STM32F4_RCC_DCKCFGR, 25, 1,
+ STM32F4_RCC_APB2ENR, 29,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_ADFSDM1, "adfsdm1",
+ adsfdm1_parent, ARRAY_SIZE(adsfdm1_parent),
+ STM32F4_RCC_DCKCFGR, 26, 1,
+ STM32F4_RCC_APB2ENR, 29,
+ CLK_SET_RATE_PARENT
+ },
+};
+
static const struct stm32f4_clk_data stm32f429_clk_data = {
.end_primary = END_PRIMARY_CLK,
.gates_data = stm32f429_gates,
@@ -1343,6 +1604,16 @@ static const struct stm32f4_clk_data stm32f746_clk_data = {
.aux_clk_num = ARRAY_SIZE(stm32f746_aux_clk),
};
+static const struct stm32f4_clk_data stm32f769_clk_data = {
+ .end_primary = END_PRIMARY_CLK_F7,
+ .gates_data = stm32f769_gates,
+ .gates_map = stm32f769_gate_map,
+ .gates_num = ARRAY_SIZE(stm32f769_gates),
+ .pll_data = stm32f469_pll,
+ .aux_clk = stm32f769_aux_clk,
+ .aux_clk_num = ARRAY_SIZE(stm32f769_aux_clk),
+};
+
static const struct of_device_id stm32f4_of_match[] = {
{
.compatible = "st,stm32f42xx-rcc",
@@ -1356,6 +1627,10 @@ static const struct of_device_id stm32f4_of_match[] = {
.compatible = "st,stm32f746-rcc",
.data = &stm32f746_clk_data
},
+ {
+ .compatible = "st,stm32f769-rcc",
+ .data = &stm32f769_clk_data
+ },
{}
};
@@ -1427,9 +1702,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
int n;
const struct of_device_id *match;
const struct stm32f4_clk_data *data;
- unsigned long pllcfgr;
- const char *pllsrc;
unsigned long pllm;
+ struct clk_hw *pll_src_hw;
base = of_iomap(np, 0);
if (!base) {
@@ -1460,21 +1734,33 @@ static void __init stm32f4_rcc_init(struct device_node *np)
hse_clk = of_clk_get_parent_name(np, 0);
dsi_parent[0] = hse_clk;
+ pllsrc_parent[1] = hse_clk;
i2s_in_clk = of_clk_get_parent_name(np, 1);
i2s_parents[1] = i2s_in_clk;
sai_parents[2] = i2s_in_clk;
+ if (of_device_is_compatible(np, "st,stm32f769-rcc")) {
+ clk_hw_register_gate(NULL, "dfsdm1_apb", "apb2_div", 0,
+ base + STM32F4_RCC_APB2ENR, 29,
+ CLK_IGNORE_UNUSED, &stm32f4_clk_lock);
+ dsi_parent[0] = pll_src;
+ sai_parents[3] = pll_src;
+ }
+
clks[CLK_HSI] = clk_hw_register_fixed_rate_with_accuracy(NULL, "hsi",
NULL, 0, 16000000, 160000);
- pllcfgr = readl(base + STM32F4_RCC_PLLCFGR);
- pllsrc = pllcfgr & BIT(22) ? hse_clk : "hsi";
- pllm = pllcfgr & 0x3f;
+ pll_src_hw = clk_hw_register_mux(NULL, pll_src, pllsrc_parent,
+ ARRAY_SIZE(pllsrc_parent), 0,
+ base + STM32F4_RCC_PLLCFGR, 22, 1, 0,
+ &stm32f4_clk_lock);
+
+ pllm = readl(base + STM32F4_RCC_PLLCFGR) & 0x3f;
- clk_hw_register_fixed_factor(NULL, "vco_in", pllsrc,
- 0, 1, pllm);
+ clk_hw_register_fixed_factor(NULL, "vco_in", pll_src,
+ 0, 1, pllm);
stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
&stm32f4_clk_lock);
@@ -1612,12 +1898,16 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clks[aux_clk->idx] = hw;
}
- if (of_device_is_compatible(np, "st,stm32f746-rcc"))
+ if (of_device_is_compatible(np, "st,stm32f746-rcc")) {
clk_hw_register_fixed_factor(NULL, "hsi_div488", "hsi", 0,
1, 488);
+ clks[CLK_PLL_SRC] = pll_src_hw;
+ }
+
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
+
return;
fail:
kfree(clks);
@@ -1626,3 +1916,4 @@ fail:
CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f746_rcc, "st,stm32f746-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE_DRIVER(stm32f769_rcc, "st,stm32f769-rcc", stm32f4_rcc_init);
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index a0ae8dc16909..a875649df8b8 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -1402,6 +1402,7 @@ enum {
G_CRYP1,
G_HASH1,
G_BKPSRAM,
+ G_DDRPERFM,
G_LAST
};
@@ -1488,6 +1489,7 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_GATE(G_STGENRO, RCC_APB4ENSETR, 20, 0),
K_MGATE(G_USBPHY, RCC_APB4ENSETR, 16, 0),
K_GATE(G_IWDG2, RCC_APB4ENSETR, 15, 0),
+ K_GATE(G_DDRPERFM, RCC_APB4ENSETR, 8, 0),
K_MGATE(G_DSI, RCC_APB4ENSETR, 4, 0),
K_MGATE(G_LTDC, RCC_APB4ENSETR, 0, 0),
@@ -1899,6 +1901,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
PCLK(CRC1, "crc1", "ck_axi", 0, G_CRC1),
PCLK(USBH, "usbh", "ck_axi", 0, G_USBH),
PCLK(ETHSTP, "ethstp", "ck_axi", 0, G_ETHSTP),
+ PCLK(DDRPERFM, "ddrperfm", "pclk4", 0, G_DDRPERFM),
/* Kernel clocks */
KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 531b030d4d4e..d975465fe2a8 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -262,7 +262,7 @@ static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
else
__acquire(fd->lock);
- val = clk_readl(fd->reg);
+ val = readl(fd->reg);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
@@ -333,10 +333,10 @@ static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
else
__acquire(fd->lock);
- val = clk_readl(fd->reg);
+ val = readl(fd->reg);
val &= ~fd->mask;
val |= (scale << fd->shift);
- clk_writel(val, fd->reg);
+ writel(val, fd->reg);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 96053a96fe2f..aa51756fd4d6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -39,15 +39,23 @@ static LIST_HEAD(clk_notifier_list);
/*** private data structures ***/
+struct clk_parent_map {
+ const struct clk_hw *hw;
+ struct clk_core *core;
+ const char *fw_name;
+ const char *name;
+ int index;
+};
+
struct clk_core {
const char *name;
const struct clk_ops *ops;
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct device_node *of_node;
struct clk_core *parent;
- const char **parent_names;
- struct clk_core **parents;
+ struct clk_parent_map *parents;
u8 num_parents;
u8 new_parent_index;
unsigned long rate;
@@ -316,17 +324,102 @@ static struct clk_core *clk_core_lookup(const char *name)
return NULL;
}
+/**
+ * clk_core_get - Find the clk_core parent of a clk
+ * @core: clk to find parent of
+ * @p_index: parent index to search for
+ *
+ * This is the preferred method for clk providers to find the parent of a
+ * clk when that parent is external to the clk controller. The parent_names
+ * array is indexed and treated as a local name matching a string in the device
+ * node's 'clock-names' property or as the 'con_id' matching the device's
+ * dev_name() in a clk_lookup. This allows clk providers to use their own
+ * namespace instead of looking for a globally unique parent string.
+ *
+ * For example the following DT snippet would allow a clock registered by the
+ * clock-controller@c001 that has a clk_init_data::parent_data array
+ * with 'xtal' in the 'name' member to find the clock provided by the
+ * clock-controller@f00abcd without needing to get the globally unique name of
+ * the xtal clk.
+ *
+ * parent: clock-controller@f00abcd {
+ * reg = <0xf00abcd 0xabcd>;
+ * #clock-cells = <0>;
+ * };
+ *
+ * clock-controller@c001 {
+ * reg = <0xc001 0xf00d>;
+ * clocks = <&parent>;
+ * clock-names = "xtal";
+ * #clock-cells = <1>;
+ * };
+ *
+ * Returns: -ENOENT when the provider can't be found or the clk doesn't
+ * exist in the provider. -EINVAL when the name can't be found. NULL when the
+ * provider knows about the clk but it isn't provided on this system.
+ * A valid clk_core pointer when the clk can be found in the provider.
+ */
+static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+{
+ const char *name = core->parents[p_index].fw_name;
+ int index = core->parents[p_index].index;
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
+ struct device *dev = core->dev;
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct device_node *np = core->of_node;
+
+ if (np && index >= 0)
+ hw = of_clk_get_hw(np, index, name);
+
+	/*
+	 * If the DT search above couldn't find the provider, or the provider
+	 * didn't know about this clk, fall back to looking it up via
+	 * clkdev-based clk_lookups.
+	 */
+ if (PTR_ERR(hw) == -ENOENT && name)
+ hw = clk_find_hw(dev_id, name);
+
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+
+ return hw->core;
+}
+
+static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
+{
+ struct clk_parent_map *entry = &core->parents[index];
+ struct clk_core *parent = ERR_PTR(-ENOENT);
+
+ if (entry->hw) {
+ parent = entry->hw->core;
+ /*
+ * We have a direct reference but it isn't registered yet?
+ * Orphan it and let clk_reparent() update the orphan status
+ * when the parent is registered.
+ */
+ if (!parent)
+ parent = ERR_PTR(-EPROBE_DEFER);
+ } else {
+ parent = clk_core_get(core, index);
+ if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT)
+ parent = clk_core_lookup(entry->name);
+ }
+
+ /* Only cache it if it's not an error */
+ if (!IS_ERR(parent))
+ entry->core = parent;
+}
+
static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
u8 index)
{
- if (!core || index >= core->num_parents)
+ if (!core || index >= core->num_parents || !core->parents)
return NULL;
- if (!core->parents[index])
- core->parents[index] =
- clk_core_lookup(core->parent_names[index]);
+ if (!core->parents[index].core)
+ clk_core_fill_parent_index(core, index);
- return core->parents[index];
+ return core->parents[index].core;
}
struct clk_hw *
@@ -347,23 +440,18 @@ unsigned int __clk_get_enable_count(struct clk *clk)
static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
- unsigned long ret;
-
- if (!core) {
- ret = 0;
- goto out;
- }
-
- ret = core->rate;
-
- if (!core->num_parents)
- goto out;
+ if (!core)
+ return 0;
- if (!core->parent)
- ret = 0;
+ if (!core->num_parents || core->parent)
+ return core->rate;
-out:
- return ret;
+ /*
+ * Clk must have a parent because num_parents > 0 but the parent isn't
+ * known yet. Best to return 0 as the rate of this clk until we can
+ * properly recalc the rate based on the parent's rate.
+ */
+ return 0;
}
unsigned long clk_hw_get_rate(const struct clk_hw *hw)
@@ -524,9 +612,15 @@ void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
/*
+ * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
+ * @hw: mux type clk to determine rate on
+ * @req: rate request, also used to return preferred parent and frequencies
+ *
* Helper for finding best parent to provide a given frequency. This can be used
* directly as a determine_rate callback (e.g. for a mux), or from a more
* complex clock that may combine a mux with other operations.
+ *
+ * Returns: 0 on success, or a negative errno value on error
*/
int __clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
@@ -1519,20 +1613,37 @@ static int clk_fetch_parent_index(struct clk_core *core,
return -EINVAL;
for (i = 0; i < core->num_parents; i++) {
- if (core->parents[i] == parent)
+ /* Found it first try! */
+ if (core->parents[i].core == parent)
return i;
- if (core->parents[i])
+ /* Something else is here, so keep looking */
+ if (core->parents[i].core)
continue;
- /* Fallback to comparing globally unique names */
- if (!strcmp(parent->name, core->parent_names[i])) {
- core->parents[i] = parent;
- return i;
+ /* Maybe core hasn't been cached but the hw is all we know? */
+ if (core->parents[i].hw) {
+ if (core->parents[i].hw == parent->hw)
+ break;
+
+ /* Didn't match, but we're expecting a clk_hw */
+ continue;
}
+
+ /* Maybe it hasn't been cached (clk_set_parent() path) */
+ if (parent == clk_core_get(core, i))
+ break;
+
+ /* Fallback to comparing globally unique names */
+ if (!strcmp(parent->name, core->parents[i].name))
+ break;
}
- return -EINVAL;
+ if (i == core->num_parents)
+ return -EINVAL;
+
+ core->parents[i].core = parent;
+ return i;
}
/*
@@ -2293,6 +2404,7 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
struct clk_core *core, *parent_core;
+ int i;
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
@@ -2305,8 +2417,11 @@ bool clk_has_parent(struct clk *clk, struct clk *parent)
if (core->parent == parent_core)
return true;
- return match_string(core->parent_names, core->num_parents,
- parent_core->name) >= 0;
+ for (i = 0; i < core->num_parents; i++)
+ if (!strcmp(core->parents[i].name, parent_core->name))
+ return true;
+
+ return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);
@@ -2850,7 +2965,6 @@ static const struct {
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
ENTRY(CLK_IGNORE_UNUSED),
- ENTRY(CLK_IS_BASIC),
ENTRY(CLK_GET_RATE_NOCACHE),
ENTRY(CLK_SET_RATE_NO_REPARENT),
ENTRY(CLK_GET_ACCURACY_NOCACHE),
@@ -2889,9 +3003,9 @@ static int possible_parents_show(struct seq_file *s, void *data)
int i;
for (i = 0; i < core->num_parents - 1; i++)
- seq_printf(s, "%s ", core->parent_names[i]);
+ seq_printf(s, "%s ", core->parents[i].name);
- seq_printf(s, "%s\n", core->parent_names[i]);
+ seq_printf(s, "%s\n", core->parents[i].name);
return 0;
}
@@ -3025,7 +3139,7 @@ static inline void clk_debug_unregister(struct clk_core *core)
*/
static int __clk_core_init(struct clk_core *core)
{
- int i, ret;
+ int ret;
struct clk_core *orphan;
struct hlist_node *tmp2;
unsigned long rate;
@@ -3079,12 +3193,6 @@ static int __clk_core_init(struct clk_core *core)
goto out;
}
- /* throw a WARN if any entries in parent_names are NULL */
- for (i = 0; i < core->num_parents; i++)
- WARN(!core->parent_names[i],
- "%s: invalid NULL in %s's .parent_names\n",
- __func__, core->name);
-
core->parent = __clk_init_parent(core);
/*
@@ -3313,20 +3421,104 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
return clk;
}
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
+static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
+{
+ const char *dst;
+
+ if (!src) {
+ if (must_exist)
+ return -EINVAL;
+ return 0;
+ }
+
+ *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int clk_core_populate_parent_map(struct clk_core *core)
+{
+ const struct clk_init_data *init = core->hw->init;
+ u8 num_parents = init->num_parents;
+ const char * const *parent_names = init->parent_names;
+ const struct clk_hw **parent_hws = init->parent_hws;
+ const struct clk_parent_data *parent_data = init->parent_data;
+ int i, ret = 0;
+ struct clk_parent_map *parents, *parent;
+
+ if (!num_parents)
+ return 0;
+
+ /*
+ * Avoid unnecessary string look-ups of clk_core's possible parents by
+ * having a cache of names/clk_hw pointers to clk_core pointers.
+ */
+ parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
+ core->parents = parents;
+ if (!parents)
+ return -ENOMEM;
+
+ /* Copy everything over because it might be __initdata */
+ for (i = 0, parent = parents; i < num_parents; i++, parent++) {
+ parent->index = -1;
+ if (parent_names) {
+ /* throw a WARN if any entries are NULL */
+ WARN(!parent_names[i],
+ "%s: invalid NULL in %s's .parent_names\n",
+ __func__, core->name);
+ ret = clk_cpy_name(&parent->name, parent_names[i],
+ true);
+ } else if (parent_data) {
+ parent->hw = parent_data[i].hw;
+ parent->index = parent_data[i].index;
+ ret = clk_cpy_name(&parent->fw_name,
+ parent_data[i].fw_name, false);
+ if (!ret)
+ ret = clk_cpy_name(&parent->name,
+ parent_data[i].name,
+ false);
+ } else if (parent_hws) {
+ parent->hw = parent_hws[i];
+ } else {
+ ret = -EINVAL;
+ WARN(1, "Must specify parents if num_parents > 0\n");
+ }
+
+ if (ret) {
+ do {
+ kfree_const(parents[i].name);
+ kfree_const(parents[i].fw_name);
+ } while (--i >= 0);
+ kfree(parents);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void clk_core_free_parent_map(struct clk_core *core)
+{
+ int i = core->num_parents;
+
+ if (!core->num_parents)
+ return;
+
+ while (--i >= 0) {
+ kfree_const(core->parents[i].name);
+ kfree_const(core->parents[i].fw_name);
+ }
+
+ kfree(core->parents);
+}
+
+static struct clk *
+__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
- int i, ret;
+ int ret;
struct clk_core *core;
core = kzalloc(sizeof(*core), GFP_KERNEL);
@@ -3350,6 +3542,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
if (dev && pm_runtime_enabled(dev))
core->rpm_enabled = true;
core->dev = dev;
+ core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
core->hw = hw;
@@ -3359,33 +3552,9 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
core->max_rate = ULONG_MAX;
hw->core = core;
- /* allocate local copy in case parent_names is __initdata */
- core->parent_names = kcalloc(core->num_parents, sizeof(char *),
- GFP_KERNEL);
-
- if (!core->parent_names) {
- ret = -ENOMEM;
- goto fail_parent_names;
- }
-
-
- /* copy each string name in case parent_names is __initdata */
- for (i = 0; i < core->num_parents; i++) {
- core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
- GFP_KERNEL);
- if (!core->parent_names[i]) {
- ret = -ENOMEM;
- goto fail_parent_names_copy;
- }
- }
-
- /* avoid unnecessary string look-ups of clk_core's possible parents. */
- core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
- GFP_KERNEL);
- if (!core->parents) {
- ret = -ENOMEM;
+ ret = clk_core_populate_parent_map(core);
+ if (ret)
goto fail_parents;
- };
INIT_HLIST_HEAD(&core->clks);
@@ -3396,7 +3565,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
hw->clk = alloc_clk(core, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
- goto fail_parents;
+ goto fail_create_clk;
}
clk_core_link_consumer(hw->core, hw->clk);
@@ -3412,13 +3581,9 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
free_clk(hw->clk);
hw->clk = NULL;
+fail_create_clk:
+ clk_core_free_parent_map(core);
fail_parents:
- kfree(core->parents);
-fail_parent_names_copy:
- while (--i >= 0)
- kfree_const(core->parent_names[i]);
- kfree(core->parent_names);
-fail_parent_names:
fail_ops:
kfree_const(core->name);
fail_name:
@@ -3426,6 +3591,24 @@ fail_name:
fail_out:
return ERR_PTR(ret);
}
+
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the *deprecated* interface for populating the clock tree with
+ * new clock nodes. Use clk_hw_register() instead.
+ *
+ * Returns: a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
+{
+ return __clk_register(dev, dev_of_node(dev), hw);
+}
EXPORT_SYMBOL_GPL(clk_register);
/**
@@ -3440,23 +3623,35 @@ EXPORT_SYMBOL_GPL(clk_register);
*/
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
- return PTR_ERR_OR_ZERO(clk_register(dev, hw));
+ return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
+/*
+ * of_clk_hw_register - register a clk_hw and return an error code
+ * @node: device_node of device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * of_clk_hw_register() is the primary interface for populating the clock tree
+ * with new clock nodes when a struct device is not available, but a struct
+ * device_node is. It returns zero on success or a negative error code on
+ * failure. Drivers must test for an error code after calling
+ * of_clk_hw_register().
+ */
+int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
+{
+ return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_register);
+
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
- int i = core->num_parents;
lockdep_assert_held(&prepare_lock);
- kfree(core->parents);
- while (--i >= 0)
- kfree_const(core->parent_names[i]);
-
- kfree(core->parent_names);
+ clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
@@ -3575,9 +3770,10 @@ static void devm_clk_hw_release(struct device *dev, void *res)
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
- * Managed clk_register(). Clocks returned from this function are
- * automatically clk_unregister()ed on driver detach. See clk_register() for
- * more information.
+ * Managed clk_register(). This function is *deprecated*; use
+ * devm_clk_hw_register() instead.
+ *
+ * Clocks returned from this function are automatically clk_unregister()ed on
+ * driver detach. See clk_register() for more information.
*/
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
@@ -3895,6 +4091,8 @@ EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
* @np: Device node pointer associated with clock provider
* @clk_src_get: callback for decoding clock
* @data: context pointer for @clk_src_get callback.
+ *
+ * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
*/
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index 553f531cc232..d8400d623b34 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -19,6 +19,8 @@ static inline struct clk_hw *of_clk_get_hw(struct device_node *np,
}
#endif
+struct clk_hw *clk_find_hw(const char *dev_id, const char *con_id);
+
#ifdef CONFIG_COMMON_CLK
struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
const char *dev_id, const char *con_id);
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 8c4435c53f09..2afc8df8acff 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -46,6 +46,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
if (con_id)
best_possible += 1;
+ lockdep_assert_held(&clocks_mutex);
+
list_for_each_entry(p, &clocks, node) {
match = 0;
if (p->dev_id) {
@@ -70,25 +72,26 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
return cl;
}
-static struct clk *__clk_get_sys(struct device *dev, const char *dev_id,
- const char *con_id)
+struct clk_hw *clk_find_hw(const char *dev_id, const char *con_id)
{
struct clk_lookup *cl;
- struct clk *clk = NULL;
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
mutex_lock(&clocks_mutex);
-
cl = clk_find(dev_id, con_id);
- if (!cl)
- goto out;
-
- clk = clk_hw_create_clk(dev, cl->clk_hw, dev_id, con_id);
- if (IS_ERR(clk))
- cl = NULL;
-out:
+ if (cl)
+ hw = cl->clk_hw;
mutex_unlock(&clocks_mutex);
- return cl ? clk : ERR_PTR(-ENOENT);
+ return hw;
+}
+
+static struct clk *__clk_get_sys(struct device *dev, const char *dev_id,
+ const char *con_id)
+{
+ struct clk_hw *hw = clk_find_hw(dev_id, con_id);
+
+ return clk_hw_create_clk(dev, hw, dev_id, con_id);
}
struct clk *clk_get_sys(const char *dev_id, const char *con_id)
@@ -402,7 +405,10 @@ void devm_clk_release_clkdev(struct device *dev, const char *con_id,
struct clk_lookup *cl;
int rval;
+ mutex_lock(&clocks_mutex);
cl = clk_find(dev_id, con_id);
+ mutex_unlock(&clocks_mutex);
+
WARN_ON(!cl);
rval = devres_release(dev, devm_clkdev_release,
devm_clk_match_clkdev, cl);
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index d1bbee19ed0f..bdc52364b421 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -160,10 +160,8 @@ static int __init da8xx_cfgchip_register_div4p5(struct device *dev,
struct da8xx_cfgchip_gate_clk *gate;
gate = da8xx_cfgchip_gate_clk_register(dev, &da8xx_div4p5ena_info, regmap);
- if (IS_ERR(gate))
- return PTR_ERR(gate);
- return 0;
+ return PTR_ERR_OR_ZERO(gate);
}
static int __init
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index 7cc354dd29e2..c2a453caa131 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Clock driver for TI Davinci PSC controllers
*
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index cc5614567a70..69070f834391 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Clock driver for TI Davinci PSC controllers
*
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
index f40419959656..794eeff0d5d2 100644
--- a/drivers/clk/hisilicon/clk-hi3660.c
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
"clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
{ HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
"clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
+	/*
+	 * clk_gate_ufs_subsys is a system bus clock; mark it as a critical
+	 * clock so it stays on across system suspend and resume.
+	 */
{ HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0x50, 21, 0, },
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, },
{ HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
CLK_SET_RATE_PARENT, 0x50, 28, 0, },
{ HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
diff --git a/drivers/clk/hisilicon/clk-hisi-phase.c b/drivers/clk/hisilicon/clk-hisi-phase.c
index 5fdc267bb2da..ba6afad66a2b 100644
--- a/drivers/clk/hisilicon/clk-hisi-phase.c
+++ b/drivers/clk/hisilicon/clk-hisi-phase.c
@@ -75,10 +75,10 @@ static int hisi_clk_set_phase(struct clk_hw *hw, int degrees)
spin_lock_irqsave(phase->lock, flags);
- val = clk_readl(phase->reg);
+ val = readl(phase->reg);
val &= ~phase->mask;
val |= regval << phase->shift;
- clk_writel(val, phase->reg);
+ writel(val, phase->reg);
spin_unlock_irqrestore(phase->lock, flags);
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 0d5180fbe988..05641c64b317 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -35,7 +35,7 @@ obj-$(CONFIG_SOC_IMX25) += clk-imx25.o
obj-$(CONFIG_SOC_IMX27) += clk-imx27.o
obj-$(CONFIG_SOC_IMX31) += clk-imx31.o
obj-$(CONFIG_SOC_IMX35) += clk-imx35.o
-obj-$(CONFIG_SOC_IMX5) += clk-imx51-imx53.o
+obj-$(CONFIG_SOC_IMX5) += clk-imx5.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o
obj-$(CONFIG_SOC_IMX6SLL) += clk-imx6sll.o
diff --git a/drivers/clk/imx/clk-divider-gate.c b/drivers/clk/imx/clk-divider-gate.c
index df1f8429fe16..2a8352a316c7 100644
--- a/drivers/clk/imx/clk-divider-gate.c
+++ b/drivers/clk/imx/clk-divider-gate.c
@@ -29,7 +29,7 @@ static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw,
struct clk_divider *div = to_clk_divider(hw);
unsigned int val;
- val = clk_readl(div->reg) >> div->shift;
+ val = readl(div->reg) >> div->shift;
val &= clk_div_mask(div->width);
if (!val)
return 0;
@@ -51,7 +51,7 @@ static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
if (!clk_hw_is_enabled(hw)) {
val = div_gate->cached_val;
} else {
- val = clk_readl(div->reg) >> div->shift;
+ val = readl(div->reg) >> div->shift;
val &= clk_div_mask(div->width);
}
@@ -87,10 +87,10 @@ static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
spin_lock_irqsave(div->lock, flags);
if (clk_hw_is_enabled(hw)) {
- val = clk_readl(div->reg);
+ val = readl(div->reg);
val &= ~(clk_div_mask(div->width) << div->shift);
val |= (u32)value << div->shift;
- clk_writel(val, div->reg);
+ writel(val, div->reg);
} else {
div_gate->cached_val = value;
}
@@ -114,9 +114,9 @@ static int clk_divider_enable(struct clk_hw *hw)
spin_lock_irqsave(div->lock, flags);
/* restore div val */
- val = clk_readl(div->reg);
+ val = readl(div->reg);
val |= div_gate->cached_val << div->shift;
- clk_writel(val, div->reg);
+ writel(val, div->reg);
spin_unlock_irqrestore(div->lock, flags);
@@ -133,10 +133,10 @@ static void clk_divider_disable(struct clk_hw *hw)
spin_lock_irqsave(div->lock, flags);
/* store the current div val */
- val = clk_readl(div->reg) >> div->shift;
+ val = readl(div->reg) >> div->shift;
val &= clk_div_mask(div->width);
div_gate->cached_val = val;
- clk_writel(0, div->reg);
+ writel(0, div->reg);
spin_unlock_irqrestore(div->lock, flags);
}
@@ -146,7 +146,7 @@ static int clk_divider_is_enabled(struct clk_hw *hw)
struct clk_divider *div = to_clk_divider(hw);
u32 val;
- val = clk_readl(div->reg) >> div->shift;
+ val = readl(div->reg) >> div->shift;
val &= clk_div_mask(div->width);
return val ? 1 : 0;
@@ -206,7 +206,7 @@ struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
div_gate->divider.hw.init = &init;
div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags;
/* cache gate status */
- val = clk_readl(reg) >> shift;
+ val = readl(reg) >> shift;
val &= clk_div_mask(width);
div_gate->cached_val = val;
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx5.c
index e91c826bce70..c85ebd74a8a5 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx5.c
@@ -164,10 +164,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_CKIH1] = imx_obtain_fixed_clock("ckih1", 0);
clk[IMX5_CLK_CKIH2] = imx_obtain_fixed_clock("ckih2", 0);
- clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
- periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
- clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
- main_bus_sel, ARRAY_SIZE(main_bus_sel));
clk[IMX5_CLK_PER_LP_APM] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
clk[IMX5_CLK_PER_PRED1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
@@ -191,16 +187,10 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_UART_PRED] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
clk[IMX5_CLK_UART_ROOT] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);
- clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
- standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
- clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
- standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
clk[IMX5_CLK_ESDHC_A_PRED] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
clk[IMX5_CLK_ESDHC_A_PODF] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
clk[IMX5_CLK_ESDHC_B_PRED] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
clk[IMX5_CLK_ESDHC_B_PODF] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
- clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
- clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[IMX5_CLK_EMI_SEL] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
@@ -311,10 +301,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk_register_clkdev(clk[IMX5_CLK_CPU_PODF], NULL, "cpu0");
clk_register_clkdev(clk[IMX5_CLK_GPC_DVFS], "gpc_dvfs", NULL);
- /* Set SDHC parents to be PLL2 */
- clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
- clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);
-
/* move usb phy clk to 24MHz */
clk_set_parent(clk[IMX5_CLK_USB_PHY_SEL], clk[IMX5_CLK_OSC]);
}
@@ -342,8 +328,21 @@ static void __init mx50_clocks_init(struct device_node *np)
mx5_clocks_common_init(ccm_base);
+ /*
+ * This clock is called periph_clk in the i.MX50 Reference Manual, but
+	 * it comes closest in scope to the main_bus_clk of i.MX51 and i.MX53.
+ */
+ clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+
clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 10, 1,
lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
+ clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 21, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 20, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+ clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
@@ -372,6 +371,10 @@ static void __init mx50_clocks_init(struct device_node *np)
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ /* Set SDHC parents to be PLL2 */
+ clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
+ clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);
+
/* set SDHC root clock to 200MHZ*/
clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 200000000);
clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 200000000);
@@ -410,6 +413,10 @@ static void __init mx51_clocks_init(struct device_node *np)
mx5_clocks_common_init(ccm_base);
+ clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
+ periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
+ clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
+ main_bus_sel, ARRAY_SIZE(main_bus_sel));
clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux_flags("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
@@ -422,6 +429,12 @@ static void __init mx51_clocks_init(struct device_node *np)
mx51_tve_sel, ARRAY_SIZE(mx51_tve_sel));
clk[IMX5_CLK_TVE_GATE] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
clk[IMX5_CLK_TVE_PRED] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
+ clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+ clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
@@ -452,6 +465,10 @@ static void __init mx51_clocks_init(struct device_node *np)
/* set the usboh3 parent to pll2_sw */
clk_set_parent(clk[IMX5_CLK_USBOH3_SEL], clk[IMX5_CLK_PLL2_SW]);
+ /* Set SDHC parents to be PLL2 */
+ clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
+ clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);
+
/* set SDHC root clock to 166.25MHZ*/
clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 166250000);
clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 166250000);
@@ -506,6 +523,10 @@ static void __init mx53_clocks_init(struct device_node *np)
mx5_clocks_common_init(ccm_base);
+ clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
+ periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
+ clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
+ main_bus_sel, ARRAY_SIZE(main_bus_sel));
clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 10, 1,
lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
clk[IMX5_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
@@ -527,6 +548,12 @@ static void __init mx53_clocks_init(struct device_node *np)
mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel), CLK_SET_RATE_PARENT);
clk[IMX5_CLK_TVE_GATE] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
clk[IMX5_CLK_TVE_PRED] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
+ clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+ clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
@@ -589,6 +616,10 @@ static void __init mx53_clocks_init(struct device_node *np)
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ /* Set SDHC parents to be PLL2 */
+ clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
+ clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);
+
/* set SDHC root clock to 200MHZ*/
clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 200000000);
clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 200000000);
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 3bd2044cf25c..7eea448cb9a9 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -76,6 +76,20 @@ static u32 share_count_ssi1;
static u32 share_count_ssi2;
static u32 share_count_ssi3;
+static struct clk ** const uart_clks[] __initconst = {
+ &clks[IMX6SLL_CLK_UART1_IPG],
+ &clks[IMX6SLL_CLK_UART1_SERIAL],
+ &clks[IMX6SLL_CLK_UART2_IPG],
+ &clks[IMX6SLL_CLK_UART2_SERIAL],
+ &clks[IMX6SLL_CLK_UART3_IPG],
+ &clks[IMX6SLL_CLK_UART3_SERIAL],
+ &clks[IMX6SLL_CLK_UART4_IPG],
+ &clks[IMX6SLL_CLK_UART4_SERIAL],
+ &clks[IMX6SLL_CLK_UART5_IPG],
+ &clks[IMX6SLL_CLK_UART5_SERIAL],
+ NULL
+};
+
static void __init imx6sll_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -268,7 +282,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_GPT_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20);
clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
- clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24);
+ clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24);
clks[IMX6SLL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
clks[IMX6SLL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
@@ -334,6 +348,8 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ imx_register_uart_clocks(uart_clks);
+
/* Lower the AHB clock rate before changing the clock source. */
clk_set_rate(clks[IMX6SLL_CLK_AHB], 99000000);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index cfbd8d4edb85..5b8a0c729f90 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -417,8 +417,8 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_DDR_IMX7, "pll_dram_main", "osc", base + 0x70, 0x7f);
clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1);
clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0);
- clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_audio_main", "osc", base + 0xf0, 0x7f);
- clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_video_main", "osc", base + 0x130, 0x7f);
+ clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV_IMX7, "pll_audio_main", "osc", base + 0xf0, 0x7f);
+ clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV_IMX7, "pll_video_main", "osc", base + 0x130, 0x7f);
clks[IMX7D_PLL_ARM_MAIN_BYPASS] = imx_clk_mux_flags("pll_arm_main_bypass", base + 0x60, 16, 1, pll_arm_bypass_sel, ARRAY_SIZE(pll_arm_bypass_sel), CLK_SET_RATE_PARENT);
clks[IMX7D_PLL_DRAM_MAIN_BYPASS] = imx_clk_mux_flags("pll_dram_main_bypass", base + 0x70, 16, 1, pll_dram_bypass_sel, ARRAY_SIZE(pll_dram_bypass_sel), CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index ce306631e844..66682100f14c 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -151,7 +151,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
clks[IMX7ULP_CLK_DMA1] = imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
clks[IMX7ULP_CLK_RGPIO2P1] = imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
clks[IMX7ULP_CLK_DMA_MUX1] = imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
- clks[IMX7ULP_CLK_SNVS] = imx_clk_hw_gate("snvs", "nic1_bus_clk", base + 0x8c, 30);
clks[IMX7ULP_CLK_CAAM] = imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
clks[IMX7ULP_CLK_LPTPM4] = imx7ulp_clk_composite("lptpm4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
clks[IMX7ULP_CLK_LPTPM5] = imx7ulp_clk_composite("lptpm5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index a9b3888aef0c..daf1841b2adb 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -458,6 +458,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
clks[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
clks[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
+ clks[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6);
clks[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
clks[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
clks[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index 7e9134b205ab..fb567dcc2118 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -43,7 +43,7 @@ static int clk_pfdv2_wait(struct clk_pfdv2 *pfd)
{
u32 val;
- return readl_poll_timeout(pfd->reg, val, val & pfd->vld_bit,
+ return readl_poll_timeout(pfd->reg, val, val & (1 << pfd->vld_bit),
0, LOCK_TIMEOUT_US);
}
@@ -55,7 +55,7 @@ static int clk_pfdv2_enable(struct clk_hw *hw)
spin_lock_irqsave(&pfd_lock, flags);
val = readl_relaxed(pfd->reg);
- val &= ~pfd->gate_bit;
+ val &= ~(1 << pfd->gate_bit);
writel_relaxed(val, pfd->reg);
spin_unlock_irqrestore(&pfd_lock, flags);
@@ -70,7 +70,7 @@ static void clk_pfdv2_disable(struct clk_hw *hw)
spin_lock_irqsave(&pfd_lock, flags);
val = readl_relaxed(pfd->reg);
- val |= pfd->gate_bit;
+ val |= (1 << pfd->gate_bit);
writel_relaxed(val, pfd->reg);
spin_unlock_irqrestore(&pfd_lock, flags);
}
@@ -123,7 +123,7 @@ static int clk_pfdv2_is_enabled(struct clk_hw *hw)
{
struct clk_pfdv2 *pfd = to_clk_pfdv2(hw);
- if (readl_relaxed(pfd->reg) & pfd->gate_bit)
+ if (readl_relaxed(pfd->reg) & (1 << pfd->gate_bit))
return 0;
return 1;
@@ -180,7 +180,7 @@ struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
return ERR_PTR(-ENOMEM);
pfd->reg = reg;
- pfd->gate_bit = 1 << ((idx + 1) * 8 - 1);
+ pfd->gate_bit = (idx + 1) * 8 - 1;
pfd->vld_bit = pfd->gate_bit - 1;
pfd->frac_off = idx * 8;
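
After this change, gate_bit and vld_bit hold bit positions rather than precomputed masks, so every register access rebuilds the mask as 1 << bit. A small standalone sketch of the same idiom (not the driver code itself):

#include <stdint.h>
#include <stdio.h>

/* Build the mask from a bit index, as the driver now does with 1 << gate_bit. */
static inline uint32_t bit_mask(unsigned int bit)
{
	return 1u << bit;
}

int main(void)
{
	unsigned int idx = 2;				/* PFD2 */
	unsigned int gate_bit = (idx + 1) * 8 - 1;	/* 23 */
	unsigned int vld_bit  = gate_bit - 1;		/* 22 */
	uint32_t reg = 0;

	reg |= bit_mask(gate_bit);			/* gate PFD2 */
	printf("gate mask 0x%08x, valid mask 0x%08x, reg 0x%08x\n",
	       bit_mask(gate_bit), bit_mask(vld_bit), reg);
	return 0;
}
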
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 1acfa3e3cfb4..b7213023b238 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -74,10 +74,9 @@ static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
- u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div;
+ u32 mdiv, pdiv, sdiv, pll_div;
u64 fvco = parent_rate;
- pll_gnrl = readl_relaxed(pll->base);
pll_div = readl_relaxed(pll->base + 4);
mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
@@ -93,11 +92,10 @@ static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
- u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div_ctl0, pll_div_ctl1;
+ u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1;
short int kdiv;
u64 fvco = parent_rate;
- pll_gnrl = readl_relaxed(pll->base);
pll_div_ctl0 = readl_relaxed(pll->base + 4);
pll_div_ctl1 = readl_relaxed(pll->base + 8);
mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
@@ -362,7 +360,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
switch (pll_clk->type) {
case PLL_1416X:
- if (!pll->rate_table)
+ if (!pll_clk->rate_table)
init.ops = &clk_pll1416x_min_ops;
else
init.ops = &clk_pll1416x_ops;
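
The one-character fix above matters because pll (the object being allocated) has no rate table filled in at this point; the table lives in the pll_clk descriptor passed by the caller, and the ops are picked by whether that table exists. A tiny sketch of the selection pattern, with hypothetical types:

#include <stdio.h>

struct rate_entry { unsigned long rate; };

struct pll_desc {
	const struct rate_entry *rate_table;	/* optional */
};

/* Pick a reduced ops set when no rate table was supplied. */
static const char *pick_ops(const struct pll_desc *desc)
{
	return desc->rate_table ? "full ops (set_rate allowed)"
				: "min ops (recalc/enable only)";
}

int main(void)
{
	static const struct rate_entry table[] = { { 600000000UL } };
	struct pll_desc with = { table }, without = { NULL };

	printf("%s\n%s\n", pick_ops(&with), pick_ops(&without));
	return 0;
}
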
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 9af62ee8f347..4110e713d259 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -20,6 +20,8 @@
#define PLL_NUM_OFFSET 0x10
#define PLL_DENOM_OFFSET 0x20
+#define PLL_IMX7_NUM_OFFSET 0x20
+#define PLL_IMX7_DENOM_OFFSET 0x30
#define PLL_VF610_NUM_OFFSET 0x20
#define PLL_VF610_DENOM_OFFSET 0x30
@@ -49,6 +51,8 @@ struct clk_pllv3 {
u32 div_mask;
u32 div_shift;
unsigned long ref_clock;
+ u32 num_offset;
+ u32 denom_offset;
};
#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
@@ -219,8 +223,8 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
- u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
- u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ u32 mfn = readl_relaxed(pll->base + pll->num_offset);
+ u32 mfd = readl_relaxed(pll->base + pll->denom_offset);
u32 div = readl_relaxed(pll->base) & pll->div_mask;
u64 temp64 = (u64)parent_rate;
@@ -289,8 +293,8 @@ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
val &= ~pll->div_mask;
val |= div;
writel_relaxed(val, pll->base);
- writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
- writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+ writel_relaxed(mfn, pll->base + pll->num_offset);
+ writel_relaxed(mfd, pll->base + pll->denom_offset);
return clk_pllv3_wait_lock(pll);
}
@@ -352,8 +356,8 @@ static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw,
struct clk_pllv3 *pll = to_clk_pllv3(hw);
struct clk_pllv3_vf610_mf mf;
- mf.mfn = readl_relaxed(pll->base + PLL_VF610_NUM_OFFSET);
- mf.mfd = readl_relaxed(pll->base + PLL_VF610_DENOM_OFFSET);
+ mf.mfn = readl_relaxed(pll->base + pll->num_offset);
+ mf.mfd = readl_relaxed(pll->base + pll->denom_offset);
mf.mfi = (readl_relaxed(pll->base) & pll->div_mask) ? 22 : 20;
return clk_pllv3_vf610_mf_to_rate(parent_rate, mf);
@@ -382,8 +386,8 @@ static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate,
val |= pll->div_mask; /* set bit for mfi=22 */
writel_relaxed(val, pll->base);
- writel_relaxed(mf.mfn, pll->base + PLL_VF610_NUM_OFFSET);
- writel_relaxed(mf.mfd, pll->base + PLL_VF610_DENOM_OFFSET);
+ writel_relaxed(mf.mfn, pll->base + pll->num_offset);
+ writel_relaxed(mf.mfd, pll->base + pll->denom_offset);
return clk_pllv3_wait_lock(pll);
}
@@ -426,6 +430,8 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
return ERR_PTR(-ENOMEM);
pll->power_bit = BM_PLL_POWER;
+ pll->num_offset = PLL_NUM_OFFSET;
+ pll->denom_offset = PLL_DENOM_OFFSET;
switch (type) {
case IMX_PLLV3_SYS:
@@ -433,13 +439,20 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
break;
case IMX_PLLV3_SYS_VF610:
ops = &clk_pllv3_vf610_ops;
+ pll->num_offset = PLL_VF610_NUM_OFFSET;
+ pll->denom_offset = PLL_VF610_DENOM_OFFSET;
break;
case IMX_PLLV3_USB_VF610:
pll->div_shift = 1;
+ /* fall through */
case IMX_PLLV3_USB:
ops = &clk_pllv3_ops;
pll->powerup_set = true;
break;
+ case IMX_PLLV3_AV_IMX7:
+ pll->num_offset = PLL_IMX7_NUM_OFFSET;
+ pll->denom_offset = PLL_IMX7_DENOM_OFFSET;
+ /* fall through */
case IMX_PLLV3_AV:
ops = &clk_pllv3_av_ops;
break;
@@ -454,6 +467,8 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
break;
case IMX_PLLV3_DDR_IMX7:
pll->power_bit = IMX7_DDR_PLL_POWER;
+ pll->num_offset = PLL_IMX7_NUM_OFFSET;
+ pll->denom_offset = PLL_IMX7_DENOM_OFFSET;
ops = &clk_pllv3_av_ops;
break;
default:
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index d38bc9f87c1d..d7e62c3620d3 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -30,6 +30,9 @@
/* PLL Denominator Register (xPLLDENOM) */
#define PLL_DENOM_OFFSET 0x14
+#define MAX_MFD 0x3fffffff
+#define DEFAULT_MFD 1000000
+
struct clk_pllv4 {
struct clk_hw hw;
void __iomem *base;
@@ -64,13 +67,20 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
- u32 div;
+ u32 mult, mfn, mfd;
+ u64 temp64;
+
+ mult = readl_relaxed(pll->base + PLL_CFG_OFFSET);
+ mult &= BM_PLL_MULT;
+ mult >>= BP_PLL_MULT;
- div = readl_relaxed(pll->base + PLL_CFG_OFFSET);
- div &= BM_PLL_MULT;
- div >>= BP_PLL_MULT;
+ mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
+ mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ temp64 = parent_rate;
+ temp64 *= mfn;
+ do_div(temp64, mfd);
- return parent_rate * div;
+ return (parent_rate * mult) + (u32)temp64;
}
static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -78,14 +88,46 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
{
unsigned long parent_rate = *prate;
unsigned long round_rate, i;
+ u32 mfn, mfd = DEFAULT_MFD;
+ bool found = false;
+ u64 temp64;
for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
round_rate = parent_rate * pllv4_mult_table[i];
- if (rate >= round_rate)
- return round_rate;
+ if (rate >= round_rate) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_warn("%s: unable to round rate %lu, parent rate %lu\n",
+ clk_hw_get_name(hw), rate, parent_rate);
+ return 0;
}
- return round_rate;
+ if (parent_rate <= MAX_MFD)
+ mfd = parent_rate;
+
+ temp64 = (u64)(rate - round_rate);
+ temp64 *= mfd;
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
+ /*
+ * NOTE: The value of numerator must always be configured to be
+ * less than the value of the denominator. If we can't get a proper
+ * pair of mfn/mfd, we simply return the round_rate without using
+ * the frac part.
+ */
+ if (mfn >= mfd)
+ return round_rate;
+
+ temp64 = (u64)parent_rate;
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+ return round_rate + (u32)temp64;
}
static bool clk_pllv4_is_valid_mult(unsigned int mult)
@@ -105,18 +147,30 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
- u32 val, mult;
+ u32 val, mult, mfn, mfd = DEFAULT_MFD;
+ u64 temp64;
mult = rate / parent_rate;
if (!clk_pllv4_is_valid_mult(mult))
return -EINVAL;
+ if (parent_rate <= MAX_MFD)
+ mfd = parent_rate;
+
+ temp64 = (u64)(rate - mult * parent_rate);
+ temp64 *= mfd;
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
val = readl_relaxed(pll->base + PLL_CFG_OFFSET);
val &= ~BM_PLL_MULT;
val |= mult << BP_PLL_MULT;
writel_relaxed(val, pll->base + PLL_CFG_OFFSET);
+ writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+ writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+
return 0;
}
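
With this change the pllv4 output becomes rate = parent * mult + parent * mfn / mfd, where mfn is derived from the remainder left over after the integer multiplier and must stay below mfd. A short standalone sketch of that arithmetic, assuming a 24 MHz parent and mfd = parent_rate as in the code above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent = 24000000;	/* 24 MHz OSC, an assumed input */
	uint64_t target = 491520000;	/* example target rate */
	uint64_t mult, mfn, mfd, rate;

	/* integer part: 20 -> 480 MHz (the driver also validates mult against its table) */
	mult = target / parent;
	mfd  = parent;			/* driver uses parent_rate when it fits in MAX_MFD */
	mfn  = (target - mult * parent) * mfd / parent;	/* remainder scaled to mfd */

	rate = parent * mult + parent * mfn / mfd;
	printf("mult=%llu mfn=%llu mfd=%llu -> %llu Hz\n",
	       (unsigned long long)mult, (unsigned long long)mfn,
	       (unsigned long long)mfd, (unsigned long long)rate);
	return 0;
}
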
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
index 9dfd03a95557..991bbe63f156 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -348,7 +348,7 @@ static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
temp64 = parent_rate;
- val = clk_readl(pll->base + PLL_CFG0);
+ val = readl(pll->base + PLL_CFG0);
if (val & SSCG_PLL_BYPASS2_MASK) {
temp64 = parent_rate;
} else if (val & SSCG_PLL_BYPASS1_MASK) {
@@ -371,10 +371,10 @@ static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
u32 val;
/* set bypass here too since the parent might be the same */
- val = clk_readl(pll->base + PLL_CFG0);
+ val = readl(pll->base + PLL_CFG0);
val &= ~SSCG_PLL_BYPASS_MASK;
val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
- clk_writel(val, pll->base + PLL_CFG0);
+ writel(val, pll->base + PLL_CFG0);
val = readl_relaxed(pll->base + PLL_CFG2);
val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
@@ -395,7 +395,7 @@ static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
u32 val;
u8 ret = pll->parent;
- val = clk_readl(pll->base + PLL_CFG0);
+ val = readl(pll->base + PLL_CFG0);
if (val & SSCG_PLL_BYPASS2_MASK)
ret = pll->bypass2;
else if (val & SSCG_PLL_BYPASS1_MASK)
@@ -408,10 +408,10 @@ static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
u32 val;
- val = clk_readl(pll->base + PLL_CFG0);
+ val = readl(pll->base + PLL_CFG0);
val &= ~SSCG_PLL_BYPASS_MASK;
val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
- clk_writel(val, pll->base + PLL_CFG0);
+ writel(val, pll->base + PLL_CFG0);
return clk_sccg_pll_wait_lock(pll);
}
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 5748ec8673e4..8639a8f2153e 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -77,6 +77,7 @@ enum imx_pllv3_type {
IMX_PLLV3_ENET_IMX7,
IMX_PLLV3_SYS_VF610,
IMX_PLLV3_DDR_IMX7,
+ IMX_PLLV3_AV_IMX7,
};
struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
@@ -138,11 +139,6 @@ static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
}
-static inline struct clk_hw *imx_get_clk_hw_fixed(const char *name, int rate)
-{
- return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
-}
-
static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
u8 shift, u8 width, const char * const *parents,
int num_parents)
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 584ff4ff81c7..8901ea0295b7 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -205,6 +205,12 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
.parents = { JZ4725B_CLK_EXT512, JZ4725B_CLK_OSC32K, -1, -1 },
.mux = { CGU_REG_OPCR, 2, 1},
},
+
+ [JZ4725B_CLK_UDC_PHY] = {
+ "udc_phy", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
+ .gate = { CGU_REG_OPCR, 6, true },
+ },
};
static void __init jz4725b_cgu_init(struct device_node *np)
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 53edade25a1d..4d8a9aef95f6 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -216,4 +216,87 @@ config COMMON_CLK_MT8173
default ARCH_MEDIATEK
---help---
This driver supports MediaTek MT8173 clocks.
+
+config COMMON_CLK_MT8183
+ bool "Clock driver for MediaTek MT8183"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ help
+ This driver supports MediaTek MT8183 basic clocks.
+
+config COMMON_CLK_MT8183_AUDIOSYS
+ bool "Clock driver for MediaTek MT8183 audiosys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 audiosys clocks.
+
+config COMMON_CLK_MT8183_CAMSYS
+ bool "Clock driver for MediaTek MT8183 camsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 camsys clocks.
+
+config COMMON_CLK_MT8183_IMGSYS
+ bool "Clock driver for MediaTek MT8183 imgsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 imgsys clocks.
+
+config COMMON_CLK_MT8183_IPU_CORE0
+ bool "Clock driver for MediaTek MT8183 ipu_core0"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 ipu_core0 clocks.
+
+config COMMON_CLK_MT8183_IPU_CORE1
+ bool "Clock driver for MediaTek MT8183 ipu_core1"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 ipu_core1 clocks.
+
+config COMMON_CLK_MT8183_IPU_ADL
+ bool "Clock driver for MediaTek MT8183 ipu_adl"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 ipu_adl clocks.
+
+config COMMON_CLK_MT8183_IPU_CONN
+ bool "Clock driver for MediaTek MT8183 ipu_conn"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 ipu_conn clocks.
+
+config COMMON_CLK_MT8183_MFGCFG
+ bool "Clock driver for MediaTek MT8183 mfgcfg"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 mfgcfg clocks.
+
+config COMMON_CLK_MT8183_MMSYS
+ bool "Clock driver for MediaTek MT8183 mmsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 mmsys clocks.
+
+config COMMON_CLK_MT8183_VDECSYS
+ bool "Clock driver for MediaTek MT8183 vdecsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 vdecsys clocks.
+
+config COMMON_CLK_MT8183_VENCSYS
+ bool "Clock driver for MediaTek MT8183 vencsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 vencsys clocks.
+
+config COMMON_CLK_MT8516
+ bool "Clock driver for MediaTek MT8516"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8516 clocks.
+
endmenu
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index ee4410ff43ab..f74937b35f68 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o
+obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o
+
obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o
obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o
obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o
@@ -31,3 +32,16 @@ obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
+obj-$(CONFIG_COMMON_CLK_MT8183_AUDIOSYS) += clk-mt8183-audio.o
+obj-$(CONFIG_COMMON_CLK_MT8183_CAMSYS) += clk-mt8183-cam.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IMGSYS) += clk-mt8183-img.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CORE0) += clk-mt8183-ipu0.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CORE1) += clk-mt8183-ipu1.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_ADL) += clk-mt8183-ipu_adl.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CONN) += clk-mt8183-ipu_conn.o
+obj-$(CONFIG_COMMON_CLK_MT8183_MFGCFG) += clk-mt8183-mfgcfg.o
+obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o
+obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 9628d4e7690b..85daf826619a 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -169,11 +169,10 @@ struct clk *mtk_clk_register_gate(
return ERR_PTR(-ENOMEM);
init.name = name;
- init.flags = CLK_SET_RATE_PARENT;
+ init.flags = flags | CLK_SET_RATE_PARENT;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
init.ops = ops;
- init.flags = flags;
cg->regmap = regmap;
cg->set_ofs = set_ofs;
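
The gate fix above ORs the caller's flags into CLK_SET_RATE_PARENT; previously init.flags was assigned twice and the second assignment silently dropped CLK_SET_RATE_PARENT. A tiny standalone illustration of the difference (flag values are illustrative):

#include <stdio.h>

#define CLK_SET_RATE_PARENT	(1u << 2)
#define CLK_IS_CRITICAL		(1u << 11)

int main(void)
{
	unsigned int caller_flags = CLK_IS_CRITICAL;
	unsigned int old_way, new_way;

	old_way = CLK_SET_RATE_PARENT;	/* first assignment ...            */
	old_way = caller_flags;		/* ... overwritten, parent flag lost */

	new_way = caller_flags | CLK_SET_RATE_PARENT;	/* both preserved */

	printf("old 0x%x, new 0x%x\n", old_way, new_way);
	return 0;
}
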
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 9f766dfe1d57..ab240163f9f8 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -50,4 +50,18 @@ struct clk *mtk_clk_register_gate(
const struct clk_ops *ops,
unsigned long flags);
+#define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, \
+ _ops, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = _regs, \
+ .shift = _shift, \
+ .ops = _ops, \
+ .flags = _flags, \
+ }
+
+#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops) \
+ GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)
+
#endif /* __DRV_CLK_GATE_H */
diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
new file mode 100644
index 000000000000..c87450180b7b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate audio_clks[] = {
+ /* AUDIO0 */
+ GATE_AUDIO0(CLK_AUDIO_AFE, "aud_afe", "audio_sel",
+ 2),
+ GATE_AUDIO0(CLK_AUDIO_22M, "aud_22m", "aud_eng1_sel",
+ 8),
+ GATE_AUDIO0(CLK_AUDIO_24M, "aud_24m", "aud_eng2_sel",
+ 9),
+ GATE_AUDIO0(CLK_AUDIO_APLL2_TUNER, "aud_apll2_tuner", "aud_eng2_sel",
+ 18),
+ GATE_AUDIO0(CLK_AUDIO_APLL_TUNER, "aud_apll_tuner", "aud_eng1_sel",
+ 19),
+ GATE_AUDIO0(CLK_AUDIO_TDM, "aud_tdm", "apll12_divb",
+ 20),
+ GATE_AUDIO0(CLK_AUDIO_ADC, "aud_adc", "audio_sel",
+ 24),
+ GATE_AUDIO0(CLK_AUDIO_DAC, "aud_dac", "audio_sel",
+ 25),
+ GATE_AUDIO0(CLK_AUDIO_DAC_PREDIS, "aud_dac_predis", "audio_sel",
+ 26),
+ GATE_AUDIO0(CLK_AUDIO_TML, "aud_tml", "audio_sel",
+ 27),
+ /* AUDIO1 */
+ GATE_AUDIO1(CLK_AUDIO_I2S1, "aud_i2s1", "audio_sel",
+ 4),
+ GATE_AUDIO1(CLK_AUDIO_I2S2, "aud_i2s2", "audio_sel",
+ 5),
+ GATE_AUDIO1(CLK_AUDIO_I2S3, "aud_i2s3", "audio_sel",
+ 6),
+ GATE_AUDIO1(CLK_AUDIO_I2S4, "aud_i2s4", "audio_sel",
+ 7),
+ GATE_AUDIO1(CLK_AUDIO_PDN_ADDA6_ADC, "aud_pdn_adda6_adc", "audio_sel",
+ 20),
+};
+
+static int clk_mt8183_audio_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+
+ mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ return r;
+
+ r = devm_of_platform_populate(&pdev->dev);
+ if (r)
+ of_clk_del_provider(node);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt8183_audio[] = {
+ { .compatible = "mediatek,mt8183-audiosys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_audio_drv = {
+ .probe = clk_mt8183_audio_probe,
+ .driver = {
+ .name = "clk-mt8183-audio",
+ .of_match_table = of_match_clk_mt8183_audio,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_audio_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
new file mode 100644
index 000000000000..8643802c4471
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB6, "cam_larb6", "cam_sel", 0),
+ GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "cam_sel", 1),
+ GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "cam_sel", 2),
+ GATE_CAM(CLK_CAM_CAM, "cam_cam", "cam_sel", 6),
+ GATE_CAM(CLK_CAM_CAMTG, "cam_camtg", "cam_sel", 7),
+ GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "cam_sel", 8),
+ GATE_CAM(CLK_CAM_CAMSV0, "cam_camsv0", "cam_sel", 9),
+ GATE_CAM(CLK_CAM_CAMSV1, "cam_camsv1", "cam_sel", 10),
+ GATE_CAM(CLK_CAM_CAMSV2, "cam_camsv2", "cam_sel", 11),
+ GATE_CAM(CLK_CAM_CCU, "cam_ccu", "cam_sel", 12),
+};
+
+static int clk_mt8183_cam_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
+
+ mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_cam[] = {
+ { .compatible = "mediatek,mt8183-camsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_cam_drv = {
+ .probe = clk_mt8183_cam_probe,
+ .driver = {
+ .name = "clk-mt8183-cam",
+ .of_match_table = of_match_clk_mt8183_cam,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_cam_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
new file mode 100644
index 000000000000..470d676a4a10
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_LARB5, "img_larb5", "img_sel", 0),
+ GATE_IMG(CLK_IMG_LARB2, "img_larb2", "img_sel", 1),
+ GATE_IMG(CLK_IMG_DIP, "img_dip", "img_sel", 2),
+ GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "img_sel", 3),
+ GATE_IMG(CLK_IMG_DPE, "img_dpe", "img_sel", 4),
+ GATE_IMG(CLK_IMG_RSC, "img_rsc", "img_sel", 5),
+ GATE_IMG(CLK_IMG_MFB, "img_mfb", "img_sel", 6),
+ GATE_IMG(CLK_IMG_WPE_A, "img_wpe_a", "img_sel", 7),
+ GATE_IMG(CLK_IMG_WPE_B, "img_wpe_b", "img_sel", 8),
+ GATE_IMG(CLK_IMG_OWE, "img_owe", "img_sel", 9),
+};
+
+static int clk_mt8183_img_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_img[] = {
+ { .compatible = "mediatek,mt8183-imgsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_img_drv = {
+ .probe = clk_mt8183_img_probe,
+ .driver = {
+ .name = "clk-mt8183-img",
+ .of_match_table = of_match_clk_mt8183_img,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
new file mode 100644
index 000000000000..c5cb76fc9e5e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_core0_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPU_CORE0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_core0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipu_core0_clks[] = {
+ GATE_IPU_CORE0(CLK_IPU_CORE0_JTAG, "ipu_core0_jtag", "dsp_sel", 0),
+ GATE_IPU_CORE0(CLK_IPU_CORE0_AXI, "ipu_core0_axi", "dsp_sel", 1),
+ GATE_IPU_CORE0(CLK_IPU_CORE0_IPU, "ipu_core0_ipu", "dsp_sel", 2),
+};
+
+static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IPU_CORE0_NR_CLK);
+
+ mtk_clk_register_gates(node, ipu_core0_clks, ARRAY_SIZE(ipu_core0_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = {
+ { .compatible = "mediatek,mt8183-ipu_core0", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_ipu_core0_drv = {
+ .probe = clk_mt8183_ipu_core0_probe,
+ .driver = {
+ .name = "clk-mt8183-ipu_core0",
+ .of_match_table = of_match_clk_mt8183_ipu_core0,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_ipu_core0_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
new file mode 100644
index 000000000000..8fd5fe002890
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_core1_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPU_CORE1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_core1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipu_core1_clks[] = {
+ GATE_IPU_CORE1(CLK_IPU_CORE1_JTAG, "ipu_core1_jtag", "dsp_sel", 0),
+ GATE_IPU_CORE1(CLK_IPU_CORE1_AXI, "ipu_core1_axi", "dsp_sel", 1),
+ GATE_IPU_CORE1(CLK_IPU_CORE1_IPU, "ipu_core1_ipu", "dsp_sel", 2),
+};
+
+static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IPU_CORE1_NR_CLK);
+
+ mtk_clk_register_gates(node, ipu_core1_clks, ARRAY_SIZE(ipu_core1_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = {
+ { .compatible = "mediatek,mt8183-ipu_core1", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_ipu_core1_drv = {
+ .probe = clk_mt8183_ipu_core1_probe,
+ .driver = {
+ .name = "clk-mt8183-ipu_core1",
+ .of_match_table = of_match_clk_mt8183_ipu_core1,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_ipu_core1_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
new file mode 100644
index 000000000000..3f37d0ef1df1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_adl_cg_regs = {
+ .set_ofs = 0x204,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x204,
+};
+
+#define GATE_IPU_ADL_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_adl_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate ipu_adl_clks[] = {
+ GATE_IPU_ADL_I(CLK_IPU_ADL_CABGEN, "ipu_adl_cabgen", "dsp_sel", 24),
+};
+
+static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IPU_ADL_NR_CLK);
+
+ mtk_clk_register_gates(node, ipu_adl_clks, ARRAY_SIZE(ipu_adl_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = {
+ { .compatible = "mediatek,mt8183-ipu_adl", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_ipu_adl_drv = {
+ .probe = clk_mt8183_ipu_adl_probe,
+ .driver = {
+ .name = "clk-mt8183-ipu_adl",
+ .of_match_table = of_match_clk_mt8183_ipu_adl,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_ipu_adl_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
new file mode 100644
index 000000000000..7e0eef79c461
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_conn_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs ipu_conn_apb_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi_cg_regs = {
+ .set_ofs = 0x18,
+ .clr_ofs = 0x18,
+ .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi1_cg_regs = {
+ .set_ofs = 0x1c,
+ .clr_ofs = 0x1c,
+ .sta_ofs = 0x1c,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi2_cg_regs = {
+ .set_ofs = 0x20,
+ .clr_ofs = 0x20,
+ .sta_ofs = 0x20,
+};
+
+#define GATE_IPU_CONN(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_IPU_CONN_APB(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_apb_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+#define GATE_IPU_CONN_AXI_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_axi_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_IPU_CONN_AXI1_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_axi1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_IPU_CONN_AXI2_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_axi2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate ipu_conn_clks[] = {
+ GATE_IPU_CONN(CLK_IPU_CONN_IPU,
+ "ipu_conn_ipu", "dsp_sel", 0),
+ GATE_IPU_CONN(CLK_IPU_CONN_AHB,
+ "ipu_conn_ahb", "dsp_sel", 1),
+ GATE_IPU_CONN(CLK_IPU_CONN_AXI,
+ "ipu_conn_axi", "dsp_sel", 2),
+ GATE_IPU_CONN(CLK_IPU_CONN_ISP,
+ "ipu_conn_isp", "dsp_sel", 3),
+ GATE_IPU_CONN(CLK_IPU_CONN_CAM_ADL,
+ "ipu_conn_cam_adl", "dsp_sel", 4),
+ GATE_IPU_CONN(CLK_IPU_CONN_IMG_ADL,
+ "ipu_conn_img_adl", "dsp_sel", 5),
+ GATE_IPU_CONN_APB(CLK_IPU_CONN_DAP_RX,
+ "ipu_conn_dap_rx", "dsp1_sel", 0),
+ GATE_IPU_CONN_APB(CLK_IPU_CONN_APB2AXI,
+ "ipu_conn_apb2axi", "dsp1_sel", 3),
+ GATE_IPU_CONN_APB(CLK_IPU_CONN_APB2AHB,
+ "ipu_conn_apb2ahb", "dsp1_sel", 20),
+ GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU_CAB1TO2,
+ "ipu_conn_ipu_cab1to2", "dsp1_sel", 6),
+ GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU1_CAB1TO2,
+ "ipu_conn_ipu1_cab1to2", "dsp1_sel", 13),
+ GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU2_CAB1TO2,
+ "ipu_conn_ipu2_cab1to2", "dsp1_sel", 20),
+ GATE_IPU_CONN_AXI1_I(CLK_IPU_CONN_CAB3TO3,
+ "ipu_conn_cab3to3", "dsp1_sel", 0),
+ GATE_IPU_CONN_AXI2_I(CLK_IPU_CONN_CAB2TO1,
+ "ipu_conn_cab2to1", "dsp1_sel", 14),
+ GATE_IPU_CONN_AXI2_I(CLK_IPU_CONN_CAB3TO1_SLICE,
+ "ipu_conn_cab3to1_slice", "dsp1_sel", 17),
+};
+
+static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IPU_CONN_NR_CLK);
+
+ mtk_clk_register_gates(node, ipu_conn_clks, ARRAY_SIZE(ipu_conn_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = {
+ { .compatible = "mediatek,mt8183-ipu_conn", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_ipu_conn_drv = {
+ .probe = clk_mt8183_ipu_conn_probe,
+ .driver = {
+ .name = "clk-mt8183-ipu_conn",
+ .of_match_table = of_match_clk_mt8183_ipu_conn,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_ipu_conn_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
new file mode 100644
index 000000000000..99a6b020833e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0)
+};
+
+static int clk_mt8183_mfg_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+
+ mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_mfg[] = {
+ { .compatible = "mediatek,mt8183-mfgcfg", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_mfg_drv = {
+ .probe = clk_mt8183_mfg_probe,
+ .driver = {
+ .name = "clk-mt8183-mfg",
+ .of_match_table = of_match_clk_mt8183_mfg,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
new file mode 100644
index 000000000000..720c696b506d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_SMI_LARB1, "mm_smi_larb1", "mm_sel", 2),
+ GATE_MM0(CLK_MM_GALS_COMM0, "mm_gals_comm0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_GALS_COMM1, "mm_gals_comm1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_GALS_CCU2MM, "mm_gals_ccu2mm", "mm_sel", 5),
+ GATE_MM0(CLK_MM_GALS_IPU12MM, "mm_gals_ipu12mm", "mm_sel", 6),
+ GATE_MM0(CLK_MM_GALS_IMG2MM, "mm_gals_img2mm", "mm_sel", 7),
+ GATE_MM0(CLK_MM_GALS_CAM2MM, "mm_gals_cam2mm", "mm_sel", 8),
+ GATE_MM0(CLK_MM_GALS_IPU2MM, "mm_gals_ipu2mm", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_DL_TXCK, "mm_mdp_dl_txck", "mm_sel", 10),
+ GATE_MM0(CLK_MM_IPU_DL_TXCK, "mm_ipu_dl_txck", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 15),
+ GATE_MM0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "mm_sel", 16),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 17),
+ GATE_MM0(CLK_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_OVL1_2L, "mm_disp_ovl1_2l", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_sel", 29),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DISP_SPLIT, "mm_disp_split", "mm_sel", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DSI0_MM, "mm_dsi0_mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DSI0_IF, "mm_dsi0_if", "mm_sel", 1),
+ GATE_MM1(CLK_MM_DPI_MM, "mm_dpi_mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DPI_IF, "mm_dpi_if", "dpi0_sel", 3),
+ GATE_MM1(CLK_MM_FAKE_ENG2, "mm_fake_eng2", "mm_sel", 4),
+ GATE_MM1(CLK_MM_MDP_DL_RX, "mm_mdp_dl_rx", "mm_sel", 5),
+ GATE_MM1(CLK_MM_IPU_DL_RX, "mm_ipu_dl_rx", "mm_sel", 6),
+ GATE_MM1(CLK_MM_26M, "mm_26m", "f_f26m_ck", 7),
+ GATE_MM1(CLK_MM_MMSYS_R2Y, "mm_mmsys_r2y", "mm_sel", 8),
+ GATE_MM1(CLK_MM_DISP_RSZ, "mm_disp_rsz", "mm_sel", 9),
+ GATE_MM1(CLK_MM_MDP_AAL, "mm_mdp_aal", "mm_sel", 10),
+ GATE_MM1(CLK_MM_MDP_CCORR, "mm_mdp_ccorr", "mm_sel", 11),
+ GATE_MM1(CLK_MM_DBI_MM, "mm_dbi_mm", "mm_sel", 12),
+ GATE_MM1(CLK_MM_DBI_IF, "mm_dbi_if", "dpi0_sel", 13),
+};
+
+static int clk_mt8183_mm_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_mm[] = {
+ { .compatible = "mediatek,mt8183-mmsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_mm_drv = {
+ .probe = clk_mt8183_mm_probe,
+ .driver = {
+ .name = "clk-mt8183-mm",
+ .of_match_table = of_match_clk_mt8183_mm,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
new file mode 100644
index 000000000000..6250fd1e0edc
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0_I(CLK_VDEC_VDEC, "vdec_vdec", "mm_sel", 0),
+ /* VDEC1 */
+ GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1", "mm_sel", 0),
+};
+
+static int clk_mt8183_vdec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+ mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_vdec[] = {
+ { .compatible = "mediatek,mt8183-vdecsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_vdec_drv = {
+ .probe = clk_mt8183_vdec_probe,
+ .driver = {
+ .name = "clk-mt8183-vdec",
+ .of_match_table = of_match_clk_mt8183_vdec,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
new file mode 100644
index 000000000000..6678ef03fab2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC_I(CLK_VENC_LARB, "venc_larb",
+ "mm_sel", 0),
+ GATE_VENC_I(CLK_VENC_VENC, "venc_venc",
+ "mm_sel", 4),
+ GATE_VENC_I(CLK_VENC_JPGENC, "venc_jpgenc",
+ "mm_sel", 8),
+};
+
+static int clk_mt8183_venc_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+ mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_venc[] = {
+ { .compatible = "mediatek,mt8183-vencsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_venc_drv = {
+ .probe = clk_mt8183_venc_probe,
+ .driver = {
+ .name = "clk-mt8183-venc",
+ .of_match_table = of_match_clk_mt8183_venc,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
new file mode 100644
index 000000000000..9d8651033ae9
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -0,0 +1,1284 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static DEFINE_SPINLOCK(mt8183_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_CLK26M, "f_f26m_ck", "clk26m", 26000000),
+ FIXED_CLK(CLK_TOP_ULPOSC, "osc", NULL, 250000),
+ FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
+ 2),
+ FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
+ 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D2_D2, "syspll_d2_d2", "syspll_d2", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D2_D4, "syspll_d2_d4", "syspll_d2", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D2_D8, "syspll_d2_d8", "syspll_d2", 1,
+ 8),
+ FACTOR(CLK_TOP_SYSPLL_D2_D16, "syspll_d2_d16", "syspll_d2", 1,
+ 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1,
+ 3),
+ FACTOR(CLK_TOP_SYSPLL_D3_D2, "syspll_d3_d2", "syspll_d3", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D3_D4, "syspll_d3_d4", "syspll_d3", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D3_D8, "syspll_d3_d8", "syspll_d3", 1,
+ 8),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1,
+ 5),
+ FACTOR(CLK_TOP_SYSPLL_D5_D2, "syspll_d5_d2", "syspll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D5_D4, "syspll_d5_d4", "syspll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1,
+ 7),
+ FACTOR(CLK_TOP_SYSPLL_D7_D2, "syspll_d7_d2", "syspll_d7", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D7_D4, "syspll_d7_d4", "syspll_d7", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL_CK, "univpll_ck", "univpll", 1,
+ 1),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2_D2, "univpll_d2_d2", "univpll_d2", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2_D4, "univpll_d2_d4", "univpll_d2", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL_D2_D8, "univpll_d2_d8", "univpll_d2", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1,
+ 3),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D2, "univpll_d3_d2", "univpll_d3", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D4, "univpll_d3_d4", "univpll_d3", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D8, "univpll_d3_d8", "univpll_d3", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1,
+ 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1,
+ 7),
+ FACTOR(CLK_TOP_UNIVP_192M_CK, "univ_192m_ck", "univpll_192m", 1,
+ 1),
+ FACTOR(CLK_TOP_UNIVP_192M_D2, "univ_192m_d2", "univ_192m_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVP_192M_D4, "univ_192m_d4", "univ_192m_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVP_192M_D8, "univ_192m_d8", "univ_192m_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVP_192M_D16, "univ_192m_d16", "univ_192m_ck", 1,
+ 16),
+ FACTOR(CLK_TOP_UNIVP_192M_D32, "univ_192m_d32", "univ_192m_ck", 1,
+ 32),
+ FACTOR(CLK_TOP_APLL1_CK, "apll1_ck", "apll1", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1", 1,
+ 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1,
+ 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1", 1,
+ 8),
+ FACTOR(CLK_TOP_APLL2_CK, "apll2_ck", "apll2", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1,
+ 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1,
+ 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2", 1,
+ 8),
+ FACTOR(CLK_TOP_TVDPLL_CK, "tvdpll_ck", "tvdpll", 1,
+ 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1,
+ 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll", 1,
+ 8),
+ FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll", 1,
+ 16),
+ FACTOR(CLK_TOP_MMPLL_CK, "mmpll_ck", "mmpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1,
+ 4),
+ FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1,
+ 2),
+ FACTOR(CLK_TOP_MMPLL_D4_D4, "mmpll_d4_d4", "mmpll_d4", 1,
+ 4),
+ FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1,
+ 5),
+ FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_MMPLL_D5_D4, "mmpll_d5_d4", "mmpll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1,
+ 6),
+ FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1,
+ 7),
+ FACTOR(CLK_TOP_MFGPLL_CK, "mfgpll_ck", "mfgpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MSDCPLL_CK, "msdcpll_ck", "msdcpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1,
+ 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1,
+ 4),
+ FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1,
+ 8),
+ FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1,
+ 16),
+ FACTOR(CLK_TOP_AD_OSC_CK, "ad_osc_ck", "osc", 1,
+ 1),
+ FACTOR(CLK_TOP_OSC_D2, "osc_d2", "osc", 1,
+ 2),
+ FACTOR(CLK_TOP_OSC_D4, "osc_d4", "osc", 1,
+ 4),
+ FACTOR(CLK_TOP_OSC_D8, "osc_d8", "osc", 1,
+ 8),
+ FACTOR(CLK_TOP_OSC_D16, "osc_d16", "osc", 1,
+ 16),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D16, "univpll_d3_d16", "univpll_d3", 1,
+ 16),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll_d2_d4",
+ "syspll_d7",
+ "osc_d4"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "mmpll_d7",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const img_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "syspll_d2",
+ "mmpll_d6",
+ "syspll_d3",
+ "mmpll_d7",
+ "univpll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "syspll_d3_d2",
+ "univpll_d3_d2"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const dsp1_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const dsp2_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const ipu_if_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mfgpll_ck",
+ "univpll_d3",
+ "syspll_d3"
+};
+
+static const char * const f52m_mfg_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "univpll_d3_d4",
+ "univpll_d3_d8"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univ_192m_d8",
+ "univpll_d3_d8",
+ "univ_192m_d4",
+ "univpll_d3_d16",
+ "csw_f26m_ck_d2",
+ "univ_192m_d16",
+ "univ_192m_d32"
+};
+
+static const char * const camtg2_parents[] = {
+ "clk26m",
+ "univ_192m_d8",
+ "univpll_d3_d8",
+ "univ_192m_d4",
+ "univpll_d3_d16",
+ "csw_f26m_ck_d2",
+ "univ_192m_d16",
+ "univ_192m_d32"
+};
+
+static const char * const camtg3_parents[] = {
+ "clk26m",
+ "univ_192m_d8",
+ "univpll_d3_d8",
+ "univ_192m_d4",
+ "univpll_d3_d16",
+ "csw_f26m_ck_d2",
+ "univ_192m_d16",
+ "univ_192m_d32"
+};
+
+static const char * const camtg4_parents[] = {
+ "clk26m",
+ "univ_192m_d8",
+ "univpll_d3_d8",
+ "univ_192m_d4",
+ "univpll_d3_d16",
+ "csw_f26m_ck_d2",
+ "univ_192m_d16",
+ "univ_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d3_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll_d5_d2",
+ "syspll_d3_d4",
+ "msdcpll_d4"
+};
+
+static const char * const msdc50_hclk_parents[] = {
+ "clk26m",
+ "syspll_d2_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "univpll_d2_d4",
+ "syspll_d3_d2",
+ "univpll_d2_d2"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "syspll_d3_d2",
+ "syspll_d7",
+ "msdcpll_d2"
+};
+
+static const char * const msdc30_2_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "syspll_d3_d2",
+ "syspll_d7",
+ "msdcpll_d2"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll_d5_d4",
+ "syspll_d7_d4",
+ "syspll_d2_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll_d2_d4",
+ "syspll_d7_d2"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll_d2_d8",
+ "osc_d8"
+};
+
+static const char * const fpwrap_ulposc_parents[] = {
+ "clk26m",
+ "osc_d16",
+ "osc_d4",
+ "osc_d8"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll_d2_d2",
+ "syspll_d5"
+};
+
+static const char * const sspm_parents[] = {
+ "clk26m",
+ "univpll_d2_d4",
+ "syspll_d2_d2",
+ "univpll_d2_d2",
+ "syspll_d3"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "tvdpll_d8",
+ "tvdpll_d16",
+ "univpll_d5_d2",
+ "univpll_d3_d4",
+ "syspll_d3_d4",
+ "univpll_d3_d8"
+};
+
+static const char * const scam_parents[] = {
+ "clk26m",
+ "syspll_d5_d2"
+};
+
+static const char * const disppwm_parents[] = {
+ "clk26m",
+ "univpll_d3_d4",
+ "osc_d2",
+ "osc_d4",
+ "osc_d16"
+};
+
+static const char * const usb_top_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d3_d4",
+ "univpll_d5_d2"
+};
+
+
+static const char * const ssusb_top_xhci_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d3_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "syspll_d2_d8"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "syspll_d2_d8",
+ "univpll_d5_d2"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "univpll_d2_d8",
+ "syspll_d5",
+ "syspll_d2_d2",
+ "univpll_d2_d2",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll_d2_d2",
+ "univpll_d3_d2",
+ "univpll_d2_d4"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "syspll_d2_d2",
+ "syspll_d2_d4",
+ "syspll_d2_d8"
+};
+
+static const char * const aud_engen1_parents[] = {
+ "clk26m",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const aud_engen2_parents[] = {
+ "clk26m",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8"
+};
+
+static const char * const faes_ufsfde_parents[] = {
+ "clk26m",
+ "syspll_d2",
+ "syspll_d2_d2",
+ "syspll_d3",
+ "syspll_d2_d4",
+ "univpll_d3"
+};
+
+static const char * const fufs_parents[] = {
+ "clk26m",
+ "syspll_d2_d4",
+ "syspll_d2_d8",
+ "syspll_d2_d16"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck"
+};
+
+/*
+ * CRITICAL CLOCK:
+ * axi_sel is the main bus clock of the whole SoC.
+ * spm_sel is the clock of the always-on co-processor.
+ */
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_AXI, "axi_sel",
+ axi_parents, 0x40,
+ 0x44, 0x48, 0, 2, 7, 0x004, 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MM, "mm_sel",
+ mm_parents, 0x40,
+ 0x44, 0x48, 8, 3, 15, 0x004, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IMG, "img_sel",
+ img_parents, 0x40,
+ 0x44, 0x48, 16, 3, 23, 0x004, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAM, "cam_sel",
+ cam_parents, 0x40,
+ 0x44, 0x48, 24, 4, 31, 0x004, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP, "dsp_sel",
+ dsp_parents, 0x50,
+ 0x54, 0x58, 0, 4, 7, 0x004, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP1, "dsp1_sel",
+ dsp1_parents, 0x50,
+ 0x54, 0x58, 8, 4, 15, 0x004, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP2, "dsp2_sel",
+ dsp2_parents, 0x50,
+ 0x54, 0x58, 16, 4, 23, 0x004, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IPU_IF, "ipu_if_sel",
+ ipu_if_parents, 0x50,
+ 0x54, 0x58, 24, 4, 31, 0x004, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MFG, "mfg_sel",
+ mfg_parents, 0x60,
+ 0x64, 0x68, 0, 2, 7, 0x004, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_F52M_MFG, "f52m_mfg_sel",
+ f52m_mfg_parents, 0x60,
+ 0x64, 0x68, 8, 2, 15, 0x004, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG, "camtg_sel",
+ camtg_parents, 0x60,
+ 0x64, 0x68, 16, 3, 23, 0x004, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG2, "camtg2_sel",
+ camtg2_parents, 0x60,
+ 0x64, 0x68, 24, 3, 31, 0x004, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG3, "camtg3_sel",
+ camtg3_parents, 0x70,
+ 0x74, 0x78, 0, 3, 7, 0x004, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG4, "camtg4_sel",
+ camtg4_parents, 0x70,
+ 0x74, 0x78, 8, 3, 15, 0x004, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_UART, "uart_sel",
+ uart_parents, 0x70,
+ 0x74, 0x78, 16, 1, 23, 0x004, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SPI, "spi_sel",
+ spi_parents, 0x70,
+ 0x74, 0x78, 24, 2, 31, 0x004, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0_HCLK, "msdc50_hclk_sel",
+ msdc50_hclk_parents, 0x80,
+ 0x84, 0x88, 0, 2, 7, 0x004, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0, "msdc50_0_sel",
+ msdc50_0_parents, 0x80,
+ 0x84, 0x88, 8, 3, 15, 0x004, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_1, "msdc30_1_sel",
+ msdc30_1_parents, 0x80,
+ 0x84, 0x88, 16, 3, 23, 0x004, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_2, "msdc30_2_sel",
+ msdc30_2_parents, 0x80,
+ 0x84, 0x88, 24, 3, 31, 0x004, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUDIO, "audio_sel",
+ audio_parents, 0x90,
+ 0x94, 0x98, 0, 2, 7, 0x004, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_INTBUS, "aud_intbus_sel",
+ aud_intbus_parents, 0x90,
+ 0x94, 0x98, 8, 2, 15, 0x004, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_PMICSPI, "pmicspi_sel",
+ pmicspi_parents, 0x90,
+ 0x94, 0x98, 16, 2, 23, 0x004, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FPWRAP_ULPOSC, "fpwrap_ulposc_sel",
+ fpwrap_ulposc_parents, 0x90,
+ 0x94, 0x98, 24, 2, 31, 0x004, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel",
+ atb_parents, 0xa0,
+ 0xa4, 0xa8, 0, 2, 7, 0x004, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSPM, "sspm_sel",
+ sspm_parents, 0xa0,
+ 0xa4, 0xa8, 8, 3, 15, 0x004, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel",
+ dpi0_parents, 0xa0,
+ 0xa4, 0xa8, 16, 4, 23, 0x004, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel",
+ scam_parents, 0xa0,
+ 0xa4, 0xa8, 24, 1, 31, 0x004, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DISP_PWM, "disppwm_sel",
+ disppwm_parents, 0xb0,
+ 0xb4, 0xb8, 0, 3, 7, 0x004, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_USB_TOP, "usb_top_sel",
+ usb_top_parents, 0xb0,
+ 0xb4, 0xb8, 8, 2, 15, 0x004, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSUSB_TOP_XHCI, "ssusb_top_xhci_sel",
+ ssusb_top_xhci_parents, 0xb0,
+ 0xb4, 0xb8, 16, 2, 23, 0x004, 30),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SPM, "spm_sel",
+ spm_parents, 0xb0,
+ 0xb4, 0xb8, 24, 1, 31, 0x008, 0, CLK_IS_CRITICAL),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_I2C, "i2c_sel",
+ i2c_parents, 0xc0,
+ 0xc4, 0xc8, 0, 2, 7, 0x008, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCP, "scp_sel",
+ scp_parents, 0xc0,
+ 0xc4, 0xc8, 8, 3, 15, 0x008, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SENINF, "seninf_sel",
+ seninf_parents, 0xc0,
+ 0xc4, 0xc8, 16, 2, 23, 0x008, 3),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DXCC, "dxcc_sel",
+ dxcc_parents, 0xc0,
+ 0xc4, 0xc8, 24, 2, 31, 0x008, 4),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG1, "aud_eng1_sel",
+ aud_engen1_parents, 0xd0,
+ 0xd4, 0xd8, 0, 2, 7, 0x008, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG2, "aud_eng2_sel",
+ aud_engen2_parents, 0xd0,
+ 0xd4, 0xd8, 8, 2, 15, 0x008, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FAES_UFSFDE, "faes_ufsfde_sel",
+ faes_ufsfde_parents, 0xd0,
+ 0xd4, 0xd8, 16, 3, 23, 0x008, 7),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FUFS, "fufs_sel",
+ fufs_parents, 0xd0,
+ 0xd4, 0xd8, 24, 2, 31, 0x008, 8),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_1, "aud_1_sel",
+ aud_1_parents, 0xe0,
+ 0xe4, 0xe8, 0, 1, 7, 0x008, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_2, "aud_2_sel",
+ aud_2_parents, 0xe0,
+ 0xe4, 0xe8, 8, 1, 15, 0x008, 10),
+};
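
For readability, here is what the first entry in the table above (axi_sel) expands to once the MUX_GATE_CLR_SET_UPD_FLAGS() helper added later in this series (clk-mux.h) is applied. The initializer is an editorial illustration only, not code from the patch; the register labels in the comments are inferred from the /* CLK_CFG_n */ grouping comments above.

/* Illustration only: expanded form of the axi_sel mux entry. */
static const struct mtk_mux axi_sel_example = {
	.id		= CLK_TOP_MUX_AXI,
	.name		= "axi_sel",
	.parent_names	= axi_parents,
	.num_parents	= ARRAY_SIZE(axi_parents),
	.mux_ofs	= 0x40,		/* CLK_CFG_0: 2-bit select field at bit 0 */
	.set_ofs	= 0x44,		/* companion set register */
	.clr_ofs	= 0x48,		/* companion clear register */
	.mux_shift	= 0,
	.mux_width	= 2,
	.gate_shift	= 7,		/* clock-off bit inside CLK_CFG_0 */
	.upd_ofs	= 0x004,	/* update-trigger register, bit 0 */
	.upd_shift	= 0,
	.flags		= CLK_IS_CRITICAL,
	.ops		= &mtk_mux_gate_clr_set_upd_ops,
};
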
+
+static const char * const apll_i2s0_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s1_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s2_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s3_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s4_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s5_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static struct mtk_composite top_aud_muxes[] = {
+ MUX(CLK_TOP_MUX_APLL_I2S0, "apll_i2s0_sel", apll_i2s0_parents,
+ 0x320, 8, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S1, "apll_i2s1_sel", apll_i2s1_parents,
+ 0x320, 9, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S2, "apll_i2s2_sel", apll_i2s2_parents,
+ 0x320, 10, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S3, "apll_i2s3_sel", apll_i2s3_parents,
+ 0x320, 11, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S4, "apll_i2s4_sel", apll_i2s4_parents,
+ 0x320, 12, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S5, "apll_i2s5_sel", apll_i2s5_parents,
+ 0x328, 20, 1),
+};
+
+static const char * const mcu_mp0_parents[] = {
+ "clk26m",
+ "armpll_ll",
+ "armpll_div_pll1",
+ "armpll_div_pll2"
+};
+
+static const char * const mcu_mp2_parents[] = {
+ "clk26m",
+ "armpll_l",
+ "armpll_div_pll1",
+ "armpll_div_pll2"
+};
+
+static const char * const mcu_bus_parents[] = {
+ "clk26m",
+ "ccipll",
+ "armpll_div_pll1",
+ "armpll_div_pll2"
+};
+
+static struct mtk_composite mcu_muxes[] = {
+ /* mp0_pll_divider_cfg */
+ MUX(CLK_MCU_MP0_SEL, "mcu_mp0_sel", mcu_mp0_parents, 0x7A0, 9, 2),
+ /* mp2_pll_divider_cfg */
+ MUX(CLK_MCU_MP2_SEL, "mcu_mp2_sel", mcu_mp2_parents, 0x7A8, 9, 2),
+ /* bus_pll_divider_cfg */
+ MUX(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0, 9, 2),
+};
+
+static struct mtk_composite top_aud_divs[] = {
+ DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll_i2s0_sel",
+ 0x320, 2, 0x324, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll_i2s1_sel",
+ 0x320, 3, 0x324, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll_i2s2_sel",
+ 0x320, 4, 0x324, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll_i2s3_sel",
+ 0x320, 5, 0x324, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll_i2s4_sel",
+ 0x320, 6, 0x328, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIVB, "apll12_divb", "apll12_div4",
+ 0x320, 7, 0x328, 8, 8),
+};
+
+static const struct mtk_gate_regs top_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x104,
+ .sta_ofs = 0x104,
+};
+
+#define GATE_TOP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP */
+ GATE_TOP(CLK_TOP_ARMPLL_DIV_PLL1, "armpll_div_pll1", "mainpll", 4),
+ GATE_TOP(CLK_TOP_ARMPLL_DIV_PLL2, "armpll_div_pll2", "univpll", 5),
+};
+
+static const struct mtk_gate_regs infra0_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs infra1_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs infra2_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs infra3_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+#define GATE_INFRA0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate infra_clks[] = {
+ /* INFRA0 */
+ GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr",
+ "axi_sel", 0),
+ GATE_INFRA0(CLK_INFRA_PMIC_AP, "infra_pmic_ap",
+ "axi_sel", 1),
+ GATE_INFRA0(CLK_INFRA_PMIC_MD, "infra_pmic_md",
+ "axi_sel", 2),
+ GATE_INFRA0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn",
+ "axi_sel", 3),
+ GATE_INFRA0(CLK_INFRA_SCPSYS, "infra_scp",
+ "scp_sel", 4),
+ GATE_INFRA0(CLK_INFRA_SEJ, "infra_sej",
+ "f_f26m_ck", 5),
+ GATE_INFRA0(CLK_INFRA_APXGPT, "infra_apxgpt",
+ "axi_sel", 6),
+ GATE_INFRA0(CLK_INFRA_ICUSB, "infra_icusb",
+ "axi_sel", 8),
+ GATE_INFRA0(CLK_INFRA_GCE, "infra_gce",
+ "axi_sel", 9),
+ GATE_INFRA0(CLK_INFRA_THERM, "infra_therm",
+ "axi_sel", 10),
+ GATE_INFRA0(CLK_INFRA_I2C0, "infra_i2c0",
+ "i2c_sel", 11),
+ GATE_INFRA0(CLK_INFRA_I2C1, "infra_i2c1",
+ "i2c_sel", 12),
+ GATE_INFRA0(CLK_INFRA_I2C2, "infra_i2c2",
+ "i2c_sel", 13),
+ GATE_INFRA0(CLK_INFRA_I2C3, "infra_i2c3",
+ "i2c_sel", 14),
+ GATE_INFRA0(CLK_INFRA_PWM_HCLK, "infra_pwm_hclk",
+ "axi_sel", 15),
+ GATE_INFRA0(CLK_INFRA_PWM1, "infra_pwm1",
+ "i2c_sel", 16),
+ GATE_INFRA0(CLK_INFRA_PWM2, "infra_pwm2",
+ "i2c_sel", 17),
+ GATE_INFRA0(CLK_INFRA_PWM3, "infra_pwm3",
+ "i2c_sel", 18),
+ GATE_INFRA0(CLK_INFRA_PWM4, "infra_pwm4",
+ "i2c_sel", 19),
+ GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
+ "i2c_sel", 21),
+ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
+ "uart_sel", 22),
+ GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
+ "uart_sel", 23),
+ GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
+ "uart_sel", 24),
+ GATE_INFRA0(CLK_INFRA_UART3, "infra_uart3",
+ "uart_sel", 25),
+ GATE_INFRA0(CLK_INFRA_GCE_26M, "infra_gce_26m",
+ "axi_sel", 27),
+ GATE_INFRA0(CLK_INFRA_CQ_DMA_FPC, "infra_cqdma_fpc",
+ "axi_sel", 28),
+ GATE_INFRA0(CLK_INFRA_BTIF, "infra_btif",
+ "axi_sel", 31),
+ /* INFRA1 */
+ GATE_INFRA1(CLK_INFRA_SPI0, "infra_spi0",
+ "spi_sel", 1),
+ GATE_INFRA1(CLK_INFRA_MSDC0, "infra_msdc0",
+ "msdc50_hclk_sel", 2),
+ GATE_INFRA1(CLK_INFRA_MSDC1, "infra_msdc1",
+ "axi_sel", 4),
+ GATE_INFRA1(CLK_INFRA_MSDC2, "infra_msdc2",
+ "axi_sel", 5),
+ GATE_INFRA1(CLK_INFRA_MSDC0_SCK, "infra_msdc0_sck",
+ "msdc50_0_sel", 6),
+ GATE_INFRA1(CLK_INFRA_DVFSRC, "infra_dvfsrc",
+ "f_f26m_ck", 7),
+ GATE_INFRA1(CLK_INFRA_GCPU, "infra_gcpu",
+ "axi_sel", 8),
+ GATE_INFRA1(CLK_INFRA_TRNG, "infra_trng",
+ "axi_sel", 9),
+ GATE_INFRA1(CLK_INFRA_AUXADC, "infra_auxadc",
+ "f_f26m_ck", 10),
+ GATE_INFRA1(CLK_INFRA_CPUM, "infra_cpum",
+ "axi_sel", 11),
+ GATE_INFRA1(CLK_INFRA_CCIF1_AP, "infra_ccif1_ap",
+ "axi_sel", 12),
+ GATE_INFRA1(CLK_INFRA_CCIF1_MD, "infra_ccif1_md",
+ "axi_sel", 13),
+ GATE_INFRA1(CLK_INFRA_AUXADC_MD, "infra_auxadc_md",
+ "f_f26m_ck", 14),
+ GATE_INFRA1(CLK_INFRA_MSDC1_SCK, "infra_msdc1_sck",
+ "msdc30_1_sel", 16),
+ GATE_INFRA1(CLK_INFRA_MSDC2_SCK, "infra_msdc2_sck",
+ "msdc30_2_sel", 17),
+ GATE_INFRA1(CLK_INFRA_AP_DMA, "infra_apdma",
+ "axi_sel", 18),
+ GATE_INFRA1(CLK_INFRA_XIU, "infra_xiu",
+ "axi_sel", 19),
+ GATE_INFRA1(CLK_INFRA_DEVICE_APC, "infra_device_apc",
+ "axi_sel", 20),
+ GATE_INFRA1(CLK_INFRA_CCIF_AP, "infra_ccif_ap",
+ "axi_sel", 23),
+ GATE_INFRA1(CLK_INFRA_DEBUGSYS, "infra_debugsys",
+ "axi_sel", 24),
+ GATE_INFRA1(CLK_INFRA_AUDIO, "infra_audio",
+ "axi_sel", 25),
+ GATE_INFRA1(CLK_INFRA_CCIF_MD, "infra_ccif_md",
+ "axi_sel", 26),
+ GATE_INFRA1(CLK_INFRA_DXCC_SEC_CORE, "infra_dxcc_sec_core",
+ "dxcc_sel", 27),
+ GATE_INFRA1(CLK_INFRA_DXCC_AO, "infra_dxcc_ao",
+ "dxcc_sel", 28),
+ GATE_INFRA1(CLK_INFRA_DEVMPU_BCLK, "infra_devmpu_bclk",
+ "axi_sel", 30),
+ GATE_INFRA1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m",
+ "f_f26m_ck", 31),
+ /* INFRA2 */
+ GATE_INFRA2(CLK_INFRA_IRTX, "infra_irtx",
+ "f_f26m_ck", 0),
+ GATE_INFRA2(CLK_INFRA_USB, "infra_usb",
+ "usb_top_sel", 1),
+ GATE_INFRA2(CLK_INFRA_DISP_PWM, "infra_disppwm",
+ "axi_sel", 2),
+ GATE_INFRA2(CLK_INFRA_CLDMA_BCLK, "infra_cldma_bclk",
+ "axi_sel", 3),
+ GATE_INFRA2(CLK_INFRA_AUDIO_26M_BCLK, "infra_audio_26m_bclk",
+ "f_f26m_ck", 4),
+ GATE_INFRA2(CLK_INFRA_SPI1, "infra_spi1",
+ "spi_sel", 6),
+ GATE_INFRA2(CLK_INFRA_I2C4, "infra_i2c4",
+ "i2c_sel", 7),
+ GATE_INFRA2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_md_tmp_share",
+ "f_f26m_ck", 8),
+ GATE_INFRA2(CLK_INFRA_SPI2, "infra_spi2",
+ "spi_sel", 9),
+ GATE_INFRA2(CLK_INFRA_SPI3, "infra_spi3",
+ "spi_sel", 10),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_SCK, "infra_unipro_sck",
+ "ssusb_top_xhci_sel", 11),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick",
+ "fufs_sel", 12),
+ GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck",
+ "fufs_sel", 13),
+ GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk",
+ "axi_sel", 14),
+ GATE_INFRA2(CLK_INFRA_SSPM, "infra_sspm",
+ "sspm_sel", 15),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist",
+ "axi_sel", 16),
+ GATE_INFRA2(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk",
+ "axi_sel", 17),
+ GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5",
+ "i2c_sel", 18),
+ GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter",
+ "i2c_sel", 19),
+ GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm",
+ "i2c_sel", 20),
+ GATE_INFRA2(CLK_INFRA_I2C1_ARBITER, "infra_i2c1_arbiter",
+ "i2c_sel", 21),
+ GATE_INFRA2(CLK_INFRA_I2C1_IMM, "infra_i2c1_imm",
+ "i2c_sel", 22),
+ GATE_INFRA2(CLK_INFRA_I2C2_ARBITER, "infra_i2c2_arbiter",
+ "i2c_sel", 23),
+ GATE_INFRA2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm",
+ "i2c_sel", 24),
+ GATE_INFRA2(CLK_INFRA_SPI4, "infra_spi4",
+ "spi_sel", 25),
+ GATE_INFRA2(CLK_INFRA_SPI5, "infra_spi5",
+ "spi_sel", 26),
+ GATE_INFRA2(CLK_INFRA_CQ_DMA, "infra_cqdma",
+ "axi_sel", 27),
+ GATE_INFRA2(CLK_INFRA_UFS, "infra_ufs",
+ "fufs_sel", 28),
+ GATE_INFRA2(CLK_INFRA_AES_UFSFDE, "infra_aes_ufsfde",
+ "faes_ufsfde_sel", 29),
+ GATE_INFRA2(CLK_INFRA_UFS_TICK, "infra_ufs_tick",
+ "fufs_sel", 30),
+ /* INFRA3 */
+ GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self",
+ "msdc50_0_sel", 0),
+ GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self",
+ "msdc50_0_sel", 1),
+ GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self",
+ "msdc50_0_sel", 2),
+ GATE_INFRA3(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self",
+ "f_f26m_ck", 3),
+ GATE_INFRA3(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self",
+ "f_f26m_ck", 4),
+ GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi",
+ "axi_sel", 5),
+ GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6",
+ "i2c_sel", 6),
+ GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0",
+ "msdc50_hclk_sel", 7),
+ GATE_INFRA3(CLK_INFRA_MD_MSDC0, "infra_md_msdc0",
+ "msdc50_hclk_sel", 8),
+ GATE_INFRA3(CLK_INFRA_CCIF2_AP, "infra_ccif2_ap",
+ "axi_sel", 16),
+ GATE_INFRA3(CLK_INFRA_CCIF2_MD, "infra_ccif2_md",
+ "axi_sel", 17),
+ GATE_INFRA3(CLK_INFRA_CCIF3_AP, "infra_ccif3_ap",
+ "axi_sel", 18),
+ GATE_INFRA3(CLK_INFRA_CCIF3_MD, "infra_ccif3_md",
+ "axi_sel", 19),
+ GATE_INFRA3(CLK_INFRA_SEJ_F13M, "infra_sej_f13m",
+ "f_f26m_ck", 20),
+ GATE_INFRA3(CLK_INFRA_AES_BCLK, "infra_aes_bclk",
+ "axi_sel", 21),
+ GATE_INFRA3(CLK_INFRA_I2C7, "infra_i2c7",
+ "i2c_sel", 22),
+ GATE_INFRA3(CLK_INFRA_I2C8, "infra_i2c8",
+ "i2c_sel", 23),
+ GATE_INFRA3(CLK_INFRA_FBIST2FPC, "infra_fbist2fpc",
+ "msdc50_0_sel", 24),
+};
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x20,
+ .clr_ofs = 0x20,
+ .sta_ofs = 0x20,
+};
+
+#define GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, \
+ _shift, &mtk_clk_gate_ops_no_setclr_inv, _flags)
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, 0)
+
+/*
+ * CRITICAL CLOCK:
+ * apmixed_appll26m is the top-level clock gate for all PLLs.
+ */
+static const struct mtk_gate apmixed_clks[] = {
+ /* AUDIO0 */
+ GATE_APMIXED(CLK_APMIXED_SSUSB_26M, "apmixed_ssusb26m",
+ "f_f26m_ck", 4),
+ GATE_APMIXED_FLAGS(CLK_APMIXED_APPLL_26M, "apmixed_appll26m",
+ "f_f26m_ck", 5, CLK_IS_CRITICAL),
+ GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m",
+ "f_f26m_ck", 6),
+ GATE_APMIXED(CLK_APMIXED_MDPLLGP_26M, "apmixed_mdpll26m",
+ "f_f26m_ck", 7),
+ GATE_APMIXED(CLK_APMIXED_MMSYS_26M, "apmixed_mmsys26m",
+ "f_f26m_ck", 8),
+ GATE_APMIXED(CLK_APMIXED_UFS_26M, "apmixed_ufs26m",
+ "f_f26m_ck", 9),
+ GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m",
+ "f_f26m_ck", 11),
+ GATE_APMIXED(CLK_APMIXED_MEMPLL_26M, "apmixed_mempll26m",
+ "f_f26m_ck", 13),
+ GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m",
+ "f_f26m_ck", 14),
+ GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m",
+ "f_f26m_ck", 16),
+ GATE_APMIXED(CLK_APMIXED_MIPID1_26M, "apmixed_mipid126m",
+ "f_f26m_ck", 17),
+};
+
+#define MT8183_PLL_FMAX (3800UL * MHZ)
+#define MT8183_PLL_FMIN (1500UL * MHZ)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8183_PLL_FMAX, \
+ .fmin = MT8183_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = _pcwibits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg, NULL)
+
+static const struct mtk_pll_div_table armpll_div_table[] = {
+ { .div = 0, .freq = MT8183_PLL_FMAX },
+ { .div = 1, .freq = 1500 * MHZ },
+ { .div = 2, .freq = 750 * MHZ },
+ { .div = 3, .freq = 375 * MHZ },
+ { .div = 4, .freq = 187500000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mfgpll_div_table[] = {
+ { .div = 0, .freq = MT8183_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 800 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0x00000001,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0204, 24, 0x0, 0x0, 0,
+ 0x0204, 0, 0, armpll_div_table),
+ PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0x00000001,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0214, 24, 0x0, 0x0, 0,
+ 0x0214, 0, 0, armpll_div_table),
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0x00000001,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0294, 24, 0x0, 0x0, 0,
+ 0x0294, 0, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0x00000001,
+ HAVE_RST_BAR, BIT(24), 22, 8, 0x0224, 24, 0x0, 0x0, 0,
+ 0x0224, 0, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0x00000001,
+ HAVE_RST_BAR, BIT(24), 22, 8, 0x0234, 24, 0x0, 0x0, 0,
+ 0x0234, 0, 0),
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000001,
+ 0, 0, 22, 8, 0x0244, 24, 0x0, 0x0, 0, 0x0244, 0, 0,
+ mfgpll_div_table),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000001,
+ 0, 0, 22, 8, 0x0254, 24, 0x0, 0x0, 0, 0x0254, 0, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0x00000001,
+ 0, 0, 22, 8, 0x0264, 24, 0x0, 0x0, 0, 0x0264, 0, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0x00000001,
+ HAVE_RST_BAR, BIT(23), 22, 8, 0x0274, 24, 0x0, 0x0, 0,
+ 0x0274, 0, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000001,
+ 0, 0, 32, 8, 0x02A0, 1, 0x02A8, 0x0014, 0, 0x02A4, 0, 0x02A0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0x00000001,
+ 0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
+};
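
The positional arguments of PLL_B()/PLL() above map onto the struct mtk_pll_data fields (see the clk-mtk.h hunk later in this diff). As a reading aid, the armpll_ll entry is spelled out below with designated initializers; this is an illustration, not code from the patch.

/* Illustration only: expanded form of the armpll_ll PLL_B() entry. */
static const struct mtk_pll_data armpll_ll_example = {
	.id		= CLK_APMIXED_ARMPLL_LL,
	.name		= "armpll_ll",
	.reg		= 0x0200,
	.pwr_reg	= 0x020C,
	.en_mask	= 0x00000001,
	.flags		= HAVE_RST_BAR | PLL_AO,
	.rst_bar_mask	= BIT(24),
	.fmax		= MT8183_PLL_FMAX,
	.fmin		= MT8183_PLL_FMIN,
	.pcwbits	= 22,		/* total PCW width */
	.pcwibits	= 8,		/* integer bits, so 14 fractional bits */
	.pd_reg		= 0x0204,
	.pd_shift	= 24,
	.tuner_reg	= 0x0,		/* no tuner on this PLL */
	.tuner_en_reg	= 0x0,
	.tuner_en_bit	= 0,
	.pcw_reg	= 0x0204,
	.pcw_shift	= 0,
	.pcw_chg_reg	= 0,
	.div_table	= armpll_div_table,
};
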
+
+static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8183_top_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *base;
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+
+ mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
+ node, &mt8183_clk_lock, clk_data);
+
+ mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
+ base, &mt8183_clk_lock, clk_data);
+
+ mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
+ base, &mt8183_clk_lock, clk_data);
+
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8183_infra_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8183_mcu_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+
+ mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+ &mt8183_clk_lock, clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183[] = {
+ {
+ .compatible = "mediatek,mt8183-apmixedsys",
+ .data = clk_mt8183_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt8183-topckgen",
+ .data = clk_mt8183_top_probe,
+ }, {
+ .compatible = "mediatek,mt8183-infracfg",
+ .data = clk_mt8183_infra_probe,
+ }, {
+ .compatible = "mediatek,mt8183-mcucfg",
+ .data = clk_mt8183_mcu_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt8183_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *pdev);
+ int r;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ r = clk_probe(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt8183_drv = {
+ .probe = clk_mt8183_probe,
+ .driver = {
+ .name = "clk-mt8183",
+ .of_match_table = of_match_clk_mt8183,
+ },
+};
+
+static int __init clk_mt8183_init(void)
+{
+ return platform_driver_register(&clk_mt8183_drv);
+}
+
+arch_initcall(clk_mt8183_init);
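
The providers registered above are consumed through the common clock framework; the patch itself contains no consumer, so the sketch below is purely illustrative. The connection name "uart" and the example device are assumptions, not part of this series.

/* Hypothetical consumer sketch; not part of this patch. */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_uart_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Resolves e.g. <&topckgen CLK_TOP_MUX_UART> named "uart" in the DT node. */
	clk = devm_clk_get(&pdev->dev, "uart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... use the device, then balance the enable on teardown ... */
	clk_disable_unprepare(clk);
	return 0;
}
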
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
new file mode 100644
index 000000000000..26fe43cc9ea2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8516-clk.h>
+
+static DEFINE_SPINLOCK(mt8516_clk_lock);
+
+static const struct mtk_fixed_clk fixed_clks[] __initconst = {
+ FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0),
+ FIXED_CLK(CLK_TOP_I2S_INFRA_BCK, "i2s_infra_bck", "clk_null", 26000000),
+ FIXED_CLK(CLK_TOP_MEMPLL, "mempll", "clk26m", 800000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] __initconst = {
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+ FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D8, "mainpll_d8", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D16, "mainpll_d16", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D11, "mainpll_d11", "mainpll", 1, 11),
+ FACTOR(CLK_TOP_MAINPLL_D22, "mainpll_d22", "mainpll", 1, 22),
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D12, "mainpll_d12", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D10, "mainpll_d10", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_MAINPLL_D20, "mainpll_d20", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_MAINPLL_D40, "mainpll_d40", "mainpll", 1, 40),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D14, "mainpll_d14", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D8, "univpll_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D16, "univpll_d16", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D12, "univpll_d12", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL_D24, "univpll_d24", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D20, "univpll_d20", "univpll", 1, 20),
+ FACTOR(CLK_TOP_MMPLL380M, "mmpll380m", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_200M, "mmpll_200m", "mmpll", 1, 3),
+ FACTOR(CLK_TOP_USB_PHY48M, "usb_phy48m_ck", "univpll", 1, 26),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "rg_apll1_d2_en", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "rg_apll1_d4_en", 1, 2),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "rg_apll2_d2_en", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "rg_apll2_d4_en", 1, 2),
+ FACTOR(CLK_TOP_CLK26M, "clk26m_ck", "clk26m", 1, 1),
+ FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_AHB_INFRA_D2, "ahb_infra_d2", "ahb_infra_sel", 1, 2),
+ FACTOR(CLK_TOP_NFI1X, "nfi1x_ck", "nfi2x_pad_sel", 1, 2),
+ FACTOR(CLK_TOP_ETH_D2, "eth_d2_ck", "eth_sel", 1, 2),
+};
+
+static const char * const uart0_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const ahb_infra_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d11",
+ "clk_null",
+ "mainpll_d12",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d10"
+};
+
+static const char * const msdc0_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const uart1_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const msdc1_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const pmicspi_parents[] __initconst = {
+ "univpll_d20",
+ "usb_phy48m_ck",
+ "univpll_d16",
+ "clk26m_ck"
+};
+
+static const char * const qaxi_aud26m_parents[] __initconst = {
+ "clk26m_ck",
+ "ahb_infra_sel"
+};
+
+static const char * const aud_intbus_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d22",
+ "clk_null",
+ "mainpll_d11"
+};
+
+static const char * const nfi2x_pad_parents[] __initconst = {
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk26m_ck",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d12",
+ "mainpll_d8",
+ "clk_null",
+ "mainpll_d6",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d4",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d10",
+ "mainpll_d7",
+ "clk_null",
+ "mainpll_d5"
+};
+
+static const char * const nfi1x_pad_parents[] __initconst = {
+ "ahb_infra_sel",
+ "nfi1x_ck"
+};
+
+static const char * const ddrphycfg_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d16"
+};
+
+static const char * const usb_78m_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "univpll_d16",
+ "clk_null",
+ "mainpll_d20"
+};
+
+static const char * const spinor_parents[] __initconst = {
+ "clk26m_d2",
+ "clk26m_ck",
+ "mainpll_d40",
+ "univpll_d24",
+ "univpll_d20",
+ "mainpll_d20",
+ "mainpll_d16",
+ "univpll_d12"
+};
+
+static const char * const msdc2_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const eth_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d40",
+ "univpll_d24",
+ "univpll_d20",
+ "mainpll_d20"
+};
+
+static const char * const aud1_parents[] __initconst = {
+ "clk26m_ck",
+ "apll1_ck"
+};
+
+static const char * const aud2_parents[] __initconst = {
+ "clk26m_ck",
+ "apll2_ck"
+};
+
+static const char * const aud_engen1_parents[] __initconst = {
+ "clk26m_ck",
+ "rg_apll1_d2_en",
+ "rg_apll1_d4_en",
+ "rg_apll1_d8_en"
+};
+
+static const char * const aud_engen2_parents[] __initconst = {
+ "clk26m_ck",
+ "rg_apll2_d2_en",
+ "rg_apll2_d4_en",
+ "rg_apll2_d8_en"
+};
+
+static const char * const i2c_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d20",
+ "univpll_d16",
+ "univpll_d12"
+};
+
+static const char * const aud_i2s0_m_parents[] __initconst = {
+ "rg_aud1",
+ "rg_aud2"
+};
+
+static const char * const pwm_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d12"
+};
+
+static const char * const spi_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d12",
+ "univpll_d8",
+ "univpll_d6"
+};
+
+static const char * const aud_spdifin_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d2"
+};
+
+static const char * const uart2_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const bsi_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d10",
+ "mainpll_d12",
+ "mainpll_d20"
+};
+
+static const char * const dbg_atclk_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d5",
+ "clk_null",
+ "univpll_d5"
+};
+
+static const char * const csw_nfiecc_parents[] __initconst = {
+ "clk_null",
+ "mainpll_d7",
+ "mainpll_d6",
+ "clk_null",
+ "mainpll_d5"
+};
+
+static const char * const nfiecc_parents[] __initconst = {
+ "clk_null",
+ "nfi2x_pad_sel",
+ "mainpll_d4",
+ "clk_null",
+ "csw_nfiecc_sel"
+};
+
+static struct mtk_composite top_muxes[] __initdata = {
+ /* CLK_MUX_SEL0 */
+ MUX(CLK_TOP_UART0_SEL, "uart0_sel", uart0_parents,
+ 0x000, 0, 1),
+ MUX(CLK_TOP_AHB_INFRA_SEL, "ahb_infra_sel", ahb_infra_parents,
+ 0x000, 4, 4),
+ MUX(CLK_TOP_MSDC0_SEL, "msdc0_sel", msdc0_parents,
+ 0x000, 11, 3),
+ MUX(CLK_TOP_UART1_SEL, "uart1_sel", uart1_parents,
+ 0x000, 19, 1),
+ MUX(CLK_TOP_MSDC1_SEL, "msdc1_sel", msdc1_parents,
+ 0x000, 20, 3),
+ MUX(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x000, 24, 2),
+ MUX(CLK_TOP_QAXI_AUD26M_SEL, "qaxi_aud26m_sel", qaxi_aud26m_parents,
+ 0x000, 26, 1),
+ MUX(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x000, 27, 3),
+ /* CLK_MUX_SEL1 */
+ MUX(CLK_TOP_NFI2X_PAD_SEL, "nfi2x_pad_sel", nfi2x_pad_parents,
+ 0x004, 0, 7),
+ MUX(CLK_TOP_NFI1X_PAD_SEL, "nfi1x_pad_sel", nfi1x_pad_parents,
+ 0x004, 7, 1),
+ MUX(CLK_TOP_USB_78M_SEL, "usb_78m_sel", usb_78m_parents,
+ 0x004, 20, 3),
+ /* CLK_MUX_SEL8 */
+ MUX(CLK_TOP_SPINOR_SEL, "spinor_sel", spinor_parents,
+ 0x040, 0, 3),
+ MUX(CLK_TOP_MSDC2_SEL, "msdc2_sel", msdc2_parents,
+ 0x040, 3, 3),
+ MUX(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+ 0x040, 6, 3),
+ MUX(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+ 0x040, 22, 1),
+ MUX(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents,
+ 0x040, 23, 1),
+ MUX(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel", aud_engen1_parents,
+ 0x040, 24, 2),
+ MUX(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel", aud_engen2_parents,
+ 0x040, 26, 2),
+ MUX(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents,
+ 0x040, 28, 2),
+ /* CLK_SEL_9 */
+ MUX(CLK_TOP_AUD_I2S0_M_SEL, "aud_i2s0_m_sel", aud_i2s0_m_parents,
+ 0x044, 12, 1),
+ MUX(CLK_TOP_AUD_I2S1_M_SEL, "aud_i2s1_m_sel", aud_i2s0_m_parents,
+ 0x044, 13, 1),
+ MUX(CLK_TOP_AUD_I2S2_M_SEL, "aud_i2s2_m_sel", aud_i2s0_m_parents,
+ 0x044, 14, 1),
+ MUX(CLK_TOP_AUD_I2S3_M_SEL, "aud_i2s3_m_sel", aud_i2s0_m_parents,
+ 0x044, 15, 1),
+ MUX(CLK_TOP_AUD_I2S4_M_SEL, "aud_i2s4_m_sel", aud_i2s0_m_parents,
+ 0x044, 16, 1),
+ MUX(CLK_TOP_AUD_I2S5_M_SEL, "aud_i2s5_m_sel", aud_i2s0_m_parents,
+ 0x044, 17, 1),
+ MUX(CLK_TOP_AUD_SPDIF_B_SEL, "aud_spdif_b_sel", aud_i2s0_m_parents,
+ 0x044, 18, 1),
+ /* CLK_MUX_SEL13 */
+ MUX(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x07c, 0, 1),
+ MUX(CLK_TOP_SPI_SEL, "spi_sel", spi_parents,
+ 0x07c, 1, 2),
+ MUX(CLK_TOP_AUD_SPDIFIN_SEL, "aud_spdifin_sel", aud_spdifin_parents,
+ 0x07c, 3, 1),
+ MUX(CLK_TOP_UART2_SEL, "uart2_sel", uart2_parents,
+ 0x07c, 4, 1),
+ MUX(CLK_TOP_BSI_SEL, "bsi_sel", bsi_parents,
+ 0x07c, 5, 2),
+ MUX(CLK_TOP_DBG_ATCLK_SEL, "dbg_atclk_sel", dbg_atclk_parents,
+ 0x07c, 7, 3),
+ MUX(CLK_TOP_CSW_NFIECC_SEL, "csw_nfiecc_sel", csw_nfiecc_parents,
+ 0x07c, 10, 3),
+ MUX(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents,
+ 0x07c, 13, 3),
+};
+
+static const char * const ifr_mux1_parents[] __initconst = {
+ "clk26m_ck",
+ "armpll",
+ "univpll",
+ "mainpll_d2"
+};
+
+static const char * const ifr_eth_25m_parents[] __initconst = {
+ "eth_d2_ck",
+ "rg_eth"
+};
+
+static const char * const ifr_i2c0_parents[] __initconst = {
+ "ahb_infra_d2",
+ "rg_i2c"
+};
+
+static const struct mtk_composite ifr_muxes[] __initconst = {
+ MUX(CLK_IFR_MUX1_SEL, "ifr_mux1_sel", ifr_mux1_parents, 0x000,
+ 2, 2),
+ MUX(CLK_IFR_ETH_25M_SEL, "ifr_eth_25m_sel", ifr_eth_25m_parents, 0x080,
+ 0, 1),
+ MUX(CLK_IFR_I2C0_SEL, "ifr_i2c0_sel", ifr_i2c0_parents, 0x080,
+ 1, 1),
+ MUX(CLK_IFR_I2C1_SEL, "ifr_i2c1_sel", ifr_i2c0_parents, 0x080,
+ 2, 1),
+ MUX(CLK_IFR_I2C2_SEL, "ifr_i2c2_sel", ifr_i2c0_parents, 0x080,
+ 3, 1),
+};
+
+#define DIV_ADJ(_id, _name, _parent, _reg, _shift, _width) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+}
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV0, "apll12_ck_div0", "aud_i2s0_m_sel",
+ 0x0048, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV1, "apll12_ck_div1", "aud_i2s1_m_sel",
+ 0x0048, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV2, "apll12_ck_div2", "aud_i2s2_m_sel",
+ 0x0048, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "aud_i2s3_m_sel",
+ 0x0048, 24, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "aud_i2s4_m_sel",
+ 0x004c, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll12_div4",
+ 0x004c, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "aud_i2s5_m_sel",
+ 0x004c, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll12_div5",
+ 0x004c, 24, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "aud_spdif_b_sel",
+ 0x0078, 0, 8),
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x54,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x24,
+};
+
+static const struct mtk_gate_regs top2_cg_regs = {
+ .set_ofs = 0x6c,
+ .clr_ofs = 0x9c,
+ .sta_ofs = 0x3c,
+};
+
+static const struct mtk_gate_regs top3_cg_regs = {
+ .set_ofs = 0xa0,
+ .clr_ofs = 0xb0,
+ .sta_ofs = 0x70,
+};
+
+static const struct mtk_gate_regs top4_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xb4,
+ .sta_ofs = 0x74,
+};
+
+static const struct mtk_gate_regs top5_cg_regs = {
+ .set_ofs = 0x44,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x44,
+};
+
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP2_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_TOP3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP4_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top4_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_TOP5(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top5_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] __initconst = {
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_THEM, "them", "ahb_infra_sel", 1),
+ GATE_TOP1(CLK_TOP_APDMA, "apdma", "ahb_infra_sel", 2),
+ GATE_TOP1(CLK_TOP_I2C0, "i2c0", "ifr_i2c0_sel", 3),
+ GATE_TOP1(CLK_TOP_I2C1, "i2c1", "ifr_i2c1_sel", 4),
+ GATE_TOP1(CLK_TOP_AUXADC1, "auxadc1", "ahb_infra_sel", 5),
+ GATE_TOP1(CLK_TOP_NFI, "nfi", "nfi1x_pad_sel", 6),
+ GATE_TOP1(CLK_TOP_NFIECC, "nfiecc", "rg_nfiecc", 7),
+ GATE_TOP1(CLK_TOP_DEBUGSYS, "debugsys", "rg_dbg_atclk", 8),
+ GATE_TOP1(CLK_TOP_PWM, "pwm", "ahb_infra_sel", 9),
+ GATE_TOP1(CLK_TOP_UART0, "uart0", "uart0_sel", 10),
+ GATE_TOP1(CLK_TOP_UART1, "uart1", "uart1_sel", 11),
+ GATE_TOP1(CLK_TOP_BTIF, "btif", "ahb_infra_sel", 12),
+ GATE_TOP1(CLK_TOP_USB, "usb", "usb_78m", 13),
+ GATE_TOP1(CLK_TOP_FLASHIF_26M, "flashif_26m", "clk26m_ck", 14),
+ GATE_TOP1(CLK_TOP_AUXADC2, "auxadc2", "ahb_infra_sel", 15),
+ GATE_TOP1(CLK_TOP_I2C2, "i2c2", "ifr_i2c2_sel", 16),
+ GATE_TOP1(CLK_TOP_MSDC0, "msdc0", "msdc0_sel", 17),
+ GATE_TOP1(CLK_TOP_MSDC1, "msdc1", "msdc1_sel", 18),
+ GATE_TOP1(CLK_TOP_NFI2X, "nfi2x", "nfi2x_pad_sel", 19),
+ GATE_TOP1(CLK_TOP_PMICWRAP_AP, "pwrap_ap", "clk26m_ck", 20),
+ GATE_TOP1(CLK_TOP_SEJ, "sej", "ahb_infra_sel", 21),
+ GATE_TOP1(CLK_TOP_MEMSLP_DLYER, "memslp_dlyer", "clk26m_ck", 22),
+ GATE_TOP1(CLK_TOP_SPI, "spi", "spi_sel", 23),
+ GATE_TOP1(CLK_TOP_APXGPT, "apxgpt", "clk26m_ck", 24),
+ GATE_TOP1(CLK_TOP_AUDIO, "audio", "clk26m_ck", 25),
+ GATE_TOP1(CLK_TOP_PMICWRAP_MD, "pwrap_md", "clk26m_ck", 27),
+ GATE_TOP1(CLK_TOP_PMICWRAP_CONN, "pwrap_conn", "clk26m_ck", 28),
+ GATE_TOP1(CLK_TOP_PMICWRAP_26M, "pwrap_26m", "clk26m_ck", 29),
+ GATE_TOP1(CLK_TOP_AUX_ADC, "aux_adc", "clk26m_ck", 30),
+ GATE_TOP1(CLK_TOP_AUX_TP, "aux_tp", "clk26m_ck", 31),
+ /* TOP2 */
+ GATE_TOP2(CLK_TOP_MSDC2, "msdc2", "ahb_infra_sel", 0),
+ GATE_TOP2(CLK_TOP_RBIST, "rbist", "univpll_d12", 1),
+ GATE_TOP2(CLK_TOP_NFI_BUS, "nfi_bus", "ahb_infra_sel", 2),
+ GATE_TOP2(CLK_TOP_GCE, "gce", "ahb_infra_sel", 4),
+ GATE_TOP2(CLK_TOP_TRNG, "trng", "ahb_infra_sel", 5),
+ GATE_TOP2(CLK_TOP_SEJ_13M, "sej_13m", "clk26m_ck", 6),
+ GATE_TOP2(CLK_TOP_AES, "aes", "ahb_infra_sel", 7),
+ GATE_TOP2(CLK_TOP_PWM_B, "pwm_b", "rg_pwm_infra", 8),
+ GATE_TOP2(CLK_TOP_PWM1_FB, "pwm1_fb", "rg_pwm_infra", 9),
+ GATE_TOP2(CLK_TOP_PWM2_FB, "pwm2_fb", "rg_pwm_infra", 10),
+ GATE_TOP2(CLK_TOP_PWM3_FB, "pwm3_fb", "rg_pwm_infra", 11),
+ GATE_TOP2(CLK_TOP_PWM4_FB, "pwm4_fb", "rg_pwm_infra", 12),
+ GATE_TOP2(CLK_TOP_PWM5_FB, "pwm5_fb", "rg_pwm_infra", 13),
+ GATE_TOP2(CLK_TOP_USB_1P, "usb_1p", "usb_78m", 14),
+ GATE_TOP2(CLK_TOP_FLASHIF_FREERUN, "flashif_freerun", "ahb_infra_sel",
+ 15),
+ GATE_TOP2(CLK_TOP_66M_ETH, "eth_66m", "ahb_infra_d2", 19),
+ GATE_TOP2(CLK_TOP_133M_ETH, "eth_133m", "ahb_infra_sel", 20),
+ GATE_TOP2(CLK_TOP_FETH_25M, "feth_25m", "ifr_eth_25m_sel", 21),
+ GATE_TOP2(CLK_TOP_FETH_50M, "feth_50m", "rg_eth", 22),
+ GATE_TOP2(CLK_TOP_FLASHIF_AXI, "flashif_axi", "ahb_infra_sel", 23),
+ GATE_TOP2(CLK_TOP_USBIF, "usbif", "ahb_infra_sel", 24),
+ GATE_TOP2(CLK_TOP_UART2, "uart2", "rg_uart2", 25),
+ GATE_TOP2(CLK_TOP_BSI, "bsi", "ahb_infra_sel", 26),
+ GATE_TOP2_I(CLK_TOP_MSDC0_INFRA, "msdc0_infra", "msdc0", 28),
+ GATE_TOP2_I(CLK_TOP_MSDC1_INFRA, "msdc1_infra", "msdc1", 29),
+ GATE_TOP2_I(CLK_TOP_MSDC2_INFRA, "msdc2_infra", "rg_msdc2", 30),
+ GATE_TOP2(CLK_TOP_USB_78M, "usb_78m", "usb_78m_sel", 31),
+ /* TOP3 */
+ GATE_TOP3(CLK_TOP_RG_SPINOR, "rg_spinor", "spinor_sel", 0),
+ GATE_TOP3(CLK_TOP_RG_MSDC2, "rg_msdc2", "msdc2_sel", 1),
+ GATE_TOP3(CLK_TOP_RG_ETH, "rg_eth", "eth_sel", 2),
+ GATE_TOP3(CLK_TOP_RG_AUD1, "rg_aud1", "aud1_sel", 8),
+ GATE_TOP3(CLK_TOP_RG_AUD2, "rg_aud2", "aud2_sel", 9),
+ GATE_TOP3(CLK_TOP_RG_AUD_ENGEN1, "rg_aud_engen1", "aud_engen1_sel", 10),
+ GATE_TOP3(CLK_TOP_RG_AUD_ENGEN2, "rg_aud_engen2", "aud_engen2_sel", 11),
+ GATE_TOP3(CLK_TOP_RG_I2C, "rg_i2c", "i2c_sel", 12),
+ GATE_TOP3(CLK_TOP_RG_PWM_INFRA, "rg_pwm_infra", "pwm_sel", 13),
+ GATE_TOP3(CLK_TOP_RG_AUD_SPDIF_IN, "rg_aud_spdif_in", "aud_spdifin_sel",
+ 14),
+ GATE_TOP3(CLK_TOP_RG_UART2, "rg_uart2", "uart2_sel", 15),
+ GATE_TOP3(CLK_TOP_RG_BSI, "rg_bsi", "bsi_sel", 16),
+ GATE_TOP3(CLK_TOP_RG_DBG_ATCLK, "rg_dbg_atclk", "dbg_atclk_sel", 17),
+ GATE_TOP3(CLK_TOP_RG_NFIECC, "rg_nfiecc", "nfiecc_sel", 18),
+ /* TOP4 */
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D2_EN, "rg_apll1_d2_en", "apll1_d2", 8),
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D4_EN, "rg_apll1_d4_en", "apll1_d4", 9),
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D8_EN, "rg_apll1_d8_en", "apll1_d8", 10),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D2_EN, "rg_apll2_d2_en", "apll2_d2", 11),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D4_EN, "rg_apll2_d4_en", "apll2_d4", 12),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D8_EN, "rg_apll2_d8_en", "apll2_d8", 13),
+ /* TOP5 */
+ GATE_TOP5(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll12_ck_div0", 0),
+ GATE_TOP5(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll12_ck_div1", 1),
+ GATE_TOP5(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll12_ck_div2", 2),
+ GATE_TOP5(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll12_ck_div3", 3),
+ GATE_TOP5(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll12_ck_div4", 4),
+ GATE_TOP5(CLK_TOP_APLL12_DIV4B, "apll12_div4b", "apll12_ck_div4b", 5),
+ GATE_TOP5(CLK_TOP_APLL12_DIV5, "apll12_div5", "apll12_ck_div5", 6),
+ GATE_TOP5(CLK_TOP_APLL12_DIV5B, "apll12_div5b", "apll12_ck_div5b", 7),
+ GATE_TOP5(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll12_ck_div6", 8),
+};
+
+static void __init mtk_topckgen_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ void __iomem *base;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ clk_data);
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ &mt8516_clk_lock, clk_data);
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt8516_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8516-topckgen", mtk_topckgen_init);
+
+static void __init mtk_infracfg_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ void __iomem *base;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+
+ mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
+ &mt8516_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_infracfg, "mediatek,mt8516-infracfg", mtk_infracfg_init);
+
+#define MT8516_PLL_FMAX (1502UL * MHZ)
+
+#define CON0_MT8516_RST_BAR BIT(27)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT8516_RST_BAR, \
+ .fmax = MT8516_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL)
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT8516_PLL_FMAX },
+ { .div = 1, .freq = 1000000000 },
+ { .div = 2, .freq = 604500000 },
+ { .div = 3, .freq = 253500000 },
+ { .div = 4, .freq = 126750000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0,
+ 21, 0x0104, 24, 0, 0x0104, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001,
+ HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001,
+ HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0,
+ 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0,
+ 31, 0x0180, 1, 0x0194, 0x0184, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0,
+ 31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
+};
+
+static void __init mtk_apmixedsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ int r;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8516-apmixedsys",
+ mtk_apmixedsys_init);
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index fb27b5bf30d9..33ab1731482f 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -227,10 +227,13 @@ struct mtk_pll_data {
unsigned int flags;
const struct clk_ops *ops;
u32 rst_bar_mask;
+ unsigned long fmin;
unsigned long fmax;
int pcwbits;
+ int pcwibits;
uint32_t pcw_reg;
int pcw_shift;
+ uint32_t pcw_chg_reg;
const struct mtk_pll_div_table *div_table;
const char *parent_name;
};
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
new file mode 100644
index 000000000000..76f9cd039195
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
+{
+ return container_of(hw, struct mtk_clk_mux, hw);
+}
+
+static int mtk_clk_mux_enable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = BIT(mux->data->gate_shift);
+
+ return regmap_update_bits(mux->regmap, mux->data->mux_ofs,
+ mask, ~mask);
+}
+
+static void mtk_clk_mux_disable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = BIT(mux->data->gate_shift);
+
+ regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask, mask);
+}
+
+static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+
+ return regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+}
+
+static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+
+ regmap_write(mux->regmap, mux->data->set_ofs,
+ BIT(mux->data->gate_shift));
+}
+
+static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->mux_ofs, &val);
+
+ return (val & BIT(mux->data->gate_shift)) == 0;
+}
+
+static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->mux_ofs, &val);
+ val = (val >> mux->data->mux_shift) & mask;
+
+ return val;
+}
+
+static int mtk_clk_mux_set_parent_lock(struct clk_hw *hw, u8 index)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+ unsigned long flags = 0;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask,
+ index << mux->data->mux_shift);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return 0;
+}
+
+static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+ u32 val, orig;
+ unsigned long flags = 0;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ regmap_read(mux->regmap, mux->data->mux_ofs, &orig);
+ val = (orig & ~(mask << mux->data->mux_shift))
+ | (index << mux->data->mux_shift);
+
+ if (val != orig) {
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ mask << mux->data->mux_shift);
+ regmap_write(mux->regmap, mux->data->set_ofs,
+ index << mux->data->mux_shift);
+
+ if (mux->data->upd_shift >= 0)
+ regmap_write(mux->regmap, mux->data->upd_ofs,
+ BIT(mux->data->upd_shift));
+ }
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return 0;
+}
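
To make the set/clr/update protocol concrete: for the MT8183 axi_sel mux added earlier in this series (set_ofs 0x44, clr_ofs 0x48, a 2-bit field at shift 0, update bit 0 at offset 0x004), switching to parent index 1 reduces to the three regmap writes sketched below, assuming the currently programmed index differs. This trace is an illustration, not additional code in the patch.

/* Illustrative reparent sequence for a 2-bit select field at shift 0. */
regmap_write(mux->regmap, 0x48, GENMASK(1, 0) << 0);	/* clear the select field */
regmap_write(mux->regmap, 0x44, 1 << 0);		/* program the new index */
regmap_write(mux->regmap, 0x004, BIT(0));		/* latch via the update bit */
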
+
+const struct clk_ops mtk_mux_ops = {
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_lock,
+};
+
+const struct clk_ops mtk_mux_clr_set_upd_ops = {
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+};
+
+const struct clk_ops mtk_mux_gate_ops = {
+ .enable = mtk_clk_mux_enable,
+ .disable = mtk_clk_mux_disable,
+ .is_enabled = mtk_clk_mux_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_lock,
+};
+
+const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_enable_setclr,
+ .disable = mtk_clk_mux_disable_setclr,
+ .is_enabled = mtk_clk_mux_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+};
+
+struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+ struct regmap *regmap,
+ spinlock_t *lock)
+{
+ struct mtk_clk_mux *clk_mux;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
+ if (!clk_mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = mux->name;
+ init.flags = mux->flags | CLK_SET_RATE_PARENT;
+ init.parent_names = mux->parent_names;
+ init.num_parents = mux->num_parents;
+ init.ops = mux->ops;
+
+ clk_mux->regmap = regmap;
+ clk_mux->data = mux;
+ clk_mux->lock = lock;
+ clk_mux->hw.init = &init;
+
+ clk = clk_register(NULL, &clk_mux->hw);
+ if (IS_ERR(clk)) {
+ kfree(clk_mux);
+ return clk;
+ }
+
+ return clk;
+}
+
+int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+ int num, struct device_node *node,
+ spinlock_t *lock,
+ struct clk_onecell_data *clk_data)
+{
+ struct regmap *regmap;
+ struct clk *clk;
+ int i;
+
+ regmap = syscon_node_to_regmap(node);
+ if (IS_ERR(regmap)) {
+ pr_err("Cannot find regmap for %pOF: %ld\n", node,
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ for (i = 0; i < num; i++) {
+ const struct mtk_mux *mux = &muxes[i];
+
+ if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
+ clk = mtk_clk_register_mux(mux, regmap, lock);
+
+ if (IS_ERR(clk)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ mux->name, PTR_ERR(clk));
+ continue;
+ }
+
+ clk_data->clks[mux->id] = clk;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
new file mode 100644
index 000000000000..f5625f4d9e6c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#ifndef __DRV_CLK_MTK_MUX_H
+#define __DRV_CLK_MTK_MUX_H
+
+#include <linux/clk-provider.h>
+
+struct mtk_clk_mux {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ const struct mtk_mux *data;
+ spinlock_t *lock;
+};
+
+struct mtk_mux {
+ int id;
+ const char *name;
+ const char * const *parent_names;
+ unsigned int flags;
+
+ u32 mux_ofs;
+ u32 set_ofs;
+ u32 clr_ofs;
+ u32 upd_ofs;
+
+ u8 mux_shift;
+ u8 mux_width;
+ u8 gate_shift;
+ s8 upd_shift;
+
+ const struct clk_ops *ops;
+
+ signed char num_parents;
+};
+
+extern const struct clk_ops mtk_mux_ops;
+extern const struct clk_ops mtk_mux_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_ops;
+extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+
+#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd, _flags, _ops) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = _flags, \
+ .ops = &_ops, \
+ }
+
+#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd, _flags) \
+ GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd, _flags, \
+ mtk_mux_gate_clr_set_upd_ops)
+
+#define MUX_GATE_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd) \
+ MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd, \
+ CLK_SET_RATE_PARENT)
+
+struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+ struct regmap *regmap,
+ spinlock_t *lock);
+
+int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+ int num, struct device_node *node,
+ spinlock_t *lock,
+ struct clk_onecell_data *clk_data);
+
+#endif /* __DRV_CLK_MTK_MUX_H */
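For illustration, an entry in such a table could be declared with the MUX_GATE_CLR_SET_UPD() helper above. Every identifier, register offset and parent name below is a made-up example, not data for a real MediaTek SoC:

/* Hypothetical clock ID; real IDs come from the dt-bindings headers */
#define CLK_TOP_EXAMPLE_AXI_SEL		0

static const char * const example_axi_parents[] = {
	"clk26m", "examplepll_d7", "examplepll_d2_d4"
};

static const struct mtk_mux example_topck_muxes[] = {
	/*
	 * id, name, parents, mux/set/clr register offsets,
	 * mux shift, mux width, gate bit, update register offset, update bit
	 */
	MUX_GATE_CLR_SET_UPD(CLK_TOP_EXAMPLE_AXI_SEL, "axi_sel",
			     example_axi_parents, 0x040, 0x044, 0x048,
			     0, 2, 7, 0x004, 0),
};

Such a table is what mtk_clk_register_muxes() in clk-mux.c consumes, one clk_register() call per entry.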
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index f54e4015b0b1..8d556fc99fed 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -27,11 +27,13 @@
#define CON0_BASE_EN BIT(0)
#define CON0_PWR_ON BIT(0)
#define CON0_ISO_EN BIT(1)
-#define CON0_PCW_CHG BIT(31)
+#define PCW_CHG_MASK BIT(31)
#define AUDPLL_TUNER_EN BIT(31)
#define POSTDIV_MASK 0x7
+
+/* 7 integer bits by default; can be overridden with pcwibits. */
#define INTEGER_BITS 7
/*
@@ -49,6 +51,7 @@ struct mtk_clk_pll {
void __iomem *tuner_addr;
void __iomem *tuner_en_addr;
void __iomem *pcw_addr;
+ void __iomem *pcw_chg_addr;
const struct mtk_pll_data *data;
};
@@ -68,12 +71,15 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
u32 pcw, int postdiv)
{
int pcwbits = pll->data->pcwbits;
- int pcwfbits;
+ int pcwfbits = 0;
+ int ibits;
u64 vco;
u8 c = 0;
/* The fractional part of the PLL divider. */
- pcwfbits = pcwbits > INTEGER_BITS ? pcwbits - INTEGER_BITS : 0;
+ ibits = pll->data->pcwibits ? pll->data->pcwibits : INTEGER_BITS;
+ if (pcwbits > ibits)
+ pcwfbits = pcwbits - ibits;
vco = (u64)fin * pcw;
@@ -88,13 +94,39 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
return ((unsigned long)vco + postdiv - 1) / postdiv;
}
+static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll)
+{
+ u32 r;
+
+ if (pll->tuner_en_addr) {
+ r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+ writel(r, pll->tuner_en_addr);
+ } else if (pll->tuner_addr) {
+ r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
+ writel(r, pll->tuner_addr);
+ }
+}
+
+static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll)
+{
+ u32 r;
+
+ if (pll->tuner_en_addr) {
+ r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+ writel(r, pll->tuner_en_addr);
+ } else if (pll->tuner_addr) {
+ r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
+ writel(r, pll->tuner_addr);
+ }
+}
+
static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
int postdiv)
{
- u32 con1, val;
- int pll_en;
+ u32 chg, val;
- pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
+ /* disable tuner */
+ __mtk_pll_tuner_disable(pll);
/* set postdiv */
val = readl(pll->pd_addr);
@@ -112,18 +144,15 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
pll->data->pcw_shift);
val |= pcw << pll->data->pcw_shift;
writel(val, pll->pcw_addr);
-
- con1 = readl(pll->base_addr + REG_CON1);
-
- if (pll_en)
- con1 |= CON0_PCW_CHG;
-
- writel(con1, pll->base_addr + REG_CON1);
+ chg = readl(pll->pcw_chg_addr) | PCW_CHG_MASK;
+ writel(chg, pll->pcw_chg_addr);
if (pll->tuner_addr)
- writel(con1 + 1, pll->tuner_addr);
+ writel(val + 1, pll->tuner_addr);
+
+ /* restore tuner_en */
+ __mtk_pll_tuner_enable(pll);
- if (pll_en)
- udelay(20);
+ udelay(20);
}
/*
@@ -138,9 +167,10 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
u32 freq, u32 fin)
{
- unsigned long fmin = 1000 * MHZ;
+ unsigned long fmin = pll->data->fmin ? pll->data->fmin : (1000 * MHZ);
const struct mtk_pll_div_table *div_table = pll->data->div_table;
u64 _pcw;
+ int ibits;
u32 val;
if (freq > pll->data->fmax)
@@ -164,7 +194,8 @@ static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
}
/* _pcw = freq * postdiv / fin * 2^pcwfbits */
- _pcw = ((u64)freq << val) << (pll->data->pcwbits - INTEGER_BITS);
+ ibits = pll->data->pcwibits ? pll->data->pcwibits : INTEGER_BITS;
+ _pcw = ((u64)freq << val) << (pll->data->pcwbits - ibits);
do_div(_pcw, fin);
*pcw = (u32)_pcw;
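A worked example of the calculation above, using illustrative values rather than the parameters of any specific PLL:

/*
 * fin = 26 MHz, pcwbits = 22, pcwibits = 8  =>  pcwfbits = 22 - 8 = 14
 * For a 1500 MHz target with postdiv = 1 (val = 0):
 *   pcw  = 1500 MHz * 2^14 / 26 MHz ~= 945230
 *   rate = 26 MHz * 945230 / 2^14   ~= 1499.999 MHz
 */

The recalc path in __mtk_pll_recalc_rate() reverses this by shifting the fin * pcw product back down by pcwfbits.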
@@ -228,13 +259,7 @@ static int mtk_pll_prepare(struct clk_hw *hw)
r |= pll->data->en_mask;
writel(r, pll->base_addr + REG_CON0);
- if (pll->tuner_en_addr) {
- r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
- writel(r, pll->tuner_en_addr);
- } else if (pll->tuner_addr) {
- r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
- writel(r, pll->tuner_addr);
- }
+ __mtk_pll_tuner_enable(pll);
udelay(20);
@@ -258,13 +283,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->base_addr + REG_CON0);
}
- if (pll->tuner_en_addr) {
- r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
- writel(r, pll->tuner_en_addr);
- } else if (pll->tuner_addr) {
- r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
- writel(r, pll->tuner_addr);
- }
+ __mtk_pll_tuner_disable(pll);
r = readl(pll->base_addr + REG_CON0);
r &= ~CON0_BASE_EN;
@@ -302,6 +321,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->pwr_addr = base + data->pwr_reg;
pll->pd_addr = base + data->pd_reg;
pll->pcw_addr = base + data->pcw_reg;
+ if (data->pcw_chg_reg)
+ pll->pcw_chg_addr = base + data->pcw_chg_reg;
+ else
+ pll->pcw_chg_addr = pll->base_addr + REG_CON1;
if (data->tuner_reg)
pll->tuner_addr = base + data->tuner_reg;
if (data->tuner_en_reg)
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 7ab200b6c3bf..8028ff6f6610 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -20,18 +20,18 @@
#include "clk-phase.h"
#include "sclk-div.h"
-#define AXG_MST_IN_COUNT 8
-#define AXG_SLV_SCLK_COUNT 10
-#define AXG_SLV_LRCLK_COUNT 10
+#define AUD_MST_IN_COUNT 8
+#define AUD_SLV_SCLK_COUNT 10
+#define AUD_SLV_LRCLK_COUNT 10
-#define AXG_AUD_GATE(_name, _reg, _bit, _pname, _iflags) \
-struct clk_regmap axg_##_name = { \
+#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) \
+struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
.bit_idx = (_bit), \
}, \
.hw.init = &(struct clk_init_data) { \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &clk_regmap_gate_ops, \
.parent_names = (const char *[]){ _pname }, \
.num_parents = 1, \
@@ -39,8 +39,8 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \
-struct clk_regmap axg_##_name = { \
+#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \
+struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_mux_data){ \
.offset = (_reg), \
.mask = (_mask), \
@@ -48,7 +48,7 @@ struct clk_regmap axg_##_name = { \
.flags = (_dflags), \
}, \
.hw.init = &(struct clk_init_data){ \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &clk_regmap_mux_ops, \
.parent_names = (_pnames), \
.num_parents = ARRAY_SIZE(_pnames), \
@@ -56,8 +56,8 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \
-struct clk_regmap axg_##_name = { \
+#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \
+struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_div_data){ \
.offset = (_reg), \
.shift = (_shift), \
@@ -65,7 +65,7 @@ struct clk_regmap axg_##_name = { \
.flags = (_dflags), \
}, \
.hw.init = &(struct clk_init_data){ \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &clk_regmap_divider_ops, \
.parent_names = (const char *[]) { _pname }, \
.num_parents = 1, \
@@ -73,109 +73,113 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_PCLK_GATE(_name, _bit) \
- AXG_AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "axg_audio_pclk", 0)
+#define AUD_PCLK_GATE(_name, _bit) \
+ AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "audio_pclk", 0)
/* Audio peripheral clocks */
-static AXG_PCLK_GATE(ddr_arb, 0);
-static AXG_PCLK_GATE(pdm, 1);
-static AXG_PCLK_GATE(tdmin_a, 2);
-static AXG_PCLK_GATE(tdmin_b, 3);
-static AXG_PCLK_GATE(tdmin_c, 4);
-static AXG_PCLK_GATE(tdmin_lb, 5);
-static AXG_PCLK_GATE(tdmout_a, 6);
-static AXG_PCLK_GATE(tdmout_b, 7);
-static AXG_PCLK_GATE(tdmout_c, 8);
-static AXG_PCLK_GATE(frddr_a, 9);
-static AXG_PCLK_GATE(frddr_b, 10);
-static AXG_PCLK_GATE(frddr_c, 11);
-static AXG_PCLK_GATE(toddr_a, 12);
-static AXG_PCLK_GATE(toddr_b, 13);
-static AXG_PCLK_GATE(toddr_c, 14);
-static AXG_PCLK_GATE(loopback, 15);
-static AXG_PCLK_GATE(spdifin, 16);
-static AXG_PCLK_GATE(spdifout, 17);
-static AXG_PCLK_GATE(resample, 18);
-static AXG_PCLK_GATE(power_detect, 19);
+static AUD_PCLK_GATE(ddr_arb, 0);
+static AUD_PCLK_GATE(pdm, 1);
+static AUD_PCLK_GATE(tdmin_a, 2);
+static AUD_PCLK_GATE(tdmin_b, 3);
+static AUD_PCLK_GATE(tdmin_c, 4);
+static AUD_PCLK_GATE(tdmin_lb, 5);
+static AUD_PCLK_GATE(tdmout_a, 6);
+static AUD_PCLK_GATE(tdmout_b, 7);
+static AUD_PCLK_GATE(tdmout_c, 8);
+static AUD_PCLK_GATE(frddr_a, 9);
+static AUD_PCLK_GATE(frddr_b, 10);
+static AUD_PCLK_GATE(frddr_c, 11);
+static AUD_PCLK_GATE(toddr_a, 12);
+static AUD_PCLK_GATE(toddr_b, 13);
+static AUD_PCLK_GATE(toddr_c, 14);
+static AUD_PCLK_GATE(loopback, 15);
+static AUD_PCLK_GATE(spdifin, 16);
+static AUD_PCLK_GATE(spdifout, 17);
+static AUD_PCLK_GATE(resample, 18);
+static AUD_PCLK_GATE(power_detect, 19);
+static AUD_PCLK_GATE(spdifout_b, 21);
/* Audio Master Clocks */
static const char * const mst_mux_parent_names[] = {
- "axg_mst_in0", "axg_mst_in1", "axg_mst_in2", "axg_mst_in3",
- "axg_mst_in4", "axg_mst_in5", "axg_mst_in6", "axg_mst_in7",
+ "aud_mst_in0", "aud_mst_in1", "aud_mst_in2", "aud_mst_in3",
+ "aud_mst_in4", "aud_mst_in5", "aud_mst_in6", "aud_mst_in7",
};
-#define AXG_MST_MUX(_name, _reg, _flag) \
- AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
- mst_mux_parent_names, CLK_SET_RATE_PARENT)
-
-#define AXG_MST_MCLK_MUX(_name, _reg) \
- AXG_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
-
-#define AXG_MST_SYS_MUX(_name, _reg) \
- AXG_MST_MUX(_name, _reg, 0)
-
-static AXG_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AXG_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AXG_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AXG_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AXG_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AXG_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AXG_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AXG_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AXG_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AXG_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-
-#define AXG_MST_DIV(_name, _reg, _flag) \
- AXG_AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
- "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \
-
-#define AXG_MST_MCLK_DIV(_name, _reg) \
- AXG_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
-
-#define AXG_MST_SYS_DIV(_name, _reg) \
- AXG_MST_DIV(_name, _reg, 0)
-
-static AXG_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AXG_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AXG_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AXG_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AXG_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AXG_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AXG_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AXG_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AXG_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AXG_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-
-#define AXG_MST_MCLK_GATE(_name, _reg) \
- AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \
- CLK_SET_RATE_PARENT)
-
-static AXG_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AXG_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AXG_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AXG_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AXG_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AXG_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AXG_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AXG_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AXG_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AXG_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+#define AUD_MST_MUX(_name, _reg, _flag) \
+ AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
+ mst_mux_parent_names, CLK_SET_RATE_PARENT)
+
+#define AUD_MST_MCLK_MUX(_name, _reg) \
+ AUD_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
+
+#define AUD_MST_SYS_MUX(_name, _reg) \
+ AUD_MST_MUX(_name, _reg, 0)
+
+static AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AUD_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AUD_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AUD_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AUD_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static AUD_MST_MCLK_MUX(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+#define AUD_MST_DIV(_name, _reg, _flag) \
+ AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
+ "aud_"#_name"_sel", CLK_SET_RATE_PARENT) \
+
+#define AUD_MST_MCLK_DIV(_name, _reg) \
+ AUD_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
+
+#define AUD_MST_SYS_DIV(_name, _reg) \
+ AUD_MST_DIV(_name, _reg, 0)
+
+static AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AUD_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AUD_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AUD_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AUD_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static AUD_MST_MCLK_DIV(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+#define AUD_MST_MCLK_GATE(_name, _reg) \
+ AUD_GATE(_name, _reg, 31, "aud_"#_name"_div", \
+ CLK_SET_RATE_PARENT)
+
+static AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AUD_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AUD_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AUD_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AUD_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static AUD_MST_MCLK_GATE(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
/* Sample Clocks */
-#define AXG_MST_SCLK_PRE_EN(_name, _reg) \
- AXG_AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
- "axg_mst_"#_name"_mclk", 0)
-
-static AXG_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AXG_AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
+#define AUD_MST_SCLK_PRE_EN(_name, _reg) \
+ AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
+ "aud_mst_"#_name"_mclk", 0)
+
+static AUD_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
_hi_shift, _hi_width, _pname, _iflags) \
-struct clk_regmap axg_##_name = { \
+struct clk_regmap aud_##_name = { \
.data = &(struct meson_sclk_div_data) { \
.div = { \
.reg_off = (_reg), \
@@ -189,7 +193,7 @@ struct clk_regmap axg_##_name = { \
}, \
}, \
.hw.init = &(struct clk_init_data) { \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &meson_sclk_div_ops, \
.parent_names = (const char *[]) { _pname }, \
.num_parents = 1, \
@@ -197,32 +201,32 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_MST_SCLK_DIV(_name, _reg) \
- AXG_AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
- "axg_mst_"#_name"_sclk_pre_en", \
- CLK_SET_RATE_PARENT)
-
-static AXG_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AXG_MST_SCLK_POST_EN(_name, _reg) \
- AXG_AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
- "axg_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT)
-
-static AXG_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AXG_AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
+#define AUD_MST_SCLK_DIV(_name, _reg) \
+ AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
+ "aud_mst_"#_name"_sclk_pre_en", \
+ CLK_SET_RATE_PARENT)
+
+static AUD_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AUD_MST_SCLK_POST_EN(_name, _reg) \
+ AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
+ "aud_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT)
+
+static AUD_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
_pname, _iflags) \
-struct clk_regmap axg_##_name = { \
+struct clk_regmap aud_##_name = { \
.data = &(struct meson_clk_triphase_data) { \
.ph0 = { \
.reg_off = (_reg), \
@@ -241,7 +245,7 @@ struct clk_regmap axg_##_name = { \
}, \
}, \
.hw.init = &(struct clk_init_data) { \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &meson_clk_triphase_ops, \
.parent_names = (const char *[]) { _pname }, \
.num_parents = 1, \
@@ -249,87 +253,87 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_MST_SCLK(_name, _reg) \
- AXG_AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
- "axg_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT)
-
-static AXG_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
-static AXG_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
-static AXG_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
-static AXG_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
-static AXG_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
-static AXG_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
-
-#define AXG_MST_LRCLK_DIV(_name, _reg) \
- AXG_AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
- "axg_mst_"#_name"_sclk_post_en", 0) \
-
-static AXG_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AXG_MST_LRCLK(_name, _reg) \
- AXG_AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
- "axg_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT)
-
-static AXG_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
-static AXG_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
-static AXG_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
-static AXG_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
-static AXG_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
-static AXG_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+#define AUD_MST_SCLK(_name, _reg) \
+ AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
+ "aud_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT)
+
+static AUD_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AUD_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AUD_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AUD_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AUD_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AUD_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+#define AUD_MST_LRCLK_DIV(_name, _reg) \
+ AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
+ "aud_mst_"#_name"_sclk_post_en", 0) \
+
+static AUD_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AUD_MST_LRCLK(_name, _reg) \
+ AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
+ "aud_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT)
+
+static AUD_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AUD_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AUD_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AUD_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AUD_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
static const char * const tdm_sclk_parent_names[] = {
- "axg_mst_a_sclk", "axg_mst_b_sclk", "axg_mst_c_sclk",
- "axg_mst_d_sclk", "axg_mst_e_sclk", "axg_mst_f_sclk",
- "axg_slv_sclk0", "axg_slv_sclk1", "axg_slv_sclk2",
- "axg_slv_sclk3", "axg_slv_sclk4", "axg_slv_sclk5",
- "axg_slv_sclk6", "axg_slv_sclk7", "axg_slv_sclk8",
- "axg_slv_sclk9"
+ "aud_mst_a_sclk", "aud_mst_b_sclk", "aud_mst_c_sclk",
+ "aud_mst_d_sclk", "aud_mst_e_sclk", "aud_mst_f_sclk",
+ "aud_slv_sclk0", "aud_slv_sclk1", "aud_slv_sclk2",
+ "aud_slv_sclk3", "aud_slv_sclk4", "aud_slv_sclk5",
+ "aud_slv_sclk6", "aud_slv_sclk7", "aud_slv_sclk8",
+ "aud_slv_sclk9"
};
-#define AXG_TDM_SCLK_MUX(_name, _reg) \
- AXG_AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
+#define AUD_TDM_SCLK_MUX(_name, _reg) \
+ AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
CLK_MUX_ROUND_CLOSEST, \
tdm_sclk_parent_names, 0)
-static AXG_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AXG_TDM_SCLK_PRE_EN(_name, _reg) \
- AXG_AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
- "axg_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT)
-
-static AXG_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AXG_TDM_SCLK_POST_EN(_name, _reg) \
- AXG_AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
- "axg_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT)
-
-static AXG_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AXG_TDM_SCLK(_name, _reg) \
- struct clk_regmap axg_tdm##_name##_sclk = { \
+static AUD_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AUD_TDM_SCLK_PRE_EN(_name, _reg) \
+ AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
+ "aud_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT)
+
+static AUD_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AUD_TDM_SCLK_POST_EN(_name, _reg) \
+ AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
+ "aud_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT)
+
+static AUD_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AUD_TDM_SCLK(_name, _reg) \
+ struct clk_regmap aud_tdm##_name##_sclk = { \
.data = &(struct meson_clk_phase_data) { \
.ph = { \
.reg_off = (_reg), \
@@ -338,44 +342,83 @@ static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
}, \
}, \
.hw.init = &(struct clk_init_data) { \
- .name = "axg_tdm"#_name"_sclk", \
+ .name = "aud_tdm"#_name"_sclk", \
.ops = &meson_clk_phase_ops, \
.parent_names = (const char *[]) \
- { "axg_tdm"#_name"_sclk_post_en" }, \
+ { "aud_tdm"#_name"_sclk_post_en" }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \
}, \
}
-static AXG_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+static AUD_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
static const char * const tdm_lrclk_parent_names[] = {
- "axg_mst_a_lrclk", "axg_mst_b_lrclk", "axg_mst_c_lrclk",
- "axg_mst_d_lrclk", "axg_mst_e_lrclk", "axg_mst_f_lrclk",
- "axg_slv_lrclk0", "axg_slv_lrclk1", "axg_slv_lrclk2",
- "axg_slv_lrclk3", "axg_slv_lrclk4", "axg_slv_lrclk5",
- "axg_slv_lrclk6", "axg_slv_lrclk7", "axg_slv_lrclk8",
- "axg_slv_lrclk9"
+ "aud_mst_a_lrclk", "aud_mst_b_lrclk", "aud_mst_c_lrclk",
+ "aud_mst_d_lrclk", "aud_mst_e_lrclk", "aud_mst_f_lrclk",
+ "aud_slv_lrclk0", "aud_slv_lrclk1", "aud_slv_lrclk2",
+ "aud_slv_lrclk3", "aud_slv_lrclk4", "aud_slv_lrclk5",
+ "aud_slv_lrclk6", "aud_slv_lrclk7", "aud_slv_lrclk8",
+ "aud_slv_lrclk9"
};
-#define AXG_TDM_LRLCK(_name, _reg) \
- AXG_AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
- CLK_MUX_ROUND_CLOSEST, \
- tdm_lrclk_parent_names, 0)
+#define AUD_TDM_LRCLK(_name, _reg) \
+ AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_lrclk_parent_names, 0)
+
+static AUD_TDM_LRCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_LRCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_LRCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_LRCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_LRCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_LRCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_LRCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+/* G12A Pad control */
+#define AUD_TDM_PAD_CTRL(_name, _reg, _shift, _parents) \
+ AUD_MUX(tdm_##_name, _reg, 0x7, _shift, 0, _parents, \
+ CLK_SET_RATE_NO_REPARENT)
+
+static const char * const mclk_pad_ctrl_parent_names[] = {
+ "aud_mst_a_mclk", "aud_mst_b_mclk", "aud_mst_c_mclk",
+ "aud_mst_d_mclk", "aud_mst_e_mclk", "aud_mst_f_mclk",
+};
+
+static AUD_TDM_PAD_CTRL(mclk_pad_0, AUDIO_MST_PAD_CTRL0, 0,
+ mclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(mclk_pad_1, AUDIO_MST_PAD_CTRL0, 4,
+ mclk_pad_ctrl_parent_names);
+
+static const char * const lrclk_pad_ctrl_parent_names[] = {
+ "aud_mst_a_lrclk", "aud_mst_b_lrclk", "aud_mst_c_lrclk",
+ "aud_mst_d_lrclk", "aud_mst_e_lrclk", "aud_mst_f_lrclk",
+};
-static AXG_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+static AUD_TDM_PAD_CTRL(lrclk_pad_0, AUDIO_MST_PAD_CTRL1, 16,
+ lrclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(lrclk_pad_1, AUDIO_MST_PAD_CTRL1, 20,
+ lrclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(lrclk_pad_2, AUDIO_MST_PAD_CTRL1, 24,
+ lrclk_pad_ctrl_parent_names);
+
+static const char * const sclk_pad_ctrl_parent_names[] = {
+ "aud_mst_a_sclk", "aud_mst_b_sclk", "aud_mst_c_sclk",
+ "aud_mst_d_sclk", "aud_mst_e_sclk", "aud_mst_f_sclk",
+};
+
+static AUD_TDM_PAD_CTRL(sclk_pad_0, AUDIO_MST_PAD_CTRL1, 0,
+ sclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(sclk_pad_1, AUDIO_MST_PAD_CTRL1, 4,
+ sclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8,
+ sclk_pad_ctrl_parent_names);
/*
* Array of all clocks provided by this provider
@@ -383,255 +426,416 @@ static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
*/
static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
.hws = {
- [AUD_CLKID_DDR_ARB] = &axg_ddr_arb.hw,
- [AUD_CLKID_PDM] = &axg_pdm.hw,
- [AUD_CLKID_TDMIN_A] = &axg_tdmin_a.hw,
- [AUD_CLKID_TDMIN_B] = &axg_tdmin_b.hw,
- [AUD_CLKID_TDMIN_C] = &axg_tdmin_c.hw,
- [AUD_CLKID_TDMIN_LB] = &axg_tdmin_lb.hw,
- [AUD_CLKID_TDMOUT_A] = &axg_tdmout_a.hw,
- [AUD_CLKID_TDMOUT_B] = &axg_tdmout_b.hw,
- [AUD_CLKID_TDMOUT_C] = &axg_tdmout_c.hw,
- [AUD_CLKID_FRDDR_A] = &axg_frddr_a.hw,
- [AUD_CLKID_FRDDR_B] = &axg_frddr_b.hw,
- [AUD_CLKID_FRDDR_C] = &axg_frddr_c.hw,
- [AUD_CLKID_TODDR_A] = &axg_toddr_a.hw,
- [AUD_CLKID_TODDR_B] = &axg_toddr_b.hw,
- [AUD_CLKID_TODDR_C] = &axg_toddr_c.hw,
- [AUD_CLKID_LOOPBACK] = &axg_loopback.hw,
- [AUD_CLKID_SPDIFIN] = &axg_spdifin.hw,
- [AUD_CLKID_SPDIFOUT] = &axg_spdifout.hw,
- [AUD_CLKID_RESAMPLE] = &axg_resample.hw,
- [AUD_CLKID_POWER_DETECT] = &axg_power_detect.hw,
- [AUD_CLKID_MST_A_MCLK_SEL] = &axg_mst_a_mclk_sel.hw,
- [AUD_CLKID_MST_B_MCLK_SEL] = &axg_mst_b_mclk_sel.hw,
- [AUD_CLKID_MST_C_MCLK_SEL] = &axg_mst_c_mclk_sel.hw,
- [AUD_CLKID_MST_D_MCLK_SEL] = &axg_mst_d_mclk_sel.hw,
- [AUD_CLKID_MST_E_MCLK_SEL] = &axg_mst_e_mclk_sel.hw,
- [AUD_CLKID_MST_F_MCLK_SEL] = &axg_mst_f_mclk_sel.hw,
- [AUD_CLKID_MST_A_MCLK_DIV] = &axg_mst_a_mclk_div.hw,
- [AUD_CLKID_MST_B_MCLK_DIV] = &axg_mst_b_mclk_div.hw,
- [AUD_CLKID_MST_C_MCLK_DIV] = &axg_mst_c_mclk_div.hw,
- [AUD_CLKID_MST_D_MCLK_DIV] = &axg_mst_d_mclk_div.hw,
- [AUD_CLKID_MST_E_MCLK_DIV] = &axg_mst_e_mclk_div.hw,
- [AUD_CLKID_MST_F_MCLK_DIV] = &axg_mst_f_mclk_div.hw,
- [AUD_CLKID_MST_A_MCLK] = &axg_mst_a_mclk.hw,
- [AUD_CLKID_MST_B_MCLK] = &axg_mst_b_mclk.hw,
- [AUD_CLKID_MST_C_MCLK] = &axg_mst_c_mclk.hw,
- [AUD_CLKID_MST_D_MCLK] = &axg_mst_d_mclk.hw,
- [AUD_CLKID_MST_E_MCLK] = &axg_mst_e_mclk.hw,
- [AUD_CLKID_MST_F_MCLK] = &axg_mst_f_mclk.hw,
- [AUD_CLKID_SPDIFOUT_CLK_SEL] = &axg_spdifout_clk_sel.hw,
- [AUD_CLKID_SPDIFOUT_CLK_DIV] = &axg_spdifout_clk_div.hw,
- [AUD_CLKID_SPDIFOUT_CLK] = &axg_spdifout_clk.hw,
- [AUD_CLKID_SPDIFIN_CLK_SEL] = &axg_spdifin_clk_sel.hw,
- [AUD_CLKID_SPDIFIN_CLK_DIV] = &axg_spdifin_clk_div.hw,
- [AUD_CLKID_SPDIFIN_CLK] = &axg_spdifin_clk.hw,
- [AUD_CLKID_PDM_DCLK_SEL] = &axg_pdm_dclk_sel.hw,
- [AUD_CLKID_PDM_DCLK_DIV] = &axg_pdm_dclk_div.hw,
- [AUD_CLKID_PDM_DCLK] = &axg_pdm_dclk.hw,
- [AUD_CLKID_PDM_SYSCLK_SEL] = &axg_pdm_sysclk_sel.hw,
- [AUD_CLKID_PDM_SYSCLK_DIV] = &axg_pdm_sysclk_div.hw,
- [AUD_CLKID_PDM_SYSCLK] = &axg_pdm_sysclk.hw,
- [AUD_CLKID_MST_A_SCLK_PRE_EN] = &axg_mst_a_sclk_pre_en.hw,
- [AUD_CLKID_MST_B_SCLK_PRE_EN] = &axg_mst_b_sclk_pre_en.hw,
- [AUD_CLKID_MST_C_SCLK_PRE_EN] = &axg_mst_c_sclk_pre_en.hw,
- [AUD_CLKID_MST_D_SCLK_PRE_EN] = &axg_mst_d_sclk_pre_en.hw,
- [AUD_CLKID_MST_E_SCLK_PRE_EN] = &axg_mst_e_sclk_pre_en.hw,
- [AUD_CLKID_MST_F_SCLK_PRE_EN] = &axg_mst_f_sclk_pre_en.hw,
- [AUD_CLKID_MST_A_SCLK_DIV] = &axg_mst_a_sclk_div.hw,
- [AUD_CLKID_MST_B_SCLK_DIV] = &axg_mst_b_sclk_div.hw,
- [AUD_CLKID_MST_C_SCLK_DIV] = &axg_mst_c_sclk_div.hw,
- [AUD_CLKID_MST_D_SCLK_DIV] = &axg_mst_d_sclk_div.hw,
- [AUD_CLKID_MST_E_SCLK_DIV] = &axg_mst_e_sclk_div.hw,
- [AUD_CLKID_MST_F_SCLK_DIV] = &axg_mst_f_sclk_div.hw,
- [AUD_CLKID_MST_A_SCLK_POST_EN] = &axg_mst_a_sclk_post_en.hw,
- [AUD_CLKID_MST_B_SCLK_POST_EN] = &axg_mst_b_sclk_post_en.hw,
- [AUD_CLKID_MST_C_SCLK_POST_EN] = &axg_mst_c_sclk_post_en.hw,
- [AUD_CLKID_MST_D_SCLK_POST_EN] = &axg_mst_d_sclk_post_en.hw,
- [AUD_CLKID_MST_E_SCLK_POST_EN] = &axg_mst_e_sclk_post_en.hw,
- [AUD_CLKID_MST_F_SCLK_POST_EN] = &axg_mst_f_sclk_post_en.hw,
- [AUD_CLKID_MST_A_SCLK] = &axg_mst_a_sclk.hw,
- [AUD_CLKID_MST_B_SCLK] = &axg_mst_b_sclk.hw,
- [AUD_CLKID_MST_C_SCLK] = &axg_mst_c_sclk.hw,
- [AUD_CLKID_MST_D_SCLK] = &axg_mst_d_sclk.hw,
- [AUD_CLKID_MST_E_SCLK] = &axg_mst_e_sclk.hw,
- [AUD_CLKID_MST_F_SCLK] = &axg_mst_f_sclk.hw,
- [AUD_CLKID_MST_A_LRCLK_DIV] = &axg_mst_a_lrclk_div.hw,
- [AUD_CLKID_MST_B_LRCLK_DIV] = &axg_mst_b_lrclk_div.hw,
- [AUD_CLKID_MST_C_LRCLK_DIV] = &axg_mst_c_lrclk_div.hw,
- [AUD_CLKID_MST_D_LRCLK_DIV] = &axg_mst_d_lrclk_div.hw,
- [AUD_CLKID_MST_E_LRCLK_DIV] = &axg_mst_e_lrclk_div.hw,
- [AUD_CLKID_MST_F_LRCLK_DIV] = &axg_mst_f_lrclk_div.hw,
- [AUD_CLKID_MST_A_LRCLK] = &axg_mst_a_lrclk.hw,
- [AUD_CLKID_MST_B_LRCLK] = &axg_mst_b_lrclk.hw,
- [AUD_CLKID_MST_C_LRCLK] = &axg_mst_c_lrclk.hw,
- [AUD_CLKID_MST_D_LRCLK] = &axg_mst_d_lrclk.hw,
- [AUD_CLKID_MST_E_LRCLK] = &axg_mst_e_lrclk.hw,
- [AUD_CLKID_MST_F_LRCLK] = &axg_mst_f_lrclk.hw,
- [AUD_CLKID_TDMIN_A_SCLK_SEL] = &axg_tdmin_a_sclk_sel.hw,
- [AUD_CLKID_TDMIN_B_SCLK_SEL] = &axg_tdmin_b_sclk_sel.hw,
- [AUD_CLKID_TDMIN_C_SCLK_SEL] = &axg_tdmin_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &axg_tdmin_lb_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &axg_tdmout_a_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &axg_tdmout_b_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &axg_tdmout_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &axg_tdmin_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &axg_tdmin_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &axg_tdmin_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &axg_tdmin_lb_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &axg_tdmout_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &axg_tdmout_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &axg_tdmout_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &axg_tdmin_a_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &axg_tdmin_b_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &axg_tdmin_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &axg_tdmin_lb_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &axg_tdmout_a_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &axg_tdmout_b_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &axg_tdmout_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK] = &axg_tdmin_a_sclk.hw,
- [AUD_CLKID_TDMIN_B_SCLK] = &axg_tdmin_b_sclk.hw,
- [AUD_CLKID_TDMIN_C_SCLK] = &axg_tdmin_c_sclk.hw,
- [AUD_CLKID_TDMIN_LB_SCLK] = &axg_tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &axg_tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &axg_tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &axg_tdmout_c_sclk.hw,
- [AUD_CLKID_TDMIN_A_LRCLK] = &axg_tdmin_a_lrclk.hw,
- [AUD_CLKID_TDMIN_B_LRCLK] = &axg_tdmin_b_lrclk.hw,
- [AUD_CLKID_TDMIN_C_LRCLK] = &axg_tdmin_c_lrclk.hw,
- [AUD_CLKID_TDMIN_LB_LRCLK] = &axg_tdmin_lb_lrclk.hw,
- [AUD_CLKID_TDMOUT_A_LRCLK] = &axg_tdmout_a_lrclk.hw,
- [AUD_CLKID_TDMOUT_B_LRCLK] = &axg_tdmout_b_lrclk.hw,
- [AUD_CLKID_TDMOUT_C_LRCLK] = &axg_tdmout_c_lrclk.hw,
+ [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw,
+ [AUD_CLKID_PDM] = &aud_pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &aud_loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &aud_resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
-/* Convenience table to populate regmap in .probe() */
-static struct clk_regmap *const axg_audio_clk_regmaps[] = {
- &axg_ddr_arb,
- &axg_pdm,
- &axg_tdmin_a,
- &axg_tdmin_b,
- &axg_tdmin_c,
- &axg_tdmin_lb,
- &axg_tdmout_a,
- &axg_tdmout_b,
- &axg_tdmout_c,
- &axg_frddr_a,
- &axg_frddr_b,
- &axg_frddr_c,
- &axg_toddr_a,
- &axg_toddr_b,
- &axg_toddr_c,
- &axg_loopback,
- &axg_spdifin,
- &axg_spdifout,
- &axg_resample,
- &axg_power_detect,
- &axg_mst_a_mclk_sel,
- &axg_mst_b_mclk_sel,
- &axg_mst_c_mclk_sel,
- &axg_mst_d_mclk_sel,
- &axg_mst_e_mclk_sel,
- &axg_mst_f_mclk_sel,
- &axg_mst_a_mclk_div,
- &axg_mst_b_mclk_div,
- &axg_mst_c_mclk_div,
- &axg_mst_d_mclk_div,
- &axg_mst_e_mclk_div,
- &axg_mst_f_mclk_div,
- &axg_mst_a_mclk,
- &axg_mst_b_mclk,
- &axg_mst_c_mclk,
- &axg_mst_d_mclk,
- &axg_mst_e_mclk,
- &axg_mst_f_mclk,
- &axg_spdifout_clk_sel,
- &axg_spdifout_clk_div,
- &axg_spdifout_clk,
- &axg_spdifin_clk_sel,
- &axg_spdifin_clk_div,
- &axg_spdifin_clk,
- &axg_pdm_dclk_sel,
- &axg_pdm_dclk_div,
- &axg_pdm_dclk,
- &axg_pdm_sysclk_sel,
- &axg_pdm_sysclk_div,
- &axg_pdm_sysclk,
- &axg_mst_a_sclk_pre_en,
- &axg_mst_b_sclk_pre_en,
- &axg_mst_c_sclk_pre_en,
- &axg_mst_d_sclk_pre_en,
- &axg_mst_e_sclk_pre_en,
- &axg_mst_f_sclk_pre_en,
- &axg_mst_a_sclk_div,
- &axg_mst_b_sclk_div,
- &axg_mst_c_sclk_div,
- &axg_mst_d_sclk_div,
- &axg_mst_e_sclk_div,
- &axg_mst_f_sclk_div,
- &axg_mst_a_sclk_post_en,
- &axg_mst_b_sclk_post_en,
- &axg_mst_c_sclk_post_en,
- &axg_mst_d_sclk_post_en,
- &axg_mst_e_sclk_post_en,
- &axg_mst_f_sclk_post_en,
- &axg_mst_a_sclk,
- &axg_mst_b_sclk,
- &axg_mst_c_sclk,
- &axg_mst_d_sclk,
- &axg_mst_e_sclk,
- &axg_mst_f_sclk,
- &axg_mst_a_lrclk_div,
- &axg_mst_b_lrclk_div,
- &axg_mst_c_lrclk_div,
- &axg_mst_d_lrclk_div,
- &axg_mst_e_lrclk_div,
- &axg_mst_f_lrclk_div,
- &axg_mst_a_lrclk,
- &axg_mst_b_lrclk,
- &axg_mst_c_lrclk,
- &axg_mst_d_lrclk,
- &axg_mst_e_lrclk,
- &axg_mst_f_lrclk,
- &axg_tdmin_a_sclk_sel,
- &axg_tdmin_b_sclk_sel,
- &axg_tdmin_c_sclk_sel,
- &axg_tdmin_lb_sclk_sel,
- &axg_tdmout_a_sclk_sel,
- &axg_tdmout_b_sclk_sel,
- &axg_tdmout_c_sclk_sel,
- &axg_tdmin_a_sclk_pre_en,
- &axg_tdmin_b_sclk_pre_en,
- &axg_tdmin_c_sclk_pre_en,
- &axg_tdmin_lb_sclk_pre_en,
- &axg_tdmout_a_sclk_pre_en,
- &axg_tdmout_b_sclk_pre_en,
- &axg_tdmout_c_sclk_pre_en,
- &axg_tdmin_a_sclk_post_en,
- &axg_tdmin_b_sclk_post_en,
- &axg_tdmin_c_sclk_post_en,
- &axg_tdmin_lb_sclk_post_en,
- &axg_tdmout_a_sclk_post_en,
- &axg_tdmout_b_sclk_post_en,
- &axg_tdmout_c_sclk_post_en,
- &axg_tdmin_a_sclk,
- &axg_tdmin_b_sclk,
- &axg_tdmin_c_sclk,
- &axg_tdmin_lb_sclk,
- &axg_tdmout_a_sclk,
- &axg_tdmout_b_sclk,
- &axg_tdmout_c_sclk,
- &axg_tdmin_a_lrclk,
- &axg_tdmin_b_lrclk,
- &axg_tdmin_c_lrclk,
- &axg_tdmin_lb_lrclk,
- &axg_tdmout_a_lrclk,
- &axg_tdmout_b_lrclk,
- &axg_tdmout_c_lrclk,
+/*
+ * Array of all G12A clocks provided by this provider
+ * The input clocks of the controller will be populated at runtime
+ */
+static struct clk_hw_onecell_data g12a_audio_hw_onecell_data = {
+ .hws = {
+ [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw,
+ [AUD_CLKID_PDM] = &aud_pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &aud_loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &aud_resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw,
+ [AUD_CLKID_SPDIFOUT_B] = &aud_spdifout_b.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &aud_spdifout_b_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &aud_spdifout_b_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK] = &aud_spdifout_b_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw,
+ [AUD_CLKID_TDM_MCLK_PAD0] = &aud_tdm_mclk_pad_0.hw,
+ [AUD_CLKID_TDM_MCLK_PAD1] = &aud_tdm_mclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD0] = &aud_tdm_lrclk_pad_0.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD1] = &aud_tdm_lrclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD2] = &aud_tdm_lrclk_pad_2.hw,
+ [AUD_CLKID_TDM_SCLK_PAD0] = &aud_tdm_sclk_pad_0.hw,
+ [AUD_CLKID_TDM_SCLK_PAD1] = &aud_tdm_sclk_pad_1.hw,
+ [AUD_CLKID_TDM_SCLK_PAD2] = &aud_tdm_sclk_pad_2.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
+
+/* Convenience table to populate regmap in .probe().
+ * Note that this table is shared between both AXG and G12A, with the
+ * spdifout_b clocks being exclusive to G12A. Since those clocks are
+ * not declared in the AXG onecell table, separate AXG/G12A regmap
+ * tables are not needed.
+ */
+static struct clk_regmap *const aud_clk_regmaps[] = {
+ &aud_ddr_arb,
+ &aud_pdm,
+ &aud_tdmin_a,
+ &aud_tdmin_b,
+ &aud_tdmin_c,
+ &aud_tdmin_lb,
+ &aud_tdmout_a,
+ &aud_tdmout_b,
+ &aud_tdmout_c,
+ &aud_frddr_a,
+ &aud_frddr_b,
+ &aud_frddr_c,
+ &aud_toddr_a,
+ &aud_toddr_b,
+ &aud_toddr_c,
+ &aud_loopback,
+ &aud_spdifin,
+ &aud_spdifout,
+ &aud_resample,
+ &aud_power_detect,
+ &aud_spdifout_b,
+ &aud_mst_a_mclk_sel,
+ &aud_mst_b_mclk_sel,
+ &aud_mst_c_mclk_sel,
+ &aud_mst_d_mclk_sel,
+ &aud_mst_e_mclk_sel,
+ &aud_mst_f_mclk_sel,
+ &aud_mst_a_mclk_div,
+ &aud_mst_b_mclk_div,
+ &aud_mst_c_mclk_div,
+ &aud_mst_d_mclk_div,
+ &aud_mst_e_mclk_div,
+ &aud_mst_f_mclk_div,
+ &aud_mst_a_mclk,
+ &aud_mst_b_mclk,
+ &aud_mst_c_mclk,
+ &aud_mst_d_mclk,
+ &aud_mst_e_mclk,
+ &aud_mst_f_mclk,
+ &aud_spdifout_clk_sel,
+ &aud_spdifout_clk_div,
+ &aud_spdifout_clk,
+ &aud_spdifin_clk_sel,
+ &aud_spdifin_clk_div,
+ &aud_spdifin_clk,
+ &aud_pdm_dclk_sel,
+ &aud_pdm_dclk_div,
+ &aud_pdm_dclk,
+ &aud_pdm_sysclk_sel,
+ &aud_pdm_sysclk_div,
+ &aud_pdm_sysclk,
+ &aud_mst_a_sclk_pre_en,
+ &aud_mst_b_sclk_pre_en,
+ &aud_mst_c_sclk_pre_en,
+ &aud_mst_d_sclk_pre_en,
+ &aud_mst_e_sclk_pre_en,
+ &aud_mst_f_sclk_pre_en,
+ &aud_mst_a_sclk_div,
+ &aud_mst_b_sclk_div,
+ &aud_mst_c_sclk_div,
+ &aud_mst_d_sclk_div,
+ &aud_mst_e_sclk_div,
+ &aud_mst_f_sclk_div,
+ &aud_mst_a_sclk_post_en,
+ &aud_mst_b_sclk_post_en,
+ &aud_mst_c_sclk_post_en,
+ &aud_mst_d_sclk_post_en,
+ &aud_mst_e_sclk_post_en,
+ &aud_mst_f_sclk_post_en,
+ &aud_mst_a_sclk,
+ &aud_mst_b_sclk,
+ &aud_mst_c_sclk,
+ &aud_mst_d_sclk,
+ &aud_mst_e_sclk,
+ &aud_mst_f_sclk,
+ &aud_mst_a_lrclk_div,
+ &aud_mst_b_lrclk_div,
+ &aud_mst_c_lrclk_div,
+ &aud_mst_d_lrclk_div,
+ &aud_mst_e_lrclk_div,
+ &aud_mst_f_lrclk_div,
+ &aud_mst_a_lrclk,
+ &aud_mst_b_lrclk,
+ &aud_mst_c_lrclk,
+ &aud_mst_d_lrclk,
+ &aud_mst_e_lrclk,
+ &aud_mst_f_lrclk,
+ &aud_tdmin_a_sclk_sel,
+ &aud_tdmin_b_sclk_sel,
+ &aud_tdmin_c_sclk_sel,
+ &aud_tdmin_lb_sclk_sel,
+ &aud_tdmout_a_sclk_sel,
+ &aud_tdmout_b_sclk_sel,
+ &aud_tdmout_c_sclk_sel,
+ &aud_tdmin_a_sclk_pre_en,
+ &aud_tdmin_b_sclk_pre_en,
+ &aud_tdmin_c_sclk_pre_en,
+ &aud_tdmin_lb_sclk_pre_en,
+ &aud_tdmout_a_sclk_pre_en,
+ &aud_tdmout_b_sclk_pre_en,
+ &aud_tdmout_c_sclk_pre_en,
+ &aud_tdmin_a_sclk_post_en,
+ &aud_tdmin_b_sclk_post_en,
+ &aud_tdmin_c_sclk_post_en,
+ &aud_tdmin_lb_sclk_post_en,
+ &aud_tdmout_a_sclk_post_en,
+ &aud_tdmout_b_sclk_post_en,
+ &aud_tdmout_c_sclk_post_en,
+ &aud_tdmin_a_sclk,
+ &aud_tdmin_b_sclk,
+ &aud_tdmin_c_sclk,
+ &aud_tdmin_lb_sclk,
+ &aud_tdmout_a_sclk,
+ &aud_tdmout_b_sclk,
+ &aud_tdmout_c_sclk,
+ &aud_tdmin_a_lrclk,
+ &aud_tdmin_b_lrclk,
+ &aud_tdmin_c_lrclk,
+ &aud_tdmin_lb_lrclk,
+ &aud_tdmout_a_lrclk,
+ &aud_tdmout_b_lrclk,
+ &aud_tdmout_c_lrclk,
+ &aud_spdifout_b_clk_sel,
+ &aud_spdifout_b_clk_div,
+ &aud_spdifout_b_clk,
+ &aud_tdm_mclk_pad_0,
+ &aud_tdm_mclk_pad_1,
+ &aud_tdm_lrclk_pad_0,
+ &aud_tdm_lrclk_pad_1,
+ &aud_tdm_lrclk_pad_2,
+ &aud_tdm_sclk_pad_0,
+ &aud_tdm_sclk_pad_1,
+ &aud_tdm_sclk_pad_2,
};
static int devm_clk_get_enable(struct device *dev, char *id)
@@ -665,14 +869,13 @@ static int devm_clk_get_enable(struct device *dev, char *id)
}
static int axg_register_clk_hw_input(struct device *dev,
- const char *name,
- unsigned int clkid)
+ const char *name)
{
char *clk_name;
struct clk_hw *hw;
int err = 0;
- clk_name = kasprintf(GFP_KERNEL, "axg_%s", name);
+ clk_name = kasprintf(GFP_KERNEL, "aud_%s", name);
if (!clk_name)
return -ENOMEM;
@@ -686,8 +889,6 @@ static int axg_register_clk_hw_input(struct device *dev,
if (err != -EPROBE_DEFER)
dev_err(dev, "failed to get %s clock", name);
}
- } else {
- axg_audio_hw_onecell_data.hws[clkid] = hw;
}
kfree(clk_name);
@@ -696,8 +897,7 @@ static int axg_register_clk_hw_input(struct device *dev,
static int axg_register_clk_hw_inputs(struct device *dev,
const char *basename,
- unsigned int count,
- unsigned int clkid)
+ unsigned int count)
{
char *name;
int i, ret;
@@ -707,7 +907,7 @@ static int axg_register_clk_hw_inputs(struct device *dev,
if (!name)
return -ENOMEM;
- ret = axg_register_clk_hw_input(dev, name, clkid + i);
+ ret = axg_register_clk_hw_input(dev, name);
kfree(name);
if (ret)
return ret;
@@ -723,15 +923,24 @@ static const struct regmap_config axg_audio_regmap_cfg = {
.max_register = AUDIO_CLK_PDMIN_CTRL1,
};
+struct audioclk_data {
+ struct clk_hw_onecell_data *hw_onecell_data;
+};
+
static int axg_audio_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct audioclk_data *data;
struct regmap *map;
struct resource *res;
void __iomem *regs;
struct clk_hw *hw;
int ret, i;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
if (IS_ERR(regs))
@@ -755,40 +964,35 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Register the peripheral input clock */
- hw = meson_clk_hw_register_input(dev, "pclk", "axg_audio_pclk", 0);
+ hw = meson_clk_hw_register_input(dev, "pclk", "audio_pclk", 0);
if (IS_ERR(hw))
return PTR_ERR(hw);
- axg_audio_hw_onecell_data.hws[AUD_CLKID_PCLK] = hw;
-
/* Register optional input master clocks */
ret = axg_register_clk_hw_inputs(dev, "mst_in",
- AXG_MST_IN_COUNT,
- AUD_CLKID_MST0);
+ AUD_MST_IN_COUNT);
if (ret)
return ret;
/* Register optional input slave sclks */
ret = axg_register_clk_hw_inputs(dev, "slv_sclk",
- AXG_SLV_SCLK_COUNT,
- AUD_CLKID_SLV_SCLK0);
+ AUD_SLV_SCLK_COUNT);
if (ret)
return ret;
/* Register optional input slave lrclks */
ret = axg_register_clk_hw_inputs(dev, "slv_lrclk",
- AXG_SLV_LRCLK_COUNT,
- AUD_CLKID_SLV_LRCLK0);
+ AUD_SLV_LRCLK_COUNT);
if (ret)
return ret;
/* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(axg_audio_clk_regmaps); i++)
- axg_audio_clk_regmaps[i]->map = map;
+ for (i = 0; i < ARRAY_SIZE(aud_clk_regmaps); i++)
+ aud_clk_regmaps[i]->map = map;
/* Take care to skip the registered input clocks */
- for (i = AUD_CLKID_DDR_ARB; i < axg_audio_hw_onecell_data.num; i++) {
- hw = axg_audio_hw_onecell_data.hws[i];
+ for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) {
+ hw = data->hw_onecell_data->hws[i];
/* array might be sparse */
if (!hw)
continue;
@@ -802,12 +1006,25 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- &axg_audio_hw_onecell_data);
+ data->hw_onecell_data);
}
+static const struct audioclk_data axg_audioclk_data = {
+ .hw_onecell_data = &axg_audio_hw_onecell_data,
+};
+
+static const struct audioclk_data g12a_audioclk_data = {
+ .hw_onecell_data = &g12a_audio_hw_onecell_data,
+};
+
static const struct of_device_id clkc_match_table[] = {
- { .compatible = "amlogic,axg-audio-clkc" },
- {}
+ {
+ .compatible = "amlogic,axg-audio-clkc",
+ .data = &axg_audioclk_data
+ }, {
+ .compatible = "amlogic,g12a-audio-clkc",
+ .data = &g12a_audioclk_data
+ }, {}
};
MODULE_DEVICE_TABLE(of, clkc_match_table);
@@ -820,6 +1037,6 @@ static struct platform_driver axg_audio_driver = {
};
module_platform_driver(axg_audio_driver);
-MODULE_DESCRIPTION("Amlogic A113x Audio Clock driver");
+MODULE_DESCRIPTION("Amlogic AXG/G12A Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
index 7191b39c9d65..5d972d55d6c7 100644
--- a/drivers/clk/meson/axg-audio.h
+++ b/drivers/clk/meson/axg-audio.h
@@ -20,6 +20,8 @@
#define AUDIO_MCLK_D_CTRL 0x010
#define AUDIO_MCLK_E_CTRL 0x014
#define AUDIO_MCLK_F_CTRL 0x018
+#define AUDIO_MST_PAD_CTRL0 0x01c
+#define AUDIO_MST_PAD_CTRL1 0x020
#define AUDIO_MST_A_SCLK_CTRL0 0x040
#define AUDIO_MST_A_SCLK_CTRL1 0x044
#define AUDIO_MST_B_SCLK_CTRL0 0x048
@@ -45,21 +47,13 @@
#define AUDIO_CLK_LOCKER_CTRL 0x0A8
#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
+#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
/*
* CLKID index values
* These indices are entirely contrived and do not map onto the hardware.
*/
-#define AUD_CLKID_PCLK 0
-#define AUD_CLKID_MST0 1
-#define AUD_CLKID_MST1 2
-#define AUD_CLKID_MST2 3
-#define AUD_CLKID_MST3 4
-#define AUD_CLKID_MST4 5
-#define AUD_CLKID_MST5 6
-#define AUD_CLKID_MST6 7
-#define AUD_CLKID_MST7 8
#define AUD_CLKID_MST_A_MCLK_SEL 59
#define AUD_CLKID_MST_B_MCLK_SEL 60
#define AUD_CLKID_MST_C_MCLK_SEL 61
@@ -118,10 +112,12 @@
#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
+#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153
+#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154
/* include the CLKIDs which are part of the DT bindings */
#include <dt-bindings/clock/axg-audio-clkc.h>
-#define NR_CLKS 151
+#define NR_CLKS 163
#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 41e16dd7272a..ddb1e5634739 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -120,7 +120,7 @@ static bool meson_clk_pll_is_better(unsigned long rate,
return true;
} else {
/* Round down */
- if (now < rate && best < now)
+ if (now <= rate && best < now)
return true;
}
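The relaxed comparison matters when a candidate setting lands exactly on the requested rate: with the strict '<', an exact match was rejected and a lower frequency won instead. A minimal sketch of round-down selection under the new rule (the helper and its arguments are illustrative only, not part of the driver):

/* Round down: keep the largest candidate that does not exceed the request. */
static unsigned long pick_round_down(unsigned long rate,
				     const unsigned long *cand, int n)
{
	unsigned long best = 0;
	int i;

	for (i = 0; i < n; i++)
		if (cand[i] <= rate && cand[i] > best)	/* '<' would skip cand[i] == rate */
			best = cand[i];

	return best;	/* e.g. rate = 100 MHz, candidates {96, 100, 104} MHz -> 100 MHz */
}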
@@ -303,6 +303,16 @@ static int meson_clk_pll_is_enabled(struct clk_hw *hw)
return 1;
}
+static int meson_clk_pcie_pll_enable(struct clk_hw *hw)
+{
+ meson_clk_pll_init(hw);
+
+ if (meson_clk_pll_wait_lock(hw))
+ return -EIO;
+
+ return 0;
+}
+
static int meson_clk_pll_enable(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
@@ -387,6 +397,22 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+/*
+ * The Meson G12A PCIE PLL is fine-tuned to deliver a very precise
+ * 100MHz reference clock for the PCIe Analog PHY, and thus requires
+ * a strict register sequence to enable the PLL.
+ * To simplify, re-use the _init() op to enable the PLL and keep
+ * the other ops, except set_rate, since the rate is fixed.
+ */
+const struct clk_ops meson_clk_pcie_pll_ops = {
+ .recalc_rate = meson_clk_pll_recalc_rate,
+ .round_rate = meson_clk_pll_round_rate,
+ .is_enabled = meson_clk_pll_is_enabled,
+ .enable = meson_clk_pcie_pll_enable,
+ .disable = meson_clk_pll_disable
+};
+EXPORT_SYMBOL_GPL(meson_clk_pcie_pll_ops);
+
const struct clk_ops meson_clk_pll_ops = {
.init = meson_clk_pll_init,
.recalc_rate = meson_clk_pll_recalc_rate,
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
index 55af2e285b1b..367efd0f6410 100644
--- a/drivers/clk/meson/clk-pll.h
+++ b/drivers/clk/meson/clk-pll.h
@@ -45,5 +45,6 @@ struct meson_clk_pll_data {
extern const struct clk_ops meson_clk_pll_ro_ops;
extern const struct clk_ops meson_clk_pll_ops;
+extern const struct clk_ops meson_clk_pcie_pll_ops;
#endif /* __MESON_CLK_PLL_H */
diff --git a/drivers/clk/meson/g12a-aoclk.h b/drivers/clk/meson/g12a-aoclk.h
index 04b0d5506641..a67c8a7cd7c4 100644
--- a/drivers/clk/meson/g12a-aoclk.h
+++ b/drivers/clk/meson/g12a-aoclk.h
@@ -16,9 +16,7 @@
* to expose, such as the internal muxes and dividers of composite clocks,
* will remain defined here.
*/
-#define CLKID_AO_SAR_ADC_SEL 16
#define CLKID_AO_SAR_ADC_DIV 17
-#define CLKID_AO_CTS_OSCIN 19
#define CLKID_AO_32K_PRE 20
#define CLKID_AO_32K_DIV 21
#define CLKID_AO_32K_SEL 22
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 0e1ce8c03259..739f64fdf1e3 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -150,6 +150,318 @@ static struct clk_regmap g12a_sys_pll = {
},
};
+static struct clk_regmap g12a_sys_pll_div16_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sys_pll_div16_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "sys_pll" },
+ .num_parents = 1,
+ /*
+ * This clock is used to debug the sys_pll range
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_fixed_factor g12a_sys_pll_div16 = {
+ .mult = 1,
+ .div = 16,
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll_div16",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "sys_pll_div16_en" },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "premux0" */
+static struct clk_regmap g12a_cpu_clk_premux0 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 0,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn0_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "fclk_div2",
+ "fclk_div3" },
+ .num_parents = 3,
+ },
+};
+
+/* Datasheet names this field as "mux0_divn_tcnt" */
+static struct clk_regmap g12a_cpu_clk_mux0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .shift = 4,
+ .width = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn0_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn0_sel" },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "postmux0" */
+static struct clk_regmap g12a_cpu_clk_postmux0 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn0",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn0_sel",
+ "cpu_clk_dyn0_div" },
+ .num_parents = 2,
+ },
+};
+
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap g12a_cpu_clk_premux1 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "fclk_div2",
+ "fclk_div3" },
+ .num_parents = 3,
+ },
+};
+
+/* Datasheet names this field as "Mux1_divn_tcnt" */
+static struct clk_regmap g12a_cpu_clk_mux1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .shift = 20,
+ .width = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn1_sel" },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "postmux1" */
+static struct clk_regmap g12a_cpu_clk_postmux1 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 18,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn1_sel",
+ "cpu_clk_dyn1_div" },
+ .num_parents = 2,
+ },
+};
+
+/* Datasheet names this field as "Final_dyn_mux_sel" */
+static struct clk_regmap g12a_cpu_clk_dyn = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 10,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn0",
+ "cpu_clk_dyn1" },
+ .num_parents = 2,
+ },
+};
+
+/* Datasheet names this field as "Final_mux_sel" */
+static struct clk_regmap g12a_cpu_clk = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn",
+ "sys_pll" },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_div16_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_div16_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ /*
+ * This clock is used to debug the cpu_clk range
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_fixed_factor g12a_cpu_clk_div16 = {
+ .mult = 1,
+ .div = 16,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div16",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk_div16_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_apb_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 3,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_apb_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_apb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_apb",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_apb_div" },
+ .num_parents = 1,
+ /*
+ * This clock is set by the ROM monitor code,
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_atb_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 6,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_atb_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_atb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 17,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_atb",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_atb_div" },
+ .num_parents = 1,
+ /*
+ * This clock is set by the ROM monitor code,
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_axi_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 9,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_axi_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_axi = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 18,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_axi",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_axi_div" },
+ .num_parents = 1,
+ /*
+ * This clock is set by the ROM monitor code,
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_trace_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 20,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_trace_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_trace = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_trace",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_trace_div" },
+ .num_parents = 1,
+ /*
+ * This clock is set by the ROM monitor code,
+ * Linux should not change it at runtime
+ */
+ },
+};
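For the apb/atb/axi/trace dividers above, CLK_DIVIDER_POWER_OF_TWO means the 3-bit register field encodes an exponent rather than a plain divisor, so (assuming the generic divider behaviour) the reachable ratios are:

/*
 *   field value : 0   1   2   3   4    5    6    7
 *   divide by   : 1   2   4   8   16   32   64   128
 *
 * e.g. a hypothetical 1.8 GHz cpu_clk with the cpu_clk_apb_div field
 * set to 3 yields 1.8 GHz / 8 = 225 MHz on cpu_clk_apb_div.
 */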
+
static const struct pll_mult_range g12a_gp0_pll_mult_range = {
.min = 55,
.max = 255,
@@ -302,6 +614,118 @@ static struct clk_regmap g12a_hifi_pll = {
},
};
+/*
+ * The Meson G12A PCIE PLL is fine-tuned to deliver a very precise
+ * 100MHz reference clock for the PCIe Analog PHY, and thus requires
+ * a strict register sequence to enable the PLL.
+ */
+static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
+};
+
+/* Keep a single entry table for recalc/round_rate() ops */
+static const struct pll_params_table g12a_pcie_pll_table[] = {
+ PLL_PARAMS(150, 1),
+ {0, 0},
+};
+
+static struct clk_regmap g12a_pcie_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = g12a_pcie_pll_table,
+ .init_regs = g12a_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco",
+ .ops = &meson_clk_pcie_pll_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "pcie_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_pcie_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_PCIE_PLL_CNTL0,
+ .shift = 16,
+ .width = 5,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_od",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "pcie_pll_dco_div2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_pll",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "pcie_pll_od" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
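A worked check of the 100 MHz target, assuming the usual 24 MHz xtal and the field values the init sequence above programs into HHI_PCIE_PLL_CNTL0 (M = 150, N = 1, OD = 9):

/*
 *   pcie_pll_dco      = 24 MHz * 150 / 1 = 3600 MHz
 *   pcie_pll_dco_div2 = 3600 MHz / 2     = 1800 MHz
 *   pcie_pll_od       = 1800 MHz / 9     =  200 MHz
 *   pcie_pll          =  200 MHz / 2     =  100 MHz
 */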
+
static struct clk_regmap g12a_hdmi_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -960,14 +1384,14 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = {
/* VPU Clock */
static const char * const g12a_vpu_parent_names[] = {
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
+ "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
"mpll1", "vid_pll", "hifi_pll", "gp0_pll",
};
static struct clk_regmap g12a_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
- .mask = 0x3,
+ .mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
@@ -1011,7 +1435,7 @@ static struct clk_regmap g12a_vpu_0 = {
static struct clk_regmap g12a_vpu_1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
- .mask = 0x3,
+ .mask = 0x7,
.shift = 25,
},
.hw.init = &(struct clk_init_data){
@@ -1071,6 +1495,151 @@ static struct clk_regmap g12a_vpu = {
},
};
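The mask widening follows from the parent list: g12a_vpu_parent_names now carries eight entries, so the select field needs three bits, while 0x3 could only reach the first four parents (fclk_div3/4/5/7). Expressed as a quick calculation (illustrative, not how the driver derives it):

/* 8 parents -> ilog2(8) = 3 select bits -> mask = BIT(3) - 1 = 0x7 */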
+/* VDEC clocks */
+
+static const char * const g12a_vdec_parent_names[] = {
+ "fclk_div2p5", "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
+ "hifi_pll", "gp0_pll",
+};
+
+static struct clk_regmap g12a_vdec_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevcf_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevcf_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevcf_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevcf_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hevcf_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevcf = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hevcf",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hevcf_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevc_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevc_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevc = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hevc",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* VAPB Clock */
static const char * const g12a_vapb_parent_names[] = {
@@ -2167,6 +2736,39 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_MALI] = &g12a_mali.hw,
[CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
[CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
+ [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
+ [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
+ [CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
+ [CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
+ [CLKID_CPU_CLK_DIV16] = &g12a_cpu_clk_div16.hw,
+ [CLKID_CPU_CLK_APB_DIV] = &g12a_cpu_clk_apb_div.hw,
+ [CLKID_CPU_CLK_APB] = &g12a_cpu_clk_apb.hw,
+ [CLKID_CPU_CLK_ATB_DIV] = &g12a_cpu_clk_atb_div.hw,
+ [CLKID_CPU_CLK_ATB] = &g12a_cpu_clk_atb.hw,
+ [CLKID_CPU_CLK_AXI_DIV] = &g12a_cpu_clk_axi_div.hw,
+ [CLKID_CPU_CLK_AXI] = &g12a_cpu_clk_axi.hw,
+ [CLKID_CPU_CLK_TRACE_DIV] = &g12a_cpu_clk_trace_div.hw,
+ [CLKID_CPU_CLK_TRACE] = &g12a_cpu_clk_trace.hw,
+ [CLKID_PCIE_PLL_DCO] = &g12a_pcie_pll_dco.hw,
+ [CLKID_PCIE_PLL_DCO_DIV2] = &g12a_pcie_pll_dco_div2.hw,
+ [CLKID_PCIE_PLL_OD] = &g12a_pcie_pll_od.hw,
+ [CLKID_PCIE_PLL] = &g12a_pcie_pll.hw,
+ [CLKID_VDEC_1_SEL] = &g12a_vdec_1_sel.hw,
+ [CLKID_VDEC_1_DIV] = &g12a_vdec_1_div.hw,
+ [CLKID_VDEC_1] = &g12a_vdec_1.hw,
+ [CLKID_VDEC_HEVC_SEL] = &g12a_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &g12a_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC] = &g12a_vdec_hevc.hw,
+ [CLKID_VDEC_HEVCF_SEL] = &g12a_vdec_hevcf_sel.hw,
+ [CLKID_VDEC_HEVCF_DIV] = &g12a_vdec_hevcf_div.hw,
+ [CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2335,6 +2937,35 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&g12a_mali_1,
&g12a_mali,
&g12a_mpll_50m,
+ &g12a_sys_pll_div16_en,
+ &g12a_cpu_clk_premux0,
+ &g12a_cpu_clk_mux0_div,
+ &g12a_cpu_clk_postmux0,
+ &g12a_cpu_clk_premux1,
+ &g12a_cpu_clk_mux1_div,
+ &g12a_cpu_clk_postmux1,
+ &g12a_cpu_clk_dyn,
+ &g12a_cpu_clk,
+ &g12a_cpu_clk_div16_en,
+ &g12a_cpu_clk_apb_div,
+ &g12a_cpu_clk_apb,
+ &g12a_cpu_clk_atb_div,
+ &g12a_cpu_clk_atb,
+ &g12a_cpu_clk_axi_div,
+ &g12a_cpu_clk_axi,
+ &g12a_cpu_clk_trace_div,
+ &g12a_cpu_clk_trace,
+ &g12a_pcie_pll_od,
+ &g12a_pcie_pll_dco,
+ &g12a_vdec_1_sel,
+ &g12a_vdec_1_div,
+ &g12a_vdec_1,
+ &g12a_vdec_hevc_sel,
+ &g12a_vdec_hevc_div,
+ &g12a_vdec_hevc,
+ &g12a_vdec_hevcf_sel,
+ &g12a_vdec_hevcf_div,
+ &g12a_vdec_hevcf,
};
static const struct meson_eeclkc_data g12a_clkc_data = {
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index f399dfe1401c..39c41af70804 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -50,6 +50,7 @@
#define HHI_GCLK_MPEG2 0x148
#define HHI_GCLK_OTHER 0x150
#define HHI_GCLK_OTHER2 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
#define HHI_VID_CLK_DIV 0x164
#define HHI_MPEG_CLK_CNTL 0x174
#define HHI_AUD_CLK_CNTL 0x178
@@ -166,8 +167,36 @@
#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_1_DIV 173
#define CLKID_MPLL_5OM_DIV 176
+#define CLKID_SYS_PLL_DIV16_EN 178
+#define CLKID_SYS_PLL_DIV16 179
+#define CLKID_CPU_CLK_DYN0_SEL 180
+#define CLKID_CPU_CLK_DYN0_DIV 181
+#define CLKID_CPU_CLK_DYN0 182
+#define CLKID_CPU_CLK_DYN1_SEL 183
+#define CLKID_CPU_CLK_DYN1_DIV 184
+#define CLKID_CPU_CLK_DYN1 185
+#define CLKID_CPU_CLK_DYN 186
+#define CLKID_CPU_CLK_DIV16_EN 188
+#define CLKID_CPU_CLK_DIV16 189
+#define CLKID_CPU_CLK_APB_DIV 190
+#define CLKID_CPU_CLK_APB 191
+#define CLKID_CPU_CLK_ATB_DIV 192
+#define CLKID_CPU_CLK_ATB 193
+#define CLKID_CPU_CLK_AXI_DIV 194
+#define CLKID_CPU_CLK_AXI 195
+#define CLKID_CPU_CLK_TRACE_DIV 196
+#define CLKID_CPU_CLK_TRACE 197
+#define CLKID_PCIE_PLL_DCO 198
+#define CLKID_PCIE_PLL_DCO_DIV2 199
+#define CLKID_PCIE_PLL_OD 200
+#define CLKID_VDEC_1_SEL 202
+#define CLKID_VDEC_1_DIV 203
+#define CLKID_VDEC_HEVC_SEL 205
+#define CLKID_VDEC_HEVC_DIV 206
+#define CLKID_VDEC_HEVCF_SEL 208
+#define CLKID_VDEC_HEVCF_DIV 209
-#define NR_CLKS 178
+#define NR_CLKS 211
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/g12a-clkc.h>
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 04df2e208ed6..29ffb4fde714 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -2216,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = {
.offset = HHI_VDEC_CLK_CNTL,
.shift = 0,
.width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "vdec_1_div",
@@ -2261,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
.offset = HHI_VDEC2_CLK_CNTL,
.shift = 16,
.width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_div",
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 576ad42252d0..37cf0f01bb5d 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1703,6 +1703,456 @@ static struct clk_regmap meson8b_mali = {
},
};
+static const struct pll_params_table meson8m2_gp_pll_params_table[] = {
+ PLL_PARAMS(182, 3),
+ { /* sentinel */ },
+};
+
+static struct clk_regmap meson8m2_gp_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .l = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = meson8m2_gp_pll_params_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap meson8m2_gp_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gp_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
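With the single PLL_PARAMS(182, 3) entry and assuming the usual 24 MHz xtal, the Meson8m2 GP PLL rates work out to:

/*
 *   gp_pll_dco = 24 MHz * 182 / 3 = 1456 MHz
 *   gp_pll     = gp_pll_dco / 2^OD   (2-bit power-of-two post divider,
 *                                     i.e. 1456 / 728 / 364 / 182 MHz)
 */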
+
+static const char * const mmeson8b_vpu_0_1_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static const char * const mmeson8m2_vpu_0_1_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "gp_pll"
+};
+
+static struct clk_regmap meson8b_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = mmeson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8m2_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = mmeson8m2_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vpu_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vpu_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = mmeson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8m2_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = mmeson8m2_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vpu_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vpu_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static const char * const meson8b_vdec_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "mpll2", "mpll1"
+};
+
+static struct clk_regmap meson8b_vdec_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_1_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_1_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1_2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC3_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1_2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC3_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_1_2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_1_2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC3_CLK_CNTL,
+ .mask = 0x1,
+ .shift = 15,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "vdec_1_1", "vdec_1_2" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hcodec_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hcodec_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hcodec_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hcodec_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hcodec_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hcodec = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hcodec",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hcodec_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_2_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_2_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_2_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hevc_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hevc_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hevc_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hevc_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hevc = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x1,
+ .shift = 31,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc",
+ .ops = &clk_regmap_mux_ops,
+ /* TODO: The second parent is currently unknown */
+ .parent_names = (const char *[]){ "vdec_hevc_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
@@ -1966,6 +2416,22 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = {
[CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
[CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
[CLKID_MALI] = &meson8b_mali_0.hw,
+ [CLKID_VPU_0_SEL] = &meson8b_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw,
+ [CLKID_VPU] = &meson8b_vpu_0.hw,
+ [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw,
+ [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw,
+ [CLKID_VDEC_1] = &meson8b_vdec_1_1.hw,
+ [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw,
+ [CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw,
+ [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw,
+ [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw,
+ [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw,
+ [CLKID_VDEC_2] = &meson8b_vdec_2.hw,
+ [CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw,
+ [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -2152,6 +2618,240 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw,
[CLKID_MALI_1] = &meson8b_mali_1.hw,
[CLKID_MALI] = &meson8b_mali.hw,
+ [CLKID_VPU_0_SEL] = &meson8b_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw,
+ [CLKID_VPU_0] = &meson8b_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &meson8b_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &meson8b_vpu_1_div.hw,
+ [CLKID_VPU_1] = &meson8b_vpu_1.hw,
+ [CLKID_VPU] = &meson8b_vpu.hw,
+ [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw,
+ [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw,
+ [CLKID_VDEC_1_1] = &meson8b_vdec_1_1.hw,
+ [CLKID_VDEC_1_2_DIV] = &meson8b_vdec_1_2_div.hw,
+ [CLKID_VDEC_1_2] = &meson8b_vdec_1_2.hw,
+ [CLKID_VDEC_1] = &meson8b_vdec_1.hw,
+ [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw,
+ [CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw,
+ [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw,
+ [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw,
+ [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw,
+ [CLKID_VDEC_2] = &meson8b_vdec_2.hw,
+ [CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw,
+ [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw,
+ [CLK_NR_CLKS] = NULL,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static struct clk_hw_onecell_data meson8m2_hw_onecell_data = {
+ .hws = {
+ [CLKID_XTAL] = &meson8b_xtal.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_DDR] = &meson8b_ddr.hw,
+ [CLKID_DOS] = &meson8b_dos.hw,
+ [CLKID_ISA] = &meson8b_isa.hw,
+ [CLKID_PL301] = &meson8b_pl301.hw,
+ [CLKID_PERIPHS] = &meson8b_periphs.hw,
+ [CLKID_SPICC] = &meson8b_spicc.hw,
+ [CLKID_I2C] = &meson8b_i2c.hw,
+ [CLKID_SAR_ADC] = &meson8b_sar_adc.hw,
+ [CLKID_SMART_CARD] = &meson8b_smart_card.hw,
+ [CLKID_RNG0] = &meson8b_rng0.hw,
+ [CLKID_UART0] = &meson8b_uart0.hw,
+ [CLKID_SDHC] = &meson8b_sdhc.hw,
+ [CLKID_STREAM] = &meson8b_stream.hw,
+ [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw,
+ [CLKID_SDIO] = &meson8b_sdio.hw,
+ [CLKID_ABUF] = &meson8b_abuf.hw,
+ [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw,
+ [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw,
+ [CLKID_SPI] = &meson8b_spi.hw,
+ [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw,
+ [CLKID_ETH] = &meson8b_eth.hw,
+ [CLKID_DEMUX] = &meson8b_demux.hw,
+ [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw,
+ [CLKID_IEC958] = &meson8b_iec958.hw,
+ [CLKID_I2S_OUT] = &meson8b_i2s_out.hw,
+ [CLKID_AMCLK] = &meson8b_amclk.hw,
+ [CLKID_AIFIFO2] = &meson8b_aififo2.hw,
+ [CLKID_MIXER] = &meson8b_mixer.hw,
+ [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw,
+ [CLKID_ADC] = &meson8b_adc.hw,
+ [CLKID_BLKMV] = &meson8b_blkmv.hw,
+ [CLKID_AIU] = &meson8b_aiu.hw,
+ [CLKID_UART1] = &meson8b_uart1.hw,
+ [CLKID_G2D] = &meson8b_g2d.hw,
+ [CLKID_USB0] = &meson8b_usb0.hw,
+ [CLKID_USB1] = &meson8b_usb1.hw,
+ [CLKID_RESET] = &meson8b_reset.hw,
+ [CLKID_NAND] = &meson8b_nand.hw,
+ [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw,
+ [CLKID_USB] = &meson8b_usb.hw,
+ [CLKID_VDIN1] = &meson8b_vdin1.hw,
+ [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw,
+ [CLKID_EFUSE] = &meson8b_efuse.hw,
+ [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw,
+ [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw,
+ [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw,
+ [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw,
+ [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw,
+ [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw,
+ [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw,
+ [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw,
+ [CLKID_DVIN] = &meson8b_dvin.hw,
+ [CLKID_UART2] = &meson8b_uart2.hw,
+ [CLKID_SANA] = &meson8b_sana.hw,
+ [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw,
+ [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw,
+ [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw,
+ [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw,
+ [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw,
+ [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw,
+ [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw,
+ [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw,
+ [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw,
+ [CLKID_DAC_CLK] = &meson8b_dac_clk.hw,
+ [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw,
+ [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw,
+ [CLKID_ENC480P] = &meson8b_enc480p.hw,
+ [CLKID_RNG1] = &meson8b_rng1.hw,
+ [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw,
+ [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw,
+ [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw,
+ [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw,
+ [CLKID_EDP] = &meson8b_edp.hw,
+ [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw,
+ [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw,
+ [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw,
+ [CLKID_AO_IFACE] = &meson8b_ao_iface.hw,
+ [CLKID_MPLL0] = &meson8b_mpll0.hw,
+ [CLKID_MPLL1] = &meson8b_mpll1.hw,
+ [CLKID_MPLL2] = &meson8b_mpll2.hw,
+ [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
+ [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
+ [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw,
+ [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw,
+ [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
+ [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
+ [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
+ [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
+ [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
+ [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
+ [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
+ [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw,
+ [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw,
+ [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw,
+ [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw,
+ [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
+ [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
+ [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
+ [CLKID_APB] = &meson8b_apb_clk_gate.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
+ [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
+ [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
+ [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
+ [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw,
+ [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw,
+ [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw,
+ [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw,
+ [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
+ [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
+ [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
+ [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
+ [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw,
+ [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw,
+ [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw,
+ [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw,
+ [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw,
+ [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw,
+ [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw,
+ [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw,
+ [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw,
+ [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
+ [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
+ [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
+ [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
+ [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
+ [CLKID_MALI_0] = &meson8b_mali_0.hw,
+ [CLKID_MALI_1_SEL] = &meson8b_mali_1_sel.hw,
+ [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw,
+ [CLKID_MALI_1] = &meson8b_mali_1.hw,
+ [CLKID_MALI] = &meson8b_mali.hw,
+ [CLKID_GP_PLL_DCO] = &meson8m2_gp_pll_dco.hw,
+ [CLKID_GP_PLL] = &meson8m2_gp_pll.hw,
+ [CLKID_VPU_0_SEL] = &meson8m2_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw,
+ [CLKID_VPU_0] = &meson8b_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &meson8m2_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &meson8b_vpu_1_div.hw,
+ [CLKID_VPU_1] = &meson8b_vpu_1.hw,
+ [CLKID_VPU] = &meson8b_vpu.hw,
+ [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw,
+ [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw,
+ [CLKID_VDEC_1_1] = &meson8b_vdec_1_1.hw,
+ [CLKID_VDEC_1_2_DIV] = &meson8b_vdec_1_2_div.hw,
+ [CLKID_VDEC_1_2] = &meson8b_vdec_1_2.hw,
+ [CLKID_VDEC_1] = &meson8b_vdec_1.hw,
+ [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw,
+ [CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw,
+ [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw,
+ [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw,
+ [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw,
+ [CLKID_VDEC_2] = &meson8b_vdec_2.hw,
+ [CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw,
+ [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -2314,6 +3014,33 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_mali_1_div,
&meson8b_mali_1,
&meson8b_mali,
+ &meson8m2_gp_pll_dco,
+ &meson8m2_gp_pll,
+ &meson8b_vpu_0_sel,
+ &meson8m2_vpu_0_sel,
+ &meson8b_vpu_0_div,
+ &meson8b_vpu_0,
+ &meson8b_vpu_1_sel,
+ &meson8m2_vpu_1_sel,
+ &meson8b_vpu_1_div,
+ &meson8b_vpu_1,
+ &meson8b_vpu,
+ &meson8b_vdec_1_sel,
+ &meson8b_vdec_1_1_div,
+ &meson8b_vdec_1_1,
+ &meson8b_vdec_1_2_div,
+ &meson8b_vdec_1_2,
+ &meson8b_vdec_1,
+ &meson8b_vdec_hcodec_sel,
+ &meson8b_vdec_hcodec_div,
+ &meson8b_vdec_hcodec,
+ &meson8b_vdec_2_sel,
+ &meson8b_vdec_2_div,
+ &meson8b_vdec_2,
+ &meson8b_vdec_hevc_sel,
+ &meson8b_vdec_hevc_div,
+ &meson8b_vdec_hevc_en,
+ &meson8b_vdec_hevc,
};
static const struct meson8b_clk_reset_line {
@@ -2558,9 +3285,14 @@ static void __init meson8b_clkc_init(struct device_node *np)
return meson8b_clkc_init_common(np, &meson8b_hw_onecell_data);
}
+static void __init meson8m2_clkc_init(struct device_node *np)
+{
+ return meson8b_clkc_init_common(np, &meson8m2_hw_onecell_data);
+}
+
CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc",
meson8_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc",
meson8b_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc",
- meson8b_clkc_init);
+ meson8m2_clkc_init);
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index b8c58faeae52..ed37196187e6 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -19,6 +19,7 @@
*
* [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
*/
+#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
@@ -34,7 +35,11 @@
#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
+#define HHI_VPU_CLK_CNTL 0x1bc /* 0x6f offset in data sheet */
#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
+#define HHI_VDEC_CLK_CNTL 0x1e0 /* 0x78 offset in data sheet */
+#define HHI_VDEC2_CLK_CNTL 0x1e4 /* 0x79 offset in data sheet */
+#define HHI_VDEC3_CLK_CNTL 0x1e8 /* 0x7a offset in data sheet */
#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
@@ -146,8 +151,28 @@
#define CLKID_MALI_1_SEL 178
#define CLKID_MALI_1_DIV 179
#define CLKID_MALI_1 180
+#define CLKID_GP_PLL_DCO 181
+#define CLKID_GP_PLL 182
+#define CLKID_VPU_0_SEL 183
+#define CLKID_VPU_0_DIV 184
+#define CLKID_VPU_0 185
+#define CLKID_VPU_1_SEL 186
+#define CLKID_VPU_1_DIV 187
+#define CLKID_VPU_1 189
+#define CLKID_VDEC_1_SEL 191
+#define CLKID_VDEC_1_1_DIV 192
+#define CLKID_VDEC_1_1 193
+#define CLKID_VDEC_1_2_DIV 194
+#define CLKID_VDEC_1_2 195
+#define CLKID_VDEC_HCODEC_SEL 197
+#define CLKID_VDEC_HCODEC_DIV 198
+#define CLKID_VDEC_2_SEL 200
+#define CLKID_VDEC_2_DIV 201
+#define CLKID_VDEC_HEVC_SEL 203
+#define CLKID_VDEC_HEVC_DIV 204
+#define CLKID_VDEC_HEVC_EN 205
-#define CLK_NR_CLKS 181
+#define CLK_NR_CLKS 207
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 08bcc01c0923..daff235bc763 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -82,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
div = _get_table_val(meson_parm_read(clk->map, &pll_div->val),
meson_parm_read(clk->map, &pll_div->sel));
if (!div || !div->divider) {
- pr_info("%s: Invalid config value for vid_pll_div\n", __func__);
- return parent_rate;
+ pr_debug("%s: Invalid config value for vid_pll_div\n", __func__);
+ return 0;
}
return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider);
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index 7355595c42e2..1755916ddef2 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -108,7 +108,7 @@ struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
init.name = name;
init.ops = &mmp_clk_gate_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 6ab3c2e627c7..785dbede4835 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -240,7 +240,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
int n;
if (ctrl) {
- pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n");
+ pr_err("mvebu-clk-gating: cannot instantiate more than one gateable clock device\n");
return;
}
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 9235a331b588..b6de283f45e3 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -21,7 +21,7 @@
* - Equal to SDIO clock
* - 2/5 PLL0
*
- * CP110 has 32 gatable clocks, for the various peripherals in the IP.
+ * CP110 has 32 gateable clocks, for the various peripherals in the IP.
*/
#define pr_fmt(fmt) "cp110-system-controller: " fmt
@@ -57,7 +57,7 @@ enum {
#define CP110_CORE_NAND 4
#define CP110_CORE_SDIO 5
-/* A number of gatable clocks need special handling */
+/* A number of gateable clocks need special handling */
#define CP110_GATE_AUDIO 0
#define CP110_GATE_COMM_UNIT 1
#define CP110_GATE_NAND 2
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 27781b49eb82..5969f620607a 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -142,7 +142,7 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
* Divider field is write only, so divider stat field must
* be read so divider field can be set accordingly.
*/
- val = clk_readl(gate->reg);
+ val = readl(gate->reg);
if (val & LPC18XX_CCU_DIVSTAT)
val |= LPC18XX_CCU_DIV;
@@ -155,12 +155,12 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
* and the next write should clear the RUN bit.
*/
val |= LPC18XX_CCU_AUTO;
- clk_writel(val, gate->reg);
+ writel(val, gate->reg);
val &= ~LPC18XX_CCU_RUN;
}
- clk_writel(val, gate->reg);
+ writel(val, gate->reg);
return 0;
}
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 2531174b399e..f5bc8bd192b7 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -352,9 +352,9 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
struct lpc18xx_pll *pll = to_lpc_pll(hw);
u32 ctrl, mdiv, msel, npdiv;
- ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
- mdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
- npdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
+ ctrl = readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ mdiv = readl(pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
+ npdiv = readl(pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
if (ctrl & LPC18XX_PLL0_CTRL_BYPASS)
return parent_rate;
@@ -415,25 +415,25 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
m |= lpc18xx_pll0_msel2seli(m) << LPC18XX_PLL0_MDIV_SELI_SHIFT;
/* Power down PLL, disable clk output and dividers */
- ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ ctrl = readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
ctrl |= LPC18XX_PLL0_CTRL_PD;
ctrl &= ~(LPC18XX_PLL0_CTRL_BYPASS | LPC18XX_PLL0_CTRL_DIRECTI |
LPC18XX_PLL0_CTRL_DIRECTO | LPC18XX_PLL0_CTRL_CLKEN);
- clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
/* Configure new PLL settings */
- clk_writel(m, pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
- clk_writel(LPC18XX_PLL0_NP_DIVS_1, pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
+ writel(m, pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
+ writel(LPC18XX_PLL0_NP_DIVS_1, pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
/* Power up PLL and wait for lock */
ctrl &= ~LPC18XX_PLL0_CTRL_PD;
- clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
do {
udelay(10);
- stat = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_STAT);
+ stat = readl(pll->reg + LPC18XX_CGU_PLL0USB_STAT);
if (stat & LPC18XX_PLL0_STAT_LOCK) {
ctrl |= LPC18XX_PLL0_CTRL_CLKEN;
- clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
return 0;
}
@@ -458,8 +458,8 @@ static unsigned long lpc18xx_pll1_recalc_rate(struct clk_hw *hw,
bool direct, fbsel;
u32 stat, ctrl;
- stat = clk_readl(pll->reg + LPC18XX_CGU_PLL1_STAT);
- ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL1_CTRL);
+ stat = readl(pll->reg + LPC18XX_CGU_PLL1_STAT);
+ ctrl = readl(pll->reg + LPC18XX_CGU_PLL1_CTRL);
direct = (ctrl & LPC18XX_PLL1_CTRL_DIRECT) ? true : false;
fbsel = (ctrl & LPC18XX_PLL1_CTRL_FBSEL) ? true : false;
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 5eeecee17b69..7524d19fe60b 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -1085,13 +1085,12 @@ struct clk_hw_proto {
};
};
-#define LPC32XX_DEFINE_FIXED(_idx, _rate, _flags) \
+#define LPC32XX_DEFINE_FIXED(_idx, _rate) \
[CLK_PREFIX(_idx)] = { \
.type = CLK_FIXED, \
{ \
.f = { \
.fixed_rate = (_rate), \
- .flags = (_flags), \
}, \
}, \
}
@@ -1225,7 +1224,7 @@ struct clk_hw_proto {
}
static struct clk_hw_proto clk_hw_proto[LPC32XX_CLK_HW_MAX] = {
- LPC32XX_DEFINE_FIXED(RTC, 32768, 0),
+ LPC32XX_DEFINE_FIXED(RTC, 32768),
LPC32XX_DEFINE_PLL(PLL397X, pll_397x, HCLKPLL_CTRL, BIT(1)),
LPC32XX_DEFINE_PLL(HCLK_PLL, hclk_pll, HCLKPLL_CTRL, PLL_CTRL_ENABLE),
LPC32XX_DEFINE_PLL(USB_PLL, usb_pll, USB_CTRL, PLL_CTRL_ENABLE),
@@ -1468,7 +1467,7 @@ static struct clk * __init lpc32xx_clk_register(u32 id)
struct clk_fixed_rate *fixed = &clk_hw->f;
clk = clk_register_fixed_rate(NULL, lpc32xx_clk->name,
- parents[0], fixed->flags, fixed->fixed_rate);
+ parents[0], 0, fixed->fixed_rate);
break;
}
default:
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1c04575c118f..18bdf34d5e64 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -243,6 +243,12 @@ config SDM_GCC_660
Say Y if you want to use peripheral devices such as UART, SPI,
i2C, USB, UFS, SDDC, PCIe, etc.
+config QCS_TURING_404
+ tristate "QCS404 Turing Clock Controller"
+ help
+	  Support for the Turing Clock Controller on QCS404. It provides
+	  clocks and resets for the Turing subsystem.
+
config SDM_GCC_845
tristate "SDM845 Global Clock Controller"
select QCOM_GDSC
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index ee8d0698e370..f0768fb1f037 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 99446bf630aa..f869fc6aaed6 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -146,6 +146,12 @@ const struct clk_ops clk_branch2_ops = {
};
EXPORT_SYMBOL_GPL(clk_branch2_ops);
+const struct clk_ops clk_branch2_aon_ops = {
+ .enable = clk_branch2_enable,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_aon_ops);
+
const struct clk_ops clk_branch_simple_ops = {
.enable = clk_enable_regmap,
.disable = clk_disable_regmap,
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index b3561e0a3984..17a58119165e 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -40,6 +40,7 @@ struct clk_branch {
extern const struct clk_ops clk_branch_ops;
extern const struct clk_ops clk_branch2_ops;
extern const struct clk_ops clk_branch_simple_ops;
+extern const struct clk_ops clk_branch2_aon_ops;
#define to_clk_branch(_hw) \
container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
index 6cd6261be7ac..4df6c8d24c24 100644
--- a/drivers/clk/qcom/clk-regmap-mux-div.h
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index c240fba794c7..033688264c7b 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -2161,7 +2161,7 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
static struct clk_branch gcc_pcie_0_pipe_clk = {
.halt_reg = 0x6b018,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x6b018,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 5a62f64ada93..a54807eb3b28 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -260,6 +260,20 @@ static const char * const gcc_parent_names_15[] = {
"core_bi_pll_test_se",
};
+static const struct parent_map gcc_parent_map_16[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_16[] = {
+ "cxo",
+ "gpll0_out_main",
+ "gpll0_out_aux",
+ "core_bi_pll_test_se",
+};
+
static struct clk_fixed_factor cxo = {
.mult = 1,
.div = 1,
@@ -1194,6 +1208,28 @@ static struct clk_rcg2 vsync_clk_src = {
},
};
+static const struct freq_tbl ftbl_cdsp_bimc_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cdsp_bimc_clk_src = {
+ .cmd_rcgr = 0x5e010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_16,
+ .freq_tbl = ftbl_cdsp_bimc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cdsp_bimc_clk_src",
+ .parent_names = gcc_parent_names_16,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_apss_ahb_clk = {
.halt_reg = 0x4601c,
.halt_check = BRANCH_HALT_VOTED,
@@ -1255,6 +1291,24 @@ static struct clk_branch gcc_bimc_gpu_clk = {
},
};
+static struct clk_branch gcc_bimc_cdsp_clk = {
+ .halt_reg = 0x31030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_cdsp_clk",
+ .parent_names = (const char *[]) {
+ "cdsp_bimc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_bimc_mdss_clk = {
.halt_reg = 0x31038,
.halt_check = BRANCH_HALT,
@@ -1792,6 +1846,24 @@ static struct clk_branch gcc_gfx_tbu_clk = {
},
};
+static struct clk_branch gcc_cdsp_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x13020,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cdsp_tbu_clk",
+ .parent_names = (const char *[]) {
+ "cdsp_bimc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x8000,
.halt_check = BRANCH_HALT,
@@ -2304,6 +2376,19 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = {
},
};
+static struct clk_branch gcc_cdsp_cfg_ahb_clk = {
+ .halt_reg = 0x5e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cdsp_cfg_ahb_cbcr",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sdcc2_ahb_clk = {
.halt_reg = 0x4301c,
.halt_check = BRANCH_HALT,
@@ -2548,6 +2633,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_ESC0_CLK_SRC] = &esc0_clk_src.clkr,
[GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
[GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_CDSP_CLK] = &gcc_bimc_cdsp_clk.clkr,
[GCC_BIMC_MDSS_CLK] = &gcc_bimc_mdss_clk.clkr,
[GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
[GCC_BLSP1_QUP0_I2C_APPS_CLK] = &gcc_blsp1_qup0_i2c_apps_clk.clkr,
@@ -2605,6 +2691,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
[GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_CDSP_CFG_AHB_CLK] = &gcc_cdsp_cfg_ahb_clk.clkr,
[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
[GCC_SYS_NOC_USB3_CLK] = &gcc_sys_noc_usb3_clk.clkr,
@@ -2645,6 +2732,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
[GCC_USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
[GCC_VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_CDSP_BIMC_CLK_SRC] = &cdsp_bimc_clk_src.clkr,
[GCC_USB_HS_INACTIVITY_TIMERS_CLK] =
&gcc_usb_hs_inactivity_timers_clk.clkr,
[GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
@@ -2653,6 +2741,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr,
[GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
[GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_CDSP_TBU_CLK] = &gcc_cdsp_tbu_clk.clkr,
[GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
[GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
[GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
@@ -2664,6 +2753,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
static const struct qcom_reset_map gcc_qcs404_resets[] = {
[GCC_GENI_IR_BCR] = { 0x0F000 },
+ [GCC_CDSP_RESTART] = { 0x18000 },
[GCC_USB_HS_BCR] = { 0x41000 },
[GCC_USB2_HS_PHY_ONLY_BCR] = { 0x41034 },
[GCC_QUSB2_PHY_BCR] = { 0x4103c },
diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c
new file mode 100644
index 000000000000..aa859e6ec9bd
--- /dev/null
+++ b/drivers/clk/qcom/turingcc-qcs404.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,turingcc-qcs404.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+#include "reset.h"
+
+static struct clk_branch turing_wrapper_aon_cbcr = {
+ .halt_reg = 0x5098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_wrapper_aon_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch turing_q6ss_ahbm_aon_cbcr = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_q6ss_ahbm_aon_cbcr",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch turing_q6ss_q6_axim_clk = {
+ .halt_reg = 0xb000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_q6ss_q6_axim_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch turing_q6ss_ahbs_aon_cbcr = {
+ .halt_reg = 0x10000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_q6ss_ahbs_aon_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch turing_wrapper_qos_ahbs_aon_cbcr = {
+ .halt_reg = 0x11014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_wrapper_qos_ahbs_aon_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_regmap *turingcc_clocks[] = {
+ [TURING_WRAPPER_AON_CLK] = &turing_wrapper_aon_cbcr.clkr,
+ [TURING_Q6SS_AHBM_AON_CLK] = &turing_q6ss_ahbm_aon_cbcr.clkr,
+ [TURING_Q6SS_Q6_AXIM_CLK] = &turing_q6ss_q6_axim_clk.clkr,
+ [TURING_Q6SS_AHBS_AON_CLK] = &turing_q6ss_ahbs_aon_cbcr.clkr,
+ [TURING_WRAPPER_QOS_AHBS_AON_CLK] = &turing_wrapper_qos_ahbs_aon_cbcr.clkr,
+};
+
+static const struct regmap_config turingcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc turingcc_desc = {
+ .config = &turingcc_regmap_config,
+ .clks = turingcc_clocks,
+ .num_clks = ARRAY_SIZE(turingcc_clocks),
+};
+
+static int turingcc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ ret = qcom_cc_probe(pdev, &turingcc_desc);
+ if (ret < 0)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int turingcc_remove(struct platform_device *pdev)
+{
+ pm_clk_destroy(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops turingcc_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id turingcc_match_table[] = {
+ { .compatible = "qcom,qcs404-turingcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, turingcc_match_table);
+
+static struct platform_driver turingcc_driver = {
+ .probe = turingcc_probe,
+ .remove = turingcc_remove,
+ .driver = {
+ .name = "qcs404-turingcc",
+ .of_match_table = turingcc_match_table,
+ .pm = &turingcc_pm_ops,
+ },
+};
+
+module_platform_driver(turingcc_driver);
+
+MODULE_DESCRIPTION("Qualcomm QCS404 Turing Clock Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index 57c49fe88295..cf65d4e0e116 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <dt-bindings/clock/r7s9210-cpg-mssr.h>
#include "renesas-cpg-mssr.h"
@@ -119,7 +120,7 @@ static void __init r7s9210_update_clk_table(struct clk *extal_clk,
if (clk_get_rate(extal_clk) > 12000000)
cpg_mode = 1;
- frqcr = clk_readl(base + CPG_FRQCR) & 0xFFF;
+ frqcr = readl(base + CPG_FRQCR) & 0xFFF;
if (frqcr == 0x012)
index = 0;
else if (frqcr == 0x112)
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 4d92b27a6153..76ed7d1bae36 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -71,8 +71,8 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
/* Core Clock Outputs */
- DEF_BASE("z", R8A774A1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
- DEF_BASE("z2", R8A774A1_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
+ DEF_GEN3_Z("z", R8A774A1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
+ DEF_GEN3_Z("z2", R8A774A1_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0),
DEF_FIXED("ztr", R8A774A1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A774A1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A774A1_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO),
DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO),
DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO),
- DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3),
- DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3),
+ DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3),
DEF_MOD("cmt3", 300, R8A774A1_CLK_R),
DEF_MOD("cmt2", 301, R8A774A1_CLK_R),
@@ -143,8 +143,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("rwdt", 402, R8A774A1_CLK_R),
DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP),
DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3),
- DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3),
+ DEF_MOD("audmac1", 501, R8A774A1_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A774A1_CLK_S1D2),
DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1),
@@ -165,9 +165,9 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("vspd0", 623, R8A774A1_CLK_S0D2),
DEF_MOD("vspb", 626, R8A774A1_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A774A1_CLK_S0D1),
- DEF_MOD("ehci1", 702, R8A774A1_CLK_S3D4),
- DEF_MOD("ehci0", 703, R8A774A1_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A774A1_CLK_S3D4),
+ DEF_MOD("ehci1", 702, R8A774A1_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A774A1_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A774A1_CLK_S3D2),
DEF_MOD("csi20", 714, R8A774A1_CLK_CSI0),
DEF_MOD("csi40", 716, R8A774A1_CLK_CSI0),
DEF_MOD("du2", 722, R8A774A1_CLK_S2D1),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 34e274f2a273..f91e7a484753 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -81,6 +81,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
/* Core Clock Outputs */
DEF_FIXED("za2", R8A774C0_CLK_ZA2, CLK_PLL0D24, 1, 1),
DEF_FIXED("za8", R8A774C0_CLK_ZA8, CLK_PLL0D8, 1, 1),
+ DEF_GEN3_Z("z2", R8A774C0_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL0, 4, 8),
DEF_FIXED("ztr", R8A774C0_CLK_ZTR, CLK_PLL1, 6, 1),
DEF_FIXED("zt", R8A774C0_CLK_ZT, CLK_PLL1, 4, 1),
DEF_FIXED("zx", R8A774C0_CLK_ZX, CLK_PLL1, 3, 1),
@@ -157,7 +158,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP),
DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4),
+ DEF_MOD("audmac0", 502, R8A774C0_CLK_S1D2),
DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C),
DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C),
DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C),
@@ -177,8 +178,8 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("vspb", 626, R8A774C0_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A774C0_CLK_S0D1),
- DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4),
+ DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D2),
DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0),
DEF_MOD("du1", 723, R8A774C0_CLK_S1D1),
DEF_MOD("du0", 724, R8A774C0_CLK_S1D1),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 86842c9fd314..9e9a6f2c31e8 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -3,6 +3,7 @@
* r8a7795 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*
* Based on clk-rcar-gen3.c
*
@@ -73,8 +74,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
/* Core Clock Outputs */
- DEF_BASE("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
- DEF_BASE("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
+ DEF_GEN3_Z("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
+ DEF_GEN3_Z("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0),
DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7795_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7795_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -129,8 +130,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("msiof2", 209, R8A7795_CLK_MSO),
DEF_MOD("msiof1", 210, R8A7795_CLK_MSO),
DEF_MOD("msiof0", 211, R8A7795_CLK_MSO),
- DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
- DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
+ DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
DEF_MOD("cmt3", 300, R8A7795_CLK_R),
@@ -153,16 +154,16 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("rwdt", 402, R8A7795_CLK_R),
DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3),
- DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3),
- DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
- DEF_MOD("drif6", 509, R8A7795_CLK_S3D2),
- DEF_MOD("drif5", 510, R8A7795_CLK_S3D2),
- DEF_MOD("drif4", 511, R8A7795_CLK_S3D2),
- DEF_MOD("drif3", 512, R8A7795_CLK_S3D2),
- DEF_MOD("drif2", 513, R8A7795_CLK_S3D2),
- DEF_MOD("drif1", 514, R8A7795_CLK_S3D2),
- DEF_MOD("drif0", 515, R8A7795_CLK_S3D2),
+ DEF_MOD("audmac1", 501, R8A7795_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A7795_CLK_S1D2),
+ DEF_MOD("drif31", 508, R8A7795_CLK_S3D2),
+ DEF_MOD("drif30", 509, R8A7795_CLK_S3D2),
+ DEF_MOD("drif21", 510, R8A7795_CLK_S3D2),
+ DEF_MOD("drif20", 511, R8A7795_CLK_S3D2),
+ DEF_MOD("drif11", 512, R8A7795_CLK_S3D2),
+ DEF_MOD("drif10", 513, R8A7795_CLK_S3D2),
+ DEF_MOD("drif01", 514, R8A7795_CLK_S3D2),
+ DEF_MOD("drif00", 515, R8A7795_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A7795_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A7795_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1),
@@ -194,12 +195,12 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1),
- DEF_MOD("ehci3", 700, R8A7795_CLK_S3D4),
- DEF_MOD("ehci2", 701, R8A7795_CLK_S3D4),
- DEF_MOD("ehci1", 702, R8A7795_CLK_S3D4),
- DEF_MOD("ehci0", 703, R8A7795_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A7795_CLK_S3D4),
- DEF_MOD("hsusb3", 705, R8A7795_CLK_S3D4),
+ DEF_MOD("ehci3", 700, R8A7795_CLK_S3D2),
+ DEF_MOD("ehci2", 701, R8A7795_CLK_S3D2),
+ DEF_MOD("ehci1", 702, R8A7795_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A7795_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A7795_CLK_S3D2),
+ DEF_MOD("hsusb3", 705, R8A7795_CLK_S3D2),
DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */
DEF_MOD("csi20", 714, R8A7795_CLK_CSI0),
DEF_MOD("csi41", 715, R8A7795_CLK_CSI0),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 12c455859f2c..d8e9af5d9ae9 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -3,6 +3,7 @@
* r8a7796 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2016 Glider bvba
+ * Copyright (C) 2018 Renesas Electronics Corp.
*
* Based on r8a7795-cpg-mssr.c
*
@@ -73,8 +74,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
/* Core Clock Outputs */
- DEF_BASE("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
- DEF_BASE("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
+ DEF_GEN3_Z("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
+ DEF_GEN3_Z("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0),
DEF_FIXED("ztr", R8A7796_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7796_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7796_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -126,8 +127,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("msiof2", 209, R8A7796_CLK_MSO),
DEF_MOD("msiof1", 210, R8A7796_CLK_MSO),
DEF_MOD("msiof0", 211, R8A7796_CLK_MSO),
- DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3),
- DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3),
+ DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
DEF_MOD("cmt3", 300, R8A7796_CLK_R),
DEF_MOD("cmt2", 301, R8A7796_CLK_R),
@@ -146,16 +147,16 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("rwdt", 402, R8A7796_CLK_R),
DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3),
- DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
- DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
- DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
- DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
- DEF_MOD("drif4", 511, R8A7796_CLK_S3D2),
- DEF_MOD("drif3", 512, R8A7796_CLK_S3D2),
- DEF_MOD("drif2", 513, R8A7796_CLK_S3D2),
- DEF_MOD("drif1", 514, R8A7796_CLK_S3D2),
- DEF_MOD("drif0", 515, R8A7796_CLK_S3D2),
+ DEF_MOD("audmac1", 501, R8A7796_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A7796_CLK_S1D2),
+ DEF_MOD("drif31", 508, R8A7796_CLK_S3D2),
+ DEF_MOD("drif30", 509, R8A7796_CLK_S3D2),
+ DEF_MOD("drif21", 510, R8A7796_CLK_S3D2),
+ DEF_MOD("drif20", 511, R8A7796_CLK_S3D2),
+ DEF_MOD("drif11", 512, R8A7796_CLK_S3D2),
+ DEF_MOD("drif10", 513, R8A7796_CLK_S3D2),
+ DEF_MOD("drif01", 514, R8A7796_CLK_S3D2),
+ DEF_MOD("drif00", 515, R8A7796_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A7796_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A7796_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A7796_CLK_S3D1),
@@ -176,9 +177,9 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("vspd0", 623, R8A7796_CLK_S0D2),
DEF_MOD("vspb", 626, R8A7796_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A7796_CLK_S0D1),
- DEF_MOD("ehci1", 702, R8A7796_CLK_S3D4),
- DEF_MOD("ehci0", 703, R8A7796_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A7796_CLK_S3D4),
+ DEF_MOD("ehci1", 702, R8A7796_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A7796_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A7796_CLK_S3D2),
DEF_MOD("csi20", 714, R8A7796_CLK_CSI0),
DEF_MOD("csi40", 716, R8A7796_CLK_CSI0),
DEF_MOD("du2", 722, R8A7796_CLK_S2D1),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index eb1cca58a1e1..8f87e314d949 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -3,6 +3,7 @@
* r8a77965 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ * Copyright (C) 2019 Renesas Electronics Corp.
*
* Based on r8a7795-cpg-mssr.c
*
@@ -71,7 +72,7 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
/* Core Clock Outputs */
- DEF_BASE("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_GEN3_Z("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
DEF_FIXED("ztr", R8A77965_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A77965_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A77965_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -123,8 +124,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("msiof2", 209, R8A77965_CLK_MSO),
DEF_MOD("msiof1", 210, R8A77965_CLK_MSO),
DEF_MOD("msiof0", 211, R8A77965_CLK_MSO),
- DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
- DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
+ DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
DEF_MOD("cmt3", 300, R8A77965_CLK_R),
@@ -146,16 +147,16 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("intc-ex", 407, R8A77965_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3),
- DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3),
- DEF_MOD("drif7", 508, R8A77965_CLK_S3D2),
- DEF_MOD("drif6", 509, R8A77965_CLK_S3D2),
- DEF_MOD("drif5", 510, R8A77965_CLK_S3D2),
- DEF_MOD("drif4", 511, R8A77965_CLK_S3D2),
- DEF_MOD("drif3", 512, R8A77965_CLK_S3D2),
- DEF_MOD("drif2", 513, R8A77965_CLK_S3D2),
- DEF_MOD("drif1", 514, R8A77965_CLK_S3D2),
- DEF_MOD("drif0", 515, R8A77965_CLK_S3D2),
+ DEF_MOD("audmac1", 501, R8A77965_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A77965_CLK_S1D2),
+ DEF_MOD("drif31", 508, R8A77965_CLK_S3D2),
+ DEF_MOD("drif30", 509, R8A77965_CLK_S3D2),
+ DEF_MOD("drif21", 510, R8A77965_CLK_S3D2),
+ DEF_MOD("drif20", 511, R8A77965_CLK_S3D2),
+ DEF_MOD("drif11", 512, R8A77965_CLK_S3D2),
+ DEF_MOD("drif10", 513, R8A77965_CLK_S3D2),
+ DEF_MOD("drif01", 514, R8A77965_CLK_S3D2),
+ DEF_MOD("drif00", 515, R8A77965_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A77965_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A77965_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A77965_CLK_S3D1),
@@ -175,9 +176,9 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("vspb", 626, R8A77965_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A77965_CLK_S0D1),
- DEF_MOD("ehci1", 702, R8A77965_CLK_S3D4),
- DEF_MOD("ehci0", 703, R8A77965_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A77965_CLK_S3D4),
+ DEF_MOD("ehci1", 702, R8A77965_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A77965_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A77965_CLK_S3D2),
DEF_MOD("csi20", 714, R8A77965_CLK_CSI0),
DEF_MOD("csi40", 716, R8A77965_CLK_CSI0),
DEF_MOD("du3", 721, R8A77965_CLK_S2D1),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index f9e07fcc0d96..7227f675e61f 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -171,7 +171,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
DEF_MOD("gpio1", 911, R8A77980_CLK_CP),
DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
- DEF_MOD("rpc-if", 917, R8A77980_CLK_RPC),
+ DEF_MOD("rpc-if", 917, R8A77980_CLK_RPCD2),
DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index 9a278c75c918..9570404baa58 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -2,7 +2,7 @@
/*
* r8a77990 Clock Pulse Generator / Module Standby and Software Reset
*
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*
* Based on r8a7795-cpg-mssr.c
*
@@ -81,6 +81,7 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
/* Core Clock Outputs */
DEF_FIXED("za2", R8A77990_CLK_ZA2, CLK_PLL0D24, 1, 1),
DEF_FIXED("za8", R8A77990_CLK_ZA8, CLK_PLL0D8, 1, 1),
+ DEF_GEN3_Z("z2", R8A77990_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL0, 4, 8),
DEF_FIXED("ztr", R8A77990_CLK_ZTR, CLK_PLL1, 6, 1),
DEF_FIXED("zt", R8A77990_CLK_ZT, CLK_PLL1, 4, 1),
DEF_FIXED("zx", R8A77990_CLK_ZX, CLK_PLL1, 3, 1),
@@ -152,15 +153,15 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("intc-ex", 407, R8A77990_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77990_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A77990_CLK_S3D4),
- DEF_MOD("drif7", 508, R8A77990_CLK_S3D2),
- DEF_MOD("drif6", 509, R8A77990_CLK_S3D2),
- DEF_MOD("drif5", 510, R8A77990_CLK_S3D2),
- DEF_MOD("drif4", 511, R8A77990_CLK_S3D2),
- DEF_MOD("drif3", 512, R8A77990_CLK_S3D2),
- DEF_MOD("drif2", 513, R8A77990_CLK_S3D2),
- DEF_MOD("drif1", 514, R8A77990_CLK_S3D2),
- DEF_MOD("drif0", 515, R8A77990_CLK_S3D2),
+ DEF_MOD("audmac0", 502, R8A77990_CLK_S1D2),
+ DEF_MOD("drif31", 508, R8A77990_CLK_S3D2),
+ DEF_MOD("drif30", 509, R8A77990_CLK_S3D2),
+ DEF_MOD("drif21", 510, R8A77990_CLK_S3D2),
+ DEF_MOD("drif20", 511, R8A77990_CLK_S3D2),
+ DEF_MOD("drif11", 512, R8A77990_CLK_S3D2),
+ DEF_MOD("drif10", 513, R8A77990_CLK_S3D2),
+ DEF_MOD("drif01", 514, R8A77990_CLK_S3D2),
+ DEF_MOD("drif00", 515, R8A77990_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A77990_CLK_S3D1C),
DEF_MOD("hscif3", 517, R8A77990_CLK_S3D1C),
DEF_MOD("hscif2", 518, R8A77990_CLK_S3D1C),
@@ -180,8 +181,8 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("vspb", 626, R8A77990_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A77990_CLK_S0D1),
- DEF_MOD("ehci0", 703, R8A77990_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A77990_CLK_S3D4),
+ DEF_MOD("ehci0", 703, R8A77990_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A77990_CLK_S3D2),
DEF_MOD("csi40", 716, R8A77990_CLK_CSI0),
DEF_MOD("du1", 723, R8A77990_CLK_S1D1),
DEF_MOD("du0", 724, R8A77990_CLK_S1D1),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index eee3874865a9..68707277b17b 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -133,7 +133,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("rwdt", 402, R8A77995_CLK_R),
DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2),
- DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
+ DEF_MOD("audmac0", 502, R8A77995_CLK_S1D2),
DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
DEF_MOD("thermal", 522, R8A77995_CLK_CP),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 658cb11b6f55..97c72477cd54 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -170,6 +170,7 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = {
D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, 0x8a3, 0x8a4, 0x8a5, 0, 0xb61, 0, 0),
D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, 0x8a6, 0x8a7, 0x8a8, 0, 0xb62, 0, 0),
D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, 0x8a9, 0x8aa, 0x8ab, 0, 0xb63, 0, 0),
+ D_GATE(CLK_PCI_USB, "clk_pci_usb", CLKOUT_D40, 0xe6, 0, 0, 0, 0, 0, 0),
D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, 0x2a4, 0x2a5, 0, 0, 0, 0, 0),
D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, 0x484, 0x485, 0, 0, 0, 0, 0),
D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, 0x340, 0, 0, 0, 0, 0, 0),
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index bff9551c7a38..db2f57ef2f99 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* R-Car Gen2 Clock Pulse Generator
*
* Copyright (C) 2016 Cogent Embedded Inc.
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 9a8071a8114d..d25c8ba00a65 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -3,6 +3,7 @@
* R-Car Gen3 Clock Pulse Generator
*
* Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2019 Renesas Electronics Corp.
*
* Based on clk-rcar-gen3.c
*
@@ -88,14 +89,13 @@ static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
#define CPG_FRQCRB 0x00000004
#define CPG_FRQCRB_KICK BIT(31)
#define CPG_FRQCRC 0x000000e0
-#define CPG_FRQCRC_ZFC_MASK GENMASK(12, 8)
-#define CPG_FRQCRC_Z2FC_MASK GENMASK(4, 0)
struct cpg_z_clk {
struct clk_hw hw;
void __iomem *reg;
void __iomem *kick_reg;
unsigned long mask;
+ unsigned int fixed_div;
};
#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
@@ -110,17 +110,18 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
val = readl(zclk->reg) & zclk->mask;
mult = 32 - (val >> __ffs(zclk->mask));
- /* Factor of 2 is for fixed divider */
- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 32 * 2);
+ return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
+ 32 * zclk->fixed_div);
}
static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- /* Factor of 2 is for fixed divider */
- unsigned long prate = *parent_rate / 2;
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned long prate;
unsigned int mult;
+ prate = *parent_rate / zclk->fixed_div;
mult = div_u64(rate * 32ULL, prate);
mult = clamp(mult, 1U, 32U);
@@ -134,8 +135,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned int mult;
unsigned int i;
- /* Factor of 2 is for fixed divider */
- mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
+ mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
+ parent_rate);
mult = clamp(mult, 1U, 32U);
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
@@ -178,7 +179,8 @@ static const struct clk_ops cpg_z_clk_ops = {
static struct clk * __init cpg_z_clk_register(const char *name,
const char *parent_name,
void __iomem *reg,
- unsigned long mask)
+ unsigned int div,
+ unsigned int offset)
{
struct clk_init_data init;
struct cpg_z_clk *zclk;
@@ -197,7 +199,8 @@ static struct clk * __init cpg_z_clk_register(const char *name,
zclk->reg = reg + CPG_FRQCRC;
zclk->kick_reg = reg + CPG_FRQCRB;
zclk->hw.init = &init;
- zclk->mask = mask;
+ zclk->mask = GENMASK(offset + 4, offset);
+ zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
clk = clk_register(NULL, &zclk->hw);
if (IS_ERR(clk))
@@ -234,8 +237,6 @@ struct sd_clock {
const struct sd_div_table *div_table;
struct cpg_simple_notifier csn;
unsigned int div_num;
- unsigned int div_min;
- unsigned int div_max;
unsigned int cur_div_idx;
};
@@ -312,14 +313,20 @@ static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
unsigned long rate,
unsigned long parent_rate)
{
- unsigned int div;
-
- if (!rate)
- rate = 1;
-
- div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ unsigned long calc_rate, diff, diff_min = ULONG_MAX;
+ unsigned int i, best_div = 0;
+
+ for (i = 0; i < clock->div_num; i++) {
+ calc_rate = DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[i].div);
+ diff = calc_rate > rate ? calc_rate - rate : rate - calc_rate;
+ if (diff < diff_min) {
+ best_div = clock->div_table[i].div;
+ diff_min = diff;
+ }
+ }
- return clamp_t(unsigned int, div, clock->div_min, clock->div_max);
+ return best_div;
}
static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -369,27 +376,26 @@ static u32 cpg_quirks __initdata;
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
-static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
- void __iomem *base, const char *parent_name,
+static struct clk * __init cpg_sd_clk_register(const char *name,
+ void __iomem *base, unsigned int offset, const char *parent_name,
struct raw_notifier_head *notifiers)
{
struct clk_init_data init;
struct sd_clock *clock;
struct clk *clk;
- unsigned int i;
u32 val;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
if (!clock)
return ERR_PTR(-ENOMEM);
- init.name = core->name;
+ init.name = name;
init.ops = &cpg_sd_clock_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = &parent_name;
init.num_parents = 1;
- clock->csn.reg = base + core->offset;
+ clock->csn.reg = base + offset;
clock->hw.init = &init;
clock->div_table = cpg_sd_div_table;
clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
@@ -403,13 +409,6 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
writel(val, clock->csn.reg);
- clock->div_max = clock->div_table[0].div;
- clock->div_min = clock->div_max;
- for (i = 1; i < clock->div_num; i++) {
- clock->div_max = max(clock->div_max, clock->div_table[i].div);
- clock->div_min = min(clock->div_min, clock->div_table[i].div);
- }
-
clk = clk_register(NULL, &clock->hw);
if (IS_ERR(clk))
goto free_clock;
@@ -606,8 +605,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
break;
case CLK_TYPE_GEN3_SD:
- return cpg_sd_clk_register(core, base, __clk_get_name(parent),
- notifiers);
+ return cpg_sd_clk_register(core->name, base, core->offset,
+ __clk_get_name(parent), notifiers);
case CLK_TYPE_GEN3_R:
if (cpg_quirks & RCKCR_CKSEL) {
@@ -658,11 +657,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
case CLK_TYPE_GEN3_Z:
return cpg_z_clk_register(core->name, __clk_get_name(parent),
- base, CPG_FRQCRC_ZFC_MASK);
-
- case CLK_TYPE_GEN3_Z2:
- return cpg_z_clk_register(core->name, __clk_get_name(parent),
- base, CPG_FRQCRC_Z2FC_MASK);
+ base, core->div, core->offset);
case CLK_TYPE_GEN3_OSC:
/*
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index eac1b057455a..c4ac80cac6a0 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -1,8 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* R-Car Gen3 Clock Pulse Generator
*
* Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2018 Renesas Electronics Corp.
*
*/
@@ -20,7 +21,6 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_R,
CLK_TYPE_GEN3_MDSEL, /* Select parent/divider using mode pin */
CLK_TYPE_GEN3_Z,
- CLK_TYPE_GEN3_Z2,
CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
CLK_TYPE_GEN3_RPCSRC,
@@ -51,6 +51,9 @@ enum rcar_gen3_clk_types {
DEF_BASE(_name, _id, CLK_TYPE_GEN3_RCKSEL, \
(_parent0) << 16 | (_parent1), .div = (_div0) << 16 | (_div1))
+#define DEF_GEN3_Z(_name, _id, _type, _parent, _div, _offset) \
+ DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset)
+
struct rcar_gen3_cpg_pll_config {
u8 extal_div;
u8 pll1_mult;
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index c4ec9df146fd..4ddcdf3bfb95 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Renesas Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2015 Glider bvba
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index ebce5260068b..09ede6920593 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -82,7 +82,7 @@ static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
u32 val;
- val = clk_readl(ddrclk->reg_base +
+ val = readl(ddrclk->reg_base +
ddrclk->mux_offset) >> ddrclk->mux_shift;
val &= GENMASK(ddrclk->mux_width - 1, 0);
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index b8da6e799423..784b81e1ea7c 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -24,7 +24,7 @@ static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
struct clk_divider *divider = to_clk_divider(hw);
unsigned int val;
- val = clk_readl(divider->reg) >> divider->shift;
+ val = readl(divider->reg) >> divider->shift;
val &= div_mask(divider->width);
val = val * 2 + 3;
@@ -124,11 +124,11 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = div_mask(divider->width) << (divider->shift + 16);
} else {
- val = clk_readl(divider->reg);
+ val = readl(divider->reg);
val &= ~(div_mask(divider->width) << divider->shift);
}
val |= value << divider->shift;
- clk_writel(val, divider->reg);
+ writel(val, divider->reg);
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 5a67b7869960..24baeb56a1b3 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -200,8 +200,8 @@ PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu" };
PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" };
PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "cpll", "gpll" };
PNAME(mux_pll_src_cpll_gpll_npll_p) = { "cpll", "gpll", "npll" };
-PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usbphy480m_src" };
-PNAME(mux_pll_src_cpll_gll_usb_npll_p) = { "cpll", "gpll", "usbphy480m_src", "npll" };
+PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "unstable:usbphy480m_src" };
+PNAME(mux_pll_src_cpll_gll_usb_npll_p) = { "cpll", "gpll", "unstable:usbphy480m_src", "npll" };
PNAME(mux_mmc_src_p) = { "cpll", "gpll", "xin24m", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -219,7 +219,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
-PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
+PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
"sclk_otgphy0_480m" };
PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
@@ -313,13 +313,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 6, GFLAGS),
- COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
+ COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 7, GFLAGS),
COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 8, GFLAGS),
- GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
RK3288_CLKGATE_CON(12), 9, GFLAGS),
GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(12), 10, GFLAGS),
@@ -420,7 +420,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 11, GFLAGS),
- MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
+ MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
@@ -647,7 +647,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
RK3288_CLKSEL_CON(22), 7, IFLAGS),
- GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
+ GATE(0, "jtag", "ext_jtag", 0,
RK3288_CLKGATE_CON(4), 14, GFLAGS),
COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
@@ -656,7 +656,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
RK3288_CLKGATE_CON(3), 6, GFLAGS),
- GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
+ GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
RK3288_CLKGATE_CON(13), 9, GFLAGS),
DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
@@ -697,7 +697,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
- GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
+ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
/* ddrctrl [DDR Controller PHY clock] gates */
GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
@@ -837,12 +837,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
"pclk_alive_niu",
"pclk_pd_pmu",
"pclk_pmu_niu",
- "pclk_core_niu",
- "pclk_ddrupctl0",
- "pclk_publ0",
- "pclk_ddrupctl1",
- "pclk_publ1",
"pmu_hclk_otg0",
+ /* pwm-regulators on some boards, so handoff-critical later */
+ "pclk_rkpwm",
};
static void __iomem *rk3288_cru_base;
@@ -859,6 +856,9 @@ static const int rk3288_saved_cru_reg_ids[] = {
RK3288_CLKSEL_CON(10),
RK3288_CLKSEL_CON(33),
RK3288_CLKSEL_CON(37),
+
+ /* We turn aclk_dmac1 on for suspend; this will restore it */
+ RK3288_CLKGATE_CON(10),
};
static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
@@ -875,6 +875,14 @@ static int rk3288_clk_suspend(void)
}
/*
+ * Going into deep sleep (specifically setting PMU_CLR_DMA in
+ * RK3288_PMU_PWRMODE_CON1) appears to fail unless
+ * "aclk_dmac1" is on.
+ */
+ writel_relaxed(1 << (12 + 16),
+ rk3288_cru_base + RK3288_CLKGATE_CON(10));
+
+ /*
* Switch PLLs other than DPLL (for SDRAM) to slow mode to
* avoid crashes on resume. The Mask ROM on the system will
* put APLL, CPLL, and GPLL into slow mode at resume time
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 65ab5c2f48b0..f12142d9cea2 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS,
RK3328_CLKGATE_CON(2), 12, GFLAGS),
COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0,
- RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK3328_CLKGATE_CON(2), 4, GFLAGS),
COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0,
RK3328_CLKSEL_CON(22), 0, 10, DFLAGS,
@@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
RK3328_CLKGATE_CON(25), 1, GFLAGS),
GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 2, GFLAGS),
GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ RK3328_CLKGATE_CON(25), 3, GFLAGS),
GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 4, GFLAGS),
GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ RK3328_CLKGATE_CON(25), 5, GFLAGS),
GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 6, GFLAGS),
COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0,
RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS,
@@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
/* PD_GMAC */
COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0,
- RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3328_CLKGATE_CON(3), 2, GFLAGS),
COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0,
RK3328_CLKSEL_CON(25), 8, 3, DFLAGS,
@@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
/* PD_PERI */
GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS),
- GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS),
GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS),
@@ -913,7 +913,7 @@ static void __init rk3328_clk_init(struct device_node *np)
&rk3328_cpuclk_data, rk3328_cpuclk_rates,
ARRAY_SIZE(rk3328_cpuclk_rates));
- rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0),
+ rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index c3ad92965823..0ea8e8080d1a 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -46,7 +46,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *base,
int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
- u8 div_shift, u8 div_width, u8 div_flags,
+ int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
struct clk_div_table *div_table, int gate_offset,
u8 gate_shift, u8 gate_flags, unsigned long flags,
spinlock_t *lock)
@@ -95,7 +95,10 @@ static struct clk *rockchip_clk_register_branch(const char *name,
}
div->flags = div_flags;
- div->reg = base + muxdiv_offset;
+ if (div_offset)
+ div->reg = base + div_offset;
+ else
+ div->reg = base + muxdiv_offset;
div->shift = div_shift;
div->width = div_width;
div->lock = lock;
@@ -516,7 +519,7 @@ void __init rockchip_clk_register_branches(
ctx->reg_base, list->muxdiv_offset,
list->mux_shift,
list->mux_width, list->mux_flags,
- list->div_shift, list->div_width,
+ list->div_offset, list->div_shift, list->div_width,
list->div_flags, list->div_table,
list->gate_offset, list->gate_shift,
list->gate_flags, flags, &ctx->lock);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 6b53fff4cc96..1b5270755431 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -407,6 +407,7 @@ struct rockchip_clk_branch {
u8 mux_shift;
u8 mux_width;
u8 mux_flags;
+ int div_offset;
u8 div_shift;
u8 div_width;
u8 div_flags;
@@ -438,6 +439,28 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, \
+ mf, do, ds, dw, df, go, gs, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_composite, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_offset = do, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = go, \
+ .gate_shift = gs, \
+ .gate_flags = gf, \
+ }
+
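As a rough illustration of how the new macro differs from plain COMPOSITE(), a branch whose divider field lives in a different CLKSEL register than its mux could be declared as below; the clock ID, parent list, register indices and field widths are invented for the example and do not describe any real SoC:

	/* hypothetical branch: mux in CLKSEL_CON(20), divider in CLKSEL_CON(21) */
	COMPOSITE_DIV_OFFSET(SCLK_FOO, "sclk_foo", mux_2plls_p, 0,
			RK3328_CLKSEL_CON(20), 14, 1, MFLAGS,
			RK3328_CLKSEL_CON(21), 0, 8, DFLAGS,
			RK3328_CLKGATE_CON(2), 6, GFLAGS),

rockchip_clk_register_branch() then programs the divider at div_offset while the mux stays at muxdiv_offset, which plain COMPOSITE() cannot express.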
#define COMPOSITE_NOMUX(_id, cname, pname, f, mo, ds, dw, df, \
go, gs, gf) \
{ \
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index 0a0b09591e6f..b2da2c8fa0c7 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -209,6 +209,7 @@ static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = {
GATE(CLK_USI1, "usi1", "aclk66", GATE_IP_PERIC, 11, 0, 0),
GATE(CLK_USI2, "usi2", "aclk66", GATE_IP_PERIC, 12, 0, 0),
GATE(CLK_USI3, "usi3", "aclk66", GATE_IP_PERIC, 13, 0, 0),
+ GATE(CLK_TSADC, "tsadc", "aclk66", GATE_IP_PERIC, 15, 0, 0),
GATE(CLK_PWM, "pwm", "aclk66", GATE_IP_PERIC, 24, 0, 0),
GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig
new file mode 100644
index 000000000000..8db4a3eb4782
--- /dev/null
+++ b/drivers/clk/sifive/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig CLK_SIFIVE
+ bool "SiFive SoC driver support"
+ help
+ Clock drivers for SiFive Linux-capable SoCs.
+
+if CLK_SIFIVE
+
+config CLK_SIFIVE_FU540_PRCI
+ bool "PRCI driver for SiFive FU540 SoCs"
+ select CLK_ANALOGBITS_WRPLL_CLN28HPC
+ help
+ Supports the Power Reset Clock interface (PRCI) IP block found in
+ FU540 SoCs. If this kernel is meant to run on a SiFive FU540 SoC,
+ enable this driver.
+
+endif
diff --git a/drivers/clk/sifive/Makefile b/drivers/clk/sifive/Makefile
new file mode 100644
index 000000000000..74d58a4c0756
--- /dev/null
+++ b/drivers/clk/sifive/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CLK_SIFIVE_FU540_PRCI) += fu540-prci.o
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
new file mode 100644
index 000000000000..0ec8bf7b4b28
--- /dev/null
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The FU540 PRCI implements clock and reset control for the SiFive
+ * FU540-C000 chip. This driver assumes that it has sole control
+ * over all PRCI resources.
+ *
+ * This driver is based on the PRCI driver written by Wesley Terpstra:
+ * https://github.com/riscv/riscv-linux/commit/999529edf517ed75b56659d456d221b2ee56bb60
+ *
+ * References:
+ * - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
+ */
+
+#include <dt-bindings/clock/sifive-fu540-prci.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/analogbits-wrpll-cln28hpc.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_clk.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
+ * hfclk and rtcclk
+ */
+#define EXPECTED_CLK_PARENT_COUNT 2
+
+/*
+ * Register offsets and bitmasks
+ */
+
+/* COREPLLCFG0 */
+#define PRCI_COREPLLCFG0_OFFSET 0x4
+# define PRCI_COREPLLCFG0_DIVR_SHIFT 0
+# define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
+# define PRCI_COREPLLCFG0_DIVF_SHIFT 6
+# define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
+# define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
+# define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
+# define PRCI_COREPLLCFG0_RANGE_SHIFT 18
+# define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
+# define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
+# define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
+# define PRCI_COREPLLCFG0_FSE_SHIFT 25
+# define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
+# define PRCI_COREPLLCFG0_LOCK_SHIFT 31
+# define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
+
+/* DDRPLLCFG0 */
+#define PRCI_DDRPLLCFG0_OFFSET 0xc
+# define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
+# define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
+# define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
+# define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
+# define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
+# define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
+# define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
+# define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
+# define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
+# define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
+# define PRCI_DDRPLLCFG0_FSE_SHIFT 25
+# define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
+# define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
+# define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
+
+/* DDRPLLCFG1 */
+#define PRCI_DDRPLLCFG1_OFFSET 0x10
+# define PRCI_DDRPLLCFG1_CKE_SHIFT 24
+# define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
+
+/* GEMGXLPLLCFG0 */
+#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
+# define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
+# define PRCI_GEMGXLPLLCFG0_DIVR_MASK (0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
+# define PRCI_GEMGXLPLLCFG0_DIVF_MASK (0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
+# define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
+# define PRCI_GEMGXLPLLCFG0_RANGE_MASK (0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
+# define PRCI_GEMGXLPLLCFG0_BYPASS_MASK (0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
+# define PRCI_GEMGXLPLLCFG0_FSE_MASK (0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
+# define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
+
+/* GEMGXLPLLCFG1 */
+#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
+# define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 24
+# define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
+
+/* CORECLKSEL */
+#define PRCI_CORECLKSEL_OFFSET 0x24
+# define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
+# define PRCI_CORECLKSEL_CORECLKSEL_MASK (0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
+
+/* DEVICESRESETREG */
+#define PRCI_DEVICESRESETREG_OFFSET 0x28
+# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
+# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
+# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
+# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
+# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
+# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
+# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
+# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
+# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
+# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)
+
+/* CLKMUXSTATUSREG */
+#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
+# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
+# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK (0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
+
+/*
+ * Private structures
+ */
+
+/**
+ * struct __prci_data - per-device-instance data
+ * @va: base virtual address of the PRCI IP block
+ * @hw_clks: encapsulates struct clk_hw records
+ *
+ * PRCI per-device instance data
+ */
+struct __prci_data {
+ void __iomem *va;
+ struct clk_hw_onecell_data hw_clks;
+};
+
+/**
+ * struct __prci_wrpll_data - WRPLL configuration and integration data
+ * @c: WRPLL current configuration record
+ * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
+ * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
+ * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
+ *
+ * @enable_bypass and @disable_bypass are used for WRPLL instances
+ * that contain a separate external glitchless clock mux downstream
+ * from the PLL. The WRPLL internal bypass mux is not glitchless.
+ */
+struct __prci_wrpll_data {
+ struct wrpll_cfg c;
+ void (*enable_bypass)(struct __prci_data *pd);
+ void (*disable_bypass)(struct __prci_data *pd);
+ u8 cfg0_offs;
+};
+
+/**
+ * struct __prci_clock - describes a clock device managed by PRCI
+ * @name: user-readable clock name string - should match the manual
+ * @parent_name: parent name for this clock
+ * @ops: struct clk_ops for the Linux clock framework to use for control
+ * @hw: Linux-private clock data
+ * @pwd: WRPLL-specific data, associated with this clock (if not NULL)
+ * @pd: PRCI-specific data associated with this clock (if not NULL)
+ *
+ * PRCI clock data. Used by the PRCI driver to register PRCI-provided
+ * clocks to the Linux clock infrastructure.
+ */
+struct __prci_clock {
+ const char *name;
+ const char *parent_name;
+ const struct clk_ops *ops;
+ struct clk_hw hw;
+ struct __prci_wrpll_data *pwd;
+ struct __prci_data *pd;
+};
+
+#define clk_hw_to_prci_clock(pwd) container_of(pwd, struct __prci_clock, hw)
+
+/*
+ * Private functions
+ */
+
+/**
+ * __prci_readl() - read from a PRCI register
+ * @pd: PRCI context
+ * @offs: register offset to read from (in bytes, from PRCI base address)
+ *
+ * Read the register located at offset @offs from the base virtual
+ * address of the PRCI register target described by @pd, and return
+ * the value to the caller.
+ *
+ * Context: Any context.
+ *
+ * Return: the contents of the register described by @pd and @offs.
+ */
+static u32 __prci_readl(struct __prci_data *pd, u32 offs)
+{
+ return readl_relaxed(pd->va + offs);
+}
+
+static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
+{
+ writel_relaxed(v, pd->va + offs);
+}
+
+/* WRPLL-related private functions */
+
+/**
+ * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
+ * @c: ptr to a struct wrpll_cfg record to write config into
+ * @r: value read from the PRCI PLL configuration register
+ *
+ * Given a value @r read from an FU540 PRCI PLL configuration register,
+ * split it into fields and populate it into the WRPLL configuration record
+ * pointed to by @c.
+ *
+ * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
+ * have the same register layout.
+ *
+ * Context: Any context.
+ */
+static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
+{
+ u32 v;
+
+ v = r & PRCI_COREPLLCFG0_DIVR_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
+ c->divr = v;
+
+ v = r & PRCI_COREPLLCFG0_DIVF_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
+ c->divf = v;
+
+ v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
+ c->divq = v;
+
+ v = r & PRCI_COREPLLCFG0_RANGE_MASK;
+ v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
+ c->range = v;
+
+ c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
+ WRPLL_FLAGS_EXT_FEEDBACK_MASK);
+
+ /* external feedback mode not supported */
+ c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
+}
+
+/**
+ * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
+ * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
+ *
+ * Using a set of WRPLL configuration values pointed to by @c,
+ * assemble a PRCI PLL configuration register value, and return it to
+ * the caller.
+ *
+ * Context: Any context. Caller must ensure that the contents of the
+ * record pointed to by @c do not change during the execution
+ * of this function.
+ *
+ * Returns: a value suitable for writing into a PRCI PLL configuration
+ * register
+ */
+static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
+{
+ u32 r = 0;
+
+ r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
+ r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
+ r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
+ r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
+
+ /* external feedback mode not supported */
+ r |= PRCI_COREPLLCFG0_FSE_MASK;
+
+ return r;
+}
+
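To make the packing above concrete, here is a worked example using the COREPLLCFG0 shifts defined earlier; the divider values are arbitrary and chosen only for illustration, not taken from any real FU540 configuration:

/*
 * Example (hypothetical values): divr = 0, divf = 52, divq = 2, range = 4
 *
 *   r  =  0 << 0    DIVR              = 0x00000000
 *   r |= 52 << 6    DIVF              = 0x00000d00
 *   r |=  2 << 15   DIVQ              = 0x00010000
 *   r |=  4 << 18   RANGE             = 0x00100000
 *   r |=  1 << 25   FSE (always set)  = 0x02000000
 *
 * so __prci_wrpll_pack() would return 0x02110d00 for this configuration,
 * and __prci_wrpll_unpack() applied to that value recovers the same fields.
 */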
+/**
+ * __prci_wrpll_read_cfg() - read the WRPLL configuration from the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ *
+ * Read the current configuration of the PLL identified by @pwd from
+ * the PRCI identified by @pd, and store it into the local configuration
+ * cache in @pwd.
+ *
+ * Context: Any context. Caller must prevent the records pointed to by
+ * @pd and @pwd from changing during execution.
+ */
+static void __prci_wrpll_read_cfg(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd)
+{
+ __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
+}
+
+/**
+ * __prci_wrpll_write_cfg() - write WRPLL configuration into the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ * @c: WRPLL configuration record to write
+ *
+ * Write the WRPLL configuration described by @c into the WRPLL
+ * configuration register identified by @pwd in the PRCI instance
+ * described by @pd. Make a cached copy of the WRPLL's current
+ * configuration so it can be used by other code.
+ *
+ * Context: Any context. Caller must prevent the records pointed to by
+ * @pd and @pwd from changing during execution.
+ */
+static void __prci_wrpll_write_cfg(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd,
+ struct wrpll_cfg *c)
+{
+ __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
+
+ memcpy(&pwd->c, c, sizeof(*c));
+}
+
+/* Core clock mux control */
+
+/**
+ * __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the HFCLK input source; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/**
+ * __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the PLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/*
+ * Linux clock framework integration
+ *
+ * See the Linux clock framework documentation for more information on
+ * these functions.
+ */
+
+static unsigned long sifive_fu540_prci_wrpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+
+ return wrpll_calc_output_rate(&pwd->c, parent_rate);
+}
+
+static long sifive_fu540_prci_wrpll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct wrpll_cfg c;
+
+ memcpy(&c, &pwd->c, sizeof(c));
+
+ wrpll_configure_for_rate(&c, rate, *parent_rate);
+
+ return wrpll_calc_output_rate(&c, *parent_rate);
+}
+
+static int sifive_fu540_prci_wrpll_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+ int r;
+
+ r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
+ if (r)
+ return r;
+
+ if (pwd->enable_bypass)
+ pwd->enable_bypass(pd);
+
+ __prci_wrpll_write_cfg(pd, pwd, &pwd->c);
+
+ udelay(wrpll_calc_max_lock_us(&pwd->c));
+
+ if (pwd->disable_bypass)
+ pwd->disable_bypass(pd);
+
+ return 0;
+}
+
+static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
+ .set_rate = sifive_fu540_prci_wrpll_set_rate,
+ .round_rate = sifive_fu540_prci_wrpll_round_rate,
+ .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
+ .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
+};
+
+/* TLCLKSEL clock integration */
+
+static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 v;
+ u8 div;
+
+ v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
+ v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
+ div = v ? 1 : 2;
+
+ return div_u64(parent_rate, div);
+}
+
+static const struct clk_ops sifive_fu540_prci_tlclksel_clk_ops = {
+ .recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
+};
+
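A quick numerical check of the divider logic above, with a made-up parent rate: if corepll runs at 1 GHz and the TLCLKSEL status bit reads 0, tlclk is reported as 1 GHz / 2 = 500 MHz; if the bit reads 1, the divider is 1 and tlclk equals the parent rate.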
+/*
+ * PRCI integration data for each WRPLL instance
+ */
+
+static struct __prci_wrpll_data __prci_corepll_data = {
+ .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
+ .enable_bypass = __prci_coreclksel_use_hfclk,
+ .disable_bypass = __prci_coreclksel_use_corepll,
+};
+
+static struct __prci_wrpll_data __prci_ddrpll_data = {
+ .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+};
+
+static struct __prci_wrpll_data __prci_gemgxlpll_data = {
+ .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
+};
+
+/*
+ * List of clock controls provided by the PRCI
+ */
+
+static struct __prci_clock __prci_init_clocks[] = {
+ [PRCI_CLK_COREPLL] = {
+ .name = "corepll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu540_prci_wrpll_clk_ops,
+ .pwd = &__prci_corepll_data,
+ },
+ [PRCI_CLK_DDRPLL] = {
+ .name = "ddrpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu540_prci_wrpll_ro_clk_ops,
+ .pwd = &__prci_ddrpll_data,
+ },
+ [PRCI_CLK_GEMGXLPLL] = {
+ .name = "gemgxlpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu540_prci_wrpll_clk_ops,
+ .pwd = &__prci_gemgxlpll_data,
+ },
+ [PRCI_CLK_TLCLK] = {
+ .name = "tlclk",
+ .parent_name = "corepll",
+ .ops = &sifive_fu540_prci_tlclksel_clk_ops,
+ },
+};
+
+/**
+ * __prci_register_clocks() - register clock controls in the PRCI with Linux
+ * @dev: Linux struct device *
+ * @pd: The PRCI driver's per-device instance data
+ *
+ * Register the list of clock controls described in __prci_init_clocks[] with
+ * the Linux clock framework.
+ *
+ * Return: 0 upon success or a negative error code upon failure.
+ */
+static int __prci_register_clocks(struct device *dev, struct __prci_data *pd)
+{
+ struct clk_init_data init = { };
+ struct __prci_clock *pic;
+ int parent_count, i, r;
+
+ parent_count = of_clk_get_parent_count(dev->of_node);
+ if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
+ dev_err(dev, "expected only two parent clocks, found %d\n",
+ parent_count);
+ return -EINVAL;
+ }
+
+ /* Register PLLs */
+ for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
+ pic = &__prci_init_clocks[i];
+
+ init.name = pic->name;
+ init.parent_names = &pic->parent_name;
+ init.num_parents = 1;
+ init.ops = pic->ops;
+ pic->hw.init = &init;
+
+ pic->pd = pd;
+
+ if (pic->pwd)
+ __prci_wrpll_read_cfg(pd, pic->pwd);
+
+ r = devm_clk_hw_register(dev, &pic->hw);
+ if (r) {
+ dev_warn(dev, "Failed to register clock %s: %d\n",
+ init.name, r);
+ return r;
+ }
+
+ r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
+ if (r) {
+ dev_warn(dev, "Failed to register clkdev for %s: %d\n",
+ init.name, r);
+ return r;
+ }
+
+ pd->hw_clks.hws[i] = &pic->hw;
+ }
+
+ pd->hw_clks.num = i;
+
+ r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &pd->hw_clks);
+ if (r) {
+ dev_err(dev, "could not add hw_provider: %d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Linux device model integration
+ *
+ * See the Linux device model documentation for more information about
+ * these functions.
+ */
+static int sifive_fu540_prci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct __prci_data *pd;
+ int r;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pd->va = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pd->va))
+ return PTR_ERR(pd->va);
+
+ r = __prci_register_clocks(dev, pd);
+ if (r) {
+ dev_err(dev, "could not register clocks: %d\n", r);
+ return r;
+ }
+
+ dev_dbg(dev, "SiFive FU540 PRCI probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id sifive_fu540_prci_of_match[] = {
+ { .compatible = "sifive,fu540-c000-prci", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sifive_fu540_prci_of_match);
+
+static struct platform_driver sifive_fu540_prci_driver = {
+ .driver = {
+ .name = "sifive-fu540-prci",
+ .of_match_table = sifive_fu540_prci_of_match,
+ },
+ .probe = sifive_fu540_prci_probe,
+};
+
+static int __init sifive_fu540_prci_init(void)
+{
+ return platform_driver_register(&sifive_fu540_prci_driver);
+}
+core_initcall(sifive_fu540_prci_init);
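For context, a rough sketch of how a peripheral driver would consume one of the clocks registered here, assuming a device-tree consumer along the lines of clocks = <&prci PRCI_CLK_GEMGXLPLL>; the node label and the consuming driver are assumptions for the example, not part of this patch:

	/* hypothetical consumer probe body; 'dev' is the consumer's struct device */
	struct clk *txclk;
	int ret;

	txclk = devm_clk_get(dev, NULL);	/* first entry in "clocks" */
	if (IS_ERR(txclk))
		return PTR_ERR(txclk);

	ret = clk_prepare_enable(txclk);
	if (ret)
		return ret;

	dev_dbg(dev, "GEMGXL PLL running at %lu Hz\n", clk_get_rate(txclk));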
diff --git a/drivers/clk/sprd/common.h b/drivers/clk/sprd/common.h
index abd9ff5ef448..1d077b39cef6 100644
--- a/drivers/clk/sprd/common.h
+++ b/drivers/clk/sprd/common.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum clock infrastructure
//
diff --git a/drivers/clk/sprd/composite.h b/drivers/clk/sprd/composite.h
index 0984e9e252dc..04ab3f587ee2 100644
--- a/drivers/clk/sprd/composite.h
+++ b/drivers/clk/sprd/composite.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum composite clock driver
//
diff --git a/drivers/clk/sprd/div.h b/drivers/clk/sprd/div.h
index b3033d24d431..87510e3d0e14 100644
--- a/drivers/clk/sprd/div.h
+++ b/drivers/clk/sprd/div.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum divider clock driver
//
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index 2e582c68a08b..dc352ea55e1f 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum gate clock driver
//
diff --git a/drivers/clk/sprd/mux.h b/drivers/clk/sprd/mux.h
index 548cfa0f145c..892e4191cc7f 100644
--- a/drivers/clk/sprd/mux.h
+++ b/drivers/clk/sprd/mux.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum multiplexer clock driver
//
diff --git a/drivers/clk/sprd/pll.h b/drivers/clk/sprd/pll.h
index 514175621099..e95f11e91ffe 100644
--- a/drivers/clk/sprd/pll.h
+++ b/drivers/clk/sprd/pll.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum pll clock driver
//
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 932836d26e2b..be0deee70182 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -531,7 +531,8 @@ static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 3, BIT(31), 0);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
static const u8 tcon0_table[] = { 0, 2, };
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 139e8389615c..3c32d7798f27 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -266,7 +266,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600,
0, 4, /* M */
24, 1, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "psi-ahb1-ahb2",
0x60c, BIT(0), 0);
@@ -311,7 +311,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
0, 3, /* M */
24, 1, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "psi-ahb1-ahb2",
0x69c, BIT(0), 0);
@@ -656,6 +656,8 @@ static const char * const hdmi_cec_parents[] = { "osc32k", "pll-periph0-2x" };
static const struct ccu_mux_fixed_prediv hdmi_cec_predivs[] = {
{ .index = 1, .div = 36621 },
};
+
+#define SUN50I_H6_HDMI_CEC_CLK_REG 0xb10
static struct ccu_mux hdmi_cec_clk = {
.enable = BIT(31),
@@ -689,7 +691,7 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0",
tcon_lcd0_parents, 0xb60,
24, 3, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb3",
0xb7c, BIT(0), 0);
@@ -704,7 +706,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0",
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb3",
0xb9c, BIT(0), 0);
@@ -1200,6 +1202,15 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
val &= ~(GENMASK(21, 16) | BIT(0));
writel(val | (7 << 16), reg + SUN50I_H6_PLL_AUDIO_REG);
+ /*
+ * The first clock parent (osc32k) is unusable for CEC. Since there is
+ * no good way to force a parent switch (both parents run at the same
+ * frequency), just select the second clock parent here.
+ */
+ val = readl(reg + SUN50I_H6_HDMI_CEC_CLK_REG);
+ val |= BIT(24);
+ writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG);
+
return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_h6_ccu_desc);
}
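The fixed prediv of 36621 declared above is what makes the second parent usable: assuming the usual 1.2 GHz rate of pll-periph0-2x on the H6, 1 200 000 000 / 36621 ≈ 32 768 Hz, i.e. effectively the same rate as osc32k, which is why both parents can be said to run at the same frequency.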
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
index 2ccfe4428260..9406f9a6a8aa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2016 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.h b/drivers/clk/sunxi-ng/ccu-sun5i.h
index 93a275fbd9a9..b66abd4fd0bf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.h
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.h
@@ -60,10 +60,6 @@
/* The rest of the module clocks are exported */
-#define CLK_MBUS 99
-
-/* And finally the IEP clock */
-
#define CLK_NUMBER (CLK_IEP + 1)
#endif /* _CCU_SUN5I_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 2d6555d73170..5f714b4d8ee4 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -513,8 +513,9 @@ static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x130, BIT(16), 0);
static SUNXI_CCU_GATE(mipi_csi_clk, "mipi-csi", "osc24M", 0x130, BIT(31), 0);
-static const char * const csi_mclk_parents[] = { "pll-de", "osc24M" };
-static const u8 csi_mclk_table[] = { 3, 5 };
+static const char * const csi_mclk_parents[] = { "pll-video0", "pll-de",
+ "osc24M" };
+static const u8 csi_mclk_table[] = { 0, 3, 5 };
static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk",
csi_mclk_parents, csi_mclk_table,
0x134,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index ac12f261f8ca..eada0e291859 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -325,7 +325,8 @@ static SUNXI_CCU_GATE(dram_ohci_clk, "dram-ohci", "dram",
static const char * const de_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 2, BIT(31), 0);
+ 0x104, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const tcon_parents[] = { "pll-video" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index a09dfbe36402..dc9f0a365664 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -240,7 +240,7 @@ static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", i2s_spdif_parents,
/* The BSP header file has a CIR_CFG, but no mod clock uses this definition */
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
- 0x0cc, BIT(8), 0);
+ 0x0cc, BIT(1), 0);
static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "pll-ddr",
0x100, BIT(0), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h
index 39d06fed55b2..b22484f1bb9a 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright 2017 Icenowy Zheng <icenowy@aosc.io>
*
*/
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 9b49adb20d07..cbcdf664f336 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -167,7 +167,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
- u32 n_mask, k_mask, m_mask, p_mask;
+ u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
struct _ccu_nkmp _nkmp;
unsigned long flags;
u32 reg;
@@ -186,10 +186,24 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
- n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
- k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
- m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
- p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
+ /*
+ * If the width is 0, the GENMASK() macro may not generate the expected
+ * mask (0), because a shift equal to or greater than the width of the
+ * left operand is undefined behaviour in the C standard. This is easily
+ * avoided by explicitly checking whether the width is 0.
+ */
+ if (nkmp->n.width)
+ n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
+ nkmp->n.shift);
+ if (nkmp->k.width)
+ k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
+ nkmp->k.shift);
+ if (nkmp->m.width)
+ m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
+ nkmp->m.shift);
+ if (nkmp->p.width)
+ p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
+ nkmp->p.shift);
spin_lock_irqsave(nkmp->common.lock, flags);
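A minimal self-contained sketch of the pattern used above, showing why the width check is needed (the helper name is invented for illustration):

#include <linux/bits.h>
#include <linux/types.h>

/*
 * With width == 0 and shift == 0, an unguarded GENMASK(width + shift - 1,
 * shift) becomes GENMASK(-1, 0), which internally shifts ~0UL by
 * BITS_PER_LONG and is undefined behaviour; the guard simply leaves the
 * mask at 0 so the nonexistent field is never touched.
 */
static inline u32 nkmp_field_mask(u8 width, u8 shift)
{
	return width ? GENMASK(width + shift - 1, shift) : 0;
}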
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
new file mode 100644
index 000000000000..2b6207cc4eda
--- /dev/null
+++ b/drivers/clk/sunxi/Kconfig
@@ -0,0 +1,43 @@
+menuconfig CLK_SUNXI
+ bool "Legacy clock support for Allwinner SoCs"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default y
+
+if CLK_SUNXI
+
+config CLK_SUNXI_CLOCKS
+ bool "Legacy clock drivers"
+ default y
+ help
+ Legacy clock drivers used on older (A10, A13, A20, A23, A31,
+ A80) SoCs. These drivers are kept around for Device Tree
+ backward compatibility, in case a Device Tree with one clock
+ provider per node is still in use. Newer Device Trees and
+ newer SoCs use the drivers controlled by CONFIG_SUNXI_CCU.
+
+config CLK_SUNXI_PRCM_SUN6I
+ bool "Legacy A31 PRCM driver"
+ select MFD_SUN6I_PRCM
+ default y
+ help
+ Legacy clock driver for the A31 PRCM clocks. Those are
+ mostly needed for communication with the PMIC.
+
+config CLK_SUNXI_PRCM_SUN8I
+ bool "Legacy sun8i PRCM driver"
+ select MFD_SUN6I_PRCM
+ default y
+ help
+ Legacy clock driver for the sun8i family PRCM clocks.
+ Those are mostly needed for communication with the PMIC.
+
+config CLK_SUNXI_PRCM_SUN9I
+ bool "Legacy A80 PRCM driver"
+ default y
+ help
+ Legacy clock driver for the A80 PRCM clocks. Those are
+ mostly needed for communication with the PMIC.
+
+endif
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index be88368b48a1..e10824c76ae9 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -3,27 +3,32 @@
# Makefile for sunxi specific clk
#
-obj-y += clk-sunxi.o clk-factors.o
-obj-y += clk-a10-codec.o
-obj-y += clk-a10-hosc.o
-obj-y += clk-a10-mod1.o
-obj-y += clk-a10-pll2.o
-obj-y += clk-a10-ve.o
-obj-y += clk-a20-gmac.o
-obj-y += clk-mod0.o
-obj-y += clk-simple-gates.o
-obj-y += clk-sun4i-display.o
-obj-y += clk-sun4i-pll3.o
-obj-y += clk-sun4i-tcon-ch1.o
-obj-y += clk-sun8i-bus-gates.o
-obj-y += clk-sun8i-mbus.o
-obj-y += clk-sun9i-core.o
-obj-y += clk-sun9i-mmc.o
-obj-y += clk-usb.o
+obj-$(CONFIG_CLK_SUNXI) += clk-factors.o
-obj-$(CONFIG_MACH_SUN9I) += clk-sun8i-apb0.o
-obj-$(CONFIG_MACH_SUN9I) += clk-sun9i-cpus.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sunxi.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-codec.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-hosc.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-mod1.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-pll2.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-ve.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a20-gmac.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-mod0.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-simple-gates.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-display.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-pll3.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-tcon-ch1.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-bus-gates.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-mbus.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-core.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-mmc.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-usb.o
-obj-$(CONFIG_MFD_SUN6I_PRCM) += \
- clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
- clk-sun8i-apb0.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-apb0.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-cpus.o
+
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-apb0.o
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-apb0-gates.o
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-ar100.o
+
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN8I) += clk-sun8i-apb0.o
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN8I) += clk-sun6i-apb0-gates.o
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 205fe8ff63f0..2a1822a22740 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -175,6 +175,7 @@ struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
void __iomem *reg, spinlock_t *lock)
{
return clk_register_divider_table(NULL, name, parent_name,
- CLK_IS_CRITICAL, reg, 16, 1, 0,
+ CLK_IS_CRITICAL,
+ reg, 16, 1, CLK_DIVIDER_READ_ONLY,
mc_div_table, lock);
}
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 0621a3a82ea6..93ecb538e59b 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -121,18 +121,28 @@ static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
struct tegra_clk_emc *tegra;
u8 ram_code = tegra_read_ram_code();
struct emc_timing *timing = NULL;
- int i;
+ int i, k, t;
tegra = container_of(hw, struct tegra_clk_emc, hw);
- for (i = 0; i < tegra->num_timings; i++) {
- if (tegra->timings[i].ram_code != ram_code)
- continue;
+ for (k = 0; k < tegra->num_timings; k++) {
+ if (tegra->timings[k].ram_code == ram_code)
+ break;
+ }
+
+ for (t = k; t < tegra->num_timings; t++) {
+ if (tegra->timings[t].ram_code != ram_code)
+ break;
+ }
+ for (i = k; i < t; i++) {
timing = tegra->timings + i;
+ if (timing->rate < req->rate && i != t - 1)
+ continue;
+
if (timing->rate > req->max_rate) {
- i = max(i, 1);
+ i = max(i, k + 1);
req->rate = tegra->timings[i - 1].rate;
return 0;
}
@@ -140,10 +150,8 @@ static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
if (timing->rate < req->min_rate)
continue;
- if (timing->rate >= req->rate) {
- req->rate = timing->rate;
- return 0;
- }
+ req->rate = timing->rate;
+ return 0;
}
if (timing) {
@@ -214,7 +222,10 @@ static int emc_set_timing(struct tegra_clk_emc *tegra,
if (emc_get_parent(&tegra->hw) == timing->parent_index &&
clk_get_rate(timing->parent) != timing->parent_rate) {
- BUG();
+ WARN_ONCE(1, "parent %s rate mismatch %lu %lu\n",
+ __clk_get_name(timing->parent),
+ clk_get_rate(timing->parent),
+ timing->parent_rate);
return -EINVAL;
}
@@ -282,7 +293,7 @@ static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
for (i = timing_index+1; i < tegra->num_timings; i++) {
timing = tegra->timings + i;
if (timing->ram_code != ram_code)
- continue;
+ break;
if (emc_parent_clk_sources[timing->parent_index] !=
emc_parent_clk_sources[
@@ -293,7 +304,7 @@ static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
for (i = timing_index-1; i >= 0; --i) {
timing = tegra->timings + i;
if (timing->ram_code != ram_code)
- continue;
+ break;
if (emc_parent_clk_sources[timing->parent_index] !=
emc_parent_clk_sources[
@@ -433,19 +444,23 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
struct device_node *node,
u32 ram_code)
{
+ struct emc_timing *timings_ptr;
struct device_node *child;
int child_count = of_get_child_count(node);
int i = 0, err;
+ size_t size;
- tegra->timings = kcalloc(child_count, sizeof(struct emc_timing),
- GFP_KERNEL);
+ size = (tegra->num_timings + child_count) * sizeof(struct emc_timing);
+
+ tegra->timings = krealloc(tegra->timings, size, GFP_KERNEL);
if (!tegra->timings)
return -ENOMEM;
- tegra->num_timings = child_count;
+ timings_ptr = tegra->timings + tegra->num_timings;
+ tegra->num_timings += child_count;
for_each_child_of_node(node, child) {
- struct emc_timing *timing = tegra->timings + (i++);
+ struct emc_timing *timing = timings_ptr + (i++);
err = load_one_timing_from_dt(tegra, timing, child);
if (err) {
@@ -456,7 +471,7 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
timing->ram_code = ram_code;
}
- sort(tegra->timings, tegra->num_timings, sizeof(struct emc_timing),
+ sort(timings_ptr, child_count, sizeof(struct emc_timing),
cmp_timings, NULL);
return 0;
@@ -499,10 +514,10 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
* fuses until the apbmisc driver is loaded.
*/
err = load_timings_from_dt(tegra, node, node_ram_code);
- of_node_put(node);
- if (err)
+ if (err) {
+ of_node_put(node);
return ERR_PTR(err);
- break;
+ }
}
if (tegra->num_timings == 0)
@@ -532,7 +547,5 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
/* Allow debugging tools to see the EMC clock */
clk_register_clkdev(clk, "emc", "tegra-clk-debug");
- clk_prepare_enable(clk);
-
return clk;
};
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index b50b7460014b..6b976b2514f7 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -444,6 +444,9 @@ static int clk_pll_enable(struct clk_hw *hw)
unsigned long flags = 0;
int ret;
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
@@ -663,8 +666,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
pll_override_writel(val, params->pmc_divp_reg, pll);
val = pll_override_readl(params->pmc_divnm_reg, pll);
- val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
- ~(divn_mask(pll) << div_nmp->override_divn_shift);
+ val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
+ (divn_mask(pll) << div_nmp->override_divn_shift));
val |= (cfg->m << div_nmp->override_divm_shift) |
(cfg->n << div_nmp->override_divn_shift);
pll_override_writel(val, params->pmc_divnm_reg, pll);
@@ -940,11 +943,16 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
static int clk_plle_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
struct tegra_clk_pll_freq_table sel;
+ unsigned long input_rate;
u32 val;
int err;
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
+ input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+
if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
@@ -1355,6 +1363,9 @@ static int clk_pllc_enable(struct clk_hw *hw)
int ret;
unsigned long flags = 0;
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
@@ -1567,7 +1578,12 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
u32 val;
int ret;
unsigned long flags = 0;
- unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+ unsigned long input_rate;
+
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
+ input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
@@ -1704,6 +1720,9 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw)
return -EINVAL;
}
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
input_rate = clk_hw_get_rate(__clk_get_hw(osc));
if (pll->lock)
@@ -2379,6 +2398,16 @@ struct clk *tegra_clk_register_pllre_tegra210(const char *name,
return clk;
}
+static int clk_plle_tegra210_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ val = pll_readl_base(pll);
+
+ return val & PLLE_BASE_ENABLE ? 1 : 0;
+}
+
static int clk_plle_tegra210_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
@@ -2386,7 +2415,12 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
u32 val;
int ret = 0;
unsigned long flags = 0;
- unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+ unsigned long input_rate;
+
+ if (clk_plle_tegra210_is_enabled(hw))
+ return 0;
+
+ input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
@@ -2497,16 +2531,6 @@ out:
spin_unlock_irqrestore(pll->lock, flags);
}
-static int clk_plle_tegra210_is_enabled(struct clk_hw *hw)
-{
- struct tegra_clk_pll *pll = to_clk_pll(hw);
- u32 val;
-
- val = pll_readl_base(pll);
-
- return val & PLLE_BASE_ENABLE ? 1 : 0;
-}
-
static const struct clk_ops tegra_clk_plle_tegra210_ops = {
.is_enabled = clk_plle_tegra210_is_enabled,
.enable = clk_plle_tegra210_enable,
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 84267cfc4433..b5ff76c663f8 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -121,7 +121,7 @@ out:
return err;
}
-const struct clk_ops tegra_clk_super_mux_ops = {
+static const struct clk_ops tegra_clk_super_mux_ops = {
.get_parent = clk_super_get_parent,
.set_parent = clk_super_set_parent,
};
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index df0018f7bf7e..d7bee144f4b7 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -413,7 +413,6 @@ static struct tegra_clk_pll_params pll_m_params = {
.base_reg = PLLM_BASE,
.misc_reg = PLLM_MISC,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
.max_p = 5,
.pdiv_tohw = pllm_p,
@@ -421,7 +420,7 @@ static struct tegra_clk_pll_params pll_m_params = {
.pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE,
.pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE_2,
.freq_table = pll_m_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
@@ -1466,9 +1465,9 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
tegra_pmc_clk_init(pmc_base, tegra124_clks);
/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
- plld_base = clk_readl(clk_base + PLLD_BASE);
+ plld_base = readl(clk_base + PLLD_BASE);
plld_base &= ~BIT(25);
- clk_writel(plld_base, clk_base + PLLD_BASE);
+ writel(plld_base, clk_base + PLLD_BASE);
}
/**
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 7545af763d7a..ed3c7df75d1e 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3557,7 +3557,7 @@ static void __init tegra210_clock_init(struct device_node *np)
if (!clks)
return;
- value = clk_readl(clk_base + SPARE_REG0) >> CLK_M_DIVISOR_SHIFT;
+ value = readl(clk_base + SPARE_REG0) >> CLK_M_DIVISOR_SHIFT;
clk_m_div = (value & CLK_M_DIVISOR_MASK) + 1;
if (tegra_osc_clk_init(clk_base, tegra210_clks, tegra210_input_freq,
@@ -3574,9 +3574,9 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_pmc_clk_init(pmc_base, tegra210_clks);
/* For Tegra210, PLLD is the only source for DSIA & DSIB */
- value = clk_readl(clk_base + PLLD_BASE);
+ value = readl(clk_base + PLLD_BASE);
value &= ~BIT(25);
- clk_writel(value, clk_base + PLLD_BASE);
+ writel(value, clk_base + PLLD_BASE);
tegra_clk_apply_init_table = tegra210_clock_apply_init_table;
diff --git a/drivers/clk/ti/clk-7xx-compat.c b/drivers/clk/ti/clk-7xx-compat.c
index e3cb7f0b03ae..b3cd2296f84b 100644
--- a/drivers/clk/ti/clk-7xx-compat.c
+++ b/drivers/clk/ti/clk-7xx-compat.c
@@ -362,7 +362,7 @@ static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst
{ DRA7_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
{ DRA7_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
{ DRA7_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
- { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_DRA74 | CLKF_SOC_DRA76, "dpll_core_h13x2_ck" },
{ DRA7_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
{ DRA7_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
{ DRA7_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
@@ -662,7 +662,7 @@ static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst
{ DRA7_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
{ DRA7_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
{ DRA7_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
- { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
+ { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_iclk_div", "l4sec_clkdm" },
{ DRA7_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
{ DRA7_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01d0:24", "l4per2_clkdm" },
{ DRA7_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e0:24", "l4per2_clkdm" },
@@ -704,7 +704,7 @@ static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initcons
{ DRA7_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ DRA7_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
{ DRA7_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
- { DRA7_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" },
+ { DRA7_TIMER12_CLKCTRL, NULL, CLKF_SOC_NONSEC, "secure_32k_clk_src_ck" },
{ DRA7_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ DRA7_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0060:24" },
{ DRA7_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0068:24" },
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 597fb4a59318..79186b918d87 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -348,7 +348,7 @@ static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst
{ DRA7_L3INIT_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
{ DRA7_L3INIT_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
{ DRA7_L3INIT_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
- { DRA7_L3INIT_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_L3INIT_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_DRA74 | CLKF_SOC_DRA76, "dpll_core_h13x2_ck" },
{ DRA7_L3INIT_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
{ DRA7_L3INIT_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
{ DRA7_L3INIT_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
@@ -590,7 +590,7 @@ static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst
{ DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+ { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
{ DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ 0 },
};
@@ -757,7 +757,7 @@ static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initcons
{ DRA7_WKUPAON_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ DRA7_WKUPAON_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
{ DRA7_WKUPAON_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
- { DRA7_WKUPAON_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" },
+ { DRA7_WKUPAON_TIMER12_CLKCTRL, NULL, CLKF_SOC_NONSEC, "secure_32k_clk_src_ck" },
{ DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" },
{ DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" },
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 639f515e08f0..96d65a1cf7be 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -446,6 +446,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
u32 addr;
int ret;
char *c;
+ u16 soc_mask = 0;
if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
of_node_name_eq(node, "clk"))
@@ -469,6 +470,13 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
else
data = dra7_clkctrl_data;
}
+
+ if (of_machine_is_compatible("ti,dra72"))
+ soc_mask = CLKF_SOC_DRA72;
+ if (of_machine_is_compatible("ti,dra74"))
+ soc_mask = CLKF_SOC_DRA74;
+ if (of_machine_is_compatible("ti,dra76"))
+ soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
if (of_machine_is_compatible("ti,am33xx")) {
@@ -501,6 +509,9 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
data = dm816_clkctrl_data;
#endif
+ if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
+ soc_mask |= CLKF_SOC_NONSEC;
+
while (data->addr) {
if (addr == data->addr)
break;
@@ -562,6 +573,12 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
reg_data = data->regs;
while (reg_data->parent) {
+ if ((reg_data->flags & CLKF_SOC_MASK) &&
+ (reg_data->flags & soc_mask) == 0) {
+ reg_data++;
+ continue;
+ }
+
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
return;
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 1c0fac59d809..e4b8392ff63c 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -83,6 +83,13 @@ enum {
#define CLKF_HW_SUP BIT(6)
#define CLKF_NO_IDLEST BIT(7)
+#define CLKF_SOC_MASK GENMASK(11, 8)
+
+#define CLKF_SOC_NONSEC BIT(8)
+#define CLKF_SOC_DRA72 BIT(9)
+#define CLKF_SOC_DRA74 BIT(10)
+#define CLKF_SOC_DRA76 BIT(11)
+
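To illustrate how these bits interact with the soc_mask computed in _ti_omap4_clkctrl_setup(): an entry flagged CLKF_SOC_DRA74 | CLKF_SOC_DRA76, such as DRA7_L3INIT_USB_OTG_SS4_CLKCTRL in clk-7xx.c, is registered on DRA74x and DRA76x but skipped on DRA72x, while entries flagged CLKF_SOC_NONSEC (TIMER12, RNG) are only registered on GP devices; an entry with no CLKF_SOC_* bit set is registered on every SoC.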
#define CLK(dev, con, ck) \
{ \
.lk = { \
@@ -303,7 +310,6 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
-bool omap2_clk_is_hw_omap(struct clk_hw *hw);
extern struct ti_clk_ll_ops *ti_clk_ll_ops;
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
index 7c0403b733ae..698306f4801f 100644
--- a/drivers/clk/ux500/clk-sysctrl.c
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -42,7 +42,8 @@ static int clk_sysctrl_prepare(struct clk_hw *hw)
clk->reg_bits[0]);
if (!ret && clk->enable_delay_us)
- usleep_range(clk->enable_delay_us, clk->enable_delay_us);
+ usleep_range(clk->enable_delay_us, clk->enable_delay_us +
+ (clk->enable_delay_us >> 2));
return ret;
}
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index d977193842df..19174835693b 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
};
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
- void __iomem *base,
+ const struct pmc_clk_data *pmc_data,
const char **parent_names,
int num_parents)
{
@@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
init.num_parents = num_parents;
pclk->hw.init = &init;
- pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
+ pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
+ /*
+ * On some systems, pmc_plt_clocks that the firmware has already
+ * enabled are marked as critical to keep the clock framework from
+ * gating them.
+ */
+ if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
+ init.flags |= CLK_IS_CRITICAL;
+
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
@@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
return PTR_ERR(parent_names);
for (i = 0; i < PMC_CLK_NUM; i++) {
- data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
+ data->clks[i] = plt_clk_register(pdev, i, pmc_data,
parent_names, data->nparents);
if (IS_ERR(data->clks[i])) {
err = PTR_ERR(data->clks[i]);
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index d7b53ac8ad11..4b9d5c14c400 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -158,7 +158,7 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
clks[fclk] = clk_register_gate(NULL, clk_name,
div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg,
0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock);
- enable_reg = clk_readl(fclk_gate_reg) & 1;
+ enable_reg = readl(fclk_gate_reg) & 1;
if (enable && !enable_reg) {
if (clk_prepare_enable(clks[fclk]))
pr_warn("%s: FCLK%u enable failed\n", __func__,
@@ -287,7 +287,7 @@ static void __init zynq_clk_setup(struct device_node *np)
SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock);
/* CPU clocks */
- tmp = clk_readl(SLCR_621_TRUE) & 1;
+ tmp = readl(SLCR_621_TRUE) & 1;
clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
&armclk_lock);
@@ -510,7 +510,7 @@ static void __init zynq_clk_setup(struct device_node *np)
&dbgclk_lock);
/* leave debug clocks in the state the bootloader set them up to */
- tmp = clk_readl(SLCR_DBG_CLK_CTRL);
+ tmp = readl(SLCR_DBG_CLK_CTRL);
if (tmp & DBG_CLK_CTRL_CLKACT_TRC)
if (clk_prepare_enable(clks[dbg_trc]))
pr_warn("%s: trace clk enable failed\n", __func__);
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 00d72fb5c036..800b70ee19b3 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -90,7 +90,7 @@ static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
* makes probably sense to redundantly save fbdiv in the struct
* zynq_pll to save the IO access.
*/
- fbdiv = (clk_readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >>
+ fbdiv = (readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >>
PLLCTRL_FBDIV_SHIFT;
return parent_rate * fbdiv;
@@ -112,7 +112,7 @@ static int zynq_pll_is_enabled(struct clk_hw *hw)
spin_lock_irqsave(clk->lock, flags);
- reg = clk_readl(clk->pll_ctrl);
+ reg = readl(clk->pll_ctrl);
spin_unlock_irqrestore(clk->lock, flags);
@@ -138,10 +138,10 @@ static int zynq_pll_enable(struct clk_hw *hw)
/* Power up PLL and wait for lock */
spin_lock_irqsave(clk->lock, flags);
- reg = clk_readl(clk->pll_ctrl);
+ reg = readl(clk->pll_ctrl);
reg &= ~(PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK);
- clk_writel(reg, clk->pll_ctrl);
- while (!(clk_readl(clk->pll_status) & (1 << clk->lockbit)))
+ writel(reg, clk->pll_ctrl);
+ while (!(readl(clk->pll_status) & (1 << clk->lockbit)))
;
spin_unlock_irqrestore(clk->lock, flags);
@@ -168,9 +168,9 @@ static void zynq_pll_disable(struct clk_hw *hw)
/* shut down PLL */
spin_lock_irqsave(clk->lock, flags);
- reg = clk_readl(clk->pll_ctrl);
+ reg = readl(clk->pll_ctrl);
reg |= PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK;
- clk_writel(reg, clk->pll_ctrl);
+ writel(reg, clk->pll_ctrl);
spin_unlock_irqrestore(clk->lock, flags);
}
@@ -223,9 +223,9 @@ struct clk *clk_register_zynq_pll(const char *name, const char *parent,
spin_lock_irqsave(pll->lock, flags);
- reg = clk_readl(pll->pll_ctrl);
+ reg = readl(pll->pll_ctrl);
reg &= ~PLLCTRL_BPQUAL_MASK;
- clk_writel(reg, pll->pll_ctrl);
+ writel(reg, pll->pll_ctrl);
spin_unlock_irqrestore(pll->lock, flags);
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 4143f560c28d..0af8f74c5fa5 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -138,4 +138,3 @@ struct clk_hw *zynqmp_clk_register_mux(const char *name, u32 clk_id,
return hw;
}
-EXPORT_SYMBOL_GPL(zynqmp_clk_register_mux);
diff --git a/drivers/clk/zynqmp/clk-zynqmp.h b/drivers/clk/zynqmp/clk-zynqmp.h
index 7ab163b67249..fec9a15c8786 100644
--- a/drivers/clk/zynqmp/clk-zynqmp.h
+++ b/drivers/clk/zynqmp/clk-zynqmp.h
@@ -10,12 +10,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
-/* Clock APIs payload parameters */
-#define CLK_GET_NAME_RESP_LEN 16
-#define CLK_GET_TOPOLOGY_RESP_WORDS 3
-#define CLK_GET_PARENTS_RESP_WORDS 3
-#define CLK_GET_ATTR_RESP_WORDS 1
-
enum topology_type {
TYPE_INVALID,
TYPE_MUX,
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 2b1d43457f09..a11f93ecbf34 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -21,24 +21,6 @@
#define MAX_NODES 6
#define MAX_NAME_LEN 50
-#define CLK_TYPE_SHIFT 2
-
-#define PM_API_PAYLOAD_LEN 3
-
-#define NA_PARENT 0xFFFFFFFF
-#define DUMMY_PARENT 0xFFFFFFFE
-
-#define CLK_TYPE_FIELD_LEN 4
-#define CLK_TOPOLOGY_NODE_OFFSET 16
-#define NODES_PER_RESP 3
-
-#define CLK_TYPE_FIELD_MASK 0xF
-#define CLK_FLAG_FIELD_MASK GENMASK(21, 8)
-#define CLK_TYPE_FLAG_FIELD_MASK GENMASK(31, 24)
-
-#define CLK_PARENTS_ID_LEN 16
-#define CLK_PARENTS_ID_MASK 0xFFFF
-
/* Flags for parents */
#define PARENT_CLK_SELF 0
#define PARENT_CLK_NODE1 1
@@ -52,7 +34,10 @@
#define END_OF_PARENTS 1
#define RESERVED_CLK_NAME ""
-#define CLK_VALID_MASK 0x1
+#define CLK_GET_NAME_RESP_LEN 16
+#define CLK_GET_TOPOLOGY_RESP_WORDS 3
+#define CLK_GET_PARENTS_RESP_WORDS 3
+#define CLK_GET_ATTR_RESP_WORDS 1
enum clk_type {
CLK_TYPE_OUTPUT,
@@ -80,6 +65,7 @@ struct clock_parent {
* @num_nodes: Number of nodes present in topology
* @parent: Parent of clock
* @num_parents: Number of parents of clock
+ * @clk_id: Clock id
*/
struct zynqmp_clock {
char clk_name[MAX_NAME_LEN];
@@ -89,6 +75,36 @@ struct zynqmp_clock {
u32 num_nodes;
struct clock_parent parent[MAX_PARENT];
u32 num_parents;
+ u32 clk_id;
+};
+
+struct name_resp {
+ char name[CLK_GET_NAME_RESP_LEN];
+};
+
+struct topology_resp {
+#define CLK_TOPOLOGY_TYPE GENMASK(3, 0)
+#define CLK_TOPOLOGY_FLAGS GENMASK(23, 8)
+#define CLK_TOPOLOGY_TYPE_FLAGS GENMASK(31, 24)
+ u32 topology[CLK_GET_TOPOLOGY_RESP_WORDS];
+};
+
+struct parents_resp {
+#define NA_PARENT 0xFFFFFFFF
+#define DUMMY_PARENT 0xFFFFFFFE
+#define CLK_PARENTS_ID GENMASK(15, 0)
+#define CLK_PARENTS_FLAGS GENMASK(31, 16)
+ u32 parents[CLK_GET_PARENTS_RESP_WORDS];
+};
+
+struct attr_resp {
+#define CLK_ATTR_VALID BIT(0)
+#define CLK_ATTR_TYPE BIT(2)
+#define CLK_ATTR_NODE_INDEX GENMASK(13, 0)
+#define CLK_ATTR_NODE_TYPE GENMASK(19, 14)
+#define CLK_ATTR_NODE_SUBCLASS GENMASK(25, 20)
+#define CLK_ATTR_NODE_CLASS GENMASK(31, 26)
+ u32 attr[CLK_GET_ATTR_RESP_WORDS];
};
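The response structs above pack several fields into each 32-bit payload word returned by the firmware. A standalone sketch of decoding one attribute word follows; GENMASK/FIELD_GET are re-implemented in plain C purely for illustration (the driver uses <linux/bits.h> and <linux/bitfield.h>), and the example word is made up.

/* Decode one CLK_GET_ATTRIBUTES payload word into its bitfields. */
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define BIT(n)		(1u << (n))
/* shift the masked value down to bit 0, like the kernel's FIELD_GET() */
#define FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & ~((mask) << 1)))

#define CLK_ATTR_VALID		BIT(0)
#define CLK_ATTR_TYPE		BIT(2)
#define CLK_ATTR_NODE_INDEX	GENMASK(13, 0)
#define CLK_ATTR_NODE_TYPE	GENMASK(19, 14)
#define CLK_ATTR_NODE_SUBCLASS	GENMASK(25, 20)
#define CLK_ATTR_NODE_CLASS	GENMASK(31, 26)

int main(void)
{
	unsigned int attr = 0x08500025;	/* made-up firmware response word */

	printf("valid=%u type=%u index=%u nodetype=%u subclass=%u class=%u\n",
	       FIELD_GET(CLK_ATTR_VALID, attr),
	       FIELD_GET(CLK_ATTR_TYPE, attr),
	       FIELD_GET(CLK_ATTR_NODE_INDEX, attr),
	       FIELD_GET(CLK_ATTR_NODE_TYPE, attr),
	       FIELD_GET(CLK_ATTR_NODE_SUBCLASS, attr),
	       FIELD_GET(CLK_ATTR_NODE_CLASS, attr));
	return 0;
}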
static const char clk_type_postfix[][10] = {
@@ -199,14 +215,15 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks)
/**
* zynqmp_pm_clock_get_name() - Get the name of clock for given id
* @clock_id: ID of the clock to be queried
- * @name: Name of given clock
+ * @response: Name of the clock with the given id
*
* This function is used to get the name of the clock specified by the
* given clock ID.
*
- * Return: Returns 0, in case of error name would be 0
+ * Return: Returns 0
*/
-static int zynqmp_pm_clock_get_name(u32 clock_id, char *name)
+static int zynqmp_pm_clock_get_name(u32 clock_id,
+ struct name_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -215,7 +232,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name)
qdata.arg1 = clock_id;
eemi_ops->query_data(qdata, ret_payload);
- memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);
+ memcpy(response, ret_payload, sizeof(*response));
return 0;
}
@@ -224,7 +241,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name)
* zynqmp_pm_clock_get_topology() - Get the topology of clock for given id
* @clock_id: ID of the clock to be queried
* @index: Node index of clock topology
- * @topology: Buffer to store nodes in topology and flags
+ * @response: Buffer used for the topology response
*
* This function is used to get topology information for the clock
* specified by the given clock ID.
@@ -237,7 +254,8 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name)
*
* Return: 0 on success else error+reason
*/
-static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
+static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index,
+ struct topology_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -248,7 +266,7 @@ static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
qdata.arg2 = index;
ret = eemi_ops->query_data(qdata, ret_payload);
- memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);
+ memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
}
@@ -297,7 +315,7 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
* zynqmp_pm_clock_get_parents() - Get the first 3 parents of clock for given id
* @clock_id: Clock ID
* @index: Parent index
- * @parents: 3 parents of the given clock
+ * @response: Parents of the given clock
*
* This function is used to get 3 parents for the clock specified by
* the given clock ID.
@@ -310,7 +328,8 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
*
* Return: 0 on success else error+reason
*/
-static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
+static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index,
+ struct parents_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -321,7 +340,7 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
qdata.arg2 = index;
ret = eemi_ops->query_data(qdata, ret_payload);
- memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);
+ memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
}
@@ -329,13 +348,14 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
/**
* zynqmp_pm_clock_get_attributes() - Get the attributes of clock for given id
* @clock_id: Clock ID
- * @attr: Clock attributes
+ * @response: Clock attributes response
*
* This function is used to get the clock's attributes (e.g. valid, clock type, etc.).
*
* Return: 0 on success else error+reason
*/
-static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr)
+static int zynqmp_pm_clock_get_attributes(u32 clock_id,
+ struct attr_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -345,7 +365,7 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr)
qdata.arg1 = clock_id;
ret = eemi_ops->query_data(qdata, ret_payload);
- memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);
+ memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
}
@@ -354,24 +374,28 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr)
* __zynqmp_clock_get_topology() - Get topology data of clock from firmware
* response data
* @topology: Clock topology
- * @data: Clock topology data received from firmware
+ * @response: Clock topology data received from firmware
* @nnodes: Number of nodes
*
* Return: 0 on success else error+reason
*/
static int __zynqmp_clock_get_topology(struct clock_topology *topology,
- u32 *data, u32 *nnodes)
+ struct topology_resp *response,
+ u32 *nnodes)
{
int i;
+ u32 type;
- for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
- if (!(data[i] & CLK_TYPE_FIELD_MASK))
+ for (i = 0; i < ARRAY_SIZE(response->topology); i++) {
+ type = FIELD_GET(CLK_TOPOLOGY_TYPE, response->topology[i]);
+ if (type == TYPE_INVALID)
return END_OF_TOPOLOGY_NODE;
- topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
- topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
- data[i]);
+ topology[*nnodes].type = type;
+ topology[*nnodes].flag = FIELD_GET(CLK_TOPOLOGY_FLAGS,
+ response->topology[i]);
topology[*nnodes].type_flag =
- FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
+ FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS,
+ response->topology[i]);
(*nnodes)++;
}
@@ -392,14 +416,16 @@ static int zynqmp_clock_get_topology(u32 clk_id,
u32 *num_nodes)
{
int j, ret;
- u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
+ struct topology_resp response = { };
*num_nodes = 0;
- for (j = 0; j <= MAX_NODES; j += 3) {
- ret = zynqmp_pm_clock_get_topology(clk_id, j, pm_resp);
+ for (j = 0; j <= MAX_NODES; j += ARRAY_SIZE(response.topology)) {
+ ret = zynqmp_pm_clock_get_topology(clock[clk_id].clk_id, j,
+ &response);
if (ret)
return ret;
- ret = __zynqmp_clock_get_topology(topology, pm_resp, num_nodes);
+ ret = __zynqmp_clock_get_topology(topology, &response,
+ num_nodes);
if (ret == END_OF_TOPOLOGY_NODE)
return 0;
}
@@ -408,31 +434,33 @@ static int zynqmp_clock_get_topology(u32 clk_id,
}
/**
- * __zynqmp_clock_get_topology() - Get parents info of clock from firmware
+ * __zynqmp_clock_get_parents() - Get parents info of clock from firmware
* response data
* @parents: Clock parents
- * @data: Clock parents data received from firmware
+ * @response: Clock parents data received from firmware
* @nparent: Number of parents
*
* Return: 0 on success else error+reason
*/
-static int __zynqmp_clock_get_parents(struct clock_parent *parents, u32 *data,
+static int __zynqmp_clock_get_parents(struct clock_parent *parents,
+ struct parents_resp *response,
u32 *nparent)
{
int i;
struct clock_parent *parent;
- for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
- if (data[i] == NA_PARENT)
+ for (i = 0; i < ARRAY_SIZE(response->parents); i++) {
+ if (response->parents[i] == NA_PARENT)
return END_OF_PARENTS;
parent = &parents[i];
- parent->id = data[i] & CLK_PARENTS_ID_MASK;
- if (data[i] == DUMMY_PARENT) {
+ parent->id = FIELD_GET(CLK_PARENTS_ID, response->parents[i]);
+ if (response->parents[i] == DUMMY_PARENT) {
strcpy(parent->name, "dummy_name");
parent->flag = 0;
} else {
- parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
+ parent->flag = FIELD_GET(CLK_PARENTS_FLAGS,
+ response->parents[i]);
if (zynqmp_get_clock_name(parent->id, parent->name))
continue;
}
@@ -454,20 +482,21 @@ static int zynqmp_clock_get_parents(u32 clk_id, struct clock_parent *parents,
u32 *num_parents)
{
int j = 0, ret;
- u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
+ struct parents_resp response = { };
*num_parents = 0;
do {
/* Get parents from firmware */
- ret = zynqmp_pm_clock_get_parents(clk_id, j, pm_resp);
+ ret = zynqmp_pm_clock_get_parents(clock[clk_id].clk_id, j,
+ &response);
if (ret)
return ret;
- ret = __zynqmp_clock_get_parents(&parents[j], pm_resp,
+ ret = __zynqmp_clock_get_parents(&parents[j], &response,
num_parents);
if (ret == END_OF_PARENTS)
return 0;
- j += PM_API_PAYLOAD_LEN;
+ j += ARRAY_SIZE(response.parents);
} while (*num_parents <= MAX_PARENT);
return 0;
@@ -528,13 +557,14 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
const char **parent_names)
{
int j;
- u32 num_nodes;
+ u32 num_nodes, clk_dev_id;
char *clk_out = NULL;
struct clock_topology *nodes;
struct clk_hw *hw = NULL;
nodes = clock[clk_id].node;
num_nodes = clock[clk_id].num_nodes;
+ clk_dev_id = clock[clk_id].clk_id;
for (j = 0; j < num_nodes; j++) {
/*
@@ -551,13 +581,14 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
if (!clk_topology[nodes[j].type])
continue;
- hw = (*clk_topology[nodes[j].type])(clk_out, clk_id,
+ hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id,
parent_names,
num_parents,
&nodes[j]);
if (IS_ERR(hw))
- pr_warn_once("%s() %s register fail with %ld\n",
- __func__, clk_name, PTR_ERR(hw));
+ pr_warn_once("%s() 0x%x: %s register fail with %ld\n",
+ __func__, clk_dev_id, clk_name,
+ PTR_ERR(hw));
parent_names[0] = clk_out;
}
@@ -621,20 +652,33 @@ static int zynqmp_register_clocks(struct device_node *np)
static void zynqmp_get_clock_info(void)
{
int i, ret;
- u32 attr, type = 0;
+ u32 type = 0;
+ u32 nodetype, subclass, class;
+ struct attr_resp attr;
+ struct name_resp name;
for (i = 0; i < clock_max_idx; i++) {
- zynqmp_pm_clock_get_name(i, clock[i].clk_name);
- if (!strcmp(clock[i].clk_name, RESERVED_CLK_NAME))
- continue;
-
ret = zynqmp_pm_clock_get_attributes(i, &attr);
if (ret)
continue;
- clock[i].valid = attr & CLK_VALID_MASK;
- clock[i].type = attr >> CLK_TYPE_SHIFT ? CLK_TYPE_EXTERNAL :
- CLK_TYPE_OUTPUT;
+ clock[i].valid = FIELD_GET(CLK_ATTR_VALID, attr.attr[0]);
+ clock[i].type = FIELD_GET(CLK_ATTR_TYPE, attr.attr[0]) ?
+ CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
+
+ nodetype = FIELD_GET(CLK_ATTR_NODE_TYPE, attr.attr[0]);
+ subclass = FIELD_GET(CLK_ATTR_NODE_SUBCLASS, attr.attr[0]);
+ class = FIELD_GET(CLK_ATTR_NODE_CLASS, attr.attr[0]);
+
+ clock[i].clk_id = FIELD_PREP(CLK_ATTR_NODE_CLASS, class) |
+ FIELD_PREP(CLK_ATTR_NODE_SUBCLASS, subclass) |
+ FIELD_PREP(CLK_ATTR_NODE_TYPE, nodetype) |
+ FIELD_PREP(CLK_ATTR_NODE_INDEX, i);
+
+ zynqmp_pm_clock_get_name(clock[i].clk_id, &name);
+ if (!strcmp(name.name, RESERVED_CLK_NAME))
+ continue;
+ strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN);
}
/* Get topology of all clocks */
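zynqmp_get_clock_info() above rebuilds the firmware clock identifier from the decoded class/subclass/type fields plus the query index. A minimal sketch of that packing, again with an illustrative plain-C FIELD_PREP and hypothetical field values:

/* Compose a firmware clock id from its node fields, as in the hunk above. */
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
/* place the value at the mask's lowest set bit, like the kernel's FIELD_PREP() */
#define FIELD_PREP(mask, val) (((val) * ((mask) & ~((mask) << 1))) & (mask))

#define CLK_ATTR_NODE_INDEX	GENMASK(13, 0)
#define CLK_ATTR_NODE_TYPE	GENMASK(19, 14)
#define CLK_ATTR_NODE_SUBCLASS	GENMASK(25, 20)
#define CLK_ATTR_NODE_CLASS	GENMASK(31, 26)

int main(void)
{
	unsigned int class = 2, subclass = 1, nodetype = 3, index = 5; /* made up */
	unsigned int clk_id;

	clk_id = FIELD_PREP(CLK_ATTR_NODE_CLASS, class) |
		 FIELD_PREP(CLK_ATTR_NODE_SUBCLASS, subclass) |
		 FIELD_PREP(CLK_ATTR_NODE_TYPE, nodetype) |
		 FIELD_PREP(CLK_ATTR_NODE_INDEX, index);

	printf("clk_id = %#x\n", clk_id);	/* prints 0x810c005 */
	return 0;
}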
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index a371c66e72ef..d8f5b70d2709 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -31,12 +31,14 @@
* struct zynqmp_clk_divider - adjustable divider clock
* @hw: handle between common and hardware-specific interfaces
* @flags: Hardware specific flags
+ * @is_frac: The divider is a fractional divider
* @clk_id: Id of clock
* @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
*/
struct zynqmp_clk_divider {
struct clk_hw hw;
u8 flags;
+ bool is_frac;
u32 clk_id;
u32 div_type;
};
@@ -76,6 +78,13 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
else
value = div >> 16;
+ if (!value) {
+ WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ clk_name);
+ return parent_rate;
+ }
+
return DIV_ROUND_UP_ULL(parent_rate, value);
}
@@ -116,8 +125,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
bestdiv = zynqmp_divider_get_val(*prate, rate);
- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
- (divider->flags & CLK_FRAC))
+ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
bestdiv = rate % *prate ? 1 : bestdiv;
*prate = rate * bestdiv;
@@ -195,11 +203,13 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
init.name = name;
init.ops = &zynqmp_clk_divider_ops;
- init.flags = nodes->flag;
+ /* CLK_FRAC is not defined in the common clk framework */
+ init.flags = nodes->flag & ~CLK_FRAC;
init.parent_names = parents;
init.num_parents = 1;
/* struct clk_divider assignments */
+ div->is_frac = !!(nodes->flag & CLK_FRAC);
div->flags = nodes->type_flag;
div->hw.init = &init;
div->clk_id = clk_id;
@@ -214,4 +224,3 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
return hw;
}
-EXPORT_SYMBOL_GPL(zynqmp_clk_register_divider);
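The divider changes above add a guard so that a zero divisor read back from the hardware no longer ends in a division by zero: unless CLK_DIVIDER_ALLOW_ZERO is set, the driver warns and passes the parent rate through. A simplified standalone model of that recalc path (made-up rates, no clk framework types):

/* Simplified model of the divider recalc path with the zero-divisor guard. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static uint64_t recalc_rate(uint64_t parent_rate, unsigned int value,
			    bool allow_zero)
{
	if (!value) {
		if (!allow_zero)
			fprintf(stderr, "zero divisor and ALLOW_ZERO not set\n");
		return parent_rate;	/* pass the parent rate through */
	}
	return DIV_ROUND_UP(parent_rate, value);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)recalc_rate(1500000000ULL, 3, false));
	printf("%llu\n", (unsigned long long)recalc_rate(1500000000ULL, 0, false));
	return 0;
}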
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 171502a356aa..48321488f0fd 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -69,6 +69,13 @@ config FTTMR010_TIMER
Enables support for the Faraday Technology timer block
FTTMR010.
+config IXP4XX_TIMER
+ bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ Enables support for the Intel XScale IXP4xx SoC timer.
+
config ROCKCHIP_TIMER
bool "Rockchip timer driver" if COMPILE_TEST
depends on ARM || ARM64
@@ -145,6 +152,7 @@ config VT8500_TIMER
config NPCM7XX_TIMER
bool "NPCM7xx timer driver" if COMPILE_TEST
depends on HAS_IOMEM
+ select TIMER_OF
select CLKSRC_MMIO
help
Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index be6e0fbc7489..dba4eff880de 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
+obj-$(CONFIG_IXP4XX_TIMER) += timer-ixp4xx.o
obj-$(CONFIG_ROCKCHIP_TIMER) += timer-rockchip.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index aa4ec53281ce..b2a951a798e2 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*/
-#define pr_fmt(fmt) "arm_arch_timer: " fmt
+#define pr_fmt(fmt) "arch_timer: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
@@ -33,9 +33,6 @@
#include <clocksource/arm_arch_timer.h>
-#undef pr_fmt
-#define pr_fmt(fmt) "arch_timer: " fmt
-
#define CNTTIDR 0x08
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
@@ -152,6 +149,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}
+static u64 arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct_stable();
+}
+
+static u64 arch_counter_get_cntpct(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static u64 arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct_stable();
+}
+
+static u64 arch_counter_get_cntvct(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
/*
* Default to cp15 based access because arm64 uses this function for
* sched_clock() before DT is probed and the cp15 method is guaranteed
@@ -319,13 +336,6 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
}
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-static u64 notrace arm64_1188873_read_cntvct_el0(void)
-{
- return read_sysreg(cntvct_el0);
-}
-#endif
-
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
* The low bits of the counter registers are indeterminate while bit 10 or
@@ -372,8 +382,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
-DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
-EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
+static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
@@ -457,14 +466,6 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
- {
- .match_type = ate_match_local_cap_id,
- .id = (void *)ARM64_WORKAROUND_1188873,
- .desc = "ARM erratum 1188873",
- .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
- },
-#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
{
.match_type = ate_match_dt,
@@ -552,11 +553,8 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- /*
- * Use the locked version, as we're called from the CPU
- * hotplug framework. Otherwise, we end-up in deadlock-land.
- */
- static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
+ if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
+ atomic_set(&timer_unstable_counter_workaround_in_use, 1);
/*
* Don't use the vdso fastpath if errata require using the
@@ -573,7 +571,7 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
void *arg)
{
- const struct arch_timer_erratum_workaround *wa;
+ const struct arch_timer_erratum_workaround *wa, *__wa;
ate_match_fn_t match_fn = NULL;
bool local = false;
@@ -597,53 +595,32 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t
if (!wa)
return;
- if (needs_unstable_timer_counter_workaround()) {
- const struct arch_timer_erratum_workaround *__wa;
- __wa = __this_cpu_read(timer_unstable_counter_workaround);
- if (__wa && wa != __wa)
- pr_warn("Can't enable workaround for %s (clashes with %s\n)",
- wa->desc, __wa->desc);
+ __wa = __this_cpu_read(timer_unstable_counter_workaround);
+ if (__wa && wa != __wa)
+ pr_warn("Can't enable workaround for %s (clashes with %s)\n",
+ wa->desc, __wa->desc);
- if (__wa)
- return;
- }
+ if (__wa)
+ return;
arch_timer_enable_workaround(wa, local);
pr_info("Enabling %s workaround for %s\n",
local ? "local" : "global", wa->desc);
}
-#define erratum_handler(fn, r, ...) \
-({ \
- bool __val; \
- if (needs_unstable_timer_counter_workaround()) { \
- const struct arch_timer_erratum_workaround *__wa; \
- __wa = __this_cpu_read(timer_unstable_counter_workaround); \
- if (__wa && __wa->fn) { \
- r = __wa->fn(__VA_ARGS__); \
- __val = true; \
- } else { \
- __val = false; \
- } \
- } else { \
- __val = false; \
- } \
- __val; \
-})
-
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
- const struct arch_timer_erratum_workaround *wa;
+ return has_erratum_handler(read_cntvct_el0);
+}
- wa = __this_cpu_read(timer_unstable_counter_workaround);
- return wa && wa->read_cntvct_el0;
+static bool arch_timer_counter_has_wa(void)
+{
+ return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
-#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
-#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
-#define erratum_handler(fn, r, ...) ({false;})
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
+#define arch_timer_counter_has_wa() ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
@@ -736,11 +713,6 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_virt, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
@@ -748,11 +720,6 @@ static int arch_timer_set_next_event_virt(unsigned long evt,
static int arch_timer_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_phys, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
@@ -777,6 +744,10 @@ static void __arch_timer_setup(unsigned type,
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
+ typeof(clk->set_next_event) sne;
+
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+
if (arch_timer_c3stop)
clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
@@ -787,20 +758,20 @@ static void __arch_timer_setup(unsigned type,
case ARCH_TIMER_VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- clk->set_next_event = arch_timer_set_next_event_virt;
+ sne = erratum_handler(set_next_event_virt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
case ARCH_TIMER_HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- clk->set_next_event = arch_timer_set_next_event_phys;
+ sne = erratum_handler(set_next_event_phys);
break;
default:
BUG();
}
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+ clk->set_next_event = sne;
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
@@ -833,7 +804,11 @@ static void arch_timer_evtstrm_enable(int divider)
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
| ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl);
+#ifdef CONFIG_ARM64
+ cpu_set_named_feature(EVTSTRM);
+#else
elf_hwcap |= HWCAP_EVTSTRM;
+#endif
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
@@ -998,12 +973,22 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
+ u64 (*rd)(void);
+
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
- arch_timer_read_counter = arch_counter_get_cntvct;
- else
- arch_timer_read_counter = arch_counter_get_cntpct;
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntvct_stable;
+ else
+ rd = arch_counter_get_cntvct;
+ } else {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntpct_stable;
+ else
+ rd = arch_counter_get_cntpct;
+ }
+ arch_timer_read_counter = rd;
clocksource_counter.archdata.vdso_direct = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
@@ -1055,7 +1040,11 @@ static int arch_timer_cpu_pm_notify(struct notifier_block *self,
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
+#ifdef CONFIG_ARM64
+ if (cpu_have_named_feature(EVTSTRM))
+#else
if (elf_hwcap & HWCAP_EVTSTRM)
+#endif
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
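The arm_arch_timer changes above drop the static-key fast path and instead choose the counter accessor once, at registration time: the "stable" (workaround-aware) reader is used only if an enabled erratum actually hooks the counter reads, and the same pattern selects set_next_event through erratum_handler(). A minimal standalone sketch of that selection with stand-in reader functions:

/* Minimal model of choosing the counter accessor once at registration time.
 * The reader functions are hypothetical stand-ins for the CNTVCT/CNTPCT
 * accessors. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t read_cntvct(void)        { return 1000; } /* fast path */
static uint64_t read_cntvct_stable(void) { return 1001; } /* erratum-safe path */

static uint64_t (*arch_timer_read_counter)(void);

static void counter_register(bool use_virt, bool counter_has_wa)
{
	if (use_virt)
		arch_timer_read_counter = counter_has_wa ?
			read_cntvct_stable : read_cntvct;
	/* the physical-counter branch is analogous and omitted here */
}

int main(void)
{
	counter_register(true, true);
	printf("count = %llu\n",
	       (unsigned long long)arch_timer_read_counter());
	return 0;
}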
diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c
index 846d18daf893..e1c34b2f53a5 100644
--- a/drivers/clocksource/timer-fsl-ftm.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
@@ -19,20 +19,9 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
+#include <linux/fsl/ftm.h>
-#define FTM_SC 0x00
-#define FTM_SC_CLK_SHIFT 3
-#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT)
-#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT)
-#define FTM_SC_PS_MASK 0x7
-#define FTM_SC_TOIE BIT(6)
-#define FTM_SC_TOF BIT(7)
-
-#define FTM_CNT 0x04
-#define FTM_MOD 0x08
-#define FTM_CNTIN 0x4C
-
-#define FTM_PS_MAX 7
+#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
struct ftm_clock_device {
void __iomem *clksrc_base;
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
new file mode 100644
index 000000000000..5c2190b654cd
--- /dev/null
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IXP4 timer driver
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+/* Goes away with OF conversion */
+#include <linux/platform_data/timer-ixp4xx.h>
+
+/*
+ * Constants to make it easy to access Timer Control/Status registers
+ */
+#define IXP4XX_OSTS_OFFSET 0x00 /* Continuous Timestamp */
+#define IXP4XX_OST1_OFFSET 0x04 /* Timer 1 Timestamp */
+#define IXP4XX_OSRT1_OFFSET 0x08 /* Timer 1 Reload */
+#define IXP4XX_OST2_OFFSET 0x0C /* Timer 2 Timestamp */
+#define IXP4XX_OSRT2_OFFSET 0x10 /* Timer 2 Reload */
+#define IXP4XX_OSWT_OFFSET 0x14 /* Watchdog Timer */
+#define IXP4XX_OSWE_OFFSET 0x18 /* Watchdog Enable */
+#define IXP4XX_OSWK_OFFSET 0x1C /* Watchdog Key */
+#define IXP4XX_OSST_OFFSET 0x20 /* Timer Status */
+
+/*
+ * Timer register values and bit definitions
+ */
+#define IXP4XX_OST_ENABLE 0x00000001
+#define IXP4XX_OST_ONE_SHOT 0x00000002
+/* Low order bits of reload value ignored */
+#define IXP4XX_OST_RELOAD_MASK 0x00000003
+#define IXP4XX_OST_DISABLED 0x00000000
+#define IXP4XX_OSST_TIMER_1_PEND 0x00000001
+#define IXP4XX_OSST_TIMER_2_PEND 0x00000002
+#define IXP4XX_OSST_TIMER_TS_PEND 0x00000004
+#define IXP4XX_OSST_TIMER_WDOG_PEND 0x00000008
+#define IXP4XX_OSST_TIMER_WARM_RESET 0x00000010
+
+#define IXP4XX_WDT_KEY 0x0000482E
+#define IXP4XX_WDT_RESET_ENABLE 0x00000001
+#define IXP4XX_WDT_IRQ_ENABLE 0x00000002
+#define IXP4XX_WDT_COUNT_ENABLE 0x00000004
+
+struct ixp4xx_timer {
+ void __iomem *base;
+ unsigned int tick_rate;
+ u32 latch;
+ struct clock_event_device clkevt;
+#ifdef CONFIG_ARM
+ struct delay_timer delay_timer;
+#endif
+};
+
+/*
+ * A local singleton used by sched_clock and delay timer reads, which are
+ * fast and stateless
+ */
+static struct ixp4xx_timer *local_ixp4xx_timer;
+
+static inline struct ixp4xx_timer *
+to_ixp4xx_timer(struct clock_event_device *evt)
+{
+ return container_of(evt, struct ixp4xx_timer, clkevt);
+}
+
+static u64 notrace ixp4xx_read_sched_clock(void)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
+{
+ struct ixp4xx_timer *tmr = dev_id;
+ struct clock_event_device *evt = &tmr->clkevt;
+
+ /* Clear Pending Interrupt */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int ixp4xx_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ /* Keep enable/oneshot bits */
+ val &= IXP4XX_OST_RELOAD_MASK;
+ __raw_writel((cycles & ~IXP4XX_OST_RELOAD_MASK) | val,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_shutdown(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val &= ~IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_oneshot(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+
+ __raw_writel(IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_periodic(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = tmr->latch & ~IXP4XX_OST_RELOAD_MASK;
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_resume(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+/*
+ * IXP4xx timer tick
+ * We use OS timer1 on the CPU for the timer tick and the timestamp
+ * counter as a source of real clock ticks to account for missed jiffies.
+ */
+static __init int ixp4xx_timer_register(void __iomem *base,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ struct ixp4xx_timer *tmr;
+ int ret;
+
+ tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
+ if (!tmr)
+ return -ENOMEM;
+ tmr->base = base;
+ tmr->tick_rate = timer_freq;
+
+ /*
+ * The timer register doesn't allow specifying the two least
+ * significant bits of the timeout value and assumes they are zero.
+ * So round the latch to the closest value that keeps the two least
+ * significant bits clear.
+ */
+ tmr->latch = DIV_ROUND_CLOSEST(timer_freq,
+ (IXP4XX_OST_RELOAD_MASK + 1) * HZ)
+ * (IXP4XX_OST_RELOAD_MASK + 1);
+
+ local_ixp4xx_timer = tmr;
+
+ /* Reset/disable counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ /* Clear any pending interrupt on timer 1 */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ /* Reset time-stamp counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSTS_OFFSET);
+
+ clocksource_mmio_init(NULL, "OSTS", timer_freq, 200, 32,
+ ixp4xx_clocksource_read);
+
+ tmr->clkevt.name = "ixp4xx timer1";
+ tmr->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ tmr->clkevt.rating = 200;
+ tmr->clkevt.set_state_shutdown = ixp4xx_shutdown;
+ tmr->clkevt.set_state_periodic = ixp4xx_set_periodic;
+ tmr->clkevt.set_state_oneshot = ixp4xx_set_oneshot;
+ tmr->clkevt.tick_resume = ixp4xx_resume;
+ tmr->clkevt.set_next_event = ixp4xx_set_next_event;
+ tmr->clkevt.cpumask = cpumask_of(0);
+ tmr->clkevt.irq = timer_irq;
+ ret = request_irq(timer_irq, ixp4xx_timer_interrupt,
+ IRQF_TIMER, "IXP4XX-TIMER1", tmr);
+ if (ret) {
+ pr_crit("no timer IRQ\n");
+ return -ENODEV;
+ }
+ clockevents_config_and_register(&tmr->clkevt, timer_freq,
+ 0xf, 0xfffffffe);
+
+ sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
+ * @timerbase: physical base of timer block
+ * @timer_irq: Linux IRQ number for the timer
+ * @timer_freq: Fixed frequency of the timer
+ */
+void __init ixp4xx_timer_setup(resource_size_t timerbase,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ void __iomem *base;
+
+ base = ioremap(timerbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return;
+ }
+ ixp4xx_timer_register(base, timer_irq, timer_freq);
+}
+EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
+
+#ifdef CONFIG_OF
+static __init int ixp4xx_of_timer_init(struct device_node *np)
+{
+ void __iomem *base;
+ int irq;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return -ENODEV;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ\n");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ /* TODO: get some fixed clocks into the device tree */
+ ret = ixp4xx_timer_register(base, irq, 66666000);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ iounmap(base);
+ return ret;
+}
+TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
+#endif
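The latch computed in ixp4xx_timer_register() above must keep its two least significant bits clear because the hardware ignores them. A standalone arithmetic check of that rounding follows; DIV_ROUND_CLOSEST is open-coded, and HZ=100 is assumed only for the example (66666000 Hz is the rate used in the OF probe path):

/* Worked example of the latch rounding used above: round timer_freq / HZ to
 * the nearest multiple of 4 so the two reserved low bits stay zero. */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define RELOAD_MASK		0x3
#define HZ			100	/* assumption for this example */

int main(void)
{
	unsigned int timer_freq = 66666000;
	unsigned int latch;

	latch = DIV_ROUND_CLOSEST(timer_freq, (RELOAD_MASK + 1) * HZ)
		* (RELOAD_MASK + 1);

	/* 66666000 / 400 = 166665 -> latch = 666660, low two bits clear */
	printf("latch = %u (%%4 == %u)\n", latch, latch % 4);
	return 0;
}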
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
index eed6feff8b5f..30c6f4ce672b 100644
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -296,4 +296,4 @@ err_alloc:
TIMER_OF_DECLARE(ox810se_rps,
"oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
TIMER_OF_DECLARE(ox820_rps,
- "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
+ "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 3352da6ed61f..e40b55a7086f 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
return 0;
}
-/* Optimized set_load which removes costly spin wait in timer_start */
-static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
- int autoreload, unsigned int load)
-{
- u32 l;
-
- if (unlikely(!timer))
- return -EINVAL;
-
- omap_dm_timer_enable(timer);
-
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- if (autoreload) {
- l |= OMAP_TIMER_CTRL_AR;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
- } else {
- l &= ~OMAP_TIMER_CTRL_AR;
- }
- l |= OMAP_TIMER_CTRL_ST;
-
- __omap_dm_timer_load_start(timer, l, load, timer->posted);
-
- /* Save the context */
- timer->context.tclr = l;
- timer->context.tldr = load;
- timer->context.tcrr = load;
- return 0;
-}
static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
unsigned int match)
{
@@ -998,5 +970,4 @@ module_platform_driver(omap_dm_timer_driver);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
new file mode 100644
index 000000000000..4fa2931dcb7b
--- /dev/null
+++ b/drivers/counter/104-quad-8.c
@@ -0,0 +1,1367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Counter driver for the ACCES 104-QUAD-8
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
+ */
+#include <linux/bitops.h>
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#define QUAD8_EXTENT 32
+
+static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
+static unsigned int num_quad8;
+module_param_array(base, uint, &num_quad8, 0);
+MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+
+#define QUAD8_NUM_COUNTERS 8
+
+/**
+ * struct quad8_iio - IIO device private data structure
+ * @counter: instance of the counter_device
+ * @preset: array of preset values
+ * @count_mode: array of count mode configurations
+ * @quadrature_mode: array of quadrature mode configurations
+ * @quadrature_scale: array of quadrature mode scale configurations
+ * @ab_enable: array of A and B inputs enable configurations
+ * @preset_enable: array of set_to_preset_on_index attribute configurations
+ * @synchronous_mode: array of index function synchronous mode configurations
+ * @index_polarity: array of index function polarity configurations
+ * @base: base port address of the IIO device
+ */
+struct quad8_iio {
+ struct counter_device counter;
+ unsigned int preset[QUAD8_NUM_COUNTERS];
+ unsigned int count_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
+ unsigned int ab_enable[QUAD8_NUM_COUNTERS];
+ unsigned int preset_enable[QUAD8_NUM_COUNTERS];
+ unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
+ unsigned int index_polarity[QUAD8_NUM_COUNTERS];
+ unsigned int base;
+};
+
+#define QUAD8_REG_CHAN_OP 0x11
+#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
+/* Borrow Toggle flip-flop */
+#define QUAD8_FLAG_BT BIT(0)
+/* Carry Toggle flip-flop */
+#define QUAD8_FLAG_CT BIT(1)
+/* Error flag */
+#define QUAD8_FLAG_E BIT(4)
+/* Up/Down flag */
+#define QUAD8_FLAG_UD BIT(5)
+/* Reset and Load Signal Decoders */
+#define QUAD8_CTR_RLD 0x00
+/* Counter Mode Register */
+#define QUAD8_CTR_CMR 0x20
+/* Input / Output Control Register */
+#define QUAD8_CTR_IOR 0x40
+/* Index Control Register */
+#define QUAD8_CTR_IDR 0x60
+/* Reset Byte Pointer (three byte data pointer) */
+#define QUAD8_RLD_RESET_BP 0x01
+/* Reset Counter */
+#define QUAD8_RLD_RESET_CNTR 0x02
+/* Reset Borrow Toggle, Carry Toggle, Compare Toggle, and Sign flags */
+#define QUAD8_RLD_RESET_FLAGS 0x04
+/* Reset Error flag */
+#define QUAD8_RLD_RESET_E 0x06
+/* Preset Register to Counter */
+#define QUAD8_RLD_PRESET_CNTR 0x08
+/* Transfer Counter to Output Latch */
+#define QUAD8_RLD_CNTR_OUT 0x10
+#define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00
+#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
+#define QUAD8_CMR_QUADRATURE_X1 0x08
+#define QUAD8_CMR_QUADRATURE_X2 0x10
+#define QUAD8_CMR_QUADRATURE_X4 0x18
+
+
+static int quad8_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int flags;
+ unsigned int borrow;
+ unsigned int carry;
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX) {
+ *val = !!(inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
+ & BIT(chan->channel));
+ return IIO_VAL_INT;
+ }
+
+ flags = inb(base_offset + 1);
+ borrow = flags & QUAD8_FLAG_BT;
+ carry = !!(flags & QUAD8_FLAG_CT);
+
+ /* Borrow XOR Carry effectively doubles count range */
+ *val = (borrow ^ carry) << 24;
+
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ base_offset + 1);
+
+ for (i = 0; i < 3; i++)
+ *val |= (unsigned int)inb(base_offset) << (8 * i);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = priv->ab_enable[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ *val2 = priv->quadrature_scale[chan->channel];
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
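quad8_read_raw() above widens the 24-bit hardware count by folding Borrow XOR Carry into bit 24 and then latching three bytes from the data port. A standalone sketch of that assembly, with made-up flag and latch bytes standing in for the inb() reads:

/* Sketch of assembling the 25-bit count value read in quad8_read_raw(). */
#include <stdint.h>
#include <stdio.h>

#define FLAG_BT	(1u << 0)	/* Borrow Toggle */
#define FLAG_CT	(1u << 1)	/* Carry Toggle */

int main(void)
{
	uint8_t flags = FLAG_CT;			/* carry seen, no borrow */
	uint8_t latch[3] = { 0x34, 0x12, 0xab };	/* low, mid, high byte */
	unsigned int borrow = flags & FLAG_BT;
	unsigned int carry = !!(flags & FLAG_CT);
	unsigned int val, i;

	/* Borrow XOR Carry effectively doubles the count range (bit 24) */
	val = (borrow ^ carry) << 24;
	for (i = 0; i < 3; i++)
		val |= (unsigned int)latch[i] << (8 * i);

	printf("count = 0x%07x\n", val);	/* prints 0x1ab1234 */
	return 0;
}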
+static int quad8_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ int i;
+ unsigned int ior_cfg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX)
+ return -EINVAL;
+
+ /* Only 24-bit values are supported */
+ if ((unsigned int)val > 0xFFFFFF)
+ return -EINVAL;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Counter can only be set via Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Transfer Preset Register to Counter */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register back to original value */
+ val = priv->preset[chan->channel];
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
+ /* Reset Error flag */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+
+ return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ /* only boolean values accepted */
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ priv->ab_enable[chan->channel] = val;
+
+ ior_cfg = val | priv->preset_enable[chan->channel] << 1;
+
+ /* Load I/O control configuration */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
+ /* Quadrature scaling only available in quadrature mode */
+ if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+ return -EINVAL;
+
+ /* Only three gain states (1, 0.5, 0.25) */
+ if (val == 1 && !val2)
+ priv->quadrature_scale[chan->channel] = 0;
+ else if (!val)
+ switch (val2) {
+ case 500000:
+ priv->quadrature_scale[chan->channel] = 1;
+ break;
+ case 250000:
+ priv->quadrature_scale[chan->channel] = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
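For the SCALE channel, the read path above reports val=1, val2=scale with IIO_VAL_FRACTIONAL_LOG2, i.e. a gain of 1/2^scale (1, 0.5, 0.25), and the write path maps exactly those three fractions back to the register's scale index. A small standalone sketch of that mapping, with the IIO plumbing omitted:

/* Sketch of the quadrature scale mapping used by the read/write paths above:
 * index 0/1/2 <-> gain 1, 0.5, 0.25 (IIO_VAL_FRACTIONAL_LOG2: val / 2^val2). */
#include <stdio.h>

static int scale_to_index(int val, int val2)
{
	if (val == 1 && !val2)
		return 0;		/* gain 1    (x1 quadrature) */
	if (!val && val2 == 500000)
		return 1;		/* gain 0.5  (x2 quadrature) */
	if (!val && val2 == 250000)
		return 2;		/* gain 0.25 (x4 quadrature) */
	return -1;			/* -EINVAL in the driver */
}

int main(void)
{
	int idx;

	for (idx = 0; idx <= 2; idx++)
		printf("index %d -> gain 1/%d\n", idx, 1 << idx);

	printf("write 0.250000 -> index %d\n", scale_to_index(0, 250000));
	return 0;
}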
+static const struct iio_info quad8_info = {
+ .read_raw = quad8_read_raw,
+ .write_raw = quad8_write_raw
+};
+
+static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
+}
+
+static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int preset;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
+ priv->preset[chan->channel] = preset;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+
+ return len;
+}
+
+static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ !priv->preset_enable[chan->channel]);
+}
+
+static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+ bool preset_enable;
+ int ret;
+ unsigned int ior_cfg;
+
+ ret = kstrtobool(buf, &preset_enable);
+ if (ret)
+ return ret;
+
+ /* Preset enable is active low in Input/Output Control register */
+ preset_enable = !preset_enable;
+
+ priv->preset_enable[chan->channel] = preset_enable;
+
+ ior_cfg = priv->ab_enable[chan->channel] |
+ (unsigned int)preset_enable << 1;
+
+ /* Load I/O control configuration to Input / Output Control Register */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+
+ return len;
+}
+
+static const char *const quad8_noise_error_states[] = {
+ "No excessive noise is present at the count inputs",
+ "Excessive noise is present at the count inputs"
+};
+
+static int quad8_get_noise_error(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & QUAD8_FLAG_E);
+}
+
+static const struct iio_enum quad8_noise_error_enum = {
+ .items = quad8_noise_error_states,
+ .num_items = ARRAY_SIZE(quad8_noise_error_states),
+ .get = quad8_get_noise_error
+};
+
+static const char *const quad8_count_direction_states[] = {
+ "down",
+ "up"
+};
+
+static int quad8_get_count_direction(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & QUAD8_FLAG_UD);
+}
+
+static const struct iio_enum quad8_count_direction_enum = {
+ .items = quad8_count_direction_states,
+ .num_items = ARRAY_SIZE(quad8_count_direction_states),
+ .get = quad8_get_count_direction
+};
+
+static const char *const quad8_count_modes[] = {
+ "normal",
+ "range limit",
+ "non-recycle",
+ "modulo-n"
+};
+
+static int quad8_set_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int cnt_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = cnt_mode << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->count_mode[chan->channel] = cnt_mode;
+
+ /* Add quadrature mode configuration */
+ if (priv->quadrature_mode[chan->channel])
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->count_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_count_mode_enum = {
+ .items = quad8_count_modes,
+ .num_items = ARRAY_SIZE(quad8_count_modes),
+ .set = quad8_set_count_mode,
+ .get = quad8_get_count_mode
+};
+
+static const char *const quad8_synchronous_modes[] = {
+ "non-synchronous",
+ "synchronous"
+};
+
+static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int synchronous_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = synchronous_mode |
+ priv->index_polarity[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+ if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+ return -EINVAL;
+
+ priv->synchronous_mode[chan->channel] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->synchronous_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_synchronous_mode_enum = {
+ .items = quad8_synchronous_modes,
+ .num_items = ARRAY_SIZE(quad8_synchronous_modes),
+ .set = quad8_set_synchronous_mode,
+ .get = quad8_get_synchronous_mode
+};
+
+static const char *const quad8_quadrature_modes[] = {
+ "non-quadrature",
+ "quadrature"
+};
+
+static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int quadrature_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ if (quadrature_mode)
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+ else {
+ /* Quadrature scaling only available in quadrature mode */
+ priv->quadrature_scale[chan->channel] = 0;
+
+ /* Synchronous function not supported in non-quadrature mode */
+ if (priv->synchronous_mode[chan->channel])
+ quad8_set_synchronous_mode(indio_dev, chan, 0);
+ }
+
+ priv->quadrature_mode[chan->channel] = quadrature_mode;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->quadrature_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_quadrature_mode_enum = {
+ .items = quad8_quadrature_modes,
+ .num_items = ARRAY_SIZE(quad8_quadrature_modes),
+ .set = quad8_set_quadrature_mode,
+ .get = quad8_get_quadrature_mode
+};
+
+static const char *const quad8_index_polarity_modes[] = {
+ "negative",
+ "positive"
+};
+
+static int quad8_set_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int index_polarity)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
+ index_polarity << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->index_polarity[chan->channel] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->index_polarity[chan->channel];
+}
+
+static const struct iio_enum quad8_index_polarity_enum = {
+ .items = quad8_index_polarity_modes,
+ .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
+ .set = quad8_set_index_polarity,
+ .get = quad8_get_index_polarity
+};
+
+static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_preset,
+ .write = quad8_write_preset
+ },
+ {
+ .name = "set_to_preset_on_index",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_set_to_preset_on_index,
+ .write = quad8_write_set_to_preset_on_index
+ },
+ IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
+ IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
+ IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
+ IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
+ IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
+ IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
+ {}
+};
+
+static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
+ IIO_ENUM("synchronous_mode", IIO_SEPARATE,
+ &quad8_synchronous_mode_enum),
+ IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
+ IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
+ IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
+ {}
+};
+
+#define QUAD8_COUNT_CHAN(_chan) { \
+ .type = IIO_COUNT, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = quad8_count_ext_info, \
+ .indexed = 1 \
+}
+
+#define QUAD8_INDEX_CHAN(_chan) { \
+ .type = IIO_INDEX, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .ext_info = quad8_index_ext_info, \
+ .indexed = 1 \
+}
+
+static const struct iio_chan_spec quad8_channels[] = {
+ QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
+ QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
+ QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
+ QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
+ QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
+ QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
+ QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
+ QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
+};
+
+static int quad8_signal_read(struct counter_device *counter,
+ struct counter_signal *signal, struct counter_signal_read_value *val)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ unsigned int state;
+ enum counter_signal_level level;
+
+ /* Only Index signal levels can be read */
+ if (signal->id < 16)
+ return -EINVAL;
+
+ state = inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
+ & BIT(signal->id - 16);
+
+ level = (state) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
+
+ counter_signal_read_value_set(val, COUNTER_SIGNAL_LEVEL, &level);
+
+ return 0;
+}
+
+static int quad8_count_read(struct counter_device *counter,
+ struct counter_count *count, struct counter_count_read_value *val)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ unsigned int flags;
+ unsigned int borrow;
+ unsigned int carry;
+ unsigned long position;
+ int i;
+
+ flags = inb(base_offset + 1);
+ borrow = flags & QUAD8_FLAG_BT;
+ carry = !!(flags & QUAD8_FLAG_CT);
+
+ /* Borrow XOR Carry effectively doubles count range */
+ position = (unsigned long)(borrow ^ carry) << 24;
+
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ base_offset + 1);
+
+ for (i = 0; i < 3; i++)
+ position |= (unsigned long)inb(base_offset) << (8 * i);
+
+ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
+
+ return 0;
+}
+
+static int quad8_count_write(struct counter_device *counter,
+ struct counter_count *count, struct counter_count_write_value *val)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ int err;
+ unsigned long position;
+ int i;
+
+ err = counter_count_write_value_get(&position, COUNTER_COUNT_POSITION,
+ val);
+ if (err)
+ return err;
+
+ /* Only 24-bit values are supported */
+ if (position > 0xFFFFFF)
+ return -EINVAL;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Counter can only be set via Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(position >> (8 * i), base_offset);
+
+ /* Transfer Preset Register to Counter */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register back to original value */
+ position = priv->preset[count->id];
+ for (i = 0; i < 3; i++)
+ outb(position >> (8 * i), base_offset);
+
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
+ /* Reset Error flag */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+
+ return 0;
+}
+
+enum quad8_count_function {
+ QUAD8_COUNT_FUNCTION_PULSE_DIRECTION = 0,
+ QUAD8_COUNT_FUNCTION_QUADRATURE_X1,
+ QUAD8_COUNT_FUNCTION_QUADRATURE_X2,
+ QUAD8_COUNT_FUNCTION_QUADRATURE_X4
+};
+
+static enum counter_count_function quad8_count_functions_list[] = {
+ [QUAD8_COUNT_FUNCTION_PULSE_DIRECTION] = COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
+ [QUAD8_COUNT_FUNCTION_QUADRATURE_X1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
+ [QUAD8_COUNT_FUNCTION_QUADRATURE_X2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
+ [QUAD8_COUNT_FUNCTION_QUADRATURE_X4] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4
+};
+
+static int quad8_function_get(struct counter_device *counter,
+ struct counter_count *count, size_t *function)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const int id = count->id;
+ const unsigned int quadrature_mode = priv->quadrature_mode[id];
+ const unsigned int scale = priv->quadrature_scale[id];
+
+ if (quadrature_mode)
+ switch (scale) {
+ case 0:
+ *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1;
+ break;
+ case 1:
+ *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X2;
+ break;
+ case 2:
+ *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X4;
+ break;
+ }
+ else
+ *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION;
+
+ return 0;
+}
+
+static int quad8_function_set(struct counter_device *counter,
+ struct counter_count *count, size_t function)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const int id = count->id;
+ unsigned int *const quadrature_mode = priv->quadrature_mode + id;
+ unsigned int *const scale = priv->quadrature_scale + id;
+ unsigned int mode_cfg = priv->count_mode[id] << 1;
+ unsigned int *const synchronous_mode = priv->synchronous_mode + id;
+ const unsigned int idr_cfg = priv->index_polarity[id] << 1;
+ const int base_offset = priv->base + 2 * id + 1;
+
+ if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) {
+ *quadrature_mode = 0;
+
+ /* Quadrature scaling only available in quadrature mode */
+ *scale = 0;
+
+ /* Synchronous function not supported in non-quadrature mode */
+ if (*synchronous_mode) {
+ *synchronous_mode = 0;
+ /* Disable synchronous function mode */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ }
+ } else {
+ *quadrature_mode = 1;
+
+ switch (function) {
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X1:
+ *scale = 0;
+ mode_cfg |= QUAD8_CMR_QUADRATURE_X1;
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X2:
+ *scale = 1;
+ mode_cfg |= QUAD8_CMR_QUADRATURE_X2;
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X4:
+ *scale = 2;
+ mode_cfg |= QUAD8_CMR_QUADRATURE_X4;
+ break;
+ }
+ }
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static void quad8_direction_get(struct counter_device *counter,
+ struct counter_count *count, enum counter_count_direction *direction)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ unsigned int ud_flag;
+ const unsigned int flag_addr = priv->base + 2 * count->id + 1;
+
+ /* U/D flag: nonzero = up, zero = down */
+ ud_flag = inb(flag_addr) & QUAD8_FLAG_UD;
+
+ *direction = (ud_flag) ? COUNTER_COUNT_DIRECTION_FORWARD :
+ COUNTER_COUNT_DIRECTION_BACKWARD;
+}
+
+enum quad8_synapse_action {
+ QUAD8_SYNAPSE_ACTION_NONE = 0,
+ QUAD8_SYNAPSE_ACTION_RISING_EDGE,
+ QUAD8_SYNAPSE_ACTION_FALLING_EDGE,
+ QUAD8_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+static enum counter_synapse_action quad8_index_actions_list[] = {
+ [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE
+};
+
+static enum counter_synapse_action quad8_synapse_actions_list[] = {
+ [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ [QUAD8_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ [QUAD8_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+static int quad8_action_get(struct counter_device *counter,
+ struct counter_count *count, struct counter_synapse *synapse,
+ size_t *action)
+{
+ struct quad8_iio *const priv = counter->priv;
+ int err;
+ size_t function = 0;
+ const size_t signal_a_id = count->synapses[0].signal->id;
+ enum counter_count_direction direction;
+
+ /* Handle Index signals */
+ if (synapse->signal->id >= 16) {
+ if (priv->preset_enable[count->id])
+ *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ else
+ *action = QUAD8_SYNAPSE_ACTION_NONE;
+
+ return 0;
+ }
+
+ err = quad8_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ /* Default action mode */
+ *action = QUAD8_SYNAPSE_ACTION_NONE;
+
+ /* Determine action mode based on current count function mode */
+ switch (function) {
+ case QUAD8_COUNT_FUNCTION_PULSE_DIRECTION:
+ if (synapse->signal->id == signal_a_id)
+ *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X1:
+ if (synapse->signal->id == signal_a_id) {
+ quad8_direction_get(counter, count, &direction);
+
+ if (direction == COUNTER_COUNT_DIRECTION_FORWARD)
+ *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ else
+ *action = QUAD8_SYNAPSE_ACTION_FALLING_EDGE;
+ }
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X2:
+ if (synapse->signal->id == signal_a_id)
+ *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X4:
+ *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct counter_ops quad8_ops = {
+ .signal_read = quad8_signal_read,
+ .count_read = quad8_count_read,
+ .count_write = quad8_count_write,
+ .function_get = quad8_function_get,
+ .function_set = quad8_function_set,
+ .action_get = quad8_action_get
+};
+
+static int quad8_index_polarity_get(struct counter_device *counter,
+ struct counter_signal *signal, size_t *index_polarity)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+
+ *index_polarity = priv->index_polarity[channel_id];
+
+ return 0;
+}
+
+static int quad8_index_polarity_set(struct counter_device *counter,
+ struct counter_signal *signal, size_t index_polarity)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+ const unsigned int idr_cfg = priv->synchronous_mode[channel_id] |
+ index_polarity << 1;
+ const int base_offset = priv->base + 2 * channel_id + 1;
+
+ priv->index_polarity[channel_id] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static struct counter_signal_enum_ext quad8_index_pol_enum = {
+ .items = quad8_index_polarity_modes,
+ .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
+ .get = quad8_index_polarity_get,
+ .set = quad8_index_polarity_set
+};
+
+static int quad8_synchronous_mode_get(struct counter_device *counter,
+ struct counter_signal *signal, size_t *synchronous_mode)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+
+ *synchronous_mode = priv->synchronous_mode[channel_id];
+
+ return 0;
+}
+
+static int quad8_synchronous_mode_set(struct counter_device *counter,
+ struct counter_signal *signal, size_t synchronous_mode)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+ const unsigned int idr_cfg = synchronous_mode |
+ priv->index_polarity[channel_id] << 1;
+ const int base_offset = priv->base + 2 * channel_id + 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+ if (synchronous_mode && !priv->quadrature_mode[channel_id])
+ return -EINVAL;
+
+ priv->synchronous_mode[channel_id] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static struct counter_signal_enum_ext quad8_syn_mode_enum = {
+ .items = quad8_synchronous_modes,
+ .num_items = ARRAY_SIZE(quad8_synchronous_modes),
+ .get = quad8_synchronous_mode_get,
+ .set = quad8_synchronous_mode_set
+};
+
+static ssize_t quad8_count_floor_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ /* Only a floor of 0 is supported */
+ return sprintf(buf, "0\n");
+}
+
+static int quad8_count_mode_get(struct counter_device *counter,
+ struct counter_count *count, size_t *cnt_mode)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ /* Map 104-QUAD-8 count mode to Generic Counter count mode */
+ switch (priv->count_mode[count->id]) {
+ case 0:
+ *cnt_mode = COUNTER_COUNT_MODE_NORMAL;
+ break;
+ case 1:
+ *cnt_mode = COUNTER_COUNT_MODE_RANGE_LIMIT;
+ break;
+ case 2:
+ *cnt_mode = COUNTER_COUNT_MODE_NON_RECYCLE;
+ break;
+ case 3:
+ *cnt_mode = COUNTER_COUNT_MODE_MODULO_N;
+ break;
+ }
+
+ return 0;
+}
+
+static int quad8_count_mode_set(struct counter_device *counter,
+ struct counter_count *count, size_t cnt_mode)
+{
+ struct quad8_iio *const priv = counter->priv;
+ unsigned int mode_cfg;
+ const int base_offset = priv->base + 2 * count->id + 1;
+
+ /* Map Generic Counter count mode to 104-QUAD-8 count mode */
+ switch (cnt_mode) {
+ case COUNTER_COUNT_MODE_NORMAL:
+ cnt_mode = 0;
+ break;
+ case COUNTER_COUNT_MODE_RANGE_LIMIT:
+ cnt_mode = 1;
+ break;
+ case COUNTER_COUNT_MODE_NON_RECYCLE:
+ cnt_mode = 2;
+ break;
+ case COUNTER_COUNT_MODE_MODULO_N:
+ cnt_mode = 3;
+ break;
+ }
+
+ priv->count_mode[count->id] = cnt_mode;
+
+ /* Set count mode configuration value */
+ mode_cfg = cnt_mode << 1;
+
+ /* Add quadrature mode configuration */
+ if (priv->quadrature_mode[count->id])
+ mode_cfg |= (priv->quadrature_scale[count->id] + 1) << 3;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static struct counter_count_enum_ext quad8_cnt_mode_enum = {
+ .items = counter_count_mode_str,
+ .num_items = ARRAY_SIZE(counter_count_mode_str),
+ .get = quad8_count_mode_get,
+ .set = quad8_count_mode_set
+};
+
+static ssize_t quad8_count_direction_read(struct counter_device *counter,
+ struct counter_count *count, void *priv, char *buf)
+{
+ enum counter_count_direction dir;
+
+ quad8_direction_get(counter, count, &dir);
+
+ return sprintf(buf, "%s\n", counter_count_direction_str[dir]);
+}
+
+static ssize_t quad8_count_enable_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ return sprintf(buf, "%u\n", priv->ab_enable[count->id]);
+}
+
+static ssize_t quad8_count_enable_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ int err;
+ bool ab_enable;
+ unsigned int ior_cfg;
+
+ err = kstrtobool(buf, &ab_enable);
+ if (err)
+ return err;
+
+ priv->ab_enable[count->id] = ab_enable;
+
+ ior_cfg = ab_enable | priv->preset_enable[count->id] << 1;
+
+ /* Load I/O control configuration */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+
+ return len;
+}
+
+static int quad8_error_noise_get(struct counter_device *counter,
+ struct counter_count *count, size_t *noise_error)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id + 1;
+
+ *noise_error = !!(inb(base_offset) & QUAD8_FLAG_E);
+
+ return 0;
+}
+
+static struct counter_count_enum_ext quad8_error_noise_enum = {
+ .items = quad8_noise_error_states,
+ .num_items = ARRAY_SIZE(quad8_noise_error_states),
+ .get = quad8_error_noise_get
+};
+
+static ssize_t quad8_count_preset_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ return sprintf(buf, "%u\n", priv->preset[count->id]);
+}
+
+static ssize_t quad8_count_preset_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ unsigned int preset;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
+ priv->preset[count->id] = preset;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+
+ return len;
+}
+
+static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ /* Range Limit and Modulo-N count modes use preset value as ceiling */
+ switch (priv->count_mode[count->id]) {
+ case 1:
+ case 3:
+ return quad8_count_preset_read(counter, count, private, buf);
+ }
+
+ /* By default 0x1FFFFFF (25 bits unsigned) is the maximum count */
+ return sprintf(buf, "33554431\n");
+}
+
+static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+
+ /* Range Limit and Modulo-N count modes use preset value as ceiling */
+ switch (priv->count_mode[count->id]) {
+ case 1:
+ case 3:
+ return quad8_count_preset_write(counter, count, private, buf,
+ len);
+ }
+
+ return len;
+}
+
+static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ return sprintf(buf, "%u\n", !priv->preset_enable[count->id]);
+}
+
+static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id + 1;
+ bool preset_enable;
+ int ret;
+ unsigned int ior_cfg;
+
+ ret = kstrtobool(buf, &preset_enable);
+ if (ret)
+ return ret;
+
+ /* Preset enable is active low in Input/Output Control register */
+ preset_enable = !preset_enable;
+
+ priv->preset_enable[count->id] = preset_enable;
+
+ ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1;
+
+ /* Load I/O control configuration to Input/Output Control Register */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+
+ return len;
+}
+
+static const struct counter_signal_ext quad8_index_ext[] = {
+ COUNTER_SIGNAL_ENUM("index_polarity", &quad8_index_pol_enum),
+ COUNTER_SIGNAL_ENUM_AVAILABLE("index_polarity", &quad8_index_pol_enum),
+ COUNTER_SIGNAL_ENUM("synchronous_mode", &quad8_syn_mode_enum),
+ COUNTER_SIGNAL_ENUM_AVAILABLE("synchronous_mode", &quad8_syn_mode_enum)
+};
+
+#define QUAD8_QUAD_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name) \
+}
+
+#define QUAD8_INDEX_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name), \
+ .ext = quad8_index_ext, \
+ .num_ext = ARRAY_SIZE(quad8_index_ext) \
+}
+
+static struct counter_signal quad8_signals[] = {
+ QUAD8_QUAD_SIGNAL(0, "Channel 1 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(1, "Channel 1 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(2, "Channel 2 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(3, "Channel 2 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(4, "Channel 3 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(5, "Channel 3 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(6, "Channel 4 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(7, "Channel 4 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(8, "Channel 5 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(9, "Channel 5 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(10, "Channel 6 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(11, "Channel 6 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(12, "Channel 7 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(13, "Channel 7 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(14, "Channel 8 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(15, "Channel 8 Quadrature B"),
+ QUAD8_INDEX_SIGNAL(16, "Channel 1 Index"),
+ QUAD8_INDEX_SIGNAL(17, "Channel 2 Index"),
+ QUAD8_INDEX_SIGNAL(18, "Channel 3 Index"),
+ QUAD8_INDEX_SIGNAL(19, "Channel 4 Index"),
+ QUAD8_INDEX_SIGNAL(20, "Channel 5 Index"),
+ QUAD8_INDEX_SIGNAL(21, "Channel 6 Index"),
+ QUAD8_INDEX_SIGNAL(22, "Channel 7 Index"),
+ QUAD8_INDEX_SIGNAL(23, "Channel 8 Index")
+};
+
+#define QUAD8_COUNT_SYNAPSES(_id) { \
+ { \
+ .actions_list = quad8_synapse_actions_list, \
+ .num_actions = ARRAY_SIZE(quad8_synapse_actions_list), \
+ .signal = quad8_signals + 2 * (_id) \
+ }, \
+ { \
+ .actions_list = quad8_synapse_actions_list, \
+ .num_actions = ARRAY_SIZE(quad8_synapse_actions_list), \
+ .signal = quad8_signals + 2 * (_id) + 1 \
+ }, \
+ { \
+ .actions_list = quad8_index_actions_list, \
+ .num_actions = ARRAY_SIZE(quad8_index_actions_list), \
+ .signal = quad8_signals + 2 * (_id) + 16 \
+ } \
+}
+
+static struct counter_synapse quad8_count_synapses[][3] = {
+ QUAD8_COUNT_SYNAPSES(0), QUAD8_COUNT_SYNAPSES(1),
+ QUAD8_COUNT_SYNAPSES(2), QUAD8_COUNT_SYNAPSES(3),
+ QUAD8_COUNT_SYNAPSES(4), QUAD8_COUNT_SYNAPSES(5),
+ QUAD8_COUNT_SYNAPSES(6), QUAD8_COUNT_SYNAPSES(7)
+};
+
+static const struct counter_count_ext quad8_count_ext[] = {
+ {
+ .name = "ceiling",
+ .read = quad8_count_ceiling_read,
+ .write = quad8_count_ceiling_write
+ },
+ {
+ .name = "floor",
+ .read = quad8_count_floor_read
+ },
+ COUNTER_COUNT_ENUM("count_mode", &quad8_cnt_mode_enum),
+ COUNTER_COUNT_ENUM_AVAILABLE("count_mode", &quad8_cnt_mode_enum),
+ {
+ .name = "direction",
+ .read = quad8_count_direction_read
+ },
+ {
+ .name = "enable",
+ .read = quad8_count_enable_read,
+ .write = quad8_count_enable_write
+ },
+ COUNTER_COUNT_ENUM("error_noise", &quad8_error_noise_enum),
+ COUNTER_COUNT_ENUM_AVAILABLE("error_noise", &quad8_error_noise_enum),
+ {
+ .name = "preset",
+ .read = quad8_count_preset_read,
+ .write = quad8_count_preset_write
+ },
+ {
+ .name = "preset_enable",
+ .read = quad8_count_preset_enable_read,
+ .write = quad8_count_preset_enable_write
+ }
+};
+
+#define QUAD8_COUNT(_id, _cntname) { \
+ .id = (_id), \
+ .name = (_cntname), \
+ .functions_list = quad8_count_functions_list, \
+ .num_functions = ARRAY_SIZE(quad8_count_functions_list), \
+ .synapses = quad8_count_synapses[(_id)], \
+ .num_synapses = 2, \
+ .ext = quad8_count_ext, \
+ .num_ext = ARRAY_SIZE(quad8_count_ext) \
+}
+
+static struct counter_count quad8_counts[] = {
+ QUAD8_COUNT(0, "Channel 1 Count"),
+ QUAD8_COUNT(1, "Channel 2 Count"),
+ QUAD8_COUNT(2, "Channel 3 Count"),
+ QUAD8_COUNT(3, "Channel 4 Count"),
+ QUAD8_COUNT(4, "Channel 5 Count"),
+ QUAD8_COUNT(5, "Channel 6 Count"),
+ QUAD8_COUNT(6, "Channel 7 Count"),
+ QUAD8_COUNT(7, "Channel 8 Count")
+};
+
+static int quad8_probe(struct device *dev, unsigned int id)
+{
+ struct iio_dev *indio_dev;
+ struct quad8_iio *quad8iio;
+ int i, j;
+ unsigned int base_offset;
+ int err;
+
+ if (!devm_request_region(dev, base[id], QUAD8_EXTENT, dev_name(dev))) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base[id], base[id] + QUAD8_EXTENT);
+ return -EBUSY;
+ }
+
+ /* Allocate IIO device; this also allocates driver data structure */
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*quad8iio));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ /* Initialize IIO device */
+ indio_dev->info = &quad8_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
+ indio_dev->channels = quad8_channels;
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+
+ /* Initialize Counter device and driver data */
+ quad8iio = iio_priv(indio_dev);
+ quad8iio->counter.name = dev_name(dev);
+ quad8iio->counter.parent = dev;
+ quad8iio->counter.ops = &quad8_ops;
+ quad8iio->counter.counts = quad8_counts;
+ quad8iio->counter.num_counts = ARRAY_SIZE(quad8_counts);
+ quad8iio->counter.signals = quad8_signals;
+ quad8iio->counter.num_signals = ARRAY_SIZE(quad8_signals);
+ quad8iio->counter.priv = quad8iio;
+ quad8iio->base = base[id];
+
+ /* Reset all counters and disable interrupt function */
+ outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+ /* Set initial configuration for all counters */
+ for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
+ base_offset = base[id] + 2 * i;
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ /* Reset Preset Register */
+ for (j = 0; j < 3; j++)
+ outb(0x00, base_offset);
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
+ /* Reset Error flag */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+ /* Binary encoding; Normal count; non-quadrature mode */
+ outb(QUAD8_CTR_CMR, base_offset + 1);
+ /* Disable A and B inputs; preset on index; FLG1 as Carry */
+ outb(QUAD8_CTR_IOR, base_offset + 1);
+ /* Disable index function; negative index polarity */
+ outb(QUAD8_CTR_IDR, base_offset + 1);
+ }
+ /* Enable all counters */
+ outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+
+ /* Register IIO device */
+ err = devm_iio_device_register(dev, indio_dev);
+ if (err)
+ return err;
+
+ /* Register Counter device */
+ return devm_counter_register(dev, &quad8iio->counter);
+}
+
+static struct isa_driver quad8_driver = {
+ .probe = quad8_probe,
+ .driver = {
+ .name = "104-quad-8"
+ }
+};
+
+module_isa_driver(quad8_driver, num_quad8);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
new file mode 100644
index 000000000000..233ac305d878
--- /dev/null
+++ b/drivers/counter/Kconfig
@@ -0,0 +1,60 @@
+#
+# Counter devices
+#
+
+menuconfig COUNTER
+ tristate "Counter support"
+ help
+ This enables counter device support through the Generic Counter
+ interface. You only need to enable this if you also want to enable
+ one or more of the counter device drivers below.
+
+if COUNTER
+
+config 104_QUAD_8
+ tristate "ACCES 104-QUAD-8 driver"
+ depends on PC104 && X86 && IIO
+ select ISA_BUS_API
+ help
+ Say yes here to build support for the ACCES 104-QUAD-8 quadrature
+ encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
+
+ A counter's error flag may be cleared by performing a write
+ operation on its respective count value attribute. Although the
+ 104-QUAD-8 counters have a 25-bit range, only the lower 24 bits may be
+ set, either directly or via the counter's preset attribute. Interrupts
+ are not supported by this driver.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
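+
+ For example, to load the module for two cards at hypothetical base
+ addresses 0x180 and 0x190 (substitute the addresses configured on
+ your hardware):
+
+ modprobe 104-quad-8 base=0x180,0x190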
+
+config STM32_TIMER_CNT
+ tristate "STM32 Timer encoder counter driver"
+ depends on MFD_STM32_TIMERS || COMPILE_TEST
+ help
+ Select this option to enable the STM32 Timer quadrature encoder
+ and counter driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stm32-timer-cnt.
+
+config STM32_LPTIMER_CNT
+ tristate "STM32 LP Timer encoder counter driver"
+ depends on (MFD_STM32_LPTIMER || COMPILE_TEST) && IIO
+ help
+ Select this option to enable the STM32 Low-Power Timer quadrature encoder
+ and counter driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stm32-lptimer-cnt.
+
+config FTM_QUADDEC
+ tristate "Flex Timer Module Quadrature decoder driver"
+ help
+ Select this option to enable the Flex Timer Quadrature decoder
+ driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ftm-quaddec.
+
+endif # COUNTER
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
new file mode 100644
index 000000000000..0c9e622a6bea
--- /dev/null
+++ b/drivers/counter/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Counter devices
+#
+
+obj-$(CONFIG_COUNTER) += counter.o
+
+obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
+obj-$(CONFIG_STM32_TIMER_CNT) += stm32-timer-cnt.o
+obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
+obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c
new file mode 100644
index 000000000000..106bc7180cd8
--- /dev/null
+++ b/drivers/counter/counter.c
@@ -0,0 +1,1567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Counter interface
+ * Copyright (C) 2018 William Breathitt Gray
+ */
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+const char *const counter_count_direction_str[2] = {
+ [COUNTER_COUNT_DIRECTION_FORWARD] = "forward",
+ [COUNTER_COUNT_DIRECTION_BACKWARD] = "backward"
+};
+EXPORT_SYMBOL_GPL(counter_count_direction_str);
+
+const char *const counter_count_mode_str[4] = {
+ [COUNTER_COUNT_MODE_NORMAL] = "normal",
+ [COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit",
+ [COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle",
+ [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n"
+};
+EXPORT_SYMBOL_GPL(counter_count_mode_str);
+
+ssize_t counter_signal_enum_read(struct counter_device *counter,
+ struct counter_signal *signal, void *priv,
+ char *buf)
+{
+ const struct counter_signal_enum_ext *const e = priv;
+ int err;
+ size_t index;
+
+ if (!e->get)
+ return -EINVAL;
+
+ err = e->get(counter, signal, &index);
+ if (err)
+ return err;
+
+ if (index >= e->num_items)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", e->items[index]);
+}
+EXPORT_SYMBOL_GPL(counter_signal_enum_read);
+
+ssize_t counter_signal_enum_write(struct counter_device *counter,
+ struct counter_signal *signal, void *priv,
+ const char *buf, size_t len)
+{
+ const struct counter_signal_enum_ext *const e = priv;
+ ssize_t index;
+ int err;
+
+ if (!e->set)
+ return -EINVAL;
+
+ index = __sysfs_match_string(e->items, e->num_items, buf);
+ if (index < 0)
+ return index;
+
+ err = e->set(counter, signal, index);
+ if (err)
+ return err;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_signal_enum_write);
+
+ssize_t counter_signal_enum_available_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ void *priv, char *buf)
+{
+ const struct counter_signal_enum_ext *const e = priv;
+ size_t i;
+ size_t len = 0;
+
+ if (!e->num_items)
+ return 0;
+
+ for (i = 0; i < e->num_items; i++)
+ len += sprintf(buf + len, "%s\n", e->items[i]);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_signal_enum_available_read);
+
+ssize_t counter_count_enum_read(struct counter_device *counter,
+ struct counter_count *count, void *priv,
+ char *buf)
+{
+ const struct counter_count_enum_ext *const e = priv;
+ int err;
+ size_t index;
+
+ if (!e->get)
+ return -EINVAL;
+
+ err = e->get(counter, count, &index);
+ if (err)
+ return err;
+
+ if (index >= e->num_items)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", e->items[index]);
+}
+EXPORT_SYMBOL_GPL(counter_count_enum_read);
+
+ssize_t counter_count_enum_write(struct counter_device *counter,
+ struct counter_count *count, void *priv,
+ const char *buf, size_t len)
+{
+ const struct counter_count_enum_ext *const e = priv;
+ ssize_t index;
+ int err;
+
+ if (!e->set)
+ return -EINVAL;
+
+ index = __sysfs_match_string(e->items, e->num_items, buf);
+ if (index < 0)
+ return index;
+
+ err = e->set(counter, count, index);
+ if (err)
+ return err;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_count_enum_write);
+
+ssize_t counter_count_enum_available_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, char *buf)
+{
+ const struct counter_count_enum_ext *const e = priv;
+ size_t i;
+ size_t len = 0;
+
+ if (!e->num_items)
+ return 0;
+
+ for (i = 0; i < e->num_items; i++)
+ len += sprintf(buf + len, "%s\n", e->items[i]);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_count_enum_available_read);
+
+ssize_t counter_device_enum_read(struct counter_device *counter, void *priv,
+ char *buf)
+{
+ const struct counter_device_enum_ext *const e = priv;
+ int err;
+ size_t index;
+
+ if (!e->get)
+ return -EINVAL;
+
+ err = e->get(counter, &index);
+ if (err)
+ return err;
+
+ if (index >= e->num_items)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", e->items[index]);
+}
+EXPORT_SYMBOL_GPL(counter_device_enum_read);
+
+ssize_t counter_device_enum_write(struct counter_device *counter, void *priv,
+ const char *buf, size_t len)
+{
+ const struct counter_device_enum_ext *const e = priv;
+ ssize_t index;
+ int err;
+
+ if (!e->set)
+ return -EINVAL;
+
+ index = __sysfs_match_string(e->items, e->num_items, buf);
+ if (index < 0)
+ return index;
+
+ err = e->set(counter, index);
+ if (err)
+ return err;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_device_enum_write);
+
+ssize_t counter_device_enum_available_read(struct counter_device *counter,
+ void *priv, char *buf)
+{
+ const struct counter_device_enum_ext *const e = priv;
+ size_t i;
+ size_t len = 0;
+
+ if (!e->num_items)
+ return 0;
+
+ for (i = 0; i < e->num_items; i++)
+ len += sprintf(buf + len, "%s\n", e->items[i]);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_device_enum_available_read);
+
+static const char *const counter_signal_level_str[] = {
+ [COUNTER_SIGNAL_LEVEL_LOW] = "low",
+ [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
+};
+
+/**
+ * counter_signal_read_value_set - set counter_signal_read_value data
+ * @val: counter_signal_read_value structure to set
+ * @type: property Signal data represents
+ * @data: Signal data
+ *
+ * This function sets an opaque counter_signal_read_value structure with the
+ * provided Signal data.
+ */
+void counter_signal_read_value_set(struct counter_signal_read_value *const val,
+ const enum counter_signal_value_type type,
+ void *const data)
+{
+ if (type == COUNTER_SIGNAL_LEVEL)
+ val->len = sprintf(val->buf, "%s\n",
+ counter_signal_level_str[*(enum counter_signal_level *)data]);
+ else
+ val->len = 0;
+}
+EXPORT_SYMBOL_GPL(counter_signal_read_value_set);
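+
+/*
+ * A minimal sketch of a driver's signal_read callback using this helper
+ * (the 104-QUAD-8 driver follows the same pattern); the level value here
+ * is only illustrative:
+ *
+ *	enum counter_signal_level level = COUNTER_SIGNAL_LEVEL_HIGH;
+ *
+ *	counter_signal_read_value_set(val, COUNTER_SIGNAL_LEVEL, &level);
+ *	return 0;
+ */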
+
+/**
+ * counter_count_read_value_set - set counter_count_read_value data
+ * @val: counter_count_read_value structure to set
+ * @type: property Count data represents
+ * @data: Count data
+ *
+ * This function sets an opaque counter_count_read_value structure with the
+ * provided Count data.
+ */
+void counter_count_read_value_set(struct counter_count_read_value *const val,
+ const enum counter_count_value_type type,
+ void *const data)
+{
+ switch (type) {
+ case COUNTER_COUNT_POSITION:
+ val->len = sprintf(val->buf, "%lu\n", *(unsigned long *)data);
+ break;
+ default:
+ val->len = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(counter_count_read_value_set);
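+
+/*
+ * A minimal sketch of a count_read callback using this helper; the
+ * position value is illustrative (a real driver reads it from hardware,
+ * as the 104-QUAD-8 driver does):
+ *
+ *	unsigned long position = 42;
+ *
+ *	counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
+ *	return 0;
+ */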
+
+/**
+ * counter_count_write_value_get - get counter_count_write_value data
+ * @data: Count data
+ * @type: property Count data represents
+ * @val: counter_count_write_value structure containing data
+ *
+ * This function extracts Count data from the provided opaque
+ * counter_count_write_value structure and stores it at the address provided by
+ * @data.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int counter_count_write_value_get(void *const data,
+ const enum counter_count_value_type type,
+ const struct counter_count_write_value *const val)
+{
+ int err;
+
+ switch (type) {
+ case COUNTER_COUNT_POSITION:
+ err = kstrtoul(val->buf, 0, data);
+ if (err)
+ return err;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(counter_count_write_value_get);
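+
+/*
+ * A minimal sketch of a count_write callback extracting the requested
+ * position with this helper (the 104-QUAD-8 driver uses the same call);
+ * writing the value to hardware is driver-specific and omitted here:
+ *
+ *	unsigned long position;
+ *	int err;
+ *
+ *	err = counter_count_write_value_get(&position, COUNTER_COUNT_POSITION,
+ *					    val);
+ *	if (err)
+ *		return err;
+ */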
+
+struct counter_attr_parm {
+ struct counter_device_attr_group *group;
+ const char *prefix;
+ const char *name;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len);
+ void *component;
+};
+
+struct counter_device_attr {
+ struct device_attribute dev_attr;
+ struct list_head l;
+ void *component;
+};
+
+static int counter_attribute_create(const struct counter_attr_parm *const parm)
+{
+ struct counter_device_attr *counter_attr;
+ struct device_attribute *dev_attr;
+ int err;
+ struct list_head *const attr_list = &parm->group->attr_list;
+
+ /* Allocate a Counter device attribute */
+ counter_attr = kzalloc(sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+ dev_attr = &counter_attr->dev_attr;
+
+ sysfs_attr_init(&dev_attr->attr);
+
+ /* Configure device attribute */
+ dev_attr->attr.name = kasprintf(GFP_KERNEL, "%s%s", parm->prefix,
+ parm->name);
+ if (!dev_attr->attr.name) {
+ err = -ENOMEM;
+ goto err_free_counter_attr;
+ }
+ if (parm->show) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = parm->show;
+ }
+ if (parm->store) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = parm->store;
+ }
+
+ /* Store associated Counter component with attribute */
+ counter_attr->component = parm->component;
+
+ /* Keep track of the attribute for later cleanup */
+ list_add(&counter_attr->l, attr_list);
+ parm->group->num_attr++;
+
+ return 0;
+
+err_free_counter_attr:
+ kfree(counter_attr);
+ return err;
+}
+
+#define to_counter_attr(_dev_attr) \
+ container_of(_dev_attr, struct counter_device_attr, dev_attr)
+
+struct counter_signal_unit {
+ struct counter_signal *signal;
+};
+
+static ssize_t counter_signal_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_signal_unit *const component = devattr->component;
+ struct counter_signal *const signal = component->signal;
+ int err;
+ struct counter_signal_read_value val = { .buf = buf };
+
+ err = counter->ops->signal_read(counter, signal, &val);
+ if (err)
+ return err;
+
+ return val.len;
+}
+
+struct counter_name_unit {
+ const char *name;
+};
+
+static ssize_t counter_device_attr_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_name_unit *const comp = to_counter_attr(attr)->component;
+
+ return sprintf(buf, "%s\n", comp->name);
+}
+
+static int counter_name_attribute_create(
+ struct counter_device_attr_group *const group,
+ const char *const name)
+{
+ struct counter_name_unit *name_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Skip if no name */
+ if (!name)
+ return 0;
+
+ /* Allocate name attribute component */
+ name_comp = kmalloc(sizeof(*name_comp), GFP_KERNEL);
+ if (!name_comp)
+ return -ENOMEM;
+ name_comp->name = name;
+
+ /* Allocate Signal name attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "name";
+ parm.show = counter_device_attr_name_show;
+ parm.store = NULL;
+ parm.component = name_comp;
+ err = counter_attribute_create(&parm);
+ if (err)
+ goto err_free_name_comp;
+
+ return 0;
+
+err_free_name_comp:
+ kfree(name_comp);
+ return err;
+}
+
+struct counter_signal_ext_unit {
+ struct counter_signal *signal;
+ const struct counter_signal_ext *ext;
+};
+
+static ssize_t counter_signal_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_signal_ext_unit *const comp = devattr->component;
+ const struct counter_signal_ext *const ext = comp->ext;
+
+ return ext->read(dev_get_drvdata(dev), comp->signal, ext->priv, buf);
+}
+
+static ssize_t counter_signal_ext_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_signal_ext_unit *const comp = devattr->component;
+ const struct counter_signal_ext *const ext = comp->ext;
+
+ return ext->write(dev_get_drvdata(dev), comp->signal, ext->priv, buf,
+ len);
+}
+
+static void counter_device_attr_list_free(struct list_head *attr_list)
+{
+ struct counter_device_attr *p, *n;
+
+ list_for_each_entry_safe(p, n, attr_list, l) {
+ /* free attribute name and associated component memory */
+ kfree(p->dev_attr.attr.name);
+ kfree(p->component);
+ list_del(&p->l);
+ kfree(p);
+ }
+}
+
+static int counter_signal_ext_register(
+ struct counter_device_attr_group *const group,
+ struct counter_signal *const signal)
+{
+ const size_t num_ext = signal->num_ext;
+ size_t i;
+ const struct counter_signal_ext *ext;
+ struct counter_signal_ext_unit *signal_ext_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Create an attribute for each extension */
+ for (i = 0 ; i < num_ext; i++) {
+ ext = signal->ext + i;
+
+ /* Allocate signal_ext attribute component */
+ signal_ext_comp = kmalloc(sizeof(*signal_ext_comp), GFP_KERNEL);
+ if (!signal_ext_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+ signal_ext_comp->signal = signal;
+ signal_ext_comp->ext = ext;
+
+ /* Allocate a Counter device attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = ext->name;
+ parm.show = (ext->read) ? counter_signal_ext_show : NULL;
+ parm.store = (ext->write) ? counter_signal_ext_store : NULL;
+ parm.component = signal_ext_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(signal_ext_comp);
+ goto err_free_attr_list;
+ }
+ }
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static int counter_signal_attributes_create(
+ struct counter_device_attr_group *const group,
+ const struct counter_device *const counter,
+ struct counter_signal *const signal)
+{
+ struct counter_signal_unit *signal_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Allocate Signal attribute component */
+ signal_comp = kmalloc(sizeof(*signal_comp), GFP_KERNEL);
+ if (!signal_comp)
+ return -ENOMEM;
+ signal_comp->signal = signal;
+
+ /* Create main Signal attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "signal";
+ parm.show = (counter->ops->signal_read) ? counter_signal_show : NULL;
+ parm.store = NULL;
+ parm.component = signal_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(signal_comp);
+ return err;
+ }
+
+ /* Create Signal name attribute */
+ err = counter_name_attribute_create(group, signal->name);
+ if (err)
+ goto err_free_attr_list;
+
+ /* Register Signal extension attributes */
+ err = counter_signal_ext_register(group, signal);
+ if (err)
+ goto err_free_attr_list;
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static int counter_signals_register(
+ struct counter_device_attr_group *const groups_list,
+ const struct counter_device *const counter)
+{
+ const size_t num_signals = counter->num_signals;
+ size_t i;
+ struct counter_signal *signal;
+ const char *name;
+ int err;
+
+ /* Register each Signal */
+ for (i = 0; i < num_signals; i++) {
+ signal = counter->signals + i;
+
+ /* Generate Signal attribute directory name */
+ name = kasprintf(GFP_KERNEL, "signal%d", signal->id);
+ if (!name) {
+ err = -ENOMEM;
+ goto err_free_attr_groups;
+ }
+ groups_list[i].attr_group.name = name;
+
+ /* Create all attributes associated with Signal */
+ err = counter_signal_attributes_create(groups_list + i, counter,
+ signal);
+ if (err)
+ goto err_free_attr_groups;
+ }
+
+ return 0;
+
+err_free_attr_groups:
+ do {
+ kfree(groups_list[i].attr_group.name);
+ counter_device_attr_list_free(&groups_list[i].attr_list);
+ } while (i--);
+ return err;
+}
+
+static const char *const counter_synapse_action_str[] = {
+ [COUNTER_SYNAPSE_ACTION_NONE] = "none",
+ [COUNTER_SYNAPSE_ACTION_RISING_EDGE] = "rising edge",
+ [COUNTER_SYNAPSE_ACTION_FALLING_EDGE] = "falling edge",
+ [COUNTER_SYNAPSE_ACTION_BOTH_EDGES] = "both edges"
+};
+
+struct counter_action_unit {
+ struct counter_synapse *synapse;
+ struct counter_count *count;
+};
+
+static ssize_t counter_action_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ int err;
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_action_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ struct counter_synapse *const synapse = component->synapse;
+ size_t action_index;
+ enum counter_synapse_action action;
+
+ err = counter->ops->action_get(counter, count, synapse, &action_index);
+ if (err)
+ return err;
+
+ synapse->action = action_index;
+
+ action = synapse->actions_list[action_index];
+ return sprintf(buf, "%s\n", counter_synapse_action_str[action]);
+}
+
+static ssize_t counter_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_action_unit *const component = devattr->component;
+ struct counter_synapse *const synapse = component->synapse;
+ size_t action_index;
+ const size_t num_actions = synapse->num_actions;
+ enum counter_synapse_action action;
+ int err;
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_count *const count = component->count;
+
+ /* Find requested action mode */
+ for (action_index = 0; action_index < num_actions; action_index++) {
+ action = synapse->actions_list[action_index];
+ if (sysfs_streq(buf, counter_synapse_action_str[action]))
+ break;
+ }
+ /* If requested action mode not found */
+ if (action_index >= num_actions)
+ return -EINVAL;
+
+ err = counter->ops->action_set(counter, count, synapse, action_index);
+ if (err)
+ return err;
+
+ synapse->action = action_index;
+
+ return len;
+}
+
+struct counter_action_avail_unit {
+ const enum counter_synapse_action *actions_list;
+ size_t num_actions;
+};
+
+static ssize_t counter_synapse_action_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_action_avail_unit *const component = devattr->component;
+ size_t i;
+ enum counter_synapse_action action;
+ ssize_t len = 0;
+
+ for (i = 0; i < component->num_actions; i++) {
+ action = component->actions_list[i];
+ len += sprintf(buf + len, "%s\n",
+ counter_synapse_action_str[action]);
+ }
+
+ return len;
+}
+
+static int counter_synapses_register(
+ struct counter_device_attr_group *const group,
+ const struct counter_device *const counter,
+ struct counter_count *const count, const char *const count_attr_name)
+{
+ size_t i;
+ struct counter_synapse *synapse;
+ const char *prefix;
+ struct counter_action_unit *action_comp;
+ struct counter_attr_parm parm;
+ int err;
+ struct counter_action_avail_unit *avail_comp;
+
+ /* Register each Synapse */
+ for (i = 0; i < count->num_synapses; i++) {
+ synapse = count->synapses + i;
+
+ /* Generate attribute prefix */
+ prefix = kasprintf(GFP_KERNEL, "signal%d_",
+ synapse->signal->id);
+ if (!prefix) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+
+ /* Allocate action attribute component */
+ action_comp = kmalloc(sizeof(*action_comp), GFP_KERNEL);
+ if (!action_comp) {
+ err = -ENOMEM;
+ goto err_free_prefix;
+ }
+ action_comp->synapse = synapse;
+ action_comp->count = count;
+
+ /* Create action attribute */
+ parm.group = group;
+ parm.prefix = prefix;
+ parm.name = "action";
+ parm.show = (counter->ops->action_get) ? counter_action_show : NULL;
+ parm.store = (counter->ops->action_set) ? counter_action_store : NULL;
+ parm.component = action_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(action_comp);
+ goto err_free_prefix;
+ }
+
+ /* Allocate action available attribute component */
+ avail_comp = kmalloc(sizeof(*avail_comp), GFP_KERNEL);
+ if (!avail_comp) {
+ err = -ENOMEM;
+ goto err_free_prefix;
+ }
+ avail_comp->actions_list = synapse->actions_list;
+ avail_comp->num_actions = synapse->num_actions;
+
+ /* Create action_available attribute */
+ parm.group = group;
+ parm.prefix = prefix;
+ parm.name = "action_available";
+ parm.show = counter_synapse_action_available_show;
+ parm.store = NULL;
+ parm.component = avail_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(avail_comp);
+ goto err_free_prefix;
+ }
+
+ kfree(prefix);
+ }
+
+ return 0;
+
+err_free_prefix:
+ kfree(prefix);
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+struct counter_count_unit {
+ struct counter_count *count;
+};
+
+static ssize_t counter_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ int err;
+ struct counter_count_read_value val = { .buf = buf };
+
+ err = counter->ops->count_read(counter, count, &val);
+ if (err)
+ return err;
+
+ return val.len;
+}
+
+static ssize_t counter_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ int err;
+ struct counter_count_write_value val = { .buf = buf };
+
+ err = counter->ops->count_write(counter, count, &val);
+ if (err)
+ return err;
+
+ return len;
+}
+
+static const char *const counter_count_function_str[] = {
+ [COUNTER_COUNT_FUNCTION_INCREASE] = "increase",
+ [COUNTER_COUNT_FUNCTION_DECREASE] = "decrease",
+ [COUNTER_COUNT_FUNCTION_PULSE_DIRECTION] = "pulse-direction",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X4] = "quadrature x4"
+};
+
+static ssize_t counter_function_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int err;
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ size_t func_index;
+ enum counter_count_function function;
+
+ err = counter->ops->function_get(counter, count, &func_index);
+ if (err)
+ return err;
+
+ count->function = func_index;
+
+ function = count->functions_list[func_index];
+ return sprintf(buf, "%s\n", counter_count_function_str[function]);
+}
+
+static ssize_t counter_function_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ const size_t num_functions = count->num_functions;
+ size_t func_index;
+ enum counter_count_function function;
+ int err;
+ struct counter_device *const counter = dev_get_drvdata(dev);
+
+ /* Find requested Count function mode */
+ for (func_index = 0; func_index < num_functions; func_index++) {
+ function = count->functions_list[func_index];
+ if (sysfs_streq(buf, counter_count_function_str[function]))
+ break;
+ }
+ /* Return error if requested Count function mode not found */
+ if (func_index >= num_functions)
+ return -EINVAL;
+
+ err = counter->ops->function_set(counter, count, func_index);
+ if (err)
+ return err;
+
+ count->function = func_index;
+
+ return len;
+}
+
+struct counter_count_ext_unit {
+ struct counter_count *count;
+ const struct counter_count_ext *ext;
+};
+
+static ssize_t counter_count_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_ext_unit *const comp = devattr->component;
+ const struct counter_count_ext *const ext = comp->ext;
+
+ return ext->read(dev_get_drvdata(dev), comp->count, ext->priv, buf);
+}
+
+static ssize_t counter_count_ext_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_ext_unit *const comp = devattr->component;
+ const struct counter_count_ext *const ext = comp->ext;
+
+ return ext->write(dev_get_drvdata(dev), comp->count, ext->priv, buf,
+ len);
+}
+
+static int counter_count_ext_register(
+ struct counter_device_attr_group *const group,
+ struct counter_count *const count)
+{
+ size_t i;
+ const struct counter_count_ext *ext;
+ struct counter_count_ext_unit *count_ext_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Create an attribute for each extension */
+ for (i = 0 ; i < count->num_ext; i++) {
+ ext = count->ext + i;
+
+ /* Allocate count_ext attribute component */
+ count_ext_comp = kmalloc(sizeof(*count_ext_comp), GFP_KERNEL);
+ if (!count_ext_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+ count_ext_comp->count = count;
+ count_ext_comp->ext = ext;
+
+ /* Allocate count_ext attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = ext->name;
+ parm.show = (ext->read) ? counter_count_ext_show : NULL;
+ parm.store = (ext->write) ? counter_count_ext_store : NULL;
+ parm.component = count_ext_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(count_ext_comp);
+ goto err_free_attr_list;
+ }
+ }
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+struct counter_func_avail_unit {
+ const enum counter_count_function *functions_list;
+ size_t num_functions;
+};
+
+static ssize_t counter_count_function_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_func_avail_unit *const component = devattr->component;
+ const enum counter_count_function *const func_list = component->functions_list;
+ const size_t num_functions = component->num_functions;
+ size_t i;
+ enum counter_count_function function;
+ ssize_t len = 0;
+
+ for (i = 0; i < num_functions; i++) {
+ function = func_list[i];
+ len += sprintf(buf + len, "%s\n",
+ counter_count_function_str[function]);
+ }
+
+ return len;
+}
+
+static int counter_count_attributes_create(
+ struct counter_device_attr_group *const group,
+ const struct counter_device *const counter,
+ struct counter_count *const count)
+{
+ struct counter_count_unit *count_comp;
+ struct counter_attr_parm parm;
+ int err;
+ struct counter_count_unit *func_comp;
+ struct counter_func_avail_unit *avail_comp;
+
+ /* Allocate count attribute component */
+ count_comp = kmalloc(sizeof(*count_comp), GFP_KERNEL);
+ if (!count_comp)
+ return -ENOMEM;
+ count_comp->count = count;
+
+ /* Create main Count attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "count";
+ parm.show = (counter->ops->count_read) ? counter_count_show : NULL;
+ parm.store = (counter->ops->count_write) ? counter_count_store : NULL;
+ parm.component = count_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(count_comp);
+ return err;
+ }
+
+ /* Allocate function attribute component */
+ func_comp = kmalloc(sizeof(*func_comp), GFP_KERNEL);
+ if (!func_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+ func_comp->count = count;
+
+ /* Create Count function attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "function";
+ parm.show = (counter->ops->function_get) ? counter_function_show : NULL;
+ parm.store = (counter->ops->function_set) ? counter_function_store : NULL;
+ parm.component = func_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(func_comp);
+ goto err_free_attr_list;
+ }
+
+ /* Allocate function available attribute component */
+ avail_comp = kmalloc(sizeof(*avail_comp), GFP_KERNEL);
+ if (!avail_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+ avail_comp->functions_list = count->functions_list;
+ avail_comp->num_functions = count->num_functions;
+
+ /* Create Count function_available attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "function_available";
+ parm.show = counter_count_function_available_show;
+ parm.store = NULL;
+ parm.component = avail_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(avail_comp);
+ goto err_free_attr_list;
+ }
+
+ /* Create Count name attribute */
+ err = counter_name_attribute_create(group, count->name);
+ if (err)
+ goto err_free_attr_list;
+
+ /* Register Count extension attributes */
+ err = counter_count_ext_register(group, count);
+ if (err)
+ goto err_free_attr_list;
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static int counter_counts_register(
+ struct counter_device_attr_group *const groups_list,
+ const struct counter_device *const counter)
+{
+ size_t i;
+ struct counter_count *count;
+ const char *name;
+ int err;
+
+ /* Register each Count */
+ for (i = 0; i < counter->num_counts; i++) {
+ count = counter->counts + i;
+
+ /* Generate Count attribute directory name */
+ name = kasprintf(GFP_KERNEL, "count%d", count->id);
+ if (!name) {
+ err = -ENOMEM;
+ goto err_free_attr_groups;
+ }
+ groups_list[i].attr_group.name = name;
+
+ /* Register the Synapses associated with each Count */
+ err = counter_synapses_register(groups_list + i, counter, count,
+ name);
+ if (err)
+ goto err_free_attr_groups;
+
+ /* Create all attributes associated with Count */
+ err = counter_count_attributes_create(groups_list + i, counter,
+ count);
+ if (err)
+ goto err_free_attr_groups;
+ }
+
+ return 0;
+
+err_free_attr_groups:
+ do {
+ kfree(groups_list[i].attr_group.name);
+ counter_device_attr_list_free(&groups_list[i].attr_list);
+ } while (i--);
+ return err;
+}
+
+struct counter_size_unit {
+ size_t size;
+};
+
+static ssize_t counter_device_attr_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_size_unit *const comp = to_counter_attr(attr)->component;
+
+ return sprintf(buf, "%zu\n", comp->size);
+}
+
+static int counter_size_attribute_create(
+ struct counter_device_attr_group *const group,
+ const size_t size, const char *const name)
+{
+ struct counter_size_unit *size_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Allocate size attribute component */
+ size_comp = kmalloc(sizeof(*size_comp), GFP_KERNEL);
+ if (!size_comp)
+ return -ENOMEM;
+ size_comp->size = size;
+
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = name;
+ parm.show = counter_device_attr_size_show;
+ parm.store = NULL;
+ parm.component = size_comp;
+ err = counter_attribute_create(&parm);
+ if (err)
+ goto err_free_size_comp;
+
+ return 0;
+
+err_free_size_comp:
+ kfree(size_comp);
+ return err;
+}
+
+struct counter_ext_unit {
+ const struct counter_device_ext *ext;
+};
+
+static ssize_t counter_device_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_ext_unit *const component = devattr->component;
+ const struct counter_device_ext *const ext = component->ext;
+
+ return ext->read(dev_get_drvdata(dev), ext->priv, buf);
+}
+
+static ssize_t counter_device_ext_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_ext_unit *const component = devattr->component;
+ const struct counter_device_ext *const ext = component->ext;
+
+ return ext->write(dev_get_drvdata(dev), ext->priv, buf, len);
+}
+
+static int counter_device_ext_register(
+ struct counter_device_attr_group *const group,
+ struct counter_device *const counter)
+{
+ size_t i;
+ struct counter_ext_unit *ext_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Create an attribute for each extension */
+ for (i = 0 ; i < counter->num_ext; i++) {
+ /* Allocate extension attribute component */
+ ext_comp = kmalloc(sizeof(*ext_comp), GFP_KERNEL);
+ if (!ext_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+
+ ext_comp->ext = counter->ext + i;
+
+ /* Allocate extension attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = counter->ext[i].name;
+ parm.show = (counter->ext[i].read) ? counter_device_ext_show : NULL;
+ parm.store = (counter->ext[i].write) ? counter_device_ext_store : NULL;
+ parm.component = ext_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(ext_comp);
+ goto err_free_attr_list;
+ }
+ }
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static int counter_global_attr_register(
+ struct counter_device_attr_group *const group,
+ struct counter_device *const counter)
+{
+ int err;
+
+ /* Create name attribute */
+ err = counter_name_attribute_create(group, counter->name);
+ if (err)
+ return err;
+
+ /* Create num_counts attribute */
+ err = counter_size_attribute_create(group, counter->num_counts,
+ "num_counts");
+ if (err)
+ goto err_free_attr_list;
+
+ /* Create num_signals attribute */
+ err = counter_size_attribute_create(group, counter->num_signals,
+ "num_signals");
+ if (err)
+ goto err_free_attr_list;
+
+ /* Register Counter device extension attributes */
+ err = counter_device_ext_register(group, counter);
+ if (err)
+ goto err_free_attr_list;
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static void counter_device_groups_list_free(
+ struct counter_device_attr_group *const groups_list,
+ const size_t num_groups)
+{
+ struct counter_device_attr_group *group;
+ size_t i;
+
+ /* loop through all attribute groups (signals, counts, global, etc.) */
+ for (i = 0; i < num_groups; i++) {
+ group = groups_list + i;
+
+ /* free the attribute group and all associated attribute memory */
+ kfree(group->attr_group.name);
+ kfree(group->attr_group.attrs);
+ counter_device_attr_list_free(&group->attr_list);
+ }
+
+ kfree(groups_list);
+}
+
+static int counter_device_groups_list_prepare(
+ struct counter_device *const counter)
+{
+ const size_t total_num_groups =
+ counter->num_signals + counter->num_counts + 1;
+ struct counter_device_attr_group *groups_list;
+ size_t i;
+ int err;
+ size_t num_groups = 0;
+
+ /* Allocate space for attribute groups (signals, counts, and global) */
+ groups_list = kcalloc(total_num_groups, sizeof(*groups_list),
+ GFP_KERNEL);
+ if (!groups_list)
+ return -ENOMEM;
+
+ /* Initialize attribute lists */
+ for (i = 0; i < total_num_groups; i++)
+ INIT_LIST_HEAD(&groups_list[i].attr_list);
+
+ /* Register Signals */
+ err = counter_signals_register(groups_list, counter);
+ if (err)
+ goto err_free_groups_list;
+ num_groups += counter->num_signals;
+
+ /* Register Counts and respective Synapses */
+ err = counter_counts_register(groups_list + num_groups, counter);
+ if (err)
+ goto err_free_groups_list;
+ num_groups += counter->num_counts;
+
+ /* Register Counter global attributes */
+ err = counter_global_attr_register(groups_list + num_groups, counter);
+ if (err)
+ goto err_free_groups_list;
+ num_groups++;
+
+ /* Store groups_list in device_state */
+ counter->device_state->groups_list = groups_list;
+ counter->device_state->num_groups = num_groups;
+
+ return 0;
+
+err_free_groups_list:
+ counter_device_groups_list_free(groups_list, num_groups);
+ return err;
+}
+
+static int counter_device_groups_prepare(
+ struct counter_device_state *const device_state)
+{
+ size_t i, j;
+ struct counter_device_attr_group *group;
+ int err;
+ struct counter_device_attr *p;
+
+ /* Allocate attribute groups for association with device */
+ device_state->groups = kcalloc(device_state->num_groups + 1,
+ sizeof(*device_state->groups),
+ GFP_KERNEL);
+ if (!device_state->groups)
+ return -ENOMEM;
+
+ /* Prepare each group of attributes for association */
+ for (i = 0; i < device_state->num_groups; i++) {
+ group = device_state->groups_list + i;
+
+ /* Allocate space for attribute pointers in attribute group */
+ group->attr_group.attrs = kcalloc(group->num_attr + 1,
+ sizeof(*group->attr_group.attrs), GFP_KERNEL);
+ if (!group->attr_group.attrs) {
+ err = -ENOMEM;
+ goto err_free_groups;
+ }
+
+ /* Add attribute pointers to attribute group */
+ j = 0;
+ list_for_each_entry(p, &group->attr_list, l)
+ group->attr_group.attrs[j++] = &p->dev_attr.attr;
+
+ /* Group attributes in attribute group */
+ device_state->groups[i] = &group->attr_group;
+ }
+ /* Associate attributes with device */
+ device_state->dev.groups = device_state->groups;
+
+ return 0;
+
+err_free_groups:
+ do {
+ group = device_state->groups_list + i;
+ kfree(group->attr_group.attrs);
+ group->attr_group.attrs = NULL;
+ } while (i--);
+ kfree(device_state->groups);
+ return err;
+}
+
+/* Provides a unique ID for each counter device */
+static DEFINE_IDA(counter_ida);
+
+static void counter_device_release(struct device *dev)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device_state *const device_state = counter->device_state;
+
+ kfree(device_state->groups);
+ counter_device_groups_list_free(device_state->groups_list,
+ device_state->num_groups);
+ ida_simple_remove(&counter_ida, device_state->id);
+ kfree(device_state);
+}
+
+static struct device_type counter_device_type = {
+ .name = "counter_device",
+ .release = counter_device_release
+};
+
+static struct bus_type counter_bus_type = {
+ .name = "counter"
+};
+
+/**
+ * counter_register - register Counter to the system
+ * @counter: pointer to Counter to register
+ *
+ * This function registers a Counter with the system. A sysfs "counter"
+ * directory will be created and populated with sysfs attributes
+ * corresponding to the Counter Signals, Synapses, and Counts.
+ */
+int counter_register(struct counter_device *const counter)
+{
+ struct counter_device_state *device_state;
+ int err;
+
+ /* Allocate internal state container for Counter device */
+ device_state = kzalloc(sizeof(*device_state), GFP_KERNEL);
+ if (!device_state)
+ return -ENOMEM;
+ counter->device_state = device_state;
+
+ /* Acquire unique ID */
+ device_state->id = ida_simple_get(&counter_ida, 0, 0, GFP_KERNEL);
+ if (device_state->id < 0) {
+ err = device_state->id;
+ goto err_free_device_state;
+ }
+
+ /* Configure device structure for Counter */
+ device_state->dev.type = &counter_device_type;
+ device_state->dev.bus = &counter_bus_type;
+ if (counter->parent) {
+ device_state->dev.parent = counter->parent;
+ device_state->dev.of_node = counter->parent->of_node;
+ }
+ dev_set_name(&device_state->dev, "counter%d", device_state->id);
+ device_initialize(&device_state->dev);
+ dev_set_drvdata(&device_state->dev, counter);
+
+ /* Prepare device attributes */
+ err = counter_device_groups_list_prepare(counter);
+ if (err)
+ goto err_free_id;
+
+ /* Organize device attributes into groups and associate them with the device */
+ err = counter_device_groups_prepare(device_state);
+ if (err)
+ goto err_free_groups_list;
+
+ /* Add device to system */
+ err = device_add(&device_state->dev);
+ if (err)
+ goto err_free_groups;
+
+ return 0;
+
+err_free_groups:
+ kfree(device_state->groups);
+err_free_groups_list:
+ counter_device_groups_list_free(device_state->groups_list,
+ device_state->num_groups);
+err_free_id:
+ ida_simple_remove(&counter_ida, device_state->id);
+err_free_device_state:
+ kfree(device_state);
+ return err;
+}
+EXPORT_SYMBOL_GPL(counter_register);
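
A minimal usage sketch of the registration entry point above, not part of this patch: it assumes a hypothetical platform driver whose struct foo_priv embeds a struct counter_device named "counter", plus hypothetical foo_counter_ops, foo_counts and foo_signals tables. It simply mirrors the client drivers added further down in this diff.

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Describe the device: callbacks, Counts and Signals */
	priv->counter.name = dev_name(&pdev->dev);
	priv->counter.parent = &pdev->dev;
	priv->counter.ops = &foo_counter_ops;
	priv->counter.counts = foo_counts;
	priv->counter.num_counts = ARRAY_SIZE(foo_counts);
	priv->counter.signals = foo_signals;
	priv->counter.num_signals = ARRAY_SIZE(foo_signals);
	priv->counter.priv = priv;

	platform_set_drvdata(pdev, priv);

	/* Creates and populates /sys/bus/counter/devices/counterX */
	return counter_register(&priv->counter);
}
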
+
+/**
+ * counter_unregister - unregister Counter from the system
+ * @counter: pointer to Counter to unregister
+ *
+ * The Counter is unregistered from the system; all allocated memory is freed.
+ */
+void counter_unregister(struct counter_device *const counter)
+{
+ if (counter)
+ device_del(&counter->device_state->dev);
+}
+EXPORT_SYMBOL_GPL(counter_unregister);
+
+static void devm_counter_unreg(struct device *dev, void *res)
+{
+ counter_unregister(*(struct counter_device **)res);
+}
+
+/**
+ * devm_counter_register - Resource-managed counter_register
+ * @dev: device to allocate counter_device for
+ * @counter: pointer to Counter to register
+ *
+ * Managed counter_register. The Counter registered with this function is
+ * automatically unregistered on driver detach. This function calls
+ * counter_register internally. Refer to that function for more information.
+ *
+ * If a Counter registered with this function needs to be unregistered
+ * separately, devm_counter_unregister must be used.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_counter_register(struct device *dev,
+ struct counter_device *const counter)
+{
+ struct counter_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_counter_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = counter_register(counter);
+ if (!ret) {
+ *ptr = counter;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_counter_register);
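
A similarly hedged sketch of the resource-managed variant, again not part of the patch: struct bar_priv and bar_setup_counter() are hypothetical, the latter standing in for whatever driver code fills in the embedded struct counter_device. The Counter is then torn down automatically on driver detach, so no explicit remove path is needed for it.

static int bar_probe(struct platform_device *pdev)
{
	struct bar_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Hypothetical helper filling in priv->counter (ops, Counts, Signals) */
	bar_setup_counter(priv, &pdev->dev);

	/* Unregistered automatically when the driver detaches */
	return devm_counter_register(&pdev->dev, &priv->counter);
}
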
+
+static int devm_counter_match(struct device *dev, void *res, void *data)
+{
+ struct counter_device **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ return *r == data;
+}
+
+/**
+ * devm_counter_unregister - Resource-managed counter_unregister
+ * @dev: device this counter_device belongs to
+ * @counter: pointer to Counter associated with the device
+ *
+ * Unregister Counter registered with devm_counter_register.
+ */
+void devm_counter_unregister(struct device *dev,
+ struct counter_device *const counter)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_counter_unreg, devm_counter_match,
+ counter);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_counter_unregister);
+
+static int __init counter_init(void)
+{
+ return bus_register(&counter_bus_type);
+}
+
+static void __exit counter_exit(void)
+{
+ bus_unregister(&counter_bus_type);
+}
+
+subsys_initcall(counter_init);
+module_exit(counter_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Generic Counter interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
new file mode 100644
index 000000000000..c83c8875bf82
--- /dev/null
+++ b/drivers/counter/ftm-quaddec.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Flex Timer Module Quadrature decoder
+ *
+ * This module implements a driver for decoding the FTM quadrature
+ * signals of, e.g., an LS1021A.
+ */
+
+#include <linux/fsl/ftm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/counter.h>
+#include <linux/bitfield.h>
+
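+/*
+ * Read-modify-write helper: read the register at @offset, clear the bits in
+ * @mask, insert @val into that field with FIELD_PREP() and write the result
+ * back. Not atomic by itself; concurrent sysfs writers are serialized through
+ * ftm_quaddec_mutex.
+ */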
+#define FTM_FIELD_UPDATE(ftm, offset, mask, val) \
+ ({ \
+ uint32_t flags; \
+ ftm_read(ftm, offset, &flags); \
+ flags &= ~mask; \
+ flags |= FIELD_PREP(mask, val); \
+ ftm_write(ftm, offset, flags); \
+ })
+
+struct ftm_quaddec {
+ struct counter_device counter;
+ struct platform_device *pdev;
+ void __iomem *ftm_base;
+ bool big_endian;
+ struct mutex ftm_quaddec_mutex;
+};
+
+static void ftm_read(struct ftm_quaddec *ftm, uint32_t offset, uint32_t *data)
+{
+ if (ftm->big_endian)
+ *data = ioread32be(ftm->ftm_base + offset);
+ else
+ *data = ioread32(ftm->ftm_base + offset);
+}
+
+static void ftm_write(struct ftm_quaddec *ftm, uint32_t offset, uint32_t data)
+{
+ if (ftm->big_endian)
+ iowrite32be(data, ftm->ftm_base + offset);
+ else
+ iowrite32(data, ftm->ftm_base + offset);
+}
+
+/* Hold mutex before modifying write protection state */
+static void ftm_clear_write_protection(struct ftm_quaddec *ftm)
+{
+ uint32_t flag;
+
+ /* First see if it is enabled */
+ ftm_read(ftm, FTM_FMS, &flag);
+
+ if (flag & FTM_FMS_WPEN)
+ FTM_FIELD_UPDATE(ftm, FTM_MODE, FTM_MODE_WPDIS, 1);
+}
+
+static void ftm_set_write_protection(struct ftm_quaddec *ftm)
+{
+ FTM_FIELD_UPDATE(ftm, FTM_FMS, FTM_FMS_WPEN, 1);
+}
+
+static void ftm_reset_counter(struct ftm_quaddec *ftm)
+{
+ /* Reset hardware counter to CNTIN */
+ ftm_write(ftm, FTM_CNT, 0x0);
+}
+
+static void ftm_quaddec_init(struct ftm_quaddec *ftm)
+{
+ ftm_clear_write_protection(ftm);
+
+ /*
+ * Do not write in the region from the CNTIN register through the
+ * PWMLOAD register when FTMEN = 0.
+ * Also reset other fields to zero
+ */
+ ftm_write(ftm, FTM_MODE, FTM_MODE_FTMEN);
+ ftm_write(ftm, FTM_CNTIN, 0x0000);
+ ftm_write(ftm, FTM_MOD, 0xffff);
+ ftm_write(ftm, FTM_CNT, 0x0);
+ /* Set prescaler, reset other fields to zero */
+ ftm_write(ftm, FTM_SC, FTM_SC_PS_1);
+
+ /* Select quad mode, reset other fields to zero */
+ ftm_write(ftm, FTM_QDCTRL, FTM_QDCTRL_QUADEN);
+
+ /* Unused features: reset to their default state */
+ ftm_write(ftm, FTM_POL, 0x0);
+ ftm_write(ftm, FTM_FLTCTRL, 0x0);
+ ftm_write(ftm, FTM_SYNCONF, 0x0);
+ ftm_write(ftm, FTM_SYNC, 0xffff);
+
+ /* Lock the FTM */
+ ftm_set_write_protection(ftm);
+}
+
+static void ftm_quaddec_disable(struct ftm_quaddec *ftm)
+{
+ ftm_clear_write_protection(ftm);
+ ftm_write(ftm, FTM_MODE, 0);
+ ftm_write(ftm, FTM_QDCTRL, 0);
+ /*
+ * This is enough to disable the counter: init() never selected a
+ * clock source when writing FTM_SC.
+ */
+ ftm_set_write_protection(ftm);
+}
+
+static int ftm_quaddec_get_prescaler(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *cnt_mode)
+{
+ struct ftm_quaddec *ftm = counter->priv;
+ uint32_t scflags;
+
+ ftm_read(ftm, FTM_SC, &scflags);
+
+ *cnt_mode = FIELD_GET(FTM_SC_PS_MASK, scflags);
+
+ return 0;
+}
+
+static int ftm_quaddec_set_prescaler(struct counter_device *counter,
+ struct counter_count *count,
+ size_t cnt_mode)
+{
+ struct ftm_quaddec *ftm = counter->priv;
+
+ mutex_lock(&ftm->ftm_quaddec_mutex);
+
+ ftm_clear_write_protection(ftm);
+ FTM_FIELD_UPDATE(ftm, FTM_SC, FTM_SC_PS_MASK, cnt_mode);
+ ftm_set_write_protection(ftm);
+
+ /* Also reset the counter, as its value is now undefined anyway */
+ ftm_reset_counter(ftm);
+
+ mutex_unlock(&ftm->ftm_quaddec_mutex);
+ return 0;
+}
+
+static const char * const ftm_quaddec_prescaler[] = {
+ "1", "2", "4", "8", "16", "32", "64", "128"
+};
+
+static struct counter_count_enum_ext ftm_quaddec_prescaler_enum = {
+ .items = ftm_quaddec_prescaler,
+ .num_items = ARRAY_SIZE(ftm_quaddec_prescaler),
+ .get = ftm_quaddec_get_prescaler,
+ .set = ftm_quaddec_set_prescaler
+};
+
+enum ftm_quaddec_synapse_action {
+ FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES,
+};
+
+static enum counter_synapse_action ftm_quaddec_synapse_actions[] = {
+ [FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES] =
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+enum ftm_quaddec_count_function {
+ FTM_QUADDEC_COUNT_ENCODER_MODE_1,
+};
+
+static const enum counter_count_function ftm_quaddec_count_functions[] = {
+ [FTM_QUADDEC_COUNT_ENCODER_MODE_1] =
+ COUNTER_COUNT_FUNCTION_QUADRATURE_X4
+};
+
+static int ftm_quaddec_count_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_read_value *val)
+{
+ struct ftm_quaddec *const ftm = counter->priv;
+ uint32_t cntval;
+
+ ftm_read(ftm, FTM_CNT, &cntval);
+
+ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cntval);
+
+ return 0;
+}
+
+static int ftm_quaddec_count_write(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_write_value *val)
+{
+ struct ftm_quaddec *const ftm = counter->priv;
+ u32 cnt;
+ int err;
+
+ err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
+ if (err)
+ return err;
+
+ if (cnt != 0) {
+ dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
+ return -EINVAL;
+ }
+
+ ftm_reset_counter(ftm);
+
+ return 0;
+}
+
+static int ftm_quaddec_count_function_get(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *function)
+{
+ *function = FTM_QUADDEC_COUNT_ENCODER_MODE_1;
+
+ return 0;
+}
+
+static int ftm_quaddec_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t *action)
+{
+ *action = FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES;
+
+ return 0;
+}
+
+static const struct counter_ops ftm_quaddec_cnt_ops = {
+ .count_read = ftm_quaddec_count_read,
+ .count_write = ftm_quaddec_count_write,
+ .function_get = ftm_quaddec_count_function_get,
+ .action_get = ftm_quaddec_action_get,
+};
+
+static struct counter_signal ftm_quaddec_signals[] = {
+ {
+ .id = 0,
+ .name = "Channel 1 Phase A"
+ },
+ {
+ .id = 1,
+ .name = "Channel 1 Phase B"
+ }
+};
+
+static struct counter_synapse ftm_quaddec_count_synapses[] = {
+ {
+ .actions_list = ftm_quaddec_synapse_actions,
+ .num_actions = ARRAY_SIZE(ftm_quaddec_synapse_actions),
+ .signal = &ftm_quaddec_signals[0]
+ },
+ {
+ .actions_list = ftm_quaddec_synapse_actions,
+ .num_actions = ARRAY_SIZE(ftm_quaddec_synapse_actions),
+ .signal = &ftm_quaddec_signals[1]
+ }
+};
+
+static const struct counter_count_ext ftm_quaddec_count_ext[] = {
+ COUNTER_COUNT_ENUM("prescaler", &ftm_quaddec_prescaler_enum),
+ COUNTER_COUNT_ENUM_AVAILABLE("prescaler", &ftm_quaddec_prescaler_enum),
+};
+
+static struct counter_count ftm_quaddec_counts = {
+ .id = 0,
+ .name = "Channel 1 Count",
+ .functions_list = ftm_quaddec_count_functions,
+ .num_functions = ARRAY_SIZE(ftm_quaddec_count_functions),
+ .synapses = ftm_quaddec_count_synapses,
+ .num_synapses = ARRAY_SIZE(ftm_quaddec_count_synapses),
+ .ext = ftm_quaddec_count_ext,
+ .num_ext = ARRAY_SIZE(ftm_quaddec_count_ext)
+};
+
+static int ftm_quaddec_probe(struct platform_device *pdev)
+{
+ struct ftm_quaddec *ftm;
+
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *io;
+ int ret;
+
+ ftm = devm_kzalloc(&pdev->dev, sizeof(*ftm), GFP_KERNEL);
+ if (!ftm)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ftm);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!io) {
+ dev_err(&pdev->dev, "Failed to get memory region\n");
+ return -ENODEV;
+ }
+
+ ftm->pdev = pdev;
+ ftm->big_endian = of_property_read_bool(node, "big-endian");
+ ftm->ftm_base = devm_ioremap(&pdev->dev, io->start, resource_size(io));
+
+ if (!ftm->ftm_base) {
+ dev_err(&pdev->dev, "Failed to map memory region\n");
+ return -EINVAL;
+ }
+ ftm->counter.name = dev_name(&pdev->dev);
+ ftm->counter.parent = &pdev->dev;
+ ftm->counter.ops = &ftm_quaddec_cnt_ops;
+ ftm->counter.counts = &ftm_quaddec_counts;
+ ftm->counter.num_counts = 1;
+ ftm->counter.signals = ftm_quaddec_signals;
+ ftm->counter.num_signals = ARRAY_SIZE(ftm_quaddec_signals);
+ ftm->counter.priv = ftm;
+
+ mutex_init(&ftm->ftm_quaddec_mutex);
+
+ ftm_quaddec_init(ftm);
+
+ ret = counter_register(&ftm->counter);
+ if (ret)
+ ftm_quaddec_disable(ftm);
+
+ return ret;
+}
+
+static int ftm_quaddec_remove(struct platform_device *pdev)
+{
+ struct ftm_quaddec *ftm = platform_get_drvdata(pdev);
+
+ counter_unregister(&ftm->counter);
+
+ ftm_quaddec_disable(ftm);
+
+ return 0;
+}
+
+static const struct of_device_id ftm_quaddec_match[] = {
+ { .compatible = "fsl,ftm-quaddec" },
+ {},
+};
+
+static struct platform_driver ftm_quaddec_driver = {
+ .driver = {
+ .name = "ftm-quaddec",
+ .of_match_table = ftm_quaddec_match,
+ },
+ .probe = ftm_quaddec_probe,
+ .remove = ftm_quaddec_remove,
+};
+
+module_platform_driver(ftm_quaddec_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kjeld Flarup <kfa@deif.com");
+MODULE_AUTHOR("Patrick Havelange <patrick.havelange@essensium.com");
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
new file mode 100644
index 000000000000..bbc930a5962c
--- /dev/null
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32 Low-Power Timer Encoder and Counter driver
+ *
+ * Copyright (C) STMicroelectronics 2017
+ *
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ *
+ * Inspired by 104-quad-8 and stm32-timer-trigger drivers.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/counter.h>
+#include <linux/iio/iio.h>
+#include <linux/mfd/stm32-lptimer.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
+struct stm32_lptim_cnt {
+ struct counter_device counter;
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *clk;
+ u32 ceiling;
+ u32 polarity;
+ u32 quadrature_mode;
+ bool enabled;
+};
+
+static int stm32_lptim_is_enabled(struct stm32_lptim_cnt *priv)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(STM32_LPTIM_ENABLE, val);
+}
+
+static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
+ int enable)
+{
+ int ret;
+ u32 val;
+
+ val = FIELD_PREP(STM32_LPTIM_ENABLE, enable);
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CR, val);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ clk_disable(priv->clk);
+ priv->enabled = false;
+ return 0;
+ }
+
+ /* LP timer must be enabled before writing CMP & ARR */
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
+ if (ret)
+ return ret;
+
+ /* ensure CMP & ARR registers are properly written */
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+ (val & STM32_LPTIM_CMPOK_ARROK),
+ 100, 1000);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
+ STM32_LPTIM_CMPOKCF_ARROKCF);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ return ret;
+ }
+ priv->enabled = true;
+
+ /* Start LP timer in continuous mode */
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
+ STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
+}
+
+static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
+{
+ u32 mask = STM32_LPTIM_ENC | STM32_LPTIM_COUNTMODE |
+ STM32_LPTIM_CKPOL | STM32_LPTIM_PRESC;
+ u32 val;
+
+ /* Set up LP timer encoder/counter and polarity, without prescaler */
+ if (priv->quadrature_mode)
+ val = enable ? STM32_LPTIM_ENC : 0;
+ else
+ val = enable ? STM32_LPTIM_COUNTMODE : 0;
+ val |= FIELD_PREP(STM32_LPTIM_CKPOL, enable ? priv->polarity : 0);
+
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val);
+}
+
+static int stm32_lptim_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_ENABLE:
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ /* Bail out early if the timer is busy, or already in the requested state */
+ ret = stm32_lptim_is_enabled(priv);
+ if ((ret < 0) || (!ret && !val))
+ return ret;
+ if (val && ret)
+ return -EBUSY;
+
+ ret = stm32_lptim_setup(priv, val);
+ if (ret)
+ return ret;
+ return stm32_lptim_set_enable_state(priv, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int stm32_lptim_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+ u32 dat;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &dat);
+ if (ret)
+ return ret;
+ *val = dat;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_ENABLE:
+ ret = stm32_lptim_is_enabled(priv);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ /* Non-quadrature mode: scale = 1 */
+ *val = 1;
+ *val2 = 0;
+ if (priv->quadrature_mode) {
+ /*
+ * Quadrature encoder mode (IIO_VAL_FRACTIONAL_LOG2 reports val / 2^val2):
+ * - both edges: quarter cycle, scale is 1/2^2 = 0.25
+ * - rising or falling edge only: scale is 1/2^1 = 0.5
+ */
+ if (priv->polarity > 1)
+ *val2 = 2;
+ else
+ *val2 = 1;
+ }
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info stm32_lptim_cnt_iio_info = {
+ .read_raw = stm32_lptim_read_raw,
+ .write_raw = stm32_lptim_write_raw,
+};
+
+static const char *const stm32_lptim_quadrature_modes[] = {
+ "non-quadrature",
+ "quadrature",
+};
+
+static int stm32_lptim_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return priv->quadrature_mode;
+}
+
+static int stm32_lptim_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int type)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ priv->quadrature_mode = type;
+
+ return 0;
+}
+
+static const struct iio_enum stm32_lptim_quadrature_mode_en = {
+ .items = stm32_lptim_quadrature_modes,
+ .num_items = ARRAY_SIZE(stm32_lptim_quadrature_modes),
+ .get = stm32_lptim_get_quadrature_mode,
+ .set = stm32_lptim_set_quadrature_mode,
+};
+
+static const char * const stm32_lptim_cnt_polarity[] = {
+ "rising-edge", "falling-edge", "both-edges",
+};
+
+static int stm32_lptim_cnt_get_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return priv->polarity;
+}
+
+static int stm32_lptim_cnt_set_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int type)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ priv->polarity = type;
+
+ return 0;
+}
+
+static const struct iio_enum stm32_lptim_cnt_polarity_en = {
+ .items = stm32_lptim_cnt_polarity,
+ .num_items = ARRAY_SIZE(stm32_lptim_cnt_polarity),
+ .get = stm32_lptim_cnt_get_polarity,
+ .set = stm32_lptim_cnt_set_polarity,
+};
+
+static ssize_t stm32_lptim_cnt_get_ceiling(struct stm32_lptim_cnt *priv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling);
+}
+
+static ssize_t stm32_lptim_cnt_set_ceiling(struct stm32_lptim_cnt *priv,
+ const char *buf, size_t len)
+{
+ unsigned int ceiling;
+ int ret;
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ /* Parse into a local first so an invalid value leaves priv->ceiling intact */
+ ret = kstrtouint(buf, 0, &ceiling);
+ if (ret)
+ return ret;
+
+ if (ceiling > STM32_LPTIM_MAX_ARR)
+ return -EINVAL;
+
+ priv->ceiling = ceiling;
+
+ return len;
+}
+
+static ssize_t stm32_lptim_cnt_get_preset_iio(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return stm32_lptim_cnt_get_ceiling(priv, buf);
+}
+
+static ssize_t stm32_lptim_cnt_set_preset_iio(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return stm32_lptim_cnt_set_ceiling(priv, buf, len);
+}
+
+/* LP timer with encoder */
+static const struct iio_chan_spec_ext_info stm32_lptim_enc_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = stm32_lptim_cnt_get_preset_iio,
+ .write = stm32_lptim_cnt_set_preset_iio,
+ },
+ IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE,
+ &stm32_lptim_quadrature_mode_en),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_lptim_quadrature_mode_en),
+ {}
+};
+
+static const struct iio_chan_spec stm32_lptim_enc_channels = {
+ .type = IIO_COUNT,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = stm32_lptim_enc_ext_info,
+ .indexed = 1,
+};
+
+/* LP timer without encoder (counter only) */
+static const struct iio_chan_spec_ext_info stm32_lptim_cnt_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = stm32_lptim_cnt_get_preset_iio,
+ .write = stm32_lptim_cnt_set_preset_iio,
+ },
+ IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en),
+ {}
+};
+
+static const struct iio_chan_spec stm32_lptim_cnt_channels = {
+ .type = IIO_COUNT,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = stm32_lptim_cnt_ext_info,
+ .indexed = 1,
+};
+
+/**
+ * enum stm32_lptim_cnt_function - stm32 LPTimer counter & encoder modes
+ * @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges
+ * @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature)
+ */
+enum stm32_lptim_cnt_function {
+ STM32_LPTIM_COUNTER_INCREASE,
+ STM32_LPTIM_ENCODER_BOTH_EDGE,
+};
+
+static enum counter_count_function stm32_lptim_cnt_functions[] = {
+ [STM32_LPTIM_COUNTER_INCREASE] = COUNTER_COUNT_FUNCTION_INCREASE,
+ [STM32_LPTIM_ENCODER_BOTH_EDGE] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+};
+
+enum stm32_lptim_synapse_action {
+ STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE,
+ STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE,
+ STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES,
+ STM32_LPTIM_SYNAPSE_ACTION_NONE,
+};
+
+static enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
+ /* Index must match with stm32_lptim_cnt_polarity[] (priv->polarity) */
+ [STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ [STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ [STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ [STM32_LPTIM_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static int stm32_lptim_cnt_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_read_value *val)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ u32 cnt;
+ int ret;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &cnt);
+ if (ret)
+ return ret;
+
+ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+
+ return 0;
+}
+
+static int stm32_lptim_cnt_function_get(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *function)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+
+ if (!priv->quadrature_mode) {
+ *function = STM32_LPTIM_COUNTER_INCREASE;
+ return 0;
+ }
+
+ if (priv->polarity == STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES) {
+ *function = STM32_LPTIM_ENCODER_BOTH_EDGE;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_lptim_cnt_function_set(struct counter_device *counter,
+ struct counter_count *count,
+ size_t function)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ switch (function) {
+ case STM32_LPTIM_COUNTER_INCREASE:
+ priv->quadrature_mode = 0;
+ return 0;
+ case STM32_LPTIM_ENCODER_BOTH_EDGE:
+ priv->quadrature_mode = 1;
+ priv->polarity = STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t stm32_lptim_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ int ret;
+
+ ret = stm32_lptim_is_enabled(priv);
+ if (ret < 0)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ret);
+}
+
+static ssize_t stm32_lptim_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *private,
+ const char *buf, size_t len)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret)
+ return ret;
+
+ /* Bail out early if the timer is busy, or already in the requested state */
+ ret = stm32_lptim_is_enabled(priv);
+ if ((ret < 0) || (!ret && !enable))
+ return ret;
+ if (enable && ret)
+ return -EBUSY;
+
+ ret = stm32_lptim_setup(priv, enable);
+ if (ret)
+ return ret;
+
+ ret = stm32_lptim_set_enable_state(priv, enable);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+
+ return stm32_lptim_cnt_get_ceiling(priv, buf);
+}
+
+static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *private,
+ const char *buf, size_t len)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+
+ return stm32_lptim_cnt_set_ceiling(priv, buf, len);
+}
+
+static const struct counter_count_ext stm32_lptim_cnt_ext[] = {
+ {
+ .name = "enable",
+ .read = stm32_lptim_cnt_enable_read,
+ .write = stm32_lptim_cnt_enable_write
+ },
+ {
+ .name = "ceiling",
+ .read = stm32_lptim_cnt_ceiling_read,
+ .write = stm32_lptim_cnt_ceiling_write
+ },
+};
+
+static int stm32_lptim_cnt_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t *action)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ size_t function;
+ int err;
+
+ err = stm32_lptim_cnt_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ switch (function) {
+ case STM32_LPTIM_COUNTER_INCREASE:
+ /* LP Timer acts as up-counter on input 1 */
+ if (synapse->signal->id == count->synapses[0].signal->id)
+ *action = priv->polarity;
+ else
+ *action = STM32_LPTIM_SYNAPSE_ACTION_NONE;
+ return 0;
+ case STM32_LPTIM_ENCODER_BOTH_EDGE:
+ *action = priv->polarity;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_lptim_cnt_action_set(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t action)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ size_t function;
+ int err;
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ err = stm32_lptim_cnt_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ /* only set polarity when in counter mode (on input 1) */
+ if (function == STM32_LPTIM_COUNTER_INCREASE
+ && synapse->signal->id == count->synapses[0].signal->id) {
+ switch (action) {
+ case STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE:
+ case STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE:
+ case STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES:
+ priv->polarity = action;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct counter_ops stm32_lptim_cnt_ops = {
+ .count_read = stm32_lptim_cnt_read,
+ .function_get = stm32_lptim_cnt_function_get,
+ .function_set = stm32_lptim_cnt_function_set,
+ .action_get = stm32_lptim_cnt_action_get,
+ .action_set = stm32_lptim_cnt_action_set,
+};
+
+static struct counter_signal stm32_lptim_cnt_signals[] = {
+ {
+ .id = 0,
+ .name = "Channel 1 Quadrature A"
+ },
+ {
+ .id = 1,
+ .name = "Channel 1 Quadrature B"
+ }
+};
+
+static struct counter_synapse stm32_lptim_cnt_synapses[] = {
+ {
+ .actions_list = stm32_lptim_cnt_synapse_actions,
+ .num_actions = ARRAY_SIZE(stm32_lptim_cnt_synapse_actions),
+ .signal = &stm32_lptim_cnt_signals[0]
+ },
+ {
+ .actions_list = stm32_lptim_cnt_synapse_actions,
+ .num_actions = ARRAY_SIZE(stm32_lptim_cnt_synapse_actions),
+ .signal = &stm32_lptim_cnt_signals[1]
+ }
+};
+
+/* LP timer with encoder */
+static struct counter_count stm32_lptim_enc_counts = {
+ .id = 0,
+ .name = "LPTimer Count",
+ .functions_list = stm32_lptim_cnt_functions,
+ .num_functions = ARRAY_SIZE(stm32_lptim_cnt_functions),
+ .synapses = stm32_lptim_cnt_synapses,
+ .num_synapses = ARRAY_SIZE(stm32_lptim_cnt_synapses),
+ .ext = stm32_lptim_cnt_ext,
+ .num_ext = ARRAY_SIZE(stm32_lptim_cnt_ext)
+};
+
+/* LP timer without encoder (counter only) */
+static struct counter_count stm32_lptim_in1_counts = {
+ .id = 0,
+ .name = "LPTimer Count",
+ .functions_list = stm32_lptim_cnt_functions,
+ .num_functions = 1,
+ .synapses = stm32_lptim_cnt_synapses,
+ .num_synapses = 1,
+ .ext = stm32_lptim_cnt_ext,
+ .num_ext = ARRAY_SIZE(stm32_lptim_cnt_ext)
+};
+
+static int stm32_lptim_cnt_probe(struct platform_device *pdev)
+{
+ struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct stm32_lptim_cnt *priv;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ if (IS_ERR_OR_NULL(ddata))
+ return -EINVAL;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->dev = &pdev->dev;
+ priv->regmap = ddata->regmap;
+ priv->clk = ddata->clk;
+ priv->ceiling = STM32_LPTIM_MAX_ARR;
+
+ /* Initialize IIO device */
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &stm32_lptim_cnt_iio_info;
+ if (ddata->has_encoder)
+ indio_dev->channels = &stm32_lptim_enc_channels;
+ else
+ indio_dev->channels = &stm32_lptim_cnt_channels;
+ indio_dev->num_channels = 1;
+
+ /* Initialize Counter device */
+ priv->counter.name = dev_name(&pdev->dev);
+ priv->counter.parent = &pdev->dev;
+ priv->counter.ops = &stm32_lptim_cnt_ops;
+ if (ddata->has_encoder) {
+ priv->counter.counts = &stm32_lptim_enc_counts;
+ priv->counter.num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
+ } else {
+ priv->counter.counts = &stm32_lptim_in1_counts;
+ priv->counter.num_signals = 1;
+ }
+ priv->counter.num_counts = 1;
+ priv->counter.signals = stm32_lptim_cnt_signals;
+ priv->counter.priv = priv;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_counter_register(&pdev->dev, &priv->counter);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_lptim_cnt_suspend(struct device *dev)
+{
+ struct stm32_lptim_cnt *priv = dev_get_drvdata(dev);
+ int ret;
+
+ /* Only take care of an enabled counter: don't disturb other MFD children */
+ if (priv->enabled) {
+ ret = stm32_lptim_setup(priv, 0);
+ if (ret)
+ return ret;
+
+ ret = stm32_lptim_set_enable_state(priv, 0);
+ if (ret)
+ return ret;
+
+ /* Force enable state for later resume */
+ priv->enabled = true;
+ }
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int stm32_lptim_cnt_resume(struct device *dev)
+{
+ struct stm32_lptim_cnt *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ if (priv->enabled) {
+ priv->enabled = false;
+ ret = stm32_lptim_setup(priv, 1);
+ if (ret)
+ return ret;
+
+ ret = stm32_lptim_set_enable_state(priv, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stm32_lptim_cnt_pm_ops, stm32_lptim_cnt_suspend,
+ stm32_lptim_cnt_resume);
+
+static const struct of_device_id stm32_lptim_cnt_of_match[] = {
+ { .compatible = "st,stm32-lptimer-counter", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_lptim_cnt_of_match);
+
+static struct platform_driver stm32_lptim_cnt_driver = {
+ .probe = stm32_lptim_cnt_probe,
+ .driver = {
+ .name = "stm32-lptimer-counter",
+ .of_match_table = stm32_lptim_cnt_of_match,
+ .pm = &stm32_lptim_cnt_pm_ops,
+ },
+};
+module_platform_driver(stm32_lptim_cnt_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_ALIAS("platform:stm32-lptimer-counter");
+MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
new file mode 100644
index 000000000000..644ba18a72ad
--- /dev/null
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32 Timer Encoder and Counter driver
+ *
+ * Copyright (C) STMicroelectronics 2018
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *
+ */
+#include <linux/counter.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/mfd/stm32-timers.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define TIM_CCMR_CCXS (BIT(8) | BIT(0))
+#define TIM_CCMR_MASK (TIM_CCMR_CC1S | TIM_CCMR_CC2S | \
+ TIM_CCMR_IC1F | TIM_CCMR_IC2F)
+#define TIM_CCER_MASK (TIM_CCER_CC1P | TIM_CCER_CC1NP | \
+ TIM_CCER_CC2P | TIM_CCER_CC2NP)
+
+struct stm32_timer_cnt {
+ struct counter_device counter;
+ struct regmap *regmap;
+ struct clk *clk;
+ u32 ceiling;
+};
+
+/**
+ * enum stm32_count_function - stm32 timer counter encoder modes
+ * @STM32_COUNT_SLAVE_MODE_DISABLED: counts on internal clock when CEN=1
+ * @STM32_COUNT_ENCODER_MODE_1: counts TI1FP1 edges, depending on TI2FP2 level
+ * @STM32_COUNT_ENCODER_MODE_2: counts TI2FP2 edges, depending on TI1FP1 level
+ * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
+ */
+enum stm32_count_function {
+ STM32_COUNT_SLAVE_MODE_DISABLED = -1,
+ STM32_COUNT_ENCODER_MODE_1,
+ STM32_COUNT_ENCODER_MODE_2,
+ STM32_COUNT_ENCODER_MODE_3,
+};
+
+static enum counter_count_function stm32_count_functions[] = {
+ [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
+ [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
+ [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+};
+
+static int stm32_count_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_read_value *val)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 cnt;
+
+ regmap_read(priv->regmap, TIM_CNT, &cnt);
+ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+
+ return 0;
+}
+
+static int stm32_count_write(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_write_value *val)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 cnt;
+ int err;
+
+ err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
+ if (err)
+ return err;
+
+ if (cnt > priv->ceiling)
+ return -EINVAL;
+
+ return regmap_write(priv->regmap, TIM_CNT, cnt);
+}
+
+static int stm32_count_function_get(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *function)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 smcr;
+
+ regmap_read(priv->regmap, TIM_SMCR, &smcr);
+
+ switch (smcr & TIM_SMCR_SMS) {
+ case 1:
+ *function = STM32_COUNT_ENCODER_MODE_1;
+ return 0;
+ case 2:
+ *function = STM32_COUNT_ENCODER_MODE_2;
+ return 0;
+ case 3:
+ *function = STM32_COUNT_ENCODER_MODE_3;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_count_function_set(struct counter_device *counter,
+ struct counter_count *count,
+ size_t function)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 cr1, sms;
+
+ switch (function) {
+ case STM32_COUNT_ENCODER_MODE_1:
+ sms = 1;
+ break;
+ case STM32_COUNT_ENCODER_MODE_2:
+ sms = 2;
+ break;
+ case STM32_COUNT_ENCODER_MODE_3:
+ sms = 3;
+ break;
+ default:
+ sms = 0;
+ break;
+ }
+
+ /* Store enable status */
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+
+ /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+ regmap_write(priv->regmap, TIM_ARR, priv->ceiling);
+
+ regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
+
+ /* Make sure that registers are updated */
+ regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
+
+ /* Restore the enable status */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, cr1);
+
+ return 0;
+}
+
+static ssize_t stm32_count_direction_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ const char *direction;
+ u32 cr1;
+
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+ direction = (cr1 & TIM_CR1_DIR) ? "backward" : "forward";
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", direction);
+}
+
+static ssize_t stm32_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 arr;
+
+ regmap_read(priv->regmap, TIM_ARR, &arr);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", arr);
+}
+
+static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *private,
+ const char *buf, size_t len)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ unsigned int ceiling;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &ceiling);
+ if (ret)
+ return ret;
+
+ /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+ regmap_write(priv->regmap, TIM_ARR, ceiling);
+
+ priv->ceiling = ceiling;
+ return len;
+}
+
+static ssize_t stm32_count_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 cr1;
+
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)(cr1 & TIM_CR1_CEN));
+}
+
+static ssize_t stm32_count_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *private,
+ const char *buf, size_t len)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ int err;
+ u32 cr1;
+ bool enable;
+
+ err = kstrtobool(buf, &enable);
+ if (err)
+ return err;
+
+ if (enable) {
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+ if (!(cr1 & TIM_CR1_CEN))
+ clk_enable(priv->clk);
+
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+ TIM_CR1_CEN);
+ } else {
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ if (cr1 & TIM_CR1_CEN)
+ clk_disable(priv->clk);
+ }
+
+ return len;
+}
+
+static const struct counter_count_ext stm32_count_ext[] = {
+ {
+ .name = "direction",
+ .read = stm32_count_direction_read,
+ },
+ {
+ .name = "enable",
+ .read = stm32_count_enable_read,
+ .write = stm32_count_enable_write
+ },
+ {
+ .name = "ceiling",
+ .read = stm32_count_ceiling_read,
+ .write = stm32_count_ceiling_write
+ },
+};
+
+enum stm32_synapse_action {
+ STM32_SYNAPSE_ACTION_NONE,
+ STM32_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+static enum counter_synapse_action stm32_synapse_actions[] = {
+ [STM32_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ [STM32_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+static int stm32_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t *action)
+{
+ size_t function;
+ int err;
+
+ /* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */
+ *action = STM32_SYNAPSE_ACTION_NONE;
+
+ err = stm32_count_function_get(counter, count, &function);
+ if (err)
+ return 0;
+
+ switch (function) {
+ case STM32_COUNT_ENCODER_MODE_1:
+ /* counts up/down on TI1FP1 edge depending on TI2FP2 level */
+ if (synapse->signal->id == count->synapses[0].signal->id)
+ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case STM32_COUNT_ENCODER_MODE_2:
+ /* counts up/down on TI2FP2 edge depending on TI1FP1 level */
+ if (synapse->signal->id == count->synapses[1].signal->id)
+ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case STM32_COUNT_ENCODER_MODE_3:
+ /* counts up/down on both TI1FP1 and TI2FP2 edges */
+ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct counter_ops stm32_timer_cnt_ops = {
+ .count_read = stm32_count_read,
+ .count_write = stm32_count_write,
+ .function_get = stm32_count_function_get,
+ .function_set = stm32_count_function_set,
+ .action_get = stm32_action_get,
+};
+
+static struct counter_signal stm32_signals[] = {
+ {
+ .id = 0,
+ .name = "Channel 1 Quadrature A"
+ },
+ {
+ .id = 1,
+ .name = "Channel 1 Quadrature B"
+ }
+};
+
+static struct counter_synapse stm32_count_synapses[] = {
+ {
+ .actions_list = stm32_synapse_actions,
+ .num_actions = ARRAY_SIZE(stm32_synapse_actions),
+ .signal = &stm32_signals[0]
+ },
+ {
+ .actions_list = stm32_synapse_actions,
+ .num_actions = ARRAY_SIZE(stm32_synapse_actions),
+ .signal = &stm32_signals[1]
+ }
+};
+
+static struct counter_count stm32_counts = {
+ .id = 0,
+ .name = "Channel 1 Count",
+ .functions_list = stm32_count_functions,
+ .num_functions = ARRAY_SIZE(stm32_count_functions),
+ .synapses = stm32_count_synapses,
+ .num_synapses = ARRAY_SIZE(stm32_count_synapses),
+ .ext = stm32_count_ext,
+ .num_ext = ARRAY_SIZE(stm32_count_ext)
+};
+
+static int stm32_timer_cnt_probe(struct platform_device *pdev)
+{
+ struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct stm32_timer_cnt *priv;
+
+ if (IS_ERR_OR_NULL(ddata))
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = ddata->regmap;
+ priv->clk = ddata->clk;
+ priv->ceiling = ddata->max_arr;
+
+ priv->counter.name = dev_name(dev);
+ priv->counter.parent = dev;
+ priv->counter.ops = &stm32_timer_cnt_ops;
+ priv->counter.counts = &stm32_counts;
+ priv->counter.num_counts = 1;
+ priv->counter.signals = stm32_signals;
+ priv->counter.num_signals = ARRAY_SIZE(stm32_signals);
+ priv->counter.priv = priv;
+
+ /* Register Counter device */
+ return devm_counter_register(dev, &priv->counter);
+}
+
+static const struct of_device_id stm32_timer_cnt_of_match[] = {
+ { .compatible = "st,stm32-timer-counter", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_timer_cnt_of_match);
+
+static struct platform_driver stm32_timer_cnt_driver = {
+ .probe = stm32_timer_cnt_probe,
+ .driver = {
+ .name = "stm32-timer-counter",
+ .of_match_table = stm32_timer_cnt_of_match,
+ },
+};
+module_platform_driver(stm32_timer_cnt_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_ALIAS("platform:stm32-timer-counter");
+MODULE_DESCRIPTION("STMicroelectronics STM32 TIMER counter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index b22e6bba71f1..4d2b33a30292 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -26,10 +26,6 @@ config CPU_FREQ_GOV_COMMON
select IRQ_WORK
bool
-config CPU_FREQ_BOOST_SW
- bool
- depends on THERMAL
-
config CPU_FREQ_STAT
bool "CPU frequency transition statistics"
help
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index c72258a44ba4..73bb2aafb1a8 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -366,7 +366,7 @@ static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *dat
val = drv_read(data, mask);
- pr_debug("get_cur_val = %u\n", val);
+ pr_debug("%s = %u\n", __func__, val);
return val;
}
@@ -378,7 +378,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
unsigned int freq;
unsigned int cached_freq;
- pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+ pr_debug("%s (%d)\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (unlikely(!policy))
@@ -458,8 +458,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
if (acpi_pstate_strict) {
if (!check_freqs(policy, mask,
policy->freq_table[index].frequency)) {
- pr_debug("acpi_cpufreq_target failed (%d)\n",
- policy->cpu);
+ pr_debug("%s (%d)\n", __func__, policy->cpu);
result = -EAGAIN;
}
}
@@ -573,7 +572,7 @@ static int cpufreq_boost_down_prep(unsigned int cpu)
static int __init acpi_cpufreq_early_init(void)
{
unsigned int i;
- pr_debug("acpi_cpufreq_early_init\n");
+ pr_debug("%s\n", __func__);
acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
if (!acpi_perf_data) {
@@ -657,7 +656,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
static int blacklisted;
#endif
- pr_debug("acpi_cpufreq_cpu_init\n");
+ pr_debug("%s\n", __func__);
#ifdef CONFIG_SMP
if (blacklisted)
@@ -856,7 +855,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
- pr_debug("acpi_cpufreq_cpu_exit\n");
+ pr_debug("%s\n", __func__);
policy->fast_switch_possible = false;
policy->driver_data = NULL;
@@ -881,7 +880,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
- pr_debug("acpi_cpufreq_resume\n");
+ pr_debug("%s\n", __func__);
data->resume = 1;
@@ -954,7 +953,7 @@ static int __init acpi_cpufreq_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
- pr_debug("acpi_cpufreq_init\n");
+ pr_debug("%s\n", __func__);
ret = acpi_cpufreq_early_init();
if (ret)
@@ -991,7 +990,7 @@ static int __init acpi_cpufreq_init(void)
static void __exit acpi_cpufreq_exit(void)
{
- pr_debug("acpi_cpufreq_exit\n");
+ pr_debug("%s\n", __func__);
acpi_cpufreq_boost_exit();
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 4ac7c3cf34be..6927a8c0e748 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
if (!pcidev) {
- if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
return -ENODEV;
}
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 75491fc841a6..0df16eb1eb3c 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct armada_37xx_dvfs *dvfs;
struct platform_device *pdev;
unsigned long freq;
- unsigned int cur_frequency;
+ unsigned int cur_frequency, base_frequency;
struct regmap *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
- struct clk *clk;
+ struct clk *clk, *parent;
nb_pm_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void)
return PTR_ERR(clk);
}
+ parent = clk_get_parent(clk);
+ if (IS_ERR(parent)) {
+ dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
+ clk_put(clk);
+ return PTR_ERR(parent);
+ }
+
+ /* Get parent CPU frequency */
+ base_frequency = clk_get_rate(parent);
+
+ if (!base_frequency) {
+ dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
+ clk_put(clk);
+ return -EINVAL;
+ }
+
/* Get nominal (current) CPU frequency */
cur_frequency = clk_get_rate(clk);
if (!cur_frequency) {
@@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
- freq = cur_frequency / dvfs->divider[load_lvl];
+ freq = base_frequency / dvfs->divider[load_lvl];
ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index b3f4bd647e9b..988ebc326bdb 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
of_node_put(node);
return -ENODEV;
}
+ of_node_put(node);
nb_cpus = num_possible_cpus();
freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e10922709d13..85ff958e01f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -34,11 +34,6 @@
static LIST_HEAD(cpufreq_policy_list);
-static inline bool policy_is_inactive(struct cpufreq_policy *policy)
-{
- return cpumask_empty(policy->cpus);
-}
-
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active) \
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -250,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+/**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
+{
+ if (WARN_ON(!policy))
+ return;
+
+ lockdep_assert_held(&policy->rwsem);
+
+ up_write(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+}
+
+/**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return NULL;
+
+ down_write(&policy->rwsem);
+
+ if (policy_is_inactive(policy)) {
+ cpufreq_cpu_release(policy);
+ return NULL;
+ }
+
+ return policy;
+}
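
A short, illustrative pairing of the two new helpers, not part of this patch; the work done while the policy rwsem is held is left as a hypothetical placeholder.

static void example_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;	/* no policy for this CPU, or the policy is inactive */

	/* policy->rwsem is held for writing at this point */
	/* ... inspect or update the policy ... */

	cpufreq_cpu_release(policy);	/* drops the rwsem and the reference */
}
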
+
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -300,11 +340,14 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs,
unsigned int state)
{
+ int cpu;
+
BUG_ON(irqs_disabled());
if (cpufreq_disabled())
return;
+ freqs->policy = policy;
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
@@ -324,10 +367,8 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
}
}
- for_each_cpu(freqs->cpu, policy->cpus) {
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_PRECHANGE, freqs);
- }
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_PRECHANGE, freqs);
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
@@ -337,11 +378,11 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
cpumask_pr_args(policy->cpus));
- for_each_cpu(freqs->cpu, policy->cpus) {
- trace_cpu_frequency(freqs->new, freqs->cpu);
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_POSTCHANGE, freqs);
- }
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_frequency(freqs->new, cpu);
+
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_POSTCHANGE, freqs);
cpufreq_stats_record_transition(policy, freqs->new);
policy->cur = freqs->new;
@@ -426,7 +467,7 @@ static void cpufreq_list_transition_notifiers(void)
mutex_lock(&cpufreq_transition_notifier_list.mutex);
for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
- pr_info("%pF\n", nb->notifier_call);
+ pr_info("%pS\n", nb->notifier_call);
mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
@@ -578,50 +619,52 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
return NULL;
}
+static int cpufreq_parse_policy(char *str_governor,
+ struct cpufreq_policy *policy)
+{
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ return 0;
+ }
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ return 0;
+ }
+ return -EINVAL;
+}
+
/**
- * cpufreq_parse_governor - parse a governor string
+ * cpufreq_parse_governor - parse a governor string only for !setpolicy
*/
static int cpufreq_parse_governor(char *str_governor,
struct cpufreq_policy *policy)
{
- if (cpufreq_driver->setpolicy) {
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- return 0;
- }
-
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
- return 0;
- }
- } else {
- struct cpufreq_governor *t;
+ struct cpufreq_governor *t;
- mutex_lock(&cpufreq_governor_mutex);
+ mutex_lock(&cpufreq_governor_mutex);
- t = find_governor(str_governor);
- if (!t) {
- int ret;
+ t = find_governor(str_governor);
+ if (!t) {
+ int ret;
- mutex_unlock(&cpufreq_governor_mutex);
+ mutex_unlock(&cpufreq_governor_mutex);
- ret = request_module("cpufreq_%s", str_governor);
- if (ret)
- return -EINVAL;
+ ret = request_module("cpufreq_%s", str_governor);
+ if (ret)
+ return -EINVAL;
- mutex_lock(&cpufreq_governor_mutex);
+ mutex_lock(&cpufreq_governor_mutex);
- t = find_governor(str_governor);
- }
- if (t && !try_module_get(t->owner))
- t = NULL;
+ t = find_governor(str_governor);
+ }
+ if (t && !try_module_get(t->owner))
+ t = NULL;
- mutex_unlock(&cpufreq_governor_mutex);
+ mutex_unlock(&cpufreq_governor_mutex);
- if (t) {
- policy->governor = t;
- return 0;
- }
+ if (t) {
+ policy->governor = t;
+ return 0;
}
return -EINVAL;
@@ -669,9 +712,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
return ret;
}
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy);
-
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
@@ -746,8 +786,13 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy))
- return -EINVAL;
+ if (cpufreq_driver->setpolicy) {
+ if (cpufreq_parse_policy(str_governor, &new_policy))
+ return -EINVAL;
+ } else {
+ if (cpufreq_parse_governor(str_governor, &new_policy))
+ return -EINVAL;
+ }
ret = cpufreq_set_policy(policy, &new_policy);
@@ -857,11 +902,9 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
unsigned int limit;
int ret;
- if (cpufreq_driver->bios_limit) {
- ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
- if (!ret)
- return sprintf(buf, "%u\n", limit);
- }
+ ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+ if (!ret)
+ return sprintf(buf, "%u\n", limit);
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
@@ -1015,32 +1058,39 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
- struct cpufreq_governor *gov = NULL;
+ struct cpufreq_governor *gov = NULL, *def_gov = NULL;
struct cpufreq_policy new_policy;
memcpy(&new_policy, policy, sizeof(*policy));
- /* Update governor of new_policy to the governor used before hotplug */
- gov = find_governor(policy->last_governor);
- if (gov) {
- pr_debug("Restoring governor %s for cpu %d\n",
+ def_gov = cpufreq_default_governor();
+
+ if (has_target()) {
+ /*
+ * Update governor of new_policy to the governor used before
+ * hotplug
+ */
+ gov = find_governor(policy->last_governor);
+ if (gov) {
+ pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
+ } else {
+ if (!def_gov)
+ return -ENODATA;
+ gov = def_gov;
+ }
+ new_policy.governor = gov;
} else {
- gov = cpufreq_default_governor();
- if (!gov)
- return -ENODATA;
- }
-
- new_policy.governor = gov;
-
- /* Use the default policy if there is no last_policy. */
- if (cpufreq_driver->setpolicy) {
- if (policy->last_policy)
+ /* Use the default policy if there is no last_policy. */
+ if (policy->last_policy) {
new_policy.policy = policy->last_policy;
- else
- cpufreq_parse_governor(gov->name, &new_policy);
+ } else {
+ if (!def_gov)
+ return -ENODATA;
+ cpufreq_parse_policy(def_gov->name, &new_policy);
+ }
}
- /* set default policy */
+
return cpufreq_set_policy(policy, &new_policy);
}
@@ -1098,6 +1148,12 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ /*
+ * The entire policy object will be freed below, but the extra
+ * memory allocated for the kobject name needs to be freed by
+ * releasing the kobject.
+ */
+ kobject_put(&policy->kobj);
goto err_free_real_cpus;
}
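For reference, the same error-handling rule applies to any kobject_init_and_add() caller: once the kobject has been initialized, even a failing call must be followed by kobject_put() so that kobject cleanup frees the allocated name and invokes the ktype's ->release(). A generic sketch of that pattern (type and function names hypothetical):

#include <linux/kobject.h>
#include <linux/slab.h>

/* Hypothetical example object, for illustration only. */
struct example_obj {
	struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

static int example_add(struct example_obj *obj, struct kobject *parent, int id)
{
	int ret;

	ret = kobject_init_and_add(&obj->kobj, &example_ktype, parent,
				   "example%d", id);
	if (ret) {
		/* kobject_put() frees the allocated name and calls ->release(). */
		kobject_put(&obj->kobj);
		return ret;
	}

	return 0;
}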
@@ -1550,7 +1606,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
unsigned int ret_freq = 0;
- if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)))
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
@@ -1588,7 +1644,8 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
- ret_freq = __cpufreq_get(policy);
+ if (cpufreq_driver->get)
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
@@ -2229,8 +2286,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
*
* The cpuinfo part of @policy is not updated by this function.
*/
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
int ret;
@@ -2337,17 +2394,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
struct cpufreq_policy new_policy;
if (!policy)
return;
- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy))
- goto unlock;
-
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
@@ -2364,12 +2416,26 @@ void cpufreq_update_policy(unsigned int cpu)
cpufreq_set_policy(policy, &new_policy);
unlock:
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
+/**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+void cpufreq_update_limits(unsigned int cpu)
+{
+ if (cpufreq_driver->update_limits)
+ cpufreq_driver->update_limits(cpu);
+ else
+ cpufreq_update_policy(cpu);
+}
+EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
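A sketch of the intended caller side (the handler name is hypothetical): code that learns about a platform-imposed change of the frequency limits simply calls cpufreq_update_limits() and lets the driver decide how to react.

/* Hypothetical notification handler, for illustration only. */
static void example_limits_changed(unsigned int cpu)
{
	/*
	 * Drivers that provide ->update_limits get it called directly;
	 * all others fall back to cpufreq_update_policy().
	 */
	cpufreq_update_limits(cpu);
}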
/*********************************************************************
* BOOST *
*********************************************************************/
@@ -2426,7 +2492,7 @@ int cpufreq_boost_trigger_state(int state)
static bool cpufreq_boost_supported(void)
{
- return likely(cpufreq_driver) && cpufreq_driver->set_boost;
+ return cpufreq_driver->set_boost;
}
static int create_boost_sysfs_file(void)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ffa9adeaba31..9d1d9bf02710 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
/* Failure, so roll back. */
pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
+ kobject_put(&dbs_data->attr_set.kobj);
+
policy->governor_data = NULL;
if (!have_governor_per_policy())
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index e2db5581489a..08b192eb22c6 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-static DEFINE_SPINLOCK(cpufreq_stats_lock);
struct cpufreq_stats {
unsigned int total_trans;
@@ -23,6 +22,7 @@ struct cpufreq_stats {
unsigned int state_num;
unsigned int last_index;
u64 *time_in_state;
+ spinlock_t lock;
unsigned int *freq_table;
unsigned int *trans_table;
};
@@ -39,12 +39,12 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
{
unsigned int count = stats->max_state;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
memset(stats->time_in_state, 0, count * sizeof(u64));
memset(stats->trans_table, 0, count * count * sizeof(int));
stats->last_time = get_jiffies_64();
stats->total_trans = 0;
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
@@ -62,9 +62,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
if (policy->fast_switch_enabled)
return 0;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
cpufreq_stats_update(stats);
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
for (i = 0; i < stats->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -211,6 +211,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
stats->state_num = i;
stats->last_time = get_jiffies_64();
stats->last_index = freq_table_get_index(stats, policy->cur);
+ spin_lock_init(&stats->lock);
policy->stats = stats;
ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
@@ -242,11 +243,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
if (old_index == -1 || new_index == -1 || old_index == new_index)
return;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
cpufreq_stats_update(stats);
stats->last_index = new_index;
stats->trans_table[old_index * stats->max_state + new_index]++;
stats->total_trans++;
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3a8cc99e6815..e7be0af3199f 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -290,9 +290,6 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
struct freq_attr *cpufreq_generic_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
-#ifdef CONFIG_CPU_FREQ_BOOST_SW
- &cpufreq_freq_attr_scaling_boost_freqs,
-#endif
NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index a4ff09f91c8f..3e17560b1efe 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -388,11 +388,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
ret = imx6ul_opp_check_speed_grading(cpu_dev);
if (ret) {
if (ret == -EPROBE_DEFER)
- return ret;
+ goto put_node;
dev_err(cpu_dev, "failed to read ocotp: %d\n",
ret);
- return ret;
+ goto put_node;
}
} else {
imx6q_opp_check_speed_grading(cpu_dev);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b599c7318aab..34b54df41aaa 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
+ * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
+ bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -525,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
u64 epb;
int ret;
- if (!static_cpu_has(X86_FEATURE_EPB))
+ if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
s16 epp;
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
* When hwp_req_data is 0, means that caller didn't read
* MSR_HWP_REQUEST, so need to read and get EPP.
@@ -564,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
u64 epb;
int ret;
- if (!static_cpu_has(X86_FEATURE_EPB))
+ if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -612,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
if (epp < 0)
return epp;
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if (epp == HWP_EPP_PERFORMANCE)
return 1;
if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -621,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
return 3;
else
return 4;
- } else if (static_cpu_has(X86_FEATURE_EPB)) {
+ } else if (boot_cpu_has(X86_FEATURE_EPB)) {
/*
* Range:
* 0x00-0x03 : Performance
@@ -649,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
mutex_lock(&intel_pstate_limits_lock);
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
u64 value;
ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -824,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
epp = cpu_data->epp_powersave;
}
update_epp:
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
} else {
@@ -849,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
value |= HWP_MIN_PERF(min_perf);
/* Set EPP/EPB to min */
- if (static_cpu_has(X86_FEATURE_HWP_EPP))
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
else
intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -897,6 +899,48 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpufreq_policy new_policy;
+ struct cpudata *cpudata;
+
+ if (!policy)
+ return;
+
+ cpudata = all_cpu_data[cpu];
+ policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+ new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+ cpufreq_set_policy(policy, &new_policy);
+
+ cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+ mutex_lock(&intel_pstate_driver_lock);
+
+ update_turbo_state();
+ /*
+ * If turbo has been turned on or off globally, policy limits for
+ * all CPUs need to be updated to reflect that.
+ */
+ if (global.turbo_disabled_mf != global.turbo_disabled) {
+ global.turbo_disabled_mf = global.turbo_disabled;
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(cpu);
+ } else {
+ cpufreq_update_policy(cpu);
+ }
+
+ mutex_unlock(&intel_pstate_driver_lock);
+}
+
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
@@ -1197,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt as we don't process them */
- if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -2138,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
update_turbo_state();
+ global.turbo_disabled_mf = global.turbo_disabled;
policy->cpuinfo.max_freq = global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2182,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
+ .update_limits = intel_pstate_update_limits,
.name = "intel_pstate",
};
@@ -2316,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
.init = intel_cpufreq_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_cpufreq_stop_cpu,
+ .update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
};
@@ -2596,6 +2643,9 @@ static int __init intel_pstate_init(void)
const struct x86_cpu_id *id;
int rc;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
if (no_load)
return -ENODEV;
@@ -2611,7 +2661,7 @@ static int __init intel_pstate_init(void)
} else {
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
- pr_info("CPU ID not supported\n");
+ pr_info("CPU model not supported\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index c2dd43f3f5d8..8d63a6dc8383 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
if (IS_ERR(priv.cpu_clk)) {
dev_err(priv.dev, "Unable to get cpuclk\n");
- return PTR_ERR(priv.cpu_clk);
+ err = PTR_ERR(priv.cpu_clk);
+ goto out_node;
}
err = clk_prepare_enable(priv.cpu_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare cpuclk\n");
- return err;
+ goto out_node;
}
kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
goto out_ddr;
}
- of_node_put(np);
- np = NULL;
-
err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
- if (!err)
- return 0;
+ if (err) {
+ dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ goto out_powersave;
+ }
- dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ of_node_put(np);
+ return 0;
+out_powersave:
clk_disable_unprepare(priv.powersave_clk);
out_ddr:
clk_disable_unprepare(priv.ddr_clk);
out_cpu:
clk_disable_unprepare(priv.cpu_clk);
+out_node:
of_node_put(np);
return err;
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index d9df89392b84..a94355723ef8 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -210,7 +210,7 @@ static int __init maple_cpufreq_init(void)
*/
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp)
- return -ENODEV;
+ goto bail_noprops;
max_freq = (*valp)/1000;
maple_cpu_freqs[0].frequency = max_freq;
maple_cpu_freqs[1].frequency = max_freq/2;
@@ -231,10 +231,6 @@ static int __init maple_cpufreq_init(void)
rc = cpufreq_register_driver(&maple_cpufreq_driver);
- of_node_put(cpunode);
-
- return rc;
-
bail_noprops:
of_node_put(cpunode);
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 75dfbd2a58ea..c7710c149de8 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu = of_get_cpu_node(policy->cpu, NULL);
+ of_node_put(cpu);
if (!cpu)
goto out;
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 52f0d91d30c1..9b4ce2eb8222 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
if (!voltage_gpio){
pr_err("missing cpu-vcore-select gpio\n");
return 1;
@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
pvr = mfspr(SPRN_PVR);
has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index fb77b39a4ce3..3c12e03fa343 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
unsigned int i, supported_cpus = 0;
int ret;
- if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+ if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
__request_acpi_cpufreq();
return -ENODEV;
}
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 41a0f0be3f9f..8414c3a4ea08 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
!cbe_get_cpu_mic_tm_regs(policy->cpu)) {
pr_info("invalid CBE regs pointers for cpufreq\n");
+ of_node_put(cpu);
return -EINVAL;
}
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4295e5476264..71b640c8c1a5 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -280,10 +280,12 @@ static const struct of_device_id node_matches[] __initconst = {
{ .compatible = "fsl,ls1012a-clockgen", },
{ .compatible = "fsl,ls1021a-clockgen", },
+ { .compatible = "fsl,ls1028a-clockgen", },
{ .compatible = "fsl,ls1043a-clockgen", },
{ .compatible = "fsl,ls1046a-clockgen", },
{ .compatible = "fsl,ls1088a-clockgen", },
{ .compatible = "fsl,ls2080a-clockgen", },
+ { .compatible = "fsl,lx2160a-clockgen", },
{ .compatible = "fsl,p4080-clockgen", },
{ .compatible = "fsl,qoriq-clockgen-1.0", },
{ .compatible = "fsl,qoriq-clockgen-2.0", },
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index a1fb735685db..e086b2dd4072 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -412,7 +412,7 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
}
/**
- * centrino_setpolicy - set a new CPUFreq policy
+ * centrino_target - set a new CPUFreq policy
* @policy: new policy
* @index: index of target frequency
*
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 0171a6e190d7..f7199a35cbb6 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -84,7 +84,7 @@ static struct cpuidle_driver exynos_idle_driver = {
[1] = {
.enter = exynos_enter_lowpower,
.exit_latency = 300,
- .target_residency = 100000,
+ .target_residency = 10000,
.name = "C1",
.desc = "ARM power down",
},
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f108309e871..0f4b7c45df3e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -328,9 +328,23 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int index)
{
+ int ret = 0;
+
+ /*
+ * Store the next hrtimer, which becomes either the next tick or the next
+ * timer event, whichever expires first. Additionally, to make this data
+ * useful for consumers outside cpuidle, we rely on the governor's
+ * ->select() callback having already decided whether or not to stop the tick.
+ */
+ WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());
+
if (cpuidle_state_is_coupled(drv, index))
- return cpuidle_enter_state_coupled(dev, drv, index);
- return cpuidle_enter_state(dev, drv, index);
+ ret = cpuidle_enter_state_coupled(dev, drv, index);
+ else
+ ret = cpuidle_enter_state(dev, drv, index);
+
+ WRITE_ONCE(dev->next_hrtimer, 0);
+ return ret;
}
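Because dev->next_hrtimer is published with WRITE_ONCE() and cleared again on exit, code outside cpuidle can sample it locklessly. A sketch of such a reader (the helper name is hypothetical; it assumes the per-CPU cpuidle_devices pointer declared in linux/cpuidle.h):

#include <linux/cpuidle.h>
#include <linux/ktime.h>

/* Hypothetical reader, for illustration only. */
static ktime_t example_next_cpu_wakeup(int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	/* Zero means the CPU is not currently inside cpuidle_enter(). */
	return dev ? READ_ONCE(dev->next_hrtimer) : 0;
}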
/**
@@ -511,6 +525,7 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
dev->last_residency = 0;
+ dev->next_hrtimer = 0;
}
/**
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0be55fcc19ba..177b7713bd2d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -404,15 +404,6 @@ config CRYPTO_DEV_SAHARA
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
-config CRYPTO_DEV_MXC_SCC
- tristate "Support for Freescale Security Controller (SCC)"
- depends on ARCH_MXC && OF
- select CRYPTO_BLKCIPHER
- select CRYPTO_DES
- help
- This option enables support for the Security Controller (SCC)
- found in Freescale i.MX25 chips.
-
config CRYPTO_DEV_EXYNOS_RNG
tristate "EXYNOS HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 8e7e225d2446..a23a7197fcd7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4092c2aad8e2..307f5cfa9ba4 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
/* Setup SA */
sa = ctx->sa_in;
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
- SA_SAVE_IV : SA_NOT_SAVE_IV),
- SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
+ SA_NOT_SAVE_IV : SA_SAVE_IV),
+ SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
+ SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ /*
+ * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
+ * It's the DIR_(IN|OUT)BOUND that matters.
+ */
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
return 0;
}
@@ -258,10 +264,10 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
* overflow.
*/
if (counter + nblks < counter) {
- struct skcipher_request *subreq = skcipher_request_ctx(req);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);
int ret;
- skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -283,14 +289,14 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
{
int rc;
- crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
+ crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
+ crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+ rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
crypto_skcipher_set_flags(cipher,
- crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
+ crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
CRYPTO_TFM_RES_MASK);
return rc;
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 06574a884715..3934c2523762 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -539,7 +539,7 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req = skcipher_request_cast(pd_uinfo->async_req);
- if (pd_uinfo->using_sd) {
+ if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
req->cryptlen, req->dst);
} else {
@@ -593,7 +593,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
u32 icv[AES_BLOCK_SIZE];
int err = 0;
- if (pd_uinfo->using_sd) {
+ if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
dst);
@@ -714,7 +714,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
size_t offset_to_sr_ptr;
u32 gd_idx = 0;
int tmp;
- bool is_busy;
+ bool is_busy, force_sd;
+
+ /*
+ * There's a very subtle/disguised "bug" in the hardware that
+ * gets indirectly mentioned in section 18.1.3.5 Encryption/Decryption
+ * of the hardware spec:
+ * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
+ * operation modes for >>> "Block ciphers" <<<.
+ *
+ * To work around this issue and stop the hardware from causing
+ * "overran dst buffer" errors on ciphertexts that are not a multiple
+ * of 16 (AES_BLOCK_SIZE), we force the driver to use the
+ * scatter buffers.
+ */
+ force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
+ || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
+ && (datalen % AES_BLOCK_SIZE);
/* figure how many gd are needed */
tmp = sg_nents_for_len(src, assoclen + datalen);
@@ -732,7 +748,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
/* figure how many sd are needed */
- if (sg_is_last(dst)) {
+ if (sg_is_last(dst) && force_sd == false) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -807,9 +823,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
pd->sa_len = sa_len;
pd_uinfo = &dev->pdr_uinfo[pd_entry];
- pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
+ pd_uinfo->dest_va = dst;
+ pd_uinfo->async_req = req;
if (iv_len)
memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -828,7 +845,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
- pd_uinfo->num_gd = num_gd;
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
pd->src = gd_dma;
/* enable gather */
@@ -865,17 +881,13 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
* Indicate gather array is not used
*/
pd_uinfo->first_gd = 0xffffffff;
- pd_uinfo->num_gd = 0;
}
- if (sg_is_last(dst)) {
+ if (!num_sd) {
/*
* we know the application gave us dst as a whole piece of memory,
* no need to use scatter ring.
*/
- pd_uinfo->using_sd = 0;
pd_uinfo->first_sd = 0xffffffff;
- pd_uinfo->num_sd = 0;
- pd_uinfo->dest_va = dst;
sa->sa_command_0.bf.scatter = 0;
pd->dest = (u32)dma_map_page(dev->core_dev->device,
sg_page(dst), dst->offset,
@@ -888,10 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
u32 sd_idx = fst_sd;
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
- pd_uinfo->using_sd = 1;
- pd_uinfo->dest_va = dst;
pd_uinfo->first_sd = fst_sd;
- pd_uinfo->num_sd = num_sd;
sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
pd->dest = sd_dma;
/* setup scatter descriptor */
@@ -954,15 +963,10 @@ static int crypto4xx_sk_init(struct crypto_skcipher *sk)
if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->sw_cipher.cipher =
- crypto_alloc_skcipher(alg->base.cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
+ crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher.cipher))
return PTR_ERR(ctx->sw_cipher.cipher);
-
- crypto_skcipher_set_reqsize(sk,
- sizeof(struct skcipher_request) + 32 +
- crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
}
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
@@ -981,7 +985,7 @@ static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
crypto4xx_common_exit(ctx);
if (ctx->sw_cipher.cipher)
- crypto_free_skcipher(ctx->sw_cipher.cipher);
+ crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
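The fallback changes in this file follow the generic sync-skcipher fallback pattern: allocate a crypto_sync_skcipher at init time and delegate through an on-stack request. A condensed, driver-agnostic sketch of that pattern (all names hypothetical):

#include <crypto/skcipher.h>
#include <linux/err.h>

/* Hypothetical transform context, for illustration only. */
struct example_ctx {
	struct crypto_sync_skcipher *fallback;
};

static int example_init_fallback(struct example_ctx *ctx, const char *name)
{
	ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static int example_fallback_encrypt(struct example_ctx *ctx,
				    struct skcipher_request *req)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	ret = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);
	return ret;
}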
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 18df695ca6b1..c624f8cd3d2e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -64,7 +64,6 @@ union shadow_sa_buf {
struct pd_uinfo {
struct crypto4xx_device *dev;
u32 state;
- u32 using_sd;
u32 first_gd; /* first gather descriptor
used by this packet */
u32 num_gd; /* number of gather descriptors
@@ -131,7 +130,7 @@ struct crypto4xx_ctx {
__le32 iv_nonce;
u32 sa_len;
union {
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
struct crypto_aead *aead;
} sw_cipher;
};
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 65bf1a299562..fa76620281e8 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -800,20 +800,14 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- const char *alg_name;
-
- alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
+ u32 flags;
+ int err;
- /*
- * HW bug in cfb 3-keys mode.
- */
- if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
- && (keylen != 2*DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ flags = crypto_ablkcipher_get_flags(tfm);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(tfm, flags);
+ return err;
}
memcpy(ctx->key, key, keylen);
@@ -1060,7 +1054,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2 * DES_KEY_SIZE,
+ .min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = atmel_tdes_setkey,
.encrypt = atmel_tdes_ecb_encrypt,
@@ -1079,7 +1073,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
+ .min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1088,86 +1082,6 @@ static struct crypto_alg tdes_algs[] = {
}
},
{
- .cra_name = "cfb(des3_ede)",
- .cra_driver_name = "atmel-cfb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb_encrypt,
- .decrypt = atmel_tdes_cfb_decrypt,
- }
-},
-{
- .cra_name = "cfb8(des3_ede)",
- .cra_driver_name = "atmel-cfb8-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb8_encrypt,
- .decrypt = atmel_tdes_cfb8_decrypt,
- }
-},
-{
- .cra_name = "cfb16(des3_ede)",
- .cra_driver_name = "atmel-cfb16-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb16_encrypt,
- .decrypt = atmel_tdes_cfb16_decrypt,
- }
-},
-{
- .cra_name = "cfb32(des3_ede)",
- .cra_driver_name = "atmel-cfb32-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb32_encrypt,
- .decrypt = atmel_tdes_cfb32_decrypt,
- }
-},
-{
.cra_name = "ofb(des3_ede)",
.cra_driver_name = "atmel-ofb-tdes",
.cra_priority = 100,
@@ -1179,7 +1093,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
+ .min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_tdes_setkey,
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 57e5dca3253f..d2fb72811442 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2247,8 +2247,6 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
hdesc->tfm = tfm_ctx->child_hash;
- hdesc->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
tfm_ctx->hmac_key_length = blocksize;
ret = crypto_shash_digest(hdesc, key, keylen,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 28f592f7e1b7..25f8d3913ceb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1840,13 +1840,14 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
if (keylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)key;
- u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ u32 flags;
+ int ret;
- if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+ flags = crypto_ablkcipher_get_flags(cipher);
+ ret = __des3_verify_key(&flags, key);
+ if (unlikely(ret)) {
crypto_ablkcipher_set_flags(cipher, flags);
- return -EINVAL;
+ return ret;
}
ctx->cipher_type = CIPHER_TYPE_3DES;
@@ -2139,7 +2140,6 @@ static int ahash_init(struct ahash_request *req)
goto err_hash;
}
ctx->shash->tfm = hash;
- ctx->shash->flags = 0;
/* Set the key using data we already have from setkey */
if (ctx->authkeylen > 0) {
@@ -2885,13 +2885,13 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
break;
case CIPHER_ALG_3DES:
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)keys.enckey;
- u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ u32 flags;
- if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+ flags = crypto_aead_get_flags(cipher);
+ ret = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(ret)) {
crypto_aead_set_flags(cipher, flags);
- return -EINVAL;
+ return ret;
}
ctx->cipher_type = CIPHER_TYPE_3DES;
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index dbb5c03dde49..2baf6d7f2c1d 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -22,9 +22,6 @@
#include "spum.h"
#include "cipher.h"
-/* This array is based on the hash algo type supported in spu.h */
-char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };
-
char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index d8cda5fb75ad..91ec56399d84 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -242,7 +242,6 @@ int do_shash(unsigned char *name, unsigned char *result,
goto do_shash_err;
}
sdesc->shash.tfm = hash;
- sdesc->shash.flags = 0x0;
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 579578498deb..c0ece44f303b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -89,6 +89,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -638,6 +639,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
@@ -2019,6 +2053,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -2037,6 +2072,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -2056,6 +2092,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* single-pass ipsec_esp descriptor */
@@ -2457,7 +2494,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2479,7 +2516,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2502,7 +2539,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2525,7 +2562,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2548,7 +2585,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2571,7 +2608,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2594,7 +2631,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2617,7 +2654,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2640,7 +2677,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2663,7 +2700,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2686,7 +2723,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2709,7 +2746,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -3301,6 +3338,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -3323,6 +3361,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
};
@@ -3384,8 +3423,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam,
- alg->setkey == aead_setkey);
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -3460,7 +3498,7 @@ static int __init caam_algapi_init(void)
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
- bool registered = false;
+ bool registered = false, gcm_support;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -3493,7 +3531,7 @@ static int __init caam_algapi_init(void)
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
if (priv->era < 10) {
- u32 cha_vid, cha_inst;
+ u32 cha_vid, cha_inst, aes_rn;
cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
@@ -3508,6 +3546,10 @@ static int __init caam_algapi_init(void)
CHA_ID_LS_ARC4_SHIFT;
ccha_inst = 0;
ptha_inst = 0;
+
+ aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
+ CHA_ID_LS_AES_MASK;
+ gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
} else {
u32 aesa, mdha;
@@ -3523,6 +3565,8 @@ static int __init caam_algapi_init(void)
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
+
+ gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}
/* If MD is present, limit digest size based on LP256 */
@@ -3595,11 +3639,9 @@ static int __init caam_algapi_init(void)
if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
continue;
- /*
- * Check support for AES algorithms not available
- * on LP devices.
- */
- if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+ /* Skip GCM algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
+ alg_aai == OP_ALG_AAI_GCM && !gcm_support)
continue;
/*
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c61921d32489..d290d6b41825 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -36,6 +36,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -292,6 +293,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -667,6 +701,13 @@ badkey:
return -EINVAL;
}
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return unlikely(des3_verify_key(skcipher, key)) ?:
+ skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -1382,7 +1423,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1483,6 +1524,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1501,6 +1543,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -1520,6 +1563,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
}
},
/* single-pass ipsec_esp descriptor */
@@ -1798,7 +1842,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1820,7 +1864,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1843,7 +1887,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1866,7 +1910,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1889,7 +1933,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1912,7 +1956,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1935,7 +1979,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1958,7 +2002,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1981,7 +2025,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2004,7 +2048,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2027,7 +2071,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2050,7 +2094,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2393,8 +2437,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam,
- alg->setkey == aead_setkey);
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index c2c1abc68f81..2b2980a8a9b9 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -42,6 +42,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -323,6 +324,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
@@ -938,6 +972,13 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return unlikely(des3_verify_key(skcipher, key)) ?:
+ skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -1440,7 +1481,7 @@ static int caam_cra_init_aead(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
- alg->setkey == aead_setkey);
+ !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -1484,7 +1525,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1601,6 +1642,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1619,6 +1661,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -1638,6 +1681,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
}
},
/* single-pass ipsec_esp descriptor */
@@ -1916,7 +1960,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1938,7 +1982,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1961,7 +2005,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1984,7 +2028,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2007,7 +2051,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2030,7 +2074,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2053,7 +2097,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2076,7 +2120,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2099,7 +2143,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2122,7 +2166,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2145,7 +2189,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2168,7 +2212,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2715,6 +2759,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -2737,6 +2782,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -2854,6 +2900,7 @@ struct caam_hash_state {
struct caam_request caam_req;
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
+ int ctx_dma_len;
u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_0;
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -2927,6 +2974,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
struct caam_hash_state *state, int ctx_len,
struct dpaa2_sg_entry *qm_sg, u32 flag)
{
+ state->ctx_dma_len = ctx_len;
state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
if (dma_mapping_error(dev, state->ctx_dma)) {
dev_err(dev, "unable to map ctx\n");
@@ -3018,13 +3066,13 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
}
/* Digest hash size if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+ u32 digestsize)
{
struct caam_request *req_ctx;
u32 *desc;
struct split_key_sh_result result;
- dma_addr_t src_dma, dst_dma;
+ dma_addr_t key_dma;
struct caam_flc *flc;
dma_addr_t flc_dma;
int ret = -ENOMEM;
@@ -3041,17 +3089,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
if (!flc)
goto err_flc;
- src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ctx->dev, src_dma)) {
- dev_err(ctx->dev, "unable to map key input memory\n");
- goto err_src_dma;
- }
- dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, dst_dma)) {
- dev_err(ctx->dev, "unable to map key output memory\n");
- goto err_dst_dma;
+ key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(ctx->dev, key_dma)) {
+ dev_err(ctx->dev, "unable to map key memory\n");
+ goto err_key_dma;
}
desc = flc->sh_desc;
@@ -3076,14 +3117,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(in_fle, src_dma);
+ dpaa2_fl_set_addr(in_fle, key_dma);
dpaa2_fl_set_len(in_fle, *keylen);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, dst_dma);
+ dpaa2_fl_set_addr(out_fle, key_dma);
dpaa2_fl_set_len(out_fle, digestsize);
print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
@@ -3103,17 +3144,15 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
wait_for_completion(&result.completion);
ret = result.err;
print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
DMA_TO_DEVICE);
err_flc_dma:
- dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
-err_dst_dma:
- dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
-err_src_dma:
+ dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
+err_key_dma:
kfree(flc);
err_flc:
kfree(req_ctx);
@@ -3135,12 +3174,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
if (keylen > blocksize) {
- hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
- GFP_KERNEL | GFP_DMA);
+ hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!hashed_key)
return -ENOMEM;
- ret = hash_digest_key(ctx, key, &keylen, hashed_key,
- digestsize);
+ ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
if (ret)
goto bad_free_key;
key = hashed_key;
@@ -3165,14 +3202,12 @@ bad_free_key:
}
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
- struct ahash_request *req, int dst_len)
+ struct ahash_request *req)
{
struct caam_hash_state *state = ahash_request_ctx(req);
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
- if (edesc->dst_dma)
- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
if (edesc->qm_sg_bytes)
dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
@@ -3187,18 +3222,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
- struct ahash_request *req, int dst_len,
- u32 flag)
+ struct ahash_request *req, u32 flag)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
if (state->ctx_dma) {
- dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
state->ctx_dma = 0;
}
- ahash_unmap(dev, edesc, req, dst_len);
+ ahash_unmap(dev, edesc, req);
}
static void ahash_done(void *cbk_ctx, u32 status)
@@ -3219,16 +3251,13 @@ static void ahash_done(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ memcpy(req->result, state->caam_ctx, digestsize);
qi_cache_free(edesc);
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
req->base.complete(&req->base, ecode);
}
@@ -3250,7 +3279,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
switch_buf(state);
qi_cache_free(edesc);
@@ -3283,16 +3312,13 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ memcpy(req->result, state->caam_ctx, digestsize);
qi_cache_free(edesc);
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
req->base.complete(&req->base, ecode);
}
@@ -3314,7 +3340,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
switch_buf(state);
qi_cache_free(edesc);
@@ -3452,7 +3478,7 @@ static int ahash_update_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3484,7 +3510,7 @@ static int ahash_final_ctx(struct ahash_request *req)
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -3503,22 +3529,13 @@ static int ahash_final_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3533,7 +3550,7 @@ static int ahash_final_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3586,7 +3603,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -3605,22 +3622,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3635,7 +3643,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3704,18 +3712,19 @@ static int ahash_digest(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
goto unmap;
}
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_len(in_fle, req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -3729,7 +3738,7 @@ static int ahash_digest(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3755,27 +3764,39 @@ static int ahash_final_no_ctx(struct ahash_request *req)
if (!edesc)
return ret;
- state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(ctx->dev, state->buf_dma)) {
- dev_err(ctx->dev, "unable to map src\n");
- goto unmap;
+ if (buflen) {
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
}
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
goto unmap;
}
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
- dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(in_fle, state->buf_dma);
- dpaa2_fl_set_len(in_fle, buflen);
+ /*
+ * The crypto engine requires the input entry to be present when
+ * a "frame list" FD is used.
+ * Since the engine does not support FMT=2'b11 (unused entry type),
+ * leaving in_fle zeroized (except for the "Final" flag) is the best option.
+ */
+ if (buflen) {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ }
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -3790,7 +3811,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3870,6 +3891,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
+ state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3918,7 +3940,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3983,11 +4005,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
ret = -ENOMEM;
goto unmap;
}
@@ -3998,7 +4021,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -4013,7 +4036,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return -ENOMEM;
}
@@ -4100,6 +4123,7 @@ static int ahash_update_first(struct ahash_request *req)
scatterwalk_map_and_copy(next_buf, req->src, to_hash,
*next_buflen, 0);
+ state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4143,7 +4167,7 @@ static int ahash_update_first(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -4162,6 +4186,7 @@ static int ahash_init(struct ahash_request *req)
state->final = ahash_final_no_ctx;
state->ctx_dma = 0;
+ state->ctx_dma_len = 0;
state->current_buf = 0;
state->buf_dma = 0;
state->buflen_0 = 0;
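
The des3_aead_setkey() and des3_skcipher_setkey() helpers introduced in this file run __des3_verify_key()/des3_verify_key() before delegating to the generic setkey, so degenerate 3DES keys (K1 == K2 or K2 == K3) are refused up front. A minimal, hedged usage sketch from a kernel caller's side; the transform name is one of the authenc combinations registered above, and example_3des_authenc_setkey() is illustrative only:

#include <crypto/aead.h>
#include <linux/err.h>

/* Illustrative caller: a degenerate 3DES key part now fails at setkey time. */
static int example_3des_authenc_setkey(const u8 *authenc_key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(des3_ede))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Opt into strict key checking so weak/degenerate keys are rejected. */
	crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);

	/*
	 * authenc_key is the usual rtattr-wrapped authenc key blob; with
	 * K1 == K2 (or K2 == K3) in the 3DES part this returns -EINVAL.
	 */
	err = crypto_aead_setkey(tfm, authenc_key, keylen);

	crypto_free_aead(tfm);
	return err;
}
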
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 20890780fb82..be5085451053 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -162,14 +162,12 @@ struct skcipher_edesc {
/*
* ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: I/O virtual address of req->result
* @qm_sg_dma: I/O virtual address of h/w link table
* @src_nents: number of segments in input scatterlist
* @qm_sg_bytes: length of dma mapped qm_sg space
* @sgt: pointer to h/w link table
*/
struct ahash_edesc {
- dma_addr_t dst_dma;
dma_addr_t qm_sg_dma;
int src_nents;
int qm_sg_bytes;
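
With @dst_dma removed from ahash_edesc, the digest is no longer DMA-mapped from req->result; it is produced into the per-request state->caam_ctx buffer, whose mapping is now tracked by ctx_dma/ctx_dma_len, and copied out on completion. A condensed sketch of that completion step, assuming the driver-internal struct caam_hash_state (local to caamalg_qi2.c) is in scope; it only mirrors what ahash_done()/ahash_done_ctx_src() do after this change:

#include <crypto/hash.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Illustrative only; error handling and request bookkeeping are omitted. */
static void example_digest_done(struct device *dev,
				struct caam_hash_state *state,
				struct ahash_request *req, int digestsize)
{
	/* Unmap the context buffer the engine wrote the digest into ... */
	dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len,
			 DMA_FROM_DEVICE);
	state->ctx_dma = 0;

	/* ... and hand the result back without ever mapping req->result. */
	memcpy(req->result, state->caam_ctx, digestsize);
}
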
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index b1eadc6652b5..7205d9f4029e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- if (mapped_nents) {
+ if (mapped_nents)
sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg + sec4_sg_src_index,
0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- } else {
+ else
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- }
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 58285642306e..fe24485274e1 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -994,8 +994,6 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg caam_rsa = {
.encrypt = caam_rsa_enc,
.decrypt = caam_rsa_dec,
- .sign = caam_rsa_dec,
- .verify = caam_rsa_enc,
.set_pub_key = caam_rsa_set_pub_key,
.set_priv_key = caam_rsa_set_priv_key,
.max_size = caam_rsa_max_size,
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 858bdc9ab4a3..e2ba3d202da5 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -468,6 +468,24 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
return caam_get_era_from_hw(ctrl);
}
+/*
+ * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6DQ)
+ * have an issue wherein AXI bus transactions may not occur in the correct
+ * order. This isn't a problem running single descriptors, but can be when
+ * running multiple concurrent descriptors. Reworking the driver to throttle
+ * to single requests is impractical, so the workaround is to limit the AXI
+ * pipeline to a depth of 1 (from its default of 4) to preclude this situation
+ * from occurring.
+ */
+static void handle_imx6_err005766(u32 *mcr)
+{
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl") ||
+ of_machine_is_compatible("fsl,imx6qp"))
+ clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
+ 1 << MCFGR_AXIPIPE_SHIFT);
+}
+
static const struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
@@ -640,6 +658,8 @@ static int caam_probe(struct platform_device *pdev)
(sizeof(dma_addr_t) == sizeof(u64) ?
MCFGR_LONG_PTR : 0));
+ handle_imx6_err005766(&ctrl->mcr);
+
/*
* Read the Compile Time paramters and SCFGR to determine
* if Virtualization is enabled for this platform
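
The clrsetbits_32() call in handle_imx6_err005766() is a read-modify-write of the Master Configuration Register done through rd_reg32()/wr_reg32(), which also take care of the controller's endianness. A plain equivalent of the field update, shown only to make the workaround explicit; endianness handling is deliberately left out:

#include <linux/io.h>
#include "regs.h"	/* MCFGR_AXIPIPE_MASK, MCFGR_AXIPIPE_SHIFT */

/* Open-coded sketch of the ERR005766 AXI pipeline depth limit. */
static void example_limit_axi_pipeline(u32 __iomem *mcr)
{
	u32 val = ioread32(mcr);

	val &= ~MCFGR_AXIPIPE_MASK;		/* clear the AXIPIPE field */
	val |= 1 << MCFGR_AXIPIPE_SHIFT;	/* depth of 1 instead of the default 4 */
	iowrite32(val, mcr);
}
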
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 21a70fd32f5d..4da844e4b61d 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -22,7 +22,7 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
size_t len;
void *buf;
- for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
+ for (it = sg; it && tlen > 0 ; it = sg_next(it)) {
/*
* make sure the scatterlist's page
* has a valid virtual memory mapping
@@ -138,7 +138,7 @@ static const struct {
{ 0x46, "Annotation length exceeds offset (reuse mode)"},
{ 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
{ 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
- { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
+ { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
{ 0x51, "Unsupported IF reuse mode"},
{ 0x52, "Unsupported FL use mode"},
{ 0x53, "Unsupported RJD use mode"},
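
The caam_dump_sg() change above fixes the classic scatterlist-walk bug: the loop advanced with sg_next(sg) instead of sg_next(it), so every iteration re-read the first entry. For reference, a minimal correct walk; the helper name is illustrative only:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Sum up to tlen bytes across a scatterlist, advancing the iterator. */
static size_t example_sg_count_bytes(struct scatterlist *sg, size_t tlen)
{
	struct scatterlist *it;
	size_t total = 0;

	for (it = sg; it && tlen > 0; it = sg_next(it)) {
		size_t len = min_t(size_t, it->length, tlen);

		total += len;
		tlen -= len;
	}

	return total;
}
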
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5869ad58d497..3392615dc91b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -49,13 +49,11 @@ struct caam_drv_private_jr {
atomic_t tfm_count ____cacheline_aligned;
/* Job ring info */
- int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
- int inp_ring_write_index; /* Input index "tail" */
+ u32 inpring_avail; /* Number of free entries in input ring */
int head; /* entinfo (s/w ring) head index */
dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
- spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
struct jr_outentry *outring; /* Base of output ring, DMA-safe */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d50085a03597..1de2562d0982 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -170,13 +170,13 @@ static void caam_jr_dequeue(unsigned long devarg)
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
+ u32 outring_used = 0;
- while (rd_reg32(&jrp->rregs->outring_used)) {
+ while (outring_used ||
+ (outring_used = rd_reg32(&jrp->rregs->outring_used))) {
head = READ_ONCE(jrp->head);
- spin_lock(&jrp->outlock);
-
sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
@@ -199,7 +199,7 @@ static void caam_jr_dequeue(unsigned long devarg)
/* mark completed, avoid matching on a recycled desc addr */
jrp->entinfo[sw_idx].desc_addr_dma = 0;
- /* Stash callback params for use outside of lock */
+ /* Stash callback params */
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
@@ -232,10 +232,9 @@ static void caam_jr_dequeue(unsigned long devarg)
jrp->tail = tail;
}
- spin_unlock(&jrp->outlock);
-
/* Finally, execute user's callback */
usercall(dev, userdesc, userstatus, userarg);
+ outring_used--;
}
/* reenable / unmask IRQs */
@@ -345,7 +344,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head = jrp->head;
tail = READ_ONCE(jrp->tail);
- if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+ if (!jrp->inpring_avail ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
@@ -359,7 +358,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
- jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
+ jrp->inpring[head] = cpu_to_caam_dma(desc_dma);
/*
* Guarantee that the descriptor's DMA address has been written to
@@ -368,18 +367,22 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
*/
smp_wmb();
- jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
- (JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
/*
* Ensure that all job information has been written before
- * notifying CAAM that a new job was added to the input ring.
+ * notifying CAAM that a new job was added to the input ring;
+ * wr_reg32() performs the register write with iowrite32(), which
+ * already issues a memory barrier before the write operation, so
+ * an explicit wmb() is not needed here.
*/
- wmb();
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+ jrp->inpring_avail--;
+ if (!jrp->inpring_avail)
+ jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);
+
spin_unlock_bh(&jrp->inplock);
return 0;
@@ -431,7 +434,6 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
- jrp->inp_ring_write_index = 0;
jrp->out_ring_read_index = 0;
jrp->head = 0;
jrp->tail = 0;
@@ -441,10 +443,9 @@ static int caam_jr_init(struct device *dev)
wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
- jrp->ringsize = JOBR_DEPTH;
+ jrp->inpring_avail = JOBR_DEPTH;
spin_lock_init(&jrp->inplock);
- spin_lock_init(&jrp->outlock);
/* Select interrupt coalescing parameters */
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
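
Taken together, the jr.c changes cache the input ring's free-entry count in software so the enqueue hot path no longer reads INPRING_AVAIL on every submission, and drop the output-ring lock because dequeue only ever runs from the per-ring tasklet. A condensed, hedged sketch of the resulting enqueue fast path; locking and the head/tail software-ring bookkeeping are left out:

#include "intern.h"	/* struct caam_drv_private_jr */
#include "regs.h"	/* rd_reg32(), wr_reg32(), cpu_to_caam_dma() */

/* Sketch only: assumes jrp->inplock is held and the s/w ring has room. */
static int example_jr_enqueue(struct caam_drv_private_jr *jrp,
			      dma_addr_t desc_dma)
{
	if (!jrp->inpring_avail)
		return -EBUSY;		/* cached count says the ring is full */

	jrp->inpring[jrp->head] = cpu_to_caam_dma(desc_dma);

	/* iowrite32() inside wr_reg32() orders the ring write vs. the doorbell. */
	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	/* Touch the INPRING_AVAIL register only when the cached count runs out. */
	if (!--jrp->inpring_avail)
		jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);

	return 0;
}
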
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 7cb8b1755e57..9f08f84cca59 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
/* Create a new req FQ in parked state */
new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
drv_ctx->context_a, 0);
- if (IS_ERR_OR_NULL(new_fq)) {
+ if (IS_ERR(new_fq)) {
dev_err(qidev, "FQ allocation for shdesc update failed\n");
return PTR_ERR(new_fq);
}
@@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
/* Attach request FQ */
drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
QMAN_INITFQ_FLAG_SCHED);
- if (IS_ERR_OR_NULL(drv_ctx->req_fq)) {
+ if (IS_ERR(drv_ctx->req_fq)) {
dev_err(qidev, "create_caam_req_fq failed\n");
dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
kfree(drv_ctx);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 3cd0822ea819..8591914d5c51 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -253,6 +253,9 @@ struct version_regs {
#define CHA_VER_VID_SHIFT 24
#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
+/* CHA Miscellaneous Information - AESA_MISC specific */
+#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
+
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 600336d169a9..9810ad8ac519 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -10,7 +10,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
-#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <crypto/des.h>
#include <crypto/xts.h>
@@ -327,27 +326,36 @@ static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
+ u32 flags = crypto_ablkcipher_get_flags(cipher);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
return cvm_setkey(cipher, key, keylen, DES3_CBC);
}
static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
+ u32 flags = crypto_ablkcipher_get_flags(cipher);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
return cvm_setkey(cipher, key, keylen, DES3_ECB);
}
static int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
- struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
- sizeof(struct ablkcipher_request);
- /* Additional memory for ablkcipher_request is
- * allocated since the cryptd daemon uses
- * this memory for request_ctx information
- */
-
+ tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
return 0;
}
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 2ca431ed1db8..88a0166f5477 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -641,7 +641,7 @@ static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}
-void cptvf_device_init(struct cpt_vf *cptvf)
+static void cptvf_device_init(struct cpt_vf *cptvf)
{
u64 base_addr = 0;
diff --git a/drivers/crypto/cavium/cpt/cptvf_mbox.c b/drivers/crypto/cavium/cpt/cptvf_mbox.c
index d5ec3b8a9e61..4f438eceb506 100644
--- a/drivers/crypto/cavium/cpt/cptvf_mbox.c
+++ b/drivers/crypto/cavium/cpt/cptvf_mbox.c
@@ -17,23 +17,6 @@ static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
mbx->data);
}
-/* ACKs PF's mailbox message
- */
-void cptvf_mbox_send_ack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
-{
- mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
- cptvf_send_msg_to_pf(cptvf, mbx);
-}
-
-/* NACKs PF's mailbox message that VF is not able to
- * complete the action
- */
-void cptvf_mbox_send_nack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
-{
- mbx->msg = CPT_MBOX_MSG_TYPE_NACK;
- cptvf_send_msg_to_pf(cptvf, mbx);
-}
-
/* Interrupt handler to handle mailbox messages from VFs */
void cptvf_handle_mbox_intr(struct cpt_vf *cptvf)
{
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index ca549c5dc08e..f16f61504241 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -223,7 +223,7 @@ scatter_gather_clean:
return ret;
}
-int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
+static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
u32 qno)
{
struct pci_dev *pdev = cptvf->pdev;
@@ -270,7 +270,7 @@ int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
return ret;
}
-void do_request_cleanup(struct cpt_vf *cptvf,
+static void do_request_cleanup(struct cpt_vf *cptvf,
struct cpt_info_buffer *info)
{
int i;
@@ -316,7 +316,7 @@ void do_request_cleanup(struct cpt_vf *cptvf,
kzfree(info);
}
-void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
+static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
struct pci_dev *pdev = cptvf->pdev;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 4f43eacd2557..e4841eb2a09f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -18,26 +18,6 @@
#define GCM_AES_SALT_SIZE 4
-/**
- * struct nitrox_crypt_params - Params to set nitrox crypto request.
- * @cryptlen: Encryption/Decryption data length
- * @authlen: Assoc data length + Cryptlen
- * @srclen: Input buffer length
- * @dstlen: Output buffer length
- * @iv: IV data
- * @ivsize: IV data length
- * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
- */
-struct nitrox_crypt_params {
- unsigned int cryptlen;
- unsigned int authlen;
- unsigned int srclen;
- unsigned int dstlen;
- u8 *iv;
- int ivsize;
- u8 ctrl_arg;
-};
-
union gph_p3 {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -94,36 +74,40 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
return 0;
}
-static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize,
+static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
+ struct scatterlist *src, char *iv, int ivsize,
int buflen)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- int nents = sg_nents_for_len(areq->src, buflen) + 1;
+ int nents = sg_nents_for_len(src, buflen);
int ret;
if (nents < 0)
return nents;
+ /* IV entry */
+ nents += 1;
/* Allocate buffer to hold IV and input scatterlist array */
ret = alloc_src_req_buf(nkreq, nents, ivsize);
if (ret)
return ret;
nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
- nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen);
+ nitrox_creq_set_src_sg(nkreq, nents, ivsize, src, buflen);
return 0;
}
-static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
+static int alloc_dst_sglist(struct nitrox_kcrypt_request *nkreq,
+ struct scatterlist *dst, int ivsize, int buflen)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- int nents = sg_nents_for_len(areq->dst, buflen) + 3;
+ int nents = sg_nents_for_len(dst, buflen);
int ret;
if (nents < 0)
return nents;
+ /* IV, ORH, COMPLETION entries */
+ nents += 3;
/* Allocate buffer to hold ORH, COMPLETION and output scatterlist
* array
*/
@@ -133,61 +117,54 @@ static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
nitrox_creq_set_orh(nkreq);
nitrox_creq_set_comp(nkreq);
- nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen);
+ nitrox_creq_set_dst_sg(nkreq, nents, ivsize, dst, buflen);
return 0;
}
-static void free_src_sglist(struct aead_request *areq)
+static void free_src_sglist(struct nitrox_kcrypt_request *nkreq)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-
kfree(nkreq->src);
}
-static void free_dst_sglist(struct aead_request *areq)
+static void free_dst_sglist(struct nitrox_kcrypt_request *nkreq)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-
kfree(nkreq->dst);
}
-static int nitrox_set_creq(struct aead_request *areq,
- struct nitrox_crypt_params *params)
+static int nitrox_set_creq(struct nitrox_aead_rctx *rctx)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
union gph_p3 param3;
- struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
int ret;
- creq->flags = areq->base.flags;
- creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
+ creq->flags = rctx->flags;
+ creq->gfp = (rctx->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
creq->ctrl.value = 0;
creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
- creq->ctrl.s.arg = params->ctrl_arg;
+ creq->ctrl.s.arg = rctx->ctrl_arg;
- creq->gph.param0 = cpu_to_be16(params->cryptlen);
- creq->gph.param1 = cpu_to_be16(params->authlen);
- creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen);
+ creq->gph.param0 = cpu_to_be16(rctx->cryptlen);
+ creq->gph.param1 = cpu_to_be16(rctx->cryptlen + rctx->assoclen);
+ creq->gph.param2 = cpu_to_be16(rctx->ivsize + rctx->assoclen);
param3.iv_offset = 0;
- param3.auth_offset = params->ivsize;
+ param3.auth_offset = rctx->ivsize;
creq->gph.param3 = cpu_to_be16(param3.param);
- creq->ctx_handle = nctx->u.ctx_handle;
+ creq->ctx_handle = rctx->ctx_handle;
creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
- ret = alloc_src_sglist(areq, params->iv, params->ivsize,
- params->srclen);
+ ret = alloc_src_sglist(&rctx->nkreq, rctx->src, rctx->iv, rctx->ivsize,
+ rctx->srclen);
if (ret)
return ret;
- ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen);
+ ret = alloc_dst_sglist(&rctx->nkreq, rctx->dst, rctx->ivsize,
+ rctx->dstlen);
if (ret) {
- free_src_sglist(areq);
+ free_src_sglist(&rctx->nkreq);
return ret;
}
@@ -197,9 +174,10 @@ static int nitrox_set_creq(struct aead_request *areq,
static void nitrox_aead_callback(void *arg, int err)
{
struct aead_request *areq = arg;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
- free_src_sglist(areq);
- free_dst_sglist(areq);
+ free_src_sglist(&rctx->nkreq);
+ free_dst_sglist(&rctx->nkreq);
if (err) {
pr_err_ratelimited("request failed status 0x%0x\n", err);
err = -EINVAL;
@@ -212,23 +190,25 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
- struct nitrox_crypt_params params;
int ret;
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
- memset(&params, 0, sizeof(params));
- params.cryptlen = areq->cryptlen;
- params.authlen = areq->assoclen + params.cryptlen;
- params.srclen = params.authlen;
- params.dstlen = params.srclen + aead->authsize;
- params.iv = &areq->iv[GCM_AES_SALT_SIZE];
- params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
- params.ctrl_arg = ENCRYPT;
- ret = nitrox_set_creq(areq, &params);
+ rctx->cryptlen = areq->cryptlen;
+ rctx->assoclen = areq->assoclen;
+ rctx->srclen = areq->assoclen + areq->cryptlen;
+ rctx->dstlen = rctx->srclen + aead->authsize;
+ rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
+ rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ rctx->flags = areq->base.flags;
+ rctx->ctx_handle = nctx->u.ctx_handle;
+ rctx->src = areq->src;
+ rctx->dst = areq->dst;
+ rctx->ctrl_arg = ENCRYPT;
+ ret = nitrox_set_creq(rctx);
if (ret)
return ret;
@@ -241,23 +221,25 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
- struct nitrox_crypt_params params;
int ret;
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
- memset(&params, 0, sizeof(params));
- params.cryptlen = areq->cryptlen - aead->authsize;
- params.authlen = areq->assoclen + params.cryptlen;
- params.srclen = areq->cryptlen + areq->assoclen;
- params.dstlen = params.srclen - aead->authsize;
- params.iv = &areq->iv[GCM_AES_SALT_SIZE];
- params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
- params.ctrl_arg = DECRYPT;
- ret = nitrox_set_creq(areq, &params);
+ rctx->cryptlen = areq->cryptlen - aead->authsize;
+ rctx->assoclen = areq->assoclen;
+ rctx->srclen = areq->cryptlen + areq->assoclen;
+ rctx->dstlen = rctx->srclen - aead->authsize;
+ rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
+ rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ rctx->flags = areq->base.flags;
+ rctx->ctx_handle = nctx->u.ctx_handle;
+ rctx->src = areq->src;
+ rctx->dst = areq->dst;
+ rctx->ctrl_arg = DECRYPT;
+ ret = nitrox_set_creq(rctx);
if (ret)
return ret;
@@ -290,7 +272,7 @@ static int nitrox_aead_init(struct crypto_aead *aead)
return 0;
}
-static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+static int nitrox_gcm_common_init(struct crypto_aead *aead)
{
int ret;
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
@@ -308,8 +290,20 @@ static int nitrox_aes_gcm_init(struct crypto_aead *aead)
flags->w0.auth_input_type = 1;
flags->f = be64_to_cpu(flags->f);
- crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
- sizeof(struct nitrox_kcrypt_request));
+ return 0;
+}
+
+static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+{
+ int ret;
+
+ ret = nitrox_gcm_common_init(aead);
+ if (ret)
+ return ret;
+
+ crypto_aead_set_reqsize(aead,
+ sizeof(struct aead_request) +
+ sizeof(struct nitrox_aead_rctx));
return 0;
}
@@ -332,6 +326,166 @@ static void nitrox_aead_exit(struct crypto_aead *aead)
nctx->ndev = NULL;
}
+static int nitrox_rfc4106_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ int ret;
+
+ if (keylen < GCM_AES_SALT_SIZE)
+ return -EINVAL;
+
+ keylen -= GCM_AES_SALT_SIZE;
+ ret = nitrox_aes_gcm_setkey(aead, key, keylen);
+ if (ret)
+ return ret;
+
+ memcpy(fctx->crypto.iv, key + keylen, GCM_AES_SALT_SIZE);
+ return 0;
+}
+
+static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return nitrox_aead_setauthsize(aead, authsize);
+}
+
+static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
+{
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ struct scatterlist *sg;
+
+ if (areq->assoclen != 16 && areq->assoclen != 20)
+ return -EINVAL;
+
+ scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);
+ sg_init_table(rctx->src, 3);
+ sg_set_buf(rctx->src, rctx->assoc, assoclen);
+ sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
+ if (sg != rctx->src + 1)
+ sg_chain(rctx->src, 2, sg);
+
+ if (areq->src != areq->dst) {
+ sg_init_table(rctx->dst, 3);
+ sg_set_buf(rctx->dst, rctx->assoc, assoclen);
+ sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
+ if (sg != rctx->dst + 1)
+ sg_chain(rctx->dst, 2, sg);
+ }
+
+ aead_rctx->src = rctx->src;
+ aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;
+
+ return 0;
+}
+
+static void nitrox_rfc4106_callback(void *arg, int err)
+{
+ struct aead_request *areq = arg;
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq;
+
+ free_src_sglist(nkreq);
+ free_dst_sglist(nkreq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%0x\n", err);
+ err = -EINVAL;
+ }
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int nitrox_rfc4106_enc(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
+ int ret;
+
+ aead_rctx->cryptlen = areq->cryptlen;
+ aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ aead_rctx->srclen = aead_rctx->assoclen + aead_rctx->cryptlen;
+ aead_rctx->dstlen = aead_rctx->srclen + aead->authsize;
+ aead_rctx->iv = areq->iv;
+ aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
+ aead_rctx->flags = areq->base.flags;
+ aead_rctx->ctx_handle = nctx->u.ctx_handle;
+ aead_rctx->ctrl_arg = ENCRYPT;
+
+ ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
+ if (ret)
+ return ret;
+
+ ret = nitrox_set_creq(aead_rctx);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq,
+ nitrox_rfc4106_callback, areq);
+}
+
+static int nitrox_rfc4106_dec(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
+ int ret;
+
+ aead_rctx->cryptlen = areq->cryptlen - aead->authsize;
+ aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ aead_rctx->srclen =
+ areq->cryptlen - GCM_RFC4106_IV_SIZE + areq->assoclen;
+ aead_rctx->dstlen = aead_rctx->srclen - aead->authsize;
+ aead_rctx->iv = areq->iv;
+ aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
+ aead_rctx->flags = areq->base.flags;
+ aead_rctx->ctx_handle = nctx->u.ctx_handle;
+ aead_rctx->ctrl_arg = DECRYPT;
+
+ ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
+ if (ret)
+ return ret;
+
+ ret = nitrox_set_creq(aead_rctx);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq,
+ nitrox_rfc4106_callback, areq);
+}
+
+static int nitrox_rfc4106_init(struct crypto_aead *aead)
+{
+ int ret;
+
+ ret = nitrox_gcm_common_init(aead);
+ if (ret)
+ return ret;
+
+ crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
+ sizeof(struct nitrox_rfc4106_rctx));
+
+ return 0;
+}
+
static struct aead_alg nitrox_aeads[] = { {
.base = {
.cra_name = "gcm(aes)",
@@ -351,6 +505,25 @@ static struct aead_alg nitrox_aeads[] = { {
.exit = nitrox_aead_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "n5_rfc4106",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .setkey = nitrox_rfc4106_setkey,
+ .setauthsize = nitrox_rfc4106_setauthsize,
+ .encrypt = nitrox_rfc4106_enc,
+ .decrypt = nitrox_rfc4106_dec,
+ .init = nitrox_rfc4106_init,
+ .exit = nitrox_aead_exit,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
} };
int nitrox_register_aeads(void)
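
With the request context split into nitrox_aead_rctx (and nitrox_rfc4106_rctx on top of it), the driver can register the "rfc4106(gcm(aes))" transform above. A hedged usage sketch from a kernel caller's side: the key blob is the AES key followed by the 4-byte salt, and requests must carry an assoclen of 16 or 20 bytes (IV included), as enforced by nitrox_rfc4106_set_aead_rctx_sglist(); example_rfc4106_setup() is illustrative only:

#include <crypto/aead.h>
#include <linux/err.h>

/* Illustrative only: allocate and key the newly registered rfc4106 transform. */
static int example_rfc4106_setup(const u8 *key_plus_salt, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* keylen = AES key length + GCM_AES_SALT_SIZE (4 bytes of salt) */
	err = crypto_aead_setkey(tfm, key_plus_salt, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);	/* 8, 12 or 16 accepted */

	crypto_free_aead(tfm);
	return err;
}
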
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index c08d9f33a3b1..3f0df60267a9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -437,6 +437,45 @@ void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
}
+static const char *get_core_option(u8 se_cores, u8 ae_cores)
+{
+ const char *option = "";
+
+ if (ae_cores == AE_MAX_CORES) {
+ switch (se_cores) {
+ case SE_MAX_CORES:
+ option = "60";
+ break;
+ case 40:
+ option = "60s";
+ break;
+ }
+ } else if (ae_cores == (AE_MAX_CORES / 2)) {
+ option = "30";
+ } else {
+ option = "60i";
+ }
+
+ return option;
+}
+
+static const char *get_feature_option(u8 zip_cores, int core_freq)
+{
+ if (zip_cores == 0)
+ return "";
+ else if (zip_cores < ZIP_MAX_CORES)
+ return "-C15";
+
+ if (core_freq >= 850)
+ return "-C45";
+ else if (core_freq >= 750)
+ return "-C35";
+ else if (core_freq >= 550)
+ return "-C25";
+
+ return "";
+}
+
void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
union emu_fuse_map emu_fuse;
@@ -469,24 +508,14 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
}
- /* determine the partname CNN55<cores>-<freq><pincount>-<rev>*/
- if (ndev->hw.ae_cores == AE_MAX_CORES) {
- switch (ndev->hw.se_cores) {
- case SE_MAX_CORES:
- i = snprintf(name, sizeof(name), "CNN5560");
- break;
- case 40:
- i = snprintf(name, sizeof(name), "CNN5560s");
- break;
- }
- } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
- i = snprintf(name, sizeof(name), "CNN5530");
- } else {
- i = snprintf(name, sizeof(name), "CNN5560i");
- }
-
- snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
- ndev->hw.freq, ndev->hw.revision_id);
+ /* determine the partname
+ * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
+ */
+ snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
+ get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
+ ndev->hw.freq,
+ get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
+ ndev->hw.revision_id);
/* copy partname */
strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index 76c0f0be7233..efdbd0fc3e3b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -212,6 +212,50 @@ struct nitrox_kcrypt_request {
};
/**
+ * struct nitrox_aead_rctx - AEAD request context
+ * @nkreq: Base request context
+ * @cryptlen: Encryption/Decryption data length
+ * @assoclen: AAD length
+ * @srclen: Input buffer length
+ * @dstlen: Output buffer length
+ * @iv: IV data
+ * @ivsize: IV data length
+ * @flags: AEAD req flags
+ * @ctx_handle: Device context handle
+ * @src: Source sglist
+ * @dst: Destination sglist
+ * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
+ */
+struct nitrox_aead_rctx {
+ struct nitrox_kcrypt_request nkreq;
+ unsigned int cryptlen;
+ unsigned int assoclen;
+ unsigned int srclen;
+ unsigned int dstlen;
+ u8 *iv;
+ int ivsize;
+ u32 flags;
+ u64 ctx_handle;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ u8 ctrl_arg;
+};
+
+/**
+ * struct nitrox_rfc4106_rctx - rfc4106 cipher request context
+ * @base: AEAD request context
+ * @src: Source sglist
+ * @dst: Destination sglist
+ * @assoc: AAD
+ */
+struct nitrox_rfc4106_rctx {
+ struct nitrox_aead_rctx base;
+ struct scatterlist src[3];
+ struct scatterlist dst[3];
+ u8 assoc[20];
+};
+
+/**
* struct pkt_instr_hdr - Packet Instruction Header
* @g: Gather used
* When [G] is set and [GSZ] != 0, the instruction is
@@ -512,7 +556,7 @@ static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
struct scatterlist *sg = to_sg;
unsigned int sglen;
- for (; buflen; buflen -= sglen) {
+ for (; buflen && from_sg; buflen -= sglen) {
sglen = from_sg->length;
if (sglen > buflen)
sglen = buflen;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4c97478d44bd..5826c2c98a50 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -303,8 +303,6 @@ static void post_se_instr(struct nitrox_softreq *sr,
/* Ring doorbell with count 1 */
writeq(1, cmdq->dbell_csr_addr);
- /* orders the doorbell rings */
- mmiowb();
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
@@ -599,8 +597,6 @@ void pkt_slc_resp_tasklet(unsigned long data)
* MSI-X interrupt generates if Completion count > Threshold
*/
writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
- /* order the writes */
- mmiowb();
if (atomic_read(&cmdq->backlog_count))
schedule_work(&cmdq->backlog_qflush);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index d4935d6cefdd..7e4a5e69085e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -257,12 +257,8 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq)
static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return nitrox_skcipher_setkey(cipher, 0, key, keylen);
+ return unlikely(des3_verify_key(cipher, key)) ?:
+ nitrox_skcipher_setkey(cipher, 0, key, keylen);
}
static int nitrox_3des_encrypt(struct skcipher_request *skreq)
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index b92b6e7e100f..4985bc812b0e 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -69,7 +69,7 @@ static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
zip_ops->csum = 1; /* Adler checksum desired */
}
-int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
+static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
@@ -107,7 +107,7 @@ err_comp_input:
return -ENOMEM;
}
-void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
+static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
@@ -119,7 +119,7 @@ void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
}
-int zip_compress(const u8 *src, unsigned int slen,
+static int zip_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
@@ -155,7 +155,7 @@ int zip_compress(const u8 *src, unsigned int slen,
return ret;
}
-int zip_decompress(const u8 *src, unsigned int slen,
+static int zip_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index c2ff551d215b..91482ffcac59 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -43,24 +43,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
struct ccp_crypto_ablkcipher_alg *alg =
ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
u32 *flags = &tfm->base.crt_flags;
+ int err;
-
- /* From des_generic.c:
- *
- * RFC2451:
- * If the first two or last two independent 64-bit keys are
- * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
- * same as DES. Implementers MUST reject keys that exhibit this
- * property.
- */
- const u32 *K = (const u32 *)key;
-
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(flags, key);
+ if (unlikely(err))
+ return err;
/* It's not clear that there is any support for a keysize of 112.
* If needed, the caller should make K1 == K3
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 05850dfd7940..a2570c0c8cdc 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -37,10 +37,9 @@ static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
if (buf[nskip])
break;
*kplen = sz - nskip;
- *kpbuf = kzalloc(*kplen, GFP_KERNEL);
+ *kpbuf = kmemdup(buf + nskip, *kplen, GFP_KERNEL);
if (!*kpbuf)
return -ENOMEM;
- memcpy(*kpbuf, buf + nskip, *kplen);
return 0;
}
@@ -214,8 +213,6 @@ static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg ccp_rsa_defaults = {
.encrypt = ccp_rsa_encrypt,
.decrypt = ccp_rsa_decrypt,
- .sign = ccp_rsa_decrypt,
- .verify = ccp_rsa_encrypt,
.set_pub_key = ccp_rsa_setpubkey,
.set_priv_key = ccp_rsa_setprivkey,
.max_size = ccp_rsa_maxsize,
@@ -248,7 +245,8 @@ static struct ccp_rsa_def rsa_algs[] = {
}
};
-int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+static int ccp_register_rsa_alg(struct list_head *head,
+ const struct ccp_rsa_def *def)
{
struct ccp_crypto_akcipher_alg *ccp_alg;
struct akcipher_alg *alg;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 10a61cd54fce..3e10573f589e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,8 +293,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
sdesc->tfm = shash;
- sdesc->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index fadf859a14b8..656838433f2f 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -583,6 +583,69 @@ e_free:
return ret;
}
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_get_id2 input;
+ struct sev_data_get_id *data;
+ void *id_blob = NULL;
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+ if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ /* Check if we have write access to the userspace buffer */
+ if (input.address &&
+ input.length &&
+ !access_ok(input.address, input.length))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (input.address && input.length) {
+ id_blob = kmalloc(input.length, GFP_KERNEL);
+ if (!id_blob) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ data->address = __psp_pa(id_blob);
+ data->len = input.length;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+ /*
+ * Firmware will return the length of the ID value (either the minimum
+ * required length or the actual length written); return it to the user.
+ */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ if (id_blob) {
+ if (copy_to_user((void __user *)input.address,
+ id_blob, data->len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+e_free:
+ kfree(id_blob);
+ kfree(data);
+
+ return ret;
+}
+
static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
struct sev_data_get_id *data;
@@ -761,8 +824,12 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
ret = sev_ioctl_do_pdh_export(&input);
break;
case SEV_GET_ID:
+ pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
ret = sev_ioctl_do_get_id(&input);
break;
+ case SEV_GET_ID2:
+ ret = sev_ioctl_do_get_id2(&input);
+ break;
default:
ret = -EINVAL;
goto out;
@@ -997,7 +1064,7 @@ void psp_pci_init(void)
rc = sev_platform_init(&error);
if (rc) {
dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
- goto err;
+ return;
}
dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
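
The new handler supports a two-pass query from userspace: issue SEV_GET_ID2 with no buffer to learn the required length, then retry with an allocated buffer. Unlike the deprecated SEV_GET_ID, the length is reported by the firmware rather than assumed fixed. A hedged userspace sketch, assuming the usual /dev/sev SEV_ISSUE_CMD interface and the sev_user_data_get_id2 layout (address + length) exported via <linux/psp-sev.h>:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sev.h>

int main(void)
{
	struct sev_user_data_get_id2 id2 = { 0 };
	struct sev_issue_cmd cmd = {
		.cmd = SEV_GET_ID2,
		.data = (uintptr_t)&id2,
	};
	int fd = open("/dev/sev", O_RDWR);

	if (fd < 0)
		return 1;

	/* Pass 1: no buffer; the driver copies back the required length. */
	ioctl(fd, SEV_ISSUE_CMD, &cmd);

	void *buf = calloc(1, id2.length);
	if (!buf) {
		close(fd);
		return 1;
	}

	/* Pass 2: hand the buffer over and read the ID blob. */
	id2.address = (uintptr_t)buf;
	if (ioctl(fd, SEV_ISSUE_CMD, &cmd) == 0)
		printf("chip ID: %u bytes\n", id2.length);

	free(buf);
	close(fd);
	return 0;
}
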
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index bdc27970f95f..145e50bdbf16 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index a3527c00b29a..7aa4cbe19a86 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,12 +23,8 @@
#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
-#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
struct cc_aead_handle {
cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
@@ -220,6 +216,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ /* BACKLOG notification */
+ if (err == -EINPROGRESS)
+ goto done;
+
cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
/* This function prepares the user key so it can be passed to the hmac
* processing (copy to internal buffer, or hash in case the key is longer
* than the block size).
*/
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
+ u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
if (keylen != 0) {
+
+ key = kmemdup(authkey, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+ kzfree(key);
+
return rc;
}
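
Duplicating the caller's authentication key with kmemdup() before dma_map_single(), and releasing it with kzfree() on every exit path, ensures the buffer handed to the DMA API is driver-owned and is wiped after use; the caller's pointer may not be DMA-safe. A hedged kernel-style sketch of the pattern (hypothetical helper):

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Copy user-supplied key material into a private buffer, map it for the
 * device, and make sure the copy is wiped if mapping fails. */
static int map_key_for_device(struct device *dev, const u8 *authkey,
			      unsigned int keylen, u8 **key, dma_addr_t *dma)
{
	*key = kmemdup(authkey, keylen, GFP_KERNEL);
	if (!*key)
		return -ENOMEM;

	*dma = dma_map_single(dev, *key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kzfree(*key);		/* zero the key before freeing */
		*key = NULL;
		return -ENOMEM;
	}
	return 0;	/* caller unmaps and kzfree()s once the HW is done */
}
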
@@ -650,6 +659,39 @@ setkey_error:
return rc;
}
+static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = cc_aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -731,7 +773,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT);
+ areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@@ -1080,9 +1122,11 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
+
/* Hash associated data */
- if (req->assoclen > 0)
+ if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
@@ -1159,9 +1203,9 @@ static void cc_mlli_to_sram(struct aead_request *req,
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
- !req_ctx->is_single_pass) {
+ !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1310,7 +1354,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- unsigned int assoclen = req->assoclen;
+ unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1469,7 +1513,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
idx++;
/* process assoc data */
- if (req->assoclen > 0) {
+ if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@@ -1561,7 +1605,7 @@ static int config_ccm_adata(struct aead_request *req)
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1572,7 +1616,7 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
- req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@@ -1604,7 +1648,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= CCM_BLOCK_IV_SIZE;
+ areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1856,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@@ -1836,8 +1880,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
- __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1853,7 +1897,7 @@ static int config_gcm_context(struct aead_request *req)
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
- temp64 = cpu_to_be64(req->assoclen * 8);
+ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1907,8 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
- cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen +
+ GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1928,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+ areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1953,7 @@ static int cc_proc_aead(struct aead_request *req,
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2058,8 +2102,11 @@ static int cc_aead_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2087,8 +2134,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2106,8 +2156,11 @@ static int cc_aead_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2133,8 +2186,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2250,8 +2306,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2273,11 +2332,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2305,8 +2367,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2328,11 +2393,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2372,7 +2440,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
@@ -2412,7 +2480,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
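
A recurring change in this file is that each entry point now zeroes the per-request context and caches req->assoclen in areq_ctx->assoclen; the RFC4309/RFC4543 helpers then shrink the driver's copy instead of writing to the caller's request, which the API treats as read-only input. A minimal sketch of the idea with hypothetical types (the real code uses struct aead_request / struct aead_req_ctx):

#include <string.h>

struct caller_req { unsigned int assoclen; };	/* owned by the caller */
struct drv_req_ctx { unsigned int assoclen; };	/* driver's private copy */

static void drv_req_init(struct drv_req_ctx *ctx, const struct caller_req *req)
{
	memset(ctx, 0, sizeof(*ctx));		/* no stale state between requests */
	ctx->assoclen = req->assoclen;		/* work on a private copy */
}

static void drv_rfc4309_adjust(struct drv_req_ctx *ctx, unsigned int iv_size)
{
	ctx->assoclen -= iv_size;		/* peel the IV bytes off the copy,
						 * leaving req->assoclen untouched */
}
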
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 5edf3b351fa4..e51724b96c56 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_aead.h
* ARM CryptoCell AEAD Crypto API
@@ -67,6 +67,7 @@ struct aead_req_ctx {
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+ u32 assoclen; /* internal assoclen */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 0ee1c52da0a4..c81ad33f9115 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- u32 skip = req->assoclen + req->cryptlen;
+ u32 skip = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->is_gcm4543)
skip += crypto_aead_ivsize(tfm);
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
*/
static unsigned int cc_get_sgl_nents(struct device *dev,
struct scatterlist *sg_list,
- unsigned int nbytes, u32 *lbytes,
- bool *is_chained)
+ unsigned int nbytes, u32 *lbytes)
{
unsigned int nents = 0;
while (nbytes && sg_list) {
- if (sg_list->length) {
- nents++;
- /* get the number of bytes in the last entry */
- *lbytes = nbytes;
- nbytes -= (sg_list->length > nbytes) ?
- nbytes : sg_list->length;
- sg_list = sg_next(sg_list);
- } else {
- sg_list = (struct scatterlist *)sg_page(sg_list);
- if (is_chained)
- *is_chained = true;
- }
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
}
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
@@ -140,9 +133,9 @@ void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
- u32 nents, lbytes;
+ u32 nents;
- nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ nents = sg_nents_for_len(sg, end);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
sgl_data->num_of_buffers++;
}
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
-{
- u32 i, j;
- struct scatterlist *l_sg = sg;
-
- for (i = 0; i < nents; i++) {
- if (!l_sg)
- break;
- if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
- dev_err(dev, "dma_map_page() sg buffer failed\n");
- goto err;
- }
- l_sg = sg_next(l_sg);
- }
- return nents;
-
-err:
- /* Restore mapped parts */
- for (j = 0; j < i; j++) {
- if (!sg)
- break;
- dma_unmap_sg(dev, sg, 1, direction);
- sg = sg_next(sg);
- }
- return 0;
-}
-
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
- bool is_chained = false;
-
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
- &is_chained);
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
*nents, max_sg_nents);
return -ENOMEM;
}
- if (!is_chained) {
- /* In case of mmu the number of mapped nents might
- * be changed from the original sgl nents
- */
- *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (*mapped_nents == 0) {
- *nents = 0;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
- } else {
- /*In this case the driver maps entry by entry so it
- * must have the same nents before and after map
- */
- *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
- direction);
- if (*mapped_nents != *nents) {
- *nents = *mapped_nents;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
+ /* In case of mmu the number of mapped nents might
+ * be changed from the original sgl nents
+ */
+ *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+ if (*mapped_nents == 0) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
}
}
@@ -457,7 +406,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -499,7 +448,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
@@ -568,11 +517,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- u32 dummy;
- bool chained;
- u32 size_to_unmap = 0;
if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -612,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
+ kzfree(areq_ctx->gen_ctx.iv);
}
/* Release pool */
@@ -628,23 +574,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
- req->assoclen, req->cryptlen);
- size_to_unmap = req->assoclen + req->cryptlen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
- size_to_unmap += areq_ctx->req_authsize;
- if (areq_ctx->is_gcm4543)
- size_to_unmap += crypto_aead_ivsize(tfm);
+ areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src,
- cc_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst,
- cc_get_sgl_nents(dev, req->dst, size_to_unmap,
- &dummy, &chained),
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -658,55 +594,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
}
}
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
- unsigned int sgl_nents, unsigned int authsize,
- u32 last_entry_data_size,
- bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size)
{
- unsigned int icv_max_size = 0;
- unsigned int icv_required_size = authsize > last_entry_data_size ?
- (authsize - last_entry_data_size) :
- authsize;
- unsigned int nents;
- unsigned int i;
-
- if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
- *is_icv_fragmented = false;
- return 0;
- }
-
- for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
- if (!sgl)
- break;
- sgl = sg_next(sgl);
- }
-
- if (sgl)
- icv_max_size = sgl->length;
-
- if (last_entry_data_size > authsize) {
- /* ICV attached to data in last entry (not fragmented!) */
- nents = 0;
- *is_icv_fragmented = false;
- } else if (last_entry_data_size == authsize) {
- /* ICV placed in whole last entry (not fragmented!) */
- nents = 1;
- *is_icv_fragmented = false;
- } else if (icv_max_size > icv_required_size) {
- nents = 1;
- *is_icv_fragmented = true;
- } else if (icv_max_size == icv_required_size) {
- nents = 2;
- *is_icv_fragmented = true;
- } else {
- dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
- nents = -1; /*unsupported*/
- }
- dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
-
- return nents;
+ return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
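
The old multi-branch classification collapses into a single predicate: the ICV is fragmented exactly when the scatterlist has more than one entry and the last entry holds fewer bytes than the authentication tag. A hedged standalone copy with two worked examples:

#include <assert.h>
#include <stdbool.h>

static bool icv_is_fragmented(unsigned int nents, unsigned int authsize,
			      unsigned int last_entry_bytes)
{
	return nents > 1 && last_entry_bytes < authsize;
}

int main(void)
{
	/* Last entry holds at least the whole 16-byte tag: contiguous ICV. */
	assert(!icv_is_fragmented(3, 16, 16));
	/* Only 8 bytes land in the last entry: the tag spans two entries. */
	assert(icv_is_fragmented(3, 16, 8));
	return 0;
}
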
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -717,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
+ gfp_t flags = cc_gfp_flags(&req->base);
int rc = 0;
if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
+ areq_ctx->gen_ctx.iv = NULL;
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
- hw_iv_size,
- DMA_BIDIRECTIONAL);
+ areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+ if (!areq_ctx->gen_ctx.iv)
+ return -ENOMEM;
+
+ areq_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
+ kzfree(areq_ctx->gen_ctx.iv);
+ areq_ctx->gen_ctx.iv = NULL;
rc = -ENOMEM;
goto chain_iv_exit;
}
@@ -760,11 +659,9 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
- u32 mapped_nents = 0;
- struct scatterlist *current_sg = req->src;
+ int mapped_nents = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int sg_index = 0;
- u32 size_of_assoc = req->assoclen;
+ unsigned int size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
@@ -775,7 +672,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- if (req->assoclen == 0) {
+ if (areq_ctx->assoclen == 0) {
areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@@ -785,26 +682,10 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- //iterate over the sgl to see how many entries are for associated data
- //it is assumed that if we reach here , the sgl is already mapped
- sg_index = current_sg->length;
- //the first entry in the scatter list contains all the associated data
- if (sg_index > size_of_assoc) {
- mapped_nents++;
- } else {
- while (sg_index <= size_of_assoc) {
- current_sg = sg_next(current_sg);
- /* if have reached the end of the sgl, then this is
- * unexpected
- */
- if (!current_sg) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
- sg_index += current_sg->length;
- mapped_nents++;
- }
- }
+ mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+ if (mapped_nents < 0)
+ return mapped_nents;
+
if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -835,7 +716,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
- req->assoclen, 0, is_last,
+ areq_ctx->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
@@ -850,39 +731,32 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
+ struct scatterlist *sg;
+ ssize_t offset;
areq_ctx->is_icv_fragmented = false;
- if (req->src == req->dst) {
- /*INPLACE*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
- /*NON-INPLACE and DECRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
+
+ if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ sg = areq_ctx->src_sgl;
+ offset = *src_last_bytes - authsize;
} else {
- /*NON-INPLACE and ENCRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
+ sg = areq_ctx->dst_sgl;
+ offset = *dst_last_bytes - authsize;
}
+
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+ areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- u32 *src_last_bytes, u32 *dst_last_bytes,
- bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
- int rc = 0, icv_nents;
struct device *dev = drvdata_to_dev(drvdata);
struct scatterlist *sg;
@@ -893,14 +767,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when ICV is fragmented, ICV
@@ -942,16 +811,11 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->dst_offset, is_last_table,
&areq_ctx->dst.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
-
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
+
/* Backup happens only when ICV is fragmented, ICV
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
@@ -979,14 +843,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
- areq_ctx->dst.nents,
- authsize, *dst_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+ *dst_last_bytes);
if (!areq_ctx->is_icv_fragmented) {
sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -1000,9 +859,6 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
}
}
-
-prepare_data_mlli_exit:
- return rc;
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -1019,12 +875,12 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
- unsigned int size_for_map = req->assoclen + req->cryptlen;
+ unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
- bool chained = false;
bool is_gcm4543 = areq_ctx->is_gcm4543;
- u32 size_to_skip = req->assoclen;
+ u32 size_to_skip = areq_ctx->assoclen;
+ struct scatterlist *sgl;
if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
@@ -1043,19 +899,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
- &src_last_bytes, &chained);
+ &src_last_bytes);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
+ src_mapped_nents--;
offset -= areq_ctx->src_sgl->length;
- areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->src_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
+ sgl = sg_next(areq_ctx->src_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->src_sgl = sgl;
sg_index += areq_ctx->src_sgl->length;
- src_mapped_nents--;
}
if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1068,7 +922,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
- size_for_map = req->assoclen + req->cryptlen;
+ size_for_map = areq_ctx->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
if (is_gcm4543)
@@ -1083,21 +937,19 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
- &dst_last_bytes, &chained);
+ &dst_last_bytes);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
//check where the data starts
while (sg_index <= size_to_skip) {
+ dst_mapped_nents--;
offset -= areq_ctx->dst_sgl->length;
- areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->dst_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
+ sgl = sg_next(areq_ctx->dst_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->dst_sgl = sgl;
sg_index += areq_ctx->dst_sgl->length;
- dst_mapped_nents--;
}
if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1110,9 +962,9 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
dst_mapped_nents > 1 ||
do_chain) {
areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
- rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
- &src_last_bytes,
- &dst_last_bytes, is_last_table);
+ cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes, &dst_last_bytes,
+ is_last_table);
} else {
areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1234,7 +1086,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->ccm_iv0_dma_addr = dma_addr;
rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
- &sg_data, req->assoclen);
+ &sg_data, areq_ctx->assoclen);
if (rc)
goto aead_map_failure;
}
@@ -1285,7 +1137,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
- size_to_map = req->cryptlen + req->assoclen;
+ size_to_map = req->cryptlen + areq_ctx->assoclen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_map += authsize;
@@ -1483,8 +1335,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
- areq_ctx->in_nents =
- cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+ areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 3ec4b4db5247..a726016bdbc1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_buffer_mgr.h
* Buffer Manager
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d9c17078517b..5b58226ea24d 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -34,6 +34,18 @@ struct cc_hw_key_info {
enum cc_hw_crypto_key key2_slot;
};
+struct cc_cpp_key_info {
+ u8 slot;
+ enum cc_cpp_alg alg;
+};
+
+enum cc_key_type {
+ CC_UNPROTECTED_KEY, /* User key */
+ CC_HW_PROTECTED_KEY, /* HW (FDE) key */
+ CC_POLICY_PROTECTED_KEY, /* CPP key */
+ CC_INVALID_PROTECTED_KEY /* Invalid key */
+};
+
struct cc_cipher_ctx {
struct cc_drvdata *drvdata;
int keylen;
@@ -41,19 +53,22 @@ struct cc_cipher_ctx {
int cipher_mode;
int flow_mode;
unsigned int flags;
- bool hw_key;
+ enum cc_key_type key_type;
struct cc_user_key_info user;
- struct cc_hw_key_info hw;
+ union {
+ struct cc_hw_key_info hw;
+ struct cc_cpp_key_info cpp;
+ };
struct crypto_shash *shash_tfm;
};
static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
-static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- return ctx_p->hw_key;
+ return ctx_p->key_type;
}
static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
@@ -232,7 +247,7 @@ struct tdes_keys {
u8 key3[DES_KEY_SIZE];
};
-static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
+static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
{
switch (slot_num) {
case 0:
@@ -247,6 +262,22 @@ static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
return END_OF_KEYS;
}
+static u8 cc_slot_to_cpp_key(u8 slot_num)
+{
+ return (slot_num - CC_FIRST_CPP_KEY_SLOT);
+}
+
+static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
+{
+ if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
+ return CC_HW_PROTECTED_KEY;
+ else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
+ slot_num <= CC_LAST_CPP_KEY_SLOT)
+ return CC_POLICY_PROTECTED_KEY;
+ else
+ return CC_INVALID_PROTECTED_KEY;
+}
+
static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
unsigned int keylen)
{
@@ -261,18 +292,13 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
- /* This check the size of the hardware key token */
+ /* This checks the size of the protected key token */
if (keylen != sizeof(hki)) {
- dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+ dev_err(dev, "Unsupported protected key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- if (ctx_p->flow_mode != S_DIN_to_AES) {
- dev_err(dev, "HW key not supported for non-AES flows\n");
- return -EINVAL;
- }
-
memcpy(&hki, key, keylen);
/* The real key len for crypto op is the size of the HW key
@@ -286,31 +312,70 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
return -EINVAL;
}
- ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
- if (ctx_p->hw.key1_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
- return -EINVAL;
- }
+ ctx_p->keylen = keylen;
- if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
- if (hki.hw_key1 == hki.hw_key2) {
- dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
- hki.hw_key1, hki.hw_key2);
+ switch (cc_slot_to_key_type(hki.hw_key1)) {
+ case CC_HW_PROTECTED_KEY:
+ if (ctx_p->flow_mode == S_DIN_to_SM4) {
+ dev_err(dev, "Only AES HW protected keys are supported\n");
return -EINVAL;
}
- ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
- if (ctx_p->hw.key2_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key2 number (%d)\n",
- hki.hw_key2);
+
+ ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki.hw_key1);
return -EINVAL;
}
- }
- ctx_p->keylen = keylen;
- ctx_p->hw_key = true;
- dev_dbg(dev, "cc_is_hw_key ret 0");
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki.hw_key1 == hki.hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki.hw_key1, hki.hw_key2);
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki.hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->key_type = CC_HW_PROTECTED_KEY;
+ dev_dbg(dev, "HW protected key %d/%d set.\n",
+ ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
+ break;
+
+ case CC_POLICY_PROTECTED_KEY:
+ if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
+ dev_err(dev, "CPP keys not supported in this hardware revision.\n");
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
+ ctx_p->cipher_mode != DRV_CIPHER_CTR) {
+ dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
+ return -EINVAL;
+ }
+
+ ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
+ if (ctx_p->flow_mode == S_DIN_to_AES)
+ ctx_p->cpp.alg = CC_CPP_AES;
+ else /* Must be SM4 due to sethkey registration */
+ ctx_p->cpp.alg = CC_CPP_SM4;
+ ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
+ dev_dbg(dev, "policy protected key alg: %d slot: %d.\n",
+ ctx_p->cpp.alg, ctx_p->cpp.slot);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
+ return -EINVAL;
+ }
return 0;
}
@@ -321,7 +386,6 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
struct cc_crypto_alg *cc_alg =
container_of(tfm->__crt_alg, struct cc_crypto_alg,
skcipher_alg.base);
@@ -339,7 +403,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return -EINVAL;
}
- ctx_p->hw_key = false;
+ ctx_p->key_type = CC_UNPROTECTED_KEY;
/*
* Verify DES weak keys
@@ -347,6 +411,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
* HW does the expansion on its own.
*/
if (ctx_p->flow_mode == S_DIN_to_DES) {
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
if (keylen == DES3_EDE_KEY_SIZE &&
__des3_ede_setkey(tmp, &tfm->crt_flags, key,
DES3_EDE_KEY_SIZE)) {
@@ -399,7 +464,77 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return 0;
}
-static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return S_AES_to_DOUT;
+ case S_DIN_to_DES:
+ return S_DES_to_DOUT;
+ case S_DIN_to_SM4:
+ return S_SM4_to_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = cc_out_setup_mode(ctx_p);
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+ return;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Read next IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+ }
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
struct cc_hw_desc desc[],
@@ -423,11 +558,13 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
du_size = cc_alg->data_unit;
switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
case DRV_CIPHER_CBC:
case DRV_CIPHER_CBC_CTS:
case DRV_CIPHER_CTR:
case DRV_CIPHER_OFB:
- /* Load cipher state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
NS_BIT);
@@ -441,57 +578,15 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
}
(*seq_size)++;
- /*FALLTHROUGH*/
- case DRV_CIPHER_ECB:
- /* Load key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (flow_mode == S_DIN_to_AES) {
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI,
- key_dma_addr, ((key_len == 24) ?
- AES_MAX_KEY_SIZE :
- key_len), NS_BIT);
- }
- set_key_size_aes(&desc[*seq_size], key_len);
- } else {
- /*des*/
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- key_len, NS_BIT);
- set_key_size_des(&desc[*seq_size], key_len);
- }
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
- /* Load AES key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- (key_len / 2), NS_BIT);
- }
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
-
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key2_slot);
} else {
@@ -505,7 +600,7 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
(*seq_size)++;
- /* Set state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -521,48 +616,113 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
}
}
-static void cc_setup_cipher_data(struct crypto_tfm *tfm,
- struct cipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- void *areq, struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
{
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- unsigned int flow_mode = ctx_p->flow_mode;
-
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
- flow_mode = DIN_AES_DOUT;
- break;
+ return DIN_AES_DOUT;
case S_DIN_to_DES:
- flow_mode = DIN_DES_DOUT;
- break;
+ return DIN_DES_DOUT;
case S_DIN_to_SM4:
- flow_mode = DIN_SM4_DOUT;
- break;
+ return DIN_SM4_DOUT;
default:
- dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
- return;
+ return ctx_p->flow_mode;
}
- /* Process */
- if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
- dev_dbg(dev, " data params addr %pad length 0x%X\n",
- &sg_dma_address(src), nbytes);
- dev_dbg(dev, " data params addr %pad length 0x%X\n",
- &sg_dma_address(dst), nbytes);
+}
+
+static void cc_setup_key_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ unsigned int din_size;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_ECB:
+ /* Load key */
hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
- nbytes, NS_BIT);
- set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
- nbytes, NS_BIT, (!areq ? 0 : 1));
- if (areq)
- set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
+ /* We use the AES key size coding for all CPP algs */
+ set_key_size_aes(&desc[*seq_size], key_len);
+ set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
+ flow_mode = cc_out_flow_mode(ctx_p);
+ } else {
+ if (flow_mode == S_DIN_to_AES) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ /* CC_POLICY_UNPROTECTED_KEY
+ * Invalid keys are filtered out in
+ * sethkey()
+ */
+ din_size = (key_len == 24) ?
+ AES_MAX_KEY_SIZE : key_len;
+
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, din_size,
+ NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], key_len);
+ } else {
+ /*des*/
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
+ }
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ }
set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
- } else {
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* Load AES key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ (key_len / 2), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, void *areq,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
/* bypass */
dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
&req_ctx->mlli_params.mlli_dma_addr,
@@ -577,7 +737,38 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
req_ctx->mlli_params.mlli_len);
set_flow_mode(&desc[*seq_size], BYPASS);
(*seq_size)++;
+ }
+}
+
+static void cc_setup_flow_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ unsigned int flow_mode = cc_out_flow_mode(ctx_p);
+ bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+ ctx_p->cipher_mode == DRV_CIPHER_ECB);
+ /* Process */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+ nbytes, NS_BIT);
+ set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+ nbytes, NS_BIT, (!last_desc ? 0 : 1));
+ if (last_desc)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ } else {
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_MLLI,
ctx_p->drvdata->mlli_sram_addr,
@@ -589,7 +780,7 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
} else {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -600,9 +791,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
(LLI_ENTRY_BYTE_SIZE *
req_ctx->in_mlli_nents)),
req_ctx->out_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
}
- if (areq)
+ if (last_desc)
set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
set_flow_mode(&desc[*seq_size], flow_mode);
@@ -610,38 +801,6 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
}
}
-/*
- * Update a CTR-AES 128 bit counter
- */
-static void cc_update_ctr(u8 *ctr, unsigned int increment)
-{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
- IS_ALIGNED((unsigned long)ctr, 8)) {
-
- __be64 *high_be = (__be64 *)ctr;
- __be64 *low_be = high_be + 1;
- u64 orig_low = __be64_to_cpu(*low_be);
- u64 new_low = orig_low + (u64)increment;
-
- *low_be = __cpu_to_be64(new_low);
-
- if (new_low < orig_low)
- *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
- } else {
- u8 *pos = (ctr + AES_BLOCK_SIZE);
- u8 val;
- unsigned int size;
-
- for (; increment; increment--)
- for (size = AES_BLOCK_SIZE; size; size--) {
- val = *--pos + 1;
- *pos = val;
- if (val)
- break;
- }
- }
-}
-
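
With the engine writing the next IV back through cc_setup_readiv_desc() (and the IV now mapped DMA_BIDIRECTIONAL in cc_buffer_mgr.c), the driver no longer has to advance the CTR counter in software. For reference, a hedged standalone sketch equivalent to the bytewise fallback path of the removed cc_update_ctr():

#include <stdint.h>

/* Increment a big-endian 128-bit counter 'increment' times, carrying from
 * the least significant (last) byte upward. */
static void ctr128_add(uint8_t ctr[16], unsigned int increment)
{
	while (increment--) {
		for (int i = 15; i >= 0; i--) {
			if (++ctr[i])	/* stop as soon as a byte does not wrap */
				break;
		}
	}
}
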
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
@@ -649,44 +808,15 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- unsigned int len;
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
-
- switch (ctx_p->cipher_mode) {
- case DRV_CIPHER_CBC:
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req->iv, req->dst, len,
- ivsize, 0);
- }
- break;
-
- case DRV_CIPHER_CTR:
- /* Compute the counter of the last block */
- len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
- cc_update_ctr((u8 *)req->iv, len);
- break;
-
- default:
- break;
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ kzfree(req_ctx->iv);
}
- kzfree(req_ctx->iv);
-
skcipher_request_complete(req, err);
}
@@ -741,6 +871,13 @@ static int cc_cipher_process(struct skcipher_request *req,
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
+ /* Setup CPP operation details */
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
+ cc_req.cpp.is_cpp = true;
+ cc_req.cpp.alg = ctx_p->cpp.alg;
+ cc_req.cpp.slot = ctx_p->cpp.slot;
+ }
+
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
@@ -755,11 +892,16 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup processing */
- cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup IV and XEX key used */
+ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup MLLI line, if needed */
+ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+ /* Setup key */
+ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
/* Data processing */
- cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
- &seq_len);
+ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+ /* Read next IV */
+ cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
/* STAT_PHASE_3: Lock HW and push sequence */
@@ -774,7 +916,6 @@ static int cc_cipher_process(struct skcipher_request *req,
exit_process:
if (rc != -EINPROGRESS && rc != -EBUSY) {
- kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
}
@@ -792,31 +933,10 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- gfp_t flags = cc_gfp_flags(&req->base);
- unsigned int len;
memset(req_ctx, 0, sizeof(*req_ctx));
- if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
- (req->cryptlen >= ivsize)) {
-
- /* Allocate and save the last IV sized bytes of the source,
- * which will be lost in case of in-place decryption.
- */
- req_ctx->backup_info = kzalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
-
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
- ivsize, 0);
- }
-
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
@@ -838,6 +958,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts512(paes)",
@@ -856,6 +977,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts4096(paes)",
@@ -874,6 +996,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv(paes)",
@@ -891,6 +1014,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv512(paes)",
@@ -909,6 +1033,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv4096(paes)",
@@ -927,6 +1052,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker(paes)",
@@ -944,6 +1070,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker512(paes)",
@@ -962,6 +1089,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker4096(paes)",
@@ -980,6 +1108,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ecb(paes)",
@@ -997,6 +1126,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cbc(paes)",
@@ -1014,6 +1144,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ofb(paes)",
@@ -1031,6 +1162,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cts(cbc(paes))",
@@ -1048,6 +1180,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ctr(paes)",
@@ -1065,6 +1198,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts(aes)",
@@ -1429,6 +1563,42 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_713,
.std_body = CC_STD_OSCCA,
},
+ {
+ .name = "cbc(psm4)",
+ .driver_name = "cbc-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
+ {
+ .name = "ctr(psm4)",
+ .driver_name = "ctr-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
};
static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
@@ -1504,7 +1674,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
ARRAY_SIZE(skcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
- !(drvdata->std_bodies & skcipher_algs[alg].std_body))
+ !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
+ (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
continue;
dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 4dbc0a1e6d5c..da3a38707fae 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_cipher.h
* ARM CryptoCell Cipher Crypto API
@@ -20,7 +20,6 @@ struct cipher_req_ctx {
u32 in_mlli_nents;
u32 out_nents;
u32 out_mlli_nents;
- u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
struct mlli_params mlli_params;
};
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index c8dac273c563..ccf960a0d989 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_
@@ -55,6 +55,14 @@
#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+#define CC_CPP_NUM_SLOTS 8
+#define CC_CPP_NUM_ALGS 2
+
+enum cc_cpp_alg {
+ CC_CPP_SM4 = 1,
+ CC_CPP_AES = 0
+};
+
enum drv_engine_type {
DRV_ENGINE_NULL = 0,
DRV_ENGINE_AES = 1,
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5fa05a7bcf36..566999738698 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/debugfs.h>
@@ -25,9 +25,24 @@ struct cc_debugfs_ctx {
*/
static struct dentry *cc_debugfs_dir;
-static struct debugfs_reg32 debug_regs[] = {
+static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "SIGNATURE" }, /* Must be 0th */
{ .name = "VERSION" }, /* Must be 1st */
+};
+
+static struct debugfs_reg32 pid_cid_regs[] = {
+ CC_DEBUG_REG(PERIPHERAL_ID_0),
+ CC_DEBUG_REG(PERIPHERAL_ID_1),
+ CC_DEBUG_REG(PERIPHERAL_ID_2),
+ CC_DEBUG_REG(PERIPHERAL_ID_3),
+ CC_DEBUG_REG(PERIPHERAL_ID_4),
+ CC_DEBUG_REG(COMPONENT_ID_0),
+ CC_DEBUG_REG(COMPONENT_ID_1),
+ CC_DEBUG_REG(COMPONENT_ID_2),
+ CC_DEBUG_REG(COMPONENT_ID_3),
+};
+
+static struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
@@ -53,10 +68,7 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
struct cc_debugfs_ctx *ctx;
- struct debugfs_regset32 *regset;
-
- debug_regs[0].offset = drvdata->sig_offset;
- debug_regs[1].offset = drvdata->ver_offset;
+ struct debugfs_regset32 *regset, *verset;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -75,8 +87,26 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
debugfs_create_regset32("regs", 0400, ctx->dir, regset);
debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
- drvdata->debugfs = ctx;
+ verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
+ /* Failing here is not important enough to fail the module load */
+ if (!verset)
+ goto out;
+
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ ver_sig_regs[0].offset = drvdata->sig_offset;
+ ver_sig_regs[1].offset = drvdata->ver_offset;
+ verset->regs = ver_sig_regs;
+ verset->nregs = ARRAY_SIZE(ver_sig_regs);
+ } else {
+ verset->regs = pid_cid_regs;
+ verset->nregs = ARRAY_SIZE(pid_cid_regs);
+ }
+ verset->base = drvdata->cc_base;
+ debugfs_create_regset32("version", 0400, ctx->dir, verset);
+
+out:
+ drvdata->debugfs = ctx;
return 0;
}
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 01cbd9a95659..664ff402e0e9 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_DEBUGFS_H__
#define __CC_DEBUGFS_H__
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 3bcc6c76e090..86ac7b443355 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -30,27 +30,47 @@
bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
-
bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
+static bool cc_sec_disable;
+module_param_named(sec_disable, cc_sec_disable, bool, 0600);
+MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
+
struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
u32 sig;
+ u32 cidr_0123;
+ u32 pidr_0124;
int std_bodies;
};
+#define CC_NUM_IDRS 4
+
+/* Note: PIDR3 holds CMOD/Rev, so it is ignored for HW identification purposes */
+static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
+ CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
+ CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
+};
+
+static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
+ CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
+ CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
+};
+
/* Hardware revisions defs. */
/* The 703 is a OSCCA only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
- .name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA
+ .name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};
static const struct cc_hw_data cc713_hw = {
- .name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL
+ .name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc712_hw = {
@@ -78,6 +98,20 @@ static const struct of_device_id arm_ccree_dev_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
+static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
+{
+ int i;
+ union {
+ u8 regs[CC_NUM_IDRS];
+ __le32 val;
+ } idr;
+
+ for (i = 0; i < CC_NUM_IDRS; ++i)
+ idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
+
+ return le32_to_cpu(idr.val);
+}
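For orientation, a minimal standalone sketch of the byte packing cc_read_idr() performs: each ID register supplies one byte in its low bits, and the four bytes are composed little-endian into the 32-bit value compared against the cc_hw_data table above. The helper name below is illustrative only; the 0xB105F00D value is the cidr_0123 expected for cc703/cc713 in this patch.

	/* Illustration only: compose four 8-bit ID register reads,
	 * taken in CIDR0..CIDR3 order, into one little-endian u32.
	 */
	static u32 compose_idr(const u8 bytes[4])
	{
		return (u32)bytes[0] | ((u32)bytes[1] << 8) |
		       ((u32)bytes[2] << 16) | ((u32)bytes[3] << 24);
	}

	/* compose_idr((const u8 []){ 0x0D, 0xF0, 0x05, 0xB1 }) == 0xB105F00D */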
+
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
char prefix[64];
@@ -114,12 +148,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if (irr & CC_COMP_IRQ_MASK) {
- /* Mask AXI completion interrupt - will be unmasked in
- * Deferred service handler
+ if (irr & drvdata->comp_mask) {
+ /* Mask all completion interrupts - will be unmasked in
+ * deferred service handler
*/
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
- irr &= ~CC_COMP_IRQ_MASK;
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
+ irr &= ~drvdata->comp_mask;
complete_request(drvdata);
}
#ifdef CONFIG_CRYPTO_FIPS
@@ -159,11 +193,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);
- /* Unmask all AXI interrupt sources AXI_CFG1 register */
- val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
- cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
- dev_dbg(dev, "AXIM_CFG=0x%08X\n",
- cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ /* Unmask all AXI interrupt sources in the AXI_CFG1 register. */
+ /* The AXI interrupt config is obsolete starting at cc7x3. */
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ }
/* Clear all pending interrupts */
val = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -171,7 +208,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+ val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;
if (drvdata->hw_rev >= CC_HW_REV_712)
val |= CC_GPR0_IRQ_MASK;
@@ -201,7 +238,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
- u32 signature_val;
+ u32 val, hw_rev_pidr, sig_cidr;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
@@ -231,6 +268,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
}
+ new_drvdata->comp_mask = CC_COMP_IRQ_MASK;
+
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;
@@ -311,22 +350,57 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}
+ new_drvdata->sec_disabled = cc_sec_disable;
+
if (hw_rev->rev <= CC_HW_REV_712) {
/* Verify correct mapping */
- signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
- if (signature_val != hw_rev->sig) {
+ val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+ if (val != hw_rev->sig) {
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, hw_rev->sig);
+ val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ sig_cidr = val;
+ hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
+ } else {
+ /* Verify correct mapping */
+ val = cc_read_idr(new_drvdata, pidr_0124_offsets);
+ if (val != hw_rev->pidr_0124) {
+ dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
+ val, hw_rev->pidr_0124);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ hw_rev_pidr = val;
+
+ val = cc_read_idr(new_drvdata, cidr_0123_offsets);
+ if (val != hw_rev->cidr_0123) {
+ dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
+ val, hw_rev->cidr_0123);
rc = -EINVAL;
goto post_clk_err;
}
- dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+ sig_cidr = val;
+
+ /* Check security disable state */
+ val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
+ val &= CC_SECURITY_DISABLED_MASK;
+ new_drvdata->sec_disabled |= !!val;
+
+ if (!new_drvdata->sec_disabled) {
+ new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
+ if (new_drvdata->std_bodies & CC_STD_NIST)
+ new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
+ }
}
+ if (new_drvdata->sec_disabled)
+ dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
+
/* Display HW versions */
- dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
- hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
- DRV_MODULE_VERSION);
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
+ hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 33dbf3e6d15d..b76181335c08 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_driver.h
* ARM CryptoCell Linux Crypto Driver
@@ -65,10 +65,32 @@ enum cc_std_body {
#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+#define CC_CPP_AES_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
+
+#define CC_CPP_SM4_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
+
/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
@@ -81,7 +103,6 @@ enum cc_std_body {
#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
-#define MAX_ICV_NENTS_SUPPORTED 2
/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
@@ -90,6 +111,12 @@ enum cc_std_body {
* field in the HW descriptor. The DMA engine +8 that value.
*/
+struct cc_cpp_req {
+ bool is_cpp;
+ enum cc_cpp_alg alg;
+ u8 slot;
+};
+
#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
void (*user_cb)(struct device *dev, void *req, int err);
@@ -104,6 +131,7 @@ struct cc_crypto_req {
/* The generated IV size required, 8/16 B allowed. */
unsigned int ivgen_size;
struct completion seq_compl; /* request completion */
+ struct cc_cpp_req cpp;
};
/**
@@ -136,6 +164,8 @@ struct cc_drvdata {
u32 sig_offset;
u32 ver_offset;
int std_bodies;
+ bool sec_disabled;
+ u32 comp_mask;
};
struct cc_crypto_alg {
@@ -162,12 +192,14 @@ struct cc_alg_template {
int auth_mode;
u32 min_hw_rev;
enum cc_std_body std_body;
+ bool sec_func;
unsigned int data_unit;
struct cc_drvdata *drvdata;
};
struct async_gen_req_ctx {
dma_addr_t iv_dma_addr;
+ u8 *iv;
enum drv_crypto_direction op_type;
};
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index b4d0a6d983e0..5ad3ffb7acaa 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/fips.h>
@@ -49,8 +49,6 @@ void cc_fips_fini(struct cc_drvdata *drvdata)
/* Kill tasklet */
tasklet_kill(&fips_h->tasklet);
-
- kfree(fips_h);
drvdata->fips_handle = NULL;
}
@@ -72,20 +70,28 @@ static inline void tee_fips_error(struct device *dev)
dev_err(dev, "TEE reported error!\n");
}
+/*
+ * Check whether a CryptoCell TEE FIPS error has occurred and, if so,
+ * trigger a system error.
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error(dev);
+}
+
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
- struct device *dev = drvdata_to_dev(drvdata);
- u32 irq, state, val;
+ u32 irq, val;
irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
if (irq) {
- state = cc_ioread(drvdata, CC_REG(GPR_HOST));
-
- if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(drvdata);
}
/* after verifing that there is nothing to do,
@@ -104,7 +110,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
if (p_drvdata->hw_rev < CC_HW_REV_712)
return 0;
- fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
if (!fips_h)
return -ENOMEM;
@@ -113,8 +119,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
- if (!cc_get_tee_fips_status(p_drvdata))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(p_drvdata);
return 0;
}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 645e096a7a82..fc33eeb4d566 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_FIPS_H__
#define __CC_FIPS_H__
@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
void cc_fips_fini(struct cc_drvdata *drvdata);
void fips_handler(struct cc_drvdata *drvdata);
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
#else /* CONFIG_CRYPTO_FIPS */
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
bool ok) {}
static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 2c4ddc8fb76b..a6abe4e3bb0e 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -69,6 +69,7 @@ struct cc_hash_alg {
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
+ u8 *key;
};
/* hash per-session context */
@@ -280,9 +281,13 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static void cc_digest_complete(struct device *dev, void *cc_req, int err)
@@ -295,10 +300,14 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static void cc_hash_complete(struct device *dev, void *cc_req, int err)
@@ -311,10 +320,14 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
@@ -730,13 +743,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
+ ctx->key_params.key = NULL;
if (keylen) {
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
- key, keylen);
+ ctx->key_params.key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -887,6 +907,9 @@ out:
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
+
+ kzfree(ctx->key_params.key);
+
return rc;
}
@@ -913,11 +936,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -969,6 +997,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ kzfree(ctx->key_params.key);
+
return rc;
}
@@ -1621,7 +1651,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA224,
@@ -1648,7 +1678,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA384,
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 2e5bf8b0bbb6..0d6dc61484d7 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_hash.h
* ARM CryptoCell Hash Crypto API
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index 616b2e1c41ba..d0764147573f 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#ifndef __CC_HOST_H__
#define __CC_HOST_H__
@@ -7,33 +7,102 @@
// --------------------------------------
// BLOCK: HOST_P
// --------------------------------------
+
+
+/* IRR */
#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT 0x3UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT 0x4UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT 0x5UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT 0x6UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT 0x7UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT 0x9UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT 0xCUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT 0xDUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT 0xEUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT 0xFUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT 0x10UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT 0x11UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT 0x12UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT 0x14UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
-#define CC_HOST_IMR_REG_OFFSET 0xA04UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+
+/* IMR */
+#define CC_HOST_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT 0x3UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT 0x4UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT 0x5UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT 0x6UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT 0x7UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT 0x9UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT 0xAUL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT 0xCUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT 0xDUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT 0xEUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT 0xFUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT 0x10UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT 0x11UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT 0x12UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT 0x14UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+
+/* ICR */
#define CC_HOST_ICR_REG_OFFSET 0xA08UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
@@ -45,6 +114,9 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
@@ -132,6 +204,49 @@
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
// --------------------------------------
+// BLOCK: ID_REGISTERS
+// --------------------------------------
+#define CC_PERIPHERAL_ID_4_REG_OFFSET 0x0FD0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE 0x4UL
+#define CC_PIDRESERVED0_REG_OFFSET 0x0FD4UL
+#define CC_PIDRESERVED1_REG_OFFSET 0x0FD8UL
+#define CC_PIDRESERVED2_REG_OFFSET 0x0FDCUL
+#define CC_PERIPHERAL_ID_0_REG_OFFSET 0x0FE0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_PERIPHERAL_ID_1_REG_OFFSET 0x0FE4UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_2_REG_OFFSET 0x0FE8UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE 0x1UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REG_OFFSET 0x0FECUL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_0_REG_OFFSET 0x0FF0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_1_REG_OFFSET 0x0FF4UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_2_REG_OFFSET 0x0FF8UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_3_REG_OFFSET 0x0FFCUL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE 0x8UL
+// --------------------------------------
// BLOCK: HOST_SRAM
// --------------------------------------
#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 7a9b90db7db7..9f4db9956e91 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__
@@ -28,11 +28,13 @@
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
#define WORD0_VALUE CC_GENMASK(0, VALUE)
+#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
#define WORD2_VALUE CC_GENMASK(2, VALUE)
#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
@@ -176,6 +178,15 @@ enum cc_hw_crypto_key {
END_OF_KEYS = S32_MAX,
};
+#define CC_NUM_HW_KEY_SLOTS 4
+#define CC_FIRST_HW_KEY_SLOT 0
+#define CC_LAST_HW_KEY_SLOT (CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)
+
+#define CC_NUM_CPP_KEY_SLOTS 8
+#define CC_FIRST_CPP_KEY_SLOT 16
+#define CC_LAST_CPP_KEY_SLOT (CC_FIRST_CPP_KEY_SLOT + \
+ CC_NUM_CPP_KEY_SLOTS - 1)
+
enum cc_hw_aes_key_size {
AES_128_KEY = 0,
AES_192_KEY = 1,
@@ -189,6 +200,9 @@ enum cc_hash_cipher_pad {
HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
};
+#define CC_CPP_DIN_ADDR 0xFF00FF00UL
+#define CC_CPP_DIN_SIZE 0xFF00FFUL
+
/*****************************/
/* Descriptor packing macros */
/*****************************/
@@ -249,6 +263,25 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
}
/*
+ * Setup the special CPP descriptor
+ *
+ * @pdesc: pointer to the HW descriptor struct
+ * @slot: CPP key slot number
+ */
+static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
+{
+ pdesc->word[0] |= CC_CPP_DIN_ADDR;
+
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
+ pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
+
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
+}
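A minimal usage sketch of this helper follows. The hw_desc_init() call, the descriptor variable, and the slot value are assumed from the surrounding driver code and are not part of this hunk.

	struct cc_hw_desc desc;
	u8 slot = 0;			/* hypothetical CPP key slot index */

	hw_desc_init(&desc);		/* assumed helper: zeroes the descriptor words */
	set_cpp_crypto_key(&desc, slot);	/* tag the descriptor as a CPP op on that slot */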
+
+/*
* Set the DIN field of a HW descriptors to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
index 769458323394..99dc69383e20 100644
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/ctr.h>
#include "cc_driver.h"
@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
}
ivgen_ctx->pool = NULL_SRAM_ADDR;
-
- /* release "this" context */
- kfree(ivgen_ctx);
}
/*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
int rc;
/* Allocate "this" context */
- ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+ ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
if (!ivgen_ctx)
return -ENOMEM;
+ drvdata->ivgen_handle = ivgen_ctx;
+
/* Allocate pool's header for initial enc. key/IV */
ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
&ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
goto out;
}
- drvdata->ivgen_handle = ivgen_ctx;
-
return cc_init_iv_sram(drvdata);
out:
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
index b6ac16903dda..a9f5e8bba4f1 100644
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_IVGEN_H__
#define __CC_IVGEN_H__
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index 8d7262a35156..582bae450596 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_CRYS_KERNEL_H__
#define __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
index 64b15ac9f1d3..f891ab813f41 100644
--- a/drivers/crypto/ccree/cc_lli_defs.h
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_
@@ -14,7 +14,7 @@
#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
-#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6ff7e75ad90e..2dad9c9543c6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -11,6 +11,7 @@
#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"
+#include "cc_fips.h"
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
return 0;
}
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
+ /* Enable the device's source clock */
rc = cc_clk_on(drvdata);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = init_cc_regs(drvdata, false);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
+ /* Check whether a TEE FIPS error occurred during power down */
+ cc_tee_handle_fips_error(drvdata);
rc = cc_resume_req_queue(drvdata);
if (rc) {
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 907a6db4d6c0..6190cdba5dad 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_pm.h
*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 83a8aaae61c7..0bc6ccb0b899 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
+#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
@@ -52,11 +53,38 @@ struct cc_bl_item {
bool notif;
};
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
+ alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+ slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+ return cc_cpp_int_masks[alg][slot];
+}
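For reference, here is the same clamp-before-load pattern in isolation, with a hypothetical table and bounds, to show why both indices go through array_index_nospec() before the dependent read.

	#include <linux/nospec.h>

	/* Hypothetical 2 x 8 lookup; row/col may be derived from untrusted input. */
	static u32 safe_lookup(const u32 tbl[2][8], unsigned int row, unsigned int col)
	{
		/* Clamp both indices so a mispredicted bounds check cannot be
		 * used to speculatively read outside the table.
		 */
		row = array_index_nospec(row, 2);
		col = array_index_nospec(col, 8);

		return tbl[row][col];
	}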
+
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -336,10 +364,12 @@ static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
struct cc_bl_item *bli)
{
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
spin_lock_bh(&mgr->bl_lock);
list_add_tail(&bli->list, &mgr->backlog);
++mgr->bl_len;
+ dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
spin_unlock_bh(&mgr->bl_lock);
tasklet_schedule(&mgr->comptask);
}
@@ -349,7 +379,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
struct cc_bl_item *bli;
struct cc_crypto_req *creq;
- struct crypto_async_request *req;
+ void *req;
bool ivgen;
unsigned int total_len;
struct device *dev = drvdata_to_dev(drvdata);
@@ -359,17 +389,20 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
while (mgr->bl_len) {
bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
spin_unlock(&mgr->bl_lock);
+
creq = &bli->creq;
- req = (struct crypto_async_request *)creq->user_arg;
+ req = creq->user_arg;
/*
* Notify the request we're moving out of the backlog
* but only if we haven't done so already.
*/
if (!bli->notif) {
- req->complete(req, -EINPROGRESS);
+ creq->user_cb(dev, req, -EINPROGRESS);
bli->notif = true;
}
@@ -579,6 +612,8 @@ static void proc_completions(struct cc_drvdata *drvdata)
drvdata->request_mgr_handle;
unsigned int *tail = &request_mgr_handle->req_queue_tail;
unsigned int *head = &request_mgr_handle->req_queue_head;
+ int rc;
+ u32 mask;
while (request_mgr_handle->axi_completed) {
request_mgr_handle->axi_completed--;
@@ -596,8 +631,22 @@ static void proc_completions(struct cc_drvdata *drvdata)
cc_req = &request_mgr_handle->req_queue[*tail];
+ if (cc_req->cpp.is_cpp) {
+
+ dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+ cc_req->cpp.slot, cc_req->cpp.alg);
+ mask = cc_cpp_int_mask(cc_req->cpp.alg,
+ cc_req->cpp.slot);
+ rc = (drvdata->irq & mask ? -EPERM : 0);
+ dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+ drvdata->irq, rc);
+ } else {
+ dev_dbg(dev, "None CPP request completion\n");
+ rc = 0;
+ }
+
if (cc_req->user_cb)
- cc_req->user_cb(dev, cc_req->user_arg, 0);
+ cc_req->user_cb(dev, cc_req->user_arg, rc);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,47 +667,50 @@ static void comp_handler(unsigned long devarg)
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
-
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irq;
- irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+ dev_dbg(dev, "Completion handler called!\n");
+ irq = (drvdata->irq & drvdata->comp_mask);
- if (irq & CC_COMP_IRQ_MASK) {
- /* To avoid the interrupt from firing as we unmask it,
- * we clear it now
- */
- cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+ /* Clear the interrupt now so it does not fire again as we
+ * unmask it below.
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
- /* Avoid race with above clear: Test completion counter
- * once more
- */
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
-
- while (request_mgr_handle->axi_completed) {
- do {
- proc_completions(drvdata);
- /* At this point (after proc_completions()),
- * request_mgr_handle->axi_completed is 0.
- */
- request_mgr_handle->axi_completed =
- cc_axi_comp_count(drvdata);
- } while (request_mgr_handle->axi_completed > 0);
+ /* Avoid race with above clear: Test completion counter once more */
- cc_iowrite(drvdata, CC_REG(HOST_ICR),
- CC_COMP_IRQ_MASK);
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
+
+ dev_dbg(dev, "AXI completion after updated: %d\n",
+ request_mgr_handle->axi_completed);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+ irq = (drvdata->irq & drvdata->comp_mask);
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
- }
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
+
/* after verifing that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
- cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
cc_proc_backlog(drvdata);
+ dev_dbg(dev, "Comp. handler done.\n");
}
/*
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index 573cb97af085..f46cf766fe4d 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_request_mgr.h
* Request Manager
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index c8c276f6dee9..62c885e6e791 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include "cc_driver.h"
#include "cc_sram_mgr.h"
@@ -19,8 +19,7 @@ struct cc_sram_ctx {
*/
void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
- /* Free "this" context */
- kfree(drvdata->sram_mgr_handle);
+ /* Nothing needed */
}
/**
@@ -48,7 +47,7 @@ int cc_sram_mgr_init(struct cc_drvdata *drvdata)
}
/* Allocate "this" context */
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index d48649fb3323..1d14de9ee8c3 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_SRAM_MGR_H__
#define __CC_SRAM_MGR_H__
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 8d8cf80b9294..177f572b9589 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -200,17 +200,10 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
- int err = 0;
-
- spin_lock_bh(&dev->lock_chcr_dev);
if (dev->state == CHCR_DETACH)
- err = 1;
- else
- atomic_inc(&dev->inflight);
-
- spin_unlock_bh(&dev->lock_chcr_dev);
-
- return err;
+ return 1;
+ atomic_inc(&dev->inflight);
+ return 0;
}
static inline void chcr_dec_wrcount(struct chcr_dev *dev)
@@ -1101,8 +1094,8 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
- AES_BLOCK_SIZE));
+ ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+ AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
@@ -2130,7 +2123,6 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
shash->tfm = hmacctx->base_hash;
- shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
@@ -3517,7 +3509,6 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
SHASH_DESC_ON_STACK(shash, base_hash);
shash->tfm = base_hash;
- shash->flags = crypto_shash_get_flags(base_hash);
bs = crypto_shash_blocksize(base_hash);
align = KEYCTX_ALIGN_PAD(max_authsize);
o_ptr = actx->h_iopad + param.result_size + align;
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 239b933d6df6..029a7354f541 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -243,15 +243,11 @@ static void chcr_detach_device(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev = &u_ctx->dev;
- spin_lock_bh(&dev->lock_chcr_dev);
if (dev->state == CHCR_DETACH) {
- spin_unlock_bh(&dev->lock_chcr_dev);
pr_debug("Detached Event received for already detach device\n");
return;
}
dev->state = CHCR_DETACH;
- spin_unlock_bh(&dev->lock_chcr_dev);
-
if (atomic_read(&dev->inflight) != 0) {
schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
wait_for_completion(&dev->detach_comp);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 2f60049361ef..f429aae72542 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -575,7 +575,8 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
netif_tx_stop_queue(q->txq);
q->q.stops++;
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (!q->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr_mid |= FW_ULPTX_WR_DATA_F;
wr->wreq.flowid_len16 = htonl(wr_mid);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index dad212cabe63..d656be0a142b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1976,6 +1976,29 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
+static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_device *dev = ctx->dev;
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ dev->flags &= ~HIFN_FLAG_OLD_KEY;
+
+ memcpy(ctx->key, key, len);
+ ctx->keysize = len;
+
+ return 0;
+}
+
static int hifn_handle_req(struct ablkcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -2240,7 +2263,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cfb,
.decrypt = hifn_decrypt_3des_cfb,
},
@@ -2250,7 +2273,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ofb,
.decrypt = hifn_decrypt_3des_ofb,
},
@@ -2261,7 +2284,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cbc,
.decrypt = hifn_decrypt_3des_cbc,
},
@@ -2271,7 +2294,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ecb,
.decrypt = hifn_decrypt_3des_ecb,
},
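
This is the first of several conversions in this series that share one shape: the hand-rolled key-length checks go away (the ablkcipher core has already bounded keylen via min_keysize/max_keysize, both DES3_EDE_KEY_SIZE here) and the driver instead calls __des3_verify_key(), which in this kernel generation rejects 3DES keys that degenerate into single DES (repeated subkeys) when the transform forbids weak keys or the kernel runs in FIPS mode, reporting the reason through the tfm flags. A condensed kernel-context sketch of the pattern; struct example_ctx and its fields are placeholders, not a real driver:

static int example_des3_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key, unsigned int len)
{
	struct example_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 flags = crypto_ablkcipher_get_flags(cipher);
	int err;

	err = __des3_verify_key(&flags, key);	/* may set CRYPTO_TFM_RES_* bits in flags */
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}

	memcpy(ctx->key, key, len);		/* len already validated by the API */
	ctx->keylen = len;
	return 0;
}
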
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index adc0cd8ae97b..02768af0dccd 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -365,20 +365,16 @@ static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES_KEY_SIZE * 3)
- return -EINVAL;
-
- return sec_alg_skcipher_setkey(tfm, key, keylen,
+ return unlikely(des3_verify_key(tfm, key)) ?:
+ sec_alg_skcipher_setkey(tfm, key, keylen,
SEC_C_3DES_ECB_192_3KEY);
}
static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE)
- return -EINVAL;
-
- return sec_alg_skcipher_setkey(tfm, key, keylen,
+ return unlikely(des3_verify_key(tfm, key)) ?:
+ sec_alg_skcipher_setkey(tfm, key, keylen,
SEC_C_3DES_CBC_192_3KEY);
}
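
The "x ?: y" construct used in both setkey functions above (and again later in this series) is the GCC/Clang omitted-middle-operand extension: it evaluates to x when x is non-zero, otherwise to y, and evaluates x only once. Here it reads as "return the verification error if there is one, otherwise the result of the real setkey". A tiny userspace demonstration, with -22 standing in for -EINVAL:

#include <stdio.h>

static int verify(void)    { return 0; }	/* 0 == key accepted */
static int do_setkey(void) { return -22; }	/* pretend the real setkey failed */

int main(void)
{
	/* "a ?: b" is a GNU extension: a if a is non-zero, else b */
	int ret = verify() ?: do_setkey();

	printf("ret = %d\n", ret);	/* -22: verify passed, setkey's result is returned */
	return 0;
}
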
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 7ef30a98cb24..de4be10b172f 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1039,13 +1039,12 @@ static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ int err;
- if (len != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ err = des3_verify_key(ctfm, key);
+ if (unlikely(err))
+ return err;
/* if context exits and key changed, need to invalidate it */
if (ctx->base.ctxr_dma) {
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 5c4659b04d70..f5414b6dfb55 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -30,8 +30,8 @@
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define MAX_KEYLEN 32
@@ -758,14 +758,6 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
- } else if (cipher_cfg & MOD_3DES) {
- const u32 *K = (const u32 *)key;
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
- {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
- return -EINVAL;
- }
} else {
u32 tmp[DES_EXPKEY_WORDS];
if (des_ekey(tmp, key) == 0) {
@@ -859,6 +851,19 @@ out:
return ret;
}
+static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ u32 flags = crypto_ablkcipher_get_flags(tfm);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err))
+ crypto_ablkcipher_set_flags(tfm, flags);
+
+ return ablk_setkey(tfm, key, key_len);
+}
+
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
@@ -1175,6 +1180,43 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+ u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
+
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(tfm);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err))
+ goto badkey;
+
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+ crypto_aead_set_flags(tfm, flags);
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
static int aead_encrypt(struct aead_request *req)
{
return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
@@ -1220,6 +1262,7 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablk_des3_setkey,
}
}
},
@@ -1232,6 +1275,7 @@ static struct ixp_alg ixp4xx_algos[] = {
.cra_u = { .ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = ablk_des3_setkey,
}
}
},
@@ -1313,6 +1357,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = des3_aead_setkey,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1337,6 +1382,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = des3_aead_setkey,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1443,7 +1489,7 @@ static int __init ixp_module_init(void)
/* authenc */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC;
- cra->setkey = aead_setkey;
+ cra->setkey = cra->setkey ?: aead_setkey;
cra->setauthsize = aead_setauthsize;
cra->encrypt = aead_encrypt;
cra->decrypt = aead_decrypt;
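
des3_aead_setkey() above follows the usual authenc() convention: the key handed to an AEAD built from a cipher plus a hash is a single blob whose rtattr header records the encryption-key length, and crypto_authenc_extractkeys() splits it into pointers for the two halves. A stripped-down kernel-context sketch of just the split; example_split_authenc_key is hypothetical and the 3DES check on keys.enckey is left out:

static int example_split_authenc_key(const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		return err;

	/*
	 * keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen now
	 * point into the caller's buffer; a real setkey copies them out
	 * (and, for 3DES, runs __des3_verify_key() on keys.enckey first).
	 */
	memzero_explicit(&keys, sizeof(keys));
	return 0;
}

The registration change at the end of the file reuses the "?:" idiom shown earlier so that algorithms which supply their own setkey (the 3DES entries) keep it, while everything else falls back to aead_setkey().
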
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index fb279b3a1ca1..2fd936b19c6d 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -299,13 +299,12 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
- if (len != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ err = des3_verify_key(cipher, key);
+ if (unlikely(err))
+ return err;
memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 99ff54cc8a15..fd456dd703bf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -135,11 +135,10 @@ static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
- unsigned int index, padlen;
+ unsigned int padlen;
buf[0] = 0x80;
/* Pad out to 56 mod 64 */
- index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
padlen = mv_cesa_ahash_pad_len(creq);
memset(buf + 1, 0, padlen - 1);
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 5f4f845adbb8..a0806ba40c68 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -365,7 +365,6 @@ static int mtk_sha_finish_hmac(struct ahash_request *req)
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
return crypto_shash_init(shash) ?:
crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
@@ -810,8 +809,6 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = crypto_shash_get_flags(bctx->shash) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
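
The shash->flags assignments deleted here, and in several drivers further down (nx, omap-sham, padlock, qat, s5p, n2), line up with the removal of the flags field from struct shash_desc elsewhere in this series: synchronous hashes no longer carry a may-sleep hint, so a stack descriptor only needs its tfm pointer set before use. A kernel-context sketch of the resulting minimal usage; example_digest is hypothetical:

static int example_digest(struct crypto_shash *tfm, const u8 *data,
			  unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int err;

	shash->tfm = tfm;			/* no ->flags to fill in anymore */
	err = crypto_shash_digest(shash, data, len, out);
	shash_desc_zero(shash);
	return err;
}
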
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
deleted file mode 100644
index 519086730791..000000000000
--- a/drivers/crypto/mxc-scc.c
+++ /dev/null
@@ -1,767 +0,0 @@
-/*
- * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
- *
- * The driver is based on information gathered from
- * drivers/mxc/security/mxc_scc.c which can be found in
- * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-
-/* Secure Memory (SCM) registers */
-#define SCC_SCM_RED_START 0x0000
-#define SCC_SCM_BLACK_START 0x0004
-#define SCC_SCM_LENGTH 0x0008
-#define SCC_SCM_CTRL 0x000C
-#define SCC_SCM_STATUS 0x0010
-#define SCC_SCM_ERROR_STATUS 0x0014
-#define SCC_SCM_INTR_CTRL 0x0018
-#define SCC_SCM_CFG 0x001C
-#define SCC_SCM_INIT_VECTOR_0 0x0020
-#define SCC_SCM_INIT_VECTOR_1 0x0024
-#define SCC_SCM_RED_MEMORY 0x0400
-#define SCC_SCM_BLACK_MEMORY 0x0800
-
-/* Security Monitor (SMN) Registers */
-#define SCC_SMN_STATUS 0x1000
-#define SCC_SMN_COMMAND 0x1004
-#define SCC_SMN_SEQ_START 0x1008
-#define SCC_SMN_SEQ_END 0x100C
-#define SCC_SMN_SEQ_CHECK 0x1010
-#define SCC_SMN_BIT_COUNT 0x1014
-#define SCC_SMN_BITBANK_INC_SIZE 0x1018
-#define SCC_SMN_BITBANK_DECREMENT 0x101C
-#define SCC_SMN_COMPARE_SIZE 0x1020
-#define SCC_SMN_PLAINTEXT_CHECK 0x1024
-#define SCC_SMN_CIPHERTEXT_CHECK 0x1028
-#define SCC_SMN_TIMER_IV 0x102C
-#define SCC_SMN_TIMER_CONTROL 0x1030
-#define SCC_SMN_DEBUG_DETECT_STAT 0x1034
-#define SCC_SMN_TIMER 0x1038
-
-#define SCC_SCM_CTRL_START_CIPHER BIT(2)
-#define SCC_SCM_CTRL_CBC_MODE BIT(1)
-#define SCC_SCM_CTRL_DECRYPT_MODE BIT(0)
-
-#define SCC_SCM_STATUS_LEN_ERR BIT(12)
-#define SCC_SCM_STATUS_SMN_UNBLOCKED BIT(11)
-#define SCC_SCM_STATUS_CIPHERING_DONE BIT(10)
-#define SCC_SCM_STATUS_ZEROIZING_DONE BIT(9)
-#define SCC_SCM_STATUS_INTR_STATUS BIT(8)
-#define SCC_SCM_STATUS_SEC_KEY BIT(7)
-#define SCC_SCM_STATUS_INTERNAL_ERR BIT(6)
-#define SCC_SCM_STATUS_BAD_SEC_KEY BIT(5)
-#define SCC_SCM_STATUS_ZEROIZE_FAIL BIT(4)
-#define SCC_SCM_STATUS_SMN_BLOCKED BIT(3)
-#define SCC_SCM_STATUS_CIPHERING BIT(2)
-#define SCC_SCM_STATUS_ZEROIZING BIT(1)
-#define SCC_SCM_STATUS_BUSY BIT(0)
-
-#define SCC_SMN_STATUS_STATE_MASK 0x0000001F
-#define SCC_SMN_STATE_START 0x0
-/* The SMN is zeroizing its RAM during reset */
-#define SCC_SMN_STATE_ZEROIZE_RAM 0x5
-/* SMN has passed internal checks */
-#define SCC_SMN_STATE_HEALTH_CHECK 0x6
-/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
-#define SCC_SMN_STATE_FAIL 0x9
-/* SCC is in secure state. SCM is using secret key. */
-#define SCC_SMN_STATE_SECURE 0xA
-/* SCC is not secure. SCM is using default key. */
-#define SCC_SMN_STATE_NON_SECURE 0xC
-
-#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM BIT(2)
-#define SCC_SCM_INTR_CTRL_CLR_INTR BIT(1)
-#define SCC_SCM_INTR_CTRL_MASK_INTR BIT(0)
-
-/* Size, in blocks, of Red memory. */
-#define SCC_SCM_CFG_BLACK_SIZE_MASK 0x07fe0000
-#define SCC_SCM_CFG_BLACK_SIZE_SHIFT 17
-/* Size, in blocks, of Black memory. */
-#define SCC_SCM_CFG_RED_SIZE_MASK 0x0001ff80
-#define SCC_SCM_CFG_RED_SIZE_SHIFT 7
-/* Number of bytes per block. */
-#define SCC_SCM_CFG_BLOCK_SIZE_MASK 0x0000007f
-
-#define SCC_SMN_COMMAND_TAMPER_LOCK BIT(4)
-#define SCC_SMN_COMMAND_CLR_INTR BIT(3)
-#define SCC_SMN_COMMAND_CLR_BIT_BANK BIT(2)
-#define SCC_SMN_COMMAND_EN_INTR BIT(1)
-#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM BIT(0)
-
-#define SCC_KEY_SLOTS 20
-#define SCC_MAX_KEY_SIZE 32
-#define SCC_KEY_SLOT_SIZE 32
-
-#define SCC_CRC_CCITT_START 0xFFFF
-
-/*
- * Offset into each RAM of the base of the area which is not
- * used for Stored Keys.
- */
-#define SCC_NON_RESERVED_OFFSET (SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
-
-/* Fixed padding for appending to plaintext to fill out a block */
-static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
-
-enum mxc_scc_state {
- SCC_STATE_OK,
- SCC_STATE_UNIMPLEMENTED,
- SCC_STATE_FAILED
-};
-
-struct mxc_scc {
- struct device *dev;
- void __iomem *base;
- struct clk *clk;
- bool hw_busy;
- spinlock_t lock;
- struct crypto_queue queue;
- struct crypto_async_request *req;
- int block_size_bytes;
- int black_ram_size_blocks;
- int memory_size_bytes;
- int bytes_remaining;
-
- void __iomem *red_memory;
- void __iomem *black_memory;
-};
-
-struct mxc_scc_ctx {
- struct mxc_scc *scc;
- struct scatterlist *sg_src;
- size_t src_nents;
- struct scatterlist *sg_dst;
- size_t dst_nents;
- unsigned int offset;
- unsigned int size;
- unsigned int ctrl;
-};
-
-struct mxc_scc_crypto_tmpl {
- struct mxc_scc *scc;
- struct crypto_alg alg;
-};
-
-static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
- size_t len;
- void __iomem *from;
-
- if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
- from = scc->red_memory;
- else
- from = scc->black_memory;
-
- dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
- ctx->dst_nents * 8);
- len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
- from, ctx->size, ctx->offset);
- if (!len) {
- dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
- return -EINVAL;
- }
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "red memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->red_memory, ctx->size, 1);
- print_hex_dump(KERN_ERR,
- "black memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->black_memory, ctx->size, 1);
-#endif
-
- ctx->offset += len;
-
- if (ctx->offset < ablkreq->nbytes)
- return -EINPROGRESS;
-
- return 0;
-}
-
-static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
- struct mxc_scc_ctx *ctx)
-{
- struct mxc_scc *scc = ctx->scc;
- int nents;
-
- nents = sg_nents_for_len(req->src, req->nbytes);
- if (nents < 0) {
- dev_err(scc->dev, "Invalid number of src SC");
- return nents;
- }
- ctx->src_nents = nents;
-
- nents = sg_nents_for_len(req->dst, req->nbytes);
- if (nents < 0) {
- dev_err(scc->dev, "Invalid number of dst SC");
- return nents;
- }
- ctx->dst_nents = nents;
-
- ctx->size = 0;
- ctx->offset = 0;
-
- return 0;
-}
-
-static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
- struct mxc_scc_ctx *ctx,
- int result)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
-
- scc->req = NULL;
- scc->bytes_remaining = scc->memory_size_bytes;
-
- if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
- memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
- scc->block_size_bytes);
-
- req->complete(req, result);
- scc->hw_busy = false;
-
- return 0;
-}
-
-static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
- struct ablkcipher_request *req)
-{
- u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
- size_t len = min_t(size_t, req->nbytes - ctx->offset,
- ctx->scc->bytes_remaining);
- unsigned int padding_byte_count = 0;
- struct mxc_scc *scc = ctx->scc;
- void __iomem *to;
-
- if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
- to = scc->black_memory;
- else
- to = scc->red_memory;
-
- if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
- memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
- scc->block_size_bytes);
-
- len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
- to, len, ctx->offset);
- if (!len) {
- dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
- return -EINVAL;
- }
-
- ctx->size = len;
-
-#ifdef DEBUG
- dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
- print_hex_dump(KERN_ERR,
- "init vector0@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
- 1);
- print_hex_dump(KERN_ERR,
- "red memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->red_memory, ctx->size, 1);
- print_hex_dump(KERN_ERR,
- "black memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->black_memory, ctx->size, 1);
-#endif
-
- scc->bytes_remaining -= len;
-
- padding_byte_count = len % scc->block_size_bytes;
-
- if (padding_byte_count) {
- memcpy(padding_buffer, scc_block_padding, padding_byte_count);
- memcpy(to + len, padding_buffer, padding_byte_count);
- ctx->size += padding_byte_count;
- }
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "data to encrypt@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- to, ctx->size, 1);
-#endif
-
- return 0;
-}
-
-static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
- int err;
-
- dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
- ablkreq->nbytes, ablkreq->src, ablkreq->dst);
-
- writel(0, scc->base + SCC_SCM_ERROR_STATUS);
-
- err = mxc_scc_put_data(ctx, ablkreq);
- if (err) {
- mxc_scc_ablkcipher_req_complete(req, ctx, err);
- return;
- }
-
- dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
- readl(scc->base + SCC_SCM_RED_START),
- readl(scc->base + SCC_SCM_BLACK_START));
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR,
- scc->base + SCC_SCM_INTR_CTRL);
-
- writel((ctx->size / ctx->scc->block_size_bytes) - 1,
- scc->base + SCC_SCM_LENGTH);
-
- dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
- ctx->size / ctx->scc->block_size_bytes,
- (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
- scc->red_memory);
-
- writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
-}
-
-static irqreturn_t mxc_scc_int(int irq, void *priv)
-{
- struct crypto_async_request *req;
- struct mxc_scc_ctx *ctx;
- struct mxc_scc *scc = priv;
- int status;
- int ret;
-
- status = readl(scc->base + SCC_SCM_STATUS);
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
-
- if (status & SCC_SCM_STATUS_BUSY)
- return IRQ_NONE;
-
- req = scc->req;
- if (req) {
- ctx = crypto_tfm_ctx(req->tfm);
- ret = mxc_scc_get_data(ctx, req);
- if (ret != -EINPROGRESS)
- mxc_scc_ablkcipher_req_complete(req, ctx, ret);
- else
- mxc_scc_ablkcipher_next(ctx, req);
- }
-
- return IRQ_HANDLED;
-}
-
-static int mxc_scc_cra_init(struct crypto_tfm *tfm)
-{
- struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct mxc_scc_crypto_tmpl *algt;
-
- algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
-
- ctx->scc = algt->scc;
- return 0;
-}
-
-static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
-{
- struct crypto_async_request *req, *backlog;
-
- if (ctx->scc->hw_busy)
- return;
-
- spin_lock_bh(&ctx->scc->lock);
- backlog = crypto_get_backlog(&ctx->scc->queue);
- req = crypto_dequeue_request(&ctx->scc->queue);
- ctx->scc->req = req;
- ctx->scc->hw_busy = true;
- spin_unlock_bh(&ctx->scc->lock);
-
- if (!req)
- return;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- mxc_scc_ablkcipher_next(ctx, req);
-}
-
-static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- int ret;
-
- spin_lock_bh(&ctx->scc->lock);
- ret = crypto_enqueue_request(&ctx->scc->queue, req);
- spin_unlock_bh(&ctx->scc->lock);
-
- if (ret != -EINPROGRESS)
- return ret;
-
- mxc_scc_dequeue_req_unlocked(ctx);
-
- return -EINPROGRESS;
-}
-
-static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
- struct ablkcipher_request *req)
-{
- int err;
-
- err = mxc_scc_ablkcipher_req_init(req, ctx);
- if (err)
- return err;
-
- return mxc_scc_queue_req(ctx, &req->base);
-}
-
-static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
- ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static void mxc_scc_hw_init(struct mxc_scc *scc)
-{
- int offset;
-
- offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
-
- /* Fill the RED_START register */
- writel(offset, scc->base + SCC_SCM_RED_START);
-
- /* Fill the BLACK_START register */
- writel(offset, scc->base + SCC_SCM_BLACK_START);
-
- scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
- SCC_NON_RESERVED_OFFSET;
-
- scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
- SCC_NON_RESERVED_OFFSET;
-
- scc->bytes_remaining = scc->memory_size_bytes;
-}
-
-static int mxc_scc_get_config(struct mxc_scc *scc)
-{
- int config;
-
- config = readl(scc->base + SCC_SCM_CFG);
-
- scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
-
- scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;
-
- scc->memory_size_bytes = (scc->block_size_bytes *
- scc->black_ram_size_blocks) -
- SCC_NON_RESERVED_OFFSET;
-
- return 0;
-}
-
-static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
-{
- enum mxc_scc_state state;
- int status;
-
- status = readl(scc->base + SCC_SMN_STATUS) &
- SCC_SMN_STATUS_STATE_MASK;
-
- /* If in Health Check, try to bringup to secure state */
- if (status & SCC_SMN_STATE_HEALTH_CHECK) {
- /*
- * Write a simple algorithm to the Algorithm Sequence
- * Checker (ASC)
- */
- writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
- writel(0x5555, scc->base + SCC_SMN_SEQ_END);
- writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
-
- status = readl(scc->base + SCC_SMN_STATUS) &
- SCC_SMN_STATUS_STATE_MASK;
- }
-
- switch (status) {
- case SCC_SMN_STATE_NON_SECURE:
- case SCC_SMN_STATE_SECURE:
- state = SCC_STATE_OK;
- break;
- case SCC_SMN_STATE_FAIL:
- state = SCC_STATE_FAILED;
- break;
- default:
- state = SCC_STATE_UNIMPLEMENTED;
- break;
- }
-
- return state;
-}
-
-static struct mxc_scc_crypto_tmpl scc_ecb_des = {
- .alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-scc",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mxc_scc_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mxc_scc_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .encrypt = mxc_scc_ecb_des_encrypt,
- .decrypt = mxc_scc_ecb_des_decrypt,
- }
- }
-};
-
-static struct mxc_scc_crypto_tmpl scc_cbc_des = {
- .alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-scc",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mxc_scc_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mxc_scc_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .encrypt = mxc_scc_cbc_des_encrypt,
- .decrypt = mxc_scc_cbc_des_decrypt,
- }
- }
-};
-
-static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
- &scc_ecb_des,
- &scc_cbc_des,
-};
-
-static int mxc_scc_crypto_register(struct mxc_scc *scc)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
- scc_crypto_algs[i]->scc = scc;
- err = crypto_register_alg(&scc_crypto_algs[i]->alg);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (--i >= 0)
- crypto_unregister_alg(&scc_crypto_algs[i]->alg);
-
- return err;
-}
-
-static void mxc_scc_crypto_unregister(void)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
- crypto_unregister_alg(&scc_crypto_algs[i]->alg);
-}
-
-static int mxc_scc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct mxc_scc *scc;
- enum mxc_scc_state state;
- int irq;
- int ret;
- int i;
-
- scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
- if (!scc)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scc->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(scc->base))
- return PTR_ERR(scc->base);
-
- scc->clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(scc->clk)) {
- dev_err(dev, "Could not get ipg clock\n");
- return PTR_ERR(scc->clk);
- }
-
- ret = clk_prepare_enable(scc->clk);
- if (ret)
- return ret;
-
- /* clear error status register */
- writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR |
- SCC_SCM_INTR_CTRL_MASK_INTR,
- scc->base + SCC_SCM_INTR_CTRL);
-
- writel(SCC_SMN_COMMAND_CLR_INTR |
- SCC_SMN_COMMAND_EN_INTR,
- scc->base + SCC_SMN_COMMAND);
-
- scc->dev = dev;
- platform_set_drvdata(pdev, scc);
-
- ret = mxc_scc_get_config(scc);
- if (ret)
- goto err_out;
-
- state = mxc_scc_get_state(scc);
-
- if (state != SCC_STATE_OK) {
- dev_err(dev, "SCC in unusable state %d\n", state);
- ret = -EINVAL;
- goto err_out;
- }
-
- mxc_scc_hw_init(scc);
-
- spin_lock_init(&scc->lock);
- /* FIXME: calculate queue from RAM slots */
- crypto_init_queue(&scc->queue, 50);
-
- for (i = 0; i < 2; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- dev_err(dev, "failed to get irq resource: %d\n", irq);
- ret = irq;
- goto err_out;
- }
-
- ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
- IRQF_ONESHOT, dev_name(dev), scc);
- if (ret)
- goto err_out;
- }
-
- ret = mxc_scc_crypto_register(scc);
- if (ret) {
- dev_err(dev, "could not register algorithms");
- goto err_out;
- }
-
- dev_info(dev, "registered successfully.\n");
-
- return 0;
-
-err_out:
- clk_disable_unprepare(scc->clk);
-
- return ret;
-}
-
-static int mxc_scc_remove(struct platform_device *pdev)
-{
- struct mxc_scc *scc = platform_get_drvdata(pdev);
-
- mxc_scc_crypto_unregister();
-
- clk_disable_unprepare(scc->clk);
-
- return 0;
-}
-
-static const struct of_device_id mxc_scc_dt_ids[] = {
- { .compatible = "fsl,imx25-scc", .data = NULL, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
-
-static struct platform_driver mxc_scc_driver = {
- .probe = mxc_scc_probe,
- .remove = mxc_scc_remove,
- .driver = {
- .name = "mxc-scc",
- .of_match_table = mxc_scc_dt_ids,
- },
-};
-
-module_platform_driver(mxc_scc_driver);
-MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
-MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a2105cf33abb..b4429891e368 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -471,7 +471,7 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
wake_up_process(sdcp->thread[actx->chan]);
- return -EINPROGRESS;
+ return ret;
}
static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
@@ -700,11 +700,7 @@ static int dcp_chan_thread_sha(void *data)
struct crypto_async_request *backlog;
struct crypto_async_request *arq;
-
- struct dcp_sha_req_ctx *rctx;
-
- struct ahash_request *req;
- int ret, fini;
+ int ret;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -725,11 +721,7 @@ static int dcp_chan_thread_sha(void *data)
backlog->complete(backlog, -EINPROGRESS);
if (arq) {
- req = ahash_request_cast(arq);
- rctx = ahash_request_ctx(req);
-
ret = dcp_sha_req_to_buf(arq);
- fini = rctx->fini;
arq->complete(arq, ret);
}
}
@@ -797,7 +789,7 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);
- return -EINPROGRESS;
+ return ret;
}
static int dcp_sha_update(struct ahash_request *req)
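
Both mxs-dcp hunks that touch the return value stop hard-coding -EINPROGRESS: crypto_enqueue_request() already returns the status the caller is supposed to see (-EINPROGRESS normally, -EBUSY when the request had to go onto the backlog), so the driver can pass it straight through after waking its worker thread. A kernel-context sketch of that shape; example_enqueue is hypothetical and the locking mxs-dcp keeps around its queue is omitted:

static int example_enqueue(struct crypto_queue *queue,
			   struct crypto_async_request *req,
			   struct task_struct *worker)
{
	int ret;

	ret = crypto_enqueue_request(queue, req);	/* -EINPROGRESS or -EBUSY */
	wake_up_process(worker);

	return ret;				/* propagate, don't assume -EINPROGRESS */
}
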
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 9450c41211b2..0d5d3d8eb680 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -469,8 +469,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
return err;
shash->tfm = child_shash;
- shash->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);
@@ -788,13 +786,18 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
ctx->enc_type = n2alg->enc_type;
- if (keylen != (3 * DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
ctx->key_len = keylen;
memcpy(ctx->key.des3, key, keylen);
return 0;
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 66869976cfa2..57932848361b 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -296,7 +296,7 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
struct nx842_workmem *workmem;
struct nx842_scatterlist slin, slout;
struct nx_csbcpb *csbcpb;
- int ret = 0, max_sync_size;
+ int ret = 0;
unsigned long inbuf, outbuf;
struct vio_pfo_op op = {
.done = NULL,
@@ -319,7 +319,6 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
rcu_read_unlock();
return -ENODEV;
}
- max_sync_size = local_devdata->max_sync_size;
dev = local_devdata->dev;
/* Init scatterlist */
@@ -427,7 +426,7 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
struct nx842_workmem *workmem;
struct nx842_scatterlist slin, slout;
struct nx_csbcpb *csbcpb;
- int ret = 0, max_sync_size;
+ int ret = 0;
unsigned long inbuf, outbuf;
struct vio_pfo_op op = {
.done = NULL,
@@ -451,7 +450,6 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
rcu_read_unlock();
return -ENODEV;
}
- max_sync_size = local_devdata->max_sync_size;
dev = local_devdata->dev;
workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index d94e25df503b..f06565df2a12 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -353,7 +353,7 @@ static int decompress(struct nx842_crypto_ctx *ctx,
unsigned int adj_slen = slen;
u8 *src = p->in, *dst = p->out;
u16 padding = be16_to_cpu(g->padding);
- int ret, spadding = 0, dpadding = 0;
+ int ret, spadding = 0;
ktime_t timeout;
if (!slen || !required_len)
@@ -413,7 +413,6 @@ usesw:
spadding = 0;
dst = p->out;
dlen = p->oremain;
- dpadding = 0;
if (dlen < required_len) { /* have ignore bytes */
dst = ctx->dbounce;
dlen = BOUNCE_BUFFER_SIZE;
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index ad3358e74f5c..8f5820b78a83 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -105,8 +105,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -134,8 +133,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -279,8 +277,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -361,8 +358,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index a6764af83c6d..e06f0431dee5 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -162,8 +162,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -243,8 +242,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 92956bc6e45e..0293b17903d0 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -166,8 +166,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -249,8 +248,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 1ba2633e90d6..3d82d18ff810 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -656,9 +656,6 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE))
- return -EINVAL;
-
pr_debug("enter, keylen: %d\n", keylen);
/* Do we need to test against weak key? */
@@ -678,6 +675,28 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
+static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ pr_debug("enter, keylen: %d\n", keylen);
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT);
@@ -788,7 +807,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.cra_u.ablkcipher = {
.min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
- .setkey = omap_des_setkey,
+ .setkey = omap_des3_setkey,
.encrypt = omap_des_ecb_encrypt,
.decrypt = omap_des_ecb_decrypt,
}
@@ -811,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des_setkey,
+ .setkey = omap_des3_setkey,
.encrypt = omap_des_cbc_encrypt,
.decrypt = omap_des_cbc_decrypt,
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 0641185bd82f..51b20abac464 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1055,7 +1055,6 @@ static int omap_sham_finish_hmac(struct ahash_request *req)
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
return crypto_shash_init(shash) ?:
crypto_shash_update(shash, bctx->opad, bs) ?:
@@ -1226,7 +1225,6 @@ static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_digest(shash, data, len, out);
}
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 21e5cae0a1e0..e641481a3cd9 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -39,7 +39,6 @@ static int padlock_sha_init(struct shash_desc *desc)
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
dctx->fallback.tfm = ctx->fallback;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_init(&dctx->fallback);
}
@@ -48,7 +47,6 @@ static int padlock_sha_update(struct shash_desc *desc,
{
struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_update(&dctx->fallback, data, length);
}
@@ -65,7 +63,6 @@ static int padlock_sha_import(struct shash_desc *desc, const void *in)
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
dctx->fallback.tfm = ctx->fallback;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_import(&dctx->fallback, in);
}
@@ -91,7 +88,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
unsigned int leftover;
int err;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;
@@ -153,7 +149,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
unsigned int leftover;
int err;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 1b3acdeffede..05b89e703903 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -753,11 +753,6 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
- if (len > DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
if (unlikely(!des_ekey(tmp, key)) &&
(crypto_ablkcipher_get_flags(cipher) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
@@ -772,6 +767,30 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
}
/*
+ * Set the 3DES key for a block cipher transform. This also performs weak key
+ * checking if the transform has requested it.
+ */
+static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
+
+/*
* Set the key for an AES block cipher. Some key lengths are not supported in
* hardware so this must also check whether a fallback is needed.
*/
@@ -1196,7 +1215,7 @@ static const struct dev_pm_ops spacc_pm_ops = {
static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
- return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
+ return dev ? dev_get_drvdata(dev) : NULL;
}
static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
@@ -1353,7 +1372,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
- .setkey = spacc_des_setkey,
+ .setkey = spacc_des3_setkey,
.encrypt = spacc_ablk_encrypt,
.decrypt = spacc_ablk_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1380,7 +1399,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
- .setkey = spacc_des_setkey,
+ .setkey = spacc_des3_setkey,
.encrypt = spacc_ablk_encrypt,
.decrypt = spacc_ablk_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
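
The spacc_dev_to_engine() change relies on platform_set_drvdata() being a thin wrapper around dev_set_drvdata() on &pdev->dev, so the pointer stored at probe time can be read back from the struct device directly, without the to_platform_device() detour. A kernel-context sketch of that equivalence; example_store_and_fetch is hypothetical:

static void example_store_and_fetch(struct platform_device *pdev, void *engine)
{
	platform_set_drvdata(pdev, engine);	/* == dev_set_drvdata(&pdev->dev, engine) */

	WARN_ON(dev_get_drvdata(&pdev->dev) != engine);	/* same pointer comes back */
}
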
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 975c75198f56..c8d401646902 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -164,7 +164,6 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
memset(ctx->ipad, 0, block_size);
memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
- shash->flags = 0x0;
if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index c9f324730d71..692a7aaee749 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -1300,8 +1300,6 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg rsa = {
.encrypt = qat_rsa_enc,
.decrypt = qat_rsa_dec,
- .sign = qat_rsa_dec,
- .verify = qat_rsa_enc,
.set_pub_key = qat_rsa_setpubkey,
.set_priv_key = qat_rsa_setprivkey,
.max_size = qat_rsa_max_size,
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 154b6baa124e..8d3493855a70 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -198,6 +198,25 @@ weakkey:
return -EINVAL;
}
+static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(ablk);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(ablk, flags);
+ return err;
+ }
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+}
+
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
struct crypto_tfm *tfm =
@@ -363,7 +382,8 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
alg->cra_ablkcipher.ivsize = def->ivsize;
alg->cra_ablkcipher.min_keysize = def->min_keysize;
alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+ alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ?
+ qce_des3_setkey : qce_ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 02dac6ae7e53..313759521a0f 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -46,24 +46,36 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
+static int rk_des_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
- if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (!des_ekey(tmp, key) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
- if (keylen == DES_KEY_SIZE) {
- if (!des_ekey(tmp, key) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
}
ctx->keylen = keylen;
@@ -250,9 +262,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
dev->sg_src->offset + dev->sg_src->length - ivsize;
- /* store the iv that need to be updated in chain mode */
- if (ctx->mode & RK_CRYPTO_DEC)
+ /* Store the iv that need to be updated in chain mode.
+ * And update the IV buffer to contain the next IV for decryption mode.
+ */
+ if (ctx->mode & RK_CRYPTO_DEC) {
memcpy(ctx->iv, src_last_blk, ivsize);
+ sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
+ ivsize, dev->total - ivsize);
+ }
err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
if (!err)
@@ -288,13 +305,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
struct ablkcipher_request *req =
ablkcipher_request_cast(dev->async_req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- if (ivsize == DES_BLOCK_SIZE)
- memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
- ivsize);
- else if (ivsize == AES_BLOCK_SIZE)
- memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+ /* Update the IV buffer to contain the next IV for encryption mode. */
+ if (!(ctx->mode & RK_CRYPTO_DEC)) {
+ if (dev->aligned) {
+ memcpy(req->info, sg_virt(dev->sg_dst) +
+ dev->sg_dst->length - ivsize, ivsize);
+ } else {
+ memcpy(req->info, dev->addr_vir +
+ dev->count - ivsize, ivsize);
+ }
+ }
}
static void rk_update_iv(struct rk_crypto_info *dev)
@@ -457,7 +480,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
- .setkey = rk_tdes_setkey,
+ .setkey = rk_des_setkey,
.encrypt = rk_des_ecb_encrypt,
.decrypt = rk_des_ecb_decrypt,
}
@@ -483,7 +506,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
+ .setkey = rk_des_setkey,
.encrypt = rk_des_cbc_encrypt,
.decrypt = rk_des_cbc_decrypt,
}
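
The rk3288 IV handling above implements the standard CBC chaining contract: after a request completes, the caller's IV buffer must hold the last ciphertext block, which comes from the destination when encrypting but has to be saved from the source before an in-place operation can overwrite it when decrypting. A small userspace sketch of just that rule, with an assumed 16-byte block size:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* encrypting: next IV is the last block written to dst */
static void next_iv_encrypt(unsigned char *iv, const unsigned char *dst, size_t len)
{
	memcpy(iv, dst + len - BLOCK_SIZE, BLOCK_SIZE);
}

/* decrypting: next IV is the last block of src, grabbed before src may be overwritten */
static void next_iv_decrypt(unsigned char *iv, const unsigned char *src, size_t len)
{
	memcpy(iv, src + len - BLOCK_SIZE, BLOCK_SIZE);
}

int main(void)
{
	unsigned char ct[32] = { [31] = 0xAB };
	unsigned char iv[BLOCK_SIZE];

	next_iv_encrypt(iv, ct, sizeof(ct));
	next_iv_decrypt(iv, ct, sizeof(ct));
	printf("next IV ends with 0x%02X\n", iv[BLOCK_SIZE - 1]);	/* 0xAB */
	return 0;
}
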
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1afdcb81d8ed..9ef25230c199 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1534,7 +1534,6 @@ static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_digest(shash, data, len, out);
}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8c32a3059b4a..fd11162a915e 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -354,7 +354,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
u8 state;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
state = SAHARA_STATUS_GET_STATE(status);
@@ -406,7 +406,7 @@ static void sahara_dump_descriptors(struct sahara_dev *dev)
{
int i;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
@@ -427,7 +427,7 @@ static void sahara_dump_links(struct sahara_dev *dev)
{
int i;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 63aa78c0b12b..4491e2197d9f 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -24,6 +24,7 @@ config CRYPTO_DEV_STM32_CRYP
depends on ARCH_STM32
select CRYPTO_HASH
select CRYPTO_ENGINE
+ select CRYPTO_DES
help
This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 23b0b7bd64c7..cddcc97875b2 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -137,7 +137,6 @@ struct stm32_cryp {
struct crypto_engine *engine;
- struct mutex lock; /* protects req / areq */
struct ablkcipher_request *req;
struct aead_request *areq;
@@ -394,6 +393,23 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
}
}
+static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
+{
+ struct ablkcipher_request *req = cryp->req;
+ u32 *tmp = req->info;
+
+ if (!tmp)
+ return;
+
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
+
+ if (is_aes(cryp)) {
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
+ }
+}
+
static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
{
unsigned int i;
@@ -623,6 +639,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
/* Phase 4 : output tag */
err = stm32_cryp_read_auth_tag(cryp);
+ if (!err && (!(is_gcm(cryp) || is_ccm(cryp))))
+ stm32_cryp_get_iv(cryp);
+
if (cryp->sgs_copied) {
void *buf_in, *buf_out;
int pages, len;
@@ -645,18 +664,13 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
- if (is_gcm(cryp) || is_ccm(cryp)) {
+ if (is_gcm(cryp) || is_ccm(cryp))
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
- cryp->areq = NULL;
- } else {
+ else
crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
err);
- cryp->req = NULL;
- }
memset(cryp->ctx->key, 0, cryp->ctx->keylen);
-
- mutex_unlock(&cryp->lock);
}
static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
@@ -753,19 +767,35 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ u32 tmp[DES_EXPKEY_WORDS];
+
if (keylen != DES_KEY_SIZE)
return -EINVAL;
- else
- return stm32_cryp_setkey(tfm, key, keylen);
+
+ if ((crypto_ablkcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
+ unlikely(!des_ekey(tmp, key))) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ return stm32_cryp_setkey(tfm, key, keylen);
}
static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
- if (keylen != (3 * DES_KEY_SIZE))
- return -EINVAL;
- else
- return stm32_cryp_setkey(tfm, key, keylen);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(tfm);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(tfm, flags);
+ return err;
+ }
+
+ return stm32_cryp_setkey(tfm, key, keylen);
}
static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -917,8 +947,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!cryp)
return -ENODEV;
- mutex_lock(&cryp->lock);
-
rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
@@ -930,6 +958,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (req) {
cryp->req = req;
+ cryp->areq = NULL;
cryp->total_in = req->nbytes;
cryp->total_out = cryp->total_in;
} else {
@@ -955,6 +984,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
* <---------- total_out ----------------->
*/
cryp->areq = areq;
+ cryp->req = NULL;
cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
cryp->total_in = areq->assoclen + areq->cryptlen;
if (is_encrypt(cryp))
@@ -976,19 +1006,19 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (cryp->in_sg_len < 0) {
dev_err(cryp->dev, "Cannot get in_sg_len\n");
ret = cryp->in_sg_len;
- goto out;
+ return ret;
}
cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
if (cryp->out_sg_len < 0) {
dev_err(cryp->dev, "Cannot get out_sg_len\n");
ret = cryp->out_sg_len;
- goto out;
+ return ret;
}
ret = stm32_cryp_copy_sgs(cryp);
if (ret)
- goto out;
+ return ret;
scatterwalk_start(&cryp->in_walk, cryp->in_sg);
scatterwalk_start(&cryp->out_walk, cryp->out_sg);
@@ -1000,10 +1030,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
}
ret = stm32_cryp_hw_init(cryp);
-out:
- if (ret)
- mutex_unlock(&cryp->lock);
-
return ret;
}
@@ -1943,8 +1969,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
cryp->dev = dev;
- mutex_init(&cryp->lock);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cryp->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cryp->regs))
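
stm32_cryp_get_iv() exists because the skcipher/ablkcipher contract lets a caller chain requests by reusing the same IV buffer: the driver must leave the next-chaining IV in req->info when a plain cipher request completes (skipped for GCM/CCM above). Dropping the mutex also appears consistent with the driver handing everything to crypto_engine, which dispatches one request at a time per engine. A kernel-context sketch of the caller side, written against the modern skcipher API rather than this driver's ablkcipher one; every name other than the crypto API calls is hypothetical:

static int example_chain_two_cbc_requests(struct crypto_skcipher *tfm,
					  struct scatterlist *sg1,
					  struct scatterlist *sg2,
					  unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	skcipher_request_set_crypt(req, sg1, sg1, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (err)
		goto out;

	/* iv[] must now hold the last ciphertext block of the first request */
	skcipher_request_set_crypt(req, sg2, sg2, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	return err;
}
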
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4a6cc8a3045d..bfc49e67124b 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -181,8 +181,6 @@ struct stm32_hash_dev {
u32 dma_mode;
u32 dma_maxburst;
- spinlock_t lock; /* lock to protect queue */
-
struct ahash_request *req;
struct crypto_engine *engine;
@@ -977,7 +975,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
pm_runtime_get_sync(hdev->dev);
- while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+ while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 54fd714d53ca..b060a0810934 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -41,11 +41,6 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
if (!areq->cryptlen)
return 0;
- if (!areq->iv) {
- dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
- return -EINVAL;
- }
-
if (!areq->src || !areq->dst) {
dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
return -EINVAL;
@@ -134,6 +129,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
struct scatterlist *out_sg = areq->dst;
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun4i_ss_alg_template *algt;
u32 mode = ctx->mode;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;
@@ -153,20 +150,20 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int obo = 0; /* offset in bufo*/
unsigned int obl = 0; /* length of data in bufo */
unsigned long flags;
+ bool need_fallback;
if (!areq->cryptlen)
return 0;
- if (!areq->iv) {
- dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
- return -EINVAL;
- }
-
if (!areq->src || !areq->dst) {
dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
return -EINVAL;
}
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
+ need_fallback = true;
+
/*
* if we have only SGs with size multiple of 4,
* we can use the SS optimized function
@@ -182,9 +179,24 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
out_sg = sg_next(out_sg);
}
- if (no_chunk == 1)
+ if (no_chunk == 1 && !need_fallback)
return sun4i_ss_opti_poll(areq);
+ if (need_fallback) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL,
+ NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (ctx->mode & SS_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+ }
+
spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
@@ -458,6 +470,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct sun4i_ss_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
@@ -468,9 +481,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct sun4i_cipher_req_ctx));
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
return 0;
}
+void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ crypto_free_sync_skcipher(op->fallback_tfm);
+}
+
/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
@@ -495,7 +521,11 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the DES key, prepare the mode to be used */
@@ -525,7 +555,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the 3DES key, prepare the mode to be used */
@@ -533,14 +567,18 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun4i_ss_ctx *ss = op->ss;
+ int err;
+
+ err = des3_verify_key(tfm, key);
+ if (unlikely(err))
+ return err;
- if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
- dev_err(ss->dev, "Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+
}
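Editorial note: the sun4i-ss hunks above add a software fallback for request lengths that are not a multiple of the block size: the transform allocates a sync skcipher with CRYPTO_ALG_NEED_FALLBACK in its init hook, frees it in the exit hook, and bounces non-conforming requests to it on the stack. A reduced sketch of the same pattern, with hypothetical names (demo_*), follows; it mirrors the calls used in the diff but is not the driver's code:

#include <linux/err.h>
#include <linux/crypto.h>
#include <crypto/skcipher.h>

struct demo_tfm_ctx {
	struct crypto_sync_skcipher *fallback;
};

static int demo_cipher_init(struct crypto_tfm *tfm)
{
	struct demo_tfm_ctx *op = crypto_tfm_ctx(tfm);
	const char *name = crypto_tfm_alg_name(tfm);

	/* Ask the crypto API for a software implementation of "name". */
	op->fallback = crypto_alloc_sync_skcipher(name, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(op->fallback);
}

static void demo_cipher_exit(struct crypto_tfm *tfm)
{
	struct demo_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(op->fallback);
}

/* Bounce one request to the fallback, preserving direction and IV. */
static int demo_do_fallback(struct skcipher_request *areq, bool decrypt,
			    struct demo_tfm_ctx *op)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback);
	int err;

	skcipher_request_set_sync_tfm(subreq, op->fallback);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	err = decrypt ? crypto_skcipher_decrypt(subreq)
		      : crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);
	return err;
}

The setkey hunks forward the key and the CRYPTO_TFM_REQ_* flags to the same fallback transform so the hardware and software paths stay interchangeable.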
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 89adf9e0fed2..05b3d3c32f6d 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -92,11 +92,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -107,17 +108,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.decrypt = sun4i_ss_ecb_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -134,11 +135,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -154,11 +156,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -175,11 +178,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -190,16 +194,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.decrypt = sun4i_ss_ecb_des3_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index a4b5ff2b72f8..f6936bb3b7be 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
}
} else {
/* Since we have the flag final, we can go up to modulo 4 */
- end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+ if (areq->nbytes < 4)
+ end = 0;
+ else
+ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
}
/* TODO if SGlen % 4 and !op->len then DMA */
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index f3ac90692ac6..8c4ec9e93565 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -161,6 +161,7 @@ struct sun4i_tfm_ctx {
u32 keylen;
u32 keymode;
struct sun4i_ss_ctx *ss;
+ struct crypto_sync_skcipher *fallback_tfm;
};
struct sun4i_cipher_req_ctx {
@@ -203,6 +204,7 @@ int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq);
int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq);
int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+void sun4i_ss_cipher_exit(struct crypto_tfm *tfm);
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index de78b54bcfb1..1d429fc073d1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -913,6 +913,54 @@ badkey:
return -EINVAL;
}
+static int aead_des3_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
+ goto badkey;
+
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(authenc);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(authenc, flags);
+ goto out;
+ }
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
+
+ ctx->keylen = keys.authkeylen + keys.enckeylen;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
+ DMA_TO_DEVICE);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
/*
* talitos_edesc - s/w-extended descriptor
* @src_nents: number of segments in input scatterlist
@@ -1527,12 +1575,22 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct device *dev = ctx->dev;
- u32 tmp[DES_EXPKEY_WORDS];
- if (keylen > TALITOS_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+ memcpy(&ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 tmp[DES_EXPKEY_WORDS];
if (unlikely(crypto_ablkcipher_get_flags(cipher) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
@@ -1541,15 +1599,23 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return -EINVAL;
}
- if (ctx->keylen)
- dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+ return ablkcipher_setkey(cipher, key, keylen);
+}
- memcpy(&ctx->key, key, keylen);
- ctx->keylen = keylen;
+static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 flags;
+ int err;
- ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
- return 0;
+ return ablkcipher_setkey(cipher, key, keylen);
}
static void common_nonsnoop_unmap(struct device *dev,
@@ -2313,6 +2379,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2336,6 +2403,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2399,6 +2467,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2422,6 +2491,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2485,6 +2555,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2508,6 +2579,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2550,6 +2622,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2592,6 +2665,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2654,6 +2728,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2676,6 +2751,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2748,6 +2824,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
+ .setkey = ablkcipher_des_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2764,6 +2841,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
+ .setkey = ablkcipher_des_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2781,6 +2859,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablkcipher_des3_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2798,6 +2877,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablkcipher_des3_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3144,7 +3224,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_init = talitos_cra_init;
alg->cra_exit = talitos_cra_exit;
alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = ablkcipher_setkey;
+ alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
+ ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
break;
@@ -3152,7 +3233,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg = &t_alg->algt.alg.aead.base;
alg->cra_exit = talitos_cra_exit;
t_alg->algt.alg.aead.init = talitos_cra_init_aead;
- t_alg->algt.alg.aead.setkey = aead_setkey;
+ t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
+ aead_setkey;
t_alg->algt.alg.aead.encrypt = aead_encrypt;
t_alg->algt.alg.aead.decrypt = aead_decrypt;
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
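Editorial note: the talitos changes replace open-coded DES/3DES weak-key checks with the generic helpers (crypto_authenc_extractkeys() plus __des3_verify_key()) and register per-algorithm setkey handlers. A minimal sketch of the 3DES setkey shape used here, assuming __des3_verify_key() from <crypto/des.h> as elsewhere in this series; the context layout is hypothetical:

#include <linux/string.h>
#include <crypto/des.h>
#include <crypto/internal/skcipher.h>

struct demo_des3_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
	unsigned int keylen;
};

static int demo_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct demo_des3_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 flags;
	int err;

	/* Reject degenerate 3DES keys; the helper updates the flags. */
	flags = crypto_skcipher_get_flags(tfm);
	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_skcipher_set_flags(tfm, flags);
		return err;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

The "setkey = alg->cra_ablkcipher.setkey ?: ablkcipher_setkey;" lines rely on GCC's a ?: b extension: a template-provided setkey wins, otherwise the generic one is kept as the default.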
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
index b497ae3dde07..dc8fff29c4b3 100644
--- a/drivers/crypto/ux500/cryp/Makefile
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -3,11 +3,7 @@
# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
# * License terms: GNU General Public License (GPL) version 2 */
-ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_cryp_core.o := -DDEBUG
-CFLAGS_cryp.o := -DDEBUG
-CFLAGS_cryp_irq.o := -DDEBUG
-endif
+ccflags-$(CONFIG_CRYPTO_DEV_UX500_DEBUG) += -DDEBUG
obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 3235611928f2..7a93cba0877f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1019,37 +1019,16 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- u32 *flags = &cipher->base.crt_flags;
- const u32 *K = (const u32 *)key;
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
- int i, ret;
+ u32 flags;
+ int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- if (keylen != DES3_EDE_KEY_SIZE) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
- __func__);
- return -EINVAL;
- }
- /* Checking key interdependency for weak key detection. */
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
- __func__);
- return -EINVAL;
- }
- for (i = 0; i < 3; i++) {
- ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
- if (unlikely(ret == 0) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
- __func__);
- return -EINVAL;
- }
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
}
memcpy(ctx->key, key, keylen);
@@ -1219,57 +1198,6 @@ static struct cryp_algo_template cryp_algs[] = {
{
.algomode = CRYP_ALGO_DES_ECB,
.crypto = {
- .cra_name = "des",
- .cra_driver_name = "des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
-
- },
- {
- .algomode = CRYP_ALGO_TDES_ECB,
- .crypto = {
- .cra_name = "des3_ede",
- .cra_driver_name = "des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
- },
- {
- .algomode = CRYP_ALGO_DES_ECB,
- .crypto = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-ux500",
.cra_priority = 300,
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d7316f7a3a69..603a62081994 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include "aesp8-ppc.h"
@@ -78,20 +79,21 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
crypto_cipher_encrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
@@ -108,7 +110,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
crypto_cipher_decrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
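Editorial note: throughout the vmx files the in_interrupt() check is replaced by crypto_simd_usable(), which also covers contexts where the vector state must not be touched even though no interrupt is active, and the accumulated setkey return values are collapsed into a plain -EINVAL. The dispatch pattern, reduced to a sketch with placeholder cipher helpers (powerpc-specific, since enable_kernel_vsx() is a powerpc primitive; demo_* names are assumptions):

#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>
#include <crypto/internal/simd.h>

struct demo_ctx;					/* placeholder key schedule */
void demo_sw_encrypt(struct demo_ctx *ctx, u8 *dst, const u8 *src);
void demo_vsx_encrypt(struct demo_ctx *ctx, u8 *dst, const u8 *src);

/* Pick the VSX fast path only when SIMD state may be used here. */
static void demo_encrypt_one(struct demo_ctx *ctx, u8 *dst, const u8 *src)
{
	if (!crypto_simd_usable()) {
		demo_sw_encrypt(ctx, dst, src);	/* plain C fallback */
		return;
	}

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	demo_vsx_encrypt(ctx, dst, src);	/* VSX-accelerated path */
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();
}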
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c5c5ff82b52e..a1a9a6f0d42c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
@@ -81,13 +82,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
@@ -99,7 +101,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
struct p8_aes_cbc_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
@@ -138,7 +140,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
struct p8_aes_cbc_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 8a2fe092cb8e..192a53512f5e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
@@ -83,8 +84,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
@@ -118,7 +120,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
struct p8_aes_ctr_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index ecd64e5cc5bb..00d412d811ae 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/skcipher.h>
@@ -86,14 +87,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
- ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+ ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
@@ -108,7 +110,7 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
struct p8_aes_xts_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index d6a9f63d65ba..de78282b8f44 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
stvx_u $out1,$x10,$out
stvx_u $out2,$x20,$out
addi $out,$out,0x30
- b Lcbc_dec8x_done
+ b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_two:
@@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
stvx_u $out0,$x00,$out
stvx_u $out1,$x10,$out
addi $out,$out,0x20
- b Lcbc_dec8x_done
+ b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_one:
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index dd8b8716467a..b5a6883bb09e 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -23,16 +23,15 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/ghash.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/b128ops.h>
-#define IN_INTERRUPT in_interrupt()
-
void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
@@ -102,7 +101,6 @@ static int p8_ghash_init(struct shash_desc *desc)
dctx->bytes = 0;
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
dctx->fallback_desc.tfm = ctx->fallback;
- dctx->fallback_desc.flags = desc->flags;
return crypto_shash_init(&dctx->fallback_desc);
}
@@ -131,7 +129,7 @@ static int p8_ghash_update(struct shash_desc *desc,
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
+ if (!crypto_simd_usable()) {
return crypto_shash_update(&dctx->fallback_desc, src,
srclen);
} else {
@@ -182,7 +180,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
+ if (!crypto_simd_usable()) {
return crypto_shash_final(&dctx->fallback_desc, out);
} else {
if (dctx->bytes) {
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 31a98dc6f849..a9f519830615 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -41,7 +41,7 @@ static struct crypto_alg *algs[] = {
NULL,
};
-int __init p8_init(void)
+static int __init p8_init(void)
{
int ret = 0;
struct crypto_alg **alg_it;
@@ -67,7 +67,7 @@ int __init p8_init(void)
return ret;
}
-void __exit p8_exit(void)
+static void __exit p8_exit(void)
{
struct crypto_alg **alg_it;
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 5ef624fe3934..a59f338f520f 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -23,7 +23,6 @@ config DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
- depends on m # until we can kill DEV_DAX_PMEM_COMPAT
default DEV_DAX
help
Support raw access to persistent memory. Note that this
@@ -50,7 +49,7 @@ config DEV_DAX_KMEM
config DEV_DAX_PMEM_COMPAT
tristate "PMEM DAX: support the deprecated /sys/class/dax interface"
- depends on DEV_DAX_PMEM
+ depends on m && DEV_DAX_PMEM=m
default DEV_DAX_PMEM
help
Older versions of the libdaxctl library expect to find all
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index e428468ab661..996d68ff992a 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -184,8 +184,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
- vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -235,8 +234,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
- vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index f71019ce0647..f9f51786d556 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -37,13 +37,13 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
devm_nsio_disable(dev, nsio);
/* reserve the metadata area, device-dax will reserve the data */
- pfn_sb = nd_pfn->pfn_sb;
+ pfn_sb = nd_pfn->pfn_sb;
offset = le64_to_cpu(pfn_sb->dataoff);
if (!devm_request_mem_region(dev, nsio->res.start, offset,
dev_name(&ndns->dev))) {
- dev_warn(dev, "could not reserve metadata\n");
+ dev_warn(dev, "could not reserve metadata\n");
return ERR_PTR(-EBUSY);
- }
+ }
rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
if (rc != 2)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 0a339b85133e..bbd57ca0634a 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -412,11 +412,9 @@ static struct dax_device *to_dax_dev(struct inode *inode)
return container_of(inode, struct dax_device, inode);
}
-static void dax_i_callback(struct rcu_head *head)
+static void dax_free_inode(struct inode *inode)
{
- struct inode *inode = container_of(head, struct inode, i_rcu);
struct dax_device *dax_dev = to_dax_dev(inode);
-
kfree(dax_dev->host);
dax_dev->host = NULL;
if (inode->i_rdev)
@@ -427,16 +425,15 @@ static void dax_i_callback(struct rcu_head *head)
static void dax_destroy_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
-
WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
"kill_dax() must be called before final iput()\n");
- call_rcu(&inode->i_rcu, dax_i_callback);
}
static const struct super_operations dax_sops = {
.statfs = simple_statfs,
.alloc_inode = dax_alloc_inode,
.destroy_inode = dax_destroy_inode,
+ .free_inode = dax_free_inode,
.drop_inode = generic_delete_inode,
};
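Editorial note: the dax/super.c hunk converts a hand-rolled call_rcu() callback into the new ->free_inode() super operation, which the VFS invokes for the driver after an RCU grace period; only work that must run synchronously at eviction time stays in ->destroy_inode(). A partial sketch of that split for a hypothetical filesystem (allocation side omitted; demo_* names and the label field are invented for illustration):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_inode {
	struct inode vfs_inode;
	char *label;				/* extra state to release */
};

static struct kmem_cache *demo_inode_cachep;

/* Called by the VFS after an RCU grace period; free memory here. */
static void demo_free_inode(struct inode *inode)
{
	struct demo_inode *di =
		container_of(inode, struct demo_inode, vfs_inode);

	kfree(di->label);
	kmem_cache_free(demo_inode_cachep, di);
}

static const struct super_operations demo_sops = {
	.statfs		= simple_statfs,
	.free_inode	= demo_free_inode,	/* replaces the call_rcu() dance */
	.drop_inode	= generic_delete_inode,
};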
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index d67242d87744..87e93406d7cd 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -240,7 +240,7 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
}
list_for_each_entry(edev, &devfreq_event_list, node) {
- if (!strcmp(edev->desc->name, node->name))
+ if (of_node_name_eq(node, edev->desc->name))
goto out;
}
edev = NULL;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 0ae3de76833b..6b6991f0e873 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -29,6 +29,9 @@
#include <linux/of.h>
#include "governor.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/devfreq.h>
+
static struct class *devfreq_class;
/*
@@ -228,7 +231,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
* if is not found. This can happen when both drivers (the governor driver
* and the driver that call devfreq_add_device) are built as modules.
* devfreq_list_lock should be held by the caller. Returns the matched
- * governor's pointer.
+ * governor's pointer or an error pointer.
*/
static struct devfreq_governor *try_then_request_governor(const char *name)
{
@@ -254,7 +257,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
/* Restore previous state before return */
mutex_lock(&devfreq_list_lock);
if (err)
- return NULL;
+ return ERR_PTR(err);
governor = find_devfreq_governor(name);
}
@@ -394,6 +397,8 @@ static void devfreq_monitor(struct work_struct *work)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
mutex_unlock(&devfreq->lock);
+
+ trace_devfreq_monitor(devfreq);
}
/**
@@ -528,7 +533,7 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
mutex_lock(&devfreq->lock);
if (!devfreq->stop_polling)
queue_delayed_work(devfreq_wq, &devfreq->work,
- msecs_to_jiffies(devfreq->profile->polling_ms));
+ msecs_to_jiffies(devfreq->profile->polling_ms));
}
out:
mutex_unlock(&devfreq->lock);
@@ -537,7 +542,7 @@ EXPORT_SYMBOL(devfreq_interval_update);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
- * has been changed out of devfreq framework.
+ * has been changed out of devfreq framework.
* @nb: the notifier_block (supposed to be devfreq->nb)
* @type: not used
* @devp: not used
@@ -651,7 +656,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_unlock(&devfreq->lock);
err = set_freq_table(devfreq);
if (err < 0)
- goto err_out;
+ goto err_dev;
mutex_lock(&devfreq->lock);
}
@@ -683,16 +688,27 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_out;
}
- devfreq->trans_table =
- devm_kzalloc(&devfreq->dev,
- array3_size(sizeof(unsigned int),
- devfreq->profile->max_state,
- devfreq->profile->max_state),
- GFP_KERNEL);
+ devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+ array3_size(sizeof(unsigned int),
+ devfreq->profile->max_state,
+ devfreq->profile->max_state),
+ GFP_KERNEL);
+ if (!devfreq->trans_table) {
+ mutex_unlock(&devfreq->lock);
+ err = -ENOMEM;
+ goto err_devfreq;
+ }
+
devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
- devfreq->profile->max_state,
- sizeof(unsigned long),
- GFP_KERNEL);
+ devfreq->profile->max_state,
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!devfreq->time_in_state) {
+ mutex_unlock(&devfreq->lock);
+ err = -ENOMEM;
+ goto err_devfreq;
+ }
+
devfreq->last_stat_updated = jiffies;
srcu_init_notifier_head(&devfreq->transition_notifier_list);
@@ -726,7 +742,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
err_init:
mutex_unlock(&devfreq_list_lock);
-
+err_devfreq:
devfreq_remove_device(devfreq);
devfreq = NULL;
err_dev:
@@ -1113,7 +1129,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
int ret;
char str_governor[DEVFREQ_NAME_LEN + 1];
- struct devfreq_governor *governor;
+ const struct devfreq_governor *governor, *prev_governor;
ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
if (ret != 1)
@@ -1142,12 +1158,24 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
goto out;
}
}
+ prev_governor = df->governor;
df->governor = governor;
strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
- if (ret)
+ if (ret) {
dev_warn(dev, "%s: Governor %s not started(%d)\n",
__func__, df->governor->name, ret);
+ df->governor = prev_governor;
+ strncpy(df->governor_name, prev_governor->name,
+ DEVFREQ_NAME_LEN);
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+ if (ret) {
+ dev_err(dev,
+ "%s: reverting to Governor %s failed (%d)\n",
+ __func__, df->governor_name, ret);
+ df->governor = NULL;
+ }
+ }
out:
mutex_unlock(&devfreq_list_lock);
@@ -1172,7 +1200,7 @@ static ssize_t available_governors_show(struct device *d,
*/
if (df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
- "%s ", df->governor_name);
+ "%s ", df->governor_name);
/*
* The devfreq device shows the registered governor except for
* immutable governors such as passive governor .
@@ -1485,8 +1513,8 @@ EXPORT_SYMBOL(devfreq_recommended_opp);
/**
* devfreq_register_opp_notifier() - Helper function to get devfreq notified
- * for any changes in the OPP availability
- * changes
+ * for any changes in the OPP availability
+ * changes
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1498,8 +1526,8 @@ EXPORT_SYMBOL(devfreq_register_opp_notifier);
/**
* devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
- * notified for any changes in the OPP
- * availability changes anymore.
+ * notified for any changes in the OPP
+ * availability changes anymore.
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*
@@ -1518,8 +1546,8 @@ static void devm_devfreq_opp_release(struct device *dev, void *res)
}
/**
- * devm_ devfreq_register_opp_notifier()
- * - Resource-managed devfreq_register_opp_notifier()
+ * devm_devfreq_register_opp_notifier() - Resource-managed
+ * devfreq_register_opp_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1547,8 +1575,8 @@ int devm_devfreq_register_opp_notifier(struct device *dev,
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
/**
- * devm_devfreq_unregister_opp_notifier()
- * - Resource-managed devfreq_unregister_opp_notifier()
+ * devm_devfreq_unregister_opp_notifier() - Resource-managed
+ * devfreq_unregister_opp_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1567,8 +1595,8 @@ EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
* @list: DEVFREQ_TRANSITION_NOTIFIER.
*/
int devfreq_register_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct notifier_block *nb,
+ unsigned int list)
{
int ret = 0;
@@ -1674,9 +1702,9 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
* @list: DEVFREQ_TRANSITION_NOTIFIER.
*/
void devm_devfreq_unregister_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
devm_devfreq_dev_match, devfreq));
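Editorial note: two behavioural fixes stand out in devfreq.c above: try_then_request_governor() now returns ERR_PTR(err) instead of NULL so callers see why the lookup failed, and governor_store() restores the previous governor if starting the new one fails. A tiny sketch of the ERR_PTR calling convention the first change moves to (demo_* names are placeholders, not devfreq symbols):

#include <linux/err.h>

struct demo_governor;
struct demo_governor *demo_find_governor(const char *name);	/* placeholder */

static int demo_select_governor(const char *name,
				struct demo_governor **out)
{
	struct demo_governor *gov;

	gov = demo_find_governor(name);	/* returns ERR_PTR(-ENODEV) etc., never NULL */
	if (IS_ERR(gov))
		return PTR_ERR(gov);	/* propagate the encoded errno */

	*out = gov;
	return 0;
}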
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index c61de0bdf053..c2ea94957501 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -529,7 +529,7 @@ static int of_get_devfreq_events(struct device_node *np,
if (!ppmu_events[i].name)
continue;
- if (!of_node_cmp(node->name, ppmu_events[i].name))
+ if (of_node_name_eq(node, ppmu_events[i].name))
break;
}
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 22b113363ffc..a436ec4901bb 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -26,6 +26,8 @@
#include <linux/list.h>
#include <linux/of.h>
+#include <soc/rockchip/rk3399_grf.h>
+
#define RK3399_DMC_NUM_CH 2
/* DDRMON_CTRL */
@@ -43,18 +45,6 @@
#define DDRMON_CH1_COUNT_NUM 0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM 0x40
-/* pmu grf */
-#define PMUGRF_OS_REG2 0x308
-#define DDRTYPE_SHIFT 13
-#define DDRTYPE_MASK 7
-
-enum {
- DDR3 = 3,
- LPDDR3 = 6,
- LPDDR4 = 7,
- UNUSED = 0xFF
-};
-
struct dmc_usage {
u32 access;
u32 total;
@@ -83,16 +73,17 @@ static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
u32 ddr_type;
/* get ddr type */
- regmap_read(info->regmap_pmu, PMUGRF_OS_REG2, &val);
- ddr_type = (val >> DDRTYPE_SHIFT) & DDRTYPE_MASK;
+ regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+ ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
+ RK3399_PMUGRF_DDRTYPE_MASK;
/* clear DDRMON_CTRL setting */
writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);
/* set ddr type to dfi */
- if (ddr_type == LPDDR3)
+ if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3)
writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
- else if (ddr_type == LPDDR4)
+ else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4)
writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);
/* enable count, use software mode */
@@ -211,7 +202,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
if (IS_ERR(data->clk)) {
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->clk);
- };
+ }
/* try to find the optional reference to the pmu syscon */
node = of_parse_phandle(np, "rockchip,pmu", 0);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index c25658b26598..486cc5b422f1 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -514,6 +514,13 @@ err:
return ret;
}
+static void exynos_bus_shutdown(struct platform_device *pdev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(&pdev->dev);
+
+ devfreq_suspend_device(bus->devfreq);
+}
+
#ifdef CONFIG_PM_SLEEP
static int exynos_bus_resume(struct device *dev)
{
@@ -556,6 +563,7 @@ MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
static struct platform_driver exynos_bus_platdrv = {
.probe = exynos_bus_probe,
+ .shutdown = exynos_bus_shutdown,
.driver = {
.name = "exynos-bus",
.pm = &exynos_bus_pm,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index e795ad2b3f6b..567c034d0301 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -18,14 +18,17 @@
#include <linux/devfreq.h>
#include <linux/devfreq-event.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/rwsem.h>
#include <linux/suspend.h>
+#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rockchip_sip.h>
struct dram_timing {
@@ -69,8 +72,11 @@ struct rk3399_dmcfreq {
struct mutex lock;
struct dram_timing timing;
struct regulator *vdd_center;
+ struct regmap *regmap_pmu;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
+ unsigned int odt_dis_freq;
+ int odt_pd_arg0, odt_pd_arg1;
};
static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
@@ -80,6 +86,8 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
struct dev_pm_opp *opp;
unsigned long old_clk_rate = dmcfreq->rate;
unsigned long target_volt, target_rate;
+ struct arm_smccc_res res;
+ bool odt_enable = false;
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
@@ -95,6 +103,19 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
mutex_lock(&dmcfreq->lock);
+ if (target_rate >= dmcfreq->odt_dis_freq)
+ odt_enable = true;
+
+ /*
+ * This makes a SMC call to the TF-A to set the DDR PD (power-down)
+ * timings and to enable or disable the ODT (on-die termination)
+ * resistors.
+ */
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
+ dmcfreq->odt_pd_arg1,
+ ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
+ odt_enable, 0, 0, 0, &res);
+
/*
* If frequency scaling from low to high, adjust voltage first.
* If frequency scaling from high to low, adjust frequency first.
@@ -294,11 +315,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
{
struct arm_smccc_res res;
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *node;
struct rk3399_dmcfreq *data;
int ret, index, size;
uint32_t *timing;
struct dev_pm_opp *opp;
+ u32 ddr_type;
+ u32 val;
data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
if (!data)
@@ -322,7 +345,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->dmc_clk);
- };
+ }
data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
if (IS_ERR(data->edev))
@@ -354,11 +377,57 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
}
}
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+ if (node) {
+ data->regmap_pmu = syscon_node_to_regmap(node);
+ if (IS_ERR(data->regmap_pmu))
+ return PTR_ERR(data->regmap_pmu);
+ }
+
+ regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+ ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
+ RK3399_PMUGRF_DDRTYPE_MASK;
+
+ switch (ddr_type) {
+ case RK3399_PMUGRF_DDRTYPE_DDR3:
+ data->odt_dis_freq = data->timing.ddr3_odt_dis_freq;
+ break;
+ case RK3399_PMUGRF_DDRTYPE_LPDDR3:
+ data->odt_dis_freq = data->timing.lpddr3_odt_dis_freq;
+ break;
+ case RK3399_PMUGRF_DDRTYPE_LPDDR4:
+ data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
+ break;
+ default:
+ return -EINVAL;
+ };
+
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
ROCKCHIP_SIP_CONFIG_DRAM_INIT,
0, 0, 0, 0, &res);
/*
+ * In TF-A there is a platform SIP call to set the PD (power-down)
+ * timings and to enable or disable the ODT (on-die termination).
+ * This call needs three arguments as follows:
+ *
+ * arg0:
+ * bit[0-7] : sr_idle
+ * bit[8-15] : sr_mc_gate_idle
+ * bit[16-31] : standby idle
+ * arg1:
+ * bit[0-11] : pd_idle
+ * bit[16-27] : srpd_lite_idle
+ * arg2:
+ * bit[0] : odt enable
+ */
+ data->odt_pd_arg0 = (data->timing.sr_idle & 0xff) |
+ ((data->timing.sr_mc_gate_idle & 0xff) << 8) |
+ ((data->timing.standby_idle & 0xffff) << 16);
+ data->odt_pd_arg1 = (data->timing.pd_idle & 0xfff) |
+ ((data->timing.srpd_lite_idle & 0xfff) << 16);
+
+ /*
* We add a devfreq driver to our parent since it has a device tree node
* with operating points.
*/
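Editorial note: the comment above documents how the PD/ODT timings are packed into the two SIP arguments. A worked example with made-up timing values (this function exists only to show the arithmetic):

#include <linux/kernel.h>
#include <linux/types.h>

/* Worked example of the packing rules described in the comment above. */
static void demo_pack_odt_pd_args(void)
{
	u32 sr_idle = 0x10, sr_mc_gate_idle = 0x08, standby_idle = 0x0100;
	u32 pd_idle = 0x40, srpd_lite_idle = 0x20;
	u32 arg0, arg1;

	arg0 = (sr_idle & 0xff) |
	       ((sr_mc_gate_idle & 0xff) << 8) |
	       ((standby_idle & 0xffff) << 16);
	arg1 = (pd_idle & 0xfff) |
	       ((srpd_lite_idle & 0xfff) << 16);

	/* arg0 == 0x01000810, arg1 == 0x00200040 */
	pr_info("odt/pd args: %#x %#x\n", arg0, arg1);
}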
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index c59d2eee5d30..c89ba7b834ff 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -573,10 +573,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
static int tegra_governor_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
- struct tegra_devfreq *tegra;
- int ret = 0;
-
- tegra = dev_get_drvdata(devfreq->dev.parent);
+ struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
switch (event) {
case DEVFREQ_GOV_START:
@@ -600,7 +597,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
break;
}
- return ret;
+ return 0;
}
static struct devfreq_governor tegra_devfreq_governor = {
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2e5a0faa2cb1..3fc9c2efc583 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -3,7 +3,6 @@ menu "DMABUF options"
config SYNC_FILE
bool "Explicit Synchronization Framework"
default n
- select ANON_INODES
select DMA_SHARED_BUFFER
---help---
The Sync File Framework adds explicit syncronization via
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 0913a6ccab5a..1f006e083eb9 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,4 +1,5 @@
-obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
+ reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
new file mode 100644
index 000000000000..93c42078cb57
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -0,0 +1,242 @@
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dma-fence-chain.h>
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
+ * @chain: chain node to get the previous node from
+ *
+ * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
+ * chain node.
+ */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
+{
+ struct dma_fence *prev;
+
+ rcu_read_lock();
+ prev = dma_fence_get_rcu_safe(&chain->prev);
+ rcu_read_unlock();
+ return prev;
+}
+
+/**
+ * dma_fence_chain_walk - chain walking function
+ * @fence: current chain node
+ *
+ * Walk the chain to the next node. Returns the next fence or NULL if we are at
+ * the end of the chain. Garbage collects chain nodes which are already
+ * signaled.
+ */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain, *prev_chain;
+ struct dma_fence *prev, *replacement, *tmp;
+
+ chain = to_dma_fence_chain(fence);
+ if (!chain) {
+ dma_fence_put(fence);
+ return NULL;
+ }
+
+ while ((prev = dma_fence_chain_get_prev(chain))) {
+
+ prev_chain = to_dma_fence_chain(prev);
+ if (prev_chain) {
+ if (!dma_fence_is_signaled(prev_chain->fence))
+ break;
+
+ replacement = dma_fence_chain_get_prev(prev_chain);
+ } else {
+ if (!dma_fence_is_signaled(prev))
+ break;
+
+ replacement = NULL;
+ }
+
+ tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
+ if (tmp == prev)
+ dma_fence_put(tmp);
+ else
+ dma_fence_put(replacement);
+ dma_fence_put(prev);
+ }
+
+ dma_fence_put(fence);
+ return prev;
+}
+EXPORT_SYMBOL(dma_fence_chain_walk);
+
+/**
+ * dma_fence_chain_find_seqno - find fence chain node by seqno
+ * @pfence: pointer to the chain node where to start
+ * @seqno: the sequence number to search for
+ *
+ * Advance the fence pointer to the chain node which will signal this sequence
+ * number. If no sequence number is provided then this is a no-op.
+ *
+ * Returns -EINVAL if the fence is not a chain node or the sequence number has
+ * not yet advanced far enough.
+ */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
+{
+ struct dma_fence_chain *chain;
+
+ if (!seqno)
+ return 0;
+
+ chain = to_dma_fence_chain(*pfence);
+ if (!chain || chain->base.seqno < seqno)
+ return -EINVAL;
+
+ dma_fence_chain_for_each(*pfence, &chain->base) {
+ if ((*pfence)->context != chain->base.context ||
+ to_dma_fence_chain(*pfence)->prev_seqno < seqno)
+ break;
+ }
+ dma_fence_put(&chain->base);
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
+{
+ return "dma_fence_chain";
+}
+
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static void dma_fence_chain_irq_work(struct irq_work *work)
+{
+ struct dma_fence_chain *chain;
+
+ chain = container_of(work, typeof(*chain), work);
+
+ /* Try to rearm the callback */
+ if (!dma_fence_chain_enable_signaling(&chain->base))
+ /* Ok, we are done. No more unsignaled fences left */
+ dma_fence_signal(&chain->base);
+ dma_fence_put(&chain->base);
+}
+
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct dma_fence_chain *chain;
+
+ chain = container_of(cb, typeof(*chain), cb);
+ irq_work_queue(&chain->work);
+ dma_fence_put(f);
+}
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
+{
+ struct dma_fence_chain *head = to_dma_fence_chain(fence);
+
+ dma_fence_get(&head->base);
+ dma_fence_chain_for_each(fence, &head->base) {
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ struct dma_fence *f = chain ? chain->fence : fence;
+
+ dma_fence_get(f);
+ if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
+ dma_fence_put(fence);
+ return true;
+ }
+ dma_fence_put(f);
+ }
+ dma_fence_put(&head->base);
+ return false;
+}
+
+static bool dma_fence_chain_signaled(struct dma_fence *fence)
+{
+ dma_fence_chain_for_each(fence, fence) {
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ struct dma_fence *f = chain ? chain->fence : fence;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_put(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void dma_fence_chain_release(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+ dma_fence_put(rcu_dereference_protected(chain->prev, true));
+ dma_fence_put(chain->fence);
+ dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_chain_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = dma_fence_chain_get_driver_name,
+ .get_timeline_name = dma_fence_chain_get_timeline_name,
+ .enable_signaling = dma_fence_chain_enable_signaling,
+ .signaled = dma_fence_chain_signaled,
+ .release = dma_fence_chain_release,
+};
+EXPORT_SYMBOL(dma_fence_chain_ops);
+
+/**
+ * dma_fence_chain_init - initialize a fence chain
+ * @chain: the chain node to initialize
+ * @prev: the previous fence
+ * @fence: the current fence
+ *
+ * Initialize a new chain node and either start a new chain or add the node to
+ * the existing chain of the previous fence.
+ */
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+ struct dma_fence *prev,
+ struct dma_fence *fence,
+ uint64_t seqno)
+{
+ struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
+ uint64_t context;
+
+ spin_lock_init(&chain->lock);
+ rcu_assign_pointer(chain->prev, prev);
+ chain->fence = fence;
+ chain->prev_seqno = 0;
+ init_irq_work(&chain->work, dma_fence_chain_irq_work);
+
+ /* Try to reuse the context of the previous chain node. */
+ if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+ context = prev->context;
+ chain->prev_seqno = prev->seqno;
+ } else {
+ context = dma_fence_context_alloc(1);
+ /* Make sure that we always have a valid sequence number. */
+ if (prev_chain)
+ seqno = max(prev->seqno, seqno);
+ }
+
+ dma_fence_init(&chain->base, &dma_fence_chain_ops,
+ &chain->lock, context, seqno);
+}
+EXPORT_SYMBOL(dma_fence_chain_init);
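Editorial note: a short usage sketch for the new chain API may help; dma_fence_chain_for_each() and to_dma_fence_chain() come from the <linux/dma-fence-chain.h> header added alongside this file, while the demo_* wrappers and the error handling shown are illustrative only:

#include <linux/dma-fence-chain.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Link a fence onto an existing timeline head at sequence point "point".
 * The chain node keeps the references passed in and drops them from its
 * release callback.  "prev" may be NULL for an empty timeline.
 */
static struct dma_fence *demo_chain_append(struct dma_fence *prev,
					   struct dma_fence *fence,
					   u64 point)
{
	struct dma_fence_chain *chain;

	chain = kmalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;

	dma_fence_chain_init(chain, prev, fence, point);
	return &chain->base;
}

/* Walk the whole chain; the iterator handles references itself. */
static void demo_chain_dump(struct dma_fence *head)
{
	struct dma_fence *iter;

	dma_fence_chain_for_each(iter, head)
		pr_info("seqno %llu signaled=%d\n", iter->seqno,
			dma_fence_is_signaled(iter));
}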
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3aa8733f832a..9bf06042619a 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -29,6 +29,7 @@
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c1618335ca99..4d32e2c67862 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -73,6 +73,8 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
struct reservation_object_list *old, *new;
unsigned int i, j, k, max;
+ reservation_object_assert_held(obj);
+
old = reservation_object_get_list(obj);
if (old && old->shared_max) {
@@ -151,6 +153,8 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
dma_fence_get(fence);
+ reservation_object_assert_held(obj);
+
fobj = reservation_object_get_list(obj);
count = fobj->shared_count;
@@ -196,6 +200,8 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
struct reservation_object_list *old;
u32 i = 0;
+ reservation_object_assert_held(obj);
+
old = reservation_object_get_list(obj);
if (old)
i = old->shared_count;
@@ -236,6 +242,8 @@ int reservation_object_copy_fences(struct reservation_object *dst,
size_t size;
unsigned i;
+ reservation_object_assert_held(dst);
+
rcu_read_lock();
src_list = rcu_dereference(src->fence);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 32dcf7b4c935..119b2ffbc2c9 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -161,7 +161,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = dma_fence_parent(fence);
- return !__dma_fence_is_later(fence->seqno, parent->value);
+ return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
static bool timeline_fence_enable_signaling(struct dma_fence *fence)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 4f6305ca52c8..ed3fb6e5224c 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -258,7 +258,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
i_b++;
} else {
- if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno))
+ if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
+ pt_a->ops))
add_fence(fences, &i, pt_a);
else
add_fence(fences, &i, pt_b);
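
The extra ops argument passed in the sw_sync and sync_file hunks above lets the comparison honor the new use_64bit_seqno flag. A simplified model of such a wrap-aware comparison, written as an approximation for illustration rather than the kernel's exact implementation, might look like this:

#include <stdbool.h>
#include <stdint.h>

/*
 * Simplified model: with 64-bit seqnos compare directly, otherwise fall
 * back to a wrap-safe signed comparison on the low 32 bits.
 */
static bool example_seqno_is_later(uint64_t f1, uint64_t f2, bool use_64bit)
{
	if (use_64bit)
		return f1 > f2;

	return (int32_t)((uint32_t)f1 - (uint32_t)f2) > 0;
}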
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 0b1dfb5bf2d9..eaf78f4e07ce 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -99,7 +99,7 @@ config AT_XDMAC
config AXI_DMAC
tristate "Analog Devices AXI-DMAC DMA support"
- depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
+ depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fc8c2bab563c..8cfc753ad4b0 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -254,6 +254,7 @@ enum pl08x_dma_chan_state {
* @slave: whether this channel is a device (slave) or for memcpy
* @signal: the physical DMA request signal which this channel is using
* @mux_use: count of descriptors using this DMA request signal setting
+ * @waiting_at: time in jiffies when this channel moved to waiting state
*/
struct pl08x_dma_chan {
struct virt_dma_chan vc;
@@ -267,6 +268,7 @@ struct pl08x_dma_chan {
bool slave;
int signal;
unsigned mux_use;
+ unsigned long waiting_at;
};
/**
@@ -875,6 +877,7 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
if (!ch) {
dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
plchan->state = PL08X_CHAN_WAITING;
+ plchan->waiting_at = jiffies;
return;
}
@@ -913,22 +916,29 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_dma_chan *p, *next;
-
+ unsigned long waiting_at;
retry:
next = NULL;
+ waiting_at = jiffies;
- /* Find a waiting virtual channel for the next transfer. */
+ /*
+ * Find a waiting virtual channel for the next transfer.
+ * To be fair, the time at which each channel entered the waiting state is
+ * compared and the channel that has been waiting longest is selected.
+ */
list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
- if (p->state == PL08X_CHAN_WAITING) {
+ if (p->state == PL08X_CHAN_WAITING &&
+ p->waiting_at <= waiting_at) {
next = p;
- break;
+ waiting_at = p->waiting_at;
}
if (!next && pl08x->has_slave) {
list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
- if (p->state == PL08X_CHAN_WAITING) {
+ if (p->state == PL08X_CHAN_WAITING &&
+ p->waiting_at <= waiting_at) {
next = p;
- break;
+ waiting_at = p->waiting_at;
}
}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index fe69dccfa0c0..e4ae2ee46d3f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -308,6 +308,11 @@ static inline int at_xdmac_csize(u32 maxburst)
return csize;
};
+static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
+{
+ return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
+}
+
static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
@@ -389,7 +394,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
- reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+ reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
+ /*
+ * Request Overflow Error is only for peripheral synchronized transfers
+ */
+ if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
+ reg |= AT_XDMAC_CIE_ROIE;
+
/*
* There is no end of list when doing cyclic dma, we need to get
* an interrupt after each periods.
@@ -1575,6 +1586,46 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
dmaengine_desc_get_callback_invoke(txd, NULL);
}
+static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ struct at_xdmac_desc *bad_desc;
+
+ /*
+ * The descriptor currently at the head of the active list is
+ * broken. Since we don't have any way to report errors, we'll
+ * just have to scream loudly and try to continue with other
+ * descriptors queued (if any).
+ */
+ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+ dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+ dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+ if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+ dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+ spin_lock_bh(&atchan->lock);
+
+ /* Channel must be disabled first as it's not done automatically */
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+ while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+ cpu_relax();
+
+ bad_desc = list_first_entry(&atchan->xfers_list,
+ struct at_xdmac_desc,
+ xfer_node);
+
+ spin_unlock_bh(&atchan->lock);
+
+ /* Print bad descriptor's details if needed */
+ dev_dbg(chan2dev(&atchan->chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+ __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
+ bad_desc->lld.mbr_ubc);
+
+ /* Then continue with usual descriptor management */
+}
+
static void at_xdmac_tasklet(unsigned long data)
{
struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
@@ -1594,19 +1645,19 @@ static void at_xdmac_tasklet(unsigned long data)
|| (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd;
- if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
- dev_err(chan2dev(&atchan->chan), "read bus error!!!");
- if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
- dev_err(chan2dev(&atchan->chan), "write bus error!!!");
- if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
- dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+ if (atchan->irq_status & error_mask)
+ at_xdmac_handle_error(atchan);
spin_lock(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc,
xfer_node);
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
- BUG_ON(!desc->active_xfer);
+ if (!desc->active_xfer) {
+ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+ spin_unlock(&atchan->lock);
+ return;
+ }
txd = &desc->tx_dma_desc;
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 72878ac5c78d..fa81d0177765 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1459,8 +1459,7 @@ static void sba_receive_message(struct mbox_client *cl, void *msg)
static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
- struct platform_device *pdev = to_platform_device(file->private);
- struct sba_device *sba = platform_get_drvdata(pdev);
+ struct sba_device *sba = dev_get_drvdata(file->private);
/* Write stats in file */
sba_write_stats_in_seqfile(sba, file);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index ec8a291d62ba..8101ff2f05c1 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -671,7 +671,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
d = bcm2835_dma_create_cb_chain(chan, direction, false,
info, extra,
frames, src, dst, 0, 0,
- GFP_KERNEL);
+ GFP_NOWAIT);
if (!d)
return NULL;
@@ -891,7 +891,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
- dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index ffc0adc2f6ce..f32fdf21edbd 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -166,7 +166,7 @@ static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
- if (len == 0 || len > chan->max_length)
+ if (len == 0)
return false;
if ((len & chan->align_mask) != 0) /* Not aligned */
return false;
@@ -379,6 +379,49 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
return desc;
}
+static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
+ enum dma_transfer_direction direction, dma_addr_t addr,
+ unsigned int num_periods, unsigned int period_len,
+ struct axi_dmac_sg *sg)
+{
+ unsigned int num_segments, i;
+ unsigned int segment_size;
+ unsigned int len;
+
+ /* Split into multiple equally sized segments if necessary */
+ num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+ segment_size = DIV_ROUND_UP(period_len, num_segments);
+ /* Take care of alignment */
+ segment_size = ((segment_size - 1) | chan->align_mask) + 1;
+
+ for (i = 0; i < num_periods; i++) {
+ len = period_len;
+
+ while (len > segment_size) {
+ if (direction == DMA_DEV_TO_MEM)
+ sg->dest_addr = addr;
+ else
+ sg->src_addr = addr;
+ sg->x_len = segment_size;
+ sg->y_len = 1;
+ sg++;
+ addr += segment_size;
+ len -= segment_size;
+ }
+
+ if (direction == DMA_DEV_TO_MEM)
+ sg->dest_addr = addr;
+ else
+ sg->src_addr = addr;
+ sg->x_len = len;
+ sg->y_len = 1;
+ sg++;
+ addr += len;
+ }
+
+ return sg;
+}
+
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
struct dma_chan *c, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -386,16 +429,24 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac_desc *desc;
+ struct axi_dmac_sg *dsg;
struct scatterlist *sg;
+ unsigned int num_sgs;
unsigned int i;
if (direction != chan->direction)
return NULL;
- desc = axi_dmac_alloc_desc(sg_len);
+ num_sgs = 0;
+ for_each_sg(sgl, sg, sg_len, i)
+ num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
+
+ desc = axi_dmac_alloc_desc(num_sgs);
if (!desc)
return NULL;
+ dsg = desc->sg;
+
for_each_sg(sgl, sg, sg_len, i) {
if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
!axi_dmac_check_len(chan, sg_dma_len(sg))) {
@@ -403,12 +454,8 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
return NULL;
}
- if (direction == DMA_DEV_TO_MEM)
- desc->sg[i].dest_addr = sg_dma_address(sg);
- else
- desc->sg[i].src_addr = sg_dma_address(sg);
- desc->sg[i].x_len = sg_dma_len(sg);
- desc->sg[i].y_len = 1;
+ dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
+ sg_dma_len(sg), dsg);
}
desc->cyclic = false;
@@ -423,7 +470,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac_desc *desc;
- unsigned int num_periods, i;
+ unsigned int num_periods, num_segments;
if (direction != chan->direction)
return NULL;
@@ -436,20 +483,14 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
return NULL;
num_periods = buf_len / period_len;
+ num_segments = DIV_ROUND_UP(period_len, chan->max_length);
- desc = axi_dmac_alloc_desc(num_periods);
+ desc = axi_dmac_alloc_desc(num_periods * num_segments);
if (!desc)
return NULL;
- for (i = 0; i < num_periods; i++) {
- if (direction == DMA_DEV_TO_MEM)
- desc->sg[i].dest_addr = buf_addr;
- else
- desc->sg[i].src_addr = buf_addr;
- desc->sg[i].x_len = period_len;
- desc->sg[i].y_len = 1;
- buf_addr += period_len;
- }
+ axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
+ period_len, desc->sg);
desc->cyclic = true;
@@ -485,7 +526,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
if (chan->hw_2d) {
if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
- !axi_dmac_check_len(chan, xt->numf))
+ xt->numf == 0)
return NULL;
if (xt->sgl[0].size + dst_icg > chan->max_length ||
xt->sgl[0].size + src_icg > chan->max_length)
@@ -577,15 +618,6 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
return ret;
chan->dest_width = val / 8;
- ret = of_property_read_u32(of_chan, "adi,length-width", &val);
- if (ret)
- return ret;
-
- if (val >= 32)
- chan->max_length = UINT_MAX;
- else
- chan->max_length = (1ULL << val) - 1;
-
chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
@@ -597,12 +629,27 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
else
chan->direction = DMA_DEV_TO_DEV;
- chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
- chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
-
return 0;
}
+static void axi_dmac_detect_caps(struct axi_dmac *dmac)
+{
+ struct axi_dmac_chan *chan = &dmac->chan;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
+ if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
+ chan->hw_cyclic = true;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
+ if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
+ chan->hw_2d = true;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
+ chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
+ if (chan->max_length != UINT_MAX)
+ chan->max_length++;
+}
+
static int axi_dmac_probe(struct platform_device *pdev)
{
struct device_node *of_channels, *of_chan;
@@ -647,11 +694,12 @@ static int axi_dmac_probe(struct platform_device *pdev)
of_node_put(of_channels);
pdev->dev.dma_parms = &dmac->dma_parms;
- dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
dma_dev = &dmac->dma_dev;
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_issue_pending = axi_dmac_issue_pending;
@@ -675,6 +723,8 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ axi_dmac_detect_caps(dmac);
+
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
ret = dma_async_device_register(dma_dev);
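
To make the splitting arithmetic in the new axi_dmac_fill_linear_sg() helper concrete, here is a standalone sketch with made-up numbers; the values of period_len, max_length and align_mask are illustrative only, while the real driver derives max_length and align_mask from the hardware as shown in axi_dmac_detect_caps() above.

/* Illustrative only: mirrors the segment-splitting arithmetic, not driver code. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int period_len = 10000, max_length = 4096, align_mask = 3;
	unsigned int num_segments = DIV_ROUND_UP(period_len, max_length);
	unsigned int segment_size = DIV_ROUND_UP(period_len, num_segments);

	/* Round up to the next multiple of (align_mask + 1), i.e. 4 bytes here. */
	segment_size = ((segment_size - 1) | align_mask) + 1;

	/* 10000 bytes -> 3 segments of at most 3336 bytes each. */
	printf("%u segments of up to %u bytes\n", num_segments, segment_size);
	return 0;
}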
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index b435d8e1e3a1..c53f76eeb4d3 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -136,7 +136,7 @@ struct fsl_edma_desc {
};
enum edma_version {
- v1, /* 32ch, Vybdir, mpc57x, etc */
+ v1, /* 32ch, Vybrid, mpc57x, etc */
v2, /* 64ch Coldfire */
};
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 75e8a7ba3a22..d641ef85a634 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -144,21 +144,21 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
if (ret) {
dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
- return ret;
+ return ret;
}
} else {
ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
if (ret) {
dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
- return ret;
+ return ret;
}
ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
if (ret) {
dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
- return ret;
+ return ret;
}
}
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 0baf9797cc09..07fd4f25cdd8 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -19,10 +19,9 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "idma64.h"
+#include <linux/dma/idma64.h>
-/* Platform driver name */
-#define DRV_NAME "idma64"
+#include "idma64.h"
/* For now we support only two channels */
#define IDMA64_NR_CHAN 2
@@ -592,7 +591,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- idma64->dma.dev = chip->dev;
+ idma64->dma.dev = chip->sysdev;
dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
@@ -632,6 +631,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
{
struct idma64_chip *chip;
struct device *dev = &pdev->dev;
+ struct device *sysdev = dev->parent;
struct resource *mem;
int ret;
@@ -648,11 +648,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
chip->dev = dev;
+ chip->sysdev = sysdev;
ret = idma64_probe(chip);
if (ret)
@@ -697,7 +698,7 @@ static struct platform_driver idma64_platform_driver = {
.probe = idma64_platform_probe,
.remove = idma64_platform_remove,
.driver = {
- .name = DRV_NAME,
+ .name = LPSS_IDMA64_DRIVER_NAME,
.pm = &idma64_dev_pm_ops,
},
};
@@ -707,4 +708,4 @@ module_platform_driver(idma64_platform_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("iDMA64 core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:" LPSS_IDMA64_DRIVER_NAME);
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index 6b816878e5e7..baa32e1425de 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
/**
* struct idma64_chip - representation of iDMA 64-bit controller hardware
* @dev: struct device of the DMA controller
+ * @sysdev: struct device of the physical device that does DMA
* @irq: irq line
* @regs: memory mapped I/O space
* @idma64: struct idma64 that is filled by idma64_probe()
*/
struct idma64_chip {
struct device *dev;
+ struct device *sysdev;
int irq;
void __iomem *regs;
struct idma64 *idma64;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 5f3c1378b90e..99d9f431ae2c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -419,6 +419,7 @@ struct sdma_driver_data {
int chnenbl0;
int num_events;
struct sdma_script_start_addrs *script_addrs;
+ bool check_ratio;
};
struct sdma_engine {
@@ -557,6 +558,13 @@ static struct sdma_driver_data sdma_imx7d = {
.script_addrs = &sdma_script_imx7d,
};
+static struct sdma_driver_data sdma_imx8mq = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx7d,
+ .check_ratio = 1,
+};
+
static const struct platform_device_id sdma_devtypes[] = {
{
.name = "imx25-sdma",
@@ -580,6 +588,9 @@ static const struct platform_device_id sdma_devtypes[] = {
.name = "imx7d-sdma",
.driver_data = (unsigned long)&sdma_imx7d,
}, {
+ .name = "imx8mq-sdma",
+ .driver_data = (unsigned long)&sdma_imx8mq,
+ }, {
/* sentinel */
}
};
@@ -593,6 +604,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+ { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -1852,7 +1864,8 @@ static int sdma_init(struct sdma_engine *sdma)
if (ret)
goto disable_clk_ipg;
- if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
+ if (sdma->drvdata->check_ratio &&
+ (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
sdma->clk_ratio = 1;
/* Be sure SDMA has not started yet */
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 131f3974740d..814853842e29 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
#else
- mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
+ mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
#endif
/* setup the length */
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index a67b292190f4..594409a6e975 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1491,14 +1491,14 @@ MODULE_DEVICE_TABLE(platform, nbpf_ids);
#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
- struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+ struct nbpf_device *nbpf = dev_get_drvdata(dev);
clk_disable_unprepare(nbpf->clk);
return 0;
}
static int nbpf_runtime_resume(struct device *dev)
{
- struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+ struct nbpf_device *nbpf = dev_get_drvdata(dev);
return clk_prepare_enable(nbpf->clk);
}
#endif
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index eec79fdf27a5..6e6837214210 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -11,6 +11,7 @@
* (at your option) any later version.
*/
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
@@ -966,6 +967,7 @@ static void _stop(struct pl330_thread *thrd)
{
void __iomem *regs = thrd->dmac->base;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
+ u32 inten = readl(regs + INTEN);
if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
@@ -978,10 +980,13 @@ static void _stop(struct pl330_thread *thrd)
_emit_KILL(0, insn);
- /* Stop generating interrupts for SEV */
- writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
-
_execute_DBGINSN(thrd, insn, is_manager(thrd));
+
+ /* clear the event */
+ if (inten & (1 << thrd->ev))
+ writel(1 << thrd->ev, regs + INTCLR);
+ /* Stop generating interrupts for SEV */
+ writel(inten & ~(1 << thrd->ev), regs + INTEN);
}
/* Start doing req 'idx' of thread 'thrd' */
@@ -2896,6 +2901,55 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+#ifdef CONFIG_DEBUG_FS
+static int pl330_debugfs_show(struct seq_file *s, void *data)
+{
+ struct pl330_dmac *pl330 = s->private;
+ int chans, pchs, ch, pr;
+
+ chans = pl330->pcfg.num_chan;
+ pchs = pl330->num_peripherals;
+
+ seq_puts(s, "PL330 physical channels:\n");
+ seq_puts(s, "THREAD:\t\tCHANNEL:\n");
+ seq_puts(s, "--------\t-----\n");
+ for (ch = 0; ch < chans; ch++) {
+ struct pl330_thread *thrd = &pl330->channels[ch];
+ int found = -1;
+
+ for (pr = 0; pr < pchs; pr++) {
+ struct dma_pl330_chan *pch = &pl330->peripherals[pr];
+
+ if (!pch->thread || thrd->id != pch->thread->id)
+ continue;
+
+ found = pr;
+ }
+
+ seq_printf(s, "%d\t\t", thrd->id);
+ if (found == -1)
+ seq_puts(s, "--\n");
+ else
+ seq_printf(s, "%d\n", found);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);
+
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+ debugfs_create_file(dev_name(pl330->ddma.dev),
+ S_IFREG | 0444, NULL, pl330,
+ &pl330_debugfs_fops);
+}
+#else
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+}
+#endif
+
/*
* Runtime PM callbacks are provided by amba/bus.c driver.
*
@@ -3082,6 +3136,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
dev_err(&adev->dev, "unable to set the seg size\n");
+ init_pl330_debugfs(pl330);
dev_info(&adev->dev,
"Loaded driver for PL330 DMAC-%x\n", adev->periphid);
dev_info(&adev->dev,
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b4f25698169..33ab1b607e2b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Renesas R-Car Gen2 DMA Controller Driver
+ * Renesas R-Car Gen2/Gen3 DMA Controller Driver
*
- * Copyright (C) 2014 Renesas Electronics Inc.
+ * Copyright (C) 2014-2019 Renesas Electronics Inc.
*
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
enum dma_status status;
unsigned int residue = 0;
unsigned int dptr = 0;
+ unsigned int chcrb;
+ unsigned int tcrb;
+ unsigned int i;
if (!desc)
return 0;
@@ -1330,14 +1333,31 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
}
/*
+ * We need to read two registers.
+ * Make sure the control register does not skip to the next chunk
+ * while reading the counter.
+ * Trying it 3 times should be enough: Initial read, retry, retry
+ * for the paranoid.
+ */
+ for (i = 0; i < 3; i++) {
+ chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+ RCAR_DMACHCRB_DPTR_MASK;
+ tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+ /* Still the same? */
+ if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+ RCAR_DMACHCRB_DPTR_MASK))
+ break;
+ }
+ WARN_ONCE(i >= 3, "residue might not be continuous!");
+
+ /*
* In descriptor mode the descriptor running pointer is not maintained
* by the interrupt handler, find the running descriptor from the
* descriptor pointer field in the CHCRB register. In non-descriptor
* mode just use the running descriptor pointer.
*/
if (desc->hwdescs.use) {
- dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
- RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+ dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
if (dptr == 0)
dptr = desc->nchunks;
dptr--;
@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
}
/* Add the residue for the current chunk. */
- residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+ residue += tcrb << desc->xfer_shift;
return residue;
}
@@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
enum dma_status status;
unsigned long flags;
unsigned int residue;
+ bool cyclic;
status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE || !txstate)
@@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&rchan->lock, flags);
residue = rcar_dmac_chan_get_residue(rchan, cookie);
+ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
spin_unlock_irqrestore(&rchan->lock, flags);
/* if there's no residue, the cookie is complete */
- if (!residue)
+ if (!residue && !cyclic)
return DMA_COMPLETE;
dma_set_residue(txstate, residue);
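
The three-attempt loop added above is an instance of a common "read, read, re-read" pattern for sampling two coupled registers consistently. A generic sketch of the pattern follows; read_reg_a() and read_reg_b() are hypothetical stand-ins, not driver functions.

/* Hypothetical register accessors standing in for the two coupled registers. */
extern unsigned int read_reg_a(void);	/* e.g. the descriptor pointer */
extern unsigned int read_reg_b(void);	/* e.g. the transfer counter   */

/* Returns a consistent (a, b) sample in the common case. */
static void example_sample_pair(unsigned int *a, unsigned int *b)
{
	int i;

	for (i = 0; i < 3; i++) {
		*a = read_reg_a();
		*b = read_reg_b();
		if (*a == read_reg_a())	/* a unchanged: the pair belongs together */
			break;
	}
}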
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index ba239b529fa9..88d9c6c4389f 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1042,33 +1042,97 @@ static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
return ndtr << width;
}
+/**
+ * stm32_dma_is_current_sg - check that the expected sg_req is currently transferred
+ * @chan: dma channel
+ *
+ * This function, called while IRQs are disabled, checks that the hardware has
+ * not switched to the next transfer in double buffer mode. The test compares
+ * the next_sg memory address with the relevant hardware register (selected by
+ * the CT bit value).
+ *
+ * Returns true if the expected current transfer is still running or double
+ * buffer mode is not activated.
+ */
+static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ struct stm32_dma_sg_req *sg_req;
+ u32 dma_scr, dma_smar, id;
+
+ id = chan->id;
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+ if (!(dma_scr & STM32_DMA_SCR_DBM))
+ return true;
+
+ sg_req = &chan->desc->sg_req[chan->next_sg];
+
+ if (dma_scr & STM32_DMA_SCR_CT) {
+ dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
+ return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+ }
+
+ dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
+
+ return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+}
+
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
struct stm32_dma_desc *desc,
u32 next_sg)
{
u32 modulo, burst_size;
- u32 residue = 0;
+ u32 residue;
+ u32 n_sg = next_sg;
+ struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
int i;
/*
- * In cyclic mode, for the last period, residue = remaining bytes from
- * NDTR
+ * Calculating the residue means computing two pieces of descriptor
+ * information:
+ * - the sg_req currently being transferred
+ * - the hardware's remaining position within this sg (the NDTR bit field).
+ *
+ * A race condition may occur if the DMA is running in cyclic or double
+ * buffer mode, since the DMA registers are automatically reloaded at the
+ * end of each period transfer. The hardware may have switched to the next
+ * transfer (CT bit updated) just before the position (SxNDTR register) is
+ * read.
+ * In this case the SxNDTR register may or may not correspond to the new
+ * transfer position rather than the expected one.
+ * The strategy implemented in the stm32 driver is to:
+ * - read the SxNDTR register
+ * - cross-check that the hardware is still on the current transfer.
+ * In case of a switch, we can assume that the DMA is at the beginning of
+ * the next transfer, so we approximate the residue accordingly by pointing
+ * at the beginning of the next transfer.
+ *
+ * This race condition does not apply to non-cyclic mode, as double
+ * buffering is not used and the registers are updated by software in that
+ * case.
*/
- if (chan->desc->cyclic && next_sg == 0) {
- residue = stm32_dma_get_remaining_bytes(chan);
- goto end;
+
+ residue = stm32_dma_get_remaining_bytes(chan);
+
+ if (!stm32_dma_is_current_sg(chan)) {
+ n_sg++;
+ if (n_sg == chan->desc->num_sgs)
+ n_sg = 0;
+ residue = sg_req->len;
}
/*
- * For all other periods in cyclic mode, and in sg mode,
- * residue = remaining bytes from NDTR + remaining periods/sg to be
- * transferred
+ * In cyclic mode, for the last period, residue = remaining bytes
+ * from NDTR; for all other periods in cyclic mode, and in sg mode,
+ * residue = remaining bytes from NDTR + remaining periods/sg to be
+ * transferred.
*/
- for (i = next_sg; i < desc->num_sgs; i++)
- residue += desc->sg_req[i].len;
- residue += stm32_dma_get_remaining_bytes(chan);
+ if (!chan->desc->cyclic || n_sg != 0)
+ for (i = n_sg; i < desc->num_sgs; i++)
+ residue += desc->sg_req[i].len;
-end:
if (!chan->mem_burst)
return residue;
@@ -1302,13 +1366,16 @@ static int stm32_dma_probe(struct platform_device *pdev)
for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
chan = &dmadev->chan[i];
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res) {
- ret = -EINVAL;
- dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "No irq resource for chan %d\n", i);
goto err_unregister;
}
- chan->irq = res->start;
+ chan->irq = ret;
+
ret = devm_request_irq(&pdev->dev, chan->irq,
stm32_dma_chan_irq, 0,
dev_name(chan2dev(chan)), chan);
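
A compact restatement of the residue strategy described in the comment above, with hypothetical helper names; this models the idea only and does not reproduce the driver's exact sg bookkeeping.

#include <stdbool.h>

/* Hypothetical helpers standing in for the driver's register reads. */
extern unsigned int read_ndtr_bytes(void);	/* bytes left per SxNDTR        */
extern bool hw_still_on_expected_sg(void);	/* the cross-check shown above  */
extern unsigned int approximated_sg_len(void);	/* full sg length used as fallback */

static unsigned int example_current_sg_residue(void)
{
	unsigned int residue = read_ndtr_bytes();

	if (!hw_still_on_expected_sg()) {
		/*
		 * The hardware switched between the two reads: assume the
		 * next transfer has just started and fall back to a full
		 * sg length as the approximation.
		 */
		residue = approximated_sg_len();
	}

	return residue;
}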
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 5ec0dd97b397..21f6be16d013 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -22,7 +22,6 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
-#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -31,35 +30,33 @@
#define ADMA_CH_CMD 0x00
#define ADMA_CH_STATUS 0x0c
#define ADMA_CH_STATUS_XFER_EN BIT(0)
+#define ADMA_CH_STATUS_XFER_PAUSED BIT(1)
#define ADMA_CH_INT_STATUS 0x10
#define ADMA_CH_INT_STATUS_XFER_DONE BIT(0)
#define ADMA_CH_INT_CLEAR 0x1c
#define ADMA_CH_CTRL 0x24
-#define ADMA_CH_CTRL_TX_REQ(val) (((val) & 0xf) << 28)
-#define ADMA_CH_CTRL_TX_REQ_MAX 10
-#define ADMA_CH_CTRL_RX_REQ(val) (((val) & 0xf) << 24)
-#define ADMA_CH_CTRL_RX_REQ_MAX 10
#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
+#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0
#define ADMA_CH_CONFIG 0x28
#define ADMA_CH_CONFIG_SRC_BUF(val) (((val) & 0x7) << 28)
#define ADMA_CH_CONFIG_TRG_BUF(val) (((val) & 0x7) << 24)
-#define ADMA_CH_CONFIG_BURST_SIZE(val) (((val) & 0x7) << 20)
-#define ADMA_CH_CONFIG_BURST_16 5
+#define ADMA_CH_CONFIG_BURST_SIZE_SHIFT 20
+#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24)
#define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16)
-#define ADMA_CH_FIFO_CTRL_TX_SIZE(val) (((val) & 0xf) << 8)
-#define ADMA_CH_FIFO_CTRL_RX_SIZE(val) ((val) & 0xf)
+#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT 8
+#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT 0
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@@ -69,25 +66,41 @@
#define ADMA_CH_XFER_STATUS 0x54
#define ADMA_CH_XFER_STATUS_COUNT_MASK 0xffff
-#define ADMA_GLOBAL_CMD 0xc00
-#define ADMA_GLOBAL_SOFT_RESET 0xc04
-#define ADMA_GLOBAL_INT_CLEAR 0xc20
-#define ADMA_GLOBAL_CTRL 0xc24
+#define ADMA_GLOBAL_CMD 0x00
+#define ADMA_GLOBAL_SOFT_RESET 0x04
-#define ADMA_CH_REG_OFFSET(a) (a * 0x80)
+#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
#define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
- ADMA_CH_FIFO_CTRL_STARV_THRES(1) | \
- ADMA_CH_FIFO_CTRL_TX_SIZE(3) | \
- ADMA_CH_FIFO_CTRL_RX_SIZE(3))
+ ADMA_CH_FIFO_CTRL_STARV_THRES(1))
+
+#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
+
struct tegra_adma;
/*
* struct tegra_adma_chip_data - Tegra chip specific data
+ * @adma_get_burst_config: Function callback used to set the DMA burst size.
+ * @global_reg_offset: Register offset of DMA global register.
+ * @global_int_clear: Register offset of DMA global interrupt clear.
+ * @ch_req_tx_shift: Register field shift for the AHUB transmit channel select.
+ * @ch_req_rx_shift: Register field shift for the AHUB receive channel select.
+ * @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_req_mask: Mask for Tx or Rx channel select.
+ * @ch_req_max: Maximum number of Tx or Rx channels available.
+ * @ch_reg_size: Size of DMA channel register space.
* @nr_channels: Number of DMA channels available.
*/
struct tegra_adma_chip_data {
- int nr_channels;
+ unsigned int (*adma_get_burst_config)(unsigned int burst_size);
+ unsigned int global_reg_offset;
+ unsigned int global_int_clear;
+ unsigned int ch_req_tx_shift;
+ unsigned int ch_req_rx_shift;
+ unsigned int ch_base_offset;
+ unsigned int ch_req_mask;
+ unsigned int ch_req_max;
+ unsigned int ch_reg_size;
+ unsigned int nr_channels;
};
/*
@@ -99,6 +112,7 @@ struct tegra_adma_chan_regs {
unsigned int src_addr;
unsigned int trg_addr;
unsigned int fifo_ctrl;
+ unsigned int cmd;
unsigned int tc;
};
@@ -128,6 +142,7 @@ struct tegra_adma_chan {
enum dma_transfer_direction sreq_dir;
unsigned int sreq_index;
bool sreq_reserved;
+ struct tegra_adma_chan_regs ch_regs;
/* Transfer count and position info */
unsigned int tx_buf_count;
@@ -141,6 +156,7 @@ struct tegra_adma {
struct dma_device dma_dev;
struct device *dev;
void __iomem *base_addr;
+ struct clk *ahub_clk;
unsigned int nr_channels;
unsigned long rx_requests_reserved;
unsigned long tx_requests_reserved;
@@ -148,18 +164,20 @@ struct tegra_adma {
/* Used to store global command register state when suspending */
unsigned int global_cmd;
+ const struct tegra_adma_chip_data *cdata;
+
/* Last member of the structure */
struct tegra_adma_chan channels[0];
};
static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
- writel(val, tdma->base_addr + reg);
+ writel(val, tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}
static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
{
- return readl(tdma->base_addr + reg);
+ return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}
static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
@@ -209,14 +227,16 @@ static int tegra_adma_init(struct tegra_adma *tdma)
int ret;
/* Clear any interrupts */
- tdma_write(tdma, ADMA_GLOBAL_INT_CLEAR, 0x1);
+ tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
/* Wait for reset to clear */
ret = readx_poll_timeout(readl,
- tdma->base_addr + ADMA_GLOBAL_SOFT_RESET,
+ tdma->base_addr +
+ tdma->cdata->global_reg_offset +
+ ADMA_GLOBAL_SOFT_RESET,
status, status == 0, 20, 10000);
if (ret)
return ret;
@@ -236,13 +256,13 @@ static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
if (tdc->sreq_reserved)
return tdc->sreq_dir == direction ? 0 : -EINVAL;
+ if (sreq_index > tdma->cdata->ch_req_max) {
+ dev_err(tdma->dev, "invalid DMA request\n");
+ return -EINVAL;
+ }
+
switch (direction) {
case DMA_MEM_TO_DEV:
- if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) {
- dev_err(tdma->dev, "invalid DMA request\n");
- return -EINVAL;
- }
-
if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
dev_err(tdma->dev, "DMA request reserved\n");
return -EINVAL;
@@ -250,11 +270,6 @@ static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
break;
case DMA_DEV_TO_MEM:
- if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) {
- dev_err(tdma->dev, "invalid DMA request\n");
- return -EINVAL;
- }
-
if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
dev_err(tdma->dev, "DMA request reserved\n");
return -EINVAL;
@@ -428,6 +443,51 @@ static void tegra_adma_issue_pending(struct dma_chan *dc)
spin_unlock_irqrestore(&tdc->vc.lock, flags);
}
+static bool tegra_adma_is_paused(struct tegra_adma_chan *tdc)
+{
+ u32 csts;
+
+ csts = tdma_ch_read(tdc, ADMA_CH_STATUS);
+ csts &= ADMA_CH_STATUS_XFER_PAUSED;
+
+ return csts ? true : false;
+}
+
+static int tegra_adma_pause(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ struct tegra_adma_desc *desc = tdc->desc;
+ struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+ int dcnt = 10;
+
+ ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+ ch_regs->ctrl |= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
+ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
+
+ while (dcnt-- && !tegra_adma_is_paused(tdc))
+ udelay(TEGRA_ADMA_BURST_COMPLETE_TIME);
+
+ if (dcnt < 0) {
+ dev_err(tdc2dev(tdc), "unable to pause DMA channel\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int tegra_adma_resume(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ struct tegra_adma_desc *desc = tdc->desc;
+ struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+
+ ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+ ch_regs->ctrl &= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
+ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
+
+ return 0;
+}
+
static int tegra_adma_terminate_all(struct dma_chan *dc)
{
struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
@@ -481,12 +541,29 @@ static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
return ret;
}
+static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size)
+{
+ if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
+ burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;
+
+ return fls(burst_size) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
+}
+
+static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size)
+{
+ if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
+ burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;
+
+ return (burst_size - 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
+}
+
static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
struct tegra_adma_desc *desc,
dma_addr_t buf_addr,
enum dma_transfer_direction direction)
{
struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+ const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
unsigned int burst_size, adma_dir;
if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
@@ -495,17 +572,21 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
switch (direction) {
case DMA_MEM_TO_DEV:
adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
- burst_size = fls(tdc->sconfig.dst_maxburst);
+ burst_size = tdc->sconfig.dst_maxburst;
ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
- ch_regs->ctrl = ADMA_CH_CTRL_TX_REQ(tdc->sreq_index);
+ ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
+ cdata->ch_req_mask,
+ cdata->ch_req_tx_shift);
ch_regs->src_addr = buf_addr;
break;
case DMA_DEV_TO_MEM:
adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
- burst_size = fls(tdc->sconfig.src_maxburst);
+ burst_size = tdc->sconfig.src_maxburst;
ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
- ch_regs->ctrl = ADMA_CH_CTRL_RX_REQ(tdc->sreq_index);
+ ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
+ cdata->ch_req_mask,
+ cdata->ch_req_rx_shift);
ch_regs->trg_addr = buf_addr;
break;
@@ -514,13 +595,10 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
return -EINVAL;
}
- if (!burst_size || burst_size > ADMA_CH_CONFIG_BURST_16)
- burst_size = ADMA_CH_CONFIG_BURST_16;
-
ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
ADMA_CH_CTRL_MODE_CONTINUOUS |
ADMA_CH_CTRL_FLOWCTRL_EN;
- ch_regs->config |= ADMA_CH_CONFIG_BURST_SIZE(burst_size);
+ ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
@@ -635,32 +713,99 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
static int tegra_adma_runtime_suspend(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
+ struct tegra_adma_chan_regs *ch_reg;
+ struct tegra_adma_chan *tdc;
+ int i;
tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+ if (!tdma->global_cmd)
+ goto clk_disable;
+
+ for (i = 0; i < tdma->nr_channels; i++) {
+ tdc = &tdma->channels[i];
+ ch_reg = &tdc->ch_regs;
+ ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
+ /* skip if channel is not active */
+ if (!ch_reg->cmd)
+ continue;
+ ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
+ ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
+ ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
+ ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+ ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+ ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
+ }
+
+clk_disable:
+ clk_disable_unprepare(tdma->ahub_clk);
- return pm_clk_suspend(dev);
+ return 0;
}
static int tegra_adma_runtime_resume(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
- int ret;
+ struct tegra_adma_chan_regs *ch_reg;
+ struct tegra_adma_chan *tdc;
+ int ret, i;
- ret = pm_clk_resume(dev);
- if (ret)
+ ret = clk_prepare_enable(tdma->ahub_clk);
+ if (ret) {
+ dev_err(dev, "ahub clk_enable failed: %d\n", ret);
return ret;
-
+ }
tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (!tdma->global_cmd)
+ return 0;
+
+ for (i = 0; i < tdma->nr_channels; i++) {
+ tdc = &tdma->channels[i];
+ ch_reg = &tdc->ch_regs;
+ /* skip if channel was not active earlier */
+ if (!ch_reg->cmd)
+ continue;
+ tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
+ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+ tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
+ tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
+ }
+
return 0;
}
static const struct tegra_adma_chip_data tegra210_chip_data = {
- .nr_channels = 22,
+ .adma_get_burst_config = tegra210_adma_get_burst_config,
+ .global_reg_offset = 0xc00,
+ .global_int_clear = 0x20,
+ .ch_req_tx_shift = 28,
+ .ch_req_rx_shift = 24,
+ .ch_base_offset = 0,
+ .ch_req_mask = 0xf,
+ .ch_req_max = 10,
+ .ch_reg_size = 0x80,
+ .nr_channels = 22,
+};
+
+static const struct tegra_adma_chip_data tegra186_chip_data = {
+ .adma_get_burst_config = tegra186_adma_get_burst_config,
+ .global_reg_offset = 0,
+ .global_int_clear = 0x402c,
+ .ch_req_tx_shift = 27,
+ .ch_req_rx_shift = 22,
+ .ch_base_offset = 0x10000,
+ .ch_req_mask = 0x1f,
+ .ch_req_max = 20,
+ .ch_reg_size = 0x100,
+ .nr_channels = 32,
};
static const struct of_device_id tegra_adma_of_match[] = {
{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
+ { .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
@@ -685,6 +830,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
return -ENOMEM;
tdma->dev = &pdev->dev;
+ tdma->cdata = cdata;
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
@@ -693,13 +839,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- return ret;
-
- ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
- if (ret)
- goto clk_destroy;
+ tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
+ if (IS_ERR(tdma->ahub_clk)) {
+ dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
+ return PTR_ERR(tdma->ahub_clk);
+ }
pm_runtime_enable(&pdev->dev);
@@ -715,7 +859,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
- tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
+ tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
+ + (cdata->ch_reg_size * i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
@@ -746,6 +891,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ tdma->dma_dev.device_pause = tegra_adma_pause;
+ tdma->dma_dev.device_resume = tegra_adma_resume;
ret = dma_async_device_register(&tdma->dma_dev);
if (ret < 0) {
@@ -776,8 +923,6 @@ rpm_put:
pm_runtime_put_sync(&pdev->dev);
rpm_disable:
pm_runtime_disable(&pdev->dev);
-clk_destroy:
- pm_clk_destroy(&pdev->dev);
return ret;
}
@@ -787,6 +932,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
struct tegra_adma *tdma = platform_get_drvdata(pdev);
int i;
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
for (i = 0; i < tdma->nr_channels; ++i)
@@ -794,22 +940,15 @@ static int tegra_adma_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- pm_clk_destroy(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_adma_pm_suspend(struct device *dev)
-{
- return pm_runtime_suspended(dev) == false;
-}
-#endif
-
static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
tegra_adma_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(tegra_adma_pm_suspend, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra_admac_driver = {
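
The two burst helpers added above, tegra210_adma_get_burst_config() and tegra186_adma_get_burst_config(), encode the same request differently per SoC. The small standalone comparison below assumes a slave-config maxburst of 8 words; the value and the fls_example() helper are illustrative only.

#include <stdio.h>

#define BURST_SIZE_SHIFT	20
#define MAX_BURST_SIZE		16

/* fls() equivalent for this example: 1-based position of the highest set bit. */
static unsigned int fls_example(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int burst = 8;	/* assumed slave config maxburst */

	if (!burst || burst > MAX_BURST_SIZE)
		burst = MAX_BURST_SIZE;

	/* Tegra210 encodes via fls(): fls(8) = 4 -> field value 4. */
	printf("tegra210 field: 0x%x\n", fls_example(burst) << BURST_SIZE_SHIFT);

	/* Tegra186 encodes burst - 1: 8 -> field value 7. */
	printf("tegra186 field: 0x%x\n", (burst - 1) << BURST_SIZE_SHIFT);
	return 0;
}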
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index eb45af71d3a3..e8d0881b64d8 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -327,7 +327,6 @@ static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
channel_writel(dc, SAIR, 0);
channel_writel(dc, DAIR, 0);
channel_writel(dc, CCR, 0);
- mmiowb();
}
/* Called with dc->lock held and bh disabled */
@@ -954,7 +953,6 @@ static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
dma_sync_single_for_device(chan2parent(&dc->chan),
prev->txd.phys, ddev->descsize,
DMA_TO_DEVICE);
- mmiowb();
if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
channel_read_CHAR(dc) == prev->txd.phys)
/* Restart chain DMA */
@@ -1080,7 +1078,6 @@ static void txx9dmac_free_chan_resources(struct dma_chan *chan)
static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
dma_writel(ddev, MCR, 0);
- mmiowb();
}
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index eafd6c4b90fe..8d174dc5dccd 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -703,7 +703,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
INIT_LIST_HEAD(&ld_completed);
- spin_lock_bh(&chan->lock);
+ spin_lock(&chan->lock);
/* Clean already completed and acked descriptors */
xgene_dma_clean_completed_descriptor(chan);
@@ -772,7 +772,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
*/
xgene_chan_xfer_ld_pending(chan);
- spin_unlock_bh(&chan->lock);
+ spin_unlock(&chan->lock);
/* Run the callback for each descriptor, in order */
list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
@@ -797,7 +797,7 @@ static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
return -ENOMEM;
}
- chan_dbg(chan, "Allocate descripto pool\n");
+ chan_dbg(chan, "Allocate descriptor pool\n");
return 1;
}
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 1bcf9aea0cdf..8816f74a22b4 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -9,6 +9,7 @@
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/edac.h>
+#include <linux/firmware/intel/stratix10-smc.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -1361,8 +1362,19 @@ static const struct edac_device_prv_data a10_l2ecc_data = {
#ifdef CONFIG_EDAC_ALTERA_ETHERNET
+static int __init socfpga_init_ethernet_ecc(struct altr_edac_device_dev *dev)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(dev);
+}
+
static const struct edac_device_prv_data a10_enetecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_ethernet_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1374,21 +1386,25 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.inject_fops = &altr_edac_a10_device_inject2_fops,
};
-static int __init socfpga_init_ethernet_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc");
-}
-
-early_initcall(socfpga_init_ethernet_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
/********************** NAND Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_NAND
+static int __init socfpga_init_nand_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-nand-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_nandecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_nand_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1400,21 +1416,25 @@ static const struct edac_device_prv_data a10_nandecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_nand_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-nand-ecc");
-}
-
-early_initcall(socfpga_init_nand_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_NAND */
/********************** DMA Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_DMA
+static int __init socfpga_init_dma_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-dma-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_dmaecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_dma_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1426,21 +1446,25 @@ static const struct edac_device_prv_data a10_dmaecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_dma_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-dma-ecc");
-}
-
-early_initcall(socfpga_init_dma_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_DMA */
/********************** USB Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_USB
+static int __init socfpga_init_usb_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-usb-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_usbecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_usb_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1452,21 +1476,25 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.inject_fops = &altr_edac_a10_device_inject2_fops,
};
-static int __init socfpga_init_usb_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-usb-ecc");
-}
-
-early_initcall(socfpga_init_usb_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_USB */
/********************** QSPI Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_QSPI
+static int __init socfpga_init_qspi_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-qspi-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_qspiecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_qspi_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1478,13 +1506,6 @@ static const struct edac_device_prv_data a10_qspiecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_qspi_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-qspi-ecc");
-}
-
-early_initcall(socfpga_init_qspi_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_QSPI */
/********************* SDMMC Device Functions **********************/
@@ -1593,6 +1614,35 @@ err_release_group_1:
return rc;
}
+static int __init socfpga_init_sdmmc_ecc(struct altr_edac_device_dev *device)
+{
+ int rc = -ENODEV;
+ struct device_node *child;
+
+ child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
+ if (!child)
+ return -ENODEV;
+
+ if (!of_device_is_available(child))
+ goto exit;
+
+ if (validate_parent_available(child))
+ goto exit;
+
+ /* Init portB */
+ rc = altr_init_a10_ecc_block(child, ALTR_A10_SDMMC_IRQ_MASK,
+ a10_sdmmceccb_data.ecc_enable_mask, 1);
+ if (rc)
+ goto exit;
+
+ /* Setup portB */
+ return altr_portb_setup(device);
+
+exit:
+ of_node_put(child);
+ return rc;
+}
+
static irqreturn_t altr_edac_a10_ecc_irq_portb(int irq, void *dev_id)
{
struct altr_edac_device_dev *ad = dev_id;
@@ -1617,7 +1667,7 @@ static irqreturn_t altr_edac_a10_ecc_irq_portb(int irq, void *dev_id)
}
static const struct edac_device_prv_data a10_sdmmcecca_data = {
- .setup = altr_portb_setup,
+ .setup = socfpga_init_sdmmc_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1630,7 +1680,7 @@ static const struct edac_device_prv_data a10_sdmmcecca_data = {
};
static const struct edac_device_prv_data a10_sdmmceccb_data = {
- .setup = altr_portb_setup,
+ .setup = socfpga_init_sdmmc_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENB,
.ue_clear_mask = ALTR_A10_ECC_DERRPENB,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1642,35 +1692,6 @@ static const struct edac_device_prv_data a10_sdmmceccb_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_sdmmc_ecc(void)
-{
- int rc = -ENODEV;
- struct device_node *child;
-
- if (!socfpga_is_a10() && !socfpga_is_s10())
- return -ENODEV;
-
- child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
- if (!child) {
- edac_printk(KERN_WARNING, EDAC_DEVICE, "SDMMC node not found\n");
- return -ENODEV;
- }
-
- if (!of_device_is_available(child))
- goto exit;
-
- if (validate_parent_available(child))
- goto exit;
-
- rc = altr_init_a10_ecc_block(child, ALTR_A10_SDMMC_IRQ_MASK,
- a10_sdmmcecca_data.ecc_enable_mask, 1);
-exit:
- of_node_put(child);
- return rc;
-}
-
-early_initcall(socfpga_init_sdmmc_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_SDMMC */
/********************* Arria10 EDAC Device Functions *************************/
@@ -1762,28 +1783,24 @@ static ssize_t altr_edac_a10_device_trig2(struct file *file,
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
writel(priv->ue_set_mask, set_addr);
} else {
- /* Setup write of 0 to first 4 bytes */
- writel(0x0, drvdata->base + ECC_BLK_WDATA0_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
- /* Setup write of 4 bytes */
+ /* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
/* Setup Address to 0 */
- writel(0x0, drvdata->base + ECC_BLK_ADDRESS_OFST);
- /* Setup accctrl to write & data override */
- writel(ECC_WRITE_DOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
- /* Kick it. */
- writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
- /* Setup accctrl to read & ecc override */
- writel(ECC_READ_EOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ writel(0, drvdata->base + ECC_BLK_ADDRESS_OFST);
+ /* Setup accctrl to read & ecc & data override */
+ writel(ECC_READ_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
/* Kick it. */
writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
/* Setup write for single bit change */
- writel(0x1, drvdata->base + ECC_BLK_WDATA0_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA0_OFST) ^ 0x1,
+ drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA1_OFST),
+ drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA2_OFST),
+ drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA3_OFST),
+ drvdata->base + ECC_BLK_WDATA3_OFST);
+
/* Copy Read ECC to Write ECC */
writel(readl(drvdata->base + ECC_BLK_RECC0_OFST),
drvdata->base + ECC_BLK_WECC0_OFST);
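/*
 * Illustrative note, not part of the patch: the reworked trigger path
 * above injects a correctable error by read-modify-write instead of
 * overwriting the word with a fixed pattern. It reads the current data
 * and ECC using the override access (ECC_READ_EDOVR), flips a single
 * data bit (RDATA0 ^ 0x1), and pairs the modified word with the
 * original ECC (the RECC-to-WECC copies that follow), so the next read
 * sees a one-bit mismatch that the ECC logic reports as a single-bit
 * error and can correct back to the original value.
 */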
@@ -1930,6 +1947,15 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
goto err_release_group1;
}
+#ifdef CONFIG_ARCH_STRATIX10
+ /* Use the IRQ index to identify the SError origin instead of mapping a DB IRQ */
+ rc = of_property_read_u32_index(np, "interrupts", 0, &altdev->db_irq);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to parse DB IRQ index\n");
+ goto err_release_group1;
+ }
+#else
altdev->db_irq = irq_of_parse_and_map(np, 1);
if (!altdev->db_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating DBIRQ\n");
@@ -1943,6 +1969,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
goto err_release_group1;
}
+#endif
rc = edac_device_add_device(dci);
if (rc) {
@@ -2005,6 +2032,10 @@ static const struct irq_domain_ops a10_eccmgr_ic_ops = {
/************** Stratix 10 EDAC Double Bit Error Handler ************/
#define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
+#ifdef CONFIG_ARCH_STRATIX10
+/* panic routine issues reboot on non-zero panic_timeout */
+extern int panic_timeout;
+
/*
* The double bit error is handled through SError which is fatal. This is
* called as a panic notifier to printout ECC error info as part of the panic.
@@ -2018,17 +2049,37 @@ static int s10_edac_dberr_handler(struct notifier_block *this,
regmap_read(edac->ecc_mgr_map, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
&dberror);
regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, dberror);
- if (dberror & S10_DDR0_IRQ_MASK) {
- regmap_read(edac->ecc_mgr_map, A10_DERRADDR_OFST, &err_addr);
- regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
- err_addr);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
- err_addr);
+ if (dberror & S10_DBE_IRQ_MASK) {
+ struct list_head *position;
+ struct altr_edac_device_dev *ed;
+ struct arm_smccc_res result;
+
+ /* Find the matching DBE in the list of devices */
+ list_for_each(position, &edac->a10_ecc_devices) {
+ ed = list_entry(position, struct altr_edac_device_dev,
+ next);
+ if (!(BIT(ed->db_irq) & dberror))
+ continue;
+
+ writel(ALTR_A10_ECC_DERRPENA,
+ ed->base + ALTR_A10_ECC_INTSTAT_OFST);
+ err_addr = readl(ed->base + ALTR_S10_DERR_ADDRA_OFST);
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_ADDR_OFST, err_addr);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "EDAC: [Fatal DBE on %s @ 0x%08X]\n",
+ ed->edac_dev_name, err_addr);
+ break;
+ }
+ /* Notify the System through SMC. Reboot delay = 1 second */
+ panic_timeout = 1;
+ arm_smccc_smc(INTEL_SIP_SMC_ECC_DBE, dberror, 0, 0, 0, 0,
+ 0, 0, &result);
}
return NOTIFY_DONE;
}
+#endif
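/*
 * Illustrative note on the handler above: S10_DBE_IRQ_MASK (0x3FE)
 * covers the per-peripheral double-bit-error status bits, and each
 * registered device is matched by testing BIT(ed->db_irq) against the
 * status word, e.g. a device whose db_irq is 4 matches a status of
 * 0x010. Setting panic_timeout = 1 lets the panic path reboot one
 * second after the error is logged, while the INTEL_SIP_SMC_ECC_DBE
 * call notifies the secure firmware of the failing bits, as the
 * comments in the hunk note.
 */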
/****************** Arria 10 EDAC Probe Function *********************/
static int altr_edac_a10_probe(struct platform_device *pdev)
@@ -2098,16 +2149,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_irq_handler,
edac);
- if (socfpga_is_a10()) {
- edac->db_irq = platform_get_irq(pdev, 1);
- if (edac->db_irq < 0) {
- dev_err(&pdev->dev, "No DBERR IRQ resource\n");
- return edac->db_irq;
- }
- irq_set_chained_handler_and_data(edac->db_irq,
- altr_edac_a10_irq_handler,
- edac);
- } else {
+#ifdef CONFIG_ARCH_STRATIX10
+ {
int dberror, err_addr;
edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
@@ -2130,6 +2173,15 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
S10_SYSMGR_UE_ADDR_OFST, 0);
}
}
+#else
+ edac->db_irq = platform_get_irq(pdev, 1);
+ if (edac->db_irq < 0) {
+ dev_err(&pdev->dev, "No DBERR IRQ resource\n");
+ return edac->db_irq;
+ }
+ irq_set_chained_handler_and_data(edac->db_irq,
+ altr_edac_a10_irq_handler, edac);
+#endif
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index f8664bac9fa8..55654cc4bcdf 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -289,6 +289,7 @@ struct altr_sdram_mc_data {
#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000
/************* Stratix10 Defines **************/
+#define ALTR_S10_DERR_ADDRA_OFST 0x2C
/* Stratix10 ECC Manager Defines */
#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
@@ -299,6 +300,7 @@ struct altr_sdram_mc_data {
#define S10_SYSMGR_UE_ADDR_OFST 0x224
#define S10_DDR0_IRQ_MASK BIT(16)
+#define S10_DBE_IRQ_MASK 0x3FE
/* Define ECC Block Offsets for peripherals */
#define ECC_BLK_ADDRESS_OFST 0x40
@@ -319,7 +321,7 @@ struct altr_sdram_mc_data {
#define ECC_BLK_STARTACC_OFST 0x7C
#define ECC_XACT_KICK 0x10000
-#define ECC_WORD_WRITE 0xF
+#define ECC_WORD_WRITE 0xFF
#define ECC_WRITE_DOVR 0x101
#define ECC_WRITE_EDOVR 0x103
#define ECC_READ_EOVR 0x2
@@ -370,69 +372,4 @@ struct altr_arria10_edac {
struct notifier_block panic_notifier;
};
-/*
- * Functions specified by ARM SMC Calling convention:
- *
- * FAST call executes atomic operations, returns when the requested operation
- * has completed.
- * STD call starts a operation which can be preempted by a non-secure
- * interrupt. The call can return before the requested operation has
- * completed.
- *
- * a0..a7 is used as register names in the descriptions below, on arm32
- * that translates to r0..r7 and on arm64 to w0..w7.
- */
-
-#define INTEL_SIP_SMC_STD_CALL_VAL(func_num) \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_SIP, (func_num))
-
-#define INTEL_SIP_SMC_FAST_CALL_VAL(func_num) \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_SIP, (func_num))
-
-#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
-#define INTEL_SIP_SMC_STATUS_OK 0x0
-#define INTEL_SIP_SMC_REG_ERROR 0x5
-
-/*
- * Request INTEL_SIP_SMC_REG_READ
- *
- * Read a protected register using SMCCC
- *
- * Call register usage:
- * a0: INTEL_SIP_SMC_REG_READ.
- * a1: register address.
- * a2-7: not used.
- *
- * Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_REG_ERROR, or
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION
- * a1: Value in the register
- * a2-3: not used.
- */
-#define INTEL_SIP_SMC_FUNCID_REG_READ 7
-#define INTEL_SIP_SMC_REG_READ \
- INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ)
-
-/*
- * Request INTEL_SIP_SMC_REG_WRITE
- *
- * Write a protected register using SMCCC
- *
- * Call register usage:
- * a0: INTEL_SIP_SMC_REG_WRITE.
- * a1: register address
- * a2: value to program into register.
- * a3-7: not used.
- *
- * Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_REG_ERROR, or
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION
- * a1-3: not used.
- */
-#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8
-#define INTEL_SIP_SMC_REG_WRITE \
- INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
-
#endif /* #ifndef _ALTERA_EDAC_H */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 6ea98575a402..e2a99466faaa 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -18,6 +18,9 @@ static struct msr __percpu *msrs;
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
+/* Number of Unified Memory Controllers */
+static u8 num_umcs;
+
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -449,6 +452,9 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
#define for_each_chip_select_mask(i, dct, pvt) \
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+#define for_each_umc(i) \
+ for (i = 0; i < num_umcs; i++)
+
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -722,7 +728,7 @@ static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
if (pvt->umc) {
u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
continue;
@@ -781,6 +787,22 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
(dclr & BIT(15)) ? "yes" : "no");
}
+/*
+ * The Address Mask should be a contiguous set of bits in the non-interleaved
+ * case. So to check for CS interleaving, find the most- and least-significant
+ * bits of the mask, generate a contiguous bitmask, and compare the two.
+ */
+static bool f17_cs_interleaved(struct amd64_pvt *pvt, u8 ctrl, int cs)
+{
+ u32 mask = pvt->csels[ctrl].csmasks[cs >> 1];
+ u32 msb = fls(mask) - 1, lsb = ffs(mask) - 1;
+ u32 test_mask = GENMASK(msb, lsb);
+
+ edac_dbg(1, "mask=0x%08x test_mask=0x%08x\n", mask, test_mask);
+
+ return mask ^ test_mask;
+}
+
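/*
 * A minimal standalone sketch, not taken from the patch, of the
 * contiguity test that f17_cs_interleaved() performs above. Plain bit
 * scans stand in for the kernel's fls()/ffs()/GENMASK() helpers, and
 * the sample masks in main() are made up for demonstration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool cs_mask_is_interleaved(uint32_t mask)
{
	int msb = 31, lsb = 0;
	uint32_t test_mask;

	if (!mask)
		return false;

	while (!(mask & (1u << msb)))
		msb--;
	while (!(mask & (1u << lsb)))
		lsb++;

	/* Contiguous mask spanning [msb:lsb], i.e. GENMASK(msb, lsb). */
	test_mask = (~0u >> (31 - msb)) & ~((1u << lsb) - 1);

	/* Any hole in the mask leaves a non-zero difference. */
	return mask ^ test_mask;
}

int main(void)
{
	/* 0: contiguous bits, no CS interleaving assumed */
	printf("%d\n", cs_mask_is_interleaved(0x0000ff00));
	/* 1: holes in the mask, CS interleaving assumed */
	printf("%d\n", cs_mask_is_interleaved(0x0000f0f0));
	return 0;
}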
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, cs0, cs1;
@@ -797,8 +819,19 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
size1 = 0;
cs1 = dimm * 2 + 1;
- if (csrow_enabled(cs1, ctrl, pvt))
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
+ if (csrow_enabled(cs1, ctrl, pvt)) {
+ /*
+ * CS interleaving is only supported if both CSes have
+ * the same amount of memory. Because they are
+ * interleaved, it will look like both CSes have the
+ * full amount of memory. Save the size for both as
+ * half the amount we found on CS0, if interleaved.
+ */
+ if (f17_cs_interleaved(pvt, ctrl, cs1))
+ size1 = size0 = (size0 >> 1);
+ else
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
+ }
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
@@ -811,7 +844,7 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
struct amd64_umc *umc;
u32 i, tmp, umc_base;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
@@ -894,8 +927,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_info("using %s syndromes.\n",
- ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
+ amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
@@ -1388,7 +1420,7 @@ static int f17_early_channel_count(struct amd64_pvt *pvt)
int i, channels = 0;
/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
- for (i = 0; i < NUM_UMCS; i++)
+ for_each_umc(i)
channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
amd64_info("MCT channel count: %d\n", channels);
@@ -2211,6 +2243,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
+ [F17_M30H_CPUS] = {
+ .ctl_name = "F17h_M30h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -2464,18 +2505,14 @@ static inline void decode_bus_error(int node_id, struct mce *m)
* To find the UMC channel represented by this bank we need to match on its
* instance_id. The instance_id of a bank is held in the lower 32 bits of its
* IPID.
+ *
+ * Currently, we can derive the channel number by looking at the 6th nibble in
+ * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
+ * number.
*/
-static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
+static int find_umc_channel(struct mce *m)
{
- u32 umc_instance_id[] = {0x50f00, 0x150f00};
- u32 instance_id = m->ipid & GENMASK(31, 0);
- int i, channel = -1;
-
- for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
- if (umc_instance_id[i] == instance_id)
- channel = i;
-
- return channel;
+ return (m->ipid & GENMASK(31, 0)) >> 20;
}
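/*
 * Worked example for the nibble-based lookup above, using the values
 * from the table it replaces: ipid low word 0x00050f00 >> 20 = 0 and
 * 0x00150f00 >> 20 = 1, so the two previously hard-coded instance IDs
 * still map to channels 0 and 1, while IDs of the form 0xYXXXXX map to
 * channel Y on larger parts without extending the table.
 */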
static void decode_umc_error(int node_id, struct mce *m)
@@ -2497,11 +2534,7 @@ static void decode_umc_error(int node_id, struct mce *m)
if (m->status & MCI_STATUS_DEFERRED)
ecc_type = 3;
- err.channel = find_umc_channel(pvt, m);
- if (err.channel < 0) {
- err.err_code = ERR_CHANNEL;
- goto log_error;
- }
+ err.channel = find_umc_channel(m);
if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
err.err_code = ERR_NORM_ADDR;
@@ -2603,19 +2636,19 @@ static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
if (pvt->umc) {
u8 i;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
/* Check enabled channels only: */
- if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
- (pvt->umc[i].ecc_ctrl & BIT(7))) {
- pvt->ecc_sym_sz = 8;
- break;
+ if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
+ if (pvt->umc[i].ecc_ctrl & BIT(9)) {
+ pvt->ecc_sym_sz = 16;
+ return;
+ } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
+ pvt->ecc_sym_sz = 8;
+ return;
+ }
}
}
-
- return;
- }
-
- if (pvt->fam >= 0x10) {
+ } else if (pvt->fam >= 0x10) {
u32 tmp;
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
@@ -2639,7 +2672,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
u32 i, umc_base;
/* Read registers from each UMC */
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
@@ -3052,7 +3085,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
if (boot_cpu_data.x86 >= 0x17) {
u8 umc_en_mask = 0, ecc_en_mask = 0;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
u32 base = get_umc_base(i);
/* Only check enabled UMCs. */
@@ -3105,7 +3138,7 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
u8 i, ecc_en = 1, cpk_en = 1;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
@@ -3203,6 +3236,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M10H_CPUS];
pvt->ops = &family_types[F17_M10H_CPUS].ops;
break;
+ } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
+ fam_type = &family_types[F17_M30H_CPUS];
+ pvt->ops = &family_types[F17_M30H_CPUS].ops;
+ break;
}
/* fall through */
case 0x18:
@@ -3236,6 +3273,22 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
+/* Set the number of Unified Memory Controllers in the system. */
+static void compute_num_umcs(void)
+{
+ u8 model = boot_cpu_data.x86_model;
+
+ if (boot_cpu_data.x86 < 0x17)
+ return;
+
+ if (model >= 0x30 && model <= 0x3f)
+ num_umcs = 8;
+ else
+ num_umcs = 2;
+
+ edac_dbg(1, "Number of UMCs: %x", num_umcs);
+}
+
static int init_one_instance(unsigned int nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
@@ -3260,7 +3313,7 @@ static int init_one_instance(unsigned int nid)
goto err_free;
if (pvt->fam >= 0x17) {
- pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
+ pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc) {
ret = -ENOMEM;
goto err_free;
@@ -3299,8 +3352,14 @@ static int init_one_instance(unsigned int nid)
* Always allocate two channels since we can have setups with DIMMs on
* only one channel. Also, this simplifies handling later for the price
* of a couple of KBs tops.
+ *
+ * On Fam17h+, the number of controllers may be greater than two. So set
+ * the size equal to the maximum number of UMCs.
*/
- layers[1].size = 2;
+ if (pvt->fam >= 0x17)
+ layers[1].size = num_umcs;
+ else
+ layers[1].size = 2;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
@@ -3481,6 +3540,8 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
+ compute_num_umcs();
+
for (i = 0; i < amd_nb_num(); i++) {
err = probe_one_instance(i);
if (err) {
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 4242f8e39c18..8f66472f7adc 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -117,6 +117,8 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
/*
* Function 1 - Address Map
@@ -272,8 +274,6 @@
#define UMC_SDP_INIT BIT(31)
-#define NUM_UMCS 2
-
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
@@ -284,6 +284,7 @@ enum amd_families {
F16_M30H_CPUS,
F17_CPUS,
F17_M10H_CPUS,
+ F17_M30H_CPUS,
NUM_FAMILIES,
};
@@ -363,7 +364,7 @@ struct amd64_pvt {
u32 dct_sel_hi; /* DRAM Controller Select High */
u32 online_spare; /* On-Line spare Reg */
- /* x4 or x8 syndromes in use */
+ /* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
/* place to store error injection parameters prior to issue */
@@ -396,8 +397,8 @@ struct err_info {
static inline u32 get_umc_base(u8 channel)
{
- /* ch0: 0x50000, ch1: 0x150000 */
- return 0x50000 + (!!channel << 20);
+ /* chY: 0xY50000 */
+ return 0x50000 + (channel << 20);
}
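/*
 * Worked example for the generalized base above: 0x50000 + (ch << 20)
 * gives 0x050000 for channel 0, 0x150000 for channel 1, 0x350000 for
 * channel 3 and 0x750000 for channel 7, matching the 0xY50000 layout
 * in the comment and removing the old two-channel (!!channel) limit.
 */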
static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index c334fb7c63df..6f06aec4877c 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -181,6 +181,54 @@ static struct notifier_block i10nm_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature.
+ * Exercise the address decode logic by writing an address to
+ * /sys/kernel/debug/edac/i10nm_test/addr.
+ */
+static struct dentry *i10nm_test;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct mce m;
+
+ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
+
+ memset(&m, 0, sizeof(m));
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x90;
+ /* One corrected error */
+ m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
+ m.addr = val;
+ skx_mce_check_error(NULL, 0, &m);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_i10nm_debug(void)
+{
+ i10nm_test = edac_debugfs_create_dir("i10nm_test");
+ if (!i10nm_test)
+ return;
+
+ if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
+ NULL, &fops_u64_wo)) {
+ debugfs_remove(i10nm_test);
+ i10nm_test = NULL;
+ }
+}
+
+static void teardown_i10nm_debug(void)
+{
+ debugfs_remove_recursive(i10nm_test);
+}
+#else
+static inline void setup_i10nm_debug(void) {}
+static inline void teardown_i10nm_debug(void) {}
+#endif /*CONFIG_EDAC_DEBUG*/
+
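/*
 * Usage sketch, not part of the patch: with CONFIG_EDAC_DEBUG enabled,
 * the decode path can be exercised from userspace by writing a system
 * address to the new per-driver node, for example
 *
 *   echo 0x12345678 > /sys/kernel/debug/edac/i10nm_test/addr
 *
 * (the address here is arbitrary); this feeds a fake corrected-error
 * struct mce through skx_mce_check_error() as debugfs_u64_set() shows.
 */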
static int __init i10nm_init(void)
{
u8 mc = 0, src_id = 0, node_id = 0;
@@ -249,7 +297,7 @@ static int __init i10nm_init(void)
opstate_init();
mce_register_decode_chain(&i10nm_mce_dec);
- setup_skx_debug("i10nm_test");
+ setup_i10nm_debug();
i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
@@ -262,7 +310,7 @@ fail:
static void __exit i10nm_exit(void)
{
edac_dbg(2, "\n");
- teardown_skx_debug();
+ teardown_i10nm_debug();
mce_unregister_decode_chain(&i10nm_mce_dec);
skx_adxl_put();
skx_remove();
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 0a1814dad6cf..bb0202ad7a13 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1004,7 +1004,7 @@ static inline void amd_decode_err_code(u16 ec)
/*
* Filter out unwanted MCE signatures here.
*/
-static bool amd_filter_mce(struct mce *m)
+static bool ignore_mce(struct mce *m)
{
/*
* NB GART TLB error reporting is disabled by default.
@@ -1038,7 +1038,7 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
unsigned int fam = x86_family(m->cpuid);
int ecc;
- if (amd_filter_mce(m))
+ if (ignore_mce(m))
return NOTIFY_STOP;
pr_emerg(HW_ERR "%s\n", decode_error_status(m));
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index adae4c848ca1..a5c8fa3a249a 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -540,6 +540,54 @@ static struct notifier_block skx_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature.
+ * Exercise the address decode logic by writing an address to
+ * /sys/kernel/debug/edac/skx_test/addr.
+ */
+static struct dentry *skx_test;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct mce m;
+
+ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
+
+ memset(&m, 0, sizeof(m));
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x90;
+ /* One corrected error */
+ m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
+ m.addr = val;
+ skx_mce_check_error(NULL, 0, &m);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_skx_debug(void)
+{
+ skx_test = edac_debugfs_create_dir("skx_test");
+ if (!skx_test)
+ return;
+
+ if (!edac_debugfs_create_file("addr", 0200, skx_test,
+ NULL, &fops_u64_wo)) {
+ debugfs_remove(skx_test);
+ skx_test = NULL;
+ }
+}
+
+static void teardown_skx_debug(void)
+{
+ debugfs_remove_recursive(skx_test);
+}
+#else
+static inline void setup_skx_debug(void) {}
+static inline void teardown_skx_debug(void) {}
+#endif /*CONFIG_EDAC_DEBUG*/
+
/*
* skx_init:
* make sure we are running on the correct cpu model
@@ -619,7 +667,7 @@ static int __init skx_init(void)
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- setup_skx_debug("skx_test");
+ setup_skx_debug();
mce_register_decode_chain(&skx_mce_dec);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 0e96e7b5b0a7..b0dddcfa9baa 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -1,7 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Common codes for both the skx_edac driver and Intel 10nm server EDAC driver.
- * Originally split out from the skx_edac driver.
+ *
+ * Code shared by the skx_edac and i10nm_edac drivers. Originally split
+ * out from the skx_edac driver.
+ *
+ * This file is linked into both skx_edac and i10nm_edac. To avoid link
+ * errors it must behave like a pure library, without symbols or defines
+ * that would conflict when the file is linked into a module and into a
+ * built-in object at the same time (for example, __this_module symbol
+ * references when the file is linked into a built-in object).
*
* Copyright (c) 2018, Intel Corporation.
*/
@@ -644,48 +652,3 @@ void skx_remove(void)
kfree(d);
}
}
-
-#ifdef CONFIG_EDAC_DEBUG
-/*
- * Debug feature.
- * Exercise the address decode logic by writing an address to
- * /sys/kernel/debug/edac/dirname/addr.
- */
-static struct dentry *skx_test;
-
-static int debugfs_u64_set(void *data, u64 val)
-{
- struct mce m;
-
- pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
-
- memset(&m, 0, sizeof(m));
- /* ADDRV + MemRd + Unknown channel */
- m.status = MCI_STATUS_ADDRV + 0x90;
- /* One corrected error */
- m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
- m.addr = val;
- skx_mce_check_error(NULL, 0, &m);
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
-
-void setup_skx_debug(const char *dirname)
-{
- skx_test = edac_debugfs_create_dir(dirname);
- if (!skx_test)
- return;
-
- if (!edac_debugfs_create_file("addr", 0200, skx_test,
- NULL, &fops_u64_wo)) {
- debugfs_remove(skx_test);
- skx_test = NULL;
- }
-}
-
-void teardown_skx_debug(void)
-{
- debugfs_remove_recursive(skx_test);
-}
-#endif /*CONFIG_EDAC_DEBUG*/
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index d25374e34d4f..d18fa98669af 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -141,12 +141,4 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
void skx_remove(void);
-#ifdef CONFIG_EDAC_DEBUG
-void setup_skx_debug(const char *dirname);
-void teardown_skx_debug(void);
-#else
-static inline void setup_skx_debug(const char *dirname) {}
-static inline void teardown_skx_debug(void) {}
-#endif /*CONFIG_EDAC_DEBUG*/
-
#endif /* _SKX_COMM_EDAC_H */
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 8e17149655f0..de06fafb52ff 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -30,7 +30,7 @@ config EXTCON_ARIZONA
config EXTCON_AXP288
tristate "X-Power AXP288 EXTCON support"
- depends on MFD_AXP20X && USB_SUPPORT && X86
+ depends on MFD_AXP20X && USB_SUPPORT && X86 && ACPI
select USB_ROLE_SWITCH
help
Say Y here to enable support for USB peripheral detection
@@ -60,6 +60,13 @@ config EXTCON_INTEL_CHT_WC
Say Y here to enable extcon support for charger detection / control
on the Intel Cherrytrail Whiskey Cove PMIC.
+config EXTCON_INTEL_MRFLD
+ tristate "Intel Merrifield Basin Cove PMIC extcon driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ help
+ Say Y here to enable extcon support for charger detection / control
+ on the Intel Merrifield Basin Cove PMIC.
+
config EXTCON_MAX14577
tristate "Maxim MAX14577/77836 EXTCON Support"
depends on MFD_MAX14577
@@ -116,7 +123,7 @@ config EXTCON_PALMAS
config EXTCON_PTN5150
tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
- depends on I2C && GPIOLIB || COMPILE_TEST
+ depends on I2C && (GPIOLIB || COMPILE_TEST)
select REGMAP_I2C
help
Say Y here to enable support for USB peripheral and USB host
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 261ce4cfe209..d3941a735df3 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
+obj-$(CONFIG_EXTCON_INTEL_MRFLD) += extcon-intel-mrfld.o
obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
index f599aeddf8e5..f487d877ab5d 100644
--- a/drivers/extcon/devres.c
+++ b/drivers/extcon/devres.c
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(devm_extcon_register_notifier);
/**
* devm_extcon_unregister_notifier()
- - Resource-managed extcon_unregister_notifier()
+ * - Resource-managed extcon_unregister_notifier()
* @dev: the device owning the extcon device being created
* @edev: the extcon device
* @id: the unique id among the extcon enumeration
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index da0e9bc4262f..9327479c719c 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
struct arizona_extcon_info *info = platform_get_drvdata(pdev);
struct arizona *arizona = info->arizona;
int jack_irq_rise, jack_irq_fall;
+ bool change;
+
+ regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA, 0,
+ &change);
+
+ if (change) {
+ regulator_disable(info->micvdd);
+ pm_runtime_put(info->dev);
+ }
gpiod_put(info->micd_pol_gpio);
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index a983708b77a6..50f9402fb325 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -333,7 +333,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
- const char *name;
+ struct acpi_device *adev;
int ret, i, pirq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -357,9 +357,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = acpi_dev_get_first_match_name("INT3496", NULL, -1);
- if (name) {
- info->id_extcon = extcon_get_extcon_dev(name);
+ adev = acpi_dev_get_first_match_dev("INT3496", NULL, -1);
+ if (adev) {
+ info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
+ put_device(&adev->dev);
if (!info->id_extcon)
return -EPROBE_DEFER;
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 5ef215297101..9d32150e68db 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -17,6 +17,8 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+#include "extcon-intel.h"
+
#define CHT_WC_PHYCTRL 0x5e07
#define CHT_WC_CHGRCTRL0 0x5e16
@@ -29,7 +31,15 @@
#define CHT_WC_CHGRCTRL0_DBPOFF BIT(6)
#define CHT_WC_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
-#define CHT_WC_CHGRCTRL1 0x5e17
+#define CHT_WC_CHGRCTRL1 0x5e17
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_100 BIT(0)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_150 BIT(1)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_500 BIT(2)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_900 BIT(3)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_1500 BIT(4)
+#define CHT_WC_CHGRCTRL1_FTEMP_EVENT BIT(5)
+#define CHT_WC_CHGRCTRL1_OTGMODE BIT(6)
+#define CHT_WC_CHGRCTRL1_DBPEN BIT(7)
#define CHT_WC_USBSRC 0x5e29
#define CHT_WC_USBSRC_STS_MASK GENMASK(1, 0)
@@ -48,6 +58,13 @@
#define CHT_WC_USBSRC_TYPE_OTHER 8
#define CHT_WC_USBSRC_TYPE_DCP_EXTPHY 9
+#define CHT_WC_CHGDISCTRL 0x5e2f
+#define CHT_WC_CHGDISCTRL_OUT BIT(0)
+/* 0 - open drain, 1 - regular push-pull output */
+#define CHT_WC_CHGDISCTRL_DRV BIT(4)
+/* 0 - pin is controlled by SW, 1 - by HW */
+#define CHT_WC_CHGDISCTRL_FN BIT(6)
+
#define CHT_WC_PWRSRC_IRQ 0x6e03
#define CHT_WC_PWRSRC_IRQ_MASK 0x6e0f
#define CHT_WC_PWRSRC_STS 0x6e1e
@@ -65,15 +82,6 @@
#define CHT_WC_VBUS_GPIO_CTLO_DRV_OD BIT(4)
#define CHT_WC_VBUS_GPIO_CTLO_DIR_OUT BIT(5)
-enum cht_wc_usb_id {
- USB_ID_OTG,
- USB_ID_GND,
- USB_ID_FLOAT,
- USB_RID_A,
- USB_RID_B,
- USB_RID_C,
-};
-
enum cht_wc_mux_select {
MUX_SEL_PMIC = 0,
MUX_SEL_SOC,
@@ -101,9 +109,9 @@ static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
{
switch ((pwrsrc_sts & CHT_WC_PWRSRC_USBID_MASK) >> CHT_WC_PWRSRC_USBID_SHIFT) {
case CHT_WC_PWRSRC_RID_GND:
- return USB_ID_GND;
+ return INTEL_USB_ID_GND;
case CHT_WC_PWRSRC_RID_FLOAT:
- return USB_ID_FLOAT;
+ return INTEL_USB_ID_FLOAT;
case CHT_WC_PWRSRC_RID_ACA:
default:
/*
@@ -111,7 +119,7 @@ static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
* the USBID GPADC channel here and determine ACA role
* based on that.
*/
- return USB_ID_FLOAT;
+ return INTEL_USB_ID_FLOAT;
}
}
@@ -198,6 +206,30 @@ static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext,
dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret);
}
+static void cht_wc_extcon_set_otgmode(struct cht_wc_extcon_data *ext,
+ bool enable)
+{
+ unsigned int val = enable ? CHT_WC_CHGRCTRL1_OTGMODE : 0;
+ int ret;
+
+ ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL1,
+ CHT_WC_CHGRCTRL1_OTGMODE, val);
+ if (ret)
+ dev_err(ext->dev, "Error updating CHGRCTRL1 reg: %d\n", ret);
+}
+
+static void cht_wc_extcon_enable_charging(struct cht_wc_extcon_data *ext,
+ bool enable)
+{
+ unsigned int val = enable ? 0 : CHT_WC_CHGDISCTRL_OUT;
+ int ret;
+
+ ret = regmap_update_bits(ext->regmap, CHT_WC_CHGDISCTRL,
+ CHT_WC_CHGDISCTRL_OUT, val);
+ if (ret)
+ dev_err(ext->dev, "Error updating CHGDISCTRL reg: %d\n", ret);
+}
+
/* Small helper to sync EXTCON_CHG_USB_SDP and EXTCON_USB state */
static void cht_wc_extcon_set_state(struct cht_wc_extcon_data *ext,
unsigned int cable, bool state)
@@ -221,11 +253,17 @@ static void cht_wc_extcon_pwrsrc_event(struct cht_wc_extcon_data *ext)
}
id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
- if (id == USB_ID_GND) {
+ if (id == INTEL_USB_ID_GND) {
+ cht_wc_extcon_enable_charging(ext, false);
+ cht_wc_extcon_set_otgmode(ext, true);
+
/* The 5v boost causes a false VBUS / SDP detect, skip */
goto charger_det_done;
}
+ cht_wc_extcon_set_otgmode(ext, false);
+ cht_wc_extcon_enable_charging(ext, true);
+
/* Plugged into a host/charger or not connected? */
if (!(pwrsrc_sts & CHT_WC_PWRSRC_VBUS)) {
/* Route D+ and D- to PMIC for future charger detection */
@@ -248,7 +286,7 @@ set_state:
ext->previous_cable = cable;
}
- ext->usb_host = ((id == USB_ID_GND) || (id == USB_RID_A));
+ ext->usb_host = ((id == INTEL_USB_ID_GND) || (id == INTEL_USB_RID_A));
extcon_set_state_sync(ext->edev, EXTCON_USB_HOST, ext->usb_host);
}
@@ -278,6 +316,14 @@ static int cht_wc_extcon_sw_control(struct cht_wc_extcon_data *ext, bool enable)
{
int ret, mask, val;
+ val = enable ? 0 : CHT_WC_CHGDISCTRL_FN;
+ ret = regmap_update_bits(ext->regmap, CHT_WC_CHGDISCTRL,
+ CHT_WC_CHGDISCTRL_FN, val);
+ if (ret)
+ dev_err(ext->dev,
+ "Error setting sw control for CHGDIS pin: %d\n",
+ ret);
+
mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF;
val = enable ? mask : 0;
ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val);
@@ -329,7 +375,10 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
/* Enable sw control */
ret = cht_wc_extcon_sw_control(ext, true);
if (ret)
- return ret;
+ goto disable_sw_control;
+
+ /* Disable charging by external battery charger */
+ cht_wc_extcon_enable_charging(ext, false);
/* Register extcon device */
ret = devm_extcon_dev_register(ext->dev, ext->edev);
diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c
new file mode 100644
index 000000000000..f47016fb28a8
--- /dev/null
+++ b/drivers/extcon/extcon-intel-mrfld.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * extcon driver for Basin Cove PMIC
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/extcon-provider.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "extcon-intel.h"
+
+#define BCOVE_USBIDCTRL 0x19
+#define BCOVE_USBIDCTRL_ID BIT(0)
+#define BCOVE_USBIDCTRL_ACA BIT(1)
+#define BCOVE_USBIDCTRL_ALL (BCOVE_USBIDCTRL_ID | BCOVE_USBIDCTRL_ACA)
+
+#define BCOVE_USBIDSTS 0x1a
+#define BCOVE_USBIDSTS_GND BIT(0)
+#define BCOVE_USBIDSTS_RARBRC_MASK GENMASK(2, 1)
+#define BCOVE_USBIDSTS_RARBRC_SHIFT 1
+#define BCOVE_USBIDSTS_NO_ACA 0
+#define BCOVE_USBIDSTS_R_ID_A 1
+#define BCOVE_USBIDSTS_R_ID_B 2
+#define BCOVE_USBIDSTS_R_ID_C 3
+#define BCOVE_USBIDSTS_FLOAT BIT(3)
+#define BCOVE_USBIDSTS_SHORT BIT(4)
+
+#define BCOVE_CHGRIRQ_ALL (BCOVE_CHGRIRQ_VBUSDET | BCOVE_CHGRIRQ_DCDET | \
+ BCOVE_CHGRIRQ_BATTDET | BCOVE_CHGRIRQ_USBIDDET)
+
+#define BCOVE_CHGRCTRL0 0x4b
+#define BCOVE_CHGRCTRL0_CHGRRESET BIT(0)
+#define BCOVE_CHGRCTRL0_EMRGCHREN BIT(1)
+#define BCOVE_CHGRCTRL0_EXTCHRDIS BIT(2)
+#define BCOVE_CHGRCTRL0_SWCONTROL BIT(3)
+#define BCOVE_CHGRCTRL0_TTLCK BIT(4)
+#define BCOVE_CHGRCTRL0_BIT_5 BIT(5)
+#define BCOVE_CHGRCTRL0_BIT_6 BIT(6)
+#define BCOVE_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
+
+struct mrfld_extcon_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct extcon_dev *edev;
+ unsigned int status;
+ unsigned int id;
+};
+
+static const unsigned int mrfld_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
+ EXTCON_CHG_USB_CDP,
+ EXTCON_CHG_USB_DCP,
+ EXTCON_CHG_USB_ACA,
+ EXTCON_NONE,
+};
+
+static int mrfld_extcon_clear(struct mrfld_extcon_data *data, unsigned int reg,
+ unsigned int mask)
+{
+ return regmap_update_bits(data->regmap, reg, mask, 0x00);
+}
+
+static int mrfld_extcon_set(struct mrfld_extcon_data *data, unsigned int reg,
+ unsigned int mask)
+{
+ return regmap_update_bits(data->regmap, reg, mask, 0xff);
+}
+
+static int mrfld_extcon_sw_control(struct mrfld_extcon_data *data, bool enable)
+{
+ unsigned int mask = BCOVE_CHGRCTRL0_SWCONTROL;
+ struct device *dev = data->dev;
+ int ret;
+
+ if (enable)
+ ret = mrfld_extcon_set(data, BCOVE_CHGRCTRL0, mask);
+ else
+ ret = mrfld_extcon_clear(data, BCOVE_CHGRCTRL0, mask);
+ if (ret)
+ dev_err(dev, "can't set SW control: %d\n", ret);
+ return ret;
+}
+
+static int mrfld_extcon_get_id(struct mrfld_extcon_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ unsigned int id;
+ bool ground;
+ int ret;
+
+ ret = regmap_read(regmap, BCOVE_USBIDSTS, &id);
+ if (ret)
+ return ret;
+
+ if (id & BCOVE_USBIDSTS_FLOAT)
+ return INTEL_USB_ID_FLOAT;
+
+ switch ((id & BCOVE_USBIDSTS_RARBRC_MASK) >> BCOVE_USBIDSTS_RARBRC_SHIFT) {
+ case BCOVE_USBIDSTS_R_ID_A:
+ return INTEL_USB_RID_A;
+ case BCOVE_USBIDSTS_R_ID_B:
+ return INTEL_USB_RID_B;
+ case BCOVE_USBIDSTS_R_ID_C:
+ return INTEL_USB_RID_C;
+ }
+
+ /*
+ * PMIC A0 reports USBIDSTS_GND = 1 for ID_GND,
+ * but PMIC B0 reports USBIDSTS_GND = 0 for ID_GND.
+ * Thus we must check this bit last.
+ */
+ ground = id & BCOVE_USBIDSTS_GND;
+ switch ('A' + BCOVE_MAJOR(data->id)) {
+ case 'A':
+ return ground ? INTEL_USB_ID_GND : INTEL_USB_ID_FLOAT;
+ case 'B':
+ return ground ? INTEL_USB_ID_FLOAT : INTEL_USB_ID_GND;
+ }
+
+ /* Unknown or unsupported type */
+ return INTEL_USB_ID_FLOAT;
+}
+
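/*
 * Illustrative summary of the decode order in mrfld_extcon_get_id()
 * above, for a raw BCOVE_USBIDSTS value: the FLOAT bit wins first
 * (INTEL_USB_ID_FLOAT), then the RARBRC field values 1/2/3 select the
 * ACA states INTEL_USB_RID_A/B/C, and only then is the GND bit
 * consulted, with its polarity inverted on the B0 stepping as selected
 * by BCOVE_MAJOR(data->id).
 */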
+static int mrfld_extcon_role_detect(struct mrfld_extcon_data *data)
+{
+ unsigned int id;
+ bool usb_host;
+ int ret;
+
+ ret = mrfld_extcon_get_id(data);
+ if (ret < 0)
+ return ret;
+
+ id = ret;
+
+ usb_host = (id == INTEL_USB_ID_GND) || (id == INTEL_USB_RID_A);
+ extcon_set_state_sync(data->edev, EXTCON_USB_HOST, usb_host);
+
+ return 0;
+}
+
+static int mrfld_extcon_cable_detect(struct mrfld_extcon_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ unsigned int status, change;
+ int ret;
+
+ /*
+ * It seems the SCU firmware clears the content of BCOVE_CHGRIRQ1
+ * and makes it useless to the OS. Instead we compare a previously
+ * stored status to the current one, provided by BCOVE_SCHGRIRQ1.
+ */
+ ret = regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
+ if (ret)
+ return ret;
+
+ change = status ^ data->status;
+ if (!change)
+ return -ENODATA;
+
+ if (change & BCOVE_CHGRIRQ_USBIDDET) {
+ ret = mrfld_extcon_role_detect(data);
+ if (ret)
+ return ret;
+ }
+
+ data->status = status;
+
+ return 0;
+}
+
+static irqreturn_t mrfld_extcon_interrupt(int irq, void *dev_id)
+{
+ struct mrfld_extcon_data *data = dev_id;
+ int ret;
+
+ ret = mrfld_extcon_cable_detect(data);
+
+ mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
+
+ return ret ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static int mrfld_extcon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct regmap *regmap = pmic->regmap;
+ struct mrfld_extcon_data *data;
+ unsigned int id;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ data->regmap = regmap;
+
+ data->edev = devm_extcon_dev_allocate(dev, mrfld_extcon_cable);
+ if (IS_ERR(data->edev))
+ return -ENOMEM;
+
+ ret = devm_extcon_dev_register(dev, data->edev);
+ if (ret < 0) {
+ dev_err(dev, "can't register extcon device: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_extcon_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ data);
+ if (ret) {
+ dev_err(dev, "can't register IRQ handler: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(regmap, BCOVE_ID, &id);
+ if (ret) {
+ dev_err(dev, "can't read PMIC ID: %d\n", ret);
+ return ret;
+ }
+
+ data->id = id;
+
+ ret = mrfld_extcon_sw_control(data, true);
+ if (ret)
+ return ret;
+
+ /* Get initial state */
+ mrfld_extcon_role_detect(data);
+
+ mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
+ mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);
+
+ mrfld_extcon_set(data, BCOVE_USBIDCTRL, BCOVE_USBIDCTRL_ALL);
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static int mrfld_extcon_remove(struct platform_device *pdev)
+{
+ struct mrfld_extcon_data *data = platform_get_drvdata(pdev);
+
+ mrfld_extcon_sw_control(data, false);
+
+ return 0;
+}
+
+static const struct platform_device_id mrfld_extcon_id_table[] = {
+ { .name = "mrfld_bcove_pwrsrc" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_extcon_id_table);
+
+static struct platform_driver mrfld_extcon_driver = {
+ .driver = {
+ .name = "mrfld_bcove_pwrsrc",
+ },
+ .probe = mrfld_extcon_probe,
+ .remove = mrfld_extcon_remove,
+ .id_table = mrfld_extcon_id_table,
+};
+module_platform_driver(mrfld_extcon_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("extcon driver for Intel Merrifield Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-intel.h b/drivers/extcon/extcon-intel.h
new file mode 100644
index 000000000000..0ad645ec7b33
--- /dev/null
+++ b/drivers/extcon/extcon-intel.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for Intel extcon hardware
+ *
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __EXTCON_INTEL_H__
+#define __EXTCON_INTEL_H__
+
+enum extcon_intel_usb_id {
+ INTEL_USB_ID_OTG,
+ INTEL_USB_ID_GND,
+ INTEL_USB_ID_FLOAT,
+ INTEL_USB_RID_A,
+ INTEL_USB_RID_B,
+ INTEL_USB_RID_C,
+};
+
+#endif /* __EXTCON_INTEL_H__ */
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 35e784cffc23..5414eb1306aa 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -107,19 +107,8 @@ EXPORT_SYMBOL(fw_iso_buffer_init);
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
struct vm_area_struct *vma)
{
- unsigned long uaddr;
- int i, err;
-
- uaddr = vma->vm_start;
- for (i = 0; i < buffer->page_count; i++) {
- err = vm_insert_page(vma, uaddr, buffer->pages[i]);
- if (err)
- return err;
-
- uaddr += PAGE_SIZE;
- }
-
- return 0;
+ return vm_map_pages_zero(vma, buffer->pages,
+ buffer->page_count);
}
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index a128dd1126ae..515e96db4391 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -303,7 +303,7 @@ nosy_open(struct inode *inode, struct file *file)
file->private_data = client;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
fail:
kfree(client);
lynx_put(lynx);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 45c048751f3b..7183ab34269e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2939,7 +2939,6 @@ static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
- mmiowb();
ohci->mc_channels = channels;
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cac16c4b0df3..11fda9eb2466 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,20 +5,6 @@
menu "Firmware Drivers"
-config ARM_PSCI_FW
- bool
-
-config ARM_PSCI_CHECKER
- bool "ARM PSCI checker"
- depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
- help
- Run the PSCI checker during startup. This checks that hotplug and
- suspend operations work correctly when using PSCI.
-
- The torture tests may interfere with the PSCI checker by turning CPUs
- on and off through hotplug, so for now torture tests and PSCI checker
- are mutually exclusive.
-
config ARM_SCMI_PROTOCOL
bool "ARM System Control and Management Interface (SCMI) Message Protocol"
depends on ARM || ARM64 || COMPILE_TEST
@@ -267,9 +253,26 @@ config TI_SCI_PROTOCOL
This protocol library is used by client drivers to use the features
provided by the system controller.
+config TRUSTED_FOUNDATIONS
+ bool "Trusted Foundations secure monitor support"
+ depends on ARM
+ help
+ Some devices (including most early Tegra-based consumer devices on
+ the market) are booted with the Trusted Foundations secure monitor
+ active, requiring some core operations to be performed by the secure
+ monitor instead of the kernel.
+
+ This option allows the kernel to invoke the secure monitor whenever
+ required on devices using Trusted Foundations. See the functions and
+ comments in linux/firmware/trusted_foundations.h or the device tree
+ bindings for "tlm,trusted-foundations" for details on how to use it.
+
+ Choose N if you don't know what this is about.
+
config HAVE_ARM_SMCCC
bool
+source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 80feb635120f..3fa0b34eb72f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,8 +2,6 @@
#
# Makefile for the linux kernel.
#
-obj-$(CONFIG_ARM_PSCI_FW) += psci.o
-obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
@@ -23,8 +21,10 @@ obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
+obj-y += psci/
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index e6376f985ef7..9cd70d1a5622 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -165,6 +165,7 @@ static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
return err;
}
+NOKPROBE_SYMBOL(invoke_sdei_fn);
static struct sdei_event *sdei_event_find(u32 event_num)
{
@@ -879,6 +880,7 @@ static void sdei_smccc_smc(unsigned long function_id,
{
arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
+NOKPROBE_SYMBOL(sdei_smccc_smc);
static void sdei_smccc_hvc(unsigned long function_id,
unsigned long arg0, unsigned long arg1,
@@ -887,6 +889,7 @@ static void sdei_smccc_hvc(unsigned long function_id,
{
arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
+NOKPROBE_SYMBOL(sdei_smccc_hvc);
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
sdei_event_callback *critical_cb)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 099d83e4e910..fae2d5c43314 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -416,11 +416,8 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
nr++;
}
-void __init dmi_memdev_walk(void)
+static void __init dmi_memdev_walk(void)
{
- if (!dmi_available)
- return;
-
if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
if (dmi_memdev)
@@ -614,7 +611,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
return 1;
}
-void __init dmi_scan_machine(void)
+static void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
char buf[32];
@@ -769,15 +766,20 @@ static int __init dmi_init(void)
subsys_initcall(dmi_init);
/**
- * dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
+ * dmi_setup - scan and setup DMI system information
*
- * Invoke dump_stack_set_arch_desc() with DMI system information so that
- * DMI identifiers are printed out on task dumps. Arch boot code should
- * call this function after dmi_scan_machine() if it wants to print out DMI
- * identifiers on task dumps.
+ * Scan the DMI system information. This sets up DMI identifiers
+ * (dmi_system_id) for printing them out on task dumps and prepares
+ * DIMM entry information (dmi_memdev_info) from the SMBIOS table
+ * for use when reporting memory errors.
*/
-void __init dmi_set_dump_stack_arch_desc(void)
+void __init dmi_setup(void)
{
+ dmi_scan_machine();
+ if (!dmi_available)
+ return;
+
+ dmi_memdev_walk();
dump_stack_set_arch_desc("%s", dmi_ids_string);
}
@@ -841,7 +843,7 @@ static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
* returns non zero or we hit the end. Callback function is called for
* each successful match. Returns the number of matches.
*
- * dmi_scan_machine must be called before this function is called.
+ * dmi_setup must be called before this function is called.
*/
int dmi_check_system(const struct dmi_system_id *list)
{
@@ -871,7 +873,7 @@ EXPORT_SYMBOL(dmi_check_system);
* Walk the blacklist table until the first match is found. Return the
* pointer to the matching entry or NULL if there's no match.
*
- * dmi_scan_machine must be called before this function is called.
+ * dmi_setup must be called before this function is called.
*/
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
{
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 0c1af675c338..e2ac5fa5531b 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -162,13 +162,11 @@ void efi_virtmap_unload(void)
static int __init arm_dmi_init(void)
{
/*
- * On arm64/ARM, DMI depends on UEFI, and dmi_scan_machine() needs to
+ * On arm64/ARM, DMI depends on UEFI, and dmi_setup() needs to
* be called early because dmi_id_init(), which is an arch_initcall
* itself, depends on dmi_scan_machine() having been called already.
*/
- dmi_scan_machine();
- if (dmi_available)
- dmi_set_dump_stack_arch_desc();
+ dmi_setup();
return 0;
}
core_initcall(arm_dmi_init);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index b0103e16fc1b..0460c7581220 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -16,9 +16,9 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
- $(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM64) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fpie $(DISABLE_STACKLEAK_PLUGIN)
+cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
@@ -71,7 +71,6 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y)
lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y))
-STUBCOPY_RM-y := -R *ksymtab* -R *kcrctab*
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
@@ -86,12 +85,13 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
# this time, use objcopy and leave all sections in place.
#
quiet_cmd_stubcopy = STUBCPY $@
- cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
- then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
- then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
- rm -f $@; /bin/false); \
- else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \
- else /bin/false; fi
+ cmd_stubcopy = \
+ $(STRIP) --strip-debug -o $@ $<; \
+ if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then \
+ echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \
+ /bin/false; \
+ fi; \
+ $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
#
# ARM discards the .data section because it disallows r/w data in the
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index c0c0b4e4e281..f240946ed701 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -254,7 +254,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
static int vpd_sections_init(phys_addr_t physaddr)
{
- struct vpd_cbmem __iomem *temp;
+ struct vpd_cbmem *temp;
struct vpd_cbmem header;
int ret = 0;
@@ -262,7 +262,7 @@ static int vpd_sections_init(phys_addr_t physaddr)
if (!temp)
return -ENOMEM;
- memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
+ memcpy(&header, temp, sizeof(struct vpd_cbmem));
memunmap(temp);
if (header.magic != VPD_CBMEM_MAGIC)
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c51462f5aa1e..a5dc0629f225 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -425,7 +425,7 @@ static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ACPITBL_SIGNATURE:
- str += sprintf_string(str, ACPI_NAME_SIZE,
+ str += sprintf_string(str, ACPI_NAMESEG_SIZE,
entry->header->header.signature);
break;
case ISCSI_BOOT_ACPITBL_OEM_ID:
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644
index 000000000000..26a3b32bf7ab
--- /dev/null
+++ b/drivers/firmware/psci/Kconfig
@@ -0,0 +1,13 @@
+config ARM_PSCI_FW
+ bool
+
+config ARM_PSCI_CHECKER
+ bool "ARM PSCI checker"
+ depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+ help
+ Run the PSCI checker during startup. This checks that hotplug and
+ suspend operations work correctly when using PSCI.
+
+ The torture tests may interfere with the PSCI checker by turning CPUs
+ on and off through hotplug, so for now torture tests and PSCI checker
+ are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644
index 000000000000..1956b882470f
--- /dev/null
+++ b/drivers/firmware/psci/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_ARM_PSCI_FW) += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci/psci.c
index c80ec1d03274..fe090ef43d28 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -88,6 +88,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
static u32 psci_cpu_suspend_feature;
+static bool psci_system_reset2_supported;
static inline bool psci_has_ext_power_state(void)
{
@@ -95,6 +96,11 @@ static inline bool psci_has_ext_power_state(void)
PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
}
+static inline bool psci_has_osi_support(void)
+{
+ return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
+}
+
static inline bool psci_power_state_loses_context(u32 state)
{
const u32 mask = psci_has_ext_power_state() ?
@@ -253,7 +259,17 @@ static int get_set_conduit_method(struct device_node *np)
static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
- invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+ if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
+ psci_system_reset2_supported) {
+ /*
+ * reset_type[31] = 0 (architectural)
+ * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
+ * cookie = 0 (ignored by the implementation)
+ */
+ invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
+ } else {
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+ }
}
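/*
 * Note on the reset selection above: REBOOT_WARM and REBOOT_SOFT
 * requests use PSCI 1.1 SYSTEM_RESET2 with reset_type = 0, i.e. bit 31
 * clear (architectural reset) and bits 30:0 = 0 (SYSTEM_WARM_RESET),
 * plus a zero cookie, but only when psci_init_system_reset2() has
 * probed the call as supported; every other case keeps the original
 * PSCI 0.2 SYSTEM_RESET behaviour.
 */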
static void psci_sys_poweroff(void)
@@ -270,9 +286,26 @@ static int __init psci_features(u32 psci_func_id)
#ifdef CONFIG_CPU_IDLE
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+ int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
+
+ if (err) {
+ pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
+ return err;
+ }
+
+ if (!psci_power_state_is_valid(*state)) {
+ pr_warn("Invalid PSCI power state %#x\n", *state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
{
- int i, ret, count = 0;
+ int i, ret = 0, count = 0;
u32 *psci_states;
struct device_node *state_node;
@@ -291,29 +324,16 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
return -ENOMEM;
for (i = 0; i < count; i++) {
- u32 state;
-
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
+ of_node_put(state_node);
- ret = of_property_read_u32(state_node,
- "arm,psci-suspend-param",
- &state);
- if (ret) {
- pr_warn(" * %pOF missing arm,psci-suspend-param property\n",
- state_node);
- of_node_put(state_node);
+ if (ret)
goto free_mem;
- }
- of_node_put(state_node);
- pr_debug("psci-power-state %#x index %d\n", state, i);
- if (!psci_power_state_is_valid(state)) {
- pr_warn("Invalid PSCI power state %#x\n", state);
- ret = -EINVAL;
- goto free_mem;
- }
- psci_states[i] = state;
+ pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
}
+
/* Idle states parsed correctly, initialize per-cpu pointer */
per_cpu(psci_power_state, cpu) = psci_states;
return 0;
@@ -451,6 +471,16 @@ static const struct platform_suspend_ops psci_suspend_ops = {
.enter = psci_system_suspend_enter,
};
+static void __init psci_init_system_reset2(void)
+{
+ int ret;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
+
+ if (ret != PSCI_RET_NOT_SUPPORTED)
+ psci_system_reset2_supported = true;
+}
+
static void __init psci_init_system_suspend(void)
{
int ret;
@@ -588,6 +618,7 @@ static int __init psci_probe(void)
psci_init_smccc();
psci_init_cpu_suspend();
psci_init_system_suspend();
+ psci_init_system_reset2();
}
return 0;
@@ -605,9 +636,9 @@ static int __init psci_0_2_init(struct device_node *np)
int err;
err = get_set_conduit_method(np);
-
if (err)
- goto out_put_node;
+ return err;
+
/*
* Starting with v0.2, the PSCI specification introduced a call
* (PSCI_VERSION) that allows probing the firmware version, so
@@ -615,11 +646,7 @@ static int __init psci_0_2_init(struct device_node *np)
* can be carried out according to the specific version reported
* by firmware
*/
- err = psci_probe();
-
-out_put_node:
- of_node_put(np);
- return err;
+ return psci_probe();
}
/*
@@ -631,9 +658,8 @@ static int __init psci_0_1_init(struct device_node *np)
int err;
err = get_set_conduit_method(np);
-
if (err)
- goto out_put_node;
+ return err;
pr_info("Using PSCI v0.1 Function IDs from DT\n");
@@ -657,15 +683,27 @@ static int __init psci_0_1_init(struct device_node *np)
psci_ops.migrate = psci_migrate;
}
-out_put_node:
- of_node_put(np);
- return err;
+ return 0;
+}
+
+static int __init psci_1_0_init(struct device_node *np)
+{
+ int err;
+
+ err = psci_0_2_init(np);
+ if (err)
+ return err;
+
+ if (psci_has_osi_support())
+ pr_info("OSI mode supported.\n");
+
+ return 0;
}
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
- { .compatible = "arm,psci-1.0", .data = psci_0_2_init},
+ { .compatible = "arm,psci-1.0", .data = psci_1_0_init},
{},
};
@@ -674,6 +712,7 @@ int __init psci_dt_init(void)
struct device_node *np;
const struct of_device_id *matched_np;
psci_initcall_t init_fn;
+ int ret;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
@@ -681,7 +720,10 @@ int __init psci_dt_init(void)
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
- return init_fn(np);
+ ret = init_fn(np);
+
+ of_node_put(np);
+ return ret;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 346943657962..346943657962 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
new file mode 100644
index 000000000000..fd4999388ff1
--- /dev/null
+++ b/drivers/firmware/trusted_foundations.c
@@ -0,0 +1,176 @@
+/*
+ * Trusted Foundations support for ARM CPUs
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <linux/firmware/trusted_foundations.h>
+
+#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_CACHE_MAINT 0xfffff100
+
+#define TF_CACHE_ENABLE 1
+#define TF_CACHE_DISABLE 2
+
+#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
+
+#define TF_CPU_PM 0xfffffffc
+#define TF_CPU_PM_S3 0xffffffe3
+#define TF_CPU_PM_S2 0xffffffe6
+#define TF_CPU_PM_S2_NO_MC_CLK 0xffffffe5
+#define TF_CPU_PM_S1 0xffffffe4
+#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+
+static unsigned long cpu_boot_addr;
+
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+{
+ register u32 r0 asm("r0") = type;
+ register u32 r1 asm("r1") = arg1;
+ register u32 r2 asm("r2") = arg2;
+
+ asm volatile(
+ ".arch_extension sec\n\t"
+ "stmfd sp!, {r4 - r11}\n\t"
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ "mov r3, #0\n\t"
+ "mov r4, #0\n\t"
+ "smc #0\n\t"
+ "ldmfd sp!, {r4 - r11}\n\t"
+ :
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "memory", "r3", "r12", "lr");
+}
+
+static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
+{
+ cpu_boot_addr = boot_addr;
+ tf_generic_smc(TF_SET_CPU_BOOT_ADDR_SMC, cpu_boot_addr, 0);
+
+ return 0;
+}
+
+static int tf_prepare_idle(unsigned long mode)
+{
+ switch (mode) {
+ case TF_PM_MODE_LP0:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1_NO_MC_CLK:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2_NOFLUSH_L2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
+ cpu_boot_addr);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static void tf_cache_write_sec(unsigned long val, unsigned int reg)
+{
+ u32 l2x0_way_mask = 0xff;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
+ l2x0_way_mask = 0xffff;
+
+ if (val == L2X0_CTRL_EN)
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+ l2x0_saved_regs.aux_ctrl);
+ else
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
+ l2x0_way_mask);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int tf_init_cache(void)
+{
+ outer_cache.write_sec = tf_cache_write_sec;
+
+ return 0;
+}
+#endif /* CONFIG_CACHE_L2X0 */
+
+static const struct firmware_ops trusted_foundations_ops = {
+ .set_cpu_boot_addr = tf_set_cpu_boot_addr,
+ .prepare_idle = tf_prepare_idle,
+#ifdef CONFIG_CACHE_L2X0
+ .l2x0_init = tf_init_cache,
+#endif
+};
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * we are not using version information for now since currently
+ * supported SMCs are compatible with all TF releases
+ */
+ register_firmware_ops(&trusted_foundations_ops);
+}
+
+void of_register_trusted_foundations(void)
+{
+ struct device_node *node;
+ struct trusted_foundations_platform_data pdata;
+ int err;
+
+ node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+ if (!node)
+ return;
+
+ err = of_property_read_u32(node, "tlm,version-major",
+ &pdata.version_major);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-major property\n");
+ err = of_property_read_u32(node, "tlm,version-minor",
+ &pdata.version_minor);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-minor property\n");
+ register_trusted_foundations(&pdata);
+}
+
+bool trusted_foundations_registered(void)
+{
+ return firmware_ops == &trusted_foundations_ops;
+}
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index e18a786fc943..c438722bf4e1 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -102,7 +102,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
goto unlock_vm;
}
- pinned = get_user_pages_fast(region->user_addr, npages, 1,
+ pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
region->pages);
if (pinned < 0) {
ret = pinned;
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index 320cfca80d5f..e6f94501cb28 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -42,7 +42,7 @@ static int gnss_open(struct inode *inode, struct file *file)
get_device(&gdev->dev);
- nonseekable_open(inode, file);
+ stream_open(inode, file);
file->private_data = gdev;
down_write(&gdev->rwsem);
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
index 12568aebb7f6..7b05bc40532e 100644
--- a/drivers/gnss/ubx.c
+++ b/drivers/gnss/ubx.c
@@ -130,6 +130,7 @@ static void ubx_remove(struct serdev_device *serdev)
#ifdef CONFIG_OF
static const struct of_device_id ubx_of_match[] = {
+ { .compatible = "u-blox,neo-6m" },
{ .compatible = "u-blox,neo-8" },
{ .compatible = "u-blox,neo-m8" },
{},
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3f50526a771f..8023d03ec362 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -12,7 +12,6 @@ config ARCH_HAVE_CUSTOM_GPIO_H
menuconfig GPIOLIB
bool "GPIO Support"
- select ANON_INODES
help
This enables GPIO support through the generic GPIO library.
You only need to enable this, if you also want to enable
@@ -27,12 +26,12 @@ config GPIOLIB_FASTPATH_LIMIT
range 32 512
default 512
help
- This adjusts the point at which certain APIs will switch from
- using a stack allocated buffer to a dynamically allocated buffer.
+ This adjusts the point at which certain APIs will switch from
+ using a stack allocated buffer to a dynamically allocated buffer.
- You shouldn't need to change this unless you really need to
- optimize either stack space or performance. Change this carefully
- since setting an incorrect value could cause stack corruption.
+ You shouldn't need to change this unless you really need to
+ optimize either stack space or performance. Change this carefully
+ since setting an incorrect value could cause stack corruption.
config OF_GPIO
def_bool y
@@ -287,6 +286,19 @@ config GPIO_IOP
If unsure, say N.
+config GPIO_IXP4XX
+ bool "Intel IXP4xx GPIO"
+ depends on ARM # For <asm/mach-types.h>
+ depends on ARCH_IXP4XX
+ select GPIO_GENERIC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support the GPIO functionality of the Intel
+ IXP4xx series of chips.
+
+ If unsure, say N.
+
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
depends on CPU_LOONGSON2 || CPU_LOONGSON3
@@ -320,7 +332,7 @@ config GPIO_MENZ127
depends on MCB
select GPIO_GENERIC
help
- Say yes here to support the MEN 16Z127 GPIO Controller
+ Say yes here to support the MEN 16Z127 GPIO Controller
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
@@ -330,20 +342,6 @@ config GPIO_MM_LANTIQ
(EBU) found on Lantiq SoCs. The gpios are output only as they are
created by attaching a 16bit latch to the bus.
-config GPIO_MOCKUP
- tristate "GPIO Testing Driver"
- depends on GPIOLIB && SYSFS
- select GPIO_SYSFS
- select GPIOLIB_IRQCHIP
- select IRQ_SIM
- help
- This enables GPIO Testing driver, which provides a way to test GPIO
- subsystem through sysfs(or char device) and debugfs. GPIO_SYSFS
- must be selected for this test.
- User could use it through the script in
- tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
- it.
-
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -862,11 +860,11 @@ config GPIO_MAX732X
Input and Output (designed by 'P'). The combinations are listed
below:
- 8 bits: max7319 (8I), max7320 (8O), max7321 (8P),
- max7322 (4I4O), max7323 (4P4O)
+ 8 bits: max7319 (8I), max7320 (8O), max7321 (8P),
+ max7322 (4I4O), max7323 (4P4O)
- 16 bits: max7324 (8I8O), max7325 (8P8O),
- max7326 (4I12O), max7327 (4P12O)
+ 16 bits: max7324 (8I8O), max7325 (8P8O),
+ max7326 (4I12O), max7327 (4P12O)
Board setup code must specify the model to use, and the start
number for these GPIOs.
@@ -893,17 +891,17 @@ config GPIO_PCA953X
SMBus I/O expanders, made mostly by NXP or TI. Compatible
models include:
- 4 bits: pca9536, pca9537
+ 4 bits: pca9536, pca9537
- 8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
+ 8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
+ pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
- 16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
- tca6416
+ 16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
+ tca6416
- 24 bits: tca6424
+ 24 bits: tca6424
- 40 bits: pca9505, pca9698
+ 40 bits: pca9505, pca9698
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -925,7 +923,7 @@ config GPIO_PCF857X
8 bits: pcf8574, pcf8574a, pca8574, pca8574a,
pca9670, pca9672, pca9674, pca9674a,
- max7328, max7329
+ max7328, max7329
16 bits: pcf8575, pcf8575c, pca8575,
pca9671, pca9673, pca9675
@@ -1047,9 +1045,9 @@ config HTC_EGPIO
bool "HTC EGPIO support"
depends on GPIOLIB && ARM
help
- This driver supports the CPLD egpio chip present on
- several HTC phones. It provides basic support for input
- pins, output pins, and irqs.
+ This driver supports the CPLD egpio chip present on
+ several HTC phones. It provides basic support for input
+ pins, output pins, and irqs.
config GPIO_JANZ_TTL
tristate "Janz VMOD-TTL Digital IO Module"
@@ -1085,7 +1083,7 @@ config GPIO_LP873X
on LP873X PMICs.
This driver can also be built as a module. If so, the module will be
- called gpio-lp873x.
+ called gpio-lp873x.
config GPIO_LP87565
tristate "TI LP87565 GPIO"
@@ -1112,6 +1110,13 @@ config GPIO_MAX77620
driver also provides interrupt support for each of the gpios.
Say yes here to enable the max77620 to be used as gpio controller.
+config GPIO_MAX77650
+ tristate "Maxim MAX77650/77651 GPIO support"
+ depends on MFD_MAX77650
+ help
+ GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
+ These chips have a single pin that can be configured as GPIO.
+
config GPIO_MSIC
bool "Intel MSIC mixed signal gpio support"
depends on (X86 || COMPILE_TEST) && MFD_INTEL_MSIC
@@ -1316,6 +1321,13 @@ config GPIO_MERRIFIELD
help
Say Y here to support Intel Merrifield GPIO.
+config GPIO_MLXBF
+ tristate "Mellanox BlueField SoC GPIO"
+ depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
+ select GPIO_GENERIC
+ help
+ Say Y here if you want GPIO support on Mellanox BlueField SoC.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on X86 || COMPILE_TEST
@@ -1436,10 +1448,22 @@ config GPIO_VIPERBOARD
Say yes here to access the GPIO signals of Nano River
Technologies Viperboard. There are two GPIO chips on the
board: gpioa and gpiob.
- See viperboard API specification and Nano
- River Tech's viperboard.h for detailed meaning
- of the module parameters.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
endmenu
+config GPIO_MOCKUP
+ tristate "GPIO Testing Driver"
+ depends on GPIOLIB
+ select IRQ_SIM
+ help
+ This enables the GPIO Testing driver, which provides a way to test the
+ GPIO subsystem through sysfs (or the character device) and debugfs.
+ GPIO_SYSFS must be selected for this test.
+ Users can exercise it through the script in
+ tools/testing/selftests/gpio/gpio-mockup.sh; see that script for
+ usage details.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 54d55274b93a..6700eee860b7 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
+obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
@@ -80,11 +81,13 @@ obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
+obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
+obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index fb7b620763a2..e81307f9754e 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -1,21 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* 74Hx164 - Generic serial-in/parallel-out 8-bits shift register GPIO driver
*
* Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/spi/spi.h>
-#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
#define GEN_74X164_NUMBER_GPIOS 8
@@ -116,10 +113,9 @@ static int gen_74x164_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- if (of_property_read_u32(spi->dev.of_node, "registers-number",
- &nregs)) {
- dev_err(&spi->dev,
- "Missing registers-number property in the DT.\n");
+ ret = device_property_read_u32(&spi->dev, "registers-number", &nregs);
+ if (ret) {
+ dev_err(&spi->dev, "Missing 'registers-number' property.\n");
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 49616ec815ee..04247075091d 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -106,7 +106,6 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mmio_74xx_gpio_probe(struct platform_device *pdev)
{
struct mmio_74xx_gpio_priv *priv;
- struct resource *res;
void __iomem *dat;
int err;
@@ -116,8 +115,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
priv->flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dat = devm_ioremap_resource(&pdev->dev, res);
+ dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
return PTR_ERR(dat);
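
The same conversion repeats through most of the GPIO hunks that follow: the two-step platform_get_resource() plus devm_ioremap_resource() lookup collapses into a single devm_platform_ioremap_resource() call, which looks up, requests and ioremaps the memory resource in one go. A minimal probe sketch of the pattern, with the hypothetical foo_gpio names standing in for any of the drivers touched here:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_gpio_priv {
	void __iomem *base;
};

static int foo_gpio_probe(struct platform_device *pdev)
{
	struct foo_gpio_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Looks up IORESOURCE_MEM index 0, requests and maps it. */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	platform_set_drvdata(pdev, priv);
	return 0;
}
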
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index 9b78dc837603..1ffd7c2d1285 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -78,7 +78,6 @@ static int pt_gpio_probe(struct platform_device *pdev)
struct acpi_device *acpi_dev;
acpi_handle handle = ACPI_HANDLE(dev);
struct pt_gpio_chip *pt_gpio;
- struct resource *res_mem;
int ret = 0;
if (acpi_bus_get_device(handle, &acpi_dev)) {
@@ -90,12 +89,7 @@ static int pt_gpio_probe(struct platform_device *pdev)
if (!pt_gpio)
return -ENOMEM;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(&pdev->dev, "Failed to get MMIO resource for PT GPIO.\n");
- return -EINVAL;
- }
- pt_gpio->reg_base = devm_ioremap_resource(dev, res_mem);
+ pt_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pt_gpio->reg_base)) {
dev_err(&pdev->dev, "Failed to map MMIO resource for PT GPIO.\n");
return PTR_ERR(pt_gpio->reg_base);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 217507002dbc..0f1b55c7c361 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1156,15 +1156,13 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *gpio_id;
struct aspeed_gpio *gpio;
- struct resource *res;
int rc, i, banks;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index c5536a509b59..9fa6d3a967d2 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -568,7 +568,6 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *match;
- struct resource *res;
struct bcm_kona_gpio_bank *bank;
struct bcm_kona_gpio *kona_gpio;
struct gpio_chip *chip;
@@ -618,8 +617,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
return -ENXIO;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- kona_gpio->reg_base = devm_ioremap_resource(dev, res);
+ kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_gpio->reg_base)) {
ret = -ENXIO;
goto err_irq_domain;
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index aec8d5df9f30..712ae212b0b4 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -148,7 +148,6 @@ static struct irq_chip cdns_gpio_irqchip = {
static int cdns_gpio_probe(struct platform_device *pdev)
{
struct cdns_gpio_chip *cgpio;
- struct resource *res;
int ret, irq;
u32 dir_prev;
u32 num_gpios = 32;
@@ -157,8 +156,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
if (!cgpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cgpio->regs = devm_ioremap_resource(&pdev->dev, res);
+ cgpio->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cgpio->regs))
return PTR_ERR(cgpio->regs);
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index 52fd63f02134..0fbbb0edc0ba 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -19,7 +19,6 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
void __iomem *dat, *dir;
struct gpio_chip *gc;
- struct resource *res;
int err, id;
if (!np)
@@ -33,13 +32,11 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dat = devm_ioremap_resource(&pdev->dev, res);
+ dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
return PTR_ERR(dat);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dir = devm_ioremap_resource(&pdev->dev, res);
+ dir = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dir))
return PTR_ERR(dir);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 84ae04402f70..d3eda65fd6d3 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -655,7 +655,6 @@ MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
static int dwapb_gpio_probe(struct platform_device *pdev)
{
unsigned int i;
- struct resource *res;
struct dwapb_gpio *gpio;
int err;
struct device *dev = &pdev->dev;
@@ -688,8 +687,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
if (!gpio->ports)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->regs = devm_ioremap_resource(&pdev->dev, res);
+ gpio->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->regs))
return PTR_ERR(gpio->regs);
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index f0223cee9774..77092268ee95 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 45fe125823a8..8ff8ce2970d9 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -225,7 +225,6 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct ftgpio_gpio *g;
int irq;
int ret;
@@ -236,8 +235,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
g->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g->base = devm_ioremap_resource(dev, res);
+ g->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g->base))
return PTR_ERR(g->base);
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index a7b17897356e..e5fa00f8145f 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -208,7 +208,6 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
static int hlwd_gpio_probe(struct platform_device *pdev)
{
struct hlwd_gpio *hlwd;
- struct resource *regs_resource;
u32 ngpios;
int res;
@@ -216,8 +215,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
if (!hlwd)
return -ENOMEM;
- regs_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hlwd->regs = devm_ioremap_resource(&pdev->dev, regs_resource);
+ hlwd->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hlwd->regs))
return PTR_ERR(hlwd->regs);
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 8d62db447ec1..11b77d868c89 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -21,7 +21,6 @@
static int iop3xx_gpio_probe(struct platform_device *pdev)
{
- struct resource *res;
struct gpio_chip *gc;
void __iomem *base;
int err;
@@ -30,8 +29,7 @@ static int iop3xx_gpio_probe(struct platform_device *pdev)
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
new file mode 100644
index 000000000000..4b1cf7ea858d
--- /dev/null
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// IXP4 GPIO driver
+// Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+//
+// based on previous work and know-how from:
+// Deepak Saxena <dsaxena@plexity.net>
+
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+/* This include goes away with the DT transition */
+#include <linux/irqchip/irq-ixp4xx.h>
+
+#include <asm/mach-types.h>
+
+#define IXP4XX_REG_GPOUT 0x00
+#define IXP4XX_REG_GPOE 0x04
+#define IXP4XX_REG_GPIN 0x08
+#define IXP4XX_REG_GPIS 0x0C
+#define IXP4XX_REG_GPIT1 0x10
+#define IXP4XX_REG_GPIT2 0x14
+#define IXP4XX_REG_GPCLK 0x18
+#define IXP4XX_REG_GPDBSEL 0x1C
+
+/*
+ * The hardware uses 3 bits to indicate interrupt "style".
+ * we clear and set these three bits accordingly. The lower 24
+ * bits in two registers (GPIT1 and GPIT2) are used to set up
+ * the style for 8 lines each for a total of 16 GPIO lines.
+ */
+#define IXP4XX_GPIO_STYLE_ACTIVE_HIGH 0x0
+#define IXP4XX_GPIO_STYLE_ACTIVE_LOW 0x1
+#define IXP4XX_GPIO_STYLE_RISING_EDGE 0x2
+#define IXP4XX_GPIO_STYLE_FALLING_EDGE 0x3
+#define IXP4XX_GPIO_STYLE_TRANSITIONAL 0x4
+#define IXP4XX_GPIO_STYLE_MASK GENMASK(2, 0)
+#define IXP4XX_GPIO_STYLE_SIZE 3
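
A small worked example of the layout described in the comment above, assuming only the defines just given: every line owns a 3-bit style field, lines 0-7 in GPIT1 and lines 8-15 in GPIT2, shifted by three bits per line (line 10, for instance, lands in GPIT2 at bits 8:6). The helper is purely illustrative; the set_type callback later in this file does the same arithmetic inline.

static void example_style_field(unsigned int line,
				unsigned int *reg_offset, unsigned int *shift)
{
	/* Lines 0-7 live in GPIT1, lines 8-15 in GPIT2. */
	*reg_offset = (line >= 8) ? IXP4XX_REG_GPIT2 : IXP4XX_REG_GPIT1;
	/* Each line owns a 3-bit field within its register. */
	*shift = (line % 8) * IXP4XX_GPIO_STYLE_SIZE;
}
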
+
+/**
+ * struct ixp4xx_gpio - IXP4 GPIO state container
+ * @dev: containing device for this instance
+ * @fwnode: the fwnode for this GPIO chip
+ * @gc: gpiochip for this instance
+ * @domain: irqdomain for this chip instance
+ * @base: remapped I/O-memory base
+ * @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
+ * 0: level triggered
+ */
+struct ixp4xx_gpio {
+ struct device *dev;
+ struct fwnode_handle *fwnode;
+ struct gpio_chip gc;
+ struct irq_domain *domain;
+ void __iomem *base;
+ unsigned long long irq_edge;
+};
+
+/**
+ * struct ixp4xx_gpio_map - IXP4 GPIO to parent IRQ map
+ * @gpio_offset: offset of the IXP4 GPIO line
+ * @parent_hwirq: hwirq on the parent IRQ controller
+ */
+struct ixp4xx_gpio_map {
+ int gpio_offset;
+ int parent_hwirq;
+};
+
+/* GPIO lines 0..12 have corresponding IRQs, GPIOs 13..15 have no IRQs */
+static const struct ixp4xx_gpio_map ixp4xx_gpiomap[] = {
+ { .gpio_offset = 0, .parent_hwirq = 6 },
+ { .gpio_offset = 1, .parent_hwirq = 7 },
+ { .gpio_offset = 2, .parent_hwirq = 19 },
+ { .gpio_offset = 3, .parent_hwirq = 20 },
+ { .gpio_offset = 4, .parent_hwirq = 21 },
+ { .gpio_offset = 5, .parent_hwirq = 22 },
+ { .gpio_offset = 6, .parent_hwirq = 23 },
+ { .gpio_offset = 7, .parent_hwirq = 24 },
+ { .gpio_offset = 8, .parent_hwirq = 25 },
+ { .gpio_offset = 9, .parent_hwirq = 26 },
+ { .gpio_offset = 10, .parent_hwirq = 27 },
+ { .gpio_offset = 11, .parent_hwirq = 28 },
+ { .gpio_offset = 12, .parent_hwirq = 29 },
+};
+
+static void ixp4xx_gpio_irq_ack(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
+}
+
+static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ /* ACK when unmasking if not edge-triggered */
+ if (!(g->irq_edge & BIT(d->hwirq)))
+ ixp4xx_gpio_irq_ack(d);
+
+ irq_chip_unmask_parent(d);
+}
+
+static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+ int line = d->hwirq;
+ unsigned long flags;
+ u32 int_style;
+ u32 int_reg;
+ u32 val;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (line >= 8) {
+ /* pins 8-15 */
+ line -= 8;
+ int_reg = IXP4XX_REG_GPIT2;
+ } else {
+ /* pins 0-7 */
+ int_reg = IXP4XX_REG_GPIT1;
+ }
+
+ spin_lock_irqsave(&g->gc.bgpio_lock, flags);
+
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+
+ spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+
+ /* This parent only accepts level high (asserted) */
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip ixp4xx_gpio_irqchip = {
+ .name = "IXP4GPIO",
+ .irq_ack = ixp4xx_gpio_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = ixp4xx_gpio_irq_unmask,
+ .irq_set_type = ixp4xx_gpio_irq_set_type,
+};
+
+static int ixp4xx_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ixp4xx_gpio *g = gpiochip_get_data(gc);
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = offset;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static int ixp4xx_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ /* This goes away when we transition to DT */
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ixp4xx_gpio_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_gpio *g = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_gpio_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ dev_dbg(g->dev, "allocate IRQ %d..%d, hwirq %lu..%lu\n",
+ irq, irq + nr_irqs - 1,
+ hwirq, hwirq + nr_irqs - 1);
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_fwspec parent_fwspec;
+ const struct ixp4xx_gpio_map *map;
+ int j;
+
+ /* Not all lines support IRQs */
+ for (j = 0; j < ARRAY_SIZE(ixp4xx_gpiomap); j++) {
+ map = &ixp4xx_gpiomap[j];
+ if (map->gpio_offset == hwirq)
+ break;
+ }
+ if (j == ARRAY_SIZE(ixp4xx_gpiomap)) {
+ dev_err(g->dev, "can't look up hwirq %lu\n", hwirq);
+ return -EINVAL;
+ }
+ dev_dbg(g->dev, "found parent hwirq %u\n", map->parent_hwirq);
+
+ /*
+ * We set handle_bad_irq because the .set_type() should
+ * always be invoked and set the right type of handler.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixp4xx_gpio_irqchip,
+ g,
+ handle_bad_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+
+ /*
+ * Create an IRQ fwspec to send up to the parent irqdomain:
+ * specify the hwirq we address on the parent and tie it
+ * all together up the chain.
+ */
+ parent_fwspec.fwnode = d->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = map->parent_hwirq;
+ /* This parent only handles asserted level IRQs */
+ parent_fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ dev_dbg(g->dev, "alloc_irqs_parent for %d parent hwirq %d\n",
+ irq + i, map->parent_hwirq);
+ ret = irq_domain_alloc_irqs_parent(d, irq + i, 1,
+ &parent_fwspec);
+ if (ret)
+ dev_err(g->dev,
+ "failed to allocate parent hwirq %d for hwirq %lu\n",
+ map->parent_hwirq, hwirq);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops ixp4xx_gpio_irqdomain_ops = {
+ .translate = ixp4xx_gpio_irq_domain_translate,
+ .alloc = ixp4xx_gpio_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int ixp4xx_gpio_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent;
+ struct resource *res;
+ struct ixp4xx_gpio *g;
+ int ret;
+ int i;
+
+ g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+ g->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ g->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(g->base)) {
+ dev_err(dev, "ioremap error\n");
+ return PTR_ERR(g->base);
+ }
+
+ /*
+ * Make sure GPIO 14 and 15 are NOT used as clocks but as GPIOs
+ * on these specific machines.
+ */
+ if (machine_is_dsmg600() || machine_is_nas100d())
+ __raw_writel(0x0, g->base + IXP4XX_REG_GPCLK);
+
+ /*
+ * This is a very special big-endian ARM issue: when the IXP4xx is
+ * run in big endian mode, all registers in the machine are switched
+ * around to the CPU-native endianness, which is why the driver mostly
+ * uses __raw_readl()/__raw_writel() to access the registers in the
+ * appropriate order. With the GPIO library we need to specify the
+ * byte order explicitly, so this flag needs to be set when compiling
+ * for big endian.
+ */
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+ flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+#else
+ flags = 0;
+#endif
+
+ /* Populate and register gpio chip */
+ ret = bgpio_init(&g->gc, dev, 4,
+ g->base + IXP4XX_REG_GPIN,
+ g->base + IXP4XX_REG_GPOUT,
+ NULL,
+ NULL,
+ g->base + IXP4XX_REG_GPOE,
+ flags);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+ g->gc.to_irq = ixp4xx_gpio_to_irq;
+ g->gc.ngpio = 16;
+ g->gc.label = "IXP4XX_GPIO_CHIP";
+ /*
+ * TODO: when we have migrated to device tree and all GPIOs
+ * are fetched using phandles, set this to -1 to get rid of
+ * the fixed gpiochip base.
+ */
+ g->gc.base = 0;
+ g->gc.parent = &pdev->dev;
+ g->gc.owner = THIS_MODULE;
+
+ ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ if (ret) {
+ dev_err(dev, "failed to add SoC gpiochip\n");
+ return ret;
+ }
+
+ /*
+ * When we convert to device tree we will simply look up the
+ * parent irqdomain using irq_find_host(parent) as parent comes
+ * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
+ * the fwnode. For now we need this boardfile style code.
+ */
+ if (np) {
+ struct device_node *irq_parent;
+
+ irq_parent = of_irq_find_parent(np);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+ g->fwnode = of_node_to_fwnode(np);
+ } else {
+ parent = ixp4xx_get_irq_domain();
+ g->fwnode = irq_domain_alloc_fwnode(g->base);
+ if (!g->fwnode) {
+ dev_err(dev, "no domain base\n");
+ return -ENODEV;
+ }
+ }
+ g->domain = irq_domain_create_hierarchy(parent,
+ IRQ_DOMAIN_FLAG_HIERARCHY,
+ ARRAY_SIZE(ixp4xx_gpiomap),
+ g->fwnode,
+ &ixp4xx_gpio_irqdomain_ops,
+ g);
+ if (!g->domain) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev, "no hierarchical irq domain\n");
+ return -ENODEV;
+ }
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ if (!np) {
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_gpiomap); i++) {
+ const struct ixp4xx_gpio_map *map = &ixp4xx_gpiomap[i];
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ /* This is the hwirq for the GPIO line side of things */
+ fwspec.param[0] = map->gpio_offset;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(g->domain,
+ -1, /* just pick something */
+ 1,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev,
+ "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+ map->gpio_offset, map->parent_hwirq,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ platform_set_drvdata(pdev, g);
+ dev_info(dev, "IXP4 GPIO @%p registered\n", g->base);
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_gpio_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-gpio",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_gpio_driver = {
+ .driver = {
+ .name = "ixp4xx-gpio",
+ .of_match_table = of_match_ptr(ixp4xx_gpio_of_match),
+ },
+ .probe = ixp4xx_gpio_probe,
+};
+builtin_platform_driver(ixp4xx_gpio_driver);
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 6b9bf8b7bf16..b97a91166497 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -147,7 +147,6 @@ static int ttl_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ttl_module *mod;
struct gpio_chip *gpio;
- struct resource *res;
int ret;
pdata = dev_get_platdata(&pdev->dev);
@@ -164,8 +163,7 @@ static int ttl_probe(struct platform_device *pdev)
spin_lock_init(&mod->lock);
/* get access to the MODULbus registers for this module */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mod->regs = devm_ioremap_resource(dev, res);
+ mod->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mod->regs))
return PTR_ERR(mod->regs);
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index fca84ccac35c..1b1ee94eeab4 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -47,15 +47,13 @@ static int ls1x_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
- struct resource *res;
int ret;
gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio_reg_base = devm_ioremap_resource(dev, res);
+ gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index d441dbaed7a3..d711ae06747e 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -340,10 +340,7 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev)
index = of_property_match_string(dev->of_node, "reg-names", "gpio");
if (index < 0) {
/* To support backward compatibility take the first resource */
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gc->base = devm_ioremap_resource(dev, res);
+ gc->base = devm_platform_ioremap_resource(pdev, 0);
} else {
struct resource res;
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
new file mode 100644
index 000000000000..3f03f4e8956c
--- /dev/null
+++ b/drivers/gpio/gpio-max77650.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// GPIO driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX77650_GPIO_DIR_MASK BIT(0)
+#define MAX77650_GPIO_INVAL_MASK BIT(1)
+#define MAX77650_GPIO_DRV_MASK BIT(2)
+#define MAX77650_GPIO_OUTVAL_MASK BIT(3)
+#define MAX77650_GPIO_DEBOUNCE_MASK BIT(4)
+
+#define MAX77650_GPIO_DIR_OUT 0x00
+#define MAX77650_GPIO_DIR_IN BIT(0)
+#define MAX77650_GPIO_OUT_LOW 0x00
+#define MAX77650_GPIO_OUT_HIGH BIT(3)
+#define MAX77650_GPIO_DRV_OPEN_DRAIN 0x00
+#define MAX77650_GPIO_DRV_PUSH_PULL BIT(2)
+#define MAX77650_GPIO_DEBOUNCE BIT(4)
+
+#define MAX77650_GPIO_DIR_BITS(_reg) \
+ ((_reg) & MAX77650_GPIO_DIR_MASK)
+#define MAX77650_GPIO_INVAL_BITS(_reg) \
+ (((_reg) & MAX77650_GPIO_INVAL_MASK) >> 1)
+
+struct max77650_gpio_chip {
+ struct regmap *map;
+ struct gpio_chip gc;
+ int irq;
+};
+
+static int max77650_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DIR_MASK,
+ MAX77650_GPIO_DIR_IN);
+}
+
+static int max77650_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ int mask, regval;
+
+ mask = MAX77650_GPIO_DIR_MASK | MAX77650_GPIO_OUTVAL_MASK;
+ regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
+ regval |= MAX77650_GPIO_DIR_OUT;
+
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO, mask, regval);
+}
+
+static void max77650_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ int rv, regval;
+
+ regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
+
+ rv = regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_OUTVAL_MASK, regval);
+ if (rv)
+ dev_err(gc->parent, "cannot set GPIO value: %d\n", rv);
+}
+
+static int max77650_gpio_get_value(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ unsigned int val;
+ int rv;
+
+ rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val);
+ if (rv)
+ return rv;
+
+ return MAX77650_GPIO_INVAL_BITS(val);
+}
+
+static int max77650_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ unsigned int val;
+ int rv;
+
+ rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val);
+ if (rv)
+ return rv;
+
+ return MAX77650_GPIO_DIR_BITS(val);
+}
+
+static int max77650_gpio_set_config(struct gpio_chip *gc,
+ unsigned int offset, unsigned long cfg)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ switch (pinconf_to_config_param(cfg)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DRV_MASK,
+ MAX77650_GPIO_DRV_OPEN_DRAIN);
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DRV_MASK,
+ MAX77650_GPIO_DRV_PUSH_PULL);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DEBOUNCE_MASK,
+ MAX77650_GPIO_DEBOUNCE);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int max77650_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ return chip->irq;
+}
+
+static int max77650_gpio_probe(struct platform_device *pdev)
+{
+ struct max77650_gpio_chip *chip;
+ struct device *dev, *parent;
+ struct i2c_client *i2c;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+ i2c = to_i2c_client(parent);
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->map = dev_get_regmap(parent, NULL);
+ if (!chip->map)
+ return -ENODEV;
+
+ chip->irq = platform_get_irq_byname(pdev, "GPI");
+ if (chip->irq < 0)
+ return chip->irq;
+
+ chip->gc.base = -1;
+ chip->gc.ngpio = 1;
+ chip->gc.label = i2c->name;
+ chip->gc.parent = dev;
+ chip->gc.owner = THIS_MODULE;
+ chip->gc.can_sleep = true;
+
+ chip->gc.direction_input = max77650_gpio_direction_input;
+ chip->gc.direction_output = max77650_gpio_direction_output;
+ chip->gc.set = max77650_gpio_set_value;
+ chip->gc.get = max77650_gpio_get_value;
+ chip->gc.get_direction = max77650_gpio_get_direction;
+ chip->gc.set_config = max77650_gpio_set_config;
+ chip->gc.to_irq = max77650_gpio_to_irq;
+
+ return devm_gpiochip_add_data(dev, &chip->gc, chip);
+}
+
+static struct platform_driver max77650_gpio_driver = {
+ .driver = {
+ .name = "max77650-gpio",
+ },
+ .probe = max77650_gpio_probe,
+};
+module_platform_driver(max77650_gpio_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 GPIO driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
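
For context, a consumer would request the single line this chip exposes through the standard gpiod consumer API; the function and the "enable" connection id below are hypothetical, only the gpiod calls themselves are the stock API:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_use_max77650_line(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* The chip is marked can_sleep, so use the _cansleep accessor. */
	gpiod_set_value_cansleep(gpiod, 1);
	return 0;
}
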
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 3134c0d2bfe4..9308081e0a4a 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -146,7 +146,6 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
static int mb86s70_gpio_probe(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip;
- struct resource *res;
int ret;
gchip = devm_kzalloc(&pdev->dev, sizeof(*gchip), GFP_KERNEL);
@@ -155,8 +154,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gchip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gchip->base = devm_ioremap_resource(&pdev->dev, res);
+ gchip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 7c659fdaa6d5..3302125e5265 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -377,10 +377,20 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
}
}
-static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
{
- const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
- return dev_name ? dev_name : "pinctrl-merrifield";
+ struct acpi_device *adev;
+ const char *name;
+
+ adev = acpi_dev_get_first_match_dev("INTC1002", NULL, -1);
+ if (adev) {
+ name = devm_kstrdup(priv->dev, acpi_dev_name(adev), GFP_KERNEL);
+ acpi_dev_put(adev);
+ } else {
+ name = "pinctrl-merrifield";
+ }
+
+ return name;
}
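
The new helper follows a get/copy/put pattern: acpi_dev_get_first_match_dev() hands back a referenced struct acpi_device, so the name is duplicated into device-managed memory before the reference is dropped. A stripped-down sketch of that pattern, with the hypothetical example_acpi_name() standing in for the Merrifield specifics:

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/slab.h>

static const char *example_acpi_name(struct device *dev, const char *hid)
{
	struct acpi_device *adev;
	const char *name;

	adev = acpi_dev_get_first_match_dev(hid, NULL, -1);
	if (!adev)
		return NULL;

	/* Copy the name while the reference is still held... */
	name = devm_kstrdup(dev, acpi_dev_name(adev), GFP_KERNEL);
	/* ...then drop the reference. */
	acpi_dev_put(adev);

	return name;
}
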
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -441,7 +451,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
}
- pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name(priv);
for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
range = &mrfld_gpio_ranges[i];
retval = gpiochip_add_pin_range(&priv->chip,
diff --git a/drivers/gpio/gpio-mlxbf.c b/drivers/gpio/gpio-mlxbf.c
new file mode 100644
index 000000000000..894aaf55fc96
--- /dev/null
+++ b/drivers/gpio/gpio-mlxbf.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+/* Number of pins on BlueField */
+#define MLXBF_GPIO_NR 54
+
+/* Pad Electrical Controls. */
+#define MLXBF_GPIO_PAD_CONTROL_FIRST_WORD 0x0700
+#define MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD 0x0708
+#define MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD 0x0710
+#define MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD 0x0718
+
+#define MLXBF_GPIO_PIN_DIR_I 0x1040
+#define MLXBF_GPIO_PIN_DIR_O 0x1048
+#define MLXBF_GPIO_PIN_STATE 0x1000
+#define MLXBF_GPIO_SCRATCHPAD 0x20
+
+#ifdef CONFIG_PM
+struct mlxbf_gpio_context_save_regs {
+ u64 scratchpad;
+ u64 pad_control[MLXBF_GPIO_NR];
+ u64 pin_dir_i;
+ u64 pin_dir_o;
+};
+#endif
+
+/* Device state structure. */
+struct mlxbf_gpio_state {
+ struct gpio_chip gc;
+
+ /* Memory Address */
+ void __iomem *base;
+
+#ifdef CONFIG_PM
+ struct mlxbf_gpio_context_save_regs csave_regs;
+#endif
+};
+
+static int mlxbf_gpio_probe(struct platform_device *pdev)
+{
+ struct mlxbf_gpio_state *gs;
+ struct device *dev = &pdev->dev;
+ struct gpio_chip *gc;
+ int ret;
+
+ gs = devm_kzalloc(&pdev->dev, sizeof(*gs), GFP_KERNEL);
+ if (!gs)
+ return -ENOMEM;
+
+ gs->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gs->base))
+ return PTR_ERR(gs->base);
+
+ gc = &gs->gc;
+ ret = bgpio_init(gc, dev, 8,
+ gs->base + MLXBF_GPIO_PIN_STATE,
+ NULL,
+ NULL,
+ gs->base + MLXBF_GPIO_PIN_DIR_O,
+ gs->base + MLXBF_GPIO_PIN_DIR_I,
+ 0);
+ if (ret)
+ return -ENODEV;
+
+ gc->owner = THIS_MODULE;
+ gc->ngpio = MLXBF_GPIO_NR;
+
+ ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, gs);
+ dev_info(&pdev->dev, "registered Mellanox BlueField GPIO");
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mlxbf_gpio_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mlxbf_gpio_state *gs = platform_get_drvdata(pdev);
+
+ gs->csave_regs.scratchpad = readq(gs->base + MLXBF_GPIO_SCRATCHPAD);
+ gs->csave_regs.pad_control[0] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_FIRST_WORD);
+ gs->csave_regs.pad_control[1] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD);
+ gs->csave_regs.pad_control[2] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD);
+ gs->csave_regs.pad_control[3] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD);
+ gs->csave_regs.pin_dir_i = readq(gs->base + MLXBF_GPIO_PIN_DIR_I);
+ gs->csave_regs.pin_dir_o = readq(gs->base + MLXBF_GPIO_PIN_DIR_O);
+
+ return 0;
+}
+
+static int mlxbf_gpio_resume(struct platform_device *pdev)
+{
+ struct mlxbf_gpio_state *gs = platform_get_drvdata(pdev);
+
+ writeq(gs->csave_regs.scratchpad, gs->base + MLXBF_GPIO_SCRATCHPAD);
+ writeq(gs->csave_regs.pad_control[0],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[1],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[2],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[3],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD);
+ writeq(gs->csave_regs.pin_dir_i, gs->base + MLXBF_GPIO_PIN_DIR_I);
+ writeq(gs->csave_regs.pin_dir_o, gs->base + MLXBF_GPIO_PIN_DIR_O);
+
+ return 0;
+}
+#endif
+
+static const struct acpi_device_id mlxbf_gpio_acpi_match[] = {
+ { "MLNXBF02", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_gpio_acpi_match);
+
+static struct platform_driver mlxbf_gpio_driver = {
+ .driver = {
+ .name = "mlxbf_gpio",
+ .acpi_match_table = ACPI_PTR(mlxbf_gpio_acpi_match),
+ },
+ .probe = mlxbf_gpio_probe,
+#ifdef CONFIG_PM
+ .suspend = mlxbf_gpio_suspend,
+ .resume = mlxbf_gpio_resume,
+#endif
+};
+
+module_platform_driver(mlxbf_gpio_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField GPIO Driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 50bdc29591c0..6f904c874678 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -134,17 +134,6 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
unsigned long pinmask = bgpio_line2mask(gc, gpio);
bool dir = !!(gc->bgpio_dir & pinmask);
- /*
- * If the direction is OUT we read the value from the SET
- * register, and if the direction is IN we read the value
- * from the DAT register.
- *
- * If the direction bits are inverted, naturally this gets
- * inverted too.
- */
- if (gc->bgpio_dir_inverted)
- dir = !dir;
-
if (dir)
return !!(gc->read_reg(gc->reg_set) & pinmask);
else
@@ -164,14 +153,8 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- /* Exploit the fact that we know which directions are set */
- if (gc->bgpio_dir_inverted) {
- set_mask = *mask & ~gc->bgpio_dir;
- get_mask = *mask & gc->bgpio_dir;
- } else {
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
- }
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -372,11 +355,12 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- if (gc->bgpio_dir_inverted)
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- else
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+
+ if (gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (gc->reg_dir_out)
+ gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -385,11 +369,16 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
- /* Return 0 if output, 1 of input */
- if (gc->bgpio_dir_inverted)
- return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
- else
- return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ /* Return 0 if output, 1 if input */
+ if (gc->bgpio_dir_unreadable)
+ return !(gc->bgpio_dir & bgpio_line2mask(gc, gpio));
+ if (gc->reg_dir_out)
+ return !(gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio));
+ if (gc->reg_dir_in)
+ return !!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio));
+
+ /* This should not happen */
+ return 1;
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -400,11 +389,12 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- if (gc->bgpio_dir_inverted)
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
- else
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+
+ if (gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (gc->reg_dir_out)
+ gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -537,19 +527,12 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
void __iomem *dirin,
unsigned long flags)
{
- if (dirout && dirin) {
- return -EINVAL;
- } else if (dirout) {
- gc->reg_dir = dirout;
- gc->direction_output = bgpio_dir_out;
- gc->direction_input = bgpio_dir_in;
- gc->get_direction = bgpio_get_dir;
- } else if (dirin) {
- gc->reg_dir = dirin;
+ if (dirout || dirin) {
+ gc->reg_dir_out = dirout;
+ gc->reg_dir_in = dirin;
gc->direction_output = bgpio_dir_out;
gc->direction_input = bgpio_dir_in;
gc->get_direction = bgpio_get_dir;
- gc->bgpio_dir_inverted = true;
} else {
if (flags & BGPIOF_NO_OUTPUT)
gc->direction_output = bgpio_dir_out_err;
@@ -588,11 +571,11 @@ static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
* @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed
* that setting a line to 1 in this register will turn that line into an
* output line. Conversely, setting the line to 0 will turn that line into
- * an input. Either this or @dirin can be defined, but never both.
+ * an input.
* @dirin: MMIO address for the register to set this line as INPUT. It is assumed
* that setting a line to 1 in this register will turn that line into an
* input line. Conversely, setting the line to 0 will turn that line into
- * an output. Either this or @dirout can be defined, but never both.
+ * an output.
* @flags: Different flags that will affect the behaviour of the device, such as
* endianness etc.
*/
@@ -634,8 +617,28 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
if (gc->set == bgpio_set_set &&
!(flags & BGPIOF_UNREADABLE_REG_SET))
gc->bgpio_data = gc->read_reg(gc->reg_set);
- if (gc->reg_dir && !(flags & BGPIOF_UNREADABLE_REG_DIR))
- gc->bgpio_dir = gc->read_reg(gc->reg_dir);
+
+ if (flags & BGPIOF_UNREADABLE_REG_DIR)
+ gc->bgpio_dir_unreadable = true;
+
+ /*
+ * Inspect hardware to find initial direction setting.
+ */
+ if ((gc->reg_dir_out || gc->reg_dir_in) &&
+ !(flags & BGPIOF_UNREADABLE_REG_DIR)) {
+ if (gc->reg_dir_out)
+ gc->bgpio_dir = gc->read_reg(gc->reg_dir_out);
+ else if (gc->reg_dir_in)
+ gc->bgpio_dir = ~gc->read_reg(gc->reg_dir_in);
+ /*
+ * If we have two direction registers, synchronise
+	 * the input setting to the output setting: the library
+	 * cannot handle a line being input and output at
+ * the same time.
+ */
+ if (gc->reg_dir_out && gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ }
return ret;
}
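
For context, a minimal sketch of how an MMIO GPIO driver could hand both direction registers to the gpio-mmio library after the change above. The device, register offsets and helper name are hypothetical; only the bgpio_init() signature and the reg_dir_out/reg_dir_in handling come from the patch.

#include <linux/gpio/driver.h>
#include <linux/io.h>

/* Hypothetical register map of an 8-line MMIO GPIO block. */
#define FOO_DAT		0x00	/* input data */
#define FOO_SET		0x04	/* output data */
#define FOO_DIROUT	0x08	/* 1 = line is an output */
#define FOO_DIRIN	0x0c	/* 1 = line is an input */

static int foo_gpio_setup(struct device *dev, struct gpio_chip *gc,
			  void __iomem *base)
{
	/*
	 * Both dirout and dirin may now be passed together; the library
	 * keeps the two registers in sync and tracks the shadow
	 * direction word in gc->bgpio_dir.
	 */
	return bgpio_init(gc, dev, 1, base + FOO_DAT, base + FOO_SET, NULL,
			  base + FOO_DIROUT, base + FOO_DIRIN, 0);
}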
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 74401e0adb29..79654fb2e50f 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -293,7 +293,6 @@ mediatek_gpio_bank_probe(struct device *dev,
static int
mediatek_gpio_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk *mtk;
@@ -304,7 +303,7 @@ mediatek_gpio_probe(struct platform_device *pdev)
if (!mtk)
return -ENOMEM;
- mtk->base = devm_ioremap_resource(dev, res);
+ mtk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mtk->base))
return PTR_ERR(mtk->base);
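
The same mechanical conversion repeats through most of the drivers touched below: the platform_get_resource() + devm_ioremap_resource() pair is collapsed into a single devm_platform_ioremap_resource() call with identical error semantics. A before/after sketch for a hypothetical probe helper:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Before: fetch the MEM resource by hand, then map it. */
static void __iomem *foo_map_old(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	return devm_ioremap_resource(&pdev->dev, res);
}

/* After: one call; still returns an ERR_PTR() on failure. */
static void __iomem *foo_map_new(struct platform_device *pdev)
{
	return devm_platform_ioremap_resource(pdev, 0);
}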
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index f97ed32b8beb..059094ac44cb 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1038,11 +1038,9 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
static int mvebu_gpio_probe_raw(struct platform_device *pdev,
struct mvebu_gpio_chip *mvchip)
{
- struct resource *res;
void __iomem *base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1062,8 +1060,7 @@ static int mvebu_gpio_probe_raw(struct platform_device *pdev,
* per-CPU registers
*/
if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index e86e61dda4b7..b2813580c582 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -411,7 +411,6 @@ static int mxc_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
- struct resource *iores;
int irq_base;
int err;
@@ -423,8 +422,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- port->base = devm_ioremap_resource(&pdev->dev, iores);
+ port->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 1b19c88ea7bb..afb0e8a791e5 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -82,7 +82,6 @@ static int octeon_gpio_probe(struct platform_device *pdev)
{
struct octeon_gpio *gpio;
struct gpio_chip *chip;
- struct resource *res_mem;
void __iomem *reg_base;
int err = 0;
@@ -91,8 +90,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
chip = &gpio->chip;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 7f33024b6d83..16289bafa001 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -31,8 +31,6 @@
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
-#define OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER BIT(2)
-
struct gpio_regs {
u32 irqenable1;
u32 irqenable2;
@@ -48,13 +46,6 @@ struct gpio_regs {
u32 debounce_en;
};
-struct gpio_bank;
-
-struct gpio_omap_funcs {
- void (*idle_enable_level_quirk)(struct gpio_bank *bank);
- void (*idle_disable_level_quirk)(struct gpio_bank *bank);
-};
-
struct gpio_bank {
struct list_head node;
void __iomem *base;
@@ -62,7 +53,6 @@ struct gpio_bank {
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
struct gpio_regs context;
- struct gpio_omap_funcs funcs;
u32 saved_datain;
u32 level_mask;
u32 toggle_mask;
@@ -83,8 +73,6 @@ struct gpio_bank {
int stride;
u32 width;
int context_loss_count;
- bool workaround_enabled;
- u32 quirks;
void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
void (*set_dataout_multiple)(struct gpio_bank *bank,
@@ -353,6 +341,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
}
}
+/*
+ * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
+ * See the GPIO chapter of the TRM, section "Wake-Up Generation", for the list
+ * of GPIOs in the wakeup domain. If bank->non_wakeup_gpios is not configured,
+ * assume none are capable of waking up the system from off mode.
+ */
+static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
+{
+ u32 no_wake = bank->non_wakeup_gpios;
+
+ if (no_wake)
+ return !!(~no_wake & gpio_mask);
+
+ return false;
+}
+
static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
@@ -363,10 +367,16 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
trigger & IRQ_TYPE_LEVEL_LOW);
omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
trigger & IRQ_TYPE_LEVEL_HIGH);
+
+ /*
+	 * We need edge detection enabled to allow the GPIO block
+	 * to be woken from the idle state. Set the appropriate edge detection
+ * in addition to the level detection.
+ */
omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
+ trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
+ trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));
bank->context.leveldetect0 =
readl_relaxed(bank->base + bank->regs->leveldetect0);
@@ -384,13 +394,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
- if (!bank->regs->irqctrl) {
- /* On omap24xx proceed only when valid GPIO bit is set */
- if (bank->non_wakeup_gpios) {
- if (!(bank->non_wakeup_gpios & gpio_bit))
- goto exit;
- }
-
+ if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
/*
* Log the edge gpio and manually trigger the IRQ
* after resume if the input level changes
@@ -403,7 +407,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
-exit:
bank->level_mask =
readl_relaxed(bank->base + bank->regs->leveldetect0) |
readl_relaxed(bank->base + bank->regs->leveldetect1);
@@ -896,44 +899,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
-/*
- * Only edges can generate a wakeup event to the PRCM.
- *
- * Therefore, ensure any wake-up capable GPIOs have
- * edge-detection enabled before going idle to ensure a wakeup
- * to the PRCM is generated on a GPIO transition. (c.f. 34xx
- * NDA TRM 25.5.3.1)
- *
- * The normal values will be restored upon ->runtime_resume()
- * by writing back the values saved in bank->context.
- */
-static void __maybe_unused
-omap2_gpio_enable_level_quirk(struct gpio_bank *bank)
-{
- u32 wake_low, wake_hi;
-
- /* Enable additional edge detection for level gpios for idle */
- wake_low = bank->context.leveldetect0 & bank->context.wake_en;
- if (wake_low)
- writel_relaxed(wake_low | bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
-
- wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
- if (wake_hi)
- writel_relaxed(wake_hi | bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
-}
-
-static void __maybe_unused
-omap2_gpio_disable_level_quirk(struct gpio_bank *bank)
-{
- /* Disable edge detection for level gpios after idle */
- writel_relaxed(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- writel_relaxed(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
-}
-
/*---------------------------------------------------------------------*/
static int omap_mpuio_suspend_noirq(struct device *dev)
@@ -1251,203 +1216,70 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
return ret;
}
-static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context);
-static void omap_gpio_unidle(struct gpio_bank *bank);
-
-static int gpio_omap_cpu_notifier(struct notifier_block *nb,
- unsigned long cmd, void *v)
+static void omap_gpio_init_context(struct gpio_bank *p)
{
- struct gpio_bank *bank;
- unsigned long flags;
+ struct omap_gpio_reg_offs *regs = p->regs;
+ void __iomem *base = p->base;
- bank = container_of(nb, struct gpio_bank, nb);
+ p->context.ctrl = readl_relaxed(base + regs->ctrl);
+ p->context.oe = readl_relaxed(base + regs->direction);
+ p->context.wake_en = readl_relaxed(base + regs->wkup_en);
+ p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
+ p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
+ p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
+ p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
+ p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
+ p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
- raw_spin_lock_irqsave(&bank->lock, flags);
- switch (cmd) {
- case CPU_CLUSTER_PM_ENTER:
- if (bank->is_suspended)
- break;
- omap_gpio_idle(bank, true);
- break;
- case CPU_CLUSTER_PM_ENTER_FAILED:
- case CPU_CLUSTER_PM_EXIT:
- if (bank->is_suspended)
- break;
- omap_gpio_unidle(bank);
- break;
- }
- raw_spin_unlock_irqrestore(&bank->lock, flags);
+ if (regs->set_dataout && p->regs->clr_dataout)
+ p->context.dataout = readl_relaxed(base + regs->set_dataout);
+ else
+ p->context.dataout = readl_relaxed(base + regs->dataout);
- return NOTIFY_OK;
+ p->context_valid = true;
}
-static const struct of_device_id omap_gpio_match[];
-
-static int omap_gpio_probe(struct platform_device *pdev)
+static void omap_gpio_restore_context(struct gpio_bank *bank)
{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- const struct of_device_id *match;
- const struct omap_gpio_platform_data *pdata;
- struct resource *res;
- struct gpio_bank *bank;
- struct irq_chip *irqc;
- int ret;
-
- match = of_match_device(of_match_ptr(omap_gpio_match), dev);
-
- pdata = match ? match->data : dev_get_platdata(dev);
- if (!pdata)
- return -EINVAL;
-
- bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
- if (!bank)
- return -ENOMEM;
-
- irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
- if (!irqc)
- return -ENOMEM;
-
- irqc->irq_startup = omap_gpio_irq_startup,
- irqc->irq_shutdown = omap_gpio_irq_shutdown,
- irqc->irq_ack = omap_gpio_ack_irq,
- irqc->irq_mask = omap_gpio_mask_irq,
- irqc->irq_unmask = omap_gpio_unmask_irq,
- irqc->irq_set_type = omap_gpio_irq_type,
- irqc->irq_set_wake = omap_gpio_wake_enable,
- irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
- irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
- irqc->name = dev_name(&pdev->dev);
- irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
- irqc->parent_device = dev;
-
- bank->irq = platform_get_irq(pdev, 0);
- if (bank->irq <= 0) {
- if (!bank->irq)
- bank->irq = -ENXIO;
- if (bank->irq != -EPROBE_DEFER)
- dev_err(dev,
- "can't get irq resource ret=%d\n", bank->irq);
- return bank->irq;
- }
-
- bank->chip.parent = dev;
- bank->chip.owner = THIS_MODULE;
- bank->dbck_flag = pdata->dbck_flag;
- bank->quirks = pdata->quirks;
- bank->stride = pdata->bank_stride;
- bank->width = pdata->bank_width;
- bank->is_mpuio = pdata->is_mpuio;
- bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
- bank->regs = pdata->regs;
-#ifdef CONFIG_OF_GPIO
- bank->chip.of_node = of_node_get(node);
-#endif
-
- if (node) {
- if (!of_property_read_bool(node, "ti,gpio-always-on"))
- bank->loses_context = true;
- } else {
- bank->loses_context = pdata->loses_context;
-
- if (bank->loses_context)
- bank->get_context_loss_count =
- pdata->get_context_loss_count;
- }
-
- if (bank->regs->set_dataout && bank->regs->clr_dataout) {
- bank->set_dataout = omap_set_gpio_dataout_reg;
- bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
- } else {
- bank->set_dataout = omap_set_gpio_dataout_mask;
- bank->set_dataout_multiple =
- omap_set_gpio_dataout_mask_multiple;
- }
-
- if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
- bank->funcs.idle_enable_level_quirk =
- omap2_gpio_enable_level_quirk;
- bank->funcs.idle_disable_level_quirk =
- omap2_gpio_disable_level_quirk;
- }
-
- raw_spin_lock_init(&bank->lock);
- raw_spin_lock_init(&bank->wa_lock);
-
- /* Static mapping, never released */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(bank->base)) {
- return PTR_ERR(bank->base);
- }
-
- if (bank->dbck_flag) {
- bank->dbck = devm_clk_get(dev, "dbclk");
- if (IS_ERR(bank->dbck)) {
- dev_err(dev,
- "Could not get gpio dbck. Disable debounce\n");
- bank->dbck_flag = false;
- } else {
- clk_prepare(bank->dbck);
- }
- }
-
- platform_set_drvdata(pdev, bank);
-
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
-
- if (bank->is_mpuio)
- omap_mpuio_init(bank);
-
- omap_gpio_mod_init(bank);
-
- ret = omap_gpio_chip_init(bank, irqc);
- if (ret) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- if (bank->dbck_flag)
- clk_unprepare(bank->dbck);
- return ret;
- }
-
- omap_gpio_show_rev(bank);
+ writel_relaxed(bank->context.wake_en,
+ bank->base + bank->regs->wkup_en);
+ writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
+ writel_relaxed(bank->context.leveldetect0,
+ bank->base + bank->regs->leveldetect0);
+ writel_relaxed(bank->context.leveldetect1,
+ bank->base + bank->regs->leveldetect1);
+ writel_relaxed(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
+ writel_relaxed(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ writel_relaxed(bank->context.dataout,
+ bank->base + bank->regs->set_dataout);
+ else
+ writel_relaxed(bank->context.dataout,
+ bank->base + bank->regs->dataout);
+ writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
- if (bank->funcs.idle_enable_level_quirk &&
- bank->funcs.idle_disable_level_quirk) {
- bank->nb.notifier_call = gpio_omap_cpu_notifier;
- cpu_pm_register_notifier(&bank->nb);
+ if (bank->dbck_enable_mask) {
+ writel_relaxed(bank->context.debounce, bank->base +
+ bank->regs->debounce);
+ writel_relaxed(bank->context.debounce_en,
+ bank->base + bank->regs->debounce_en);
}
- pm_runtime_put(dev);
-
- return 0;
-}
-
-static int omap_gpio_remove(struct platform_device *pdev)
-{
- struct gpio_bank *bank = platform_get_drvdata(pdev);
-
- if (bank->nb.notifier_call)
- cpu_pm_unregister_notifier(&bank->nb);
- list_del(&bank->node);
- gpiochip_remove(&bank->chip);
- pm_runtime_disable(&pdev->dev);
- if (bank->dbck_flag)
- clk_unprepare(bank->dbck);
-
- return 0;
+ writel_relaxed(bank->context.irqenable1,
+ bank->base + bank->regs->irqenable);
+ writel_relaxed(bank->context.irqenable2,
+ bank->base + bank->regs->irqenable2);
}
-static void omap_gpio_restore_context(struct gpio_bank *bank);
-
static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
- u32 l1 = 0, l2 = 0;
+ void __iomem *base = bank->base;
+ u32 nowake;
- if (bank->funcs.idle_enable_level_quirk)
- bank->funcs.idle_enable_level_quirk(bank);
+ bank->saved_datain = readl_relaxed(base + bank->regs->datain);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
@@ -1456,22 +1288,15 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
goto update_gpio_context_count;
/*
- * If going to OFF, remove triggering for all
+ * If going to OFF, remove triggering for all wkup domain
* non-wakeup GPIOs. Otherwise spurious IRQs will be
* generated. See OMAP2420 Errata item 1.101.
*/
- bank->saved_datain = readl_relaxed(bank->base +
- bank->regs->datain);
- l1 = bank->context.fallingdetect;
- l2 = bank->context.risingdetect;
-
- l1 &= ~bank->enabled_non_wakeup_gpios;
- l2 &= ~bank->enabled_non_wakeup_gpios;
-
- writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
- writel_relaxed(l2, bank->base + bank->regs->risingdetect);
-
- bank->workaround_enabled = true;
+ if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
+ nowake = bank->enabled_non_wakeup_gpios;
+ omap_gpio_rmw(base, bank->regs->fallingdetect, nowake, ~nowake);
+ omap_gpio_rmw(base, bank->regs->risingdetect, nowake, ~nowake);
+ }
update_gpio_context_count:
if (bank->get_context_loss_count)
@@ -1481,8 +1306,6 @@ update_gpio_context_count:
omap_gpio_dbck_disable(bank);
}
-static void omap_gpio_init_context(struct gpio_bank *p);
-
static void omap_gpio_unidle(struct gpio_bank *bank)
{
struct device *dev = bank->chip.parent;
@@ -1504,9 +1327,6 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
omap_gpio_dbck_enable(bank);
- if (bank->funcs.idle_disable_level_quirk)
- bank->funcs.idle_disable_level_quirk(bank);
-
if (bank->loses_context) {
if (!bank->get_context_loss_count) {
omap_gpio_restore_context(bank);
@@ -1518,11 +1338,14 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
return;
}
}
+ } else {
+ /* Restore changes done for OMAP2420 errata 1.101 */
+ writel_relaxed(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ writel_relaxed(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
}
- if (!bank->workaround_enabled)
- return;
-
l = readl_relaxed(bank->base + bank->regs->datain);
/*
@@ -1572,117 +1395,35 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
}
-
- bank->workaround_enabled = false;
-}
-
-static void omap_gpio_init_context(struct gpio_bank *p)
-{
- struct omap_gpio_reg_offs *regs = p->regs;
- void __iomem *base = p->base;
-
- p->context.ctrl = readl_relaxed(base + regs->ctrl);
- p->context.oe = readl_relaxed(base + regs->direction);
- p->context.wake_en = readl_relaxed(base + regs->wkup_en);
- p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
- p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
- p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
- p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
- p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
- p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
-
- if (regs->set_dataout && p->regs->clr_dataout)
- p->context.dataout = readl_relaxed(base + regs->set_dataout);
- else
- p->context.dataout = readl_relaxed(base + regs->dataout);
-
- p->context_valid = true;
-}
-
-static void omap_gpio_restore_context(struct gpio_bank *bank)
-{
- writel_relaxed(bank->context.wake_en,
- bank->base + bank->regs->wkup_en);
- writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
- writel_relaxed(bank->context.leveldetect0,
- bank->base + bank->regs->leveldetect0);
- writel_relaxed(bank->context.leveldetect1,
- bank->base + bank->regs->leveldetect1);
- writel_relaxed(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
- writel_relaxed(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- if (bank->regs->set_dataout && bank->regs->clr_dataout)
- writel_relaxed(bank->context.dataout,
- bank->base + bank->regs->set_dataout);
- else
- writel_relaxed(bank->context.dataout,
- bank->base + bank->regs->dataout);
- writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
-
- if (bank->dbck_enable_mask) {
- writel_relaxed(bank->context.debounce, bank->base +
- bank->regs->debounce);
- writel_relaxed(bank->context.debounce_en,
- bank->base + bank->regs->debounce_en);
- }
-
- writel_relaxed(bank->context.irqenable1,
- bank->base + bank->regs->irqenable);
- writel_relaxed(bank->context.irqenable2,
- bank->base + bank->regs->irqenable2);
}
-static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+static int gpio_omap_cpu_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
{
- struct gpio_bank *bank = dev_get_drvdata(dev);
+ struct gpio_bank *bank;
unsigned long flags;
- int error = 0;
-
- raw_spin_lock_irqsave(&bank->lock, flags);
- /* Must be idled only by CPU_CLUSTER_PM_ENTER? */
- if (bank->irq_usage) {
- error = -EBUSY;
- goto unlock;
- }
- omap_gpio_idle(bank, true);
- bank->is_suspended = true;
-unlock:
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return error;
-}
-static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
-{
- struct gpio_bank *bank = dev_get_drvdata(dev);
- unsigned long flags;
- int error = 0;
+ bank = container_of(nb, struct gpio_bank, nb);
raw_spin_lock_irqsave(&bank->lock, flags);
- /* Must be unidled only by CPU_CLUSTER_PM_ENTER? */
- if (bank->irq_usage) {
- error = -EBUSY;
- goto unlock;
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ if (bank->is_suspended)
+ break;
+ omap_gpio_idle(bank, true);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ if (bank->is_suspended)
+ break;
+ omap_gpio_unidle(bank);
+ break;
}
- omap_gpio_unidle(bank);
- bank->is_suspended = false;
-unlock:
raw_spin_unlock_irqrestore(&bank->lock, flags);
- return error;
+ return NOTIFY_OK;
}
-#ifdef CONFIG_ARCH_OMAP2PLUS
-static const struct dev_pm_ops gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
- NULL)
-};
-#else
-static const struct dev_pm_ops gpio_pm_ops;
-#endif /* CONFIG_ARCH_OMAP2PLUS */
-
-#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
.revision = OMAP24XX_GPIO_REVISION,
.direction = OMAP24XX_GPIO_OE,
@@ -1729,11 +1470,6 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};
-/*
- * Note that omap2 does not currently support idle modes with context loss so
- * no need to add OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER quirk flag to save
- * and restore context.
- */
static const struct omap_gpio_platform_data omap2_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
@@ -1744,14 +1480,12 @@ static const struct omap_gpio_platform_data omap3_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
- .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
};
static const struct omap_gpio_platform_data omap4_pdata = {
.regs = &omap4_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
- .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
};
static const struct of_device_id omap_gpio_match[] = {
@@ -1770,15 +1504,187 @@ static const struct of_device_id omap_gpio_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
+
+static int omap_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+ const struct omap_gpio_platform_data *pdata;
+ struct gpio_bank *bank;
+ struct irq_chip *irqc;
+ int ret;
+
+ match = of_match_device(of_match_ptr(omap_gpio_match), dev);
+
+ pdata = match ? match->data : dev_get_platdata(dev);
+ if (!pdata)
+ return -EINVAL;
+
+ bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
+ if (!bank)
+ return -ENOMEM;
+
+ irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->irq_startup = omap_gpio_irq_startup,
+ irqc->irq_shutdown = omap_gpio_irq_shutdown,
+ irqc->irq_ack = omap_gpio_ack_irq,
+ irqc->irq_mask = omap_gpio_mask_irq,
+ irqc->irq_unmask = omap_gpio_unmask_irq,
+ irqc->irq_set_type = omap_gpio_irq_type,
+ irqc->irq_set_wake = omap_gpio_wake_enable,
+ irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
+ irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
+ irqc->name = dev_name(&pdev->dev);
+ irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
+ irqc->parent_device = dev;
+
+ bank->irq = platform_get_irq(pdev, 0);
+ if (bank->irq <= 0) {
+ if (!bank->irq)
+ bank->irq = -ENXIO;
+ if (bank->irq != -EPROBE_DEFER)
+ dev_err(dev,
+ "can't get irq resource ret=%d\n", bank->irq);
+ return bank->irq;
+ }
+
+ bank->chip.parent = dev;
+ bank->chip.owner = THIS_MODULE;
+ bank->dbck_flag = pdata->dbck_flag;
+ bank->stride = pdata->bank_stride;
+ bank->width = pdata->bank_width;
+ bank->is_mpuio = pdata->is_mpuio;
+ bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
+ bank->regs = pdata->regs;
+#ifdef CONFIG_OF_GPIO
+ bank->chip.of_node = of_node_get(node);
#endif
+ if (node) {
+ if (!of_property_read_bool(node, "ti,gpio-always-on"))
+ bank->loses_context = true;
+ } else {
+ bank->loses_context = pdata->loses_context;
+
+ if (bank->loses_context)
+ bank->get_context_loss_count =
+ pdata->get_context_loss_count;
+ }
+
+ if (bank->regs->set_dataout && bank->regs->clr_dataout) {
+ bank->set_dataout = omap_set_gpio_dataout_reg;
+ bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
+ } else {
+ bank->set_dataout = omap_set_gpio_dataout_mask;
+ bank->set_dataout_multiple =
+ omap_set_gpio_dataout_mask_multiple;
+ }
+
+ raw_spin_lock_init(&bank->lock);
+ raw_spin_lock_init(&bank->wa_lock);
+
+ /* Static mapping, never released */
+ bank->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bank->base)) {
+ return PTR_ERR(bank->base);
+ }
+
+ if (bank->dbck_flag) {
+ bank->dbck = devm_clk_get(dev, "dbclk");
+ if (IS_ERR(bank->dbck)) {
+ dev_err(dev,
+ "Could not get gpio dbck. Disable debounce\n");
+ bank->dbck_flag = false;
+ } else {
+ clk_prepare(bank->dbck);
+ }
+ }
+
+ platform_set_drvdata(pdev, bank);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ if (bank->is_mpuio)
+ omap_mpuio_init(bank);
+
+ omap_gpio_mod_init(bank);
+
+ ret = omap_gpio_chip_init(bank, irqc);
+ if (ret) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
+ return ret;
+ }
+
+ omap_gpio_show_rev(bank);
+
+ bank->nb.notifier_call = gpio_omap_cpu_notifier;
+ cpu_pm_register_notifier(&bank->nb);
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int omap_gpio_remove(struct platform_device *pdev)
+{
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+
+ cpu_pm_unregister_notifier(&bank->nb);
+ list_del(&bank->node);
+ gpiochip_remove(&bank->chip);
+ pm_runtime_disable(&pdev->dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
+
+ return 0;
+}
+
+static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+{
+ struct gpio_bank *bank = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_gpio_idle(bank, true);
+ bank->is_suspended = true;
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
+{
+ struct gpio_bank *bank = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_gpio_unidle(bank);
+ bank->is_suspended = false;
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static const struct dev_pm_ops gpio_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
+ NULL)
+};
+
static struct platform_driver omap_gpio_driver = {
.probe = omap_gpio_probe,
.remove = omap_gpio_remove,
.driver = {
.name = "omap_gpio",
.pm = &gpio_pm_ops,
- .of_match_table = of_match_ptr(omap_gpio_match),
+ .of_match_table = omap_gpio_match,
},
};
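
The reshuffled gpio-omap.c code above now registers its CPU PM notifier unconditionally at probe time and drops the quirk-gated level/edge helpers. A minimal sketch of the notifier shape, assuming a hypothetical bank structure; only the cpu_pm API and the CPU_CLUSTER_PM_* events are real kernel interfaces.

#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct foo_bank {
	struct notifier_block nb;
	bool is_suspended;
};

static int foo_cpu_pm_notifier(struct notifier_block *nb,
			       unsigned long cmd, void *v)
{
	struct foo_bank *bank = container_of(nb, struct foo_bank, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (bank->is_suspended)
			break;
		/* Idle the bank: save context, drop non-wakeup triggers. */
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		if (bank->is_suspended)
			break;
		/* Unidle the bank: restore the saved context. */
		break;
	}

	return NOTIFY_OK;
}

static void foo_bank_register_pm(struct foo_bank *bank)
{
	bank->nb.notifier_call = foo_cpu_pm_notifier;
	cpu_pm_register_notifier(&bank->nb);
}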
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 7e76830b3368..b7ef33f63392 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -73,6 +73,7 @@
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
static const struct i2c_device_id pca953x_id[] = {
+ { "pca6416", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
{ "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
@@ -153,6 +154,7 @@ struct pca953x_chip {
u8 irq_trig_fall[MAX_BANK];
struct irq_chip irq_chip;
#endif
+ atomic_t wakeup_path;
struct i2c_client *client;
struct gpio_chip gpio_chip;
@@ -581,6 +583,11 @@ static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
+ if (on)
+ atomic_inc(&chip->wakeup_path);
+ else
+ atomic_dec(&chip->wakeup_path);
+
return irq_set_irq_wake(chip->client->irq, on);
}
@@ -1100,7 +1107,10 @@ static int pca953x_suspend(struct device *dev)
regcache_cache_only(chip->regmap, true);
- regulator_disable(chip->regulator);
+ if (atomic_read(&chip->wakeup_path))
+ device_set_wakeup_path(dev);
+ else
+ regulator_disable(chip->regulator);
return 0;
}
@@ -1110,10 +1120,12 @@ static int pca953x_resume(struct device *dev)
struct pca953x_chip *chip = dev_get_drvdata(dev);
int ret;
- ret = regulator_enable(chip->regulator);
- if (ret != 0) {
- dev_err(dev, "Failed to enable regulator: %d\n", ret);
- return 0;
+ if (!atomic_read(&chip->wakeup_path)) {
+ ret = regulator_enable(chip->regulator);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable regulator: %d\n", ret);
+ return 0;
+ }
}
regcache_cache_only(chip->regmap, false);
@@ -1137,6 +1149,7 @@ static int pca953x_resume(struct device *dev)
#define OF_957X(__nrgpio, __int) (void *)(__nrgpio | PCA957X_TYPE | __int)
static const struct of_device_id pca953x_dt_ids[] = {
+ { .compatible = "nxp,pca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "nxp,pca9505", .data = OF_953X(40, PCA_INT), },
{ .compatible = "nxp,pca9534", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "nxp,pca9535", .data = OF_953X(16, PCA_INT), },
@@ -1152,6 +1165,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca9575", .data = OF_957X(16, PCA_INT), },
{ .compatible = "nxp,pca9698", .data = OF_953X(40, 0), },
+ { .compatible = "nxp,pcal6416", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal6524", .data = OF_953X(24, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9555a", .data = OF_953X(16, PCA_LATCH_INT), },
@@ -1167,6 +1181,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
+ { .compatible = "onnn,cat9554", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index bcc6be4a5cb2..26f77fdb217e 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -577,7 +577,7 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-const struct irq_domain_ops pxa_irq_domain_ops = {
+static const struct irq_domain_ops pxa_irq_domain_ops = {
.map = pxa_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
@@ -622,7 +622,6 @@ static int pxa_gpio_probe(struct platform_device *pdev)
{
struct pxa_gpio_chip *pchip;
struct pxa_gpio_bank *c;
- struct resource *res;
struct clk *clk;
struct pxa_gpio_platform_data *info;
void __iomem *gpio_reg_base;
@@ -665,11 +664,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
pchip->irq0 = irq0;
pchip->irq1 = irq1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- gpio_reg_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+
+ gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
if (!gpio_reg_base)
return -EINVAL;
@@ -816,7 +812,7 @@ static void pxa_gpio_resume(void)
#define pxa_gpio_resume NULL
#endif
-struct syscore_ops pxa_gpio_syscore_ops = {
+static struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 500a3596aaf4..70e95fc4779f 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -430,7 +430,7 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
static int gpio_rcar_probe(struct platform_device *pdev)
{
struct gpio_rcar_priv *p;
- struct resource *io, *irq;
+ struct resource *irq;
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
struct device *dev = &pdev->dev;
@@ -461,8 +461,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err0;
}
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->base = devm_ioremap_resource(dev, io);
+ p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base)) {
ret = PTR_ERR(p->base);
goto err0;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index c333046d02b8..fb143f28c386 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -23,7 +23,6 @@ struct sch_gpio {
struct gpio_chip chip;
spinlock_t lock;
unsigned short iobase;
- unsigned short core_base;
unsigned short resume_base;
};
@@ -166,7 +165,6 @@ static int sch_gpio_probe(struct platform_device *pdev)
switch (pdev->id) {
case PCI_DEVICE_ID_INTEL_SCH_LPC:
- sch->core_base = 0;
sch->resume_base = 10;
sch->chip.ngpio = 14;
@@ -185,19 +183,16 @@ static int sch_gpio_probe(struct platform_device *pdev)
break;
case PCI_DEVICE_ID_INTEL_ITC_LPC:
- sch->core_base = 0;
sch->resume_base = 5;
sch->chip.ngpio = 14;
break;
case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
- sch->core_base = 0;
sch->resume_base = 21;
sch->chip.ngpio = 30;
break;
case PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB:
- sch->core_base = 0;
sch->resume_base = 2;
sch->chip.ngpio = 8;
break;
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index ee3039f091f4..6eca531b7d96 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -122,15 +122,13 @@ static int spics_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_spics *spics;
- struct resource *res;
int ret;
spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL);
if (!spics)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spics->base = devm_ioremap_resource(&pdev->dev, res);
+ spics->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spics->base))
return PTR_ERR(spics->base);
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 55072d2b367f..f5c8b3a351d5 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -219,7 +219,6 @@ static int sprd_gpio_probe(struct platform_device *pdev)
{
struct gpio_irq_chip *irq;
struct sprd_gpio *sprd_gpio;
- struct resource *res;
int ret;
sprd_gpio = devm_kzalloc(&pdev->dev, sizeof(*sprd_gpio), GFP_KERNEL);
@@ -232,8 +231,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
return sprd_gpio->irq;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sprd_gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ sprd_gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sprd_gpio->base))
return PTR_ERR(sprd_gpio->base);
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 2283c869ad5d..a51c310708b8 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -360,7 +360,6 @@ static int gsta_probe(struct platform_device *dev)
struct pci_dev *pdev;
struct sta2x11_gpio_pdata *gpio_pdata;
struct gsta_gpio *chip;
- struct resource *res;
pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev);
gpio_pdata = dev_get_platdata(&pdev->dev);
@@ -369,13 +368,11 @@ static int gsta_probe(struct platform_device *dev)
dev_err(&dev->dev, "no gpio config\n");
pr_debug("gpio config: %p\n", gpio_pdata);
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-
chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->dev = &dev->dev;
- chip->reg_base = devm_ioremap_resource(&dev->dev, res);
+ chip->reg_base = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(chip->reg_base))
return PTR_ERR(chip->reg_base);
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 19972084c45b..8a319d56c5de 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -210,7 +210,6 @@ static int xway_stp_hw_init(struct xway_stp *chip)
static int xway_stp_probe(struct platform_device *pdev)
{
- struct resource *res;
u32 shadow, groups, dsl, phy;
struct xway_stp *chip;
struct clk *clk;
@@ -220,8 +219,7 @@ static int xway_stp_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->virt = devm_ioremap_resource(&pdev->dev, res);
+ chip->virt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->virt))
return PTR_ERR(chip->virt);
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index d5e5d19f4c0a..6bbac6c83f29 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -120,7 +120,6 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
static int tb10x_gpio_probe(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio;
- struct resource *mem;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret = -EBUSY;
@@ -136,8 +135,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (tb10x_gpio == NULL)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tb10x_gpio->base = devm_ioremap_resource(dev, mem);
+ tb10x_gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 1ececf2c3282..6d9b6906b9d0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -569,7 +569,6 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
static int tegra_gpio_probe(struct platform_device *pdev)
{
struct tegra_gpio_info *tgi;
- struct resource *res;
struct tegra_gpio_bank *bank;
unsigned int gpio, i, j;
int ret;
@@ -645,8 +644,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank->tgi = tgi;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tgi->regs = devm_ioremap_resource(&pdev->dev, res);
+ tgi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tgi->regs))
return PTR_ERR(tgi->regs);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 314e300d6ba3..1c70e831069c 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -229,7 +229,6 @@ static int timbgpio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
struct timbgpio *tgpio;
- struct resource *iomem;
struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
int irq = platform_get_irq(pdev, 0);
@@ -246,8 +245,7 @@ static int timbgpio_probe(struct platform_device *pdev)
spin_lock_init(&tgpio->lock);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tgpio->membase = devm_ioremap_resource(dev, iomem);
+ tgpio->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tgpio->membase))
return PTR_ERR(tgpio->membase);
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index c2a80b4cbf32..8c0d82d926dd 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -23,7 +23,6 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
{
struct device_node *node;
struct gpio_chip *chip;
- struct resource *res;
void __iomem *base_addr;
int retval;
u32 ngpios;
@@ -32,8 +31,7 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_addr = devm_ioremap_resource(&pdev->dev, res);
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 0f662b297a95..93cdcc41e9fb 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -346,7 +346,6 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
struct uniphier_gpio_priv *priv;
struct gpio_chip *chip;
struct irq_chip *irq_chip;
- struct resource *regs;
unsigned int nregs;
u32 ngpios;
int ret;
@@ -370,8 +369,7 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, regs);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 541fa6ac399d..30aef41e3b7e 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -29,6 +29,7 @@ struct fsl_gpio_soc_data {
struct vf610_gpio_port {
struct gpio_chip gc;
+ struct irq_chip ic;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -60,8 +61,6 @@ struct vf610_gpio_port {
#define PORT_INT_EITHER_EDGE 0xb
#define PORT_INT_LOGIC_ONE 0xc
-static struct irq_chip vf610_gpio_irq_chip;
-
static const struct fsl_gpio_soc_data imx_data = {
.have_paddr = true,
};
@@ -86,28 +85,24 @@ static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
unsigned long mask = BIT(gpio);
- void __iomem *addr;
+ unsigned long offset = GPIO_PDIR;
if (port->sdata && port->sdata->have_paddr) {
mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- addr = mask ? port->gpio_base + GPIO_PDOR :
- port->gpio_base + GPIO_PDIR;
- return !!(vf610_gpio_readl(addr) & BIT(gpio));
- } else {
- return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR)
- & BIT(gpio));
+ if (mask)
+ offset = GPIO_PDOR;
}
+
+ return !!(vf610_gpio_readl(port->gpio_base + offset) & BIT(gpio));
}
static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
unsigned long mask = BIT(gpio);
+ unsigned long offset = val ? GPIO_PSOR : GPIO_PCOR;
- if (val)
- vf610_gpio_writel(mask, port->gpio_base + GPIO_PSOR);
- else
- vf610_gpio_writel(mask, port->gpio_base + GPIO_PCOR);
+ vf610_gpio_writel(mask, port->gpio_base + offset);
}
static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -237,37 +232,31 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
return 0;
}
-static struct irq_chip vf610_gpio_irq_chip = {
- .name = "gpio-vf610",
- .irq_ack = vf610_gpio_irq_ack,
- .irq_mask = vf610_gpio_irq_mask,
- .irq_unmask = vf610_gpio_irq_unmask,
- .irq_set_type = vf610_gpio_irq_set_type,
- .irq_set_wake = vf610_gpio_irq_set_wake,
-};
+static void vf610_gpio_disable_clk(void *data)
+{
+ clk_disable_unprepare(data);
+}
static int vf610_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct vf610_gpio_port *port;
- struct resource *iores;
struct gpio_chip *gc;
+ struct irq_chip *ic;
int i;
int ret;
- port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->sdata = of_device_get_match_data(dev);
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- port->base = devm_ioremap_resource(dev, iores);
+ port->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- port->gpio_base = devm_ioremap_resource(dev, iores);
+ port->gpio_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(port->gpio_base))
return PTR_ERR(port->gpio_base);
@@ -275,11 +264,15 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (port->irq < 0)
return port->irq;
- port->clk_port = devm_clk_get(&pdev->dev, "port");
+ port->clk_port = devm_clk_get(dev, "port");
if (!IS_ERR(port->clk_port)) {
ret = clk_prepare_enable(port->clk_port);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, vf610_gpio_disable_clk,
+ port->clk_port);
+ if (ret)
+ return ret;
} else if (port->clk_port == ERR_PTR(-EPROBE_DEFER)) {
/*
* Percolate deferrals, for anything else,
@@ -288,20 +281,19 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return PTR_ERR(port->clk_port);
}
- port->clk_gpio = devm_clk_get(&pdev->dev, "gpio");
+ port->clk_gpio = devm_clk_get(dev, "gpio");
if (!IS_ERR(port->clk_gpio)) {
ret = clk_prepare_enable(port->clk_gpio);
- if (ret) {
- clk_disable_unprepare(port->clk_port);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, vf610_gpio_disable_clk,
+ port->clk_gpio);
+ if (ret)
return ret;
- }
} else if (port->clk_gpio == ERR_PTR(-EPROBE_DEFER)) {
- clk_disable_unprepare(port->clk_port);
return PTR_ERR(port->clk_gpio);
}
- platform_set_drvdata(pdev, port);
-
gc = &port->gc;
gc->of_node = np;
gc->parent = dev;
@@ -316,7 +308,15 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
- ret = gpiochip_add_data(gc, port);
+ ic = &port->ic;
+ ic->name = "gpio-vf610";
+ ic->irq_ack = vf610_gpio_irq_ack;
+ ic->irq_mask = vf610_gpio_irq_mask;
+ ic->irq_unmask = vf610_gpio_irq_unmask;
+ ic->irq_set_type = vf610_gpio_irq_set_type;
+ ic->irq_set_wake = vf610_gpio_irq_set_wake;
+
+ ret = devm_gpiochip_add_data(dev, gc, port);
if (ret < 0)
return ret;
@@ -327,39 +327,23 @@ static int vf610_gpio_probe(struct platform_device *pdev)
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);
- ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
- handle_edge_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add irqchip\n");
- gpiochip_remove(gc);
return ret;
}
- gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq,
+ gpiochip_set_chained_irqchip(gc, ic, port->irq,
vf610_gpio_irq_handler);
return 0;
}
-static int vf610_gpio_remove(struct platform_device *pdev)
-{
- struct vf610_gpio_port *port = platform_get_drvdata(pdev);
-
- gpiochip_remove(&port->gc);
- if (!IS_ERR(port->clk_port))
- clk_disable_unprepare(port->clk_port);
- if (!IS_ERR(port->clk_gpio))
- clk_disable_unprepare(port->clk_gpio);
-
- return 0;
-}
-
static struct platform_driver vf610_gpio_driver = {
.driver = {
.name = "gpio-vf610",
.of_match_table = vf610_gpio_dt_ids,
},
.probe = vf610_gpio_probe,
- .remove = vf610_gpio_remove,
};
builtin_platform_driver(vf610_gpio_driver);
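
The vf610 conversion above removes the remove() callback entirely by tying every resource to devres, including the clock enables via devm_add_action_or_reset(). A minimal sketch of that idiom with hypothetical names; the devm_* and clk_* calls are standard kernel APIs.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static void foo_disable_clk(void *data)
{
	clk_disable_unprepare(data);
}

static int foo_get_clock(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "port");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* Undone automatically on probe failure or driver unbind. */
	ret = devm_add_action_or_reset(dev, foo_disable_clk, clk);
	if (ret)
		return ret;

	*out = clk;
	return 0;
}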
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 2eb76f35aa7e..641a05181017 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -229,7 +229,6 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv;
int ret;
- struct resource *res;
void __iomem *regs;
struct irq_domain *parent_domain = NULL;
u32 val32;
@@ -238,8 +237,7 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 0a3607fd21af..54d3359444f3 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -290,22 +290,17 @@ MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids);
static int xlp_gpio_probe(struct platform_device *pdev)
{
struct gpio_chip *gc;
- struct resource *iores;
struct xlp_gpio_priv *priv;
void __iomem *gpio_base;
int irq_base, irq, err;
int ngpio;
u32 soc_type;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENODEV;
-
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- gpio_base = devm_ioremap_resource(&pdev->dev, iores);
+ gpio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio_base))
return PTR_ERR(gpio_base);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 5eacad9b2692..fb927559aefa 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -218,15 +218,13 @@ static int zx_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zx_gpio *chip;
- struct resource *res;
int irq, id, ret;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 00ff7b1fa8a1..9392edaeec3f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -834,7 +834,6 @@ static int zynq_gpio_probe(struct platform_device *pdev)
int ret, bank_num;
struct zynq_gpio *gpio;
struct gpio_chip *chip;
- struct resource *res;
const struct of_device_id *match;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -849,8 +848,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
gpio->p_data = match->data;
platform_set_drvdata(pdev, gpio);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ gpio->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base_addr))
return PTR_ERR(gpio->base_addr);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 30d0baf7ddae..c9fc9e232aaf 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -24,13 +24,13 @@
*
* @node: list-entry of the events list of the struct acpi_gpio_chip
* @handle: handle of ACPI method to execute when the IRQ triggers
- * @handler: irq_handler to pass to request_irq when requesting the IRQ
- * @pin: GPIO pin number on the gpio_chip
- * @irq: Linux IRQ number for the event, for request_ / free_irq
- * @irqflags: flags to pass to request_irq when requesting the IRQ
+ * @handler: handler function to pass to request_irq() when requesting the IRQ
+ * @pin: GPIO pin number on the struct gpio_chip
+ * @irq: Linux IRQ number for the event, for request_irq() / free_irq()
+ * @irqflags: flags to pass to request_irq() when requesting the IRQ
* @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
- * @irq_requested:True if request_irq has been done
- * @desc: gpio_desc for the GPIO pin for this event
+ * @irq_requested:True if request_irq() has been done
+ * @desc: struct gpio_desc for the GPIO pin for this event
*/
struct acpi_gpio_event {
struct list_head node;
@@ -65,10 +65,10 @@ struct acpi_gpio_chip {
};
/*
- * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
* (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
- * late_initcall_sync handler, so that other builtin drivers can register their
- * OpRegions before the event handlers can run. This list contains gpiochips
+ * late_initcall_sync() handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains GPIO chips
* for which the acpi_gpiochip_request_irqs() call has been deferred.
*/
static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
@@ -90,7 +90,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
*
* Return: GPIO descriptor to use with Linux generic GPIO API, or ERR_PTR
* error value. Specifically returns %-EPROBE_DEFER if the referenced GPIO
- * controller does not have gpiochip registered at the moment. This is to
+ * controller does not have a GPIO chip registered at the moment. This is to
* support probe deferral.
*/
static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
@@ -287,9 +287,9 @@ fail_free_desc:
*
* ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
* handled by ACPI event methods which need to be called from the GPIO
- * chip's interrupt handler. acpi_gpiochip_request_interrupts finds out which
- * gpio pins have acpi event methods and assigns interrupt handlers that calls
- * the acpi event methods for those pins.
+ * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which
+ * GPIO pins have ACPI event methods and assigns interrupt handlers that call
+ * the ACPI event methods for those pins.
*/
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
{
@@ -444,8 +444,6 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
static enum gpiod_flags
acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
{
- bool pull_up = agpio->pin_config == ACPI_PIN_CONFIG_PULLUP;
-
switch (agpio->io_restriction) {
case ACPI_IO_RESTRICT_INPUT:
return GPIOD_IN;
@@ -454,16 +452,26 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
* ACPI GPIO resources don't contain an initial value for the
* GPIO. Therefore we deduce that value from the pull field
* instead. If the pin is pulled up we assume default to be
- * high, otherwise low.
+ * high; if it is pulled down we assume default to be low;
+ * otherwise we leave the pin untouched.
*/
- return pull_up ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ switch (agpio->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ return GPIOD_OUT_HIGH;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ return GPIOD_OUT_LOW;
+ default:
+ break;
+ }
default:
- /*
- * Assume that the BIOS has configured the direction and pull
- * accordingly.
- */
- return GPIOD_ASIS;
+ break;
}
+
+ /*
+ * Assume that the BIOS has configured the direction and pull
+ * accordingly.
+ */
+ return GPIOD_ASIS;
}
static int
@@ -517,6 +525,26 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *inf
return ret;
}
+int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info)
+{
+ switch (info->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ *lookupflags |= GPIO_PULL_UP;
+ break;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ *lookupflags |= GPIO_PULL_DOWN;
+ break;
+ default:
+ break;
+ }
+
+ if (info->polarity == GPIO_ACTIVE_LOW)
+ *lookupflags |= GPIO_ACTIVE_LOW;
+
+ return 0;
+}
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
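
A short caller-side sketch of how the new lookup-flags helper pairs with the existing gpiod-flags helper; the function below is illustrative only and mirrors what the acpi_find_gpio() hunk further down does.

#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>

#include "gpiolib.h"	/* internal: struct acpi_gpio_info, acpi_gpio_update_*() */

static void foo_apply_acpi_info(struct acpi_gpio_info *info,
				enum gpiod_flags *dflags,
				unsigned long *lflags)
{
	*dflags = GPIOD_ASIS;
	*lflags = GPIO_LOOKUP_FLAGS_DEFAULT;

	acpi_gpio_update_gpiod_flags(dflags, info);
	/* Folds pin_config (pull up/down) and polarity into *lflags. */
	acpi_gpio_update_gpiod_lookup_flags(lflags, info);
}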
@@ -550,6 +578,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
+ lookup->info.pin_config = agpio->pin_config;
lookup->info.gpioint = gpioint;
/*
@@ -653,7 +682,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* that case @index is used to select the GPIO entry in the property value
* (in case of multiple).
*
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
+ * If the GPIO cannot be translated or there is an error, an ERR_PTR is
* returned.
*
* Note: if the GPIO resource has multiple entries in the pin list, this
@@ -696,7 +725,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags)
+ unsigned long *lookupflags)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_gpio_info info;
@@ -737,10 +766,8 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
return ERR_PTR(-ENOENT);
}
- if (info.polarity == GPIO_ACTIVE_LOW)
- *lookupflags |= GPIO_ACTIVE_LOW;
-
acpi_gpio_update_gpiod_flags(dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
return desc;
}
@@ -751,10 +778,13 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
* @index: index of GpioIo/GpioInt resource (starting from %0)
* @info: info pointer to fill in (optional)
*
- * If @fwnode is an ACPI device object, call %acpi_get_gpiod_by_index() for it.
- * Otherwise (ie. it is a data-only non-device object), use the property-based
+ * If @fwnode is an ACPI device object, call acpi_get_gpiod_by_index() for it.
+ * Otherwise (i.e. it is a data-only non-device object), use the property-based
* GPIO lookup to get to the GPIO resource with the relevant information and use
* that to obtain the GPIO descriptor to return.
+ *
+ * If the GPIO cannot be translated or there is an error, an ERR_PTR is
+ * returned.
*/
struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
@@ -816,6 +846,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
return PTR_ERR(desc);
if (info.gpioint && idx++ == index) {
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
char label[32];
int irq;
@@ -827,7 +858,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
return irq;
snprintf(label, sizeof(label), "GpioInt() %d", index);
- ret = gpiod_configure_flags(desc, label, 0, info.flags);
+ ret = gpiod_configure_flags(desc, label, lflags, info.flags);
if (ret < 0)
return ret;
@@ -992,16 +1023,19 @@ static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
}
}
-static struct gpio_desc *acpi_gpiochip_parse_own_gpio(
- struct acpi_gpio_chip *achip, struct fwnode_handle *fwnode,
- const char **name, unsigned int *lflags, unsigned int *dflags)
+static struct gpio_desc *
+acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
+ struct fwnode_handle *fwnode,
+ const char **name,
+ unsigned long *lflags,
+ enum gpiod_flags *dflags)
{
struct gpio_chip *chip = achip->chip;
struct gpio_desc *desc;
u32 gpios[2];
int ret;
- *lflags = 0;
+ *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
*dflags = 0;
*name = NULL;
@@ -1037,7 +1071,8 @@ static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
struct fwnode_handle *fwnode;
device_for_each_child_node(chip->parent, fwnode) {
- unsigned int lflags, dflags;
+ unsigned long lflags;
+ enum gpiod_flags dflags;
struct gpio_desc *desc;
const char *name;
int ret;
@@ -1158,11 +1193,13 @@ static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
}
/**
- * acpi_gpio_count - return the number of GPIOs associated with a
- * device / function or -ENOENT if no GPIO has been
- * assigned to the requested function.
- * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * acpi_gpio_count - count the GPIOs associated with a device / function
+ * @dev: GPIO consumer, can be %NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
+ *
+ * Return:
+ * The number of GPIOs associated with a device / function or %-ENOENT
+ * if no GPIO has been assigned to the requested function.
*/
int acpi_gpio_count(struct device *dev, const char *con_id)
{
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index dd517098ab95..53781b253986 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/export.h>
#include "gpiolib.h"
@@ -56,3 +57,4 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip,
kfree(names);
}
+EXPORT_SYMBOL_GPL(devprop_gpiochip_set_names);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 6a3ec575a404..aec7bd86ae7e 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -86,9 +86,9 @@ static void of_gpio_flags_quirks(struct device_node *np,
if (IS_ENABLED(CONFIG_REGULATOR) &&
(of_device_is_compatible(np, "regulator-fixed") ||
of_device_is_compatible(np, "reg-fixed-voltage") ||
- (of_device_is_compatible(np, "regulator-gpio") &&
- !(strcmp(propname, "enable-gpio") &&
- strcmp(propname, "enable-gpios"))))) {
+ (!(strcmp(propname, "enable-gpio") &&
+ strcmp(propname, "enable-gpios")) &&
+ of_device_is_compatible(np, "regulator-gpio")))) {
/*
* The regulator GPIO handles are specified such that the
* presence or absence of "enable-active-high" solely controls
@@ -119,9 +119,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* property named "cs-gpios" we need to inspect the child node
* to determine if the flags should have inverted semantics.
*/
- if (IS_ENABLED(CONFIG_SPI_MASTER) &&
- of_property_read_bool(np, "cs-gpios") &&
- !strcmp(propname, "cs-gpios")) {
+ if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
+ of_property_read_bool(np, "cs-gpios")) {
struct device_node *child;
u32 cs;
int ret;
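The two reorderings above put the cheap strcmp() on the property name ahead of the DT lookups, presumably so that && short-circuiting skips the lookups entirely for unrelated properties. A small stand-alone C sketch of that effect; expensive_lookup() is an invented stand-in for of_property_read_bool():

#include <stdio.h>
#include <string.h>

static int lookups;

static int expensive_lookup(void)
{
        lookups++;                      /* models the costly DT access */
        return 1;
}

static int is_cs_gpios(const char *propname)
{
        /* cheap check first: the right operand only runs on a match */
        return !strcmp(propname, "cs-gpios") && expensive_lookup();
}

int main(void)
{
        const char *props[] = { "reset-gpios", "enable-gpios", "cs-gpios" };

        for (unsigned int i = 0; i < 3; i++)
                is_cs_gpios(props[i]);
        printf("expensive lookups: %d\n", lookups);     /* prints 1, not 3 */
        return 0;
}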
@@ -288,8 +287,7 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
}
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned int idx, unsigned long *flags)
{
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
@@ -362,8 +360,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
* @chip: GPIO chip whose hog is parsed
* @idx: Index of the GPIO to parse
* @name: GPIO line name
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_parse_own_gpio()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_parse_own_gpio()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
@@ -372,7 +370,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
struct gpio_chip *chip,
unsigned int idx, const char **name,
- enum gpio_lookup_flags *lflags,
+ unsigned long *lflags,
enum gpiod_flags *dflags)
{
struct device_node *chip_np;
@@ -388,7 +386,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
return ERR_PTR(-EINVAL);
xlate_flags = 0;
- *lflags = 0;
+ *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
*dflags = 0;
ret = of_property_read_u32(chip_np, "#gpio-cells", &tmp);
@@ -445,7 +443,7 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
struct gpio_desc *desc = NULL;
struct device_node *np;
const char *name;
- enum gpio_lookup_flags lflags;
+ unsigned long lflags;
enum gpiod_flags dflags;
unsigned int i;
int ret;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 0495bf1d480a..e013d417a936 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1379,7 +1379,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
status = gpiochip_add_irqchip(chip, lock_key, request_key);
if (status)
- goto err_remove_chip;
+ goto err_free_gpiochip_mask;
status = of_gpiochip_add(chip);
if (status)
@@ -1387,7 +1387,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
status = gpiochip_init_valid_mask(chip);
if (status)
- goto err_remove_chip;
+ goto err_remove_of_chip;
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
@@ -1415,14 +1415,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (gpiolib_initialized) {
status = gpiochip_setup_dev(gdev);
if (status)
- goto err_remove_chip;
+ goto err_remove_acpi_chip;
}
return 0;
-err_remove_chip:
+err_remove_acpi_chip:
acpi_gpiochip_remove(chip);
+err_remove_of_chip:
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
+err_remove_chip:
+ gpiochip_irqchip_remove(chip);
+err_free_gpiochip_mask:
gpiochip_free_valid_mask(chip);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(chip);
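The reworked error path above gives each setup step its own label so a failure only unwinds the steps that actually completed, in reverse order. A stand-alone C sketch of that labelled-unwind pattern; the step_*() and undo_*() functions are invented placeholders, not kernel API:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }          /* pretend this step fails */
static int step_c(void) { return 0; }
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int setup(void)
{
        int ret;

        ret = step_a();
        if (ret)
                goto err_none;
        ret = step_b();
        if (ret)
                goto err_undo_a;                /* only step a needs undoing */
        ret = step_c();
        if (ret)
                goto err_undo_b;
        return 0;

err_undo_b:
        undo_b();
err_undo_a:
        undo_a();
err_none:
        return ret;
}

int main(void)
{
        printf("setup() = %d\n", setup());      /* prints "undo a", then -1 */
        return 0;
}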
@@ -2515,6 +2519,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label,
enum gpiod_flags flags)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum);
int err;
@@ -2527,7 +2532,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
if (err < 0)
return ERR_PTR(err);
- err = gpiod_configure_flags(desc, label, 0, flags);
+ err = gpiod_configure_flags(desc, label, lflags, flags);
if (err) {
chip_err(chip, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
@@ -2565,8 +2570,20 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
enum pin_config_param mode)
{
- unsigned long config = { PIN_CONF_PACKED(mode, 0) };
+ unsigned long config;
+ unsigned arg;
+
+ switch (mode) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = 1;
+ break;
+ default:
+ arg = 0;
+ }
+
+ config = PIN_CONF_PACKED(mode, arg);
return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
}
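gpio_set_config() above now packs a non-zero argument together with the bias parameter. A user-space sketch of such a packed config word; the PACK/UNPACK helpers are local stand-ins modelled on the kernel's convention of keeping the parameter in the low byte and the argument in the bits above it (the real macros live in include/linux/pinctrl/pinconf-generic.h):

#include <stdio.h>

#define PACK(param, arg)   (((unsigned long)(arg) << 8) | ((param) & 0xffUL))
#define UNPACK_PARAM(cfg)  ((unsigned int)((cfg) & 0xffUL))
#define UNPACK_ARG(cfg)    ((unsigned int)((cfg) >> 8))

enum { BIAS_DISABLE = 1, BIAS_PULL_DOWN = 2, BIAS_PULL_UP = 3 }; /* stand-ins */

int main(void)
{
        /* a pull-up request whose argument 1 means "enable the bias" */
        unsigned long cfg = PACK(BIAS_PULL_UP, 1);

        printf("param=%u arg=%u\n", UNPACK_PARAM(cfg), UNPACK_ARG(cfg));
        return 0;
}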
@@ -3911,8 +3928,7 @@ found:
}
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned int idx, unsigned long *flags)
{
struct gpio_desc *desc = ERR_PTR(-ENOENT);
struct gpiod_lookup_table *table;
@@ -4068,8 +4084,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
* gpiod_configure_flags - helper function to configure a given GPIO
* @desc: gpio whose value will be assigned
* @con_id: function within the GPIO consumer
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -4151,9 +4167,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
unsigned int idx,
enum gpiod_flags flags)
{
+ unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = NULL;
int status;
- enum gpio_lookup_flags lookupflags = 0;
/* Maybe we have a device name, maybe not */
const char *devname = dev ? dev_name(dev) : "?";
@@ -4238,8 +4254,8 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
enum gpiod_flags dflags,
const char *label)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc;
- unsigned long lflags = 0;
enum of_gpio_flags flags;
bool active_low = false;
bool single_ended = false;
@@ -4317,8 +4333,8 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
enum gpiod_flags dflags,
const char *label)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = ERR_PTR(-ENODEV);
- unsigned long lflags = 0;
int ret;
if (!fwnode)
@@ -4338,9 +4354,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
return desc;
acpi_gpio_update_gpiod_flags(&dflags, &info);
-
- if (info.polarity == GPIO_ACTIVE_LOW)
- lflags |= GPIO_ACTIVE_LOW;
+ acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
}
/* Currently only ACPI takes this path */
@@ -4391,8 +4405,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
* gpiod_hog - Hog the specified GPIO desc given the provided flags
* @desc: gpio whose value will be assigned
* @name: gpio line name
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*/
int gpiod_hog(struct gpio_desc *desc, const char *name,
@@ -4445,8 +4459,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/**
* gpiochip_free_hogs - Scan gpio-controller chip and release GPIO hog
* @chip: gpio chip to act on
- *
- * This is only used by of_gpiochip_remove to free hogged gpios
*/
static void gpiochip_free_hogs(struct gpio_chip *chip)
{
@@ -4616,7 +4628,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_optional);
*/
void gpiod_put(struct gpio_desc *desc)
{
- gpiod_free(desc);
+ if (desc)
+ gpiod_free(desc);
}
EXPORT_SYMBOL_GPL(gpiod_put);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 078ab17b96bf..7a65dad43932 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -17,7 +17,6 @@
#include <linux/cdev.h>
enum of_gpio_flags;
-enum gpio_lookup_flags;
struct acpi_device;
/**
@@ -75,6 +74,7 @@ struct gpio_device {
* @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
@@ -83,6 +83,7 @@ struct acpi_gpio_info {
struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
+ int pin_config;
int polarity;
int triggering;
unsigned int quirks;
@@ -95,7 +96,7 @@ static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags);
+ unsigned long *lookupflags);
struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags);
int of_gpiochip_add(struct gpio_chip *gc);
@@ -104,7 +105,7 @@ void of_gpiochip_remove(struct gpio_chip *gc);
static inline struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned long *lookupflags)
{
return ERR_PTR(-ENOENT);
}
@@ -126,12 +127,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
struct acpi_gpio_info *info);
+int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info);
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags);
+ unsigned long *lookupflags);
struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
struct acpi_gpio_info *info);
@@ -154,11 +157,17 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *inf
{
return 0;
}
+static inline int
+acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info)
+{
+ return 0;
+}
static inline struct gpio_desc *
acpi_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags)
+ unsigned long *lookupflags)
{
return ERR_PTR(-ENOENT);
}
@@ -243,9 +252,6 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
return desc - &desc->gdev->descs[0];
}
-void devprop_gpiochip_set_names(struct gpio_chip *chip,
- const struct fwnode_handle *fwnode);
-
/* With descriptor prefix */
#define gpiod_emerg(desc, fmt, ...) \
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bd943a71756c..e360a4a131e1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -173,6 +173,12 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+config DRM_GEM_SHMEM_HELPER
+ bool
+ depends on DRM
+ help
+ Choose this if you need the GEM shmem helper functions
+
config DRM_VM
bool
depends on DRM && MMU
@@ -194,7 +200,6 @@ config DRM_RADEON
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
@@ -215,7 +220,6 @@ config DRM_AMDGPU
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
select CHASH
help
@@ -225,8 +229,6 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
-source "drivers/gpu/drm/amd/lib/Kconfig"
-
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
@@ -251,6 +253,9 @@ config DRM_VKMS
If M is selected the module will be called vkms.
+config DRM_ATI_PCIGART
+ bool
+
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
@@ -329,12 +334,21 @@ source "drivers/gpu/drm/tve200/Kconfig"
source "drivers/gpu/drm/xen/Kconfig"
+source "drivers/gpu/drm/vboxvideo/Kconfig"
+
+source "drivers/gpu/drm/lima/Kconfig"
+
+source "drivers/gpu/drm/panfrost/Kconfig"
+
+source "drivers/gpu/drm/aspeed/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
select DRM_VM
+ select DRM_ATI_PCIGART if PCI
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1ac55c65eac0..72f5036d9bfa 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -3,11 +3,9 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-drm-y := drm_auth.o drm_bufs.o drm_cache.o \
- drm_context.o drm_dma.o \
+drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_drv.o \
- drm_scatter.o drm_pci.o \
+ drm_memory.o drm_drv.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -21,11 +19,13 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
drm_atomic_uapi.o
+drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
-drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
+drm-$(CONFIG_DRM_ATI_PCIGART) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
@@ -37,7 +37,8 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o \
- drm_atomic_state_helper.o drm_damage_helper.o
+ drm_atomic_state_helper.o drm_damage_helper.o \
+ drm_format_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -56,7 +57,6 @@ obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
-obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
@@ -109,3 +109,7 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
+obj-$(CONFIG_DRM_LIMA) += lima/
+obj-$(CONFIG_DRM_PANFROST) += panfrost/
+obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 466da5954a68..fdd0ca4b0f0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,7 +23,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-FULL_AMD_PATH=$(src)/..
+FULL_AMD_PATH=$(srctree)/$(src)/..
DISPLAY_FOLDER_NAME=display
FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
@@ -53,7 +53,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
+ amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8d0d7f3dd5fb..14398f55f602 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -83,6 +83,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_smu.h"
#define MAX_GPU_INSTANCE 16
@@ -156,6 +157,8 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;
+extern int amdgpu_ras_enable;
+extern uint amdgpu_ras_mask;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -433,6 +436,12 @@ struct amdgpu_cs_chunk {
void *kdata;
};
+struct amdgpu_cs_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
struct amdgpu_cs_parser {
struct amdgpu_device *adev;
struct drm_file *filp;
@@ -462,8 +471,8 @@ struct amdgpu_cs_parser {
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
- unsigned num_post_dep_syncobjs;
- struct drm_syncobj **post_dep_syncobjs;
+ unsigned num_post_deps;
+ struct amdgpu_cs_post_dep *post_deps;
};
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
@@ -702,7 +711,6 @@ enum amd_hw_ip_block_type {
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
- uint32_t pp_feature;
};
#define AMDGPU_RESET_MAGIC_NUM 64
@@ -825,6 +833,7 @@ struct amdgpu_device {
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
+ struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
@@ -842,6 +851,9 @@ struct amdgpu_device {
struct amd_powerplay powerplay;
bool pp_force_state_enabled;
+ /* smu */
+ struct smu_context smu;
+
/* dpm */
struct amdgpu_pm pm;
u32 cg_flags;
@@ -922,6 +934,8 @@ struct amdgpu_device {
int asic_reset_res;
struct work_struct xgmi_reset_work;
+
+ bool in_baco_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4376b17ca594..56f8ca2a3bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -464,8 +464,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
}
}
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
+ if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fe1d7368c1e6..aeead072fa79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -335,6 +335,43 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
+uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+ enum kgd_engine_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ return adev->gfx.pfp_fw_version;
+
+ case KGD_ENGINE_ME:
+ return adev->gfx.me_fw_version;
+
+ case KGD_ENGINE_CE:
+ return adev->gfx.ce_fw_version;
+
+ case KGD_ENGINE_MEC1:
+ return adev->gfx.mec_fw_version;
+
+ case KGD_ENGINE_MEC2:
+ return adev->gfx.mec2_fw_version;
+
+ case KGD_ENGINE_RLC:
+ return adev->gfx.rlc_fw_version;
+
+ case KGD_ENGINE_SDMA1:
+ return adev->sdma.instance[0].fw_version;
+
+ case KGD_ENGINE_SDMA2:
+ return adev->sdma.instance[1].fw_version;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info)
{
@@ -640,4 +677,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}
+
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 0b31a1859023..4e37fa7e85b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -81,6 +81,18 @@ struct amdgpu_kfd_dev {
uint64_t vram_used;
};
+enum kgd_engine_type {
+ KGD_ENGINE_PFP = 1,
+ KGD_ENGINE_ME,
+ KGD_ENGINE_CE,
+ KGD_ENGINE_MEC1,
+ KGD_ENGINE_MEC2,
+ KGD_ENGINE_RLC,
+ KGD_ENGINE_SDMA1,
+ KGD_ENGINE_SDMA2,
+ KGD_ENGINE_MAX
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -142,6 +154,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+ enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
@@ -230,5 +244,6 @@ int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
struct dma_fence *fence);
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ff7fac7df34b..fa09e11a600c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -22,14 +22,12 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
-#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
@@ -139,7 +137,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
@@ -191,7 +188,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -792,63 +788,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
unlock_srbm(kgd);
}
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 56ea929f524b..fec3a6aa1de6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -23,12 +23,10 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
@@ -95,7 +93,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
@@ -148,7 +145,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -751,63 +747,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
unlock_srbm(kgd);
}
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 5c51d4910650..ef3d93b995b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -25,12 +25,10 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -111,7 +109,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
@@ -158,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -874,56 +870,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
*/
}
-/* FIXME: Does this need to be ASIC-specific code? */
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1921dec3df7a..a6e5184d436c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -410,15 +410,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
if (p_bo_va_entry)
*p_bo_va_entry = bo_va_entry;
- /* Allocate new page tables if needed and validate
- * them.
- */
- ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
- if (ret) {
- pr_err("Failed to allocate pts, err=%d\n", ret);
- goto err_alloc_pts;
- }
-
+	/* Validate page tables if needed */
ret = vm_validate_pt_pd_bos(vm);
if (ret) {
pr_err("validate_pt_pd_bos() failed\n");
@@ -741,13 +733,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
struct amdgpu_sync *sync)
{
int ret;
- struct amdgpu_vm *vm;
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_bo *bo;
-
- bo_va = entry->bo_va;
- vm = bo_va->base.vm;
- bo = bo_va->base.bo;
+ struct amdgpu_bo_va *bo_va = entry->bo_va;
/* Update the page tables */
ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -906,7 +892,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
}
- amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);
+ ret = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
amdgpu_bo_fence(vm->root.base.bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index b61e1dc61b4c..f96d75c6e099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -28,8 +28,6 @@
#include "atom.h"
#include "atombios.h"
-#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
-
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -238,10 +236,71 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
return 0;
}
+/*
+ * Return true if the vbios has enabled ECC by default and the umc info table
+ * is available; return false if ECC is not enabled or the table is missing.
+ */
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ bool ecc_default_enabled = false;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+ /* support umc_info 3.1+ */
+ if ((frev == 3 && crev >= 1) || (frev > 3)) {
+ umc_info = (union umc_info *)
+ (mode_info->atom_context->bios + data_offset);
+ ecc_default_enabled =
+ (le32_to_cpu(umc_info->v31.umc_config) &
+ UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ }
+ }
+
+ return ecc_default_enabled;
+}
+
union firmware_info {
struct atom_firmware_info_v3_1 v31;
};
+/*
+ * Return true if vbios supports sram ecc or false if not
+ */
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union firmware_info *firmware_info;
+ u8 frev, crev;
+ bool sram_ecc_supported = false;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+		/* support firmware_info 3.1+ */
+		if ((frev == 3 && crev >= 1) || (frev > 3)) {
+ firmware_info = (union firmware_info *)
+ (mode_info->atom_context->bios + data_offset);
+ sram_ecc_supported =
+ (le32_to_cpu(firmware_info->v31.firmware_capability) &
+ ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
+ }
+ }
+
+ return sram_ecc_supported;
+}
+
union smu_info {
struct atom_smu_info_v3_1 v31;
};
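Both new helpers above boil down to reading a little-endian capability word from the parsed BIOS tables and testing one flag bit. A stand-alone C sketch of that pattern; the buffer contents, the CAP_SRAM_ECC value and le32_to_host() are invented for illustration (the kernel does the equivalent with le32_to_cpu() on the ATOM fields):

#include <stdint.h>
#include <stdio.h>

#define CAP_SRAM_ECC (1u << 3)          /* made-up bit position */

static uint32_t le32_to_host(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        /* pretend firmware_capability field as stored in the image */
        const uint8_t raw[4] = { 0x08, 0x00, 0x00, 0x00 };
        uint32_t caps = le32_to_host(raw);

        printf("sram ecc supported: %s\n",
               (caps & CAP_SRAM_ECC) ? "yes" : "no");   /* yes */
        return 0;
}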
@@ -346,11 +405,11 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 4:
- adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
- adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
- adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
- adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
- adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
+ adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
+ adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 20f158fd3b76..5ec6f92f353c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_ATOMFIRMWARE_H__
#define __AMDGPU_ATOMFIRMWARE_H__
+#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
+
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
@@ -31,5 +33,7 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 52a5e4fdc95b..2f6239b6be6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
break;
default:
@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
- for (i = 0; i < parser->num_post_dep_syncobjs; i++)
- drm_syncobj_put(parser->post_dep_syncobjs[i]);
- kfree(parser->post_dep_syncobjs);
+ for (i = 0; i < parser->num_post_deps; i++) {
+ drm_syncobj_put(parser->post_deps[i].syncobj);
+ kfree(parser->post_deps[i].chain);
+ }
+ kfree(parser->post_deps);
dma_fence_put(parser->fence);
@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
}
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
- uint32_t handle)
+ uint32_t handle, u64 point,
+ u64 flags)
{
- int r;
struct dma_fence *fence;
- r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
- if (r)
+ int r;
+
+ r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+ if (r) {
+ DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ handle, point, r);
return r;
+ }
r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
dma_fence_put(fence);
@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
+ struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps;
int i, r;
- struct drm_amdgpu_cs_chunk_sem *deps;
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
+ 0, 0);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+ unsigned num_deps;
+ int i, r;
+
+ syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+ r = amdgpu_syncobj_lookup_and_add_to_sync(p,
+ syncobj_deps[i].handle,
+ syncobj_deps[i].point,
+ syncobj_deps[i].flags);
if (r)
return r;
}
+
return 0;
}
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
+ struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps;
int i;
- struct drm_amdgpu_cs_chunk_sem *deps;
+
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
- p->post_dep_syncobjs = kmalloc_array(num_deps,
- sizeof(struct drm_syncobj *),
- GFP_KERNEL);
- p->num_post_dep_syncobjs = 0;
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
- if (!p->post_dep_syncobjs)
+ if (!p->post_deps)
return -ENOMEM;
+
for (i = 0; i < num_deps; ++i) {
- p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
- if (!p->post_dep_syncobjs[i])
+ p->post_deps[i].syncobj =
+ drm_syncobj_find(p->filp, deps[i].handle);
+ if (!p->post_deps[i].syncobj)
return -EINVAL;
- p->num_post_dep_syncobjs++;
+ p->post_deps[i].chain = NULL;
+ p->post_deps[i].point = 0;
+ p->num_post_deps++;
}
+
+ return 0;
+}
+
+
+static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk
+ *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+ unsigned num_deps;
+ int i;
+
+ syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+ dep->chain = NULL;
+ if (syncobj_deps[i].point) {
+ dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
+ if (!dep->chain)
+ return -ENOMEM;
+ }
+
+ dep->syncobj = drm_syncobj_find(p->filp,
+ syncobj_deps[i].handle);
+ if (!dep->syncobj) {
+ kfree(dep->chain);
+ return -EINVAL;
+ }
+ dep->point = syncobj_deps[i].point;
+ p->num_post_deps++;
+ }
+
return 0;
}
@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
chunk = &p->chunks[i];
- if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
- chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+ switch (chunk->chunk_id) {
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
return r;
- } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
return r;
- } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
+ if (r)
+ return r;
+ break;
}
}
@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
int i;
- for (i = 0; i < p->num_post_dep_syncobjs; ++i)
- drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+ for (i = 0; i < p->num_post_deps; ++i) {
+ if (p->post_deps[i].chain && p->post_deps[i].point) {
+ drm_syncobj_add_point(p->post_deps[i].syncobj,
+ p->post_deps[i].chain,
+ p->fence, p->post_deps[i].point);
+ p->post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(p->post_deps[i].syncobj,
+ p->fence);
+ }
+ }
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
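The signalling path above either hands the pre-allocated chain node over to the syncobj and clears the pointer, so cleanup never frees a node it no longer owns, or falls back to replacing the fence when no timeline point was given. A stand-alone C sketch of that ownership hand-off; consume() is an invented stand-in for drm_syncobj_add_point():

#include <stdio.h>
#include <stdlib.h>

struct node { int point; };

static void consume(struct node *n)     /* takes ownership of n */
{
        printf("attached point %d\n", n->point);
        free(n);
}

int main(void)
{
        struct node *chain = NULL;
        int point = 5;                  /* 0 would mean "no timeline point" */

        if (point) {
                chain = malloc(sizeof(*chain));
                if (!chain)
                        return 1;       /* fail early, nothing to unwind yet */
                chain->point = point;
        }

        if (chain) {
                consume(chain);
                chain = NULL;           /* ownership moved, don't free below */
        }

        free(chain);                    /* safe: NULL once handed over */
        return 0;
}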
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 7e22be7ca68a..54dd02a898b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -92,15 +92,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return -ENOMEM;
}
- r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
- size);
- if (r) {
- DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 7b526593eb77..a28a3d722ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -26,6 +26,7 @@
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
+#include "amdgpu_ras.h"
#define to_amdgpu_ctx_entity(e) \
container_of((e), struct amdgpu_ctx_entity, entity)
@@ -344,6 +345,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
+ uint32_t ras_counter;
if (!fpriv)
return -EINVAL;
@@ -368,6 +370,21 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+	/* query ue count */
+ ras_counter = amdgpu_ras_query_error_count(adev, false);
+	/* the ras counter is monotonically increasing */
+ if (ras_counter != ctx->ras_counter_ue) {
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+ ctx->ras_counter_ue = ras_counter;
+ }
+
+	/* query ce count */
+ ras_counter = amdgpu_ras_query_error_count(adev, true);
+ if (ras_counter != ctx->ras_counter_ce) {
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+ ctx->ras_counter_ce = ras_counter;
+ }
+
mutex_unlock(&mgr->lock);
return 0;
}
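The query above relies on the RAS error counters only ever increasing, so comparing the current value with the one cached at the previous query is enough to flag new errors exactly once. A small stand-alone C sketch of that edge detection; all names are invented:

#include <stdbool.h>
#include <stdio.h>

struct ctx { unsigned int last_seen; };

static bool query_new_errors(struct ctx *c, unsigned int current)
{
        bool advanced = current != c->last_seen;

        c->last_seen = current;         /* remember for the next query */
        return advanced;
}

int main(void)
{
        struct ctx c = { 0 };

        printf("%d\n", query_new_errors(&c, 0));        /* 0: nothing new */
        printf("%d\n", query_new_errors(&c, 3));        /* 1: counter advanced */
        printf("%d\n", query_new_errors(&c, 3));        /* 0: already reported */
        return 0;
}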
@@ -541,32 +558,26 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
idr_init(&mgr->ctx_handles);
}
-void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
+long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
unsigned num_entities = amdgput_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i;
- long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
idp = &mgr->ctx_handles;
mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
-
- if (!ctx->adev) {
- mutex_unlock(&mgr->lock);
- return;
- }
-
for (i = 0; i < num_entities; i++) {
struct drm_sched_entity *entity;
entity = &ctx->entities[0][i].entity;
- max_wait = drm_sched_entity_flush(entity, max_wait);
+ timeout = drm_sched_entity_flush(entity, timeout);
}
}
mutex_unlock(&mgr->lock);
+ return timeout;
}
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
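amdgpu_ctx_mgr_entity_flush() above now threads a single timeout budget through every drm_sched_entity_flush() call and returns whatever is left, so the whole flush is bounded by one deadline instead of one per entity. A user-space sketch of that budget propagation; wait_step() is an invented stand-in for the per-entity flush:

#include <stdio.h>

static long wait_step(long budget, long cost)
{
        long used = cost < budget ? cost : budget;

        return budget - used;           /* remaining time, never negative */
}

int main(void)
{
        long timeout = 100;
        const long costs[] = { 30, 50, 40 };

        for (unsigned int i = 0; i < 3 && timeout > 0; i++)
                timeout = wait_step(timeout, costs[i]);

        printf("remaining budget: %ld\n", timeout);     /* 0: last wait cut short */
        return 0;
}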
@@ -579,10 +590,6 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id) {
-
- if (!ctx->adev)
- return;
-
if (kref_read(&ctx->refcount) != 1) {
DRM_ERROR("ctx %p is still alive\n", ctx);
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index b3b012c0a7da..5f1b54c9bcdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -49,6 +49,8 @@ struct amdgpu_ctx {
enum drm_sched_priority override_priority;
struct mutex lock;
atomic_t guilty;
+ uint32_t ras_counter_ce;
+ uint32_t ras_counter_ue;
};
struct amdgpu_ctx_mgr {
@@ -82,7 +84,7 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
+long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 4ae3ff9a1d4c..8930d66f2204 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -568,10 +568,9 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
idx = *pos >> 2;
valuesize = sizeof(values);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- else
- return -EINVAL;
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+ if (r)
+ return r;
if (size > valuesize)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4f8fb4ecde34..cc8ad3831982 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -60,6 +60,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_ras.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -1506,7 +1507,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return -EAGAIN;
}
- adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+ adev->pm.pp_feature = amdgpu_pp_feature_mask;
+ if (amdgpu_sriov_vf(adev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -1638,6 +1641,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
int i, r;
+ r = amdgpu_ras_init(adev);
+ if (r)
+ return r;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1681,6 +1688,13 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
}
+ r = amdgpu_ib_pool_init(adev);
+ if (r) {
+ dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
+ goto init_failed;
+ }
+
r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
if (r)
goto init_failed;
@@ -1869,6 +1883,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_ras_pre_fini(adev);
+
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
@@ -1917,6 +1933,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
+ amdgpu_ib_pool_fini(adev);
}
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
@@ -1937,6 +1954,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
+ amdgpu_ras_fini(adev);
+
if (amdgpu_sriov_vf(adev))
if (amdgpu_virt_release_full_gpu(adev, false))
DRM_ERROR("failed to release exclusive mode on fini\n");
@@ -1999,6 +2018,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+
+	/* set to low pstate by default */
+ amdgpu_xgmi_set_pstate(adev, 0);
+
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2369,7 +2392,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
adev->asic_reset_res = amdgpu_asic_reset(adev);
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+ DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev->ddev->unique);
}
@@ -2448,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
+ mutex_init(&adev->virt.dpm_mutex);
amdgpu_device_check_arguments(adev);
@@ -2642,13 +2666,6 @@ fence_driver_init:
/* Get a log2 for easy divisions. */
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
- r = amdgpu_ib_pool_init(adev);
- if (r) {
- dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
- goto failed;
- }
-
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
@@ -2694,6 +2711,9 @@ fence_driver_init:
goto failed;
}
+ /* must succeed. */
+ amdgpu_ras_post_init(adev);
+
return 0;
failed:
@@ -2726,7 +2746,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
else
drm_atomic_helper_shutdown(adev->ddev);
}
- amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
@@ -3165,6 +3184,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
/* No need to recover an evicted BO */
if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+ shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
continue;
@@ -3173,11 +3193,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
break;
if (fence) {
- r = dma_fence_wait_timeout(fence, false, tmo);
+ tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
fence = next;
- if (r <= 0)
+ if (tmo == 0) {
+ r = -ETIMEDOUT;
+ break;
+ } else if (tmo < 0) {
+ r = tmo;
break;
+ }
} else {
fence = next;
}
@@ -3188,8 +3213,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
- if (r <= 0 || tmo <= 0) {
- DRM_ERROR("recover vram bo from shadow failed\n");
+ if (r < 0 || tmo <= 0) {
+ DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
return -EIO;
}
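The loop above now reuses the return value of dma_fence_wait_timeout() as the budget for the next wait and maps 0 to -ETIMEDOUT and negative values to plain errors. A stand-alone C sketch of handling that return convention; fake_wait() is an invented stand-in for the real wait:

#include <errno.h>
#include <stdio.h>

/* returns remaining time on success, 0 on timeout, negative errno on error */
static long fake_wait(long tmo, long signalled_after)
{
        if (signalled_after < 0)
                return signalled_after;
        if (signalled_after >= tmo)
                return 0;
        return tmo - signalled_after;
}

int main(void)
{
        long tmo = 60, r = 0;
        const long events[] = { 10, 20, 100 };  /* third fence misses the deadline */

        for (unsigned int i = 0; i < 3; i++) {
                tmo = fake_wait(tmo, events[i]);
                if (tmo == 0) {
                        r = -ETIMEDOUT;
                        break;
                }
                if (tmo < 0) {
                        r = tmo;
                        break;
                }
        }
        printf("r=%ld tmo=%ld\n", r, tmo);      /* r is -ETIMEDOUT, tmo is 0 */
        return 0;
}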
@@ -3219,6 +3244,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
@@ -3238,6 +3265,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
+ amdgpu_amdkfd_post_reset(adev);
error:
amdgpu_virt_init_data_exchange(adev);
@@ -3370,7 +3398,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
r = amdgpu_asic_reset(tmp_adev);
if (r) {
- DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
+ DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
r, tmp_adev->ddev->unique);
break;
}
@@ -3387,6 +3415,11 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
break;
}
}
+
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ amdgpu_ras_reserve_bad_pages(tmp_adev);
+ }
}
}
@@ -3405,7 +3438,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
+ DRM_INFO("VRAM is lost due to GPU reset!\n");
atomic_inc(&tmp_adev->vram_lost_counter);
}
@@ -3625,6 +3658,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
struct pci_dev *pdev = adev->pdev;
enum pci_bus_speed cur_speed;
enum pcie_link_width cur_width;
+ u32 ret = 1;
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3666,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
while (pdev) {
cur_speed = pcie_get_speed_cap(pdev);
cur_width = pcie_get_width_cap(pdev);
+ ret = pcie_bandwidth_available(adev->pdev, NULL,
+ NULL, &cur_width);
+ if (!ret)
+ cur_width = PCIE_LNK_WIDTH_RESRV;
if (cur_speed != PCI_SPEED_UNKNOWN) {
if (*speed == PCI_SPEED_UNKNOWN)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 344967df3137..523b8ab6b04e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -904,3 +904,19 @@ amdgpu_get_vce_clock_state(void *handle, u32 idx)
return NULL;
}
+
+int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+ if (is_support_sw_smu(adev))
+ return smu_get_sclk(&adev->smu, low);
+ else
+ return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
+}
+
+int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+ if (is_support_sw_smu(adev))
+ return smu_get_mclk(&adev->smu, low);
+ else
+ return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
+}
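/*
 * Illustrative sketch, not part of the patch: the new amdgpu_dpm_get_sclk()
 * and amdgpu_dpm_get_mclk() wrappers hide the sw-SMU vs. powerplay split from
 * callers.  "example_report_clocks" is a hypothetical helper; the "* 10"
 * scaling simply mirrors the dev_info usage in amdgpu_kms.c later in this
 * patch and is assumed to convert the returned units for reporting.
 */
static void example_report_clocks(struct amdgpu_device *adev)
{
	int sclk = amdgpu_dpm_get_sclk(adev, false) * 10;
	int mclk = amdgpu_dpm_get_mclk(adev, false) * 10;

	DRM_INFO("engine clock: %d, memory clock: %d\n", sclk, mclk);
}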
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index e871e022c129..dca35407879d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -260,9 +260,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
- ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
-
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
@@ -281,18 +278,18 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
-
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
+#define amdgpu_smu_get_current_power_state(adev) \
+ ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
+
+#define amdgpu_smu_set_power_state(adev) \
+ ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
+
#define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
@@ -448,6 +445,9 @@ struct amdgpu_pm {
uint32_t smu_prv_buffer_size;
struct amdgpu_bo *smu_prv_buffer;
bool ac_power;
+ /* powerplay feature */
+ uint32_t pp_feature;
+
};
#define R600_SSTU_DFLT 0
@@ -486,6 +486,8 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
@@ -504,4 +506,8 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx);
+extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
+
+extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7419ea8a388b..1e2cc9d68a05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -74,9 +74,11 @@
* - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
* - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
* - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
+ * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
+ * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 30
+#define KMS_DRIVER_MINOR 32
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -117,8 +119,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xfffd3fff;
+/* OverDrive (bit 14) disabled by default */
+uint amdgpu_pp_feature_mask = 0xffffbfff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -136,6 +138,8 @@ uint amdgpu_dc_feature_mask = 0;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
+int amdgpu_ras_enable = -1;
+uint amdgpu_ras_mask = 0xffffffff;
/**
* DOC: vramlimit (int)
@@ -495,6 +499,21 @@ MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
/**
+ * DOC: ras_enable (int)
+ * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
+ */
+MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
+module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);
+
+/**
+ * DOC: ras_mask (uint)
+ * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
+ * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+ */
+MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
+module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
+
+/**
* DOC: si_support (int)
* Set SI support driver. This parameter takes effect only when CONFIG_DRM_AMDGPU_SI is set. For SI ASICs, when the radeon driver is enabled,
* setting the value to 0 selects the radeon driver, while setting it to 1 selects the amdgpu driver. The default is to use the radeon driver when it is available,
@@ -974,6 +993,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
drm_dev_unplug(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -1158,13 +1178,14 @@ static int amdgpu_flush(struct file *f, fl_owner_t id)
{
struct drm_file *file_priv = f->private_data;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
- amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
+ timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
+ timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
- return 0;
+ return timeout >= 0 ? 0 : timeout;
}
-
static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 5cbde74b97dd..e47609218839 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -49,12 +49,11 @@
static int
amdgpufb_open(struct fb_info *info, int user)
{
- struct amdgpu_fbdev *rfbdev = info->par;
- struct amdgpu_device *adev = rfbdev->adev;
- int ret = pm_runtime_get_sync(adev->ddev->dev);
+ struct drm_fb_helper *fb_helper = info->par;
+ int ret = pm_runtime_get_sync(fb_helper->dev->dev);
if (ret < 0 && ret != -EACCES) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(fb_helper->dev->dev);
+ pm_runtime_put_autosuspend(fb_helper->dev->dev);
return ret;
}
return 0;
@@ -63,11 +62,10 @@ amdgpufb_open(struct fb_info *info, int user)
static int
amdgpufb_release(struct fb_info *info, int user)
{
- struct amdgpu_fbdev *rfbdev = info->par;
- struct amdgpu_device *adev = rfbdev->adev;
+ struct drm_fb_helper *fb_helper = info->par;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(fb_helper->dev->dev);
+ pm_runtime_put_autosuspend(fb_helper->dev->dev);
return 0;
}
@@ -233,9 +231,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
goto out;
}
- info->par = rfbdev;
- info->skip_vt_switch = true;
-
ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
&mode_cmd, gobj);
if (ret) {
@@ -248,10 +243,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup helper */
rfbdev->helper.fb = fb;
- strcpy(info->fix.id, "amdgpudrmfb");
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-
info->fbops = &amdgpufb_ops;
tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
@@ -260,7 +251,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
- drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ee47c11e92ce..4dee2326b29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct dma_fence *old, **ptr;
+ struct dma_fence __rcu **ptr;
uint32_t seq;
+ int r;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if (fence == NULL)
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq, flags | AMDGPU_FENCE_FLAG_INT);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ if (unlikely(rcu_dereference_protected(*ptr, 1))) {
+ struct dma_fence *old;
+
+ rcu_read_lock();
+ old = dma_fence_get_rcu_safe(ptr);
+ rcu_read_unlock();
+
+ if (old) {
+ r = dma_fence_wait(old, false);
+ dma_fence_put(old);
+ if (r)
+ return r;
+ }
+ }
+
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
- old = rcu_dereference_protected(*ptr, 1);
- if (old && !dma_fence_is_signaled(old)) {
- DRM_INFO("rcu slot is busy\n");
- dma_fence_wait(old, false);
- }
-
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d21dd2f369da..d4fcf5475464 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -31,6 +31,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
+#include "amdgpu_xgmi.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
@@ -627,11 +628,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
- args->map_size);
- if (r)
- goto error_backoff;
-
va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
@@ -647,11 +643,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
- args->map_size);
- if (r)
- goto error_backoff;
-
va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
@@ -678,6 +669,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
+ struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
int r;
@@ -716,6 +708,15 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(robj);
break;
}
+ for (base = robj->vm_bo; base; base = base->next)
+ if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
+ amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
+ r = -EINVAL;
+ amdgpu_bo_unreserve(robj);
+ goto out;
+ }
+
+
robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
@@ -745,17 +746,25 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct amdgpu_device *adev = dev->dev_private;
struct drm_gem_object *gobj;
uint32_t handle;
+ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
u32 domain;
int r;
+ /*
+ * The buffer returned from this function should be cleared, but
+ * that is only possible while the ring is enabled; requesting the
+ * clear otherwise would make buffer creation fail.
+ */
+ if (adev->mman.buffer_funcs_enabled)
+ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
args->pitch = amdgpu_align_pitch(adev, args->width,
DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
domain = amdgpu_bo_get_preferred_pin_domain(adev,
amdgpu_display_supported_domains(adev));
- r = amdgpu_gem_object_create(adev, args->size, 0, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
ttm_bo_type_device, NULL, &gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 97a60da62004..997932ebbb83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -390,7 +390,7 @@ void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
- if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;
if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index f790e15bcd08..09fc53af3d35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -258,6 +258,9 @@ struct amdgpu_gfx {
/* pipe reservation */
struct mutex pipe_reserve_mutex;
DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+ /* ras */
+ struct ras_common_if *ras_if;
};
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index d73367cab4f3..250d9212cc38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -80,6 +80,33 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_gmc_set_pte_pde - update the page tables using the CPU
+ *
+ * @adev: amdgpu_device pointer
+ * @cpu_pt_addr: cpu address of the page table
+ * @gpu_page_idx: entry in the page table to update
+ * @addr: dst addr to write into pte/pde
+ * @flags: access flags
+ *
+ * Update the page tables using the CPU.
+ */
+int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags)
+{
+ void __iomem *ptr = (void *)cpu_pt_addr;
+ uint64_t value;
+
+ /*
+ * The following is for PTE only. GART does not have PDEs.
+ */
+ value = addr & 0x0000FFFFFFFFF000ULL;
+ value |= flags;
+ writeq(value, ptr + (gpu_page_idx * 8));
+ return 0;
+}
+
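/*
 * Illustrative sketch, not part of the patch: with set_pte_pde now a plain
 * helper instead of a per-ASIC callback, a caller that owns a CPU-visible
 * page table can fill consecutive entries directly.  "example_fill_gart",
 * "dma_addrs" and "pte_flags" are assumptions for the example only.
 */
static void example_fill_gart(struct amdgpu_device *adev, void *gart_cpu_ptr,
			      unsigned int num_pages,
			      const dma_addr_t *dma_addrs, uint64_t pte_flags)
{
	unsigned int i;

	for (i = 0; i < num_pages; i++)
		amdgpu_gmc_set_pte_pde(adev, gart_cpu_ptr, i,
				       dma_addrs[i], pte_flags);
}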
+/**
* amdgpu_gmc_agp_addr - return the address in the AGP address space
*
* @tbo: TTM BO which needs the address, must be in GTT domain
@@ -213,3 +240,58 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
+
+/**
+ * amdgpu_gmc_filter_faults - filter VM faults
+ *
+ * @adev: amdgpu device structure
+ * @addr: address of the VM fault
+ * @pasid: PASID of the process causing the fault
+ * @timestamp: timestamp of the fault
+ *
+ * Returns:
+ * True if the fault was filtered and should not be processed further.
+ * False if the fault is a new one and needs to be handled.
+ */
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid, uint64_t timestamp)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+
+ uint64_t stamp, key = addr << 4 | pasid;
+ struct amdgpu_gmc_fault *fault;
+ uint32_t hash;
+
+ /* If we don't have space left in the ring buffer, return immediately */
+ stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
+ AMDGPU_GMC_FAULT_TIMEOUT;
+ if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
+ return true;
+
+ /* Try to find the fault in the hash */
+ hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
+ fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
+ while (fault->timestamp >= stamp) {
+ uint64_t tmp;
+
+ if (fault->key == key)
+ return true;
+
+ tmp = fault->timestamp;
+ fault = &gmc->fault_ring[fault->next];
+
+ /* Check if the entry was reused */
+ if (fault->timestamp >= tmp)
+ break;
+ }
+
+ /* Add the fault to the ring */
+ fault = &gmc->fault_ring[gmc->last_fault];
+ fault->key = key;
+ fault->timestamp = timestamp;
+
+ /* And update the hash */
+ fault->next = gmc->fault_hash[hash].idx;
+ gmc->fault_hash[hash].idx = gmc->last_fault++;
+ return false;
+}
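/*
 * Illustrative sketch, not part of the patch: a GMC fault handler can use
 * amdgpu_gmc_filter_faults() as a cheap front-end filter before doing any
 * expensive work.  "example_should_handle_fault" is a hypothetical helper.
 */
static bool example_should_handle_fault(struct amdgpu_device *adev,
					uint64_t addr, uint16_t pasid,
					uint64_t timestamp)
{
	/* true from the filter means the fault was seen recently; skip it */
	if (amdgpu_gmc_filter_faults(adev, addr, pasid, timestamp))
		return false;

	/* new fault, proceed with the real handling */
	return true;
}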
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 81e6070d255b..071145ac67b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -43,9 +43,35 @@
*/
#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
+/*
+ * Ring size as power of two for the log of recent faults.
+ */
+#define AMDGPU_GMC_FAULT_RING_ORDER 8
+#define AMDGPU_GMC_FAULT_RING_SIZE (1 << AMDGPU_GMC_FAULT_RING_ORDER)
+
+/*
+ * Hash size as power of two for the log of recent faults.
+ */
+#define AMDGPU_GMC_FAULT_HASH_ORDER 8
+#define AMDGPU_GMC_FAULT_HASH_SIZE (1 << AMDGPU_GMC_FAULT_HASH_ORDER)
+
+/*
+ * Number of IH timestamp ticks until a fault is considered handled
+ */
+#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+
struct firmware;
/*
+ * GMC page fault information
+ */
+struct amdgpu_gmc_fault {
+ uint64_t timestamp;
+ uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
+ uint64_t key:52;
+};
+
+/*
* VMHUB structures, functions & helpers
*/
struct amdgpu_vmhub {
@@ -71,12 +97,6 @@ struct amdgpu_gmc_funcs {
/* Change the VMID -> PASID mapping */
void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
unsigned pasid);
- /* write pte/pde updates using the cpu */
- int (*set_pte_pde)(struct amdgpu_device *adev,
- void *cpu_pt_addr, /* cpu addr of page table */
- uint32_t gpu_page_idx, /* pte/pde to update */
- uint64_t addr, /* addr to write into pte/pde */
- uint64_t flags); /* access flags */
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
/* set pte flags based per asic */
@@ -147,15 +167,22 @@ struct amdgpu_gmc {
struct kfd_vm_fault_info *vm_fault_info;
atomic_t vm_fault_info_updated;
+ struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
+ struct {
+ uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
+ } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
+ uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
+
const struct amdgpu_gmc_funcs *gmc_funcs;
struct amdgpu_xgmi xgmi;
+ struct amdgpu_irq_src ecc_irq;
+ struct ras_common_if *ras_if;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
@@ -189,6 +216,9 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
+int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
@@ -197,5 +227,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid, uint64_t timestamp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index da7b1b92d9cf..62591d081856 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -37,6 +37,47 @@ struct amdgpu_gtt_node {
};
/**
+ * DOC: mem_info_gtt_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total size
+ * of the GTT.
+ * The file mem_info_gtt_total is used for this and returns the total size of
+ * the GTT block, in bytes.
+ */
+static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
+}
+
+/**
+ * DOC: mem_info_gtt_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current amount of
+ * used GTT.
+ * The file mem_info_gtt_used is used for this and returns the currently used
+ * size of the GTT block, in bytes.
+ */
+static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
+}
+
+static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
+ amdgpu_mem_info_gtt_total_show, NULL);
+static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
+ amdgpu_mem_info_gtt_used_show, NULL);
+
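/*
 * Illustrative sketch, not part of the patch: reading the new GTT sysfs files
 * from a userspace C program.  The path below assumes a single GPU exposed as
 * card0; the real location depends on the system.
 */
#include <stdio.h>

static unsigned long long example_read_gtt_bytes(const char *name)
{
	char path[256];
	unsigned long long val = 0;
	FILE *f;

	/* e.g. name = "mem_info_gtt_total" or "mem_info_gtt_used" */
	snprintf(path, sizeof(path), "/sys/class/drm/card0/device/%s", name);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%llu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}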
+/**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
*
* @man: TTM memory type manager
@@ -50,6 +91,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr;
uint64_t start, size;
+ int ret;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -61,6 +103,18 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
spin_lock_init(&mgr->lock);
atomic64_set(&mgr->available, p_size);
man->priv = mgr;
+
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_gtt_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_gtt_used\n");
+ return ret;
+ }
+
return 0;
}
@@ -74,12 +128,17 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
*/
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
+
+ device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 0b8ef2d27d6b..fe393a46f881 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000)
/*
* IB
@@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
* cost waiting for it coming back under RUNTIME only
*/
tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ } else if (adev->gmc.xgmi.hive_id) {
+ tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
}
for (i = 0; i < adev->num_rings; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 1c50be3ab8a9..934dfdcb4e73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -142,6 +142,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
*/
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
+ unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
u32 wptr;
if (!ih->enabled || adev->shutdown)
@@ -159,7 +160,7 @@ restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
- while (ih->rptr != wptr) {
+ while (ih->rptr != wptr && --count) {
amdgpu_irq_dispatch(adev, ih);
ih->rptr &= ih->ptr_mask;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 113a1ba13d4a..4e0bb645176d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,6 +24,9 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
+/* Maximum number of IVs processed at once */
+#define AMDGPU_IH_MAX_NUM_IVS 32
+
struct amdgpu_device;
struct amdgpu_iv_entry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e860412043bb..b17d0545728e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -39,6 +39,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
+#include "amdgpu_ras.h"
static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -296,6 +297,17 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->pm.fw_version;
fw_info->feature = 0;
break;
+ case AMDGPU_INFO_FW_TA:
+ if (query_fw->index > 1)
+ return -EINVAL;
+ if (query_fw->index == 0) {
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_xgmi_ucode_version;
+ } else {
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_ras_ucode_version;
+ }
+ break;
case AMDGPU_INFO_FW_SDMA:
if (query_fw->index >= adev->sdma.num_instances)
return -EINVAL;
@@ -684,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk) {
+ dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
@@ -909,6 +925,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_LOST_COUNTER:
ui32 = atomic_read(&adev->vram_lost_counter);
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t ras_mask;
+
+ if (!ras)
+ return -EINVAL;
+ ras_mask = (uint64_t)ras->supported << 32 | ras->features;
+
+ return copy_to_user(out, &ras_mask,
+ min_t(u64, size, sizeof(ras_mask))) ?
+ -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
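/*
 * Illustrative sketch, not part of the patch: userspace can split the 64-bit
 * value returned for AMDGPU_INFO_RAS_ENABLED_FEATURES exactly as it is packed
 * above (supported mask in the high 32 bits, enabled features in the low 32).
 */
static inline void example_split_ras_mask(uint64_t ras_mask,
					  uint32_t *supported,
					  uint32_t *features)
{
	*supported = (uint32_t)(ras_mask >> 32);
	*features = (uint32_t)(ras_mask & 0xffffffffu);
}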
@@ -1328,6 +1356,16 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ query_fw.fw_type = AMDGPU_INFO_FW_TA;
+ for (i = 0; i < 2; i++) {
+ query_fw.index = i;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ continue;
+ seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
+ i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
+ }
+
/* SMC */
query_fw.fw_type = AMDGPU_INFO_FW_SMC;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3e6823fdd939..58ed401c5996 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -256,14 +256,14 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
/* TODO we should be able to split locking for interval tree and
* amdgpu_mn_invalidate_node
*/
- if (amdgpu_mn_read_lock(amn, range->blockable))
+ if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
return -EAGAIN;
it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
@@ -299,7 +299,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end = range->end - 1;
- if (amdgpu_mn_read_lock(amn, range->blockable))
+ if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
return -EAGAIN;
it = interval_tree_iter_first(&amn->objects, range->start, end);
@@ -307,7 +307,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 889e443eeee7..2e9e3db778c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -58,7 +58,7 @@ struct amdgpu_hpd;
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
+#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base)
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
@@ -406,7 +406,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
- u64 last_flip_vblank;
+ u32 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ec9e45004bff..93b2c5a48a71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,12 +88,14 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
if (bo->gem_base.import_attach)
drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
- amdgpu_bo_unref(&bo->parent);
+ /* in case amdgpu_device_recover_vram got a NULL bo->parent */
if (!list_empty(&bo->shadow_list)) {
mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
+ amdgpu_bo_unref(&bo->parent);
+
kfree(bo->metadata);
kfree(bo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 220a6a7b1bc1..c430e8259038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -72,6 +72,8 @@ struct amdgpu_bo_va {
/* If the mappings are cleared or filled */
bool cleared;
+
+ bool is_xgmi;
};
struct amdgpu_bo {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index a7adb7b6bd98..34471dbaa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -28,6 +28,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
+#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
@@ -80,6 +81,27 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
}
}
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_read_sensor(&adev->smu, sensor, data, size);
+ else {
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
+ sensor, data, size);
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
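/*
 * Illustrative sketch, not part of the patch: with amdgpu_dpm_read_sensor()
 * now dispatching to sw-SMU or powerplay internally, the hwmon handlers below
 * no longer need their own read_sensor sanity checks.  Reading the GPU
 * temperature, for example, reduces to a single call; "example_read_gpu_temp"
 * is a hypothetical helper.
 */
static int example_read_gpu_temp(struct amdgpu_device *adev, int *temp)
{
	uint32_t size = sizeof(*temp);

	return amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				      (void *)temp, &size);
}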
/**
* DOC: power_dpm_state
*
@@ -122,7 +144,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (adev->powerplay.pp_funcs->get_current_power_state)
+ if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
+ pm = amdgpu_smu_get_current_power_state(adev);
+ else if (adev->powerplay.pp_funcs->get_current_power_state)
pm = amdgpu_dpm_get_current_power_state(adev);
else
pm = adev->pm.dpm.user_state;
@@ -240,7 +264,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
- if (adev->powerplay.pp_funcs->get_performance_level)
+ if (is_support_sw_smu(adev))
+ level = smu_get_performance_level(&adev->smu);
+ else if (adev->powerplay.pp_funcs->get_performance_level)
level = amdgpu_dpm_get_performance_level(adev);
else
level = adev->pm.dpm.forced_level;
@@ -273,7 +299,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_performance_level)
+ if (is_support_sw_smu(adev))
+ current_level = smu_get_performance_level(&adev->smu);
+ else if (adev->powerplay.pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
if (strncmp("low", buf, strlen("low")) == 0) {
@@ -299,10 +327,45 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
goto fail;
}
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgim_is_hwperf(adev) &&
+ adev->virt.ops->force_dpm_level) {
+ mutex_lock(&adev->pm.mutex);
+ adev->virt.ops->force_dpm_level(adev, level);
+ mutex_unlock(&adev->pm.mutex);
+ return count;
+ } else {
+ return -EINVAL;
+ }
+ }
+
if (current_level == level)
return count;
- if (adev->powerplay.pp_funcs->force_performance_level) {
+ /* profile_exit setting is valid only when the current level is a profile mode */
+ if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+ pr_err("Currently not in any profile mode!\n");
+ return -EINVAL;
+ }
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ if (adev->pm.dpm.thermal_active) {
+ count = -EINVAL;
+ mutex_unlock(&adev->pm.mutex);
+ goto fail;
+ }
+ ret = smu_force_performance_level(&adev->smu, level);
+ if (ret)
+ count = -EINVAL;
+ else
+ adev->pm.dpm.forced_level = level;
+ mutex_unlock(&adev->pm.mutex);
+ } else if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
@@ -328,9 +391,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct pp_states_info data;
- int i, buf_len;
+ int i, buf_len, ret;
- if (adev->powerplay.pp_funcs->get_pp_num_states)
+ if (is_support_sw_smu(adev)) {
+ ret = smu_get_power_num_states(&adev->smu, &data);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -351,23 +418,29 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct pp_states_info data;
+ struct smu_context *smu = &adev->smu;
enum amd_pm_state_type pm = 0;
- int i = 0;
+ int i = 0, ret = 0;
- if (adev->powerplay.pp_funcs->get_current_power_state
+ if (is_support_sw_smu(adev)) {
+ pm = smu_get_current_power_state(smu);
+ ret = smu_get_power_num_states(smu, &data);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->get_current_power_state
&& adev->powerplay.pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
+ }
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
-
- if (i == data.nums)
- i = -EINVAL;
+ for (i = 0; i < data.nums; i++) {
+ if (pm == data.states[i])
+ break;
}
+ if (i == data.nums)
+ i = -EINVAL;
+
return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
@@ -397,6 +470,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
+ else if (is_support_sw_smu(adev))
+ adev->pp_force_state_enabled = false;
else if (adev->powerplay.pp_funcs->dispatch_tasks &&
adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data;
@@ -442,7 +517,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size;
- if (adev->powerplay.pp_funcs->get_pp_table)
+ if (is_support_sw_smu(adev)) {
+ size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
+ if (size < 0)
+ return size;
+ }
+ else if (adev->powerplay.pp_funcs->get_pp_table)
size = amdgpu_dpm_get_pp_table(adev, &table);
else
return 0;
@@ -462,8 +542,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ int ret = 0;
- if (adev->powerplay.pp_funcs->set_pp_table)
+ if (is_support_sw_smu(adev)) {
+ ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
return count;
@@ -586,19 +671,29 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
- ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
- parameter, parameter_size);
+ if (is_support_sw_smu(adev)) {
+ ret = smu_od_edit_dpm_table(&adev->smu, type,
+ parameter, parameter_size);
- if (ret)
- return -EINVAL;
+ if (ret)
+ return -EINVAL;
+ } else {
+ if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
+ ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+ parameter, parameter_size);
- if (type == PP_OD_COMMIT_DPM_TABLE) {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- return count;
- } else {
+ if (ret)
return -EINVAL;
+
+ if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL);
+ return count;
+ } else {
+ return -EINVAL;
+ }
}
}
@@ -613,7 +708,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t size = 0;
- if (adev->powerplay.pp_funcs->print_clock_levels) {
+ if (is_support_sw_smu(adev)) {
+ size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
+ size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
+ size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
+ return size;
+ } else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
@@ -711,7 +812,13 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk)
+ return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -767,7 +874,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
if (ret)
@@ -783,7 +892,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -803,7 +914,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
if (ret)
@@ -819,7 +932,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -839,7 +954,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
if (ret)
@@ -855,7 +972,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -875,7 +994,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
if (ret)
@@ -891,7 +1012,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -911,7 +1034,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
if (ret)
@@ -927,7 +1052,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -947,7 +1074,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
if (ret)
@@ -964,7 +1093,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->powerplay.pp_funcs->get_sclk_od)
+ if (is_support_sw_smu(adev))
+ value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
+ else if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -986,14 +1117,19 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
- if (adev->powerplay.pp_funcs->set_sclk_od)
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ if (is_support_sw_smu(adev)) {
+ value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
} else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
+ if (adev->powerplay.pp_funcs->set_sclk_od)
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ } else {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
}
fail:
@@ -1008,7 +1144,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->powerplay.pp_funcs->get_mclk_od)
+ if (is_support_sw_smu(adev))
+ value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
+ else if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -1030,14 +1168,19 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
- if (adev->powerplay.pp_funcs->set_mclk_od)
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ if (is_support_sw_smu(adev)) {
+ value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
} else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
+ if (adev->powerplay.pp_funcs->set_mclk_od)
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ } else {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
}
fail:
@@ -1071,7 +1214,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->get_power_profile_mode)
+ if (is_support_sw_smu(adev))
+ return smu_get_power_profile_mode(&adev->smu, buf);
+ else if (adev->powerplay.pp_funcs->get_power_profile_mode)
return amdgpu_dpm_get_power_profile_mode(adev, buf);
return snprintf(buf, PAGE_SIZE, "\n");
@@ -1121,9 +1266,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
}
parameter[parameter_size] = profile_mode;
- if (adev->powerplay.pp_funcs->set_power_profile_mode)
+ if (is_support_sw_smu(adev))
+ ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+ else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
-
if (!ret)
return count;
fail:
@@ -1146,14 +1292,10 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* read the IP busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
(void *)&value, &size);
+
if (r)
return r;
@@ -1247,11 +1389,6 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size);
@@ -1283,11 +1420,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ if (is_support_sw_smu(adev)) {
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ } else {
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ return -EINVAL;
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
- return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ }
return sprintf(buf, "%i\n", pwm_mode);
}
@@ -1306,14 +1446,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
- return -EINVAL;
+ if (is_support_sw_smu(adev)) {
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
+ smu_set_fan_control_mode(&adev->smu, value);
+ } else {
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ return -EINVAL;
- amdgpu_dpm_set_fan_control_mode(adev, value);
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
+
+ amdgpu_dpm_set_fan_control_mode(adev, value);
+ }
return count;
}
@@ -1345,8 +1493,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if ((adev->flags & AMD_IS_PX) &&
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev))
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ else
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
return -EINVAL;
@@ -1358,7 +1508,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255;
- if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_set_fan_speed_percent(&adev->smu, value);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
if (err)
return err;
@@ -1380,7 +1534,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_fan_speed_percent(&adev->smu, &speed);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
if (err)
return err;
@@ -1404,7 +1562,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_current_rpm(&adev->smu, &speed);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err)
return err;
@@ -1422,9 +1584,6 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
- if (!adev->powerplay.pp_funcs->read_sensor)
- return -EINVAL;
-
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);
if (r)
@@ -1442,9 +1601,6 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
- if (!adev->powerplay.pp_funcs->read_sensor)
- return -EINVAL;
-
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);
if (r)
@@ -1466,7 +1622,11 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_current_rpm(&adev->smu, &rpm);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
if (err)
return err;
@@ -1484,7 +1644,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev))
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ else
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
if (pwm_mode != AMD_FAN_CTRL_MANUAL)
return -ENODATA;
@@ -1497,7 +1661,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (err)
return err;
- if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_set_fan_speed_rpm(&adev->smu, value);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
if (err)
return err;
@@ -1513,11 +1681,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
- return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev)) {
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ } else {
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ return -EINVAL;
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ }
return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -1536,8 +1707,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
- return -EINVAL;
err = kstrtoint(buf, 10, &value);
if (err)
@@ -1550,7 +1719,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ if (is_support_sw_smu(adev)) {
+ smu_set_fan_control_mode(&adev->smu, pwm_mode);
+ } else {
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ return -EINVAL;
+ amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ }
return count;
}
@@ -1569,11 +1744,6 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);
@@ -1608,11 +1778,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);
@@ -1644,11 +1809,6 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);
@@ -1675,7 +1835,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ smu_get_power_limit(&adev->smu, &limit, true);
+ return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
@@ -1690,7 +1853,10 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ smu_get_power_limit(&adev->smu, &limit, false);
+ return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
@@ -1713,7 +1879,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
value = value / 1000000; /* convert to Watt */
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ adev->smu.funcs->set_power_limit(&adev->smu, value);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
if (err)
return err;
@@ -1967,18 +2135,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->powerplay.pp_funcs->get_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
+ if (!is_support_sw_smu(adev)) {
+ /* mask fan attributes if we have no bindings for this asic to expose */
+ if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+ (!adev->powerplay.pp_funcs->get_fan_control_mode &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+ effective_mode &= ~S_IRUGO;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->powerplay.pp_funcs->set_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+ (!adev->powerplay.pp_funcs->set_fan_control_mode &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+ effective_mode &= ~S_IWUSR;
+ }
if ((adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
@@ -1987,20 +2157,22 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
- /* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
- (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
+ if (!is_support_sw_smu(adev)) {
+ /* hide max/min values if we can't both query and manage the fan */
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+ (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+ (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
- return 0;
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+ (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+ return 0;
+ }
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU) &&
@@ -2039,9 +2211,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled)
return;
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor &&
- !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size)) {
if (temp < adev->pm.dpm.thermal.min_temp)
/* switch back the user state */
@@ -2267,7 +2437,13 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ int ret = 0;
+ if (is_support_sw_smu(adev)) {
+ ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
+ if (ret)
+ DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
+ enable ? "true" : "false", ret);
+ } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
@@ -2288,7 +2464,13 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ int ret = 0;
+ if (is_support_sw_smu(adev)) {
+ ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
+ if (ret)
+ DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
+ enable ? "true" : "false", ret);
+ } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
@@ -2413,7 +2595,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_power_profile_mode\n");
return ret;
}
- if (hwmgr->od_enabled) {
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
ret = device_create_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
if (ret) {
@@ -2489,7 +2672,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
- if (hwmgr->od_enabled)
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
@@ -2516,28 +2700,38 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- if (!amdgpu_device_has_dc_support(adev)) {
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
+ struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+ mutex_lock(&(smu->mutex));
+ smu_handle_task(&adev->smu,
+ smu_dpm->dpm_level,
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+ mutex_unlock(&(smu->mutex));
+ } else {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+ if (adev->pm.pm_display_cfg.vrefresh > 120)
+ adev->pm.pm_display_cfg.min_vblank_time = 0;
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+ mutex_unlock(&adev->pm.mutex);
+ }
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+ } else {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
+ amdgpu_dpm_change_power_state_locked(adev);
mutex_unlock(&adev->pm.mutex);
}
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- amdgpu_dpm_change_power_state_locked(adev);
- mutex_unlock(&adev->pm.mutex);
}
}
@@ -2553,11 +2747,6 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
uint32_t query = 0;
int size;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* GPU Clocks */
size = sizeof(value);
seq_printf(m, "GFX Clocks and Power:\n");
@@ -2649,7 +2838,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
- } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+ } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3091488cd8cc..05897b05766b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,18 +38,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
psp_set_funcs(adev);
- return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct psp_context *psp = &adev->psp;
- int ret;
-
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
psp->adev = adev;
+ return 0;
+}
+
+static int psp_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
+ int ret;
+
ret = psp_init_microcode(psp);
if (ret) {
DRM_ERROR("Failed to load psp firmware!\n");
@@ -120,6 +121,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
+ int timeout = 2000;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -133,8 +135,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
- while (*((unsigned int *)psp->fence_buf) != index)
+ while (*((unsigned int *)psp->fence_buf) != index) {
+ if (--timeout == 0)
+ break;
msleep(1);
+ }
/* In some cases, psp response status is not 0 even there is no
* problem while the command is submitted. Some version of PSP FW
@@ -143,12 +148,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if (psp->cmd_buf_mem->resp.status) {
+ if (psp->cmd_buf_mem->resp.status || !timeout) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
DRM_WARN("psp command failed and response status is (%d)\n",
psp->cmd_buf_mem->resp.status);
+ if (!timeout)
+ return -EINVAL;
}
/* get xGMI session id from response buffer */
@@ -181,13 +188,13 @@ static int psp_tmr_init(struct psp_context *psp)
int ret;
/*
- * Allocate 3M memory aligned to 1M from Frame Buffer (local
- * physical).
+ * According to the HW engineers, the TMR address should be "naturally
+ * aligned", i.e. the start address should be an integer multiple of the TMR size.
*
* Note: this memory need be reserved till the driver
* uninitializes.
*/
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000,
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, PSP_TMR_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -466,6 +473,206 @@ static int psp_xgmi_initialize(struct psp_context *psp)
return ret;
}
+// ras begin
+static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t ras_ta_mc, uint64_t ras_mc_shared,
+ uint32_t ras_ta_size, uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_ras_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for ras ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->ras.ras_shared_bo,
+ &psp->ras.ras_shared_mc_addr,
+ &psp->ras.ras_shared_buf);
+
+ return ret;
+}
+
+static int psp_ras_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
+
+ psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->ras.ras_shared_mc_addr,
+ psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->ras.ras_initialized = 1;
+ psp->ras.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ras_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
+}
+
+static int psp_ras_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t ras_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->ras.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ if (enable)
+ ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
+ else
+ ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
+
+ ras_cmd->ras_in_message = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+}
+
+static int psp_ras_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return 0;
+
+ ret = psp_ras_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->ras.ras_initialized = 0;
+
+ /* free ras shared memory */
+ amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
+ &psp->ras.ras_shared_mc_addr,
+ &psp->ras.ras_shared_buf);
+
+ return 0;
+}
+
+static int psp_ras_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->ras.ras_initialized) {
+ ret = psp_ras_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_ras_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+// ras end
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -473,25 +680,35 @@ static int psp_hw_start(struct psp_context *psp)
if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
ret = psp_bootloader_load_sysdrv(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load sysdrv failed!\n");
return ret;
+ }
ret = psp_bootloader_load_sos(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load sos failed!\n");
return ret;
+ }
}
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP create ring failed!\n");
return ret;
+ }
ret = psp_tmr_load(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
return ret;
+ }
ret = psp_asd_load(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load asd failed!\n");
return ret;
+ }
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp);
@@ -502,6 +719,15 @@ static int psp_hw_start(struct psp_context *psp)
dev_err(psp->adev->dev,
"XGMI: Failed to initialize XGMI session\n");
}
+
+
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
+ }
+
return 0;
}
@@ -665,53 +891,52 @@ static int psp_load_fw(struct amdgpu_device *adev)
&psp->fence_buf_mc_addr,
&psp->fence_buf);
if (ret)
- goto failed_mem2;
+ goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem);
if (ret)
- goto failed_mem1;
+ goto failed;
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP ring init failed!\n");
+ goto failed;
+ }
ret = psp_tmr_init(psp);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP tmr init failed!\n");
+ goto failed;
+ }
ret = psp_asd_init(psp);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP asd init failed!\n");
+ goto failed;
+ }
skip_memalloc:
ret = psp_hw_start(psp);
if (ret)
- goto failed_mem;
+ goto failed;
ret = psp_np_fw_load(psp);
if (ret)
- goto failed_mem;
+ goto failed;
return 0;
-failed_mem:
- amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
- &psp->cmd_buf_mc_addr,
- (void **)&psp->cmd_buf_mem);
-failed_mem1:
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
-failed_mem2:
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
- kfree(psp->cmd);
- psp->cmd = NULL;
+ /*
+ * all cleanup jobs (xgmi terminate, ras terminate,
+ * ring destroy, cmd/fence/fw buffers destroy,
+ * psp->cmd destroy) are delayed to psp_hw_fini
+ */
return ret;
}
@@ -753,6 +978,9 @@ static int psp_hw_fini(void *handle)
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
+ if (psp->adev->psp.ta_fw)
+ psp_ras_terminate(psp);
+
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -786,6 +1014,14 @@ static int psp_suspend(void *handle)
}
}
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate ras ta\n");
+ return ret;
+ }
+ }
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 2ef98cc755d6..cde113f07c96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -28,11 +28,13 @@
#include "amdgpu.h"
#include "psp_gfx_if.h"
#include "ta_xgmi_if.h"
+#include "ta_ras_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
+#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
@@ -88,6 +90,9 @@ struct psp_funcs
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
bool (*support_vmr_ring)(struct psp_context *psp);
+ int (*ras_trigger_error)(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info);
+ int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
};
struct psp_xgmi_context {
@@ -98,6 +103,16 @@ struct psp_xgmi_context {
void *xgmi_shared_buf;
};
+struct psp_ras_context {
+ /*ras fw*/
+ bool ras_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *ras_shared_bo;
+ uint64_t ras_shared_mc_addr;
+ void *ras_shared_buf;
+ struct amdgpu_ras *ras;
+};
+
struct psp_context
{
struct amdgpu_device *adev;
@@ -150,10 +165,15 @@ struct psp_context
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
+ uint32_t ta_fw_version;
uint32_t ta_xgmi_ucode_version;
uint32_t ta_xgmi_ucode_size;
uint8_t *ta_xgmi_start_addr;
+ uint32_t ta_ras_ucode_version;
+ uint32_t ta_ras_ucode_size;
+ uint8_t *ta_ras_start_addr;
struct psp_xgmi_context xgmi_context;
+ struct psp_ras_context ras;
};
struct amdgpu_psp_funcs {
@@ -207,6 +227,13 @@ struct psp_xgmi_topology_info {
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+#define psp_ras_trigger_error(psp, info) \
+ ((psp)->funcs->ras_trigger_error ? \
+ (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
+#define psp_ras_cure_posion(psp, addr) \
+ ((psp)->funcs->ras_cure_posion ? \
+ (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -217,6 +244,11 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+
+int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable);
+
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
new file mode 100644
index 000000000000..22bd21efe6b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -0,0 +1,1482 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_atomfirmware.h"
+
+struct ras_ih_data {
+ /* interrupt bottom half */
+ struct work_struct ih_work;
+ int inuse;
+ /* IP callback */
+ ras_ih_cb cb;
+ /* full of entries */
+ unsigned char *ring;
+ unsigned int ring_size;
+ unsigned int element_size;
+ unsigned int aligned_element_size;
+ unsigned int rptr;
+ unsigned int wptr;
+};
+
+struct ras_fs_data {
+ char sysfs_name[32];
+ char debugfs_name[32];
+};
+
+struct ras_err_data {
+ unsigned long ue_count;
+ unsigned long ce_count;
+};
+
+struct ras_err_handler_data {
+ /* point to bad pages array */
+ struct {
+ unsigned long bp;
+ struct amdgpu_bo *bo;
+ } *bps;
+ /* the count of entries */
+ int count;
+ /* the space can place new entries */
+ int space_left;
+ /* last reserved entry's index + 1 */
+ int last_reserved;
+};
+
+struct ras_manager {
+ struct ras_common_if head;
+ /* reference count */
+ int use;
+ /* ras block link */
+ struct list_head node;
+ /* the device */
+ struct amdgpu_device *adev;
+ /* debugfs */
+ struct dentry *ent;
+ /* sysfs */
+ struct device_attribute sysfs_attr;
+ int attr_inuse;
+
+ /* fs node name */
+ struct ras_fs_data fs_data;
+
+ /* IH data */
+ struct ras_ih_data ih_data;
+
+ struct ras_err_data err_data;
+};
+
+const char *ras_error_string[] = {
+ "none",
+ "parity",
+ "single_correctable",
+ "multi_uncorrectable",
+ "poison",
+};
+
+const char *ras_block_string[] = {
+ "umc",
+ "sdma",
+ "gfx",
+ "mmhub",
+ "athub",
+ "pcie_bif",
+ "hdp",
+ "xgmi_wafl",
+ "df",
+ "smn",
+ "sem",
+ "mp0",
+ "mp1",
+ "fuse",
+};
+
+#define ras_err_str(i) (ras_error_string[ffs(i)])
+#define ras_block_str(i) (ras_block_string[i])
+
+#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
+#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
+
+static void amdgpu_ras_self_test(struct amdgpu_device *adev)
+{
+ /* TODO */
+}
+
+static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+ ssize_t s;
+ char val[128];
+
+ if (amdgpu_ras_error_query(obj->adev, &info))
+ return -EINVAL;
+
+ s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
+ "ue", info.ue_count,
+ "ce", info.ce_count);
+ if (*pos >= s)
+ return 0;
+
+ s -= *pos;
+ s = min_t(u64, s, size);
+
+
+ if (copy_to_user(buf, &val[*pos], s))
+ return -EINVAL;
+
+ *pos += s;
+
+ return s;
+}
+
+static const struct file_operations amdgpu_ras_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_ras_debugfs_read,
+ .write = NULL,
+ .llseek = default_llseek
+};
+
+static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
+ *block_id = i;
+ if (strcmp(name, ras_block_str(i)) == 0)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
+ const char __user *buf, size_t size,
+ loff_t *pos, struct ras_debug_if *data)
+{
+ ssize_t s = min_t(u64, 64, size);
+ char str[65];
+ char block_name[33];
+ char err[9] = "ue";
+ int op = -1;
+ int block_id;
+ u64 address, value;
+
+ if (*pos)
+ return -EINVAL;
+ *pos = size;
+
+ memset(str, 0, sizeof(str));
+ memset(data, 0, sizeof(*data));
+
+ if (copy_from_user(str, buf, s))
+ return -EINVAL;
+
+ if (sscanf(str, "disable %32s", block_name) == 1)
+ op = 0;
+ else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
+ op = 1;
+ else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
+ op = 2;
+ else if (str[0] && str[1] && str[2] && str[3])
+ /* ascii string, but commands are not matched. */
+ return -EINVAL;
+
+ if (op != -1) {
+ if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
+ return -EINVAL;
+
+ data->head.block = block_id;
+ data->head.type = memcmp("ue", err, 2) == 0 ?
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE :
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ data->op = op;
+
+ if (op == 2) {
+ if (sscanf(str, "%*s %*s %*s %llu %llu",
+ &address, &value) != 2)
+ if (sscanf(str, "%*s %*s %*s 0x%llx 0x%llx",
+ &address, &value) != 2)
+ return -EINVAL;
+ data->inject.address = address;
+ data->inject.value = value;
+ }
+ } else {
+ if (size < sizeof(*data))
+ return -EINVAL;
+
+ if (copy_from_user(data, buf, sizeof(*data)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+/*
+ * DOC: ras debugfs control interface
+ *
+ * It accepts struct ras_debug_if who has two members.
+ *
+ * First member: ras_debug_if::head or ras_debug_if::inject.
+ *
+ * head is used to indicate which IP block will be under control.
+ *
+ * head has four members, they are block, type, sub_block_index, name.
+ * block: which IP will be under control.
+ * type: what kind of error will be enabled/disabled/injected.
+ * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
+ * name: the name of IP.
+ *
+ * inject has two more members than head, they are address, value.
+ * As their names indicate, inject operation will write the
+ * value to the address.
+ *
+ * Second member: struct ras_debug_if::op.
+ * It has three kinds of operations.
+ * 0: disable RAS on the block. Take ::head as its data.
+ * 1: enable RAS on the block. Take ::head as its data.
+ * 2: inject errors on the block. Take ::inject as its data.
+ *
+ * How to use the interface?
+ * programs:
+ * copy struct ras_debug_if into your code and initialize it,
+ * then write the struct to the control node, as in the sketch below.
+ *
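+ * A minimal userspace sketch (illustrative only; it assumes struct
+ * ras_debug_if and the RAS enums are taken from amdgpu_ras.h, that
+ * fcntl.h/unistd.h are included for open/write, and that the node path
+ * matches the bash examples below):
+ *
+ *	struct ras_debug_if data = { 0 };
+ *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
+ *
+ *	data.head.block = 0;	/* index 0 is "umc", see ras_block_string[] */
+ *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ *	data.op = 1;		/* enable RAS on the block, per the op values above */
+ *	if (write(fd, &data, sizeof(data)) != (ssize_t)sizeof(data))
+ *		perror("ras_ctrl write");
+ *	close(fd);
+ *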
+ * bash:
+ * echo op block [error [address value]] > .../ras/ras_ctrl
+ * op: disable, enable, inject
+ * disable: only block is needed
+ * enable: block and error are needed
+ * inject: block, error, address and value are needed
+ * block: umc, sdma, gfx, .........
+ * see ras_block_string[] for details
+ * error: ue, ce
+ * ue: multi_uncorrectable
+ * ce: single_correctable
+ *
+ * here are some examples for bash commands,
+ * echo inject umc ue 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo inject umc ce 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ *
+ * How to check the result?
+ *
+ * For disable/enable, please check ras features at
+ * /sys/class/drm/card[0/1/2...]/device/ras/features
+ *
+ * For inject, please check corresponding err count at
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * NOTE: operation is only allowed on blocks which are supported.
+ * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ */
+static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct ras_debug_if data;
+ int ret = 0;
+
+ ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
+ if (ret)
+ return -EINVAL;
+
+ if (!amdgpu_ras_is_supported(adev, data.head.block))
+ return -EINVAL;
+
+ switch (data.op) {
+ case 0:
+ ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
+ break;
+ case 1:
+ ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
+ break;
+ case 2:
+ ret = amdgpu_ras_error_inject(adev, &data.inject);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ };
+
+ if (ret)
+ return -EINVAL;
+
+ return size;
+}
+
+static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_ctrl_write,
+ .llseek = default_llseek
+};
+
+static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (amdgpu_ras_error_query(obj->adev, &info))
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
+ "ue", info.ue_count,
+ "ce", info.ce_count);
+}
+
+/* obj begin */
+
+#define get_obj(obj) do { (obj)->use++; } while (0)
+#define alive_obj(obj) ((obj)->use)
+
+static inline void put_obj(struct ras_manager *obj)
+{
+ if (obj && --obj->use == 0)
+ list_del(&obj->node);
+ if (obj && obj->use < 0) {
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
+ }
+}
+
+/* make one obj and return it. */
+static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return NULL;
+
+ if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return NULL;
+
+ obj = &con->objs[head->block];
+ /* already exists. return obj? */
+ if (alive_obj(obj))
+ return NULL;
+
+ obj->head = *head;
+ obj->adev = adev;
+ list_add(&obj->node, &con->head);
+ get_obj(obj);
+
+ return obj;
+}
+
+/* return an obj equal to head, or the first when head is NULL */
+static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ int i;
+
+ if (!con)
+ return NULL;
+
+ if (head) {
+ if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return NULL;
+
+ obj = &con->objs[head->block];
+
+ if (alive_obj(obj)) {
+ WARN_ON(head->block != obj->head.block);
+ return obj;
+ }
+ } else {
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
+ obj = &con->objs[i];
+ if (alive_obj(obj)) {
+ WARN_ON(i != obj->head.block);
+ return obj;
+ }
+ }
+ }
+
+ return NULL;
+}
+/* obj end */
+
+/* feature ctl begin */
+static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return con->hw_supported & BIT(head->block);
+}
+
+static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return con->features & BIT(head->block);
+}
+
+/*
+ * if obj is not created, then create one.
+ * set feature enable flag.
+ */
+static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, int enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ /* If the hardware does not support ras, then do not create the obj.
+ * But if the hardware does support ras, we can create the obj.
+ * The ras framework checks con->hw_supported to see if it needs to do
+ * the corresponding initialization.
+ * The IP checks con->support to see if it needs to disable ras.
+ */
+ if (!amdgpu_ras_is_feature_allowed(adev, head))
+ return 0;
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
+ return 0;
+
+ if (enable) {
+ if (!obj) {
+ obj = amdgpu_ras_create_obj(adev, head);
+ if (!obj)
+ return -EINVAL;
+ } else {
+ /* In case we create obj somewhere else */
+ get_obj(obj);
+ }
+ con->features |= BIT(head->block);
+ } else {
+ if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
+ con->features &= ~BIT(head->block);
+ put_obj(obj);
+ }
+ }
+
+ return 0;
+}
+
+/* wrapper of psp_ras_enable_features */
+int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ union ta_ras_cmd_input info;
+ int ret;
+
+ if (!con)
+ return -EINVAL;
+
+ if (!enable) {
+ info.disable_features = (struct ta_ras_disable_features_input) {
+ .block_id = amdgpu_ras_block_to_ta(head->block),
+ .error_type = amdgpu_ras_error_to_ta(head->type),
+ };
+ } else {
+ info.enable_features = (struct ta_ras_enable_features_input) {
+ .block_id = amdgpu_ras_block_to_ta(head->block),
+ .error_type = amdgpu_ras_error_to_ta(head->type),
+ };
+ }
+
+ /* Do not enable if it is not allowed. */
+ WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
+ /* Are we already in the state we are going to set? */
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
+ return 0;
+
+ ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ if (ret) {
+ DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ ret);
+ return -EINVAL;
+ }
+
+ /* setup the obj */
+ __amdgpu_ras_feature_enable(adev, head, enable);
+
+ return 0;
+}
+
+/* Only used in device probe stage and called only once. */
+int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+
+ if (!con)
+ return -EINVAL;
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
+ /* If ras is enabled by the vbios, we set up the ras object first in
+ * both cases. For enable, that is all we need to do. For
+ * disable, we need to perform a ras TA disable cmd after that.
+ */
+ ret = __amdgpu_ras_feature_enable(adev, head, 1);
+ if (ret)
+ return ret;
+
+ if (!enable)
+ ret = amdgpu_ras_feature_enable(adev, head, 0);
+ } else
+ ret = amdgpu_ras_feature_enable(adev, head, enable);
+
+ return ret;
+}
+
+static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
+ bool bypass)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ /* bypass psp,
+ * i.e. just release the obj and the corresponding flags
+ */
+ if (bypass) {
+ if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
+ break;
+ }
+ }
+
+ return con->features;
+}
+
+static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
+ bool bypass)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
+ int i;
+ const enum amdgpu_ras_error_type default_ras_type =
+ AMDGPU_RAS_ERROR__NONE;
+
+ for (i = 0; i < ras_block_count; i++) {
+ struct ras_common_if head = {
+ .block = i,
+ .type = default_ras_type,
+ .sub_block_index = 0,
+ };
+ strcpy(head.name, ras_block_str(i));
+ if (bypass) {
+ /*
+ * bypass psp. vbios enables ras for us,
+ * so just create the obj.
+ */
+ if (__amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ }
+ }
+
+ return con->features;
+}
+/* feature ctl end */
+
+/* query/inject/cure begin */
+int amdgpu_ras_error_query(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+
+ if (!obj)
+ return -EINVAL;
+ /* TODO might read the register to read the count */
+
+ info->ue_count = obj->err_data.ue_count;
+ info->ce_count = obj->err_data.ce_count;
+
+ return 0;
+}
+
+/* wrapper of psp_ras_trigger_error */
+int amdgpu_ras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ta_ras_trigger_error_input block_info = {
+ .block_id = amdgpu_ras_block_to_ta(info->head.block),
+ .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
+ .sub_block_index = info->head.sub_block_index,
+ .address = info->address,
+ .value = info->value,
+ };
+ int ret = 0;
+
+ if (!obj)
+ return -EINVAL;
+
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ if (ret)
+ DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
+ ras_block_str(info->head.block),
+ ret);
+
+ return ret;
+}
+
+int amdgpu_ras_error_cure(struct amdgpu_device *adev,
+ struct ras_cure_if *info)
+{
+ /* psp fw has no cure interface for now. */
+ return 0;
+}
+
+/* get the total error counts on all IPs */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ bool is_ce)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ struct ras_err_data data = {0, 0};
+
+ if (!con)
+ return -EINVAL;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (amdgpu_ras_error_query(adev, &info))
+ return -EINVAL;
+
+ data.ce_count += info.ce_count;
+ data.ue_count += info.ue_count;
+ }
+
+ return is_ce ? data.ce_count : data.ue_count;
+}
+/* query/inject/cure end */
+
+
+/* sysfs begin */
+
+static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, features_attr);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct ras_common_if head;
+ int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
+ int i;
+ ssize_t s;
+ struct ras_manager *obj;
+
+ s = scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
+
+ for (i = 0; i < ras_block_count; i++) {
+ head.block = i;
+
+ if (amdgpu_ras_is_feature_enabled(adev, &head)) {
+ obj = amdgpu_ras_find_obj(adev, &head);
+ s += scnprintf(&buf[s], PAGE_SIZE - s,
+ "%s: %s\n",
+ ras_block_str(i),
+ ras_err_str(obj->head.type));
+ } else
+ s += scnprintf(&buf[s], PAGE_SIZE - s,
+ "%s: disabled\n",
+ ras_block_str(i));
+ }
+
+ return s;
+}
+
+static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ NULL
+ };
+ struct attribute_group group = {
+ .name = "ras",
+ .attrs = attrs,
+ };
+
+ con->features_attr = (struct device_attribute) {
+ .attr = {
+ .name = "features",
+ .mode = S_IRUGO,
+ },
+ .show = amdgpu_ras_sysfs_features_read,
+ };
+ sysfs_attr_init(attrs[0]);
+
+ return sysfs_create_group(&adev->dev->kobj, &group);
+}
+
+static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ NULL
+ };
+ struct attribute_group group = {
+ .name = "ras",
+ .attrs = attrs,
+ };
+
+ sysfs_remove_group(&adev->dev->kobj, &group);
+
+ return 0;
+}
+
+int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
+
+ if (!obj || obj->attr_inuse)
+ return -EINVAL;
+
+ get_obj(obj);
+
+ memcpy(obj->fs_data.sysfs_name,
+ head->sysfs_name,
+ sizeof(obj->fs_data.sysfs_name));
+
+ obj->sysfs_attr = (struct device_attribute){
+ .attr = {
+ .name = obj->fs_data.sysfs_name,
+ .mode = S_IRUGO,
+ },
+ .show = amdgpu_ras_sysfs_read,
+ };
+ sysfs_attr_init(&obj->sysfs_attr.attr);
+
+ if (sysfs_add_file_to_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ "ras")) {
+ put_obj(obj);
+ return -EINVAL;
+ }
+
+ obj->attr_inuse = 1;
+
+ return 0;
+}
+
+int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ if (!obj || !obj->attr_inuse)
+ return -EINVAL;
+
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ "ras");
+ obj->attr_inuse = 0;
+ put_obj(obj);
+
+ return 0;
+}
+
+static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ amdgpu_ras_sysfs_remove(adev, &obj->head);
+ }
+
+ amdgpu_ras_sysfs_remove_feature_node(adev);
+
+ return 0;
+}
+/* sysfs end */
+
+/* debugfs begin */
+static int amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *root = minor->debugfs_root, *dir;
+ struct dentry *ent;
+
+ dir = debugfs_create_dir("ras", root);
+ if (IS_ERR(dir))
+ return -EINVAL;
+
+ con->dir = dir;
+
+ ent = debugfs_create_file("ras_ctrl",
+ S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_ctrl_ops);
+ if (IS_ERR(ent)) {
+ debugfs_remove(con->dir);
+ return -EINVAL;
+ }
+
+ con->ent = ent;
+ return 0;
+}
+
+int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
+ struct dentry *ent;
+
+ if (!obj || obj->ent)
+ return -EINVAL;
+
+ get_obj(obj);
+
+ memcpy(obj->fs_data.debugfs_name,
+ head->debugfs_name,
+ sizeof(obj->fs_data.debugfs_name));
+
+ ent = debugfs_create_file(obj->fs_data.debugfs_name,
+ S_IWUGO | S_IRUGO, con->dir,
+ obj, &amdgpu_ras_debugfs_ops);
+
+ if (IS_ERR(ent))
+ return -EINVAL;
+
+ obj->ent = ent;
+
+ return 0;
+}
+
+int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ if (!obj || !obj->ent)
+ return 0;
+
+ debugfs_remove(obj->ent);
+ obj->ent = NULL;
+ put_obj(obj);
+
+ return 0;
+}
+
+static int amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ amdgpu_ras_debugfs_remove(adev, &obj->head);
+ }
+
+ debugfs_remove(con->ent);
+ debugfs_remove(con->dir);
+ con->dir = NULL;
+ con->ent = NULL;
+
+ return 0;
+}
+/* debugfs end */
+
+/* ras fs */
+
+static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_sysfs_create_feature_node(adev);
+ amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
+{
+ amdgpu_ras_debugfs_remove_all(adev);
+ amdgpu_ras_sysfs_remove_all(adev);
+ return 0;
+}
+/* ras fs end */
+
+/* ih begin */
+static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
+{
+ struct ras_ih_data *data = &obj->ih_data;
+ struct amdgpu_iv_entry entry;
+ int ret;
+
+ while (data->rptr != data->wptr) {
+ rmb();
+ memcpy(&entry, &data->ring[data->rptr],
+ data->element_size);
+
+ wmb();
+ data->rptr = (data->aligned_element_size +
+ data->rptr) % data->ring_size;
+
+ /* Let the IP handle its data; maybe we need to get the output
+ * from the callback to update the error type/count, etc.
+ */
+ if (data->cb) {
+ ret = data->cb(obj->adev, &entry);
+ /* A ue will trigger an interrupt, and in that case
+ * we need to do a reset to recover the whole system.
+ * But leave the IP to do that recovery; here we just dispatch
+ * the error.
+ */
+ if (ret == AMDGPU_RAS_UE) {
+ obj->err_data.ue_count++;
+ }
+ /* Might need to get the ce count from a register, but not all IPs
+ * save the ce count; some IPs just use one or two bits
+ * to indicate that a ce happened.
+ */
+ }
+ }
+}
+
+static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
+{
+ struct ras_ih_data *data =
+ container_of(work, struct ras_ih_data, ih_work);
+ struct ras_manager *obj =
+ container_of(data, struct ras_manager, ih_data);
+
+ amdgpu_ras_interrupt_handler(obj);
+}
+
+int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data = &obj->ih_data;
+
+ if (!obj)
+ return -EINVAL;
+
+ if (data->inuse == 0)
+ return 0;
+
+ /* Might be overflow... */
+ memcpy(&data->ring[data->wptr], info->entry,
+ data->element_size);
+
+ wmb();
+ data->wptr = (data->aligned_element_size +
+ data->wptr) % data->ring_size;
+
+ schedule_work(&data->ih_work);
+
+ return 0;
+}
+
+int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data;
+
+ if (!obj)
+ return -EINVAL;
+
+ data = &obj->ih_data;
+ if (data->inuse == 0)
+ return 0;
+
+ cancel_work_sync(&data->ih_work);
+
+ kfree(data->ring);
+ memset(data, 0, sizeof(*data));
+ put_obj(obj);
+
+ return 0;
+}
+
+int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data;
+
+ if (!obj) {
+ /* in case we register the IH before enabling the ras feature */
+ obj = amdgpu_ras_create_obj(adev, &info->head);
+ if (!obj)
+ return -EINVAL;
+ } else
+ get_obj(obj);
+
+ data = &obj->ih_data;
+ /* add the callback, etc. */
+ *data = (struct ras_ih_data) {
+ .inuse = 0,
+ .cb = info->cb,
+ .element_size = sizeof(struct amdgpu_iv_entry),
+ .rptr = 0,
+ .wptr = 0,
+ };
+
+ INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
+
+ data->aligned_element_size = ALIGN(data->element_size, 8);
+ /* the ring can store 64 iv entries. */
+ data->ring_size = 64 * data->aligned_element_size;
+ data->ring = kmalloc(data->ring_size, GFP_KERNEL);
+ if (!data->ring) {
+ put_obj(obj);
+ return -ENOMEM;
+ }
+
+ /* IH is ready */
+ data->inuse = 1;
+
+ return 0;
+}
+
+static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ struct ras_ih_if info = {
+ .head = obj->head,
+ };
+ amdgpu_ras_interrupt_remove_handler(adev, &info);
+ }
+
+ return 0;
+}
+/* ih end */
+
+/* recovery begin */
+static void amdgpu_ras_do_recovery(struct work_struct *work)
+{
+ struct amdgpu_ras *ras =
+ container_of(work, struct amdgpu_ras, recovery_work);
+
+ amdgpu_device_gpu_recover(ras->adev, 0);
+ atomic_set(&ras->in_recovery, 0);
+}
+
+static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
+ struct amdgpu_bo **bo_ptr)
+{
+ /* no need to free it actually. */
+ amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
+ return 0;
+}
+
+/* reserve vram with size@offset */
+static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size,
+ struct amdgpu_bo **bo_ptr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_param bp;
+ int r = 0;
+ int i;
+ struct amdgpu_bo *bo;
+
+ if (bo_ptr)
+ *bo_ptr = NULL;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+
+ r = amdgpu_bo_create(adev, &bp, &bo);
+ if (r)
+ return -EINVAL;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (r)
+ goto error_reserve;
+
+ offset = ALIGN(offset, PAGE_SIZE);
+ for (i = 0; i < bo->placement.num_placement; ++i) {
+ bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+ bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+
+ ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+ r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
+ if (r)
+ goto error_pin;
+
+ r = amdgpu_bo_pin_restricted(bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ offset,
+ offset + size);
+ if (r)
+ goto error_pin;
+
+ if (bo_ptr)
+ *bo_ptr = bo;
+
+ amdgpu_bo_unreserve(bo);
+ return r;
+
+error_pin:
+ amdgpu_bo_unreserve(bo);
+error_reserve:
+ amdgpu_bo_unref(&bo);
+ return r;
+}
+
+/* alloc/realloc bps array */
+static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
+ struct ras_err_handler_data *data, int pages)
+{
+ unsigned int old_space = data->count + data->space_left;
+ unsigned int new_space = old_space + pages;
+ unsigned int align_space = ALIGN(new_space, 1024);
+ void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+
+ if (data->bps) {
+ memcpy(tmp, data->bps,
+ data->count * sizeof(*data->bps));
+ kfree(data->bps);
+ }
+
+ data->bps = tmp;
+ data->space_left += align_space - old_space;
+ return 0;
+}
+
+ /* it deals with vram only. */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ unsigned long *bps, int pages)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i = pages;
+ int ret = 0;
+
+ if (!con || !con->eh_data || !bps || pages <= 0)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ if (data->space_left <= pages)
+ if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ while (i--)
+ data->bps[data->count++].bp = bps[i];
+
+ data->space_left -= pages;
+out:
+ mutex_unlock(&con->recovery_lock);
+
+ return ret;
+}
+
+/* called in gpu recovery/init */
+int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ uint64_t bp;
+ struct amdgpu_bo *bo;
+ int i;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+ /* reserve vram at driver post stage. */
+ for (i = data->last_reserved; i < data->count; i++) {
+ bp = data->bps[i].bp;
+
+ if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
+ PAGE_SIZE, &bo))
+ DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+
+ data->bps[i].bo = bo;
+ data->last_reserved = i + 1;
+ }
+out:
+ mutex_unlock(&con->recovery_lock);
+ return 0;
+}
+
+ /* called when the driver unloads */
+static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_bo *bo;
+ int i;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ for (i = data->last_reserved - 1; i >= 0; i--) {
+ bo = data->bps[i].bo;
+
+ amdgpu_ras_release_vram(adev, &bo);
+
+ data->bps[i].bo = bo;
+ data->last_reserved = i;
+ }
+out:
+ mutex_unlock(&con->recovery_lock);
+ return 0;
+}
+
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+ /* TODO
+ * write the array to eeprom when SMU is disabled.
+ */
+ return 0;
+}
+
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ /* TODO
+ * read the array from eeprom when SMU is disabled.
+ */
+ return 0;
+}
+
+static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data **data = &con->eh_data;
+
+ *data = kmalloc(sizeof(**data),
+ GFP_KERNEL|__GFP_ZERO);
+ if (!*data)
+ return -ENOMEM;
+
+ mutex_init(&con->recovery_lock);
+ INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
+ atomic_set(&con->in_recovery, 0);
+ con->adev = adev;
+
+ amdgpu_ras_load_bad_pages(adev);
+ amdgpu_ras_reserve_bad_pages(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+
+ cancel_work_sync(&con->recovery_work);
+ amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_release_bad_pages(adev);
+
+ mutex_lock(&con->recovery_lock);
+ con->eh_data = NULL;
+ kfree(data->bps);
+ kfree(data);
+ mutex_unlock(&con->recovery_lock);
+
+ return 0;
+}
+/* recovery end */
+
+/*
+ * Check the hardware's ras ability, which is saved in hw_supported.
+ * If the hardware does not support ras, we can skip some ras initialization
+ * and forbid ras operations from the IPs.
+ * If only software, say a boot parameter, limits the ras ability, the IPs
+ * still need to be allowed some limited operations, like disable. In that
+ * case we have to initialize ras as normal, but each function has to check
+ * whether the operation is allowed.
+ */
+static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
+ uint32_t *hw_supported, uint32_t *supported)
+{
+ *hw_supported = 0;
+ *supported = 0;
+
+ if (amdgpu_sriov_vf(adev) ||
+ adev->asic_type != CHIP_VEGA20)
+ return;
+
+ if (adev->is_atom_fw &&
+ (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
+ amdgpu_atomfirmware_sram_ecc_supported(adev)))
+ *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+
+ *supported = amdgpu_ras_enable == 0 ?
+ 0 : *hw_supported & amdgpu_ras_mask;
+}
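
hw_supported records what the silicon and firmware can do, while supported additionally folds in the amdgpu_ras_enable and amdgpu_ras_mask module parameters; a block is usable only if its bit survives both masks. A stand-alone illustration of this two-level gating with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    #define BLOCK_COUNT 14
    #define BLOCK_MASK  ((1u << BLOCK_COUNT) - 1)

    int main(void)
    {
        uint32_t hw_supported = BLOCK_MASK; /* ECC reported by the firmware */
        int      ras_enable   = 1;          /* module parameter: master switch */
        uint32_t ras_mask     = 0x3;        /* module parameter: UMC + SDMA only */
        uint32_t supported    = ras_enable ? (hw_supported & ras_mask) : 0;
        unsigned block        = 2;          /* e.g. GFX */

        printf("block %u usable: %d\n", block, !!(supported & (1u << block)));
        return 0;
    }
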
+
+int amdgpu_ras_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (con)
+ return 0;
+
+ con = kmalloc(sizeof(struct amdgpu_ras) +
+ sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
+ GFP_KERNEL|__GFP_ZERO);
+ if (!con)
+ return -ENOMEM;
+
+ con->objs = (struct ras_manager *)(con + 1);
+
+ amdgpu_ras_set_context(adev, con);
+
+ amdgpu_ras_check_supported(adev, &con->hw_supported,
+ &con->supported);
+ con->features = 0;
+ INIT_LIST_HEAD(&con->head);
+	/* Might need to get this flag from the vbios. */
+ con->flags = RAS_DEFAULT_FLAGS;
+
+ if (amdgpu_ras_recovery_init(adev))
+ goto recovery_out;
+
+ amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
+
+ if (amdgpu_ras_fs_init(adev))
+ goto fs_out;
+
+ amdgpu_ras_self_test(adev);
+
+ DRM_INFO("RAS INFO: ras initialized successfully, "
+ "hardware ability[%x] ras_mask[%x]\n",
+ con->hw_supported, con->supported);
+ return 0;
+fs_out:
+ amdgpu_ras_recovery_fini(adev);
+recovery_out:
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+
+ return -EINVAL;
+}
+
+/* do some init work after IP late init, since it depends on the IPs */
+void amdgpu_ras_post_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ if (!con)
+ return;
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
+		/* Set up all other IPs which have not implemented ras yet.
+		 * Strictly, an IP's actual ras error type should be
+		 * MULTI_UNCORRECTABLE, but since the driver does not handle
+		 * it, ERROR_NONE makes sense anyway.
+		 */
+ amdgpu_ras_enable_all_features(adev, 1);
+
+		/* We enable ras on all hw_supported blocks, but the boot
+		 * parameter might disable some of them, and one or more IPs
+		 * may not have implemented ras yet. Disable those on their
+		 * behalf.
+		 */
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
+ amdgpu_ras_feature_enable(adev, &obj->head, 0);
+				/* there should not be any reference. */
+ WARN_ON(alive_obj(obj));
+ }
+ }
+ }
+}
+
+/* do some fini work before IP fini, as the IPs depend on it */
+int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return 0;
+
+	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
+ amdgpu_ras_disable_all_features(adev, 0);
+ amdgpu_ras_recovery_fini(adev);
+ return 0;
+}
+
+int amdgpu_ras_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return 0;
+
+ amdgpu_ras_fs_fini(adev);
+ amdgpu_ras_interrupt_remove_all(adev);
+
+ WARN(con->features, "Feature mask is not cleared");
+
+ if (con->features)
+ amdgpu_ras_disable_all_features(adev, 1);
+
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
new file mode 100644
index 000000000000..eaef5edefc34
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_RAS_H
+#define _AMDGPU_RAS_H
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "ta_ras_if.h"
+
+enum amdgpu_ras_block {
+ AMDGPU_RAS_BLOCK__UMC = 0,
+ AMDGPU_RAS_BLOCK__SDMA,
+ AMDGPU_RAS_BLOCK__GFX,
+ AMDGPU_RAS_BLOCK__MMHUB,
+ AMDGPU_RAS_BLOCK__ATHUB,
+ AMDGPU_RAS_BLOCK__PCIE_BIF,
+ AMDGPU_RAS_BLOCK__HDP,
+ AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ AMDGPU_RAS_BLOCK__DF,
+ AMDGPU_RAS_BLOCK__SMN,
+ AMDGPU_RAS_BLOCK__SEM,
+ AMDGPU_RAS_BLOCK__MP0,
+ AMDGPU_RAS_BLOCK__MP1,
+ AMDGPU_RAS_BLOCK__FUSE,
+
+ AMDGPU_RAS_BLOCK__LAST
+};
+
+#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
+#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
+
+enum amdgpu_ras_error_type {
+ AMDGPU_RAS_ERROR__NONE = 0,
+ AMDGPU_RAS_ERROR__PARITY = 1,
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE = 2,
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
+ AMDGPU_RAS_ERROR__POISON = 8,
+};
+
+enum amdgpu_ras_ret {
+ AMDGPU_RAS_SUCCESS = 0,
+ AMDGPU_RAS_FAIL,
+ AMDGPU_RAS_UE,
+ AMDGPU_RAS_CE,
+ AMDGPU_RAS_PT,
+};
+
+struct ras_common_if {
+ enum amdgpu_ras_block block;
+ enum amdgpu_ras_error_type type;
+ uint32_t sub_block_index;
+ /* block name */
+ char name[32];
+};
+
+typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+struct amdgpu_ras {
+ /* ras infrastructure */
+ /* for ras itself. */
+ uint32_t hw_supported;
+ /* for IP to check its ras ability. */
+ uint32_t supported;
+ uint32_t features;
+ struct list_head head;
+ /* debugfs */
+ struct dentry *dir;
+ /* debugfs ctrl */
+ struct dentry *ent;
+ /* sysfs */
+ struct device_attribute features_attr;
+ /* block array */
+ struct ras_manager *objs;
+
+ /* gpu recovery */
+ struct work_struct recovery_work;
+ atomic_t in_recovery;
+ struct amdgpu_device *adev;
+ /* error handler data */
+ struct ras_err_handler_data *eh_data;
+ struct mutex recovery_lock;
+
+ uint32_t flags;
+};
+
+/* interfaces for IP */
+
+struct ras_fs_if {
+ struct ras_common_if head;
+ char sysfs_name[32];
+ char debugfs_name[32];
+};
+
+struct ras_query_if {
+ struct ras_common_if head;
+ unsigned long ue_count;
+ unsigned long ce_count;
+};
+
+struct ras_inject_if {
+ struct ras_common_if head;
+ uint64_t address;
+ uint64_t value;
+};
+
+struct ras_cure_if {
+ struct ras_common_if head;
+ uint64_t address;
+};
+
+struct ras_ih_if {
+ struct ras_common_if head;
+ ras_ih_cb cb;
+};
+
+struct ras_dispatch_if {
+ struct ras_common_if head;
+ struct amdgpu_iv_entry *entry;
+};
+
+struct ras_debug_if {
+ union {
+ struct ras_common_if head;
+ struct ras_inject_if inject;
+ };
+ int op;
+};
+/* workflow
+ * vbios
+ * 1: ras feature enable (enabled by default)
+ * psp
+ * 2: ras framework init (in ip_init)
+ * IP
+ * 3: IH add
+ * 4: debugfs/sysfs create
+ * 5: query/inject
+ * 6: debugfs/sysfs remove
+ * 7: IH remove
+ * 8: feature disable
+ */
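
A hedged sketch of how an IP block might wire these interfaces up along steps 3-8 of the flow above; the my_ip_* wrappers and exact call sites are illustrative, only the amdgpu_ras_* calls are from this header:

    /* Step 3 + 4: register the IH callback and expose the block in sysfs. */
    static int my_ip_ras_late_init(struct amdgpu_device *adev,
                                   struct ras_ih_if *ih_info,
                                   struct ras_fs_if *fs_info)
    {
        int r;

        r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
        if (r)
            return r;

        r = amdgpu_ras_sysfs_create(adev, fs_info);
        if (r)
            amdgpu_ras_interrupt_remove_handler(adev, ih_info);
        return r; /* step 5, query/inject, then happens via sysfs/debugfs */
    }

    /* Steps 6-8: tear everything down again in reverse order. */
    static void my_ip_ras_fini(struct amdgpu_device *adev,
                               struct ras_ih_if *ih_info,
                               struct ras_common_if *head)
    {
        amdgpu_ras_sysfs_remove(adev, head);
        amdgpu_ras_interrupt_remove_handler(adev, ih_info);
        amdgpu_ras_feature_enable(adev, head, 0);
    }
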
+
+#define amdgpu_ras_get_context(adev) ((adev)->psp.ras.ras)
+#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras.ras = (ras_con))
+
+/* check if ras is supported on a block, e.g. sdma, gfx */
+static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
+ unsigned int block)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ return ras && (ras->supported & (1 << block));
+}
+
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ bool is_ce);
+
+/* error handling functions */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ unsigned long *bps, int pages);
+
+int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
+
+static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
+ bool is_baco)
+{
+ /* remove me when gpu reset works on vega20 A1. */
+#if 0
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+ schedule_work(&ras->recovery_work);
+#endif
+ return 0;
+}
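
The disabled body above relies on atomic_cmpxchg() so that, out of any number of concurrent error interrupts, only one actually schedules the recovery work. A minimal user-space analogue of that single-entry gate with C11 atomics; do_recovery() is a hypothetical stand-in for schedule_work():

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int in_recovery;

    void do_recovery(void); /* hypothetical stand-in for schedule_work() */

    /* Returns true only for the caller that actually starts recovery. */
    static bool try_start_recovery(void)
    {
        int expected = 0;

        /* Only the first caller flips 0 -> 1; everyone else sees 1 and bails. */
        if (atomic_compare_exchange_strong(&in_recovery, &expected, 1)) {
            do_recovery();
            return true;
        }
        return false;
    }
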
+
+static inline enum ta_ras_block
+amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ return TA_RAS_BLOCK__UMC;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ return TA_RAS_BLOCK__SDMA;
+ case AMDGPU_RAS_BLOCK__GFX:
+ return TA_RAS_BLOCK__GFX;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ return TA_RAS_BLOCK__MMHUB;
+ case AMDGPU_RAS_BLOCK__ATHUB:
+ return TA_RAS_BLOCK__ATHUB;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ return TA_RAS_BLOCK__PCIE_BIF;
+ case AMDGPU_RAS_BLOCK__HDP:
+ return TA_RAS_BLOCK__HDP;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ return TA_RAS_BLOCK__XGMI_WAFL;
+ case AMDGPU_RAS_BLOCK__DF:
+ return TA_RAS_BLOCK__DF;
+ case AMDGPU_RAS_BLOCK__SMN:
+ return TA_RAS_BLOCK__SMN;
+ case AMDGPU_RAS_BLOCK__SEM:
+ return TA_RAS_BLOCK__SEM;
+ case AMDGPU_RAS_BLOCK__MP0:
+ return TA_RAS_BLOCK__MP0;
+ case AMDGPU_RAS_BLOCK__MP1:
+ return TA_RAS_BLOCK__MP1;
+ case AMDGPU_RAS_BLOCK__FUSE:
+ return TA_RAS_BLOCK__FUSE;
+ default:
+ WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
+ return TA_RAS_BLOCK__UMC;
+ }
+}
+
+static inline enum ta_ras_error_type
+amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
+ switch (error) {
+ case AMDGPU_RAS_ERROR__NONE:
+ return TA_RAS_ERROR__NONE;
+ case AMDGPU_RAS_ERROR__PARITY:
+ return TA_RAS_ERROR__PARITY;
+ case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
+ return TA_RAS_ERROR__SINGLE_CORRECTABLE;
+ case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
+ return TA_RAS_ERROR__MULTI_UNCORRECTABLE;
+ case AMDGPU_RAS_ERROR__POISON:
+ return TA_RAS_ERROR__POISON;
+ default:
+ WARN_ONCE(1, "RAS ERROR: unexpected error type %d\n", error);
+ return TA_RAS_ERROR__NONE;
+ }
+}
+
+/* called in ip_init and ip_fini */
+int amdgpu_ras_init(struct amdgpu_device *adev);
+void amdgpu_ras_post_init(struct amdgpu_device *adev);
+int amdgpu_ras_fini(struct amdgpu_device *adev);
+int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+
+int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable);
+
+int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable);
+
+int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head);
+
+int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head);
+
+int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_error_query(struct amdgpu_device *adev,
+ struct ras_query_if *info);
+
+int amdgpu_ras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info);
+
+int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info);
+
+int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info);
+
+int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 335a0edf114b..8f5026c123ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -248,6 +248,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
sched_hw_submission = max(sched_hw_submission, 256);
+ else if (ring == &adev->sdma.instance[0].page)
+ sched_hw_submission = 256;
if (ring->adev == NULL) {
if (adev->num_rings >= AMDGPU_MAX_RINGS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 0767a93e4d91..639297250c21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -53,26 +53,25 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
enum drm_sched_priority priority)
{
- struct file *filp = fget(fd);
+ struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
- if (!filp)
+ if (!f.file)
return -EINVAL;
- r = amdgpu_file_to_fpriv(filp, &fpriv);
+ r = amdgpu_file_to_fpriv(f.file, &fpriv);
if (r) {
- fput(filp);
+ fdput(f);
return r;
}
idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
- fput(filp);
-
+ fdput(f);
return 0;
}
@@ -81,30 +80,30 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
unsigned ctx_id,
enum drm_sched_priority priority)
{
- struct file *filp = fget(fd);
+ struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
int r;
- if (!filp)
+ if (!f.file)
return -EINVAL;
- r = amdgpu_file_to_fpriv(filp, &fpriv);
+ r = amdgpu_file_to_fpriv(f.file, &fpriv);
if (r) {
- fput(filp);
+ fdput(f);
return r;
}
ctx = amdgpu_ctx_get(fpriv, ctx_id);
if (!ctx) {
- fput(filp);
+ fdput(f);
return -EINVAL;
}
amdgpu_ctx_priority_override(ctx, priority);
amdgpu_ctx_put(ctx);
- fput(filp);
+ fdput(f);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 16b1a6ae5ba6..1ba9ba3b54f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -28,9 +28,8 @@
#define AMDGPU_MAX_SDMA_INSTANCES 2
enum amdgpu_sdma_irq {
- AMDGPU_SDMA_IRQ_TRAP0 = 0,
- AMDGPU_SDMA_IRQ_TRAP1,
-
+ AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
+ AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_SDMA_IRQ_LAST
};
@@ -49,9 +48,11 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
+ struct amdgpu_irq_src ecc_irq;
int num_instances;
uint32_t srbm_soft_reset;
bool has_page_queue;
+ struct ras_common_if *ras_if;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 73e71e61dc99..0c52d1f9fe0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -50,8 +50,6 @@
#include "amdgpu_sdma.h"
#include "bif/bif_4_1_d.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem, unsigned num_pages,
uint64_t offset, unsigned window,
@@ -1424,6 +1422,13 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
struct dma_fence *f;
int i;
+ /* Don't evict VM page tables while they are busy, otherwise we can't
+ * cleanly handle page faults.
+ */
+ if (bo->type == ttm_bo_type_kernel &&
+ !reservation_object_test_signaled_rcu(bo->resv, true))
+ return false;
+
/* If bo is a KFD BO, check if the bo belongs to the current process.
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
@@ -1671,7 +1676,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
adev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1877,14 +1881,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct amdgpu_device *adev;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
- file_priv = filp->private_data;
- adev = file_priv->minor->dev->dev_private;
if (adev == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 462a04e0f5e6..7d484fad3909 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -36,6 +36,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
/* enable virtual display */
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
+ adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
adev->pg_flags = 0;
}
@@ -375,4 +376,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
+static uint32_t parse_clk(char *buf, bool min)
+{
+ char *ptr = buf;
+ uint32_t clk = 0;
+
+ do {
+ ptr = strchr(ptr, ':');
+ if (!ptr)
+ break;
+ ptr+=2;
+ clk = simple_strtoul(ptr, NULL, 10);
+ } while (!min);
+
+ return clk * 100;
+}
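
parse_clk() assumes get_pp_clk() fills the buffer with one "level: value" line per DPM level, e.g. "0: 300Mhz" / "1: 600Mhz"; with min it takes the first value, otherwise it keeps walking and returns the last one, and the *100 scales MHz into the 10 kHz units the rest of the clock code appears to use. A stand-alone sketch over such an assumed buffer (the layout is an assumption, the real text comes from the host GIM):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    /* Assumed pp_clk text layout, one line per DPM level. */
    static char buf[] = "0: 300Mhz\n1: 600Mhz\n2: 1000Mhz *\n";

    static uint32_t parse(char *b, bool min)
    {
        char *ptr = b;
        uint32_t clk = 0;

        do {
            ptr = strchr(ptr, ':');
            if (!ptr)
                break;
            ptr += 2;                     /* skip ": " */
            clk = strtoul(ptr, NULL, 10); /* MHz of this level */
        } while (!min);                   /* min: stop after the first hit */

        return clk * 100;                 /* MHz -> 10 kHz units */
    }

    int main(void)
    {
        printf("lowest %u, highest %u\n", parse(buf, true), parse(buf, false));
        return 0;
    }
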
+
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
+
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 722deefc0a7e..584947b7ccf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+ int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+ int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
/*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* HW PERF SIM in GIM */
+ AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};
struct amd_sriov_msg_pf2vf_info_header {
@@ -252,6 +256,8 @@ struct amdgpu_virt {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
+ /* protect DPM events to GIM */
+ struct mutex dpm_mutex;
};
#define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
#endif
}
+#define amdgim_is_hwperf(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 16fcb56c232b..4f10f5aba00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -34,6 +34,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
+#include "amdgpu_xgmi.h"
/**
* DOC: GPUVM
@@ -66,50 +67,6 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
#undef LAST
/**
- * struct amdgpu_pte_update_params - Local structure
- *
- * Encapsulate some VM table update parameters to reduce
- * the number of function parameters
- *
- */
-struct amdgpu_pte_update_params {
-
- /**
- * @adev: amdgpu device we do this update for
- */
- struct amdgpu_device *adev;
-
- /**
- * @vm: optional amdgpu_vm we do this update for
- */
- struct amdgpu_vm *vm;
-
- /**
- * @src: address where to copy page table entries from
- */
- uint64_t src;
-
- /**
- * @ib: indirect buffer to fill with commands
- */
- struct amdgpu_ib *ib;
-
- /**
- * @func: Function which actually does the update
- */
- void (*func)(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo, uint64_t pe,
- uint64_t addr, unsigned count, uint32_t incr,
- uint64_t flags);
- /**
- * @pages_addr:
- *
- * DMA addresses to use for mapping, used during VM update by CPU
- */
- dma_addr_t *pages_addr;
-};
-
-/**
* struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
*/
struct amdgpu_prt_cb {
@@ -183,6 +140,22 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns:
+ * The number of entries in the root page directory which need the ATS setting.
+ */
+static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
+{
+ unsigned shift;
+
+ shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
+ return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
+}
+
+/**
* amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
*
* @adev: amdgpu_device pointer
@@ -333,7 +306,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
return;
vm->bulk_moveable = false;
- if (bo->tbo.type == ttm_bo_type_kernel)
+ if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
amdgpu_vm_bo_relocated(base);
else
amdgpu_vm_bo_idle(base);
@@ -505,61 +478,39 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
+ * amdgpu_vm_pt_first_dfs - start a depth-first search
*
- * @adev: amdgpu_device pointer
+ * @adev: amdgpu_device structure
* @vm: amdgpu_vm structure
- * @start: start addr of the walk
* @cursor: state to initialize
*
- * Start a walk and go directly to the leaf node.
- */
-static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, uint64_t start,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- amdgpu_vm_pt_start(adev, vm, start, cursor);
- while (amdgpu_vm_pt_descendant(adev, cursor));
-}
-
-/**
- * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
- *
- * @adev: amdgpu_device pointer
- * @cursor: current state
- *
- * Walk the PD/PT tree to the next leaf node.
+ * Starts a depth-first traversal of the PD/PT tree.
*/
-static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
+static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start,
struct amdgpu_vm_pt_cursor *cursor)
{
- amdgpu_vm_pt_next(adev, cursor);
- if (cursor->pfn != ~0ll)
- while (amdgpu_vm_pt_descendant(adev, cursor));
+ if (start)
+ *cursor = *start;
+ else
+ amdgpu_vm_pt_start(adev, vm, 0, cursor);
+ while (amdgpu_vm_pt_descendant(adev, cursor));
}
/**
- * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
- */
-#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) \
- for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor)); \
- (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
-
-/**
- * amdgpu_vm_pt_first_dfs - start a deep first search
+ * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
*
- * @adev: amdgpu_device structure
- * @vm: amdgpu_vm structure
- * @cursor: state to initialize
+ * @start: starting point for the search
+ * @entry: current entry
*
- * Starts a deep first traversal of the PD/PT tree.
+ * Returns:
+ * True when the search should continue, false otherwise.
*/
-static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor)
+static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
+ struct amdgpu_vm_pt *entry)
{
- amdgpu_vm_pt_start(adev, vm, 0, cursor);
- while (amdgpu_vm_pt_descendant(adev, cursor));
+ return entry && (!start || entry != start->entry);
}
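
The reworked DFS takes an optional start cursor and amdgpu_vm_pt_continue_dfs() stops the walk once it climbs back to the start entry, so a single sub-tree can be processed (e.g. freed) without touching its siblings. A generic, stand-alone post-order sketch of that contract on a hypothetical n-ary tree:

    #include <stddef.h>

    struct node {
        struct node *parent;
        struct node *first_child;
        struct node *next_sibling;
    };

    /* Dive to the deepest left-most descendant (cf. amdgpu_vm_pt_first_dfs). */
    static struct node *dfs_first(struct node *start)
    {
        struct node *n = start;

        while (n->first_child)
            n = n->first_child;
        return n;
    }

    /*
     * Next node in post-order; NULL once the start node itself has been
     * visited, which keeps the walk inside the chosen sub-tree
     * (cf. amdgpu_vm_pt_continue_dfs).
     */
    static struct node *dfs_next(struct node *n, const struct node *start)
    {
        if (n == start)
            return NULL;
        if (n->next_sibling)
            return dfs_first(n->next_sibling);
        return n->parent;
    }

    /* Usage: for (n = dfs_first(sub); n; n = dfs_next(n, sub)) visit(n); */
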
/**
@@ -587,11 +538,11 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
/**
* for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
*/
-#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) \
- for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)), \
+#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
+ for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
(entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
- (entry); (entry) = (cursor).entry, \
- amdgpu_vm_pt_next_dfs((adev), &(cursor)))
+ amdgpu_vm_pt_continue_dfs((start), (entry)); \
+ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
/**
* amdgpu_vm_get_pd_bo - add the VM PD to a validation list
@@ -712,18 +663,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
} else {
- if (vm->use_cpu_for_update)
- r = amdgpu_bo_kmap(bo, NULL);
+ vm->update_funcs->map_table(bo);
+ if (bo->parent)
+ amdgpu_vm_bo_relocated(bo_base);
else
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
- if (r)
- break;
- if (bo->shadow) {
- r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
- if (r)
- break;
- }
- amdgpu_vm_bo_relocated(bo_base);
+ amdgpu_vm_bo_idle(bo_base);
}
}
@@ -751,8 +695,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
- * @level: level this BO is at
- * @pte_support_ats: indicate ATS support from PTE
*
* Root PD needs to be reserved when calling this.
*
@@ -760,99 +702,112 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* 0 on success, errno otherwise.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, struct amdgpu_bo *bo,
- unsigned level, bool pte_support_ats)
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { true, false };
- struct dma_fence *fence = NULL;
+ unsigned level = adev->vm_manager.root_level;
+ struct amdgpu_vm_update_params params;
+ struct amdgpu_bo *ancestor = bo;
unsigned entries, ats_entries;
- struct amdgpu_ring *ring;
- struct amdgpu_job *job;
uint64_t addr;
int r;
+ /* Figure out our place in the hierarchy */
+ if (ancestor->parent) {
+ ++level;
+ while (ancestor->parent->parent) {
+ ++level;
+ ancestor = ancestor->parent;
+ }
+ }
+
entries = amdgpu_bo_size(bo) / 8;
+ if (!vm->pte_support_ats) {
+ ats_entries = 0;
- if (pte_support_ats) {
- if (level == adev->vm_manager.root_level) {
- ats_entries = amdgpu_vm_level_shift(adev, level);
- ats_entries += AMDGPU_GPU_PAGE_SHIFT;
- ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
- ats_entries = min(ats_entries, entries);
- entries -= ats_entries;
+ } else if (!bo->parent) {
+ ats_entries = amdgpu_vm_num_ats_entries(adev);
+ ats_entries = min(ats_entries, entries);
+ entries -= ats_entries;
+
+ } else {
+ struct amdgpu_vm_pt *pt;
+
+ pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
+ ats_entries = amdgpu_vm_num_ats_entries(adev);
+ if ((pt - vm->root.entries) >= ats_entries) {
+ ats_entries = 0;
} else {
ats_entries = entries;
entries = 0;
}
- } else {
- ats_entries = 0;
}
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
- goto error;
+ return r;
+
+ if (bo->shadow) {
+ r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
+ &ctx);
+ if (r)
+ return r;
+ }
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ r = vm->update_funcs->map_table(bo);
if (r)
return r;
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.vm = vm;
+
+ r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
if (r)
- goto error;
+ return r;
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = 0;
if (ats_entries) {
- uint64_t ats_value;
+ uint64_t value = 0, flags;
+
+ flags = AMDGPU_PTE_DEFAULT_ATC;
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
+ }
- ats_value = AMDGPU_PTE_DEFAULT_ATC;
- if (level != AMDGPU_VM_PTB)
- ats_value |= AMDGPU_PDE_PTE;
+ r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
+ value, flags);
+ if (r)
+ return r;
- amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
- ats_entries, 0, ats_value);
addr += ats_entries * 8;
}
if (entries) {
- uint64_t value = 0;
-
- /* Workaround for fault priority problem on GMC9 */
- if (level == AMDGPU_VM_PTB && adev->asic_type >= CHIP_VEGA10)
- value = AMDGPU_PTE_EXECUTABLE;
+ uint64_t value = 0, flags = 0;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(adev, level,
+ &value, &flags);
+ } else {
+ /* Workaround for fault priority problem on GMC9 */
+ flags = AMDGPU_PTE_EXECUTABLE;
+ }
+ }
- amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
- entries, 0, value);
+ r = vm->update_funcs->update(&params, bo, addr, 0, entries,
+ value, flags);
+ if (r)
+ return r;
}
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-
- WARN_ON(job->ibs[0].length_dw > 64);
- r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
- AMDGPU_FENCE_OWNER_KFD, false);
- if (r)
- goto error_free;
-
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
- &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
- if (bo->shadow)
- return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
- level, pte_support_ats);
-
- return 0;
-
-error_free:
- amdgpu_job_free(job);
-
-error:
- return r;
+ return vm->update_funcs->commit(&params, NULL);
}
/**
@@ -883,89 +838,56 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_alloc_pts - Allocate page tables.
+ * amdgpu_vm_alloc_pts - Allocate a specific page table
*
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
- * @saddr: Start address which needs to be allocated
- * @size: Size from start address we need.
+ * @cursor: Which page table to allocate
*
- * Make sure the page directories and page tables are allocated
+ * Make sure a specific page table or directory is allocated.
*
* Returns:
- * 0 on success, errno otherwise.
+ * 0 if the page table was allocated or already present, negative errno
+ * if an error occurred.
*/
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- uint64_t saddr, uint64_t size)
+static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *cursor)
{
- struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_pt *entry = cursor->entry;
+ struct amdgpu_bo_param bp;
struct amdgpu_bo *pt;
- bool ats = false;
- uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
- return -EINVAL;
-
- eaddr = saddr + size - 1;
-
- if (vm->pte_support_ats)
- ats = saddr < AMDGPU_GMC_HOLE_START;
+ if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
+ unsigned num_entries;
- saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
-
- if (eaddr >= adev->vm_manager.max_pfn) {
- dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
- eaddr, adev->vm_manager.max_pfn);
- return -EINVAL;
+ num_entries = amdgpu_vm_num_entries(adev, cursor->level);
+ entry->entries = kvmalloc_array(num_entries,
+ sizeof(*entry->entries),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!entry->entries)
+ return -ENOMEM;
}
- for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
- struct amdgpu_vm_pt *entry = cursor.entry;
- struct amdgpu_bo_param bp;
-
- if (cursor.level < AMDGPU_VM_PTB) {
- unsigned num_entries;
-
- num_entries = amdgpu_vm_num_entries(adev, cursor.level);
- entry->entries = kvmalloc_array(num_entries,
- sizeof(*entry->entries),
- GFP_KERNEL |
- __GFP_ZERO);
- if (!entry->entries)
- return -ENOMEM;
- }
-
-
- if (entry->base.bo)
- continue;
-
- amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);
-
- r = amdgpu_bo_create(adev, &bp, &pt);
- if (r)
- return r;
+ if (entry->base.bo)
+ return 0;
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_kmap(pt, NULL);
- if (r)
- goto error_free_pt;
- }
+ amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
- /* Keep a reference to the root directory to avoid
- * freeing them up in the wrong order.
- */
- pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
+ r = amdgpu_bo_create(adev, &bp, &pt);
+ if (r)
+ return r;
- amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+ /* Keep a reference to the root directory to avoid
+ * freeing them up in the wrong order.
+ */
+ pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
+ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
- if (r)
- goto error_free_pt;
- }
+ r = amdgpu_vm_clear_bo(adev, vm, pt);
+ if (r)
+ goto error_free_pt;
return 0;
@@ -976,31 +898,45 @@ error_free_pt:
}
/**
+ * amdgpu_vm_free_table - free one PD/PT
+ *
+ * @entry: PDE to free
+ */
+static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
+{
+ if (entry->base.bo) {
+ entry->base.bo->vm_bo = NULL;
+ list_del(&entry->base.vm_status);
+ amdgpu_bo_unref(&entry->base.bo->shadow);
+ amdgpu_bo_unref(&entry->base.bo);
+ }
+ kvfree(entry->entries);
+ entry->entries = NULL;
+}
+
+/**
* amdgpu_vm_free_pts - free PD/PT levels
*
* @adev: amdgpu device structure
* @vm: amdgpu vm structure
+ * @start: optional cursor where to start freeing PDs/PTs
*
* Free the page directory or page table level and all sub levels.
*/
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_pt *entry;
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
+ vm->bulk_moveable = false;
- if (entry->base.bo) {
- entry->base.bo->vm_bo = NULL;
- list_del(&entry->base.vm_status);
- amdgpu_bo_unref(&entry->base.bo->shadow);
- amdgpu_bo_unref(&entry->base.bo);
- }
- kvfree(entry->entries);
- }
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ amdgpu_vm_free_table(entry);
- BUG_ON(vm->root.base.bo);
+ if (start)
+ amdgpu_vm_free_table(start->entry);
}
/**
@@ -1212,66 +1148,6 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_do_set_ptes - helper to call the right asic function
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Traces the parameters and calls the right asic functions
- * to setup the page table using the DMA.
- */
-static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
-
- if (count < 3) {
- amdgpu_vm_write_pte(params->adev, params->ib, pe,
- addr | flags, count, incr);
-
- } else {
- amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
- count, incr, flags);
- }
-}
-
-/**
- * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Traces the parameters and calls the DMA function to copy the PTEs.
- */
-static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- uint64_t src = (params->src + (addr >> 12) * 8);
-
- pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_copy_ptes(pe, src, count);
-
- amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
-}
-
-/**
* amdgpu_vm_map_gart - Resolve gart mapping of addr
*
* @pages_addr: optional DMA address to use for lookup
@@ -1283,7 +1159,7 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
* Returns:
* The pointer for the page table entry.
*/
-static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
uint64_t result;
@@ -1298,88 +1174,31 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/**
- * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: kmap addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Write count number of PT/PD entries directly.
- */
-static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- unsigned int i;
- uint64_t value;
-
- pe += (unsigned long)amdgpu_bo_kptr(bo);
-
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
-
- for (i = 0; i < count; i++) {
- value = params->pages_addr ?
- amdgpu_vm_map_gart(params->pages_addr, addr) :
- addr;
- amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
- i, value, flags);
- addr += incr;
- }
-}
-
-/**
- * amdgpu_vm_update_func - helper to call update function
- *
- * Calls the update function for both the given BO as well as its shadow.
- */
-static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- if (bo->shadow)
- params->func(params, bo->shadow, pe, addr, count, incr, flags);
- params->func(params, bo, pe, addr, count, incr, flags);
-}
-
/*
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
* @param: parameters for the update
* @vm: requested vm
- * @parent: parent directory
* @entry: entry to update
*
* Makes sure the requested entry in parent is up to date.
*/
-static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- struct amdgpu_vm_pt *entry)
+static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *entry)
{
+ struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
struct amdgpu_bo *bo = parent->base.bo, *pbo;
uint64_t pde, pt, flags;
unsigned level;
- /* Don't update huge pages here */
- if (entry->huge)
- return;
-
for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;
level += params->adev->vm_manager.root_level;
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
pde = (entry - parent->entries) * 8;
- amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
+ return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
/*
@@ -1396,7 +1215,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_pt *entry;
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
if (entry->base.bo && !entry->base.moved)
amdgpu_vm_bo_relocated(&entry->base);
}
@@ -1415,89 +1234,39 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct amdgpu_pte_update_params params;
- struct amdgpu_job *job;
- unsigned ndw = 0;
- int r = 0;
+ struct amdgpu_vm_update_params params;
+ int r;
if (list_empty(&vm->relocated))
return 0;
-restart:
memset(&params, 0, sizeof(params));
params.adev = adev;
+ params.vm = vm;
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_sync_wait(vm->root.base.bo,
- AMDGPU_FENCE_OWNER_VM, true);
- if (unlikely(r))
- return r;
-
- params.func = amdgpu_vm_cpu_set_ptes;
- } else {
- ndw = 512 * 8;
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
- if (r)
- return r;
-
- params.ib = &job->ibs[0];
- params.func = amdgpu_vm_do_set_ptes;
- }
+ r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+ if (r)
+ return r;
while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_pt *pt, *entry;
+ struct amdgpu_vm_pt *entry;
entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
base.vm_status);
amdgpu_vm_bo_idle(&entry->base);
- pt = amdgpu_vm_pt_parent(entry);
- if (!pt)
- continue;
-
- amdgpu_vm_update_pde(&params, vm, pt, entry);
-
- if (!vm->use_cpu_for_update &&
- (ndw - params.ib->length_dw) < 32)
- break;
- }
-
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- } else if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- struct amdgpu_bo *root = vm->root.base.bo;
- struct amdgpu_ring *ring;
- struct dma_fence *fence;
-
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
- sched);
-
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
- &fence);
+ r = amdgpu_vm_update_pde(&params, vm, entry);
if (r)
goto error;
-
- amdgpu_bo_fence(root, fence, true);
- dma_fence_put(vm->last_update);
- vm->last_update = fence;
}
- if (!list_empty(&vm->relocated))
- goto restart;
-
+ r = vm->update_funcs->commit(&params, &vm->last_update);
+ if (r)
+ goto error;
return 0;
error:
amdgpu_vm_invalidate_pds(adev, vm);
- amdgpu_job_free(job);
return r;
}
@@ -1506,7 +1275,7 @@ error:
*
* Make sure to set the right flags for the PTEs at the desired level.
*/
-static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
struct amdgpu_bo *bo, unsigned level,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
@@ -1525,13 +1294,14 @@ static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
flags |= AMDGPU_PTE_EXECUTABLE;
}
- amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
+ params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
+ flags);
}
/**
* amdgpu_vm_fragment - get fragment for PTEs
*
- * @params: see amdgpu_pte_update_params definition
+ * @params: see amdgpu_vm_update_params definition
* @start: first PTE to handle
* @end: last PTE to handle
* @flags: hw mapping flags
@@ -1540,7 +1310,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
*
* Returns the first possible fragment for the start and end address.
*/
-static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end, uint64_t flags,
unsigned int *frag, uint64_t *frag_end)
{
@@ -1573,7 +1343,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
max_frag = 31;
/* system pages are non continuously */
- if (params->src) {
+ if (params->pages_addr) {
*frag = 0;
*frag_end = end;
return;
@@ -1592,7 +1362,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
- * @params: see amdgpu_pte_update_params definition
+ * @params: see amdgpu_vm_update_params definition
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to, the next dst inside the function
@@ -1603,7 +1373,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
* Returns:
* 0 for success, -EINVAL for failure.
*/
-static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
@@ -1611,6 +1381,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
struct amdgpu_vm_pt_cursor cursor;
uint64_t frag_start = start, frag_end;
unsigned int frag;
+ int r;
/* figure out the initial fragment */
amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
@@ -1618,12 +1389,15 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the PTs */
amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
while (cursor.pfn < end) {
- struct amdgpu_bo *pt = cursor.entry->base.bo;
unsigned shift, parent_shift, mask;
uint64_t incr, entry_end, pe_start;
+ struct amdgpu_bo *pt;
- if (!pt)
- return -ENOENT;
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
+ if (r)
+ return r;
+
+ pt = cursor.entry->base.bo;
/* The root level can't be a huge page */
if (cursor.level == adev->vm_manager.root_level) {
@@ -1632,16 +1406,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
continue;
}
- /* If it isn't already handled it can't be a huge page */
- if (cursor.entry->huge) {
- /* Add the entry to the relocated list to update it. */
- cursor.entry->huge = false;
- amdgpu_vm_bo_relocated(&cursor.entry->base);
- }
-
shift = amdgpu_vm_level_shift(adev, cursor.level);
parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
- if (adev->asic_type < CHIP_VEGA10) {
+ if (adev->asic_type < CHIP_VEGA10 &&
+ (flags & AMDGPU_PTE_VALID)) {
/* No huge page support before GMC v9 */
if (cursor.level != AMDGPU_VM_PTB) {
if (!amdgpu_vm_pt_descendant(adev, &cursor))
@@ -1697,9 +1465,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
} while (frag_start < entry_end);
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
- /* Mark all child entries as huge */
+ /* Free all child entries */
while (cursor.pfn < frag_start) {
- cursor.entry->huge = true;
+ amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
}
@@ -1738,137 +1506,28 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
uint64_t flags, uint64_t addr,
struct dma_fence **fence)
{
- struct amdgpu_ring *ring;
+ struct amdgpu_vm_update_params params;
void *owner = AMDGPU_FENCE_OWNER_VM;
- unsigned nptes, ncmds, ndw;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
owner = AMDGPU_FENCE_OWNER_KFD;
- if (vm->use_cpu_for_update) {
- /* params.src is used as flag to indicate system Memory */
- if (pages_addr)
- params.src = ~0;
-
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
- if (unlikely(r))
- return r;
-
- /* Wait for any BO move to be completed */
- if (exclusive) {
- r = dma_fence_wait(exclusive, true);
- if (unlikely(r))
- return r;
- }
-
- params.func = amdgpu_vm_cpu_set_ptes;
- params.pages_addr = pages_addr;
- return amdgpu_vm_update_ptes(&params, start, last + 1,
- addr, flags);
- }
-
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
-
- nptes = last - start + 1;
-
- /*
- * reserve space for two commands every (1 << BLOCK_SIZE)
- * entries or 2k dwords (whatever is smaller)
- */
- ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
-
- /* The second command is for the shadow pagetables. */
- if (vm->root.base.bo->shadow)
- ncmds *= 2;
-
- /* padding, etc. */
- ndw = 64;
-
- if (pages_addr) {
- /* copy commands needed */
- ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
-
- /* and also PTEs */
- ndw += nptes * 2;
-
- params.func = amdgpu_vm_do_copy_ptes;
-
- } else {
- /* set page commands needed */
- ndw += ncmds * 10;
-
- /* extra commands for begin/end fragments */
- ncmds = 2 * adev->vm_manager.fragment_size;
- if (vm->root.base.bo->shadow)
- ncmds *= 2;
-
- ndw += 10 * ncmds;
-
- params.func = amdgpu_vm_do_set_ptes;
- }
-
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ r = vm->update_funcs->prepare(&params, owner, exclusive);
if (r)
return r;
- params.ib = &job->ibs[0];
-
- if (pages_addr) {
- uint64_t *pte;
- unsigned i;
-
- /* Put the PTEs at the end of the IB. */
- i = ndw - nptes * 2;
- pte= (uint64_t *)&(job->ibs->ptr[i]);
- params.src = job->ibs->gpu_addr + i * 4;
-
- for (i = 0; i < nptes; ++i) {
- pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
- AMDGPU_GPU_PAGE_SIZE);
- pte[i] |= flags;
- }
- addr = 0;
- }
-
- r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
- if (r)
- goto error_free;
-
- r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
- owner, false);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
- goto error_free;
-
- amdgpu_ring_pad_ib(ring, params.ib);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(vm->root.base.bo, f, true);
- dma_fence_put(*fence);
- *fence = f;
- return 0;
+ return r;
-error_free:
- amdgpu_job_free(job);
- return r;
+ return vm->update_funcs->commit(&params, fence);
}
/**
@@ -1880,6 +1539,7 @@ error_free:
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
* @flags: HW flags for the mapping
+ * @bo_adev: amdgpu_device pointer of the device the BO was allocated on
* @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
@@ -1895,6 +1555,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t flags,
+ struct amdgpu_device *bo_adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
@@ -1949,7 +1610,6 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (pages_addr) {
uint64_t count;
- max_entries = min(max_entries, 16ull * 1024ull);
for (count = 1;
count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
++count) {
@@ -1969,7 +1629,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
} else if (flags & AMDGPU_PTE_VALID) {
- addr += adev->vm_manager.vram_base_offset;
+ addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
}
@@ -2016,6 +1676,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct drm_mm_node *nodes;
struct dma_fence *exclusive, **last_update;
uint64_t flags;
+ struct amdgpu_device *bo_adev = adev;
int r;
if (clear || !bo) {
@@ -2034,10 +1695,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
exclusive = reservation_object_get_excl(bo->tbo.resv);
}
- if (bo)
+ if (bo) {
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
- else
+ bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ } else {
flags = 0x0;
+ }
if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
last_update = &vm->last_update;
@@ -2054,7 +1717,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
- mapping, flags, nodes,
+ mapping, flags, bo_adev, nodes,
last_update);
if (r)
return r;
@@ -2374,6 +2037,16 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
+ if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
+ (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
+ bo_va->is_xgmi = true;
+ mutex_lock(&adev->vm_manager.lock_pstate);
+ /* Power up XGMI if it can be potentially used */
+ if (++adev->vm_manager.xgmi_map_counter == 1)
+ amdgpu_xgmi_set_pstate(adev, 1);
+ mutex_unlock(&adev->vm_manager.lock_pstate);
+ }
+
return bo_va;
}
@@ -2792,6 +2465,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
}
dma_fence_put(bo_va->last_pt_update);
+
+ if (bo && bo_va->is_xgmi) {
+ mutex_lock(&adev->vm_manager.lock_pstate);
+ if (--adev->vm_manager.xgmi_map_counter == 0)
+ amdgpu_xgmi_set_pstate(adev, 0);
+ mutex_unlock(&adev->vm_manager.lock_pstate);
+ }
+
kfree(bo_va);
}
@@ -2949,20 +2630,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
adev->vm_manager.fragment_size);
}
-static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
+/**
+ * amdgpu_vm_wait_idle - wait for the VM to become idle
+ *
+ * @vm: VM object to wait for
+ * @timeout: timeout to wait for VM to become idle
+ */
+long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- struct amdgpu_retryfault_hashtable *fault_hash;
-
- fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
- if (!fault_hash)
- return fault_hash;
-
- INIT_CHASH_TABLE(fault_hash->hash,
- AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
- spin_lock_init(&fault_hash->lock);
- fault_hash->count = 0;
-
- return fault_hash;
+ return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.resv,
+ true, true, timeout);
}
/**
@@ -3018,6 +2695,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
+
+ if (vm->use_cpu_for_update)
+ vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ else
+ vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
@@ -3037,9 +2719,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
- r = amdgpu_vm_clear_bo(adev, vm, root,
- adev->vm_manager.root_level,
- vm->pte_support_ats);
+ r = amdgpu_vm_clear_bo(adev, vm, root);
if (r)
goto error_unreserve;
@@ -3058,12 +2738,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->pasid = pasid;
}
- vm->fault_hash = init_fault_hash();
- if (!vm->fault_hash) {
- r = -ENOMEM;
- goto error_free_root;
- }
-
INIT_KFIFO(vm->faults);
return 0;
@@ -3083,6 +2757,37 @@ error_free_sched_entity:
}
/**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD. If any subsequent PDs are allocated,
+ * page tables are being created and filled, so this is not a clean VM.
+ *
+ * Returns:
+ * 0 if this VM is clean
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ enum amdgpu_vm_level root = adev->vm_manager.root_level;
+ unsigned int entries = amdgpu_vm_num_entries(adev, root);
+ unsigned int i = 0;
+
+ if (!(vm->root.entries))
+ return 0;
+
+ for (i = 0; i < entries; i++) {
+ if (vm->root.entries[i].base.bo)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
* @adev: amdgpu_device pointer
@@ -3112,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
return r;
/* Sanity checks */
- if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
- r = -EINVAL;
+ r = amdgpu_vm_check_clean_reserved(adev, vm);
+ if (r)
goto unreserve_bo;
- }
if (pasid) {
unsigned long flags;
@@ -3134,9 +2838,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
* changing any other state, in case it fails.
*/
if (pte_support_ats != vm->pte_support_ats) {
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
- adev->vm_manager.root_level,
- pte_support_ats);
+ vm->pte_support_ats = pte_support_ats;
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
if (r)
goto free_idr;
}
@@ -3144,7 +2847,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- vm->pte_support_ats = pte_support_ats;
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
@@ -3219,15 +2921,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
- u64 fault;
int i, r;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
- /* Clear pending page faults from IH when the VM is destroyed */
- while (kfifo_get(&vm->faults, &fault))
- amdgpu_vm_clear_fault(vm->fault_hash, fault);
-
if (vm->pasid) {
unsigned long flags;
@@ -3236,9 +2933,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- kfree(vm->fault_hash);
- vm->fault_hash = NULL;
-
drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3267,10 +2961,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_pts(adev, vm);
+ amdgpu_vm_free_pts(adev, vm, NULL);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
+ WARN_ON(vm->root.base.bo);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vmid_free_reserved(adev, vm, i);
@@ -3315,6 +3010,9 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
idr_init(&adev->vm_manager.pasid_idr);
spin_lock_init(&adev->vm_manager.pasid_lock);
+
+ adev->vm_manager.xgmi_map_counter = 0;
+ mutex_init(&adev->vm_manager.lock_pstate);
}
/**
@@ -3405,78 +3103,3 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
}
}
}
-
-/**
- * amdgpu_vm_add_fault - Add a page fault record to fault hash table
- *
- * @fault_hash: fault hash table
- * @key: 64-bit encoding of PASID and address
- *
- * This should be called when a retry page fault interrupt is
- * received. If this is a new page fault, it will be added to a hash
- * table. The return value indicates whether this is a new fault, or
- * a fault that was already known and is already being handled.
- *
- * If there are too many pending page faults, this will fail. Retry
- * interrupts should be ignored in this case until there is enough
- * free space.
- *
- * Returns 0 if the fault was added, 1 if the fault was already known,
- * -ENOSPC if there are too many pending faults.
- */
-int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
-{
- unsigned long flags;
- int r = -ENOSPC;
-
- if (WARN_ON_ONCE(!fault_hash))
- /* Should be allocated in amdgpu_vm_init
- */
- return r;
-
- spin_lock_irqsave(&fault_hash->lock, flags);
-
- /* Only let the hash table fill up to 50% for best performance */
- if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
- goto unlock_out;
-
- r = chash_table_copy_in(&fault_hash->hash, key, NULL);
- if (!r)
- fault_hash->count++;
-
- /* chash_table_copy_in should never fail unless we're losing count */
- WARN_ON_ONCE(r < 0);
-
-unlock_out:
- spin_unlock_irqrestore(&fault_hash->lock, flags);
- return r;
-}
-
-/**
- * amdgpu_vm_clear_fault - Remove a page fault record
- *
- * @fault_hash: fault hash table
- * @key: 64-bit encoding of PASID and address
- *
- * This should be called when a page fault has been handled. Any
- * future interrupt with this key will be processed as a new
- * page fault.
- */
-void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
-{
- unsigned long flags;
- int r;
-
- if (!fault_hash)
- return;
-
- spin_lock_irqsave(&fault_hash->lock, flags);
-
- r = chash_table_remove(&fault_hash->hash, key, NULL);
- if (!WARN_ON_ONCE(r < 0)) {
- fault_hash->count--;
- WARN_ON_ONCE(fault_hash->count < 0);
- }
-
- spin_unlock_irqrestore(&fault_hash->lock, flags);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 81ff8177f092..91baf95212a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -30,7 +30,6 @@
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <linux/chash.h>
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
@@ -140,7 +139,6 @@ struct amdgpu_vm_bo_base {
struct amdgpu_vm_pt {
struct amdgpu_vm_bo_base base;
- bool huge;
/* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries;
@@ -167,11 +165,6 @@ struct amdgpu_vm_pte_funcs {
uint32_t incr, uint64_t flags);
};
-#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
-#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
-#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
-
-
struct amdgpu_task_info {
char process_name[TASK_COMM_LEN];
char task_name[TASK_COMM_LEN];
@@ -179,11 +172,52 @@ struct amdgpu_task_info {
pid_t tgid;
};
-#define AMDGPU_PAGEFAULT_HASH_BITS 8
-struct amdgpu_retryfault_hashtable {
- DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
- spinlock_t lock;
- int count;
+/**
+ * struct amdgpu_vm_update_params
+ *
+ * Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
+ *
+ */
+struct amdgpu_vm_update_params {
+
+ /**
+ * @adev: amdgpu device we do this update for
+ */
+ struct amdgpu_device *adev;
+
+ /**
+ * @vm: optional amdgpu_vm we do this update for
+ */
+ struct amdgpu_vm *vm;
+
+ /**
+ * @pages_addr:
+ *
+ * DMA addresses to use for mapping
+ */
+ dma_addr_t *pages_addr;
+
+ /**
+ * @job: job to be used for hw submission
+ */
+ struct amdgpu_job *job;
+
+ /**
+ * @num_dw_left: number of dw left for the IB
+ */
+ unsigned int num_dw_left;
+};
+
+struct amdgpu_vm_update_funcs {
+ int (*map_table)(struct amdgpu_bo *bo);
+ int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
+ struct dma_fence *exclusive);
+ int (*update)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr, uint64_t flags);
+ int (*commit)(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence);
};
struct amdgpu_vm {
@@ -221,7 +255,10 @@ struct amdgpu_vm {
struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
- bool use_cpu_for_update;
+ bool use_cpu_for_update;
+
+ /* Functions to use for VM table updates */
+ const struct amdgpu_vm_update_funcs *update_funcs;
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
@@ -245,7 +282,6 @@ struct amdgpu_vm {
struct ttm_lru_bulk_move lru_bulk_move;
/* mark whether can do the bulk move */
bool bulk_moveable;
- struct amdgpu_retryfault_hashtable *fault_hash;
};
struct amdgpu_vm_manager {
@@ -267,6 +303,7 @@ struct amdgpu_vm_manager {
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rqs;
+ struct amdgpu_ring *page_fault;
/* partial resident texture handling */
spinlock_t prt_lock;
@@ -283,14 +320,23 @@ struct amdgpu_vm_manager {
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
+
+ /* counter of mapped memory through xgmi */
+ uint32_t xgmi_map_counter;
+ struct mutex lock_pstate;
};
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
+
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+
+long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
@@ -303,9 +349,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- uint64_t saddr, uint64_t size);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
@@ -319,6 +362,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -358,11 +402,6 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
-
-int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
-
-void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
-
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
#endif
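For orientation, the amdgpu_vm_update_funcs interface declared above is driven in a fixed map_table/prepare/update/commit order, with vm->update_funcs selecting the CPU or SDMA backend at VM init time. A hedged sketch of a caller follows (the function name is illustrative and error unwinding is simplified; only the vtable, the struct fields and AMDGPU_FENCE_OWNER_VM come from the patch):

/* Illustrative only: how a single page-table entry update might drive the
 * new vtable. Assumes the declarations from amdgpu_vm.h above. */
static int example_update_one_entry(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo *table,
				    uint64_t pe, uint64_t addr, uint64_t flags)
{
	struct amdgpu_vm_update_params p = {
		.adev = adev,
		.vm = vm,
	};
	int r;

	r = vm->update_funcs->map_table(table);	/* kmap (CPU) or GART map (SDMA) */
	if (r)
		return r;

	r = vm->update_funcs->prepare(&p, AMDGPU_FENCE_OWNER_VM, NULL);
	if (r)
		return r;

	r = vm->update_funcs->update(&p, table, pe, addr, 1, 0, flags);
	if (r)
		return r;

	return vm->update_funcs->commit(&p, NULL);
}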
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
new file mode 100644
index 000000000000..5222d165abfc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+/**
+ * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+{
+ return amdgpu_bo_kmap(table, NULL);
+}
+
+/**
+ * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @owner: owner we need to sync to
+ * @exclusive: exclusive move fence we need to sync to
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
+ struct dma_fence *exclusive)
+{
+ int r;
+
+ /* Wait for PT BOs to be idle. PTs share the same reservation object
+ * as the root PD BO.
+ */
+ r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+ if (unlikely(r))
+ return r;
+
+ /* Wait for any BO move to be completed */
+ if (exclusive) {
+ r = dma_fence_wait(exclusive, true);
+ if (unlikely(r))
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_update - helper to update page tables via CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: kmap addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Write count number of PT/PD entries directly.
+ */
+static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i;
+ uint64_t value;
+
+ pe += (unsigned long)amdgpu_bo_kptr(bo);
+
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
+ for (i = 0; i < count; i++) {
+ value = p->pages_addr ?
+ amdgpu_vm_map_gart(p->pages_addr, addr) :
+ addr;
+ amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
+ i, value, flags);
+ addr += incr;
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_commit - commit page table update to the HW
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: unused
+ *
+ * Make sure that the hardware sees the page table updates.
+ */
+static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ /* Flush HDP */
+ mb();
+ amdgpu_asic_flush_hdp(p->adev, NULL);
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
+ .map_table = amdgpu_vm_cpu_map_table,
+ .prepare = amdgpu_vm_cpu_prepare,
+ .update = amdgpu_vm_cpu_update,
+ .commit = amdgpu_vm_cpu_commit
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
new file mode 100644
index 000000000000..ddd181f5ed37
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+#define AMDGPU_VM_SDMA_MIN_NUM_DW 256u
+#define AMDGPU_VM_SDMA_MAX_NUM_DW (16u * 1024u)
+
+/**
+ * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+{
+ int r;
+
+ r = amdgpu_ttm_alloc_gart(&table->tbo);
+ if (r)
+ return r;
+
+ if (table->shadow)
+ r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
+
+ return r;
+}
+
+/**
+ * amdgpu_vm_sdma_prepare - prepare SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @owner: owner we need to sync to
+ * @exclusive: exclusive move fence we need to sync to
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
+ void *owner, struct dma_fence *exclusive)
+{
+ struct amdgpu_bo *root = p->vm->root.base.bo;
+ unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+ int r;
+
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
+ owner, false);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ return 0;
+}
+
+/**
+ * amdgpu_vm_sdma_commit - commit SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: resulting fence
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ struct amdgpu_bo *root = p->vm->root.base.bo;
+ struct amdgpu_ib *ib = p->job->ibs;
+ struct amdgpu_ring *ring;
+ struct dma_fence *f;
+ int r;
+
+ ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+
+ WARN_ON(ib->length_dw == 0);
+ amdgpu_ring_pad_ib(ring, ib);
+ WARN_ON(ib->length_dw > p->num_dw_left);
+ r = amdgpu_job_submit(p->job, &p->vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &f);
+ if (r)
+ goto error;
+
+ amdgpu_bo_fence(root, f, true);
+ if (fence)
+ swap(*fence, f);
+ dma_fence_put(f);
+ return 0;
+
+error:
+ amdgpu_job_free(p->job);
+ return r;
+}
+
+
+/**
+ * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @count: number of page entries to copy
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ unsigned count)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+ uint64_t src = ib->gpu_addr;
+
+ src += p->num_dw_left * 4;
+
+ pe += amdgpu_bo_gpu_offset(bo);
+ trace_amdgpu_vm_copy_ptes(pe, src, count);
+
+ amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
+}
+
+/**
+ * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right ASIC functions
+ * to set up the page table using DMA.
+ */
+static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+
+ pe += amdgpu_bo_gpu_offset(bo);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ if (count < 3) {
+ amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
+ count, incr);
+ } else {
+ amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
+ * amdgpu_vm_sdma_update - execute VM update
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Reserve space in the IB, set up the mapping buffer on demand and write
+ * commands to the IB.
+ */
+static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i, ndw, nptes;
+ uint64_t *pte;
+ int r;
+
+ do {
+ ndw = p->num_dw_left;
+ ndw -= p->job->ibs->length_dw;
+
+ if (ndw < 32) {
+ r = amdgpu_vm_sdma_commit(p, NULL);
+ if (r)
+ return r;
+
+ /* estimate how many dw we need */
+ ndw = 32;
+ if (p->pages_addr)
+ ndw += count * 2;
+ ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
+ ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ }
+
+ if (!p->pages_addr) {
+ /* set page commands needed */
+ if (bo->shadow)
+ amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
+ count, incr, flags);
+ amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
+ incr, flags);
+ return 0;
+ }
+
+ /* copy commands needed */
+ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
+ (bo->shadow ? 2 : 1);
+
+ /* for padding */
+ ndw -= 7;
+
+ nptes = min(count, ndw / 2);
+
+ /* Put the PTEs at the end of the IB. */
+ p->num_dw_left -= nptes * 2;
+ pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
+ for (i = 0; i < nptes; ++i, addr += incr) {
+ pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
+ pte[i] |= flags;
+ }
+
+ if (bo->shadow)
+ amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
+ amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
+
+ pe += nptes * 8;
+ count -= nptes;
+ } while (count);
+
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
+ .map_table = amdgpu_vm_sdma_map_table,
+ .prepare = amdgpu_vm_sdma_prepare,
+ .update = amdgpu_vm_sdma_update,
+ .commit = amdgpu_vm_sdma_commit
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3f9d5d00c9b3..ec9ea3fdbb4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -33,6 +33,85 @@ struct amdgpu_vram_mgr {
};
/**
+ * DOC: mem_info_vram_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total VRAM
+ * available on the device.
+ * The file mem_info_vram_total is used for this and returns the total
+ * amount of VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
+}
+
+/**
+ * DOC: mem_info_vis_vram_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total
+ * visible VRAM available on the device.
+ * The file mem_info_vis_vram_total is used for this and returns the total
+ * amount of visible VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
+}
+
+/**
+ * DOC: mem_info_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
+ * currently in use on the device.
+ * The file mem_info_vram_used is used for this and returns the total
+ * amount of currently used VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+}
+
+/**
+ * DOC: mem_info_vis_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total of
+ * used visible VRAM.
+ * The file mem_info_vis_vram_used is used for this and returns the total
+ * amount of currently used visible VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+}
+
+static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
+ amdgpu_mem_info_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
+ amdgpu_mem_info_vis_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
+ amdgpu_mem_info_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
+ amdgpu_mem_info_vis_vram_used_show, NULL);
+
+/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
* @man: TTM memory type manager
@@ -43,7 +122,9 @@ struct amdgpu_vram_mgr {
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr;
+ int ret;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -52,6 +133,29 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
drm_mm_init(&mgr->mm, 0, p_size);
spin_lock_init(&mgr->lock);
man->priv = mgr;
+
+ /* Add the four VRAM-related sysfs files */
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_used\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
+ return ret;
+ }
+
return 0;
}
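The four attributes registered above are plain read-only sysfs files, so the values can be read from user space without any driver-specific interface. A small hedged example follows (the /sys/class/drm/card0/device path is the usual location for these per-device attributes, but the exact card index depends on the system):

/* Illustrative user-space reader for the new VRAM sysfs files. */
#include <stdio.h>

int main(void)
{
	const char *files[] = {
		"/sys/class/drm/card0/device/mem_info_vram_total",
		"/sys/class/drm/card0/device/mem_info_vram_used",
	};
	char buf[64];

	for (unsigned int i = 0; i < 2; i++) {
		FILE *f = fopen(files[i], "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("%s: %s", files[i], buf);	/* value in bytes */
		if (f)
			fclose(f);
	}
	return 0;
}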
@@ -65,6 +169,7 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
*/
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
@@ -72,6 +177,10 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 407dd16cc35c..a48c84c51775 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_smu.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -34,12 +35,132 @@ static DEFINE_MUTEX(xgmi_mutex);
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
-
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
return &hive->device_list;
}
+static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_hive_info *hive =
+ container_of(attr, struct amdgpu_hive_info, dev_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
+}
+
+static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ int ret = 0;
+
+ if (WARN_ON(hive->kobj))
+ return -EINVAL;
+
+ hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
+ if (!hive->kobj) {
+ dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
+ return -EINVAL;
+ }
+
+ hive->dev_attr = (struct device_attribute) {
+ .attr = {
+ .name = "xgmi_hive_id",
+ .mode = S_IRUGO,
+
+ },
+ .show = amdgpu_xgmi_show_hive_id,
+ };
+
+ ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
+ kobject_del(hive->kobj);
+ kobject_put(hive->kobj);
+ hive->kobj = NULL;
+ }
+
+ return ret;
+}
+
+static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
+ kobject_del(hive->kobj);
+ kobject_put(hive->kobj);
+ hive->kobj = NULL;
+}
+
+static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
+
+}
+
+
+static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
+
+
+static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ int ret = 0;
+ char node[10] = { 0 };
+
+ /* Create xgmi device id file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
+ return ret;
+ }
+
+ /* On secondary devices, create a sysfs link to the hive info folder under the first device */
+ if (adev != hive->adev) {
+ ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
+ "xgmi_hive_info");
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create link to hive info");
+ goto remove_file;
+ }
+ }
+
+ sprintf(node, "node%d", hive->number_devices);
+ /* Create a sysfs link from the hive folder back to this device */
+ ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create link from hive info");
+ goto remove_link;
+ }
+
+ goto success;
+
+
+remove_link:
+ sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+
+remove_file:
+ device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+
+success:
+ return ret;
+}
+
+static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+ sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+ sysfs_remove_link(hive->kobj, adev->ddev->unique);
+}
+
+
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
int i;
@@ -66,18 +187,50 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
/* initialize new hive if not exist */
tmp = &xgmi_hives[hive_count++];
+
+ if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
+ mutex_unlock(&xgmi_mutex);
+ return NULL;
+ }
+
+ tmp->adev = adev;
tmp->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&tmp->device_list);
mutex_init(&tmp->hive_lock);
mutex_init(&tmp->reset_lock);
+
if (lock)
mutex_lock(&tmp->hive_lock);
-
+ tmp->pstate = -1;
mutex_unlock(&xgmi_mutex);
return tmp;
}
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
+{
+ int ret = 0;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+ if (!hive)
+ return 0;
+
+ if (hive->pstate == pstate)
+ return 0;
+
+ dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+
+ if (is_support_sw_smu(adev))
+ ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+
+ return ret;
+}
+
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
int ret = -EINVAL;
@@ -156,8 +309,17 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
break;
}
- dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
- adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+ if (!ret)
+ ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
+
+ if (!ret)
+ dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+ else
+ dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
+ ret);
+
mutex_unlock(&hive->hive_lock);
exit:
@@ -176,9 +338,11 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return;
if (!(hive->number_devices--)) {
+ amdgpu_xgmi_sysfs_destroy(adev, hive);
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
} else {
+ amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 14bc60664159..3e9c91e9a4bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -29,13 +29,25 @@ struct amdgpu_hive_info {
struct list_head device_list;
struct psp_xgmi_topology_info topology_info;
int number_devices;
- struct mutex hive_lock,
- reset_lock;
+ struct mutex hive_lock, reset_lock;
+ struct kobject *kobj;
+ struct device_attribute dev_attr;
+ struct amdgpu_device *adev;
+ int pstate; /* 0 -- low, 1 -- high, -1 -- unknown */
};
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
+
+static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev)
+{
+ return (adev != bo_adev &&
+ adev->gmc.xgmi.hive_id &&
+ adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 189599b694e8..d42808b05971 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -977,8 +977,8 @@ static int cik_sdma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1114,7 +1114,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1130,7 +1130,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 305276c7e4bf..c0cb244f58cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -782,6 +782,25 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16);
+ tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size);
+ tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size);
tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b8e50a34bdb3..02955e6e9dd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3236,6 +3236,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
dev_warn(adev->dev,
"Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
adev->asic_type);
+ /* fall through */
case CHIP_CARRIZO:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d0309e8c9d12..ba67d1023264 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -40,6 +40,8 @@
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+#include "amdgpu_ras.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -576,6 +578,27 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
}
}
+static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ break;
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ break;
+ if ((adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ break;
+ default:
+ break;
+ }
+}
+
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -828,6 +851,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
}
out:
+ gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {
dev_err(adev->dev,
@@ -1639,6 +1663,18 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* ECC error */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* FUE error */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
gfx_v9_0_scratch_init(adev);
@@ -1731,6 +1767,20 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
+ adev->gfx.ras_if) {
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
@@ -2405,8 +2455,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- adev->gfx.rlc.funcs->reset(adev);
-
gfx_v9_0_init_pg(adev);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -3305,6 +3353,7 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -3494,6 +3543,80 @@ static int gfx_v9_0_early_init(void *handle)
return 0;
}
+static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+static int gfx_v9_0_ecc_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = gfx_v9_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "gfx_err_count",
+ .debugfs_name = "gfx_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__GFX,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "gfx",
+ };
+ int r;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto irq;
+
+ return 0;
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
static int gfx_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3507,6 +3630,10 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
+ r = gfx_v9_0_ecc_late_init(handle);
+ if (r)
+ return r;
+
return 0;
}
@@ -4543,6 +4670,45 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
return 0;
}
+#define ENABLE_ECC_ON_ME_PIPE(me, pipe) \
+ WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
+ CP_ECC_ERROR_INT_ENABLE, 1)
+
+#define DISABLE_ECC_ON_ME_PIPE(me, pipe) \
+ WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
+ CP_ECC_ERROR_INT_ENABLE, 0)
+
+static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ CP_ECC_ERROR_INT_ENABLE, 0);
+ DISABLE_ECC_ON_ME_PIPE(1, 0);
+ DISABLE_ECC_ON_ME_PIPE(1, 1);
+ DISABLE_ECC_ON_ME_PIPE(1, 2);
+ DISABLE_ECC_ON_ME_PIPE(1, 3);
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ CP_ECC_ERROR_INT_ENABLE, 1);
+ ENABLE_ECC_ON_ME_PIPE(1, 0);
+ ENABLE_ECC_ON_ME_PIPE(1, 1);
+ ENABLE_ECC_ON_ME_PIPE(1, 2);
+ ENABLE_ECC_ON_ME_PIPE(1, 3);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -4659,6 +4825,34 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ /* TODO: a UE (uncorrectable error) will trigger an interrupt. */
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+ return AMDGPU_RAS_UE;
+}
+
+static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ DRM_ERROR("CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -4820,6 +5014,12 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
.process = gfx_v9_0_priv_inst_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
+ .set = gfx_v9_0_set_cp_ecc_error_state,
+ .process = gfx_v9_0_cp_ecc_error_irq,
+};
+
+
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -4830,6 +5030,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
+
+ adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index f5edddf3b29d..7bb5359d0bbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -143,7 +143,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL, tmp);
@@ -236,7 +236,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fc3296592fe..b06d876da2d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -225,7 +225,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -383,20 +383,6 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- value = addr & 0xFFFFFFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
-
static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
{
@@ -886,7 +872,7 @@ static int gmc_v6_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v6_0_init_microcode(adev);
if (r) {
@@ -1169,7 +1155,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
- .set_pte_pde = gmc_v6_0_set_pte_pde,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 761dcfb2fec0..75aa3332aee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -242,7 +242,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -460,31 +460,6 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-/**
- * gmc_v7_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
- *
- * Update the page tables using the CPU.
- */
-static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- value = addr & 0xFFFFFFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
-
static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
{
@@ -1030,7 +1005,7 @@ static int gmc_v7_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v7_0_init_microcode(adev);
if (r) {
@@ -1376,7 +1351,6 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v7_0_set_pte_pde,
.set_prt = gmc_v7_0_set_prt,
.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
.get_vm_pde = gmc_v7_0_get_vm_pde
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 34440672f938..8a3b5e6fc6c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -433,7 +433,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -662,50 +662,26 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-/**
- * gmc_v8_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
+/*
+ * PTE format on VI:
+ * 63:40 reserved
+ * 39:12 4k physical page base address
+ * 11:7 fragment
+ * 6 write
+ * 5 read
+ * 4 exe
+ * 3 reserved
+ * 2 snooped
+ * 1 system
+ * 0 valid
*
- * Update the page tables using the CPU.
+ * PDE format on VI:
+ * 63:59 block fragment size
+ * 58:40 reserved
+ * 39:1 physical base address of PTE
+ * bits 5:1 must be 0.
+ * 0 valid
*/
-static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- /*
- * PTE format on VI:
- * 63:40 reserved
- * 39:12 4k physical page base address
- * 11:7 fragment
- * 6 write
- * 5 read
- * 4 exe
- * 3 reserved
- * 2 snooped
- * 1 system
- * 0 valid
- *
- * PDE format on VI:
- * 63:59 block fragment size
- * 58:40 reserved
- * 39:1 physical base address of PTE
- * bits 5:1 must be 0.
- * 0 valid
- */
- value = addr & 0x000000FFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
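The PTE layout documented in the comment above maps directly onto a 64-bit value; purely for illustration, assembling such an entry by hand would look roughly like the sketch below (the helper is not part of the driver, which composes these bits through its AMDGPU_PTE_* flag definitions):

/* Illustrative only: build a VI PTE from the bit layout documented above. */
static inline uint64_t vi_build_pte(uint64_t phys_addr, unsigned int fragment,
				    bool valid, bool system, bool snooped,
				    bool exe, bool read, bool write)
{
	uint64_t pte = phys_addr & 0x000000FFFFFFF000ULL;	/* 39:12 page base */

	pte |= (uint64_t)valid << 0;
	pte |= (uint64_t)system << 1;
	pte |= (uint64_t)snooped << 2;
	/* bit 3 is reserved */
	pte |= (uint64_t)exe << 4;
	pte |= (uint64_t)read << 5;
	pte |= (uint64_t)write << 6;
	pte |= (uint64_t)(fragment & 0x1f) << 7;		/* 11:7 fragment */

	return pte;
}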
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
@@ -1155,7 +1131,7 @@ static int gmc_v8_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v8_0_init_microcode(adev);
if (r) {
@@ -1743,7 +1719,6 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v8_0_set_pte_pde,
.set_prt = gmc_v8_0_set_prt,
.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
.get_vm_pde = gmc_v8_0_get_vm_pde
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2fe8397241ea..3fd79e07944d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -47,6 +47,8 @@
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+#include "amdgpu_ras.h"
+
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -84,121 +86,182 @@ static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
-/* Ecc related register addresses, (BASE + reg offset) */
-/* Universal Memory Controller caps (may be fused). */
-/* UMCCH:UmcLocalCap */
-#define UMCLOCALCAPS_ADDR0 (0x00014306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR1 (0x00014306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR2 (0x00014306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR3 (0x00014306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR4 (0x00054306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR5 (0x00054306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR6 (0x00054306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR7 (0x00054306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR8 (0x00094306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR9 (0x00094306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR10 (0x00094306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR11 (0x00094306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR12 (0x000d4306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR13 (0x000d4306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR14 (0x000d4306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR15 (0x000d4306 + 0x00001800)
-
-/* Universal Memory Controller Channel config. */
-/* UMCCH:UMC_CONFIG */
-#define UMCCH_UMC_CONFIG_ADDR0 (0x00014040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR1 (0x00014040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR2 (0x00014040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR3 (0x00014040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR4 (0x00054040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR5 (0x00054040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR6 (0x00054040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR7 (0x00054040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR8 (0x00094040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR9 (0x00094040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR10 (0x00094040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR11 (0x00094040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR12 (0x000d4040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR13 (0x000d4040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR14 (0x000d4040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR15 (0x000d4040 + 0x00001800)
-
-/* Universal Memory Controller Channel Ecc config. */
-/* UMCCH:EccCtrl */
-#define UMCCH_ECCCTRL_ADDR0 (0x00014053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR1 (0x00014053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR2 (0x00014053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR3 (0x00014053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR4 (0x00054053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR5 (0x00054053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR6 (0x00054053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR7 (0x00054053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR8 (0x00094053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR9 (0x00094053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR10 (0x00094053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR11 (0x00094053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR12 (0x000d4053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR13 (0x000d4053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR14 (0x000d4053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR15 (0x000d4053 + 0x00001800)
-
-static const uint32_t ecc_umclocalcap_addrs[] = {
- UMCLOCALCAPS_ADDR0,
- UMCLOCALCAPS_ADDR1,
- UMCLOCALCAPS_ADDR2,
- UMCLOCALCAPS_ADDR3,
- UMCLOCALCAPS_ADDR4,
- UMCLOCALCAPS_ADDR5,
- UMCLOCALCAPS_ADDR6,
- UMCLOCALCAPS_ADDR7,
- UMCLOCALCAPS_ADDR8,
- UMCLOCALCAPS_ADDR9,
- UMCLOCALCAPS_ADDR10,
- UMCLOCALCAPS_ADDR11,
- UMCLOCALCAPS_ADDR12,
- UMCLOCALCAPS_ADDR13,
- UMCLOCALCAPS_ADDR14,
- UMCLOCALCAPS_ADDR15,
+static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
+ (0x000143c0 + 0x00000000),
+ (0x000143c0 + 0x00000800),
+ (0x000143c0 + 0x00001000),
+ (0x000143c0 + 0x00001800),
+ (0x000543c0 + 0x00000000),
+ (0x000543c0 + 0x00000800),
+ (0x000543c0 + 0x00001000),
+ (0x000543c0 + 0x00001800),
+ (0x000943c0 + 0x00000000),
+ (0x000943c0 + 0x00000800),
+ (0x000943c0 + 0x00001000),
+ (0x000943c0 + 0x00001800),
+ (0x000d43c0 + 0x00000000),
+ (0x000d43c0 + 0x00000800),
+ (0x000d43c0 + 0x00001000),
+ (0x000d43c0 + 0x00001800),
+ (0x001143c0 + 0x00000000),
+ (0x001143c0 + 0x00000800),
+ (0x001143c0 + 0x00001000),
+ (0x001143c0 + 0x00001800),
+ (0x001543c0 + 0x00000000),
+ (0x001543c0 + 0x00000800),
+ (0x001543c0 + 0x00001000),
+ (0x001543c0 + 0x00001800),
+ (0x001943c0 + 0x00000000),
+ (0x001943c0 + 0x00000800),
+ (0x001943c0 + 0x00001000),
+ (0x001943c0 + 0x00001800),
+ (0x001d43c0 + 0x00000000),
+ (0x001d43c0 + 0x00000800),
+ (0x001d43c0 + 0x00001000),
+ (0x001d43c0 + 0x00001800),
};
-static const uint32_t ecc_umcch_umc_config_addrs[] = {
- UMCCH_UMC_CONFIG_ADDR0,
- UMCCH_UMC_CONFIG_ADDR1,
- UMCCH_UMC_CONFIG_ADDR2,
- UMCCH_UMC_CONFIG_ADDR3,
- UMCCH_UMC_CONFIG_ADDR4,
- UMCCH_UMC_CONFIG_ADDR5,
- UMCCH_UMC_CONFIG_ADDR6,
- UMCCH_UMC_CONFIG_ADDR7,
- UMCCH_UMC_CONFIG_ADDR8,
- UMCCH_UMC_CONFIG_ADDR9,
- UMCCH_UMC_CONFIG_ADDR10,
- UMCCH_UMC_CONFIG_ADDR11,
- UMCCH_UMC_CONFIG_ADDR12,
- UMCCH_UMC_CONFIG_ADDR13,
- UMCCH_UMC_CONFIG_ADDR14,
- UMCCH_UMC_CONFIG_ADDR15,
+static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
+ (0x000143e0 + 0x00000000),
+ (0x000143e0 + 0x00000800),
+ (0x000143e0 + 0x00001000),
+ (0x000143e0 + 0x00001800),
+ (0x000543e0 + 0x00000000),
+ (0x000543e0 + 0x00000800),
+ (0x000543e0 + 0x00001000),
+ (0x000543e0 + 0x00001800),
+ (0x000943e0 + 0x00000000),
+ (0x000943e0 + 0x00000800),
+ (0x000943e0 + 0x00001000),
+ (0x000943e0 + 0x00001800),
+ (0x000d43e0 + 0x00000000),
+ (0x000d43e0 + 0x00000800),
+ (0x000d43e0 + 0x00001000),
+ (0x000d43e0 + 0x00001800),
+ (0x001143e0 + 0x00000000),
+ (0x001143e0 + 0x00000800),
+ (0x001143e0 + 0x00001000),
+ (0x001143e0 + 0x00001800),
+ (0x001543e0 + 0x00000000),
+ (0x001543e0 + 0x00000800),
+ (0x001543e0 + 0x00001000),
+ (0x001543e0 + 0x00001800),
+ (0x001943e0 + 0x00000000),
+ (0x001943e0 + 0x00000800),
+ (0x001943e0 + 0x00001000),
+ (0x001943e0 + 0x00001800),
+ (0x001d43e0 + 0x00000000),
+ (0x001d43e0 + 0x00000800),
+ (0x001d43e0 + 0x00001000),
+ (0x001d43e0 + 0x00001800),
};
-static const uint32_t ecc_umcch_eccctrl_addrs[] = {
- UMCCH_ECCCTRL_ADDR0,
- UMCCH_ECCCTRL_ADDR1,
- UMCCH_ECCCTRL_ADDR2,
- UMCCH_ECCCTRL_ADDR3,
- UMCCH_ECCCTRL_ADDR4,
- UMCCH_ECCCTRL_ADDR5,
- UMCCH_ECCCTRL_ADDR6,
- UMCCH_ECCCTRL_ADDR7,
- UMCCH_ECCCTRL_ADDR8,
- UMCCH_ECCCTRL_ADDR9,
- UMCCH_ECCCTRL_ADDR10,
- UMCCH_ECCCTRL_ADDR11,
- UMCCH_ECCCTRL_ADDR12,
- UMCCH_ECCCTRL_ADDR13,
- UMCCH_ECCCTRL_ADDR14,
- UMCCH_ECCCTRL_ADDR15,
+static const uint32_t ecc_umc_mcumc_status_addrs[] = {
+ (0x000143c2 + 0x00000000),
+ (0x000143c2 + 0x00000800),
+ (0x000143c2 + 0x00001000),
+ (0x000143c2 + 0x00001800),
+ (0x000543c2 + 0x00000000),
+ (0x000543c2 + 0x00000800),
+ (0x000543c2 + 0x00001000),
+ (0x000543c2 + 0x00001800),
+ (0x000943c2 + 0x00000000),
+ (0x000943c2 + 0x00000800),
+ (0x000943c2 + 0x00001000),
+ (0x000943c2 + 0x00001800),
+ (0x000d43c2 + 0x00000000),
+ (0x000d43c2 + 0x00000800),
+ (0x000d43c2 + 0x00001000),
+ (0x000d43c2 + 0x00001800),
+ (0x001143c2 + 0x00000000),
+ (0x001143c2 + 0x00000800),
+ (0x001143c2 + 0x00001000),
+ (0x001143c2 + 0x00001800),
+ (0x001543c2 + 0x00000000),
+ (0x001543c2 + 0x00000800),
+ (0x001543c2 + 0x00001000),
+ (0x001543c2 + 0x00001800),
+ (0x001943c2 + 0x00000000),
+ (0x001943c2 + 0x00000800),
+ (0x001943c2 + 0x00001000),
+ (0x001943c2 + 0x00001800),
+ (0x001d43c2 + 0x00000000),
+ (0x001d43c2 + 0x00000800),
+ (0x001d43c2 + 0x00001000),
+ (0x001d43c2 + 0x00001800),
};
+static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 bits, i, tmp, reg;
+
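+ /* The same interrupt-enable bits are toggled in each UMC channel's
+  * MCUMC ctrl and ctrl-mask register below.
+  */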
+ bits = 0x7f;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_addrs[i];
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_addrs[i];
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
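+ /* An uncorrectable UMC error was reported: flag the SRAM ECC condition
+  * to KFD and request a GPU reset before returning the error type.
+  */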
+static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+ return AMDGPU_RAS_UE;
+}
+
+static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gmc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -244,62 +307,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry,
- uint64_t addr)
-{
- struct amdgpu_vm *vm;
- u64 key;
- int r;
-
- /* No PASID, can't identify faulting process */
- if (!entry->pasid)
- return true;
-
- /* Not a retry fault */
- if (!(entry->src_data[1] & 0x80))
- return true;
-
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- key = AMDGPU_VM_FAULT(entry->pasid, addr);
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
-}
-
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -312,9 +319,11 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
- if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
+ entry->timestamp))
return 1; /* This also prevents sending it to KFD */
+ /* If it's the first fault for this address, process it normally */
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -350,10 +359,19 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
.process = gmc_v9_0_process_interrupt,
};
+
+static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
+ .set = gmc_v9_0_ecc_interrupt_state,
+ .process = gmc_v9_0_process_ecc_irq,
+};
+
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
+
+ adev->gmc.ecc_irq.num_types = 1;
+ adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
@@ -466,64 +484,37 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, reg, pasid);
}
-/**
- * gmc_v9_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
+/*
+ * PTE format on VEGA 10:
+ * 63:59 reserved
+ * 58:57 mtype
+ * 56 F
+ * 55 L
+ * 54 P
+ * 53 SW
+ * 52 T
+ * 50:48 reserved
+ * 47:12 4k physical page base address
+ * 11:7 fragment
+ * 6 write
+ * 5 read
+ * 4 exe
+ * 3 Z
+ * 2 snooped
+ * 1 system
+ * 0 valid
*
- * Update the page tables using the CPU.
+ * PDE format on VEGA 10:
+ * 63:59 block fragment size
+ * 58:55 reserved
+ * 54 P
+ * 53:48 reserved
+ * 47:6 physical base address of PD or PTE
+ * 5:3 reserved
+ * 2 C
+ * 1 system
+ * 0 valid
*/
-static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- /*
- * PTE format on VEGA 10:
- * 63:59 reserved
- * 58:57 mtype
- * 56 F
- * 55 L
- * 54 P
- * 53 SW
- * 52 T
- * 50:48 reserved
- * 47:12 4k physical page base address
- * 11:7 fragment
- * 6 write
- * 5 read
- * 4 exe
- * 3 Z
- * 2 snooped
- * 1 system
- * 0 valid
- *
- * PDE format on VEGA 10:
- * 63:59 block fragment size
- * 58:55 reserved
- * 54 P
- * 53:48 reserved
- * 47:6 physical base address of PD or PTE
- * 5:3 reserved
- * 2 C
- * 1 system
- * 0 valid
- */
-
- /*
- * The following is for PTE only. GART does not have PDEs.
- */
- value = addr & 0x0000FFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
- return 0;
-}
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
@@ -593,7 +584,6 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v9_0_set_pte_pde,
.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
.get_vm_pde = gmc_v9_0_get_vm_pde
};
@@ -620,85 +610,6 @@ static int gmc_v9_0_early_init(void *handle)
return 0;
}
-static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
-{
- uint32_t reg_val;
- uint32_t reg_addr;
- uint32_t field_val;
- size_t i;
- uint32_t fv2;
- size_t lost_sheep;
-
- DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
-
- lost_sheep = 0;
- for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
- reg_addr = ecc_umclocalcap_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
- EccDis);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "EccDis: 0x%08x, ",
- reg_val, field_val);
- if (field_val) {
- DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
- ++lost_sheep;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
- reg_addr = ecc_umcch_umc_config_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
- DramReady);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "DramReady: 0x%08x\n",
- reg_val, field_val);
-
- if (!field_val) {
- DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
- ++lost_sheep;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
- reg_addr = ecc_umcch_eccctrl_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
- WrEccEn);
- fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
- RdEccEn);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "WrEccEn: 0x%08x, "
- "RdEccEn: 0x%08x\n",
- reg_val, field_val, fv2);
-
- if (!field_val) {
- DRM_DEBUG("ecc: WrEccEn is not set\n");
- ++lost_sheep;
- }
- if (!fv2) {
- DRM_DEBUG("ecc: RdEccEn is not set\n");
- ++lost_sheep;
- }
- }
-
- DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
- return lost_sheep == 0;
-}
-
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
@@ -751,31 +662,119 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->gmc.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = gmc_v9_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "umc_err_count",
+ .debugfs_name = "umc_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "umc",
+ };
int r;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+ /* handle resume path. */
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto irq;
+
+ return 0;
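+ /* Error unwind: release resources in the reverse order they were set up. */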
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
+static int gmc_v9_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
r = gmc_v9_0_allocate_vm_inv_eng(adev);
if (r)
return r;
+ /* Check if ecc is available */
+ if (!amdgpu_sriov_vf(adev)) {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("ECC is not present.\n");
+ if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
+ adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ } else {
+ DRM_INFO("ECC is active.\n");
+ }
- if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
- r = gmc_v9_0_ecc_available(adev);
- if (r == 1) {
- DRM_INFO("ECC is active.\n");
- } else if (r == 0) {
- DRM_INFO("ECC is not present.\n");
- adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else {
- DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
- return r;
+ r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("SRAM ECC is not present.\n");
+ } else {
+ DRM_INFO("SRAM ECC is active.\n");
+ }
+ break;
+ default:
+ break;
}
}
+ r = gmc_v9_0_ecc_late_init(handle);
+ if (r)
+ return r;
+
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
@@ -787,7 +786,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
base = mmhub_v1_0_get_fb_location(adev);
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
if (!amdgpu_sriov_vf(adev))
amdgpu_gmc_agp_location(adev, mc);
@@ -987,6 +986,12 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* interrupt sent to DF. */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+ &adev->gmc.ecc_irq);
+ if (r)
+ return r;
+
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
@@ -1011,7 +1016,7 @@ static int gmc_v9_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
@@ -1052,6 +1057,22 @@ static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+ adev->gmc.ras_if) {
+ struct ras_common_if *ras_if = adev->gmc.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ /* remove fs first */
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ /* remove the IH */
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1198,6 +1219,7 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v9_0_gart_disable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 0c9a2c03504e..f2e6b148ccad 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2824,7 +2824,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index d0d966d6080a..41a9a5779623 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -163,7 +163,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
@@ -255,7 +256,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 73851ebb3833..2471e7cf75ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
}
+static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
+{
+ int r = 0;
+ u32 req, val, size;
+
+ if (!amdgim_is_hwperf(adev) || buf == NULL)
+ return -EBADRQC;
+
+ switch (type) {
+ case PP_SCLK:
+ req = IDH_IRQ_GET_PP_SCLK;
+ break;
+ case PP_MCLK:
+ req = IDH_IRQ_GET_PP_MCLK;
+ break;
+ default:
+ return -EBADRQC;
+ }
+
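+ /* Send the request to the host; on success the reply string's offset
+  * into the pf2vf reserve area comes back in the mailbox receive
+  * register and is copied into buf below.
+  */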
+ mutex_lock(&adev->virt.dpm_mutex);
+
+ xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r && adev->fw_vram_usage.va != NULL) {
+ val = RREG32_NO_KIQ(
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
+ size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
+ val), PAGE_SIZE);
+
+ if (size < PAGE_SIZE)
+ strcpy(buf, ((char *)adev->virt.fw_reserve.p_pf2vf + val));
+ else
+ size = 0;
+
+ r = size;
+ goto out;
+ }
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if (r)
+ pr_info("%s DPM request failed",
+ (type == PP_SCLK) ? "SCLK" : "MCLK");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
+static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
+{
+ int r = 0;
+ u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
+
+ if (!amdgim_is_hwperf(adev))
+ return -EBADRQC;
+
+ mutex_lock(&adev->virt.dpm_mutex);
+ xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r)
+ goto out;
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if (!r)
+ pr_info("DPM request failed");
+ else
+ pr_info("Mailbox is broken");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
@@ -296,6 +372,9 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
if (amdgpu_sriov_runtime(adev))
schedule_work(&adev->virt.flr_work);
break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
* it here since that polling thread will handle it,
* other msg like flr complete is not handled here.
@@ -375,4 +454,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
+ .get_pp_clk = xgpu_ai_get_pp_clk,
+ .force_dpm_level = xgpu_ai_force_dpm_level,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index b4a9ceea334b..077e91a33d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,10 @@ enum idh_request {
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
+ IDH_IRQ_FORCE_DPM_LEVEL = 10,
+ IDH_IRQ_GET_PP_SCLK,
+ IDH_IRQ_GET_PP_MCLK,
+
IDH_LOG_VF_ERROR = 200,
};
@@ -43,6 +47,9 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
+ IDH_SUCCESS,
+ IDH_FAIL,
+ IDH_QUERY_ALIVE,
IDH_EVENT_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 6a0fcd67662a..aef9d059ae52 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -515,7 +515,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
/* wait until RCV_MSG become 3 */
if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
- pr_err("failed to recieve FLR_CMPL\n");
+ pr_err("failed to receive FLR_CMPL\n");
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index cc967dbfd631..6590143c3f75 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -118,7 +118,8 @@ static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
- ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ BIF_IH_DOORBELL_RANGE, SIZE, 6);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f3a7d207af07..2f79765b4bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+ GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 860b70d80d3c..b91df7bd1d98 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -33,6 +33,9 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
@@ -113,6 +116,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
+ adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
+ adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
+ le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
}
return 0;
@@ -217,6 +227,37 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
+static void psp_v11_0_reroute_ih(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t tmp;
+
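+ /* Each reroute below writes an index to C2PMSG_69 and the
+  * IH_CLIENT_CFG_DATA value to C2PMSG_70, then issues
+  * GFX_CTRL_CMD_ID_GBR_IH_SET through C2PMSG_64 and polls that
+  * register for completion.
+  */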
+ /* Change IH ring for VMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+
+ /* Change IH ring for UMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+}
+
static int psp_v11_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -224,6 +265,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
+ psp_v11_0_reroute_ih(psp);
+
ring = &psp->km_ring;
ring->ring_type = ring_type;
@@ -631,7 +674,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].is_sharing_enabled = 1;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
@@ -679,6 +722,54 @@ static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id
return 0;
}
+static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+ ras_cmd->ras_in_message.trigger_error = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+}
+
+static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
+{
+#if 0
+ // not supported yet.
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
+ ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+#else
+ return -EINVAL;
+#endif
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
@@ -695,6 +786,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
.support_vmr_ring = psp_v11_0_support_vmr_ring,
+ .ras_trigger_error = psp_v11_0_ras_trigger_error,
+ .ras_cure_posion = psp_v11_0_ras_cure_posion,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 0487e3a4e9e7..143f0fae69d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -37,6 +37,9 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_6_1_offset.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
@@ -252,6 +255,37 @@ static int psp_v3_1_ring_init(struct psp_context *psp,
return 0;
}
+static void psp_v3_1_reroute_ih(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t tmp;
+
+ /* Change IH ring for VMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+
+ /* Change IH ring for UMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+}
+
static int psp_v3_1_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -260,6 +294,8 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
+ psp_v3_1_reroute_ih(psp);
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index cca3552b36ed..36196372e8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -870,8 +870,8 @@ static int sdma_v2_4_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1006,7 +1006,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1022,7 +1022,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 0ce8331baeb2..6d39544e7829 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1154,8 +1154,8 @@ static int sdma_v3_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1340,7 +1340,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1356,7 +1356,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index c816e55d43a9..9c88ce513d78 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -41,6 +41,8 @@
#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+#include "amdgpu_ras.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -154,7 +156,6 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -184,7 +185,6 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000),
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -849,7 +849,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
/* enable DMA RB */
@@ -940,7 +940,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_PAGE_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
/* enable DMA RB */
@@ -1493,6 +1493,87 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
}
+static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+static int sdma_v4_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = sdma_v4_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "sdma_err_count",
+ .debugfs_name = "sdma_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__SDMA,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "sdma",
+ };
+ int r;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+
+ /* handle resume path. */
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ if (r)
+ goto irq;
+
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ goto irq;
+ }
+
+ return 0;
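+ /* Error unwind: release resources in the reverse order they were set up. */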
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -1511,6 +1592,18 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;
+ /* SDMA SRAM ECC event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+
+ /* SDMA SRAM ECC event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1526,8 +1619,8 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
@@ -1546,8 +1639,8 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1561,6 +1654,22 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
+ adev->sdma.ras_if) {
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ /* remove fs first */
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ /* remove the IH */
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
if (adev->sdma.has_page_queue)
@@ -1598,6 +1707,9 @@ static int sdma_v4_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
return 0;
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
+
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
@@ -1666,13 +1778,12 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
u32 sdma_cntl;
- sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
+ sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
+ WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
return 0;
}
@@ -1714,6 +1825,58 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t instance, err_source;
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_SDMA1:
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
+ err_source = 0;
+ break;
+ case SDMA0_4_0__SRCID__SDMA_ECC:
+ err_source = 1;
+ break;
+ default:
+ return 0;
+ }
+
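+ /* Both error sources are treated as uncorrectable: flag the SRAM ECC
+  * condition to KFD and request a GPU reset.
+  */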
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ amdgpu_ras_reset_gpu(adev, 0);
+
+ return AMDGPU_RAS_UE;
+}
+
+static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1741,6 +1904,25 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_edc_config;
+
+ u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+ sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) :
+ sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG);
+
+ sdma_edc_config = RREG32(reg_offset);
+ sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_edc_config);
+
+ return 0;
+}
+
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
@@ -1906,7 +2088,7 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.name = "sdma_v4_0",
.early_init = sdma_v4_0_early_init,
- .late_init = NULL,
+ .late_init = sdma_v4_0_late_init,
.sw_init = sdma_v4_0_sw_init,
.sw_fini = sdma_v4_0_sw_fini,
.hw_init = sdma_v4_0_hw_init,
@@ -2008,11 +2190,20 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
.process = sdma_v4_0_process_illegal_inst_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
+ .set = sdma_v4_0_set_ecc_irq_state,
+ .process = sdma_v4_0_process_ecc_irq,
+};
+
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
+ adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
}
/**
@@ -2077,8 +2268,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- if (adev->sdma.has_page_queue)
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[1].page;
else
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
@@ -2097,15 +2288,22 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->sdma.has_page_queue)
+ if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1) {
+ for (i = 1; i < adev->sdma.num_instances; i++) {
sched = &adev->sdma.instance[i].page.sched;
- else
+ adev->vm_manager.vm_pte_rqs[i - 1] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ }
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
+ adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ }
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f15f196684ba..3eeefd40dae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -503,8 +503,8 @@ static int si_dma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -591,7 +591,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
@@ -607,7 +607,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 41e01a7f57a4..d57e75e5c71f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -4098,14 +4098,13 @@ static int si_notify_smc_display_change(struct amdgpu_device *adev,
static void si_program_response_times(struct amdgpu_device *adev)
{
- u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+ u32 voltage_response_time, acpi_delay_time, vbi_time_out;
u32 vddc_dly, acpi_dly, vbi_dly;
u32 reference_clock;
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
- backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index ed89a101f73f..4900e4958dec 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -63,6 +63,7 @@
#include "vcn_v1_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
+#include "amdgpu_smu.h"
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
@@ -392,6 +393,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
u32 i;
+ int ret = 0;
amdgpu_atombios_scratch_regs_engine_hung(adev, true);
@@ -402,7 +404,9 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
pci_save_state(adev->pdev);
- psp_gpu_reset(adev);
+ ret = psp_gpu_reset(adev);
+ if (ret)
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
pci_restore_state(adev->pdev);
@@ -417,7 +421,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
- return 0;
+ return ret;
}
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
@@ -451,6 +455,8 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU BACO reset\n");
+ adev->in_baco_reset = 1;
+
return 0;
}
@@ -461,8 +467,15 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
soc15_asic_get_baco_capability(adev, &baco_reset);
break;
+ case CHIP_VEGA20:
+ if (adev->psp.sos_fw_version >= 0x80067)
+ soc15_asic_get_baco_capability(adev, &baco_reset);
+ else
+ baco_reset = false;
+ break;
default:
baco_reset = false;
break;
@@ -602,8 +615,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (is_support_sw_smu(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ }
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -884,7 +901,8 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
} else if (adev->pdev->device == 0x15d8) {
- adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
@@ -927,7 +945,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
new file mode 100644
index 000000000000..0b4e7b55595a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -0,0 +1,108 @@
+/****************************************************************************\
+*
+* File Name ta_ras_if.h
+* Project AMD PSP SW IP Module
+*
+* Description Interface to the RAS Trusted Application
+*
+* Copyright 2019 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+* and associated documentation files (the "Software"), to deal in the Software without restriction,
+* including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
+* subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in all copies or substantial
+* portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+#ifndef _TA_RAS_IF_H
+#define _TA_RAS_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+#define TA_NUM_BLOCK_MAX 14
+
+enum ras_command {
+ TA_RAS_COMMAND__ENABLE_FEATURES = 0,
+ TA_RAS_COMMAND__DISABLE_FEATURES,
+ TA_RAS_COMMAND__TRIGGER_ERROR,
+};
+
+enum ta_ras_status {
+ TA_RAS_STATUS__SUCCESS = 0x00,
+ TA_RAS_STATUS__RESET_NEEDED = 0x01,
+ TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02,
+ TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03,
+ TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04,
+ TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05
+};
+
+enum ta_ras_block {
+ TA_RAS_BLOCK__UMC = 0,
+ TA_RAS_BLOCK__SDMA,
+ TA_RAS_BLOCK__GFX,
+ TA_RAS_BLOCK__MMHUB,
+ TA_RAS_BLOCK__ATHUB,
+ TA_RAS_BLOCK__PCIE_BIF,
+ TA_RAS_BLOCK__HDP,
+ TA_RAS_BLOCK__XGMI_WAFL,
+ TA_RAS_BLOCK__DF,
+ TA_RAS_BLOCK__SMN,
+ TA_RAS_BLOCK__SEM,
+ TA_RAS_BLOCK__MP0,
+ TA_RAS_BLOCK__MP1,
+ TA_RAS_BLOCK__FUSE = (TA_NUM_BLOCK_MAX - 1),
+};
+
+enum ta_ras_error_type {
+ TA_RAS_ERROR__NONE = 0,
+ TA_RAS_ERROR__PARITY = 1,
+ TA_RAS_ERROR__SINGLE_CORRECTABLE = 2,
+ TA_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
+ TA_RAS_ERROR__POISON = 8
+};
+
+struct ta_ras_enable_features_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type error_type;
+};
+
+struct ta_ras_disable_features_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type error_type;
+};
+
+struct ta_ras_trigger_error_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type inject_error_type;
+ uint32_t sub_block_index;
+ uint64_t address;
+ uint64_t value;
+};
+
+union ta_ras_cmd_input {
+ struct ta_ras_enable_features_input enable_features;
+ struct ta_ras_disable_features_input disable_features;
+ struct ta_ras_trigger_error_input trigger_error;
+};
+
+struct ta_ras_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_ras_status ras_status;
+ uint32_t reserved;
+ union ta_ras_cmd_input ras_in_message;
+};
+
+#endif // _TA_RAS_IF_H
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index dc461df48da0..2191d3d0a219 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -787,10 +787,13 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
0xFFFFFFFF, 0x00000004);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
offset = 0;
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
@@ -798,10 +801,11 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.inst[i].gpu_addr));
offset = size;
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
}
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index bed78a778e3f..40363ca6c5f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -283,7 +283,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
}
if (vce_v2_0_wait_for_idle(adev)) {
- DRM_INFO("VCE is busy, Can't set clock gateing");
+ DRM_INFO("VCE is busy, Can't set clock gating");
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index aadc3e66ebd7..c0ec27991c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -244,13 +244,18 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
+ offset = AMDGPU_VCE_FIRMWARE_OFFSET;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+ uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+ uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
- mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+ mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+ (tmr_mc_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
@@ -258,6 +263,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
(adev->vce.gpu_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+ offset & ~0x0f000000);
+
}
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
@@ -272,10 +280,7 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
(adev->vce.gpu_addr >> 40) & 0xff);
- offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V4_0_FW_SIZE;
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
- offset & ~0x0f000000);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
@@ -382,6 +387,7 @@ static int vce_v4_0_start(struct amdgpu_device *adev)
static int vce_v4_0_stop(struct amdgpu_device *adev)
{
+ /* Disable VCPU */
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
/* hold on ECPU */
@@ -389,8 +395,8 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- /* clear BUSY flag */
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
+ /* clear VCE_STATUS */
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0);
/* Set Clock-Gating off */
/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -922,6 +928,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
return 0;
}
+#endif
static int vce_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
@@ -935,16 +942,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
- return 0;
-
if (state == AMD_PG_STATE_GATE)
- /* XXX do we need a vce_v4_0_stop()? */
- return 0;
+ return vce_v4_0_stop(adev);
else
return vce_v4_0_start(adev);
}
-#endif
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct amdgpu_ib *ib, uint32_t flags)
@@ -1059,7 +1061,7 @@ const struct amd_ip_funcs vce_v4_0_ip_funcs = {
.soft_reset = NULL /* vce_v4_0_soft_reset */,
.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
.set_clockgating_state = vce_v4_0_set_clockgating_state,
- .set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
+ .set_powergating_state = vce_v4_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 6d1f804277f8..8d89ab7f0ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -31,7 +31,7 @@
#include "soc15_common.h"
#include "vega10_ih.h"
-
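+/* Upper bound on doorbell re-write attempts in vega10_ih_irq_rearm() */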
+#define MAX_REARM_RETRY 10
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@@ -136,6 +136,25 @@ static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
return ih_rb_cntl;
}
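+/* Build the IH_DOORBELL_RPTR register value for an IH ring: program the
+ * doorbell offset and set the enable bit when the ring uses a doorbell,
+ * otherwise leave the doorbell disabled.
+ */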
+static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
/**
* vega10_ih_irq_init - init and enable the interrupt ring
*
@@ -150,8 +169,8 @@ static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih;
+ u32 ih_rb_cntl;
int ret = 0;
- u32 ih_rb_cntl, ih_doorbell_rtpr;
u32 tmp;
/* disable irqs */
@@ -177,23 +196,11 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
upper_32_bits(ih->wptr_addr) & 0xFFFF);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
- if (adev->irq.ih.use_doorbell) {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, OFFSET,
- adev->irq.ih.doorbell_index);
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR,
- ENABLE, 1);
- } else {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR,
- ENABLE, 0);
- }
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
+ vega10_ih_doorbell_rptr(ih));
ih = &adev->irq.ih1;
if (ih->ring_size) {
@@ -203,11 +210,18 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ RB_FULL_DRAIN_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
+ vega10_ih_doorbell_rptr(ih));
}
ih = &adev->irq.ih2;
@@ -216,13 +230,16 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
(ih->gpu_addr >> 40) & 0xff);
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
+ vega10_ih_doorbell_rptr(ih));
}
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
@@ -365,6 +382,38 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
}
/**
+ * vega10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring buffer whose doorbell should be re-armed
+ *
+ */
+static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t reg_rptr = 0;
+ uint32_t v = 0;
+ uint32_t i = 0;
+
+ if (ih == &adev->irq.ih)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ else if (ih == &adev->irq.ih1)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ else
+ return;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(reg_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
* vega10_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
@@ -378,6 +427,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ vega10_ih_irq_rearm(adev, ih);
} else if (ih == &adev->irq.ih) {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
} else if (ih == &adev->irq.ih1) {
@@ -449,20 +501,23 @@ static int vega10_ih_sw_init(void *handle)
if (r)
return r;
- if (adev->asic_type == CHIP_VEGA10) {
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
-
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
- }
-
- /* TODO add doorbell for IH1 & IH2 as well */
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 8be9677c0c07..c1e4d44d6137 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x9876, &carrizo_device_info }, /* Carrizo */
{ 0x9877, &carrizo_device_info }, /* Carrizo */
{ 0x15DD, &raven_device_info }, /* Raven */
+ { 0x15D8, &raven_device_info }, /* Raven */
#endif
{ 0x67A0, &hawaii_device_info }, /* Hawaii */
{ 0x67A1, &hawaii_device_info }, /* Hawaii */
@@ -466,6 +467,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
memset(&kfd->doorbell_available_index, 0,
sizeof(kfd->doorbell_available_index));
+ atomic_set(&kfd->sram_ecc_flag, 0);
+
return kfd;
}
@@ -491,9 +494,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
{
unsigned int size;
- kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
+ kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
- kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
+ kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
@@ -661,6 +664,9 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
return ret;
count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count != 0, "KFD reset ref. error");
+
+ atomic_set(&kfd->sram_ecc_flag, 0);
+
return 0;
}
@@ -1024,6 +1030,12 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
return 0;
}
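+/* Record that an SRAM ECC error was seen on this device; the flag is
+ * consumed by kfd_signal_reset_event() to report KFD_HW_EXCEPTION_ECC
+ * instead of a plain GPU hang after a reset.
+ */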
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+ if (kfd)
+ atomic_inc(&kfd->sram_ecc_flag);
+}
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index e9f0e0a1b41c..6e1d41c5bf86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1011,25 +1011,41 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
void kfd_signal_reset_event(struct kfd_dev *dev)
{
struct kfd_hsa_hw_exception_data hw_exception_data;
+ struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_process *p;
struct kfd_event *ev;
unsigned int temp;
uint32_t id, idx;
+ int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
+ KFD_HW_EXCEPTION_ECC :
+ KFD_HW_EXCEPTION_GPU_HANG;
/* Whole gpu reset caused by GPU hang and memory is lost */
memset(&hw_exception_data, 0, sizeof(hw_exception_data));
hw_exception_data.gpu_id = dev->id;
hw_exception_data.memory_lost = 1;
+ hw_exception_data.reset_cause = reset_cause;
+
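+ /* Also prepare an imprecise SRAM ECC memory error; it is only
+ * delivered to KFD_EVENT_TYPE_MEMORY events when the reset was
+ * caused by an ECC error.
+ */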
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
+ memory_exception_data.gpu_id = dev->id;
+ memory_exception_data.failure.imprecise = true;
idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
mutex_lock(&p->event_mutex);
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
- idr_for_each_entry_continue(&p->event_idr, ev, id)
+ idr_for_each_entry_continue(&p->event_idr, ev, id) {
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
ev->hw_exception_data = hw_exception_data;
set_event(ev);
}
+ if (ev->type == KFD_EVENT_TYPE_MEMORY &&
+ reset_cause == KFD_HW_EXCEPTION_ECC) {
+ ev->memory_exception_data = memory_exception_data;
+ set_event(ev);
+ }
+ }
mutex_unlock(&p->event_mutex);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 0eeee3c6d6dc..9e0230965675 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -276,6 +276,9 @@ struct kfd_dev {
uint64_t hive_id;
bool pci_atomic_requested;
+
+ /* SRAM ECC flag */
+ atomic_t sram_ecc_flag;
};
enum kfd_mempool {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 09da91644f9f..769dbc7be8cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -37,6 +37,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
/* topology_device_list - Master list of all topology devices */
static struct list_head topology_device_list;
@@ -1197,6 +1198,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
void *crat_image = NULL;
size_t image_size = 0;
int proximity_domain;
+ struct amdgpu_ras *ctx;
INIT_LIST_HEAD(&temp_topology_device_list);
@@ -1270,8 +1272,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
- dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
- gpu->pdev->devfn);
+ dev->node_props.location_id = pci_dev_id(gpu->pdev);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
@@ -1328,6 +1329,20 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
}
+ ctx = amdgpu_ras_get_context((struct amdgpu_device *)(dev->gpu->kgd));
+ if (ctx) {
+ /* kfd is only concerned with SRAM ECC on GFX/SDMA and HBM ECC on UMC */
+ dev->node_props.capability |=
+ (((ctx->features & BIT(AMDGPU_RAS_BLOCK__SDMA)) != 0) ||
+ ((ctx->features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0)) ?
+ HSA_CAP_SRAM_EDCSUPPORTED : 0;
+ dev->node_props.capability |= ((ctx->features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+ HSA_CAP_MEM_EDCSUPPORTED : 0;
+
+ dev->node_props.capability |= (ctx->features != 0) ?
+ HSA_CAP_RASEVENTNOTIFY : 0;
+ }
+
kfd_debug_print_topology();
if (!res)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 92a19be07344..84710cfd23c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -48,6 +48,10 @@
#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
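+/* RAS capability bits reported via kfd_node_properties.capability */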
+#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
+#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
+#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+
struct kfd_node_properties {
uint64_t hive_id;
uint32_t cpu_cores_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 81127f7d6ed1..995f9df66142 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -111,7 +111,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
- unsigned long possible_crtcs);
+ unsigned long possible_crtcs,
+ const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t link_index);
@@ -137,30 +138,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
-
-
-static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
-};
-
-static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
-};
-
-static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
-};
-
/*
* dm_vblank_get_counter
*
@@ -275,12 +252,22 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
return NULL;
}
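+/* True when variable refresh rate is active on the CRTC state, in either
+ * variable or fixed-rate mode.
+ */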
+static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
+{
+ return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
+ dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+}
+
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
+ struct drm_pending_vblank_event *e;
+ struct dm_crtc_state *acrtc_state;
+ uint32_t vpos, hpos, v_blank_start, v_blank_end;
+ bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -303,26 +290,116 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
- /* Update to correct count(s) if racing with vblank irq */
- amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+ /* page flip completed. */
+ e = amdgpu_crtc->event;
+ amdgpu_crtc->event = NULL;
- /* wake up userspace */
- if (amdgpu_crtc->event) {
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+ if (!e)
+ WARN_ON(1);
- /* page flip completed. clean up */
- amdgpu_crtc->event = NULL;
+ acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
+ vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+
+ /* Fixed refresh rate, or VRR scanout position outside front-porch? */
+ if (!vrr_active ||
+ !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
+ &v_blank_end, &hpos, &vpos) ||
+ (vpos < v_blank_start)) {
+ /* Update to correct count and vblank timestamp if racing with
+ * vblank irq. This also updates to the correct vblank timestamp
+ * even in VRR mode, as scanout is past the front-porch atm.
+ */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
- } else
- WARN_ON(1);
+ /* Wake up userspace by sending the pageflip event with proper
+ * count and timestamp of vblank of flip completion.
+ */
+ if (e) {
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
+
+ /* Event sent, so done with vblank for this flip */
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ }
+ } else if (e) {
+ /* VRR active and inside front-porch: vblank count and
+ * timestamp for pageflip event will only be up to date after
+ * drm_crtc_handle_vblank() has been executed from late vblank
+ * irq handler after start of back-porch (vline 0). We queue the
+ * pageflip event for send-out by drm_crtc_handle_vblank() with
+ * updated timestamp and count, once it runs after us.
+ *
+ * We need to open-code this instead of using the helper
+ * drm_crtc_arm_vblank_event(), as that helper would
+ * call drm_crtc_accurate_vblank_count(), which we must
+ * not call in VRR mode while we are in front-porch!
+ */
+
+ /* sequence will be replaced by real count during send-out. */
+ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
+ e->pipe = amdgpu_crtc->crtc_id;
+
+ list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
+ e = NULL;
+ }
+
+ /* Keep track of vblank of this flip for flip throttling. We use the
+ * cooked hw counter, as that one is incremented at the start of this
+ * vblank of pageflip completion, so last_flip_vblank is the forbidden
+ * count for queueing new pageflips if vsync + VRR is enabled.
+ */
+ amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
+ amdgpu_crtc->crtc_id);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
- __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+ DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc,
+ vrr_active, (int) !e);
+}
+
+static void dm_vupdate_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dm_crtc_state *acrtc_state;
+ unsigned long flags;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
+
+ if (acrtc) {
+ acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
- drm_crtc_vblank_put(&amdgpu_crtc->base);
+ /* Core vblank handling is done here after end of front-porch in
+ * vrr mode, as vblank timestamping only gives valid results now
+ * that scanout is past the front-porch. This will also deliver
+ * page-flip completion events that have been queued to us
+ * if a pageflip happened inside front-porch.
+ */
+ if (amdgpu_dm_vrr_active(acrtc_state)) {
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /* BTR processing for pre-DCE12 ASICs */
+ if (acrtc_state->stream &&
+ adev->family < AMDGPU_FAMILY_AI) {
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ }
+ }
+ }
}
static void dm_crtc_high_irq(void *interrupt_params)
@@ -331,18 +408,33 @@ static void dm_crtc_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
+ unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
- drm_crtc_handle_vblank(&acrtc->base);
- amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- if (acrtc_state->stream &&
+ DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
+
+ /* Core vblank handling at start of front-porch is only possible
+ * in non-vrr mode, as only then does vblank timestamping give
+ * valid results while still inside the front-porch. Otherwise defer it
+ * to dm_vupdate_high_irq after end of front-porch.
+ */
+ if (!amdgpu_dm_vrr_active(acrtc_state))
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /* Following stuff must happen at start of vblank, for crc
+ * computation and below-the-range btr support in vrr mode.
+ */
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc_state->stream,
@@ -352,6 +444,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
adev->dm.dc,
acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
}
}
@@ -462,6 +555,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
+ init_data.flags.power_down_display_on_boot = true;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -912,9 +1007,16 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
int i;
+ /* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ dc_release_state(dm_state->context);
+ dm_state->context = dc_create_state(dm->dc);
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+ dc_resource_state_construct(dm->dc, dm_state->context);
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -1457,6 +1559,27 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
dm_crtc_high_irq, c_irq_params);
}
+ /* Use VUPDATE interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
/* Use GRPH_PFLIP interrupt */
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
@@ -1542,6 +1665,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
dm_crtc_high_irq, c_irq_params);
}
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at end of each vblank, regardless of state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
@@ -1593,15 +1744,10 @@ static int dm_atomic_get_state(struct drm_atomic_state *state,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_state *priv_state;
- int ret;
if (*dm_state)
return 0;
- ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
- if (ret)
- return ret;
-
priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
if (IS_ERR(priv_state))
return PTR_ERR(priv_state);
@@ -1658,17 +1804,16 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
- new_state->context = dc_create_state();
+ old_state = to_dm_atomic_state(obj->state);
+
+ if (old_state && old_state->context)
+ new_state->context = dc_copy_state(old_state->context);
+
if (!new_state->context) {
kfree(new_state);
return NULL;
}
- old_state = to_dm_atomic_state(obj->state);
- if (old_state && old_state->context)
- dc_resource_state_copy_construct(old_state->context,
- new_state->context);
-
return &new_state->base;
}
@@ -1708,13 +1853,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
-
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- state->context = dc_create_state();
+ state->context = dc_create_state(adev->dm.dc);
if (!state->context) {
kfree(state);
return -ENOMEM;
@@ -1841,39 +1984,42 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
#endif
static int initialize_plane(struct amdgpu_display_manager *dm,
- struct amdgpu_mode_info *mode_info,
- int plane_id)
+ struct amdgpu_mode_info *mode_info, int plane_id,
+ enum drm_plane_type plane_type,
+ const struct dc_plane_cap *plane_cap)
{
struct drm_plane *plane;
unsigned long possible_crtcs;
int ret = 0;
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
- mode_info->planes[plane_id] = plane;
-
if (!plane) {
DRM_ERROR("KMS: Failed to allocate plane\n");
return -ENOMEM;
}
- plane->type = mode_info->plane_type[plane_id];
+ plane->type = plane_type;
/*
- * HACK: IGT tests expect that each plane can only have
- * one possible CRTC. For now, set one CRTC for each
- * plane that is not an underlay, but still allow multiple
- * CRTCs for underlay planes.
+ * HACK: IGT tests expect that the primary plane for a CRTC
+ * can only have one possible CRTC. Only expose support for
+ * any CRTC if they're not going to be used as a primary plane
+ * for a CRTC - like overlay or underlay planes.
*/
possible_crtcs = 1 << plane_id;
if (plane_id >= dm->dc->caps.max_streams)
possible_crtcs = 0xff;
- ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
+ ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
DRM_ERROR("KMS: Failed to initialize plane\n");
+ kfree(plane);
return ret;
}
+ if (mode_info)
+ mode_info->planes[plane_id] = plane;
+
return ret;
}
@@ -1916,8 +2062,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
- int32_t total_overlay_planes, total_primary_planes;
+ int32_t primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
+ const struct dc_plane_cap *plane;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1925,24 +2072,53 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
return -EINVAL;
}
- /* Identify the number of planes to be initialized */
- total_overlay_planes = dm->dc->caps.max_slave_planes;
- total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
+ /* There is one primary plane per CRTC */
+ primary_planes = dm->dc->caps.max_streams;
+ ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
- /* First initialize overlay planes, index starting after primary planes */
- for (i = (total_overlay_planes - 1); i >= 0; i--) {
- if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ /*
+ * Initialize primary planes, implicit planes for legacy IOCTLS.
+ * Order is reversed to match iteration order in atomic check.
+ */
+ for (i = (primary_planes - 1); i >= 0; i--) {
+ plane = &dm->dc->caps.planes[i];
+
+ if (initialize_plane(dm, mode_info, i,
+ DRM_PLANE_TYPE_PRIMARY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
goto fail;
}
}
- /* Initialize primary planes */
- for (i = (total_primary_planes - 1); i >= 0; i--) {
- if (initialize_plane(dm, mode_info, i)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ /*
+ * Initialize overlay planes, index starting after primary planes.
+ * These planes have a higher DRM index than the primary planes since
+ * they should be considered as having a higher z-order.
+ * Order is reversed to match iteration order in atomic check.
+ *
+ * Only support DCN for now, and only expose one so we don't encourage
+ * userspace to use up all the pipes.
+ */
+ for (i = 0; i < dm->dc->caps.max_planes; ++i) {
+ struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
+
+ if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
+ continue;
+
+ if (!plane->blends_with_above || !plane->blends_with_below)
+ continue;
+
+ if (!plane->pixel_format_support.argb8888)
+ continue;
+
+ if (initialize_plane(dm, NULL, primary_planes + i,
+ DRM_PLANE_TYPE_OVERLAY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
+
+ /* Only create one overlay plane. */
+ break;
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
@@ -2042,8 +2218,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
fail:
kfree(aencoder);
kfree(aconnector);
- for (i = 0; i < dm->dc->caps.max_planes; i++)
- kfree(mode_info->planes[i]);
+
return -EINVAL;
}
@@ -2124,53 +2299,45 @@ static int dm_early_init(void *handle)
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_KAVERI:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_FIJI:
case CHIP_TONGA:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_CARRIZO:
adev->mode_info.num_crtc = 3;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
- adev->mode_info.plane_type = dm_plane_type_carizzo;
break;
case CHIP_STONEY:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
- adev->mode_info.plane_type = dm_plane_type_stoney;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -2178,14 +2345,12 @@ static int dm_early_init(void *handle)
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
#endif
default:
@@ -2243,56 +2408,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy,
};
-static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
- struct dc_plane_state *plane_state)
+
+static int fill_dc_scaling_info(const struct drm_plane_state *state,
+ struct dc_scaling_info *scaling_info)
{
- plane_state->src_rect.x = state->src_x >> 16;
- plane_state->src_rect.y = state->src_y >> 16;
- /* we ignore the mantissa for now and do not deal with floating pixels :( */
- plane_state->src_rect.width = state->src_w >> 16;
+ int scale_w, scale_h;
- if (plane_state->src_rect.width == 0)
- return false;
+ memset(scaling_info, 0, sizeof(*scaling_info));
- plane_state->src_rect.height = state->src_h >> 16;
- if (plane_state->src_rect.height == 0)
- return false;
+ /* Source is fixed 16.16 but we ignore mantissa for now... */
+ scaling_info->src_rect.x = state->src_x >> 16;
+ scaling_info->src_rect.y = state->src_y >> 16;
+
+ scaling_info->src_rect.width = state->src_w >> 16;
+ if (scaling_info->src_rect.width == 0)
+ return -EINVAL;
+
+ scaling_info->src_rect.height = state->src_h >> 16;
+ if (scaling_info->src_rect.height == 0)
+ return -EINVAL;
- plane_state->dst_rect.x = state->crtc_x;
- plane_state->dst_rect.y = state->crtc_y;
+ scaling_info->dst_rect.x = state->crtc_x;
+ scaling_info->dst_rect.y = state->crtc_y;
if (state->crtc_w == 0)
- return false;
+ return -EINVAL;
- plane_state->dst_rect.width = state->crtc_w;
+ scaling_info->dst_rect.width = state->crtc_w;
if (state->crtc_h == 0)
- return false;
+ return -EINVAL;
- plane_state->dst_rect.height = state->crtc_h;
+ scaling_info->dst_rect.height = state->crtc_h;
- plane_state->clip_rect = plane_state->dst_rect;
+ /* DRM doesn't specify clipping on destination output. */
+ scaling_info->clip_rect = scaling_info->dst_rect;
- switch (state->rotation & DRM_MODE_ROTATE_MASK) {
- case DRM_MODE_ROTATE_0:
- plane_state->rotation = ROTATION_ANGLE_0;
- break;
- case DRM_MODE_ROTATE_90:
- plane_state->rotation = ROTATION_ANGLE_90;
- break;
- case DRM_MODE_ROTATE_180:
- plane_state->rotation = ROTATION_ANGLE_180;
- break;
- case DRM_MODE_ROTATE_270:
- plane_state->rotation = ROTATION_ANGLE_270;
- break;
- default:
- plane_state->rotation = ROTATION_ANGLE_0;
- break;
- }
+ /* TODO: Validate scaling per-format with DC plane caps */
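+ /* Ratios below are in 1/1000 units; scaling outside 0.25x..16x is rejected. */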
+ scale_w = scaling_info->dst_rect.width * 1000 /
+ scaling_info->src_rect.width;
- return true;
+ if (scale_w < 250 || scale_w > 16000)
+ return -EINVAL;
+
+ scale_h = scaling_info->dst_rect.height * 1000 /
+ scaling_info->src_rect.height;
+
+ if (scale_h < 250 || scale_h > 16000)
+ return -EINVAL;
+
+ /*
+ * The "scaling_quality" can be ignored for now, quality = 0 has DC
+ * assume reasonable defaults based on the format.
+ */
+
+ return 0;
}
+
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags)
{
@@ -2321,10 +2493,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
return offset ? (address + offset * 256) : 0;
}
-static bool fill_plane_dcc_attributes(struct amdgpu_device *adev,
- const struct amdgpu_framebuffer *afb,
- struct dc_plane_state *plane_state,
- uint64_t info)
+static int
+fill_plane_dcc_attributes(struct amdgpu_device *adev,
+ const struct amdgpu_framebuffer *afb,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const union plane_size *plane_size,
+ const union dc_tiling_info *tiling_info,
+ const uint64_t info,
+ struct dc_plane_dcc_param *dcc,
+ struct dc_plane_address *address)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
@@ -2337,133 +2515,103 @@ static bool fill_plane_dcc_attributes(struct amdgpu_device *adev,
memset(&output, 0, sizeof(output));
if (!offset)
- return false;
+ return 0;
+
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
if (!dc->cap_funcs.get_dcc_compression_cap)
- return false;
+ return -EINVAL;
- input.format = plane_state->format;
- input.surface_size.width =
- plane_state->plane_size.grph.surface_size.width;
- input.surface_size.height =
- plane_state->plane_size.grph.surface_size.height;
- input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;
+ input.format = format;
+ input.surface_size.width = plane_size->grph.surface_size.width;
+ input.surface_size.height = plane_size->grph.surface_size.height;
+ input.swizzle_mode = tiling_info->gfx9.swizzle;
- if (plane_state->rotation == ROTATION_ANGLE_0 ||
- plane_state->rotation == ROTATION_ANGLE_180)
+ if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
input.scan = SCAN_DIRECTION_HORIZONTAL;
- else if (plane_state->rotation == ROTATION_ANGLE_90 ||
- plane_state->rotation == ROTATION_ANGLE_270)
+ else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
input.scan = SCAN_DIRECTION_VERTICAL;
if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
- return false;
+ return -EINVAL;
if (!output.capable)
- return false;
+ return -EINVAL;
if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
- return false;
+ return -EINVAL;
- plane_state->dcc.enable = 1;
- plane_state->dcc.grph.meta_pitch =
+ dcc->enable = 1;
+ dcc->grph.meta_pitch =
AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
- plane_state->dcc.grph.independent_64b_blks = i64b;
+ dcc->grph.independent_64b_blks = i64b;
dcc_address = get_dcc_address(afb->address, info);
- plane_state->address.grph.meta_addr.low_part =
- lower_32_bits(dcc_address);
- plane_state->address.grph.meta_addr.high_part =
- upper_32_bits(dcc_address);
+ address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
+ address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
- return true;
+ return 0;
}
-static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
- struct dc_plane_state *plane_state,
- const struct amdgpu_framebuffer *amdgpu_fb)
-{
- uint64_t tiling_flags;
- unsigned int awidth;
- const struct drm_framebuffer *fb = &amdgpu_fb->base;
- int ret = 0;
- struct drm_format_name_buf format_name;
-
- ret = get_fb_info(
- amdgpu_fb,
- &tiling_flags);
-
- if (ret)
- return ret;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
- break;
- case DRM_FORMAT_RGB565:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
- break;
- case DRM_FORMAT_NV21:
- plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
- break;
- case DRM_FORMAT_NV12:
- plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
- break;
- default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(fb->format->format, &format_name));
- return -EINVAL;
- }
-
- memset(&plane_state->address, 0, sizeof(plane_state->address));
- memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
- memset(&plane_state->dcc, 0, sizeof(plane_state->dcc));
-
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
- plane_state->plane_size.grph.surface_size.x = 0;
- plane_state->plane_size.grph.surface_size.y = 0;
- plane_state->plane_size.grph.surface_size.width = fb->width;
- plane_state->plane_size.grph.surface_size.height = fb->height;
- plane_state->plane_size.grph.surface_pitch =
- fb->pitches[0] / fb->format->cpp[0];
- /* TODO: unhardcode */
- plane_state->color_space = COLOR_SPACE_SRGB;
+static int
+fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ const struct amdgpu_framebuffer *afb,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const uint64_t tiling_flags,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ struct dc_plane_address *address)
+{
+ const struct drm_framebuffer *fb = &afb->base;
+ int ret;
+ memset(tiling_info, 0, sizeof(*tiling_info));
+ memset(plane_size, 0, sizeof(*plane_size));
+ memset(dcc, 0, sizeof(*dcc));
+ memset(address, 0, sizeof(*address));
+
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_size->grph.surface_size.x = 0;
+ plane_size->grph.surface_size.y = 0;
+ plane_size->grph.surface_size.width = fb->width;
+ plane_size->grph.surface_size.height = fb->height;
+ plane_size->grph.surface_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+
+ address->type = PLN_ADDR_TYPE_GRAPHICS;
+ address->grph.addr.low_part = lower_32_bits(afb->address);
+ address->grph.addr.high_part = upper_32_bits(afb->address);
} else {
- awidth = ALIGN(fb->width, 64);
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
- plane_state->plane_size.video.luma_size.x = 0;
- plane_state->plane_size.video.luma_size.y = 0;
- plane_state->plane_size.video.luma_size.width = awidth;
- plane_state->plane_size.video.luma_size.height = fb->height;
- /* TODO: unhardcode */
- plane_state->plane_size.video.luma_pitch = awidth;
-
- plane_state->plane_size.video.chroma_size.x = 0;
- plane_state->plane_size.video.chroma_size.y = 0;
- plane_state->plane_size.video.chroma_size.width = awidth;
- plane_state->plane_size.video.chroma_size.height = fb->height;
- plane_state->plane_size.video.chroma_pitch = awidth / 2;
-
- /* TODO: unhardcode */
- plane_state->color_space = COLOR_SPACE_YCBCR709;
+ uint64_t chroma_addr = afb->address + fb->offsets[1];
+
+ plane_size->video.luma_size.x = 0;
+ plane_size->video.luma_size.y = 0;
+ plane_size->video.luma_size.width = fb->width;
+ plane_size->video.luma_size.height = fb->height;
+ plane_size->video.luma_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+
+ plane_size->video.chroma_size.x = 0;
+ plane_size->video.chroma_size.y = 0;
+ /* TODO: set these based on surface format */
+ plane_size->video.chroma_size.width = fb->width / 2;
+ plane_size->video.chroma_size.height = fb->height / 2;
+
+ plane_size->video.chroma_pitch =
+ fb->pitches[1] / fb->format->cpp[1];
+
+ address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ address->video_progressive.luma_addr.low_part =
+ lower_32_bits(afb->address);
+ address->video_progressive.luma_addr.high_part =
+ upper_32_bits(afb->address);
+ address->video_progressive.chroma_addr.low_part =
+ lower_32_bits(chroma_addr);
+ address->video_progressive.chroma_addr.high_part =
+ upper_32_bits(chroma_addr);
}
/* Fill GFX8 params */
@@ -2477,21 +2625,21 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
/* XXX fix me for VI */
- plane_state->tiling_info.gfx8.num_banks = num_banks;
- plane_state->tiling_info.gfx8.array_mode =
+ tiling_info->gfx8.num_banks = num_banks;
+ tiling_info->gfx8.array_mode =
DC_ARRAY_2D_TILED_THIN1;
- plane_state->tiling_info.gfx8.tile_split = tile_split;
- plane_state->tiling_info.gfx8.bank_width = bankw;
- plane_state->tiling_info.gfx8.bank_height = bankh;
- plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
- plane_state->tiling_info.gfx8.tile_mode =
+ tiling_info->gfx8.tile_split = tile_split;
+ tiling_info->gfx8.bank_width = bankw;
+ tiling_info->gfx8.bank_height = bankh;
+ tiling_info->gfx8.tile_aspect = mtaspect;
+ tiling_info->gfx8.tile_mode =
DC_ADDR_SURF_MICRO_TILING_DISPLAY;
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
== DC_ARRAY_1D_TILED_THIN1) {
- plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+ tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
}
- plane_state->tiling_info.gfx8.pipe_config =
+ tiling_info->gfx8.pipe_config =
AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
if (adev->asic_type == CHIP_VEGA10 ||
@@ -2499,60 +2647,249 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
- plane_state->tiling_info.gfx9.num_pipes =
+ tiling_info->gfx9.num_pipes =
adev->gfx.config.gb_addr_config_fields.num_pipes;
- plane_state->tiling_info.gfx9.num_banks =
+ tiling_info->gfx9.num_banks =
adev->gfx.config.gb_addr_config_fields.num_banks;
- plane_state->tiling_info.gfx9.pipe_interleave =
+ tiling_info->gfx9.pipe_interleave =
adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
- plane_state->tiling_info.gfx9.num_shader_engines =
+ tiling_info->gfx9.num_shader_engines =
adev->gfx.config.gb_addr_config_fields.num_se;
- plane_state->tiling_info.gfx9.max_compressed_frags =
+ tiling_info->gfx9.max_compressed_frags =
adev->gfx.config.gb_addr_config_fields.max_compress_frags;
- plane_state->tiling_info.gfx9.num_rb_per_se =
+ tiling_info->gfx9.num_rb_per_se =
adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
- plane_state->tiling_info.gfx9.swizzle =
+ tiling_info->gfx9.swizzle =
AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
- plane_state->tiling_info.gfx9.shaderEnable = 1;
+ tiling_info->gfx9.shaderEnable = 1;
- fill_plane_dcc_attributes(adev, amdgpu_fb, plane_state,
- tiling_flags);
+ ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
+ plane_size, tiling_info,
+ tiling_flags, dcc, address);
+ if (ret)
+ return ret;
}
- plane_state->visible = true;
- plane_state->scaling_quality.h_taps_c = 0;
- plane_state->scaling_quality.v_taps_c = 0;
+ return 0;
+}
- /* is this needed? is plane_state zeroed at allocation? */
- plane_state->scaling_quality.h_taps = 0;
- plane_state->scaling_quality.v_taps = 0;
- plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
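+/* Derive DC blending parameters from a DRM plane state: per-pixel alpha
+ * is enabled for premultiplied formats that carry an alpha channel, and
+ * DRM's 16-bit plane alpha property is scaled down to DC's 8-bit global
+ * alpha value. Non-overlay planes keep the opaque defaults.
+ */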
+static void
+fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+ bool *per_pixel_alpha, bool *global_alpha,
+ int *global_alpha_value)
+{
+ *per_pixel_alpha = false;
+ *global_alpha = false;
+ *global_alpha_value = 0xff;
- return ret;
+ if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return;
+ if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ static const uint32_t alpha_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ };
+ uint32_t format = plane_state->fb->format->format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
+ if (format == alpha_formats[i]) {
+ *per_pixel_alpha = true;
+ break;
+ }
+ }
+ }
+
+ if (plane_state->alpha < 0xffff) {
+ *global_alpha = true;
+ *global_alpha_value = plane_state->alpha >> 8;
+ }
}
-static int fill_plane_attributes(struct amdgpu_device *adev,
- struct dc_plane_state *dc_plane_state,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static int
+fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ const enum surface_pixel_format format,
+ enum dc_color_space *color_space)
{
- const struct amdgpu_framebuffer *amdgpu_fb =
+ bool full_range;
+
+ *color_space = COLOR_SPACE_SRGB;
+
+ /* DRM color properties only affect non-RGB formats. */
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
+
+ full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
+
+ switch (plane_state->color_encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR601;
+ else
+ *color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT709:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR709;
+ else
+ *color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT2020:
+ if (full_range)
+ *color_space = COLOR_SPACE_2020_YCBCR;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ const struct drm_plane_state *plane_state,
+ const uint64_t tiling_flags,
+ struct dc_plane_info *plane_info,
+ struct dc_plane_address *address)
+{
+ const struct drm_framebuffer *fb = plane_state->fb;
+ const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
- const struct drm_crtc *crtc = plane_state->crtc;
- int ret = 0;
+ struct drm_format_name_buf format_name;
+ int ret;
+
+ memset(plane_info, 0, sizeof(*plane_info));
- if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_info->format =
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ default:
+ DRM_ERROR(
+ "Unsupported screen format %s\n",
+ drm_get_format_name(fb->format->format, &format_name));
return -EINVAL;
+ }
+
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_info->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_info->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_info->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ }
- ret = fill_plane_attributes_from_fb(
- crtc->dev->dev_private,
- dc_plane_state,
- amdgpu_fb);
+ plane_info->visible = true;
+ plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+ ret = fill_plane_color_attributes(plane_state, plane_info->format,
+ &plane_info->color_space);
if (ret)
return ret;
+ ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ plane_info->rotation, tiling_flags,
+ &plane_info->tiling_info,
+ &plane_info->plane_size,
+ &plane_info->dcc, address);
+ if (ret)
+ return ret;
+
+ fill_blending_from_plane_state(
+ plane_state, &plane_info->per_pixel_alpha,
+ &plane_info->global_alpha, &plane_info->global_alpha_value);
+
+ return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ struct dc_scaling_info scaling_info;
+ struct dc_plane_info plane_info;
+ uint64_t tiling_flags;
+ int ret;
+
+ ret = fill_dc_scaling_info(plane_state, &scaling_info);
+ if (ret)
+ return ret;
+
+ dc_plane_state->src_rect = scaling_info.src_rect;
+ dc_plane_state->dst_rect = scaling_info.dst_rect;
+ dc_plane_state->clip_rect = scaling_info.clip_rect;
+ dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+
+ ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ if (ret)
+ return ret;
+
+ ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
+ &plane_info,
+ &dc_plane_state->address);
+ if (ret)
+ return ret;
+
+ dc_plane_state->format = plane_info.format;
+ dc_plane_state->color_space = plane_info.color_space;
+ dc_plane_state->plane_size = plane_info.plane_size;
+ dc_plane_state->rotation = plane_info.rotation;
+ dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+ dc_plane_state->stereo_format = plane_info.stereo_format;
+ dc_plane_state->tiling_info = plane_info.tiling_info;
+ dc_plane_state->visible = plane_info.visible;
+ dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->global_alpha = plane_info.global_alpha;
+ dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+ dc_plane_state->dcc = plane_info.dcc;
+
/*
* Always set input transfer function, since plane state is refreshed
* every time.
@@ -3124,6 +3461,8 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
dc_stream_retain(state->stream);
}
+ state->active_planes = cur->active_planes;
+ state->interrupts_enabled = cur->interrupts_enabled;
state->vrr_params = cur->vrr_params;
state->vrr_infopacket = cur->vrr_infopacket;
state->abm_level = cur->abm_level;
@@ -3136,12 +3475,41 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
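+/* Enable/disable the VUPDATE interrupt for a CRTC. dm_set_vblank() only
+ * needs it while VRR is active, since vblank handling is then deferred
+ * to dm_vupdate_high_irq() after the end of the front-porch.
+ */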
+static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ int rc;
+
+ irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
+
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+
+ DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
+ return rc;
+}
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+ int rc = 0;
+
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_vrr_active(acrtc_state))
+ rc = dm_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = dm_set_vupdate_irq(crtc, false);
+ }
+
+ if (rc)
+ return rc;
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -3519,6 +3887,76 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
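+/* True if the new CRTC state has a cursor plane attached. */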
+static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_device *dev = new_crtc_state->crtc->dev;
+ struct drm_plane *plane;
+
+ drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return true;
+ }
+
+ return false;
+}
+
+static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_atomic_state *state = new_crtc_state->state;
+ struct drm_plane *plane;
+ int num_active = 0;
+
+ drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
+ struct drm_plane_state *new_plane_state;
+
+ /* Cursor planes are "fake": the cursor is programmed through stream attributes, not as a DC plane. */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state) {
+ /*
+ * The plane is enabled on the CRTC and hasn't changed
+ * state. This means that it previously passed
+ * validation and is therefore enabled.
+ */
+ num_active += 1;
+ continue;
+ }
+
+ /* We need a framebuffer to be considered enabled. */
+ num_active += (new_plane_state->fb != NULL);
+ }
+
+ return num_active;
+}
+
+/*
+ * Sets whether interrupts should be enabled on a specific CRTC.
+ * We require that the stream be enabled and that there exist active
+ * DC planes on the stream.
+ */
+static void
+dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ dm_new_crtc_state->active_planes = 0;
+ dm_new_crtc_state->interrupts_enabled = false;
+
+ if (!dm_new_crtc_state->stream)
+ return;
+
+ dm_new_crtc_state->active_planes =
+ count_crtc_active_planes(new_crtc_state);
+
+ dm_new_crtc_state->interrupts_enabled =
+ dm_new_crtc_state->active_planes > 0;
+}
+
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -3527,6 +3965,14 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
int ret = -EINVAL;
+ /*
+ * Update interrupt state for the CRTC. This needs to happen whenever
+ * the CRTC has changed or whenever any of its planes have changed.
+ * Atomic check satisfies both of these requirements since the CRTC
+ * is added to the state by DRM during drm_atomic_helper_check_planes.
+ */
+ dm_update_crtc_interrupt_state(crtc, state);
+
if (unlikely(!dm_crtc_state->stream &&
modeset_required(state, NULL, dm_crtc_state->stream))) {
WARN_ON(1);
@@ -3537,6 +3983,15 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
if (!dm_crtc_state->stream)
return 0;
+ /*
+ * We want at least one hardware plane enabled to use
+ * the stream with a cursor enabled.
+ */
+ if (state->enable && state->active &&
+ does_crtc_have_active_cursor(state) &&
+ dm_crtc_state->active_planes == 0)
+ return -EINVAL;
+
if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
return 0;
@@ -3583,11 +4038,8 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
WARN_ON(amdgpu_state == NULL);
- if (amdgpu_state) {
- plane->state = &amdgpu_state->base;
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
- }
+ if (amdgpu_state)
+ __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}
static struct drm_plane_state *
@@ -3637,10 +4089,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
- uint64_t chroma_addr = 0;
struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
- uint64_t tiling_flags, dcc_address;
- unsigned int awidth;
+ uint64_t tiling_flags;
uint32_t domain;
int r;
@@ -3693,29 +4143,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
- plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
-
- dcc_address =
- get_dcc_address(afb->address, tiling_flags);
- plane_state->address.grph.meta_addr.low_part =
- lower_32_bits(dcc_address);
- plane_state->address.grph.meta_addr.high_part =
- upper_32_bits(dcc_address);
- } else {
- awidth = ALIGN(new_state->fb->width, 64);
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
- plane_state->address.video_progressive.luma_addr.low_part
- = lower_32_bits(afb->address);
- plane_state->address.video_progressive.luma_addr.high_part
- = upper_32_bits(afb->address);
- chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
- plane_state->address.video_progressive.chroma_addr.low_part
- = lower_32_bits(chroma_addr);
- plane_state->address.video_progressive.chroma_addr.high_part
- = upper_32_bits(chroma_addr);
- }
+ fill_plane_buffer_attributes(
+ adev, afb, plane_state->format, plane_state->rotation,
+ tiling_flags, &plane_state->tiling_info,
+ &plane_state->plane_size, &plane_state->dcc,
+ &plane_state->address);
}
return 0;
@@ -3747,13 +4179,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
{
struct amdgpu_device *adev = plane->dev->dev_private;
struct dc *dc = adev->dm.dc;
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct dm_plane_state *dm_plane_state;
+ struct dc_scaling_info scaling_info;
+ int ret;
+
+ dm_plane_state = to_dm_plane_state(state);
if (!dm_plane_state->dc_state)
return 0;
- if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
- return -EINVAL;
+ ret = fill_dc_scaling_info(state, &scaling_info);
+ if (ret)
+ return ret;
if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
return 0;
@@ -3826,64 +4263,115 @@ static const uint32_t rgb_formats[] = {
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB565,
};
-static const uint32_t yuv_formats[] = {
- DRM_FORMAT_NV12,
- DRM_FORMAT_NV21,
+static const uint32_t overlay_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB565
};
static const u32 cursor_formats[] = {
DRM_FORMAT_ARGB8888
};
-static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct drm_plane *plane,
- unsigned long possible_crtcs)
+static int get_plane_formats(const struct drm_plane *plane,
+ const struct dc_plane_cap *plane_cap,
+ uint32_t *formats, int max_formats)
{
- int res = -EPERM;
+ int i, num_formats = 0;
+
+ /*
+ * TODO: Query support for each group of formats directly from
+ * DC plane caps. This will require adding more formats to the
+ * caps list.
+ */
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- rgb_formats,
- ARRAY_SIZE(rgb_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = rgb_formats[i];
+ }
+
+ if (plane_cap && plane_cap->pixel_format_support.nv12)
+ formats[num_formats++] = DRM_FORMAT_NV12;
break;
+
case DRM_PLANE_TYPE_OVERLAY:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- yuv_formats,
- ARRAY_SIZE(yuv_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = overlay_formats[i];
+ }
break;
+
case DRM_PLANE_TYPE_CURSOR:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- cursor_formats,
- ARRAY_SIZE(cursor_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = cursor_formats[i];
+ }
break;
}
+ return num_formats;
+}
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct dc_plane_cap *plane_cap)
+{
+ uint32_t formats[32];
+ int num_formats;
+ int res = -EPERM;
+
+ num_formats = get_plane_formats(plane, plane_cap, formats,
+ ARRAY_SIZE(formats));
+
+ res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
+ &dm_plane_funcs, formats, num_formats,
+ NULL, plane->type, NULL);
+ if (res)
+ return res;
+
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
+ plane_cap && plane_cap->per_pixel_alpha) {
+ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI);
+
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane, blend_caps);
+ }
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+ plane_cap && plane_cap->pixel_format_support.nv12) {
+ /* This only affects YUV formats. */
+ drm_plane_create_color_properties(
+ plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
+ }
+
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
-
- return res;
+ return 0;
}
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
@@ -3900,7 +4388,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
goto fail;
cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
- res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
if (!acrtc)
@@ -4334,6 +4822,8 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
DRM_ERROR("Failed to create debugfs for connector");
goto out_free;
}
+ aconnector->debugfs_dpcd_address = 0;
+ aconnector->debugfs_dpcd_size = 0;
#endif
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
@@ -4473,9 +4963,13 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
x = plane->state->crtc_x;
y = plane->state->crtc_y;
- /* avivo cursor are offset into the total surface */
- x += crtc->primary->state->src_x >> 16;
- y += crtc->primary->state->src_y >> 16;
+
+ if (crtc->primary->state) {
+ /* avivo cursor are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
+ }
+
if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0;
@@ -4533,6 +5027,7 @@ static void handle_cursor_update(struct drm_plane *plane,
amdgpu_crtc->cursor_width = plane->state->crtc_w;
amdgpu_crtc->cursor_height = plane->state->crtc_h;
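+ /* Zero-initialize so any attribute fields not set below aren't stack garbage. */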
+ memset(&attributes, 0, sizeof(attributes));
attributes.address.high_part = upper_32_bits(address);
attributes.address.low_part = lower_32_bits(address);
attributes.width = plane->state->crtc_w;
@@ -4581,9 +5076,10 @@ static void update_freesync_state_on_stream(
struct dc_plane_state *surface,
u32 flip_timestamp_in_us)
{
- struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
+ struct mod_vrr_params vrr_params;
struct dc_info_packet vrr_infopacket = {0};
- struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ unsigned long flags;
if (!new_stream)
return;
@@ -4596,19 +5092,8 @@ static void update_freesync_state_on_stream(
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
return;
- if (new_crtc_state->vrr_supported &&
- config.min_refresh_in_uhz &&
- config.max_refresh_in_uhz) {
- config.state = new_crtc_state->base.vrr_enabled ?
- VRR_STATE_ACTIVE_VARIABLE :
- VRR_STATE_INACTIVE;
- } else {
- config.state = VRR_STATE_UNSUPPORTED;
- }
-
- mod_freesync_build_vrr_params(dm->freesync_module,
- new_stream,
- &config, &vrr_params);
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ vrr_params = new_crtc_state->vrr_params;
if (surface) {
mod_freesync_handle_preflip(
@@ -4617,6 +5102,12 @@ static void update_freesync_state_on_stream(
new_stream,
flip_timestamp_in_us,
&vrr_params);
+
+ if (adev->family < AMDGPU_FAMILY_AI &&
+ amdgpu_dm_vrr_active(new_crtc_state)) {
+ mod_freesync_handle_v_update(dm->freesync_module,
+ new_stream, &vrr_params);
+ }
}
mod_freesync_build_vrr_infopacket(
@@ -4648,6 +5139,100 @@ static void update_freesync_state_on_stream(
new_crtc_state->base.crtc->base.id,
(int)new_crtc_state->base.vrr_enabled,
(int)vrr_params.state);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+
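+/*
+ * Build the VRR parameters for the new state ahead of the flip, under the
+ * event lock; update_freesync_state_on_stream() then consumes vrr_params
+ * at flip time.
+ */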
+static void pre_update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state)
+{
+ struct dc_stream_state *new_stream = new_crtc_state->stream;
+ struct mod_vrr_params vrr_params;
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ unsigned long flags;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ vrr_params = new_crtc_state->vrr_params;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
+
+ new_crtc_state->freesync_timing_changed |=
+ (memcmp(&new_crtc_state->vrr_params.adjust,
+ &vrr_params.adjust,
+ sizeof(vrr_params.adjust)) != 0);
+
+ new_crtc_state->vrr_params = vrr_params;
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+
+static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
+
+ if (!old_vrr_active && new_vrr_active) {
+ /* Transition VRR inactive -> active:
+ * While VRR is active, we must not disable vblank irq, as a
+ * re-enable after a disable would compute bogus vblank/pflip
+ * timestamps if it happened inside the display front porch.
+ *
+ * We also need vupdate irq for the actual core vblank handling
+ * at end of vblank.
+ */
+ dm_set_vupdate_irq(new_state->base.crtc, true);
+ drm_crtc_vblank_get(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ } else if (old_vrr_active && !new_vrr_active) {
+ /* Transition VRR active -> inactive:
+ * Allow vblank irq disable again for fixed refresh rate.
+ */
+ dm_set_vupdate_irq(new_state->base.crtc, false);
+ drm_crtc_vblank_put(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ }
+}
+
+static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ int i;
+
+ /*
+ * TODO: Make this per-stream so we don't issue redundant updates for
+ * commits with multiple streams.
+ */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
@@ -4655,9 +5240,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
- bool *wait_for_vblank)
+ bool wait_for_vblank)
{
- uint32_t i, r;
+ uint32_t i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -4667,42 +5252,43 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
- int flip_count = 0, planes_count = 0, vpos, hpos;
+ int planes_count = 0, vpos, hpos;
+ long r;
unsigned long flags;
struct amdgpu_bo *abo;
- uint64_t tiling_flags, dcc_address;
- uint32_t target, target_vblank;
- uint64_t last_flip_vblank;
- bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
-
- struct {
- struct dc_surface_update surface_updates[MAX_SURFACES];
- struct dc_flip_addrs flip_addrs[MAX_SURFACES];
- struct dc_stream_update stream_update;
- } *flip;
-
+ uint64_t tiling_flags;
+ uint32_t target_vblank, last_flip_vblank;
+ bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ bool pflip_present = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } *full;
+ } *bundle;
- flip = kzalloc(sizeof(*flip), GFP_KERNEL);
- full = kzalloc(sizeof(*full), GFP_KERNEL);
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
- if (!flip || !full) {
- dm_error("Failed to allocate update bundles\n");
+ if (!bundle) {
+ dm_error("Failed to allocate update bundle\n");
goto cleanup;
}
+ /*
+ * Disable the cursor first if we're disabling all the planes.
+ * It'll remain on the screen after the planes are re-enabled
+ * if we don't.
+ */
+ if (acrtc_state->active_planes == 0)
+ amdgpu_dm_commit_cursors(state);
+
/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
- struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
- bool pflip_needed;
+ bool plane_needs_flip;
struct dc_plane_state *dc_plane;
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -4717,122 +5303,96 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (!new_crtc_state->active)
continue;
- pflip_needed = old_plane_state->fb &&
- old_plane_state->fb != new_plane_state->fb;
-
dc_plane = dm_new_plane_state->dc_state;
- if (pflip_needed) {
- /*
- * Assume even ONE crtc with immediate flip means
- * entire can't wait for VBLANK
- * TODO Check if it's correct
- */
- if (new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
- *wait_for_vblank = false;
+ bundle->surface_updates[planes_count].surface = dc_plane;
+ if (new_pcrtc_state->color_mgmt_changed) {
+ bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ }
- /*
- * TODO This might fail and hence better not used, wait
- * explicitly on fences instead
- * and in general should be called for
- * blocking commit to as per framework helpers
- */
- abo = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- DRM_ERROR("failed to reserve buffer before flip\n");
+ fill_dc_scaling_info(new_plane_state,
+ &bundle->scaling_infos[planes_count]);
- /*
- * Wait for all fences on this FB. Do limited wait to avoid
- * deadlock during GPU reset when this fence will not signal
- * but we hold reservation lock for the BO.
- */
- r = reservation_object_wait_timeout_rcu(abo->tbo.resv,
- true, false,
- msecs_to_jiffies(5000));
- if (unlikely(r == 0))
- DRM_ERROR("Waiting for fences timed out.");
+ bundle->surface_updates[planes_count].scaling_info =
+ &bundle->scaling_infos[planes_count];
+ plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
+ pflip_present = pflip_present || plane_needs_flip;
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ if (!plane_needs_flip) {
+ planes_count += 1;
+ continue;
+ }
- amdgpu_bo_unreserve(abo);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
- flip->flip_addrs[flip_count].address.grph.addr.low_part = lower_32_bits(afb->address);
- flip->flip_addrs[flip_count].address.grph.addr.high_part = upper_32_bits(afb->address);
+ /*
+ * Wait for all fences on this FB. Do limited wait to avoid
+ * deadlock during GPU reset when this fence will not signal
+ * but we hold reservation lock for the BO.
+ */
+ r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
+ false,
+ msecs_to_jiffies(5000));
+ if (unlikely(r <= 0))
+ DRM_ERROR("Waiting for fences timed out or interrupted!");
- dcc_address = get_dcc_address(afb->address, tiling_flags);
- flip->flip_addrs[flip_count].address.grph.meta_addr.low_part = lower_32_bits(dcc_address);
- flip->flip_addrs[flip_count].address.grph.meta_addr.high_part = upper_32_bits(dcc_address);
+ /*
+ * TODO: This might fail and hence is better not used; wait
+ * explicitly on fences instead. In general this should be
+ * called for blocking commits, as per the framework helpers.
+ */
+ r = amdgpu_bo_reserve(abo, true);
+ if (unlikely(r != 0))
+ DRM_ERROR("failed to reserve buffer before flip\n");
- flip->flip_addrs[flip_count].flip_immediate =
- (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- timestamp_ns = ktime_get_ns();
- flip->flip_addrs[flip_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
- flip->surface_updates[flip_count].flip_addr = &flip->flip_addrs[flip_count];
- flip->surface_updates[flip_count].surface = dc_plane;
+ amdgpu_bo_unreserve(abo);
- if (!flip->surface_updates[flip_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
- acrtc_attach->crtc_id);
- continue;
- }
+ fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state, tiling_flags,
+ &bundle->plane_infos[planes_count],
+ &bundle->flip_addrs[planes_count].address);
- if (plane == pcrtc->primary)
- update_freesync_state_on_stream(
- dm,
- acrtc_state,
- acrtc_state->stream,
- dc_plane,
- flip->flip_addrs[flip_count].flip_timestamp_in_us);
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
- DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
- __func__,
- flip->flip_addrs[flip_count].address.grph.addr.high_part,
- flip->flip_addrs[flip_count].address.grph.addr.low_part);
+ bundle->flip_addrs[planes_count].flip_immediate =
+ (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
- flip_count += 1;
- }
+ timestamp_ns = ktime_get_ns();
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
+ bundle->surface_updates[planes_count].surface = dc_plane;
- full->surface_updates[planes_count].surface = dc_plane;
- if (new_pcrtc_state->color_mgmt_changed) {
- full->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
- full->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ if (!bundle->surface_updates[planes_count].surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc_attach->crtc_id);
+ continue;
}
+ if (plane == pcrtc->primary)
+ update_freesync_state_on_stream(
+ dm,
+ acrtc_state,
+ acrtc_state->stream,
+ dc_plane,
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
- full->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality;
- full->scaling_infos[planes_count].src_rect = dc_plane->src_rect;
- full->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
- full->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
- full->surface_updates[planes_count].scaling_info = &full->scaling_infos[planes_count];
-
-
- full->plane_infos[planes_count].color_space = dc_plane->color_space;
- full->plane_infos[planes_count].format = dc_plane->format;
- full->plane_infos[planes_count].plane_size = dc_plane->plane_size;
- full->plane_infos[planes_count].rotation = dc_plane->rotation;
- full->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
- full->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
- full->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
- full->plane_infos[planes_count].visible = dc_plane->visible;
- full->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
- full->plane_infos[planes_count].dcc = dc_plane->dcc;
- full->surface_updates[planes_count].plane_info = &full->plane_infos[planes_count];
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
+ __func__,
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
planes_count += 1;
}
- /*
- * TODO: For proper atomic behaviour, we should be calling into DC once with
- * all the changes. However, DC refuses to do pageflips and non-pageflip
- * changes in the same call. Change DC to respect atomic behaviour,
- * hopefully eliminating dc_*_update structs in their entirety.
- */
- if (flip_count) {
+ if (pflip_present) {
if (!vrr_active) {
/* Use old throttling in non-vrr fixed refresh rate mode
* to keep flip scheduling based on target vblank counts
@@ -4840,7 +5400,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* clients using the GLX_OML_sync_control extension or
* DRI3/Present extension with defined target_msc.
*/
- last_flip_vblank = drm_crtc_vblank_count(pcrtc);
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
}
else {
/* For variable refresh rate mode only:
@@ -4856,11 +5416,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
- target = (uint32_t)last_flip_vblank + *wait_for_vblank;
-
- /* Prepare wait for target vblank early - before the fence-waits */
- target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
- amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
+ target_vblank = last_flip_vblank + wait_for_vblank;
/*
* Wait until we're out of the vertical blank period before the one
@@ -4891,54 +5447,102 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->stream) {
if (acrtc_state->freesync_timing_changed)
- flip->stream_update.adjust =
+ bundle->stream_update.adjust =
&acrtc_state->stream->adjust;
if (acrtc_state->freesync_vrr_info_changed)
- flip->stream_update.vrr_infopacket =
+ bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
}
-
- mutex_lock(&dm->dc_lock);
- dc_commit_updates_for_stream(dm->dc,
- flip->surface_updates,
- flip_count,
- acrtc_state->stream,
- &flip->stream_update,
- dc_state);
- mutex_unlock(&dm->dc_lock);
}
- if (planes_count) {
+ /* Update the planes if changed or disable if we don't have any. */
+ if (planes_count || acrtc_state->active_planes == 0) {
if (new_pcrtc_state->mode_changed) {
- full->stream_update.src = acrtc_state->stream->src;
- full->stream_update.dst = acrtc_state->stream->dst;
+ bundle->stream_update.src = acrtc_state->stream->src;
+ bundle->stream_update.dst = acrtc_state->stream->dst;
}
if (new_pcrtc_state->color_mgmt_changed)
- full->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
acrtc_state->stream->abm_level = acrtc_state->abm_level;
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
- full->stream_update.abm_level = &acrtc_state->abm_level;
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
- full->surface_updates,
+ bundle->surface_updates,
planes_count,
acrtc_state->stream,
- &full->stream_update,
+ &bundle->stream_update,
dc_state);
mutex_unlock(&dm->dc_lock);
}
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- handle_cursor_update(plane, old_plane_state);
+ /*
+ * Update cursor state *after* programming all the planes.
+ * This avoids redundant programming in the case where we're going
+ * to be disabling a single plane - those pipes are being disabled.
+ */
+ if (acrtc_state->active_planes)
+ amdgpu_dm_commit_cursors(state);
cleanup:
- kfree(flip);
- kfree(full);
+ kfree(bundle);
+}
+
+/*
+ * Enable interrupts on CRTCs that are newly active, have undergone
+ * a modeset, or have active planes again.
+ *
+ * Done in two passes, based on the for_modeset flag:
+ * Pass 1: For CRTCs going through modeset
+ * Pass 2: For CRTCs going from 0 to n active planes
+ *
+ * Interrupts can only be enabled after the planes are programmed,
+ * so this requires a two-pass approach since we don't want to
+ * just defer the interrupts until after commit planes every time.
+ */
+static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool for_modeset)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(old_crtc_state);
+ bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
+ bool run_pass;
+
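+ /*
+ * Pass 1 handles CRTCs undergoing a modeset; pass 2 handles CRTCs
+ * that kept their mode but previously had interrupts disabled,
+ * i.e. going from 0 to n active planes.
+ */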
+ run_pass = (for_modeset && modeset) ||
+ (!for_modeset && !modeset &&
+ !dm_old_crtc_state->interrupts_enabled);
+
+ if (!run_pass)
+ continue;
+
+ if (!dm_new_crtc_state->interrupts_enabled)
+ continue;
+
+ manage_dm_interrupts(adev, acrtc, true);
+
+#ifdef CONFIG_DEBUG_FS
+ /* The stream has changed so CRC capture needs to be re-enabled. */
+ if (dm_new_crtc_state->crc_enabled) {
+ dm_new_crtc_state->crc_enabled = false;
+ amdgpu_dm_crtc_set_crc_source(crtc, "auto");
+ }
+#endif
+ }
}
/*
@@ -4952,8 +5556,7 @@ cleanup:
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
struct dc_stream_state *stream_state)
{
- stream_state->mode_changed =
- crtc_state->mode_changed || crtc_state->active_changed;
+ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
@@ -4966,30 +5569,41 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
int i;
/*
- * We evade vblanks and pflips on crtc that
- * should be changed. We do it here to flush & disable
- * interrupts before drm_swap_state is called in drm_atomic_helper_commit
- * it will update crtc->dm_crtc_state->stream pointer which is used in
- * the ISRs.
+ * We evade vblank and pflip interrupts on CRTCs that are undergoing
+ * a modeset, are being disabled, or have no active planes.
+ *
+ * It's done in atomic commit rather than commit tail for now since
+ * some of these interrupt handlers access the current CRTC state and
+ * potentially the stream pointer itself.
+ *
+ * Since the atomic state is swapped within atomic commit and not within
+ * commit tail, this would lead to the new state (that hasn't been
+ * committed yet) being accessed from within the handlers.
+ *
+ * TODO: Fix this so we can do this in commit tail and not have to block
+ * in atomic check.
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- if (drm_atomic_crtc_needs_modeset(new_crtc_state)
- && dm_old_crtc_state->stream) {
+ if (dm_old_crtc_state->interrupts_enabled &&
+ (!dm_new_crtc_state->interrupts_enabled ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/*
- * If the stream is removed and CRC capture was
- * enabled on the CRTC the extra vblank reference
- * needs to be dropped since CRC capture will be
- * disabled.
+ * Drop the extra vblank reference added by CRC
+ * capture if applicable.
*/
- if (!dm_new_crtc_state->stream
- && dm_new_crtc_state->crc_enabled) {
+ if (dm_new_crtc_state->crc_enabled)
drm_crtc_vblank_put(crtc);
+
+ /*
+ * Only keep CRC capture enabled if there's
+ * still a stream for the CRTC.
+ */
+ if (!dm_new_crtc_state->stream)
dm_new_crtc_state->crc_enabled = false;
- }
manage_dm_interrupts(adev, acrtc, false);
}
@@ -5036,7 +5650,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_state = dm_state->context;
} else {
/* No state changes, retain current state. */
- dc_state_temp = dc_create_state();
+ dc_state_temp = dc_create_state(dm->dc);
ASSERT(dc_state_temp);
dc_state = dc_state_temp;
dc_resource_state_copy_construct_current(dm->dc, dc_state);
@@ -5205,45 +5819,41 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- /*
- * loop to enable interrupts on newly arrived crtc
- */
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool modeset_needed;
-
+ new_crtc_state, i) {
if (old_crtc_state->active && !new_crtc_state->active)
crtc_disable_count++;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- modeset_needed = modeset_required(
- new_crtc_state,
- dm_new_crtc_state->stream,
- dm_old_crtc_state->stream);
- if (dm_new_crtc_state->stream == NULL || !modeset_needed)
- continue;
-
- manage_dm_interrupts(adev, acrtc, true);
+ /* Update freesync active state. */
+ pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
-#ifdef CONFIG_DEBUG_FS
- /* The stream has changed so CRC capture needs to re-enabled. */
- if (dm_new_crtc_state->crc_enabled)
- amdgpu_dm_crtc_set_crc_source(crtc, "auto");
-#endif
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
+ dm_new_crtc_state);
}
+ /* Enable interrupts for CRTCs going through a modeset. */
+ amdgpu_dm_enable_crtc_interrupts(dev, state, true);
+
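+ /* Even one CRTC requesting an async flip means the whole commit can't wait for vblank. */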
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
+ if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ wait_for_vblank = false;
+
/* update planes when needed per crtc*/
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
amdgpu_dm_commit_planes(state, dc_state, dev,
- dm, crtc, &wait_for_vblank);
+ dm, crtc, wait_for_vblank);
}
+ /* Enable interrupts for CRTCs going from 0 to n active planes. */
+ amdgpu_dm_enable_crtc_interrupts(dev, state, false);
/*
* send vblank event on all events not handled in flip and
@@ -5483,21 +6093,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
- struct drm_plane_state *new_plane_state = NULL;
new_stream = NULL;
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
acrtc = to_amdgpu_crtc(crtc);
-
- new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
-
- if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
- ret = -EINVAL;
- goto fail;
- }
-
aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
/* TODO This hack should go away */
@@ -5660,6 +6261,9 @@ skip_modeset:
update_stream_scaling_settings(
&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+ /* ABM settings */
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
/*
* Color management settings. We also update color properties
* when a modeset is needed, to ensure it gets reprogrammed.
@@ -5684,6 +6288,69 @@ fail:
return ret;
}
+static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
+ int i;
+
+ /*
+ * TODO: Remove this hack once the checks below are sufficient
+ * to determine when we need to reset all the planes on
+ * the stream.
+ */
+ if (state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+ if (old_plane_state->crtc != new_plane_state->crtc)
+ return true;
+
+ /* old crtc == new_crtc == NULL, plane not in context. */
+ if (!new_plane_state->crtc)
+ return false;
+
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ if (!new_crtc_state)
+ return true;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * If there are any new primary or overlay planes being added or
+ * removed then the z-order can potentially change. To ensure
+ * correct z-order and pipe acquisition the current DC architecture
+ * requires us to remove and recreate all existing planes.
+ *
+ * TODO: Come up with a more elegant solution for this.
+ */
+ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ if (other->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (old_other_state->crtc != new_plane_state->crtc &&
+ new_other_state->crtc != new_plane_state->crtc)
+ continue;
+
+ if (old_other_state->crtc != new_other_state->crtc)
+ return true;
+
+ /* TODO: Remove this once we can handle fast format changes. */
+ if (old_other_state->fb && new_other_state->fb &&
+ old_other_state->fb->format != new_other_state->fb->format)
+ return true;
+ }
+
+ return false;
+}
+
static int dm_update_plane_state(struct dc *dc,
struct drm_atomic_state *state,
struct drm_plane *plane,
@@ -5698,8 +6365,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
- /* TODO return page_flip_needed() function */
- bool pflip_needed = !state->allow_modeset;
+ bool needs_reset;
int ret = 0;
@@ -5712,10 +6378,12 @@ static int dm_update_plane_state(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return 0;
+ needs_reset = should_reset_plane(state, plane, old_plane_state,
+ new_plane_state);
+
/* Remove any changed/removed planes */
if (!enable) {
- if (pflip_needed &&
- plane->type != DRM_PLANE_TYPE_OVERLAY)
+ if (!needs_reset)
return 0;
if (!old_plane_crtc)
@@ -5766,7 +6434,7 @@ static int dm_update_plane_state(struct dc *dc,
if (!dm_new_crtc_state->stream)
return 0;
- if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
+ if (!needs_reset)
return 0;
WARN_ON(dm_new_plane_state->dc_state);
@@ -5778,7 +6446,7 @@ static int dm_update_plane_state(struct dc *dc,
DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, new_plane_crtc->base.id);
- ret = fill_plane_attributes(
+ ret = fill_dc_plane_attributes(
new_plane_crtc->dev->dev_private,
dc_new_plane_state,
new_plane_state,
@@ -5826,10 +6494,11 @@ static int dm_update_plane_state(struct dc *dc,
}
static int
-dm_determine_update_type_for_commit(struct dc *dc,
+dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
struct drm_atomic_state *state,
enum surface_update_type *out_type)
{
+ struct dc *dc = dm->dc;
struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -5843,21 +6512,22 @@ dm_determine_update_type_for_commit(struct dc *dc,
struct dc_stream_status *status = NULL;
struct dc_surface_update *updates;
- struct dc_plane_state *surface;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
- surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
- if (!updates || !surface) {
- DRM_ERROR("Plane or surface update failed to allocate");
+ if (!updates) {
+ DRM_ERROR("Failed to allocate plane updates\n");
/* Set type to FULL to avoid crashing in DC*/
update_type = UPDATE_TYPE_FULL;
goto cleanup;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dc_stream_update stream_update = { 0 };
+ struct dc_scaling_info scaling_info;
+ struct dc_stream_update stream_update;
+
+ memset(&stream_update, 0, sizeof(stream_update));
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -5885,23 +6555,12 @@ dm_determine_update_type_for_commit(struct dc *dc,
goto cleanup;
}
- if (!state->allow_modeset)
- continue;
-
if (crtc != new_plane_crtc)
continue;
- updates[num_plane].surface = &surface[num_plane];
+ updates[num_plane].surface = new_dm_plane_state->dc_state;
if (new_crtc_state->mode_changed) {
- updates[num_plane].surface->src_rect =
- new_dm_plane_state->dc_state->src_rect;
- updates[num_plane].surface->dst_rect =
- new_dm_plane_state->dc_state->dst_rect;
- updates[num_plane].surface->rotation =
- new_dm_plane_state->dc_state->rotation;
- updates[num_plane].surface->in_transfer_func =
- new_dm_plane_state->dc_state->in_transfer_func;
stream_update.dst = new_dm_crtc_state->stream->dst;
stream_update.src = new_dm_crtc_state->stream->src;
}
@@ -5917,6 +6576,13 @@ dm_determine_update_type_for_commit(struct dc *dc,
new_dm_crtc_state->stream->out_transfer_func;
}
+ ret = fill_dc_scaling_info(new_plane_state,
+ &scaling_info);
+ if (ret)
+ goto cleanup;
+
+ updates[num_plane].scaling_info = &scaling_info;
+
num_plane++;
}
@@ -5936,8 +6602,14 @@ dm_determine_update_type_for_commit(struct dc *dc,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
+ /*
+ * TODO: DC modifies the surface during this call so we need
+ * to lock here - find a way to do this without locking.
+ */
+ mutex_lock(&dm->dc_lock);
update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
&stream_update, status);
+ mutex_unlock(&dm->dc_lock);
if (update_type > UPDATE_TYPE_MED) {
update_type = UPDATE_TYPE_FULL;
@@ -5947,7 +6619,6 @@ dm_determine_update_type_for_commit(struct dc *dc,
cleanup:
kfree(updates);
- kfree(surface);
*out_type = update_type;
return ret;
@@ -6131,7 +6802,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
- ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+ ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
if (ret)
goto fail;
@@ -6146,9 +6817,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
- else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
- WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
-
if (overall_update_type > UPDATE_TYPE_FAST) {
ret = dm_atomic_get_state(state, &dm_state);
@@ -6159,7 +6827,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+ if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index fbd161ddc3f4..978ff14a7d45 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -132,8 +132,6 @@ struct amdgpu_display_manager {
*/
struct drm_private_obj atomic_obj;
- struct drm_modeset_lock atomic_obj_lock;
-
/**
* @dc_lock:
*
@@ -184,6 +182,15 @@ struct amdgpu_display_manager {
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
+ /**
+ * @vupdate_params:
+ *
+ * Vertical update IRQ parameters, passed to registered handlers when
+ * triggered.
+ */
+ struct common_irq_params
+ vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
+
spinlock_t irq_handler_list_table_lock;
struct backlight_device *backlight_dev;
@@ -240,6 +247,10 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
+#ifdef CONFIG_DEBUG_FS
+ uint32_t debugfs_dpcd_address;
+ uint32_t debugfs_dpcd_size;
+#endif
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -260,6 +271,9 @@ struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
+ int active_planes;
+ bool interrupts_enabled;
+
int crc_skip_count;
bool crc_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 216e48cec716..7258c992a2bf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -126,46 +126,51 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
crtc->base.state->dev->dev_private;
struct drm_color_lut *lut;
uint32_t lut_size;
- struct dc_gamma *gamma;
+ struct dc_gamma *gamma = NULL;
enum dc_transfer_func_type old_type = stream->out_transfer_func->type;
bool ret;
- if (!blob) {
+ if (!blob && adev->asic_type <= CHIP_RAVEN) {
/* By default, use the SRGB predefined curve.*/
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
return 0;
}
- lut = (struct drm_color_lut *)blob->data;
- lut_size = blob->length / sizeof(struct drm_color_lut);
-
- gamma = dc_create_gamma();
- if (!gamma)
- return -ENOMEM;
+ if (blob) {
+ lut = (struct drm_color_lut *)blob->data;
+ lut_size = blob->length / sizeof(struct drm_color_lut);
+
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
+ gamma->type = GAMMA_RGB_256;
+ else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+ gamma->type = GAMMA_CS_TFM_1D;
+ else {
+ /* Invalid lut size */
+ dc_gamma_release(&gamma);
+ return -EINVAL;
+ }
- gamma->num_entries = lut_size;
- if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
- gamma->type = GAMMA_RGB_256;
- else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
- gamma->type = GAMMA_CS_TFM_1D;
- else {
- /* Invalid lut size */
- dc_gamma_release(&gamma);
- return -EINVAL;
+ /* Convert drm_lut into dc_gamma */
+ __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
}
- /* Convert drm_lut into dc_gamma */
- __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
-
- /* Call color module to translate into something DC understands. Namely
- * a transfer function.
+ /* A predefined gamma ROM only exists for RAVEN and pre-RAVEN ASICs;
+ * set canRomBeUsed accordingly
*/
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
- gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
- dc_gamma_release(&gamma);
+ gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
if (!ret) {
stream->out_transfer_func->type = old_type;
DRM_ERROR("Out of memory when calculating regamma params\n");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 4a55cde027cf..1d5fc5ad3bee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
+#include "dm_helpers.h"
/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -688,8 +689,131 @@ static int vrr_range_show(struct seq_file *m, void *data)
return 0;
}
+
+/* function description
+ *
+ * generic SDP message access for testing
+ *
+ * debugfs sdp_message is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * SDP header
+ * Hb0 : Secondary-Data Packet ID
+ * Hb1 : Secondary-Data Packet type
+ * Hb2 : Secondary-Data-packet-specific header, Byte 0
+ * Hb3 : Secondary-Data-packet-specific header, Byte 1
+ *
+ * to send a custom SDP message, write 4 bytes of SDP header followed by 32 bytes of raw data
+ */
+static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ uint8_t data[36];
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dm_crtc_state *acrtc_state;
+ uint32_t write_size = 36;
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ if (size == 0)
+ return 0;
+
+ acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state);
+
+ r = copy_from_user(data, buf, write_size);
+
+ write_size -= r;
+
+ dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size);
+
+ return write_size;
+}
+
DEFINE_SHOW_ATTRIBUTE(vrr_range);
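+
+/*
+ * The aux_dpcd_* entries work together: write a raw u32 DPCD address to
+ * aux_dpcd_address and a raw u32 transfer size (values above 256 are
+ * discarded) to aux_dpcd_size, then read or write that register block
+ * through aux_dpcd_data.
+ */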
+static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+
+ if (size < sizeof(connector->debugfs_dpcd_address))
+ return 0;
+
+ r = copy_from_user(&connector->debugfs_dpcd_address,
+ buf, sizeof(connector->debugfs_dpcd_address));
+
+ return size - r;
+}
+
+static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+
+ if (size < sizeof(connector->debugfs_dpcd_size))
+ return 0;
+
+ r = copy_from_user(&connector->debugfs_dpcd_size,
+ buf, sizeof(connector->debugfs_dpcd_size));
+
+ if (connector->debugfs_dpcd_size > 256)
+ connector->debugfs_dpcd_size = 0;
+
+ return size - r;
+}
+
+static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ char *data;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ uint32_t write_size = connector->debugfs_dpcd_size;
+
+ if (size < write_size)
+ return 0;
+
+ data = kzalloc(write_size, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ r = copy_from_user(data, buf, write_size);
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ connector->debugfs_dpcd_address, data, write_size - r);
+ kfree(data);
+ return write_size - r;
+}
+
+static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ char *data;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ uint32_t read_size = connector->debugfs_dpcd_size;
+
+ if (size < read_size)
+ return 0;
+
+ data = kzalloc(read_size, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ dm_helpers_dp_read_dpcd(link->ctx, link,
+ connector->debugfs_dpcd_address, data, read_size);
+
+ r = copy_to_user(buf, data, read_size);
+
+ kfree(data);
+ return read_size - r;
+}
+
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_link_settings_read,
@@ -710,6 +834,31 @@ static const struct file_operations dp_phy_test_pattern_fops = {
.llseek = default_llseek
};
+static const struct file_operations sdp_message_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_sdp_message_debugfs_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_address_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_dpcd_address_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_size_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_dpcd_size_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_data_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dpcd_data_read,
+ .write = dp_dpcd_data_write,
+ .llseek = default_llseek
+};
+
static const struct {
char *name;
const struct file_operations *fops;
@@ -717,7 +866,11 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"vrr_range", &vrr_range_fops}
+ {"vrr_range", &vrr_range_fops},
+ {"sdp_message", &sdp_message_fops},
+ {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
+ {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
+ {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
int connector_debugfs_init(struct amdgpu_dm_connector *connector)
@@ -842,6 +995,35 @@ static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
{"amdgpu_target_backlight_pwm", &target_backlight_read},
};
+/*
+ * Sets the DC visual confirm debug option from the given string.
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+ */
+static int visual_confirm_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;
+
+ return 0;
+}
+
+/*
+ * Reads the current DC visual confirm debug option value.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+ */
+static int visual_confirm_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.dc->debug.visual_confirm;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
+ visual_confirm_set, "%llu\n");
+
int dtn_debugfs_init(struct amdgpu_device *adev)
{
static const struct file_operations dtn_log_fops = {
@@ -867,5 +1049,13 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
adev,
&dtn_log_fops);
- return PTR_ERR_OR_ZERO(ent);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ ent = debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root,
+ adev, &visual_confirm_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b39766bd2840..e6cd67342df8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -264,7 +264,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
}
/*
- * poll pending down reply before clear payload allocation table
+ * poll pending down reply
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index cd10f77cdeb0..fd22b4474dbf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -674,11 +674,30 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
__func__);
}
+static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_VUPDATE,
+ __func__);
+}
+
static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
.set = amdgpu_dm_set_crtc_irq_state,
.process = amdgpu_dm_irq_handler,
};
+static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
+ .set = amdgpu_dm_set_vupdate_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
.set = amdgpu_dm_set_pflip_irq_state,
.process = amdgpu_dm_irq_handler,
@@ -695,6 +714,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+ adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
+ adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
+
adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index c4ea3a91f17a..6e205ee36ac3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -84,6 +84,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
{
ssize_t result = 0;
struct aux_payload payload;
+ enum aux_channel_operation_result operation_result;
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -97,13 +98,27 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
payload.defer_delay = 0;
- result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, &payload);
+ result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
+ &operation_result);
if (payload.write)
result = msg->size;
- if (result < 0) /* DC doesn't know about kernel error codes */
- result = -EIO;
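+ /* DC doesn't know about kernel error codes; translate the AUX result. */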
+ if (result < 0)
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ result = -EIO;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ result = -EBUSY;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ result = -ETIMEDOUT;
+ break;
+ }
return result;
}
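For illustration, a minimal stand-alone model of the result-to-errno mapping introduced in the hunk above; it is not part of the patch, and the enum below is a hypothetical stand-in for DC's enum aux_channel_operation_result so the sketch compiles outside the kernel.

#include <errno.h>

/* Hypothetical mirror of the DC result enum, for illustration only. */
enum aux_op_result {
    AUX_OP_SUCCEEDED,
    AUX_OP_FAILED_HPD_DISCON,
    AUX_OP_FAILED_REASON_UNKNOWN,
    AUX_OP_FAILED_INVALID_REPLY,
    AUX_OP_FAILED_TIMEOUT,
};

/* Same policy as dm_dp_aux_transfer() above: only a failed raw transfer is
 * translated, and unknown failures fall back to a generic I/O error. */
static int aux_result_to_errno(enum aux_op_result res)
{
    switch (res) {
    case AUX_OP_SUCCEEDED:
        return 0;
    case AUX_OP_FAILED_INVALID_REPLY:
        return -EBUSY;      /* sink answered, but not with an ACK */
    case AUX_OP_FAILED_TIMEOUT:
        return -ETIMEDOUT;  /* no reply within the AUX timeout */
    case AUX_OP_FAILED_HPD_DISCON:
    case AUX_OP_FAILED_REASON_UNKNOWN:
    default:
        return -EIO;
    }
}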
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index a114954d6a5b..350e7a620d45 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -33,6 +33,7 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
+#include "amdgpu_smu.h"
bool dm_pp_apply_display_requirements(
@@ -40,6 +41,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_display_configuration *pp_display_cfg)
{
struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
int i;
if (adev->pm.dpm_enabled) {
@@ -105,6 +107,9 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
+ else
+ smu_display_configuration_change(smu,
+ &adev->pm.pm_display_cfg);
amdgpu_pm_compute_clocks(adev);
}
@@ -308,6 +313,12 @@ bool dm_pp_get_clock_levels_by_type(
if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
/* Error in pplib. Provide default values. */
+ return true;
+ }
+ } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
+ if (smu_get_clock_by_type(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks)) {
get_default_clock_levels(clk_type, dc_clks);
return true;
}
@@ -324,6 +335,13 @@ bool dm_pp_get_clock_levels_by_type(
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
+ } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
+ if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
}
DRM_INFO("DM_PPLIB: Validation clocks:\n");
@@ -374,14 +392,21 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
void *pp_handle = adev->powerplay.pp_handle;
struct pp_clock_levels_with_latency pp_clks = { 0 };
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret;
+
+ if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
+ ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks);
+ if (ret)
+ return false;
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
+ if (smu_get_clock_by_type_with_latency(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks))
+ return false;
+ }
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clks))
- return false;
pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
@@ -397,14 +422,20 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
void *pp_handle = adev->powerplay.pp_handle;
struct pp_clock_levels_with_voltage pp_clk_info = {0};
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clk_info))
- return false;
+ int ret;
+
+ if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
+ ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info);
+ if (ret)
+ return false;
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
+ if (smu_get_clock_by_type_with_voltage(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info))
+ return false;
+ }
pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
@@ -445,6 +476,10 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
+ else if (adev->smu.funcs &&
+ adev->smu.funcs->display_clock_voltage_request)
+ ret = smu_display_clock_voltage_request(&adev->smu,
+ &pp_clock_request);
if (ret)
return false;
return true;
@@ -462,6 +497,8 @@ bool dm_pp_get_static_clocks(
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
+ else if (adev->smu.funcs)
+ ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
if (ret)
return false;
@@ -472,27 +509,6 @@ bool dm_pp_get_static_clocks(
return true;
}
-void pp_rv_set_display_requirement(struct pp_smu *pp,
- struct pp_smu_display_requirement_rv *req)
-{
- const struct dc_context *ctx = pp->dm;
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- struct pp_display_clock_request clock = {0};
-
- if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
- return;
-
- clock.clock_type = amd_pp_dcf_clock;
- clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
- pp_funcs->display_clock_voltage_request(pp_handle, &clock);
-
- clock.clock_type = amd_pp_f_clock;
- clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
- pp_funcs->display_clock_voltage_request(pp_handle, &clock);
-}
-
void pp_rv_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
@@ -508,9 +524,6 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
- if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges)
- return;
-
for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
if (ranges->reader_wm_sets[i].wm_inst > 3)
wm_dce_clocks[i].wm_set_id = WM_SET_A;
@@ -543,7 +556,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
- pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+ if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
+ &wm_with_clock_ranges);
+ else if (adev->smu.funcs &&
+ adev->smu.funcs->set_watermarks_for_clock_ranges)
+ smu_set_watermarks_for_clock_ranges(&adev->smu,
+ &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -553,10 +572,10 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
- return;
-
- pp_funcs->notify_smu_enable_pwe(pp_handle);
+ if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
+ pp_funcs->notify_smu_enable_pwe(pp_handle);
+ else if (adev->smu.funcs)
+ smu_notify_smu_enable_pwe(&adev->smu);
}
void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
@@ -611,17 +630,16 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}
-void dm_pp_get_funcs_rv(
+void dm_pp_get_funcs(
struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs)
+ struct pp_smu_funcs *funcs)
{
- funcs->pp_smu.dm = ctx;
- funcs->set_display_requirement = pp_rv_set_display_requirement;
- funcs->set_wm_ranges = pp_rv_set_wm_ranges;
- funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
- funcs->set_display_count = pp_rv_set_active_display_count;
- funcs->set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
- funcs->set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
- funcs->set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
+ funcs->rv_funcs.pp_smu.dm = ctx;
+ funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+ funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
+ funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
+ funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
+ funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index f28989860fd8..1e9a2d352068 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy(
return min_clamp;
}
+unsigned int dc_fixpt_u4d19(struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 4, 19);
+}
+
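dc_fixpt_u4d19() presumably packs the 31.32 fixed-point value into an unsigned 4.19 format (4 integer bits, 19 fractional bits), alongside the existing u3d19/u2d19 helpers. Below is a stand-alone sketch of that arithmetic, assuming plain truncation (the real ux_dy()/clamp_ux_dy() helpers may also round and clamp); it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define FIXPT31_32_FRAC_BITS 32u    /* DC's fixed31_32: 64-bit value, 32 fractional bits */
#define U4D19_FRAC_BITS      19u

int main(void)
{
    /* 1.5 expressed in 31.32 fixed point. */
    int64_t value_31_32 = (int64_t)(1.5 * (double)(1ull << FIXPT31_32_FRAC_BITS));

    /* Drop 32 - 19 = 13 fractional bits to land in u4.19. */
    uint32_t value_u4_19 =
        (uint32_t)(value_31_32 >> (FIXPT31_32_FRAC_BITS - U4D19_FRAC_BITS));

    /* Prints 786432 (0xc0000), i.e. 1.5 * 2^19. */
    printf("u4.19 encoding of 1.5 = %u (0x%x)\n", value_u4_19, value_u4_19);
    return 0;
}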
unsigned int dc_fixpt_u3d19(struct fixed31_32 arg)
{
return ux_dy(arg.value, 3, 19);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index eb62d10bb65c..1b4b51657f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -247,6 +247,53 @@ static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format for
}
}
+enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode)
+{
+ switch (sw_mode) {
+ /* for 4/8/16 high tiles */
+ case DC_SW_LINEAR:
+ return dm_4k_tile;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_X:
+ case DC_SW_64KB_S_T:
+ return dm_64k_tile;
+ case DC_SW_VAR_S:
+ case DC_SW_VAR_S_X:
+ return dm_256k_tile;
+
+ /* For 64bpp 2 high tiles */
+ case DC_SW_4KB_D:
+ case DC_SW_4KB_D_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_D:
+ case DC_SW_64KB_D_X:
+ case DC_SW_64KB_D_T:
+ return dm_64k_tile;
+ case DC_SW_VAR_D:
+ case DC_SW_VAR_D_X:
+ return dm_256k_tile;
+
+ case DC_SW_4KB_R:
+ case DC_SW_4KB_R_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_R:
+ case DC_SW_64KB_R_X:
+ return dm_64k_tile;
+ case DC_SW_VAR_R:
+ case DC_SW_VAR_R_X:
+ return dm_256k_tile;
+
+ /* Unsupported swizzle modes for dcn */
+ case DC_SW_256B_S:
+ default:
+ ASSERT(0); /* Not supported */
+ return 0;
+ }
+}
+
static void pipe_ctx_to_e2e_pipe_params (
const struct pipe_ctx *pipe,
struct _vcs_dpi_display_pipe_params_st *input)
@@ -287,46 +334,7 @@ static void pipe_ctx_to_e2e_pipe_params (
input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not currently stored */
input->src.cur0_bpp = 32;
- switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
- /* for 4/8/16 high tiles */
- case DC_SW_LINEAR:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_4KB_S:
- case DC_SW_4KB_S_X:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_64KB_S:
- case DC_SW_64KB_S_X:
- case DC_SW_64KB_S_T:
- input->src.macro_tile_size = dm_64k_tile;
- break;
- case DC_SW_VAR_S:
- case DC_SW_VAR_S_X:
- input->src.macro_tile_size = dm_256k_tile;
- break;
-
- /* For 64bpp 2 high tiles */
- case DC_SW_4KB_D:
- case DC_SW_4KB_D_X:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_64KB_D:
- case DC_SW_64KB_D_X:
- case DC_SW_64KB_D_T:
- input->src.macro_tile_size = dm_64k_tile;
- break;
- case DC_SW_VAR_D:
- case DC_SW_VAR_D_X:
- input->src.macro_tile_size = dm_256k_tile;
- break;
-
- /* Unsupported swizzle modes for dcn */
- case DC_SW_256B_S:
- default:
- ASSERT(0); /* Not supported */
- break;
- }
+ input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle);
switch (pipe->plane_state->rotation) {
case ROTATION_ANGLE_0:
@@ -466,7 +474,7 @@ static void dcn_bw_calc_rq_dlg_ttu(
input.clks_cfg.dcfclk_mhz = v->dcfclk;
input.clks_cfg.dispclk_mhz = v->dispclk;
input.clks_cfg.dppclk_mhz = v->dppclk;
- input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz / 1000.0;
+ input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input.clks_cfg.socclk_mhz = v->socclk;
input.clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
@@ -536,28 +544,28 @@ static void calc_wm_sets_and_perf_params(
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
v->dcfclk = v->dcfclkv_nom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
}
if (v->voltage_level < 3) {
@@ -571,14 +579,14 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclkv_max0p9;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
}
v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
@@ -591,20 +599,20 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclk_per_state[v->voltage_level];
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
if (v->voltage_level >= 2) {
- context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
}
if (v->voltage_level >= 3)
- context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
}
#endif
@@ -693,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
bool dcn_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
+ /*
+ * we want a breakdown of the various stages of validation, which the
+ * perf_trace macro doesn't support
+ */
+ BW_VAL_TRACE_SETUP();
+
const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
int i, input_idx;
@@ -703,6 +718,9 @@ bool dcn_validate_bandwidth(
float bw_limit;
PERFORMANCE_TRACE_START();
+
+ BW_VAL_TRACE_COUNT();
+
if (dcn_bw_apply_registry_override(dc))
dcn_bw_sync_calcs_and_dml(dc);
@@ -1000,13 +1018,16 @@ bool dcn_validate_bandwidth(
dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
if (dc->debug.sr_exit_time_dpm0_ns)
v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
- dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
- dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+ context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time;
mode_support_and_system_configuration(v);
}
- if (v->voltage_level != 5) {
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
+
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
@@ -1027,58 +1048,60 @@ bool dcn_validate_bandwidth(
*/
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
- context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
- context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
- context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
- context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
- context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
- if (context->bw.dcn.clk.dispclk_khz <
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
- context->bw.dcn.clk.dispclk_khz =
+ context->bw_ctx.bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
- context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
+ BW_VAL_TRACE_END_WATERMARKS();
+
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1141,7 +1164,7 @@ bool dcn_validate_bandwidth(
hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
} else {
/* pipe not split previously needs split */
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
+ hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe);
ASSERT(hsplit_pipe);
split_stream_across_pipes(
&context->res_ctx, pool,
@@ -1169,13 +1192,17 @@ bool dcn_validate_bandwidth(
input_idx++;
}
+ } else if (v->voltage_level == number_of_states_plus_one) {
+ BW_VAL_TRACE_SKIP(fail);
+ } else if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
}
if (v->voltage_level == 0) {
- dc->dml.soc.sr_enter_plus_exit_time_us =
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
dc->dcn_soc->sr_enter_plus_exit_time;
- dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
}
/*
@@ -1188,6 +1215,7 @@ bool dcn_validate_bandwidth(
kernel_fpu_end();
PERFORMANCE_TRACE_END();
+ BW_VAL_TRACE_FINISH();
if (bw_limit_pass && v->voltage_level != 5)
return true;
@@ -1395,12 +1423,14 @@ void dcn_bw_update_from_pplib(struct dc *dc)
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
{
- struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+ struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
- if (!pp->set_wm_ranges)
+ if (dc->res_pool->pp_smu)
+ pp = &dc->res_pool->pp_smu->rv_funcs;
+ if (!pp || !pp->set_wm_ranges)
return;
kernel_fpu_begin();
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c68fbd55db3c..18c775a950cc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -524,6 +524,14 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_stream_state *link_stream;
struct dc_link_settings store_settings = *link_setting;
+ link->preferred_link_setting = store_settings;
+
+ /* Retraining with preferred link settings is only relevant for
+ * the DP signal type
+ */
+ if (!dc_is_dp_signal(link->connector_signal))
+ return;
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->link) {
@@ -538,7 +546,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
- link->preferred_link_setting = store_settings;
+ /* Cannot retrain link if backend is off */
+ if (link_stream->dpms_off)
+ return;
+
if (link_stream)
decide_link_settings(link_stream, &store_settings);
@@ -573,6 +584,28 @@ void dc_link_set_test_pattern(struct dc_link *link,
cust_pattern_size);
}
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting)
+{
+ uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* kbytes/sec per lane */
+
+ link_bw_kbps *= 8; /* 8 bits per byte*/
+ link_bw_kbps *= link_setting->lane_count;
+
+ return link_bw_kbps;
+
+}
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link)
+{
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ return &link->preferred_link_setting;
+ return &link->verified_link_cap;
+}
+
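For illustration, a stand-alone model of the dc_link_bandwidth_kbps() arithmetic added above, with one worked case. The 27000 kHz value is assumed from DC's existing LINK_RATE_REF_FREQ_IN_KHZ constant; the sketch is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000u    /* assumed: 27 MHz link reference */

/* link_rate is the raw DPCD multiplier, e.g. 0x0a for HBR, 0x14 for HBR2. */
static uint32_t link_bandwidth_kbps(uint32_t link_rate, uint32_t lane_count)
{
    uint32_t kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* ksymbols/s per lane */

    kbps *= 8;          /* one decoded byte (8 bits) per link symbol */
    kbps *= lane_count;
    return kbps;
}

int main(void)
{
    /* HBR2 (0x14 = 5.4 Gbps/lane) on 4 lanes -> 17,280,000 kbps of payload,
     * i.e. 5.4 Gbps x 4 lanes x 80% 8b/10b efficiency. */
    printf("%u kbps\n", link_bandwidth_kbps(0x14, 4));
    return 0;
}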
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@@ -621,6 +654,10 @@ static bool construct(struct dc *dc,
#endif
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ dc->config = init_params->flags;
+
+ memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
+
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__);
@@ -668,13 +705,6 @@ static bool construct(struct dc *dc,
dc_ctx->dc_stream_id_count = 0;
dc->ctx = dc_ctx;
- dc->current_state = dc_create_state();
-
- if (!dc->current_state) {
- dm_error("%s: failed to create validate ctx\n", __func__);
- goto fail;
- }
-
/* Create logger */
dc_ctx->dce_environment = init_params->dce_environment;
@@ -722,14 +752,22 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc->res_pool = dc_create_resource_pool(
- dc,
- init_params->num_virtual_links,
- dc_version,
- init_params->asic_id);
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
if (!dc->res_pool)
goto fail;
+ /* Creation of current_state must occur after dc->dml
+ * is initialized in dc_create_resource_pool because
+ * on creation it copies the contents of dc->dml
+ */
+
+ dc->current_state = dc_create_state(dc);
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
dc_resource_state_construct(dc, dc->current_state);
if (!create_links(dc, init_params->num_virtual_links))
@@ -746,7 +784,7 @@ fail:
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
- struct dc_state *dangling_context = dc_create_state();
+ struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;
if (dangling_context == NULL)
@@ -811,8 +849,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
- dc->config = init_params->flags;
-
dc->build_id = DC_BUILD_ID;
DC_LOG_DC("Display Core initialized\n");
@@ -969,7 +1005,7 @@ static bool context_changed(
return false;
}
-bool dc_validate_seamless_boot_timing(struct dc *dc,
+bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing)
{
@@ -1060,7 +1096,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);
- dc->hwss.prepare_bandwidth(dc, context);
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization)
+ dc->optimize_seamless_boot = true;
+ }
+
+ if (!dc->optimize_seamless_boot)
+ dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -1135,12 +1177,15 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
+ if (!dc->optimize_seamless_boot)
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
+ memset(&context->commit_hints, 0, sizeof(context->commit_hints));
+
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1177,7 +1222,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
int i;
struct dc_state *context = dc->current_state;
- if (dc->optimized_required == false)
+ if (!dc->optimized_required || dc->optimize_seamless_boot)
return true;
post_surface_trace(dc);
@@ -1195,18 +1240,60 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}
-struct dc_state *dc_create_state(void)
+struct dc_state *dc_create_state(struct dc *dc)
{
struct dc_state *context = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);
if (!context)
return NULL;
+ /* Each context must have its own instance of VBA, and in order to
+ * initialize and obtain the IP and SOC parameters, the base DML instance
+ * from DC is initially copied into every context.
+ */
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+#endif
kref_init(&context->refcount);
+
return context;
}
+struct dc_state *dc_copy_state(struct dc_state *src_ctx)
+{
+ int i, j;
+ struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!new_ctx)
+ return NULL;
+
+ memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++) {
+ dc_stream_retain(new_ctx->streams[i]);
+ for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ new_ctx->stream_status[i].plane_states[j]);
+ }
+
+ kref_init(&new_ctx->refcount);
+
+ return new_ctx;
+}
+
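dc_copy_state() above shallow-copies the whole dc_state and then rebases the top_pipe/bottom_pipe pointers into the new pipe_ctx array via their stored indices (the stream/plane refcount retains are omitted here). A minimal sketch of that pointer-rebasing pattern on a simplified structure, not part of the patch:

#include <stdio.h>
#include <string.h>

#define MAX_PIPES 6

struct pipe {
    int idx;
    struct pipe *top;   /* points at another element of the same array */
};

struct state {
    struct pipe pipes[MAX_PIPES];
};

static void copy_state(struct state *dst, const struct state *src)
{
    int i;

    /* After the memcpy, dst->pipes[i].top still points into *src;
     * rebase each pointer using the stored index, as dc_copy_state() does. */
    memcpy(dst, src, sizeof(*dst));
    for (i = 0; i < MAX_PIPES; i++)
        if (dst->pipes[i].top)
            dst->pipes[i].top = &dst->pipes[dst->pipes[i].top->idx];
}

int main(void)
{
    struct state a = { { { 0 } } }, b;

    a.pipes[1].idx = 1;
    a.pipes[1].top = &a.pipes[0];   /* pipe 1 sits under pipe 0 */

    copy_state(&b, &a);
    printf("rebased into copy: %d\n", b.pipes[1].top == &b.pipes[0]); /* prints 1 */
    return 0;
}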
void dc_retain_state(struct dc_state *context)
{
kref_get(&context->refcount);
@@ -1377,6 +1464,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
return UPDATE_TYPE_FULL;
}
+ if (u->surface->force_full_update) {
+ update_flags->bits.full_update = 1;
+ return UPDATE_TYPE_FULL;
+ }
+
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
@@ -1661,6 +1753,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dpms_off) {
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
dc->hwss.optimize_bandwidth(dc, dc->current_state);
@@ -1668,6 +1761,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
}
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -1695,7 +1789,16 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- if (update_type == UPDATE_TYPE_FULL) {
+ if (dc->optimize_seamless_boot && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+ * bandwidth.
+ */
+ dc->optimize_seamless_boot = false;
+ dc->optimized_required = true;
+ }
+
+ if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -1795,13 +1898,21 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
- context = dc_create_state();
+ context = dc_create_state(dc);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
}
dc_resource_state_copy_construct(state, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+ new_pipe->plane_state->force_full_update = true;
+ }
}
@@ -1838,6 +1949,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc->current_state = context;
dc_release_state(old);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
}
/*let's use current_state to update watermark etc*/
if (update_type >= UPDATE_TYPE_FULL)
@@ -2080,13 +2197,13 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
- info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
- info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
- info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
- info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
- info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
- info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
- info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
- info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
- info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
+ info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
+ info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
+ info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
+ info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
+ info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
+ info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
+ info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 73d049506618..5903e7822f98 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -351,19 +351,19 @@ void context_clock_trace(
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.clk.dispclk_khz,
- context->bw.dcn.clk.dppclk_khz,
- context->bw.dcn.clk.dcfclk_khz,
- context->bw.dcn.clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.clk.fclk_khz,
- context->bw.dcn.clk.socclk_khz);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz,
+ context->bw_ctx.bw.dcn.clk.dppclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw_ctx.bw.dcn.clk.fclk_khz,
+ context->bw_ctx.bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.clk.dispclk_khz,
- context->bw.dcn.clk.dppclk_khz,
- context->bw.dcn.clk.dcfclk_khz,
- context->bw.dcn.clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.clk.fclk_khz,
- context->bw.dcn.clk.socclk_khz);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz,
+ context->bw_ctx.bw.dcn.clk.dppclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw_ctx.bw.dcn.clk.fclk_khz,
+ context->bw_ctx.bw.dcn.clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4eba3c4800b6..b37ecc3ede61 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -58,7 +58,6 @@
******************************************************************************/
enum {
- LINK_RATE_REF_FREQ_IN_MHZ = 27,
PEAK_FACTOR_X1000 = 1006,
/*
* Some receivers fail to train on first try and are good
@@ -515,6 +514,40 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
+static void read_edp_current_link_settings_on_detect(struct dc_link *link)
+{
+ union lane_count_set lane_count_set = { {0} };
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, sizeof(lane_count_set));
+ link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set == 0) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ link->cur_link_settings.link_rate = link_bw_set;
+ link->cur_link_settings.use_link_rate_set = false;
+ }
+}
+
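read_edp_current_link_settings_on_detect() above reads back DPCD 00101h (lane count), 00100h (standard link rate) and, when an eDP 1.4 rate table is in use, 00115h (link rate set index). For reference, the standard DP_LINK_BW_SET codes are multiples of 0.27 Gbps of raw line rate per lane; the small decoder below is our own illustration, not a DC helper.

#include <stdio.h>

static unsigned int link_bw_set_to_mbps(unsigned char link_bw_set)
{
    /* 0x06 -> 1620 (RBR), 0x0a -> 2700 (HBR), 0x14 -> 5400 (HBR2), 0x1e -> 8100 (HBR3) */
    return link_bw_set * 270;
}

int main(void)
{
    printf("HBR2 per-lane rate: %u Mbps\n", link_bw_set_to_mbps(0x14));
    return 0;
}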
static bool detect_dp(
struct dc_link *link,
struct display_sink_capability *sink_caps,
@@ -640,7 +673,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
DC_LOGGER_INIT(link->ctx->logger);
- if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+
+ if (dc_is_virtual_signal(link->connector_signal))
return false;
if (false == dc_link_detect_sink(link, &new_connection_type)) {
@@ -648,9 +682,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
return false;
}
- if (link->connector_signal == SIGNAL_TYPE_EDP &&
- link->local_sink)
- return true;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* On detect, we want to make sure current link settings are
+ * up to date, especially if link was powered on by GOP.
+ */
+ read_edp_current_link_settings_on_detect(link);
+ if (link->local_sink)
+ return true;
+ }
if (link->connector_signal == SIGNAL_TYPE_LVDS &&
link->local_sink)
@@ -720,9 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
same_dpcd = false;
}
/* Active dongle plug in without display or downstream unplug*/
- if (link->type == dc_connection_active_dongle
- && link->dpcd_caps.sink_count.
- bits.SINK_COUNT == 0) {
+ if (link->type == dc_connection_active_dongle &&
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
if (prev_sink != NULL) {
/* Downstream unplug */
dc_sink_release(prev_sink);
@@ -1172,8 +1210,6 @@ static bool construct(
goto create_fail;
}
-
-
/* TODO: #DAL3 Implement id to str function.*/
LINK_INFO("Connector[%d] description:"
"signal %d\n",
@@ -1207,7 +1243,7 @@ static bool construct(
link->link_enc = link->dc->res_pool->funcs->link_enc_create(
&enc_init_data);
- if( link->link_enc == NULL) {
+ if (link->link_enc == NULL) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
@@ -1399,9 +1435,24 @@ static enum dc_status enable_link_dp(
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
+ /* If the link settings differ from the current ones and the link is
+ * already enabled, it must be disabled before programming the new rate.
+ */
+ if (link->link_status.link_active &&
+ (link->cur_link_settings.lane_count != link_settings.lane_count ||
+ link->cur_link_settings.link_rate != link_settings.link_rate)) {
+ dp_disable_link_phy(link, pipe_ctx->stream->signal);
+ }
+
+ /*in case it is not on*/
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- state->dccg->funcs->update_clocks(state->dccg, state, false);
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
link,
@@ -1442,15 +1493,9 @@ static enum dc_status enable_link_edp(
struct pipe_ctx *pipe_ctx)
{
enum dc_status status;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- /*in case it is not on*/
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
status = enable_link_dp(state, pipe_ctx);
-
return status;
}
@@ -1466,14 +1511,14 @@ static enum dc_status enable_link_dp_mst(
if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
return DC_OK;
+ /* clear payload table */
+ dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+
/* to make sure the pending down rep can be processed
- * before clear payload table
+ * before enabling the link
*/
dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
- /* clear payload table */
- dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
-
/* set the sink to MST mode before enabling the link */
dp_enable_mst_on_sink(link, true);
@@ -1982,7 +2027,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->signal,
stream->phy_pix_clk);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
dal_ddc_service_read_scdc_data(link->ddc);
}
@@ -2074,11 +2119,28 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
}
}
+static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
+{
+
+ uint32_t pxl_clk = timing->pix_clk_100hz;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pxl_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pxl_clk = pxl_clk * 2 / 3;
+
+ if (timing->display_color_depth == COLOR_DEPTH_101010)
+ pxl_clk = pxl_clk * 10 / 8;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212)
+ pxl_clk = pxl_clk * 12 / 8;
+
+ return pxl_clk;
+}
+
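get_timing_pixel_clock_100hz() above folds chroma subsampling and deep-color overhead into an effective pixel clock. A stand-alone version with a worked 4K60 case follows; the enum names are illustrative stand-ins, not DC identifiers, and the sketch is not part of the patch.

#include <stdint.h>
#include <stdio.h>

enum pix_enc { ENC_RGB, ENC_YCBCR422, ENC_YCBCR420 };
enum color_depth { DEPTH_888, DEPTH_101010, DEPTH_121212 };

static uint32_t timing_pixel_clock_100hz(uint32_t pix_clk_100hz,
                                         enum pix_enc enc, enum color_depth d)
{
    uint32_t pxl_clk = pix_clk_100hz;

    if (enc == ENC_YCBCR420)
        pxl_clk /= 2;                   /* half the character rate */
    else if (enc == ENC_YCBCR422)
        pxl_clk = pxl_clk * 2 / 3;

    if (d == DEPTH_101010)
        pxl_clk = pxl_clk * 10 / 8;     /* 10-bit deep color overhead */
    else if (d == DEPTH_121212)
        pxl_clk = pxl_clk * 12 / 8;

    return pxl_clk;
}

int main(void)
{
    /* 4K60 (594.00 MHz) as YCbCr 4:2:0 at 10 bpc -> 3712500, i.e. 371.25 MHz. */
    printf("%u x 100Hz\n",
           timing_pixel_clock_100hz(5940000, ENC_YCBCR420, DEPTH_101010));
    return 0;
}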
static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dpcd_caps *dpcd_caps)
{
- unsigned int required_pix_clk_100hz = timing->pix_clk_100hz;
const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
switch (dpcd_caps->dongle_type) {
@@ -2115,13 +2177,6 @@ static bool dp_active_dongle_validate_timing(
return false;
}
-
- /* Check Color Depth and Pixel Clock */
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- required_pix_clk_100hz /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3;
-
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
case COLOR_DEPTH_888:
@@ -2130,14 +2185,11 @@ static bool dp_active_dongle_validate_timing(
case COLOR_DEPTH_101010:
if (dongle_caps->dp_hdmi_max_bpc < 10)
return false;
- required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8;
break;
case COLOR_DEPTH_121212:
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
- required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8;
break;
-
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -2145,7 +2197,7 @@ static bool dp_active_dongle_validate_timing(
return false;
}
- if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
+ if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
return true;
@@ -2166,7 +2218,7 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
/* Passive Dongle */
- if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk)
+ if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
return DC_EXCEED_DONGLE_CAP;
/* Active Dongle*/
@@ -2284,14 +2336,13 @@ void core_link_resume(struct dc_link *link)
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
- struct dc_link_settings *link_settings =
- &stream->link->cur_link_settings;
- uint32_t link_rate_in_mbps =
- link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
- struct fixed31_32 mbps = dc_fixpt_from_int(
- link_rate_in_mbps * link_settings->lane_count);
-
- return dc_fixpt_div_int(mbps, 54);
+ struct fixed31_32 mbytes_per_sec;
+ uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
+ link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
+
+ mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
+
+ return dc_fixpt_div_int(mbytes_per_sec, 54);
}
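A worked example of the reworked get_pbn_per_slot() path above, for HBR2 x4. Integer arithmetic is shown for clarity; the real code divides via fixed31_32, and this sketch is not part of the patch.

#include <stdio.h>

int main(void)
{
    /* dc_link_bandwidth_kbps(): 0x14 * 27000 * 8 * 4 = 17,280,000 kbps. */
    unsigned int link_kbps = 0x14 * 27000 * 8 * 4;
    unsigned int mbytes_per_sec = link_kbps / 8000;  /* 2160 MBytes/s */
    unsigned int pbn_per_slot = mbytes_per_sec / 54; /* 40 PBN per MST time slot */

    /* 64 time slots * 40 = 2560 PBN total, the familiar MST figure for HBR2 x4. */
    printf("%u PBN/slot, %u PBN total\n", pbn_per_slot, 64 * pbn_per_slot);
    return 0;
}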
static int get_color_depth(enum dc_color_depth color_depth)
@@ -2316,7 +2367,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
uint32_t denominator;
bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
- kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -2551,12 +2602,12 @@ void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) {
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
pipe_ctx->stream->signal);
@@ -2570,9 +2621,10 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
- stream->output_color_space);
+ stream->output_color_space,
+ stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
@@ -2660,12 +2712,18 @@ void core_link_enable_stream(
void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ dal_ddc_service_write_scdc_data(
+ stream->link->ddc, 0,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
@@ -2730,3 +2788,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
}
}
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only the Y-Only format reduces bandwidth to 1/3 compared to RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+
+}
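dc_bandwidth_in_kbps_from_timing(), now exported here, is pixel rate times bits per channel times the number of channels, with 4:2:0/4:2:2 reductions. A worked 1080p60 RGB 8 bpc case as a stand-alone sketch, not part of the patch:

#include <stdio.h>

int main(void)
{
    /* 1080p60: pix_clk_100hz = 1485000 (148.50 MHz), RGB, 8 bits per channel. */
    unsigned int kbps = 1485000 / 10;   /* pixel rate in kpixels/s */

    kbps *= 8;                          /* bits per channel */
    kbps *= 3;                          /* three channels (not Y-Only, not subsampled) */

    /* 3,564,000 kbps -- comfortably below an HBR x2 link (4,320,000 kbps). */
    printf("%u kbps\n", kbps);
    return 0;
}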
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index b7ee63cd8dc7..f02092a0dc76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -573,12 +573,28 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-int dc_link_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload)
+/* dc_link_aux_transfer_raw() - Attempt to transfer
+ * the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned
+ * in the payload->reply and the result through
+ * *operation_result. Returns the number of bytes transferred,
+ * or -1 on a failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result)
{
- return dce_aux_transfer(ddc, payload);
+ return dce_aux_transfer_raw(ddc, payload, operation_result);
}
+/* dc_link_aux_transfer_with_retries() - Attempt to submit an
+ * aux payload, retrying on timeouts, defers, and busy states
+ * as outlined in the DP spec. Returns true if the request
+ * was successful.
+ *
+ * Unless you want to implement your own retry semantics, this
+ * is probably the one you want.
+ */
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 09d301216076..1ee544a32ebb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -93,12 +93,10 @@ static void dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
- uint8_t rate = (uint8_t)
- (lt_settings->link_settings.link_rate);
+ uint8_t rate;
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
- uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -111,29 +109,42 @@ static void dpcd_set_link_settings(
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- link_set_buffer[0] = rate;
- link_set_buffer[1] = lane_count_set.raw;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET,
- link_set_buffer, 2);
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- (link->dpcd_caps.link_rate_set >= 1 &&
- link->dpcd_caps.link_rate_set <= 8)) {
+ lt_settings->link_settings.use_link_rate_set == true) {
+ rate = 0;
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
core_link_write_dpcd(link, DP_LINK_RATE_SET,
- &link->dpcd_caps.link_rate_set, 1);
+ &lt_settings->link_settings.link_rate_set, 1);
+ } else {
+ rate = (uint8_t) (lt_settings->link_settings.link_rate);
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
+ if (rate) {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_RATE_SET,
+ lt_settings->link_settings.link_rate_set,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ }
}
@@ -952,6 +963,8 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.link_settings.link_rate = link_setting->link_rate;
lt_settings.link_settings.lane_count = link_setting->lane_count;
+ lt_settings.link_settings.use_link_rate_set = link_setting->use_link_rate_set;
+ lt_settings.link_settings.link_rate_set = link_setting->link_rate_set;
/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
@@ -1075,7 +1088,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
/* Set Default link settings */
struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
- LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
/* Higher link settings based on feature supported */
if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
@@ -1520,69 +1533,6 @@ static bool decide_fallback_link_setting(
return true;
}
-static uint32_t bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
-{
- uint32_t bits_per_channel = 0;
- uint32_t kbps;
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- bits_per_channel = 6;
- break;
- case COLOR_DEPTH_888:
- bits_per_channel = 8;
- break;
- case COLOR_DEPTH_101010:
- bits_per_channel = 10;
- break;
- case COLOR_DEPTH_121212:
- bits_per_channel = 12;
- break;
- case COLOR_DEPTH_141414:
- bits_per_channel = 14;
- break;
- case COLOR_DEPTH_161616:
- bits_per_channel = 16;
- break;
- default:
- break;
- }
-
- ASSERT(bits_per_channel != 0);
-
- kbps = timing->pix_clk_100hz / 10;
- kbps *= bits_per_channel;
-
- if (timing->flags.Y_ONLY != 1) {
- /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
- kbps *= 3;
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- kbps /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- kbps = kbps * 2 / 3;
- }
-
- return kbps;
-
-}
-
-static uint32_t bandwidth_in_kbps_from_link_settings(
- const struct dc_link_settings *link_setting)
-{
- uint32_t link_rate_in_kbps = link_setting->link_rate *
- LINK_RATE_REF_FREQ_IN_KHZ;
-
- uint32_t lane_count = link_setting->lane_count;
- uint32_t kbps = link_rate_in_kbps;
-
- kbps *= lane_count;
- kbps *= 8; /* 8 bits per byte*/
-
- return kbps;
-
-}
-
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -1598,8 +1548,7 @@ bool dp_validate_mode_timing(
timing->v_addressable == (uint32_t) 480)
return true;
- /* We always use verified link settings */
- link_setting = &link->verified_link_cap;
+ link_setting = dc_link_get_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
@@ -1607,8 +1556,8 @@ bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
- req_bw = bandwidth_in_kbps_from_timing(timing);
- max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ max_bw = dc_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
@@ -1629,58 +1578,78 @@ bool dp_validate_mode_timing(
return false;
}
-void decide_link_settings(struct dc_stream_state *stream,
- struct dc_link_settings *link_setting)
+static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
-
struct dc_link_settings initial_link_setting = {
- LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
struct dc_link_settings current_link_setting =
initial_link_setting;
- struct dc_link *link;
- uint32_t req_bw;
uint32_t link_bw;
- req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
-
- link = stream->link;
-
- /* if preferred is specified through AMDDP, use it, if it's enough
- * to drive the mode
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
*/
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
- *link_setting = link->preferred_link_setting;
- return;
- }
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
- /* MST doesn't perform link training for now
- * TODO: add MST specific link training routine
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- *link_setting = link->verified_link_cap;
- return;
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
}
- /* EDP use the link cap setting */
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ return false;
+}
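The loop above walks candidates from cheapest to most capable: for a fixed link rate it widens the lane count first, and only when the lane count is exhausted does it step the link rate and drop back to one lane. A standalone sketch of that visiting order, assuming the usual 1/2/4 lane steps and RBR/HBR/HBR2/HBR3 rate steps (the real steps come from increase_lane_count() and increase_link_rate(), which are outside this hunk):

#include <stdio.h>

/* Sketch only: prints candidate link settings in the same order that
 * decide_dp_link_settings() tries them, up to an assumed verified cap
 * of 4 lanes at HBR3.
 */
int main(void)
{
	static const int lanes[] = { 1, 2, 4 };
	static const char *const rates[] = { "RBR", "HBR", "HBR2", "HBR3" };
	int r, l;

	for (r = 0; r < 4; r++)
		for (l = 0; l < 3; l++)
			printf("try %d lane(s) @ %s\n", lanes[l], rates[r]);
	return 0;
}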
+
+static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ link->dc->config.optimize_edp_link_rate == false) {
*link_setting = link->verified_link_cap;
- return;
+ return true;
}
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
- link_bw = bandwidth_in_kbps_from_link_settings(
+ link_bw = dc_link_bandwidth_kbps(
+ link,
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
- return;
+ return true;
}
if (current_link_setting.lane_count <
@@ -1689,13 +1658,53 @@ void decide_link_settings(struct dc_stream_state *stream,
increase_lane_count(
current_link_setting.lane_count);
} else {
- current_link_setting.link_rate =
- increase_link_rate(
- current_link_setting.link_rate);
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
}
}
+ return false;
+}
+
+void decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+ struct dc_link *link;
+ uint32_t req_bw;
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ link = stream->link;
+
+ /* If a preferred link setting is specified through AMDDP and it is enough
+ * to drive the mode, use it
+ */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (decide_edp_link_settings(link, link_setting, req_bw))
+ return;
+ } else if (decide_dp_link_settings(link, link_setting, req_bw))
+ return;
BREAK_TO_DEBUGGER();
ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
@@ -2155,11 +2164,7 @@ bool is_mst_supported(struct dc_link *link)
bool is_dp_active_dongle(const struct dc_link *link)
{
- enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
-
- return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
- (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
- (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+ return link->dpcd_caps.is_branch_dev;
}
static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
@@ -2180,6 +2185,30 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
return -1;
}
+static void read_dp_device_vendor_id(struct dc_link *link)
+{
+ struct dp_device_vendor_id dp_id;
+
+ /* read IEEE branch device id */
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_OUI,
+ (uint8_t *)&dp_id,
+ sizeof(dp_id));
+
+ link->dpcd_caps.branch_dev_id =
+ (dp_id.ieee_oui[0] << 16) +
+ (dp_id.ieee_oui[1] << 8) +
+ dp_id.ieee_oui[2];
+
+ memmove(
+ link->dpcd_caps.branch_dev_name,
+ dp_id.ieee_device_id,
+ sizeof(dp_id.ieee_device_id));
+}
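read_dp_device_vendor_id() packs the three IEEE OUI bytes big-endian into branch_dev_id. A tiny sketch with made-up OUI bytes (the values below are not from any real dongle):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ieee_oui[3] = { 0x00, 0x1A, 0x2B }; /* hypothetical OUI */
	uint32_t branch_dev_id = (ieee_oui[0] << 16) +
				 (ieee_oui[1] << 8) +
				  ieee_oui[2];

	printf("branch_dev_id = 0x%06X\n", branch_dev_id); /* 0x001A2B */
	return 0;
}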
+
+
+
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
@@ -2193,6 +2222,9 @@ static void get_active_converter_info(
return;
}
+ /* DPCD 0x5 bit 0 = 1 indicates this is a branch device */
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2234,8 +2266,8 @@ static void get_active_converter_info(
hdmi_caps = {.raw = det_caps[3] };
union dwnstream_port_caps_byte2
hdmi_color_caps = {.raw = det_caps[2] };
- link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
- det_caps[1] * 25000;
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
+ det_caps[1] * 2500;
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
@@ -2252,7 +2284,7 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
- if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
@@ -2263,27 +2295,6 @@ static void get_active_converter_info(
ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
{
- struct dp_device_vendor_id dp_id;
-
- /* read IEEE branch device id */
- core_link_read_dpcd(
- link,
- DP_BRANCH_OUI,
- (uint8_t *)&dp_id,
- sizeof(dp_id));
-
- link->dpcd_caps.branch_dev_id =
- (dp_id.ieee_oui[0] << 16) +
- (dp_id.ieee_oui[1] << 8) +
- dp_id.ieee_oui[2];
-
- memmove(
- link->dpcd_caps.branch_dev_name,
- dp_id.ieee_device_id,
- sizeof(dp_id.ieee_device_id));
- }
-
- {
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
core_link_read_dpcd(
@@ -2347,6 +2358,10 @@ static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+ /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
+ */
+ uint8_t dpcd_dprx_data = '\0';
+
struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
@@ -2383,7 +2398,10 @@ static bool retrieve_link_cap(struct dc_link *link)
aux_rd_interval.raw =
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
- if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+ link->dpcd_caps.ext_receiver_cap_field_present =
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false;
+
+ if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
uint8_t ext_cap_data[16];
memset(ext_cap_data, '\0', sizeof(ext_cap_data));
@@ -2404,11 +2422,44 @@ static bool retrieve_link_cap(struct dc_link *link)
}
link->dpcd_caps.dpcd_rev.raw =
- dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (link->dpcd_caps.dpcd_rev.raw >= 0x14) {
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dpcd_dprx_data,
+ sizeof(dpcd_dprx_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
+
+ if (status != DC_OK)
+ dm_error("%s: Read DPRX caps data failed.\n", __func__);
+ }
+
+ else {
+ link->dpcd_caps.dprx_feature.raw = 0;
+ }
+
+
+ /* Error condition checking...
+ * It is impossible for Sink to report Max Lane Count = 0.
+ * It is possible for Sink to report Max Link Rate = 0, if it is
+ * an eDP device that is reporting specialized link rates in the
+ * SUPPORTED_LINK_RATE table.
+ */
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
DP_DPCD_REV];
+ read_dp_device_vendor_id(link);
+
get_active_converter_info(ds_port.byte, link);
dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -2536,31 +2587,31 @@ enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
void detect_edp_sink_caps(struct dc_link *link)
{
- uint8_t supported_link_rates[16] = {0};
+ uint8_t supported_link_rates[16];
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
retrieve_link_cap(link);
+ link->dpcd_caps.edp_supported_link_rates_count = 0;
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dc->config.optimize_edp_link_rate) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
- link->dpcd_caps.link_rate_set = 0;
for (entry = 0; entry < 16; entry += 2) {
// DPCD register reports per-lane link rate = 16-bit link rate capability
- // value X 200 kHz. Need multipler to find link rate in kHz.
+ // value X 200 kHz. Need multiplier to find link rate in kHz.
link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
supported_link_rates[entry]) * 200;
if (link_rate_in_khz != 0) {
link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
- if (link->reported_link_cap.link_rate < link_rate) {
- link->reported_link_cap.link_rate = link_rate;
- link->dpcd_caps.link_rate_set = entry;
- }
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
}
}
}
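Each entry of DP_SUPPORTED_LINK_RATES read above is a little-endian 16-bit value in units of 200 kHz, and a zero entry marks an unused slot. A standalone sketch decoding a hypothetical table (the rates chosen here are merely plausible eDP values, not read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1.62 GHz, 2.16 GHz, 2.7 GHz, remaining entries unused */
	uint8_t supported_link_rates[16] = {
		0xA4, 0x1F, /* 8100  * 200 kHz = 1,620,000 kHz */
		0x30, 0x2A, /* 10800 * 200 kHz = 2,160,000 kHz */
		0xBC, 0x34, /* 13500 * 200 kHz = 2,700,000 kHz */
	};
	unsigned int entry;

	for (entry = 0; entry < 16; entry += 2) {
		uint32_t khz = (supported_link_rates[entry + 1] * 0x100 +
				supported_link_rates[entry]) * 200;

		if (khz != 0)
			printf("entry %u: %u kHz\n", entry / 2, khz);
	}
	return 0;
}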
@@ -2601,6 +2652,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
enum dc_color_depth color_depth = pipe_ctx->
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
memset(&params, 0, sizeof(params));
@@ -2640,8 +2692,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
{
/* disable bit depth reduction */
pipe_ctx->stream->bit_depth_params = params;
- pipe_ctx->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
@@ -2650,11 +2701,9 @@ static void set_crtc_test_pattern(struct dc_link *link,
case DP_TEST_PATTERN_VIDEO_MODE:
{
/* restore bitdepth reduction */
- resource_build_bit_depth_reduction_params(pipe_ctx->stream,
- &params);
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
pipe_ctx->stream->bit_depth_params = params;
- pipe_ctx->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index f7f7515f65f4..b0dea759cd86 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -58,6 +58,8 @@ void dp_enable_link_phy(
const struct dc_link_settings *link_settings)
{
struct link_encoder *link_enc = link->link_enc;
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
struct pipe_ctx *pipes =
link->dc->current_state->res_ctx.pipe_ctx;
@@ -84,6 +86,9 @@ void dp_enable_link_phy(
}
}
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
link_enc,
@@ -95,6 +100,10 @@ void dp_enable_link_phy(
link_settings,
clock_source);
}
+
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
link->cur_link_settings = *link_settings;
dp_receiver_power_ctrl(link, true);
@@ -150,15 +159,25 @@ bool edp_receiver_ready_T7(struct dc_link *link)
void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
if (signal == SIGNAL_TYPE_EDP) {
link->link_enc->funcs->disable_output(link->link_enc, signal);
link->dc->hwss.edp_power_control(link, false);
- } else
+ } else {
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
link->link_enc->funcs->disable_output(link->link_enc, signal);
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+ }
+
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 349ab8017776..eac7186e4f08 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -31,6 +31,8 @@
#include "opp.h"
#include "timing_generator.h"
#include "transform.h"
+#include "dccg.h"
+#include "dchubbub.h"
#include "dpp.h"
#include "core_types.h"
#include "set_mode_types.h"
@@ -104,44 +106,43 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
return dc_version;
}
-struct resource_pool *dc_create_resource_pool(
- struct dc *dc,
- int num_virtual_links,
- enum dce_version dc_version,
- struct hw_asic_id asic_id)
+struct resource_pool *dc_create_resource_pool(struct dc *dc,
+ const struct dc_init_data *init_data,
+ enum dce_version dc_version)
{
struct resource_pool *res_pool = NULL;
switch (dc_version) {
case DCE_VERSION_8_0:
res_pool = dce80_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_8_1:
res_pool = dce81_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_8_3:
res_pool = dce83_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_10_0:
res_pool = dce100_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_11_0:
res_pool = dce110_create_resource_pool(
- num_virtual_links, dc, asic_id);
+ init_data->num_virtual_links, dc,
+ init_data->asic_id);
break;
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
res_pool = dce112_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
res_pool = dce120_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -149,8 +150,7 @@ struct resource_pool *dc_create_resource_pool(
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
case DCN_VERSION_1_01:
#endif
- res_pool = dcn10_create_resource_pool(
- num_virtual_links, dc);
+ res_pool = dcn10_create_resource_pool(init_data, dc);
break;
#endif
@@ -163,7 +163,28 @@ struct resource_pool *dc_create_resource_pool(
if (dc->ctx->dc_bios->funcs->get_firmware_info(
dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
- res_pool->ref_clock_inKhz = fw_info.pll_info.crystal_frequency;
+ res_pool->ref_clocks.xtalin_clock_inKhz = fw_info.pll_info.crystal_frequency;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ // On FPGA these dividers are currently not configured by GDB
+ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ } else if (res_pool->dccg && res_pool->hubbub) {
+ // If DCCG reference frequency cannot be determined (usually means not set to xtalin) then this is a critical error
+ // as this value must be known for DCHUB programming
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ // Similarly, if DCHUB reference frequency cannot be determined, then it is also a critical error
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
} else
ASSERT_CRITICAL(false);
}
@@ -260,6 +281,7 @@ bool resource_construct(
pool->stream_enc_count++;
}
}
+
dc->caps.dynamic_audio = false;
if (pool->audio_count < pool->stream_enc_count) {
dc->caps.dynamic_audio = true;
@@ -1014,24 +1036,60 @@ enum dc_status resource_build_scaling_params_for_context(
struct pipe_ctx *find_idle_secondary_pipe(
struct resource_context *res_ctx,
- const struct resource_pool *pool)
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
{
int i;
struct pipe_ctx *secondary_pipe = NULL;
/*
- * search backwards for the second pipe to keep pipe
- * assignment more consistent
+ * We add a preferred pipe mapping to avoid the chance that
+ * MPCCs already in use will need to be reassigned to other trees.
+ * For example, if we went with the strict, assign backwards logic:
+ *
+ * (State 1)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, no surface, top pipe = 1
+ *
+ * (State 2)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 5
+ *
+ * (State 3)
+ * Display A on, surface enable, top pipe = 0, bottom pipe = 5
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 4
+ *
+ * The state 2->3 transition requires remapping MPCC 5 from display B
+ * to display A.
+ *
+ * However, with the preferred pipe logic, state 2 would look like:
+ *
+ * (State 2)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 4
+ *
+ * This would then cause 2->3 to not require remapping any MPCCs.
*/
-
- for (i = pool->pipe_count - 1; i >= 0; i--) {
- if (res_ctx->pipe_ctx[i].stream == NULL) {
- secondary_pipe = &res_ctx->pipe_ctx[i];
- secondary_pipe->pipe_idx = i;
- break;
+ if (primary_pipe) {
+ int preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
}
}
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (!secondary_pipe)
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
return secondary_pipe;
}
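The preferred mapping boils down to mirroring the primary pipe index from the other end of the pipe array; only if that mirrored pipe is busy does the old reverse scan run. A quick sketch of the pairing for a 6-pipe pool, which reproduces the top pipe 1 / bottom pipe 4 pairing from the State 2 example above:

#include <stdio.h>

int main(void)
{
	int pipe_count = 6; /* assumed pool size for the example */
	int primary;

	for (primary = 0; primary < pipe_count; primary++)
		printf("primary %d -> preferred secondary %d\n",
		       primary, (pipe_count - 1) - primary);
	return 0;
}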
@@ -1214,6 +1272,9 @@ bool dc_add_plane_to_context(
free_pipe->clock_source = tail_pipe->clock_source;
free_pipe->top_pipe = tail_pipe;
tail_pipe->bottom_pipe = free_pipe;
+ } else if (free_pipe->bottom_pipe && free_pipe->bottom_pipe->plane_state == NULL) {
+ ASSERT(free_pipe->bottom_pipe->stream_res.opp != free_pipe->stream_res.opp);
+ free_pipe->bottom_pipe->plane_state = plane_state;
}
/* assign new surfaces*/
@@ -1224,6 +1285,35 @@ bool dc_add_plane_to_context(
return true;
}
+struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *bottom_pipe = pipe_ctx->bottom_pipe;
+
+ /* ODM should only be updated once per otg */
+ if (pipe_ctx->top_pipe)
+ return NULL;
+
+ while (bottom_pipe) {
+ if (bottom_pipe->stream_res.opp != pipe_ctx->stream_res.opp)
+ break;
+ bottom_pipe = bottom_pipe->bottom_pipe;
+ }
+
+ return bottom_pipe;
+}
+
+bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *top_pipe = pipe_ctx->top_pipe;
+
+ if (!top_pipe)
+ return false;
+ if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp)
+ return false;
+
+ return true;
+}
+
bool dc_remove_plane_from_context(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -1247,10 +1337,14 @@ bool dc_remove_plane_from_context(
/* release pipe for plane*/
for (i = pool->pipe_count - 1; i >= 0; i--) {
- struct pipe_ctx *pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (context->res_ctx.pipe_ctx[i].plane_state == plane_state) {
- pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->plane_state == plane_state) {
+ if (dc_res_is_odm_head_pipe(pipe_ctx)) {
+ pipe_ctx->plane_state = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ continue;
+ }
if (pipe_ctx->top_pipe)
pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
@@ -1268,8 +1362,9 @@ bool dc_remove_plane_from_context(
*/
if (!pipe_ctx->top_pipe) {
pipe_ctx->plane_state = NULL;
- pipe_ctx->bottom_pipe = NULL;
- } else {
+ if (!dc_res_get_odm_bottom_pipe(pipe_ctx))
+ pipe_ctx->bottom_pipe = NULL;
+ } else {
memset(pipe_ctx, 0, sizeof(*pipe_ctx));
}
}
@@ -1674,6 +1769,9 @@ enum dc_status dc_remove_stream_from_ctx(
for (i = 0; i < MAX_PIPES; i++) {
if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
!new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+ struct pipe_ctx *odm_pipe =
+ dc_res_get_odm_bottom_pipe(&new_ctx->res_ctx.pipe_ctx[i]);
+
del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
ASSERT(del_pipe->stream_res.stream_enc);
@@ -1698,6 +1796,8 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
memset(del_pipe, 0, sizeof(*del_pipe));
+ if (odm_pipe)
+ memset(odm_pipe, 0, sizeof(*odm_pipe));
break;
}
@@ -1855,6 +1955,7 @@ enum dc_status resource_map_pool_resources(
struct dc_context *dc_ctx = dc->ctx;
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
/* TODO Check if this is needed */
/*if (!resource_is_stream_unchanged(old_context, stream)) {
@@ -1869,6 +1970,13 @@ enum dc_status resource_map_pool_resources(
calculate_phy_pix_clks(stream);
+ /* TODO: Check Linux */
+ if (dc->config.allow_seamless_boot_optimization &&
+ !dcb->funcs->is_accelerated_mode(dcb)) {
+ if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+ stream->apply_seamless_boot_optimization = true;
+ }
+
if (stream->apply_seamless_boot_optimization)
pipe_idx = acquire_resource_from_hw_enabled_state(
&context->res_ctx,
@@ -1951,7 +2059,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dccg = dc->res_pool->clk_mgr;
+ dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
}
/**
@@ -1959,12 +2067,14 @@ void dc_resource_state_construct(
* Checks HW resource availability and bandwidth requirement.
* @dc: dc struct for this driver
* @new_ctx: state to be validated
+ * @fast_validate: set to true if only yes/no to support matters
*
* Return: DC_OK if the result can be programmed. Otherwise, an error code.
*/
enum dc_status dc_validate_global_state(
struct dc *dc,
- struct dc_state *new_ctx)
+ struct dc_state *new_ctx,
+ bool fast_validate)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
@@ -2019,7 +2129,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
return result;
@@ -2315,6 +2425,21 @@ static void set_spd_info_packet(
*info_packet = stream->vrr_infopacket;
}
+static void set_dp_sdp_info_packet(
+ struct dc_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ /* SPD info packet for custom sdp message */
+
+ /* If the custom SDP info packet is not valid, return;
+ * otherwise copy it to the output info packet
+ */
+ if (!stream->dpsdp_infopacket.valid)
+ return;
+
+ *info_packet = stream->dpsdp_infopacket;
+}
+
static void set_hdr_static_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
@@ -2411,6 +2536,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->spd.valid = false;
info->hdrsmd.valid = false;
info->vsc.valid = false;
+ info->dpsdp.valid = false;
signal = pipe_ctx->stream->signal;
@@ -2430,6 +2556,8 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
+
+ set_dp_sdp_info_packet(&info->dpsdp, pipe_ctx->stream);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -2657,10 +2785,11 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
if (!tg->funcs->validate_timing(tg, &stream->timing))
res = DC_FAIL_CONTROLLER_VALIDATE;
- if (res == DC_OK)
+ if (res == DC_OK) {
if (!link->link_enc->funcs->validate_output_with_stream(
link->link_enc, stream))
res = DC_FAIL_ENC_VALIDATE;
+ }
/* TODO: validate audio ASIC caps, encoder */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 996298c35f42..96e97d25d639 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -29,6 +29,9 @@
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/dcn10_hw_sequencer.h"
+#endif
#define DC_LOGGER dc->ctx->logger
@@ -160,6 +163,27 @@ struct dc_stream_state *dc_create_stream_for_sink(
return stream;
}
+struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
+{
+ struct dc_stream_state *new_stream;
+
+ new_stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
+ if (!new_stream)
+ return NULL;
+
+ memcpy(new_stream, stream, sizeof(struct dc_stream_state));
+
+ if (new_stream->sink)
+ dc_sink_retain(new_stream->sink);
+
+ if (new_stream->out_transfer_func)
+ dc_transfer_func_retain(new_stream->out_transfer_func);
+
+ kref_init(&new_stream->refcount);
+
+ return new_stream;
+}
+
/**
* dc_stream_get_status_from_state - Get stream status from given dc state
* @state: DC state to find the stream status in
@@ -196,6 +220,35 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
+static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ unsigned int vupdate_line;
+ unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int us_per_line;
+
+ if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
+ ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
+
+ vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
+ return;
+
+ if (vpos >= vupdate_line)
+ return;
+
+ us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
+ lines_to_vupdate = vupdate_line - vpos;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ /* 70 us is a conservative estimate of cursor update time*/
+ if (us_to_vupdate < 70)
+ udelay(us_to_vupdate);
+ }
+#endif
+}
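delay_cursor_until_vupdate() converts the lines remaining before VUPDATE into microseconds with us_per_line = h_total * 10000 / pix_clk_100hz and only busy-waits when that window is below the ~70 us cursor-update estimate. A worked check with made-up 1080p timing (h_total 2200, 148.5 MHz pixel clock):

#include <stdio.h>

/* Sketch only: same integer math as delay_cursor_until_vupdate(),
 * with hypothetical timing values.
 */
int main(void)
{
	unsigned int h_total = 2200, pix_clk_100hz = 1485000;
	unsigned int us_per_line = h_total * 10000 / pix_clk_100hz; /* 14 us */
	unsigned int lines_to_vupdate = 3;
	unsigned int us_to_vupdate = lines_to_vupdate * us_per_line; /* 42 us */

	if (us_to_vupdate < 70)
		printf("would udelay(%u)\n", us_to_vupdate);
	return 0;
}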
+
/**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
@@ -234,6 +287,8 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
+
+ delay_cursor_until_vupdate(pipe_ctx, core_dc);
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}
@@ -278,11 +333,13 @@ bool dc_stream_set_cursor_position(
(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
!pipe_ctx->plane_state ||
(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
- !pipe_ctx->plane_res.ipp)
+ (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp))
continue;
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
+
+ delay_cursor_until_vupdate(pipe_ctx, core_dc);
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}
@@ -314,6 +371,68 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
return 0;
}
+static void build_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ uint8_t i;
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* set valid info */
+ info->dpsdp.valid = true;
+
+ /* set sdp message header */
+ info->dpsdp.hb0 = custom_sdp_message[0]; /* sdp packet id */
+ info->dpsdp.hb1 = custom_sdp_message[1]; /* sdp packet type */
+ info->dpsdp.hb2 = custom_sdp_message[2]; /* sdp packet-specific data byte 0 */
+ info->dpsdp.hb3 = custom_sdp_message[3]; /* sdp packet-specific data byte 1 */
+
+ /* set sdp message data */
+ for (i = 0; i < 32; i++)
+ info->dpsdp.sb[i] = (custom_sdp_message[i+4]);
+
+}
+
+static void invalid_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* invalidate the info packet */
+ info->dpsdp.valid = false;
+}
+
+bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ build_dp_sdp_info_frame(pipe_ctx, custom_sdp_message, sdp_message_size);
+
+ core_dc->hwss.update_info_frame(pipe_ctx);
+
+ invalid_dp_sdp_info_frame(pipe_ctx);
+ }
+
+ return true;
+}
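build_dp_sdp_info_frame() consumes a 36-byte buffer: bytes 0..3 become HB0..HB3 and bytes 4..35 become the 32 payload bytes, so callers of dc_stream_send_dp_sdp() should pass at least 36 bytes. A sketch of that layout with placeholder header values (not a real SDP):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t custom_sdp_message[36] = { 0 };

	custom_sdp_message[0] = 0x00; /* HB0: SDP ID (placeholder) */
	custom_sdp_message[1] = 0x07; /* HB1: SDP type (placeholder) */
	custom_sdp_message[2] = 0x00; /* HB2 */
	custom_sdp_message[3] = 0x1F; /* HB3 */
	/* bytes 4..35 become info->dpsdp.sb[0..31] */

	printf("header %02x %02x %02x %02x, %zu payload bytes\n",
	       custom_sdp_message[0], custom_sdp_message[1],
	       custom_sdp_message[2], custom_sdp_message[3],
	       sizeof(custom_sdp_message) - 4);
	return 0;
}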
+
bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ee6bd50f60b8..a5e86f9b148f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -119,6 +119,19 @@ const struct dc_plane_status *dc_plane_get_status(
if (core_dc->current_state == NULL)
return NULL;
+ /* Find the current plane state and set its pending bit to false */
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx =
+ &core_dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ pipe_ctx->plane_state->status.is_flip_pending = false;
+
+ break;
+ }
+
for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&core_dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1a7fd6aa77eb..44e4b0465587 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,9 +39,10 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.17"
+#define DC_VER "3.2.27"
#define MAX_SURFACES 3
+#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MAX_SINKS_PER_LINK 4
@@ -53,6 +54,41 @@ struct dc_versions {
struct dmcu_version dmcu_version;
};
+enum dc_plane_type {
+ DC_PLANE_TYPE_INVALID,
+ DC_PLANE_TYPE_DCE_RGB,
+ DC_PLANE_TYPE_DCE_UNDERLAY,
+ DC_PLANE_TYPE_DCN_UNIVERSAL,
+};
+
+struct dc_plane_cap {
+ enum dc_plane_type type;
+ uint32_t blends_with_above : 1;
+ uint32_t blends_with_below : 1;
+ uint32_t per_pixel_alpha : 1;
+ struct {
+ uint32_t argb8888 : 1;
+ uint32_t nv12 : 1;
+ uint32_t fp16 : 1;
+ } pixel_format_support;
+ // max upscaling factor x1000
+ // upscaling factors are always >= 1
+ // for example, 1080p -> 8K is 4.0, or 4000 raw value
+ struct {
+ uint32_t argb8888;
+ uint32_t nv12;
+ uint32_t fp16;
+ } max_upscale_factor;
+ // max downscale factor x1000
+ // downscale factors are always <= 1
+ // for example, 8K -> 1080p is 0.25, or 250 raw value
+ struct {
+ uint32_t argb8888;
+ uint32_t nv12;
+ uint32_t fp16;
+ } max_downscale_factor;
+};
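Both factor tables in dc_plane_cap use a fixed-point scale of 1000, so the examples in the comments decode as 4000 = 4.000x and 250 = 0.250x. A one-line check:

#include <stdio.h>

int main(void)
{
	unsigned int max_upscale_factor = 4000;  /* 1080p -> 8K */
	unsigned int max_downscale_factor = 250; /* 8K -> 1080p */

	printf("upscale   %u.%03ux\n", max_upscale_factor / 1000,
	       max_upscale_factor % 1000);
	printf("downscale %u.%03ux\n", max_downscale_factor / 1000,
	       max_downscale_factor % 1000);
	return 0;
}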
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -73,6 +109,7 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
+ struct dc_plane_cap planes[MAX_PLANES];
};
struct dc_dcc_surface_param {
@@ -164,6 +201,10 @@ struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
bool fbc_support;
+ bool optimize_edp_link_rate;
+ bool disable_fractional_pwm;
+ bool allow_seamless_boot_optimization;
+ bool power_down_display_on_boot;
};
enum visual_confirm {
@@ -203,7 +244,59 @@ struct dc_clocks {
int fclk_khz;
int phyclk_khz;
int dramclk_khz;
-};
+ bool p_state_change_support;
+};
+
+struct dc_bw_validation_profile {
+ bool enable;
+
+ unsigned long long total_ticks;
+ unsigned long long voltage_level_ticks;
+ unsigned long long watermark_ticks;
+ unsigned long long rq_dlg_ticks;
+
+ unsigned long long total_count;
+ unsigned long long skip_fast_count;
+ unsigned long long skip_pass_count;
+ unsigned long long skip_fail_count;
+};
+
+#define BW_VAL_TRACE_SETUP() \
+ unsigned long long end_tick = 0; \
+ unsigned long long voltage_level_tick = 0; \
+ unsigned long long watermark_tick = 0; \
+ unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \
+ dm_get_timestamp(dc->ctx) : 0
+
+#define BW_VAL_TRACE_COUNT() \
+ if (dc->debug.bw_val_profile.enable) \
+ dc->debug.bw_val_profile.total_count++
+
+#define BW_VAL_TRACE_SKIP(status) \
+ if (dc->debug.bw_val_profile.enable) { \
+ if (!voltage_level_tick) \
+ voltage_level_tick = dm_get_timestamp(dc->ctx); \
+ dc->debug.bw_val_profile.skip_ ## status ## _count++; \
+ }
+
+#define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \
+ if (dc->debug.bw_val_profile.enable) \
+ voltage_level_tick = dm_get_timestamp(dc->ctx)
+
+#define BW_VAL_TRACE_END_WATERMARKS() \
+ if (dc->debug.bw_val_profile.enable) \
+ watermark_tick = dm_get_timestamp(dc->ctx)
+
+#define BW_VAL_TRACE_FINISH() \
+ if (dc->debug.bw_val_profile.enable) { \
+ end_tick = dm_get_timestamp(dc->ctx); \
+ dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \
+ dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \
+ if (watermark_tick) { \
+ dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \
+ dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \
+ } \
+ }
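These macros are intended to bracket a validate_bandwidth() implementation: SETUP and COUNT on entry, SKIP(fast/pass/fail) on the various early-outs, END_VOLTAGE_LEVEL and END_WATERMARKS after the respective phases, and FINISH before returning. The toy program below only mimics the COUNT/SKIP token-pasting pattern with a simplified profile struct; which counter maps to which early-out is made up here purely for illustration:

#include <stdio.h>

static struct {
	int enable;
	unsigned long long total_count, skip_fast_count,
			   skip_pass_count, skip_fail_count;
} profile = { .enable = 1 };

#define TRACE_COUNT()       do { if (profile.enable) profile.total_count++; } while (0)
#define TRACE_SKIP(status)  do { if (profile.enable) profile.skip_ ## status ## _count++; } while (0)

static int validate_bandwidth(int fast_validate, int supported)
{
	TRACE_COUNT();
	if (fast_validate) {
		TRACE_SKIP(fast); /* stop after the yes/no answer */
		return supported;
	}
	if (!supported) {
		TRACE_SKIP(fail);
		return 0;
	}
	TRACE_SKIP(pass);
	return 1;
}

int main(void)
{
	validate_bandwidth(1, 1);
	validate_bandwidth(0, 0);
	printf("total=%llu fast=%llu fail=%llu\n", profile.total_count,
	       profile.skip_fast_count, profile.skip_fail_count);
	return 0;
}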
struct dc_debug_options {
enum visual_confirm visual_confirm;
@@ -257,6 +350,8 @@ struct dc_debug_options {
bool skip_detection_link_training;
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz;
+ bool disable_tri_buf;
+ struct dc_bw_validation_profile bw_val_profile;
};
struct dc_debug_data {
@@ -265,6 +360,14 @@ struct dc_debug_data {
uint32_t auxErrorCount;
};
+struct dc_bounding_box_overrides {
+ int sr_exit_time_ns;
+ int sr_enter_plus_exit_time_ns;
+ int urgent_latency_ns;
+ int percent_of_ideal_drambw;
+ int dram_clock_change_latency_ns;
+};
+
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@@ -274,6 +377,7 @@ struct dc {
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_debug_options debug;
+ struct dc_bounding_box_overrides bb_overrides;
struct dc_context *ctx;
uint8_t link_count;
@@ -298,8 +402,12 @@ struct dc {
struct hw_sequencer_funcs hwss;
struct dce_hwseq *hwseq;
+ /* Required to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ /* Required to maintain clocks and bandwidth for UEFI enabled HW */
+ bool optimize_seamless_boot;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -327,6 +435,7 @@ struct dc_init_data {
struct hw_asic_id asic_id;
void *driver; /* ctx */
struct cgs_device *cgs_device;
+ struct dc_bounding_box_overrides bb_overrides;
int num_virtual_links;
/*
@@ -503,6 +612,9 @@ struct dc_plane_state {
struct dc_plane_status status;
struct dc_context *ctx;
+ /* HACK: Workaround for forcing full reprogramming under some conditions */
+ bool force_full_update;
+
/* private to dc_surface.c */
enum dc_irq_source irq_source;
struct kref refcount;
@@ -594,7 +706,7 @@ struct dc_validation_set {
uint8_t plane_count;
};
-bool dc_validate_seamless_boot_timing(struct dc *dc,
+bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing);
@@ -602,9 +714,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
+/*
+ * fast_validate: we return after determining if we can support the new state,
+ * but before we populate the programming info
+ */
enum dc_status dc_validate_global_state(
struct dc *dc,
- struct dc_state *new_ctx);
+ struct dc_state *new_ctx,
+ bool fast_validate);
void dc_resource_state_construct(
@@ -633,7 +750,8 @@ void dc_resource_state_destruct(struct dc_state *context);
bool dc_commit_state(struct dc *dc, struct dc_state *context);
-struct dc_state *dc_create_state(void);
+struct dc_state *dc_create_state(struct dc *dc);
+struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);
@@ -645,9 +763,16 @@ struct dpcd_caps {
union dpcd_rev dpcd_rev;
union max_lane_count max_ln_count;
union max_down_spread max_down_spread;
+ union dprx_feature dprx_feature;
+
+ /* valid only for eDP v1.4 or higher*/
+ uint8_t edp_supported_link_rates_count;
+ enum dc_link_rate edp_supported_link_rates[8];
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
+ /* branch device or sink device */
+ bool is_branch_dev;
/* Dongle's downstream count. */
union sink_count sink_count;
/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
@@ -663,11 +788,11 @@ struct dpcd_caps {
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
int8_t branch_fw_revision[2];
- uint8_t link_rate_set;
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
bool dpcd_display_control_capable;
+ bool ext_receiver_cap_field_present;
};
#include "dc_link.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 05c8c31d8b31..4ef97f65e55d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -68,6 +68,8 @@ enum aux_transaction_reply {
AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+ AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK = 0x04,
+ AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER = 0x08,
AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index d4eab33c453b..11c68a399267 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -94,6 +94,8 @@ struct dc_link_settings {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;
enum dc_link_spread link_spread;
+ bool use_link_rate_set;
+ uint8_t link_rate_set;
};
struct dc_lane_settings {
@@ -420,10 +422,24 @@ union edp_configuration_cap {
uint8_t raw;
};
+union dprx_feature {
+ struct {
+ uint8_t GTC_CAP:1; // bit 0: DP 1.3+
+ uint8_t SST_SPLIT_SDP_CAP:1; // bit 1: DP 1.4
+ uint8_t AV_SYNC_CAP:1; // bit 2: DP 1.3+
+ uint8_t VSC_SDP_COLORIMETRY_SUPPORTED:1; // bit 3: DP 1.3+
+ uint8_t VSC_EXT_VESA_SDP_SUPPORTED:1; // bit 4: DP 1.4
+ uint8_t VSC_EXT_VESA_SDP_CHAINING_SUPPORTED:1; // bit 5: DP 1.4
+ uint8_t VSC_EXT_CEA_SDP_SUPPORTED:1; // bit 6: DP 1.4
+ uint8_t VSC_EXT_CEA_SDP_CHAINING_SUPPORTED:1; // bit 7: DP 1.4
+ } bits;
+ uint8_t raw;
+};
+
union training_aux_rd_interval {
struct {
uint8_t TRAINIG_AUX_RD_INTERVAL:7;
- uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
+ uint8_t EXT_RECEIVER_CAP_FIELD_PRESENT:1;
} bits;
uint8_t raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 597d38393379..5e6c5eff49cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -51,20 +51,16 @@ static inline void set_reg_field_value_masks(
field_value_mask->mask = field_value_mask->mask | mask;
}
-uint32_t generic_reg_update_ex(const struct dc_context *ctx,
- uint32_t addr, uint32_t reg_val, int n,
+static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
+ uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
- ...)
+ va_list ap)
{
- struct dc_reg_value_masks field_value_mask = {0};
uint32_t shift, mask, field_value;
int i = 1;
- va_list ap;
- va_start(ap, field_value1);
-
/* gather all bits value/mask getting updated in this register */
- set_reg_field_value_masks(&field_value_mask,
+ set_reg_field_value_masks(field_value_mask,
field_value1, mask1, shift1);
while (i < n) {
@@ -72,10 +68,48 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
mask = va_arg(ap, uint32_t);
field_value = va_arg(ap, uint32_t);
- set_reg_field_value_masks(&field_value_mask,
+ set_reg_field_value_masks(field_value_mask,
field_value, mask, shift);
i++;
}
+}
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ struct dc_reg_value_masks field_value_mask = {0};
+ uint32_t reg_val;
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+
+ va_end(ap);
+
+ /* mmio write directly */
+ reg_val = dm_read_reg(ctx, addr);
+ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+ dm_write_reg(ctx, addr, reg_val);
+ return reg_val;
+}
+
+uint32_t generic_reg_set_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ struct dc_reg_value_masks field_value_mask = {0};
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+
va_end(ap);
@@ -85,6 +119,24 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
return reg_val;
}
+uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name)
+{
+ uint32_t value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+ value = cgs_read_register(ctx->cgs_device, address);
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}
+
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
uint8_t shift, uint32_t mask, uint32_t *field_value)
{
@@ -235,7 +287,7 @@ uint32_t generic_reg_get(const struct dc_context *ctx,
}
*/
-uint32_t generic_reg_wait(const struct dc_context *ctx,
+void generic_reg_wait(const struct dc_context *ctx,
uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line)
@@ -265,7 +317,7 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
delay_between_poll_us * i / 1000,
func_name, line);
- return reg_val;
+ return;
}
}
@@ -275,8 +327,6 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
BREAK_TO_DEBUGGER();
-
- return reg_val;
}
void generic_write_indirect_reg(const struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8fc223defed4..7b9429e30d82 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -120,6 +120,7 @@ struct dc_link {
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -246,10 +247,18 @@ void dc_link_set_test_pattern(struct dc_link *link,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting);
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link);
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
struct i2c_command *cmd);
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing);
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 5657cb3a2ad3..189bdab929a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -80,6 +80,7 @@ struct dc_stream_state {
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
struct dc_info_packet vsp_infopacket;
+ struct dc_info_packet dpsdp_infopacket;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -221,6 +222,13 @@ struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
*/
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+/*
+ * Send dp sdp message.
+ */
+bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+
/* TODO: Return parsed values rather than direct register read
* This has a dependency on the caller (amdgpu_display_get_crtc_scanoutpos)
* being refactored properly to be dce-specific
@@ -299,6 +307,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
*/
struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream);
+
void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index da2009a108cf..6c2a3d9a4c2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -103,7 +103,7 @@ struct dc_context {
};
-#define DC_MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 1024
#define EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
@@ -395,7 +395,7 @@ struct dc_dongle_caps {
bool is_dp_hdmi_ycbcr422_converter;
bool is_dp_hdmi_ycbcr420_converter;
uint32_t dp_hdmi_max_bpc;
- uint32_t dp_hdmi_max_pixel_clk;
+ uint32_t dp_hdmi_max_pixel_clk_in_khz;
};
/* Scaling format */
enum scaling_transformation {
@@ -550,9 +550,9 @@ struct psr_config {
unsigned char psr_version;
unsigned int psr_rfb_setup_time;
bool psr_exit_link_training_required;
-
bool psr_frame_capture_indication_req;
unsigned int psr_sdp_transmit_line_num_deadline;
+ bool allow_smu_optimizations;
};
union dmcu_psr_level {
@@ -654,6 +654,7 @@ struct psr_context {
* continuing power down
*/
unsigned int frame_delay;
+ bool allow_smu_optimizations;
};
struct colorspace_transform {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 4febf4ef7240..bd33c47183fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -171,25 +171,31 @@ static void submit_channel_request(
(request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
if (REG(AUXN_IMPCAL)) {
/* clear_aux_error */
- REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXN_IMPCAL,
+ AUXN_CALOUT_ERROR_AK, 1,
+ AUXN_CALOUT_ERROR_AK, 0);
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+ AUXP_CALOUT_ERROR_AK, 1,
+ AUXP_CALOUT_ERROR_AK, 0);
/* force_default_calibrate */
- REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ REG_UPDATE_SEQ_2(AUXN_IMPCAL,
AUXN_IMPCAL_ENABLE, 1,
AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
/* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+ AUXP_IMPCAL_OVERRIDE_ENABLE, 1,
+ AUXP_IMPCAL_OVERRIDE_ENABLE, 0);
}
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+
/* set the delay and the number of bytes to write */
/* The length include
@@ -242,9 +248,6 @@ static void submit_channel_request(
}
}
- REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
- REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
- 10, aux110->timeout_period/10);
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
}
@@ -267,7 +270,7 @@ static int read_channel_reply(struct dce_aux *engine, uint32_t size,
if (!bytes_replied)
return -1;
- REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ REG_UPDATE_SEQ_3(AUX_SW_DATA,
AUX_SW_INDEX, 0,
AUX_SW_AUTOINCREMENT_DISABLE, 1,
AUX_SW_DATA_RW, 1);
@@ -317,9 +320,10 @@ static enum aux_channel_operation_result get_channel_status(
*returned_bytes = 0;
/* poll to make sure that SW_DONE is asserted */
- value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
+ value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
@@ -374,7 +378,6 @@ static bool acquire(
struct dce_aux *engine,
struct ddc *ddc)
{
-
enum gpio_result result;
if (!is_engine_available(engine))
@@ -439,12 +442,12 @@ static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payl
return I2CAUX_TRANSACTION_ACTION_DP_READ;
}
-int dce_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload)
+int dce_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result)
{
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine;
- enum aux_channel_operation_result operation_result;
struct aux_request_transaction_data aux_req;
struct aux_reply_transaction_data aux_rep;
uint8_t returned_bytes = 0;
@@ -455,7 +458,8 @@ int dce_aux_transfer(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- acquire(aux_engine, ddc_pin);
+ if (!acquire(aux_engine, ddc_pin))
+ return -1;
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -470,28 +474,26 @@ int dce_aux_transfer(struct ddc_service *ddc,
aux_req.data = payload->data;
submit_channel_request(aux_engine, &aux_req);
- operation_result = get_channel_status(aux_engine, &returned_bytes);
-
- switch (operation_result) {
- case AUX_CHANNEL_OPERATION_SUCCEEDED:
- res = read_channel_reply(aux_engine, payload->length,
- payload->data, payload->reply,
- &status);
- break;
- case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
- res = 0;
- break;
- case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
- case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
- case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ *operation_result = get_channel_status(aux_engine, &returned_bytes);
+
+ if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
+ read_channel_reply(aux_engine, payload->length,
+ payload->data, payload->reply,
+ &status);
+ res = returned_bytes;
+ } else {
res = -1;
- break;
}
+
release_engine(aux_engine);
return res;
}
-#define AUX_RETRY_MAX 7
+#define AUX_MAX_RETRIES 7
+#define AUX_MAX_DEFER_RETRIES 7
+#define AUX_MAX_I2C_DEFER_RETRIES 7
+#define AUX_MAX_INVALID_REPLY_RETRIES 2
+#define AUX_MAX_TIMEOUT_RETRIES 3
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
@@ -499,24 +501,85 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
int i, ret = 0;
uint8_t reply;
bool payload_reply = true;
+ enum aux_channel_operation_result operation_result;
+ int aux_ack_retries = 0,
+ aux_defer_retries = 0,
+ aux_i2c_defer_retries = 0,
+ aux_timeout_retries = 0,
+ aux_invalid_reply_retries = 0;
if (!payload->reply) {
payload_reply = false;
payload->reply = &reply;
}
- for (i = 0; i < AUX_RETRY_MAX; i++) {
- ret = dce_aux_transfer(ddc, payload);
-
- if (ret >= 0) {
- if (*payload->reply == 0) {
- if (!payload_reply)
- payload->reply = NULL;
- return true;
+ for (i = 0; i < AUX_MAX_RETRIES; i++) {
+ ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ aux_timeout_retries = 0;
+ aux_invalid_reply_retries = 0;
+
+ switch (*payload->reply) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ if (!payload->write && payload->length != ret) {
+ if (++aux_ack_retries >= AUX_MAX_RETRIES)
+ goto fail;
+ else
+ udelay(300);
+ } else
+ return true;
+ break;
+
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ goto fail;
+ break;
+
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ aux_defer_retries = 0;
+ if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES)
+ goto fail;
+ break;
+
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ default:
+ goto fail;
}
- }
+ break;
+
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES)
+ goto fail;
+ else
+ udelay(400);
+ break;
+
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+ goto fail;
+ else {
+ /*
+ * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
+ * According to the DP spec there should be 3 retries total
+ * with a 400us wait in between each. Hardware already waits
+ * for 550us, therefore no wait is required here.
+ */
+ }
+ break;
- udelay(1000);
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ default:
+ goto fail;
+ }
}
+
+fail:
+ if (!payload_reply)
+ payload->reply = NULL;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index d27f22c05e4b..ce6a26d189b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -71,11 +71,11 @@ enum { /* This is the timeout as defined in DP 1.2a,
* at most within ~240usec. That means,
* increasing this timeout will not affect normal operation,
* and we'll timeout after
- * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
* This timeout is especially important for
- * resume from S3 and CTS.
+ * converters, resume from S3, and CTS.
*/
- SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
};
struct dce_aux {
@@ -123,8 +123,9 @@ bool dce110_aux_engine_acquire(
struct dce_aux *aux_engine,
struct ddc *ddc);
-int dce_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *cmd);
+int dce_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *cmd,
+ enum aux_channel_operation_result *operation_result);
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 6e142c2db986..963686380738 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -222,7 +222,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
* all required clocks
*/
for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (context->bw.dce.dispclk_khz >
+ if (context->bw_ctx.bw.dce.dispclk_khz >
clk_mgr_dce->max_clks_by_state[i].display_clk_khz
|| max_pix_clk >
clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
@@ -232,7 +232,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
if (low_req_clk > clk_mgr_dce->max_clks_state) {
/* set max clock state for high phyclock, invalid on exceeding display clock */
if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
- < context->bw.dce.dispclk_khz)
+ < context->bw_ctx.bw.dce.dispclk_khz)
low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
else
low_req_clk = clk_mgr_dce->max_clks_state;
@@ -610,22 +610,22 @@ static void dce11_pplib_apply_display_requirements(
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->all_displays_in_sync =
- context->bw.dce.all_displays_in_sync;
+ context->bw_ctx.bw.dce.all_displays_in_sync;
pp_display_cfg->nb_pstate_switch_disable =
- context->bw.dce.nbp_state_change_enable == false;
+ context->bw_ctx.bw.dce.nbp_state_change_enable == false;
pp_display_cfg->cpu_cc6_disable =
- context->bw.dce.cpuc_state_change_enable == false;
+ context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
pp_display_cfg->cpu_pstate_disable =
- context->bw.dce.cpup_state_change_enable == false;
+ context->bw_ctx.bw.dce.cpup_state_change_enable == false;
pp_display_cfg->cpu_pstate_separation_time =
- context->bw.dce.blackout_recovery_time_us;
+ context->bw_ctx.bw.dce.blackout_recovery_time_us;
- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
/ MEMORY_TYPE_MULTIPLIER_CZ;
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
- context->bw.dce.sclk_khz);
+ context->bw_ctx.bw.dce.sclk_khz);
/*
* As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
@@ -638,7 +638,7 @@ static void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
- = context->bw.dce.sclk_deep_sleep_khz;
+ = context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
pp_display_cfg->avail_mclk_switch_time_us =
dce110_get_min_vblank_time_us(context);
@@ -669,7 +669,7 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -696,7 +696,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -711,7 +711,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
- context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+ context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
@@ -723,7 +723,7 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -751,7 +751,7 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr,
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 71d5777de961..f70437aae8e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -978,7 +978,7 @@ static bool dce110_clock_source_power_down(
}
static bool get_pixel_clk_frequency_100hz(
- struct clock_source *clock_source,
+ const struct clock_source *clock_source,
unsigned int inst,
unsigned int *pixel_clk_khz)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index c2926cf19dee..818536eea00a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -51,6 +51,9 @@
#define PSR_SET_WAITLOOP 0x31
#define MCP_INIT_DMCU 0x88
#define MCP_INIT_IRAM 0x89
+#define MCP_SYNC_PHY_LOCK 0x90
+#define MCP_SYNC_PHY_UNLOCK 0x91
+#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
static bool dce_dmcu_init(struct dmcu *dmcu)
@@ -213,9 +216,6 @@ static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
- if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
- REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
-
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
@@ -342,9 +342,32 @@ static void dcn10_get_dmcu_version(struct dmcu *dmcu)
IRAM_RD_ADDR_AUTO_INC, 0);
}
+static void dcn10_dmcu_enable_fractional_pwm(struct dmcu *dmcu,
+ uint32_t fractional_pwm)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ /* Wait until microcontroller is ready to process interrupt */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+ /* Set PWM fractional enable/disable */
+ REG_WRITE(MASTER_COMM_DATA_REG1, fractional_pwm);
+
+ /* Set command to enable or disable fractional PWM microcontroller */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ MCP_BL_SET_PWM_FRAC);
+
+ /* Notify microcontroller of new command */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Ensure command has been executed before continuing */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+}
+
static bool dcn10_dmcu_init(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
/* Definition of DC_DMCU_SCRATCH
@@ -382,9 +405,14 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
if (dmcu->dmcu_state == DMCU_RUNNING) {
/* Retrieve and cache the DMCU firmware version. */
dcn10_get_dmcu_version(dmcu);
+
+ /* Initialize DMCU to use fractional PWM or not */
+ dcn10_dmcu_enable_fractional_pwm(dmcu,
+ (config->disable_fractional_pwm == false) ? 1 : 0);
status = true;
- } else
+ } else {
status = false;
+ }
break;
case DMCU_RUNNING:
@@ -594,7 +622,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
- if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ if (psr_context->allow_smu_optimizations)
REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
/* waitDMCUReadyForCmd */
@@ -615,6 +643,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
psr_context->psrFrameCaptureIndicationReq;
masterCmdData1.bits.aux_chan = psr_context->channel;
masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ masterCmdData1.bits.allow_smu_optimizations = psr_context->allow_smu_optimizations;
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
masterCmdData1.u32All);
@@ -635,6 +664,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
masterCmdData3.u32All);
+
/* setDMCUParam_Cmd */
REG_UPDATE(MASTER_COMM_CMD_REG,
MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
@@ -691,7 +721,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
return true;
}
-#endif
+#endif //(CONFIG_DRM_AMD_DC_DCN1_0)
static const struct dmcu_funcs dce_funcs = {
.dmcu_init = dce_dmcu_init,
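dcn10_dmcu_enable_fractional_pwm() above follows the usual DMCU mailbox handshake: wait for the interrupt bit to clear, write the payload register, write the command byte, raise the interrupt, then wait for it to clear again. The sketch below shows that sequence in isolation; the reg_* helpers, register indices, and wait_for_clear() are illustrative stand-ins for the driver's REG_* macros, not real API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the register accessors used by the driver. */
static uint32_t regs[4];
enum { CNTL, DATA1, CMD };
#define INTERRUPT_BIT 0x1u

static void reg_write(int r, uint32_t v) { regs[r] = v; }
static uint32_t reg_read(int r) { return regs[r]; }

static void wait_for_clear(int r, uint32_t mask)
{
	/* In hardware this polls with a timeout; here the "microcontroller"
	 * acks immediately so the demo terminates. */
	while (reg_read(r) & mask)
		regs[r] &= ~mask;
}

static void send_mailbox_command(uint32_t cmd, uint32_t payload)
{
	wait_for_clear(CNTL, INTERRUPT_BIT);              /* controller ready */
	reg_write(DATA1, payload);                        /* parameter first  */
	reg_write(CMD, cmd);                              /* then the command */
	reg_write(CNTL, reg_read(CNTL) | INTERRUPT_BIT);  /* notify           */
	wait_for_clear(CNTL, INTERRUPT_BIT);              /* wait for done    */
}

int main(void)
{
	send_mailbox_command(0x6A /* MCP_BL_SET_PWM_FRAC */, 1);
	printf("fractional PWM request sent, payload=%u\n",
	       (unsigned)reg_read(DATA1));
	return 0;
}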
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index c24c0e5ea44e..60ce56f60ae3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -199,16 +199,16 @@ struct dce_dmcu {
******************************************************************/
union dce_dmcu_psr_config_data_reg1 {
struct {
- unsigned int timehyst_frames:8; /*[7:0]*/
- unsigned int hyst_lines:7; /*[14:8]*/
- unsigned int rfb_update_auto_en:1; /*[15:15]*/
- unsigned int dp_port_num:3; /*[18:16]*/
- unsigned int dcp_sel:3; /*[21:19]*/
- unsigned int phy_type:1; /*[22:22]*/
- unsigned int frame_cap_ind:1; /*[23:23]*/
- unsigned int aux_chan:3; /*[26:24]*/
- unsigned int aux_repeat:4; /*[30:27]*/
- unsigned int reserved:1; /*[31:31]*/
+ unsigned int timehyst_frames:8; /*[7:0]*/
+ unsigned int hyst_lines:7; /*[14:8]*/
+ unsigned int rfb_update_auto_en:1; /*[15:15]*/
+ unsigned int dp_port_num:3; /*[18:16]*/
+ unsigned int dcp_sel:3; /*[21:19]*/
+ unsigned int phy_type:1; /*[22:22]*/
+ unsigned int frame_cap_ind:1; /*[23:23]*/
+ unsigned int aux_chan:3; /*[26:24]*/
+ unsigned int aux_repeat:4; /*[30:27]*/
+ unsigned int allow_smu_optimizations:1; /*[31:31]*/
} bits;
unsigned int u32All;
};
@@ -236,7 +236,7 @@ union dce_dmcu_psr_config_data_reg3 {
struct {
unsigned int psr_level:16; /*[15:0]*/
unsigned int link_rate:4; /*[19:16]*/
- unsigned int reserved:12; /*[31:20]*/
+ unsigned int reserved:12; /*[31:20]*/
} bits;
unsigned int u32All;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 40f2d6e0b122..cd26161bcc4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -346,6 +346,16 @@ static void release_engine(
}
+static bool is_engine_available(struct dce_i2c_hw *dce_i2c_hw)
+{
+ unsigned int arbitrate;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate == DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY)
+ return false;
+ return true;
+}
+
struct dce_i2c_hw *acquire_i2c_hw_engine(
struct resource_pool *pool,
struct ddc *ddc)
@@ -368,7 +378,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
if (!dce_i2c_hw)
return NULL;
- if (pool->i2c_hw_buffer_in_use)
+ if (pool->i2c_hw_buffer_in_use || !is_engine_available(dce_i2c_hw))
return NULL;
do {
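With is_engine_available() added, the software acquire path now backs off whenever the arbitration status reports the I2C engine as DMCU-only, instead of contending with firmware for it. A guard-clause sketch of that shape, with hypothetical helpers standing in for the pool state and register read-back:

#include <stdbool.h>
#include <stdio.h>

enum arb_status { ARB_IDLE, ARB_USED_BY_SW, ARB_DMCU_ONLY };

/* Hypothetical stand-ins for the pool flag and arbitration read-back. */
static bool buffer_in_use;
static enum arb_status read_arbitration(void) { return ARB_DMCU_ONLY; }

static bool engine_available(void)
{
	return read_arbitration() != ARB_DMCU_ONLY;
}

static const char *acquire_engine(void)
{
	if (buffer_in_use || !engine_available())
		return NULL;	/* back off, firmware owns the engine */
	return "engine";
}

int main(void)
{
	printf("acquire: %s\n", acquire_engine() ? "ok" : "refused");
	return 0;
}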
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index 7f19bb439665..575500755b2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -29,7 +29,8 @@
enum dc_i2c_status {
DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
- DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW,
+ DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY = 2,
};
enum dc_i2c_arbitration {
@@ -129,7 +130,8 @@ enum {
I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
- I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
+ I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, mask_sh)
#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
@@ -170,6 +172,7 @@ struct dce_i2c_shift {
uint8_t DC_I2C_INDEX;
uint8_t DC_I2C_INDEX_WRITE;
uint8_t XTAL_REF_DIV;
+ uint8_t DC_I2C_REG_RW_CNTL_STATUS;
};
struct dce_i2c_mask {
@@ -207,6 +210,7 @@ struct dce_i2c_mask {
uint32_t DC_I2C_INDEX;
uint32_t DC_I2C_INDEX_WRITE;
uint32_t XTAL_REF_DIV;
+ uint32_t DC_I2C_REG_RW_CNTL_STATUS;
};
struct dce_i2c_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 1fa2d4fd7a35..14309fe6f2e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -272,7 +272,8 @@ static void dce110_update_hdmi_info_packet(
static void dce110_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space)
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting)
{
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
uint32_t h_active_start;
@@ -977,7 +978,7 @@ static void dce110_stream_encoder_dp_unblank(
uint64_t m_vid_l = n_vid;
- m_vid_l *= param->pixel_clk_khz;
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
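The unblank change above derives the pixel clock from the stream timing (pix_clk_100hz / 10) instead of a separately passed kHz field, but the M/N relationship is unchanged: M_vid = N_vid * pixel_clock / (link_rate * LINK_RATE_REF_FREQ_IN_KHZ). A small worked example, assuming the fixed N_vid of 0x8000 and the 27 MHz link-rate reference conventionally used by these encoders (neither value is shown in this hunk):

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000	/* assumed 27 MHz reference */

int main(void)
{
	uint64_t n_vid = 0x8000;		/* assumed fixed N */
	uint64_t pix_clk_khz = 1485000 / 10;	/* pix_clk_100hz / 10 for 148.5 MHz */
	uint32_t link_rate = 0x14;		/* HBR2 in 0.27 GHz units */

	uint64_t m_vid = n_vid * pix_clk_khz /
			 (link_rate * LINK_RATE_REF_FREQ_IN_KHZ);

	printf("M=%llu N=%llu (ratio %.6f)\n",
	       (unsigned long long)m_vid, (unsigned long long)n_vid,
	       (double)pix_clk_khz / (link_rate * LINK_RATE_REF_FREQ_IN_KHZ));
	return 0;
}

For this hypothetical 148.5 MHz stream on HBR2, the ratio is 148500 / 540000 = 0.275, so M comes out near 9011 for N = 32768.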
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 23044e6723e8..e938bf9986d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -378,6 +378,28 @@ static const struct resource_caps res_cap = {
.num_ddc = 6,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
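The plane_cap tables introduced across these resource files appear to express scaling limits in thousandths: 16000 would mean a 16x maximum upscale, 250 a minimum of 0.25x (quarter-size downscale), and 1 effectively no scaling support for that format, which lines up with NV12 and FP16 being marked unsupported on these DCE parts. This reading is inferred from the values; the hunk itself does not document the units.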
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -756,7 +778,8 @@ static enum dc_status build_mapped_resource(
bool dce100_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
int i;
bool at_least_one_pipe = false;
@@ -768,11 +791,11 @@ bool dce100_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw.dce.dispclk_khz = 0;
- context->bw.dce.yclk_khz = 0;
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;
@@ -1023,6 +1046,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5e4db3712eef..7ac50ab1b762 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -616,7 +616,7 @@ dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
{
- bool is_hdmi;
+ bool is_hdmi_tmds;
bool is_dp;
ASSERT(pipe_ctx->stream);
@@ -624,13 +624,13 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.stream_enc == NULL)
return; /* this is not root pipe */
- is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+ is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi && !is_dp)
+ if (!is_hdmi_tmds && !is_dp)
return;
- if (is_hdmi)
+ if (is_hdmi_tmds)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -935,13 +935,31 @@ void hwss_edp_backlight_control(
edp_receiver_ready_T9(link);
}
+// Static helper function which calls the correct function
+// based on pp_smu version
+static void set_pme_wa_enable_by_version(struct dc *dc)
+{
+ struct pp_smu_funcs *pp_smu = NULL;
+
+ if (dc->res_pool->pp_smu)
+ pp_smu = dc->res_pool->pp_smu;
+
+ if (pp_smu) {
+ if (pp_smu->ctx.ver == PP_SMU_VER_RV && pp_smu->rv_funcs.set_pme_wa_enable)
+ pp_smu->rv_funcs.set_pme_wa_enable(&(pp_smu->ctx));
+ }
+}

+
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
/* notify audio driver for audio modes of monitor */
- struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct pp_smu_funcs *pp_smu = NULL;
unsigned int i, num_audio = 1;
+ if (core_dc->res_pool->pp_smu)
+ pp_smu = core_dc->res_pool->pp_smu;
+
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
@@ -951,30 +969,31 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (num_audio >= 1 && pp_smu != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ set_pme_wa_enable_by_version(core_dc);
/* un-mute audio */
/* TODO: audio should be per stream rather than per link */
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, false);
+ pipe_ctx->stream_res.stream_enc, false);
}
}
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct pp_smu_funcs *pp_smu = NULL;
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ if (dc->res_pool->pp_smu)
+ pp_smu = dc->res_pool->pp_smu;
if (option != KEEP_ACQUIRED_RESOURCE ||
- !dc->debug.az_endpoint_mute_only) {
+ !dc->debug.az_endpoint_mute_only)
/*only disable az_endpoint if power down or free*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
- }
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
@@ -989,9 +1008,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
- if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (pp_smu != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ set_pme_wa_enable_by_version(dc);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */
@@ -1007,7 +1026,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
@@ -1032,7 +1051,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
/* only 3 items below are used by unblank */
- params.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1147,8 +1166,8 @@ static void build_audio_output(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
- state->dccg->funcs->get_dp_ref_clk_frequency(
- state->dccg);
+ state->clk_mgr->funcs->get_dp_ref_clk_frequency(
+ state->clk_mgr);
}
audio_output->pll_info.feed_back_divider =
@@ -1349,7 +1368,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers);
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1358,7 +1377,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
stream->timing.display_color_depth,
- pipe_ctx->stream->signal);
+ stream->signal);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
pipe_ctx->stream_res.opp,
@@ -1532,6 +1551,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
}
}
+ if (dc->hwss.init_pipes)
+ dc->hwss.init_pipes(dc, context);
+
if (edp_link) {
/* this seems to cause blank screens on DCE8 */
if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
@@ -1608,18 +1630,18 @@ static void dce110_set_displaymarks(
dc->bw_vbios->blackout_duration, pipe_ctx->stream);
pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks(
pipe_ctx->plane_res.mi,
- context->bw.dce.nbp_state_change_wm_ns[num_pipes],
- context->bw.dce.stutter_exit_wm_ns[num_pipes],
- context->bw.dce.stutter_entry_wm_ns[num_pipes],
- context->bw.dce.urgent_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_entry_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
if (i == underlay_idx) {
num_pipes++;
pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks(
pipe_ctx->plane_res.mi,
- context->bw.dce.nbp_state_change_wm_ns[num_pipes],
- context->bw.dce.stutter_exit_wm_ns[num_pipes],
- context->bw.dce.urgent_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
}
num_pipes++;
@@ -2612,7 +2634,7 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct mem_input *mi = pipe_ctx->plane_res.mi;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
- .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.xtalin_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7549adaa1542..dcd04e9ea76b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -392,6 +392,55 @@ static const struct resource_caps stoney_resource_cap = {
.num_ddc = 3,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+ .blends_with_below = true,
+ .blends_with_above = true,
+ .per_pixel_alpha = 1,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
+static const struct dc_plane_cap underlay_plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_UNDERLAY,
+ .blends_with_above = true,
+ .per_pixel_alpha = 1,
+
+ .pixel_format_support = {
+ .argb8888 = false,
+ .nv12 = true,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 1,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 1,
+ .nv12 = 250,
+ .fp16 = 1
+ }
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -854,7 +903,8 @@ static enum dc_status build_mapped_resource(
static bool dce110_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
bool result = false;
@@ -868,7 +918,7 @@ static bool dce110_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
- &context->bw.dce))
+ &context->bw_ctx.bw.dce))
result = true;
if (!result)
@@ -878,8 +928,8 @@ static bool dce110_validate_bandwidth(
context->streams[0]->timing.v_addressable,
context->streams[0]->timing.pix_clk_100hz / 10);
- if (memcmp(&dc->current_state->bw.dce,
- &context->bw.dce, sizeof(context->bw.dce))) {
+ if (memcmp(&dc->current_state->bw_ctx.bw.dce,
+ &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@@ -893,34 +943,34 @@ static bool dce110_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
- context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
- context->bw.dce.urgent_wm_ns[0].b_mark,
- context->bw.dce.urgent_wm_ns[0].a_mark,
- context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
- context->bw.dce.urgent_wm_ns[1].b_mark,
- context->bw.dce.urgent_wm_ns[1].a_mark,
- context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
- context->bw.dce.urgent_wm_ns[2].b_mark,
- context->bw.dce.urgent_wm_ns[2].a_mark,
- context->bw.dce.stutter_exit_wm_ns[2].b_mark,
- context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable,
- context->bw.dce.cpuc_state_change_enable,
- context->bw.dce.cpup_state_change_enable,
- context->bw.dce.nbp_state_change_enable,
- context->bw.dce.all_displays_in_sync,
- context->bw.dce.dispclk_khz,
- context->bw.dce.sclk_khz,
- context->bw.dce.sclk_deep_sleep_khz,
- context->bw.dce.yclk_khz,
- context->bw.dce.blackout_recovery_time_us);
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_mode_enable,
+ context->bw_ctx.bw.dce.cpuc_state_change_enable,
+ context->bw_ctx.bw.dce.cpup_state_change_enable,
+ context->bw_ctx.bw.dce.nbp_state_change_enable,
+ context->bw_ctx.bw.dce.all_displays_in_sync,
+ context->bw_ctx.bw.dce.dispclk_khz,
+ context->bw_ctx.bw.dce.sclk_khz,
+ context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
+ context->bw_ctx.bw.dce.yclk_khz,
+ context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}
@@ -1371,6 +1421,11 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < pool->base.underlay_pipe_index; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap;
+
bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
bw_calcs_data_update_from_pplib(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index ea3065d63372..a480b15f6885 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -397,6 +397,28 @@ static const struct resource_caps polaris_11_resource_cap = {
.num_ddc = 5,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -804,7 +826,8 @@ static enum dc_status build_mapped_resource(
bool dce112_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
bool result = false;
@@ -818,7 +841,7 @@ bool dce112_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
- &context->bw.dce))
+ &context->bw_ctx.bw.dce))
result = true;
if (!result)
@@ -826,8 +849,8 @@ bool dce112_validate_bandwidth(
"%s: Bandwidth validation failed!",
__func__);
- if (memcmp(&dc->current_state->bw.dce,
- &context->bw.dce, sizeof(context->bw.dce))) {
+ if (memcmp(&dc->current_state->bw_ctx.bw.dce,
+ &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@@ -841,34 +864,34 @@ bool dce112_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
- context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
- context->bw.dce.urgent_wm_ns[0].b_mark,
- context->bw.dce.urgent_wm_ns[0].a_mark,
- context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
- context->bw.dce.urgent_wm_ns[1].b_mark,
- context->bw.dce.urgent_wm_ns[1].a_mark,
- context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
- context->bw.dce.urgent_wm_ns[2].b_mark,
- context->bw.dce.urgent_wm_ns[2].a_mark,
- context->bw.dce.stutter_exit_wm_ns[2].b_mark,
- context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable,
- context->bw.dce.cpuc_state_change_enable,
- context->bw.dce.cpup_state_change_enable,
- context->bw.dce.nbp_state_change_enable,
- context->bw.dce.all_displays_in_sync,
- context->bw.dce.dispclk_khz,
- context->bw.dce.sclk_khz,
- context->bw.dce.sclk_deep_sleep_khz,
- context->bw.dce.yclk_khz,
- context->bw.dce.blackout_recovery_time_us);
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_mode_enable,
+ context->bw_ctx.bw.dce.cpuc_state_change_enable,
+ context->bw_ctx.bw.dce.cpup_state_change_enable,
+ context->bw_ctx.bw.dce.nbp_state_change_enable,
+ context->bw_ctx.bw.dce.all_displays_in_sync,
+ context->bw_ctx.bw.dce.dispclk_khz,
+ context->bw_ctx.bw.dce.sclk_khz,
+ context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
+ context->bw_ctx.bw.dce.yclk_khz,
+ context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}
@@ -887,7 +910,7 @@ enum dc_status resource_map_phy_clock_resources(
return DC_ERROR_UNEXPECTED;
if (dc_is_dp_signal(pipe_ctx->stream->signal)
- || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ || dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->clock_source =
dc->res_pool->dp_clock_source;
else
@@ -1310,6 +1333,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
/* Create hardware sequencer */
dce112_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
index 95a403396219..1f57ebc6f9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context(
bool dce112_validate_bandwidth(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
enum dc_status dce112_add_stream_to_ctx(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 312a0aebf91f..6d49c7143c67 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -454,6 +454,28 @@ static const struct resource_caps res_cap = {
.num_ddc = 6,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -1171,6 +1193,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
bw_calcs_data_update_from_pplib(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index c109ace96be9..27d0cc394963 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -387,6 +387,28 @@ static const struct resource_caps res_cap_83 = {
.num_ddc = 2,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE80_REG_LIST()
};
@@ -790,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool)
bool dce80_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
int i;
bool at_least_one_pipe = false;
@@ -802,11 +825,11 @@ bool dce80_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw.dce.dispclk_khz = 0;
- context->bw.dce.yclk_khz = 0;
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;
@@ -1032,6 +1055,10 @@ static bool dce80_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
@@ -1237,6 +1264,10 @@ static bool dce81_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
@@ -1438,6 +1469,10 @@ static bool dce83_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
index afe8c42211cd..2b2de1d913c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -43,23 +43,6 @@
#define DC_LOGGER \
clk_mgr->ctx->logger
-void dcn1_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
-{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
- dce110_fill_display_configs(context, pp_display_cfg);
-
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-}
-
static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
@@ -167,11 +150,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
{
struct dc *dc = clk_mgr->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
- struct dc_clocks *new_clocks = &context->bw.dcn.clk;
- struct pp_smu_display_requirement_rv *smu_req_cur =
- &dc->res_pool->pp_smu_req;
- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct pp_smu_funcs_rv *pp_smu = NULL;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
@@ -179,7 +159,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
bool enter_display_off = false;
display_count = get_active_display_cnt(dc, context);
-
+ if (dc->res_pool->pp_smu)
+ pp_smu = &dc->res_pool->pp_smu->rv_funcs;
if (display_count == 0)
enter_display_off = true;
@@ -189,10 +170,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
* if function pointer not set up, this message is
* sent as part of pplib_apply_display_requirements.
*/
- if (pp_smu->set_display_count)
+ if (pp_smu && pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-
- smu_req.display_count = display_count;
}
if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
@@ -203,7 +182,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
-
send_request_to_lower = true;
}
@@ -213,24 +191,18 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
- smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
-
send_request_to_lower = true;
}
//DCF Clock
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
-
send_request_to_lower = true;
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000;
-
send_request_to_lower = true;
}
@@ -239,17 +211,13 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
- if (pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
- } else {
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- dcn1_pplib_apply_display_requirements(dc, context);
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
@@ -259,27 +227,20 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
|| new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
-
send_request_to_lower = true;
}
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
- if (pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
- } else {
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- dcn1_pplib_apply_display_requirements(dc, context);
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
-
- *smu_req_cur = smu_req;
}
static const struct clk_mgr_funcs dcn1_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
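The rewritten dcn1_update_clocks() drops the cached pp_smu_display_requirement_rv and instead feeds the kHz values straight to the RV SMU hooks, converting to MHz on the spot and rounding the deep-sleep DCF clock up with (khz + 999) / 1000. The ordering is the important part: hard minimums are raised before clocks are increased, and only lowered after the ramp when everything is going down. A small sketch of that raise-before/lower-after pattern, with a hypothetical set_hard_min_mhz() standing in for the pp_smu function pointers:

#include <stdbool.h>
#include <stdio.h>

struct clocks { int dispclk_khz, fclk_khz, dcfclk_khz; };

/* Hypothetical SMU hook; the driver uses pp_smu function pointers here. */
static void set_hard_min_mhz(const char *what, int khz)
{
	printf("SMU: hard min %s = %d MHz\n", what, khz / 1000);
}

static void apply_minimums(const struct clocks *c)
{
	set_hard_min_mhz("fclk", c->fclk_khz);
	set_hard_min_mhz("dcfclk", c->dcfclk_khz);
}

static void update_clocks(struct clocks *cur, const struct clocks *req)
{
	bool increase = req->dispclk_khz > cur->dispclk_khz ||
			req->fclk_khz > cur->fclk_khz ||
			req->dcfclk_khz > cur->dcfclk_khz;

	if (increase)
		apply_minimums(req);	/* raise voltage/minimums first */

	*cur = *req;			/* stand-in for programming dividers */
	printf("programmed dispclk = %d kHz\n", cur->dispclk_khz);

	if (!increase)
		apply_minimums(req);	/* safe to lower only afterwards */
}

int main(void)
{
	struct clocks cur = { 300000, 400000, 300000 };
	struct clocks hi  = { 600000, 800000, 600000 };
	struct clocks lo  = { 400000, 600000, 400000 };

	update_clocks(&cur, &hi);
	update_clocks(&cur, &lo);
	return 0;
}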
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
index a995eda443a3..97007cf33665 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -34,10 +34,6 @@ struct clk_bypass {
uint32_t dprefclk_bypass;
};
-void dcn1_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context);
-
struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
#endif //__DCN10_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index cd1ebe57ed59..6f4b24756323 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -91,13 +91,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-enum gamut_remap_select {
- GAMUT_REMAP_BYPASS = 0,
- GAMUT_REMAP_COEFF,
- GAMUT_REMAP_COMA_COEFF,
- GAMUT_REMAP_COMB_COEFF
-};
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s)
{
@@ -392,6 +385,10 @@ void dpp1_cnv_setup (
default:
break;
}
+
+ /* Set default color space based on format if none is given. */
+ color_space = input_color_space ? input_color_space : color_space;
+
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
@@ -403,7 +400,7 @@ void dpp1_cnv_setup (
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
- tbl_entry.color_space = input_color_space;
+ tbl_entry.color_space = color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
select = INPUT_CSC_SELECT_ICSC;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 41f0f4c912e7..882bcc5a40f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -88,13 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-enum gamut_remap_select {
- GAMUT_REMAP_BYPASS = 0,
- GAMUT_REMAP_COEFF,
- GAMUT_REMAP_COMA_COEFF,
- GAMUT_REMAP_COMB_COEFF
-};
-
static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
{COLOR_SPACE_SRGB,
{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index c7642e748297..ce21a290bf3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
int *num_part_y,
int *num_part_c)
{
+ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
+ lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
+
int line_size = scl_data->viewport.width < scl_data->recout.width ?
scl_data->viewport.width : scl_data->recout.width;
int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
scl_data->viewport_c.width : scl_data->recout.width;
- int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
- int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
- int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
- int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
- int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ if (line_size == 0)
+ line_size = 1;
+
+ if (line_size_c == 0)
+ line_size_c = 1;
+
+
+ lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+ memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+ memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
if (lb_config == LB_MEMORY_CONFIG_1) {
lb_memory_size = 816;
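The dscl hunk above hoists the divisions out of the declarations so the viewport/recout widths can be clamped to at least 1 first; otherwise a zero-width viewport would propagate zero line sizes into the partition math. The +71/72 and +5/6 terms are the usual integer-ceiling idiom, shown here with a hypothetical 30-bit line-buffer depth (the real value comes from dpp1_dscl_get_lb_depth_bpc(), not this sketch):

#include <stdio.h>

/* Integer ceiling of a/b for positive b, same idiom as (x + 71) / 72. */
static int ceil_div(int a, int b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	int line_size = 0;		/* degenerate viewport */

	if (line_size == 0)		/* guard added by the patch */
		line_size = 1;

	int lb_bpc = 30;		/* hypothetical line-buffer depth in bits */
	printf("memory_line_size_y = %d\n", ceil_div(line_size * lb_bpc, 72));
	printf("memory_line_size_a = %d\n", ceil_div(line_size, 6));
	return 0;
}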
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index e161ad836812..0db2a6e96fc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -258,8 +258,9 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
- REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+ REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
void hubbub1_program_watermarks(
@@ -282,7 +283,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
@@ -309,7 +311,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -322,7 +325,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -336,7 +340,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -347,7 +352,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
@@ -374,7 +380,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -387,7 +394,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -401,7 +409,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -412,7 +421,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
@@ -439,7 +449,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -452,7 +463,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -466,7 +478,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -477,7 +490,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
@@ -504,7 +518,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -517,7 +532,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -531,7 +547,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -866,6 +883,7 @@ static const struct hubbub_funcs hubbub1_funcs = {
.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
.wm_read_state = hubbub1_wm_read_state,
+ .program_watermarks = hubbub1_program_watermarks,
};
void hubbub1_construct(struct hubbub *hubbub,
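Every watermark write converted above follows the same shape: the nanosecond figure from the bandwidth calculation is turned into refclk cycles by convert_and_clamp(), capped to the 21-bit field width (0x1fffff), and then programmed through REG_SET instead of a raw REG_WRITE so the field mask is applied. The helper itself is not shown in this hunk; the sketch below assumes it is roughly ns * refclk_mhz / 1000 clamped to the supplied maximum.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the assumed conversion: time in ns -> refclk cycles, clamped
 * to the register field width. Not the driver function itself. */
static uint32_t convert_and_clamp(uint32_t wm_ns, uint32_t refclk_mhz,
				  uint32_t clamp_value)
{
	uint64_t cycles = (uint64_t)wm_ns * refclk_mhz / 1000;

	return cycles > clamp_value ? clamp_value : (uint32_t)cycles;
}

int main(void)
{
	/* Hypothetical numbers: an 80 us urgent watermark at a 400 MHz refclk. */
	printf("prog_wm_value = 0x%x\n",
	       convert_and_clamp(80000, 400, 0x1fffff));
	return 0;
}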
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 9cd4a5194154..85811b24a497 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -32,18 +32,14 @@
#define TO_DCN10_HUBBUB(hubbub)\
container_of(hubbub, struct dcn10_hubbub, base)
-#define HUBHUB_REG_LIST_DCN()\
+#define HUBBUB_REG_LIST_DCN_COMMON()\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
@@ -54,6 +50,12 @@
SR(DCHUBBUB_TEST_DEBUG_DATA),\
SR(DCHUBBUB_SOFT_RESET)
+#define HUBBUB_VM_REG_LIST() \
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)
+
#define HUBBUB_SR_WATERMARK_REG_LIST()\
SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
@@ -65,7 +67,8 @@
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
#define HUBBUB_REG_LIST_DCN10(id)\
- HUBHUB_REG_LIST_DCN(), \
+ HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_VM_REG_LIST(), \
HUBBUB_SR_WATERMARK_REG_LIST(), \
SR(DCHUBBUB_SDPIF_FB_TOP),\
SR(DCHUBBUB_SDPIF_FB_BASE),\
@@ -122,8 +125,7 @@ struct dcn_hubbub_registers {
#define HUBBUB_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
-
-#define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\
+#define HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
@@ -133,10 +135,29 @@ struct dcn_hubbub_registers {
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
- HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh)
+ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh)
+
+#define HUBBUB_MASK_SH_LIST_STUTTER(mask_sh) \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, mask_sh)
#define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\
- HUBBUB_MASK_SH_LIST_DCN(mask_sh), \
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
@@ -167,15 +188,35 @@ struct dcn_hubbub_registers {
type FB_OFFSET;\
type AGP_BOT;\
type AGP_TOP;\
- type AGP_BASE
+ type AGP_BASE;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D
+
+#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
struct dcn_hubbub_shift {
DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
+ HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
};
struct dcn_hubbub_mask {
DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
+ HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
};
struct dc;
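Note: splitting the register, mask/shift, and field lists into common, VM, and stutter (SR) pieces lets a later ASIC variant compose only the blocks it actually has, while DCN10 keeps its old contents by stacking all of them. A purely illustrative composition for a hypothetical part without the SR watermarks, using the same macro conventions (the _NO_SR name does not exist in this patch):

/* illustrative only: everything except the stutter/SR watermark fields */
#define HUBBUB_MASK_SH_LIST_DCN_NO_SR(mask_sh)\
	HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
	HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh)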
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 683829466a44..54b219a710d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position(
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
- //account for cases where we see negative offset relative to overlay plane
- if (src_x_offset < 0 && src_y_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, 0,
- CURSOR_Y_POSITION, 0);
- x_hotspot -= src_x_offset;
- y_hotspot -= src_y_offset;
- } else if (src_x_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, 0,
- CURSOR_Y_POSITION, pos->y);
- x_hotspot -= src_x_offset;
- } else if (src_y_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
+ REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, 0);
- y_hotspot -= src_y_offset;
- } else {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
- }
+ CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
CURSOR_HOT_SPOT_X, x_hotspot,
@@ -1197,6 +1178,10 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
+void hubp1_init(struct hubp *hubp)
+{
+ //do nothing
+}
static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
@@ -1220,7 +1205,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
-
+ .hubp_init = hubp1_init,
};
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a6d6dfe00617..99d2b7e2a578 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -34,6 +34,7 @@
#define HUBP_REG_LIST_DCN(id)\
SRI(DCHUBP_CNTL, HUBP, id),\
SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
+ SRI(HUBPREQ_DEBUG, HUBP, id),\
SRI(DCSURF_ADDR_CONFIG, HUBP, id),\
SRI(DCSURF_TILING_CONFIG, HUBP, id),\
SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\
@@ -138,6 +139,7 @@
#define HUBP_COMMON_REG_VARIABLE_LIST \
uint32_t DCHUBP_CNTL; \
uint32_t HUBPREQ_DEBUG_DB; \
+ uint32_t HUBPREQ_DEBUG; \
uint32_t DCSURF_ADDR_CONFIG; \
uint32_t DCSURF_TILING_CONFIG; \
uint32_t DCSURF_SURFACE_PITCH; \
@@ -247,7 +249,7 @@
.field_name = reg_name ## __ ## field_name ## post_fix
/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
-#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
@@ -331,7 +333,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
@@ -339,7 +340,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
@@ -373,6 +373,11 @@
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
+#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
+
/* Mask/shift struct generation macro for ASICs with VM */
#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
@@ -595,6 +600,9 @@
type AGP_BASE;\
type AGP_BOT;\
type AGP_TOP;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -743,4 +751,6 @@ enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
void hubp1_vready_workaround(struct hubp *hubp,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+void hubp1_init(struct hubp *hubp);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index d1a8f1c302a9..33d311cea28c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -65,7 +65,7 @@ void print_microsec(struct dc_context *dc_ctx,
struct dc_log_buffer_ctx *log_ctx,
uint32_t ref_cycle)
{
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
@@ -345,13 +345,13 @@ void dcn10_log_hw_state(struct dc *dc,
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
- dc->current_state->bw.dcn.clk.dcfclk_khz,
- dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.clk.dispclk_khz,
- dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
- dc->current_state->bw.dcn.clk.fclk_khz,
- dc->current_state->bw.dcn.clk.socclk_khz);
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
log_mpc_crc(dc, log_ctx);
@@ -714,7 +714,7 @@ static enum dc_status dcn10_enable_stream_timing(
return DC_OK;
}
-static void reset_back_end_for_pipe(
+static void dcn10_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -889,22 +889,23 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
dcn10_verify_allow_pstate_change_high(dc);
}
-static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp)
{
struct dce_hwseq *hws = dc->hwseq;
- struct dpp *dpp = pipe_ctx->plane_res.dpp;
DC_LOGGER_INIT(dc->ctx->logger);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
dpp_pg_control(hws, dpp->inst, false);
- hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, false);
+ hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
- "Power gated front end %d\n", pipe_ctx->pipe_idx);
+ "Power gated front end %d\n", hubp->inst);
}
}
@@ -931,7 +932,9 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
- plane_atomic_power_down(dc, pipe_ctx);
+ plane_atomic_power_down(dc,
+ pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp);
pipe_ctx->stream = NULL;
memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
@@ -976,16 +979,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
- if (pipe_ctx->stream != NULL)
+ if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
continue;
- if (tg->funcs->is_tg_enabled(tg))
- tg->funcs->lock(tg);
-
/* Blank controller using driver code instead of
* command table.
*/
if (tg->funcs->is_tg_enabled(tg)) {
+ tg->funcs->lock(tg);
tg->funcs->set_blank(tg, true);
hwss_wait_for_blank_complete(tg);
}
@@ -1001,16 +1002,19 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dpp *dpp = dc->res_pool->dpps[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- // W/A for issue with dc_post_update_surfaces_to_stream
- hubp->power_gated = true;
-
/* There is assumption that pipe_ctx is not mapping irregularly
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
- if (pipe_ctx->stream != NULL)
+ if (can_apply_seamless_boot &&
+ pipe_ctx->stream != NULL &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
+ pipe_ctx->stream_res.tg))
continue;
+ /* Disable on the current state so the new one isn't cleared. */
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -1108,6 +1112,25 @@ static void dcn10_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
+ /* If taking control over from VBIOS, we may want to optimize our first
+ * mode set, so we need to skip powering down pipes until we know which
+ * pipes we want to use.
+ * Otherwise, if taking control is not possible, we need to power
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+
+ hubp->funcs->hubp_init(hubp);
+ dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ plane_atomic_power_down(dc, dpp, hubp);
+ }
+
+ apply_DEGVIDCN10_253_wa(dc);
+ }
+
for (i = 0; i < dc->res_pool->audio_count; i++) {
struct audio *audio = dc->res_pool->audios[i];
@@ -1137,12 +1160,9 @@ static void dcn10_init_hw(struct dc *dc)
enable_power_gating_plane(dc->hwseq, true);
memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
-
- if (dc->hwss.init_pipes)
- dc->hwss.init_pipes(dc, dc->current_state);
}
-static void reset_hw_ctx_wrap(
+static void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
@@ -1164,10 +1184,9 @@ static void reset_hw_ctx_wrap(
pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
struct clock_source *old_clk = pipe_ctx_old->clock_source;
- reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating) {
+ dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (dc->hwss.enable_stream_gating)
dc->hwss.enable_stream_gating(dc, pipe_ctx);
- }
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -1836,7 +1855,7 @@ void dcn10_get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
}
@@ -1931,7 +1950,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
- COLOR_SPACE_YCBCR601_LIMITED);
+ plane_state->color_space);
//set scale and bias registers
build_prescale_params(&bns_params, plane_state);
@@ -2051,7 +2070,7 @@ void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
- bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+ bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
@@ -2120,6 +2139,9 @@ void update_dchubp_dpp(
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (plane_state->update_flags.bits.full_update) {
@@ -2310,6 +2332,7 @@ static void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
bool removed_pipe[4] = { false };
+ bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2319,7 +2342,13 @@ static void dcn10_apply_ctx_for_surface(
tg = top_pipe_to_program->stream_res.tg;
- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
+ interdependent_update = top_pipe_to_program->plane_state &&
+ top_pipe_to_program->plane_state->update_flags.bits.full_update;
+
+ if (interdependent_update)
+ lock_all_pipes(dc, context, true);
+ else
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (num_planes == 0) {
/* OTG blank before remove all front end */
@@ -2339,15 +2368,9 @@ static void dcn10_apply_ctx_for_surface(
*/
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
+ old_pipe_ctx->plane_res.hubp &&
+ old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
dcn10_disable_plane(dc, old_pipe_ctx);
- /*
- * power down fe will unlock when calling reset, need
- * to lock it back here. Messy, need rework.
- */
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
- }
}
if ((!pipe_ctx->plane_state ||
@@ -2366,29 +2389,25 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes > 0)
program_all_pipe_in_tree(dc, top_pipe_to_program, context);
- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
-
- if (top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update)
+ if (interdependent_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream
- || !pipe_ctx->plane_state
- || !tg->funcs->is_tg_enabled(tg))
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
+ !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
continue;
- tg->funcs->lock(tg);
-
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
-
- tg->funcs->unlock(tg);
}
+ if (interdependent_update)
+ lock_all_pipes(dc, context, false);
+ else
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
@@ -2420,12 +2439,14 @@ static void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
- context->bw.dcn.clk.phyclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@@ -2433,9 +2454,9 @@ static void dcn10_prepare_bandwidth(
false);
}
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks,
- dc->res_pool->ref_clock_inKhz / 1000,
+ hubbub->funcs->program_watermarks(hubbub,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@@ -2450,12 +2471,14 @@ static void dcn10_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
- context->bw.dcn.clk.phyclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@@ -2463,9 +2486,9 @@ static void dcn10_optimize_bandwidth(
true);
}
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks,
- dc->res_pool->ref_clock_inKhz / 1000,
+ hubbub->funcs->program_watermarks(hubbub,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@@ -2654,7 +2677,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
pipe_ctx->plane_res.hubp);
- plane_state->status.is_flip_pending = flip_pending;
+ plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
if (!flip_pending)
plane_state->status.current_address = plane_state->status.requested_address;
@@ -2685,16 +2708,22 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
- .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
+ uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
+ uint32_t x_offset = min(x_plane, pos_cpy.x);
+ uint32_t y_offset = min(y_plane, pos_cpy.y);
- pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
- pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
+ pos_cpy.x -= x_offset;
+ pos_cpy.y -= y_offset;
+ pos_cpy.x_hotspot += (x_plane - x_offset);
+ pos_cpy.y_hotspot += (y_plane - y_offset);
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
@@ -2789,6 +2818,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
+void lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock)
+{
+ struct pipe_ctx *pipe_ctx;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ tg = pipe_ctx->stream_res.tg;
+ /*
+ * Only lock the top pipe's tg to prevent redundant
+ * (un)locking. Also skip if pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream || !pipe_ctx->plane_state ||
+ !tg->funcs->is_tg_enabled(tg))
+ continue;
+
+ if (lock)
+ tg->funcs->lock(tg);
+ else
+ tg->funcs->unlock(tg);
+ }
+}
+
static void calc_vupdate_position(
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
@@ -2882,6 +2938,29 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
+static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ /* only 3 items below are used by unblank */
+ params.timing = pipe_ctx->stream->timing;
+
+ params.link_settings.link_rate = link_settings->link_rate;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ params.timing.pix_clk_100hz /= 2;
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ }
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, true);
+ }
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.init_hw = dcn10_init_hw,
@@ -2903,7 +2982,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.update_info_frame = dce110_update_info_frame,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
- .unblank_stream = dce110_unblank_stream,
+ .unblank_stream = dcn10_unblank_stream,
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
@@ -2913,7 +2992,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.pipe_control_lock = dcn10_pipe_control_lock,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
- .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+ .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.set_drr = set_drr,
.get_position = get_position,
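Note: lock_all_pipes() added above only locks the timing generator of the top pipe of each enabled tree, and dcn10_apply_ctx_for_surface() now brackets full updates with it instead of locking and unlocking per pipe inside the interdependent-update loop. A condensed sketch of the calling pattern introduced by this patch:

	interdependent_update = top_pipe_to_program->plane_state &&
		top_pipe_to_program->plane_state->update_flags.bits.full_update;

	if (interdependent_update)
		lock_all_pipes(dc, context, true);	/* lock every enabled tree once */
	else
		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);

	/* ... program pipes, hubp_setup_interdependent() for the other streams ... */

	if (interdependent_update)
		lock_all_pipes(dc, context, false);	/* single unlock at the end */
	else
		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);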
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 6d66084df55f..4b3b27a5d23b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 98f41d250978..991622da9ed5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -77,7 +77,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
@@ -115,7 +115,7 @@ static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned in
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
if (invarOnly)
@@ -472,12 +472,12 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i
chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
"dppclk,fclk,socclk\n"
"%d,%d,%d,%d,%d,%d\n",
- dc->current_state->bw.dcn.clk.dcfclk_khz,
- dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.clk.dispclk_khz,
- dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.fclk_khz,
- dc->current_state->bw.dcn.clk.socclk_khz);
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index a9db372688ff..0126a44ba012 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1304,7 +1304,6 @@ void dcn10_link_encoder_connect_dig_be_to_fe(
#define HPD_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
HPD_REG(reg_name), \
- HPD_REG_READ(reg_name), \
n, __VA_ARGS__)
#define HPD_REG_UPDATE(reg_name, field, val) \
@@ -1337,7 +1336,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
#define AUX_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
AUX_REG(reg_name), \
- AUX_REG_READ(reg_name), \
n, __VA_ARGS__)
#define AUX_REG_UPDATE(reg_name, field, val) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 09d74070a49b..7eccb54c421d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -516,6 +516,31 @@ static const struct resource_caps rv2_res_cap = {
};
#endif
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .blends_with_above = true,
+ .blends_with_below = true,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 250,
+ .fp16 = 1
+ }
+};
+
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
@@ -848,14 +873,14 @@ void dcn10_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
- dm_pp_get_funcs_rv(ctx, pp_smu);
+ dm_pp_get_funcs(ctx, pp_smu);
return pp_smu;
}
@@ -865,10 +890,7 @@ static void destruct(struct dcn10_resource_pool *pool)
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
- /* TODO: free dcn version of stream encoder once implemented
- * rather than using virtual stream encoder
- */
- kfree(pool->base.stream_enc[i]);
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
@@ -921,9 +943,6 @@ static void destruct(struct dcn10_resource_pool *pool)
}
}
- for (i = 0; i < pool->base.stream_enc_count; i++)
- kfree(pool->base.stream_enc[i]);
-
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
@@ -1078,7 +1097,7 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
{
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
+ struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
if (!head_pipe) {
ASSERT(0);
@@ -1143,7 +1162,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
continue;
if (context->stream_status[i].plane_count > 2)
- return false;
+ return DC_FAIL_UNSUPPORTED_1;
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =
@@ -1351,7 +1370,7 @@ static bool construct(
goto fail;
}
- dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
+ dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
@@ -1510,6 +1529,9 @@ static bool construct(
dcn10_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->cap_funcs = cap_funcs;
return true;
@@ -1522,7 +1544,7 @@ fail:
}
struct resource_pool *dcn10_create_resource_pool(
- uint8_t num_virtual_links,
+ const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn10_resource_pool *pool =
@@ -1531,7 +1553,7 @@ struct resource_pool *dcn10_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
index 8f71225bc61b..999c684a0b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
@@ -39,7 +39,7 @@ struct dcn10_resource_pool {
struct resource_pool base;
};
struct resource_pool *dcn10_create_resource_pool(
- uint8_t num_virtual_links,
+ const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b08254121251..8ee9f6dc1d62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -245,7 +245,8 @@ static void enc1_update_hdmi_info_packet(
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space)
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting)
{
uint32_t h_active_start;
uint32_t v_active_start;
@@ -298,7 +299,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
break;
case PIXEL_ENCODING_YCBCR420:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
- REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
break;
default:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
@@ -726,13 +726,19 @@ void enc1_stream_encoder_update_dp_info_packets(
3, /* packetIndex */
&info_frame->hdrsmd);
+ if (info_frame->dpsdp.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 4,/* packetIndex */
+ &info_frame->dpsdp);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
-
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, info_frame->dpsdp.valid);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
@@ -797,10 +803,10 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- max_retries = DP_BLANK_MAX_RETRY * 150;
+ max_retries = DP_BLANK_MAX_RETRY * 250;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
@@ -833,14 +839,19 @@ void enc1_stream_encoder_dp_unblank(
if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
uint32_t n_vid = 0x8000;
uint32_t m_vid;
+ uint32_t n_multiply = 0;
+ uint64_t m_vid_l = n_vid;
+ /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
+ if (param->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ /*this param->pixel_clk_khz is half of 444 rate for 420 already*/
+ n_multiply = 1;
+ }
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
*/
- uint64_t m_vid_l = n_vid;
-
- m_vid_l *= param->pixel_clk_khz;
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
@@ -859,7 +870,9 @@ void enc1_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
- REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+ REG_UPDATE_2(DP_VID_TIMING,
+ DP_VID_M_N_GEN_EN, 1,
+ DP_VID_N_MUL, n_multiply);
}
/* set DIG_START to 0x1 to resync FIFO */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index b7c800e10a32..e654c2f55971 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -462,7 +462,8 @@ void enc1_update_generic_info_packet(
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space);
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting);
void enc1_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index e81b24374bcb..ccbfe9680d27 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -58,7 +58,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
bool enable);
/*
- * poll pending down reply before clear payload allocation table
+ * poll pending down reply
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 14bed5b1fa97..4fc4208d1472 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -30,6 +30,8 @@
* interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
*/
+typedef bool BOOLEAN;
+
enum pp_smu_ver {
/*
* PP_SMU_INTERFACE_X should be interpreted as the interface defined
@@ -72,29 +74,6 @@ struct pp_smu_wm_range_sets {
struct pp_smu_wm_set_range writer_wm_sets[MAX_WATERMARK_SETS];
};
-struct pp_smu_display_requirement_rv {
- /* PPSMC_MSG_SetDisplayCount: count
- * 0 triggers S0i2 optimization
- */
- unsigned int display_count;
-
- /* PPSMC_MSG_SetHardMinFclkByFreq: mhz
- * FCLK will vary with DPM, but never below requested hard min
- */
- unsigned int hard_min_fclk_mhz;
-
- /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz
- * fixed clock at requested freq, either from FCH bypass or DFS
- */
- unsigned int hard_min_dcefclk_mhz;
-
- /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
- * when DF is in cstate, dcf clock is further divided down
- * to just above given frequency
- */
- unsigned int min_deep_sleep_dcefclk_mhz;
-};
-
struct pp_smu_funcs_rv {
struct pp_smu pp_smu;
@@ -137,12 +116,6 @@ struct pp_smu_funcs_rv {
/* PME w/a */
void (*set_pme_wa_enable)(struct pp_smu *pp);
- /*
- * Legacy functions. Used for backwards comp. with existing
- * PPlib code.
- */
- void (*set_display_requirement)(struct pp_smu *pp,
- struct pp_smu_display_requirement_rv *req);
};
struct pp_smu_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 1961cc6d9143..b426ba02b793 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -52,30 +52,17 @@ irq_handler_idx dm_register_interrupt(
* GPU registers access
*
*/
-
+uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name);
/* enable for debugging new code, this adds 50k to the driver size. */
/* #define DM_CHECK_ADDR_0 */
#define dm_read_reg(ctx, address) \
dm_read_reg_func(ctx, address, __func__)
-static inline uint32_t dm_read_reg_func(
- const struct dc_context *ctx,
- uint32_t address,
- const char *func_name)
-{
- uint32_t value;
-#ifdef DM_CHECK_ADDR_0
- if (address == 0) {
- DC_ERR("invalid register read; address = 0\n");
- return 0;
- }
-#endif
- value = cgs_read_register(ctx->cgs_device, address);
- trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
- return value;
-}
#define dm_write_reg(ctx, address, value) \
dm_write_reg_func(ctx, address, value, __func__)
@@ -144,10 +131,14 @@ static inline uint32_t set_reg_field_value_ex(
reg_name ## __ ## reg_field ## _MASK,\
reg_name ## __ ## reg_field ## __SHIFT)
-uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+uint32_t generic_reg_set_ex(const struct dc_context *ctx,
uint32_t addr, uint32_t reg_val, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
#define FD(reg_field) reg_field ## __SHIFT, \
reg_field ## _MASK
@@ -155,7 +146,7 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
* return number of polls before condition is met
* return 0 if condition is not met after specified time out tries
*/
-unsigned int generic_reg_wait(const struct dc_context *ctx,
+void generic_reg_wait(const struct dc_context *ctx,
uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line);
@@ -172,11 +163,10 @@ unsigned int generic_reg_wait(const struct dc_context *ctx,
#define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\
generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, \
- dm_read_reg_func(ctx, mm##reg_name + DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + inst_offset, __func__), \
n, __VA_ARGS__)
#define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\
- generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
+ generic_reg_set_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
n, __VA_ARGS__)
#define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
@@ -223,8 +213,8 @@ bool dm_pp_notify_wm_clock_changes(
const struct dc_context *ctx,
struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
-void dm_pp_get_funcs_rv(struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs);
+void dm_pp_get_funcs(struct dc_context *ctx,
+ struct pp_smu_funcs *funcs);
/* DAL calls this function to notify PP about completion of Mode Set.
* For PP it means that current DCE clocks are those which were returned
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 77200711abbe..a3d1be20dd9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -29,7 +29,7 @@
#include "os_types.h"
#include "dc_types.h"
-struct pp_smu_funcs_rv;
+struct pp_smu_funcs;
struct dm_pp_clock_range {
int min_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index d303b789adfe..80ffd7d958b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,40 +26,14 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
-extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
-
-static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
-{
- switch (project) {
- case DML_PROJECT_RAVEN1:
- *soc = dcn1_0_soc;
- break;
- default:
- ASSERT(0);
- break;
- }
-}
-
-static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
+void dml_init_instance(struct display_mode_lib *lib,
+ const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
+ const struct _vcs_dpi_ip_params_st *ip_params,
+ enum dml_project project)
{
- switch (project) {
- case DML_PROJECT_RAVEN1:
- *ip = dcn1_0_ip;
- break;
- default:
- ASSERT(0);
- break;
- }
-}
-
-void dml_init_instance(struct display_mode_lib *lib, enum dml_project project)
-{
- if (lib->project != project) {
- set_soc_bounding_box(&lib->soc, project);
- set_ip_params(&lib->ip, project);
- lib->project = project;
- }
+ lib->soc = *soc_bb;
+ lib->ip = *ip_params;
+ lib->project = project;
}
const char *dml_get_status_message(enum dm_validation_status status)
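Note: with the project-based switch removed, dml_init_instance() now takes the SoC bounding box and IP parameters from the caller; the dcn10_resource.c hunk earlier in this patch is the matching call site. A minimal call sketch, assuming dcn1_0_soc and dcn1_0_ip remain exported by the DML as before:

	/* copy the caller-supplied tables instead of selecting them by project */
	dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);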
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index a730e0209c05..1b546dba34bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,7 +41,10 @@ struct display_mode_lib {
struct dal_logger *logger;
};
-void dml_init_instance(struct display_mode_lib *lib, enum dml_project project);
+void dml_init_instance(struct display_mode_lib *lib,
+ const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
+ const struct _vcs_dpi_ip_params_st *ip_params,
+ enum dml_project project);
const char *dml_get_status_message(enum dm_validation_status status);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 391183e3428f..c5b791d158a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,6 +25,8 @@
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
+#define MAX_CLOCK_LIMIT_STATES 8
+
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
typedef struct _vcs_dpi_ip_params_st ip_params_st;
@@ -103,7 +105,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
unsigned int num_states;
- struct _vcs_dpi_voltage_scaling_st clock_limits[8];
+ struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
};
struct _vcs_dpi_ip_params_st {
@@ -416,6 +418,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_per_vm_group_flip;
unsigned int refcyc_per_vm_req_vblank;
unsigned int refcyc_per_vm_req_flip;
+ unsigned int refcyc_per_vm_dmdata;
};
struct _vcs_dpi_display_ttu_regs_st {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 48400d642610..e2d82aacd3bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -321,6 +321,9 @@ void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st d
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
dlg_regs.xfc_reg_remote_surface_flip_latency);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n",
+ dlg_regs.refcyc_per_vm_dmdata);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
index fe6301cb8681..1b01a9a58d14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -167,7 +167,7 @@ struct clock_source_funcs {
struct pixel_clk_params *,
struct pll_settings *);
bool (*get_pixel_clk_frequency_100hz)(
- struct clock_source *clock_source,
+ const struct clock_source *clock_source,
unsigned int inst,
unsigned int *pixel_clk_khz);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 2e61a22ef4b2..8dca3b7700e5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -43,7 +43,7 @@ enum dc_status {
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
DC_FAIL_DP_LINK_TRAINING = 15,
-
+ DC_FAIL_UNSUPPORTED_1 = 18,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 986ed1728644..6f5ab05d6467 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -95,10 +95,10 @@ struct resource_funcs {
void (*link_init)(struct dc_link *link);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
-
bool (*validate_bandwidth)(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
enum dc_status (*validate_global)(
struct dc *dc,
@@ -144,8 +144,7 @@ struct resource_pool {
struct stream_encoder *stream_enc[MAX_PIPES * 2];
struct hubbub *hubbub;
struct mpc *mpc;
- struct pp_smu_funcs_rv *pp_smu;
- struct pp_smu_display_requirement_rv pp_smu_req;
+ struct pp_smu_funcs *pp_smu;
struct dce_aux *engines[MAX_PIPES];
struct dce_i2c_hw *hw_i2cs[MAX_PIPES];
struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
@@ -154,7 +153,12 @@ struct resource_pool {
unsigned int pipe_count;
unsigned int underlay_pipe_index;
unsigned int stream_enc_count;
- unsigned int ref_clock_inKhz;
+
+ struct {
+ unsigned int xtalin_clock_inKhz;
+ unsigned int dccg_ref_clock_inKhz;
+ unsigned int dchub_ref_clock_inKhz;
+ } ref_clocks;
unsigned int timing_generator_count;
/*
@@ -262,18 +266,22 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
};
-union bw_context {
+union bw_output {
struct dcn_bw_output dcn;
struct dce_bw_output dce;
};
+struct bw_context {
+ union bw_output bw;
+ struct display_mode_lib dml;
+};
/**
* struct dc_state - The full description of a state requested by a user
*
* @streams: Stream properties
* @stream_status: The planes on a given stream
* @res_ctx: Persistent state of resources
- * @bw: The output from bandwidth and watermark calculations
+ * @bw_ctx: The output from bandwidth and watermark calculations and the DML
* @pp_display_cfg: PowerPlay clocks and settings
* @dcn_bw_vars: non-stack memory to support bandwidth calculations
*
@@ -285,7 +293,7 @@ struct dc_state {
struct resource_context res_ctx;
- union bw_context bw;
+ struct bw_context bw_ctx;
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
@@ -293,7 +301,11 @@ struct dc_state {
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct clk_mgr *dccg;
+ struct clk_mgr *clk_mgr;
+
+ struct {
+ bool full_update_needed : 1;
+ } commit_hints;
struct kref refcount;
};
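Note: the former union bw_context member of dc_state is wrapped in struct bw_context together with a per-state DML copy, which is why every bw.dcn / bw.dce access in this patch gains an extra bw_ctx level. A sketch of the resulting access path, using only fields defined above:

	/* old: context->bw.dcn.clk.dispclk_khz */
	int dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;

	/* each state now also carries its own display_mode_lib instance */
	struct display_mode_lib *dml = &context->bw_ctx.dml;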
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 16fd4dc6c4dd..b1fab251c09b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,8 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-int dc_link_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload);
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result);
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index ece954a40a8e..263c09630c06 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
@@ -631,5 +632,7 @@ void dcn_bw_update_from_pplib(struct dc *dc);
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
+enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
+
#endif /* __DCN_CALCS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 23a4b18e5fee..31bd6d5183ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -42,6 +42,8 @@ struct clk_mgr_funcs {
bool safe_to_lower);
int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+
+ void (*init_clocks)(struct clk_mgr *clk_mgr);
};
#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 95a56d012626..05ee5295d2c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -39,6 +39,10 @@ struct dccg_funcs {
void (*update_dpp_dto)(struct dccg *dccg,
int dpp_inst,
int req_dppclk);
+ void (*get_dccg_ref_freq)(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+ void (*dccg_init)(struct dccg *dccg);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 9d2d8e51306c..93667e8b23b3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -73,6 +73,16 @@ struct hubbub_funcs {
void (*wm_read_state)(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
+
+ void (*get_dchub_ref_freq)(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz);
+
+ void (*program_watermarks)(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index cbaa43853611..c68f0ce346c7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -70,6 +70,8 @@ struct dmcu_funcs {
void (*get_psr_wait_loop)(struct dmcu *dmcu,
unsigned int *psr_wait_loop_number);
bool (*is_dmcu_initialized)(struct dmcu *dmcu);
+ bool (*lock_phy)(struct dmcu *dmcu);
+ bool (*unlock_phy)(struct dmcu *dmcu);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 1cd07e94ee63..455df4999797 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -130,6 +130,7 @@ struct hubp_funcs {
void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
+ void (*hubp_init)(struct hubp *hubp);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index da85537a4488..4c8e2c6fb6db 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -146,6 +146,12 @@ struct out_csc_color_matrix {
uint16_t regval[12];
};
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
enum opp_regamma {
OPP_REGAMMA_BYPASS = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 4051493557bc..49854eb73d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -63,11 +63,13 @@ struct encoder_info_frame {
struct dc_info_packet vsc;
/* HDR Static MetaData */
struct dc_info_packet hdrsmd;
+ /* custom sdp message */
+ struct dc_info_packet dpsdp;
};
struct encoder_unblank_param {
struct dc_link_settings link_settings;
- unsigned int pixel_clk_khz;
+ struct dc_crtc_timing timing;
};
struct encoder_set_dp_phy_pattern_param {
@@ -88,7 +90,8 @@ struct stream_encoder_funcs {
void (*dp_set_stream_attribute)(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space);
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting);
void (*hdmi_set_stream_attribute)(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index c25f7df7b5e3..067d53caf28a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -187,8 +187,10 @@ struct timing_generator_funcs {
bool (*did_triggered_reset_occur)(struct timing_generator *tg);
void (*setup_global_swap_lock)(struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params);
+ void (*setup_global_lock)(struct timing_generator *tg);
void (*unlock)(struct timing_generator *tg);
void (*lock)(struct timing_generator *tg);
+ void (*lock_global)(struct timing_generator *tg);
void (*enable_reset_trigger)(struct timing_generator *tg,
int source_tg_inst);
void (*enable_crtc_reset)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 7676f25216b1..33905468e2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -176,6 +176,10 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
+ void (*pipe_control_lock_global)(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
void (*blank_pixel_data)(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index cf5a84b9e27c..8503d9cc4763 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -52,7 +52,7 @@
/* macro to set register fields. */
#define REG_SET_N(reg_name, n, initial_val, ...) \
- generic_reg_update_ex(CTX, \
+ generic_reg_set_ex(CTX, \
REG(reg_name), \
initial_val, \
n, __VA_ARGS__)
@@ -225,7 +225,6 @@
#define REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
REG(reg_name), \
- REG_READ(reg_name), \
n, __VA_ARGS__)
#define REG_UPDATE(reg_name, field, val) \
@@ -380,16 +379,11 @@
/* macro to update a register field to specified values in given sequences.
* useful when toggling bits
*/
-#define REG_UPDATE_SEQ(reg, field, value1, value2) \
-{ uint32_t val = REG_UPDATE(reg, field, value1); \
- REG_SET(reg, val, field, value2); }
-
-/* macro to update fields in register 1 field at a time in given order */
-#define REG_UPDATE_1BY1_2(reg, f1, v1, f2, v2) \
+#define REG_UPDATE_SEQ_2(reg, f1, v1, f2, v2) \
{ uint32_t val = REG_UPDATE(reg, f1, v1); \
REG_SET(reg, val, f2, v2); }
-#define REG_UPDATE_1BY1_3(reg, f1, v1, f2, v2, f3, v3) \
+#define REG_UPDATE_SEQ_3(reg, f1, v1, f2, v2, f3, v3) \
{ uint32_t val = REG_UPDATE(reg, f1, v1); \
val = REG_SET(reg, val, f2, v2); \
REG_SET(reg, val, f3, v3); }
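
The renamed REG_UPDATE_SEQ_2/_3 macros express a fixed write sequence: one read-modify-write of the register followed by further writes that reuse the value already computed, which is what you want when a bit must be toggled in a defined order. The sketch below mimics that sequencing with mock register accessors; update_field and set_field are stand-ins for the real REG_UPDATE/REG_SET macros, which go through generic_reg_update_ex/generic_reg_set_ex with the DC register context.

/* Standalone sketch of the "update, then set" sequencing that
 * REG_UPDATE_SEQ_2 expresses. The register store and the field mask here
 * are mock-ups for illustration only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;			/* pretend MMIO register */

static uint32_t reg_read(void)		{ return fake_reg; }
static void     reg_write(uint32_t v)	{ fake_reg = v; }

/* Read-modify-write one field and return the written value (like REG_UPDATE). */
static uint32_t update_field(uint32_t mask, int shift, uint32_t v)
{
	uint32_t val = (reg_read() & ~mask) | ((v << shift) & mask);

	reg_write(val);
	return val;
}

/* Modify a field in an already-known value and write it (like REG_SET). */
static uint32_t set_field(uint32_t val, uint32_t mask, int shift, uint32_t v)
{
	val = (val & ~mask) | ((v << shift) & mask);
	reg_write(val);
	return val;
}

int main(void)
{
	/* Toggle a hypothetical RESET bit (bit 4): assert, then deassert,
	 * without re-reading the register in between - the point of the
	 * _SEQ macros. */
	uint32_t val = update_field(0x10, 4, 1);

	set_field(val, 0x10, 4, 0);
	printf("register after sequence: 0x%08x\n", fake_reg);
	return 0;
}
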
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 0086a2f1d21a..3ce0a4fc5822 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -70,11 +70,9 @@ bool resource_construct(
struct resource_pool *pool,
const struct resource_create_funcs *create_funcs);
-struct resource_pool *dc_create_resource_pool(
- struct dc *dc,
- int num_virtual_links,
- enum dce_version dc_version,
- struct hw_asic_id asic_id);
+struct resource_pool *dc_create_resource_pool(struct dc *dc,
+ const struct dc_init_data *init_data,
+ enum dce_version dc_version);
void dc_destroy_resource_pool(struct dc *dc);
@@ -131,7 +129,8 @@ bool resource_attach_surfaces_to_context(
struct pipe_ctx *find_idle_secondary_pipe(
struct resource_context *res_ctx,
- const struct resource_pool *pool);
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
bool resource_is_stream_unchanged(
struct dc_state *old_context, struct dc_stream_state *stream);
@@ -172,4 +171,7 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
+struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx);
+bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index afe0876fe6f8..86987f5e8bd5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -81,6 +81,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
.enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
@@ -137,7 +142,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 1ea7256ec89b..750ba0ab4106 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -84,6 +84,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -140,7 +145,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
IRQ_REG_ENTRY(CRTC, reg_num,\
CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 8a2066c313fe..de218fe84a43 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -84,6 +84,10 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_INVALID + reg_num] = {\
@@ -142,7 +146,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index e04ae49243f6..10ac6deff5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -56,6 +56,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
@@ -153,6 +165,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -203,12 +220,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
-#define vupdate_int_entry(reg_num)\
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
@@ -315,12 +335,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_int_entry(0),
- vupdate_int_entry(1),
- vupdate_int_entry(2),
- vupdate_int_entry(3),
- vupdate_int_entry(4),
- vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index 3dc1733eea20..fdcf9e66d852 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -29,7 +29,8 @@
static void virtual_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space) {}
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting) {}
static void virtual_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 52a73332befb..89ef9f6860e5 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -503,6 +503,8 @@ static inline int dc_fixpt_ceil(struct fixed31_32 arg)
* fractional
*/
+unsigned int dc_fixpt_u4d19(struct fixed31_32 arg);
+
unsigned int dc_fixpt_u3d19(struct fixed31_32 arg);
unsigned int dc_fixpt_u2d19(struct fixed31_32 arg);
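
dc_fixpt_u4d19() adds a converter from the signed 31.32 fixed-point type to an unsigned 4.19 format (4 integer bits, 19 fractional bits), alongside the existing u3d19/u2d19 helpers. The sketch below shows the basic shift-and-round arithmetic such a conversion implies; it is an illustration under that assumption, and the real implementation's rounding and clamping may differ.

/* Illustrative conversion from a Q31.32 fixed-point value to unsigned Q4.19,
 * i.e. what a helper like dc_fixpt_u4d19() conceptually does. */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS_IN	32	/* source: 32 fractional bits */
#define FRAC_BITS_OUT	19	/* target: 19 fractional bits */

static unsigned int q31_32_to_u4d19(uint64_t value_q31_32)
{
	unsigned int shift = FRAC_BITS_IN - FRAC_BITS_OUT;	/* 13 */
	uint64_t rounded = value_q31_32 + (1ULL << (shift - 1));

	/* keep 4 integer + 19 fractional bits */
	return (unsigned int)((rounded >> shift) & ((1u << 23) - 1));
}

int main(void)
{
	/* 2.5 in Q31.32 is 2.5 * 2^32 */
	uint64_t two_point_five = (5ULL << 32) / 2;
	unsigned int u4d19 = q31_32_to_u4d19(two_point_five);

	/* expect 2.5 * 2^19 = 1310720 */
	printf("u4.19 encoding of 2.5: %u\n", u4d19);
	return 0;
}
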
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index f56d2891475f..beed70179bb5 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -45,6 +45,11 @@ enum signal_type {
};
/* help functions for signal types manipulation */
+static inline bool dc_is_hdmi_tmds_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
+}
+
static inline bool dc_is_hdmi_signal(enum signal_type signal)
{
return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 0fbc8fbc3541..a1055413bade 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1854,6 +1854,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+ if (ramp->type == GAMMA_CUSTOM)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index bfd27f10879e..19b1eaebe484 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,6 +37,8 @@
#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_EXIT_MARGIN 2000
+/* Threshold to change BTR multiplier (to avoid frequent changes) */
+#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
/* Number of consecutive frames to check before entering/exiting fixed refresh*/
@@ -48,6 +50,93 @@ struct core_freesync {
struct dc *dc;
};
+void setFieldWithMask(unsigned char *dest, unsigned int mask, unsigned int value)
+{
+ unsigned int shift = 0;
+
+ if (!mask || !dest)
+ return;
+
+ while (!((mask >> shift) & 1))
+ shift++;
+
+ //reset
+ *dest = *dest & ~mask;
+ //set
+ //don't let value span past mask
+ value = value & (mask >> shift);
+ //insert value
+ *dest = *dest | (value << shift);
+}
+
+// VTEM Byte Offset
+#define VRR_VTEM_PB0 0
+#define VRR_VTEM_PB1 1
+#define VRR_VTEM_PB2 2
+#define VRR_VTEM_PB3 3
+#define VRR_VTEM_PB4 4
+#define VRR_VTEM_PB5 5
+#define VRR_VTEM_PB6 6
+
+#define VRR_VTEM_MD0 7
+#define VRR_VTEM_MD1 8
+#define VRR_VTEM_MD2 9
+#define VRR_VTEM_MD3 10
+
+
+// VTEM Byte Masks
+//PB0
+#define MASK__VRR_VTEM_PB0__RESERVED0 0x01
+#define MASK__VRR_VTEM_PB0__SYNC 0x02
+#define MASK__VRR_VTEM_PB0__VFR 0x04
+#define MASK__VRR_VTEM_PB0__AFR 0x08
+#define MASK__VRR_VTEM_PB0__DS_TYPE 0x30
+ //0: Periodic pseudo-static EM Data Set
+ //1: Periodic dynamic EM Data Set
+ //2: Unique EM Data Set
+ //3: Reserved
+#define MASK__VRR_VTEM_PB0__END 0x40
+#define MASK__VRR_VTEM_PB0__NEW 0x80
+
+//PB1
+#define MASK__VRR_VTEM_PB1__RESERVED1 0xFF
+
+//PB2
+#define MASK__VRR_VTEM_PB2__ORGANIZATION_ID 0xFF
+ //0: This is a Vendor Specific EM Data Set
+ //1: This EM Data Set is defined by This Specification (HDMI 2.1 r102.clean)
+ //2: This EM Data Set is defined by CTA-861-G
+ //3: This EM Data Set is defined by VESA
+//PB3
+#define MASK__VRR_VTEM_PB3__DATA_SET_TAG_MSB 0xFF
+//PB4
+#define MASK__VRR_VTEM_PB4__DATA_SET_TAG_LSB 0xFF
+//PB5
+#define MASK__VRR_VTEM_PB5__DATA_SET_LENGTH_MSB 0xFF
+//PB6
+#define MASK__VRR_VTEM_PB6__DATA_SET_LENGTH_LSB 0xFF
+
+
+
+//PB7-27 (20 bytes):
+//PB7 = MD0
+#define MASK__VRR_VTEM_MD0__VRR_EN 0x01
+#define MASK__VRR_VTEM_MD0__M_CONST 0x02
+#define MASK__VRR_VTEM_MD0__RESERVED2 0x0C
+#define MASK__VRR_VTEM_MD0__FVA_FACTOR_M1 0xF0
+
+//MD1
+#define MASK__VRR_VTEM_MD1__BASE_VFRONT 0xFF
+
+//MD2
+#define MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
+#define MASK__VRR_VTEM_MD2__RB 0x04
+#define MASK__VRR_VTEM_MD2__RESERVED3 0xF8
+
+//MD3
+#define MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
+
+
#define MOD_FREESYNC_TO_CORE(mod_freesync)\
container_of(mod_freesync, struct core_freesync, public)
@@ -248,6 +337,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int frames_to_insert = 0;
unsigned int min_frame_duration_in_ns = 0;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
+ unsigned int delta_from_mid_point_delta_in_us;
min_frame_duration_in_ns = ((unsigned int) (div64_u64(
(1000000000ULL * 1000000),
@@ -318,10 +408,27 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2)
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
frames_to_insert = mid_point_frames_ceil;
- else
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+ delta_from_mid_point_in_us_1;
+ } else {
frames_to_insert = mid_point_frames_floor;
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+ delta_from_mid_point_in_us_2;
+ }
+
+ /* Prefer current frame multiplier when BTR is enabled unless it drifts
+ * too far from the midpoint
+ */
+ if (in_out_vrr->btr.frames_to_insert != 0 &&
+ delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
+ if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
+ in_out_vrr->max_duration_in_us) &&
+ ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
+ in_out_vrr->min_duration_in_us))
+ frames_to_insert = in_out_vrr->btr.frames_to_insert;
+ }
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
@@ -330,10 +437,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
inserted_frame_duration_in_us = last_render_time_in_us /
frames_to_insert;
- if (inserted_frame_duration_in_us <
- (1000000 / in_out_vrr->max_refresh_in_uhz))
- inserted_frame_duration_in_us =
- (1000000 / in_out_vrr->max_refresh_in_uhz);
+ if (inserted_frame_duration_in_us < in_out_vrr->min_duration_in_us)
+ inserted_frame_duration_in_us = in_out_vrr->min_duration_in_us;
/* Cache the calculated variables */
in_out_vrr->btr.inserted_duration_in_us =
@@ -469,16 +574,14 @@ static void build_vrr_infopacket_header_vtem(enum signal_type signal,
// HB0, HB1, HB2 indicates PacketType VTEMPacket
infopacket->hb0 = 0x7F;
infopacket->hb1 = 0xC0;
- infopacket->hb2 = 0x00;
- /* HB3 Bit Fields
- * Reserved :1 = 0
- * Sync :1 = 0
- * VFR :1 = 1
- * Ds_Type :2 = 0
- * End :1 = 0
- * New :1 = 0
- */
- infopacket->hb3 = 0x20;
+ infopacket->hb2 = 0x00; //sequence_index
+
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB0], MASK__VRR_VTEM_PB0__VFR, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB2], MASK__VRR_VTEM_PB2__ORGANIZATION_ID, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB3], MASK__VRR_VTEM_PB3__DATA_SET_TAG_MSB, 0);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB4], MASK__VRR_VTEM_PB4__DATA_SET_TAG_LSB, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB5], MASK__VRR_VTEM_PB5__DATA_SET_LENGTH_MSB, 0);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB6], MASK__VRR_VTEM_PB6__DATA_SET_LENGTH_LSB, 4);
}
static void build_vrr_infopacket_header_v1(enum signal_type signal,
@@ -583,45 +686,36 @@ static void build_vrr_vtem_infopacket_data(const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket)
{
- /* dc_info_packet to VtemPacket Translation of Bit-fields,
- * SB[6]
- * unsigned char VRR_EN :1
- * unsigned char M_CONST :1
- * unsigned char Reserved2 :2
- * unsigned char FVA_Factor_M1 :4
- * SB[7]
- * unsigned char Base_Vfront :8
- * SB[8]
- * unsigned char Base_Refresh_Rate_98 :2
- * unsigned char RB :1
- * unsigned char Reserved3 :5
- * SB[9]
- * unsigned char Base_RefreshRate_07 :8
- */
unsigned int fieldRateInHz;
if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
- vrr->state == VRR_STATE_ACTIVE_FIXED){
- infopacket->sb[6] |= 0x80; //VRR_EN Bit = 1
+ vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 1);
} else {
- infopacket->sb[6] &= 0x7F; //VRR_EN Bit = 0
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 0);
}
if (!stream->timing.vic) {
- infopacket->sb[7] = stream->timing.v_front_porch;
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD1], MASK__VRR_VTEM_MD1__BASE_VFRONT,
+ stream->timing.v_front_porch);
+
/* TODO: In dal2, we check mode flags for a reduced blanking timing.
* Need a way to relay that information to this function.
* if("ReducedBlanking")
* {
- * infopacket->sb[8] |= 0x20; //Set 3rd bit to 1
+ * setFieldWithMask(&infopacket->sb[VRR_VTEM_MD2], MASK__VRR_VTEM_MD2__RB, 1);
* }
*/
+
+ //TODO: DAL2 does FixPoint and rounding. Here we might need to account for that
fieldRateInHz = (stream->timing.pix_clk_100hz * 100)/
- (stream->timing.h_total * stream->timing.v_total);
+ (stream->timing.h_total * stream->timing.v_total);
- infopacket->sb[8] |= ((fieldRateInHz & 0x300) >> 2);
- infopacket->sb[9] |= fieldRateInHz & 0xFF;
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD2], MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98,
+ fieldRateInHz >> 8);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD3], MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07,
+ fieldRateInHz);
}
infopacket->valid = true;
@@ -745,6 +839,8 @@ static void build_vrr_infopacket_vtem(const struct dc_stream_state *stream,
{
//VTEM info packet for HdmiVrr
+ memset(infopacket, 0, sizeof(struct dc_info_packet));
+
//VTEM Packet is structured differently
build_vrr_infopacket_header_vtem(stream->signal, infopacket);
build_vrr_vtem_infopacket_data(stream, vrr, infopacket);
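
The freesync changes above replace hard-coded byte writes with setFieldWithMask(), which locates the field's lowest mask bit, clears the field and inserts the clamped value. The standalone sketch below reproduces that packing for three of the VTEM metadata fields; the shortened macro names and the 144 Hz sample rate are made up for the example, while the helper's logic mirrors the function added above.

/* Standalone sketch of the mask-based packing used by the VTEM builder. */
#include <stdio.h>

#define MD0 0
#define MD2 1
#define MD3 2

#define MASK_MD0_VRR_EN		0x01
#define MASK_MD2_BASE_REFRESH_98	0x03
#define MASK_MD3_BASE_REFRESH_07	0xFF

static void set_field_with_mask(unsigned char *dest, unsigned int mask,
				unsigned int value)
{
	unsigned int shift = 0;

	if (!mask || !dest)
		return;
	while (!((mask >> shift) & 1))
		shift++;
	*dest &= ~mask;				/* clear the field      */
	value &= mask >> shift;			/* clamp value to width */
	*dest |= value << shift;		/* insert shifted value */
}

int main(void)
{
	unsigned char md[3] = { 0 };
	unsigned int field_rate_hz = 144;	/* sample base refresh */

	set_field_with_mask(&md[MD0], MASK_MD0_VRR_EN, 1);
	set_field_with_mask(&md[MD2], MASK_MD2_BASE_REFRESH_98,
			    field_rate_hz >> 8);
	set_field_with_mask(&md[MD3], MASK_MD3_BASE_REFRESH_07, field_rate_hz);

	printf("MD0=0x%02x MD2=0x%02x MD3=0x%02x\n", md[0], md[1], md[2]);
	return 0;
}
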
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 038b88221c5f..b3810b864676 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -41,9 +41,12 @@ static const unsigned char min_reduction_table[13] = {
static const unsigned char max_reduction_table[13] = {
0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
-/* ABM 2.2 Min Reduction effectively disabled (100% for all configs)*/
+/* Possible ABM 2.2 Min Reduction configs from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 100 100 100 100 100 100 100 100 100 92.2 83.1 75.3 75.3 %
+ */
static const unsigned char min_reduction_table_v_2_2[13] = {
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd4, 0xc0, 0xc0};
/* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
@@ -408,9 +411,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->flags = 0x0;
ram_table->deviation_gain[0] = 0xb3;
- ram_table->deviation_gain[1] = 0xb3;
- ram_table->deviation_gain[2] = 0xb3;
- ram_table->deviation_gain[3] = 0xb3;
+ ram_table->deviation_gain[1] = 0xa8;
+ ram_table->deviation_gain[2] = 0x98;
+ ram_table->deviation_gain[3] = 0x68;
ram_table->min_reduction[0][0] = min_reduction_table_v_2_2[abm_config[set][0]];
ram_table->min_reduction[1][0] = min_reduction_table_v_2_2[abm_config[set][0]];
@@ -505,7 +508,7 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->contrastFactor[0] = 0x99;
ram_table->contrastFactor[1] = 0x99;
- ram_table->contrastFactor[2] = 0x99;
+ ram_table->contrastFactor[2] = 0x90;
ram_table->contrastFactor[3] = 0x80;
ram_table->iir_curve[0] = 0x65;
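
The new min_reduction_table_v_2_2 comment quotes each entry as a percentage of full brightness, i.e. the byte value over 255 scaled to 100%. A quick check of that arithmetic (assuming the linear 0xFF == 100% scaling the comment implies):

/* Verify the percentages quoted for the tail of min_reduction_table_v_2_2. */
#include <stdio.h>

int main(void)
{
	const unsigned char min_reduction_tail[] = { 0xff, 0xeb, 0xd4, 0xc0 };
	unsigned int i;

	for (i = 0; i < sizeof(min_reduction_tail); i++)
		printf("0x%02x -> %.1f%%\n", min_reduction_tail[i],
		       min_reduction_tail[i] * 100.0 / 255.0);
	/* prints 100.0%, 92.2%, 83.1%, 75.3% - matching the comment */
	return 0;
}
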
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 470d7b89071a..574bf6e70763 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -137,6 +137,7 @@ enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
};
+enum amd_dpm_forced_level;
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
*/
@@ -186,6 +187,8 @@ struct amd_ip_funcs {
enum amd_powergating_state state);
/** @get_clockgating_state: get current clockgating status */
void (*get_clockgating_state)(void *handle, u32 *flags);
+ /** @enable_umd_pstate: enable UMD powerstate */
+ int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
index 721c61171045..5a44e614ab7e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
@@ -2347,6 +2347,8 @@
#define mmHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP0_HUBPREQ_DEBUG_DB 0x0569
#define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG 0x056a
+#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x056e
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x056f
@@ -2631,6 +2633,8 @@
#define mmHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP1_HUBPREQ_DEBUG_DB 0x062d
#define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG 0x062e
+#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0632
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0633
@@ -2915,6 +2919,8 @@
#define mmHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP2_HUBPREQ_DEBUG_DB 0x06f1
#define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG 0x06f2
+#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06f6
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06f7
@@ -3199,6 +3205,8 @@
#define mmHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP3_HUBPREQ_DEBUG_DB 0x07b5
#define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG 0x07b6
+#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07ba
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07bb
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
index 442ca7c471a5..6109f5ad25ad 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
@@ -141,6 +141,8 @@
#define mmUVD_GPCOM_VCPU_DATA0_BASE_IDX 1
#define mmUVD_GPCOM_VCPU_DATA1 0x03c5
#define mmUVD_GPCOM_VCPU_DATA1_BASE_IDX 1
+#define mmUVD_ENGINE_CNTL 0x03c6
+#define mmUVD_ENGINE_CNTL_BASE_IDX 1
#define mmUVD_UDEC_DBW_UV_ADDR_CONFIG 0x03d2
#define mmUVD_UDEC_DBW_UV_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_UDEC_ADDR_CONFIG 0x03d3
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
index 63457f9df4c5..f84bed6eecb9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
@@ -312,6 +312,11 @@
//UVD_GPCOM_VCPU_DATA1
#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0
#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_ENGINE_CNTL
+#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x1
+#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x0
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x2
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x1
//UVD_UDEC_DBW_UV_ADDR_CONFIG
#define UVD_UDEC_DBW_UV_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
#define UVD_UDEC_DBW_UV_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
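
The UVD_ENGINE_CNTL additions follow the register-header convention of a _MASK/__SHIFT pair per field, encoded with a read-modify-write. A small standalone sketch using the added defines; the local register variable and set_field helper are mocks standing in for real MMIO access:

/* Encode the new UVD_ENGINE_CNTL fields with the mask/shift pairs added
 * above. The register variable is a stand-in for a real MMIO access. */
#include <stdint.h>
#include <stdio.h>

#define UVD_ENGINE_CNTL__ENGINE_START_MASK		0x1
#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT		0x0
#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK		0x2
#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT	0x1

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t engine_cntl = 0;	/* mock of mmUVD_ENGINE_CNTL */

	engine_cntl = set_field(engine_cntl,
				UVD_ENGINE_CNTL__ENGINE_START_MASK,
				UVD_ENGINE_CNTL__ENGINE_START__SHIFT, 1);
	engine_cntl = set_field(engine_cntl,
				UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK,
				UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT, 0);
	printf("UVD_ENGINE_CNTL = 0x%08x\n", engine_cntl);
	return 0;
}
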
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 8eb0bb241210..d3075adb3297 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -494,6 +494,9 @@ enum atombios_firmware_capability
ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
+ ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
+ ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
+ ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
};
enum atom_cooling_solution_id{
@@ -528,6 +531,35 @@ struct atom_firmware_info_v3_2 {
uint32_t reserved2[3];
};
+struct atom_firmware_info_v3_3
+{
+ struct atom_common_table_header table_header;
+ uint32_t firmware_revision;
+ uint32_t bootup_sclk_in10khz;
+ uint32_t bootup_mclk_in10khz;
+ uint32_t firmware_capability; // enum atombios_firmware_capability
+ uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */
+ uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address
+ uint16_t bootup_vddc_mv;
+ uint16_t bootup_vddci_mv;
+ uint16_t bootup_mvddc_mv;
+ uint16_t bootup_vddgfx_mv;
+ uint8_t mem_module_id;
+ uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */
+ uint8_t reserved1[2];
+ uint32_t mc_baseaddr_high;
+ uint32_t mc_baseaddr_low;
+ uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
+ uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id
+ uint8_t board_i2c_feature_slave_addr;
+ uint8_t reserved3;
+ uint16_t bootup_mvddq_mv;
+ uint16_t bootup_mvpp_mv;
+ uint32_t zfbstartaddrin16mb;
+ uint32_t pplib_pptable_id; // if pplib_pptable_id!=0, pplib get powerplay table inside driver instead of from VBIOS
+ uint32_t reserved2[2];
+};
+
/*
***************************************************************************
Data Table lcd_info structure
@@ -686,6 +718,7 @@ enum atom_encoder_caps_def
ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not.
ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board.
+ ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type.
};
struct atom_encoder_caps_record
@@ -1226,16 +1259,17 @@ struct atom_gfx_info_v2_3 {
uint32_t rm21_sram_vmin_value;
};
-struct atom_gfx_info_v2_4 {
+struct atom_gfx_info_v2_4
+{
struct atom_common_table_header table_header;
uint8_t gfxip_min_ver;
uint8_t gfxip_max_ver;
- uint8_t gc_num_se;
- uint8_t max_tile_pipes;
- uint8_t gc_num_cu_per_sh;
- uint8_t gc_num_sh_per_se;
- uint8_t gc_num_rb_per_se;
- uint8_t gc_num_tccs;
+ uint8_t max_shader_engines;
+ uint8_t reserved;
+ uint8_t max_cu_per_sh;
+ uint8_t max_sh_per_se;
+ uint8_t max_backends_per_se;
+ uint8_t max_texture_channel_caches;
uint32_t regaddr_cp_dma_src_addr;
uint32_t regaddr_cp_dma_src_addr_hi;
uint32_t regaddr_cp_dma_dst_addr;
@@ -1780,6 +1814,56 @@ struct atom_umc_info_v3_1
uint32_t mem_refclk_10khz;
};
+// umc_info.umc_config
+enum atom_umc_config_def {
+ UMC_CONFIG__ENABLE_1KB_INTERLEAVE_MODE = 0x00000001,
+ UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE = 0x00000002,
+ UMC_CONFIG__ENABLE_HBM_LANE_REPAIR = 0x00000004,
+ UMC_CONFIG__ENABLE_BANK_HARVESTING = 0x00000008,
+ UMC_CONFIG__ENABLE_PHY_REINIT = 0x00000010,
+ UMC_CONFIG__DISABLE_UCODE_CHKSTATUS = 0x00000020,
+};
+
+struct atom_umc_info_v3_2
+{
+ struct atom_common_table_header table_header;
+ uint32_t ucode_version;
+ uint32_t ucode_rom_startaddr;
+ uint32_t ucode_length;
+ uint16_t umc_reg_init_offset;
+ uint16_t customer_ucode_name_offset;
+ uint16_t mclk_ss_percentage;
+ uint16_t mclk_ss_rate_10hz;
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type; //enum of atom_dgpu_vram_type
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t pstate_uclk_10khz[4];
+ uint16_t umcgoldenoffset;
+ uint16_t densitygoldenoffset;
+};
+
+struct atom_umc_info_v3_3
+{
+ struct atom_common_table_header table_header;
+ uint32_t ucode_reserved;
+ uint32_t ucode_rom_startaddr;
+ uint32_t ucode_length;
+ uint16_t umc_reg_init_offset;
+ uint16_t customer_ucode_name_offset;
+ uint16_t mclk_ss_percentage;
+ uint16_t mclk_ss_rate_10hz;
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type; //enum of atom_dgpu_vram_type
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t pstate_uclk_10khz[4];
+ uint16_t umcgoldenoffset;
+ uint16_t densitygoldenoffset;
+ uint32_t reserved[4];
+};
/*
***************************************************************************
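
The added ATOM_FIRMWARE_CAP_* values are single-bit flags in the firmware_capability word of atom_firmware_info. A minimal sketch of testing them on the driver side; the flag values are copied from the enum above, and the sample capability word is invented for the example:

/* Minimal sketch of testing the new firmware_capability bits. */
#include <stdint.h>
#include <stdio.h>

#define ATOM_FIRMWARE_CAP_HWEMU_ENABLE	0x00000080
#define ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG	0x00000100
#define ATOM_FIRMWARE_CAP_SRAM_ECC	0x00000200

int main(void)
{
	uint32_t firmware_capability = 0x00000201;	/* sample value */

	if (firmware_capability & ATOM_FIRMWARE_CAP_SRAM_ECC)
		printf("VBIOS reports SRAM ECC support\n");
	if (!(firmware_capability & ATOM_FIRMWARE_CAP_HWEMU_ENABLE))
		printf("HW emulation not enabled\n");
	return 0;
}
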
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 5f3c10ebff08..b897aca9b4c9 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -85,18 +85,6 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
-enum kgd_engine_type {
- KGD_ENGINE_PFP = 1,
- KGD_ENGINE_ME,
- KGD_ENGINE_CE,
- KGD_ENGINE_MEC1,
- KGD_ENGINE_MEC2,
- KGD_ENGINE_RLC,
- KGD_ENGINE_SDMA1,
- KGD_ENGINE_SDMA2,
- KGD_ENGINE_MAX
-};
-
/**
* enum kfd_sched_policy
*
@@ -230,8 +218,6 @@ struct tile_config {
* @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
* SDMA hqd slot.
*
- * @get_fw_version: Returns FW versions from the header
- *
* @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
* Only used for no cp scheduling mode
*
@@ -311,8 +297,6 @@ struct kfd2kgd_calls {
struct kgd_dev *kgd,
uint8_t vmid);
- uint16_t (*get_fw_version)(struct kgd_dev *kgd,
- enum kgd_engine_type type);
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/include/linux/chash.h b/drivers/gpu/drm/amd/include/linux/chash.h
deleted file mode 100644
index 6dc159924ed1..000000000000
--- a/drivers/gpu/drm/amd/include/linux/chash.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _LINUX_CHASH_H
-#define _LINUX_CHASH_H
-
-#include <linux/types.h>
-#include <linux/hash.h>
-#include <linux/bug.h>
-#include <asm/bitsperlong.h>
-
-#if BITS_PER_LONG == 32
-# define _CHASH_LONG_SHIFT 5
-#elif BITS_PER_LONG == 64
-# define _CHASH_LONG_SHIFT 6
-#else
-# error "Unexpected BITS_PER_LONG"
-#endif
-
-struct __chash_table {
- u8 bits;
- u8 key_size;
- unsigned int value_size;
- u32 size_mask;
- unsigned long *occup_bitmap, *valid_bitmap;
- union {
- u32 *keys32;
- u64 *keys64;
- };
- u8 *values;
-
-#ifdef CONFIG_CHASH_STATS
- u64 hits, hits_steps, hits_time_ns;
- u64 miss, miss_steps, miss_time_ns;
- u64 relocs, reloc_dist;
-#endif
-};
-
-#define __CHASH_BITMAP_SIZE(bits) \
- (((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-#define __CHASH_ARRAY_SIZE(bits, size) \
- ((((size) << (bits)) + sizeof(long) - 1) / sizeof(long))
-
-#define __CHASH_DATA_SIZE(bits, key_size, value_size) \
- (__CHASH_BITMAP_SIZE(bits) * 2 + \
- __CHASH_ARRAY_SIZE(bits, key_size) + \
- __CHASH_ARRAY_SIZE(bits, value_size))
-
-#define STRUCT_CHASH_TABLE(bits, key_size, value_size) \
- struct { \
- struct __chash_table table; \
- unsigned long data \
- [__CHASH_DATA_SIZE(bits, key_size, value_size)];\
- }
-
-/**
- * struct chash_table - Dynamically allocated closed hash table
- *
- * Use this struct for dynamically allocated hash tables (using
- * chash_table_alloc and chash_table_free), where the size is
- * determined at runtime.
- */
-struct chash_table {
- struct __chash_table table;
- unsigned long *data;
-};
-
-/**
- * DECLARE_CHASH_TABLE - macro to declare a closed hash table
- * @table: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- *
- * This declares the hash table variable with a static size.
- *
- * The closed hash table stores key-value pairs with low memory and
- * lookup overhead. In operation it performs no dynamic memory
- * management. The data being stored does not require any
- * list_heads. The hash table performs best with small @val_sz and as
- * long as some space (about 50%) is left free in the table. But the
- * table can still work reasonably efficiently even when filled up to
- * about 90%. If bigger data items need to be stored and looked up,
- * store the pointer to it as value in the hash table.
- *
- * @val_sz may be 0. This can be useful when all the stored
- * information is contained in the key itself and the fact that it is
- * in the hash table (or not).
- */
-#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz) \
- STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table
-
-#ifdef CONFIG_CHASH_STATS
-#define __CHASH_STATS_INIT(prefix), \
- prefix.hits = 0, \
- prefix.hits_steps = 0, \
- prefix.hits_time_ns = 0, \
- prefix.miss = 0, \
- prefix.miss_steps = 0, \
- prefix.miss_time_ns = 0, \
- prefix.relocs = 0, \
- prefix.reloc_dist = 0
-#else
-#define __CHASH_STATS_INIT(prefix)
-#endif
-
-#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz) \
- prefix.bits = (bts), \
- prefix.key_size = (key_sz), \
- prefix.value_size = (val_sz), \
- prefix.size_mask = ((1 << bts) - 1), \
- prefix.occup_bitmap = &data[0], \
- prefix.valid_bitmap = &data \
- [__CHASH_BITMAP_SIZE(bts)], \
- prefix.keys64 = (u64 *)&data \
- [__CHASH_BITMAP_SIZE(bts) * 2], \
- prefix.values = (u8 *)&data \
- [__CHASH_BITMAP_SIZE(bts) * 2 + \
- __CHASH_ARRAY_SIZE(bts, key_sz)] \
- __CHASH_STATS_INIT(prefix)
-
-/**
- * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table
- * @tbl: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- *
- * Note: the macro can be used for global and local hash table variables.
- */
-#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
- DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = { \
- .table = { \
- __CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \
- }, \
- .data = {0} \
- }
-
-/**
- * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE
- * @tbl: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- */
-#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
- __CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz)
-
-int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
- unsigned int value_size, gfp_t gfp_mask);
-void chash_table_free(struct chash_table *table);
-
-/**
- * chash_table_dump_stats - Dump statistics of a closed hash table
- * @tbl: Pointer to the table structure
- *
- * Dumps some performance statistics of the table gathered in operation
- * in the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is enabled,
- * user must turn on messages for chash.c (file chash.c +p).
- */
-#ifdef CONFIG_CHASH_STATS
-#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table)
-
-void __chash_table_dump_stats(struct __chash_table *table);
-#else
-#define chash_table_dump_stats(tbl)
-#endif
-
-/**
- * chash_table_reset_stats - Reset statistics of a closed hash table
- * @tbl: Pointer to the table structure
- */
-#ifdef CONFIG_CHASH_STATS
-#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table)
-
-static inline void __chash_table_reset_stats(struct __chash_table *table)
-{
- (void)table __CHASH_STATS_INIT((*table));
-}
-#else
-#define chash_table_reset_stats(tbl)
-#endif
-
-/**
- * chash_table_copy_in - Copy a new value into the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to add or update
- * @value: Pointer to value to copy, may be NULL
- *
- * If @key already has an entry, its value is replaced. Otherwise a
- * new entry is added. If @value is NULL, the value is left unchanged
- * or uninitialized. Returns 1 if an entry already existed, 0 if a new
- * entry was added or %-ENOMEM if there was no free space in the
- * table.
- */
-#define chash_table_copy_in(tbl, key, value) \
- __chash_table_copy_in(&(*tbl).table, key, value)
-
-int __chash_table_copy_in(struct __chash_table *table, u64 key,
- const void *value);
-
-/**
- * chash_table_copy_out - Copy a value out of the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to find
- * @value: Pointer to value to copy, may be NULL
- *
- * If @value is not NULL and the table has a non-0 value_size, the
- * value at @key is copied to @value. Returns the slot index of the
- * entry or %-EINVAL if @key was not found.
- */
-#define chash_table_copy_out(tbl, key, value) \
- __chash_table_copy_out(&(*tbl).table, key, value, false)
-
-int __chash_table_copy_out(struct __chash_table *table, u64 key,
- void *value, bool remove);
-
-/**
- * chash_table_remove - Remove an entry from the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to find
- * @value: Pointer to value to copy, may be NULL
- *
- * If @value is not NULL and the table has a non-0 value_size, the
- * value at @key is copied to @value. The entry is removed from the
- * table. Returns the slot index of the removed entry or %-EINVAL if
- * @key was not found.
- */
-#define chash_table_remove(tbl, key, value) \
- __chash_table_copy_out(&(*tbl).table, key, value, true)
-
-/*
- * Low level iterator API used internally by the above functions.
- */
-struct chash_iter {
- struct __chash_table *table;
- unsigned long mask;
- int slot;
-};
-
-/**
- * CHASH_ITER_INIT - Initialize a hash table iterator
- * @tbl: Pointer to hash table to iterate over
- * @s: Initial slot number
- */
-#define CHASH_ITER_INIT(table, s) { \
- table, \
- 1UL << ((s) & (BITS_PER_LONG - 1)), \
- s \
- }
-/**
- * CHASH_ITER_SET - Set hash table iterator to new slot
- * @iter: Iterator
- * @s: Slot number
- */
-#define CHASH_ITER_SET(iter, s) \
- (iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)), \
- (iter).slot = (s)
-/**
- * CHASH_ITER_INC - Increment hash table iterator
- * @table: Hash table to iterate over
- *
- * Wraps around at the end.
- */
-#define CHASH_ITER_INC(iter) do { \
- (iter).mask = (iter).mask << 1 | \
- (iter).mask >> (BITS_PER_LONG - 1); \
- (iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \
- } while (0)
-
-static inline bool chash_iter_is_valid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
- iter.mask);
-}
-static inline bool chash_iter_is_empty(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
- iter.mask);
-}
-
-static inline void chash_iter_set_valid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
- iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
-}
-static inline void chash_iter_set_invalid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
-}
-static inline void chash_iter_set_empty(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
-}
-
-static inline u32 chash_iter_key32(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 4);
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->keys32[iter.slot];
-}
-static inline u64 chash_iter_key64(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 8);
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->keys64[iter.slot];
-}
-static inline u64 chash_iter_key(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return (iter.table->key_size == 4) ?
- iter.table->keys32[iter.slot] : iter.table->keys64[iter.slot];
-}
-
-static inline u32 chash_iter_hash32(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 4);
- return hash_32(chash_iter_key32(iter), iter.table->bits);
-}
-
-static inline u32 chash_iter_hash64(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 8);
- return hash_64(chash_iter_key64(iter), iter.table->bits);
-}
-
-static inline u32 chash_iter_hash(const struct chash_iter iter)
-{
- return (iter.table->key_size == 4) ?
- hash_32(chash_iter_key32(iter), iter.table->bits) :
- hash_64(chash_iter_key64(iter), iter.table->bits);
-}
-
-static inline void *chash_iter_value(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->values +
- ((unsigned long)iter.slot * iter.table->value_size);
-}
-
-#endif /* _LINUX_CHASH_H */
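
For context on what is being removed: the chash interface deleted here was driven through the macros documented in the header above. The sketch below, based only on that kernel-doc (DEFINE_CHASH_TABLE plus the copy_in/copy_out wrappers), shows how a caller was expected to use it; demo_table and demo_chash_usage are illustrative names, and of course this no longer builds once chash is gone.

/* Usage sketch of the removed closed hash table interface, following the
 * kernel-doc in the deleted header. Illustrative only. */
#include <linux/chash.h>

/* 2^6 = 64 slots, 8-byte keys, 4-byte values */
static DEFINE_CHASH_TABLE(demo_table, 6, 8, 4);

static int demo_chash_usage(void)
{
	u32 value = 42;
	int ret;

	/* insert or update the entry for key 0x1234 */
	ret = chash_table_copy_in(&demo_table, 0x1234, &value);
	if (ret < 0)
		return ret;		/* -ENOMEM: table full */

	/* look the value back up; returns the slot index or -EINVAL */
	value = 0;
	ret = chash_table_copy_out(&demo_table, 0x1234, &value);

	return ret < 0 ? ret : 0;
}
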
diff --git a/drivers/gpu/drm/amd/lib/Kconfig b/drivers/gpu/drm/amd/lib/Kconfig
deleted file mode 100644
index 776ef3434c10..000000000000
--- a/drivers/gpu/drm/amd/lib/Kconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-menu "AMD Library routines"
-
-#
-# Closed hash table
-#
-config CHASH
- tristate
- default DRM_AMDGPU
- help
- Statically sized closed hash table implementation with low
- memory and CPU overhead.
-
-config CHASH_STATS
- bool "Closed hash table performance statistics"
- depends on CHASH
- default n
- help
- Enable collection of performance statistics for closed hash tables.
-
-config CHASH_SELFTEST
- bool "Closed hash table self test"
- depends on CHASH
- default n
- help
- Runs a selftest during module load. Several module parameters
- are available to modify the behaviour of the test.
-
-endmenu
diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
deleted file mode 100644
index 690243001e1a..000000000000
--- a/drivers/gpu/drm/amd/lib/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Copyright 2017 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-#
-# Makefile for AMD library routines, which are used by AMD driver
-# components.
-#
-# This is for common library routines that can be shared between AMD
-# driver components or later moved to kernel/lib for sharing with
-# other drivers.
-
-ccflags-y := -I$(src)/../include
-
-obj-$(CONFIG_CHASH) += chash.o
diff --git a/drivers/gpu/drm/amd/lib/chash.c b/drivers/gpu/drm/amd/lib/chash.c
deleted file mode 100644
index b8e45f356a1c..000000000000
--- a/drivers/gpu/drm/amd/lib/chash.c
+++ /dev/null
@@ -1,638 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/hash.h>
-#include <linux/bug.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/sched/clock.h>
-#include <asm/div64.h>
-#include <linux/chash.h>
-
-/**
- * chash_table_alloc - Allocate closed hash table
- * @table: Pointer to the table structure
- * @bits: Table size will be 2^bits entries
- * @key_size: Size of hash keys in bytes, 4 or 8
- * @value_size: Size of data values in bytes, can be 0
- */
-int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
- unsigned int value_size, gfp_t gfp_mask)
-{
- if (bits > 31)
- return -EINVAL;
-
- if (key_size != 4 && key_size != 8)
- return -EINVAL;
-
- table->data = kcalloc(__CHASH_DATA_SIZE(bits, key_size, value_size),
- sizeof(long), gfp_mask);
- if (!table->data)
- return -ENOMEM;
-
- __CHASH_TABLE_INIT(table->table, table->data,
- bits, key_size, value_size);
-
- return 0;
-}
-EXPORT_SYMBOL(chash_table_alloc);
-
-/**
- * chash_table_free - Free closed hash table
- * @table: Pointer to the table structure
- */
-void chash_table_free(struct chash_table *table)
-{
- kfree(table->data);
-}
-EXPORT_SYMBOL(chash_table_free);
-
-#ifdef CONFIG_CHASH_STATS
-
-#define DIV_FRAC(nom, denom, quot, frac, frac_digits) do { \
- u64 __nom = (nom); \
- u64 __denom = (denom); \
- u64 __quot, __frac; \
- u32 __rem; \
- \
- while (__denom >> 32) { \
- __nom >>= 1; \
- __denom >>= 1; \
- } \
- __quot = __nom; \
- __rem = do_div(__quot, __denom); \
- __frac = __rem * (frac_digits) + (__denom >> 1); \
- do_div(__frac, __denom); \
- (quot) = __quot; \
- (frac) = __frac; \
- } while (0)
-
-void __chash_table_dump_stats(struct __chash_table *table)
-{
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
- u32 filled = 0, empty = 0, tombstones = 0;
- u64 quot1, quot2;
- u32 frac1, frac2;
-
- do {
- if (chash_iter_is_valid(iter))
- filled++;
- else if (chash_iter_is_empty(iter))
- empty++;
- else
- tombstones++;
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- pr_debug("chash: key size %u, value size %u\n",
- table->key_size, table->value_size);
- pr_debug(" Slots total/filled/empty/tombstones: %u / %u / %u / %u\n",
- 1 << table->bits, filled, empty, tombstones);
- if (table->hits > 0) {
- DIV_FRAC(table->hits_steps, table->hits, quot1, frac1, 1000);
- DIV_FRAC(table->hits * 1000, table->hits_time_ns,
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Hits (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->hits, quot1, frac1, quot2, frac2);
- if (table->miss > 0) {
- DIV_FRAC(table->miss_steps, table->miss, quot1, frac1, 1000);
- DIV_FRAC(table->miss * 1000, table->miss_time_ns,
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Misses (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->miss, quot1, frac1, quot2, frac2);
- if (table->hits + table->miss > 0) {
- DIV_FRAC(table->hits_steps + table->miss_steps,
- table->hits + table->miss, quot1, frac1, 1000);
- DIV_FRAC((table->hits + table->miss) * 1000,
- (table->hits_time_ns + table->miss_time_ns),
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Total (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->hits + table->miss, quot1, frac1, quot2, frac2);
- if (table->relocs > 0) {
- DIV_FRAC(table->hits + table->miss, table->relocs,
- quot1, frac1, 1000);
- DIV_FRAC(table->reloc_dist, table->relocs, quot2, frac2, 1000);
- pr_debug(" Relocations (freq, avg.dist): %llu (1:%llu.%03u, %llu.%03u)\n",
- table->relocs, quot1, frac1, quot2, frac2);
- } else {
- pr_debug(" No relocations\n");
- }
-}
-EXPORT_SYMBOL(__chash_table_dump_stats);
-
-#undef DIV_FRAC
-#endif
-
-#define CHASH_INC(table, a) ((a) = ((a) + 1) & (table)->size_mask)
-#define CHASH_ADD(table, a, b) (((a) + (b)) & (table)->size_mask)
-#define CHASH_SUB(table, a, b) (((a) - (b)) & (table)->size_mask)
-#define CHASH_IN_RANGE(table, slot, first, last) \
- (CHASH_SUB(table, slot, first) <= CHASH_SUB(table, last, first))
-
-/*#define CHASH_DEBUG Uncomment this to enable verbose debug output*/
-#ifdef CHASH_DEBUG
-static void chash_table_dump(struct __chash_table *table)
-{
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
-
- do {
- if ((iter.slot & 3) == 0)
- pr_debug("%04x: ", iter.slot);
-
- if (chash_iter_is_valid(iter))
- pr_debug("[%016llx] ", chash_iter_key(iter));
- else if (chash_iter_is_empty(iter))
- pr_debug("[ <empty> ] ");
- else
- pr_debug("[ <tombstone> ] ");
-
- if ((iter.slot & 3) == 3)
- pr_debug("\n");
-
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- if ((iter.slot & 3) != 0)
- pr_debug("\n");
-}
-
-static int chash_table_check(struct __chash_table *table)
-{
- u32 hash;
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
- struct chash_iter cur = CHASH_ITER_INIT(table, 0);
-
- do {
- if (!chash_iter_is_valid(iter)) {
- CHASH_ITER_INC(iter);
- continue;
- }
-
- hash = chash_iter_hash(iter);
- CHASH_ITER_SET(cur, hash);
- while (cur.slot != iter.slot) {
- if (chash_iter_is_empty(cur)) {
- pr_err("Path to element at %x with hash %x broken at slot %x\n",
- iter.slot, hash, cur.slot);
- chash_table_dump(table);
- return -EINVAL;
- }
- CHASH_ITER_INC(cur);
- }
-
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- return 0;
-}
-#endif
-
-static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
-{
- BUG_ON(src.table == dst.table && src.slot == dst.slot);
- BUG_ON(src.table->key_size != dst.table->key_size);
- BUG_ON(src.table->value_size != dst.table->value_size);
-
- if (dst.table->key_size == 4)
- dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
- else
- dst.table->keys64[dst.slot] = src.table->keys64[src.slot];
-
- if (dst.table->value_size)
- memcpy(chash_iter_value(dst), chash_iter_value(src),
- dst.table->value_size);
-
- chash_iter_set_valid(dst);
- chash_iter_set_invalid(src);
-
-#ifdef CONFIG_CHASH_STATS
- if (src.table == dst.table) {
- dst.table->relocs++;
- dst.table->reloc_dist +=
- CHASH_SUB(dst.table, src.slot, dst.slot);
- }
-#endif
-}
-
-/**
- * __chash_table_find - Helper for looking up a hash table entry
- * @iter: Pointer to hash table iterator
- * @key: Key of the entry to find
- * @for_removal: set to true if the element will be removed soon
- *
- * Searches for an entry in the hash table with a given key. iter must
- * be initialized by the caller to point to the home position of the
- * hypothetical entry, i.e. it must be initialized with the hash table
- * and the key's hash as the initial slot for the search.
- *
- * This function also does some local clean-up to speed up future
- * look-ups by relocating entries to better slots and removing
- * tombstones that are no longer needed.
- *
- * If @for_removal is true, the function avoids relocating the entry
- * that is being returned.
- *
- * Returns 0 if the search is successful. In this case iter is updated
- * to point to the found entry. Otherwise %-EINVAL is returned and the
- * iter is updated to point to the first available slot for the given
- * key. If the table is full, the slot is set to -1.
- */
-static int chash_table_find(struct chash_iter *iter, u64 key,
- bool for_removal)
-{
-#ifdef CONFIG_CHASH_STATS
- u64 ts1 = local_clock();
-#endif
- u32 hash = iter->slot;
- struct chash_iter first_redundant = CHASH_ITER_INIT(iter->table, -1);
- int first_avail = (for_removal ? -2 : -1);
-
- while (!chash_iter_is_valid(*iter) || chash_iter_key(*iter) != key) {
- if (chash_iter_is_empty(*iter)) {
- /* Found an empty slot, which ends the
- * search. Clean up any preceding tombstones
- * that are no longer needed because they lead
- * to no-where
- */
- if ((int)first_redundant.slot < 0)
- goto not_found;
- while (first_redundant.slot != iter->slot) {
- if (!chash_iter_is_valid(first_redundant))
- chash_iter_set_empty(first_redundant);
- CHASH_ITER_INC(first_redundant);
- }
-#ifdef CHASH_DEBUG
- chash_table_check(iter->table);
-#endif
- goto not_found;
- } else if (!chash_iter_is_valid(*iter)) {
- /* Found a tombstone. Remember it as candidate
- * for relocating the entry we're looking for
- * or for adding a new entry with the given key
- */
- if (first_avail == -1)
- first_avail = iter->slot;
- /* Or mark it as the start of a series of
- * potentially redundant tombstones
- */
- else if (first_redundant.slot == -1)
- CHASH_ITER_SET(first_redundant, iter->slot);
- } else if (first_redundant.slot >= 0) {
- /* Found a valid, occupied slot with a
- * preceding series of tombstones. Relocate it
- * to a better position that no longer depends
- * on those tombstones
- */
- u32 cur_hash = chash_iter_hash(*iter);
-
- if (!CHASH_IN_RANGE(iter->table, cur_hash,
- first_redundant.slot + 1,
- iter->slot)) {
- /* This entry has a hash at or before
- * the first tombstone we found. We
- * can relocate it to that tombstone
- * and advance to the next tombstone
- */
- chash_iter_relocate(first_redundant, *iter);
- do {
- CHASH_ITER_INC(first_redundant);
- } while (chash_iter_is_valid(first_redundant));
- } else if (cur_hash != iter->slot) {
- /* Relocate entry to its home position
- * or as close as possible so it no
- * longer depends on any preceding
- * tombstones
- */
- struct chash_iter new_iter =
- CHASH_ITER_INIT(iter->table, cur_hash);
-
- while (new_iter.slot != iter->slot &&
- chash_iter_is_valid(new_iter))
- CHASH_ITER_INC(new_iter);
-
- if (new_iter.slot != iter->slot)
- chash_iter_relocate(new_iter, *iter);
- }
- }
-
- CHASH_ITER_INC(*iter);
- if (iter->slot == hash) {
- iter->slot = -1;
- goto not_found;
- }
- }
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->hits++;
- iter->table->hits_steps += CHASH_SUB(iter->table, iter->slot, hash) + 1;
-#endif
-
- if (first_avail >= 0) {
- CHASH_ITER_SET(first_redundant, first_avail);
- chash_iter_relocate(first_redundant, *iter);
- iter->slot = first_redundant.slot;
- iter->mask = first_redundant.mask;
- }
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->hits_time_ns += local_clock() - ts1;
-#endif
-
- return 0;
-
-not_found:
-#ifdef CONFIG_CHASH_STATS
- iter->table->miss++;
- iter->table->miss_steps += (iter->slot < 0) ?
- (1 << iter->table->bits) :
- CHASH_SUB(iter->table, iter->slot, hash) + 1;
-#endif
-
- if (first_avail >= 0)
- CHASH_ITER_SET(*iter, first_avail);
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->miss_time_ns += local_clock() - ts1;
-#endif
-
- return -EINVAL;
-}
-
-int __chash_table_copy_in(struct __chash_table *table, u64 key,
- const void *value)
-{
- u32 hash = (table->key_size == 4) ?
- hash_32(key, table->bits) : hash_64(key, table->bits);
- struct chash_iter iter = CHASH_ITER_INIT(table, hash);
- int r = chash_table_find(&iter, key, false);
-
- /* Found an existing entry */
- if (!r) {
- if (value && table->value_size)
- memcpy(chash_iter_value(iter), value,
- table->value_size);
- return 1;
- }
-
- /* Is there a place to add a new entry? */
- if (iter.slot < 0) {
- pr_err("Hash table overflow\n");
- return -ENOMEM;
- }
-
- chash_iter_set_valid(iter);
-
- if (table->key_size == 4)
- table->keys32[iter.slot] = key;
- else
- table->keys64[iter.slot] = key;
- if (value && table->value_size)
- memcpy(chash_iter_value(iter), value, table->value_size);
-
- return 0;
-}
-EXPORT_SYMBOL(__chash_table_copy_in);
-
-int __chash_table_copy_out(struct __chash_table *table, u64 key,
- void *value, bool remove)
-{
- u32 hash = (table->key_size == 4) ?
- hash_32(key, table->bits) : hash_64(key, table->bits);
- struct chash_iter iter = CHASH_ITER_INIT(table, hash);
- int r = chash_table_find(&iter, key, remove);
-
- if (r < 0)
- return r;
-
- if (value && table->value_size)
- memcpy(value, chash_iter_value(iter), table->value_size);
-
- if (remove)
- chash_iter_set_invalid(iter);
-
- return iter.slot;
-}
-EXPORT_SYMBOL(__chash_table_copy_out);
-
-#ifdef CONFIG_CHASH_SELFTEST
-/**
- * chash_self_test - Run a self-test of the hash table implementation
- * @bits: Table size will be 2^bits entries
- * @key_size: Size of hash keys in bytes, 4 or 8
- * @min_fill: Minimum fill level during the test
- * @max_fill: Maximum fill level during the test
- * @iterations: Number of test iterations
- *
- * The test adds and removes entries from a hash table, cycling the
- * fill level between min_fill and max_fill entries. Also tests lookup
- * and value retrieval.
- */
-static int __init chash_self_test(u8 bits, u8 key_size,
- int min_fill, int max_fill,
- u64 iterations)
-{
- struct chash_table table;
- int ret;
- u64 add_count, rmv_count;
- u64 value;
-
- if (key_size == 4 && iterations > 0xffffffff)
- return -EINVAL;
- if (min_fill >= max_fill)
- return -EINVAL;
-
- ret = chash_table_alloc(&table, bits, key_size, sizeof(u64),
- GFP_KERNEL);
- if (ret) {
- pr_err("chash_table_alloc failed: %d\n", ret);
- return ret;
- }
-
- for (add_count = 0, rmv_count = 0; add_count < iterations;
- add_count++) {
- /* When we hit the max_fill level, remove entries down
- * to min_fill
- */
- if (add_count - rmv_count == max_fill) {
- u64 find_count = rmv_count;
-
- /* First try to find all entries that we're
- * about to remove, confirm their value, test
- * writing them back a second time.
- */
- for (; add_count - find_count > min_fill;
- find_count++) {
- ret = chash_table_copy_out(&table, find_count,
- &value);
- if (ret < 0) {
- pr_err("chash_table_copy_out failed: %d\n",
- ret);
- goto out;
- }
- if (value != ~find_count) {
- pr_err("Wrong value retrieved for key 0x%llx, expected 0x%llx got 0x%llx\n",
- find_count, ~find_count, value);
-#ifdef CHASH_DEBUG
- chash_table_dump(&table.table);
-#endif
- ret = -EFAULT;
- goto out;
- }
- ret = chash_table_copy_in(&table, find_count,
- &value);
- if (ret != 1) {
- pr_err("copy_in second time returned %d, expected 1\n",
- ret);
- ret = -EFAULT;
- goto out;
- }
- }
- /* Remove them until we hit min_fill level */
- for (; add_count - rmv_count > min_fill; rmv_count++) {
- ret = chash_table_remove(&table, rmv_count,
- NULL);
- if (ret < 0) {
- pr_err("chash_table_remove failed: %d\n",
- ret);
- goto out;
- }
- }
- }
-
- /* Add a new value */
- value = ~add_count;
- ret = chash_table_copy_in(&table, add_count, &value);
- if (ret != 0) {
- pr_err("copy_in first time returned %d, expected 0\n",
- ret);
- ret = -EFAULT;
- goto out;
- }
- }
-
- chash_table_dump_stats(&table);
- chash_table_reset_stats(&table);
-
-out:
- chash_table_free(&table);
- return ret;
-}
-
-static unsigned int chash_test_bits = 10;
-MODULE_PARM_DESC(test_bits,
- "Selftest number of hash bits ([4..20], default=10)");
-module_param_named(test_bits, chash_test_bits, uint, 0444);
-
-static unsigned int chash_test_keysize = 8;
-MODULE_PARM_DESC(test_keysize, "Selftest keysize (4 or 8, default=8)");
-module_param_named(test_keysize, chash_test_keysize, uint, 0444);
-
-static unsigned int chash_test_minfill;
-MODULE_PARM_DESC(test_minfill, "Selftest minimum #entries (default=50%)");
-module_param_named(test_minfill, chash_test_minfill, uint, 0444);
-
-static unsigned int chash_test_maxfill;
-MODULE_PARM_DESC(test_maxfill, "Selftest maximum #entries (default=80%)");
-module_param_named(test_maxfill, chash_test_maxfill, uint, 0444);
-
-static unsigned long chash_test_iters;
-MODULE_PARM_DESC(test_iters, "Selftest iterations (default=1000 x #entries)");
-module_param_named(test_iters, chash_test_iters, ulong, 0444);
-
-static int __init chash_init(void)
-{
- int ret;
- u64 ts1_ns;
-
- /* Skip self test on user errors */
- if (chash_test_bits < 4 || chash_test_bits > 20) {
- pr_err("chash: test_bits out of range [4..20].\n");
- return 0;
- }
- if (chash_test_keysize != 4 && chash_test_keysize != 8) {
- pr_err("chash: test_keysize invalid. Must be 4 or 8.\n");
- return 0;
- }
-
- if (!chash_test_minfill)
- chash_test_minfill = (1 << chash_test_bits) / 2;
- if (!chash_test_maxfill)
- chash_test_maxfill = (1 << chash_test_bits) * 4 / 5;
- if (!chash_test_iters)
- chash_test_iters = (1 << chash_test_bits) * 1000;
-
- if (chash_test_minfill >= (1 << chash_test_bits)) {
- pr_err("chash: test_minfill too big. Must be < table size.\n");
- return 0;
- }
- if (chash_test_maxfill >= (1 << chash_test_bits)) {
- pr_err("chash: test_maxfill too big. Must be < table size.\n");
- return 0;
- }
- if (chash_test_minfill >= chash_test_maxfill) {
- pr_err("chash: test_minfill must be < test_maxfill.\n");
- return 0;
- }
- if (chash_test_keysize == 4 && chash_test_iters > 0xffffffff) {
- pr_err("chash: test_iters must be < 4G for 4 byte keys.\n");
- return 0;
- }
-
- ts1_ns = local_clock();
- ret = chash_self_test(chash_test_bits, chash_test_keysize,
- chash_test_minfill, chash_test_maxfill,
- chash_test_iters);
- if (!ret) {
- u64 ts_delta_us = local_clock() - ts1_ns;
- u64 iters_per_second = (u64)chash_test_iters * 1000000;
-
- do_div(ts_delta_us, 1000);
- do_div(iters_per_second, ts_delta_us);
- pr_info("chash: self test took %llu us, %llu iterations/s\n",
- ts_delta_us, iters_per_second);
- } else {
- pr_err("chash: self test failed: %d\n", ret);
- }
-
- return ret;
-}
-
-module_init(chash_init);
-
-#endif /* CONFIG_CHASH_SELFTEST */
-
-MODULE_DESCRIPTION("Closed hash table");
-MODULE_LICENSE("GPL and additional rights");
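For context, the closed-hash API deleted above was driven through the chash_table_* wrappers exercised by the selftest in this same file; a minimal usage sketch based on those calls (not part of the patch):

struct chash_table table;
u64 value = 0xdeadbeef, key = 42;
int ret;

/* 2^10 slots, 8-byte keys, 8-byte values */
ret = chash_table_alloc(&table, 10, 8, sizeof(u64), GFP_KERNEL);
if (ret)
	return ret;

ret = chash_table_copy_in(&table, key, &value);   /* 0 = new entry, 1 = existing entry updated */
ret = chash_table_copy_out(&table, key, &value);  /* < 0 = not found, otherwise the slot */
chash_table_remove(&table, key, NULL);
chash_table_free(&table);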
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 231785a9e24c..ec87b3430d12 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -35,7 +35,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
include $(AMD_POWERPLAY)
-POWER_MGR = amd_powerplay.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 3f73f7cd18b9..bea1587d352d 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -53,7 +53,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
mutex_init(&hwmgr->smu_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
- hwmgr->feature_mask = adev->powerplay.pp_feature;
+ hwmgr->feature_mask = adev->pm.pp_feature;
hwmgr->display_config = &adev->pm.pm_display_cfg;
adev->powerplay.pp_handle = hwmgr;
adev->powerplay.pp_funcs = &pp_dpm_funcs;
@@ -1304,7 +1304,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1341,7 +1341,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1360,7 +1360,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1379,7 +1379,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
new file mode 100644
index 000000000000..c058c784180e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -0,0 +1,1253 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "soc15_common.h"
+#include "smu_v11_0.h"
+#include "atom.h"
+#include "amd_pcie.h"
+
+int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
+ bool gate)
+{
+ int ret = 0;
+
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ ret = smu_dpm_set_uvd_enable(smu, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ ret = smu_dpm_set_vce_enable(smu, gate);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+{
+ /* power states are not supported */
+ return POWER_STATE_TYPE_DEFAULT;
+}
+
+int smu_get_power_num_states(struct smu_context *smu,
+ struct pp_states_info *state_info)
+{
+ if (!state_info)
+ return -EINVAL;
+
+ /* power states are not supported */
+ memset(state_info, 0, sizeof(struct pp_states_info));
+ state_info->nums = 0;
+
+ return 0;
+}
+
+int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
+ *((uint32_t *)data) = smu->pstate_sclk;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
+ *((uint32_t *)data) = smu->pstate_mclk;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+ ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
+ *size = 8;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ *size = 0;
+
+ return ret;
+}
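A minimal sketch of how a caller reads one of these sensors (only the calling convention is shown; the units of pstate_sclk are not asserted here):

uint32_t sclk, size = sizeof(sclk);

/* on success, size is set to the number of bytes written into sclk */
if (!smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
			    &sclk, &size))
	pr_debug("stable pstate sclk: %u\n", sclk);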
+
+int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+ void *table_data, bool drv2smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+ int ret = 0;
+ uint32_t table_index;
+
+ if (!table_data || table_id >= smu_table->table_count)
+ return -EINVAL;
+
+ table_index = (exarg << 16) | table_id;
+
+ table = &smu_table->tables[table_id];
+
+ if (drv2smu)
+ memcpy(table->cpu_addr, table_data, table->size);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(table->mc_address));
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(table->mc_address));
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, drv2smu ?
+ SMU_MSG_TransferTableDram2Smu :
+ SMU_MSG_TransferTableSmu2Dram,
+ table_index);
+ if (ret)
+ return ret;
+
+ if (!drv2smu)
+ memcpy(table_data, table->cpu_addr, table->size);
+
+ return ret;
+}
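The SMU addresses a table with a 32-bit index whose upper 16 bits carry the extra argument and whose lower 16 bits carry the table id, so a plain transfer passes exarg = 0. A minimal caller sketch follows; SmuMetrics_t and TABLE_SMU_METRICS are assumed names from the SMU11 interface headers, not defined in this file:

/* fetch the metrics table from the SMU into a driver-side copy;
 * the destination must be at least tables[TABLE_SMU_METRICS].size bytes
 */
SmuMetrics_t metrics;
int ret;

ret = smu_update_table_with_arg(smu, TABLE_SMU_METRICS, 0, &metrics, false);
if (ret)
	pr_err("failed to fetch metrics table from SMU (%d)\n", ret);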
+
+bool is_support_sw_smu(struct amdgpu_device *adev)
+{
+ if (amdgpu_dpm != 1)
+ return false;
+
+ if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
+ return true;
+
+ return false;
+}
+
+int smu_sys_get_pp_table(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
+ return -EINVAL;
+
+ if (smu_table->hardcode_pptable)
+ *table = smu_table->hardcode_pptable;
+ else
+ *table = smu_table->power_play_table;
+
+ return smu_table->power_play_table_size;
+}
+
+int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
+ int ret = 0;
+
+ if (header->usStructureSize != size) {
+ pr_err("pp table size not matched !\n");
+ return -EIO;
+ }
+
+ mutex_lock(&smu->mutex);
+ if (!smu_table->hardcode_pptable)
+ smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+ if (!smu_table->hardcode_pptable) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ memcpy(smu_table->hardcode_pptable, buf, size);
+ smu_table->power_play_table = smu_table->hardcode_pptable;
+ smu_table->power_play_table_size = size;
+ mutex_unlock(&smu->mutex);
+
+ ret = smu_reset(smu);
+ if (ret)
+ pr_info("smu reset failed, ret = %d\n", ret);
+
+ return ret;
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+int smu_feature_init_dpm(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+
+ mutex_lock(&feature->mutex);
+ bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
+ SMU_FEATURE_MAX/32);
+ if (ret)
+ return ret;
+
+ mutex_lock(&feature->mutex);
+ bitmap_andnot(feature->allowed, feature->allowed,
+ (unsigned long *)unallowed_feature_mask,
+ feature->feature_num);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = test_bit(feature_id, feature->enabled);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = smu_feature_update_enable_state(smu, feature_id, enable);
+ if (ret)
+ goto failed;
+
+ if (enable)
+ test_and_set_bit(feature_id, feature->enabled);
+ else
+ test_and_clear_bit(feature_id, feature->enabled);
+
+failed:
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_is_supported(struct smu_context *smu, int feature_id)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = test_bit(feature_id, feature->supported);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_set_supported(struct smu_context *smu, int feature_id,
+ bool enable)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ if (enable)
+ test_and_set_bit(feature_id, feature->supported);
+ else
+ test_and_clear_bit(feature_id, feature->supported);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
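A minimal sketch of how a SMU backend might consume the feature helpers above; FEATURE_DPM_UVD_BIT is a placeholder feature id used only for illustration, not defined in this file:

/* enable a feature only if the firmware reports it as supported */
if (smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT) &&
    !smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) {
	ret = smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, true);
	if (ret)
		pr_err("failed to enable UVD DPM (%d)\n", ret);
}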
+
+static int smu_set_funcs(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+ smu_v11_0_set_smu_funcs(smu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smu_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ smu->adev = adev;
+ mutex_init(&smu->mutex);
+
+ return smu_set_funcs(adev);
+}
+
+static int smu_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ mutex_lock(&smu->mutex);
+ smu_handle_task(&adev->smu,
+ smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_COMPLETE_INIT);
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint8_t **addr)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t data_start;
+
+ if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
+ size, frev, crev, &data_start))
+ return -EINVAL;
+
+ *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
+
+ return 0;
+}
+
+static int smu_initialize_pptable(struct smu_context *smu)
+{
+ /* TODO */
+ return 0;
+}
+
+static int smu_smc_table_sw_init(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_initialize_pptable(smu);
+ if (ret) {
+ pr_err("Failed to init smu_initialize_pptable!\n");
+ return ret;
+ }
+
+ /**
+ * Create the smu_table structure, and init smc tables such as
+ * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
+ */
+ ret = smu_init_smc_tables(smu);
+ if (ret) {
+ pr_err("Failed to init smc tables!\n");
+ return ret;
+ }
+
+ /**
+ * Create the smu_power_context structure, and allocate the
+ * smu_dpm_context and other context data that back it.
+ */
+ ret = smu_init_power(smu);
+ if (ret) {
+ pr_err("Failed to init smu_init_power!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_smc_table_sw_fini(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_fini_smc_tables(smu);
+ if (ret) {
+ pr_err("Failed to smu_fini_smc_tables!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ smu->pool_size = adev->pm.smu_prv_buffer_size;
+ smu->smu_feature.feature_num = SMU_FEATURE_MAX;
+ mutex_init(&smu->smu_feature.mutex);
+ bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
+ bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
+ bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
+ smu->watermarks_bitmap = 0;
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+
+ smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
+ smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
+ smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
+ smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
+ smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
+ smu->display_config = &adev->pm.pm_display_cfg;
+
+ smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ ret = smu_init_microcode(smu);
+ if (ret) {
+ pr_err("Failed to load smu firmware!\n");
+ return ret;
+ }
+
+ ret = smu_smc_table_sw_init(smu);
+ if (ret) {
+ pr_err("Failed to sw init smc table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ ret = smu_smc_table_sw_fini(smu);
+ if (ret) {
+ pr_err("Failed to sw fini smc table!\n");
+ return ret;
+ }
+
+ ret = smu_fini_power(smu);
+ if (ret) {
+ pr_err("Failed to init smu_fini_power!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_init_fb_allocations(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ uint32_t table_count = smu_table->table_count;
+ uint32_t i = 0;
+ int32_t ret = 0;
+
+ if (table_count <= 0)
+ return -EINVAL;
+
+ for (i = 0 ; i < table_count; i++) {
+ if (tables[i].size == 0)
+ continue;
+ ret = amdgpu_bo_create_kernel(adev,
+ tables[i].size,
+ tables[i].align,
+ tables[i].domain,
+ &tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+ if (ret)
+ goto failed;
+ }
+
+ return 0;
+failed:
+ for (; i > 0; i--) {
+ if (tables[i].size == 0)
+ continue;
+ amdgpu_bo_free_kernel(&tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+
+ }
+ return ret;
+}
+
+static int smu_fini_fb_allocations(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ uint32_t table_count = smu_table->table_count;
+ uint32_t i = 0;
+
+ if (table_count == 0 || tables == NULL)
+ return 0;
+
+ for (i = 0 ; i < table_count; i++) {
+ if (tables[i].size == 0)
+ continue;
+ amdgpu_bo_free_kernel(&tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+ }
+
+ return 0;
+}
+
+static int smu_override_pcie_parameters(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
+ int ret;
+
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+ */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+ if (ret)
+ pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+ return ret;
+}
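Following the bit layout described in the comment above, a GEN3-capable x16 platform ends up with pcie_gen = 2 and pcie_width = 6, so the message argument works out as:

/* LCLK DPM level 1, GEN field 2 (GEN3), width code 6 (x16) */
smu_pcie_arg = (1 << 16) | (2 << 8) | 6;	/* == 0x00010206 */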
+
+static int smu_smc_table_hw_init(struct smu_context *smu,
+ bool initialize)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ if (smu_is_dpm_running(smu) && adev->in_suspend) {
+ pr_info("dpm has been enabled\n");
+ return 0;
+ }
+
+ ret = smu_init_display(smu);
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = smu_read_pptable_from_vbios(smu);
+ if (ret)
+ return ret;
+
+ /* get boot_values from vbios to set revision, gfxclk, etc. */
+ ret = smu_get_vbios_bootup_values(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_get_clk_info_from_vbios(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * check that the format_revision in vbios matches the pptable header
+ * version, and that the structure size is not 0.
+ */
+ ret = smu_check_pptable(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * allocate vram bos to store smc table contents.
+ */
+ ret = smu_init_fb_allocations(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Parse the pptable format and fill the PPTable_t smc_pptable in the
+ * smu_table_context structure. Then read the smc_dpm_table from vbios
+ * and merge it into smc_pptable.
+ */
+ ret = smu_parse_pptable(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send the GetDriverIfVersion message and check that the returned value
+ * matches the DRIVER_IF_VERSION in the smc header.
+ */
+ ret = smu_check_fw_version(smu);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Copy the pptable bo in vram to the smc with SMU messages such as
+ * SetDriverDramAddr and TransferTableDram2Smu.
+ */
+ ret = smu_write_pptable(smu);
+ if (ret)
+ return ret;
+
+ /* issue RunAfllBtc msg */
+ ret = smu_run_afll_btc(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_feature_set_allowed_mask(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_system_features_control(smu, true);
+ if (ret)
+ return ret;
+
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_notify_display_change(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the minimum deep sleep dcefclk to the bootup value from vbios via
+ * the SetMinDeepSleepDcefclk message.
+ */
+ ret = smu_set_min_dcef_deep_sleep(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Set initial values (read from vbios) in the dpm tables context, such
+ * as gfxclk, memclk and dcefclk, and enable the DPM feature for each
+ * clock type.
+ */
+ if (initialize) {
+ ret = smu_populate_smc_pptable(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_init_max_sustainable_clocks(smu);
+ if (ret)
+ return ret;
+ }
+
+ ret = smu_set_od8_default_settings(smu, initialize);
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = smu_populate_umd_state_clk(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
+ */
+ ret = smu_set_tool_table_location(smu);
+
+ return ret;
+}
+
+/**
+ * smu_alloc_memory_pool - allocate memory pool in the system memory
+ *
+ * @smu: smu_context pointer
+ *
+ * This memory pool is used by the SMC; the SetSystemVirtualDramAddr and
+ * DramLogSetDramAddr messages notify the SMC of its location.
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int smu_alloc_memory_pool(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ uint64_t pool_size = smu->pool_size;
+ int ret = 0;
+
+ if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
+ return ret;
+
+ memory_pool->size = pool_size;
+ memory_pool->align = PAGE_SIZE;
+ memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
+
+ switch (pool_size) {
+ case SMU_MEMORY_POOL_SIZE_256_MB:
+ case SMU_MEMORY_POOL_SIZE_512_MB:
+ case SMU_MEMORY_POOL_SIZE_1_GB:
+ case SMU_MEMORY_POOL_SIZE_2_GB:
+ ret = amdgpu_bo_create_kernel(adev,
+ memory_pool->size,
+ memory_pool->align,
+ memory_pool->domain,
+ &memory_pool->bo,
+ &memory_pool->mc_address,
+ &memory_pool->cpu_addr);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_free_memory_pool(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+
+ if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
+ return ret;
+
+ amdgpu_bo_free_kernel(&memory_pool->bo,
+ &memory_pool->mc_address,
+ &memory_pool->cpu_addr);
+
+ memset(memory_pool, 0, sizeof(struct smu_table));
+
+ return ret;
+}
+
+static int smu_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ ret = smu_load_microcode(smu);
+ if (ret)
+ return ret;
+ }
+
+ ret = smu_check_fw_status(smu);
+ if (ret) {
+ pr_err("SMC firmware status is not correct\n");
+ return ret;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu_feature_init_dpm(smu);
+ if (ret)
+ goto failed;
+
+ ret = smu_smc_table_hw_init(smu, true);
+ if (ret)
+ goto failed;
+
+ ret = smu_alloc_memory_pool(smu);
+ if (ret)
+ goto failed;
+
+ /*
+ * Notify the SMC of the memory pool location with the
+ * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
+ */
+ ret = smu_notify_memory_pool_location(smu);
+ if (ret)
+ goto failed;
+
+ ret = smu_start_thermal_control(smu);
+ if (ret)
+ goto failed;
+
+ mutex_unlock(&smu->mutex);
+
+ adev->pm.dpm_enabled = true;
+
+ pr_info("SMU is initialized successfully!\n");
+
+ return 0;
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static int smu_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ kfree(table_context->driver_pptable);
+ table_context->driver_pptable = NULL;
+
+ kfree(table_context->max_sustainable_clocks);
+ table_context->max_sustainable_clocks = NULL;
+
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+
+ kfree(table_context->od_settings_max);
+ table_context->od_settings_max = NULL;
+
+ kfree(table_context->od_settings_min);
+ table_context->od_settings_min = NULL;
+
+ kfree(table_context->overdrive_table);
+ table_context->overdrive_table = NULL;
+
+ kfree(table_context->od8_settings);
+ table_context->od8_settings = NULL;
+
+ ret = smu_fini_fb_allocations(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_free_memory_pool(smu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int smu_reset(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ ret = smu_hw_fini(adev);
+ if (ret)
+ return ret;
+
+ ret = smu_hw_init(adev);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_suspend(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ ret = smu_system_features_control(smu, false);
+ if (ret)
+ return ret;
+
+ smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
+
+ return 0;
+}
+
+static int smu_resume(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ pr_info("SMU is resuming...\n");
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu_smc_table_hw_init(smu, false);
+ if (ret)
+ goto failed;
+
+ ret = smu_start_thermal_control(smu);
+ if (ret)
+ goto failed;
+
+ mutex_unlock(&smu->mutex);
+
+ pr_info("SMU is resumed successfully!\n");
+
+ return 0;
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+int smu_display_configuration_change(struct smu_context *smu,
+ const struct amd_pp_display_configuration *display_config)
+{
+ int index = 0;
+ int num_of_active_display = 0;
+
+ if (!is_support_sw_smu(smu->adev))
+ return -EINVAL;
+
+ if (!display_config)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ smu_set_deep_sleep_dcefclk(smu,
+ display_config->min_dcef_deep_sleep_set_clk / 100);
+
+ for (index = 0; index < display_config->num_path_including_non_display; index++) {
+ if (display_config->displays[index].controller_id != 0)
+ num_of_active_display++;
+ }
+
+ smu_set_active_display_count(smu, num_of_active_display);
+
+ smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
+ display_config->cpu_cc6_disable,
+ display_config->cpu_pstate_disable,
+ display_config->nb_pstate_switch_disable);
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+static int smu_get_clock_info(struct smu_context *smu,
+ struct smu_clock_info *clk_info,
+ enum smu_perf_level_designation designation)
+{
+ int ret;
+ struct smu_performance_level level = {0};
+
+ if (!clk_info)
+ return -EINVAL;
+
+ ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
+ if (ret)
+ return -EINVAL;
+
+ clk_info->min_mem_clk = level.memory_clock;
+ clk_info->min_eng_clk = level.core_clock;
+ clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
+
+ ret = smu_get_perf_level(smu, designation, &level);
+ if (ret)
+ return -EINVAL;
+
+ clk_info->min_mem_clk = level.memory_clock;
+ clk_info->min_eng_clk = level.core_clock;
+ clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
+
+ return 0;
+}
+
+int smu_get_current_clocks(struct smu_context *smu,
+ struct amd_pp_clock_info *clocks)
+{
+ struct amd_pp_simple_clock_info simple_clocks = {0};
+ struct smu_clock_info hw_clocks;
+ int ret = 0;
+
+ if (!is_support_sw_smu(smu->adev))
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ smu_get_dal_power_level(smu, &simple_clocks);
+
+ if (smu->support_power_containment)
+ ret = smu_get_clock_info(smu, &hw_clocks,
+ PERF_LEVEL_POWER_CONTAINMENT);
+ else
+ ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
+
+ if (ret) {
+ pr_err("Error in smu_get_clock_info\n");
+ goto failed;
+ }
+
+ clocks->min_engine_clock = hw_clocks.min_eng_clk;
+ clocks->max_engine_clock = hw_clocks.max_eng_clk;
+ clocks->min_memory_clock = hw_clocks.min_mem_clk;
+ clocks->max_memory_clock = hw_clocks.max_mem_clk;
+ clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
+ clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+
+ if (simple_clocks.level == 0)
+ clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+ else
+ clocks->max_clocks_state = simple_clocks.level;
+
+ if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+ }
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static int smu_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int smu_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static int smu_enable_umd_pstate(void *handle,
+ enum amd_dpm_forced_level *level)
+{
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ struct smu_context *smu = (struct smu_context*)(handle);
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
+ /* enter umd pstate, save current level, disable gfx cg*/
+ if (*level & profile_mode_mask) {
+ smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
+ smu_dpm_ctx->enable_umd_pstate = true;
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ }
+ } else {
+ /* exit umd pstate, restore level, enable gfx cg*/
+ if (!(*level & profile_mode_mask)) {
+ if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ *level = smu_dpm_ctx->saved_dpm_level;
+ smu_dpm_ctx->enable_umd_pstate = false;
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ }
+ }
+
+ return 0;
+}
+
+int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ bool skip_display_settings)
+{
+ int ret = 0;
+ int index = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+ long workload;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!skip_display_settings) {
+ ret = smu_display_config_changed(smu);
+ if (ret) {
+ pr_err("Failed to change display config!");
+ return ret;
+ }
+ }
+
+ ret = smu_apply_clocks_adjust_rules(smu);
+ if (ret) {
+ pr_err("Failed to apply clocks adjust rules!");
+ return ret;
+ }
+
+ if (!skip_display_settings) {
+ ret = smu_notify_smc_dispaly_config(smu);
+ if (ret) {
+ pr_err("Failed to notify smc display config!");
+ return ret;
+ }
+ }
+
+ if (smu_dpm_ctx->dpm_level != level) {
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+
+ if (!ret)
+ smu_dpm_ctx->dpm_level = level;
+ }
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload = smu->workload_setting[index];
+
+ if (smu->power_profile_mode != workload)
+ smu_set_power_profile_mode(smu, &workload, 0);
+ }
+
+ return ret;
+}
+
+int smu_handle_task(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum amd_pp_task task_id)
+{
+ int ret = 0;
+
+ switch (task_id) {
+ case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = smu_pre_display_config_changed(smu);
+ if (ret)
+ return ret;
+ ret = smu_set_cpu_power_state(smu);
+ if (ret)
+ return ret;
+ ret = smu_adjust_power_state_dynamic(smu, level, false);
+ break;
+ case AMD_PP_TASK_COMPLETE_INIT:
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+ ret = smu_adjust_power_state_dynamic(smu, level, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+const struct amd_ip_funcs smu_ip_funcs = {
+ .name = "smu",
+ .early_init = smu_early_init,
+ .late_init = smu_late_init,
+ .sw_init = smu_sw_init,
+ .sw_fini = smu_sw_fini,
+ .hw_init = smu_hw_init,
+ .hw_fini = smu_hw_fini,
+ .suspend = smu_suspend,
+ .resume = smu_resume,
+ .is_idle = NULL,
+ .check_soft_reset = NULL,
+ .wait_for_idle = NULL,
+ .soft_reset = NULL,
+ .set_clockgating_state = smu_set_clockgating_state,
+ .set_powergating_state = smu_set_powergating_state,
+ .enable_umd_pstate = smu_enable_umd_pstate,
+};
+
+const struct amdgpu_ip_block_version smu_v11_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &smu_ip_funcs,
+};
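For context, smu_v11_0_ip_block is registered during ASIC setup the same way as the legacy powerplay block; a rough sketch of the expected soc15 call site, which lives outside this patch (pp_smu_ip_block is the existing legacy block):

/* pick the software SMU path when supported, otherwise fall back to powerplay */
if (is_support_sw_smu(adev))
	amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
else
	amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);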
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index 0b3c6d1d52e4..cc63705920dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -35,7 +35,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
vega12_thermal.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
- vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o
+ vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
+ vega12_baco.o smu9_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index c1c51c115e57..70f7f47a2fcf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -76,7 +76,7 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = NULL;
- int ret = -EINVAL;;
+ int ret = -EINVAL;
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 0ad8fe4a6277..9a595f7525e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -35,6 +35,7 @@
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"
+#include "smu10.h"
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
@@ -114,11 +115,6 @@ static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
smu10_data->num_active_display = 0;
smu10_data->deep_sleep_dcefclk = 0;
- if (hwmgr->feature_mask & PP_GFXOFF_MASK)
- smu10_data->gfx_off_controled_by_driver = true;
- else
- smu10_data->gfx_off_controled_by_driver = false;
-
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
@@ -209,18 +205,13 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
return 0;
}
-static inline uint32_t convert_10k_to_mhz(uint32_t clock)
-{
- return (clock + 99) / 100;
-}
-
static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
- smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
+ smu10_data->deep_sleep_dcefclk != clock) {
+ smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
smu10_data->deep_sleep_dcefclk);
@@ -233,8 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->dcf_actual_hard_min_freq &&
- smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
- smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smu10_data->dcf_actual_hard_min_freq != clock) {
+ smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
smu10_data->dcf_actual_hard_min_freq);
@@ -247,8 +238,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->f_actual_hard_min_freq &&
- smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
- smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smu10_data->f_actual_hard_min_freq != clock) {
+ smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
smu10_data->f_actual_hard_min_freq);
@@ -330,9 +321,9 @@ static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
- struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
- if (smu10_data->gfx_off_controled_by_driver) {
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
/* confirm gfx is back to "on" state */
@@ -350,9 +341,9 @@ static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
- struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
- if (smu10_data->gfx_off_controled_by_driver)
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
return 0;
@@ -577,7 +568,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
struct smu10_hwmgr *data = hwmgr->backend;
- struct amdgpu_device *adev = hwmgr->adev;
uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
@@ -586,11 +576,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return 0;
}
- /* Disable UMDPSTATE support on rv2 temporarily */
- if ((adev->asic_type == CHIP_RAVEN) &&
- (adev->rev_id >= 8))
- return 0;
-
if (min_sclk < data->gfx_min_freq_limit)
min_sclk = data->gfx_min_freq_limit;
@@ -1205,6 +1190,94 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
}
}
+static int conv_power_profile_to_pplib_workload(int power_profile)
+{
+ int pplib_workload = 0;
+
+ switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_POWERSAVING:
+ pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ }
+
+ return pplib_workload;
+}
+
+static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
+{
+ uint32_t i, size = 0;
+ static const uint8_t
+ profile_mode_setting[6][4] = {{70, 60, 0, 0,},
+ {70, 60, 1, 3,},
+ {90, 60, 0, 0,},
+ {70, 60, 0, 0,},
+ {70, 90, 0, 0,},
+ {30, 60, 0, 6,},
+ };
+ static const char *profile_name[6] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE"};
+ static const char *title[6] = {"NUM",
+ "MODE_NAME",
+ "BUSY_SET_POINT",
+ "FPS",
+ "USE_RLC_BUSY",
+ "MIN_ACTIVE_LEVEL"};
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
+ title[1], title[2], title[3], title[4], title[5]);
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
+ size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
+ i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ profile_mode_setting[i][0], profile_mode_setting[i][1],
+ profile_mode_setting[i][2], profile_mode_setting[i][3]);
+
+ return size;
+}
+
+static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
+{
+ int workload_type = 0;
+
+ if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
+ pr_err("Invalid power profile mode %ld\n", input[size]);
+ return -EINVAL;
+ }
+ hwmgr->power_profile_mode = input[size];
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type =
+ conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
+ 1 << workload_type);
+
+ return 0;
+}
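This hook is normally reached through the pp_hwmgr_func table; a minimal sketch of selecting a plain profile (non-CUSTOM profiles carry no extra parameters, so size is 0 and input[0] holds the mode):

/* request the VIDEO workload profile */
long input[1] = { PP_SMC_POWER_PROFILE_VIDEO };

smu10_set_power_profile_mode(hwmgr, input, 0);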
+
+
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
@@ -1246,6 +1319,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.powergate_sdma = smu10_powergate_sdma,
.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
+ .get_power_profile_mode = smu10_get_power_profile_mode,
+ .set_power_profile_mode = smu10_set_power_profile_mode,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 83d3d935f3ac..048757e8f494 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,7 +77,7 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static const struct profile_mode_setting smu7_profiling[7] =
+static struct profile_mode_setting smu7_profiling[7] =
{{0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
@@ -4984,17 +4984,27 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
mode = input[size];
switch (mode) {
case PP_SMC_POWER_PROFILE_CUSTOM:
- if (size < 8)
+ if (size < 8 && size != 0)
return -EINVAL;
-
- tmp.bupdate_sclk = input[0];
- tmp.sclk_up_hyst = input[1];
- tmp.sclk_down_hyst = input[2];
- tmp.sclk_activity = input[3];
- tmp.bupdate_mclk = input[4];
- tmp.mclk_up_hyst = input[5];
- tmp.mclk_down_hyst = input[6];
- tmp.mclk_activity = input[7];
+ /* If only CUSTOM is passed in, use the saved values. Check
+ * that we actually have a CUSTOM profile by ensuring that
+ * the "use sclk" or the "use mclk" bits are set
+ */
+ tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
+ if (size == 0) {
+ if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
+ return -EINVAL;
+ } else {
+ tmp.bupdate_sclk = input[0];
+ tmp.sclk_up_hyst = input[1];
+ tmp.sclk_down_hyst = input[2];
+ tmp.sclk_activity = input[3];
+ tmp.bupdate_mclk = input[4];
+ tmp.mclk_up_hyst = input[5];
+ tmp.mclk_down_hyst = input[6];
+ tmp.mclk_activity = input[7];
+ smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
+ }
if (!smum_update_dpm_settings(hwmgr, &tmp)) {
memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
hwmgr->power_profile_mode = mode;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c
new file mode 100644
index 000000000000..de0a37f7c632
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+#include "soc15_common.h"
+#include "vega10_inc.h"
+#include "smu9_baco.h"
+
+int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg, data;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
+ WREG32(0x12074, 0xFFF0003B);
+ data = RREG32(0x12075);
+
+ if (data == 0x1) {
+ reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
+
+ if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
+ *cap = true;
+ }
+
+ return 0;
+}
+
+int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h
new file mode 100644
index 000000000000..84e90f801ac3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU9_BACO_H__
+#define __SMU9_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
index 7337be5602e4..d168af4a4d78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -85,48 +85,11 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
-int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
- uint32_t reg, data;
-
- *cap = false;
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return 0;
-
- WREG32(0x12074, 0xFFF0003B);
- data = RREG32(0x12075);
-
- if (data == 0x1) {
- reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
-
- if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- *cap = true;
- }
-
- return 0;
-}
-
-int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
-
- if (reg & BACO_CNTL__BACO_MODE_MASK)
- /* gfx has already entered BACO state */
- *state = BACO_STATE_IN;
- else
- *state = BACO_STATE_OUT;
- return 0;
-}
-
int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
enum BACO_STATE cur_state;
- vega10_baco_get_state(hwmgr, &cur_state);
+ smu9_baco_get_state(hwmgr, &cur_state);
if (cur_state == state)
/* asic already in the target state */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
index f7a3ffa744b3..96d793f026a5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
@@ -22,11 +22,8 @@
*/
#ifndef __VEGA10_BACO_H__
#define __VEGA10_BACO_H__
-#include "hwmgr.h"
-#include "common_baco.h"
+#include "smu9_baco.h"
-extern int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
-extern int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 5c4f701939ea..384c37875cd0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -1427,6 +1427,15 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
vega10_setup_default_pcie_table(hwmgr);
+ /* Zero out the saved copy of the CUSTOM profile
+ * This will be checked when trying to set the profile
+ * and will require that new values be passed in
+ */
+ data->custom_profile_mode[0] = 0;
+ data->custom_profile_mode[1] = 0;
+ data->custom_profile_mode[2] = 0;
+ data->custom_profile_mode[3] = 0;
+
/* save a copy of the default DPM table */
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
@@ -4904,16 +4913,23 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
uint8_t FPS;
uint8_t use_rlc_busy;
uint8_t min_active_level;
+ uint32_t power_profile_mode = input[size];
- hwmgr->power_profile_mode = input[size];
-
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1<<hwmgr->power_profile_mode);
-
- if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size == 0 || size > 4)
+ if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (size != 0 && size != 4)
return -EINVAL;
+ /* If size = 0 and the CUSTOM profile has been set already
+ * then just apply the profile. The copy stored in the hwmgr
+ * is zeroed out on init
+ */
+ if (size == 0) {
+ if (data->custom_profile_mode[0] != 0)
+ goto out;
+ else
+ return -EINVAL;
+ }
+
data->custom_profile_mode[0] = busy_set_point = input[0];
data->custom_profile_mode[1] = FPS = input[1];
data->custom_profile_mode[2] = use_rlc_busy = input[2];
@@ -4924,6 +4940,11 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
use_rlc_busy << 16 | min_active_level<<24);
}
+out:
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ 1 << power_profile_mode);
+ hwmgr->power_profile_mode = power_profile_mode;
+
return 0;
}
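
The rework above validates and caches the CUSTOM parameters before PPSMC_MSG_SetWorkloadMask is sent, and only commits hwmgr->power_profile_mode once the message has gone out, so a rejected request no longer leaves a half-applied mode behind. A condensed sketch of the resulting flow, where apply_custom_profile() is a hypothetical stand-in for the parameter-packing message send above:

/* Condensed flow of the reworked handler above; apply_custom_profile()
 * is a hypothetical placeholder, everything else mirrors the hunk.
 */
static int set_profile_sketch(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t mode = input[size];

	if (mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (size != 0 && size != 4)
			return -EINVAL;
		if (size == 0 && data->custom_profile_mode[0] == 0)
			return -EINVAL;			/* nothing cached to re-apply */
		if (size == 4)
			apply_custom_profile(hwmgr, input);	/* cache and send parameters */
	}

	/* only now program the workload mask and commit the new mode */
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, 1 << mode);
	hwmgr->power_profile_mode = mode;
	return 0;
}
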
@@ -5170,8 +5191,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.set_power_limit = vega10_set_power_limit,
.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
.get_performance_level = vega10_get_performance_level,
- .get_asic_baco_capability = vega10_baco_get_capability,
- .get_asic_baco_state = vega10_baco_get_state,
+ .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega10_baco_set_state,
.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
.get_ppfeature_status = vega10_get_ppfeature_status,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
new file mode 100644
index 000000000000..9d8ca94a8f0c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+#include "soc15_common.h"
+#include "vega12_inc.h"
+#include "vega12_ppsmc.h"
+#include "vega12_baco.h"
+
+static const struct soc15_baco_cmd_entry pre_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmBIF_DOORBELL_CNTL_BASE_IDX, mmBIF_DOORBELL_CNTL, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 0 },
+ { CMD_WRITE, NBIF_HWID, 0, mmBIF_FB_EN_BASE_IDX, mmBIF_FB_EN, 0, 0, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1 }
+};
+
+static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_WAITFOR, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 1 },
+ { CMD_DELAY_MS, 0, 0, 0, 5, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 0 },
+ { CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100 }
+};
+
+static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0 },
+ { CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_EXIT_MASK, THM_BACO_CNTL__BACO_EXIT__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0 },
+ { CMD_WAITFOR, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_EXIT_MASK, 0, 0xffffffff, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SB_AXI_FENCE_MASK, THM_BACO_CNTL__BACO_SB_AXI_FENCE__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0 },
+ { CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0 }
+};
+
+static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_6_BASE_IDX, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_7_BASE_IDX, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu9_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
+ ARRAY_SIZE(pre_baco_tbl))) {
+ if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+
+ if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+ }
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (soc15_baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (soc15_baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
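
The enter/exit sequences above are purely table driven: each soc15_baco_cmd_entry names a command, the target register (HW IP, instance, base index, offset), a mask/shift pair, and either a value to program or a value to poll for. A rough sketch of the walker that consumes such a table; field names and register helpers are inferred from the entries above, not copied from common_baco.c:

/* Rough sketch of a table walker for soc15_baco_cmd_entry arrays.
 * Field names (cmd, reg_offset, mask, shift, timeout, val) and the exact
 * register addressing are assumptions for illustration only; the real
 * implementation lives in soc15_baco_program_registers().
 */
static bool baco_run_table_sketch(struct pp_hwmgr *hwmgr,
				  const struct soc15_baco_cmd_entry *tbl,
				  unsigned int count)
{
	struct amdgpu_device *adev = hwmgr->adev;	/* used implicitly by RREG32/WREG32 */
	unsigned int i;
	uint32_t reg;

	for (i = 0; i < count; i++) {
		switch (tbl[i].cmd) {
		case CMD_WRITE:
			WREG32(tbl[i].reg_offset, tbl[i].val);
			break;
		case CMD_READMODIFYWRITE:
			reg = RREG32(tbl[i].reg_offset);
			reg = (reg & ~tbl[i].mask) | ((tbl[i].val << tbl[i].shift) & tbl[i].mask);
			WREG32(tbl[i].reg_offset, reg);
			break;
		case CMD_WAITFOR:
			/* poll until (RREG32(...) & mask) == val, bounded by timeout */
			break;
		case CMD_DELAY_MS:
			msleep(tbl[i].timeout);
			break;
		}
	}
	return true;	/* the real walker reports failure on timeout */
}
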
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h
new file mode 100644
index 000000000000..57b72e5a95ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __VEGA12_BACO_H__
+#define __VEGA12_BACO_H__
+#include "smu9_baco.h"
+
+extern int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index bdb48e94eff6..707cd4b0357f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -45,6 +45,7 @@
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
+#include "vega12_baco.h"
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
@@ -2626,8 +2627,12 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.start_thermal_controller = vega12_start_thermal_controller,
.powergate_gfx = vega12_gfx_off_control,
.get_performance_level = vega12_get_performance_level,
+ .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_asic_baco_state = smu9_baco_get_state,
+ .set_asic_baco_state = vega12_baco_set_state,
.get_ppfeature_status = vega12_get_ppfeature_status,
.set_ppfeature_status = vega12_set_ppfeature_status,
+
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
index 30b278c50222..e6d9e84059e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
@@ -35,5 +35,7 @@
#include "asic_reg/gc/gc_9_2_1_sh_mask.h"
#include "asic_reg/nbio/nbio_6_1_offset.h"
+#include "asic_reg/nbio/nbio_6_1_offset.h"
+#include "asic_reg/nbio/nbio_6_1_sh_mask.h"
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index 5e8602a79b1c..df6ff9252401 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -27,6 +27,7 @@
#include "vega20_inc.h"
#include "vega20_ppsmc.h"
#include "vega20_baco.h"
+#include "vega20_smumgr.h"
@@ -101,3 +102,14 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
}
+
+int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+
+ ret = vega20_set_pptable_driver_address(hwmgr);
+ if (ret)
+ return ret;
+
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
index 51c7f8392925..f06471e712dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
@@ -28,5 +28,6 @@
extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 9aa7bec1b5fe..9b9f87b84910 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
* MP0CLK DS
*/
data->registry_data.disallowed_features = 0xE0041C00;
+ /* ECC feature should be disabled on old SMUs */
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ hwmgr->smu_version = smum_get_argument(hwmgr);
+ if (hwmgr->smu_version < 0x282100)
+ data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
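
The version check added above relies on the usual powerplay message/argument handshake: a parameterless GetSmuVersion message is sent and the reply is read back from the SMU argument register. A hypothetical wrapper makes the pattern explicit; smum_send_msg_to_smc() and smum_get_argument() are the primitives used in the hunk, the wrapper name is not:

/* Hypothetical wrapper for the query-then-read handshake used above;
 * firmware older than 0x282100 then has FEATURE_ECC_MASK added to the
 * disallowed-features mask.
 */
static uint32_t query_smu_version(struct pp_hwmgr *hwmgr)
{
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
	return smum_get_argument(hwmgr);
}
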
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+ data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
data->smu_features[i].smu_feature_bitmap =
@@ -427,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number;
+ data->is_custom_profile_set = false;
return 0;
}
@@ -443,6 +451,7 @@ static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
int ret = 0;
ret = vega20_init_sclk_threshold(hwmgr);
@@ -450,7 +459,15 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- return 0;
+ if (adev->in_baco_reset) {
+ adev->in_baco_reset = 0;
+
+ ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
+ if (ret)
+ pr_err("Failed to apply vega20 baco workaround!\n");
+ }
+
+ return ret;
}
/*
@@ -3020,7 +3037,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
"FCLK_DS",
"MP1CLK_DS",
"MP0CLK_DS",
- "XGMI"};
+ "XGMI",
+ "ECC"};
static const char *output_title[] = {
"FEATURES",
"BITMASK",
@@ -3442,7 +3460,18 @@ static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
return ;
data->vce_power_gated = bgate;
- vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ if (bgate) {
+ vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ } else {
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ }
+
}
static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
@@ -3462,6 +3491,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
struct vega20_single_dpm_table *dpm_table;
bool vblank_too_short = false;
bool disable_mclk_switching;
+ bool disable_fclk_switching;
uint32_t i, latency;
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3567,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
if (hwmgr->display_config->nb_pstate_switch_disable)
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ if ((disable_mclk_switching &&
+ (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+ hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+ disable_fclk_switching = true;
+ else
+ disable_fclk_switching = false;
+
/* fclk */
dpm_table = &(data->dpm_table.fclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
- if (hwmgr->display_config->nb_pstate_switch_disable)
+ if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
/* vclk */
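
The new disable_fclk_switching flag above pins fclk to its highest DPM level in two cases: when uclk is already pinned to its top level for mclk-switch avoidance, or when the display's minimum memory-clock request (after the /100 scaling used in the hunk) already requires the top uclk level. Boiled down, with illustrative variable names:

/* Boiled-down form of the decision above; variable names are illustrative. */
bool uclk_pinned_to_top = disable_mclk_switching &&
			  (uclk_hard_min == uclk_top_level);
bool display_needs_top_uclk =
	hwmgr->display_config->min_mem_set_clock / 100 >= uclk_top_level;

disable_fclk_switching = uclk_pinned_to_top || display_needs_top_uclk;
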
@@ -3810,16 +3847,19 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
{
DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type, result = 0;
+ uint32_t power_profile_mode = input[size];
- hwmgr->power_profile_mode = input[size];
-
- if (hwmgr->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- pr_err("Invalid power profile mode %d\n", hwmgr->power_profile_mode);
+ if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", power_profile_mode);
return -EINVAL;
}
- if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size < 10)
+ if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ if (size == 0 && !data->is_custom_profile_set)
+ return -EINVAL;
+ if (size < 10 && size != 0)
return -EINVAL;
result = vega20_get_activity_monitor_coeff(hwmgr,
@@ -3829,6 +3869,13 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
"[SetPowerProfile] Failed to get activity monitor!",
return result);
+ /* If size==0, then we want to apply the already-configured
+ * CUSTOM profile again. Just apply it, since we checked its
+ * validity above
+ */
+ if (size == 0)
+ goto out;
+
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
@@ -3879,17 +3926,21 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
result = vega20_set_activity_monitor_coeff(hwmgr,
(uint8_t *)(&activity_monitor),
WORKLOAD_PPLIB_CUSTOM_BIT);
+ data->is_custom_profile_set = true;
PP_ASSERT_WITH_CODE(!result,
"[SetPowerProfile] Failed to set activity monitor!",
return result);
}
+out:
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type =
- conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
+ conv_power_profile_to_pplib_workload(power_profile_mode);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
1 << workload_type);
+ hwmgr->power_profile_mode = power_profile_mode;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index a5bc758ae097..2c3125f82b24 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -80,6 +80,7 @@ enum {
GNLD_DS_MP1CLK,
GNLD_DS_MP0CLK,
GNLD_XGMI,
+ GNLD_ECC,
GNLD_FEATURES_MAX
};
@@ -530,6 +531,8 @@ struct vega20_hwmgr {
bool pcie_parameters_override;
uint32_t pcie_gen_level1;
uint32_t pcie_width_level1;
+
+ bool is_custom_profile_set;
};
#define VEGA20_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
new file mode 100644
index 000000000000..c8b168b3413b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -0,0 +1,770 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_SMU_H__
+#define __AMDGPU_SMU_H__
+
+#include "amdgpu.h"
+#include "kgd_pp_interface.h"
+#include "dm_pp_interface.h"
+
+struct smu_hw_power_state {
+ unsigned int magic;
+};
+
+struct smu_power_state;
+
+enum smu_state_ui_label {
+ SMU_STATE_UI_LABEL_NONE,
+ SMU_STATE_UI_LABEL_BATTERY,
+ SMU_STATE_UI_TABEL_MIDDLE_LOW,
+ SMU_STATE_UI_LABEL_BALLANCED,
+ SMU_STATE_UI_LABEL_MIDDLE_HIGHT,
+ SMU_STATE_UI_LABEL_PERFORMANCE,
+ SMU_STATE_UI_LABEL_BACO,
+};
+
+enum smu_state_classification_flag {
+ SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001,
+ SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002,
+ SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004,
+ SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008,
+ SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040,
+ SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080,
+ SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200,
+ SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400,
+ SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800,
+ SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000,
+ SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000,
+ SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000,
+ SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000,
+ SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000,
+};
+
+struct smu_state_classification_block {
+ enum smu_state_ui_label ui_label;
+ enum smu_state_classification_flag flags;
+ int bios_index;
+ bool temporary_state;
+ bool to_be_deleted;
+};
+
+struct smu_state_pcie_block {
+ unsigned int lanes;
+};
+
+enum smu_refreshrate_source {
+ SMU_REFRESHRATE_SOURCE_EDID,
+ SMU_REFRESHRATE_SOURCE_EXPLICIT
+};
+
+struct smu_state_display_block {
+ bool disable_frame_modulation;
+ bool limit_refreshrate;
+ enum smu_refreshrate_source refreshrate_source;
+ int explicit_refreshrate;
+ int edid_refreshrate_index;
+ bool enable_vari_bright;
+};
+
+struct smu_state_memroy_block {
+ bool dll_off;
+ uint8_t m3arb;
+ uint8_t unused[3];
+};
+
+struct smu_state_software_algorithm_block {
+ bool disable_load_balancing;
+ bool enable_sleep_for_timestamps;
+};
+
+struct smu_temperature_range {
+ int min;
+ int max;
+};
+
+struct smu_state_validation_block {
+ bool single_display_only;
+ bool disallow_on_dc;
+ uint8_t supported_power_levels;
+};
+
+struct smu_uvd_clocks {
+ uint32_t vclk;
+ uint32_t dclk;
+};
+
+/**
+* Structure to hold a SMU Power State.
+*/
+struct smu_power_state {
+ uint32_t id;
+ struct list_head ordered_list;
+ struct list_head all_states_list;
+
+ struct smu_state_classification_block classification;
+ struct smu_state_validation_block validation;
+ struct smu_state_pcie_block pcie;
+ struct smu_state_display_block display;
+ struct smu_state_memroy_block memory;
+ struct smu_temperature_range temperatures;
+ struct smu_state_software_algorithm_block software;
+ struct smu_uvd_clocks uvd_clocks;
+ struct smu_hw_power_state hardware;
+};
+
+enum smu_message_type
+{
+ SMU_MSG_TestMessage = 0,
+ SMU_MSG_GetSmuVersion,
+ SMU_MSG_GetDriverIfVersion,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ SMU_MSG_SetAllowedFeaturesMaskHigh,
+ SMU_MSG_EnableAllSmuFeatures,
+ SMU_MSG_DisableAllSmuFeatures,
+ SMU_MSG_EnableSmuFeaturesLow,
+ SMU_MSG_EnableSmuFeaturesHigh,
+ SMU_MSG_DisableSmuFeaturesLow,
+ SMU_MSG_DisableSmuFeaturesHigh,
+ SMU_MSG_GetEnabledSmuFeaturesLow,
+ SMU_MSG_GetEnabledSmuFeaturesHigh,
+ SMU_MSG_SetWorkloadMask,
+ SMU_MSG_SetPptLimit,
+ SMU_MSG_SetDriverDramAddrHigh,
+ SMU_MSG_SetDriverDramAddrLow,
+ SMU_MSG_SetToolsDramAddrHigh,
+ SMU_MSG_SetToolsDramAddrLow,
+ SMU_MSG_TransferTableSmu2Dram,
+ SMU_MSG_TransferTableDram2Smu,
+ SMU_MSG_UseDefaultPPTable,
+ SMU_MSG_UseBackupPPTable,
+ SMU_MSG_RunBtc,
+ SMU_MSG_RequestI2CBus,
+ SMU_MSG_ReleaseI2CBus,
+ SMU_MSG_SetFloorSocVoltage,
+ SMU_MSG_SoftReset,
+ SMU_MSG_StartBacoMonitor,
+ SMU_MSG_CancelBacoMonitor,
+ SMU_MSG_EnterBaco,
+ SMU_MSG_SetSoftMinByFreq,
+ SMU_MSG_SetSoftMaxByFreq,
+ SMU_MSG_SetHardMinByFreq,
+ SMU_MSG_SetHardMaxByFreq,
+ SMU_MSG_GetMinDpmFreq,
+ SMU_MSG_GetMaxDpmFreq,
+ SMU_MSG_GetDpmFreqByIndex,
+ SMU_MSG_GetDpmClockFreq,
+ SMU_MSG_GetSsVoltageByDpm,
+ SMU_MSG_SetMemoryChannelConfig,
+ SMU_MSG_SetGeminiMode,
+ SMU_MSG_SetGeminiApertureHigh,
+ SMU_MSG_SetGeminiApertureLow,
+ SMU_MSG_SetMinLinkDpmByIndex,
+ SMU_MSG_OverridePcieParameters,
+ SMU_MSG_OverDriveSetPercentage,
+ SMU_MSG_SetMinDeepSleepDcefclk,
+ SMU_MSG_ReenableAcDcInterrupt,
+ SMU_MSG_NotifyPowerSource,
+ SMU_MSG_SetUclkFastSwitch,
+ SMU_MSG_SetUclkDownHyst,
+ SMU_MSG_GfxDeviceDriverReset,
+ SMU_MSG_GetCurrentRpm,
+ SMU_MSG_SetVideoFps,
+ SMU_MSG_SetTjMax,
+ SMU_MSG_SetFanTemperatureTarget,
+ SMU_MSG_PrepareMp1ForUnload,
+ SMU_MSG_DramLogSetDramAddrHigh,
+ SMU_MSG_DramLogSetDramAddrLow,
+ SMU_MSG_DramLogSetDramSize,
+ SMU_MSG_SetFanMaxRpm,
+ SMU_MSG_SetFanMinPwm,
+ SMU_MSG_ConfigureGfxDidt,
+ SMU_MSG_NumOfDisplays,
+ SMU_MSG_RemoveMargins,
+ SMU_MSG_ReadSerialNumTop32,
+ SMU_MSG_ReadSerialNumBottom32,
+ SMU_MSG_SetSystemVirtualDramAddrHigh,
+ SMU_MSG_SetSystemVirtualDramAddrLow,
+ SMU_MSG_WaflTest,
+ SMU_MSG_SetFclkGfxClkRatio,
+ SMU_MSG_AllowGfxOff,
+ SMU_MSG_DisallowGfxOff,
+ SMU_MSG_GetPptLimit,
+ SMU_MSG_GetDcModeMaxDpmFreq,
+ SMU_MSG_GetDebugData,
+ SMU_MSG_SetXgmiMode,
+ SMU_MSG_RunAfllBtc,
+ SMU_MSG_ExitBaco,
+ SMU_MSG_PrepareMp1ForReset,
+ SMU_MSG_PrepareMp1ForShutdown,
+ SMU_MSG_SetMGpuFanBoostLimitRpm,
+ SMU_MSG_GetAVFSVoltageByDpm,
+ SMU_MSG_MAX_COUNT,
+};
+
+enum smu_memory_pool_size
+{
+ SMU_MEMORY_POOL_SIZE_ZERO = 0,
+ SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
+ SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
+ SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000,
+ SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000,
+};
+
+#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
+ do { \
+ tables[table_id].size = s; \
+ tables[table_id].align = a; \
+ tables[table_id].domain = d; \
+ } while (0)
+
+struct smu_table {
+ uint64_t size;
+ uint32_t align;
+ uint8_t domain;
+ uint64_t mc_address;
+ void *cpu_addr;
+ struct amdgpu_bo *bo;
+};
+
+enum smu_perf_level_designation {
+ PERF_LEVEL_ACTIVITY,
+ PERF_LEVEL_POWER_CONTAINMENT,
+};
+
+struct smu_performance_level {
+ uint32_t core_clock;
+ uint32_t memory_clock;
+ uint32_t vddc;
+ uint32_t vddci;
+ uint32_t non_local_mem_freq;
+ uint32_t non_local_mem_width;
+};
+
+struct smu_clock_info {
+ uint32_t min_mem_clk;
+ uint32_t max_mem_clk;
+ uint32_t min_eng_clk;
+ uint32_t max_eng_clk;
+ uint32_t min_bus_bandwidth;
+ uint32_t max_bus_bandwidth;
+};
+
+struct smu_bios_boot_up_values
+{
+ uint32_t revision;
+ uint32_t gfxclk;
+ uint32_t uclk;
+ uint32_t socclk;
+ uint32_t dcefclk;
+ uint32_t eclk;
+ uint32_t vclk;
+ uint32_t dclk;
+ uint16_t vddc;
+ uint16_t vddci;
+ uint16_t mvddc;
+ uint16_t vdd_gfx;
+ uint8_t cooling_id;
+ uint32_t pp_table_id;
+};
+
+struct smu_table_context
+{
+ void *power_play_table;
+ uint32_t power_play_table_size;
+ void *hardcode_pptable;
+
+ void *max_sustainable_clocks;
+ struct smu_bios_boot_up_values boot_values;
+ void *driver_pptable;
+ struct smu_table *tables;
+ uint32_t table_count;
+ struct smu_table memory_pool;
+ uint16_t software_shutdown_temp;
+ uint8_t thermal_controller_type;
+ uint16_t TDPODLimit;
+
+ uint8_t *od_feature_capabilities;
+ uint32_t *od_settings_max;
+ uint32_t *od_settings_min;
+ void *overdrive_table;
+ void *od8_settings;
+ bool od_gfxclk_update;
+ bool od_memclk_update;
+};
+
+struct smu_dpm_context {
+ uint32_t dpm_context_size;
+ void *dpm_context;
+ void *golden_dpm_context;
+ bool enable_umd_pstate;
+ enum amd_dpm_forced_level dpm_level;
+ enum amd_dpm_forced_level saved_dpm_level;
+ enum amd_dpm_forced_level requested_dpm_level;
+ struct smu_power_state *dpm_request_power_state;
+ struct smu_power_state *dpm_current_power_state;
+ struct mclock_latency_table *mclk_latency_table;
+};
+
+struct smu_power_context {
+ void *power_context;
+ uint32_t power_context_size;
+};
+
+
+#define SMU_FEATURE_MAX (64)
+struct smu_feature
+{
+ uint32_t feature_num;
+ DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
+ DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
+ DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
+ struct mutex mutex;
+};
+
+struct smu_clocks {
+ uint32_t engine_clock;
+ uint32_t memory_clock;
+ uint32_t bus_bandwidth;
+ uint32_t engine_clock_in_sr;
+ uint32_t dcef_clock;
+ uint32_t dcef_clock_in_sr;
+};
+
+#define MAX_REGULAR_DPM_NUM 16
+struct mclk_latency_entries {
+ uint32_t frequency;
+ uint32_t latency;
+};
+struct mclock_latency_table {
+ uint32_t count;
+ struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
+};
+
+#define WORKLOAD_POLICY_MAX 7
+struct smu_context
+{
+ struct amdgpu_device *adev;
+
+ const struct smu_funcs *funcs;
+ const struct pptable_funcs *ppt_funcs;
+ struct mutex mutex;
+ uint64_t pool_size;
+
+ struct smu_table_context smu_table;
+ struct smu_dpm_context smu_dpm;
+ struct smu_power_context smu_power;
+ struct smu_feature smu_feature;
+ struct amd_pp_display_configuration *display_config;
+
+ uint32_t pstate_sclk;
+ uint32_t pstate_mclk;
+
+ bool od_enabled;
+ uint32_t power_limit;
+ uint32_t default_power_limit;
+
+ bool support_power_containment;
+ bool disable_watermark;
+
+#define WATERMARKS_EXIST (1 << 0)
+#define WATERMARKS_LOADED (1 << 1)
+ uint32_t watermarks_bitmap;
+
+ uint32_t workload_mask;
+ uint32_t workload_prority[WORKLOAD_POLICY_MAX];
+ uint32_t workload_setting[WORKLOAD_POLICY_MAX];
+ uint32_t power_profile_mode;
+ uint32_t default_power_profile_mode;
+
+ uint32_t smc_if_version;
+};
+
+struct pptable_funcs {
+ int (*alloc_dpm_context)(struct smu_context *smu);
+ int (*store_powerplay_table)(struct smu_context *smu);
+ int (*check_powerplay_table)(struct smu_context *smu);
+ int (*append_powerplay_table)(struct smu_context *smu);
+ int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
+ int (*run_afll_btc)(struct smu_context *smu);
+ int (*get_unallowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
+ int (*set_default_dpm_table)(struct smu_context *smu);
+ int (*set_power_state)(struct smu_context *smu);
+ int (*populate_umd_state_clk)(struct smu_context *smu);
+ int (*print_clk_levels)(struct smu_context *smu, enum pp_clock_type type, char *buf);
+ int (*force_clk_levels)(struct smu_context *smu, enum pp_clock_type type, uint32_t mask);
+ int (*set_default_od8_settings)(struct smu_context *smu);
+ int (*update_specified_od8_value)(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value);
+ int (*get_od_percentage)(struct smu_context *smu, enum pp_clock_type type);
+ int (*set_od_percentage)(struct smu_context *smu,
+ enum pp_clock_type type,
+ uint32_t value);
+ int (*od_edit_dpm_table)(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+ int (*get_clock_by_type_with_latency)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_latency
+ *clocks);
+ int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_voltage
+ *clocks);
+ int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+ int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+ enum amd_dpm_forced_level (*get_performance_level)(struct smu_context *smu);
+ int (*force_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
+ int (*pre_display_config_changed)(struct smu_context *smu);
+ int (*display_config_changed)(struct smu_context *smu);
+ int (*apply_clocks_adjust_rules)(struct smu_context *smu);
+ int (*notify_smc_dispaly_config)(struct smu_context *smu);
+ int (*force_dpm_limit_value)(struct smu_context *smu, bool highest);
+ int (*unforce_dpm_levels)(struct smu_context *smu);
+ int (*upload_dpm_level)(struct smu_context *smu, bool max,
+ uint32_t feature_mask);
+ int (*get_profiling_clk_mask)(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask);
+ int (*set_cpu_power_state)(struct smu_context *smu);
+};
+
+struct smu_funcs
+{
+ int (*init_microcode)(struct smu_context *smu);
+ int (*init_smc_tables)(struct smu_context *smu);
+ int (*fini_smc_tables)(struct smu_context *smu);
+ int (*init_power)(struct smu_context *smu);
+ int (*fini_power)(struct smu_context *smu);
+ int (*load_microcode)(struct smu_context *smu);
+ int (*check_fw_status)(struct smu_context *smu);
+ int (*read_pptable_from_vbios)(struct smu_context *smu);
+ int (*get_vbios_bootup_values)(struct smu_context *smu);
+ int (*get_clk_info_from_vbios)(struct smu_context *smu);
+ int (*check_pptable)(struct smu_context *smu);
+ int (*parse_pptable)(struct smu_context *smu);
+ int (*populate_smc_pptable)(struct smu_context *smu);
+ int (*check_fw_version)(struct smu_context *smu);
+ int (*write_pptable)(struct smu_context *smu);
+ int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+ int (*set_tool_table_location)(struct smu_context *smu);
+ int (*notify_memory_pool_location)(struct smu_context *smu);
+ int (*write_watermarks_table)(struct smu_context *smu);
+ int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
+ int (*system_features_control)(struct smu_context *smu, bool en);
+ int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
+ int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param);
+ int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
+ int (*init_display)(struct smu_context *smu);
+ int (*set_allowed_mask)(struct smu_context *smu);
+ int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ bool (*is_dpm_running)(struct smu_context *smu);
+ int (*update_feature_enable_state)(struct smu_context *smu, uint32_t feature_id, bool enabled);
+ int (*notify_display_change)(struct smu_context *smu);
+ int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool def);
+ int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+ int (*get_current_clk_freq)(struct smu_context *smu, uint32_t clk_id, uint32_t *value);
+ int (*init_max_sustainable_clocks)(struct smu_context *smu);
+ int (*start_thermal_control)(struct smu_context *smu);
+ int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+ int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
+ int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
+ int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
+ bool cc6_disable, bool pstate_disable,
+ bool pstate_switch_disable);
+ int (*get_clock_by_type)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+ int (*get_max_high_clocks)(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+ int (*display_clock_voltage_request)(struct smu_context *smu, struct
+ pp_display_clock_request
+ *clock_req);
+ int (*get_dal_power_level)(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+ int (*get_perf_level)(struct smu_context *smu,
+ enum smu_perf_level_designation designation,
+ struct smu_performance_level *level);
+ int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
+ struct smu_clock_info *clocks);
+ int (*notify_smu_enable_pwe)(struct smu_context *smu);
+ int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
+ int (*set_od8_default_settings)(struct smu_context *smu,
+ bool initialize);
+ int (*conv_power_profile_to_pplib_workload)(int power_profile);
+ int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+ int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+ int (*update_od8_settings)(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value);
+ int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable);
+ int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable);
+ uint32_t (*get_sclk)(struct smu_context *smu, bool low);
+ uint32_t (*get_mclk)(struct smu_context *smu, bool low);
+ int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed);
+ uint32_t (*get_fan_control_mode)(struct smu_context *smu);
+ int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+ int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+ int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
+ int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
+ int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
+
+};
+
+#define smu_init_microcode(smu) \
+ ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+ ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+ ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+ ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+ ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
+#define smu_load_microcode(smu) \
+ ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
+#define smu_check_fw_status(smu) \
+ ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
+#define smu_read_pptable_from_vbios(smu) \
+ ((smu)->funcs->read_pptable_from_vbios ? (smu)->funcs->read_pptable_from_vbios((smu)) : 0)
+#define smu_get_vbios_bootup_values(smu) \
+ ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+ ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+ ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+ ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_pptable(smu) \
+ ((smu)->funcs->populate_smc_pptable ? (smu)->funcs->populate_smc_pptable((smu)) : 0)
+#define smu_check_fw_version(smu) \
+ ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+ ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+ ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+ ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+ ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_write_watermarks_table(smu) \
+ ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+ ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+ ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+ ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_od8_default_settings(smu, initialize) \
+ ((smu)->funcs->set_od8_default_settings ? (smu)->funcs->set_od8_default_settings((smu), (initialize)) : 0)
+#define smu_update_od8_settings(smu, index, value) \
+ ((smu)->funcs->update_od8_settings ? (smu)->funcs->update_od8_settings((smu), (index), (value)) : 0)
+#define smu_get_current_rpm(smu, speed) \
+ ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0)
+#define smu_set_fan_speed_rpm(smu, speed) \
+ ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
+#define smu_send_smc_msg(smu, msg) \
+ ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+ ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+ ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display(smu) \
+ ((smu)->funcs->init_display ? (smu)->funcs->init_display((smu)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+ ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+ ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+ ((smu)->funcs->is_dpm_running ? (smu)->funcs->is_dpm_running((smu)) : 0)
+#define smu_feature_update_enable_state(smu, feature_id, enabled) \
+ ((smu)->funcs->update_feature_enable_state? (smu)->funcs->update_feature_enable_state((smu), (feature_id), (enabled)) : 0)
+#define smu_notify_display_change(smu) \
+ ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+ ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+ ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+ ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+ ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+ ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+#define smu_update_specified_od8_value(smu, index, value) \
+ ((smu)->ppt_funcs->update_specified_od8_value ? (smu)->ppt_funcs->update_specified_od8_value((smu), (index), (value)) : 0)
+#define smu_get_power_limit(smu, limit, def) \
+ ((smu)->funcs->get_power_limit ? (smu)->funcs->get_power_limit((smu), (limit), (def)) : 0)
+#define smu_set_power_limit(smu, limit) \
+ ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+#define smu_print_clk_levels(smu, type, buf) \
+ ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (type), (buf)) : 0)
+#define smu_force_clk_levels(smu, type, level) \
+ ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (type), (level)) : 0)
+#define smu_get_od_percentage(smu, type) \
+ ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
+#define smu_set_od_percentage(smu, type, value) \
+ ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
+#define smu_od_edit_dpm_table(smu, type, input, size) \
+ ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
+#define smu_start_thermal_control(smu) \
+ ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
+#define smu_read_sensor(smu, sensor, data, size) \
+ ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
+#define smu_get_power_profile_mode(smu, buf) \
+ ((smu)->funcs->get_power_profile_mode ? (smu)->funcs->get_power_profile_mode((smu), buf) : 0)
+#define smu_set_power_profile_mode(smu, param, param_size) \
+ ((smu)->funcs->set_power_profile_mode ? (smu)->funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
+#define smu_get_performance_level(smu) \
+ ((smu)->ppt_funcs->get_performance_level ? (smu)->ppt_funcs->get_performance_level((smu)) : 0)
+#define smu_force_performance_level(smu, level) \
+ ((smu)->ppt_funcs->force_performance_level ? (smu)->ppt_funcs->force_performance_level((smu), (level)) : 0)
+#define smu_pre_display_config_changed(smu) \
+ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+ ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_dispaly_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_upload_dpm_level(smu, max, feature_mask) \
+ ((smu)->ppt_funcs->upload_dpm_level ? (smu)->ppt_funcs->upload_dpm_level((smu), (max), (feature_mask)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+ ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+#define smu_get_fan_control_mode(smu) \
+ ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
+#define smu_set_fan_control_mode(smu, value) \
+ ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
+#define smu_get_fan_speed_percent(smu, speed) \
+ ((smu)->funcs->get_fan_speed_percent ? (smu)->funcs->get_fan_speed_percent((smu), (speed)) : 0)
+#define smu_set_fan_speed_percent(smu, speed) \
+ ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
+
+#define smu_msg_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_run_afll_btc(smu) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
+#define smu_get_unallowed_feature_mask(smu, feature_mask, num) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_unallowed_feature_mask? (smu)->ppt_funcs->get_unallowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+#define smu_set_deep_sleep_dcefclk(smu, clk) \
+ ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
+#define smu_set_active_display_count(smu, count) \
+ ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+#define smu_get_clock_by_type(smu, type, clocks) \
+ ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
+#define smu_get_max_high_clocks(smu, clocks) \
+ ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
+#define smu_get_clock_by_type_with_latency(smu, type, clocks) \
+ ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (type), (clocks)) : 0)
+#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
+ ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
+#define smu_display_clock_voltage_request(smu, clock_req) \
+ ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
+#define smu_get_dal_power_level(smu, clocks) \
+ ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+ ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+#define smu_notify_smu_enable_pwe(smu) \
+ ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
+#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
+ ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
+#define smu_dpm_set_uvd_enable(smu, enable) \
+ ((smu)->funcs->dpm_set_uvd_enable ? (smu)->funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+ ((smu)->funcs->dpm_set_vce_enable ? (smu)->funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+#define smu_get_sclk(smu, low) \
+ ((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0)
+#define smu_get_mclk(smu, low) \
+ ((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0)
+#define smu_set_xgmi_pstate(smu, pstate) \
+ ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
+
+
+extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint8_t **addr);
+
+extern const struct amd_ip_funcs smu_ip_funcs;
+
+extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern int smu_feature_init_dpm(struct smu_context *smu);
+
+extern int smu_feature_is_enabled(struct smu_context *smu, int feature_id);
+extern int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable);
+extern int smu_feature_is_supported(struct smu_context *smu, int feature_id);
+extern int smu_feature_set_supported(struct smu_context *smu, int feature_id, bool enable);
+
+int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+ void *table_data, bool drv2smu);
+#define smu_update_table(smu, table_id, table_data, drv2smu) \
+ smu_update_table_with_arg((smu), (table_id), 0, (table_data), (drv2smu))
+
+bool is_support_sw_smu(struct amdgpu_device *adev);
+int smu_reset(struct smu_context *smu);
+int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+int smu_sys_get_pp_table(struct smu_context *smu, void **table);
+int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
+int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+
+/* smu to display interface */
+extern int smu_display_configuration_change(struct smu_context *smu, const
+ struct amd_pp_display_configuration
+ *display_config);
+extern int smu_get_current_clocks(struct smu_context *smu,
+ struct amd_pp_clock_info *clocks);
+extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
+extern int smu_handle_task(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum amd_pp_task task_id);
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index a2991fa2e6f8..90879e4092a3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -85,7 +85,6 @@
#define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36
#define PPSMC_Message_Count 0x37
-
typedef uint16_t PPSMC_Result;
typedef int PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10.h b/drivers/gpu/drm/amd/powerplay/inc/smu10.h
index 9e837a5014c5..b96520528240 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu10.h
@@ -136,12 +136,14 @@
#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT)
/* Workload bits */
-#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
-#define WORKLOAD_PPLIB_VIDEO_BIT 2
-#define WORKLOAD_PPLIB_VR_BIT 3
-#define WORKLOAD_PPLIB_COMPUTE_BIT 4
-#define WORKLOAD_PPLIB_CUSTOM_BIT 5
-#define WORKLOAD_PPLIB_COUNT 6
+#define WORKLOAD_DEFAULT_BIT 0
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
+#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
+#define WORKLOAD_PPLIB_VIDEO_BIT 3
+#define WORKLOAD_PPLIB_VR_BIT 4
+#define WORKLOAD_PPLIB_COMPUTE_BIT 5
+#define WORKLOAD_PPLIB_CUSTOM_BIT 6
+#define WORKLOAD_PPLIB_COUNT 7
typedef struct {
/* MP1_EXT_SCRATCH0 */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 63d5cf691549..195c4ae67058 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -99,7 +99,7 @@
#define FEATURE_DS_MP1CLK_BIT 30
#define FEATURE_DS_MP0CLK_BIT 31
#define FEATURE_XGMI_BIT 32
-#define FEATURE_SPARE_33_BIT 33
+#define FEATURE_ECC_BIT 33
#define FEATURE_SPARE_34_BIT 34
#define FEATURE_SPARE_35_BIT 35
#define FEATURE_SPARE_36_BIT 36
@@ -165,7 +165,8 @@
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
-#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT )
+#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT )
+#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT )
#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
new file mode 100644
index 000000000000..aa8d81f4111e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V11_0_H__
+#define __SMU_V11_0_H__
+
+#include "amdgpu_smu.h"
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+/* address block */
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+#define smnMP0_FW_INTF 0x30101c0
+#define smnMP1_PUB_CTRL 0x3010b14
+
+struct smu_11_0_max_sustainable_clocks {
+ uint32_t display_clock;
+ uint32_t phy_clock;
+ uint32_t pixel_clock;
+ uint32_t uclock;
+ uint32_t dcef_clock;
+ uint32_t soc_clock;
+};
+
+struct smu_11_0_dpm_table {
+ uint32_t min; /* MHz */
+ uint32_t max; /* MHz */
+};
+
+struct smu_11_0_dpm_tables {
+ struct smu_11_0_dpm_table soc_table;
+ struct smu_11_0_dpm_table gfx_table;
+ struct smu_11_0_dpm_table uclk_table;
+ struct smu_11_0_dpm_table eclk_table;
+ struct smu_11_0_dpm_table vclk_table;
+ struct smu_11_0_dpm_table dclk_table;
+ struct smu_11_0_dpm_table dcef_table;
+ struct smu_11_0_dpm_table pixel_table;
+ struct smu_11_0_dpm_table display_table;
+ struct smu_11_0_dpm_table phy_table;
+ struct smu_11_0_dpm_table fclk_table;
+};
+
+struct smu_11_0_dpm_context {
+ struct smu_11_0_dpm_tables dpm_tables;
+ uint32_t workload_policy_mask;
+ uint32_t dcef_min_ds_clk;
+};
+
+enum smu_11_0_power_state {
+ SMU_11_0_POWER_STATE__D0 = 0,
+ SMU_11_0_POWER_STATE__D1,
+ SMU_11_0_POWER_STATE__D3, /* Sleep*/
+ SMU_11_0_POWER_STATE__D4, /* Hibernate*/
+ SMU_11_0_POWER_STATE__D5, /* Power off*/
+};
+
+struct smu_11_0_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_11_0_power_state power_state;
+};
+
+void smu_v11_0_set_smu_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
new file mode 100644
index 000000000000..f466f624ad32
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_V11_0_PPSMC_H
+#define SMU_V11_0_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+// BASIC
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
+#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
+#define PPSMC_MSG_EnableAllSmuFeatures 0x6
+#define PPSMC_MSG_DisableAllSmuFeatures 0x7
+#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
+#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
+#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
+#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xE
+#define PPSMC_MSG_SetDriverDramAddrLow 0xF
+#define PPSMC_MSG_SetToolsDramAddrHigh 0x10
+#define PPSMC_MSG_SetToolsDramAddrLow 0x11
+#define PPSMC_MSG_TransferTableSmu2Dram 0x12
+#define PPSMC_MSG_TransferTableDram2Smu 0x13
+#define PPSMC_MSG_UseDefaultPPTable 0x14
+#define PPSMC_MSG_UseBackupPPTable 0x15
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17
+
+//BACO/BAMACO/BOMACO
+#define PPSMC_MSG_EnterBaco 0x18
+#define PPSMC_MSG_ExitBaco 0x19
+
+//DPM
+#define PPSMC_MSG_SetSoftMinByFreq 0x1A
+#define PPSMC_MSG_SetSoftMaxByFreq 0x1B
+#define PPSMC_MSG_SetHardMinByFreq 0x1C
+#define PPSMC_MSG_SetHardMaxByFreq 0x1D
+#define PPSMC_MSG_GetMinDpmFreq 0x1E
+#define PPSMC_MSG_GetMaxDpmFreq 0x1F
+#define PPSMC_MSG_GetDpmFreqByIndex 0x20
+#define PPSMC_MSG_OverridePcieParameters 0x21
+#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x22
+#define PPSMC_MSG_SetWorkloadMask 0x23
+#define PPSMC_MSG_SetUclkFastSwitch 0x24
+#define PPSMC_MSG_GetAvfsVoltageByDpm 0x25
+#define PPSMC_MSG_SetVideoFps 0x26
+#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27
+
+//Power Gating
+#define PPSMC_MSG_AllowGfxOff 0x28
+#define PPSMC_MSG_DisallowGfxOff 0x29
+#define PPSMC_MSG_PowerUpVcn 0x2A
+#define PPSMC_MSG_PowerDownVcn 0x2B
+#define PPSMC_MSG_PowerUpJpeg 0x2C
+#define PPSMC_MSG_PowerDownJpeg 0x2D
+//reserve 0x2A to 0x2F for PG harvesting TBD
+
+//I2C Interface
+#define PPSMC_RequestI2cTransaction 0x30
+
+//Resets
+#define PPSMC_MSG_SoftReset 0x31 //FIXME Need confirmation from driver
+#define PPSMC_MSG_PrepareMp1ForUnload 0x32
+#define PPSMC_MSG_PrepareMp1ForReset 0x33
+#define PPSMC_MSG_PrepareMp1ForShutdown 0x34
+
+//ACDC Power Source
+#define PPSMC_MSG_SetPptLimit 0x35
+#define PPSMC_MSG_GetPptLimit 0x36
+#define PPSMC_MSG_ReenableAcDcInterrupt 0x37
+#define PPSMC_MSG_NotifyPowerSource 0x38
+//#define PPSMC_MSG_GfxDeviceDriverReset 0x39 //FIXME mode1 and 2 resets will go directly go PSP
+
+//BTC
+#define PPSMC_MSG_RunBtc 0x3A
+
+//Debug
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x3B
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x3C
+#define PPSMC_MSG_DramLogSetDramSize 0x3D
+#define PPSMC_MSG_GetDebugData 0x3E
+
+//Others
+#define PPSMC_MSG_ConfigureGfxDidt 0x3F
+#define PPSMC_MSG_NumOfDisplays 0x40
+
+#define PPSMC_MSG_SetMemoryChannelConfig 0x41
+#define PPSMC_MSG_SetGeminiMode 0x42
+#define PPSMC_MSG_SetGeminiApertureHigh 0x43
+#define PPSMC_MSG_SetGeminiApertureLow 0x44
+
+#define PPSMC_Message_Count 0x45
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_Msg;
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
new file mode 100644
index 000000000000..92c65b80bde2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef SMU_11_0_PPTABLE_H
+#define SMU_11_0_PPTABLE_H
+
+
+#define SMU_11_0_TABLE_FORMAT_REVISION 12
+
+// POWERPLAYTABLE::ulPlatformCaps
+#define SMU_11_0_PP_PLATFORM_CAP_POWERPLAY 0x1
+#define SMU_11_0_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2
+#define SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC 0x4
+#define SMU_11_0_PP_PLATFORM_CAP_BACO 0x8
+#define SMU_11_0_PP_PLATFORM_CAP_MACO 0x10
+#define SMU_11_0_PP_PLATFORM_CAP_SHADOWPSTATE 0x20
+
+// SMU_11_0_PP_THERMALCONTROLLER - Thermal Controller Type
+#define SMU_11_0_PP_THERMALCONTROLLER_NONE 0
+
+#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
+#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
+
+enum SMU_11_0_ODFEATURE_ID {
+ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
+ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
+ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
+ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
+ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
+ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
+ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
+ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
+ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
+ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //Fan Curve feature (TODO)
+ SMU_11_0_ODFEATURE_COUNT = 14,
+};
+#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
+
+enum SMU_11_0_ODSETTING_ID {
+ SMU_11_0_ODSETTING_GFXCLKFMAX = 0,
+ SMU_11_0_ODSETTING_GFXCLKFMIN,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+ SMU_11_0_ODSETTING_UCLKFMAX,
+ SMU_11_0_ODSETTING_POWERPERCENTAGE,
+ SMU_11_0_ODSETTING_FANRPMMIN,
+ SMU_11_0_ODSETTING_FANRPMACOUSTICLIMIT,
+ SMU_11_0_ODSETTING_FANTARGETTEMPERATURE,
+ SMU_11_0_ODSETTING_OPERATINGTEMPMAX,
+ SMU_11_0_ODSETTING_ACTIMING,
+ SMU_11_0_ODSETTING_FAN_ZERO_RPM_CONTROL,
+ SMU_11_0_ODSETTING_AUTOUVENGINE,
+ SMU_11_0_ODSETTING_AUTOOCENGINE,
+ SMU_11_0_ODSETTING_AUTOOCMEMORY,
+ SMU_11_0_ODSETTING_COUNT,
+};
+#define SMU_11_0_MAX_ODSETTING 32 //Maximum Number of ODSettings
+
+struct smu_11_0_overdrive_table
+{
+ uint8_t revision; //Revision = SMU_11_0_PP_OVERDRIVE_VERSION
+ uint8_t reserve[3]; //Zero filled field reserved for future use
+ uint32_t feature_count; //Total number of supported features
+ uint32_t setting_count; //Total number of supported settings
+ uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD feature support flags
+ uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings
+ uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings
+} __attribute__((packed));
+
+enum SMU_11_0_PPCLOCK_ID {
+ SMU_11_0_PPCLOCK_GFXCLK = 0,
+ SMU_11_0_PPCLOCK_VCLK,
+ SMU_11_0_PPCLOCK_DCLK,
+ SMU_11_0_PPCLOCK_ECLK,
+ SMU_11_0_PPCLOCK_SOCCLK,
+ SMU_11_0_PPCLOCK_UCLK,
+ SMU_11_0_PPCLOCK_DCEFCLK,
+ SMU_11_0_PPCLOCK_DISPCLK,
+ SMU_11_0_PPCLOCK_PIXCLK,
+ SMU_11_0_PPCLOCK_PHYCLK,
+ SMU_11_0_PPCLOCK_COUNT,
+};
+#define SMU_11_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
+
+struct smu_11_0_power_saving_clock_table
+{
+ uint8_t revision; //Revision = SMU_11_0_PP_POWERSAVINGCLOCK_VERSION
+ uint8_t reserve[3]; //Zero filled field reserved for future use
+ uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
+ uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
+ uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
+} __attribute__((packed));
+
+struct smu_11_0_powerplay_table
+{
+ struct atom_common_table_header header;
+ uint8_t table_revision;
+ uint32_t table_size; //Driver portion table size. The offset to smc_pptable including header size
+ uint32_t golden_pp_id;
+ uint32_t golden_revision;
+ uint16_t format_id;
+ uint32_t platform_caps; //POWERPLAYTABLE::ulPlatformCaps
+
+ uint8_t thermal_controller_type; //one of SMU_11_0_PP_THERMALCONTROLLER
+
+ uint16_t small_power_limit1;
+ uint16_t small_power_limit2;
+ uint16_t boost_power_limit;
+ uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning.
+ uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning.
+ uint16_t software_shutdown_temp;
+
+ uint16_t reserve[6]; //Zero filled field reserved for future use
+
+ struct smu_11_0_power_saving_clock_table power_saving_clock;
+ struct smu_11_0_overdrive_table overdrive_table;
+
+ PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+} __attribute__((packed));
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index 4f63a736ea0e..a0883038f3c3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -119,7 +119,8 @@
#define PPSMC_MSG_PrepareMp1ForShutdown 0x5A
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
-#define PPSMC_Message_Count 0x60
+#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
+#define PPSMC_Message_Count 0x61
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
new file mode 100644
index 000000000000..92903a4cc4d8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -0,0 +1,1977 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v11_0.h"
+#include "smu11_driver_if.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "vega20_ppt.h"
+#include "pp_thermal.h"
+
+#include "asic_reg/thm/thm_11_0_2_offset.h"
+#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
+#include "asic_reg/mp/mp_9_0_offset.h"
+#include "asic_reg/mp/mp_9_0_sh_mask.h"
+#include "asic_reg/nbio/nbio_7_4_offset.h"
+#include "asic_reg/smuio/smuio_9_0_offset.h"
+#include "asic_reg/smuio/smuio_9_0_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
+
+#define SMU11_TOOL_SIZE 0x19000
+#define SMU11_THERMAL_MINIMUM_ALERT_TEMP 0
+#define SMU11_THERMAL_MAXIMUM_ALERT_TEMP 255
+
+#define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
+#define SMU11_VOLTAGE_SCALE 4
+
+#define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
+ FEATURE_DPM_GFXCLK_MASK | \
+ FEATURE_DPM_UCLK_MASK | \
+ FEATURE_DPM_SOCCLK_MASK | \
+ FEATURE_DPM_UVD_MASK | \
+ FEATURE_DPM_VCE_MASK | \
+ FEATURE_DPM_MP0CLK_MASK | \
+ FEATURE_DPM_LINK_MASK | \
+ FEATURE_DPM_DCEFCLK_MASK)
+
+static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ return 0;
+}
+
+static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ return 0;
+}
+
+static int smu_v11_0_wait_for_response(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t cur_value, i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == adev->usec_timeout)
+ return -ETIME;
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+}
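+
+/*
+ * Mailbox handshake: the driver clears MP1_SMN_C2PMSG_90, writes the optional
+ * argument to C2PMSG_82 and the ASIC-specific message index to C2PMSG_66,
+ * then polls C2PMSG_90 for up to adev->usec_timeout microseconds; 0x1 means
+ * success and any result argument is read back from C2PMSG_82.
+ */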
+
+static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ smu_v11_0_wait_for_response(smu);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v11_0_wait_for_response(smu);
+
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x\n", index,
+ ret);
+
+ return ret;
+
+}
+
+static int
+smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param)
+{
+
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ ret = smu_v11_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
+ index, ret, param);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+
+ smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v11_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+
+ return ret;
+}
+
+static int smu_v11_0_init_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const char *chip_name;
+ char fw_name[30];
+ int err = 0;
+ const struct smc_firmware_header_v1_0 *hdr;
+ const struct common_firmware_header *header;
+ struct amdgpu_firmware_info *ucode = NULL;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ chip_name = "vega20";
+ break;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->pm.fw);
+ if (err)
+ goto out;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
+ ucode->fw = adev->pm.fw;
+ header = (const struct common_firmware_header *)ucode->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+
+out:
+ if (err) {
+ DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ }
+ return err;
+}
+
+static int smu_v11_0_load_microcode(struct smu_context *smu)
+{
+ return 0;
+}
+
+static int smu_v11_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+static int smu_v11_0_check_fw_version(struct smu_context *smu)
+{
+ uint32_t smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
+ if (ret)
+ goto err;
+
+ ret = smu_read_smc_arg(smu, &smu_version);
+ if (ret)
+ goto err;
+
+ if (smu_version != smu->smc_if_version)
+ ret = -EINVAL;
+err:
+ return ret;
+}
+
+static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ void *table;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
+
+ ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ (uint8_t **)&table);
+ if (ret)
+ return ret;
+
+ if (!smu->smu_table.power_play_table)
+ smu->smu_table.power_play_table = table;
+ if (!smu->smu_table.power_play_table_size)
+ smu->smu_table.power_play_table_size = size;
+
+ return 0;
+}
+
+static int smu_v11_0_init_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
+ return -EINVAL;
+
+ return smu_alloc_dpm_context(smu);
+}
+
+static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
+ return 0;
+}
+
+static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = NULL;
+ int ret = 0;
+
+ if (smu_table->tables || smu_table->table_count != 0)
+ return -EINVAL;
+
+ tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
+ if (!tables)
+ return -ENOMEM;
+
+ smu_table->tables = tables;
+ smu_table->table_count = TABLE_COUNT;
+
+ SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
+ sizeof(DpmActivityMonitorCoeffInt_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
+ ret = smu_v11_0_init_dpm_context(smu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ int ret = 0;
+
+ if (!smu_table->tables || smu_table->table_count == 0)
+ return -EINVAL;
+
+ kfree(smu_table->tables);
+ smu_table->tables = NULL;
+ smu_table->table_count = 0;
+
+ ret = smu_v11_0_fini_dpm_context(smu);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int smu_v11_0_init_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (smu_power->power_context || smu_power->power_context_size != 0)
+ return -EINVAL;
+
+ smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_power->power_context)
+ return -ENOMEM;
+ smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
+
+ return 0;
+}
+
+static int smu_v11_0_fini_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (!smu_power->power_context || smu_power->power_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_power->power_context);
+ smu_power->power_context = NULL;
+ smu_power->power_context_size = 0;
+
+ return 0;
+}
+
+int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_3 *v_3_3;
+ struct atom_firmware_info_v3_1 *v_3_1;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+ pr_err("unknown atom_firmware_info version! for smu11\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ break;
+ case 3:
+ default:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ }
+
+ return 0;
+}
+
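+/*
+ * The bootup SOCCLK/DCEFCLK/ECLK/VCLK/DCLK values are not carried by the
+ * firmwareinfo table, so each SYSPLL0 clock is queried through the
+ * getsmuclockinfo command table; the frequency comes back in Hz and is
+ * divided by 10000 to match the 10 kHz units of the other boot values.
+ */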
+static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+{
+ int ret, index;
+ struct amdgpu_device *adev = smu->adev;
+ struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
+
+ input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_ECLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_VCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_DCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ return 0;
+}
+
+static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+ uint64_t address;
+ uint32_t address_low, address_high;
+
+ if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
+ return ret;
+
+ address = (uintptr_t)memory_pool->cpu_addr;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSystemVirtualDramAddrHigh,
+ address_high);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSystemVirtualDramAddrLow,
+ address_low);
+ if (ret)
+ return ret;
+
+ address = memory_pool->mc_address;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ address_high);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ address_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ (uint32_t)memory_pool->size);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v11_0_check_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_check_powerplay_table(smu);
+ return ret;
+}
+
+static int smu_v11_0_parse_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ if (table_context->driver_pptable)
+ return -EINVAL;
+
+ table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
+
+ if (!table_context->driver_pptable)
+ return -ENOMEM;
+
+ ret = smu_store_powerplay_table(smu);
+ if (ret)
+ return -EINVAL;
+
+ ret = smu_append_powerplay_table(smu);
+
+ return ret;
+}
+
+static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_set_default_dpm_table(smu);
+
+ return ret;
+}
+
+static int smu_v11_0_write_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
+
+ return ret;
+}
+
+static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
+{
+ return smu_update_table(smu, TABLE_WATERMARKS,
+ smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
+}
+
+static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+{
+ int ret;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinDeepSleepDcefclk, clk);
+ if (ret)
+ pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
+
+ return ret;
+}
+
+static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ if (!table_context)
+ return -EINVAL;
+
+ return smu_set_deep_sleep_dcefclk(smu,
+ table_context->boot_values.dcefclk / 100);
+}
+
+static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
+
+ if (tool_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(tool_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrLow,
+ lower_32_bits(tool_table->mc_address));
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_init_display(struct smu_context *smu)
+{
+ int ret = 0;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ return ret;
+}
+
+static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
+{
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (feature_id < 32)
+ feature_low = (1 << feature_id);
+ else if (feature_id < 64)
+ feature_high = (1 << (feature_id - 32));
+ else
+ return -EINVAL;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+
+ }
+
+ return ret;
+}
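+
+/*
+ * SMU feature masks are 64 bits wide while each mailbox argument carries only
+ * 32 bits, so the enable/disable and allowed-mask requests are always sent as
+ * a Low/High message pair covering bits 0-31 and 32-63 respectively.
+ */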
+
+static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t feature_mask[2];
+
+ mutex_lock(&feature->mutex);
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ feature_mask[1]);
+ if (ret)
+ goto failed;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0]);
+ if (ret)
+ goto failed;
+
+failed:
+ mutex_unlock(&feature->mutex);
+ return ret;
+}
+
+static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ int ret = 0;
+
+ if (!feature_mask || num < 2)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_high);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_low);
+ if (ret)
+ return ret;
+
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+
+ return ret;
+}
+
+static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t feature_mask[2];
+ unsigned long feature_enabled;
+ ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
+ feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
+ ((uint64_t)feature_mask[1] << 32));
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int smu_v11_0_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_mask[2];
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures));
+ if (ret)
+ return ret;
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+
+ return ret;
+}
+
+static int smu_v11_0_notify_display_change(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
+
+ return ret;
+}
+
+static int
+smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
+ PPCLK_e clock_select)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
+ clock_select << 16);
+ if (ret) {
+ pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
+ return ret;
+ }
+
+ ret = smu_read_smc_arg(smu, clock);
+ if (ret)
+ return ret;
+
+ if (*clock != 0)
+ return 0;
+
+ /* if DC limit is zero, return AC limit */
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ clock_select << 16);
+ if (ret) {
+ pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
+ return ret;
+ }
+
+ ret = smu_read_smc_arg(smu, clock);
+
+ return ret;
+}
+
+static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
+{
+ struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
+ int ret = 0;
+
+ max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
+ GFP_KERNEL);
+ if (!max_sustainable_clocks)
+ return -ENOMEM;
+
+ smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
+
+ max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
+ max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
+ max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
+ max_sustainable_clocks->display_clock = 0xFFFFFFFF;
+ max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
+ max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->uclock),
+ PPCLK_UCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max UCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->soc_clock),
+ PPCLK_SOCCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max SOCCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->dcef_clock),
+ PPCLK_DCEFCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max DCEFCLK from SMC!",
+ __func__);
+ return ret;
+ }
+
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->display_clock),
+ PPCLK_DISPCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max DISPCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->phy_clock),
+ PPCLK_PHYCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max PHYCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->pixel_clock),
+ PPCLK_PIXCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max PIXCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
+ max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
+
+ return 0;
+}
+
+static int smu_v11_0_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool get_default)
+{
+ int ret = 0;
+
+ if (get_default) {
+ mutex_lock(&smu->mutex);
+ *limit = smu->default_power_limit;
+ if (smu->od_enabled) {
+ *limit *= (100 + smu->smu_table.TDPODLimit);
+ *limit /= 100;
+ }
+ mutex_unlock(&smu->mutex);
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
+ POWER_SOURCE_AC << 16);
+ if (ret) {
+ pr_err("[%s] get PPT limit failed!", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, limit);
+ smu->power_limit = *limit;
+ }
+
+ return ret;
+}
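+
+/*
+ * With overdrive enabled the board is allowed to run TDPODLimit percent above
+ * the default power limit; both the default query above and the ceiling
+ * computed below scale the default limit by that headroom.
+ */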
+
+static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+{
+ uint32_t max_power_limit;
+ int ret = 0;
+
+ if (n == 0)
+ n = smu->default_power_limit;
+
+ max_power_limit = smu->default_power_limit;
+
+ if (smu->od_enabled) {
+ max_power_limit *= (100 + smu->smu_table.TDPODLimit);
+ max_power_limit /= 100;
+ }
+
+ if (n > max_power_limit)
+ return -EINVAL;
+
+ if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT))
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+ if (ret) {
+ pr_err("[%s] Set power limit Failed!", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
+{
+ int ret = 0;
+ uint32_t freq;
+
+ if (clk_id >= PPCLK_COUNT || !value)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmClockFreq, (clk_id << 16));
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, &freq);
+ if (ret)
+ return ret;
+
+ freq *= 100;
+ *value = freq;
+
+ return ret;
+}
+
+static int smu_v11_0_get_thermal_range(struct smu_context *smu,
+ struct PP_TemperatureRange *range)
+{
+ memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
+
+ range->max = smu->smu_table.software_shutdown_temp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int smu_v11_0_set_thermal_range(struct smu_context *smu,
+ struct PP_TemperatureRange *range)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ uint32_t val;
+
+ if (low < range->min)
+ low = range->min;
+ if (high > range->max)
+ high = range->max;
+
+ if (low > high)
+ return -EINVAL;
+
+ val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+
+ return 0;
+}
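+
+/*
+ * The alert window programmed above is the pptable-derived range clamped to
+ * the 0..255 degree window supported by the THM block; DIG_THERM_INTH/INTL
+ * take plain degrees, so the per-degree scale factor is divided back out
+ * before the register write.
+ */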
+
+static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val = 0;
+
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
+
+ return 0;
+}
+
+static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
+{
+ int ret;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
+ (uint32_t)pptable->FanTargetTemperature);
+
+ return ret;
+}
+
+static int smu_v11_0_start_thermal_control(struct smu_context *smu)
+{
+ int ret = 0;
+ struct PP_TemperatureRange range;
+ struct amdgpu_device *adev = smu->adev;
+
+ smu_v11_0_get_thermal_range(smu, &range);
+
+ if (smu->smu_table.thermal_controller_type) {
+ ret = smu_v11_0_set_thermal_range(smu, &range);
+ if (ret)
+ return ret;
+
+ ret = smu_v11_0_enable_thermal_alert(smu);
+ if (ret)
+ return ret;
+ ret = smu_v11_0_set_thermal_fan_table(smu);
+ if (ret)
+ return ret;
+ }
+
+ adev->pm.dpm.thermal.min_temp = range.min;
+ adev->pm.dpm.thermal.max_temp = range.max;
+
+ return ret;
+}
+
+static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
+ uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
+ if (ret)
+ return ret;
+
+ *value = metrics.AverageGfxActivity;
+
+ return 0;
+}
+
+static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t temp = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
+ temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
+
+ temp = temp & 0x1ff;
+ temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ *value = temp;
+
+ return 0;
+}
+
+static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
+ if (ret)
+ return ret;
+
+ *value = metrics.CurrSocketPower << 8;
+
+ return 0;
+}
+
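+/*
+ * SVI0 telemetry reports a VID code; (6200 - vid * 25) / SMU11_VOLTAGE_SCALE
+ * decodes it to millivolts, i.e. a 1.55 V base stepping down by 6.25 mV per
+ * count.
+ */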
+static uint16_t convert_to_vddc(uint8_t vid)
+{
+ return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
+}
+
+static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t vdd = 0, val_vid = 0;
+
+ if (!value)
+ return -EINVAL;
+ val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
+ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
+ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
+
+ vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
+
+ *value = vdd;
+
+ return 0;
+
+}
+
+static int smu_v11_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ int ret = 0;
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v11_0_get_current_activity_percent(smu,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ ret = smu_v11_0_thermal_get_temperature(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
+ *(uint32_t *)data = 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
+ *(uint32_t *)data = pptable->FanMaximumRpm;
+ *size = 4;
+ break;
+ default:
+ ret = smu_common_read_sensor(smu, sensor, data, size);
+ break;
+ }
+
+ if (ret)
+ *size = 0;
+
+ return ret;
+}
+
+static int
+smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request
+ *clock_req)
+{
+ enum amd_pp_clock_type clk_type = clock_req->clock_type;
+ int ret = 0;
+ PPCLK_e clk_select = 0;
+ uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ switch (clk_type) {
+ case amd_pp_dcef_clock:
+ clk_select = PPCLK_DCEFCLK;
+ break;
+ case amd_pp_disp_clock:
+ clk_select = PPCLK_DISPCLK;
+ break;
+ case amd_pp_pixel_clock:
+ clk_select = PPCLK_PIXCLK;
+ break;
+ case amd_pp_phy_clock:
+ clk_select = PPCLK_PHYCLK;
+ break;
+ default:
+ pr_info("[%s] Invalid Clock Type!", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto failed;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ (clk_select << 16) | clk_freq);
+ }
+
+failed:
+ return ret;
+}
+
+static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
+ Watermarks_t *table, struct
+ dm_pp_wm_sets_with_clock_ranges_soc15
+ *clock_ranges)
+{
+ int i;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[1][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[0][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ return 0;
+}
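+
+/*
+ * WatermarkRow[1] receives the DMIF (DCEFCLK-based) ranges and
+ * WatermarkRow[0] the MCIF (SOCCLK-based) ranges; the incoming kHz bounds are
+ * converted to MHz and stored as little-endian 16-bit values for the SMU.
+ */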
+
+static int
+smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
+ dm_pp_wm_sets_with_clock_ranges_soc15
+ *clock_ranges)
+{
+ int ret = 0;
+ struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
+ Watermarks_t *table = watermarks->cpu_addr;
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
+ uint32_t *clock,
+ PPCLK_e clock_select,
+ bool max)
+{
+ int ret;
+ *clock = 0;
+ if (max) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ (clock_select << 16));
+ if (ret) {
+ pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
+ return ret;
+ }
+ smu_read_smc_arg(smu, clock);
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
+ (clock_select << 16));
+ if (ret) {
+ pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
+ return ret;
+ }
+ smu_read_smc_arg(smu, clock);
+ }
+
+ return 0;
+}
+
+static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
+{
+ uint32_t gfx_clk;
+ int ret;
+
+ if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
+ return -EPERM;
+ }
+
+ if (low) {
+ ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
+ if (ret) {
+ pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
+ return ret;
+ }
+ } else {
+ ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
+ if (ret) {
+ pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
+ return ret;
+ }
+ }
+
+ return (gfx_clk * 100);
+}
+
+static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
+{
+ uint32_t mem_clk;
+ int ret;
+
+ if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ pr_err("[GetMclks]: memclk dpm not enabled!\n");
+ return -EPERM;
+ }
+
+ if (low) {
+ ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
+ if (ret) {
+ pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
+ return ret;
+ }
+ } else {
+ ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, true);
+ if (ret) {
+ pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
+ return ret;
+ }
+ }
+
+ return (mem_clk * 100);
+}
+
+static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
+ bool initialize)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+ if (initialize) {
+ if (table_context->overdrive_table)
+ return -EINVAL;
+
+ table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
+
+ if (!table_context->overdrive_table)
+ return -ENOMEM;
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ smu_set_default_od8_settings(smu);
+ }
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
+{
+ int pplib_workload = 0;
+
+ switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_POWERSAVING:
+ pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+ break;
+ }
+
+ return pplib_workload;
+}
+
+static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
+{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+ uint32_t i, size = 0;
+ uint16_t workload_type = 0;
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ static const char *title[] = {
+ "PROFILE_INDEX(NAME)",
+ "CLOCK_TYPE(NAME)",
+ "FPS",
+ "UseRlcBusy",
+ "MinActiveFreqType",
+ "MinActiveFreq",
+ "BoosterFreqType",
+ "BoosterFreq",
+ "PD_Data_limit_c",
+ "PD_Data_error_coeff",
+ "PD_Data_error_rate_coeff"};
+ int result = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ title[0], title[1], title[2], title[3], title[4], title[5],
+ title[6], title[7], title[8], title[9], title[10]);
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
+ result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+ workload_type, &activity_monitor, false);
+ if (result) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return result;
+ }
+
+ size += sprintf(buf + size, "%2d %14s%s:\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 0,
+ "GFXCLK",
+ activity_monitor.Gfx_FPS,
+ activity_monitor.Gfx_UseRlcBusy,
+ activity_monitor.Gfx_MinActiveFreqType,
+ activity_monitor.Gfx_MinActiveFreq,
+ activity_monitor.Gfx_BoosterFreqType,
+ activity_monitor.Gfx_BoosterFreq,
+ activity_monitor.Gfx_PD_Data_limit_c,
+ activity_monitor.Gfx_PD_Data_error_coeff,
+ activity_monitor.Gfx_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 1,
+ "SOCCLK",
+ activity_monitor.Soc_FPS,
+ activity_monitor.Soc_UseRlcBusy,
+ activity_monitor.Soc_MinActiveFreqType,
+ activity_monitor.Soc_MinActiveFreq,
+ activity_monitor.Soc_BoosterFreqType,
+ activity_monitor.Soc_BoosterFreq,
+ activity_monitor.Soc_PD_Data_limit_c,
+ activity_monitor.Soc_PD_Data_error_coeff,
+ activity_monitor.Soc_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 2,
+ "UCLK",
+ activity_monitor.Mem_FPS,
+ activity_monitor.Mem_UseRlcBusy,
+ activity_monitor.Mem_MinActiveFreqType,
+ activity_monitor.Mem_MinActiveFreq,
+ activity_monitor.Mem_BoosterFreqType,
+ activity_monitor.Mem_BoosterFreq,
+ activity_monitor.Mem_PD_Data_limit_c,
+ activity_monitor.Mem_PD_Data_error_coeff,
+ activity_monitor.Mem_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 3,
+ "FCLK",
+ activity_monitor.Fclk_FPS,
+ activity_monitor.Fclk_UseRlcBusy,
+ activity_monitor.Fclk_MinActiveFreqType,
+ activity_monitor.Fclk_MinActiveFreq,
+ activity_monitor.Fclk_BoosterFreqType,
+ activity_monitor.Fclk_BoosterFreq,
+ activity_monitor.Fclk_PD_Data_limit_c,
+ activity_monitor.Fclk_PD_Data_error_coeff,
+ activity_monitor.Fclk_PD_Data_error_rate_coeff);
+ }
+
+ return size;
+}
+
+static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+ int workload_type = 0, ret = 0;
+
+ if (input[size] > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %ld\n", input[size]);
+ return -EINVAL;
+ }
+
+ smu->power_profile_mode = input[size];
+
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false);
+ if (ret) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ switch (input[0]) {
+ case 0: /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[1];
+ activity_monitor.Gfx_UseRlcBusy = input[2];
+ activity_monitor.Gfx_MinActiveFreqType = input[3];
+ activity_monitor.Gfx_MinActiveFreq = input[4];
+ activity_monitor.Gfx_BoosterFreqType = input[5];
+ activity_monitor.Gfx_BoosterFreq = input[6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 1: /* Socclk */
+ activity_monitor.Soc_FPS = input[1];
+ activity_monitor.Soc_UseRlcBusy = input[2];
+ activity_monitor.Soc_MinActiveFreqType = input[3];
+ activity_monitor.Soc_MinActiveFreq = input[4];
+ activity_monitor.Soc_BoosterFreqType = input[5];
+ activity_monitor.Soc_BoosterFreq = input[6];
+ activity_monitor.Soc_PD_Data_limit_c = input[7];
+ activity_monitor.Soc_PD_Data_error_coeff = input[8];
+ activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 2: /* Uclk */
+ activity_monitor.Mem_FPS = input[1];
+ activity_monitor.Mem_UseRlcBusy = input[2];
+ activity_monitor.Mem_MinActiveFreqType = input[3];
+ activity_monitor.Mem_MinActiveFreq = input[4];
+ activity_monitor.Mem_BoosterFreqType = input[5];
+ activity_monitor.Mem_BoosterFreq = input[6];
+ activity_monitor.Mem_PD_Data_limit_c = input[7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 3: /* Fclk */
+ activity_monitor.Fclk_FPS = input[1];
+ activity_monitor.Fclk_UseRlcBusy = input[2];
+ activity_monitor.Fclk_MinActiveFreqType = input[3];
+ activity_monitor.Fclk_MinActiveFreq = input[4];
+ activity_monitor.Fclk_BoosterFreqType = input[5];
+ activity_monitor.Fclk_BoosterFreq = input[6];
+ activity_monitor.Fclk_PD_Data_limit_c = input[7];
+ activity_monitor.Fclk_PD_Data_error_coeff = input[8];
+ activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ break;
+ }
+
+ ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, true);
+ if (ret) {
+ pr_err("[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type =
+ smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
+ smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+
+ return ret;
+}
+
+static int smu_v11_0_update_od8_settings(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE,
+ table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ smu_update_specified_od8_value(smu, index, value);
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE,
+ table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
+ return 0;
+
+ if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
+ return 0;
+
+ return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
+}
+
+static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
+{
+ if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
+ return 0;
+
+ if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
+ return 0;
+
+ return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
+}
+
+static int smu_v11_0_get_current_rpm(struct smu_context *smu,
+ uint32_t *current_rpm)
+{
+ int ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+
+ if (ret) {
+ pr_err("Attempt to get current RPM from SMC Failed!\n");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, current_rpm);
+
+ return 0;
+}
+
+static uint32_t
+smu_v11_0_get_fan_control_mode(struct smu_context *smu)
+{
+ if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
+ return AMD_FAN_CTRL_MANUAL;
+ else
+ return AMD_FAN_CTRL_AUTO;
+}
+
+static int
+smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret = 0;
+ uint32_t percent = 0;
+ uint32_t current_rpm;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ ret = smu_v11_0_get_current_rpm(smu, &current_rpm);
+ percent = current_rpm * 100 / pptable->FanMaximumRpm;
+ *speed = percent > 100 ? 100 : percent;
+
+ return ret;
+}
+
+static int
+smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
+{
+ int ret = 0;
+
+ if (!smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
+ return 0;
+
+ ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
+ if (ret)
+ pr_err("[%s]%s smc FAN CONTROL feature failed!",
+ __func__, (start ? "Start" : "Stop"));
+
+ return ret;
+}
+
+static int
+smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+ CG_FDO_CTRL2, TMIN, 0));
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
+
+ return 0;
+}
+
+static int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100;
+ uint32_t duty;
+ uint64_t tmp64;
+ bool stop = 0;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (smu_v11_0_smc_fan_control(smu, stop))
+ return -EINVAL;
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
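+ /* Scale the requested percentage into the controller's duty range (FMAX_DUTY100). */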
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
+static int
+smu_v11_0_set_fan_control_mode(struct smu_context *smu,
+ uint32_t mode)
+{
+ int ret = 0;
+ bool start = 1;
+ bool stop = 0;
+
+ switch (mode) {
+ case AMD_FAN_CTRL_NONE:
+ ret = smu_v11_0_set_fan_speed_percent(smu, 100);
+ break;
+ case AMD_FAN_CTRL_MANUAL:
+ ret = smu_v11_0_smc_fan_control(smu, stop);
+ break;
+ case AMD_FAN_CTRL_AUTO:
+ ret = smu_v11_0_smc_fan_control(smu, start);
+ break;
+ default:
+ break;
+ }
+
+ if (ret) {
+ pr_err("[%s]Set fan control mode failed!", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+ uint32_t tach_period, crystal_clock_freq;
+ bool stop = 0;
+
+ if (!speed)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ ret = smu_v11_0_smc_fan_control(smu, stop);
+ if (ret)
+ goto set_fan_speed_rpm_failed;
+
+ crystal_clock_freq = amdgpu_asic_get_xclk(adev);
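+ /* Derive the tachometer target period from the reference (xclk) clock and the requested RPM. */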
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+ CG_TACH_CTRL, TARGET_PERIOD,
+ tach_period));
+
+ ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
+
+set_fan_speed_rpm_failed:
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
+static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate)
+{
+ int ret = 0;
+ mutex_lock(&(smu->mutex));
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetXgmiMode,
+ pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
+static const struct smu_funcs smu_v11_0_funcs = {
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
+ .write_pptable = smu_v11_0_write_pptable,
+ .write_watermarks_table = smu_v11_0_write_watermarks_table,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .init_display = smu_v11_0_init_display,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .is_dpm_running = smu_v11_0_is_dpm_running,
+ .system_features_control = smu_v11_0_system_features_control,
+ .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .get_power_limit = smu_v11_0_get_power_limit,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .read_sensor = smu_v11_0_read_sensor,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
+ .get_sclk = smu_v11_0_dpm_get_sclk,
+ .get_mclk = smu_v11_0_dpm_get_mclk,
+ .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
+ .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
+ .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
+ .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
+ .update_od8_settings = smu_v11_0_update_od8_settings,
+ .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
+ .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
+ .get_current_rpm = smu_v11_0_get_current_rpm,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+};
+
+void smu_v11_0_set_smu_funcs(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->funcs = &smu_v11_0_funcs;
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ vega20_set_ppt_funcs(smu);
+ break;
+ default:
+ pr_warn("Unknown asic for smu11\n");
+ }
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index d111dd4e03d7..6d11076a79ba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -212,6 +212,10 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
adev->pm.fw_version = hwmgr->smu_version >> 8;
+ if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
+ adev->pm.fw_version < 0x1e45)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
if (smu10_verify_smc_interface(hwmgr))
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index ba00744c3413..f301a73f6df1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -367,6 +367,26 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
return ret;
}
+int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_smumgr *priv =
+ (struct vega20_smumgr *)(hwmgr->smu_backend);
+ int ret = 0;
+
+ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ "[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
+ return ret);
+ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ "[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
+ return ret);
+
+ return ret;
+}
+
static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega20_smumgr *priv;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
index 77349c3f0162..ec953ab13e87 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
@@ -55,6 +55,7 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
uint8_t *table, uint16_t workload_type);
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
uint8_t *table, uint16_t workload_type);
+int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
new file mode 100644
index 000000000000..8fafcbdb1dfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -0,0 +1,2413 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v11_0.h"
+#include "smu11_driver_if.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "power_state.h"
+#include "vega20_ppt.h"
+#include "vega20_pptable.h"
+#include "vega20_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
+#define MSG_MAP(msg) \
+ [SMU_MSG_##msg] = PPSMC_MSG_##msg
+
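+/* Translation table from generic SMU_MSG_* indices to Vega20-specific PPSMC message IDs. */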
+static int vega20_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage),
+ MSG_MAP(GetSmuVersion),
+ MSG_MAP(GetDriverIfVersion),
+ MSG_MAP(SetAllowedFeaturesMaskLow),
+ MSG_MAP(SetAllowedFeaturesMaskHigh),
+ MSG_MAP(EnableAllSmuFeatures),
+ MSG_MAP(DisableAllSmuFeatures),
+ MSG_MAP(EnableSmuFeaturesLow),
+ MSG_MAP(EnableSmuFeaturesHigh),
+ MSG_MAP(DisableSmuFeaturesLow),
+ MSG_MAP(DisableSmuFeaturesHigh),
+ MSG_MAP(GetEnabledSmuFeaturesLow),
+ MSG_MAP(GetEnabledSmuFeaturesHigh),
+ MSG_MAP(SetWorkloadMask),
+ MSG_MAP(SetPptLimit),
+ MSG_MAP(SetDriverDramAddrHigh),
+ MSG_MAP(SetDriverDramAddrLow),
+ MSG_MAP(SetToolsDramAddrHigh),
+ MSG_MAP(SetToolsDramAddrLow),
+ MSG_MAP(TransferTableSmu2Dram),
+ MSG_MAP(TransferTableDram2Smu),
+ MSG_MAP(UseDefaultPPTable),
+ MSG_MAP(UseBackupPPTable),
+ MSG_MAP(RunBtc),
+ MSG_MAP(RequestI2CBus),
+ MSG_MAP(ReleaseI2CBus),
+ MSG_MAP(SetFloorSocVoltage),
+ MSG_MAP(SoftReset),
+ MSG_MAP(StartBacoMonitor),
+ MSG_MAP(CancelBacoMonitor),
+ MSG_MAP(EnterBaco),
+ MSG_MAP(SetSoftMinByFreq),
+ MSG_MAP(SetSoftMaxByFreq),
+ MSG_MAP(SetHardMinByFreq),
+ MSG_MAP(SetHardMaxByFreq),
+ MSG_MAP(GetMinDpmFreq),
+ MSG_MAP(GetMaxDpmFreq),
+ MSG_MAP(GetDpmFreqByIndex),
+ MSG_MAP(GetDpmClockFreq),
+ MSG_MAP(GetSsVoltageByDpm),
+ MSG_MAP(SetMemoryChannelConfig),
+ MSG_MAP(SetGeminiMode),
+ MSG_MAP(SetGeminiApertureHigh),
+ MSG_MAP(SetGeminiApertureLow),
+ MSG_MAP(SetMinLinkDpmByIndex),
+ MSG_MAP(OverridePcieParameters),
+ MSG_MAP(OverDriveSetPercentage),
+ MSG_MAP(SetMinDeepSleepDcefclk),
+ MSG_MAP(ReenableAcDcInterrupt),
+ MSG_MAP(NotifyPowerSource),
+ MSG_MAP(SetUclkFastSwitch),
+ MSG_MAP(SetUclkDownHyst),
+ MSG_MAP(GetCurrentRpm),
+ MSG_MAP(SetVideoFps),
+ MSG_MAP(SetTjMax),
+ MSG_MAP(SetFanTemperatureTarget),
+ MSG_MAP(PrepareMp1ForUnload),
+ MSG_MAP(DramLogSetDramAddrHigh),
+ MSG_MAP(DramLogSetDramAddrLow),
+ MSG_MAP(DramLogSetDramSize),
+ MSG_MAP(SetFanMaxRpm),
+ MSG_MAP(SetFanMinPwm),
+ MSG_MAP(ConfigureGfxDidt),
+ MSG_MAP(NumOfDisplays),
+ MSG_MAP(RemoveMargins),
+ MSG_MAP(ReadSerialNumTop32),
+ MSG_MAP(ReadSerialNumBottom32),
+ MSG_MAP(SetSystemVirtualDramAddrHigh),
+ MSG_MAP(SetSystemVirtualDramAddrLow),
+ MSG_MAP(WaflTest),
+ MSG_MAP(SetFclkGfxClkRatio),
+ MSG_MAP(AllowGfxOff),
+ MSG_MAP(DisallowGfxOff),
+ MSG_MAP(GetPptLimit),
+ MSG_MAP(GetDcModeMaxDpmFreq),
+ MSG_MAP(GetDebugData),
+ MSG_MAP(SetXgmiMode),
+ MSG_MAP(RunAfllBtc),
+ MSG_MAP(ExitBaco),
+ MSG_MAP(PrepareMp1ForReset),
+ MSG_MAP(PrepareMp1ForShutdown),
+ MSG_MAP(SetMGpuFanBoostLimitRpm),
+ MSG_MAP(GetAVFSVoltageByDpm),
+};
+
+static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index)
+{
+ int val;
+
+ if (index >= SMU_MSG_MAX_COUNT)
+ return -EINVAL;
+
+ val = vega20_message_map[index];
+ if (val > PPSMC_Message_Count)
+ return -EINVAL;
+
+ return val;
+}
+
+static int vega20_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (smu_dpm->dpm_context)
+ return -EINVAL;
+
+ smu_dpm->dpm_context = kzalloc(sizeof(struct vega20_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+
+ if (smu_dpm->golden_dpm_context)
+ return -EINVAL;
+
+ smu_dpm->golden_dpm_context = kzalloc(sizeof(struct vega20_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->golden_dpm_context)
+ return -ENOMEM;
+
+ smu_dpm->dpm_context_size = sizeof(struct vega20_dpm_table);
+
+ smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_current_power_state)
+ return -ENOMEM;
+
+ smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_request_power_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int vega20_setup_od8_information(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ uint32_t od_feature_count, od_feature_array_size,
+ od_setting_count, od_setting_array_size;
+
+ if (!table_context->power_play_table)
+ return -EINVAL;
+
+ powerplay_table = table_context->power_play_table;
+
+ if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) {
+ /* Setup correct ODFeatureCount, and store ODFeatureArray from
+ * powerplay table to od_feature_capabilities */
+ od_feature_count =
+ (le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount) >
+ ATOM_VEGA20_ODFEATURE_COUNT) ?
+ ATOM_VEGA20_ODFEATURE_COUNT :
+ le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount);
+
+ od_feature_array_size = sizeof(uint8_t) * od_feature_count;
+
+ if (table_context->od_feature_capabilities)
+ return -EINVAL;
+
+ table_context->od_feature_capabilities = kmemdup(&powerplay_table->OverDrive8Table.ODFeatureCapabilities,
+ od_feature_array_size,
+ GFP_KERNEL);
+ if (!table_context->od_feature_capabilities)
+ return -ENOMEM;
+
+ /* Setup correct ODSettingCount, and store ODSettingArray from
+ * powerplay table to od_settings_max and od_setting_min */
+ od_setting_count =
+ (le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount) >
+ ATOM_VEGA20_ODSETTING_COUNT) ?
+ ATOM_VEGA20_ODSETTING_COUNT :
+ le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount);
+
+ od_setting_array_size = sizeof(uint32_t) * od_setting_count;
+
+ if (table_context->od_settings_max)
+ return -EINVAL;
+
+ table_context->od_settings_max = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMax,
+ od_setting_array_size,
+ GFP_KERNEL);
+
+ if (!table_context->od_settings_max) {
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+ return -ENOMEM;
+ }
+
+ if (table_context->od_settings_min)
+ return -EINVAL;
+
+ table_context->od_settings_min = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMin,
+ od_setting_array_size,
+ GFP_KERNEL);
+
+ if (!table_context->od_settings_min) {
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+ kfree(table_context->od_settings_max);
+ table_context->od_settings_max = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int vega20_store_powerplay_table(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+ if (!table_context->power_play_table)
+ return -EINVAL;
+
+ powerplay_table = table_context->power_play_table;
+
+ memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
+ sizeof(PPTable_t));
+
+ table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
+ table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
+ table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
+
+ ret = vega20_setup_od8_information(smu);
+
+ return ret;
+}
+
+static int vega20_append_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ struct atom_smc_dpm_info_v4_4 *smc_dpm_table;
+ int index, i, ret;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smc_dpm_info);
+
+ ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table);
+ if (ret)
+ return ret;
+
+ smc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
+ smc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
+
+ smc_pptable->VddGfxVrMapping = smc_dpm_table->vddgfxvrmapping;
+ smc_pptable->VddSocVrMapping = smc_dpm_table->vddsocvrmapping;
+ smc_pptable->VddMem0VrMapping = smc_dpm_table->vddmem0vrmapping;
+ smc_pptable->VddMem1VrMapping = smc_dpm_table->vddmem1vrmapping;
+
+ smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->gfxulvphasesheddingmask;
+ smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->soculvphasesheddingmask;
+ smc_pptable->ExternalSensorPresent = smc_dpm_table->externalsensorpresent;
+
+ smc_pptable->GfxMaxCurrent = smc_dpm_table->gfxmaxcurrent;
+ smc_pptable->GfxOffset = smc_dpm_table->gfxoffset;
+ smc_pptable->Padding_TelemetryGfx = smc_dpm_table->padding_telemetrygfx;
+
+ smc_pptable->SocMaxCurrent = smc_dpm_table->socmaxcurrent;
+ smc_pptable->SocOffset = smc_dpm_table->socoffset;
+ smc_pptable->Padding_TelemetrySoc = smc_dpm_table->padding_telemetrysoc;
+
+ smc_pptable->Mem0MaxCurrent = smc_dpm_table->mem0maxcurrent;
+ smc_pptable->Mem0Offset = smc_dpm_table->mem0offset;
+ smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->padding_telemetrymem0;
+
+ smc_pptable->Mem1MaxCurrent = smc_dpm_table->mem1maxcurrent;
+ smc_pptable->Mem1Offset = smc_dpm_table->mem1offset;
+ smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->padding_telemetrymem1;
+
+ smc_pptable->AcDcGpio = smc_dpm_table->acdcgpio;
+ smc_pptable->AcDcPolarity = smc_dpm_table->acdcpolarity;
+ smc_pptable->VR0HotGpio = smc_dpm_table->vr0hotgpio;
+ smc_pptable->VR0HotPolarity = smc_dpm_table->vr0hotpolarity;
+
+ smc_pptable->VR1HotGpio = smc_dpm_table->vr1hotgpio;
+ smc_pptable->VR1HotPolarity = smc_dpm_table->vr1hotpolarity;
+ smc_pptable->Padding1 = smc_dpm_table->padding1;
+ smc_pptable->Padding2 = smc_dpm_table->padding2;
+
+ smc_pptable->LedPin0 = smc_dpm_table->ledpin0;
+ smc_pptable->LedPin1 = smc_dpm_table->ledpin1;
+ smc_pptable->LedPin2 = smc_dpm_table->ledpin2;
+
+ smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->pllgfxclkspreadenabled;
+ smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->pllgfxclkspreadpercent;
+ smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->pllgfxclkspreadfreq;
+
+ smc_pptable->UclkSpreadEnabled = 0;
+ smc_pptable->UclkSpreadPercent = smc_dpm_table->uclkspreadpercent;
+ smc_pptable->UclkSpreadFreq = smc_dpm_table->uclkspreadfreq;
+
+ smc_pptable->FclkSpreadEnabled = smc_dpm_table->fclkspreadenabled;
+ smc_pptable->FclkSpreadPercent = smc_dpm_table->fclkspreadpercent;
+ smc_pptable->FclkSpreadFreq = smc_dpm_table->fclkspreadfreq;
+
+ smc_pptable->FllGfxclkSpreadEnabled = smc_dpm_table->fllgfxclkspreadenabled;
+ smc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
+ smc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
+
+ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+ smc_pptable->I2cControllers[i].Enabled =
+ smc_dpm_table->i2ccontrollers[i].enabled;
+ smc_pptable->I2cControllers[i].SlaveAddress =
+ smc_dpm_table->i2ccontrollers[i].slaveaddress;
+ smc_pptable->I2cControllers[i].ControllerPort =
+ smc_dpm_table->i2ccontrollers[i].controllerport;
+ smc_pptable->I2cControllers[i].ThermalThrottler =
+ smc_dpm_table->i2ccontrollers[i].thermalthrottler;
+ smc_pptable->I2cControllers[i].I2cProtocol =
+ smc_dpm_table->i2ccontrollers[i].i2cprotocol;
+ smc_pptable->I2cControllers[i].I2cSpeed =
+ smc_dpm_table->i2ccontrollers[i].i2cspeed;
+ }
+
+ return 0;
+}
+
+static int vega20_check_powerplay_table(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ powerplay_table = table_context->power_play_table;
+
+ if (powerplay_table->sHeader.format_revision < ATOM_VEGA20_TABLE_REVISION_VEGA20) {
+ pr_err("Unsupported PPTable format!");
+ return -EINVAL;
+ }
+
+ if (!powerplay_table->sHeader.structuresize) {
+ pr_err("Invalid PowerPlay Table!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vega20_run_btc_afll(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+}
+
+static int
+vega20_get_unallowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ if (num > 2)
+ return -EINVAL;
+
+ feature_mask[0] = 0xE0041C00;
+ feature_mask[1] = 0xFFFFFFFE; /* bit32~bit63 is Unsupported */
+
+ return 0;
+}
+
+static enum
+amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+ mutex_unlock(&(smu->mutex));
+
+ return pm_type;
+}
+
+static int
+vega20_set_single_dpm_table(struct smu_context *smu,
+ struct vega20_single_dpm_table *single_dpm_table,
+ PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels = 0, clk;
+
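+ /* An index of 0xFF asks the SMC for the number of DPM levels of this clock. */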
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ if (ret) {
+ pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, &num_of_levels);
+ if (!num_of_levels) {
+ pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
+ return -EINVAL;
+ }
+
+ single_dpm_table->count = num_of_levels;
+
+ for (i = 0; i < num_of_levels; i++) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | i));
+ if (ret) {
+ pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
+ return ret;
+ }
+ smu_read_smc_arg(smu, &clk);
+ if (!clk) {
+ pr_err("[GetDpmFreqByIndex] clk value is invalid!");
+ return -EINVAL;
+ }
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+ }
+ return 0;
+}
+
+static void vega20_init_single_dpm_state(struct vega20_dpm_state *dpm_state)
+{
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
+}
+
+static int vega20_set_default_dpm_table(struct smu_context *smu)
+{
+ int ret;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ /* socclk */
+ single_dpm_table = &(dpm_table->soc_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_SOCCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get socclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* gfxclk */
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_GFXCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get gfxclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* memclk */
+ single_dpm_table = &(dpm_table->mem_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_UCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get memclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* eclk */
+ single_dpm_table = &(dpm_table->eclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_ECLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get eclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.eclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* vclk */
+ single_dpm_table = &(dpm_table->vclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_VCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get vclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dclk */
+ single_dpm_table = &(dpm_table->dclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_DCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dcefclk */
+ single_dpm_table = &(dpm_table->dcef_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_DCEFCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dcefclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* pixclk */
+ single_dpm_table = &(dpm_table->pixel_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_PIXCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get pixclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dispclk */
+ single_dpm_table = &(dpm_table->display_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_DISPCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dispclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* phyclk */
+ single_dpm_table = &(dpm_table->phy_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_PHYCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get phyclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* fclk */
+ single_dpm_table = &(dpm_table->fclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_FCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_FCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get fclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ memcpy(smu_dpm->golden_dpm_context, dpm_table,
+ sizeof(struct vega20_dpm_table));
+
+ return 0;
+}
+
+static int vega20_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *gfx_table = NULL;
+ struct vega20_single_dpm_table *mem_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+ gfx_table = &(dpm_table->gfx_table);
+ mem_table = &(dpm_table->mem_table);
+
+ smu->pstate_sclk = gfx_table->dpm_levels[0].value;
+ smu->pstate_mclk = mem_table->dpm_levels[0].value;
+
+ if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
+ smu->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ smu->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ smu->pstate_sclk = smu->pstate_sclk * 100;
+ smu->pstate_mclk = smu->pstate_mclk * 100;
+
+ return 0;
+}
+
+static int vega20_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct vega20_single_dpm_table *dpm_table)
+{
+ int i, count;
+
+ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
+ clocks->num_levels = count;
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ return 0;
+}
+
+static int vega20_print_clk_levels(struct smu_context *smu,
+ enum pp_clock_type type, char *buf)
+{
+ int i, now, size = 0;
+ int ret = 0;
+ uint32_t gen_speed, lane_width;
+ struct amdgpu_device *adev = smu->adev;
+ struct pp_clock_levels_with_latency clocks;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ switch (type) {
+ case PP_SCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current gfx clk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->gfx_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get gfx clk levels Failed!");
+ return ret;
+ }
+
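+ /* 'now' is reported in 10 kHz units while the table stores kHz, hence the factor of 10 below. */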
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_MCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current mclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_SOCCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_SOCCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current socclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->soc_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get socclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_FCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_FCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current fclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->fclk_table);
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ (single_dpm_table->dpm_levels[i].value == now / 100)
+ ? "*" : "");
+ break;
+
+ case PP_DCEFCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_DCEFCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current dcefclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->dcef_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get dcefclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+
+ case PP_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
+ (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
+ (pptable->PcieLaneCount[i] == 1) ? "x1" :
+ (pptable->PcieLaneCount[i] == 2) ? "x2" :
+ (pptable->PcieLaneCount[i] == 3) ? "x4" :
+ (pptable->PcieLaneCount[i] == 4) ? "x8" :
+ (pptable->PcieLaneCount[i] == 5) ? "x12" :
+ (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == pptable->PcieGenSpeed[i]) &&
+ (lane_width == pptable->PcieLaneCount[i]) ?
+ "*" : "");
+ break;
+
+ case OD_SCLK:
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ od_table->GfxclkFmin);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->GfxclkFmax);
+ }
+
+ break;
+
+ case OD_MCLK:
+ if (od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->UclkFmax);
+ }
+
+ break;
+
+ case OD_VDDC_CURVE:
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
+ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq1,
+ od_table->GfxclkVolt1 / VOLTAGE_SCALE);
+ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq2,
+ od_table->GfxclkVolt2 / VOLTAGE_SCALE);
+ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq3,
+ od_table->GfxclkVolt3 / VOLTAGE_SCALE);
+ }
+
+ break;
+
+ case OD_RANGE:
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value);
+ }
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ clocks.data[0].clocks_in_khz / 1000,
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value);
+ }
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+ return size;
+}
+
+static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
+ uint32_t feature_mask)
+{
+ struct vega20_dpm_table *dpm_table;
+ struct vega20_single_dpm_table *single_dpm_table;
+ uint32_t freq;
+ int ret = 0;
+
+ dpm_table = smu->smu_dpm.dpm_context;
+
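+ /* Each SetSoft{Min,Max}ByFreq parameter packs the clock ID in the upper
+ * 16 bits and the target frequency (MHz) in the lower 16 bits. */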
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
+ single_dpm_table = &(dpm_table->gfx_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s gfxclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
+ single_dpm_table = &(dpm_table->mem_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_UCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s memclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
+ single_dpm_table = &(dpm_table->soc_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s socclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_FCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_FCLK_MASK)) {
+ single_dpm_table = &(dpm_table->fclk_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_FCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s fclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
+ single_dpm_table = &(dpm_table->dcef_table);
+ freq = single_dpm_table->dpm_state.hard_min_level;
+ if (!max) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set hard min dcefclk !\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int vega20_force_clk_levels(struct smu_context *smu,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct vega20_dpm_table *dpm_table;
+ struct vega20_single_dpm_table *single_dpm_table;
+ uint32_t soft_min_level, soft_max_level, hard_min_level;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ int ret = 0;
+
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_info("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&(smu->mutex));
+
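+ /* The mask is a bitmap of requested levels: the lowest set bit becomes
+ * the soft minimum, the highest the soft maximum. */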
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ dpm_table = smu->smu_dpm.dpm_context;
+
+ switch (type) {
+ case PP_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_SOCCLK:
+ single_dpm_table = &(dpm_table->soc_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_FCLK:
+ single_dpm_table = &(dpm_table->fclk_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_DCEFCLK:
+ hard_min_level = soft_min_level;
+ single_dpm_table = &(dpm_table->dcef_table);
+
+ if (hard_min_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ hard_min_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.hard_min_level =
+ single_dpm_table->dpm_levels[hard_min_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_DCEFCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload boot level to lowest!\n");
+
+ break;
+
+ case PP_PCIE:
+ if (soft_min_level >= NUM_LINK_LEVELS ||
+ soft_max_level >= NUM_LINK_LEVELS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ if (ret)
+ pr_err("Failed to set min link dpm level!\n");
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
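+/*
+ * Map an amd_pp clock type onto the matching vega20 DPM table and return
+ * its levels (frequency plus latency) through the clocks structure.
+ */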
+static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ mutex_lock(&smu->mutex);
+
+ switch (type) {
+ case amd_pp_sys_clock:
+ single_dpm_table = &(dpm_table->gfx_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_mem_clock:
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_dcef_clock:
+ single_dpm_table = &(dpm_table->dcef_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_soc_clock:
+ single_dpm_table = &(dpm_table->soc_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
+ uint32_t *voltage,
+ uint32_t freq)
+{
+ int ret;
+
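+	/*
+	 * Query the AVFS voltage for the given GFXCLK frequency. The message
+	 * argument packs the curve selector in bits [31:24], the hot-curve
+	 * temperature in bits [23:16] and the frequency in the low bits; the
+	 * returned value is divided by VOLTAGE_SCALE before being reported.
+	 */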
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetAVFSVoltageByDpm,
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ if (ret) {
+ pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, voltage);
+ *voltage = *voltage / VOLTAGE_SCALE;
+
+ return 0;
+}
+
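+/*
+ * Build the default OD8 settings cache: allocate the od8_settings array and,
+ * for each overdrive feature the powerplay table advertises (and whose
+ * backing DPM/fan/thermal feature is enabled), record its feature id along
+ * with the default, min and max values taken from the overdrive table and
+ * the powerplay limits.
+ */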
+static int vega20_set_default_od8_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table = (OverDriveTable_t *)(table_context->overdrive_table);
+ struct vega20_od8_settings *od8_settings = NULL;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ int i, ret;
+
+ if (table_context->od8_settings)
+ return -EINVAL;
+
+ table_context->od8_settings = kzalloc(sizeof(struct vega20_od8_settings), GFP_KERNEL);
+
+ if (!table_context->od8_settings)
+ return -ENOMEM;
+
+ od8_settings = (struct vega20_od8_settings *)table_context->od8_settings;
+
+	if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
+ table_context->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
+ table_context->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
+ table_context->od_settings_min[OD8_SETTING_GFXCLK_FMIN])) {
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
+ OD8_GFXCLK_LIMITS;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
+ OD8_GFXCLK_LIMITS;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
+ od_table->GfxclkFmin;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
+ od_table->GfxclkFmax;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
+ (table_context->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
+ smc_pptable->MinVoltageGfx / VOLTAGE_SCALE) &&
+ (table_context->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
+ smc_pptable->MaxVoltageGfx / VOLTAGE_SCALE) &&
+ (table_context->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] <=
+ table_context->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3])) {
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
+ OD8_GFXCLK_CURVE;
+
+ od_table->GfxclkFreq1 = od_table->GfxclkFmin;
+ od_table->GfxclkFreq2 = (od_table->GfxclkFmin + od_table->GfxclkFmax) / 2;
+ od_table->GfxclkFreq3 = od_table->GfxclkFmax;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
+ od_table->GfxclkFreq1;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
+ od_table->GfxclkFreq2;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
+ od_table->GfxclkFreq3;
+
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value,
+ od_table->GfxclkFreq1);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0;
+ od_table->GfxclkVolt1 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
+ * VOLTAGE_SCALE;
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value,
+ od_table->GfxclkFreq2);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0;
+ od_table->GfxclkVolt2 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
+ * VOLTAGE_SCALE;
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value,
+ od_table->GfxclkFreq3);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0;
+ od_table->GfxclkVolt3 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
+ * VOLTAGE_SCALE;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
+ table_context->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
+ table_context->od_settings_min[OD8_SETTING_UCLK_FMAX])) {
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id =
+ OD8_UCLK_MAX;
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
+ od_table->UclkFmax;
+ }
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
+ table_context->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
+ table_context->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
+ table_context->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100) {
+ od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id =
+ OD8_POWER_LIMIT;
+ od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
+ od_table->OverDrivePct;
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
+ OD8_ACOUSTIC_LIMIT_SCLK;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
+ od_table->FanMaximumRpm;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_MIN_SPEED])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
+ OD8_FAN_SPEED_MIN;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
+ od_table->FanMinimumPwm * smc_pptable->FanMaximumRpm / 100;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_THERMAL_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
+ OD8_TEMPERATURE_FAN;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
+ od_table->FanTargetTemperature;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
+ table_context->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
+ table_context->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX])) {
+ od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
+ OD8_TEMPERATURE_SYSTEM;
+ od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
+ od_table->MaxOpTemp;
+ }
+ }
+
+ for (i = 0; i < OD8_SETTING_COUNT; i++) {
+ if (od8_settings->od8_settings_array[i].feature_id) {
+ od8_settings->od8_settings_array[i].min_value =
+ table_context->od_settings_min[i];
+ od8_settings->od8_settings_array[i].max_value =
+ table_context->od_settings_max[i];
+ od8_settings->od8_settings_array[i].current_value =
+ od8_settings->od8_settings_array[i].default_value;
+ } else {
+ od8_settings->od8_settings_array[i].min_value = 0;
+ od8_settings->od8_settings_array[i].max_value = 0;
+ od8_settings->od8_settings_array[i].current_value = 0;
+ }
+ }
+
+ return 0;
+}
+
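+/*
+ * Report how far the current top sclk/mclk level has been overdriven, as a
+ * percentage of the golden (default) top DPM level.
+ */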
+static int vega20_get_od_percentage(struct smu_context *smu,
+ enum pp_clock_type type)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_dpm_table *golden_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_single_dpm_table *golden_dpm_table;
+ int value, golden_value;
+
+ dpm_table = smu_dpm->dpm_context;
+ golden_table = smu_dpm->golden_dpm_context;
+
+ switch (type) {
+ case OD_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+ golden_dpm_table = &(golden_table->gfx_table);
+ break;
+ case OD_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+ golden_dpm_table = &(golden_table->mem_table);
+ break;
+	default:
+		return -EINVAL;
+ }
+
+ value = single_dpm_table->dpm_levels[single_dpm_table->count - 1].value;
+ golden_value = golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value;
+
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
+ return value;
+}
+
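+/*
+ * Translate a UMD profiling level into the sclk/mclk/socclk DPM level
+ * indices to force: the standard pstate levels by default, level 0 for the
+ * MIN_SCLK/MIN_MCLK profiles and the top level for the PEAK profile.
+ */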
+static int
+vega20_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+ struct vega20_dpm_table *dpm_table = (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+ struct vega20_single_dpm_table *gfx_dpm_table;
+ struct vega20_single_dpm_table *mem_dpm_table;
+ struct vega20_single_dpm_table *soc_dpm_table;
+
+ if (!smu->smu_dpm.dpm_context)
+ return -EINVAL;
+
+ gfx_dpm_table = &dpm_table->gfx_table;
+ mem_dpm_table = &dpm_table->mem_table;
+ soc_dpm_table = &dpm_table->soc_table;
+
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
+ if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
+ *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
+ *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
+ }
+
+ return 0;
+}
+
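+/*
+ * Pin UCLK at its highest DPM level by raising the hard minimum to the top
+ * entry of the memory DPM table (only when UCLK DPM is enabled).
+ */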
+static int
+vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
+ struct vega20_single_dpm_table *dpm_table)
+{
+ int ret = 0;
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+	if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ if (dpm_table->count <= 0) {
+ pr_err("[%s] Dpm table has no entry!", __func__);
+ return -EINVAL;
+ }
+
+ if (dpm_table->count > NUM_UCLK_DPM_LEVELS) {
+ pr_err("[%s] Dpm table has too many entries!", __func__);
+ return -EINVAL;
+ }
+
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+ if (ret) {
+ pr_err("[%s] Set hard min uclk failed!", __func__);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
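+/*
+ * Prepare for a display configuration change: tell the SMU no displays are
+ * active and park UCLK at its highest level while the new configuration is
+ * applied.
+ */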
+static int vega20_pre_display_config_changed(struct smu_context *smu)
+{
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
+
+ if (!smu->smu_dpm.dpm_context)
+ return -EINVAL;
+
+ smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ ret = vega20_set_uclk_to_highest_dpm_level(smu,
+ &dpm_table->mem_table);
+ if (ret)
+ pr_err("Failed to set uclk to highest dpm level");
+ return ret;
+}
+
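+/*
+ * Apply a new display configuration: upload the watermarks table once it is
+ * available and, when DCEFCLK/SOCCLK DPM are supported, report the active
+ * display count to the SMU.
+ */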
+static int vega20_display_config_changed(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->funcs)
+ return -EINVAL;
+
+ if (!smu->smu_dpm.dpm_context ||
+ !smu->smu_table.tables ||
+ !smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr)
+ return -EINVAL;
+
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu->funcs->write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ smu_feature_is_supported(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_supported(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ smu_send_smc_msg_with_param(smu,
+ SMU_MSG_NumOfDisplays,
+ smu->display_config->num_display);
+ }
+
+ return ret;
+}
+
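+/*
+ * Recompute the soft/hard min and max limits of every DPM table from the
+ * current forced performance level, the UMD pstate levels and the display
+ * configuration (DAL's UCLK hard minimum, mclk-switch and NB pstate
+ * restrictions).
+ */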
+static int vega20_apply_clocks_adjust_rules(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct vega20_dpm_table *dpm_ctx = (struct vega20_dpm_table *)(smu_dpm_ctx->dpm_context);
+ struct vega20_single_dpm_table *dpm_table;
+ bool vblank_too_short = false;
+ bool disable_mclk_switching;
+ uint32_t i, latency;
+
+ disable_mclk_switching = ((1 < smu->display_config->num_display) &&
+ !smu->display_config->multi_monitor_in_sync) || vblank_too_short;
+ latency = smu->display_config->dce_tolerable_mclk_in_active_latency;
+
+ /* gfxclk */
+ dpm_table = &(dpm_ctx->gfx_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* memclk */
+ dpm_table = &(dpm_ctx->mem_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* honour DAL's UCLK Hardmin */
+ if (dpm_table->dpm_state.hard_min_level < (smu->display_config->min_mem_set_clock / 100))
+ dpm_table->dpm_state.hard_min_level = smu->display_config->min_mem_set_clock / 100;
+
+ /* Hardmin is dependent on displayconfig */
+ if (disable_mclk_switching) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ for (i = 0; i < smu_dpm_ctx->mclk_latency_table->count - 1; i++) {
+ if (smu_dpm_ctx->mclk_latency_table->entries[i].latency <= latency) {
+ if (dpm_table->dpm_levels[i].value >= (smu->display_config->min_mem_set_clock / 100)) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+ break;
+ }
+ }
+ }
+ }
+
+ if (smu->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ /* vclk */
+ dpm_table = &(dpm_ctx->vclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* dclk */
+ dpm_table = &(dpm_ctx->dclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* socclk */
+ dpm_table = &(dpm_ctx->soc_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* eclk */
+ dpm_table = &(dpm_ctx->eclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ return 0;
+}
+
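+/*
+ * Forward DAL's minimum clock requirements to the SMU: request the DCEFCLK
+ * hard minimum / deep-sleep clock and raise the UCLK hard minimum to the
+ * display's minimum memory clock.
+ */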
+static int
+vega20_notify_smc_dispaly_config(struct smu_context *smu)
+{
+ struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
+ struct vega20_single_dpm_table *memtable = &dpm_table->mem_table;
+ struct smu_clocks min_clocks = {0};
+ struct pp_display_clock_request clock_req;
+ int ret = 0;
+
+ min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
+ min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
+ min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
+
+ if (smu_feature_is_supported(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ clock_req.clock_type = amd_pp_dcef_clock;
+ clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
+ if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) {
+ if (smu_feature_is_supported(smu, FEATURE_DS_DCEFCLK_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinDeepSleepDcefclk,
+ min_clocks.dcef_clock_in_sr/100);
+ if (ret) {
+ pr_err("Attempt to set divider for DCEFCLK Failed!");
+ return ret;
+ }
+ }
+ } else {
+ pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+ if (ret) {
+ pr_err("[%s] Set hard min uclk failed!", __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
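+/*
+ * Return the index of the lowest enabled DPM level; if no level is enabled,
+ * fall back to level 0 and mark it enabled.
+ */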
+static uint32_t vega20_find_lowest_dpm_level(struct vega20_single_dpm_table *table)
+{
+ uint32_t i;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
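+/*
+ * Return the index of the highest enabled DPM level, guarding against empty
+ * or oversized tables; if no level is enabled, fall back to level 0.
+ */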
+static uint32_t vega20_find_highest_dpm_level(struct vega20_single_dpm_table *table)
+{
+ int i = 0;
+
+ if (!table) {
+ pr_err("[%s] DPM Table does not exist!", __func__);
+ return 0;
+ }
+ if (table->count <= 0) {
+ pr_err("[%s] DPM Table has no entry!", __func__);
+ return 0;
+ }
+ if (table->count > MAX_REGULAR_DPM_NUMBER) {
+ pr_err("[%s] DPM Table has too many entries!", __func__);
+ return MAX_REGULAR_DPM_NUMBER - 1;
+ }
+
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
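+/*
+ * Clamp gfx, mem and soc clocks to a single DPM level (the highest or the
+ * lowest enabled one) and push the new soft limits to the SMU.
+ */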
+static int vega20_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ uint32_t soft_level;
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table =
+ (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->gfx_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->gfx_table));
+
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_level].value;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->mem_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->mem_table));
+
+ dpm_table->mem_table.dpm_state.soft_min_level =
+ dpm_table->mem_table.dpm_state.soft_max_level =
+ dpm_table->mem_table.dpm_levels[soft_level].value;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->soc_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->soc_table));
+
+ dpm_table->soc_table.dpm_state.soft_min_level =
+ dpm_table->soc_table.dpm_state.soft_max_level =
+ dpm_table->soc_table.dpm_levels[soft_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload boot level to %s!\n",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ if (ret) {
+		pr_err("Failed to upload dpm max level to %s!\n",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ return ret;
+}
+
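+/*
+ * Restore the full soft min..max range for gfx, mem and soc clocks and
+ * re-upload the limits to the SMU.
+ */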
+static int vega20_unforce_dpm_levels(struct smu_context *smu)
+{
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table =
+ (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->gfx_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->gfx_table));
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_levels[soft_min_level].value;
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_max_level].value;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->mem_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->mem_table));
+	dpm_table->mem_table.dpm_state.soft_min_level =
+		dpm_table->mem_table.dpm_levels[soft_min_level].value;
+	dpm_table->mem_table.dpm_state.soft_max_level =
+		dpm_table->mem_table.dpm_levels[soft_max_level].value;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->soc_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->soc_table));
+ dpm_table->soc_table.dpm_state.soft_min_level =
+ dpm_table->soc_table.dpm_levels[soft_min_level].value;
+ dpm_table->soc_table.dpm_state.soft_max_level =
+ dpm_table->soc_table.dpm_levels[soft_max_level].value;
+
+ ret = smu_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload DPM Bootup Levels!");
+ return ret;
+ }
+
+ ret = smu_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload DPM Max Levels!");
+ return ret;
+ }
+
+ return ret;
+}
+
+static enum amd_dpm_forced_level vega20_get_performance_level(struct smu_context *smu)
+{
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+	if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
+ mutex_lock(&(smu->mutex));
+ smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
+ mutex_unlock(&(smu->mutex));
+ }
+ return smu_dpm_ctx->dpm_level;
+}
+
+static int
+vega20_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ int i;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ for (i = 0; i < smu->adev->num_ip_blocks; i++) {
+ if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
+ break;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
+ ret = smu_handle_task(smu, level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
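+/*
+ * Write a single OD8 setting into the staged OverDriveTable_t. Only the
+ * GFXCLK and UCLK Fmax entries are range checked here; the remaining
+ * settings are stored as-is.
+ */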
+static int vega20_update_specified_od8_value(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+
+ switch (index) {
+ case OD8_SETTING_GFXCLK_FMIN:
+ od_table->GfxclkFmin = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FMAX:
+ if (value < od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].min_value ||
+ value > od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value)
+ return -EINVAL;
+ od_table->GfxclkFmax = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ1:
+ od_table->GfxclkFreq1 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE1:
+ od_table->GfxclkVolt1 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ2:
+ od_table->GfxclkFreq2 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE2:
+ od_table->GfxclkVolt2 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ3:
+ od_table->GfxclkFreq3 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE3:
+ od_table->GfxclkVolt3 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_UCLK_FMAX:
+ if (value < od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].min_value ||
+ value > od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value)
+ return -EINVAL;
+ od_table->UclkFmax = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_POWER_PERCENTAGE:
+ od_table->OverDrivePct = (int16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
+ od_table->FanMaximumRpm = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_MIN_SPEED:
+ od_table->FanMinimumPwm = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_TARGET_TEMP:
+ od_table->FanTargetTemperature = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_OPERATING_TEMP_MAX:
+ od_table->MaxOpTemp = (uint16_t)value;
+ break;
+ }
+
+ return 0;
+}
+
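+/*
+ * Apply an overdrive percentage on top of the golden maximum sclk/mclk:
+ * compute the new Fmax, update the matching OD8 setting, refresh the DPM
+ * table and re-adjust the power state.
+ */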
+static int vega20_set_od_percentage(struct smu_context *smu,
+ enum pp_clock_type type,
+ uint32_t value)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_dpm_table *golden_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_single_dpm_table *golden_dpm_table;
+ uint32_t od_clk, index;
+ int ret = 0;
+ int feature_enabled;
+ PPCLK_e clk_id;
+
+ mutex_lock(&(smu->mutex));
+
+ dpm_table = smu_dpm->dpm_context;
+ golden_table = smu_dpm->golden_dpm_context;
+
+ switch (type) {
+ case OD_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+ golden_dpm_table = &(golden_table->gfx_table);
+ feature_enabled = smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT);
+ clk_id = PPCLK_GFXCLK;
+ index = OD8_SETTING_GFXCLK_FMAX;
+ break;
+ case OD_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+ golden_dpm_table = &(golden_table->mem_table);
+ feature_enabled = smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT);
+ clk_id = PPCLK_UCLK;
+ index = OD8_SETTING_UCLK_FMAX;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto set_od_failed;
+
+ od_clk = golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value * value;
+ od_clk /= 100;
+ od_clk += golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value;
+
+ ret = smu_update_od8_settings(smu, index, od_clk);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to set od clk!\n");
+ goto set_od_failed;
+ }
+
+ if (feature_enabled) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ clk_id);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to refresh dpm table!\n");
+ goto set_od_failed;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+
+ ret = smu_handle_task(smu, smu_dpm->dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+
+set_od_failed:
+ mutex_unlock(&(smu->mutex));
+
+ return ret;
+}
+
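+/*
+ * Handle PP_OD_DPM_TABLE_COMMAND requests (typically issued through the
+ * pp_od_clk_voltage interface): edit the sclk min/max, the mclk max or the
+ * voltage curve points, restore the default overdrive table, or commit the
+ * staged table to the SMU and refresh the gfxclk DPM table.
+ */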
+static int vega20_odn_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+ struct pp_clock_levels_with_latency clocks;
+ int32_t input_index, input_clk, input_vol, i;
+ int od8_id;
+ int ret = 0;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ if (!input) {
+ pr_warn("NULL user input for clock and voltage\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
+ pr_info("Sclk min/max frequency overdrive not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+
+ if (input_index != 0 && input_index != 1) {
+ pr_info("Invalid index %d\n", input_index);
+				pr_info("Only min/max sclk frequency settings (indexed by 0 and 1) are supported\n");
+ return -EINVAL;
+ }
+
+ if (input_clk < od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value ||
+ input_clk > od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value);
+ return -EINVAL;
+ }
+
+ if (input_index == 0 && od_table->GfxclkFmin != input_clk) {
+ od_table->GfxclkFmin = input_clk;
+ table_context->od_gfxclk_update = true;
+ } else if (input_index == 1 && od_table->GfxclkFmax != input_clk) {
+ od_table->GfxclkFmax = input_clk;
+ table_context->od_gfxclk_update = true;
+ }
+ }
+
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ pr_info("Mclk max frequency overdrive not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n",
+ size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+
+ if (input_index != 1) {
+ pr_info("Invalid index %d\n", input_index);
+				pr_info("Only the max Mclk frequency setting (indexed by 1) is supported\n");
+ return -EINVAL;
+ }
+
+ if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
+ input_clk > od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ clocks.data[0].clocks_in_khz / 1000,
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value);
+ return -EINVAL;
+ }
+
+ if (input_index == 1 && od_table->UclkFmax != input_clk) {
+ table_context->od_gfxclk_update = true;
+ od_table->UclkFmax = input_clk;
+ }
+ }
+
+ break;
+
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
+			pr_info("Voltage curve calibration not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 3) {
+ if (i + 3 > size) {
+ pr_info("invalid number of input parameters %d\n",
+ size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+ input_vol = input[i + 2];
+
+ if (input_index > 2) {
+ pr_info("Setting for point %d is not supported\n",
+ input_index + 1);
+				pr_info("Only three points, indexed by 0, 1 and 2, are supported\n");
+ return -EINVAL;
+ }
+
+ od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
+ if (input_clk < od8_settings->od8_settings_array[od8_id].min_value ||
+ input_clk > od8_settings->od8_settings_array[od8_id].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ od8_settings->od8_settings_array[od8_id].min_value,
+ od8_settings->od8_settings_array[od8_id].max_value);
+ return -EINVAL;
+ }
+
+ od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
+ if (input_vol < od8_settings->od8_settings_array[od8_id].min_value ||
+ input_vol > od8_settings->od8_settings_array[od8_id].max_value) {
+				pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
+ input_vol,
+ od8_settings->od8_settings_array[od8_id].min_value,
+ od8_settings->od8_settings_array[od8_id].max_value);
+ return -EINVAL;
+ }
+
+ switch (input_index) {
+ case 0:
+ od_table->GfxclkFreq1 = input_clk;
+ od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
+ break;
+ case 1:
+ od_table->GfxclkFreq2 = input_clk;
+ od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
+ break;
+ case 2:
+ od_table->GfxclkFreq3 = input_clk;
+ od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
+ break;
+ }
+ }
+
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ break;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ /* retrieve updated gfxclk table */
+ if (table_context->od_gfxclk_update) {
+ table_context->od_gfxclk_update = false;
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_GFXCLK);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to refresh dpm table!\n");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+ }
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (type == PP_OD_COMMIT_DPM_TABLE) {
+ mutex_lock(&(smu->mutex));
+ ret = smu_handle_task(smu, smu_dpm->dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+ mutex_unlock(&(smu->mutex));
+ }
+
+ return ret;
+}
+
+static const struct pptable_funcs vega20_ppt_funcs = {
+ .alloc_dpm_context = vega20_allocate_dpm_context,
+ .store_powerplay_table = vega20_store_powerplay_table,
+ .check_powerplay_table = vega20_check_powerplay_table,
+ .append_powerplay_table = vega20_append_powerplay_table,
+ .get_smu_msg_index = vega20_get_smu_msg_index,
+ .run_afll_btc = vega20_run_btc_afll,
+ .get_unallowed_feature_mask = vega20_get_unallowed_feature_mask,
+ .get_current_power_state = vega20_get_current_power_state,
+ .set_default_dpm_table = vega20_set_default_dpm_table,
+ .set_power_state = NULL,
+ .populate_umd_state_clk = vega20_populate_umd_state_clk,
+ .print_clk_levels = vega20_print_clk_levels,
+ .force_clk_levels = vega20_force_clk_levels,
+ .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
+	.set_default_od8_settings = vega20_set_default_od8_settings,
+ .get_od_percentage = vega20_get_od_percentage,
+ .get_performance_level = vega20_get_performance_level,
+ .force_performance_level = vega20_force_performance_level,
+ .update_specified_od8_value = vega20_update_specified_od8_value,
+ .set_od_percentage = vega20_set_od_percentage,
+ .od_edit_dpm_table = vega20_odn_edit_dpm_table,
+ .pre_display_config_changed = vega20_pre_display_config_changed,
+ .display_config_changed = vega20_display_config_changed,
+ .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
+ .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config,
+ .force_dpm_limit_value = vega20_force_dpm_limit_value,
+ .unforce_dpm_levels = vega20_unforce_dpm_levels,
+ .upload_dpm_level = vega20_upload_dpm_level,
+ .get_profiling_clk_mask = vega20_get_profiling_clk_mask,
+};
+
+void vega20_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &vega20_ppt_funcs;
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.h b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
new file mode 100644
index 000000000000..5a0d2af63173
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __VEGA20_PPT_H__
+#define __VEGA20_PPT_H__
+
+#define VEGA20_UMD_PSTATE_GFXCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_SOCCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_MCLK_LEVEL 0x2
+#define VEGA20_UMD_PSTATE_UVDCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_VCEMCLK_LEVEL 0x3
+
+#define MAX_REGULAR_DPM_NUMBER 16
+#define MAX_PCIE_CONF 2
+
+#define VOLTAGE_SCALE 4
+#define AVFS_CURVE 0
+#define OD8_HOTCURVE_TEMPERATURE 85
+
+struct vega20_dpm_level {
+ bool enabled;
+ uint32_t value;
+ uint32_t param1;
+};
+
+struct vega20_dpm_state {
+ uint32_t soft_min_level;
+ uint32_t soft_max_level;
+ uint32_t hard_min_level;
+ uint32_t hard_max_level;
+};
+
+struct vega20_single_dpm_table {
+ uint32_t count;
+ struct vega20_dpm_state dpm_state;
+ struct vega20_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega20_pcie_table {
+ uint16_t count;
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint32_t lclk[MAX_PCIE_CONF];
+};
+
+struct vega20_dpm_table {
+ struct vega20_single_dpm_table soc_table;
+ struct vega20_single_dpm_table gfx_table;
+ struct vega20_single_dpm_table mem_table;
+ struct vega20_single_dpm_table eclk_table;
+ struct vega20_single_dpm_table vclk_table;
+ struct vega20_single_dpm_table dclk_table;
+ struct vega20_single_dpm_table dcef_table;
+ struct vega20_single_dpm_table pixel_table;
+ struct vega20_single_dpm_table display_table;
+ struct vega20_single_dpm_table phy_table;
+ struct vega20_single_dpm_table fclk_table;
+ struct vega20_pcie_table pcie_table;
+};
+
+enum OD8_FEATURE_ID
+{
+ OD8_GFXCLK_LIMITS = 1 << 0,
+ OD8_GFXCLK_CURVE = 1 << 1,
+ OD8_UCLK_MAX = 1 << 2,
+ OD8_POWER_LIMIT = 1 << 3,
+ OD8_ACOUSTIC_LIMIT_SCLK = 1 << 4, //FanMaximumRpm
+ OD8_FAN_SPEED_MIN = 1 << 5, //FanMinimumPwm
+ OD8_TEMPERATURE_FAN = 1 << 6, //FanTargetTemperature
+ OD8_TEMPERATURE_SYSTEM = 1 << 7, //MaxOpTemp
+ OD8_MEMORY_TIMING_TUNE = 1 << 8,
+ OD8_FAN_ZERO_RPM_CONTROL = 1 << 9
+};
+
+enum OD8_SETTING_ID
+{
+ OD8_SETTING_GFXCLK_FMIN = 0,
+ OD8_SETTING_GFXCLK_FMAX,
+ OD8_SETTING_GFXCLK_FREQ1,
+ OD8_SETTING_GFXCLK_VOLTAGE1,
+ OD8_SETTING_GFXCLK_FREQ2,
+ OD8_SETTING_GFXCLK_VOLTAGE2,
+ OD8_SETTING_GFXCLK_FREQ3,
+ OD8_SETTING_GFXCLK_VOLTAGE3,
+ OD8_SETTING_UCLK_FMAX,
+ OD8_SETTING_POWER_PERCENTAGE,
+ OD8_SETTING_FAN_ACOUSTIC_LIMIT,
+ OD8_SETTING_FAN_MIN_SPEED,
+ OD8_SETTING_FAN_TARGET_TEMP,
+ OD8_SETTING_OPERATING_TEMP_MAX,
+ OD8_SETTING_AC_TIMING,
+ OD8_SETTING_FAN_ZERO_RPM_CONTROL,
+ OD8_SETTING_COUNT
+};
+
+struct vega20_od8_single_setting {
+ uint32_t feature_id;
+ int32_t min_value;
+ int32_t max_value;
+ int32_t current_value;
+ int32_t default_value;
+};
+
+struct vega20_od8_settings {
+ struct vega20_od8_single_setting od8_settings_array[OD8_SETTING_COUNT];
+};
+
+extern void vega20_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h
index b35fc5db866b..1053b11352eb 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_product.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_product.h
@@ -20,4 +20,16 @@
/* Mali-display product IDs */
#define MALIDP_D71_PRODUCT_ID 0x0071
+union komeda_config_id {
+ struct {
+ __u32 max_line_sz:16,
+ n_pipelines:2,
+ n_scalers:2, /* number of scalers per pipeline */
+ n_layers:3, /* number of layers per pipeline */
+ n_richs:3, /* number of rich layers per pipeline */
+ reserved_bits:6;
+ };
+ __u32 value;
+};
+
#endif /* _MALIDP_PRODUCT_H_ */
diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
index 63cc47cefcf8..8cfd91196e15 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
@@ -7,10 +7,41 @@
#ifndef _MALIDP_UTILS_
#define _MALIDP_UTILS_
+#include <linux/delay.h>
+
#define has_bit(nr, mask) (BIT(nr) & (mask))
#define has_bits(bits, mask) (((bits) & (mask)) == (bits))
#define dp_for_each_set_bit(bit, mask) \
for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
+#define dp_wait_cond(__cond, __tries, __min_range, __max_range) \
+({ \
+ int num_tries = __tries; \
+	while (!(__cond) && (num_tries > 0)) { \
+ usleep_range(__min_range, __max_range); \
+ if (__cond) \
+ break; \
+ num_tries--; \
+ } \
+ num_tries; \
+})
+
+/* the restriction of range is [start, end] */
+struct malidp_range {
+ u32 start;
+ u32 end;
+};
+
+static inline void set_range(struct malidp_range *rg, u32 start, u32 end)
+{
+ rg->start = start;
+ rg->end = end;
+}
+
+static inline bool in_range(struct malidp_range *rg, u32 v)
+{
+ return (v >= rg->start) && (v <= rg->end);
+}
+
#endif /* _MALIDP_UTILS_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 1b875e5dc0f6..412eeba8c39f 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -1,14 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := \
- -I$(src)/../include \
- -I$(src)
+ -I $(srctree)/$(src)/../include \
+ -I $(srctree)/$(src)
komeda-y := \
komeda_drv.o \
komeda_dev.o \
komeda_format_caps.o \
komeda_pipeline.o \
+ komeda_pipeline_state.o \
komeda_framebuffer.o \
komeda_kms.o \
komeda_crtc.o \
@@ -16,6 +17,7 @@ komeda-y := \
komeda_private_obj.o
komeda-y += \
- d71/d71_dev.o
+ d71/d71_dev.o \
+ d71/d71_component.o
obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
new file mode 100644
index 000000000000..031e5f305a3c
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#include <drm/drm_print.h>
+#include "d71_dev.h"
+#include "komeda_kms.h"
+#include "malidp_io.h"
+#include "komeda_framebuffer.h"
+
+static void get_resources_id(u32 hw_id, u32 *pipe_id, u32 *comp_id)
+{
+ u32 id = BLOCK_INFO_BLK_ID(hw_id);
+ u32 pipe = id;
+
+ switch (BLOCK_INFO_BLK_TYPE(hw_id)) {
+ case D71_BLK_TYPE_LPU_WB_LAYER:
+ id = KOMEDA_COMPONENT_WB_LAYER;
+ break;
+ case D71_BLK_TYPE_CU_SPLITTER:
+ id = KOMEDA_COMPONENT_SPLITTER;
+ break;
+ case D71_BLK_TYPE_CU_SCALER:
+ pipe = id / D71_PIPELINE_MAX_SCALERS;
+ id %= D71_PIPELINE_MAX_SCALERS;
+ id += KOMEDA_COMPONENT_SCALER0;
+ break;
+ case D71_BLK_TYPE_CU:
+ id += KOMEDA_COMPONENT_COMPIZ0;
+ break;
+ case D71_BLK_TYPE_LPU_LAYER:
+ pipe = id / D71_PIPELINE_MAX_LAYERS;
+ id %= D71_PIPELINE_MAX_LAYERS;
+ id += KOMEDA_COMPONENT_LAYER0;
+ break;
+ case D71_BLK_TYPE_DOU_IPS:
+ id += KOMEDA_COMPONENT_IPS0;
+ break;
+ case D71_BLK_TYPE_CU_MERGER:
+ id = KOMEDA_COMPONENT_MERGER;
+ break;
+ case D71_BLK_TYPE_DOU:
+ id = KOMEDA_COMPONENT_TIMING_CTRLR;
+ break;
+ default:
+ id = 0xFFFFFFFF;
+ }
+
+ if (comp_id)
+ *comp_id = id;
+
+ if (pipe_id)
+ *pipe_id = pipe;
+}
+
+static u32 get_valid_inputs(struct block_header *blk)
+{
+ u32 valid_inputs = 0, comp_id;
+ int i;
+
+ for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++) {
+ get_resources_id(blk->input_ids[i], NULL, &comp_id);
+ if (comp_id == 0xFFFFFFFF)
+ continue;
+ valid_inputs |= BIT(comp_id);
+ }
+
+ return valid_inputs;
+}
+
+static void get_values_from_reg(void __iomem *reg, u32 offset,
+ u32 count, u32 *val)
+{
+ u32 i, addr;
+
+ for (i = 0; i < count; i++) {
+ addr = offset + (i << 2);
+ /* 0xA4 is WO register */
+ if (addr != 0xA4)
+ val[i] = malidp_read32(reg, addr);
+ else
+ val[i] = 0xDEADDEAD;
+ }
+}
+
+static void dump_block_header(struct seq_file *sf, void __iomem *reg)
+{
+ struct block_header hdr;
+ u32 i, n_input, n_output;
+
+ d71_read_block_header(reg, &hdr);
+ seq_printf(sf, "BLOCK_INFO:\t\t0x%X\n", hdr.block_info);
+ seq_printf(sf, "PIPELINE_INFO:\t\t0x%X\n", hdr.pipeline_info);
+
+ n_output = PIPELINE_INFO_N_OUTPUTS(hdr.pipeline_info);
+ n_input = PIPELINE_INFO_N_VALID_INPUTS(hdr.pipeline_info);
+
+ for (i = 0; i < n_input; i++)
+ seq_printf(sf, "VALID_INPUT_ID%u:\t0x%X\n",
+ i, hdr.input_ids[i]);
+
+ for (i = 0; i < n_output; i++)
+ seq_printf(sf, "OUTPUT_ID%u:\t\t0x%X\n",
+ i, hdr.output_ids[i]);
+}
+
+static u32 to_rot_ctrl(u32 rot)
+{
+ u32 lr_ctrl = 0;
+
+ switch (rot & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ lr_ctrl |= L_ROT(L_ROT_R0);
+ break;
+ case DRM_MODE_ROTATE_90:
+ lr_ctrl |= L_ROT(L_ROT_R90);
+ break;
+ case DRM_MODE_ROTATE_180:
+ lr_ctrl |= L_ROT(L_ROT_R180);
+ break;
+ case DRM_MODE_ROTATE_270:
+ lr_ctrl |= L_ROT(L_ROT_R270);
+ break;
+ }
+
+ if (rot & DRM_MODE_REFLECT_X)
+ lr_ctrl |= L_HFLIP;
+ if (rot & DRM_MODE_REFLECT_Y)
+ lr_ctrl |= L_VFLIP;
+
+ return lr_ctrl;
+}
+
+static inline u32 to_d71_input_id(struct komeda_component_output *output)
+{
+ struct komeda_component *comp = output->component;
+
+ return comp ? (comp->hw_id + output->output_port) : 0;
+}
+
+static void d71_layer_disable(struct komeda_component *c)
+{
+ malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0);
+}
+
+static void d71_layer_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(state);
+ struct drm_plane_state *plane_st = state->plane->state;
+ struct drm_framebuffer *fb = plane_st->fb;
+ struct komeda_fb *kfb = to_kfb(fb);
+ u32 __iomem *reg = c->reg;
+ u32 ctrl_mask = L_EN | L_ROT(L_ROT_R270) | L_HFLIP | L_VFLIP | L_TBU_EN;
+ u32 ctrl = L_EN | to_rot_ctrl(st->rot);
+ int i;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ malidp_write32(reg,
+ BLK_P0_PTR_LOW + i * LAYER_PER_PLANE_REGS * 4,
+ lower_32_bits(st->addr[i]));
+ malidp_write32(reg,
+ BLK_P0_PTR_HIGH + i * LAYER_PER_PLANE_REGS * 4,
+ upper_32_bits(st->addr[i]));
+ if (i >= 2)
+ break;
+
+ malidp_write32(reg,
+ BLK_P0_STRIDE + i * LAYER_PER_PLANE_REGS * 4,
+ fb->pitches[i] & 0xFFFF);
+ }
+
+ malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id);
+ malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
+
+ malidp_write32_mask(reg, BLK_CONTROL, ctrl_mask, ctrl);
+}
+
+static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[15], i;
+ bool rich, rgb2rgb;
+ char *prefix;
+
+ get_values_from_reg(c->reg, LAYER_INFO, 1, &v[14]);
+ if (v[14] & 0x1) {
+ rich = true;
+ prefix = "LR_";
+ } else {
+ rich = false;
+ prefix = "LS_";
+ }
+
+ rgb2rgb = !!(v[14] & L_INFO_CM);
+
+ dump_block_header(sf, c->reg);
+
+ seq_printf(sf, "%sLAYER_INFO:\t\t0x%X\n", prefix, v[14]);
+
+ get_values_from_reg(c->reg, 0xD0, 1, v);
+ seq_printf(sf, "%sCONTROL:\t\t0x%X\n", prefix, v[0]);
+ if (rich) {
+ get_values_from_reg(c->reg, 0xD4, 1, v);
+ seq_printf(sf, "LR_RICH_CONTROL:\t0x%X\n", v[0]);
+ }
+ get_values_from_reg(c->reg, 0xD8, 4, v);
+ seq_printf(sf, "%sFORMAT:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sIT_COEFFTAB:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sIN_SIZE:\t\t0x%X\n", prefix, v[2]);
+ seq_printf(sf, "%sPALPHA:\t\t0x%X\n", prefix, v[3]);
+
+ get_values_from_reg(c->reg, 0x100, 3, v);
+ seq_printf(sf, "%sP0_PTR_LOW:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sP0_PTR_HIGH:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sP0_STRIDE:\t\t0x%X\n", prefix, v[2]);
+
+ get_values_from_reg(c->reg, 0x110, 2, v);
+ seq_printf(sf, "%sP1_PTR_LOW:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sP1_PTR_HIGH:\t\t0x%X\n", prefix, v[1]);
+ if (rich) {
+ get_values_from_reg(c->reg, 0x118, 1, v);
+ seq_printf(sf, "LR_P1_STRIDE:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0x120, 2, v);
+ seq_printf(sf, "LR_P2_PTR_LOW:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "LR_P2_PTR_HIGH:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0x130, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "LR_YUV_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+ }
+
+ if (rgb2rgb) {
+ get_values_from_reg(c->reg, LAYER_RGB_RGB_COEFF0, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "LS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+ }
+
+ get_values_from_reg(c->reg, 0x160, 3, v);
+ seq_printf(sf, "%sAD_CONTROL:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sAD_H_CROP:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
+}
+
+static struct komeda_component_funcs d71_layer_funcs = {
+ .update = d71_layer_update,
+ .disable = d71_layer_disable,
+ .dump_register = d71_layer_dump,
+};
+
+static int d71_layer_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_layer *layer;
+ u32 pipe_id, layer_id, layer_info;
+
+ get_resources_id(blk->block_info, &pipe_id, &layer_id);
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*layer),
+ layer_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_layer_funcs, 0,
+ get_valid_inputs(blk),
+ 1, reg, "LPU%d_LAYER%d", pipe_id, layer_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add layer component\n");
+ return PTR_ERR(c);
+ }
+
+ layer = to_layer(c);
+ layer_info = malidp_read32(reg, LAYER_INFO);
+
+ if (layer_info & L_INFO_RF)
+ layer->layer_type = KOMEDA_FMT_RICH_LAYER;
+ else
+ layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
+
+ set_range(&layer->hsize_in, 4, d71->max_line_size);
+ set_range(&layer->vsize_in, 4, d71->max_vsize);
+
+ malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
+
+ layer->supported_rots = DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK;
+
+ return 0;
+}
+
+static int d71_wb_layer_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+	DRM_DEBUG("Detected D71_Wb_Layer.\n");
+
+ return 0;
+}
+
+static void d71_component_disable(struct komeda_component *c)
+{
+ u32 __iomem *reg = c->reg;
+ u32 i;
+
+ malidp_write32(reg, BLK_CONTROL, 0);
+
+ for (i = 0; i < c->max_active_inputs; i++)
+ malidp_write32(reg, BLK_INPUT_ID0 + (i << 2), 0);
+}
+
+static void compiz_enable_input(u32 __iomem *id_reg,
+ u32 __iomem *cfg_reg,
+ u32 input_hw_id,
+ struct komeda_compiz_input_cfg *cin)
+{
+ u32 ctrl = CU_INPUT_CTRL_EN;
+ u8 blend = cin->pixel_blend_mode;
+
+ if (blend == DRM_MODE_BLEND_PIXEL_NONE)
+ ctrl |= CU_INPUT_CTRL_PAD;
+ else if (blend == DRM_MODE_BLEND_PREMULTI)
+ ctrl |= CU_INPUT_CTRL_PMUL;
+
+ ctrl |= CU_INPUT_CTRL_ALPHA(cin->layer_alpha);
+
+ malidp_write32(id_reg, BLK_INPUT_ID0, input_hw_id);
+
+ malidp_write32(cfg_reg, CU_INPUT0_SIZE,
+ HV_SIZE(cin->hsize, cin->vsize));
+ malidp_write32(cfg_reg, CU_INPUT0_OFFSET,
+ HV_OFFSET(cin->hoffset, cin->voffset));
+ malidp_write32(cfg_reg, CU_INPUT0_CONTROL, ctrl);
+}
+
+static void d71_compiz_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_compiz_state *st = to_compiz_st(state);
+ u32 __iomem *reg = c->reg;
+ u32 __iomem *id_reg, *cfg_reg;
+ u32 index, input_hw_id;
+
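+	/* The input ID registers are one word apart, while each input's
+	 * SIZE/OFFSET/CONTROL group spans CU_PER_INPUT_REGS words.
+	 */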
+ for_each_changed_input(state, index) {
+ id_reg = reg + index;
+ cfg_reg = reg + index * CU_PER_INPUT_REGS;
+ input_hw_id = to_d71_input_id(&state->inputs[index]);
+ if (state->active_inputs & BIT(index)) {
+ compiz_enable_input(id_reg, cfg_reg,
+ input_hw_id, &st->cins[index]);
+ } else {
+ malidp_write32(id_reg, BLK_INPUT_ID0, 0);
+ malidp_write32(cfg_reg, CU_INPUT0_CONTROL, 0);
+ }
+ }
+
+ malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+}
+
+static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[8], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0x80, 5, v);
+ for (i = 0; i < 5; i++)
+ seq_printf(sf, "CU_INPUT_ID%u:\t\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0xA0, 5, v);
+ seq_printf(sf, "CU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "CU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "CU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "CU_STATUS:\t\t0x%X\n", v[4]);
+
+ get_values_from_reg(c->reg, 0xD0, 2, v);
+ seq_printf(sf, "CU_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_SIZE:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0xDC, 1, v);
+ seq_printf(sf, "CU_BG_COLOR:\t\t0x%X\n", v[0]);
+
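+	/* the per-input SIZE/OFFSET/CONTROL groups start at 0xE0 and are
+	 * 0x10 apart; v[4] is reused as the running register offset.
+	 */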
+ for (i = 0, v[4] = 0xE0; i < 5; i++, v[4] += 0x10) {
+ get_values_from_reg(c->reg, v[4], 3, v);
+ seq_printf(sf, "CU_INPUT%u_SIZE:\t\t0x%X\n", i, v[0]);
+ seq_printf(sf, "CU_INPUT%u_OFFSET:\t0x%X\n", i, v[1]);
+ seq_printf(sf, "CU_INPUT%u_CONTROL:\t0x%X\n", i, v[2]);
+ }
+
+ get_values_from_reg(c->reg, 0x130, 2, v);
+ seq_printf(sf, "CU_USER_LOW:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
+}
+
+static struct komeda_component_funcs d71_compiz_funcs = {
+ .update = d71_compiz_update,
+ .disable = d71_component_disable,
+ .dump_register = d71_compiz_dump,
+};
+
+static int d71_compiz_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_compiz *compiz;
+ u32 pipe_id, comp_id;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*compiz),
+ comp_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_compiz_funcs,
+ CU_NUM_INPUT_IDS, get_valid_inputs(blk),
+ CU_NUM_OUTPUT_IDS, reg,
+ "CU%d", pipe_id);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ compiz = to_compiz(c);
+
+ set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size);
+ set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+
+ return 0;
+}
+
+static void d71_improc_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_improc_state *st = to_improc_st(state);
+ u32 __iomem *reg = c->reg;
+ u32 index, input_hw_id;
+
+ for_each_changed_input(state, index) {
+ input_hw_id = state->active_inputs & BIT(index) ?
+ to_d71_input_id(&state->inputs[index]) : 0;
+ malidp_write32(reg, BLK_INPUT_ID0 + index * 4, input_hw_id);
+ }
+
+ malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+}
+
+static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[12], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0x80, 2, v);
+ seq_printf(sf, "IPS_INPUT_ID0:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "IPS_INPUT_ID1:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0xC0, 1, v);
+ seq_printf(sf, "IPS_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0xD0, 3, v);
+ seq_printf(sf, "IPS_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "IPS_SIZE:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "IPS_DEPTH:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(c->reg, 0x130, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "IPS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0x170, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
+}
+
+static struct komeda_component_funcs d71_improc_funcs = {
+ .update = d71_improc_update,
+ .disable = d71_component_disable,
+ .dump_register = d71_improc_dump,
+};
+
+static int d71_improc_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_improc *improc;
+ u32 pipe_id, comp_id, value;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*improc),
+ comp_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_improc_funcs, IPS_NUM_INPUT_IDS,
+ get_valid_inputs(blk),
+ IPS_NUM_OUTPUT_IDS, reg, "DOU%d_IPS", pipe_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add improc component\n");
+ return PTR_ERR(c);
+ }
+
+ improc = to_improc(c);
+ improc->supported_color_depths = BIT(8) | BIT(10);
+ improc->supported_color_formats = DRM_COLOR_FORMAT_RGB444 |
+ DRM_COLOR_FORMAT_YCRCB444 |
+ DRM_COLOR_FORMAT_YCRCB422;
+ value = malidp_read32(reg, BLK_INFO);
+ if (value & IPS_INFO_CHD420)
+ improc->supported_color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+
+ improc->supports_csc = true;
+ improc->supports_gamma = true;
+
+ return 0;
+}
+
+static void d71_timing_ctrlr_disable(struct komeda_component *c)
+{
+ malidp_write32_mask(c->reg, BLK_CONTROL, BS_CTRL_EN, 0);
+}
+
+static void d71_timing_ctrlr_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct drm_crtc_state *crtc_st = state->crtc->state;
+ u32 __iomem *reg = c->reg;
+ struct videomode vm;
+ u32 value;
+
+ drm_display_mode_to_videomode(&crtc_st->adjusted_mode, &vm);
+
+ malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(vm.hactive, vm.vactive));
+ malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(vm.hfront_porch,
+ vm.hback_porch));
+ malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vm.vfront_porch,
+ vm.vback_porch));
+
+ value = BS_SYNC_VSW(vm.vsync_len) | BS_SYNC_HSW(vm.hsync_len);
+ value |= vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ? BS_SYNC_VSP : 0;
+ value |= vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ? BS_SYNC_HSP : 0;
+ malidp_write32(reg, BS_SYNC, value);
+
+ malidp_write32(reg, BS_PROG_LINE, D71_DEFAULT_PREPRETCH_LINE - 1);
+ malidp_write32(reg, BS_PREFETCH_LINE, D71_DEFAULT_PREPRETCH_LINE);
+
+ /* configure bs control register */
+ value = BS_CTRL_EN | BS_CTRL_VM;
+
+ malidp_write32(reg, BLK_CONTROL, value);
+}
+
+static void d71_timing_ctrlr_dump(struct komeda_component *c,
+ struct seq_file *sf)
+{
+ u32 v[8], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0xC0, 1, v);
+ seq_printf(sf, "BS_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0xD0, 8, v);
+ seq_printf(sf, "BS_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "BS_PROG_LINE:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "BS_PREFETCH_LINE:\t0x%X\n", v[2]);
+ seq_printf(sf, "BS_BG_COLOR:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "BS_ACTIVESIZE:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "BS_HINTERVALS:\t\t0x%X\n", v[5]);
+ seq_printf(sf, "BS_VINTERVALS:\t\t0x%X\n", v[6]);
+ seq_printf(sf, "BS_SYNC:\t\t0x%X\n", v[7]);
+
+ get_values_from_reg(c->reg, 0x100, 3, v);
+ seq_printf(sf, "BS_DRIFT_TO:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "BS_FRAME_TO:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "BS_TE_TO:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(c->reg, 0x110, 3, v);
+ for (i = 0; i < 3; i++)
+ seq_printf(sf, "BS_T%u_INTERVAL:\t\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0x120, 5, v);
+ for (i = 0; i < 2; i++) {
+ seq_printf(sf, "BS_CRC%u_LOW:\t\t0x%X\n", i, v[i << 1]);
+ seq_printf(sf, "BS_CRC%u_HIGH:\t\t0x%X\n", i, v[(i << 1) + 1]);
+ }
+ seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
+}
+
+static struct komeda_component_funcs d71_timing_ctrlr_funcs = {
+ .update = d71_timing_ctrlr_update,
+ .disable = d71_timing_ctrlr_disable,
+ .dump_register = d71_timing_ctrlr_dump,
+};
+
+static int d71_timing_ctrlr_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_timing_ctrlr *ctrlr;
+ u32 pipe_id, comp_id;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*ctrlr),
+ KOMEDA_COMPONENT_TIMING_CTRLR,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_timing_ctrlr_funcs,
+ 1, BIT(KOMEDA_COMPONENT_IPS0 + pipe_id),
+ BS_NUM_OUTPUT_IDS, reg, "DOU%d_BS", pipe_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add display_ctrl component\n");
+ return PTR_ERR(c);
+ }
+
+ ctrlr = to_ctrlr(c);
+
+ ctrlr->supports_dual_link = true;
+
+ return 0;
+}
+
+int d71_probe_block(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct d71_pipeline *pipe;
+	int blk_id = BLOCK_INFO_BLK_ID(blk->block_info);
+	int err = 0;
+
+ switch (BLOCK_INFO_BLK_TYPE(blk->block_info)) {
+ case D71_BLK_TYPE_GCU:
+ break;
+
+ case D71_BLK_TYPE_LPU:
+ pipe = d71->pipes[blk_id];
+ pipe->lpu_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_LPU_LAYER:
+ err = d71_layer_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_LPU_WB_LAYER:
+ err = d71_wb_layer_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_CU:
+ pipe = d71->pipes[blk_id];
+ pipe->cu_addr = reg;
+ err = d71_compiz_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_CU_SPLITTER:
+ case D71_BLK_TYPE_CU_SCALER:
+ case D71_BLK_TYPE_CU_MERGER:
+ break;
+
+ case D71_BLK_TYPE_DOU:
+ pipe = d71->pipes[blk_id];
+ pipe->dou_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_DOU_IPS:
+ err = d71_improc_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_DOU_FT_COEFF:
+ pipe = d71->pipes[blk_id];
+ pipe->dou_ft_coeff_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_DOU_BS:
+ err = d71_timing_ctrlr_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_GLB_LT_COEFF:
+ break;
+
+ case D71_BLK_TYPE_GLB_SCL_COEFF:
+ d71->glb_scl_coeff_addr[blk_id] = reg;
+ break;
+
+ default:
+		DRM_ERROR("Unknown block (block_info: 0x%x) found\n",
+ blk->block_info);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index edbf9daa1545..34506ef7ad40 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -4,13 +4,425 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
+
+#include <drm/drm_print.h>
+#include "d71_dev.h"
#include "malidp_io.h"
-#include "komeda_dev.h"
+
+static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->lpu_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & LPU_IRQ_IBSY)
+ evts |= KOMEDA_EVENT_IBSY;
+ if (raw_status & LPU_IRQ_EOW)
+ evts |= KOMEDA_EVENT_EOW;
+
+ if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) {
+ u32 restore = 0, tbu_status;
+ /* Check error of LPU status */
+ status = malidp_read32(reg, BLK_STATUS);
+ if (status & LPU_STATUS_AXIE) {
+ restore |= LPU_STATUS_AXIE;
+ evts |= KOMEDA_ERR_AXIE;
+ }
+ if (status & LPU_STATUS_ACE0) {
+ restore |= LPU_STATUS_ACE0;
+ evts |= KOMEDA_ERR_ACE0;
+ }
+ if (status & LPU_STATUS_ACE1) {
+ restore |= LPU_STATUS_ACE1;
+ evts |= KOMEDA_ERR_ACE1;
+ }
+ if (status & LPU_STATUS_ACE2) {
+ restore |= LPU_STATUS_ACE2;
+ evts |= KOMEDA_ERR_ACE2;
+ }
+ if (status & LPU_STATUS_ACE3) {
+ restore |= LPU_STATUS_ACE3;
+ evts |= KOMEDA_ERR_ACE3;
+ }
+ if (restore != 0)
+ malidp_write32_mask(reg, BLK_STATUS, restore, 0);
+
+ restore = 0;
+ /* Check errors of TBU status */
+ tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
+ if (tbu_status & LPU_TBU_STATUS_TCF) {
+ restore |= LPU_TBU_STATUS_TCF;
+ evts |= KOMEDA_ERR_TCF;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TTNG) {
+ restore |= LPU_TBU_STATUS_TTNG;
+ evts |= KOMEDA_ERR_TTNG;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TITR) {
+ restore |= LPU_TBU_STATUS_TITR;
+ evts |= KOMEDA_ERR_TITR;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TEMR) {
+ restore |= LPU_TBU_STATUS_TEMR;
+ evts |= KOMEDA_ERR_TEMR;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TTF) {
+ restore |= LPU_TBU_STATUS_TTF;
+ evts |= KOMEDA_ERR_TTF;
+ }
+ if (restore != 0)
+ malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+ return evts;
+}
+
+static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->cu_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & CU_IRQ_OVR)
+ evts |= KOMEDA_EVENT_OVR;
+
+ if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
+ status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
+ if (status & CU_STATUS_CPE)
+ evts |= KOMEDA_ERR_CPE;
+ if (status & CU_STATUS_ZME)
+ evts |= KOMEDA_ERR_ZME;
+ if (status & CU_STATUS_CFGE)
+ evts |= KOMEDA_ERR_CFGE;
+ if (status)
+ malidp_write32_mask(reg, BLK_STATUS, status, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+
+ return evts;
+}
+
+static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->dou_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & DOU_IRQ_PL0)
+ evts |= KOMEDA_EVENT_VSYNC;
+ if (raw_status & DOU_IRQ_UND)
+ evts |= KOMEDA_EVENT_URUN;
+
+ if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
+ u32 restore = 0;
+
+ status = malidp_read32(reg, BLK_STATUS);
+ if (status & DOU_STATUS_DRIFTTO) {
+ restore |= DOU_STATUS_DRIFTTO;
+ evts |= KOMEDA_ERR_DRIFTTO;
+ }
+ if (status & DOU_STATUS_FRAMETO) {
+ restore |= DOU_STATUS_FRAMETO;
+ evts |= KOMEDA_ERR_FRAMETO;
+ }
+ if (status & DOU_STATUS_TETO) {
+ restore |= DOU_STATUS_TETO;
+ evts |= KOMEDA_ERR_TETO;
+ }
+ if (status & DOU_STATUS_CSCE) {
+ restore |= DOU_STATUS_CSCE;
+ evts |= KOMEDA_ERR_CSCE;
+ }
+
+ if (restore != 0)
+ malidp_write32_mask(reg, BLK_STATUS, restore, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+ return evts;
+}
+
+static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
+{
+	u64 evts = 0ULL;
+
+ if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
+ evts |= get_lpu_event(d71_pipeline);
+
+ if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
+ evts |= get_cu_event(d71_pipeline);
+
+ if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
+ evts |= get_dou_event(d71_pipeline);
+
+ return evts;
+}
+
+static irqreturn_t
+d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 status, gcu_status, raw_status;
+
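+	/* GLB_IRQ_STATUS aggregates the pending interrupt sources; handle the
+	 * GCU bits here and let the per-pipeline helpers collect the rest.
+	 */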
+ gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);
+
+ if (gcu_status & GLB_IRQ_STATUS_GCU) {
+ raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
+ if (raw_status & GCU_IRQ_CVAL0)
+ evts->pipes[0] |= KOMEDA_EVENT_FLIP;
+ if (raw_status & GCU_IRQ_CVAL1)
+ evts->pipes[1] |= KOMEDA_EVENT_FLIP;
+ if (raw_status & GCU_IRQ_ERR) {
+ status = malidp_read32(d71->gcu_addr, BLK_STATUS);
+ if (status & GCU_STATUS_MERR) {
+ evts->global |= KOMEDA_ERR_MERR;
+ malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
+ GCU_STATUS_MERR, 0);
+ }
+ }
+
+ malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
+ }
+
+ if (gcu_status & GLB_IRQ_STATUS_PIPE0)
+ evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);
+
+ if (gcu_status & GLB_IRQ_STATUS_PIPE1)
+ evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
+
+ return gcu_status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+#define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
+ GCU_IRQ_MODE | GCU_IRQ_ERR)
+#define ENABLED_LPU_IRQS (LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
+#define ENABLED_CU_IRQS (CU_IRQ_OVR | CU_IRQ_ERR)
+#define ENABLED_DOU_IRQS (DOU_IRQ_UND | DOU_IRQ_ERR)
+
+static int d71_enable_irq(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe;
+ u32 i;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
+ ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = d71->pipes[i];
+ malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
+ ENABLED_CU_IRQS, ENABLED_CU_IRQS);
+ malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
+ ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
+ }
+ return 0;
+}
+
+static int d71_disable_irq(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe;
+ u32 i;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = d71->pipes[i];
+ malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
+ ENABLED_CU_IRQS, 0);
+ malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
+ ENABLED_LPU_IRQS, 0);
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ ENABLED_DOU_IRQS, 0);
+ }
+ return 0;
+}
+
+static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe = d71->pipes[master_pipe];
+
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
+}
+
+static int to_d71_opmode(int core_mode)
+{
+ switch (core_mode) {
+ case KOMEDA_MODE_DISP0:
+ return DO0_ACTIVE_MODE;
+ case KOMEDA_MODE_DISP1:
+ return DO1_ACTIVE_MODE;
+ case KOMEDA_MODE_DUAL_DISP:
+ return DO01_ACTIVE_MODE;
+ case KOMEDA_MODE_INACTIVE:
+ return INACTIVE_MODE;
+ default:
+ WARN(1, "Unknown operation mode");
+ return INACTIVE_MODE;
+ }
+}
+
+static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 opmode = to_d71_opmode(new_mode);
+ int ret;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);
+
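+	/* poll BLK_CONTROL until the HW reports the requested mode */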
+ ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
+ 100, 1000, 10000);
+
+ return ret > 0 ? 0 : -ETIMEDOUT;
+}
+
+static void d71_flush(struct komeda_dev *mdev,
+ int master_pipe, u32 active_pipes)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 reg_offset = (master_pipe == 0) ?
+ GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;
+
+ malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
+}
+
+static int d71_reset(struct d71_dev *d71)
+{
+ u32 __iomem *gcu = d71->gcu_addr;
+ int ret;
+
+ malidp_write32_mask(gcu, BLK_CONTROL,
+ GCU_CONTROL_SRST, GCU_CONTROL_SRST);
+
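+	/* the reset is complete once the HW clears GCU_CONTROL_SRST */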
+ ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
+ 100, 1000, 10000);
+
+ return ret > 0 ? 0 : -ETIMEDOUT;
+}
+
+void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
+{
+ int i;
+
+ blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
+ if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
+ return;
+
+ blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);
+
+ /* get valid input and output ids */
+ for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
+ blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
+ for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
+ blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
+}
+
+static void d71_cleanup(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+
+ if (!d71)
+ return;
+
+ devm_kfree(mdev->dev, d71);
+ mdev->chip_data = NULL;
+}
static int d71_enum_resources(struct komeda_dev *mdev)
{
- /* TODO add enum resources */
- return -1;
+ struct d71_dev *d71;
+ struct komeda_pipeline *pipe;
+ struct block_header blk;
+ u32 __iomem *blk_base;
+ u32 i, value, offset;
+ int err;
+
+ d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
+ if (!d71)
+ return -ENOMEM;
+
+ mdev->chip_data = d71;
+ d71->mdev = mdev;
+ d71->gcu_addr = mdev->reg_base;
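+	/* reg_base is a u32 pointer, so byte offsets are shifted down to
+	 * word offsets before being added.
+	 */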
+ d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);
+
+ err = d71_reset(d71);
+ if (err) {
+		DRM_ERROR("Failed to reset d71 device.\n");
+ goto err_cleanup;
+ }
+
+ /* probe GCU */
+ value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
+ d71->num_blocks = value & 0xFF;
+ d71->num_pipelines = (value >> 8) & 0x7;
+
+ if (d71->num_pipelines > D71_MAX_PIPELINE) {
+ DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
+ D71_MAX_PIPELINE, d71->num_pipelines);
+ err = -EINVAL;
+ goto err_cleanup;
+ }
+
+ /* probe PERIPH */
+ value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
+ if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) {
+		DRM_ERROR("expected PERIPH block but got block type: %d.\n",
+ BLOCK_INFO_BLK_TYPE(value));
+ err = -EINVAL;
+ goto err_cleanup;
+ }
+
+ value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
+
+ d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
+ d71->max_vsize = 4096;
+ d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
+ d71->supports_dual_link = value & PERIPH_SPLIT_EN ? true : false;
+ d71->integrates_tbu = value & PERIPH_TBU_EN ? true : false;
+
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
+ NULL);
+ if (IS_ERR(pipe)) {
+ err = PTR_ERR(pipe);
+ goto err_cleanup;
+ }
+ d71->pipes[i] = to_d71_pipeline(pipe);
+ }
+
+ /* loop the register blks and probe */
+ i = 2; /* exclude GCU and PERIPH */
+ offset = D71_BLOCK_SIZE; /* skip GCU */
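+	/* walk the D71_BLOCK_SIZE-aligned register blocks; reserved slots
+	 * are skipped without being counted against num_blocks.
+	 */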
+ while (i < d71->num_blocks) {
+ blk_base = mdev->reg_base + (offset >> 2);
+
+ d71_read_block_header(blk_base, &blk);
+ if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
+ err = d71_probe_block(d71, &blk, blk_base);
+ if (err)
+ goto err_cleanup;
+ i++;
+ }
+
+ offset += D71_BLOCK_SIZE;
+ }
+
+ DRM_DEBUG("total %d (out of %d) blocks are found.\n",
+ i, d71->num_blocks);
+
+ return 0;
+
+err_cleanup:
+ d71_cleanup(mdev);
+ return err;
}
#define __HW_ID(__group, __format) \
@@ -93,19 +505,22 @@ static void d71_init_fmt_tbl(struct komeda_dev *mdev)
static struct komeda_dev_funcs d71_chip_funcs = {
.init_format_table = d71_init_fmt_tbl,
.enum_resources = d71_enum_resources,
- .cleanup = NULL,
+ .cleanup = d71_cleanup,
+ .irq_handler = d71_irq_handler,
+ .enable_irq = d71_enable_irq,
+ .disable_irq = d71_disable_irq,
+ .on_off_vblank = d71_on_off_vblank,
+ .change_opmode = d71_change_opmode,
+ .flush = d71_flush,
};
-#define GLB_ARCH_ID 0x000
-#define GLB_CORE_ID 0x004
-#define GLB_CORE_INFO 0x008
-
struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
+ chip->bus_width = D71_BUS_WIDTH_16_BYTES;
return &d71_chip_funcs;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
new file mode 100644
index 000000000000..7465c57d9774
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _D71_DEV_H_
+#define _D71_DEV_H_
+
+#include "komeda_dev.h"
+#include "komeda_pipeline.h"
+#include "d71_regs.h"
+
+struct d71_pipeline {
+ struct komeda_pipeline base;
+
+ /* d71 private pipeline blocks */
+ u32 __iomem *lpu_addr;
+ u32 __iomem *cu_addr;
+ u32 __iomem *dou_addr;
+ u32 __iomem *dou_ft_coeff_addr; /* forward transform coeffs table */
+};
+
+struct d71_dev {
+ struct komeda_dev *mdev;
+
+ int num_blocks;
+ int num_pipelines;
+ int num_rich_layers;
+ u32 max_line_size;
+ u32 max_vsize;
+ u32 supports_dual_link : 1;
+ u32 integrates_tbu : 1;
+
+ /* global register blocks */
+ u32 __iomem *gcu_addr;
+ /* scaling coeffs table */
+ u32 __iomem *glb_scl_coeff_addr[D71_MAX_GLB_SCL_COEFF];
+ u32 __iomem *periph_addr;
+
+ struct d71_pipeline *pipes[D71_MAX_PIPELINE];
+};
+
+#define to_d71_pipeline(x) container_of(x, struct d71_pipeline, base)
+
+int d71_probe_block(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg);
+void d71_read_block_header(u32 __iomem *reg, struct block_header *blk);
+
+#endif /* !_D71_DEV_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
new file mode 100644
index 000000000000..2d5e6d00b42c
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _D71_REG_H_
+#define _D71_REG_H_
+
+/* Common block registers offset */
+#define BLK_BLOCK_INFO 0x000
+#define BLK_PIPELINE_INFO 0x004
+#define BLK_VALID_INPUT_ID0 0x020
+#define BLK_OUTPUT_ID0 0x060
+#define BLK_INPUT_ID0 0x080
+#define BLK_IRQ_RAW_STATUS 0x0A0
+#define BLK_IRQ_CLEAR 0x0A4
+#define BLK_IRQ_MASK 0x0A8
+#define BLK_IRQ_STATUS 0x0AC
+#define BLK_STATUS 0x0B0
+#define BLK_INFO 0x0C0
+#define BLK_CONTROL 0x0D0
+#define BLK_SIZE 0x0D4
+#define BLK_IN_SIZE 0x0E0
+
+#define BLK_P0_PTR_LOW 0x100
+#define BLK_P0_PTR_HIGH 0x104
+#define BLK_P0_STRIDE 0x108
+#define BLK_P1_PTR_LOW 0x110
+#define BLK_P1_PTR_HIGH 0x114
+#define BLK_P1_STRIDE 0x118
+#define BLK_P2_PTR_LOW 0x120
+#define BLK_P2_PTR_HIGH 0x124
+
+#define BLOCK_INFO_N_SUBBLKS(x) ((x) & 0x000F)
+#define BLOCK_INFO_BLK_ID(x) (((x) & 0x00F0) >> 4)
+#define BLOCK_INFO_BLK_TYPE(x) (((x) & 0xFF00) >> 8)
+#define BLOCK_INFO_INPUT_ID(x) ((x) & 0xFFF0)
+#define BLOCK_INFO_TYPE_ID(x) (((x) & 0x0FF0) >> 4)
+
+#define PIPELINE_INFO_N_OUTPUTS(x) ((x) & 0x000F)
+#define PIPELINE_INFO_N_VALID_INPUTS(x) (((x) & 0x0F00) >> 8)
+
+/* Common block control register bits */
+#define BLK_CTRL_EN BIT(0)
+/* Common size macro */
+#define HV_SIZE(h, v) (((h) & 0x1FFF) + (((v) & 0x1FFF) << 16))
+#define HV_OFFSET(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16))
+#define HV_CROP(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16))
+
+/* AD_CONTROL register */
+#define AD_CONTROL 0x160
+
+/* AD_CONTROL register bits */
+#define AD_AEN BIT(0)
+#define AD_YT BIT(1)
+#define AD_BS BIT(2)
+#define AD_WB BIT(3)
+#define AD_TH BIT(4)
+
+/* Global Control Unit */
+#define GLB_ARCH_ID 0x000
+#define GLB_CORE_ID 0x004
+#define GLB_CORE_INFO 0x008
+#define GLB_IRQ_STATUS 0x010
+
+#define GCU_CONFIG_VALID0 0x0D4
+#define GCU_CONFIG_VALID1 0x0D8
+
+/* GCU_CONTROL_BITS */
+#define GCU_CONTROL_MODE(x) ((x) & 0x7)
+#define GCU_CONTROL_SRST BIT(16)
+
+/* GCU opmode */
+#define INACTIVE_MODE 0
+#define TBU_CONNECT_MODE 1
+#define TBU_DISCONNECT_MODE 2
+#define DO0_ACTIVE_MODE 3
+#define DO1_ACTIVE_MODE 4
+#define DO01_ACTIVE_MODE 5
+
+/* GLB_IRQ_STATUS bits */
+#define GLB_IRQ_STATUS_GCU BIT(0)
+#define GLB_IRQ_STATUS_LPU0 BIT(8)
+#define GLB_IRQ_STATUS_LPU1 BIT(9)
+#define GLB_IRQ_STATUS_ATU0 BIT(10)
+#define GLB_IRQ_STATUS_ATU1 BIT(11)
+#define GLB_IRQ_STATUS_ATU2 BIT(12)
+#define GLB_IRQ_STATUS_ATU3 BIT(13)
+#define GLB_IRQ_STATUS_CU0 BIT(16)
+#define GLB_IRQ_STATUS_CU1 BIT(17)
+#define GLB_IRQ_STATUS_DOU0 BIT(24)
+#define GLB_IRQ_STATUS_DOU1 BIT(25)
+
+#define GLB_IRQ_STATUS_PIPE0 (GLB_IRQ_STATUS_LPU0 |\
+ GLB_IRQ_STATUS_ATU0 |\
+ GLB_IRQ_STATUS_ATU1 |\
+ GLB_IRQ_STATUS_CU0 |\
+ GLB_IRQ_STATUS_DOU0)
+
+#define GLB_IRQ_STATUS_PIPE1 (GLB_IRQ_STATUS_LPU1 |\
+ GLB_IRQ_STATUS_ATU2 |\
+ GLB_IRQ_STATUS_ATU3 |\
+ GLB_IRQ_STATUS_CU1 |\
+ GLB_IRQ_STATUS_DOU1)
+
+#define GLB_IRQ_STATUS_ATU (GLB_IRQ_STATUS_ATU0 |\
+ GLB_IRQ_STATUS_ATU1 |\
+ GLB_IRQ_STATUS_ATU2 |\
+ GLB_IRQ_STATUS_ATU3)
+
+/* GCU_IRQ_BITS */
+#define GCU_IRQ_CVAL0 BIT(0)
+#define GCU_IRQ_CVAL1 BIT(1)
+#define GCU_IRQ_MODE BIT(4)
+#define GCU_IRQ_ERR BIT(11)
+
+/* GCU_STATUS_BITS */
+#define GCU_STATUS_MODE(x) ((x) & 0x7)
+#define GCU_STATUS_MERR BIT(4)
+#define GCU_STATUS_TCS0 BIT(8)
+#define GCU_STATUS_TCS1 BIT(9)
+#define GCU_STATUS_ACTIVE BIT(31)
+
+/* GCU_CONFIG_VALIDx BITS */
+#define GCU_CONFIG_CVAL BIT(0)
+
+/* PERIPHERAL registers */
+#define PERIPH_MAX_LINE_SIZE BIT(0)
+#define PERIPH_NUM_RICH_LAYERS BIT(4)
+#define PERIPH_SPLIT_EN BIT(8)
+#define PERIPH_TBU_EN BIT(12)
+#define PERIPH_AFBC_DMA_EN BIT(16)
+#define PERIPH_CONFIGURATION_ID 0x1D4
+
+/* LPU register */
+#define LPU_TBU_STATUS 0x0B4
+#define LPU_RAXI_CONTROL 0x0D0
+#define LPU_WAXI_CONTROL 0x0D4
+#define LPU_TBU_CONTROL 0x0D8
+
+/* LPU_xAXI_CONTROL_BITS */
+#define TO_RAXI_AOUTSTDCAPB(x) (x)
+#define TO_RAXI_BOUTSTDCAPB(x) ((x) << 8)
+#define TO_RAXI_BEN(x) ((x) << 15)
+#define TO_xAXI_BURSTLEN(x) ((x) << 16)
+#define TO_xAXI_AxQOS(x) ((x) << 24)
+#define TO_xAXI_ORD(x) ((x) << 31)
+#define TO_WAXI_OUTSTDCAPB(x) (x)
+
+#define RAXI_AOUTSTDCAPB_MASK 0x7F
+#define RAXI_BOUTSTDCAPB_MASK 0x7F00
+#define RAXI_BEN_MASK BIT(15)
+#define xAXI_BURSTLEN_MASK 0x3F0000
+#define xAXI_AxQOS_MASK 0xF000000
+#define xAXI_ORD_MASK BIT(31)
+#define WAXI_OUTSTDCAPB_MASK 0x3F
+
+/* LPU_TBU_CONTROL BITS */
+#define TO_TBU_DOUTSTDCAPB(x) (x)
+#define TBU_DOUTSTDCAPB_MASK 0x3F
+
+/* LPU_IRQ_BITS */
+#define LPU_IRQ_IBSY BIT(10)
+#define LPU_IRQ_ERR BIT(11)
+#define LPU_IRQ_EOW BIT(12)
+#define LPU_IRQ_PL0 BIT(13)
+
+/* LPU_STATUS_BITS */
+#define LPU_STATUS_AXIED(x) ((x) & 0xF)
+#define LPU_STATUS_AXIE BIT(4)
+#define LPU_STATUS_AXIRP BIT(5)
+#define LPU_STATUS_AXIWP BIT(6)
+#define LPU_STATUS_ACE0 BIT(16)
+#define LPU_STATUS_ACE1 BIT(17)
+#define LPU_STATUS_ACE2 BIT(18)
+#define LPU_STATUS_ACE3 BIT(19)
+#define LPU_STATUS_ACTIVE BIT(31)
+
+#define AXIEID_MASK 0xF
+#define AXIE_MASK LPU_STATUS_AXIE
+#define AXIRP_MASK LPU_STATUS_AXIRP
+#define AXIWP_MASK LPU_STATUS_AXIWP
+
+#define FROM_AXIEID(reg) ((reg) & AXIEID_MASK)
+#define TO_AXIE(x) ((x) << 4)
+#define FROM_AXIRP(reg) (((reg) & AXIRP_MASK) >> 5)
+#define FROM_AXIWP(reg) (((reg) & AXIWP_MASK) >> 6)
+
+/* LPU_TBU_STATUS_BITS */
+#define LPU_TBU_STATUS_TCF BIT(1)
+#define LPU_TBU_STATUS_TTNG BIT(2)
+#define LPU_TBU_STATUS_TITR BIT(8)
+#define LPU_TBU_STATUS_TEMR BIT(16)
+#define LPU_TBU_STATUS_TTF BIT(31)
+
+/* LPU_TBU_CONTROL BITS */
+#define LPU_TBU_CTRL_TLBPEN BIT(16)
+
+/* CROSSBAR CONTROL BITS */
+#define CBU_INPUT_CTRL_EN BIT(0)
+#define CBU_NUM_INPUT_IDS 5
+#define CBU_NUM_OUTPUT_IDS 5
+
+/* CU register */
+#define CU_BG_COLOR 0x0DC
+#define CU_INPUT0_SIZE 0x0E0
+#define CU_INPUT0_OFFSET 0x0E4
+#define CU_INPUT0_CONTROL 0x0E8
+#define CU_INPUT1_SIZE 0x0F0
+#define CU_INPUT1_OFFSET 0x0F4
+#define CU_INPUT1_CONTROL 0x0F8
+#define CU_INPUT2_SIZE 0x100
+#define CU_INPUT2_OFFSET 0x104
+#define CU_INPUT2_CONTROL 0x108
+#define CU_INPUT3_SIZE 0x110
+#define CU_INPUT3_OFFSET 0x114
+#define CU_INPUT3_CONTROL 0x118
+#define CU_INPUT4_SIZE 0x120
+#define CU_INPUT4_OFFSET 0x124
+#define CU_INPUT4_CONTROL 0x128
+
+#define CU_PER_INPUT_REGS 4
+
+#define CU_NUM_INPUT_IDS 5
+#define CU_NUM_OUTPUT_IDS 1
+
+/* CU control register bits */
+#define CU_CTRL_COPROC BIT(0)
+
+/* CU_IRQ_BITS */
+#define CU_IRQ_OVR BIT(9)
+#define CU_IRQ_ERR BIT(11)
+
+/* CU_STATUS_BITS */
+#define CU_STATUS_CPE BIT(0)
+#define CU_STATUS_ZME BIT(1)
+#define CU_STATUS_CFGE BIT(2)
+#define CU_STATUS_ACTIVE BIT(31)
+
+/* CU input control register bits */
+#define CU_INPUT_CTRL_EN BIT(0)
+#define CU_INPUT_CTRL_PAD BIT(1)
+#define CU_INPUT_CTRL_PMUL BIT(2)
+#define CU_INPUT_CTRL_ALPHA(x) (((x) & 0xFF) << 8)
+
+/* DOU register */
+
+/* DOU_IRQ_BITS */
+#define DOU_IRQ_UND BIT(8)
+#define DOU_IRQ_ERR BIT(11)
+#define DOU_IRQ_PL0 BIT(13)
+#define DOU_IRQ_PL1 BIT(14)
+
+/* DOU_STATUS_BITS */
+#define DOU_STATUS_DRIFTTO BIT(0)
+#define DOU_STATUS_FRAMETO BIT(1)
+#define DOU_STATUS_TETO BIT(2)
+#define DOU_STATUS_CSCE BIT(8)
+#define DOU_STATUS_ACTIVE BIT(31)
+
+/* Layer registers */
+#define LAYER_INFO 0x0C0
+#define LAYER_R_CONTROL 0x0D4
+#define LAYER_FMT 0x0D8
+#define LAYER_LT_COEFFTAB 0x0DC
+#define LAYER_PALPHA 0x0E4
+
+#define LAYER_YUV_RGB_COEFF0 0x130
+
+#define LAYER_AD_H_CROP 0x164
+#define LAYER_AD_V_CROP 0x168
+
+#define LAYER_RGB_RGB_COEFF0 0x170
+
+/* L_CONTROL_BITS */
+#define L_EN BIT(0)
+#define L_IT BIT(4)
+#define L_R2R BIT(5)
+#define L_FT BIT(6)
+#define L_ROT(x) (((x) & 3) << 8)
+#define L_HFLIP BIT(10)
+#define L_VFLIP BIT(11)
+#define L_TBU_EN BIT(16)
+#define L_A_RCACHE(x) (((x) & 0xF) << 28)
+#define L_ROT_R0 0
+#define L_ROT_R90 1
+#define L_ROT_R180 2
+#define L_ROT_R270 3
+
+/* LAYER_R_CONTROL BITS */
+#define LR_CHI422_BILINEAR 0
+#define LR_CHI422_REPLICATION 1
+#define LR_CHI420_JPEG (0 << 2)
+#define LR_CHI420_MPEG (1 << 2)
+
+#define L_ITSEL(x) ((x) & 0xFFF)
+#define L_FTSEL(x) (((x) & 0xFFF) << 16)
+
+#define LAYER_PER_PLANE_REGS 4
+
+/* Layer_WR registers */
+#define LAYER_WR_PROG_LINE 0x0D4
+#define LAYER_WR_FORMAT 0x0D8
+
+/* Layer_WR control bits */
+#define LW_OFM BIT(4)
+#define LW_LALPHA(x) (((x) & 0xFF) << 8)
+#define LW_A_WCACHE(x) (((x) & 0xF) << 28)
+#define LW_TBU_EN BIT(16)
+
+#define AxCACHE_MASK 0xF0000000
+
+/* Layer AXI R/W cache setting */
+#define AxCACHE_B BIT(0) /* Bufferable */
+#define AxCACHE_M BIT(1) /* Modifiable */
+#define AxCACHE_RA BIT(2) /* Read-Allocate */
+#define AxCACHE_WA BIT(3) /* Write-Allocate */
+
+/* Layer info bits */
+#define L_INFO_RF BIT(0)
+#define L_INFO_CM BIT(1)
+#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7)
+
+/* Scaler registers */
+#define SC_COEFFTAB 0x0DC
+#define SC_OUT_SIZE 0x0E4
+#define SC_H_CROP 0x0E8
+#define SC_V_CROP 0x0EC
+#define SC_H_INIT_PH 0x0F0
+#define SC_H_DELTA_PH 0x0F4
+#define SC_V_INIT_PH 0x0F8
+#define SC_V_DELTA_PH 0x0FC
+#define SC_ENH_LIMITS 0x130
+#define SC_ENH_COEFF0 0x134
+
+#define SC_MAX_ENH_COEFF 9
+
+/* SC_CTRL_BITS */
+#define SC_CTRL_SCL BIT(0)
+#define SC_CTRL_LS BIT(1)
+#define SC_CTRL_AP BIT(4)
+#define SC_CTRL_IENH BIT(8)
+#define SC_CTRL_RGBSM BIT(16)
+#define SC_CTRL_ASM BIT(17)
+
+#define SC_VTSEL(vtal) ((vtal) << 16)
+
+#define SC_NUM_INPUTS_IDS 1
+#define SC_NUM_OUTPUTS_IDS 1
+
+#define MG_NUM_INPUTS_IDS 2
+#define MG_NUM_OUTPUTS_IDS 1
+
+/* Merger registers */
+#define MG_INPUT_ID0 BLK_INPUT_ID0
+#define MG_INPUT_ID1 (MG_INPUT_ID0 + 4)
+#define MG_SIZE BLK_SIZE
+
+/* Splitter registers */
+#define SP_OVERLAP_SIZE 0xD8
+
+/* Backend registers */
+#define BS_INFO 0x0C0
+#define BS_PROG_LINE 0x0D4
+#define BS_PREFETCH_LINE 0x0D8
+#define BS_BG_COLOR 0x0DC
+#define BS_ACTIVESIZE 0x0E0
+#define BS_HINTERVALS 0x0E4
+#define BS_VINTERVALS 0x0E8
+#define BS_SYNC 0x0EC
+#define BS_DRIFT_TO 0x100
+#define BS_FRAME_TO 0x104
+#define BS_TE_TO 0x108
+#define BS_T0_INTERVAL 0x110
+#define BS_T1_INTERVAL 0x114
+#define BS_T2_INTERVAL 0x118
+#define BS_CRC0_LOW 0x120
+#define BS_CRC0_HIGH 0x124
+#define BS_CRC1_LOW 0x128
+#define BS_CRC1_HIGH 0x12C
+#define BS_USER 0x130
+
+/* BS control register bits */
+#define BS_CTRL_EN BIT(0)
+#define BS_CTRL_VM BIT(1)
+#define BS_CTRL_BM BIT(2)
+#define BS_CTRL_HMASK BIT(4)
+#define BS_CTRL_VD BIT(5)
+#define BS_CTRL_TE BIT(8)
+#define BS_CTRL_TS BIT(9)
+#define BS_CTRL_TM BIT(12)
+#define BS_CTRL_DL BIT(16)
+#define BS_CTRL_SBS BIT(17)
+#define BS_CTRL_CRC BIT(18)
+#define BS_CTRL_PM BIT(20)
+
+/* BS active size/intervals */
+#define BS_H_INTVALS(hfp, hbp) (((hfp) & 0xFFF) + (((hbp) & 0x3FF) << 16))
+#define BS_V_INTVALS(vfp, vbp) (((vfp) & 0x3FFF) + (((vbp) & 0xFF) << 16))
+
+/* BS_SYNC bits */
+#define BS_SYNC_HSW(x) ((x) & 0x3FF)
+#define BS_SYNC_HSP BIT(12)
+#define BS_SYNC_VSW(x) (((x) & 0xFF) << 16)
+#define BS_SYNC_VSP BIT(28)
+
+#define BS_NUM_INPUT_IDS 0
+#define BS_NUM_OUTPUT_IDS 0
+
+/* Image process registers */
+#define IPS_DEPTH 0x0D8
+#define IPS_RGB_RGB_COEFF0 0x130
+#define IPS_RGB_YUV_COEFF0 0x170
+
+#define IPS_DEPTH_MARK 0xF
+
+/* IPS control register bits */
+#define IPS_CTRL_RGB BIT(0)
+#define IPS_CTRL_FT BIT(4)
+#define IPS_CTRL_YUV BIT(8)
+#define IPS_CTRL_CHD422 BIT(9)
+#define IPS_CTRL_CHD420 BIT(10)
+#define IPS_CTRL_LPF BIT(11)
+#define IPS_CTRL_DITH BIT(12)
+#define IPS_CTRL_CLAMP BIT(16)
+#define IPS_CTRL_SBS BIT(17)
+
+/* IPS info register bits */
+#define IPS_INFO_CHD420 BIT(10)
+
+#define IPS_NUM_INPUT_IDS 2
+#define IPS_NUM_OUTPUT_IDS 1
+
+/* FT_COEFF block registers */
+#define FT_COEFF0 0x80
+#define GLB_IT_COEFF 0x80
+
+/* GLB_SC_COEFF registers */
+#define GLB_SC_COEFF_ADDR 0x0080
+#define GLB_SC_COEFF_DATA 0x0084
+#define GLB_LT_COEFF_DATA 0x0080
+
+#define GLB_SC_COEFF_MAX_NUM 1024
+#define GLB_LT_COEFF_NUM 65
+/* GLB_SC_ADDR */
+#define SC_COEFF_R_ADDR BIT(18)
+#define SC_COEFF_G_ADDR BIT(17)
+#define SC_COEFF_B_ADDR BIT(16)
+
+#define SC_COEFF_DATA(x, y) (((y) & 0xFFFF) | (((x) & 0xFFFF) << 16))
+
+enum d71_blk_type {
+ D71_BLK_TYPE_GCU = 0x00,
+ D71_BLK_TYPE_LPU = 0x01,
+ D71_BLK_TYPE_CU = 0x02,
+ D71_BLK_TYPE_DOU = 0x03,
+ D71_BLK_TYPE_AEU = 0x04,
+ D71_BLK_TYPE_GLB_LT_COEFF = 0x05,
+ D71_BLK_TYPE_GLB_SCL_COEFF = 0x06, /* SH/SV scaler coeff */
+ D71_BLK_TYPE_GLB_SC_COEFF = 0x07,
+ D71_BLK_TYPE_PERIPH = 0x08,
+ D71_BLK_TYPE_LPU_TRUSTED = 0x09,
+ D71_BLK_TYPE_AEU_TRUSTED = 0x0A,
+ D71_BLK_TYPE_LPU_LAYER = 0x10,
+ D71_BLK_TYPE_LPU_WB_LAYER = 0x11,
+ D71_BLK_TYPE_CU_SPLITTER = 0x20,
+ D71_BLK_TYPE_CU_SCALER = 0x21,
+ D71_BLK_TYPE_CU_MERGER = 0x22,
+ D71_BLK_TYPE_DOU_IPS = 0x30,
+ D71_BLK_TYPE_DOU_BS = 0x31,
+ D71_BLK_TYPE_DOU_FT_COEFF = 0x32,
+ D71_BLK_TYPE_AEU_DS = 0x40,
+ D71_BLK_TYPE_AEU_AES = 0x41,
+ D71_BLK_TYPE_RESERVED = 0xFF
+};
+
+/* Constant of components */
+#define D71_MAX_PIPELINE 2
+#define D71_PIPELINE_MAX_SCALERS 2
+#define D71_PIPELINE_MAX_LAYERS 4
+
+#define D71_MAX_GLB_IT_COEFF 3
+#define D71_MAX_GLB_SCL_COEFF 4
+
+#define D71_MAX_LAYERS_PER_LPU 4
+#define D71_BLOCK_MAX_INPUT 9
+#define D71_BLOCK_MAX_OUTPUT 5
+#define D71_MAX_SC_PER_CU 2
+
+#define D71_BLOCK_OFFSET_PERIPH 0xFE00
+#define D71_BLOCK_SIZE 0x0200
+
+#define D71_DEFAULT_PREPRETCH_LINE 5
+#define D71_BUS_WIDTH_16_BYTES 16
+
+#define D71_MIN_LINE_SIZE 64
+#define D71_MIN_VERTICAL_SIZE 64
+#define D71_SC_MIN_LIN_SIZE 4
+#define D71_SC_MIN_VERTICAL_SIZE 4
+#define D71_SC_MAX_LIN_SIZE 2048
+#define D71_SC_MAX_VERTICAL_SIZE 4096
+
+#define D71_SC_MAX_UPSCALING 64
+#define D71_SC_MAX_DOWNSCALING 6
+#define D71_SC_SPLIT_OVERLAP 8
+#define D71_SC_ENH_SPLIT_OVERLAP 1
+
+#define D71_MG_MIN_MERGED_SIZE 4
+#define D71_MG_MAX_MERGED_HSIZE 4032
+#define D71_MG_MAX_MERGED_VSIZE 4096
+
+#define D71_PALPHA_DEF_MAP 0xFFAA5500
+#define D71_LAYER_CONTROL_DEFAULT 0x30000000
+#define D71_WB_LAYER_CONTROL_DEFAULT 0x3000FF00
+#define D71_BS_CONTROL_DEFAULT 0x00000002
+
+struct block_header {
+ u32 block_info;
+ u32 pipeline_info;
+ u32 input_ids[D71_BLOCK_MAX_INPUT];
+ u32 output_ids[D71_BLOCK_MAX_OUTPUT];
+};
+
+static inline u32 get_block_type(struct block_header *blk)
+{
+ return BLOCK_INFO_BLK_TYPE(blk->block_info);
+}
+
+#endif /* !_D71_REG_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 3ca5718aa0c2..62fad59f5a6a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -18,10 +18,415 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
-struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+/**
+ * komeda_crtc_atomic_check - build display output data flow
+ * @crtc: DRM crtc
+ * @state: the crtc state object
+ *
+ * crtc_atomic_check is the final check stage, so besides building a display
+ * data pipeline according to the crtc_state, it also needs to release or
+ * disable the unclaimed pipeline resources.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int
+komeda_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state);
+ int err;
+
+ if (state->active) {
+ err = komeda_build_display_data_flow(kcrtc, kcrtc_st);
+ if (err)
+ return err;
+ }
+
+ /* release unclaimed pipeline resources */
+ err = komeda_release_unclaimed_resources(kcrtc->master, kcrtc_st);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static u32 komeda_calc_mclk(struct komeda_crtc_state *kcrtc_st)
+{
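+	/* adjusted_mode.clock is in kHz; clk_set_rate() expects Hz */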
+ unsigned long mclk = kcrtc_st->base.adjusted_mode.clock * 1000;
+
+ return mclk;
+}
+
+/* To activate a crtc, two parts of preparation are needed:
+ * 1. adjust the display operation mode.
+ * 2. enable the needed clocks.
+ */
+static int
+komeda_crtc_prepare(struct komeda_crtc *kcrtc)
+{
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
+ unsigned long pxlclk_rate = kcrtc_st->base.adjusted_mode.clock * 1000;
+ u32 new_mode;
+ int err;
+
+ mutex_lock(&mdev->lock);
+
+ new_mode = mdev->dpmode | BIT(master->id);
+ if (WARN_ON(new_mode == mdev->dpmode)) {
+ err = 0;
+ goto unlock;
+ }
+
+ err = mdev->funcs->change_opmode(mdev, new_mode);
+ if (err) {
+		DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n",
+ mdev->dpmode, new_mode);
+ goto unlock;
+ }
+
+ mdev->dpmode = new_mode;
+	/* mclk only needs to be enabled when entering single display mode.
+	 * Dual display mode is always entered from single display mode, so
+	 * mclk is already enabled and doesn't need to be enabled again.
+	 */
+ if (new_mode != KOMEDA_MODE_DUAL_DISP) {
+ err = clk_set_rate(mdev->mclk, komeda_calc_mclk(kcrtc_st));
+ if (err)
+ DRM_ERROR("failed to set mclk.\n");
+ err = clk_prepare_enable(mdev->mclk);
+ if (err)
+ DRM_ERROR("failed to enable mclk.\n");
+ }
+
+ err = clk_prepare_enable(master->aclk);
+ if (err)
+ DRM_ERROR("failed to enable axi clk for pipe%d.\n", master->id);
+ err = clk_set_rate(master->pxlclk, pxlclk_rate);
+ if (err)
+ DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
+ err = clk_prepare_enable(master->pxlclk);
+ if (err)
+ DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id);
+
+unlock:
+ mutex_unlock(&mdev->lock);
+
+ return err;
+}
+
+static int
+komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
+{
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ u32 new_mode;
+ int err;
+
+ mutex_lock(&mdev->lock);
+
+ new_mode = mdev->dpmode & (~BIT(master->id));
+
+ if (WARN_ON(new_mode == mdev->dpmode)) {
+ err = 0;
+ goto unlock;
+ }
+
+ err = mdev->funcs->change_opmode(mdev, new_mode);
+ if (err) {
+		DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n",
+ mdev->dpmode, new_mode);
+ goto unlock;
+ }
+
+ mdev->dpmode = new_mode;
+
+ clk_disable_unprepare(master->pxlclk);
+ clk_disable_unprepare(master->aclk);
+ if (new_mode == KOMEDA_MODE_INACTIVE)
+ clk_disable_unprepare(mdev->mclk);
+
+unlock:
+ mutex_unlock(&mdev->lock);
+
+ return err;
+}
+
+void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
+ struct komeda_events *evts)
+{
+ struct drm_crtc *crtc = &kcrtc->base;
+ u32 events = evts->pipes[kcrtc->master->id];
+
+ if (events & KOMEDA_EVENT_VSYNC)
+ drm_crtc_handle_vblank(crtc);
+
+	/* to be handled together with the writeback support */
+ if (events & KOMEDA_EVENT_EOW)
+ DRM_DEBUG("EOW.\n");
+
+ if (events & KOMEDA_EVENT_FLIP) {
+ unsigned long flags;
+ struct drm_pending_vblank_event *event;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (kcrtc->disable_done) {
+ complete_all(kcrtc->disable_done);
+ kcrtc->disable_done = NULL;
+ } else if (crtc->state->event) {
+ event = crtc->state->event;
+ /*
+ * Consume event before notifying drm core that flip
+ * happened.
+ */
+ crtc->state->event = NULL;
+ drm_crtc_send_vblank_event(crtc, event);
+ } else {
+			DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n",
+ drm_crtc_index(&kcrtc->base));
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void
+komeda_crtc_do_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc->state);
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+
+ DRM_DEBUG_ATOMIC("CRTC%d_FLUSH: active_pipes: 0x%x, affected: 0x%x.\n",
+ drm_crtc_index(crtc),
+ kcrtc_st->active_pipes, kcrtc_st->affected_pipes);
+
+ /* step 1: update the pipeline/component state to HW */
+ if (has_bit(master->id, kcrtc_st->affected_pipes))
+ komeda_pipeline_update(master, old->state);
+
+ /* step 2: notify the HW to kickoff the update */
+ mdev->funcs->flush(mdev, master->id, kcrtc_st->active_pipes);
+}
+
+static void
+komeda_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ komeda_crtc_prepare(to_kcrtc(crtc));
+ drm_crtc_vblank_on(crtc);
+ komeda_crtc_do_flush(crtc, old);
+}
+
+static void
+komeda_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *old_st = to_kcrtc_st(old);
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ struct completion *disable_done = &crtc->state->commit->flip_done;
+ struct completion temp;
+ int timeout;
+
+ DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x.\n",
+ drm_crtc_index(crtc),
+ old_st->active_pipes, old_st->affected_pipes);
+
+ if (has_bit(master->id, old_st->active_pipes))
+ komeda_pipeline_disable(master, old->state);
+
+	/* crtc_disable has two scenarios, depending on the state->active switch:
+	 * 1. active -> inactive
+	 *    This commit is a disable commit and is finished once the disable
+	 *    operation is done, so we can directly use crtc->state->event to
+	 *    track the HW disable operation.
+	 * 2. active -> active
+	 *    The crtc->commit is not for a disable, but a modeset while the
+	 *    crtc stays active. Such a commit is actually completed by three
+	 *    DRM operations:
+	 *    crtc_disable, update_planes(crtc_flush), crtc_enable
+	 *    so in this case crtc->commit covers the whole process and cannot
+	 *    be used to track the disable alone; we need a temporary flip_done
+	 *    for tracking the disable, and crtc->state->event for the
+	 *    crtc_enable operation.
+	 *    That's also the reason why the modeset commit is skipped in
+	 *    komeda_crtc_atomic_flush().
+	 */
+ if (crtc->state->active) {
+ struct komeda_pipeline_state *pipe_st;
+ /* clear the old active_comps to zero */
+ pipe_st = komeda_pipeline_get_old_state(master, old->state);
+ pipe_st->active_comps = 0;
+
+ init_completion(&temp);
+ kcrtc->disable_done = &temp;
+ disable_done = &temp;
+ }
+
+ mdev->funcs->flush(mdev, master->id, 0);
+
+	/* wait for the disable to take effect */
+ timeout = wait_for_completion_timeout(disable_done, HZ);
+ if (timeout == 0) {
+ DRM_ERROR("disable pipeline%d timeout.\n", kcrtc->master->id);
+ if (crtc->state->active) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ kcrtc->disable_done = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+ }
+
+ drm_crtc_vblank_off(crtc);
+ komeda_crtc_unprepare(kcrtc);
+}
+
+static void
+komeda_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ /* commit with modeset will be handled in enable/disable */
+ if (drm_atomic_crtc_needs_modeset(crtc->state))
+ return;
+
+ komeda_crtc_do_flush(crtc, old);
+}
+
+static enum drm_mode_status
+komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_pipeline *master = kcrtc->master;
+ long mode_clk, pxlclk;
+
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+	/* main clock/AXI clk must be faster than pxlclk */
+ mode_clk = m->clock * 1000;
+ pxlclk = clk_round_rate(master->pxlclk, mode_clk);
+ if (pxlclk != mode_clk) {
+ DRM_DEBUG_ATOMIC("pxlclk doesn't support %ld Hz\n", mode_clk);
+
+ return MODE_NOCLOCK;
+ }
+
+ if (clk_round_rate(mdev->mclk, mode_clk) < pxlclk) {
+ DRM_DEBUG_ATOMIC("mclk can't satisfy the requirement of %s-clk: %ld.\n",
+ m->name, pxlclk);
+
+ return MODE_CLOCK_HIGH;
+ }
+
+ if (clk_round_rate(master->aclk, mode_clk) < pxlclk) {
+ DRM_DEBUG_ATOMIC("aclk can't satisfy the requirement of %s-clk: %ld.\n",
+ m->name, pxlclk);
+
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *m,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_pipeline *master = kcrtc->master;
+ long mode_clk = m->clock * 1000;
+
+ adjusted_mode->clock = clk_round_rate(master->pxlclk, mode_clk) / 1000;
+
+ return true;
+}
+
+static struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+ .atomic_check = komeda_crtc_atomic_check,
+ .atomic_flush = komeda_crtc_atomic_flush,
+ .atomic_enable = komeda_crtc_atomic_enable,
+ .atomic_disable = komeda_crtc_atomic_disable,
+ .mode_valid = komeda_crtc_mode_valid,
+ .mode_fixup = komeda_crtc_mode_fixup,
};
+static void komeda_crtc_reset(struct drm_crtc *crtc)
+{
+ struct komeda_crtc_state *state;
+
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(to_kcrtc_st(crtc->state));
+ crtc->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+ }
+}
+
+static struct drm_crtc_state *
+komeda_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct komeda_crtc_state *old = to_kcrtc_st(crtc->state);
+ struct komeda_crtc_state *new;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new->base);
+
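+	/* pipes that were active in the old state may need to be updated or
+	 * disabled by the new commit, so mark them as affected up front.
+	 */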
+ new->affected_pipes = old->active_pipes;
+
+ return &new->base;
+}
+
+static void komeda_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_kcrtc_st(state));
+}
+
+static int komeda_crtc_vblank_enable(struct drm_crtc *crtc)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+
+ mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, true);
+ return 0;
+}
+
+static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+
+ mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, false);
+}
+
static const struct drm_crtc_funcs komeda_crtc_funcs = {
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = komeda_crtc_reset,
+ .atomic_duplicate_state = komeda_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_crtc_atomic_destroy_state,
+ .enable_vblank = komeda_crtc_vblank_enable,
+ .disable_vblank = komeda_crtc_vblank_disable,
};
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 70e9bb7fa30c..ca3599e4a4d3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,11 +8,99 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif
#include <drm/drm_print.h>
#include "komeda_dev.h"
+#ifdef CONFIG_DEBUG_FS
+static int komeda_register_show(struct seq_file *sf, void *x)
+{
+ struct komeda_dev *mdev = sf->private;
+ int i;
+
+ if (mdev->funcs->dump_register)
+ mdev->funcs->dump_register(mdev, sf);
+
+ for (i = 0; i < mdev->n_pipelines; i++)
+ komeda_pipeline_dump_register(mdev->pipelines[i], sf);
+
+ return 0;
+}
+
+static int komeda_register_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, komeda_register_show, inode->i_private);
+}
+
+static const struct file_operations komeda_register_fops = {
+ .owner = THIS_MODULE,
+ .open = komeda_register_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void komeda_debugfs_init(struct komeda_dev *mdev)
+{
+ if (!debugfs_initialized())
+ return;
+
+ mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
+ if (IS_ERR_OR_NULL(mdev->debugfs_root))
+ return;
+
+ debugfs_create_file("register", 0444, mdev->debugfs_root,
+ mdev, &komeda_register_fops);
+}
+#endif
+
+static ssize_t
+core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", mdev->chip.core_id);
+}
+static DEVICE_ATTR_RO(core_id);
+
+static ssize_t
+config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+ struct komeda_pipeline *pipe = mdev->pipelines[0];
+ union komeda_config_id config_id;
+ int i;
+
+ memset(&config_id, 0, sizeof(config_id));
+
+ config_id.max_line_sz = pipe->layers[0]->hsize_in.end;
+ config_id.n_pipelines = mdev->n_pipelines;
+ config_id.n_scalers = pipe->n_scalers;
+ config_id.n_layers = pipe->n_layers;
+ config_id.n_richs = 0;
+ for (i = 0; i < pipe->n_layers; i++) {
+ if (pipe->layers[i]->layer_type == KOMEDA_FMT_RICH_LAYER)
+ config_id.n_richs++;
+ }
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", config_id.value);
+}
+static DEVICE_ATTR_RO(config_id);
+
+static struct attribute *komeda_sysfs_entries[] = {
+ &dev_attr_core_id.attr,
+ &dev_attr_config_id.attr,
+ NULL,
+};
+
+static struct attribute_group komeda_sysfs_attr_group = {
+ .attrs = komeda_sysfs_entries,
+};
+
static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
{
struct komeda_pipeline *pipe;
@@ -53,6 +141,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *child, *np = dev->of_node;
struct clk *clk;
int ret;
@@ -62,6 +151,11 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
return PTR_ERR(clk);
mdev->mclk = clk;
+ mdev->irq = platform_get_irq(pdev, 0);
+ if (mdev->irq < 0) {
+ DRM_ERROR("could not get IRQ number.\n");
+ return mdev->irq;
+ }
for_each_available_child_of_node(np, child) {
if (of_node_cmp(child->name, "pipeline") == 0) {
@@ -99,6 +193,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
if (!mdev)
return ERR_PTR(-ENOMEM);
+ mutex_init(&mdev->lock);
+
mdev->dev = dev;
mdev->reg_base = devm_ioremap_resource(dev, io_res);
if (IS_ERR(mdev->reg_base)) {
@@ -147,6 +243,22 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto err_cleanup;
}
+ err = komeda_assemble_pipelines(mdev);
+ if (err) {
+ DRM_ERROR("assemble display pipelines failed.\n");
+ goto err_cleanup;
+ }
+
+ err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
+ if (err) {
+ DRM_ERROR("create sysfs group failed.\n");
+ goto err_cleanup;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ komeda_debugfs_init(mdev);
+#endif
+
return mdev;
err_cleanup:
@@ -160,6 +272,12 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
struct komeda_dev_funcs *funcs = mdev->funcs;
int i;
+ sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(mdev->debugfs_root);
+#endif
+
for (i = 0; i < mdev->n_pipelines; i++) {
komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
mdev->pipelines[i] = NULL;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 0f77dead6a23..29e03c4e1ffc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -13,6 +13,33 @@
#include "malidp_product.h"
#include "komeda_format_caps.h"
+#define KOMEDA_EVENT_VSYNC BIT_ULL(0)
+#define KOMEDA_EVENT_FLIP BIT_ULL(1)
+#define KOMEDA_EVENT_URUN BIT_ULL(2)
+#define KOMEDA_EVENT_IBSY BIT_ULL(3)
+#define KOMEDA_EVENT_OVR BIT_ULL(4)
+#define KOMEDA_EVENT_EOW BIT_ULL(5)
+#define KOMEDA_EVENT_MODE BIT_ULL(6)
+
+#define KOMEDA_ERR_TETO BIT_ULL(14)
+#define KOMEDA_ERR_TEMR BIT_ULL(15)
+#define KOMEDA_ERR_TITR BIT_ULL(16)
+#define KOMEDA_ERR_CPE BIT_ULL(17)
+#define KOMEDA_ERR_CFGE BIT_ULL(18)
+#define KOMEDA_ERR_AXIE BIT_ULL(19)
+#define KOMEDA_ERR_ACE0 BIT_ULL(20)
+#define KOMEDA_ERR_ACE1 BIT_ULL(21)
+#define KOMEDA_ERR_ACE2 BIT_ULL(22)
+#define KOMEDA_ERR_ACE3 BIT_ULL(23)
+#define KOMEDA_ERR_DRIFTTO BIT_ULL(24)
+#define KOMEDA_ERR_FRAMETO BIT_ULL(25)
+#define KOMEDA_ERR_CSCE BIT_ULL(26)
+#define KOMEDA_ERR_ZME BIT_ULL(27)
+#define KOMEDA_ERR_MERR BIT_ULL(28)
+#define KOMEDA_ERR_TCF BIT_ULL(29)
+#define KOMEDA_ERR_TTNG BIT_ULL(30)
+#define KOMEDA_ERR_TTF BIT_ULL(31)
+
/* malidp device id */
enum {
MALI_D71 = 0,
@@ -39,6 +66,11 @@ struct komeda_product_data {
struct komeda_dev;
+struct komeda_events {
+ u64 global;
+ u64 pipes[KOMEDA_MAX_PIPELINES];
+};
+
/**
* struct komeda_dev_funcs
*
@@ -60,6 +92,49 @@ struct komeda_dev_funcs {
int (*enum_resources)(struct komeda_dev *mdev);
/** @cleanup: call to chip to cleanup komeda_dev->chip data */
void (*cleanup)(struct komeda_dev *mdev);
+ /**
+ * @irq_handler:
+ *
+ * for CORE to get the HW event from the CHIP when interrupt happened.
+ */
+ irqreturn_t (*irq_handler)(struct komeda_dev *mdev,
+ struct komeda_events *events);
+ /** @enable_irq: enable irq */
+ int (*enable_irq)(struct komeda_dev *mdev);
+ /** @disable_irq: disable irq */
+ int (*disable_irq)(struct komeda_dev *mdev);
+ /** @on_off_vblank: notify HW to on/off vblank */
+ void (*on_off_vblank)(struct komeda_dev *mdev,
+ int master_pipe, bool on);
+
+ /** @dump_register: Optional, dump registers to seq_file */
+ void (*dump_register)(struct komeda_dev *mdev, struct seq_file *seq);
+ /**
+ * @change_opmode:
+ *
+ * Notify HW to switch to a new display operation mode.
+ */
+ int (*change_opmode)(struct komeda_dev *mdev, int new_mode);
+ /** @flush: Notify the HW to flush or kickoff the update */
+ void (*flush)(struct komeda_dev *mdev,
+ int master_pipe, u32 active_pipes);
+};
+
+/*
+ * DISPLAY_MODE describes how many displays are enabled, and it will be
+ * passed to the CHIP by &komeda_dev_funcs->change_opmode(), so the CHIP can
+ * assign the pipeline resources according to this usage hint.
+ * - KOMEDA_MODE_DISP0: Only one display enabled, pipeline-0 works as master.
+ * - KOMEDA_MODE_DISP1: Only one display enabled, pipeline-1 works as master.
+ * - KOMEDA_MODE_DUAL_DISP: Dual display mode, both displays are enabled.
+ * Note: on KOMEDA_MODE_DISP0/DISP1, D71 supports assigning two pipelines to
+ * a single display.
+ */
+enum {
+ KOMEDA_MODE_INACTIVE = 0,
+ KOMEDA_MODE_DISP0 = BIT(0),
+ KOMEDA_MODE_DISP1 = BIT(1),
+ KOMEDA_MODE_DUAL_DISP = KOMEDA_MODE_DISP0 | KOMEDA_MODE_DISP1,
};
/**
@@ -70,18 +145,31 @@ struct komeda_dev_funcs {
* control-abilites of device.
*/
struct komeda_dev {
+ /** @dev: the base device structure */
struct device *dev;
+ /** @reg_base: the base address of komeda io space */
u32 __iomem *reg_base;
+ /** @chip: the basic chip information */
struct komeda_chip_info chip;
/** @fmt_tbl: initialized by &komeda_dev_funcs->init_format_table */
struct komeda_format_caps_table fmt_tbl;
/** @pclk: APB clock for register access */
struct clk *pclk;
- /** @mck: HW main engine clk */
+ /** @mclk: HW main engine clk */
struct clk *mclk;
+ /** @irq: irq number */
+ int irq;
+
+ /** @lock: used to protect dpmode */
+ struct mutex lock;
+ /** @dpmode: current display mode */
+ u32 dpmode;
+
+ /** @n_pipelines: the number of pipelines in @pipelines */
int n_pipelines;
+ /** @pipelines: the komeda pipelines */
struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
/** @funcs: chip funcs to access to HW */
@@ -93,6 +181,9 @@ struct komeda_dev {
* destroyed by &komeda_dev_funcs.cleanup()
*/
void *chip_data;
+
+ /** @debugfs_root: root directory of komeda debugfs */
+ struct dentry *debugfs_root;
};
static inline bool
@@ -107,4 +198,6 @@ d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
struct komeda_dev *komeda_dev_create(struct device *dev);
void komeda_dev_destroy(struct komeda_dev *mdev);
+struct komeda_dev *dev_to_mdev(struct device *dev);
+
#endif /*_KOMEDA_DEV_H_*/
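Editorial sketch, not part of the patch: one plausible way the KMS core could turn an active-pipeline mask into the DISPLAY_MODE hint documented above and hand it to the change_opmode() chip callback. The function name komeda_update_opmode() and the assumption that pipeline ids 0/1 map to DISP0/DISP1 are illustrative only.

/* Hypothetical helper: derive the opmode hint from the active pipes and
 * notify the CHIP through the change_opmode() callback.
 */
static int komeda_update_opmode(struct komeda_dev *mdev, u32 active_pipes)
{
	int new_mode = KOMEDA_MODE_INACTIVE;
	int err = 0;

	if (active_pipes & BIT(0))
		new_mode |= KOMEDA_MODE_DISP0;
	if (active_pipes & BIT(1))
		new_mode |= KOMEDA_MODE_DISP1;

	mutex_lock(&mdev->lock);	/* @lock protects @dpmode */
	if (mdev->dpmode != new_mode) {
		err = mdev->funcs->change_opmode(mdev, new_mode);
		if (!err)
			mdev->dpmode = new_mode;
	}
	mutex_unlock(&mdev->lock);

	return err;
}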
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 2bdd189b041d..cfa5068d9d1e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -17,6 +17,13 @@ struct komeda_drv {
struct komeda_kms_dev *kms;
};
+struct komeda_dev *dev_to_mdev(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ return mdrv ? mdrv->mdev : NULL;
+}
+
static void komeda_unbind(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
@@ -120,7 +127,7 @@ static const struct komeda_product_data komeda_products[] = {
},
};
-const struct of_device_id komeda_of_match[] = {
+static const struct of_device_id komeda_of_match[] = {
{ .compatible = "arm,mali-d71", .data = &komeda_products[MALI_D71], },
{},
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
index 0de2e4a2afd2..ea2fe190c1e3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
@@ -10,11 +10,16 @@
#include <drm/drm_framebuffer.h>
#include "komeda_format_caps.h"
-/** struct komeda_fb - entend drm_framebuffer with komeda attribute */
+/**
+ * struct komeda_fb - Extending drm_framebuffer with komeda attribute
+ */
struct komeda_fb {
/** @base: &drm_framebuffer */
struct drm_framebuffer base;
- /* @format_caps: &komeda_format_caps */
+ /**
+ * @format_caps:
+ * extends drm_format_info for komeda specific information
+ */
const struct komeda_format_caps *format_caps;
/** @aligned_w: aligned frame buffer width */
u32 aligned_w;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 47a58ab20434..86f6542afb40 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
@@ -25,18 +26,39 @@ static int komeda_gem_cma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- u32 alignment = 16; /* TODO get alignment from dev */
+ struct komeda_dev *mdev = dev->dev_private;
+ u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8),
- alignment);
+ args->pitch = ALIGN(pitch, mdev->chip.bus_width);
return drm_gem_cma_dumb_create_internal(file, dev, args);
}
+static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct komeda_dev *mdev = drm->dev_private;
+ struct komeda_kms_dev *kms = to_kdev(drm);
+ struct komeda_events evts;
+ irqreturn_t status;
+ u32 i;
+
+ /* Call into the CHIP to recognize events */
+ memset(&evts, 0, sizeof(evts));
+ status = mdev->funcs->irq_handler(mdev, &evts);
+
+ /* Notify the crtc to handle the events */
+ for (i = 0; i < kms->n_crtcs; i++)
+ komeda_crtc_handle_event(&kms->crtcs[i], &evts);
+
+ return status;
+}
+
static struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME,
+ DRIVER_PRIME | DRIVER_HAVE_IRQ,
.lastclose = drm_fb_helper_lastclose,
+ .irq_handler = komeda_kms_irq_handler,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = komeda_gem_cma_dumb_create,
@@ -78,9 +100,37 @@ static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
.atomic_commit_tail = komeda_kms_commit_tail,
};
+static int komeda_kms_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_st, *new_crtc_st;
+ int i, err;
+
+ err = drm_atomic_helper_check_modeset(dev, state);
+ if (err)
+ return err;
+
+ /* komeda needs to re-calculate the resource assignment in every commit,
+ * so all affected_planes (even unchanged ones) need to be added to the
+ * drm_atomic_state.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
+ err = drm_atomic_add_affected_planes(state, crtc);
+ if (err)
+ return err;
+ }
+
+ err = drm_atomic_helper_check_planes(dev, state);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
.fb_create = komeda_fb_create,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = komeda_kms_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -144,14 +194,25 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
drm_mode_config_reset(drm);
- err = drm_dev_register(drm, 0);
+ err = drm_irq_install(drm, mdev->irq);
if (err)
goto cleanup_mode_config;
+ err = mdev->funcs->enable_irq(mdev);
+ if (err)
+ goto uninstall_irq;
+
+ err = drm_dev_register(drm, 0);
+ if (err)
+ goto uninstall_irq;
+
return kms;
+uninstall_irq:
+ drm_irq_uninstall(drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
+ komeda_kms_cleanup_private_objs(kms);
free_kms:
kfree(kms);
return ERR_PTR(err);
@@ -162,9 +223,11 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
struct drm_device *drm = &kms->base;
struct komeda_dev *mdev = drm->dev_private;
+ mdev->funcs->disable_irq(mdev);
drm_dev_unregister(drm);
+ drm_irq_uninstall(drm);
component_unbind_all(mdev->dev, drm);
- komeda_kms_cleanup_private_objs(mdev);
+ komeda_kms_cleanup_private_objs(kms);
drm_mode_config_cleanup(drm);
drm->dev_private = NULL;
drm_dev_put(drm);
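Editorial note on the komeda_gem_cma_dumb_create() change above (not part of the patch): the pitch is now aligned to the chip's reported bus width instead of a hard-coded 16. For example, for a 1920-wide, 32 bpp dumb buffer on a device with chip.bus_width = 16, pitch = ALIGN(1920 * 32 / 8, 16) = 7680 bytes.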
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 874e9c9f0749..ac3d9209b4d9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -12,8 +12,12 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
+#include <video/videomode.h>
+#include <video/display_timing.h>
-/** struct komeda_plane - komeda instance of drm_plane */
+/**
+ * struct komeda_plane - komeda instance of drm_plane
+ */
struct komeda_plane {
/** @base: &drm_plane */
struct drm_plane base;
@@ -68,9 +72,14 @@ struct komeda_crtc {
* merge into the master.
*/
struct komeda_pipeline *slave;
+
+ /** @disable_done: this flip_done is for tracking the disable operation */
+ struct completion *disable_done;
};
-/** struct komeda_crtc_state */
+/**
+ * struct komeda_crtc_state
+ */
struct komeda_crtc_state {
/** @base: &drm_crtc_state */
struct drm_crtc_state base;
@@ -78,7 +87,15 @@ struct komeda_crtc_state {
/* private properties */
/* computed state which are used by validate/check */
+ /**
+ * @affected_pipes:
+ * the affected pipelines in one display instance
+ */
u32 affected_pipes;
+ /**
+ * @active_pipes:
+ * the active pipelines in one display instance
+ */
u32 active_pipes;
};
@@ -106,7 +123,10 @@ int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev);
-void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev);
+void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms);
+
+void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
+ struct komeda_events *evts);
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
void komeda_kms_detach(struct komeda_kms_dev *kms);
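Editorial sketch, not part of the patch: a minimal illustration of how a CRTC could consume the per-pipeline event bits filled in by the chip's irq_handler(). The real komeda_crtc_handle_event() lives in komeda_crtc.c and does more; the body below only shows the vblank case, and the _sketch name is an assumption.

static void komeda_crtc_handle_event_sketch(struct komeda_crtc *kcrtc,
					    struct komeda_events *evts)
{
	/* pick the event bits reported for this CRTC's master pipeline */
	u64 events = evts->pipes[kcrtc->master->id];

	if (events & KOMEDA_EVENT_VSYNC)
		drm_crtc_handle_vblank(&kcrtc->base);
}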
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index f1908e9ef128..c379439c6194 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -19,17 +19,17 @@ komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) {
DRM_ERROR("Exceed max support %d pipelines.\n",
KOMEDA_MAX_PIPELINES);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
if (size < sizeof(*pipe)) {
DRM_ERROR("Request pipeline size too small.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL);
if (!pipe)
- return NULL;
+ return ERR_PTR(-ENOMEM);
pipe->mdev = mdev;
pipe->id = mdev->n_pipelines;
@@ -62,7 +62,7 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
devm_kfree(mdev->dev, pipe);
}
-struct komeda_component **
+static struct komeda_component **
komeda_pipeline_get_component_pos(struct komeda_pipeline *pipe, int id)
{
struct komeda_dev *mdev = pipe->mdev;
@@ -142,32 +142,32 @@ komeda_component_add(struct komeda_pipeline *pipe,
if (max_active_inputs > KOMEDA_COMPONENT_N_INPUTS) {
WARN(1, "please large KOMEDA_COMPONENT_N_INPUTS to %d.\n",
max_active_inputs);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
pos = komeda_pipeline_get_component_pos(pipe, id);
if (!pos || (*pos))
- return NULL;
+ return ERR_PTR(-EINVAL);
if (has_bit(id, KOMEDA_PIPELINE_LAYERS)) {
idx = id - KOMEDA_COMPONENT_LAYER0;
num = &pipe->n_layers;
if (idx != pipe->n_layers) {
DRM_ERROR("please add Layer by id sequence.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
} else if (has_bit(id, KOMEDA_PIPELINE_SCALERS)) {
idx = id - KOMEDA_COMPONENT_SCALER0;
num = &pipe->n_scalers;
if (idx != pipe->n_scalers) {
DRM_ERROR("please add Scaler by id sequence.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
}
c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL);
if (!c)
- return NULL;
+ return ERR_PTR(-ENOMEM);
c->id = id;
c->hw_id = hw_id;
@@ -200,3 +200,98 @@ void komeda_component_destroy(struct komeda_dev *mdev,
{
devm_kfree(mdev->dev, c);
}
+
+static void komeda_component_dump(struct komeda_component *c)
+{
+ if (!c)
+ return;
+
+ DRM_DEBUG(" %s: ID %d-0x%08lx.\n",
+ c->name, c->id, BIT(c->id));
+ DRM_DEBUG(" max_active_inputs:%d, supported_inputs: 0x%08x.\n",
+ c->max_active_inputs, c->supported_inputs);
+ DRM_DEBUG(" max_active_outputs:%d, supported_outputs: 0x%08x.\n",
+ c->max_active_outputs, c->supported_outputs);
+}
+
+static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
+{
+ struct komeda_component *c;
+ int id;
+
+ DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s\n",
+ pipe->id, pipe->n_layers, pipe->n_scalers,
+ pipe->of_output_dev ? pipe->of_output_dev->full_name : "none");
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ komeda_component_dump(c);
+ }
+}
+
+static void komeda_component_verify_inputs(struct komeda_component *c)
+{
+ struct komeda_pipeline *pipe = c->pipeline;
+ struct komeda_component *input;
+ int id;
+
+ dp_for_each_set_bit(id, c->supported_inputs) {
+ input = komeda_pipeline_get_component(pipe, id);
+ if (!input) {
+ c->supported_inputs &= ~(BIT(id));
+ DRM_WARN("Can not find input(ID-%d) for component: %s.\n",
+ id, c->name);
+ continue;
+ }
+
+ input->supported_outputs |= BIT(c->id);
+ }
+}
+
+static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
+{
+ struct komeda_component *c;
+ int id;
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ komeda_component_verify_inputs(c);
+ }
+}
+
+int komeda_assemble_pipelines(struct komeda_dev *mdev)
+{
+ struct komeda_pipeline *pipe;
+ int i;
+
+ for (i = 0; i < mdev->n_pipelines; i++) {
+ pipe = mdev->pipelines[i];
+
+ komeda_pipeline_assemble(pipe);
+ komeda_pipeline_dump(pipe);
+ }
+
+ return 0;
+}
+
+void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
+ struct seq_file *sf)
+{
+ struct komeda_component *c;
+ u32 id;
+
+ seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
+
+ if (pipe->funcs && pipe->funcs->dump_register)
+ pipe->funcs->dump_register(pipe, sf);
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ seq_printf(sf, "\n------%s------\n", c->name);
+ if (c->funcs->dump_register)
+ c->funcs->dump_register(c, sf);
+ }
+}
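Editorial sketch, not part of the patch: with komeda_pipeline_add() and komeda_component_add() now returning ERR_PTR() codes instead of NULL, a chip backend is expected to propagate the encoded error. The function name d71_enum_pipelines_sketch() and the NULL funcs argument are assumptions for illustration only.

static int d71_enum_pipelines_sketch(struct komeda_dev *mdev)
{
	struct komeda_pipeline *pipe;

	pipe = komeda_pipeline_add(mdev, sizeof(*pipe), NULL);
	if (IS_ERR(pipe))
		return PTR_ERR(pipe);

	/* ... add components with komeda_component_add(), same convention ... */
	return 0;
}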
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index 8c950bc8ae96..b1f813a349a4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -90,32 +90,35 @@ struct komeda_component {
u32 __iomem *reg;
/** @id: component id */
u32 id;
- /** @hw_ic: component hw id,
- * which is initialized by chip and used by chip only
+ /**
+ * @hw_id: component hw id,
+ * which is initialized by chip and used by chip only
*/
u32 hw_id;
/**
* @max_active_inputs:
- * @max_active_outpus:
+ * @max_active_outputs:
*
- * maximum number of inputs/outputs that can be active in the same time
+ * maximum number of inputs/outputs that can be active at the same time
* Note:
* the number isn't the bit number of @supported_inputs or
* @supported_outputs, but may be less than it, since component may not
* support enabling all @supported_inputs/outputs at the same time.
*/
u8 max_active_inputs;
+ /** @max_active_outputs: maximum number of outputs */
u8 max_active_outputs;
/**
* @supported_inputs:
* @supported_outputs:
*
- * bitmask of BIT(component->id) for the supported inputs/outputs
+ * bitmask of BIT(component->id) for the supported inputs/outputs,
* describes the possibilities of how a component is linked into a
* pipeline.
*/
u32 supported_inputs;
+ /** @supported_outputs: bitmask of supported output component ids */
u32 supported_outputs;
/**
@@ -134,7 +137,8 @@ struct komeda_component {
struct komeda_component_output {
/** @component: indicate which component the data comes from */
struct komeda_component *component;
- /** @output_port:
+ /**
+ * @output_port:
* the output port of the &komeda_component_output.component
*/
u8 output_port;
@@ -150,11 +154,12 @@ struct komeda_component_output {
struct komeda_component_state {
/** @obj: tracking component_state by drm_atomic_state */
struct drm_private_state obj;
+ /** @component: backpointer to the component */
struct komeda_component *component;
/**
* @binding_user:
- * currently bound user, the user can be crtc/plane/wb_conn, which is
- * valid decided by @component and @inputs
+ * currently bound user; the user can be @crtc, @plane or @wb_conn, and
+ * which one is valid is decided by @component and @inputs
*
* - Layer: its user always is plane.
* - compiz/improc/timing_ctrlr: the user is crtc.
@@ -162,20 +167,24 @@ struct komeda_component_state {
* - scaler: plane when input is layer, wb_conn if input is compiz.
*/
union {
+ /** @crtc: backpointer for user crtc */
struct drm_crtc *crtc;
+ /** @plane: backpointer for user plane */
struct drm_plane *plane;
+ /** @wb_conn: backpointer for user wb_connector */
struct drm_connector *wb_conn;
void *binding_user;
};
+
/**
* @active_inputs:
*
* active_inputs is bitmask of @inputs index
*
- * - active_inputs = changed_active_inputs + unchanged_active_inputs
- * - affected_inputs = old->active_inputs + new->active_inputs;
+ * - active_inputs = changed_active_inputs | unchanged_active_inputs
+ * - affected_inputs = old->active_inputs | new->active_inputs;
* - disabling_inputs = affected_inputs ^ active_inputs;
- * - changed_inputs = disabling_inputs + changed_active_inputs;
+ * - changed_inputs = disabling_inputs | changed_active_inputs;
*
* NOTE:
* changed_inputs doesn't include all active_input but only
@@ -183,7 +192,9 @@ struct komeda_component_state {
* level for dirty update.
*/
u16 active_inputs;
+ /** @changed_active_inputs: bitmask of the changed @active_inputs */
u16 changed_active_inputs;
+ /** @affected_inputs: bitmask for affected @inputs */
u16 affected_inputs;
/**
* @inputs:
@@ -204,57 +215,96 @@ static inline u16 component_changed_inputs(struct komeda_component_state *st)
return component_disabling_inputs(st) | st->changed_active_inputs;
}
+#define for_each_changed_input(st, i) \
+ for ((i) = 0; (i) < (st)->component->max_active_inputs; (i)++) \
+ if (has_bit((i), component_changed_inputs(st)))
+
#define to_comp(__c) (((__c) == NULL) ? NULL : &((__c)->base))
#define to_cpos(__c) ((struct komeda_component **)&(__c))
-/* these structures are going to be filled in in uture patches */
struct komeda_layer {
struct komeda_component base;
- /* layer specific features and caps */
- int layer_type; /* RICH, SIMPLE or WB */
+ /* accepted h/v input range before rotation */
+ struct malidp_range hsize_in, vsize_in;
+ u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 supported_rots;
};
struct komeda_layer_state {
struct komeda_component_state base;
/* layer specific configuration state */
+ u16 hsize, vsize;
+ u32 rot;
+ dma_addr_t addr[3];
};
-struct komeda_compiz {
+struct komeda_scaler {
struct komeda_component base;
- /* compiz specific features and caps */
+ /* scaler features and caps */
};
-struct komeda_compiz_state {
+struct komeda_scaler_state {
struct komeda_component_state base;
- /* compiz specific configuration state */
};
-struct komeda_scaler {
+struct komeda_compiz {
struct komeda_component base;
- /* scaler features and caps */
+ struct malidp_range hsize, vsize;
};
-struct komeda_scaler_state {
+struct komeda_compiz_input_cfg {
+ u16 hsize, vsize;
+ u16 hoffset, voffset;
+ u8 pixel_blend_mode, layer_alpha;
+};
+
+struct komeda_compiz_state {
struct komeda_component_state base;
+ /* composition size */
+ u16 hsize, vsize;
+ struct komeda_compiz_input_cfg cins[KOMEDA_COMPONENT_N_INPUTS];
};
struct komeda_improc {
struct komeda_component base;
+ u32 supported_color_formats; /* DRM_RGB/YUV444/YUV420 */
+ u32 supported_color_depths; /* BIT(8) | BIT(10) */
+ u8 supports_degamma : 1;
+ u8 supports_csc : 1;
+ u8 supports_gamma : 1;
};
struct komeda_improc_state {
struct komeda_component_state base;
+ u16 hsize, vsize;
};
/* display timing controller */
struct komeda_timing_ctrlr {
struct komeda_component base;
+ u8 supports_dual_link : 1;
};
struct komeda_timing_ctrlr_state {
struct komeda_component_state base;
};
+/* Why define a separate structure instead of using plane_state directly?
+ * 1. Komeda supports layer_split, which means a plane_state can be split and
+ *    handled by two layers, each layer handling only half of the plane image.
+ * 2. The user properties are fixed up according to the HW's capabilities, e.g.
+ *    the user sets rotation to R180 but the HW only supports REFLECT_X+Y;
+ *    the rot here is the value after drm_rotation_simplify().
+ */
+struct komeda_data_flow_cfg {
+ struct komeda_component_output input;
+ u16 in_x, in_y, in_w, in_h;
+ u32 out_x, out_y, out_w, out_h;
+ u32 rot;
+ int blending_zorder;
+ u8 pixel_blend_mode, layer_alpha;
+};
+
/** struct komeda_pipeline_funcs */
struct komeda_pipeline_funcs {
/* dump_register: Optional, dump registers to seq_file */
@@ -280,14 +330,23 @@ struct komeda_pipeline {
int id;
/** @avail_comps: available components mask of pipeline */
u32 avail_comps;
+ /** @n_layers: the number of layers in @layers */
int n_layers;
+ /** @layers: the pipeline layers */
struct komeda_layer *layers[KOMEDA_PIPELINE_MAX_LAYERS];
+ /** @n_scalers: the number of scalers in @scalers */
int n_scalers;
+ /** @scalers: the pipeline scalers */
struct komeda_scaler *scalers[KOMEDA_PIPELINE_MAX_SCALERS];
+ /** @compiz: compositor */
struct komeda_compiz *compiz;
+ /** @wb_layer: writeback layer */
struct komeda_layer *wb_layer;
+ /** @improc: post image processor */
struct komeda_improc *improc;
+ /** @ctrlr: timing controller */
struct komeda_timing_ctrlr *ctrlr;
+ /** @funcs: chip pipeline functions */
struct komeda_pipeline_funcs *funcs; /* private pipeline functions */
/** @of_node: pipeline dt node */
@@ -308,6 +367,7 @@ struct komeda_pipeline {
struct komeda_pipeline_state {
/** @obj: tracking pipeline_state by drm_atomic_state */
struct drm_private_state obj;
+ /** @pipe: backpointer to the pipeline */
struct komeda_pipeline *pipe;
/** @crtc: currently bound crtc */
struct drm_crtc *crtc;
@@ -340,10 +400,13 @@ komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
struct komeda_pipeline_funcs *funcs);
void komeda_pipeline_destroy(struct komeda_dev *mdev,
struct komeda_pipeline *pipe);
-
+int komeda_assemble_pipelines(struct komeda_dev *mdev);
struct komeda_component *
komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id);
+void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
+ struct seq_file *sf);
+
/* component APIs */
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
@@ -356,4 +419,26 @@ komeda_component_add(struct komeda_pipeline *pipe,
void komeda_component_destroy(struct komeda_dev *mdev,
struct komeda_component *c);
+struct komeda_plane_state;
+struct komeda_crtc_state;
+struct komeda_crtc;
+
+int komeda_build_layer_data_flow(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow);
+int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ struct komeda_crtc_state *kcrtc_st);
+
+int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ struct komeda_crtc_state *kcrtc_st);
+
+struct komeda_pipeline_state *
+komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state);
+void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state);
+void komeda_pipeline_update(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state);
+
#endif /* _KOMEDA_PIPELINE_H_*/
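Editorial sketch, not part of the patch: a worked example of the input-bitmask relations documented for &komeda_component_state above (assumes kernel types from linux/types.h).

static void komeda_input_mask_example(void)
{
	u16 old_active = 0x3;		/* inputs 0 and 1 were active */
	u16 active = 0x6;		/* inputs 1 and 2 are active now */
	u16 changed_active = 0x4;	/* only input 2 was (re)configured */
	u16 affected, disabling, changed;

	affected  = old_active | active;		/* 0x7 */
	disabling = affected ^ active;			/* 0x1: input 0 gets disabled */
	changed   = disabling | changed_active;		/* 0x5 */

	(void)changed;
}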
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
new file mode 100644
index 000000000000..36570d7dad61
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#include <drm/drm_print.h>
+#include <linux/clk.h>
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+#include "komeda_pipeline.h"
+#include "komeda_framebuffer.h"
+
+static inline bool is_switching_user(void *old, void *new)
+{
+ if (!old || !new)
+ return false;
+
+ return old != new;
+}
+
+static struct komeda_pipeline_state *
+komeda_pipeline_get_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj);
+ if (IS_ERR(priv_st))
+ return ERR_CAST(priv_st);
+
+ return priv_to_pipe_st(priv_st);
+}
+
+struct komeda_pipeline_state *
+komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj);
+ if (priv_st)
+ return priv_to_pipe_st(priv_st);
+ return NULL;
+}
+
+static struct komeda_pipeline_state *
+komeda_pipeline_get_new_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj);
+ if (priv_st)
+ return priv_to_pipe_st(priv_st);
+ return NULL;
+}
+
+/* Assign pipeline for crtc */
+static struct komeda_pipeline_state *
+komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct komeda_pipeline_state *st;
+
+ st = komeda_pipeline_get_state(pipe, state);
+ if (IS_ERR(st))
+ return st;
+
+ if (is_switching_user(crtc, st->crtc)) {
+ DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n",
+ drm_crtc_index(crtc), pipe->id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* a pipeline can only be disabled when it is free or unused */
+ if (!crtc && st->active_comps) {
+ DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ st->crtc = crtc;
+
+ if (crtc) {
+ struct komeda_crtc_state *kcrtc_st;
+
+ kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state,
+ crtc));
+
+ kcrtc_st->active_pipes |= BIT(pipe->id);
+ kcrtc_st->affected_pipes |= BIT(pipe->id);
+ }
+ return st;
+}
+
+static struct komeda_component_state *
+komeda_component_get_state(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));
+
+ priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
+ if (IS_ERR(priv_st))
+ return ERR_CAST(priv_st);
+
+ return priv_to_comp_st(priv_st);
+}
+
+static struct komeda_component_state *
+komeda_component_get_old_state(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
+ if (priv_st)
+ return priv_to_comp_st(priv_st);
+ return NULL;
+}
+
+/**
+ * komeda_component_get_state_and_set_user()
+ *
+ * @c: component to get state and set user
+ * @state: global atomic state
+ * @user: direct user, the binding user
+ * @crtc: the CRTC user, the big boss :)
+ *
+ * This function accepts two users:
+ * - The direct user: can be a plane/crtc/wb_connector, depending on the component
+ * - The big boss (CRTC)
+ *   CRTC is the big boss (the final user), because all component resources
+ *   are eventually assigned to a CRTC: a layer is bound to a kms_plane, but
+ *   the kms_plane is in turn bound to a CRTC.
+ *
+ * The big boss (CRTC) is for pipeline assignment: a &komeda_component isn't
+ * independent and can't be assigned to a CRTC freely, it belongs to a specific
+ * pipeline, only the pipeline can be shared between CRTCs, and the pipeline as
+ * a whole (including all its internal components) is assigned to a specific CRTC.
+ *
+ * So before setting a user on a komeda_component, first check the status of
+ * component->pipeline to see if the pipeline is available to this specific
+ * CRTC. If the pipeline is busy (assigned to another CRTC), the component
+ * cannot be assigned to the direct user even if the component itself is free.
+ */
+static struct komeda_component_state *
+komeda_component_get_state_and_set_user(struct komeda_component *c,
+ struct drm_atomic_state *state,
+ void *user,
+ struct drm_crtc *crtc)
+{
+ struct komeda_pipeline_state *pipe_st;
+ struct komeda_component_state *st;
+
+ /* First check if the pipeline is available */
+ pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
+ state, crtc);
+ if (IS_ERR(pipe_st))
+ return ERR_CAST(pipe_st);
+
+ st = komeda_component_get_state(c, state);
+ if (IS_ERR(st))
+ return st;
+
+ /* check if the component has been occupied */
+ if (is_switching_user(user, st->binding_user)) {
+ DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
+ return ERR_PTR(-EBUSY);
+ }
+
+ st->binding_user = user;
+ /* mark the component as active if user is valid */
+ if (st->binding_user)
+ pipe_st->active_comps |= BIT(c->id);
+
+ return st;
+}
+
+static void
+komeda_component_add_input(struct komeda_component_state *state,
+ struct komeda_component_output *input,
+ int idx)
+{
+ struct komeda_component *c = state->component;
+
+ WARN_ON((idx < 0 || idx >= c->max_active_inputs));
+
+ /* inputs[i] is only valid while it is active. So if input[i] is a newly
+ * enabled input that switches from disabled to enabled, the old inputs[i]
+ * is undefined (NOT zeroed); we cannot rely on memcmp and must directly
+ * mark it as changed.
+ */
+ if (!has_bit(idx, state->affected_inputs) ||
+ memcmp(&state->inputs[idx], input, sizeof(*input))) {
+ memcpy(&state->inputs[idx], input, sizeof(*input));
+ state->changed_active_inputs |= BIT(idx);
+ }
+ state->active_inputs |= BIT(idx);
+ state->affected_inputs |= BIT(idx);
+}
+
+static int
+komeda_component_check_input(struct komeda_component_state *state,
+ struct komeda_component_output *input,
+ int idx)
+{
+ struct komeda_component *c = state->component;
+
+ if ((idx < 0) || (idx >= c->max_active_inputs)) {
+ DRM_DEBUG_ATOMIC("%s invalid input id: %d.\n", c->name, idx);
+ return -EINVAL;
+ }
+
+ if (has_bit(idx, state->active_inputs)) {
+ DRM_DEBUG_ATOMIC("%s required input_id: %d has been occupied already.\n",
+ c->name, idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+komeda_component_set_output(struct komeda_component_output *output,
+ struct komeda_component *comp,
+ u8 output_port)
+{
+ output->component = comp;
+ output->output_port = output_port;
+}
+
+static int
+komeda_component_validate_private(struct komeda_component *c,
+ struct komeda_component_state *st)
+{
+ int err;
+
+ if (!c->funcs->validate)
+ return 0;
+
+ err = c->funcs->validate(c, st);
+ if (err)
+ DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);
+
+ return err;
+}
+
+static int
+komeda_layer_check_cfg(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ if (!in_range(&layer->hsize_in, dflow->in_w)) {
+ DRM_DEBUG_ATOMIC("src_w: %d is out of range.\n", dflow->in_w);
+ return -EINVAL;
+ }
+
+ if (!in_range(&layer->vsize_in, dflow->in_h)) {
+ DRM_DEBUG_ATOMIC("src_h: %d is out of range.\n", dflow->in_h);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+komeda_layer_validate(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_plane_state *plane_st = &kplane_st->base;
+ struct drm_framebuffer *fb = plane_st->fb;
+ struct komeda_fb *kfb = to_kfb(fb);
+ struct komeda_component_state *c_st;
+ struct komeda_layer_state *st;
+ int i, err;
+
+ err = komeda_layer_check_cfg(layer, kplane_st, dflow);
+ if (err)
+ return err;
+
+ c_st = komeda_component_get_state_and_set_user(&layer->base,
+ plane_st->state, plane_st->plane, plane_st->crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_layer_st(c_st);
+
+ st->rot = dflow->rot;
+ st->hsize = kfb->aligned_w;
+ st->vsize = kfb->aligned_h;
+
+ for (i = 0; i < fb->format->num_planes; i++)
+ st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
+ dflow->in_y, i);
+
+ err = komeda_component_validate_private(&layer->base, c_st);
+ if (err)
+ return err;
+
+ /* update the data flow for the next stage */
+ komeda_component_set_output(&dflow->input, &layer->base, 0);
+
+ return 0;
+}
+
+static void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
+ u16 *hsize, u16 *vsize)
+{
+ struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;
+
+ if (hsize)
+ *hsize = m->hdisplay;
+ if (vsize)
+ *vsize = m->vdisplay;
+}
+
+static int
+komeda_compiz_set_input(struct komeda_compiz *compiz,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_atomic_state *drm_st = kcrtc_st->base.state;
+ struct komeda_component_state *c_st, *old_st;
+ struct komeda_compiz_input_cfg *cin;
+ u16 compiz_w, compiz_h;
+ int idx = dflow->blending_zorder;
+
+ pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h);
+ /* check display rect */
+ if ((dflow->out_x + dflow->out_w > compiz_w) ||
+ (dflow->out_y + dflow->out_h > compiz_h) ||
+ dflow->out_w == 0 || dflow->out_h == 0) {
+ DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n",
+ dflow->out_x, dflow->out_y,
+ dflow->out_w, dflow->out_h);
+ return -EINVAL;
+ }
+
+ c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st,
+ kcrtc_st->base.crtc, kcrtc_st->base.crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ if (komeda_component_check_input(c_st, &dflow->input, idx))
+ return -EINVAL;
+
+ cin = &(to_compiz_st(c_st)->cins[idx]);
+
+ cin->hsize = dflow->out_w;
+ cin->vsize = dflow->out_h;
+ cin->hoffset = dflow->out_x;
+ cin->voffset = dflow->out_y;
+ cin->pixel_blend_mode = dflow->pixel_blend_mode;
+ cin->layer_alpha = dflow->layer_alpha;
+
+ old_st = komeda_component_get_old_state(&compiz->base, drm_st);
+ WARN_ON(!old_st);
+
+ /* compare with old to check if this input has been changed */
+ if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
+ c_st->changed_active_inputs |= BIT(idx);
+
+ komeda_component_add_input(c_st, &dflow->input, idx);
+
+ return 0;
+}
+
+static int
+komeda_compiz_validate(struct komeda_compiz *compiz,
+ struct komeda_crtc_state *state,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct komeda_component_state *c_st;
+ struct komeda_compiz_state *st;
+
+ c_st = komeda_component_get_state_and_set_user(&compiz->base,
+ state->base.state, state->base.crtc, state->base.crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_compiz_st(c_st);
+
+ pipeline_composition_size(state, &st->hsize, &st->vsize);
+
+ komeda_component_set_output(&dflow->input, &compiz->base, 0);
+
+ /* compiz output dflow will be fed to the next pipeline stage, prepare
+ * the data flow configuration for the next stage
+ */
+ if (dflow) {
+ dflow->in_w = st->hsize;
+ dflow->in_h = st->vsize;
+ dflow->out_w = dflow->in_w;
+ dflow->out_h = dflow->in_h;
+ /* the output data of compiz doesn't have alpha; it can only be used as
+ * the bottom layer when blending it with the master layers
+ */
+ dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
+ dflow->layer_alpha = 0xFF;
+ dflow->blending_zorder = 0;
+ }
+
+ return 0;
+}
+
+static int
+komeda_improc_validate(struct komeda_improc *improc,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct komeda_component_state *c_st;
+ struct komeda_improc_state *st;
+
+ c_st = komeda_component_get_state_and_set_user(&improc->base,
+ kcrtc_st->base.state, crtc, crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_improc_st(c_st);
+
+ st->hsize = dflow->in_w;
+ st->vsize = dflow->in_h;
+
+ komeda_component_add_input(&st->base, &dflow->input, 0);
+ komeda_component_set_output(&dflow->input, &improc->base, 0);
+
+ return 0;
+}
+
+static int
+komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct komeda_timing_ctrlr_state *st;
+ struct komeda_component_state *c_st;
+
+ c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
+ kcrtc_st->base.state, crtc, crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_ctrlr_st(c_st);
+
+ komeda_component_add_input(&st->base, &dflow->input, 0);
+ komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
+
+ return 0;
+}
+
+int komeda_build_layer_data_flow(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_plane *plane = kplane_st->base.plane;
+ struct komeda_pipeline *pipe = layer->base.pipeline;
+ int err;
+
+ DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
+ layer->base.name, plane->base.id, plane->name,
+ dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
+ dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
+
+ err = komeda_layer_validate(layer, kplane_st, dflow);
+ if (err)
+ return err;
+
+ err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
+
+ return err;
+}
+
+/* build display output data flow, the data path is:
+ * compiz -> improc -> timing_ctrlr
+ */
+int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ struct komeda_crtc_state *kcrtc_st)
+{
+ struct komeda_pipeline *master = kcrtc->master;
+ struct komeda_data_flow_cfg m_dflow; /* master data flow */
+ int err;
+
+ memset(&m_dflow, 0, sizeof(m_dflow));
+
+ err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ struct komeda_pipeline_state *new)
+{
+ struct drm_atomic_state *drm_st = new->obj.state;
+ struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
+ struct komeda_component_state *c_st;
+ struct komeda_component *c;
+ u32 disabling_comps, id;
+
+ WARN_ON(!old);
+
+ disabling_comps = (~new->active_comps) & old->active_comps;
+
+ /* unbind all disabling components */
+ dp_for_each_set_bit(id, disabling_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = komeda_component_get_state_and_set_user(c,
+ drm_st, NULL, new->crtc);
+ WARN_ON(IS_ERR(c_st));
+ }
+}
+
+/* release unclaimed pipeline resource */
+int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ struct komeda_crtc_state *kcrtc_st)
+{
+ struct drm_atomic_state *drm_st = kcrtc_st->base.state;
+ struct komeda_pipeline_state *st;
+
+ /* ignore the pipeline which is not affected */
+ if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
+ return 0;
+
+ if (has_bit(pipe->id, kcrtc_st->active_pipes))
+ st = komeda_pipeline_get_new_state(pipe, drm_st);
+ else
+ st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
+
+ if (WARN_ON(IS_ERR_OR_NULL(st)))
+ return -EINVAL;
+
+ komeda_pipeline_unbound_components(pipe, st);
+
+ return 0;
+}
+
+void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state)
+{
+ struct komeda_pipeline_state *old;
+ struct komeda_component *c;
+ struct komeda_component_state *c_st;
+ u32 id, disabling_comps = 0;
+
+ old = komeda_pipeline_get_old_state(pipe, old_state);
+
+ disabling_comps = old->active_comps;
+ DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
+ pipe->id, disabling_comps);
+
+ dp_for_each_set_bit(id, disabling_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = priv_to_comp_st(c->obj.state);
+
+ /*
+ * If we disabled a component then all active_inputs should be
+ * put in the list of changed_active_inputs, so they get
+ * re-enabled.
+ * This usually happens during a modeset when the pipeline is
+ * first disabled and then the actual state gets committed
+ * again.
+ */
+ c_st->changed_active_inputs |= c_st->active_inputs;
+
+ c->funcs->disable(c);
+ }
+}
+
+void komeda_pipeline_update(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state)
+{
+ struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
+ struct komeda_pipeline_state *old;
+ struct komeda_component *c;
+ u32 id, changed_comps = 0;
+
+ old = komeda_pipeline_get_old_state(pipe, old_state);
+
+ changed_comps = new->active_comps | old->active_comps;
+
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
+ pipe->id, new->active_comps, changed_comps);
+
+ dp_for_each_set_bit(id, changed_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ if (new->active_comps & BIT(c->id))
+ c->funcs->update(c, priv_to_comp_st(c->obj.state));
+ else
+ c->funcs->disable(c);
+ }
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 0a4953a9a909..07ed0cc1bc44 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -7,10 +7,96 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
+static int
+komeda_plane_init_data_flow(struct drm_plane_state *st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_framebuffer *fb = st->fb;
+
+ memset(dflow, 0, sizeof(*dflow));
+
+ dflow->blending_zorder = st->zpos;
+
+ /* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
+ dflow->pixel_blend_mode = fb->format->has_alpha ?
+ st->pixel_blend_mode : DRM_MODE_BLEND_PIXEL_NONE;
+ dflow->layer_alpha = st->alpha >> 8;
+
+ dflow->out_x = st->crtc_x;
+ dflow->out_y = st->crtc_y;
+ dflow->out_w = st->crtc_w;
+ dflow->out_h = st->crtc_h;
+
+ dflow->in_x = st->src_x >> 16;
+ dflow->in_y = st->src_y >> 16;
+ dflow->in_w = st->src_w >> 16;
+ dflow->in_h = st->src_h >> 16;
+
+ return 0;
+}
+
+/**
+ * komeda_plane_atomic_check - build input data flow
+ * @plane: DRM plane
+ * @state: the plane state object
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int
+komeda_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct komeda_plane *kplane = to_kplane(plane);
+ struct komeda_plane_state *kplane_st = to_kplane_st(state);
+ struct komeda_layer *layer = kplane->layer;
+ struct drm_crtc_state *crtc_st;
+ struct komeda_crtc *kcrtc;
+ struct komeda_crtc_state *kcrtc_st;
+ struct komeda_data_flow_cfg dflow;
+ int err;
+
+ if (!state->crtc || !state->fb)
+ return 0;
+
+ crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (!crtc_st->enable) {
+ DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
+ return -EINVAL;
+ }
+
+ /* crtc is inactive, skip the resource assignment */
+ if (!crtc_st->active)
+ return 0;
+
+ kcrtc = to_kcrtc(state->crtc);
+ kcrtc_st = to_kcrtc_st(crtc_st);
+
+ err = komeda_plane_init_data_flow(state, &dflow);
+ if (err)
+ return err;
+
+ err = komeda_build_layer_data_flow(layer, kplane_st, kcrtc_st, &dflow);
+
+ return err;
+}
+
+/* The plane doesn't represent real HW, so there is no HW update for the
+ * plane; komeda handles all the HW updates in crtc->atomic_flush.
+ */
+static void
+komeda_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
static const struct drm_plane_helper_funcs komeda_plane_helper_funcs = {
+ .atomic_check = komeda_plane_atomic_check,
+ .atomic_update = komeda_plane_atomic_update,
};
static void komeda_plane_destroy(struct drm_plane *plane)
@@ -20,7 +106,60 @@ static void komeda_plane_destroy(struct drm_plane *plane)
kfree(to_kplane(plane));
}
+static void komeda_plane_reset(struct drm_plane *plane)
+{
+ struct komeda_plane_state *state;
+ struct komeda_plane *kplane = to_kplane(plane);
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
+ plane->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.rotation = DRM_MODE_ROTATE_0;
+ state->base.pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+ state->base.alpha = DRM_BLEND_ALPHA_OPAQUE;
+ state->base.zpos = kplane->layer->base.id;
+ plane->state = &state->base;
+ plane->state->plane = plane;
+ }
+}
+
+static struct drm_plane_state *
+komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct komeda_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+ return &new->base;
+}
+
+static void
+komeda_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_kplane_st(state));
+}
+
static const struct drm_plane_funcs komeda_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = komeda_plane_destroy,
+ .reset = komeda_plane_reset,
+ .atomic_duplicate_state = komeda_plane_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_plane_atomic_destroy_state,
};
/* for komeda, which is pipeline can be share between crtcs */
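Editorial sketch, not part of the patch: how the plane funcs and helper funcs above are typically wired into DRM when a plane is registered. The _sketch name, the formats parameters and the PRIMARY type are assumptions; the real komeda_plane_add() may differ.

static int komeda_plane_register_sketch(struct drm_device *drm,
					struct komeda_plane *kplane,
					const u32 *formats, u32 n_formats)
{
	int err;

	err = drm_universal_plane_init(drm, &kplane->base, 0,
				       &komeda_plane_funcs,
				       formats, n_formats, NULL,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (err)
		return err;

	drm_plane_helper_add(&kplane->base, &komeda_plane_helper_funcs);
	return 0;
}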
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
index f1c9e3fefa86..a54878cbd6e4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
@@ -7,6 +7,188 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
+static void
+komeda_component_state_reset(struct komeda_component_state *st)
+{
+ st->binding_user = NULL;
+ st->affected_inputs = st->active_inputs;
+ st->active_inputs = 0;
+ st->changed_active_inputs = 0;
+}
+
+static struct drm_private_state *
+komeda_layer_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_layer_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_layer_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(priv_to_comp_st(state));
+
+ kfree(st);
+}
+
+static const struct drm_private_state_funcs komeda_layer_obj_funcs = {
+ .atomic_duplicate_state = komeda_layer_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_layer_atomic_destroy_state,
+};
+
+static int komeda_layer_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_layer *layer)
+{
+ struct komeda_layer_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &layer->base;
+ drm_atomic_private_obj_init(&kms->base, &layer->base.obj, &st->base.obj,
+ &komeda_layer_obj_funcs);
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_compiz_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_compiz_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_compiz_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_compiz_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_compiz_obj_funcs = {
+ .atomic_duplicate_state = komeda_compiz_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_compiz_atomic_destroy_state,
+};
+
+static int komeda_compiz_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_compiz *compiz)
+{
+ struct komeda_compiz_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &compiz->base;
+ drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, &st->base.obj,
+ &komeda_compiz_obj_funcs);
+
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_improc_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_improc_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_improc_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_improc_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_improc_obj_funcs = {
+ .atomic_duplicate_state = komeda_improc_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_improc_atomic_destroy_state,
+};
+
+static int komeda_improc_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_improc *improc)
+{
+ struct komeda_improc_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &improc->base;
+ drm_atomic_private_obj_init(&kms->base, &improc->base.obj, &st->base.obj,
+ &komeda_improc_obj_funcs);
+
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_timing_ctrlr_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_timing_ctrlr_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_timing_ctrlr_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_ctrlr_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = {
+ .atomic_duplicate_state = komeda_timing_ctrlr_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_timing_ctrlr_atomic_destroy_state,
+};
+
+static int komeda_timing_ctrlr_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_timing_ctrlr *ctrlr)
+{
+ struct komeda_timing_ctrlr_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &ctrlr->base;
+ drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, &st->base.obj,
+ &komeda_timing_ctrlr_obj_funcs);
+
+ return 0;
+}
+
static struct drm_private_state *
komeda_pipeline_atomic_duplicate_state(struct drm_private_obj *obj)
{
@@ -55,7 +237,7 @@ int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
struct komeda_pipeline *pipe;
- int i, err;
+ int i, j, err;
for (i = 0; i < mdev->n_pipelines; i++) {
pipe = mdev->pipelines[i];
@@ -64,25 +246,33 @@ int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
if (err)
return err;
- /* Add component */
+ for (j = 0; j < pipe->n_layers; j++) {
+ err = komeda_layer_obj_add(kms, pipe->layers[j]);
+ if (err)
+ return err;
+ }
+
+ err = komeda_compiz_obj_add(kms, pipe->compiz);
+ if (err)
+ return err;
+
+ err = komeda_improc_obj_add(kms, pipe->improc);
+ if (err)
+ return err;
+
+ err = komeda_timing_ctrlr_obj_add(kms, pipe->ctrlr);
+ if (err)
+ return err;
}
return 0;
}
-void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev)
+void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms)
{
- struct komeda_pipeline *pipe;
- struct komeda_component *c;
- int i, id;
+ struct drm_mode_config *config = &kms->base.mode_config;
+ struct drm_private_obj *obj, *next;
- for (i = 0; i < mdev->n_pipelines; i++) {
- pipe = mdev->pipelines[i];
- dp_for_each_set_bit(id, pipe->avail_comps) {
- c = komeda_pipeline_get_component(pipe, id);
-
- drm_atomic_private_obj_fini(&c->obj);
- }
- drm_atomic_private_obj_fini(&pipe->obj);
- }
+ list_for_each_entry_safe(obj, next, &config->privobj_list, head)
+ drm_atomic_private_obj_fini(obj);
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index ab50ad06e271..21725c9b9f5e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -264,37 +264,17 @@ static bool
malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- if ((mode_cmd->modifier[0] >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
- DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
- return false;
- }
-
- if (mode_cmd->modifier[0] &
- ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
- DRM_DEBUG_KMS("Unsupported modifiers\n");
- return false;
- }
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info) {
- DRM_DEBUG_KMS("Unable to get the format information\n");
+ if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]) == false)
return false;
- }
-
- if (info->num_planes != 1) {
- DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
- return false;
- }
if (mode_cmd->offsets[0] != 0) {
DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
return false;
}
- switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
- case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
+ case AFBC_SIZE_16X16:
if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
return false;
@@ -318,9 +298,10 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
struct drm_gem_object *objs = NULL;
u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
u32 afbc_superblock_width = 0, afbc_size = 0;
+ int bpp = 0;
- switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
- case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
+ case AFBC_SIZE_16X16:
afbc_superblock_height = 16;
afbc_superblock_width = 16;
break;
@@ -334,15 +315,19 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
n_superblocks = (mode_cmd->width / afbc_superblock_width) *
(mode_cmd->height / afbc_superblock_height);
- afbc_superblock_size = info->cpp[0] * afbc_superblock_width *
- afbc_superblock_height;
+ bpp = malidp_format_get_bpp(info->format);
+
+ afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height)
+ / BITS_PER_BYTE;
afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);
- if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
- DRM_DEBUG_KMS("Invalid value of pitch (=%u) should be same as width (=%u) * cpp (=%u)\n",
- mode_cmd->pitches[0], mode_cmd->width, info->cpp[0]);
+ if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
+ DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
+ "should be same as width (=%u) * bpp (=%u)\n",
+ (mode_cmd->pitches[0] * BITS_PER_BYTE),
+ mode_cmd->width, bpp);
return false;
}
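Editorial worked example, not part of the patch (assuming malidp_format_get_bpp() returns 12 for DRM_FORMAT_YUV420_8BIT, AFBC_HEADER_SIZE is 16 and AFBC_SUPERBLK_ALIGNMENT is 128): a 64x64 buffer with 16x16 superblocks gives n_superblocks = (64/16) * (64/16) = 16 and afbc_superblock_size = 12 * 16 * 16 / 8 = 384 bytes, so afbc_size = ALIGN(16 * 16, 128) + 16 * ALIGN(384, 128) = 256 + 6144 = 6400 bytes, and the pitch check requires 64 * 12 == pitches[0] * 8, i.e. pitches[0] == 96.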
@@ -406,6 +391,7 @@ static int malidp_init(struct drm_device *drm)
drm->mode_config.max_height = hwdev->max_line_size;
drm->mode_config.funcs = &malidp_mode_config_funcs;
drm->mode_config.helper_private = &malidp_mode_config_helpers;
+ drm->mode_config.allow_fb_modifiers = true;
ret = malidp_crtc_init(drm);
if (ret)
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index b76c86f18a56..019a682b2716 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -90,6 +90,12 @@ struct malidp_crtc_state {
int malidp_de_planes_init(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
+bool malidp_hw_format_is_linear_only(u32 format);
+bool malidp_hw_format_is_afbc_only(u32 format);
+
+bool malidp_format_mod_supported(struct drm_device *drm,
+ u32 format, u64 modifier);
+
#ifdef CONFIG_DEBUG_FS
void malidp_error(struct malidp_drm *malidp,
struct malidp_error_stats *error_stats, u32 status,
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index b9bed1138fa3..8df12e9a33bb 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -49,11 +49,19 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
{ DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
{ DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
+ { DRM_FORMAT_XYUV8888, DE_VIDEO1, 16 },
+ /* These are supported with AFBC only */
+ { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1, 14 },
+ { DRM_FORMAT_VUY888, DE_VIDEO1, 16 },
+ { DRM_FORMAT_VUY101010, DE_VIDEO1, 17 },
+ { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1, 18 }
};
#define MALIDP_ID(__group, __format) \
((((__group) & 0x7) << 3) | ((__format) & 0x7))
+#define AFBC_YUV_422_FORMAT_ID MALIDP_ID(5, 1)
+
#define MALIDP_COMMON_FORMATS \
/* fourcc, layers supporting the format, internal id */ \
{ DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
@@ -74,11 +82,25 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
+ /* This is only supported with linear modifier */ \
+ { DRM_FORMAT_XYUV8888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) },\
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_VUY888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
+ /* This is only supported with linear modifier */ \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \
- { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}
+ /* This is only supported with linear modifier */ \
+ { DRM_FORMAT_XVYU2101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_VUY101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
+ { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}, \
+ { DRM_FORMAT_P010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}
static const struct malidp_format_id malidp550_de_formats[] = {
MALIDP_COMMON_FORMATS,
@@ -94,11 +116,14 @@ static const struct malidp_layer malidp500_layers[] = {
* yuv2rgb matrix offset, mmu control register offset, rotation_features
*/
{ DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP500_DE_LV_AD_CTRL },
{ DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP500_DE_LG1_AD_CTRL },
{ DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP500_DE_LG2_AD_CTRL },
};
static const struct malidp_layer malidp550_layers[] = {
@@ -106,13 +131,16 @@ static const struct malidp_layer malidp550_layers[] = {
* yuv2rgb matrix offset, mmu control register offset, rotation_features
*/
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP550_DE_LV1_AD_CTRL },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP550_DE_LG_AD_CTRL },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP550_DE_LV2_AD_CTRL },
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
- MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE },
+ MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE, 0 },
};
static const struct malidp_layer malidp650_layers[] = {
@@ -122,16 +150,44 @@ static const struct malidp_layer malidp650_layers[] = {
*/
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
- MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+ MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
+ MALIDP550_DE_LV1_AD_CTRL },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL,
- ROTATE_COMPRESSED },
+ ROTATE_COMPRESSED, MALIDP550_DE_LG_AD_CTRL },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
- MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+ MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
+ MALIDP550_DE_LV2_AD_CTRL },
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL,
- ROTATE_NONE },
+ ROTATE_NONE, 0 },
+};
+
+const u64 malidp_format_modifiers[] = {
+ /* All RGB formats (except XRGB, RGBX, XBGR, BGRX) */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR),
+
+ /* All RGB formats > 16bpp (except XRGB, RGBX, XBGR, BGRX) */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE | AFBC_SPLIT),
+
+ /* All 8 or 10 bit YUV 444 formats. */
+ /* In DP550, 10 bit YUV 420 format also supported */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE | AFBC_SPLIT),
+
+ /* YUV 420, 422 P1 8 bit and YUV 444 8 bit/10 bit formats */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16),
+
+ /* YUV 420, 422 P1 8, 10 bit formats */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR),
+
+ /* All formats */
+ DRM_FORMAT_MOD_LINEAR,
+
+ DRM_FORMAT_MOD_INVALID
};
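For context, the sketch below decodes one entry of the table above with the uapi AFBC helpers from drm_fourcc.h; it is only an illustration of how the vendor byte and feature bits that malidp_format_mod_supported() later inspects are packed.

/* Illustration only: decode one AFBC modifier from the table above. */
#include <stdio.h>
#include <drm/drm_fourcc.h>	/* uapi: DRM_FORMAT_MOD_ARM_AFBC, AFBC_FORMAT_MOD_* */

int main(void)
{
	__u64 mod = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
					    AFBC_FORMAT_MOD_YTR |
					    AFBC_FORMAT_MOD_SPARSE);

	printf("vendor byte: 0x%llx (DRM_FORMAT_MOD_VENDOR_ARM)\n",
	       (unsigned long long)(mod >> 56));
	if ((mod & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) == AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
		printf("16x16 superblocks, YTR=%d, sparse=%d\n",
		       !!(mod & AFBC_FORMAT_MOD_YTR),
		       !!(mod & AFBC_FORMAT_MOD_SPARSE));
	return 0;
}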
#define SE_N_SCALING_COEFFS 96
@@ -324,14 +380,39 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
}
-static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+int malidp_format_get_bpp(u32 fmt)
+{
+ int bpp = drm_format_plane_cpp(fmt, 0) * 8;
+
+ if (bpp == 0) {
+ switch (fmt) {
+ case DRM_FORMAT_VUY101010:
+ bpp = 30;
+ break;
+ case DRM_FORMAT_YUV420_10BIT:
+ bpp = 15;
+ break;
+ case DRM_FORMAT_YUV420_8BIT:
+ bpp = 12;
+ break;
+ default:
+ bpp = 0;
+ }
+ }
+
+ return bpp;
+}
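The fractional bits-per-pixel values above only divide cleanly once multiplied by a whole superblock, which is why the framebuffer size check keeps everything in bits; a rough standalone sketch of the resulting 16x16 payload sizes:

/* Standalone sketch only: 16x16 AFBC superblock payload per format. */
#include <stdio.h>

static unsigned int superblock_bytes(unsigned int bpp)
{
	return bpp * 16 * 16 / 8;	/* exact for every bpp returned above */
}

int main(void)
{
	printf("YUV420_8BIT  (12 bpp): %u bytes\n", superblock_bytes(12)); /* 384 */
	printf("YUV420_10BIT (15 bpp): %u bytes\n", superblock_bytes(15)); /* 480 */
	printf("VUY101010    (30 bpp): %u bytes\n", superblock_bytes(30)); /* 960 */
	return 0;
}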
+
+static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
{
/*
* Each layer needs enough rotation memory to fit 8 lines
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
- return w * drm_format_plane_cpp(fmt, 0) * 8;
+ int bpp = malidp_format_get_bpp(fmt);
+
+ return w * bpp;
}
static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev,
@@ -609,9 +690,9 @@ static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
}
-static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+static int malidpx50_get_bytes_per_column(u32 fmt)
{
- u32 bytes_per_col;
+ u32 bytes_per_column;
switch (fmt) {
/* 8 lines at 4 bytes per pixel */
@@ -637,19 +718,77 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_X0L0:
- case DRM_FORMAT_X0L2:
- bytes_per_col = 32;
+ bytes_per_column = 32;
break;
/* 16 lines at 1.5 bytes per pixel */
case DRM_FORMAT_NV12:
case DRM_FORMAT_YUV420:
- bytes_per_col = 24;
+ /* 8 lines at 3 bytes per pixel */
+ case DRM_FORMAT_VUY888:
+ /* 16 lines at 12 bits per pixel */
+ case DRM_FORMAT_YUV420_8BIT:
+ /* 8 lines at 3 bytes per pixel */
+ case DRM_FORMAT_P010:
+ bytes_per_column = 24;
+ break;
+ /* 8 lines at 30 bits per pixel */
+ case DRM_FORMAT_VUY101010:
+ /* 16 lines at 15 bits per pixel */
+ case DRM_FORMAT_YUV420_10BIT:
+ bytes_per_column = 30;
break;
default:
return -EINVAL;
}
- return w * bytes_per_col;
+ return bytes_per_column;
+}
+
+static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
+{
+ int bytes_per_column = 0;
+
+ switch (fmt) {
+ /* 8 lines at 15 bits per pixel */
+ case DRM_FORMAT_YUV420_10BIT:
+ bytes_per_column = 15;
+ break;
+ /* Uncompressed YUV 420 10 bit single plane cannot be rotated */
+ case DRM_FORMAT_X0L2:
+ if (has_modifier)
+ bytes_per_column = 8;
+ else
+ return -EINVAL;
+ break;
+ default:
+ bytes_per_column = malidpx50_get_bytes_per_column(fmt);
+ }
+
+ if (bytes_per_column == -EINVAL)
+ return bytes_per_column;
+
+ return w * bytes_per_column;
+}
+
+static int malidp650_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
+{
+ int bytes_per_column = 0;
+
+ switch (fmt) {
+ /* 16 lines at 2 bytes per pixel */
+ case DRM_FORMAT_X0L2:
+ bytes_per_column = 32;
+ break;
+ default:
+ bytes_per_column = malidpx50_get_bytes_per_column(fmt);
+ }
+
+ if (bytes_per_column == -EINVAL)
+ return bytes_per_column;
+
+ return w * bytes_per_column;
}
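A quick standalone sanity check of the per-column arithmetic above; the 1920-pixel width is just an example.

/* Example numbers only: per-layer rotation memory in bytes for w = 1920. */
#include <stdio.h>

int main(void)
{
	unsigned int w = 1920;

	printf("NV12 (24 bytes/column):            %u\n", w * 24); /* 46080 */
	printf("X0L2 on DP650 (32 bytes/column):   %u\n", w * 32); /* 61440 */
	printf("YUV420_10BIT on DP550 (15 b/col):  %u\n", w * 15); /* 28800 */
	return 0;
}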
static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
@@ -838,7 +977,10 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
- .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ |
+ MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
.n_layers = ARRAY_SIZE(malidp550_layers),
.layers = malidp550_layers,
.de_irq_map = {
@@ -884,7 +1026,9 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
- .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ |
+ MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
.n_layers = ARRAY_SIZE(malidp650_layers),
.layers = malidp650_layers,
.de_irq_map = {
@@ -923,7 +1067,7 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.in_config_mode = malidp550_in_config_mode,
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
- .rotmem_required = malidp550_rotmem_required,
+ .rotmem_required = malidp650_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
.enable_memwrite = malidp550_enable_memwrite,
@@ -933,19 +1077,72 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
};
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
- u8 layer_id, u32 format)
+ u8 layer_id, u32 format, bool has_modifier)
{
unsigned int i;
for (i = 0; i < map->n_pixel_formats; i++) {
if (((map->pixel_formats[i].layer & layer_id) == layer_id) &&
- (map->pixel_formats[i].format == format))
- return map->pixel_formats[i].id;
+ (map->pixel_formats[i].format == format)) {
+ /*
+ * In some DP550 and DP650, DRM_FORMAT_YUYV + AFBC modifier
+ * is supported by a different h/w format id than
+ * DRM_FORMAT_YUYV (only).
+ */
+ if (format == DRM_FORMAT_YUYV &&
+ (has_modifier) &&
+ (map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2))
+ return AFBC_YUV_422_FORMAT_ID;
+ else
+ return map->pixel_formats[i].id;
+ }
}
return MALIDP_INVALID_FORMAT_ID;
}
+bool malidp_hw_format_is_linear_only(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_X0L2:
+ case DRM_FORMAT_X0L0:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool malidp_hw_format_is_afbc_only(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_VUY888:
+ case DRM_FORMAT_VUY101010:
+ case DRM_FORMAT_YUV420_8BIT:
+ case DRM_FORMAT_YUV420_10BIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
{
u32 base = malidp_get_block_base(hwdev, block);
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 40155e2ea9d9..207c3ce52f1a 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -70,6 +70,8 @@ struct malidp_layer {
s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */
u16 mmu_ctrl_offset; /* offset to the MMU control register */
enum rotation_features rot; /* type of rotation supported */
+ /* address offset for the AFBC decoder registers */
+ u16 afbc_decoder_offset;
};
enum malidp_scaling_coeff_set {
@@ -93,7 +95,10 @@ struct malidp_se_config {
};
/* regmap features */
-#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
+#define MALIDP_REGMAP_HAS_CLEARIRQ BIT(0)
+#define MALIDP_DEVICE_AFBC_SUPPORT_SPLIT BIT(1)
+#define MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT BIT(2)
+#define MALIDP_DEVICE_AFBC_YUYV_USE_422_P2 BIT(3)
struct malidp_hw_regmap {
/* address offset of the DE register bank */
@@ -179,7 +184,8 @@ struct malidp_hw {
* Calculate the required rotation memory given the active area
* and the buffer format.
*/
- int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+ int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h,
+ u32 fmt, bool has_modifier);
int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
@@ -319,7 +325,9 @@ int malidp_se_irq_init(struct drm_device *drm, int irq);
void malidp_se_irq_fini(struct malidp_hw_device *hwdev);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
- u8 layer_id, u32 format);
+ u8 layer_id, u32 format, bool has_modifier);
+
+int malidp_format_get_bpp(u32 fmt);
static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated)
{
@@ -388,9 +396,18 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
#define MALIDP_GAMMA_LUT_SIZE 4096
-#define AFBC_MOD_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_MASK | \
- AFBC_FORMAT_MOD_YTR | AFBC_FORMAT_MOD_SPLIT | \
- AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_CBR | \
- AFBC_FORMAT_MOD_TILED | AFBC_FORMAT_MOD_SC)
+#define AFBC_SIZE_MASK AFBC_FORMAT_MOD_BLOCK_SIZE_MASK
+#define AFBC_SIZE_16X16 AFBC_FORMAT_MOD_BLOCK_SIZE_16x16
+#define AFBC_YTR AFBC_FORMAT_MOD_YTR
+#define AFBC_SPARSE AFBC_FORMAT_MOD_SPARSE
+#define AFBC_CBR AFBC_FORMAT_MOD_CBR
+#define AFBC_SPLIT AFBC_FORMAT_MOD_SPLIT
+#define AFBC_TILED AFBC_FORMAT_MOD_TILED
+#define AFBC_SC AFBC_FORMAT_MOD_SC
+
+#define AFBC_MOD_VALID_BITS (AFBC_SIZE_MASK | AFBC_YTR | AFBC_SPLIT | \
+ AFBC_SPARSE | AFBC_CBR | AFBC_TILED | AFBC_SC)
+
+extern const u64 malidp_format_modifiers[];
#endif /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 041a64dc7167..5f102bdaf841 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -141,9 +141,14 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
+ if (fb->modifier) {
+ DRM_DEBUG_KMS("Writeback framebuffer does not support modifiers\n");
+ return -EINVAL;
+ }
+
mw_state->format =
malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
- fb->format->format);
+ fb->format->format, !!fb->modifier);
if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
struct drm_format_name_buf format_name;
@@ -252,8 +257,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
&mw_state->addrs[0],
mw_state->format);
- drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
- conn_state->writeback_job = NULL;
+ drm_writeback_queue_job(mw_conn, conn_state);
hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
mw_state->pitches, mw_state->n_planes,
fb->width, fb->height, mw_state->format,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index c9a6d3e0cada..d42e0ea9a303 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -52,6 +52,8 @@
#define MALIDP550_LS_ENABLE 0x01c
#define MALIDP550_LS_R1_IN_SIZE 0x020
+#define MODIFIERS_COUNT_MAX 15
+
/*
* This 4-entry look-up-table is used to determine the full 8-bit alpha value
* for formats with 1- or 2-bit alpha channels.
@@ -145,6 +147,119 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}
+bool malidp_format_mod_supported(struct drm_device *drm,
+ u32 format, u64 modifier)
+{
+ const struct drm_format_info *info;
+ const u64 *modifiers;
+ struct malidp_drm *malidp = drm->dev_private;
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
+ /* Some pixel formats are supported without any modifier */
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ /*
+ * However these pixel formats need to be supported with
+ * modifiers only
+ */
+ return !malidp_hw_format_is_afbc_only(format);
+ }
+
+ if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
+ DRM_ERROR("Unknown modifier (not Arm)\n");
+ return false;
+ }
+
+ if (modifier &
+ ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
+ DRM_DEBUG_KMS("Unsupported modifiers\n");
+ return false;
+ }
+
+ modifiers = malidp_format_modifiers;
+
+ /* SPLIT buffers must use SPARSE layout */
+ if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE)))
+ return false;
+
+ /* CBR only applies to YUV formats, where YTR should be always 0 */
+ if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR)))
+ return false;
+
+ while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+ if (*modifiers == modifier)
+ break;
+
+ modifiers++;
+ }
+
+ /* return false if the modifier was not found */
+ if (*modifiers == DRM_FORMAT_MOD_INVALID) {
+ DRM_DEBUG_KMS("Unsupported modifier\n");
+ return false;
+ }
+
+ info = drm_format_info(format);
+
+ if (info->num_planes != 1) {
+ DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
+ return false;
+ }
+
+ if (malidp_hw_format_is_linear_only(format) == true) {
+ DRM_DEBUG_KMS("Given format (0x%x) is supported is linear mode only\n",
+ format);
+ return false;
+ }
+
+ /*
+ * RGB formats need to provide YTR modifier and YUV formats should not
+ * provide YTR modifier.
+ */
+ if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) {
+ DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n",
+ info->is_yuv ? "disallowed" : "mandatory",
+ info->is_yuv ? "YUV" : "RGB");
+ return false;
+ }
+
+ if (modifier & AFBC_SPLIT) {
+ if (!info->is_yuv) {
+ if (drm_format_plane_cpp(format, 0) <= 2) {
+ DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
+ return false;
+ }
+ }
+
+ if ((drm_format_horz_chroma_subsampling(format) != 1) ||
+ (drm_format_vert_chroma_subsampling(format) != 1)) {
+ if (!(format == DRM_FORMAT_YUV420_10BIT &&
+ (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
+ DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
+ return false;
+ }
+ }
+ }
+
+ if (modifier & AFBC_CBR) {
+ if ((drm_format_horz_chroma_subsampling(format) == 1) ||
+ (drm_format_vert_chroma_subsampling(format) == 1)) {
+ DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
+ return false;
+ }
+ }
+
+ return true;
+}
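The YTR rule above is effectively an equivalence test between "format is RGB" and "modifier has YTR"; a standalone truth-table sketch (not driver code):

/* Sketch of the YTR rule: RGB requires YTR, YUV must not set it. */
#include <stdbool.h>
#include <stdio.h>

static bool ytr_ok(bool is_yuv, bool has_ytr)
{
	return !is_yuv == has_ytr;	/* same test as malidp_format_mod_supported() */
}

int main(void)
{
	printf("RGB + YTR:    %d (allowed)\n",  ytr_ok(false, true));
	printf("RGB, no YTR:  %d (rejected)\n", ytr_ok(false, false));
	printf("YUV + YTR:    %d (rejected)\n", ytr_ok(true, true));
	printf("YUV, no YTR:  %d (allowed)\n",  ytr_ok(true, false));
	return 0;
}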
+
+static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ return malidp_format_mod_supported(plane->dev, format, modifier);
+}
+
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -153,6 +268,7 @@ static const struct drm_plane_funcs malidp_de_plane_funcs = {
.atomic_duplicate_state = malidp_duplicate_plane_state,
.atomic_destroy_state = malidp_destroy_plane_state,
.atomic_print_state = malidp_plane_atomic_print_state,
+ .format_mod_supported = malidp_format_mod_supported_per_plane,
};
static int malidp_se_check_scaling(struct malidp_plane *mp,
@@ -406,8 +522,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
fb = state->fb;
ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
- mp->layer->id,
- fb->format->format);
+ mp->layer->id, fb->format->format,
+ !!fb->modifier);
if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
@@ -415,8 +531,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
for (i = 0; i < ms->n_planes; i++) {
u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
- if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
- & (alignment - 1)) {
+ if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
+ & (alignment - 1)) && !(fb->modifier)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
@@ -469,13 +585,20 @@ static int malidp_de_plane_check(struct drm_plane *plane,
return -EINVAL;
}
+ /* SMART layer does not support AFBC */
+ if (mp->layer->id == DE_SMART && fb->modifier) {
+ DRM_ERROR("AFBC framebuffer not supported in SMART layer");
+ return -EINVAL;
+ }
+
ms->rotmem_size = 0;
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
state->crtc_h,
- fb->format->format);
+ fb->format->format,
+ !!(fb->modifier));
if (val < 0)
return val;
@@ -592,6 +715,83 @@ static void malidp_de_set_mmu_control(struct malidp_plane *mp,
mp->layer->base + mp->layer->mmu_ctrl_offset);
}
+static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
+ struct malidp_plane *mp,
+ int plane_index)
+{
+ dma_addr_t paddr;
+ u16 ptr;
+ struct drm_plane *plane = &mp->base;
+ bool afbc = fb->modifier ? true : false;
+
+ ptr = mp->layer->ptr + (plane_index << 4);
+
+ /*
+ * drm_fb_cma_get_gem_addr() alters the physical base address of the
+ * framebuffer as per the plane's src_x, src_y co-ordinates (ie to
+ * take care of source cropping).
+ * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
+ * and _AD_CROP_V registers.
+ */
+ if (!afbc) {
+ paddr = drm_fb_cma_get_gem_addr(fb, plane->state,
+ plane_index);
+ } else {
+ struct drm_gem_cma_object *obj;
+
+ obj = drm_fb_cma_get_gem_obj(fb, plane_index);
+
+ if (WARN_ON(!obj))
+ return;
+ paddr = obj->paddr;
+ }
+
+ malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
+ malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
+}
+
+static void malidp_de_set_plane_afbc(struct drm_plane *plane)
+{
+ struct malidp_plane *mp;
+ u32 src_w, src_h, val = 0, src_x, src_y;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ mp = to_malidp_plane(plane);
+
+ /* no afbc_decoder_offset means AFBC is not supported on this plane */
+ if (!mp->layer->afbc_decoder_offset)
+ return;
+
+ if (!fb->modifier) {
+ malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset);
+ return;
+ }
+
+ /* convert src values from Q16 fixed point to integer */
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+ src_x = plane->state->src_x >> 16;
+ src_y = plane->state->src_y >> 16;
+
+ val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) |
+ src_x;
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H);
+
+ val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) |
+ src_y;
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V);
+
+ val = MALIDP_AD_EN;
+ if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
+ val |= MALIDP_AD_BS;
+ if (fb->modifier & AFBC_FORMAT_MOD_YTR)
+ val |= MALIDP_AD_YTR;
+
+ malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset);
+}
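The crop registers encode the left/top offset in the low half-word and the remaining right/bottom margin in the high half-word; an illustrative calculation with made-up plane coordinates:

/* Made-up numbers: AD_CROP_H value for a 1920-wide AFBC framebuffer. */
#include <stdio.h>

int main(void)
{
	unsigned int fb_width = 1920, src_x = 64, src_w = 1280;
	unsigned int right = fb_width - (src_x + src_w);	/* 576 */
	unsigned int crop_h = (right << 16) | src_x;		/* CROP_RIGHT_OFFSET = 16 */

	printf("AD_CROP_H = 0x%08x (left crop %u, right crop %u)\n",
	       crop_h, src_x, right);
	return 0;
}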
+
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -602,12 +802,23 @@ static void malidp_de_plane_update(struct drm_plane *plane,
u8 plane_alpha = state->alpha >> 8;
u32 src_w, src_h, dest_w, dest_h, val;
int i;
+ struct drm_framebuffer *fb = plane->state->fb;
mp = to_malidp_plane(plane);
- /* convert src values from Q16 fixed point to integer */
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ /*
+ * For AFBC framebuffer, use the framebuffer width and height for
+ * configuring layer input size register.
+ */
+ if (fb->modifier) {
+ src_w = fb->width;
+ src_h = fb->height;
+ } else {
+ /* convert src values from Q16 fixed point to integer */
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+ }
+
dest_w = state->crtc_w;
dest_h = state->crtc_h;
@@ -615,15 +826,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val = (val & ~LAYER_FORMAT_MASK) | ms->format;
malidp_hw_write(mp->hwdev, val, mp->layer->base);
- for (i = 0; i < ms->n_planes; i++) {
- /* calculate the offset for the layer's plane registers */
- u16 ptr = mp->layer->ptr + (i << 4);
- dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(state->fb,
- state, i);
-
- malidp_hw_write(mp->hwdev, lower_32_bits(fb_addr), ptr);
- malidp_hw_write(mp->hwdev, upper_32_bits(fb_addr), ptr + 4);
- }
+ for (i = 0; i < ms->n_planes; i++)
+ malidp_set_plane_base_addr(fb, mp, i);
malidp_de_set_mmu_control(mp, ms);
@@ -657,6 +861,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
}
+ malidp_de_set_plane_afbc(plane);
+
/* first clear the rotation bits */
val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
val &= ~LAYER_ROT_MASK;
@@ -733,7 +939,26 @@ int malidp_de_planes_init(struct drm_device *drm)
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE);
u32 *formats;
- int ret, i, j, n;
+ int ret, i = 0, j = 0, n;
+ u64 supported_modifiers[MODIFIERS_COUNT_MAX];
+ const u64 *modifiers;
+
+ modifiers = malidp_format_modifiers;
+
+ if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
+ /*
+ * The hardware does not support SPLIT, so build the list of
+ * supported modifiers excluding the SPLIT ones.
+ */
+ while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+ if (!(*modifiers & AFBC_SPLIT))
+ supported_modifiers[j++] = *modifiers;
+
+ modifiers++;
+ }
+ supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
+ modifiers = supported_modifiers;
+ }
formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
if (!formats) {
@@ -758,9 +983,15 @@ int malidp_de_planes_init(struct drm_device *drm)
plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
+
+ /*
+ * All layers except the SMART layer support AFBC modifiers.
+ */
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
- &malidp_de_plane_funcs, formats,
- n, NULL, plane_type, NULL);
+ &malidp_de_plane_funcs, formats, n,
+ (id == DE_SMART) ? NULL : modifiers, plane_type,
+ NULL);
+
if (ret < 0)
goto cleanup;
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 7ce3e141464d..a0dd6e1676a8 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -198,10 +198,13 @@
#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8))
#define MALIDP500_DE_LV_BASE 0x00100
#define MALIDP500_DE_LV_PTR_BASE 0x00124
+#define MALIDP500_DE_LV_AD_CTRL 0x00400
#define MALIDP500_DE_LG1_BASE 0x00200
#define MALIDP500_DE_LG1_PTR_BASE 0x0021c
+#define MALIDP500_DE_LG1_AD_CTRL 0x0040c
#define MALIDP500_DE_LG2_BASE 0x00300
#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
+#define MALIDP500_DE_LG2_AD_CTRL 0x00418
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
@@ -228,10 +231,13 @@
#define MALIDP550_LV_YUV2RGB 0x00084
#define MALIDP550_DE_LV1_BASE 0x00100
#define MALIDP550_DE_LV1_PTR_BASE 0x00124
+#define MALIDP550_DE_LV1_AD_CTRL 0x001B8
#define MALIDP550_DE_LV2_BASE 0x00200
#define MALIDP550_DE_LV2_PTR_BASE 0x00224
+#define MALIDP550_DE_LV2_AD_CTRL 0x002B8
#define MALIDP550_DE_LG_BASE 0x00300
#define MALIDP550_DE_LG_PTR_BASE 0x0031c
+#define MALIDP550_DE_LG_AD_CTRL 0x00330
#define MALIDP550_DE_LS_BASE 0x00400
#define MALIDP550_DE_LS_PTR_BASE 0x0042c
#define MALIDP550_DE_PERF_BASE 0x00500
@@ -258,6 +264,20 @@
#define MALIDP_MMU_CTRL_PX_PS(x) (1 << (8 + (x)))
#define MALIDP_MMU_CTRL_PP_NUM_REQ(x) (((x) & 0x7f) << 12)
+/* AFBC register offsets relative to MALIDPXXX_DE_LX_AD_CTRL */
+/* The following register offsets are common for DP500, DP550 and DP650 */
+#define MALIDP_AD_CROP_H 0x4
+#define MALIDP_AD_CROP_V 0x8
+#define MALIDP_AD_END_PTR_LOW 0xc
+#define MALIDP_AD_END_PTR_HIGH 0x10
+
+/* AFBC decoder control register bits */
+#define MALIDP_AD_EN BIT(0)
+#define MALIDP_AD_YTR BIT(4)
+#define MALIDP_AD_BS BIT(8)
+#define MALIDP_AD_CROP_RIGHT_OFFSET 16
+#define MALIDP_AD_CROP_BOTTOM_OFFSET 16
+
/*
* Starting with DP550 the register map blocks has been standardised to the
* following layout:
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 8d23700848df..1e7140f005a5 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -78,8 +78,6 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
goto err_fballoc;
}
- strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
- info->par = fbh;
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
@@ -87,9 +85,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
info->screen_base = ptr;
fbh->fb = &dfb->fb;
- drm_fb_helper_fill_fix(info, dfb->fb.pitches[0],
- dfb->fb.format->depth);
- drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, fbh, sizes);
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
dfb->fb.width, dfb->fb.height, dfb->fb.format->cpp[0] * 8,
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
new file mode 100644
index 000000000000..cccab520e02f
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -0,0 +1,14 @@
+config DRM_ASPEED_GFX
+ tristate "ASPEED BMC Display Controller"
+ depends on DRM && OF
+ depends on (COMPILE_TEST || ARCH_ASPEED)
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DMA_CMA if HAVE_DMA_CONTIGUOUS
+ select CMA if HAVE_DMA_CONTIGUOUS
+ select MFD_SYSCON
+ help
+ Choose this option if you have an ASPEED AST2500 SOC Display
+ Controller (aka GFX).
+
+ If M is selected this module will be called aspeed_gfx.
diff --git a/drivers/gpu/drm/aspeed/Makefile b/drivers/gpu/drm/aspeed/Makefile
new file mode 100644
index 000000000000..6e194cd790d8
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/Makefile
@@ -0,0 +1,3 @@
+aspeed_gfx-y := aspeed_gfx_drv.o aspeed_gfx_crtc.o aspeed_gfx_out.o
+
+obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed_gfx.o
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h
new file mode 100644
index 000000000000..a10358bb61ec
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright 2018 IBM Corporation */
+
+#include <drm/drm_device.h>
+#include <drm/drm_simple_kms_helper.h>
+
+struct aspeed_gfx {
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct regmap *scu;
+
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector connector;
+ struct drm_fbdev_cma *fbdev;
+};
+
+int aspeed_gfx_create_pipe(struct drm_device *drm);
+int aspeed_gfx_create_output(struct drm_device *drm);
+
+#define CRT_CTRL1 0x60 /* CRT Control I */
+#define CRT_CTRL2 0x64 /* CRT Control II */
+#define CRT_STATUS 0x68 /* CRT Status */
+#define CRT_MISC 0x6c /* CRT Misc Setting */
+#define CRT_HORIZ0 0x70 /* CRT Horizontal Total & Display Enable End */
+#define CRT_HORIZ1 0x74 /* CRT Horizontal Retrace Start & End */
+#define CRT_VERT0 0x78 /* CRT Vertical Total & Display Enable End */
+#define CRT_VERT1 0x7C /* CRT Vertical Retrace Start & End */
+#define CRT_ADDR 0x80 /* CRT Display Starting Address */
+#define CRT_OFFSET 0x84 /* CRT Display Offset & Terminal Count */
+#define CRT_THROD 0x88 /* CRT Threshold */
+#define CRT_XSCALE 0x8C /* CRT Scaling-Up Factor */
+#define CRT_CURSOR0 0x90 /* CRT Hardware Cursor X & Y Offset */
+#define CRT_CURSOR1 0x94 /* CRT Hardware Cursor X & Y Position */
+#define CRT_CURSOR2 0x98 /* CRT Hardware Cursor Pattern Address */
+#define CRT_9C 0x9C
+#define CRT_OSD_H 0xA0 /* CRT OSD Horizontal Start/End */
+#define CRT_OSD_V 0xA4 /* CRT OSD Vertical Start/End */
+#define CRT_OSD_ADDR 0xA8 /* CRT OSD Pattern Address */
+#define CRT_OSD_DISP 0xAC /* CRT OSD Offset */
+#define CRT_OSD_THRESH 0xB0 /* CRT OSD Threshold & Alpha */
+#define CRT_B4 0xB4
+#define CRT_STS_V 0xB8 /* CRT Status V */
+#define CRT_SCRATCH 0xBC /* Scratchpad */
+#define CRT_BB0_ADDR 0xD0 /* CRT Display BB0 Starting Address */
+#define CRT_BB1_ADDR 0xD4 /* CRT Display BB1 Starting Address */
+#define CRT_BB_COUNT 0xD8 /* CRT Display BB Terminal Count */
+#define OSD_COLOR1 0xE0 /* OSD Color Palette Index 1 & 0 */
+#define OSD_COLOR2 0xE4 /* OSD Color Palette Index 3 & 2 */
+#define OSD_COLOR3 0xE8 /* OSD Color Palette Index 5 & 4 */
+#define OSD_COLOR4 0xEC /* OSD Color Palette Index 7 & 6 */
+#define OSD_COLOR5 0xF0 /* OSD Color Palette Index 9 & 8 */
+#define OSD_COLOR6 0xF4 /* OSD Color Palette Index 11 & 10 */
+#define OSD_COLOR7 0xF8 /* OSD Color Palette Index 13 & 12 */
+#define OSD_COLOR8 0xFC /* OSD Color Palette Index 15 & 14 */
+
+/* CTRL1 */
+#define CRT_CTRL_EN BIT(0)
+#define CRT_CTRL_HW_CURSOR_EN BIT(1)
+#define CRT_CTRL_OSD_EN BIT(2)
+#define CRT_CTRL_INTERLACED BIT(3)
+#define CRT_CTRL_COLOR_RGB565 (0 << 7)
+#define CRT_CTRL_COLOR_YUV444 (1 << 7)
+#define CRT_CTRL_COLOR_XRGB8888 (2 << 7)
+#define CRT_CTRL_COLOR_RGB888 (3 << 7)
+#define CRT_CTRL_COLOR_YUV444_2RGB (5 << 7)
+#define CRT_CTRL_COLOR_YUV422 (7 << 7)
+#define CRT_CTRL_COLOR_MASK GENMASK(9, 7)
+#define CRT_CTRL_HSYNC_NEGATIVE BIT(16)
+#define CRT_CTRL_VSYNC_NEGATIVE BIT(17)
+#define CRT_CTRL_VERTICAL_INTR_EN BIT(30)
+#define CRT_CTRL_VERTICAL_INTR_STS BIT(31)
+
+/* CTRL2 */
+#define CRT_CTRL_DAC_EN BIT(0)
+#define CRT_CTRL_VBLANK_LINE(x) (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK)
+#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(31, 20)
+
+/* CRT_HORIZ0 */
+#define CRT_H_TOTAL(x) (x)
+#define CRT_H_DE(x) ((x) << 16)
+
+/* CRT_HORIZ1 */
+#define CRT_H_RS_START(x) (x)
+#define CRT_H_RS_END(x) ((x) << 16)
+
+/* CRT_VIRT0 */
+#define CRT_V_TOTAL(x) (x)
+#define CRT_V_DE(x) ((x) << 16)
+
+/* CRT_VIRT1 */
+#define CRT_V_RS_START(x) (x)
+#define CRT_V_RS_END(x) ((x) << 16)
+
+/* CRT_OFFSET */
+#define CRT_DISP_OFFSET(x) (x)
+#define CRT_TERM_COUNT(x) ((x) << 16)
+
+/* CRT_THROD */
+#define CRT_THROD_LOW(x) (x)
+#define CRT_THROD_HIGH(x) ((x) << 8)
+
+/* Default Threshold Setting */
+#define G5_CRT_THROD_VAL (CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3C))
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
new file mode 100644
index 000000000000..15db9e426ec4
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "aspeed_gfx.h"
+
+static struct aspeed_gfx *
+drm_pipe_to_aspeed_gfx(struct drm_simple_display_pipe *pipe)
+{
+ return container_of(pipe, struct aspeed_gfx, pipe);
+}
+
+static int aspeed_gfx_set_pixel_fmt(struct aspeed_gfx *priv, u32 *bpp)
+{
+ struct drm_crtc *crtc = &priv->pipe.crtc;
+ struct drm_device *drm = crtc->dev;
+ const u32 format = crtc->primary->state->fb->format->format;
+ u32 ctrl1;
+
+ ctrl1 = readl(priv->base + CRT_CTRL1);
+ ctrl1 &= ~CRT_CTRL_COLOR_MASK;
+
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ dev_dbg(drm->dev, "Setting up RGB565 mode\n");
+ ctrl1 |= CRT_CTRL_COLOR_RGB565;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
+ ctrl1 |= CRT_CTRL_COLOR_XRGB8888;
+ *bpp = 32;
+ break;
+ default:
+ dev_err(drm->dev, "Unhandled pixel format %08x\n", format);
+ return -EINVAL;
+ }
+
+ writel(ctrl1, priv->base + CRT_CTRL1);
+
+ return 0;
+}
+
+static void aspeed_gfx_enable_controller(struct aspeed_gfx *priv)
+{
+ u32 ctrl1 = readl(priv->base + CRT_CTRL1);
+ u32 ctrl2 = readl(priv->base + CRT_CTRL2);
+
+ /* SCU2C: set DAC source for display output to Graphics CRT (GFX) */
+ regmap_update_bits(priv->scu, 0x2c, BIT(16), BIT(16));
+
+ writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1);
+ writel(ctrl2 | CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
+}
+
+static void aspeed_gfx_disable_controller(struct aspeed_gfx *priv)
+{
+ u32 ctrl1 = readl(priv->base + CRT_CTRL1);
+ u32 ctrl2 = readl(priv->base + CRT_CTRL2);
+
+ writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1);
+ writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
+
+ regmap_update_bits(priv->scu, 0x2c, BIT(16), 0);
+}
+
+static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
+{
+ struct drm_display_mode *m = &priv->pipe.crtc.state->adjusted_mode;
+ u32 ctrl1, d_offset, t_count, bpp;
+ int err;
+
+ err = aspeed_gfx_set_pixel_fmt(priv, &bpp);
+ if (err)
+ return;
+
+#if 0
+ /* TODO: we have only been able to test with the 40MHz USB clock. The
+ * clock is fixed, so we cannot adjust it here. */
+ clk_set_rate(priv->pixel_clk, m->crtc_clock * 1000);
+#endif
+
+ ctrl1 = readl(priv->base + CRT_CTRL1);
+ ctrl1 &= ~(CRT_CTRL_INTERLACED |
+ CRT_CTRL_HSYNC_NEGATIVE |
+ CRT_CTRL_VSYNC_NEGATIVE);
+
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
+ ctrl1 |= CRT_CTRL_INTERLACED;
+
+ if (!(m->flags & DRM_MODE_FLAG_PHSYNC))
+ ctrl1 |= CRT_CTRL_HSYNC_NEGATIVE;
+
+ if (!(m->flags & DRM_MODE_FLAG_PVSYNC))
+ ctrl1 |= CRT_CTRL_VSYNC_NEGATIVE;
+
+ writel(ctrl1, priv->base + CRT_CTRL1);
+
+ /* Horizontal timing */
+ writel(CRT_H_TOTAL(m->htotal - 1) | CRT_H_DE(m->hdisplay - 1),
+ priv->base + CRT_HORIZ0);
+ writel(CRT_H_RS_START(m->hsync_start - 1) | CRT_H_RS_END(m->hsync_end),
+ priv->base + CRT_HORIZ1);
+
+
+ /* Vertical timing */
+ writel(CRT_V_TOTAL(m->vtotal - 1) | CRT_V_DE(m->vdisplay - 1),
+ priv->base + CRT_VERT0);
+ writel(CRT_V_RS_START(m->vsync_start) | CRT_V_RS_END(m->vsync_end),
+ priv->base + CRT_VERT1);
+
+ /*
+ * Display Offset: address difference between consecutive scan lines
+ * Terminal Count: memory size of one scan line
+ */
+ d_offset = m->hdisplay * bpp / 8;
+ t_count = (m->hdisplay * bpp + 127) / 128;
+ writel(CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count),
+ priv->base + CRT_OFFSET);
+
+ /*
+ * Threshold: FIFO thresholds of refill and stop (16 byte chunks
+ * per line, rounded up)
+ */
+ writel(G5_CRT_THROD_VAL, priv->base + CRT_THROD);
+}
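To make the offset/terminal-count arithmetic concrete, here is a standalone check for the 800x600 XRGB8888 case the driver targets; the numbers are only illustrative.

/* Standalone check of the CRT_OFFSET maths above (not driver code). */
#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 800, bpp = 32;			/* XRGB8888 */
	unsigned int d_offset = hdisplay * bpp / 8;		/* bytes per scan line */
	unsigned int t_count = (hdisplay * bpp + 127) / 128;	/* 128-bit words, rounded up */

	printf("d_offset = %u bytes, t_count = %u\n", d_offset, t_count); /* 3200, 200 */
	return 0;
}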
+
+static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+
+ aspeed_gfx_crtc_mode_set_nofb(priv);
+ aspeed_gfx_enable_controller(priv);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void aspeed_gfx_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+
+ drm_crtc_vblank_off(crtc);
+ aspeed_gfx_disable_controller(priv);
+}
+
+static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_framebuffer *fb = pipe->plane.state->fb;
+ struct drm_pending_vblank_event *event;
+ struct drm_gem_cma_object *gem;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ if (!fb)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!gem)
+ return;
+ writel(gem->paddr, priv->base + CRT_ADDR);
+}
+
+static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ u32 reg = readl(priv->base + CRT_CTRL1);
+
+ /* Clear pending VBLANK IRQ */
+ writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
+
+ reg |= CRT_CTRL_VERTICAL_INTR_EN;
+ writel(reg, priv->base + CRT_CTRL1);
+
+ return 0;
+}
+
+static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ u32 reg = readl(priv->base + CRT_CTRL1);
+
+ reg &= ~CRT_CTRL_VERTICAL_INTR_EN;
+ writel(reg, priv->base + CRT_CTRL1);
+
+ /* Clear pending VBLANK IRQ */
+ writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
+}
+
+static struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
+ .enable = aspeed_gfx_pipe_enable,
+ .disable = aspeed_gfx_pipe_disable,
+ .update = aspeed_gfx_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .enable_vblank = aspeed_gfx_enable_vblank,
+ .disable_vblank = aspeed_gfx_disable_vblank,
+};
+
+static const uint32_t aspeed_gfx_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+};
+
+int aspeed_gfx_create_pipe(struct drm_device *drm)
+{
+ struct aspeed_gfx *priv = drm->dev_private;
+
+ return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
+ aspeed_gfx_formats,
+ ARRAY_SIZE(aspeed_gfx_formats),
+ NULL,
+ &priv->connector);
+}
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
new file mode 100644
index 000000000000..eeb22eccd1fc
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_drv.h>
+
+#include "aspeed_gfx.h"
+
+/**
+ * DOC: ASPEED GFX Driver
+ *
+ * This driver is for the ASPEED BMC SoC's 'GFX' display hardware, also called
+ * the 'SOC Display Controller' in the datasheet. This driver runs on the ARM
+ * based BMC systems, unlike the ast driver which runs on a host CPU and is for
+ * a PCIe graphics device.
+ *
+ * The AST2500 supports a total of 3 output paths:
+ *
+ * 1. VGA output: the output can be routed to the DAC, the DVO interface,
+ * or both.
+ *
+ * 2. Graphics CRT output: the output can be routed to the DAC, the DVO
+ * interface, or both.
+ *
+ * 3. Video input from DVO, the video input can be used for video engine
+ * capture or DAC display output.
+ *
+ * Output options are selected in SCU2C.
+ *
+ * The "VGA mode" device is the PCI attached controller. The "Graphics CRT"
+ * is the ARM's internal display controller.
+ *
+ * The driver only supports a simple configuration consisting of a 40MHz
+ * pixel clock, fixed by hardware limitations, and the VGA output path.
+ *
+ * The driver was written with the 'AST2500 Software Programming Guide' v17,
+ * which is available under NDA from ASPEED.
+ */
+
+static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
+{
+ drm_mode_config_init(drm);
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = 800;
+ drm->mode_config.max_height = 600;
+ drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs;
+}
+
+static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct aspeed_gfx *priv = drm->dev_private;
+ u32 reg;
+
+ reg = readl(priv->base + CRT_CTRL1);
+
+ if (reg & CRT_CTRL_VERTICAL_INTR_STS) {
+ drm_crtc_handle_vblank(&priv->pipe.crtc);
+ writel(reg, priv->base + CRT_CTRL1);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+
+
+static int aspeed_gfx_load(struct drm_device *drm)
+{
+ struct platform_device *pdev = to_platform_device(drm->dev);
+ struct aspeed_gfx *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ drm->dev_private = priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(drm->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+ if (IS_ERR(priv->scu)) {
+ dev_err(&pdev->dev, "failed to find SCU regmap\n");
+ return PTR_ERR(priv->scu);
+ }
+
+ ret = of_reserved_mem_device_init(drm->dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to initialize reserved mem: %d\n", ret);
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ priv->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(&pdev->dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(priv->rst);
+ }
+ reset_control_deassert(priv->rst);
+
+ priv->clk = devm_clk_get(drm->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev,
+ "missing or invalid clk device tree entry");
+ return PTR_ERR(priv->clk);
+ }
+ clk_prepare_enable(priv->clk);
+
+ /* Sanitize control registers */
+ writel(0, priv->base + CRT_CTRL1);
+ writel(0, priv->base + CRT_CTRL2);
+
+ aspeed_gfx_setup_mode_config(drm);
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialise vblank\n");
+ return ret;
+ }
+
+ ret = aspeed_gfx_create_output(drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to create outputs\n");
+ return ret;
+ }
+
+ ret = aspeed_gfx_create_pipe(drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Cannot setup simple display pipe\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(drm->dev, platform_get_irq(pdev, 0),
+ aspeed_gfx_irq_handler, 0, "aspeed gfx", drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to install IRQ handler\n");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ drm_fbdev_generic_setup(drm, 32);
+
+ return 0;
+}
+
+static void aspeed_gfx_unload(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+
+ drm->dev_private = NULL;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(fops);
+
+static struct drm_driver aspeed_gfx_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_PRIME | DRIVER_ATOMIC,
+ .gem_create_object = drm_cma_gem_create_object_default_funcs,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+ .fops = &fops,
+ .name = "aspeed-gfx-drm",
+ .desc = "ASPEED GFX DRM",
+ .date = "20180319",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id aspeed_gfx_match[] = {
+ { .compatible = "aspeed,ast2500-gfx" },
+ { }
+};
+
+static int aspeed_gfx_probe(struct platform_device *pdev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ ret = aspeed_gfx_load(drm);
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_unload;
+
+ return 0;
+
+err_unload:
+ aspeed_gfx_unload(drm);
+err_free:
+ drm_dev_put(drm);
+
+ return ret;
+}
+
+static int aspeed_gfx_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ aspeed_gfx_unload(drm);
+ drm_dev_put(drm);
+
+ return 0;
+}
+
+static struct platform_driver aspeed_gfx_platform_driver = {
+ .probe = aspeed_gfx_probe,
+ .remove = aspeed_gfx_remove,
+ .driver = {
+ .name = "aspeed_gfx",
+ .of_match_table = aspeed_gfx_match,
+ },
+};
+
+module_platform_driver(aspeed_gfx_platform_driver);
+
+MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
new file mode 100644
index 000000000000..67ee5fa10055
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "aspeed_gfx.h"
+
+static int aspeed_gfx_get_modes(struct drm_connector *connector)
+{
+ return drm_add_modes_noedid(connector, 800, 600);
+}
+
+static const struct
+drm_connector_helper_funcs aspeed_gfx_connector_helper_funcs = {
+ .get_modes = aspeed_gfx_get_modes,
+};
+
+static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+int aspeed_gfx_create_output(struct drm_device *drm)
+{
+ struct aspeed_gfx *priv = drm->dev_private;
+ int ret;
+
+ priv->connector.dpms = DRM_MODE_DPMS_OFF;
+ priv->connector.polled = 0;
+ drm_connector_helper_add(&priv->connector,
+ &aspeed_gfx_connector_helper_funcs);
+ ret = drm_connector_init(drm, &priv->connector,
+ &aspeed_gfx_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ return ret;
+}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index bfc65040dfcb..1cf0c75e411d 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -259,7 +259,7 @@ struct ast_framebuffer {
};
struct ast_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct ast_framebuffer afb;
void *sysram;
int size;
@@ -353,8 +353,6 @@ extern int ast_dumb_mmap_offset(struct drm_file *file,
uint32_t handle,
uint64_t *offset);
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
int ast_mm_init(struct ast_private *ast);
void ast_mm_fini(struct ast_private *ast);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 2c9f8dd9733a..e718d0f60d6b 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -217,8 +217,6 @@ static int astfb_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out;
}
- info->par = afbdev;
-
ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
if (ret)
goto out;
@@ -229,15 +227,12 @@ static int astfb_create(struct drm_fb_helper *helper,
fb = &afbdev->afb.base;
afbdev->helper.fb = fb;
- strcpy(info->fix.id, "astdrmfb");
-
info->fbops = &astfb_ops;
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &afbdev->helper, sizes);
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index c168d62fe8f9..75d477b37854 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -178,7 +178,6 @@ int ast_mm_init(struct ast_private *ast)
ret = ttm_bo_device_init(&ast->ttm.bdev,
&ast_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -344,13 +343,8 @@ int ast_bo_push_sysram(struct ast_bo *bo)
int ast_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct ast_private *ast;
+ struct drm_file *file_priv = filp->private_data;
+ struct ast_private *ast = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- ast = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
}
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 03711394f1ed..341cc9d1bab4 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -7,6 +7,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem.h>
@@ -69,11 +70,9 @@ struct bochs_device {
struct edid *edid;
/* drm */
- struct drm_device *dev;
- struct drm_crtc crtc;
- struct drm_encoder encoder;
+ struct drm_device *dev;
+ struct drm_simple_display_pipe pipe;
struct drm_connector connector;
- bool mode_config_initialized;
/* ttm */
struct {
@@ -101,8 +100,6 @@ static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
return container_of(gem, struct bochs_bo, gem);
}
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
{
return drm_vma_node_offset_addr(&bo->bo.vma_node);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9cd82e3631fb..5e905f50449d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -22,76 +22,55 @@ MODULE_PARM_DESC(defy, "default y resolution");
/* ---------------------------------------------------------------------- */
-static void bochs_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static const uint32_t bochs_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGRX8888,
+};
+
+static void bochs_plane_update(struct bochs_device *bochs,
+ struct drm_plane_state *state)
{
- struct bochs_device *bochs =
- container_of(crtc, struct bochs_device, crtc);
+ struct bochs_bo *bo;
- bochs_hw_setmode(bochs, &crtc->mode);
+ if (!state->fb || !bochs->stride)
+ return;
+
+ bo = gem_to_bochs_bo(state->fb->obj[0]);
+ bochs_hw_setbase(bochs,
+ state->crtc_x,
+ state->crtc_y,
+ bo->bo.offset);
+ bochs_hw_setformat(bochs, state->fb->format);
}
-static void bochs_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
{
+ struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+
+ bochs_hw_setmode(bochs, &crtc_state->mode);
+ bochs_plane_update(bochs, plane_state);
}
-static void bochs_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_pending_vblank_event *event;
+ struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ struct drm_crtc *crtc = &pipe->crtc;
- if (crtc->state && crtc->state->event) {
- unsigned long irqflags;
+ bochs_plane_update(bochs, pipe->plane.state);
- spin_lock_irqsave(&dev->event_lock, irqflags);
- event = crtc->state->event;
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock_irq(&crtc->dev->event_lock);
}
}
-
-/* These provide the minimum set of functions required to handle a CRTC */
-static const struct drm_crtc_funcs bochs_crtc_funcs = {
- .set_config = drm_atomic_helper_set_config,
- .destroy = drm_crtc_cleanup,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
-};
-
-static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
- .mode_set_nofb = bochs_crtc_mode_set_nofb,
- .atomic_enable = bochs_crtc_atomic_enable,
- .atomic_flush = bochs_crtc_atomic_flush,
-};
-
-static const uint32_t bochs_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_BGRX8888,
-};
-
-static void bochs_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct bochs_device *bochs = plane->dev->dev_private;
- struct bochs_bo *bo;
-
- if (!plane->state->fb)
- return;
- bo = gem_to_bochs_bo(plane->state->fb->obj[0]);
- bochs_hw_setbase(bochs,
- plane->state->crtc_x,
- plane->state->crtc_y,
- bo->bo.offset);
- bochs_hw_setformat(bochs, plane->state->fb->format);
-}
-
-static int bochs_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state)
{
struct bochs_bo *bo;
@@ -101,8 +80,8 @@ static int bochs_plane_prepare_fb(struct drm_plane *plane,
return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
}
-static void bochs_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
{
struct bochs_bo *bo;
@@ -112,73 +91,13 @@ static void bochs_plane_cleanup_fb(struct drm_plane *plane,
bochs_bo_unpin(bo);
}
-static const struct drm_plane_helper_funcs bochs_plane_helper_funcs = {
- .atomic_update = bochs_plane_atomic_update,
- .prepare_fb = bochs_plane_prepare_fb,
- .cleanup_fb = bochs_plane_cleanup_fb,
-};
-
-static const struct drm_plane_funcs bochs_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static struct drm_plane *bochs_primary_plane(struct drm_device *dev)
-{
- struct drm_plane *primary;
- int ret;
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- ret = drm_universal_plane_init(dev, primary, 0,
- &bochs_plane_funcs,
- bochs_formats,
- ARRAY_SIZE(bochs_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- return NULL;
- }
-
- drm_plane_helper_add(primary, &bochs_plane_helper_funcs);
- return primary;
-}
-
-static void bochs_crtc_init(struct drm_device *dev)
-{
- struct bochs_device *bochs = dev->dev_private;
- struct drm_crtc *crtc = &bochs->crtc;
- struct drm_plane *primary = bochs_primary_plane(dev);
-
- drm_crtc_init_with_planes(dev, crtc, primary, NULL,
- &bochs_crtc_funcs, NULL);
- drm_crtc_helper_add(crtc, &bochs_helper_funcs);
-}
-
-static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
+static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
+ .enable = bochs_pipe_enable,
+ .update = bochs_pipe_update,
+ .prepare_fb = bochs_pipe_prepare_fb,
+ .cleanup_fb = bochs_pipe_cleanup_fb,
};
-static void bochs_encoder_init(struct drm_device *dev)
-{
- struct bochs_device *bochs = dev->dev_private;
- struct drm_encoder *encoder = &bochs->encoder;
-
- encoder->possible_crtcs = 0x1;
- drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
-}
-
-
static int bochs_connector_get_modes(struct drm_connector *connector)
{
struct bochs_device *bochs =
@@ -214,20 +133,9 @@ static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *con
return MODE_OK;
}
-static struct drm_encoder *
-bochs_connector_best_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
.mode_valid = bochs_connector_mode_valid,
- .best_encoder = bochs_connector_best_encoder,
};
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
@@ -278,7 +186,6 @@ const struct drm_mode_config_funcs bochs_mode_funcs = {
int bochs_kms_init(struct bochs_device *bochs)
{
drm_mode_config_init(bochs->dev);
- bochs->mode_config_initialized = true;
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
@@ -290,11 +197,14 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
- bochs_crtc_init(bochs->dev);
- bochs_encoder_init(bochs->dev);
bochs_connector_init(bochs->dev);
- drm_connector_attach_encoder(&bochs->connector,
- &bochs->encoder);
+ drm_simple_display_pipe_init(bochs->dev,
+ &bochs->pipe,
+ &bochs_pipe_funcs,
+ bochs_formats,
+ ARRAY_SIZE(bochs_formats),
+ NULL,
+ &bochs->connector);
drm_mode_config_reset(bochs->dev);
@@ -303,8 +213,6 @@ int bochs_kms_init(struct bochs_device *bochs)
void bochs_kms_fini(struct bochs_device *bochs)
{
- if (bochs->mode_config_initialized) {
- drm_mode_config_cleanup(bochs->dev);
- bochs->mode_config_initialized = false;
- }
+ drm_atomic_helper_shutdown(bochs->dev);
+ drm_mode_config_cleanup(bochs->dev);
}
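
The conversion above replaces the separate CRTC, encoder and primary plane with a single drm_simple_display_pipe embedded in struct bochs_device. The callbacks fetch the driver structure through pipe->crtc.dev->dev_private; because the pipe is embedded, a container_of() lookup would work just as well. A minimal sketch of that alternative idiom (pipe_to_bochs() is hypothetical, not part of the patch):

#include <linux/kernel.h>		/* container_of() */
#include "bochs.h"			/* struct bochs_device, with the embedded .pipe */

/* Illustrative only: recover the bochs device from the embedded pipe. */
static inline struct bochs_device *
pipe_to_bochs(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct bochs_device, pipe);
}
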
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 49463348a07a..4a40308169c4 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -156,7 +156,6 @@ int bochs_mm_init(struct bochs_device *bochs)
ret = ttm_bo_device_init(&bochs->ttm.bdev,
&bochs_bo_driver,
bochs->dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -264,14 +263,9 @@ int bochs_bo_unpin(struct bochs_bo *bo)
int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct bochs_device *bochs;
+ struct drm_file *file_priv = filp->private_data;
+ struct bochs_device *bochs = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- bochs = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8840f396a7b6..3dff9997f5e3 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -76,7 +76,6 @@ config DRM_PARADE_PS8622
depends on OF
select DRM_PANEL
select DRM_KMS_HELPER
- select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
---help---
Parade eDP-LVDS bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec2ca71e1323..c532e9c9e491 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -748,11 +748,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (mode->vrefresh <= 24000)
+ if (drm_mode_vrefresh(mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (mode->vrefresh <= 25000)
+ else if (drm_mode_vrefresh(mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (mode->vrefresh <= 30000)
+ else if (drm_mode_vrefresh(mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
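
For context on the adv7511 change above: mode->vrefresh is a cached refresh rate in Hz, so the old 24000/25000/30000 thresholds read as if the field held millihertz and the first branch would effectively always match; drm_mode_vrefresh() recomputes the rate from the timings instead. A simplified sketch of what that helper returns (the real one also accounts for interlace, doublescan and vscan):

#include <linux/kernel.h>
#include <drm/drm_modes.h>

/*
 * Approximation of drm_mode_vrefresh():
 *   vrefresh [Hz] ~= clock [kHz] * 1000 / (htotal * vtotal)
 * e.g. CEA 1080p24: 74250 * 1000 / (2750 * 1125) = 24 Hz
 */
static int approx_vrefresh(const struct drm_display_mode *mode)
{
	if (!mode->htotal || !mode->vtotal)
		return 0;
	return DIV_ROUND_CLOSEST(mode->clock * 1000,
				 mode->htotal * mode->vtotal);
}
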
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 0805801f4e94..e64736c39a9f 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -234,7 +234,7 @@ static int dumb_vga_remove(struct platform_device *pdev)
*/
static const struct drm_bridge_timings default_dac_timings = {
/* Timing specifications, datasheet page 7 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
.setup_time_ps = 500,
.hold_time_ps = 1500,
};
@@ -245,7 +245,7 @@ static const struct drm_bridge_timings default_dac_timings = {
*/
static const struct drm_bridge_timings ti_ths8134_dac_timings = {
/* From timing diagram, datasheet page 9 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
/* From datasheet, page 12 */
.setup_time_ps = 3000,
/* I guess this means latched input */
@@ -258,7 +258,7 @@ static const struct drm_bridge_timings ti_ths8134_dac_timings = {
*/
static const struct drm_bridge_timings ti_ths8135_dac_timings = {
/* From timing diagram, datasheet page 14 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
/* From datasheet, page 16 */
.setup_time_ps = 2000,
.hold_time_ps = 500,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index a63e5f0dae56..ab7968c8f6a2 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1037,6 +1037,35 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
}
EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write);
+/* Filter out invalid setups to avoid configuring SCDC and scrambling */
+static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
+{
+ struct drm_display_info *display = &hdmi->connector.display_info;
+
+ /* Completely disable SCDC support for older controllers */
+ if (hdmi->version < 0x200a)
+ return false;
+
+ /* Disable if no DDC bus */
+ if (!hdmi->ddc)
+ return false;
+
+ /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
+ if (!display->hdmi.scdc.supported ||
+ !display->hdmi.scdc.scrambling.supported)
+ return false;
+
+ /*
+ * Disable if the display only supports low TMDS rates and scrambling
+ * for low rates is not supported either.
+ */
+ if (!display->hdmi.scdc.scrambling.low_rates &&
+ display->max_tmds_clock <= 340000)
+ return false;
+
+ return true;
+}
+
/*
* HDMI2.0 Specifies the following procedure for High TMDS Bit Rates:
* - The Source shall suspend transmission of the TMDS clock and data
@@ -1055,7 +1084,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi)
unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
- if (hdmi->connector.display_info.hdmi.scdc.supported) {
+ if (dw_hdmi_support_scdc(hdmi)) {
if (mtmdsclock > HDMI14_MAX_TMDSCLK)
drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
else
@@ -1579,8 +1608,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
/* Set up HDMI_FC_INVIDCONF */
inv_val = (hdmi->hdmi_data.hdcp_enable ||
- vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
- hdmi_info->scdc.scrambling.low_rates ?
+ (dw_hdmi_support_scdc(hdmi) &&
+ (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+ hdmi_info->scdc.scrambling.low_rates)) ?
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
@@ -1646,7 +1676,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
}
/* Scrambling Control */
- if (hdmi_info->scdc.supported) {
+ if (dw_hdmi_support_scdc(hdmi)) {
if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
hdmi_info->scdc.scrambling.low_rates) {
/*
@@ -1658,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
* Source Devices compliant shall set the
* Source Version = 1.
*/
- drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
+ drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION,
&bytes);
- drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
+ drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION,
min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
/* Enabled Scrambling in the Sink */
- drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
+ drm_scdc_set_scrambling(hdmi->ddc, 1);
/*
* To activate the scrambler feature, you must ensure
@@ -1680,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
HDMI_MC_SWRSTZ);
- drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
+ drm_scdc_set_scrambling(hdmi->ddc, 0);
}
}
@@ -1774,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
* iteration for others.
* The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
* the workaround with a single iteration.
+ * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
+ * been identified as needing the workaround with a single iteration.
*/
switch (hdmi->version) {
@@ -1782,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
break;
case 0x131a:
case 0x132a:
+ case 0x200a:
case 0x201a:
+ case 0x211a:
case 0x212a:
count = 1;
break;
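
The dw-hdmi hunks above gate every SCDC access behind dw_hdmi_support_scdc() (controller 2.00a or newer, a DDC adapter present, sink support advertised) and route the register reads/writes through hdmi->ddc rather than the internal I2C master, so they also work when an external ddc-i2c-bus is in use. The underlying HDMI 2.0 rule being implemented: scrambling and the 1/40 TMDS clock ratio are mandatory above 340 MHz TMDS character rate, and are used below it only when the sink sets the LTE_340Mcsc_scramble flag. A compact sketch of that rule (needs_scrambling() is illustrative, not a driver function):

#include <drm/drm_connector.h>

#define EXAMPLE_HDMI14_MAX_TMDSCLK	340000	/* kHz, matches HDMI14_MAX_TMDSCLK */

static bool needs_scrambling(const struct drm_display_info *display,
			     unsigned long tmdsclock)
{
	/* Above 340 MHz scrambling is required; below it, only if requested. */
	return tmdsclock > EXAMPLE_HDMI14_MAX_TMDSCLK ||
	       display->hdmi.scdc.scrambling.low_rates;
}
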
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 888980d4bc74..e570c9dee180 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1222,8 +1222,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
&bus_format, 1);
tc->connector.display_info.bus_flags =
DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE |
- DRM_BUS_FLAG_SYNC_NEGEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
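
The bus-flag updates in the dumb-vga-dac and tc358767 hunks follow the rename of the ambiguous DRM_BUS_FLAG_PIXDATA_POSEDGE/NEGEDGE flags into explicit _SAMPLE_ and _DRIVE_ variants: bridge input timings describe the edge on which the bridge samples the data, while a connector's display_info.bus_flags describe the edge on which the source should drive it. A minimal sketch of both viewpoints (the example_* names are hypothetical):

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>

/* Bridge input side: this device samples pixel data on the rising edge. */
static const struct drm_bridge_timings example_bridge_timings = {
	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
	.setup_time_ps = 500,
	.hold_time_ps = 1500,
};

/* Connector side: ask the source to drive pixel data on the falling edge. */
static void example_set_connector_flags(struct drm_connector *connector)
{
	connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH |
					    DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
}
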
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 7bfb4f338813..8b0e71bd3ca7 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -27,10 +27,16 @@
struct tfp410 {
struct drm_bridge bridge;
struct drm_connector connector;
+ unsigned int connector_type;
+ u32 bus_format;
struct i2c_adapter *ddc;
struct gpio_desc *hpd;
+ int hpd_irq;
struct delayed_work hpd_work;
+ struct gpio_desc *powerdown;
+
+ struct drm_bridge_timings timings;
struct device *dev;
};
@@ -120,26 +126,47 @@ static int tfp410_attach(struct drm_bridge *bridge)
return -ENODEV;
}
- if (dvi->hpd)
+ if (dvi->hpd_irq >= 0)
dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
ret = drm_connector_init(bridge->dev, &dvi->connector,
- &tfp410_con_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ &tfp410_con_funcs, dvi->connector_type);
if (ret) {
dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
return ret;
}
+ drm_display_info_set_bus_formats(&dvi->connector.display_info,
+ &dvi->bus_format, 1);
+
drm_connector_attach_encoder(&dvi->connector,
bridge->encoder);
return 0;
}
+static void tfp410_enable(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ gpiod_set_value_cansleep(dvi->powerdown, 0);
+}
+
+static void tfp410_disable(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ gpiod_set_value_cansleep(dvi->powerdown, 1);
+}
+
static const struct drm_bridge_funcs tfp410_bridge_funcs = {
.attach = tfp410_attach,
+ .enable = tfp410_enable,
+ .disable = tfp410_disable,
};
static void tfp410_hpd_work_func(struct work_struct *work)
@@ -162,6 +189,83 @@ static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
+static const struct drm_bridge_timings tfp410_default_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+ .setup_time_ps = 1200,
+ .hold_time_ps = 1300,
+};
+
+static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
+{
+ struct drm_bridge_timings *timings = &dvi->timings;
+ struct device_node *ep;
+ u32 pclk_sample = 0;
+ u32 bus_width = 24;
+ s32 deskew = 0;
+
+ /* Start with defaults. */
+ *timings = tfp410_default_timings;
+
+ if (i2c)
+ /*
+ * In I2C mode timings are configured through the I2C interface.
+ * As the driver doesn't support I2C configuration yet, we just
+ * go with the defaults (BSEL=1, DSEL=1, DKEN=0, EDGE=1).
+ */
+ return 0;
+
+ /*
+ * In non-I2C mode, timings are configured through the BSEL, DSEL, DKEN
+ * and EDGE pins. They are specified in DT through endpoint properties
+ * and vendor-specific properties.
+ */
+ ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 0, 0);
+ if (!ep)
+ return -EINVAL;
+
+ /* Get the sampling edge from the endpoint. */
+ of_property_read_u32(ep, "pclk-sample", &pclk_sample);
+ of_property_read_u32(ep, "bus-width", &bus_width);
+ of_node_put(ep);
+
+ timings->input_bus_flags = DRM_BUS_FLAG_DE_HIGH;
+
+ switch (pclk_sample) {
+ case 0:
+ timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
+ break;
+ case 1:
+ timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (bus_width) {
+ case 12:
+ dvi->bus_format = MEDIA_BUS_FMT_RGB888_2X12_LE;
+ break;
+ case 24:
+ dvi->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Get the setup and hold time from vendor-specific properties. */
+ of_property_read_u32(dvi->dev->of_node, "ti,deskew", (u32 *)&deskew);
+ if (deskew < -4 || deskew > 3)
+ return -EINVAL;
+
+ timings->setup_time_ps = min(0, 1200 - 350 * deskew);
+ timings->hold_time_ps = min(0, 1300 + 350 * deskew);
+
+ return 0;
+}
+
static int tfp410_get_connector_properties(struct tfp410 *dvi)
{
struct device_node *connector_node, *ddc_phandle;
@@ -172,6 +276,11 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi)
if (!connector_node)
return -ENODEV;
+ if (of_device_is_compatible(connector_node, "hdmi-connector"))
+ dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ else
+ dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
+
dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode,
"hpd-gpios", 0, GPIOD_IN, "hpd");
if (IS_ERR(dvi->hpd)) {
@@ -200,7 +309,7 @@ fail:
return ret;
}
-static int tfp410_init(struct device *dev)
+static int tfp410_init(struct device *dev, bool i2c)
{
struct tfp410 *dvi;
int ret;
@@ -217,16 +326,33 @@ static int tfp410_init(struct device *dev)
dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
+ dvi->bridge.timings = &dvi->timings;
dvi->dev = dev;
+ ret = tfp410_parse_timings(dvi, i2c);
+ if (ret)
+ goto fail;
+
ret = tfp410_get_connector_properties(dvi);
if (ret)
goto fail;
- if (dvi->hpd) {
+ dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(dvi->powerdown)) {
+ dev_err(dev, "failed to parse powerdown gpio\n");
+ return PTR_ERR(dvi->powerdown);
+ }
+
+ if (dvi->hpd)
+ dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
+ else
+ dvi->hpd_irq = -ENXIO;
+
+ if (dvi->hpd_irq >= 0) {
INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
- ret = devm_request_threaded_irq(dev, gpiod_to_irq(dvi->hpd),
+ ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi-hpd", dvi);
@@ -264,7 +390,7 @@ static int tfp410_fini(struct device *dev)
static int tfp410_probe(struct platform_device *pdev)
{
- return tfp410_init(&pdev->dev);
+ return tfp410_init(&pdev->dev, false);
}
static int tfp410_remove(struct platform_device *pdev)
@@ -301,7 +427,7 @@ static int tfp410_i2c_probe(struct i2c_client *client,
return -ENXIO;
}
- return tfp410_init(&client->dev);
+ return tfp410_init(&client->dev, true);
}
static int tfp410_i2c_remove(struct i2c_client *client)
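
In the tfp410 changes above, tfp410_parse_timings() turns the vendor "ti,deskew" property (valid range -4..3) into setup/hold times in 350 ps steps around the nominal 1200 ps / 1300 ps defaults. A worked example of that arithmetic, with the clamping left aside (tfp410_deskew_to_ps() is only an illustration):

/*
 * deskew -4: setup 1200 - 350 * (-4) = 2600 ps, hold 1300 + 350 * (-4) = -100 ps
 * deskew  0: setup 1200 ps,                     hold 1300 ps
 * deskew +3: setup 1200 - 350 * 3    =  150 ps, hold 1300 + 350 * 3    = 2350 ps
 */
static void tfp410_deskew_to_ps(int deskew, int *setup_ps, int *hold_ps)
{
	*setup_ps = 1200 - 350 * deskew;
	*hold_ps  = 1300 + 350 * deskew;
}
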
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index fc78c90ee931..dd4f52a0bc1c 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -2,7 +2,7 @@ config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_GEM_SHMEM_HELPER
help
This is a KMS driver for emulated cirrus device in qemu.
It is *NOT* intended for real cirrus devices. This requires
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
index 919c0a336c97..acf8971d37a1 100644
--- a/drivers/gpu/drm/cirrus/Makefile
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -1,4 +1 @@
-cirrus-y := cirrus_main.o cirrus_mode.o \
- cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
-
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
new file mode 100644
index 000000000000..be4ea370ba31
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -0,0 +1,657 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2012-2019 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ * Gerd Hoffmann
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <video/cirrus.h>
+#include <video/vga.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu cirrus vga"
+#define DRIVER_DATE "2019"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 0
+
+#define CIRRUS_MAX_PITCH (0x1FF << 3) /* (4096 - 1) & ~111b bytes */
+#define CIRRUS_VRAM_SIZE (4 * 1024 * 1024) /* 4 MB */
+
+struct cirrus_device {
+ struct drm_device dev;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector conn;
+ unsigned int cpp;
+ unsigned int pitch;
+ void __iomem *vram;
+ void __iomem *mmio;
+};
+
+/* ------------------------------------------------------------------ */
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+static u8 rreg_seq(struct cirrus_device *cirrus, u8 reg)
+{
+ iowrite8(reg, cirrus->mmio + SEQ_INDEX);
+ return ioread8(cirrus->mmio + SEQ_DATA);
+}
+
+static void wreg_seq(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + SEQ_INDEX);
+ iowrite8(val, cirrus->mmio + SEQ_DATA);
+}
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+static u8 rreg_crt(struct cirrus_device *cirrus, u8 reg)
+{
+ iowrite8(reg, cirrus->mmio + CRT_INDEX);
+ return ioread8(cirrus->mmio + CRT_DATA);
+}
+
+static void wreg_crt(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + CRT_INDEX);
+ iowrite8(val, cirrus->mmio + CRT_DATA);
+}
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+static void wreg_gfx(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + GFX_INDEX);
+ iowrite8(val, cirrus->mmio + GFX_DATA);
+}
+
+#define VGA_DAC_MASK 0x06
+
+static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
+{
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
+}
+
+static int cirrus_convert_to(struct drm_framebuffer *fb)
+{
+ if (fb->format->cpp[0] == 4 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
+ if (fb->width * 3 <= CIRRUS_MAX_PITCH)
+ /* convert from XR24 to RG24 */
+ return 3;
+ else
+ /* convert from XR24 to RG16 */
+ return 2;
+ }
+ return 0;
+}
+
+static int cirrus_cpp(struct drm_framebuffer *fb)
+{
+ int convert_cpp = cirrus_convert_to(fb);
+
+ if (convert_cpp)
+ return convert_cpp;
+ return fb->format->cpp[0];
+}
+
+static int cirrus_pitch(struct drm_framebuffer *fb)
+{
+ int convert_cpp = cirrus_convert_to(fb);
+
+ if (convert_cpp)
+ return convert_cpp * fb->width;
+ return fb->pitches[0];
+}
+
+static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
+{
+ u32 addr;
+ u8 tmp;
+
+ addr = offset >> 2;
+ wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
+ wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
+
+ tmp = rreg_crt(cirrus, 0x1b);
+ tmp &= 0xf2;
+ tmp |= (addr >> 16) & 0x01;
+ tmp |= (addr >> 15) & 0x0c;
+ wreg_crt(cirrus, 0x1b, tmp);
+
+ tmp = rreg_crt(cirrus, 0x1d);
+ tmp &= 0x7f;
+ tmp |= (addr >> 12) & 0x80;
+ wreg_crt(cirrus, 0x1d, tmp);
+}
+
+static int cirrus_mode_set(struct cirrus_device *cirrus,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb)
+{
+ int hsyncstart, hsyncend, htotal, hdispend;
+ int vtotal, vdispend;
+ int tmp;
+ int sr07 = 0, hdr = 0;
+
+ htotal = mode->htotal / 8;
+ hsyncend = mode->hsync_end / 8;
+ hsyncstart = mode->hsync_start / 8;
+ hdispend = mode->hdisplay / 8;
+
+ vtotal = mode->vtotal;
+ vdispend = mode->vdisplay;
+
+ vdispend -= 1;
+ vtotal -= 2;
+
+ htotal -= 5;
+ hdispend -= 1;
+ hsyncstart += 1;
+ hsyncend += 1;
+
+ wreg_crt(cirrus, VGA_CRTC_V_SYNC_END, 0x20);
+ wreg_crt(cirrus, VGA_CRTC_H_TOTAL, htotal);
+ wreg_crt(cirrus, VGA_CRTC_H_DISP, hdispend);
+ wreg_crt(cirrus, VGA_CRTC_H_SYNC_START, hsyncstart);
+ wreg_crt(cirrus, VGA_CRTC_H_SYNC_END, hsyncend);
+ wreg_crt(cirrus, VGA_CRTC_V_TOTAL, vtotal & 0xff);
+ wreg_crt(cirrus, VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+ tmp = 0x40;
+ if ((vdispend + 1) & 512)
+ tmp |= 0x20;
+ wreg_crt(cirrus, VGA_CRTC_MAX_SCAN, tmp);
+
+ /*
+ * Overflow bits for values that don't fit in the standard registers
+ */
+ tmp = 0x10;
+ if (vtotal & 0x100)
+ tmp |= 0x01;
+ if (vdispend & 0x100)
+ tmp |= 0x02;
+ if ((vdispend + 1) & 0x100)
+ tmp |= 0x08;
+ if (vtotal & 0x200)
+ tmp |= 0x20;
+ if (vdispend & 0x200)
+ tmp |= 0x40;
+ wreg_crt(cirrus, VGA_CRTC_OVERFLOW, tmp);
+
+ tmp = 0;
+
+ /* More overflow bits */
+
+ if ((htotal + 5) & 0x40)
+ tmp |= 0x10;
+ if ((htotal + 5) & 0x80)
+ tmp |= 0x20;
+ if (vtotal & 0x100)
+ tmp |= 0x40;
+ if (vtotal & 0x200)
+ tmp |= 0x80;
+
+ wreg_crt(cirrus, CL_CRT1A, tmp);
+
+ /* Disable Hercules/CGA compatibility */
+ wreg_crt(cirrus, VGA_CRTC_MODE, 0x03);
+
+ sr07 = rreg_seq(cirrus, 0x07);
+ sr07 &= 0xe0;
+ hdr = 0;
+
+ cirrus->cpp = cirrus_cpp(fb);
+ switch (cirrus->cpp * 8) {
+ case 8:
+ sr07 |= 0x11;
+ break;
+ case 16:
+ sr07 |= 0x17;
+ hdr = 0xc1;
+ break;
+ case 24:
+ sr07 |= 0x15;
+ hdr = 0xc5;
+ break;
+ case 32:
+ sr07 |= 0x19;
+ hdr = 0xc5;
+ break;
+ default:
+ return -1;
+ }
+
+ wreg_seq(cirrus, 0x7, sr07);
+
+ /* Program the pitch */
+ cirrus->pitch = cirrus_pitch(fb);
+ tmp = cirrus->pitch / 8;
+ wreg_crt(cirrus, VGA_CRTC_OFFSET, tmp);
+
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ tmp = 0x22;
+ tmp |= (cirrus->pitch >> 7) & 0x10;
+ tmp |= (cirrus->pitch >> 6) & 0x40;
+ wreg_crt(cirrus, 0x1b, tmp);
+
+ /* Enable high-colour modes */
+ wreg_gfx(cirrus, VGA_GFX_MODE, 0x40);
+
+ /* And set graphics mode */
+ wreg_gfx(cirrus, VGA_GFX_MISC, 0x01);
+
+ wreg_hdr(cirrus, hdr);
+
+ cirrus_set_start_address(cirrus, 0);
+
+ /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
+ outb(0x20, 0x3c0);
+ return 0;
+}
+
+static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
+ struct drm_rect *rect)
+{
+ struct cirrus_device *cirrus = fb->dev->dev_private;
+ void *vmap;
+
+ vmap = drm_gem_shmem_vmap(fb->obj[0]);
+ if (!vmap)
+ return -ENOMEM;
+
+ if (cirrus->cpp == fb->format->cpp[0])
+ drm_fb_memcpy_dstclip(cirrus->vram,
+ vmap, fb, rect);
+
+ else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2)
+ drm_fb_xrgb8888_to_rgb565_dstclip(cirrus->vram,
+ cirrus->pitch,
+ vmap, fb, rect, false);
+
+ else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3)
+ drm_fb_xrgb8888_to_rgb888_dstclip(cirrus->vram,
+ cirrus->pitch,
+ vmap, fb, rect);
+
+ else
+ WARN_ON_ONCE("cpp mismatch");
+
+ drm_gem_shmem_vunmap(fb->obj[0], vmap);
+ return 0;
+}
+
+static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
+{
+ struct drm_rect fullscreen = {
+ .x1 = 0,
+ .x2 = fb->width,
+ .y1 = 0,
+ .y2 = fb->height,
+ };
+ return cirrus_fb_blit_rect(fb, &fullscreen);
+}
+
+static int cirrus_check_size(int width, int height,
+ struct drm_framebuffer *fb)
+{
+ int pitch = width * 2;
+
+ if (fb)
+ pitch = cirrus_pitch(fb);
+
+ if (pitch > CIRRUS_MAX_PITCH)
+ return -EINVAL;
+ if (pitch * height > CIRRUS_VRAM_SIZE)
+ return -EINVAL;
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus connector */
+
+static int cirrus_conn_get_modes(struct drm_connector *conn)
+{
+ int count;
+
+ count = drm_add_modes_noedid(conn,
+ conn->dev->mode_config.max_width,
+ conn->dev->mode_config.max_height);
+ drm_set_preferred_mode(conn, 1024, 768);
+ return count;
+}
+
+static const struct drm_connector_helper_funcs cirrus_conn_helper_funcs = {
+ .get_modes = cirrus_conn_get_modes,
+};
+
+static const struct drm_connector_funcs cirrus_conn_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int cirrus_conn_init(struct cirrus_device *cirrus)
+{
+ drm_connector_helper_add(&cirrus->conn, &cirrus_conn_helper_funcs);
+ return drm_connector_init(&cirrus->dev, &cirrus->conn,
+ &cirrus_conn_funcs, DRM_MODE_CONNECTOR_VGA);
+
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus (simple) display pipe */
+
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
+ return MODE_BAD;
+ return MODE_OK;
+}
+
+static int cirrus_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+
+ if (!fb)
+ return 0;
+ return cirrus_check_size(fb->width, fb->height, fb);
+}
+
+static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+
+ cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
+ cirrus_fb_blit_fullscreen(plane_state->fb);
+}
+
+static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_rect rect;
+
+ if (pipe->plane.state->fb &&
+ cirrus->cpp != cirrus_cpp(pipe->plane.state->fb))
+ cirrus_mode_set(cirrus, &crtc->mode,
+ pipe->plane.state->fb);
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+ cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
+
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
+ .mode_valid = cirrus_pipe_mode_valid,
+ .check = cirrus_pipe_check,
+ .enable = cirrus_pipe_enable,
+ .update = cirrus_pipe_update,
+};
+
+static const uint32_t cirrus_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t cirrus_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int cirrus_pipe_init(struct cirrus_device *cirrus)
+{
+ return drm_simple_display_pipe_init(&cirrus->dev,
+ &cirrus->pipe,
+ &cirrus_pipe_funcs,
+ cirrus_formats,
+ ARRAY_SIZE(cirrus_formats),
+ cirrus_modifiers,
+ &cirrus->conn);
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus framebuffers & mode config */
+
+static struct drm_framebuffer*
+cirrus_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ if (mode_cmd->pixel_format != DRM_FORMAT_RGB565 &&
+ mode_cmd->pixel_format != DRM_FORMAT_RGB888 &&
+ mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+ return ERR_PTR(-EINVAL);
+ if (cirrus_check_size(mode_cmd->width, mode_cmd->height, NULL) < 0)
+ return ERR_PTR(-EINVAL);
+ return drm_gem_fb_create_with_dirty(dev, file_priv, mode_cmd);
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
+ .fb_create = cirrus_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void cirrus_mode_config_init(struct cirrus_device *cirrus)
+{
+ struct drm_device *dev = &cirrus->dev;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
+ dev->mode_config.max_height = 1024;
+ dev->mode_config.preferred_depth = 16;
+ dev->mode_config.prefer_shadow = 0;
+ dev->mode_config.funcs = &cirrus_mode_config_funcs;
+}
+
+/* ------------------------------------------------------------------ */
+
+DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
+
+static struct drm_driver cirrus_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_PRIME,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &cirrus_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static int cirrus_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct drm_device *dev;
+ struct cirrus_device *cirrus;
+ int ret;
+
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
+ if (ret)
+ return ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pdev, DRIVER_NAME);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
+ cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL);
+ if (cirrus == NULL)
+ goto err_pci_release;
+
+ dev = &cirrus->dev;
+ ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev);
+ if (ret)
+ goto err_free_cirrus;
+ dev->dev_private = cirrus;
+
+ ret = -ENOMEM;
+ cirrus->vram = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (cirrus->vram == NULL)
+ goto err_dev_put;
+
+ cirrus->mmio = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (cirrus->mmio == NULL)
+ goto err_unmap_vram;
+
+ cirrus_mode_config_init(cirrus);
+
+ ret = cirrus_conn_init(cirrus);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = cirrus_pipe_init(cirrus);
+ if (ret < 0)
+ goto err_cleanup;
+
+ drm_mode_config_reset(dev);
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_cleanup;
+
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ return 0;
+
+err_cleanup:
+ drm_mode_config_cleanup(dev);
+ iounmap(cirrus->mmio);
+err_unmap_vram:
+ iounmap(cirrus->vram);
+err_dev_put:
+ drm_dev_put(dev);
+err_free_cirrus:
+ kfree(cirrus);
+err_pci_release:
+ pci_release_regions(pdev);
+ return ret;
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct cirrus_device *cirrus = dev->dev_private;
+
+ drm_dev_unregister(dev);
+ drm_mode_config_cleanup(dev);
+ iounmap(cirrus->mmio);
+ iounmap(cirrus->vram);
+ drm_dev_put(dev);
+ kfree(cirrus);
+ pci_release_regions(pdev);
+}
+
+static const struct pci_device_id pciidlist[] = {
+ {
+ .vendor = PCI_VENDOR_ID_CIRRUS,
+ .device = PCI_DEVICE_ID_CIRRUS_5446,
+ /* only bind to the cirrus chip in qemu */
+ .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+ .subdevice = PCI_SUBDEVICE_ID_QEMU,
+ }, {
+ .vendor = PCI_VENDOR_ID_CIRRUS,
+ .device = PCI_DEVICE_ID_CIRRUS_5446,
+ .subvendor = PCI_VENDOR_ID_XEN,
+ .subdevice = 0x0001,
+ },
+ { /* end of list */ }
+};
+
+static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+ if (vgacon_text_force())
+ return -EINVAL;
+ return pci_register_driver(&cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+ pci_unregister_driver(&cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_LICENSE("GPL");
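
A note on the format handling in the new cirrus.c above: cirrus_convert_to() drops a 32 bpp framebuffer to 24 or 16 bpp whenever its pitch would exceed CIRRUS_MAX_PITCH (0x1FF << 3 = 4088 bytes), and mode_config.max_width = CIRRUS_MAX_PITCH / 2 = 2044 guarantees the 2-byte-per-pixel fallback always fits. A worked example mirroring that logic (cirrus_example_cpp() is hypothetical):

/*
 *   1024x768  XR24: 1024 * 4 = 4096 > 4088, 1024 * 3 = 3072 fits  -> blit as RG24
 *   1600x1200 XR24: 1600 * 4 = 6400 > 4088, 1600 * 3 = 4800 too wide
 *                   -> blit as RG16 (1600 * 2 = 3200 bytes per line)
 */
static unsigned int cirrus_example_cpp(unsigned int width, unsigned int cpp)
{
	if (cpp == 4 && width * 4 > 4088) {
		if (width * 3 <= 4088)
			return 3;	/* convert XR24 to RG24 */
		return 2;		/* convert XR24 to RG16 */
	}
	return cpp;
}
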
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
deleted file mode 100644
index 8ec880f3a322..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2012 Red Hat <mjg@redhat.com>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <linux/module.h>
-#include <linux/console.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include "cirrus_drv.h"
-
-int cirrus_modeset = -1;
-int cirrus_bpp = 16;
-
-MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
-module_param_named(modeset, cirrus_modeset, int, 0400);
-MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:16)");
-module_param_named(bpp, cirrus_bpp, int, 0400);
-
-/*
- * This is the generic driver code. This binds the driver to the drm core,
- * which then performs further device association and calls our graphics init
- * functions
- */
-
-static struct drm_driver driver;
-
-/* only bind to the cirrus chip in qemu */
-static const struct pci_device_id pciidlist[] = {
- { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
- PCI_SUBVENDOR_ID_REDHAT_QUMRANET, PCI_SUBDEVICE_ID_QEMU,
- 0, 0, 0 },
- { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
- 0x0001, 0, 0, 0 },
- {0,}
-};
-
-
-static int cirrus_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int ret;
-
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
- if (ret)
- return ret;
-
- return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static void cirrus_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int cirrus_pm_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct cirrus_device *cdev = drm_dev->dev_private;
-
- drm_kms_helper_poll_disable(drm_dev);
-
- if (cdev->mode_info.gfbdev) {
- console_lock();
- drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
- console_unlock();
- }
-
- return 0;
-}
-
-static int cirrus_pm_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct cirrus_device *cdev = drm_dev->dev_private;
-
- drm_helper_resume_force_mode(drm_dev);
-
- if (cdev->mode_info.gfbdev) {
- console_lock();
- drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
- console_unlock();
- }
-
- drm_kms_helper_poll_enable(drm_dev);
- return 0;
-}
-#endif
-
-static const struct file_operations cirrus_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = cirrus_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
-};
-static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
- .load = cirrus_driver_load,
- .unload = cirrus_driver_unload,
- .fops = &cirrus_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object_unlocked = cirrus_gem_free_object,
- .dumb_create = cirrus_dumb_create,
- .dumb_map_offset = cirrus_dumb_mmap_offset,
-};
-
-static const struct dev_pm_ops cirrus_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
- cirrus_pm_resume)
-};
-
-static struct pci_driver cirrus_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = cirrus_pci_probe,
- .remove = cirrus_pci_remove,
- .driver.pm = &cirrus_pm_ops,
-};
-
-static int __init cirrus_init(void)
-{
- if (vgacon_text_force() && cirrus_modeset == -1)
- return -EINVAL;
-
- if (cirrus_modeset == 0)
- return -EINVAL;
- return pci_register_driver(&cirrus_pci_driver);
-}
-
-static void __exit cirrus_exit(void)
-{
- pci_unregister_driver(&cirrus_pci_driver);
-}
-
-module_init(cirrus_init);
-module_exit(cirrus_exit);
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index f2b2e0d169fa..1bd816be3aae 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -101,7 +101,6 @@ struct cirrus_crtc {
struct cirrus_fbdev;
struct cirrus_mode_info {
- bool mode_config_initialized;
struct cirrus_crtc *crtc;
/* pointer to fbdev info structure */
struct cirrus_fbdev *gfbdev;
@@ -143,7 +142,7 @@ struct cirrus_device {
struct cirrus_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct drm_framebuffer *gfb;
void *sysram;
int size;
@@ -169,7 +168,6 @@ cirrus_bo(struct ttm_buffer_object *bo)
#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
/* cirrus_main.c */
int cirrus_device_init(struct cirrus_device *cdev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
deleted file mode 100644
index 39df62acac69..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "cirrus_drv.h"
-
-static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
- int x, int y, int width, int height)
-{
- int i;
- struct drm_gem_object *obj;
- struct cirrus_bo *bo;
- int src_offset, dst_offset;
- int bpp = afbdev->gfb->format->cpp[0];
- int ret = -EBUSY;
- bool unmap = false;
- bool store_for_later = false;
- int x2, y2;
- unsigned long flags;
-
- obj = afbdev->gfb->obj[0];
- bo = gem_to_cirrus_bo(obj);
-
- /*
- * try and reserve the BO, if we fail with busy
- * then the BO is being moved and we should
- * store up the damage until later.
- */
- if (drm_can_sleep())
- ret = cirrus_bo_reserve(bo, true);
- if (ret) {
- if (ret != -EBUSY)
- return;
- store_for_later = true;
- }
-
- x2 = x + width - 1;
- y2 = y + height - 1;
- spin_lock_irqsave(&afbdev->dirty_lock, flags);
-
- if (afbdev->y1 < y)
- y = afbdev->y1;
- if (afbdev->y2 > y2)
- y2 = afbdev->y2;
- if (afbdev->x1 < x)
- x = afbdev->x1;
- if (afbdev->x2 > x2)
- x2 = afbdev->x2;
-
- if (store_for_later) {
- afbdev->x1 = x;
- afbdev->x2 = x2;
- afbdev->y1 = y;
- afbdev->y2 = y2;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
- return;
- }
-
- afbdev->x1 = afbdev->y1 = INT_MAX;
- afbdev->x2 = afbdev->y2 = 0;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
-
- if (!bo->kmap.virtual) {
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
- DRM_ERROR("failed to kmap fb updates\n");
- cirrus_bo_unreserve(bo);
- return;
- }
- unmap = true;
- }
- for (i = y; i < y + height; i++) {
- /* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
-
- }
- if (unmap)
- ttm_bo_kunmap(&bo->kmap);
-
- cirrus_bo_unreserve(bo);
-}
-
-static void cirrus_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_fillrect(info, rect);
- cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void cirrus_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_copyarea(info, area);
- cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
- area->height);
-}
-
-static void cirrus_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_imageblit(info, image);
- cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
- image->height);
-}
-
-
-static struct fb_ops cirrusfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cirrus_fillrect,
- .fb_copyarea = cirrus_copyarea,
- .fb_imageblit = cirrus_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_device *dev = afbdev->helper.dev;
- struct cirrus_device *cdev = dev->dev_private;
- u32 bpp;
- u32 size;
- struct drm_gem_object *gobj;
- int ret = 0;
-
- bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
-
- if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
- bpp, mode_cmd->pitches[0]))
- return -EINVAL;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = cirrus_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static int cirrusfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct cirrus_fbdev *gfbdev =
- container_of(helper, struct cirrus_fbdev, helper);
- struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
- void *sysram;
- struct drm_gem_object *gobj = NULL;
- int size, ret;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- sysram = vmalloc(size);
- if (!sysram)
- return -ENOMEM;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_vfree;
- }
-
- info->par = gfbdev;
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb) {
- ret = -ENOMEM;
- goto err_drm_gem_object_put_unlocked;
- }
-
- ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj);
- if (ret)
- goto err_kfree;
-
- gfbdev->sysram = sysram;
- gfbdev->size = size;
- gfbdev->gfb = fb;
-
- /* setup helper */
- gfbdev->helper.fb = fb;
-
- strcpy(info->fix.id, "cirrusdrmfb");
-
- info->fbops = &cirrusfb_ops;
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
- sizes->fb_height);
-
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
- info->apertures->ranges[0].size = cdev->mc.vram_size;
-
- info->fix.smem_start = cdev->dev->mode_config.fb_base;
- info->fix.smem_len = cdev->mc.vram_size;
-
- info->screen_base = sysram;
- info->screen_size = size;
-
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
-
- DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
- DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
- DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
- DRM_INFO("fb depth is %d\n", fb->format->depth);
- DRM_INFO(" pitch is %d\n", fb->pitches[0]);
-
- return 0;
-
-err_kfree:
- kfree(fb);
-err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(gobj);
-err_vfree:
- vfree(sysram);
- return ret;
-}
-
-static int cirrus_fbdev_destroy(struct drm_device *dev,
- struct cirrus_fbdev *gfbdev)
-{
- struct drm_framebuffer *gfb = gfbdev->gfb;
-
- drm_helper_force_disable_all(dev);
-
- drm_fb_helper_unregister_fbi(&gfbdev->helper);
-
- vfree(gfbdev->sysram);
- drm_fb_helper_fini(&gfbdev->helper);
- if (gfb)
- drm_framebuffer_put(gfb);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
- .fb_probe = cirrusfb_create,
-};
-
-int cirrus_fbdev_init(struct cirrus_device *cdev)
-{
- struct cirrus_fbdev *gfbdev;
- int ret;
-
- /*bpp_sel = 8;*/
- gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
- if (!gfbdev)
- return -ENOMEM;
-
- cdev->mode_info.gfbdev = gfbdev;
- spin_lock_init(&gfbdev->dirty_lock);
-
- drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
- &cirrus_fb_helper_funcs);
-
- ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
- CIRRUSFB_CONN_LIMIT);
- if (ret)
- return ret;
-
- ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
- if (ret)
- return ret;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(cdev->dev);
-
- return drm_fb_helper_initial_config(&gfbdev->helper, cirrus_bpp);
-}
-
-void cirrus_fbdev_fini(struct cirrus_device *cdev)
-{
- if (!cdev->mode_info.gfbdev)
- return;
-
- cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
- kfree(cdev->mode_info.gfbdev);
- cdev->mode_info.gfbdev = NULL;
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
deleted file mode 100644
index 57f8fe6d020b..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
-#include "cirrus_drv.h"
-
-static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
- .create_handle = drm_gem_fb_create_handle,
- .destroy = drm_gem_fb_destroy,
-};
-
-int cirrus_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
- gfb->obj[0] = obj;
- ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
- if (ret) {
- DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static struct drm_framebuffer *
-cirrus_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct cirrus_device *cdev = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
- u32 bpp;
- int ret;
-
- bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
-
- if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
- bpp, mode_cmd->pitches[0]))
- return ERR_PTR(-EINVAL);
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_put_unlocked(obj);
- kfree(fb);
- return ERR_PTR(ret);
- }
- return fb;
-}
-
-static const struct drm_mode_config_funcs cirrus_mode_funcs = {
- .fb_create = cirrus_user_framebuffer_create,
-};
-
-/* Unmap the framebuffer from the core and release the memory */
-static void cirrus_vram_fini(struct cirrus_device *cdev)
-{
- iounmap(cdev->rmmio);
- cdev->rmmio = NULL;
- if (cdev->mc.vram_base)
- release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
-}
-
-/* Map the framebuffer from the card and configure the core */
-static int cirrus_vram_init(struct cirrus_device *cdev)
-{
- /* BAR 0 is VRAM */
- cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
- cdev->mc.vram_size = pci_resource_len(cdev->dev->pdev, 0);
-
- if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
- "cirrusdrmfb_vram")) {
- DRM_ERROR("can't reserve VRAM\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
-/*
- * Our emulated hardware has two sets of memory. One is video RAM and can
- * simply be used as a linear framebuffer - the other provides mmio access
- * to the display registers. The latter can also be accessed via IO port
- * access, but we map the range and use mmio to program them instead
- */
-
-int cirrus_device_init(struct cirrus_device *cdev,
- struct drm_device *ddev,
- struct pci_dev *pdev, uint32_t flags)
-{
- int ret;
-
- cdev->dev = ddev;
- cdev->flags = flags;
-
- /* Hardcode the number of CRTCs to 1 */
- cdev->num_crtc = 1;
-
- /* BAR 0 is the framebuffer, BAR 1 contains registers */
- cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
- cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
-
- if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
- "cirrusdrmfb_mmio")) {
- DRM_ERROR("can't reserve mmio registers\n");
- return -ENOMEM;
- }
-
- cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
-
- if (cdev->rmmio == NULL)
- return -ENOMEM;
-
- ret = cirrus_vram_init(cdev);
- if (ret) {
- release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
- return ret;
- }
-
- return 0;
-}
-
-void cirrus_device_fini(struct cirrus_device *cdev)
-{
- release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
- cirrus_vram_fini(cdev);
-}
-
-/*
- * Functions here will be called by the core once it's bound the driver to
- * a PCI device
- */
-
-int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct cirrus_device *cdev;
- int r;
-
- cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
- if (cdev == NULL)
- return -ENOMEM;
- dev->dev_private = (void *)cdev;
-
- r = cirrus_device_init(cdev, dev, dev->pdev, flags);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
- goto out;
- }
-
- r = cirrus_mm_init(cdev);
- if (r) {
- dev_err(&dev->pdev->dev, "fatal err on mm init\n");
- goto out;
- }
-
- /*
- * cirrus_modeset_init() is initializing/registering the emulated fbdev
- * and DRM internals can access/test some of the fields in
- * mode_config->funcs as part of the fbdev registration process.
- * Make sure dev->mode_config.funcs is properly set to avoid
- * dereferencing a NULL pointer.
- * FIXME: mode_config.funcs assignment should probably be done in
- * cirrus_modeset_init() (that's a common pattern seen in other DRM
- * drivers).
- */
- dev->mode_config.funcs = &cirrus_mode_funcs;
- r = cirrus_modeset_init(cdev);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
- goto out;
- }
-
- return 0;
-out:
- cirrus_driver_unload(dev);
- return r;
-}
-
-void cirrus_driver_unload(struct drm_device *dev)
-{
- struct cirrus_device *cdev = dev->dev_private;
-
- if (cdev == NULL)
- return;
- cirrus_modeset_fini(cdev);
- cirrus_mm_fini(cdev);
- cirrus_device_fini(cdev);
- kfree(cdev);
- dev->dev_private = NULL;
-}
-
-int cirrus_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct cirrus_bo *cirrusbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
- *obj = &cirrusbo->gem;
- return 0;
-}
-
-int cirrus_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- int ret;
- struct drm_gem_object *gobj;
- u32 handle;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = cirrus_gem_create(dev, args->size, false,
- &gobj);
- if (ret)
- return ret;
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret)
- return ret;
-
- args->handle = handle;
- return 0;
-}
-
-static void cirrus_bo_unref(struct cirrus_bo **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
-
- tbo = &((*bo)->bo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
-
-void cirrus_gem_free_object(struct drm_gem_object *obj)
-{
- struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
-
- cirrus_bo_unref(&cirrus_bo);
-}
-
-
-static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->bo.vma_node);
-}
-
-int
-cirrus_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- struct cirrus_bo *bo;
-
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- bo = gem_to_cirrus_bo(obj);
- *offset = cirrus_bo_mmap_offset(bo);
-
- drm_gem_object_put_unlocked(obj);
-
- return 0;
-}
-
-bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
- int bpp, int pitch)
-{
- const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
- const int max_size = cdev->mc.vram_size;
-
- if (bpp > cirrus_bpp)
- return false;
- if (bpp > 32)
- return false;
-
- if (pitch > max_pitch)
- return false;
-
- if (pitch * height > max_size)
- return false;
-
- return true;
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
deleted file mode 100644
index 7f9bc32af685..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ /dev/null
@@ -1,621 +0,0 @@
-
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- *
- * Portions of this code derived from cirrusfb.c:
- * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
- *
- * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
- */
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include <video/cirrus.h>
-
-#include "cirrus_drv.h"
-
-#define CIRRUS_LUT_SIZE 256
-
-#define PALETTE_INDEX 0x8
-#define PALETTE_DATA 0x9
-
-/*
- * This file contains setup code for the CRTC.
- */
-
-/*
- * The DRM core requires DPMS functions, but they make little sense in our
- * case and so are just stubs
- */
-
-static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- u8 sr01, gr0e;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- sr01 = 0x00;
- gr0e = 0x00;
- break;
- case DRM_MODE_DPMS_STANDBY:
- sr01 = 0x20;
- gr0e = 0x02;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- sr01 = 0x20;
- gr0e = 0x04;
- break;
- case DRM_MODE_DPMS_OFF:
- sr01 = 0x20;
- gr0e = 0x06;
- break;
- default:
- return;
- }
-
- WREG8(SEQ_INDEX, 0x1);
- sr01 |= RREG8(SEQ_DATA) & ~0x20;
- WREG_SEQ(0x1, sr01);
-
- WREG8(GFX_INDEX, 0xe);
- gr0e |= RREG8(GFX_DATA) & ~0x06;
- WREG_GFX(0xe, gr0e);
-}
-
-static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
-{
- struct cirrus_device *cdev = crtc->dev->dev_private;
- u32 addr;
- u8 tmp;
-
- addr = offset >> 2;
- WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
- WREG_CRT(0x0d, (u8)(addr & 0xff));
-
- WREG8(CRT_INDEX, 0x1b);
- tmp = RREG8(CRT_DATA);
- tmp &= 0xf2;
- tmp |= (addr >> 16) & 0x01;
- tmp |= (addr >> 15) & 0x0c;
- WREG_CRT(0x1b, tmp);
- WREG8(CRT_INDEX, 0x1d);
- tmp = RREG8(CRT_DATA);
- tmp &= 0x7f;
- tmp |= (addr >> 12) & 0x80;
- WREG_CRT(0x1d, tmp);
-}
-
-/* cirrus is different - we will force move buffers out of VRAM */
-static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct cirrus_device *cdev = crtc->dev->dev_private;
- struct cirrus_bo *bo;
- int ret;
- u64 gpu_addr;
-
- /* push the previous fb to system ram */
- if (!atomic && fb) {
- bo = gem_to_cirrus_bo(fb->obj[0]);
- ret = cirrus_bo_reserve(bo, false);
- if (ret)
- return ret;
- cirrus_bo_push_sysram(bo);
- cirrus_bo_unreserve(bo);
- }
-
- bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
-
- ret = cirrus_bo_reserve(bo, false);
- if (ret)
- return ret;
-
- ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- if (ret) {
- cirrus_bo_unreserve(bo);
- return ret;
- }
-
- if (cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
-		/* if we are pushing the console, kmap it */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret)
- DRM_ERROR("failed to kmap fbcon\n");
- }
- cirrus_bo_unreserve(bo);
-
- cirrus_set_start_address(crtc, (u32)gpu_addr);
- return 0;
-}
-
-static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-/*
- * The meat of this driver. The core passes us a mode and we have to program
- * it. The modesetting here is the bare minimum required to satisfy the qemu
- * emulation of this hardware, and running this against a real device is
- * likely to result in an inadequately programmed mode. We've already had
- * the opportunity to modify the mode, so whatever we receive here should
- * be something that can be correctly programmed and displayed
- */
-static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
- int hsyncstart, hsyncend, htotal, hdispend;
- int vtotal, vdispend;
- int tmp;
- int sr07 = 0, hdr = 0;
-
- htotal = mode->htotal / 8;
- hsyncend = mode->hsync_end / 8;
- hsyncstart = mode->hsync_start / 8;
- hdispend = mode->hdisplay / 8;
-
- vtotal = mode->vtotal;
- vdispend = mode->vdisplay;
-
- vdispend -= 1;
- vtotal -= 2;
-
- htotal -= 5;
- hdispend -= 1;
- hsyncstart += 1;
- hsyncend += 1;
-
- WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
- WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
- WREG_CRT(VGA_CRTC_H_DISP, hdispend);
- WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
- WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
- WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
- WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
-
- tmp = 0x40;
- if ((vdispend + 1) & 512)
- tmp |= 0x20;
- WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
-
- /*
- * Overflow bits for values that don't fit in the standard registers
- */
- tmp = 16;
- if (vtotal & 256)
- tmp |= 1;
- if (vdispend & 256)
- tmp |= 2;
- if ((vdispend + 1) & 256)
- tmp |= 8;
- if (vtotal & 512)
- tmp |= 32;
- if (vdispend & 512)
- tmp |= 64;
- WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
-
- tmp = 0;
-
- /* More overflow bits */
-
- if ((htotal + 5) & 64)
- tmp |= 16;
- if ((htotal + 5) & 128)
- tmp |= 32;
- if (vtotal & 256)
- tmp |= 64;
- if (vtotal & 512)
- tmp |= 128;
-
- WREG_CRT(CL_CRT1A, tmp);
-
- /* Disable Hercules/CGA compatibility */
- WREG_CRT(VGA_CRTC_MODE, 0x03);
-
- WREG8(SEQ_INDEX, 0x7);
- sr07 = RREG8(SEQ_DATA);
- sr07 &= 0xe0;
- hdr = 0;
- switch (fb->format->cpp[0] * 8) {
- case 8:
- sr07 |= 0x11;
- break;
- case 16:
- sr07 |= 0x17;
- hdr = 0xc1;
- break;
- case 24:
- sr07 |= 0x15;
- hdr = 0xc5;
- break;
- case 32:
- sr07 |= 0x19;
- hdr = 0xc5;
- break;
- default:
- return -1;
- }
-
- WREG_SEQ(0x7, sr07);
-
- /* Program the pitch */
- tmp = fb->pitches[0] / 8;
- WREG_CRT(VGA_CRTC_OFFSET, tmp);
-
- /* Enable extended blanking and pitch bits, and enable full memory */
- tmp = 0x22;
- tmp |= (fb->pitches[0] >> 7) & 0x10;
- tmp |= (fb->pitches[0] >> 6) & 0x40;
- WREG_CRT(0x1b, tmp);
-
- /* Enable high-colour modes */
- WREG_GFX(VGA_GFX_MODE, 0x40);
-
- /* And set graphics mode */
- WREG_GFX(VGA_GFX_MISC, 0x01);
-
- WREG_HDR(hdr);
- cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
-
- /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
- outb(0x20, 0x3c0);
- return 0;
-}
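
As a concrete illustration of the timing arithmetic in the removed cirrus_crtc_mode_set() above, a small standalone sketch (not part of the patch) evaluating it for the standard 1024x768@60 mode (htotal 1344, hsync 1048-1184, vtotal 806):

#include <stdio.h>

int main(void)
{
	int htotal     = 1344 / 8 - 5;	/* 163 -> VGA_CRTC_H_TOTAL */
	int hdispend   = 1024 / 8 - 1;	/* 127 -> VGA_CRTC_H_DISP */
	int hsyncstart = 1048 / 8 + 1;	/* 132 -> VGA_CRTC_H_SYNC_START */
	int hsyncend   = 1184 / 8 + 1;	/* 149 -> VGA_CRTC_H_SYNC_END */
	int vtotal     = 806 - 2;	/* 804: low byte 0x24, rest in overflow bits */
	int vdispend   = 768 - 1;	/* 767: low byte 0xff, rest in overflow bits */

	printf("%d %d %d %d %d %d\n",
	       htotal, hdispend, hsyncstart, hsyncend, vtotal, vdispend);
	return 0;
}
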
-
-/*
- * This is called before a mode is programmed. A typical use might be to
- * enable DPMS during the programming to avoid seeing intermediate stages,
- * but that's not relevant to us
- */
-static void cirrus_crtc_prepare(struct drm_crtc *crtc)
-{
-}
-
-static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- u16 *r, *g, *b;
- int i;
-
- if (!crtc->enabled)
- return;
-
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
-
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
- /* VGA registers */
- WREG8(PALETTE_INDEX, i);
- WREG8(PALETTE_DATA, *r++ >> 8);
- WREG8(PALETTE_DATA, *g++ >> 8);
- WREG8(PALETTE_DATA, *b++ >> 8);
- }
-}
-
-/*
- * This is called after a mode is programmed. It should reverse anything done
- * by the prepare function
- */
-static void cirrus_crtc_commit(struct drm_crtc *crtc)
-{
- cirrus_crtc_load_lut(crtc);
-}
-
-/*
- * The core can pass us a set of gamma values to program. We actually only
- * use this for 8-bit mode so can't perform smooth fades on deeper modes,
- * but it's a requirement that we provide the function
- */
-static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- cirrus_crtc_load_lut(crtc);
-
- return 0;
-}
-
-/* Simple cleanup function */
-static void cirrus_crtc_destroy(struct drm_crtc *crtc)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(cirrus_crtc);
-}
-
-/* These provide the minimum set of functions required to handle a CRTC */
-static const struct drm_crtc_funcs cirrus_crtc_funcs = {
- .gamma_set = cirrus_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = cirrus_crtc_destroy,
-};
-
-static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
- .dpms = cirrus_crtc_dpms,
- .mode_set = cirrus_crtc_mode_set,
- .mode_set_base = cirrus_crtc_mode_set_base,
- .prepare = cirrus_crtc_prepare,
- .commit = cirrus_crtc_commit,
-};
-
-/* CRTC setup */
-static const uint32_t cirrus_formats_16[] = {
- DRM_FORMAT_RGB565,
-};
-
-static const uint32_t cirrus_formats_24[] = {
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
-};
-
-static const uint32_t cirrus_formats_32[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
-};
-
-static struct drm_plane *cirrus_primary_plane(struct drm_device *dev)
-{
- const uint32_t *formats;
- uint32_t nformats;
- struct drm_plane *primary;
- int ret;
-
- switch (cirrus_bpp) {
- case 16:
- formats = cirrus_formats_16;
- nformats = ARRAY_SIZE(cirrus_formats_16);
- break;
- case 24:
- formats = cirrus_formats_24;
- nformats = ARRAY_SIZE(cirrus_formats_24);
- break;
- case 32:
- formats = cirrus_formats_32;
- nformats = ARRAY_SIZE(cirrus_formats_32);
- break;
- default:
- return NULL;
- }
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- ret = drm_universal_plane_init(dev, primary, 0,
- &drm_primary_helper_funcs,
- formats, nformats,
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- primary = NULL;
- }
-
- return primary;
-}
-
-static void cirrus_crtc_init(struct drm_device *dev)
-{
- struct cirrus_device *cdev = dev->dev_private;
- struct cirrus_crtc *cirrus_crtc;
- struct drm_plane *primary;
-
- cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
- (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
- GFP_KERNEL);
-
- if (cirrus_crtc == NULL)
- return;
-
- primary = cirrus_primary_plane(dev);
- if (primary == NULL) {
- kfree(cirrus_crtc);
- return;
- }
-
- drm_crtc_init_with_planes(dev, &cirrus_crtc->base,
- primary, NULL,
- &cirrus_crtc_funcs, NULL);
-
- drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
- cdev->mode_info.crtc = cirrus_crtc;
-
- drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
-}
-
-static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
-{
- return;
-}
-
-static void cirrus_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void cirrus_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void cirrus_encoder_destroy(struct drm_encoder *encoder)
-{
- struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(cirrus_encoder);
-}
-
-static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
- .dpms = cirrus_encoder_dpms,
- .mode_set = cirrus_encoder_mode_set,
- .prepare = cirrus_encoder_prepare,
- .commit = cirrus_encoder_commit,
-};
-
-static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
- .destroy = cirrus_encoder_destroy,
-};
-
-static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- struct cirrus_encoder *cirrus_encoder;
-
- cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
- if (!cirrus_encoder)
- return NULL;
-
- encoder = &cirrus_encoder->base;
- encoder->possible_crtcs = 0x1;
-
- drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
-
- return encoder;
-}
-
-
-static int cirrus_vga_get_modes(struct drm_connector *connector)
-{
- int count;
-
- /* Just add a static list of modes */
- if (cirrus_bpp <= 24) {
- count = drm_add_modes_noedid(connector, 1280, 1024);
- drm_set_preferred_mode(connector, 1024, 768);
- } else {
- count = drm_add_modes_noedid(connector, 800, 600);
- drm_set_preferred_mode(connector, 800, 600);
- }
- return count;
-}
-
-static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
- *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
-static void cirrus_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
-static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
- .get_modes = cirrus_vga_get_modes,
- .best_encoder = cirrus_connector_best_encoder,
-};
-
-static const struct drm_connector_funcs cirrus_vga_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = cirrus_connector_destroy,
-};
-
-static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct cirrus_connector *cirrus_connector;
-
- cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
- if (!cirrus_connector)
- return NULL;
-
- connector = &cirrus_connector->base;
-
- drm_connector_init(dev, connector,
- &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
-
- drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
-
- drm_connector_register(connector);
- return connector;
-}
-
-
-int cirrus_modeset_init(struct cirrus_device *cdev)
-{
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- drm_mode_config_init(cdev->dev);
- cdev->mode_info.mode_config_initialized = true;
-
- cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
- cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
-
- cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
- cdev->dev->mode_config.preferred_depth = cirrus_bpp;
- /* don't prefer a shadow on virt GPU */
- cdev->dev->mode_config.prefer_shadow = 0;
-
- cirrus_crtc_init(cdev->dev);
-
- encoder = cirrus_encoder_init(cdev->dev);
- if (!encoder) {
- DRM_ERROR("cirrus_encoder_init failed\n");
- return -1;
- }
-
- connector = cirrus_vga_init(cdev->dev);
- if (!connector) {
- DRM_ERROR("cirrus_vga_init failed\n");
- return -1;
- }
-
- drm_connector_attach_encoder(connector, encoder);
-
- ret = cirrus_fbdev_init(cdev);
- if (ret) {
- DRM_ERROR("cirrus_fbdev_init failed\n");
- return ret;
- }
-
- return 0;
-}
-
-void cirrus_modeset_fini(struct cirrus_device *cdev)
-{
- cirrus_fbdev_fini(cdev);
-
- if (cdev->mode_info.mode_config_initialized) {
- drm_mode_config_cleanup(cdev->dev);
- cdev->mode_info.mode_config_initialized = false;
- }
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index e075810b4bd4..e6b98467a428 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -178,7 +178,6 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
ret = ttm_bo_device_init(&cirrus->ttm.bdev,
&cirrus_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -331,13 +330,8 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct cirrus_device *cirrus;
+ struct drm_file *file_priv = filp->private_data;
+ struct cirrus_device *cirrus = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- cirrus = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 5eb40130fafb..f4924cb7f495 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -798,6 +798,50 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
/**
+ * drm_atomic_get_old_private_obj_state
+ * @state: global atomic state object
+ * @obj: private_obj to grab
+ *
+ * This function returns the old private object state for the given private_obj,
+ * or NULL if the private_obj is not part of the global atomic state.
+ */
+struct drm_private_state *
+drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_private_objs; i++)
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].old_state;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
+
+/**
+ * drm_atomic_get_new_private_obj_state
+ * @state: global atomic state object
+ * @obj: private_obj to grab
+ *
+ * This function returns the new private object state for the given private_obj,
+ * or NULL if the private_obj is not part of the global atomic state.
+ */
+struct drm_private_state *
+drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_private_objs; i++)
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].new_state;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
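
A hedged sketch of how a driver-private object might use these two new helpers from its atomic_check hook; the my_* names are hypothetical:

#include <drm/drm_atomic.h>

struct my_device {
	struct drm_device base;
	struct drm_private_obj bw_obj;	/* hypothetical shared-bandwidth object */
};

static int my_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct my_device *mydev = container_of(dev, struct my_device, base);
	struct drm_private_state *old_s, *new_s;

	old_s = drm_atomic_get_old_private_obj_state(state, &mydev->bw_obj);
	new_s = drm_atomic_get_new_private_obj_state(state, &mydev->bw_obj);
	if (!new_s)	/* object not part of this atomic state */
		return 0;

	/* compare old_s vs. new_s (driver-specific contents) here */
	return 0;
}
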
+
+/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
@@ -1236,4 +1280,3 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, minor);
}
#endif
-
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 40ac19848034..2e0cb4246cbd 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -495,7 +495,7 @@ mode_fixup(struct drm_atomic_state *state)
static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
struct drm_encoder *encoder,
struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
enum drm_mode_status ret;
@@ -534,7 +534,7 @@ mode_valid(struct drm_atomic_state *state)
struct drm_crtc *crtc = conn_state->crtc;
struct drm_crtc_state *crtc_state;
enum drm_mode_status mode_status;
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
if (!crtc || !encoder)
continue;
@@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->atomic_disable(crtc, old_crtc_state);
else if (funcs->disable)
funcs->disable(crtc);
- else
+ else if (funcs->dpms)
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (!(dev->irq_enabled && dev->num_crtcs))
@@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
if (new_crtc_state->enable) {
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
-
if (funcs->atomic_enable)
funcs->atomic_enable(crtc, old_crtc_state);
- else
+ else if (funcs->commit)
funcs->commit(crtc);
}
}
@@ -1752,7 +1751,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
*
* NOTE: Commit work has multiple phases, first hardware commit, then
* cleanup. We want them to overlap, hence need system_unbound_wq to
- * make sure work items don't artifically stall on each another.
+	 * make sure work items don't artificially stall on each other.
*/
drm_atomic_state_get(state);
@@ -1786,7 +1785,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
*
* Asynchronous workers need to have sufficient parallelism to be able to run
* different atomic commits on different CRTCs in parallel. The simplest way to
- * achive this is by running them on the &system_unbound_wq work queue. Note
+ * achieve this is by running them on the &system_unbound_wq work queue. Note
* that drivers are not required to split up atomic commits and run an
* individual commit in parallel - userspace is supposed to do that if it cares.
* But it might be beneficial to do that for modesets, since those necessarily
@@ -2261,10 +2260,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
int ret, i, j;
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ if (!new_conn_state->writeback_job)
+ continue;
+
+ ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
+ if (ret < 0)
+ return ret;
+ }
+
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 4985384e51f6..59ffb6b9c745 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -30,6 +30,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
+#include <drm/drm_writeback.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
@@ -412,6 +413,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
if (state->commit)
drm_crtc_commit_put(state->commit);
+
+ if (state->writeback_job)
+ drm_writeback_cleanup_job(state->writeback_job);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 0aabd401d3ca..428d82662dc4 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -512,8 +512,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
}
static int drm_atomic_plane_set_property(struct drm_plane *plane,
- struct drm_plane_state *state, struct drm_property *property,
- uint64_t val)
+ struct drm_plane_state *state, struct drm_file *file_priv,
+ struct drm_property *property, uint64_t val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
@@ -521,7 +521,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
int ret;
if (property == config->prop_fb_id) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ struct drm_framebuffer *fb;
+ fb = drm_framebuffer_lookup(dev, file_priv, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_put(fb);
@@ -537,7 +538,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return -EINVAL;
} else if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
+ if (val && !crtc)
+ return -EACCES;
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
@@ -647,28 +650,15 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
-static struct drm_writeback_job *
-drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
-{
- WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
-
- if (!conn_state->writeback_job)
- conn_state->writeback_job =
- kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
-
- return conn_state->writeback_job;
-}
-
static int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
- struct drm_writeback_job *job =
- drm_atomic_get_writeback_job(conn_state);
- if (!job)
- return -ENOMEM;
+ int ret;
- drm_framebuffer_assign(&job->fb, fb);
+ ret = drm_writeback_set_fb(conn_state, fb);
+ if (ret < 0)
+ return ret;
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
@@ -681,14 +671,16 @@ static int drm_atomic_set_writeback_fb_for_connector(
}
static int drm_atomic_connector_set_property(struct drm_connector *connector,
- struct drm_connector_state *state, struct drm_property *property,
- uint64_t val)
+ struct drm_connector_state *state, struct drm_file *file_priv,
+ struct drm_property *property, uint64_t val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
+ if (val && !crtc)
+ return -EACCES;
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
@@ -746,9 +738,13 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == connector->colorspace_property) {
+ state->colorspace = val;
} else if (property == config->writeback_fb_id_property) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
- int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+ struct drm_framebuffer *fb;
+ int ret;
+ fb = drm_framebuffer_lookup(dev, file_priv, val);
+ ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
if (fb)
drm_framebuffer_put(fb);
return ret;
@@ -814,6 +810,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->picture_aspect_ratio;
} else if (property == config->content_type_property) {
*val = state->content_type;
+ } else if (property == connector->colorspace_property) {
+ *val = state->colorspace;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == connector->content_protection_property) {
@@ -943,6 +941,7 @@ out:
}
int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
uint64_t prop_value)
@@ -965,7 +964,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
ret = drm_atomic_connector_set_property(connector,
- connector_state, prop, prop_value);
+ connector_state, file_priv,
+ prop, prop_value);
break;
}
case DRM_MODE_OBJECT_CRTC: {
@@ -993,7 +993,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
ret = drm_atomic_plane_set_property(plane,
- plane_state, prop, prop_value);
+ plane_state, file_priv,
+ prop, prop_value);
break;
}
default:
@@ -1158,19 +1159,17 @@ static int prepare_signaling(struct drm_device *dev,
for_each_new_connector_in_state(state, conn, conn_state, i) {
struct drm_writeback_connector *wb_conn;
- struct drm_writeback_job *job;
struct drm_out_fence_state *f;
struct dma_fence *fence;
s32 __user *fence_ptr;
+ if (!conn_state->writeback_job)
+ continue;
+
fence_ptr = get_out_fence_for_connector(state, conn);
if (!fence_ptr)
continue;
- job = drm_atomic_get_writeback_job(conn_state);
- if (!job)
- return -ENOMEM;
-
f = krealloc(*fence_state, sizeof(**fence_state) *
(*num_fences + 1), GFP_KERNEL);
if (!f)
@@ -1192,7 +1191,7 @@ static int prepare_signaling(struct drm_device *dev,
return ret;
}
- job->out_fence = fence;
+ conn_state->writeback_job->out_fence = fence;
}
/*
@@ -1365,8 +1364,8 @@ retry:
goto out;
}
- ret = drm_atomic_set_property(state, obj, prop,
- prop_value);
+ ret = drm_atomic_set_property(state, file_priv,
+ obj, prop, prop_value);
if (ret) {
drm_mode_object_put(obj);
goto out;
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 1669c42c40ed..22c7a104b802 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -103,14 +103,11 @@ struct drm_master *drm_master_create(struct drm_device *dev)
return NULL;
kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
+ drm_master_legacy_init(master);
idr_init(&master->magic_map);
master->dev = dev;
/* initialize the tree of output resource lessees */
- master->lessor = NULL;
- master->lessee_id = 0;
INIT_LIST_HEAD(&master->lessees);
INIT_LIST_HEAD(&master->lessee_list);
idr_init(&master->leases);
@@ -274,21 +271,7 @@ void drm_master_release(struct drm_file *file_priv)
if (!drm_is_current_master(file_priv))
goto out;
- if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
- /*
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
- mutex_lock(&dev->struct_mutex);
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- mutex_unlock(&dev->struct_mutex);
- }
+ drm_legacy_lock_master_cleanup(dev, master);
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index e407adb033e7..bfc419ed9d6c 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -584,6 +584,14 @@ void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
mutex_unlock(&dev->struct_mutex);
}
+void drm_legacy_rmmaps(struct drm_device *dev)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+ drm_legacy_rmmap(dev, r_list->map);
+}
+
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
* exit uncleanly. Therefore, having userland manually remove mappings seems
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 9b2bd28dde0a..f20d1dda3961 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -69,7 +69,8 @@ EXPORT_SYMBOL(drm_client_close);
* @name: Client name
* @funcs: DRM client functions (optional)
*
- * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
+ * This initialises the client and opens a &drm_file.
+ * Use drm_client_register() to complete the process.
* The caller needs to hold a reference on @dev before calling this function.
* The client is freed when the &drm_device is unregistered. See drm_client_release().
*
@@ -108,16 +109,16 @@ err_put_module:
EXPORT_SYMBOL(drm_client_init);
/**
- * drm_client_add - Add client to the device list
+ * drm_client_register - Register client
* @client: DRM client
*
* Add the client to the &drm_device client list to activate its callbacks.
* @client must be initialized by a call to drm_client_init(). After
- * drm_client_add() it is no longer permissible to call drm_client_release()
+ * drm_client_register() it is no longer permissible to call drm_client_release()
* directly (outside the unregister callback), instead cleanup will happen
* automatically on driver unload.
*/
-void drm_client_add(struct drm_client_dev *client)
+void drm_client_register(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
@@ -125,7 +126,7 @@ void drm_client_add(struct drm_client_dev *client)
list_add(&client->list, &dev->clientlist);
mutex_unlock(&dev->clientlist_mutex);
}
-EXPORT_SYMBOL(drm_client_add);
+EXPORT_SYMBOL(drm_client_register);
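
A hedged sketch of the intended call sequence after the rename, as an in-kernel client might use it; my_client_funcs and the surrounding names are hypothetical:

#include <linux/slab.h>
#include <drm/drm_client.h>

static const struct drm_client_funcs my_client_funcs = {
	/* .unregister / .restore / .hotplug as needed */
};

static int my_client_setup(struct drm_device *dev)
{
	struct drm_client_dev *client;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	ret = drm_client_init(dev, client, "my-client", &my_client_funcs);
	if (ret) {
		kfree(client);
		return ret;
	}

	/* formerly drm_client_add(); callbacks only become active here */
	drm_client_register(client);
	return 0;
}
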
/**
* drm_client_release - Release DRM client resources
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index dd40eff0911c..b34c3d38bf15 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -245,6 +245,7 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
+ connector->tile_blob_ptr = NULL;
connector->status = connector_status_unknown;
connector->display_info.panel_orientation =
DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
@@ -272,6 +273,9 @@ int drm_connector_init(struct drm_device *dev,
drm_object_attach_property(&connector->base,
config->non_desktop_property,
0);
+ drm_object_attach_property(&connector->base,
+ config->tile_property,
+ 0);
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
@@ -826,6 +830,33 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = {
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
+static const struct drm_prop_enum_list hdmi_colorspaces[] = {
+ /* For Default case, driver will set the colorspace */
+ { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+ /* Standard Definition Colorimetry based on CEA 861 */
+ { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" },
+ { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+ /* Standard Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+ /* High Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+ /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
+ { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 [33] */
+ { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 */
+ { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+ /* Added as part of Additional Colorimetry Extension in 861.G */
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
+};
+
/**
* DOC: standard connector properties
*
@@ -1385,12 +1416,6 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
*
* The driver may place further restrictions within these minimum
* and maximum bounds.
- *
- * The semantics for the vertical blank timestamp differ when
- * variable refresh rate is active. The vertical blank timestamp
- * is defined to be an estimate using the current mode's fixed
- * refresh rate timings. The semantics for the page-flip event
- * timestamp remain the same.
*/
/**
@@ -1548,6 +1573,57 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
+ * DOC: standard connector properties
+ *
+ * Colorspace:
+ * drm_mode_create_colorspace_property - create colorspace property
+ * This property helps select a suitable colorspace based on the sink
+ * capability. Modern sink devices support wider gamut like BT2020.
+ * This helps switch to BT2020 mode if a BT2020-encoded video stream
+ * is being played by the user, and likewise for any other colorspace,
+ * thereby giving users a good visual experience.
+ *
+ * The expectation from userspace is that it should parse the EDID
+ * and get supported colorspaces. Use this property and switch to the
+ * one supported. Sink supported colorspaces should be retrieved by
+ * userspace from EDID and driver will not explicitly expose them.
+ *
+ * Basically the expectation from userspace is:
+ * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
+ * colorspace
+ * - Set this new property to let the sink know what it
+ * converted the CRTC output to.
+ * - This property is just to inform the sink what colorspace the
+ *   source is trying to drive.
+ *
+ * Called by a driver the first time it's needed; the property must be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_colorspace_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ "Colorspace",
+ hdmi_colorspaces,
+ ARRAY_SIZE(hdmi_colorspaces));
+ if (!prop)
+ return -ENOMEM;
+ } else {
+ DRM_DEBUG_KMS("Colorspace property not supported\n");
+ return 0;
+ }
+
+ connector->colorspace_property = prop;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_colorspace_property);
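
A hedged sketch of how an HDMI driver could wire the new property into its connector setup; my_hdmi_connector_init is a hypothetical name, and DRM_MODE_COLORIMETRY_DEFAULT is used as the initial value:

#include <drm/drm_connector.h>
#include <drm/drm_mode_object.h>

static int my_hdmi_connector_init(struct drm_connector *connector)
{
	int ret;

	ret = drm_mode_create_colorspace_property(connector);
	if (ret)
		return ret;

	/* non-HDMI connector types leave colorspace_property NULL */
	if (connector->colorspace_property)
		drm_object_attach_property(&connector->base,
					   connector->colorspace_property,
					   DRM_MODE_COLORIMETRY_DEFAULT);
	return 0;
}
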
+
+/**
* drm_mode_create_content_type_property - create content type property
* @dev: DRM device
*
@@ -1634,6 +1710,8 @@ EXPORT_SYMBOL(drm_connector_set_path_property);
* This looks up the tile information for a connector, and creates a
* property for userspace to parse if it exists. The property is of
* the form of 8 integers using ':' as a separator.
+ * This is used for dual port tiled displays with DisplayPort SST
+ * or DisplayPort MST connectors.
*
* Returns:
* Zero on success, errno on failure.
@@ -1677,6 +1755,9 @@ EXPORT_SYMBOL(drm_connector_set_tile_property);
*
* This function creates a new blob modeset object and assigns its id to the
* connector's edid property.
+ * Since we also parse tile information from EDID's displayID block, we also
+ * set the connector's tile property here. See drm_connector_set_tile_property()
+ * for more details.
*
* Returns:
* Zero on success, negative errno on failure.
@@ -1718,7 +1799,9 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
edid,
&connector->base,
dev->mode_config.edid_property);
- return ret;
+ if (ret)
+ return ret;
+ return drm_connector_set_tile_property(connector);
}
EXPORT_SYMBOL(drm_connector_update_edid_property);
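
With the tile property now refreshed from drm_connector_update_edid_property(), a probe/get_modes path only needs the EDID update. A hedged sketch; my_connector and its ddc adapter are hypothetical:

#include <linux/slab.h>
#include <drm/drm_edid.h>

static int my_get_modes(struct drm_connector *connector)
{
	struct my_connector *mc = to_my_connector(connector);	/* hypothetical */
	struct edid *edid;
	int count;

	edid = drm_get_edid(connector, mc->ddc);
	/* also updates the tile property now, no separate call needed */
	drm_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;
}
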
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 7dabbaf033a1..790ba5941954 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -559,6 +559,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
plane = crtc->primary;
+ /* allow disabling with the primary plane leased */
+ if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id))
+ return -EACCES;
+
mutex_lock(&crtc->dev->mode_config.mutex);
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 216f2a9ee3d4..0719a235d6cc 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -214,6 +214,7 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
struct drm_connector *connector,
int mode);
int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
uint64_t prop_value);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index dc7ac0c60547..c630ed157994 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3022,7 +3022,6 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
edid = drm_edid_duplicate(port->cached_edid);
else {
edid = drm_get_edid(connector, &port->aux.ddc);
- drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_mst_topology_put_port(port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 05bbc2b622fc..862621494a93 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -286,6 +286,138 @@ void drm_minor_release(struct drm_minor *minor)
* Note that the lifetime rules for &drm_device instance has still a lot of
* historical baggage. Hence use the reference counting provided by
* drm_dev_get() and drm_dev_put() only carefully.
+ *
+ * Display driver example
+ * ~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The following example shows a typical structure of a DRM display driver.
+ * The example focuses on the probe() function and the other functions that
+ * are almost always present, and serves as a demonstration of
+ * devm_drm_dev_init() usage with its accompanying drm_driver->release callback.
+ *
+ * .. code-block:: c
+ *
+ * struct driver_device {
+ * struct drm_device drm;
+ * void *userspace_facing;
+ * struct clk *pclk;
+ * };
+ *
+ * static void driver_drm_release(struct drm_device *drm)
+ * {
+ * struct driver_device *priv = container_of(...);
+ *
+ * drm_mode_config_cleanup(drm);
+ * drm_dev_fini(drm);
+ * kfree(priv->userspace_facing);
+ * kfree(priv);
+ * }
+ *
+ * static struct drm_driver driver_drm_driver = {
+ * [...]
+ * .release = driver_drm_release,
+ * };
+ *
+ * static int driver_probe(struct platform_device *pdev)
+ * {
+ * struct driver_device *priv;
+ * struct drm_device *drm;
+ * int ret;
+ *
+ * [
+ * devm_kzalloc() can't be used here because the drm_device
+ * lifetime can exceed the device lifetime if driver unbind
+ * happens when userspace still has open file descriptors.
+ * ]
+ * priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ * if (!priv)
+ * return -ENOMEM;
+ *
+ * drm = &priv->drm;
+ *
+ * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
+ * if (ret) {
+ * kfree(drm);
+ * return ret;
+ * }
+ *
+ * drm_mode_config_init(drm);
+ *
+ * priv->userspace_facing = kzalloc(..., GFP_KERNEL);
+ * if (!priv->userspace_facing)
+ * return -ENOMEM;
+ *
+ * priv->pclk = devm_clk_get(dev, "PCLK");
+ * if (IS_ERR(priv->pclk))
+ * return PTR_ERR(priv->pclk);
+ *
+ * [ Further setup, display pipeline etc ]
+ *
+ * platform_set_drvdata(pdev, drm);
+ *
+ * drm_mode_config_reset(drm);
+ *
+ *	ret = drm_dev_register(drm, 0);
+ * if (ret)
+ * return ret;
+ *
+ * drm_fbdev_generic_setup(drm, 32);
+ *
+ * return 0;
+ * }
+ *
+ * [ This function is called before the devm_ resources are released ]
+ * static int driver_remove(struct platform_device *pdev)
+ * {
+ * struct drm_device *drm = platform_get_drvdata(pdev);
+ *
+ * drm_dev_unregister(drm);
+ *	drm_atomic_helper_shutdown(drm);
+ *
+ * return 0;
+ * }
+ *
+ * [ This function is called on kernel restart and shutdown ]
+ * static void driver_shutdown(struct platform_device *pdev)
+ * {
+ * drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+ * }
+ *
+ * static int __maybe_unused driver_pm_suspend(struct device *dev)
+ * {
+ * return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
+ * }
+ *
+ * static int __maybe_unused driver_pm_resume(struct device *dev)
+ * {
+ * drm_mode_config_helper_resume(dev_get_drvdata(dev));
+ *
+ * return 0;
+ * }
+ *
+ * static const struct dev_pm_ops driver_pm_ops = {
+ * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
+ * };
+ *
+ * static struct platform_driver driver_driver = {
+ * .driver = {
+ * [...]
+ * .pm = &driver_pm_ops,
+ * },
+ * .probe = driver_probe,
+ * .remove = driver_remove,
+ * .shutdown = driver_shutdown,
+ * };
+ * module_platform_driver(driver_driver);
+ *
+ * Drivers that want to support device unplugging (USB, DT overlay unload) should
+ * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
+ * regions that access device resources to prevent use after they're
+ * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
+ * shortcoming, however: drm_dev_unplug() marks the drm_device as unplugged before
+ * drm_atomic_helper_shutdown() is called. This means that if the disable code
+ * paths are protected, they will not run on regular driver module unload,
+ * possibly leaving the hardware enabled.
*/
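
A hedged sketch of the drm_dev_enter()/drm_dev_exit() pattern mentioned above, guarding a hardware-touching path; my_crtc_enable is a hypothetical callback:

#include <drm/drm_drv.h>
#include <drm/drm_crtc.h>

static void my_crtc_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
{
	struct drm_device *drm = crtc->dev;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;		/* device was unplugged, skip register access */

	/* program hardware registers here */

	drm_dev_exit(idx);
}
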
/**
@@ -376,7 +508,6 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
- drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);
@@ -453,6 +584,31 @@ static void drm_fs_inode_free(struct inode *inode)
}
/**
+ * DOC: component helper usage recommendations
+ *
+ * DRM drivers that drive hardware where a logical device consists of a pile of
+ * independent hardware blocks are recommended to use the :ref:`component helper
+ * library<component>`. For consistency and better options for code reuse the
+ * following guidelines apply:
+ *
+ * - The entire device initialization procedure should be run from the
+ * &component_master_ops.master_bind callback, starting with drm_dev_init(),
+ * then binding all components with component_bind_all() and finishing with
+ * drm_dev_register().
+ *
+ * - The opaque pointer passed to all components through component_bind_all()
+ * should point at &struct drm_device of the device instance, not some driver
+ * specific private structure.
+ *
+ * - The component helper fills the niche where further standardization of
+ * interfaces is not practical. When there already is, or will be, a
+ * standardized interface like &drm_bridge or &drm_panel, providing its own
+ * functions to find such components at driver load time, like
+ * drm_of_find_panel_or_bridge(), then the component helper should not be
+ * used.
+ */
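
A hedged sketch of the recommended ordering in the master bind callback; my_drm_driver and the surrounding names are hypothetical:

#include <linux/component.h>
#include <drm/drm_drv.h>

static struct drm_driver my_drm_driver = {
	/* hypothetical driver, fields omitted */
};

static int my_master_bind(struct device *dev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&my_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	/* pass the &drm_device itself as the opaque pointer, per the above */
	ret = component_bind_all(dev, drm);
	if (ret)
		goto err_put;

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unbind;

	return 0;

err_unbind:
	component_unbind_all(dev, drm);
err_put:
	drm_dev_put(drm);
	return ret;
}
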
+
+/**
* drm_dev_init - Initialise new DRM device
* @dev: DRM device
* @driver: DRM driver
@@ -497,26 +653,22 @@ int drm_dev_init(struct drm_device *dev,
BUG_ON(!parent);
kref_init(&dev->ref);
- dev->dev = parent;
+ dev->dev = get_device(parent);
dev->driver = driver;
/* no per-device feature limits by default */
dev->driver_features = ~0u;
+ drm_legacy_init_members(dev);
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
- spin_lock_init(&dev->buf_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
- mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->master_mutex);
dev->anon_inode = drm_fs_inode_new();
@@ -536,7 +688,7 @@ int drm_dev_init(struct drm_device *dev,
if (ret)
goto err_minors;
- ret = drm_ht_create(&dev->map_hash, 12);
+ ret = drm_legacy_create_map_hash(dev);
if (ret)
goto err_minors;
@@ -561,21 +713,61 @@ err_setunique:
drm_gem_destroy(dev);
err_ctxbitmap:
drm_legacy_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
+ drm_legacy_remove_map_hash(dev);
err_minors:
drm_minor_free(dev, DRM_MINOR_PRIMARY);
drm_minor_free(dev, DRM_MINOR_RENDER);
drm_fs_inode_free(dev->anon_inode);
err_free:
+ put_device(dev->dev);
mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->ctxlist_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
+static void devm_drm_dev_init_release(void *data)
+{
+ drm_dev_put(data);
+}
+
+/**
+ * devm_drm_dev_init - Resource managed drm_dev_init()
+ * @parent: Parent device object
+ * @dev: DRM device
+ * @driver: DRM driver
+ *
+ * Managed drm_dev_init(). The DRM device initialized with this function is
+ * automatically put on driver detach using drm_dev_put(). You must supply a
+ * &drm_driver.release callback to control the finalization explicitly.
+ *
+ * RETURNS:
+ * 0 on success, or error code on failure.
+ */
+int devm_drm_dev_init(struct device *parent,
+ struct drm_device *dev,
+ struct drm_driver *driver)
+{
+ int ret;
+
+ if (WARN_ON(!parent || !driver->release))
+ return -EINVAL;
+
+ ret = drm_dev_init(dev, driver, parent);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
+ if (ret)
+ devm_drm_dev_init_release(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_drm_dev_init);
+
/**
* drm_dev_fini - Finalize a dead DRM device
* @dev: DRM device
@@ -596,17 +788,19 @@ void drm_dev_fini(struct drm_device *dev)
drm_gem_destroy(dev);
drm_legacy_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
+ drm_legacy_remove_map_hash(dev);
drm_fs_inode_free(dev->anon_inode);
drm_minor_free(dev, DRM_MINOR_PRIMARY);
drm_minor_free(dev, DRM_MINOR_RENDER);
+ put_device(dev->dev);
+
mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->ctxlist_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
kfree(dev->unique);
}
EXPORT_SYMBOL(drm_dev_fini);
@@ -840,8 +1034,6 @@ EXPORT_SYMBOL(drm_dev_register);
*/
void drm_dev_unregister(struct drm_device *dev)
{
- struct drm_map_list *r_list, *list_temp;
-
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_lastclose(dev);
@@ -858,8 +1050,7 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->agp)
drm_pci_agp_destroy(dev);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_legacy_rmmap(dev, r_list->map);
+ drm_legacy_rmmaps(dev);
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index bce99f95c1a3..77f4e5ae4197 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/byteorder/generic.h>
+#include <drm/drm_print.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_dsc.h>
@@ -31,75 +32,74 @@
/**
* drm_dsc_dp_pps_header_init() - Initializes the PPS Header
* for DisplayPort as per the DP 1.4 spec.
- * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
- * as defined in &struct drm_dsc_pps_infoframe
+ * @pps_header: Secondary data packet header for DSC Picture
+ * Parameter Set as defined in &struct dp_sdp_header
*
* DP 1.4 spec defines the secondary data packet for sending the
* picture parameter infoframes from the source to the sink.
- * This function populates the pps header defined in
- * &struct drm_dsc_pps_infoframe as per the header bytes defined
- * in &struct dp_sdp_header.
+ * This function populates the SDP header defined in
+ * &struct dp_sdp_header.
*/
-void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
{
- memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+ memset(pps_header, 0, sizeof(*pps_header));
- pps_sdp->pps_header.HB1 = DP_SDP_PPS;
- pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+ pps_header->HB1 = DP_SDP_PPS;
+ pps_header->HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
}
EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
- * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * drm_dsc_pps_payload_pack() - Populates the DSC PPS
*
- * @pps_sdp:
- * Secondary data packet for DSC Picture Parameter Set. This is defined
- * by &struct drm_dsc_pps_infoframe
+ * @pps_payload:
+ * Bitwise struct for DSC Picture Parameter Set. This is defined
+ * by &struct drm_dsc_picture_parameter_set
* @dsc_cfg:
* DSC Configuration data filled by driver as defined by
* &struct drm_dsc_config
*
- * DSC source device sends a secondary data packet filled with all the
- * picture parameter set (PPS) information required by the sink to decode
- * the compressed frame. Driver populates the dsC PPS infoframe using the DSC
- * configuration parameters in the order expected by the DSC Display Sink
- * device. For the DSC, the sink device expects the PPS payload in the big
- * endian format for the fields that span more than 1 byte.
+ * DSC source device sends a picture parameter set (PPS) containing the
+ * information required by the sink to decode the compressed frame. Driver
+ * populates the DSC PPS struct using the DSC configuration parameters in
+ * the order expected by the DSC Display Sink device. For the DSC, the sink
+ * device expects the PPS payload in big endian format for fields
+ * that span more than 1 byte.
*/
-void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
const struct drm_dsc_config *dsc_cfg)
{
int i;
 	/* Protect against someone accidentally changing struct size */
- BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+ BUILD_BUG_ON(sizeof(*pps_payload) !=
DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
- memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+ memset(pps_payload, 0, sizeof(*pps_payload));
/* PPS 0 */
- pps_sdp->pps_payload.dsc_version =
+ pps_payload->dsc_version =
dsc_cfg->dsc_version_minor |
dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
/* PPS 1, 2 is 0 */
/* PPS 3 */
- pps_sdp->pps_payload.pps_3 =
+ pps_payload->pps_3 =
dsc_cfg->line_buf_depth |
dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
/* PPS 4 */
- pps_sdp->pps_payload.pps_4 =
+ pps_payload->pps_4 =
((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT) |
dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
- dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+ dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT |
dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
/* PPS 5 */
- pps_sdp->pps_payload.bits_per_pixel_low =
+ pps_payload->bits_per_pixel_low =
(dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
/*
@@ -110,103 +110,103 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
*/
/* PPS 6, 7 */
- pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+ pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height);
/* PPS 8, 9 */
- pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+ pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width);
/* PPS 10, 11 */
- pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+ pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height);
/* PPS 12, 13 */
- pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+ pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width);
/* PPS 14, 15 */
- pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+ pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
/* PPS 16 */
- pps_sdp->pps_payload.initial_xmit_delay_high =
+ pps_payload->initial_xmit_delay_high =
((dsc_cfg->initial_xmit_delay &
DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 17 */
- pps_sdp->pps_payload.initial_xmit_delay_low =
+ pps_payload->initial_xmit_delay_low =
(dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
/* PPS 18, 19 */
- pps_sdp->pps_payload.initial_dec_delay =
+ pps_payload->initial_dec_delay =
cpu_to_be16(dsc_cfg->initial_dec_delay);
/* PPS 20 is 0 */
/* PPS 21 */
- pps_sdp->pps_payload.initial_scale_value =
+ pps_payload->initial_scale_value =
dsc_cfg->initial_scale_value;
/* PPS 22, 23 */
- pps_sdp->pps_payload.scale_increment_interval =
+ pps_payload->scale_increment_interval =
cpu_to_be16(dsc_cfg->scale_increment_interval);
/* PPS 24 */
- pps_sdp->pps_payload.scale_decrement_interval_high =
+ pps_payload->scale_decrement_interval_high =
((dsc_cfg->scale_decrement_interval &
DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 25 */
- pps_sdp->pps_payload.scale_decrement_interval_low =
+ pps_payload->scale_decrement_interval_low =
(dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
/* PPS 26[7:0], PPS 27[7:5] RESERVED */
/* PPS 27 */
- pps_sdp->pps_payload.first_line_bpg_offset =
+ pps_payload->first_line_bpg_offset =
dsc_cfg->first_line_bpg_offset;
/* PPS 28, 29 */
- pps_sdp->pps_payload.nfl_bpg_offset =
+ pps_payload->nfl_bpg_offset =
cpu_to_be16(dsc_cfg->nfl_bpg_offset);
/* PPS 30, 31 */
- pps_sdp->pps_payload.slice_bpg_offset =
+ pps_payload->slice_bpg_offset =
cpu_to_be16(dsc_cfg->slice_bpg_offset);
/* PPS 32, 33 */
- pps_sdp->pps_payload.initial_offset =
+ pps_payload->initial_offset =
cpu_to_be16(dsc_cfg->initial_offset);
/* PPS 34, 35 */
- pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+ pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset);
/* PPS 36 */
- pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+ pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp;
/* PPS 37 */
- pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+ pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
/* PPS 38, 39 */
- pps_sdp->pps_payload.rc_model_size =
+ pps_payload->rc_model_size =
cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
/* PPS 40 */
- pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+ pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
/* PPS 41 */
- pps_sdp->pps_payload.rc_quant_incr_limit0 =
+ pps_payload->rc_quant_incr_limit0 =
dsc_cfg->rc_quant_incr_limit0;
/* PPS 42 */
- pps_sdp->pps_payload.rc_quant_incr_limit1 =
+ pps_payload->rc_quant_incr_limit1 =
dsc_cfg->rc_quant_incr_limit1;
/* PPS 43 */
- pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+ pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
/* PPS 44 - 57 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
- pps_sdp->pps_payload.rc_buf_thresh[i] =
+ pps_payload->rc_buf_thresh[i] =
dsc_cfg->rc_buf_thresh[i];
/* PPS 58 - 87 */
@@ -215,32 +215,181 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
* are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
- pps_sdp->pps_payload.rc_range_parameters[i] =
+ pps_payload->rc_range_parameters[i] =
((dsc_cfg->rc_range_params[i].range_min_qp <<
DSC_PPS_RC_RANGE_MINQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_max_qp <<
DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_bpg_offset));
- pps_sdp->pps_payload.rc_range_parameters[i] =
- cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+ pps_payload->rc_range_parameters[i] =
+ cpu_to_be16(pps_payload->rc_range_parameters[i]);
}
/* PPS 88 */
- pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+ pps_payload->native_422_420 = dsc_cfg->native_422 |
dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
/* PPS 89 */
- pps_sdp->pps_payload.second_line_bpg_offset =
+ pps_payload->second_line_bpg_offset =
dsc_cfg->second_line_bpg_offset;
/* PPS 90, 91 */
- pps_sdp->pps_payload.nsl_bpg_offset =
+ pps_payload->nsl_bpg_offset =
cpu_to_be16(dsc_cfg->nsl_bpg_offset);
/* PPS 92, 93 */
- pps_sdp->pps_payload.second_line_offset_adj =
+ pps_payload->second_line_offset_adj =
cpu_to_be16(dsc_cfg->second_line_offset_adj);
/* PPS 94 - 127 are 0 */
}
-EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
+EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
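As a usage note (a sketch, not taken from this patch): once the config is complete, a driver would pack the 128-byte PPS from the same structure; the payload struct name is assumed from include/drm/drm_dsc.h and dsc_cfg is a hypothetical, already-filled drm_dsc_config.

	struct drm_dsc_picture_parameter_set pps;

	/* pack PPS 0-127 from the computed DSC configuration */
	drm_dsc_pps_payload_pack(&pps, &dsc_cfg);
	/* the driver then sends the packed PPS to the sink, e.g. as a DP SDP */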
+
+/**
+ * drm_dsc_compute_rc_parameters() - Write rate control
+ * parameters to the dsc configuration defined in
+ * &struct drm_dsc_config in accordance with the DSC 1.2
+ * specification. Some configuration fields must be present
+ * beforehand.
+ *
+ * @vdsc_cfg:
+ * DSC Configuration data partially filled by driver
+ */
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+ unsigned long groups_per_line = 0;
+ unsigned long groups_total = 0;
+ unsigned long num_extra_mux_bits = 0;
+ unsigned long slice_bits = 0;
+ unsigned long hrd_delay = 0;
+ unsigned long final_scale = 0;
+ unsigned long rbs_min = 0;
+
+ if (vdsc_cfg->native_420 || vdsc_cfg->native_422) {
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* chunksize in Bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
+ } else {
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* chunksize in Bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
+ }
+
+ if (vdsc_cfg->convert_rgb)
+ num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4)
+ - 2);
+ else if (vdsc_cfg->native_422)
+ num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 3 * (4 * vdsc_cfg->bits_per_component) - 2;
+ else
+ num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 2 * (4 * vdsc_cfg->bits_per_component) - 2;
+ /* Number of bits in one Slice */
+ slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+ while ((num_extra_mux_bits > 0) &&
+ ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+ num_extra_mux_bits--;
+
+ if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+ vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+ /* scale_decrement_interval calculation according to DSC spec 1.11 */
+ if (vdsc_cfg->initial_scale_value > 8)
+ vdsc_cfg->scale_decrement_interval = groups_per_line /
+ (vdsc_cfg->initial_scale_value - 8);
+ else
+ vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
+
+ vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+ (vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+ if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+ DRM_DEBUG_KMS("FinalOfs < RcModelSize for this InitialXmitDelay\n");
+ return -ERANGE;
+ }
+
+ final_scale = (vdsc_cfg->rc_model_size * 8) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+ if (vdsc_cfg->slice_height > 1)
+ /*
+ * NflBpgOffset is 16 bit value with 11 fractional bits
+ * hence we multiply by 2^11 for preserving the
+ * fractional part
+ */
+ vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+ (vdsc_cfg->slice_height - 1));
+ else
+ vdsc_cfg->nfl_bpg_offset = 0;
+
+ /* 2^16 - 1 */
+ if (vdsc_cfg->nfl_bpg_offset > 65535) {
+ DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /* Number of groups used to code the entire slice */
+ groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+ /* slice_bpg_offset is 16 bit value with 11 fractional bits */
+ vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+ vdsc_cfg->initial_offset +
+ num_extra_mux_bits) << 11),
+ groups_total);
+
+ if (final_scale > 9) {
+ /*
+ * ScaleIncrementInterval =
+ * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
+ * as (NflBpgOffset + SliceBpgOffset) has an 11-bit fractional value,
+ * we need to divide by 2^11 when using the vdsc_cfg values
+ */
+ vdsc_cfg->scale_increment_interval =
+ (vdsc_cfg->final_offset * (1 << 11)) /
+ ((vdsc_cfg->nfl_bpg_offset +
+ vdsc_cfg->slice_bpg_offset) *
+ (final_scale - 9));
+ } else {
+ /*
+ * If finalScaleValue is less than or equal to 9, a value of 0 should
+ * be used to disable the scale increment at the end of the slice
+ */
+ vdsc_cfg->scale_increment_interval = 0;
+ }
+
+ if (vdsc_cfg->scale_increment_interval > 65535) {
+ DRM_DEBUG_KMS("ScaleIncrementInterval too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /*
+ * DSC spec mentions that bits_per_pixel specifies the target
+ * bits/pixel (bpp) rate that is used by the encoder,
+ * in steps of 1/16 of a bit per pixel
+ */
+ rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+ DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel, 16) +
+ groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+ vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+ vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
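For reference, a minimal sketch of how a driver might seed struct drm_dsc_config before calling this helper; the numeric values are illustrative only and not taken from this patch, and only fields referenced in the function above are used.

static int example_compute_dsc_rc(struct drm_dsc_config *dsc)
{
	/* illustrative seed values; a real driver derives these from the mode */
	dsc->pic_width = 3840;
	dsc->pic_height = 2160;
	dsc->slice_width = 1920;
	dsc->slice_height = 540;
	dsc->bits_per_component = 8;
	dsc->bits_per_pixel = 8 << 4;		/* 8 bpp, in 1/16 bpp units */
	dsc->convert_rgb = true;
	dsc->rc_model_size = 8192;
	dsc->mux_word_size = 48;
	dsc->initial_xmit_delay = 512;
	dsc->initial_offset = 6144;
	dsc->initial_scale_value = 32;
	dsc->first_line_bpg_offset = 12;

	/* fills the derived scale/offset/delay fields; -ERANGE on bad input */
	return drm_dsc_compute_rc_parameters(dsc);
}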
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 990b1909f9d7..649cfd8b4200 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,8 +68,6 @@
* maximum size and use that.
*/
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* Monitor forgot to set the first detailed is preferred bit. */
-#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
@@ -107,8 +105,6 @@ static const struct edid_quirk {
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
- /* Unknown Acer */
- { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
@@ -145,12 +141,6 @@ static const struct edid_quirk {
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- /* Philips 107p5 CRT */
- { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Proview AY765C */
- { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
/* Samsung SyncMaster 205BW. Note: irony */
{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
/* Samsung SyncMaster 22[5-6]BW */
@@ -172,6 +162,25 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+ /* Valve Index Headset */
+ { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
+
/* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
@@ -193,6 +202,12 @@ static const struct edid_quirk {
/* Sony PlayStation VR Headset */
{ "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
+
+ /* Sensics VR Headsets */
+ { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
+
+ /* OSVR HDK and HDK2 VR Headsets */
+ { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -4924,6 +4939,76 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+/* HDMI Colorspace Spec Definitions */
+#define FULL_COLORIMETRY_MASK 0x1FF
+#define NORMAL_COLORIMETRY_MASK 0x3
+#define EXTENDED_COLORIMETRY_MASK 0x7
+#define EXTENDED_ACE_COLORIMETRY_MASK 0xF
+
+#define C(x) ((x) << 0)
+#define EC(x) ((x) << 2)
+#define ACE(x) ((x) << 5)
+
+#define HDMI_COLORIMETRY_NO_DATA 0x0
+#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0))
+#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0))
+#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0))
+#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0))
+#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0))
+#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1))
+
+static const u32 hdmi_colorimetry_val[] = {
+ [DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA,
+ [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC,
+ [DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC,
+ [DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601,
+ [DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709,
+ [DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601,
+ [DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601,
+ [DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB,
+ [DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC,
+ [DRM_MODE_COLORIMETRY_BT2020_RGB] = HDMI_COLORIMETRY_BT2020_RGB,
+ [DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC,
+};
+
+#undef C
+#undef EC
+#undef ACE
+
+/**
+ * drm_hdmi_avi_infoframe_colorspace() - fill the HDMI AVI infoframe
+ * colorspace information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
+ */
+void
+drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ u32 colorimetry_val;
+ u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK;
+
+ if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val))
+ colorimetry_val = HDMI_COLORIMETRY_NO_DATA;
+ else
+ colorimetry_val = hdmi_colorimetry_val[colorimetry_index];
+
+ frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK;
+ /*
+ * ToDo: Extend it for ACE formats as well. Modify the infoframe
+ * structure and extend it in drivers/video/hdmi
+ */
+ frame->extended_colorimetry = (colorimetry_val >> 2) &
+ EXTENDED_COLORIMETRY_MASK;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace);
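A hedged sketch of where this helper fits in a driver's infoframe path; connector and mode are assumed to come from the driver's atomic enable hook.

	struct hdmi_avi_infoframe frame;
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (ret < 0)
		return ret;

	/* translate the connector "Colorspace" property into the C/EC bits */
	drm_hdmi_avi_infoframe_colorspace(&frame, connector->state);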
+
/**
* drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
* quantization range information
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index af2ab640cadb..498f95c3e81d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -639,20 +639,19 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_mode_set *modeset;
int i, j;
drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ modeset = &fb_helper->crtc_info[i].mode_set;
- if (!crtc->enabled)
+ if (!modeset->crtc->enabled)
continue;
- /* Walk the connectors & encoders on this fb turning them on/off */
- drm_fb_helper_for_each_connector(fb_helper, j) {
- connector = fb_helper->connector_info[j]->connector;
+ for (j = 0; j < modeset->num_connectors; j++) {
+ connector = modeset->connectors[j];
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
@@ -934,6 +933,7 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
}
fb_helper->fbdev = info;
+ info->skip_vt_switch = true;
return info;
@@ -1873,7 +1873,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int crtc_count = 0;
int i;
struct drm_fb_helper_surface_size sizes;
- int gamma_size = 0;
int best_depth = 0;
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
@@ -1889,7 +1888,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- /* first up get a count of crtcs now in use and new min/maxes width/heights */
drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
@@ -1969,6 +1967,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
sizes.surface_depth = best_depth;
}
+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
@@ -1991,9 +1990,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
x = fb_helper->crtc_info[i].x;
y = fb_helper->crtc_info[i].y;
- if (gamma_size == 0)
- gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
-
sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
@@ -2036,21 +2032,8 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
return 0;
}
-/**
- * drm_fb_helper_fill_fix - initializes fixed fbdev information
- * @info: fbdev registered by the helper
- * @pitch: desired pitch
- * @depth: desired depth
- *
- * Helper to fill in the fixed fbdev information useful for a non-accelerated
- * fbdev emulations. Drivers which support acceleration methods which impose
- * additional constraints need to set up their own limits.
- *
- * Drivers should call this (or their equivalent setup code) from their
- * &drm_fb_helper_funcs.fb_probe callback.
- */
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
+static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
@@ -2065,24 +2048,10 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
info->fix.line_length = pitch;
}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-/**
- * drm_fb_helper_fill_var - initalizes variable fbdev information
- * @info: fbdev instance to set up
- * @fb_helper: fb helper instance to use as template
- * @fb_width: desired fb width
- * @fb_height: desired fb height
- *
- * Sets up the variable fbdev metainformation from the given fb helper instance
- * and the drm framebuffer allocated in &drm_fb_helper.fb.
- *
- * Drivers should call this (or their equivalent setup code) from their
- * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
- * backing storage framebuffer.
- */
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
- uint32_t fb_width, uint32_t fb_height)
+static void drm_fb_helper_fill_var(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;
@@ -2102,7 +2071,36 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xres = fb_width;
info->var.yres = fb_height;
}
-EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+/**
+ * drm_fb_helper_fill_info - initializes fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @sizes: describes fbdev size and scanout surface size
+ *
+ * Sets up the variable and fixed fbdev metainformation from the given fb helper
+ * instance and the drm framebuffer allocated in &drm_fb_helper.fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
+ * backing storage framebuffer.
+ */
+void drm_fb_helper_fill_info(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(info, fb_helper,
+ sizes->fb_width, sizes->fb_height);
+
+ info->par = fb_helper;
+ snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
+ fb_helper->dev->driver->name);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_info);
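A minimal sketch of the intended call site, a driver's fb_probe implementation; example_fb_ops is a hypothetical driver fb_ops and helper->fb is assumed to have been set up already.

static int example_fb_probe(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->fbops = &example_fb_ops;

	/* sets the fix/var metadata, info->par and the "<drv>drmfb" id */
	drm_fb_helper_fill_info(info, helper, sizes);

	return 0;
}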
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
uint32_t maxX,
@@ -2561,6 +2559,195 @@ static void drm_setup_crtc_rotation(struct drm_fb_helper *fb_helper,
fb_helper->sw_rotations |= DRM_MODE_ROTATE_0;
}
+static struct drm_fb_helper_crtc *
+drm_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
+ return &fb_helper->crtc_info[i];
+
+ return NULL;
+}
+
+/* Try to read the BIOS display configuration and use it for the initial config */
+static bool drm_fb_helper_firmware_config(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
+ bool *enabled, int width, int height)
+{
+ struct drm_device *dev = fb_helper->dev;
+ unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq, mask;
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true, ret = true;
+ int num_connectors_enabled = 0;
+ int num_connectors_detected = 0;
+ struct drm_modeset_acquire_ctx ctx;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ return false;
+
+ save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
+ if (!save_enabled)
+ return false;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (drm_modeset_lock_all_ctx(dev, &ctx) != 0)
+ drm_modeset_backoff(&ctx);
+
+ memcpy(save_enabled, enabled, count);
+ mask = GENMASK(count - 1, 0);
+ conn_configured = 0;
+retry:
+ conn_seq = conn_configured;
+ for (i = 0; i < count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *new_crtc;
+
+ fb_conn = fb_helper->connector_info[i];
+ connector = fb_conn->connector;
+
+ if (conn_configured & BIT(i))
+ continue;
+
+ if (conn_seq == 0 && !connector->has_tile)
+ continue;
+
+ if (connector->status == connector_status_connected)
+ num_connectors_detected++;
+
+ if (!enabled[i]) {
+ DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+ connector->name);
+ conn_configured |= BIT(i);
+ continue;
+ }
+
+ if (connector->force == DRM_FORCE_OFF) {
+ DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ continue;
+ }
+
+ encoder = connector->state->best_encoder;
+ if (!encoder || WARN_ON(!connector->state->crtc)) {
+ if (connector->force > DRM_FORCE_OFF)
+ goto bail;
+
+ DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ conn_configured |= BIT(i);
+ continue;
+ }
+
+ num_connectors_enabled++;
+
+ new_crtc = drm_fb_helper_crtc(fb_helper, connector->state->crtc);
+
+ /*
+ * Make sure we're not trying to drive multiple connectors
+ * with a single CRTC, since our cloning support may not
+ * match the BIOS.
+ */
+ for (j = 0; j < count; j++) {
+ if (crtcs[j] == new_crtc) {
+ DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ goto bail;
+ }
+ }
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+ connector->name);
+
+ /* go for command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_conn);
+
+ /* try for preferred next */
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+ connector->name, connector->has_tile);
+ modes[i] = drm_has_preferred_mode(fb_conn, width,
+ height);
+ }
+
+ /* No preferred mode marked by the EDID? Are there any modes? */
+ if (!modes[i] && !list_empty(&connector->modes)) {
+ DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+ connector->name);
+ modes[i] = list_first_entry(&connector->modes,
+ struct drm_display_mode,
+ head);
+ }
+
+ /* last resort: use current mode */
+ if (!modes[i]) {
+ /*
+ * IMPORTANT: We want to use the adjusted mode (i.e.
+ * after the panel fitter upscaling) as the initial
+ * config, not the input mode, which is what crtc->mode
+ * usually contains. But since our current
+ * code puts a mode derived from the post-pfit timings
+ * into crtc->mode this works out correctly.
+ *
+ * This is crtc->mode and not crtc->state->mode for the
+ * fastboot check to work correctly.
+ */
+ DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+ connector->name);
+ modes[i] = &connector->state->crtc->mode;
+ }
+ crtcs[i] = new_crtc;
+
+ DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
+ connector->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+
+ fallback = false;
+ conn_configured |= BIT(i);
+ }
+
+ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ goto retry;
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to the same
+ * user experience of lighting up as many outputs as possible, like the
+ * fbdev helper library does.
+ */
+ if (num_connectors_enabled != num_connectors_detected &&
+ num_connectors_enabled < dev->mode_config.num_crtc) {
+ DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+ DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+ num_connectors_detected);
+ fallback = true;
+ }
+
+ if (fallback) {
+bail:
+ DRM_DEBUG_KMS("Not using firmware configuration\n");
+ memcpy(enabled, save_enabled, count);
+ ret = false;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ kfree(save_enabled);
+ return ret;
+}
+
static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
u32 width, u32 height)
{
@@ -2593,10 +2780,8 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
DRM_DEBUG_KMS("No connectors reported connected with modes\n");
drm_enable_connectors(fb_helper, enabled);
- if (!(fb_helper->funcs->initial_config &&
- fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
- offsets,
- enabled, width, height))) {
+ if (!drm_fb_helper_firmware_config(fb_helper, crtcs, modes, offsets,
+ enabled, width, height)) {
memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));
@@ -2780,9 +2965,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
*
* This function will call down into the &drm_fb_helper_funcs.fb_probe callback
* to let the driver allocate and initialize the fbdev info structure and the
- * drm framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
- * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
- * values for the fbdev info structure.
+ * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided
+ * as a helper to setup simple default values for the fbdev info structure.
*
* HANG DEBUGGING:
*
@@ -3024,7 +3208,8 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
- if (!try_module_get(fb_helper->dev->driver->fops->owner))
+ /* No need to take a ref for fbcon because it unbinds on unregister */
+ if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
return -ENODEV;
return 0;
@@ -3034,7 +3219,8 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
- module_put(fb_helper->dev->driver->fops->owner);
+ if (user)
+ module_put(fb_helper->dev->driver->fops->owner);
return 0;
}
@@ -3149,7 +3335,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
if (IS_ERR(fbi))
return PTR_ERR(fbi);
- fbi->par = fb_helper;
fbi->fbops = &drm_fbdev_fb_ops;
fbi->screen_size = fb->height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
@@ -3160,10 +3345,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fix.smem_start =
page_to_phys(virt_to_page(fbi->screen_buffer));
#endif
- strcpy(fbi->fix.id, "DRM emulated");
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, fb_helper, sizes);
if (fb->funcs->dirty) {
struct fb_ops *fbops;
@@ -3317,8 +3499,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
return ret;
}
- drm_client_add(&fb_helper->client);
-
if (!preferred_bpp)
preferred_bpp = dev->mode_config.preferred_depth;
if (!preferred_bpp)
@@ -3329,6 +3509,8 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
if (ret)
DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret);
+ drm_client_register(&fb_helper->client);
+
return 0;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
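For context (a sketch, not part of this patch), the expected driver-side ordering is to call the setup helper after registration, since drm_client_register() may start the client immediately:

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* 0 means "use dev->mode_config.preferred_depth" for the bpp */
	drm_fbdev_generic_setup(drm, 0);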
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 7caa3c7ed978..233f114d2186 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -128,7 +128,6 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
/* for compatibility root is always authenticated */
file->authenticated = capable(CAP_SYS_ADMIN);
- file->lock_count = 0;
INIT_LIST_HEAD(&file->lhead);
INIT_LIST_HEAD(&file->fbs);
@@ -425,30 +424,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return 0;
}
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_legacy_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
- drm_legacy_vma_flush(dev);
- drm_legacy_dma_takedown(dev);
-
- mutex_unlock(&dev->struct_mutex);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-
- DRM_DEBUG("lastclose completed\n");
-}
-
void drm_lastclose(struct drm_device * dev)
{
DRM_DEBUG("\n");
@@ -577,6 +552,7 @@ put_back_event:
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
+ wake_up_interruptible(&file_priv->event_wait);
break;
}
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
new file mode 100644
index 000000000000..a18da35145b7
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_rect.h>
+
+static unsigned int clip_offset(struct drm_rect *clip,
+ unsigned int pitch, unsigned int cpp)
+{
+ return clip->y1 * pitch + clip->x1 * cpp;
+}
+
+/**
+ * drm_fb_memcpy - Copy clip buffer
+ * @dst: Destination buffer
+ * @vaddr: Source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
+ */
+void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y, lines = clip->y2 - clip->y1;
+
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
+ for (y = 0; y < lines; y++) {
+ memcpy(dst, vaddr, len);
+ vaddr += fb->pitches[0];
+ dst += len;
+ }
+}
+EXPORT_SYMBOL(drm_fb_memcpy);
+
+/**
+ * drm_fb_memcpy_dstclip - Copy clip buffer
+ * @dst: Destination buffer (iomem)
+ * @vaddr: Source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ unsigned int offset = clip_offset(clip, fb->pitches[0], cpp);
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y, lines = clip->y2 - clip->y1;
+
+ vaddr += offset;
+ dst += offset;
+ for (y = 0; y < lines; y++) {
+ memcpy_toio(dst, vaddr, len);
+ vaddr += fb->pitches[0];
+ dst += fb->pitches[0];
+ }
+}
+EXPORT_SYMBOL(drm_fb_memcpy_dstclip);
+
+/**
+ * drm_fb_swab16 - Swap bytes into clip buffer
+ * @dst: RGB565 destination buffer
+ * @vaddr: RGB565 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ */
+void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t len = (clip->x2 - clip->x1) * sizeof(u16);
+ unsigned int x, y;
+ u16 *src, *buf;
+
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++)
+ *dst++ = swab16(*src++);
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(drm_fb_swab16);
+
+static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
+ unsigned int pixels,
+ bool swab)
+{
+ unsigned int x;
+ u16 val16;
+
+ for (x = 0; x < pixels; x++) {
+ val16 = ((sbuf[x] & 0x00F80000) >> 8) |
+ ((sbuf[x] & 0x0000FC00) >> 5) |
+ ((sbuf[x] & 0x000000F8) >> 3);
+ if (swab)
+ dbuf[x] = swab16(val16);
+ else
+ dbuf[x] = val16;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
+ * @dst: RGB565 destination buffer
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @swab: Swap bytes
+ *
+ * Drivers can use this function for RGB565 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swab)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t src_len = linepixels * sizeof(u32);
+ size_t dst_len = linepixels * sizeof(u16);
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *sbuf;
+
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < lines; y++) {
+ memcpy(sbuf, vaddr, src_len);
+ drm_fb_xrgb8888_to_rgb565_line(dst, sbuf, linepixels, swab);
+ vaddr += fb->pitches[0];
+ dst += dst_len;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
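A hedged sketch of a typical caller, e.g. a display update path that stages the damaged region into a driver-owned RGB565 transfer buffer; tx_buf and vaddr are hypothetical driver state.

	struct drm_rect clip = {
		.x1 = 0,
		.y1 = 0,
		.x2 = fb->width,
		.y2 = fb->height,
	};

	/* convert only the clip rectangle into the RGB565 staging buffer */
	drm_fb_xrgb8888_to_rgb565(tx_buf, vaddr, fb, &clip, false);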
+
+/**
+ * drm_fb_xrgb8888_to_rgb565_dstclip - Convert XRGB8888 to RGB565 clip buffer
+ * @dst: RGB565 destination buffer (iomem)
+ * @dst_pitch: destination buffer pitch
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @swab: Swap bytes
+ *
+ * Drivers can use this function for RGB565 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swab)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t dst_len = linepixels * sizeof(u16);
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *dbuf;
+
+ dbuf = kmalloc(dst_len, GFP_KERNEL);
+ if (!dbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ dst += clip_offset(clip, dst_pitch, sizeof(u16));
+ for (y = 0; y < lines; y++) {
+ drm_fb_xrgb8888_to_rgb565_line(dbuf, vaddr, linepixels, swab);
+ memcpy_toio(dst, dbuf, dst_len);
+ vaddr += fb->pitches[0];
+ dst += dst_len;
+ }
+
+ kfree(dbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_dstclip);
+
+static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf,
+ unsigned int pixels)
+{
+ unsigned int x;
+
+ for (x = 0; x < pixels; x++) {
+ *dbuf++ = (sbuf[x] & 0x000000FF) >> 0;
+ *dbuf++ = (sbuf[x] & 0x0000FF00) >> 8;
+ *dbuf++ = (sbuf[x] & 0x00FF0000) >> 16;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer
+ * @dst: RGB888 destination buffer (iomem)
+ * @dst_pitch: destination buffer pitch
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB888 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t dst_len = linepixels * 3;
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *dbuf;
+
+ dbuf = kmalloc(dst_len, GFP_KERNEL);
+ if (!dbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ dst += clip_offset(clip, dst_pitch, sizeof(u16));
+ for (y = 0; y < lines; y++) {
+ drm_fb_xrgb8888_to_rgb888_line(dbuf, vaddr, linepixels);
+ memcpy_toio(dst, dbuf, dst_len);
+ vaddr += fb->pitches[0];
+ dst += dst_len;
+ }
+
+ kfree(dbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip);
+
+/**
+ * drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
+ * @dst: 8-bit grayscale destination buffer
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * DRM doesn't have native monochrome or grayscale support. Drivers for
+ * such displays can announce the commonly supported XR24 format to userspace
+ * and use this function to convert to their native format.
+ *
+ * Monochrome drivers will use the most significant bit,
+ * where 1 means foreground color and 0 background color.
+ *
+ * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
+ */
+void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
+ unsigned int x, y;
+ void *buf;
+ u32 *src;
+
+ if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++) {
+ u8 r = (*src & 0x00ff0000) >> 16;
+ u8 g = (*src & 0x0000ff00) >> 8;
+ u8 b = *src & 0x000000ff;
+
+ /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ *dst++ = (3 * r + 6 * g + b) / 10;
+ src++;
+ }
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
+
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index ba7e19d4336c..6ea55fb4526d 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -198,6 +198,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
@@ -225,7 +229,17 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y210, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
@@ -247,6 +261,19 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_P210, .depth = 0,
+ .num_planes = 2, .char_per_block = { 2, 4, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2,
+ .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VUY101010, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1,
+ .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420_8BIT, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
+ .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
+ .is_yuv = true },
};
unsigned int i;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d0b9f6a9953f..50de138c89e0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -72,23 +72,6 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-
-/* pgoff in mmap is an unsigned long, so we need to make sure that
- * the faked up offset will fit
- */
-
-#if BITS_PER_LONG == 64
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#else
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
-#endif
-
/**
* drm_gem_init - Initialize the GEM device fields
* @dev: drm_device structure to initialize
@@ -171,6 +154,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
+ reservation_object_init(&obj->_resv);
+ if (!obj->resv)
+ obj->resv = &obj->_resv;
+
drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -659,6 +646,85 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
}
EXPORT_SYMBOL(drm_gem_put_pages);
+static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
+ struct drm_gem_object **objs)
+{
+ int i, ret = 0;
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ for (i = 0; i < count; i++) {
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle[i]);
+ if (!obj) {
+ ret = -ENOENT;
+ break;
+ }
+ drm_gem_object_get(obj);
+ objs[i] = obj;
+ }
+ spin_unlock(&filp->table_lock);
+
+ return ret;
+}
+
+/**
+ * drm_gem_objects_lookup - look up GEM objects from an array of handles
+ * @filp: DRM file private data
+ * @bo_handles: user pointer to array of userspace handles
+ * @count: size of handle array
+ * @objs_out: returned pointer to array of drm_gem_object pointers
+ *
+ * Takes an array of userspace handles and returns a newly allocated array of
+ * GEM objects.
+ *
+ * For a single handle lookup, use drm_gem_object_lookup().
+ *
+ * Returns:
+ *
+ * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
+ * released with drm_gem_object_put(). -ENOENT is returned on a lookup
+ * failure. 0 is returned on success.
+ *
+ */
+int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+ int count, struct drm_gem_object ***objs_out)
+{
+ int ret;
+ u32 *handles;
+ struct drm_gem_object **objs;
+
+ if (!count)
+ return 0;
+
+ objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!objs)
+ return -ENOMEM;
+
+ handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
+ if (!handles) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy in GEM handles\n");
+ goto out;
+ }
+
+ ret = objects_lookup(filp, handles, count, objs);
+ *objs_out = objs;
+
+out:
+ kvfree(handles);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_gem_objects_lookup);
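A minimal sketch of the intended ioctl-side usage; args->bo_handles and args->bo_handle_count are hypothetical ioctl fields.

	struct drm_gem_object **objs;
	int ret;

	ret = drm_gem_objects_lookup(file_priv,
				     u64_to_user_ptr(args->bo_handles),
				     args->bo_handle_count, &objs);
	if (ret)
		return ret;

	/* ... use objs[] ..., then drop each reference and kvfree(objs) */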
+
/**
* drm_gem_object_lookup - look up a GEM object from its handle
* @filp: DRM file private data
@@ -668,24 +734,56 @@ EXPORT_SYMBOL(drm_gem_put_pages);
*
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
+ *
+ * If looking up an array of handles, use drm_gem_objects_lookup().
*/
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
+ struct drm_gem_object *obj = NULL;
+
+ objects_lookup(filp, &handle, 1, &obj);
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * drm_gem_reservation_object_wait - Wait on the shared and/or exclusive fences
+ * of a GEM object's reservation object.
+ * @filep: DRM file private data
+ * @handle: userspace handle
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Returns:
+ *
+ * 0 on success, -EINVAL if the handle is invalid, -ETIME if the wait
+ * timed out, or another negative error code such as -ERESTARTSYS.
+ */
+long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+ bool wait_all, unsigned long timeout)
+{
+ long ret;
struct drm_gem_object *obj;
- spin_lock(&filp->table_lock);
+ obj = drm_gem_object_lookup(filep, handle);
+ if (!obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+ return -EINVAL;
+ }
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj)
- drm_gem_object_get(obj);
+ ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+ true, timeout);
+ if (ret == 0)
+ ret = -ETIME;
+ else if (ret > 0)
+ ret = 0;
- spin_unlock(&filp->table_lock);
+ drm_gem_object_put_unlocked(obj);
- return obj;
+ return ret;
}
-EXPORT_SYMBOL(drm_gem_object_lookup);
+EXPORT_SYMBOL(drm_gem_reservation_object_wait);
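A hedged sketch of a wait ioctl built on this helper; args->handle and args->timeout_ms are hypothetical ioctl fields.

	long ret;

	ret = drm_gem_reservation_object_wait(file_priv, args->handle,
					      true /* wait_all */,
					      msecs_to_jiffies(args->timeout_ms));

	/* 0 on success, -ETIME on timeout, -EINVAL on a bad handle */
	return ret;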
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -851,6 +949,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
if (obj->filp)
fput(obj->filp);
+ reservation_object_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1190,3 +1289,174 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_vunmap);
+
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then
+ * drm_gem_unlock_reservations().
+ *
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
+ */
+int
+drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int contended = -1;
+ int i, ret;
+
+ ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
+retry:
+ if (contended != -1) {
+ struct drm_gem_object *obj = objs[contended];
+
+ ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
+ acquire_ctx);
+ if (ret) {
+ ww_acquire_done(acquire_ctx);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ if (i == contended)
+ continue;
+
+ ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
+ acquire_ctx);
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ ww_mutex_unlock(&objs[j]->resv->lock);
+
+ if (contended != -1 && contended >= i)
+ ww_mutex_unlock(&objs[contended]->resv->lock);
+
+ if (ret == -EDEADLK) {
+ contended = i;
+ goto retry;
+ }
+
+ ww_acquire_done(acquire_ctx);
+ return ret;
+ }
+ }
+
+ ww_acquire_done(acquire_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_lock_reservations);
+
+void
+drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ ww_mutex_unlock(&objs[i]->resv->lock);
+
+ ww_acquire_fini(acquire_ctx);
+}
+EXPORT_SYMBOL(drm_gem_unlock_reservations);
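A hedged sketch of the lock/attach/unlock pattern in a job-submission path; objs, count and the fence attachment step are driver-specific and only indicated here.

	struct ww_acquire_ctx ctx;
	int ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	/* ... reserve fence slots and add the job's fence to each objs[i]->resv ... */

	drm_gem_unlock_reservations(objs, count, &ctx);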
+
+/**
+ * drm_gem_fence_array_add - Adds the fence to an array of fences to be
+ * waited on, deduplicating fences from the same context.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_gem_fence_array_add(struct xarray *fence_array,
+ struct dma_fence *fence)
+{
+ struct dma_fence *entry;
+ unsigned long index;
+ u32 id = 0;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ /* Deduplicate if we already depend on a fence from the same context.
+ * This lets the size of the array of deps scale with the number of
+ * engines involved, rather than the number of BOs.
+ */
+ xa_for_each(fence_array, index, entry) {
+ if (entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ xa_store(fence_array, index, fence, GFP_KERNEL);
+ } else {
+ dma_fence_put(fence);
+ }
+ return 0;
+ }
+
+ ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
+ if (ret != 0)
+ dma_fence_put(fence);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add);
+
+/**
+ * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
+ * in the GEM object's reservation object to an array of dma_fences for use in
+ * scheduling a rendering job.
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ */
+int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ int ret;
+ struct dma_fence **fences;
+ unsigned int i, fence_count;
+
+ if (!write) {
+ struct dma_fence *fence =
+ reservation_object_get_excl_rcu(obj->resv);
+
+ return drm_gem_fence_array_add(fence_array, fence);
+ }
+
+ ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+ &fence_count, &fences);
+ if (ret || !fence_count)
+ return ret;
+
+ for (i = 0; i < fence_count; i++) {
+ ret = drm_gem_fence_array_add(fence_array, fences[i]);
+ if (ret)
+ break;
+ }
+
+ for (; i < fence_count; i++)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
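A hedged sketch of collecting implicit dependencies for a job; objs, count and bo_write[] are hypothetical driver-side submission state.

	struct xarray deps;
	int i, ret = 0;

	xa_init_flags(&deps, XA_FLAGS_ALLOC);

	for (i = 0; i < count; i++) {
		/* bo_write[i]: whether the job will write this BO */
		ret = drm_gem_fence_array_add_implicit(&deps, objs[i],
						       bo_write[i]);
		if (ret)
			break;
	}
	/* the scheduler job then waits on, and eventually puts, every fence in deps */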
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index cc26625b4b33..e01ceed09e67 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -186,13 +186,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
cma_obj = to_drm_gem_cma_obj(gem_obj);
- if (cma_obj->vaddr) {
- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
- } else if (gem_obj->import_attach) {
+ if (gem_obj->import_attach) {
if (cma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+ } else if (cma_obj->vaddr) {
+ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
}
drm_gem_object_release(gem_obj);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
new file mode 100644
index 000000000000..1ee208c2c85e
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for GEM objects backed by shmem buffers
+ * allocated using anonymous pageable memory.
+ */
+
+static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
+ .free = drm_gem_shmem_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * drm_gem_shmem_create - Allocate an object with the given size
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This function creates a shmem GEM object.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+
+ if (dev->driver->gem_create_object)
+ obj = dev->driver->gem_create_object(dev, size);
+ else
+ obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ if (!obj->funcs)
+ obj->funcs = &drm_gem_shmem_funcs;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto err_free;
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto err_release;
+
+ shmem = to_drm_gem_shmem_obj(obj);
+ mutex_init(&shmem->pages_lock);
+ mutex_init(&shmem->vmap_lock);
+
+ /*
+ * Our buffers are kept pinned, so allocating them
+ * from the MOVABLE zone is a really bad idea, and
+ * conflicts with CMA. See comments above new_inode()
+ * why this is required _and_ expected if you're
+ * going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
+ return shmem;
+
+err_release:
+ drm_gem_object_release(obj);
+err_free:
+ kfree(obj);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
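A minimal sketch of object creation in a driver's dumb/create path; args->size is a hypothetical ioctl field.

	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, args->size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/* shmem->base is the embedded drm_gem_object used for handle creation */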
+
+/**
+ * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
+ * @obj: GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ WARN_ON(shmem->vmap_use_count);
+
+ if (obj->import_attach) {
+ shmem->pages_use_count--;
+ drm_prime_gem_destroy(obj, shmem->sgt);
+ kvfree(shmem->pages);
+ } else {
+ if (shmem->sgt) {
+ dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
+ shmem->sgt->nents, DMA_BIDIRECTIONAL);
+
+ drm_gem_shmem_put_pages(shmem);
+ sg_free_table(shmem->sgt);
+ kfree(shmem->sgt);
+ }
+ }
+
+ WARN_ON(shmem->pages_use_count);
+
+ drm_gem_object_release(obj);
+ mutex_destroy(&shmem->pages_lock);
+ mutex_destroy(&shmem->vmap_lock);
+ kfree(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
+
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ struct page **pages;
+
+ if (shmem->pages_use_count++ > 0)
+ return 0;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages)) {
+ DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+ shmem->pages_use_count = 0;
+ return PTR_ERR(pages);
+ }
+
+ shmem->pages = pages;
+
+ return 0;
+}
+
+/**
+ * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that backing pages exist for the shmem GEM object
+ * and increases the use count.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&shmem->pages_lock);
+ if (ret)
+ return ret;
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ mutex_unlock(&shmem->pages_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_shmem_get_pages);
+
+static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+
+ if (WARN_ON_ONCE(!shmem->pages_use_count))
+ return;
+
+ if (--shmem->pages_use_count > 0)
+ return;
+
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+}
+
+/**
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function decreases the use count and puts the backing pages when the
+ * use count drops to zero.
+ */
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+{
+ mutex_lock(&shmem->pages_lock);
+ drm_gem_shmem_put_pages_locked(shmem);
+ mutex_unlock(&shmem->pages_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+
+/**
+ * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function makes sure the backing pages are pinned in memory while the
+ * buffer is exported.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_pin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_get_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_pin);
+
+/**
+ * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function removes the requirement that the backing pages are pinned in
+ * memory.
+ */
+void drm_gem_shmem_unpin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_put_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_unpin);
+
+static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ int ret;
+
+ if (shmem->vmap_use_count++ > 0)
+ return shmem->vaddr;
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ goto err_zero_use;
+
+ if (obj->import_attach)
+ shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ else
+ shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
+
+ if (!shmem->vaddr) {
+ DRM_DEBUG_KMS("Failed to vmap pages\n");
+ ret = -ENOMEM;
+ goto err_put_pages;
+ }
+
+ return shmem->vaddr;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+ shmem->vmap_use_count = 0;
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function makes sure that a virtual address exists for the buffer backing
+ * the shmem GEM object.
+ *
+ * Returns:
+ * The kernel virtual address on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ void *vaddr;
+ int ret;
+
+ ret = mutex_lock_interruptible(&shmem->vmap_lock);
+ if (ret)
+ return ERR_PTR(ret);
+ vaddr = drm_gem_shmem_vmap_locked(shmem);
+ mutex_unlock(&shmem->vmap_lock);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
+
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+
+ if (WARN_ON_ONCE(!shmem->vmap_use_count))
+ return;
+
+ if (--shmem->vmap_use_count > 0)
+ return;
+
+ if (obj->import_attach)
+ dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+ else
+ vunmap(shmem->vaddr);
+
+ shmem->vaddr = NULL;
+ drm_gem_shmem_put_pages(shmem);
+}
+
+/**
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * @obj: GEM object
+ * @vaddr: Virtual address of the mapping to remove
+ *
+ * This function removes the virtual address when use count drops to zero.
+ */
+void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ mutex_lock(&shmem->vmap_lock);
+ drm_gem_shmem_vunmap_locked(shmem);
+ mutex_unlock(&shmem->vmap_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_vunmap);
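A minimal CPU-access sketch pairing the two calls above (hypothetical driver code; the sketch_ name is an assumption):

static int sketch_fill_buffer(struct drm_gem_shmem_object *shmem, u8 value)
{
	void *vaddr = drm_gem_shmem_vmap(&shmem->base);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, value, shmem->base.size);
	drm_gem_shmem_vunmap(&shmem->base, vaddr);
	return 0;
}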
+
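The helper below has no kernel-doc comment; a suggested one, describing only what the code visibly does, would be:

/**
 * drm_gem_shmem_create_with_handle - Create a shmem GEM object and a handle to it
 * @file_priv: DRM file structure the handle is registered with
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Return location for the GEM handle
 *
 * This function creates a shmem GEM object and registers a handle for it in
 * @file_priv. The creation reference is dropped, so the handle holds the only
 * reference and the returned pointer is valid only as long as the handle is.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */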
+struct drm_gem_shmem_object *
+drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *dev, size_t size,
+ uint32_t *handle)
+{
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return shmem;
+
+ /*
+	 * Allocate an id in the idr table where the obj is registered.
+	 * The handle is what userspace uses to refer to the object.
+ */
+ ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put_unlocked(&shmem->base);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return shmem;
+}
+EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
+
+/**
+ * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
+ * @file: DRM file structure to create the dumb buffer for
+ * @dev: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer, rounding it up to a
+ * whole number of bytes. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their &drm_driver.dumb_create callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace before calling into this function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct drm_gem_shmem_object *shmem;
+
+ if (!args->pitch || !args->size) {
+ args->pitch = min_pitch;
+ args->size = args->pitch * args->height;
+ } else {
+ /* ensure sane minimum values */
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+ }
+
+ shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
+
+ return PTR_ERR_OR_ZERO(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
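The "additional restrictions" case can be handled with a thin wrapper. A sketch assuming hypothetical hardware that needs a 64-byte-aligned pitch (sketch_ names are assumptions, not part of this patch):

static int sketch_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/* Enforce the hardware pitch restriction, then delegate. */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	return drm_gem_shmem_dumb_create(file, dev, args);
}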
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ struct page *page;
+
+ if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
+ return VM_FAULT_SIGBUS;
+
+ page = shmem->pages[vmf->pgoff];
+
+ return vmf_insert_page(vma, vmf->address, page);
+}
+
+static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ int ret;
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ WARN_ON_ONCE(ret != 0);
+
+ drm_gem_vm_open(vma);
+}
+
+static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_put_pages(shmem);
+ drm_gem_vm_close(vma);
+}
+
+const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+ .fault = drm_gem_shmem_fault,
+ .open = drm_gem_shmem_vm_open,
+ .close = drm_gem_shmem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
+
+/**
+ * drm_gem_shmem_mmap - Memory-map a shmem GEM object
+ * @filp: File object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for shmem objects. Drivers which employ the shmem helpers should
+ * use this function as their &file_operations.mmap handler.
+ *
+ * Instead of directly referencing this function, drivers should use the
+ * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ return ret;
+ }
+
+ /* VM_PFNMAP was set by drm_gem_mmap() */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
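A sketch of the recommended hookup, assuming DEFINE_DRM_GEM_SHMEM_FOPS() expands to a complete &file_operations with .mmap set to drm_gem_shmem_mmap, as the note above implies (sketch_fops is a hypothetical name; the &drm_driver that consumes it is sketched at the end of this file):

/* Defines a static const struct file_operations named sketch_fops. */
DEFINE_DRM_GEM_SHMEM_FOPS(sketch_fops);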
+
+/**
+ * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ */
+void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
+}
+EXPORT_SYMBOL(drm_gem_shmem_print_info);
+
+/**
+ * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
+ * pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
+ * negative error code on failure.
+ */
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
+
+/**
+ * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
+ * scatter/gather table for a shmem GEM object.
+ * @obj: GEM object
+ *
+ * This function returns a scatter/gather table suitable for driver usage. If
+ * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
+ * table is created.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
+ * negative error code on failure.
+ */
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct sg_table *sgt;
+
+ if (shmem->sgt)
+ return shmem->sgt;
+
+ WARN_ON(obj->import_attach);
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ return ERR_PTR(ret);
+
+ sgt = drm_gem_shmem_get_sg_table(&shmem->base);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_put_pages;
+ }
+ /* Map the pages for use by the h/w. */
+ dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+
+ shmem->sgt = sgt;
+
+ return sgt;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
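A sketch of how a driver might consume the dma-mapped table (hypothetical hardware programming; sketch_ names are assumptions):

#include <linux/scatterlist.h>

static int sketch_bind_buffer(struct drm_gem_shmem_object *shmem)
{
	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&shmem->base);
	struct scatterlist *sg;
	unsigned int i;

	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* Program sg_dma_address(sg) / sg_dma_len(sg) into the
		 * (hypothetical) hardware's page tables here. */
	}

	return 0;
}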
+
+/**
+ * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
+ * another driver's scatter/gather table of pinned pages
+ * @dev: Device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Drivers that use the shmem helpers should set this as their
+ * &drm_driver.gem_prime_import_sg_table callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ size_t size = PAGE_ALIGN(attach->dmabuf->size);
+ size_t npages = size >> PAGE_SHIFT;
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!shmem->pages) {
+ ret = -ENOMEM;
+ goto err_free_gem;
+ }
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
+ if (ret < 0)
+ goto err_free_array;
+
+ shmem->sgt = sgt;
+ shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
+
+ DRM_DEBUG_PRIME("size = %zu\n", size);
+
+ return &shmem->base;
+
+err_free_array:
+ kvfree(shmem->pages);
+err_free_gem:
+ drm_gem_object_put_unlocked(&shmem->base);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
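Putting the helper entry points documented in this file together, a minimal driver wiring might look as follows. This is a hedged sketch: sketch_ names are assumptions, the feature flags are the usual ones for a GEM + PRIME capable KMS driver of this kernel generation, and the remaining GEM/PRIME callbacks a real driver needs are omitted.

static struct drm_driver sketch_drm_driver = {
	.driver_features	   = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.fops			   = &sketch_fops, /* from DEFINE_DRM_GEM_SHMEM_FOPS() above */
	.dumb_create		   = drm_gem_shmem_dumb_create,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
};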
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 251d67e04c2d..e19ac7ca602d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -71,8 +71,10 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
/* drm_irq.c */
/* IOCTLS */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#endif
int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -180,12 +182,20 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
/* drm_framebuffer.c */
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 0e3043e08c69..374b372da58a 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -156,6 +156,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
return -EINVAL;
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_map32 {
u32 offset; /* Requested physical address (0 for SAREA) */
u32 size; /* Requested physical size (bytes) */
@@ -239,6 +240,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
map.handle = compat_ptr(handle);
return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
}
+#endif
typedef struct drm_client32 {
int idx; /* Which client desired? */
@@ -301,6 +303,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_buf_desc32 {
int count; /* Number of buffers of this size */
int size; /* Size in bytes */
@@ -604,6 +607,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
return 0;
}
+#endif
#if IS_ENABLED(CONFIG_AGP)
typedef struct drm_agp_mode32 {
@@ -748,6 +752,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
}
#endif /* CONFIG_AGP */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_scatter_gather32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for mapping / unmapping */
@@ -788,7 +793,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
-
+#endif
#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
@@ -903,10 +908,13 @@ static struct {
#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
+#endif
DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
@@ -918,6 +926,7 @@ static struct {
DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
+#endif
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
@@ -926,8 +935,10 @@ static struct {
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
#endif
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
+#endif
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 687943df58e1..2263e3ddd822 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -245,6 +245,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_SYNCOBJ:
req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
return 0;
+ case DRM_CAP_SYNCOBJ_TIMELINE:
+ req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE);
+ return 0;
}
/* Other caps only work with KMS drivers */
@@ -508,13 +511,6 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
-static inline bool
-drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
-{
- return drm_core_check_feature(dev, DRIVER_RENDER) &&
- (flags & DRM_RENDER_ALLOW);
-}
-
/**
* drm_ioctl_permit - Check ioctl permissions against caller
*
@@ -529,19 +525,14 @@ drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
*/
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
- const struct drm_device *dev = file_priv->minor->dev;
-
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
return -EACCES;
- /* AUTH is only for master ... */
- if (unlikely((flags & DRM_AUTH) && drm_is_primary_client(file_priv))) {
- /* authenticated ones, or render capable on DRM_RENDER_ALLOW. */
- if (!file_priv->authenticated &&
- !drm_render_driver_and_ioctl(dev, flags))
- return -EACCES;
- }
+	/* AUTH is only for authenticated or render clients */
+ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
+ !file_priv->authenticated))
+ return -EACCES;
/* MASTER is only for master or control clients */
if (unlikely((flags & DRM_MASTER) &&
@@ -565,6 +556,12 @@ EXPORT_SYMBOL(drm_ioctl_permit);
.name = #ioctl \
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags)
+#else
+#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
+#endif
+
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
@@ -572,7 +569,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
+
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
+
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
@@ -584,39 +583,38 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -629,8 +627,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
@@ -686,12 +684,20 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 9bd8908d5fd8..02f38cc9f468 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -213,6 +213,7 @@ int drm_irq_uninstall(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_irq_uninstall);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -253,3 +254,4 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
return -EINVAL;
}
}
+#endif
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 93e2b30fe1a5..9c5ae825c507 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -39,7 +39,7 @@ MODULE_LICENSE("GPL and additional rights");
/* Backward compatibility for drm_kms_helper.edid_firmware */
static int edid_firmware_set(const char *val, const struct kernel_param *kp)
{
- DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware intead.\n");
+ DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
return __drm_set_edid_firmware_path(val);
}
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 603b0bd9c5ce..694ff363a90b 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -111,7 +111,7 @@ static bool _drm_has_leased(struct drm_master *master, int id)
*/
bool _drm_lease_held(struct drm_file *file_priv, int id)
{
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master)
return true;
return _drm_lease_held_master(file_priv->master, id);
@@ -133,7 +133,7 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
struct drm_master *master;
bool ret;
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master || !file_priv->master->lessor)
return true;
master = file_priv->master;
@@ -159,7 +159,7 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
int count_in, count_out;
uint32_t crtcs_out = 0;
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master || !file_priv->master->lessor)
return crtcs_in;
master = file_priv->master;
@@ -220,8 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
error = 0;
if (!idr_find(&dev->mode_config.object_idr, object))
error = -ENOENT;
- else if (!_drm_lease_held_master(lessor, object))
- error = -EACCES;
else if (_drm_has_leased(lessor, object))
error = -EBUSY;
@@ -403,11 +401,6 @@ static int fill_object_idr(struct drm_device *dev,
/* step one - get references to all the mode objects
and check for validity. */
for (o = 0; o < object_count; o++) {
- if ((int) object_ids[o] < 0) {
- ret = -EINVAL;
- goto out_free_objects;
- }
-
objects[o] = drm_mode_object_find(dev, lessor_priv,
object_ids[o],
DRM_MODE_OBJECT_ANY);
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 280fbeb846ff..51f1fabfa145 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -42,11 +42,19 @@ struct drm_file;
#define DRM_KERNEL_CONTEXT 0
#define DRM_RESERVED_CONTEXTS 1
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_ctxbitmap_init(struct drm_device *dev);
void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
+#else
+static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {}
+static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {}
+static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {}
+#endif
+void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
@@ -56,6 +64,7 @@ int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
+#endif
/*
* Generic Buffer Management
@@ -63,16 +72,39 @@ int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
#define DRM_MAP_HASH_OFFSET 0x10000000
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+static inline int drm_legacy_create_map_hash(struct drm_device *dev)
+{
+ return drm_ht_create(&dev->map_hash, 12);
+}
+
+static inline void drm_legacy_remove_map_hash(struct drm_device *dev)
+{
+ drm_ht_remove(&dev->map_hash);
+}
+#else
+static inline int drm_legacy_create_map_hash(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {}
+#endif
+
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+
int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+#endif
int __drm_legacy_infobufs(struct drm_device *, void *, int *,
int (*)(void *, int, struct drm_buf_entry *));
@@ -81,7 +113,17 @@ int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
int (*)(void *, int, unsigned long, struct drm_buf *),
struct drm_file *);
-#ifdef CONFIG_DRM_VM
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_master_rmmaps(struct drm_device *dev,
+ struct drm_master *master);
+void drm_legacy_rmmaps(struct drm_device *dev);
+#else
+static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
+ struct drm_master *master) {}
+static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *d);
#else
static inline void drm_legacy_vma_flush(struct drm_device *d)
@@ -103,23 +145,64 @@ struct drm_agp_mem {
};
/* drm_lock.c */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
+#else
+static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {}
+#endif
/* DMA support */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_dma_setup(struct drm_device *dev);
void drm_legacy_dma_takedown(struct drm_device *dev);
+#else
+static inline int drm_legacy_dma_setup(struct drm_device *dev)
+{
+ return 0;
+}
+#endif
+
void drm_legacy_free_buffer(struct drm_device *dev,
struct drm_buf * buf);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *filp);
+#else
+static inline void drm_legacy_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *filp) {}
+#endif
/* Scatter Gather Support */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_sg_cleanup(struct drm_device *dev);
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_init_members(struct drm_device *dev);
+void drm_legacy_destroy_members(struct drm_device *dev);
+void drm_legacy_dev_reinit(struct drm_device *dev);
+#else
+static inline void drm_legacy_init_members(struct drm_device *dev) {}
+static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
+static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master);
+#else
+static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_master_legacy_init(struct drm_master *master);
+#else
+static inline void drm_master_legacy_init(struct drm_master *master) {}
+#endif
#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
new file mode 100644
index 000000000000..2fe786839ca8
--- /dev/null
+++ b/drivers/gpu/drm/drm_legacy_misc.c
@@ -0,0 +1,82 @@
+/**
+ * \file drm_legacy_misc.c
+ * Misc legacy support functions.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "drm_internal.h"
+#include "drm_legacy.h"
+
+void drm_legacy_init_members(struct drm_device *dev)
+{
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ spin_lock_init(&dev->buf_lock);
+ mutex_init(&dev->ctxlist_mutex);
+}
+
+void drm_legacy_destroy_members(struct drm_device *dev)
+{
+ mutex_destroy(&dev->ctxlist_mutex);
+}
+
+void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+ mutex_lock(&dev->struct_mutex);
+
+ drm_legacy_agp_clear(dev);
+
+ drm_legacy_sg_cleanup(dev);
+ drm_legacy_vma_flush(dev);
+ drm_legacy_dma_takedown(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ dev->sigdata.lock = NULL;
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+
+ DRM_DEBUG("lastclose completed\n");
+}
+
+void drm_master_legacy_init(struct drm_master *master)
+{
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 67a1a2ca7174..b70058e77a28 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -347,3 +347,22 @@ void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
}
+
+void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
+{
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
+ return;
+
+ /*
+ * Since the master is disappearing, so is the
+ * possibility to lock.
+	 */
+	mutex_lock(&dev->struct_mutex);
+ if (master->lock.hw_lock) {
+ if (dev->sigdata.lock == master->lock.hw_lock)
+ dev->sigdata.lock = NULL;
+ master->lock.hw_lock = NULL;
+ master->lock.file_priv = NULL;
+ wake_up_interruptible_all(&master->lock.lock_queue);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 40c4349cb939..132fef8ff1b6 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -35,6 +35,7 @@
#include <linux/highmem.h>
#include <linux/export.h>
+#include <xen/xen.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
@@ -150,15 +151,34 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
-u64 drm_get_max_iomem(void)
+bool drm_need_swiotlb(int dma_bits)
{
struct resource *tmp;
resource_size_t max_iomem = 0;
+ /*
+ * Xen paravirtual hosts require swiotlb regardless of requested dma
+ * transfer size.
+ *
+ * NOTE: Really, what it requires is use of the dma_alloc_coherent
+ * allocator used in ttm_dma_populate() instead of
+	 * ttm_populate_and_map_pages(), which bounce-buffers so heavily
+	 * under Xen that it leads to swiotlb buffer exhaustion.
+ */
+ if (xen_pv_domain())
+ return true;
+
+ /*
+ * Enforce dma_alloc_coherent when memory encryption is active as well
+ * for the same reasons as for Xen paravirtual hosts.
+ */
+ if (mem_encrypt_active())
+ return true;
+
for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
max_iomem = max(max_iomem, tmp->end);
}
- return max_iomem;
+ return max_iomem > ((u64)1 << dma_bits);
}
-EXPORT_SYMBOL(drm_get_max_iomem);
+EXPORT_SYMBOL(drm_need_swiotlb);
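A usage sketch for the new helper: callers that previously compared drm_get_max_iomem() against their DMA limit can now ask the question directly. The function name and the 40-bit DMA width are assumptions for illustration only.

static bool sketch_device_needs_swiotlb(void)
{
	/* 40 = the hypothetical device's DMA address width in bits. */
	return drm_need_swiotlb(40);
}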
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2b4f373736c7..8b4cd31ce7bd 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -106,25 +106,19 @@
static noinline void save_stack(struct drm_mm_node *node)
{
unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = STACKDEPTH,
- .skip = 1
- };
+ unsigned int n;
- save_stack_trace(&trace);
- if (trace.nr_entries != 0 &&
- trace.entries[trace.nr_entries-1] == ULONG_MAX)
- trace.nr_entries--;
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
/* May be called under spinlock, so avoid sleeping */
- node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+ node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}
static void show_leaks(struct drm_mm *mm)
{
struct drm_mm_node *node;
- unsigned long entries[STACKDEPTH];
+ unsigned long *entries;
+ unsigned int nr_entries;
char *buf;
buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -132,19 +126,14 @@ static void show_leaks(struct drm_mm *mm)
return;
list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = STACKDEPTH
- };
-
if (!node->stack) {
DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
node->start, node->size);
continue;
}
- depot_fetch_stack(node->stack, &trace);
- snprint_stack_trace(buf, BUFSZ, &trace, 0);
+ nr_entries = stack_depot_fetch(node->stack, &entries);
+ stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
node->start, node->size, buf);
}
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 4a1c2023ccf0..1a346ae1599d 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -297,8 +297,9 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
- prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS",
- 0);
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "FB_DAMAGE_CLIPS", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_fb_damage_clips = prop;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index a9005c1c2384..f32507e65b79 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -451,6 +451,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
}
static int set_property_atomic(struct drm_mode_object *obj,
+ struct drm_file *file_priv,
struct drm_property *prop,
uint64_t prop_value)
{
@@ -477,7 +478,7 @@ retry:
obj_to_connector(obj),
prop_value);
} else {
- ret = drm_atomic_set_property(state, obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
if (ret)
goto out;
ret = drm_atomic_commit(state);
@@ -520,7 +521,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
goto out_unref;
if (drm_drv_uses_atomic_modeset(property->dev))
- ret = set_property_atomic(arg_obj, property, arg->value);
+ ret = set_property_atomic(arg_obj, file_priv, property, arg->value);
else
ret = set_property_legacy(arg_obj, property, arg->value);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 869ac6f4671e..56f92a0bba62 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -655,22 +655,22 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
* @bus_flags: information about pixelclk, sync and DE polarity will be stored
* here
*
- * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and
- * DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_DRIVE_(POS|NEG)EDGE
+ * and DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
* found in @vm
*/
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
{
*bus_flags = 0;
if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
- *bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
- *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
- *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE;
+ *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
- *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE;
+ *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
if (vm->flags & DISPLAY_FLAGS_DE_LOW)
*bus_flags |= DRM_BUS_FLAG_DE_LOW;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 52e445bb1aa5..521aff99b08a 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -80,6 +80,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
+ .width = 1200,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -148,6 +154,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Ideapad D330 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 4cfb56893b7f..d6ad60ab0d38 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -960,6 +960,11 @@ retry:
if (ret)
goto out;
+ if (!drm_lease_held(file_priv, crtc->cursor->base.id)) {
+ ret = -EACCES;
+ goto out;
+ }
+
ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx);
goto out;
}
@@ -1062,6 +1067,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
plane = crtc->primary;
+ if (!drm_lease_held(file_priv, plane->base.id))
+ return -EACCES;
+
if (crtc->funcs->page_flip_target) {
u32 current_vblank;
int r;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 231e3f6d5f41..dc079efb3b0f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -504,6 +504,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
.size = obj->size,
.flags = flags,
.priv = obj,
+ .resv = obj->resv,
};
if (dev->driver->gem_prime_res_obj)
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 0e7fc3e7dfb4..f5cb0aabfe35 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -253,3 +253,31 @@ void drm_err(const char *format, ...)
va_end(args);
}
EXPORT_SYMBOL(drm_err);
+
+/**
+ * drm_print_regset32 - print the contents of registers to a
+ * &drm_printer stream.
+ *
+ * @p: the &drm printer
+ * @regset: the list of registers to print.
+ *
+ * Often in driver debug, it's useful to be able to capture the contents
+ * of registers either in the steady state using debugfs or at specific
+ * points during operation. This lets the driver have a
+ * single list of registers for both.
+ */
+void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset)
+{
+ int namelen = 0;
+ int i;
+
+ for (i = 0; i < regset->nregs; i++)
+ namelen = max(namelen, (int)strlen(regset->regs[i].name));
+
+ for (i = 0; i < regset->nregs; i++) {
+ drm_printf(p, "%*s = 0x%08x\n",
+ namelen, regset->regs[i].name,
+ readl(regset->base + regset->regs[i].offset));
+ }
+}
+EXPORT_SYMBOL(drm_print_regset32);
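A sketch of the intended usage (the register list, names and offsets are hypothetical; only drm_print_regset32() and the debugfs_regset32/debugfs_reg32 structures are real):

static const struct debugfs_reg32 sketch_regs[] = {
	{ .name = "CTRL",   .offset = 0x00 },
	{ .name = "STATUS", .offset = 0x04 },
};

static void sketch_dump_regs(struct drm_printer *p, void __iomem *mmio)
{
	struct debugfs_regset32 regset = {
		.regs  = sketch_regs,
		.nregs = ARRAY_SIZE(sketch_regs),
		.base  = mmio,
	};

	drm_print_regset32(p, &regset);
}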
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index e19525af0cce..3d400905100b 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -61,6 +61,7 @@ struct syncobj_wait_entry {
struct task_struct *task;
struct dma_fence *fence;
struct dma_fence_cb fence_cb;
+ u64 point;
};
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
@@ -95,6 +96,8 @@ EXPORT_SYMBOL(drm_syncobj_find);
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
+ struct dma_fence *fence;
+
if (wait->fence)
return;
@@ -103,11 +106,15 @@ static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
* have the lock, try one more time just to be sure we don't add a
* callback when a fence has already been set.
*/
- if (syncobj->fence)
- wait->fence = dma_fence_get(
- rcu_dereference_protected(syncobj->fence, 1));
- else
+ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+ dma_fence_put(fence);
list_add_tail(&wait->node, &syncobj->cb_list);
+ } else if (!fence) {
+ wait->fence = dma_fence_get_stub();
+ } else {
+ wait->fence = fence;
+ }
spin_unlock(&syncobj->lock);
}
@@ -123,6 +130,44 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
}
/**
+ * drm_syncobj_add_point - add a new timeline point to the syncobj
+ * @syncobj: sync object to add the timeline point to
+ * @chain: chain node to use to add the point
+ * @fence: fence to encapsulate in the chain node
+ * @point: sequence number to use for the point
+ *
+ * Add the chain node as a new timeline point to the syncobj.
+ */
+void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain,
+ struct dma_fence *fence,
+ uint64_t point)
+{
+ struct syncobj_wait_entry *cur, *tmp;
+ struct dma_fence *prev;
+
+ dma_fence_get(fence);
+
+ spin_lock(&syncobj->lock);
+
+ prev = drm_syncobj_fence_get(syncobj);
+	/*
+	 * Adding an out-of-order point to the timeline could cause the
+	 * payload returned from query_ioctl to be 0!
+	 */
+ if (prev && prev->seqno >= point)
+		DRM_ERROR("You are adding an unordered point to timeline!\n");
+ dma_fence_chain_init(chain, prev, fence, point);
+ rcu_assign_pointer(syncobj->fence, &chain->base);
+
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
+ syncobj_wait_syncobj_func(syncobj, cur);
+ spin_unlock(&syncobj->lock);
+
+ /* Walk the chain once to trigger garbage collection */
+ dma_fence_chain_for_each(fence, prev);
+ dma_fence_put(prev);
+}
+EXPORT_SYMBOL(drm_syncobj_add_point);
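A sketch of a caller, mirroring the transfer ioctl added later in this patch (the sketch_ name is an assumption): the chain node is allocated by the caller and consumed by the syncobj, while the fence gets its own reference inside drm_syncobj_add_point().

static int sketch_add_timeline_point(struct drm_syncobj *syncobj,
				     struct dma_fence *fence, u64 point)
{
	struct dma_fence_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return -ENOMEM;

	drm_syncobj_add_point(syncobj, chain, fence, point);
	return 0;
}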
+
+/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
* @fence: fence to install in sync file.
@@ -145,10 +190,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) {
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
- list_del_init(&cur->node);
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
syncobj_wait_syncobj_func(syncobj, cur);
- }
}
spin_unlock(&syncobj->lock);
@@ -171,6 +214,8 @@ static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
dma_fence_put(fence);
}
+/* 5s default for wait submission */
+#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
* drm_syncobj_find_fence - lookup and reference the fence in a sync object
* @file_private: drm file private pointer
@@ -191,16 +236,58 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
- int ret = 0;
+ struct syncobj_wait_entry wait;
+ u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
+ int ret;
if (!syncobj)
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- if (!*fence) {
+ drm_syncobj_put(syncobj);
+
+ if (*fence) {
+ ret = dma_fence_chain_find_seqno(fence, point);
+ if (!ret)
+ return 0;
+ dma_fence_put(*fence);
+ } else {
ret = -EINVAL;
}
- drm_syncobj_put(syncobj);
+
+ if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ return ret;
+
+ memset(&wait, 0, sizeof(wait));
+ wait.task = current;
+ wait.point = point;
+ drm_syncobj_fence_add_wait(syncobj, &wait);
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (wait.fence) {
+ ret = 0;
+ break;
+ }
+ if (timeout == 0) {
+ ret = -ETIME;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ timeout = schedule_timeout(timeout);
+ } while (1);
+
+ __set_current_state(TASK_RUNNING);
+ *fence = wait.fence;
+
+ if (wait.node.next)
+ drm_syncobj_remove_wait(syncobj, &wait);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
@@ -388,20 +475,19 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
int fd, u32 *handle)
{
struct drm_syncobj *syncobj;
- struct file *file;
+ struct fd f = fdget(fd);
int ret;
- file = fget(fd);
- if (!file)
+ if (!f.file)
return -EINVAL;
- if (file->f_op != &drm_syncobj_file_fops) {
- fput(file);
+ if (f.file->f_op != &drm_syncobj_file_fops) {
+ fdput(f);
return -EINVAL;
}
/* take a reference to put in the idr */
- syncobj = file->private_data;
+ syncobj = f.file->private_data;
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
@@ -416,7 +502,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
} else
drm_syncobj_put(syncobj);
- fput(file);
+ fdput(f);
return ret;
}
@@ -593,6 +679,80 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
&args->handle);
}
+static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
+ struct drm_syncobj_transfer *args)
+{
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence *fence;
+ struct dma_fence_chain *chain;
+ int ret;
+
+ timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+ if (!timeline_syncobj) {
+ return -ENOENT;
+ }
+ ret = drm_syncobj_find_fence(file_private, args->src_handle,
+ args->src_point, args->flags,
+ &fence);
+ if (ret)
+ goto err;
+ chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+ if (!chain) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
+err1:
+ dma_fence_put(fence);
+err:
+ drm_syncobj_put(timeline_syncobj);
+
+ return ret;
+}
+
+static int
+drm_syncobj_transfer_to_binary(struct drm_file *file_private,
+ struct drm_syncobj_transfer *args)
+{
+ struct drm_syncobj *binary_syncobj = NULL;
+ struct dma_fence *fence;
+ int ret;
+
+ binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+ if (!binary_syncobj)
+ return -ENOENT;
+ ret = drm_syncobj_find_fence(file_private, args->src_handle,
+ args->src_point, args->flags, &fence);
+ if (ret)
+ goto err;
+ drm_syncobj_replace_fence(binary_syncobj, fence);
+ dma_fence_put(fence);
+err:
+ drm_syncobj_put(binary_syncobj);
+
+ return ret;
+}
+int
+drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_transfer *args = data;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->dst_point)
+ ret = drm_syncobj_transfer_to_timeline(file_private, args);
+ else
+ ret = drm_syncobj_transfer_to_binary(file_private, args);
+
+ return ret;
+}
+
static void syncobj_wait_fence_func(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
@@ -605,13 +765,27 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
+ struct dma_fence *fence;
+
/* This happens inside the syncobj lock */
- wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
- lockdep_is_held(&syncobj->lock)));
+ fence = rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock));
+ dma_fence_get(fence);
+ if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+ dma_fence_put(fence);
+ return;
+ } else if (!fence) {
+ wait->fence = dma_fence_get_stub();
+ } else {
+ wait->fence = fence;
+ }
+
wake_up_process(wait->task);
+ list_del_init(&wait->node);
}
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ void __user *user_points,
uint32_t count,
uint32_t flags,
signed long timeout,
@@ -619,12 +793,27 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
{
struct syncobj_wait_entry *entries;
struct dma_fence *fence;
+ uint64_t *points;
uint32_t signaled_count, i;
- entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
- if (!entries)
+ points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+ if (points == NULL)
return -ENOMEM;
+ if (!user_points) {
+ memset(points, 0, count * sizeof(uint64_t));
+
+ } else if (copy_from_user(points, user_points,
+ sizeof(uint64_t) * count)) {
+ timeout = -EFAULT;
+ goto err_free_points;
+ }
+
+ entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+ if (!entries) {
+ timeout = -ENOMEM;
+ goto err_free_points;
+ }
/* Walk the list of sync objects and initialize entries. We do
* this up-front so that we can properly return -EINVAL if there is
* a syncobj with a missing fence and then never have the chance of
@@ -632,9 +821,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
*/
signaled_count = 0;
for (i = 0; i < count; ++i) {
+ struct dma_fence *fence;
+
entries[i].task = current;
- entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
- if (!entries[i].fence) {
+ entries[i].point = points[i];
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+ dma_fence_put(fence);
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
continue;
} else {
@@ -643,7 +836,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
}
}
- if (dma_fence_is_signaled(entries[i].fence)) {
+ if (fence)
+ entries[i].fence = fence;
+ else
+ entries[i].fence = dma_fence_get_stub();
+
+ if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+ dma_fence_is_signaled(entries[i].fence)) {
if (signaled_count == 0 && idx)
*idx = i;
signaled_count++;
@@ -676,7 +875,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (!fence)
continue;
- if (dma_fence_is_signaled(fence) ||
+ if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+ dma_fence_is_signaled(fence) ||
(!entries[i].fence_cb.func &&
dma_fence_add_callback(fence,
&entries[i].fence_cb,
@@ -721,6 +921,9 @@ cleanup_entries:
}
kfree(entries);
+err_free_points:
+ kfree(points);
+
return timeout;
}
@@ -731,7 +934,7 @@ cleanup_entries:
*
* Calculate the timeout in jiffies from an absolute time in sec/nsec.
*/
-static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
+signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
ktime_t abs_timeout, now;
u64 timeout_ns, timeout_jiffies64;
@@ -755,23 +958,38 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
return timeout_jiffies64 + 1;
}
+EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
static int drm_syncobj_array_wait(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
- struct drm_syncobj **syncobjs)
+ struct drm_syncobj_timeline_wait *timeline_wait,
+ struct drm_syncobj **syncobjs, bool timeline)
{
- signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ signed long timeout = 0;
uint32_t first = ~0;
- timeout = drm_syncobj_array_wait_timeout(syncobjs,
- wait->count_handles,
- wait->flags,
- timeout, &first);
- if (timeout < 0)
- return timeout;
-
- wait->first_signaled = first;
+ if (!timeline) {
+ timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ timeout = drm_syncobj_array_wait_timeout(syncobjs,
+ NULL,
+ wait->count_handles,
+ wait->flags,
+ timeout, &first);
+ if (timeout < 0)
+ return timeout;
+ wait->first_signaled = first;
+ } else {
+ timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
+ timeout = drm_syncobj_array_wait_timeout(syncobjs,
+ u64_to_user_ptr(timeline_wait->points),
+ timeline_wait->count_handles,
+ timeline_wait->flags,
+ timeout, &first);
+ if (timeout < 0)
+ return timeout;
+ timeline_wait->first_signaled = first;
+ }
return 0;
}
@@ -857,13 +1075,48 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
return ret;
ret = drm_syncobj_array_wait(dev, file_private,
- args, syncobjs);
+ args, NULL, syncobjs, false);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int
+drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_wait *args = data;
+ struct drm_syncobj **syncobjs;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_syncobj_array_wait(dev, file_private,
+ NULL, args, syncobjs, true);
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret;
}
+
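
For context, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (handled in the wait loop above) returns once a fence materializes at the requested point instead of waiting for it to signal. A hedged userspace sketch of the timeline wait follows; the struct drm_syncobj_timeline_wait fields mirror the ones read above, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT is assumed from the matching UAPI change, and drm_fd/handle are placeholders:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int wait_for_point(int drm_fd, uint32_t handle, uint64_t point,
			  int64_t abs_timeout_nsec)
{
	struct drm_syncobj_timeline_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)&handle;
	wait.points = (uintptr_t)&point;
	wait.count_handles = 1;
	wait.timeout_nsec = abs_timeout_nsec;	/* absolute CLOCK_MONOTONIC time */
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

	return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);
}
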
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
@@ -929,3 +1182,138 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
return ret;
}
+
+int
+drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_array *args = data;
+ struct drm_syncobj **syncobjs;
+ struct dma_fence_chain **chains;
+ uint64_t *points;
+ uint32_t i, j;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ points = kmalloc_array(args->count_handles, sizeof(*points),
+ GFP_KERNEL);
+ if (!points) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!u64_to_user_ptr(args->points)) {
+ memset(points, 0, args->count_handles * sizeof(uint64_t));
+ } else if (copy_from_user(points, u64_to_user_ptr(args->points),
+ sizeof(uint64_t) * args->count_handles)) {
+ ret = -EFAULT;
+ goto err_points;
+ }
+
+ chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
+ if (!chains) {
+ ret = -ENOMEM;
+ goto err_points;
+ }
+ for (i = 0; i < args->count_handles; i++) {
+ chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+ if (!chains[i]) {
+ for (j = 0; j < i; j++)
+ kfree(chains[j]);
+ ret = -ENOMEM;
+ goto err_chains;
+ }
+ }
+
+ for (i = 0; i < args->count_handles; i++) {
+ struct dma_fence *fence = dma_fence_get_stub();
+
+ drm_syncobj_add_point(syncobjs[i], chains[i],
+ fence, points[i]);
+ dma_fence_put(fence);
+ }
+err_chains:
+ kfree(chains);
+err_points:
+ kfree(points);
+out:
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
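
A kernel-side sketch of what signalling one timeline point amounts to: an already-signalled stub fence is chained in at that point. The chain node is allocated up front because drm_syncobj_add_point(), once called, is not expected to fail, which is why the ioctl above allocates every chain node before touching any syncobj. Only the helpers visible above are real; the wrapper name is illustrative:

#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>
#include <drm/drm_syncobj.h>

static int example_signal_point(struct drm_syncobj *syncobj, u64 point)
{
	struct dma_fence_chain *chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	struct dma_fence *stub;

	if (!chain)
		return -ENOMEM;
	stub = dma_fence_get_stub();		/* already signalled */
	drm_syncobj_add_point(syncobj, chain, stub, point);
	dma_fence_put(stub);
	return 0;
}
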
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint64_t __user *points = u64_to_user_ptr(args->points);
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++) {
+ struct dma_fence_chain *chain;
+ struct dma_fence *fence;
+ uint64_t point;
+
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ chain = to_dma_fence_chain(fence);
+ if (chain) {
+ struct dma_fence *iter, *last_signaled = NULL;
+
+ dma_fence_chain_for_each(iter, fence) {
+ if (!iter)
+ break;
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
+ if (!to_dma_fence_chain(last_signaled)->prev_seqno)
+					/* It is most likely that the timeline
+					 * has unordered points. */
+					break;
+ }
+ point = dma_fence_is_signaled(last_signaled) ?
+ last_signaled->seqno :
+ to_dma_fence_chain(last_signaled)->prev_seqno;
+ dma_fence_put(last_signaled);
+ } else {
+ point = 0;
+ }
+ ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
+ ret = ret ? -EFAULT : 0;
+ if (ret)
+ break;
+ }
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
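
To round off the drm_syncobj.c changes, a hedged sketch of reading back a timeline's last signalled point from userspace: struct drm_syncobj_timeline_array is the structure used above, while DRM_IOCTL_SYNCOBJ_QUERY and the drm_fd/handle values are assumptions drawn from the accompanying UAPI additions:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int query_last_signaled(int drm_fd, uint32_t handle, uint64_t *point)
{
	struct drm_syncobj_timeline_array args;

	memset(&args, 0, sizeof(args));
	args.handles = (uintptr_t)&handle;
	args.points = (uintptr_t)point;	/* kernel writes one u64 back here */
	args.count_handles = 1;

	return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}
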
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c3301046dfaa..10cf83d569e1 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -584,8 +584,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_ops;
break;
}
- /* fall through to _DRM_FRAME_BUFFER... */
#endif
+ /* fall through - to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
@@ -610,7 +610,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through to _DRM_SHM */
+ /* fall through - to _DRM_SHM */
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
@@ -646,6 +646,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
}
EXPORT_SYMBOL(drm_legacy_mmap);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
struct drm_vma_entry *vma, *vma_temp;
@@ -656,3 +657,4 @@ void drm_legacy_vma_flush(struct drm_device *dev)
kfree(vma);
}
}
+#endif
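
The new CONFIG_DRM_LEGACY guard normally pairs with a header-side stub so callers need no #ifdefs of their own; a sketch of that convention, not part of this diff:

#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev);
#else
static inline void drm_legacy_vma_flush(struct drm_device *dev)
{
	/* nothing to flush without legacy map support */
}
#endif
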
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index c20e6fe00cb3..79ac014701c8 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -239,14 +239,52 @@ fail:
}
EXPORT_SYMBOL(drm_writeback_connector_init);
+int drm_writeback_set_fb(struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb)
+{
+ WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+ if (!conn_state->writeback_job) {
+ conn_state->writeback_job =
+ kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+ if (!conn_state->writeback_job)
+ return -ENOMEM;
+
+ conn_state->writeback_job->connector =
+ drm_connector_to_writeback(conn_state->connector);
+ }
+
+ drm_framebuffer_assign(&conn_state->writeback_job->fb, fb);
+ return 0;
+}
+
+int drm_writeback_prepare_job(struct drm_writeback_job *job)
+{
+ struct drm_writeback_connector *connector = job->connector;
+ const struct drm_connector_helper_funcs *funcs =
+ connector->base.helper_private;
+ int ret;
+
+ if (funcs->prepare_writeback_job) {
+ ret = funcs->prepare_writeback_job(connector, job);
+ if (ret < 0)
+ return ret;
+ }
+
+ job->prepared = true;
+ return 0;
+}
+EXPORT_SYMBOL(drm_writeback_prepare_job);
+
/**
* drm_writeback_queue_job - Queue a writeback job for later signalling
* @wb_connector: The writeback connector to queue a job on
- * @job: The job to queue
+ * @conn_state: The connector state containing the job to queue
*
- * This function adds a job to the job_queue for a writeback connector. It
- * should be considered to take ownership of the writeback job, and so any other
- * references to the job must be cleared after calling this function.
+ * This function adds the job contained in @conn_state to the job_queue for a
+ * writeback connector. It takes ownership of the writeback job and sets the
+ * @conn_state->writeback_job to NULL, and so no access to the job may be
+ * performed by the caller after this function returns.
*
* Drivers must ensure that for a given writeback connector, jobs are queued in
* exactly the same order as they will be completed by the hardware (and
@@ -258,16 +296,36 @@ EXPORT_SYMBOL(drm_writeback_connector_init);
* See also: drm_writeback_signal_completion()
*/
void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
- struct drm_writeback_job *job)
+ struct drm_connector_state *conn_state)
{
+ struct drm_writeback_job *job;
unsigned long flags;
+ job = conn_state->writeback_job;
+ conn_state->writeback_job = NULL;
+
spin_lock_irqsave(&wb_connector->job_lock, flags);
list_add_tail(&job->list_entry, &wb_connector->job_queue);
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
}
EXPORT_SYMBOL(drm_writeback_queue_job);
+void drm_writeback_cleanup_job(struct drm_writeback_job *job)
+{
+ struct drm_writeback_connector *connector = job->connector;
+ const struct drm_connector_helper_funcs *funcs =
+ connector->base.helper_private;
+
+ if (job->prepared && funcs->cleanup_writeback_job)
+ funcs->cleanup_writeback_job(connector, job);
+
+ if (job->fb)
+ drm_framebuffer_put(job->fb);
+
+ kfree(job);
+}
+EXPORT_SYMBOL(drm_writeback_cleanup_job);
+
/*
* @cleanup_work: deferred cleanup of a writeback job
*
@@ -280,10 +338,9 @@ static void cleanup_work(struct work_struct *work)
struct drm_writeback_job *job = container_of(work,
struct drm_writeback_job,
cleanup_work);
- drm_framebuffer_put(job->fb);
- kfree(job);
-}
+ drm_writeback_cleanup_job(job);
+}
/**
* drm_writeback_signal_completion - Signal the completion of a writeback job
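
A hedged driver-side sketch of the reworked writeback flow: the framebuffer is attached to the connector state, the job is prepared, and queueing now consumes conn_state->writeback_job. The commit helper below is hypothetical; only the drm_writeback_* calls mirror the API above:

#include <drm/drm_writeback.h>

static void example_commit_writeback(struct drm_writeback_connector *wb_conn,
				     struct drm_connector_state *conn_state)
{
	if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
		return;

	if (drm_writeback_prepare_job(conn_state->writeback_job))
		return;

	/* Takes ownership of the job and NULLs conn_state->writeback_job. */
	drm_writeback_queue_job(wb_conn, conn_state);

	/* ...program the write-out hardware here, then call
	 * drm_writeback_signal_completion() once it finishes. */
}
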
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 18c27f795cf6..7eb7cf9c3fa8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -473,7 +473,6 @@ static struct drm_driver etnaviv_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_res_obj = etnaviv_gem_prime_res_obj,
.gem_prime_pin = etnaviv_gem_prime_pin,
.gem_prime_unpin = etnaviv_gem_prime_unpin,
.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
@@ -515,6 +514,9 @@ static int etnaviv_bind(struct device *dev)
}
drm->dev_private = priv;
+ dev->dma_parms = &priv->dma_parms;
+ dma_set_max_seg_size(dev, SZ_2G);
+
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
@@ -552,6 +554,8 @@ static void etnaviv_unbind(struct device *dev)
component_unbind_all(dev, drm);
+ dev->dma_parms = NULL;
+
drm->dev_private = NULL;
kfree(priv);
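
A minimal sketch of the DMA segment-size pattern introduced above, useful for any device that maps large buffers: hand the device a device_dma_parameters structure, then raise the maximum segment size so the DMA layer does not fall back to its 64 KiB default when building scatterlists. The my_* names are placeholders:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

struct my_priv {
	struct device_dma_parameters dma_parms;	/* must outlive DMA use */
};

static int my_bind(struct device *dev, struct my_priv *priv)
{
	dev->dma_parms = &priv->dma_parms;
	return dma_set_max_seg_size(dev, SZ_2G);
}
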
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index a6a7ded37ef1..8798423705e1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -42,6 +42,7 @@ struct etnaviv_file_private {
struct etnaviv_drm_private {
int num_gpus;
+ struct device_dma_parameters dma_parms;
struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
/* list of GEM objects: */
@@ -60,7 +61,6 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);
-struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5c48915f492d..e8778ebb72e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
+ if (!reservation_object_test_signaled_rcu(obj->resv,
write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ ret = reservation_object_wait_timeout_rcu(obj->resv,
write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
@@ -459,7 +459,7 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- struct reservation_object *robj = etnaviv_obj->resv;
+ struct reservation_object *robj = obj->resv;
struct reservation_object_list *fobj;
struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
@@ -549,8 +549,6 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
drm_gem_free_mmap_offset(obj);
etnaviv_obj->ops->release(etnaviv_obj);
- if (etnaviv_obj->resv == &etnaviv_obj->_resv)
- reservation_object_fini(&etnaviv_obj->_resv);
drm_gem_object_release(obj);
kfree(etnaviv_obj);
@@ -596,12 +594,8 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
etnaviv_obj->flags = flags;
etnaviv_obj->ops = ops;
- if (robj) {
- etnaviv_obj->resv = robj;
- } else {
- etnaviv_obj->resv = &etnaviv_obj->_resv;
- reservation_object_init(&etnaviv_obj->_resv);
- }
+ if (robj)
+ etnaviv_obj->base.resv = robj;
mutex_init(&etnaviv_obj->lock);
INIT_LIST_HEAD(&etnaviv_obj->vram_list);
@@ -628,24 +622,18 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
ret = drm_gem_object_init(dev, obj, size);
- if (ret == 0) {
- struct address_space *mapping;
-
- /*
- * Our buffers are kept pinned, so allocating them
- * from the MOVABLE zone is a really bad idea, and
- * conflicts with CMA. See comments above new_inode()
- * why this is required _and_ expected if you're
- * going to pin these pages.
- */
- mapping = obj->filp->f_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
- __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- }
-
if (ret)
goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the MOVABLE
+ * zone is a really bad idea, and conflicts with CMA. See comments
+ * above new_inode() why this is required _and_ expected if you're
+ * going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
etnaviv_gem_obj_add(dev, obj);
ret = drm_gem_handle_create(file, obj, handle);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index f0abb744ef95..753c458497d0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -47,10 +47,6 @@ struct etnaviv_gem_object {
struct sg_table *sgt;
void *vaddr;
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
-
struct list_head vram_list;
/* cache maintenance */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index f21529e635e3..00e8b6a817e3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -139,10 +139,3 @@ fail:
return ERR_PTR(ret);
}
-
-struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-
- return etnaviv_obj->resv;
-}
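
With the driver-private reservation object and the gem_prime_res_obj hook gone, fencing addresses the reservation object embedded in struct drm_gem_object, as the submit-path hunks below show. A minimal hedged sketch, assuming a shared slot was reserved beforehand via reservation_object_reserve_shared() as in the submit path:

#include <drm/drm_gem.h>
#include <linux/dma-fence.h>
#include <linux/reservation.h>

static void example_attach_fence(struct drm_gem_object *obj,
				 struct dma_fence *fence, bool write)
{
	if (write)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
}
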
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index b2fe3446bfbc..e054f09ac828 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -108,9 +108,9 @@ out_unlock:
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
if (submit->bos[i].flags & BO_LOCKED) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
- ww_mutex_unlock(&etnaviv_obj->resv->lock);
+ ww_mutex_unlock(&obj->resv->lock);
submit->bos[i].flags &= ~BO_LOCKED;
}
}
@@ -122,7 +122,7 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
retry:
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (slow_locked == i)
slow_locked = -1;
@@ -130,7 +130,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
+ ret = ww_mutex_lock_interruptible(&obj->resv->lock,
ticket);
if (ret == -EALREADY)
DRM_ERROR("BO at index %u already on submit list\n",
@@ -153,12 +153,12 @@ fail:
submit_unlock_object(submit, slow_locked);
if (ret == -EDEADLK) {
- struct etnaviv_gem_object *etnaviv_obj;
+ struct drm_gem_object *obj;
- etnaviv_obj = submit->bos[contended].obj;
+ obj = &submit->bos[contended].obj->base;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
+ ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
@@ -176,7 +176,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
- struct reservation_object *robj = bo->obj->resv;
+ struct reservation_object *robj = bo->obj->base.resv;
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
ret = reservation_object_reserve_shared(robj, 1);
@@ -207,13 +207,13 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
int i;
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(etnaviv_obj->resv,
+ reservation_object_add_excl_fence(obj->resv,
submit->out_fence);
else
- reservation_object_add_shared_fence(etnaviv_obj->resv,
+ reservation_object_add_shared_fence(obj->resv,
submit->out_fence);
submit_unlock_object(submit, i);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 6904535475de..72d01e873160 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -365,6 +365,7 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
gpu->identity.model, gpu->identity.revision);
+ gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
/*
* If there is a match in the HWDB, we aren't interested in the
* remaining register values, as they might be wrong.
@@ -412,7 +413,7 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
}
/* GC600 idle register reports zero bits where modules aren't present */
- if (gpu->identity.model == chipModel_GC600) {
+ if (gpu->identity.model == chipModel_GC600)
gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
VIVS_HI_IDLE_STATE_RA |
VIVS_HI_IDLE_STATE_SE |
@@ -421,9 +422,6 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
VIVS_HI_IDLE_STATE_PE |
VIVS_HI_IDLE_STATE_DE |
VIVS_HI_IDLE_STATE_FE;
- } else {
- gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
- }
etnaviv_hw_specs(gpu);
}
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5b4e0e8b23bc..73b318a7ef49 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -188,7 +188,7 @@ static void decon_setup_trigger(struct decon_context *ctx)
if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX,
DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0))
- DRM_ERROR("Cannot update sysreg.\n");
+ DRM_DEV_ERROR(ctx->dev, "Cannot update sysreg.\n");
}
static void decon_commit(struct exynos_drm_crtc *crtc)
@@ -356,7 +356,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
break;
}
- DRM_DEBUG_KMS("cpp = %u\n", fb->format->cpp[0]);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %u\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -561,8 +561,6 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int win, i, ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
ret = clk_prepare_enable(ctx->clks[i]);
if (ret < 0)
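
The exynos hunks here and below are a mechanical conversion from DRM_ERROR()/DRM_DEBUG_KMS() to the device-aware DRM_DEV_* macros, so every message is prefixed with the emitting device (a few DRM_INFO() calls are also promoted to DRM_DEV_ERROR() where the condition aborts the operation). An illustrative helper; the context struct is whichever per-device state carries a struct device *, as in the hunks above:

#include <drm/drm_print.h>

static void example_report(struct decon_context *ctx, int ret)
{
	if (ret < 0)
		DRM_DEV_ERROR(ctx->dev, "Cannot update sysreg: %d\n", ret);
	else
		DRM_DEV_DEBUG_KMS(ctx->dev, "sysreg updated\n");
}
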
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 381aa3d60e37..0217ee9a118d 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -99,7 +99,7 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
- DRM_DEBUG_KMS("vblank wait timed out.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -107,8 +107,6 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + WINCON(win));
@@ -315,7 +313,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
break;
}
- DRM_DEBUG_KMS("cpp = %d\n", fb->format->cpp[0]);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %d\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -422,9 +420,9 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));
- DRM_DEBUG_KMS("start addr = 0x%lx\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "start addr = 0x%lx\n",
(unsigned long)val);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
state->crtc.w, state->crtc.h);
val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
@@ -442,7 +440,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDOSD_B(win));
- DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD alpha */
@@ -622,7 +620,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
ret = decon_ctx_initialize(ctx, drm_dev);
if (ret) {
- DRM_ERROR("decon_ctx_initialize failed.\n");
+ DRM_DEV_ERROR(dev, "decon_ctx_initialize failed.\n");
return ret;
}
@@ -802,25 +800,29 @@ static int exynos7_decon_resume(struct device *dev)
ret = clk_prepare_enable(ctx->pclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->aclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->eclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->vclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 471242a5e580..b0288cf85701 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -83,7 +83,8 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_ERROR("failed to create a new display mode.\n");
+ DRM_DEV_ERROR(dp->dev,
+ "failed to create a new display mode.\n");
return num_modes;
}
@@ -111,7 +112,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
if (dp->ptn_bridge) {
ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
+ DRM_DEV_ERROR(dp->dev,
+ "Failed to attach bridge to drm\n");
bridge->next = NULL;
return ret;
}
@@ -147,7 +149,8 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
if (ret) {
- DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+ DRM_DEV_ERROR(dp->dev,
+ "failed: of_get_videomode() : %d\n", ret);
return ret;
}
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 3432c5ee9f0c..bef8bc3c8e00 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -62,7 +62,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
int ret;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
- DRM_ERROR("Device %s lacks support for IOMMU\n",
+ DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
dev_name(subdrv_dev));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index ae425c9a3f7b..6ea92173db9f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -77,7 +77,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_ERROR("failed to create a new display mode\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to create a new display mode\n");
return 0;
}
drm_display_mode_from_videomode(ctx->vm, mode);
@@ -108,7 +109,8 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
&exynos_dpi_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
if (ret) {
- DRM_ERROR("failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to initialize connector with drm\n");
return ret;
}
@@ -213,7 +215,8 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
ret = exynos_dpi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(encoder_to_dpi(encoder)->dev,
+ "failed to create connector ret = %d\n", ret);
drm_encoder_cleanup(encoder);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a4253dd55f86..63a4b5074a99 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1483,7 +1483,8 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -1527,7 +1528,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
int ret = exynos_dsi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 1f11ab0f8e9d..832d22f57b4b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -45,7 +45,8 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
* supported without IOMMU.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
+ DRM_DEV_ERROR(drm_dev->dev,
+ "Non-contiguous GEM memory is not supported.\n");
return -EINVAL;
}
@@ -83,7 +84,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
- DRM_ERROR("failed to initialize framebuffer\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to initialize framebuffer\n");
goto err;
}
@@ -113,7 +115,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
exynos_gem[i] = exynos_drm_gem_get(file_priv,
mode_cmd->handles[i]);
if (!exynos_gem[i]) {
- DRM_ERROR("failed to lookup gem object\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to lookup gem object\n");
ret = -ENOENT;
goto err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index c30dd88cdb25..724cb52a374a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -55,7 +55,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
exynos_gem->dma_addr, exynos_gem->size,
exynos_gem->dma_attrs);
if (ret < 0) {
- DRM_ERROR("failed to mmap.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev), "failed to mmap.\n");
return ret;
}
@@ -83,22 +83,22 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- DRM_ERROR("failed to allocate fb info.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev),
+ "failed to allocate fb info.\n");
return PTR_ERR(fbi);
}
- fbi->par = helper;
fbi->fbops = &exynos_drm_fb_ops;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
nr_pages = exynos_gem->size >> PAGE_SHIFT;
exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (!exynos_gem->kvaddr) {
- DRM_ERROR("failed to map pages to kernel space.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev),
+ "failed to map pages to kernel space.\n");
return -EIO;
}
@@ -122,9 +122,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
unsigned long size;
int ret;
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
+ DRM_DEV_DEBUG_KMS(dev->dev,
+ "surface width(%d), height(%d) and bpp(%d\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -154,7 +155,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
helper->fb =
exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
- DRM_ERROR("failed to create drm framebuffer.\n");
+ DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto err_destroy_gem;
}
@@ -203,20 +204,23 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
if (ret < 0) {
- DRM_ERROR("failed to initialize drm fb helper.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to initialize drm fb helper.\n");
goto err_init;
}
ret = drm_fb_helper_single_add_all_connectors(helper);
if (ret < 0) {
- DRM_ERROR("failed to register drm_fb_helper_connector.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to register drm_fb_helper_connector.\n");
goto err_setup;
}
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
- DRM_ERROR("failed to set up hw configuration.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to set up hw configuration.\n");
goto err_setup;
}
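
One non-mechanical change in this file: the manual fbi->par assignment plus the drm_fb_helper_fill_fix()/drm_fb_helper_fill_var() pair collapse into a single drm_fb_helper_fill_info() call, which derives both the fixed and variable screen info (and fbi->par) from the helper and the requested surface sizes. A small hedged sketch; the wrapper name is illustrative:

#include <drm/drm_fb_helper.h>

static void example_setup_fbi(struct fb_info *fbi,
			      struct drm_fb_helper *helper,
			      struct drm_fb_helper_surface_size *sizes)
{
	fbi->fbops = &exynos_drm_fb_ops;	/* driver's fb_ops, as above */
	drm_fb_helper_fill_info(fbi, helper, sizes);
}
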
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 90dfea0aec4d..c50b0f9270a4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -186,7 +186,7 @@ static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable)
@@ -201,7 +201,7 @@ static void fimc_mask_irq(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable) {
@@ -225,15 +225,16 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
EXYNOS_CISTATUS_OVFICR;
- DRM_DEBUG_KMS("flag[0x%x]\n", flag);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "flag[0x%x]\n", flag);
if (status & flag) {
fimc_set_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
- dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n",
- ctx->id, status);
+ DRM_DEV_ERROR(ctx->dev,
+ "occurred overflow at %d, status 0x%x.\n",
+ ctx->id, status);
return true;
}
@@ -246,7 +247,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
cfg = fimc_read(ctx, EXYNOS_CISTATUS);
- DRM_DEBUG_KMS("cfg[0x%x]\n", cfg);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]\n", cfg);
if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
return false;
@@ -268,17 +269,17 @@ static int fimc_get_buf_id(struct fimc_context *ctx)
if (frame_cnt == 0)
frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
- DRM_DEBUG_KMS("present[%d]before[%d]\n",
- EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
- EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+ DRM_DEV_DEBUG_KMS(ctx->dev, "present[%d]before[%d]\n",
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
if (frame_cnt == 0) {
- DRM_ERROR("failed to get frame count.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get frame count.\n");
return -EIO;
}
buf_id = frame_cnt - 1;
- DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]\n", buf_id);
return buf_id;
}
@@ -287,7 +288,7 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
if (enable)
@@ -302,7 +303,7 @@ static void fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
@@ -367,7 +368,7 @@ static void fimc_src_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
@@ -420,7 +421,7 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg1, cfg2;
- DRM_DEBUG_KMS("rotation[%x]\n", rotation);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "rotation[%x]\n", rotation);
cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
@@ -478,10 +479,11 @@ static void fimc_set_window(struct fimc_context *ctx,
v1 = buf->rect.y;
v2 = buf->buf.height - buf->rect.h - buf->rect.y;
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
- buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
- real_width, buf->buf.height);
- DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
+ real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1,
+ v2);
/*
* set window offset 1, 2 size
@@ -506,7 +508,8 @@ static void fimc_src_set_size(struct fimc_context *ctx,
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hsize[%d]vsize[%d]\n", real_width,
+ buf->buf.height);
/* original size */
cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
@@ -514,8 +517,8 @@ static void fimc_src_set_size(struct fimc_context *ctx,
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
- buf->rect.w, buf->rect.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x,
+ buf->rect.y, buf->rect.w, buf->rect.h);
/* set input DMA image size */
cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
@@ -560,7 +563,7 @@ static void fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
@@ -631,7 +634,7 @@ static void fimc_dst_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = fimc_read(ctx, EXYNOS_CIEXTEN);
@@ -691,7 +694,7 @@ static void fimc_dst_set_transf(struct fimc_context *ctx, unsigned int rotation)
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg;
- DRM_DEBUG_KMS("rotation[0x%x]\n", rotation);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "rotation[0x%x]\n", rotation);
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
@@ -775,19 +778,20 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
pre_dst_width = src_w >> hfactor;
pre_dst_height = src_h >> vfactor;
- DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n",
- pre_dst_width, pre_dst_height);
- DRM_DEBUG_KMS("hfactor[%d]vfactor[%d]\n", hfactor, vfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_dst_width[%d]pre_dst_height[%d]\n",
+ pre_dst_width, pre_dst_height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hfactor[%d]vfactor[%d]\n", hfactor,
+ vfactor);
sc->hratio = (src_w << 14) / (dst_w << hfactor);
sc->vratio = (src_h << 14) / (dst_h << vfactor);
sc->up_h = (dst_w >= src_w) ? true : false;
sc->up_v = (dst_h >= src_h) ? true : false;
- DRM_DEBUG_KMS("hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
- sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ sc->hratio, sc->vratio, sc->up_h, sc->up_v);
shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
- DRM_DEBUG_KMS("shfactor[%d]\n", shfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "shfactor[%d]\n", shfactor);
cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) |
@@ -805,10 +809,10 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
{
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
- sc->range, sc->bypass, sc->up_h, sc->up_v);
- DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n",
- sc->hratio, sc->vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]\n",
+ sc->hratio, sc->vratio);
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
@@ -846,7 +850,8 @@ static void fimc_dst_set_size(struct fimc_context *ctx,
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hsize[%d]vsize[%d]\n", real_width,
+ buf->buf.height);
/* original size */
cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
@@ -854,8 +859,9 @@ static void fimc_dst_set_size(struct fimc_context *ctx,
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
- buf->rect.w, buf->rect.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x,
+ buf->rect.y,
+ buf->rect.w, buf->rect.h);
/* CSC ITU */
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
@@ -905,7 +911,7 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
u32 buf_num;
u32 cfg;
- DRM_DEBUG_KMS("buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
spin_lock_irqsave(&ctx->lock, flags);
@@ -945,7 +951,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
struct fimc_context *ctx = dev_id;
int buf_id;
- DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fimc id[%d]\n", ctx->id);
fimc_clear_irq(ctx);
if (fimc_check_ovf(ctx))
@@ -958,7 +964,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
if (buf_id < 0)
return IRQ_HANDLED;
- DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]\n", buf_id);
if (ctx->task) {
struct exynos_drm_ipp_task *task = ctx->task;
@@ -1128,9 +1134,10 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
ctx->formats, ctx->num_formats, "fimc");
@@ -1147,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
}
@@ -1380,7 +1387,7 @@ static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
return 0;
}
@@ -1389,7 +1396,7 @@ static int fimc_runtime_resume(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
}
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 786a8ee6f10f..8039e1a3671d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -315,7 +315,7 @@ static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
- DRM_DEBUG_KMS("vblank wait timed out.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
@@ -350,8 +350,6 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
struct fimd_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
/* Hardware is in unknown state, so ensure it gets enabled properly */
pm_runtime_get_sync(ctx->dev);
@@ -400,7 +398,7 @@ static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
u32 clkdiv;
if (mode->clock == 0) {
- DRM_INFO("Mode has zero clock value.\n");
+ DRM_DEV_ERROR(ctx->dev, "Mode has zero clock value.\n");
return -EINVAL;
}
@@ -416,15 +414,17 @@ static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
lcd_rate = clk_get_rate(ctx->lcd_clk);
if (2 * lcd_rate < ideal_clk) {
- DRM_INFO("sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
- lcd_rate, ideal_clk);
+ DRM_DEV_ERROR(ctx->dev,
+ "sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
+ lcd_rate, ideal_clk);
return -EINVAL;
}
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk);
if (clkdiv >= 0x200) {
- DRM_INFO("requested pixel clock(%lu) too low\n", ideal_clk);
+ DRM_DEV_ERROR(ctx->dev, "requested pixel clock(%lu) too low\n",
+ ideal_clk);
return -EINVAL;
}
@@ -481,7 +481,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x3 << driver_data->lcdblk_vt_shift,
0x1 << driver_data->lcdblk_vt_shift)) {
- DRM_ERROR("Failed to update sysreg for I80 i/f.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for I80 i/f.\n");
return;
}
} else {
@@ -525,7 +526,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x1 << driver_data->lcdblk_bypass_shift,
0x1 << driver_data->lcdblk_bypass_shift)) {
- DRM_ERROR("Failed to update sysreg for bypass setting.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for bypass setting.\n");
return;
}
@@ -537,7 +539,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x1 << driver_data->lcdblk_mic_bypass_shift,
0x1 << driver_data->lcdblk_mic_bypass_shift)) {
- DRM_ERROR("Failed to update sysreg for bypass mic.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for bypass mic.\n");
return;
}
@@ -814,10 +817,11 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
- DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)dma_addr, val, size);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- state->crtc.w, state->crtc.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
+ (unsigned long)dma_addr, val, size);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
+ state->crtc.w, state->crtc.h);
/* buffer size */
buf_offsize = pitch - (state->crtc.w * cpp);
@@ -847,8 +851,9 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDOSD_B(win));
- DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- state->crtc.x, state->crtc.y, last_x, last_y);
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD size */
if (win != 3 && win != 4) {
@@ -858,7 +863,8 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
val = state->crtc.w * state->crtc.h;
writel(val, ctx->regs + offset);
- DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "osd size = 0x%x\n",
+ (unsigned int)val);
}
fimd_win_set_pixfmt(ctx, win, fb, state->src.w);
@@ -1252,13 +1258,17 @@ static int exynos_fimd_resume(struct device *dev)
ret = clk_prepare_enable(ctx->bus_clk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the bus clk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->lcd_clk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the lcd clk [%d]\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 24c536d6d9cf..c20b3a759370 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -429,7 +429,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
int ret;
if (!size) {
- DRM_ERROR("invalid userptr size.\n");
+ DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n");
return ERR_PTR(-EINVAL);
}
@@ -482,7 +482,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
g2d_userptr->vec);
if (ret != npages) {
- DRM_ERROR("failed to get user pages from userptr.\n");
+ DRM_DEV_ERROR(g2d->dev,
+ "failed to get user pages from userptr.\n");
if (ret < 0)
goto err_destroy_framevec;
ret = -EFAULT;
@@ -503,7 +504,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
frame_vector_pages(g2d_userptr->vec),
npages, offset, size, GFP_KERNEL);
if (ret < 0) {
- DRM_ERROR("failed to get sgt from pages.\n");
+ DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
goto err_free_sgt;
}
@@ -511,7 +512,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL)) {
- DRM_ERROR("failed to map sgt with dma region.\n");
+ DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
ret = -ENOMEM;
goto err_sg_free_table;
}
@@ -560,7 +561,7 @@ static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
g2d->current_pool = 0;
}
-static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
+static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset)
{
enum g2d_reg_type reg_type;
@@ -593,7 +594,8 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
break;
default:
reg_type = REG_TYPE_NONE;
- DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
+ DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n",
+ reg_offset);
break;
}
@@ -627,9 +629,10 @@ static unsigned long g2d_get_buf_bpp(unsigned int format)
return bpp;
}
-static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
- enum g2d_reg_type reg_type,
- unsigned long size)
+static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d,
+ struct g2d_buf_desc *buf_desc,
+ enum g2d_reg_type reg_type,
+ unsigned long size)
{
int width, height;
unsigned long bpp, last_pos;
@@ -644,14 +647,15 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
/* This check also makes sure that right_x > left_x. */
width = (int)buf_desc->right_x - (int)buf_desc->left_x;
if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
- DRM_ERROR("width[%d] is out of range!\n", width);
+ DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width);
return false;
}
/* This check also makes sure that bottom_y > top_y. */
height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
- DRM_ERROR("height[%d] is out of range!\n", height);
+ DRM_DEV_ERROR(g2d->dev,
+ "height[%d] is out of range!\n", height);
return false;
}
@@ -670,8 +674,8 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
*/
if (last_pos >= size) {
- DRM_ERROR("last engine access position [%lu] "
- "is out of range [%lu]!\n", last_pos, size);
+ DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
+ "is out of range [%lu]!\n", last_pos, size);
return false;
}
@@ -701,7 +705,7 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
offset = cmdlist->data[reg_pos];
handle = cmdlist->data[reg_pos + 1];
- reg_type = g2d_get_reg_type(offset);
+ reg_type = g2d_get_reg_type(g2d, offset);
if (reg_type == REG_TYPE_NONE) {
ret = -EFAULT;
goto err;
@@ -718,7 +722,7 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc,
+ if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
reg_type, exynos_gem->size)) {
exynos_drm_gem_put(exynos_gem);
ret = -EFAULT;
@@ -736,8 +740,9 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
- g2d_userptr.size)) {
+ if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
+ reg_type,
+ g2d_userptr.size)) {
ret = -EFAULT;
goto err;
}
@@ -845,7 +850,7 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d,
*
* Has to be called under runqueue lock.
*/
-static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file* file)
+static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
{
struct g2d_runqueue_node *node, *n;
@@ -1044,7 +1049,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (!for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
/* check userptr buffer type. */
if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
@@ -1058,7 +1063,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
buf_desc->stride = cmdlist->data[index + 1];
@@ -1068,7 +1073,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1080,7 +1085,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1093,7 +1098,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index df66c383a877..a55f5ac41bf3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -29,7 +29,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
int ret = -ENOMEM;
if (exynos_gem->dma_addr) {
- DRM_DEBUG_KMS("already allocated.\n");
+ DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
return 0;
}
@@ -61,7 +61,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL | __GFP_ZERO);
if (!exynos_gem->pages) {
- DRM_ERROR("failed to allocate pages.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
return -ENOMEM;
}
@@ -69,7 +69,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
&exynos_gem->dma_addr, GFP_KERNEL,
exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
- DRM_ERROR("failed to allocate buffer.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
goto err_free;
}
@@ -77,20 +77,20 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
exynos_gem->dma_addr, exynos_gem->size,
exynos_gem->dma_attrs);
if (ret < 0) {
- DRM_ERROR("failed to get sgtable.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
goto err_dma_free;
}
if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
nr_pages)) {
- DRM_ERROR("invalid sgtable.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
ret = -EINVAL;
goto err_sgt_free;
}
sg_free_table(&sgt);
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
return 0;
@@ -111,11 +111,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
struct drm_device *dev = exynos_gem->base.dev;
if (!exynos_gem->dma_addr) {
- DRM_DEBUG_KMS("dma_addr is invalid.\n");
+ DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
@@ -139,7 +139,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
if (ret)
return ret;
- DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put_unlocked(obj);
@@ -151,7 +151,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
struct drm_gem_object *obj = &exynos_gem->base;
- DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
+ obj->handle_count);
/*
* do not release memory region from exporter.
@@ -186,7 +187,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initialize gem object\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
kfree(exynos_gem);
return ERR_PTR(ret);
}
@@ -198,7 +199,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
- DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
+ DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);
return exynos_gem;
}
@@ -211,12 +212,13 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
int ret;
if (flags & ~(EXYNOS_BO_MASK)) {
- DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
+ DRM_DEV_ERROR(dev->dev,
+ "invalid GEM buffer flags: %u\n", flags);
return ERR_PTR(-EINVAL);
}
if (!size) {
- DRM_ERROR("invalid GEM buffer size: %lu\n", size);
+ DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
return ERR_PTR(-EINVAL);
}
@@ -325,7 +327,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
+ DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
return -EINVAL;
}
@@ -408,7 +410,8 @@ static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
int ret;
- DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
+ exynos_gem->flags);
/* non-cachable as default. */
if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index f048d97fe9e2..0bfb5e9f6e91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -395,7 +395,7 @@ static int gsc_sw_reset(struct gsc_context *ctx)
}
if (cfg) {
- DRM_ERROR("failed to reset gsc h/w.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to reset gsc h/w.\n");
return -EBUSY;
}
@@ -422,8 +422,8 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n",
- enable, overflow, done);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]overflow[%d]level[%d]\n",
+ enable, overflow, done);
cfg = gsc_read(GSC_IRQ);
cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
@@ -451,7 +451,7 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = gsc_read(GSC_IN_CON);
cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
@@ -638,7 +638,7 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = gsc_read(GSC_OUT_CON);
cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
@@ -706,12 +706,13 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
gsc_write(cfg, GSC_OUT_CON);
}
-static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+static int gsc_get_ratio_shift(struct gsc_context *ctx, u32 src, u32 dst,
+ u32 *ratio)
{
- DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "src[%d]dst[%d]\n", src, dst);
if (src >= dst * 8) {
- DRM_ERROR("failed to make ratio and shift.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to make ratio and shift.\n");
return -EINVAL;
} else if (src >= dst * 4)
*ratio = 4;
@@ -759,31 +760,31 @@ static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
dst_h = dst->h;
}
- ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ ret = gsc_get_ratio_shift(ctx, src_w, dst_w, &sc->pre_hratio);
if (ret) {
- dev_err(ctx->dev, "failed to get ratio horizontal.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get ratio horizontal.\n");
return ret;
}
- ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ ret = gsc_get_ratio_shift(ctx, src_h, dst_h, &sc->pre_vratio);
if (ret) {
- dev_err(ctx->dev, "failed to get ratio vertical.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get ratio vertical.\n");
return ret;
}
- DRM_DEBUG_KMS("pre_hratio[%d]pre_vratio[%d]\n",
- sc->pre_hratio, sc->pre_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_hratio[%d]pre_vratio[%d]\n",
+ sc->pre_hratio, sc->pre_vratio);
sc->main_hratio = (src_w << 16) / dst_w;
sc->main_vratio = (src_h << 16) / dst_h;
- DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
- sc->main_hratio, sc->main_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "main_hratio[%ld]main_vratio[%ld]\n",
+ sc->main_hratio, sc->main_vratio);
gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
&sc->pre_shfactor);
- DRM_DEBUG_KMS("pre_shfactor[%d]\n", sc->pre_shfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_shfactor[%d]\n", sc->pre_shfactor);
cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
GSC_PRESC_H_RATIO(sc->pre_hratio) |
@@ -849,8 +850,8 @@ static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
{
u32 cfg;
- DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
- sc->main_hratio, sc->main_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "main_hratio[%ld]main_vratio[%ld]\n",
+ sc->main_hratio, sc->main_vratio);
gsc_set_h_coef(ctx, sc->main_hratio);
cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
@@ -916,7 +917,7 @@ static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
if (cfg & (mask << i))
buf_num--;
- DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_num[%d]\n", buf_num);
return buf_num;
}
@@ -963,7 +964,7 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
u32 cfg, curr_index, i;
u32 buf_id = GSC_MAX_SRC;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
curr_index = GSC_IN_CURR_GET_INDEX(cfg);
@@ -975,11 +976,11 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
}
}
- DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
- curr_index, buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+ curr_index, buf_id);
if (buf_id == GSC_MAX_SRC) {
- DRM_ERROR("failed to get in buffer index.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get in buffer index.\n");
return -EINVAL;
}
@@ -993,7 +994,7 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
u32 cfg, curr_index, i;
u32 buf_id = GSC_MAX_DST;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
@@ -1006,14 +1007,14 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
}
if (buf_id == GSC_MAX_DST) {
- DRM_ERROR("failed to get out buffer index.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get out buffer index.\n");
return -EINVAL;
}
gsc_dst_set_buf_seq(ctx, buf_id, false);
- DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
- curr_index, buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+ curr_index, buf_id);
return buf_id;
}
@@ -1024,7 +1025,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
u32 status;
int err = 0;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
status = gsc_read(GSC_IRQ);
if (status & GSC_IRQ_STATUS_OR_IRQ) {
@@ -1042,8 +1043,8 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
src_buf_id = gsc_get_src_buf_index(ctx);
dst_buf_id = gsc_get_dst_buf_index(ctx);
- DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", src_buf_id,
- dst_buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id_src[%d]buf_id_dst[%d]\n",
+ src_buf_id, dst_buf_id);
if (src_buf_id < 0 || dst_buf_id < 0)
err = -EINVAL;
@@ -1169,9 +1170,10 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
+	ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
ctx->formats, ctx->num_formats, "gsc");
@@ -1188,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
}
@@ -1324,7 +1326,7 @@ static int __maybe_unused gsc_runtime_suspend(struct device *dev)
struct gsc_context *ctx = get_gsc_context(dev);
int i;
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
for (i = ctx->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(ctx->clocks[i]);
@@ -1337,7 +1339,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
struct gsc_context *ctx = get_gsc_context(dev);
int i, ret;
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
for (i = 0; i < ctx->num_clocks; i++) {
ret = clk_prepare_enable(ctx->clocks[i]);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 23226a0212e8..c862099723a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -43,7 +43,7 @@ static LIST_HEAD(ipp_list);
* Returns:
* Zero on success, error code on failure.
*/
-int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
+int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name)
@@ -67,7 +67,7 @@ int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
list_add_tail(&ipp->head, &ipp_list);
ipp->id = num_ipp++;
- DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id);
+ DRM_DEV_DEBUG_DRIVER(dev, "Registered ipp %d\n", ipp->id);
return 0;
}
@@ -77,7 +77,7 @@ int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
* @dev: DRM device
* @ipp: ipp module
*/
-void exynos_drm_ipp_unregister(struct drm_device *dev,
+void exynos_drm_ipp_unregister(struct device *dev,
struct exynos_drm_ipp *ipp)
{
WARN_ON(ipp->task);
@@ -268,7 +268,7 @@ static inline struct exynos_drm_ipp_task *
task->src.rect.h = task->dst.rect.h = UINT_MAX;
task->transform.rotation = DRM_MODE_ROTATE_0;
- DRM_DEBUG_DRIVER("Allocated task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
return task;
}
@@ -335,7 +335,9 @@ static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
size -= map[i].size;
}
- DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Got task %pK configuration from userspace\n",
+ task);
return 0;
}
@@ -389,12 +391,12 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
- DRM_DEBUG_DRIVER("Freeing task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);
exynos_drm_ipp_task_release_buf(&task->src);
exynos_drm_ipp_task_release_buf(&task->dst);
if (task->event)
- drm_event_cancel_free(ipp->dev, &task->event->base);
+ drm_event_cancel_free(ipp->drm_dev, &task->event->base);
kfree(task);
}
@@ -553,8 +555,9 @@ static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
DRM_EXYNOS_IPP_FORMAT_DESTINATION);
if (!fmt) {
- DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
- buf == src ? "src" : "dst");
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: %s format not supported\n",
+ task, buf == src ? "src" : "dst");
return -EINVAL;
}
@@ -603,7 +606,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
bool rotate = (rotation != DRM_MODE_ROTATE_0);
bool scale = false;
- DRM_DEBUG_DRIVER("Checking task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);
if (src->rect.w == UINT_MAX)
src->rect.w = src->buf.width;
@@ -618,8 +621,9 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
src->rect.y + src->rect.h > (src->buf.height) ||
dst->rect.x + dst->rect.w > (dst->buf.width) ||
dst->rect.y + dst->rect.h > (dst->buf.height)) {
- DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n",
- task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: defined area is outside provided buffers\n",
+ task);
return -EINVAL;
}
@@ -635,7 +639,8 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
src->buf.fourcc != dst->buf.fourcc)) {
- DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
+ task);
return -EINVAL;
}
@@ -647,7 +652,8 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
if (ret)
return ret;
- DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
+ task);
return ret;
}
@@ -658,20 +664,26 @@ static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
int ret = 0;
- DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
+ task);
ret = exynos_drm_ipp_task_setup_buffer(src, filp);
if (ret) {
- DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: src buffer setup failed\n",
+ task);
return ret;
}
ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
if (ret) {
- DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: dst buffer setup failed\n",
+ task);
return ret;
}
- DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
+ task);
return ret;
}
@@ -691,7 +703,7 @@ static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- ret = drm_event_reserve_init(task->dev, file_priv, &e->base,
+ ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
&e->event.base);
if (ret)
goto free;
@@ -712,7 +724,7 @@ static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
- drm_send_event(task->dev, &task->event->base);
+ drm_send_event(task->ipp->drm_dev, &task->event->base);
}
static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
@@ -749,7 +761,8 @@ void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
struct exynos_drm_ipp *ipp = task->ipp;
unsigned long flags;
- DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
+ ipp->id, task, ret);
spin_lock_irqsave(&ipp->lock, flags);
if (ipp->task == task)
@@ -773,7 +786,8 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
unsigned long flags;
int ret;
- DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, try to run new task\n",
+ ipp->id);
spin_lock_irqsave(&ipp->lock, flags);
@@ -789,7 +803,9 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
spin_unlock_irqrestore(&ipp->lock, flags);
- DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev,
+ "ipp: %d, selected task %pK to run\n", ipp->id,
+ task);
ret = ipp->funcs->commit(ipp, task);
if (ret)
@@ -897,15 +913,16 @@ int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
* then freed after exynos_drm_ipp_task_done()
*/
if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
- DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n",
- ipp->id, task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev,
+ "ipp: %d, nonblocking processing task %pK\n",
+ ipp->id, task);
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
exynos_drm_ipp_schedule_task(task->ipp, task);
ret = 0;
} else {
- DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id,
- task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
+ ipp->id, task);
exynos_drm_ipp_schedule_task(ipp, task);
ret = wait_event_interruptible(ipp->done_wq,
task->flags & DRM_EXYNOS_IPP_TASK_DONE);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 0b27d4a9bf94..5524c457a947 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -54,7 +54,8 @@ struct exynos_drm_ipp_funcs {
* struct exynos_drm_ipp - central picture processor module structure
*/
struct exynos_drm_ipp {
- struct drm_device *dev;
+ struct drm_device *drm_dev;
+ struct device *dev;
struct list_head head;
unsigned int id;
@@ -85,7 +86,7 @@ struct exynos_drm_ipp_buffer {
* has to be performed by the picture processor hardware module
*/
struct exynos_drm_ipp_task {
- struct drm_device *dev;
+ struct device *dev;
struct exynos_drm_ipp *ipp;
struct list_head head;
@@ -129,11 +130,11 @@ struct exynos_drm_ipp_formats {
#define IPP_SCALE_LIMIT(val...) \
.type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE), val
-int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
+int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name);
-void exynos_drm_ipp_unregister(struct drm_device *dev,
+void exynos_drm_ipp_unregister(struct device *dev,
struct exynos_drm_ipp *ipp);
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index dd02e8a323ef..d1c8411ae7d4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -113,7 +113,8 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
ret = regmap_read(mic->sysreg, DSD_CFG_MUX, &val);
if (ret) {
- DRM_ERROR("mic: Failed to read system register\n");
+ DRM_DEV_ERROR(mic->dev,
+ "mic: Failed to read system register\n");
return;
}
@@ -129,7 +130,8 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
- DRM_ERROR("mic: Failed to read system register\n");
+ DRM_DEV_ERROR(mic->dev,
+ "mic: Failed to read system register\n");
}
static int mic_sw_reset(struct exynos_mic *mic)
@@ -190,7 +192,7 @@ static void mic_set_output_timing(struct exynos_mic *mic)
struct videomode vm = mic->vm;
u32 reg, bs_size_2d;
- DRM_DEBUG("w: %u, h: %u\n", vm.hactive, vm.vactive);
+ DRM_DEV_DEBUG(mic->dev, "w: %u, h: %u\n", vm.hactive, vm.vactive);
bs_size_2d = ((vm.hactive >> 2) << 1) + (vm.vactive % 4);
reg = MIC_BS_SIZE_2D(bs_size_2d);
writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_2);
@@ -274,7 +276,7 @@ static void mic_pre_enable(struct drm_bridge *bridge)
ret = mic_sw_reset(mic);
if (ret) {
- DRM_ERROR("Failed to reset\n");
+ DRM_DEV_ERROR(mic->dev, "Failed to reset\n");
goto turn_off;
}
@@ -354,8 +356,8 @@ static int exynos_mic_resume(struct device *dev)
for (i = 0; i < NUM_CLKS; i++) {
ret = clk_prepare_enable(mic->clks[i]);
if (ret < 0) {
- DRM_ERROR("Failed to enable clock (%s)\n",
- clk_names[i]);
+ DRM_DEV_ERROR(dev, "Failed to enable clock (%s)\n",
+ clk_names[i]);
while (--i > -1)
clk_disable_unprepare(mic->clks[i]);
return ret;
@@ -380,7 +382,8 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
if (!mic) {
- DRM_ERROR("mic: Failed to allocate memory for MIC object\n");
+ DRM_DEV_ERROR(dev,
+ "mic: Failed to allocate memory for MIC object\n");
ret = -ENOMEM;
goto err;
}
@@ -389,12 +392,12 @@ static int exynos_mic_probe(struct platform_device *pdev)
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret) {
- DRM_ERROR("mic: Failed to get mem region for MIC\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to get mem region for MIC\n");
goto err;
}
mic->reg = devm_ioremap(dev, res.start, resource_size(&res));
if (!mic->reg) {
- DRM_ERROR("mic: Failed to remap for MIC\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to remap for MIC\n");
ret = -ENOMEM;
goto err;
}
@@ -402,7 +405,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
- DRM_ERROR("mic: Failed to get system register.\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to get system register.\n");
ret = PTR_ERR(mic->sysreg);
goto err;
}
@@ -410,8 +413,8 @@ static int exynos_mic_probe(struct platform_device *pdev)
for (i = 0; i < NUM_CLKS; i++) {
mic->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(mic->clks[i])) {
- DRM_ERROR("mic: Failed to get clock (%s)\n",
- clk_names[i]);
+ DRM_DEV_ERROR(dev, "mic: Failed to get clock (%s)\n",
+ clk_names[i]);
ret = PTR_ERR(mic->clks[i]);
goto err;
}
@@ -430,7 +433,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
if (ret)
goto err_pm;
- DRM_DEBUG_KMS("MIC has been probed\n");
+ DRM_DEV_DEBUG_KMS(dev, "MIC has been probed\n");
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index df0508e0e49e..e18babb25170 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -119,9 +119,10 @@ static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
exynos_state->crtc.w = actual_w;
exynos_state->crtc.h = actual_h;
- DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
- exynos_state->crtc.x, exynos_state->crtc.y,
- exynos_state->crtc.w, exynos_state->crtc.h);
+ DRM_DEV_DEBUG_KMS(crtc->dev->dev,
+ "plane : offset_x/y(%d,%d), width/height(%d,%d)",
+ exynos_state->crtc.x, exynos_state->crtc.y,
+ exynos_state->crtc.w, exynos_state->crtc.h);
}
static void exynos_drm_plane_reset(struct drm_plane *plane)
@@ -181,6 +182,7 @@ exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
struct drm_framebuffer *fb = state->base.fb;
+ struct drm_device *dev = fb->dev;
switch (fb->modifier) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
@@ -192,7 +194,7 @@ exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
break;
default:
- DRM_ERROR("unsupported pixel format modifier");
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format modifier");
return -ENOTSUPP;
}
@@ -203,6 +205,7 @@ static int
exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
+ struct drm_crtc *crtc = state->base.crtc;
bool width_ok = false, height_ok = false;
if (config->capabilities & EXYNOS_DRM_PLANE_CAP_SCALE)
@@ -225,7 +228,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
if (width_ok && height_ok)
return 0;
- DRM_DEBUG_KMS("scaling mode is not supported");
+ DRM_DEV_DEBUG_KMS(crtc->dev->dev, "scaling mode is not supported");
return -ENOTSUPP;
}
@@ -310,7 +313,7 @@ int exynos_plane_init(struct drm_device *dev,
config->num_pixel_formats,
NULL, config->type, NULL);
if (err) {
- DRM_ERROR("failed to initialize plane\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize plane\n");
return err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 05abfed6f7f8..b6586fa95ad1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -243,9 +243,10 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &rot->ipp;
rot->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
rot->formats, rot->num_formats, "rotator");
@@ -258,10 +259,9 @@ static void rotator_unbind(struct device *dev, struct device *master,
void *data)
{
struct rot_context *rot = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &rot->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index ed1dd1aec902..f1cbdd1e6e3c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -451,9 +451,10 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &scaler->ipp;
scaler->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
scaler->scaler_data->formats,
@@ -468,10 +469,9 @@ static void scaler_unbind(struct device *dev, struct device *master,
void *data)
{
struct scaler_context *scaler = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &scaler->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 29f4c1932aed..44bcb2d60bb2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -40,8 +40,8 @@
struct vidi_context {
struct drm_encoder encoder;
- struct platform_device *pdev;
struct drm_device *drm_dev;
+ struct device *dev;
struct exynos_drm_crtc *crtc;
struct drm_connector connector;
struct exynos_drm_plane planes[WINDOWS_NR];
@@ -123,7 +123,7 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc,
return;
addr = exynos_drm_fb_dma_addr(state->fb, 0);
- DRM_DEBUG_KMS("dma_addr = %pad\n", &addr);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr);
}
static void vidi_enable(struct exynos_drm_crtc *crtc)
@@ -205,11 +205,11 @@ static ssize_t vidi_store_connection(struct device *dev,
/* if raw_edid isn't same as fake data then it can't be tested. */
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
- DRM_DEBUG_KMS("edid data is not fake data.\n");
+ DRM_DEV_DEBUG_KMS(dev, "edid data is not fake data.\n");
return -EINVAL;
}
- DRM_DEBUG_KMS("requested connection.\n");
+ DRM_DEV_DEBUG_KMS(dev, "requested connection.\n");
drm_helper_hpd_irq_event(ctx->drm_dev);
@@ -226,17 +226,20 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_exynos_vidi_connection *vidi = data;
if (!vidi) {
- DRM_DEBUG_KMS("user data for vidi is null.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "user data for vidi is null.\n");
return -EINVAL;
}
if (vidi->connection > 1) {
- DRM_DEBUG_KMS("connection should be 0 or 1.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "connection should be 0 or 1.\n");
return -EINVAL;
}
if (ctx->connected == vidi->connection) {
- DRM_DEBUG_KMS("same connection request.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "same connection request.\n");
return -EINVAL;
}
@@ -245,12 +248,14 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
raw_edid = (struct edid *)(unsigned long)vidi->edid;
if (!drm_edid_is_valid(raw_edid)) {
- DRM_DEBUG_KMS("edid data is invalid.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "edid data is invalid.\n");
return -EINVAL;
}
ctx->raw_edid = drm_edid_duplicate(raw_edid);
if (!ctx->raw_edid) {
- DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "failed to allocate raw_edid.\n");
return -ENOMEM;
}
} else {
@@ -308,14 +313,14 @@ static int vidi_get_modes(struct drm_connector *connector)
* to ctx->raw_edid through specific ioctl.
*/
if (!ctx->raw_edid) {
- DRM_DEBUG_KMS("raw_edid is null.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
return -EFAULT;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
- DRM_DEBUG_KMS("failed to allocate edid\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
return -ENOMEM;
}
@@ -339,7 +344,8 @@ static int vidi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(ctx->drm_dev, connector,
&vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -402,7 +408,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
EXYNOS_DISPLAY_TYPE_VIDI, &vidi_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
- DRM_ERROR("failed to create crtc.\n");
+ DRM_DEV_ERROR(dev, "failed to create crtc.\n");
return PTR_ERR(ctx->crtc);
}
@@ -417,7 +423,8 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
ret = vidi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
@@ -441,13 +448,14 @@ static const struct component_ops vidi_component_ops = {
static int vidi_probe(struct platform_device *pdev)
{
struct vidi_context *ctx;
+ struct device *dev = &pdev->dev;
int ret;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->pdev = pdev;
+ ctx->dev = dev;
timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
@@ -455,20 +463,21 @@ static int vidi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ret = device_create_file(&pdev->dev, &dev_attr_connection);
+ ret = device_create_file(dev, &dev_attr_connection);
if (ret < 0) {
- DRM_ERROR("failed to create connection sysfs.\n");
+ DRM_DEV_ERROR(dev,
+ "failed to create connection sysfs.\n");
return ret;
}
- ret = component_add(&pdev->dev, &vidi_component_ops);
+ ret = component_add(dev, &vidi_component_ops);
if (ret)
goto err_remove_file;
return ret;
err_remove_file:
- device_remove_file(&pdev->dev, &dev_attr_connection);
+ device_remove_file(dev, &dev_attr_connection);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 8e2c02fc66e8..19c252f659dd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -885,9 +885,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
return -ENODEV;
hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
- DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
- (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
- edid->width_cm, edid->height_cm);
+ DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ edid->width_cm, edid->height_cm);
drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
@@ -908,7 +908,8 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
if (confs->data[i].pixel_clock == pixel_clock)
return i;
- DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
+ DRM_DEV_DEBUG_KMS(hdata->dev, "Could not find phy config for %d\n",
+ pixel_clock);
return -EINVAL;
}
@@ -918,10 +919,11 @@ static int hdmi_mode_valid(struct drm_connector *connector,
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
- mode->hdisplay, mode->vdisplay, mode->vrefresh,
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
- false, mode->clock * 1000);
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
+ false, mode->clock * 1000);
ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
if (ret < 0)
@@ -947,7 +949,8 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(hdata->drm_dev, connector,
&hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -957,7 +960,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
if (hdata->bridge) {
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
if (ret)
- DRM_ERROR("Failed to attach bridge\n");
+ DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
return ret;
@@ -1002,8 +1005,10 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
- DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ DRM_DEV_DEBUG_KMS(dev->dev,
+ "Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay,
+ m->vrefresh);
drm_mode_copy(adjusted_mode, m);
break;
@@ -1169,13 +1174,15 @@ static void hdmiphy_wait_for_pll(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
if (val & HDMI_PHY_STATUS_READY) {
- DRM_DEBUG_KMS("PLL stabilized after %d tries\n", tries);
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "PLL stabilized after %d tries\n",
+ tries);
return;
}
usleep_range(10, 20);
}
- DRM_ERROR("PLL could not reach steady state\n");
+ DRM_DEV_ERROR(hdata->dev, "PLL could not reach steady state\n");
}
static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
@@ -1411,7 +1418,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
ret = hdmi_find_phy_conf(hdata, m->clock * 1000);
if (ret < 0) {
- DRM_ERROR("failed to find hdmiphy conf\n");
+ DRM_DEV_ERROR(hdata->dev, "failed to find hdmiphy conf\n");
return;
}
phy_conf = hdata->drv_data->phy_confs.data[ret].conf;
@@ -1423,7 +1430,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
hdmiphy_enable_mode_set(hdata, true);
ret = hdmiphy_reg_write_buf(hdata, 0, phy_conf, 32);
if (ret) {
- DRM_ERROR("failed to configure hdmiphy\n");
+ DRM_DEV_ERROR(hdata->dev, "failed to configure hdmiphy\n");
return;
}
hdmiphy_enable_mode_set(hdata, false);
@@ -1460,7 +1467,8 @@ static void hdmiphy_enable(struct hdmi_context *hdata)
pm_runtime_get_sync(hdata->dev);
if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
- DRM_DEBUG_KMS("failed to enable regulator bulk\n");
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "failed to enable regulator bulk\n");
regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
PMU_HDMI_PHY_ENABLE_BIT, 1);
@@ -1734,7 +1742,7 @@ static int hdmi_bridge_init(struct hdmi_context *hdata)
np = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
if (!np) {
- DRM_ERROR("failed to get remote port parent");
+ DRM_DEV_ERROR(dev, "failed to get remote port parent");
return -EINVAL;
}
@@ -1752,17 +1760,17 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
struct device *dev = hdata->dev;
int i, ret;
- DRM_DEBUG_KMS("HDMI resource init\n");
+ DRM_DEV_DEBUG_KMS(dev, "HDMI resource init\n");
hdata->hpd_gpio = devm_gpiod_get(dev, "hpd", GPIOD_IN);
if (IS_ERR(hdata->hpd_gpio)) {
- DRM_ERROR("cannot get hpd gpio property\n");
+ DRM_DEV_ERROR(dev, "cannot get hpd gpio property\n");
return PTR_ERR(hdata->hpd_gpio);
}
hdata->irq = gpiod_to_irq(hdata->hpd_gpio);
if (hdata->irq < 0) {
- DRM_ERROR("failed to get GPIO irq\n");
+ DRM_DEV_ERROR(dev, "failed to get GPIO irq\n");
return hdata->irq;
}
@@ -1780,7 +1788,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("failed to get regulators\n");
+ DRM_DEV_ERROR(dev, "failed to get regulators\n");
return ret;
}
@@ -1792,7 +1800,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
ret = regulator_enable(hdata->reg_hdmi_en);
if (ret) {
- DRM_ERROR("failed to enable hdmi-en regulator\n");
+ DRM_DEV_ERROR(dev,
+ "failed to enable hdmi-en regulator\n");
return ret;
}
}
@@ -1845,7 +1854,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
ret = hdmi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
@@ -1875,7 +1885,8 @@ static int hdmi_get_ddc_adapter(struct hdmi_context *hdata)
np = of_parse_phandle(hdata->dev->of_node, "ddc", 0);
if (!np) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to find ddc node in device tree\n");
return -ENODEV;
}
@@ -1902,7 +1913,8 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata)
if (!np) {
np = of_parse_phandle(hdata->dev->of_node, "phy", 0);
if (!np) {
- DRM_ERROR("Failed to find hdmiphy node in device tree\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to find hdmiphy node in device tree\n");
return -ENODEV;
}
}
@@ -1910,7 +1922,8 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata)
if (hdata->drv_data->is_apb_phy) {
hdata->regs_hdmiphy = of_iomap(np, 0);
if (!hdata->regs_hdmiphy) {
- DRM_ERROR("failed to ioremap hdmi phy\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "failed to ioremap hdmi phy\n");
ret = -ENOMEM;
goto out;
}
@@ -1951,7 +1964,7 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("hdmi_resources_init failed\n");
+ DRM_DEV_ERROR(dev, "hdmi_resources_init failed\n");
return ret;
}
@@ -1977,14 +1990,14 @@ static int hdmi_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi", hdata);
if (ret) {
- DRM_ERROR("failed to register hdmi interrupt\n");
+ DRM_DEV_ERROR(dev, "failed to register hdmi interrupt\n");
goto err_hdmiphy;
}
hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,syscon-phandle");
if (IS_ERR(hdata->pmureg)) {
- DRM_ERROR("syscon regmap lookup failed.\n");
+ DRM_DEV_ERROR(dev, "syscon regmap lookup failed.\n");
ret = -EPROBE_DEFER;
goto err_hdmiphy;
}
@@ -1993,7 +2006,7 @@ static int hdmi_probe(struct platform_device *pdev)
hdata->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,sysreg-phandle");
if (IS_ERR(hdata->sysreg)) {
- DRM_ERROR("sysreg regmap lookup failed.\n");
+ DRM_DEV_ERROR(dev, "sysreg regmap lookup failed.\n");
ret = -EPROBE_DEFER;
goto err_hdmiphy;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index f35e4ab55b27..b8415e53964d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -228,8 +228,8 @@ static void mixer_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
- DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32)readl(ctx->mixer_regs + reg_id)); \
+ DRM_DEV_DEBUG_KMS(ctx->dev, #reg_id " = %08x\n", \
+ (u32)readl(ctx->mixer_regs + reg_id)); \
} while (0)
DUMPREG(MXR_STATUS);
@@ -260,8 +260,8 @@ static void vp_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
- DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32) readl(ctx->vp_regs + reg_id)); \
+ DRM_DEV_DEBUG_KMS(ctx->dev, #reg_id " = %08x\n", \
+ (u32) readl(ctx->vp_regs + reg_id)); \
} while (0)
DUMPREG(VP_ENABLE);
@@ -885,7 +885,8 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(mixer_ctx);
if (ret) {
- DRM_ERROR("mixer_resources_init failed ret=%d\n", ret);
+ DRM_DEV_ERROR(mixer_ctx->dev,
+ "mixer_resources_init failed ret=%d\n", ret);
return ret;
}
@@ -893,7 +894,8 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
/* acquire vp resources: regs, irqs, clocks */
ret = vp_resources_init(mixer_ctx);
if (ret) {
- DRM_ERROR("vp_resources_init failed ret=%d\n", ret);
+ DRM_DEV_ERROR(mixer_ctx->dev,
+ "vp_resources_init failed ret=%d\n", ret);
return ret;
}
}
@@ -952,7 +954,7 @@ static void mixer_update_plane(struct exynos_drm_crtc *crtc,
{
struct mixer_context *mixer_ctx = crtc->ctx;
- DRM_DEBUG_KMS("win: %d\n", plane->index);
+ DRM_DEV_DEBUG_KMS(mixer_ctx->dev, "win: %d\n", plane->index);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
@@ -969,7 +971,7 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
struct mixer_context *mixer_ctx = crtc->ctx;
unsigned long flags;
- DRM_DEBUG_KMS("win: %d\n", plane->index);
+ DRM_DEV_DEBUG_KMS(mixer_ctx->dev, "win: %d\n", plane->index);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
@@ -1046,8 +1048,9 @@ static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
struct mixer_context *ctx = crtc->ctx;
u32 w = mode->hdisplay, h = mode->vdisplay;
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n", w, h,
- mode->vrefresh, !!(mode->flags & DRM_MODE_FLAG_INTERLACE));
+ DRM_DEV_DEBUG_KMS(ctx->dev, "xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ w, h, mode->vrefresh,
+ !!(mode->flags & DRM_MODE_FLAG_INTERLACE));
if (ctx->mxr_ver == MXR_VER_128_0_0_184)
return MODE_OK;
@@ -1227,7 +1230,7 @@ static int mixer_probe(struct platform_device *pdev)
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
- DRM_ERROR("failed to alloc mixer context.\n");
+ DRM_DEV_ERROR(dev, "failed to alloc mixer context.\n");
return -ENOMEM;
}
@@ -1282,27 +1285,33 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
ret = clk_prepare_enable(ctx->mixer);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to prepare_enable the mixer clk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->hdmi);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the hdmi clk [%d]\n",
+ ret);
return ret;
}
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
ret = clk_prepare_enable(ctx->vp);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
- ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the vp clk [%d]\n",
+ ret);
return ret;
}
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) {
ret = clk_prepare_enable(ctx->sclk_mixer);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the " \
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the " \
"sclk_mixer clk [%d]\n",
- ret);
+ ret);
return ret;
}
}
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index 14a72c4c496d..dc825883400d 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -2,7 +2,6 @@ config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index bf256971063d..83c841b50272 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -94,7 +94,7 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
drm_display_mode_to_videomode(mode, &vm);
/* INV_PXCK as default (most display sample data on rising edge) */
- if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE))
+ if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE))
pol |= DCU_SYN_POL_INV_PXCK;
if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index c934b3df1f81..a9d3a4a30ab8 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -389,7 +389,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
ret = PTR_ERR(info);
goto out;
}
- info->par = fbdev;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
@@ -402,9 +401,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
fbdev->psb_fb_helper.fb = fb;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- strcpy(info->fix.id, "psbdrmfb");
-
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
info->fbops = &psbfb_ops;
else if (gtt_roll) { /* GTT rolling seems best */
@@ -427,8 +423,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
- sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 23dc3c5f8f0d..e8e6357f033b 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -34,7 +34,7 @@ struct psb_framebuffer {
};
struct psb_fbdev {
- struct drm_fb_helper psb_fb_helper;
+ struct drm_fb_helper psb_fb_helper; /* must be first */
struct psb_framebuffer pfb;
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 3c168ae77b0c..0a381c22de26 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -31,7 +31,7 @@ struct hibmc_framebuffer {
};
struct hibmc_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct hibmc_framebuffer *fb;
int size;
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index de9d7cc97e44..8026859aa07d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -116,8 +116,6 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
goto out_release_fbi;
}
- info->par = hi_fbdev;
-
hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
if (IS_ERR(hi_fbdev->fb)) {
ret = PTR_ERR(hi_fbdev->fb);
@@ -129,14 +127,9 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
priv->fbdev->size = size;
hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
- strcpy(info->fix.id, "hibmcdrmfb");
-
info->fbops = &hibmc_drm_fb_ops;
- drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
- hi_fbdev->fb->fb.format->depth);
- drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
- sizes->fb_height);
+ drm_fb_helper_fill_info(info, &priv->fbdev->helper, sizes);
info->screen_base = bo->kmap.virtual;
info->screen_size = size;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index d2cf7317930a..8c2f9b9cafb3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -33,17 +33,10 @@ static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *con
return MODE_OK;
}
-static struct drm_encoder *
-hibmc_connector_best_encoder(struct drm_connector *connector)
-{
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
.mode_valid = hibmc_connector_mode_valid,
- .best_encoder = hibmc_connector_best_encoder,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index dd383267884c..6093c421daff 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -21,8 +21,6 @@
#include "hibmc_drm_drv.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static inline struct hibmc_drm_private *
hibmc_bdev(struct ttm_bo_device *bd)
{
@@ -191,7 +189,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
ret = ttm_bo_device_init(&hibmc->bdev,
&hibmc_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("error initializing bo driver: %d\n", ret);
@@ -322,14 +319,9 @@ int hibmc_bo_unpin(struct hibmc_bo *bo)
int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct hibmc_drm_private *hibmc;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ struct drm_file *file_priv = filp->private_data;
+ struct hibmc_drm_private *hibmc = file_priv->minor->dev->dev_private;
- file_priv = filp->private_data;
- hibmc = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &hibmc->bdev);
}
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
new file mode 100644
index 000000000000..cff45d81f42f
--- /dev/null
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -0,0 +1 @@
+header_test_*.c
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 148be8e1a090..3d5f1cb6a76c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -15,7 +15,6 @@ config DRM_I915
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1787e1299b1b..fbcb0904f4a8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,10 +32,13 @@ CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
+# Extra header tests
+include $(src)/Makefile.header-test
+
# Please keep these build lists sorted!
# core driver code
-i915-y := i915_drv.o \
+i915-y += i915_drv.o \
i915_irq.o \
i915_memcpy.o \
i915_mm.o \
@@ -46,6 +49,7 @@ i915-y := i915_drv.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_sysfs.o \
+ i915_user_extensions.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \
@@ -77,6 +81,7 @@ i915-y += \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gemfs.o \
+ i915_globals.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
@@ -84,6 +89,7 @@ i915-y += \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
+ intel_context.o \
intel_engine_cs.o \
intel_hangcheck.o \
intel_lrc.o \
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
new file mode 100644
index 000000000000..c1c391816fa7
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile.header-test
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := \
+ i915_active_types.h \
+ i915_gem_context_types.h \
+ i915_priolist_types.h \
+ i915_scheduler_types.h \
+ i915_timeline_types.h \
+ intel_atomic_plane.h \
+ intel_audio.h \
+ intel_cdclk.h \
+ intel_color.h \
+ intel_connector.h \
+ intel_context_types.h \
+ intel_crt.h \
+ intel_csr.h \
+ intel_ddi.h \
+ intel_dp.h \
+ intel_dvo.h \
+ intel_engine_types.h \
+ intel_fbc.h \
+ intel_fbdev.h \
+ intel_frontbuffer.h \
+ intel_hdcp.h \
+ intel_hdmi.h \
+ intel_lspcon.h \
+ intel_lvds.h \
+ intel_panel.h \
+ intel_pipe_crc.h \
+ intel_pm.h \
+ intel_psr.h \
+ intel_sdvo.h \
+ intel_sprite.h \
+ intel_tv.h \
+ intel_workarounds_types.h
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+i915-$(CONFIG_DRM_I915_WERROR) += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 271fb46d4dd0..ea8324abc784 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,5 +5,5 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
fb_decoder.o dmabuf.o page_track.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 3592d04c33b2..ab002cfd3cab 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -391,12 +391,12 @@ struct cmd_info {
#define F_POST_HANDLE (1<<2)
u32 flag;
-#define R_RCS (1 << RCS)
-#define R_VCS1 (1 << VCS)
-#define R_VCS2 (1 << VCS2)
+#define R_RCS BIT(RCS0)
+#define R_VCS1 BIT(VCS0)
+#define R_VCS2 BIT(VCS1)
#define R_VCS (R_VCS1 | R_VCS2)
-#define R_BCS (1 << BCS)
-#define R_VECS (1 << VECS)
+#define R_BCS BIT(BCS0)
+#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
u16 rings;
@@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = {
};
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
- [RCS] = {
+ [RCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS] = {
+ [VCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [BCS] = {
+ [BCS0] = {
&decode_info_mi,
NULL,
&decode_info_2d,
@@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VECS] = {
+ [VECS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS2] = {
+ [VCS1] = {
&decode_info_mi,
NULL,
NULL,
@@ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if ((opcode == e->info->opcode) &&
- (e->info->rings & (1 << ring_id)))
+ if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
return e->info;
}
return NULL;
@@ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s)
struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) &&
- (s->ring_id != RCS)) {
- if (s->ring_id == BCS &&
- cmd_reg(s, i) ==
- i915_mmio_reg_offset(DERRMR))
+ if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
+ if (s->ring_id == BCS0 &&
+ cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
- ret |= (cmd_reg_inhibit(s, i)) ?
- -EBADRQC : 0;
+ ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
}
if (ret)
break;
@@ -1047,27 +1043,27 @@ struct cmd_interrupt_event {
};
static struct cmd_interrupt_event cmd_interrupt_events[] = {
- [RCS] = {
+ [RCS0] = {
.pipe_control_notify = RCS_PIPE_CONTROL,
.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
},
- [BCS] = {
+ [BCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = BCS_MI_FLUSH_DW,
.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
},
- [VCS] = {
+ [VCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS_MI_FLUSH_DW,
.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
},
- [VCS2] = {
+ [VCS1] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS2_MI_FLUSH_DW,
.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
},
- [VECS] = {
+ [VECS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VECS_MI_FLUSH_DW,
.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
@@ -1081,6 +1077,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
bool index_mode = false;
unsigned int post_sync;
int ret = 0;
+ u32 hws_pga, val;
post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
@@ -1104,6 +1101,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
index_mode = true;
ret |= cmd_address_audit(s, gma, sizeof(u64),
index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 2), gma);
+ val = cmd_val(s, 1) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 1), val);
+ }
}
}
}
@@ -1321,8 +1327,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
info->tile_val << 10);
}
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
- intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ if (info->plane == PLANE_PRIMARY)
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
+
+ if (info->async_flip)
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ else
+ set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
+
return 0;
}
@@ -1567,6 +1579,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
unsigned long gma;
bool index_mode = false;
int ret = 0;
+ u32 hws_pga, val;
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1577,6 +1590,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (cmd_val(s, 0) & (1 << 21))
index_mode = true;
ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 1), gma);
+ val = cmd_val(s, 0) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 0), val);
+ }
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
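The cmd_parser.c hunks above switch the per-command ring masks from open-coded shifts on the old engine names (RCS, VCS, VCS2, ...) to BIT() on the new zero-based ids (RCS0, VCS0, VCS1, ...), and the lookup in find_cmd_entry() tests that mask with BIT(ring_id). A standalone sketch of the same mask arithmetic; BIT() here mirrors the kernel macro and the engine id ordering is an assumption for illustration only:

/* Build a rings mask for a command and test whether a given engine
 * may execute it, mirroring the find_cmd_entry() check above.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum demo_engine_id { RCS0, BCS0, VCS0, VCS1, VECS0 };  /* assumed order */

int main(void)
{
        uint16_t rings = BIT(RCS0) | BIT(VCS0) | BIT(VCS1); /* R_RCS | R_VCS */
        enum demo_engine_id ring_id = VCS1;

        if (rings & BIT(ring_id))
                printf("opcode accepted on engine %d\n", ring_id);
        return 0;
}
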
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2ec89bcb59f1..8a9606f91e68 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -196,9 +196,9 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
struct dentry *ent;
- char name[10] = "";
+ char name[16] = "";
- sprintf(name, "vgpu%d", vgpu->id);
+ snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
if (!vgpu->debugfs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 035479e273be..e1c313da6c00 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
@@ -448,7 +447,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
/**
* intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
* @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
*
* This function is used to trigger hotplug interrupt for vGPU
*
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c857..41c8ebc60c63 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -45,6 +45,7 @@ static int vgpu_gem_get_pages(
int i, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
+ u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
if (WARN_ON(!fb_info))
@@ -54,14 +55,15 @@ static int vgpu_gem_get_pages(
if (unlikely(!st))
return -ENOMEM;
- ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+ page_num = obj->base.size >> PAGE_SHIFT;
+ ret = sg_alloc_table(st, page_num, GFP_KERNEL);
if (ret) {
kfree(st);
return ret;
}
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
- for_each_sg(st->sgl, sg, fb_info->size, i) {
+ for_each_sg(st->sgl, sg, page_num, i) {
sg->offset = 0;
sg->length = PAGE_SIZE;
sg_dma_address(sg) =
@@ -153,12 +155,12 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
drm_gem_private_object_init(dev, &obj->base,
- info->size << PAGE_SHIFT);
+ roundup(info->size, PAGE_SIZE));
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
obj->read_domains = I915_GEM_DOMAIN_GTT;
@@ -206,10 +208,11 @@ static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu_fb_info *info,
int plane_id)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_vgpu_primary_plane_format p;
struct intel_vgpu_cursor_plane_format c;
- int ret;
+ int ret, tile_height = 1;
+
+ memset(info, 0, sizeof(*info));
if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -228,19 +231,19 @@ static int vgpu_get_plane_info(struct drm_device *dev,
break;
case PLANE_CTL_TILED_X:
info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+ tile_height = 8;
break;
case PLANE_CTL_TILED_Y:
info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+ tile_height = 32;
break;
case PLANE_CTL_TILED_YF:
info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+ tile_height = 32;
break;
default:
gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
}
-
- info->size = (((p.stride * p.height * p.bpp) / 8) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
if (ret)
@@ -262,14 +265,12 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_hot = UINT_MAX;
info->y_hot = UINT_MAX;
}
-
- info->size = (((info->stride * c.height * c.bpp) / 8)
- + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
gvt_vgpu_err("invalid plane id:%d\n", plane_id);
return -EINVAL;
}
+ info->size = info->stride * roundup(info->height, tile_height);
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
@@ -279,11 +280,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
return -EFAULT;
}
- if (((info->start >> PAGE_SHIFT) + info->size) >
- ggtt_total_entries(&dev_priv->ggtt)) {
- gvt_vgpu_err("Invalid GTT offset or size\n");
- return -EFAULT;
- }
if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
gvt_vgpu_err("invalid gma addr\n");
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 70494e394d2c..f21b8fb5b37e 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -47,17 +47,16 @@
((a)->lrca == (b)->lrca))
static int context_switch_events[] = {
- [RCS] = RCS_AS_CONTEXT_SWITCH,
- [BCS] = BCS_AS_CONTEXT_SWITCH,
- [VCS] = VCS_AS_CONTEXT_SWITCH,
- [VCS2] = VCS2_AS_CONTEXT_SWITCH,
- [VECS] = VECS_AS_CONTEXT_SWITCH,
+ [RCS0] = RCS_AS_CONTEXT_SWITCH,
+ [BCS0] = BCS_AS_CONTEXT_SWITCH,
+ [VCS0] = VCS_AS_CONTEXT_SWITCH,
+ [VCS1] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(int ring_id)
+static int ring_id_to_context_switch_event(unsigned int ring_id)
{
- if (WARN_ON(ring_id < RCS ||
- ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
@@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
+ if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
goto out;
if (!list_empty(workload_q_head(vgpu, ring_id))) {
@@ -527,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+ intel_engine_mask_t engine_mask)
{
- unsigned int tmp;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
@@ -542,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
}
static void reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
static int init_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
reset_execlist(vgpu, engine_mask);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 714d709829a2..5ccc2c695848 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d7052ab7908c..08c74e65836b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
- struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_vgpu_ppgtt_spt *spt, *spn;
struct radix_tree_iter iter;
- void **slot;
+ LIST_HEAD(all_spt);
+ void __rcu **slot;
+ rcu_read_lock();
radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
spt = radix_tree_deref_slot(slot);
- ppgtt_free_spt(spt);
+ list_move(&spt->post_shadow_list, &all_spt);
}
+ rcu_read_unlock();
+
+ list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+ ppgtt_free_spt(spt);
}
static int ppgtt_handle_guest_write_page_table_bytes(
@@ -805,7 +811,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -855,7 +861,7 @@ err_free_spt:
/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
unsigned long gfn, bool guest_pde_ips)
{
struct intel_vgpu_ppgtt_spt *spt;
@@ -930,7 +936,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
- intel_gvt_gtt_type_t cur_pt_type;
+ enum intel_gvt_gtt_type cur_pt_type;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
@@ -1070,6 +1076,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
} else {
int type = get_next_pt_type(we->type);
+ if (!gtt_type_is_pt(type))
+ goto err;
+
spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
@@ -1849,7 +1858,7 @@ static void vgpu_free_mm(struct intel_vgpu_mm *mm)
* Zero on success, negative error code in pointer if failed.
*/
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_mm *mm;
@@ -1946,7 +1955,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
*/
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
- atomic_dec(&mm->pincount);
+ atomic_dec_if_positive(&mm->pincount);
}
/**
@@ -2303,7 +2312,7 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t type)
+ enum intel_gvt_gtt_type type)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -2498,6 +2507,7 @@ static void clean_spt_oos(struct intel_gvt *gvt)
list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
list_del(&oos_page->list);
+ free_page((unsigned long)oos_page->mem);
kfree(oos_page);
}
}
@@ -2518,6 +2528,12 @@ static int setup_spt_oos(struct intel_gvt *gvt)
ret = -ENOMEM;
goto fail;
}
+ oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
+ if (!oos_page->mem) {
+ ret = -ENOMEM;
+ kfree(oos_page);
+ goto fail;
+ }
INIT_LIST_HEAD(&oos_page->list);
INIT_LIST_HEAD(&oos_page->vm_list);
@@ -2581,7 +2597,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
* Zero on success, negative error code if failed.
*/
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_vgpu_mm *mm;
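Two of the gtt.c hunks above also rework the out-of-sync (OOS) page pool: oos_page->mem becomes a separately allocated order-0 page, obtained in setup_spt_oos() and released in clean_spt_oos(), instead of a 4 KiB array embedded in the struct. A kernel-style sketch of that allocate/free pairing; the demo_* names are hypothetical stand-ins, not driver symbols:

/* Allocate a tracking struct plus one backing page, and tear both
 * down again, mirroring the setup_spt_oos()/clean_spt_oos() change.
 */
#include <linux/gfp.h>
#include <linux/slab.h>

struct demo_oos_page {
        void *mem;                      /* one order-0 page */
};

static struct demo_oos_page *demo_oos_page_alloc(void)
{
        struct demo_oos_page *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        p->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
        if (!p->mem) {
                kfree(p);
                return NULL;
        }
        return p;
}

static void demo_oos_page_free(struct demo_oos_page *p)
{
        free_page((unsigned long)p->mem);
        kfree(p);
}
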
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index edb610dc5d86..42d0394f0de2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -95,8 +95,8 @@ struct intel_gvt_gtt {
unsigned long scratch_mfn;
};
-typedef enum {
- GTT_TYPE_INVALID = -1,
+enum intel_gvt_gtt_type {
+ GTT_TYPE_INVALID = 0,
GTT_TYPE_GGTT_PTE,
@@ -124,7 +124,7 @@ typedef enum {
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_MAX,
-} intel_gvt_gtt_type_t;
+};
enum intel_gvt_mm_type {
INTEL_GVT_MM_GGTT,
@@ -148,7 +148,7 @@ struct intel_vgpu_mm {
union {
struct {
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
/*
* The 4 PDPs in ring context. For 48bit addressing,
* only PDP0 is valid and points to PML4. For 32bit
@@ -169,7 +169,7 @@ struct intel_vgpu_mm {
};
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
@@ -222,7 +222,7 @@ struct intel_vgpu_oos_page {
struct list_head list;
struct list_head vm_list;
int id;
- unsigned char mem[I915_GTT_PAGE_SIZE];
+ void *mem;
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
@@ -233,7 +233,7 @@ struct intel_vgpu_ppgtt_spt {
struct intel_vgpu *vgpu;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
@@ -241,7 +241,7 @@ struct intel_vgpu_ppgtt_spt {
} shadow_page;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
@@ -267,7 +267,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
u64 pdps[]);
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
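The gtt.h hunks above drop the intel_gvt_gtt_type_t typedef in favour of a plain enum intel_gvt_gtt_type and move GTT_TYPE_INVALID from -1 to 0. One practical effect, shown in the userspace sketch below (demo names only), is that zero-initialized entries now start out in the invalid state:

/* With INVALID == 0, kzalloc()/memset()-style zeroing yields an
 * entry whose type is already "invalid" rather than GGTT_PTE.
 */
#include <stdio.h>
#include <string.h>

enum demo_gtt_type { DEMO_GTT_TYPE_INVALID = 0, DEMO_GTT_TYPE_GGTT_PTE };

struct demo_entry { enum demo_gtt_type type; };

int main(void)
{
        struct demo_entry e;

        memset(&e, 0, sizeof(e));
        printf("%s\n", e.type == DEMO_GTT_TYPE_INVALID ? "invalid" : "valid");
        return 0;
}
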
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 8bce09de4b82..f5a328b5290a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -94,7 +94,6 @@ struct intel_vgpu_fence {
struct intel_vgpu_mmio {
void *vreg;
- void *sreg;
};
#define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space {
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
-#define INTEL_GVT_MAX_PIPE 4
-
struct intel_vgpu_irq {
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
- DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
INTEL_GVT_EVENT_MAX);
};
@@ -144,9 +141,9 @@ enum {
struct intel_vgpu_submission_ops {
const char *name;
- int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+ int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};
struct intel_vgpu_submission {
@@ -449,10 +446,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
- (*(u32 *)(vgpu->mmio.sreg + (offset)))
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -488,7 +481,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bc64b810e0d5..90673fca792f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int engine_mask = 0;
+ intel_engine_mask_t engine_mask = 0;
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
@@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
- engine_mask |= (1 << RCS);
+ engine_mask |= BIT(RCS0);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
- engine_mask |= (1 << VCS);
+ engine_mask |= BIT(VCS0);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
- engine_mask |= (1 << BCS);
+ engine_mask |= BIT(BCS0);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
- engine_mask |= (1 << VECS);
+ engine_mask |= BIT(VECS0);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
- if (HAS_BSD2(vgpu->gvt->dev_priv))
- engine_mask |= (1 << VCS2);
+ engine_mask |= BIT(VCS1);
}
+ engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -750,18 +750,19 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- unsigned int index = DSPSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = DSPSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = PRIMARY_A_FLIP_DONE,
- [PIPE_B] = PRIMARY_B_FLIP_DONE,
- [PIPE_C] = PRIMARY_C_FLIP_DONE,
- };
+ u32 pipe = DSPSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+
+ if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -771,18 +772,42 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int index = SPRSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = SPRSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = SPRITE_A_FLIP_DONE,
- [PIPE_B] = SPRITE_B_FLIP_DONE,
- [PIPE_C] = SPRITE_C_FLIP_DONE,
- };
+ u32 pipe = SPRSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
+ return 0;
+}
+
+static int reg50080_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum pipe pipe = REG_50080_TO_PIPE(offset);
+ enum plane_id plane = REG_50080_TO_PLANE(offset);
+ int event = SKL_FLIP_EVENT(pipe, plane);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ if (plane == PLANE_PRIMARY) {
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ } else {
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ }
+
+ if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -1181,7 +1206,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
- intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
struct intel_vgpu_mm *mm;
u64 *pdps;
@@ -1704,7 +1729,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- ENGINE_MASK(ring_id),
+ BIT(ring_id),
INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -1724,19 +1749,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
switch (offset) {
case 0x4260:
- id = RCS;
+ id = RCS0;
break;
case 0x4264:
- id = VCS;
+ id = VCS0;
break;
case 0x4268:
- id = VCS2;
+ id = VCS1;
break;
case 0x426c:
- id = BCS;
+ id = BCS0;
break;
case 0x4270:
- id = VECS;
+ id = VECS0;
break;
default:
return -EINVAL;
@@ -1793,7 +1818,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
- if (HAS_BSD2(dev_priv)) \
+ if (HAS_ENGINE(dev_priv, VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
@@ -1848,7 +1873,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
- MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
@@ -1969,6 +1994,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_B), D_ALL);
MMIO_D(DSPADDR(PIPE_B), D_ALL);
@@ -1978,6 +2005,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_C), D_ALL);
MMIO_D(DSPADDR(PIPE_C), D_ALL);
@@ -1987,6 +2016,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_A), D_ALL);
MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
@@ -2000,6 +2031,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_A), D_ALL);
MMIO_D(SPRSCALE(PIPE_A), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_B), D_ALL);
MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
@@ -2013,6 +2046,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_B), D_ALL);
MMIO_D(SPRSCALE(PIPE_B), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_C), D_ALL);
MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
@@ -2026,6 +2061,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_C), D_ALL);
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
@@ -2827,26 +2864,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
- MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
+ MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
- MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
+ MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
+ MMIO_D(DC_STATE_EN, D_SKL_PLUS);
+ MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
+ MMIO_D(CDCLK_CTL, D_SKL_PLUS);
+ MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
+ MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2965,40 +3002,41 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
- MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
+ MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
+ MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
+ MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
+ MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
- MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
+ MMIO_D(SKL_DFSM, D_SKL_PLUS);
+ MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
- MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
NULL, gen9_trtte_write);
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
@@ -3011,7 +3049,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
@@ -3042,8 +3080,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
return 0;
}
@@ -3489,12 +3527,11 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
return mmio_info->read(vgpu, offset, pdata, bytes);
else {
u64 ro_mask = mmio_info->ro_mask;
- u32 old_vreg = 0, old_sreg = 0;
+ u32 old_vreg = 0;
u64 data = 0;
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
- old_sreg = vgpu_sreg(vgpu, offset);
}
if (likely(!ro_mask))
@@ -3516,8 +3553,6 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
- vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
- | (vgpu_sreg(vgpu, offset) & mask);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 67125c5eec6e..951681813230 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -536,7 +536,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_BSD2(gvt->dev_priv)) {
+ if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d5fcc447d22f..a68addf95c23 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
void *buf, unsigned long count, bool is_write)
{
- void *aperture_va;
+ void __iomem *aperture_va;
if (!intel_vgpu_in_aperture(vgpu, off) ||
!intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return -EIO;
if (is_write)
- memcpy(aperture_va + offset_in_page(off), buf, count);
+ memcpy_toio(aperture_va + offset_in_page(off), buf, count);
else
- memcpy(buf, aperture_va + offset_in_page(off), count);
+ memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
io_mapping_unmap(aperture_va);
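The kvmgt.c hunk above retypes the aperture mapping as void __iomem * and switches the copies to memcpy_toio()/memcpy_fromio(), the accessors intended for I/O-mapped memory; a plain memcpy() on an __iomem pointer is not guaranteed to be correct and is flagged by sparse. A kernel-style sketch of the pattern; the helper name and arguments are illustrative, not driver API:

/* Copy to or from an already-mapped aperture region through the
 * __iomem-aware accessors instead of memcpy().
 */
#include <linux/io.h>
#include <linux/types.h>

static int demo_aperture_rw(void __iomem *aperture_va, unsigned long off,
                            void *buf, unsigned long count, bool is_write)
{
        if (is_write)
                memcpy_toio(aperture_va + off, buf, count);
        else
                memcpy_fromio(buf, aperture_va + off, count);
        return 0;
}
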
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index ed4df2f6d60b..a55178884d67 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
if (dmlr) {
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
- memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
@@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
* touched
*/
memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
- memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
}
}
@@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
+ vgpu->mmio.vreg = vzalloc(info->mmio_size);
if (!vgpu->mmio.vreg)
return -ENOMEM;
- vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
intel_vgpu_reset_mmio(vgpu, true);
return 0;
@@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
- vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+ vgpu->mmio.vreg = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7902fb162d09..edf6d646eb25 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -41,103 +41,103 @@
/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
- {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
- {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
- {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
- {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
- {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
- {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
- {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
- {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
- {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
- {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
- {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
- {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
- {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
- {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
- {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
- {RCS, TRVADR, 0, false}, /* 0x4df0 */
- {RCS, TRTTE, 0, false}, /* 0x4df4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-
- {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
-
- {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
-
- {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
- {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
-
- {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
- {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
-
- {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
- {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
- {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+ {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+ {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+ {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+ {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+ {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+ {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+ {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
+ {RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
+ {RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
+ {RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
+ {RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
+ {RCS0, TRVADR, 0, false}, /* 0x4df0 */
+ {RCS0, TRTTE, 0, false}, /* 0x4df4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+
+ {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+ {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+ {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+ {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+ {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+ {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+ {RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
+
+ {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+ {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+ {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct {
@@ -150,11 +150,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
{
i915_reg_t offset;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int ring_id, i;
@@ -302,7 +302,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
goto out;
/* no MOCS register in context except render engine */
- if (req->engine->id != RCS)
+ if (req->engine->id != RCS0)
goto out;
ret = restore_render_mocs_control_for_inhibit(vgpu, req);
@@ -328,15 +328,16 @@ out:
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
- [RCS] = 0x4260,
- [VCS] = 0x4264,
- [VCS2] = 0x4268,
- [BCS] = 0x426c,
- [VECS] = 0x4270,
+ [RCS0] = 0x4260,
+ [VCS0] = 0x4264,
+ [VCS1] = 0x4268,
+ [BCS0] = 0x426c,
+ [VECS0] = 0x4270,
};
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
@@ -352,21 +353,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
* otherwise device can go to RC6 state and interrupt invalidation
* process
*/
- fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
+ if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
fw |= FORCEWAKE_RENDER;
- intel_uncore_forcewake_get(dev_priv, fw);
+ intel_uncore_forcewake_get(uncore, fw);
- I915_WRITE_FW(reg, 0x1);
+ intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else
vgpu_vreg_t(vgpu, reg) = 0;
- intel_uncore_forcewake_put(dev_priv, fw);
+ intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
@@ -379,11 +380,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
u32 old_v, new_v;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int i;
@@ -391,8 +392,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
- || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
+ if (ring_id == RCS0 &&
+ (IS_KABYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)))
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -415,7 +418,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
offset.reg += 4;
}
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -493,7 +496,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+ !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+ dev_priv->engine[ring_id])))
continue;
if (mmio->mask)
@@ -550,9 +554,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
* performance for batch mmio read/write, so we need
* handle forcewake manually.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 428d252344f1..33aaa14bfdde 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -60,6 +60,37 @@
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
+#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe))
+
+#define PLANE_CTL_ASYNC_FLIP (1 << 9)
+#define REG50080_FLIP_TYPE_MASK 0x3
+#define REG50080_FLIP_TYPE_ASYNC 0x1
+
+#define REG_50080(_pipe, _plane) ({ \
+ typeof(_pipe) (p) = (_pipe); \
+ typeof(_plane) (q) = (_plane); \
+ (((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
+ (_MMIO(0x50090))) : \
+ (((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
+ (_MMIO(0x50098))) : \
+ (((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
+ (_MMIO(0x5009C))) : \
+ (_MMIO(0x50080))))); })
+
+#define REG_50080_TO_PIPE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \
+ (((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \
+ (((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \
+ (INVALID_PIPE)))); })
+
+#define REG_50080_TO_PLANE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? \
+ (PLANE_PRIMARY) : \
+ (((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \
+ (PLANE_SPRITE0) : (I915_MAX_PLANES))); })
+
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 159192c097cc..7c99bbc3e2b8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS)
+ if (workload->ring_id != RCS0)
return;
if (save) {
@@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
context_page_num = 19;
i = 2;
@@ -434,8 +434,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (ret)
goto err_unpin;
- if ((workload->ring_id == RCS) &&
- (workload->wa_ctx.indirect_ctx.size != 0)) {
+ if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -803,7 +802,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -851,13 +850,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
@@ -903,8 +902,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- if (!workload->status && !(vgpu->resetting_eng &
- ENGINE_MASK(ring_id))) {
+ if (!workload->status &&
+ !(vgpu->resetting_eng & BIT(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
@@ -927,7 +926,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
list_del_init(&workload->list);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
/* if workload->status is not successful, it means the GPU
* hit a hang or something went wrong with i915/GVT, and GVT
* won't inject a context switch interrupt to the guest.
@@ -941,7 +940,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
}
workload->complete(workload);
@@ -1001,7 +1000,7 @@ static int workload_thread(void *priv)
workload->ring_id, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(gvt->dev_priv,
+ intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
ret = dispatch_workload(workload);
@@ -1023,7 +1022,7 @@ complete:
complete_current_workload(gvt, ring_id);
if (need_force_wake)
- intel_uncore_forcewake_put(gvt->dev_priv,
+ intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
intel_runtime_pm_put_unchecked(gvt->dev_priv);
@@ -1114,9 +1113,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
- else {
+ } else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
px_dma(i915_ppgtt->pdp.page_directory[i]) =
s->i915_context_pdps[i];
@@ -1150,7 +1149,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
*
*/
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1167,7 +1166,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm))
s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
@@ -1240,7 +1239,7 @@ out_shadow_ctx:
*
*/
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1344,7 +1343,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
u64 pdps[GVT_RING_CTX_NR_PDPS];
switch (desc->addressing_mode) {
@@ -1450,7 +1449,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1486,8 +1485,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_runtime_pm_put_unchecked(dev_priv);
}
- if (ret && (vgpu_is_vm_unhealthy(ret))) {
- enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ if (ret) {
+ if (vgpu_is_vm_unhealthy(ret))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
intel_vgpu_destroy_workload(workload);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0635b2c4bed7..90c6756f5453 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface);
extern const struct intel_vgpu_submission_ops
@@ -160,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 720e2b10adaa..44ce3c2b9ac1 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
- vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* GPU engines. For FLR, engine_mask is ignored.
*/
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+ intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index db7bb5bd5add..863ae12707ba 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_active.h"
+#include "i915_globals.h"
#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
@@ -17,6 +18,7 @@
* nodes from a local slab cache to hopefully reduce the fragmentation.
*/
static struct i915_global_active {
+ struct i915_global base;
struct kmem_cache *slab_cache;
} global;
@@ -285,16 +287,27 @@ void i915_active_retire_noop(struct i915_active_request *active,
#include "selftests/i915_active.c"
#endif
+static void i915_global_active_shrink(void)
+{
+ kmem_cache_shrink(global.slab_cache);
+}
+
+static void i915_global_active_exit(void)
+{
+ kmem_cache_destroy(global.slab_cache);
+}
+
+static struct i915_global_active global = { {
+ .shrink = i915_global_active_shrink,
+ .exit = i915_global_active_exit,
+} };
+
int __init i915_global_active_init(void)
{
global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
if (!global.slab_cache)
return -ENOMEM;
+ i915_global_register(&global.base);
return 0;
}
-
-void __exit i915_global_active_exit(void)
-{
- kmem_cache_destroy(global.slab_cache);
-}
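The active-node slab cache now hands its shrink/exit callbacks to the shared i915_globals registry instead of exporting its own __exit routine. A minimal sketch of such a registration pattern, assuming a list-based registry walked at shrink/unload time (the sketch_* names are illustrative, not the driver's actual internals):

#include <linux/list.h>

struct i915_global_sketch {
	struct list_head link;
	void (*shrink)(void);
	void (*exit)(void);
};

static LIST_HEAD(sketch_globals);

/* Each user registers its callbacks once from its init function. */
static void sketch_global_register(struct i915_global_sketch *global)
{
	list_add_tail(&global->link, &sketch_globals);
}

/* Module unload walks the registry instead of per-file __exit hooks. */
static void sketch_globals_exit(void)
{
	struct i915_global_sketch *global;

	list_for_each_entry(global, &sketch_globals, link)
		global->exit();
}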
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 12b5c1d287d1..7d758719ce39 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -108,19 +108,6 @@ i915_active_request_set_retire_fn(struct i915_active_request *active,
active->retire = fn ?: i915_active_retire_noop;
}
-static inline struct i915_request *
-__i915_active_request_peek(const struct i915_active_request *active)
-{
- /*
- * Inside the error capture (running with the driver in an unknown
- * state), we want to bend the rules slightly (a lot).
- *
- * Work is in progress to make it safer, in the meantime this keeps
- * the known issue from spamming the logs.
- */
- return rcu_dereference_protected(active->request, 1);
-}
-
/**
* i915_active_request_raw - return the active request
* @active - the active tracker
@@ -419,7 +406,4 @@ void i915_active_fini(struct i915_active *ref);
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
-int i915_global_active_init(void);
-void i915_global_active_exit(void);
-
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 33e8eed64423..503d548a55f7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -868,8 +868,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
if (!IS_GEN(engine->i915, 7))
return;
- switch (engine->id) {
- case RCS:
+ switch (engine->class) {
+ case RENDER_CLASS:
if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
@@ -889,12 +889,12 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
- case VCS:
+ case VIDEO_DECODE_CLASS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
- case BCS:
+ case COPY_ENGINE_CLASS:
if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
@@ -913,14 +913,14 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
- case VECS:
+ case VIDEO_ENHANCEMENT_CLASS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
- MISSING_CASE(engine->id);
+ MISSING_CASE(engine->class);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4..5823ffb17821 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,14 +26,21 @@
*
*/
-#include <linux/sort.h>
#include <linux/sched/mm.h>
+#include <linux/sort.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
-#include "intel_drv.h"
-#include "intel_guc_submission.h"
#include "i915_reset.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_guc_submission.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -388,12 +395,9 @@ static void print_context_stats(struct seq_file *m,
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &i915->contexts.list, link) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
+ list_for_each_entry(ce, &ctx->active_engines, active_link) {
if (ce->state)
per_file_stats(0, ce->state->obj, &kstats);
if (ce->ring)
@@ -412,9 +416,8 @@ static void print_context_stats(struct seq_file *m,
rcu_read_lock();
task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
- snprintf(name, sizeof(name), "%s/%d",
- task ? task->comm : "<unknown>",
- ctx->user_handle);
+ snprintf(name, sizeof(name), "%s",
+ task ? task->comm : "<unknown>");
rcu_read_unlock();
print_file_stats(m, name, stats);
@@ -830,11 +833,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
} else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
- I915_READ(IER));
+ I915_READ(GEN2_IER));
seq_printf(m, "Interrupt identity: %08x\n",
- I915_READ(IIR));
+ I915_READ(GEN2_IIR));
seq_printf(m, "Interrupt mask: %08x\n",
- I915_READ(IMR));
+ I915_READ(GEN2_IMR));
for_each_pipe(dev_priv, pipe)
seq_printf(m, "Pipe %c stat: %08x\n",
pipe_name(pipe),
@@ -884,7 +887,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
for_each_engine(engine, dev_priv, id) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
- engine->name, I915_READ_IMR(engine));
+ engine->name, ENGINE_READ(engine, RING_IMR));
}
}
@@ -1097,7 +1100,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
/* RPSTAT1 is in the GT power well */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
if (INTEL_GEN(dev_priv) >= 9)
@@ -1125,7 +1128,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = intel_gpu_freq(dev_priv,
intel_get_cagf(dev_priv, rpstat));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
if (INTEL_GEN(dev_priv) >= 11) {
pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
@@ -1281,14 +1284,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_wakeref_t wakeref;
enum intel_engine_id id;
+ seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
- seq_puts(m, "Wedged\n");
+ seq_puts(m, "\tWedged\n");
if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
- seq_puts(m, "Reset in progress: struct_mutex backoff\n");
- if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
- seq_puts(m, "Waiter holding struct mutex\n");
- if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
- seq_puts(m, "struct_mutex blocked for reset\n");
+ seq_puts(m, "\tDevice (global) reset in progress\n");
if (!i915_modparams.enable_hangcheck) {
seq_puts(m, "Hangcheck disabled\n");
@@ -1298,10 +1298,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
with_intel_runtime_pm(dev_priv, wakeref) {
for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
- seqno[id] = intel_engine_get_seqno(engine);
+ seqno[id] = intel_engine_get_hangcheck_seqno(engine);
}
- intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
}
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
@@ -1318,8 +1318,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
- engine->hangcheck.seqno, seqno[id],
- intel_engine_last_submit(engine),
+ engine->hangcheck.last_seqno,
+ seqno[id],
+ engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies -
engine->hangcheck.action_timestamp));
@@ -1327,7 +1328,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
- if (engine->id == RCS) {
+ if (engine->id == RCS0) {
seq_puts(m, "\tinstdone read =\n");
i915_instdone_info(dev_priv, m, &instdone);
@@ -1419,13 +1420,14 @@ static int ironlake_drpc_info(struct seq_file *m)
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_uncore *uncore = &i915->uncore;
struct intel_uncore_forcewake_domain *fw_domain;
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
- i915->uncore.user_forcewake.count);
+ uncore->user_forcewake.count);
- for_each_fw_domain(fw_domain, i915, tmp)
+ for_each_fw_domain(fw_domain, uncore, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
READ_ONCE(fw_domain->wake_count));
@@ -1882,9 +1884,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
- enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1892,6 +1892,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ struct intel_context *ce;
+
seq_puts(m, "HW context ");
if (!list_empty(&ctx->hw_id_link))
seq_printf(m, "%x [pin %u]", ctx->hw_id,
@@ -1914,11 +1916,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce =
- to_intel_context(ctx, engine);
-
- seq_printf(m, "%s: ", engine->name);
+ list_for_each_entry(ce, &ctx->active_engines, active_link) {
+ seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
if (ce->ring)
@@ -2023,11 +2022,9 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
- struct drm_file *file;
with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -2061,22 +2058,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, rps->efficient_freq),
intel_gpu_freq(dev_priv, rps->boost_freq));
- mutex_lock(&dev->filelist_mutex);
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s [%d]: %d boosts\n",
- task ? task->comm : "<unknown>",
- task ? task->pid : -1,
- atomic_read(&file_priv->rps_client.boosts));
- rcu_read_unlock();
- }
- seq_printf(m, "Kernel (anonymous) boosts: %d\n",
- atomic_read(&rps->boosts));
- mutex_unlock(&dev->filelist_mutex);
+ seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
if (INTEL_GEN(dev_priv) >= 6 &&
rps->enabled &&
@@ -2084,12 +2066,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
u32 rpup, rpupei;
u32 rpdown, rpdownei;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
rps_power_to_str(rps->power.mode));
@@ -2112,8 +2094,8 @@ static int i915_llc(struct seq_file *m, void *data)
const bool edram = INTEL_GEN(dev_priv) > 8;
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
- seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
- intel_uncore_edram_size(dev_priv)/1024/1024);
+ seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+ dev_priv->edram_size_mb);
return 0;
}
@@ -2270,7 +2252,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
const struct intel_guc *guc = &dev_priv->guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
struct intel_guc_client *client = guc->execbuf_client;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
int index;
if (!USES_GUC_SUBMISSION(dev_priv))
@@ -2607,7 +2589,6 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
- struct drm_modeset_acquire_ctx ctx;
intel_wakeref_t wakeref;
int ret;
@@ -2618,18 +2599,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
wakeref = intel_runtime_pm_get(dev_priv);
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
-retry:
- ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ ret = intel_psr_debug_set(dev_priv, val);
intel_runtime_pm_put(dev_priv, wakeref);
@@ -2686,8 +2656,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_printf(m, "Runtime power status: %s\n",
enableddisabled(!dev_priv->power_domains.wakeref));
- seq_printf(m, "GPU idle: %s (epoch %u)\n",
- yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -2904,7 +2873,6 @@ static void intel_connector_info(struct seq_file *m,
if (connector->status == connector_status_disconnected)
return;
- seq_printf(m, "\tname: %s\n", connector->display_info.name);
seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
connector->display_info.width_mm,
connector->display_info.height_mm);
@@ -3123,8 +3091,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(dev_priv);
- seq_printf(m, "GT awake? %s (epoch %u)\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
+ seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
seq_printf(m, "CS timestamp frequency: %u kHz\n",
@@ -3211,7 +3178,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+ const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
struct i915_wa *wa;
unsigned int i;
@@ -3865,11 +3832,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int
i915_wedged_get(void *data, u64 *val)
{
- struct drm_i915_private *dev_priv = data;
+ int ret = i915_terminally_wedged(data);
- *val = i915_terminally_wedged(&dev_priv->gpu_error);
-
- return 0;
+ switch (ret) {
+ case -EIO:
+ *val = 1;
+ return 0;
+ case 0:
+ *val = 0;
+ return 0;
+ default:
+ return ret;
+ }
}
static int
@@ -3877,16 +3851,9 @@ i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- /*
- * There is no safeguard against this debugfs entry colliding
- * with the hangcheck calling same i915_handle_error() in
- * parallel, causing an explosion. For now we assume that the
- * test harness is responsible enough not to inject gpu hangs
- * while it is writing to 'i915_wedged'
- */
-
- if (i915_reset_backoff(&i915->gpu_error))
- return -EAGAIN;
+ /* Flush any previous reset before applying for a new one */
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
i915_handle_error(i915, val, I915_ERROR_CAPTURE,
"Manually set wedged engine mask = %llx", val);
@@ -3927,12 +3894,9 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- intel_wakeref_t wakeref;
- int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
- wakeref = intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE &&
wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
@@ -3941,9 +3905,11 @@ i915_drop_caches_set(void *data, u64 val)
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
+ int ret;
+
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
- goto out;
+ return ret;
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(i915,
@@ -3957,7 +3923,7 @@ i915_drop_caches_set(void *data, u64 val)
mutex_unlock(&i915->drm.struct_mutex);
}
- if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
+ if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
i915_handle_error(i915, ALL_ENGINES, 0, NULL);
fs_reclaim_acquire(GFP_KERNEL);
@@ -3982,10 +3948,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
-out:
- intel_runtime_pm_put(i915, wakeref);
-
- return ret;
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
@@ -4293,7 +4256,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
return 0;
file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
- intel_uncore_forcewake_user_get(i915);
+ intel_uncore_forcewake_user_get(&i915->uncore);
return 0;
}
@@ -4305,7 +4268,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
- intel_uncore_forcewake_user_put(i915);
+ intel_uncore_forcewake_user_put(&i915->uncore);
intel_runtime_pm_put(i915,
(intel_wakeref_t)(uintptr_t)file->private_data);
@@ -4830,7 +4793,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
&ctx);
if (ret) {
- ret = -EINTR;
+ if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+ try_again = true;
+ continue;
+ }
break;
}
crtc = connector->state->crtc;
@@ -4855,6 +4821,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
yesno(crtc_state->dsc_params.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ seq_printf(m, "Force_DSC_Enable: %s\n",
+ yesno(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
seq_printf(m, "FEC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9df65d386d11..1ad88e6d7c04 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -48,12 +48,19 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_trace.h"
#include "i915_pmu.h"
-#include "i915_reset.h"
#include "i915_query.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_audio.h"
+#include "intel_cdclk.h"
+#include "intel_csr.h"
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_fbdev.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
#include "intel_uc.h"
#include "intel_workarounds.h"
@@ -188,6 +195,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* CometPoint is CNP Compatible */
+ return PCH_CNP;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
@@ -219,20 +231,20 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN(dev_priv, 5))
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 5))
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -330,16 +342,16 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
- value = !!dev_priv->engine[VCS];
+ value = !!dev_priv->engine[VCS0];
break;
case I915_PARAM_HAS_BLT:
- value = !!dev_priv->engine[BCS];
+ value = !!dev_priv->engine[BCS0];
break;
case I915_PARAM_HAS_VEBOX:
- value = !!dev_priv->engine[VECS];
+ value = !!dev_priv->engine[VECS0];
break;
case I915_PARAM_HAS_BSD2:
- value = !!dev_priv->engine[VCS2];
+ value = !!dev_priv->engine[VCS1];
break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
@@ -348,10 +360,10 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
+ value = INTEL_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = 0;
+ value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -714,8 +726,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- if (i915_gem_suspend(dev_priv))
- DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_suspend(dev_priv);
i915_gem_fini(dev_priv);
cleanup_modeset:
intel_modeset_cleanup(dev);
@@ -864,15 +875,19 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
+ intel_device_info_subplatform_init(dev_priv);
+
+ intel_uncore_init_early(&dev_priv->uncore);
+
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
- spin_lock_init(&dev_priv->uncore.lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
+ mutex_init(&dev_priv->hdcp_comp_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(dev_priv);
@@ -930,46 +945,6 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
i915_engines_cleanup(dev_priv);
}
-static int i915_mmio_setup(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- int mmio_bar;
- int mmio_size;
-
- mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
- /*
- * Before gen4, the registers and the GTT are behind different BARs.
- * However, from gen4 onwards, the registers and the GTT are shared
- * in the same BAR, so we want to restrict this ioremap from
- * clobbering the GTT which we want ioremap_wc instead. Fortunately,
- * the register BAR remains the same size for all the earlier
- * generations up to Ironlake.
- */
- if (INTEL_GEN(dev_priv) < 5)
- mmio_size = 512 * 1024;
- else
- mmio_size = 2 * 1024 * 1024;
- dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
- if (dev_priv->regs == NULL) {
- DRM_ERROR("failed to map registers\n");
-
- return -EIO;
- }
-
- /* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev_priv);
-
- return 0;
-}
-
-static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- intel_teardown_mchbar(dev_priv);
- pci_iounmap(pdev, dev_priv->regs);
-}
-
/**
* i915_driver_init_mmio - setup device MMIO
* @dev_priv: device private
@@ -989,15 +964,16 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
if (i915_get_bridge_dev(dev_priv))
return -EIO;
- ret = i915_mmio_setup(dev_priv);
+ ret = intel_uncore_init_mmio(&dev_priv->uncore);
if (ret < 0)
goto err_bridge;
- intel_uncore_init(dev_priv);
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev_priv);
intel_device_info_init_mmio(dev_priv);
- intel_uncore_prune(dev_priv);
+ intel_uncore_prune_mmio_domains(&dev_priv->uncore);
intel_uc_init_mmio(dev_priv);
@@ -1010,8 +986,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
return 0;
err_uncore:
- intel_uncore_fini(dev_priv);
- i915_mmio_cleanup(dev_priv);
+ intel_teardown_mchbar(dev_priv);
+ intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@@ -1024,8 +1000,8 @@ err_bridge:
*/
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
- intel_uncore_fini(dev_priv);
- i915_mmio_cleanup(dev_priv);
+ intel_teardown_mchbar(dev_priv);
+ intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -1034,110 +1010,180 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
-static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
{
- if (size == 0)
- return I915_DRAM_RANK_INVALID;
- if (rank == SKL_DRAM_RANK_SINGLE)
- return I915_DRAM_RANK_SINGLE;
- else if (rank == SKL_DRAM_RANK_DUAL)
- return I915_DRAM_RANK_DUAL;
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
- return I915_DRAM_RANK_INVALID;
+ return str[type];
}
-static bool
-skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
- if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
- return true;
- else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
- return true;
- else if (rank == SKL_DRAM_RANK_SINGLE && width == 16 && size == 8)
- return true;
- else if (rank == SKL_DRAM_RANK_DUAL && width == 16 && size == 16)
- return true;
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
- return false;
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return val & SKL_DRAM_SIZE_MASK;
}
-static int
-skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
+static int skl_get_dimm_width(u16 val)
{
- u32 tmp_l, tmp_s;
- u32 s_val = val >> SKL_DRAM_S_SHIFT;
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
- if (!val)
- return -EINVAL;
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+ return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & CNL_DRAM_WIDTH_MASK) {
+ case CNL_DRAM_WIDTH_X8:
+ case CNL_DRAM_WIDTH_X16:
+ case CNL_DRAM_WIDTH_X32:
+ val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
- tmp_l = val & SKL_DRAM_SIZE_MASK;
- tmp_s = s_val & SKL_DRAM_SIZE_MASK;
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total GB to Gb per DRAM device */
+ return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (INTEL_GEN(dev_priv) >= 10) {
+ dimm->size = cnl_get_dimm_size(val);
+ dimm->width = cnl_get_dimm_width(val);
+ dimm->ranks = cnl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
+ channel, 'S', val >> 16);
- if (tmp_l == 0 && tmp_s == 0)
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ DRM_DEBUG_KMS("CH%u not populated\n", channel);
return -EINVAL;
+ }
- ch->l_info.size = tmp_l;
- ch->s_info.size = tmp_s;
-
- tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- ch->l_info.width = (1 << tmp_l) * 8;
- ch->s_info.width = (1 << tmp_s) * 8;
-
- tmp_l = val & SKL_DRAM_RANK_MASK;
- tmp_s = s_val & SKL_DRAM_RANK_MASK;
- ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
- ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);
-
- if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
- ch->s_info.rank == I915_DRAM_RANK_DUAL)
- ch->rank = I915_DRAM_RANK_DUAL;
- else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
- ch->s_info.rank == I915_DRAM_RANK_SINGLE)
- ch->rank = I915_DRAM_RANK_DUAL;
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
else
- ch->rank = I915_DRAM_RANK_SINGLE;
+ ch->ranks = 1;
- ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
- ch->l_info.width) ||
- skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
- ch->s_info.width);
+ ch->is_16gb_dimm =
+ skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
- DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
- ch->l_info.size, ch->l_info.width,
- ch->l_info.rank ? "dual" : "single",
- ch->s_info.size, ch->s_info.width,
- ch->s_info.rank ? "dual" : "single");
+ DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, yesno(ch->is_16gb_dimm));
return 0;
}
static bool
-intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
- struct dram_channel_info *ch0)
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
{
- return (val_ch0 == val_ch1 &&
- (ch0->s_info.size == 0 ||
- (ch0->l_info.size == ch0->s_info.size &&
- ch0->l_info.width == ch0->s_info.width &&
- ch0->l_info.rank == ch0->s_info.rank)));
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
- struct dram_channel_info ch0, ch1;
- u32 val_ch0, val_ch1;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
int ret;
- val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(&ch0, val_ch0);
+ val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
if (ret == 0)
dram_info->num_channels++;
- val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(&ch1, val_ch1);
+ val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
if (ret == 0)
dram_info->num_channels++;
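To make the DIMM geometry arithmetic in the hunk above concrete, here is a small userspace-style example mirroring intel_dimm_num_devices() and skl_is_16gb_dimm(); the struct and values are illustrative, while the kernel helpers operate on fields decoded from MCHBAR registers:

#include <stdio.h>

struct dimm_sketch { unsigned int size_gb, width, ranks; };

/* Each rank is 64 bits wide and is built from x<width> DRAM devices. */
static unsigned int num_devices(const struct dimm_sketch *d)
{
	return d->width ? d->ranks * 64 / d->width : 0;
}

/* 16Gb devices: total Gb (size_gb * 8) divided by the device count is 16. */
static int is_16gb_dimm(const struct dimm_sketch *d)
{
	unsigned int ndev = num_devices(d);

	return ndev && 8 * d->size_gb / ndev == 16;
}

int main(void)
{
	/* A 16GB single-rank x8 DIMM: 64/8 = 8 devices of 16Gb each. */
	struct dimm_sketch d = { .size_gb = 16, .width = 8, .ranks = 1 };

	printf("devices=%u, 16Gb devices: %s\n",
	       num_devices(&d), is_16gb_dimm(&d) ? "yes" : "no");
	return 0;
}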
@@ -1151,28 +1197,47 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
* will be the same as with single-rank memory, so consider it
* single-rank memory.
*/
- if (ch0.rank == I915_DRAM_RANK_SINGLE ||
- ch1.rank == I915_DRAM_RANK_SINGLE)
- dram_info->rank = I915_DRAM_RANK_SINGLE;
+ if (ch0.ranks == 1 || ch1.ranks == 1)
+ dram_info->ranks = 1;
else
- dram_info->rank = max(ch0.rank, ch1.rank);
+ dram_info->ranks = max(ch0.ranks, ch1.ranks);
- if (dram_info->rank == I915_DRAM_RANK_INVALID) {
+ if (dram_info->ranks == 0) {
DRM_INFO("couldn't get memory rank information\n");
return -EINVAL;
}
dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
- dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
- val_ch1,
- &ch0);
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
- DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
- dev_priv->dram_info.symmetric_memory ? "" : "not ");
+ DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
+ yesno(dram_info->symmetric_memory));
return 0;
}
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
@@ -1180,6 +1245,9 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
u32 mem_freq_khz, val;
int ret;
+ dram_info->type = skl_get_dram_type(dev_priv);
+ DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
+
ret = skl_dram_get_channels_info(dev_priv);
if (ret)
return ret;
@@ -1200,6 +1268,85 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
return 0;
}
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
+ u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * GB to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
+
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
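In the bxt_get_dimm_*() helpers above, the register reports capacity in Gb per DRAM device rather than total DIMM size, so bxt_get_dimm_info() converts back to GB via the device count. A worked example with assumed, illustrative field values:

/*
 * Assumed decode (not real register values):
 *   device size = 8 Gb, width = x16, ranks = 2
 *   devices per DIMM = ranks * 64 / width = 2 * 64 / 16 = 8
 *   total DIMM size  = 8 Gb * 8 devices / 8 (bits per byte) = 8 GB
 * which is exactly dimm->size = bxt_get_dimm_size(val) *
 *                               intel_dimm_num_devices(dimm) / 8 above.
 */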
@@ -1228,57 +1375,44 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
- u8 size, width;
- enum dram_rank rank;
- u32 tmp;
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
if (val == 0xFFFFFFFF)
continue;
dram_info->num_channels++;
- tmp = val & BXT_DRAM_RANK_MASK;
-
- if (tmp == BXT_DRAM_RANK_SINGLE)
- rank = I915_DRAM_RANK_SINGLE;
- else if (tmp == BXT_DRAM_RANK_DUAL)
- rank = I915_DRAM_RANK_DUAL;
- else
- rank = I915_DRAM_RANK_INVALID;
-
- tmp = val & BXT_DRAM_SIZE_MASK;
- if (tmp == BXT_DRAM_SIZE_4GB)
- size = 4;
- else if (tmp == BXT_DRAM_SIZE_6GB)
- size = 6;
- else if (tmp == BXT_DRAM_SIZE_8GB)
- size = 8;
- else if (tmp == BXT_DRAM_SIZE_12GB)
- size = 12;
- else if (tmp == BXT_DRAM_SIZE_16GB)
- size = 16;
- else
- size = 0;
-
- tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
- width = (1 << tmp) * 8;
- DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
- width, rank == I915_DRAM_RANK_SINGLE ? "single" :
- rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ WARN_ON(type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
/*
* If any of the channels is a single-rank channel, the
* worst-case output will be the same as with single-rank
* memory, so consider it single-rank memory.
*/
- if (dram_info->rank == I915_DRAM_RANK_INVALID)
- dram_info->rank = rank;
- else if (rank == I915_DRAM_RANK_SINGLE)
- dram_info->rank = I915_DRAM_RANK_SINGLE;
+ if (dram_info->ranks == 0)
+ dram_info->ranks = dimm.ranks;
+ else if (dimm.ranks == 1)
+ dram_info->ranks = 1;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
}
- if (dram_info->rank == I915_DRAM_RANK_INVALID) {
- DRM_INFO("couldn't get memory rank information\n");
+ if (dram_info->type == INTEL_DRAM_UNKNOWN ||
+ dram_info->ranks == 0) {
+ DRM_INFO("couldn't get memory information\n");
return -EINVAL;
}
@@ -1290,14 +1424,8 @@ static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
- char bandwidth_str[32];
int ret;
- dram_info->valid = false;
- dram_info->rank = I915_DRAM_RANK_INVALID;
- dram_info->bandwidth_kbps = 0;
- dram_info->num_channels = 0;
-
/*
* Assume 16Gb DIMMs are present until proven otherwise.
* This is only used for the level 0 watermark latency
@@ -1305,28 +1433,61 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
*/
dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) < 9)
return;
- /* Need to calculate bandwidth only for Gen9 */
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ret = bxt_get_dram_info(dev_priv);
- else if (IS_GEN(dev_priv, 9))
- ret = skl_get_dram_info(dev_priv);
else
- ret = skl_dram_get_channels_info(dev_priv);
+ ret = skl_get_dram_info(dev_priv);
if (ret)
return;
- if (dram_info->bandwidth_kbps)
- sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
+ DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
+ dram_info->bandwidth_kbps,
+ dram_info->num_channels);
+
+ DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
+ dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
+{
+ const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ const unsigned int sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
+
+static void edram_detect(struct drm_i915_private *dev_priv)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 9))
+ return;
+
+ edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * The capability bits needed for the size calculation are not present
+ * before gen9, so always return 128MB.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ dev_priv->edram_size_mb = 128;
else
- sprintf(bandwidth_str, "unknown");
- DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
- bandwidth_str, dram_info->num_channels);
- DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
- (dram_info->rank == I915_DRAM_RANK_DUAL) ?
- "dual" : "single", yesno(dram_info->is_16gb_dimm));
+ dev_priv->edram_size_mb =
+ gen9_edram_size_mb(dev_priv, edram_cap);
+
+ DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
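The Gen9+ eDRAM size computed above is simply banks * ways * sets, reported in MB and decoded from HSW_EDRAM_CAP. A worked example with assumed, illustrative field values:

/*
 * Assumed decode (field values are illustrative, not from hardware):
 *   EDRAM_NUM_BANKS(cap) = 8
 *   EDRAM_WAYS_IDX(cap)  = 4  -> ways[4] = 16
 *   EDRAM_SETS_IDX(cap)  = 2  -> sets[2] = 2
 *   edram_size_mb = 8 * 16 * 2 = 256, logged as "Found 256MB of eDRAM".
 * Pre-gen9 parts lack these fields, hence the hard-coded 128MB fallback.
 */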
/**
@@ -1348,7 +1509,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
- !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+ !intel_vgpu_has_full_ppgtt(dev_priv)) {
i915_report_error(dev_priv,
"incompatible vGPU found, support for isolated ppGTT required\n");
return -ENXIO;
@@ -1371,6 +1532,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
+ /* needs to be done before ggtt probe */
+ edram_detect(dev_priv);
+
i915_perf_init(dev_priv);
ret = i915_ggtt_probe_hw(dev_priv);
@@ -1606,10 +1770,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
if (drm_debug & DRM_UT_DRIVER) {
struct drm_printer p = drm_debug_printer("i915 device info:");
- drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
INTEL_DEVID(dev_priv),
INTEL_REVID(dev_priv),
intel_platform_name(INTEL_INFO(dev_priv)->platform),
+ intel_subplatform(RUNTIME_INFO(dev_priv),
+ INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
@@ -1652,8 +1818,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(device_info, match_info, sizeof(*device_info));
RUNTIME_INFO(i915)->device_id = pdev->device;
- BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
- BITS_PER_TYPE(device_info->platform_mask));
BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
@@ -1750,11 +1914,17 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_unregister(dev_priv);
+ /*
+ * After unregistering the device to prevent any new users, cancel
+ * all in-flight requests so that we can quickly unbind the active
+ * resources.
+ */
+ i915_gem_set_wedged(dev_priv);
+
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
- if (i915_gem_suspend(dev_priv))
- DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_suspend(dev_priv);
drm_atomic_helper_shutdown(dev);
@@ -1862,7 +2032,6 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
- int err;
/*
* NB intel_display_suspend() may issue new requests after we've
@@ -1870,12 +2039,9 @@ static int i915_drm_prepare(struct drm_device *dev)
* split out that work and pull it forward so that after point,
* the GPU is not woken again.
*/
- err = i915_gem_suspend(i915);
- if (err)
- dev_err(&i915->drm.pdev->dev,
- "GEM idle failed, suspend/resume might fail\n");
+ i915_gem_suspend(i915);
- return err;
+ return 0;
}
static int i915_drm_suspend(struct drm_device *dev)
@@ -1945,7 +2111,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
- intel_uncore_suspend(dev_priv);
+ intel_uncore_suspend(&dev_priv->uncore);
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
@@ -2141,7 +2307,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
- intel_uncore_resume_early(dev_priv);
+ intel_uncore_resume_early(&dev_priv->uncore);
+
+ i915_check_and_clear_faults(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
@@ -2545,7 +2713,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
if (!force_on)
return 0;
- err = intel_wait_for_register(dev_priv,
+ err = intel_wait_for_register(&dev_priv->uncore,
VLV_GTLC_SURVIVABILITY_REG,
VLV_GFX_CLK_STATUS_BIT,
VLV_GFX_CLK_STATUS_BIT,
@@ -2711,7 +2879,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_disable_interrupts(dev_priv);
- intel_uncore_suspend(dev_priv);
+ intel_uncore_suspend(&dev_priv->uncore);
ret = 0;
if (INTEL_GEN(dev_priv) >= 11) {
@@ -2728,7 +2896,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
- intel_uncore_runtime_resume(dev_priv);
+ intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2745,7 +2913,7 @@ static int intel_runtime_suspend(struct device *kdev)
enable_rpm_wakeref_asserts(dev_priv);
intel_runtime_pm_cleanup(dev_priv);
- if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+ if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
dev_priv->runtime_pm.suspended = true;
@@ -2773,7 +2941,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D1);
}
- assert_forcewakes_inactive(dev_priv);
+ assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
@@ -2799,7 +2967,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
dev_priv->runtime_pm.suspended = false;
- if (intel_uncore_unclaimed_mmio(dev_priv))
+ if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
if (INTEL_GEN(dev_priv) >= 11) {
@@ -2825,7 +2993,7 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
}
- intel_uncore_runtime_resume(dev_priv);
+ intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2969,7 +3137,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a67a63b5aa84..066fd2a12851 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,6 +55,7 @@
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_connector.h>
+#include <drm/i915_mei_hdcp_interface.h>
#include "i915_fixed.h"
#include "i915_params.h"
@@ -65,13 +66,14 @@
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
+#include "intel_frontbuffer.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
+#include "intel_uc.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
#include "intel_workarounds.h"
-#include "intel_uc.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
@@ -91,8 +93,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190207"
-#define DRIVER_TIMESTAMP 1549572331
+#define DRIVER_DATE "20190417"
+#define DRIVER_TIMESTAMP 1555492067
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -215,11 +217,12 @@ struct drm_i915_file_private {
*/
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
} mm;
+
struct idr context_idr;
+ struct mutex context_idr_lock; /* guards context_idr */
- struct intel_rps_client {
- atomic_t boosts;
- } rps_client;
+ struct idr vm_idr;
+ struct mutex vm_idr_lock; /* guards vm_idr */
unsigned int bsd_engine;
@@ -280,7 +283,8 @@ struct drm_i915_display_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state);
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
@@ -323,6 +327,7 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
+ int (*color_check)(struct intel_crtc_state *crtc_state);
/*
* Program double buffered color management registers during
* vblank evasion. The registers should then latch during the
@@ -371,14 +376,6 @@ enum i915_cache_level {
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-enum fb_op_origin {
- ORIGIN_GTT,
- ORIGIN_CPU,
- ORIGIN_CS,
- ORIGIN_FLIP,
- ORIGIN_DIRTYFB,
-};
-
struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
@@ -508,7 +505,7 @@ struct i915_psr {
u32 debug;
bool sink_support;
- bool prepared, enabled;
+ bool enabled;
struct intel_dp *dp;
enum pipe pipe;
bool active;
@@ -526,16 +523,22 @@ struct i915_psr {
u16 su_x_granularity;
};
+/*
+ * Sorted by south display engine compatibility.
+ * If the new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
PCH_KBP, /* Kaby Lake PCH */
- PCH_CNP, /* Cannon Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
- PCH_NOP, /* PCH without south display */
};
enum intel_sbi_destination {
@@ -949,6 +952,7 @@ struct ddi_vbt_port_info {
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
u8 hdmi_level_shift;
+ u8 present:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
u8 supports_dp:1;
@@ -1009,6 +1013,7 @@ struct intel_vbt_data {
enum psr_lines_to_wait lines_to_wait;
int tp1_wakeup_time_us;
int tp2_tp3_wakeup_time_us;
+ int psr2_tp2_tp3_wakeup_time_us;
} psr;
struct {
@@ -1130,6 +1135,7 @@ struct skl_wm_level {
u16 plane_res_b;
u8 plane_res_l;
bool plane_en;
+ bool ignore_lines;
};
/* Stores plane specific WM parameters */
@@ -1200,7 +1206,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
INTEL_PIPE_CRC_SOURCE_PLANE2,
- INTEL_PIPE_CRC_SOURCE_PF,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
INTEL_PIPE_CRC_SOURCE_PIPE,
/* TV/DP on pre-gen5/vlv can't use the pipe source. */
INTEL_PIPE_CRC_SOURCE_TV,
@@ -1468,13 +1478,6 @@ struct intel_cdclk_state {
struct drm_i915_private {
struct drm_device drm;
- struct kmem_cache *objects;
- struct kmem_cache *vmas;
- struct kmem_cache *luts;
- struct kmem_cache *requests;
- struct kmem_cache *dependencies;
- struct kmem_cache *priorities;
-
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@@ -1503,8 +1506,6 @@ struct drm_i915_private {
*/
resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
- void __iomem *regs;
-
struct intel_uncore uncore;
struct i915_virtual_gpu vgpu;
@@ -1622,6 +1623,8 @@ struct drm_i915_private {
struct intel_cdclk_state actual;
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
+
+ int force_min_cdclk;
} cdclk;
/**
@@ -1700,8 +1703,11 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;
- /* Cannot be determined by PCIID. You must always read a register. */
- u32 edram_cap;
+ /*
+ * edram size in MB.
+ * Cannot be determined by PCIID. You must always read a register.
+ */
+ u32 edram_size_mb;
/*
* Protects RPS/RC6 register access and PCU communication.
@@ -1741,6 +1747,7 @@ struct drm_i915_private {
*
*/
struct mutex av_mutex;
+ int audio_power_refcount;
struct {
struct mutex mutex;
@@ -1831,13 +1838,16 @@ struct drm_i915_private {
bool valid;
bool is_16gb_dimm;
u8 num_channels;
- enum dram_rank {
- I915_DRAM_RANK_INVALID = 0,
- I915_DRAM_RANK_SINGLE,
- I915_DRAM_RANK_DUAL
- } rank;
+ u8 ranks;
u32 bandwidth_kbps;
bool symmetric_memory;
+ enum intel_dram_type {
+ INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR3,
+ INTEL_DRAM_DDR4,
+ INTEL_DRAM_LPDDR3,
+ INTEL_DRAM_LPDDR4
+ } type;
} dram_info;
struct i915_runtime_pm runtime_pm;
@@ -1985,7 +1995,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
struct i915_gt_timelines {
@@ -1997,6 +2006,7 @@ struct drm_i915_private {
struct list_head hwsp_free_list;
} timelines;
+ intel_engine_mask_t active_engines;
struct list_head active_rings;
struct list_head closed_vma;
u32 active_requests;
@@ -2011,12 +2021,6 @@ struct drm_i915_private {
intel_wakeref_t awake;
/**
- * The number of times we have woken up.
- */
- unsigned int epoch;
-#define I915_EPOCH_INVALID 0
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -2039,6 +2043,14 @@ struct drm_i915_private {
struct i915_vma *scratch;
} gt;
+ /* For i945gm vblank irq vs. C3 workaround */
+ struct {
+ struct work_struct work;
+ struct pm_qos_request pm_qos;
+ u8 c3_disable_latency;
+ u8 enabled;
+ } i945gm_vblank;
+
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -2055,18 +2067,25 @@ struct drm_i915_private {
struct i915_pmu pmu;
+ struct i915_hdcp_comp_master *hdcp_master;
+ bool hdcp_comp_added;
+
+ /* Mutex to protect the above hdcp component related values. */
+ struct mutex hdcp_comp_mutex;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
*/
};
+struct dram_dimm_info {
+ u8 size, width, ranks;
+};
+
struct dram_channel_info {
- struct info {
- u8 size, width;
- enum dram_rank rank;
- } l_info, s_info;
- enum dram_rank rank;
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
bool is_16gb_dimm;
};
@@ -2095,6 +2114,11 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
return container_of(huc, struct drm_i915_private, huc);
}
+static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
+{
+ return container_of(uncore, struct drm_i915_private, uncore);
+}
+
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
@@ -2104,7 +2128,7 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \
+ for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
(tmp__) ? \
((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
@@ -2274,7 +2298,69 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
+static __always_inline unsigned int
+__platform_mask_index(const struct intel_runtime_info *info,
+ enum intel_platform p)
+{
+ const unsigned int pbits =
+ BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+ /* Expand the platform_mask array if this fails. */
+ BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+ pbits * ARRAY_SIZE(info->platform_mask));
+
+ return p / pbits;
+}
+
+static __always_inline unsigned int
+__platform_mask_bit(const struct intel_runtime_info *info,
+ enum intel_platform p)
+{
+ const unsigned int pbits =
+ BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+ return p % pbits + INTEL_SUBPLATFORM_BITS;
+}
+
+static inline u32
+intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
+{
+ const unsigned int pi = __platform_mask_index(info, p);
+
+ return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+}
+
+static __always_inline bool
+IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
+{
+ const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(info, p);
+ const unsigned int pb = __platform_mask_bit(info, p);
+
+ BUILD_BUG_ON(!__builtin_constant_p(p));
+
+ return info->platform_mask[pi] & BIT(pb);
+}
+
+static __always_inline bool
+IS_SUBPLATFORM(const struct drm_i915_private *i915,
+ enum intel_platform p, unsigned int s)
+{
+ const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(info, p);
+ const unsigned int pb = __platform_mask_bit(info, p);
+ const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
+ const u32 mask = info->platform_mask[pi];
+
+ BUILD_BUG_ON(!__builtin_constant_p(p));
+ BUILD_BUG_ON(!__builtin_constant_p(s));
+ BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
+
+ /* Shift and test on the MSB position so sign flag can be used. */
+ return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
+}
+
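For illustration, here is how the packing above works out numerically; the concrete values are assumptions (32-bit mask words, INTEL_SUBPLATFORM_BITS == 3, and a platform whose enum value happens to be 16), not figures taken from the patch:

	/* pbits = 32 - 3 = 29 usable platform bits per mask word */
	pi = 16 / 29;		/* == 0: the platform bit lives in platform_mask[0] */
	pb = 16 % 29 + 3;	/* == 19: shifted up past the subplatform bits */
	/*
	 * IS_PLATFORM() then tests platform_mask[0] & BIT(19), while the low
	 * three bits of that same word hold the ULT/ULX/... flags filled in
	 * for the platform detected at runtime, which IS_SUBPLATFORM()
	 * checks together with BIT(19).
	 */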
+#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2289,11 +2375,11 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
-#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
-#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
-#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
+#define IS_IRONLAKE_M(dev_priv) \
+ (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
@@ -2308,46 +2394,35 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
+#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
- ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
-/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_BDW_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
- INTEL_DEVID(dev_priv) == 0x0A1E)
-#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
- INTEL_DEVID(dev_priv) == 0x1913 || \
- INTEL_DEVID(dev_priv) == 0x1916 || \
- INTEL_DEVID(dev_priv) == 0x1921 || \
- INTEL_DEVID(dev_priv) == 0x1926)
-#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
- INTEL_DEVID(dev_priv) == 0x1915 || \
- INTEL_DEVID(dev_priv) == 0x191E)
-#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
- INTEL_DEVID(dev_priv) == 0x5913 || \
- INTEL_DEVID(dev_priv) == 0x5916 || \
- INTEL_DEVID(dev_priv) == 0x5921 || \
- INTEL_DEVID(dev_priv) == 0x5926)
-#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
- INTEL_DEVID(dev_priv) == 0x5915 || \
- INTEL_DEVID(dev_priv) == 0x591E)
-#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
- INTEL_DEVID(dev_priv) == 0x87C0 || \
- INTEL_DEVID(dev_priv) == 0x87CA)
+#define IS_HSW_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_SKL_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_KBL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_KBL_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_AML_ULX(dev_priv) \
+ (IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2358,16 +2433,16 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
+#define IS_CFL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
-#define IS_ICL_WITH_PORT_F(dev_priv) (IS_ICELAKE(dev_priv) && \
- INTEL_DEVID(dev_priv) != 0x8A51)
+#define IS_CNL_WITH_PORT_F(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
+#define IS_ICL_WITH_PORT_F(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -2426,28 +2501,22 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
-#define ENGINE_MASK(id) BIT(id)
-#define RENDER_RING ENGINE_MASK(RCS)
-#define BSD_RING ENGINE_MASK(VCS)
-#define BLT_RING ENGINE_MASK(BCS)
-#define VEBOX_RING ENGINE_MASK(VECS)
-#define BSD2_RING ENGINE_MASK(VCS2)
-#define BSD3_RING ENGINE_MASK(VCS3)
-#define BSD4_RING ENGINE_MASK(VCS4)
-#define VEBOX2_RING ENGINE_MASK(VECS2)
-#define ALL_ENGINES (~0)
-
-#define HAS_ENGINE(dev_priv, id) \
- (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
-
-#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
-#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
-#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
-#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
+#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
+
+#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
+ unsigned int first__ = (first); \
+ unsigned int count__ = (count); \
+ (INTEL_INFO(dev_priv)->engine_mask & \
+ GENMASK(first__ + count__ - 1, first__)) >> first__; \
+})
+#define VDBOX_MASK(dev_priv) \
+ ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(dev_priv) \
+ ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
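As a worked example of the instance-mask helpers (the particular engine population is assumed for illustration): on a device exposing VCS0 and VCS2 but not VCS1,

	engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(VCS2);
	VDBOX_MASK(dev_priv);	/* == 0b101: video engine instances 0 and 2 present */

i.e. the GENMASK() window selects the VCS range and the shift renumbers it so bit N means "instance N exists".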
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
-#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
+#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -2462,13 +2531,11 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
-#define HAS_FULL_48BIT_PPGTT(dev_priv) \
- (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
@@ -2512,6 +2579,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
+#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -2558,6 +2626,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
@@ -2567,8 +2636,6 @@ static inline unsigned int i915_sg_segment_size(void)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_CNP_LP(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
@@ -2800,8 +2867,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
-void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
@@ -2844,6 +2909,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
int pass = 2;
do {
rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
drain_workqueue(i915->wq);
} while (--pass);
}
@@ -2974,6 +3040,14 @@ i915_coherent_map_type(struct drm_i915_private *i915)
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type);
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
/**
* i915_gem_object_unpin_map - releases an earlier mapping
* @obj: the object to unmap
@@ -3002,7 +3076,12 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+static inline int __must_check
+i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ return mutex_lock_interruptible(&dev->struct_mutex);
+}
+
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -3016,22 +3095,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine);
-
-static inline bool i915_reset_backoff(struct i915_gpu_error *error)
-{
- return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
-}
-
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool __i915_wedged(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_WEDGED, &error->flags));
}
-static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_failed(struct drm_i915_private *i915)
{
- return i915_reset_backoff(error) | i915_terminally_wedged(error);
+ return __i915_wedged(&i915->gpu_error);
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
@@ -3056,14 +3127,13 @@ void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
-int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps);
+ long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
@@ -3106,7 +3176,6 @@ struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
@@ -3142,7 +3211,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
u32 *reg_state);
/* i915_gem_evict.c */
@@ -3457,18 +3526,21 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}
-#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+#define __I915_REG_OP(op__, dev_priv__, ...) \
+ intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
+
+#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__))
+#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))
-#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__))
+#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
+#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__))
+#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))
-#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
-#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
+#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
+#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__))
+#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))
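The indirection is pure token pasting, so existing call sites keep working unchanged; for example, with a local dev_priv in scope as before,

	I915_READ(reg);
	/* expands to */
	intel_uncore_read(&(dev_priv)->uncore, (reg));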
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
* will be implemented using 2 32-bit writes in an arbitrary order with
@@ -3484,46 +3556,12 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
*
* You have been warned.
*/
-#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
-
-#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
- u32 upper, lower, old_upper, loop = 0; \
- upper = I915_READ(upper_reg); \
- do { \
- old_upper = upper; \
- lower = I915_READ(lower_reg); \
- upper = I915_READ(upper_reg); \
- } while (upper != old_upper && loop++ < 2); \
- (u64)upper << 32 | lower; })
-
-#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
-#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
-
-#define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
- i915_reg_t reg) \
-{ \
- return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
-}
-
-#define __raw_write(x, s) \
-static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
- i915_reg_t reg, uint##x##_t val) \
-{ \
- write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
-}
-__raw_read(8, b)
-__raw_read(16, w)
-__raw_read(32, l)
-__raw_read(64, q)
-
-__raw_write(8, b)
-__raw_write(16, w)
-__raw_write(32, l)
-__raw_write(64, q)
+#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__))
+#define I915_READ64_2x32(lower_reg__, upper_reg__) \
+ __I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))
-#undef __raw_read
-#undef __raw_write
+#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
+#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__))
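The removed I915_READ64_2x32 body above shows the pattern the new read64_2x32 helper is expected to keep; roughly (a sketch, not the helper's actual source):

	u32 upper, lower, old_upper, loop = 0;

	upper = I915_READ(upper_reg);
	do {
		old_upper = upper;
		lower = I915_READ(lower_reg);
		upper = I915_READ(upper_reg);
	} while (upper != old_upper && loop++ < 2);

	/* result: */ (u64)upper << 32 | lower;

i.e. the upper half is re-read until it is stable, so a carry between the two 32-bit halves cannot be observed as a torn 64-bit value.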
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections, such as inside IRQ handlers, where forcewake is explicitly
@@ -3551,10 +3589,10 @@ __raw_write(64, q)
* therefore generally be serialised, by either the dev_priv->uncore.lock or
* a more localised lock guarding all access to that bank of registers.
*/
-#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
-#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
+#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
+#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
+#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8558e81fdc2a..ad01c92aaf74 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -42,6 +42,7 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
+#include "i915_globals.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
@@ -49,6 +50,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include "intel_pm.h"
#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
@@ -100,48 +102,7 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->mm.object_stat_lock);
}
-static int
-i915_gem_wait_for_error(struct i915_gpu_error *error)
-{
- int ret;
-
- might_sleep();
-
- /*
- * Only wait 10 seconds for the gpu reset to complete to avoid hanging
- * userspace. If it takes that long something really bad is going on and
- * we should simply try to bail out and fail as gracefully as possible.
- */
- ret = wait_event_interruptible_timeout(error->reset_queue,
- !i915_reset_backoff(error),
- I915_RESET_TIMEOUT);
- if (ret == 0) {
- DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
- return -EIO;
- } else if (ret < 0) {
- return ret;
- } else {
- return 0;
- }
-}
-
-int i915_mutex_lock_interruptible(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
-
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
- if (ret)
- return ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static u32 __i915_gem_park(struct drm_i915_private *i915)
+static void __i915_gem_park(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -152,9 +113,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
if (!i915->gt.awake)
- return I915_EPOCH_INVALID;
-
- GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+ return;
/*
* Be paranoid and flush a concurrent interrupt to make sure
@@ -183,7 +142,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
- return i915->gt.epoch;
+ i915_globals_park();
}
void i915_gem_park(struct drm_i915_private *i915)
@@ -225,8 +184,7 @@ void i915_gem_unpark(struct drm_i915_private *i915)
i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!i915->gt.awake);
- if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
- i915->gt.epoch = 1;
+ i915_globals_unpark();
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
@@ -351,7 +309,7 @@ static void __start_cpu_write(struct drm_i915_gem_object *obj)
obj->cache_dirty = true;
}
-static void
+void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush)
@@ -459,8 +417,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
struct i915_request *rq;
@@ -478,27 +435,6 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
if (i915_request_completed(rq))
goto out;
- /*
- * This client is about to stall waiting for the GPU. In many cases
- * this is undesirable and limits the throughput of the system, as
- * many clients cannot continue processing user input/output whilst
- * blocked. RPS autotuning may take tens of milliseconds to respond
- * to the GPU load and thus incurs additional latency for the client.
- * We can circumvent that by promoting the GPU frequency to maximum
- * before we wait. This makes the GPU throttle up much more quickly
- * (good for benchmarks and user experience, e.g. window animations),
- * but at a cost of spending more power processing the workload
- * (bad for battery). Not all clients even want their results
- * immediately and for them we should just let the GPU select its own
- * frequency to maximise efficiency. To prevent a single client from
- * forcing the clocks too high for the whole system, we only allow
- * each client to waitboost once in a busy period.
- */
- if (rps_client && !i915_request_started(rq)) {
- if (INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq, rps_client);
- }
-
timeout = i915_request_wait(rq, flags, timeout);
out:
@@ -511,8 +447,7 @@ out:
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
@@ -530,8 +465,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
for (i = 0; i < count; i++) {
timeout = i915_gem_object_wait_fence(shared[i],
- flags, timeout,
- rps_client);
+ flags, timeout);
if (timeout < 0)
break;
@@ -557,8 +491,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
}
if (excl && timeout >= 0)
- timeout = i915_gem_object_wait_fence(excl, flags, timeout,
- rps_client);
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout);
dma_fence_put(excl);
@@ -652,30 +585,19 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes etc)
* @timeout: how long to wait
- * @rps_client: client (user process) to charge for any waitboosting
*/
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
might_sleep();
GEM_BUG_ON(timeout < 0);
- timeout = i915_gem_object_wait_reservation(obj->resv,
- flags, timeout,
- rps_client);
+ timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
return timeout < 0 ? timeout : 0;
}
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
- struct drm_i915_file_private *fpriv = file->driver_priv;
-
- return &fpriv->rps_client;
-}
-
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -698,28 +620,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
-{
- return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
-}
-
-void i915_gem_object_free(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- kmem_cache_free(dev_priv->objects, obj);
-}
-
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
- u64 size,
+ u64 *size_p,
u32 *handle_p)
{
struct drm_i915_gem_object *obj;
- int ret;
u32 handle;
+ u64 size;
+ int ret;
- size = roundup(size, PAGE_SIZE);
+ size = round_up(*size_p, PAGE_SIZE);
if (size == 0)
return -EINVAL;
@@ -735,6 +647,7 @@ i915_gem_create(struct drm_file *file,
return ret;
*handle_p = handle;
+ *size_p = size;
return 0;
}
@@ -747,7 +660,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, to_i915(dev),
- args->size, &args->handle);
+ &args->size, &args->handle);
}
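Worked example of the new size reporting (a 4096-byte page size is assumed here): a dumb buffer of 100x100 at 32bpp gives

	args->pitch = ALIGN(100 * 4, 64);	/* = 448 */
	args->size  = 448 * 100;		/* = 44800 */
	/* i915_gem_create() now rounds up and writes the result back: */
	*size_p = round_up(44800, PAGE_SIZE);	/* = 45056 */

so after this change userspace sees the page-aligned 45056 in args->size rather than the 44800 it asked for.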
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -772,7 +685,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
i915_gem_flush_free_objects(dev_priv);
return i915_gem_create(file, dev_priv,
- args->size, &args->handle);
+ &args->size, &args->handle);
}
static inline enum fb_op_origin
@@ -881,8 +794,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -934,8 +846,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -1197,8 +1108,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -1497,8 +1407,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err;
@@ -1578,17 +1487,37 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
return -EINVAL;
- /* Having something in the write domain implies it's in the read
+ /*
+ * Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
- if (write_domain != 0 && read_domains != write_domain)
+ if (write_domain && read_domains != write_domain)
return -EINVAL;
+ if (!read_domains)
+ return 0;
+
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- /* Try to flush the object off the GPU without holding the lock.
+ /*
+ * Already in the desired write domain? Nothing for us to do!
+ *
+ * We apply a little bit of cunning here to catch a broader set of
+ * no-ops. If obj->write_domain is set, we must be in the same
+ * obj->read_domains, and only that domain. Therefore, if that
+ * obj->write_domain matches the request read_domains, we are
+ * already in the same read/write domain and can skip the operation,
+ * without having to further check the requested write_domain.
+ */
+ if (READ_ONCE(obj->write_domain) == read_domains) {
+ err = 0;
+ goto out;
+ }
+
+ /*
+ * Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
@@ -1596,8 +1525,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
(write_domain ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (err)
goto out;
@@ -1808,6 +1736,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 2 - Recognise WC as a separate cache domain so that we can flush the
* delayed writes via GTT before performing direct access via WC.
*
+ * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+ * pagefault; swapin remains transparent.
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -1835,7 +1766,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 2;
+ return 3;
}
static inline struct i915_ggtt_view
@@ -1891,6 +1822,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
+ int srcu;
int ret;
/* Sanity check that we allow writing into this object */
@@ -1902,27 +1834,21 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
- /* Try to flush the object off the GPU first without holding the lock.
- * Upon acquiring the lock, we will perform our sanity checks and then
- * repeat the flush holding the lock in the normal manner to catch cases
- * where we are gazumped.
- */
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
- if (ret)
- goto err;
-
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
wakeref = intel_runtime_pm_get(dev_priv);
+ srcu = i915_reset_trylock(dev_priv);
+ if (srcu < 0) {
+ ret = srcu;
+ goto err_rpm;
+ }
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto err_rpm;
+ goto err_reset;
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
@@ -1930,7 +1856,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
-
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
@@ -1964,10 +1889,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto err_unpin;
-
ret = i915_vma_pin_fence(vma);
if (ret)
goto err_unpin;
@@ -1995,6 +1916,8 @@ err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
+err_reset:
+ i915_reset_unlock(dev_priv, srcu);
err_rpm:
intel_runtime_pm_put(dev_priv, wakeref);
i915_gem_object_unpin_pages(obj);
@@ -2007,7 +1930,7 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error))
+ if (!i915_terminally_wedged(dev_priv))
return VM_FAULT_SIGBUS;
/* else: fall through */
case -EAGAIN:
@@ -2280,7 +2203,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
-
i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2488,7 +2410,7 @@ rebuild_st:
do {
cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (likely(!IS_ERR(page)))
+ if (!IS_ERR(page))
break;
if (!*s) {
@@ -2622,6 +2544,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->mm.lock);
+ /* Make the pages coherent with the GPU (flushing any swapin). */
+ if (obj->cache_dirty) {
+ obj->write_domain = 0;
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(pages);
+ obj->cache_dirty = false;
+ }
+
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
@@ -2823,6 +2753,33 @@ err_unlock:
goto out_unlock;
}
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size)
+{
+ enum i915_map_type has_type;
+ void *ptr;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+ offset, size, obj->base.size));
+
+ obj->mm.dirty = true;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+ return;
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (has_type == I915_MAP_WC)
+ return;
+
+ drm_clflush_virt_range(ptr + offset, size);
+ if (size == obj->base.size) {
+ obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+ obj->cache_dirty = false;
+ }
+}
+
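A typical caller-side sequence this helper is meant for (a sketch, under the assumption that the object was pinned with a writeback mapping):

	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... fill in commands/data through vaddr ... */

	i915_gem_object_flush_map(obj);	/* clflush only if the mapping isn't coherent */
	i915_gem_object_unpin_map(obj);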
static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
@@ -2832,7 +2789,11 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
u64 remain, offset;
unsigned int pg;
- /* Before we instantiate/pin the backing store for our use, we
+ /* Caller already validated user args */
+ GEM_BUG_ON(!access_ok(user_data, arg->size));
+
+ /*
+ * Before we instantiate/pin the backing store for our use, we
* can prepopulate the shmemfs filp efficiently using a write into
* the pagecache. We avoid the penalty of instantiating all the
* pages, important if the user is just writing to a few and never
@@ -2846,7 +2807,8 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
- /* Before the pages are instantiated the object is treated as being
+ /*
+ * Before the pages are instantiated the object is treated as being
* in the CPU domain. The pages will be clflushed as required before
* use, and we can freely write into the pages directly. If userspace
* races pwrite with any other operation; corruption will ensue -
@@ -2862,20 +2824,32 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
struct page *page;
void *data, *vaddr;
int err;
+ char c;
len = PAGE_SIZE - pg;
if (len > remain)
len = remain;
+ /* Prefault the user page to reduce potential recursion */
+ err = __get_user(c, user_data);
+ if (err)
+ return err;
+
+ err = __get_user(c, user_data + len - 1);
+ if (err)
+ return err;
+
err = pagecache_write_begin(obj->base.filp, mapping,
offset, len, 0,
&page, &data);
if (err < 0)
return err;
- vaddr = kmap(page);
- unwritten = copy_from_user(vaddr + pg, user_data, len);
- kunmap(page);
+ vaddr = kmap_atomic(page);
+ unwritten = __copy_from_user_inatomic(vaddr + pg,
+ user_data,
+ len);
+ kunmap_atomic(vaddr);
err = pagecache_write_end(obj->base.filp, mapping,
offset, len, len - unwritten,
@@ -2883,8 +2857,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (err < 0)
return err;
+ /* We don't handle -EFAULT, leave it to the caller to check */
if (unwritten)
- return -EFAULT;
+ return -ENODEV;
remain -= len;
user_data += len;
@@ -2895,51 +2870,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
-static bool match_ring(struct i915_request *rq)
-{
- struct drm_i915_private *dev_priv = rq->i915;
- u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
-
- return ring == i915_ggtt_offset(rq->ring->vma);
-}
-
-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine)
-{
- struct i915_request *request, *active = NULL;
- unsigned long flags;
-
- /*
- * We are called by the error capture, reset and to dump engine
- * state at random points in time. In particular, note that neither is
- * crucially ordered with an interrupt. After a hang, the GPU is dead
- * and we assume that no more writes can happen (we waited long enough
- * for all writes that were in transaction to be flushed) - adding an
- * extra delay for a recent interrupt is pointless. Hence, we do
- * not need an engine->irq_seqno_barrier() before the seqno reads.
- * At all other times, we must assume the GPU is still running, but
- * we only care about the snapshot of this moment.
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- list_for_each_entry(request, &engine->timeline.requests, link) {
- if (i915_request_completed(request))
- continue;
-
- if (!i915_request_started(request))
- break;
-
- /* More than one preemptible request may match! */
- if (!match_ring(request))
- break;
-
- active = request;
- break;
- }
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- return active;
-}
-
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
@@ -2964,180 +2894,105 @@ i915_gem_retire_work_handler(struct work_struct *work)
round_jiffies_up_relative(HZ));
}
-static void shrink_caches(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
+ unsigned long mask)
{
- /*
- * kmem_cache_shrink() discards empty slabs and reorders partially
- * filled slabs to prioritise allocating from the mostly full slabs,
- * with the aim of reducing fragmentation.
- */
- kmem_cache_shrink(i915->priorities);
- kmem_cache_shrink(i915->dependencies);
- kmem_cache_shrink(i915->requests);
- kmem_cache_shrink(i915->luts);
- kmem_cache_shrink(i915->vmas);
- kmem_cache_shrink(i915->objects);
-}
-
-struct sleep_rcu_work {
- union {
- struct rcu_head rcu;
- struct work_struct work;
- };
- struct drm_i915_private *i915;
- unsigned int epoch;
-};
+ bool result = true;
-static inline bool
-same_epoch(struct drm_i915_private *i915, unsigned int epoch)
-{
/*
- * There is a small chance that the epoch wrapped since we started
- * sleeping. If we assume that epoch is at least a u32, then it will
- * take at least 2^32 * 100ms for it to wrap, or about 326 years.
+ * Even if we fail to switch, give whatever is running a small chance
+ * to save itself before we report the failure. Yes, this may be a
+ * false positive due to e.g. ENOMEM, caveat emptor!
*/
- return epoch == READ_ONCE(i915->gt.epoch);
-}
-
-static void __sleep_work(struct work_struct *work)
-{
- struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
- struct drm_i915_private *i915 = s->i915;
- unsigned int epoch = s->epoch;
-
- kfree(s);
- if (same_epoch(i915, epoch))
- shrink_caches(i915);
-}
+ if (i915_gem_switch_to_kernel_context(i915, mask))
+ result = false;
-static void __sleep_rcu(struct rcu_head *rcu)
-{
- struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
- struct drm_i915_private *i915 = s->i915;
-
- destroy_rcu_head(&s->rcu);
+ if (i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ I915_GEM_IDLE_TIMEOUT))
+ result = false;
+
+ if (!result) {
+ if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
+ dev_err(i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ GEM_TRACE_DUMP();
+ }
- if (same_epoch(i915, s->epoch)) {
- INIT_WORK(&s->work, __sleep_work);
- queue_work(i915->wq, &s->work);
- } else {
- kfree(s);
+ /* Forcibly cancel outstanding work and leave the gpu quiet. */
+ i915_gem_set_wedged(i915);
}
-}
-static inline bool
-new_requests_since_last_retire(const struct drm_i915_private *i915)
-{
- return (READ_ONCE(i915->gt.active_requests) ||
- work_pending(&i915->gt.idle_work.work));
+ i915_retire_requests(i915); /* ensure we flush after wedging */
+ return result;
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+static bool load_power_context(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ /* Force loading the kernel context on all engines */
+ if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
+ return false;
- if (i915_terminally_wedged(&i915->gpu_error))
- return;
+ /*
+ * Immediately park the GPU so that we enable powersaving and
+ * treat it as idle. The next time we issue a request, we will
+ * unpark and start using the engine->pinned_default_state, otherwise
+ * it is in limbo and an early reset may fail.
+ */
+ __i915_gem_park(i915);
- GEM_BUG_ON(i915->gt.active_requests);
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
- GEM_BUG_ON(engine->last_retired_context !=
- to_intel_context(i915->kernel_context, engine));
- }
+ return true;
}
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), gt.idle_work.work);
- unsigned int epoch = I915_EPOCH_INVALID;
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gt.idle_work.work);
bool rearm_hangcheck;
- if (!READ_ONCE(dev_priv->gt.awake))
+ if (!READ_ONCE(i915->gt.awake))
return;
- if (READ_ONCE(dev_priv->gt.active_requests))
+ if (READ_ONCE(i915->gt.active_requests))
return;
- /*
- * Flush out the last user context, leaving only the pinned
- * kernel context resident. When we are idling on the kernel_context,
- * no more new requests (with a context switch) are emitted and we
- * can finally rest. A consequence is that the idle work handler is
- * always called at least twice before idling (and if the system is
- * idle that implies a round trip through the retire worker).
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_switch_to_kernel_context(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
- READ_ONCE(dev_priv->gt.active_requests));
-
- /*
- * Wait for last execlists context complete, but bail out in case a
- * new request is submitted. As we don't trust the hardware, we
- * continue on if the wait times out. This is necessary to allow
- * the machine to suspend even if the hardware dies, and we will
- * try to recover in resume (after depriving the hardware of power,
- * it may be in a better mmod).
- */
- __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
- intel_engines_are_idle(dev_priv),
- I915_IDLE_ENGINES_TIMEOUT * 1000,
- 10, 500);
-
rearm_hangcheck =
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
- if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+ if (!mutex_trylock(&i915->drm.struct_mutex)) {
/* Currently busy, come back later */
- mod_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
+ mod_delayed_work(i915->wq,
+ &i915->gt.idle_work,
msecs_to_jiffies(50));
goto out_rearm;
}
/*
- * New request retired after this work handler started, extend active
- * period until next instance of the work.
+ * Flush out the last user context, leaving only the pinned
+ * kernel context resident. Should anything unfortunate happen
+ * while we are idle (such as the GPU being power cycled), no users
+ * will be harmed.
*/
- if (new_requests_since_last_retire(dev_priv))
- goto out_unlock;
+ if (!work_pending(&i915->gt.idle_work.work) &&
+ !i915->gt.active_requests) {
+ ++i915->gt.active_requests; /* don't requeue idle */
- epoch = __i915_gem_park(dev_priv);
+ switch_to_kernel_context_sync(i915, i915->gt.active_engines);
- assert_kernel_context_is_current(dev_priv);
+ if (!--i915->gt.active_requests) {
+ __i915_gem_park(i915);
+ rearm_hangcheck = false;
+ }
+ }
- rearm_hangcheck = false;
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
out_rearm:
if (rearm_hangcheck) {
- GEM_BUG_ON(!dev_priv->gt.awake);
- i915_queue_hangcheck(dev_priv);
- }
-
- /*
- * When we are idle, it is an opportune time to reap our caches.
- * However, we have many objects that utilise RCU and the ordered
- * i915->wq that this work is executing on. To try and flush any
- * pending frees now we are idle, we first wait for an RCU grace
- * period, and then queue a task (that will run last on the wq) to
- * shrink and re-optimize the caches.
- */
- if (same_epoch(dev_priv, epoch)) {
- struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s) {
- init_rcu_head(&s->rcu);
- s->i915 = dev_priv;
- s->epoch = epoch;
- call_rcu(&s->rcu, __sleep_rcu);
- }
+ GEM_BUG_ON(!i915->gt.awake);
+ i915_queue_hangcheck(i915);
}
}
@@ -3171,7 +3026,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_del(&lut->obj_link);
list_del(&lut->ctx_link);
- kmem_cache_free(i915->luts, lut);
+ i915_lut_handle_free(lut);
__i915_gem_object_release_unless_active(obj);
}
@@ -3234,8 +3089,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
I915_WAIT_ALL,
- to_wait_timeout(args->timeout_ns),
- to_rps_client(file));
+ to_wait_timeout(args->timeout_ns));
if (args->timeout_ns > 0) {
args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
@@ -3304,7 +3158,7 @@ wait_for_timelines(struct drm_i915_private *i915,
* stalls, so allow the gpu to boost to maximum clocks.
*/
if (flags & I915_WAIT_FOR_IDLE_BOOST)
- gen6_rps_boost(rq, NULL);
+ gen6_rps_boost(rq);
timeout = i915_request_wait(rq, flags, timeout);
i915_request_put(rq);
@@ -3340,19 +3194,11 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
lockdep_assert_held(&i915->drm.struct_mutex);
- if (GEM_SHOW_DEBUG() && !timeout) {
- /* Presume that timeout was non-zero to begin with! */
- dev_warn(&i915->drm.pdev->dev,
- "Missed idle-completion interrupt!\n");
- GEM_TRACE_DUMP();
- }
-
err = wait_for_engines(i915);
if (err)
return err;
i915_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
}
return 0;
@@ -3399,8 +3245,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3462,8 +3307,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3578,8 +3422,7 @@ restart:
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3717,8 +3560,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -3844,8 +3686,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3891,8 +3732,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
long ret;
/* ABI: return -EIO if already wedged */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
- return -EIO;
+ ret = i915_terminally_wedged(dev_priv);
+ if (ret)
+ return ret;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
@@ -3968,7 +3810,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
}
vma = i915_vma_instance(obj, vm, view);
- if (unlikely(IS_ERR(vma)))
+ if (IS_ERR(vma))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
@@ -4000,22 +3842,19 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return vma;
}
-static __always_inline unsigned int __busy_read_flag(unsigned int id)
+static __always_inline u32 __busy_read_flag(u8 id)
{
- /* Note that we could alias engines in the execbuf API, but
- * that would be very unwise as it prevents userspace from
- * fine control over engine selection. Ahem.
- *
- * This should be something like EXEC_MAX_ENGINE instead of
- * I915_NUM_ENGINES.
- */
- BUILD_BUG_ON(I915_NUM_ENGINES > 16);
- return 0x10000 << id;
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffff0000u;
+
+ GEM_BUG_ON(id >= 16);
+ return 0x10000u << id;
}
-static __always_inline unsigned int __busy_write_id(unsigned int id)
+static __always_inline u32 __busy_write_id(u8 id)
{
- /* The uABI guarantees an active writer is also amongst the read
+ /*
+ * The uABI guarantees an active writer is also amongst the read
* engines. This would be true if we accessed the activity tracking
* under the lock, but as we perform the lookup of the object and
* its activity locklessly we can not guarantee that the last_write
@@ -4023,16 +3862,19 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- return id | __busy_read_flag(id);
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffffffffu;
+
+ return (id + 1) | __busy_read_flag(id);
}
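Putting the two helpers together, the busy-ioctl encoding works out as follows (the uabi_class numbers are assumptions for the example): an object last written by a render-class engine (class 0) that is also being read by a copy-class engine (class 1) reports

	__busy_write_id(0) | __busy_read_flag(1)
		== (0x00000001 | 0x00010000) | 0x00020000
		== 0x00030001

while an unrecognised class saturates its half of the word (0xffff0000 for readers, 0xffffffff for the writer).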
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence,
- unsigned int (*flag)(unsigned int id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
{
- struct i915_request *rq;
+ const struct i915_request *rq;
- /* We have to check the current hw status of the fence as the uABI
+ /*
+ * We have to check the current hw status of the fence as the uABI
* guarantees forward progress. We could rely on the idle worker
* to eventually flush us, but to minimise latency just ask the
* hardware.
@@ -4043,11 +3885,13 @@ __busy_set_if_active(const struct dma_fence *fence,
return 0;
/* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, struct i915_request, fence);
+ rq = container_of(fence, const struct i915_request, fence);
if (i915_request_completed(rq))
return 0;
- return flag(rq->engine->uabi_id);
+ /* Beware type-expansion follies! */
+ BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ return flag(rq->engine->uabi_class);
}
static __always_inline unsigned int
@@ -4081,7 +3925,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (!obj)
goto out;
- /* A discrepancy here is that we do not report the status of
+ /*
+ * A discrepancy here is that we do not report the status of
* non-i915 fences, i.e. even though we may report the object as idle,
* a call to set-domain may still stall waiting for foreign rendering.
* This also means that wait-ioctl may report an object as busy,
@@ -4281,7 +4126,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -4354,7 +4199,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
* acquiring such a reference whilst we are in the middle of
* freeing the object.
*/
- return atomic_long_read(&obj->base.filp->f_count) == 1;
+ return file_count(obj->base.filp) == 1;
}
static void __i915_gem_free_objects(struct drm_i915_private *i915,
@@ -4414,7 +4259,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(i915, obj->base.size);
- kfree(obj->bit_17);
+ bitmap_free(obj->bit_17);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -4537,7 +4382,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/*
* As we have just resumed the machine and woken the device up from
@@ -4545,7 +4390,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* back to defaults, recovering from whatever wedged state we left it
* in and so worth trying to use the device once more.
*/
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
i915_gem_unset_wedged(i915);
/*
@@ -4558,7 +4403,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
*/
intel_engines_sanitize(i915, false);
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(i915, wakeref);
mutex_lock(&i915->drm.struct_mutex);
@@ -4566,15 +4411,13 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
}
-int i915_gem_suspend(struct drm_i915_private *i915)
+void i915_gem_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- int ret;
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
- intel_suspend_gt_powersave(i915);
flush_workqueue(i915->wq);
@@ -4589,22 +4432,7 @@ int i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- if (!i915_terminally_wedged(&i915->gpu_error)) {
- ret = i915_gem_switch_to_kernel_context(i915);
- if (ret)
- goto err_unlock;
-
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- MAX_SCHEDULE_TIMEOUT);
- if (ret && ret != -EIO)
- goto err_unlock;
-
- assert_kernel_context_is_current(i915);
- }
- i915_retire_requests(i915); /* ensure we flush after wedging */
+ switch_to_kernel_context_sync(i915, i915->gt.active_engines);
mutex_unlock(&i915->drm.struct_mutex);
i915_reset_flush(i915);
@@ -4617,23 +4445,15 @@ int i915_gem_suspend(struct drm_i915_private *i915)
*/
drain_delayed_work(&i915->gt.idle_work);
- intel_uc_suspend(i915);
-
/*
* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(i915->gt.awake);
- if (WARN_ON(!intel_engines_are_idle(i915)))
- i915_gem_set_wedged(i915); /* no hope, discard everything */
+ GEM_BUG_ON(i915->gt.awake);
- intel_runtime_pm_put(i915, wakeref);
- return 0;
+ intel_uc_suspend(i915);
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
- return ret;
}
void i915_gem_suspend_late(struct drm_i915_private *i915)
@@ -4683,7 +4503,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
@@ -4693,7 +4513,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- i915->gt.resume(i915);
+ intel_gt_resume(i915);
if (i915_gem_init_hw(i915))
goto err_wedged;
@@ -4701,17 +4521,18 @@ void i915_gem_resume(struct drm_i915_private *i915)
intel_uc_resume(i915);
/* Always reload a context for powersaving. */
- if (i915_gem_switch_to_kernel_context(i915))
+ if (!load_power_context(i915))
goto err_wedged;
out_unlock:
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
- if (!i915_terminally_wedged(&i915->gpu_error)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ if (!i915_reset_failed(i915)) {
+ dev_err(i915->drm.dev,
+ "Failed to re-initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(i915);
}
goto out_unlock;
@@ -4781,6 +4602,8 @@ static int __i915_gem_restart_engines(void *data)
}
}
+ intel_engines_set_scheduler_caps(i915);
+
return 0;
}
@@ -4791,7 +4614,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
dev_priv->gt.last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
@@ -4816,10 +4639,9 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
- if (i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = -EIO;
+ ret = i915_terminally_wedged(dev_priv);
+ if (ret)
goto out;
- }
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
@@ -4847,14 +4669,14 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto cleanup_uc;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return 0;
cleanup_uc:
intel_uc_fini_hw(dev_priv);
out:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -4864,7 +4686,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
+ int err = 0;
/*
* As we reset the gpu during very early sanitisation, the current
@@ -4897,36 +4719,27 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
}
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- goto err_active;
-
- if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
- i915_gem_set_wedged(i915);
- err = -EIO; /* Caller will declare us wedged */
+ /* Flush the default context image to memory, and enable powersaving. */
+ if (!load_power_context(i915)) {
+ err = -EIO;
goto err_active;
}
- assert_kernel_context_is_current(i915);
-
- /*
- * Immediately park the GPU so that we enable powersaving and
- * treat it as idle. The next time we issue a request, we will
- * unpark and start using the engine->pinned_default_state, otherwise
- * it is in limbo and an early reset may fail.
- */
- __i915_gem_park(i915);
-
for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
struct i915_vma *state;
void *vaddr;
- GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
+ ce = intel_context_lookup(ctx, engine);
+ if (!ce)
+ continue;
- state = to_intel_context(ctx, engine)->state;
+ state = ce->state;
if (!state)
continue;
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
/*
* As we will hold a reference to the logical state, it will
* not be torn down with the context, and importantly the
@@ -4944,6 +4757,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_set_cache_coherency(engine->default_state,
+ I915_CACHE_LLC);
/* Check we can acquire the image of the context state */
vaddr = i915_gem_object_pin_map(engine->default_state,
@@ -4982,19 +4797,10 @@ out_ctx:
err_active:
/*
* If we have to abandon now, we expect the engines to be idle
- * and ready to be torn-down. First try to flush any remaining
- * request, ensure we are pointing at the kernel context and
- * then remove it.
+ * and ready to be torn-down. The quickest way we can accomplish
+ * this is by declaring ourselves wedged.
*/
- if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
- goto out_ctx;
-
- if (WARN_ON(i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT)))
- goto out_ctx;
-
- i915_gem_contexts_lost(i915);
+ i915_gem_set_wedged(i915);
goto out_ctx;
}
@@ -5047,13 +4853,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->gt.resume = intel_lr_context_resume;
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
- } else {
- dev_priv->gt.resume = intel_legacy_submission_resume;
+ else
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
- }
i915_timelines_init(dev_priv);
@@ -5076,7 +4879,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* just magically go away.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = i915_gem_init_ggtt(dev_priv);
if (ret) {
@@ -5138,7 +4941,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_init_hw;
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
@@ -5152,7 +4955,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_init_hw:
mutex_unlock(&dev_priv->drm.struct_mutex);
- WARN_ON(i915_gem_suspend(dev_priv));
+ i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
i915_gem_drain_workqueue(dev_priv);
@@ -5173,7 +4976,7 @@ err_scratch:
i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_uc_misc:
@@ -5192,7 +4995,7 @@ err_uc_misc:
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+ if (!i915_reset_failed(dev_priv)) {
i915_load_error(dev_priv,
"Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
@@ -5305,36 +5108,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- int err = -ENOMEM;
-
- dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->objects)
- goto err_out;
-
- dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->vmas)
- goto err_objects;
-
- dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
- if (!dev_priv->luts)
- goto err_vmas;
-
- dev_priv->requests = KMEM_CACHE(i915_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
- if (!dev_priv->requests)
- goto err_luts;
-
- dev_priv->dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!dev_priv->dependencies)
- goto err_requests;
-
- dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->priorities)
- goto err_dependencies;
+ int err;
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
@@ -5348,6 +5122,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
mutex_init(&dev_priv->gpu_error.wedge_mutex);
+ init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
@@ -5358,19 +5133,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
return 0;
-
-err_dependencies:
- kmem_cache_destroy(dev_priv->dependencies);
-err_requests:
- kmem_cache_destroy(dev_priv->requests);
-err_luts:
- kmem_cache_destroy(dev_priv->luts);
-err_vmas:
- kmem_cache_destroy(dev_priv->vmas);
-err_objects:
- kmem_cache_destroy(dev_priv->objects);
-err_out:
- return err;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -5380,15 +5142,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
- kmem_cache_destroy(dev_priv->priorities);
- kmem_cache_destroy(dev_priv->dependencies);
- kmem_cache_destroy(dev_priv->requests);
- kmem_cache_destroy(dev_priv->luts);
- kmem_cache_destroy(dev_priv->vmas);
- kmem_cache_destroy(dev_priv->objects);
-
- /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
- rcu_barrier();
+ cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
i915_gemfs_fini(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index b0e4b976880c..9074eb1e843f 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -73,14 +73,14 @@ struct drm_i915_private;
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-#define I915_NUM_ENGINES 8
+#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
- if (atomic_inc_return(&t->count) == 1)
+ if (!atomic_fetch_inc(&t->count))
tasklet_unlock_wait(t);
}
@@ -89,4 +89,9 @@ static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
return !atomic_read(&t->count);
}
+static inline bool __tasklet_enable(struct tasklet_struct *t)
+{
+ return atomic_dec_and_test(&t->count);
+}
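/*
 * Illustrative sketch of the intended pairing (assumed caller, not from this
 * patch): only the first disable waits for a running tasklet to finish, and
 * only the final enable reports that the tasklet may need kicking again.
 */
static inline void example_pause_tasklet(struct tasklet_struct *t)
{
	__tasklet_disable_sync_once(t);	/* count 0 -> 1: wait for it to stop */
}

static inline void example_resume_tasklet(struct tasklet_struct *t)
{
	if (__tasklet_enable(t))	/* count back to 0: we own the re-kick */
		tasklet_hi_schedule(t);
}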
+
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 280813a4bf82..dd728b26b5aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,12 +88,32 @@
#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_trace.h"
+#include "i915_user_extensions.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1)
+#define I915_CONTEXT_PARAM_VM 0x9
+
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
+static struct i915_global_gem_context {
+ struct i915_global base;
+ struct kmem_cache *slab_luts;
+} global;
+
+struct i915_lut_handle *i915_lut_handle_alloc(void)
+{
+ return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
+}
+
+void i915_lut_handle_free(struct i915_lut_handle *lut)
+{
+ return kmem_cache_free(global.slab_luts, lut);
+}
+
static void lut_close(struct i915_gem_context *ctx)
{
struct i915_lut_handle *lut, *ln;
@@ -102,14 +122,17 @@ static void lut_close(struct i915_gem_context *ctx)
list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
list_del(&lut->obj_link);
- kmem_cache_free(ctx->i915->luts, lut);
+ i915_lut_handle_free(lut);
}
+ INIT_LIST_HEAD(&ctx->handles_list);
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+
+ vma->open_count--;
__i915_gem_object_release_unless_active(vma->obj);
}
rcu_read_unlock();
@@ -206,25 +229,26 @@ static void release_hw_id(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- unsigned int n;
+ struct intel_context *it, *n;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ GEM_BUG_ON(!list_empty(&ctx->active_engines));
release_hw_id(ctx);
i915_ppgtt_put(ctx->ppgtt);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
- struct intel_context *ce = &ctx->__engine[n];
+ rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
+ intel_context_put(it);
- if (ce->ops)
- ce->ops->destroy(ce);
- }
+ if (ctx->timeline)
+ i915_timeline_put(ctx->timeline);
kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
+ mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
@@ -291,8 +315,6 @@ static void context_close(struct i915_gem_context *ctx)
* the ppgtt).
*/
lut_close(ctx);
- if (ctx->ppgtt)
- i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -307,7 +329,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
+ if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -322,134 +344,115 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
return desc;
}
-static void intel_context_retire(struct i915_active_request *active,
- struct i915_request *rq)
-{
- struct intel_context *ce =
- container_of(active, typeof(*ce), active_tracker);
-
- intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- ce->gem_context = ctx;
-
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
-
- /* Use the whole device by default */
- ce->sseu = intel_device_default_sseu(ctx->i915);
-
- i915_active_request_init(&ce->active_tracker,
- NULL, intel_context_retire);
-}
-
static struct i915_gem_context *
-__create_hw_context(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+__create_context(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- unsigned int n;
- int ret;
+ int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (ctx == NULL)
+ if (!ctx)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
+ INIT_LIST_HEAD(&ctx->active_engines);
+ mutex_init(&ctx->mutex);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
- /* Default context will never have a file_priv */
- ret = DEFAULT_CONTEXT_HANDLE;
- if (file_priv) {
- ret = idr_alloc(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
- if (ret < 0)
- goto err_lut;
- }
- ctx->user_handle = ret;
-
- ctx->file_priv = file_priv;
- if (file_priv) {
- ctx->pid = get_task_pid(current, PIDTYPE_PID);
- ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
- current->comm,
- pid_nr(ctx->pid),
- ctx->user_handle);
- if (!ctx->name) {
- ret = -ENOMEM;
- goto err_pid;
- }
- }
-
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
i915_gem_context_set_bannable(ctx);
+ i915_gem_context_set_recoverable(ctx);
+
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
+ for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+ ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+
return ctx;
+}
-err_pid:
- put_pid(ctx->pid);
- idr_remove(&file_priv->context_idr, ctx->user_handle);
-err_lut:
- context_close(ctx);
- return ERR_PTR(ret);
+static struct i915_hw_ppgtt *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+{
+ struct i915_hw_ppgtt *old = ctx->ppgtt;
+
+ ctx->ppgtt = i915_ppgtt_get(ppgtt);
+ ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+
+ return old;
}
-static void __destroy_hw_context(struct i915_gem_context *ctx,
- struct drm_i915_file_private *file_priv)
+static void __assign_ppgtt(struct i915_gem_context *ctx,
+ struct i915_hw_ppgtt *ppgtt)
{
- idr_remove(&file_priv->context_idr, ctx->user_handle);
- context_close(ctx);
+ if (ppgtt == ctx->ppgtt)
+ return;
+
+ ppgtt = __set_ppgtt(ctx, ppgtt);
+ if (ppgtt)
+ i915_ppgtt_put(ppgtt);
}
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{
struct i915_gem_context *ctx;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &
+ ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN);
+ if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
+ !HAS_EXECLISTS(dev_priv))
+ return ERR_PTR(-EINVAL);
+
/* Reap the most stale context */
contexts_free_first(dev_priv);
- ctx = __create_hw_context(dev_priv, file_priv);
+ ctx = __create_context(dev_priv);
if (IS_ERR(ctx))
return ctx;
if (HAS_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv, file_priv);
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
- __destroy_hw_context(ctx, file_priv);
+ context_close(ctx);
return ERR_CAST(ppgtt);
}
- ctx->ppgtt = ppgtt;
- ctx->desc_template = default_desc_template(dev_priv, ppgtt);
+ __assign_ppgtt(ctx, ppgtt);
+ i915_ppgtt_put(ppgtt);
+ }
+
+ if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
+ struct i915_timeline *timeline;
+
+ timeline = i915_timeline_create(dev_priv, NULL);
+ if (IS_ERR(timeline)) {
+ context_close(ctx);
+ return ERR_CAST(timeline);
+ }
+
+ ctx->timeline = timeline;
}
trace_i915_context_create(ctx);
@@ -480,10 +483,17 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
- ctx = i915_gem_create_context(to_i915(dev), NULL);
+ ctx = i915_gem_create_context(to_i915(dev), 0);
if (IS_ERR(ctx))
goto out;
+ ret = i915_gem_context_pin_hw_id(ctx);
+ if (ret) {
+ context_close(ctx);
+ ctx = ERR_PTR(ret);
+ goto out;
+ }
+
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
@@ -516,7 +526,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
struct i915_gem_context *ctx;
int err;
- ctx = i915_gem_create_context(i915, NULL);
+ ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
@@ -552,7 +562,7 @@ static void init_contexts(struct drm_i915_private *i915)
static bool needs_preempt_context(struct drm_i915_private *i915)
{
- return HAS_LOGICAL_RING_PREEMPTION(i915);
+ return HAS_EXECLISTS(i915);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
@@ -563,7 +573,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
- intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
+ intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -624,31 +634,87 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
static int context_idr_cleanup(int id, void *p, void *data)
{
- struct i915_gem_context *ctx = p;
+ context_close(p);
+ return 0;
+}
- context_close(ctx);
+static int vm_idr_cleanup(int id, void *p, void *data)
+{
+ i915_ppgtt_put(p);
return 0;
}
+static int gem_context_register(struct i915_gem_context *ctx,
+ struct drm_i915_file_private *fpriv)
+{
+ int ret;
+
+ ctx->file_priv = fpriv;
+ if (ctx->ppgtt)
+ ctx->ppgtt->vm.file = fpriv;
+
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
+ current->comm, pid_nr(ctx->pid));
+ if (!ctx->name) {
+ ret = -ENOMEM;
+ goto err_pid;
+ }
+
+ /* And finally expose ourselves to userspace via the idr */
+ mutex_lock(&fpriv->context_idr_lock);
+ ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&fpriv->context_idr_lock);
+ if (ret >= 0)
+ goto out;
+
+ kfree(fetch_and_zero(&ctx->name));
+err_pid:
+ put_pid(fetch_and_zero(&ctx->pid));
+out:
+ return ret;
+}
+
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
+ int err;
+
+ mutex_init(&file_priv->context_idr_lock);
+ mutex_init(&file_priv->vm_idr_lock);
idr_init(&file_priv->context_idr);
+ idr_init_base(&file_priv->vm_idr, 1);
mutex_lock(&i915->drm.struct_mutex);
- ctx = i915_gem_create_context(i915, file_priv);
+ ctx = i915_gem_create_context(i915, 0);
mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
- idr_destroy(&file_priv->context_idr);
- return PTR_ERR(ctx);
+ err = PTR_ERR(ctx);
+ goto err;
}
+ err = gem_context_register(ctx, file_priv);
+ if (err < 0)
+ goto err_ctx;
+
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+ GEM_BUG_ON(err > 0);
return 0;
+
+err_ctx:
+ mutex_lock(&i915->drm.struct_mutex);
+ context_close(ctx);
+ mutex_unlock(&i915->drm.struct_mutex);
+err:
+ idr_destroy(&file_priv->vm_idr);
+ idr_destroy(&file_priv->context_idr);
+ mutex_destroy(&file_priv->vm_idr_lock);
+ mutex_destroy(&file_priv->context_idr_lock);
+ return err;
}
void i915_gem_context_close(struct drm_file *file)
@@ -659,6 +725,100 @@ void i915_gem_context_close(struct drm_file *file)
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
+ mutex_destroy(&file_priv->context_idr_lock);
+
+ idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
+ idr_destroy(&file_priv->vm_idr);
+ mutex_destroy(&file_priv->vm_idr_lock);
+}
+
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_vm_control *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ if (!HAS_FULL_PPGTT(i915))
+ return -ENODEV;
+
+ if (args->flags)
+ return -EINVAL;
+
+ ppgtt = i915_ppgtt_create(i915);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ ppgtt->vm.file = file_priv;
+
+ if (args->extensions) {
+ err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ NULL, 0,
+ ppgtt);
+ if (err)
+ goto err_put;
+ }
+
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ goto err_put;
+
+ err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ if (err < 0)
+ goto err_unlock;
+
+ GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */
+ ppgtt->user_handle = err;
+
+ mutex_unlock(&file_priv->vm_idr_lock);
+
+ args->vm_id = err;
+ return 0;
+
+err_unlock:
+ mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+ i915_ppgtt_put(ppgtt);
+ return err;
+}
+
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_vm_control *args = data;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+ u32 id;
+
+ if (args->flags)
+ return -EINVAL;
+
+ if (args->extensions)
+ return -EINVAL;
+
+ id = args->vm_id;
+ if (!id)
+ return -ENOENT;
+
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ return err;
+
+ ppgtt = idr_remove(&file_priv->vm_idr, id);
+ if (ppgtt) {
+ GEM_BUG_ON(ppgtt->user_handle != id);
+ ppgtt->user_handle = 0;
+ }
+
+ mutex_unlock(&file_priv->vm_idr_lock);
+ if (!ppgtt)
+ return -ENOENT;
+
+ i915_ppgtt_put(ppgtt);
+ return 0;
}
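/*
 * Illustrative userspace round-trip (an assumption, not part of this patch):
 * it presumes matching DRM_IOCTL_I915_GEM_VM_CREATE/_DESTROY numbers are
 * wired up in the uAPI header, and requires <sys/ioctl.h>, <errno.h> and
 * <drm/i915_drm.h>. Note that the PARAM_VM get/set paths below are still
 * stubbed out with -EINVAL in this patch.
 */
static int example_create_and_destroy_vm(int fd)
{
	struct drm_i915_gem_vm_control ctl = { .extensions = 0, .flags = 0 };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
		return -errno;

	/* ctl.vm_id now names a private ppGTT for this file descriptor */
	return ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl) ? -errno : 0;
}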
static struct i915_request *
@@ -671,10 +831,9 @@ last_request_on_engine(struct i915_timeline *timeline,
rq = i915_active_request_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
- if (rq && rq->engine == engine) {
- GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
- timeline->name, engine->name,
- rq->fence.context, rq->fence.seqno);
+ if (rq && rq->engine->mask & engine->mask) {
+ GEM_TRACE("last request on engine %s: %llx:%llu\n",
+ engine->name, rq->fence.context, rq->fence.seqno);
GEM_BUG_ON(rq->timeline != timeline);
return rq;
}
@@ -682,81 +841,104 @@ last_request_on_engine(struct i915_timeline *timeline,
return NULL;
}
-static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
+struct context_barrier_task {
+ struct i915_active base;
+ void (*task)(void *data);
+ void *data;
+};
+
+static void cb_retire(struct i915_active *base)
+{
+ struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
+
+ if (cb->task)
+ cb->task(cb->data);
+
+ i915_active_fini(&cb->base);
+ kfree(cb);
+}
+
+I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
+static int context_barrier_task(struct i915_gem_context *ctx,
+ intel_engine_mask_t engines,
+ int (*emit)(struct i915_request *rq, void *data),
+ void (*task)(void *data),
+ void *data)
{
- struct drm_i915_private *i915 = engine->i915;
- const struct intel_context * const ce =
- to_intel_context(i915->kernel_context, engine);
- struct i915_timeline *barrier = ce->ring->timeline;
- struct intel_ring *ring;
- bool any_active = false;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct context_barrier_task *cb;
+ struct intel_context *ce, *next;
+ intel_wakeref_t wakeref;
+ int err = 0;
lockdep_assert_held(&i915->drm.struct_mutex);
- list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
- struct i915_request *rq;
+ GEM_BUG_ON(!task);
- rq = last_request_on_engine(ring->timeline, engine);
- if (!rq)
- continue;
+ cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+ if (!cb)
+ return -ENOMEM;
- any_active = true;
+ i915_active_init(i915, &cb->base, cb_retire);
+ i915_active_acquire(&cb->base);
- if (rq->hw_context == ce)
+ wakeref = intel_runtime_pm_get(i915);
+ rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_request *rq;
+
+ if (!(engine->mask & engines))
continue;
- /*
- * Was this request submitted after the previous
- * switch-to-kernel-context?
- */
- if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
- GEM_TRACE("%s needs barrier for %llx:%lld\n",
- ring->timeline->name,
- rq->fence.context,
- rq->fence.seqno);
- return false;
+ if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
+ engine->mask)) {
+ err = -ENXIO;
+ break;
+ }
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
}
- GEM_TRACE("%s has barrier after %llx:%lld\n",
- ring->timeline->name,
- rq->fence.context,
- rq->fence.seqno);
+ err = 0;
+ if (emit)
+ err = emit(rq, data);
+ if (err == 0)
+ err = i915_active_ref(&cb->base, rq->fence.context, rq);
+
+ i915_request_add(rq);
+ if (err)
+ break;
}
+ intel_runtime_pm_put(i915, wakeref);
- /*
- * If any other timeline was still active and behind the last barrier,
- * then our last switch-to-kernel-context must still be queued and
- * will run last (leaving the engine in the kernel context when it
- * eventually idles).
- */
- if (any_active)
- return true;
+ cb->task = err ? NULL : task; /* caller needs to unwind instead */
+ cb->data = data;
- /* The engine is idle; check that it is idling in the kernel context. */
- return engine->last_retired_context == ce;
+ i915_active_release(&cb->base);
+
+ return err;
}
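/*
 * Illustrative sketch of a minimal caller (assumed, not from this patch):
 * with a NULL emit callback the barrier simply queues an empty request
 * behind any work already outstanding for the context on each selected
 * engine (only engines the context has an intel_context for are covered)
 * and invokes @task once those requests have all retired.
 */
static void example_barrier_done(void *data)
{
	*(bool *)data = true;	/* invoked from i915_active retirement */
}

/* under struct_mutex:
 *	bool done = false;
 *	err = context_barrier_task(ctx, ALL_ENGINES, NULL,
 *				   example_barrier_done, &done);
 */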
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+ intel_engine_mask_t mask)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->kernel_context);
- i915_retire_requests(i915);
+ /* Inoperable, so presume the GPU is safely pointing into the void! */
+ if (i915_terminally_wedged(i915))
+ return 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine_masked(engine, i915, mask, mask) {
struct intel_ring *ring;
struct i915_request *rq;
- GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
- if (engine_has_kernel_context_barrier(engine))
- continue;
-
- GEM_TRACE("emit barrier on %s\n", engine->name);
-
rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -779,7 +961,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
I915_FENCE_GFP);
- i915_timeline_sync_set(rq->timeline, &prev->fence);
}
i915_request_add(rq);
@@ -788,183 +969,173 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
return 0;
}
-static bool client_is_banned(struct drm_i915_file_private *file_priv)
+static int get_ppgtt(struct drm_i915_file_private *file_priv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
-}
-
-int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_context_create *args = data;
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
int ret;
- if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
+ return -EINVAL; /* nothing to see here; please move along */
+
+ if (!ctx->ppgtt)
return -ENODEV;
- if (args->pad != 0)
- return -EINVAL;
+ /* XXX rcu acquire? */
+ ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ if (ret)
+ return ret;
- if (client_is_banned(file_priv)) {
- DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm,
- pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ ppgtt = i915_ppgtt_get(ctx->ppgtt);
+ mutex_unlock(&ctx->i915->drm.struct_mutex);
- return -EIO;
+ ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (ret)
+ goto err_put;
+
+ if (!ppgtt->user_handle) {
+ ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ GEM_BUG_ON(!ret);
+ if (ret < 0)
+ goto err_unlock;
+
+ ppgtt->user_handle = ret;
+ i915_ppgtt_get(ppgtt);
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ args->size = 0;
+ args->value = ppgtt->user_handle;
- ctx = i915_gem_create_context(dev_priv, file_priv);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ret = 0;
+err_unlock:
+ mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+ i915_ppgtt_put(ppgtt);
+ return ret;
+}
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+static void set_ppgtt_barrier(void *data)
+{
+ struct i915_hw_ppgtt *old = data;
- args->ctx_id = ctx->user_handle;
- DRM_DEBUG("HW context %d created\n", args->ctx_id);
+ if (INTEL_GEN(old->vm.i915) < 8)
+ gen6_ppgtt_unpin_all(old);
- return 0;
+ i915_ppgtt_put(old);
}
-int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct drm_i915_gem_context_destroy *args = data;
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_gem_context *ctx;
- int ret;
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
+ struct intel_engine_cs *engine = rq->engine;
+ u32 base = engine->mmio_base;
+ u32 *cs;
+ int i;
- if (args->pad != 0)
- return -EINVAL;
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
- if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
- return -ENOENT;
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
+ *cs++ = lower_32_bits(pd_daddr);
- __destroy_hw_context(ctx, file_priv);
- mutex_unlock(&dev->struct_mutex);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+ } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+ } else {
+ /* ppGTT is not part of the legacy context image */
+ gen6_ppgtt_pin(ppgtt);
+ }
-out:
- i915_gem_context_put(ctx);
return 0;
}
-static int get_sseu(struct i915_gem_context *ctx,
- struct drm_i915_gem_context_param *args)
+static int set_ppgtt(struct drm_i915_file_private *file_priv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- struct drm_i915_gem_context_param_sseu user_sseu;
- struct intel_engine_cs *engine;
- struct intel_context *ce;
- int ret;
-
- if (args->size == 0)
- goto out;
- else if (args->size < sizeof(user_sseu))
- return -EINVAL;
-
- if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
- sizeof(user_sseu)))
- return -EFAULT;
+ struct i915_hw_ppgtt *ppgtt, *old;
+ int err;
- if (user_sseu.flags || user_sseu.rsvd)
- return -EINVAL;
+ return -EINVAL; /* nothing to see here; please move along */
- engine = intel_engine_lookup_user(ctx->i915,
- user_sseu.engine_class,
- user_sseu.engine_instance);
- if (!engine)
+ if (args->size)
return -EINVAL;
- /* Only use for mutex here is to serialize get_param and set_param. */
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- ce = to_intel_context(ctx, engine);
-
- user_sseu.slice_mask = ce->sseu.slice_mask;
- user_sseu.subslice_mask = ce->sseu.subslice_mask;
- user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
- user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+ if (!ctx->ppgtt)
+ return -ENODEV;
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ if (upper_32_bits(args->value))
+ return -ENOENT;
- if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
- sizeof(user_sseu)))
- return -EFAULT;
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ return err;
-out:
- args->size = sizeof(user_sseu);
+ ppgtt = idr_find(&file_priv->vm_idr, args->value);
+ if (ppgtt) {
+ GEM_BUG_ON(ppgtt->user_handle != args->value);
+ i915_ppgtt_get(ppgtt);
+ }
+ mutex_unlock(&file_priv->vm_idr_lock);
+ if (!ppgtt)
+ return -ENOENT;
- return 0;
-}
+ err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ if (err)
+ goto out;
-int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_context_param *args = data;
- struct i915_gem_context *ctx;
- int ret = 0;
+ if (ppgtt == ctx->ppgtt)
+ goto unlock;
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
+ /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+ lut_close(ctx);
- switch (args->param) {
- case I915_CONTEXT_PARAM_BAN_PERIOD:
- ret = -EINVAL;
- break;
- case I915_CONTEXT_PARAM_NO_ZEROMAP:
- args->size = 0;
- args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
- break;
- case I915_CONTEXT_PARAM_GTT_SIZE:
- args->size = 0;
+ old = __set_ppgtt(ctx, ppgtt);
- if (ctx->ppgtt)
- args->value = ctx->ppgtt->vm.total;
- else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
- else
- args->value = to_i915(dev)->ggtt.vm.total;
- break;
- case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
- args->size = 0;
- args->value = i915_gem_context_no_error_capture(ctx);
- break;
- case I915_CONTEXT_PARAM_BANNABLE:
- args->size = 0;
- args->value = i915_gem_context_is_bannable(ctx);
- break;
- case I915_CONTEXT_PARAM_PRIORITY:
- args->size = 0;
- args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
- break;
- case I915_CONTEXT_PARAM_SSEU:
- ret = get_sseu(ctx, args);
- break;
- default:
- ret = -EINVAL;
- break;
+ /*
+ * We need to flush any requests using the current ppgtt before
+ * we release it as the requests do not hold a reference themselves,
+ * only indirectly through the context.
+ */
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ emit_ppgtt_update,
+ set_ppgtt_barrier,
+ old);
+ if (err) {
+ ctx->ppgtt = old;
+ ctx->desc_template = default_desc_template(ctx->i915, old);
+ i915_ppgtt_put(ppgtt);
}
- i915_gem_context_put(ctx);
- return ret;
+unlock:
+ mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+out:
+ i915_ppgtt_put(ppgtt);
+ return err;
}
static int gen8_emit_rpcs_config(struct i915_request *rq,
@@ -993,39 +1164,35 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
}
static int
-gen8_modify_rpcs_gpu(struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_sseu sseu)
+gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
- struct drm_i915_private *i915 = engine->i915;
- struct i915_request *rq, *prev;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ struct i915_request *rq;
intel_wakeref_t wakeref;
int ret;
- GEM_BUG_ON(!ce->pin_count);
+ lockdep_assert_held(&ce->pin_mutex);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ /*
+ * If the context is not idle, we have to submit an ordered request to
+ * modify its context image via the kernel context (writing to our own
+ * image, or into the registers directly, does not stick). Pristine
+ * and idle contexts will be configured on pinning.
+ */
+ if (!intel_context_is_pinned(ce))
+ return 0;
/* Submitting requests etc needs the hw awake. */
wakeref = intel_runtime_pm_get(i915);
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_alloc(ce->engine, i915->kernel_context);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto out_put;
}
/* Queue this switch after all other activity by this context. */
- prev = i915_active_request_raw(&ce->ring->timeline->last_request,
- &i915->drm.struct_mutex);
- if (prev && !i915_request_completed(prev)) {
- ret = i915_request_await_dma_fence(rq, &prev->fence);
- if (ret < 0)
- goto out_add;
- }
-
- /* Order all following requests to be after. */
- ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+ ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
if (ret)
goto out_add;
@@ -1057,27 +1224,26 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_sseu sseu)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
int ret = 0;
GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
+
+ ce = intel_context_pin_lock(ctx, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
/* Nothing to do if unmodified. */
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
- return 0;
-
- /*
- * If context is not idle we have to submit an ordered request to modify
- * its context image via the kernel context. Pristine and idle contexts
- * will be configured on pinning.
- */
- if (ce->pin_count)
- ret = gen8_modify_rpcs_gpu(ce, engine, sseu);
+ goto unlock;
+ ret = gen8_modify_rpcs(ce, sseu);
if (!ret)
ce->sseu = sseu;
+unlock:
+ intel_context_pin_unlock(ce);
return ret;
}
@@ -1220,8 +1386,8 @@ static int set_sseu(struct i915_gem_context *ctx,
return -EINVAL;
engine = intel_engine_lookup_user(i915,
- user_sseu.engine_class,
- user_sseu.engine_instance);
+ user_sseu.engine.engine_class,
+ user_sseu.engine.engine_instance);
if (!engine)
return -EINVAL;
@@ -1242,22 +1408,13 @@ static int set_sseu(struct i915_gem_context *ctx,
return 0;
}
-int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int ctx_setparam(struct drm_i915_file_private *fpriv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_context_param *args = data;
- struct i915_gem_context *ctx;
int ret = 0;
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
-
switch (args->param) {
- case I915_CONTEXT_PARAM_BAN_PERIOD:
- ret = -EINVAL;
- break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size)
ret = -EINVAL;
@@ -1266,6 +1423,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
break;
+
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
if (args->size)
ret = -EINVAL;
@@ -1274,6 +1432,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
i915_gem_context_clear_no_error_capture(ctx);
break;
+
case I915_CONTEXT_PARAM_BANNABLE:
if (args->size)
ret = -EINVAL;
@@ -1285,13 +1444,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
i915_gem_context_clear_bannable(ctx);
break;
+ case I915_CONTEXT_PARAM_RECOVERABLE:
+ if (args->size)
+ ret = -EINVAL;
+ else if (args->value)
+ i915_gem_context_set_recoverable(ctx);
+ else
+ i915_gem_context_clear_recoverable(ctx);
+ break;
+
case I915_CONTEXT_PARAM_PRIORITY:
{
s64 priority = args->value;
if (args->size)
ret = -EINVAL;
- else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
ret = -ENODEV;
else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
priority < I915_CONTEXT_MIN_USER_PRIORITY)
@@ -1304,14 +1472,266 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
I915_USER_PRIORITY(priority);
}
break;
+
case I915_CONTEXT_PARAM_SSEU:
ret = set_sseu(ctx, args);
break;
+
+ case I915_CONTEXT_PARAM_VM:
+ ret = set_ppgtt(fpriv, ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
break;
}
+ return ret;
+}
+
+struct create_ext {
+ struct i915_gem_context *ctx;
+ struct drm_i915_file_private *fpriv;
+};
+
+static int create_setparam(struct i915_user_extension __user *ext, void *data)
+{
+ struct drm_i915_gem_context_create_ext_setparam local;
+ const struct create_ext *arg = data;
+
+ if (copy_from_user(&local, ext, sizeof(local)))
+ return -EFAULT;
+
+ if (local.param.ctx_id)
+ return -EINVAL;
+
+ return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+ [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
+};
+
+static bool client_is_banned(struct drm_i915_file_private *file_priv)
+{
+ return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_context_create_ext *args = data;
+ struct create_ext ext_data;
+ int ret;
+
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return -ENODEV;
+
+ if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
+ return -EINVAL;
+
+ ret = i915_terminally_wedged(i915);
+ if (ret)
+ return ret;
+
+ ext_data.fpriv = file->driver_priv;
+ if (client_is_banned(ext_data.fpriv)) {
+ DRM_DEBUG("client %s[%d] banned from creating ctx\n",
+ current->comm,
+ pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ return -EIO;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ext_data.ctx = i915_gem_create_context(i915, args->flags);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(ext_data.ctx))
+ return PTR_ERR(ext_data.ctx);
+
+ if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
+ ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ create_extensions,
+ ARRAY_SIZE(create_extensions),
+ &ext_data);
+ if (ret)
+ goto err_ctx;
+ }
+
+ ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+ if (ret < 0)
+ goto err_ctx;
+
+ args->ctx_id = ret;
+ DRM_DEBUG("HW context %d created\n", args->ctx_id);
+
+ return 0;
+
+err_ctx:
+ mutex_lock(&dev->struct_mutex);
+ context_close(ext_data.ctx);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_context_destroy *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_gem_context *ctx;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (!args->ctx_id)
+ return -ENOENT;
+
+ if (mutex_lock_interruptible(&file_priv->context_idr_lock))
+ return -EINTR;
+
+ ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
+ mutex_unlock(&file_priv->context_idr_lock);
+ if (!ctx)
+ return -ENOENT;
+
+ mutex_lock(&dev->struct_mutex);
+ context_close(ctx);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int get_sseu(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ struct drm_i915_gem_context_param_sseu user_sseu;
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+
+ if (args->size == 0)
+ goto out;
+ else if (args->size < sizeof(user_sseu))
+ return -EINVAL;
+
+ if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+ if (user_sseu.flags || user_sseu.rsvd)
+ return -EINVAL;
+
+ engine = intel_engine_lookup_user(ctx->i915,
+ user_sseu.engine.engine_class,
+ user_sseu.engine.engine_instance);
+ if (!engine)
+ return -EINVAL;
+
+ ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ user_sseu.slice_mask = ce->sseu.slice_mask;
+ user_sseu.subslice_mask = ce->sseu.subslice_mask;
+ user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
+ user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+
+ intel_context_pin_unlock(ce);
+
+ if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+out:
+ args->size = sizeof(user_sseu);
+
+ return 0;
+}
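/*
 * Illustrative userspace query (an assumption, not part of this patch),
 * using the engine class/instance addressing introduced above; requires
 * <string.h>, <stdint.h>, <sys/ioctl.h> and <drm/i915_drm.h>:
 */
static int example_query_render_sseu(int fd, __u32 ctx_id,
				     struct drm_i915_gem_context_param_sseu *sseu)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_SSEU,
		.size = sizeof(*sseu),
		.value = (__u64)(uintptr_t)sseu,
	};

	memset(sseu, 0, sizeof(*sseu));	/* flags and rsvd must be zero */
	sseu->engine.engine_class = I915_ENGINE_CLASS_RENDER;
	sseu->engine.engine_instance = 0;

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
}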
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct i915_gem_context *ctx;
+ int ret = 0;
+
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ if (!ctx)
+ return -ENOENT;
+
+ switch (args->param) {
+ case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ args->size = 0;
+ args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
+ break;
+
+ case I915_CONTEXT_PARAM_GTT_SIZE:
+ args->size = 0;
+ if (ctx->ppgtt)
+ args->value = ctx->ppgtt->vm.total;
+ else if (to_i915(dev)->mm.aliasing_ppgtt)
+ args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+ else
+ args->value = to_i915(dev)->ggtt.vm.total;
+ break;
+
+ case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ args->size = 0;
+ args->value = i915_gem_context_no_error_capture(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_BANNABLE:
+ args->size = 0;
+ args->value = i915_gem_context_is_bannable(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_RECOVERABLE:
+ args->size = 0;
+ args->value = i915_gem_context_is_recoverable(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_PRIORITY:
+ args->size = 0;
+ args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
+ break;
+
+ case I915_CONTEXT_PARAM_SSEU:
+ ret = get_sseu(ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_VM:
+ ret = get_ppgtt(file_priv, ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ i915_gem_context_put(ctx);
+ return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct i915_gem_context *ctx;
+ int ret;
+
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ if (!ctx)
+ return -ENOENT;
+
+ ret = ctx_setparam(file_priv, ctx, args);
+
i915_gem_context_put(ctx);
return ret;
}
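/*
 * Illustrative userspace use of the new RECOVERABLE parameter (hypothetical
 * helper; the ioctl and param names come from the i915 uAPI):
 */
static int example_clear_recoverable(int fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_RECOVERABLE,
		.value = 0,	/* do not attempt automatic recovery after a hang */
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}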
@@ -1385,3 +1805,28 @@ out_unlock:
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif
+
+static void i915_global_gem_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_luts);
+}
+
+static void i915_global_gem_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_luts);
+}
+
+static struct i915_global_gem_context global = { {
+ .shrink = i915_global_gem_context_shrink,
+ .exit = i915_global_gem_context_exit,
+} };
+
+int __init i915_global_gem_context_init(void)
+{
+ global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
+ if (!global.slab_luts)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ca150a764c24..23dcb01bfd82 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -25,210 +25,16 @@
#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__
-#include <linux/bitops.h>
-#include <linux/list.h>
-#include <linux/radix-tree.h>
+#include "i915_gem_context_types.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
+#include "intel_context.h"
#include "intel_device_info.h"
-struct pid;
-
struct drm_device;
struct drm_file;
-struct drm_i915_private;
-struct drm_i915_file_private;
-struct i915_hw_ppgtt;
-struct i915_request;
-struct i915_vma;
-struct intel_ring;
-
-#define DEFAULT_CONTEXT_HANDLE 0
-
-struct intel_context;
-
-struct intel_context_ops {
- void (*unpin)(struct intel_context *ce);
- void (*destroy)(struct intel_context *ce);
-};
-
-/*
- * Powergating configuration for a particular (context,engine).
- */
-struct intel_sseu {
- u8 slice_mask;
- u8 subslice_mask;
- u8 min_eus_per_subslice;
- u8 max_eus_per_subslice;
-};
-
-/**
- * struct i915_gem_context - client state
- *
- * The struct i915_gem_context represents the combined view of the driver and
- * logical hardware state for a particular client.
- */
-struct i915_gem_context {
- /** i915: i915 device backpointer */
- struct drm_i915_private *i915;
-
- /** file_priv: owning file descriptor */
- struct drm_i915_file_private *file_priv;
-
- /**
- * @ppgtt: unique address space (GTT)
- *
- * In full-ppgtt mode, each context has its own address space ensuring
- * complete separation of one client from all others.
- *
- * In other modes, this is a NULL pointer with the expectation that
- * the caller uses the shared global GTT.
- */
- struct i915_hw_ppgtt *ppgtt;
-
- /**
- * @pid: process id of creator
- *
- * Note that who created the context may not be the principal user,
- * as the context may be shared across a local socket. However,
- * that should only affect the default context, all contexts created
- * explicitly by the client are expected to be isolated.
- */
- struct pid *pid;
-
- /**
- * @name: arbitrary name
- *
- * A name is constructed for the context from the creator's process
- * name, pid and user handle in order to uniquely identify the
- * context in messages.
- */
- const char *name;
-
- /** link: place with &drm_i915_private.context_list */
- struct list_head link;
- struct llist_node free_link;
-
- /**
- * @ref: reference count
- *
- * A reference to a context is held by both the client who created it
- * and on each request submitted to the hardware using the request
- * (to ensure the hardware has access to the state until it has
- * finished all pending writes). See i915_gem_context_get() and
- * i915_gem_context_put() for access.
- */
- struct kref ref;
-
- /**
- * @rcu: rcu_head for deferred freeing.
- */
- struct rcu_head rcu;
-
- /**
- * @user_flags: small set of booleans controlled by the user
- */
- unsigned long user_flags;
-#define UCONTEXT_NO_ZEROMAP 0
-#define UCONTEXT_NO_ERROR_CAPTURE 1
-#define UCONTEXT_BANNABLE 2
-
- /**
- * @flags: small set of booleans
- */
- unsigned long flags;
-#define CONTEXT_BANNED 0
-#define CONTEXT_CLOSED 1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
-
- /**
- * @hw_id: - unique identifier for the context
- *
- * The hardware needs to uniquely identify the context for a few
- * functions like fault reporting, PASID, scheduling. The
- * &drm_i915_private.context_hw_ida is used to assign a unique
- * id for the lifetime of the context.
- *
- * @hw_id_pin_count: - number of times this context had been pinned
- * for use (should be, at most, once per engine).
- *
- * @hw_id_link: - all contexts with an assigned id are tracked
- * for possible repossession.
- */
- unsigned int hw_id;
- atomic_t hw_id_pin_count;
- struct list_head hw_id_link;
-
- /**
- * @user_handle: userspace identifier
- *
- * A unique per-file identifier is generated from
- * &drm_i915_file_private.contexts.
- */
- u32 user_handle;
-
- struct i915_sched_attr sched;
-
- /** engine: per-engine logical HW state */
- struct intel_context {
- struct i915_gem_context *gem_context;
- struct intel_engine_cs *active;
- struct list_head signal_link;
- struct list_head signals;
- struct i915_vma *state;
- struct intel_ring *ring;
- u32 *lrc_reg_state;
- u64 lrc_desc;
- int pin_count;
-
- /**
- * active_tracker: Active tracker for the external rq activity
- * on this intel_context object.
- */
- struct i915_active_request active_tracker;
-
- const struct intel_context_ops *ops;
-
- /** sseu: Control eu/slice partitioning */
- struct intel_sseu sseu;
- } __engine[I915_NUM_ENGINES];
-
- /** ring_size: size for allocating the per-engine ring buffer */
- u32 ring_size;
- /** desc_template: invariant fields for the HW context descriptor */
- u32 desc_template;
-
- /** guilty_count: How many times this context has caused a GPU hang. */
- atomic_t guilty_count;
- /**
- * @active_count: How many times this context was active during a GPU
- * hang, but did not cause it.
- */
- atomic_t active_count;
-
-#define CONTEXT_SCORE_GUILTY 10
-#define CONTEXT_SCORE_BAN_THRESHOLD 40
- /** ban_score: Accumulated score of all hangs caused by this context. */
- atomic_t ban_score;
-
- /** remap_slice: Bitmask of cache lines that need remapping */
- u8 remap_slice;
-
- /** handles_vma: rbtree to look up our context specific obj/vma for
- * the user handle. (user handles are per fd, but the binding is
- * per vm, which may be one per context or shared with the global GTT)
- */
- struct radix_tree_root handles_vma;
-
- /** handles_list: reverse list of all the rbtree entries in use for
- * this context, which allows us to free all the allocations on
- * context close.
- */
- struct list_head handles_list;
-};
-
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_CLOSED, &ctx->flags);
@@ -270,6 +76,21 @@ static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
+static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
+{
+ return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
+{
+ set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
+{
+ clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
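/*
 * A minimal sketch (not part of this patch) of how the recoverable flag
 * helpers above would typically be driven from a context-parameter
 * setter; the setter framing and argument layout are assumptions for
 * illustration only.
 */
static int sketch_set_recoverable(struct i915_gem_context *ctx, u64 value)
{
	if (value > 1)
		return -EINVAL;	/* the flag is strictly a boolean */

	if (value)
		i915_gem_context_set_recoverable(ctx);
	else
		i915_gem_context_clear_recoverable(ctx);

	return 0;
}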
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNED, &ctx->flags);
@@ -305,45 +126,11 @@ static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
atomic_dec(&ctx->hw_id_pin_count);
}
-static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
-{
- return c->user_handle == DEFAULT_CONTEXT_HANDLE;
-}
-
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
return !ctx->file_priv;
}
-static inline struct intel_context *
-to_intel_context(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
-{
- return &ctx->__engine[engine->id];
-}
-
-static inline struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
-{
- return engine->context_pin(engine, ctx);
-}
-
-static inline void __intel_context_pin(struct intel_context *ce)
-{
- GEM_BUG_ON(!ce->pin_count);
- ce->pin_count++;
-}
-
-static inline void intel_context_unpin(struct intel_context *ce)
-{
- GEM_BUG_ON(!ce->pin_count);
- if (--ce->pin_count)
- return;
-
- GEM_BUG_ON(!ce->ops);
- ce->ops->unpin(ce);
-}
-
/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
@@ -354,12 +141,18 @@ int i915_gem_context_open(struct drm_i915_private *i915,
void i915_gem_context_close(struct drm_file *file);
int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask);
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
@@ -386,8 +179,7 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
-void intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+struct i915_lut_handle *i915_lut_handle_alloc(void);
+void i915_lut_handle_free(struct i915_lut_handle *lut);
#endif /* !__I915_GEM_CONTEXT_H__ */
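/*
 * i915_gem_context_types.h (added below) carries only the data layout;
 * i915_gem_context.h keeps the inline helpers and API declarations. A
 * minimal sketch of the intent, with a hypothetical consumer: code that
 * needs the struct definition but none of the API can include the
 * lighter header and avoid its include fan-out.
 */
#include "i915_gem_context_types.h"

static inline bool sketch_ctx_has_ppgtt(const struct i915_gem_context *ctx)
{
	return ctx->ppgtt != NULL;	/* only the layout is required */
}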
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h
new file mode 100644
index 000000000000..e2ec58b10fb2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context_types.h
@@ -0,0 +1,175 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_CONTEXT_TYPES_H__
+#define __I915_GEM_CONTEXT_TYPES_H__
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+#include <linux/types.h>
+
+#include "i915_scheduler.h"
+#include "intel_context_types.h"
+
+struct pid;
+
+struct drm_i915_private;
+struct drm_i915_file_private;
+struct i915_hw_ppgtt;
+struct i915_timeline;
+struct intel_ring;
+
+/**
+ * struct i915_gem_context - client state
+ *
+ * The struct i915_gem_context represents the combined view of the driver and
+ * logical hardware state for a particular client.
+ */
+struct i915_gem_context {
+ /** i915: i915 device backpointer */
+ struct drm_i915_private *i915;
+
+ /** file_priv: owning file descriptor */
+ struct drm_i915_file_private *file_priv;
+
+ struct i915_timeline *timeline;
+
+ /**
+ * @ppgtt: unique address space (GTT)
+ *
+ * In full-ppgtt mode, each context has its own address space, ensuring
+ * complete separation of one client from all others.
+ *
+ * In other modes, this is a NULL pointer with the expectation that
+ * the caller uses the shared global GTT.
+ */
+ struct i915_hw_ppgtt *ppgtt;
+
+ /**
+ * @pid: process id of creator
+ *
+ * Note that the creator of the context may not be its principal user,
+ * as the context may be shared across a local socket. However,
+ * that should only affect the default context; all contexts created
+ * explicitly by the client are expected to be isolated.
+ */
+ struct pid *pid;
+
+ /**
+ * @name: arbitrary name
+ *
+ * A name is constructed for the context from the creator's process
+ * name, pid and user handle in order to uniquely identify the
+ * context in messages.
+ */
+ const char *name;
+
+ /** link: place within &drm_i915_private.context_list */
+ struct list_head link;
+ struct llist_node free_link;
+
+ /**
+ * @ref: reference count
+ *
+ * A reference to a context is held both by the client who created it
+ * and by each request submitted to the hardware against that context
+ * (to ensure the hardware has access to the state until it has
+ * finished all pending writes). See i915_gem_context_get() and
+ * i915_gem_context_put() for access.
+ */
+ struct kref ref;
+
+ /**
+ * @rcu: rcu_head for deferred freeing.
+ */
+ struct rcu_head rcu;
+
+ /**
+ * @user_flags: small set of booleans controlled by the user
+ */
+ unsigned long user_flags;
+#define UCONTEXT_NO_ZEROMAP 0
+#define UCONTEXT_NO_ERROR_CAPTURE 1
+#define UCONTEXT_BANNABLE 2
+#define UCONTEXT_RECOVERABLE 3
+
+ /**
+ * @flags: small set of booleans
+ */
+ unsigned long flags;
+#define CONTEXT_BANNED 0
+#define CONTEXT_CLOSED 1
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
+
+ /**
+ * @hw_id: - unique identifier for the context
+ *
+ * The hardware needs to uniquely identify the context for a few
+ * functions like fault reporting, PASID, scheduling. The
+ * &drm_i915_private.context_hw_ida is used to assign a unique
+ * id for the lifetime of the context.
+ *
+ * @hw_id_pin_count: - number of times this context had been pinned
+ * for use (should be, at most, once per engine).
+ *
+ * @hw_id_link: - all contexts with an assigned id are tracked
+ * for possible repossession.
+ */
+ unsigned int hw_id;
+ atomic_t hw_id_pin_count;
+ struct list_head hw_id_link;
+
+ struct list_head active_engines;
+ struct mutex mutex;
+
+ struct i915_sched_attr sched;
+
+ /** hw_contexts: per-engine logical HW state */
+ struct rb_root hw_contexts;
+ spinlock_t hw_contexts_lock;
+
+ /** ring_size: size for allocating the per-engine ring buffer */
+ u32 ring_size;
+ /** desc_template: invariant fields for the HW context descriptor */
+ u32 desc_template;
+
+ /** guilty_count: How many times this context has caused a GPU hang. */
+ atomic_t guilty_count;
+ /**
+ * @active_count: How many times this context was active during a GPU
+ * hang, but did not cause it.
+ */
+ atomic_t active_count;
+
+ /**
+ * @hang_timestamp: The last time(s) this context caused a GPU hang
+ */
+ unsigned long hang_timestamp[2];
+#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */
+
+ /** remap_slice: Bitmask of cache lines that need remapping */
+ u8 remap_slice;
+
+ /** handles_vma: rbtree to look up our context specific obj/vma for
+ * the user handle. (user handles are per fd, but the binding is
+ * per vm, which may be one per context or shared with the global GTT)
+ */
+ struct radix_tree_root handles_vma;
+
+ /** handles_list: reverse list of all the rbtree entries in use for
+ * this context, which allows us to free all the allocations on
+ * context close.
+ */
+ struct list_head handles_list;
+};
+
+#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
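/*
 * With the fixed __engine[I915_NUM_ENGINES] array gone, per-engine state
 * now hangs off the hw_contexts rbtree guarded by hw_contexts_lock. A
 * rough sketch of a lookup keyed on the engine pointer (the real
 * intel_context_lookup() lives elsewhere; the node/engine field names of
 * struct intel_context are assumptions here):
 */
static struct intel_context *
sketch_context_lookup(struct i915_gem_context *ctx,
		      const struct intel_engine_cs *engine)
{
	struct intel_context *ce = NULL;
	struct rb_node *p;

	spin_lock(&ctx->hw_contexts_lock);
	p = ctx->hw_contexts.rb_node;
	while (p) {
		struct intel_context *it =
			rb_entry(p, struct intel_context, node);

		if (it->engine < engine) {
			p = p->rb_right;
		} else if (it->engine > engine) {
			p = p->rb_left;
		} else {
			ce = it;
			break;
		}
	}
	spin_unlock(&ctx->hw_contexts_lock);

	return ce;
}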
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 02f7298bfe57..5a101a9462d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -107,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
}
@@ -300,7 +301,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- obj = i915_gem_object_alloc(to_i915(dev));
+ obj = i915_gem_object_alloc();
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 68d74c50ac39..060f5903544a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,31 +38,21 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
static bool ggtt_is_idle(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (i915->gt.active_requests)
- return false;
-
- for_each_engine(engine, i915, id) {
- if (!intel_engine_has_kernel_context(engine))
- return false;
- }
-
- return true;
+ return !i915->gt.active_requests;
}
static int ggtt_flush(struct drm_i915_private *i915)
{
int err;
- /* Not everything in the GGTT is tracked via vma (otherwise we
+ /*
+ * Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
* to idle the GPU and explicitly retire outstanding requests in
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- err = i915_gem_switch_to_kernel_context(i915);
+ err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 02adcaf6ebea..c83d2a195d15 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -794,8 +794,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
* keeping all of their resources pinned.
*/
- ce = to_intel_context(eb->ctx, eb->engine);
- if (!ce->ring) /* first use, assume empty! */
+ ce = intel_context_lookup(eb->ctx, eb->engine);
+ if (!ce || !ce->ring) /* first use, assume empty! */
return 0;
rq = __eb_wait_for_ring(ce->ring);
@@ -849,12 +849,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
vma = i915_vma_instance(obj, eb->vm, NULL);
- if (unlikely(IS_ERR(vma))) {
+ if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
}
- lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
+ lut = i915_lut_handle_alloc();
if (unlikely(!lut)) {
err = -ENOMEM;
goto err_obj;
@@ -862,7 +862,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
- kmem_cache_free(eb->i915->luts, lut);
+ i915_lut_handle_free(lut);
goto err_obj;
}
@@ -1001,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
{
GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+
+ __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
i915_gem_object_unpin_map(cache->rq->batch->obj);
+
i915_gem_chipset_flush(cache->rq->i915);
i915_request_add(cache->rq);
@@ -1214,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (IS_ERR(cmd))
return PTR_ERR(cmd);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- if (err)
- goto err_unmap;
-
batch = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -1667,6 +1666,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
len)) {
end_user:
user_access_end();
+end:
kvfree(relocs);
err = -EFAULT;
goto err;
@@ -1686,7 +1686,7 @@ end_user:
* relocations were valid.
*/
if (!user_access_begin(urelocs, size))
- goto end_user;
+ goto end;
for (copied = 0; copied < nreloc; copied++)
unsafe_put_user(-1,
@@ -1957,7 +1957,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
+ if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2082,11 +2082,11 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
#define I915_USER_RINGS (4)
static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
- [I915_EXEC_DEFAULT] = RCS,
- [I915_EXEC_RENDER] = RCS,
- [I915_EXEC_BLT] = BCS,
- [I915_EXEC_BSD] = VCS,
- [I915_EXEC_VEBOX] = VECS
+ [I915_EXEC_DEFAULT] = RCS0,
+ [I915_EXEC_RENDER] = RCS0,
+ [I915_EXEC_BLT] = BCS0,
+ [I915_EXEC_BSD] = VCS0,
+ [I915_EXEC_VEBOX] = VECS0
};
static struct intel_engine_cs *
@@ -2109,7 +2109,7 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
- if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+ if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2312,10 +2312,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (args->flags & I915_EXEC_IS_PINNED)
eb.batch_flags |= I915_DISPATCH_PINNED;
- eb.engine = eb_select_engine(eb.i915, file, args);
- if (!eb.engine)
- return -EINVAL;
-
if (args->flags & I915_EXEC_FENCE_IN) {
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
if (!in_fence)
@@ -2340,6 +2336,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
+ eb.engine = eb_select_engine(eb.i915, file, args);
+ if (!eb.engine) {
+ err = -EINVAL;
+ goto err_engine;
+ }
+
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
@@ -2505,6 +2507,7 @@ err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(eb.i915, wakeref);
+err_engine:
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);
@@ -2695,7 +2698,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
* when we did the "copy_from_user()" above.
*/
if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
- goto end_user;
+ goto end;
for (i = 0; i < args->buffer_count; i++) {
if (!(exec2_list[i].offset & UPDATE))
@@ -2709,6 +2712,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
}
end_user:
user_access_end();
+end:;
}
args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
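/*
 * The end/end_user relabelling above fixes an error path that previously
 * jumped to user_access_end() even when user_access_begin() had failed.
 * The corrected shape of the pattern, in miniature (names are generic):
 */
static int sketch_copy_back(u32 __user *ptr, u32 value)
{
	int err = -EFAULT;

	if (!user_access_begin(ptr, sizeof(*ptr)))
		goto end;		/* begin failed: skip user_access_end() */

	unsafe_put_user(value, ptr, end_user);
	err = 0;
end_user:
	user_access_end();		/* only after a successful begin */
end:
	return err;
}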
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index e037e94792f3..3084f52e3372 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -210,6 +210,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
+ struct i915_vma *old;
int ret;
if (vma) {
@@ -229,49 +230,55 @@ static int fence_update(struct drm_i915_fence_reg *fence,
return ret;
}
- if (fence->vma) {
- struct i915_vma *old = fence->vma;
-
+ old = xchg(&fence->vma, NULL);
+ if (old) {
ret = i915_active_request_retire(&old->last_fence,
&old->obj->base.dev->struct_mutex);
- if (ret)
+ if (ret) {
+ fence->vma = old;
return ret;
+ }
i915_vma_flush_writes(old);
- }
- if (fence->vma && fence->vma != vma) {
- /* Ensure that all userspace CPU access is completed before
+ /*
+ * Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
- GEM_BUG_ON(fence->vma->fence != fence);
- i915_vma_revoke_mmap(fence->vma);
-
- fence->vma->fence = NULL;
- fence->vma = NULL;
+ if (old != vma) {
+ GEM_BUG_ON(old->fence != fence);
+ i915_vma_revoke_mmap(old);
+ old->fence = NULL;
+ }
list_move(&fence->link, &fence->i915->mm.fence_list);
}
- /* We only need to update the register itself if the device is awake.
+ /*
+ * We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
+ *
+ * This only works for removing the fence register; on acquisition
+ * the caller must hold the rpm wakeref. The fence register must
+ * be cleared before we can use any other fences to ensure that
+ * the new fences do not overlap the elided clears, confusing HW.
*/
wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
- if (wakeref) {
- fence_write(fence, vma);
- intel_runtime_pm_put(fence->i915, wakeref);
+ if (!wakeref) {
+ GEM_BUG_ON(vma);
+ return 0;
}
- if (vma) {
- if (fence->vma != vma) {
- vma->fence = fence;
- fence->vma = vma;
- }
+ WRITE_ONCE(fence->vma, vma);
+ fence_write(fence, vma);
+ if (vma) {
+ vma->fence = fence;
list_move_tail(&fence->link, &fence->i915->mm.fence_list);
}
+ intel_runtime_pm_put(fence->i915, wakeref);
return 0;
}
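/*
 * fence_update() above now detaches the previous vma with xchg() and
 * republishes the new one with WRITE_ONCE(), so that
 * i915_gem_restore_fences() below can sample fence->vma locklessly under
 * rcu_read_lock(). The writer/reader pair in isolation, assuming (as the
 * hunk's comment does) that the pointed-to object stays safe to
 * dereference for the duration of the RCU read side:
 */
struct sketch_slot {
	struct i915_vma *vma;
};

static struct i915_vma *sketch_publish(struct sketch_slot *slot,
				       struct i915_vma *new)
{
	struct i915_vma *old = xchg(&slot->vma, NULL);	/* detach old */

	WRITE_ONCE(slot->vma, new);			/* republish new */
	return old;
}

static bool sketch_sample(struct sketch_slot *slot)
{
	struct i915_vma *vma;
	bool busy;

	rcu_read_lock();			/* object kept alive for the read */
	vma = READ_ONCE(slot->vma);		/* single, consistent sample */
	busy = vma != NULL;
	rcu_read_unlock();

	return busy;
}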
@@ -436,32 +443,6 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
}
/**
- * i915_gem_revoke_fences - revoke fence state
- * @dev_priv: i915 device private
- *
- * Removes all GTT mmappings via the fence registers. This forces any user
- * of the fence to reacquire that fence before continuing with their access.
- * One use is during GPU reset where the fence register is lost and we need to
- * revoke concurrent userspace access via GTT mmaps until the hardware has been
- * reset and the fence registers have been restored.
- */
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
-{
- int i;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
-
- GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
-
- if (fence->vma)
- i915_vma_revoke_mmap(fence->vma);
- }
-}
-
-/**
* i915_gem_restore_fences - restore fence state
* @dev_priv: i915 device private
*
@@ -473,9 +454,10 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
int i;
+ rcu_read_lock(); /* keep obj alive as we dereference */
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- struct i915_vma *vma = reg->vma;
+ struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -483,18 +465,12 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
- if (vma && !i915_gem_object_is_tiled(vma->obj)) {
- GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(i915_vma_has_userfault(vma));
-
- list_move(&reg->link, &dev_priv->mm.fence_list);
- vma->fence = NULL;
+ if (vma && !i915_gem_object_is_tiled(vma->obj))
vma = NULL;
- }
fence_write(reg, vma);
- reg->vma = vma;
}
+ rcu_read_unlock();
}
/**
@@ -609,8 +585,38 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev_priv) ||
- IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
+ } else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
+ /* The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ *
+ * Reports indicate that the swizzling actually
+ * varies depending upon page placement inside the
+ * channels, i.e. we see swizzled pages where the
+ * banks of memory are paired and unswizzled on the
+ * uneven portion, so leave that as unknown.
+ */
+ if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ } else {
u32 dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@@ -660,37 +666,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
- } else {
- /* The 965, G33, and newer, have a very flexible memory
- * configuration. It will enable dual-channel mode
- * (interleaving) on as much memory as it can, and the GPU
- * will additionally sometimes enable different bit 6
- * swizzling for tiled objects from the CPU.
- *
- * Here's what I found on the G965:
- * slot fill memory size swizzling
- * 0A 0B 1A 1B 1-ch 2-ch
- * 512 0 0 0 512 0 O
- * 512 0 512 0 16 1008 X
- * 512 0 0 512 16 1008 X
- * 0 512 0 512 16 1008 X
- * 1024 1024 1024 0 2048 1024 O
- *
- * We could probably detect this based on either the DRB
- * matching, which was the case for the swizzling required in
- * the table above, or from the 1-ch value being less than
- * the minimum size of a rank.
- *
- * Reports indicate that the swizzling actually
- * varies depending upon page placement inside the
- * channels, i.e. we see swizzled pages where the
- * banks of memory are paired and unswizzled on the
- * uneven portion, so leave that as unknown.
- */
- if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
}
if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
@@ -790,8 +765,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
int i;
if (obj->bit_17 == NULL) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
- sizeof(long), GFP_KERNEL);
+ obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d646d37eec2f..8f460cc4cc1f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -584,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
- if (i915_vm_is_48bit(vm) &&
+ if (i915_vm_is_4lvl(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
size = I915_GTT_PAGE_SIZE_64K;
gfp |= __GFP_NOWARN;
@@ -613,7 +613,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
vm->scratch_page.page = page;
vm->scratch_page.daddr = addr;
- vm->scratch_page.order = order;
+ vm->scratch_order = order;
return 0;
unmap_page:
@@ -632,10 +632,11 @@ skip:
static void cleanup_scratch_page(struct i915_address_space *vm)
{
struct i915_page_dma *p = &vm->scratch_page;
+ int order = vm->scratch_order;
- dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+ dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
PCI_DMA_BIDIRECTIONAL);
- __free_pages(p->page, p->order);
+ __free_pages(p->page, order);
}
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -726,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
pdp->page_directory = NULL;
}
-static inline bool use_4lvl(const struct i915_address_space *vm)
-{
- return i915_vm_is_48bit(vm);
-}
-
static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- GEM_BUG_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
@@ -766,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm,
{
__pdp_fini(pdp);
- if (!use_4lvl(vm))
+ if (!i915_vm_is_4lvl(vm))
return;
cleanup_px(vm, pdp);
@@ -791,14 +787,15 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+/*
+ * PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
+ ppgtt->pd_dirty_engines = ALL_ENGINES;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -809,8 +806,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
u64 start, u64 length)
{
unsigned int num_entries = gen8_pte_count(start, length);
- unsigned int pte = gen8_pte_index(start);
- unsigned int pte_end = pte + num_entries;
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -820,8 +815,7 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
return true;
vaddr = kmap_atomic_px(pt);
- while (pte < pte_end)
- vaddr[pte++] = vm->scratch_pte;
+ memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
kunmap_atomic(vaddr);
return false;
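/*
 * memset64() fills a run of 64-bit words with one value, replacing the
 * explicit PTE loop that was here; the equivalence in miniature:
 */
static void sketch_fill_ptes(u64 *vaddr, unsigned int first,
			     unsigned int count, u64 scratch_pte)
{
	/* was: while (count--) vaddr[first++] = scratch_pte; */
	memset64(vaddr + first, scratch_pte, count);
}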
@@ -872,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
gen8_ppgtt_pdpe_t *vaddr;
pdp->page_directory[pdpe] = pd;
- if (!use_4lvl(vm))
+ if (!i915_vm_is_4lvl(vm))
return;
vaddr = kmap_atomic_px(pdp);
@@ -937,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp;
unsigned int pml4e;
- GEM_BUG_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
GEM_BUG_ON(pdp == vm->scratch_pdp);
@@ -1219,7 +1213,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
GEM_BUG_ON(!clone->has_read_only);
- vm->scratch_page.order = clone->scratch_page.order;
+ vm->scratch_order = clone->scratch_order;
vm->scratch_pte = clone->scratch_pte;
vm->scratch_pt = clone->scratch_pt;
vm->scratch_pd = clone->scratch_pd;
@@ -1234,7 +1228,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC,
- PTE_READ_ONLY);
+ vm->has_read_only);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
@@ -1248,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
goto free_pt;
}
- if (use_4lvl(vm)) {
+ if (i915_vm_is_4lvl(vm)) {
vm->scratch_pdp = alloc_pdp(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
@@ -1258,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
@@ -1280,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
enum vgt_g2v_type msg;
int i;
- if (use_4lvl(vm)) {
+ if (i915_vm_is_4lvl(vm)) {
const u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -1310,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
if (!vm->scratch_page.daddr)
return;
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
free_pt(vm, vm->scratch_pt);
@@ -1356,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
@@ -1519,6 +1513,23 @@ unwind:
return -ENOMEM;
}
+static void ppgtt_init(struct drm_i915_private *i915,
+ struct i915_hw_ppgtt *ppgtt)
+{
+ kref_init(&ppgtt->ref);
+
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
+}
+
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1535,19 +1546,15 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- kref_init(&ppgtt->ref);
+ ppgtt_init(i915, ppgtt);
- ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
-
- ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
- 1ULL << 48 :
- 1ULL << 32;
-
- /* From bdw, there is support for read-only pages in the PPGTT. */
- ppgtt->vm.has_read_only = true;
-
- i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+ /*
+ * From bdw, there is hw support for read-only pages in the PPGTT.
+ *
+ * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
+ * for now.
+ */
+ ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
@@ -1559,7 +1566,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
- if (use_4lvl(&ppgtt->vm)) {
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
err = setup_px(&ppgtt->vm, &ppgtt->pml4);
if (err)
goto err_scratch;
@@ -1592,11 +1599,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
-
return ppgtt;
err_scratch:
@@ -1672,8 +1674,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
while (num_entries) {
struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
- const unsigned int end = min(pte + num_entries, GEN6_PTES);
- const unsigned int count = end - pte;
+ const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
GEM_BUG_ON(pt == vm->scratch_pt);
@@ -1693,9 +1694,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
*/
vaddr = kmap_atomic_px(pt);
- do {
- vaddr[pte++] = scratch_pte;
- } while (pte < end);
+ memset32(vaddr + pte, scratch_pte, count);
kunmap_atomic(vaddr);
pte = 0;
@@ -1913,7 +1912,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(size > ggtt->vm.total);
- vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+ vma = i915_vma_alloc();
if (!vma)
return ERR_PTR(-ENOMEM);
@@ -1943,6 +1942,8 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
+ GEM_BUG_ON(ppgtt->base.vm.closed);
+
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
* which will be pinned into every active context.
@@ -1981,6 +1982,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
+void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
+{
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ if (!ppgtt->pin_count)
+ return;
+
+ ppgtt->pin_count = 0;
+ i915_vma_unpin(ppgtt->vma);
+}
+
static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;
@@ -1991,25 +2003,13 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- kref_init(&ppgtt->base.ref);
-
- ppgtt->base.vm.i915 = i915;
- ppgtt->base.vm.dma = &i915->drm.pdev->dev;
-
- ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
-
- i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
+ ppgtt_init(i915, &ppgtt->base);
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
- ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
-
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
err = gen6_ppgtt_init_scratch(ppgtt);
@@ -2087,8 +2087,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915)
}
struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *i915,
- struct drm_i915_file_private *fpriv)
+i915_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_hw_ppgtt *ppgtt;
@@ -2096,19 +2095,11 @@ i915_ppgtt_create(struct drm_i915_private *i915,
if (IS_ERR(ppgtt))
return ppgtt;
- ppgtt->vm.file = fpriv;
-
trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
-void i915_ppgtt_close(struct i915_address_space *vm)
-{
- GEM_BUG_ON(vm->closed);
- vm->closed = true;
-}
-
static void ppgtt_destroy_vma(struct i915_address_space *vm)
{
struct list_head *phases[] = {
@@ -2675,7 +2666,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
struct i915_hw_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
+ ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -3701,7 +3692,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
}
ret = 0;
- if (unlikely(IS_ERR(vma->pages))) {
+ if (IS_ERR(vma->pages)) {
ret = PTR_ERR(vma->pages);
vma->pages = NULL;
DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 03ade71b8d9a..f597f35b109b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -213,7 +213,6 @@ struct i915_vma;
struct i915_page_dma {
struct page *page;
- int order;
union {
dma_addr_t daddr;
@@ -293,6 +292,7 @@ struct i915_address_space {
#define VM_CLASS_PPGTT 1
u64 scratch_pte;
+ int scratch_order;
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -348,7 +348,7 @@ struct i915_address_space {
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
static inline bool
-i915_vm_is_48bit(const struct i915_address_space *vm)
+i915_vm_is_4lvl(const struct i915_address_space *vm)
{
return (vm->total - 1) >> 32;
}
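/*
 * Worked through, the renamed predicate above keys purely off the size of
 * the address space: a 32-bit (4 GiB) space gives
 * (BIT_ULL(32) - 1) >> 32 == 0, so three page-table levels suffice, while
 * a 48-bit space gives (BIT_ULL(48) - 1) >> 32 == 0xffff, i.e. a fourth
 * level (the PML4) is required.
 */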
@@ -356,7 +356,7 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
- return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
+ return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
/* The Graphics Translation Table is the way in which GEN hardware translates a
@@ -390,12 +390,14 @@ struct i915_hw_ppgtt {
struct i915_address_space vm;
struct kref ref;
- unsigned long pd_dirty_rings;
+ intel_engine_mask_t pd_dirty_engines;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
+
+ u32 user_handle;
};
struct gen6_hw_ppgtt {
@@ -488,7 +490,7 @@ static inline u32 gen6_pde_index(u32 addr)
static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
- if (i915_vm_is_48bit(vm))
+ if (i915_vm_is_4lvl(vm))
return GEN8_PML4ES_PER_PML4;
return GEN8_3LVL_PDPES;
@@ -603,15 +605,16 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
+
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv);
-void i915_ppgtt_close(struct i915_address_space *vm);
-static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+
+static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
- if (ppgtt)
- kref_get(&ppgtt->ref);
+ kref_get(&ppgtt->ref);
+ return ppgtt;
}
+
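/*
 * Returning the ppgtt (and dropping the old NULL tolerance) lets callers
 * take the reference inline at the assignment site, roughly:
 *
 *	ctx->ppgtt = i915_ppgtt_get(ppgtt);
 *
 * rather than assigning first and bumping the refcount afterwards; the
 * ctx->ppgtt destination here is only illustrative.
 */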
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -620,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index fddde1033e74..ab627ed1269c 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -193,7 +193,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
index aab8cdd80e6d..ac6a5ab84586 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -24,6 +24,22 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
+#include "i915_globals.h"
+
+static struct i915_global_object {
+ struct i915_global base;
+ struct kmem_cache *slab_objects;
+} global;
+
+struct drm_i915_gem_object *i915_gem_object_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ return kmem_cache_free(global.slab_objects, obj);
+}
/**
* Mark up the object's coherency levels for a given cache_level
@@ -46,3 +62,29 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
obj->cache_dirty =
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
+
+static void i915_global_objects_shrink(void)
+{
+ kmem_cache_shrink(global.slab_objects);
+}
+
+static void i915_global_objects_exit(void)
+{
+ kmem_cache_destroy(global.slab_objects);
+}
+
+static struct i915_global_object global = { {
+ .shrink = i915_global_objects_shrink,
+ .exit = i915_global_objects_exit,
+} };
+
+int __init i915_global_objects_init(void)
+{
+ global.slab_objects =
+ KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_objects)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index fab040331cdb..ca93a40c0c87 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -304,6 +304,9 @@ to_intel_bo(struct drm_gem_object *gem)
return container_of(gem, struct drm_i915_gem_object, base);
}
+struct drm_i915_gem_object *i915_gem_object_alloc(void);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
* @filp: DRM file private date
@@ -499,5 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
-#endif
+void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ bool needs_clflush);
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 90baf9086d0a..9440024c763f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -42,7 +42,7 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
- if (engine->id != RCS)
+ if (engine->id != RCS0)
return NULL;
switch (INTEL_GEN(engine->i915)) {
@@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so,
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
- ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+ ret = 0;
out:
i915_gem_obj_finish_shmem_access(so->obj);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 74a9661479ca..0a8082cfc761 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -565,7 +565,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj;
unsigned int cache_level;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 16cc9ddbce34..a9b5329dae3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -301,11 +301,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (!obj->bit_17) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
- sizeof(long), GFP_KERNEL);
+ obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
+ GFP_KERNEL);
}
} else {
- kfree(obj->bit_17);
+ bitmap_free(obj->bit_17);
obj->bit_17 = NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1d3f9a31ad61..8079ea3af103 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -122,7 +122,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
while (it) {
struct drm_i915_gem_object *obj;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
break;
}
@@ -673,9 +673,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
if (!pages)
return;
- if (obj->mm.madv != I915_MADV_WILLNEED)
- obj->mm.dirty = false;
-
+ __i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
for_each_sgt_page(page, sgt_iter, pages) {
@@ -795,7 +793,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENODEV;
}
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
new file mode 100644
index 000000000000..81e5c2ce336b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -0,0 +1,125 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "i915_active.h"
+#include "i915_gem_context.h"
+#include "i915_gem_object.h"
+#include "i915_globals.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+#include "i915_vma.h"
+
+static LIST_HEAD(globals);
+
+static atomic_t active;
+static atomic_t epoch;
+static struct park_work {
+ struct rcu_work work;
+ int epoch;
+} park;
+
+static void i915_globals_shrink(void)
+{
+ struct i915_global *global;
+
+ /*
+ * kmem_cache_shrink() discards empty slabs and reorders partially
+ * filled slabs to prioritise allocating from the mostly full slabs,
+ * with the aim of reducing fragmentation.
+ */
+ list_for_each_entry(global, &globals, link)
+ global->shrink();
+}
+
+static void __i915_globals_park(struct work_struct *work)
+{
+ /* Confirm nothing woke up in the last grace period */
+ if (park.epoch == atomic_read(&epoch))
+ i915_globals_shrink();
+}
+
+void __init i915_global_register(struct i915_global *global)
+{
+ GEM_BUG_ON(!global->shrink);
+ GEM_BUG_ON(!global->exit);
+
+ list_add_tail(&global->link, &globals);
+}
+
+static void __i915_globals_cleanup(void)
+{
+ struct i915_global *global, *next;
+
+ list_for_each_entry_safe_reverse(global, next, &globals, link)
+ global->exit();
+}
+
+static __initconst int (* const initfn[])(void) = {
+ i915_global_active_init,
+ i915_global_context_init,
+ i915_global_gem_context_init,
+ i915_global_objects_init,
+ i915_global_request_init,
+ i915_global_scheduler_init,
+ i915_global_vma_init,
+};
+
+int __init i915_globals_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(initfn); i++) {
+ int err;
+
+ err = initfn[i]();
+ if (err) {
+ __i915_globals_cleanup();
+ return err;
+ }
+ }
+
+ INIT_RCU_WORK(&park.work, __i915_globals_park);
+ return 0;
+}
+
+void i915_globals_park(void)
+{
+ /*
+ * Defer shrinking the global slab caches (and other work) until
+ * after an RCU grace period has completed with no activity. This
+ * is to try to reduce the latency impact on the consumers caused
+ * by us shrinking the caches at the same time as they are trying to
+ * allocate, with the assumption being that if we idle long enough
+ * for an RCU grace period to elapse since the last use, it is likely
+ * to be a while longer before we need the caches again.
+ */
+ if (!atomic_dec_and_test(&active))
+ return;
+
+ park.epoch = atomic_inc_return(&epoch);
+ queue_rcu_work(system_wq, &park.work);
+}
+
+void i915_globals_unpark(void)
+{
+ atomic_inc(&epoch);
+ atomic_inc(&active);
+}
+
+void __exit i915_globals_exit(void)
+{
+ /* Flush any residual park_work */
+ atomic_inc(&epoch);
+ flush_rcu_work(&park.work);
+
+ __i915_globals_cleanup();
+
+ /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+ rcu_barrier();
+}
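/*
 * A sketch of how the park/unpark hooks are meant to bracket GPU activity
 * from the caller's side (the wakeup/idle callback names below are
 * illustrative, not part of this file):
 */
static void sketch_gt_wakeup(void)
{
	i915_globals_unpark();	/* new activity cancels a pending shrink epoch */
	/* ... submit work to the GPU ... */
}

static void sketch_gt_idle(void)
{
	/* last user gone: queue the RCU-deferred kmem_cache_shrink() pass */
	i915_globals_park();
}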
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
new file mode 100644
index 000000000000..04c1ce107fc0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_GLOBALS_H_
+#define _I915_GLOBALS_H_
+
+typedef void (*i915_global_func_t)(void);
+
+struct i915_global {
+ struct list_head link;
+
+ i915_global_func_t shrink;
+ i915_global_func_t exit;
+};
+
+void i915_global_register(struct i915_global *global);
+
+int i915_globals_init(void);
+void i915_globals_park(void);
+void i915_globals_unpark(void);
+void i915_globals_exit(void);
+
+/* constructors */
+int i915_global_active_init(void);
+int i915_global_context_init(void);
+int i915_global_gem_context_init(void);
+int i915_global_objects_init(void);
+int i915_global_request_init(void);
+int i915_global_scheduler_init(void);
+int i915_global_vma_init(void);
+
+#endif /* _I915_GLOBALS_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aa6791255252..f51ff683dd2e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -380,19 +380,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x %02x",
+ err_printf(m, " %08x_%08x %8u %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
- err->write_domain,
- err->wseqno);
+ err->write_domain);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
err_puts(m, err->userptr ? " userptr" : "");
- err_puts(m, err->engine != -1 ? " " : "");
- err_puts(m, engine_name(m->i915, err->engine));
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
if (err->name)
@@ -414,7 +411,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
- if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+ if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
@@ -434,11 +431,6 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
ee->instdone.row[slice][subslice]);
}
-static const char *bannable(const struct drm_i915_error_context *ctx)
-{
- return ctx->bannable ? "" : " (unbannable)";
-}
-
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
const struct drm_i915_error_request *erq,
@@ -447,9 +439,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
- prefix, erq->pid, erq->ban_score,
- erq->context, erq->seqno,
+ err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+ prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -463,10 +454,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
- header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
- ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
- ctx->guilty, ctx->active);
+ err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
+ header, ctx->comm, ctx->pid, ctx->hw_id,
+ ctx->sched_attr.priority, ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -512,13 +502,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
- err_printf(m, " SYNC_0: 0x%08x\n",
- ee->semaphore_mboxes[0]);
- err_printf(m, " SYNC_1: 0x%08x\n",
- ee->semaphore_mboxes[1]);
- if (HAS_VEBOX(m->i915))
- err_printf(m, " SYNC_2: 0x%08x\n",
- ee->semaphore_mboxes[2]);
}
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -533,8 +516,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " seqno: 0x%08x\n", ee->seqno);
- err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
@@ -688,16 +669,17 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (!error->engine[i].context.pid)
continue;
- err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
+ err_printf(m, "Active process (on ring %s): %s [%d]\n",
engine_name(m->i915, i),
error->engine[i].context.comm,
- error->engine[i].context.pid,
- error->engine[i].context.ban_score,
- bannable(&error->engine[i].context));
+ error->engine[i].context.pid);
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
+ err_printf(m, "Subplatform: 0x%x\n",
+ intel_subplatform(&error->runtime_info,
+ error->device_info.platform));
err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
@@ -779,13 +761,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (obj) {
err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
- err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
+ err_printf(m, " (submitted by %s [%d])",
ee->context.comm,
- ee->context.pid,
- ee->context.handle,
- ee->context.hw_id,
- ee->context.ban_score,
- bannable(&ee->context));
+ ee->context.pid);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@@ -1061,27 +1039,6 @@ i915_error_object_create(struct drm_i915_private *i915,
return dst;
}
-/* The error capture is special as tries to run underneath the normal
- * locking rules - so we use the raw version of the i915_active_request lookup.
- */
-static inline u32
-__active_get_seqno(struct i915_active_request *active)
-{
- struct i915_request *request;
-
- request = __i915_active_request_peek(active);
- return request ? request->global_seqno : 0;
-}
-
-static inline int
-__active_get_engine_id(struct i915_active_request *active)
-{
- struct i915_request *request;
-
- request = __i915_active_request_peek(active);
- return request ? request->engine->id : -1;
-}
-
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
@@ -1090,9 +1047,6 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
- err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
- err->engine = __active_get_engine_id(&obj->frontbuffer_write);
-
err->gtt_offset = vma->node.start;
err->read_domains = obj->read_domains;
err->write_domain = obj->write_domain;
@@ -1142,7 +1096,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
* It's only a small step better than a random number in its current form.
*/
static u32 i915_error_generate_code(struct i915_gpu_state *error,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
/*
* IPEHR would be an ideal way to detect errors, as it's the gross
@@ -1178,18 +1132,6 @@ static void gem_record_fences(struct i915_gpu_state *error)
error->nfence = i;
}
-static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
- ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
- if (HAS_VEBOX(dev_priv))
- ee->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(engine->mmio_base));
-}
-
static void error_record_engine_registers(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
@@ -1197,44 +1139,40 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) >= 6) {
- ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
- if (INTEL_GEN(dev_priv) >= 8) {
+ ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
+ if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
- } else {
- gen6_record_semaphore_state(engine, ee);
+ else
ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
- }
}
if (INTEL_GEN(dev_priv) >= 4) {
- ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
- ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
- ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
- ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+ ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
+ ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
+ ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
+ ee->instps = ENGINE_READ(engine, RING_INSTPS);
+ ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
if (INTEL_GEN(dev_priv) >= 8) {
- ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
- ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
+ ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
+ ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
- ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
+ ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
} else {
- ee->faddr = I915_READ(DMA_FADD_I8XX);
- ee->ipeir = I915_READ(IPEIR);
- ee->ipehr = I915_READ(IPEHR);
+ ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
+ ee->ipeir = ENGINE_READ(engine, IPEIR);
+ ee->ipehr = ENGINE_READ(engine, IPEHR);
}
intel_engine_get_instdone(engine, &ee->instdone);
- ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+ ee->instpm = ENGINE_READ(engine, RING_INSTPM);
ee->acthd = intel_engine_get_active_head(engine);
- ee->seqno = intel_engine_get_seqno(engine);
- ee->last_seqno = intel_engine_last_submit(engine);
- ee->start = I915_READ_START(engine);
- ee->head = I915_READ_HEAD(engine);
- ee->tail = I915_READ_TAIL(engine);
- ee->ctl = I915_READ_CTL(engine);
+ ee->start = ENGINE_READ(engine, RING_START);
+ ee->head = ENGINE_READ(engine, RING_HEAD);
+ ee->tail = ENGINE_READ(engine, RING_TAIL);
+ ee->ctl = ENGINE_READ(engine, RING_CTL);
if (INTEL_GEN(dev_priv) > 2)
- ee->mode = I915_READ_MODE(engine);
+ ee->mode = ENGINE_READ(engine, RING_MI_MODE);
if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
i915_reg_t mmio;
@@ -1242,16 +1180,17 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
default:
- case RCS:
+ MISSING_CASE(engine->id);
+ case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
- case BCS:
+ case BCS0:
mmio = BLT_HWS_PGA_GEN7;
break;
- case VCS:
+ case VCS0:
mmio = BSD_HWS_PGA_GEN7;
break;
- case VECS:
+ case VECS0:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
@@ -1276,20 +1215,23 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(dev_priv, 6)) {
ee->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN(dev_priv, 7))
+ ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
+ } else if (IS_GEN(dev_priv, 7)) {
ee->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE(engine));
- else if (INTEL_GEN(dev_priv) >= 8)
+ ENGINE_READ(engine, RING_PP_DIR_BASE);
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ u32 base = engine->mmio_base;
+
for (i = 0; i < 4; i++) {
ee->vm_info.pdp[i] =
- I915_READ(GEN8_RING_PDP_UDW(engine, i));
+ I915_READ(GEN8_RING_PDP_UDW(base, i));
ee->vm_info.pdp[i] <<= 32;
ee->vm_info.pdp[i] |=
- I915_READ(GEN8_RING_PDP_LDW(engine, i));
+ I915_READ(GEN8_RING_PDP_LDW(base, i));
}
+ }
}
}
@@ -1299,10 +1241,9 @@ static void record_request(struct i915_request *request,
struct i915_gem_context *ctx = request->gem_context;
erq->flags = request->fence.flags;
- erq->context = ctx->hw_id;
+ erq->context = request->fence.context;
+ erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
- erq->ban_score = atomic_read(&ctx->ban_score);
- erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
@@ -1393,11 +1334,8 @@ static void record_context(struct drm_i915_error_context *e,
rcu_read_unlock();
}
- e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
e->sched_attr = ctx->sched;
- e->ban_score = atomic_read(&ctx->ban_score);
- e->bannable = i915_gem_context_is_bannable(ctx);
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
}
@@ -1476,7 +1414,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
error_record_engine_registers(error, engine, ee);
error_record_engine_execlists(engine, ee);
- request = i915_gem_find_active_request(engine);
+ request = intel_engine_find_active_request(engine);
if (request) {
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
@@ -1669,7 +1607,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
if (INTEL_GEN(dev_priv) >= 5)
- error->ccid = I915_READ(CCID);
+ error->ccid = I915_READ(CCID(RENDER_RING_BASE));
/* 3: Feature specific registers */
if (IS_GEN_RANGE(dev_priv, 6, 7)) {
@@ -1697,16 +1635,17 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->gtier[0] = I915_READ(GTIER);
error->ngtier = 1;
} else if (IS_GEN(dev_priv, 2)) {
- error->ier = I915_READ16(IER);
+ error->ier = I915_READ16(GEN2_IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
- error->ier = I915_READ(IER);
+ error->ier = I915_READ(GEN2_IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
}
static const char *
-error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
+error_msg(struct i915_gpu_state *error,
+ intel_engine_mask_t engines, const char *msg)
{
int len;
int i;
@@ -1716,7 +1655,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
engines &= ~BIT(i);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:%lx:0x%08x",
+ "GPU HANG: ecode %d:%x:0x%08x",
INTEL_GEN(error->i915), engines,
i915_error_generate_code(error, engines));
if (engines) {
@@ -1855,7 +1794,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
* to pick up.
*/
void i915_capture_error_state(struct drm_i915_private *i915,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
const char *msg)
{
static bool warned;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 53b1f22dd365..5dc761e85d9d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -94,8 +94,6 @@ struct i915_gpu_state {
u32 cpu_ring_head;
u32 cpu_ring_tail;
- u32 last_seqno;
-
/* Register state */
u32 start;
u32 tail;
@@ -108,24 +106,19 @@ struct i915_gpu_state {
u32 bbstate;
u32 instpm;
u32 instps;
- u32 seqno;
u64 bbaddr;
u64 acthd;
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
- u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct intel_instdone instdone;
struct drm_i915_error_context {
char comm[TASK_COMM_LEN];
pid_t pid;
- u32 handle;
u32 hw_id;
- int ban_score;
int active;
int guilty;
- bool bannable;
struct i915_sched_attr sched_attr;
} context;
@@ -149,7 +142,6 @@ struct i915_gpu_state {
long jiffies;
pid_t pid;
u32 context;
- int ban_score;
u32 seqno;
u32 start;
u32 head;
@@ -170,7 +162,6 @@ struct i915_gpu_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -179,7 +170,6 @@ struct i915_gpu_state {
u32 dirty:1;
u32 purgeable:1;
u32 userptr:1;
- s32 engine:4;
u32 cache_level:3;
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
@@ -205,38 +195,12 @@ struct i915_gpu_error {
atomic_t pending_fb_pin;
/**
- * State variable controlling the reset flow and count
- *
- * This is a counter which gets incremented when reset is triggered,
- *
- * Before the reset commences, the I915_RESET_BACKOFF bit is set
- * meaning that any waiters holding onto the struct_mutex should
- * relinquish the lock immediately in order for the reset to start.
- *
- * If reset is not completed successfully, the I915_WEDGE bit is
- * set meaning that hardware is terminally sour and there is no
- * recovery. All waiters on the reset_queue will be woken when
- * that happens.
- *
- * This counter is used by the wait_seqno code to notice that reset
- * event happened and it needs to restart the entire ioctl (since most
- * likely the seqno it waited for won't ever signal anytime soon).
- *
- * This is important for lock-free wait paths, where no contended lock
- * naturally enforces the correct ordering between the bail-out of the
- * waiter and the gpu reset work code.
- */
- unsigned long reset_count;
-
- /**
* flags: Control various stages of the GPU reset
*
- * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
- * other users acquiring the struct_mutex. To do this we set the
- * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
- * and then check for that bit before acquiring the struct_mutex (in
- * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
- * secondary role in preventing two concurrent global reset attempts.
+ * #I915_RESET_BACKOFF - When we start a global reset, we need to
+ * serialise with any other users attempting to do the same, and
+ * any global resources that may be clobbered by the reset (such as
+ * FENCE registers).
*
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
* acquire the struct_mutex to reset an engine, we need an explicit
@@ -255,6 +219,9 @@ struct i915_gpu_error {
#define I915_RESET_ENGINE 2
#define I915_WEDGED (BITS_PER_LONG - 1)
+ /** Number of times the device has been reset (global) */
+ u32 reset_count;
+
/** Number of times an engine has been reset */
u32 reset_engine_count[I915_NUM_ENGINES];
@@ -272,6 +239,8 @@ struct i915_gpu_error {
*/
wait_queue_head_t reset_queue;
+ struct srcu_struct reset_backoff_srcu;
+
struct i915_gpu_restart *restart;
};
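The rewritten comment above frames I915_RESET_BACKOFF purely as the serialisation point for a global reset, with the new reset_count recording how many such resets have completed. A minimal sketch of that usage pattern, assuming the flags word and bit names documented above (the function name and exact ordering here are illustrative, not code from this patch):

	/* Illustrative only: serialise a global reset on I915_RESET_BACKOFF. */
	static void example_global_reset(struct i915_gpu_error *error)
	{
		if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags))
			return;	/* another global reset is already in flight */

		/* ... reset the device, restore clobbered state such as FENCEs ... */

		error->reset_count++;	/* the u32 counter introduced above */
		clear_bit(I915_RESET_BACKOFF, &error->flags);
		wake_up_all(&error->reset_queue);
	}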
@@ -294,7 +263,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
const char *error_msg);
static inline struct i915_gpu_state *
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 441d2674b272..b92cfd69134b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -28,15 +28,19 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/sysrq.h>
-#include <linux/slab.h>
#include <linux/circ_buf.h>
-#include <drm/drm_irq.h>
+#include <linux/cpuidle.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+
#include <drm/drm_drv.h>
+#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_psr.h"
/**
* DOC: interrupt handling
@@ -132,92 +136,120 @@ static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
-/* IIR can theoretically queue up two events. Be paranoid. */
-#define GEN8_IRQ_RESET_NDX(type, which) do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
-} while (0)
-
-#define GEN3_IRQ_RESET(type) do { \
- I915_WRITE(type##IMR, 0xffffffff); \
- POSTING_READ(type##IMR); \
- I915_WRITE(type##IER, 0); \
- I915_WRITE(type##IIR, 0xffffffff); \
- POSTING_READ(type##IIR); \
- I915_WRITE(type##IIR, 0xffffffff); \
- POSTING_READ(type##IIR); \
-} while (0)
-
-#define GEN2_IRQ_RESET(type) do { \
- I915_WRITE16(type##IMR, 0xffff); \
- POSTING_READ16(type##IMR); \
- I915_WRITE16(type##IER, 0); \
- I915_WRITE16(type##IIR, 0xffff); \
- POSTING_READ16(type##IIR); \
- I915_WRITE16(type##IIR, 0xffff); \
- POSTING_READ16(type##IIR); \
-} while (0)
+static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier)
+{
+ intel_uncore_write(uncore, imr, 0xffffffff);
+ intel_uncore_posting_read(uncore, imr);
+
+ intel_uncore_write(uncore, ier, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+}
+
+static void gen2_irq_reset(struct intel_uncore *uncore)
+{
+ intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
+
+ intel_uncore_write16(uncore, GEN2_IER, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+}
+
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+ GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+
+#define GEN3_IRQ_RESET(uncore, type) \
+ gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+
+#define GEN2_IRQ_RESET(uncore) \
+ gen2_irq_reset(uncore)
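With the reset sequence pulled into gen3_irq_reset()/gen2_irq_reset(), the macros above become thin wrappers whose only job is to paste together the per-block register names. For callers appearing later in this patch, the expansions are simply (shown for illustration):

	GEN3_IRQ_RESET(uncore, SDE);
		/* -> gen3_irq_reset(uncore, SDEIMR, SDEIIR, SDEIER); */
	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
		/* -> gen3_irq_reset(uncore, GEN8_GT_IMR(0), GEN8_GT_IIR(0), GEN8_GT_IER(0)); */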
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
+static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
- u32 val = I915_READ(reg);
+ u32 val = intel_uncore_read(uncore, reg);
if (val == 0)
return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
i915_mmio_reg_offset(reg), val);
- I915_WRITE(reg, 0xffffffff);
- POSTING_READ(reg);
- I915_WRITE(reg, 0xffffffff);
- POSTING_READ(reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
}
-static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
+static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
- u16 val = I915_READ16(reg);
+ u16 val = intel_uncore_read16(uncore, GEN2_IIR);
if (val == 0)
return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(reg), val);
- I915_WRITE16(reg, 0xffff);
- POSTING_READ16(reg);
- I915_WRITE16(reg, 0xffff);
- POSTING_READ16(reg);
-}
-
-#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
- gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
- I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
-} while (0)
-
-#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
- gen3_assert_iir_is_zero(dev_priv, type##IIR); \
- I915_WRITE(type##IER, (ier_val)); \
- I915_WRITE(type##IMR, (imr_val)); \
- POSTING_READ(type##IMR); \
-} while (0)
-
-#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
- gen2_assert_iir_is_zero(dev_priv, type##IIR); \
- I915_WRITE16(type##IER, (ier_val)); \
- I915_WRITE16(type##IMR, (imr_val)); \
- POSTING_READ16(type##IMR); \
-} while (0)
+ i915_mmio_reg_offset(GEN2_IIR), val);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+}
+
+static void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir)
+{
+ gen3_assert_iir_is_zero(uncore, iir);
+
+ intel_uncore_write(uncore, ier, ier_val);
+ intel_uncore_write(uncore, imr, imr_val);
+ intel_uncore_posting_read(uncore, imr);
+}
+
+static void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val)
+{
+ gen2_assert_iir_is_zero(uncore);
+
+ intel_uncore_write16(uncore, GEN2_IER, ier_val);
+ intel_uncore_write16(uncore, GEN2_IMR, imr_val);
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
+}
+
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_init((uncore), \
+ GEN8_##type##_IMR(which_), imr_val, \
+ GEN8_##type##_IER(which_), ier_val, \
+ GEN8_##type##_IIR(which_)); \
+})
+
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+ gen3_irq_init((uncore), \
+ type##IMR, imr_val, \
+ type##IER, ier_val, \
+ type##IIR)
+
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+ gen2_irq_init((uncore), imr_val, ier_val)
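The init macros follow the same pattern: assert that IIR is clear, then program IER before unmasking IMR, all inside gen3_irq_init()/gen2_irq_init(). For example, the display-port call used further down expands to (illustrative):

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
		/* -> gen3_irq_init(uncore,
		 *                  GEN8_DE_PORT_IMR, ~de_port_masked,
		 *                  GEN8_DE_PORT_IER, de_port_enables,
		 *                  GEN8_DE_PORT_IIR); */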
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
@@ -268,7 +300,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
const unsigned int bank,
const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 dw;
lockdep_assert_held(&i915->irq_lock);
@@ -365,24 +397,41 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
+static void write_pm_imr(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_MASK;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IMR(2);
- else
- return GEN6_PMIMR;
+ i915_reg_t reg;
+ u32 mask = dev_priv->pm_imr;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+ /* pm is in upper half */
+ mask = mask << 16;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ reg = GEN8_GT_IMR(2);
+ } else {
+ reg = GEN6_PMIMR;
+ }
+
+ I915_WRITE(reg, mask);
+ POSTING_READ(reg);
}
-static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
+static void write_pm_ier(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IER(2);
- else
- return GEN6_PMIER;
+ i915_reg_t reg;
+ u32 mask = dev_priv->pm_ier;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ /* pm is in upper half */
+ mask = mask << 16;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ reg = GEN8_GT_IER(2);
+ } else {
+ reg = GEN6_PMIER;
+ }
+
+ I915_WRITE(reg, mask);
}
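write_pm_imr() and write_pm_ier() fold the old per-generation register lookup and the write into one place, so the same cached mask lands in a different register (and bit position) depending on generation; roughly:

	/* gen6/7 : GEN6_PMIMR / GEN6_PMIER                 <- pm_imr / pm_ier
	 * gen8-10: GEN8_GT_IMR(2) / GEN8_GT_IER(2)         <- pm_imr / pm_ier
	 * gen11+ : GEN11_GPM_WGBOXPERF_INTR_MASK / _ENABLE <- pm_imr << 16 / pm_ier << 16
	 *          (the PM bits live in the upper half of the gen11 registers)
	 */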
/**
@@ -407,8 +456,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->pm_imr) {
dev_priv->pm_imr = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
- POSTING_READ(gen6_pm_imr(dev_priv));
+ write_pm_imr(dev_priv);
}
}
@@ -449,7 +497,7 @@ static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mas
lockdep_assert_held(&dev_priv->irq_lock);
dev_priv->pm_ier |= enable_mask;
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ write_pm_ier(dev_priv);
gen6_unmask_pm_irq(dev_priv, enable_mask);
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
@@ -460,7 +508,7 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
dev_priv->pm_ier &= ~disable_mask;
__gen6_mask_pm_irq(dev_priv, disable_mask);
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ write_pm_ier(dev_priv);
/* though a barrier is missing here, we don't really need one */
}
@@ -748,13 +796,21 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
POSTING_READ(reg);
}
+static bool i915_has_asle(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->opregion.asle)
+ return false;
+
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
* @dev_priv: i915 device private
*/
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
+ if (!i915_has_asle(dev_priv))
return;
spin_lock_irq(&dev_priv->irq_lock);
@@ -1288,6 +1344,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
rps->last_adj = adj;
+ /*
+ * Limit deboosting and boosting to keep ourselves at the extremes
+ * when in the respective power modes (i.e. slowly decrease frequencies
+ * while in the HIGH_POWER zone and slowly increase frequencies while
+ * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+ * to the next level quickly, and conversely if busy we expect to
+ * hit a waitboost and rapidly switch into max power.
+ */
+ if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+ (adj > 0 && rps->power.mode == LOW_POWER))
+ rps->last_adj = 0;
+
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
@@ -1415,20 +1483,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1449,7 +1517,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
if (iir & GT_RENDER_USER_INTERRUPT) {
intel_engine_breadcrumbs_irq(engine);
- tasklet |= USES_GUC_SUBMISSION(engine->i915);
+ tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
}
if (tasklet)
@@ -1459,12 +1527,12 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS0_IRQ | \
GEN8_GT_VCS1_IRQ | \
- GEN8_GT_VCS2_IRQ | \
GEN8_GT_VECS_IRQ | \
GEN8_GT_PM_IRQ | \
GEN8_GT_GUC_IRQ)
@@ -1475,7 +1543,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
}
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(gt_iir[1]))
raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
@@ -1498,21 +1566,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gen8_cs_irq_handler(i915->engine[RCS],
+ gen8_cs_irq_handler(i915->engine[RCS0],
gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[BCS],
+ gen8_cs_irq_handler(i915->engine[BCS0],
gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
}
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- gen8_cs_irq_handler(i915->engine[VCS],
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ gen8_cs_irq_handler(i915->engine[VCS0],
+ gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+ gen8_cs_irq_handler(i915->engine[VCS1],
gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[VCS2],
- gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- gen8_cs_irq_handler(i915->engine[VECS],
+ gen8_cs_irq_handler(i915->engine[VECS0],
gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
}
@@ -1693,7 +1761,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- u32 crcs[5];
+ u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
+
+ trace_intel_pipe_crc(crtc, crcs);
spin_lock(&pipe_crc->lock);
/*
@@ -1712,11 +1782,6 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
spin_unlock(&pipe_crc->lock);
- crcs[0] = crc0;
- crcs[1] = crc1;
- crcs[2] = crc2;
- crcs[3] = crc3;
- crcs[4] = crc4;
drm_crtc_add_crc_entry(&crtc->base, true,
drm_crtc_accurate_vblank_count(&crtc->base),
crcs);
@@ -1775,6 +1840,25 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
+static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
+{
+ struct intel_rps *rps = &i915->gt_pm.rps;
+ const u32 events = i915->pm_rps_events & pm_iir;
+
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ gen6_mask_pm_irq(i915, events);
+
+ if (!rps->interrupts_enabled)
+ return;
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -1792,13 +1876,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_GEN(dev_priv) >= 8)
return;
- if (HAS_VEBOX(dev_priv)) {
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
- }
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
@@ -2667,6 +2749,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}
+static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = GEN8_AUX_CHANNEL_A;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ mask |= GEN9_AUX_CHANNEL_B |
+ GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ mask |= CNL_AUX_CHANNEL_F;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= ICL_AUX_CHANNEL_E |
+ CNL_AUX_CHANNEL_F;
+
+ return mask;
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2722,20 +2823,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
- tmp_mask = GEN8_AUX_CHANNEL_A;
- if (INTEL_GEN(dev_priv) >= 9)
- tmp_mask |= GEN9_AUX_CHANNEL_B |
- GEN9_AUX_CHANNEL_C |
- GEN9_AUX_CHANNEL_D;
-
- if (INTEL_GEN(dev_priv) >= 11)
- tmp_mask |= ICL_AUX_CHANNEL_E;
-
- if (IS_CNL_WITH_PORT_F(dev_priv) ||
- INTEL_GEN(dev_priv) >= 11)
- tmp_mask |= CNL_AUX_CHANNEL_F;
-
- if (iir & tmp_mask) {
+ if (iir & gen8_de_port_aux_mask(dev_priv)) {
dp_aux_irq_handler(dev_priv);
found = true;
}
@@ -2816,11 +2904,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_handler(dev_priv, iir);
- else if (HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@@ -2857,7 +2943,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
@@ -2891,7 +2977,7 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 timeout_ts;
u32 ident;
@@ -2926,7 +3012,7 @@ gen11_other_irq_handler(struct drm_i915_private * const i915,
const u8 instance, const u16 iir)
{
if (instance == OTHER_GTPM_INSTANCE)
- return gen6_rps_irq_handler(i915, iir);
+ return gen11_rps_irq_handler(i915, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
@@ -2975,7 +3061,7 @@ static void
gen11_gt_bank_handler(struct drm_i915_private * const i915,
const unsigned int bank)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
unsigned long intr_dw;
unsigned int bit;
@@ -2983,14 +3069,8 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915,
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
- if (unlikely(!intr_dw)) {
- DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
- return;
- }
-
for_each_set_bit(bit, &intr_dw, 32) {
- const u32 ident = gen11_gt_engine_identity(i915,
- bank, bit);
+ const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
gen11_gt_identity_handler(i915, ident);
}
@@ -3018,7 +3098,7 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3059,7 +3139,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 master_ctl;
u32 gu_misc_iir;
@@ -3112,6 +3192,16 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
+static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (dev_priv->i945gm_vblank.enabled++ == 0)
+ schedule_work(&dev_priv->i945gm_vblank.work);
+
+ return i8xx_enable_vblank(dev, pipe);
+}
+
static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3176,6 +3266,16 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ i8xx_disable_vblank(dev, pipe);
+
+ if (--dev_priv->i945gm_vblank.enabled == 0)
+ schedule_work(&dev_priv->i945gm_vblank.work);
+}
+
static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3209,12 +3309,68 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void i945gm_vblank_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, i945gm_vblank.work);
+
+ /*
+ * Vblank interrupts fail to wake up the device from C3,
+ * hence we want to prevent C3 usage while vblank interrupts
+ * are enabled.
+ */
+ pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
+ READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
+ dev_priv->i945gm_vblank.c3_disable_latency :
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static int cstate_disable_latency(const char *name)
+{
+ const struct cpuidle_driver *drv;
+ int i;
+
+ drv = cpuidle_get_driver();
+ if (!drv)
+ return 0;
+
+ for (i = 0; i < drv->state_count; i++) {
+ const struct cpuidle_state *state = &drv->states[i];
+
+ if (!strcmp(state->name, name))
+ return state->exit_latency ?
+ state->exit_latency - 1 : 0;
+ }
+
+ return 0;
+}
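cstate_disable_latency() returns a wake-up latency just below the named C-state's exit latency, so a PM QoS request at that value rules out that state while leaving shallower ones available. With illustrative exit latencies (2us for C1, 20us for C2, 100us for C3):

	int latency = cstate_disable_latency("C3");	/* -> 99 */
	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos, latency);
		/* C3 (100us) is now excluded; C1/C2 remain usable */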
+
+static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
+{
+ INIT_WORK(&dev_priv->i945gm_vblank.work,
+ i945gm_vblank_work_func);
+
+ dev_priv->i945gm_vblank.c3_disable_latency =
+ cstate_disable_latency("C3");
+ pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
+{
+ cancel_work_sync(&dev_priv->i945gm_vblank.work);
+ pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
+}
+
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (HAS_PCH_NOP(dev_priv))
return;
- GEN3_IRQ_RESET(SDE);
+ GEN3_IRQ_RESET(uncore, SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
@@ -3242,13 +3398,17 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN3_IRQ_RESET(GT);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ GEN3_IRQ_RESET(uncore, GT);
if (INTEL_GEN(dev_priv) >= 6)
- GEN3_IRQ_RESET(GEN6_PM);
+ GEN3_IRQ_RESET(uncore, GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -3259,12 +3419,14 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET(VLV_);
+ GEN3_IRQ_RESET(uncore, VLV_);
dev_priv->irq_mask = ~0u;
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -3289,7 +3451,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
/* drm_dma.h hooks
@@ -3297,8 +3459,9 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
- GEN3_IRQ_RESET(DE);
+ GEN3_IRQ_RESET(uncore, DE);
if (IS_GEN(dev_priv, 7))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
@@ -3329,18 +3492,21 @@ static void valleyview_irq_reset(struct drm_device *dev)
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN8_IRQ_RESET_NDX(GT, 0);
- GEN8_IRQ_RESET_NDX(GT, 1);
- GEN8_IRQ_RESET_NDX(GT, 2);
- GEN8_IRQ_RESET_NDX(GT, 3);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
static void gen8_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
- gen8_master_intr_disable(dev_priv->regs);
+ gen8_master_intr_disable(dev_priv->uncore.regs);
gen8_gt_irq_reset(dev_priv);
@@ -3350,11 +3516,11 @@ static void gen8_irq_reset(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
@@ -3380,9 +3546,10 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
- gen11_master_intr_disable(dev_priv->regs);
+ gen11_master_intr_disable(dev_priv->uncore.regs);
gen11_gt_irq_reset(dev_priv);
@@ -3394,21 +3561,23 @@ static void gen11_irq_reset(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
- GEN3_IRQ_RESET(GEN11_DE_HPD_);
- GEN3_IRQ_RESET(GEN11_GU_MISC_);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
+ GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
- if (HAS_PCH_ICP(dev_priv))
- GEN3_IRQ_RESET(SDE);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ GEN3_IRQ_RESET(uncore, SDE);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
@@ -3420,7 +3589,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
~dev_priv->de_irq_mask[pipe] | extra_ier);
@@ -3430,6 +3599,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -3440,7 +3610,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -3451,13 +3621,14 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
static void cherryview_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_reset(dev_priv);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3583,7 +3754,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv);
}
@@ -3729,7 +3900,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
else
mask = SDE_GMBUS_CPT;
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
@@ -3742,6 +3913,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 pm_irqs, gt_irqs;
pm_irqs = gt_irqs = 0;
@@ -3760,26 +3932,27 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
- GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+ GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev_priv)) {
+ if (HAS_ENGINE(dev_priv, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
dev_priv->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
+ GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
if (INTEL_GEN(dev_priv) >= 7) {
@@ -3798,7 +3971,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
}
if (IS_HASWELL(dev_priv)) {
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -3807,7 +3980,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_pre_postinstall(dev);
- GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+ GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
+ display_mask | extra_mask);
gen5_gt_irq_postinstall(dev);
@@ -3877,35 +4051,42 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
/* These are interrupts we'll toggle with the ring mask register */
u32 gt_interrupts[] = {
- GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+ (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
0,
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
- };
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ };
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
- GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
- GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. Same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
- GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
@@ -3941,7 +4122,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
for_each_pipe(dev_priv, pipe) {
@@ -3949,20 +4130,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
}
- GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
- GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+ GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
if (INTEL_GEN(dev_priv) >= 11) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
- GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+ GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
+ de_hpd_enables);
gen11_hpd_detection_setup(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_hpd_detection_setup(dev_priv);
@@ -3984,7 +4166,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
- gen8_master_intr_enable(dev_priv->regs);
+ gen8_master_intr_enable(dev_priv->uncore.regs);
return 0;
}
@@ -4025,7 +4207,7 @@ static void icp_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
icp_hpd_detection_setup(dev_priv);
@@ -4034,19 +4216,20 @@ static void icp_irq_postinstall(struct drm_device *dev)
static int gen11_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+ GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(dev_priv->regs);
+ gen11_master_intr_enable(dev_priv->uncore.regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
@@ -4072,15 +4255,17 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
static void i8xx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_pipestat_irq_reset(dev_priv);
- GEN2_IRQ_RESET();
+ GEN2_IRQ_RESET(uncore);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
@@ -4098,7 +4283,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
- GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
@@ -4202,7 +4387,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
u16 eir = 0, eir_stuck = 0;
u16 iir;
- iir = I915_READ16(IIR);
+ iir = I915_READ16(GEN2_IIR);
if (iir == 0)
break;
@@ -4215,10 +4400,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE16(IIR, iir);
+ I915_WRITE16(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4234,6 +4419,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
static void i915_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4242,12 +4428,13 @@ static void i915_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET();
+ GEN3_IRQ_RESET(uncore, GEN2_);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
@@ -4274,7 +4461,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
@@ -4306,7 +4493,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(IIR);
+ iir = I915_READ(GEN2_IIR);
if (iir == 0)
break;
@@ -4323,10 +4510,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(IIR, iir);
+ I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4345,18 +4532,20 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET();
+ GEN3_IRQ_RESET(uncore, GEN2_);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
u32 error_mask;
@@ -4394,7 +4583,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
@@ -4452,7 +4641,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(IIR);
+ iir = I915_READ(GEN2_IIR);
if (iir == 0)
break;
@@ -4468,13 +4657,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(IIR, iir);
+ I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4503,6 +4692,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
+ if (IS_I945GM(dev_priv))
+ i945gm_vblank_work_init(dev_priv);
+
intel_hpd_init_work(dev_priv);
INIT_WORK(&rps->work, gen6_pm_rps_work);
@@ -4523,6 +4715,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
+ /* We share the register with other engines */
+ if (INTEL_GEN(dev_priv) > 9)
+ GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
+
rps->pm_intrmsk_mbz = 0;
/*
@@ -4542,13 +4738,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 3)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
- /*
- * Opt out of the vblank disable timer on everything except gen2.
- * Gen2 doesn't have a hardware frame counter and so depends on
- * vblank interrupts to produce sane vblank sequence numbers.
- */
- if (!IS_GEN(dev_priv, 2))
- dev->vblank_disable_immediate = true;
+ dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -4605,8 +4795,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
@@ -4626,6 +4815,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
+ } else if (IS_I945GM(dev_priv)) {
+ dev->driver->irq_preinstall = i915_irq_reset;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_reset;
+ dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i945gm_enable_vblank;
+ dev->driver->disable_vblank = i945gm_disable_vblank;
} else if (IS_GEN(dev_priv, 3)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
@@ -4656,6 +4852,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
+ if (IS_I945GM(i915))
+ i945gm_vblank_work_fini(i915);
+
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 66f82f3f050f..f893c2cbce15 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -28,14 +28,45 @@
#include <drm/drm_drv.h>
-#include "i915_active.h"
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_selftest.h"
+#include "intel_fbdev.h"
-#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
+#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
-#define GEN_DEFAULT_PIPEOFFSETS \
+#define I845_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ }
+
+#define I9XX_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ }
+
+#define IVB_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ }
+
+#define HSW_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -49,7 +80,7 @@
[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
}
-#define GEN_CHV_PIPEOFFSETS \
+#define CHV_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -61,21 +92,48 @@
[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
}
-#define CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+#define I845_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ }
+
+#define I9XX_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = CURSOR_B_OFFSET, \
+ }
+
+#define CHV_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = CURSOR_B_OFFSET, \
+ [PIPE_C] = CHV_CURSOR_C_OFFSET, \
+ }
#define IVB_CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = IVB_CURSOR_B_OFFSET, \
+ [PIPE_C] = IVB_CURSOR_C_OFFSET, \
+ }
-#define BDW_COLORS \
- .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define I9XX_COLORS \
+ .color = { .gamma_lut_size = 256 }
+#define I965_COLORS \
+ .color = { .gamma_lut_size = 129, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ }
+#define ILK_COLORS \
+ .color = { .gamma_lut_size = 1024 }
+#define IVB_COLORS \
+ .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define GLK_COLORS \
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024, \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
@@ -85,7 +143,26 @@
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
-#define GEN2_FEATURES \
+#define I830_FEATURES \
+ GEN(2), \
+ .is_mobile = 1, \
+ .num_pipes = 2, \
+ .display.has_overlay = 1, \
+ .display.cursor_needs_physical = 1, \
+ .display.overlay_needs_physical = 1, \
+ .display.has_gmch = 1, \
+ .gpu_reset_clobbers_display = true, \
+ .hws_needs_physical = 1, \
+ .unfenced_needs_alignment = 1, \
+ .engine_mask = BIT(RCS0), \
+ .has_snoop = true, \
+ .has_coherent_ggtt = false, \
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
+
+#define I845_FEATURES \
GEN(2), \
.num_pipes = 1, \
.display.has_overlay = 1, \
@@ -94,37 +171,32 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I845_PIPE_OFFSETS, \
+ I845_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i830_info = {
- GEN2_FEATURES,
+ I830_FEATURES,
PLATFORM(INTEL_I830),
- .is_mobile = 1,
- .display.cursor_needs_physical = 1,
- .num_pipes = 2, /* legal, last one wins */
};
static const struct intel_device_info intel_i845g_info = {
- GEN2_FEATURES,
+ I845_FEATURES,
PLATFORM(INTEL_I845G),
};
static const struct intel_device_info intel_i85x_info = {
- GEN2_FEATURES,
+ I830_FEATURES,
PLATFORM(INTEL_I85X),
- .is_mobile = 1,
- .num_pipes = 2, /* legal, last one wins */
- .display.cursor_needs_physical = 1,
.display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
- GEN2_FEATURES,
+ I845_FEATURES,
PLATFORM(INTEL_I865G),
};
@@ -133,12 +205,13 @@ static const struct intel_device_info intel_i865g_info = {
.num_pipes = 2, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
@@ -196,7 +269,14 @@ static const struct intel_device_info intel_g33_info = {
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_g_info = {
+ GEN3_FEATURES,
+ PLATFORM(INTEL_PINEVIEW),
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
+};
+
+static const struct intel_device_info intel_pineview_m_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
@@ -210,12 +290,13 @@ static const struct intel_device_info intel_pineview_info = {
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I965_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
@@ -239,7 +320,7 @@ static const struct intel_device_info intel_i965gm_info = {
static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .ring_mask = RENDER_RING | BSD_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -249,7 +330,7 @@ static const struct intel_device_info intel_gm45_info = {
.is_mobile = 1,
.display.has_fbc = 1,
.display.supports_tv = 1,
- .ring_mask = RENDER_RING | BSD_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -257,14 +338,15 @@ static const struct intel_device_info intel_gm45_info = {
GEN(5), \
.num_pipes = 2, \
.display.has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ ILK_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
@@ -283,15 +365,17 @@ static const struct intel_device_info intel_ironlake_m_info = {
.num_pipes = 2, \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .ppgtt = INTEL_PPGTT_ALIASING, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .ppgtt_size = 31, \
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ ILK_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
@@ -328,15 +412,17 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.num_pipes = 3, \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .ppgtt = INTEL_PPGTT_FULL, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- IVB_CURSOR_OFFSETS
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 31, \
+ IVB_PIPE_OFFSETS, \
+ IVB_CURSOR_OFFSETS, \
+ IVB_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
@@ -386,24 +472,27 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rc6 = 1,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display_mmio_offset = VLV_DISPLAY_BASE,
+ I9XX_PIPE_OFFSETS,
+ I9XX_CURSOR_OFFSETS,
+ I965_COLORS,
GEN_DEFAULT_PAGE_SIZES,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS
};
#define G75_FEATURES \
GEN7_FEATURES, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
.display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed by HSW */, \
+ HSW_PIPE_OFFSETS, \
.has_runtime_pm = 1
#define HSW_PLATFORM \
@@ -429,11 +518,11 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
G75_FEATURES, \
GEN(8), \
- BDW_COLORS, \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
- .ppgtt = INTEL_PPGTT_FULL_4LVL, \
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 48, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
@@ -462,7 +551,8 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
static const struct intel_device_info intel_cherryview_info = {
@@ -471,21 +561,22 @@ static const struct intel_device_info intel_cherryview_info = {
.num_pipes = 3,
.display.has_hotplug = 1,
.is_lp = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
.display_mmio_offset = VLV_DISPLAY_BASE,
- GEN_DEFAULT_PAGE_SIZES,
- GEN_CHV_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ CHV_PIPE_OFFSETS,
+ CHV_CURSOR_OFFSETS,
CHV_COLORS,
+ GEN_DEFAULT_PAGE_SIZES,
};
#define GEN9_DEFAULT_PAGE_SIZES \
@@ -521,7 +612,8 @@ static const struct intel_device_info intel_skylake_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
+ .engine_mask = \
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
static const struct intel_device_info intel_skylake_gt3_info = {
@@ -538,7 +630,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
GEN(9), \
.is_lp = 1, \
.display.has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
@@ -552,15 +644,16 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
- .ppgtt = INTEL_PPGTT_FULL_4LVL, \
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 48, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.display.has_ipc = 1, \
- GEN9_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_PIPEOFFSETS, \
+ HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
- BDW_COLORS
+ IVB_COLORS, \
+ GEN9_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
@@ -592,7 +685,8 @@ static const struct intel_device_info intel_kabylake_gt2_info = {
static const struct intel_device_info intel_kabylake_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
#define CFL_PLATFORM \
@@ -612,7 +706,8 @@ static const struct intel_device_info intel_coffeelake_gt2_info = {
static const struct intel_device_info intel_coffeelake_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
#define GEN10_FEATURES \
@@ -648,13 +743,22 @@ static const struct intel_device_info intel_cannonlake_info = {
}, \
GEN(11), \
.ddb_size = 2048, \
- .has_logical_ring_elsq = 1
+ .has_logical_ring_elsq = 1, \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
+ .engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+};
+
+static const struct intel_device_info intel_elkhartlake_info = {
+ GEN11_FEATURES,
+ PLATFORM(INTEL_ELKHARTLAKE),
.is_alpha_support = 1,
- .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
+ .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
+ .ppgtt_size = 36,
};
#undef GEN
@@ -680,7 +784,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_I965GM_IDS(&intel_i965gm_info),
INTEL_GM45_IDS(&intel_gm45_info),
INTEL_G45_IDS(&intel_g45_info),
- INTEL_PINEVIEW_IDS(&intel_pineview_info),
+ INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
+ INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
@@ -722,8 +827,11 @@ static const struct pci_device_id pciidlist[] = {
INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
+ INTEL_EHL_IDS(&intel_elkhartlake_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -801,7 +909,9 @@ static int __init i915_init(void)
bool use_kms = true;
int err;
- i915_global_active_init();
+ err = i915_globals_init();
+ if (err)
+ return err;
err = i915_mock_selftests();
if (err)
@@ -834,7 +944,7 @@ static void __exit i915_exit(void)
return;
pci_unregister_driver(&i915_pci_driver);
- i915_global_active_exit();
+ i915_globals_exit();
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9ebf99f3d8d3..39a4804091d7 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream,
static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct intel_context *ce;
int ret;
@@ -1364,7 +1364,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
if (stream->ctx)
@@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto unlock;
}
- ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
- if (ret)
- goto err_unref;
+ i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
/* PreHSW required 512K alignment, HSW requires 16M */
vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
@@ -1629,13 +1627,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* It's fine to put out-of-date values into these per-context registers
* in the case that the OA unit has been disabled.
*/
-static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
+static void
+gen8_update_reg_state_unlocked(struct intel_context *ce,
+ u32 *reg_state,
+ const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *dev_priv = ctx->i915;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ struct drm_i915_private *i915 = ce->gem_context->i915;
+ u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1649,8 +1648,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
int i;
CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME);
for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@@ -1678,10 +1677,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
CTX_REG(reg_state, state_offset, flex_regs[i], value);
}
- CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- gen8_make_rpcs(dev_priv,
- &to_intel_context(ctx,
- dev_priv->engine[RCS])->sseu));
+ CTX_REG(reg_state,
+ CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+ gen8_make_rpcs(i915, &ce->sseu));
}
/*
@@ -1711,7 +1709,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS0];
unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
struct i915_request *rq;
@@ -1740,11 +1738,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 *regs;
/* OA settings will be set upon first use */
- if (!ce->state)
+ if (!ce || !ce->state)
continue;
regs = i915_gem_object_pin_map(ce->state->obj, map_type);
@@ -1754,7 +1752,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
ce->state->obj->mm.dirty = true;
regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
- gen8_update_reg_state_unlocked(ctx, regs, oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, oa_config);
i915_gem_object_unpin_map(ce->state->obj);
}
@@ -1922,10 +1920,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = &stream->dev_priv->uncore;
- I915_WRITE(GEN7_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN7_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@@ -1933,10 +1931,10 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = &stream->dev_priv->uncore;
- I915_WRITE(GEN8_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN8_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@@ -2093,7 +2091,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* references will effectively disable RC6.
*/
stream->wakeref = intel_runtime_pm_get(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
if (ret)
@@ -2127,7 +2125,7 @@ err_lock:
err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
err_config:
@@ -2138,17 +2136,17 @@ err_config:
}
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- u32 *reg_state)
+ struct intel_context *ce,
+ u32 *regs)
{
struct i915_perf_stream *stream;
- if (engine->id != RCS)
+ if (engine->class != RENDER_CLASS)
return;
stream = engine->i915->perf.oa.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
}
/**
@@ -2881,12 +2879,24 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
- if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 11) {
+ i915_perf_load_test_config_icl(dev_priv);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ i915_perf_load_test_config_cnl(dev_priv);
+ } else if (IS_COFFEELAKE(dev_priv)) {
+ if (IS_CFL_GT2(dev_priv))
+ i915_perf_load_test_config_cflgt2(dev_priv);
+ if (IS_CFL_GT3(dev_priv))
+ i915_perf_load_test_config_cflgt3(dev_priv);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ i915_perf_load_test_config_glk(dev_priv);
+ } else if (IS_KABYLAKE(dev_priv)) {
+ if (IS_KBL_GT2(dev_priv))
+ i915_perf_load_test_config_kblgt2(dev_priv);
+ else if (IS_KBL_GT3(dev_priv))
+ i915_perf_load_test_config_kblgt3(dev_priv);
+ } else if (IS_BROXTON(dev_priv)) {
+ i915_perf_load_test_config_bxt(dev_priv);
} else if (IS_SKYLAKE(dev_priv)) {
if (IS_SKL_GT2(dev_priv))
i915_perf_load_test_config_sklgt2(dev_priv);
@@ -2894,25 +2904,13 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_sklgt3(dev_priv);
else if (IS_SKL_GT4(dev_priv))
i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_ICELAKE(dev_priv)) {
- i915_perf_load_test_config_icl(dev_priv);
- }
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ i915_perf_load_test_config_chv(dev_priv);
+ } else if (IS_BROADWELL(dev_priv)) {
+ i915_perf_load_test_config_bdw(dev_priv);
+ } else if (IS_HASWELL(dev_priv)) {
+ i915_perf_load_test_config_hsw(dev_priv);
+ }
if (dev_priv->perf.oa.test_config.id == 0)
goto sysfs_error;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index b745c49a5af6..46a52da3db29 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -102,7 +102,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
*
* Use RCS as proxy for all engines.
*/
- else if (intel_engine_supports_stats(i915->engine[RCS]))
+ else if (intel_engine_supports_stats(i915->engine[RCS0]))
enable &= ~BIT(I915_SAMPLE_BUSY);
/*
@@ -149,14 +149,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
spin_unlock_irq(&i915->pmu.lock);
}
-static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
-{
- if (!fw)
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
-
- return true;
-}
-
static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
@@ -169,49 +161,48 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
- bool fw = false;
+ unsigned long flags;
if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- if (!dev_priv->gt.awake)
- return;
-
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = 0;
+ if (READ_ONCE(dev_priv->gt.awake))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return;
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
for_each_engine(engine, dev_priv, id) {
- u32 current_seqno = intel_engine_get_seqno(engine);
- u32 last_seqno = intel_engine_last_submit(engine);
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ bool busy;
u32 val;
- val = !i915_seqno_passed(current_seqno, last_seqno);
-
- if (val)
- add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
- period_ns);
-
- if (val && (engine->pmu.enable &
- (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
- fw = grab_forcewake(dev_priv, fw);
-
- val = I915_READ_FW(RING_CTL(engine->mmio_base));
- } else {
- val = 0;
- }
+ val = I915_READ_FW(RING_CTL(engine->mmio_base));
+ if (val == 0) /* powerwell off => engine idle */
+ continue;
if (val & RING_WAIT)
- add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
- period_ns);
-
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
if (val & RING_WAIT_SEMAPHORE)
- add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
- period_ns);
- }
+ add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
- if (fw)
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ /*
+ * While waiting on a semaphore or event, MI_MODE reports the
+ * ring as idle. However, previously using the seqno, and with
+ * execlists sampling, we account for the ring waiting as the
+ * engine being busy. Therefore, we record the sample as being
+ * busy if either waiting or !idle.
+ */
+ busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+ if (!busy) {
+ val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
+ busy = !(val & MODE_IDLE);
+ }
+ if (busy)
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+ }
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
intel_runtime_pm_put(dev_priv, wakeref);
}
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
new file mode 100644
index 000000000000..cc44ebd3b553
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_PRIOLIST_TYPES_H_
+#define _I915_PRIOLIST_TYPES_H_
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+#include <uapi/drm/i915_drm.h>
+
+enum {
+ I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+ I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+ I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ I915_PRIORITY_INVALID = INT_MIN
+};
+
+#define I915_USER_PRIORITY_SHIFT 3
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT ((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
+#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(2))
+
+#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
+
+struct i915_priolist {
+ struct list_head requests[I915_PRIORITY_COUNT];
+ struct rb_node node;
+ unsigned long used;
+ int priority;
+};
+
+#endif /* _I915_PRIOLIST_TYPES_H_ */
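/*
 * Illustrative sketch only, not part of the patch above: one way the
 * priority encoding from i915_priolist_types.h might be combined. A
 * user-visible priority is shifted up by I915_USER_PRIORITY_SHIFT, leaving
 * the low I915_PRIORITY_MASK bits free for internal hints such as
 * I915_PRIORITY_WAIT. The helper name example_effective_prio() is
 * hypothetical and exists only for this example.
 */
static inline int example_effective_prio(int user_prio, u8 internal_hints)
{
	/* e.g. user_prio == 0 (default), internal_hints == I915_PRIORITY_WAIT */
	return I915_USER_PRIORITY(user_prio) |
	       (internal_hints & I915_PRIORITY_MASK);
}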
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index eeaa3d506d95..969e514916ab 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -52,7 +52,7 @@ enum vgt_g2v_type {
/*
* VGT capabilities type
*/
-#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
+#define VGT_CAPS_FULL_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
#define VGT_CAPS_HUGE_GTT BIT(4)
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index cbcb957b7141..782183b78f49 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -10,12 +10,34 @@
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
+static int copy_query_item(void *query_hdr, size_t query_sz,
+ u32 total_length,
+ struct drm_i915_query_item *query_item)
+{
+ if (query_item->length == 0)
+ return total_length;
+
+ if (query_item->length < total_length)
+ return -EINVAL;
+
+ if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
+ query_sz))
+ return -EFAULT;
+
+ if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
+ total_length))
+ return -EFAULT;
+
+ return 0;
+}
+
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
+ int ret;
if (query_item->flags != 0)
return -EINVAL;
@@ -33,23 +55,14 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
- if (query_item->length == 0)
- return total_length;
-
- if (query_item->length < total_length)
- return -EINVAL;
-
- if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
- sizeof(topo)))
- return -EFAULT;
+ ret = copy_query_item(&topo, sizeof(topo), total_length,
+ query_item);
+ if (ret != 0)
+ return ret;
if (topo.flags != 0)
return -EINVAL;
- if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
- total_length))
- return -EFAULT;
-
memset(&topo, 0, sizeof(topo));
topo.max_slices = sseu->max_slices;
topo.max_subslices = sseu->max_subslices;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 047855dd8c6b..b74824f0b5b1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,6 +25,9 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
/**
* DOC: The i915 register macro definition style guide
*
@@ -59,15 +62,13 @@
* significant to least significant bit. Indent the register content macros
* using two extra spaces between ``#define`` and the macro name.
*
- * For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field
- * contents so that they are already shifted in place, and can be directly
- * OR'd. For convenience, function-like macros may be used to define bit fields,
- * but do note that the macros may be needed to read as well as write the
- * register contents.
+ * Define bit fields using ``REG_GENMASK(h, l)``. Define bit field contents
+ * using ``REG_FIELD_PREP(mask, value)``. This will define the values already
+ * shifted in place, so they can be directly OR'd together. For convenience,
+ * function-like macros may be used to define bit fields, but do note that the
+ * macros may be needed to read as well as write the register contents.
*
- * Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in
- * the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix
- * to the name.
+ * Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.
*
* Group the register and its contents together without blank lines, separate
* from other registers and their contents with one blank line.
@@ -105,17 +106,78 @@
* #define _FOO_A 0xf000
* #define _FOO_B 0xf001
* #define FOO(pipe) _MMIO_PIPE(pipe, _FOO_A, _FOO_B)
- * #define FOO_ENABLE (1 << 31)
- * #define FOO_MODE_MASK (0xf << 16)
- * #define FOO_MODE_SHIFT 16
- * #define FOO_MODE_BAR (0 << 16)
- * #define FOO_MODE_BAZ (1 << 16)
- * #define FOO_MODE_QUX_SNB (2 << 16)
+ * #define FOO_ENABLE REG_BIT(31)
+ * #define FOO_MODE_MASK REG_GENMASK(19, 16)
+ * #define FOO_MODE_BAR REG_FIELD_PREP(FOO_MODE_MASK, 0)
+ * #define FOO_MODE_BAZ REG_FIELD_PREP(FOO_MODE_MASK, 1)
+ * #define FOO_MODE_QUX_SNB REG_FIELD_PREP(FOO_MODE_MASK, 2)
*
* #define BAR _MMIO(0xb000)
* #define GEN8_BAR _MMIO(0xb888)
*/
+/**
+ * REG_BIT() - Prepare a u32 bit value
+ * @__n: 0-based bit number
+ *
+ * Local wrapper for BIT() to force u32, with compile time checks.
+ *
+ * @return: Value with bit @__n set.
+ */
+#define REG_BIT(__n) \
+ ((u32)(BIT(__n) + \
+ BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \
+ ((__n) < 0 || (__n) > 31))))
+
+/**
+ * REG_GENMASK() - Prepare a continuous u32 bitmask
+ * @__high: 0-based high bit
+ * @__low: 0-based low bit
+ *
+ * Local wrapper for GENMASK() to force u32, with compile time checks.
+ *
+ * @return: Continuous bitmask from @__high to @__low, inclusive.
+ */
+#define REG_GENMASK(__high, __low) \
+ ((u32)(GENMASK(__high, __low) + \
+ BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \
+ __builtin_constant_p(__low) && \
+ ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
+
+/*
+ * Local integer constant expression version of is_power_of_2().
+ */
+#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
+
+/**
+ * REG_FIELD_PREP() - Prepare a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP(__mask, __val) \
+ ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_GET() - Extract a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u32 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
+
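/*
 * Illustrative sketch only, not part of the patch above: reading and
 * updating a bitfield with the helpers defined here, using the hypothetical
 * FOO register from the style guide example earlier in this file. The
 * function name example_update_foo_mode() is made up for illustration;
 * intel_uncore_read()/intel_uncore_write() are assumed to be the uncore
 * accessors used elsewhere in this series.
 */
static inline u32 example_update_foo_mode(struct intel_uncore *uncore)
{
	u32 val = intel_uncore_read(uncore, FOO(PIPE_A));
	u32 old_mode = REG_FIELD_GET(FOO_MODE_MASK, val); /* extract the field */

	val &= ~FOO_MODE_MASK;
	val |= FOO_ENABLE | FOO_MODE_BAZ; /* REG_FIELD_PREP'd values OR together */
	intel_uncore_write(uncore, FOO(PIPE_A), val);

	return old_mode;
}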
typedef struct {
u32 reg;
} i915_reg_t;
@@ -210,14 +272,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Engine ID */
-#define RCS_HW 0
-#define VCS_HW 1
-#define BCS_HW 2
-#define VECS_HW 3
-#define VCS2_HW 4
-#define VCS3_HW 6
-#define VCS4_HW 7
-#define VECS2_HW 12
+#define RCS0_HW 0
+#define VCS0_HW 1
+#define BCS0_HW 2
+#define VECS0_HW 3
+#define VCS1_HW 4
+#define VCS2_HW 6
+#define VCS3_HW 7
+#define VECS1_HW 12
/* Engine class */
@@ -372,13 +434,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
-#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
-#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
-#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
+#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
+#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
+#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
-#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
@@ -1044,7 +1106,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
-#define PUNIT_REG_DSPFREQ 0x36
+/* PUNIT_REG_*SSPM0 */
+#define _SSPM0_SSC(val) ((val) << 0)
+#define SSPM0_SSC_MASK _SSPM0_SSC(0x3)
+#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0)
+#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1)
+#define SSPM0_SSC_RESET _SSPM0_SSC(0x2)
+#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3)
+#define _SSPM0_SSS(val) ((val) << 24)
+#define SSPM0_SSS_MASK _SSPM0_SSS(0x3)
+#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0)
+#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1)
+#define SSPM0_SSS_RESET _SSPM0_SSS(0x2)
+#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3)
+
+/* PUNIT_REG_*SSPM1 */
+#define SSPM1_FREQSTAT_SHIFT 24
+#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT)
+#define SSPM1_FREQGUAR_SHIFT 8
+#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT)
+#define SSPM1_FREQ_SHIFT 0
+#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT)
+
+#define PUNIT_REG_VEDSSPM0 0x32
+#define PUNIT_REG_VEDSSPM1 0x33
+
+#define PUNIT_REG_DSPSSPM 0x36
#define DSPFREQSTAT_SHIFT_CHV 24
#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV)
#define DSPFREQGUAR_SHIFT_CHV 8
@@ -1069,6 +1156,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
+#define PUNIT_REG_ISPSSPM0 0x39
+#define PUNIT_REG_ISPSSPM1 0x3a
+
/*
* i915_power_well_id:
*
@@ -1860,13 +1950,13 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
-#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
+#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
-#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
+#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1893,11 +1983,11 @@ enum i915_power_well_id {
#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
-#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
+#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
-#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
+#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
@@ -1908,8 +1998,8 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
-#define MG_TX1_LINK_PARAMS(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+#define MG_TX1_LINK_PARAMS(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
MG_TX_LINK_PARAMS_TX1LN1_PORT1)
@@ -1921,8 +2011,8 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
-#define MG_TX2_LINK_PARAMS(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+#define MG_TX2_LINK_PARAMS(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
MG_TX_LINK_PARAMS_TX2LN1_PORT1)
#define CRI_USE_FS32 (1 << 5)
@@ -1935,8 +2025,8 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
-#define MG_TX1_PISO_READLOAD(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+#define MG_TX1_PISO_READLOAD(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
MG_TX_PISO_READLOAD_TX1LN1_PORT1)
@@ -1948,8 +2038,8 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
-#define MG_TX2_PISO_READLOAD(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+#define MG_TX2_PISO_READLOAD(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
MG_TX_PISO_READLOAD_TX2LN1_PORT1)
#define CRI_CALCINIT (1 << 1)
@@ -1962,8 +2052,8 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
-#define MG_TX1_SWINGCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+#define MG_TX1_SWINGCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
MG_TX_SWINGCTRL_TX1LN0_PORT2, \
MG_TX_SWINGCTRL_TX1LN1_PORT1)
@@ -1975,8 +2065,8 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
-#define MG_TX2_SWINGCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+#define MG_TX2_SWINGCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
MG_TX_SWINGCTRL_TX2LN0_PORT2, \
MG_TX_SWINGCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
@@ -1990,8 +2080,8 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
-#define MG_TX1_DRVCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+#define MG_TX1_DRVCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
MG_TX_DRVCTRL_TX1LN1_TXPORT1)
@@ -2003,8 +2093,8 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
-#define MG_TX2_DRVCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+#define MG_TX2_DRVCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
MG_TX_DRVCTRL_TX2LN0_PORT2, \
MG_TX_DRVCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
@@ -2023,8 +2113,8 @@ enum i915_power_well_id {
#define MG_CLKHUB_LN1_PORT3 0x16A79C
#define MG_CLKHUB_LN0_PORT4 0x16B39C
#define MG_CLKHUB_LN1_PORT4 0x16B79C
-#define MG_CLKHUB(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \
+#define MG_CLKHUB(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
MG_CLKHUB_LN0_PORT2, \
MG_CLKHUB_LN1_PORT1)
#define CFG_LOW_RATE_LKREN_EN (1 << 11)
@@ -2037,8 +2127,8 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
-#define MG_TX1_DCC(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \
+#define MG_TX1_DCC(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
MG_TX_DCC_TX1LN0_PORT2, \
MG_TX_DCC_TX1LN1_PORT1)
#define MG_TX_DCC_TX2LN0_PORT1 0x168090
@@ -2049,8 +2139,8 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
-#define MG_TX2_DCC(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \
+#define MG_TX2_DCC(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
MG_TX_DCC_TX2LN0_PORT2, \
MG_TX_DCC_TX2LN1_PORT1)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
@@ -2065,8 +2155,8 @@ enum i915_power_well_id {
#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
-#define MG_DP_MODE(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \
+#define MG_DP_MODE(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
MG_DP_MODE_LN0_ACU_PORT2, \
MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
@@ -2356,8 +2446,10 @@ enum i915_power_well_id {
#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
-#define RESET_CTL_REQUEST_RESET (1 << 0)
-#define RESET_CTL_READY_TO_RESET (1 << 1)
+#define RESET_CTL_CAT_ERROR REG_BIT(2)
+#define RESET_CTL_READY_TO_RESET REG_BIT(1)
+#define RESET_CTL_REQUEST_RESET REG_BIT(0)
+
#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
@@ -2478,12 +2570,12 @@ enum i915_power_well_id {
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
#define PWRCTX_EN (1 << 0)
-#define IPEIR _MMIO(0x2088)
-#define IPEHR _MMIO(0x208c)
+#define IPEIR(base) _MMIO((base) + 0x88)
+#define IPEHR(base) _MMIO((base) + 0x8c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
-#define DMA_FADD_I8XX _MMIO(0x20d0)
+#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0)
#define RING_BBSTATE(base) _MMIO((base) + 0x110)
#define RING_BB_PPGTT (1 << 5)
#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
@@ -2623,10 +2715,10 @@ enum i915_power_well_id {
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
-#define IER _MMIO(0x20a0)
-#define IIR _MMIO(0x20a4)
-#define IMR _MMIO(0x20a8)
-#define ISR _MMIO(0x20ac)
+#define GEN2_IER _MMIO(0x20a0)
+#define GEN2_IIR _MMIO(0x20a4)
+#define GEN2_IMR _MMIO(0x20a8)
+#define GEN2_ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
#define GINT_DIS (1 << 22)
#define GCFG_DIS (1 << 8)
@@ -2657,7 +2749,7 @@ enum i915_power_well_id {
#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
#define INSTPM_TLB_INVALIDATE (1 << 9)
#define INSTPM_SYNC_FLUSH (1 << 5)
-#define ACTHD _MMIO(0x20c8)
+#define ACTHD(base) _MMIO((base) + 0xc8)
#define MEM_MODE _MMIO(0x20cc)
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
@@ -3857,7 +3949,7 @@ enum i915_power_well_id {
/*
* Logical Context regs
*/
-#define CCID _MMIO(0x2180)
+#define CCID(base) _MMIO((base) + 0x180)
#define CCID_EN BIT(0)
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
@@ -3989,6 +4081,15 @@ enum {
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A 0x60050
#define PIPE_CRC_ENABLE (1 << 31)
+/* skl+ source selection */
+#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28)
+#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28)
+#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28)
+#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28)
+#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28)
+#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28)
+#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28)
+#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
@@ -4110,42 +4211,6 @@ enum {
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-/* VLV eDP PSR registers */
-#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
-#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
-#define VLV_EDP_PSR_ENABLE (1 << 0)
-#define VLV_EDP_PSR_RESET (1 << 1)
-#define VLV_EDP_PSR_MODE_MASK (7 << 2)
-#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3)
-#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2)
-#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7)
-#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8)
-#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9)
-#define VLV_EDP_PSR_DBL_FRAME (1 << 10)
-#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16)
-#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
-#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
-
-#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
-#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
-#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30)
-#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31)
-#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30)
-#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
-
-#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
-#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
-#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3)
-#define VLV_EDP_PSR_CURR_STATE_MASK 7
-#define VLV_EDP_PSR_DISABLED (0 << 0)
-#define VLV_EDP_PSR_INACTIVE (1 << 0)
-#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0)
-#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0)
-#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0)
-#define VLV_EDP_PSR_EXIT (5 << 0)
-#define VLV_EDP_PSR_IN_TRANS (1 << 7)
-#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
-
/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
@@ -4168,6 +4233,7 @@ enum {
#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
+#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */
#define EDP_PSR_TP1_TIME_500us (0 << 4)
#define EDP_PSR_TP1_TIME_100us (1 << 4)
#define EDP_PSR_TP1_TIME_2500us (2 << 4)
@@ -4612,13 +4678,14 @@ enum {
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT(port) ((port) << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
-#define VIDEO_DIP_ENABLE_GCP (1 << 25)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
-#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
#define VIDEO_DIP_SELECT_SPD (3 << 19)
#define VIDEO_DIP_SELECT_MASK (3 << 19)
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
@@ -4653,18 +4720,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
-#define PP_ON (1 << 31)
+#define PP_ON REG_BIT(31)
#define _PP_CONTROL_1 0xc7204
#define _PP_CONTROL_2 0xc7304
#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
_PP_CONTROL_2)
-#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
-#define POWER_CYCLE_DELAY_SHIFT 4
-#define VDD_OVERRIDE_FORCE (1 << 3)
-#define BACKLIGHT_ENABLE (1 << 2)
-#define PWR_DOWN_ON_RESET (1 << 1)
-#define PWR_STATE_TARGET (1 << 0)
+#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
+#define VDD_OVERRIDE_FORCE REG_BIT(3)
+#define BACKLIGHT_ENABLE REG_BIT(2)
+#define PWR_DOWN_ON_RESET REG_BIT(1)
+#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -4672,62 +4738,53 @@ enum {
* - pipe enabled
* - LVDS/DVOB/DVOC on
*/
-#define PP_READY (1 << 30)
-#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_POWER_UP (1 << 28)
-#define PP_SEQUENCE_POWER_DOWN (2 << 28)
-#define PP_SEQUENCE_MASK (3 << 28)
-#define PP_SEQUENCE_SHIFT 28
-#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
-#define PP_SEQUENCE_STATE_MASK 0x0000000f
-#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
-#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
-#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
-#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
-#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
-#define PP_SEQUENCE_STATE_RESET (0xf << 0)
+#define PP_READY REG_BIT(30)
+#define PP_SEQUENCE_MASK REG_GENMASK(29, 28)
+#define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0)
+#define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1)
+#define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2)
+#define PP_CYCLE_DELAY_ACTIVE REG_BIT(27)
+#define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0)
+#define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1)
+#define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2)
+#define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3)
+#define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8)
+#define PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9)
+#define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa)
+#define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb)
+#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
#define _PP_CONTROL 0x61204
#define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL)
-#define PANEL_UNLOCK_REGS (0xabcd << 16)
-#define PANEL_UNLOCK_MASK (0xffff << 16)
-#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0
-#define BXT_POWER_CYCLE_DELAY_SHIFT 4
-#define EDP_FORCE_VDD (1 << 3)
-#define EDP_BLC_ENABLE (1 << 2)
-#define PANEL_POWER_RESET (1 << 1)
-#define PANEL_POWER_ON (1 << 0)
+#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16)
+#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
+#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
+#define EDP_FORCE_VDD REG_BIT(3)
+#define EDP_BLC_ENABLE REG_BIT(2)
+#define PANEL_POWER_RESET REG_BIT(1)
+#define PANEL_POWER_ON REG_BIT(0)
#define _PP_ON_DELAYS 0x61208
#define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS)
-#define PANEL_PORT_SELECT_SHIFT 30
-#define PANEL_PORT_SELECT_MASK (3 << 30)
-#define PANEL_PORT_SELECT_LVDS (0 << 30)
-#define PANEL_PORT_SELECT_DPA (1 << 30)
-#define PANEL_PORT_SELECT_DPC (2 << 30)
-#define PANEL_PORT_SELECT_DPD (3 << 30)
-#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
-#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000
-#define PANEL_POWER_UP_DELAY_SHIFT 16
-#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff
-#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30)
+#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
+#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
+#define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2)
+#define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3)
+#define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port)
+#define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16)
+#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_OFF_DELAYS 0x6120C
#define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
-#define PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000
-#define PANEL_POWER_DOWN_DELAY_SHIFT 16
-#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff
-#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16)
+#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_DIVISOR 0x61210
#define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR)
-#define PP_REFERENCE_DIVIDER_MASK 0xffffff00
-#define PP_REFERENCE_DIVIDER_SHIFT 8
-#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f
-#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
+#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8)
+#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0)
/* Panel fitting */
#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
@@ -5590,9 +5647,15 @@ enum {
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1 << 25)
-#define PIPECONF_PALETTE 0
-#define PIPECONF_GAMMA (1 << 24)
#define PIPECONF_FORCE_BORDER (1 << 25)
+#define PIPECONF_GAMMA_MODE_MASK_I9XX (1 << 24) /* gmch */
+#define PIPECONF_GAMMA_MODE_MASK_ILK (3 << 24) /* ilk-ivb */
+#define PIPECONF_GAMMA_MODE_8BIT (0 << 24) /* gmch,ilk-ivb */
+#define PIPECONF_GAMMA_MODE_10BIT (1 << 24) /* gmch,ilk-ivb */
+#define PIPECONF_GAMMA_MODE_12BIT (2 << 24) /* ilk-ivb */
+#define PIPECONF_GAMMA_MODE_SPLIT (3 << 24) /* ivb */
+#define PIPECONF_GAMMA_MODE(x) ((x) << 24) /* pass in GAMMA_MODE_MODE_* */
+#define PIPECONF_GAMMA_MODE_SHIFT 24
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5698,6 +5761,10 @@ enum {
#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
+#define _PIPEAGCMAX 0x70010
+#define _PIPEBGCMAX 0x71010
+#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
+
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE (1 << 27)
@@ -5998,6 +6065,7 @@ enum {
#define _CUR_WM_TRANS_A_0 0x70168
#define _CUR_WM_TRANS_B_0 0x71168
#define PLANE_WM_EN (1 << 31)
+#define PLANE_WM_IGNORE_LINES (1 << 30)
#define PLANE_WM_LINES_SHIFT 14
#define PLANE_WM_LINES_MASK 0x1f
#define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+ 11 bits */
@@ -6124,7 +6192,7 @@ enum {
#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
#define MCURSOR_ROTATE_180 (1 << 15)
#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
@@ -6179,7 +6247,7 @@ enum {
#define DISPPLANE_RGBA888 (0xf << 26)
#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24)
+#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
@@ -6557,13 +6625,22 @@ enum {
#define PLANE_CTL_FORMAT_YUV422 (0 << 24)
#define PLANE_CTL_FORMAT_NV12 (1 << 24)
#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24)
+#define PLANE_CTL_FORMAT_P010 (3 << 24)
#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24)
+#define PLANE_CTL_FORMAT_P012 (5 << 24)
#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
+#define PLANE_CTL_FORMAT_P016 (7 << 24)
#define PLANE_CTL_FORMAT_AYUV (8 << 24)
#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
+#define PLANE_CTL_FORMAT_Y210 (1 << 23)
+#define PLANE_CTL_FORMAT_Y212 (3 << 23)
+#define PLANE_CTL_FORMAT_Y216 (5 << 23)
+#define PLANE_CTL_FORMAT_Y410 (7 << 23)
+#define PLANE_CTL_FORMAT_Y412 (9 << 23)
+#define PLANE_CTL_FORMAT_Y416 (0xb << 23)
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21)
#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
@@ -7102,14 +7179,25 @@ enum {
#define _LGC_PALETTE_B 0x4a800
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
+/* ilk/snb precision palette */
+#define _PREC_PALETTE_A 0x4b000
+#define _PREC_PALETTE_B 0x4c000
+#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)
+
+#define _PREC_PIPEAGCMAX 0x4d000
+#define _PREC_PIPEBGCMAX 0x4d010
+#define PREC_PIPEGCMAX(pipe, i) _MMIO(_PIPE(pipe, _PREC_PIPEAGCMAX, _PREC_PIPEBGCMAX) + (i) * 4)
+
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
-#define GAMMA_MODE_MODE_MASK (3 << 0)
-#define GAMMA_MODE_MODE_8BIT (0 << 0)
-#define GAMMA_MODE_MODE_10BIT (1 << 0)
-#define GAMMA_MODE_MODE_12BIT (2 << 0)
-#define GAMMA_MODE_MODE_SPLIT (3 << 0)
+#define PRE_CSC_GAMMA_ENABLE (1 << 31)
+#define POST_CSC_GAMMA_ENABLE (1 << 30)
+#define GAMMA_MODE_MODE_MASK (3 << 0)
+#define GAMMA_MODE_MODE_8BIT (0 << 0)
+#define GAMMA_MODE_MODE_10BIT (1 << 0)
+#define GAMMA_MODE_MODE_12BIT (2 << 0)
+#define GAMMA_MODE_MODE_SPLIT (3 << 0)
/* DMC/CSR */
#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
@@ -7204,8 +7292,8 @@ enum {
#define GEN8_GT_VECS_IRQ (1 << 6)
#define GEN8_GT_GUC_IRQ (1 << 5)
#define GEN8_GT_PM_IRQ (1 << 4)
-#define GEN8_GT_VCS2_IRQ (1 << 3)
-#define GEN8_GT_VCS1_IRQ (1 << 2)
+#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
+#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bspec! */
#define GEN8_GT_BCS_IRQ (1 << 1)
#define GEN8_GT_RCS_IRQ (1 << 0)
@@ -7226,8 +7314,8 @@ enum {
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
-#define GEN8_VCS1_IRQ_SHIFT 0
-#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */
+#define GEN8_VCS1_IRQ_SHIFT 16 /* NB: VCS2 in bspec! */
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
@@ -7613,13 +7701,13 @@ enum {
#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
/*GEN11 chicken */
-#define _PIPEA_CHICKEN 0x70038
-#define _PIPEB_CHICKEN 0x71038
-#define _PIPEC_CHICKEN 0x72038
-#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
-#define PM_FILL_MAINTAIN_DBUF_FULLNESS (1 << 0)
-#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
- _PIPEB_CHICKEN)
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
+#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
/* PCH */
@@ -8089,10 +8177,11 @@ enum {
#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
@@ -8600,8 +8689,9 @@ enum {
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE (1 << 0)
-#define GEN9_MEDIA_PG_ENABLE (1 << 1)
+#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
+#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
+#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
@@ -8616,6 +8706,11 @@ enum {
#define GEN6_PMIER _MMIO(0x4402C)
#define GEN6_PM_MBOX_EVENT (1 << 25)
#define GEN6_PM_THERMAL_EVENT (1 << 24)
+
+/*
+ * For Gen11 these are in the upper word of the GPM_WGBOXPERF
+ * registers. Shifting is handled on accessing the imr and ier.
+ */
#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
@@ -9741,7 +9836,7 @@ enum skl_power_gate {
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
-#define DPLL_CFGCR1_KDIV_4 (4 << 6)
+#define DPLL_CFGCR1_KDIV_3 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
#define DPLL_CFGCR1_PDIV_SHIFT (2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
@@ -9810,16 +9905,29 @@ enum skl_power_gate {
#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
#define BXT_DRAM_SIZE_MASK (0x7 << 6)
#define BXT_DRAM_SIZE_SHIFT 6
-#define BXT_DRAM_SIZE_4GB (0x0 << 6)
-#define BXT_DRAM_SIZE_6GB (0x1 << 6)
-#define BXT_DRAM_SIZE_8GB (0x2 << 6)
-#define BXT_DRAM_SIZE_12GB (0x3 << 6)
-#define BXT_DRAM_SIZE_16GB (0x4 << 6)
+#define BXT_DRAM_SIZE_4GBIT (0x0 << 6)
+#define BXT_DRAM_SIZE_6GBIT (0x1 << 6)
+#define BXT_DRAM_SIZE_8GBIT (0x2 << 6)
+#define BXT_DRAM_SIZE_12GBIT (0x3 << 6)
+#define BXT_DRAM_SIZE_16GBIT (0x4 << 6)
+#define BXT_DRAM_TYPE_MASK (0x7 << 22)
+#define BXT_DRAM_TYPE_SHIFT 22
+#define BXT_DRAM_TYPE_DDR3 (0x0 << 22)
+#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22)
+#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
+#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
#define SKL_REQ_DATA_MASK (0xF << 0)
+#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
+#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0)
+
#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
#define SKL_DRAM_S_SHIFT 16
@@ -9831,8 +9939,21 @@ enum skl_power_gate {
#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
#define SKL_DRAM_RANK_MASK (0x1 << 10)
#define SKL_DRAM_RANK_SHIFT 10
-#define SKL_DRAM_RANK_SINGLE (0x0 << 10)
-#define SKL_DRAM_RANK_DUAL (0x1 << 10)
+#define SKL_DRAM_RANK_1 (0x0 << 10)
+#define SKL_DRAM_RANK_2 (0x1 << 10)
+#define SKL_DRAM_RANK_MASK (0x1 << 10)
+#define CNL_DRAM_SIZE_MASK 0x7F
+#define CNL_DRAM_WIDTH_MASK (0x3 << 7)
+#define CNL_DRAM_WIDTH_SHIFT 7
+#define CNL_DRAM_WIDTH_X8 (0x0 << 7)
+#define CNL_DRAM_WIDTH_X16 (0x1 << 7)
+#define CNL_DRAM_WIDTH_X32 (0x2 << 7)
+#define CNL_DRAM_RANK_MASK (0x3 << 9)
+#define CNL_DRAM_RANK_SHIFT 9
+#define CNL_DRAM_RANK_1 (0x0 << 9)
+#define CNL_DRAM_RANK_2 (0x1 << 9)
+#define CNL_DRAM_RANK_3 (0x2 << 9)
+#define CNL_DRAM_RANK_4 (0x3 << 9)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
@@ -9877,10 +9998,14 @@ enum skl_power_gate {
#define _PIPE_A_CSC_COEFF_BU 0x4901c
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
#define _PIPE_A_CSC_COEFF_BV 0x49024
+
#define _PIPE_A_CSC_MODE 0x49028
-#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
-#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
-#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define ICL_CSC_ENABLE (1 << 31)
+#define ICL_OUTPUT_CSC_ENABLE (1 << 30)
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
#define _PIPE_A_CSC_PREOFF_LO 0x49038
@@ -9916,6 +10041,70 @@ enum skl_power_gate {
#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+/* Pipe Output CSC */
+#define _PIPE_A_OUTPUT_CSC_COEFF_RY_GY 0x49050
+#define _PIPE_A_OUTPUT_CSC_COEFF_BY 0x49054
+#define _PIPE_A_OUTPUT_CSC_COEFF_RU_GU 0x49058
+#define _PIPE_A_OUTPUT_CSC_COEFF_BU 0x4905c
+#define _PIPE_A_OUTPUT_CSC_COEFF_RV_GV 0x49060
+#define _PIPE_A_OUTPUT_CSC_COEFF_BV 0x49064
+#define _PIPE_A_OUTPUT_CSC_PREOFF_HI 0x49068
+#define _PIPE_A_OUTPUT_CSC_PREOFF_ME 0x4906c
+#define _PIPE_A_OUTPUT_CSC_PREOFF_LO 0x49070
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_HI 0x49074
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_ME 0x49078
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_LO 0x4907c
+
+#define _PIPE_B_OUTPUT_CSC_COEFF_RY_GY 0x49150
+#define _PIPE_B_OUTPUT_CSC_COEFF_BY 0x49154
+#define _PIPE_B_OUTPUT_CSC_COEFF_RU_GU 0x49158
+#define _PIPE_B_OUTPUT_CSC_COEFF_BU 0x4915c
+#define _PIPE_B_OUTPUT_CSC_COEFF_RV_GV 0x49160
+#define _PIPE_B_OUTPUT_CSC_COEFF_BV 0x49164
+#define _PIPE_B_OUTPUT_CSC_PREOFF_HI 0x49168
+#define _PIPE_B_OUTPUT_CSC_PREOFF_ME 0x4916c
+#define _PIPE_B_OUTPUT_CSC_PREOFF_LO 0x49170
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_HI 0x49174
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_ME 0x49178
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_LO 0x4917c
+
+#define PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe,\
+ _PIPE_A_OUTPUT_CSC_COEFF_RY_GY,\
+ _PIPE_B_OUTPUT_CSC_COEFF_RY_GY)
+#define PIPE_CSC_OUTPUT_COEFF_BY(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BY, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BY)
+#define PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_RU_GU, \
+ _PIPE_B_OUTPUT_CSC_COEFF_RU_GU)
+#define PIPE_CSC_OUTPUT_COEFF_BU(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BU, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BU)
+#define PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_RV_GV, \
+ _PIPE_B_OUTPUT_CSC_COEFF_RV_GV)
+#define PIPE_CSC_OUTPUT_COEFF_BV(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BV, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BV)
+#define PIPE_CSC_OUTPUT_PREOFF_HI(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_HI, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_HI)
+#define PIPE_CSC_OUTPUT_PREOFF_ME(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_ME, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_ME)
+#define PIPE_CSC_OUTPUT_PREOFF_LO(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_LO, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_LO)
+#define PIPE_CSC_OUTPUT_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_HI, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_HI)
+#define PIPE_CSC_OUTPUT_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_ME, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_ME)
+#define PIPE_CSC_OUTPUT_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_LO, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_LO)
+
/* pipe degamma/gamma LUTs on IVB+ */
#define _PAL_PREC_INDEX_A 0x4A400
#define _PAL_PREC_INDEX_B 0x4AC00
@@ -9924,6 +10113,7 @@ enum skl_power_gate {
#define PAL_PREC_SPLIT_MODE (1 << 31)
#define PAL_PREC_AUTO_INCREMENT (1 << 15)
#define PAL_PREC_INDEX_VALUE_MASK (0x3ff << 0)
+#define PAL_PREC_INDEX_VALUE(x) ((x) << 0)
#define _PAL_PREC_DATA_A 0x4A404
#define _PAL_PREC_DATA_B 0x4AC04
#define _PAL_PREC_DATA_C 0x4B404
@@ -9941,6 +10131,7 @@ enum skl_power_gate {
#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
+#define PREC_PAL_EXT2_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT2_GC_MAX_A, _PAL_PREC_EXT2_GC_MAX_B) + (i) * 4)
#define _PRE_CSC_GAMC_INDEX_A 0x4A484
#define _PRE_CSC_GAMC_INDEX_B 0x4AC84
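The mask/value pairs added above (for example the BXT_DRAM_TYPE_* and SKL_DRAM_DDR_TYPE_* fields) follow the usual pattern of pre-shifted field values selected by a mask. As a minimal stand-alone sketch of how such definitions are typically consumed (the helper name, field names and test value below are hypothetical, not part of the driver):

#include <stdio.h>

/* Field layout mirroring the BXT_DRAM_TYPE_* style above: values pre-shifted
 * into bits 24:22, selected by masking the raw register word.
 */
#define DRAM_TYPE_MASK   (0x7 << 22)
#define DRAM_TYPE_DDR3   (0x0 << 22)
#define DRAM_TYPE_LPDDR3 (0x1 << 22)
#define DRAM_TYPE_LPDDR4 (0x2 << 22)
#define DRAM_TYPE_DDR4   (0x4 << 22)

static const char *decode_dram_type(unsigned int reg)
{
	switch (reg & DRAM_TYPE_MASK) {
	case DRAM_TYPE_DDR3:   return "DDR3";
	case DRAM_TYPE_LPDDR3: return "LPDDR3";
	case DRAM_TYPE_LPDDR4: return "LPDDR4";
	case DRAM_TYPE_DDR4:   return "DDR4";
	default:               return "unknown";
	}
}

int main(void)
{
	unsigned int val = (0x2 << 22) | 0x1234;	/* made-up register word */

	printf("DRAM type: %s\n", decode_dram_type(val));
	return 0;
}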
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c2a5c48c7541..f6c78c0fa74b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -22,15 +22,31 @@
*
*/
-#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
+#include <linux/irq_work.h>
+#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
-#include "i915_drv.h"
#include "i915_active.h"
+#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_reset.h"
+#include "intel_pm.h"
+
+struct execute_cb {
+ struct list_head link;
+ struct irq_work work;
+ struct i915_sw_fence *fence;
+};
+
+static struct i915_global_request {
+ struct i915_global base;
+ struct kmem_cache *slab_requests;
+ struct kmem_cache *slab_dependencies;
+ struct kmem_cache *slab_execute_cbs;
+} global;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
@@ -51,7 +67,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->timeline->name;
+ return to_request(fence)->gem_context->name ?: "[i915]";
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -68,7 +84,9 @@ static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
- return i915_request_wait(to_request(fence), interruptible, timeout);
+ return i915_request_wait(to_request(fence),
+ interruptible | I915_WAIT_PRIORITY,
+ timeout);
}
static void i915_fence_release(struct dma_fence *fence)
@@ -83,8 +101,9 @@ static void i915_fence_release(struct dma_fence *fence)
* caught trying to reuse dead objects.
*/
i915_sw_fence_fini(&rq->submit);
+ i915_sw_fence_fini(&rq->semaphore);
- kmem_cache_free(rq->i915->requests, rq);
+ kmem_cache_free(global.slab_requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
@@ -150,7 +169,6 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
- GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@@ -177,12 +195,10 @@ static void free_capture_list(struct i915_request *request)
static void __retire_engine_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
- GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(rq));
GEM_BUG_ON(!i915_request_completed(rq));
@@ -241,12 +257,10 @@ static void i915_request_retire(struct i915_request *request)
{
struct i915_active_request *active, *next;
- GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
- hwsp_seqno(request),
- intel_engine_get_seqno(request->engine));
+ hwsp_seqno(request));
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
@@ -288,15 +302,13 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
- /* Retirement decays the ban score as it is a sign of ctx progress */
- atomic_dec_if_positive(&request->gem_context->ban_score);
intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
unreserve_gt(request->i915);
- i915_sched_node_fini(request->i915, &request->sched);
+ i915_sched_node_fini(&request->sched);
i915_request_put(request);
}
@@ -305,12 +317,10 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(rq->engine));
+ hwsp_seqno(rq));
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_request_completed(rq));
@@ -326,9 +336,67 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (tmp != rq);
}
-static u32 timeline_get_seqno(struct i915_timeline *tl)
+static void irq_execute_cb(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+}
+
+static void __notify_execute_cb(struct i915_request *rq)
{
- return tl->seqno += 1 + tl->has_initial_breadcrumb;
+ struct execute_cb *cb;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (list_empty(&rq->execute_cb))
+ return;
+
+ list_for_each_entry(cb, &rq->execute_cb, link)
+ irq_work_queue(&cb->work);
+
+ /*
+ * XXX Rollback on __i915_request_unsubmit()
+ *
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy, we have to carefully rollback the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
+ */
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
+static int
+i915_request_await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ gfp_t gfp)
+{
+ struct execute_cb *cb;
+
+ if (i915_request_is_active(signal))
+ return 0;
+
+ cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
+ if (!cb)
+ return -ENOMEM;
+
+ cb->fence = &rq->submit;
+ i915_sw_fence_await(cb->fence);
+ init_irq_work(&cb->work, irq_execute_cb);
+
+ spin_lock_irq(&signal->lock);
+ if (i915_request_is_active(signal)) {
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+ } else {
+ list_add_tail(&cb->link, &signal->execute_cb);
+ }
+ spin_unlock_irq(&signal->lock);
+
+ return 0;
}
static void move_to_timeline(struct i915_request *request,
@@ -342,42 +410,54 @@ static void move_to_timeline(struct i915_request *request,
spin_unlock(&request->timeline->lock);
}
-static u32 next_global_seqno(struct i915_timeline *tl)
-{
- if (!++tl->seqno)
- ++tl->seqno;
- return tl->seqno;
-}
-
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- u32 seqno;
- GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld -> current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- engine->timeline.seqno + 1,
- hwsp_seqno(request),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
- GEM_BUG_ON(request->global_seqno);
+ if (i915_gem_context_is_banned(request->gem_context))
+ i915_request_skip(request, -EIO);
- seqno = next_global_seqno(&engine->timeline);
- GEM_BUG_ON(!seqno);
- GEM_BUG_ON(intel_engine_signaled(engine, seqno));
+ /*
+ * Are we using semaphores when the gpu is already saturated?
+ *
+ * Using semaphores incurs a cost in having the GPU poll a
+ * memory location, busywaiting for it to change. The continual
+ * memory reads can have a noticeable impact on the rest of the
+ * system with the extra bus traffic, stalling the cpu as it too
+ * tries to access memory across the bus (perf stat -e bus-cycles).
+ *
+ * If we installed a semaphore on this request and we only submit
+ * the request after the signaler completed, that indicates the
+ * system is overloaded and using semaphores at this time only
+ * increases the amount of work we are doing. If so, we disable
+ * further use of semaphores until we are idle again, whence we
+ * optimistically try again.
+ */
+ if (request->sched.semaphores &&
+ i915_sw_fence_signaled(&request->semaphore))
+ request->hw_context->saturated |= request->sched.semaphores;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
- request->global_seqno = seqno;
+
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_queue_breadcrumbs(engine);
+
+ __notify_execute_cb(request);
+
spin_unlock(&request->lock);
engine->emit_fini_breadcrumb(request,
@@ -406,12 +486,10 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
- hwsp_seqno(request),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
@@ -420,18 +498,25 @@ void __i915_request_unsubmit(struct i915_request *request)
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
- GEM_BUG_ON(!request->global_seqno);
- GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
- GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
- engine->timeline.seqno--;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- request->global_seqno = 0;
+
+ /*
+ * As we do not allow WAIT to preempt inflight requests,
+ * once we have executed a request, along with triggering
+ * any execution callbacks, we must preserve its ordering
+ * within the non-preemptible FIFO.
+ */
+ BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
+ request->sched.attr.priority |= __NO_PREEMPTION;
+
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
i915_request_cancel_breadcrumb(request);
+
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+
spin_unlock(&request->lock);
/* Transfer back from the global per-engine timeline to per-context */
@@ -489,6 +574,36 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static int __i915_sw_fence_call
+semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct i915_request *request =
+ container_of(fence, typeof(*request), semaphore);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ /*
+ * We only check a small portion of our dependencies
+ * and so cannot guarantee that there remains no
+ * semaphore chain across all. Instead of opting
+ * for the full NOSEMAPHORE boost, we go for the
+ * smaller (but still preempting) boost of
+ * NEWCLIENT. This will be enough to boost over
+ * a busywaiting request (as that cannot be
+ * NEWCLIENT) without accidentally boosting
+ * a busywait over real work elsewhere.
+ */
+ i915_schedule_bump_priority(request, I915_PRIORITY_NEWCLIENT);
+ break;
+
+ case FENCE_FREE:
+ i915_request_put(request);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *rq, *rn;
@@ -518,12 +633,7 @@ i915_request_alloc_slow(struct intel_context *ce)
ring_retire_requests(ring);
out:
- return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
-}
-
-static int add_timeline_barrier(struct i915_request *rq)
-{
- return i915_request_await_active_request(rq, &rq->timeline->barrier);
+ return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
}
/**
@@ -539,8 +649,10 @@ struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_request *rq;
struct intel_context *ce;
+ struct i915_timeline *tl;
+ struct i915_request *rq;
+ u32 seqno;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -556,8 +668,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- if (i915_terminally_wedged(&i915->gpu_error))
- return ERR_PTR(-EIO);
+ ret = i915_terminally_wedged(i915);
+ if (ret)
+ return ERR_PTR(ret);
/*
* Pinning the contexts may generate requests in order to acquire
@@ -569,6 +682,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
return ERR_CAST(ce);
reserve_gt(i915);
+ mutex_lock(&ce->ring->timeline->mutex);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -605,7 +719,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*
* Do not use kmem_cache_zalloc() here!
*/
- rq = kmem_cache_alloc(i915->requests,
+ rq = kmem_cache_alloc(global.slab_requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
rq = i915_request_alloc_slow(ce);
@@ -615,32 +729,36 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
}
}
- rq->rcustate = get_state_synchronize_rcu();
-
INIT_LIST_HEAD(&rq->active_list);
+ INIT_LIST_HEAD(&rq->execute_cb);
+
+ tl = ce->ring->timeline;
+ ret = i915_timeline_get_seqno(tl, rq, &seqno);
+ if (ret)
+ goto err_free;
+
rq->i915 = i915;
rq->engine = engine;
rq->gem_context = ctx;
rq->hw_context = ce;
rq->ring = ce->ring;
- rq->timeline = ce->ring->timeline;
+ rq->timeline = tl;
GEM_BUG_ON(rq->timeline == &engine->timeline);
- rq->hwsp_seqno = rq->timeline->hwsp_seqno;
+ rq->hwsp_seqno = tl->hwsp_seqno;
+ rq->hwsp_cacheline = tl->hwsp_cacheline;
+ rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
spin_lock_init(&rq->lock);
- dma_fence_init(&rq->fence,
- &i915_fence_ops,
- &rq->lock,
- rq->timeline->fence_context,
- timeline_get_seqno(rq->timeline));
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+ tl->fence_context, seqno);
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
+ i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
i915_sched_node_init(&rq->sched);
/* No zalloc, must clear what we need by hand */
- rq->global_seqno = 0;
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
@@ -668,10 +786,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
- ret = add_timeline_barrier(rq);
- if (ret)
- goto err_unwind;
-
ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
@@ -682,7 +796,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
/* Check that we didn't interrupt ourselves with a new request */
+ lockdep_assert_held(&rq->timeline->mutex);
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
+ rq->cookie = lockdep_pin_lock(&rq->timeline->mutex);
+
return rq;
err_unwind:
@@ -693,14 +810,113 @@ err_unwind:
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
- kmem_cache_free(i915->requests, rq);
+err_free:
+ kmem_cache_free(global.slab_requests, rq);
err_unreserve:
+ mutex_unlock(&ce->ring->timeline->mutex);
unreserve_gt(i915);
intel_context_unpin(ce);
return ERR_PTR(ret);
}
static int
+i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
+{
+ if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+ return 0;
+
+ signal = list_prev_entry(signal, ring_link);
+ if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+ return 0;
+
+ return i915_sw_fence_await_dma_fence(&rq->submit,
+ &signal->fence, 0,
+ I915_FENCE_GFP);
+}
+
+static intel_engine_mask_t
+already_busywaiting(struct i915_request *rq)
+{
+ /*
+ * Polling a semaphore causes bus traffic, delaying other users of
+ * both the GPU and CPU. We want to limit the impact on others,
+ * while taking advantage of early submission to reduce GPU
+ * latency. Therefore we restrict ourselves to not using more
+ * than one semaphore from each source, and not using a semaphore
+ * if we have detected the engine is saturated (i.e. would not be
+ * submitted early and cause bus traffic reading an already passed
+ * semaphore).
+ *
+ * See the are-we-too-late? check in __i915_request_submit().
+ */
+ return rq->sched.semaphores | rq->hw_context->saturated;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ gfp_t gfp)
+{
+ u32 hwsp_offset;
+ u32 *cs;
+ int err;
+
+ GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+
+ /* Just emit the first semaphore we see as request space is limited. */
+ if (already_busywaiting(to) & from->engine->mask)
+ return i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+
+ err = i915_request_await_start(to, from);
+ if (err < 0)
+ return err;
+
+ err = i915_sw_fence_await_dma_fence(&to->semaphore,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+ if (err < 0)
+ return err;
+
+ /* We need to pin the signaler's HWSP until we are finished reading. */
+ err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ if (err)
+ return err;
+
+ /* Only submit our spinner after the signaler is running! */
+ err = i915_request_await_execution(to, from, gfp);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(to, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Using greater-than-or-equal here means we have to worry
+ * about seqno wraparound. To side step that issue, we swap
+ * the timeline HWSP upon wrapping, so that anyone listening
+ * for the old (pre-wrap) values does not see the much smaller
+ * (post-wrap) values it was expecting (and so does not wait
+ * forever).
+ */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = from->fence.seqno;
+ *cs++ = hwsp_offset;
+ *cs++ = 0;
+
+ intel_ring_advance(to, cs);
+ to->sched.semaphores |= from->engine->mask;
+ to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+ return 0;
+}
+
+static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
int ret;
@@ -712,9 +928,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return 0;
if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(to->i915,
- &to->sched,
- &from->sched);
+ ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
if (ret < 0)
return ret;
}
@@ -723,6 +937,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
+ } else if (intel_engine_has_semaphores(to->engine) &&
+ to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
} else {
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
@@ -873,6 +1090,60 @@ void i915_request_skip(struct i915_request *rq, int error)
memset(vaddr + head, 0, rq->postfix - head);
}
+static struct i915_request *
+__i915_request_add_to_timeline(struct i915_request *rq)
+{
+ struct i915_timeline *timeline = rq->timeline;
+ struct i915_request *prev;
+
+ /*
+ * Dependency tracking and request ordering along the timeline
+ * is special cased so that we can eliminate redundant ordering
+ * operations while building the request (we know that the timeline
+ * itself is ordered, and here we guarantee it).
+ *
+ * As we know we will need to emit tracking along the timeline,
+ * we embed the hooks into our request struct -- at the cost of
+ * having to have specialised no-allocation interfaces (which will
+ * be beneficial elsewhere).
+ *
+ * A second benefit to open-coding i915_request_await_request is
+ * that we can apply a slight variant of the rules specialised
+ * for timelines that jump between engines (such as virtual engines).
+ * In the case of a virtual engine, we must emit a dma-fence
+ * to prevent scheduling of the second request until the first is
+ * complete (to maximise our greedy late load balancing), and this
+ * precludes optimising to use semaphores for serialisation of a single
+ * timeline across engines.
+ */
+ prev = i915_active_request_raw(&timeline->last_request,
+ &rq->i915->drm.struct_mutex);
+ if (prev && !i915_request_completed(prev)) {
+ if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ i915_sw_fence_await_sw_fence(&rq->submit,
+ &prev->submit,
+ &rq->submitq);
+ else
+ __i915_sw_fence_await_dma_fence(&rq->submit,
+ &prev->fence,
+ &rq->dmaq);
+ if (rq->engine->schedule)
+ __i915_sched_node_add_dependency(&rq->sched,
+ &prev->sched,
+ &rq->dep,
+ 0);
+ }
+
+ spin_lock_irq(&timeline->lock);
+ list_add_tail(&rq->link, &timeline->requests);
+ spin_unlock_irq(&timeline->lock);
+
+ GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
+ __i915_active_request_set(&timeline->last_request, rq);
+
+ return prev;
+}
+
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
@@ -889,7 +1160,9 @@ void i915_request_add(struct i915_request *request)
GEM_TRACE("%s fence %llx:%lld\n",
engine->name, request->fence.context, request->fence.seqno);
- lockdep_assert_held(&request->i915->drm.struct_mutex);
+ lockdep_assert_held(&request->timeline->mutex);
+ lockdep_unpin_lock(&request->timeline->mutex, request->cookie);
+
trace_i915_request_add(request);
/*
@@ -917,37 +1190,12 @@ void i915_request_add(struct i915_request *request)
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
- /*
- * Seal the request and mark it as pending execution. Note that
- * we may inspect this state, without holding any locks, during
- * hangcheck. Hence we apply the barrier to ensure that we do not
- * see a more recent value in the hws than we are tracking.
- */
-
- prev = i915_active_request_raw(&timeline->last_request,
- &request->i915->drm.struct_mutex);
- if (prev && !i915_request_completed(prev)) {
- i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
- &request->submitq);
- if (engine->schedule)
- __i915_sched_node_add_dependency(&request->sched,
- &prev->sched,
- &request->dep,
- 0);
- }
-
- spin_lock_irq(&timeline->lock);
- list_add_tail(&request->link, &timeline->requests);
- spin_unlock_irq(&timeline->lock);
-
- GEM_BUG_ON(timeline->seqno != request->fence.seqno);
- __i915_active_request_set(&timeline->last_request, request);
+ prev = __i915_request_add_to_timeline(request);
list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list)) {
- GEM_TRACE("marking %s as active\n", ring->timeline->name);
+ if (list_is_first(&request->ring_link, &ring->request_list))
list_add(&ring->active_link, &request->i915->gt.active_rings);
- }
+ request->i915->gt.active_engines |= request->engine->mask;
request->emitted_jiffies = jiffies;
/*
@@ -962,11 +1210,27 @@ void i915_request_add(struct i915_request *request)
* run at the earliest possible convenience.
*/
local_bh_disable();
+ i915_sw_fence_commit(&request->semaphore);
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule) {
struct i915_sched_attr attr = request->gem_context->sched;
/*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distant past over useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
* Boost priorities to new clients (new request flows).
*
* Allow interactive/synchronous clients to jump ahead of
@@ -1000,6 +1264,8 @@ void i915_request_add(struct i915_request *request)
*/
if (prev && i915_request_completed(prev))
i915_request_retire_upto(prev);
+
+ mutex_unlock(&request->timeline->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1136,8 +1402,25 @@ long i915_request_wait(struct i915_request *rq,
if (__i915_spin_request(rq, state, 5))
goto out;
- if (flags & I915_WAIT_PRIORITY)
+ /*
+ * This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we sleep. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery).
+ */
+ if (flags & I915_WAIT_PRIORITY) {
+ if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
+ gen6_rps_boost(rq);
+ local_bh_disable(); /* suspend tasklets for reprioritisation */
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
+ local_bh_enable(); /* kick tasklets en masse */
+ }
wait.tsk = current;
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
@@ -1179,11 +1462,66 @@ void i915_retire_requests(struct drm_i915_private *i915)
if (!i915->gt.active_requests)
return;
- list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
+ list_for_each_entry_safe(ring, tmp,
+ &i915->gt.active_rings, active_link) {
+ intel_ring_get(ring); /* last rq holds reference! */
ring_retire_requests(ring);
+ intel_ring_put(ring);
+ }
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif
+
+static void i915_global_request_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_execute_cbs);
+ kmem_cache_shrink(global.slab_requests);
+}
+
+static void i915_global_request_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_execute_cbs);
+ kmem_cache_destroy(global.slab_requests);
+}
+
+static struct i915_global_request global = { {
+ .shrink = i915_global_request_shrink,
+ .exit = i915_global_request_exit,
+} };
+
+int __init i915_global_request_init(void)
+{
+ global.slab_requests = KMEM_CACHE(i915_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!global.slab_requests)
+ return -ENOMEM;
+
+ global.slab_execute_cbs = KMEM_CACHE(execute_cb,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!global.slab_execute_cbs)
+ goto err_requests;
+
+ global.slab_dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!global.slab_dependencies)
+ goto err_execute_cbs;
+
+ i915_global_register(&global.base);
+ return 0;
+
+err_execute_cbs:
+ kmem_cache_destroy(global.slab_execute_cbs);
+err_requests:
+ kmem_cache_destroy(global.slab_requests);
+ return -ENOMEM;
+}
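The execute_cb plumbing above (i915_request_await_execution() and __notify_execute_cb()) is an instance of the check, lock, re-check pattern: take the fast path if the signaler is already running, otherwise queue a callback under the signaler's lock and let whoever activates it fire the list. Below is a minimal user-space sketch of that shape, with a pthread mutex standing in for the request spinlock; all names are hypothetical and this is an illustration, not the driver's code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct signaler {
	pthread_mutex_t lock;
	bool active;		/* has the signaler started executing? */
	void (**cbs)(void);	/* callbacks to run once it starts */
	int nr_cbs;
};

static void release_waiter(void)
{
	puts("signaler running, waiter released");
}

/* Queue @cb to run when @s starts, or run it now if @s already started. */
static int await_execution(struct signaler *s, void (*cb)(void))
{
	void (**cbs)(void);

	if (s->active) {		/* unlocked fast path */
		cb();
		return 0;
	}

	pthread_mutex_lock(&s->lock);
	if (s->active) {		/* lost the race: fire immediately */
		pthread_mutex_unlock(&s->lock);
		cb();
		return 0;
	}

	cbs = realloc(s->cbs, (s->nr_cbs + 1) * sizeof(*cbs));
	if (!cbs) {
		pthread_mutex_unlock(&s->lock);
		return -1;
	}
	cbs[s->nr_cbs++] = cb;		/* still pending: queue for activation */
	s->cbs = cbs;
	pthread_mutex_unlock(&s->lock);
	return 0;
}

int main(void)
{
	struct signaler s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int i;

	await_execution(&s, release_waiter);	/* queued while inactive */

	pthread_mutex_lock(&s.lock);		/* activation path */
	s.active = true;
	for (i = 0; i < s.nr_cbs; i++)
		s.cbs[i]();
	pthread_mutex_unlock(&s.lock);

	free(s.cbs);
	return 0;
}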
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 40f3e8dcbdd5..a982664618c2 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,9 +26,11 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/lockdep.h>
#include "i915_gem.h"
#include "i915_scheduler.h"
+#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include <uapi/drm/i915_drm.h>
@@ -37,6 +39,7 @@ struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;
+struct i915_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
@@ -119,6 +122,15 @@ struct i915_request {
unsigned long rcustate;
/*
+ * We pin the timeline->mutex while constructing the request to
+ * ensure that no caller accidentally drops it during construction.
+ * The timeline->mutex must be held to ensure that only this caller
+ * can use the ring and manipulate the associated timeline during
+ * construction.
+ */
+ struct pin_cookie cookie;
+
+ /*
* Fences for the various phases in the request's lifetime.
*
* The submit fence is used to await upon all of the request's
@@ -126,7 +138,12 @@ struct i915_request {
* It is used by the driver to then queue the request for execution.
*/
struct i915_sw_fence submit;
- wait_queue_entry_t submitq;
+ union {
+ wait_queue_entry_t submitq;
+ struct i915_sw_dma_fence_cb dmaq;
+ };
+ struct list_head execute_cb;
+ struct i915_sw_fence semaphore;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -147,13 +164,15 @@ struct i915_request {
*/
const u32 *hwsp_seqno;
- /**
- * GEM sequence number associated with this request on the
- * global execution timeline. It is zero when the request is not
- * on the HW queue (i.e. not on the engine timeline list).
- * Its value is guarded by the timeline spinlock.
+ /*
+ * If we need to access the timeline's seqno for this request in
+ * another request, we need to keep a read reference to this associated
+ * cacheline, so that we do not free and recycle it before the foreign
+ * observers have completed. Hence, we keep a pointer to the cacheline
+ * inside the timeline's HWSP vma, but it is only valid while this
+ * request has not completed and guarded by the timeline mutex.
*/
- u32 global_seqno;
+ struct i915_timeline_cacheline *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@@ -204,6 +223,11 @@ struct i915_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_link;
+
+ I915_SELFTEST_DECLARE(struct {
+ struct list_head link;
+ unsigned long delay;
+ } mock;)
};
#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -247,30 +271,6 @@ i915_request_put(struct i915_request *rq)
dma_fence_put(&rq->fence);
}
-/**
- * i915_request_global_seqno - report the current global seqno
- * @request - the request
- *
- * A request is assigned a global seqno only when it is on the hardware
- * execution queue. The global seqno can be used to maintain a list of
- * requests on the same engine in retirement order, for example for
- * constructing a priority queue for waiting. Prior to its execution, or
- * if it is subsequently removed in the event of preemption, its global
- * seqno is zero. As both insertion and removal from the execution queue
- * may operate in IRQ context, it is not guarded by the usual struct_mutex
- * BKL. Instead those relying on the global seqno must be prepared for its
- * value to change between reads. Only when the request is complete can
- * the global seqno be stable (due to the memory barriers on submitting
- * the commands to the hardware to write the breadcrumb, if the HWS shows
- * that it has passed the global seqno and the global seqno is unchanged
- * after the read, it is indeed complete).
- */
-static inline u32
-i915_request_global_seqno(const struct i915_request *request)
-{
- return READ_ONCE(request->global_seqno);
-}
-
int i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write);
@@ -358,10 +358,27 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
* i915_request_started - check if the request has begun being executed
* @rq: the request
*
- * Returns true if the request has been submitted to hardware, and the hardware
- * has advanced passed the end of the previous request and so should be either
- * currently processing the request (though it may be preempted and so
- * not necessarily the next request to complete) or have completed the request.
+ * If the timeline is not using initial breadcrumbs, a request is
+ * considered started if the previous request on its timeline (i.e.
+ * context) has been signaled.
+ *
+ * If the timeline is using semaphores, it will also be emitting an
+ * "initial breadcrumb" after the semaphores are complete and just before
+ * it began executing the user payload. A request can therefore be active
+ * on the HW and not yet started as it is still busywaiting on its
+ * dependencies (via HW semaphores).
+ *
+ * If the request has started, its dependencies will have been signaled
+ * (either by fences or by semaphores) and it will have begun processing
+ * the user payload.
+ *
+ * However, even if a request has started, it may have been preempted and
+ * so no longer active, or it may have already completed.
+ *
+ * See also i915_request_is_active().
+ *
+ * Returns true if the request has begun executing the user payload, or
+ * has completed.
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
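The hwsp_seqno/hwsp_cacheline tracking above, and the wraparound note in emit_semaphore_wait() earlier, both revolve around comparing 32-bit seqnos that are allowed to wrap. A stand-alone sketch of the usual wraparound-tolerant comparison follows; i915 has historically expressed this as i915_seqno_passed(), but the helper below is an illustration, not the driver's definition.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True if seq1 is at or beyond seq2, even across u32 wraparound: the signed
 * difference keeps the ordering correct for seqnos within 2^31 of each other.
 */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(10, 5));
	assert(!seqno_passed(5, 10));
	/* 0x00000002 counts as "after" 0xfffffffe once the counter wraps. */
	assert(seqno_passed(0x00000002, 0xfffffffeu));
	return 0;
}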
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 0e0ddf2e6815..677d59304e78 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -18,28 +18,39 @@
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
+static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw(uncore, reg, 0, set);
+}
+
+static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw(uncore, reg, clr, 0);
+}
+
+static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw_fw(uncore, reg, 0, set);
+}
+
+static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw_fw(uncore, reg, clr, 0);
+}
+
static void engine_skip_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *hung_ctx = rq->gem_context;
- struct i915_timeline *timeline = rq->timeline;
lockdep_assert_held(&engine->timeline.lock);
- GEM_BUG_ON(timeline == &engine->timeline);
-
- spin_lock(&timeline->lock);
-
- if (i915_request_is_active(rq)) {
- list_for_each_entry_continue(rq,
- &engine->timeline.requests, link)
- if (rq->gem_context == hung_ctx)
- i915_request_skip(rq, -EIO);
- }
- list_for_each_entry(rq, &timeline->requests, link)
- i915_request_skip(rq, -EIO);
+ if (!i915_request_is_active(rq))
+ return;
- spin_unlock(&timeline->lock);
+ list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+ if (rq->gem_context == hung_ctx)
+ i915_request_skip(rq, -EIO);
}
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
@@ -68,23 +79,29 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
static bool context_mark_guilty(struct i915_gem_context *ctx)
{
- unsigned int score;
- bool banned, bannable;
+ unsigned long prev_hang;
+ bool banned;
+ int i;
atomic_inc(&ctx->guilty_count);
- bannable = i915_gem_context_is_bannable(ctx);
- score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
- banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
-
- /* Cool contexts don't accumulate client ban score */
- if (!bannable)
+ /* Cool contexts are too cool to be banned! (Used for reset testing.) */
+ if (!i915_gem_context_is_bannable(ctx))
return false;
+ /* Record the timestamp for the last N hangs */
+ prev_hang = ctx->hang_timestamp[0];
+ for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
+ ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
+ ctx->hang_timestamp[i] = jiffies;
+
+ /* If we have hung N+1 times in rapid succession, we ban the context! */
+ banned = !i915_gem_context_is_recoverable(ctx);
+ if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
+ banned = true;
if (banned) {
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score);
+ DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count));
i915_gem_context_set_banned(ctx);
}
@@ -101,6 +118,12 @@ static void context_mark_innocent(struct i915_gem_context *ctx)
void i915_reset_request(struct i915_request *rq, bool guilty)
{
+ GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
+ rq->engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ yesno(guilty));
+
lockdep_assert_held(&rq->engine->timeline.lock);
GEM_BUG_ON(i915_request_completed(rq));
@@ -116,38 +139,43 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
+ GEM_TRACE("%s\n", engine->name);
+
if (intel_engine_stop_cs(engine))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
+ GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
- I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
- POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
+ intel_uncore_write_fw(uncore,
+ RING_HEAD(base),
+ intel_uncore_read_fw(uncore, RING_TAIL(base)));
+ intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
- I915_WRITE_FW(RING_HEAD(base), 0);
- I915_WRITE_FW(RING_TAIL(base), 0);
- POSTING_READ_FW(RING_TAIL(base));
+ intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
+ intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
+ intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
/* The ring must be empty before it is disabled */
- I915_WRITE_FW(RING_CTL(base), 0);
+ intel_uncore_write_fw(uncore, RING_CTL(base), 0);
/* Check acts as a post */
- if (I915_READ_FW(RING_HEAD(base)) != 0)
- DRM_DEBUG_DRIVER("%s: ring head not parked\n",
- engine->name);
+ if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
+ GEM_TRACE("%s: ring head [%x] not parked\n",
+ engine->name,
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
static void i915_stop_engines(struct drm_i915_private *i915,
- unsigned int engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ intel_engine_mask_t tmp;
if (INTEL_GEN(i915) < 3)
return;
- for_each_engine_masked(engine, i915, engine_mask, id)
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
gen3_stop_engine(engine);
}
@@ -160,7 +188,7 @@ static bool i915_in_reset(struct pci_dev *pdev)
}
static int i915_do_reset(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = i915->drm.pdev;
@@ -189,7 +217,7 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
}
static int g33_do_reset(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = i915->drm.pdev;
@@ -198,17 +226,17 @@ static int g33_do_reset(struct drm_i915_private *i915,
return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
-static int g4x_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
+static int g4x_do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_uncore *uncore = &i915->uncore;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
- I915_WRITE_FW(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ_FW(VDECCLK_GATE_D);
+ rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
@@ -229,21 +257,22 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv,
out:
pci_write_config_byte(pdev, I915_GDRST, 0);
- I915_WRITE_FW(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ_FW(VDECCLK_GATE_D);
+ rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
return ret;
}
-static int ironlake_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
+static int ironlake_do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
+ struct intel_uncore *uncore = &i915->uncore;
int ret;
- I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
- ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+ intel_uncore_write_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
@@ -252,8 +281,9 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
goto out;
}
- I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
- ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+ intel_uncore_write_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
@@ -263,15 +293,16 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
}
out:
- I915_WRITE_FW(ILK_GDSR, 0);
- POSTING_READ_FW(ILK_GDSR);
+ intel_uncore_write_fw(uncore, ILK_GDSR, 0);
+ intel_uncore_posting_read_fw(uncore, ILK_GDSR);
return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+static int gen6_hw_domain_reset(struct drm_i915_private *i915,
u32 hw_domain_mask)
{
+ struct intel_uncore *uncore = &i915->uncore;
int err;
/*
@@ -279,10 +310,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
* for fifo space for the write or forcewake the chip for
* the read
*/
- I915_WRITE_FW(GEN6_GDRST, hw_domain_mask);
+ intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests */
- err = __intel_wait_for_register_fw(dev_priv,
+ err = __intel_wait_for_register_fw(uncore,
GEN6_GDRST, hw_domain_mask, 0,
500, 0,
NULL);
@@ -294,36 +325,38 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
}
static int gen6_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN6_GRDOM_RENDER,
- [BCS] = GEN6_GRDOM_BLT,
- [VCS] = GEN6_GRDOM_MEDIA,
- [VCS2] = GEN8_GRDOM_MEDIA2,
- [VECS] = GEN6_GRDOM_VECS,
+ const u32 hw_engine_mask[] = {
+ [RCS0] = GEN6_GRDOM_RENDER,
+ [BCS0] = GEN6_GRDOM_BLT,
+ [VCS0] = GEN6_GRDOM_MEDIA,
+ [VCS1] = GEN8_GRDOM_MEDIA2,
+ [VECS0] = GEN6_GRDOM_VECS,
};
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN6_GRDOM_FULL;
} else {
- unsigned int tmp;
+ intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
+ }
}
return gen6_hw_domain_reset(i915, hw_mask);
}
-static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
- u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ struct intel_uncore *uncore = engine->uncore;
+ u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
i915_reg_t sfc_usage;
@@ -370,10 +403,9 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
* ends up being locked to the engine we want to reset, we have to reset
* it as well (we will unlock it once the reset sequence is completed).
*/
- I915_WRITE_FW(sfc_forced_lock,
- I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
+ rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
sfc_forced_lock_ack,
sfc_forced_lock_ack_bit,
sfc_forced_lock_ack_bit,
@@ -382,16 +414,16 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
return 0;
}
- if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
+ if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
return sfc_reset_bit;
return 0;
}
-static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
- u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ struct intel_uncore *uncore = engine->uncore;
+ u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock;
u32 sfc_forced_lock_bit;
@@ -413,38 +445,36 @@ static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
return;
}
- I915_WRITE_FW(sfc_forced_lock,
- I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
+ rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
static int gen11_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN11_GRDOM_RENDER,
- [BCS] = GEN11_GRDOM_BLT,
- [VCS] = GEN11_GRDOM_MEDIA,
- [VCS2] = GEN11_GRDOM_MEDIA2,
- [VCS3] = GEN11_GRDOM_MEDIA3,
- [VCS4] = GEN11_GRDOM_MEDIA4,
- [VECS] = GEN11_GRDOM_VECS,
- [VECS2] = GEN11_GRDOM_VECS2,
+ const u32 hw_engine_mask[] = {
+ [RCS0] = GEN11_GRDOM_RENDER,
+ [BCS0] = GEN11_GRDOM_BLT,
+ [VCS0] = GEN11_GRDOM_MEDIA,
+ [VCS1] = GEN11_GRDOM_MEDIA2,
+ [VCS2] = GEN11_GRDOM_MEDIA3,
+ [VCS3] = GEN11_GRDOM_MEDIA4,
+ [VECS0] = GEN11_GRDOM_VECS,
+ [VECS1] = GEN11_GRDOM_VECS2,
};
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
u32 hw_mask;
int ret;
- BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
-
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
- hw_mask |= gen11_lock_sfc(i915, engine);
+ hw_mask |= gen11_lock_sfc(engine);
}
}
@@ -452,46 +482,62 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
if (engine_mask != ALL_ENGINES)
for_each_engine_masked(engine, i915, engine_mask, tmp)
- gen11_unlock_sfc(i915, engine);
+ gen11_unlock_sfc(engine);
return ret;
}
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
+ u32 request, mask, ack;
int ret;
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+ ack = intel_uncore_read_fw(uncore, reg);
+ if (ack & RESET_CTL_CAT_ERROR) {
+ /*
+ * For catastrophic errors, ready-for-reset sequence
+ * needs to be bypassed: HAS#396813
+ */
+ request = RESET_CTL_CAT_ERROR;
+ mask = RESET_CTL_CAT_ERROR;
+
+ /* Catastrophic errors need to be cleared by HW */
+ ack = 0;
+ } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
+ request = RESET_CTL_REQUEST_RESET;
+ mask = RESET_CTL_READY_TO_RESET;
+ ack = RESET_CTL_READY_TO_RESET;
+ } else {
+ return 0;
+ }
- ret = __intel_wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700, 0,
- NULL);
+ intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
+ ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
+ 700, 0, NULL);
if (ret)
- DRM_ERROR("%s: reset request timeout\n", engine->name);
+ DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+ engine->name, request,
+ intel_uncore_read_fw(uncore, reg));
return ret;
}
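The RESET_CTL handshake above writes through i915's "masked" register convention: the upper 16 bits select which of the lower 16 bits the hardware is allowed to change. A minimal standalone sketch of that encoding; the helpers below mirror _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() but are local stand-ins, not the driver macros.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the driver's _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(). */
static uint32_t masked_bit_enable(uint32_t bit)  { return (bit << 16) | bit; }
static uint32_t masked_bit_disable(uint32_t bit) { return bit << 16; }

int main(void)
{
	uint32_t request_reset = 1u << 0;	/* hypothetical request bit */

	/* enable writes value + mask bits, disable writes the mask bit alone */
	printf("enable:  %08x\n", (unsigned int)masked_bit_enable(request_reset));
	printf("disable: %08x\n", (unsigned int)masked_bit_disable(request_reset));
	return 0;
}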
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+ intel_uncore_write_fw(engine->uncore,
+ RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
const bool reset_non_ready = retry >= 1;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
int ret;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
@@ -527,14 +573,11 @@ skip_reset:
}
typedef int (*reset_func)(struct drm_i915_private *,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry);
static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
- if (!i915_modparams.reset)
- return NULL;
-
if (INTEL_GEN(i915) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(i915) >= 6)
@@ -551,7 +594,8 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
return NULL;
}
-int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
+int intel_gpu_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
@@ -566,7 +610,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
/*
* We stop engines, otherwise we might get failed reset and a
@@ -582,14 +626,15 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
*
* FIXME: Wa for more modern gens needs to be validated
*/
- i915_stop_engines(i915, engine_mask);
+ if (retry)
+ i915_stop_engines(i915, engine_mask);
GEM_TRACE("engine_mask=%x\n", engine_mask);
preempt_disable();
ret = reset(i915, engine_mask, retry);
preempt_enable();
}
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -599,6 +644,9 @@ bool intel_has_gpu_reset(struct drm_i915_private *i915)
if (USES_GUC(i915))
return false;
+ if (!i915_modparams.reset)
+ return false;
+
return intel_get_gpu_reset(i915);
}
@@ -615,9 +663,9 @@ int intel_reset_guc(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
ret = gen6_hw_domain_reset(i915, guc_domain);
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -635,10 +683,36 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
+static void revoke_mmaps(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->num_fence_regs; i++) {
+ struct drm_vma_offset_node *node;
+ struct i915_vma *vma;
+ u64 vma_offset;
+
+ vma = READ_ONCE(i915->fence_regs[i].vma);
+ if (!vma)
+ continue;
+
+ if (!i915_vma_has_userfault(vma))
+ continue;
+
+ GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
+ node = &vma->obj->base.vma_node;
+ vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ unmap_mapping_range(i915->drm.anon_inode->i_mapping,
+ drm_vma_node_offset_addr(node) + vma_offset,
+ vma->size,
+ 1);
+ }
+}
+
static void reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -647,10 +721,16 @@ static void reset_prepare(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
- intel_uc_sanitize(i915);
+ intel_uc_reset_prepare(i915);
+}
+
+static void gt_revoke(struct drm_i915_private *i915)
+{
+ revoke_mmaps(i915);
}
-static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int gt_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -665,7 +745,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
return err;
for_each_engine(engine, i915, id)
- intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id));
+ intel_engine_reset(engine, stalled_mask & engine->mask);
i915_gem_restore_fences(i915);
@@ -675,7 +755,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
struct i915_gpu_restart {
@@ -722,8 +802,10 @@ static void reset_finish(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
+ intel_engine_signal_breadcrumbs(engine);
+ }
}
static void reset_restart(struct drm_i915_private *i915)
@@ -761,23 +843,19 @@ static void nop_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
__i915_request_submit(request);
i915_request_mark_complete(request);
- intel_engine_write_global_seqno(engine, request->global_seqno);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
intel_engine_queue_breadcrumbs(engine);
}
-void i915_gem_set_wedged(struct drm_i915_private *i915)
+static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- mutex_lock(&error->wedge_mutex);
- if (test_bit(I915_WEDGED, &error->flags)) {
- mutex_unlock(&error->wedge_mutex);
+ if (test_bit(I915_WEDGED, &error->flags))
return;
- }
if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
struct drm_printer p = drm_debug_printer(__func__);
@@ -793,11 +871,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id)
- reset_prepare_engine(engine);
+ reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
- if (INTEL_GEN(i915) >= 5)
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
intel_gpu_reset(i915, ALL_ENGINES);
for_each_engine(engine, i915, id) {
@@ -811,31 +888,35 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* either this call here to intel_engine_write_global_seqno, or the one
* in nop_submit_request.
*/
- synchronize_rcu();
+ synchronize_rcu_expedited();
/* Mark all executing requests as skipped */
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
- for_each_engine(engine, i915, id) {
- reset_finish_engine(engine);
- intel_engine_signal_breadcrumbs(engine);
- }
+ reset_finish(i915);
smp_mb__before_atomic();
set_bit(I915_WEDGED, &error->flags);
GEM_TRACE("end\n");
- mutex_unlock(&error->wedge_mutex);
+}
- wake_up_all(&error->reset_queue);
+void i915_gem_set_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ intel_wakeref_t wakeref;
+
+ mutex_lock(&error->wedge_mutex);
+ with_intel_runtime_pm(i915, wakeref)
+ __i915_gem_set_wedged(i915);
+ mutex_unlock(&error->wedge_mutex);
}
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct i915_timeline *tl;
- bool ret = false;
if (!test_bit(I915_WEDGED, &error->flags))
return true;
@@ -843,8 +924,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
return false;
- mutex_lock(&error->wedge_mutex);
-
GEM_TRACE("start\n");
/*
@@ -860,30 +939,20 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
mutex_lock(&i915->gt.timelines.mutex);
list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
struct i915_request *rq;
- long timeout;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
/*
- * We can't use our normal waiter as we want to
- * avoid recursively trying to handle the current
- * reset. The basic dma_fence_default_wait() installs
- * a callback for dma_fence_signal(), which is
- * triggered by our nop handler (indirectly, the
- * callback enables the signaler thread which is
- * woken by the nop_submit_request() advancing the seqno
- * and when the seqno passes the fence, the signaler
- * then signals the fence waking us up).
+ * All internal dependencies (i915_requests) will have
+ * been flushed by the set-wedge, but we may be stuck waiting
+ * for external fences. These should all be capped to 10s
+ * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
+ * in the worst case.
*/
- timeout = dma_fence_default_wait(&rq->fence, true,
- MAX_SCHEDULE_TIMEOUT);
+ dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
- if (timeout < 0) {
- mutex_unlock(&i915->gt.timelines.mutex);
- goto unlock;
- }
}
mutex_unlock(&i915->gt.timelines.mutex);
@@ -904,57 +973,38 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
- ret = true;
-unlock:
- mutex_unlock(&i915->gpu_error.wedge_mutex);
- return ret;
+ return true;
}
-struct __i915_reset {
- struct drm_i915_private *i915;
- unsigned int stalled_mask;
-};
-
-static int __i915_reset__BKL(void *data)
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
- struct __i915_reset *arg = data;
- int err;
+ struct i915_gpu_error *error = &i915->gpu_error;
+ bool result;
- err = intel_gpu_reset(arg->i915, ALL_ENGINES);
- if (err)
- return err;
+ mutex_lock(&error->wedge_mutex);
+ result = __i915_gem_unset_wedged(i915);
+ mutex_unlock(&error->wedge_mutex);
- return gt_reset(arg->i915, arg->stalled_mask);
+ return result;
}
-#if RESET_UNDER_STOP_MACHINE
-/*
- * XXX An alternative to using stop_machine would be to park only the
- * processes that have a GGTT mmap. By remote parking the threads (SIGSTOP)
- * we should be able to prevent their memmory accesses via the lost fence
- * registers over the course of the reset without the potential recursive
- * of mutexes between the pagefault handler and reset.
- *
- * See igt/gem_mmap_gtt/hang
- */
-#define __do_reset(fn, arg) stop_machine(fn, arg, NULL)
-#else
-#define __do_reset(fn, arg) fn(arg)
-#endif
-
-static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t stalled_mask)
{
- struct __i915_reset arg = { i915, stalled_mask };
int err, i;
- err = __do_reset(__i915_reset__BKL, &arg);
+ gt_revoke(i915);
+
+ err = intel_gpu_reset(i915, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
- msleep(100);
- err = __do_reset(__i915_reset__BKL, &arg);
+ msleep(10 * (i + 1));
+ err = intel_gpu_reset(i915, ALL_ENGINES);
}
+ if (err)
+ return err;
- return err;
+ return gt_reset(i915, stalled_mask);
}
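do_reset() now retries the whole hardware reset with a small, linearly growing delay instead of a fixed 100ms sleep under stop_machine(). A standalone sketch of that retry shape; try_hw_reset() and the retry count are stand-ins for intel_gpu_reset() and RESET_MAX_RETRIES.

#include <stdio.h>
#include <unistd.h>

static int try_hw_reset(int attempt)
{
	return attempt < 2 ? -1 : 0;	/* pretend the third attempt succeeds */
}

int main(void)
{
	int err = try_hw_reset(0);
	int i;

	for (i = 0; err && i < 3; i++) {
		usleep((i + 1) * 10 * 1000);	/* 10ms, 20ms, 30ms, like msleep(10 * (i + 1)) */
		err = try_hw_reset(i + 1);
	}

	printf("reset %s after %d retries\n", err ? "gave up" : "succeeded", i);
	return 0;
}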
/**
@@ -966,8 +1016,6 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
*
- * Caller must hold the struct_mutex.
- *
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
@@ -977,7 +1025,7 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
* - re-init display
*/
void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
+ intel_engine_mask_t stalled_mask,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
@@ -990,7 +1038,7 @@ void i915_reset(struct drm_i915_private *i915,
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
/* Clear any previous failed attempts at recovery. Time to try again. */
- if (!i915_gem_unset_wedged(i915))
+ if (!__i915_gem_unset_wedged(i915))
return;
if (reason)
@@ -1007,11 +1055,17 @@ void i915_reset(struct drm_i915_private *i915,
goto error;
}
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_disable_interrupts(i915);
+
if (do_reset(i915, stalled_mask)) {
dev_err(i915->drm.dev, "Failed to reset chip\n");
goto taint;
}
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_enable_interrupts(i915);
+
intel_overlay_reset(i915);
/*
@@ -1033,7 +1087,7 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
- if (!i915_terminally_wedged(error))
+ if (!__i915_wedged(error))
reset_restart(i915);
return;
@@ -1052,14 +1106,14 @@ taint:
*/
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
- i915_gem_set_wedged(i915);
+ __i915_gem_set_wedged(i915);
goto finish;
}
static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
struct intel_engine_cs *engine)
{
- return intel_gpu_reset(i915, intel_engine_flag(engine));
+ return intel_gpu_reset(i915, engine->mask);
}
/**
@@ -1144,7 +1198,12 @@ static void i915_reset_device(struct drm_i915_private *i915,
i915_wedge_on_timeout(&w, i915, 5 * HZ) {
intel_prepare_reset(i915);
+ /* Flush everyone using a resource about to be clobbered */
+ synchronize_srcu_expedited(&error->reset_backoff_srcu);
+
+ mutex_lock(&error->wedge_mutex);
i915_reset(i915, engine_mask, reason);
+ mutex_unlock(&error->wedge_mutex);
intel_finish_reset(i915);
}
@@ -1153,44 +1212,50 @@ static void i915_reset_device(struct drm_i915_private *i915,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
-void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
+ intel_uncore_rmw(uncore, reg, 0, 0);
+}
+
+void i915_clear_error_registers(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
u32 eir;
- if (!IS_GEN(dev_priv, 2))
- I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+ if (!IS_GEN(i915, 2))
+ clear_register(uncore, PGTBL_ER);
- if (INTEL_GEN(dev_priv) < 4)
- I915_WRITE(IPEIR, I915_READ(IPEIR));
+ if (INTEL_GEN(i915) < 4)
+ clear_register(uncore, IPEIR(RENDER_RING_BASE));
else
- I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+ clear_register(uncore, IPEIR_I965);
- I915_WRITE(EIR, I915_READ(EIR));
- eir = I915_READ(EIR);
+ clear_register(uncore, EIR);
+ eir = intel_uncore_read(uncore, EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
+ rmw_set(uncore, EMR, eir);
+ intel_uncore_write(uncore, GEN2_IIR,
+ I915_MASTER_ERROR_INTERRUPT);
}
- if (INTEL_GEN(dev_priv) >= 8) {
- I915_WRITE(GEN8_RING_FAULT_REG,
- I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
- POSTING_READ(GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(i915) >= 8) {
+ rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 6) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- I915_WRITE(RING_FAULT_REG(engine),
- I915_READ(RING_FAULT_REG(engine)) &
- ~RING_FAULT_VALID);
+ for_each_engine(engine, i915, id) {
+ rmw_clear(uncore,
+ RING_FAULT_REG(engine), RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore,
+ RING_FAULT_REG(engine));
}
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
}
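The rmw_set()/rmw_clear() calls above are thin wrappers whose definitions sit earlier in i915_reset.c, outside this hunk. Their presumed shape, shown only to make the read-modify-write intent explicit, is:

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);	/* clear nothing, set 'set' */
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);	/* clear 'clr', set nothing */
}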
@@ -1208,13 +1273,14 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_i915_private *i915,
- u32 engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...)
{
+ struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
char error_msg[80];
char *msg = NULL;
@@ -1237,7 +1303,7 @@ void i915_handle_error(struct drm_i915_private *i915,
*/
wakeref = intel_runtime_pm_get(i915);
- engine_mask &= INTEL_INFO(i915)->ring_mask;
+ engine_mask &= INTEL_INFO(i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(i915, engine_mask, msg);
@@ -1248,20 +1314,19 @@ void i915_handle_error(struct drm_i915_private *i915,
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(i915) &&
- !i915_terminally_wedged(&i915->gpu_error)) {
+ if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
for_each_engine_masked(engine, i915, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags))
+ &error->flags))
continue;
if (i915_reset_engine(engine, msg) == 0)
- engine_mask &= ~intel_engine_flag(engine);
+ engine_mask &= ~engine->mask;
clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
- wake_up_bit(&i915->gpu_error.flags,
+ &error->flags);
+ wake_up_bit(&error->flags,
I915_RESET_ENGINE + engine->id);
}
}
@@ -1270,18 +1335,20 @@ void i915_handle_error(struct drm_i915_private *i915,
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
- if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) {
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
- goto out;
+ if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
+ wait_event(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF, &error->flags));
+ goto out; /* piggy-back on the other reset */
}
+ /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
+ synchronize_rcu_expedited();
+
/* Prevent any other reset-engine attempt. */
for_each_engine(engine, i915, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
+ &error->flags))
+ wait_on_bit(&error->flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
@@ -1290,16 +1357,74 @@ void i915_handle_error(struct drm_i915_private *i915,
for_each_engine(engine, i915, tmp) {
clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ &error->flags);
}
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
+ clear_bit(I915_RESET_BACKOFF, &error->flags);
+ wake_up_all(&error->reset_queue);
out:
intel_runtime_pm_put(i915, wakeref);
}
+int i915_reset_trylock(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ int srcu;
+
+ might_lock(&error->reset_backoff_srcu);
+ might_sleep();
+
+ rcu_read_lock();
+ while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
+ rcu_read_unlock();
+
+ if (wait_event_interruptible(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &error->flags)))
+ return -EINTR;
+
+ rcu_read_lock();
+ }
+ srcu = srcu_read_lock(&error->reset_backoff_srcu);
+ rcu_read_unlock();
+
+ return srcu;
+}
+
+void i915_reset_unlock(struct drm_i915_private *i915, int tag)
+__releases(&i915->gpu_error.reset_backoff_srcu)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+
+ srcu_read_unlock(&error->reset_backoff_srcu, tag);
+}
+
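A hedged sketch of the intended read-side usage of the new SRCU helpers, bracketing access to resources a reset may clobber; everything except i915_reset_trylock()/i915_reset_unlock() is hypothetical.

static int access_under_reset_srcu(struct drm_i915_private *i915)
{
	int tag, err;

	tag = i915_reset_trylock(i915);	/* waits out I915_RESET_BACKOFF, takes the SRCU read lock */
	if (tag < 0)
		return tag;		/* -EINTR while waiting for the reset to finish */

	err = 0;			/* ... touch the GGTT / fence registers here ... */

	i915_reset_unlock(i915, tag);	/* ends the SRCU read-side critical section */
	return err;
}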
+int i915_terminally_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+
+ might_sleep();
+
+ if (!__i915_wedged(error))
+ return 0;
+
+ /* Reset still in progress? Maybe we will recover? */
+ if (!test_bit(I915_RESET_BACKOFF, &error->flags))
+ return -EIO;
+
+ /* XXX intel_reset_finish() still takes struct_mutex!!! */
+ if (mutex_is_locked(&i915->drm.struct_mutex))
+ return -EAGAIN;
+
+ if (wait_event_interruptible(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &error->flags)))
+ return -EINTR;
+
+ return __i915_wedged(error) ? -EIO : 0;
+}
+
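i915_terminally_wedged() now may sleep and reports an errno instead of testing a flag, so the expected caller pattern is an early bail-out. A minimal sketch; the_ioctl() is purely hypothetical.

static int the_ioctl(struct drm_i915_private *i915)
{
	int err;

	err = i915_terminally_wedged(i915);
	if (err)
		return err;	/* -EIO (wedged), -EAGAIN or -EINTR */

	return 0;		/* ... proceed with the real work ... */
}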
bool i915_reset_flush(struct drm_i915_private *i915)
{
int err;
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h
index f2d347f319df..3c0450289b8f 100644
--- a/drivers/gpu/drm/i915/i915_reset.h
+++ b/drivers/gpu/drm/i915/i915_reset.h
@@ -9,14 +9,18 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/srcu.h>
+
+#include "intel_engine_types.h"
struct drm_i915_private;
+struct i915_request;
struct intel_engine_cs;
struct intel_guc;
__printf(4, 5)
void i915_handle_error(struct drm_i915_private *i915,
- u32 engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
@@ -24,7 +28,7 @@ void i915_handle_error(struct drm_i915_private *i915,
void i915_clear_error_registers(struct drm_i915_private *i915);
void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
+ intel_engine_mask_t stalled_mask,
const char *reason);
int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
@@ -32,10 +36,16 @@ int i915_reset_engine(struct intel_engine_cs *engine,
void i915_reset_request(struct i915_request *rq, bool guilty);
bool i915_reset_flush(struct drm_i915_private *i915);
+int __must_check i915_reset_trylock(struct drm_i915_private *i915);
+void i915_reset_unlock(struct drm_i915_private *i915, int tag);
+
+int i915_terminally_wedged(struct drm_i915_private *i915);
+
bool intel_has_gpu_reset(struct drm_i915_private *i915);
bool intel_has_reset_engine(struct drm_i915_private *i915);
-int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
+int intel_gpu_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask);
int intel_reset_guc(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 8bc042551692..39bc4f54e272 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -7,9 +7,16 @@
#include <linux/mutex.h>
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
+static struct i915_global_scheduler {
+ struct i915_global base;
+ struct kmem_cache *slab_dependencies;
+ struct kmem_cache *slab_priorities;
+} global;
+
static DEFINE_SPINLOCK(schedule_lock);
static const struct i915_request *
@@ -18,6 +25,11 @@ node_to_request(const struct i915_sched_node *node)
return container_of(node, const struct i915_request, sched);
}
+static inline bool node_started(const struct i915_sched_node *node)
+{
+ return i915_request_started(node_to_request(node));
+}
+
static inline bool node_signaled(const struct i915_sched_node *node)
{
return i915_request_completed(node_to_request(node));
@@ -29,19 +41,20 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
node->attr.priority = I915_PRIORITY_INVALID;
+ node->semaphores = 0;
+ node->flags = 0;
}
static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
+i915_dependency_alloc(void)
{
- return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+ return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}
static void
-i915_dependency_free(struct drm_i915_private *i915,
- struct i915_dependency *dep)
+i915_dependency_free(struct i915_dependency *dep)
{
- kmem_cache_free(i915->dependencies, dep);
+ kmem_cache_free(global.slab_dependencies, dep);
}
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -51,7 +64,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
{
bool ret = false;
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
@@ -60,39 +73,42 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
dep->signaler = signal;
dep->flags = flags;
+ /* Keep track of whether anyone on this chain has a semaphore */
+ if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
+ !node_started(signal))
+ node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+
ret = true;
}
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
return ret;
}
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal)
{
struct i915_dependency *dep;
- dep = i915_dependency_alloc(i915);
+ dep = i915_dependency_alloc();
if (!dep)
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_ALLOC))
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
return 0;
}
-void i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node)
+void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
GEM_BUG_ON(!list_empty(&node->link));
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
/*
* Everyone we depended upon (the fences we wait to be signaled)
@@ -106,7 +122,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
}
/* Remove ourselves from everyone who depends upon us */
@@ -116,10 +132,10 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
}
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@@ -193,7 +209,7 @@ find_priolist:
if (prio == I915_PRIORITY_NORMAL) {
p = &execlists->default_priolist;
} else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
if (unlikely(!p)) {
prio = I915_PRIORITY_NORMAL; /* recurses just once */
@@ -301,6 +317,10 @@ static void __i915_schedule(struct i915_request *rq,
list_for_each_entry(dep, &dfs, dfs_link) {
struct i915_sched_node *node = dep->signaler;
+ /* If we are already flying, we know we have no signalers */
+ if (node_started(node))
+ continue;
+
/*
* Within an engine, there can be no cycle, but we may
* refer to the same dependency chain multiple times
@@ -313,7 +333,6 @@ static void __i915_schedule(struct i915_request *rq,
if (node_signaled(p->signaler))
continue;
- GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
if (prio > READ_ONCE(p->signaler->attr.priority))
list_move_tail(&p->dfs_link, &dfs);
}
@@ -337,7 +356,7 @@ static void __i915_schedule(struct i915_request *rq,
memset(&cache, 0, sizeof(cache));
engine = rq->engine;
- spin_lock_irq(&engine->timeline.lock);
+ spin_lock(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
@@ -388,30 +407,73 @@ static void __i915_schedule(struct i915_request *rq,
tasklet_hi_schedule(&engine->execlists.tasklet);
}
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock(&engine->timeline.lock);
}
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
__i915_schedule(rq, attr);
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
}
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
struct i915_sched_attr attr;
+ unsigned long flags;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
return;
- spin_lock_bh(&schedule_lock);
+ spin_lock_irqsave(&schedule_lock, flags);
attr = rq->sched.attr;
attr.priority |= bump;
__i915_schedule(rq, &attr);
- spin_unlock_bh(&schedule_lock);
+ spin_unlock_irqrestore(&schedule_lock, flags);
+}
+
+void __i915_priolist_free(struct i915_priolist *p)
+{
+ kmem_cache_free(global.slab_priorities, p);
+}
+
+static void i915_global_scheduler_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_priorities);
+}
+
+static void i915_global_scheduler_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_priorities);
+}
+
+static struct i915_global_scheduler global = { {
+ .shrink = i915_global_scheduler_shrink,
+ .exit = i915_global_scheduler_exit,
+} };
+
+int __init i915_global_scheduler_init(void)
+{
+ global.slab_dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN);
+ if (!global.slab_dependencies)
+ return -ENOMEM;
+
+ global.slab_priorities = KMEM_CACHE(i915_priolist,
+ SLAB_HWCACHE_ALIGN);
+ if (!global.slab_priorities)
+ goto err_priorities;
+
+ i915_global_register(&global.base);
+ return 0;
+
+err_priorities:
+ kmem_cache_destroy(global.slab_dependencies);
+ return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index dbe9cb7ecd82..07d243acf553 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,80 +8,22 @@
#define _I915_SCHEDULER_H_
#include <linux/bitops.h>
+#include <linux/list.h>
#include <linux/kernel.h>
-#include <uapi/drm/i915_drm.h>
+#include "i915_scheduler_types.h"
-struct drm_i915_private;
-struct i915_request;
-struct intel_engine_cs;
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-enum {
- I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
- I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
- I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
- I915_PRIORITY_INVALID = INT_MIN
-};
-
-#define I915_USER_PRIORITY_SHIFT 2
-#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
-
-#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
-#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-
-#define I915_PRIORITY_WAIT ((u8)BIT(0))
-#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
-
-struct i915_sched_attr {
- /**
- * @priority: execution and service priority
- *
- * All clients are equal, but some are more equal than others!
- *
- * Requests from a context with a greater (more positive) value of
- * @priority will be executed before those with a lower @priority
- * value, forming a simple QoS.
- *
- * The &drm_i915_private.kernel_context is assigned the lowest priority.
- */
- int priority;
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- *
- * There is no active component to the "scheduler". As we know the dependency
- * DAG of each request, we are able to insert it into a sorted queue when it
- * is ready, and are able to reorder its portion of the graph to accommodate
- * dynamic priority changes.
- */
-struct i915_sched_node {
- struct list_head signalers_list; /* those before us, we depend upon */
- struct list_head waiters_list; /* those after us, they depend upon us */
- struct list_head link;
- struct i915_sched_attr attr;
-};
-
-struct i915_dependency {
- struct i915_sched_node *signaler;
- struct list_head signal_link;
- struct list_head wait_link;
- struct list_head dfs_link;
- unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; \
+ (plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
+ (plist)->used &= ~BIT(idx)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx], \
+ sched.link)
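priolist_for_each_request_consume() walks only the occupied priority sub-lists by keeping a bitmask ('used'), picking the next index with __ffs() and clearing its bit once that sub-list is drained. A standalone sketch of the same technique, using __builtin_ctz() in place of __ffs().

#include <stdio.h>

int main(void)
{
	const char *requests[4][3] = {
		{ "rq-a", NULL }, { NULL }, { "rq-b", "rq-c", NULL }, { NULL },
	};
	unsigned int used = 0x5;	/* sub-lists 0 and 2 hold requests */

	while (used) {
		unsigned int idx = __builtin_ctz(used);	/* lowest set bit, like __ffs() */
		int i;

		for (i = 0; requests[idx][i]; i++)
			printf("consume %s from sub-list %u\n", requests[idx][i], idx);

		used &= ~(1u << idx);	/* this sub-list is now drained */
	}
	return 0;
}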
void i915_sched_node_init(struct i915_sched_node *node);
@@ -90,12 +32,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_dependency *dep,
unsigned long flags);
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal);
-void i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node);
+void i915_sched_node_fini(struct i915_sched_node *node);
void i915_schedule(struct i915_request *request,
const struct i915_sched_attr *attr);
@@ -105,4 +45,11 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+void __i915_priolist_free(struct i915_priolist *p);
+static inline void i915_priolist_free(struct i915_priolist *p)
+{
+ if (p->priority != I915_PRIORITY_NORMAL)
+ __i915_priolist_free(p);
+}
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
new file mode 100644
index 000000000000..f1af3916a808
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_TYPES_H_
+#define _I915_SCHEDULER_TYPES_H_
+
+#include <linux/list.h>
+
+#include "i915_priolist_types.h"
+#include "intel_engine_types.h"
+
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
+struct i915_sched_attr {
+ /**
+ * @priority: execution and service priority
+ *
+ * All clients are equal, but some are more equal than others!
+ *
+ * Requests from a context with a greater (more positive) value of
+ * @priority will be executed before those with a lower @priority
+ * value, forming a simple QoS.
+ *
+ * The &drm_i915_private.kernel_context is assigned the lowest priority.
+ */
+ int priority;
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ *
+ * There is no active component to the "scheduler". As we know the dependency
+ * DAG of each request, we are able to insert it into a sorted queue when it
+ * is ready, and are able to reorder its portion of the graph to accommodate
+ * dynamic priority changes.
+ */
+struct i915_sched_node {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct list_head link;
+ struct i915_sched_attr attr;
+ unsigned int flags;
+#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
+ intel_engine_mask_t semaphores;
+};
+
+struct i915_dependency {
+ struct i915_sched_node *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+#endif /* _I915_SCHEDULER_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d2f2a9c2fabd..95f3dab1b229 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -25,8 +25,10 @@
*/
#include <drm/i915_drm.h>
-#include "intel_drv.h"
+
#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7c58b049ecb5..5387aafd3424 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -192,7 +192,7 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
__i915_sw_fence_notify(fence, FENCE_FREE);
}
-static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
debug_fence_assert(fence);
@@ -202,7 +202,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence)
__i915_sw_fence_complete(fence, NULL);
}
-static void i915_sw_fence_await(struct i915_sw_fence *fence)
+void i915_sw_fence_await(struct i915_sw_fence *fence)
{
debug_fence_assert(fence);
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
@@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}
-struct i915_sw_dma_fence_cb {
- struct dma_fence_cb base;
- struct i915_sw_fence *fence;
-};
-
struct i915_sw_dma_fence_cb_timer {
struct i915_sw_dma_fence_cb base;
struct dma_fence *dma;
@@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
return ret;
}
+static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
+{
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+ i915_sw_fence_complete(cb->fence);
+}
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+ struct dma_fence *dma,
+ struct i915_sw_dma_fence_cb *cb)
+{
+ int ret;
+
+ debug_fence_assert(fence);
+
+ if (dma_fence_is_signaled(dma))
+ return 0;
+
+ cb->fence = fence;
+ i915_sw_fence_await(fence);
+
+ ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
+ if (ret == 0) {
+ ret = 1;
+ } else {
+ i915_sw_fence_complete(fence);
+ if (ret == -ENOENT) /* fence already signaled */
+ ret = 0;
+ }
+
+ return ret;
+}
+
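Exporting i915_sw_fence_await()/i915_sw_fence_complete() and the callback struct lets a caller await a dma_fence without an allocation by embedding the i915_sw_dma_fence_cb in its own object. A hedged sketch of that usage; struct my_work and my_work_await() are hypothetical.

struct my_work {
	struct i915_sw_fence ready;		/* initialised with i915_sw_fence_init() elsewhere */
	struct i915_sw_dma_fence_cb cb;		/* must stay alive until the callback fires */
};

static int my_work_await(struct my_work *w, struct dma_fence *dma)
{
	int ret;

	ret = __i915_sw_fence_await_dma_fence(&w->ready, dma, &w->cb);
	if (ret < 0)
		return ret;			/* dma_fence_add_callback() error */

	/* ret == 1: callback installed; ret == 0: fence was already signaled */
	return 0;
}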
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
const struct dma_fence_ops *exclude,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 0e055ea0179f..9cb5c3b307a6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -9,14 +9,13 @@
#ifndef _I915_SW_FENCE_H_
#define _I915_SW_FENCE_H_
+#include <linux/dma-fence.h>
#include <linux/gfp.h>
#include <linux/kref.h>
#include <linux/notifier.h> /* for NOTIFY_DONE */
#include <linux/wait.h>
struct completion;
-struct dma_fence;
-struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
gfp_t gfp);
+
+struct i915_sw_dma_fence_cb {
+ struct dma_fence_cb base;
+ struct i915_sw_fence *fence;
+};
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+ struct dma_fence *dma,
+ struct i915_sw_dma_fence_cb *cb);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
+
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
const struct dma_fence_ops *exclude,
@@ -79,6 +88,9 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
unsigned long timeout,
gfp_t gfp);
+void i915_sw_fence_await(struct i915_sw_fence *fence);
+void i915_sw_fence_complete(struct i915_sw_fence *fence);
+
static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
{
return atomic_read(&fence->pending) <= 0;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index b2202d2e58a2..5fbea0892f33 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -6,19 +6,32 @@
#include "i915_drv.h"
-#include "i915_timeline.h"
+#include "i915_active.h"
#include "i915_syncmap.h"
+#include "i915_timeline.h"
+
+#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
+#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
struct i915_timeline_hwsp {
- struct i915_vma *vma;
+ struct i915_gt_timelines *gt;
struct list_head free_link;
+ struct i915_vma *vma;
u64 free_bitmap;
};
-static inline struct i915_timeline_hwsp *
-i915_timeline_hwsp(const struct i915_timeline *tl)
+struct i915_timeline_cacheline {
+ struct i915_active active;
+ struct i915_timeline_hwsp *hwsp;
+ void *vaddr;
+#define CACHELINE_BITS 6
+#define CACHELINE_FREE CACHELINE_BITS
+};
+
+static inline struct drm_i915_private *
+hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
{
- return tl->hwsp_ggtt->private;
+ return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
}
static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
@@ -71,6 +84,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
vma->private = hwsp;
hwsp->vma = vma;
hwsp->free_bitmap = ~0ull;
+ hwsp->gt = gt;
spin_lock(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
@@ -88,14 +102,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
return hwsp->vma;
}
-static void hwsp_free(struct i915_timeline *timeline)
+static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
- struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
- struct i915_timeline_hwsp *hwsp;
-
- hwsp = i915_timeline_hwsp(timeline);
- if (!hwsp) /* leave global HWSP alone! */
- return;
+ struct i915_gt_timelines *gt = hwsp->gt;
spin_lock(&gt->hwsp_lock);
@@ -103,7 +112,8 @@ static void hwsp_free(struct i915_timeline *timeline)
if (!hwsp->free_bitmap)
list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
- hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES);
+ GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
+ hwsp->free_bitmap |= BIT_ULL(cacheline);
/* And if no one is left using it, give the page back to the system */
if (hwsp->free_bitmap == ~0ull) {
@@ -115,9 +125,78 @@ static void hwsp_free(struct i915_timeline *timeline)
spin_unlock(&gt->hwsp_lock);
}
+static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
+{
+ GEM_BUG_ON(!i915_active_is_idle(&cl->active));
+
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ i915_vma_put(cl->hwsp->vma);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
+ i915_active_fini(&cl->active);
+ kfree(cl);
+}
+
+static void __cacheline_retire(struct i915_active *active)
+{
+ struct i915_timeline_cacheline *cl =
+ container_of(active, typeof(*cl), active);
+
+ i915_vma_unpin(cl->hwsp->vma);
+ if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
+ __idle_cacheline_free(cl);
+}
+
+static struct i915_timeline_cacheline *
+cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
+{
+ struct i915_timeline_cacheline *cl;
+ void *vaddr;
+
+ GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
+
+ cl = kmalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return ERR_PTR(-ENOMEM);
+
+ vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ kfree(cl);
+ return ERR_CAST(vaddr);
+ }
+
+ i915_vma_get(hwsp->vma);
+ cl->hwsp = hwsp;
+ cl->vaddr = page_pack_bits(vaddr, cacheline);
+
+ i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
+
+ return cl;
+}
+
+static void cacheline_acquire(struct i915_timeline_cacheline *cl)
+{
+ if (cl && i915_active_acquire(&cl->active))
+ __i915_vma_pin(cl->hwsp->vma);
+}
+
+static void cacheline_release(struct i915_timeline_cacheline *cl)
+{
+ if (cl)
+ i915_active_release(&cl->active);
+}
+
+static void cacheline_free(struct i915_timeline_cacheline *cl)
+{
+ GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
+ cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
+
+ if (i915_active_is_idle(&cl->active))
+ __idle_cacheline_free(cl);
+}
+
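The cacheline bookkeeping packs a 6-bit cacheline index, plus a spare 'free' flag at bit CACHELINE_FREE, into the low bits of the page-aligned HWSP mapping, in the spirit of page_pack_bits()/ptr_set_bit() above. A standalone sketch of that pointer packing; the helpers are local illustrations, not the driver's macros.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHELINE_BITS	6			/* 64 cachelines per 4K page */
#define CACHELINE_FREE	CACHELINE_BITS		/* spare flag bit above the index */

static void *pack(void *page, unsigned int cacheline)
{
	/* the low CACHELINE_FREE+1 bits of the pointer must be zero */
	assert(((uintptr_t)page & ((1u << (CACHELINE_FREE + 1)) - 1)) == 0);
	return (void *)((uintptr_t)page | cacheline);
}

static unsigned int unpack_cacheline(const void *packed)
{
	return (uintptr_t)packed & ((1u << CACHELINE_BITS) - 1);
}

static void *unpack_page(const void *packed)
{
	return (void *)((uintptr_t)packed & ~(uintptr_t)((1u << (CACHELINE_FREE + 1)) - 1));
}

int main(void)
{
	void *page = aligned_alloc(4096, 4096);	/* page aligned, low 12 bits free */
	void *vaddr = pack(page, 17);		/* ~ page_pack_bits(vaddr, cacheline) */

	vaddr = (void *)((uintptr_t)vaddr | (1u << CACHELINE_FREE));	/* ~ ptr_set_bit() */

	printf("cacheline=%u free=%d page=%p\n",
	       unpack_cacheline(vaddr),
	       !!((uintptr_t)vaddr & (1u << CACHELINE_FREE)),	/* ~ ptr_test_bit() */
	       unpack_page(vaddr));

	free(page);
	return 0;
}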
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *timeline,
- const char *name,
struct i915_vma *hwsp)
{
void *vaddr;
@@ -133,37 +212,47 @@ int i915_timeline_init(struct drm_i915_private *i915,
BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
timeline->i915 = i915;
- timeline->name = name;
timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp;
+ timeline->hwsp_cacheline = NULL;
- timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
if (!hwsp) {
+ struct i915_timeline_cacheline *cl;
unsigned int cacheline;
hwsp = hwsp_alloc(timeline, &cacheline);
if (IS_ERR(hwsp))
return PTR_ERR(hwsp);
+ cl = cacheline_alloc(hwsp->private, cacheline);
+ if (IS_ERR(cl)) {
+ __idle_hwsp_free(hwsp->private, cacheline);
+ return PTR_ERR(cl);
+ }
+
+ timeline->hwsp_cacheline = cl;
timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
- }
- timeline->hwsp_ggtt = i915_vma_get(hwsp);
- vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- hwsp_free(timeline);
- i915_vma_put(hwsp);
- return PTR_ERR(vaddr);
+ vaddr = page_mask_bits(cl->vaddr);
+ } else {
+ timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
+
+ vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
}
timeline->hwsp_seqno =
memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
+ timeline->hwsp_ggtt = i915_vma_get(hwsp);
+ GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
+
timeline->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&timeline->lock);
+ mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
@@ -236,18 +325,19 @@ void i915_timeline_fini(struct i915_timeline *timeline)
{
GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(&timeline->requests));
- GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
i915_syncmap_free(&timeline->sync);
- hwsp_free(timeline);
- i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+ if (timeline->hwsp_cacheline)
+ cacheline_free(timeline->hwsp_cacheline);
+ else
+ i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+
i915_vma_put(timeline->hwsp_ggtt);
}
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
- const char *name,
struct i915_vma *global_hwsp)
{
struct i915_timeline *timeline;
@@ -257,7 +347,7 @@ i915_timeline_create(struct drm_i915_private *i915,
if (!timeline)
return ERR_PTR(-ENOMEM);
- err = i915_timeline_init(i915, timeline, name, global_hwsp);
+ err = i915_timeline_init(i915, timeline, global_hwsp);
if (err) {
kfree(timeline);
return ERR_PTR(err);
@@ -284,6 +374,7 @@ int i915_timeline_pin(struct i915_timeline *tl)
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
+ cacheline_acquire(tl->hwsp_cacheline);
timeline_add_to_active(tl);
return 0;
@@ -293,6 +384,157 @@ unpin:
return err;
}
+static u32 timeline_advance(struct i915_timeline *tl)
+{
+ GEM_BUG_ON(!tl->pin_count);
+ GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
+
+ return tl->seqno += 1 + tl->has_initial_breadcrumb;
+}
+
+static void timeline_rollback(struct i915_timeline *tl)
+{
+ tl->seqno -= 1 + tl->has_initial_breadcrumb;
+}
+
+static noinline int
+__i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
+{
+ struct i915_timeline_cacheline *cl;
+ unsigned int cacheline;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err;
+
+ /*
+ * If there is an outstanding GPU reference to this cacheline,
+ * such as it being sampled by a HW semaphore on another timeline,
+ * we cannot wraparound our seqno value (the HW semaphore does
+ * a strict greater-than-or-equals compare, not i915_seqno_passed).
+ * So if the cacheline is still busy, we must detach ourselves
+ * from it and leave it inflight alongside its users.
+ *
+ * However, if nobody is watching and we can guarantee that nobody
+ * will, we could simply reuse the same cacheline.
+ *
+ * if (i915_active_request_is_signaled(&tl->last_request) &&
+ * i915_active_is_signaled(&tl->hwsp_cacheline->active))
+ * return 0;
+ *
+ * That seems unlikely for a busy timeline that needed to wrap in
+ * the first place, so just replace the cacheline.
+ */
+
+ vma = hwsp_alloc(tl, &cacheline);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_rollback;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err) {
+ __idle_hwsp_free(vma->private, cacheline);
+ goto err_rollback;
+ }
+
+ cl = cacheline_alloc(vma->private, cacheline);
+ if (IS_ERR(cl)) {
+ err = PTR_ERR(cl);
+ __idle_hwsp_free(vma->private, cacheline);
+ goto err_unpin;
+ }
+ GEM_BUG_ON(cl->hwsp->vma != vma);
+
+ /*
+ * Attach the old cacheline to the current request, so that we only
+ * free it after the current request is retired, which ensures that
+ * all writes into the cacheline from previous requests are complete.
+ */
+ err = i915_active_ref(&tl->hwsp_cacheline->active,
+ tl->fence_context, rq);
+ if (err)
+ goto err_cacheline;
+
+ cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
+ cacheline_free(tl->hwsp_cacheline);
+
+ i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
+ i915_vma_put(tl->hwsp_ggtt);
+
+ tl->hwsp_ggtt = i915_vma_get(vma);
+
+ vaddr = page_mask_bits(cl->vaddr);
+ tl->hwsp_offset = cacheline * CACHELINE_BYTES;
+ tl->hwsp_seqno =
+ memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
+
+ tl->hwsp_offset += i915_ggtt_offset(vma);
+
+ cacheline_acquire(cl);
+ tl->hwsp_cacheline = cl;
+
+ *seqno = timeline_advance(tl);
+ GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
+ return 0;
+
+err_cacheline:
+ cacheline_free(cl);
+err_unpin:
+ i915_vma_unpin(vma);
+err_rollback:
+ timeline_rollback(tl);
+ return err;
+}
+
+int i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
+{
+ *seqno = timeline_advance(tl);
+
+ /* Replace the HWSP on wraparound for HW semaphores */
+ if (unlikely(!*seqno && tl->hwsp_cacheline))
+ return __i915_timeline_get_seqno(tl, rq, seqno);
+
+ return 0;
+}
+
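i915_timeline_get_seqno() detects wraparound simply by seeing the advanced seqno come back as 0: the counter is a u32 that grows by 1, or by 2 when an initial breadcrumb is emitted, so 0 can only be reached by wrapping past 0xffffffff. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t timeline_advance(uint32_t *seqno, int has_initial_breadcrumb)
{
	return *seqno += 1 + has_initial_breadcrumb;
}

int main(void)
{
	uint32_t seqno = UINT32_MAX - 5;	/* 0xfffffffa, about to wrap */
	int i;

	for (i = 0; i < 3; i++) {
		uint32_t next = timeline_advance(&seqno, 1);	/* step by 2 */

		printf("seqno=%08x%s\n", (unsigned int)next,
		       !next ? "  <- wrapped: replace the HWSP cacheline" : "");
	}
	return 0;
}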
+static int cacheline_ref(struct i915_timeline_cacheline *cl,
+ struct i915_request *rq)
+{
+ return i915_active_ref(&cl->active, rq->fence.context, rq);
+}
+
+int i915_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *to,
+ u32 *hwsp)
+{
+ struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
+ struct i915_timeline *tl = from->timeline;
+ int err;
+
+ GEM_BUG_ON(to->timeline == tl);
+
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = i915_request_completed(from);
+ if (!err)
+ err = cacheline_ref(cl, to);
+ if (!err) {
+ if (likely(cl == tl->hwsp_cacheline)) {
+ *hwsp = tl->hwsp_offset;
+ } else { /* across a seqno wrap, recover the original offset */
+ *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+ ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
+ CACHELINE_BYTES;
+ }
+ }
+ mutex_unlock(&tl->mutex);
+
+ return err;
+}
+
void i915_timeline_unpin(struct i915_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
@@ -300,6 +542,7 @@ void i915_timeline_unpin(struct i915_timeline *tl)
return;
timeline_remove_from_active(tl);
+ cacheline_release(tl->hwsp_cacheline);
/*
* Since this timeline is idle, all barriers upon which we were waiting
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 7bec7d2e45bf..27668a1a69a3 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -25,76 +25,14 @@
#ifndef I915_TIMELINE_H
#define I915_TIMELINE_H
-#include <linux/list.h>
-#include <linux/kref.h>
+#include <linux/lockdep.h>
#include "i915_active.h"
-#include "i915_request.h"
#include "i915_syncmap.h"
-#include "i915_utils.h"
-
-struct i915_vma;
-struct i915_timeline_hwsp;
-
-struct i915_timeline {
- u64 fence_context;
- u32 seqno;
-
- spinlock_t lock;
-#define TIMELINE_CLIENT 0 /* default subclass */
-#define TIMELINE_ENGINE 1
-
- unsigned int pin_count;
- const u32 *hwsp_seqno;
- struct i915_vma *hwsp_ggtt;
- u32 hwsp_offset;
-
- bool has_initial_breadcrumb;
-
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head requests;
-
- /* Contains an RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_active_request_get_request_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_active_request last_request;
-
- /**
- * We track the most recent seqno that we wait on in every context so
- * that we only have to emit a new await and dependency on a more
- * recent sync point. As the contexts may be executed out-of-order, we
- * have to track each individually and can not rely on an absolute
- * global_seqno. When we know that all tracked fences are completed
- * (i.e. when the driver is idle), we know that the syncmap is
- * redundant and we can discard it without loss of generality.
- */
- struct i915_syncmap *sync;
-
- /**
- * Barrier provides the ability to serialize ordering between different
- * timelines.
- *
- * Users can call i915_timeline_set_barrier which will make all
- * subsequent submissions to this timeline be executed only after the
- * barrier has been completed.
- */
- struct i915_active_request barrier;
-
- struct list_head link;
- const char *name;
- struct drm_i915_private *i915;
-
- struct kref kref;
-};
+#include "i915_timeline_types.h"
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *tl,
- const char *name,
struct i915_vma *hwsp);
void i915_timeline_fini(struct i915_timeline *tl);
@@ -119,7 +57,6 @@ i915_timeline_set_subclass(struct i915_timeline *timeline,
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
- const char *name,
struct i915_vma *global_hwsp);
static inline struct i915_timeline *
@@ -160,25 +97,17 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
}
int i915_timeline_pin(struct i915_timeline *tl);
+int i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno);
void i915_timeline_unpin(struct i915_timeline *tl);
+int i915_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *until,
+ u32 *hwsp_offset);
+
void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
void i915_timelines_fini(struct drm_i915_private *i915);
-/**
- * i915_timeline_set_barrier - orders submission between different timelines
- * @timeline: timeline to set the barrier on
- * @rq: request after which new submissions can proceed
- *
- * Sets the passed in request as the serialization point for all subsequent
- * submissions on @timeline. Subsequent requests will not be submitted to GPU
- * until the barrier has been completed.
- */
-static inline int
-i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
-{
- return i915_active_request_set(&tl->barrier, rq);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
new file mode 100644
index 000000000000..5256a0b5c5f7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -0,0 +1,70 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_TIMELINE_TYPES_H__
+#define __I915_TIMELINE_TYPES_H__
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_private;
+struct i915_vma;
+struct i915_timeline_cacheline;
+struct i915_syncmap;
+
+struct i915_timeline {
+ u64 fence_context;
+ u32 seqno;
+
+ spinlock_t lock;
+#define TIMELINE_CLIENT 0 /* default subclass */
+#define TIMELINE_ENGINE 1
+ struct mutex mutex; /* protects the flow of requests */
+
+ unsigned int pin_count;
+ const u32 *hwsp_seqno;
+ struct i915_vma *hwsp_ggtt;
+ u32 hwsp_offset;
+
+ struct i915_timeline_cacheline *hwsp_cacheline;
+
+ bool has_initial_breadcrumb;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /* Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request, users must carefully acquire a reference to
+ * the request using i915_active_request_get_request_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_active_request last_request;
+
+ /**
+ * We track the most recent seqno that we wait on in every context so
+ * that we only have to emit a new await and dependency on a more
+ * recent sync point. As the contexts may be executed out-of-order, we
+ * have to track each individually and can not rely on an absolute
+ * global_seqno. When we know that all tracked fences are completed
+ * (i.e. when the driver is idle), we know that the syncmap is
+ * redundant and we can discard it without loss of generality.
+ */
+ struct i915_syncmap *sync;
+
+ struct list_head link;
+ struct drm_i915_private *i915;
+
+ struct kref kref;
+};
+
+#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index eab313c3163c..12893304c8f8 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -18,6 +18,87 @@
/* watermark/fifo updates */
+TRACE_EVENT(intel_pipe_enable,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+
+ TP_fast_assign(
+ enum pipe _pipe;
+ for_each_pipe(dev_priv, _pipe) {
+ __entry->frame[_pipe] =
+ dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+ __entry->scanline[_pipe] =
+ intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ }
+ __entry->pipe = pipe;
+ ),
+
+ TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_disable,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+
+ TP_fast_assign(
+ enum pipe _pipe;
+ for_each_pipe(dev_priv, _pipe) {
+ __entry->frame[_pipe] =
+ dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+ __entry->scanline[_pipe] =
+ intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ }
+ __entry->pipe = pipe;
+ ),
+
+ TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_crc,
+ TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
+ TP_ARGS(crtc, crcs),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(u32, crcs, 5)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+ __entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
+ __entry->crcs[3], __entry->crcs[4])
+);
+
TRACE_EVENT(intel_cpu_fifo_underrun,
TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
TP_ARGS(dev_priv, pipe),
@@ -627,7 +708,6 @@ DECLARE_EVENT_CLASS(i915_request,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global)
),
TP_fast_assign(
@@ -637,13 +717,11 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global)
+ __entry->hw_id, __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -673,7 +751,6 @@ TRACE_EVENT(i915_request_in,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global_seqno)
__field(u32, port)
__field(u32, prio)
),
@@ -685,15 +762,14 @@ TRACE_EVENT(i915_request_in,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
__entry->prio = rq->sched.attr.priority;
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->prio, __entry->global_seqno, __entry->port)
+ __entry->prio, __entry->port)
);
TRACE_EVENT(i915_request_out,
@@ -707,7 +783,6 @@ TRACE_EVENT(i915_request_out,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global_seqno)
__field(u32, completed)
),
@@ -718,14 +793,13 @@ TRACE_EVENT(i915_request_out,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global_seqno, __entry->completed)
+ __entry->completed)
);
#else
@@ -768,7 +842,6 @@ TRACE_EVENT(i915_request_wait_begin,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global)
__field(unsigned int, flags)
),
@@ -785,14 +858,13 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ !!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
);
diff --git a/drivers/gpu/drm/i915/i915_user_extensions.c b/drivers/gpu/drm/i915/i915_user_extensions.c
new file mode 100644
index 000000000000..c822d0aafd2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_user_extensions.c
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/nospec.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include <uapi/drm/i915_drm.h>
+
+#include "i915_user_extensions.h"
+#include "i915_utils.h"
+
+int i915_user_extensions(struct i915_user_extension __user *ext,
+ const i915_user_extension_fn *tbl,
+ unsigned int count,
+ void *data)
+{
+ unsigned int stackdepth = 512;
+
+ while (ext) {
+ int i, err;
+ u32 name;
+ u64 next;
+
+ if (!stackdepth--) /* recursion vs useful flexibility */
+ return -E2BIG;
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ext->rsvd); i++) {
+ err = check_user_mbz(&ext->rsvd[i]);
+ if (err)
+ return err;
+ }
+
+ if (get_user(name, &ext->name))
+ return -EFAULT;
+
+ err = -EINVAL;
+ if (name < count) {
+ name = array_index_nospec(name, count);
+ if (tbl[name])
+ err = tbl[name](ext, data);
+ }
+ if (err)
+ return err;
+
+ if (get_user(next, &ext->next_extension) ||
+ overflows_type(next, ext))
+ return -EFAULT;
+
+ ext = u64_to_user_ptr(next);
+ }
+
+ return 0;
+}
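The loop above walks a user-supplied singly linked chain: every node's flags and rsvd[] words must be zero, name is range-checked and hardened with array_index_nospec() before indexing the handler table, next_extension carries the user pointer to the following node, and stackdepth caps the chain length. A minimal userspace-side sketch of building such a chain follows; the struct mirrors the uapi layout of i915_user_extension, and the name values and helper are hypothetical placeholders rather than real i915 extension IDs.

/*
 * Userspace-side sketch: two extension nodes linked via next_extension.
 * The struct mirrors the uapi layout; the name values are hypothetical.
 */
#include <stdint.h>
#include <string.h>

struct user_extension {
	uint64_t next_extension;	/* user pointer to the next node; 0 terminates */
	uint32_t name;			/* index into the kernel's handler table */
	uint32_t flags;			/* must be zero */
	uint32_t rsvd[4];		/* must be zero */
};

struct ext_a { struct user_extension base; uint32_t value; uint32_t pad; };
struct ext_b { struct user_extension base; uint64_t payload; };

static void chain_extensions(struct ext_a *a, struct ext_b *b)
{
	memset(a, 0, sizeof(*a));	/* clears flags and rsvd[], as required */
	memset(b, 0, sizeof(*b));

	a->base.name = 0;			/* hypothetical extension id */
	a->base.next_extension = (uintptr_t)&b->base;

	b->base.name = 1;			/* hypothetical extension id */
	b->base.next_extension = 0;		/* end of chain */
}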
diff --git a/drivers/gpu/drm/i915/i915_user_extensions.h b/drivers/gpu/drm/i915/i915_user_extensions.h
new file mode 100644
index 000000000000..a14bf6bba9a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_user_extensions.h
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef I915_USER_EXTENSIONS_H
+#define I915_USER_EXTENSIONS_H
+
+struct i915_user_extension;
+
+typedef int (*i915_user_extension_fn)(struct i915_user_extension __user *ext,
+ void *data);
+
+int i915_user_extensions(struct i915_user_extension __user *ext,
+ const i915_user_extension_fn *tbl,
+ unsigned int count,
+ void *data);
+
+#endif /* I915_USER_EXTENSIONS_H */
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 540e20eb032c..2dbe8933b50a 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -105,6 +105,37 @@
__T; \
})
+/*
+ * container_of_user: Extract the superclass from a pointer to a member.
+ *
+ * Exactly like container_of() with the exception that it plays nicely
+ * with sparse for __user @ptr.
+ */
+#define container_of_user(ptr, type, member) ({ \
+ void __user *__mptr = (void __user *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type __user *)(__mptr - offsetof(type, member))); })
+
+/*
+ * check_user_mbz: Check that a user value exists and is zero
+ *
+ * Frequently in our uABI we reserve space for future extensions, and
+ * two ensure that userspace is prepared we enforce that space must
+ * be zero. (Then any future extension can safely assume a default value
+ * of 0.)
+ *
+ * check_user_mbz() combines checking that the user pointer is accessible
+ * and that the contained value is zero.
+ *
+ * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
+ */
+#define check_user_mbz(U) ({ \
+ typeof(*(U)) mbz__; \
+ get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
+})
+
static inline u64 ptr_to_u64(const void *ptr)
{
return (uintptr_t)ptr;
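check_user_mbz() folds the access check and the must-be-zero check into one step: a failing get_user() yields -EFAULT, a nonzero value yields -EINVAL, otherwise 0. A kernel-side sketch of how a handler for a hypothetical extension could combine it with container_of_user(); the struct, handler and field names below are illustrative only and not part of i915.

/*
 * Illustrative handler for a made-up extension: recover the containing
 * __user struct from the embedded base member, then validate its fields.
 */
struct hypothetical_ext {
	struct i915_user_extension base;
	__u32 value;
	__u32 mbz;		/* reserved; must be zero */
};

static int hypothetical_ext_handler(struct i915_user_extension __user *base,
				    void *data)
{
	struct hypothetical_ext __user *ext =
		container_of_user(base, struct hypothetical_ext, base);
	u32 value;
	int err;

	err = check_user_mbz(&ext->mbz);	/* -EFAULT, -EINVAL or 0 */
	if (err)
		return err;

	if (get_user(value, &ext->value))
		return -EFAULT;

	/* ... apply value to *data ... */
	return 0;
}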
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 869cf4a3b6de..94d3992b599d 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,30 +60,31 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u64 magic;
u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
+ magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
- version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+ version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
- dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
+ dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
dev_priv->vgpu.active = true;
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
-bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
- return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
+ return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
struct _balloon_info_ {
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 551acc390046..ebe1b7bced98 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -28,7 +28,7 @@
void i915_check_vgpu(struct drm_i915_private *dev_priv);
-bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
static inline bool
intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b713bed20c38..961268f66c63 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -25,22 +25,35 @@
#include "i915_vma.h"
#include "i915_drv.h"
+#include "i915_globals.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"
#include <drm/drm_gem.h>
+static struct i915_global_vma {
+ struct i915_global base;
+ struct kmem_cache *slab_vmas;
+} global;
+
+struct i915_vma *i915_vma_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
+}
+
+void i915_vma_free(struct i915_vma *vma)
+{
+ return kmem_cache_free(global.slab_vmas, vma);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
#include <linux/stackdepot.h>
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
- unsigned long entries[12];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- };
+ unsigned long *entries;
+ unsigned int nr_entries;
char buf[512];
if (!vma->node.stack) {
@@ -49,8 +62,8 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
return;
}
- depot_fetch_stack(vma->node.stack, &trace);
- snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+ nr_entries = stack_depot_fetch(vma->node.stack, &entries);
+ stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
vma->node.start, vma->node.size, reason, buf);
}
@@ -115,7 +128,7 @@ vma_create(struct drm_i915_gem_object *obj,
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
- vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
+ vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -190,7 +203,7 @@ vma_create(struct drm_i915_gem_object *obj,
cmp = i915_vma_compare(pos, vm, view);
if (cmp == 0) {
spin_unlock(&obj->vma.lock);
- kmem_cache_free(vm->i915->vmas, vma);
+ i915_vma_free(vma);
return pos;
}
@@ -222,7 +235,7 @@ vma_create(struct drm_i915_gem_object *obj,
return vma;
err_vma:
- kmem_cache_free(vm->i915->vmas, vma);
+ i915_vma_free(vma);
return ERR_PTR(-E2BIG);
}
@@ -803,8 +816,6 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
@@ -825,7 +836,7 @@ static void __i915_vma_destroy(struct i915_vma *vma)
i915_active_fini(&vma->active);
- kmem_cache_free(i915->vmas, vma);
+ i915_vma_free(vma);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -1041,3 +1052,28 @@ unpin:
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
+
+static void i915_global_vma_shrink(void)
+{
+ kmem_cache_shrink(global.slab_vmas);
+}
+
+static void i915_global_vma_exit(void)
+{
+ kmem_cache_destroy(global.slab_vmas);
+}
+
+static struct i915_global_vma global = { {
+ .shrink = i915_global_vma_shrink,
+ .exit = i915_global_vma_exit,
+} };
+
+int __init i915_global_vma_init(void)
+{
+ global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_vmas)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
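i915_vma now allocates from its own global slab: the static i915_global_vma bundles the kmem_cache with shrink/exit hooks registered through i915_global_register(), replacing the per-device i915->vmas cache. The same shape, sketched for a hypothetical object type (struct i915_foo and the function names are placeholders; only the i915_globals hooks visible above plus <linux/slab.h> are assumed):

/*
 * Sketch of the same global-slab pattern for a made-up "foo" object.
 */
#include <linux/slab.h>

#include "i915_globals.h"

struct i915_foo {
	struct list_head link;
};

static struct i915_global_foo {
	struct i915_global base;
	struct kmem_cache *slab_foos;
} global_foo;

struct i915_foo *i915_foo_alloc(void)
{
	return kmem_cache_zalloc(global_foo.slab_foos, GFP_KERNEL);
}

void i915_foo_free(struct i915_foo *foo)
{
	kmem_cache_free(global_foo.slab_foos, foo);
}

static void i915_global_foo_shrink(void)
{
	kmem_cache_shrink(global_foo.slab_foos);
}

static void i915_global_foo_exit(void)
{
	kmem_cache_destroy(global_foo.slab_foos);
}

static struct i915_global_foo global_foo = { {
	.shrink = i915_global_foo_shrink,
	.exit = i915_global_foo_exit,
} };

int __init i915_global_foo_init(void)
{
	global_foo.slab_foos = KMEM_CACHE(i915_foo, SLAB_HWCACHE_ALIGN);
	if (!global_foo.slab_foos)
		return -ENOMEM;

	i915_global_register(&global_foo.base);
	return 0;
}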
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 7c742027f866..6eab70953a57 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -440,4 +440,7 @@ void i915_vma_parked(struct drm_i915_private *i915);
list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
+struct i915_vma *i915_vma_alloc(void);
+void i915_vma_free(struct i915_vma *vma);
+
#endif
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 73a7bee24a66..9d962ea1e635 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,9 +25,13 @@
* Jani Nikula <jani.nikula@intel.com>
*/
-#include <drm/drm_mipi_dsi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "intel_connector.h"
+#include "intel_ddi.h"
#include "intel_dsi.h"
+#include "intel_panel.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -246,13 +250,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
}
}
}
@@ -323,6 +327,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
}
}
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+ struct intel_dsi *intel_dsi)
+{
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ WARN_ON(intel_dsi->io_wakeref[port]);
+ intel_dsi->io_wakeref[port] =
+ intel_display_power_get(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO);
+ }
+}
+
static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -336,13 +355,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
}
- for_each_dsi_port(port, intel_dsi->ports) {
- intel_dsi->io_wakeref[port] =
- intel_display_power_get(dev_priv,
- port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO);
- }
+ get_dsi_io_power_domains(dev_priv, intel_dsi);
}
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -390,11 +403,11 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp &= ~LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
}
}
@@ -589,6 +602,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
}
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
POSTING_READ(DPCLKA_CFGCR0_ICL);
mutex_unlock(&dev_priv->dpll_lock);
@@ -861,7 +880,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
- if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE,
I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
@@ -1039,7 +1059,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
- if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 0, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
@@ -1117,7 +1138,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
DRM_ERROR("DDI port:%c buffer not idle\n",
port_name(port));
}
- gen11_dsi_ungate_clocks(encoder);
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -1131,13 +1152,11 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
- if (wakeref) {
- intel_display_power_put(dev_priv,
- port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO,
- wakeref);
- }
+ intel_display_power_put(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ wakeref);
}
/* set mode to DDI */
@@ -1179,11 +1198,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u32 pll_id;
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
- pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ pipe_config->port_clock =
+ cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
}
@@ -1218,20 +1236,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
return 0;
}
-static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u64 domains = 0;
- enum port port;
-
- for_each_dsi_port(port, intel_dsi->ports)
- if (port == PORT_A)
- domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
- else
- domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
-
- return domains;
+ get_dsi_io_power_domains(to_i915(encoder->base.dev),
+ enc_to_intel_dsi(&encoder->base));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -1361,7 +1370,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *scan, *fixed_mode = NULL;
+ struct drm_display_mode *fixed_mode;
enum port port;
if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -1411,15 +1420,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
- /* fill mode info from VBT */
mutex_lock(&dev->mode_config.mutex);
- intel_dsi_vbt_get_modes(intel_dsi);
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- break;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
@@ -1427,12 +1429,9 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
-
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
else
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7cf9290ea34a..8c8fae32ec50 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -35,6 +35,8 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_sprite.h"
/**
* intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
@@ -126,6 +128,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
*/
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
+ new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
@@ -234,10 +237,11 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (plane_state && plane_state->base.fb &&
plane_state->base.fb->format->is_yuv &&
plane_state->base.fb->format->num_planes > 1) {
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
- } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
+ } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
/*
* On gen11+'s HDR planes we only use the scaler for
* scaling. They have a dedicated chroma upsampler, so
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index db0965904439..d11681d71add 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -35,7 +35,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "intel_atomic_plane.h"
#include "intel_drv.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
struct intel_plane *intel_plane_alloc(void)
{
@@ -121,6 +124,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
+ new_crtc_state->c8_planes &= ~BIT(plane->id);
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -135,9 +139,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- new_plane_state->base.fb->format->format == DRM_FORMAT_NV12)
+ is_planar_yuv_format(new_plane_state->base.fb->format->format))
new_crtc_state->nv12_planes |= BIT(plane->id);
+ if (new_plane_state->base.visible &&
+ new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+ new_crtc_state->c8_planes |= BIT(plane->id);
+
if (new_plane_state->base.visible || old_plane_state->base.visible)
new_crtc_state->update_planes |= BIT(plane->id);
@@ -214,6 +222,35 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
return NULL;
}
+void intel_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, crtc_state, plane_state);
+}
+
+void intel_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_slave(plane, crtc_state, plane_state);
+}
+
+void intel_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, crtc_state);
+}
+
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -238,8 +275,7 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
intel_atomic_get_new_plane_state(state, plane);
if (new_plane_state->base.visible) {
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_plane(plane, new_crtc_state, new_plane_state);
+ intel_update_plane(plane, new_crtc_state, new_plane_state);
} else if (new_plane_state->slave) {
struct intel_plane *master =
new_plane_state->linked_plane;
@@ -256,11 +292,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
new_plane_state =
intel_atomic_get_new_plane_state(state, master);
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_slave(plane, new_crtc_state, new_plane_state);
+ intel_update_slave(plane, new_crtc_state, new_plane_state);
} else {
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, new_crtc_state);
+ intel_disable_plane(plane, new_crtc_state);
}
}
}
@@ -280,13 +314,10 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- if (new_plane_state->base.visible) {
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_plane(plane, new_crtc_state, new_plane_state);
- } else {
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, new_crtc_state);
- }
+ if (new_plane_state->base.visible)
+ intel_update_plane(plane, new_crtc_state, new_plane_state);
+ else
+ intel_disable_plane(plane, new_crtc_state);
}
}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.h b/drivers/gpu/drm/i915/intel_atomic_plane.h
new file mode 100644
index 000000000000..14678620440f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_PLANE_H__
+#define __INTEL_ATOMIC_PLANE_H__
+
+struct drm_plane;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct intel_plane_state;
+
+extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+
+void intel_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
+struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+void intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct intel_plane_state *intel_state);
+
+#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 5104c6bbd66f..bca4cc025d3d 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -21,14 +21,16 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/kernel.h>
#include <linux/component.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_edid.h>
#include <drm/i915_component.h>
#include <drm/intel_lpe_audio.h>
-#include "intel_drv.h"
-#include <drm/drm_edid.h>
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_drv.h"
/**
* DOC: High Definition Audio over HDMI and Display Port
@@ -741,27 +743,91 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
-static void i915_audio_component_get_power(struct device *kdev)
+static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
+ bool enable)
{
- intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (WARN_ON(!state))
+ return;
+
+ state->acquire_ctx = &ctx;
+
+retry:
+ to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
+ to_intel_atomic_state(state)->cdclk.force_min_cdclk =
+ enable ? 2 * 96000 : 0;
+
+ /*
+ * Protects dev_priv->cdclk.force_min_cdclk
+ * Need to lock this here in case we have no active pipes
+ * and thus wouldn't lock it during the commit otherwise.
+ */
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ &ctx);
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ WARN_ON(ret);
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
-static void i915_audio_component_put_power(struct device *kdev)
+static unsigned long i915_audio_component_get_power(struct device *kdev)
{
- intel_display_power_put_unchecked(kdev_to_i915(kdev),
- POWER_DOMAIN_AUDIO);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ intel_wakeref_t ret;
+
+ /* Catch potential impedance mismatches before they occur! */
+ BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
+
+ ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+
+ /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
+ if (dev_priv->audio_power_refcount++ == 0)
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ glk_force_audio_cdclk(dev_priv, true);
+
+ return ret;
+}
+
+static void i915_audio_component_put_power(struct device *kdev,
+ unsigned long cookie)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+
+ /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
+ if (--dev_priv->audio_power_refcount == 0)
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ glk_force_audio_cdclk(dev_priv, false);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
}
static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ unsigned long cookie;
u32 tmp;
if (!IS_GEN(dev_priv, 9))
return;
- i915_audio_component_get_power(kdev);
+ cookie = i915_audio_component_get_power(kdev);
/*
* Enable/disable generating the codec wake signal, overriding the
@@ -779,7 +845,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
usleep_range(1000, 1500);
}
- i915_audio_component_put_power(kdev);
+ i915_audio_component_put_power(kdev, cookie);
}
/* Get CDCLK in kHz */
@@ -850,12 +916,13 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
+ unsigned long cookie;
int err = 0;
if (!HAS_DDI(dev_priv))
return 0;
- i915_audio_component_get_power(kdev);
+ cookie = i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
@@ -875,7 +942,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
unlock:
mutex_unlock(&dev_priv->av_mutex);
- i915_audio_component_put_power(kdev);
+ i915_audio_component_put_power(kdev, cookie);
return err;
}
@@ -980,7 +1047,7 @@ static const struct component_ops i915_audio_component_bind_ops = {
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
-void i915_audio_component_init(struct drm_i915_private *dev_priv)
+static void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -1003,7 +1070,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
-void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
if (!dev_priv->audio_component_registered)
return;
diff --git a/drivers/gpu/drm/i915/intel_audio.h b/drivers/gpu/drm/i915/intel_audio.h
new file mode 100644
index 000000000000..a3657c7a7ba2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_audio.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_H__
+#define __INTEL_AUDIO_H__
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_deinit(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4364f42cac6b..1dc8d03ff127 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -760,6 +760,31 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
+
+ if (bdb->version >= 226) {
+ u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+
+ wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
+ switch (wakeup_time) {
+ case 0:
+ wakeup_time = 500;
+ break;
+ case 1:
+ wakeup_time = 100;
+ break;
+ case 3:
+ wakeup_time = 50;
+ break;
+ default:
+ case 2:
+ wakeup_time = 2500;
+ break;
+ }
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+ } else {
+ /* Reusing PSR1 wakeup time for PSR2 in older VBTs */
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
+ }
}
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
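For BDB version 226 and newer, psr2_tp2_tp3_wakeup_time packs one 2-bit code per panel, selected by panel_type, and the switch above maps the code to microseconds: 0 -> 500, 1 -> 100, 2 -> 2500 (also the default), 3 -> 50. The same decode, restated as a compact table lookup (helper name is illustrative):

/* Compact restatement of the 2-bit PSR2 TP2/TP3 wakeup-time decode. */
static u32 psr2_tp2_tp3_wakeup_time_us(u32 packed, int panel_type)
{
	static const u32 decode_us[] = { 500, 100, 2500, 50 };

	return decode_us[(packed >> (2 * panel_type)) & 0x3];
}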
@@ -1222,10 +1247,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
if (!info->alternate_ddc_pin)
return;
- for_each_port_masked(p, (1 << port) - 1) {
+ for (p = PORT_A; p < I915_MAX_PORTS; p++) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
- if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+ if (p == port || !i->present ||
+ info->alternate_ddc_pin != i->alternate_ddc_pin)
continue;
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
@@ -1239,8 +1265,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same DDC pin and
* system couldn't communicate with them separately.
*
- * Due to parsing the ports in alphabetical order,
- * a higher port will always clobber a lower one.
+ * Due to parsing the ports in child device order,
+ * a later device will always clobber an earlier one.
*/
i->supports_dvi = false;
i->supports_hdmi = false;
@@ -1258,10 +1284,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
if (!info->alternate_aux_channel)
return;
- for_each_port_masked(p, (1 << port) - 1) {
+ for (p = PORT_A; p < I915_MAX_PORTS; p++) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
- if (info->alternate_aux_channel != i->alternate_aux_channel)
+ if (p == port || !i->present ||
+ info->alternate_aux_channel != i->alternate_aux_channel)
continue;
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
@@ -1275,8 +1302,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
- * Due to parsing the ports in alphabetical order,
- * a higher port will always clobber a lower one.
+ * Due to parsing the ports in child device order,
+ * a later device will always clobber an earlier one.
*/
i->supports_dp = false;
i->alternate_aux_channel = 0;
@@ -1324,48 +1351,57 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
return 0;
}
-static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
- u8 bdb_version)
+static enum port dvo_port_to_port(u8 dvo_port)
{
- struct child_device_config *it, *child = NULL;
- struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
- int i, j;
- bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
- /* Each DDI port can have more than one value on the "DVO Port" field,
+ /*
+ * Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
*/
- int dvo_ports[][3] = {
- {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
- {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
- {DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
- {DVO_PORT_HDMID, DVO_PORT_DPD, -1},
- {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
- {DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+ static const int dvo_ports[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+ [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+ [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+ [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+ [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
};
+ enum port port;
+ int i;
- /*
- * Find the first child device to reference the port, report if more
- * than one found.
- */
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- it = dev_priv->vbt.child_dev + i;
-
- for (j = 0; j < 3; j++) {
- if (dvo_ports[port][j] == -1)
+ for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
+ for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
+ if (dvo_ports[port][i] == -1)
break;
- if (it->dvo_port == dvo_ports[port][j]) {
- if (child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
- } else {
- child = it;
- }
- }
+ if (dvo_port == dvo_ports[port][i])
+ return port;
}
}
- if (!child)
+
+ return PORT_NONE;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv,
+ const struct child_device_config *child,
+ u8 bdb_version)
+{
+ struct ddi_vbt_port_info *info;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ enum port port;
+
+ port = dvo_port_to_port(child->dvo_port);
+ if (port == PORT_NONE)
+ return;
+
+ info = &dev_priv->vbt.ddi_port_info[port];
+
+ if (info->present) {
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
return;
+ }
+
+ info->present = true;
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1498,19 +1534,20 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
- enum port port;
+ const struct child_device_config *child;
+ int i;
if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
- if (!dev_priv->vbt.child_dev_num)
- return;
-
if (bdb_version < 155)
return;
- for (port = PORT_A; port < I915_MAX_PORTS; port++)
- parse_ddi_port(dev_priv, port, bdb_version);
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ child = dev_priv->vbt.child_dev + i;
+
+ parse_ddi_port(dev_priv, child, bdb_version);
+ }
}
static void
@@ -2094,8 +2131,8 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
- (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
+ (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
+ (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 09ed90c0ba00..832cb6b1e9bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -23,12 +23,11 @@
*/
#include <linux/kthread.h>
+#include <trace/events/dma_fence.h>
#include <uapi/linux/sched/types.h>
#include "i915_drv.h"
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
-
static void irq_enable(struct intel_engine_cs *engine)
{
if (!engine->irq_enable)
@@ -82,9 +81,39 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
-bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+ return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+ fence->timestamp = timestamp;
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+ trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence)
+{
+ struct dma_fence_cb *cur, *tmp;
+
+ lockdep_assert_held(fence->lock);
+ lockdep_assert_irqs_disabled();
+
+ list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+ INIT_LIST_HEAD(&cur->node);
+ cur->func(fence, cur);
+ }
+ INIT_LIST_HEAD(&fence->cb_list);
+}
+
+void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
LIST_HEAD(signal);
@@ -106,6 +135,10 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
&rq->fence.flags));
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+ if (!__dma_fence_signal(&rq->fence))
+ continue;
/*
* Queue for execution after dropping the signaling
@@ -113,14 +146,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
* more signalers to the same context or engine.
*/
i915_request_get(rq);
-
- /*
- * We may race with direct invocation of
- * dma_fence_signal(), e.g. i915_request_retire(),
- * so we need to acquire our reference to the request
- * before we cancel the breadcrumb.
- */
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
list_add_tail(&rq->signal_link, &signal);
}
@@ -143,22 +168,21 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
- dma_fence_signal(&rq->fence);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+
+ spin_lock(&rq->lock);
+ __dma_fence_signal__notify(&rq->fence);
+ spin_unlock(&rq->lock);
+
i915_request_put(rq);
}
-
- return !list_empty(&signal);
}
-bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
+void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
- bool result;
-
local_irq_disable();
- result = intel_engine_breadcrumbs_irq(engine);
+ intel_engine_breadcrumbs_irq(engine);
local_irq_enable();
-
- return result;
}
static void signal_irq_work(struct irq_work *work)
@@ -251,19 +275,17 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_irqs_disabled();
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
- if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
- return true;
-
- spin_lock(&b->irq_lock);
- if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
- !__request_completed(rq)) {
+ if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+ struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
struct intel_context *ce = rq->hw_context;
struct list_head *pos;
+ spin_lock(&b->irq_lock);
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
__intel_breadcrumbs_arm_irq(b);
/*
@@ -292,8 +314,8 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
list_move_tail(&ce->signal_link, &b->signalers);
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ spin_unlock(&b->irq_lock);
}
- spin_unlock(&b->irq_lock);
return !__request_completed(rq);
}
@@ -302,9 +324,15 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
- return;
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_irqs_disabled();
+ /*
+ * We must wait for b->irq_lock so that we know the interrupt handler
+ * has released its reference to the intel_context and has completed
+ * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
+ * required).
+ */
spin_lock(&b->irq_lock);
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
struct intel_context *ce = rq->hw_context;
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 15ba950dee00..ae40a8679314 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_cdclk.h"
#include "intel_drv.h"
/**
@@ -234,7 +235,8 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
else
return 0;
- tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
+ HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -468,7 +470,7 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->vco);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
mutex_unlock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv))
@@ -516,7 +518,8 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
}
static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
@@ -543,11 +546,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
@@ -598,7 +601,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
@@ -624,11 +628,11 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
@@ -697,7 +701,8 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val;
@@ -964,7 +969,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
DRM_ERROR("DPLL0 not locked\n");
@@ -978,16 +983,17 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
- LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
- 1))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+ 1))
DRM_ERROR("Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
}
static void skl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1123,16 +1129,7 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-/**
- * skl_init_cdclk - Initialize CDCLK on SKL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for SKL and derivatives. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -1158,17 +1155,10 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state);
+ skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * skl_uninit_cdclk - Uninitialize CDCLK on SKL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for SKL and derivatives. This is done only
- * during the display core uninitialization sequence.
- */
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -1176,7 +1166,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state);
+ skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
static int bxt_calc_cdclk(int min_cdclk)
@@ -1323,7 +1313,7 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
1))
DRM_ERROR("timeout waiting for DE PLL unlock\n");
@@ -1344,7 +1334,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE,
BXT_DE_PLL_LOCK,
BXT_DE_PLL_LOCK,
@@ -1355,7 +1345,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
}
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1408,11 +1399,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
bxt_de_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
- /*
- * FIXME if only the cd2x divider needs changing, it could be done
- * without shutting off the pipe (if only one pipe is active).
- */
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ if (pipe == INVALID_PIPE)
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ val |= BXT_CDCLK_CD2X_PIPE(pipe);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
@@ -1421,6 +1411,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
I915_WRITE(CDCLK_CTL, val);
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
+
mutex_lock(&dev_priv->pcu_lock);
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -1490,16 +1483,7 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-/**
- * bxt_init_cdclk - Initialize CDCLK on BXT
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for BXT and derivatives. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -1525,17 +1509,10 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
}
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * bxt_uninit_cdclk - Uninitialize CDCLK on BXT
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for BXT and derivatives. This is done only
- * during the display core uninitialization sequence.
- */
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -1543,7 +1520,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
static int cnl_calc_cdclk(int min_cdclk)
@@ -1663,7 +1640,8 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
}
static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1704,13 +1682,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
cnl_cdclk_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
- /*
- * FIXME if only the cd2x divider needs changing, it could be done
- * without shutting off the pipe (if only one pipe is active).
- */
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ if (pipe == INVALID_PIPE)
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ val |= BXT_CDCLK_CD2X_PIPE(pipe);
I915_WRITE(CDCLK_CTL, val);
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
+
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
@@ -1847,7 +1827,8 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
}
static void icl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
unsigned int cdclk = cdclk_state->cdclk;
unsigned int vco = cdclk_state->vco;
@@ -1872,6 +1853,11 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
if (dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_enable(dev_priv, vco);
+ /*
+ * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
+ * divider here synchronized to a pipe while CDCLK is on, nor will we
+ * need the corresponding vblank wait.
+ */
I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
skl_cdclk_decimal(cdclk));
@@ -1959,16 +1945,7 @@ out:
icl_calc_voltage_level(cdclk_state->cdclk);
}
-/**
- * icl_init_cdclk - Initialize CDCLK on ICL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for ICL. This consists mainly of initializing
- * dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This
- * is generally done only during the display core initialization sequence, after
- * which the DMC will take care of turning CDCLK off/on as needed.
- */
-void icl_init_cdclk(struct drm_i915_private *dev_priv)
+static void icl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state sanitized_state;
u32 val;
@@ -2002,17 +1979,10 @@ sanitize:
sanitized_state.voltage_level =
icl_calc_voltage_level(sanitized_state.cdclk);
- icl_set_cdclk(dev_priv, &sanitized_state);
+ icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
}
-/**
- * icl_uninit_cdclk - Uninitialize CDCLK on ICL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for ICL. This is done only during the display core
- * uninitialization sequence.
- */
-void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -2020,19 +1990,10 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
- icl_set_cdclk(dev_priv, &cdclk_state);
+ icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * cnl_init_cdclk - Initialize CDCLK on CNL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for CNL. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -2048,17 +2009,10 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state);
+ cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * cnl_uninit_cdclk - Uninitialize CDCLK on CNL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for CNL. This is done only
- * during the display core uninitialization sequence.
- */
-void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -2066,7 +2020,47 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state);
+ cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+/**
+ * intel_cdclk_init - Initialize CDCLK
+ * @i915: i915 device
+ *
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * sanitizing the state of the hardware if needed. This is generally done only
+ * during the display core initialization sequence, after which the DMC will
+ * take care of turning CDCLK off/on as needed.
+ */
+void intel_cdclk_init(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_init_cdclk(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_init_cdclk(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_init_cdclk(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_init_cdclk(i915);
+}
+
+/**
+ * intel_cdclk_uninit - Uninitialize CDCLK
+ * @i915: i915 device
+ *
+ * Uninitialize CDCLK. This is done only during the display core
+ * uninitialization sequence.
+ */
+void intel_cdclk_uninit(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_uninit_cdclk(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_uninit_cdclk(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_uninit_cdclk(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_uninit_cdclk(i915);
}
/**
@@ -2086,6 +2080,28 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
}
/**
+ * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
+ * @dev_priv: i915 device
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states require just a cd2x divider update, false if not.
+ */
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
+{
+ /* Older hw doesn't have the capability */
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
+ return false;
+
+ return a->cdclk != b->cdclk &&
+ a->vco == b->vco &&
+ a->ref == b->ref;
+}
+
+/**
* intel_cdclk_changed - Determine if two CDCLK states are different
* @a: first CDCLK state
* @b: second CDCLK state
@@ -2100,6 +2116,26 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
a->voltage_level != b->voltage_level;
}
+/**
+ * intel_cdclk_swap_state - make atomic CDCLK configuration effective
+ * @state: atomic state
+ *
+ * This is the CDCLK version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * Similarly to the atomic helpers this function does a complete swap,
+ * i.e. it also puts the old state into @state. This is used by the commit
+ * code to determine how CDCLK has changed (for instance did it increase or
+ * decrease).
+ */
+void intel_cdclk_swap_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ swap(state->cdclk.logical, dev_priv->cdclk.logical);
+ swap(state->cdclk.actual, dev_priv->cdclk.actual);
+}
+
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context)
{
@@ -2113,12 +2149,14 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
* intel_set_cdclk - Push the CDCLK state to the hardware
* @dev_priv: i915 device
* @cdclk_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
*
* Program the hardware based on the passed in CDCLK state,
* if necessary.
*/
-void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+static void intel_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
return;
@@ -2128,7 +2166,7 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
- dev_priv->display.set_cdclk(dev_priv, cdclk_state);
+ dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
"cdclk state doesn't match!\n")) {
@@ -2137,6 +2175,46 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
}
}
+/**
+ * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware before updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe)
+{
+ if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
+ intel_set_cdclk(dev_priv, new_state, pipe);
+}
+
+/**
+ * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware after updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe)
+{
+ if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
+ intel_set_cdclk(dev_priv, new_state, pipe);
+}
+
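The pre/post split above boils down to an ordering rule: raise CDCLK before the plane updates, lower it after them, and fall back to the pre-plane path when there is no pipe to synchronize against. A minimal stand-alone sketch of that decision (the cdclk_sketch struct, set_cdclk_before_planes() helper and INVALID_PIPE value are illustrative stand-ins, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define INVALID_PIPE (-1)            /* stand-in for the driver's enum value */

struct cdclk_sketch { int cdclk; };  /* simplified stand-in for intel_cdclk_state */

/* Mirror of the pre-plane condition: bump CDCLK early when it increases,
 * or when there is no pipe available to synchronize against. */
static bool set_cdclk_before_planes(const struct cdclk_sketch *old_state,
				    const struct cdclk_sketch *new_state,
				    int pipe)
{
	return pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk;
}

int main(void)
{
	struct cdclk_sketch old_state = { .cdclk = 652800 };
	struct cdclk_sketch new_state = { .cdclk = 326400 };
	int pipe = 0;

	/* Decreasing CDCLK with a pipe to sync against: program it after
	 * the plane update, so the planes never run on too low a clock. */
	printf("program cdclk %s the plane update\n",
	       set_cdclk_before_planes(&old_state, &new_state, pipe) ?
	       "before" : "after");
	return 0;
}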
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
@@ -2187,19 +2265,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
/*
* According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
- *
- * FIXME: Check the actual, not default, BCLK being used.
- *
- * FIXME: This does not depend on ->has_audio because the higher CDCLK
- * is required for audio probe, also when there are no audio capable
- * displays connected at probe time. This leads to unnecessarily high
- * CDCLK when audio is not required.
- *
- * FIXME: This limit is only applied when there are displays connected
- * at probe time. If we probe without displays, we'll still end up using
- * the platform minimum CDCLK, failing audio probe.
*/
- if (INTEL_GEN(dev_priv) >= 9)
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
/*
@@ -2239,7 +2306,7 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state)
intel_state->min_cdclk[i] = min_cdclk;
}
- min_cdclk = 0;
+ min_cdclk = intel_state->cdclk.force_min_cdclk;
for_each_pipe(dev_priv, pipe)
min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
@@ -2300,7 +2367,8 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
vlv_calc_voltage_level(dev_priv, cdclk);
if (!intel_state->active_crtcs) {
- cdclk = vlv_calc_cdclk(dev_priv, 0);
+ cdclk = vlv_calc_cdclk(dev_priv,
+ intel_state->cdclk.force_min_cdclk);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
@@ -2333,7 +2401,7 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
bdw_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
- cdclk = bdw_calc_cdclk(0);
+ cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
@@ -2405,7 +2473,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
skl_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
- cdclk = skl_calc_cdclk(0, vco);
+ cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco);
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
@@ -2444,10 +2512,10 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
if (!intel_state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(0);
+ cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
- cdclk = bxt_calc_cdclk(0);
+ cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
@@ -2483,7 +2551,7 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
- cdclk = cnl_calc_cdclk(0);
+ cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
@@ -2519,7 +2587,7 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
- cdclk = icl_calc_cdclk(0, ref);
+ cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref);
vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
@@ -2560,7 +2628,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else
@@ -2668,7 +2736,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@@ -2723,7 +2791,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
@@ -2744,18 +2812,13 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.set_cdclk = chv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.set_cdclk = vlv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
- } else if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.set_cdclk = bdw_set_cdclk;
+ if (INTEL_GEN(dev_priv) >= 11) {
+ dev_priv->display.set_cdclk = icl_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = cnl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
- bdw_modeset_calc_cdclk;
+ cnl_modeset_calc_cdclk;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
@@ -2764,23 +2827,28 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
skl_modeset_calc_cdclk;
- } else if (IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = cnl_set_cdclk;
+ } else if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.set_cdclk = bdw_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
- cnl_modeset_calc_cdclk;
- } else if (IS_ICELAKE(dev_priv)) {
- dev_priv->display.set_cdclk = icl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ bdw_modeset_calc_cdclk;
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.set_cdclk = chv_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ vlv_modeset_calc_cdclk;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.set_cdclk = vlv_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ vlv_modeset_calc_cdclk;
}
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.get_cdclk = icl_get_cdclk;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.get_cdclk = cnl_get_cdclk;
- else if (IS_GEN9_BC(dev_priv))
- dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
+ else if (IS_GEN9_BC(dev_priv))
+ dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.get_cdclk = bdw_get_cdclk;
else if (IS_HASWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_cdclk.h b/drivers/gpu/drm/i915/intel_cdclk.h
new file mode 100644
index 000000000000..4d6f7f5f8930
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cdclk.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CDCLK_H__
+#define __INTEL_CDCLK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_cdclk_state;
+struct intel_crtc_state;
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
+void intel_cdclk_init(struct drm_i915_private *i915);
+void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+void intel_cdclk_swap_state(struct intel_atomic_state *state);
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe);
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe);
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+ const char *context);
+
+#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 71a1f12c6b2a..9093daabc290 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -22,6 +22,7 @@
*
*/
+#include "intel_color.h"
#include "intel_drv.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -40,23 +41,6 @@
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
#define LEGACY_LUT_LENGTH 256
-
-/* Post offset values for RGB->YCBCR conversion */
-#define POSTOFF_RGB_TO_YUV_HI 0x800
-#define POSTOFF_RGB_TO_YUV_ME 0x100
-#define POSTOFF_RGB_TO_YUV_LO 0x800
-
-/*
- * These values are direct register values specified in the Bspec,
- * for RGB->YUV conversion matrix (colorspace BT709)
- */
-#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8
-#define CSC_RGB_TO_YUV_BU 0x37e80000
-#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0
-#define CSC_RGB_TO_YUV_BY 0xb5280000
-#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8
-#define CSC_RGB_TO_YUV_BV 0x1e080000
-
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
@@ -69,10 +53,45 @@
#define ILK_CSC_COEFF_FP(coeff, fbits) \
(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
-#define ILK_CSC_COEFF_LIMITED_RANGE \
- ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
-#define ILK_CSC_COEFF_1_0 \
- ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
+#define ILK_CSC_COEFF_1_0 0x7800
+
+#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+
+static const u16 ilk_csc_off_zero[3] = {};
+
+static const u16 ilk_csc_coeff_identity[9] = {
+ ILK_CSC_COEFF_1_0, 0, 0,
+ 0, ILK_CSC_COEFF_1_0, 0,
+ 0, 0, ILK_CSC_COEFF_1_0,
+};
+
+static const u16 ilk_csc_postoff_limited_range[3] = {
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+};
+
+static const u16 ilk_csc_coeff_limited_range[9] = {
+ ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
+ 0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
+ 0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
+};
+
+/*
+ * These values are direct register values specified in the Bspec,
+ * for RGB->YUV conversion matrix (colorspace BT709)
+ */
+static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
+ 0x1e08, 0x9cc0, 0xb528,
+ 0x2ba8, 0x09d8, 0x37e8,
+ 0xbce8, 0x9ad8, 0x1e08,
+};
+
+/* Post offset values for RGB->YCBCR conversion */
+static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
+ 0x0800, 0x0100, 0x0800,
+};
static bool lut_is_legacy(const struct drm_property_blob *lut)
{
@@ -113,145 +132,188 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
return result;
}
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
+static void ilk_update_pipe_csc(struct intel_crtc *crtc,
+ const u16 preoff[3],
+ const u16 coeff[9],
+ const u16 postoff[3])
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU);
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY);
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV);
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO);
- I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ if (INTEL_GEN(dev_priv) >= 7) {
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+ }
}
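For reference, the coefficient writes above pack the nine u16 matrix entries two per register, with the last coefficient of each row alone in the upper half of its own register. A small stand-alone sketch of that packing (pack_csc_row() is a hypothetical helper; 0x7800 is the ILK_CSC_COEFF_1_0 value from the patch):

#include <stdint.h>
#include <stdio.h>

/* Pack one CSC matrix row (three u16 coefficients) the way the
 * PIPE_CSC_COEFF_* register pairs expect it: two coefficients in the
 * first register, the third alone in the upper half of the second. */
static void pack_csc_row(const uint16_t row[3], uint32_t regs[2])
{
	regs[0] = (uint32_t)row[0] << 16 | row[1];
	regs[1] = (uint32_t)row[2] << 16;
}

int main(void)
{
	/* identity row: 1.0, 0, 0 */
	const uint16_t row[3] = { 0x7800, 0, 0 };
	uint32_t regs[2];

	pack_csc_row(row, regs);
	printf("RY_GY=0x%08x BY=0x%08x\n", regs[0], regs[1]);
	return 0;
}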
-static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void icl_update_output_csc(struct intel_crtc *crtc,
+ const u16 preoff[3],
+ const u16 coeff[9],
+ const u16 postoff[3])
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool limited_color_range = false;
enum pipe pipe = crtc->pipe;
- u16 coeffs[9] = {};
- int i;
+
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+}
+
+static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
/*
* FIXME if there's a gamma LUT after the CSC, we should
* do the range compression using the gamma LUT instead.
*/
- if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
- limited_color_range = crtc_state->limited_color_range;
+ return crtc_state->limited_color_range &&
+ (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+ IS_GEN_RANGE(dev_priv, 9, 10));
+}
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
- ilk_load_ycbcr_conversion_matrix(crtc);
- return;
- } else if (crtc_state->base.ctm) {
- struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
- const u64 *input;
- u64 temp[9];
+static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
+ u16 coeffs[9])
+{
+ const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ const u64 *input;
+ u64 temp[9];
+ int i;
- if (limited_color_range)
- input = ctm_mult_by_limited(temp, ctm->matrix);
- else
- input = ctm->matrix;
+ if (ilk_csc_limited_range(crtc_state))
+ input = ctm_mult_by_limited(temp, ctm->matrix);
+ else
+ input = ctm->matrix;
+
+ /*
+ * Convert fixed point S31.32 input to format supported by the
+ * hardware.
+ */
+ for (i = 0; i < 9; i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
/*
- * Convert fixed point S31.32 input to format supported by the
+ * Clamp input value to min/max supported by
* hardware.
*/
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
-
- /*
- * Clamp input value to min/max supported by
- * hardware.
- */
- abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
-
- /* sign bit */
- if (CTM_COEFF_NEGATIVE(input[i]))
- coeffs[i] |= 1 << 15;
-
- if (abs_coeff < CTM_COEFF_0_125)
- coeffs[i] |= (3 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 12);
- else if (abs_coeff < CTM_COEFF_0_25)
- coeffs[i] |= (2 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 11);
- else if (abs_coeff < CTM_COEFF_0_5)
- coeffs[i] |= (1 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 10);
- else if (abs_coeff < CTM_COEFF_1_0)
- coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
- else if (abs_coeff < CTM_COEFF_2_0)
- coeffs[i] |= (7 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 8);
- else
- coeffs[i] |= (6 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 7);
- }
- } else {
- /*
- * Load an identity matrix if no coefficients are provided.
- *
- * TODO: Check what kind of values actually come out of the
- * pipe with these coeff/postoff values and adjust to get the
- * best accuracy. Perhaps we even need to take the bpc value
- * into consideration.
- */
- for (i = 0; i < 3; i++) {
- if (limited_color_range)
- coeffs[i * 3 + i] =
- ILK_CSC_COEFF_LIMITED_RANGE;
- else
- coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0;
- }
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ coeffs[i] = 0;
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[i]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
}
+}
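The conversion above maps each S31.32 CTM coefficient to the hardware's sign + 3-bit exponent + mantissa format by choosing an exponent range from the magnitude and reusing ILK_CSC_COEFF_FP for the mantissa. A stand-alone sketch of the same arithmetic, assuming the usual CTM fixed-point convention of 1.0 == 1ULL << 32 (the FP_* names and the clamp_u32()/csc_coeff_fp() helpers are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* S31.32 fixed point; 1.0 == 1ULL << 32 (assumed, matching the driver's
 * CTM_COEFF_* convention). */
#define FP_1_0   (1ULL << 32)
#define FP_SIGN  (1ULL << 63)

static uint32_t clamp_u32(uint64_t v, uint32_t lo, uint32_t hi)
{
	if (v < lo) return lo;
	if (v > hi) return hi;
	return (uint32_t)v;
}

/* Same rounding/clamping as ILK_CSC_COEFF_FP in the patch: keep the bits
 * that fit the chosen exponent, round, and zero the low 3 bits. */
static uint16_t csc_coeff_fp(uint64_t abs_coeff, int fbits)
{
	return clamp_u32((abs_coeff >> (32 - fbits - 3)) + 4, 0, 0xfff) & 0xff8;
}

/* Sketch of the per-coefficient conversion: pick an exponent range from
 * the magnitude, then encode sign bit, exponent and mantissa. */
static uint16_t convert_ctm_coeff(uint64_t input)
{
	uint64_t abs_coeff = input & ~FP_SIGN;
	uint16_t coeff = 0;

	/* Clamp to the largest magnitude the hardware can represent (< 4.0). */
	if (abs_coeff > 4 * FP_1_0 - 1)
		abs_coeff = 4 * FP_1_0 - 1;

	if (input & FP_SIGN)                 /* sign bit */
		coeff |= 1 << 15;

	if (abs_coeff < FP_1_0 / 8)          /* < 0.125 */
		coeff |= (3 << 12) | csc_coeff_fp(abs_coeff, 12);
	else if (abs_coeff < FP_1_0 / 4)     /* < 0.25 */
		coeff |= (2 << 12) | csc_coeff_fp(abs_coeff, 11);
	else if (abs_coeff < FP_1_0 / 2)     /* < 0.5 */
		coeff |= (1 << 12) | csc_coeff_fp(abs_coeff, 10);
	else if (abs_coeff < FP_1_0)         /* < 1.0 */
		coeff |= csc_coeff_fp(abs_coeff, 9);
	else if (abs_coeff < 2 * FP_1_0)     /* < 2.0 */
		coeff |= (7 << 12) | csc_coeff_fp(abs_coeff, 8);
	else
		coeff |= (6 << 12) | csc_coeff_fp(abs_coeff, 7);

	return coeff;
}

int main(void)
{
	/* 1.0 in S31.32 encodes to 0x7800, i.e. ILK_CSC_COEFF_1_0. */
	printf("0x%04x\n", convert_ctm_coeff(FP_1_0));
	return 0;
}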
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);
-
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);
-
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+ if (crtc_state->base.ctm) {
+ u16 coeff[9];
+
+ ilk_csc_convert_ctm(crtc_state, coeff);
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
+ limited_color_range ?
+ ilk_csc_postoff_limited_range :
+ ilk_csc_off_zero);
+ } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_rgb_to_ycbcr,
+ ilk_csc_postoff_rgb_to_ycbcr);
+ } else if (limited_color_range) {
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_limited_range,
+ ilk_csc_postoff_limited_range);
+ } else if (crtc_state->csc_enable) {
+ /*
+ * On GLK+ both pipe CSC and degamma LUT are controlled
+ * by csc_enable. Hence, for the cases where the degamma
+ * LUT is needed but the CSC is not, we need to load an
+ * identity matrix.
+ */
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
- if (INTEL_GEN(dev_priv) > 6) {
- u16 postoff = 0;
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_identity,
+ ilk_csc_off_zero);
+ }
- if (limited_color_range)
- postoff = (16 * (1 << 12) / 255) & 0x1fff;
+ I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+}
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(PIPE_CSC_MODE(pipe), 0);
- } else {
- u32 mode = CSC_MODE_YUV_TO_RGB;
+ if (crtc_state->base.ctm) {
+ u16 coeff[9];
- if (limited_color_range)
- mode |= CSC_BLACK_SCREEN_OFFSET;
+ ilk_csc_convert_ctm(crtc_state, coeff);
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ coeff, ilk_csc_off_zero);
+ }
- I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+ icl_update_output_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_rgb_to_ycbcr,
+ ilk_csc_postoff_rgb_to_ycbcr);
+ } else if (crtc_state->limited_color_range) {
+ icl_update_output_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_limited_range,
+ ilk_csc_postoff_limited_range);
}
+
+ I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
/*
@@ -262,7 +324,6 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 mode;
if (crtc_state->base.ctm) {
const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
@@ -296,12 +357,30 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
}
- mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
- if (!crtc_state_is_legacy_gamma(crtc_state)) {
- mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
- (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
- }
- I915_WRITE(CGM_PIPE_MODE(pipe), mode);
+ I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+}
+
+/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
+static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
+{
+ return (color->red & 0xff) << 16 |
+ (color->green & 0xff) << 8 |
+ (color->blue & 0xff);
+}
+
+/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */
+static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
+{
+ return (color->red >> 8) << 16 |
+ (color->green >> 8) << 8 |
+ (color->blue >> 8);
+}
+
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
}
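The helpers above only differ in how a 16-bit-per-channel entry is sliced up: the i965 10.6 format splits each channel's low and high 8 bits across an even/odd dword pair, while the ILK format packs a rounded 10-bit value per channel into a single dword. A stand-alone sketch of both packings (color16 and reduce() are illustrative stand-ins; reduce() only approximates what drm_color_lut_extract() is used for here):

#include <stdint.h>
#include <stdio.h>

struct color16 { uint16_t red, green, blue; };  /* stand-in for drm_color_lut */

/* i965 10.6: low 8 bits of each channel in the even dword ... */
static uint32_t lut_10p6_ldw(const struct color16 *c)
{
	return (c->red & 0xff) << 16 | (c->green & 0xff) << 8 | (c->blue & 0xff);
}

/* ... and the high 8 bits in the odd dword. */
static uint32_t lut_10p6_udw(const struct color16 *c)
{
	return (c->red >> 8) << 16 | (c->green >> 8) << 8 | (c->blue >> 8);
}

/* Round to nearest and keep the top `bits` bits of a 16-bit value. */
static uint32_t reduce(uint16_t v, int bits)
{
	uint32_t val = v + (1u << (16 - bits - 1));

	if (val > 0xffff)
		val = 0xffff;
	return val >> (16 - bits);
}

/* ILK 10:10:10 packing of one entry. */
static uint32_t lut_ilk_10(const struct color16 *c)
{
	return reduce(c->red, 10) << 20 |
	       reduce(c->green, 10) << 10 |
	       reduce(c->blue, 10);
}

int main(void)
{
	const struct color16 c = { 0xffff, 0x8000, 0x0000 };

	printf("10.6 ldw=0x%06x udw=0x%06x ilk10=0x%08x\n",
	       lut_10p6_ldw(&c), lut_10p6_udw(&c), lut_ilk_10(&c));
	return 0;
}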
/* Loads the legacy palette/gamma unit for the CRTC. */
@@ -334,15 +413,6 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
}
- } else {
- for (i = 0; i < 256; i++) {
- u32 word = (i << 16) | (i << 8) | i;
-
- if (HAS_GMCH(dev_priv))
- I915_WRITE(PALETTE(pipe, i), word);
- else
- I915_WRITE(LGC_PALETTE(pipe, i), word);
- }
}
}
@@ -351,6 +421,34 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
}
+static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+
+ val = I915_READ(PIPECONF(pipe));
+ val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ I915_WRITE(PIPECONF(pipe), val);
+}
+
+static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+
+ val = I915_READ(PIPECONF(pipe));
+ val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ I915_WRITE(PIPECONF(pipe), val);
+
+ ilk_load_csc_matrix(crtc_state);
+}
+
static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -361,106 +459,219 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
-static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state)
+static void skl_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
- u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
enum pipe pipe = crtc->pipe;
+ u32 val = 0;
- I915_WRITE(PREC_PAL_INDEX(pipe),
- PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black, but apply pipe gamma and CSC appropriately
+ * so that its handling will match how we program our planes.
+ */
+ if (crtc_state->gamma_enable)
+ val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
+ if (crtc_state->csc_enable)
+ val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
+ I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
- if (degamma_lut) {
- const struct drm_color_lut *lut = degamma_lut->data;
+ I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
- for (i = 0; i < lut_size; i++) {
- u32 word =
- drm_color_lut_extract(lut[i].red, 10) << 20 |
- drm_color_lut_extract(lut[i].green, 10) << 10 |
- drm_color_lut_extract(lut[i].blue, 10);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_load_csc_matrix(crtc_state);
+ else
+ ilk_load_csc_matrix(crtc_state);
+}
- I915_WRITE(PREC_PAL_DATA(pipe), word);
- }
- } else {
- for (i = 0; i < lut_size; i++) {
- u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+static void i965_load_lut_10p6(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
- I915_WRITE(PREC_PAL_DATA(pipe),
- (v << 20) | (v << 10) | v);
- }
+ for (i = 0; i < lut_size - 1; i++) {
+ I915_WRITE(PALETTE(pipe, 2 * i + 0),
+ i965_lut_10p6_ldw(&lut[i]));
+ I915_WRITE(PALETTE(pipe, 2 * i + 1),
+ i965_lut_10p6_udw(&lut[i]));
}
+
+ I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
+ I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
+ I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
}
-static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 offset)
+static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ i9xx_load_luts(crtc_state);
+ else
+ i965_load_lut_10p6(crtc, gamma_lut);
+}
+
+static void ilk_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
+ for (i = 0; i < lut_size; i++)
+ I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+}
- I915_WRITE(PREC_PAL_INDEX(pipe),
- (offset ? PAL_PREC_SPLIT_MODE : 0) |
- PAL_PREC_AUTO_INCREMENT |
- offset);
+static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- if (gamma_lut) {
- const struct drm_color_lut *lut = gamma_lut->data;
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ i9xx_load_luts(crtc_state);
+ else
+ ilk_load_lut_10(crtc, gamma_lut);
+}
- for (i = 0; i < lut_size; i++) {
- u32 word =
- (drm_color_lut_extract(lut[i].red, 10) << 20) |
- (drm_color_lut_extract(lut[i].green, 10) << 10) |
- drm_color_lut_extract(lut[i].blue, 10);
+static int ivb_lut_10_size(u32 prec_index)
+{
+ if (prec_index & PAL_PREC_SPLIT_MODE)
+ return 512;
+ else
+ return 1024;
+}
- I915_WRITE(PREC_PAL_DATA(pipe), word);
- }
+/*
+ * IVB/HSW Bspec / PAL_PREC_INDEX:
+ * "Restriction : Index auto increment mode is not
+ * supported and must not be enabled."
+ */
+static void ivb_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
- /* Program the max register to clamp values > 1.0. */
- i = lut_size - 1;
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
- drm_color_lut_extract(lut[i].red, 16));
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
- drm_color_lut_extract(lut[i].green, 16));
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2),
- drm_color_lut_extract(lut[i].blue, 16));
- } else {
- for (i = 0; i < lut_size; i++) {
- u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+ for (i = 0; i < hw_lut_size; i++) {
+ /* We discard half the user entries in split gamma mode */
+ const struct drm_color_lut *entry =
+ &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_DATA(pipe),
- (v << 20) | (v << 10) | v);
- }
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ }
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from
+ * being written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
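The index expression above spreads the user LUT across however many hardware slots the mode provides, pinning both endpoints; in split gamma mode that means roughly every other user entry is used. A tiny stand-alone sketch of the mapping, assuming a 1024-entry user LUT and the 512-entry split-mode hardware LUT:

#include <stdio.h>

int main(void)
{
	const int lut_size = 1024;   /* user-supplied entries (assumed) */
	const int hw_lut_size = 512; /* hardware slots in split mode */
	int i;

	/* Same mapping as the load loop: endpoints land exactly on user
	 * entries 0 and 1023, the rest skip approximately every other one. */
	for (i = 0; i < hw_lut_size; i += 128)
		printf("hw[%3d] <- user[%4d]\n",
		       i, i * (lut_size - 1) / (hw_lut_size - 1));
	printf("hw[%3d] <- user[%4d]\n", hw_lut_size - 1,
	       (hw_lut_size - 1) * (lut_size - 1) / (hw_lut_size - 1));
	return 0;
}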
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
+/* On BDW+ the index auto increment mode actually works */
+static void bdw_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < hw_lut_size; i++) {
+ /* We discard half the user entries in split gamma mode */
+ const struct drm_color_lut *entry =
+ &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
}
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from
+ * being written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
-/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
+static void ivb_load_lut_10_max(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (crtc_state_is_legacy_gamma(crtc_state)) {
+ /* Program the max register to clamp values > 1.0. */
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+
+ /*
+ * Program the gc max 2 register to clamp values > 1.0.
+ * ToDo: Extend the ABI to be able to program values
+ * from 3.0 to 7.0
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ }
+}
+
+static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
+ } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+ ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
} else {
- bdw_load_degamma_lut(crtc_state);
- bdw_load_gamma_lut(crtc_state,
- INTEL_INFO(dev_priv)->color.degamma_lut_size);
+ const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
- /*
- * Reset the index, otherwise it prevents the legacy palette to be
- * written properly.
- */
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ ivb_load_lut_10(crtc, blob,
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ }
+}
+
+static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
+ i9xx_load_luts(crtc_state);
+ } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+ bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
+ } else {
+ const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+ bdw_load_lut_10(crtc, blob,
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
}
}
@@ -469,7 +680,8 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = 33;
+ const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
u32 i;
/*
@@ -480,39 +692,95 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ for (i = 0; i < lut_size; i++) {
+ /*
+ * The first 33 entries represent the range from 0 to 1.0;
+ * the 34th and 35th entries represent the extended-range
+ * inputs 3.0 and 7.0 respectively, currently clamped
+ * at 1.0. Since the precision is 16 bit, the user
+ * value can be written to the register directly.
+ * The pipe degamma table on GLK+ doesn't support
+ * different values per channel, so this just programs
+ * the green value, which is expected to equal red and
+ * blue, into the LUT registers.
+ * ToDo: Extend to max 7.0 by accepting a 32 bit input
+ * value instead of just 16 bit.
+ */
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+ }
+
+ /* Clamp values > 1.0. */
+ while (i++ < 35)
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+}
+
+static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ u32 i;
+
/*
- * FIXME: The pipe degamma table in geminilake doesn't support
- * different values per channel, so this just loads a linear table.
+ * When setting the auto-increment bit, the hardware seems to
+ * ignore the index bits, so we need to reset it to index 0
+ * separately.
*/
+ I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
+ I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+
for (i = 0; i < lut_size; i++) {
- u32 v = (i * (1 << 16)) / (lut_size - 1);
+ u32 v = (i << 16) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
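The linear table written here is just an evenly spaced ramp in 0.16 fixed point that ends exactly at 1.0, with the two extended-range slots clamped to 1.0 as well. A quick stand-alone check of the values, assuming the 33-entry GLK+ degamma LUT size:

#include <stdio.h>

int main(void)
{
	const unsigned int lut_size = 33;  /* GLK+ degamma LUT size (assumed) */
	unsigned int i;

	/* Same value the loop writes to PRE_CSC_GAMC_DATA: an even ramp in
	 * 0.16 fixed point; entry 32 is exactly 1 << 16, i.e. 1.0. */
	for (i = 0; i < lut_size; i++) {
		unsigned int v = (i << 16) / (lut_size - 1);

		if (i % 8 == 0 || i == lut_size - 1)
			printf("entry %2u = 0x%05x\n", i, v);
	}

	/* The remaining extended-range entries (34th and 35th) are clamped. */
	while (i++ < 35)
		printf("clamp    = 0x%05x\n", 1 << 16);
	return 0;
}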
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- glk_load_degamma_lut(crtc_state);
+ /*
+ * On GLK+ both pipe CSC and degamma LUT are controlled
+ * by csc_enable. Hence, for the cases where the CSC is
+ * needed but the degamma LUT is not, we need to load a
+ * linear degamma LUT. In fact we'll just always load
+ * the degamma LUT so that we don't have to reload
+ * it every time the pipe CSC is being enabled.
+ */
+ if (crtc_state->base.degamma_lut)
+ glk_load_degamma_lut(crtc_state);
+ else
+ glk_load_degamma_lut_linear(crtc_state);
- if (crtc_state_is_legacy_gamma(crtc_state)) {
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
} else {
- bdw_load_gamma_lut(crtc_state, 0);
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ }
+}
- /*
- * Reset the index, otherwise it prevents the legacy palette to be
- * written properly.
- */
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+static void icl_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ if (crtc_state->base.degamma_lut)
+ glk_load_degamma_lut(crtc_state);
+
+ if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_8BIT) {
+ i9xx_load_luts(crtc_state);
+ } else {
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
}
}
@@ -527,7 +795,7 @@ static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
cherryview_load_csc_matrix(crtc_state);
if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts_internal(crtc_state, gamma_lut);
+ i9xx_load_luts(crtc_state);
return;
}
@@ -566,12 +834,6 @@ static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
}
}
-
- /*
- * Also program a linear LUT in the legacy block (behind the
- * CGM block).
- */
- i9xx_load_luts_internal(crtc_state, NULL);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -585,8 +847,64 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- if (dev_priv->display.color_commit)
- dev_priv->display.color_commit(crtc_state);
+ dev_priv->display.color_commit(crtc_state);
+}
+
+int intel_color_check(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ return dev_priv->display.color_check(crtc_state);
+}
+
+static bool need_plane_update(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ /*
+ * On pre-SKL the pipe gamma enable and pipe csc enable for
+ * the pipe bottom color are configured via the primary plane.
+ * We have to reconfigure that even if the plane is inactive.
+ */
+ return crtc_state->active_planes & BIT(plane->id) ||
+ (INTEL_GEN(dev_priv) < 9 &&
+ plane->id == PLANE_PRIMARY);
+}
+
+static int
+intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ if (!new_crtc_state->base.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+ return 0;
+
+ if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
+ new_crtc_state->csc_enable == old_crtc_state->csc_enable)
+ return 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if (!need_plane_update(plane, new_crtc_state))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane->id);
+ }
+
+ return 0;
}
static int check_lut_size(const struct drm_property_blob *lut, int expected)
@@ -606,7 +924,7 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
return 0;
}
-int intel_color_check(struct intel_crtc_state *crtc_state)
+static int check_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
@@ -614,17 +932,19 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
int gamma_length, degamma_length;
u32 gamma_tests, degamma_tests;
+ /* Always allow legacy gamma LUT with no further checking. */
+ if (crtc_state_is_legacy_gamma(crtc_state))
+ return 0;
+
+ /* C8 relies on its palette being stored in the legacy LUT */
+ if (crtc_state->c8_planes)
+ return -EINVAL;
+
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
- /* Always allow legacy gamma LUT with no further checking. */
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
- return 0;
- }
-
if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
return -EINVAL;
@@ -633,12 +953,270 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
drm_color_lut_check(gamma_lut, gamma_tests))
return -EINVAL;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
- else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+ return 0;
+}
+
+static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
else
- crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+ return GAMMA_MODE_MODE_10BIT; /* i965+ only */
+}
+
+static int i9xx_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 cgm_mode = 0;
+
+ if (crtc_state_is_legacy_gamma(crtc_state))
+ return 0;
+
+ if (crtc_state->base.degamma_lut)
+ cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
+ if (crtc_state->base.ctm)
+ cgm_mode |= CGM_PIPE_MODE_CSC;
+ if (crtc_state->base.gamma_lut)
+ cgm_mode |= CGM_PIPE_MODE_GAMMA;
+
+ return cgm_mode;
+}
+
+/*
+ * CHV color pipeline:
+ * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma ->
+ * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10
+ *
+ * We always bypass the WGC csc and use the CGM csc
+ * instead since it has degamma and better precision.
+ */
+static int chv_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Pipe gamma will be used only for the legacy LUT.
+ * Otherwise we bypass it and use the CGM gamma instead.
+ */
+ crtc_state->gamma_enable =
+ crtc_state_is_legacy_gamma(crtc_state) &&
+ !crtc_state->c8_planes;
+
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+
+ crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int ilk_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ /*
+ * We don't expose the ctm on ilk/snb currently,
+ * nor do we enable YCbCr output. Also RGB limited
+ * range output is handled by the hw automagically.
+ */
+ crtc_state->csc_enable = false;
+
+ crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = 0;
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else if (crtc_state->base.gamma_lut &&
+ crtc_state->base.degamma_lut)
+ return GAMMA_MODE_MODE_SPLIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+
+ /*
+ * CSC comes after the LUT in degamma, RGB->YCbCr,
+ * and RGB full->limited range mode.
+ */
+ if (crtc_state->base.degamma_lut ||
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ limited_color_range)
+ return 0;
+
+ return CSC_POSITION_BEFORE_GAMMA;
+}
+
+static int ivb_color_check(struct intel_crtc_state *crtc_state)
+{
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ (crtc_state->base.gamma_lut ||
+ crtc_state->base.degamma_lut) &&
+ !crtc_state->c8_planes;
+
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->base.ctm || limited_color_range;
+
+ crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = ivb_csc_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int glk_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+	/* On GLK+ the degamma LUT is controlled by csc_enable */
+ crtc_state->csc_enable =
+ crtc_state->base.degamma_lut ||
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->base.ctm || crtc_state->limited_color_range;
+
+ crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = 0;
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 gamma_mode = 0;
+
+ if (crtc_state->base.degamma_lut)
+ gamma_mode |= PRE_CSC_GAMMA_ENABLE;
+
+ if (crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes)
+ gamma_mode |= POST_CSC_GAMMA_ENABLE;
+
+ if (!crtc_state->base.gamma_lut ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ gamma_mode |= GAMMA_MODE_MODE_8BIT;
+ else
+ gamma_mode |= GAMMA_MODE_MODE_10BIT;
+
+ return gamma_mode;
+}
+
+static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 csc_mode = 0;
+
+ if (crtc_state->base.ctm)
+ csc_mode |= ICL_CSC_ENABLE;
+
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->limited_color_range)
+ csc_mode |= ICL_OUTPUT_CSC_ENABLE;
+
+ return csc_mode;
+}
+
+static int icl_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_mode = icl_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = icl_csc_mode(crtc_state);
return 0;
}
@@ -646,30 +1224,55 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.load_luts = cherryview_load_luts;
- } else if (IS_HASWELL(dev_priv)) {
- dev_priv->display.load_luts = i9xx_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
- } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
- IS_BROXTON(dev_priv)) {
- dev_priv->display.load_luts = broadwell_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
- } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.load_luts = glk_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.color_check = chv_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = cherryview_load_luts;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ dev_priv->display.color_check = i9xx_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = i965_load_luts;
+ } else {
+ dev_priv->display.color_check = i9xx_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = i9xx_load_luts;
+ }
} else {
- dev_priv->display.load_luts = i9xx_load_luts;
+ if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.color_check = icl_color_check;
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ dev_priv->display.color_check = glk_color_check;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ dev_priv->display.color_check = ivb_color_check;
+ else
+ dev_priv->display.color_check = ilk_color_check;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ dev_priv->display.color_commit = skl_color_commit;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ dev_priv->display.color_commit = hsw_color_commit;
+ else
+ dev_priv->display.color_commit = ilk_color_commit;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.load_luts = icl_load_luts;
+ else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ dev_priv->display.load_luts = glk_load_luts;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ dev_priv->display.load_luts = bdw_load_luts;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ dev_priv->display.load_luts = ivb_load_luts;
+ else
+ dev_priv->display.load_luts = ilk_load_luts;
}
- /* Enable color management support when we have degamma & gamma LUTs. */
- if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
- INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
- drm_crtc_enable_color_mgmt(&crtc->base,
- INTEL_INFO(dev_priv)->color.degamma_lut_size,
- true,
- INTEL_INFO(dev_priv)->color.gamma_lut_size);
+ drm_crtc_enable_color_mgmt(&crtc->base,
+ INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ has_ctm,
+ INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
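
For context, the color_check() hooks installed here validate state that reaches the driver through the standard DRM color management properties (DEGAMMA_LUT, CTM, GAMMA_LUT) exposed by drm_crtc_enable_color_mgmt(). A rough userspace sketch of submitting a gamma LUT and a CTM over the atomic API; fd, crtc_id and the two property ids are placeholders assumed to have been discovered beforehand (for example with drmModeObjectGetProperties()):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* fd, crtc_id, gamma_lut_prop and ctm_prop are assumed to be known. */
    static int push_color_state(int fd, uint32_t crtc_id,
                                uint32_t gamma_lut_prop, uint32_t ctm_prop)
    {
            /* 256 entries for brevity; size should follow the CRTC's GAMMA_LUT_SIZE. */
            struct drm_color_lut lut[256];
            struct drm_color_ctm ctm;
            uint32_t lut_blob = 0, ctm_blob = 0;
            drmModeAtomicReq *req;
            int i, ret;

            for (i = 0; i < 256; i++) {     /* identity ramp */
                    uint16_t v = (i << 8) | i;
                    lut[i].red = lut[i].green = lut[i].blue = v;
                    lut[i].reserved = 0;
            }

            memset(&ctm, 0, sizeof(ctm));   /* identity matrix, S31.32 fixed point */
            ctm.matrix[0] = ctm.matrix[4] = ctm.matrix[8] = 1ull << 32;

            drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &lut_blob);
            drmModeCreatePropertyBlob(fd, &ctm, sizeof(ctm), &ctm_blob);

            req = drmModeAtomicAlloc();
            drmModeAtomicAddProperty(req, crtc_id, gamma_lut_prop, lut_blob);
            drmModeAtomicAddProperty(req, crtc_id, ctm_prop, ctm_blob);
            ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
            drmModeAtomicFree(req);
            return ret;
    }
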
diff --git a/drivers/gpu/drm/i915/intel_color.h b/drivers/gpu/drm/i915/intel_color.h
new file mode 100644
index 000000000000..b8a3ce609587
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_color.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_H__
+#define __INTEL_COLOR_H__
+
+struct intel_crtc_state;
+struct intel_crtc;
+
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_commit(const struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
index 3d0271cebf99..2bf4359d7e41 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -239,7 +239,8 @@ void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
for_each_combo_port_reverse(dev_priv, port) {
u32 val;
- if (!icl_combo_phy_verify_state(dev_priv, port))
+ if (port == PORT_A &&
+ !icl_combo_phy_verify_state(dev_priv, port))
DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
port_name(port));
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index ee16758747c5..073b6c3ab7cc 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -23,12 +23,17 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_panel.h"
int intel_connector_init(struct intel_connector *connector)
{
@@ -88,6 +93,8 @@ void intel_connector_destroy(struct drm_connector *connector)
kfree(intel_connector->detect_edid);
+ intel_hdcp_cleanup(intel_connector);
+
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
@@ -265,3 +272,11 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
connector->dev->mode_config.aspect_ratio_property,
DRM_MODE_PICTURE_ASPECT_NONE);
}
+
+void
+intel_attach_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
+}
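
The Colorspace property attached above is driven from userspace in the same way as the CRTC color properties: look the property up by name on the connector, then set it in an atomic commit. A hedged sketch of the lookup half (libdrm calls only, error handling trimmed):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Find a connector property by name; returns 0 if not found. */
    static uint32_t find_prop(int fd, uint32_t conn_id, const char *name,
                              uint64_t *value)
    {
            drmModeObjectProperties *props =
                    drmModeObjectGetProperties(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR);
            uint32_t id = 0;
            uint32_t i;

            for (i = 0; props && i < props->count_props; i++) {
                    drmModePropertyRes *p = drmModeGetProperty(fd, props->props[i]);

                    if (p && !strcmp(p->name, name)) {
                            id = p->prop_id;
                            if (value)
                                    *value = props->prop_values[i];
                    }
                    drmModeFreeProperty(p);
            }
            drmModeFreeObjectProperties(props);
            return id;
    }

The id returned here can then be passed to drmModeAtomicAddProperty() against the connector object, using one of the enum values the property reports.
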
diff --git a/drivers/gpu/drm/i915/intel_connector.h b/drivers/gpu/drm/i915/intel_connector.h
new file mode 100644
index 000000000000..93a7375c8196
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_connector.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONNECTOR_H__
+#define __INTEL_CONNECTOR_H__
+
+#include "intel_display.h"
+
+struct drm_connector;
+struct edid;
+struct i2c_adapter;
+struct intel_connector;
+struct intel_encoder;
+
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+void intel_attach_colorspace_property(struct drm_connector *connector);
+
+#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
new file mode 100644
index 000000000000..924cc556223a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -0,0 +1,270 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_context.h"
+#include "i915_globals.h"
+#include "intel_context.h"
+#include "intel_ringbuffer.h"
+
+static struct i915_global_context {
+ struct i915_global base;
+ struct kmem_cache *slab_ce;
+} global;
+
+struct intel_context *intel_context_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+}
+
+void intel_context_free(struct intel_context *ce)
+{
+ kmem_cache_free(global.slab_ce, ce);
+}
+
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = NULL;
+ struct rb_node *p;
+
+ spin_lock(&ctx->hw_contexts_lock);
+ p = ctx->hw_contexts.rb_node;
+ while (p) {
+ struct intel_context *this =
+ rb_entry(p, struct intel_context, node);
+
+ if (this->engine == engine) {
+ GEM_BUG_ON(this->gem_context != ctx);
+ ce = this;
+ break;
+ }
+
+ if (this->engine < engine)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
+ spin_unlock(&ctx->hw_contexts_lock);
+
+ return ce;
+}
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
+{
+ struct rb_node **p, *parent;
+ int err = 0;
+
+ spin_lock(&ctx->hw_contexts_lock);
+
+ parent = NULL;
+ p = &ctx->hw_contexts.rb_node;
+ while (*p) {
+ struct intel_context *this;
+
+ parent = *p;
+ this = rb_entry(parent, struct intel_context, node);
+
+ if (this->engine == engine) {
+ err = -EEXIST;
+ ce = this;
+ break;
+ }
+
+ if (this->engine < engine)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ if (!err) {
+ rb_link_node(&ce->node, parent, p);
+ rb_insert_color(&ce->node, &ctx->hw_contexts);
+ }
+
+ spin_unlock(&ctx->hw_contexts_lock);
+
+ return ce;
+}
+
+void __intel_context_remove(struct intel_context *ce)
+{
+ struct i915_gem_context *ctx = ce->gem_context;
+
+ spin_lock(&ctx->hw_contexts_lock);
+ rb_erase(&ce->node, &ctx->hw_contexts);
+ spin_unlock(&ctx->hw_contexts_lock);
+}
+
+static struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce, *pos;
+
+ ce = intel_context_lookup(ctx, engine);
+ if (likely(ce))
+ return ce;
+
+ ce = intel_context_alloc();
+ if (!ce)
+ return ERR_PTR(-ENOMEM);
+
+ intel_context_init(ce, ctx, engine);
+
+ pos = __intel_context_insert(ctx, engine, ce);
+ if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
+ intel_context_free(ce);
+
+ GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
+ return pos;
+}
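
intel_context_instance() above uses a familiar pattern: allocate optimistically without holding the lock, try to publish under the lock, and free the local copy if another thread won the race. A standalone, hedged illustration of the same idea with a plain mutex-protected list (pthreads, purely illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
            int key;
            struct node *next;
    };

    static struct node *head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Return the node for 'key', creating it if needed. */
    static struct node *get_or_create(int key)
    {
            struct node *n, *pos;

            /* Optimistic allocation outside the lock. */
            n = calloc(1, sizeof(*n));
            if (!n)
                    return NULL;
            n->key = key;

            pthread_mutex_lock(&lock);
            for (pos = head; pos; pos = pos->next) {
                    if (pos->key == key)
                            break;
            }
            if (!pos) {                     /* we won the race: publish ours */
                    n->next = head;
                    head = n;
                    pos = n;
            }
            pthread_mutex_unlock(&lock);

            if (pos != n)                   /* beaten: drop the local copy */
                    free(n);
            return pos;
    }
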
+
+struct intel_context *
+intel_context_pin_lock(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+ __acquires(ce->pin_mutex)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return ERR_PTR(-EINTR);
+
+ return ce;
+}
+
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ return ce;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return ERR_PTR(-EINTR);
+
+ if (likely(!atomic_read(&ce->pin_count))) {
+ err = ce->ops->pin(ce);
+ if (err)
+ goto err;
+
+ i915_gem_context_get(ctx);
+ GEM_BUG_ON(ce->gem_context != ctx);
+
+ mutex_lock(&ctx->mutex);
+ list_add(&ce->active_link, &ctx->active_engines);
+ mutex_unlock(&ctx->mutex);
+
+ intel_context_get(ce);
+ smp_mb__before_atomic(); /* flush pin before it is visible */
+ }
+
+ atomic_inc(&ce->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+
+ mutex_unlock(&ce->pin_mutex);
+ return ce;
+
+err:
+ mutex_unlock(&ce->pin_mutex);
+ return ERR_PTR(err);
+}
+
+void intel_context_unpin(struct intel_context *ce)
+{
+ if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ return;
+
+ /* We may be called from inside intel_context_pin() to evict another */
+ intel_context_get(ce);
+ mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
+
+ if (likely(atomic_dec_and_test(&ce->pin_count))) {
+ ce->ops->unpin(ce);
+
+ mutex_lock(&ce->gem_context->mutex);
+ list_del(&ce->active_link);
+ mutex_unlock(&ce->gem_context->mutex);
+
+ i915_gem_context_put(ce->gem_context);
+ intel_context_put(ce);
+ }
+
+ mutex_unlock(&ce->pin_mutex);
+ intel_context_put(ce);
+}
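
The pin/unpin pair relies on a lock-free fast path over pin_count (atomic_inc_not_zero on pin, atomic_add_unless on unpin) and takes pin_mutex only for the 0-to-1 and 1-to-0 transitions, where the context actually has to be bound or unbound. A hedged userspace approximation of that structure using C11 atomics and a pthread mutex:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct object {
            atomic_int pin_count;
            pthread_mutex_t pin_mutex;  /* initialise with pthread_mutex_init() */
    };

    static void do_bind(struct object *o)   { (void)o; /* expensive setup */ }
    static void do_unbind(struct object *o) { (void)o; /* expensive teardown */ }

    static bool obj_pin(struct object *o)
    {
            int old = atomic_load(&o->pin_count);

            /* Fast path: already pinned, just bump the count. */
            while (old > 0) {
                    if (atomic_compare_exchange_weak(&o->pin_count, &old, old + 1))
                            return true;
            }

            /* Slow path: serialize the 0 -> 1 transition. */
            pthread_mutex_lock(&o->pin_mutex);
            if (atomic_load(&o->pin_count) == 0)
                    do_bind(o);
            atomic_fetch_add(&o->pin_count, 1);
            pthread_mutex_unlock(&o->pin_mutex);
            return true;
    }

    static void obj_unpin(struct object *o)
    {
            int old = atomic_load(&o->pin_count);

            /* Fast path: not the last pin, just decrement. */
            while (old > 1) {
                    if (atomic_compare_exchange_weak(&o->pin_count, &old, old - 1))
                            return;
            }

            /* Slow path: the 1 -> 0 transition tears the binding down. */
            pthread_mutex_lock(&o->pin_mutex);
            if (atomic_fetch_sub(&o->pin_count, 1) == 1)
                    do_unbind(o);
            pthread_mutex_unlock(&o->pin_mutex);
    }
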
+
+static void intel_context_retire(struct i915_active_request *active,
+ struct i915_request *rq)
+{
+ struct intel_context *ce =
+ container_of(active, typeof(*ce), active_tracker);
+
+ intel_context_unpin(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ kref_init(&ce->ref);
+
+ ce->gem_context = ctx;
+ ce->engine = engine;
+ ce->ops = engine->cops;
+ ce->saturated = 0;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ /* Use the whole device by default */
+ ce->sseu = intel_device_default_sseu(ctx->i915);
+
+ i915_active_request_init(&ce->active_tracker,
+ NULL, intel_context_retire);
+}
+
+static void i915_global_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_ce);
+}
+
+static void i915_global_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_ce);
+}
+
+static struct i915_global_context global = { {
+ .shrink = i915_global_context_shrink,
+ .exit = i915_global_context_exit,
+} };
+
+int __init i915_global_context_init(void)
+{
+ global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_ce)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
new file mode 100644
index 000000000000..ebc861b1a49e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.h
@@ -0,0 +1,87 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_H__
+#define __INTEL_CONTEXT_H__
+
+#include <linux/lockdep.h>
+
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+
+struct intel_context *intel_context_alloc(void);
+void intel_context_free(struct intel_context *ce);
+
+void intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+/**
+ * intel_context_lookup - Find the matching HW context for this (ctx, engine)
+ * @ctx: the parent GEM context
+ * @engine: the target HW engine
+ *
+ * May return NULL if the HW context hasn't been instantiated (i.e. unused).
+ */
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+/**
+ * intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
+ * @ctx: the parent GEM context
+ * @engine: the target HW engine
+ *
+ * Acquire a lock on the pinned status of the HW context, such that the context
+ * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
+ * intel_context_is_pinned() remains stable.
+ */
+struct intel_context *
+intel_context_pin_lock(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+static inline bool
+intel_context_is_pinned(struct intel_context *ce)
+{
+ return atomic_read(&ce->pin_count);
+}
+
+static inline void intel_context_pin_unlock(struct intel_context *ce)
+__releases(ce->pin_mutex)
+{
+ mutex_unlock(&ce->pin_mutex);
+}
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce);
+void
+__intel_context_remove(struct intel_context *ce);
+
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+
+static inline void __intel_context_pin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ atomic_inc(&ce->pin_count);
+}
+
+void intel_context_unpin(struct intel_context *ce);
+
+static inline struct intel_context *intel_context_get(struct intel_context *ce)
+{
+ kref_get(&ce->ref);
+ return ce;
+}
+
+static inline void intel_context_put(struct intel_context *ce)
+{
+ kref_put(&ce->ref, ce->ops->destroy);
+}
+
+#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
new file mode 100644
index 000000000000..339c7437fe82
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -0,0 +1,77 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_TYPES__
+#define __INTEL_CONTEXT_TYPES__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+#include "intel_engine_types.h"
+
+struct i915_gem_context;
+struct i915_vma;
+struct intel_context;
+struct intel_ring;
+
+struct intel_context_ops {
+ int (*pin)(struct intel_context *ce);
+ void (*unpin)(struct intel_context *ce);
+
+ void (*reset)(struct intel_context *ce);
+ void (*destroy)(struct kref *kref);
+};
+
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 min_eus_per_subslice;
+ u8 max_eus_per_subslice;
+};
+
+struct intel_context {
+ struct kref ref;
+
+ struct i915_gem_context *gem_context;
+ struct intel_engine_cs *engine;
+ struct intel_engine_cs *active;
+
+ struct list_head active_link;
+ struct list_head signal_link;
+ struct list_head signals;
+
+ struct i915_vma *state;
+ struct intel_ring *ring;
+
+ u32 *lrc_reg_state;
+ u64 lrc_desc;
+
+ atomic_t pin_count;
+ struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
+
+ intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
+ /**
+ * active_tracker: Active tracker for the external rq activity
+ * on this intel_context object.
+ */
+ struct i915_active_request active_tracker;
+
+ const struct intel_context_ops *ops;
+ struct rb_node node;
+
+ /** sseu: Control eu/slice partitioning */
+ struct intel_sseu sseu;
+};
+
+#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3716b2ee362f..b665c370111b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -27,13 +27,18 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_crt.h"
+#include "intel_ddi.h"
+#include "intel_drv.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -435,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000))
@@ -489,7 +494,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000)) {
@@ -542,7 +547,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
diff --git a/drivers/gpu/drm/i915/intel_crt.h b/drivers/gpu/drm/i915/intel_crt.h
new file mode 100644
index 000000000000..1b3fba359efc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_crt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CRT_H__
+#define __INTEL_CRT_H__
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_encoder;
+struct drm_i915_private;
+
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe);
+void intel_crt_init(struct drm_i915_private *dev_priv);
+void intel_crt_reset(struct drm_encoder *encoder);
+
+#endif /* __INTEL_CRT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index e8ac04c33e29..f43c2a2563a5 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -21,9 +21,12 @@
* IN THE SOFTWARE.
*
*/
+
#include <linux/firmware.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_csr.h"
/**
* DOC: csr support for dmc
@@ -486,7 +489,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12) {
 		/* Allow loading fw via parameter using the last known size */
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
- } else if (IS_ICELAKE(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 11)) {
csr->fw_path = ICL_CSR_PATH;
csr->required_version = ICL_CSR_VERSION_REQUIRED;
csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/intel_csr.h
new file mode 100644
index 000000000000..17a32c1e8a35
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_csr.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CSR_H__
+#define __INTEL_CSR_H__
+
+struct drm_i915_private;
+
+void intel_csr_ucode_init(struct drm_i915_private *i915);
+void intel_csr_load_program(struct drm_i915_private *i915);
+void intel_csr_ucode_fini(struct drm_i915_private *i915);
+void intel_csr_ucode_suspend(struct drm_i915_private *i915);
+void intel_csr_ucode_resume(struct drm_i915_private *i915);
+
+#endif /* __INTEL_CSR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 14d580cdefd3..f181c26f62fd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -26,9 +26,19 @@
*/
#include <drm/drm_scdc_helper.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -851,7 +861,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
0, &n_entries);
@@ -1240,24 +1250,15 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
-static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
{
- i915_reg_t cfgcr1_reg, cfgcr2_reg;
- u32 cfgcr1_val, cfgcr2_val;
u32 p0, p1, p2, dco_freq;
- cfgcr1_reg = DPLL_CFGCR1(pll_id);
- cfgcr2_reg = DPLL_CFGCR2(pll_id);
-
- cfgcr1_val = I915_READ(cfgcr1_reg);
- cfgcr2_val = I915_READ(cfgcr2_reg);
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
- p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
- p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
-
- if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
@@ -1292,10 +1293,11 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
break;
}
- dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
+ * 24 * 1000;
- dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
- 1000) / 0x8000;
+ dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
+ * 24 * 1000) / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
return 0;
@@ -1304,24 +1306,15 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
}
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+ struct intel_dpll_hw_state *pll_state)
{
- u32 cfgcr0, cfgcr1;
u32 p0, p1, p2, dco_freq, ref_clock;
- if (INTEL_GEN(dev_priv) >= 11) {
- cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
- } else {
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
- }
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
- p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
-
- if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
else
p1 = 1;
@@ -1349,16 +1342,17 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
case DPLL_CFGCR1_KDIV_2:
p2 = 2;
break;
- case DPLL_CFGCR1_KDIV_4:
- p2 = 4;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
break;
}
ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
- dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
+ * ref_clock;
- dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
@@ -1390,25 +1384,21 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
- enum port port)
+ const struct intel_dpll_hw_state *pll_state)
{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 mg_pll_div0, mg_clktop_hsclkctl;
- u32 m1, m2_int, m2_frac, div1, div2, refclk;
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
- refclk = dev_priv->cdclk.hw.ref;
-
- mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ ref_clock = dev_priv->cdclk.hw.ref;
- m1 = I915_READ(MG_PLL_DIV1(tc_port)) & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
- (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
- MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+ m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
+ (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
+ MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
- switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
div1 = 2;
break;
@@ -1422,12 +1412,14 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
div1 = 7;
break;
default:
- MISSING_CASE(mg_clktop_hsclkctl);
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
return 0;
}
- div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
 	/* A div2 value of 0 is the same as 1, i.e. no division */
if (div2 == 0)
div2 = 1;
@@ -1436,8 +1428,8 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
* Adjust the original formula to delay the division by 2^22 in order to
* minimize possible rounding errors.
*/
- tmp = (u64)m1 * m2_int * refclk +
- (((u64)m1 * m2_frac * refclk) >> 22);
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
tmp = div_u64(tmp, 5 * div1 * div2);
return tmp;
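
With the register reads replaced by the cached dpll_hw_state, the MG PLL link rate above reduces to link_clock = ref * M1 * (M2_int + M2_frac / 2^22) / (5 * div1 * div2), with the division by 2^22 deferred exactly as the comment explains. A tiny standalone check of that arithmetic; the field values are made up purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mg_pll_link(uint32_t ref_khz, uint32_t m1,
                                uint32_t m2_int, uint32_t m2_frac,
                                uint32_t div1, uint32_t div2)
    {
            /* Same formula as the driver: delay the /2^22 to limit rounding error. */
            uint64_t tmp = (uint64_t)m1 * m2_int * ref_khz +
                           (((uint64_t)m1 * m2_frac * ref_khz) >> 22);

            return tmp / (5ull * div1 * div2);
    }

    int main(void)
    {
            /* Illustrative only: ref 38400 kHz, m1=4, m2=16.5, div1=2, div2=1. */
            printf("link clock: %llu kHz\n",
                   (unsigned long long)mg_pll_link(38400, 4, 16, 1u << 21, 2, 1));
            return 0;
    }
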
@@ -1471,25 +1463,24 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
enum port port = encoder->port;
- int link_clock = 0;
- u32 pll_id;
+ int link_clock;
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (intel_port_is_combophy(dev_priv, port)) {
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
- else
- link_clock = icl_calc_dp_combo_pll_link(dev_priv,
- pll_id);
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
+ enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
+ pipe_config->shared_dpll);
+
if (pll_id == DPLL_ID_ICL_TBTPLL)
link_clock = icl_calc_tbt_pll_link(dev_priv, port);
else
- link_clock = icl_calc_mg_pll_link(dev_priv, port);
+ link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
}
pipe_config->port_clock = link_clock;
+
ddi_dotclock_get(pipe_config);
}
@@ -1497,18 +1488,13 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 cfgcr0;
- enum intel_dpll_id pll_id;
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
+ int link_clock;
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
-
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
-
- if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
- link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
+ link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
switch (link_clock) {
case DPLL_CFGCR0_LINK_RATE_810:
@@ -1548,22 +1534,20 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
}
static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 dpll_ctl1;
- enum intel_dpll_id pll_id;
-
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
-
- dpll_ctl1 = I915_READ(DPLL_CTRL1);
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
+ int link_clock;
- if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) {
- link_clock = skl_calc_wrpll_link(dev_priv, pll_id);
+ /*
+	 * The ctrl1 register is already shifted for each pll; just use 0 to get
+	 * the internal shift for each field.
+ */
+ if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
+ link_clock = skl_calc_wrpll_link(pll_state);
} else {
- link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id);
+ link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
+ link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
switch (link_clock) {
case DPLL_CTRL1_LINK_RATE_810:
@@ -1643,24 +1627,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
ddi_dotclock_get(pipe_config);
}
-static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
+static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
{
- struct intel_dpll_hw_state *state;
struct dpll clock;
- /* For DDI ports we always use a shared PLL. */
- if (WARN_ON(!crtc_state->shared_dpll))
- return 0;
-
- state = &crtc_state->dpll_hw_state;
-
clock.m1 = 2;
- clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
- if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+ clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
return chv_calc_dpll_params(100000, &clock);
}
@@ -1668,7 +1645,8 @@ static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- pipe_config->port_clock = bxt_calc_pll_link(pipe_config);
+ pipe_config->port_clock =
+ bxt_calc_pll_link(&pipe_config->dpll_hw_state);
ddi_dotclock_get(pipe_config);
}
@@ -1678,7 +1656,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
@@ -1911,7 +1889,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (port == PORT_A)
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
@@ -1973,7 +1951,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (port == PORT_A) {
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
@@ -2075,12 +2053,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
intel_aux_power_domain(dig_port);
}
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
- u64 domains;
/*
* TODO: Add support for MST encoders. Atm, the following should never
@@ -2088,10 +2065,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
* hook.
*/
if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
- return 0;
+ return;
dig_port = enc_to_dig_port(&encoder->base);
- domains = BIT_ULL(dig_port->ddi_io_power_domain);
+ intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2099,15 +2076,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
- domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
/*
* VDSC power is needed when DSC is enabled
*/
if (crtc_state->dsc_params.compression_enable)
- domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
-
- return domains;
+ intel_display_power_get(dev_priv,
+ intel_dsc_power_domain(crtc_state));
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2225,7 +2202,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum port port = encoder->port;
int n_entries;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, encoder->type,
intel_dp->link_rate, &n_entries);
@@ -2317,13 +2294,13 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
/* Program PORT_TX_DW4 */
 	/* We cannot write to GRP. It would overwrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW5 */
@@ -2379,14 +2356,14 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@@ -2448,13 +2425,13 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW7 */
@@ -2505,14 +2482,14 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@@ -2555,33 +2532,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(port, ln));
+ val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val);
+ I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(port, ln));
+ val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val);
+ I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(port, ln));
+ val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val);
+ I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(port, ln));
+ val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val);
+ I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(port, ln));
+ val = I915_READ(MG_TX1_DRVCTRL(ln, port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2589,9 +2566,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(port, ln), val);
+ I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
- val = I915_READ(MG_TX2_DRVCTRL(port, ln));
+ val = I915_READ(MG_TX2_DRVCTRL(ln, port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2599,7 +2576,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(port, ln), val);
+ I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2610,17 +2587,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(port, ln));
+ val = I915_READ(MG_CLKHUB(ln, port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(port, ln), val);
+ I915_WRITE(MG_CLKHUB(ln, port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(port, ln));
+ val = I915_READ(MG_TX1_DCC(ln, port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2628,9 +2605,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(port, ln), val);
+ I915_WRITE(MG_TX1_DCC(ln, port), val);
- val = I915_READ(MG_TX2_DCC(port, ln));
+ val = I915_READ(MG_TX2_DCC(ln, port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2638,18 +2615,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(port, ln), val);
+ I915_WRITE(MG_TX2_DCC(ln, port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(port, ln));
+ val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val);
+ I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
- val = I915_READ(MG_TX2_PISO_READLOAD(port, ln));
+ val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val);
+ I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
}
}
@@ -2698,7 +2675,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -2825,10 +2802,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
return;
}
/*
- * DSI ports should have their DDI clock ungated when disabled
- * and gated when enabled.
+		 * For DSI we keep the DDI clocks gated
+		 * except during the enable/disable sequence.
*/
- ddi_clk_needed = !encoder->base.crtc;
+ ddi_clk_needed = false;
}
val = I915_READ(DPCLKA_CFGCR0_ICL);
@@ -2867,7 +2844,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
@@ -2909,7 +2886,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2928,21 +2905,20 @@ static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
u32 val;
- int i;
+ int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_DP_MODE(ln, port));
val |= MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(mg_regs[i], val);
+ I915_WRITE(MG_DP_MODE(ln, port), val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
@@ -2961,21 +2937,20 @@ static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
u32 val;
- int i;
+ int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_DP_MODE(ln, port));
val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(mg_regs[i], val);
+ I915_WRITE(MG_DP_MODE(ln, port), val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
@@ -2999,8 +2974,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
return;
- ln0 = I915_READ(MG_DP_MODE(port, 0));
- ln1 = I915_READ(MG_DP_MODE(port, 1));
+ ln0 = I915_READ(MG_DP_MODE(0, port));
+ ln1 = I915_READ(MG_DP_MODE(1, port));
switch (intel_dig_port->tc_type) {
case TC_PORT_TYPEC:
@@ -3050,8 +3025,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
return;
}
- I915_WRITE(MG_DP_MODE(port, 0), ln0);
- I915_WRITE(MG_DP_MODE(port, 1), ln1);
+ I915_WRITE(MG_DP_MODE(0, port), ln0);
+ I915_WRITE(MG_DP_MODE(1, port), ln1);
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -3078,7 +3053,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
- if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_FEC_ENABLE_LIVE,
DP_TP_STATUS_FEC_ENABLE_LIVE,
1))
@@ -3126,7 +3101,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -3175,7 +3150,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@@ -3556,7 +3531,9 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_psr_enable(intel_dp, crtc_state);
+ intel_ddi_set_pipe_settings(crtc_state);
+
+ intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
intel_panel_update_backlight(encoder, crtc_state, conn_state);
@@ -3711,7 +3688,7 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
@@ -3764,7 +3741,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
- if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
if (temp & TRANS_DDI_HDMI_SCRAMBLING)
@@ -3828,6 +3808,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
}
static enum intel_output_type
@@ -3856,21 +3848,23 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
enum port port = encoder->port;
int ret;
- if (port == PORT_A)
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+ if (ret)
+ return ret;
- if (IS_GEN9_LP(dev_priv) && ret)
+ if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
- return ret;
+ return 0;
}
@@ -3958,15 +3952,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
goto out;
}
- crtc_state->mode_changed = true;
-
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- goto out;
-
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- goto out;
+ crtc_state->connectors_changed = true;
ret = drm_atomic_commit(state);
out:
diff --git a/drivers/gpu/drm/i915/intel_ddi.h b/drivers/gpu/drm/i915/intel_ddi.h
new file mode 100644
index 000000000000..9cf69175942e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DDI_H__
+#define __INTEL_DDI_H__
+
+#include <drm/i915_drm.h>
+
+#include "intel_display.h"
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_dpll_hw_state;
+struct intel_encoder;
+
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+void hsw_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
+void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+ bool state);
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state);
+u32 bxt_signal_levels(struct intel_dp *intel_dp);
+u32 ddi_signal_levels(struct intel_dp *intel_dp);
+u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+ u8 voltage_swing);
+int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ bool enable);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *state);
+
+#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 855a5074ad77..6af480b95bc6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -57,6 +57,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(COFFEELAKE),
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
+ PLATFORM_NAME(ELKHARTLAKE),
};
#undef PLATFORM_NAME
@@ -155,9 +156,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
u8 eu_en;
int s;
- sseu->max_slices = 1;
- sseu->max_subslices = 8;
- sseu->max_eus_per_subslice = 8;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ sseu->max_slices = 1;
+ sseu->max_subslices = 4;
+ sseu->max_eus_per_subslice = 8;
+ } else {
+ sseu->max_slices = 1;
+ sseu->max_subslices = 8;
+ sseu->max_eus_per_subslice = 8;
+ }
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
@@ -707,6 +714,99 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
return 0;
}
+#undef INTEL_VGA_DEVICE
+#define INTEL_VGA_DEVICE(id, info) (id)
+
+static const u16 subplatform_ult_ids[] = {
+ INTEL_HSW_ULT_GT1_IDS(0),
+ INTEL_HSW_ULT_GT2_IDS(0),
+ INTEL_HSW_ULT_GT3_IDS(0),
+ INTEL_BDW_ULT_GT1_IDS(0),
+ INTEL_BDW_ULT_GT2_IDS(0),
+ INTEL_BDW_ULT_GT3_IDS(0),
+ INTEL_BDW_ULT_RSVD_IDS(0),
+ INTEL_SKL_ULT_GT1_IDS(0),
+ INTEL_SKL_ULT_GT2_IDS(0),
+ INTEL_SKL_ULT_GT3_IDS(0),
+ INTEL_KBL_ULT_GT1_IDS(0),
+ INTEL_KBL_ULT_GT2_IDS(0),
+ INTEL_KBL_ULT_GT3_IDS(0),
+ INTEL_CFL_U_GT2_IDS(0),
+ INTEL_CFL_U_GT3_IDS(0),
+ INTEL_WHL_U_GT1_IDS(0),
+ INTEL_WHL_U_GT2_IDS(0),
+ INTEL_WHL_U_GT3_IDS(0)
+};
+
+static const u16 subplatform_ulx_ids[] = {
+ INTEL_HSW_ULX_GT1_IDS(0),
+ INTEL_HSW_ULX_GT2_IDS(0),
+ INTEL_BDW_ULX_GT1_IDS(0),
+ INTEL_BDW_ULX_GT2_IDS(0),
+ INTEL_BDW_ULX_GT3_IDS(0),
+ INTEL_BDW_ULX_RSVD_IDS(0),
+ INTEL_SKL_ULX_GT1_IDS(0),
+ INTEL_SKL_ULX_GT2_IDS(0),
+ INTEL_KBL_ULX_GT1_IDS(0),
+ INTEL_KBL_ULX_GT2_IDS(0)
+};
+
+static const u16 subplatform_aml_ids[] = {
+ INTEL_AML_KBL_GT2_IDS(0),
+ INTEL_AML_CFL_GT2_IDS(0)
+};
+
+static const u16 subplatform_portf_ids[] = {
+ INTEL_CNL_PORT_F_IDS(0),
+ INTEL_ICL_PORT_F_IDS(0)
+};
+
+static bool find_devid(u16 id, const u16 *p, unsigned int num)
+{
+ for (; num; num--, p++) {
+ if (*p == id)
+ return true;
+ }
+
+ return false;
+}
+
+void intel_device_info_subplatform_init(struct drm_i915_private *i915)
+{
+ const struct intel_device_info *info = INTEL_INFO(i915);
+ const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(rinfo, info->platform);
+ const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
+ u16 devid = INTEL_DEVID(i915);
+ u32 mask = 0;
+
+ /* Make sure IS_<platform> checks are working. */
+ RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);
+
+ /* Find and mark subplatform bits based on the PCI device id. */
+ if (find_devid(devid, subplatform_ult_ids,
+ ARRAY_SIZE(subplatform_ult_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ULT);
+ } else if (find_devid(devid, subplatform_ulx_ids,
+ ARRAY_SIZE(subplatform_ulx_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ULX);
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ /* ULX machines are also considered ULT. */
+ mask |= BIT(INTEL_SUBPLATFORM_ULT);
+ }
+ } else if (find_devid(devid, subplatform_aml_ids,
+ ARRAY_SIZE(subplatform_aml_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_AML);
+ } else if (find_devid(devid, subplatform_portf_ids,
+ ARRAY_SIZE(subplatform_portf_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_PORTF);
+ }
+
+ GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);
+
+ RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
+}
+
/**
* intel_device_info_runtime_init - initialize runtime info
* @dev_priv: the i915 device
@@ -738,9 +838,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_scalers[PIPE_C] = 1;
}
- BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
+ BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
- if (IS_GEN(dev_priv, 11))
+ if (INTEL_GEN(dev_priv) >= 11)
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 6;
else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
@@ -844,7 +944,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
- info->ppgtt = INTEL_PPGTT_NONE;
+ info->ppgtt_type = INTEL_PPGTT_NONE;
}
/* Initialize command stream timestamp frequency */
@@ -871,23 +971,24 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
unsigned int logical_vdbox = 0;
unsigned int i;
u32 media_fuse;
+ u16 vdbox_mask;
+ u16 vebox_mask;
if (INTEL_GEN(dev_priv) < 11)
return;
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
- RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
- DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
- if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
- info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+ if (!(BIT(i) & vdbox_mask)) {
+ info->engine_mask &= ~BIT(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
continue;
}
@@ -899,15 +1000,20 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
+ DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(dev_priv));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
- DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
- if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
- info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+ if (!(BIT(i) & vebox_mask)) {
+ info->engine_mask &= ~BIT(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
}
+ DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(dev_priv));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
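The media-fuse handling in intel_device_info_init_mmio() above reduces to a small bitmask exercise: read the fuse register, invert it so set bits mean "enabled", split out per-class enable masks, and clear fused-off instances from the engine mask. A minimal standalone sketch of that pattern follows; the register layout, constants and bit positions are invented for illustration and are not the real GEN11 definitions.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical fuse layout for this sketch (not the real GEN11 register):
 * low 8 bits disable VCS0-7, bits 8-11 disable VECS0-3.
 */
#define VDBOX_DISABLE_MASK  0x00ffu
#define VEBOX_DISABLE_MASK  0x0f00u
#define VEBOX_DISABLE_SHIFT 8

#define MAX_VCS  8
#define MAX_VECS 4

/* Bit positions in the sketch's engine mask (not i915's engine ids). */
#define VCS_BIT(i)  (1u << (i))
#define VECS_BIT(i) (1u << (MAX_VCS + (i)))

int main(void)
{
	uint32_t engine_mask = 0;
	unsigned int i;

	/* Start from "all media engines present", like the static device info. */
	for (i = 0; i < MAX_VCS; i++)
		engine_mask |= VCS_BIT(i);
	for (i = 0; i < MAX_VECS; i++)
		engine_mask |= VECS_BIT(i);

	/* Pretend fuse readout: a set bit means the engine is fused off. */
	uint32_t fuse_disable = 0x0302u;	/* VCS1, VECS0 and VECS1 fused off */

	/* Invert once so set bits mean "enabled", as the patch does. */
	uint32_t fuse = ~fuse_disable;
	uint16_t vdbox_mask = fuse & VDBOX_DISABLE_MASK;
	uint16_t vebox_mask = (fuse & VEBOX_DISABLE_MASK) >> VEBOX_DISABLE_SHIFT;

	/* Clear fused-off engines from the engine mask. */
	for (i = 0; i < MAX_VCS; i++)
		if (!(vdbox_mask & (1u << i)))
			engine_mask &= ~VCS_BIT(i);
	for (i = 0; i < MAX_VECS; i++)
		if (!(vebox_mask & (1u << i)))
			engine_mask &= ~VECS_BIT(i);

	printf("vdbox %#x vebox %#x engines %#x\n",
	       (unsigned)vdbox_mask, (unsigned)vebox_mask, (unsigned)engine_mask);
	return 0;
}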
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index e8b8661df746..0e579f158016 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -27,6 +27,7 @@
#include <uapi/drm/i915_drm.h>
+#include "intel_engine_types.h"
#include "intel_display.h"
struct drm_printer;
@@ -73,14 +74,29 @@ enum intel_platform {
INTEL_CANNONLAKE,
/* gen11 */
INTEL_ICELAKE,
+ INTEL_ELKHARTLAKE,
INTEL_MAX_PLATFORMS
};
-enum intel_ppgtt {
+/*
+ * Subplatform bits share the same namespace per parent platform. In other
+ * words, it is fine for the same bit to be used on multiple parent platforms.
+ */
+
+#define INTEL_SUBPLATFORM_BITS (3)
+
+/* HSW/BDW/SKL/KBL/CFL */
+#define INTEL_SUBPLATFORM_ULT (0)
+#define INTEL_SUBPLATFORM_ULX (1)
+#define INTEL_SUBPLATFORM_AML (2)
+
+/* CNL/ICL */
+#define INTEL_SUBPLATFORM_PORTF (0)
+
+enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
- INTEL_PPGTT_FULL_4LVL,
};
#define DEV_INFO_FOR_EACH_FLAG(func) \
@@ -150,19 +166,18 @@ struct sseu_dev_info {
u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
};
-typedef u8 intel_ring_mask_t;
-
struct intel_device_info {
u16 gen_mask;
u8 gen;
u8 gt; /* GT number, 0 if undefined */
- intel_ring_mask_t ring_mask; /* Rings supported by the HW */
+ intel_engine_mask_t engine_mask; /* Engines supported by the HW */
enum intel_platform platform;
- u32 platform_mask;
- enum intel_ppgtt ppgtt;
+ enum intel_ppgtt_type ppgtt_type;
+ unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
+
unsigned int page_sizes; /* page sizes supported by the HW */
u32 display_mmio_offset;
@@ -195,22 +210,28 @@ struct intel_device_info {
};
struct intel_runtime_info {
+ /*
+ * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
+ * single runtime conditionals, and also to provide groundwork for
+ * future per-platform or per-SKU build optimizations.
+ *
+ * The array can be extended if the corresponding
+ * BUILD_BUG_ON is hit.
+ */
+ u32 platform_mask[2];
+
u16 device_id;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
- u8 num_rings;
+ u8 num_engines;
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
u32 cs_timestamp_frequency_khz;
- /* Enabled (not fused off) media engine bitmasks. */
- u8 vdbox_enable;
- u8 vebox_enable;
-
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
};
@@ -269,6 +290,7 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
const char *intel_platform_name(enum intel_platform platform);
+void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);
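The platform_mask comment in struct intel_runtime_info is easier to see with a small standalone model: subplatform bits occupy a few low bits of a word, platform bits sit above them, and an or-ed platform check collapses into a single AND against a constant mask. The helper names below (mask_bit, is_platform, is_subplatform) are illustrative stand-ins, not the driver's actual macros.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum platform { PLAT_HASWELL, PLAT_BROADWELL, PLAT_ICELAKE, PLAT_MAX };

#define SUBPLATFORM_BITS 3		/* reserved low bits per platform word */
#define SUB_ULT 0
#define SUB_ULX 1

/* One u32 word is enough here; the driver keeps an array for headroom. */
struct runtime_info { uint32_t platform_mask[1]; };

static unsigned int mask_index(enum platform p) { (void)p; return 0; }
static unsigned int mask_bit(enum platform p)
{
	/* Platform bits live above the per-platform subplatform bits. */
	return SUBPLATFORM_BITS + p;
}

static bool is_platform(const struct runtime_info *ri, enum platform p)
{
	return ri->platform_mask[mask_index(p)] & (1u << mask_bit(p));
}

static bool is_subplatform(const struct runtime_info *ri,
			   enum platform p, unsigned int sub)
{
	/* Both the parent platform bit and the subplatform bit must be set. */
	return is_platform(ri, p) &&
	       (ri->platform_mask[mask_index(p)] & (1u << sub));
}

int main(void)
{
	struct runtime_info ri = { { 0 } };

	/* Init: mark the parent platform, then OR in the subplatform bit. */
	ri.platform_mask[0] = 1u << mask_bit(PLAT_HASWELL);
	ri.platform_mask[0] |= 1u << SUB_ULT;

	/*
	 * An or-ed check like "is HSW or BDW" becomes a single AND against
	 * a compile-time constant mask, which is the optimization the
	 * comment in intel_runtime_info is talking about.
	 */
	uint32_t hsw_or_bdw = (1u << mask_bit(PLAT_HASWELL)) |
			      (1u << mask_bit(PLAT_BROADWELL));
	printf("hsw|bdw: %d, hsw-ult: %d\n",
	       !!(ri.platform_mask[0] & hsw_or_bdw),
	       is_subplatform(&ri, PLAT_HASWELL, SUB_ULT));
	return 0;
}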
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ccb616351bba..5098228f1302 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,19 +46,29 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
+#include "i915_reset.h"
#include "i915_trace.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_cdclk.h"
+#include "intel_crt.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_dvo.h"
+#include "intel_fbc.h"
+#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-
-#include "intel_drv.h"
-#include "intel_dsi.h"
-#include "intel_frontbuffer.h"
-
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_reset.h"
-#include "i915_trace.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lvds.h"
+#include "intel_pipe_crc.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sdvo.h"
+#include "intel_sprite.h"
+#include "intel_tv.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -115,8 +125,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
+static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
@@ -467,10 +477,11 @@ static const struct intel_limit intel_limits_bxt = {
};
static void
-skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
+skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ I915_READ(CLKGATE_DIS_PSL(pipe)) |
DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@ -595,7 +606,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -603,7 +614,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
- if (intel_is_dual_link_lvds(dev))
+ if (intel_is_dual_link_lvds(dev_priv))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@@ -951,14 +962,15 @@ chv_find_best_dpll(const struct intel_limit *limit,
return found;
}
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock)
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_bxt;
return chv_find_best_dpll(limit, crtc_state,
- target_clock, refclk, NULL, best_clock);
+ crtc_state->port_clock, refclk,
+ NULL, best_clock);
}
bool intel_crtc_active(struct intel_crtc *crtc)
@@ -1039,7 +1051,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, I965_PIPECONF_ACTIVE, 0,
100))
WARN(1, "pipe_off wait timed out\n");
@@ -1345,7 +1357,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL(pipe),
DPLL_LOCK_VLV,
DPLL_LOCK_VLV,
@@ -1398,7 +1410,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
@@ -1441,17 +1453,12 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
-static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
- struct intel_crtc *crtc;
- int count = 0;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- count += crtc->base.state->active &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
- }
+ if (IS_I830(dev_priv))
+ return false;
- return count;
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
static void i9xx_enable_pll(struct intel_crtc *crtc,
@@ -1465,29 +1472,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
assert_pipe_disabled(dev_priv, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
+ if (i9xx_has_pps(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
- /* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
- /*
- * It appears to be important that we don't enable this
- * for the current pipe before otherwise configuring the
- * PLL. No idea how this should be handled if multiple
- * DVO outputs are enabled simultaneosly.
- */
- dpll |= DPLL_DVO_2X_MODE;
- I915_WRITE(DPLL(!crtc->pipe),
- I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
- }
-
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(reg, 0);
-
+ I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
I915_WRITE(reg, dpll);
/* Wait for the clocks to stabilize. */
@@ -1520,16 +1513,6 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- /* Disable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev_priv) &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
- !intel_num_dvo_pipes(dev_priv)) {
- I915_WRITE(DPLL(PIPE_B),
- I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
- I915_WRITE(DPLL(PIPE_A),
- I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
- }
-
/* Don't disable pipe or pipe PLLs if needed */
if (IS_I830(dev_priv))
return;
@@ -1608,7 +1591,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
dpll_reg, port_mask, expected_mask,
1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
@@ -1658,17 +1641,18 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
}
val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
- else
+ } else {
val |= TRANS_PROGRESSIVE;
+ }
I915_WRITE(reg, val | TRANS_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
@@ -1698,7 +1682,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LPT_TRANSCONF,
TRANS_STATE_ENABLE,
TRANS_STATE_ENABLE,
@@ -1724,7 +1708,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, TRANS_STATE_ENABLE, 0,
50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
@@ -1746,7 +1730,7 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
50))
DRM_ERROR("Failed to disable PCH transcoder\n");
@@ -1830,6 +1814,8 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
+ trace_intel_pipe_enable(dev_priv, pipe);
+
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE) {
@@ -1869,6 +1855,8 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
+ trace_intel_pipe_disable(dev_priv, pipe);
+
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
@@ -2677,6 +2665,24 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_RGB565;
case PLANE_CTL_FORMAT_NV12:
return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_P010:
+ return DRM_FORMAT_P010;
+ case PLANE_CTL_FORMAT_P012:
+ return DRM_FORMAT_P012;
+ case PLANE_CTL_FORMAT_P016:
+ return DRM_FORMAT_P016;
+ case PLANE_CTL_FORMAT_Y210:
+ return DRM_FORMAT_Y210;
+ case PLANE_CTL_FORMAT_Y212:
+ return DRM_FORMAT_Y212;
+ case PLANE_CTL_FORMAT_Y216:
+ return DRM_FORMAT_Y216;
+ case PLANE_CTL_FORMAT_Y410:
+ return DRM_FORMAT_XVYU2101010;
+ case PLANE_CTL_FORMAT_Y412:
+ return DRM_FORMAT_XVYU12_16161616;
+ case PLANE_CTL_FORMAT_Y416:
+ return DRM_FORMAT_XVYU16161616;
default:
case PLANE_CTL_FORMAT_XRGB_8888:
if (rgb_order) {
@@ -2695,6 +2701,18 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_XBGR2101010;
else
return DRM_FORMAT_XRGB2101010;
+ case PLANE_CTL_FORMAT_XRGB_16161616F:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR16161616F;
+ else
+ return DRM_FORMAT_XBGR16161616F;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB16161616F;
+ else
+ return DRM_FORMAT_XRGB16161616F;
+ }
}
}
@@ -2825,8 +2843,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc_state);
+ intel_disable_plane(plane, crtc_state);
}
static void
@@ -3176,7 +3193,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (fb->format->format == DRM_FORMAT_NV12) {
+ if (is_planar_yuv_format(fb->format->format)) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3230,9 +3247,10 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (crtc_state->csc_enable)
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5)
@@ -3459,7 +3477,7 @@ static void i9xx_disable_plane(struct intel_plane *plane,
*
* On pre-g4x there is no way to gamma correct the
* pipe bottom color but we'll keep on doing this
- * anyway.
+ * anyway so that the crtc state readout works correctly.
*/
dspcntr = i9xx_plane_ctl_crtc(crtc_state);
@@ -3590,6 +3608,12 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F;
case DRM_FORMAT_YUYV:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
case DRM_FORMAT_YVYU:
@@ -3600,6 +3624,24 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
case DRM_FORMAT_NV12:
return PLANE_CTL_FORMAT_NV12;
+ case DRM_FORMAT_P010:
+ return PLANE_CTL_FORMAT_P010;
+ case DRM_FORMAT_P012:
+ return PLANE_CTL_FORMAT_P012;
+ case DRM_FORMAT_P016:
+ return PLANE_CTL_FORMAT_P016;
+ case DRM_FORMAT_Y210:
+ return PLANE_CTL_FORMAT_Y210;
+ case DRM_FORMAT_Y212:
+ return PLANE_CTL_FORMAT_Y212;
+ case DRM_FORMAT_Y216:
+ return PLANE_CTL_FORMAT_Y216;
+ case DRM_FORMAT_XVYU2101010:
+ return PLANE_CTL_FORMAT_Y410;
+ case DRM_FORMAT_XVYU12_16161616:
+ return PLANE_CTL_FORMAT_Y412;
+ case DRM_FORMAT_XVYU16161616:
+ return PLANE_CTL_FORMAT_Y416;
default:
MISSING_CASE(pixel_format);
}
@@ -3710,8 +3752,11 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return plane_ctl;
- plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+ if (crtc_state->gamma_enable)
+ plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
return plane_ctl;
}
@@ -3763,8 +3808,11 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 11)
return plane_color_ctl;
- plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
- plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+ if (crtc_state->gamma_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
return plane_color_ctl;
}
@@ -3772,6 +3820,8 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 plane_color_ctl = 0;
@@ -3779,7 +3829,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
- if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
+ if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -3921,9 +3971,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* The display has been reset as well,
* so need a full re-initialization.
*/
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_runtime_pm_enable_interrupts(dev_priv);
-
intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
@@ -3963,13 +4010,13 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* and rounding for per-pixel values 00 and 0xff
*/
tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
-
/*
- * W/A for underruns with linear/X-tiled with
- * WM1+ disabled.
+ * Display WA # 1605353570: icl
+ * Set the pixel rounding bit to 1 to allow
+ * passthrough of frame buffer pixels unmodified
+ * across the pipe.
*/
- tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
-
+ tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
@@ -4008,16 +4055,6 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
ironlake_pfit_disable(old_crtc_state);
}
- /*
- * We don't (yet) allow userspace to control the pipe background color,
- * so force it to black, but apply pipe gamma and CSC so that its
- * handling will match how we program our planes.
- */
- if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE |
- SKL_BOTTOM_COLOR_CSC_ENABLE);
-
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
}
@@ -5036,19 +5073,19 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && format->format == DRM_FORMAT_NV12 &&
+ if (format && is_planar_yuv_format(format->format) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- DRM_DEBUG_KMS("NV12: src dimensions not met\n");
+ DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
}
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (IS_GEN(dev_priv, 11) &&
+ (INTEL_GEN(dev_priv) >= 11 &&
(src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (!IS_GEN(dev_priv, 11) &&
+ (INTEL_GEN(dev_priv) < 11 &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5105,14 +5142,15 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
{
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
bool force_detach = !fb || !plane_state->base.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(intel_plane) &&
- fb && fb->format->format == DRM_FORMAT_NV12)
+ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ fb && is_planar_yuv_format(fb->format->format))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
@@ -5144,11 +5182,24 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
break;
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
@@ -5259,7 +5310,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
IPS_CTL, IPS_ENABLE, IPS_ENABLE,
50))
DRM_ERROR("Timed out waiting for IPS enable\n");
@@ -5284,7 +5335,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
IPS_CTL, IPS_ENABLE, 0,
100))
DRM_ERROR("Timed out waiting for IPS disable\n");
@@ -5490,7 +5541,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
/* Display WA 827 */
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
!needs_nv12_wa(dev_priv, pipe_config)) {
- skl_wa_clkgate(dev_priv, crtc->pipe, false);
+ skl_wa_827(dev_priv, crtc->pipe, false);
}
}
@@ -5529,7 +5580,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
/* Display WA 827 */
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
needs_nv12_wa(dev_priv, pipe_config)) {
- skl_wa_clkgate(dev_priv, crtc->pipe, true);
+ skl_wa_827(dev_priv, crtc->pipe, true);
}
/*
@@ -5603,7 +5654,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- plane->disable_plane(plane, new_crtc_state);
+ intel_disable_plane(plane, new_crtc_state);
if (old_plane_state->base.visible)
fb_bits |= plane->frontbuffer_bit;
@@ -5754,6 +5805,14 @@ static void intel_encoders_update_pipe(struct drm_crtc *crtc,
}
}
+static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+
+ plane->disable_plane(plane, crtc_state);
+}
+
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5819,6 +5878,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5947,6 +6008,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+ if (INTEL_GEN(dev_priv) < 9)
+ intel_disable_primary_plane(pipe_config);
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(intel_crtc);
@@ -6127,7 +6191,10 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
if (port == PORT_NONE)
return false;
- if (IS_ICELAKE(dev_priv))
+ if (IS_ELKHARTLAKE(dev_priv))
+ return port <= PORT_C;
+
+ if (INTEL_GEN(dev_priv) >= 11)
return port <= PORT_B;
return false;
@@ -6135,7 +6202,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
{
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
return port >= PORT_C && port <= PORT_F;
return false;
@@ -6304,6 +6371,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
dev_priv->display.initial_watermarks(old_intel_state,
pipe_config);
@@ -6361,6 +6430,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
@@ -6743,7 +6814,13 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
if (!hsw_crtc_state_ips_capable(crtc_state))
return false;
- if (crtc_state->ips_force_disable)
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ if (crtc_state->crc_enabled)
return false;
/* IPS should be fine as long as at least one plane is enabled. */
@@ -6818,8 +6895,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
@@ -6869,7 +6945,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(dev)) {
+ intel_is_dual_link_lvds(dev_priv)) {
DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
return -EINVAL;
}
@@ -7486,7 +7562,19 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev_priv) &&
+ /*
+ * Bspec:
+ * "[Almador Errata}: For the correct operation of the muxed DVO pins
+ * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+ * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+ * Enable) must be set to “1” in both the DPLL A Control Register
+ * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+ *
+ * For simplicity we keep both bits always enabled in
+ * both DPLLs. The spec says we should disable the DVO 2X clock
+ * when not needed, but this seems to work fine in practice.
+ */
+ if (IS_I830(dev_priv) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@@ -7694,13 +7782,16 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
- } else
+ } else {
pipeconf |= PIPECONF_PROGRESSIVE;
+ }
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
crtc_state->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+ pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(crtc->pipe));
}
@@ -7744,8 +7835,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -7758,7 +7848,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
- if (intel_is_dual_link_lvds(dev))
+ if (intel_is_dual_link_lvds(dev_priv))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@@ -7894,14 +7984,22 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
+static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv))
+ return false;
+
+ return INTEL_GEN(dev_priv) >= 4 ||
+ IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- if (INTEL_GEN(dev_priv) <= 3 &&
- (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
+ if (!i9xx_has_pfit(dev_priv))
return;
tmp = I915_READ(PFIT_CONTROL);
@@ -8108,6 +8206,24 @@ static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
pipe_config->output_format = output;
}
+static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 tmp;
+
+ tmp = I915_READ(DSPCNTR(i9xx_plane));
+
+ if (tmp & DISPPLANE_GAMMA_ENABLE)
+ crtc_state->gamma_enable = true;
+
+ if (!HAS_GMCH(dev_priv) &&
+ tmp & DISPPLANE_PIPE_CSC_ENABLE)
+ crtc_state->csc_enable = true;
+}
+
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8153,6 +8269,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
+ pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
+ PIPECONF_GAMMA_MODE_SHIFT;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+
+ i9xx_get_pipe_color_config(pipe_config);
+
if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
@@ -8185,14 +8309,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- /*
- * DPLL_DVO_2X_MODE must be enabled for both DPLLs
- * on 830. Filter it out here so that we don't
- * report errors due to that.
- */
- if (IS_I830(dev_priv))
- pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
-
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
} else {
@@ -8692,6 +8808,8 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
@@ -8772,13 +8890,11 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
-static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+static void ironlake_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll, fp, fp2;
int factor;
@@ -8787,10 +8903,12 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
+ (HAS_PCH_IBX(dev_priv) &&
+ intel_is_dual_link_lvds(dev_priv)))
factor = 25;
- } else if (crtc_state->sdvo_tv_clock)
+ } else if (crtc_state->sdvo_tv_clock) {
factor = 20;
+ }
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
@@ -8877,8 +8995,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 120000;
@@ -8896,7 +9013,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
refclk = dev_priv->vbt.lvds_ssc_freq;
}
- if (intel_is_dual_link_lvds(dev)) {
+ if (intel_is_dual_link_lvds(dev_priv)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
@@ -8920,7 +9037,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
ironlake_compute_dpll(crtc, crtc_state, NULL);
- if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
+ if (!intel_get_shared_dpll(crtc_state, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9226,6 +9343,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
+ pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
+ PIPECONF_GAMMA_MODE_SHIFT;
+
+ pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+ i9xx_get_pipe_color_config(pipe_config);
+
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
@@ -9371,7 +9495,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
- if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
@@ -9409,7 +9534,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -9426,7 +9551,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
DRM_ERROR("LCPLL not locked yet\n");
@@ -9441,7 +9566,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
@@ -9510,11 +9635,11 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
to_intel_atomic_state(crtc_state->base.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- IS_ICELAKE(dev_priv)) {
+ INTEL_GEN(dev_priv) >= 11) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
+ if (!intel_get_shared_dpll(crtc_state, encoder)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9552,9 +9677,6 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
temp = I915_READ(DPCLKA_CFGCR0_ICL) &
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-
- if (WARN_ON(!intel_dpll_is_combophy(id)))
- return;
} else if (intel_port_is_tc(dev_priv, port)) {
id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
} else {
@@ -9643,20 +9765,25 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask)
+ u64 *power_domain_mask,
+ intel_wakeref_t *wakerefs)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+ unsigned long panel_transcoder_mask = 0;
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
+ intel_wakeref_t wf;
u32 tmp;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+ if (HAS_TRANSCODER_EDP(dev_priv))
+ panel_transcoder_mask |= BIT(TRANSCODER_EDP);
+
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
* and DSI transcoders handled below.
@@ -9713,10 +9840,13 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
enabled_panel_transcoders != BIT(TRANSCODER_EDP));
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
return false;
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@@ -9726,13 +9856,15 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask)
+ u64 *power_domain_mask,
+ intel_wakeref_t *wakerefs)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- enum port port;
enum transcoder cpu_transcoder;
+ intel_wakeref_t wf;
+ enum port port;
u32 tmp;
for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
@@ -9742,10 +9874,13 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
cpu_transcoder = TRANSCODER_DSI_C;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
continue;
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
/*
@@ -9786,7 +9921,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icelake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9824,6 +9959,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
enum intel_display_power_domain power_domain;
u64 power_domain_mask;
bool active;
@@ -9831,16 +9967,21 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_crtc_init_scalers(crtc, pipe_config);
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
return false;
+
+ wakerefs[power_domain] = wf;
power_domain_mask = BIT_ULL(power_domain);
pipe_config->shared_dpll = NULL;
- active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
+ active = hsw_get_transcoder_state(crtc, pipe_config,
+ &power_domain_mask, wakerefs);
if (IS_GEN9_LP(dev_priv) &&
- bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config,
+ &power_domain_mask, wakerefs)) {
WARN_ON(active);
active = true;
}
@@ -9849,7 +9990,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
goto out;
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- IS_ICELAKE(dev_priv)) {
+ INTEL_GEN(dev_priv) >= 11) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@@ -9857,12 +9998,28 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_src_size(crtc, pipe_config);
intel_get_crtc_ycbcr_config(crtc, pipe_config);
- pipe_config->gamma_mode =
- I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
+ pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+
+ pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+
+ if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+ pipe_config->gamma_enable = true;
+
+ if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+ pipe_config->csc_enable = true;
+ } else {
+ i9xx_get_pipe_color_config(pipe_config);
+ }
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+ WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (wf) {
+ wakerefs[power_domain] = wf;
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
@@ -9894,7 +10051,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
out:
for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put_unchecked(dev_priv, power_domain);
+ intel_display_power_put(dev_priv,
+ power_domain, wakerefs[power_domain]);
return active;
}
@@ -10030,7 +10188,12 @@ i845_cursor_max_stride(struct intel_plane *plane,
static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return CURSOR_GAMMA_ENABLE;
+ u32 cntl = 0;
+
+ if (crtc_state->gamma_enable)
+ cntl |= CURSOR_GAMMA_ENABLE;
+
+ return cntl;
}
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
@@ -10184,9 +10347,10 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 11)
return cntl;
- cntl |= MCURSOR_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ cntl = MCURSOR_GAMMA_ENABLE;
- if (HAS_DDI(dev_priv))
+ if (crtc_state->csc_enable)
cntl |= MCURSOR_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@@ -11134,7 +11298,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
}
if (!linked_state) {
- DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+ DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
hweight8(crtc_state->nv12_planes));
return -EINVAL;
@@ -11175,16 +11339,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
}
- if (mode_changed || crtc_state->color_mgmt_changed) {
+ if (mode_changed || pipe_config->update_pipe ||
+ crtc_state->color_mgmt_changed) {
ret = intel_color_check(pipe_config);
if (ret)
return ret;
-
- /*
- * Changing color management on Intel hardware is
- * handled as part of planes update.
- */
- crtc_state->planes_changed = true;
}
ret = 0;
@@ -11355,6 +11514,16 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
m_n->link_m, m_n->link_n, m_n->tu);
}
+static void
+intel_dump_infoframe(struct drm_i915_private *dev_priv,
+ const union hdmi_infoframe *frame)
+{
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
+}
+
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -11458,6 +11627,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
pipe_config->has_audio, pipe_config->has_infoframe);
+ DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
+ pipe_config->infoframes.enable);
+
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
+ DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@@ -11606,7 +11791,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
- saved_state->ips_force_disable = crtc_state->ips_force_disable;
+ saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
@@ -11825,6 +12010,37 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
return false;
}
+static bool
+intel_compare_infoframe(const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
+
+static void
+pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
+ bool adjust, const char *name,
+ const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
+{
+ if (adjust) {
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
+ drm_dbg(DRM_UT_KMS, "expected:");
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
+ drm_dbg(DRM_UT_KMS, "found");
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ } else {
+ drm_err("mismatch in %s infoframe", name);
+ drm_err("expected:");
+ hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
+ drm_err("found");
+ hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+ }
+}
+
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
@@ -11866,6 +12082,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config,
bool adjust)
{
+ struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
bool ret = true;
bool fixup_inherited = adjust &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
@@ -12008,7 +12225,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
} \
} while (0)
-#define PIPE_CONF_QUIRK(quirk) \
+#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
+ if (!intel_compare_infoframe(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
PIPE_CONF_CHECK_I(cpu_transcoder);
@@ -12077,6 +12304,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
+ /*
+ * Changing the EDP transcoder input mux
+ * (A_ONOFF vs. A_ON) requires a full modeset.
+ */
+ if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ current_config->cpu_transcoder == TRANSCODER_EDP)
+ PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+
if (!adjust) {
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
@@ -12089,6 +12324,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+
+ PIPE_CONF_CHECK_X(gamma_mode);
+ if (IS_CHERRYVIEW(dev_priv))
+ PIPE_CONF_CHECK_X(cgm_mode);
+ else
+ PIPE_CONF_CHECK_X(csc_mode);
+ PIPE_CONF_CHECK_BOOL(gamma_enable);
+ PIPE_CONF_CHECK_BOOL(csc_enable);
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -12137,6 +12380,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(min_voltage_level);
+ PIPE_CONF_CHECK_X(infoframes.enable);
+ PIPE_CONF_CHECK_X(infoframes.gcp);
+ PIPE_CONF_CHECK_INFOFRAME(avi);
+ PIPE_CONF_CHECK_INFOFRAME(spd);
+ PIPE_CONF_CHECK_INFOFRAME(hdmi);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -12171,12 +12420,15 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct skl_pipe_wm hw_wm, *sw_wm;
- struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
+ struct skl_ddb_allocation ddb;
+ struct skl_pipe_wm wm;
+ } *hw;
+ struct skl_ddb_allocation *sw_ddb;
+ struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
- struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -12184,22 +12436,29 @@ static void verify_wm_state(struct drm_crtc *crtc,
if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
- skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
- skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
- skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+ skl_ddb_get_hw_state(dev_priv, &hw->ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
- if (INTEL_GEN(dev_priv) >= 11)
- if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
- DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
- sw_ddb->enabled_slices,
- hw_ddb.enabled_slices);
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ hw->ddb.enabled_slices != sw_ddb->enabled_slices)
+ DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
+ sw_ddb->enabled_slices,
+ hw->ddb.enabled_slices);
+
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
- hw_plane_wm = &hw_wm.planes[plane];
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+ hw_plane_wm = &hw->wm.planes[plane];
sw_plane_wm = &sw_wm->planes[plane];
/* Watermarks */
@@ -12231,7 +12490,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb_y[plane];
+ hw_ddb_entry = &hw->ddb_y[plane];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@@ -12249,7 +12508,9 @@ static void verify_wm_state(struct drm_crtc *crtc,
* once the plane becomes visible, we can skip this check
*/
if (1) {
- hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+ hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
/* Watermarks */
@@ -12281,7 +12542,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@@ -12291,6 +12552,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
hw_ddb_entry->start, hw_ddb_entry->end);
}
}
+
+ kfree(hw);
}
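
For illustration: the verify_wm_state() rework above replaces several large on-stack arrays (the SKL DDB entries and pipe watermarks) with a single heap-allocated skl_hw_state that is freed at the end of the function, keeping the verification path off the limited kernel stack. A minimal standalone C sketch of that pattern follows; calloc()/free() stand in for kzalloc()/kfree(), and the struct layout is illustrative only.

#include <stdio.h>
#include <stdlib.h>

#define MAX_PLANES 5

struct ddb_entry { unsigned short start, end; };

/* Group the large readout buffers in one heap block instead of stack arrays. */
struct hw_state {
	struct ddb_entry ddb_y[MAX_PLANES];
	struct ddb_entry ddb_uv[MAX_PLANES];
};

static void verify(void)
{
	struct hw_state *hw = calloc(1, sizeof(*hw));

	if (!hw)
		return;

	/* ... read hardware state into hw->ddb_y / hw->ddb_uv and compare ... */
	printf("readout buffers use %zu bytes off the stack\n", sizeof(*hw));

	free(hw);
}

int main(void)
{
	verify();
	return 0;
}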
static void
@@ -12447,7 +12710,8 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->base.visible);
+ assert_plane(plane, plane_state->slave ||
+ plane_state->base.visible);
}
static void
@@ -12769,10 +13033,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
return -EINVAL;
}
+ /* keep the current setting */
+ if (!intel_state->cdclk.force_min_cdclk_changed)
+ intel_state->cdclk.force_min_cdclk =
+ dev_priv->cdclk.force_min_cdclk;
+
intel_state->modeset = true;
intel_state->active_crtcs = dev_priv->active_crtcs;
intel_state->cdclk.logical = dev_priv->cdclk.logical;
intel_state->cdclk.actual = dev_priv->cdclk.actual;
+ intel_state->cdclk.pipe = INVALID_PIPE;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (new_crtc_state->active)
@@ -12792,6 +13062,8 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
+ enum pipe pipe;
+
ret = dev_priv->display.modeset_calc_cdclk(state);
if (ret < 0)
return ret;
@@ -12808,12 +13080,36 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
return ret;
}
+ if (is_power_of_2(intel_state->active_crtcs)) {
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ pipe = ilog2(intel_state->active_crtcs);
+ crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (crtc_state && needs_modeset(crtc_state))
+ pipe = INVALID_PIPE;
+ } else {
+ pipe = INVALID_PIPE;
+ }
+
/* All pipes must be switched off while we change the cdclk. */
- if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
+ if (pipe != INVALID_PIPE &&
+ intel_cdclk_needs_cd2x_update(dev_priv,
+ &dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
+ ret = intel_lock_all_pipes(state);
+ if (ret < 0)
+ return ret;
+
+ intel_state->cdclk.pipe = pipe;
+ } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
+
+ intel_state->cdclk.pipe = INVALID_PIPE;
}
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
@@ -12822,8 +13118,6 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
intel_state->cdclk.logical.voltage_level,
intel_state->cdclk.actual.voltage_level);
- } else {
- to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}
intel_modeset_clear_plls(state);
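
The cdclk changes above let a divider-only (cd2x) update go through without switching every pipe off: when exactly one CRTC is active (active_crtcs has a single bit set) and that CRTC is not itself being modeset, its pipe is recorded in intel_state->cdclk.pipe; otherwise INVALID_PIPE is stored and the full intel_modeset_all_pipes() path is taken. A standalone sketch of the pipe-selection logic, with INVALID_PIPE and the helpers reimplemented locally:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_PIPE -1

static bool is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

static int ilog2(unsigned int n)
{
	int r = -1;

	while (n) {
		n >>= 1;
		r++;
	}
	return r;
}

/*
 * Pick the single active pipe eligible for a cd2x-only cdclk update,
 * or INVALID_PIPE if zero or more than one pipe is active.
 */
static int cd2x_candidate_pipe(unsigned int active_crtcs, bool candidate_needs_modeset)
{
	if (!is_power_of_2(active_crtcs))
		return INVALID_PIPE;
	if (candidate_needs_modeset)
		return INVALID_PIPE;
	return ilog2(active_crtcs);
}

int main(void)
{
	printf("%d\n", cd2x_candidate_pipe(0x4, false)); /* pipe 2 */
	printf("%d\n", cd2x_candidate_pipe(0x5, false)); /* INVALID_PIPE */
	return 0;
}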
@@ -12864,7 +13158,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *crtc_state;
int ret, i;
- bool any_ms = false;
+ bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
@@ -12989,14 +13283,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
else if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
- intel_begin_crtc_commit(crtc, old_crtc_state);
+ intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
else
i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
- intel_finish_crtc_commit(crtc, old_crtc_state);
+ intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
}
static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -13224,7 +13518,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (intel_state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
- intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
+ intel_set_cdclk_pre_plane_update(dev_priv,
+ &intel_state->cdclk.actual,
+ &dev_priv->cdclk.actual,
+ intel_state->cdclk.pipe);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -13253,6 +13550,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.update_crtcs(state);
+ if (intel_state->modeset)
+ intel_set_cdclk_post_plane_update(dev_priv,
+ &intel_state->cdclk.actual,
+ &dev_priv->cdclk.actual,
+ intel_state->cdclk.pipe);
+
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
* fix this:
@@ -13313,7 +13616,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* so enable debugging for the next modeset - and hope we catch
* the culprit.
*/
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
@@ -13454,8 +13757,10 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_state->min_voltage_level,
sizeof(intel_state->min_voltage_level));
dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->cdclk.logical = intel_state->cdclk.logical;
- dev_priv->cdclk.actual = intel_state->cdclk.actual;
+ dev_priv->cdclk.force_min_cdclk =
+ intel_state->cdclk.force_min_cdclk;
+
+ intel_cdclk_swap_state(intel_state);
}
drm_atomic_state_get(state);
@@ -13506,7 +13811,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
- gen6_rps_boost(rq, NULL);
+ gen6_rps_boost(rq);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -13767,7 +14072,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
* or
* cdclk/crtc_clock
*/
- mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
+ mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
tmpclk1 = (1 << 16) * mult - 1;
tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
max_scale = min(tmpclk1, tmpclk2);
@@ -13775,39 +14080,35 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
return max_scale;
}
-static void intel_begin_crtc_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void intel_begin_crtc_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *old_intel_cstate =
- to_intel_crtc_state(old_crtc_state);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_crtc_state->state);
- struct intel_crtc_state *intel_cstate =
- intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
- bool modeset = needs_modeset(&intel_cstate->base);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = needs_modeset(&new_crtc_state->base);
/* Perform vblank evasion around commit operation */
- intel_pipe_update_start(intel_cstate);
+ intel_pipe_update_start(new_crtc_state);
if (modeset)
goto out;
- if (intel_cstate->base.color_mgmt_changed ||
- intel_cstate->update_pipe)
- intel_color_commit(intel_cstate);
+ if (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe)
+ intel_color_commit(new_crtc_state);
- if (intel_cstate->update_pipe)
- intel_update_pipe_config(old_intel_cstate, intel_cstate);
+ if (new_crtc_state->update_pipe)
+ intel_update_pipe_config(old_crtc_state, new_crtc_state);
else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(intel_cstate);
+ skl_detach_scalers(new_crtc_state);
out:
if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(old_intel_state,
- intel_cstate);
+ dev_priv->display.atomic_update_watermarks(state,
+ new_crtc_state);
}
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@@ -13826,21 +14127,20 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
}
}
-static void intel_finish_crtc_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void intel_finish_crtc_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_crtc_state->state);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
+ intel_atomic_get_new_crtc_state(state, crtc);
intel_pipe_update_end(new_crtc_state);
if (new_crtc_state->update_pipe &&
!needs_modeset(&new_crtc_state->base) &&
- old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
/**
@@ -14039,14 +14339,11 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (plane->state->visible) {
- trace_intel_update_plane(plane, to_intel_crtc(crtc));
- intel_plane->update_plane(intel_plane, crtc_state,
- to_intel_plane_state(plane->state));
- } else {
- trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(intel_plane, crtc_state);
- }
+ if (plane->state->visible)
+ intel_update_plane(intel_plane, crtc_state,
+ to_intel_plane_state(plane->state));
+ else
+ intel_disable_plane(intel_plane, crtc_state);
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -14496,7 +14793,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ICELAKE(dev_priv)) {
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ icl_dsi_init(dev_priv);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@@ -15397,6 +15699,8 @@ int intel_modeset_init(struct drm_device *dev)
intel_update_czclk(dev_priv);
intel_modeset_init_hw(dev);
+ intel_hdcp_component_init(dev_priv);
+
if (dev_priv->max_cdclk_freq == 0)
intel_update_max_cdclk(dev_priv);
@@ -15472,7 +15776,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
pipe_name(pipe), clock.vco, clock.dot);
fp = i9xx_dpll_compute_fp(&clock);
- dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
+ dpll = DPLL_DVO_2X_MODE |
DPLL_VGA_MODE_DIS |
((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
PLL_P2_DIVIDE_BY_4 |
@@ -15986,8 +16290,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
for_each_intel_encoder(&dev_priv->drm, encoder) {
- u64 get_domains;
- enum intel_display_power_domain domain;
struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
@@ -16001,9 +16303,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
continue;
crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
- get_domains = encoder->get_power_domains(encoder, crtc_state);
- for_each_power_domain(domain, get_domains)
- intel_display_power_get(dev_priv, domain);
+ encoder->get_power_domains(encoder, crtc_state);
}
}
@@ -16258,6 +16558,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
+ intel_hdcp_component_fini(dev_priv);
+
drm_mode_config_cleanup(dev);
intel_overlay_cleanup(dev_priv);
@@ -16304,8 +16606,6 @@ struct intel_display_error_state {
u32 power_well_driver;
- int num_transcoders;
-
struct intel_cursor_error_state {
u32 control;
u32 position;
@@ -16330,6 +16630,7 @@ struct intel_display_error_state {
} plane[I915_MAX_PIPES];
struct intel_transcoder_error_state {
+ bool available;
bool power_domain_on;
enum transcoder cpu_transcoder;
@@ -16356,6 +16657,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
+ BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
+
if (!HAS_DISPLAY(dev_priv))
return NULL;
@@ -16396,14 +16699,13 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
- /* Note: this does not include DSI transcoders. */
- error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
- if (HAS_DDI(dev_priv))
- error->num_transcoders++; /* Account for eDP. */
-
- for (i = 0; i < error->num_transcoders; i++) {
+ for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
+ if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+ continue;
+
+ error->transcoder[i].available = true;
error->transcoder[i].power_domain_on =
__intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
@@ -16467,7 +16769,10 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
- for (i = 0; i < error->num_transcoders; i++) {
+ for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
+ if (!error->transcoder[i].available)
+ continue;
+
err_printf(m, "CPU transcoder: %s\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cf709835fb9a..560274d1c50b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -25,22 +25,34 @@
*
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/types.h>
+#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <asm/byteorder.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
#define DP_DPRX_ESI_LEN 14
@@ -949,8 +961,11 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
- if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
- !HAS_PCH_ICP(dev_priv))
+
+ /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+ if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ regs->pp_div = INVALID_MMIO_REG;
+ else
regs->pp_div = PP_DIVISOR(pps_idx);
}
@@ -1720,12 +1735,6 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
}
}
-struct link_config_limits {
- int min_clock, max_clock;
- int min_lane_count, max_lane_count;
- int min_bpp, max_bpp;
-};
-
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
@@ -1788,7 +1797,7 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
}
/* Adjust link config limits based on compliance test requests. */
-static void
+void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits)
@@ -1859,42 +1868,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- const struct link_config_limits *limits)
-{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
-
- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- bpp);
-
- for (lane_count = limits->min_lane_count;
- lane_count <= limits->max_lane_count;
- lane_count <<= 1) {
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
- lane_count);
-
- if (mode_rate <= link_avail) {
- pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
-
- return 0;
- }
- }
- }
- }
-
- return -EINVAL;
-}
-
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@@ -1922,6 +1895,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
int pipe_bpp;
int ret;
+ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, pipe_config);
+
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
@@ -2005,6 +1981,14 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
+int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+ return 6 * 3;
+ else
+ return 8 * 3;
+}
+
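
intel_dp_min_bpp() centralises the minimum link bpp that both the SST and MST paths now feed into limits.min_bpp: 6 bpc (18 bpp) for RGB output, 8 bpc (24 bpp) for the YCbCr output formats. A quick standalone check of the mapping (the enum is a stand-in for the driver's output format enum):

#include <stdio.h>

enum output_format { OUTPUT_RGB, OUTPUT_YCBCR420, OUTPUT_YCBCR444 };

/* Minimum link bpp: 6 bpc for RGB, 8 bpc for YCbCr output formats. */
static int min_bpp(enum output_format fmt)
{
	return (fmt == OUTPUT_RGB ? 6 : 8) * 3;
}

int main(void)
{
	printf("RGB: %d bpp, YCbCr 4:2:0: %d bpp\n",
	       min_bpp(OUTPUT_RGB), min_bpp(OUTPUT_YCBCR420));
	return 0;
}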
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2028,18 +2012,16 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_lane_count = 1;
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
- limits.min_bpp = 6 * 3;
+ limits.min_bpp = intel_dp_min_bpp(pipe_config);
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
+ if (intel_dp_is_edp(intel_dp)) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The eDP 1.3 and earlier panels
- * are generally designed to support only a single clock and
- * lane configuration, and typically these values correspond to
- * the native resolution of the panel. With eDP 1.4 rate select
- * and DSC, this is decreasingly the case, and we need to be
- * able to select less than maximum link config.
+		 * advertises being capable of. The panels are generally
+ * designed to support only a single clock and lane
+ * configuration, and typically these values correspond to the
+ * native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -2053,22 +2035,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
- if (intel_dp_is_edp(intel_dp))
- /*
- * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
- * section A.1: "It is recommended that the minimum number of
- * lanes be used, using the minimum link rate allowed for that
- * lane configuration."
- *
- * Note that we use the max clock and lane count for eDP 1.3 and
- * earlier, and fast vs. wide is irrelevant.
- */
- ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
- &limits);
- else
- /* Optimize for slow and wide. */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
- &limits);
+ /*
+	 * Optimize for slow and wide. This is the place to add an
+	 * alternative optimization policy.
+ */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2104,6 +2075,29 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ return crtc_state->pipe_bpp != 18 &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb ==
+ INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
+
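
intel_dp_limited_color_range() pulls the Broadcast RGB decision out of intel_dp_compute_config() so the MST path can share it: with the property on AUTO, limited range is chosen when the pipe is not 18 bpp and the CEA-861 default quantization range for the mode is limited; an explicit FULL/LIMITED selection simply wins. A user-space sketch of the same decision (the enum and the CEA check are stand-ins for the drm helpers):

#include <stdbool.h>
#include <stdio.h>

enum broadcast_rgb { RGB_AUTO, RGB_FULL, RGB_LIMITED };

static bool limited_color_range(enum broadcast_rgb prop, int pipe_bpp,
				bool cea_mode_defaults_to_limited)
{
	if (prop == RGB_AUTO)
		return pipe_bpp != 18 && cea_mode_defaults_to_limited;

	return prop == RGB_LIMITED;
}

int main(void)
{
	/* 18 bpp (6 bpc) always gets full range in AUTO mode. */
	printf("%d\n", limited_color_range(RGB_AUTO, 18, true));     /* 0 */
	printf("%d\n", limited_color_range(RGB_AUTO, 24, true));     /* 1 */
	printf("%d\n", limited_color_range(RGB_LIMITED, 18, false)); /* 1 */
	return 0;
}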
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2120,7 +2114,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
- int ret;
+ int ret, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2165,47 +2159,29 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
- pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
- intel_dp_supports_fec(intel_dp, pipe_config);
-
ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
if (ret < 0)
return ret;
- if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /*
- * See:
- * CEA-861-E - 5.1 Default Encoding Parameters
- * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
- */
- pipe_config->limited_color_range =
- pipe_config->pipe_bpp != 18 &&
- drm_default_rgb_quant_range(adjusted_mode) ==
- HDMI_QUANTIZATION_RANGE_LIMITED;
- } else {
- pipe_config->limited_color_range =
- intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
- }
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
- if (!pipe_config->dsc_params.compression_enable)
- intel_link_compute_m_n(pipe_config->pipe_bpp,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ if (pipe_config->dsc_params.compression_enable)
+ output_bpp = pipe_config->dsc_params.compressed_bpp;
else
- intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ output_bpp = pipe_config->pipe_bpp;
+
+ intel_link_compute_m_n(output_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
- intel_link_compute_m_n(pipe_config->pipe_bpp,
+ intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
@@ -2345,7 +2321,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
pp_stat_reg, mask, value,
5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
@@ -3934,7 +3910,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
- if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_IDLE_DONE,
DP_TP_STATUS_IDLE_DONE,
1))
@@ -4780,7 +4756,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
intel_dp_handle_test_request(intel_dp);
if (val & DP_CP_IRQ)
- intel_hdcp_check_link(intel_dp->attached_connector);
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
@@ -5623,6 +5599,18 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
edp_panel_vdd_off_sync(intel_dp);
}
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+{
+ long ret;
+
+#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+ ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+ msecs_to_jiffies(timeout));
+
+ if (!ret)
+		DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
+}
+
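
The CP_IRQ wait above is counter-based rather than flag-based: intel_dp_hdcp2_write_msg() caches cp_irq_count before sending a request, and the wait condition fires once the interrupt handler has bumped the atomic counter past the cached value, so an IRQ that lands between the write and the wait is not lost. A standalone sketch of that predicate, with plain integers standing in for the atomic counter and wait queue:

#include <stdbool.h>
#include <stdio.h>

struct hdcp {
	unsigned int cp_irq_count;        /* bumped by the "interrupt" side */
	unsigned int cp_irq_count_cached; /* snapshot taken before waiting */
};

/* Wait predicate: true once at least one new CP_IRQ has arrived. */
static bool cp_irq_arrived(const struct hdcp *h)
{
	return h->cp_irq_count_cached != h->cp_irq_count;
}

int main(void)
{
	struct hdcp h = { .cp_irq_count = 3 };

	h.cp_irq_count_cached = h.cp_irq_count; /* snapshot, then send request */
	printf("%d\n", cp_irq_arrived(&h));     /* 0: nothing new yet */

	h.cp_irq_count++;                       /* IRQ handler fires */
	printf("%d\n", cp_irq_arrived(&h));     /* 1: wake up and read the msg */
	return 0;
}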
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
@@ -5847,6 +5835,336 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
return 0;
}
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+static struct hdcp2_dp_msg_data {
+ u8 msg_id;
+ u32 offset;
+ bool msg_detectable;
+ u32 timeout;
+	u32 timeout2; /* Timeout to use when the sink is not yet paired */
+ } hdcp2_msg_data[] = {
+ {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
+ {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+ {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0},
+ {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0},
+ {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+ {HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
+ {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
+ {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
+ {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+ {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+/* local define to shovel this through the write_2_2 interface */
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+ {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+ 0, 0},
+ };
+
+static inline
+int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
+{
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
+ HDCP_2_2_DP_RXSTATUS_LEN);
+ if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
+		DRM_DEBUG_KMS("Read rx_status from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool *msg_ready)
+{
+ u8 rx_status;
+ int ret;
+
+ *msg_ready = false;
+ ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ if (ret < 0)
+ return ret;
+
+ switch (msg_id) {
+ case HDCP_2_2_AKE_SEND_HPRIME:
+ if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_AKE_SEND_PAIRING_INFO:
+ if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_REP_SEND_RECVID_LIST:
+ if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ *msg_ready = true;
+ break;
+ default:
+ DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+ struct hdcp2_dp_msg_data *hdcp2_msg_data)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ u8 msg_id = hdcp2_msg_data->msg_id;
+ int ret, timeout;
+ bool msg_ready = false;
+
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
+ timeout = hdcp2_msg_data->timeout2;
+ else
+ timeout = hdcp2_msg_data->timeout;
+
+ /*
+	 * There is no way to detect the CERT, LPRIME and STREAM_READY
+	 * availability, so wait for the timeout and then read the msg.
+ */
+ if (!hdcp2_msg_data->msg_detectable) {
+ mdelay(timeout);
+ ret = 0;
+ } else {
+ /*
+		 * As we want to check the msg availability at the timeout,
+		 * ignore the timeout of the CP_IRQ wait itself.
+ */
+ intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+ ret = hdcp2_detect_msg_availability(intel_dig_port,
+ msg_id, &msg_ready);
+ if (!msg_ready)
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret)
+ DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
+
+ return ret;
+}
+
+static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+ if (hdcp2_msg_data[i].msg_id == msg_id)
+ return &hdcp2_msg_data[i];
+
+ return NULL;
+}
+
+static
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_write, len;
+ struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+
+ offset = hdcp2_msg_data->offset;
+
+ /* No msg_id in DP HDCP2.2 msgs */
+ bytes_to_write = size - 1;
+ byte++;
+
+ hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
+ while (bytes_to_write) {
+ len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
+
+ ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+ offset, (void *)byte, len);
+ if (ret < 0)
+ return ret;
+
+ bytes_to_write -= ret;
+ byte += ret;
+ offset += ret;
+ }
+
+ return size;
+}
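
intel_dp_hdcp2_write_msg() drops the leading msg_id byte (DP HDCP 2.2 messages are addressed by DPCD offset instead) and then chunks the payload, since one AUX transaction carries at most DP_AUX_MAX_PAYLOAD_BYTES. A standalone sketch of the chunking loop, with write_chunk() standing in for drm_dp_dpcd_write() and the offset purely illustrative:

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define MAX_PAYLOAD 16 /* per-transaction payload limit used for this sketch */

/* Stand-in for drm_dp_dpcd_write(): pretend every byte is accepted. */
static ssize_t write_chunk(unsigned int offset, const void *buf, size_t len)
{
	printf("write %zu bytes at offset 0x%x\n", len, offset);
	return (ssize_t)len;
}

static int write_msg(unsigned int offset, const void *buf, size_t size)
{
	const unsigned char *byte = buf;
	size_t remaining = size;

	while (remaining) {
		size_t len = remaining > MAX_PAYLOAD ? MAX_PAYLOAD : remaining;
		ssize_t ret = write_chunk(offset, byte, len);

		if (ret < 0)
			return (int)ret;

		remaining -= (size_t)ret;
		byte += ret;
		offset += (unsigned int)ret;
	}
	return 0;
}

int main(void)
{
	unsigned char msg[40];

	memset(msg, 0xab, sizeof(msg));
	return write_msg(0x69000, msg, sizeof(msg)); /* example offset only */
}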
+
+static
+ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u32 dev_cnt;
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXINFO_OFFSET,
+ (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+ if (ret != HDCP_2_2_RXINFO_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+
+ if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+ dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
+
+ ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+ (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+
+ return ret;
+}
+
+static
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_recv, len;
+ struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+ offset = hdcp2_msg_data->offset;
+
+ ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+ if (ret < 0)
+ return ret;
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
+ ret = get_receiver_id_list_size(intel_dig_port);
+ if (ret < 0)
+ return ret;
+
+ size = ret;
+ }
+ bytes_to_recv = size - 1;
+
+	/* DP adaptation msgs have no msg_id */
+ byte++;
+
+ while (bytes_to_recv) {
+ len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+ (void *)byte, len);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+ return ret;
+ }
+
+ bytes_to_recv -= ret;
+ byte += ret;
+ offset += ret;
+ }
+ byte = buf;
+ *byte = msg_id;
+
+ return size;
+}
+
+static
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+ bool is_repeater, u8 content_type)
+{
+ struct hdcp2_dp_errata_stream_type stream_type_msg;
+
+ if (is_repeater)
+ return 0;
+
+ /*
+	 * Errata for DP: As the stream type is used for encryption, the
+	 * receiver must be told the stream type so that it can decrypt the
+	 * content.
+	 * A repeater is told the stream type later, as part of its
+	 * authentication.
+ */
+ stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
+ stream_type_msg.stream_type = content_type;
+
+ return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ sizeof(stream_type_msg));
+}
+
+static
+int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_status;
+ int ret;
+
+ ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ if (ret)
+ return ret;
+
+ if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
+ ret = HDCP_LINK_INTEGRITY_FAILURE;
+ else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+ bool *capable)
+{
+ u8 rx_caps[3];
+ int ret;
+
+ *capable = false;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+ rx_caps, HDCP_2_2_RXCAPS_LEN);
+ if (ret != HDCP_2_2_RXCAPS_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+ HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
+ *capable = true;
+
+ return 0;
+}
+
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -5859,6 +6177,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .check_2_2_link = intel_dp_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_DP,
};
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
@@ -6072,43 +6396,34 @@ static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+ u32 pp_on, pp_off, pp_ctl;
struct pps_registers regs;
intel_pps_get_registers(intel_dp, &regs);
- /* Workaround: Need to write PP_CONTROL with the unlock key as
- * the very first thing. */
pp_ctl = ironlake_get_pp_control(intel_dp);
+ /* Ensure PPS is unlocked */
+ if (!HAS_DDI(dev_priv))
+ I915_WRITE(regs.pp_ctrl, pp_ctl);
+
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
- if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
- !HAS_PCH_ICP(dev_priv)) {
- I915_WRITE(regs.pp_ctrl, pp_ctl);
- pp_div = I915_READ(regs.pp_div);
- }
/* Pull timing values out of registers */
- seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
+ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
- seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ u32 pp_div;
- seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
+ pp_div = I915_READ(regs.pp_div);
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) {
- seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
- BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
+ seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
- seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+ seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
}
}
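
The PPS readout above replaces the open-coded shift/mask pairs with REG_FIELD_GET()/REG_FIELD_PREP() and keys the PP_DIVISOR-vs-PP_CONTROL split off i915_mmio_reg_valid(regs.pp_div) instead of repeating the platform checks. The kernel macros are built on the generic bitfield helpers; the sketch below only shows the equivalent arithmetic for a contiguous mask (the mask value is illustrative, and __builtin_ctz assumes GCC/Clang):

#include <stdio.h>

/* Extract/insert a value for a contiguous bit mask, FIELD_GET/FIELD_PREP style. */
static unsigned int field_get(unsigned int mask, unsigned int reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	const unsigned int POWER_UP_DELAY_MASK = 0x1fff0000; /* illustrative mask */
	unsigned int pp_on = field_prep(POWER_UP_DELAY_MASK, 210);

	printf("t1_t3 = %u\n", field_get(POWER_UP_DELAY_MASK, pp_on)); /* 210 */
	return 0;
}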
@@ -6233,7 +6548,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_div, port_sel = 0;
+ u32 pp_on, pp_off, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
@@ -6268,23 +6583,10 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_ctrl, pp);
}
- pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
- (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
- /* Compute the divisor for the pp clock, simply match the Bspec
- * formula. */
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) {
- pp_div = I915_READ(regs.pp_ctrl);
- pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
- << BXT_POWER_CYCLE_DELAY_SHIFT);
- } else {
- pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
- pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
- << PANEL_POWER_CYCLE_DELAY_SHIFT);
- }
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
@@ -6311,19 +6613,29 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv))
- I915_WRITE(regs.pp_ctrl, pp_div);
- else
- I915_WRITE(regs.pp_div, pp_div);
+
+ /*
+ * Compute the divisor for the pp clock, simply match the Bspec formula.
+ */
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ I915_WRITE(regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ } else {
+ u32 pp_ctl;
+
+ pp_ctl = I915_READ(regs.pp_ctrl);
+ pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+ I915_WRITE(regs.pp_ctrl, pp_ctl);
+ }
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) ?
- (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
- I915_READ(regs.pp_div));
+ i915_mmio_reg_valid(regs.pp_div) ?
+ I915_READ(regs.pp_div) :
+ (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
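
With pp_div validity deciding the register split, the write side above still follows the Bspec formula for the reference divider, (100 * div) / 2 - 1 with div taken from the raw clock in kHz divided by 1000, and stores the t11_t12 power-cycle delay in a coarser unit, hence DIV_ROUND_UP(t11_t12, 1000) on write and the matching * 1000 on readout. A standalone check of that arithmetic (the clock and delay values are illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rawclk_khz = 24000;          /* illustrative raw clock */
	unsigned int div = rawclk_khz / 1000;
	unsigned int t11_t12 = 5000;              /* software units */

	/* Bspec reference divider formula used for the PP clock. */
	unsigned int ref_div = (100 * div) / 2 - 1;

	/* Cycle delay: the register keeps a coarser unit (factor of 1000). */
	unsigned int reg_val = DIV_ROUND_UP(t11_t12, 1000);
	unsigned int readback = reg_val * 1000;

	printf("ref_div=%u reg_val=%u readback=%u\n", ref_div, reg_val, readback);
	return 0;
}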
static void intel_dp_pps_init(struct intel_dp *intel_dp)
@@ -6694,9 +7006,7 @@ intel_dp_drrs_init(struct intel_connector *connector,
return NULL;
}
- downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
- &connector->base);
-
+ downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
return NULL;
@@ -6718,7 +7028,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
- struct drm_display_mode *scan;
enum pipe pipe = INVALID_PIPE;
intel_wakeref_t wakeref;
struct edid *edid;
@@ -6734,7 +7043,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(&dev_priv->drm)) {
+ if (intel_get_lvds_encoder(dev_priv)) {
WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
DRM_INFO("LVDS was detected, not registering eDP\n");
@@ -6771,26 +7080,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
- /* prefer fixed mode from EDID if available */
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- downclock_mode = intel_dp_drrs_init(
- intel_connector, fixed_mode);
- break;
- }
- }
+ fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
+ if (fixed_mode)
+ downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
/* fallback to VBT if available for eDP */
- if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
- fixed_mode = drm_mode_duplicate(dev,
- dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode) {
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
- }
- }
+ if (!fixed_mode)
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 000000000000..5e9e8d13de6e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_H__
+#define __INTEL_DP_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_connector_state;
+struct drm_encoder;
+struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+struct intel_encoder;
+
+struct link_config_limits {
+ int min_clock, max_clock;
+ int min_lane_count, max_lane_count;
+ int min_bpp, max_bpp;
+};
+
+void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits);
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state);
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
+bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
+ enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count,
+ bool link_mst);
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count);
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
+int intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
+bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
+void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_on(struct intel_dp *intel_dp);
+void intel_edp_panel_off(struct intel_dp *intel_dp);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_max_lane_count(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
+
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+
+void
+intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+ u8 dp_train_pat);
+void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp);
+void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
+u8
+intel_dp_voltage_max(struct intel_dp *intel_dp);
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
+void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+ u8 *link_bw, u8 *rate_select);
+bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
+bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
+ int mode_clock, int mode_hdisplay);
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+ int mode_hdisplay);
+
+bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_digital_port_connected(struct intel_encoder *encoder);
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port);
+
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+ return ~((1 << lane_count) - 1) & 0xf;
+}
+
+#endif /* __INTEL_DP_H__ */
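
The new header ends with intel_dp_unused_lane_mask(), which returns the 4-bit mask of lanes a link leaves unused (1 lane gives 0xe, 2 gives 0xc, 4 gives 0x0). A quick standalone check of that expression:

#include <stdio.h>

static unsigned int unused_lane_mask(int lane_count)
{
	return ~((1u << lane_count) - 1) & 0xf;
}

int main(void)
{
	int lanes[] = { 1, 2, 4 };

	for (int i = 0; i < 3; i++)
		printf("lane_count=%d unused=0x%x\n",
		       lanes[i], unused_lane_mask(lanes[i]));
	return 0;
}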
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index b59c87daa4f7..54b069333e2f 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -21,6 +21,7 @@
* IN THE SOFTWARE.
*/
+#include "intel_dp.h"
#include "intel_drv.h"
static void
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index fb67cd931117..8839eaea8371 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -23,78 +23,119 @@
*
*/
-#include "i915_drv.h"
-#include "intel_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+
+static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ void *port = connector->port;
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ DP_DPCD_QUIRK_CONSTANT_N);
+ int bpp, slots = -EINVAL;
+
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_clock;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ crtc_state->pipe_bpp = bpp;
+
+ crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
+ crtc_state->pipe_bpp);
+
+ slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
+ port, crtc_state->pbn);
+ if (slots == -EDEADLK)
+ return slots;
+ if (slots >= 0)
+ break;
+ }
+
+ if (slots < 0) {
+ DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+ return slots;
+ }
+
+ intel_link_compute_m_n(crtc_state->pipe_bpp,
+ crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ &crtc_state->dp_m_n,
+ constant_n);
+ crtc_state->dp_m_n.tu = slots;
+
+ return 0;
+}
+
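
intel_dp_mst_compute_link_config() keeps lane count and link rate pinned at their maximum and only steps pipe_bpp down from limits->max_bpp in 2*3 decrements until drm_dp_atomic_find_vcpi_slots() can satisfy the mode's PBN. The standalone sketch below mirrors only the shape of that search; find_slots() and the bandwidth figure it checks are made-up stand-ins, not the real PBN math:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the topology manager: does this bandwidth demand fit? */
static int find_slots(int demand)
{
	int available = 60;

	return demand <= available ? demand : -1; /* -1: no room */
}

/* Walk bpp down from max to min in 6-bpp steps until the demand fits. */
static int pick_bpp(int max_bpp, int min_bpp, int clock_khz)
{
	for (int bpp = max_bpp; bpp >= min_bpp; bpp -= 2 * 3) {
		int demand = clock_khz / 10000 * bpp; /* toy "PBN" stand-in */

		if (find_slots(demand) >= 0)
			return bpp;
	}
	return -1;
}

int main(void)
{
	printf("picked bpp: %d\n", pick_bpp(30, 18, 14850));
	return 0;
}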
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_connector *connector = conn_state->connector;
- void *port = to_intel_connector(connector)->port;
- struct drm_atomic_state *state = pipe_config->base.state;
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_crtc_state *old_crtc_state =
- drm_atomic_get_old_crtc_state(state, crtc);
- int bpp;
- int lane_count, slots =
- to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int mst_pbn;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
- DP_DPCD_QUIRK_CONSTANT_N);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ void *port = connector->port;
+ struct link_config_limits limits;
+ int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- bpp = 24;
- if (intel_dp->compliance.test_data.bpc) {
- bpp = intel_dp->compliance.test_data.bpc * 3;
- DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
- bpp);
- }
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ pipe_config->has_audio =
+ drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
+ else
+ pipe_config->has_audio =
+ intel_conn_state->force_audio == HDMI_AUDIO_ON;
+
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
- lane_count = intel_dp_max_lane_count(intel_dp);
-
- pipe_config->lane_count = lane_count;
-
- pipe_config->pipe_bpp = bpp;
+ limits.min_clock =
+ limits.max_clock = intel_dp_max_link_rate(intel_dp);
- pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
+ limits.min_lane_count =
+ limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
- if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
- pipe_config->has_audio = true;
+ limits.min_bpp = intel_dp_min_bpp(pipe_config);
+ limits.max_bpp = pipe_config->pipe_bpp;
- mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
- pipe_config->pbn = mst_pbn;
+ intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
- slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port,
- mst_pbn);
- if (slots < 0) {
- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
- slots);
- return slots;
- }
-
- intel_link_compute_m_n(bpp, lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
+ conn_state, &limits);
+ if (ret)
+ return ret;
- pipe_config->dp_m_n.tu = slots;
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
@@ -117,7 +158,11 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_crtc *new_crtc = new_conn_state->crtc;
struct drm_crtc_state *crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
- int ret = 0;
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(connector, new_conn_state);
+ if (ret)
+ return ret;
if (!old_conn_state->crtc)
return 0;
@@ -289,7 +334,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DP_TP_STATUS(port),
DP_TP_STATUS_ACT_SENT,
DP_TP_STATUS_ACT_SENT,
@@ -354,11 +399,13 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
@@ -373,7 +420,6 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
if (drm_connector_is_unregistered(connector))
@@ -386,7 +432,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
- mode_rate = intel_dp_link_required(mode->clock, bpp);
+ mode_rate = intel_dp_link_required(mode->clock, 18);
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
@@ -487,6 +533,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (ret)
goto err;
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
+
return connector;
err:
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 95cb8b154f87..ab4ac7158b79 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_dp.h"
#include "intel_drv.h"
/**
@@ -341,7 +342,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_PORT_REF_DW3(phy),
GRC_DONE, GRC_DONE,
10))
@@ -383,7 +384,8 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+ if (intel_wait_for_register_fw(&dev_priv->uncore,
+ BXT_PORT_CL1CM_DW0(phy),
PHY_RESERVED | PHY_POWER_GOOD,
PHY_POWER_GOOD,
1))
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 0a42d11c4c33..e01c057ce50b 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -241,11 +241,11 @@ out:
}
static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
enum intel_dpll_id range_min,
enum intel_dpll_id range_max)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
struct intel_shared_dpll_state *shared_dpll;
@@ -420,9 +420,10 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
}
static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ibx_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
@@ -436,7 +437,7 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B);
}
@@ -764,15 +765,13 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
- struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
{
struct intel_shared_dpll *pll;
u32 val;
unsigned int p, n2, r2;
- hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+ hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -780,7 +779,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
if (!pll)
@@ -790,11 +789,12 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
}
static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
+hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
+ int clock = crtc_state->port_clock;
switch (clock / 2) {
case 81000:
@@ -820,19 +820,18 @@ hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
}
static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+hsw_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
+ pll = hsw_ddi_hdmi_get_dpll(crtc_state);
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- pll = hsw_ddi_dp_get_dpll(encoder, clock);
+ pll = hsw_ddi_dp_get_dpll(crtc_state);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
return NULL;
@@ -840,7 +839,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
crtc_state->dpll_hw_state.spll =
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SPLL, DPLL_ID_SPLL);
} else {
return NULL;
@@ -961,7 +960,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[id].ctl,
I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL_STATUS,
DPLL_LOCK(id),
DPLL_LOCK(id),
@@ -1308,9 +1307,7 @@ skip_remaining_dividers:
return true;
}
-static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- int clock)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
@@ -1323,7 +1320,8 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
- if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+ if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ &wrpll_params))
return false;
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
@@ -1346,8 +1344,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
}
static bool
-skl_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
u32 ctrl1;
@@ -1356,7 +1353,7 @@ skl_ddi_dp_set_dpll_hw_state(int clock,
* as the DPLL id in this function.
*/
ctrl1 = DPLL_CTRL1_OVERRIDE(0);
- switch (clock / 2) {
+ switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
break;
@@ -1378,44 +1375,43 @@ skl_ddi_dp_set_dpll_hw_state(int clock,
break;
}
- dpll_hw_state->ctrl1 = ctrl1;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+
return true;
}
static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+skl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
bool bret;
- struct intel_dpll_hw_state dpll_hw_state;
-
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+ bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+ bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
return NULL;
}
- crtc_state->dpll_hw_state = dpll_hw_state;
} else {
return NULL;
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL0);
else
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL1,
DPLL_ID_SKL_DPLL3);
if (!pll)
@@ -1692,10 +1688,10 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
};
static bool
-bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state, int clock,
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct dpll best_clock;
/* Calculate HDMI div */
@@ -1703,9 +1699,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
* FIXME: tie the following calculation into
* i9xx_crtc_compute_clock
*/
- if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+ if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- clock, pipe_name(intel_crtc->pipe));
+ crtc_state->port_clock,
+ pipe_name(crtc->pipe));
return false;
}
@@ -1722,8 +1719,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
return true;
}
-static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
+ struct bxt_clk_div *clk_div)
{
+ int clock = crtc_state->port_clock;
int i;
*clk_div = bxt_dp_clk_val[0];
@@ -1737,14 +1736,17 @@ static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
}
-static bool bxt_ddi_set_dpll_hw_state(int clock,
- struct bxt_clk_div *clk_div,
- struct intel_dpll_hw_state *dpll_hw_state)
+static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
+ const struct bxt_clk_div *clk_div)
{
+ struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+ int clock = crtc_state->port_clock;
int vco = clk_div->vco;
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
u32 lanestagger;
+ memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
+
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
@@ -1804,55 +1806,45 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
}
static bool
-bxt_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct bxt_clk_div clk_div = {0};
+ struct bxt_clk_div clk_div = {};
- bxt_ddi_dp_pll_dividers(clock, &clk_div);
+ bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+ return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
static bool
-bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state, int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct bxt_clk_div clk_div = { };
+ struct bxt_clk_div clk_div = {};
- bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
+ bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+ return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+bxt_get_dpll(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
{
- struct intel_dpll_hw_state dpll_hw_state = { };
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
- int i, clock = crtc_state->port_clock;
+ enum intel_dpll_id id;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
- &dpll_hw_state))
+ !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
return NULL;
if (intel_crtc_has_dp_encoder(crtc_state) &&
- !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+ !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
return NULL;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- crtc_state->dpll_hw_state = dpll_hw_state;
-
/* 1:1 mapping between ports and PLLs */
- i = (enum intel_dpll_id) encoder->port;
- pll = intel_get_shared_dpll_by_id(dev_priv, i);
+ id = (enum intel_dpll_id) encoder->port;
+ pll = intel_get_shared_dpll_by_id(dev_priv, id);
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
@@ -1911,8 +1903,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
- struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
void (*dump_hw_state)(struct drm_i915_private *dev_priv,
@@ -1986,7 +1977,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
PLL_POWER_STATE,
@@ -2027,7 +2018,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_LOCK,
PLL_LOCK,
@@ -2075,7 +2066,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_LOCK,
0,
@@ -2097,7 +2088,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
0,
@@ -2242,11 +2233,11 @@ int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
}
static bool
-cnl_ddi_calculate_wrpll(int clock,
- struct drm_i915_private *dev_priv,
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
- u32 afe_clock = clock * 5;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 afe_clock = crtc_state->port_clock * 5;
u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
@@ -2282,23 +2273,20 @@ cnl_ddi_calculate_wrpll(int clock,
ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
- cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
- kdiv);
+ cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
+ pdiv, qdiv, kdiv);
return true;
}
-static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- int clock)
+static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params wrpll_params = { 0, };
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
- if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
+ if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
return false;
cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -2319,14 +2307,13 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
}
static bool
-cnl_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
u32 cfgcr0;
cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
- switch (clock / 2) {
+ switch (crtc_state->port_clock / 2) {
case 81000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
break;
@@ -2356,41 +2343,40 @@ cnl_ddi_dp_set_dpll_hw_state(int clock,
break;
}
- dpll_hw_state->cfgcr0 = cfgcr0;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+
return true;
}
static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+cnl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
bool bret;
- struct intel_dpll_hw_state dpll_hw_state;
-
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+ bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+ bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
return NULL;
}
- crtc_state->dpll_hw_state = dpll_hw_state;
} else {
DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
crtc_state->output_types);
return NULL;
}
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL2);
if (!pll) {
@@ -2431,47 +2417,69 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dump_hw_state = cnl_dump_hw_state,
};
+struct icl_combo_pll_params {
+ int clock;
+ struct skl_wrpll_params wrpll;
+};
+
/*
 * These values are already adjusted: they're the bits we write to the
* registers, not the logical values.
*/
-static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = {
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
- .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
- .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
- .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
- { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
- .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
+ { 540000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 270000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 162000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 324000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 216000,
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+ { 432000,
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 648000,
+ { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 810000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
+
/* Also used for 38.4 MHz values. */
-static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
- .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
- .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
- .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
- { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
- .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
+ { 540000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 270000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 162000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 324000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 216000,
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+ { 432000,
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 648000,
+ { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 810000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
@@ -2484,72 +2492,53 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
-static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
+static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- const struct skl_wrpll_params *params;
-
- params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_dp_combo_pll_24MHz_values :
- icl_dp_combo_pll_19_2MHz_values;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ const struct icl_combo_pll_params *params =
+ dev_priv->cdclk.hw.ref == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+ int clock = crtc_state->port_clock;
+ int i;
- switch (clock) {
- case 540000:
- *pll_params = params[0];
- break;
- case 270000:
- *pll_params = params[1];
- break;
- case 162000:
- *pll_params = params[2];
- break;
- case 324000:
- *pll_params = params[3];
- break;
- case 216000:
- *pll_params = params[4];
- break;
- case 432000:
- *pll_params = params[5];
- break;
- case 648000:
- *pll_params = params[6];
- break;
- case 810000:
- *pll_params = params[7];
- break;
- default:
- MISSING_CASE(clock);
- return false;
+ for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
+ if (clock == params[i].clock) {
+ *pll_params = params[i].wrpll;
+ return true;
+ }
}
- return true;
+ MISSING_CASE(clock);
+ return false;
}
-static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock,
+static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
return true;
}
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder, int clock,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
if (intel_port_is_tc(dev_priv, encoder->port))
- ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
+ ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
+ ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
else
- ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
+ ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
if (!ret)
return false;
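The icl_calc_dp_combo_pll() rewrite earlier in this hunk replaces the eight-way switch with a table keyed directly on the DP link clock, so each row pairs the clock with its precomputed WRPLL parameters. A stripped-down sketch of that lookup shape (simplified types; the two sample rows mirror the 24 MHz table above):

#include <stdbool.h>
#include <stddef.h>

struct wrpll_params { int dco_integer, dco_fraction, pdiv, kdiv; };

struct combo_pll_entry {
	int clock;			/* DP link clock in kHz */
	struct wrpll_params wrpll;	/* precomputed divider settings */
};

static const struct combo_pll_entry combo_pll_table[] = {
	{ 540000, { 0x151, 0x4000, 0x2, 1 } },	/* 5.4 GHz link rate */
	{ 270000, { 0x151, 0x4000, 0x2, 2 } },	/* 2.7 GHz link rate */
};

static bool lookup_combo_pll(int clock, struct wrpll_params *out)
{
	size_t i;

	for (i = 0; i < sizeof(combo_pll_table) / sizeof(combo_pll_table[0]); i++) {
		if (combo_pll_table[i].clock == clock) {
			*out = combo_pll_table[i].wrpll;
			return true;
		}
	}
	return false;	/* unknown clock; the driver logs MISSING_CASE() here */
}

int main(void)
{
	struct wrpll_params p;

	return lookup_combo_pll(270000, &p) ? 0 : 1;
}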
@@ -2563,82 +2552,16 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
DPLL_CFGCR1_PDIV(pll_params.pdiv) |
DPLL_CFGCR1_CENTRAL_FREQ_8400;
- pll_state->cfgcr0 = cfgcr0;
- pll_state->cfgcr1 = cfgcr1;
- return true;
-}
-
-int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- u32 pll_id)
-{
- u32 cfgcr0, cfgcr1;
- u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
- const struct skl_wrpll_params *params;
- int index, n_entries, link_clock;
-
- /* Read back values from DPLL CFGCR registers */
- cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
-
- dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
- dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRACTION_SHIFT;
- pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
- kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
- qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
- DPLL_CFGCR1_QDIV_MODE_SHIFT;
- qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
- DPLL_CFGCR1_QDIV_RATIO_SHIFT;
-
- params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_dp_combo_pll_24MHz_values :
- icl_dp_combo_pll_19_2MHz_values;
- n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
-
- for (index = 0; index < n_entries; index++) {
- if (dco_integer == params[index].dco_integer &&
- dco_fraction == params[index].dco_fraction &&
- pdiv == params[index].pdiv &&
- kdiv == params[index].kdiv &&
- qdiv_mode == params[index].qdiv_mode &&
- qdiv_ratio == params[index].qdiv_ratio)
- break;
- }
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
- /* Map PLL Index to Link Clock */
- switch (index) {
- default:
- MISSING_CASE(index);
- /* fall through */
- case 0:
- link_clock = 540000;
- break;
- case 1:
- link_clock = 270000;
- break;
- case 2:
- link_clock = 162000;
- break;
- case 3:
- link_clock = 324000;
- break;
- case 4:
- link_clock = 216000;
- break;
- case 5:
- link_clock = 432000;
- break;
- case 6:
- link_clock = 648000;
- break;
- case 7:
- link_clock = 810000;
- break;
- }
+ crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- return link_clock;
+ return true;
}
+
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1;
@@ -2649,11 +2572,6 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
return tc_port + DPLL_ID_ICL_MGPLL1;
}
-bool intel_dpll_is_combophy(enum intel_dpll_id id)
-{
- return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
-}
-
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
struct intel_dpll_hw_state *state)
@@ -2728,12 +2646,12 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation; that's why it looks so different.
*/
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder, int clock,
- struct intel_dpll_hw_state *pll_state)
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
int refclk_khz = dev_priv->cdclk.hw.ref;
+ int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
u32 prop_coeff, int_coeff;
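The comment above this function notes that the spec's real-number math had to be adapted to integer-only arithmetic, which is why the declarations carry *_int/*_rem/*_frac triples: a real-valued divider is split into an integer part plus a binary fraction. A hedged illustration of the idea (the 22-bit fraction width and the sample clocks are assumptions for the sketch, not values taken from the hardware spec):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dco_khz = 8100000;	/* assumed target DCO frequency, kHz */
	uint32_t ref_khz = 38400;	/* reference clock, kHz */
	uint32_t m1div = 2;
	uint32_t div = ref_khz * m1div;

	/* m2 = dco / (ref * m1), carried as integer + 22-bit binary fraction */
	uint32_t m2_int = dco_khz / div;
	uint64_t rem = dco_khz % div;
	uint32_t m2_frac = (uint32_t)((rem << 22) / div);

	printf("m2 = %u + %u/2^22\n", (unsigned)m2_int, (unsigned)m2_frac);
	return 0;
}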
@@ -2743,6 +2661,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ memset(pll_state, 0, sizeof(*pll_state));
+
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state)) {
DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
@@ -2892,23 +2812,20 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+icl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
- struct intel_dpll_hw_state pll_state = {};
enum port port = encoder->port;
enum intel_dpll_id min, max;
- int clock = crtc_state->port_clock;
bool ret;
if (intel_port_is_combophy(dev_priv, port)) {
min = DPLL_ID_ICL_DPLL0;
max = DPLL_ID_ICL_DPLL1;
- ret = icl_calc_dpll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_dpll_state(crtc_state, encoder);
} else if (intel_port_is_tc(dev_priv, port)) {
if (encoder->type == INTEL_OUTPUT_DP_MST) {
struct intel_dp_mst_encoder *mst_encoder;
@@ -2922,16 +2839,14 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
if (intel_dig_port->tc_type == TC_PORT_TBT) {
min = DPLL_ID_ICL_TBTPLL;
max = min;
- ret = icl_calc_dpll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_dpll_state(crtc_state, encoder);
} else {
enum tc_port tc_port;
tc_port = intel_port_to_tc(dev_priv, port);
min = icl_tc_port_to_pll_id(tc_port);
max = min;
- ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_mg_pll_state(crtc_state);
}
} else {
MISSING_CASE(port);
@@ -2943,9 +2858,8 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return NULL;
}
- crtc_state->dpll_hw_state = pll_state;
- pll = intel_find_shared_dpll(crtc, crtc_state, min, max);
+ pll = intel_find_shared_dpll(crtc_state, min, max);
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
return NULL;
@@ -2956,19 +2870,72 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
-static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
+static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
{
- if (intel_dpll_is_combophy(id))
- return CNL_DPLL_ENABLE(id);
- else if (id == DPLL_ID_ICL_TBTPLL)
- return TBT_PLL_ENABLE;
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
+ return false;
- return MG_PLL_ENABLE(icl_pll_id_to_tc_port(id));
+ val = I915_READ(MG_PLL_ENABLE(tc_port));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
+ hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+
+ hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+ hw_state->mg_pll_tdc_coldst_bias =
+ I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+
+ if (dev_priv->cdclk.hw.ref == 38400) {
+ hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ hw_state->mg_pll_bias_mask = 0;
+ } else {
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
+ }
+
+ hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ return ret;
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *hw_state,
+ i915_reg_t enable_reg)
{
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
@@ -2980,54 +2947,12 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(icl_pll_id_to_enable_reg(id));
+ val = I915_READ(enable_reg);
if (!(val & PLL_ENABLE))
goto out;
- if (intel_dpll_is_combophy(id) ||
- id == DPLL_ID_ICL_TBTPLL) {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
- } else {
- enum tc_port tc_port = icl_pll_id_to_tc_port(id);
-
- hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
- hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
-
- hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
- hw_state->mg_clktop2_coreclkctl1 &=
- MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
-
- hw_state->mg_clktop2_hsclkctl =
- I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
- hw_state->mg_clktop2_hsclkctl &=
- MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
-
- hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
- hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
- hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
- hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
-
- hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
- hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
-
- if (dev_priv->cdclk.hw.ref == 38400) {
- hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
- hw_state->mg_pll_bias_mask = 0;
- } else {
- hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
- hw_state->mg_pll_bias_mask = -1U;
- }
-
- hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
- hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
- }
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
ret = true;
out:
@@ -3035,6 +2960,21 @@ out:
return ret;
}
+static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state,
+ CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
+}
+
static void icl_dpll_write(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
@@ -3096,11 +3036,10 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
-static void icl_pll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
{
- const enum intel_dpll_id id = pll->info->id;
- i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
u32 val;
val = I915_READ(enable_reg);
@@ -3111,37 +3050,90 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
- PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not enabled\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+ PLL_POWER_STATE, PLL_POWER_STATE, 1))
+ DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+}
- if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
- icl_dpll_write(dev_priv, pll);
- else
- icl_mg_pll_write(dev_priv, pll);
+static void icl_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
+{
+ u32 val;
+
+ val = I915_READ(enable_reg);
+ val |= PLL_ENABLE;
+ I915_WRITE(enable_reg, val);
+
+ /* Timeout is actually 600us. */
+ if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+ PLL_LOCK, PLL_LOCK, 1))
+ DRM_ERROR("PLL %d not locked\n", pll->info->id);
+}
+
+static void combo_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+ icl_dpll_write(dev_priv, pll);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
* paths should already be setting the appropriate voltage, hence we do
- * nothign here.
+ * nothing here.
*/
- val = I915_READ(enable_reg);
- val |= PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ icl_pll_enable(dev_priv, pll, enable_reg);
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
- 1)) /* 600us actually. */
- DRM_ERROR("PLL %d not locked\n", id);
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void tbt_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+ icl_dpll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void mg_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg =
+ MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+ icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+ icl_mg_pll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, enable_reg);
/* DVFS post sequence would be here. See the comment above. */
}
static void icl_pll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
{
- const enum intel_dpll_id id = pll->info->id;
- i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
u32 val;
/* The first steps are done by intel_ddi_post_disable(). */
@@ -3157,8 +3149,9 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 1us. */
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
- DRM_ERROR("PLL %d locked\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore,
+ enable_reg, PLL_LOCK, 0, 1))
+ DRM_ERROR("PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@@ -3170,9 +3163,30 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
- 1))
- DRM_ERROR("PLL %d Power not disabled\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore,
+ enable_reg, PLL_POWER_STATE, 0, 1))
+ DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+}
+
+static void combo_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static void tbt_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
+}
+
+static void mg_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg =
+ MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+ icl_pll_disable(dev_priv, pll, enable_reg);
}
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -3197,20 +3211,32 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias);
}
-static const struct intel_shared_dpll_funcs icl_pll_funcs = {
- .enable = icl_pll_enable,
- .disable = icl_pll_disable,
- .get_hw_state = icl_pll_get_hw_state,
+static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+ .enable = combo_pll_enable,
+ .disable = combo_pll_disable,
+ .get_hw_state = combo_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+ .enable = tbt_pll_enable,
+ .disable = tbt_pll_disable,
+ .get_hw_state = tbt_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = mg_pll_get_hw_state,
};
static const struct dpll_info icl_plls[] = {
- { "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "MG PLL 4", &icl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
{ },
};
@@ -3220,6 +3246,18 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct dpll_info ehl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr ehl_pll_mgr = {
+ .dpll_info = ehl_plls,
+ .get_dpll = icl_get_dpll,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -3233,7 +3271,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_ICELAKE(dev_priv))
+ if (IS_ELKHARTLAKE(dev_priv))
+ dpll_mgr = &ehl_pll_mgr;
+ else if (INTEL_GEN(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
else if (IS_CANNONLAKE(dev_priv))
dpll_mgr = &cnl_pll_mgr;
@@ -3271,31 +3311,29 @@ void intel_shared_dpll_init(struct drm_device *dev)
/**
* intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc: CRTC
- * @crtc_state: atomic state for @crtc
+ * @crtc_state: atomic state for the crtc
* @encoder: encoder
*
* Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc to the returned pll is registered in the atomic
- * state. That configuration is made effective by calling
+ * reference from the @crtc_state to the returned pll is registered in the
+ * atomic state. That configuration is made effective by calling
* intel_shared_dpll_swap_state(). The reference should be released by calling
* intel_release_shared_dpll().
*
* Returns:
- * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
+ * A shared DPLL to be used by @crtc_state and @encoder.
*/
struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
if (WARN_ON(!dpll_mgr))
return NULL;
- return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
+ return dpll_mgr->get_dpll(crtc_state, encoder);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 40e8391a92f2..bd8124cc81ed 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -327,8 +327,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *state,
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
struct intel_encoder *encoder);
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
@@ -341,8 +340,6 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
-int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- u32 pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 15db41394b9e..a38b9cff5cd0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -27,22 +27,24 @@
#include <linux/async.h>
#include <linux/i2c.h>
-#include <linux/hdmi.h>
#include <linux/sched/clock.h>
#include <linux/stackdepot.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
+
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/drm_atomic.h>
+#include <drm/i915_drm.h>
+#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
+#include "i915_drv.h"
+
struct drm_printer;
/**
@@ -270,10 +272,12 @@ struct intel_encoder {
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_state *pipe_config);
- /* Returns a mask of power domains that need to be referenced as part
- * of the hardware state readout code. */
- u64 (*get_power_domains)(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state);
+ /*
+ * Acquires the power domains needed for an active encoder during
+ * hardware state readout.
+ */
+ void (*get_power_domains)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@@ -323,6 +327,13 @@ struct intel_panel {
struct intel_digital_port;
+enum check_link_response {
+ HDCP_LINK_PROTECTED = 0,
+ HDCP_TOPOLOGY_CHANGE,
+ HDCP_LINK_INTEGRITY_FAILURE,
+ HDCP_REAUTH_REQUEST
+};
+
/*
* This structure serves as a translation layer between the generic HDCP code
* and the bus-specific code. What that means is that HDCP over HDMI differs
@@ -395,6 +406,32 @@ struct intel_hdcp_shim {
/* Detects panel's hdcp capability. This is optional for HDMI. */
int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
bool *hdcp_capable);
+
+ /* HDCP adaptation (DP/HDMI) required on the port */
+ enum hdcp_wired_protocol protocol;
+
+ /* Detects whether sink is HDCP2.2 capable */
+ int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
+ bool *capable);
+
+ /* Write HDCP2.2 messages */
+ int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size);
+
+ /* Read HDCP2.2 messages */
+ int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size);
+
+ /*
+ * Implementation of DP HDCP2.2 Errata for the communication of stream
+ * type to Receivers. In DP HDCP2.2 Stream type is one of the input to
+ * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
+ */
+ int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
+ bool is_repeater, u8 type);
+
+ /* HDCP2.2 Link Integrity Check */
+ int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
};
struct intel_hdcp {
@@ -404,6 +441,50 @@ struct intel_hdcp {
u64 value;
struct delayed_work check_work;
struct work_struct prop_work;
+
+ /* HDCP1.4 Encryption status */
+ bool hdcp_encrypted;
+
+ /* HDCP2.2 related definitions */
+ /* Flag indicates whether this connector supports HDCP2.2 or not. */
+ bool hdcp2_supported;
+
+ /* HDCP2.2 Encryption status */
+ bool hdcp2_encrypted;
+
+ /*
+ * Content Stream Type defined by content owner. TYPE0(0x0) content can
+ * flow in the link protected by HDCP2.2 or HDCP1.4, whereas TYPE1(0x1)
+ * content can flow only through a link protected by HDCP2.2.
+ */
+ u8 content_type;
+ struct hdcp_port_data port_data;
+
+ bool is_paired;
+ bool is_repeater;
+
+ /*
+ * Count of ReceiverID_List received. Initialized to 0 at AKE_INIT.
+ * Incremented after processing the RepeaterAuth_Send_ReceiverID_List.
+ * When it rolls over re-auth has to be triggered.
+ */
+ u32 seq_num_v;
+
+ /*
+ * Count of RepeaterAuth_Stream_Manage msg propagated.
+ * Initialized to 0 on AKE_INIT. Incremented after every successful
+ * transmission of RepeaterAuth_Stream_Manage message. When it rolls
+ * over re-Auth has to be triggered.
+ */
+ u32 seq_num_m;
+
+ /*
+ * Work queue to signal the CP_IRQ. Used for the waiters to read the
+ * available information from HDCP DP sink.
+ */
+ wait_queue_head_t cp_irq_queue;
+ atomic_t cp_irq_count;
+ int cp_irq_count_cached;
};
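The content_type comment above encodes a simple policy: Type 1 (0x1) content may only flow over a link protected by HDCP 2.2, while Type 0 (0x0) is also acceptable over HDCP 1.4. A minimal sketch of that rule (a hypothetical helper, not a function in the driver):

#include <stdbool.h>
#include <stdint.h>

static bool stream_type_allowed(uint8_t content_type,
				bool hdcp2_encrypted, bool hdcp1_encrypted)
{
	if (content_type == 0x1)
		return hdcp2_encrypted;			/* Type 1: HDCP 2.2 only */

	return hdcp2_encrypted || hdcp1_encrypted;	/* Type 0: 2.2 or 1.4 */
}

int main(void)
{
	/* Type 1 content over an HDCP 1.4-only link must be rejected. */
	return stream_type_allowed(0x1, false, true) ? 1 : 0;
}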
struct intel_connector {
@@ -478,6 +559,11 @@ struct intel_atomic_state {
* state only when all crtc's are DPMS off.
*/
struct intel_cdclk_state actual;
+
+ int force_min_cdclk;
+ bool force_min_cdclk_changed;
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
} cdclk;
bool dpll_set, modeset;
@@ -921,7 +1007,8 @@ struct intel_crtc_state {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
- bool ips_force_disable;
+
+ bool crc_enabled;
bool enable_fbc;
@@ -942,13 +1029,30 @@ struct intel_crtc_state {
/* Gamma mode programmed on the pipe */
u32 gamma_mode;
+ union {
+ /* CSC mode programmed on the pipe */
+ u32 csc_mode;
+
+ /* CHV CGM mode */
+ u32 cgm_mode;
+ };
+
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
u8 nv12_planes;
+ u8 c8_planes;
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
+ struct {
+ u32 enable;
+ u32 gcp;
+ union hdmi_infoframe avi;
+ union hdmi_infoframe spd;
+ union hdmi_infoframe hdmi;
+ } infoframes;
+
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -961,6 +1065,12 @@ struct intel_crtc_state {
/* Output down scaling is done in LSPCON device */
bool lspcon_downsampling;
+ /* enable pipe gamma? */
+ bool gamma_enable;
+
+ /* enable pipe csc? */
+ bool csc_enable;
+
/* Display Stream compression state */
struct {
bool compression_enable;
@@ -989,9 +1099,6 @@ struct intel_crtc {
struct intel_crtc_state *config;
- /* global reset count when the last flip was submitted */
- unsigned int reset_count;
-
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@@ -1260,11 +1367,15 @@ struct intel_digital_port {
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
+ void (*read_infoframe)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
void (*set_infoframes)(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- bool (*infoframe_enabled)(struct intel_encoder *encoder,
+ u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@@ -1492,6 +1603,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
u32 mask)
@@ -1519,85 +1631,8 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
-/* intel_crt.c */
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t adpa_reg, enum pipe *pipe);
-void intel_crt_init(struct drm_i915_private *dev_priv);
-void intel_crt_reset(struct drm_encoder *encoder);
-
-/* intel_ddi.c */
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
-void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
-bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
-
-void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
- bool state);
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state);
-u32 bxt_signal_levels(struct intel_dp *intel_dp);
-u32 ddi_signal_levels(struct intel_dp *intel_dp);
-u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
-u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
- u8 voltage_swing);
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- bool enable);
-void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id);
-
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height);
-
-/* intel_audio.c */
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_audio_codec_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
-void i915_audio_component_init(struct drm_i915_private *dev_priv);
-void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
-void intel_audio_init(struct drm_i915_private *dev_priv);
-void intel_audio_deinit(struct drm_i915_private *dev_priv);
-
-/* intel_cdclk.c */
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void skl_init_cdclk(struct drm_i915_private *dev_priv);
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void cnl_init_cdclk(struct drm_i915_private *dev_priv);
-void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void bxt_init_cdclk(struct drm_i915_private *dev_priv);
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
-void icl_init_cdclk(struct drm_i915_private *dev_priv);
-void icl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state);
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context);
-
/* intel_display.c */
+void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -1612,6 +1647,8 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int plane);
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
@@ -1738,7 +1775,7 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
@@ -1779,106 +1816,9 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
-/* intel_connector.c */
-int intel_connector_init(struct intel_connector *connector);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-void intel_connector_destroy(struct drm_connector *connector);
-int intel_connector_register(struct drm_connector *connector);
-void intel_connector_unregister(struct drm_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-enum pipe intel_connector_get_pipe(struct intel_connector *connector);
-int intel_connector_update_modes(struct drm_connector *connector,
- struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-/* intel_csr.c */
-void intel_csr_ucode_init(struct drm_i915_private *);
-void intel_csr_load_program(struct drm_i915_private *);
-void intel_csr_ucode_fini(struct drm_i915_private *);
-void intel_csr_ucode_suspend(struct drm_i915_private *);
-void intel_csr_ucode_resume(struct drm_i915_private *);
-
-/* intel_dp.c */
-bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t dp_reg, enum port port,
- enum pipe *pipe);
-bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
- enum port port);
-bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-void intel_dp_set_link_params(struct intel_dp *intel_dp,
- int link_rate, u8 lane_count,
- bool link_mst);
-int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
- int link_rate, u8 lane_count);
+/* intel_dp_link_training.c */
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-int intel_dp_retrain_link(struct intel_encoder *encoder,
- struct drm_modeset_acquire_ctx *ctx);
-void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable);
-void intel_dp_encoder_reset(struct drm_encoder *encoder);
-void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
-void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
-int intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct intel_dp *intel_dp);
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
- bool long_hpd);
-void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_on(struct intel_dp *intel_dp);
-void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
-int intel_dp_max_link_rate(struct intel_dp *intel_dp);
-int intel_dp_max_lane_count(struct intel_dp *intel_dp);
-int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
-void intel_plane_destroy(struct drm_plane *plane);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-
-void
-intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp);
-void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
-u8
-intel_dp_voltage_max(struct intel_dp *intel_dp);
-u8
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
-void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
- u8 *link_bw, u8 *rate_select);
-bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
-bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
-bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]);
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay);
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
- int mode_hdisplay);
/* intel_vdsc.c */
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
@@ -1886,18 +1826,6 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
-static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
-{
- return ~((1 << lane_count) - 1) & 0xf;
-}
-
-bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-int intel_dp_link_required(int pixel_clock, int bpp);
-int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
-bool intel_digital_port_connected(struct intel_encoder *encoder);
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port);
-
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1913,100 +1841,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
-/* intel_dvo.c */
-void intel_dvo_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
bool intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
-/* legacy fbdev emulation in intel_fbdev.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config_async(struct drm_device *dev);
-extern void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
-extern void intel_fbdev_fini(struct drm_i915_private *dev_priv);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
-extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
-extern void intel_fbdev_restore_mode(struct drm_device *dev);
-#else
-static inline int intel_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
-{
-}
-
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_restore_mode(struct drm_device *dev)
-{
-}
-#endif
-
-/* intel_fbc.c */
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct intel_atomic_state *state);
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
-void intel_fbc_post_update(struct intel_crtc *crtc);
-void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
-void intel_fbc_disable(struct intel_crtc *crtc);
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits, enum fb_op_origin origin);
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
-
-/* intel_hdmi.c */
-void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
- enum port port);
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-int intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state);
-bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
- struct drm_connector *connector,
- bool high_tmds_clock_ratio,
- bool scrambling);
-void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
-
-/* intel_lvds.c */
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
-bool intel_is_dual_link_lvds(struct drm_device *dev);
-
/* intel_overlay.c */
void intel_overlay_setup(struct drm_i915_private *dev_priv);
void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
@@ -2017,86 +1856,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);
-
-/* intel_panel.c */
-int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode,
- struct drm_display_mode *downclock_mode);
-void intel_panel_fini(struct intel_panel *panel);
-void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode);
-void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
-void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
-void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
- u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector,
- enum pipe pipe);
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-extern struct drm_display_mode *intel_find_panel_downclock(
- struct drm_i915_private *dev_priv,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector);
-
-#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-int intel_backlight_device_register(struct intel_connector *connector);
-void intel_backlight_device_unregister(struct intel_connector *connector);
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static inline int intel_backlight_device_register(struct intel_connector *connector)
-{
- return 0;
-}
-static inline void intel_backlight_device_unregister(struct intel_connector *connector)
-{
-}
-#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-
-/* intel_hdcp.c */
-void intel_hdcp_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *old_state,
- struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector,
- const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector);
-int intel_hdcp_disable(struct intel_connector *connector);
-int intel_hdcp_check_link(struct intel_connector *connector);
-bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
-bool intel_hdcp_capable(struct intel_connector *connector);
-
-/* intel_psr.c */
-#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
-void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state);
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
- struct drm_modeset_acquire_ctx *ctx,
- u64 value);
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_psr_flush(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
-void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
- u32 *out_value);
-bool intel_psr_enabled(struct intel_dp *intel_dp);
-
/* intel_quirks.c */
void intel_init_quirks(struct drm_i915_private *dev_priv);
@@ -2151,20 +1910,26 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *i915)
+assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
{
- WARN_ONCE(i915->runtime_pm.suspended,
+ WARN_ONCE(rpm->suspended,
"Device suspended during HW access\n");
}
static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *i915)
+__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
{
- assert_rpm_device_not_suspended(i915);
- WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
+ assert_rpm_device_not_suspended(rpm);
+ WARN_ONCE(!atomic_read(&rpm->wakeref_count),
"RPM wakelock ref not held during HW access");
}
+static inline void
+assert_rpm_wakelock_held(struct drm_i915_private *i915)
+{
+ __assert_rpm_wakelock_held(&i915->runtime_pm);
+}
+
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @i915: i915 device instance
@@ -2240,101 +2005,6 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override);
-
-/* intel_pm.c */
-void intel_init_clock_gating(struct drm_i915_private *dev_priv);
-void intel_suspend_hw(struct drm_i915_private *dev_priv);
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_update_watermarks(struct intel_crtc *crtc);
-void intel_init_pm(struct drm_i915_private *dev_priv);
-void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */);
-void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out);
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_atomic_state *state);
-int intel_enable_sagv(struct drm_i915_private *dev_priv);
-int intel_disable_sagv(struct drm_i915_private *dev_priv);
-bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry entries[],
- int num_entries, int ignore_idx);
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-bool ilk_disable_lp_wm(struct drm_device *dev);
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate);
-void intel_init_ipc(struct drm_i915_private *dev_priv);
-void intel_enable_ipc(struct drm_i915_private *dev_priv);
-
-/* intel_sdvo.c */
-bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t sdvo_reg, enum pipe *pipe);
-bool intel_sdvo_init(struct drm_i915_private *dev_priv,
- i915_reg_t reg, enum port port);
-
-
-/* intel_sprite.c */
-int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- int usecs);
-struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, int plane);
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-int intel_plane_check_stride(const struct intel_plane_state *plane_state);
-int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
-int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-
-static inline bool icl_is_nv12_y_plane(enum plane_id id)
-{
- /* Don't need to do a gen check, these planes are only available on gen11 */
- if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
- return true;
-
- return false;
-}
-
-static inline bool icl_is_hdr_plane(struct intel_plane *plane)
-{
- if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
- return false;
-
- return plane->id < PLANE_SPRITE2;
-}
-
-/* intel_tv.c */
-void intel_tv_init(struct drm_i915_private *dev_priv);
-
/* intel_atomic.c */
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
@@ -2371,64 +2041,4 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-/* intel_atomic_plane.c */
-struct intel_plane *intel_plane_alloc(void);
-void intel_plane_free(struct intel_plane *plane);
-struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
-void intel_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-void skl_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *intel_state);
-
-/* intel_color.c */
-void intel_color_init(struct intel_crtc *crtc);
-int intel_color_check(struct intel_crtc_state *crtc_state);
-void intel_color_commit(const struct intel_crtc_state *crtc_state);
-void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
-
-/* intel_lspcon.c */
-bool lspcon_init(struct intel_digital_port *intel_dig_port);
-void lspcon_resume(struct intel_lspcon *lspcon);
-void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
-void lspcon_write_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- const void *buf, ssize_t len);
-void lspcon_set_infoframes(struct intel_encoder *encoder,
- bool enable,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config);
-void lspcon_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *crtc_state);
-
-/* intel_pipe_crc.c */
-#ifdef CONFIG_DEBUG_FS
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
-int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
- const char *source_name, size_t *values_cnt);
-const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
- size_t *count);
-void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
-void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
-#else
-#define intel_crtc_set_crc_source NULL
-#define intel_crtc_verify_crc_source NULL
-#define intel_crtc_get_crc_sources NULL
-static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
-{
-}
-
-static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
-{
-}
-#endif
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index a9a19778dc7f..705a609050c0 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -189,7 +189,6 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
-int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 06a11c35a784..3074448446bc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -194,7 +194,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- if (!IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -365,7 +365,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -532,24 +532,6 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
msleep(msec);
}
-int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
-{
- struct intel_connector *connector = intel_dsi->attached_connector;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
- if (!mode)
- return 0;
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- drm_mode_probed_add(&connector->base, mode);
-
- return 1;
-}
-
#define ICL_PREPARE_CNT_MAX 0x7
#define ICL_CLK_ZERO_CNT_MAX 0xf
#define ICL_TRAIL_CNT_MAX 0x7
@@ -890,7 +872,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_dphy_param_init(intel_dsi);
else
vlv_dphy_param_init(intel_dsi);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a6c82482a841..adef81c8cccb 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -24,14 +24,20 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
+
#include <linux/i2c.h>
#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
-#include "i915_drv.h"
+
#include "dvo.h"
+#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_dvo.h"
+#include "intel_panel.h"
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
diff --git a/drivers/gpu/drm/i915/intel_dvo.h b/drivers/gpu/drm/i915/intel_dvo.h
new file mode 100644
index 000000000000..3ed0fdf8efff
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dvo.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DVO_H__
+#define __INTEL_DVO_H__
+
+struct drm_i915_private;
+
+void intel_dvo_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_DVO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 49fa43ff02ba..eea9bec04f1b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -84,7 +84,6 @@ static const struct engine_class_info intel_engine_classes[] = {
#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
- unsigned int uabi_id;
u8 class;
u8 instance;
/* mmio bases table *must* be sorted in reverse gen order */
@@ -95,27 +94,24 @@ struct engine_info {
};
static const struct engine_info intel_engines[] = {
- [RCS] = {
- .hw_id = RCS_HW,
- .uabi_id = I915_EXEC_RENDER,
+ [RCS0] = {
+ .hw_id = RCS0_HW,
.class = RENDER_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 1, .base = RENDER_RING_BASE }
},
},
- [BCS] = {
- .hw_id = BCS_HW,
- .uabi_id = I915_EXEC_BLT,
+ [BCS0] = {
+ .hw_id = BCS0_HW,
.class = COPY_ENGINE_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 6, .base = BLT_RING_BASE }
},
},
- [VCS] = {
- .hw_id = VCS_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS0] = {
+ .hw_id = VCS0_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 0,
.mmio_bases = {
@@ -124,9 +120,8 @@ static const struct engine_info intel_engines[] = {
{ .gen = 4, .base = BSD_RING_BASE }
},
},
- [VCS2] = {
- .hw_id = VCS2_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS1] = {
+ .hw_id = VCS1_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 1,
.mmio_bases = {
@@ -134,27 +129,24 @@ static const struct engine_info intel_engines[] = {
{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
},
},
- [VCS3] = {
- .hw_id = VCS3_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS2] = {
+ .hw_id = VCS2_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 2,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
},
},
- [VCS4] = {
- .hw_id = VCS4_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS3] = {
+ .hw_id = VCS3_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 3,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
},
},
- [VECS] = {
- .hw_id = VECS_HW,
- .uabi_id = I915_EXEC_VEBOX,
+ [VECS0] = {
+ .hw_id = VECS0_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 0,
.mmio_bases = {
@@ -162,9 +154,8 @@ static const struct engine_info intel_engines[] = {
{ .gen = 7, .base = VEBOX_RING_BASE }
},
},
- [VECS2] = {
- .hw_id = VECS2_HW,
- .uabi_id = I915_EXEC_VEBOX,
+ [VECS1] = {
+ .hw_id = VECS1_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 1,
.mmio_bases = {
@@ -264,21 +255,17 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
- struct drm_i915_private *dev_priv = engine->i915;
- i915_reg_t hwstam;
-
/*
* Though they added more rings on g4x/ilk, they did not add
* per-engine HWSTAM until gen6.
*/
- if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+ if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
return;
- hwstam = RING_HWSTAM(engine->mmio_base);
- if (INTEL_GEN(dev_priv) >= 3)
- I915_WRITE(hwstam, mask);
+ if (INTEL_GEN(engine->i915) >= 3)
+ ENGINE_WRITE(engine, RING_HWSTAM, mask);
else
- I915_WRITE16(hwstam, mask);
+ ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
@@ -313,15 +300,18 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
if (!engine)
return -ENOMEM;
+ BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
+
engine->id = id;
+ engine->mask = BIT(id);
engine->i915 = dev_priv;
+ engine->uncore = &dev_priv->uncore;
__sprint_engine_name(engine->name, info);
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
- engine->uabi_id = info->uabi_id;
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
@@ -355,15 +345,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
- const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
+ const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
unsigned int i;
int err;
- WARN_ON(ring_mask == 0);
- WARN_ON(ring_mask &
+ WARN_ON(engine_mask == 0);
+ WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_load_failure())
@@ -377,7 +367,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
if (err)
goto cleanup;
- mask |= ENGINE_MASK(i);
+ mask |= BIT(i);
}
/*
@@ -385,16 +375,16 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != ring_mask))
- device_info->ring_mask = mask;
+ if (WARN_ON(mask != engine_mask))
+ device_info->engine_mask = mask;
/* We always presume we have at least RCS available for later probing */
- if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
+ if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
err = -ENODEV;
goto cleanup;
}
- RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
+ RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
@@ -455,12 +445,6 @@ cleanup:
return err;
}
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
-}
-
static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
i915_gem_batch_pool_init(&engine->batch_pool, engine);
@@ -541,9 +525,7 @@ static int init_status_page(struct intel_engine_cs *engine)
return PTR_ERR(obj);
}
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- if (ret)
- goto err;
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -594,7 +576,6 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
err = i915_timeline_init(engine->i915,
&engine->timeline,
- engine->name,
engine->status_page.vma);
if (err)
goto err_hwsp;
@@ -614,10 +595,44 @@ err_hwsp:
return err;
}
-static void __intel_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
- intel_context_unpin(to_intel_context(ctx, engine));
+ static const struct {
+ u8 engine;
+ u8 sched;
+ } map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+ MAP(PREEMPTION, PREEMPTION),
+ MAP(SEMAPHORES, SEMAPHORES),
+#undef MAP
+ };
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 enabled, disabled;
+
+ enabled = 0;
+ disabled = 0;
+ for_each_engine(engine, i915, id) { /* all engines must agree! */
+ int i;
+
+ if (engine->schedule)
+ enabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+ else
+ disabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+
+ for (i = 0; i < ARRAY_SIZE(map); i++) {
+ if (engine->flags & BIT(map[i].engine))
+ enabled |= BIT(map[i].sched);
+ else
+ disabled |= BIT(map[i].sched);
+ }
+ }
+
+ i915->caps.scheduler = enabled & ~disabled;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+ i915->caps.scheduler = 0;
}
struct measure_breadcrumb {
@@ -639,7 +654,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
return -ENOMEM;
if (i915_timeline_init(engine->i915,
- &frame->timeline, "measure",
+ &frame->timeline,
engine->status_page.vma))
goto out_frame;
@@ -670,6 +685,20 @@ out_frame:
return dw;
}
+static int pin_context(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context **out)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_pin(ctx, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ *out = ce;
+ return 0;
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -684,11 +713,8 @@ out_frame:
int intel_engine_init_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- struct intel_context *ce;
int ret;
- engine->set_default_submission(engine);
-
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -696,39 +722,61 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ce = intel_context_pin(i915->kernel_context, engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
+ ret = pin_context(i915->kernel_context, engine,
+ &engine->kernel_context);
+ if (ret)
+ return ret;
/*
* Similarly the preempt context must always be available so that
- * we can interrupt the engine at any time.
+ * we can interrupt the engine at any time. However, as preemption
+ * is optional, we allow it to fail.
*/
- if (i915->preempt_context) {
- ce = intel_context_pin(i915->preempt_context, engine);
- if (IS_ERR(ce)) {
- ret = PTR_ERR(ce);
- goto err_unpin_kernel;
- }
- }
+ if (i915->preempt_context)
+ pin_context(i915->preempt_context, engine,
+ &engine->preempt_context);
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
- goto err_unpin_preempt;
+ goto err_unpin;
engine->emit_fini_breadcrumb_dw = ret;
- return 0;
+ engine->set_default_submission(engine);
-err_unpin_preempt:
- if (i915->preempt_context)
- __intel_context_unpin(i915->preempt_context, engine);
+ return 0;
-err_unpin_kernel:
- __intel_context_unpin(i915->kernel_context, engine);
+err_unpin:
+ if (engine->preempt_context)
+ intel_context_unpin(engine->preempt_context);
+ intel_context_unpin(engine->kernel_context);
return ret;
}
+void intel_gt_resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * After resume, we may need to poke into the pinned kernel
+ * contexts to paper over any damage caused by the sudden suspend.
+ * Only the kernel contexts should remain pinned over suspend,
+ * allowing us to fixup the user contexts on their first pin.
+ */
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
+
+ ce = engine->kernel_context;
+ if (ce)
+ ce->ops->reset(ce);
+
+ ce = engine->preempt_context;
+ if (ce)
+ ce->ops->reset(ce);
+ }
+}
+
/**
* intel_engine_cleanup_common - cleans up the engine state created by
* the common initializers.
@@ -738,8 +786,6 @@ err_unpin_kernel:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -749,9 +795,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (i915->preempt_context)
- __intel_context_unpin(i915->preempt_context, engine);
- __intel_context_unpin(i915->kernel_context, engine);
+ if (engine->preempt_context)
+ intel_context_unpin(engine->preempt_context);
+ intel_context_unpin(engine->kernel_context);
i915_timeline_fini(&engine->timeline);
@@ -762,50 +808,48 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
+
u64 acthd;
- if (INTEL_GEN(dev_priv) >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
- RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_GEN(dev_priv) >= 4)
- acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+ if (INTEL_GEN(i915) >= 8)
+ acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
+ else if (INTEL_GEN(i915) >= 4)
+ acthd = ENGINE_READ(engine, RING_ACTHD);
else
- acthd = I915_READ(ACTHD);
+ acthd = ENGINE_READ(engine, ACTHD);
return acthd;
}
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
u64 bbaddr;
- if (INTEL_GEN(dev_priv) >= 8)
- bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
- RING_BBADDR_UDW(engine->mmio_base));
+ if (INTEL_GEN(engine->i915) >= 8)
+ bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
else
- bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+ bbaddr = ENGINE_READ(engine, RING_BBADDR);
return bbaddr;
}
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
const i915_reg_t mode = RING_MI_MODE(base);
int err;
- if (INTEL_GEN(dev_priv) < 3)
+ if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
GEM_TRACE("%s\n", engine->name);
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
err = 0;
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
1000, 0,
NULL)) {
@@ -814,19 +858,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
}
/* A final mmio read to let GPU writes be hopefully flushed to memory */
- POSTING_READ_FW(mode);
+ intel_uncore_posting_read_fw(uncore, mode);
return err;
}
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
GEM_TRACE("%s\n", engine->name);
- I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
- _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
@@ -863,6 +904,7 @@ static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select;
@@ -884,33 +926,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
- mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+ mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
default_mcr_s_ss_select);
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
- I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- ret = I915_READ_FW(reg);
+ ret = intel_uncore_read_fw(uncore, reg);
mcr &= ~mcr_slice_subslice_mask;
mcr |= default_mcr_s_ss_select;
- I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irq(&uncore->lock);
return ret;
}
@@ -920,6 +962,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
@@ -928,12 +971,14 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
switch (INTEL_GEN(dev_priv)) {
default:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
- if (engine->id != RCS)
+ if (engine->id != RCS0)
break;
- instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN7_SC_INSTDONE);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
@@ -944,28 +989,33 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
}
break;
case 7:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
- if (engine->id != RCS)
+ if (engine->id != RCS0)
break;
- instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
- instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+ instdone->sampler[0][0] =
+ intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
+ instdone->row[0][0] =
+ intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
break;
case 6:
case 5:
case 4:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
-
- if (engine->id == RCS)
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
+ if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
- instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN4_INSTDONE1);
break;
case 3:
case 2:
- instdone->instdone = I915_READ(GEN2_INSTDONE);
+ instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
break;
}
}
@@ -985,12 +1035,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return true;
/* First check that no commands are left in the ring */
- if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
- (I915_READ_TAIL(engine) & TAIL_ADDR))
+ if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
- if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ if (INTEL_GEN(dev_priv) > 2 &&
+ !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
intel_runtime_pm_put(dev_priv, wakeref);
@@ -1007,16 +1058,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
/* More white lies, if wedged, hw state is inconsistent */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_reset_failed(engine->i915))
return true;
- /* Any inflight/incomplete requests? */
- if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
- return false;
-
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
@@ -1045,7 +1090,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return ring_is_idle(engine);
}
-bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
+bool intel_engines_are_idle(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1054,10 +1099,14 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_reset_failed(i915))
return true;
- for_each_engine(engine, dev_priv, id) {
+ /* Already parked (and passed an idleness test); must still be idle */
+ if (!READ_ONCE(i915->gt.awake))
+ return true;
+
+ for_each_engine(engine, i915, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1065,34 +1114,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
return true;
}
-/**
- * intel_engine_has_kernel_context:
- * @engine: the engine
- *
- * Returns true if the last context to be executed on this engine, or has been
- * executed if the engine is already idle, is the kernel context
- * (#i915.kernel_context).
- */
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
-{
- const struct intel_context *kernel_context =
- to_intel_context(engine->i915->kernel_context, engine);
- struct i915_request *rq;
-
- lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
- /*
- * Check the last context seen by the engine. If active, it will be
- * the last request that remains in the timeline. When idle, it is
- * the last executed context as tracked by retirement.
- */
- rq = __i915_active_request_peek(&engine->timeline.last_request);
- if (rq)
- return rq->hw_context == kernel_context;
- else
- return engine->last_retired_context == kernel_context;
-}
-
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -1180,6 +1201,8 @@ void intel_engines_park(struct drm_i915_private *i915)
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
+
+ i915->gt.active_engines = 0;
}
/**
@@ -1283,15 +1306,14 @@ static void print_request(struct drm_printer *m,
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
- drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
+ drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
- rq->global_seqno,
+ rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags) ? "+" : "",
- rq->fence.context, rq->fence.seqno,
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
@@ -1334,25 +1356,26 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
- if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
- drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
+ if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
+ drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
- I915_READ(RING_START(engine->mmio_base)));
+ ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
- I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
drm_printf(m, "\tRING_TAIL: 0x%08x\n",
- I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
- I915_READ(RING_CTL(engine->mmio_base)),
- I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
if (INTEL_GEN(engine->i915) > 2) {
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
- I915_READ(RING_MI_MODE(engine->mmio_base)),
- I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
+ ENGINE_READ(engine, RING_MI_MODE),
+ ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
if (INTEL_GEN(dev_priv) >= 6) {
- drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+ drm_printf(m, "\tRING_IMR: %08x\n",
+ ENGINE_READ(engine, RING_IMR));
}
addr = intel_engine_get_active_head(engine);
@@ -1362,57 +1385,53 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 8)
- addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
- RING_DMA_FADD_UDW(engine->mmio_base));
+ addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
else if (INTEL_GEN(dev_priv) >= 4)
- addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ addr = ENGINE_READ(engine, RING_DMA_FADD);
else
- addr = I915_READ(DMA_FADD_I8XX);
+ addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
- I915_READ(RING_IPEIR(engine->mmio_base)));
+ ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
- I915_READ(RING_IPEHR(engine->mmio_base)));
+ ENGINE_READ(engine, RING_IPEHR));
} else {
- drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
- drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+ drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+ const u8 num_entries = execlists->csb_size;
unsigned int idx;
u8 read, write;
- drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
- I915_READ(RING_EXECLIST_STATUS_LO(engine)),
- I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+ drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+ num_entries);
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
- drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
+ drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
read, write,
- GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
- if (read >= GEN8_CSB_ENTRIES)
+ if (read >= num_entries)
read = 0;
- if (write >= GEN8_CSB_ENTRIES)
+ if (write >= num_entries)
write = 0;
if (read > write)
- write += GEN8_CSB_ENTRIES;
+ write += num_entries;
while (read < write) {
- idx = ++read % GEN8_CSB_ENTRIES;
- drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
- idx,
- hws[idx * 2],
- I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
- hws[idx * 2 + 1],
- I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+ idx = ++read % num_entries;
+ drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+ idx, hws[idx * 2], hws[idx * 2 + 1]);
}
rcu_read_lock();
@@ -1425,10 +1444,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
char hdr[80];
snprintf(hdr, sizeof(hdr),
- "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ",
+ "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
idx, count,
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset);
+ rq->timeline->hwsp_offset,
+ hwsp_seqno(rq));
print_request(m, rq, hdr);
} else {
drm_printf(m, "\t\tELSP[%d] idle\n", idx);
@@ -1438,11 +1458,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
rcu_read_unlock();
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
- I915_READ(RING_PP_DIR_DCLV(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_DCLV));
}
}
@@ -1495,13 +1515,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
va_end(ap);
}
- if (i915_terminally_wedged(&engine->i915->gpu_error))
+ if (i915_reset_failed(engine->i915))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine),
- engine->hangcheck.seqno,
+ drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
+ engine->hangcheck.last_seqno,
+ engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
@@ -1521,7 +1540,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tlast ");
- rq = i915_gem_find_active_request(engine);
+ rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1688,6 +1707,50 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
+static bool match_ring(struct i915_request *rq)
+{
+ u32 ring = ENGINE_READ(rq->engine, RING_START);
+
+ return ring == i915_ggtt_offset(rq->ring->vma);
+}
+
+struct i915_request *
+intel_engine_find_active_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *request, *active = NULL;
+ unsigned long flags;
+
+ /*
+ * We are called by the error capture, reset and to dump engine
+ * state at random points in time. In particular, note that neither is
+ * crucially ordered with an interrupt. After a hang, the GPU is dead
+ * and we assume that no more writes can happen (we waited long enough
+ * for all writes that were in transaction to be flushed) - adding an
+ * extra delay for a recent interrupt is pointless. Hence, we do
+ * not need an engine->irq_seqno_barrier() before the seqno reads.
+ * At all other times, we must assume the GPU is still running, but
+ * we only care about the snapshot of this moment.
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ list_for_each_entry(request, &engine->timeline.requests, link) {
+ if (i915_request_completed(request))
+ continue;
+
+ if (!i915_request_started(request))
+ break;
+
+ /* More than one preemptible request may match! */
+ if (!match_ring(request))
+ break;
+
+ active = request;
+ break;
+ }
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ return active;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
new file mode 100644
index 000000000000..1f970c76b6a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -0,0 +1,546 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_TYPES__
+#define __INTEL_ENGINE_TYPES__
+
+#include <linux/hashtable.h>
+#include <linux/irq_work.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "i915_gem.h"
+#include "i915_priolist_types.h"
+#include "i915_selftest.h"
+#include "i915_timeline_types.h"
+#include "intel_workarounds_types.h"
+
+#include "i915_gem_batch_pool.h"
+#include "i915_pmu.h"
+
+#define I915_MAX_SLICES 3
+#define I915_MAX_SUBSLICES 8
+
+#define I915_CMD_HASH_ORDER 9
+
+struct dma_fence;
+struct drm_i915_reg_table;
+struct i915_gem_context;
+struct i915_request;
+struct i915_sched_attr;
+struct intel_uncore;
+
+typedef u8 intel_engine_mask_t;
+#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+
+struct intel_hw_status_page {
+ struct i915_vma *vma;
+ u32 *addr;
+};
+
+struct intel_instdone {
+ u32 instdone;
+ /* The following exist only in the RCS engine */
+ u32 slice_common;
+ u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
+
+struct intel_engine_hangcheck {
+ u64 acthd;
+ u32 last_seqno;
+ u32 next_seqno;
+ unsigned long action_timestamp;
+ struct intel_instdone instdone;
+};
+
+struct intel_ring {
+ struct kref ref;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ struct i915_timeline *timeline;
+ struct list_head request_list;
+ struct list_head active_link;
+
+ u32 head;
+ u32 tail;
+ u32 emit;
+
+ u32 space;
+ u32 size;
+ u32 effective_size;
+};
+
+/*
+ * we use a single page to load ctx workarounds, so all of these
+ * values are expressed in dwords
+ *
+ * struct i915_wa_ctx_bb:
+ * offset: specifies batch starting position, also helpful in case
+ * if we want to have multiple batches at different offsets based on
+ * some criteria. It is not a requirement at the moment but provides
+ * an option for future use.
+ * size: size of the batch in DWORDS
+ */
+struct i915_ctx_workarounds {
+ struct i915_wa_ctx_bb {
+ u32 offset;
+ u32 size;
+ } indirect_ctx, per_ctx;
+ struct i915_vma *vma;
+};
+
+#define I915_MAX_VCS 4
+#define I915_MAX_VECS 2
+
+/*
+ * Engine IDs definitions.
+ * Keep instances of the same type engine together.
+ */
+enum intel_engine_id {
+ RCS0 = 0,
+ BCS0,
+ VCS0,
+ VCS1,
+ VCS2,
+ VCS3,
+#define _VCS(n) (VCS0 + (n))
+ VECS0,
+ VECS1,
+#define _VECS(n) (VECS0 + (n))
+ I915_NUM_ENGINES
+};
+
+struct st_preempt_hang {
+ struct completion completion;
+ unsigned int count;
+ bool inject_hang;
+};
+
+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state of
+ * driver and the hardware state for execlist mode of submission.
+ */
+struct intel_engine_execlists {
+ /**
+ * @tasklet: softirq tasklet for bottom handler
+ */
+ struct tasklet_struct tasklet;
+
+ /**
+ * @default_priolist: priority list for I915_PRIORITY_NORMAL
+ */
+ struct i915_priolist default_priolist;
+
+ /**
+ * @no_priolist: priority lists disabled
+ */
+ bool no_priolist;
+
+ /**
+ * @submit_reg: gen-specific execlist submission register
+ * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
+ * the ExecList Submission Queue Contents register array for Gen11+
+ */
+ u32 __iomem *submit_reg;
+
+ /**
+ * @ctrl_reg: the enhanced execlists control register, used to load the
+ * submit queue on the HW and to request preemptions to idle
+ */
+ u32 __iomem *ctrl_reg;
+
+ /**
+ * @port: execlist port states
+ *
+ * For each hardware ELSP (ExecList Submission Port) we keep
+ * track of the last request and the number of times we submitted
+ * that port to hw. We then count the number of times the hw reports
+ * a context completion or preemption. As only one context can
+ * be active on hw, we limit resubmission of a context to port[0]. This
+ * is called a Lite Restore of the context.
+ */
+ struct execlist_port {
+ /**
+ * @request_count: combined request and submission count
+ */
+ struct i915_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, execlists) ((p) - (execlists)->port)
+
+ /**
+ * @context_id: context ID for port
+ */
+ GEM_DEBUG_DECL(u32 context_id);
+
+#define EXECLIST_MAX_PORTS 2
+ } port[EXECLIST_MAX_PORTS];
+
+ /**
+ * @active: is the HW active? We consider the HW as active after
+ * submitting any context for execution and until we have seen the
+ * last context completion event. After that, we do not expect any
+ * more events until we submit, and so can park the HW.
+ *
+ * As we have a small number of different sources from which we feed
+ * the HW, we track the state of each inside a single bitfield.
+ */
+ unsigned int active;
+#define EXECLISTS_ACTIVE_USER 0
+#define EXECLISTS_ACTIVE_PREEMPT 1
+#define EXECLISTS_ACTIVE_HWACK 2
+
+ /**
+ * @port_mask: number of execlist ports - 1
+ */
+ unsigned int port_mask;
+
+ /**
+ * @queue_priority_hint: Highest pending priority.
+ *
+ * When we add requests into the queue, or adjust the priority of
+ * executing requests, we compute the maximum priority of those
+ * pending requests. We can then use this value to determine if
+ * we need to preempt the executing requests to service the queue.
+ * However, since we may have recorded the priority of an inflight
+ * request we wanted to preempt but which has since completed, at the
+ * time of dequeuing the priority hint may no longer match the highest
+ * available request priority.
+ */
+ int queue_priority_hint;
+
+ /**
+ * @queue: queue of requests, in priority lists
+ */
+ struct rb_root_cached queue;
+
+ /**
+ * @csb_write: control register for Context Switch buffer
+ *
+ * Note this register may be either mmio or HWSP shadow.
+ */
+ u32 *csb_write;
+
+ /**
+ * @csb_status: status array for Context Switch buffer
+ *
+ * Note these registers may be either mmio or HWSP shadow.
+ */
+ u32 *csb_status;
+
+ /**
+ * @preempt_complete_status: expected CSB upon completing preemption
+ */
+ u32 preempt_complete_status;
+
+ /**
+ * @csb_size: context status buffer FIFO size
+ */
+ u8 csb_size;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ u8 csb_head;
+
+ I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
+};
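A minimal sketch of the port_* macros above: the low EXECLIST_COUNT_BITS of @request_count fold a submission count into the request pointer. The pattern below is illustrative only (resubmit_port() is a made-up name):

static void resubmit_port(struct execlist_port *port)
{
	struct i915_request *rq;
	unsigned int count;

	/* Split the packed value into the request pointer and its ELSP count. */
	rq = port_unpack(port, &count);

	/* Record that we are about to write this context to the ELSP again. */
	port_set(port, port_pack(rq, count + 1));
}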
+
+#define INTEL_ENGINE_CS_MAX_NAME 8
+
+struct intel_engine_cs {
+ struct drm_i915_private *i915;
+ struct intel_uncore *uncore;
+ char name[INTEL_ENGINE_CS_MAX_NAME];
+
+ enum intel_engine_id id;
+ unsigned int hw_id;
+ unsigned int guc_id;
+ intel_engine_mask_t mask;
+
+ u8 uabi_class;
+
+ u8 class;
+ u8 instance;
+ u32 context_size;
+ u32 mmio_base;
+
+ struct intel_ring *buffer;
+
+ struct i915_timeline timeline;
+
+ struct intel_context *kernel_context; /* pinned */
+ struct intel_context *preempt_context; /* pinned; optional */
+
+ struct drm_i915_gem_object *default_state;
+ void *pinned_default_state;
+
+ /* Rather than have every client wait upon all user interrupts,
+ * with the herd waking after every interrupt and each doing the
+ * heavyweight seqno dance, we delegate the task (of being the
+ * bottom-half of the user interrupt) to the first client. After
+ * every interrupt, we wake up one client, who does the heavyweight
+ * coherent seqno read and either goes back to sleep (if incomplete),
+ * or wakes up all the completed clients in parallel, before then
+ * transferring the bottom-half status to the next client in the queue.
+ *
+ * Compared to walking the entire list of waiters in a single dedicated
+ * bottom-half, we reduce the latency of the first waiter by avoiding
+ * a context switch, but incur additional coherent seqno reads when
+ * following the chain of request breadcrumbs. Since it is most likely
+ * that we have a single client waiting on each seqno, reducing
+ * the overhead of waking that client is much preferred.
+ */
+ struct intel_breadcrumbs {
+ spinlock_t irq_lock;
+ struct list_head signalers;
+
+ struct irq_work irq_work; /* for use from inside irq_lock */
+
+ unsigned int irq_enabled;
+
+ bool irq_armed;
+ } breadcrumbs;
+
+ struct intel_engine_pmu {
+ /**
+ * @enable: Bitmask of enable sample events on this engine.
+ *
+ * Bits correspond to sample event types, for instance
+ * I915_SAMPLE_QUEUED is bit 0 etc.
+ */
+ u32 enable;
+ /**
+ * @enable_count: Reference count for the enabled samplers.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+ */
+ unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
+ /**
+ * @sample: Counter values for sampling events.
+ *
+ * Our internal timer stores the current counters in this field.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+ */
+ struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
+ } pmu;
+
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ */
+ struct i915_gem_batch_pool batch_pool;
+
+ struct intel_hw_status_page status_page;
+ struct i915_ctx_workarounds wa_ctx;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
+
+ u32 irq_keep_mask; /* always keep these interrupts */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ void (*irq_enable)(struct intel_engine_cs *engine);
+ void (*irq_disable)(struct intel_engine_cs *engine);
+
+ int (*init_hw)(struct intel_engine_cs *engine);
+
+ struct {
+ void (*prepare)(struct intel_engine_cs *engine);
+ void (*reset)(struct intel_engine_cs *engine, bool stalled);
+ void (*finish)(struct intel_engine_cs *engine);
+ } reset;
+
+ void (*park)(struct intel_engine_cs *engine);
+ void (*unpark)(struct intel_engine_cs *engine);
+
+ void (*set_default_submission)(struct intel_engine_cs *engine);
+
+ const struct intel_context_ops *cops;
+
+ int (*request_alloc)(struct i915_request *rq);
+ int (*init_context)(struct i915_request *rq);
+
+ int (*emit_flush)(struct i915_request *request, u32 mode);
+#define EMIT_INVALIDATE BIT(0)
+#define EMIT_FLUSH BIT(1)
+#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
+ int (*emit_bb_start)(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+ int (*emit_init_breadcrumb)(struct i915_request *rq);
+ u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
+ u32 *cs);
+ unsigned int emit_fini_breadcrumb_dw;
+
+ /* Pass the request to the hardware queue (e.g. directly into
+ * the legacy ringbuffer or to the end of an execlist).
+ *
+ * This is called from an atomic context with irqs disabled; must
+ * be irq safe.
+ */
+ void (*submit_request)(struct i915_request *rq);
+
+ /*
+ * Call when the priority on a request has changed and it and its
+ * dependencies may need rescheduling. Note the request itself may
+ * not be ready to run!
+ */
+ void (*schedule)(struct i915_request *request,
+ const struct i915_sched_attr *attr);
+
+ /*
+ * Cancel all requests on the hardware, or queued for execution.
+ * This should only cancel the ready requests that have been
+ * submitted to the engine (via the engine->submit_request callback).
+ * This is called when marking the device as wedged.
+ */
+ void (*cancel_requests)(struct intel_engine_cs *engine);
+
+ void (*cleanup)(struct intel_engine_cs *engine);
+
+ struct intel_engine_execlists execlists;
+
+ /* Contexts are pinned whilst they are active on the GPU. The last
+ * context executed remains active whilst the GPU is idle - the
+ * switch away and write to the context object only occurs on the
+ * next execution. Contexts are only unpinned on retirement of the
+ * following request ensuring that we can always write to the object
+ * on the context switch even after idling. Across suspend, we switch
+ * to the kernel context and trash it as the save may not happen
+ * before the hardware is powered down.
+ */
+ struct intel_context *last_retired_context;
+
+ /* status_notifier: list of callbacks for context-switch changes */
+ struct atomic_notifier_head context_status_notifier;
+
+ struct intel_engine_hangcheck hangcheck;
+
+#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_SUPPORTS_STATS BIT(1)
+#define I915_ENGINE_HAS_PREEMPTION BIT(2)
+#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
+#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
+ unsigned int flags;
+
+ /*
+ * Table of commands the command parser needs to know about
+ * for this engine.
+ */
+ DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
+
+ /*
+ * Table of registers allowed in commands that read/write registers.
+ */
+ const struct drm_i915_reg_table *reg_tables;
+ int reg_table_count;
+
+ /*
+ * Returns the bitmask for the length field of the specified command.
+ * Return 0 for an unrecognized/invalid command.
+ *
+ * If the command parser finds an entry for a command in the engine's
+ * cmd_tables, it gets the command's length based on the table entry.
+ * If not, it calls this function to determine the per-engine length
+ * field encoding for the command (i.e. different opcode ranges use
+ * certain bits to encode the command length in the header).
+ */
+ u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+ struct {
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqlock_t lock;
+ /**
+ * @enabled: Reference count indicating number of listeners.
+ */
+ unsigned int enabled;
+ /**
+ * @active: Number of contexts currently scheduled in.
+ */
+ unsigned int active;
+ /**
+ * @enabled_at: Timestamp when busy stats were enabled.
+ */
+ ktime_t enabled_at;
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, active is active > 0.
+ */
+ ktime_t start;
+ /**
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where engine is currently busy (active > 0).
+ */
+ ktime_t total;
+ } stats;
+};
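A rough sketch of the accounting implied by the stats block above, ignoring the seqlock for brevity (a real reader would sample under stats.lock); engine_busy_time() is illustrative, not the driver's API:

static ktime_t engine_busy_time(const struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If a context is currently scheduled in (active > 0), the most
	 * recent busy block has not yet been folded into total, so add
	 * the time elapsed since the last idle->active transition.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}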
+
+static inline bool
+intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_supports_stats(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_SUPPORTS_STATS;
+}
+
+static inline bool
+intel_engine_has_preemption(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_PREEMPTION;
+}
+
+static inline bool
+intel_engine_has_semaphores(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
+}
+
+static inline bool
+intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+}
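These predicates are intended to gate optional engine features; a minimal, purely illustrative combination (can_semaphore_preempt() is not part of this patch):

static bool can_semaphore_preempt(const struct intel_engine_cs *engine)
{
	/* Both optional features must be advertised by the engine. */
	return intel_engine_has_preemption(engine) &&
	       intel_engine_has_semaphores(engine);
}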
+
+#define instdone_slice_mask(dev_priv__) \
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+ for ((slice__) = 0, (subslice__) = 0; \
+ (slice__) < I915_MAX_SLICES; \
+ (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+ (slice__) += ((subslice__) == 0)) \
+ for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+ (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
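For illustration, the iterator above is meant to walk only the slice/subslice pairs present on the device, indexing the arrays in struct intel_instdone declared earlier in this header (dump_instdone() is a hypothetical helper):

static void dump_instdone(struct drm_i915_private *i915,
			  const struct intel_instdone *instdone)
{
	int slice, subslice;

	/* Visit each populated slice/subslice and print its INSTDONE state. */
	for_each_instdone_slice_subslice(i915, slice, subslice)
		pr_debug("sampler[%d][%d]=%08x row[%d][%d]=%08x\n",
			 slice, subslice, instdone->sampler[slice][subslice],
			 slice, subslice, instdone->row[slice][subslice]);
}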
+
+#endif /* __INTEL_ENGINE_TYPES__ */
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 656e684e7c9a..5679f2fffb7c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -40,8 +40,10 @@
#include <drm/drm_fourcc.h>
-#include "intel_drv.h"
#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
@@ -108,7 +110,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
FBC_STATUS, FBC_STAT_COMPRESSING, 0,
10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
@@ -1278,6 +1280,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
+ /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+ if (IS_GEMINILAKE(dev_priv))
+ return 0;
+
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/intel_fbc.h b/drivers/gpu/drm/i915/intel_fbc.h
new file mode 100644
index 000000000000..50272eda8d43
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBC_H__
+#define __INTEL_FBC_H__
+
+#include <linux/types.h>
+
+#include "intel_frontbuffer.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state);
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+void intel_fbc_post_update(struct intel_crtc *crtc);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
+void intel_fbc_enable(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+void intel_fbc_disable(struct intel_crtc *crtc);
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits, enum fb_op_origin origin);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index e8f694b57b8a..89db71996148 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -25,26 +25,27 @@
*/
#include <linux/async.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/console.h>
+#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
+#include <linux/tty.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
@@ -235,12 +236,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
- info->par = helper;
-
ifbdev->helper.fb = fb;
- strcpy(info->fix.id, "inteldrmfb");
-
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
@@ -259,11 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
- /* This driver doesn't need a VT switch to restore the mode on resume */
- info->skip_vt_switch = true;
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -292,225 +285,7 @@ out_unlock:
return ret;
}
-static struct drm_fb_helper_crtc *
-intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
-{
- int i;
-
- for (i = 0; i < fb_helper->crtc_count; i++)
- if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
- return &fb_helper->crtc_info[i];
-
- return NULL;
-}
-
-/*
- * Try to read the BIOS display configuration and use it for the initial
- * fb configuration.
- *
- * The BIOS or boot loader will generally create an initial display
- * configuration for us that includes some set of active pipes and displays.
- * This routine tries to figure out which pipes and connectors are active
- * and stuffs them into the crtcs and modes array given to us by the
- * drm_fb_helper code.
- *
- * The overall sequence is:
- * intel_fbdev_init - from driver load
- * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
- * drm_fb_helper_init - build fb helper structs
- * drm_fb_helper_single_add_all_connectors - more fb helper structs
- * intel_fbdev_initial_config - apply the config
- * drm_fb_helper_initial_config - call ->probe then register_framebuffer()
- * drm_setup_crtcs - build crtc config for fbdev
- * intel_fb_initial_config - find active connectors etc
- * drm_fb_helper_single_fb_probe - set up fbdev
- * intelfb_create - re-use or alloc fb, build out fbdev structs
- *
- * Note that we don't make special consideration whether we could actually
- * switch to the selected modes without a full modeset. E.g. when the display
- * is in VGA mode we need to recalculate watermarks and set a new high-res
- * framebuffer anyway.
- */
-static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_fb_offset *offsets,
- bool *enabled, int width, int height)
-{
- struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
- unsigned long conn_configured, conn_seq;
- int i, j;
- bool *save_enabled;
- bool fallback = true, ret = true;
- int num_connectors_enabled = 0;
- int num_connectors_detected = 0;
- struct drm_modeset_acquire_ctx ctx;
-
- save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
- if (!save_enabled)
- return false;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- while (drm_modeset_lock_all_ctx(fb_helper->dev, &ctx) != 0)
- drm_modeset_backoff(&ctx);
-
- memcpy(save_enabled, enabled, count);
- conn_seq = GENMASK(count - 1, 0);
- conn_configured = 0;
-retry:
- for (i = 0; i < count; i++) {
- struct drm_fb_helper_connector *fb_conn;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_fb_helper_crtc *new_crtc;
-
- fb_conn = fb_helper->connector_info[i];
- connector = fb_conn->connector;
-
- if (conn_configured & BIT(i))
- continue;
-
- /* First pass, only consider tiled connectors */
- if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
- continue;
-
- if (connector->status == connector_status_connected)
- num_connectors_detected++;
-
- if (!enabled[i]) {
- DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
- connector->name);
- conn_configured |= BIT(i);
- continue;
- }
-
- if (connector->force == DRM_FORCE_OFF) {
- DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
- connector->name);
- enabled[i] = false;
- continue;
- }
-
- encoder = connector->state->best_encoder;
- if (!encoder || WARN_ON(!connector->state->crtc)) {
- if (connector->force > DRM_FORCE_OFF)
- goto bail;
-
- DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
- connector->name);
- enabled[i] = false;
- conn_configured |= BIT(i);
- continue;
- }
-
- num_connectors_enabled++;
-
- new_crtc = intel_fb_helper_crtc(fb_helper,
- connector->state->crtc);
-
- /*
- * Make sure we're not trying to drive multiple connectors
- * with a single CRTC, since our cloning support may not
- * match the BIOS.
- */
- for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- DRM_DEBUG_KMS("fallback: cloned configuration\n");
- goto bail;
- }
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
- connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_conn);
-
- /* try for preferred next */
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
- connector->name, connector->has_tile);
- modes[i] = drm_has_preferred_mode(fb_conn, width,
- height);
- }
-
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- DRM_DEBUG_KMS("using first mode listed on connector %s\n",
- connector->name);
- modes[i] = list_first_entry(&connector->modes,
- struct drm_display_mode,
- head);
- }
-
- /* last resort: use current mode */
- if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly. crtc_state->mode has
- * I915_MODE_FLAG_INHERITED, which we clear to force check
- * state.
- */
- DRM_DEBUG_KMS("looking for current mode on connector %s\n",
- connector->name);
- modes[i] = &connector->state->crtc->mode;
- }
- crtcs[i] = new_crtc;
-
- DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
- connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
-
- fallback = false;
- conn_configured |= BIT(i);
- }
-
- if (conn_configured != conn_seq) { /* repeat until no more are found */
- conn_seq = conn_configured;
- goto retry;
- }
-
- /*
- * If the BIOS didn't enable everything it could, fall back to have the
- * same user experiencing of lighting up as much as possible like the
- * fbdev helper library.
- */
- if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) {
- DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
- DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
- num_connectors_detected);
- fallback = true;
- }
-
- if (fallback) {
-bail:
- DRM_DEBUG_KMS("Not using firmware configuration\n");
- memcpy(enabled, save_enabled, count);
- ret = false;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- kfree(save_enabled);
- return ret;
-}
-
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .initial_config = intel_fb_initial_config,
.fb_probe = intelfb_create,
};
diff --git a/drivers/gpu/drm/i915/intel_fbdev.h b/drivers/gpu/drm/i915/intel_fbdev.h
new file mode 100644
index 000000000000..de7c84250eb5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbdev.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_H__
+#define __INTEL_FBDEV_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_i915_private;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int intel_fbdev_init(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
+void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
+void intel_fbdev_output_poll_changed(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+}
+
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index f33de4be4b89..74c8b0528294 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
/**
* DOC: fifo underrun handling
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 16f253deaf8d..aa34e33b6087 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -61,9 +61,12 @@
*/
+#include "i915_drv.h"
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-#include "i915_drv.h"
+#include "intel_psr.h"
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 63cd9a753a72..d5894666f658 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -24,9 +24,19 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
+#include "i915_gem_object.h"
+
struct drm_i915_private;
struct drm_i915_gem_object;
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+ ORIGIN_DIRTYFB,
+};
+
void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h
index b96a31bc1080..a34ece53a771 100644
--- a/drivers/gpu/drm/i915/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/intel_gpu_commands.h
@@ -105,8 +105,13 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
-#define MI_SEMAPHORE_POLL (1<<15)
-#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
+#define MI_SEMAPHORE_POLL (1 << 15)
+#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
+#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12)
+#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
+#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
+#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
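A hedged sketch of how the new SAD (semaphore address data) compare modes combine with MI_SEMAPHORE_WAIT on gen8+; emit_semaphore_gte() and its arguments are illustrative only:

static u32 *emit_semaphore_gte(u32 *cs, u32 value, u32 hwsp_offset)
{
	/* Poll the dword at hwsp_offset until it is >= value. */
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_GTE_SDD;
	*cs++ = value;
	*cs++ = hwsp_offset;
	*cs++ = 0; /* upper 32 bits of the address */
	return cs;
}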
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 8660af3fd755..3aabfa2d9198 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
@@ -203,11 +203,19 @@ int intel_guc_init(struct intel_guc *guc)
goto err_log;
GEM_BUG_ON(!guc->ads_vma);
+ if (HAS_GUC_CT(dev_priv)) {
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
+ }
+
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
return 0;
+err_ads:
+ intel_guc_ads_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
@@ -222,6 +230,10 @@ void intel_guc_fini(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
i915_ggtt_disable_guc(dev_priv);
+
+ if (HAS_GUC_CT(dev_priv))
+ intel_guc_ct_fini(&guc->ct);
+
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
@@ -357,14 +369,14 @@ void intel_guc_init_params(struct intel_guc *guc)
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
I915_WRITE(SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
@@ -386,6 +398,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 status;
int i;
int ret;
@@ -402,12 +415,12 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
+ intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
for (i = 0; i < len; i++)
- I915_WRITE(guc_send_reg(guc, i), action[i]);
+ intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);
- POSTING_READ(guc_send_reg(guc, i - 1));
+ intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
intel_guc_notify(guc);
@@ -415,7 +428,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
* No GuC command should ever take longer than 10ms.
* Fast commands should still complete in 10us.
*/
- ret = __intel_wait_for_register_fw(dev_priv,
+ ret = __intel_wait_for_register_fw(uncore,
guc_send_reg(guc, 0),
INTEL_GUC_MSG_TYPE_MASK,
INTEL_GUC_MSG_TYPE_RESPONSE <<
@@ -442,7 +455,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = INTEL_GUC_MSG_TO_DATA(status);
out:
- intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
+ intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
mutex_unlock(&guc->send_mutex);
return ret;
@@ -472,17 +485,25 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
spin_unlock(&guc->irq_lock);
enable_rpm_wakeref_asserts(dev_priv);
- intel_guc_to_host_process_recv_msg(guc, msg);
+ intel_guc_to_host_process_recv_msg(guc, &msg, 1);
}
-void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+ const u32 *payload, u32 len)
{
+ u32 msg;
+
+ if (unlikely(!len))
+ return -EPROTO;
+
/* Make sure to handle only enabled messages */
- msg &= guc->msg_enabled_mask;
+ msg = payload[0] & guc->msg_enabled_mask;
if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
intel_guc_log_handle_flush_event(&guc->log);
+
+ return 0;
}
int intel_guc_sample_forcewake(struct intel_guc *guc)
@@ -544,7 +565,7 @@ static int guc_sleep_state_action(struct intel_guc *guc,
if (ret)
return ret;
- ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+ ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 744220296653..2c59ff8d9f39 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -32,6 +32,7 @@
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
+#include "i915_utils.h"
#include "i915_vma.h"
struct guc_preempt_work {
@@ -164,7 +165,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
-void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg);
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+ const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index f0db62887f50..bec62f34b15a 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -121,8 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
* to find it. Note that we have to skip our header (1 page),
* because our GuC shared data is there.
*/
- kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
- dev_priv->engine[RCS])->state;
+ kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state;
blob->ads.golden_context_lrca =
intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index a52883e9146f..dde1dc0d6e69 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -140,11 +140,6 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
return err;
}
-static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
-{
- return ctch->vma != NULL;
-}
-
static int ctch_init(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
@@ -214,25 +209,21 @@ err_out:
static void ctch_fini(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
+ GEM_BUG_ON(ctch->enabled);
+
i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}
-static int ctch_open(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+static int ctch_enable(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
{
u32 base;
int err;
int i;
- CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
- ctch->owner, yesno(ctch_is_open(ctch)));
+ GEM_BUG_ON(!ctch->vma);
- if (!ctch->vma) {
- err = ctch_init(guc, ctch);
- if (unlikely(err))
- goto err_out;
- GEM_BUG_ON(!ctch->vma);
- }
+ GEM_BUG_ON(ctch->enabled);
/* vma should be already allocated and map'ed */
base = intel_guc_ggtt_offset(guc, ctch->vma);
@@ -255,7 +246,7 @@ static int ctch_open(struct intel_guc *guc,
base + PAGE_SIZE/4 * CTB_RECV,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
- goto err_fini;
+ goto err_out;
err = guc_action_register_ct_buffer(guc,
base + PAGE_SIZE/4 * CTB_SEND,
@@ -263,23 +254,25 @@ static int ctch_open(struct intel_guc *guc,
if (unlikely(err))
goto err_deregister;
+ ctch->enabled = true;
+
return 0;
err_deregister:
guc_action_deregister_ct_buffer(guc,
ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
-err_fini:
- ctch_fini(guc, ctch);
err_out:
DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
return err;
}
-static void ctch_close(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+static void ctch_disable(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
{
- GEM_BUG_ON(!ctch_is_open(ctch));
+ GEM_BUG_ON(!ctch->enabled);
+
+ ctch->enabled = false;
guc_action_deregister_ct_buffer(guc,
ctch->owner,
@@ -287,7 +280,6 @@ static void ctch_close(struct intel_guc *guc,
guc_action_deregister_ct_buffer(guc,
ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
- ctch_fini(guc, ctch);
}
static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
@@ -481,7 +473,7 @@ static int ctch_send(struct intel_guc_ct *ct,
u32 fence;
int err;
- GEM_BUG_ON(!ctch_is_open(ctch));
+ GEM_BUG_ON(!ctch->enabled);
GEM_BUG_ON(!len);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
@@ -709,14 +701,15 @@ static void ct_process_request(struct intel_guc_ct *ct,
u32 action, u32 len, const u32 *payload)
{
struct intel_guc *guc = ct_to_guc(ct);
+ int ret;
CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
- if (unlikely(len < 1))
+ ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
+ if (unlikely(ret))
goto fail_unexpected;
- intel_guc_to_host_process_recv_msg(guc, *payload);
break;
default:
@@ -817,7 +810,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
- if (!ctch_is_open(ctch))
+ if (!ctch->enabled)
return;
do {
@@ -849,6 +842,51 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
}
/**
+ * intel_guc_ct_init - Init CT communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for communication via
+ * the CT channel.
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
+ int err;
+
+ err = ctch_init(guc, ctch);
+ if (unlikely(err)) {
+ DRM_ERROR("CT: can't open channel %d; err=%d\n",
+ ctch->owner, err);
+ return err;
+ }
+
+ GEM_BUG_ON(!ctch->vma);
+ return 0;
+}
+
+/**
+ * intel_guc_ct_fini - Fini CT communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for communication via
+ * the CT channel.
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
+
+ ctch_fini(guc, ctch);
+}
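With this split, intel_guc_ct_init()/intel_guc_ct_fini() own the channel memory while intel_guc_ct_enable()/intel_guc_ct_disable() (below) toggle it at runtime. A hedged sketch of the expected ordering in a caller; the function name and error handling are illustrative only:

static int guc_ct_example(struct intel_guc *guc)
{
	int err;

	err = intel_guc_ct_init(&guc->ct);	/* allocate the channel */
	if (err)
		return err;

	err = intel_guc_ct_enable(&guc->ct);	/* register buffers with GuC */
	if (err) {
		intel_guc_ct_fini(&guc->ct);
		return err;
	}

	/* ... use the CT channel ... */

	intel_guc_ct_disable(&guc->ct);
	intel_guc_ct_fini(&guc->ct);
	return 0;
}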
+
+/**
* intel_guc_ct_enable - Enable buffer based command transport.
* @ct: pointer to CT struct
*
@@ -865,7 +903,10 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
GEM_BUG_ON(!HAS_GUC_CT(i915));
- err = ctch_open(guc, ctch);
+ if (ctch->enabled)
+ return 0;
+
+ err = ctch_enable(guc, ctch);
if (unlikely(err))
return err;
@@ -890,10 +931,10 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
GEM_BUG_ON(!HAS_GUC_CT(i915));
- if (!ctch_is_open(ctch))
+ if (!ctch->enabled)
return;
- ctch_close(guc, ctch);
+ ctch_disable(guc, ctch);
/* Disable send */
guc->send = intel_guc_send_nop;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index d774895ab143..f5e7f0663304 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -66,6 +66,7 @@ struct intel_guc_ct_channel {
struct intel_guc_ct_buffer ctbs[2];
u32 owner;
u32 next_fence;
+ bool enabled;
};
/** Holds all command transport channels.
@@ -90,6 +91,8 @@ struct intel_guc_ct {
};
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
+int intel_guc_ct_init(struct intel_guc_ct *ct);
+void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 13ff7003c6be..792a551450c7 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -241,7 +241,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
guc_prepare_xfer(guc);
@@ -254,7 +254,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
ret = guc_xfer_ucode(guc, vma);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 806fdfd7c78a..7146524264dd 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -620,7 +620,12 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
guc_log_disable_flush_events(log);
+ synchronize_irq(i915->drm.irq);
+
flush_work(&log->relay.flush_work);
mutex_lock(&log->relay.lock);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 8bc8aa54aa35..46cd0e70aecb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -23,7 +23,6 @@
*/
#include <linux/circ_buf.h>
-#include <trace/events/dma_fence.h>
#include "intel_guc_submission.h"
#include "intel_lrc_reg.h"
@@ -382,7 +381,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
@@ -393,7 +392,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
- if (!ce->state)
+ if (!ce || !ce->state)
break; /* XXX: continue? */
/*
@@ -535,7 +534,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
spin_lock(&client->wq_lock);
guc_wq_item_append(client, engine->guc_id, ctx_desc,
- ring_tail, rq->global_seqno);
+ ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
client->submissions[engine->id] += 1;
@@ -567,7 +566,7 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- struct intel_context *ce = to_intel_context(client->owner, engine);
+ struct intel_context *ce = engine->preempt_context;
u32 data[7];
if (!ce->ring->emit) { /* recreate upon load/resume */
@@ -575,7 +574,7 @@ static void inject_preempt_context(struct work_struct *work)
u32 *cs;
cs = ce->ring->vaddr;
- if (engine->id == RCS) {
+ if (engine->class == RENDER_CLASS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
addr,
@@ -583,7 +582,8 @@ static void inject_preempt_context(struct work_struct *work)
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
- addr);
+ addr,
+ 0);
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
@@ -649,9 +649,10 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
struct guc_ctx_report *report =
&data->preempt_ctx_report[engine->guc_id];
- WARN_ON(wait_for_atomic(report->report_return_status ==
- INTEL_GUC_REPORT_STATUS_COMPLETE,
- GUC_PREEMPT_POSTPROCESS_DELAY_MS));
+ if (wait_for_atomic(report->report_return_status ==
+ INTEL_GUC_REPORT_STATUS_COMPLETE,
+ GUC_PREEMPT_POSTPROCESS_DELAY_MS))
+ DRM_ERROR("Timed out waiting for GuC preemption report\n");
/*
* GuC is expecting that we're also going to clear the affected context
* counter, let's also reset the return status to not depend on GuC
@@ -720,7 +721,7 @@ static inline int rq_prio(const struct i915_request *rq)
static inline int port_prio(const struct execlist_port *port)
{
- return rq_prio(port_request(port));
+ return rq_prio(port_request(port)) | __NO_PREEMPTION;
}
static bool __guc_dequeue(struct intel_engine_cs *engine)
@@ -781,8 +782,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
+ i915_priolist_free(p);
}
done:
execlists->queue_priority_hint =
@@ -871,6 +871,104 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
flush_workqueue(engine->i915->guc.preempt_wq);
}
+static void guc_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ execlists_cancel_port_requests(execlists);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rq = execlists_unwind_incomplete_requests(execlists);
+ if (!rq)
+ goto out_unlock;
+
+ if (!i915_request_started(rq))
+ stalled = false;
+
+ i915_reset_request(rq, stalled);
+ intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+
+out_unlock:
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void guc_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /* Cancel the requests on the HW and clear the ELSP tracker. */
+ execlists_cancel_port_requests(execlists);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+
+ i915_request_mark_complete(rq);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
+ __i915_request_submit(rq);
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+ GEM_BUG_ON(port_isset(execlists->port));
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void guc_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ if (__tasklet_enable(&execlists->tasklet))
+ /* And kick in case we missed a new request submission. */
+ tasklet_hi_schedule(&execlists->tasklet);
+
+ GEM_TRACE("%s: depth->%d\n", engine->name,
+ atomic_read(&execlists->tasklet.count));
+}
+
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -1031,7 +1129,7 @@ static int guc_clients_create(struct intel_guc *guc)
GEM_BUG_ON(guc->preempt_client);
client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
GUC_CLIENT_PRIORITY_KMD_NORMAL,
dev_priv->kernel_context);
if (IS_ERR(client)) {
@@ -1042,7 +1140,7 @@ static int guc_clients_create(struct intel_guc *guc)
if (dev_priv->preempt_context) {
client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
GUC_CLIENT_PRIORITY_KMD_HIGH,
dev_priv->preempt_context);
if (IS_ERR(client)) {
@@ -1262,10 +1360,12 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_submission_park(struct intel_engine_cs *engine)
{
intel_engine_unpin_breadcrumbs_irq(engine);
+ engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
static void guc_submission_unpark(struct intel_engine_cs *engine)
{
+ engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
intel_engine_pin_breadcrumbs_irq(engine);
}
@@ -1290,6 +1390,10 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->unpark = guc_submission_unpark;
engine->reset.prepare = guc_reset_prepare;
+ engine->reset.reset = guc_reset;
+ engine->reset.finish = guc_reset_finish;
+
+ engine->cancel_requests = guc_cancel_requests;
engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index 169c54568340..aa5e6749c925 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -29,6 +29,7 @@
#include "i915_gem.h"
#include "i915_selftest.h"
+#include "intel_engine_types.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index a219c796e56d..3d51ed1428d4 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -56,7 +56,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
int slice;
int subslice;
- if (engine->id != RCS)
+ if (engine->id != RCS0)
return true;
intel_engine_get_instdone(engine, &instdone);
@@ -118,11 +118,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- tmp = I915_READ_CTL(engine);
+ tmp = ENGINE_READ(engine, RING_CTL);
if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, BIT(engine->id), 0,
+ i915_handle_error(dev_priv, engine->mask, 0,
"stuck wait on %s", engine->name);
- I915_WRITE_CTL(engine, tmp);
+ ENGINE_WRITE(engine, RING_CTL, tmp);
return ENGINE_WAIT_KICK;
}
@@ -133,21 +133,21 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine,
struct hangcheck *hc)
{
hc->acthd = intel_engine_get_active_head(engine);
- hc->seqno = intel_engine_get_seqno(engine);
+ hc->seqno = intel_engine_get_hangcheck_seqno(engine);
}
static void hangcheck_store_sample(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.seqno = hc->seqno;
+ engine->hangcheck.last_seqno = hc->seqno;
}
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
- if (engine->hangcheck.seqno != hc->seqno)
+ if (engine->hangcheck.last_seqno != hc->seqno)
return ENGINE_ACTIVE_SEQNO;
if (intel_engine_is_idle(engine))
@@ -221,8 +221,8 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
unsigned int stuck)
{
struct intel_engine_cs *engine;
+ intel_engine_mask_t tmp;
char msg[80];
- unsigned int tmp;
int len;
/* If some rings hung but others were still busy, only
@@ -263,14 +263,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return;
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
for_each_engine(engine, dev_priv, id) {
struct hangcheck hc;
@@ -282,13 +282,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
hangcheck_store_sample(engine, &hc);
if (hc.stalled) {
- hung |= intel_engine_flag(engine);
+ hung |= engine->mask;
if (hc.action != ENGINE_DEAD)
- stuck |= intel_engine_flag(engine);
+ stuck |= engine->mask;
}
if (hc.wedged)
- wedged |= intel_engine_flag(engine);
+ wedged |= engine->mask;
}
if (GEM_SHOW_DEBUG() && (hung | stuck)) {
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index ce7ba3a9c000..99b007169c49 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -6,15 +6,20 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#include <drm/drm_hdcp.h>
+#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>
-#include "intel_drv.h"
+#include <drm/drm_hdcp.h>
+#include <drm/i915_component.h>
+
#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
+#define HDCP2_LC_RETRY_CNT 3
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -72,6 +77,52 @@ bool intel_hdcp_capable(struct intel_connector *connector)
return capable;
}
+/* Is HDCP2.2 supported on the platform and by the sink? */
+static bool intel_hdcp2_capable(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool capable = false;
+
+ /* I915 support for HDCP2.2 */
+ if (!hdcp->hdcp2_supported)
+ return false;
+
+ /* MEI interface is solid */
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return false;
+ }
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ /* Sink's capability for HDCP2.2 */
+ hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+
+ return capable;
+}
+
+static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum port port = connector->encoder->port;
+ u32 reg;
+
+ reg = I915_READ(PORT_HDCP_STATUS(port));
+ return reg & HDCP_STATUS_ENC;
+}
+
+static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum port port = connector->encoder->port;
+ u32 reg;
+
+ reg = I915_READ(HDCP2_STATUS_DDI(port));
+ return reg & LINK_ENCRYPTION_STATUS;
+}
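A small, purely illustrative helper showing how the two probes above might be combined (hdcp_port_encrypted() is not part of this patch):

static bool hdcp_port_encrypted(struct intel_connector *connector)
{
	/* Report encryption if either HDCP 1.4 or HDCP 2.2 is engaged. */
	return intel_hdcp_in_use(connector) ||
	       intel_hdcp2_in_use(connector);
}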
+
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim)
{
@@ -176,7 +227,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
}
/* Wait for the keys to load (500us) */
- ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
+ ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
10, 1, &val);
if (ret)
@@ -194,7 +245,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
I915_WRITE(HDCP_SHA_TEXT, sha_text);
- if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
DRM_ERROR("Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
@@ -425,7 +476,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Tell the HW we're done with the hash and wait for it to ACK */
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
DRM_ERROR("Timed out waiting for SHA1 complete\n");
@@ -555,7 +606,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_AN_READY,
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
@@ -636,7 +687,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
}
/* Wait for encryption confirmation */
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_ENC, HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
@@ -666,8 +717,10 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
+ hdcp->hdcp_encrypted = false;
I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PORT_HDCP_STATUS(port), ~0, 0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -711,8 +764,10 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* In case of authentication failures, the HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
- if (!ret)
+ if (!ret) {
+ hdcp->hdcp_encrypted = true;
return 0;
+ }
DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
@@ -730,16 +785,64 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
return container_of(hdcp, struct intel_connector, hdcp);
}
-static void intel_hdcp_check_work(struct work_struct *work)
+/* Implements Part 3 of the HDCP authorization procedure */
+static int intel_hdcp_check_link(struct intel_connector *connector)
{
- struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
- struct intel_hdcp,
- check_work);
- struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ enum port port = intel_dig_port->base.port;
+ int ret = 0;
- if (!intel_hdcp_check_link(connector))
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP_CHECK_PERIOD_MS);
+ mutex_lock(&hdcp->mutex);
+
+ /* check_link is valid only when HDCP1.4 is enabled */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+ !hdcp->hdcp_encrypted) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (WARN_ON(!intel_hdcp_in_use(connector))) {
+ DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
+ connector->base.name, connector->base.base.id,
+ I915_READ(PORT_HDCP_STATUS(port)));
+ ret = -ENXIO;
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ if (hdcp->shim->check_link(intel_dig_port)) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
+ goto out;
+ }
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = _intel_hdcp_disable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = _intel_hdcp_enable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&hdcp->mutex);
+ return ret;
}
static void intel_hdcp_prop_work(struct work_struct *work)
@@ -773,14 +876,929 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
}
+static int
+hdcp2_prepare_ake_init(struct intel_connector *connector,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
+ if (ret)
+ DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *paired,
+ struct hdcp2_ake_no_stored_km *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+ rx_cert, paired,
+ ek_pub_km, msg_sz);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_verify_hprime(struct intel_connector *connector,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_store_pairing_info(struct intel_connector *connector,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_prepare_lc_init(struct intel_connector *connector,
+ struct hdcp2_lc_init *lc_init)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_lprime(struct intel_connector *connector,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_prepare_skey(struct intel_connector *connector,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack *rep_send_ack)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
+ rep_topology,
+ rep_send_ack);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_mprime(struct intel_connector *connector,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_authenticate_port(struct intel_connector *connector)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_close_mei_session(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->close_hdcp_session(comp->mei_dev,
+ &connector->hdcp.port_data);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_deauthenticate_port(struct intel_connector *connector)
+{
+ return hdcp2_close_mei_session(connector);
+}
+
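Editorial note: all of the hdcp2_* wrappers above share one guard pattern: take dev_priv->hdcp_comp_mutex, bail out if the MEI component master or its ops table is not bound yet, invoke exactly one op, log a debug message on failure, and drop the lock. The following is a minimal userspace sketch of that pattern, not driver code; the comp_master/do_op/call_comp_op names are invented for illustration.

/*
 * Minimal userspace sketch of the guard pattern shared by the hdcp2_*
 * wrappers above: lock, check that the component is bound, call one op,
 * log on failure, unlock.  All names here are illustrative.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct comp_ops {
	int (*do_op)(void *mei_dev, void *arg);	/* stands in for one mei_hdcp op */
};

struct comp_master {
	void *mei_dev;
	const struct comp_ops *ops;
};

static pthread_mutex_t comp_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct comp_master *master;		/* set/cleared by bind/unbind */

static int call_comp_op(void *arg)
{
	int ret;

	pthread_mutex_lock(&comp_mutex);
	if (!master || !master->ops) {
		pthread_mutex_unlock(&comp_mutex);
		return -EINVAL;			/* component not bound yet */
	}
	ret = master->ops->do_op(master->mei_dev, arg);
	if (ret < 0)
		fprintf(stderr, "component op failed: %d\n", ret);
	pthread_mutex_unlock(&comp_mutex);
	return ret;
}

static int dummy_op(void *mei_dev, void *arg)
{
	(void)mei_dev; (void)arg;
	return 0;
}

int main(void)
{
	static const struct comp_ops ops = { .do_op = dummy_op };
	static struct comp_master m = { .mei_dev = NULL, .ops = &ops };

	/* Before bind: the call is rejected, mirroring -EINVAL in the driver. */
	printf("unbound: %d\n", call_comp_op(NULL));

	master = &m;				/* "bind" */
	printf("bound:   %d\n", call_comp_op(NULL));
	return 0;
}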
+/* Authentication flow starts from here */
+static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_ake_init ake_init;
+ struct hdcp2_ake_send_cert send_cert;
+ struct hdcp2_ake_no_stored_km no_stored_km;
+ struct hdcp2_ake_send_hprime send_hprime;
+ struct hdcp2_ake_send_pairing_info pairing_info;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ size_t size;
+ int ret;
+
+ /* Init for seq_num */
+ hdcp->seq_num_v = 0;
+ hdcp->seq_num_m = 0;
+
+ ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+ sizeof(msgs.ake_init));
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+ &msgs.send_cert, sizeof(msgs.send_cert));
+ if (ret < 0)
+ return ret;
+
+ if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+ return -EINVAL;
+
+ hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
+
+ /*
+ * msgs.no_stored_km is also used to hold the "stored km" variant of
+ * the message when the receiver is already paired.
+ */
+ ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
+ &hdcp->is_paired,
+ &msgs.no_stored_km, &size);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+ &msgs.send_hprime, sizeof(msgs.send_hprime));
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
+ if (ret < 0)
+ return ret;
+
+ if (!hdcp->is_paired) {
+ /* Pairing is required */
+ ret = shim->read_2_2_msg(intel_dig_port,
+ HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ &msgs.pairing_info,
+ sizeof(msgs.pairing_info));
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
+ if (ret < 0)
+ return ret;
+ hdcp->is_paired = true;
+ }
+
+ return 0;
+}
+
+static int hdcp2_locality_check(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_lc_init lc_init;
+ struct hdcp2_lc_send_lprime send_lprime;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int tries = HDCP2_LC_RETRY_CNT, ret, i;
+
+ for (i = 0; i < tries; i++) {
+ ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
+ if (ret < 0)
+ continue;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+ sizeof(msgs.lc_init));
+ if (ret < 0)
+ continue;
+
+ ret = shim->read_2_2_msg(intel_dig_port,
+ HDCP_2_2_LC_SEND_LPRIME,
+ &msgs.send_lprime,
+ sizeof(msgs.send_lprime));
+ if (ret < 0)
+ continue;
+
+ ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
+ if (!ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int hdcp2_session_key_exchange(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct hdcp2_ske_send_eks send_eks;
+ int ret;
+
+ ret = hdcp2_prepare_skey(connector, &send_eks);
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+ sizeof(send_eks));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static
+int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_rep_stream_manage stream_manage;
+ struct hdcp2_rep_stream_ready stream_ready;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int ret;
+
+ /* Prepare RepeaterAuth_Stream_Manage msg */
+ msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
+ drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
+
+ /* K, the number of streams, is fixed at 1 and stored big-endian. */
+ msgs.stream_manage.k = cpu_to_be16(1);
+
+ /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
+ msgs.stream_manage.streams[0].stream_id = 0;
+ msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+
+ /* Send it to Repeater */
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+ sizeof(msgs.stream_manage));
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+ &msgs.stream_ready, sizeof(msgs.stream_ready));
+ if (ret < 0)
+ return ret;
+
+ hdcp->port_data.seq_num_m = hdcp->seq_num_m;
+ hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+
+ ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
+ if (ret < 0)
+ return ret;
+
+ hdcp->seq_num_m++;
+
+ if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
+ DRM_DEBUG_KMS("seq_num_m roll over.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static
+int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_rep_send_receiverid_list recvid_list;
+ struct hdcp2_rep_send_ack rep_ack;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ u8 *rx_info;
+ u32 seq_num_v;
+ int ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+ &msgs.recvid_list, sizeof(msgs.recvid_list));
+ if (ret < 0)
+ return ret;
+
+ rx_info = msgs.recvid_list.rx_info;
+
+ if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
+ HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
+ DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+ return -EINVAL;
+ }
+
+ /* Convert seq_num_v to a u32 and store it in a local variable */
+ seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v);
+
+ if (seq_num_v < hdcp->seq_num_v) {
+ /* Roll over of the seq_num_v from repeater. Reauthenticate. */
+ DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+ return -EINVAL;
+ }
+
+ ret = hdcp2_verify_rep_topology_prepare_ack(connector,
+ &msgs.recvid_list,
+ &msgs.rep_ack);
+ if (ret < 0)
+ return ret;
+
+ hdcp->seq_num_v = seq_num_v;
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+ sizeof(msgs.rep_ack));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
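Editorial note: the seq_num_v/seq_num_m fields handled above are 24-bit, big-endian counters on the wire; drm_hdcp2_seq_num_to_u32()/drm_hdcp2_u32_to_seq_num() convert them to and from a host u32, and a numerically smaller value from the repeater is treated as a rollover that forces reauthentication. The standalone sketch below assumes that 3-byte big-endian layout; the helper names are local stand-ins, not the DRM API.

/*
 * Standalone sketch of the 3-byte, big-endian seq_num handling used above.
 */
#include <stdint.h>
#include <stdio.h>

#define SEQ_NUM_LEN 3				/* assumed wire length */
#define SEQ_NUM_MAX 0xFFFFFFu			/* 24-bit counter limit */

static uint32_t seq_num_to_u32(const uint8_t seq[SEQ_NUM_LEN])
{
	return ((uint32_t)seq[0] << 16) | ((uint32_t)seq[1] << 8) | seq[2];
}

static void u32_to_seq_num(uint8_t seq[SEQ_NUM_LEN], uint32_t val)
{
	seq[0] = val >> 16;
	seq[1] = val >> 8;
	seq[2] = val;
}

int main(void)
{
	uint8_t wire[SEQ_NUM_LEN];
	uint32_t stored = 0x000102;		/* last seq_num_v we accepted */

	u32_to_seq_num(wire, 0x000101);		/* repeater sent a smaller value */
	if (seq_num_to_u32(wire) < stored)
		puts("seq_num_v rolled over: reauthenticate");

	if (stored + 1 > SEQ_NUM_MAX)		/* seq_num_m style overflow check */
		puts("seq_num_m exhausted");
	return 0;
}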
+static int hdcp2_authenticate_repeater(struct intel_connector *connector)
+{
+ int ret;
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (ret < 0)
+ return ret;
+
+ return hdcp2_propagate_stream_management_info(connector);
+}
+
+static int hdcp2_authenticate_sink(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int ret;
+
+ ret = hdcp2_authentication_key_exchange(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ ret = hdcp2_locality_check(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ ret = hdcp2_session_key_exchange(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ if (shim->config_stream_type) {
+ ret = shim->config_stream_type(intel_dig_port,
+ hdcp->is_repeater,
+ hdcp->content_type);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (hdcp->is_repeater) {
+ ret = hdcp2_authenticate_repeater(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+ return ret;
+ }
+ }
+
+ hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+ ret = hdcp2_authenticate_port(connector);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static int hdcp2_enable_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret;
+
+ WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
+
+ if (hdcp->shim->toggle_signalling) {
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+ if (ret) {
+ DRM_ERROR("Failed to enable HDCP signalling. %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+ /* Link is Authenticated. Now set for Encryption */
+ I915_WRITE(HDCP2_CTL_DDI(port),
+ I915_READ(HDCP2_CTL_DDI(port)) |
+ CTL_LINK_ENCRYPTION_REQ);
+ }
+
+ ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS,
+ LINK_ENCRYPTION_STATUS,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+
+ return ret;
+}
+
+static int hdcp2_disable_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret;
+
+ WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+
+ I915_WRITE(HDCP2_CTL_DDI(port),
+ I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+
+ ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS, 0x0,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ if (ret == -ETIMEDOUT)
+ DRM_DEBUG_KMS("Disable Encryption Timedout");
+
+ if (hdcp->shim->toggle_signalling) {
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable HDCP signalling. %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
+{
+ int ret, i, tries = 3;
+
+ for (i = 0; i < tries; i++) {
+ ret = hdcp2_authenticate_sink(connector);
+ if (!ret)
+ break;
+
+ /* Clearing the mei hdcp session */
+ DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+ }
+
+ if (i != tries) {
+ /*
+ * Ensure the required 200 ms minimum interval between Session Key
+ * Exchange and enabling encryption.
+ */
+ msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
+ ret = hdcp2_enable_encryption(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+ }
+ }
+
+ return ret;
+}
+
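Editorial note: hdcp2_authenticate_and_encrypt() above bounds the authentication retries, tears the MEI session down between attempts, and observes the 200 ms minimum gap between Session Key Exchange and enabling encryption. Below is a userspace model of that control flow; authenticate_sink()/enable_encryption() are stubs, and the literal delay merely stands in for HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN.

/*
 * Userspace model of bounded-retry authentication followed by the
 * mandatory settling delay before encryption is turned on.
 */
#include <stdio.h>
#include <unistd.h>

static int authenticate_sink(int attempt) { return attempt < 2 ? -1 : 0; }
static void deauthenticate(void) { puts("closing mei session"); }
static int enable_encryption(void) { return 0; }

int main(void)
{
	const int tries = 3;
	int i, ret = -1;

	for (i = 0; i < tries; i++) {
		ret = authenticate_sink(i);
		if (!ret)
			break;
		printf("auth %d of %d failed (%d)\n", i + 1, tries, ret);
		deauthenticate();			/* clear the mei session */
	}

	if (i != tries) {
		usleep(200 * 1000);	/* min gap between SKE and encryption */
		ret = enable_encryption();
		if (ret < 0)
			deauthenticate();
	}
	return ret ? 1 : 0;
}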
+static int _intel_hdcp2_enable(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
+
+ ret = hdcp2_authenticate_and_encrypt(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
+
+ hdcp->hdcp2_encrypted = true;
+ return 0;
+}
+
+static int _intel_hdcp2_disable(struct intel_connector *connector)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = hdcp2_disable_encryption(connector);
+
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+
+ connector->hdcp.hdcp2_encrypted = false;
+
+ return ret;
+}
+
+/* Implements the Link Integrity Check for HDCP2.2 */
+static int intel_hdcp2_check_link(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret = 0;
+
+ mutex_lock(&hdcp->mutex);
+
+ /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+ !hdcp->hdcp2_encrypted) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+ DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
+ I915_READ(HDCP2_STATUS_DDI(port)));
+ ret = -ENXIO;
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = hdcp->shim->check_2_2_link(intel_dig_port);
+ if (ret == HDCP_LINK_PROTECTED) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
+ goto out;
+ }
+
+ if (ret == HDCP_TOPOLOGY_CHANGE) {
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+ DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
+ } else {
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
+ connector->base.name, connector->base.base.id);
+ }
+
+ ret = _intel_hdcp2_disable(connector);
+ if (ret) {
+ DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id, ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = _intel_hdcp2_enable(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&hdcp->mutex);
+ return ret;
+}
+
+static void intel_hdcp_check_work(struct work_struct *work)
+{
+ struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+ struct intel_hdcp,
+ check_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
+ if (!intel_hdcp2_check_link(connector))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP2_CHECK_PERIOD_MS);
+ else if (!intel_hdcp_check_link(connector))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+static int i915_hdcp_component_bind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ DRM_DEBUG("I915 HDCP comp bind\n");
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
+ dev_priv->hdcp_master->mei_dev = mei_kdev;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return 0;
+}
+
+static void i915_hdcp_component_unbind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ DRM_DEBUG("I915 HDCP comp unbind\n");
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_master = NULL;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+}
+
+static const struct component_ops i915_hdcp_component_ops = {
+ .bind = i915_hdcp_component_bind,
+ .unbind = i915_hdcp_component_unbind,
+};
+
+static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct hdcp_port_data *data = &hdcp->port_data;
+
+ data->port = connector->encoder->port;
+ data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
+ data->protocol = (u8)hdcp->shim->protocol;
+
+ data->k = 1;
+ if (!data->streams)
+ data->streams = kcalloc(data->k,
+ sizeof(struct hdcp2_streamid_type),
+ GFP_KERNEL);
+ if (!data->streams) {
+ DRM_ERROR("Out of Memory\n");
+ return -ENOMEM;
+ }
+
+ data->streams[0].stream_id = 0;
+ data->streams[0].stream_type = hdcp->content_type;
+
+ return 0;
+}
+
+static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
+{
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
+ return false;
+
+ return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv));
+}
+
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!is_hdcp2_supported(dev_priv))
+ return;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ WARN_ON(dev_priv->hdcp_comp_added);
+
+ dev_priv->hdcp_comp_added = true;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
+ I915_COMPONENT_HDCP);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_comp_added = false;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return;
+ }
+}
+
+static void intel_hdcp2_init(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ ret = initialize_hdcp_port_data(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+ return;
+ }
+
+ hdcp->hdcp2_supported = true;
+}
+
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = drm_connector_attach_content_protection_property(
- &connector->base);
+ if (!shim)
+ return -EINVAL;
+
+ ret = drm_connector_attach_content_protection_property(&connector->base);
if (ret)
return ret;
@@ -788,28 +1806,47 @@ int intel_hdcp_init(struct intel_connector *connector,
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
+
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector);
+ init_waitqueue_head(&hdcp->cp_irq_queue);
+
return 0;
}
int intel_hdcp_enable(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
- int ret;
+ unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
+ int ret = -EINVAL;
if (!hdcp->shim)
return -ENOENT;
mutex_lock(&hdcp->mutex);
+ WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
- ret = _intel_hdcp_enable(connector);
- if (ret)
- goto out;
+ /*
+ * HDCP2.2 is more secure than HDCP1.4, so if the setup is capable of
+ * HDCP2.2, it is preferred.
+ */
+ if (intel_hdcp2_capable(connector)) {
+ ret = _intel_hdcp2_enable(connector);
+ if (!ret)
+ check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
+ }
+
+ /* When HDCP2.2 fails, HDCP1.4 will be attempted */
+ if (ret && intel_hdcp_capable(connector)) {
+ ret = _intel_hdcp_enable(connector);
+ }
+
+ if (!ret) {
+ schedule_delayed_work(&hdcp->check_work, check_link_interval);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP_CHECK_PERIOD_MS);
-out:
mutex_unlock(&hdcp->mutex);
return ret;
}
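Editorial note: the enable path above prefers HDCP2.2, falls back to HDCP1.4 only when 2.2 fails or is unavailable, and arms the periodic link check with the interval matching whichever version came up. A userspace model of that policy follows; the try_enable_* functions are stand-ins and the numeric periods are illustrative placeholders for DRM_HDCP2_CHECK_PERIOD_MS / DRM_HDCP_CHECK_PERIOD_MS.

/*
 * Model of the HDCP enable policy: 2.2 first, 1.4 as fallback, and the
 * re-check interval chosen to match the version that succeeded.
 */
#include <stdbool.h>
#include <stdio.h>

static bool hdcp2_capable = true;		/* assume sink+platform report 2.2 */
static bool hdcp1_capable = true;

static int try_enable_hdcp2(void) { return 0; }	/* 0 = success */
static int try_enable_hdcp1(void) { return 0; }

int main(void)
{
	int check_period_ms = 0;
	int ret = -1;

	if (hdcp2_capable) {
		ret = try_enable_hdcp2();
		if (!ret)
			check_period_ms = 500;	/* placeholder 2.2 period */
	}
	if (ret && hdcp1_capable) {
		ret = try_enable_hdcp1();
		if (!ret)
			check_period_ms = 2048;	/* placeholder 1.4 period */
	}

	if (!ret)
		printf("HDCP enabled, re-check every %d ms\n", check_period_ms);
	else
		printf("HDCP enable failed (%d)\n", ret);
	return 0;
}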
@@ -826,7 +1863,10 @@ int intel_hdcp_disable(struct intel_connector *connector)
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
- ret = _intel_hdcp_disable(connector);
+ if (hdcp->hdcp2_encrypted)
+ ret = _intel_hdcp2_disable(connector);
+ else if (hdcp->hdcp_encrypted)
+ ret = _intel_hdcp_disable(connector);
}
mutex_unlock(&hdcp->mutex);
@@ -834,6 +1874,30 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ if (!dev_priv->hdcp_comp_added) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return;
+ }
+
+ dev_priv->hdcp_comp_added = false;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+}
+
+void intel_hdcp_cleanup(struct intel_connector *connector)
+{
+ if (!connector->hdcp.shim)
+ return;
+
+ mutex_lock(&connector->hdcp.mutex);
+ kfree(connector->hdcp.port_data.streams);
+ mutex_unlock(&connector->hdcp.mutex);
+}
+
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state)
@@ -867,61 +1931,16 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
crtc_state->mode_changed = true;
}
-/* Implements Part 3 of the HDCP authorization procedure */
-int intel_hdcp_check_link(struct intel_connector *connector)
+/* Handles the CP_IRQ raised from the DP HDCP sink */
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
- enum port port = intel_dig_port->base.port;
- int ret = 0;
if (!hdcp->shim)
- return -ENOENT;
-
- mutex_lock(&hdcp->mutex);
-
- if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
- goto out;
-
- if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
- DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
- connector->base.name, connector->base.base.id,
- I915_READ(PORT_HDCP_STATUS(port)));
- ret = -ENXIO;
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
-
- if (hdcp->shim->check_link(intel_dig_port)) {
- if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
- }
- goto out;
- }
-
- DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
- connector->base.name, connector->base.base.id);
-
- ret = _intel_hdcp_disable(connector);
- if (ret) {
- DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
+ return;
- ret = _intel_hdcp_enable(connector);
- if (ret) {
- DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
+ atomic_inc(&connector->hdcp.cp_irq_count);
+ wake_up_all(&connector->hdcp.cp_irq_queue);
-out:
- mutex_unlock(&hdcp->mutex);
- return ret;
+ schedule_delayed_work(&hdcp->check_work, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_hdcp.h b/drivers/gpu/drm/i915/intel_hdcp.h
new file mode 100644
index 000000000000..a75f25f09d39
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdcp.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_H__
+#define __INTEL_HDCP_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_hdcp_shim;
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
+int intel_hdcp_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *hdcp_shim);
+int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_disable(struct intel_connector *connector);
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
+void intel_hdcp_cleanup(struct intel_connector *connector);
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
+
+#endif /* __INTEL_HDCP_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f125a62eba8c..34be2cfd0ec8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -26,19 +26,30 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_sdvo.h"
+#include "intel_panel.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
@@ -82,6 +93,8 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
static u32 g4x_infoframe_index(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_SELECT_GAMUT;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_SELECT_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -97,6 +110,12 @@ static u32 g4x_infoframe_index(unsigned int type)
static u32 g4x_infoframe_enable(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+ return VIDEO_DIP_ENABLE_GCP;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_ENABLE_GAMUT;
+ case DP_SDP_VSC:
+ return 0;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -112,6 +131,10 @@ static u32 g4x_infoframe_enable(unsigned int type)
static u32 hsw_infoframe_enable(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+ return VIDEO_DIP_ENABLE_GCP_HSW;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_ENABLE_GMP_HSW;
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
case DP_SDP_PPS:
@@ -135,6 +158,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
int i)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
case DP_SDP_PPS:
@@ -182,7 +207,6 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(VIDEO_DIP_CTL, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
@@ -190,7 +214,6 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VIDEO_DIP_DATA, 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -200,17 +223,37 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
-static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
+static void g4x_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(VIDEO_DIP_CTL);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(VIDEO_DIP_CTL, val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(VIDEO_DIP_DATA);
+}
+
+static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
@@ -237,7 +280,6 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -245,7 +287,6 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -255,7 +296,28 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
+static void ibx_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -264,10 +326,10 @@ static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -298,7 +360,6 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -306,7 +367,6 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -316,7 +376,28 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
+static void cpt_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -324,7 +405,7 @@ static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -352,7 +433,6 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -360,7 +440,6 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -370,7 +449,28 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
+static void vlv_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -378,10 +478,10 @@ static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -406,7 +506,6 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), *data);
@@ -416,14 +515,30 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
for (; i < data_size; i += 4)
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0);
- mmiowb();
val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
-static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
+static void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+ type, i >> 2));
+}
+
+static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -434,6 +549,53 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
}
+static const u8 infoframe_type_to_idx[] = {
+ HDMI_PACKET_TYPE_GENERAL_CONTROL,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ DP_SDP_VSC,
+ HDMI_INFOFRAME_TYPE_AVI,
+ HDMI_INFOFRAME_TYPE_SPD,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+};
+
+u32 intel_hdmi_infoframe_enable(unsigned int type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+ if (infoframe_type_to_idx[i] == type)
+ return BIT(i);
+ }
+
+ return 0;
+}
+
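Editorial note: intel_hdmi_infoframe_enable() maps each DIP packet type onto a fixed bit in crtc_state->infoframes.enable so that computed and read-out state can be compared bit-for-bit. A tiny standalone model of that lookup is sketched below; the numeric type values are quoted from the HDMI/DP specs from memory and should be treated as illustrative.

/*
 * Model of the type-to-bitmask mapping: each known packet type gets a
 * stable bit position in an "infoframes enabled" mask.
 */
#include <stdint.h>
#include <stdio.h>

static const uint8_t type_to_idx[] = { 0x03, 0x0a, 0x07, 0x82, 0x83, 0x81 };

static uint32_t infoframe_enable_bit(uint8_t type)
{
	for (unsigned int i = 0; i < sizeof(type_to_idx); i++)
		if (type_to_idx[i] == type)
			return 1u << i;
	return 0;				/* unknown packet type */
}

int main(void)
{
	uint32_t enabled = infoframe_enable_bit(0x82) |	/* AVI */
			   infoframe_enable_bit(0x83);	/* SPD */

	printf("AVI armed: %s\n",
	       (enabled & infoframe_enable_bit(0x82)) ? "yes" : "no");
	return 0;
}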
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ u32 val, ret = 0;
+ int i;
+
+ val = dig_port->infoframes_enabled(encoder, crtc_state);
+
+ /* map from hardware bits to dip idx */
+ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+ unsigned int type = infoframe_type_to_idx[i];
+
+ if (HAS_DDI(dev_priv)) {
+ if (val & hsw_infoframe_enable(type))
+ ret |= BIT(i);
+ } else {
+ if (val & g4x_infoframe_enable(type))
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -453,15 +615,23 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
*/
static void intel_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- union hdmi_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const union hdmi_infoframe *frame)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ if (WARN_ON(frame->any.type != type))
+ return;
+
/* see comment above for the reason for this offset */
- len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
- if (len < 0)
+ len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
+ if (WARN_ON(len < 0))
return;
/* Insert the 'hole' (see big comment above) at position 3 */
@@ -469,84 +639,143 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
buffer[3] = 0;
len++;
- intel_dig_port->write_infoframe(encoder,
- crtc_state,
- frame->any.type, buffer, len);
+ intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
}
-static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+void intel_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ union hdmi_infoframe *frame)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ u8 buffer[VIDEO_DIP_DATA_SIZE];
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ intel_dig_port->read_infoframe(encoder, crtc_state,
+ type, buffer, sizeof(buffer));
+
+ /* Fill the 'hole' (see big comment above) at position 3 */
+ memmove(&buffer[1], &buffer[0], 3);
+
+ /* see comment above for the reason for this offset */
+ ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+ return;
+ }
+
+ if (frame->any.type != type)
+ DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, type);
+}
+
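Editorial note: the read path above mirrors the write path's handling of the DIP buffer's ECC/reserved byte: packing leaves a zero "hole" at position 3, and reading collapses it with memmove() before unpacking. The standalone round-trip sketch below isolates just that byte shuffling, using dummy payload bytes and an arbitrary buffer size in place of VIDEO_DIP_DATA_SIZE.

/*
 * Round-trip sketch of the DIP buffer "hole" at position 3: open it on the
 * write side, collapse it on the read side, and check the payload survives.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIP_SIZE 16				/* stand-in buffer size */

int main(void)
{
	uint8_t packed[] = { 0x82, 0x02, 0x0d,	/* 3-byte infoframe header */
			     0xaa, 0xbb, 0xcc };/* payload (dummy) */
	uint8_t dip[DIP_SIZE] = { 0 };
	uint8_t back[DIP_SIZE] = { 0 };

	/* Write side: pack at offset 1, then open the hole at position 3. */
	memcpy(dip + 1, packed, sizeof(packed));
	memmove(&dip[0], &dip[1], 3);		/* header moves down to 0..2 */
	dip[3] = 0;				/* ECC/reserved byte */

	/* Read side: collapse the hole so the frame is contiguous at +1. */
	memcpy(back, dip, sizeof(dip));
	memmove(&back[1], &back[0], 3);		/* header back to 1..3 */

	printf("round trip %s\n",
	       memcmp(back + 1, packed, sizeof(packed)) ? "failed" : "ok");
	return 0;
}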
+static bool
+intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
+ struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- union hdmi_infoframe frame;
+ struct drm_connector *connector = conn_state->connector;
int ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- conn_state->connector,
+ if (!crtc_state->has_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
adjusted_mode);
- if (ret < 0) {
- DRM_ERROR("couldn't fill AVI infoframe\n");
- return;
- }
+ if (ret)
+ return false;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ frame->colorspace = HDMI_COLORSPACE_YUV420;
else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ frame->colorspace = HDMI_COLORSPACE_YUV444;
else
- frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ frame->colorspace = HDMI_COLORSPACE_RGB;
+
+ drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
- conn_state->connector,
+ drm_hdmi_avi_infoframe_quant_range(frame, connector,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- drm_hdmi_avi_infoframe_content_type(&frame.avi,
- conn_state);
+ drm_hdmi_avi_infoframe_content_type(frame, conn_state);
/* TODO: handle pixel repetition for YCBCR420 outputs */
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+
+ ret = hdmi_avi_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
-static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static bool
+intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- union hdmi_infoframe frame;
+ struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
- ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
- if (ret < 0) {
- DRM_ERROR("couldn't fill SPD infoframe\n");
- return;
- }
+ if (!crtc_state->has_infoframe)
+ return true;
- frame.spd.sdi = HDMI_SPD_SDI_PC;
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+ ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+ if (WARN_ON(ret))
+ return false;
+
+ frame->sdi = HDMI_SPD_SDI_PC;
+
+ ret = hdmi_spd_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
-static void
-intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- union hdmi_infoframe frame;
+static bool
+intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_vendor_infoframe *frame =
+ &crtc_state->infoframes.hdmi.vendor.hdmi;
+ const struct drm_display_info *info =
+ &conn_state->connector->display_info;
int ret;
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR);
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
&crtc_state->base.adjusted_mode);
- if (ret < 0)
- return;
+ if (WARN_ON(ret))
+ return false;
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+ ret = hdmi_vendor_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
static void g4x_set_infoframes(struct intel_encoder *encoder,
@@ -606,9 +835,15 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
@@ -674,7 +909,10 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg;
- u32 val = 0;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+ return false;
if (HAS_DDI(dev_priv))
reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
@@ -685,18 +923,54 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
+ I915_WRITE(reg, crtc_state->infoframes.gcp);
+
+ return true;
+}
+
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ i915_reg_t reg;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+ return;
+
+ if (HAS_DDI(dev_priv))
+ reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ reg = TVIDEO_DIP_GCP(crtc->pipe);
+ else
+ return;
+
+ crtc_state->infoframes.gcp = I915_READ(reg);
+}
+
+static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+ return;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL);
+
/* Indicate color depth whenever the sink supports deep color */
if (hdmi_sink_is_deep_color(conn_state))
- val |= GCP_COLOR_INDICATION;
+ crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
/* Enable default_phase whenever the display mode is suitably aligned */
if (gcp_default_phase_possible(crtc_state->pipe_bpp,
&crtc_state->base.adjusted_mode))
- val |= GCP_DEFAULT_PHASE_ENABLE;
-
- I915_WRITE(reg, val);
-
- return val != 0;
+ crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
}
static void ibx_set_infoframes(struct intel_encoder *encoder,
@@ -747,9 +1021,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void cpt_set_infoframes(struct intel_encoder *encoder,
@@ -790,9 +1070,15 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void vlv_set_infoframes(struct intel_encoder *encoder,
@@ -842,9 +1128,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void hsw_set_infoframes(struct intel_encoder *encoder,
@@ -875,9 +1167,15 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -1083,10 +1381,44 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
return ret;
}
+static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_crtc *crtc = connector->base.state->crtc;
+ struct intel_crtc *intel_crtc = container_of(crtc,
+ struct intel_crtc, base);
+ u32 scanline;
+ int ret;
+
+ for (;;) {
+ scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+ if (scanline > 100 && scanline < 200)
+ break;
+ usleep_range(25, 50);
+ }
+
+ ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+ if (ret) {
+ DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+ return ret;
+ }
+ ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+ if (ret) {
+ DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static
int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
bool enable)
{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct intel_connector *connector = hdmi->attached_connector;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
if (!enable)
@@ -1098,6 +1430,14 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
enable ? "Enable" : "Disable", ret);
return ret;
}
+
+ /*
+ * WA: To fix incorrect positioning of the window of
+ * opportunity and enc_en signalling in KABYLAKE.
+ */
+ if (IS_KABYLAKE(dev_priv) && enable)
+ return kbl_repositioning_enc_en_signal(connector);
+
return 0;
}
@@ -1129,6 +1469,190 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
return true;
}
+static struct hdcp2_hdmi_msg_data {
+ u8 msg_id;
+ u32 timeout;
+ u32 timeout2;
+ } hdcp2_msg_data[] = {
+ {HDCP_2_2_AKE_INIT, 0, 0},
+ {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+ {HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
+ {HDCP_2_2_AKE_STORED_KM, 0, 0},
+ {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+ {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS,
+ 0},
+ {HDCP_2_2_LC_INIT, 0, 0},
+ {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
+ {HDCP_2_2_SKE_SEND_EKS, 0, 0},
+ {HDCP_2_2_REP_SEND_RECVID_LIST,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+ {HDCP_2_2_REP_SEND_ACK, 0, 0},
+ {HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
+ {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS,
+ 0},
+ };
+
+static
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
+{
+ return intel_hdmi_hdcp_read(intel_dig_port,
+ HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
+ rx_status,
+ HDCP_2_2_HDMI_RXSTATUS_LEN);
+}
+
+static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+ if (hdcp2_msg_data[i].msg_id == msg_id &&
+ (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
+ return hdcp2_msg_data[i].timeout;
+ else if (hdcp2_msg_data[i].msg_id == msg_id)
+ return hdcp2_msg_data[i].timeout2;
+
+ return -EINVAL;
+}
+
+static inline
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
+ u8 msg_id, bool *msg_ready,
+ ssize_t *msg_sz)
+{
+ u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+ int ret;
+
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+ return ret;
+ }
+
+ *msg_sz = ((HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) |
+ rx_status[0]);
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST)
+ *msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) &&
+ *msg_sz);
+ else
+ *msg_ready = *msg_sz;
+
+ return 0;
+}
+
+static ssize_t
+intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool paired)
+{
+ bool msg_ready = false;
+ int timeout, ret;
+ ssize_t msg_sz = 0;
+
+ timeout = get_hdcp2_msg_timeout(msg_id, paired);
+ if (timeout < 0)
+ return timeout;
+
+ ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
+ msg_id, &msg_ready,
+ &msg_sz),
+ !ret && msg_ready && msg_sz, timeout * 1000,
+ 1000, 5 * 1000);
+ if (ret)
+ DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
+ msg_id, ret, timeout);
+
+ return ret ? ret : msg_sz;
+}
+
+static
+int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size)
+{
+ unsigned int offset;
+
+ offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
+ return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
+}
+
+static
+int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
+ unsigned int offset;
+ ssize_t ret;
+
+ ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
+ hdcp->is_paired);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Available msg size should be equal to or less than the
+ * available buffer.
+ */
+ if (ret > size) {
+ DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
+ ret, size);
+ return -1;
+ }
+
+ offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
+ if (ret)
+ DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+ int ret;
+
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+ if (ret)
+ return ret;
+
+ /*
+ * Re-auth request and Link Integrity Failures are represented by
+ * the same bit, i.e. reauth_req.
+ */
+ if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1]))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+ bool *capable)
+{
+ u8 hdcp2_version;
+ int ret;
+
+ *capable = false;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+ &hdcp2_version, sizeof(hdcp2_version));
+ if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
+ *capable = true;
+
+ return ret;
+}
+
+static inline
+enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
+{
+ return HDCP_PROTOCOL_HDMI;
+}
+
static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
.read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -1140,6 +1664,11 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part,
.toggle_signalling = intel_hdmi_hdcp_toggle_signalling,
.check_link = intel_hdmi_hdcp_check_link,
+ .write_2_2_msg = intel_hdmi_hdcp2_write_msg,
+ .read_2_2_msg = intel_hdmi_hdcp2_read_msg,
+ .check_2_2_link = intel_hdmi_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_HDMI,
};
static void intel_hdmi_prepare(struct intel_encoder *encoder,
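A note on the HDCP 2.2 helpers added above: each protocol message takes its timeout from the hdcp2_msg_data table, with AKE_SEND_HPRIME being the only entry carrying two budgets (paired vs. unpaired receivers), and the size of a pending message is assembled from the two RxStatus bytes. The standalone sketch below mirrors just that selection and decoding logic with made-up ids, masks and timeout values; it is not driver code, and the real constants live in the DRM HDCP headers.

/* hdcp22_sketch.c - illustration only, stand-in constants, not i915 code */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define AKE_SEND_HPRIME      0x07   /* stand-in message id */
#define HPRIME_PAIRED_MS      200   /* stand-in timeouts (ms) */
#define HPRIME_NO_PAIRED_MS  1000

struct msg_data { unsigned char id; int timeout, timeout2; };

static const struct msg_data msgs[] = {
	{ AKE_SEND_HPRIME, HPRIME_PAIRED_MS, HPRIME_NO_PAIRED_MS },
};

/* A paired receiver answers H' quickly; an unpaired one gets the long budget. */
static int msg_timeout(unsigned char id, bool paired)
{
	for (size_t i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++) {
		if (msgs[i].id != id)
			continue;
		if (id != AKE_SEND_HPRIME || paired)
			return msgs[i].timeout;
		return msgs[i].timeout2;
	}
	return -1;
}

/* RxStatus: message size is the low byte plus the size bits of the high byte. */
static unsigned int msg_size(const unsigned char rx_status[2])
{
	return ((rx_status[1] & 0x3u) << 8) | rx_status[0];
}

int main(void)
{
	const unsigned char rx[2] = { 0x2a, 0x01 };

	printf("H' timeout (unpaired): %d ms, pending msg: %u bytes\n",
	       msg_timeout(AKE_SEND_HPRIME, false), msg_size(rx));
	return 0;
}

Built this way, intel_hdmi_hdcp2_wait_for_msg() can poll RxStatus against a per-message deadline rather than a single global timeout.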
@@ -1205,7 +1734,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@@ -1228,7 +1756,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -1251,6 +1782,18 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
pipe_config->lane_count = 4;
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
@@ -1664,7 +2207,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
/* Display Wa_1405510057:icl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && IS_ICELAKE(dev_priv) &&
+ bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -1822,6 +2365,23 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
+ intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+
+ if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad AVI infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad SPD infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad HDMI infoframe\n");
+ return -EINVAL;
+ }
+
return 0;
}
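The hunk above is the core of the refactor: infoframes are validated and packed into the crtc state while the configuration is computed, and the per-platform set_infoframes hooks later replay only what was enabled. The sketch below shows that two-phase shape with stand-in types and names; the enable bitmask, frame storage and helpers are illustrative, not the real i915 structures.

/* infoframe_split_sketch.c - illustration only, stand-in types and names */
#include <stdbool.h>
#include <stdio.h>

enum frame_type { FRAME_AVI, FRAME_SPD, FRAME_VENDOR, FRAME_MAX };

struct fake_crtc_state {
	unsigned int enabled_frames;   /* which frames the commit step must write */
	int frames[FRAME_MAX];         /* stand-in for the packed infoframes */
};

/* Compute phase: validate and stash the frame; fail the modeset if it's bad. */
static bool compute_frame(struct fake_crtc_state *state, enum frame_type type,
			  int payload)
{
	if (payload < 0)               /* stand-in for hdmi_*_infoframe_check() */
		return false;

	state->enabled_frames |= 1u << type;
	state->frames[type] = payload;
	return true;
}

/* Commit phase: no validation left to do, just write what compute enabled. */
static void write_frames(const struct fake_crtc_state *state)
{
	for (int type = 0; type < FRAME_MAX; type++)
		if (state->enabled_frames & (1u << type))
			printf("writing frame %d: %d\n", type, state->frames[type]);
}

int main(void)
{
	struct fake_crtc_state state = { 0 };

	if (!compute_frame(&state, FRAME_AVI, 42))
		return 1;              /* the driver returns -EINVAL here */

	write_frames(&state);
	return 0;
}

The same split also gives readout a natural home: intel_hdmi_get_config() above reads each frame back into pipe_config->infoframes.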
@@ -1941,7 +2501,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- if (IS_ICELAKE(dev_priv) &&
+ if (INTEL_GEN(dev_priv) >= 11 &&
!intel_digital_port_connected(encoder))
goto out;
@@ -2143,10 +2703,21 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
+
+ /*
+ * Attach the Colorspace property for non-LSPCON based devices.
+ * ToDo: This needs to be extended for LSPCON implementation
+ * as well. Will be implemented separately.
+ */
+ if (!intel_dig_port->lspcon.active)
+ intel_attach_colorspace_property(connector);
+
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
@@ -2331,14 +2902,14 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
return info->alternate_ddc_pin;
}
- if (IS_CHERRYVIEW(dev_priv))
- ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
- else if (IS_GEN9_LP(dev_priv))
- ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ if (HAS_PCH_ICP(dev_priv))
+ ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_ICP(dev_priv))
- ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+ else if (IS_GEN9_LP(dev_priv))
+ ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ else if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2355,33 +2926,36 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dig_port->write_infoframe = vlv_write_infoframe;
+ intel_dig_port->read_infoframe = vlv_read_infoframe;
intel_dig_port->set_infoframes = vlv_set_infoframes;
- intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
} else if (IS_G4X(dev_priv)) {
intel_dig_port->write_infoframe = g4x_write_infoframe;
+ intel_dig_port->read_infoframe = g4x_read_infoframe;
intel_dig_port->set_infoframes = g4x_set_infoframes;
- intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
if (intel_dig_port->lspcon.active) {
- intel_dig_port->write_infoframe =
- lspcon_write_infoframe;
+ intel_dig_port->write_infoframe = lspcon_write_infoframe;
+ intel_dig_port->read_infoframe = lspcon_read_infoframe;
intel_dig_port->set_infoframes = lspcon_set_infoframes;
- intel_dig_port->infoframe_enabled =
- lspcon_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
} else {
- intel_dig_port->set_infoframes = hsw_set_infoframes;
- intel_dig_port->infoframe_enabled =
- hsw_infoframe_enabled;
intel_dig_port->write_infoframe = hsw_write_infoframe;
+ intel_dig_port->read_infoframe = hsw_read_infoframe;
+ intel_dig_port->set_infoframes = hsw_set_infoframes;
+ intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
} else if (HAS_PCH_IBX(dev_priv)) {
intel_dig_port->write_infoframe = ibx_write_infoframe;
+ intel_dig_port->read_infoframe = ibx_read_infoframe;
intel_dig_port->set_infoframes = ibx_set_infoframes;
- intel_dig_port->infoframe_enabled = ibx_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
} else {
intel_dig_port->write_infoframe = cpt_write_infoframe;
+ intel_dig_port->read_infoframe = cpt_read_infoframe;
intel_dig_port->set_infoframes = cpt_set_infoframes;
- intel_dig_port->infoframe_enabled = cpt_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
}
}
@@ -2427,6 +3001,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi_add_properties(intel_hdmi, connector);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_hdmi->attached_connector = intel_connector;
+
if (is_hdcp_supported(dev_priv, port)) {
int ret = intel_hdcp_init(intel_connector,
&intel_hdmi_hdcp_shim);
@@ -2434,9 +3011,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
}
- intel_connector_attach_encoder(intel_connector, intel_encoder);
- intel_hdmi->attached_connector = intel_connector;
-
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
diff --git a/drivers/gpu/drm/i915/intel_hdmi.h b/drivers/gpu/drm/i915/intel_hdmi.h
new file mode 100644
index 000000000000..106c2e0bc3c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdmi.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDMI_H__
+#define __INTEL_HDMI_H__
+
+#include <linux/hdmi.h>
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+struct drm_connector;
+struct drm_encoder;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_hdmi;
+struct drm_connector_state;
+union hdmi_infoframe;
+
+void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
+ enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling);
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
+void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+u32 intel_hdmi_infoframe_enable(unsigned int type);
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+void intel_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ union hdmi_infoframe *frame);
+
+#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 9bd1c9002c2a..94c04f16a2ad 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -79,7 +79,7 @@ int intel_huc_auth(struct intel_huc *huc)
}
/* Check authentication status, it should be done by now */
- ret = __intel_wait_for_register(i915,
+ ret = __intel_wait_for_register(&i915->uncore,
HUC_STATUS2,
HUC_FW_VERIFIED,
HUC_FW_VERIFIED,
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 7d7bfc7f7ca7..68d47c105939 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -106,41 +106,46 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long offset = 0;
u32 size;
int ret;
GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
huc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+ intel_uncore_write(uncore, DMA_ADDR_0_LOW,
+ lower_32_bits(offset));
+ intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
+ upper_32_bits(offset) & 0xFFFF);
- /* Hardware doesn't look at destination address for HuC. Set it to 0,
+ /*
+ * Hardware doesn't look at destination address for HuC. Set it to 0,
* but still program the correct address space.
*/
- I915_WRITE(DMA_ADDR_1_LOW, 0);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+ intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
+ intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
size = huc_fw->header_size + huc_fw->ucode_size;
- I915_WRITE(DMA_COPY_SIZE, size);
+ intel_uncore_write(uncore, DMA_COPY_SIZE, size);
/* Start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+ intel_uncore_write(uncore, DMA_CTRL,
+ _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
/* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
/* Disable the bits once DMA is over */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+ intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
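The huc_fw conversion above is mechanical, but the shape is worth spelling out: the MMIO helpers now take the uncore handle instead of the whole drm_i915_private, so code that only touches registers no longer needs the full device pointer. Below is a stand-in sketch of that narrowing together with the address/size/start programming order used for the DMA transfer; the types, helpers and register indices are placeholders, not the i915 uncore API.

/* uncore_sketch.c - illustration only, stand-in types, not the i915 API */
#include <stdint.h>
#include <stdio.h>

struct fake_uncore { uint32_t regs[16]; };
struct fake_device { struct fake_uncore uncore; /* ...plus unrelated state... */ };

static void uncore_write(struct fake_uncore *u, unsigned int reg, uint32_t val)
{
	u->regs[reg] = val;
}

static uint32_t uncore_read(struct fake_uncore *u, unsigned int reg)
{
	return u->regs[reg];
}

/* Firmware upload only touches registers, so it only needs the uncore handle. */
static void program_dma(struct fake_uncore *uncore, uint64_t src, uint32_t size)
{
	uncore_write(uncore, 0, (uint32_t)src);                    /* ADDR_LOW  */
	uncore_write(uncore, 1, (uint32_t)(src >> 32) & 0xffff);   /* ADDR_HIGH */
	uncore_write(uncore, 2, size);                             /* COPY_SIZE */
	uncore_write(uncore, 3, 1u);                               /* start DMA */
}

int main(void)
{
	struct fake_device dev = { 0 };

	program_dma(&dev.uncore, 0x12340000ull, 4096);
	printf("dma ctrl: %u\n", uncore_read(&dev.uncore, 3));
	return 0;
}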
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 5a733e711355..422685d120e9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -348,7 +348,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
I915_WRITE_FW(GMBUS4, irq_enable);
- ret = intel_wait_for_register_fw(dev_priv,
+ ret = intel_wait_for_register_fw(&dev_priv->uncore,
GMBUS2, GMBUS_ACTIVE, 0,
10);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5e98fd79bd9d..4e0a351bfbca 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -164,20 +164,15 @@
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce);
+#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring);
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_INDEX_ADDR);
-}
-
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -188,6 +183,34 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority;
}
+static int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
+ /*
+ * On unwinding the active request, we give it a priority bump
+ * equivalent to a freshly submitted request. This protects it from
+ * being gazumped again, but it would be preferable if we didn't
+ * let it be gazumped in the first place!
+ *
+ * See __unwind_incomplete_requests()
+ */
+ if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
+ /*
+ * After preemption, we insert the active request at the
+ * end of the new priority level. This means that we will be
+ * _lower_ priority than the preemptee all things equal (and
+ * so the preemption is valid), so adjust our comparison
+ * accordingly.
+ */
+ prio |= ACTIVE_PRIORITY;
+ prio--;
+ }
+
+ /* Restrict mere WAIT boosts from triggering preemption */
+ return prio | __NO_PREEMPTION;
+}
+
static int queue_prio(const struct intel_engine_execlists *execlists)
{
struct i915_priolist *p;
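The comment in effective_prio() above argues that an already-running request, if unwound after preemption, should carry the same internal boost as a fresh submission but sort just behind one. A tiny worked example with stand-in priority bits (not the real I915_PRIORITY_* values):

/* active_priority_sketch.c - stand-in priority bits, illustration only */
#include <stdbool.h>
#include <stdio.h>

#define PRIO_NEWCLIENT   (1 << 0)
#define PRIO_NOSEMAPHORE (1 << 1)
#define ACTIVE_PRIO      (PRIO_NEWCLIENT | PRIO_NOSEMAPHORE)
#define USER_SHIFT       2             /* user priority sits above the boost bits */

static int effective(int prio, bool has_started)
{
	/* Only a request that already ran on the GPU gets the protection bump. */
	if ((~prio & ACTIVE_PRIO) && has_started) {
		prio |= ACTIVE_PRIO;       /* same boost as a fresh submission... */
		prio--;                    /* ...but sorted just behind one */
	}
	return prio;
}

int main(void)
{
	int base = 3 << USER_SHIFT;        /* some user priority, no boosts yet */

	printf("fresh request    : %d\n", base | ACTIVE_PRIO);    /* prints 15 */
	printf("unwound/preempted: %d\n", effective(base, true)); /* prints 14 */
	return 0;
}

With equal user priority, the boosted-then-decremented value sorts just below a fresh request at the same level, which is the adjustment the comparison against queue_priority_hint relies on.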
@@ -208,9 +231,9 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
static inline bool need_preempt(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
- const int last_prio = rq_prio(rq);
+ int last_prio;
- if (!intel_engine_has_preemption(engine))
+ if (!engine->preempt_context)
return false;
if (i915_request_completed(rq))
@@ -228,6 +251,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
*/
+ last_prio = effective_prio(rq);
if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
last_prio))
return false;
@@ -254,12 +278,11 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
}
__maybe_unused static inline bool
-assert_priority_queue(const struct intel_engine_execlists *execlists,
- const struct i915_request *prev,
+assert_priority_queue(const struct i915_request *prev,
const struct i915_request *next)
{
- if (!prev)
- return true;
+ const struct intel_engine_execlists *execlists =
+ &prev->engine->execlists;
/*
* Without preemption, the prev may refer to the still active element
@@ -300,11 +323,10 @@ assert_priority_queue(const struct intel_engine_execlists *execlists,
* engine info, SW context ID and SW counter need to form a unique number
* (Context ID) per lrc.
*/
-static void
-intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
+static u64
+lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
+ struct i915_gem_context *ctx = ce->gem_context;
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -322,7 +344,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
* anything below.
*/
- if (INTEL_GEN(ctx->i915) >= 11) {
+ if (INTEL_GEN(engine->i915) >= 11) {
GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
/* bits 37-47 */
@@ -339,7 +361,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
}
- ce->lrc_desc = desc;
+ return desc;
}
static void unwind_wa_tail(struct i915_request *rq)
@@ -353,7 +375,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *uninitialized_var(pl);
- int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
+ int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
lockdep_assert_held(&engine->timeline.lock);
@@ -384,9 +406,21 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* The active request is now effectively the start of a new client
* stream, so give it the equivalent small priority bump to prevent
* it being gazumped a second time by another peer.
+ *
+ * Note we have to be careful not to apply a priority boost to a request
+ * still spinning on its semaphores. If the request hasn't started, that
+ * means it is still waiting for its dependencies to be signaled, and
+ * if we apply a priority boost to this request, we will boost it past
+ * its signalers and so break PI.
+ *
+ * One consequence of this preemption boost is that we may jump
+ * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
+ * making those priorities non-preemptible. They will be moved forward
+ * in the priority queue, but they will not gain immediate access to
+ * the GPU.
*/
- if (!(prio & I915_PRIORITY_NEWCLIENT)) {
- prio |= I915_PRIORITY_NEWCLIENT;
+ if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
+ prio |= ACTIVE_PRIORITY;
active->sched.attr.priority = prio;
list_move_tail(&active->sched.link,
i915_sched_lookup_priolist(engine, prio));
@@ -395,13 +429,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
return active;
}
-void
+struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- __unwind_incomplete_requests(engine);
+ return __unwind_incomplete_requests(engine);
}
static inline void
@@ -523,13 +557,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
- GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
- rq->global_seqno,
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq),
- intel_engine_get_seqno(engine),
rq_prio(rq));
} else {
GEM_BUG_ON(!n);
@@ -564,6 +596,17 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
+static bool can_merge_rq(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+ if (!can_merge_ctx(prev->hw_context, next->hw_context))
+ return false;
+
+ return true;
+}
+
static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(rq == port_request(port));
@@ -577,8 +620,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
static void inject_preempt_context(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- struct intel_context *ce =
- to_intel_context(engine->i915->preempt_context, engine);
+ struct intel_context *ce = engine->preempt_context;
unsigned int n;
GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -716,8 +758,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
-
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -729,8 +769,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
- if (last &&
- !can_merge_ctx(rq->hw_context, last->hw_context)) {
+ if (last && !can_merge_rq(last, rq)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@@ -740,6 +779,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
goto done;
/*
+ * We must not populate both ELSP[] with the
+ * same LRCA, i.e. we must submit 2 different
+ * contexts if we submit 2 ELSP.
+ */
+ if (last->hw_context == rq->hw_context)
+ goto done;
+
+ /*
* If GVT overrides us we only ever submit
* port[0], leaving port[1] empty. Note that we
* also have to be careful that we don't queue
@@ -750,7 +797,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
ctx_single_port_submission(rq->hw_context))
goto done;
- GEM_BUG_ON(last->hw_context == rq->hw_context);
if (submit)
port_assign(port, last);
@@ -769,8 +815,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
+ i915_priolist_free(p);
}
done:
@@ -790,8 +835,7 @@ done:
* request triggering preemption on the next dequeue (or subsequent
* interrupt for secondary ports).
*/
- execlists->queue_priority_hint =
- port != execlists->port ? rq_prio(last) : INT_MIN;
+ execlists->queue_priority_hint = queue_prio(execlists);
if (submit) {
port_assign(port, last);
@@ -821,13 +865,11 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
while (num_ports-- && port_isset(port)) {
struct i915_request *rq = port_request(port);
- GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
+ GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
rq->engine->name,
(unsigned int)(port - execlists->port),
- rq->global_seqno,
rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(rq->engine));
+ hwsp_seqno(rq));
GEM_BUG_ON(!execlists->active);
execlists_context_schedule_out(rq,
@@ -851,104 +893,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
clflush((void *)last);
}
-static void reset_csb_pointers(struct intel_engine_execlists *execlists)
-{
- const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
-
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
-}
-
-static void nop_submission_tasklet(unsigned long data)
-{
- /* The driver is wedged; don't process any more events. */
-}
-
-static void execlists_cancel_requests(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq, *rn;
- struct rb_node *rb;
- unsigned long flags;
-
- GEM_TRACE("%s current %d\n",
- engine->name, intel_engine_get_seqno(engine));
-
- /*
- * Before we call engine->cancel_requests(), we should have exclusive
- * access to the submission state. This is arranged for us by the
- * caller disabling the interrupt generation, the tasklet and other
- * threads that may then access the same state, giving us a free hand
- * to reset state. However, we still need to let lockdep be aware that
- * we know this state may be accessed in hardirq context, so we
- * disable the irq around this manipulation and we want to keep
- * the spinlock focused on its duties and not accidentally conflate
- * coverage to the submission's irq state. (Similarly, although we
- * shouldn't need to disable irq around the manipulation of the
- * submission's irq state, we also wish to remind ourselves that
- * it is irq state.)
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
-
- /* Cancel the requests on the HW and clear the ELSP tracker. */
- execlists_cancel_port_requests(execlists);
- execlists_user_end(execlists);
-
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->timeline.requests, link) {
- GEM_BUG_ON(!rq->global_seqno);
-
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
- i915_request_mark_complete(rq);
- }
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- list_del_init(&rq->sched.link);
- __i915_request_submit(rq);
- dma_fence_set_error(&rq->fence, -EIO);
- i915_request_mark_complete(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
- }
-
- intel_write_status_page(engine,
- I915_GEM_HWS_INDEX,
- intel_engine_last_submit(engine));
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
-
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
-
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-}
-
static inline bool
reset_in_progress(const struct intel_engine_execlists *execlists)
{
@@ -960,6 +904,7 @@ static void process_csb(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
const u32 * const buf = execlists->csb_status;
+ const u8 num_entries = execlists->csb_size;
u8 head, tail;
lockdep_assert_held(&engine->timeline.lock);
@@ -995,7 +940,7 @@ static void process_csb(struct intel_engine_cs *engine)
unsigned int status;
unsigned int count;
- if (++head == GEN8_CSB_ENTRIES)
+ if (++head == num_entries)
head = 0;
/*
@@ -1052,14 +997,12 @@ static void process_csb(struct intel_engine_cs *engine)
EXECLISTS_ACTIVE_USER));
rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+ GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name,
port->context_id, count,
- rq ? rq->global_seqno : 0,
rq ? rq->fence.context : 0,
rq ? rq->fence.seqno : 0,
rq ? hwsp_seqno(rq) : 0,
- intel_engine_get_seqno(engine),
rq ? rq_prio(rq) : 0);
/* Check the context/desc id for this event matches */
@@ -1119,7 +1062,7 @@ static void process_csb(struct intel_engine_cs *engine)
* the wash as hardware, working or not, will need to do the
* invalidation before.
*/
- invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
+ invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
}
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
@@ -1196,19 +1139,50 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static void execlists_context_destroy(struct intel_context *ce)
+static void __execlists_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(ce->pin_count);
-
- if (!ce->state)
- return;
-
- intel_ring_free(ce->ring);
+ intel_ring_put(ce->ring);
GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
+static void execlists_context_destroy(struct kref *kref)
+{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (ce->state)
+ __execlists_context_fini(ce);
+
+ intel_context_free(ce);
+}
+
+static int __context_pin(struct i915_vma *vma)
+{
+ unsigned int flags;
+ int err;
+
+ flags = PIN_GLOBAL | PIN_HIGH;
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ return err;
+
+ vma->obj->pin_global++;
+ vma->obj->mm.dirty = true;
+
+ return 0;
+}
+
+static void __context_unpin(struct i915_vma *vma)
+{
+ vma->obj->pin_global--;
+ __i915_vma_unpin(vma);
+}
+
static void execlists_context_unpin(struct intel_context *ce)
{
struct intel_engine_cs *engine;
@@ -1237,41 +1211,19 @@ static void execlists_context_unpin(struct intel_context *ce)
intel_ring_unpin(ce->ring);
- ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj);
- i915_vma_unpin(ce->state);
-
- i915_gem_context_put(ce->gem_context);
-}
-
-static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
-{
- unsigned int flags;
- int err;
-
- /*
- * Clear this page out of any CPU caches for coherent swap-in/out.
- * We only want to do this on the first bind so that we do not stall
- * on an active context (which by nature is already on the GPU).
- */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- err = i915_gem_object_set_to_wc_domain(vma->obj, true);
- if (err)
- return err;
- }
-
- flags = PIN_GLOBAL | PIN_HIGH;
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- return i915_vma_pin(vma, 0, 0, flags);
+ __context_unpin(ce->state);
}
static void
-__execlists_update_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce)
+__execlists_update_reg_state(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
- u32 *regs = ce->lrc_reg_state;
struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD + 1] = ring->head;
@@ -1279,29 +1231,30 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
/* RPCS */
if (engine->class == RENDER_CLASS)
- regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
- &ce->sseu);
+ regs[CTX_R_PWR_CLK_STATE + 1] =
+ gen8_make_rpcs(engine->i915, &ce->sseu);
}
-static struct intel_context *
-__execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- struct intel_context *ce)
+static int
+__execlists_context_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
void *vaddr;
int ret;
- ret = execlists_context_deferred_alloc(ctx, engine, ce);
+ GEM_BUG_ON(!ce->gem_context->ppgtt);
+
+ ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
- ret = __context_pin(ctx, ce->state);
+ ret = __context_pin(ce->state);
if (ret)
goto err;
vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ctx->i915) |
+ i915_coherent_map_type(engine->i915) |
I915_MAP_OVERRIDE);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
@@ -1312,55 +1265,60 @@ __execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto unpin_map;
- ret = i915_gem_context_pin_hw_id(ctx);
+ ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
goto unpin_ring;
- intel_lr_context_descriptor_update(ctx, engine, ce);
-
- GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-
+ ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ __execlists_update_reg_state(ce, engine);
- __execlists_update_reg_state(engine, ce);
-
- ce->state->obj->pin_global++;
- i915_gem_context_get(ctx);
- return ce;
+ return 0;
unpin_ring:
intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_vma:
- __i915_vma_unpin(ce->state);
+ __context_unpin(ce->state);
err:
- ce->pin_count = 0;
- return ERR_PTR(ret);
+ return ret;
}
-static const struct intel_context_ops execlists_context_ops = {
- .unpin = execlists_context_unpin,
- .destroy = execlists_context_destroy,
-};
-
-static struct intel_context *
-execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static int execlists_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(!ctx->ppgtt);
+ return __execlists_context_pin(ce, ce->engine);
+}
- if (likely(ce->pin_count++))
- return ce;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+static void execlists_context_reset(struct intel_context *ce)
+{
+ /*
+ * Because we emit WA_TAIL_DWORDS there may be a disparity
+ * between our bookkeeping in ce->ring->head and ce->ring->tail and
+ * that stored in context. As we only write new commands from
+ * ce->ring->tail onwards, everything before that is junk. If the GPU
+ * starts reading from its RING_HEAD from the context, it may try to
+ * execute that junk and die.
+ *
+ * The contexts that are still pinned on resume belong to the
+ * kernel, and are local to each engine. All other contexts will
+ * have their head/tail sanitized upon pinning before use, so they
+ * will never see garbage.
+ *
+ * So to avoid that we reset the context images upon resume. For
+ * simplicity, we just zero everything out.
+ */
+ intel_ring_reset(ce->ring, 0);
+ __execlists_update_reg_state(ce, ce->engine);
+}
- ce->ops = &execlists_context_ops;
+static const struct intel_context_ops execlists_context_ops = {
+ .pin = execlists_context_pin,
+ .unpin = execlists_context_unpin,
- return __execlists_context_pin(engine, ctx, ce);
-}
+ .reset = execlists_context_reset,
+ .destroy = execlists_context_destroy,
+};
static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
@@ -1387,6 +1345,10 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = rq->fence.seqno - 1;
intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
return 0;
}
@@ -1424,10 +1386,11 @@ static int emit_pdps(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
*cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
*cs++ = lower_32_bits(pd_daddr);
}
*cs++ = MI_NOOP;
@@ -1447,7 +1410,7 @@ static int execlists_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!request->hw_context->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -1465,7 +1428,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+ if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@@ -1732,7 +1695,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (GEM_DEBUG_WARN_ON(engine->id != RCS))
+ if (GEM_DEBUG_WARN_ON(engine->id != RCS0))
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
@@ -1796,17 +1759,9 @@ static void enable_execlists(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
- /*
- * Make sure we're not enabling the new 12-deep CSB
- * FIFO as that requires a slightly updated handling
- * in the ctx switch irq. Since we're currently only
- * using only 2 elements of the enhanced execlists the
- * deeper FIFO it's not needed and it's not worth adding
- * more statements to the irq handler to support it.
- */
if (INTEL_GEN(dev_priv) >= 11)
I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_DISABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
else
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@@ -1872,20 +1827,72 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
GEM_BUG_ON(!reset_in_progress(execlists));
+ intel_engine_stop_cs(engine);
+
/* And flush any current direct submission. */
spin_lock_irqsave(&engine->timeline.lock, flags);
- process_csb(engine); /* drain preemption events */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static bool lrc_regs_ok(const struct i915_request *rq)
+{
+ const struct intel_ring *ring = rq->ring;
+ const u32 *regs = rq->hw_context->lrc_reg_state;
+
+ /* Quick spot check for the common signs of context corruption */
+
+ if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID))
+ return false;
+
+ if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
+ return false;
+
+ return true;
+}
+
+static void reset_csb_pointers(struct intel_engine_execlists *execlists)
+{
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+}
+
+static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_context *ce;
struct i915_request *rq;
- unsigned long flags;
u32 *regs;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ process_csb(engine); /* drain preemption events */
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(&engine->execlists);
+
+ /*
+ * Save the currently executing context, even if we completed
+ * its request, it was still running at the time of the
+ * reset and will have been clobbered.
+ */
+ if (!port_isset(execlists->port))
+ goto out_clear;
+
+ ce = port_request(execlists->port)->hw_context;
/*
* Catch up with any missed context-switch interrupts.
@@ -1900,17 +1907,28 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
/* Push back any incomplete requests for replay after the reset. */
rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ goto out_replay;
- /* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(&engine->execlists);
+ if (rq->hw_context != ce) { /* caught just before a CS event */
+ rq = NULL;
+ goto out_replay;
+ }
- GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
- engine->name,
- rq ? rq->global_seqno : 0,
- intel_engine_get_seqno(engine),
- yesno(stalled));
- if (!rq)
- goto out_unlock;
+ /*
+ * If this request hasn't started yet, e.g. it is waiting on a
+ * semaphore, we need to avoid skipping the request or else we
+ * break the signaling chain. However, if the context is corrupt
+ * the request will not restart and we will be stuck with a wedged
+ * device. It is quite often the case that if we issue a reset
+ * while the GPU is loading the context image, that the context
+ * image becomes corrupt.
+ *
+ * Otherwise, if we have not started yet, the request should replay
+ * perfectly and we do not need to flag the result as being erroneous.
+ */
+ if (!i915_request_started(rq) && lrc_regs_ok(rq))
+ goto out_replay;
/*
* If the request was innocent, we leave the request in the ELSP
@@ -1924,8 +1942,8 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
* image back to the expected values to skip over the guilty request.
*/
i915_reset_request(rq, stalled);
- if (!stalled)
- goto out_unlock;
+ if (!stalled && lrc_regs_ok(rq))
+ goto out_replay;
/*
* We want a simple context + ring to execute the breadcrumb update.
@@ -1935,21 +1953,103 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = rq->hw_context->lrc_reg_state;
+ regs = ce->lrc_reg_state;
if (engine->pinned_default_state) {
memcpy(regs, /* skip restoring the vanilla PPHWSP */
engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
engine->context_size - PAGE_SIZE);
}
+ execlists_init_reg_state(regs, ce, engine, ce->ring);
- /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
- rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix);
- intel_ring_update_space(rq->ring);
+ /* Rerun the request; its payload has been neutered (if guilty). */
+out_replay:
+ ce->ring->head =
+ rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
+ intel_ring_update_space(ce->ring);
+ __execlists_update_reg_state(ce, engine);
+
+out_clear:
+ execlists_clear_all_active(execlists);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
- execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
- __execlists_update_reg_state(engine, rq->hw_context);
+ __execlists_reset(engine, stalled);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+ /* The driver is wedged; don't process any more events. */
+}
+
+static void execlists_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ __execlists_reset(engine, true);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+
+ i915_request_mark_complete(rq);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
+ __i915_request_submit(rq);
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+ GEM_BUG_ON(port_isset(execlists->port));
+
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
-out_unlock:
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
@@ -1961,13 +2061,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
* After a GPU reset, we may have requests to replay. Do so now while
* we still have the forcewake to be sure that the GPU is not allowed
* to sleep before we restart and reload a context.
- *
*/
GEM_BUG_ON(!reset_in_progress(execlists));
if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
execlists->tasklet.func(execlists->tasklet.data);
- tasklet_enable(&execlists->tasklet);
+ if (__tasklet_enable(&execlists->tasklet))
+ /* And kick in case we missed a new request submission. */
+ tasklet_hi_schedule(&execlists->tasklet);
GEM_TRACE("%s: depth->%d\n", engine->name,
atomic_read(&execlists->tasklet.count));
}
@@ -1978,7 +2079,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
{
u32 *cs;
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1989,19 +2090,37 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* particular all the gen that do not need the w/a at all!), if we
* took care to make sure that on every switch into this context
* (both ordinary and for preemption) that arbitration was enabled
- * we would be fine. However, there doesn't seem to be a downside to
- * being paranoid and making sure it is set before each batch and
- * every context-switch.
- *
- * Note that if we fail to enable arbitration before the request
- * is complete, then we do not see the context-switch interrupt and
- * the engine hangs (with RING_HEAD == RING_TAIL).
- *
- * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
+ * we would be fine. However, for gen8 there is another w/a that
+ * requires us to not preempt inside GPGPU execution, so we keep
+ * arbitration disabled for gen8 batches. Arbitration will be
+ * re-enabled before we close the request
+ * (engine->emit_fini_breadcrumb).
*/
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* FIXME(BDW+): Address space and security selectors. */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen9_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- /* FIXME(BDW): Address space and security selectors. */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
@@ -2017,16 +2136,14 @@ static int gen8_emit_bb_start(struct i915_request *rq,
static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ ENGINE_POSTING_READ(engine, RING_IMR);
}
static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
}
static int gen8_emit_flush(struct i915_request *request, u32 mode)
@@ -2148,16 +2265,16 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
- /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
- BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
-
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
- request->timeline->hwsp_offset);
+ request->timeline->hwsp_offset,
+ 0);
cs = gen8_emit_ggtt_write(cs,
- request->global_seqno,
- intel_hws_seqno_address(request->engine));
+ intel_engine_next_hangcheck_seqno(request->engine),
+ I915_GEM_HWS_HANGCHECK_ADDR,
+ MI_FLUSH_DW_STORE_INDEX);
+
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2180,9 +2297,9 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_CS_STALL);
cs = gen8_emit_ggtt_write_rcs(cs,
- request->global_seqno,
- intel_hws_seqno_address(request->engine),
- PIPE_CONTROL_CS_STALL);
+ intel_engine_next_hangcheck_seqno(request->engine),
+ I915_GEM_HWS_HANGCHECK_ADDR,
+ PIPE_CONTROL_STORE_DATA_INDEX);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2231,7 +2348,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915;
if (engine->buffer) {
- WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
}
if (engine->cleanup)
@@ -2254,19 +2371,18 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.reset = execlists_reset;
+ engine->reset.finish = execlists_reset_finish;
engine->park = NULL;
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (engine->i915->preempt_context)
+ if (!intel_vgpu_active(engine->i915))
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (engine->preempt_context &&
+ HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-
- engine->i915->caps.scheduler =
- I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY;
- if (intel_engine_has_preemption(engine))
- engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
}
static void
@@ -2279,7 +2395,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->reset.reset = execlists_reset;
engine->reset.finish = execlists_reset_finish;
- engine->context_pin = execlists_context_pin;
+ engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
@@ -2299,7 +2415,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
* until a more refined solution exists.
*/
}
- engine->emit_bb_start = gen8_emit_bb_start;
+ if (IS_GEN(engine->i915, 8))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen9_emit_bb_start;
}
static inline void
@@ -2309,11 +2428,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) < 11) {
const u8 irq_shifts[] = {
- [RCS] = GEN8_RCS_IRQ_SHIFT,
- [BCS] = GEN8_BCS_IRQ_SHIFT,
- [VCS] = GEN8_VCS1_IRQ_SHIFT,
- [VCS2] = GEN8_VCS2_IRQ_SHIFT,
- [VECS] = GEN8_VECS_IRQ_SHIFT,
+ [RCS0] = GEN8_RCS_IRQ_SHIFT,
+ [BCS0] = GEN8_BCS_IRQ_SHIFT,
+ [VCS0] = GEN8_VCS0_IRQ_SHIFT,
+ [VCS1] = GEN8_VCS1_IRQ_SHIFT,
+ [VECS0] = GEN8_VECS_IRQ_SHIFT,
};
shift = irq_shifts[engine->id];
@@ -2348,6 +2467,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
+ u32 base = engine->mmio_base;
int ret;
ret = intel_engine_init_common(engine);
@@ -2357,23 +2477,19 @@ static int logical_ring_init(struct intel_engine_cs *engine)
intel_engine_init_workarounds(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = i915->regs +
- i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
- execlists->ctrl_reg = i915->regs +
- i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
+ execlists->submit_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ execlists->ctrl_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
} else {
- execlists->submit_reg = i915->regs +
- i915_mmio_reg_offset(RING_ELSP(engine));
+ execlists->submit_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
}
execlists->preempt_complete_status = ~0u;
- if (i915->preempt_context) {
- struct intel_context *ce =
- to_intel_context(i915->preempt_context, engine);
-
+ if (engine->preempt_context)
execlists->preempt_complete_status =
- upper_32_bits(ce->lrc_desc);
- }
+ upper_32_bits(engine->preempt_context->lrc_desc);
execlists->csb_status =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2381,6 +2497,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
execlists->csb_write =
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
+ if (INTEL_GEN(engine->i915) < 11)
+ execlists->csb_size = GEN8_CSB_ENTRIES;
+ else
+ execlists->csb_size = GEN11_CSB_ENTRIES;
+
reset_csb_pointers(execlists);
return 0;
@@ -2592,13 +2713,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
}
static void execlists_init_reg_state(u32 *regs,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
- struct drm_i915_private *dev_priv = engine->i915;
- u32 base = engine->mmio_base;
+ struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
bool rcs = engine->class == RENDER_CLASS;
+ u32 base = engine->mmio_base;
/* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2610,10 +2731,10 @@ static void execlists_init_reg_state(u32 *regs,
regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
MI_LRI_FORCE_POSTED;
- CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
+ CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
- if (INTEL_GEN(dev_priv) < 11) {
+ if (INTEL_GEN(engine->i915) < 11) {
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
@@ -2659,42 +2780,42 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
/* PDP values will be assigned later if needed */
- CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
- CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
- CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
- CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
- CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
- CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
- CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
- CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
-
- if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
+ CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
+ CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
+ CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
+ CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
+ CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
+ CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
+ CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
+ CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
- ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+ ASSIGN_CTX_PML4(ppgtt, regs);
} else {
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
+ ASSIGN_CTX_PDP(ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
- i915_oa_init_reg_state(engine, ctx, regs);
+ i915_oa_init_reg_state(engine, ce, regs);
}
regs[CTX_END] = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(engine->i915) >= 10)
regs[CTX_END] |= BIT(0);
}
static int
-populate_lr_context(struct i915_gem_context *ctx,
+populate_lr_context(struct intel_context *ce,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ring *ring)
@@ -2703,19 +2824,12 @@ populate_lr_context(struct i915_gem_context *ctx,
u32 *regs;
int ret;
- ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
- if (ret) {
- DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
- return ret;
- }
-
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
- ctx_obj->mm.dirty = true;
if (engine->default_state) {
/*
@@ -2740,23 +2854,35 @@ populate_lr_context(struct i915_gem_context *ctx,
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ctx, engine, ring);
+ execlists_init_reg_state(regs, ce, engine, ring);
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
+ if (ce->gem_context == engine->i915->preempt_context &&
+ INTEL_GEN(engine->i915) < 11)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
+ ret = 0;
err_unpin_ctx:
+ __i915_gem_object_flush_map(ctx_obj,
+ LRC_HEADER_PAGES * PAGE_SIZE,
+ engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
return ret;
}
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
+static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+{
+ if (ctx->timeline)
+ return i915_timeline_get(ctx->timeline);
+ else
+ return i915_timeline_create(ctx->i915, NULL);
+}
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
struct i915_vma *vma;
@@ -2776,30 +2902,32 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
*/
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
- ctx_obj = i915_gem_object_create(ctx->i915, context_size);
+ ctx_obj = i915_gem_object_create(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
}
- timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
+ timeline = get_timeline(ce->gem_context);
if (IS_ERR(timeline)) {
ret = PTR_ERR(timeline);
goto error_deref_obj;
}
- ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+ ring = intel_engine_create_ring(engine,
+ timeline,
+ ce->gem_context->ring_size);
i915_timeline_put(timeline);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
}
- ret = populate_lr_context(ctx, ctx_obj, engine, ring);
+ ret = populate_lr_context(ce, ctx_obj, engine, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error_ring_free;
@@ -2811,45 +2939,12 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
return 0;
error_ring_free:
- intel_ring_free(ring);
+ intel_ring_put(ring);
error_deref_obj:
i915_gem_object_put(ctx_obj);
return ret;
}
-void intel_lr_context_resume(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- enum intel_engine_id id;
-
- /*
- * Because we emit WA_TAIL_DWORDS there may be a disparity
- * between our bookkeeping in ce->ring->head and ce->ring->tail and
- * that stored in context. As we only write new commands from
- * ce->ring->tail onwards, everything before that is junk. If the GPU
- * starts reading from its RING_HEAD from the context, it may try to
- * execute that junk and die.
- *
- * So to avoid that we reset the context images upon resume. For
- * simplicity, we just zero everything out.
- */
- list_for_each_entry(ctx, &i915->contexts.list, link) {
- for_each_engine(engine, i915, id) {
- struct intel_context *ce =
- to_intel_context(ctx, engine);
-
- if (!ce->state)
- continue;
-
- intel_ring_reset(ce->ring, 0);
-
- if (ce->pin_count) /* otherwise done in context_pin */
- __execlists_update_reg_state(engine, ce);
- }
- }
-}
-
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -2910,6 +3005,37 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
+void intel_lr_context_reset(struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 head,
+ bool scrub)
+{
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ if (scrub) {
+ u32 *regs = ce->lrc_reg_state;
+
+ if (engine->pinned_default_state) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+ }
+ execlists_init_reg_state(regs, ce, engine, ce->ring);
+ }
+
+ /* Rerun the request; its payload has been neutered (if guilty). */
+ ce->ring->head = head;
+ intel_ring_update_space(ce->ring);
+
+ __execlists_update_reg_state(ce, engine);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f1aec8a6986f..84aa230ea27b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -28,20 +28,18 @@
#include "i915_gem_context.h"
/* Execlists regs */
-#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
-#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234)
-#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244)
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
-#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
+#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
-#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510)
-#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+
#define EL_CTRL_LOAD (1 << 0)
/* The docs specify that the write pointer wraps around after 5h, "After status
@@ -55,10 +53,11 @@
#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-#define GEN8_CSB_WRITE_PTR(csb_status) \
- (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
-#define GEN8_CSB_READ_PTR(csb_status) \
- (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
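
With the GEN8_CSB_{READ,WRITE}_PTR() macros removed above, here is a sketch of how the gen11 pointers could be unpacked from the RING_CONTEXT_STATUS_PTR value, assuming the same field placement as the gen8 layout; these helpers are hypothetical and are not added by the patch:

    #include <linux/types.h>

    /* Hypothetical helpers mirroring the removed GEN8_CSB_*_PTR() macros. */
    static inline unsigned int gen11_csb_write_ptr(u32 status)
    {
    	return (status & GEN11_CSB_WRITE_PTR_MASK) >> 0;
    }

    static inline unsigned int gen11_csb_read_ptr(u32 status)
    {
    	return (status & GEN11_CSB_READ_PTR_MASK) >> 8;
    }
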
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
@@ -102,9 +101,13 @@ struct drm_printer;
struct drm_i915_private;
struct i915_gem_context;
-void intel_lr_context_resume(struct drm_i915_private *dev_priv);
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
+void intel_lr_context_reset(struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 head,
+ bool scrub);
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 322bdddda164..7028d0cf3bb1 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -22,10 +22,14 @@
*
*
*/
-#include <drm/drm_edid.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/drm_edid.h>
+
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
@@ -452,6 +456,14 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ /* FIXME implement this */
+}
+
void lspcon_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -470,6 +482,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
return;
}
+ /* FIXME precompute infoframes */
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
conn_state->connector,
adjusted_mode);
@@ -504,9 +518,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
buf, ret);
}
-bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ /* FIXME actually read this from the hw */
return enc_to_intel_lspcon(&encoder->base)->active;
}
diff --git a/drivers/gpu/drm/i915/intel_lspcon.h b/drivers/gpu/drm/i915/intel_lspcon.h
new file mode 100644
index 000000000000..37cfddf8a9c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lspcon.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LSPCON_H__
+#define __INTEL_LSPCON_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_lspcon;
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *buf, ssize_t len);
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_LSPCON_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b4aa49768e90..51d1d59c1619 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -28,17 +28,22 @@
*/
#include <acpi/button.h>
+#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
-#include <linux/acpi.h>
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
/* Private structure for the integrated LVDS support */
struct intel_lvds_pps {
@@ -152,24 +157,17 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
val = I915_READ(PP_ON_DELAYS(0));
- pps->port = (val & PANEL_PORT_SELECT_MASK) >>
- PANEL_PORT_SELECT_SHIFT;
- pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
- pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
+ pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
+ pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
+ pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
val = I915_READ(PP_OFF_DELAYS(0));
- pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
- pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
+ pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
+ pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
val = I915_READ(PP_DIVISOR(0));
- pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >>
- PP_REFERENCE_DIVIDER_SHIFT;
- val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT;
+ pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
+ val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
* Remove the BSpec specified +1 (100ms) offset that accounts for a
* too short power-cycle delay due to the asynchronous programming of
@@ -209,16 +207,19 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
val |= PANEL_POWER_RESET;
I915_WRITE(PP_CONTROL(0), val);
- I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) |
- (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) |
- (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT));
- I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) |
- (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT));
+ I915_WRITE(PP_ON_DELAYS(0),
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+
+ I915_WRITE(PP_OFF_DELAYS(0),
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
- val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT;
- val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) <<
- PANEL_POWER_CYCLE_DELAY_SHIFT;
- I915_WRITE(PP_DIVISOR(0), val);
+ I915_WRITE(PP_DIVISOR(0),
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(pps->t4, 1000) + 1));
}
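
The two hunks above swap open-coded shift-and-mask arithmetic for REG_FIELD_GET()/REG_FIELD_PREP(). Assuming those behave like the generic FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>, the round trip looks roughly like this; the mask below is made up for illustration:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define EXAMPLE_DELAY_MASK	GENMASK(28, 16)	/* illustrative field only */

    static u32 pack_delay(u32 reg, u32 delay)
    {
    	/* Clear the field, then place the new value at the field's offset. */
    	reg &= ~EXAMPLE_DELAY_MASK;
    	return reg | FIELD_PREP(EXAMPLE_DELAY_MASK, delay);
    }

    static u32 unpack_delay(u32 reg)
    {
    	/* Extract the field and shift it back down to bit 0. */
    	return FIELD_GET(EXAMPLE_DELAY_MASK, reg);
    }
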
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@@ -315,7 +316,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PP_STATUS(0), PP_ON, PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -329,7 +331,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PP_STATUS(0), PP_ON, 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -746,20 +749,21 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
{
- struct intel_encoder *intel_encoder;
+ struct intel_encoder *encoder;
- for_each_intel_encoder(dev, intel_encoder)
- if (intel_encoder->type == INTEL_OUTPUT_LVDS)
- return intel_encoder;
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_LVDS)
+ return encoder;
+ }
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
}
@@ -813,7 +817,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
@@ -952,30 +955,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_connector->edid = edid;
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- DRM_DEBUG_KMS("using preferred mode from EDID: ");
- drm_mode_debug_printmodeline(scan);
-
- fixed_mode = drm_mode_duplicate(dev, scan);
- if (fixed_mode)
- goto out;
- }
- }
+ fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
+ if (fixed_mode)
+ goto out;
/* Failed to get EDID, what about VBT? */
- if (dev_priv->vbt.lfp_lvds_vbt_mode) {
- DRM_DEBUG_KMS("using mode from VBT: ");
- drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
-
- fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode) {
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
- goto out;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
+ if (fixed_mode)
+ goto out;
/*
* If we didn't get EDID, try checking if the panel is already turned
diff --git a/drivers/gpu/drm/i915/intel_lvds.h b/drivers/gpu/drm/i915/intel_lvds.h
new file mode 100644
index 000000000000..bc9c8b84ba2f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lvds.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LVDS_H__
+#define __INTEL_LVDS_H__
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_i915_private;
+
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe);
+void intel_lvds_init(struct drm_i915_private *dev_priv);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_LVDS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 331e7a678fb7..274ba78500c0 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -252,7 +252,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
table->size = ARRAY_SIZE(icelake_mocs_table);
table->table = icelake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
@@ -288,17 +288,17 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
{
switch (engine_id) {
- case RCS:
+ case RCS0:
return GEN9_GFX_MOCS(index);
- case VCS:
+ case VCS0:
return GEN9_MFX0_MOCS(index);
- case BCS:
+ case BCS0:
return GEN9_BLT_MOCS(index);
- case VECS:
+ case VECS0:
return GEN9_VEBOX_MOCS(index);
- case VCS2:
+ case VCS1:
return GEN9_MFX1_MOCS(index);
- case VCS3:
+ case VCS2:
return GEN11_MFX2_MOCS(index);
default:
MISSING_CASE(engine_id);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5e00ee9270b5..8fa1159d097f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -32,9 +32,10 @@
#include <drm/i915_drm.h>
-#include "intel_opregion.h"
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_opregion.h"
+#include "intel_panel.h"
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c0df1dbb0069..eb317759b5d3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -236,7 +236,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS0];
return i915_request_alloc(engine, dev_priv->kernel_context);
}
@@ -446,7 +446,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct i915_request *rq;
@@ -1430,7 +1430,7 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
error->dovsta = I915_READ(DOVSTA);
- error->isr = I915_READ(ISR);
+ error->isr = I915_READ(GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index beca98d2b035..4ab4ce6569e7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -33,7 +33,10 @@
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/pwm.h>
+
+#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_panel.h"
#define CRC_PMIC_PWM_PERIOD_NS 21333
@@ -46,27 +49,26 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
-/**
- * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev_priv: i915 device instance
- * @fixed_mode : panel native mode
- * @connector: LVDS/eDP connector
- *
- * Return downclock_avail
- * Find the reduced downclock for LVDS/eDP in EDID.
- */
-struct drm_display_mode *
-intel_find_panel_downclock(struct drm_i915_private *dev_priv,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector)
+static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
+ const struct drm_display_mode *fixed_mode)
{
- struct drm_display_mode *scan, *tmp_mode;
- int temp_downclock;
+ return drm_mode_match(downclock_mode, fixed_mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ downclock_mode->clock < fixed_mode->clock;
+}
- temp_downclock = fixed_mode->clock;
- tmp_mode = NULL;
+struct drm_display_mode *
+intel_panel_edid_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *fixed_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *scan, *best_mode = NULL;
+ struct drm_display_mode *downclock_mode;
+ int best_clock = fixed_mode->clock;
- list_for_each_entry(scan, &connector->probed_modes, head) {
+ list_for_each_entry(scan, &connector->base.probed_modes, head) {
/*
* If one mode has the same resolution as the fixed_panel
* mode but a different refresh rate, it means
@@ -74,29 +76,98 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv,
* case we can set the different FPx0/1 to dynamically select
* between low and high frequency.
*/
- if (scan->hdisplay == fixed_mode->hdisplay &&
- scan->hsync_start == fixed_mode->hsync_start &&
- scan->hsync_end == fixed_mode->hsync_end &&
- scan->htotal == fixed_mode->htotal &&
- scan->vdisplay == fixed_mode->vdisplay &&
- scan->vsync_start == fixed_mode->vsync_start &&
- scan->vsync_end == fixed_mode->vsync_end &&
- scan->vtotal == fixed_mode->vtotal) {
- if (scan->clock < temp_downclock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- temp_downclock = scan->clock;
- tmp_mode = scan;
- }
+ if (is_downclock_mode(scan, fixed_mode) &&
+ scan->clock < best_clock) {
+ /*
+ * A downclock mode has already been found, but
+ * keep looking for an even lower clock.
+ */
+ best_clock = scan->clock;
+ best_mode = scan;
}
}
- if (temp_downclock < fixed_mode->clock)
- return drm_mode_duplicate(&dev_priv->drm, tmp_mode);
- else
+ if (!best_mode)
+ return NULL;
+
+ downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode);
+ if (!downclock_mode)
+ return NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(downclock_mode);
+
+ return downclock_mode;
+}
+
+struct drm_display_mode *
+intel_panel_edid_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *scan;
+ struct drm_display_mode *fixed_mode;
+
+ if (list_empty(&connector->base.probed_modes))
+ return NULL;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->base.probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0)
+ continue;
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+ if (!fixed_mode)
+ return NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ return fixed_mode;
+ }
+
+ scan = list_first_entry(&connector->base.probed_modes,
+ typeof(*scan), head);
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+ if (!fixed_mode)
return NULL;
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ return fixed_mode;
+}
+
+struct drm_display_mode *
+intel_panel_vbt_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_display_info *info = &connector->base.display_info;
+ struct drm_display_mode *fixed_mode;
+
+ if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+ return NULL;
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm,
+ dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (!fixed_mode)
+ return NULL;
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ info->width_mm = fixed_mode->width_mm;
+ info->height_mm = fixed_mode->height_mm;
+
+ return fixed_mode;
}
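
The intel_lvds.c hunk earlier in this patch is the consumer of these helpers; as a usage sketch, a panel probe path tries the EDID-preferred mode first and falls back to the VBT mode (the wrapper name here is illustrative, not part of the patch):

    /* Illustrative wrapper; intel_lvds_init() above performs the same fallback. */
    static struct drm_display_mode *
    probe_fixed_mode(struct intel_connector *connector)
    {
    	struct drm_display_mode *fixed_mode;

    	fixed_mode = intel_panel_edid_fixed_mode(connector);
    	if (fixed_mode)
    		return fixed_mode;

    	return intel_panel_vbt_fixed_mode(connector);
    }
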
/* adjusted_mode has been preset to be the panel's fixed mode */
@@ -1894,15 +1965,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) {
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
panel->backlight.setup = cnp_setup_backlight;
panel->backlight.enable = cnp_enable_backlight;
panel->backlight.disable = cnp_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
- } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv)) {
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_panel.h b/drivers/gpu/drm/i915/intel_panel.h
new file mode 100644
index 000000000000..cedeea443336
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PANEL_H__
+#define __INTEL_PANEL_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_panel;
+
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *downclock_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ int fitting_mode);
+void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
+ u32 level, u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector,
+ enum pipe pipe);
+void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_update_backlight(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
+struct drm_display_mode *
+intel_panel_edid_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *fixed_mode);
+struct drm_display_mode *
+intel_panel_edid_fixed_mode(struct intel_connector *connector);
+struct drm_display_mode *
+intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index a8554dc4f196..e7c7be4911c1 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -24,23 +24,29 @@
*
*/
-#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
#include "intel_drv.h"
+#include "intel_pipe_crc.h"
static const char * const pipe_crc_sources[] = {
- "none",
- "plane1",
- "plane2",
- "pf",
- "pipe",
- "TV",
- "DP-B",
- "DP-C",
- "DP-D",
- "auto",
+ [INTEL_PIPE_CRC_SOURCE_NONE] = "none",
+ [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1",
+ [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2",
+ [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3",
+ [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4",
+ [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5",
+ [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6",
+ [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7",
+ [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe",
+ [INTEL_PIPE_CRC_SOURCE_TV] = "TV",
+ [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B",
+ [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C",
+ [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D",
+ [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto",
};
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -192,8 +198,6 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source,
u32 *val)
{
- bool need_stable_symbols = false;
-
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
if (ret)
@@ -209,56 +213,23 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
- need_stable_symbols = true;
- break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
default:
+ /*
+ * The DP CRC source doesn't work on g4x.
+ * It can be made to work to some degree by selecting
+ * the correct CRC source before the port is enabled,
+ * and not touching the CRC source bits again until
+ * the port is disabled. But even then the bits
+ * eventually get stuck and a reboot is needed to get
+ * working CRCs on the pipe again. Let's simply
+ * refuse to use DP CRCs on g4x.
+ */
return -EINVAL;
}
- /*
- * When the pipe CRC tap point is after the transcoders we need
- * to tweak symbol-level features to produce a deterministic series of
- * symbols for a given frame. We need to reset those features only once
- * a frame (instead of every nth symbol):
- * - DC-balance: used to ensure a better clock recovery from the data
- * link (SDVO)
- * - DisplayPort scrambling: used for EMI reduction
- */
- if (need_stable_symbols) {
- u32 tmp = I915_READ(PORT_DFT2_G4X);
-
- WARN_ON(!IS_G4X(dev_priv));
-
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
-
- if (pipe == PIPE_A)
- tmp |= PIPE_A_SCRAMBLE_RESET;
- else
- tmp |= PIPE_B_SCRAMBLE_RESET;
-
- I915_WRITE(PORT_DFT2_G4X, tmp);
- }
-
return 0;
}
@@ -283,24 +254,6 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
I915_WRITE(PORT_DFT2_G4X, tmp);
-
-}
-
-static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 tmp = I915_READ(PORT_DFT2_G4X);
-
- if (pipe == PIPE_A)
- tmp &= ~PIPE_A_SCRAMBLE_RESET;
- else
- tmp &= ~PIPE_B_SCRAMBLE_RESET;
- I915_WRITE(PORT_DFT2_G4X, tmp);
-
- if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
- }
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -329,19 +282,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
- bool enable)
+static void
+intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
- int ret = 0;
+ int ret;
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@@ -356,20 +308,20 @@ retry:
goto put_state;
}
- if (HAS_IPS(dev_priv)) {
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- pipe_config->ips_force_disable = enable;
- }
+ pipe_config->base.mode_changed = pipe_config->has_psr;
+ pipe_config->crc_enabled = enable;
+
+ if (IS_HASWELL(dev_priv) &&
+ pipe_config->base.active && crtc->pipe == PIPE_A &&
+ pipe_config->cpu_transcoder == TRANSCODER_EDP) {
+ bool old_need_power_well = pipe_config->pch_pfit.enabled ||
+ pipe_config->pch_pfit.force_thru;
+ bool new_need_power_well = pipe_config->pch_pfit.enabled ||
+ enable;
- if (IS_HASWELL(dev_priv)) {
pipe_config->pch_pfit.force_thru = enable;
- if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
- pipe_config->pch_pfit.enabled != enable)
+
+ if (old_need_power_well != new_need_power_well)
pipe_config->base.connectors_changed = true;
}
@@ -392,11 +344,10 @@ unlock:
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- u32 *val,
- bool set_wa)
+ u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PF;
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PLANE1:
@@ -405,11 +356,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
case INTEL_PIPE_CRC_SOURCE_PLANE2:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
- case INTEL_PIPE_CRC_SOURCE_PF:
- if (set_wa && (IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, true);
-
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -422,10 +369,52 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
+static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE3:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE4:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE5:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE6:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE7:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
- enum intel_pipe_crc_source *source, u32 *val,
- bool set_wa)
+ enum intel_pipe_crc_source *source, u32 *val)
{
if (IS_GEN(dev_priv, 2))
return i8xx_pipe_crc_ctl_reg(source, val);
@@ -435,8 +424,10 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_pipe_crc_ctl_reg(source, val);
+ else if (INTEL_GEN(dev_priv) < 9)
+ return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
+ return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
}
static int
@@ -486,9 +477,6 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
switch (source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_TV:
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- case INTEL_PIPE_CRC_SOURCE_DP_D:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
@@ -532,7 +520,25 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_PLANE1:
case INTEL_PIPE_CRC_SOURCE_PLANE2:
- case INTEL_PIPE_CRC_SOURCE_PF:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_PLANE3:
+ case INTEL_PIPE_CRC_SOURCE_PLANE4:
+ case INTEL_PIPE_CRC_SOURCE_PLANE5:
+ case INTEL_PIPE_CRC_SOURCE_PLANE6:
+ case INTEL_PIPE_CRC_SOURCE_PLANE7:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
@@ -552,8 +558,10 @@ intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
return vlv_crc_source_valid(dev_priv, source);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_crc_source_valid(dev_priv, source);
- else
+ else if (INTEL_GEN(dev_priv) < 9)
return ivb_crc_source_valid(dev_priv, source);
+ else
+ return skl_crc_source_valid(dev_priv, source);
}
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -592,6 +600,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
intel_wakeref_t wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
+ bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
@@ -605,7 +614,11 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
return -EIO;
}
- ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true);
+ enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
+ if (enable)
+ intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
+
+ ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
if (ret != 0)
goto out;
@@ -614,18 +627,16 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
POSTING_READ(PIPE_CRC_CTL(crtc->index));
if (!source) {
- if (IS_G4X(dev_priv))
- g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
- else if ((IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, false);
}
pipe_crc->skipped = 0;
out:
+ if (!enable)
+ intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
+
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
@@ -641,7 +652,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
if (!crtc->crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0)
+ if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.h b/drivers/gpu/drm/i915/intel_pipe_crc.h
new file mode 100644
index 000000000000..81eaf1854788
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PIPE_CRC_H__
+#define __INTEL_PIPE_CRC_H__
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct intel_crtc;
+
+#ifdef CONFIG_DEBUG_FS
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name, size_t *values_cnt);
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
+#else
+#define intel_crtc_set_crc_source NULL
+#define intel_crtc_verify_crc_source NULL
+#define intel_crtc_get_crc_sources NULL
+static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+
+static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+#endif
+
+#endif /* __INTEL_PIPE_CRC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 54307f1df6cf..44be676fabd6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -35,6 +35,9 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
#include "../../../platform/x86/intel_ips.h"
/**
@@ -338,12 +341,12 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
mutex_unlock(&dev_priv->pcu_lock);
}
@@ -850,7 +853,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
u32 reg;
unsigned int wm;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq);
@@ -3624,7 +3627,12 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 11)
return enabled_slices;
- if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+ /*
+ * FIXME: for now we'll only ever use 1 slice; pretend that we have
+ * only that 1 slice enabled until we have a proper way for on-demand
+ * toggling of the second slice.
+ */
+ if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
enabled_slices++;
return enabled_slices;
@@ -3919,12 +3927,43 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}
-static unsigned int skl_cursor_allocation(int num_active)
+static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane);
+static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+ int level,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */);
+
+static unsigned int
+skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
+ int num_active)
{
- if (num_active == 1)
- return 32;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
+ struct skl_wm_level wm = {};
+ int ret, min_ddb_alloc = 0;
+ struct skl_wm_params wp;
+
+ ret = skl_compute_wm_params(crtc_state, 256,
+ drm_format_info(DRM_FORMAT_ARGB8888),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_MODE_ROTATE_0,
+ crtc_state->pixel_rate, &wp, 0);
+ WARN_ON(ret);
- return 8;
+ for (level = 0; level <= max_level; level++) {
+ skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+ if (wm.min_ddb_alloc == U16_MAX)
+ break;
+
+ min_ddb_alloc = wm.min_ddb_alloc;
+ }
+
+ return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
@@ -3970,7 +4009,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12)
+ if (is_planar_yuv_format(fourcc))
swap(val, val2);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
@@ -4180,7 +4219,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (intel_plane->id == PLANE_CURSOR)
return 0;
- if (plane == 1 && format != DRM_FORMAT_NV12)
+ if (plane == 1 && !is_planar_yuv_format(format))
return 0;
/*
@@ -4192,7 +4231,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
height = drm_rect_height(&intel_pstate->base.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
- if (plane == 1 && format == DRM_FORMAT_NV12) {
+ if (plane == 1 && is_planar_yuv_format(format)) {
width /= 2;
height /= 2;
}
@@ -4308,7 +4347,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
- struct skl_plane_wm *wm;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
@@ -4349,7 +4387,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
/* Allocate fixed number of blocks for cursor. */
- total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+ total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
alloc_size -= total[PLANE_CURSOR];
cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
alloc->end - total[PLANE_CURSOR];
@@ -4365,15 +4403,23 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
+ const struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR) {
+ if (WARN_ON(wm->wm[level].min_ddb_alloc >
+ total[PLANE_CURSOR])) {
+ blocks = U32_MAX;
+ break;
+ }
continue;
+ }
- wm = &cstate->wm.skl.optimal.planes[plane_id];
blocks += wm->wm[level].min_ddb_alloc;
blocks += wm->uv_wm[level].min_ddb_alloc;
}
- if (blocks < alloc_size) {
+ if (blocks <= alloc_size) {
alloc_size -= blocks;
break;
}
@@ -4392,6 +4438,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* proportional to its relative data rate.
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
u64 rate;
u16 extra;
@@ -4405,8 +4453,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
if (total_data_rate == 0)
break;
- wm = &cstate->wm.skl.optimal.planes[plane_id];
-
rate = plane_data_rate[plane_id];
extra = min_t(u16, alloc_size,
DIV64_U64_ROUND_UP(alloc_size * rate,
@@ -4431,14 +4477,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
+ struct skl_ddb_entry *plane_alloc =
+ &cstate->wm.skl.plane_ddb_y[plane_id];
+ struct skl_ddb_entry *uv_plane_alloc =
+ &cstate->wm.skl.plane_ddb_uv[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
- plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
- uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
-
/* Gen11+ uses a separate plane for UV watermarks */
WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
@@ -4464,8 +4510,35 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- wm = &cstate->wm.skl.optimal.planes[plane_id];
- memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+ struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * We only disable the watermarks for each plane if
+ * they exceed the ddb allocation of said plane. This
+ * is done so that we don't end up touching cursor
+ * watermarks needlessly when some other plane reduces
+ * our max possible watermark level.
+ *
+ * Bspec has this to say about the PLANE_WM enable bit:
+ * "All the watermarks at this level for all enabled
+ * planes must be enabled before the level will be used."
+ * So this is actually safe to do.
+ */
+ if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
+ wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
+ memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+
+ /*
+ * Wa_1408961008:icl, ehl
+ * Underruns with WM1+ disabled
+ */
+ if (IS_GEN(dev_priv, 11) &&
+ level == 1 && wm->wm[0].plane_en) {
+ wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
+ wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
+ wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ }
}
}
@@ -4474,7 +4547,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* don't have enough DDB blocks for it.
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- wm = &cstate->wm.skl.optimal.planes[plane_id];
+ struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
if (wm->trans_wm.plane_res_b >= total[plane_id])
memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
}
@@ -4568,57 +4643,45 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
-skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp, int color_plane)
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane)
{
- struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_plane_state *pstate = &intel_pstate->base;
- const struct drm_framebuffer *fb = pstate->fb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 interm_pbpl;
- /* only NV12 format has two planes */
- if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
- DRM_DEBUG_KMS("Non NV12 format have single plane\n");
+ /* only planar format has two planes */
+ if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
+ DRM_DEBUG_KMS("Non planar format have single plane\n");
return -EINVAL;
}
- wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
- wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = fb->format->format == DRM_FORMAT_NV12;
-
- if (plane->id == PLANE_CURSOR) {
- wp->width = intel_pstate->base.crtc_w;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
- }
+ wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_Yf_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->is_planar = is_planar_yuv_format(format->format);
+ wp->width = width;
if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
- wp->cpp = fb->format->cpp[color_plane];
- wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
- intel_pstate);
+ wp->cpp = format->cpp[color_plane];
+ wp->plane_pixel_rate = plane_pixel_rate;
if (INTEL_GEN(dev_priv) >= 11 &&
- fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+ modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
wp->dbuf_block_size = 512;
- if (drm_rotation_90_or_270(pstate->rotation)) {
-
+ if (drm_rotation_90_or_270(rotation)) {
switch (wp->cpp) {
case 1:
wp->y_min_scanlines = 16;
@@ -4663,12 +4726,40 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
wp->plane_blocks_per_line);
+
wp->linetime_us = fixed16_to_u32_round_up(
- intel_get_linetime_us(cstate));
+ intel_get_linetime_us(crtc_state));
return 0;
}
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct skl_wm_params *wp, int color_plane)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int width;
+
+ if (plane->id == PLANE_CURSOR) {
+ width = plane_state->base.crtc_w;
+ } else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->base.src) >> 16;
+ }
+
+ return skl_compute_wm_params(crtc_state, width,
+ fb->format, fb->modifier,
+ plane_state->base.rotation,
+ skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
+ wp, color_plane);
+}
+
static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
{
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -4679,14 +4770,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
}
static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *dev_priv =
- to_i915(intel_pstate->base.plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@@ -4805,19 +4894,17 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
static void
skl_compute_wm_levels(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *dev_priv =
- to_i915(intel_pstate->base.plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
+ skl_compute_plane_wm(cstate, level, wm_params,
result_prev, result);
result_prev = result;
@@ -4914,7 +5001,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
+ skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
@@ -4936,13 +5023,12 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
+ skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
return 0;
}
-static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
- struct intel_crtc_state *crtc_state,
+static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
@@ -4968,8 +5054,7 @@ static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
return 0;
}
-static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
- struct intel_crtc_state *crtc_state,
+static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
@@ -5006,10 +5091,10 @@ static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
return 0;
}
-static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_pipe_wm *pipe_wm)
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
@@ -5026,11 +5111,9 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
to_intel_plane_state(pstate);
if (INTEL_GEN(dev_priv) >= 11)
- ret = icl_build_plane_wm(pipe_wm,
- cstate, intel_pstate);
+ ret = icl_build_plane_wm(cstate, intel_pstate);
else
- ret = skl_build_plane_wm(pipe_wm,
- cstate, intel_pstate);
+ ret = skl_build_plane_wm(cstate, intel_pstate);
if (ret)
return ret;
}
@@ -5056,11 +5139,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
{
u32 val = 0;
- if (level->plane_en) {
+ if (level->plane_en)
val |= PLANE_WM_EN;
- val |= level->plane_res_b;
- val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
- }
+ if (level->ignore_lines)
+ val |= PLANE_WM_IGNORE_LINES;
+ val |= level->plane_res_b;
+ val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
I915_WRITE_FW(reg, val);
}
@@ -5126,6 +5210,7 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
return l1->plane_en == l2->plane_en &&
+ l1->ignore_lines == l2->ignore_lines &&
l1->plane_res_l == l2->plane_res_l &&
l1->plane_res_b == l2->plane_res_b;
}
@@ -5169,7 +5254,7 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
}
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry entries[],
+ const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx)
{
int i;
@@ -5183,23 +5268,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
- const struct skl_pipe_wm *old_pipe_wm,
- struct skl_pipe_wm *pipe_wm, /* out */
- bool *changed /* out */)
-{
- struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
- int ret;
-
- ret = skl_build_pipe_wm(cstate, pipe_wm);
- if (ret)
- return ret;
-
- *changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm);
-
- return 0;
-}
-
static u32
pipes_modified(struct intel_atomic_state *state)
{
@@ -5269,6 +5337,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
return 0;
}
+static char enast(bool enable)
+{
+ return enable ? '*' : ' ';
+}
+
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
@@ -5279,8 +5352,16 @@ skl_print_wm_changes(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
+ const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
+
+ old_pipe_wm = &old_crtc_state->wm.skl.optimal;
+ new_pipe_wm = &new_crtc_state->wm.skl.optimal;
+
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@@ -5291,10 +5372,86 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ }
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_plane_wm *old_wm, *new_wm;
+
+ old_wm = &old_pipe_wm->planes[plane_id];
+ new_wm = &new_pipe_wm->planes[plane_id];
+
+ if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
+ continue;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
+ enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
+ enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
+ enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
+ enast(old_wm->trans_wm.plane_en),
+ enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
+ enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
+ enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
+ enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
+ enast(new_wm->trans_wm.plane_en));
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
plane->base.base.id, plane->base.name,
- old->start, old->end,
- new->start, new->end);
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
+ old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
+ old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
+ old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
+ old_wm->trans_wm.plane_res_b,
+ new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
+ new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
+ new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
+ new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
+ new_wm->trans_wm.plane_res_b);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc);
}
}
}
@@ -5449,10 +5606,9 @@ static int
skl_compute_wm(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
struct skl_ddb_values *results = &state->wm_results;
- struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
@@ -5470,12 +5626,8 @@ skl_compute_wm(struct intel_atomic_state *state)
* pipe allocations had to change.
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- cstate, i) {
- const struct skl_pipe_wm *old_pipe_wm =
- &old_crtc_state->wm.skl.optimal;
-
- pipe_wm = &cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+ new_crtc_state, i) {
+ ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
@@ -5483,7 +5635,9 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- if (changed)
+ if (!skl_pipe_wm_equals(crtc,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
results->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
@@ -5609,6 +5763,7 @@ static inline void skl_wm_level_from_reg_val(u32 val,
struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
+ level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
@@ -5986,7 +6141,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
@@ -6451,7 +6606,7 @@ static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
ei_down * threshold_down / 100));
I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
+ (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
@@ -6629,9 +6784,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* punit into committing the voltage change) as that takes a lot less
* power than the render powerwell.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
err = valleyview_set_rps(dev_priv, val);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
if (err)
DRM_ERROR("Failed to set RPS for idle\n");
@@ -6691,8 +6846,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->pcu_lock);
}
-void gen6_rps_boost(struct i915_request *rq,
- struct intel_rps_client *rps_client)
+void gen6_rps_boost(struct i915_request *rq)
{
struct intel_rps *rps = &rq->i915->gt_pm.rps;
unsigned long flags;
@@ -6721,7 +6875,7 @@ void gen6_rps_boost(struct i915_request *rq,
if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
- atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
+ atomic_inc(&rps->boosts);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
@@ -6782,11 +6936,11 @@ static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
{
/* We're doing forcewake before Disabling RC6,
* This what the BIOS expects when going into suspend */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RC_CONTROL, 0);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
@@ -6945,7 +7099,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
if (IS_GEN(dev_priv, 9))
@@ -6963,7 +7117,79 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+}
+
+static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 1a: Software RC state - RC0 */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /*
+ * 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have.
+ */
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ /* 2a: Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ /* 2b: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, dev_priv, id)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
+
+ if (HAS_GUC(dev_priv))
+ I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from intel_pstate is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1));
+
+ /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
+ I915_WRITE(GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE);
+
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
@@ -6977,7 +7203,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7054,7 +7280,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_PG_ENABLE,
GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7067,7 +7293,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7088,14 +7314,14 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_RC6_ENABLE);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 1 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ,
@@ -7128,7 +7354,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7148,7 +7374,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7196,7 +7422,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -7207,7 +7433,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
* Perhaps there might be some value in exposing these to
* userspace...
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Power down if completely idle for over 50ms */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
@@ -7215,7 +7441,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
@@ -7638,7 +7864,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7670,14 +7896,14 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
u32 val;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 1: Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
@@ -7712,7 +7938,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, valleyview_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7730,7 +7956,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7755,14 +7981,14 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL,
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
u32 val;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@@ -7796,7 +8022,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, valleyview_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -8037,14 +8263,14 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
return val;
}
-static struct drm_i915_private *i915_mch_dev;
+static struct drm_i915_private __rcu *i915_mch_dev;
static struct drm_i915_private *mchdev_get(void)
{
struct drm_i915_private *i915;
rcu_read_lock();
- i915 = i915_mch_dev;
+ i915 = rcu_dereference(i915_mch_dev);
if (!kref_get_unless_zero(&i915->drm.ref))
i915 = NULL;
rcu_read_unlock();
@@ -8343,22 +8569,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
pm_runtime_put(&dev_priv->drm.pdev->dev);
}
-/**
- * intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev_priv: i915 device
- *
- * We don't want to disable RC6 or other features here, we just want
- * to make sure any work we've queued has finished and won't bother
- * us while we're suspended.
- */
-void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 6)
- return;
-
- /* gen6_rps_idle() will be called later to disable interrupts */
-}
-
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
@@ -8458,6 +8668,8 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv)
cherryview_enable_rc6(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_enable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 11)
+ gen11_enable_rc6(dev_priv);
else if (INTEL_GEN(dev_priv) >= 9)
gen9_enable_rc6(dev_priv);
else if (IS_BROADWELL(dev_priv))
@@ -9361,7 +9573,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ICELAKE(dev_priv))
+ if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
@@ -9454,7 +9666,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.initial_watermarks = g4x_initial_watermarks;
dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
} else if (IS_PINEVIEW(dev_priv)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
@@ -9552,7 +9764,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(&dev_priv->uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
500, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
@@ -9600,7 +9812,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(&dev_priv->uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
fast_timeout_us, slow_timeout_ms,
NULL)) {
@@ -9824,6 +10036,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u64 time_hw, prev_hw, overflow_hw;
unsigned int fw_domains;
unsigned long flags;
@@ -9845,10 +10058,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
return 0;
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -9867,7 +10080,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
}
overflow_hw = BIT_ULL(32);
- time_hw = I915_READ_FW(reg);
+ time_hw = intel_uncore_read_fw(uncore, reg);
}
/*
@@ -9889,8 +10102,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
- intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, flags);
return mul_u64_u32_div(time_hw, mul, div);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
new file mode 100644
index 000000000000..674a3f0f16a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PM_H__
+#define __INTEL_PM_H__
+
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_device;
+struct drm_i915_private;
+struct i915_request;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct skl_ddb_allocation;
+struct skl_ddb_entry;
+struct skl_pipe_wm;
+struct skl_wm_level;
+
+void intel_init_clock_gating(struct drm_i915_private *dev_priv);
+void intel_suspend_hw(struct drm_i915_private *dev_priv);
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
+void intel_update_watermarks(struct intel_crtc *crtc);
+void intel_init_pm(struct drm_i915_private *dev_priv);
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
+void intel_pm_setup(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct i915_request *rq);
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */);
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out);
+void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
+void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+bool intel_can_enable_sagv(struct drm_atomic_state *state);
+int intel_enable_sagv(struct drm_i915_private *dev_priv);
+int intel_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+bool ilk_disable_lp_wm(struct drm_device *dev);
+int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *cstate);
+void intel_init_ipc(struct drm_i915_private *dev_priv);
+void intel_enable_ipc(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_PM_H__ */
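Editor's note: further up in intel_pm.c, i915_mch_dev becomes an __rcu-annotated pointer that mchdev_get() reads under rcu_read_lock() with rcu_dereference() and then pins with kref_get_unless_zero(), so a reference is only taken if the device has not already begun teardown. The kernel primitives have no direct userspace equivalent, but the "grab a reference only while the count is still non-zero" step can be sketched with C11 atomics; this is a simplified illustration under that assumption, not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct device {
	atomic_uint refcount;
	const char *name;
};

/* Take a reference only if the count is still non-zero, in the spirit of
 * kref_get_unless_zero(): a zero count means teardown already started. */
static bool get_unless_zero(struct device *dev)
{
	unsigned int old = atomic_load(&dev->refcount);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&dev->refcount, &old, old + 1));

	return true;
}

static void put_ref(struct device *dev)
{
	if (atomic_fetch_sub(&dev->refcount, 1) == 1)
		printf("%s: last reference dropped, freeing\n", dev->name);
}

int main(void)
{
	struct device dev = { .refcount = 2, .name = "i915" };

	if (get_unless_zero(&dev)) {
		printf("%s: got reference, count now %u\n",
		       dev.name, atomic_load(&dev.refcount));
		put_ref(&dev);
	}
	put_ref(&dev);
	put_ref(&dev);
	return 0;
}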
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 84a0fb981561..963663ba0edf 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -21,6 +21,14 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_atomic_helper.h>
+
+#include "i915_drv.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
/**
* DOC: Panel Self Refresh (PSR/SRD)
*
@@ -51,10 +59,6 @@
* must be correctly synchronized/cancelled when shutting down the pipe."
*/
-
-#include "intel_drv.h"
-#include "i915_drv.h"
-
static bool psr_global_enabled(u32 debug)
{
switch (debug & I915_PSR_DEBUG_MODE_MASK) {
@@ -78,9 +82,6 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
- case I915_PSR_DEBUG_DEFAULT:
- if (i915_modparams.enable_psr <= 0)
- return false;
default:
return crtc_state->has_psr2;
}
@@ -435,32 +436,16 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
+static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 max_sleep_time = 0x1f;
- u32 val = EDP_PSR_ENABLE;
+ u32 val = 0;
- /* Let's use 6 as the minimum to cover all known cases including the
- * off-by-one issue that HW has in some cases.
- */
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- /* sink_sync_latency of 8 means source has to wait for more than 8
- * frames, we'll go with 9 frames for now
- */
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
-
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
- if (IS_HASWELL(dev_priv))
- val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-
- if (dev_priv->psr.link_standby)
- val |= EDP_PSR_LINK_STANDBY;
+ if (INTEL_GEN(dev_priv) >= 11)
+ val |= EDP_PSR_TP4_TIME_0US;
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
- val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
@@ -469,7 +454,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
val |= EDP_PSR_TP1_TIME_2500us;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
- val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
@@ -483,6 +468,35 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TP2_SEL;
+ return val;
+}
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
+ */
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+ /* sink_sync_latency of 8 means the source has to wait for more than 8
+ * frames, so we'll go with 9 frames for now
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ if (IS_HASWELL(dev_priv))
+ val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+ if (dev_priv->psr.link_standby)
+ val |= EDP_PSR_LINK_STANDBY;
+
+ val |= intel_psr1_get_tp_time(intel_dp);
+
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
@@ -509,16 +523,22 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+ if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
+ /*
+ * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
+ * recommends keeping this bit unset while PSR2 is enabled.
+ */
+ I915_WRITE(EDP_PSR_CTL, 0);
+
I915_WRITE(EDP_PSR2_CTL, val);
}
@@ -530,11 +550,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0;
- /*
- * FIXME psr2_support is messed up. It's both computed
- * dynamically during PSR enable, and extracted from sink
- * caps during eDP detection.
- */
if (!dev_priv->psr.sink_psr2_support)
return false;
@@ -575,6 +590,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
return true;
}
@@ -610,9 +630,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (IS_HASWELL(dev_priv) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
return;
}
@@ -718,8 +737,11 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
- if (dev_priv->psr.enabled)
- return;
+ WARN_ON(dev_priv->psr.enabled);
+
+ dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -752,20 +774,13 @@ void intel_psr_enable(struct intel_dp *intel_dp,
WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
- if (dev_priv->psr.prepared) {
- DRM_DEBUG_KMS("PSR already in use\n");
+
+ if (!psr_global_enabled(dev_priv->psr.debug)) {
+ DRM_DEBUG_KMS("PSR disabled by flag\n");
goto unlock;
}
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
- dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.prepared = true;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
-
- if (psr_global_enabled(dev_priv->psr.debug))
- intel_psr_enable_locked(dev_priv, crtc_state);
- else
- DRM_DEBUG_KMS("PSR disabled by flag\n");
+ intel_psr_enable_locked(dev_priv, crtc_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -819,8 +834,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
- if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
- 2000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ psr_status, psr_status_mask, 0, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
@@ -848,18 +863,69 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.prepared) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
intel_psr_disable_locked(intel_dp);
- dev_priv->psr.prepared = false;
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
}
+static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+{
+ /*
+ * Display WA #0884: all
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently active
+ * pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+}
+
+/**
+ * intel_psr_update - Update PSR state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function updates the PSR state, disabling, enabling or switching the PSR
+ * version when executing fastsets. For full modeset, intel_psr_disable() and
+ * intel_psr_enable() should be called instead.
+ */
+void intel_psr_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_psr *psr = &dev_priv->psr;
+ bool enable, psr2_enable;
+
+ if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+ return;
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+ psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
+
+ if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
+ /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
+ if (crtc_state->crc_enabled && psr->enabled)
+ psr_force_hw_tracking_exit(dev_priv);
+
+ goto unlock;
+ }
+
+ if (psr->enabled)
+ intel_psr_disable_locked(intel_dp);
+
+ if (enable)
+ intel_psr_enable_locked(dev_priv, crtc_state);
+
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
/**
* intel_psr_wait_for_idle - wait for PSR1 to idle
* @new_crtc_state: new CRTC state
@@ -890,7 +956,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
+ return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@@ -915,7 +981,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->psr.lock);
- err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
@@ -924,36 +990,63 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return err == 0 && dev_priv->psr.enabled;
}
-static bool switching_psr(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state,
- u32 mode)
+static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
- /* Can't switch psr state anyway if PSR2 is not supported. */
- if (!crtc_state || !crtc_state->has_psr2)
- return false;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ int err;
- if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
- return true;
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
- if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
- return true;
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+ state->acquire_ctx = &ctx;
+
+retry:
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_crtc_state *crtc_state;
+ struct intel_crtc_state *intel_crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ goto error;
+ }
+
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
- return false;
+ if (crtc_state->active && intel_crtc_state->has_psr) {
+ /* Mark mode as changed to trigger a pipe->update() */
+ crtc_state->mode_changed = true;
+ break;
+ }
+ }
+
+ err = drm_atomic_commit(state);
+
+error:
+ if (err == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ err = drm_modeset_backoff(&ctx);
+ if (!err)
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_atomic_state_put(state);
+
+ return err;
}
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
- struct drm_modeset_acquire_ctx *ctx,
- u64 val)
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector_state *conn_state;
- struct intel_crtc_state *crtc_state = NULL;
- struct drm_crtc_commit *commit;
- struct drm_crtc *crtc;
- struct intel_dp *dp;
+ const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+ u32 old_mode;
int ret;
- bool enable;
- u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_FORCE_PSR1) {
@@ -961,49 +1054,19 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return -EINVAL;
}
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
- if (ret)
- return ret;
-
- /* dev_priv->psr.dp should be set once and then never touched again. */
- dp = READ_ONCE(dev_priv->psr.dp);
- conn_state = dp->attached_connector->base.state;
- crtc = conn_state->crtc;
- if (crtc) {
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(crtc->state);
- commit = crtc_state->base.commit;
- } else {
- commit = conn_state->commit;
- }
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (ret)
- return ret;
- }
-
ret = mutex_lock_interruptible(&dev_priv->psr.lock);
if (ret)
return ret;
- enable = psr_global_enabled(val);
-
- if (!enable || switching_psr(dev_priv, crtc_state, mode))
- intel_psr_disable_locked(dev_priv->psr.dp);
-
+ old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
- if (crtc)
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
-
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
- if (dev_priv->psr.prepared && enable)
- intel_psr_enable_locked(dev_priv, crtc_state);
-
mutex_unlock(&dev_priv->psr.lock);
+
+ if (old_mode != mode)
+ ret = intel_psr_fastset_force(dev_priv);
+
return ret;
}
@@ -1121,18 +1184,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
- if (frontbuffer_bits) {
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
- }
+ if (frontbuffer_bits)
+ psr_force_hw_tracking_exit(dev_priv);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_work(&dev_priv->psr.work);
@@ -1176,7 +1229,6 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (val) {
DRM_DEBUG_KMS("PSR interruption error set\n");
dev_priv->psr.sink_not_reliable = true;
- return;
}
/* Set link_standby x link_off defaults */
diff --git a/drivers/gpu/drm/i915/intel_psr.h b/drivers/gpu/drm/i915/intel_psr.h
new file mode 100644
index 000000000000..dc818826f36d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PSR_H__
+#define __INTEL_PSR_H__
+
+#include "intel_frontbuffer.h"
+
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_dp;
+
+#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state);
+void intel_psr_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_psr_flush(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_psr_init(struct drm_i915_private *dev_priv);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+ u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_PSR_H__ */
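
With PSR split out into its own header, display code can include intel_psr.h directly. A minimal consumer sketch, assuming the caller already holds the DP encoder and the committed crtc state (helper name illustrative):

#include "intel_psr.h"

/* Illustrative only: gate PSR enabling on platform and sink support. */
static void example_maybe_enable_psr(struct drm_i915_private *i915,
                                     struct intel_dp *intel_dp,
                                     const struct intel_crtc_state *crtc_state)
{
        if (!CAN_PSR(i915))
                return;

        intel_psr_enable(intel_dp, crtc_state);
}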
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7f841dba87b3..029fd8ec1857 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -43,12 +43,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_INDEX_ADDR);
-}
-
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
unsigned int space;
@@ -317,9 +311,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
- *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -424,10 +418,10 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL);
- *cs++ = intel_hws_seqno_address(rq->engine);
- *cs++ = rq->global_seqno;
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_GLOBAL_GTT_IVB);
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -448,8 +442,8 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -473,8 +467,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
@@ -554,16 +548,17 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
*/
default:
GEM_BUG_ON(engine->id);
- case RCS:
+ /* fallthrough */
+ case RCS0:
hwsp = RENDER_HWS_PGA_GEN7;
break;
- case BCS:
+ case BCS0:
hwsp = BLT_HWS_PGA_GEN7;
break;
- case VCS:
+ case VCS0:
hwsp = BSD_HWS_PGA_GEN7;
break;
- case VECS:
+ case VECS0:
hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
@@ -580,19 +575,19 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
if (!IS_GEN_RANGE(dev_priv, 6, 7))
return;
/* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
-
- I915_WRITE(instpm,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(dev_priv,
- instpm, INSTPM_SYNC_FLUSH, 0,
+ WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ ENGINE_WRITE(engine, RING_INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (intel_wait_for_register(engine->uncore,
+ RING_INSTPM(engine->mmio_base),
+ INSTPM_SYNC_FLUSH, 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
engine->name);
@@ -611,32 +606,36 @@ static bool stop_ring(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) > 2) {
- I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(dev_priv,
+ ENGINE_WRITE(engine,
+ RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
+ if (intel_wait_for_register(engine->uncore,
RING_MI_MODE(engine->mmio_base),
MODE_IDLE,
MODE_IDLE,
1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
engine->name);
- /* Sometimes we observe that the idle flag is not
+
+ /*
+ * Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
- if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
+ if (ENGINE_READ(engine, RING_HEAD) !=
+ ENGINE_READ(engine, RING_TAIL))
return false;
}
}
- I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
+ ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
- I915_WRITE_HEAD(engine, 0);
- I915_WRITE_TAIL(engine, 0);
+ ENGINE_WRITE(engine, RING_HEAD, 0);
+ ENGINE_WRITE(engine, RING_TAIL, 0);
/* The ring must be empty before it is disabled */
- I915_WRITE_CTL(engine, 0);
+ ENGINE_WRITE(engine, RING_CTL, 0);
- return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
+ return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
@@ -645,26 +644,26 @@ static int init_ring_common(struct intel_engine_cs *engine)
struct intel_ring *ring = engine->buffer;
int ret = 0;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_DRIVER("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_HEAD(engine),
- I915_READ_TAIL(engine),
- I915_READ_START(engine));
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_HEAD(engine),
- I915_READ_TAIL(engine),
- I915_READ_START(engine));
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
ret = -EIO;
goto out;
}
@@ -678,18 +677,18 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
- I915_READ_HEAD(engine);
+ ENGINE_READ(engine, RING_HEAD);
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
+ ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (I915_READ_HEAD(engine))
+ if (ENGINE_READ(engine, RING_HEAD))
DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
- engine->name, I915_READ_HEAD(engine));
+ engine->name, ENGINE_READ(engine, RING_HEAD));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -697,42 +696,44 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_ring_update_space(ring);
/* First wake the ring up to an empty/idle ring */
- I915_WRITE_HEAD(engine, ring->head);
- I915_WRITE_TAIL(engine, ring->head);
- (void)I915_READ_TAIL(engine);
+ ENGINE_WRITE(engine, RING_HEAD, ring->head);
+ ENGINE_WRITE(engine, RING_TAIL, ring->head);
+ ENGINE_POSTING_READ(engine, RING_TAIL);
- I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
+ ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
+ if (intel_wait_for_register(engine->uncore,
+ RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_CTL(engine) & RING_VALID,
- I915_READ_HEAD(engine), ring->head,
- I915_READ_TAIL(engine), ring->tail,
- I915_READ_START(engine),
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
i915_ggtt_offset(ring->vma));
ret = -EIO;
goto out;
}
if (INTEL_GEN(dev_priv) > 2)
- I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE(engine,
+ RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
- I915_WRITE_TAIL(engine, ring->tail);
- (void)I915_READ_TAIL(engine);
+ ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+ ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_queue_breadcrumbs(engine);
out:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
return ret;
}
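
The conversion above routes every access through engine->uncore: forcewake is held once around the whole init sequence, and the individual registers go through the ENGINE_READ()/ENGINE_WRITE() helpers, which fold in engine->mmio_base. A condensed sketch of that bracket with the register traffic reduced to one representative write (helper name illustrative):

static void example_engine_mmio_bracket(struct intel_engine_cs *engine)
{
        struct intel_uncore *uncore = engine->uncore;

        /* Hold forcewake across the whole burst rather than per access. */
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

        ENGINE_WRITE(engine, RING_HEAD, 0);     /* expands via engine->mmio_base */
        ENGINE_POSTING_READ(engine, RING_TAIL); /* flush the posted write */

        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}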
@@ -758,11 +759,6 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
}
}
- GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
- engine->name,
- rq ? rq->global_seqno : 0,
- intel_engine_get_seqno(engine),
- yesno(stalled));
/*
* The guilty request will get skipped on a hung engine.
*
@@ -878,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (INTEL_GEN(dev_priv) >= 6)
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
return 0;
}
@@ -892,18 +888,12 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
- GEM_BUG_ON(!request->global_seqno);
-
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
i915_request_mark_complete(request);
}
- intel_write_status_page(engine,
- I915_GEM_HWS_INDEX,
- intel_engine_last_submit(engine));
-
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -911,12 +901,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
static void i9xx_submit_request(struct i915_request *request)
{
- struct drm_i915_private *dev_priv = request->i915;
-
i915_request_submit(request);
- I915_WRITE_TAIL(request->engine,
- intel_ring_set_tail(request->ring, request->tail));
+ ENGINE_WRITE(request->engine, RING_TAIL,
+ intel_ring_set_tail(request->ring, request->tail));
}
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -931,8 +919,8 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
@@ -953,14 +941,14 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = MI_FLUSH;
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
BUILD_BUG_ON(GEN5_WA_STORES < 1);
for (i = 0; i < GEN5_WA_STORES; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
}
*cs++ = MI_USER_INTERRUPT;
@@ -988,20 +976,16 @@ gen5_irq_disable(struct intel_engine_cs *engine)
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ engine->i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
+ engine->i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
}
static void
@@ -1010,7 +994,7 @@ i8xx_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
+ I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
POSTING_READ16(RING_IMR(engine->mmio_base));
}
@@ -1020,7 +1004,7 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
+ I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
}
static int
@@ -1041,47 +1025,38 @@ bsd_ring_flush(struct i915_request *rq, u32 mode)
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- engine->irq_keep_mask));
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_POSTING_READ(engine, RING_IMR);
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
+ gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+ gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_POSTING_READ(engine, RING_IMR);
- gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~0);
- gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~0);
+ gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
}
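
All of these helpers lean on RING_IMR being a mask register: a set bit blocks that interrupt source, so "enable" writes the complement of every wanted bit while "disable" leaves only the always-kept bits unmasked. Condensed from the hunks above, for reference:

/* Enable: unmask both the user-visible and the always-kept interrupts. */
ENGINE_WRITE(engine, RING_IMR, ~(engine->irq_enable_mask | engine->irq_keep_mask));

/* Disable: keep only the always-needed interrupts unmasked. */
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);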
static int
@@ -1211,15 +1186,6 @@ int intel_ring_pin(struct intel_ring *ring)
else
flags |= PIN_HIGH;
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- else
- ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
- if (unlikely(ret))
- goto unpin_timeline;
- }
-
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
goto unpin_timeline;
@@ -1323,6 +1289,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
if (!ring)
return ERR_PTR(-ENOMEM);
+ kref_init(&ring->ref);
INIT_LIST_HEAD(&ring->request_list);
ring->timeline = i915_timeline_get(timeline);
@@ -1347,9 +1314,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
return ring;
}
-void
-intel_ring_free(struct intel_ring *ring)
+void intel_ring_free(struct kref *ref)
{
+ struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
struct drm_i915_gem_object *obj = ring->vma->obj;
i915_vma_close(ring->vma);
@@ -1359,17 +1326,24 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static void intel_ring_context_destroy(struct intel_context *ce)
+static void __ring_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(ce->pin_count);
-
- if (!ce->state)
- return;
-
GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
+static void ring_context_destroy(struct kref *ref)
+{
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (ce->state)
+ __ring_context_fini(ce);
+
+ intel_context_free(ce);
+}
+
static int __context_pin_ppgtt(struct i915_gem_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
@@ -1400,17 +1374,6 @@ static int __context_pin(struct intel_context *ce)
if (!vma)
return 0;
- /*
- * Clear this page out of any CPU caches for coherent swap-in/out.
- * We only want to do this on the first bind so that we do not stall
- * on an active context (which by nature is already on the GPU).
- */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (err)
- return err;
- }
-
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
return err;
@@ -1420,6 +1383,7 @@ static int __context_pin(struct intel_context *ce)
* it cannot reclaim the object until we release it.
*/
vma->obj->pin_global++;
+ vma->obj->mm.dirty = true;
return 0;
}
@@ -1436,12 +1400,10 @@ static void __context_unpin(struct intel_context *ce)
i915_vma_unpin(vma);
}
-static void intel_ring_context_unpin(struct intel_context *ce)
+static void ring_context_unpin(struct intel_context *ce)
{
__context_unpin_ppgtt(ce->gem_context);
__context_unpin(ce);
-
- i915_gem_context_put(ce->gem_context);
}
static struct i915_vma *
@@ -1456,6 +1418,24 @@ alloc_context_vma(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return ERR_CAST(obj);
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ *
+ * Snooping is required on non-llc platforms in execlist
+ * mode, but since all GGTT accesses use PAT entry 0 we
+ * get snooping anyway regardless of cache_level.
+ *
+ * This is only applicable for Ivy Bridge devices since
+ * later platforms don't have L3 control bits in the PTE.
+ */
+ if (IS_IVYBRIDGE(i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
+
if (engine->default_state) {
void *defaults, *vaddr;
@@ -1473,29 +1453,10 @@ alloc_context_vma(struct intel_engine_cs *engine)
}
memcpy(vaddr, defaults, engine->context_size);
-
i915_gem_object_unpin_map(engine->default_state);
- i915_gem_object_unpin_map(obj);
- }
- /*
- * Try to make the context utilize L3 as well as LLC.
- *
- * On VLV we don't have L3 controls in the PTEs so we
- * shouldn't touch the cache level, especially as that
- * would make the object snooped which might have a
- * negative performance impact.
- *
- * Snooping is required on non-llc platforms in execlist
- * mode, but since all GGTT accesses use PAT entry 0 we
- * get snooping anyway regardless of cache_level.
- *
- * This is only applicable for Ivy Bridge devices since
- * later platforms don't have L3 control bits in the PTE.
- */
- if (IS_IVYBRIDGE(i915)) {
- /* Ignore any error, regard it as a simple optimisation */
- i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
@@ -1513,69 +1474,52 @@ err_obj:
return ERR_PTR(err);
}
-static struct intel_context *
-__ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- struct intel_context *ce)
+static int ring_context_pin(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
int err;
+ /* One ringbuffer to rule them all */
+ GEM_BUG_ON(!engine->buffer);
+ ce->ring = engine->buffer;
+
if (!ce->state && engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
ce->state = vma;
}
err = __context_pin(ce);
if (err)
- goto err;
+ return err;
err = __context_pin_ppgtt(ce->gem_context);
if (err)
goto err_unpin;
- i915_gem_context_get(ctx);
-
- /* One ringbuffer to rule them all */
- GEM_BUG_ON(!engine->buffer);
- ce->ring = engine->buffer;
-
- return ce;
+ return 0;
err_unpin:
__context_unpin(ce);
-err:
- ce->pin_count = 0;
- return ERR_PTR(err);
+ return err;
}
-static const struct intel_context_ops ring_context_ops = {
- .unpin = intel_ring_context_unpin,
- .destroy = intel_ring_context_destroy,
-};
-
-static struct intel_context *
-intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static void ring_context_reset(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- return ce;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+ intel_ring_reset(ce->ring, 0);
+}
- ce->ops = &ring_context_ops;
+static const struct intel_context_ops ring_context_ops = {
+ .pin = ring_context_pin,
+ .unpin = ring_context_unpin,
- return __ring_context_pin(engine, ctx, ce);
-}
+ .reset = ring_context_reset,
+ .destroy = ring_context_destroy,
+};
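
The engine no longer carries its own context_pin hook; it publishes this vtable via engine->cops (see the intel_ring_default_vfuncs hunk below) and generic context code dispatches through ce->ops. The destroy hook takes a struct kref * because the final reference drop lands directly in the vtable; a sketch of that pairing, assuming the refcount member is ce->ref as used by ring_context_destroy() above:

/* Illustrative only: how a kref-based put reaches ->destroy. */
static inline void example_context_put(struct intel_context *ce)
{
        kref_put(&ce->ref, ce->ops->destroy);
}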
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
@@ -1587,9 +1531,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (err)
return err;
- timeline = i915_timeline_create(engine->i915,
- engine->name,
- engine->status_page.vma);
+ timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
@@ -1621,7 +1563,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
err_unpin:
intel_ring_unpin(ring);
err_ring:
- intel_ring_free(ring);
+ intel_ring_put(ring);
err:
intel_engine_cleanup_common(engine);
return err;
@@ -1632,10 +1574,10 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
- intel_ring_free(engine->buffer);
+ intel_ring_put(engine->buffer);
if (engine->cleanup)
engine->cleanup(engine);
@@ -1646,16 +1588,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
kfree(engine);
}
-void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* Restart from the beginning of the rings for convenience */
- for_each_engine(engine, dev_priv, id)
- intel_ring_reset(engine->buffer, 0);
-}
-
static int load_pd_dir(struct i915_request *rq,
const struct i915_hw_ppgtt *ppgtt)
{
@@ -1667,11 +1599,11 @@ static int load_pd_dir(struct i915_request *rq,
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
*cs++ = PP_DIR_DCLV_2G;
*cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = ppgtt->pd.base.ggtt_offset << 10;
intel_ring_advance(rq, cs);
@@ -1690,7 +1622,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
@@ -1703,8 +1635,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
enum intel_engine_id id;
- const int num_rings =
- IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
+ const int num_engines =
+ IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
@@ -1718,7 +1650,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
len = 4;
if (IS_GEN(i915, 7))
- len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+ len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
flags &= ~MI_FORCE_RESTORE;
@@ -1733,10 +1665,10 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (IS_GEN(i915, 7)) {
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- if (num_rings) {
+ if (num_engines) {
struct intel_engine_cs *signaller;
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, i915, id) {
if (signaller == engine)
continue;
@@ -1763,8 +1695,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
* placeholder we use to flush other contexts.
*/
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
- engine)->state) |
+ *cs++ = i915_ggtt_offset(engine->kernel_context->state) |
MI_MM_SPACE_GTT |
MI_RESTORE_INHIBIT;
}
@@ -1779,11 +1710,11 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
if (IS_GEN(i915, 7)) {
- if (num_rings) {
+ if (num_engines) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, i915, id) {
if (signaller == engine)
continue;
@@ -1861,7 +1792,7 @@ static int switch_context(struct i915_request *rq)
* explanation.
*/
loops = 1;
- if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
+ if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
loops = 32;
do {
@@ -1870,15 +1801,15 @@ static int switch_context(struct i915_request *rq)
goto err;
} while (--loops);
- if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
- unwind_mm = intel_engine_flag(engine);
- ppgtt->pd_dirty_rings &= ~unwind_mm;
+ if (ppgtt->pd_dirty_engines & engine->mask) {
+ unwind_mm = engine->mask;
+ ppgtt->pd_dirty_engines &= ~unwind_mm;
hw_flags = MI_FORCE_RESTORE;
}
}
if (rq->hw_context->state) {
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
/*
* The kernel context(s) is treated as pure scratch and is not
@@ -1938,7 +1869,7 @@ static int switch_context(struct i915_request *rq)
err_mm:
if (unwind_mm)
- ppgtt->pd_dirty_rings |= unwind_mm;
+ ppgtt->pd_dirty_engines |= unwind_mm;
err:
return ret;
}
@@ -1947,7 +1878,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!request->hw_context->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
/*
@@ -2108,23 +2039,23 @@ int intel_ring_cacheline_align(struct i915_request *rq)
static void gen6_bsd_submit_request(struct i915_request *request)
{
- struct drm_i915_private *dev_priv = request->i915;
+ struct intel_uncore *uncore = request->engine->uncore;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Every tail move must follow the sequence below */
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
- I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
- I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
+ intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
GEN6_BSD_SLEEP_PSMI_CONTROL,
GEN6_BSD_SLEEP_INDICATOR,
0,
@@ -2137,10 +2068,10 @@ static void gen6_bsd_submit_request(struct i915_request *request)
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
- I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)
@@ -2282,7 +2213,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->reset.reset = reset_ring;
engine->reset.finish = reset_finish;
- engine->context_pin = intel_ring_context_pin;
+ engine->cops = &ring_context_ops;
engine->request_alloc = ring_request_alloc;
/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 710ffb221775..72c7c337ace9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -6,22 +6,20 @@
#include <linux/hashtable.h>
#include <linux/irq_work.h>
+#include <linux/random.h>
#include <linux/seqlock.h>
#include "i915_gem_batch_pool.h"
-
-#include "i915_reg.h"
#include "i915_pmu.h"
+#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
+#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
-struct i915_sched_attr;
-
-#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -31,28 +29,44 @@ struct i915_sched_attr;
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
-struct intel_hw_status_page {
- struct i915_vma *vma;
- u32 *addr;
-};
+/*
+ * The register defines to be used with the following macros need to accept a
+ * base param, e.g.:
+ *
+ * REG_FOO(base) _MMIO((base) + <relative offset>)
+ * ENGINE_READ(engine, REG_FOO);
+ *
+ * register arrays are to be defined and accessed as follows:
+ *
+ * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
+ * ENGINE_READ_IDX(engine, REG_BAR, i)
+ */
+
+#define __ENGINE_REG_OP(op__, engine__, ...) \
+ intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)
-#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
-#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
+#define __ENGINE_READ_OP(op__, engine__, reg__) \
+ __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))
-#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
-#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
+#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
+#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
+#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
-#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
-#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
+#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
+ __ENGINE_REG_OP(read64_2x32, (engine__), \
+ lower_reg__((engine__)->mmio_base), \
+ upper_reg__((engine__)->mmio_base))
-#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
-#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
+#define ENGINE_READ_IDX(engine__, reg__, idx__) \
+ __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))
-#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
-#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
+#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
+ __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))
-#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
-#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
+#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__)
+#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
+#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
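
Concretely, for a register macro that takes a base parameter the helpers expand to plain uncore accessors on the engine's own MMIO window; for illustration:

/* ENGINE_READ(engine, RING_HEAD) expands (roughly) to: */
intel_uncore_read((engine)->uncore, RING_HEAD((engine)->mmio_base));

/* and ENGINE_WRITE(engine, RING_TAIL, val) to: */
intel_uncore_write((engine)->uncore, RING_TAIL((engine)->mmio_base), (val));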
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
@@ -90,506 +104,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
return "unknown";
}
-#define I915_MAX_SLICES 3
-#define I915_MAX_SUBSLICES 8
-
-#define instdone_slice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
-
-#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
-
-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
- for ((slice__) = 0, (subslice__) = 0; \
- (slice__) < I915_MAX_SLICES; \
- (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
- (slice__) += ((subslice__) == 0)) \
- for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
- (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
-
-struct intel_instdone {
- u32 instdone;
- /* The following exist only in the RCS engine */
- u32 slice_common;
- u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
- u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
-};
-
-struct intel_engine_hangcheck {
- u64 acthd;
- u32 seqno;
- unsigned long action_timestamp;
- struct intel_instdone instdone;
-};
-
-struct intel_ring {
- struct i915_vma *vma;
- void *vaddr;
-
- struct i915_timeline *timeline;
- struct list_head request_list;
- struct list_head active_link;
-
- u32 head;
- u32 tail;
- u32 emit;
-
- u32 space;
- u32 size;
- u32 effective_size;
-};
-
-struct i915_gem_context;
-struct drm_i915_reg_table;
-
-/*
- * we use a single page to load ctx workarounds so all of these
- * values are referred in terms of dwords
- *
- * struct i915_wa_ctx_bb:
- * offset: specifies batch starting position, also helpful in case
- * if we want to have multiple batches at different offsets based on
- * some criteria. It is not a requirement at the moment but provides
- * an option for future use.
- * size: size of the batch in DWORDS
- */
-struct i915_ctx_workarounds {
- struct i915_wa_ctx_bb {
- u32 offset;
- u32 size;
- } indirect_ctx, per_ctx;
- struct i915_vma *vma;
-};
-
-struct i915_request;
-
-#define I915_MAX_VCS 4
-#define I915_MAX_VECS 2
-
-/*
- * Engine IDs definitions.
- * Keep instances of the same type engine together.
- */
-enum intel_engine_id {
- RCS = 0,
- BCS,
- VCS,
- VCS2,
- VCS3,
- VCS4,
-#define _VCS(n) (VCS + (n))
- VECS,
- VECS2
-#define _VECS(n) (VECS + (n))
-};
-
-struct i915_priolist {
- struct list_head requests[I915_PRIORITY_COUNT];
- struct rb_node node;
- unsigned long used;
- int priority;
-};
-
-#define priolist_for_each_request(it, plist, idx) \
- for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
- list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-
-#define priolist_for_each_request_consume(it, n, plist, idx) \
- for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
- list_for_each_entry_safe(it, n, \
- &(plist)->requests[idx - 1], \
- sched.link)
-
-struct st_preempt_hang {
- struct completion completion;
- unsigned int count;
- bool inject_hang;
-};
-
-/**
- * struct intel_engine_execlists - execlist submission queue and port state
- *
- * The struct intel_engine_execlists represents the combined logical state of
- * driver and the hardware state for execlist mode of submission.
- */
-struct intel_engine_execlists {
- /**
- * @tasklet: softirq tasklet for bottom handler
- */
- struct tasklet_struct tasklet;
-
- /**
- * @default_priolist: priority list for I915_PRIORITY_NORMAL
- */
- struct i915_priolist default_priolist;
-
- /**
- * @no_priolist: priority lists disabled
- */
- bool no_priolist;
-
- /**
- * @submit_reg: gen-specific execlist submission register
- * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
- * the ExecList Submission Queue Contents register array for Gen11+
- */
- u32 __iomem *submit_reg;
-
- /**
- * @ctrl_reg: the enhanced execlists control register, used to load the
- * submit queue on the HW and to request preemptions to idle
- */
- u32 __iomem *ctrl_reg;
-
- /**
- * @port: execlist port states
- *
- * For each hardware ELSP (ExecList Submission Port) we keep
- * track of the last request and the number of times we submitted
- * that port to hw. We then count the number of times the hw reports
- * a context completion or preemption. As only one context can
- * be active on hw, we limit resubmission of context to port[0]. This
- * is called Lite Restore, of the context.
- */
- struct execlist_port {
- /**
- * @request_count: combined request and submission count
- */
- struct i915_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, execlists) ((p) - (execlists)->port)
-
- /**
- * @context_id: context ID for port
- */
- GEM_DEBUG_DECL(u32 context_id);
-
-#define EXECLIST_MAX_PORTS 2
- } port[EXECLIST_MAX_PORTS];
-
- /**
- * @active: is the HW active? We consider the HW as active after
- * submitting any context for execution and until we have seen the
- * last context completion event. After that, we do not expect any
- * more events until we submit, and so can park the HW.
- *
- * As we have a small number of different sources from which we feed
- * the HW, we track the state of each inside a single bitfield.
- */
- unsigned int active;
-#define EXECLISTS_ACTIVE_USER 0
-#define EXECLISTS_ACTIVE_PREEMPT 1
-#define EXECLISTS_ACTIVE_HWACK 2
-
- /**
- * @port_mask: number of execlist ports - 1
- */
- unsigned int port_mask;
-
- /**
- * @queue_priority_hint: Highest pending priority.
- *
- * When we add requests into the queue, or adjust the priority of
- * executing requests, we compute the maximum priority of those
- * pending requests. We can then use this value to determine if
- * we need to preempt the executing requests to service the queue.
- * However, since the we may have recorded the priority of an inflight
- * request we wanted to preempt but since completed, at the time of
- * dequeuing the priority hint may no longer may match the highest
- * available request priority.
- */
- int queue_priority_hint;
-
- /**
- * @queue: queue of requests, in priority lists
- */
- struct rb_root_cached queue;
-
- /**
- * @csb_write: control register for Context Switch buffer
- *
- * Note this register may be either mmio or HWSP shadow.
- */
- u32 *csb_write;
-
- /**
- * @csb_status: status array for Context Switch buffer
- *
- * Note these register may be either mmio or HWSP shadow.
- */
- u32 *csb_status;
-
- /**
- * @preempt_complete_status: expected CSB upon completing preemption
- */
- u32 preempt_complete_status;
-
- /**
- * @csb_head: context status buffer head
- */
- u8 csb_head;
-
- I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
-};
-
-#define INTEL_ENGINE_CS_MAX_NAME 8
-
-struct intel_engine_cs {
- struct drm_i915_private *i915;
- char name[INTEL_ENGINE_CS_MAX_NAME];
-
- enum intel_engine_id id;
- unsigned int hw_id;
- unsigned int guc_id;
-
- u8 uabi_id;
- u8 uabi_class;
-
- u8 class;
- u8 instance;
- u32 context_size;
- u32 mmio_base;
-
- struct intel_ring *buffer;
-
- struct i915_timeline timeline;
-
- struct drm_i915_gem_object *default_state;
- void *pinned_default_state;
-
- /* Rather than have every client wait upon all user interrupts,
- * with the herd waking after every interrupt and each doing the
- * heavyweight seqno dance, we delegate the task (of being the
- * bottom-half of the user interrupt) to the first client. After
- * every interrupt, we wake up one client, who does the heavyweight
- * coherent seqno read and either goes back to sleep (if incomplete),
- * or wakes up all the completed clients in parallel, before then
- * transferring the bottom-half status to the next client in the queue.
- *
- * Compared to walking the entire list of waiters in a single dedicated
- * bottom-half, we reduce the latency of the first waiter by avoiding
- * a context switch, but incur additional coherent seqno reads when
- * following the chain of request breadcrumbs. Since it is most likely
- * that we have a single client waiting on each seqno, then reducing
- * the overhead of waking that client is much preferred.
- */
- struct intel_breadcrumbs {
- spinlock_t irq_lock;
- struct list_head signalers;
-
- struct irq_work irq_work; /* for use from inside irq_lock */
-
- unsigned int irq_enabled;
-
- bool irq_armed;
- } breadcrumbs;
-
- struct {
- /**
- * @enable: Bitmask of enable sample events on this engine.
- *
- * Bits correspond to sample event types, for instance
- * I915_SAMPLE_QUEUED is bit 0 etc.
- */
- u32 enable;
- /**
- * @enable_count: Reference count for the enabled samplers.
- *
- * Index number corresponds to @enum drm_i915_pmu_engine_sample.
- */
- unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
- /**
- * @sample: Counter values for sampling events.
- *
- * Our internal timer stores the current counters in this field.
- *
- * Index number corresponds to @enum drm_i915_pmu_engine_sample.
- */
- struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
- } pmu;
-
- /*
- * A pool of objects to use as shadow copies of client batch buffers
- * when the command parser is enabled. Prevents the client from
- * modifying the batch contents after software parsing.
- */
- struct i915_gem_batch_pool batch_pool;
-
- struct intel_hw_status_page status_page;
- struct i915_ctx_workarounds wa_ctx;
- struct i915_wa_list ctx_wa_list;
- struct i915_wa_list wa_list;
- struct i915_wa_list whitelist;
-
- u32 irq_keep_mask; /* always keep these interrupts */
- u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- void (*irq_enable)(struct intel_engine_cs *engine);
- void (*irq_disable)(struct intel_engine_cs *engine);
-
- int (*init_hw)(struct intel_engine_cs *engine);
-
- struct {
- void (*prepare)(struct intel_engine_cs *engine);
- void (*reset)(struct intel_engine_cs *engine, bool stalled);
- void (*finish)(struct intel_engine_cs *engine);
- } reset;
-
- void (*park)(struct intel_engine_cs *engine);
- void (*unpark)(struct intel_engine_cs *engine);
-
- void (*set_default_submission)(struct intel_engine_cs *engine);
-
- struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
-
- int (*request_alloc)(struct i915_request *rq);
- int (*init_context)(struct i915_request *rq);
-
- int (*emit_flush)(struct i915_request *request, u32 mode);
-#define EMIT_INVALIDATE BIT(0)
-#define EMIT_FLUSH BIT(1)
-#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
- int (*emit_bb_start)(struct i915_request *rq,
- u64 offset, u32 length,
- unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE BIT(0)
-#define I915_DISPATCH_PINNED BIT(1)
- int (*emit_init_breadcrumb)(struct i915_request *rq);
- u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
- u32 *cs);
- unsigned int emit_fini_breadcrumb_dw;
-
- /* Pass the request to the hardware queue (e.g. directly into
- * the legacy ringbuffer or to the end of an execlist).
- *
- * This is called from an atomic context with irqs disabled; must
- * be irq safe.
- */
- void (*submit_request)(struct i915_request *rq);
-
- /*
- * Call when the priority on a request has changed and it and its
- * dependencies may need rescheduling. Note the request itself may
- * not be ready to run!
- */
- void (*schedule)(struct i915_request *request,
- const struct i915_sched_attr *attr);
-
- /*
- * Cancel all requests on the hardware, or queued for execution.
- * This should only cancel the ready requests that have been
- * submitted to the engine (via the engine->submit_request callback).
- * This is called when marking the device as wedged.
- */
- void (*cancel_requests)(struct intel_engine_cs *engine);
-
- void (*cleanup)(struct intel_engine_cs *engine);
-
- struct intel_engine_execlists execlists;
-
- /* Contexts are pinned whilst they are active on the GPU. The last
- * context executed remains active whilst the GPU is idle - the
- * switch away and write to the context object only occurs on the
- * next execution. Contexts are only unpinned on retirement of the
- * following request ensuring that we can always write to the object
- * on the context switch even after idling. Across suspend, we switch
- * to the kernel context and trash it as the save may not happen
- * before the hardware is powered down.
- */
- struct intel_context *last_retired_context;
-
- /* status_notifier: list of callbacks for context-switch changes */
- struct atomic_notifier_head context_status_notifier;
-
- struct intel_engine_hangcheck hangcheck;
-
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
-#define I915_ENGINE_SUPPORTS_STATS BIT(1)
-#define I915_ENGINE_HAS_PREEMPTION BIT(2)
- unsigned int flags;
-
- /*
- * Table of commands the command parser needs to know about
- * for this engine.
- */
- DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
-
- /*
- * Table of registers allowed in commands that read/write registers.
- */
- const struct drm_i915_reg_table *reg_tables;
- int reg_table_count;
-
- /*
- * Returns the bitmask for the length field of the specified command.
- * Return 0 for an unrecognized/invalid command.
- *
- * If the command parser finds an entry for a command in the engine's
- * cmd_tables, it gets the command's length based on the table entry.
- * If not, it calls this function to determine the per-engine length
- * field encoding for the command (i.e. different opcode ranges use
- * certain bits to encode the command length in the header).
- */
- u32 (*get_cmd_length_mask)(u32 cmd_header);
-
- struct {
- /**
- * @lock: Lock protecting the below fields.
- */
- seqlock_t lock;
- /**
- * @enabled: Reference count indicating number of listeners.
- */
- unsigned int enabled;
- /**
- * @active: Number of contexts currently scheduled in.
- */
- unsigned int active;
- /**
- * @enabled_at: Timestamp when busy stats were enabled.
- */
- ktime_t enabled_at;
- /**
- * @start: Timestamp of the last idle to active transition.
- *
- * Idle is defined as active == 0, active is active > 0.
- */
- ktime_t start;
- /**
- * @total: Total time this engine was busy.
- *
- * Accumulated time not counting the most recent block in cases
- * where engine is currently busy (active > 0).
- */
- ktime_t total;
- } stats;
-};
-
-static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
-}
-
-static inline bool
-intel_engine_supports_stats(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_SUPPORTS_STATS;
-}
-
-static inline bool
-intel_engine_has_preemption(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_HAS_PREEMPTION;
-}
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
static inline bool __execlists_need_preempt(int prio, int last)
{
@@ -650,7 +165,7 @@ void execlists_user_end(struct intel_engine_execlists *execlists);
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
-void
+struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
static inline unsigned int
@@ -674,12 +189,6 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
return port;
}
-static inline unsigned int
-intel_engine_flag(const struct intel_engine_cs *engine)
-{
- return BIT(engine->id);
-}
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
@@ -722,10 +231,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
-#define I915_GEM_HWS_INDEX 0x30
-#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX * sizeof(u32))
#define I915_GEM_HWS_PREEMPT 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
+#define I915_GEM_HWS_HANGCHECK 0x34
+#define I915_GEM_HWS_HANGCHECK_ADDR (I915_GEM_HWS_HANGCHECK * sizeof(u32))
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
@@ -743,13 +252,22 @@ int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
-void intel_ring_free(struct intel_ring *ring);
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+ kref_get(&ring->ref);
+ return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+ kref_put(&ring->ref, intel_ring_free);
+}
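
Ring lifetime is now reference counted: intel_engine_create_ring() calls kref_init(&ring->ref), sharers take intel_ring_get(), and the final intel_ring_put() ends up in intel_ring_free() via container_of(). A minimal usage sketch (helper names illustrative):

/* Illustrative only: hand out and later drop a reference on the engine's ring. */
static struct intel_ring *example_borrow_ring(struct intel_engine_cs *engine)
{
        return intel_ring_get(engine->buffer);
}

static void example_return_ring(struct intel_ring *ring)
{
        intel_ring_put(ring); /* frees the ring once the last reference drops */
}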
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
-void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-
int __must_check intel_ring_cacheline_align(struct i915_request *rq);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
@@ -844,8 +362,6 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
return (head - tail - CACHELINE_BYTES) & (size - 1);
}
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
-
int intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -863,44 +379,6 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
-static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
-{
- /*
- * We are only peeking at the tail of the submit queue (and not the
- * queue itself) in order to gain a hint as to the current active
- * state of the engine. Callers are not expected to be taking
- * engine->timeline->lock, nor are they expected to be concerned
- * wtih serialising this hint with anything, so document it as
- * a hint and nothing more.
- */
- return READ_ONCE(engine->timeline.seqno);
-}
-
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
- u32 seqno)
-{
- return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
-}
-
-static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
- u32 seqno)
-{
- GEM_BUG_ON(!seqno);
- return intel_engine_signaled(engine, seqno);
-}
-
-static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
- u32 seqno)
-{
- GEM_BUG_ON(!seqno);
- return intel_engine_signaled(engine, seqno - 1);
-}
-
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
@@ -910,7 +388,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
@@ -919,7 +397,7 @@ intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
irq_work_queue(&engine->breadcrumbs.irq_work);
}
-bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
@@ -960,14 +438,14 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
}
static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
GEM_BUG_ON(gtt_offset & (1 << 5));
/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = value;
@@ -983,11 +461,11 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
}
void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
+void intel_gt_resume(struct drm_i915_private *i915);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);
@@ -1066,6 +544,9 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+struct i915_request *
+intel_engine_find_active_request(struct intel_engine_cs *engine);
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
@@ -1086,4 +567,17 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
#endif
+static inline u32
+intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
+{
+ return engine->hangcheck.next_seqno =
+ next_pseudo_random32(engine->hangcheck.next_seqno);
+}
+
+static inline u32
+intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
+{
+ return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
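
These two helpers replace the old global-seqno progress check: every breadcrumb now stores a fresh pseudo-random value at I915_GEM_HWS_HANGCHECK, so a hangcheck pass can infer forward progress simply by noticing that the sampled value changed. Illustrative check only; the real hangcheck worker is not part of these hunks:

static bool example_engine_made_progress(struct intel_engine_cs *engine,
                                         u32 *last_sample)
{
        u32 seqno = intel_engine_get_hangcheck_seqno(engine);
        bool progress = (seqno != *last_sample);

        *last_sample = seqno;
        return progress;
}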
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index a017a4232c0f..6150e35bf7b5 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -32,6 +32,10 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "intel_cdclk.h"
+#include "intel_crt.h"
+#include "intel_csr.h"
+#include "intel_dp.h"
#include "intel_drv.h"
/**
@@ -60,31 +64,20 @@
static noinline depot_stack_handle_t __save_depot_stack(void)
{
unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- .skip = 1,
- };
-
- save_stack_trace(&trace);
- if (trace.nr_entries &&
- trace.entries[trace.nr_entries - 1] == ULONG_MAX)
- trace.nr_entries--;
+ unsigned int n;
- return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+ return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
static void __print_depot_stack(depot_stack_handle_t stack,
char *buf, int sz, int indent)
{
- unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- };
+ unsigned long *entries;
+ unsigned int nr_entries;
- depot_fetch_stack(stack, &trace);
- snprint_stack_trace(buf, sz, &trace, indent);
+ nr_entries = stack_depot_fetch(stack, &entries);
+ stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
@@ -158,7 +151,7 @@ static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
char *buf;
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
@@ -194,7 +187,7 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
unsigned long i;
char *buf;
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
@@ -278,7 +271,9 @@ void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
if (dbg.count <= alloc)
break;
- s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+ s = krealloc(dbg.owners,
+ dbg.count * sizeof(*s),
+ GFP_NOWAIT | __GFP_NOWARN);
if (!s)
goto out;
@@ -565,7 +560,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- WARN_ON(intel_wait_for_register(dev_priv,
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore,
regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx),
HSW_PWR_WELL_CTL_STATE(pw_idx),
@@ -620,7 +615,7 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
SKL_FUSE_PG_DIST_STATUS(pg),
SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
@@ -1521,7 +1516,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DISPLAY_PHY_STATUS,
phy_status_mask,
phy_status,
@@ -1556,7 +1551,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy),
PHY_POWERGOOD(phy),
@@ -1760,7 +1755,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->pcu_lock);
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1772,7 +1767,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
WARN_ON(ctrl << 16 != state);
mutex_unlock(&dev_priv->pcu_lock);
@@ -1793,20 +1788,20 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->pcu_lock);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
DRM_ERROR("timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
#undef COND
@@ -3442,7 +3437,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_ICELAKE(dev_priv)) {
+ if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -3576,7 +3571,11 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
!(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power enable timeout\n");
else
- dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
+ /*
+ * FIXME: for now pretend that we only have 1 slice, see
+ * intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -3591,7 +3590,11 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power disable timeout!\n");
else
- dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
+ /*
+ * FIXME: for now pretend that the first slice is always
+ * enabled, see intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@ -3652,7 +3655,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- skl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -3669,7 +3672,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- skl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
@@ -3714,7 +3717,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- bxt_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -3731,7 +3734,7 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- bxt_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -3773,7 +3776,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
/* 5. Enable CD clock */
- cnl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
@@ -3795,7 +3798,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- cnl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -3837,7 +3840,7 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
/* 5. Enable CDCLK. */
- icl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
/* 6. Enable DBUF. */
icl_dbuf_enable(dev_priv);
@@ -3862,7 +3865,7 @@ void icl_display_core_uninit(struct drm_i915_private *dev_priv)
icl_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- icl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -3993,6 +3996,36 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
cmn->desc->ops->disable(dev_priv, cmn);
}
+static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+{
+ bool ret;
+
+ mutex_lock(&dev_priv->pcu_lock);
+ ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ mutex_unlock(&dev_priv->pcu_lock);
+
+ return ret;
+}
+
+static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+{
+ WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
+}
+
+static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+{
+ static const struct pci_device_id isp_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+ {}
+ };
+
+ WARN(!pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
+}
+
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
/**
@@ -4017,7 +4050,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
- if (IS_ICELAKE(i915)) {
+ if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
cnl_display_core_init(i915, resume);
@@ -4029,10 +4062,13 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
mutex_lock(&power_domains->lock);
chv_phy_control_init(i915);
mutex_unlock(&power_domains->lock);
+ assert_isp_power_gated(i915);
} else if (IS_VALLEYVIEW(i915)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(i915);
mutex_unlock(&power_domains->lock);
+ assert_ved_power_gated(i915);
+ assert_isp_power_gated(i915);
} else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
}
@@ -4162,7 +4198,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
intel_power_domains_verify_state(i915);
}
- if (IS_ICELAKE(i915))
+ if (INTEL_GEN(i915) >= 11)
icl_display_core_uninit(i915);
else if (IS_CANNONLAKE(i915))
cnl_display_core_uninit(i915);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7b0884ba5a5..0e3d91d9ef13 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -25,16 +25,23 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
+
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_hdmi.h"
+#include "intel_panel.h"
+#include "intel_sdvo.h"
#include "intel_sdvo_regs.h"
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
@@ -978,34 +985,109 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1);
}
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned int if_index,
+ u8 *data, unsigned int length)
{
+ u8 set_buf_index[2] = { if_index, 0 };
+ u8 hbuf_size, tx_rate, av_split;
+ int i;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_HBUF_AV_SPLIT,
+ &av_split, 1))
+ return -ENXIO;
+
+ if (av_split < if_index)
+ return 0;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_HBUF_TXRATE,
+ &tx_rate, 1))
+ return -ENXIO;
+
+ if (tx_rate == SDVO_HBUF_TX_DISABLED)
+ return 0;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return -ENXIO;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return -ENXIO;
+
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
+
+ DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ hbuf_size = min_t(unsigned int, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA, NULL, 0))
+ return -ENXIO;
+ if (!intel_sdvo_read_response(intel_sdvo, &data[i],
+ min_t(unsigned int, 8, hbuf_size - i)))
+ return -ENXIO;
+ }
+
+ return hbuf_size;
+}
+
+static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
- u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
- union hdmi_infoframe frame;
+ &crtc_state->base.adjusted_mode;
int ret;
- ssize_t len;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ if (!crtc_state->has_hdmi_sink)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame,
conn_state->connector,
adjusted_mode);
- if (ret < 0) {
- DRM_ERROR("couldn't fill AVI infoframe\n");
+ if (ret)
return false;
- }
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ drm_hdmi_avi_infoframe_quant_range(frame,
conn_state->connector,
adjusted_mode,
- pipe_config->limited_color_range ?
+ crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
- if (len < 0)
+ ret = hdmi_avi_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
+}
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
+ return true;
+
+ if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
+ return false;
+
+ len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
+ if (WARN_ON(len < 0))
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
@@ -1013,6 +1095,40 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
sdvo_data, sizeof(sdvo_data));
}
+static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state)
+{
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+ ssize_t len;
+ int ret;
+
+ if (!crtc_state->has_hdmi_sink)
+ return;
+
+ len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ sdvo_data, sizeof(sdvo_data));
+ if (len < 0) {
+ DRM_DEBUG_KMS("failed to read AVI infoframe\n");
+ return;
+ } else if (len == 0) {
+ return;
+ }
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
+ return;
+ }
+
+ if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
+ DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
+}
+
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
@@ -1193,6 +1309,12 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (intel_sdvo_connector->is_hdmi)
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
+ pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad AVI infoframe\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1315,8 +1437,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo,
- crtc_state, conn_state);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1507,6 +1628,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
}
+ WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+ "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -1519,9 +1644,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
}
- WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
- "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
- pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+ intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
}
static void intel_disable_sdvo(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.h b/drivers/gpu/drm/i915/intel_sdvo.h
new file mode 100644
index 000000000000..c9e05bcdd141
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SDVO_H__
+#define __INTEL_SDVO_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+struct drm_i915_private;
+enum pipe;
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe);
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, enum port port);
+
+#endif /* __INTEL_SDVO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 75c872bb8cc9..57de41b1f989 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -51,7 +51,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
@@ -63,7 +63,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
@@ -208,7 +208,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -224,7 +224,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT,
SBI_BUSY,
0,
@@ -248,7 +248,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -264,7 +264,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT,
SBI_BUSY,
0,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b56a1a9ad01d..2913e89280d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -29,17 +29,36 @@
* registers; newer ones are much simpler and we can use the new DRM plane
* support.
*/
+
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_rect.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
+#include <drm/drm_rect.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
-#include <drm/drm_color_mgmt.h>
+#include "intel_atomic_plane.h"
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+bool is_planar_yuv_format(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -256,7 +275,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_rect *src = &plane_state->base.src;
- u32 src_x, src_y, src_w, src_h;
+ u32 src_x, src_y, src_w, src_h, hsub, vsub;
+ bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -274,18 +294,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
- if (fb->format->is_yuv &&
- (src_x & 1 || src_w & 1)) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
- src_x, src_w);
+ if (!fb->format->is_yuv)
+ return 0;
+
+ /* YUV specific checks */
+ if (!rotated) {
+ hsub = fb->format->hsub;
+ vsub = fb->format->vsub;
+ } else {
+ hsub = vsub = max(fb->format->hsub, fb->format->vsub);
+ }
+
+ if (src_x % hsub || src_w % hsub) {
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
+ src_x, src_w, hsub, rotated ? "rotated " : "");
return -EINVAL;
}
- if (fb->format->is_yuv &&
- fb->format->num_planes > 1 &&
- (src_y & 1 || src_h & 1)) {
- DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
- src_y, src_h);
+ if (src_y % vsub || src_h % vsub) {
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
+ src_y, src_h, vsub, rotated ? "rotated " : "");
return -EINVAL;
}
@@ -335,8 +363,8 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
- !icl_is_hdr_plane(plane)) {
+ if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
+ !icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -518,7 +546,7 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (icl_is_hdr_plane(plane)) {
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
u32 cus_ctl = 0;
if (linked) {
@@ -542,7 +570,7 @@ skl_program_plane(struct intel_plane *plane,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
- if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+ if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
@@ -609,6 +637,9 @@ skl_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+
skl_write_plane_wm(plane, crtc_state);
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
@@ -741,7 +772,12 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return SP_GAMMA_ENABLE;
+ u32 sprctl = 0;
+
+ if (crtc_state->gamma_enable)
+ sprctl |= SP_GAMMA_ENABLE;
+
+ return sprctl;
}
static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
@@ -916,12 +952,12 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 sprctl = 0;
- sprctl |= SPRITE_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ sprctl |= SPRITE_GAMMA_ENABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (crtc_state->csc_enable)
sprctl |= SPRITE_PIPE_CSC_ENABLE;
return sprctl;
@@ -1107,7 +1143,15 @@ g4x_sprite_max_stride(struct intel_plane *plane,
static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return DVS_GAMMA_ENABLE;
+ u32 dvscntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dvscntr |= DVS_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dvscntr |= DVS_PIPE_CSC_ENABLE;
+
+ return dvscntr;
}
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
@@ -1482,8 +1526,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
/*
* 90/270 is not allowed with RGB64 16:16:16:16 and
* Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards.
- * TBD: Add RGB64 case once its added in supported format
- * list.
*/
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
@@ -1491,6 +1533,15 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
break;
/* fall through */
case DRM_FORMAT_C8:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(fb->format->format,
&format_name));
@@ -1551,10 +1602,10 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
/* Display WA #1106 */
- if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+ if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
- DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n");
+ DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
return -EINVAL;
}
@@ -1790,6 +1841,52 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static const u32 icl_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -1806,6 +1903,79 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_NV12,
};
+static const u32 glk_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+};
+
+static const u32 icl_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
static const u64 skl_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
@@ -1945,10 +2115,23 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
/* fall through */
case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)
@@ -2085,8 +2268,25 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->update_slave = icl_update_slave;
if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- formats = skl_planar_formats;
- num_formats = ARRAY_SIZE(skl_planar_formats);
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ formats = icl_hdr_planar_formats;
+ num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ formats = icl_planar_formats;
+ num_formats = ARRAY_SIZE(icl_planar_formats);
+ } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
+ formats = glk_planar_formats;
+ num_formats = ARRAY_SIZE(glk_planar_formats);
+ } else {
+ formats = skl_planar_formats;
+ num_formats = ARRAY_SIZE(skl_planar_formats);
+ }
+ } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ formats = icl_hdr_plane_formats;
+ num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ formats = icl_plane_formats;
+ num_formats = ARRAY_SIZE(icl_plane_formats);
} else {
formats = skl_plane_formats;
num_formats = ARRAY_SIZE(skl_plane_formats);
diff --git a/drivers/gpu/drm/i915/intel_sprite.h b/drivers/gpu/drm/i915/intel_sprite.h
new file mode 100644
index 000000000000..84be8686be16
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SPRITE_H__
+#define __INTEL_SPRITE_H__
+
+#include <linux/types.h>
+
+#include "i915_drv.h"
+#include "intel_display.h"
+
+struct drm_device;
+struct drm_display_mode;
+struct drm_file;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+bool is_planar_yuv_format(u32 pixelformat);
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane);
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+int intel_plane_check_stride(const struct intel_plane_state *plane_state);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+ /* Don't need to do a gen check, these planes are only available on gen11 */
+ if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+ return true;
+
+ return false;
+}
+
+static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ if (INTEL_GEN(dev_priv) < 11)
+ return false;
+
+ return plane_id < PLANE_SPRITE2;
+}
+
+#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3924c4944e1f..5dbba33f4202 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -33,9 +33,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_tv.h"
enum tv_margin {
TV_MARGIN_LEFT, TV_MARGIN_TOP,
diff --git a/drivers/gpu/drm/i915/intel_tv.h b/drivers/gpu/drm/i915/intel_tv.h
new file mode 100644
index 000000000000..44518575ec5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_tv.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TV_H__
+#define __INTEL_TV_H__
+
+struct drm_i915_private;
+
+void intel_tv_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_TV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index e711eb3268bc..25b80ffe71ad 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -332,8 +332,6 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
- guc_disable_communication(guc);
-
intel_huc_sanitize(huc);
intel_guc_sanitize(guc);
@@ -377,7 +375,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
- if (ret == 0 || ret != -ETIMEDOUT)
+ if (ret == 0)
break;
DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
@@ -451,6 +449,23 @@ void intel_uc_fini_hw(struct drm_i915_private *i915)
guc_disable_communication(guc);
}
+/**
+ * intel_uc_reset_prepare - Prepare for reset
+ * @i915: device private
+ *
+ * Prepare for a full GPU reset.
+ */
+void intel_uc_reset_prepare(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+
+ if (!USES_GUC(i915))
+ return;
+
+ guc_disable_communication(guc);
+ intel_uc_sanitize(i915);
+}
+
int intel_uc_suspend(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
@@ -468,7 +483,7 @@ int intel_uc_suspend(struct drm_i915_private *i915)
return err;
}
- gen9_disable_guc_interrupts(i915);
+ guc_disable_communication(guc);
return 0;
}
@@ -484,7 +499,7 @@ int intel_uc_resume(struct drm_i915_private *i915)
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
- gen9_enable_guc_interrupts(i915);
+ guc_enable_communication(guc);
err = intel_guc_resume(guc);
if (err) {
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 870faf9011b9..c14729786652 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -38,6 +38,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_uc_init(struct drm_i915_private *dev_priv);
void intel_uc_fini(struct drm_i915_private *dev_priv);
+void intel_uc_reset_prepare(struct drm_i915_private *i915);
int intel_uc_suspend(struct drm_i915_private *dev_priv);
int intel_uc_resume(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 75646a1e0051..d1d51e1121e2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,17 +21,18 @@
* IN THE SOFTWARE.
*/
+#include <linux/pm_runtime.h>
+#include <asm/iosf_mbi.h>
+
#include "i915_drv.h"
-#include "intel_drv.h"
#include "i915_vgpu.h"
-
-#include <asm/iosf_mbi.h>
-#include <linux/pm_runtime.h>
+#include "intel_drv.h"
+#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
-#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
+#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
static const char * const forcewake_domain_names[] = {
"render",
@@ -58,16 +59,20 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
return "unknown";
}
+#define fw_ack(d) readl((d)->reg_ack)
+#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
+#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
+
static inline void
-fw_domain_reset(struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
/*
* We don't really know if the powerwell for the forcewake domain we are
* trying to reset here does exist at this point (engines could be fused
* off in ICL+), so no waiting for acks
*/
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
+ /* WaRsClearFWBitsAtReset:bdw,skl */
+ fw_clear(d, 0xffff);
}
static inline void
@@ -81,36 +86,32 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
}
static inline int
-__wait_for_ack(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
const u32 ack,
const u32 value)
{
- return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
+ return wait_for_atomic((fw_ack(d) & ack) == value,
FORCEWAKE_ACK_TIMEOUT_MS);
}
static inline int
-wait_ack_clear(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
const u32 ack)
{
- return __wait_for_ack(i915, d, ack, 0);
+ return __wait_for_ack(d, ack, 0);
}
static inline int
-wait_ack_set(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+wait_ack_set(const struct intel_uncore_forcewake_domain *d,
const u32 ack)
{
- return __wait_for_ack(i915, d, ack, ack);
+ return __wait_for_ack(d, ack, ack);
}
static inline void
-fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
+ if (wait_ack_clear(d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
@@ -121,8 +122,7 @@ enum ack_type {
};
static int
-fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
const enum ack_type type)
{
const u32 ack_bit = FORCEWAKE_KERNEL;
@@ -146,129 +146,122 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
pass = 1;
do {
- wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+ wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
- __raw_i915_write32(i915, d->reg_set,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+ fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
/* Give gt some time to relax before the polling frenzy */
udelay(10 * pass);
- wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+ wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
- ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
+ ack_detected = (fw_ack(d) & ack_bit) == value;
- __raw_i915_write32(i915, d->reg_set,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+ fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
} while (!ack_detected && pass++ < 10);
DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
intel_uncore_forcewake_domain_to_str(d->id),
type == ACK_SET ? "set" : "clear",
- __raw_i915_read32(i915, d->reg_ack),
+ fw_ack(d),
pass);
return ack_detected ? 0 : -ETIMEDOUT;
}
static inline void
-fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
- if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
+ if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
return;
- if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
- fw_domain_wait_ack_clear(i915, d);
+ if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
+ fw_domain_wait_ack_clear(d);
}
static inline void
-fw_domain_get(struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
+ fw_set(d, FORCEWAKE_KERNEL);
}
static inline void
-fw_domain_wait_ack_set(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
+ if (wait_ack_set(d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
static inline void
-fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
- if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
+ if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
return;
- if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
- fw_domain_wait_ack_set(i915, d);
+ if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
+ fw_domain_wait_ack_set(d);
}
static inline void
-fw_domain_put(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
+ fw_clear(d, FORCEWAKE_KERNEL);
}
static void
-fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
- fw_domain_wait_ack_clear(i915, d);
- fw_domain_get(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
+ fw_domain_wait_ack_clear(d);
+ fw_domain_get(d);
}
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack_set(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_wait_ack_set(d);
- i915->uncore.fw_domains_active |= fw_domains;
+ uncore->fw_domains_active |= fw_domains;
}
static void
-fw_domains_get_with_fallback(struct drm_i915_private *i915,
+fw_domains_get_with_fallback(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
- fw_domain_wait_ack_clear_fallback(i915, d);
- fw_domain_get(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
+ fw_domain_wait_ack_clear_fallback(d);
+ fw_domain_get(d);
}
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack_set_fallback(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_wait_ack_set_fallback(d);
- i915->uncore.fw_domains_active |= fw_domains;
+ uncore->fw_domains_active |= fw_domains;
}
static void
-fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_put(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_put(d);
- i915->uncore.fw_domains_active &= ~fw_domains;
+ uncore->fw_domains_active &= ~fw_domains;
}
static void
-fw_domains_reset(struct drm_i915_private *i915,
+fw_domains_reset(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
@@ -277,61 +270,61 @@ fw_domains_reset(struct drm_i915_private *i915,
if (!fw_domains)
return;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_reset(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_reset(d);
}
-static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
+static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
u32 val;
- val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG);
+ val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
return val;
}
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
/*
* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000),
+ WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
"GT thread status wait timed out\n");
}
-static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- fw_domains_get(dev_priv, fw_domains);
+ fw_domains_get(uncore, fw_domains);
/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
+ __gen6_gt_wait_for_thread_c0(uncore);
}
-static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
- u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+ u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
return count & GT_FIFO_FREE_ENTRIES_MASK;
}
-static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
u32 n;
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
- if (IS_VALLEYVIEW(dev_priv))
- n = fifo_free_entries(dev_priv);
+ if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+ n = fifo_free_entries(uncore);
else
- n = dev_priv->uncore.fifo_count;
+ n = uncore->fifo_count;
if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
- if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
+ if (wait_for_atomic((n = fifo_free_entries(uncore)) >
GT_FIFO_NUM_RESERVED_ENTRIES,
GT_FIFO_TIMEOUT_MS)) {
DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
@@ -339,7 +332,7 @@ static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
}
}
- dev_priv->uncore.fifo_count = n - 1;
+ uncore->fifo_count = n - 1;
}
static enum hrtimer_restart
@@ -347,30 +340,29 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct drm_i915_private *dev_priv =
- container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
+ struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
unsigned long irqflags;
- assert_rpm_device_not_suspended(dev_priv);
+ assert_rpm_device_not_suspended(uncore->rpm);
if (xchg(&domain->active, false))
return HRTIMER_RESTART;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
if (--domain->wake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+ uncore->funcs.force_wake_put(uncore, domain->mask);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
return HRTIMER_NORESTART;
}
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
-intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
+intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
@@ -388,7 +380,7 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
active_domains = 0;
- for_each_fw_domain(domain, dev_priv, tmp) {
+ for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
if (hrtimer_cancel(&domain->timer) == 0)
continue;
@@ -396,9 +388,9 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
intel_uncore_fw_release_timer(&domain->timer);
}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
- for_each_fw_domain(domain, dev_priv, tmp) {
+ for_each_fw_domain(domain, uncore, tmp) {
if (hrtimer_active(&domain->timer))
active_domains |= domain->mask;
}
@@ -411,185 +403,134 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
break;
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
cond_resched();
}
WARN_ON(active_domains);
- fw = dev_priv->uncore.fw_domains_active;
+ fw = uncore->fw_domains_active;
if (fw)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
+ uncore->funcs.force_wake_put(uncore, fw);
- fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
- assert_forcewakes_inactive(dev_priv);
+ fw_domains_reset(uncore, uncore->fw_domains);
+ assert_forcewakes_inactive(uncore);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
return fw; /* track the lost user forcewake domains */
}
-static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
-{
- const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- const unsigned int sets[4] = { 1, 1, 2, 2 };
- const u32 cap = dev_priv->edram_cap;
-
- return EDRAM_NUM_BANKS(cap) *
- ways[EDRAM_WAYS_IDX(cap)] *
- sets[EDRAM_SETS_IDX(cap)] *
- 1024 * 1024;
-}
-
-u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
-{
- if (!HAS_EDRAM(dev_priv))
- return 0;
-
- /* The needed capability bits for size calculation
- * are not there with pre gen9 so return 128MB always.
- */
- if (INTEL_GEN(dev_priv) < 9)
- return 128 * 1024 * 1024;
-
- return gen9_edram_size(dev_priv);
-}
-
-static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv) ||
- INTEL_GEN(dev_priv) >= 9) {
- dev_priv->edram_cap = __raw_i915_read32(dev_priv,
- HSW_EDRAM_CAP);
-
- /* NB: We can't write IDICR yet because we do not have gt funcs
- * set up */
- } else {
- dev_priv->edram_cap = 0;
- }
-
- if (HAS_EDRAM(dev_priv))
- DRM_INFO("Found %lluMB of eDRAM\n",
- intel_uncore_edram_size(dev_priv) / (1024 * 1024));
-}
-
static bool
-fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
u32 dbg;
- dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+ dbg = __raw_uncore_read32(uncore, FPGA_DBG);
if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
return false;
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
return true;
}
static bool
-vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
u32 cer;
- cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+ cer = __raw_uncore_read32(uncore, CLAIM_ER);
if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
return false;
- __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+ __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
return true;
}
static bool
-gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
+gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
u32 fifodbg;
- fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+ fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
if (unlikely(fifodbg)) {
DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
- __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
+ __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
}
return fifodbg;
}
static bool
-check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret = false;
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
- ret |= fpga_check_for_unclaimed_mmio(dev_priv);
+ if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
+ ret |= fpga_check_for_unclaimed_mmio(uncore);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret |= vlv_check_for_unclaimed_mmio(dev_priv);
+ if (intel_uncore_has_dbg_unclaimed(uncore))
+ ret |= vlv_check_for_unclaimed_mmio(uncore);
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- ret |= gen6_check_for_fifo_debug(dev_priv);
+ if (intel_uncore_has_fifo(uncore))
+ ret |= gen6_check_for_fifo_debug(uncore);
return ret;
}
-static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
unsigned int restore_forcewake)
{
/* clear out unclaimed reg detection bit */
- if (check_for_unclaimed_mmio(dev_priv))
+ if (check_for_unclaimed_mmio(uncore))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(dev_priv)) {
- __raw_i915_write32(dev_priv, GTFIFOCTL,
- __raw_i915_read32(dev_priv, GTFIFOCTL) |
- GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
- GT_FIFO_CTL_RC6_POLICY_STALL);
+ if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
+ __raw_uncore_write32(uncore, GTFIFOCTL,
+ __raw_uncore_read32(uncore, GTFIFOCTL) |
+ GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
+ GT_FIFO_CTL_RC6_POLICY_STALL);
}
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv);
+ intel_uncore_forcewake_reset(uncore);
if (restore_forcewake) {
- spin_lock_irq(&dev_priv->uncore.lock);
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- restore_forcewake);
-
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- dev_priv->uncore.fifo_count =
- fifo_free_entries(dev_priv);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
+ uncore->funcs.force_wake_get(uncore, restore_forcewake);
+
+ if (intel_uncore_has_fifo(uncore))
+ uncore->fifo_count = fifo_free_entries(uncore);
+ spin_unlock_irq(&uncore->lock);
}
iosf_mbi_punit_release();
}
-void intel_uncore_suspend(struct drm_i915_private *dev_priv)
+void intel_uncore_suspend(struct intel_uncore *uncore)
{
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &dev_priv->uncore.pmic_bus_access_nb);
- dev_priv->uncore.fw_domains_saved =
- intel_uncore_forcewake_reset(dev_priv);
+ &uncore->pmic_bus_access_nb);
+ uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
}
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+void intel_uncore_resume_early(struct intel_uncore *uncore)
{
unsigned int restore_forcewake;
- restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
- __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+ restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
+ __intel_uncore_early_sanitize(uncore, restore_forcewake);
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
- i915_check_and_clear_faults(dev_priv);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
@@ -598,15 +539,15 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
intel_sanitize_gt_powersave(dev_priv);
}
-static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= uncore->fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
if (domain->wake_count++) {
fw_domains &= ~domain->mask;
domain->active = true;
@@ -614,12 +555,12 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
}
if (fw_domains)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ uncore->funcs.force_wake_get(uncore, fw_domains);
}
/**
* intel_uncore_forcewake_get - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to get reference on
*
 * This function can be used to get GT's forcewake domain references.
@@ -630,100 +571,100 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 * call to intel_uncore_forcewake_put(). Usually caller wants all the domains
* to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
*/
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- __intel_uncore_forcewake_get(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_get(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
}
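
For context (not part of this patch), callers are expected to bracket a run of register accesses with matching get/put calls on the same domain mask; the helper below is a hypothetical sketch built on the new uncore-based API introduced here:

static u32 read_two_render_regs(struct intel_uncore *uncore,
				i915_reg_t reg_a, i915_reg_t reg_b)
{
	u32 val;

	/* keep the render well awake across both reads */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
	val = intel_uncore_read(uncore, reg_a);
	val |= intel_uncore_read(uncore, reg_b);
	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);

	return val;
}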
/**
* intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
*
* This function is a wrapper around intel_uncore_forcewake_get() to acquire
* the GT powerwell and in the process disable our debugging for the
* duration of userspace's bypass.
*/
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
- spin_lock_irq(&dev_priv->uncore.lock);
- if (!dev_priv->uncore.user_forcewake.count++) {
- intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+ spin_lock_irq(&uncore->lock);
+ if (!uncore->user_forcewake.count++) {
+ intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
/* Save and disable mmio debugging for the user bypass */
- dev_priv->uncore.user_forcewake.saved_mmio_check =
- dev_priv->uncore.unclaimed_mmio_check;
- dev_priv->uncore.user_forcewake.saved_mmio_debug =
+ uncore->user_forcewake.saved_mmio_check =
+ uncore->unclaimed_mmio_check;
+ uncore->user_forcewake.saved_mmio_debug =
i915_modparams.mmio_debug;
- dev_priv->uncore.unclaimed_mmio_check = 0;
+ uncore->unclaimed_mmio_check = 0;
i915_modparams.mmio_debug = 0;
}
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
}
/**
* intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
*
* This function complements intel_uncore_forcewake_user_get() and releases
* the GT powerwell taken on behalf of the userspace bypass.
*/
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
- spin_lock_irq(&dev_priv->uncore.lock);
- if (!--dev_priv->uncore.user_forcewake.count) {
- if (intel_uncore_unclaimed_mmio(dev_priv))
- dev_info(dev_priv->drm.dev,
+ spin_lock_irq(&uncore->lock);
+ if (!--uncore->user_forcewake.count) {
+ if (intel_uncore_unclaimed_mmio(uncore))
+ dev_info(uncore_to_i915(uncore)->drm.dev,
"Invalid mmio detected during user access\n");
- dev_priv->uncore.unclaimed_mmio_check =
- dev_priv->uncore.user_forcewake.saved_mmio_check;
+ uncore->unclaimed_mmio_check =
+ uncore->user_forcewake.saved_mmio_check;
i915_modparams.mmio_debug =
- dev_priv->uncore.user_forcewake.saved_mmio_debug;
+ uncore->user_forcewake.saved_mmio_debug;
- intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
}
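
As an illustrative sketch only (the helper name is invented), the user_get/user_put pair is meant to bracket the window in which userspace bypasses the driver's MMIO accessors:

/* hypothetical: run a userspace-driven MMIO window with mmio debugging disabled */
static void with_user_mmio_window(struct intel_uncore *uncore,
				  void (*body)(void *data), void *data)
{
	intel_uncore_forcewake_user_get(uncore);
	body(data);	/* userspace register traffic happens in here */
	intel_uncore_forcewake_user_put(uncore);
}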
/**
* intel_uncore_forcewake_get__locked - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to get reference on
*
* See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the uncore->lock spinlock.
*/
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- lockdep_assert_held(&dev_priv->uncore.lock);
+ lockdep_assert_held(&uncore->lock);
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- __intel_uncore_forcewake_get(dev_priv, fw_domains);
+ __intel_uncore_forcewake_get(uncore, fw_domains);
}
-static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= uncore->fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
if (WARN_ON(domain->wake_count == 0))
continue;
@@ -738,66 +679,66 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
/**
* intel_uncore_forcewake_put - release a forcewake domain reference
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to put references
*
* This function drops the device-level forcewakes for specified
* domains obtained by intel_uncore_forcewake_get().
*/
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- if (!dev_priv->uncore.funcs.force_wake_put)
+ if (!uncore->funcs.force_wake_put)
return;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- __intel_uncore_forcewake_put(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_put(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
*
* See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the uncore->lock spinlock.
*/
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- lockdep_assert_held(&dev_priv->uncore.lock);
+ lockdep_assert_held(&uncore->lock);
- if (!dev_priv->uncore.funcs.force_wake_put)
+ if (!uncore->funcs.force_wake_put)
return;
- __intel_uncore_forcewake_put(dev_priv, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains);
}
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
+void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- WARN(dev_priv->uncore.fw_domains_active,
+ WARN(uncore->fw_domains_active,
"Expected all fw_domains to be inactive, but %08x are still on\n",
- dev_priv->uncore.fw_domains_active);
+ uncore->fw_domains_active);
}
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
- fw_domains &= dev_priv->uncore.fw_domains;
- WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
+ fw_domains &= uncore->fw_domains;
+ WARN(fw_domains & ~uncore->fw_domains_active,
"Expected %08x fw_domains to be active, but %08x are off\n",
- fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
+ fw_domains, fw_domains & ~uncore->fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -806,7 +747,7 @@ void assert_forcewakes_active(struct drm_i915_private *dev_priv,
#define GEN11_NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
-#define __gen6_reg_read_fw_domains(offset) \
+#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
if (NEEDS_FORCE_WAKE(offset)) \
@@ -846,13 +787,13 @@ static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
})
static enum forcewake_domains
-find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
const struct intel_forcewake_range *entry;
entry = BSEARCH(offset,
- dev_priv->uncore.fw_domains_table,
- dev_priv->uncore.fw_domains_table_entries,
+ uncore->fw_domains_table,
+ uncore->fw_domains_table_entries,
fw_range_cmp);
if (!entry)
@@ -864,11 +805,11 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
* translate it here to the list of available domains.
*/
if (entry->domains == FORCEWAKE_ALL)
- return dev_priv->uncore.fw_domains;
+ return uncore->fw_domains;
- WARN(entry->domains & ~dev_priv->uncore.fw_domains,
+ WARN(entry->domains & ~uncore->fw_domains,
"Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
- entry->domains & ~dev_priv->uncore.fw_domains, offset);
+ entry->domains & ~uncore->fw_domains, offset);
return entry->domains;
}
@@ -892,19 +833,19 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-#define __fwtable_reg_read_fw_domains(offset) \
+#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
-#define __gen11_fwtable_reg_read_fw_domains(offset) \
+#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (GEN11_NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
@@ -956,7 +897,7 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
-#define __gen8_reg_write_fw_domains(offset) \
+#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
@@ -986,19 +927,19 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
-#define __fwtable_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
-#define __gen11_fwtable_reg_write_fw_domains(offset) \
+#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
@@ -1073,21 +1014,21 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
};
static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
+ilk_dummy_write(struct intel_uncore *uncore)
{
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
- __raw_i915_write32(dev_priv, MI_MODE, 0);
+ __raw_uncore_write32(uncore, MI_MODE, 0);
}
static void
-__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+__unclaimed_reg_debug(struct intel_uncore *uncore,
const i915_reg_t reg,
const bool read,
const bool before)
{
- if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+ if (WARN(check_for_unclaimed_mmio(uncore) && !before,
"Unclaimed %s register 0x%x\n",
read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
@@ -1096,7 +1037,7 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
}
static inline void
-unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+unclaimed_reg_debug(struct intel_uncore *uncore,
const i915_reg_t reg,
const bool read,
const bool before)
@@ -1104,12 +1045,12 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
if (likely(!i915_modparams.mmio_debug))
return;
- __unclaimed_reg_debug(dev_priv, reg, read, before);
+ __unclaimed_reg_debug(uncore, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
@@ -1117,18 +1058,18 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
#define __gen2_read(x) \
static u##x \
-gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN2_READ_FOOTER; \
}
#define __gen5_read(x) \
static u##x \
-gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
- ilk_dummy_write(dev_priv); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ ilk_dummy_write(uncore); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN2_READ_FOOTER; \
}
@@ -1151,53 +1092,53 @@ __gen2_read(64)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
u##x val = 0; \
- assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
- unclaimed_reg_debug(dev_priv, reg, true, true)
+ __assert_rpm_wakelock_held(uncore->rpm); \
+ spin_lock_irqsave(&uncore->lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, true, true)
#define GEN6_READ_FOOTER \
- unclaimed_reg_debug(dev_priv, reg, true, false); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, true, false); \
+ spin_unlock_irqrestore(&uncore->lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+static noinline void ___force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
fw_domain_arm_timer(domain);
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ uncore->funcs.force_wake_get(uncore, fw_domains);
}
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+static inline void __force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
if (WARN_ON(!fw_domains))
return;
/* Turn on all requested but inactive supported forcewake domains. */
- fw_domains &= dev_priv->uncore.fw_domains;
- fw_domains &= ~dev_priv->uncore.fw_domains_active;
+ fw_domains &= uncore->fw_domains;
+ fw_domains &= ~uncore->fw_domains_active;
if (fw_domains)
- ___force_wake_auto(dev_priv, fw_domains);
+ ___force_wake_auto(uncore, fw_domains);
}
#define __gen_read(func, x) \
static u##x \
-func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __##func##_reg_read_fw_domains(offset); \
+ fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ __force_wake_auto(uncore, fw_engine); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
@@ -1225,24 +1166,24 @@ __gen6_read(64)
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- assert_rpm_wakelock_held(dev_priv); \
+ __assert_rpm_wakelock_held(uncore->rpm); \
#define GEN2_WRITE_FOOTER
#define __gen2_write(x) \
static void \
-gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN2_WRITE_FOOTER; \
}
#define __gen5_write(x) \
static void \
-gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
- ilk_dummy_write(dev_priv); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ ilk_dummy_write(uncore); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN2_WRITE_FOOTER; \
}
@@ -1263,33 +1204,33 @@ __gen2_write(32)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
- unclaimed_reg_debug(dev_priv, reg, false, true)
+ __assert_rpm_wakelock_held(uncore->rpm); \
+ spin_lock_irqsave(&uncore->lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, false, true)
#define GEN6_WRITE_FOOTER \
- unclaimed_reg_debug(dev_priv, reg, false, false); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+ unclaimed_reg_debug(uncore, reg, false, false); \
+ spin_unlock_irqrestore(&uncore->lock, irqflags)
#define __gen6_write(x) \
static void \
-gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE(offset)) \
- __gen6_gt_wait_for_fifo(dev_priv); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __gen6_gt_wait_for_fifo(uncore); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
#define __gen_write(func, x) \
static void \
-func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __##func##_reg_write_fw_domains(offset); \
+ fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __force_wake_auto(uncore, fw_engine); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
@@ -1316,23 +1257,23 @@ __gen6_write(32)
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
- (i915)->uncore.funcs.mmio_writeb = x##_write8; \
- (i915)->uncore.funcs.mmio_writew = x##_write16; \
- (i915)->uncore.funcs.mmio_writel = x##_write32; \
+ (uncore)->funcs.mmio_writeb = x##_write8; \
+ (uncore)->funcs.mmio_writew = x##_write16; \
+ (uncore)->funcs.mmio_writel = x##_write32; \
} while (0)
-#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
- (i915)->uncore.funcs.mmio_readb = x##_read8; \
- (i915)->uncore.funcs.mmio_readw = x##_read16; \
- (i915)->uncore.funcs.mmio_readl = x##_read32; \
- (i915)->uncore.funcs.mmio_readq = x##_read64; \
+ (uncore)->funcs.mmio_readb = x##_read8; \
+ (uncore)->funcs.mmio_readw = x##_read16; \
+ (uncore)->funcs.mmio_readl = x##_read32; \
+ (uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
-static void fw_domain_init(struct drm_i915_private *dev_priv,
+static void fw_domain_init(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id,
i915_reg_t reg_set,
i915_reg_t reg_ack)
@@ -1342,7 +1283,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
- d = &dev_priv->uncore.fw_domain[domain_id];
+ d = &uncore->fw_domain[domain_id];
WARN_ON(d->wake_count);
@@ -1350,8 +1291,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
WARN_ON(!i915_mmio_reg_valid(reg_ack));
d->wake_count = 0;
- d->reg_set = reg_set;
- d->reg_ack = reg_ack;
+ d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
+ d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
d->id = domain_id;
@@ -1371,12 +1312,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
d->timer.function = intel_uncore_fw_release_timer;
- dev_priv->uncore.fw_domains |= BIT(domain_id);
+ uncore->fw_domains |= BIT(domain_id);
- fw_domain_reset(dev_priv, d);
+ fw_domain_reset(d);
}
-static void fw_domain_fini(struct drm_i915_private *dev_priv,
+static void fw_domain_fini(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id)
{
struct intel_uncore_forcewake_domain *d;
@@ -1384,85 +1325,76 @@ static void fw_domain_fini(struct drm_i915_private *dev_priv,
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
- d = &dev_priv->uncore.fw_domain[domain_id];
+ d = &uncore->fw_domain[domain_id];
WARN_ON(d->wake_count);
WARN_ON(hrtimer_cancel(&d->timer));
memset(d, 0, sizeof(*d));
- dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+ uncore->fw_domains &= ~BIT(domain_id);
}
-static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
+static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
- if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
- return;
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
- if (IS_GEN(dev_priv, 6)) {
- dev_priv->uncore.fw_reset = 0;
- dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
- dev_priv->uncore.fw_clear = 0;
- } else {
- /* WaRsClearFWBitsAtReset:bdw,skl */
- dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
- dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
- dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
- }
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
int i;
- dev_priv->uncore.funcs.force_wake_get =
+ uncore->funcs.force_wake_get =
fw_domains_get_with_fallback;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ if (!HAS_ENGINE(i915, _VCS(i)))
continue;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
FORCEWAKE_MEDIA_VDBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
}
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ if (!HAS_ENGINE(i915, _VECS(i)))
continue;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_fallback;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ uncore->funcs.force_wake_get = fw_domains_get;
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
- } else if (IS_IVYBRIDGE(dev_priv)) {
+ } else if (IS_IVYBRIDGE(i915)) {
u32 ecobus;
/* IVB configs may use multi-threaded forcewake */
@@ -1474,9 +1406,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
- dev_priv->uncore.funcs.force_wake_get =
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ uncore->funcs.force_wake_put = fw_domains_put;
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case of MT access is
@@ -1485,41 +1417,41 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
* before the ecobus check.
*/
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- __raw_posting_read(dev_priv, ECOBUS);
+ __raw_uncore_write32(uncore, FORCEWAKE, 0);
+ __raw_posting_read(uncore, ECOBUS);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
- spin_lock_irq(&dev_priv->uncore.lock);
- fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
- ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- fw_domains_put(dev_priv, FORCEWAKE_RENDER);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
+ fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
+ ecobus = __raw_uncore_read32(uncore, ECOBUS);
+ fw_domains_put(uncore, FORCEWAKE_RENDER);
+ spin_unlock_irq(&uncore->lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN(dev_priv, 6)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_GEN(i915, 6)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
/* All future platforms are expected to require complex power gating */
- WARN_ON(dev_priv->uncore.fw_domains == 0);
+ WARN_ON(uncore->fw_domains == 0);
}
-#define ASSIGN_FW_DOMAINS_TABLE(d) \
+#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
- dev_priv->uncore.fw_domains_table = \
+ (uncore)->fw_domains_table = \
(struct intel_forcewake_range *)(d); \
- dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+ (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
@@ -1544,66 +1476,132 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
* the access.
*/
disable_rpm_wakeref_asserts(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
enable_rpm_wakeref_asserts(dev_priv);
break;
case MBI_PMIC_BUS_ACCESS_END:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
break;
}
return NOTIFY_OK;
}
-void intel_uncore_init(struct drm_i915_private *dev_priv)
+static int uncore_mmio_setup(struct intel_uncore *uncore)
{
- i915_check_vgpu(dev_priv);
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct pci_dev *pdev = i915->drm.pdev;
+ int mmio_bar;
+ int mmio_size;
- intel_uncore_edram_detect(dev_priv);
- intel_uncore_fw_domains_init(dev_priv);
- __intel_uncore_early_sanitize(dev_priv, 0);
+ mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
+ /*
+ * Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap to avoid
+ * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (INTEL_GEN(i915) < 5)
+ mmio_size = 512 * 1024;
+ else
+ mmio_size = 2 * 1024 * 1024;
+ uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
+ if (uncore->regs == NULL) {
+ DRM_ERROR("failed to map registers\n");
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void uncore_mmio_cleanup(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ pci_iounmap(pdev, uncore->regs);
+}
+
+void intel_uncore_init_early(struct intel_uncore *uncore)
+{
+ spin_lock_init(&uncore->lock);
+}
+
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ int ret;
+
+ ret = uncore_mmio_setup(uncore);
+ if (ret)
+ return ret;
+
+ i915_check_vgpu(i915);
+
+ if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+ uncore->flags |= UNCORE_HAS_FORCEWAKE;
- dev_priv->uncore.unclaimed_mmio_check = 1;
- dev_priv->uncore.pmic_bus_access_nb.notifier_call =
+ intel_uncore_fw_domains_init(uncore);
+ __intel_uncore_early_sanitize(uncore, 0);
+
+ uncore->unclaimed_mmio_check = 1;
+ uncore->pmic_bus_access_nb.notifier_call =
i915_pmic_bus_access_notifier;
- if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
- } else if (IS_GEN(dev_priv, 5)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
- } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
-
- if (IS_VALLEYVIEW(dev_priv)) {
- ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ uncore->rpm = &i915->runtime_pm;
+
+ if (!intel_uncore_has_forcewake(uncore)) {
+ if (IS_GEN(i915, 5)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
+ } else {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
+ }
+ } else if (IS_GEN_RANGE(i915, 6, 7)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
+
+ if (IS_VALLEYVIEW(i915)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN(dev_priv, 8)) {
- if (IS_CHERRYVIEW(dev_priv)) {
- ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ } else if (IS_GEN(i915, 8)) {
+ if (IS_CHERRYVIEW(i915)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
- ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
}
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
+ if (HAS_FPGA_DBG_UNCLAIMED(i915))
+ uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
+
+ if (IS_GEN_RANGE(i915, 6, 7))
+ uncore->flags |= UNCORE_HAS_FIFO;
+
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+ return 0;
}
/*
@@ -1611,45 +1609,48 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
* the forcewake domains. Prune them, to make sure they only reference existing
* engines.
*/
-void intel_uncore_prune(struct drm_i915_private *dev_priv)
+void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
- if (INTEL_GEN(dev_priv) >= 11) {
- enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+ if (INTEL_GEN(i915) >= 11) {
+ enum forcewake_domains fw_domains = uncore->fw_domains;
enum forcewake_domain_id domain_id;
int i;
for (i = 0; i < I915_MAX_VCS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
- if (HAS_ENGINE(dev_priv, _VCS(i)))
+ if (HAS_ENGINE(i915, _VCS(i)))
continue;
if (fw_domains & BIT(domain_id))
- fw_domain_fini(dev_priv, domain_id);
+ fw_domain_fini(uncore, domain_id);
}
for (i = 0; i < I915_MAX_VECS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
- if (HAS_ENGINE(dev_priv, _VECS(i)))
+ if (HAS_ENGINE(i915, _VECS(i)))
continue;
if (fw_domains & BIT(domain_id))
- fw_domain_fini(dev_priv, domain_id);
+ fw_domain_fini(uncore, domain_id);
}
}
}
-void intel_uncore_fini(struct drm_i915_private *dev_priv)
+void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
/* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(dev_priv);
+ intel_uncore_sanitize(uncore_to_i915(uncore));
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv);
+ &uncore->pmic_bus_access_nb);
+ intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
+ uncore_mmio_cleanup(uncore);
}
static const struct reg_whitelist {
@@ -1717,7 +1718,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
/**
* __intel_wait_for_register_fw - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
@@ -1741,7 +1742,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
*
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
@@ -1750,7 +1751,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
u32 *out_value)
{
u32 uninitialized_var(reg_value);
-#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
+#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
int ret;
/* Catch any overuse of this function */
@@ -1772,7 +1773,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
/**
* __intel_wait_for_register - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
@@ -1789,33 +1790,34 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
*
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms,
- u32 *out_value)
+int __intel_wait_for_register(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value)
{
unsigned fw =
- intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
u32 reg_value;
int ret;
might_sleep_if(slow_timeout_ms);
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, fw);
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw);
- ret = __intel_wait_for_register_fw(dev_priv,
+ ret = __intel_wait_for_register_fw(uncore,
reg, mask, value,
fast_timeout_us, 0, &reg_value);
- intel_uncore_forcewake_put__locked(dev_priv, fw);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock_irq(&uncore->lock);
if (ret && slow_timeout_ms)
- ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
+ ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
+ reg),
(reg_value & mask) == value,
slow_timeout_ms * 1000, 10, 1000);
@@ -1828,82 +1830,90 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
return ret;
}
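
For reference, most callers would go through the intel_wait_for_register() wrapper declared in intel_uncore.h; the register, bit and timeout below are placeholders rather than values from this patch:

/* placeholder register/bit: poll until the bit reads back as set, up to 100 ms */
static int wait_for_status_bit(struct intel_uncore *uncore,
			       i915_reg_t status_reg, u32 bit)
{
	return intel_wait_for_register(uncore, status_reg, bit, bit, 100);
}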
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
- return check_for_unclaimed_mmio(dev_priv);
+ return check_for_unclaimed_mmio(uncore);
}
bool
-intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
+intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
- spin_lock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
- if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0))
+ if (unlikely(uncore->unclaimed_mmio_check <= 0))
goto out;
- if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
+ if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
if (!i915_modparams.mmio_debug) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
i915_modparams.mmio_debug++;
}
- dev_priv->uncore.unclaimed_mmio_check--;
+ uncore->unclaimed_mmio_check--;
ret = true;
}
out:
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
return ret;
}
static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
i915_reg_t reg)
{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (INTEL_GEN(dev_priv) >= 11) {
- fw_domains = __gen11_fwtable_reg_read_fw_domains(offset);
- } else if (HAS_FWTABLE(dev_priv)) {
- fw_domains = __fwtable_reg_read_fw_domains(offset);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- fw_domains = __gen6_reg_read_fw_domains(offset);
+ if (INTEL_GEN(i915) >= 11) {
+ fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
+ } else if (HAS_FWTABLE(i915)) {
+ fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
+ } else if (INTEL_GEN(i915) >= 6) {
+ fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
} else {
- WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
+ /* on devices with FW we expect to hit one of the above cases */
+ if (intel_uncore_has_forcewake(uncore))
+ MISSING_CASE(INTEL_GEN(i915));
+
fw_domains = 0;
}
- WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
i915_reg_t reg)
{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (INTEL_GEN(dev_priv) >= 11) {
- fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
- } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
- fw_domains = __fwtable_reg_write_fw_domains(offset);
- } else if (IS_GEN(dev_priv, 8)) {
- fw_domains = __gen8_reg_write_fw_domains(offset);
- } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
+ if (INTEL_GEN(i915) >= 11) {
+ fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
+ } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
+ fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
+ } else if (IS_GEN(i915, 8)) {
+ fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
+ } else if (IS_GEN_RANGE(i915, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
} else {
- WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
+ /* on devices with FW we expect to hit one of the above cases */
+ if (intel_uncore_has_forcewake(uncore))
+ MISSING_CASE(INTEL_GEN(i915));
+
fw_domains = 0;
}
- WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
@@ -1911,7 +1921,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
/**
* intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
* a register
- * @dev_priv: pointer to struct drm_i915_private
+ * @uncore: pointer to struct intel_uncore
* @reg: register in question
* @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
*
@@ -1923,21 +1933,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
* callers to do FIFO management on their own or risk losing writes.
*/
enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op)
{
enum forcewake_domains fw_domains = 0;
WARN_ON(!op);
- if (intel_vgpu_active(dev_priv))
+ if (!intel_uncore_has_forcewake(uncore))
return 0;
if (op & FW_REG_READ)
- fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+ fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
if (op & FW_REG_WRITE)
- fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+ fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
return fw_domains;
}
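
A hedged sketch of the pattern this enables (helper name invented; on gen6/7 the FIFO-management caveat noted above still applies): take exactly the forcewake the register needs, then use the raw _fw accessor under uncore->lock:

static void write_reg_with_needed_fw(struct intel_uncore *uncore,
				     i915_reg_t reg, u32 val)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_WRITE);
	unsigned long flags;

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);
	intel_uncore_write_fw(uncore, reg, val);	/* untraced raw write */
	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);
}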
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e5e157d288de..d6af3de70121 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -28,10 +28,13 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "i915_reg.h"
struct drm_i915_private;
+struct i915_runtime_pm;
+struct intel_uncore;
enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
@@ -62,25 +65,25 @@ enum forcewake_domains {
};
struct intel_uncore_funcs {
- void (*force_wake_get)(struct drm_i915_private *dev_priv,
+ void (*force_wake_get)(struct intel_uncore *uncore,
enum forcewake_domains domains);
- void (*force_wake_put)(struct drm_i915_private *dev_priv,
+ void (*force_wake_put)(struct intel_uncore *uncore,
enum forcewake_domains domains);
- u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+ u8 (*mmio_readb)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+ u16 (*mmio_readw)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+ u32 (*mmio_readl)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+ u64 (*mmio_readq)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+ void (*mmio_writeb)(struct intel_uncore *uncore,
i915_reg_t r, u8 val, bool trace);
- void (*mmio_writew)(struct drm_i915_private *dev_priv,
+ void (*mmio_writew)(struct intel_uncore *uncore,
i915_reg_t r, u16 val, bool trace);
- void (*mmio_writel)(struct drm_i915_private *dev_priv,
+ void (*mmio_writel)(struct intel_uncore *uncore,
i915_reg_t r, u32 val, bool trace);
};
@@ -92,8 +95,18 @@ struct intel_forcewake_range {
};
struct intel_uncore {
+ void __iomem *regs;
+
+ struct i915_runtime_pm *rpm;
+
spinlock_t lock; /** lock is also taken in irq contexts. */
+ unsigned int flags;
+#define UNCORE_HAS_FORCEWAKE BIT(0)
+#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
+#define UNCORE_HAS_DBG_UNCLAIMED BIT(2)
+#define UNCORE_HAS_FIFO BIT(3)
+
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
@@ -106,18 +119,14 @@ struct intel_uncore {
enum forcewake_domains fw_domains_active;
enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
- u32 fw_set;
- u32 fw_clear;
- u32 fw_reset;
-
struct intel_uncore_forcewake_domain {
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned int wake_count;
bool active;
struct hrtimer timer;
- i915_reg_t reg_set;
- i915_reg_t reg_ack;
+ u32 __iomem *reg_set;
+ u32 __iomem *reg_ack;
} fw_domain[FW_DOMAIN_ID_COUNT];
struct {
@@ -131,86 +140,257 @@ struct intel_uncore {
};
/* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
for (tmp__ = (mask__); \
- tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+ tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+
+#define for_each_fw_domain(domain__, uncore__, tmp__) \
+ for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
+
+static inline struct intel_uncore *
+forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
+{
+ return container_of(d, struct intel_uncore, fw_domain[d->id]);
+}
-#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
- for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
+static inline bool
+intel_uncore_has_forcewake(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FORCEWAKE;
+}
+static inline bool
+intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
+}
+
+static inline bool
+intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
+}
+
+static inline bool
+intel_uncore_has_fifo(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FIFO;
+}
void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init(struct drm_i915_private *dev_priv);
-void intel_uncore_prune(struct drm_i915_private *dev_priv);
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
-bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-void intel_uncore_fini(struct drm_i915_private *dev_priv);
-void intel_uncore_suspend(struct drm_i915_private *dev_priv);
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
-
-u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void intel_uncore_init_early(struct intel_uncore *uncore);
+int intel_uncore_init_mmio(struct intel_uncore *uncore);
+void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
+bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
+void intel_uncore_fini_mmio(struct intel_uncore *uncore);
+void intel_uncore_suspend(struct intel_uncore *uncore);
+void intel_uncore_resume_early(struct intel_uncore *uncore);
+void intel_uncore_runtime_resume(struct intel_uncore *uncore);
+
+void assert_forcewakes_inactive(struct intel_uncore *uncore);
+void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op);
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
* Must be used with I915_READ_FW and friends.
*/
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms,
u32 *out_value);
-static inline
-int intel_wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
- unsigned int timeout_ms)
+static inline int
+intel_wait_for_register(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int timeout_ms)
{
- return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
+ return __intel_wait_for_register(uncore, reg, mask, value, 2,
timeout_ms, NULL);
}
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms,
u32 *out_value);
-static inline
-int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
+static inline int
+intel_wait_for_register_fw(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
unsigned int timeout_ms)
{
- return __intel_wait_for_register_fw(dev_priv, reg, mask, value,
+ return __intel_wait_for_register_fw(uncore, reg, mask, value,
2, timeout_ms, NULL);
}
+/* register access functions */
+#define __raw_read(x__, s__) \
+static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
+ i915_reg_t reg) \
+{ \
+ return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
+}
+
+#define __raw_write(x__, s__) \
+static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
+ i915_reg_t reg, u##x__ val) \
+{ \
+ write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
+}
+__raw_read(8, b)
+__raw_read(16, w)
+__raw_read(32, l)
+__raw_read(64, q)
+
+__raw_write(8, b)
+__raw_write(16, w)
+__raw_write(32, l)
+__raw_write(64, q)
+
+#undef __raw_read
+#undef __raw_write
+
+#define __uncore_read(name__, x__, s__, trace__) \
+static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
+ i915_reg_t reg) \
+{ \
+ return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
+}
+
+#define __uncore_write(name__, x__, s__, trace__) \
+static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
+ i915_reg_t reg, u##x__ val) \
+{ \
+ uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
+}
+
+__uncore_read(read8, 8, b, true)
+__uncore_read(read16, 16, w, true)
+__uncore_read(read, 32, l, true)
+__uncore_read(read16_notrace, 16, w, false)
+__uncore_read(read_notrace, 32, l, false)
+
+__uncore_write(write8, 8, b, true)
+__uncore_write(write16, 16, w, true)
+__uncore_write(write, 32, l, true)
+__uncore_write(write_notrace, 32, l, false)
+
+/* Be very careful with read/write 64-bit values. On 32-bit machines, they
+ * will be implemented using 2 32-bit writes in an arbitrary order with
+ * an arbitrary delay between them. This can cause the hardware to
+ * act upon the intermediate value, possibly leading to corruption and
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * uncore->funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actually support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
+ */
+__uncore_read(read64, 64, q, true)
+
+static inline u64
+intel_uncore_read64_2x32(struct intel_uncore *uncore,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
+{
+ u32 upper, lower, old_upper, loop = 0;
+ upper = intel_uncore_read(uncore, upper_reg);
+ do {
+ old_upper = upper;
+ lower = intel_uncore_read(uncore, lower_reg);
+ upper = intel_uncore_read(uncore, upper_reg);
+ } while (upper != old_upper && loop++ < 2);
+ return (u64)upper << 32 | lower;
+}
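
Usage is straightforward: the helper re-reads the upper half so a carry between the two 32-bit reads cannot produce a torn value. The register names below are placeholders for a counter exposed as separate LOW/HIGH registers:

/* placeholder registers: sample a 64-bit counter split across two 32-bit halves */
static u64 sample_split_counter(struct intel_uncore *uncore,
				i915_reg_t cnt_low, i915_reg_t cnt_high)
{
	return intel_uncore_read64_2x32(uncore, cnt_low, cnt_high);
}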
+
+#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
+#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))
+
+#undef __uncore_read
+#undef __uncore_write
+
+/* These are untraced mmio-accessors that are only valid to be used inside
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
+ * controlled.
+ *
+ * Think twice, and think again, before using these.
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&uncore->lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&uncore->lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
+ */
+#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
+#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
+#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
+#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
+
+static inline void intel_uncore_rmw(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 val;
+
+ val = intel_uncore_read(uncore, reg);
+ val &= ~clear;
+ val |= set;
+ intel_uncore_write(uncore, reg, val);
+}
+
+static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 val;
+
+ val = intel_uncore_read_fw(uncore, reg);
+ val &= ~clear;
+ val |= set;
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bf3662ad5fed..fdbbb9a53804 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -772,6 +772,9 @@ struct psr_table {
/* TP wake up time in multiple of 100 */
u16 tp1_wakeup_time;
u16 tp2_tp3_wakeup_time;
+
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
struct bdb_psr {
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index 23abf03736e7..3f9921ba4a76 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -317,129 +317,6 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
}
}
-static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
-{
- unsigned long groups_per_line = 0;
- unsigned long groups_total = 0;
- unsigned long num_extra_mux_bits = 0;
- unsigned long slice_bits = 0;
- unsigned long hrd_delay = 0;
- unsigned long final_scale = 0;
- unsigned long rbs_min = 0;
-
- /* Number of groups used to code each line of a slice */
- groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
- DSC_RC_PIXELS_PER_GROUP);
-
- /* chunksize in Bytes */
- vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
- vdsc_cfg->bits_per_pixel,
- (8 * 16));
-
- if (vdsc_cfg->convert_rgb)
- num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4)
- - 2);
- else
- num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4) +
- 2 * (4 * vdsc_cfg->bits_per_component) - 2;
- /* Number of bits in one Slice */
- slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
-
- while ((num_extra_mux_bits > 0) &&
- ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
- num_extra_mux_bits--;
-
- if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
- vdsc_cfg->initial_scale_value = groups_per_line + 8;
-
- /* scale_decrement_interval calculation according to DSC spec 1.11 */
- if (vdsc_cfg->initial_scale_value > 8)
- vdsc_cfg->scale_decrement_interval = groups_per_line /
- (vdsc_cfg->initial_scale_value - 8);
- else
- vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
-
- vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
- (vdsc_cfg->initial_xmit_delay *
- vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
-
- if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
- DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
- return -ERANGE;
- }
-
- final_scale = (vdsc_cfg->rc_model_size * 8) /
- (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
- if (vdsc_cfg->slice_height > 1)
- /*
- * NflBpgOffset is 16 bit value with 11 fractional bits
- * hence we multiply by 2^11 for preserving the
- * fractional part
- */
- vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
- (vdsc_cfg->slice_height - 1));
- else
- vdsc_cfg->nfl_bpg_offset = 0;
-
- /* 2^16 - 1 */
- if (vdsc_cfg->nfl_bpg_offset > 65535) {
- DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
- return -ERANGE;
- }
-
- /* Number of groups used to code the entire slice */
- groups_total = groups_per_line * vdsc_cfg->slice_height;
-
- /* slice_bpg_offset is 16 bit value with 11 fractional bits */
- vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
- vdsc_cfg->initial_offset +
- num_extra_mux_bits) << 11),
- groups_total);
-
- if (final_scale > 9) {
- /*
- * ScaleIncrementInterval =
- * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
- * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
- * we need divide by 2^11 from pstDscCfg values
- */
- vdsc_cfg->scale_increment_interval =
- (vdsc_cfg->final_offset * (1 << 11)) /
- ((vdsc_cfg->nfl_bpg_offset +
- vdsc_cfg->slice_bpg_offset) *
- (final_scale - 9));
- } else {
- /*
- * If finalScaleValue is less than or equal to 9, a value of 0 should
- * be used to disable the scale increment at the end of the slice
- */
- vdsc_cfg->scale_increment_interval = 0;
- }
-
- if (vdsc_cfg->scale_increment_interval > 65535) {
- DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
- return -ERANGE;
- }
-
- /*
- * DSC spec mentions that bits_per_pixel specifies the target
- * bits/pixel (bpp) rate that is used by the encoder,
- * in steps of 1/16 of a bit per pixel
- */
- rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
- DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
- vdsc_cfg->bits_per_pixel, 16) +
- groups_per_line * vdsc_cfg->first_line_bpg_offset;
-
- hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
- vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
- vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
-
- return 0;
-}
-
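Since bits_per_pixel in the code above is carried in units of 1/16 bpp, a quick worked example of the chunk-size formula with assumed values (a 1920-pixel-wide slice at 8 bpp, i.e. 128 sixteenths):

	slice_chunk_size = DIV_ROUND_UP(slice_width * bits_per_pixel, 8 * 16)
	                 = DIV_ROUND_UP(1920 * 128, 128)
	                 = 1920 bytes per chunk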
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
@@ -491,7 +368,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
/* Gen 11 does not support YCbCr */
- vdsc_cfg->enable422 = false;
+ vdsc_cfg->simple_422 = false;
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
vdsc_cfg->block_pred_enable =
@@ -574,7 +451,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
- return intel_compute_rc_parameters(vdsc_cfg);
+ return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
enum intel_display_power_domain
@@ -618,7 +495,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_BLOCK_PREDICTION;
if (vdsc_cfg->convert_rgb)
pps_val |= DSC_COLOR_SPACE_CONVERSION;
- if (vdsc_cfg->enable422)
+ if (vdsc_cfg->simple_422)
pps_val |= DSC_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
@@ -1004,10 +881,10 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
- drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header);
/* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
- drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+ drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
intel_dig_port->write_infoframe(encoder, crtc_state,
DP_SDP_PPS, &dp_dsc_pps_sdp,
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 15f4a6dee5aa..9682dd575152 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -541,10 +541,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
- /* WaEnableStateCacheRedirectToCS:icl */
- WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
- GEN11_STATE_CACHE_REDIRECT_TO_CS);
-
/* Wa_2006665173:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
@@ -555,6 +551,11 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN10_CACHE_MODE_SS,
0, /* write-only, so skip validation */
_MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+
+ /* WaDisableGPGPUMidThreadPreemption:icl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
@@ -564,26 +565,26 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
wa_init_start(wal, "context");
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- bdw_ctx_workarounds_init(engine);
- else if (IS_CHERRYVIEW(i915))
- chv_ctx_workarounds_init(engine);
- else if (IS_SKYLAKE(i915))
- skl_ctx_workarounds_init(engine);
- else if (IS_BROXTON(i915))
- bxt_ctx_workarounds_init(engine);
- else if (IS_KABYLAKE(i915))
- kbl_ctx_workarounds_init(engine);
- else if (IS_GEMINILAKE(i915))
- glk_ctx_workarounds_init(engine);
- else if (IS_COFFEELAKE(i915))
- cfl_ctx_workarounds_init(engine);
+ if (IS_GEN(i915, 11))
+ icl_ctx_workarounds_init(engine);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine);
- else if (IS_ICELAKE(i915))
- icl_ctx_workarounds_init(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_ctx_workarounds_init(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine);
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine);
+ else if (INTEL_GEN(i915) < 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -724,9 +725,9 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
+wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
u32 mcr_slice_subslice_mask;
/*
@@ -742,14 +743,15 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
* something more complex that requires checking the range of every
* MMIO read).
*/
- if (INTEL_GEN(dev_priv) >= 10 &&
+ if (INTEL_GEN(i915) >= 10 &&
is_power_of_2(sseu->slice_mask)) {
/*
* read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
* enabled subslice, no need to redirect MCR packet
*/
u32 slice = fls(sseu->slice_mask);
- u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+ u32 fuse3 =
+ intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
u8 ss_mask = sseu->subslice_mask[slice];
u8 enabled_mask = (ss_mask | ss_mask >>
@@ -763,7 +765,7 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(i915) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
else
@@ -783,7 +785,7 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
wa_write_masked_or(wal,
GEN8_MCR_SELECTOR,
mcr_slice_subslice_mask,
- intel_calculate_mcr_s_ss_select(dev_priv));
+ intel_calculate_mcr_s_ss_select(i915));
}
static void
@@ -862,26 +864,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- return;
- else if (IS_CHERRYVIEW(i915))
- return;
- else if (IS_SKYLAKE(i915))
- skl_gt_workarounds_init(i915, wal);
- else if (IS_BROXTON(i915))
- bxt_gt_workarounds_init(i915, wal);
- else if (IS_KABYLAKE(i915))
- kbl_gt_workarounds_init(i915, wal);
- else if (IS_GEMINILAKE(i915))
- glk_gt_workarounds_init(i915, wal);
- else if (IS_COFFEELAKE(i915))
- cfl_gt_workarounds_init(i915, wal);
+ if (IS_GEN(i915, 11))
+ icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
- else if (IS_ICELAKE(i915))
- icl_gt_workarounds_init(i915, wal);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915, wal);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915, wal);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915, wal);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915, wal);
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915, wal);
+ else if (INTEL_GEN(i915) <= 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
}
@@ -896,15 +894,14 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
}
static enum forcewake_domains
-wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
- const struct i915_wa_list *wal)
+wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw = 0;
struct i915_wa *wa;
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw |= intel_uncore_forcewake_for_reg(uncore,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
@@ -913,7 +910,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
}
static void
-wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw;
unsigned long flags;
@@ -923,27 +920,22 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
if (!wal->count)
return;
- fw = wal_get_fw_for_rmw(dev_priv, wal);
+ fw = wal_get_fw_for_rmw(uncore, wal);
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- intel_uncore_forcewake_get__locked(dev_priv, fw);
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- u32 val = I915_READ_FW(wa->reg);
-
- val &= ~wa->mask;
- val |= wa->val;
-
- I915_WRITE_FW(wa->reg, val);
+ intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
}
- intel_uncore_forcewake_put__locked(dev_priv, fw);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock_irqrestore(&uncore->lock, flags);
}
-void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+void intel_gt_apply_workarounds(struct drm_i915_private *i915)
{
- wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
+ wa_list_apply(&i915->uncore, &i915->gt_wa_list);
}
static bool
@@ -960,7 +952,7 @@ wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
return true;
}
-static bool wa_list_verify(struct drm_i915_private *dev_priv,
+static bool wa_list_verify(struct intel_uncore *uncore,
const struct i915_wa_list *wal,
const char *from)
{
@@ -969,15 +961,17 @@ static bool wa_list_verify(struct drm_i915_private *dev_priv,
bool ok = true;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+ ok &= wa_verify(wa,
+ intel_uncore_read(uncore, wa->reg),
+ wal->name, from);
return ok;
}
-bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
const char *from)
{
- return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
+ return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from);
}
static void
@@ -1052,6 +1046,9 @@ static void icl_whitelist_build(struct i915_wa_list *w)
/* WaAllowUMDToModifySamplerMode:icl */
whitelist_reg(w, GEN10_SAMPLER_MODE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
@@ -1059,30 +1056,26 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
wa_init_start(w, "whitelist");
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- return;
- else if (IS_CHERRYVIEW(i915))
- return;
- else if (IS_SKYLAKE(i915))
- skl_whitelist_build(w);
- else if (IS_BROXTON(i915))
- bxt_whitelist_build(w);
- else if (IS_KABYLAKE(i915))
- kbl_whitelist_build(w);
- else if (IS_GEMINILAKE(i915))
- glk_whitelist_build(w);
- else if (IS_COFFEELAKE(i915))
- cfl_whitelist_build(w);
+ if (IS_GEN(i915, 11))
+ icl_whitelist_build(w);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(w);
- else if (IS_ICELAKE(i915))
- icl_whitelist_build(w);
+ else if (IS_COFFEELAKE(i915))
+ cfl_whitelist_build(w);
+ else if (IS_GEMINILAKE(i915))
+ glk_whitelist_build(w);
+ else if (IS_KABYLAKE(i915))
+ kbl_whitelist_build(w);
+ else if (IS_BROXTON(i915))
+ bxt_whitelist_build(w);
+ else if (IS_SKYLAKE(i915))
+ skl_whitelist_build(w);
+ else if (INTEL_GEN(i915) <= 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -1091,8 +1084,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
const struct i915_wa_list *wal = &engine->whitelist;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
struct i915_wa *wa;
unsigned int i;
@@ -1101,13 +1094,15 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
return;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(wa->reg));
+ intel_uncore_write(uncore,
+ RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
- I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(RING_NOPID(base)));
+ intel_uncore_write(uncore,
+ RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
}
static void
@@ -1115,7 +1110,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_ICELAKE(i915)) {
+ if (IS_GEN(i915, 11)) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
_3D_CHICKEN3,
@@ -1170,8 +1165,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
}
- if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ if (IS_GEN_RANGE(i915, 9, 11)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1236,7 +1231,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
return;
- if (engine->id == RCS)
+ if (engine->id == RCS0)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
@@ -1256,7 +1251,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
- wa_list_apply(engine->i915, &engine->wa_list);
+ wa_list_apply(engine->uncore, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index 7c734714b05e..34eee5ec511e 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -9,18 +9,7 @@
#include <linux/slab.h>
-struct i915_wa {
- i915_reg_t reg;
- u32 mask;
- u32 val;
-};
-
-struct i915_wa_list {
- const char *name;
- struct i915_wa *list;
- unsigned int count;
- unsigned int wa_count;
-};
+#include "intel_workarounds_types.h"
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
@@ -31,9 +20,9 @@ static inline void intel_wa_list_free(struct i915_wa_list *wal)
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
int intel_engine_emit_ctx_wa(struct i915_request *rq);
-void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
-void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
-bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+void intel_gt_init_workarounds(struct drm_i915_private *i915);
+void intel_gt_apply_workarounds(struct drm_i915_private *i915);
+bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
const char *from);
void intel_engine_init_whitelist(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/intel_workarounds_types.h
new file mode 100644
index 000000000000..30918da180ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_workarounds_types.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef __INTEL_WORKAROUNDS_TYPES_H__
+#define __INTEL_WORKAROUNDS_TYPES_H__
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
+
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+ unsigned int wa_count;
+};
+
+#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index 391f3d9ffdf1..419fd4d6a8f0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -122,7 +122,7 @@ huge_gem_object(struct drm_i915_private *i915,
if (overflows_type(dma_size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index a9a2fa35876f..90721b54e7ae 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -171,7 +171,7 @@ huge_pages_object(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -320,7 +320,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
if (IS_ERR(obj))
return ERR_CAST(obj);
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err;
-
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -1449,7 +1445,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
+ if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1535,7 +1531,7 @@ static int igt_ppgtt_pin_update(void *arg)
* land in the now stale 2M page.
*/
- err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
if (err)
goto out_unpin;
@@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg)
}
*vaddr = 0xdeadbeaf;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, vm, NULL);
@@ -1653,7 +1650,7 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_unpin;
- err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
if (err)
goto out_unpin;
@@ -1709,16 +1706,17 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
+ mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
+ mkwrite_device_info(dev_priv)->ppgtt_size = 48;
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- if (!i915_vm_is_48bit(&ppgtt->vm)) {
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
@@ -1734,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
@@ -1764,7 +1761,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return 0;
}
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 337b1f98b923..27d8f853111b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -150,7 +150,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_retire),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e77b7ed449ae..6fd70d326468 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -84,14 +84,9 @@ static void simulate_hibernate(struct drm_i915_private *i915)
static int pm_prepare(struct drm_i915_private *i915)
{
- int err = 0;
-
- if (i915_gem_suspend(i915)) {
- pr_err("i915_gem_suspend failed\n");
- err = -EINVAL;
- }
+ i915_gem_suspend(i915);
- return err;
+ return 0;
}
static void pm_suspend(struct drm_i915_private *i915)
@@ -220,5 +215,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_hibernate),
};
+ if (i915_terminally_wedged(i915))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index fd89a5a33c1a..e43630b40fce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -248,15 +248,15 @@ static bool always_valid(struct drm_i915_private *i915)
static bool needs_fence_registers(struct drm_i915_private *i915)
{
- return !i915_terminally_wedged(&i915->gpu_error);
+ return !i915_terminally_wedged(i915);
}
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return false;
- return intel_engine_can_store_dword(i915->engine[RCS]);
+ return intel_engine_can_store_dword(i915->engine[RCS0]);
}
static const struct igt_coherency_mode {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7eb58a9d1319..4e1b6efc6b22 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -76,7 +76,7 @@ static int live_nop_switch(void *arg)
}
for (n = 0; n < nctx; n++) {
- ctx[n] = i915_gem_create_context(i915, file->driver_priv);
+ ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
goto out_unlock;
@@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -372,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
return 0;
}
-static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
+static noinline int cpu_check(struct drm_i915_gem_object *obj,
+ unsigned int idx, unsigned int max)
{
unsigned int n, m, needs_flush;
int err;
@@ -390,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (m = 0; m < max; m++) {
if (map[m] != m) {
- pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], m);
+ pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
+ __builtin_return_address(0), idx,
+ n, real_page_count(obj), m, max,
+ map[m], m);
err = -EINVAL;
goto out_unmap;
}
@@ -399,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
- pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], STACK_MAGIC);
+ pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
+ __builtin_return_address(0), idx, n, m,
+ map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@@ -478,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj = NULL;
- unsigned long ncontexts, ndwords, dw;
- struct igt_live_test t;
- struct drm_file *file;
- IGT_TIMEOUT(end_time);
- LIST_HEAD(objects);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err = -ENODEV;
/*
@@ -495,44 +496,167 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
+ for_each_engine(engine, i915, id) {
+ struct drm_i915_gem_object *obj = NULL;
+ unsigned long ncontexts, ndwords, dw;
+ struct igt_live_test t;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (!engine->context_size)
+ continue; /* No logical context support in HW */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_unlock;
+
+ ncontexts = 0;
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct i915_gem_context *ctx;
+ intel_wakeref_t wakeref;
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ if (!obj) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+ }
+
+ with_intel_runtime_pm(i915, wakeref)
+ err = gpu_fill(obj, ctx, engine, dw);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
+ dw = 0;
+ }
+
+ ndwords++;
+ ncontexts++;
+ }
+
+ pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+ ncontexts, engine->name, ndwords);
+
+ ncontexts = dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+
+ err = cpu_check(obj, ncontexts++, rem);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int igt_shared_ctx_exec(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *parent;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_live_test t;
+ struct drm_file *file;
+ int err = 0;
+
+ /*
+ * Create a few different contexts with the same mm and write
+ * through each ctx using the GPU making sure those writes end
+ * up in the expected pages of our obj.
+ */
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
+ parent = live_context(i915, file);
+ if (IS_ERR(parent)) {
+ err = PTR_ERR(parent);
+ goto out_unlock;
+ }
+
+ if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
+ err = 0;
+ goto out_unlock;
+ }
+
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
- ncontexts = 0;
- ndwords = 0;
- dw = 0;
- while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- unsigned int id;
+ for_each_engine(engine, i915, id) {
+ unsigned long ncontexts, ndwords, dw;
+ struct drm_i915_gem_object *obj = NULL;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
- ctx = i915_gem_create_context(i915, file->driver_priv);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_unlock;
- }
+ if (!intel_engine_can_store_dword(engine))
+ continue;
- for_each_engine(engine, i915, id) {
+ dw = 0;
+ ndwords = 0;
+ ncontexts = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
- if (!engine->context_size)
- continue; /* No logical context support in HW */
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_test;
+ }
- if (!intel_engine_can_store_dword(engine))
- continue;
+ __assign_ppgtt(ctx, parent->ppgtt);
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(parent, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ kernel_context_close(ctx);
+ goto out_test;
}
}
@@ -544,35 +668,39 @@ static int igt_ctx_exec(void *arg)
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->ppgtt), err);
- goto out_unlock;
+ kernel_context_close(ctx);
+ goto out_test;
}
if (++dw == max_dwords(obj)) {
obj = NULL;
dw = 0;
}
+
ndwords++;
+ ncontexts++;
+
+ kernel_context_close(ctx);
}
- ncontexts++;
- }
- pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
- ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
+ pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+ ncontexts, engine->name, ndwords);
- dw = 0;
- list_for_each_entry(obj, &objects, st_link) {
- unsigned int rem =
- min_t(unsigned int, ndwords - dw, max_dwords(obj));
+ ncontexts = dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
- err = cpu_check(obj, rem);
- if (err)
- break;
+ err = cpu_check(obj, ncontexts++, rem);
+ if (err)
+ goto out_test;
- dw += rem;
+ dw += rem;
+ }
}
-
-out_unlock:
+out_test:
if (igt_live_test_end(&t))
err = -EIO;
+out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
@@ -604,12 +732,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
*cmd++ = upper_32_bits(vma->node.start);
*cmd = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -923,7 +1048,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
unsigned int flags)
{
struct intel_sseu default_sseu = intel_device_default_sseu(i915);
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_sseu pg_sseu;
@@ -962,11 +1087,12 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
- ctx = i915_gem_create_context(i915, file->driver_priv);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out_unlock;
}
+ i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1047,7 +1173,7 @@ static int igt_ctx_readonly(void *arg)
struct drm_i915_gem_object *obj = NULL;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
- unsigned long ndwords, dw;
+ unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
@@ -1071,7 +1197,7 @@ static int igt_ctx_readonly(void *arg)
if (err)
goto out_unlock;
- ctx = i915_gem_create_context(i915, file->driver_priv);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -1125,9 +1251,10 @@ static int igt_ctx_readonly(void *arg)
}
}
pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_rings);
+ ndwords, RUNTIME_INFO(i915)->num_engines);
dw = 0;
+ idx = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
@@ -1137,7 +1264,7 @@ static int igt_ctx_readonly(void *arg)
if (i915_gem_object_is_readonly(obj))
num_writes = 0;
- err = cpu_check(obj, num_writes);
+ err = cpu_check(obj, idx++, num_writes);
if (err)
break;
@@ -1201,12 +1328,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
}
*cmd++ = value;
*cmd = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -1298,11 +1422,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
*cmd++ = result;
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
@@ -1396,13 +1518,13 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_unlock;
- ctx_a = i915_gem_create_context(i915, file->driver_priv);
+ ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
goto out_unlock;
}
- ctx_b = i915_gem_create_context(i915, file->driver_priv);
+ ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
goto out_unlock;
@@ -1432,7 +1554,7 @@ static int igt_vm_isolation(void *arg)
div64_u64_rem(i915_prandom_u64_state(&prng),
vm_total, &offset);
- offset &= ~sizeof(u32);
+ offset &= -sizeof(u32);
offset += I915_GTT_PAGE_SIZE;
err = write_to_scratch(ctx_a, engine,
@@ -1458,7 +1580,7 @@ static int igt_vm_isolation(void *arg)
count += this;
}
pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_rings);
+ count, RUNTIME_INFO(i915)->num_engines);
out_rpm:
intel_runtime_pm_put(i915, wakeref);
@@ -1472,10 +1594,10 @@ out_unlock:
}
static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, unsigned int engines)
+__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
if (engines == ALL_ENGINES)
return "all";
@@ -1488,67 +1610,60 @@ __engine_name(struct drm_i915_private *i915, unsigned int engines)
static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx,
- unsigned int engines)
+ intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
- int err;
+ intel_engine_mask_t tmp;
+ int pass;
GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
- for_each_engine_masked(engine, i915, engines, tmp) {
- struct i915_request *rq;
+ for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */
+ bool from_idle = pass & 1;
+ int err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (!from_idle) {
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
- i915_request_add(rq);
- }
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
-
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!engine_has_kernel_context_barrier(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
+ i915_request_add(rq);
+ }
}
- }
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
+ err = i915_gem_switch_to_kernel_context(i915,
+ i915->gt.active_engines);
+ if (err)
+ return err;
- GEM_BUG_ON(i915->gt.active_requests);
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (engine->last_retired_context->gem_context != i915->kernel_context) {
- pr_err("engine %s not idling in kernel context!\n",
- engine->name);
+ if (!from_idle) {
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ if (i915->gt.active_requests) {
+ pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n",
+ i915->gt.active_requests,
+ pass, from_idle ? "idle" : "busy",
+ __engine_name(i915, engines),
+ is_power_of_2(engines) ? "" : "s");
return -EINVAL;
}
- }
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
+ /* XXX Bonus points for proving we are the kernel context! */
- if (i915->gt.active_requests) {
- pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
- i915->gt.active_requests);
- return -EINVAL;
+ mutex_unlock(&i915->drm.struct_mutex);
+ drain_delayed_work(&i915->gt.idle_work);
+ mutex_lock(&i915->drm.struct_mutex);
}
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!intel_engine_has_kernel_context(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
- }
- }
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ return -EIO;
return 0;
}
@@ -1592,8 +1707,6 @@ static int igt_switch_to_kernel_context(void *arg)
out_unlock:
GEM_TRACE_DUMP_ON(err);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
@@ -1602,10 +1715,117 @@ out_unlock:
return err;
}
+static void mock_barrier_task(void *data)
+{
+ unsigned int *counter = data;
+
+ ++*counter;
+}
+
+static int mock_context_barrier(void *arg)
+{
+#undef pr_fmt
+#define pr_fmt(x) "context_barrier_task():" # x
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ unsigned int counter;
+ int err;
+
+ /*
+ * The context barrier provides us with a callback after it emits
+ * a request; useful for retiring old state after loading new.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = mock_context(i915, "mock");
+ if (!ctx) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, 0,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately with 0 engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately for all inactive engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(i915, wakeref)
+ rq = i915_request_alloc(i915->engine[RCS0], ctx);
+ if (IS_ERR(rq)) {
+ pr_err("Request allocation failed!\n");
+ goto out;
+ }
+ i915_request_add(rq);
+ GEM_BUG_ON(list_empty(&ctx->active_engines));
+
+ counter = 0;
+ context_barrier_inject_fault = BIT(RCS0);
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ context_barrier_inject_fault = 0;
+ if (err == -ENXIO)
+ err = 0;
+ else
+ pr_err("Did not hit fault injection!\n");
+ if (counter != 0) {
+ pr_err("Invoked callback on error!\n");
+ err = -EIO;
+ }
+ if (err)
+ goto out;
+
+ counter = 0;
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ mock_device_flush(i915);
+ if (counter == 0) {
+ pr_err("Did not retire on each active engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ mock_context_close(ctx);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+#undef pr_fmt
+#define pr_fmt(x) x
+}
+
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_switch_to_kernel_context),
+ SUBTEST(mock_context_barrier),
};
struct drm_i915_private *i915;
int err;
@@ -1628,10 +1848,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
+ SUBTEST(igt_shared_ctx_exec),
SUBTEST(igt_vm_isolation),
};
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
return i915_subtests(tests, dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index a7055b12e53c..2b943ee246c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg)
goto err;
}
memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
ptr = dma_buf_kmap(dmabuf, 1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b9b0ea4e2404..89766688e420 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
/* Neighbouring; same colour - should fit */
@@ -547,7 +547,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3850ef4a5ec8..9cca66e4420a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -120,7 +120,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
goto err;
@@ -1010,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1681,25 +1680,31 @@ int i915_gem_gtt_mock_selftests(void)
SUBTEST(igt_gtt_insert),
};
struct drm_i915_private *i915;
- struct i915_ggtt ggtt;
+ struct i915_ggtt *ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- mock_init_ggtt(i915, &ggtt);
+ ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ mock_init_ggtt(i915, ggtt);
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, &ggtt);
+ err = i915_subtests(tests, ggtt);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&ggtt);
+ mock_fini_ggtt(ggtt);
+ kfree(ggtt);
+out_put:
drm_dev_put(&i915->drm);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 395ae878e0f7..971148fbe6f5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -583,7 +583,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
for (loop = 0; loop < 3; loop++) {
intel_wakeref_t wakeref;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6733dc5b6b4c..e6ffe2240126 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -42,7 +42,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS],
+ request = mock_request(i915->engine[RCS0],
i915->kernel_context,
HZ / 10);
if (!request)
@@ -66,7 +66,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_unlock;
@@ -136,19 +136,17 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_locked;
}
- mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_device;
+ goto out_locked;
}
- mutex_lock(&i915->drm.struct_mutex);
i915_request_add(request);
mutex_unlock(&i915->drm.struct_mutex);
@@ -195,7 +193,7 @@ static int igt_request_rewind(void *arg)
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
- request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
+ request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
if (!request) {
err = -ENOMEM;
goto err_context_0;
@@ -205,7 +203,7 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
- vip = mock_request(i915->engine[RCS], ctx[1], 0);
+ vip = mock_request(i915->engine[RCS0], ctx[1], 0);
if (!vip) {
err = -ENOMEM;
goto err_context_1;
@@ -226,8 +224,7 @@ static int igt_request_rewind(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
- pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
- vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+ pr_err("timed out waiting for high priority request\n");
goto err;
}
@@ -418,7 +415,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS],
+ .engine = i915->engine[RCS0],
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -622,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
+ i915_gem_chipset_flush(i915);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -780,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (err)
goto err;
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err;
-
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -802,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
- i915_gem_chipset_flush(i915);
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
+ i915_gem_chipset_flush(i915);
+
return vma;
err:
@@ -1219,7 +1212,7 @@ out_flush:
num_fences += atomic_long_read(&t[id].num_fences);
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
- num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus);
+ num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
@@ -1246,7 +1239,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 10ef0e636a24..b18eaefef798 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -133,7 +133,7 @@ static int __run_selftests(const char *name,
if (signal_pending(current))
return -EINTR;
- pr_debug(DRIVER_NAME ": Running %s\n", st->name);
+ pr_info(DRIVER_NAME ": Running %s\n", st->name);
if (data)
err = st->live(data);
else
@@ -255,7 +255,7 @@ int __i915_subtests(const char *caller,
if (!apply_subtest_filter(caller, st->name))
continue;
- pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
err = st->func(data);
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index cdbc8f134e5e..cbf45d85cbff 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -571,21 +571,27 @@ static int test_timer(void *arg)
unsigned long target, delay;
struct timed_fence tf;
+ preempt_disable();
timed_fence_init(&tf, target = jiffies);
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with immediate expiration not signaled\n");
goto err;
}
+ preempt_enable();
timed_fence_fini(&tf);
for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+ preempt_disable();
timed_fence_init(&tf, target = jiffies + delay);
if (i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
goto err;
}
+ preempt_enable();
i915_sw_fence_wait(&tf.fence);
+
+ preempt_disable();
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence not signaled after wait\n");
goto err;
@@ -595,13 +601,14 @@ static int test_timer(void *arg)
target, jiffies);
goto err;
}
-
+ preempt_enable();
timed_fence_fini(&tf);
}
return 0;
err:
+ preempt_enable();
timed_fence_fini(&tf);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 12ea69b1a1e5..bd96afcadfe7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -64,7 +64,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
- tl = i915_timeline_create(state->i915, "mock", NULL);
+ tl = i915_timeline_create(state->i915, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -476,7 +476,7 @@ checked_i915_timeline_create(struct drm_i915_private *i915)
{
struct i915_timeline *tl;
- tl = i915_timeline_create(i915, "live", NULL);
+ tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl))
return tl;
@@ -641,6 +641,118 @@ out:
#undef NUM_TIMELINES
}
+static int live_hwsp_wrap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_timeline *tl;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ /*
+ * Across a seqno wrap, we need to keep the old cacheline alive for
+ * foreign GPU references.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ tl = i915_timeline_create(i915, NULL);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out_rpm;
+ }
+ if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ goto out_free;
+
+ err = i915_timeline_pin(tl);
+ if (err)
+ goto out_free;
+
+ for_each_engine(engine, i915, id) {
+ const u32 *hwsp_seqno[2];
+ struct i915_request *rq;
+ u32 seqno[2];
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ tl->seqno = -4u;
+
+ err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
+ seqno[0], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[0] = tl->hwsp_seqno;
+
+ err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
+ seqno[1], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[1] = tl->hwsp_seqno;
+
+ /* With wrap should come a new hwsp */
+ GEM_BUG_ON(seqno[1] >= seqno[0]);
+ GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
+ pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
+ *hwsp_seqno[0], *hwsp_seqno[1],
+ seqno[0], seqno[1]);
+ err = -EINVAL;
+ goto out;
+ }
+
+ i915_retire_requests(i915); /* recycle HWSP */
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ i915_timeline_unpin(tl);
+out_free:
+ i915_timeline_put(tl);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
static int live_hwsp_recycle(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -723,7 +835,11 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_recycle),
SUBTEST(live_hwsp_engine),
SUBTEST(live_hwsp_alternate),
+ SUBTEST(live_hwsp_wrap),
};
+ if (i915_terminally_wedged(i915))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index cf1de82741fa..fc594b030f5a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -725,24 +725,30 @@ int i915_vma_mock_selftests(void)
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
- struct i915_ggtt ggtt;
+ struct i915_ggtt *ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- mock_init_ggtt(i915, &ggtt);
+ ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ mock_init_ggtt(i915, ggtt);
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, &ggtt);
+ err = i915_subtests(tests, ggtt);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&ggtt);
+ mock_fini_ggtt(ggtt);
+ kfree(ggtt);
+out_put:
drm_dev_put(&i915->drm);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a..94aee4071a66 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
cond_resched();
if (flags & I915_WAIT_LOCKED &&
- i915_gem_switch_to_kernel_context(i915)) {
+ i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
pr_err("Failed to switch back to kernel context; declaring wedged\n");
i915_gem_set_wedged(i915);
}
@@ -29,5 +29,5 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
i915_gem_set_wedged(i915);
}
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+ return i915_terminally_wedged(i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 9ebd9225684e..16890dfe74c0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
goto err_hws;
}
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -144,6 +144,13 @@ igt_spinner_create_request(struct igt_spinner *spin,
i915_gem_chipset_flush(spin->i915);
+ if (engine->emit_init_breadcrumb &&
+ rq->timeline->has_initial_breadcrumb) {
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
cancel_rq:
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index c5e0a0e98fcb..b05a21eaa8f4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -111,7 +111,7 @@ static int validate_client(struct intel_guc_client *client,
dev_priv->preempt_context : dev_priv->kernel_context;
if (client->owner != ctx_owner ||
- client->engines != INTEL_INFO(dev_priv)->ring_mask ||
+ client->engines != INTEL_INFO(dev_priv)->engine_mask ||
client->priority != client_priority ||
client->doorbell_id == GUC_DOORBELL_INVALID)
return -EINVAL;
@@ -261,7 +261,7 @@ static int igt_guc_doorbells(void *arg)
for (i = 0; i < ATTEMPTS; i++) {
clients[i] = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
i % GUC_CLIENT_PRIORITY_NUM,
dev_priv->kernel_context);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 7b6f3bea9ef8..050bd1e19e02 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -56,6 +56,8 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
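+ /* The requests built here hang on purpose; the context must not be bannable, or repeated hangs would get it banned mid-test. */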
+ GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
+
h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(h->hws)) {
err = PTR_ERR(h->hws);
@@ -68,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
goto err_hws;
}
- i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -242,6 +244,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
i915_gem_chipset_flush(h->i915);
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
@@ -334,7 +342,7 @@ static int igt_hang_sanitycheck(void *arg)
timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
timeout = -EIO;
i915_request_put(rq);
@@ -375,7 +383,7 @@ static int igt_global_reset(void *arg)
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
return err;
@@ -393,15 +401,13 @@ static int igt_wedged_reset(void *arg)
i915_gem_set_wedged(i915);
- mutex_lock(&i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+ GEM_BUG_ON(!i915_reset_failed(i915));
i915_reset(i915, ALL_ENGINES, NULL);
- mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+ return i915_reset_failed(i915) ? -EIO : 0;
}
static bool wait_for_idle(struct intel_engine_cs *engine)
@@ -409,6 +415,222 @@ static bool wait_for_idle(struct intel_engine_cs *engine)
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}
+static int igt_reset_nop(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ unsigned int reset_count, count;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ int err = 0;
+
+ /* Check that we can reset during non-user portions of requests */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ i915_gem_context_clear_bannable(ctx);
+ wakeref = intel_runtime_pm_get(i915);
+ reset_count = i915_reset_count(&i915->gpu_error);
+ count = 0;
+ do {
+ mutex_lock(&i915->drm.struct_mutex);
+ for_each_engine(engine, i915, id) {
+ int i;
+
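+ /* Queue a handful of trivial requests on every engine so the full GPU reset below lands while work is in flight. */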
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ igt_global_reset_lock(i915);
+ i915_reset(i915, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(i915);
+ if (i915_reset_failed(i915)) {
+ err = -EIO;
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) !=
+ reset_count + ++count) {
+ pr_err("Full GPU reset not recorded!\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (!i915_reset_flush(i915)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
+
+ err = igt_flush_test(i915, 0);
+ if (err)
+ break;
+ } while (time_before(jiffies, end_time));
+ pr_info("%s: %d resets\n", __func__, count);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+
+out:
+ mock_file_free(i915, file);
+ if (i915_reset_failed(i915))
+ err = -EIO;
+ return err;
+}
+
+static int igt_reset_nop_engine(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int err = 0;
+
+ /* Check that we can engine-reset during non-user portions */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ i915_gem_context_clear_bannable(ctx);
+ wakeref = intel_runtime_pm_get(i915);
+ for_each_engine(engine, i915, id) {
+ unsigned int reset_count, reset_engine_count;
+ unsigned int count;
+ IGT_TIMEOUT(end_time);
+
+ reset_count = i915_reset_count(&i915->gpu_error);
+ reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
+ engine);
+ count = 0;
+
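+ /* Mark the engine reset as already in progress (as i915_handle_error() would) before calling i915_reset_engine() directly. */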
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ do {
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ err = i915_reset_engine(engine, NULL);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count + ++count) {
+ pr_err("%s engine reset not recorded!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (!i915_reset_flush(i915)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+
+ if (err)
+ break;
+
+ err = igt_flush_test(i915, 0);
+ if (err)
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+out:
+ mock_file_free(i915, file);
+ if (i915_reset_failed(i915))
+ err = -EIO;
+ return err;
+}
+
static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
struct intel_engine_cs *engine;
@@ -523,7 +745,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
if (active) {
@@ -565,11 +787,10 @@ static int active_request_put(struct i915_request *rq)
return 0;
if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
- GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
rq->engine->name,
rq->fence.context,
- rq->fence.seqno,
- i915_request_global_seqno(rq));
+ rq->fence.seqno);
GEM_TRACE_DUMP();
i915_gem_set_wedged(rq->i915);
@@ -762,7 +983,23 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
count++;
if (rq) {
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to complete request after reset\n",
+ engine->name, test_name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ break;
+ }
+
i915_request_put(rq);
}
@@ -837,7 +1074,7 @@ unwind:
break;
}
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
if (flags & TEST_ACTIVE) {
@@ -887,7 +1124,8 @@ static int igt_reset_engines(void *arg)
return 0;
}
-static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask)
+static u32 fake_hangcheck(struct drm_i915_private *i915,
+ intel_engine_mask_t mask)
{
u32 count = i915_reset_count(&i915->gpu_error);
@@ -905,7 +1143,7 @@ static int igt_reset_wait(void *arg)
long timeout;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!intel_engine_can_store_dword(i915->engine[RCS0]))
return 0;
/* Check that we detect a stuck waiter and issue a reset */
@@ -917,7 +1155,7 @@ static int igt_reset_wait(void *arg)
if (err)
goto unlock;
- rq = hang_create_request(&h, i915->engine[RCS]);
+ rq = hang_create_request(&h, i915->engine[RCS0]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
@@ -963,7 +1201,7 @@ unlock:
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1034,13 +1272,11 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
struct hang h;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!intel_engine_can_store_dword(i915->engine[RCS0]))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- igt_global_reset_lock(i915);
-
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
if (err)
@@ -1066,7 +1302,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
goto out_obj;
}
- rq = hang_create_request(&h, i915->engine[RCS]);
+ rq = hang_create_request(&h, i915->engine[RCS0]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_obj;
@@ -1138,7 +1374,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
out_reset:
- fake_hangcheck(rq->i915, intel_engine_flag(rq->engine));
+ igt_global_reset_lock(i915);
+ fake_hangcheck(rq->i915, rq->engine->mask);
+ igt_global_reset_unlock(i915);
if (tsk) {
struct igt_wedge_me w;
@@ -1159,9 +1397,8 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1317,7 +1554,7 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- reset_count = fake_hangcheck(i915, ENGINE_MASK(id));
+ reset_count = fake_hangcheck(i915, BIT(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1367,7 +1604,7 @@ unlock:
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1376,7 +1613,7 @@ unlock:
static int igt_handle_error(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1423,7 +1660,7 @@ static int igt_handle_error(void *arg)
/* Temporarily disable error capture */
error = xchg(&i915->gpu_error.first_error, (void *)-1);
- i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL);
+ i915_handle_error(i915, engine->mask, 0, NULL);
xchg(&i915->gpu_error.first_error, error);
@@ -1547,7 +1784,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
}
@@ -1586,7 +1823,7 @@ static int igt_atomic_reset(void *arg)
/* Flush any requests before we get started and check basics */
force_reset(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
goto unlock;
if (intel_has_gpu_reset(i915)) {
@@ -1642,6 +1879,8 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_wedged_reset),
SUBTEST(igt_hang_sanitycheck),
+ SUBTEST(igt_reset_nop),
+ SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
@@ -1660,7 +1899,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (!intel_has_gpu_reset(i915))
return 0;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
wakeref = intel_runtime_pm_get(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 58144e024751..fbee030db940 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -10,6 +10,7 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_live_test.h"
#include "igt_spinner.h"
#include "i915_random.h"
@@ -75,6 +76,185 @@ err_unlock:
return err;
}
+static int live_busywait_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+ u32 *map;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_unlock;
+ ctx_hi->sched.priority = INT_MAX;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = INT_MIN;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+ * busywaits on a semaphore (inside the ringbuffer where
+ * it should be preemptible) and the high priority request
+ * uses an MI_STORE_DWORD_IMM to update the semaphore value,
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = i915_request_alloc(engine, ctx_lo);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
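+ /* Signal that the busywait has started (scratch := 1), then poll until the high priority request below clears it back to 0. */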
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+ i915_request_add(lo);
+
+ if (wait_for(READ_ONCE(*map), 10)) {
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = i915_request_alloc(engine, ctx_hi);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -88,6 +268,9 @@ static int live_preempt(void *arg)
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ pr_err("Logical preemption supported, but not exposed\n");
+
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
@@ -110,8 +293,17 @@ static int live_preempt(void *arg)
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
+ struct igt_live_test t;
struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -147,7 +339,8 @@ static int live_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+ if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -201,8 +394,17 @@ static int live_late_preempt(void *arg)
goto err_ctx_hi;
for_each_engine(engine, i915, id) {
+ struct igt_live_test t;
struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -241,7 +443,8 @@ static int live_late_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+ if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -335,6 +538,9 @@ static int live_suppress_self_preempt(void *arg)
struct i915_request *rq_a, *rq_b;
int depth;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
engine->execlists.preempt_hang.count = 0;
rq_a = igt_spinner_create_request(&a.spin,
@@ -407,6 +613,171 @@ err_wedged:
goto err_client_b;
}
+static int __i915_sw_fence_call
+dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+static struct i915_request *dummy_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
+ return NULL;
+
+ INIT_LIST_HEAD(&rq->active_list);
+ rq->engine = engine;
+
+ i915_sched_node_init(&rq->sched);
+
+ /* mark this request as permanently incomplete */
+ rq->fence.seqno = 1;
+ BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
+ rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ i915_sw_fence_init(&rq->submit, dummy_notify);
+ i915_sw_fence_commit(&rq->submit);
+
+ return rq;
+}
+
+static void dummy_request_free(struct i915_request *dummy)
+{
+ i915_request_mark_complete(dummy);
+ i915_sched_node_fini(&dummy->sched);
+ i915_sw_fence_fini(&dummy->submit);
+
+ dma_fence_free(&dummy->fence);
+}
+
+static int live_suppress_wait_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct preempt_client client[4];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+ int i;
+
+ /*
+ * Waiters are given a little priority nudge, but not enough
+ * to actually cause any preemption. Double check that we do
+ * not needlessly generate preempt-to-idle cycles.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
+ goto err_unlock;
+ if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+ goto err_client_0;
+ if (preempt_client_init(i915, &client[2])) /* head of queue */
+ goto err_client_1;
+ if (preempt_client_init(i915, &client[3])) /* bystander */
+ goto err_client_2;
+
+ for_each_engine(engine, i915, id) {
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!engine->emit_init_breadcrumb)
+ continue;
+
+ for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
+ struct i915_request *rq[ARRAY_SIZE(client)];
+ struct i915_request *dummy;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ dummy = dummy_request(engine);
+ if (!dummy)
+ goto err_client_3;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ rq[i] = igt_spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq[i])) {
+ err = PTR_ERR(rq[i]);
+ goto err_wedged;
+ }
+
+ /* Disable NEWCLIENT promotion */
+ __i915_active_request_set(&rq[i]->timeline->last_request,
+ dummy);
+ i915_request_add(rq[i]);
+ }
+
+ dummy_request_free(dummy);
+
+ GEM_BUG_ON(i915_request_completed(rq[0]));
+ if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
+ pr_err("%s: First client failed to start\n",
+ engine->name);
+ goto err_wedged;
+ }
+ GEM_BUG_ON(!i915_request_started(rq[0]));
+
+ if (i915_request_wait(rq[depth],
+ I915_WAIT_LOCKED |
+ I915_WAIT_PRIORITY,
+ 1) != -ETIME) {
+ pr_err("%s: Waiter depth:%d completed!\n",
+ engine->name, depth);
+ goto err_wedged;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ igt_spinner_end(&client[i].spin);
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ goto err_wedged;
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ err = -EINVAL;
+ goto err_client_3;
+ }
+ }
+ }
+
+ err = 0;
+err_client_3:
+ preempt_client_fini(&client[3]);
+err_client_2:
+ preempt_client_fini(&client[2]);
+err_client_1:
+ preempt_client_fini(&client[1]);
+err_client_0:
+ preempt_client_fini(&client[0]);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ igt_spinner_end(&client[i].spin);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_client_3;
+}
+
static int live_chain_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -438,11 +809,39 @@ static int live_chain_preempt(void *arg)
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
- int count, i;
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
- for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
- struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+ rq = igt_spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
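+ /* Estimate how many spinner requests fit in the ring from the size of the request just emitted. */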
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ goto err_wedged;
+ }
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
rq = igt_spinner_create_request(&hi.spin,
hi.ctx, engine,
MI_ARB_CHECK);
@@ -484,6 +883,26 @@ static int live_chain_preempt(void *arg)
goto err_wedged;
}
igt_spinner_end(&lo.spin);
+
+ rq = i915_request_alloc(engine, lo.ctx);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ goto err_wedged;
+ }
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
}
}
@@ -767,7 +1186,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -795,7 +1214,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -808,6 +1227,7 @@ static int live_preempt_smoke(void *arg)
};
const unsigned int phase[] = { 0, BATCH };
intel_wakeref_t wakeref;
+ struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
@@ -838,11 +1258,13 @@ static int live_preempt_smoke(void *arg)
for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
cs[n] = MI_ARB_CHECK;
cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
- err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
- if (err)
+ if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+ err = -EIO;
goto err_batch;
+ }
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.i915);
@@ -861,7 +1283,7 @@ static int live_preempt_smoke(void *arg)
}
err_ctx:
- if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+ if (igt_live_test_end(&t))
err = -EIO;
for (n = 0; n < smoke.ncontext; n++) {
@@ -884,9 +1306,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
@@ -895,7 +1319,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (!HAS_EXECLISTS(i915))
return 0;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 81d9d31042a9..e0d7ebecb215 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -119,9 +119,143 @@ int intel_uncore_mock_selftests(void)
return 0;
}
-static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
+static int live_forcewake_ops(void *arg)
+{
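+ /* Per-gen registers expected to read back non-zero while forcewake is held and to drop to zero once the powerwell is released. */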
+ static const struct reg {
+ const char *name;
+ unsigned long platforms;
+ unsigned int offset;
+ } registers[] = {
+ {
+ "RING_START",
+ INTEL_GEN_MASK(6, 7),
+ 0x38,
+ },
+ {
+ "RING_MI_MODE",
+ INTEL_GEN_MASK(8, BITS_PER_LONG),
+ 0x9c,
+ }
+ };
+ const struct reg *r;
+ struct drm_i915_private *i915 = arg;
+ struct intel_uncore_forcewake_domain *domain;
+ struct intel_uncore *uncore = &i915->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned int tmp;
+ int err = 0;
+
+ GEM_BUG_ON(i915->gt.awake);
+
+ /* vlv/chv with their pcu behave differently wrt reads */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ pr_debug("PCU fakes forcewake badly; skipping\n");
+ return 0;
+ }
+
+ /*
+ * Not quite as reliable across the gen as one would hope.
+ *
+ * Either our theory of operation is incorrect, or there remain
+ * external parties interfering with the powerwells.
+ *
+ * https://bugs.freedesktop.org/show_bug.cgi?id=110210
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ /* We have to pick carefully to get the exact behaviour we need */
+ for (r = registers; r->name; r++)
+ if (r->platforms & INTEL_INFO(i915)->gen_mask)
+ break;
+ if (!r->name) {
+ pr_debug("Forcewaked register not known for %s; skipping\n",
+ intel_platform_name(INTEL_INFO(i915)->platform));
+ return 0;
+ }
+
+ wakeref = intel_runtime_pm_get(i915);
+
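+ /* Flush any pending auto-release timers so every forcewake domain starts idle. */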
+ for_each_fw_domain(domain, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (!hrtimer_cancel(&domain->timer))
+ continue;
+
+ intel_uncore_fw_release_timer(&domain->timer);
+ }
+
+ for_each_engine(engine, i915, id) {
+ i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
+ u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
+ enum forcewake_domains fw_domains;
+ u32 val;
+
+ if (!engine->default_state)
+ continue;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
+ FW_REG_READ);
+ if (!fw_domains)
+ continue;
+
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ if (!domain->wake_count)
+ continue;
+
+ pr_err("fw_domain %s still active, aborting test!\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ intel_uncore_forcewake_get(uncore, fw_domains);
+ val = readl(reg);
+ intel_uncore_forcewake_put(uncore, fw_domains);
+
+ /* Flush the forcewake release (delayed onto a timer) */
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (hrtimer_cancel(&domain->timer))
+ intel_uncore_fw_release_timer(&domain->timer);
+
+ preempt_disable();
+ err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
+ preempt_enable();
+ if (err) {
+ pr_err("Failed to clear fw_domain %s\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ goto out_rpm;
+ }
+ }
+
+ if (!val) {
+ pr_err("%s:%s was zero while fw was held!\n",
+ engine->name, r->name);
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ /* We then expect the read to return 0 outside of the fw */
+ if (wait_for(readl(reg) == 0, 100)) {
+ pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
+ engine->name, r->name, readl(reg), fw_domains);
+ err = -ETIMEDOUT;
+ goto out_rpm;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ return err;
+}
+
+static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
+ struct drm_i915_private *dev_priv = arg;
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long *valid;
u32 offset;
int err;
@@ -137,48 +271,52 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
- valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
- GFP_KERNEL);
+ valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
if (!valid)
return -ENOMEM;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- check_for_unclaimed_mmio(dev_priv);
+ check_for_unclaimed_mmio(uncore);
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
(void)I915_READ_FW(reg);
- if (!check_for_unclaimed_mmio(dev_priv))
+ if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
err = 0;
for_each_set_bit(offset, valid, FW_RANGE) {
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv);
+ intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
- check_for_unclaimed_mmio(dev_priv);
+ check_for_unclaimed_mmio(uncore);
(void)I915_READ(reg);
- if (check_for_unclaimed_mmio(dev_priv)) {
+ if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
err = -EINVAL;
}
}
- kfree(valid);
+ bitmap_free(valid);
return err;
}
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_forcewake_ops),
+ SUBTEST(live_forcewake_domains),
+ };
+
int err;
/* Confirm the table we load is still valid */
@@ -188,9 +326,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
if (err)
return err;
- err = intel_uncore_check_forcewake_domains(i915);
- if (err)
- return err;
-
- return 0;
+ return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index b15c4f26c593..567b6f8dae86 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -12,6 +12,14 @@
#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
+#include "mock_drm.h"
+
+static const struct wo_register {
+ enum intel_platform platform;
+ u32 reg;
+} wo_registers[] = {
+ { INTEL_GEMINILAKE, 0x731c }
+};
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
struct wa_lists {
@@ -74,7 +82,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (IS_ERR(result))
return result;
- i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
cs = i915_gem_object_pin_map(result, I915_MAP_WB);
if (IS_ERR(cs)) {
@@ -82,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_obj;
}
memset(cs, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
@@ -181,7 +190,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
- if (i915_terminally_wedged(&ctx->i915->gpu_error))
+ if (i915_terminally_wedged(ctx->i915))
err = -EIO;
if (err)
goto out_put;
@@ -214,7 +223,7 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
+ i915_reset(engine->i915, engine->mask, "live_workarounds");
return 0;
}
@@ -236,15 +245,11 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
+
rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref) {
- if (spin)
- rq = igt_spinner_create_request(spin,
- ctx, engine,
- MI_NOOP);
- else
- rq = i915_request_alloc(engine, ctx);
- }
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
kernel_context_close(ctx);
@@ -273,7 +278,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
struct igt_spinner spin;
intel_wakeref_t wakeref;
@@ -282,11 +286,9 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
engine->whitelist.count, name);
- if (want_spin) {
- err = igt_spinner_init(&spin, i915);
- if (err)
- return err;
- }
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
ctx = kernel_context(i915);
if (IS_ERR(ctx))
@@ -298,17 +300,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
goto out;
}
- err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
+ err = switch_to_scratch_context(engine, &spin);
if (err)
goto out;
with_intel_runtime_pm(i915, wakeref)
err = reset(engine);
- if (want_spin) {
- igt_spinner_end(&spin);
- igt_spinner_fini(&spin);
- }
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
if (err) {
pr_err("%s reset failed\n", name);
@@ -340,10 +340,379 @@ out:
return err;
}
+static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void *ptr;
+ int err;
+
+ obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err_obj;
+ }
+ memset(ptr, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
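+/*
+ * Model the expected register update: an rsvd mask of 0x0000ffff denotes a
+ * masked register, where the upper 16 bits of the written value select which
+ * low bits take effect; otherwise rsvd is simply the set of writable bits.
+ */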
+static u32 reg_write(u32 old, u32 new, u32 rsvd)
+{
+ if (rsvd == 0x0000ffff) {
+ old &= ~(new >> 16);
+ old |= new & (new >> 16);
+ } else {
+ old &= ~rsvd;
+ old |= new & rsvd;
+ }
+
+ return old;
+}
+
+static bool wo_register(struct intel_engine_cs *engine, u32 reg)
+{
+ enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
+ if (wo_registers[i].platform == platform &&
+ wo_registers[i].reg == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static int check_dirty_whitelist(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
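+ /* Bit patterns (and, later, their complements) written to each whitelisted register to discover which bits the hardware lets the context change. */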
+ const u32 values[] = {
+ 0x00000000,
+ 0x01010101,
+ 0x10100101,
+ 0x03030303,
+ 0x30300303,
+ 0x05050505,
+ 0x50500505,
+ 0x0f0f0f0f,
+ 0xf00ff00f,
+ 0x10101010,
+ 0xf0f01010,
+ 0x30303030,
+ 0xa0a03030,
+ 0x50505050,
+ 0xc0c05050,
+ 0xf0f0f0f0,
+ 0x11111111,
+ 0x33333333,
+ 0x55555555,
+ 0x0000ffff,
+ 0x00ff00ff,
+ 0xff0000ff,
+ 0xffff00ff,
+ 0xffffffff,
+ };
+ struct i915_vma *scratch;
+ struct i915_vma *batch;
+ int err = 0, i, v;
+ u32 *cs, *results;
+
+ scratch = create_scratch(ctx);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_scratch;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ u64 addr = scratch->node.start;
+ struct i915_request *rq;
+ u32 srm, lrm, rsvd;
+ u32 expect;
+ int idx;
+
+ if (wo_register(engine, reg))
+ continue;
+
+ srm = MI_STORE_REGISTER_MEM;
+ lrm = MI_LOAD_REGISTER_MEM;
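+ /* On gen8+ the SRM/LRM commands take a 64b address, so bump the opcode's dword length by one. */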
+ if (INTEL_GEN(ctx->i915) >= 8)
+ lrm++, srm++;
+
+ pr_debug("%s: Writing garbage to %x\n",
+ engine->name, reg);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_batch;
+ }
+
+ /* SRM original */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = ~values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
+
+ /* LRM original -- don't leave garbage in the context! */
+ *cs++ = lrm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, PAGE_SIZE,
+ 0);
+ if (err)
+ goto err_request;
+
+err_request:
+ i915_request_add(rq);
+ if (err)
+ goto out_batch;
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ pr_err("%s: Futzing %x timedout; cancelling test\n",
+ engine->name, reg);
+ i915_gem_set_wedged(ctx->i915);
+ err = -EIO;
+ goto out_batch;
+ }
+
+ results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto out_batch;
+ }
+
+ GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
+ rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
+ if (!rsvd) {
+ pr_err("%s: Unable to write to whitelisted register %x\n",
+ engine->name, reg);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ expect = reg_write(expect, values[v], rsvd);
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ expect = reg_write(expect, ~values[v], rsvd);
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ if (err) {
+ pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
+ engine->name, err, reg);
+
+ pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+ engine->name, reg, results[0], rsvd);
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = values[v];
+
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = ~values[v];
+
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+
+ err = -EINVAL;
+ }
+out_unpin:
+ i915_gem_object_unpin_map(scratch->obj);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ err = -EIO;
+out_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_dirty_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int err = 0;
+
+ /* Can the user write to the whitelisted registers? */
+
+ if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ return 0;
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+ file = mock_file(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto out_rpm;
+ }
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (engine->whitelist.count == 0)
+ continue;
+
+ err = check_dirty_whitelist(ctx, engine);
+ if (err)
+ goto out_file;
+ }
+
+out_file:
+ mutex_unlock(&i915->drm.struct_mutex);
+ mock_file_free(i915, file);
+ mutex_lock(&i915->drm.struct_mutex);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ return err;
+}
+
static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
@@ -381,10 +750,11 @@ static bool verify_gt_engine_wa(struct drm_i915_private *i915,
enum intel_engine_id id;
bool ok = true;
- ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
+ ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
for_each_engine(engine, i915, id)
- ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
+ ok &= wa_list_verify(engine->uncore,
+ &lists->engine[id].wa_list, str);
return ok;
}
@@ -513,13 +883,14 @@ err:
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
SUBTEST(live_gpu_reset_gt_engine_workarounds),
SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index b646cdcdd602..0426093bf1d9 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
- unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -41,25 +40,31 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
-
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
+ INIT_LIST_HEAD(&ctx->active_engines);
+ mutex_init(&ctx->mutex);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
goto err_handles;
if (name) {
+ struct i915_hw_ppgtt *ppgtt;
+
ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
goto err_put;
- ctx->ppgtt = mock_ppgtt(i915, name);
- if (!ctx->ppgtt)
+ ppgtt = mock_ppgtt(i915, name);
+ if (!ppgtt)
goto err_put;
+
+ __set_ppgtt(ctx, ppgtt);
}
return ctx;
@@ -87,9 +92,24 @@ void mock_init_contexts(struct drm_i915_private *i915)
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct drm_file *file)
{
+ struct i915_gem_context *ctx;
+ int err;
+
lockdep_assert_held(&i915->drm.struct_mutex);
- return i915_gem_create_context(i915, file->driver_priv);
+ ctx = i915_gem_create_context(i915, 0);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ err = gem_context_register(ctx, file->driver_priv);
+ if (err < 0)
+ goto err_ctx;
+
+ return ctx;
+
+err_ctx:
+ context_close(ctx);
+ return ERR_PTR(err);
}
struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 08f0cab02e0f..61a8206ed677 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -50,13 +50,12 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
if (!ring)
return NULL;
- if (i915_timeline_init(engine->i915,
- &ring->timeline, engine->name,
- NULL)) {
+ if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
kfree(ring);
return NULL;
}
+ kref_init(&ring->base.ref);
ring->base.size = sz;
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
@@ -76,28 +75,26 @@ static void mock_ring_free(struct intel_ring *base)
kfree(ring);
}
-static struct mock_request *first_request(struct mock_engine *engine)
+static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
- struct mock_request,
- link);
+ struct i915_request,
+ mock.link);
}
-static void advance(struct mock_request *request)
+static void advance(struct i915_request *request)
{
- list_del_init(&request->link);
- intel_engine_write_global_seqno(request->base.engine,
- request->base.global_seqno);
- i915_request_mark_complete(&request->base);
- GEM_BUG_ON(!i915_request_completed(&request->base));
+ list_del_init(&request->mock.link);
+ i915_request_mark_complete(request);
+ GEM_BUG_ON(!i915_request_completed(request));
- intel_engine_queue_breadcrumbs(request->base.engine);
+ intel_engine_queue_breadcrumbs(request->engine);
}
static void hw_delay_complete(struct timer_list *t)
{
struct mock_engine *engine = from_timer(engine, t, hw_delay);
- struct mock_request *request;
+ struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->hw_lock, flags);
@@ -112,8 +109,9 @@ static void hw_delay_complete(struct timer_list *t)
* requeue the timer for the next delayed request.
*/
while ((request = first_request(engine))) {
- if (request->delay) {
- mod_timer(&engine->hw_delay, jiffies + request->delay);
+ if (request->mock.delay) {
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
break;
}
@@ -126,55 +124,43 @@ static void hw_delay_complete(struct timer_list *t)
static void mock_context_unpin(struct intel_context *ce)
{
mock_timeline_unpin(ce->ring->timeline);
- i915_gem_context_put(ce->gem_context);
}
-static void mock_context_destroy(struct intel_context *ce)
+static void mock_context_destroy(struct kref *ref)
{
- GEM_BUG_ON(ce->pin_count);
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->ring)
mock_ring_free(ce->ring);
-}
-static const struct intel_context_ops mock_context_ops = {
- .unpin = mock_context_unpin,
- .destroy = mock_context_destroy,
-};
+ intel_context_free(ce);
+}
-static struct intel_context *
-mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static int mock_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
- int err = -ENOMEM;
-
- if (ce->pin_count++)
- return ce;
-
if (!ce->ring) {
- ce->ring = mock_ring(engine);
+ ce->ring = mock_ring(ce->engine);
if (!ce->ring)
- goto err;
+ return -ENOMEM;
}
mock_timeline_pin(ce->ring->timeline);
+ return 0;
+}
- ce->ops = &mock_context_ops;
- i915_gem_context_get(ctx);
- return ce;
+static const struct intel_context_ops mock_context_ops = {
+ .pin = mock_context_pin,
+ .unpin = mock_context_unpin,
-err:
- ce->pin_count = 0;
- return ERR_PTR(err);
-}
+ .destroy = mock_context_destroy,
+};
static int mock_request_alloc(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
-
- INIT_LIST_HEAD(&mock->link);
- mock->delay = 0;
+ INIT_LIST_HEAD(&request->mock.link);
+ request->mock.delay = 0;
return 0;
}
@@ -192,25 +178,55 @@ static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
static void mock_submit_request(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
unsigned long flags;
i915_request_submit(request);
- GEM_BUG_ON(!request->global_seqno);
spin_lock_irqsave(&engine->hw_lock, flags);
- list_add_tail(&mock->link, &engine->hw_queue);
- if (mock->link.prev == &engine->hw_queue) {
- if (mock->delay)
- mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ list_add_tail(&request->mock.link, &engine->hw_queue);
+ if (list_is_first(&request->mock.link, &engine->hw_queue)) {
+ if (request->mock.delay)
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
else
- advance(mock);
+ advance(request);
}
spin_unlock_irqrestore(&engine->hw_lock, flags);
}
+static void mock_reset_prepare(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ GEM_BUG_ON(stalled);
+}
+
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+ if (!i915_request_signaled(request))
+ dma_fence_set_error(&request->fence, -EIO);
+
+ i915_request_mark_complete(request);
+ }
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@@ -227,18 +243,21 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.i915 = i915;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
+ engine->base.mask = BIT(id);
engine->base.status_page.addr = (void *)(engine + 1);
- engine->base.context_pin = mock_context_pin;
+ engine->base.cops = &mock_context_ops;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
- if (i915_timeline_init(i915,
- &engine->base.timeline,
- engine->base.name,
- NULL))
+ engine->base.reset.prepare = mock_reset_prepare;
+ engine->base.reset.reset = mock_reset;
+ engine->base.reset.finish = mock_reset_finish;
+ engine->base.cancel_requests = mock_cancel_requests;
+
+ if (i915_timeline_init(i915, &engine->base.timeline, NULL))
goto err_free;
i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
@@ -249,7 +268,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
- if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+ if (pin_context(i915->kernel_context, &engine->base,
+ &engine->base.kernel_context))
goto err_breadcrumbs;
return &engine->base;
@@ -266,19 +286,18 @@ void mock_engine_flush(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
- struct mock_request *request, *rn;
+ struct i915_request *request, *rn;
del_timer_sync(&mock->hw_delay);
spin_lock_irq(&mock->hw_lock);
- list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
advance(request);
spin_unlock_irq(&mock->hw_lock);
}
void mock_engine_reset(struct intel_engine_cs *engine)
{
- intel_engine_write_global_seqno(engine, 0);
}
void mock_engine_free(struct intel_engine_cs *engine)
@@ -293,7 +312,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
if (ce)
intel_context_unpin(ce);
- __intel_context_unpin(engine->i915->kernel_context, engine);
+ intel_context_unpin(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine);
i915_timeline_fini(&engine->timeline);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 14ae46fda49f..60bbf8b4df40 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -79,12 +79,6 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
- kmem_cache_destroy(i915->priorities);
- kmem_cache_destroy(i915->dependencies);
- kmem_cache_destroy(i915->requests);
- kmem_cache_destroy(i915->vmas);
- kmem_cache_destroy(i915->objects);
-
i915_gemfs_fini(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -115,6 +109,10 @@ static void mock_retire_work_handler(struct work_struct *work)
static void mock_idle_work_handler(struct work_struct *work)
{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gt.idle_work.work);
+
+ i915->gt.active_engines = 0;
}
static int pm_domain_resume(struct device *dev)
@@ -184,11 +182,12 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mock_uncore_init(i915);
+ mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
+ init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
mutex_init(&i915->gpu_error.wedge_mutex);
i915->wq = alloc_ordered_workqueue("mock", 0);
@@ -202,31 +201,6 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
- i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
- if (!i915->objects)
- goto err_wq;
-
- i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!i915->vmas)
- goto err_objects;
-
- i915->requests = KMEM_CACHE(mock_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
- if (!i915->requests)
- goto err_vmas;
-
- i915->dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!i915->dependencies)
- goto err_requests;
-
- i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
- if (!i915->priorities)
- goto err_dependencies;
-
i915_timelines_init(i915);
INIT_LIST_HEAD(&i915->gt.active_rings);
@@ -236,13 +210,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915, &i915->ggtt);
- mkwrite_device_info(i915)->ring_mask = BIT(0);
+ mkwrite_device_info(i915)->engine_mask = BIT(0);
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
goto err_unlock;
- i915->engine[RCS] = mock_engine(i915, "mock", RCS);
- if (!i915->engine[RCS])
+ i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!i915->engine[RCS0])
goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -256,16 +230,6 @@ err_context:
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
i915_timelines_fini(i915);
- kmem_cache_destroy(i915->priorities);
-err_dependencies:
- kmem_cache_destroy(i915->dependencies);
-err_requests:
- kmem_cache_destroy(i915->requests);
-err_vmas:
- kmem_cache_destroy(i915->vmas);
-err_objects:
- kmem_cache_destroy(i915->objects);
-err_wq:
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 0dc29e242597..d1a7c9608712 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -31,29 +31,25 @@ mock_request(struct intel_engine_cs *engine,
unsigned long delay)
{
struct i915_request *request;
- struct mock_request *mock;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
request = i915_request_alloc(engine, context);
if (IS_ERR(request))
return NULL;
- mock = container_of(request, typeof(*mock), base);
- mock->delay = delay;
-
- return &mock->base;
+ request->mock.delay = delay;
+ return request;
}
bool mock_cancel_request(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
bool was_queued;
spin_lock_irq(&engine->hw_lock);
- was_queued = !list_empty(&mock->link);
- list_del_init(&mock->link);
+ was_queued = !list_empty(&request->mock.link);
+ list_del_init(&request->mock.link);
spin_unlock_irq(&engine->hw_lock);
if (was_queued)
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 995fb728380c..4acf0211df20 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -29,13 +29,6 @@
#include "../i915_request.h"
-struct mock_request {
- struct i915_request base;
-
- struct list_head link;
- unsigned long delay;
-};
-
struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index d2de9ece2118..e084476469ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -14,8 +14,8 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
timeline->fence_context = context;
spin_lock_init(&timeline->lock);
+ mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 8ef14c7e5e38..ff8999c63a12 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -26,21 +26,21 @@
#define __nop_write(x) \
static void \
-nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { }
+nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { }
__nop_write(8)
__nop_write(16)
__nop_write(32)
#define __nop_read(x) \
static u##x \
-nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; }
+nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; }
__nop_read(8)
__nop_read(16)
__nop_read(32)
__nop_read(64)
-void mock_uncore_init(struct drm_i915_private *i915)
+void mock_uncore_init(struct intel_uncore *uncore)
{
- ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
- ASSIGN_READ_MMIO_VFUNCS(i915, nop);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index d79aa3ca4d51..dacb36b5ffcd 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,6 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
-void mock_uncore_init(struct drm_i915_private *i915);
+void mock_uncore_init(struct intel_uncore *uncore);
#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 6403728fe778..e0b1ec821960 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -23,16 +23,20 @@
* Author: Jani Nikula <jani.nikula@intel.com>
*/
+#include <linux/gpio/consumer.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <drm/drm_mipi_dsi.h>
-#include <linux/slab.h>
-#include <linux/gpio/consumer.h>
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_panel.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
@@ -78,7 +82,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port), mask, mask,
100))
DRM_ERROR("DPI FIFOs are not empty\n");
@@ -148,7 +152,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
data_mask, 0,
50))
@@ -162,7 +166,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 0,
50)) {
@@ -174,7 +178,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port),
data_mask, data_mask,
50))
@@ -234,7 +238,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port), mask, mask,
100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
@@ -256,6 +260,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
+static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
+
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+ switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+ case PIPEMISC_DITHER_6_BPC:
+ return 18;
+ case PIPEMISC_DITHER_8_BPC:
+ return 24;
+ case PIPEMISC_DITHER_10_BPC:
+ return 30;
+ case PIPEMISC_DITHER_12_BPC:
+ return 36;
+ default:
+ MISSING_CASE(tmp);
+ return 0;
+ }
+}
+
static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -353,16 +379,18 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED,
- GLK_MIPIIO_PORT_POWERED, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED,
+ GLK_MIPIIO_PORT_POWERED,
+ 20))
DRM_ERROR("MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
- cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) &
- DEVICE_READY);
+ cold_boot |=
+ !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
}
return cold_boot;
@@ -377,9 +405,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY,
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY,
+ GLK_PHY_STATUS_PORT_READY,
+ 20))
DRM_ERROR("PHY is not ON\n");
}
@@ -403,8 +433,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_ULPS_NOT_ACTIVE,
+ 0,
+ 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
@@ -427,17 +460,21 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE,
- GLK_DATA_LANE_STOP_STATE, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_DATA_LANE_STOP_STATE,
+ GLK_DATA_LANE_STOP_STATE,
+ 20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT,
- AFE_LATCHOUT, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT,
+ AFE_LATCHOUT,
+ 20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
@@ -537,7 +574,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
@@ -545,7 +582,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 0, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
@@ -566,7 +603,7 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
@@ -616,7 +653,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
- intel_wait_for_register(dev_priv,
+ intel_wait_for_register(&dev_priv->uncore,
port_ctrl, AFE_LATCHOUT, 0,
30))
DRM_ERROR("DSI LP not going Low\n");
@@ -1071,6 +1108,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
+ pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
/* Enable frame timestamp based scanline reporting */
adjusted_mode->private_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -1658,7 +1697,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *scan, *fixed_mode = NULL;
+ struct drm_display_mode *fixed_mode;
enum port port;
DRM_DEBUG_KMS("\n");
@@ -1769,13 +1808,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
mutex_lock(&dev->mode_config.mutex);
- intel_dsi_vbt_get_modes(intel_dsi);
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- break;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
@@ -1783,9 +1816,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
-
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 954d5a8c4fa7..5e7b1fb2db5d 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -244,7 +244,7 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
0,
@@ -528,7 +528,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
BXT_DSI_PLL_LOCKED,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index c935cbe059a7..3e8bece620df 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -185,7 +185,7 @@ static int compare_of(struct device *dev, void *data)
}
/* Special case for LDB, one device for two channels */
- if (of_node_cmp(np->name, "lvds-channel") == 0) {
+ if (of_node_name_eq(np, "lvds-channel")) {
np = of_get_parent(np);
of_node_put(np);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index ec3602ebbc1c..9cc1d678674f 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
if (disable_partial)
ipu_plane_disable(ipu_crtc->plane[1], true);
if (disable_full)
- ipu_plane_disable(ipu_crtc->plane[0], false);
+ ipu_plane_disable(ipu_crtc->plane[0], true);
}
static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -295,7 +295,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
/* Default to driving pixel data on negative clock edges */
sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
- DRM_BUS_FLAG_PIXDATA_POSEDGE);
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE);
sig_cfg.bus_format = imx_crtc_state->bus_format;
sig_cfg.v_to_h_sync = 0;
sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
new file mode 100644
index 000000000000..bb4ddc6bb0a6
--- /dev/null
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+
+config DRM_LIMA
+ tristate "LIMA (DRM support for ARM Mali 400/450 GPU)"
+ depends on DRM
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on MMU
+ depends on COMMON_CLK
+ depends on OF
+ select DRM_SCHED
+ help
+ DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
new file mode 100644
index 000000000000..38cc70281ba5
--- /dev/null
+++ b/drivers/gpu/drm/lima/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+
+lima-y := \
+ lima_drv.o \
+ lima_device.o \
+ lima_pmu.o \
+ lima_l2_cache.o \
+ lima_mmu.o \
+ lima_gp.o \
+ lima_pp.o \
+ lima_gem.o \
+ lima_vm.o \
+ lima_sched.o \
+ lima_ctx.o \
+ lima_gem_prime.o \
+ lima_dlbu.o \
+ lima_bcast.o \
+ lima_object.o
+
+obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
new file mode 100644
index 000000000000..288398027bfa
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_bcast.h"
+#include "lima_regs.h"
+
+#define bcast_write(reg, data) writel(data, ip->iomem + reg)
+#define bcast_read(reg) readl(ip->iomem + reg)
+
+void lima_bcast_enable(struct lima_device *dev, int num_pp)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct lima_ip *ip = dev->ip + lima_ip_bcast;
+ int i, mask = bcast_read(LIMA_BCAST_BROADCAST_MASK) & 0xffff0000;
+
+ for (i = 0; i < num_pp; i++) {
+ struct lima_ip *pp = pipe->processor[i];
+
+ mask |= 1 << (pp->id - lima_ip_pp0);
+ }
+
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
+}
+
+int lima_bcast_init(struct lima_ip *ip)
+{
+ int i, mask = 0;
+
+ for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
+ if (ip->dev->ip[i].present)
+ mask |= 1 << (i - lima_ip_pp0);
+ }
+
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
+ return 0;
+}
+
+void lima_bcast_fini(struct lima_ip *ip)
+{
+
+}
+
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
new file mode 100644
index 000000000000..c47e58563d0a
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_BCAST_H__
+#define __LIMA_BCAST_H__
+
+struct lima_ip;
+
+int lima_bcast_init(struct lima_ip *ip);
+void lima_bcast_fini(struct lima_ip *ip);
+
+void lima_bcast_enable(struct lima_device *dev, int num_pp);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
new file mode 100644
index 000000000000..22fff6caa961
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/slab.h>
+
+#include "lima_device.h"
+#include "lima_ctx.h"
+
+int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
+{
+ struct lima_ctx *ctx;
+ int i, err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dev = dev;
+ kref_init(&ctx->refcnt);
+
+ for (i = 0; i < lima_pipe_num; i++) {
+ err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
+ if (err)
+ goto err_out0;
+ }
+
+ err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL);
+ if (err < 0)
+ goto err_out0;
+
+ return 0;
+
+err_out0:
+ for (i--; i >= 0; i--)
+ lima_sched_context_fini(dev->pipe + i, ctx->context + i);
+ kfree(ctx);
+ return err;
+}
+
+static void lima_ctx_do_release(struct kref *ref)
+{
+ struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt);
+ int i;
+
+ for (i = 0; i < lima_pipe_num; i++)
+ lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i);
+ kfree(ctx);
+}
+
+int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id)
+{
+ struct lima_ctx *ctx;
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+ ctx = xa_erase(&mgr->handles, id);
+ if (ctx)
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+
+struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id)
+{
+ struct lima_ctx *ctx;
+
+ mutex_lock(&mgr->lock);
+ ctx = xa_load(&mgr->handles, id);
+ if (ctx)
+ kref_get(&ctx->refcnt);
+ mutex_unlock(&mgr->lock);
+ return ctx;
+}
+
+void lima_ctx_put(struct lima_ctx *ctx)
+{
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+}
+
+void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr)
+{
+ mutex_init(&mgr->lock);
+ xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC);
+}
+
+void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr)
+{
+ struct lima_ctx *ctx;
+ unsigned long id;
+
+ xa_for_each(&mgr->handles, id, ctx) {
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+ }
+
+ xa_destroy(&mgr->handles);
+ mutex_destroy(&mgr->lock);
+}
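Aside (not part of the patch): the context manager added above pairs an xarray keyed by a 32-bit handle with kref-based lifetimes. A minimal usage sketch, assuming a fully initialised struct lima_device whose scheduler pipes are already set up; example_ctx_roundtrip() is a hypothetical caller, not code from the driver:

/* Illustrative only; exercises the helpers introduced in lima_ctx.c. */
static int example_ctx_roundtrip(struct lima_device *ldev)
{
	struct lima_ctx_mgr mgr;
	struct lima_ctx *ctx;
	u32 id;
	int err;

	lima_ctx_mgr_init(&mgr);

	err = lima_ctx_create(ldev, &mgr, &id);	/* ID allocated via xa_alloc() */
	if (err)
		goto out;

	ctx = lima_ctx_get(&mgr, id);		/* takes an extra reference */
	if (ctx)
		lima_ctx_put(ctx);		/* and drops it again */

	err = lima_ctx_free(&mgr, id);		/* erases the handle, drops the creation ref */
out:
	lima_ctx_mgr_fini(&mgr);		/* releases anything still registered */
	return err;
}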
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
new file mode 100644
index 000000000000..6154e5c9bfe4
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_CTX_H__
+#define __LIMA_CTX_H__
+
+#include <linux/xarray.h>
+
+#include "lima_device.h"
+
+struct lima_ctx {
+ struct kref refcnt;
+ struct lima_device *dev;
+ struct lima_sched_context context[lima_pipe_num];
+ atomic_t guilty;
+};
+
+struct lima_ctx_mgr {
+ struct mutex lock;
+ struct xarray handles;
+};
+
+int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id);
+int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id);
+struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id);
+void lima_ctx_put(struct lima_ctx *ctx);
+void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr);
+void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
new file mode 100644
index 000000000000..570d0e93f9a9
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include "lima_device.h"
+#include "lima_gp.h"
+#include "lima_pp.h"
+#include "lima_mmu.h"
+#include "lima_pmu.h"
+#include "lima_l2_cache.h"
+#include "lima_dlbu.h"
+#include "lima_bcast.h"
+#include "lima_vm.h"
+
+struct lima_ip_desc {
+ char *name;
+ char *irq_name;
+ bool must_have[lima_gpu_num];
+ int offset[lima_gpu_num];
+
+ int (*init)(struct lima_ip *ip);
+ void (*fini)(struct lima_ip *ip);
+};
+
+#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
+ [lima_ip_##ipname] = { \
+ .name = #ipname, \
+ .irq_name = irq, \
+ .must_have = { \
+ [lima_gpu_mali400] = mst0, \
+ [lima_gpu_mali450] = mst1, \
+ }, \
+ .offset = { \
+ [lima_gpu_mali400] = off0, \
+ [lima_gpu_mali450] = off1, \
+ }, \
+ .init = lima_##func##_init, \
+ .fini = lima_##func##_fini, \
+ }
+
+static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
+ LIMA_IP_DESC(pmu, false, false, 0x02000, 0x02000, pmu, "pmu"),
+ LIMA_IP_DESC(l2_cache0, true, true, 0x01000, 0x10000, l2_cache, NULL),
+ LIMA_IP_DESC(l2_cache1, false, true, -1, 0x01000, l2_cache, NULL),
+ LIMA_IP_DESC(l2_cache2, false, false, -1, 0x11000, l2_cache, NULL),
+ LIMA_IP_DESC(gp, true, true, 0x00000, 0x00000, gp, "gp"),
+ LIMA_IP_DESC(pp0, true, true, 0x08000, 0x08000, pp, "pp0"),
+ LIMA_IP_DESC(pp1, false, false, 0x0A000, 0x0A000, pp, "pp1"),
+ LIMA_IP_DESC(pp2, false, false, 0x0C000, 0x0C000, pp, "pp2"),
+ LIMA_IP_DESC(pp3, false, false, 0x0E000, 0x0E000, pp, "pp3"),
+ LIMA_IP_DESC(pp4, false, false, -1, 0x28000, pp, "pp4"),
+ LIMA_IP_DESC(pp5, false, false, -1, 0x2A000, pp, "pp5"),
+ LIMA_IP_DESC(pp6, false, false, -1, 0x2C000, pp, "pp6"),
+ LIMA_IP_DESC(pp7, false, false, -1, 0x2E000, pp, "pp7"),
+ LIMA_IP_DESC(gpmmu, true, true, 0x03000, 0x03000, mmu, "gpmmu"),
+ LIMA_IP_DESC(ppmmu0, true, true, 0x04000, 0x04000, mmu, "ppmmu0"),
+ LIMA_IP_DESC(ppmmu1, false, false, 0x05000, 0x05000, mmu, "ppmmu1"),
+ LIMA_IP_DESC(ppmmu2, false, false, 0x06000, 0x06000, mmu, "ppmmu2"),
+ LIMA_IP_DESC(ppmmu3, false, false, 0x07000, 0x07000, mmu, "ppmmu3"),
+ LIMA_IP_DESC(ppmmu4, false, false, -1, 0x1C000, mmu, "ppmmu4"),
+ LIMA_IP_DESC(ppmmu5, false, false, -1, 0x1D000, mmu, "ppmmu5"),
+ LIMA_IP_DESC(ppmmu6, false, false, -1, 0x1E000, mmu, "ppmmu6"),
+ LIMA_IP_DESC(ppmmu7, false, false, -1, 0x1F000, mmu, "ppmmu7"),
+ LIMA_IP_DESC(dlbu, false, true, -1, 0x14000, dlbu, NULL),
+ LIMA_IP_DESC(bcast, false, true, -1, 0x13000, bcast, NULL),
+ LIMA_IP_DESC(pp_bcast, false, true, -1, 0x16000, pp_bcast, "pp"),
+ LIMA_IP_DESC(ppmmu_bcast, false, true, -1, 0x15000, mmu, NULL),
+};
+
+const char *lima_ip_name(struct lima_ip *ip)
+{
+ return lima_ip_desc[ip->id].name;
+}
+
+static int lima_clk_init(struct lima_device *dev)
+{
+ int err;
+ unsigned long bus_rate, gpu_rate;
+
+ dev->clk_bus = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->clk_bus)) {
+ dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus));
+ return PTR_ERR(dev->clk_bus);
+ }
+
+ dev->clk_gpu = devm_clk_get(dev->dev, "core");
+ if (IS_ERR(dev->clk_gpu)) {
+ dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu));
+ return PTR_ERR(dev->clk_gpu);
+ }
+
+ bus_rate = clk_get_rate(dev->clk_bus);
+ dev_info(dev->dev, "bus rate = %lu\n", bus_rate);
+
+ gpu_rate = clk_get_rate(dev->clk_gpu);
+ dev_info(dev->dev, "mod rate = %lu", gpu_rate);
+
+ err = clk_prepare_enable(dev->clk_bus);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(dev->clk_gpu);
+ if (err)
+ goto error_out0;
+
+ dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
+ if (IS_ERR(dev->reset)) {
+ err = PTR_ERR(dev->reset);
+ goto error_out1;
+ } else if (dev->reset != NULL) {
+ err = reset_control_deassert(dev->reset);
+ if (err)
+ goto error_out1;
+ }
+
+ return 0;
+
+error_out1:
+ clk_disable_unprepare(dev->clk_gpu);
+error_out0:
+ clk_disable_unprepare(dev->clk_bus);
+ return err;
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+ if (dev->reset != NULL)
+ reset_control_assert(dev->reset);
+ clk_disable_unprepare(dev->clk_gpu);
+ clk_disable_unprepare(dev->clk_bus);
+}
+
+static int lima_regulator_init(struct lima_device *dev)
+{
+ int ret;
+
+ dev->regulator = devm_regulator_get_optional(dev->dev, "mali");
+ if (IS_ERR(dev->regulator)) {
+ ret = PTR_ERR(dev->regulator);
+ dev->regulator = NULL;
+ if (ret == -ENODEV)
+ return 0;
+ dev_err(dev->dev, "failed to get regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(dev->regulator);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lima_regulator_fini(struct lima_device *dev)
+{
+ if (dev->regulator)
+ regulator_disable(dev->regulator);
+}
+
+static int lima_init_ip(struct lima_device *dev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = dev->ip + index;
+ int offset = desc->offset[dev->id];
+ bool must = desc->must_have[dev->id];
+ int err;
+
+ if (offset < 0)
+ return 0;
+
+ ip->dev = dev;
+ ip->id = index;
+ ip->iomem = dev->iomem + offset;
+ if (desc->irq_name) {
+ err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+ if (err < 0)
+ goto out;
+ ip->irq = err;
+ }
+
+ err = desc->init(ip);
+ if (!err) {
+ ip->present = true;
+ return 0;
+ }
+
+out:
+ return must ? err : 0;
+}
+
+static void lima_fini_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+
+ if (ip->present)
+ desc->fini(ip);
+}
+
+static int lima_init_gp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ int err;
+
+ err = lima_sched_pipe_init(pipe, "gp");
+ if (err)
+ return err;
+
+ pipe->l2_cache[pipe->num_l2_cache++] = dev->ip + lima_ip_l2_cache0;
+ pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu;
+ pipe->processor[pipe->num_processor++] = dev->ip + lima_ip_gp;
+
+ err = lima_gp_pipe_init(dev);
+ if (err) {
+ lima_sched_pipe_fini(pipe);
+ return err;
+ }
+
+ return 0;
+}
+
+static void lima_fini_gp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+
+ lima_gp_pipe_fini(dev);
+ lima_sched_pipe_fini(pipe);
+}
+
+static int lima_init_pp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ int err, i;
+
+ err = lima_sched_pipe_init(pipe, "pp");
+ if (err)
+ return err;
+
+ for (i = 0; i < LIMA_SCHED_PIPE_MAX_PROCESSOR; i++) {
+ struct lima_ip *pp = dev->ip + lima_ip_pp0 + i;
+ struct lima_ip *ppmmu = dev->ip + lima_ip_ppmmu0 + i;
+ struct lima_ip *l2_cache;
+
+ if (dev->id == lima_gpu_mali400)
+ l2_cache = dev->ip + lima_ip_l2_cache0;
+ else
+ l2_cache = dev->ip + lima_ip_l2_cache1 + (i >> 2);
+
+ if (pp->present && ppmmu->present && l2_cache->present) {
+ pipe->mmu[pipe->num_mmu++] = ppmmu;
+ pipe->processor[pipe->num_processor++] = pp;
+ if (!pipe->l2_cache[i >> 2])
+ pipe->l2_cache[pipe->num_l2_cache++] = l2_cache;
+ }
+ }
+
+ if (dev->ip[lima_ip_bcast].present) {
+ pipe->bcast_processor = dev->ip + lima_ip_pp_bcast;
+ pipe->bcast_mmu = dev->ip + lima_ip_ppmmu_bcast;
+ }
+
+ err = lima_pp_pipe_init(dev);
+ if (err) {
+ lima_sched_pipe_fini(pipe);
+ return err;
+ }
+
+ return 0;
+}
+
+static void lima_fini_pp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ lima_pp_pipe_fini(dev);
+ lima_sched_pipe_fini(pipe);
+}
+
+int lima_device_init(struct lima_device *ldev)
+{
+ int err, i;
+ struct resource *res;
+
+ dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
+
+ err = lima_clk_init(ldev);
+ if (err) {
+ dev_err(ldev->dev, "clk init fail %d\n", err);
+ return err;
+ }
+
+ err = lima_regulator_init(ldev);
+ if (err) {
+ dev_err(ldev->dev, "regulator init fail %d\n", err);
+ goto err_out0;
+ }
+
+ ldev->empty_vm = lima_vm_create(ldev);
+ if (!ldev->empty_vm) {
+ err = -ENOMEM;
+ goto err_out1;
+ }
+
+ ldev->va_start = 0;
+ if (ldev->id == lima_gpu_mali450) {
+ ldev->va_end = LIMA_VA_RESERVE_START;
+ ldev->dlbu_cpu = dma_alloc_wc(
+ ldev->dev, LIMA_PAGE_SIZE,
+ &ldev->dlbu_dma, GFP_KERNEL);
+ if (!ldev->dlbu_cpu) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ } else
+ ldev->va_end = LIMA_VA_RESERVE_END;
+
+ res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
+ ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+ if (IS_ERR(ldev->iomem)) {
+ dev_err(ldev->dev, "fail to ioremap iomem\n");
+ err = PTR_ERR(ldev->iomem);
+ goto err_out3;
+ }
+
+ for (i = 0; i < lima_ip_num; i++) {
+ err = lima_init_ip(ldev, i);
+ if (err)
+ goto err_out4;
+ }
+
+ err = lima_init_gp_pipe(ldev);
+ if (err)
+ goto err_out4;
+
+ err = lima_init_pp_pipe(ldev);
+ if (err)
+ goto err_out5;
+
+ return 0;
+
+err_out5:
+ lima_fini_gp_pipe(ldev);
+err_out4:
+ while (--i >= 0)
+ lima_fini_ip(ldev, i);
+err_out3:
+ if (ldev->dlbu_cpu)
+ dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
+ ldev->dlbu_cpu, ldev->dlbu_dma);
+err_out2:
+ lima_vm_put(ldev->empty_vm);
+err_out1:
+ lima_regulator_fini(ldev);
+err_out0:
+ lima_clk_fini(ldev);
+ return err;
+}
+
+void lima_device_fini(struct lima_device *ldev)
+{
+ int i;
+
+ lima_fini_pp_pipe(ldev);
+ lima_fini_gp_pipe(ldev);
+
+ for (i = lima_ip_num - 1; i >= 0; i--)
+ lima_fini_ip(ldev, i);
+
+ if (ldev->dlbu_cpu)
+ dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
+ ldev->dlbu_cpu, ldev->dlbu_dma);
+
+ lima_vm_put(ldev->empty_vm);
+
+ lima_regulator_fini(ldev);
+
+ lima_clk_fini(ldev);
+}
diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h
new file mode 100644
index 000000000000..31158d86271c
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_device.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DEVICE_H__
+#define __LIMA_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/delay.h>
+
+#include "lima_sched.h"
+
+enum lima_gpu_id {
+ lima_gpu_mali400 = 0,
+ lima_gpu_mali450,
+ lima_gpu_num,
+};
+
+enum lima_ip_id {
+ lima_ip_pmu,
+ lima_ip_gpmmu,
+ lima_ip_ppmmu0,
+ lima_ip_ppmmu1,
+ lima_ip_ppmmu2,
+ lima_ip_ppmmu3,
+ lima_ip_ppmmu4,
+ lima_ip_ppmmu5,
+ lima_ip_ppmmu6,
+ lima_ip_ppmmu7,
+ lima_ip_gp,
+ lima_ip_pp0,
+ lima_ip_pp1,
+ lima_ip_pp2,
+ lima_ip_pp3,
+ lima_ip_pp4,
+ lima_ip_pp5,
+ lima_ip_pp6,
+ lima_ip_pp7,
+ lima_ip_l2_cache0,
+ lima_ip_l2_cache1,
+ lima_ip_l2_cache2,
+ lima_ip_dlbu,
+ lima_ip_bcast,
+ lima_ip_pp_bcast,
+ lima_ip_ppmmu_bcast,
+ lima_ip_num,
+};
+
+struct lima_device;
+
+struct lima_ip {
+ struct lima_device *dev;
+ enum lima_ip_id id;
+ bool present;
+
+ void __iomem *iomem;
+ int irq;
+
+ union {
+ /* gp/pp */
+ bool async_reset;
+ /* l2 cache */
+ spinlock_t lock;
+ } data;
+};
+
+enum lima_pipe_id {
+ lima_pipe_gp,
+ lima_pipe_pp,
+ lima_pipe_num,
+};
+
+struct lima_device {
+ struct device *dev;
+ struct drm_device *ddev;
+ struct platform_device *pdev;
+
+ enum lima_gpu_id id;
+ u32 gp_version;
+ u32 pp_version;
+ int num_pp;
+
+ void __iomem *iomem;
+ struct clk *clk_bus;
+ struct clk *clk_gpu;
+ struct reset_control *reset;
+ struct regulator *regulator;
+
+ struct lima_ip ip[lima_ip_num];
+ struct lima_sched_pipe pipe[lima_pipe_num];
+
+ struct lima_vm *empty_vm;
+ uint64_t va_start;
+ uint64_t va_end;
+
+ u32 *dlbu_cpu;
+ dma_addr_t dlbu_dma;
+};
+
+static inline struct lima_device *
+to_lima_dev(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
+int lima_device_init(struct lima_device *ldev);
+void lima_device_fini(struct lima_device *ldev);
+
+const char *lima_ip_name(struct lima_ip *ip);
+
+typedef int (*lima_poll_func_t)(struct lima_ip *);
+
+static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
+ int sleep_us, int timeout_us)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ might_sleep_if(sleep_us);
+ while (1) {
+ if (func(ip))
+ return 0;
+
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+
+ if (sleep_us)
+ usleep_range((sleep_us >> 2) + 1, sleep_us);
+ }
+ return 0;
+}
+
+#endif
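Aside (not part of the patch): lima_poll_timeout() above spins or sleeps on a lima_poll_func_t predicate until it returns non-zero or timeout_us expires. A minimal caller sketch; the register offset and ready bit are hypothetical, and <linux/io.h> and <linux/bits.h> are assumed for readl() and BIT():

/* Illustrative only: poll a made-up status bit for up to 1 ms,
 * sleeping up to ~10 us between reads.
 */
static int example_ip_ready(struct lima_ip *ip)
{
	return readl(ip->iomem + 0x00) & BIT(0);	/* hypothetical register/bit */
}

static int example_wait_ready(struct lima_ip *ip)
{
	return lima_poll_timeout(ip, example_ip_ready, 10, 1000);
}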
diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
new file mode 100644
index 000000000000..8399ceffb94b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dlbu.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_dlbu.h"
+#include "lima_vm.h"
+#include "lima_regs.h"
+
+#define dlbu_write(reg, data) writel(data, ip->iomem + reg)
+#define dlbu_read(reg) readl(ip->iomem + reg)
+
+void lima_dlbu_enable(struct lima_device *dev, int num_pp)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct lima_ip *ip = dev->ip + lima_ip_dlbu;
+ int i, mask = 0;
+
+ for (i = 0; i < num_pp; i++) {
+ struct lima_ip *pp = pipe->processor[i];
+
+ mask |= 1 << (pp->id - lima_ip_pp0);
+ }
+
+ dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, mask);
+}
+
+void lima_dlbu_disable(struct lima_device *dev)
+{
+ struct lima_ip *ip = dev->ip + lima_ip_dlbu;
+
+ dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, 0);
+}
+
+void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
+{
+ dlbu_write(LIMA_DLBU_TLLIST_VBASEADDR, reg[0]);
+ dlbu_write(LIMA_DLBU_FB_DIM, reg[1]);
+ dlbu_write(LIMA_DLBU_TLLIST_CONF, reg[2]);
+ dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+
+ dlbu_write(LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
+ dlbu_write(LIMA_DLBU_MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
+
+ return 0;
+}
+
+void lima_dlbu_fini(struct lima_ip *ip)
+{
+
+}
diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
new file mode 100644
index 000000000000..16f877984466
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dlbu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DLBU_H__
+#define __LIMA_DLBU_H__
+
+struct lima_ip;
+struct lima_device;
+
+void lima_dlbu_enable(struct lima_device *dev, int num_pp);
+void lima_dlbu_disable(struct lima_device *dev);
+
+void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
+
+int lima_dlbu_init(struct lima_ip *ip);
+void lima_dlbu_fini(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
new file mode 100644
index 000000000000..f9a281a62083
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
+#include <drm/lima_drm.h>
+
+#include "lima_drv.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+#include "lima_vm.h"
+
+int lima_sched_timeout_ms;
+
+MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (default 0 = no timeout)");
+module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
+
+static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_get_param *args = data;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ if (args->pad)
+ return -EINVAL;
+
+ switch (args->param) {
+ case DRM_LIMA_PARAM_GPU_ID:
+ switch (ldev->id) {
+ case lima_gpu_mali400:
+ args->value = DRM_LIMA_PARAM_GPU_ID_MALI400;
+ break;
+ case lima_gpu_mali450:
+ args->value = DRM_LIMA_PARAM_GPU_ID_MALI450;
+ break;
+ default:
+ args->value = DRM_LIMA_PARAM_GPU_ID_UNKNOWN;
+ break;
+ }
+ break;
+
+ case DRM_LIMA_PARAM_NUM_PP:
+ args->value = ldev->pipe[lima_pipe_pp].num_processor;
+ break;
+
+ case DRM_LIMA_PARAM_GP_VERSION:
+ args->value = ldev->gp_version;
+ break;
+
+ case DRM_LIMA_PARAM_PP_VERSION:
+ args->value = ldev->pp_version;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_create *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags)
+ return -EINVAL;
+
+ if (args->size == 0)
+ return -EINVAL;
+
+ return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
+}
+
+static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_info *args = data;
+
+ return lima_gem_get_info(file, args->handle, &args->va, &args->offset);
+}
+
+static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_submit *args = data;
+ struct lima_device *ldev = to_lima_dev(dev);
+ struct lima_drm_priv *priv = file->driver_priv;
+ struct drm_lima_gem_submit_bo *bos;
+ struct lima_sched_pipe *pipe;
+ struct lima_sched_task *task;
+ struct lima_ctx *ctx;
+ struct lima_submit submit = {0};
+ size_t size;
+ int err = 0;
+
+ if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
+ return -EINVAL;
+
+ if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE))
+ return -EINVAL;
+
+ pipe = ldev->pipe + args->pipe;
+ if (args->frame_size != pipe->frame_size)
+ return -EINVAL;
+
+ bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ size = args->nr_bos * sizeof(*submit.bos);
+ if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
+ err = -EFAULT;
+ goto out0;
+ }
+
+ task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
+ if (!task) {
+ err = -ENOMEM;
+ goto out0;
+ }
+
+ task->frame = task + 1;
+ if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
+ err = -EFAULT;
+ goto out1;
+ }
+
+ err = pipe->task_validate(pipe, task);
+ if (err)
+ goto out1;
+
+ ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
+ if (!ctx) {
+ err = -ENOENT;
+ goto out1;
+ }
+
+ submit.pipe = args->pipe;
+ submit.bos = bos;
+ submit.lbos = (void *)bos + size;
+ submit.nr_bos = args->nr_bos;
+ submit.task = task;
+ submit.ctx = ctx;
+ submit.flags = args->flags;
+ submit.in_sync[0] = args->in_sync[0];
+ submit.in_sync[1] = args->in_sync[1];
+ submit.out_sync = args->out_sync;
+
+ err = lima_gem_submit(file, &submit);
+
+ lima_ctx_put(ctx);
+out1:
+ if (err)
+ kmem_cache_free(pipe->task_slab, task);
+out0:
+ kvfree(bos);
+ return err;
+}
+
+static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_wait *args = data;
+
+ if (args->op & ~(LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE))
+ return -EINVAL;
+
+ return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
+}
+
+static int lima_ioctl_ctx_create(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_ctx_create *args = data;
+ struct lima_drm_priv *priv = file->driver_priv;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ if (args->_pad)
+ return -EINVAL;
+
+ return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
+}
+
+static int lima_ioctl_ctx_free(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_ctx_create *args = data;
+ struct lima_drm_priv *priv = file->driver_priv;
+
+ if (args->_pad)
+ return -EINVAL;
+
+ return lima_ctx_free(&priv->ctx_mgr, args->id);
+}
+
+static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ int err;
+ struct lima_drm_priv *priv;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->vm = lima_vm_create(ldev);
+ if (!priv->vm) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ lima_ctx_mgr_init(&priv->ctx_mgr);
+
+ file->driver_priv = priv;
+ return 0;
+
+err_out0:
+ kfree(priv);
+ return err;
+}
+
+static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct lima_drm_priv *priv = file->driver_priv;
+
+ lima_ctx_mgr_fini(&priv->ctx_mgr);
+ lima_vm_put(priv->vm);
+ kfree(priv);
+}
+
+static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_AUTH|DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations lima_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .mmap = lima_gem_mmap,
+};
+
+static struct drm_driver lima_drm_driver = {
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | DRIVER_SYNCOBJ,
+ .open = lima_drm_driver_open,
+ .postclose = lima_drm_driver_postclose,
+ .ioctls = lima_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
+ .fops = &lima_drm_driver_fops,
+ .gem_free_object_unlocked = lima_gem_free_object,
+ .gem_open_object = lima_gem_object_open,
+ .gem_close_object = lima_gem_object_close,
+ .gem_vm_ops = &lima_gem_vm_ops,
+ .name = "lima",
+ .desc = "lima DRM",
+ .date = "20190217",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
+ .gem_prime_mmap = lima_gem_prime_mmap,
+};
+
+static int lima_pdev_probe(struct platform_device *pdev)
+{
+ struct lima_device *ldev;
+ struct drm_device *ddev;
+ int err;
+
+ err = lima_sched_slab_init();
+ if (err)
+ return err;
+
+ ldev = devm_kzalloc(&pdev->dev, sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ ldev->pdev = pdev;
+ ldev->dev = &pdev->dev;
+ ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
+
+ platform_set_drvdata(pdev, ldev);
+
+ /* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
+ if (IS_ERR(ddev)) {
+ err = PTR_ERR(ddev);
+ goto err_out0;
+ }
+
+ ddev->dev_private = ldev;
+ ldev->ddev = ddev;
+
+ err = lima_device_init(ldev);
+ if (err) {
+ dev_err(&pdev->dev, "Fatal error during GPU init\n");
+ goto err_out1;
+ }
+
+ /*
+ * Register the DRM device with the core so userspace can open it;
+ * lima is a render-only driver, so there are no connectors to set up.
+ */
+ err = drm_dev_register(ddev, 0);
+ if (err < 0)
+ goto err_out2;
+
+ return 0;
+
+err_out2:
+ lima_device_fini(ldev);
+err_out1:
+ drm_dev_put(ddev);
+err_out0:
+ lima_sched_slab_fini();
+ return err;
+}
+
+static int lima_pdev_remove(struct platform_device *pdev)
+{
+ struct lima_device *ldev = platform_get_drvdata(pdev);
+ struct drm_device *ddev = ldev->ddev;
+
+ drm_dev_unregister(ddev);
+ lima_device_fini(ldev);
+ drm_dev_put(ddev);
+ lima_sched_slab_fini();
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
+ { .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver lima_platform_driver = {
+ .probe = lima_pdev_probe,
+ .remove = lima_pdev_remove,
+ .driver = {
+ .name = "lima",
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init lima_init(void)
+{
+ return platform_driver_register(&lima_platform_driver);
+}
+module_init(lima_init);
+
+static void __exit lima_exit(void)
+{
+ platform_driver_unregister(&lima_platform_driver);
+}
+module_exit(lima_exit);
+
+MODULE_AUTHOR("Lima Project Developers");
+MODULE_DESCRIPTION("Lima DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
new file mode 100644
index 000000000000..69c7344715c9
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DRV_H__
+#define __LIMA_DRV_H__
+
+#include <drm/drm_file.h>
+
+#include "lima_ctx.h"
+
+extern int lima_sched_timeout_ms;
+
+struct lima_vm;
+struct lima_bo;
+struct lima_sched_task;
+
+struct drm_lima_gem_submit_bo;
+
+struct lima_drm_priv {
+ struct lima_vm *vm;
+ struct lima_ctx_mgr ctx_mgr;
+};
+
+struct lima_submit {
+ struct lima_ctx *ctx;
+ int pipe;
+ u32 flags;
+
+ struct drm_lima_gem_submit_bo *bos;
+ struct lima_bo **lbos;
+ u32 nr_bos;
+
+ u32 in_sync[2];
+ u32 out_sync;
+
+ struct lima_sched_task *task;
+};
+
+static inline struct lima_drm_priv *
+to_lima_drm_priv(struct drm_file *file)
+{
+ return file->driver_priv;
+}
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
new file mode 100644
index 000000000000..477c0f766663
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/mm.h>
+#include <linux/sync_file.h>
+#include <linux/pfn_t.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_drv.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+
+int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle)
+{
+ int err;
+ struct lima_bo *bo;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ bo = lima_bo_create(ldev, size, flags, NULL, NULL);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ err = drm_gem_handle_create(file, &bo->gem, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put_unlocked(&bo->gem);
+
+ return err;
+}
+
+void lima_gem_free_object(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (!list_empty(&bo->va))
+ dev_err(obj->dev->dev, "lima gem free bo still has va\n");
+
+ lima_bo_destroy(bo);
+}
+
+int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+
+ return lima_vm_bo_add(vm, bo, true);
+}
+
+void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+
+ lima_vm_bo_del(vm, bo);
+}
+
+int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ struct lima_bo *bo;
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+ int err;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = to_lima_bo(obj);
+
+ *va = lima_vm_get_va(vm, bo);
+
+ err = drm_gem_create_mmap_offset(obj);
+ if (!err)
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ drm_gem_object_put_unlocked(obj);
+ return err;
+}
+
+static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct lima_bo *bo = to_lima_bo(obj);
+ pfn_t pfn;
+ pgoff_t pgoff;
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+
+ return vmf_insert_mixed(vma, vmf->address, pfn);
+}
+
+const struct vm_operations_struct lima_gem_vm_ops = {
+ .fault = lima_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+void lima_set_vma_flags(struct vm_area_struct *vma)
+{
+ pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_page_prot = pgprot_writecombine(prot);
+}
+
+int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ lima_set_vma_flags(vma);
+ return 0;
+}
+
+static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
+ bool write, bool explicit)
+{
+ int err = 0;
+
+ if (!write) {
+ err = reservation_object_reserve_shared(bo->gem.resv, 1);
+ if (err)
+ return err;
+ }
+
+ /* explicit sync uses the user-passed dependency fences */
+ if (explicit)
+ return 0;
+
+ return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
+}
+
+static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
+ struct ww_acquire_ctx *ctx)
+{
+ int i, ret = 0, contended, slow_locked = -1;
+
+ ww_acquire_init(ctx, &reservation_ww_class);
+
+retry:
+ for (i = 0; i < nr_bos; i++) {
+ if (i == slow_locked) {
+ slow_locked = -1;
+ continue;
+ }
+
+ ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
+ if (ret < 0) {
+ contended = i;
+ goto err;
+ }
+ }
+
+ ww_acquire_done(ctx);
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ ww_mutex_unlock(&bos[i]->gem.resv->lock);
+
+ if (slow_locked >= 0)
+ ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
+
+ if (ret == -EDEADLK) {
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = ww_mutex_lock_slow_interruptible(
+ &bos[contended]->gem.resv->lock, ctx);
+ if (!ret) {
+ slow_locked = contended;
+ goto retry;
+ }
+ }
+ ww_acquire_fini(ctx);
+
+ return ret;
+}
+
+static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
+ struct ww_acquire_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < nr_bos; i++)
+ ww_mutex_unlock(&bos[i]->gem.resv->lock);
+ ww_acquire_fini(ctx);
+}
+
+static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
+ struct dma_fence *fence = NULL;
+
+ if (!submit->in_sync[i])
+ continue;
+
+ err = drm_syncobj_find_fence(file, submit->in_sync[i],
+ 0, 0, &fence);
+ if (err)
+ return err;
+
+ err = drm_gem_fence_array_add(&submit->task->deps, fence);
+ if (err) {
+ dma_fence_put(fence);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
+{
+ int i, err = 0;
+ struct ww_acquire_ctx ctx;
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+ struct drm_syncobj *out_sync = NULL;
+ struct dma_fence *fence;
+ struct lima_bo **bos = submit->lbos;
+
+ if (submit->out_sync) {
+ out_sync = drm_syncobj_find(file, submit->out_sync);
+ if (!out_sync)
+ return -ENOENT;
+ }
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj;
+ struct lima_bo *bo;
+
+ obj = drm_gem_object_lookup(file, submit->bos[i].handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto err_out0;
+ }
+
+ bo = to_lima_bo(obj);
+
+ /* increase the refcnt of the gpu va mapping so it cannot be unmapped
+ * while the task is executing; it will be decreased when the task is done
+ */
+ err = lima_vm_bo_add(vm, bo, false);
+ if (err) {
+ drm_gem_object_put_unlocked(obj);
+ goto err_out0;
+ }
+
+ bos[i] = bo;
+ }
+
+ err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
+ if (err)
+ goto err_out0;
+
+ err = lima_sched_task_init(
+ submit->task, submit->ctx->context + submit->pipe,
+ bos, submit->nr_bos, vm);
+ if (err)
+ goto err_out1;
+
+ err = lima_gem_add_deps(file, submit);
+ if (err)
+ goto err_out2;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ err = lima_gem_sync_bo(
+ submit->task, bos[i],
+ submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
+ submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
+ if (err)
+ goto err_out2;
+ }
+
+ fence = lima_sched_context_queue_task(
+ submit->ctx->context + submit->pipe, submit->task);
+
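+ /* publish the task fence on every BO: exclusive for writers so
+ * later users wait on them, shared for readers
+ */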
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
+ reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
+ else
+ reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
+ }
+
+ lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+
+ for (i = 0; i < submit->nr_bos; i++)
+ drm_gem_object_put_unlocked(&bos[i]->gem);
+
+ if (out_sync) {
+ drm_syncobj_replace_fence(out_sync, fence);
+ drm_syncobj_put(out_sync);
+ }
+
+ dma_fence_put(fence);
+
+ return 0;
+
+err_out2:
+ lima_sched_task_fini(submit->task);
+err_out1:
+ lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+err_out0:
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (!bos[i])
+ break;
+ lima_vm_bo_del(vm, bos[i]);
+ drm_gem_object_put_unlocked(&bos[i]->gem);
+ }
+ if (out_sync)
+ drm_syncobj_put(out_sync);
+ return err;
+}
+
+int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
+{
+ bool write = op & LIMA_GEM_WAIT_WRITE;
+ long ret, timeout;
+
+ if (!op)
+ return 0;
+
+ timeout = drm_timeout_abs_to_jiffies(timeout_ns);
+
+ ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+ if (ret == 0)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
new file mode 100644
index 000000000000..556111a01135
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GEM_H__
+#define __LIMA_GEM_H__
+
+struct lima_bo;
+struct lima_submit;
+
+extern const struct vm_operations_struct lima_gem_vm_ops;
+
+struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle);
+void lima_gem_free_object(struct drm_gem_object *obj);
+int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
+void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
+int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
+int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
+int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
+
+void lima_set_vma_flags(struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
new file mode 100644
index 000000000000..9c6d9f1dba55
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem_prime.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/dma-buf.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include "lima_device.h"
+#include "lima_object.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+
+struct drm_gem_object *lima_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct lima_device *ldev = to_lima_dev(dev);
+ struct lima_bo *bo;
+
+ bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt,
+ attach->dmabuf->resv);
+ if (IS_ERR(bo))
+ return ERR_CAST(bo);
+
+ return &bo->gem;
+}
+
+struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ int npages = obj->size >> PAGE_SHIFT;
+
+ return drm_prime_pages_to_sg(bo->pages, npages);
+}
+
+int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ if (ret)
+ return ret;
+
+ lima_set_vma_flags(vma);
+ return 0;
+}
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
new file mode 100644
index 000000000000..34b4d35c21e3
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem_prime.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GEM_PRIME_H__
+#define __LIMA_GEM_PRIME_H__
+
+struct drm_gem_object *lima_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
+int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
new file mode 100644
index 000000000000..ccf49faedebf
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_device.h"
+#include "lima_gp.h"
+#include "lima_regs.h"
+
+#define gp_write(reg, data) writel(data, ip->iomem + reg)
+#define gp_read(reg) readl(ip->iomem + reg)
+
+static irqreturn_t lima_gp_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ u32 state = gp_read(LIMA_GP_INT_STAT);
+ u32 status = gp_read(LIMA_GP_STATUS);
+ bool done = false;
+
+ /* for shared irq case */
+ if (!state)
+ return IRQ_NONE;
+
+ if (state & LIMA_GP_IRQ_MASK_ERROR) {
+ dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+ state, status);
+
+ /* mask all interrupts before hard reset */
+ gp_write(LIMA_GP_INT_MASK, 0);
+
+ pipe->error = true;
+ done = true;
+ } else {
+ bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST |
+ LIMA_GP_IRQ_PLBU_END_CMD_LST);
+ bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
+ LIMA_GP_STATUS_PLBU_ACTIVE);
+ done = valid && !active;
+ }
+
+ gp_write(LIMA_GP_INT_CLEAR, state);
+
+ if (done)
+ lima_sched_pipe_task_done(pipe);
+
+ return IRQ_HANDLED;
+}
+
+static void lima_gp_soft_reset_async(struct lima_ip *ip)
+{
+ if (ip->data.async_reset)
+ return;
+
+ gp_write(LIMA_GP_INT_MASK, 0);
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_SOFT_RESET);
+ ip->data.async_reset = true;
+}
+
+static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ if (!ip->data.async_reset)
+ return 0;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, v,
+ v & LIMA_GP_IRQ_RESET_COMPLETED,
+ 0, 100);
+ if (err) {
+ dev_err(dev->dev, "gp soft reset time out\n");
+ return err;
+ }
+
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+
+ ip->data.async_reset = false;
+ return 0;
+}
+
+static int lima_gp_task_validate(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ (void)pipe;
+
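+ /* reject frames whose command list or heap ranges are inverted */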
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] >
+ f[LIMA_GP_VSCL_END_ADDR >> 2] ||
+ f[LIMA_GP_PLBUCL_START_ADDR >> 2] >
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2] ||
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] >
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2])
+ return -EINVAL;
+
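+ /* at least one of the VS and PLBU command lists must be non-empty */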
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] ==
+ f[LIMA_GP_VSCL_END_ADDR >> 2] &&
+ f[LIMA_GP_PLBUCL_START_ADDR >> 2] ==
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2])
+ return -EINVAL;
+
+ return 0;
+}
+
+static void lima_gp_task_run(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ struct lima_ip *ip = pipe->processor[0];
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ u32 cmd = 0;
+ int i;
+
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
+ f[LIMA_GP_VSCL_END_ADDR >> 2])
+ cmd |= LIMA_GP_CMD_START_VS;
+ if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] !=
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2])
+ cmd |= LIMA_GP_CMD_START_PLBU;
+
+ /* before any hw ops, wait for the async soft reset issued by the last successful task */
+ lima_gp_soft_reset_async_wait(ip);
+
+ for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++)
+ writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4);
+
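+ /* latch the PLBU heap bounds written above, then start the
+ * requested VS/PLBU units
+ */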
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+ gp_write(LIMA_GP_CMD, cmd);
+}
+
+static int lima_gp_hard_reset_poll(struct lima_ip *ip)
+{
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
+ return gp_read(LIMA_GP_PERF_CNT_0_LIMIT) == 0xC01A0000;
+}
+
+static int lima_gp_hard_reset(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ gp_write(LIMA_GP_INT_MASK, 0);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
+ ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "gp hard reset timeout\n");
+ return ret;
+ }
+
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+ return 0;
+}
+
+static void lima_gp_task_fini(struct lima_sched_pipe *pipe)
+{
+ lima_gp_soft_reset_async(pipe->processor[0]);
+}
+
+static void lima_gp_task_error(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+
+ dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
+ gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));
+
+ lima_gp_hard_reset(ip);
+}
+
+static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
+{
+ lima_sched_pipe_task_done(pipe);
+}
+
+static void lima_gp_print_version(struct lima_ip *ip)
+{
+ u32 version, major, minor;
+ char *name;
+
+ version = gp_read(LIMA_GP_VERSION);
+ major = (version >> 8) & 0xFF;
+ minor = version & 0xFF;
+ switch (version >> 16) {
+ case 0xA07:
+ name = "mali200";
+ break;
+ case 0xC07:
+ name = "mali300";
+ break;
+ case 0xB07:
+ name = "mali400";
+ break;
+ case 0xD07:
+ name = "mali450";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
+ lima_ip_name(ip), name, major, minor);
+}
+
+static struct kmem_cache *lima_gp_task_slab;
+static int lima_gp_task_slab_refcnt;
+
+int lima_gp_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ lima_gp_print_version(ip);
+
+ ip->data.async_reset = false;
+ lima_gp_soft_reset_async(ip);
+ err = lima_gp_soft_reset_async_wait(ip);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "gp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ dev->gp_version = gp_read(LIMA_GP_VERSION);
+
+ return 0;
+}
+
+void lima_gp_fini(struct lima_ip *ip)
+{
+
+}
+
+int lima_gp_pipe_init(struct lima_device *dev)
+{
+ int frame_size = sizeof(struct drm_lima_gp_frame);
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+
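+ /* frame data following the task struct is copied from userspace,
+ * so whitelist exactly that region for usercopy
+ */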
+ if (!lima_gp_task_slab) {
+ lima_gp_task_slab = kmem_cache_create_usercopy(
+ "lima_gp_task", sizeof(struct lima_sched_task) + frame_size,
+ 0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
+ frame_size, NULL);
+ if (!lima_gp_task_slab)
+ return -ENOMEM;
+ }
+ lima_gp_task_slab_refcnt++;
+
+ pipe->frame_size = frame_size;
+ pipe->task_slab = lima_gp_task_slab;
+
+ pipe->task_validate = lima_gp_task_validate;
+ pipe->task_run = lima_gp_task_run;
+ pipe->task_fini = lima_gp_task_fini;
+ pipe->task_error = lima_gp_task_error;
+ pipe->task_mmu_error = lima_gp_task_mmu_error;
+
+ return 0;
+}
+
+void lima_gp_pipe_fini(struct lima_device *dev)
+{
+ if (!--lima_gp_task_slab_refcnt) {
+ kmem_cache_destroy(lima_gp_task_slab);
+ lima_gp_task_slab = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/lima/lima_gp.h b/drivers/gpu/drm/lima/lima_gp.h
new file mode 100644
index 000000000000..516e5c1babbb
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gp.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GP_H__
+#define __LIMA_GP_H__
+
+struct lima_ip;
+struct lima_device;
+
+int lima_gp_init(struct lima_ip *ip);
+void lima_gp_fini(struct lima_ip *ip);
+
+int lima_gp_pipe_init(struct lima_device *dev);
+void lima_gp_pipe_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
new file mode 100644
index 000000000000..6873a7af5a5c
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_l2_cache.h"
+#include "lima_regs.h"
+
+#define l2_cache_write(reg, data) writel(data, ip->iomem + reg)
+#define l2_cache_read(reg) readl(ip->iomem + reg)
+
+static int lima_l2_cache_wait_idle(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_L2_CACHE_STATUS, v,
+ !(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY),
+ 0, 1000);
+ if (err) {
+ dev_err(dev->dev, "l2 cache wait command timeout\n");
+ return err;
+ }
+ return 0;
+}
+
+int lima_l2_cache_flush(struct lima_ip *ip)
+{
+ int ret;
+
+ spin_lock(&ip->data.lock);
+ l2_cache_write(LIMA_L2_CACHE_COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL);
+ ret = lima_l2_cache_wait_idle(ip);
+ spin_unlock(&ip->data.lock);
+ return ret;
+}
+
+int lima_l2_cache_init(struct lima_ip *ip)
+{
+ int i, err;
+ u32 size;
+ struct lima_device *dev = ip->dev;
+
+ /* l2_cache2 only exists when one of PP4-7 is present */
+ if (ip->id == lima_ip_l2_cache2) {
+ for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+ if (dev->ip[i].present)
+ break;
+ }
+ if (i > lima_ip_pp7)
+ return -ENODEV;
+ }
+
+ spin_lock_init(&ip->data.lock);
+
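+ /* the size register packs log2-encoded fields: [7:0] line size,
+ * [15:8] associativity, [23:16] total size, [31:24] external bus width
+ */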
+ size = l2_cache_read(LIMA_L2_CACHE_SIZE);
+ dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
+ 1 << (((size >> 16) & 0xff) - 10),
+ 1 << ((size >> 8) & 0xff),
+ 1 << (size & 0xff),
+ 1 << ((size >> 24) & 0xff));
+
+ err = lima_l2_cache_flush(ip);
+ if (err)
+ return err;
+
+ l2_cache_write(LIMA_L2_CACHE_ENABLE,
+ LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+ l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
+
+ return 0;
+}
+
+void lima_l2_cache_fini(struct lima_ip *ip)
+{
+
+}
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
new file mode 100644
index 000000000000..c63fb676ff14
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_l2_cache.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_L2_CACHE_H__
+#define __LIMA_L2_CACHE_H__
+
+struct lima_ip;
+
+int lima_l2_cache_init(struct lima_ip *ip);
+void lima_l2_cache_fini(struct lima_ip *ip);
+
+int lima_l2_cache_flush(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
new file mode 100644
index 000000000000..8e1651d6a61f
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_mmu.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+#include "lima_regs.h"
+
+#define mmu_write(reg, data) writel(data, ip->iomem + reg)
+#define mmu_read(reg) readl(ip->iomem + reg)
+
+#define lima_mmu_send_command(cmd, addr, val, cond) \
+({ \
+ int __ret; \
+ \
+ mmu_write(LIMA_MMU_COMMAND, cmd); \
+ __ret = readl_poll_timeout(ip->iomem + (addr), val, \
+ cond, 0, 100); \
+ if (__ret) \
+ dev_err(dev->dev, \
+ "mmu command %x timeout\n", cmd); \
+ __ret; \
+})
+
+static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ u32 status = mmu_read(LIMA_MMU_INT_STATUS);
+ struct lima_sched_pipe *pipe;
+
+ /* for shared irq case */
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & LIMA_MMU_INT_PAGE_FAULT) {
+ u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);
+
+ dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
+ fault, LIMA_MMU_STATUS_BUS_ID(status),
+ status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
+ lima_ip_name(ip));
+ }
+
+ if (status & LIMA_MMU_INT_READ_BUS_ERROR)
+ dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
+
+ /* mask all interrupts before resume */
+ mmu_write(LIMA_MMU_INT_MASK, 0);
+ mmu_write(LIMA_MMU_INT_CLEAR, status);
+
+ pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
+ lima_sched_pipe_mmu_error(pipe);
+
+ return IRQ_HANDLED;
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ if (ip->id == lima_ip_ppmmu_bcast)
+ return 0;
+
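+ /* register sanity check: the DTE address register keeps only the
+ * page-aligned bits, so the scratch value must read back as 0xCAFEB000
+ */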
+ mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
+ if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
+ dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip));
+ return -EIO;
+ }
+
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
+ err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip));
+ return err;
+ }
+
+ mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+void lima_mmu_fini(struct lima_ip *ip)
+{
+
+}
+
+void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
+{
+ struct lima_device *dev = ip->dev;
+ u32 v;
+
+ lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_STALL_ACTIVE);
+
+ if (vm)
+ mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
+
+ /* flush the TLB */
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+
+ lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL,
+ LIMA_MMU_STATUS, v,
+ !(v & LIMA_MMU_STATUS_STALL_ACTIVE));
+}
+
+void lima_mmu_page_fault_resume(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ u32 status = mmu_read(LIMA_MMU_STATUS);
+ u32 v;
+
+ if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
+ dev_info(dev->dev, "mmu resume\n");
+
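+ /* hard-reset the MMU and point it back at the empty VM before
+ * paging is re-enabled
+ */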
+ mmu_write(LIMA_MMU_INT_MASK, 0);
+ mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
+ lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+ }
+}
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
new file mode 100644
index 000000000000..8c78319bcc8e
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_MMU_H__
+#define __LIMA_MMU_H__
+
+struct lima_ip;
+struct lima_vm;
+
+int lima_mmu_init(struct lima_ip *ip);
+void lima_mmu_fini(struct lima_ip *ip);
+
+void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
+void lima_mmu_page_fault_resume(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
new file mode 100644
index 000000000000..5c41f859a72f
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_object.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <drm/drm_prime.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+
+#include "lima_object.h"
+
+void lima_bo_destroy(struct lima_bo *bo)
+{
+ if (bo->sgt) {
+ kfree(bo->pages);
+ drm_prime_gem_destroy(&bo->gem, bo->sgt);
+ } else {
+ if (bo->pages_dma_addr) {
+ int i, npages = bo->gem.size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (bo->pages_dma_addr[i])
+ dma_unmap_page(bo->gem.dev->dev,
+ bo->pages_dma_addr[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ }
+
+ if (bo->pages)
+ drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+ }
+
+ kfree(bo->pages_dma_addr);
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags,
+ struct reservation_object *resv)
+{
+ struct lima_bo *bo;
+ int err;
+
+ size = PAGE_ALIGN(size);
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&bo->lock);
+ INIT_LIST_HEAD(&bo->va);
+ bo->gem.resv = resv;
+
+ err = drm_gem_object_init(dev->ddev, &bo->gem, size);
+ if (err) {
+ kfree(bo);
+ return ERR_PTR(err);
+ }
+
+ return bo;
+}
+
+struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
+ u32 flags, struct sg_table *sgt,
+ struct reservation_object *resv)
+{
+ int i, err;
+ size_t npages;
+ struct lima_bo *bo, *ret;
+
+ bo = lima_bo_create_struct(dev, size, flags, resv);
+ if (IS_ERR(bo))
+ return bo;
+
+ npages = bo->gem.size >> PAGE_SHIFT;
+
+ bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!bo->pages_dma_addr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+
+ if (sgt) {
+ bo->sgt = sgt;
+
+ bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
+ if (!bo->pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+
+ err = drm_prime_sg_to_page_addr_arrays(
+ sgt, bo->pages, bo->pages_dma_addr, npages);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_out;
+ }
+ } else {
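+ /* the MMU page tables hold 32bit physical addresses, so allocate
+ * backing pages from the DMA32 zone
+ */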
+ mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
+ bo->pages = drm_gem_get_pages(&bo->gem);
+ if (IS_ERR(bo->pages)) {
+ ret = ERR_CAST(bo->pages);
+ bo->pages = NULL;
+ goto err_out;
+ }
+
+ for (i = 0; i < npages; i++) {
+ dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = ERR_PTR(-EFAULT);
+ goto err_out;
+ }
+ bo->pages_dma_addr[i] = addr;
+ }
+
+ }
+
+ return bo;
+
+err_out:
+ lima_bo_destroy(bo);
+ return ret;
+}
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
new file mode 100644
index 000000000000..6738724afb7b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_object.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_OBJECT_H__
+#define __LIMA_OBJECT_H__
+
+#include <drm/drm_gem.h>
+
+#include "lima_device.h"
+
+struct lima_bo {
+ struct drm_gem_object gem;
+
+ struct page **pages;
+ dma_addr_t *pages_dma_addr;
+ struct sg_table *sgt;
+ void *vaddr;
+
+ struct mutex lock;
+ struct list_head va;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct lima_bo, gem);
+}
+
+struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
+ u32 flags, struct sg_table *sgt,
+ struct reservation_object *resv);
+void lima_bo_destroy(struct lima_bo *bo);
+void *lima_bo_vmap(struct lima_bo *bo);
+void lima_bo_vunmap(struct lima_bo *bo);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
new file mode 100644
index 000000000000..571f6d661581
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_pmu.h"
+#include "lima_regs.h"
+
+#define pmu_write(reg, data) writel(data, ip->iomem + reg)
+#define pmu_read(reg) readl(ip->iomem + reg)
+
+static int lima_pmu_wait_cmd(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_PMU_INT_RAWSTAT,
+ v, v & LIMA_PMU_INT_CMD_MASK,
+ 100, 100000);
+ if (err) {
+ dev_err(dev->dev, "timeout wait pmd cmd\n");
+ return err;
+ }
+
+ pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+ return 0;
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+ int err;
+ u32 stat;
+
+ pmu_write(LIMA_PMU_INT_MASK, 0);
+
+ /* If this value is too low, the GPU becomes unstable at
+ * high GPU clock frequencies.
+ */
+ pmu_write(LIMA_PMU_SW_DELAY, 0xffff);
+
+ /* status reg 1=off 0=on */
+ stat = pmu_read(LIMA_PMU_STATUS);
+
+ /* power up all ip */
+ if (stat) {
+ pmu_write(LIMA_PMU_POWER_UP, stat);
+ err = lima_pmu_wait_cmd(ip);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+
+}
diff --git a/drivers/gpu/drm/lima/lima_pmu.h b/drivers/gpu/drm/lima/lima_pmu.h
new file mode 100644
index 000000000000..a2a18775eb07
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pmu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_PMU_H__
+#define __LIMA_PMU_H__
+
+struct lima_ip;
+
+int lima_pmu_init(struct lima_ip *ip);
+void lima_pmu_fini(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
new file mode 100644
index 000000000000..d29721e177bf
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_device.h"
+#include "lima_pp.h"
+#include "lima_dlbu.h"
+#include "lima_bcast.h"
+#include "lima_vm.h"
+#include "lima_regs.h"
+
+#define pp_write(reg, data) writel(data, ip->iomem + reg)
+#define pp_read(reg) readl(ip->iomem + reg)
+
+static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
+{
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ if (state & LIMA_PP_IRQ_MASK_ERROR) {
+ u32 status = pp_read(LIMA_PP_STATUS);
+
+ dev_err(dev->dev, "pp error irq state=%x status=%x\n",
+ state, status);
+
+ pipe->error = true;
+
+ /* mask all interrupts before hard reset */
+ pp_write(LIMA_PP_INT_MASK, 0);
+ }
+
+ pp_write(LIMA_PP_INT_CLEAR, state);
+}
+
+static irqreturn_t lima_pp_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ u32 state = pp_read(LIMA_PP_INT_STATUS);
+
+ /* for shared irq case */
+ if (!state)
+ return IRQ_NONE;
+
+ lima_pp_handle_irq(ip, state);
+
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
+{
+ int i;
+ irqreturn_t ret = IRQ_NONE;
+ struct lima_ip *pp_bcast = data;
+ struct lima_device *dev = pp_bcast->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+ u32 status, state;
+
+ if (pipe->done & (1 << i))
+ continue;
+
+ /* read status first: if the interrupt state changes in between,
+ * we could otherwise miss handling that interrupt
+ */
+ status = pp_read(LIMA_PP_STATUS);
+ state = pp_read(LIMA_PP_INT_STATUS);
+
+ if (state) {
+ lima_pp_handle_irq(ip, state);
+ ret = IRQ_HANDLED;
+ } else {
+ if (status & LIMA_PP_STATUS_RENDERING_ACTIVE)
+ continue;
+ }
+
+ pipe->done |= (1 << i);
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+ }
+
+ return ret;
+}
+
+static void lima_pp_soft_reset_async(struct lima_ip *ip)
+{
+ if (ip->data.async_reset)
+ return;
+
+ pp_write(LIMA_PP_INT_MASK, 0);
+ pp_write(LIMA_PP_INT_RAWSTAT, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_SOFT_RESET);
+ ip->data.async_reset = true;
+}
+
+static int lima_pp_soft_reset_poll(struct lima_ip *ip)
+{
+ return !(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_RENDERING_ACTIVE) &&
+ pp_read(LIMA_PP_INT_RAWSTAT) == LIMA_PP_IRQ_RESET_COMPLETED;
+}
+
+static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100);
+ if (ret) {
+ dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip));
+ return ret;
+ }
+
+ pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
+ return 0;
+}
+
+static int lima_pp_soft_reset_async_wait(struct lima_ip *ip)
+{
+ int i, err = 0;
+
+ if (!ip->data.async_reset)
+ return 0;
+
+ if (ip->id == lima_ip_pp_bcast) {
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+
+ for (i = 0; i < frame->num_pp; i++)
+ err |= lima_pp_soft_reset_async_wait_one(pipe->processor[i]);
+ } else
+ err = lima_pp_soft_reset_async_wait_one(ip);
+
+ ip->data.async_reset = false;
+ return err;
+}
+
+static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
+{
+ int i, j, n = 0;
+
+ for (i = 0; i < LIMA_PP_FRAME_REG_NUM; i++)
+ writel(frame[i], ip->iomem + LIMA_PP_FRAME + i * 4);
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < LIMA_PP_WB_REG_NUM; j++)
+ writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4);
+ }
+}
+
+static int lima_pp_hard_reset_poll(struct lima_ip *ip)
+{
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000);
+ return pp_read(LIMA_PP_PERF_CNT_0_LIMIT) == 0xC01A0000;
+}
+
+static int lima_pp_hard_reset(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ pp_write(LIMA_PP_INT_MASK, 0);
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET);
+ ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "pp hard reset timeout\n");
+ return ret;
+ }
+
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0);
+ pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
+ return 0;
+}
+
+static void lima_pp_print_version(struct lima_ip *ip)
+{
+ u32 version, major, minor;
+ char *name;
+
+ version = pp_read(LIMA_PP_VERSION);
+ major = (version >> 8) & 0xFF;
+ minor = version & 0xFF;
+ switch (version >> 16) {
+ case 0xC807:
+ name = "mali200";
+ break;
+ case 0xCE07:
+ name = "mali300";
+ break;
+ case 0xCD07:
+ name = "mali400";
+ break;
+ case 0xCF07:
+ name = "mali450";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
+ lima_ip_name(ip), name, major, minor);
+}
+
+int lima_pp_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ lima_pp_print_version(ip);
+
+ ip->data.async_reset = false;
+ lima_pp_soft_reset_async(ip);
+ err = lima_pp_soft_reset_async_wait(ip);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "pp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ dev->pp_version = pp_read(LIMA_PP_VERSION);
+
+ return 0;
+}
+
+void lima_pp_fini(struct lima_ip *ip)
+{
+
+}
+
+int lima_pp_bcast_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "pp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ return 0;
+}
+
+void lima_pp_bcast_fini(struct lima_ip *ip)
+{
+
+}
+
+static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ u32 num_pp;
+
+ if (pipe->bcast_processor) {
+ struct drm_lima_m450_pp_frame *f = task->frame;
+
+ num_pp = f->num_pp;
+
+ if (f->_pad)
+ return -EINVAL;
+ } else {
+ struct drm_lima_m400_pp_frame *f = task->frame;
+
+ num_pp = f->num_pp;
+ }
+
+ if (num_pp == 0 || num_pp > pipe->num_processor)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void lima_pp_task_run(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ if (pipe->bcast_processor) {
+ struct drm_lima_m450_pp_frame *frame = task->frame;
+ struct lima_device *dev = pipe->bcast_processor->dev;
+ struct lima_ip *ip = pipe->bcast_processor;
+ int i;
+
+ pipe->done = 0;
+ atomic_set(&pipe->task, frame->num_pp);
+
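+ /* with DLBU the tile lists are fetched through the reserved DLBU VA
+ * and balanced across the PPs; otherwise each PP below gets its own
+ * PLBU array address
+ */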
+ if (frame->use_dlbu) {
+ lima_dlbu_enable(dev, frame->num_pp);
+
+ frame->frame[LIMA_PP_FRAME >> 2] = LIMA_VA_RESERVE_DLBU;
+ lima_dlbu_set_reg(dev->ip + lima_ip_dlbu, frame->dlbu_regs);
+ } else
+ lima_dlbu_disable(dev);
+
+ lima_bcast_enable(dev, frame->num_pp);
+
+ lima_pp_soft_reset_async_wait(ip);
+
+ lima_pp_write_frame(ip, frame->frame, frame->wb);
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ pp_write(LIMA_PP_STACK, frame->fragment_stack_address[i]);
+ if (!frame->use_dlbu)
+ pp_write(LIMA_PP_FRAME, frame->plbu_array_address[i]);
+ }
+
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
+ } else {
+ struct drm_lima_m400_pp_frame *frame = task->frame;
+ int i;
+
+ atomic_set(&pipe->task, frame->num_pp);
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ frame->frame[LIMA_PP_FRAME >> 2] =
+ frame->plbu_array_address[i];
+ frame->frame[LIMA_PP_STACK >> 2] =
+ frame->fragment_stack_address[i];
+
+ lima_pp_soft_reset_async_wait(ip);
+
+ lima_pp_write_frame(ip, frame->frame, frame->wb);
+
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
+ }
+ }
+}
+
+static void lima_pp_task_fini(struct lima_sched_pipe *pipe)
+{
+ if (pipe->bcast_processor)
+ lima_pp_soft_reset_async(pipe->bcast_processor);
+ else {
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++)
+ lima_pp_soft_reset_async(pipe->processor[i]);
+ }
+}
+
+static void lima_pp_task_error(struct lima_sched_pipe *pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n",
+ i, pp_read(LIMA_PP_INT_STATUS), pp_read(LIMA_PP_STATUS));
+
+ lima_pp_hard_reset(ip);
+ }
+}
+
+static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+{
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+}
+
+static struct kmem_cache *lima_pp_task_slab;
+static int lima_pp_task_slab_refcnt;
+
+int lima_pp_pipe_init(struct lima_device *dev)
+{
+ int frame_size;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ if (dev->id == lima_gpu_mali400)
+ frame_size = sizeof(struct drm_lima_m400_pp_frame);
+ else
+ frame_size = sizeof(struct drm_lima_m450_pp_frame);
+
+ if (!lima_pp_task_slab) {
+ lima_pp_task_slab = kmem_cache_create_usercopy(
+ "lima_pp_task", sizeof(struct lima_sched_task) + frame_size,
+ 0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
+ frame_size, NULL);
+ if (!lima_pp_task_slab)
+ return -ENOMEM;
+ }
+ lima_pp_task_slab_refcnt++;
+
+ pipe->frame_size = frame_size;
+ pipe->task_slab = lima_pp_task_slab;
+
+ pipe->task_validate = lima_pp_task_validate;
+ pipe->task_run = lima_pp_task_run;
+ pipe->task_fini = lima_pp_task_fini;
+ pipe->task_error = lima_pp_task_error;
+ pipe->task_mmu_error = lima_pp_task_mmu_error;
+
+ return 0;
+}
+
+void lima_pp_pipe_fini(struct lima_device *dev)
+{
+ if (!--lima_pp_task_slab_refcnt) {
+ kmem_cache_destroy(lima_pp_task_slab);
+ lima_pp_task_slab = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/lima/lima_pp.h b/drivers/gpu/drm/lima/lima_pp.h
new file mode 100644
index 000000000000..bf60c77b2633
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pp.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_PP_H__
+#define __LIMA_PP_H__
+
+struct lima_ip;
+struct lima_device;
+
+int lima_pp_init(struct lima_ip *ip);
+void lima_pp_fini(struct lima_ip *ip);
+
+int lima_pp_bcast_init(struct lima_ip *ip);
+void lima_pp_bcast_fini(struct lima_ip *ip);
+
+int lima_pp_pipe_init(struct lima_device *dev);
+void lima_pp_pipe_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
new file mode 100644
index 000000000000..ace8ecefbe90
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2010-2017 ARM Limited. All rights reserved.
+ * Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+ */
+
+#ifndef __LIMA_REGS_H__
+#define __LIMA_REGS_H__
+
+/* These register definitions are collected from the
+ * official ARM Mali Utgard GPU kernel driver source code
+ */
+
+/* PMU regs */
+#define LIMA_PMU_POWER_UP 0x00
+#define LIMA_PMU_POWER_DOWN 0x04
+#define LIMA_PMU_POWER_GP0_MASK BIT(0)
+#define LIMA_PMU_POWER_L2_MASK BIT(1)
+#define LIMA_PMU_POWER_PP_MASK(i) BIT(2 + i)
+
+/*
+ * On Mali450 each block automatically starts up its corresponding L2
+ * and the PPs are not fully independently controllable.
+ * Instead PP0, PP1-3 and PP4-7 can be turned on or off.
+ */
+#define LIMA450_PMU_POWER_PP0_MASK BIT(1)
+#define LIMA450_PMU_POWER_PP13_MASK BIT(2)
+#define LIMA450_PMU_POWER_PP47_MASK BIT(3)
+
+#define LIMA_PMU_STATUS 0x08
+#define LIMA_PMU_INT_MASK 0x0C
+#define LIMA_PMU_INT_RAWSTAT 0x10
+#define LIMA_PMU_INT_CLEAR 0x18
+#define LIMA_PMU_INT_CMD_MASK BIT(0)
+#define LIMA_PMU_SW_DELAY 0x1C
+
+/* L2 cache regs */
+#define LIMA_L2_CACHE_SIZE 0x0004
+#define LIMA_L2_CACHE_STATUS 0x0008
+#define LIMA_L2_CACHE_STATUS_COMMAND_BUSY BIT(0)
+#define LIMA_L2_CACHE_STATUS_DATA_BUSY BIT(1)
+#define LIMA_L2_CACHE_COMMAND 0x0010
+#define LIMA_L2_CACHE_COMMAND_CLEAR_ALL BIT(0)
+#define LIMA_L2_CACHE_CLEAR_PAGE 0x0014
+#define LIMA_L2_CACHE_MAX_READS 0x0018
+#define LIMA_L2_CACHE_ENABLE 0x001C
+#define LIMA_L2_CACHE_ENABLE_ACCESS BIT(0)
+#define LIMA_L2_CACHE_ENABLE_READ_ALLOCATE BIT(1)
+#define LIMA_L2_CACHE_PERFCNT_SRC0 0x0020
+#define LIMA_L2_CACHE_PERFCNT_VAL0 0x0024
+#define LIMA_L2_CACHE_PERFCNT_SRC1 0x0028
+#define LIMA_L2_CACHE_ERFCNT_VAL1 0x002C
+
+/* GP regs */
+#define LIMA_GP_VSCL_START_ADDR 0x00
+#define LIMA_GP_VSCL_END_ADDR 0x04
+#define LIMA_GP_PLBUCL_START_ADDR 0x08
+#define LIMA_GP_PLBUCL_END_ADDR 0x0c
+#define LIMA_GP_PLBU_ALLOC_START_ADDR 0x10
+#define LIMA_GP_PLBU_ALLOC_END_ADDR 0x14
+#define LIMA_GP_CMD 0x20
+#define LIMA_GP_CMD_START_VS BIT(0)
+#define LIMA_GP_CMD_START_PLBU BIT(1)
+#define LIMA_GP_CMD_UPDATE_PLBU_ALLOC BIT(4)
+#define LIMA_GP_CMD_RESET BIT(5)
+#define LIMA_GP_CMD_FORCE_HANG BIT(6)
+#define LIMA_GP_CMD_STOP_BUS BIT(9)
+#define LIMA_GP_CMD_SOFT_RESET BIT(10)
+#define LIMA_GP_INT_RAWSTAT 0x24
+#define LIMA_GP_INT_CLEAR 0x28
+#define LIMA_GP_INT_MASK 0x2C
+#define LIMA_GP_INT_STAT 0x30
+#define LIMA_GP_IRQ_VS_END_CMD_LST BIT(0)
+#define LIMA_GP_IRQ_PLBU_END_CMD_LST BIT(1)
+#define LIMA_GP_IRQ_PLBU_OUT_OF_MEM BIT(2)
+#define LIMA_GP_IRQ_VS_SEM_IRQ BIT(3)
+#define LIMA_GP_IRQ_PLBU_SEM_IRQ BIT(4)
+#define LIMA_GP_IRQ_HANG BIT(5)
+#define LIMA_GP_IRQ_FORCE_HANG BIT(6)
+#define LIMA_GP_IRQ_PERF_CNT_0_LIMIT BIT(7)
+#define LIMA_GP_IRQ_PERF_CNT_1_LIMIT BIT(8)
+#define LIMA_GP_IRQ_WRITE_BOUND_ERR BIT(9)
+#define LIMA_GP_IRQ_SYNC_ERROR BIT(10)
+#define LIMA_GP_IRQ_AXI_BUS_ERROR BIT(11)
+#define LIMA_GP_IRQ_AXI_BUS_STOPPED BIT(12)
+#define LIMA_GP_IRQ_VS_INVALID_CMD BIT(13)
+#define LIMA_GP_IRQ_PLB_INVALID_CMD BIT(14)
+#define LIMA_GP_IRQ_RESET_COMPLETED BIT(19)
+#define LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW BIT(20)
+#define LIMA_GP_IRQ_SEMAPHORE_OVERFLOW BIT(21)
+#define LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS BIT(22)
+#define LIMA_GP_WRITE_BOUND_LOW 0x34
+#define LIMA_GP_PERF_CNT_0_ENABLE 0x3C
+#define LIMA_GP_PERF_CNT_1_ENABLE 0x40
+#define LIMA_GP_PERF_CNT_0_SRC 0x44
+#define LIMA_GP_PERF_CNT_1_SRC 0x48
+#define LIMA_GP_PERF_CNT_0_VALUE 0x4C
+#define LIMA_GP_PERF_CNT_1_VALUE 0x50
+#define LIMA_GP_PERF_CNT_0_LIMIT 0x54
+#define LIMA_GP_STATUS 0x68
+#define LIMA_GP_STATUS_VS_ACTIVE BIT(1)
+#define LIMA_GP_STATUS_BUS_STOPPED BIT(2)
+#define LIMA_GP_STATUS_PLBU_ACTIVE BIT(3)
+#define LIMA_GP_STATUS_BUS_ERROR BIT(6)
+#define LIMA_GP_STATUS_WRITE_BOUND_ERR BIT(8)
+#define LIMA_GP_VERSION 0x6C
+#define LIMA_GP_VSCL_START_ADDR_READ 0x80
+#define LIMA_GP_PLBCL_START_ADDR_READ 0x84
+#define LIMA_GP_CONTR_AXI_BUS_ERROR_STAT 0x94
+
+#define LIMA_GP_IRQ_MASK_ALL \
+ ( \
+ LIMA_GP_IRQ_VS_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM | \
+ LIMA_GP_IRQ_VS_SEM_IRQ | \
+ LIMA_GP_IRQ_PLBU_SEM_IRQ | \
+ LIMA_GP_IRQ_HANG | \
+ LIMA_GP_IRQ_FORCE_HANG | \
+ LIMA_GP_IRQ_PERF_CNT_0_LIMIT | \
+ LIMA_GP_IRQ_PERF_CNT_1_LIMIT | \
+ LIMA_GP_IRQ_WRITE_BOUND_ERR | \
+ LIMA_GP_IRQ_SYNC_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_STOPPED | \
+ LIMA_GP_IRQ_VS_INVALID_CMD | \
+ LIMA_GP_IRQ_PLB_INVALID_CMD | \
+ LIMA_GP_IRQ_RESET_COMPLETED | \
+ LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
+ LIMA_GP_IRQ_SEMAPHORE_OVERFLOW | \
+ LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+#define LIMA_GP_IRQ_MASK_ERROR \
+ ( \
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM | \
+ LIMA_GP_IRQ_FORCE_HANG | \
+ LIMA_GP_IRQ_WRITE_BOUND_ERR | \
+ LIMA_GP_IRQ_SYNC_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_ERROR | \
+ LIMA_GP_IRQ_VS_INVALID_CMD | \
+ LIMA_GP_IRQ_PLB_INVALID_CMD | \
+ LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
+ LIMA_GP_IRQ_SEMAPHORE_OVERFLOW | \
+ LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+#define LIMA_GP_IRQ_MASK_USED \
+ ( \
+ LIMA_GP_IRQ_VS_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_END_CMD_LST | \
+ LIMA_GP_IRQ_MASK_ERROR)
+
+/* PP regs */
+#define LIMA_PP_FRAME 0x0000
+#define LIMA_PP_RSW 0x0004
+#define LIMA_PP_STACK 0x0030
+#define LIMA_PP_STACK_SIZE 0x0034
+#define LIMA_PP_ORIGIN_OFFSET_X 0x0040
+#define LIMA_PP_WB(i) (0x0100 * (i + 1))
+#define LIMA_PP_WB_SOURCE_SELECT 0x0000
+#define LIMA_PP_WB_SOURCE_ADDR 0x0004
+
+#define LIMA_PP_VERSION 0x1000
+#define LIMA_PP_CURRENT_REND_LIST_ADDR 0x1004
+#define LIMA_PP_STATUS 0x1008
+#define LIMA_PP_STATUS_RENDERING_ACTIVE BIT(0)
+#define LIMA_PP_STATUS_BUS_STOPPED BIT(4)
+#define LIMA_PP_CTRL 0x100c
+#define LIMA_PP_CTRL_STOP_BUS BIT(0)
+#define LIMA_PP_CTRL_FLUSH_CACHES BIT(3)
+#define LIMA_PP_CTRL_FORCE_RESET BIT(5)
+#define LIMA_PP_CTRL_START_RENDERING BIT(6)
+#define LIMA_PP_CTRL_SOFT_RESET BIT(7)
+#define LIMA_PP_INT_RAWSTAT 0x1020
+#define LIMA_PP_INT_CLEAR 0x1024
+#define LIMA_PP_INT_MASK 0x1028
+#define LIMA_PP_INT_STATUS 0x102c
+#define LIMA_PP_IRQ_END_OF_FRAME BIT(0)
+#define LIMA_PP_IRQ_END_OF_TILE BIT(1)
+#define LIMA_PP_IRQ_HANG BIT(2)
+#define LIMA_PP_IRQ_FORCE_HANG BIT(3)
+#define LIMA_PP_IRQ_BUS_ERROR BIT(4)
+#define LIMA_PP_IRQ_BUS_STOP BIT(5)
+#define LIMA_PP_IRQ_CNT_0_LIMIT BIT(6)
+#define LIMA_PP_IRQ_CNT_1_LIMIT BIT(7)
+#define LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR BIT(8)
+#define LIMA_PP_IRQ_INVALID_PLIST_COMMAND BIT(9)
+#define LIMA_PP_IRQ_CALL_STACK_UNDERFLOW BIT(10)
+#define LIMA_PP_IRQ_CALL_STACK_OVERFLOW BIT(11)
+#define LIMA_PP_IRQ_RESET_COMPLETED BIT(12)
+#define LIMA_PP_WRITE_BOUNDARY_LOW 0x1044
+#define LIMA_PP_BUS_ERROR_STATUS 0x1050
+#define LIMA_PP_PERF_CNT_0_ENABLE 0x1080
+#define LIMA_PP_PERF_CNT_0_SRC 0x1084
+#define LIMA_PP_PERF_CNT_0_LIMIT 0x1088
+#define LIMA_PP_PERF_CNT_0_VALUE 0x108c
+#define LIMA_PP_PERF_CNT_1_ENABLE 0x10a0
+#define LIMA_PP_PERF_CNT_1_SRC 0x10a4
+#define LIMA_PP_PERF_CNT_1_LIMIT 0x10a8
+#define LIMA_PP_PERF_CNT_1_VALUE 0x10ac
+#define LIMA_PP_PERFMON_CONTR 0x10b0
+#define LIMA_PP_PERFMON_BASE 0x10b4
+
+#define LIMA_PP_IRQ_MASK_ALL \
+ ( \
+ LIMA_PP_IRQ_END_OF_FRAME | \
+ LIMA_PP_IRQ_END_OF_TILE | \
+ LIMA_PP_IRQ_HANG | \
+ LIMA_PP_IRQ_FORCE_HANG | \
+ LIMA_PP_IRQ_BUS_ERROR | \
+ LIMA_PP_IRQ_BUS_STOP | \
+ LIMA_PP_IRQ_CNT_0_LIMIT | \
+ LIMA_PP_IRQ_CNT_1_LIMIT | \
+ LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR | \
+ LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
+ LIMA_PP_IRQ_CALL_STACK_UNDERFLOW | \
+ LIMA_PP_IRQ_CALL_STACK_OVERFLOW | \
+ LIMA_PP_IRQ_RESET_COMPLETED)
+
+#define LIMA_PP_IRQ_MASK_ERROR \
+ ( \
+ LIMA_PP_IRQ_FORCE_HANG | \
+ LIMA_PP_IRQ_BUS_ERROR | \
+ LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR | \
+ LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
+ LIMA_PP_IRQ_CALL_STACK_UNDERFLOW | \
+ LIMA_PP_IRQ_CALL_STACK_OVERFLOW)
+
+#define LIMA_PP_IRQ_MASK_USED \
+ ( \
+ LIMA_PP_IRQ_END_OF_FRAME | \
+ LIMA_PP_IRQ_MASK_ERROR)
+
+/* MMU regs */
+#define LIMA_MMU_DTE_ADDR 0x0000
+#define LIMA_MMU_STATUS 0x0004
+#define LIMA_MMU_STATUS_PAGING_ENABLED BIT(0)
+#define LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
+#define LIMA_MMU_STATUS_STALL_ACTIVE BIT(2)
+#define LIMA_MMU_STATUS_IDLE BIT(3)
+#define LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define LIMA_MMU_STATUS_BUS_ID(x) ((x >> 6) & 0x1F)
+#define LIMA_MMU_COMMAND 0x0008
+#define LIMA_MMU_COMMAND_ENABLE_PAGING 0x00
+#define LIMA_MMU_COMMAND_DISABLE_PAGING 0x01
+#define LIMA_MMU_COMMAND_ENABLE_STALL 0x02
+#define LIMA_MMU_COMMAND_DISABLE_STALL 0x03
+#define LIMA_MMU_COMMAND_ZAP_CACHE 0x04
+#define LIMA_MMU_COMMAND_PAGE_FAULT_DONE 0x05
+#define LIMA_MMU_COMMAND_HARD_RESET 0x06
+#define LIMA_MMU_PAGE_FAULT_ADDR 0x000C
+#define LIMA_MMU_ZAP_ONE_LINE 0x0010
+#define LIMA_MMU_INT_RAWSTAT 0x0014
+#define LIMA_MMU_INT_CLEAR 0x0018
+#define LIMA_MMU_INT_MASK 0x001C
+#define LIMA_MMU_INT_PAGE_FAULT BIT(0)
+#define LIMA_MMU_INT_READ_BUS_ERROR BIT(1)
+#define LIMA_MMU_INT_STATUS 0x0020
+
+#define LIMA_VM_FLAG_PRESENT BIT(0)
+#define LIMA_VM_FLAG_READ_PERMISSION BIT(1)
+#define LIMA_VM_FLAG_WRITE_PERMISSION BIT(2)
+#define LIMA_VM_FLAG_OVERRIDE_CACHE BIT(3)
+#define LIMA_VM_FLAG_WRITE_CACHEABLE BIT(4)
+#define LIMA_VM_FLAG_WRITE_ALLOCATE BIT(5)
+#define LIMA_VM_FLAG_WRITE_BUFFERABLE BIT(6)
+#define LIMA_VM_FLAG_READ_CACHEABLE BIT(7)
+#define LIMA_VM_FLAG_READ_ALLOCATE BIT(8)
+#define LIMA_VM_FLAG_MASK 0x1FF
+
+#define LIMA_VM_FLAGS_CACHE ( \
+ LIMA_VM_FLAG_PRESENT | \
+ LIMA_VM_FLAG_READ_PERMISSION | \
+ LIMA_VM_FLAG_WRITE_PERMISSION | \
+ LIMA_VM_FLAG_OVERRIDE_CACHE | \
+ LIMA_VM_FLAG_WRITE_CACHEABLE | \
+ LIMA_VM_FLAG_WRITE_BUFFERABLE | \
+ LIMA_VM_FLAG_READ_CACHEABLE | \
+ LIMA_VM_FLAG_READ_ALLOCATE)
+
+#define LIMA_VM_FLAGS_UNCACHE ( \
+ LIMA_VM_FLAG_PRESENT | \
+ LIMA_VM_FLAG_READ_PERMISSION | \
+ LIMA_VM_FLAG_WRITE_PERMISSION)
+
+/* DLBU regs */
+#define LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR 0x0000
+#define LIMA_DLBU_MASTER_TLLIST_VADDR 0x0004
+#define LIMA_DLBU_TLLIST_VBASEADDR 0x0008
+#define LIMA_DLBU_FB_DIM 0x000C
+#define LIMA_DLBU_TLLIST_CONF 0x0010
+#define LIMA_DLBU_START_TILE_POS 0x0014
+#define LIMA_DLBU_PP_ENABLE_MASK 0x0018
+
+/* BCAST regs */
+#define LIMA_BCAST_BROADCAST_MASK 0x0
+#define LIMA_BCAST_INTERRUPT_MASK 0x4
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
new file mode 100644
index 000000000000..d53bd45f8d96
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "lima_drv.h"
+#include "lima_sched.h"
+#include "lima_vm.h"
+#include "lima_mmu.h"
+#include "lima_l2_cache.h"
+#include "lima_object.h"
+
+struct lima_fence {
+ struct dma_fence base;
+ struct lima_sched_pipe *pipe;
+};
+
+static struct kmem_cache *lima_fence_slab;
+static int lima_fence_slab_refcnt;
+
+int lima_sched_slab_init(void)
+{
+ if (!lima_fence_slab) {
+ lima_fence_slab = kmem_cache_create(
+ "lima_fence", sizeof(struct lima_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!lima_fence_slab)
+ return -ENOMEM;
+ }
+
+ lima_fence_slab_refcnt++;
+ return 0;
+}
+
+void lima_sched_slab_fini(void)
+{
+ if (!--lima_fence_slab_refcnt) {
+ kmem_cache_destroy(lima_fence_slab);
+ lima_fence_slab = NULL;
+ }
+}
+
+static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct lima_fence, base);
+}
+
+static const char *lima_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "lima";
+}
+
+static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct lima_fence *f = to_lima_fence(fence);
+
+ return f->pipe->base.name;
+}
+
+static void lima_fence_release_rcu(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ struct lima_fence *fence = to_lima_fence(f);
+
+ kmem_cache_free(lima_fence_slab, fence);
+}
+
+static void lima_fence_release(struct dma_fence *fence)
+{
+ struct lima_fence *f = to_lima_fence(fence);
+
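+ /* dma_fence lookups may still hold RCU references, so defer the
+ * actual free to an RCU grace period
+ */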
+ call_rcu(&f->base.rcu, lima_fence_release_rcu);
+}
+
+static const struct dma_fence_ops lima_fence_ops = {
+ .get_driver_name = lima_fence_get_driver_name,
+ .get_timeline_name = lima_fence_get_timeline_name,
+ .release = lima_fence_release,
+};
+
+static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
+{
+ struct lima_fence *fence;
+
+ fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->pipe = pipe;
+ dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
+ pipe->fence_context, ++pipe->fence_seqno);
+
+ return fence;
+}
+
+static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
+{
+ return container_of(job, struct lima_sched_task, base);
+}
+
+static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
+{
+ return container_of(sched, struct lima_sched_pipe, base);
+}
+
+int lima_sched_task_init(struct lima_sched_task *task,
+ struct lima_sched_context *context,
+ struct lima_bo **bos, int num_bos,
+ struct lima_vm *vm)
+{
+ int err, i;
+
+ task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
+ if (!task->bos)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bos; i++)
+ drm_gem_object_get(&bos[i]->gem);
+
+ err = drm_sched_job_init(&task->base, &context->base, vm);
+ if (err) {
+ kfree(task->bos);
+ return err;
+ }
+
+ task->num_bos = num_bos;
+ task->vm = lima_vm_get(vm);
+
+ xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
+
+ return 0;
+}
+
+void lima_sched_task_fini(struct lima_sched_task *task)
+{
+ struct dma_fence *fence;
+ unsigned long index;
+ int i;
+
+ drm_sched_job_cleanup(&task->base);
+
+ xa_for_each(&task->deps, index, fence) {
+ dma_fence_put(fence);
+ }
+ xa_destroy(&task->deps);
+
+ if (task->bos) {
+ for (i = 0; i < task->num_bos; i++)
+ drm_gem_object_put_unlocked(&task->bos[i]->gem);
+ kfree(task->bos);
+ }
+
+ lima_vm_put(task->vm);
+}
+
+int lima_sched_context_init(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context,
+ atomic_t *guilty)
+{
+ struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+
+ return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+}
+
+void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context)
+{
+ drm_sched_entity_fini(&context->base);
+}
+
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
+ struct lima_sched_task *task)
+{
+ struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
+
+ drm_sched_entity_push_job(&task->base, &context->base);
+ return fence;
+}
+
+static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+
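+ /* hand queued dependency fences back one at a time; the scheduler
+ * keeps calling until this returns NULL
+ */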
+ if (!xa_empty(&task->deps))
+ return xa_erase(&task->deps, task->last_dep++);
+
+ return NULL;
+}
+
+static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_fence *fence;
+ struct dma_fence *ret;
+ struct lima_vm *vm = NULL, *last_vm = NULL;
+ int i;
+
+ /* after GPU reset */
+ if (job->s_fence->finished.error < 0)
+ return NULL;
+
+ fence = lima_fence_create(pipe);
+ if (!fence)
+ return NULL;
+ task->fence = &fence->base;
+
+ /* take an extra reference for the caller's use of the fence,
+ * otherwise the irq handler may consume the fence before the
+ * caller gets to use it
+ */
+ ret = dma_fence_get(task->fence);
+
+ pipe->current_task = task;
+
+ /* this is needed for MMU to work correctly, otherwise GP/PP
+ * will hang or page fault for unknown reasons after running for
+ * a while.
+ *
+ * Need to investigate:
+ * 1. is it related to TLB
+ * 2. how much performance will be affected by L2 cache flush
+ * 3. can we reduce the number of calls to this function, since all
+ * GP/PP share the same L2 cache on mali400
+ *
+ * TODO:
+ * 1. move this to task fini to save some wait time?
+ * 2. when GP/PP use different L2 caches, does PP need to wait for
+ * the GP L2 cache flush?
+ */
+ for (i = 0; i < pipe->num_l2_cache; i++)
+ lima_l2_cache_flush(pipe->l2_cache[i]);
+
+ if (task->vm != pipe->current_vm) {
+ vm = lima_vm_get(task->vm);
+ last_vm = pipe->current_vm;
+ pipe->current_vm = task->vm;
+ }
+
+ if (pipe->bcast_mmu)
+ lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+ else {
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_switch_vm(pipe->mmu[i], vm);
+ }
+
+ if (last_vm)
+ lima_vm_put(last_vm);
+
+ pipe->error = false;
+ pipe->task_run(pipe, task);
+
+ return task->fence;
+}
+
+static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ drm_sched_stop(&pipe->base);
+
+ if (task)
+ drm_sched_increase_karma(&task->base);
+
+ pipe->task_error(pipe);
+
+ if (pipe->bcast_mmu)
+ lima_mmu_page_fault_resume(pipe->bcast_mmu);
+ else {
+ int i;
+
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_page_fault_resume(pipe->mmu[i]);
+ }
+
+ if (pipe->current_vm)
+ lima_vm_put(pipe->current_vm);
+
+ pipe->current_vm = NULL;
+ pipe->current_task = NULL;
+
+ drm_sched_resubmit_jobs(&pipe->base);
+ drm_sched_start(&pipe->base, true);
+}
+
+static void lima_sched_timedout_job(struct drm_sched_job *job)
+{
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_sched_task *task = to_lima_task(job);
+
+ DRM_ERROR("lima job timeout\n");
+
+ lima_sched_handle_error_task(pipe, task);
+}
+
+static void lima_sched_free_job(struct drm_sched_job *job)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_vm *vm = task->vm;
+ struct lima_bo **bos = task->bos;
+ int i;
+
+ dma_fence_put(task->fence);
+
+ for (i = 0; i < task->num_bos; i++)
+ lima_vm_bo_del(vm, bos[i]);
+
+ lima_sched_task_fini(task);
+ kmem_cache_free(pipe->task_slab, task);
+}
+
+static const struct drm_sched_backend_ops lima_sched_ops = {
+ .dependency = lima_sched_dependency,
+ .run_job = lima_sched_run_job,
+ .timedout_job = lima_sched_timedout_job,
+ .free_job = lima_sched_free_job,
+};
+
+static void lima_sched_error_work(struct work_struct *work)
+{
+ struct lima_sched_pipe *pipe =
+ container_of(work, struct lima_sched_pipe, error_work);
+ struct lima_sched_task *task = pipe->current_task;
+
+ lima_sched_handle_error_task(pipe, task);
+}
+
+int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
+{
+ long timeout;
+
+ if (lima_sched_timeout_ms <= 0)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(lima_sched_timeout_ms);
+
+ pipe->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&pipe->fence_lock);
+
+ INIT_WORK(&pipe->error_work, lima_sched_error_work);
+
+ return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0, timeout, name);
+}
+
+void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
+{
+ drm_sched_fini(&pipe->base);
+}
+
+void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
+{
+ if (pipe->error)
+ schedule_work(&pipe->error_work);
+ else {
+ struct lima_sched_task *task = pipe->current_task;
+
+ pipe->task_fini(pipe);
+ dma_fence_signal(task->fence);
+ }
+}
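
The .dependency hook at the top of this file drains the per-task xarray one fence per call: xa_erase() at index task->last_dep hands the scheduler the next fence to wait on, and NULL tells it the job is ready to run. A userspace-style sketch of that drain contract, with a plain array standing in for the xarray and fake types in place of struct dma_fence (nothing below is lima code):

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the kernel types; illustration only. */
struct fake_fence { int id; };

struct fake_task {
	struct fake_fence *deps[4];
	unsigned long num_deps;
	unsigned long last_dep;	/* mirrors task->last_dep */
};

/* Same shape as lima_sched_dependency(): hand back one
 * dependency per call, NULL once all have been consumed. */
static struct fake_fence *next_dependency(struct fake_task *task)
{
	if (task->last_dep < task->num_deps)
		return task->deps[task->last_dep++];
	return NULL;
}

int main(void)
{
	struct fake_fence a = { 1 }, b = { 2 };
	struct fake_task task = { .deps = { &a, &b }, .num_deps = 2 };
	struct fake_fence *f;

	/* The scheduler keeps asking until NULL, then runs the job. */
	while ((f = next_dependency(&task)))
		printf("wait on fence %d\n", f->id);
	printf("all dependencies met, run job\n");
	return 0;
}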
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
new file mode 100644
index 000000000000..928af91c1118
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_SCHED_H__
+#define __LIMA_SCHED_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct lima_vm;
+
+struct lima_sched_task {
+ struct drm_sched_job base;
+
+ struct lima_vm *vm;
+ void *frame;
+
+ struct xarray deps;
+ unsigned long last_dep;
+
+ struct lima_bo **bos;
+ int num_bos;
+
+ /* pipe fence */
+ struct dma_fence *fence;
+};
+
+struct lima_sched_context {
+ struct drm_sched_entity base;
+};
+
+#define LIMA_SCHED_PIPE_MAX_MMU 8
+#define LIMA_SCHED_PIPE_MAX_L2_CACHE 2
+#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8
+
+struct lima_ip;
+
+struct lima_sched_pipe {
+ struct drm_gpu_scheduler base;
+
+ u64 fence_context;
+ u32 fence_seqno;
+ spinlock_t fence_lock;
+
+ struct lima_sched_task *current_task;
+ struct lima_vm *current_vm;
+
+ struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
+ int num_mmu;
+
+ struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
+ int num_l2_cache;
+
+ struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
+ int num_processor;
+
+ struct lima_ip *bcast_processor;
+ struct lima_ip *bcast_mmu;
+
+ u32 done;
+ bool error;
+ atomic_t task;
+
+ int frame_size;
+ struct kmem_cache *task_slab;
+
+ int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
+ void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
+ void (*task_fini)(struct lima_sched_pipe *pipe);
+ void (*task_error)(struct lima_sched_pipe *pipe);
+ void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+
+ struct work_struct error_work;
+};
+
+int lima_sched_task_init(struct lima_sched_task *task,
+ struct lima_sched_context *context,
+ struct lima_bo **bos, int num_bos,
+ struct lima_vm *vm);
+void lima_sched_task_fini(struct lima_sched_task *task);
+
+int lima_sched_context_init(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context,
+ atomic_t *guilty);
+void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context);
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
+ struct lima_sched_task *task);
+
+int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
+void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
+void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);
+
+static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
+{
+ pipe->error = true;
+ pipe->task_mmu_error(pipe);
+}
+
+int lima_sched_slab_init(void);
+void lima_sched_slab_fini(void);
+
+#endif
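
The five function pointers in struct lima_sched_pipe are the whole contract between this common scheduler glue and a processor backend. A hedged sketch of how a backend might wire them up; the example_* functions are placeholders, not the real GP/PP implementations from this series:

#include "lima_sched.h"

static void example_task_run(struct lima_sched_pipe *pipe,
			     struct lima_sched_task *task)
{
	/* program the hardware from task->frame and kick the job;
	 * lima_sched_pipe_task_done() is expected later from the IRQ path */
}

static void example_task_fini(struct lima_sched_pipe *pipe)
{
	/* per-task hardware cleanup once the job has finished */
}

static void example_task_error(struct lima_sched_pipe *pipe)
{
	/* reset the processor after a timeout or fault */
}

static void example_task_mmu_error(struct lima_sched_pipe *pipe)
{
	/* reached via lima_sched_pipe_mmu_error(); finish the broken task */
}

static void example_pipe_setup(struct lima_sched_pipe *pipe)
{
	pipe->task_run = example_task_run;
	pipe->task_fini = example_task_fini;
	pipe->task_error = example_task_error;
	pipe->task_mmu_error = example_task_mmu_error;
}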
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
new file mode 100644
index 000000000000..19e88ca16527
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include "lima_device.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+#include "lima_regs.h"
+
+struct lima_bo_va {
+ struct list_head list;
+ unsigned int ref_count;
+
+ struct drm_mm_node node;
+
+ struct lima_vm *vm;
+};
+
+#define LIMA_VM_PD_SHIFT 22
+#define LIMA_VM_PT_SHIFT 12
+#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
+#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
+
+#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
+#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
+
+#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
+#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
+#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
+#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
+
+
+static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+{
+ u32 addr;
+
+ for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
+ u32 pbe = LIMA_PBE(addr);
+ u32 bte = LIMA_BTE(addr);
+
+ vm->bts[pbe].cpu[bte] = 0;
+ }
+}
+
+static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
+ u32 start, u32 end)
+{
+ u64 addr;
+ int i = 0;
+
+ for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
+ u32 pbe = LIMA_PBE(addr);
+ u32 bte = LIMA_BTE(addr);
+
+ if (!vm->bts[pbe].cpu) {
+ dma_addr_t pts;
+ u32 *pd;
+ int j;
+
+ vm->bts[pbe].cpu = dma_alloc_wc(
+ vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
+ if (!vm->bts[pbe].cpu) {
+ if (addr != start)
+ lima_vm_unmap_page_table(vm, start, addr - 1);
+ return -ENOMEM;
+ }
+
+ pts = vm->bts[pbe].dma;
+ pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ pd[j] = pts | LIMA_VM_FLAG_PRESENT;
+ pts += LIMA_PAGE_SIZE;
+ }
+ }
+
+ vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
+ }
+
+ return 0;
+}
+
+static struct lima_bo_va *
+lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va, *ret = NULL;
+
+ list_for_each_entry(bo_va, &bo->va, list) {
+ if (bo_va->vm == vm) {
+ ret = bo_va;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
+{
+ struct lima_bo_va *bo_va;
+ int err;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (bo_va) {
+ bo_va->ref_count++;
+ mutex_unlock(&bo->lock);
+ return 0;
+ }
+
+ /* do not create a new bo_va unless the caller asked for one */
+ if (!create) {
+ mutex_unlock(&bo->lock);
+ return -ENOENT;
+ }
+
+ bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
+ if (!bo_va) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ bo_va->vm = vm;
+ bo_va->ref_count = 1;
+
+ mutex_lock(&vm->lock);
+
+ err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
+ if (err)
+ goto err_out1;
+
+ err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
+ if (err)
+ goto err_out2;
+
+ mutex_unlock(&vm->lock);
+
+ list_add_tail(&bo_va->list, &bo->va);
+
+ mutex_unlock(&bo->lock);
+ return 0;
+
+err_out2:
+ drm_mm_remove_node(&bo_va->node);
+err_out1:
+ mutex_unlock(&vm->lock);
+ kfree(bo_va);
+err_out0:
+ mutex_unlock(&bo->lock);
+ return err;
+}
+
+void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (--bo_va->ref_count > 0) {
+ mutex_unlock(&bo->lock);
+ return;
+ }
+
+ mutex_lock(&vm->lock);
+
+ lima_vm_unmap_page_table(vm, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
+
+ drm_mm_remove_node(&bo_va->node);
+
+ mutex_unlock(&vm->lock);
+
+ list_del(&bo_va->list);
+
+ mutex_unlock(&bo->lock);
+
+ kfree(bo_va);
+}
+
+u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va;
+ u32 ret;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ ret = bo_va->node.start;
+
+ mutex_unlock(&bo->lock);
+
+ return ret;
+}
+
+struct lima_vm *lima_vm_create(struct lima_device *dev)
+{
+ struct lima_vm *vm;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return NULL;
+
+ vm->dev = dev;
+ mutex_init(&vm->lock);
+ kref_init(&vm->refcount);
+
+ vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vm->pd.cpu)
+ goto err_out0;
+
+ if (dev->dlbu_cpu) {
+ int err = lima_vm_map_page_table(
+ vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
+ LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+ if (err)
+ goto err_out1;
+ }
+
+ drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);
+
+ return vm;
+
+err_out1:
+ dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
+err_out0:
+ kfree(vm);
+ return NULL;
+}
+
+void lima_vm_release(struct kref *kref)
+{
+ struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
+ int i;
+
+ drm_mm_takedown(&vm->mm);
+
+ for (i = 0; i < LIMA_VM_NUM_BT; i++) {
+ if (vm->bts[i].cpu)
+ dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ vm->bts[i].cpu, vm->bts[i].dma);
+ }
+
+ if (vm->pd.cpu)
+ dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
+
+ kfree(vm);
+}
+
+void lima_vm_print(struct lima_vm *vm)
+{
+ int i, j, k;
+ u32 *pd, *pt;
+
+ if (!vm->pd.cpu)
+ return;
+
+ pd = vm->pd.cpu;
+ for (i = 0; i < LIMA_VM_NUM_BT; i++) {
+ if (!vm->bts[i].cpu)
+ continue;
+
+ pt = vm->bts[i].cpu;
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;
+
+ printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);
+
+ for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
+ u32 pte = *pt++;
+
+ if (pte)
+ printk(KERN_INFO " pt %03x:%08x\n", k, pte);
+ }
+ }
+ }
+}
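
The PDE/PTE/PBE/BTE macros near the top of this file split a 32-bit GPU virtual address two ways: logically into a 4 MiB page-directory slot plus a 4 KiB page, and physically into one of the 32 MiB blocks of eight page tables that get allocated together with dma_alloc_wc(). A standalone rendering of the same shifts for one example address:

#include <stdio.h>
#include <stdint.h>

/* Same shift/mask scheme as the LIMA_PDE/PTE/PBE/BTE macros above:
 * 4 MiB per page directory entry, 4 KiB pages, and page tables
 * grouped 8 at a time into one dma_alloc_wc() block (32 MiB span). */
#define PD_SHIFT	22
#define PT_SHIFT	12
#define PT_PER_BT_SHIFT	3
#define PB_SHIFT	(PD_SHIFT + PT_PER_BT_SHIFT)

#define PT_MASK		((1u << PD_SHIFT) - 1)
#define BT_MASK		((1u << PB_SHIFT) - 1)

int main(void)
{
	uint32_t va = 0x12345000;	/* arbitrary example GPU VA */

	printf("pde %u pte %u pbe %u bte %u\n",
	       va >> PD_SHIFT,		    /* 72: which 4 MiB region    */
	       (va & PT_MASK) >> PT_SHIFT,  /* 837: 4 KiB page in region */
	       va >> PB_SHIFT,		    /* 9: which block of 8 PTs   */
	       (va & BT_MASK) >> PT_SHIFT); /* 837: entry in that block  */
	return 0;
}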
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
new file mode 100644
index 000000000000..caee2f8a29b4
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_VM_H__
+#define __LIMA_VM_H__
+
+#include <drm/drm_mm.h>
+#include <linux/kref.h>
+
+#define LIMA_PAGE_SIZE 4096
+#define LIMA_PAGE_MASK (LIMA_PAGE_SIZE - 1)
+#define LIMA_PAGE_ENT_NUM (LIMA_PAGE_SIZE / sizeof(u32))
+
+#define LIMA_VM_NUM_PT_PER_BT_SHIFT 3
+#define LIMA_VM_NUM_PT_PER_BT (1 << LIMA_VM_NUM_PT_PER_BT_SHIFT)
+#define LIMA_VM_NUM_BT (LIMA_PAGE_ENT_NUM >> LIMA_VM_NUM_PT_PER_BT_SHIFT)
+
+#define LIMA_VA_RESERVE_START 0xFFF00000
+#define LIMA_VA_RESERVE_DLBU LIMA_VA_RESERVE_START
+#define LIMA_VA_RESERVE_END 0x100000000
+
+struct lima_device;
+
+struct lima_vm_page {
+ u32 *cpu;
+ dma_addr_t dma;
+};
+
+struct lima_vm {
+ struct mutex lock;
+ struct kref refcount;
+
+ struct drm_mm mm;
+
+ struct lima_device *dev;
+
+ struct lima_vm_page pd;
+ struct lima_vm_page bts[LIMA_VM_NUM_BT];
+};
+
+int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create);
+void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo);
+
+u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo);
+
+struct lima_vm *lima_vm_create(struct lima_device *dev);
+void lima_vm_release(struct kref *kref);
+
+static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
+{
+ kref_get(&vm->refcount);
+ return vm;
+}
+
+static inline void lima_vm_put(struct lima_vm *vm)
+{
+ kref_put(&vm->refcount, lima_vm_release);
+}
+
+void lima_vm_print(struct lima_vm *vm);
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 22e68a100e7b..5d333138f913 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock)
static unsigned int mt2701_calculate_factor(int clock)
{
if (clock <= 64000)
- return 16;
- else if (clock <= 128000)
- return 8;
- else if (clock <= 256000)
return 4;
- else
+ else if (clock <= 128000)
return 2;
+ else
+ return 1;
}
static const struct mtk_dpi_conf mt8173_conf = {
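
The mt2701 hunk above reworks the pixel-clock factor table so it returns 4, 2 or 1 instead of the previous 16/8/4/2 tiers. A small standalone rendering of the new mapping, checked against a few typical pixel clocks (the kHz values are illustrative, not taken from the patch):

#include <stdio.h>

/* Mirrors the corrected mt2701 factor table (clock in kHz). */
static unsigned int mt2701_factor(int clock)
{
	if (clock <= 64000)
		return 4;
	else if (clock <= 128000)
		return 2;
	else
		return 1;
}

int main(void)
{
	int clocks[] = { 27000, 74250, 148500 };	/* typical pixel clocks */

	for (unsigned int i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("%d kHz -> factor %u\n", clocks[i], mt2701_factor(clocks[i]));
	return 0;
}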
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf59ea9bccfd..57ce4708ef1b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -15,6 +15,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
@@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = {
.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
+ .gem_prime_vmap = mtk_drm_gem_prime_vmap,
+ .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
.fops = &mtk_drm_fops,
.name = DRIVER_NAME,
@@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
+ ret = drm_fbdev_generic_setup(drm, 32);
+ if (ret)
+ DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+
return 0;
err_deinit:
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 259b7b0de1d2..38483e9ee071 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -241,3 +241,49 @@ err_gem_free:
kfree(mtk_gem);
return ERR_PTR(ret);
}
+
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct sg_table *sgt;
+ struct sg_page_iter iter;
+ unsigned int npages;
+ unsigned int i = 0;
+
+ if (mtk_gem->kvaddr)
+ return mtk_gem->kvaddr;
+
+ sgt = mtk_gem_prime_get_sg_table(obj);
+ if (IS_ERR(sgt))
+ return NULL;
+
+ npages = obj->size >> PAGE_SHIFT;
+ mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+ if (!mtk_gem->pages)
+ goto out;
+
+ for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+ mtk_gem->pages[i++] = sg_page_iter_page(&iter);
+ if (i > npages)
+ break;
+ }
+ mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+
+out:
+ kfree((void *)sgt);
+
+ return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+ if (!mtk_gem->pages)
+ return;
+
+ vunmap(vaddr);
+ mtk_gem->kvaddr = 0;
+ kfree((void *)mtk_gem->pages);
+}
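
mtk_drm_gem_prime_vmap() builds a page array from the buffer's sg_table and maps it write-combined with vmap(); the vunmap hook tears that mapping down again. A hedged sketch of how an importer would reach these hooks through the dma-buf API of this kernel generation (void * based); error paths and begin/end CPU-access calls are omitted, and peek_first_word() is a made-up example function, not part of the patch:

#include <linux/kernel.h>
#include <linux/dma-buf.h>

static int peek_first_word(struct dma_buf *buf)
{
	void *vaddr;
	u32 first;

	vaddr = dma_buf_vmap(buf);	/* should end up in mtk_drm_gem_prime_vmap() */
	if (!vaddr)
		return -ENOMEM;

	first = *(u32 *)vaddr;		/* CPU access through the WC mapping */
	pr_info("first word: %#x\n", first);

	dma_buf_vunmap(buf, vaddr);	/* should end up in mtk_drm_gem_prime_vunmap() */
	return 0;
}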
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 534639b43a1c..c047a7ef294f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -37,6 +37,7 @@ struct mtk_drm_gem_obj {
dma_addr_t dma_addr;
unsigned long dma_attrs;
struct sg_table *sg;
+ struct page **pages;
};
#define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base)
@@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 915cc84621ae..e04e6c293d39 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
if (IS_ERR(regmap))
ret = PTR_ERR(regmap);
if (ret) {
- ret = PTR_ERR(regmap);
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
@@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(remote);
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+ of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
index 4ef9c57ffd44..5223498502c4 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
@@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
.owner = THIS_MODULE,
};
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
- hdmi_phy->pll_rate = rate;
- if (rate <= 74250000)
- *parent_rate = rate;
- else
- *parent_rate = rate / 2;
-
- return rate;
-}
-
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
- return hdmi_phy->pll_rate;
-}
-
void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
u32 bits)
{
@@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
return NULL;
}
-static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy,
- const struct clk_ops **ops)
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+ struct clk_init_data *clk_init)
{
- if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops)
- *ops = hdmi_phy->conf->hdmi_phy_clk_ops;
- else
- dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n");
+ clk_init->flags = hdmi_phy->conf->flags;
+ clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
}
static int mtk_hdmi_phy_probe(struct platform_device *pdev)
@@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
struct clk_init_data clk_init = {
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
- .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
};
struct phy *phy;
@@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
hdmi_phy->dev = dev;
hdmi_phy->conf =
(struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
- mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops);
+ mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
hdmi_phy->pll_hw.init = &clk_init;
hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
if (IS_ERR(hdmi_phy->pll)) {
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
index f39b1fc66612..2d8b3182470d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
@@ -21,6 +21,7 @@ struct mtk_hdmi_phy;
struct mtk_hdmi_phy_conf {
bool tz_disabled;
+ unsigned long flags;
const struct clk_ops *hdmi_phy_clk_ops;
void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
u32 val, u32 mask);
struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate);
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate);
extern struct platform_driver mtk_hdmi_phy_driver;
extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
index fcc42dc6ea7f..d3cc4022e988 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
@@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
usleep_range(80, 100);
}
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate <= 64000000)
pos_div = 3;
- else if (rate <= 12800000)
- pos_div = 1;
+ else if (rate <= 128000000)
+ pos_div = 2;
else
pos_div = 1;
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
RG_HTPLL_IC_MASK);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
@@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ unsigned long out_rate, val;
+
+ val = (readl(hdmi_phy->regs + HDMI_CON6)
+ & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+ switch (val) {
+ case 0x00:
+ out_rate = parent_rate;
+ break;
+ case 0x01:
+ out_rate = parent_rate / 2;
+ break;
+ default:
+ out_rate = parent_rate / 4;
+ break;
+ }
+
+ val = (readl(hdmi_phy->regs + HDMI_CON6)
+ & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+ out_rate *= (val + 1) * 2;
+ val = (readl(hdmi_phy->regs + HDMI_CON2)
+ & RG_HDMITX_TX_POSDIV_MASK);
+ out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+ if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+ out_rate /= 5;
+
+ return out_rate;
+}
+
static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
@@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
.tz_disabled = true,
+ .flags = CLK_SET_RATE_GATE,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
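
The new mt2701 recalc_rate reads the PREDIV, FBKDIV and POSDIV fields back from HDMI_CON6/HDMI_CON2 and reconstructs the output rate from them. A plain-C rendering of the same arithmetic, with made-up field values and a made-up 26 MHz reference (the real register layout lives in the driver and is not reproduced here):

#include <stdio.h>

/* Follows the arithmetic of the new mt2701 mtk_hdmi_pll_recalc_rate():
 * prediv selects /1, /2 or /4 of the parent, the feedback divider
 * multiplies by (fbkdiv + 1) * 2, posdiv shifts right, and the
 * EN_TX_POSDIV bit adds a fixed /5. Field values here are invented
 * for illustration, not read from real registers. */
static unsigned long recalc(unsigned long parent_rate, unsigned int prediv,
			    unsigned int fbkdiv, unsigned int posdiv,
			    int tx_posdiv_en)
{
	unsigned long out;

	switch (prediv) {
	case 0:  out = parent_rate;     break;
	case 1:  out = parent_rate / 2; break;
	default: out = parent_rate / 4; break;
	}

	out *= (fbkdiv + 1) * 2;
	out >>= posdiv;
	if (tx_posdiv_en)
		out /= 5;

	return out;
}

int main(void)
{
	/* e.g. 26 MHz reference, fbkdiv = 31, posdiv = 1, /5 enabled */
	printf("%lu Hz\n", recalc(26000000UL, 0, 31, 1, 1));
	return 0;
}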
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index ed5916b27658..47f8a2951682 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
usleep_range(100, 150);
}
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ hdmi_phy->pll_rate = rate;
+ if (rate <= 74250000)
+ *parent_rate = rate;
+ else
+ *parent_rate = rate / 2;
+
+ return rate;
+}
+
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ return hdmi_phy->pll_rate;
+}
+
static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
@@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
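
The mt8173 file keeps its round_rate/recalc_rate pair as static helpers and now advertises CLK_SET_RATE_PARENT, so the parent-rate choice made in round_rate can propagate up the clock tree. A standalone sketch of just the parent-rate policy, fed with typical HDMI TMDS clocks (this is not driver code):

#include <stdio.h>

/* Mirrors the mt8173 round_rate policy: at or below 74.25 MHz the PLL
 * runs 1:1 with its parent, above that the parent runs at half the
 * requested rate. CLK_SET_RATE_PARENT in the new .flags lets that
 * parent request take effect. */
static unsigned long parent_for(unsigned long rate)
{
	return rate <= 74250000UL ? rate : rate / 2;
}

int main(void)
{
	unsigned long rates[] = { 27000000UL, 74250000UL, 148500000UL, 297000000UL };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("pll %lu Hz -> parent %lu Hz\n", rates[i], parent_for(rates[i]));
	return 0;
}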
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index 7709f2fbb9f7..d4ea82fc493b 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
deleted file mode 100644
index 5de11aa7c775..000000000000
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2016 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
- * Copyright (C) 2014 Endless Mobile
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include "meson_drv.h"
-#include "meson_canvas.h"
-#include "meson_registers.h"
-
-/**
- * DOC: Canvas
- *
- * CANVAS is a memory zone where physical memory frames information
- * are stored for the VIU to scanout.
- */
-
-/* DMC Registers */
-#define DMC_CAV_LUT_DATAL 0x48 /* 0x12 offset in data sheet */
-#define CANVAS_WIDTH_LBIT 29
-#define CANVAS_WIDTH_LWID 3
-#define DMC_CAV_LUT_DATAH 0x4c /* 0x13 offset in data sheet */
-#define CANVAS_WIDTH_HBIT 0
-#define CANVAS_HEIGHT_BIT 9
-#define CANVAS_BLKMODE_BIT 24
-#define CANVAS_ENDIAN_BIT 26
-#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
-#define CANVAS_LUT_WR_EN (0x2 << 8)
-#define CANVAS_LUT_RD_EN (0x1 << 8)
-
-void meson_canvas_setup(struct meson_drm *priv,
- uint32_t canvas_index, uint32_t addr,
- uint32_t stride, uint32_t height,
- unsigned int wrap,
- unsigned int blkmode,
- unsigned int endian)
-{
- unsigned int val;
-
- regmap_write(priv->dmc, DMC_CAV_LUT_DATAL,
- (((addr + 7) >> 3)) |
- (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT));
-
- regmap_write(priv->dmc, DMC_CAV_LUT_DATAH,
- ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) <<
- CANVAS_WIDTH_HBIT) |
- (height << CANVAS_HEIGHT_BIT) |
- (wrap << 22) |
- (blkmode << CANVAS_BLKMODE_BIT) |
- (endian << CANVAS_ENDIAN_BIT));
-
- regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
- CANVAS_LUT_WR_EN | canvas_index);
-
- /* Force a read-back to make sure everything is flushed. */
- regmap_read(priv->dmc, DMC_CAV_LUT_DATAH, &val);
-}
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
deleted file mode 100644
index 85dbf26e2826..000000000000
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2016 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Endless Mobile
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* Canvas LUT Memory */
-
-#ifndef __MESON_CANVAS_H
-#define __MESON_CANVAS_H
-
-#define MESON_CANVAS_ID_OSD1 0x4e
-#define MESON_CANVAS_ID_VD1_0 0x60
-#define MESON_CANVAS_ID_VD1_1 0x61
-#define MESON_CANVAS_ID_VD1_2 0x62
-
-/* Canvas configuration. */
-#define MESON_CANVAS_WRAP_NONE 0x00
-#define MESON_CANVAS_WRAP_X 0x01
-#define MESON_CANVAS_WRAP_Y 0x02
-
-#define MESON_CANVAS_BLKMODE_LINEAR 0x00
-#define MESON_CANVAS_BLKMODE_32x32 0x01
-#define MESON_CANVAS_BLKMODE_64x64 0x02
-
-#define MESON_CANVAS_ENDIAN_SWAP16 0x1
-#define MESON_CANVAS_ENDIAN_SWAP32 0x3
-#define MESON_CANVAS_ENDIAN_SWAP64 0x7
-#define MESON_CANVAS_ENDIAN_SWAP128 0xf
-
-void meson_canvas_setup(struct meson_drm *priv,
- uint32_t canvas_index, uint32_t addr,
- uint32_t stride, uint32_t height,
- unsigned int wrap,
- unsigned int blkmode,
- unsigned int endian);
-
-#endif /* __MESON_CANVAS_H */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 43e29984f8b1..5579f8ac3e3f 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -37,15 +37,19 @@
#include "meson_venc.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
+#define MESON_G12A_VIU_OFFSET 0x5ec0
+
/* CRTC definition */
struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
+ void (*enable_osd1)(struct meson_drm *priv);
+ void (*enable_vd1)(struct meson_drm *priv);
+ unsigned int viu_offset;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -81,6 +85,44 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
+static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!crtc_state) {
+ DRM_ERROR("Invalid crtc_state\n");
+ return;
+ }
+
+ /* VD1 Preblend vertical start/end */
+ writel(FIELD_PREP(GENMASK(11, 0), 2303),
+ priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
+
+ /* Setup Blender */
+ writel(crtc_state->mode.hdisplay |
+ crtc_state->mode.vdisplay << 16,
+ priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
+
+ writel_relaxed(0 << 16 |
+ (crtc_state->mode.hdisplay - 1),
+ priv->io_base + _REG(VPP_OSD1_BLD_H_SCOPE));
+ writel_relaxed(0 << 16 |
+ (crtc_state->mode.vdisplay - 1),
+ priv->io_base + _REG(VPP_OSD1_BLD_V_SCOPE));
+ writel_relaxed(crtc_state->mode.hdisplay << 16 |
+ crtc_state->mode.vdisplay,
+ priv->io_base + _REG(VPP_OUT_H_V_SIZE));
+
+ drm_crtc_vblank_on(crtc);
+
+ priv->viu.osd1_enabled = true;
+}
+
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -111,6 +153,31 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
priv->viu.osd1_enabled = true;
}
+static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_crtc_vblank_off(crtc);
+
+ priv->viu.osd1_enabled = false;
+ priv->viu.osd1_commit = false;
+
+ priv->viu.vd1_enabled = false;
+ priv->viu.vd1_commit = false;
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -174,6 +241,53 @@ static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
.atomic_disable = meson_crtc_atomic_disable,
};
+static const struct drm_crtc_helper_funcs meson_g12a_crtc_helper_funcs = {
+ .atomic_begin = meson_crtc_atomic_begin,
+ .atomic_flush = meson_crtc_atomic_flush,
+ .atomic_enable = meson_g12a_crtc_atomic_enable,
+ .atomic_disable = meson_g12a_crtc_atomic_disable,
+};
+
+static void meson_crtc_enable_osd1(struct meson_drm *priv)
+{
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
+{
+ writel_relaxed(priv->viu.osd_blend_din0_scope_h,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_DIN0_SCOPE_H));
+ writel_relaxed(priv->viu.osd_blend_din0_scope_v,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_DIN0_SCOPE_V));
+ writel_relaxed(priv->viu.osb_blend0_size,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_BLEND0_SIZE));
+ writel_relaxed(priv->viu.osb_blend1_size,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_BLEND1_SIZE));
+}
+
+static void meson_crtc_enable_vd1(struct meson_drm *priv)
+{
+ writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static void meson_g12a_crtc_enable_vd1(struct meson_drm *priv)
+{
+ writel_relaxed(((1 << 16) | /* post bld premult */
+ (1 << 8) | /* post src */
+ (1 << 4) | /* pre bld premult */
+ (1 << 0)),
+ priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+}
+
void meson_crtc_irq(struct meson_drm *priv)
{
struct meson_crtc *meson_crtc = to_meson_crtc(priv->crtc);
@@ -214,20 +328,14 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(priv->viu.osd_sc_v_ctrl0,
priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
- if (priv->canvas)
- meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
- priv->viu.osd1_addr, priv->viu.osd1_stride,
- priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR, 0);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
priv->viu.osd1_addr, priv->viu.osd1_stride,
priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_crtc->enable_osd1)
+ meson_crtc->enable_osd1(priv);
priv->viu.osd1_commit = false;
}
@@ -237,147 +345,164 @@ void meson_crtc_irq(struct meson_drm *priv)
switch (priv->viu.vd1_planes) {
case 3:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_2,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
/* fallthrough */
case 2:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_1,
- priv->viu.vd1_addr1,
- priv->viu.vd1_stride1,
- priv->viu.vd1_height1,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_1,
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
/* fallthrough */
case 1:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_0,
- priv->viu.vd1_addr0,
- priv->viu.vd1_stride0,
- priv->viu.vd1_height0,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_0,
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
};
writel_relaxed(priv->viu.vd1_if0_gen_reg,
- priv->io_base + _REG(VD1_IF0_GEN_REG));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_GEN_REG));
writel_relaxed(priv->viu.vd1_if0_gen_reg,
- priv->io_base + _REG(VD2_IF0_GEN_REG));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_GEN_REG));
writel_relaxed(priv->viu.vd1_if0_gen_reg2,
- priv->io_base + _REG(VD1_IF0_GEN_REG2));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_GEN_REG2));
writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
- priv->io_base + _REG(VIU_VD1_FMT_CTRL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD1_FMT_CTRL));
writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
- priv->io_base + _REG(VIU_VD2_FMT_CTRL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD2_FMT_CTRL));
writel_relaxed(priv->viu.viu_vd1_fmt_w,
- priv->io_base + _REG(VIU_VD1_FMT_W));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD1_FMT_W));
writel_relaxed(priv->viu.viu_vd1_fmt_w,
- priv->io_base + _REG(VIU_VD2_FMT_W));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD2_FMT_W));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD1_IF0_CANVAS0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CANVAS0));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD1_IF0_CANVAS1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CANVAS1));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD2_IF0_CANVAS0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CANVAS0));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD2_IF0_CANVAS1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CANVAS1));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD1_IF0_LUMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_X0));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD1_IF0_LUMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_X1));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD2_IF0_LUMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_X0));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD2_IF0_LUMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_X1));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD1_IF0_LUMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_Y0));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD1_IF0_LUMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_Y1));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD2_IF0_LUMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_Y0));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD2_IF0_LUMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_Y1));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD1_IF0_CHROMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_X0));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD1_IF0_CHROMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_X1));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD2_IF0_CHROMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_X0));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD2_IF0_CHROMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_X1));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD1_IF0_CHROMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_Y0));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD1_IF0_CHROMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_Y1));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD2_IF0_CHROMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_Y0));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD2_IF0_CHROMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_Y1));
writel_relaxed(priv->viu.vd1_if0_repeat_loop,
- priv->io_base + _REG(VD1_IF0_RPT_LOOP));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RPT_LOOP));
writel_relaxed(priv->viu.vd1_if0_repeat_loop,
- priv->io_base + _REG(VD2_IF0_RPT_LOOP));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_RPT_LOOP));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT));
- writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_PSEL));
writel_relaxed(priv->viu.vd1_range_map_y,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_Y));
writel_relaxed(priv->viu.vd1_range_map_cb,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_CB));
writel_relaxed(priv->viu.vd1_range_map_cr,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_CR));
writel_relaxed(0x78404,
priv->io_base + _REG(VPP_SC_MISC));
writel_relaxed(priv->viu.vpp_pic_in_height,
@@ -423,11 +548,8 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
/* Enable VD1 */
- writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
- VPP_COLOR_MNG_ENABLE,
- VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
- VPP_COLOR_MNG_ENABLE,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_crtc->enable_vd1)
+ meson_crtc->enable_vd1(priv);
priv->viu.vd1_commit = false;
}
@@ -464,7 +586,16 @@ int meson_crtc_create(struct meson_drm *priv)
return ret;
}
- drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
+ meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
+ meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
+ drm_crtc_helper_add(crtc, &meson_g12a_crtc_helper_funcs);
+ } else {
+ meson_crtc->enable_osd1 = meson_crtc_enable_osd1;
+ meson_crtc->enable_vd1 = meson_crtc_enable_vd1;
+ drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
+ }
priv->crtc = crtc;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 8a4ebcb6405c..72b01e6be0d9 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -48,7 +48,6 @@
#include "meson_vpp.h"
#include "meson_viu.h"
#include "meson_venc.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
#define DRIVER_NAME "meson"
@@ -91,6 +90,18 @@ static irqreturn_t meson_irq(int irq, void *arg)
return IRQ_HANDLED;
}
+static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /*
+ * We need a 64-byte aligned stride and a PAGE-aligned size
+ */
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver meson_driver = {
@@ -113,7 +124,7 @@ static struct drm_driver meson_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
/* GEM Ops */
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = meson_dumb_create,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -231,50 +242,31 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
priv->canvas = meson_canvas_get(dev);
- if (!IS_ERR(priv->canvas)) {
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
- if (ret)
- goto free_drm;
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- goto free_drm;
- }
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
- goto free_drm;
- }
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
- goto free_drm;
- }
- } else {
- priv->canvas = NULL;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
- if (!res) {
- ret = -EINVAL;
- goto free_drm;
- }
- /* Simply ioremap since it may be a shared register zone */
- regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs) {
- ret = -EADDRNOTAVAIL;
- goto free_drm;
- }
+ if (IS_ERR(priv->canvas)) {
+ ret = PTR_ERR(priv->canvas);
+ goto free_drm;
+ }
- priv->dmc = devm_regmap_init_mmio(dev, regs,
- &meson_regmap_config);
- if (IS_ERR(priv->dmc)) {
- dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
- ret = PTR_ERR(priv->dmc);
- goto free_drm;
- }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
+ if (ret)
+ goto free_drm;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ goto free_drm;
}
priv->vsync_irq = platform_get_irq(pdev, 0);
@@ -467,6 +459,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "amlogic,meson-gxbb-vpu" },
{ .compatible = "amlogic,meson-gxl-vpu" },
{ .compatible = "amlogic,meson-gxm-vpu" },
+ { .compatible = "amlogic,meson-g12a-vpu" },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
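
meson_dumb_create() above replaces the generic CMA dumb_create so every dumb buffer gets a 64-byte aligned stride and a page-aligned size, presumably to match what the OSD/canvas hardware expects. A quick userspace-style check of the same rounding for a common mode (4 KiB pages assumed):

#include <stdio.h>

/* Same pitch/size rule as meson_dumb_create(): stride rounded up to 64
 * bytes, total size rounded up to the page size. Worked example for a
 * 1920x1080 32bpp dumb buffer. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 1920, height = 1080, bpp = 32;
	unsigned int pitch = ALIGN_UP((width * bpp + 7) / 8, 64);
	unsigned int size  = ALIGN_UP(pitch * height, 4096);

	printf("pitch %u bytes, size %u bytes\n", pitch, size);
	return 0;
}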
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 4dccf4cd042a..9614baa836b9 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -29,7 +29,6 @@ struct meson_drm {
struct device *dev;
void __iomem *io_base;
struct regmap *hhi;
- struct regmap *dmc;
int vsync_irq;
struct meson_canvas *canvas;
@@ -63,6 +62,10 @@ struct meson_drm {
uint32_t osd_sc_h_phase_step;
uint32_t osd_sc_h_ctrl0;
uint32_t osd_sc_v_ctrl0;
+ uint32_t osd_blend_din0_scope_h;
+ uint32_t osd_blend_din0_scope_v;
+ uint32_t osb_blend0_size;
+ uint32_t osb_blend1_size;
bool vd1_enabled;
bool vd1_commit;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 563953ec6ad0..779da21143b9 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/component.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -105,6 +106,7 @@
#define HDMITX_TOP_ADDR_REG 0x0
#define HDMITX_TOP_DATA_REG 0x4
#define HDMITX_TOP_CTRL_REG 0x8
+#define HDMITX_TOP_G12A_OFFSET 0x8000
/* Controller Communication Channel */
#define HDMITX_DWC_ADDR_REG 0x10
@@ -118,6 +120,8 @@
#define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
#define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
#define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
+#define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */
+#define HHI_HDMI_PHY_CNTL5 0x3b4 /* 0xed */
static DEFINE_SPINLOCK(reg_lock);
@@ -127,12 +131,26 @@ enum meson_venc_source {
MESON_VENC_SOURCE_ENCP = 2,
};
+struct meson_dw_hdmi;
+
+struct meson_dw_hdmi_data {
+ unsigned int (*top_read)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr);
+ void (*top_write)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data);
+ unsigned int (*dwc_read)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr);
+ void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data);
+};
+
struct meson_dw_hdmi {
struct drm_encoder encoder;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
void __iomem *hdmitx;
+ const struct meson_dw_hdmi_data *data;
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
@@ -174,6 +192,12 @@ static unsigned int dw_hdmi_top_read(struct meson_dw_hdmi *dw_hdmi,
return data;
}
+static unsigned int dw_hdmi_g12a_top_read(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr)
+{
+ return readl(dw_hdmi->hdmitx + HDMITX_TOP_G12A_OFFSET + (addr << 2));
+}
+
static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
@@ -191,18 +215,24 @@ static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
spin_unlock_irqrestore(&reg_lock, flags);
}
+static inline void dw_hdmi_g12a_top_write(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data)
+{
+ writel(data, dw_hdmi->hdmitx + HDMITX_TOP_G12A_OFFSET + (addr << 2));
+}
+
/* Helper to change specific bits in PHY registers */
static inline void dw_hdmi_top_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
- unsigned int data = dw_hdmi_top_read(dw_hdmi, addr);
+ unsigned int data = dw_hdmi->data->top_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
- dw_hdmi_top_write(dw_hdmi, addr, data);
+ dw_hdmi->data->top_write(dw_hdmi, addr, data);
}
static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
@@ -226,6 +256,12 @@ static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
return data;
}
+static unsigned int dw_hdmi_g12a_dwc_read(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr)
+{
+ return readb(dw_hdmi->hdmitx + addr);
+}
+
static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
@@ -243,18 +279,24 @@ static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
spin_unlock_irqrestore(&reg_lock, flags);
}
+static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data)
+{
+ writeb(data, dw_hdmi->hdmitx + addr);
+}
+
/* Helper to change specific bits in controller registers */
static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
- unsigned int data = dw_hdmi_dwc_read(dw_hdmi, addr);
+ unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
- dw_hdmi_dwc_write(dw_hdmi, addr, data);
+ dw_hdmi->data->dwc_write(dw_hdmi, addr, data);
}
/* Bridge */
@@ -300,6 +342,24 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33632122);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2000115b);
}
+ } else if (dw_hdmi_is_compatible(dw_hdmi,
+ "amlogic,meson-g12a-dw-hdmi")) {
+ if (pixel_clock >= 371250) {
+ /* 5.94Gbps, 3.7125Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x37eb65c4);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x0000080b);
+ } else if (pixel_clock >= 297000) {
+ /* 2.97Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33eb6262);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x00000003);
+ } else {
+ /* 1.485Gbps, and below */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33eb4242);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x00000003);
+ }
}
}
@@ -375,7 +435,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
/* Bring out of reset */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
/* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
@@ -384,24 +444,25 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
0x3 << 4, 0x3 << 4);
/* Enable normal output to PHY */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
/* TMDS pattern setup (TOFIX Handle the YUV420 case) */
if (mode->clock > 340000) {
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+ 0);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
0x03ff03ff);
} else {
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0x001f001f);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
0x001f001f);
}
/* Load TMDS pattern */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
msleep(20);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
/* Setup PHY parameters */
meson_hdmi_phy_setup_mode(dw_hdmi, mode);
@@ -412,7 +473,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* BIT_INVERT */
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi"))
+ dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+ dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
BIT(17), 0);
else
@@ -480,7 +542,7 @@ static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
{
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
- return !!dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
+ return !!dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
connector_status_connected : connector_status_disconnected;
}
@@ -490,11 +552,11 @@ static void dw_hdmi_setup_hpd(struct dw_hdmi *hdmi,
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
/* Setup HPD Filter */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
(0xa << 12) | 0xa0);
/* Clear interrupts */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
/* Unmask interrupts */
@@ -515,8 +577,8 @@ static irqreturn_t dw_hdmi_top_irq(int irq, void *dev_id)
struct meson_dw_hdmi *dw_hdmi = dev_id;
u32 stat;
- stat = dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
+ stat = dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
/* HPD Events, handle in the threaded interrupt handler */
if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
@@ -686,7 +748,9 @@ static const struct drm_encoder_helper_funcs
static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
unsigned int *result)
{
- *result = dw_hdmi_dwc_read(context, reg);
+ struct meson_dw_hdmi *dw_hdmi = context;
+
+ *result = dw_hdmi->data->dwc_read(dw_hdmi, reg);
return 0;
@@ -695,7 +759,9 @@ static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
static int meson_dw_hdmi_reg_write(void *context, unsigned int reg,
unsigned int val)
{
- dw_hdmi_dwc_write(context, reg, val);
+ struct meson_dw_hdmi *dw_hdmi = context;
+
+ dw_hdmi->data->dwc_write(dw_hdmi, reg, val);
return 0;
}
@@ -709,6 +775,20 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
.fast_io = true,
};
+static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = {
+ .top_read = dw_hdmi_top_read,
+ .top_write = dw_hdmi_top_write,
+ .dwc_read = dw_hdmi_dwc_read,
+ .dwc_write = dw_hdmi_dwc_write,
+};
+
+static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
+ .top_read = dw_hdmi_g12a_top_read,
+ .top_write = dw_hdmi_g12a_top_write,
+ .dwc_read = dw_hdmi_g12a_dwc_read,
+ .dwc_write = dw_hdmi_g12a_dwc_write,
+};
+
static bool meson_hdmi_connector_is_available(struct device *dev)
{
struct device_node *ep, *remote;
@@ -735,6 +815,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct meson_dw_hdmi_data *match;
struct meson_dw_hdmi *meson_dw_hdmi;
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
@@ -751,6 +832,12 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return -ENODEV;
}
+ match = of_device_get_match_data(&pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
meson_dw_hdmi = devm_kzalloc(dev, sizeof(*meson_dw_hdmi),
GFP_KERNEL);
if (!meson_dw_hdmi)
@@ -758,6 +845,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
meson_dw_hdmi->priv = priv;
meson_dw_hdmi->dev = dev;
+ meson_dw_hdmi->data = match;
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
encoder = &meson_dw_hdmi->encoder;
@@ -858,24 +946,28 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
reset_control_reset(meson_dw_hdmi->hdmitx_phy);
/* Enable APB3 fail on error */
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ }
/* Bring out of reset */
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
msleep(20);
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_CLK_CNTL, 0xff);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_CLK_CNTL, 0xff);
/* Enable HDMI-TX Interrupt */
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_CORE);
/* Bridge / Connector */
@@ -924,9 +1016,14 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev)
}
static const struct of_device_id meson_dw_hdmi_of_table[] = {
- { .compatible = "amlogic,meson-gxbb-dw-hdmi" },
- { .compatible = "amlogic,meson-gxl-dw-hdmi" },
- { .compatible = "amlogic,meson-gxm-dw-hdmi" },
+ { .compatible = "amlogic,meson-gxbb-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-gxl-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-gxm-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-g12a-dw-hdmi",
+ .data = &meson_dw_hdmi_g12a_data },
{ }
};
MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
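As an aside to the hunks above: they replace every direct dw_hdmi_top_*/dw_hdmi_dwc_* call with function pointers carried in per-SoC match data, selected once in meson_dw_hdmi_bind() via of_device_get_match_data(). Below is a stand-alone illustration of that ops-table pattern only, not part of the patch; hdmi_ops, my_device, gx_ops and the register array are invented for the example.

/*
 * Illustrative sketch (not driver code): an ops table selected once at
 * bind time, mirroring the shape of struct meson_dw_hdmi_data above.
 */
#include <stdio.h>

struct my_device;

struct hdmi_ops {
	unsigned int (*top_read)(struct my_device *dev, unsigned int addr);
	void (*top_write)(struct my_device *dev, unsigned int addr,
			  unsigned int data);
};

struct my_device {
	const struct hdmi_ops *ops;	/* picked once at probe/bind time */
	unsigned int regs[256];		/* stand-in for the MMIO window */
};

/* "GX"-style accessors: plain indexed reads/writes */
static unsigned int gx_top_read(struct my_device *dev, unsigned int addr)
{
	return dev->regs[addr];
}

static void gx_top_write(struct my_device *dev, unsigned int addr,
			 unsigned int data)
{
	dev->regs[addr] = data;
}

static const struct hdmi_ops gx_ops = {
	.top_read = gx_top_read,
	.top_write = gx_top_write,
};

int main(void)
{
	struct my_device dev = { .ops = &gx_ops };

	/* callers never care which SoC variant they are running on */
	dev.ops->top_write(&dev, 0x0e, 0x1);
	printf("stat = %u\n", dev.ops->top_read(&dev, 0x0e));
	return 0;
}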
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.h b/drivers/gpu/drm/meson/meson_dw_hdmi.h
index 0b81183125e3..03e2f0c1a2d5 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.h
@@ -21,9 +21,12 @@
#define __MESON_DW_HDMI_H
/*
- * Bit 7 RW Reserved. Default 1.
- * Bit 6 RW Reserved. Default 1.
- * Bit 5 RW Reserved. Default 1.
+ * Bit 15:10 RW Reserved. Default 1 starting from G12A
+ * Bit 9 RW sw_reset_i2c starting from G12A
+ * Bit 8 RW sw_reset_axiarb starting from G12A
+ * Bit 7 RW Reserved. Default 1, sw_reset_emp starting from G12A
+ * Bit 6 RW Reserved. Default 1, sw_reset_flt starting from G12A
+ * Bit 5 RW Reserved. Default 1, sw_reset_hdcp22 starting from G12A
* Bit 4 RW sw_reset_phyif: PHY interface. 1=Apply reset; 0=Release from reset.
* Default 1.
* Bit 3 RW sw_reset_intr: interrupt module. 1=Apply reset;
@@ -39,12 +42,16 @@
#define HDMITX_TOP_SW_RESET (0x000)
/*
+ * Bit 31 RW free_clk_en: 0=Enable clock gating for power saving; 1=Disable
* Bit 12 RW i2s_ws_inv:1=Invert i2s_ws; 0=No invert. Default 0.
* Bit 11 RW i2s_clk_inv: 1=Invert i2s_clk; 0=No invert. Default 0.
* Bit 10 RW spdif_clk_inv: 1=Invert spdif_clk; 0=No invert. Default 0.
* Bit 9 RW tmds_clk_inv: 1=Invert tmds_clk; 0=No invert. Default 0.
* Bit 8 RW pixel_clk_inv: 1=Invert pixel_clk; 0=No invert. Default 0.
- * Bit 4 RW cec_clk_en: 1=enable cec_clk; 0=disable. Default 0.
+ * Bit 7 RW hdcp22_skpclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 6 RW hdcp22_esmclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 5 RW hdcp22_tmdsclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 4 RW cec_clk_en: 1=enable cec_clk; 0=disable. Default 0. Reserved for G12A
* Bit 3 RW i2s_clk_en: 1=enable i2s_clk; 0=disable. Default 0.
* Bit 2 RW spdif_clk_en: 1=enable spdif_clk; 0=disable. Default 0.
* Bit 1 RW tmds_clk_en: 1=enable tmds_clk; 0=disable. Default 0.
@@ -53,6 +60,8 @@
#define HDMITX_TOP_CLK_CNTL (0x001)
/*
+ * Bit 31:28 RW rxsense_glitch_width: starting from G12A
+ * Bit 27:16 RW rxsense_valid_width: starting from G12A
* Bit 11: 0 RW hpd_valid_width: filter out width <= M*1024. Default 0.
* Bit 15:12 RW hpd_glitch_width: filter out glitch <= N. Default 0.
*/
@@ -61,6 +70,9 @@
/*
* intr_maskn: MASK_N, one bit per interrupt source.
* 1=Enable interrupt source; 0=Disable interrupt source. Default 0.
+ * [ 7] rxsense_fall starting from G12A
+ * [ 6] rxsense_rise starting from G12A
+ * [ 5] err_i2c_timeout starting from G12A
* [ 4] hdcp22_rndnum_err
* [ 3] nonce_rfrsh_rise
* [ 2] hpd_fall_intr
@@ -73,6 +85,9 @@
* Bit 30: 0 RW intr_stat: For each bit, write 1 to manually set the interrupt
* bit, read back the interrupt status.
* Bit 31 R IP interrupt status
+ * Bit 7 RW rxsense_fall starting from G12A
+ * Bit 6 RW rxsense_rise starting from G12A
+ * Bit 5 RW err_i2c_timeout starting from G12A
* Bit 2 RW hpd_fall
* Bit 1 RW hpd_rise
* Bit 0 RW IP interrupt
@@ -80,6 +95,9 @@
#define HDMITX_TOP_INTR_STAT (0x004)
/*
+ * [7] rxsense_fall starting from G12A
+ * [6] rxsense_rise starting from G12A
+ * [5] err_i2c_timeout starting from G12A
* [4] hdcp22_rndnum_err
* [3] nonce_rfrsh_rise
* [2] hpd_fall
@@ -91,6 +109,8 @@
#define HDMITX_TOP_INTR_CORE BIT(0)
#define HDMITX_TOP_INTR_HPD_RISE BIT(1)
#define HDMITX_TOP_INTR_HPD_FALL BIT(2)
+#define HDMITX_TOP_INTR_RXSENSE_RISE BIT(6)
+#define HDMITX_TOP_INTR_RXSENSE_FALL BIT(7)
/* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
* 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0.
@@ -140,7 +160,9 @@
*/
#define HDMITX_TOP_REVOCMEM_STAT (0x00D)
-/* Bit 0 R filtered HPD status. */
+/* Bit 1 R filtered RxSense status
+ * Bit 0 R filtered HPD status.
+ */
#define HDMITX_TOP_STAT0 (0x00E)
#endif /* __MESON_DW_HDMI_H */
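Per the updated comment, HDMITX_TOP_STAT0 now exposes the filtered RxSense level in bit 1 alongside the filtered HPD level in bit 0. A throwaway sketch of decoding that layout follows; the TOP_STAT0_* names and helpers are invented here, only the bit positions come from the comment above.

/* Illustrative sketch only: decode the TOP_STAT0 bits documented above. */
#include <stdbool.h>
#include <stdio.h>

#define TOP_STAT0_HPD		(1u << 0)	/* bit 0: filtered HPD */
#define TOP_STAT0_RXSENSE	(1u << 1)	/* bit 1: filtered RxSense */

static bool stat0_hpd(unsigned int stat0)
{
	return stat0 & TOP_STAT0_HPD;
}

static bool stat0_rxsense(unsigned int stat0)
{
	return stat0 & TOP_STAT0_RXSENSE;
}

int main(void)
{
	unsigned int stat0 = 0x3;	/* pretend register readout */

	printf("hpd=%d rxsense=%d\n", stat0_hpd(stat0), stat0_rxsense(stat0));
	return 0;
}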
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index 691a9fd16b36..bdbf925ff3e8 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -22,7 +22,6 @@
#include "meson_overlay.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/* VD1_IF0_GEN_REG */
@@ -350,13 +349,6 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
DRM_DEBUG_DRIVER("\n");
- /* Fallback is canvas provider is not available */
- if (!priv->canvas) {
- priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
- priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
- priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
- }
-
interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
spin_lock_irqsave(&priv->drm->event_lock, flags);
@@ -524,8 +516,14 @@ static void meson_overlay_atomic_disable(struct drm_plane *plane,
priv->viu.vd1_enabled = false;
/* Disable VD1 */
- writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_GEN_REG + 0x17b0));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_GEN_REG + 0x17b0));
+ } else
+ writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
}
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 6119a0224278..bf8f1fab63aa 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -38,7 +38,6 @@
#include "meson_plane.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/* OSD_SCI_WH_M1 */
@@ -148,10 +147,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
(0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
- if (priv->canvas)
- canvas_id_osd1 = priv->canvas_id_osd1;
- else
- canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
+ canvas_id_osd1 = priv->canvas_id_osd1;
/* Set up BLK0 to point to the right canvas */
priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
@@ -298,6 +294,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ priv->viu.osd_blend_din0_scope_h = ((dest.x2 - 1) << 16) | dest.x1;
+ priv->viu.osd_blend_din0_scope_v = ((dest.y2 - 1) << 16) | dest.y1;
+ priv->viu.osb_blend0_size = dst_h << 16 | dst_w;
+ priv->viu.osb_blend1_size = dst_h << 16 | dst_w;
+ }
+
/* Update Canvas with buffer address */
gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -324,8 +327,12 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
struct meson_drm *priv = meson_plane->priv;
/* Disable OSD1 */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_bits_relaxed(BIT(0) | BIT(21), 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ else
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
meson_plane->enabled = false;
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 5c7e02c703bc..cfaf90501bb1 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -216,6 +216,29 @@
#define VIU_OSD2_FIFO_CTRL_STAT 0x1a4b
#define VIU_OSD2_TEST_RDDATA 0x1a4c
#define VIU_OSD2_PROT_CTRL 0x1a4e
+#define VIU_OSD2_MALI_UNPACK_CTRL 0x1abd
+#define VIU_OSD2_DIMM_CTRL 0x1acf
+
+#define VIU_OSD3_CTRL_STAT 0x3d80
+#define VIU_OSD3_CTRL_STAT2 0x3d81
+#define VIU_OSD3_COLOR_ADDR 0x3d82
+#define VIU_OSD3_COLOR 0x3d83
+#define VIU_OSD3_TCOLOR_AG0 0x3d84
+#define VIU_OSD3_TCOLOR_AG1 0x3d85
+#define VIU_OSD3_TCOLOR_AG2 0x3d86
+#define VIU_OSD3_TCOLOR_AG3 0x3d87
+#define VIU_OSD3_BLK0_CFG_W0 0x3d88
+#define VIU_OSD3_BLK0_CFG_W1 0x3d8c
+#define VIU_OSD3_BLK0_CFG_W2 0x3d90
+#define VIU_OSD3_BLK0_CFG_W3 0x3d94
+#define VIU_OSD3_BLK0_CFG_W4 0x3d98
+#define VIU_OSD3_BLK1_CFG_W4 0x3d99
+#define VIU_OSD3_BLK2_CFG_W4 0x3d9a
+#define VIU_OSD3_FIFO_CTRL_STAT 0x3d9c
+#define VIU_OSD3_TEST_RDDATA 0x3d9d
+#define VIU_OSD3_PROT_CTRL 0x3d9e
+#define VIU_OSD3_MALI_UNPACK_CTRL 0x3d9f
+#define VIU_OSD3_DIMM_CTRL 0x3da0
#define VD1_IF0_GEN_REG 0x1a50
#define VD1_IF0_CANVAS0 0x1a51
@@ -287,6 +310,27 @@
#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
#define VD1_IF0_GEN_REG3 0x1aa7
+
+#define VIU_OSD_BLENDO_H_START_END 0x1aa9
+#define VIU_OSD_BLENDO_V_START_END 0x1aaa
+#define VIU_OSD_BLEND_GEN_CTRL0 0x1aab
+#define VIU_OSD_BLEND_GEN_CTRL1 0x1aac
+#define VIU_OSD_BLEND_DUMMY_DATA 0x1aad
+#define VIU_OSD_BLEND_CURRENT_XY 0x1aae
+
+#define VIU_OSD2_MATRIX_CTRL 0x1ab0
+#define VIU_OSD2_MATRIX_COEF00_01 0x1ab1
+#define VIU_OSD2_MATRIX_COEF02_10 0x1ab2
+#define VIU_OSD2_MATRIX_COEF11_12 0x1ab3
+#define VIU_OSD2_MATRIX_COEF20_21 0x1ab4
+#define VIU_OSD2_MATRIX_COEF22 0x1ab5
+#define VIU_OSD2_MATRIX_OFFSET0_1 0x1ab6
+#define VIU_OSD2_MATRIX_OFFSET2 0x1ab7
+#define VIU_OSD2_MATRIX_PRE_OFFSET0_1 0x1ab8
+#define VIU_OSD2_MATRIX_PRE_OFFSET2 0x1ab9
+#define VIU_OSD2_MATRIX_PROBE_COLOR 0x1aba
+#define VIU_OSD2_MATRIX_HL_COLOR 0x1abb
+#define VIU_OSD2_MATRIX_PROBE_POS 0x1abc
#define VIU_OSD1_EOTF_CTL 0x1ad4
#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
@@ -481,6 +525,82 @@
#define VPP_OSD_SCALE_COEF 0x1dcd
#define VPP_INT_LINE_NUM 0x1dce
+#define VPP_WRAP_OSD1_MATRIX_COEF00_01 0x3d60
+#define VPP_WRAP_OSD1_MATRIX_COEF02_10 0x3d61
+#define VPP_WRAP_OSD1_MATRIX_COEF11_12 0x3d62
+#define VPP_WRAP_OSD1_MATRIX_COEF20_21 0x3d63
+#define VPP_WRAP_OSD1_MATRIX_COEF22 0x3d64
+#define VPP_WRAP_OSD1_MATRIX_COEF13_14 0x3d65
+#define VPP_WRAP_OSD1_MATRIX_COEF23_24 0x3d66
+#define VPP_WRAP_OSD1_MATRIX_COEF15_25 0x3d67
+#define VPP_WRAP_OSD1_MATRIX_CLIP 0x3d68
+#define VPP_WRAP_OSD1_MATRIX_OFFSET0_1 0x3d69
+#define VPP_WRAP_OSD1_MATRIX_OFFSET2 0x3d6a
+#define VPP_WRAP_OSD1_MATRIX_PRE_OFFSET0_1 0x3d6b
+#define VPP_WRAP_OSD1_MATRIX_PRE_OFFSET2 0x3d6c
+#define VPP_WRAP_OSD1_MATRIX_EN_CTRL 0x3d6d
+
+#define VPP_WRAP_OSD2_MATRIX_COEF00_01 0x3d70
+#define VPP_WRAP_OSD2_MATRIX_COEF02_10 0x3d71
+#define VPP_WRAP_OSD2_MATRIX_COEF11_12 0x3d72
+#define VPP_WRAP_OSD2_MATRIX_COEF20_21 0x3d73
+#define VPP_WRAP_OSD2_MATRIX_COEF22 0x3d74
+#define VPP_WRAP_OSD2_MATRIX_COEF13_14 0x3d75
+#define VPP_WRAP_OSD2_MATRIX_COEF23_24 0x3d76
+#define VPP_WRAP_OSD2_MATRIX_COEF15_25 0x3d77
+#define VPP_WRAP_OSD2_MATRIX_CLIP 0x3d78
+#define VPP_WRAP_OSD2_MATRIX_OFFSET0_1 0x3d79
+#define VPP_WRAP_OSD2_MATRIX_OFFSET2 0x3d7a
+#define VPP_WRAP_OSD2_MATRIX_PRE_OFFSET0_1 0x3d7b
+#define VPP_WRAP_OSD2_MATRIX_PRE_OFFSET2 0x3d7c
+#define VPP_WRAP_OSD2_MATRIX_EN_CTRL 0x3d7d
+
+#define VPP_WRAP_OSD3_MATRIX_COEF00_01 0x3db0
+#define VPP_WRAP_OSD3_MATRIX_COEF02_10 0x3db1
+#define VPP_WRAP_OSD3_MATRIX_COEF11_12 0x3db2
+#define VPP_WRAP_OSD3_MATRIX_COEF20_21 0x3db3
+#define VPP_WRAP_OSD3_MATRIX_COEF22 0x3db4
+#define VPP_WRAP_OSD3_MATRIX_COEF13_14 0x3db5
+#define VPP_WRAP_OSD3_MATRIX_COEF23_24 0x3db6
+#define VPP_WRAP_OSD3_MATRIX_COEF15_25 0x3db7
+#define VPP_WRAP_OSD3_MATRIX_CLIP 0x3db8
+#define VPP_WRAP_OSD3_MATRIX_OFFSET0_1 0x3db9
+#define VPP_WRAP_OSD3_MATRIX_OFFSET2 0x3dba
+#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET0_1 0x3dbb
+#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
+#define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
+
+/* osd2 scaler */
+#define OSD2_VSC_PHASE_STEP 0x3d00
+#define OSD2_VSC_INI_PHASE 0x3d01
+#define OSD2_VSC_CTRL0 0x3d02
+#define OSD2_HSC_PHASE_STEP 0x3d03
+#define OSD2_HSC_INI_PHASE 0x3d04
+#define OSD2_HSC_CTRL0 0x3d05
+#define OSD2_HSC_INI_PAT_CTRL 0x3d06
+#define OSD2_SC_DUMMY_DATA 0x3d07
+#define OSD2_SC_CTRL0 0x3d08
+#define OSD2_SCI_WH_M1 0x3d09
+#define OSD2_SCO_H_START_END 0x3d0a
+#define OSD2_SCO_V_START_END 0x3d0b
+#define OSD2_SCALE_COEF_IDX 0x3d18
+#define OSD2_SCALE_COEF 0x3d19
+
+/* osd34 scaler */
+#define OSD34_SCALE_COEF_IDX 0x3d1e
+#define OSD34_SCALE_COEF 0x3d1f
+#define OSD34_VSC_PHASE_STEP 0x3d20
+#define OSD34_VSC_INI_PHASE 0x3d21
+#define OSD34_VSC_CTRL0 0x3d22
+#define OSD34_HSC_PHASE_STEP 0x3d23
+#define OSD34_HSC_INI_PHASE 0x3d24
+#define OSD34_HSC_CTRL0 0x3d25
+#define OSD34_HSC_INI_PAT_CTRL 0x3d26
+#define OSD34_SC_DUMMY_DATA 0x3d27
+#define OSD34_SC_CTRL0 0x3d28
+#define OSD34_SCI_WH_M1 0x3d29
+#define OSD34_SCO_H_START_END 0x3d2a
+#define OSD34_SCO_V_START_END 0x3d2b
/* viu2 */
#define VIU2_ADDR_START 0x1e00
#define VIU2_ADDR_END 0x1eff
@@ -1400,4 +1520,131 @@
#define OSDSR_YBIC_VCOEF0 0x3149
#define OSDSR_CBIC_VCOEF0 0x314a
+/* osd afbcd on gxtvbb */
+#define OSD1_AFBCD_ENABLE 0x31a0
+#define OSD1_AFBCD_MODE 0x31a1
+#define OSD1_AFBCD_SIZE_IN 0x31a2
+#define OSD1_AFBCD_HDR_PTR 0x31a3
+#define OSD1_AFBCD_FRAME_PTR 0x31a4
+#define OSD1_AFBCD_CHROMA_PTR 0x31a5
+#define OSD1_AFBCD_CONV_CTRL 0x31a6
+#define OSD1_AFBCD_STATUS 0x31a8
+#define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9
+#define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa
+#define VIU_MISC_CTRL1 0x1a07
+
+/* add for gxm and 962e dv core2 */
+#define DOLBY_CORE2A_SWAP_CTRL1 0x3434
+#define DOLBY_CORE2A_SWAP_CTRL2 0x3435
+
+/* osd afbc on g12a */
+#define VPU_MAFBC_BLOCK_ID 0x3a00
+#define VPU_MAFBC_IRQ_RAW_STATUS 0x3a01
+#define VPU_MAFBC_IRQ_CLEAR 0x3a02
+#define VPU_MAFBC_IRQ_MASK 0x3a03
+#define VPU_MAFBC_IRQ_STATUS 0x3a04
+#define VPU_MAFBC_COMMAND 0x3a05
+#define VPU_MAFBC_STATUS 0x3a06
+#define VPU_MAFBC_SURFACE_CFG 0x3a07
+
+/* osd afbc on g12a */
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11
+#define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12
+#define VPU_MAFBC_BUFFER_WIDTH_S0 0x3a13
+#define VPU_MAFBC_BUFFER_HEIGHT_S0 0x3a14
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S0 0x3a15
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S0 0x3a16
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S0 0x3a17
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S0 0x3a18
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S0 0x3a19
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0 0x3a1a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S0 0x3a1b
+#define VPU_MAFBC_PREFETCH_CFG_S0 0x3a1c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S1 0x3a30
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S1 0x3a31
+#define VPU_MAFBC_FORMAT_SPECIFIER_S1 0x3a32
+#define VPU_MAFBC_BUFFER_WIDTH_S1 0x3a33
+#define VPU_MAFBC_BUFFER_HEIGHT_S1 0x3a34
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S1 0x3a35
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S1 0x3a36
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S1 0x3a37
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S1 0x3a38
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S1 0x3a39
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S1 0x3a3a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S1 0x3a3b
+#define VPU_MAFBC_PREFETCH_CFG_S1 0x3a3c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S2 0x3a50
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S2 0x3a51
+#define VPU_MAFBC_FORMAT_SPECIFIER_S2 0x3a52
+#define VPU_MAFBC_BUFFER_WIDTH_S2 0x3a53
+#define VPU_MAFBC_BUFFER_HEIGHT_S2 0x3a54
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S2 0x3a55
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S2 0x3a56
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S2 0x3a57
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S2 0x3a58
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S2 0x3a59
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S2 0x3a5a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S2 0x3a5b
+#define VPU_MAFBC_PREFETCH_CFG_S2 0x3a5c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S3 0x3a70
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S3 0x3a71
+#define VPU_MAFBC_FORMAT_SPECIFIER_S3 0x3a72
+#define VPU_MAFBC_BUFFER_WIDTH_S3 0x3a73
+#define VPU_MAFBC_BUFFER_HEIGHT_S3 0x3a74
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S3 0x3a75
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S3 0x3a76
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S3 0x3a77
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S3 0x3a78
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S3 0x3a79
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S3 0x3a7a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S3 0x3a7b
+#define VPU_MAFBC_PREFETCH_CFG_S3 0x3a7c
+
+#define DOLBY_PATH_CTRL 0x1a0c
+#define OSD_PATH_MISC_CTRL 0x1a0e
+#define MALI_AFBCD_TOP_CTRL 0x1a0f
+
+#define VIU_OSD_BLEND_CTRL 0x39b0
+#define VIU_OSD_BLEND_CTRL1 0x39c0
+#define VIU_OSD_BLEND_DIN0_SCOPE_H 0x39b1
+#define VIU_OSD_BLEND_DIN0_SCOPE_V 0x39b2
+#define VIU_OSD_BLEND_DIN1_SCOPE_H 0x39b3
+#define VIU_OSD_BLEND_DIN1_SCOPE_V 0x39b4
+#define VIU_OSD_BLEND_DIN2_SCOPE_H 0x39b5
+#define VIU_OSD_BLEND_DIN2_SCOPE_V 0x39b6
+#define VIU_OSD_BLEND_DIN3_SCOPE_H 0x39b7
+#define VIU_OSD_BLEND_DIN3_SCOPE_V 0x39b8
+#define VIU_OSD_BLEND_DUMMY_DATA0 0x39b9
+#define VIU_OSD_BLEND_DUMMY_ALPHA 0x39ba
+#define VIU_OSD_BLEND_BLEND0_SIZE 0x39bb
+#define VIU_OSD_BLEND_BLEND1_SIZE 0x39bc
+#define VIU_OSD_BLEND_RO_CURRENT_XY 0x39bf
+
+#define VPP_OUT_H_V_SIZE 0x1da5
+
+#define VPP_VD2_HDR_IN_SIZE 0x1df0
+#define VPP_OSD1_IN_SIZE 0x1df1
+#define VPP_GCLK_CTRL2 0x1df2
+#define VD2_PPS_DUMMY_DATA 0x1df4
+#define VPP_OSD1_BLD_H_SCOPE 0x1df5
+#define VPP_OSD1_BLD_V_SCOPE 0x1df6
+#define VPP_OSD2_BLD_H_SCOPE 0x1df7
+#define VPP_OSD2_BLD_V_SCOPE 0x1df8
+#define VPP_WRBAK_CTRL 0x1df9
+#define VPP_SLEEP_CTRL 0x1dfa
+#define VD1_BLEND_SRC_CTRL 0x1dfb
+#define VD2_BLEND_SRC_CTRL 0x1dfc
+#define OSD1_BLEND_SRC_CTRL 0x1dfd
+#define OSD2_BLEND_SRC_CTRL 0x1dfe
+
+#define VPP_POST_BLEND_BLEND_DUMMY_DATA 0x3968
+#define VPP_POST_BLEND_DUMMY_ALPHA 0x3969
+#define VPP_RDARB_MODE 0x3978
+#define VPP_RDARB_REQEN_SLV 0x3979
+#define VPU_RDARB_MODE_L2C1 0x279d
+
#endif /* __MESON_REGISTERS_H */
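The new defines keep the datasheet's word-index numbering (0x3d60, 0x3a00, ...); the driver turns an index into a byte offset through its _REG() helper before adding it to the MMIO base. A tiny stand-alone sketch of that conversion, assuming the usual "index << 2" definition -- the macro itself lives outside this diff, so the shift is an assumption here.

/* Illustrative sketch only: word index -> byte offset for 32-bit regs. */
#include <stdio.h>

static unsigned int reg_to_byte_offset(unsigned int reg)
{
	return reg << 2;	/* assumed _REG()-style: index * 4 */
}

int main(void)
{
	/* e.g. VPP_WRAP_OSD1_MATRIX_EN_CTRL (0x3d6d) from the list above */
	printf("0x%x -> byte offset 0x%x\n", 0x3d6d,
	       reg_to_byte_offset(0x3d6d));
	return 0;
}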
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f6ba35a405f8..b39034745444 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -113,9 +113,12 @@
#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
+#define HHI_HDMI_PLL_CNTL7 0x338 /* 0xce offset in data sheet */
#define HDMI_PLL_RESET BIT(28)
+#define HDMI_PLL_RESET_G12A BIT(29)
#define HDMI_PLL_LOCK BIT(31)
+#define HDMI_PLL_LOCK_G12A (3 << 30)
#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
@@ -257,6 +260,10 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4800023d);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
@@ -271,11 +278,26 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
HDMI_PLL_RESET, HDMI_PLL_RESET);
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET, 0);
- }
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
- (val & HDMI_PLL_LOCK), 10, 0);
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00010000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x6a28dc00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x56540000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x3a0504f7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ ((val & HDMI_PLL_LOCK_G12A) == HDMI_PLL_LOCK_G12A),
+ 10, 0);
+ }
/* Disable VCLK2 */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
@@ -288,8 +310,13 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
VCLK2_DIV_MASK, (55 - 1));
/* select vid_pll for vclk2 */
- regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
- VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SEL_MASK, (0 << VCLK2_SEL_SHIFT));
+ else
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
+
/* enable vclk2 gate */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, VCLK2_EN);
@@ -396,8 +423,8 @@ struct meson_vclk_params {
},
[MESON_VCLK_HDMI_297000] = {
.pixel_freq = 297000,
- .pll_base_freq = 2970000,
- .pll_od1 = 1,
+ .pll_base_freq = 5940000,
+ .pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
.vid_pll_div = VID_PLL_DIV_5,
@@ -476,32 +503,80 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x0b3a0400 | m);
+
+ /* Enable and reset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 0x3 << 28, 0x3 << 28);
+
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, frac);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
+
+ /* The G12A HDMI PLL needs specific parameters for 5.4GHz */
+ if (m >= 0xf7) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0xea68dc00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x55540000);
+ } else {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0a691c00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x33771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39270000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x50540000);
+ }
+
+ do {
+ /* Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET_G12A, HDMI_PLL_RESET_G12A);
+
+ /* UN-Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET_G12A, 0);
+
+ /* Poll for lock bits */
+ if (!regmap_read_poll_timeout(priv->hhi,
+ HHI_HDMI_PLL_CNTL, val,
+ ((val & HDMI_PLL_LOCK_G12A)
+ == HDMI_PLL_LOCK_G12A),
+ 10, 100))
+ break;
+ } while (1);
}
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 16, pll_od_to_reg(od1) << 16);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 21, pll_od_to_reg(od1) << 21);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 16, pll_od_to_reg(od1) << 16);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 22, pll_od_to_reg(od2) << 22);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 23, pll_od_to_reg(od2) << 23);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 18, pll_od_to_reg(od2) << 18);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 18, pll_od_to_reg(od3) << 18);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 19, pll_od_to_reg(od3) << 19);
-
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 20, pll_od_to_reg(od3) << 20);
}
#define XTAL_FREQ 24000
@@ -518,6 +593,7 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
#define HDMI_FRAC_MAX_GXBB 4096
#define HDMI_FRAC_MAX_GXL 1024
+#define HDMI_FRAC_MAX_G12A 131072
static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int m,
@@ -534,6 +610,9 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
parent_freq *= 2;
}
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ frac_max = HDMI_FRAC_MAX_G12A;
+
/* We can have a perfect match !*/
if (pll_freq / m == parent_freq &&
pll_freq % m == 0)
@@ -559,7 +638,8 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
@@ -714,6 +794,23 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0x140b4 : 0x18000;
+ break;
+ case 4320000:
+ m = vic_alternate_clock ? 0xb3 : 0xb4;
+ frac = vic_alternate_clock ? 0x1a3ee : 0;
+ break;
+ case 5940000:
+ m = 0xf7;
+ frac = vic_alternate_clock ? 0x8148 : 0x10000;
+ break;
+ }
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
}
/* Setup vid_pll divider */
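The G12A branch above keeps resetting the PLL and re-polling both lock bits with a bounded regmap_read_poll_timeout() until lock is reported, rather than relying on a single unbounded poll. Below is a self-contained sketch of that reset-and-poll-until-locked shape; pll_reset() and pll_locked() are simulated stand-ins, not driver functions.

/* Illustrative sketch only: retry "reset, bounded poll" until lock. */
#include <stdbool.h>
#include <stdio.h>

#define LOCK_POLL_TRIES	100	/* poll budget per reset cycle */

static int attempts;

static void pll_reset(void)	/* pretend: toggle the reset bit */
{
	attempts++;
}

static bool pll_locked(void)	/* pretend: lock appears on the 3rd reset */
{
	return attempts >= 3;
}

int main(void)
{
	for (;;) {
		int i;

		pll_reset();

		/* bounded poll, like regmap_read_poll_timeout() above */
		for (i = 0; i < LOCK_POLL_TRIES; i++) {
			if (pll_locked()) {
				printf("locked after %d reset(s)\n", attempts);
				return 0;
			}
		}
		/* not locked within the budget: reset and try again */
	}
}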
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 66d73a932d19..6faca7313339 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -73,7 +73,9 @@
/* HHI Registers */
#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
@@ -1675,8 +1677,13 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
void meson_venc_init(struct meson_drm *priv)
{
/* Disable CVBS VDAC */
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 8);
+ } else {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ }
/* Power Down Dacs */
writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index d622d817b6df..2c5341c881c4 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -37,7 +37,9 @@
/* HHI VDAC Registers */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
struct meson_venc_cvbs {
struct drm_encoder encoder;
@@ -166,8 +168,13 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
struct meson_drm *priv = meson_venc_cvbs->priv;
/* Disable CVBS VDAC */
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
+ } else {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ }
}
static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
@@ -179,13 +186,17 @@ static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
/* VDAC0 source is not from ATV */
writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
-
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
+ }
}
static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index e46e05f50bad..b59072342cae 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -25,7 +25,6 @@
#include "meson_viu.h"
#include "meson_vpp.h"
#include "meson_venc.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/**
@@ -91,8 +90,36 @@ static int eotf_bypass_coeff[EOTF_COEFF_SIZE] = {
EOTF_COEFF_RIGHTSHIFT /* right shift */
};
-void meson_viu_set_osd_matrix(struct meson_drm *priv,
- enum viu_matrix_sel_e m_select,
+static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
+ int *m, bool csc_on)
+{
+ /* VPP WRAP OSD1 matrix */
+ writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET0_1));
+ writel(m[2] & 0xfff,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET2));
+ writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF00_01));
+ writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF02_10));
+ writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
+ writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
+ writel((m[11] & 0x1fff) << 16,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
+
+ writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET0_1));
+ writel(m[20] & 0xfff,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET2));
+
+ writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
+}
+
+static void meson_viu_set_osd_matrix(struct meson_drm *priv,
+ enum viu_matrix_sel_e m_select,
int *m, bool csc_on)
{
if (m_select == VIU_MATRIX_OSD) {
@@ -160,10 +187,10 @@ void meson_viu_set_osd_matrix(struct meson_drm *priv,
#define OSD_EOTF_LUT_SIZE 33
#define OSD_OETF_LUT_SIZE 41
-void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
- unsigned int *r_map, unsigned int *g_map,
- unsigned int *b_map,
- bool csc_on)
+static void
+meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
+ unsigned int *r_map, unsigned int *g_map,
+ unsigned int *b_map, bool csc_on)
{
unsigned int addr_port;
unsigned int data_port;
@@ -337,14 +364,24 @@ void meson_viu_init(struct meson_drm *priv)
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
meson_viu_load_matrix(priv);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
+ true);
/* Initialize OSD1 fifo control register */
reg = BIT(0) | /* Urgent DDR request priority */
- (4 << 5) | /* hold_fifo_lines */
- (3 << 10) | /* burst length 64 */
- (32 << 12) | /* fifo_depth_val: 32*8=256 */
- (2 << 22) | /* 4 words in 1 burst */
- (2 << 24);
+ (4 << 5); /* hold_fifo_lines */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ reg |= (1 << 10) | /* burst length 32 */
+ (32 << 12) | /* fifo_depth_val: 32*8=256 */
+ (2 << 22) | /* 4 words in 1 burst */
+ (2 << 24) |
+ (1 << 31);
+ else
+ reg |= (3 << 10) | /* burst length 64 */
+ (32 << 12) | /* fifo_depth_val: 32*8=256 */
+ (2 << 22) | /* 4 words in 1 burst */
+ (2 << 24);
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
@@ -370,6 +407,30 @@ void meson_viu_init(struct meson_drm *priv)
writel_relaxed(0x00FF00C0,
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_relaxed(4 << 29 |
+ 1 << 27 |
+ 1 << 26 | /* blend_din0 input to blend0 */
+ 1 << 25 | /* blend1_dout to blend2 */
+ 1 << 24 | /* blend1_din3 input to blend1 */
+ 1 << 20 |
+ 0 << 16 |
+ 1,
+ priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ writel_relaxed(3 << 8 |
+ 1 << 20,
+ priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
+ writel_relaxed(1 << 20,
+ priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
+ writel_relaxed(0,
+ priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0));
+ writel_relaxed(0,
+ priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA));
+ writel_bits_relaxed(0x3 << 2, 0x3 << 2,
+ priv->io_base + _REG(DOLBY_PATH_CTRL));
+ }
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index f9efb431e953..8c52a3455ef4 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -112,32 +112,39 @@ void meson_vpp_init(struct meson_drm *priv)
writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
- }
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
/* Initialize vpu fifo control registers */
- writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
- 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_relaxed(0xfff << 20 | 0x1000,
+ priv->io_base + _REG(VPP_OFIFO_SIZE));
+ else
+ writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
+ 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES));
- /* Turn off preblend */
- writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Turn off POSTBLEND */
- writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Force all planes off */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
- VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
- VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Setup default VD settings */
- writel_relaxed(4096,
- priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
- writel_relaxed(4096,
- priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ /* Turn off preblend */
+ writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Turn off POSTBLEND */
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Force all planes off */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
+ VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Setup default VD settings */
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ }
/* Disable Scalers */
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 0aaedc554879..8c31e4422cae 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -113,7 +113,7 @@ struct mga_framebuffer {
};
struct mga_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct mga_framebuffer mfb;
void *sysram;
int size;
@@ -269,7 +269,6 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 6893934b26c0..5b7e64cac004 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -195,8 +195,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
goto err_alloc_fbi;
}
- info->par = mfbdev;
-
ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
if (ret)
goto err_alloc_fbi;
@@ -209,17 +207,13 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
/* setup helper */
mfbdev->helper.fb = fb;
- strcpy(info->fix.id, "mgadrmfb");
-
info->fbops = &mgag200fb_ops;
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = mdev->mc.vram_size;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
- sizes->fb_height);
+ drm_fb_helper_fill_info(info, &mfbdev->helper, sizes);
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d96a9b32455e..bd42365a8aa8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -178,7 +178,6 @@ int mgag200_mm_init(struct mga_device *mdev)
ret = ttm_bo_device_init(&mdev->ttm.bdev,
&mgag200_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -345,13 +344,8 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct mga_device *mdev;
+ struct drm_file *file_priv = filp->private_data;
+ struct mga_device *mdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- mdev = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 78c9e5a5e793..9f2029eca39f 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -21,6 +21,11 @@ config DRM_MSM
help
DRM/KMS driver for MSM/snapdragon.
+config DRM_MSM_GPU_STATE
+ bool
+ depends on DRM_MSM && (DEBUG_FS || DEV_COREDUMP)
+ default y
+
config DRM_MSM_REGISTER_LOGGING
bool "MSM DRM register logging"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 56a70c74af4e..7a05cbf2f820 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/gpu/drm/msm
-ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
-ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
+ccflags-y := -I $(srctree)/$(src)
+ccflags-y += -I $(srctree)/$(src)/disp/dpu1
+ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
msm-y := \
adreno/adreno_device.o \
@@ -15,7 +15,6 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
- adreno/a6xx_gpu_state.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -96,6 +95,8 @@ msm-y := \
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
+
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d5f5e56422f5..e5fcefa49f19 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,9 +15,6 @@
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
-#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
@@ -30,94 +27,6 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
-{
- struct device *dev = &gpu->pdev->dev;
- const struct firmware *fw;
- struct device_node *np;
- struct resource r;
- phys_addr_t mem_phys;
- ssize_t mem_size;
- void *mem_region = NULL;
- int ret;
-
- if (!IS_ENABLED(CONFIG_ARCH_QCOM))
- return -EINVAL;
-
- np = of_get_child_by_name(dev->of_node, "zap-shader");
- if (!np)
- return -ENODEV;
-
- np = of_parse_phandle(np, "memory-region", 0);
- if (!np)
- return -EINVAL;
-
- ret = of_address_to_resource(np, 0, &r);
- if (ret)
- return ret;
-
- mem_phys = r.start;
- mem_size = resource_size(&r);
-
- /* Request the MDT file for the firmware */
- fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
- if (IS_ERR(fw)) {
- DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
- return PTR_ERR(fw);
- }
-
- /* Figure out how much memory we need */
- mem_size = qcom_mdt_get_size(fw);
- if (mem_size < 0) {
- ret = mem_size;
- goto out;
- }
-
- /* Allocate memory for the firmware image */
- mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
- if (!mem_region) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Load the rest of the MDT
- *
- * Note that we could be dealing with two different paths, since
- * with upstream linux-firmware it would be in a qcom/ subdir..
- * adreno_request_fw() handles this, but qcom_mdt_load() does
- * not. But since we've already gotten thru adreno_request_fw()
- * we know which of the two cases it is:
- */
- if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
- ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size, NULL);
- } else {
- char *newname;
-
- newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
-
- ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size, NULL);
- kfree(newname);
- }
- if (ret)
- goto out;
-
- /* Send the image to the secure world */
- ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
- if (ret)
- DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
-
-out:
- if (mem_region)
- memunmap(mem_region);
-
- release_firmware(fw);
-
- return ret;
-}
-
static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -563,8 +472,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct platform_device *pdev = gpu->pdev;
int ret;
/*
@@ -574,23 +481,9 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
if (loaded)
return a5xx_zap_shader_resume(gpu);
- /* We need SCM to be able to load the firmware */
- if (!qcom_scm_is_available()) {
- DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
- return -EPROBE_DEFER;
- }
-
- /* Each GPU has a target specific zap shader firmware name to use */
- if (!adreno_gpu->info->zapfw) {
- DRM_DEV_ERROR(&pdev->dev,
- "Zap shader firmware file not specified for this target\n");
- return -ENODEV;
- }
-
- ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw);
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
loaded = !ret;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d1662a75c7ec..38e2cfa9cec7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -3,12 +3,31 @@
#include <linux/clk.h>
#include <linux/interconnect.h>
+#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* FIXME: add a banner here */
+ gmu->hung = true;
+
+ /* Turn off the hangcheck timer while we are resetting */
+ del_timer(&gpu->hangcheck_timer);
+
+ /* Queue the GPU handler because we need to treat this as a recovery */
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
struct a6xx_gmu *gmu = data;
@@ -20,8 +39,7 @@ static irqreturn_t a6xx_gmu_irq(int irq, void *data)
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
- /* Temporary until we can recover safely */
- BUG();
+ a6xx_gmu_fault(gmu);
}
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
@@ -45,8 +63,7 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
- /* Temporary until we can recover safely */
- BUG();
+ a6xx_gmu_fault(gmu);
}
return IRQ_HANDLED;
@@ -165,10 +182,8 @@ static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
}
/* Wait for the GMU to get to its most idle state */
-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-
return spin_until(a6xx_gmu_check_idle_level(gmu));
}
@@ -567,7 +582,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
if (!rpmh_init) {
a6xx_gmu_rpmh_init(gmu);
rpmh_init = true;
- } else if (state != GMU_RESET) {
+ } else {
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
@@ -633,20 +648,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
-static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
-{
- gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
- gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
-
- gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
- ~A6XX_GMU_IRQ_MASK);
- gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
- ~A6XX_HFI_IRQ_MASK);
-
- enable_irq(gmu->gmu_irq);
- enable_irq(gmu->hfi_irq);
-}
-
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
disable_irq(gmu->gmu_irq);
@@ -656,21 +657,10 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
+static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- int ret;
u32 val;
- /* Flush all the queues */
- a6xx_hfi_stop(gmu);
-
- /* Stop the interrupts */
- a6xx_gmu_irq_disable(gmu);
-
- /* Force off SPTP in case the GMU is managing it */
- a6xx_sptprac_disable(gmu);
-
/* Make sure there are no outstanding RPMh votes */
gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
@@ -680,37 +670,22 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
(val & 1), 100, 10000);
gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
+}
- /* Force off the GX GSDC */
- regulator_force_disable(gmu->gx);
-
- /* Disable the resources */
- clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
- pm_runtime_put_sync(gmu->dev);
-
- /* Re-enable the resources */
- pm_runtime_get_sync(gmu->dev);
-
- /* Use a known rate to bring up the GMU */
- clk_set_rate(gmu->core_clk, 200000000);
- ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
- if (ret)
- goto out;
-
- a6xx_gmu_irq_enable(gmu);
-
- ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
- if (!ret)
- ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
+/* Force the GMU off in case it isn't responsive */
+static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+{
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
- /* Set the GPU back to the highest power frequency */
- __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
-out:
- if (ret)
- a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
- return ret;
+ /* Make sure there are no outstanding RPMh votes */
+ a6xx_gmu_rpmh_off(gmu);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
@@ -723,19 +698,26 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
return 0;
+ gmu->hung = false;
+
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
- if (ret)
- goto out;
+ if (ret) {
+ pm_runtime_put(gmu->dev);
+ return ret;
+ }
/* Set the bus quota to a reasonable value for boot */
icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
- a6xx_gmu_irq_enable(gmu);
+ /* Enable the GMU interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
+ enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
@@ -746,14 +728,35 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
goto out;
ret = a6xx_hfi_start(gmu, status);
+ if (ret)
+ goto out;
+
+ /*
+ * Turn on the GMU firmware fault interrupt after we know the boot
+ * sequence is successful
+ */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
+ enable_irq(gmu->hfi_irq);
/* Set the GPU to the highest power frequency */
__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /*
+ * "enable" the GX power domain which won't actually do anything but it
+ * will make sure that the refcounting is correct in case we need to
+ * bring down the GX after a GMU failure
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_get(gmu->gxpd);
+
out:
- /* Make sure to turn off the boot OOB request on error */
- if (ret)
- a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ /* On failure, shut down the GMU to leave it in a good state */
+ if (ret) {
+ disable_irq(gmu->gmu_irq);
+ a6xx_rpmh_stop(gmu);
+ pm_runtime_put(gmu->dev);
+ }
return ret;
}
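The reworked resume path now unwinds on failure instead of leaving a half-initialized GMU behind: the error branch disables the interrupt, stops RPMh and drops the runtime PM reference it took. A small illustrative C sketch of the same acquire-in-order / release-in-reverse idiom; the resource names are made up stand-ins for the driver's power, clock and IRQ calls.

    #include <stdio.h>

    static int enable_power(void)     { puts("power on");   return 0; }
    static void disable_power(void)   { puts("power off");            }
    static int enable_clocks(void)    { puts("clocks on");  return 0; }
    static void disable_clocks(void)  { puts("clocks off");           }
    static int enable_irq_line(void)  { puts("irq on");     return -1; } /* simulated failure */

    static int resume(void)
    {
        int ret;

        ret = enable_power();
        if (ret)
            return ret;

        ret = enable_clocks();
        if (ret)
            goto err_power;

        ret = enable_irq_line();
        if (ret)
            goto err_clocks;

        return 0;

    err_clocks:
        disable_clocks();
    err_power:
        disable_power();
        return ret;
    }

    int main(void)
    {
        printf("resume: %d\n", resume());
        return 0;
    }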
@@ -773,11 +776,12 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
-int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+/* Gracefully try to shut down the GMU and by extension the GPU */
+static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 val;
/*
@@ -787,10 +791,19 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
if (val != 0xf) {
- int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
+ int ret = a6xx_gmu_wait_for_idle(gmu);
- /* Temporary until we can recover safely */
- BUG_ON(ret);
+ /* If the GMU isn't responding assume it is hung */
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
+
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
+ == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
/* tell the GMU we want to slumber */
a6xx_gmu_notify_slumber(gmu);
@@ -822,10 +835,37 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+}
+
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct msm_gpu *gpu = &a6xx_gpu->base.base;
+
+ if (!pm_runtime_active(gmu->dev))
+ return 0;
+
+ /*
+ * Force the GMU off if we detected a hang, otherwise try to shut it
+ * down gracefully
+ */
+ if (gmu->hung)
+ a6xx_gmu_force_off(gmu);
+ else
+ a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
icc_set_bw(gpu->icc_path, 0, 0);
+ /*
+ * Make sure the GX domain is off before turning off the GMU (CX)
+ * domain. Usually the GMU does this but only if the shutdown sequence
+ * was successful
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_put_sync(gmu->gxpd);
+
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->dev);
@@ -948,25 +988,20 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
}
/* Return the 'arc-level' for the given frequency */
-static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
+static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
+ unsigned long freq)
{
struct dev_pm_opp *opp;
- struct device_node *np;
- u32 val = 0;
+ unsigned int val;
if (!freq)
return 0;
- opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
if (IS_ERR(opp))
return 0;
- np = dev_pm_opp_get_of_node(opp);
-
- if (np) {
- of_property_read_u32(np, "opp-level", &val);
- of_node_put(np);
- }
+ val = dev_pm_opp_get_level(opp);
dev_pm_opp_put(opp);
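a6xx_gmu_get_arc_level() now asks the OPP core directly for the level attached to an exact frequency via dev_pm_opp_get_level() instead of parsing the opp-level property by hand. The sketch below shows the same exact-frequency-to-level lookup in plain C; the table contents are invented example values.

    #include <stddef.h>
    #include <stdio.h>

    struct opp { unsigned long freq; unsigned int level; };

    static const struct opp opps[] = {
        { 257000000UL, 1 },
        { 342000000UL, 2 },
        { 414000000UL, 3 },
        { 710000000UL, 4 },
    };

    /* Return the level for an exact frequency match, or 0 if not found. */
    static unsigned int get_level(unsigned long freq)
    {
        for (size_t i = 0; i < sizeof(opps) / sizeof(opps[0]); i++)
            if (opps[i].freq == freq)
                return opps[i].level;
        return 0;
    }

    int main(void)
    {
        printf("level for 414 MHz: %u\n", get_level(414000000UL));
        return 0;
    }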
@@ -1002,7 +1037,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
/* Construct a vote for each frequency */
for (i = 0; i < freqs_count; i++) {
u8 pindex = 0, sindex = 0;
- u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+ unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
/* Get the primary index that matches the arc level */
for (j = 0; j < pri_count; j++) {
@@ -1195,9 +1230,15 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
if (IS_ERR_OR_NULL(gmu->mmio))
return;
- pm_runtime_disable(gmu->dev);
a6xx_gmu_stop(a6xx_gpu);
+ pm_runtime_disable(gmu->dev);
+
+ if (!IS_ERR_OR_NULL(gmu->gxpd)) {
+ pm_runtime_disable(gmu->gxpd);
+ dev_pm_domain_detach(gmu->gxpd, false);
+ }
+
a6xx_gmu_irq_disable(gmu);
a6xx_gmu_memory_free(gmu, gmu->hfi);
@@ -1223,7 +1264,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
pm_runtime_enable(gmu->dev);
- gmu->gx = devm_regulator_get(gmu->dev, "vdd");
/* Get the list of clocks */
ret = a6xx_gmu_clocks_probe(gmu);
@@ -1257,6 +1297,12 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
goto err;
+ /*
+ * Get a link to the GX power domain to reset the GPU in case of GMU
+ * crash
+ */
+ gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
+
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
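The probe path now attaches the GX power domain by name so the driver can take a reference on resume (pm_runtime_get) and drop it on stop (pm_runtime_put_sync), keeping the domain refcount balanced even after a GMU failure. A toy C sketch of that balanced get/put discipline on an optional handle; struct pd and its helpers are stand-ins, not kernel APIs.

    #include <stdio.h>

    struct pd { int refcount; };

    /* Both helpers tolerate a NULL handle, like the IS_ERR_OR_NULL checks above. */
    static void pd_get(struct pd *pd) { if (pd) pd->refcount++; }
    static void pd_put(struct pd *pd) { if (pd) pd->refcount--; }

    int main(void)
    {
        struct pd gx = { 0 };
        struct pd *gxpd = &gx;   /* may be NULL if attaching the domain failed */

        pd_get(gxpd);            /* resume path */
        pd_put(gxpd);            /* stop path */
        printf("gx refcount: %d\n", gx.refcount);
        return 0;
    }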
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index c721d9165d8e..bedd8e6a63aa 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -27,9 +27,6 @@ struct a6xx_gmu_bo {
/* the GMU is coming up for the first time or back from a power collapse */
#define GMU_COLD_BOOT 1
-/* The GMU is being soft reset after a fault */
-#define GMU_RESET 2
-
/*
* These define the level of control that the GMU has - the higher the number
* the more things that the GMU hardware controls on its own.
@@ -52,11 +49,11 @@ struct a6xx_gmu {
int hfi_irq;
int gmu_irq;
- struct regulator *gx;
-
struct iommu_domain *domain;
u64 uncached_iova_base;
+ struct device *gxpd;
+
int idle_level;
struct a6xx_gmu_bo *hfi;
@@ -78,7 +75,7 @@ struct a6xx_gmu {
struct a6xx_hfi_queue queues[2];
- struct tasklet_struct hfi_tasklet;
+ bool hung;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index fefe773c989e..e74dce474250 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,8 @@
#include <linux/devfreq.h>
+#define GPU_PAS_ID 13
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -343,6 +345,20 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
return 0;
}
+static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ static bool loaded;
+ int ret;
+
+ if (loaded)
+ return 0;
+
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+ loaded = !ret;
+ return ret;
+}
+
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
@@ -491,7 +507,28 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (ret)
goto out;
- gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ /*
+ * Try to load a zap shader into the secure world. If successful
+ * we can use the CP to switch out of secure mode. If not then we
+ * have no recourse but to try to switch ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a6xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a6xx_flush(gpu, gpu->rb[0]);
+ if (!a6xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else {
+ /* Print a warning so if we die, we know why */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ ret = 0;
+ }
out:
/*
@@ -678,13 +715,15 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
- ret = a6xx_gmu_resume(a6xx_gpu);
-
gpu->needs_hw_init = true;
+ ret = a6xx_gmu_resume(a6xx_gpu);
+ if (ret)
+ return ret;
+
msm_gpu_resume_devfreq(gpu);
- return ret;
+ return 0;
}
static int a6xx_pm_suspend(struct msm_gpu *gpu)
@@ -694,18 +733,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
- /*
- * Make sure the GMU is idle before continuing (because some transitions
- * may use VBIF
- */
- a6xx_gmu_wait_for_idle(a6xx_gpu);
-
- /* Clear the VBIF pipe before shutting down */
- /* FIXME: This accesses the GPU - do we need to make sure it is on? */
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
return a6xx_gmu_stop(a6xx_gpu);
}
@@ -781,14 +808,16 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
.show = a6xx_show,
#endif
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gmu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
+#endif
},
.get_timestamp = a6xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 528a4cfe07cd..b46279eb18c5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -46,9 +46,8 @@ struct a6xx_gpu {
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 714ed6505e47..b907245d3d96 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -155,6 +155,7 @@ static const struct adreno_info gpulist[] = {
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
+ .zapfw = "a630_zap.mdt",
},
};
@@ -229,6 +230,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_sync(&pdev->dev);
DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
return NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 27898475cdf4..6f7f4114afcf 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -19,13 +19,148 @@
#include <linux/ascii85.h>
#include <linux/interconnect.h>
+#include <linux/qcom_scm.h>
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
+static bool zap_available = true;
+
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ u32 pasid)
+{
+ struct device *dev = &gpu->pdev->dev;
+ const struct firmware *fw;
+ struct device_node *np, *mem_np;
+ struct resource r;
+ phys_addr_t mem_phys;
+ ssize_t mem_size;
+ void *mem_region = NULL;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np) {
+ zap_available = false;
+ return -ENODEV;
+ }
+
+ mem_np = of_parse_phandle(np, "memory-region", 0);
+ of_node_put(np);
+ if (!mem_np) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(mem_np, 0, &r);
+ of_node_put(mem_np);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
+
+ /* Request the MDT file for the firmware */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ if (IS_ERR(fw)) {
+ DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
+ return PTR_ERR(fw);
+ }
+
+ /* Figure out how much memory we need */
+ mem_size = qcom_mdt_get_size(fw);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto out;
+ }
+
+ /* Allocate memory for the firmware image */
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
+ if (!mem_region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Load the rest of the MDT
+ *
+ * Note that we could be dealing with two different paths, since
+ * with upstream linux-firmware it would be in a qcom/ subdir.
+ * adreno_request_fw() handles this, but qcom_mdt_load() does
+ * not. But since we've already gotten through adreno_request_fw()
+ * we know which of the two cases it is:
+ */
+ if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ ret = qcom_mdt_load(dev, fw, fwname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ } else {
+ char *newname;
+
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+
+ ret = qcom_mdt_load(dev, fw, newname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
+ }
+ if (ret)
+ goto out;
+
+ /* Send the image to the secure world */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+
+ /*
+ * If the scm call returns -EOPNOTSUPP we assume that this target
+ * doesn't need/support the zap shader so quietly fail
+ */
+ if (ret == -EOPNOTSUPP)
+ zap_available = false;
+ else if (ret)
+ DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
+
+out:
+ if (mem_region)
+ memunmap(mem_region);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct platform_device *pdev = gpu->pdev;
+
+ /* Short cut if we determine the zap shader isn't available/needed */
+ if (!zap_available)
+ return -ENODEV;
+
+ /* We need SCM to be able to load the firmware */
+ if (!qcom_scm_is_available()) {
+ DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* Each GPU has a target specific zap shader firmware name to use */
+ if (!adreno_gpu->info->zapfw) {
+ zap_available = false;
+ DRM_DEV_ERROR(&pdev->dev,
+ "Zap shader firmware file not specified for this target\n");
+ return -ENODEV;
+ }
+
+ return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
+}
+
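zap_shader_load_mdt() above has to cope with two possible firmware paths, prefixing the name with "qcom/" when the file did not come from the legacy location. A minimal user-space sketch of that name-building step, with asprintf() standing in for kasprintf() and an example firmware name.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *fwname = "a630_zap.mdt";
        char *newname = NULL;

        if (asprintf(&newname, "qcom/%s", fwname) < 0)
            return 1;

        /* In the driver this prefixed name is handed to qcom_mdt_load(). */
        printf("prefixed firmware name: %s\n", newname);

        free(newname);
        return 0;
    }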
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -63,6 +198,12 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_NR_RINGS:
*value = gpu->nr_rings;
return 0;
+ case MSM_PARAM_PP_PGTABLE:
+ *value = 0;
+ return 0;
+ case MSM_PARAM_FAULTS:
+ *value = gpu->global_faults;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 5db459bc28a7..0925606ec9b5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -252,6 +252,12 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state);
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
+/*
+ * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
+ * out of secure mode
+ */
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);
+
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index b776fca571f3..dfdfa766da8f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -46,6 +46,9 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
+/* timeout in ms waiting for frame done */
+#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
+
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -425,65 +428,6 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc,
trace_dpu_crtc_complete_commit(DRMID(crtc));
}
-static void _dpu_crtc_setup_mixer_for_encoder(
- struct drm_crtc *crtc,
- struct drm_encoder *enc)
-{
- struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
- struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
- struct dpu_rm *rm = &dpu_kms->rm;
- struct dpu_crtc_mixer *mixer;
- struct dpu_hw_ctl *last_valid_ctl = NULL;
- int i;
- struct dpu_rm_hw_iter lm_iter, ctl_iter;
-
- dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
- dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
-
- /* Set up all the mixers and ctls reserved by this encoder */
- for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
- mixer = &cstate->mixers[i];
-
- if (!dpu_rm_get_hw(rm, &lm_iter))
- break;
- mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-
- /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
- if (!dpu_rm_get_hw(rm, &ctl_iter)) {
- DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
- mixer->hw_lm->idx - LM_0);
- mixer->lm_ctl = last_valid_ctl;
- } else {
- mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
- last_valid_ctl = mixer->lm_ctl;
- }
-
- /* Shouldn't happen, mixers are always >= ctls */
- if (!mixer->lm_ctl) {
- DPU_ERROR("no valid ctls found for lm %d\n",
- mixer->hw_lm->idx - LM_0);
- return;
- }
-
- cstate->num_mixers++;
- DPU_DEBUG("setup mixer %d: lm %d\n",
- i, mixer->hw_lm->idx - LM_0);
- DPU_DEBUG("setup mixer %d: ctl %d\n",
- i, mixer->lm_ctl->idx - CTL_0);
- }
-}
-
-static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
-{
- struct drm_encoder *enc;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- /* Check for mixers on all encoders attached to this crtc */
- drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
- _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
-}
-
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -533,10 +477,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
dev = crtc->dev;
smmu_state = &dpu_crtc->smmu_state;
- if (!cstate->num_mixers) {
- _dpu_crtc_setup_mixers(crtc);
- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
- }
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
if (dpu_crtc->event) {
WARN_ON(dpu_crtc->event);
@@ -683,7 +624,7 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("frame done completion wait");
ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
- msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
if (!ret) {
DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
rc = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 5aa3307f3f0c..82bf16d61a45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -69,6 +69,9 @@
#define MAX_VDISPLAY_SPLIT 1080
+/* timeout in frames waiting for frame done */
+#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
+
/**
* enum dpu_enc_rc_events - events for resource control state machine
* @DPU_ENC_RC_EVENT_KICKOFF:
@@ -158,7 +161,7 @@ enum dpu_enc_rc_states {
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
- * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timeout_ms: frame done timeout in ms
* @frame_done_timer: watchdog timer for frame done event
* @vsync_event_timer: vsync timer
* @disp_info: local copy of msm_display_info struct
@@ -196,7 +199,7 @@ struct dpu_encoder_virt {
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
- atomic_t frame_done_timeout;
+ atomic_t frame_done_timeout_ms;
struct timer_list frame_done_timer;
struct timer_list vsync_event_timer;
@@ -520,8 +523,8 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
list_for_each_entry(cur_mode, &connector->modes, head) {
if (cur_mode->vdisplay == adj_mode->vdisplay &&
- cur_mode->hdisplay == adj_mode->hdisplay &&
- cur_mode->vrefresh == adj_mode->vrefresh) {
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
adj_mode->private = cur_mode->private;
adj_mode->private_flags |= cur_mode->private_flags;
}
@@ -959,10 +962,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
- struct dpu_rm_hw_iter pp_iter, ctl_iter;
+ struct drm_crtc *drm_crtc;
+ struct dpu_crtc_state *cstate;
+ struct dpu_rm_hw_iter hw_iter;
struct msm_display_topology topology;
struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
- int i = 0, ret;
+ struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
+ int num_lm = 0, num_ctl = 0;
+ int i, j, ret;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
@@ -990,10 +997,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
+ drm_for_each_crtc(drm_crtc, drm_enc->dev)
+ if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
+ break;
+
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
@@ -1001,21 +1012,41 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
- dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
+ }
+
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
+ num_ctl++;
}
- dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
+ hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
+ num_lm++;
}
+ cstate = to_dpu_crtc_state(drm_crtc->state);
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = hw_lm[i];
+ cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+ }
+
+ cstate->num_mixers = num_lm;
+
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1023,18 +1054,38 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
"at idx: %d\n", i);
- return;
+ goto error;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
"at idx: %d\n", i);
- return;
+ goto error;
}
phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = hw_ctl[i];
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
+ DPU_HW_BLK_INTF);
+ for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ struct dpu_hw_intf *hw_intf;
+
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+
+ hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ if (hw_intf->idx == phys->intf_idx)
+ phys->hw_intf = hw_intf;
+ }
+
+ if (!phys->hw_intf) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no intf block assigned at idx: %d\n",
+ i);
+ goto error;
+ }
+
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
@@ -1042,6 +1093,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
}
dpu_enc->mode_set_complete = true;
+
+error:
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
}
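With mixer/CTL assignment moved into dpu_encoder_virt_mode_set(), each layer mixer i is paired with CTL i while CTLs last, and the final CTL is shared once mixers outnumber CTLs. A stand-alone sketch of that pairing rule, using example counts rather than values from real hardware.

    #include <stdio.h>

    int main(void)
    {
        int num_lm = 2, num_ctl = 1;

        for (int i = 0; i < num_lm; i++) {
            /* Use CTL i if one exists, otherwise fall back to the last CTL. */
            int ctl_idx = (i < num_ctl) ? i : (num_ctl - 1);

            printf("mixer %d -> ctl %d\n", i, ctl_idx);
        }
        return 0;
    }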
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1182,7 +1236,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
}
/* after phys waits for frame-done, should be no more frames pending */
- if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
del_timer_sync(&dpu_enc->frame_done_timer);
}
@@ -1339,7 +1393,7 @@ static void dpu_encoder_frame_done_callback(
}
if (!dpu_enc->frame_busy_mask[0]) {
- atomic_set(&dpu_enc->frame_done_timeout, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
del_timer(&dpu_enc->frame_done_timer);
dpu_encoder_resource_control(drm_enc,
@@ -1547,8 +1601,14 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
if (!ctl)
continue;
- if (phys->split_role != ENC_ROLE_SLAVE)
+ /*
+ * This is cleared in frame_done worker, which isn't invoked
+ * for async commits. So don't set this for async, since it'll
+ * roll over to the next commit.
+ */
+ if (!async && phys->split_role != ENC_ROLE_SLAVE)
set_bit(i, dpu_enc->frame_busy_mask);
+
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
@@ -1800,11 +1860,20 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
trace_dpu_enc_kickoff(DRMID(drm_enc));
- atomic_set(&dpu_enc->frame_done_timeout,
- DPU_FRAME_DONE_TIMEOUT * 1000 /
- drm_enc->crtc->state->adjusted_mode.vrefresh);
- mod_timer(&dpu_enc->frame_done_timer, jiffies +
- ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+ /*
+ * Asynchronous frames don't handle FRAME_DONE events. As such, they
+ * shouldn't enable the frame_done watchdog since it will always time
+ * out.
+ */
+ if (!async) {
+ unsigned long timeout_ms;
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+ }
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc, async);
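The frame-done watchdog is now programmed in milliseconds derived from a frame budget and the mode's refresh rate, rather than a raw constant. The arithmetic, shown stand-alone with a 60 Hz refresh rate as an example value.

    #include <stdio.h>

    int main(void)
    {
        unsigned int timeout_frames = 5;   /* DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES */
        unsigned int vrefresh = 60;        /* from drm_mode_vrefresh() */
        unsigned long timeout_ms;

        timeout_ms = timeout_frames * 1000UL / vrefresh;
        printf("frame done timeout: %lu ms\n", timeout_ms);   /* 83 ms */
        return 0;
    }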
@@ -2124,7 +2193,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
return;
- } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
return;
}
@@ -2170,7 +2239,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
spin_lock_init(&dpu_enc->enc_spinlock);
- atomic_set(&dpu_enc->frame_done_timeout, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index db94f3d3bea3..97fb868a4ef6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -200,6 +200,7 @@ struct dpu_encoder_irq {
* @hw_mdptop: Hardware interface to the top registers
* @hw_ctl: Hardware interface to the ctl registers
* @hw_pp: Hardware interface to the ping pong registers
+ * @hw_intf: Hardware interface to the intf registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @enabled: Whether the encoder has enabled and running a mode
@@ -228,6 +229,7 @@ struct dpu_encoder_phys {
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_intf *hw_intf;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
@@ -251,19 +253,6 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
}
/**
- * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
- * mode specific operations
- * @base: Baseclass physical encoder structure
- * @hw_intf: Hardware interface to the intf registers
- * @timing_params: Current timing parameter
- */
-struct dpu_encoder_phys_vid {
- struct dpu_encoder_phys base;
- struct dpu_hw_intf *hw_intf;
- struct intf_timing_params timing_params;
-};
-
-/**
* struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
* mode specific operations
* @base: Baseclass physical encoder structure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index a399e1edd313..973737fb5c9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -404,7 +404,8 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
return;
}
- tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+ tc_cfg.vsync_count = vsync_hz /
+ (mode->vtotal * drm_mode_vrefresh(mode));
/* enable external TE after kickoff to avoid premature autorefresh */
tc_cfg.hw_vsync_mode = 0;
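The tear-check divider is the vsync counter clock divided by the number of lines scanned per second (vtotal times the refresh rate), with the refresh rate now taken from drm_mode_vrefresh(). A stand-alone sketch of that computation with representative example numbers.

    #include <stdio.h>

    int main(void)
    {
        unsigned long vsync_hz = 19200000;   /* vsync counter clock */
        unsigned int vtotal = 2500;          /* total lines per frame */
        unsigned int vrefresh = 60;          /* from drm_mode_vrefresh() */

        unsigned long vsync_count = vsync_hz / (vtotal * vrefresh);

        printf("vsync_count: %lu\n", vsync_count);   /* 128 */
        return 0;
    }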
@@ -424,7 +425,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
- mode->vtotal, mode->vrefresh);
+ mode->vtotal, drm_mode_vrefresh(mode));
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 3c4eb470a82c..1b7a335a6140 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -18,14 +18,14 @@
#include "dpu_trace.h"
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
- (e) && (e)->base.parent ? \
- (e)->base.parent->base.id : -1, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
- (e) && (e)->base.parent ? \
- (e)->base.parent->base.id : -1, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
@@ -44,7 +44,7 @@ static bool dpu_encoder_phys_vid_is_master(
}
static void drm_mode_to_intf_timing_params(
- const struct dpu_encoder_phys_vid *vid_enc,
+ const struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct intf_timing_params *timing)
{
@@ -92,7 +92,7 @@ static void drm_mode_to_intf_timing_params(
timing->hsync_skew = mode->hskew;
/* DSI controller cannot handle active-low sync signals. */
- if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ if (phys_enc->hw_intf->cap->type == INTF_DSI) {
timing->hsync_polarity = 0;
timing->vsync_polarity = 0;
}
@@ -143,11 +143,11 @@ static u32 get_vertical_total(const struct intf_timing_params *timing)
* lines based on the chip worst case latencies.
*/
static u32 programmable_fetch_get_num_lines(
- struct dpu_encoder_phys_vid *vid_enc,
+ struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
u32 worst_case_needed_lines =
- vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
u32 start_of_frame_lines =
timing->v_back_porch + timing->vsync_pulse_width;
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
@@ -155,26 +155,26 @@ static u32 programmable_fetch_get_num_lines(
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"prog fetch is not needed, large vbp+vsw\n");
actual_vfp_lines = 0;
} else if (timing->v_front_porch < needed_vfp_lines) {
/* Warn fetch needed, but not enough porch in panel config */
pr_warn_once
("low vbp+vfp may lead to perf issues in some cases\n");
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"less vfp than fetch req, using entire vfp\n");
actual_vfp_lines = timing->v_front_porch;
} else {
- DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
actual_vfp_lines = needed_vfp_lines;
}
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
timing->v_front_porch, timing->v_back_porch,
timing->vsync_pulse_width);
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
@@ -194,8 +194,6 @@ static u32 programmable_fetch_get_num_lines(
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
- struct dpu_encoder_phys_vid *vid_enc =
- to_dpu_encoder_phys_vid(phys_enc);
struct intf_prog_fetch f = { 0 };
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
@@ -203,10 +201,10 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
u32 vfp_fetch_start_vsync_counter = 0;
unsigned long lock_flags;
- if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
return;
- vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
if (vfp_fetch_lines) {
vert_total = get_vertical_total(timing);
horiz_total = get_horizontal_total(timing);
@@ -216,12 +214,12 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
f.fetch_start = vfp_fetch_start_vsync_counter;
}
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
vfp_fetch_lines, vfp_fetch_start_vsync_counter);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}
@@ -231,7 +229,7 @@ static bool dpu_encoder_phys_vid_mode_fixup(
struct drm_display_mode *adj_mode)
{
if (phys_enc)
- DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
@@ -242,7 +240,6 @@ static bool dpu_encoder_phys_vid_mode_fixup(
static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
struct drm_display_mode mode;
struct intf_timing_params timing_params = { 0 };
const struct dpu_format *fmt = NULL;
@@ -256,13 +253,12 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
}
mode = phys_enc->cached_mode;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ if (!phys_enc->hw_intf->ops.setup_timing_gen) {
DPU_ERROR("timing engine setup is not supported\n");
return;
}
- DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
if (phys_enc->split_role != ENC_ROLE_SOLO) {
@@ -271,32 +267,30 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"split_role %d, halve horizontal %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
mode.hsync_start, mode.hsync_end);
}
- drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+ drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
fmt = dpu_get_dpu_format(fmt_fourcc);
- DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+ DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
- intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
-
- vid_enc->timing_params = timing_params;
}
static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
@@ -353,22 +347,10 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
phys_enc);
}
-static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_crtc_state *dpu_cstate;
-
- if (!phys_enc)
- return false;
-
- dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
-
- return dpu_cstate->num_ctls > 1;
-}
-
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+ return phys_enc->split_role != ENC_ROLE_SOLO;
}
static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
@@ -396,19 +378,15 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc || !phys_enc->dpu_kms) {
DPU_ERROR("invalid encoder/kms\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
- DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ DPU_DEBUG_VIDENC(phys_enc, "caching mode:\n");
}
_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
@@ -419,7 +397,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
bool enable)
{
int ret = 0;
- struct dpu_encoder_phys_vid *vid_enc;
int refcount;
if (!phys_enc) {
@@ -428,7 +405,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
}
refcount = atomic_read(&phys_enc->vblank_refcount);
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_vid_is_master(phys_enc))
@@ -453,7 +429,7 @@ end:
if (ret) {
DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ phys_enc->hw_intf->idx - INTF_0, ret, enable,
refcount);
}
return ret;
@@ -461,43 +437,17 @@ end:
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
- struct msm_drm_private *priv;
- struct dpu_encoder_phys_vid *vid_enc;
- struct dpu_rm_hw_iter iter;
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
- !phys_enc->parent->dev->dev_private) {
- DPU_ERROR("invalid encoder/device\n");
- return;
- }
- priv = phys_enc->parent->dev->dev_private;
-
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF);
- while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) {
- struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
-
- if (hw_intf->idx == phys_enc->intf_idx) {
- vid_enc->hw_intf = hw_intf;
- break;
- }
- }
-
- if (!vid_enc->hw_intf) {
- DPU_ERROR("hw_intf not assigned\n");
- return;
- }
-
- DPU_DEBUG_VIDENC(vid_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
- if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
- dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
@@ -510,12 +460,13 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
- ctl->ops.get_bitmask_intf(ctl, &flush_mask, vid_enc->hw_intf->idx);
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
- DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
- ctl->idx - CTL_0, flush_mask);
+ DPU_DEBUG_VIDENC(phys_enc,
+ "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -524,16 +475,13 @@ skip_flush:
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- DPU_DEBUG_VIDENC(vid_enc, "\n");
- kfree(vid_enc);
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+ kfree(phys_enc);
}
static void dpu_encoder_phys_vid_get_hw_resources(
@@ -589,7 +537,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_ctl *ctl;
int rc;
@@ -597,7 +544,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR("invalid encoder/parameters\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
if (!ctl || !ctl->ops.wait_reset_status)
@@ -609,7 +555,7 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
*/
rc = ctl->ops.wait_reset_status(ctl);
if (rc) {
- DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
}
@@ -618,7 +564,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
- struct dpu_encoder_phys_vid *vid_enc;
unsigned long lock_flags;
int ret;
@@ -629,16 +574,13 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
}
priv = phys_enc->parent->dev->dev_private;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
}
- DPU_DEBUG_VIDENC(vid_enc, "\n");
-
- if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
@@ -647,7 +589,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
}
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
if (dpu_encoder_phys_vid_is_master(phys_enc))
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -666,7 +608,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret);
+ phys_enc->hw_intf->idx - INTF_0, ret);
}
}
@@ -677,25 +619,21 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
unsigned long lock_flags;
- struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
-
/*
* Video mode must flush CTL before enabling timing engine
* Video encoders need to turn on their interfaces now
*/
if (phys_enc->enable_state == DPU_ENC_ENABLING) {
trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0);
+ phys_enc->hw_intf->idx - INTF_0);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -704,16 +642,13 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
- struct dpu_encoder_phys_vid *vid_enc;
int ret;
if (!phys_enc)
return;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0,
+ phys_enc->hw_intf->idx - INTF_0,
enable,
atomic_read(&phys_enc->vblank_refcount));
@@ -732,19 +667,16 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc)
return -EINVAL;
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
return -EINVAL;
- return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+ return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
}
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
@@ -771,7 +703,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
- struct dpu_encoder_phys_vid *vid_enc = NULL;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -780,18 +711,16 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
goto fail;
}
- vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
- if (!vid_enc) {
+ phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ if (!phys_enc) {
ret = -ENOMEM;
goto fail;
}
- phys_enc = &vid_enc->base;
-
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
- DPU_DEBUG_VIDENC(vid_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
@@ -825,13 +754,13 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = DPU_ENC_DISABLED;
- DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+ DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
return phys_enc;
fail:
DPU_ERROR("failed to create encoder\n");
- if (vid_enc)
+ if (phys_enc)
dpu_encoder_phys_vid_destroy(phys_enc);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 018df2c3b7ed..45a5bc6ede5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
-#include "dpu_kms.h"
#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index ac75cfc267f4..31e9ef96ca5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -73,9 +73,6 @@
#define DPU_NAME_SIZE 12
-/* timeout in frames waiting for frame done */
-#define DPU_FRAME_DONE_TIMEOUT 60
-
/*
* struct dpu_irq_callback - IRQ callback handlers
* @list: list to callback
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index b01183b309b9..ce1a555e1f31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -387,7 +387,7 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
ot_params.is_wfd = !pdpu->is_rt_pipe;
- ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
ot_params.vbif_idx = VBIF_RT;
ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
ot_params.rd = true;
@@ -780,7 +780,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
struct dpu_hw_fmt_layout layout;
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
struct dma_fence *fence;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
@@ -799,8 +798,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
* implicit fence and fb prepare by hand here.
*/
obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->resv);
if (fence)
drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 9bf9d6065c55..7b9edc21bc2c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -59,10 +59,10 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
return -EINVAL;
}
- total_lines_x100 = mode->vtotal * mode->vrefresh;
+ total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines_x100) {
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
- __func__, mode->vtotal, mode->vrefresh);
+ __func__, mode->vtotal, drm_mode_vrefresh(mode));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f5b1256e32b6..131c23a267ee 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -49,15 +49,13 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
struct dma_fence *fence;
if (!new_state->fb)
return 0;
obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->resv);
drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index fb423d309e91..67ef300559cf 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -75,7 +75,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
struct msm_gpu_show_priv *show_priv;
int ret;
- if (!gpu)
+ if (!gpu || !gpu->funcs->gpu_state_get)
return -ENODEV;
show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0bdd93648761..31deb87abfc6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -39,9 +39,10 @@
* MSM_GEM_INFO ioctl.
* - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
* GEM object's debug name
+ * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 4
+#define MSM_VERSION_MINOR 5
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -457,6 +458,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0);
+ INIT_WORK(&priv->free_work, msm_gem_free_work);
+ init_llist_head(&priv->free_list);
+
INIT_LIST_HEAD(&priv->inactive_list);
drm_mode_config_init(ddev);
@@ -964,6 +968,11 @@ static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
args->flags, &args->id);
}
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return msm_submitqueue_query(dev, file->driver_priv, data);
+}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -984,6 +993,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -1019,7 +1029,7 @@ static struct drm_driver msm_driver = {
.irq_uninstall = msm_irq_uninstall,
.enable_vblank = msm_enable_vblank,
.disable_vblank = msm_disable_vblank,
- .gem_free_object = msm_gem_free_object,
+ .gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
@@ -1027,7 +1037,6 @@ static struct drm_driver msm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c56dade2c1dc..e20e6b429804 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,7 +33,7 @@
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <linux/kthread.h>
#include <drm/drmP.h>
@@ -185,6 +185,10 @@ struct msm_drm_private {
/* list of GEM objects: */
struct list_head inactive_list;
+ /* worker for delayed free of objects: */
+ struct work_struct free_work;
+ struct llist_head free_list;
+
struct workqueue_struct *wq;
unsigned int num_planes;
@@ -292,7 +296,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -325,6 +328,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
struct msm_gem_address_space *aspace, bool locked);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_free_work(struct work_struct *work);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
@@ -420,6 +424,8 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c03e860ba737..d088299babf3 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -122,13 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- fbi->par = helper;
fbi->fbops = &msm_fb_ops;
- strcpy(fbi->fix.id, "msm");
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = paddr;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 18ca651ab942..35f55dd25994 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -672,14 +672,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object_list *fobj;
struct dma_fence *fence;
int i, ret;
- fobj = reservation_object_get_list(msm_obj->resv);
+ fobj = reservation_object_get_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = reservation_object_get_excl(msm_obj->resv);
+ fence = reservation_object_get_excl(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -693,7 +692,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
for (i = 0; i < fobj->shared_count; i++) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(msm_obj->resv));
+ reservation_object_held(obj->resv));
if (fence->context != fctx->context) {
ret = dma_fence_wait(fence, true);
if (ret)
@@ -711,9 +710,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
- reservation_object_add_excl_fence(msm_obj->resv, fence);
+ reservation_object_add_excl_fence(obj->resv, fence);
else
- reservation_object_add_shared_fence(msm_obj->resv, fence);
+ reservation_object_add_shared_fence(obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -733,13 +732,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
bool write = !!(op & MSM_PREP_WRITE);
unsigned long remain =
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+ ret = reservation_object_wait_timeout_rcu(obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -771,7 +769,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct reservation_object *robj = msm_obj->resv;
+ struct reservation_object *robj = obj->resv;
struct reservation_object_list *fobj;
struct dma_fence *fence;
struct msm_gem_vma *vma;
@@ -805,7 +803,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_puts(m, " vmas:");
list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
+ vma->aspace != NULL ? vma->aspace->name : NULL,
vma->iova, vma->mapped ? "mapped" : "unmapped",
vma->inuse);
@@ -853,8 +852,18 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (llist_add(&msm_obj->freed, &priv->free_list))
+ queue_work(priv->wq, &priv->free_work);
+}
+
+static void free_object(struct msm_gem_object *msm_obj)
+{
+ struct drm_gem_object *obj = &msm_obj->base;
+ struct drm_device *dev = obj->dev;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -883,15 +892,35 @@ void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (msm_obj->resv == &msm_obj->_resv)
- reservation_object_fini(msm_obj->resv);
-
drm_gem_object_release(obj);
mutex_unlock(&msm_obj->lock);
kfree(msm_obj);
}
+void msm_gem_free_work(struct work_struct *work)
+{
+ struct msm_drm_private *priv =
+ container_of(work, struct msm_drm_private, free_work);
+ struct drm_device *dev = priv->dev;
+ struct llist_node *freed;
+ struct msm_gem_object *msm_obj, *next;
+
+ while ((freed = llist_del_all(&priv->free_list))) {
+
+ mutex_lock(&dev->struct_mutex);
+
+ llist_for_each_entry_safe(msm_obj, next,
+ freed, freed)
+ free_object(msm_obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (need_resched())
+ break;
+ }
+}
+
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle,
@@ -945,12 +974,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
- if (resv) {
- msm_obj->resv = resv;
- } else {
- msm_obj->resv = &msm_obj->_resv;
- reservation_object_init(msm_obj->resv);
- }
+ if (resv)
+ msm_obj->base.resv = resv;
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
@@ -1026,6 +1051,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+ * See the comments above new_inode() for why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
return obj;
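Two independent changes are folded into the msm_gem.c diff above: the driver now uses the reservation object embedded in struct drm_gem_object (obj->resv) instead of its private msm_obj->resv copy, and newly created shmem-backed objects restrict their page allocations with mapping_set_gfp_mask(). The latter matters because these BOs stay pinned; a short sketch of the pattern (the demo_new_shmem_bo() wrapper is hypothetical, the two calls mirror the hunk):

    #include <drm/drm_gem.h>
    #include <linux/pagemap.h>

    static int demo_new_shmem_bo(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
    {
        int ret = drm_gem_object_init(dev, obj, size);

        if (ret)
            return ret;

        /* shmem mappings default to GFP_HIGHUSER_MOVABLE; pinned buffers
         * must not come from ZONE_MOVABLE/CMA, so clamp the mask as soon
         * as the object has its backing file. */
        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
        return 0;
    }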
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2064fac871b8..812d1b1369a5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -84,9 +84,7 @@ struct msm_gem_object {
struct list_head vmas; /* list of msm_gem_vma */
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
+ struct llist_node freed;
/* For physically contiguous buffers. Used when we don't have
* an IOMMU. Also used for stolen/splashscreen buffer.
@@ -133,6 +131,7 @@ enum msm_gem_lock {
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+void msm_gem_free_work(struct work_struct *work);
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
@@ -163,7 +162,10 @@ struct msm_gem_submit {
} *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
- struct msm_gem_object *obj;
+ union {
+ struct msm_gem_object *obj;
+ uint32_t handle;
+ };
uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 13403c6da6c7..60bb290700ce 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
-
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
- return msm_obj->resv;
-}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 12b983fc0b56..1b681306aca3 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -74,27 +74,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
kfree(submit);
}
-static inline unsigned long __must_check
-copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
- if (access_ok(from, n))
- return __copy_from_user_inatomic(to, from, n);
- return -EFAULT;
-}
-
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
int ret = 0;
- spin_lock(&file->table_lock);
- pagefault_disable();
-
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
- struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
@@ -103,15 +90,10 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
submit->bos[i].flags = 0;
- if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
- pagefault_enable();
- spin_unlock(&file->table_lock);
- if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
- ret = -EFAULT;
- goto out;
- }
- spin_lock(&file->table_lock);
- pagefault_disable();
+ if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
+ ret = -EFAULT;
+ i = 0;
+ goto out;
}
/* at least one of READ and/or WRITE flags should be set: */
@@ -121,19 +103,28 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
!(submit_bo.flags & MANDATORY_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
- goto out_unlock;
+ i = 0;
+ goto out;
}
+ submit->bos[i].handle = submit_bo.handle;
submit->bos[i].flags = submit_bo.flags;
/* in validate_objects() we figure out if this is true: */
submit->bos[i].iova = submit_bo.presumed;
+ }
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
- obj = idr_find(&file->object_idr, submit_bo.handle);
+ obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
+ DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -142,7 +133,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if (!list_empty(&msm_obj->submit_entry)) {
DRM_ERROR("handle %u at index %u already on submit list\n",
- submit_bo.handle, i);
+ submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -155,7 +146,6 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
out_unlock:
- pagefault_enable();
spin_unlock(&file->table_lock);
out:
@@ -173,7 +163,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
- ww_mutex_unlock(&msm_obj->resv->lock);
+ ww_mutex_unlock(&msm_obj->base.resv->lock);
if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
@@ -196,7 +186,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+ ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
&submit->ticket);
if (ret)
goto fail;
@@ -218,7 +208,7 @@ fail:
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+ ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
&submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
@@ -244,7 +234,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = reservation_object_reserve_shared(msm_obj->resv,
+ ret = reservation_object_reserve_shared(msm_obj->base.resv,
1);
if (ret)
return ret;
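The submit_lookup_objects() rework above drops the copy_from_user_inatomic() dance: instead of copying each descriptor from userspace while holding file->table_lock with page faults disabled (and falling back on fault), it copies everything first and then resolves all handles in a second pass under the spinlock, which is why the bos[] entry grew a handle member via the union in the msm_gem.h hunk below. A condensed sketch of the two-pass shape (demo_* names and the simplified descriptor structs are illustrative):

    #include <drm/drm_file.h>
    #include <linux/idr.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* stand-ins for the uapi descriptor and the kernel-side copy */
    struct demo_submit_bo { u32 handle; u32 flags; };
    struct demo_bo { u32 handle; u32 flags; void *obj; };

    static int demo_lookup_objects(struct drm_file *file,
                                   const struct demo_submit_bo __user *ubos,
                                   unsigned int nr_bos, struct demo_bo *bos)
    {
        unsigned int i;
        int ret = 0;

        /* Pass 1: plain copy_from_user(); this may fault and sleep, so no
         * spinlock is held yet. */
        for (i = 0; i < nr_bos; i++) {
            struct demo_submit_bo ubo;

            if (copy_from_user(&ubo, &ubos[i], sizeof(ubo)))
                return -EFAULT;
            bos[i].handle = ubo.handle;
            bos[i].flags = ubo.flags;
        }

        /* Pass 2: resolve every handle under table_lock in one go; nothing
         * here can fault, so the lock is held for a short, bounded time. */
        spin_lock(&file->table_lock);
        for (i = 0; i < nr_bos; i++) {
            bos[i].obj = idr_find(&file->object_idr, bos[i].handle);
            if (!bos[i].obj) {
                ret = -EINVAL;
                break;
            }
        }
        spin_unlock(&file->table_lock);

        return ret;
    }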
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 49c04829cf34..fcf7a83f0e6f 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -85,7 +85,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
vma->mapped = true;
- if (aspace->mmu)
+ if (aspace && aspace->mmu)
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
size, prot);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 10babd18e286..bf4ee2766431 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -443,24 +443,15 @@ static void recover_worker(struct work_struct *work)
if (submit) {
struct task_struct *task;
+ /* Increment the fault counts */
+ gpu->global_faults++;
+ submit->queue->faults++;
+
task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
-
- /*
- * So slightly annoying, in other paths like
- * mmap'ing gem buffers, mmap_sem is acquired
- * before struct_mutex, which means we can't
- * hold struct_mutex across the call to
- * get_cmdline(). But submits are retired
- * from the same in-order workqueue, so we can
- * safely drop the lock here without worrying
- * about the submit going away.
- */
- mutex_unlock(&dev->struct_mutex);
cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
put_task_struct(task);
- mutex_lock(&dev->struct_mutex);
}
if (comm && cmd) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 6241986bab51..f2739cd97cea 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -104,6 +104,9 @@ struct msm_gpu {
/* does gpu need hw_init? */
bool needs_hw_init;
+ /* number of GPU hangs (for all contexts) */
+ int global_faults;
+
/* worker for handling active-list retiring: */
struct work_struct retire_work;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4d62790cd425..12bb54cefd46 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -38,13 +38,8 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int ret;
- pm_runtime_get_sync(mmu->dev);
- ret = iommu_attach_device(iommu->domain, mmu->dev);
- pm_runtime_put_sync(mmu->dev);
-
- return ret;
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
@@ -52,9 +47,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
- pm_runtime_put_sync(mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
@@ -63,9 +56,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
-// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
-// pm_runtime_put_sync(mmu->dev);
WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
@@ -75,9 +66,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- pm_runtime_get_sync(mmu->dev);
iommu_unmap(iommu->domain, iova, len);
- pm_runtime_put_sync(mmu->dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 5115f75b5b7f..f160ec40a39b 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -120,6 +120,47 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
+static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
+ struct drm_msm_submitqueue_query *args)
+{
+ size_t size = min_t(size_t, args->len, sizeof(queue->faults));
+ int ret;
+
+ /* If a zero length was passed in, return the data size we expect */
+ if (!args->len) {
+ args->len = sizeof(queue->faults);
+ return 0;
+ }
+
+ /* Set the length to the actual size of the data */
+ args->len = size;
+
+ ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
+
+ return ret ? -EFAULT : 0;
+}
+
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args)
+{
+ struct msm_gpu_submitqueue *queue;
+ int ret = -EINVAL;
+
+ if (args->pad)
+ return -EINVAL;
+
+ queue = msm_submitqueue_get(ctx, args->id);
+ if (!queue)
+ return -ENOENT;
+
+ if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
+ ret = msm_submitqueue_query_faults(queue, args);
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
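msm_submitqueue_query() follows the usual two-step query convention: a zero len asks the kernel how many bytes the parameter occupies, and a second call with a buffer receives min(len, real size) bytes, with len rewritten to the clamped value. A hedged userspace sketch of how a caller might use it; it assumes the matching DRM_IOCTL_MSM_SUBMITQUEUE_QUERY number and MSM_SUBMITQUEUE_PARAM_FAULTS are exposed by the msm uapi header wired up elsewhere in this series, and uses libdrm's drmIoctl():

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include "msm_drm.h"              /* uapi header with the struct and ioctl */

    static int query_queue_faults(int fd, uint32_t queue_id,
                                  void *out, uint32_t out_len)
    {
        struct drm_msm_submitqueue_query q = {
            .id = queue_id,
            .param = MSM_SUBMITQUEUE_PARAM_FAULTS,
            /* .len == 0: probe, kernel reports the expected size */
        };

        if (drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q))
            return -errno;
        if (q.len > out_len)
            return -EOVERFLOW;

        /* Second call: the kernel copies min(len, real size) bytes and
         * writes the clamped length back into q.len. */
        q.data = (uintptr_t)out;
        if (drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q))
            return -errno;

        return (int)q.len;            /* number of bytes actually written */
    }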
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 0ee1ca8a316a..98e9bda91e80 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -253,12 +253,12 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
/*
- * DRM_BUS_FLAG_PIXDATA_ defines are controller centric,
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_ defines are controller centric,
* controllers VDCTRL0_DOTCLK is display centric.
* Drive on positive edge -> display samples on falling edge
- * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
*/
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 581404e6544d..378c5dd692b0 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,7 +1,7 @@
-ccflags-y += -I$(src)/include
-ccflags-y += -I$(src)/include/nvkm
-ccflags-y += -I$(src)/nvkm
-ccflags-y += -I$(src)
+ccflags-y += -I $(srctree)/$(src)/include
+ccflags-y += -I $(srctree)/$(src)/include/nvkm
+ccflags-y += -I $(srctree)/$(src)/nvkm
+ccflags-y += -I $(srctree)/$(src)
# NVKM - HW resource manager
#- code also used by various userspace tools/tests
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 00cd9ab8948d..1f1395148ff0 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -5,22 +5,31 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER
select DRM_TTM
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
- select BACKLIGHT_LCD_SUPPORT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
select MXM_WMI if ACPI && X86
select POWER_SUPPLY
# Similar to i915, we need to select ACPI_VIDEO and its dependencies
- select BACKLIGHT_LCD_SUPPORT if ACPI && X86
select BACKLIGHT_CLASS_DEVICE if ACPI && X86
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
- select DRM_VM
help
Choose this option for open-source NVIDIA support.
+config NOUVEAU_LEGACY_CTX_SUPPORT
+ bool "Nouveau legacy context support"
+ depends on DRM_NOUVEAU
+ select DRM_LEGACY
+ default y
+ help
+ There was a version of the nouveau DDX that relied on legacy
+ context ioctls not erroring out. That was a long time ago, so
+ offer a way to disable them now. For uapi compatibility with the
+ old nouveau DDX this should be on by default, but modern
+ distributions should consider turning it off.
+
config NOUVEAU_PLATFORM_DRIVER
bool "Nouveau (NVIDIA) SoC GPUs"
depends on DRM_NOUVEAU && ARCH_TEGRA
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 2216c58620c2..7c41b0599d1a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -41,6 +41,7 @@ struct nv50_disp_interlock {
NV50_DISP_INTERLOCK__SIZE
} type;
u32 data;
+ u32 wimm;
};
void corec37d_ntfy_init(struct nouveau_bo *, u32);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 2e7a0c347ddb..06ee23823a68 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->set.or = head->func->or != NULL;
}
- if (asyh->state.mode_changed)
+ if (asyh->state.mode_changed || asyh->state.connectors_changed)
nv50_head_atomic_check_mode(head, asyh);
if (asyh->state.color_mgmt_changed ||
@@ -413,6 +413,7 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
asyh->ovly = armh->ovly;
asyh->dither = armh->dither;
asyh->procamp = armh->procamp;
+ asyh->or = armh->or;
asyh->dp = armh->dp;
asyh->clr.mask = 0;
asyh->set.mask = 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 9103b8494279..f7dbd965e4e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
return ret;
}
+ wndw->interlock.wimm = wndw->interlock.data;
wndw->immd = func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index b95181027b31..283ff690350e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -127,7 +127,7 @@ void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
struct nv50_wndw_atom *asyw)
{
- if (interlock) {
+ if (interlock[NV50_DISP_INTERLOCK_CORE]) {
asyw->image.mode = 0;
asyw->image.interval = 1;
}
@@ -149,7 +149,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
if (asyw->set.point) {
if (asyw->set.point = false, asyw->set.mask)
interlock[wndw->interlock.type] |= wndw->interlock.data;
- interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
+ interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
wndw->immd->point(wndw, asyw);
wndw->immd->update(wndw, interlock);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index eef54e9b5d77..7957eafa5f0e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -38,6 +38,7 @@ struct nvkm_i2c_bus {
struct mutex mutex;
struct list_head head;
struct i2c_adapter i2c;
+ u8 enabled;
};
int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
@@ -57,6 +58,7 @@ struct nvkm_i2c_aux {
struct mutex mutex;
struct list_head head;
struct i2c_adapter i2c;
+ u8 enabled;
u32 intr;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 55c0fa451163..832da8e0020d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -358,15 +358,6 @@ nouveau_display_hpd_work(struct work_struct *work)
#ifdef CONFIG_ACPI
-/*
- * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
- * to the acpi subsys to move it there from drivers/acpi/acpi_video.c .
- * This should be dropped once that is merged.
- */
-#ifndef ACPI_VIDEO_NOTIFY_PROBE
-#define ACPI_VIDEO_NOTIFY_PROBE 0x81
-#endif
-
static int
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
void *data)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5020265bfbd9..7c2fcaba42d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -631,7 +631,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
- ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
+ true, false, 0, &device);
if (ret)
return ret;
@@ -802,10 +803,15 @@ fail_display:
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
+ int ret = 0;
struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
- nvif_client_resume(&drm->master.base);
+ ret = nvif_client_resume(&drm->master.base);
+ if (ret) {
+ NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
+ return ret;
+ }
NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
@@ -925,6 +931,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
int ret;
@@ -941,6 +948,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
+ if (ret) {
+ NV_ERROR(drm, "resume failed with: %d\n", ret);
+ return ret;
+ }
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
@@ -1094,8 +1105,11 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features =
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
- DRIVER_KMS_LEGACY_CONTEXT,
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
+ | DRIVER_KMS_LEGACY_CONTEXT
+#endif
+ ,
.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index da847244479d..35ff0ca01a3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -60,8 +60,6 @@
struct nouveau_channel;
struct platform_device;
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
#include "nouveau_fence.h"
#include "nouveau_bios.h"
#include "nouveau_vmm.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0d3cd4e05728..73cc3217068a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -365,14 +365,10 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out_unlock;
}
- info->skip_vt_switch = 1;
-
- info->par = fbcon;
/* setup helper */
fbcon->helper.fb = &fb->base;
- strcpy(info->fix.id, "nouveaufb");
if (!chan)
info->flags = FBINFO_HWACCEL_DISABLED;
else
@@ -387,9 +383,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->base.pitches[0],
- fb->base.format->depth);
- drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index db9d52047ef8..73a7eeba3973 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -32,7 +32,7 @@
#include "nouveau_display.h"
struct nouveau_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1543c2f8d3d3..f0daf958e03a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -168,9 +168,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_file *file_priv = filp->private_data;
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return drm_legacy_mmap(filp, vma);
-
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
@@ -239,7 +236,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_bo_device_init(&drm->ttm.bdev,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7971096b6767..10d91e8bbb94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2540,6 +2540,41 @@ nv166_chipset = {
.sec2 = tu102_sec2_new,
};
+static const struct nvkm_device_chip
+nv167_chipset = {
+ .name = "TU117",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu102_devinit_new,
+ .fault = tu102_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu102_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu102_ce_new,
+ .ce[1] = tu102_ce_new,
+ .ce[2] = tu102_ce_new,
+ .ce[3] = tu102_ce_new,
+ .ce[4] = tu102_ce_new,
+ .disp = tu102_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu102_fifo_new,
+ .nvdec[0] = gp102_nvdec_new,
+ .sec2 = tu102_sec2_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2824,8 +2859,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
u64 mmio_base, mmio_size;
u32 boot0, strap;
void __iomem *map;
- int ret = -EEXIST;
- int i;
+ int ret = -EEXIST, i;
+ unsigned chipset;
mutex_lock(&nv_devices_mutex);
if (nvkm_device_find_locked(handle))
@@ -2870,6 +2905,26 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
strap = ioread32_native(map + 0x101000);
iounmap(map);
+ /* chipset can be overridden for devel/testing purposes */
+ chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+ if (chipset) {
+ u32 override_boot0;
+
+ if (chipset >= 0x10) {
+ override_boot0 = ((chipset & 0x1ff) << 20);
+ override_boot0 |= 0x000000a1;
+ } else {
+ if (chipset != 0x04)
+ override_boot0 = 0x20104000;
+ else
+ override_boot0 = 0x20004000;
+ }
+
+ nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+ boot0, override_boot0);
+ boot0 = override_boot0;
+ }
+
/* determine chipset and derive architecture from it */
if ((boot0 & 0x1f000000) > 0) {
device->chipset = (boot0 & 0x1ff00000) >> 20;
@@ -2996,6 +3051,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
+ case 0x167: device->chip = &nv167_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
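The NvChipset override above fabricates a BOOT0 value so that a devel/testing build can pretend to be another chipset via the driver's config option (e.g. NvChipset=0x167): for NV10 and later parts the chipset id is packed into bits 20..28 plus a fixed low byte, and the normal decode a few lines later turns it back into device->chipset. A small standalone sketch of that encode/decode round trip (plain userspace C, constants as in the hunk; the simplified decode only covers the NV10+ branch):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t encode_boot0(unsigned int chipset)
    {
        if (chipset >= 0x10)
            return ((chipset & 0x1ff) << 20) | 0x000000a1;
        return chipset != 0x04 ? 0x20104000 : 0x20004000;
    }

    static unsigned int decode_chipset(uint32_t boot0)
    {
        if (boot0 & 0x1f000000)               /* NV10 and later */
            return (boot0 & 0x1ff00000) >> 20;
        return 0x04;                          /* simplified: pre-NV10 decode omitted */
    }

    int main(void)
    {
        unsigned int chipset = 0x167;         /* e.g. force the new TU117 entry */
        uint32_t boot0 = encode_boot0(chipset);

        /* prints: override boot0 = 167000a1 -> chipset 0x167 */
        printf("override boot0 = %08x -> chipset 0x%x\n",
               boot0, decode_chipset(boot0));
        return 0;
    }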
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 5f301e632599..818d21bd28d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
* and it's better to have a failed modeset than that.
*/
for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->nr <= outp_bw)
- failsafe = cfg;
+ if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+ /* Try to respect sink limits too when selecting
+ * lowest link configuration.
+ */
+ if (!failsafe ||
+ (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
+ failsafe = cfg;
+ }
+
if (failsafe && cfg[1].rate < dataKBps)
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
index d131cca999dd..10f2aa9f29a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
@@ -23,38 +23,55 @@ void pack_hdmi_infoframe(struct packed_hdmi_infoframe *packed_frame,
*/
case 17:
subpack1_high = (raw_frame[16] << 16);
+ /* fall through */
case 16:
subpack1_high |= (raw_frame[15] << 8);
+ /* fall through */
case 15:
subpack1_high |= raw_frame[14];
+ /* fall through */
case 14:
subpack1_low = (raw_frame[13] << 24);
+ /* fall through */
case 13:
subpack1_low |= (raw_frame[12] << 16);
+ /* fall through */
case 12:
subpack1_low |= (raw_frame[11] << 8);
+ /* fall through */
case 11:
subpack1_low |= raw_frame[10];
+ /* fall through */
case 10:
subpack0_high = (raw_frame[9] << 16);
+ /* fall through */
case 9:
subpack0_high |= (raw_frame[8] << 8);
+ /* fall through */
case 8:
subpack0_high |= raw_frame[7];
+ /* fall through */
case 7:
subpack0_low = (raw_frame[6] << 24);
+ /* fall through */
case 6:
subpack0_low |= (raw_frame[5] << 16);
+ /* fall through */
case 5:
subpack0_low |= (raw_frame[4] << 8);
+ /* fall through */
case 4:
subpack0_low |= raw_frame[3];
+ /* fall through */
case 3:
header = (raw_frame[2] << 16);
+ /* fall through */
case 2:
header |= (raw_frame[1] << 8);
+ /* fall through */
case 1:
header |= raw_frame[0];
+ /* fall through */
case 0:
break;
}
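The long case ladder above falls through on purpose: a frame of N bytes ORs byte N-1 into its subpack word and then keeps accumulating all lower bytes. The new comments only document that intent for GCC's -Wimplicit-fallthrough, which would otherwise warn on every unannotated drop-through. A tiny standalone example of the same annotated pattern (userspace C, not the driver code):

    #include <stdio.h>

    /* Pack up to three little-endian bytes, falling through on purpose. */
    static unsigned int pack_bytes(const unsigned char *raw, int size)
    {
        unsigned int v = 0;

        switch (size) {
        case 3:
            v |= raw[2] << 16;
            /* fall through */
        case 2:
            v |= raw[1] << 8;
            /* fall through */
        case 1:
            v |= raw[0];
            /* fall through */
        case 0:
            break;
        }
        return v;
    }

    int main(void)
    {
        const unsigned char raw[] = { 0x11, 0x22, 0x33 };

        printf("%06x\n", pack_bytes(raw, 3));   /* prints 332211 */
        return 0;
    }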
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
index 49ef7e57aad4..7f1adab21a5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -122,6 +122,7 @@ nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
break;
case NV_MEM_ACCESS_WO:
dmaobj->flags0 |= 0x00008000;
+ /* fall through */
case NV_MEM_ACCESS_RW:
dmaobj->flags2 |= 0x00000002;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index ad707ff176cc..93493b335d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -117,8 +117,10 @@ nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
switch (mthd) {
case 0x0000 ... 0x0000: /* subchannel's engine -> software */
nvkm_wr32(device, 0x003280, (engine &= ~mask));
+ /* fall through */
case 0x0180 ... 0x01fc: /* handle -> instance */
data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
+ /* fall through */
case 0x0100 ... 0x017c:
case 0x0200 ... 0x1ffc: /* pass method down to sw */
if (!(engine & mask) && sw)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 8c7ba32763c4..47c16821c37f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -81,6 +81,7 @@ nv40_fifo_init(struct nvkm_fifo *base)
case 0x49:
case 0x4b:
nvkm_wr32(device, 0x002230, 0x00000001);
+ /* fall through */
case 0x40:
case 0x41:
case 0x42:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index a3ba7f50198b..a3dcb09a40ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -94,6 +94,8 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
return ret;
bar_len = device->func->resource_size(device, bar_nr);
+ if (!bar_len)
+ return -ENOMEM;
if (bar_nr == 3 && bar->bar2_halve)
bar_len >>= 1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 157b076a1272..f23a0ccc2bec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
struct nvkm_device *device = bar->base.subdev.device;
static struct lock_class_key bar1_lock;
static struct lock_class_key bar2_lock;
- u64 start, limit;
+ u64 start, limit, size;
int ret;
ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
@@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
start = 0x0100000000ULL;
- limit = start + device->func->resource_size(device, 3);
+ size = device->func->resource_size(device, 3);
+ if (!size)
+ return -ENOMEM;
+ limit = start + size;
ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
&bar2_lock, "bar2", &bar->bar2_vmm);
@@ -164,10 +167,15 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR1 */
start = 0x0000000000ULL;
- limit = start + device->func->resource_size(device, 1);
+ size = device->func->resource_size(device, 1);
+ if (!size)
+ return -ENOMEM;
+ limit = start + size;
ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
&bar1_lock, "bar1", &bar->bar1_vmm);
+ if (ret)
+ return ret;
atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
bar->bar1_vmm->debug = bar->base.subdev.debug;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index c3068358f695..7112992e0e38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -135,6 +135,7 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
break;
case 0x30:
info->script = nvbios_rd16(bios, perf + 0x02);
+ /* fall through */
case 0x35:
info->fanspeed = nvbios_rd08(bios, perf + 0x06);
info->voltage = nvbios_rd08(bios, perf + 0x07);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index e6e804cee2bc..bda6cc9a7aaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -134,6 +134,7 @@ pll_map(struct nvkm_bios *bios)
device->chipset == 0xaa ||
device->chipset == 0xac)
return g84_pll_mapping;
+ /* fall through */
default:
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index ba6a868d4c95..40e564524b7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -90,6 +90,7 @@ nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
case NVKM_CLK_BOOST_NONE:
if (clk->base_khz && freq > clk->base_khz)
return false;
+ /* fall through */
case NVKM_CLK_BOOST_BIOS:
if (clk->boost_khz && freq > clk->boost_khz)
return false;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index 1c21b8b53b78..4f000237796f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -363,6 +363,7 @@ mcp77_clk_prog(struct nvkm_clk *base)
switch (clk->vsrc) {
case nv_clk_src_cclk:
mast |= 0x00400000;
+ /* fall through */
default:
nvkm_wr32(device, 0x4600, clk->vdiv);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 8bcb7e79a0cb..456aed1f2a02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1070,7 +1070,7 @@ gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
nvkm_error(subdev, "unable to calc plls\n");
return -EINVAL;
}
- nvkm_debug(subdev, "sucessfully calced PLLs for clock %i kHz"
+ nvkm_debug(subdev, "successfully calced PLLs for clock %i kHz"
" (refclock: %i kHz)\n", next->freq, ret);
} else {
/* calculate refpll coefficients */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 2b12e388f47a..5f4c287d7943 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -131,11 +131,13 @@ nv40_ram_prog(struct nvkm_ram *base)
nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x004048, ram->coef);
nvkm_wr32(device, 0x004030, ram->coef);
+ /* fall through */
case 0x43:
case 0x49:
case 0x4b:
nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x00403c, ram->coef);
+ /* fall through */
default:
nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x004024, ram->coef);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 4c1f547da463..b4e7404fe660 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -105,9 +105,15 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
{
struct nvkm_i2c_pad *pad = aux->pad;
int ret;
+
AUX_TRACE(aux, "acquire");
mutex_lock(&aux->mutex);
- ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+
+ if (aux->enabled)
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+ else
+ ret = -EIO;
+
if (ret)
mutex_unlock(&aux->mutex);
return ret;
@@ -145,6 +151,24 @@ nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
}
}
+void
+nvkm_i2c_aux_init(struct nvkm_i2c_aux *aux)
+{
+ AUX_TRACE(aux, "init");
+ mutex_lock(&aux->mutex);
+ aux->enabled = true;
+ mutex_unlock(&aux->mutex);
+}
+
+void
+nvkm_i2c_aux_fini(struct nvkm_i2c_aux *aux)
+{
+ AUX_TRACE(aux, "fini");
+ mutex_lock(&aux->mutex);
+ aux->enabled = false;
+ mutex_unlock(&aux->mutex);
+}
+
int
nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
struct nvkm_i2c_pad *pad, int id,
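nvkm_i2c_aux_init()/_fini() above (and the matching nvkm_i2c_bus_fini() further down) just flip an enabled flag under the channel mutex; acquire() then checks the flag under the same mutex, so any transfer attempted after the subdev has been shut down for suspend fails with -EIO instead of poking powered-down pads. A condensed sketch of the guard (demo_* names are illustrative; the real structs keep a u8 enabled field as in the i2c.h hunk above):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct demo_chan {
        struct mutex mutex;
        bool enabled;                  /* true between init() and fini() */
    };

    static void demo_chan_fini(struct demo_chan *chan)
    {
        mutex_lock(&chan->mutex);
        chan->enabled = false;         /* new acquires now fail fast */
        mutex_unlock(&chan->mutex);
    }

    static void demo_chan_init(struct demo_chan *chan)
    {
        mutex_lock(&chan->mutex);
        chan->enabled = true;
        mutex_unlock(&chan->mutex);
    }

    /* Returns with the mutex held on success, mirroring nvkm_i2c_aux_acquire(). */
    static int demo_chan_acquire(struct demo_chan *chan)
    {
        int ret = 0;

        mutex_lock(&chan->mutex);
        if (!chan->enabled)
            ret = -EIO;                /* suspended: refuse instead of touching hw */
        if (ret)
            mutex_unlock(&chan->mutex);
        return ret;
    }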
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 7d56c4ba693c..08f6b2ee64ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -16,6 +16,8 @@ int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_aux **);
void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+void nvkm_i2c_aux_init(struct nvkm_i2c_aux *);
+void nvkm_i2c_aux_fini(struct nvkm_i2c_aux *);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 4f197b15acf6..ecacb22834d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -160,8 +160,18 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_pad *pad;
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_aux *aux;
u32 mask;
+ list_for_each_entry(aux, &i2c->aux, head) {
+ nvkm_i2c_aux_fini(aux);
+ }
+
+ list_for_each_entry(bus, &i2c->bus, head) {
+ nvkm_i2c_bus_fini(bus);
+ }
+
if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
@@ -180,6 +190,7 @@ nvkm_i2c_init(struct nvkm_subdev *subdev)
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_bus *bus;
struct nvkm_i2c_pad *pad;
+ struct nvkm_i2c_aux *aux;
list_for_each_entry(pad, &i2c->pad, head) {
nvkm_i2c_pad_init(pad);
@@ -189,6 +200,10 @@ nvkm_i2c_init(struct nvkm_subdev *subdev)
nvkm_i2c_bus_init(bus);
}
+ list_for_each_entry(aux, &i2c->aux, head) {
+ nvkm_i2c_aux_init(aux);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
index 807a2b67bd64..ed50cc3736b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -110,6 +110,19 @@ nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
BUS_TRACE(bus, "init");
if (bus->func->init)
bus->func->init(bus);
+
+ mutex_lock(&bus->mutex);
+ bus->enabled = true;
+ mutex_unlock(&bus->mutex);
+}
+
+void
+nvkm_i2c_bus_fini(struct nvkm_i2c_bus *bus)
+{
+ BUS_TRACE(bus, "fini");
+ mutex_lock(&bus->mutex);
+ bus->enabled = false;
+ mutex_unlock(&bus->mutex);
}
void
@@ -126,9 +139,15 @@ nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
{
struct nvkm_i2c_pad *pad = bus->pad;
int ret;
+
BUS_TRACE(bus, "acquire");
mutex_lock(&bus->mutex);
- ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+
+ if (bus->enabled)
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+ else
+ ret = -EIO;
+
if (ret)
mutex_unlock(&bus->mutex);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
index bea0dd33961e..465464bba58b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -18,6 +18,7 @@ int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_bus **);
void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
+void nvkm_i2c_bus_fini(struct nvkm_i2c_bus *);
int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index fa93f964e6a4..41640e0584ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1783,7 +1783,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
- if (inst && vmm->func->part) {
+ if (inst && vmm && vmm->func->part) {
mutex_lock(&vmm->mutex);
vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index 844971e5e874..2a6150ab5611 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -159,6 +159,7 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
break;
case 0x0e: /* eDP, falls through to DPint */
ctx.outp[1] |= 0x00010000;
+ /* fall through */
case 0x07: /* DP internal, wtf is this?? HP8670w */
ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
type = DCB_CONNECTOR_eDP;
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index a349cb61961e..7b0bcb494b5c 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -6,23 +6,12 @@ config DRM_OMAP_ENCODER_OPA362
Driver for OPA362 external analog TV amplifier controlled
through a GPIO.
-config DRM_OMAP_ENCODER_TFP410
- tristate "TFP410 DPI to DVI Encoder"
- help
- Driver for TFP410 DPI to DVI encoder.
-
config DRM_OMAP_ENCODER_TPD12S015
tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
-config DRM_OMAP_CONNECTOR_DVI
- tristate "DVI Connector"
- depends on I2C
- help
- Driver for a generic DVI connector.
-
config DRM_OMAP_CONNECTOR_HDMI
tristate "HDMI Connector"
help
@@ -33,12 +22,6 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
help
Driver for a generic analog TV connector.
-config DRM_OMAP_PANEL_DPI
- tristate "Generic DPI panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Driver for generic DPI panels.
-
config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index d99659e1381b..1db34d4fed64 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
-obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 28a3ce8f88d2..6c0561101874 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -35,50 +35,9 @@ static void tvc_disconnect(struct omap_dss_device *src,
{
}
-static int tvc_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(ddata->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void tvc_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(ddata->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static const struct omap_dss_device_ops tvc_ops = {
.connect = tvc_connect,
.disconnect = tvc_disconnect,
-
- .enable = tvc_enable,
- .disable = tvc_disable,
};
static int tvc_probe(struct platform_device *pdev)
@@ -97,6 +56,7 @@ static int tvc_probe(struct platform_device *pdev)
dssdev->ops = &tvc_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
@@ -109,12 +69,9 @@ static int tvc_probe(struct platform_device *pdev)
static int __exit tvc_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
omapdss_device_unregister(&ddata->dssdev);
- tvc_disable(dssdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
deleted file mode 100644
index 24b14f44248e..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Generic DVI Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <drm/drm_edid.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct i2c_adapter *i2c_adapter;
-
- struct gpio_desc *hpd_gpio;
-
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- bool hpd_enabled;
- /* mutex for hpd fields above */
- struct mutex hpd_lock;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int dvic_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void dvic_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static int dvic_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void dvic_disable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int dvic_ddc_read(struct i2c_adapter *adapter,
- unsigned char *buf, u16 count, u8 offset)
-{
- int r, retries;
-
- for (retries = 3; retries > 0; retries--) {
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &offset,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = count,
- .buf = buf,
- }
- };
-
- r = i2c_transfer(adapter, msgs, 2);
- if (r == 2)
- return 0;
-
- if (r != -EAGAIN)
- break;
- }
-
- return r < 0 ? r : -EIO;
-}
-
-static int dvic_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r, l, bytes_read;
-
- l = min(EDID_LENGTH, len);
- r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0);
- if (r)
- return r;
-
- bytes_read = l;
-
- /* if there are extensions, read second block */
- if (len > EDID_LENGTH && edid[0x7e] > 0) {
- l = min(EDID_LENGTH, len - EDID_LENGTH);
-
- r = dvic_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
- l, EDID_LENGTH);
- if (r)
- return r;
-
- bytes_read += l;
- }
-
- return bytes_read;
-}
-
-static bool dvic_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- unsigned char out;
- int r;
-
- if (ddata->hpd_gpio)
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-
- if (!ddata->i2c_adapter)
- return true;
-
- r = dvic_ddc_read(ddata->i2c_adapter, &out, 1, 0);
-
- return r == 0;
-}
-
-static void dvic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops dvic_ops = {
- .connect = dvic_connect,
- .disconnect = dvic_disconnect,
-
- .enable = dvic_enable,
- .disable = dvic_disable,
-
- .read_edid = dvic_read_edid,
- .detect = dvic_detect,
-
- .register_hpd_cb = dvic_register_hpd_cb,
- .unregister_hpd_cb = dvic_unregister_hpd_cb,
-};
-
-static irqreturn_t dvic_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_enabled && ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (dvic_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int dvic_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct device_node *adapter_node;
- struct i2c_adapter *adapter;
- struct gpio_desc *gpio;
- int r;
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse HPD gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->hpd_gpio = gpio;
-
- mutex_init(&ddata->hpd_lock);
-
- if (ddata->hpd_gpio) {
- r = devm_request_threaded_irq(&pdev->dev,
- gpiod_to_irq(ddata->hpd_gpio), NULL, dvic_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "DVI HPD", ddata);
- if (r)
- return r;
- }
-
- adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
- if (adapter_node) {
- adapter = of_get_i2c_adapter_by_node(adapter_node);
- of_node_put(adapter_node);
- if (adapter == NULL) {
- dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
- return -EPROBE_DEFER;
- }
-
- ddata->i2c_adapter = adapter;
- }
-
- return 0;
-}
-
-static int dvic_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- r = dvic_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &dvic_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_DVI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
-
- if (ddata->hpd_gpio)
- dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_HPD;
- if (ddata->i2c_adapter)
- dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_EDID;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit dvic_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(&ddata->dssdev);
-
- dvic_disable(dssdev);
-
- i2c_put_adapter(ddata->i2c_adapter);
-
- mutex_destroy(&ddata->hpd_lock);
-
- return 0;
-}
-
-static const struct of_device_id dvic_of_match[] = {
- { .compatible = "omapdss,dvi-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, dvic_of_match);
-
-static struct platform_driver dvi_connector_driver = {
- .probe = dvic_probe,
- .remove = __exit_p(dvic_remove),
- .driver = {
- .name = "connector-dvi",
- .of_match_table = dvic_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(dvi_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic DVI Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index e602fa4a50a4..68d6f6e44b03 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -41,44 +41,6 @@ static void hdmic_disconnect(struct omap_dss_device *src,
{
}
-static int hdmic_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(ddata->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void hdmic_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(ddata->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static bool hdmic_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -113,9 +75,6 @@ static const struct omap_dss_device_ops hdmic_ops = {
.connect = hdmic_connect,
.disconnect = hdmic_disconnect,
- .enable = hdmic_enable,
- .disable = hdmic_disable,
-
.detect = hdmic_detect,
.register_hpd_cb = hdmic_register_hpd_cb,
.unregister_hpd_cb = hdmic_unregister_hpd_cb,
@@ -181,6 +140,7 @@ static int hdmic_probe(struct platform_device *pdev)
dssdev->ops = &hdmic_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
dssdev->ops_flags = ddata->hpd_gpio
@@ -196,12 +156,9 @@ static int hdmic_probe(struct platform_device *pdev)
static int __exit hdmic_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
omapdss_device_unregister(&ddata->dssdev);
- hdmic_disable(dssdev);
-
return 0;
}
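Note on the pattern above: across the connector and encoder drivers touched by this patch, the per-device .enable()/.disable() handlers that used to chain to dssdev->src and update dssdev->state are either removed outright (as for the HDMI connector, which has no device-local work to do) or reduced to touching only the device's own resources. The chaining and state tracking move into the omapdss_device_{pre_enable,enable,disable,post_disable}() helpers added to dss/base.c further down in this diff. A minimal sketch of what a converted driver's enable handler is left with (illustrative only, not taken from the patch):

	static void example_encoder_enable(struct omap_dss_device *dssdev)
	{
		struct panel_drv_data *ddata = to_panel_data(dssdev);

		/* Only device-local setup remains; the omapdss core now
		 * enables the upstream source and marks the device active. */
		if (ddata->enable_gpio)
			gpiod_set_value_cansleep(ddata->enable_gpio, 1);
	}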
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 4fefd80f53bb..29a5a130ebd1 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -41,48 +41,20 @@ static void opa362_disconnect(struct omap_dss_device *src,
omapdss_device_disconnect(dst, dst->next);
}
-static int opa362_enable(struct omap_dss_device *dssdev)
+static void opa362_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(dssdev->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void opa362_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(dssdev->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static const struct omap_dss_device_ops opa362_ops = {
@@ -116,7 +88,6 @@ static int opa362_probe(struct platform_device *pdev)
dssdev->ops = &opa362_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->output_type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(1) | BIT(0);
@@ -141,13 +112,7 @@ static int __exit opa362_remove(struct platform_device *pdev)
omapdss_device_put(dssdev->next);
omapdss_device_unregister(&ddata->dssdev);
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- opa362_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
+ opa362_disable(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
deleted file mode 100644
index f1a748353279..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * TFP410 DPI-to-DVI encoder driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *pd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tfp410_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void tfp410_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static int tfp410_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- if (ddata->pd_gpio)
- gpiod_set_value_cansleep(ddata->pd_gpio, 0);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void tfp410_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- if (ddata->pd_gpio)
- gpiod_set_value_cansleep(ddata->pd_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static const struct omap_dss_device_ops tfp410_ops = {
- .connect = tfp410_connect,
- .disconnect = tfp410_disconnect,
- .enable = tfp410_enable,
- .disable = tfp410_disable,
-};
-
-static int tfp410_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- /* Powerdown GPIO */
- gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse powerdown gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->pd_gpio = gpio;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tfp410_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tfp410_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- tfp410_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tfp410_of_match[] = {
- { .compatible = "omapdss,ti,tfp410", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tfp410_of_match);
-
-static struct platform_driver tfp410_driver = {
- .probe = tfp410_probe,
- .remove = __exit_p(tfp410_remove),
- .driver = {
- .name = "tfp410",
- .of_match_table = tfp410_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tfp410_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("TFP410 DPI to DVI encoder driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 94de55fd8884..bc03752d2762 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -62,35 +62,6 @@ static void tpd_disconnect(struct omap_dss_device *src,
omapdss_device_disconnect(dst, dst->next);
}
-static int tpd_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void tpd_disable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -124,8 +95,6 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
static const struct omap_dss_device_ops tpd_ops = {
.connect = tpd_connect,
.disconnect = tpd_disconnect,
- .enable = tpd_enable,
- .disable = tpd_disable,
.detect = tpd_detect,
.register_hpd_cb = tpd_register_hpd_cb,
.unregister_hpd_cb = tpd_unregister_hpd_cb,
@@ -198,7 +167,6 @@ static int tpd_probe(struct platform_device *pdev)
dssdev->ops = &tpd_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(1) | BIT(0);
dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
@@ -225,14 +193,6 @@ static int __exit tpd_remove(struct platform_device *pdev)
omapdss_device_put(dssdev->next);
omapdss_device_unregister(&ddata->dssdev);
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- tpd_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
deleted file mode 100644
index 465120809eb3..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Generic MIPI DPI Panel Driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/regulator/consumer.h>
-#include <linux/backlight.h>
-
-#include <video/of_display_timing.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct backlight_device *backlight;
-
- struct gpio_desc *enable_gpio;
- struct regulator *vcc_supply;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int panel_dpi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void panel_dpi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static int panel_dpi_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- r = regulator_enable(ddata->vcc_supply);
- if (r) {
- src->ops->disable(src);
- return r;
- }
-
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
- backlight_enable(ddata->backlight);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void panel_dpi_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- backlight_disable(ddata->backlight);
-
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- regulator_disable(ddata->vcc_supply);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static const struct omap_dss_device_ops panel_dpi_ops = {
- .connect = panel_dpi_connect,
- .disconnect = panel_dpi_disconnect,
-
- .enable = panel_dpi_enable,
- .disable = panel_dpi_disable,
-
- .get_timings = panel_dpi_get_timings,
-};
-
-static int panel_dpi_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- int r;
- struct display_timing timing;
- struct gpio_desc *gpio;
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->enable_gpio = gpio;
-
- /*
- * Many different panels are supported by this driver and there are
- * probably very different needs for their reset pins in regards to
- * timing and order relative to the enable gpio. So for now it's just
- * ensured that the reset line isn't active.
- */
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
- if (IS_ERR(ddata->vcc_supply))
- return PTR_ERR(ddata->vcc_supply);
-
- ddata->backlight = devm_of_find_backlight(&pdev->dev);
-
- if (IS_ERR(ddata->backlight))
- return PTR_ERR(ddata->backlight);
-
- r = of_get_display_timing(node, "panel-timing", &timing);
- if (r) {
- dev_err(&pdev->dev, "failed to get video timing\n");
- return r;
- }
-
- videomode_from_timing(&timing, &ddata->vm);
-
- return 0;
-}
-
-static int panel_dpi_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- r = panel_dpi_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &pdev->dev;
- dssdev->ops = &panel_dpi_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit panel_dpi_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(dssdev);
-
- panel_dpi_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id panel_dpi_of_match[] = {
- { .compatible = "omapdss,panel-dpi", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, panel_dpi_of_match);
-
-static struct platform_driver panel_dpi_driver = {
- .probe = panel_dpi_probe,
- .remove = __exit_p(panel_dpi_remove),
- .driver = {
- .name = "panel-dpi",
- .of_match_table = panel_dpi_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(panel_dpi_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic MIPI DPI Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 29692a5217c5..741a5e324767 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -24,6 +24,8 @@
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_connector.h>
+
#include <video/mipi_display.h>
#include <video/of_display_timing.h>
@@ -41,6 +43,7 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
+ struct omap_dss_device *src;
struct videomode vm;
@@ -141,7 +144,7 @@ static void hw_guard_wait(struct panel_drv_data *ddata)
static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
u8 buf[1];
@@ -157,14 +160,14 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
}
static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 buf[2] = { dcs_cmd, param };
return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
@@ -173,7 +176,7 @@ static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
static int dsicm_sleep_in(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 cmd;
int r;
@@ -228,7 +231,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
static int dsicm_set_update_window(struct panel_drv_data *ddata,
u16 x, u16 y, u16 w, u16 h)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
u16 x1 = x;
u16 x2 = x + w - 1;
@@ -275,7 +278,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
static int dsicm_enter_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (ddata->ulps_enabled)
@@ -309,18 +312,13 @@ err:
static int dsicm_exit_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (!ddata->ulps_enabled)
return 0;
- r = src->ops->enable(src);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
- goto err1;
- }
-
+ src->ops->enable(src);
src->ops->dsi.enable_hs(src, ddata->channel, true);
r = _dsicm_enable_te(ddata, true);
@@ -347,7 +345,7 @@ err2:
enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
ddata->ulps_enabled = false;
}
-err1:
+
dsicm_queue_ulps_work(ddata);
return r;
@@ -366,7 +364,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata)
static int dsicm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r = 0;
int level;
@@ -414,7 +412,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 errors = 0;
int r;
@@ -446,7 +444,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 id1, id2, id3;
int r;
@@ -478,7 +476,7 @@ static ssize_t dsicm_store_ulps(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
unsigned long t;
int r;
@@ -528,7 +526,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
unsigned long t;
int r;
@@ -603,7 +601,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata)
static int dsicm_power_on(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 id1, id2, id3;
int r;
struct omap_dss_dsi_config dsi_config = {
@@ -649,11 +647,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
goto err_vddi;
}
- r = src->ops->enable(src);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
- goto err_vddi;
- }
+ src->ops->enable(src);
dsicm_hw_reset(ddata);
@@ -722,7 +716,7 @@ err_vpnl:
static void dsicm_power_off(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
src->ops->dsi.disable_video_output(src, ddata->channel);
@@ -776,6 +770,7 @@ static int dsicm_connect(struct omap_dss_device *src,
return r;
}
+ ddata->src = src;
return 0;
}
@@ -785,28 +780,17 @@ static void dsicm_disconnect(struct omap_dss_device *src,
struct panel_drv_data *ddata = to_panel_data(dst);
src->ops->dsi.release_vc(src, ddata->channel);
+ ddata->src = NULL;
}
-static int dsicm_enable(struct omap_dss_device *dssdev)
+static void dsicm_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
- dev_dbg(&ddata->pdev->dev, "enable\n");
-
mutex_lock(&ddata->lock);
- if (!omapdss_device_is_connected(dssdev)) {
- r = -ENODEV;
- goto err;
- }
-
- if (omapdss_device_is_enabled(dssdev)) {
- r = 0;
- goto err;
- }
-
src->ops->dsi.bus_lock(src);
r = dsicm_power_on(ddata);
@@ -816,27 +800,22 @@ static int dsicm_enable(struct omap_dss_device *dssdev)
if (r)
goto err;
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
mutex_unlock(&ddata->lock);
dsicm_bl_power(ddata, true);
- return 0;
+ return;
err:
- dev_dbg(&ddata->pdev->dev, "enable failed\n");
+ dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r);
mutex_unlock(&ddata->lock);
- return r;
}
static void dsicm_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
- dev_dbg(&ddata->pdev->dev, "disable\n");
-
dsicm_bl_power(ddata, false);
mutex_lock(&ddata->lock);
@@ -845,23 +824,19 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
src->ops->dsi.bus_lock(src);
- if (omapdss_device_is_enabled(dssdev)) {
- r = dsicm_wake_up(ddata);
- if (!r)
- dsicm_power_off(ddata);
- }
+ r = dsicm_wake_up(ddata);
+ if (!r)
+ dsicm_power_off(ddata);
src->ops->dsi.bus_unlock(src);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
mutex_unlock(&ddata->lock);
}
static void dsicm_framedone_cb(int err, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
src->ops->dsi.bus_unlock(src);
@@ -870,7 +845,7 @@ static void dsicm_framedone_cb(int err, void *data)
static irqreturn_t dsicm_te_isr(int irq, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int old;
int r;
@@ -896,7 +871,7 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work)
{
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
te_timeout_work.work);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
@@ -908,7 +883,7 @@ static int dsicm_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
@@ -954,7 +929,7 @@ err:
static int dsicm_sync(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
dev_dbg(&ddata->pdev->dev, "sync\n");
@@ -970,7 +945,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev)
static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (enable)
@@ -990,7 +965,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
mutex_lock(&ddata->lock);
@@ -1041,7 +1016,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
int first = 1;
int plen;
@@ -1123,7 +1098,7 @@ static void dsicm_ulps_work(struct work_struct *work)
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
ulps_work.work);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
mutex_lock(&ddata->lock);
@@ -1140,29 +1115,32 @@ static void dsicm_ulps_work(struct work_struct *work)
mutex_unlock(&ddata->lock);
}
-static void dsicm_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int dsicm_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ connector->display_info.width_mm = ddata->width_mm;
+ connector->display_info.height_mm = ddata->height_mm;
+
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static int dsicm_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
int ret = 0;
- if (vm->hactive != ddata->vm.hactive)
+ if (mode->hdisplay != ddata->vm.hactive)
ret = -EINVAL;
- if (vm->vactive != ddata->vm.vactive)
+ if (mode->vdisplay != ddata->vm.vactive)
ret = -EINVAL;
if (ret) {
dev_warn(dssdev->dev, "wrong resolution: %d x %d",
- vm->hactive, vm->vactive);
+ mode->hdisplay, mode->vdisplay);
dev_warn(dssdev->dev, "panel resolution: %d x %d",
ddata->vm.hactive, ddata->vm.vactive);
}
@@ -1170,15 +1148,6 @@ static int dsicm_check_timings(struct omap_dss_device *dssdev,
return ret;
}
-static void dsicm_get_size(struct omap_dss_device *dssdev,
- unsigned int *width, unsigned int *height)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *width = ddata->width_mm;
- *height = ddata->height_mm;
-}
-
static const struct omap_dss_device_ops dsicm_ops = {
.connect = dsicm_connect,
.disconnect = dsicm_disconnect,
@@ -1186,7 +1155,7 @@ static const struct omap_dss_device_ops dsicm_ops = {
.enable = dsicm_enable,
.disable = dsicm_disable,
- .get_timings = dsicm_get_timings,
+ .get_modes = dsicm_get_modes,
.check_timings = dsicm_check_timings,
};
@@ -1194,8 +1163,6 @@ static const struct omap_dss_driver dsicm_dss_driver = {
.update = dsicm_update,
.sync = dsicm_sync,
- .get_size = dsicm_get_size,
-
.enable_te = dsicm_enable_te,
.get_te = dsicm_get_te,
@@ -1305,8 +1272,10 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev->ops = &dsicm_ops;
dssdev->driver = &dsicm_dss_driver;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
@@ -1385,8 +1354,9 @@ static int __exit dsicm_remove(struct platform_device *pdev)
omapdss_device_unregister(dssdev);
- dsicm_disable(dssdev);
- omapdss_device_disconnect(dssdev->src, dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ dsicm_disable(dssdev);
+ omapdss_device_disconnect(ddata->src, dssdev);
sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index f6ef8ff964dd..99f2350d462c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -123,52 +123,28 @@ static void lb035q02_disconnect(struct omap_dss_device *src,
{
}
-static int lb035q02_enable(struct omap_dss_device *dssdev)
+static void lb035q02_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void lb035q02_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void lb035q02_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int lb035q02_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops lb035q02_ops = {
@@ -178,7 +154,7 @@ static const struct omap_dss_device_ops lb035q02_ops = {
.enable = lb035q02_enable,
.disable = lb035q02_disable,
- .get_timings = lb035q02_get_timings,
+ .get_modes = lb035q02_get_modes,
};
static int lb035q02_probe_of(struct spi_device *spi)
@@ -221,16 +197,19 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* DE is active LOW
* DATA needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
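The bus_flags update above (repeated in the other panel drivers below) switches to the newer DRM "drive edge" macro names; assuming the standard DRM definitions, the mapping is one-to-one and carries no behavioural change:

	DRM_BUS_FLAG_SYNC_POSEDGE     -> DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
	DRM_BUS_FLAG_SYNC_NEGEDGE     -> DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
	DRM_BUS_FLAG_PIXDATA_POSEDGE  -> DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
	DRM_BUS_FLAG_PIXDATA_NEGEDGE  -> DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE

The other two additions mark the device as a display entity (dssdev->display = true) that reports its modes through the new .get_modes() operation (OMAP_DSS_DEVICE_OP_MODES).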
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index f445de6369f7..c2409815a204 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -118,50 +118,26 @@ static void nec_8048_disconnect(struct omap_dss_device *src,
{
}
-static int nec_8048_enable(struct omap_dss_device *dssdev)
+static void nec_8048_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
gpiod_set_value_cansleep(ddata->res_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void nec_8048_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
gpiod_set_value_cansleep(ddata->res_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void nec_8048_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int nec_8048_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops nec_8048_ops = {
@@ -171,7 +147,7 @@ static const struct omap_dss_device_ops nec_8048_ops = {
.enable = nec_8048_enable,
.disable = nec_8048_disable,
- .get_timings = nec_8048_get_timings,
+ .get_modes = nec_8048_get_modes,
};
static int nec_8048_probe(struct spi_device *spi)
@@ -216,10 +192,13 @@ static int nec_8048_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 64b1369cb274..9c545de430f6 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -62,29 +62,22 @@ static void sharp_ls_disconnect(struct omap_dss_device *src,
{
}
-static int sharp_ls_enable(struct omap_dss_device *dssdev)
+static void sharp_ls_pre_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
int r;
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
- if (r != 0)
- return r;
+ if (r)
+ dev_err(dssdev->dev, "%s: failed to enable regulator\n",
+ __func__);
}
+}
- r = src->ops->enable(src);
- if (r) {
- regulator_disable(ddata->vcc);
- return r;
- }
+static void sharp_ls_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
/* wait couple of vsyncs until enabling the LCD */
msleep(50);
@@ -94,19 +87,11 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (ddata->ini_gpio)
gpiod_set_value_cansleep(ddata->ini_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void sharp_ls_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->ini_gpio)
gpiod_set_value_cansleep(ddata->ini_gpio, 0);
@@ -115,33 +100,35 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
gpiod_set_value_cansleep(ddata->resb_gpio, 0);
/* wait at least 5 vsyncs after disabling the LCD */
-
msleep(100);
+}
- src->ops->disable(src);
+static void sharp_ls_post_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
if (ddata->vcc)
regulator_disable(ddata->vcc);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int sharp_ls_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops sharp_ls_ops = {
.connect = sharp_ls_connect,
.disconnect = sharp_ls_disconnect,
+ .pre_enable = sharp_ls_pre_enable,
.enable = sharp_ls_enable,
.disable = sharp_ls_disable,
+ .post_disable = sharp_ls_post_disable,
- .get_timings = sharp_ls_get_timings,
+ .get_modes = sharp_ls_get_modes,
};
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
@@ -220,15 +207,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
dssdev->dev = &pdev->dev;
dssdev->ops = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* DATA needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -243,7 +233,10 @@ static int __exit sharp_ls_remove(struct platform_device *pdev)
omapdss_device_unregister(dssdev);
- sharp_ls_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev)) {
+ sharp_ls_disable(dssdev);
+ sharp_ls_post_disable(dssdev);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index e04663856b31..2038def14ba1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -516,17 +516,9 @@ static void acx565akm_disconnect(struct omap_dss_device *src,
static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- r = src->ops->enable(src);
- if (r) {
- pr_err("%s sdi enable failed\n", __func__);
- return r;
- }
-
/*FIXME tweak me */
msleep(50);
@@ -562,7 +554,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
dev_dbg(dssdev->dev, "%s\n", __func__);
@@ -585,56 +576,32 @@ static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
/* FIXME need to tweak this delay */
msleep(100);
-
- src->ops->disable(src);
}
-static int acx565akm_enable(struct omap_dss_device *dssdev)
+static void acx565akm_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
mutex_lock(&ddata->mutex);
- r = acx565akm_panel_power_on(dssdev);
+ acx565akm_panel_power_on(dssdev);
mutex_unlock(&ddata->mutex);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void acx565akm_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
mutex_lock(&ddata->mutex);
acx565akm_panel_power_off(dssdev);
mutex_unlock(&ddata->mutex);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void acx565akm_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int acx565akm_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops acx565akm_ops = {
@@ -644,7 +611,7 @@ static const struct omap_dss_device_ops acx565akm_ops = {
.enable = acx565akm_enable,
.disable = acx565akm_disable,
- .get_timings = acx565akm_get_timings,
+ .get_modes = acx565akm_get_modes,
};
static int acx565akm_probe(struct spi_device *spi)
@@ -739,10 +706,13 @@ static int acx565akm_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -766,7 +736,8 @@ static int acx565akm_remove(struct spi_device *spi)
omapdss_device_unregister(dssdev);
- acx565akm_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ acx565akm_disable(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index 7ddc8c574a61..2ad161e33106 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -35,6 +35,8 @@ struct panel_drv_data {
struct videomode vm;
+ struct backlight_device *backlight;
+
struct spi_device *spi_dev;
};
@@ -169,24 +171,12 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *src,
{
}
-static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
+static void td028ttec1_panel_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
+ int r = 0;
- dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n",
- dssdev->state);
+ dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state);
/* three times command zero */
r |= jbt_ret_write_0(ddata, 0x00);
@@ -197,8 +187,8 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
usleep_range(1000, 2000);
if (r) {
- dev_warn(dssdev->dev, "transfer error\n");
- goto transfer_err;
+ dev_warn(dssdev->dev, "%s: transfer error\n", __func__);
+ return;
}
/* deep standby out */
@@ -268,20 +258,17 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-transfer_err:
+ if (r)
+ dev_err(dssdev->dev, "%s: write error\n", __func__);
- return r ? -EIO : 0;
+ backlight_enable(ddata->backlight);
}
static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- if (!omapdss_device_is_enabled(dssdev))
- return;
+ backlight_disable(ddata->backlight);
dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
@@ -289,18 +276,14 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops td028ttec1_ops = {
@@ -310,7 +293,7 @@ static const struct omap_dss_device_ops td028ttec1_ops = {
.enable = td028ttec1_panel_enable,
.disable = td028ttec1_panel_disable,
- .get_timings = td028ttec1_panel_get_timings,
+ .get_modes = td028ttec1_panel_get_modes,
};
static int td028ttec1_panel_probe(struct spi_device *spi)
@@ -334,6 +317,10 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
if (ddata == NULL)
return -ENOMEM;
+ ddata->backlight = devm_of_find_backlight(&spi->dev);
+ if (IS_ERR(ddata->backlight))
+ return PTR_ERR(ddata->backlight);
+
dev_set_drvdata(&spi->dev, ddata);
ddata->spi_dev = spi;
@@ -344,15 +331,18 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* SYNC needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 8440fcb744d9..0b692fc7e5ea 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -320,22 +320,11 @@ static void tpo_td043_disconnect(struct omap_dss_device *src,
{
}
-static int tpo_td043_enable(struct omap_dss_device *dssdev)
+static void tpo_td043_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
int r;
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
/*
* If we are resuming from system suspend, SPI clocks might not be
* enabled yet, so we'll program the LCD from SPI PM resume callback.
@@ -343,38 +332,27 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (!ddata->spi_suspended) {
r = tpo_td043_power_on(ddata);
if (r) {
- src->ops->disable(src);
- return r;
+ dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n",
+ __func__, r);
+ return;
}
}
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void tpo_td043_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
if (!ddata->spi_suspended)
tpo_td043_power_off(ddata);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int tpo_td043_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops tpo_td043_ops = {
@@ -384,7 +362,7 @@ static const struct omap_dss_device_ops tpo_td043_ops = {
.enable = tpo_td043_enable,
.disable = tpo_td043_disable,
- .get_timings = tpo_td043_get_timings,
+ .get_modes = tpo_td043_get_modes,
};
static int tpo_td043_probe(struct spi_device *spi)
@@ -442,15 +420,18 @@ static int tpo_td043_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* SYNC needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -467,7 +448,8 @@ static int tpo_td043_remove(struct spi_device *spi)
omapdss_device_unregister(dssdev);
- tpo_td043_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ tpo_td043_disable(dssdev);
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 472f56e3de70..f8dad99013e8 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include "dss.h"
#include "omapdss.h"
@@ -112,13 +113,12 @@ void omapdss_device_put(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_device_put);
-struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
- unsigned int port)
+struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
{
struct omap_dss_device *dssdev;
list_for_each_entry(dssdev, &omapdss_devices_list, list) {
- if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port))
+ if (dssdev->dev->of_node == node)
return omapdss_device_get(dssdev);
}
@@ -126,13 +126,10 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
}
/*
- * Search for the next device starting at @from. The type argument specfies
- * which device types to consider when searching. Searching for multiple types
- * is supported by and'ing their type flags. Release the reference to the @from
- * device, and acquire a reference to the returned device if found.
+ * Search for the next output device starting at @from. Release the reference to
+ * the @from device, and acquire a reference to the returned device if found.
*/
-struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
- enum omap_dss_device_type type)
+struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
{
struct omap_dss_device *dssdev;
struct list_head *list;
@@ -160,15 +157,8 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
goto done;
}
- /*
- * Accept display entities if the display type is requested,
- * and output entities if the output type is requested.
- */
- if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) &&
- !dssdev->output_type)
- goto done;
- if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id &&
- dssdev->next)
+ if (dssdev->id &&
+ (dssdev->next || dssdev->bridge || dssdev->panel))
goto done;
}
@@ -183,7 +173,12 @@ done:
mutex_unlock(&omapdss_devices_lock);
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_get_next);
+EXPORT_SYMBOL(omapdss_device_next_output);
+
+static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+ return dssdev->dss;
+}
int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
@@ -191,7 +186,19 @@ int omapdss_device_connect(struct dss_device *dss,
{
int ret;
- dev_dbg(dst->dev, "connect\n");
+ dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
+ src ? dev_name(src->dev) : "NULL",
+ dst ? dev_name(dst->dev) : "NULL");
+
+ if (!dst) {
+ /*
+ * The destination is NULL when the source is connected to a
+ * bridge or panel instead of a DSS device. Stop here, we will
+ * attach the bridge or panel later when we will have a DRM
+ * encoder.
+ */
+ return src && (src->bridge || src->panel) ? 0 : -EINVAL;
+ }
if (omapdss_device_is_connected(dst))
return -EBUSY;
@@ -204,12 +211,6 @@ int omapdss_device_connect(struct dss_device *dss,
return ret;
}
- if (src) {
- WARN_ON(src->dst);
- dst->src = src;
- src->dst = dst;
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(omapdss_device_connect);
@@ -217,19 +218,20 @@ EXPORT_SYMBOL_GPL(omapdss_device_connect);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dev_dbg(dst->dev, "disconnect\n");
+ struct dss_device *dss = src ? src->dss : dst->dss;
- if (!dst->id && !omapdss_device_is_connected(dst)) {
- WARN_ON(dst->output_type);
+ dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n",
+ src ? dev_name(src->dev) : "NULL",
+ dst ? dev_name(dst->dev) : "NULL");
+
+ if (!dst) {
+ WARN_ON(!src->bridge && !src->panel);
return;
}
- if (src) {
- if (WARN_ON(dst != src->dst))
- return;
-
- dst->src = NULL;
- src->dst = NULL;
+ if (!dst->id && !omapdss_device_is_connected(dst)) {
+ WARN_ON(!dst->display);
+ return;
}
WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
@@ -239,6 +241,58 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
}
EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
+void omapdss_device_pre_enable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ omapdss_device_pre_enable(dssdev->next);
+
+ if (dssdev->ops->pre_enable)
+ dssdev->ops->pre_enable(dssdev);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_pre_enable);
+
+void omapdss_device_enable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ if (dssdev->ops->enable)
+ dssdev->ops->enable(dssdev);
+
+ omapdss_device_enable(dssdev->next);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_enable);
+
+void omapdss_device_disable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ omapdss_device_disable(dssdev->next);
+
+ if (dssdev->ops->disable)
+ dssdev->ops->disable(dssdev);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_disable);
+
+void omapdss_device_post_disable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ if (dssdev->ops->post_disable)
+ dssdev->ops->post_disable(dssdev);
+
+ omapdss_device_post_disable(dssdev->next);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_post_disable);
+
/* -----------------------------------------------------------------------------
* Components Handling
*/
@@ -249,6 +303,7 @@ struct omapdss_comp_node {
struct list_head list;
struct device_node *node;
bool dss_core_component;
+ const char *compat;
};
static bool omapdss_list_contains(const struct device_node *node)
@@ -266,13 +321,20 @@ static bool omapdss_list_contains(const struct device_node *node)
static void omapdss_walk_device(struct device *dev, struct device_node *node,
bool dss_core)
{
+ struct omapdss_comp_node *comp;
struct device_node *n;
- struct omapdss_comp_node *comp = devm_kzalloc(dev, sizeof(*comp),
- GFP_KERNEL);
+ const char *compat;
+ int ret;
+ ret = of_property_read_string(node, "compatible", &compat);
+ if (ret < 0)
+ return;
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (comp) {
comp->node = node;
comp->dss_core_component = dss_core;
+ comp->compat = compat;
list_add(&comp->list, &omapdss_comp_list);
}
@@ -312,12 +374,8 @@ void omapdss_gather_components(struct device *dev)
omapdss_walk_device(dev, dev->of_node, true);
- for_each_available_child_of_node(dev->of_node, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
+ for_each_available_child_of_node(dev->of_node, child)
omapdss_walk_device(dev, child, true);
- }
}
EXPORT_SYMBOL(omapdss_gather_components);
@@ -325,6 +383,8 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
if (comp->dss_core_component)
return true;
+ if (!strstarts(comp->compat, "omapdss,"))
+ return true;
if (omapdss_device_is_registered(comp->node))
return true;
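The four helpers added to dss/base.c above recurse along the dssdev->next chain, so a single caller can bring a complete source-to-sink pipeline up or down. The ordering implied by the recursion is: pre_enable runs sink first and unwinds back towards the source, enable runs source first and proceeds towards the sink, disable runs sink first, and post_disable runs source first. A minimal usage sketch (illustrative; the real caller is assumed to be the omapdrm encoder/CRTC code elsewhere in this series, not shown in this diff):

	static void example_pipeline_set(struct omap_dss_device *output, bool on)
	{
		if (on) {
			omapdss_device_pre_enable(output);
			omapdss_device_enable(output);
		} else {
			omapdss_device_disable(output);
			omapdss_device_post_disable(output);
		}
	}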
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 34b2a4ef63a4..e93f61a567a8 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -23,6 +23,9 @@
#include <linux/kernel.h>
#include <linux/of.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+
#include "omapdss.h"
static int disp_num_counter;
@@ -39,8 +42,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev)
if (id < 0)
id = disp_num_counter++;
- dssdev->alias_id = id;
-
/* Use 'label' property for name, if it exists */
of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
@@ -58,3 +59,22 @@ struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
return omapdss_device_get(output);
}
EXPORT_SYMBOL_GPL(omapdss_display_get);
+
+int omapdss_display_get_modes(struct drm_connector *connector,
+ const struct videomode *vm)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return 0;
+
+ drm_display_mode_from_videomode(vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(omapdss_display_get_modes);
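omapdss_display_get_modes() converts a panel's fixed struct videomode into a single preferred drm_display_mode and adds it to the connector, which is what the panel drivers above now return from their .get_modes() handlers. A sketch of how a connector implementation is expected to consume this, assuming the OMAP_DSS_DEVICE_OP_MODES flag gates the call (the actual consumer is presumably omap_connector.c, which is not part of this diff):

	static int example_connector_get_modes(struct drm_connector *connector,
					       struct omap_dss_device *display)
	{
		/* Displays converted in this patch set OMAP_DSS_DEVICE_OP_MODES
		 * and implement .get_modes(), typically by forwarding their
		 * fixed videomode through omapdss_display_get_modes(). */
		if ((display->ops_flags & OMAP_DSS_DEVICE_OP_MODES) &&
		    display->ops->get_modes)
			return display->ops->get_modes(display, connector);

		return 0;
	}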
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index ca4f3c4c6318..cc78dfa07f04 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -47,8 +47,8 @@ struct dpi_data {
struct mutex lock;
- struct videomode vm;
struct dss_lcd_mgr_config mgr_config;
+ unsigned long pixelclock;
int data_lines;
struct omap_dss_device output;
@@ -347,16 +347,15 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
static int dpi_set_mode(struct dpi_data *dpi)
{
- const struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
int r = 0;
if (dpi->pll)
r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
- vm->pixelclock, &fck, &lck_div, &pck_div);
+ dpi->pixelclock, &fck, &lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
@@ -378,7 +377,7 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
-static int dpi_display_enable(struct omap_dss_device *dssdev)
+static void dpi_display_enable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
struct omap_dss_device *out = &dpi->output;
@@ -386,12 +385,6 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&dpi->lock);
- if (!out->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err_no_out_mgr;
- }
-
if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
@@ -426,7 +419,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mutex_unlock(&dpi->lock);
- return 0;
+ return;
err_mgr_enable:
err_set_mode:
@@ -439,9 +432,7 @@ err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
err_reg_enable:
-err_no_out_mgr:
mutex_unlock(&dpi->lock);
- return r;
}
static void dpi_display_disable(struct omap_dss_device *dssdev)
@@ -467,7 +458,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -475,13 +466,13 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&dpi->lock);
- dpi->vm = *vm;
+ dpi->pixelclock = mode->clock * 1000;
mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
int lck_div, pck_div;
@@ -490,20 +481,20 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (vm->hactive % 8 != 0)
+ if (mode->hdisplay % 8 != 0)
return -EINVAL;
- if (vm->pixelclock == 0)
+ if (mode->clock == 0)
return -EINVAL;
if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(dpi, vm->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx);
if (!ok)
return -EINVAL;
@@ -515,7 +506,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
pck = fck / lck_div / pck_div;
- vm->pixelclock = pck;
+ mode->clock = pck / 1000;
return 0;
}
@@ -596,23 +587,15 @@ static int dpi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
- int r;
dpi_init_pll(dpi);
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void dpi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -651,25 +634,15 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->dev = &dpi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
- out->output_type = OMAP_DISPLAY_TYPE_DPI;
+ out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
out->of_ports = BIT(port_num);
out->ops = &dpi_ops;
out->owner = THIS_MODULE;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -681,9 +654,8 @@ static void dpi_uninit_output_port(struct device_node *port)
struct dpi_data *dpi = port->data;
struct omap_dss_device *out = &dpi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static const struct soc_device_attribute dpi_soc_devices[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 64fb788b6647..5202862d89b5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1342,12 +1342,9 @@ static int dsi_pll_enable(struct dss_pll *pll)
*/
dsi_enable_scp_clk(dsi);
- if (!dsi->vdds_dsi_enabled) {
- r = regulator_enable(dsi->vdds_dsi_reg);
- if (r)
- goto err0;
- dsi->vdds_dsi_enabled = true;
- }
+ r = regulator_enable(dsi->vdds_dsi_reg);
+ if (r)
+ goto err0;
/* XXX PLL does not come out of reset without this... */
dispc_pck_free_enable(dsi->dss->dispc, 1);
@@ -1372,36 +1369,25 @@ static int dsi_pll_enable(struct dss_pll *pll)
return 0;
err1:
- if (dsi->vdds_dsi_enabled) {
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+ regulator_disable(dsi->vdds_dsi_reg);
err0:
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
return r;
}
-static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
+static void dsi_pll_disable(struct dss_pll *pll)
{
+ struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
+
dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
- if (disconnect_lanes) {
- WARN_ON(!dsi->vdds_dsi_enabled);
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+
+ regulator_disable(dsi->vdds_dsi_reg);
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
- DSSDBG("PLL uninit done\n");
-}
-
-static void dsi_pll_disable(struct dss_pll *pll)
-{
- struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
-
- dsi_pll_uninit(dsi, true);
+ DSSDBG("PLL disable done\n");
}
static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
@@ -3753,19 +3739,13 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_dss_device *out = &dsi->output;
u8 data_type;
u16 word_count;
int r;
- if (!out->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- return -ENODEV;
- }
-
r = dsi_display_init_dispc(dsi);
if (r)
- goto err_init_dispc;
+ return r;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
switch (dsi->pix_fmt) {
@@ -3814,7 +3794,6 @@ err_mgr_enable:
}
err_pix_fmt:
dsi_display_uninit_dispc(dsi);
-err_init_dispc:
return r;
}
@@ -4096,11 +4075,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
r = dss_pll_enable(&dsi->pll);
if (r)
- goto err0;
+ return r;
r = dsi_configure_dsi_clocks(dsi);
if (r)
- goto err1;
+ goto err0;
dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
dsi->module_id == 0 ?
@@ -4108,6 +4087,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
DSSDBG("PLL OK\n");
+ if (!dsi->vdds_dsi_enabled) {
+ r = regulator_enable(dsi->vdds_dsi_reg);
+ if (r)
+ goto err1;
+
+ dsi->vdds_dsi_enabled = true;
+ }
+
r = dsi_cio_init(dsi);
if (r)
goto err2;
@@ -4136,10 +4123,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
err3:
dsi_cio_uninit(dsi);
err2:
- dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
err1:
- dss_pll_disable(&dsi->pll);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
err0:
+ dss_pll_disable(&dsi->pll);
+
return r;
}
@@ -4158,13 +4148,18 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsi);
- dsi_pll_uninit(dsi, disconnect_lanes);
+ dss_pll_disable(&dsi->pll);
+
+ if (disconnect_lanes) {
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
+ }
}
-static int dsi_display_enable(struct omap_dss_device *dssdev)
+static void dsi_display_enable(struct omap_dss_device *dssdev)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- int r = 0;
+ int r;
DSSDBG("dsi_display_enable\n");
@@ -4184,14 +4179,13 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
mutex_unlock(&dsi->lock);
- return 0;
+ return;
err_init_dsi:
dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
- return r;
}
static void dsi_display_disable(struct omap_dss_device *dssdev,
@@ -4888,21 +4882,12 @@ static int dsi_get_clocks(struct dsi_data *dsi)
static int dsi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void dsi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -5138,29 +5123,19 @@ static int dsi_init_output(struct dsi_data *dsi)
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
- out->output_type = OMAP_DISPLAY_TYPE_DSI;
+ out->type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
out->dispc_channel = dsi_get_channel(dsi);
out->ops = &dsi_ops;
out->owner = THIS_MODULE;
out->of_ports = BIT(0);
- out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_NEGEDGE;
-
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -5171,9 +5146,8 @@ static void dsi_uninit_output(struct dsi_data *dsi)
{
struct omap_dss_device *out = &dsi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int dsi_probe_of(struct dsi_data *dsi)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index 0422597ac6b0..b2094055c5fc 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -12,71 +12,25 @@
* more details.
*/
-#include <linux/device.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/seq_file.h>
#include "omapdss.h"
-static struct device_node *
-dss_of_port_get_parent_device(struct device_node *port)
-{
- struct device_node *np;
- int i;
-
- if (!port)
- return NULL;
-
- np = of_get_parent(port);
-
- for (i = 0; i < 2 && np; ++i) {
- struct property *prop;
-
- prop = of_find_property(np, "compatible", NULL);
-
- if (prop)
- return np;
-
- np = of_get_next_parent(np);
- }
-
- return NULL;
-}
-
struct omap_dss_device *
omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
{
- struct device_node *src_node;
- struct device_node *src_port;
- struct device_node *ep;
- struct omap_dss_device *src;
- u32 port_number = 0;
+ struct device_node *remote_node;
+ struct omap_dss_device *dssdev;
- /* Get the endpoint... */
- ep = of_graph_get_endpoint_by_regs(node, port, 0);
- if (!ep)
+ remote_node = of_graph_get_remote_node(node, port, 0);
+ if (!remote_node)
return NULL;
- /* ... and its remote port... */
- src_port = of_graph_get_remote_port(ep);
- of_node_put(ep);
- if (!src_port)
- return NULL;
-
- /* ... and the remote port's number and parent... */
- of_property_read_u32(src_port, "reg", &port_number);
- src_node = dss_of_port_get_parent_device(src_port);
- of_node_put(src_port);
- if (!src_node)
- return ERR_PTR(-EINVAL);
-
- /* ... and finally the connected device. */
- src = omapdss_find_device_by_port(src_node, port_number);
- of_node_put(src_node);
+ dssdev = omapdss_find_device_by_node(remote_node);
+ of_node_put(remote_node);
- return src ? src : ERR_PTR(-EPROBE_DEFER);
+ return dssdev ? dssdev : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
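A sketch of how a caller is expected to handle the simplified helper's return values; the function and its error handling below are illustrative, not part of this patch:

/* Hypothetical caller: look up the sink wired to port 0 of this device. */
static int example_find_sink(struct device *dev)
{
        struct omap_dss_device *dst;

        dst = omapdss_of_find_connected_device(dev->of_node, 0);
        if (!dst)
                return -ENODEV;         /* port not described in DT */
        if (IS_ERR(dst))
                return PTR_ERR(dst);    /* typically -EPROBE_DEFER */

        /* ... use dst ... */
        omapdss_device_put(dst);
        return 0;
}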
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 7553c7fc1c45..55e68863ef15 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1560,7 +1560,7 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
- for_each_dss_display(dssdev) {
+ for_each_dss_output(dssdev) {
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
dssdev->ops->disable(dssdev);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index aabdda394c9c..6339e2756b34 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -249,15 +249,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
}
static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
mutex_lock(&hdmi->lock);
- hdmi->cfg.vm = *vm;
+ drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
- dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
mutex_unlock(&hdmi->lock);
}
@@ -312,26 +312,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
hdmi_wp_audio_enable(&hd->wp, false);
}
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
- int r = 0;
+ int r;
DSSDBG("ENTER hdmi_display_enable\n");
mutex_lock(&hdmi->lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
- goto err0;
+ goto done;
}
if (hdmi->audio_configured) {
@@ -351,12 +345,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
hdmi->display_enabled = true;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+done:
mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
@@ -417,21 +407,12 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
static int hdmi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void hdmi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -698,7 +679,7 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &hdmi_ops;
@@ -706,19 +687,9 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->of_ports = BIT(0);
out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -729,9 +700,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int hdmi4_probe_of(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index 340383150fb9..ebf9c96d43ee 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
hdmi4_core_disable(core);
return 0;
}
@@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
if (err)
return err;
+ /*
+ * Initialize the CEC clock divider: CEC needs a 2 MHz clock, hence
+ * set the divider to 24 to get 48/24 = 2 MHz
+ */
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
/* Clear TX FIFO */
if (!hdmi_cec_clear_tx_fifo(adap)) {
pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
- return -EIO;
+ err = -EIO;
+ goto err_disable_clk;
}
/* Clear RX FIFO */
if (!hdmi_cec_clear_rx_fifo(adap)) {
pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
- return -EIO;
+ err = -EIO;
+ goto err_disable_clk;
}
/* Clear CEC interrupts */
@@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
}
return 0;
+
+err_disable_clk:
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
+ hdmi4_core_disable(core);
+
+ return err;
}
static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
@@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
return ret;
core->wp = wp;
- /*
- * Initialize CEC clock divider: CEC needs 2MHz clock hence
- * set the devider to 24 to get 48/24=2MHz clock
- */
- REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+ /* Disable clock initially, hdmi_cec_adap_enable() manages it */
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
ret = cec_register_adapter(core->adap, &pdev->dev);
if (ret < 0) {
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 813ba42f2753..e384b95ad857 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
else
acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
/*
- * The I2S input word length is twice the lenght given in the IEC-60958
+ * The I2S input word length is twice the length given in the IEC-60958
* status word. If the word size is greater than
* 20 bits, increment by one.
*/
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 9e8556f67a29..2955bbad13bb 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -248,15 +248,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
}
static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
mutex_lock(&hdmi->lock);
- hdmi->cfg.vm = *vm;
+ drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
- dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
mutex_unlock(&hdmi->lock);
}
@@ -320,26 +320,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
}
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
- int r = 0;
+ int r;
DSSDBG("ENTER hdmi_display_enable\n");
mutex_lock(&hdmi->lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
- goto err0;
+ goto done;
}
if (hdmi->audio_configured) {
@@ -359,12 +353,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
hdmi->display_enabled = true;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+done:
mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
@@ -422,21 +412,12 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
static int hdmi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void hdmi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -682,7 +663,7 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &hdmi_ops;
@@ -690,19 +671,9 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->of_ports = BIT(0);
out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -713,9 +684,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int hdmi5_probe_of(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 3bfb95d230e0..2b41c75ce988 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -184,6 +184,22 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
{},
};
+static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
+ { .compatible = "composite-video-connector" },
+ { .compatible = "hdmi-connector" },
+ { .compatible = "lgphilips,lb035q02" },
+ { .compatible = "nec,nl8048hl11" },
+ { .compatible = "panel-dsi-cm" },
+ { .compatible = "sharp,ls037v7dw01" },
+ { .compatible = "sony,acx565akm" },
+ { .compatible = "svideo-connector" },
+ { .compatible = "ti,opa362" },
+ { .compatible = "ti,tpd12s015" },
+ { .compatible = "toppoly,td028ttec1" },
+ { .compatible = "tpo,td028ttec1" },
+ { .compatible = "tpo,td043mtea1" },
+};
+
static int __init omapdss_boot_init(void)
{
struct device_node *dss, *child;
@@ -210,7 +226,7 @@ static int __init omapdss_boot_init(void)
n = list_first_entry(&dss_conv_list, struct dss_conv_node,
list);
- if (!n->root)
+ if (of_match_node(omapdss_of_fixups_whitelist, n->node))
omapdss_omapify_node(n->node);
list_del(&n->list);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 33e15cb77efa..0c734d1f89e1 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -19,7 +19,6 @@
#define __OMAP_DRM_DSS_H
#include <linux/list.h>
-#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <video/videomode.h>
@@ -68,6 +67,7 @@ struct dss_lcd_mgr_config;
struct snd_aes_iec958;
struct snd_cea_861_aud_if;
struct hdmi_avi_infoframe;
+struct drm_connector;
enum omap_display_type {
OMAP_DISPLAY_TYPE_NONE = 0,
@@ -360,15 +360,15 @@ struct omap_dss_device_ops {
void (*disconnect)(struct omap_dss_device *dssdev,
struct omap_dss_device *dst);
- int (*enable)(struct omap_dss_device *dssdev);
+ void (*pre_enable)(struct omap_dss_device *dssdev);
+ void (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev);
+ void (*post_disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
+ struct drm_display_mode *mode);
void (*set_timings)(struct omap_dss_device *dssdev,
- const struct videomode *vm);
+ const struct drm_display_mode *mode);
bool (*detect)(struct omap_dss_device *dssdev);
@@ -380,6 +380,9 @@ struct omap_dss_device_ops {
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ int (*get_modes)(struct omap_dss_device *dssdev,
+ struct drm_connector *connector);
+
union {
const struct omapdss_hdmi_ops hdmi;
const struct omapdss_dsi_ops dsi;
@@ -390,42 +393,40 @@ struct omap_dss_device_ops {
* enum omap_dss_device_ops_flag - Indicates which device ops are supported
* @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
* @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
- * @OMAP_DSS_DEVICE_OP_EDID: The device supports readind EDID
+ * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
+ * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
*/
enum omap_dss_device_ops_flag {
OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
OMAP_DSS_DEVICE_OP_HPD = BIT(1),
OMAP_DSS_DEVICE_OP_EDID = BIT(2),
-};
-
-enum omap_dss_device_type {
- OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0),
- OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1),
+ OMAP_DSS_DEVICE_OP_MODES = BIT(3),
};
struct omap_dss_device {
- struct kobject kobj;
struct device *dev;
struct module *owner;
struct dss_device *dss;
- struct omap_dss_device *src;
- struct omap_dss_device *dst;
struct omap_dss_device *next;
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
struct list_head list;
- unsigned int alias_id;
-
+ /*
+ * DSS type that this device generates (for DSS internal devices) or
+ * requires (for external encoders, connectors and panels). Must be a
+ * non-zero (different from OMAP_DISPLAY_TYPE_NONE) value.
+ */
enum omap_display_type type;
+
/*
- * DSS output type that this device generates (for DSS internal devices)
- * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE
- * for display devices (connectors and panels) and to non-zero value for
- * all other devices.
+ * True if the device is a display (panel or connector) at the end of
+ * the pipeline, false otherwise.
*/
- enum omap_display_type output_type;
+ bool display;
const char *name;
@@ -434,9 +435,6 @@ struct omap_dss_device {
unsigned long ops_flags;
u32 bus_flags;
- /* helper variable for driver suspend/resume */
- bool activate_after_resume;
-
enum omap_display_caps caps;
enum omap_dss_display_state state;
@@ -445,7 +443,6 @@ struct omap_dss_device {
/* DISPC channel for this output */
enum omap_channel dispc_channel;
- bool dispc_channel_connected;
/* output instance */
enum omap_dss_output_id id;
@@ -465,9 +462,6 @@ struct omap_dss_driver {
int (*memory_read)(struct omap_dss_device *dssdev,
void *buf, size_t size,
u16 x, u16 y, u16 w, u16 h);
-
- void (*get_size)(struct omap_dss_device *dssdev,
- unsigned int *width, unsigned int *height);
};
struct dss_device *omapdss_get_dss(void);
@@ -477,32 +471,35 @@ static inline bool omapdss_is_initialized(void)
return !!omapdss_get_dss();
}
-#define for_each_dss_display(d) \
- while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL)
void omapdss_display_init(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
+int omapdss_display_get_modes(struct drm_connector *connector,
+ const struct videomode *vm);
void omapdss_device_register(struct omap_dss_device *dssdev);
void omapdss_device_unregister(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
void omapdss_device_put(struct omap_dss_device *dssdev);
-struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
- unsigned int port);
-struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
- enum omap_dss_device_type type);
+struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node);
int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
+void omapdss_device_pre_enable(struct omap_dss_device *dssdev);
+void omapdss_device_enable(struct omap_dss_device *dssdev);
+void omapdss_device_disable(struct omap_dss_device *dssdev);
+void omapdss_device_post_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
int omap_dss_get_num_overlays(void);
#define for_each_dss_output(d) \
- while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL)
-int omapdss_output_validate(struct omap_dss_device *out);
+ while ((d = omapdss_device_next_output(d)) != NULL)
+struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from);
+int omapdss_device_init_output(struct omap_dss_device *out);
+void omapdss_device_cleanup_output(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -511,11 +508,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
-static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
-{
- return dssdev->src;
-}
-
static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
{
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
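The split enable operations declared above are meant to be called in pairs. A minimal sketch of the assumed call order when powering a pipeline up and down (the helper name is hypothetical):

static void example_set_pipeline_power(struct omap_dss_device *display,
                                       bool enable)
{
        if (enable) {
                /* Prepare every device, then start scanout/streaming. */
                omapdss_device_pre_enable(display);
                omapdss_device_enable(display);
        } else {
                /* Stop streaming first, then complete the power-down. */
                omapdss_device_disable(display);
                omapdss_device_post_disable(display);
        }
}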
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 18505bc70f7e..10a9ee5cdc61 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -20,20 +20,48 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_panel.h>
#include "dss.h"
#include "omapdss.h"
-int omapdss_output_validate(struct omap_dss_device *out)
+int omapdss_device_init_output(struct omap_dss_device *out)
{
- if (out->next && out->output_type != out->next->type) {
+ struct device_node *remote_node;
+
+ remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+ if (!remote_node) {
+ dev_dbg(out->dev, "failed to find video sink\n");
+ return 0;
+ }
+
+ out->next = omapdss_find_device_by_node(remote_node);
+ out->bridge = of_drm_find_bridge(remote_node);
+ out->panel = of_drm_find_panel(remote_node);
+ if (IS_ERR(out->panel))
+ out->panel = NULL;
+
+ of_node_put(remote_node);
+
+ if (out->next && out->type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
+ omapdss_device_put(out->next);
+ out->next = NULL;
return -EINVAL;
}
- return 0;
+ return out->next || out->bridge || out->panel ? 0 : -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(omapdss_device_init_output);
+
+void omapdss_device_cleanup_output(struct omap_dss_device *out)
+{
+ if (out->next)
+ omapdss_device_put(out->next);
}
-EXPORT_SYMBOL(omapdss_output_validate);
+EXPORT_SYMBOL(omapdss_device_cleanup_output);
int dss_install_mgr_ops(struct dss_device *dss,
const struct dss_mgr_ops *mgr_ops,
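Taken together, the two helpers above replace the open-coded sink lookup that every internal encoder used to carry. A sketch of the pattern the encoders now follow (foo_device and the function names are placeholders):

static int foo_init_output(struct foo_device *foo)
{
        struct omap_dss_device *out = &foo->output;
        int r;

        /* out->dev, out->id, out->type, out->ops, ... are filled in first. */

        r = omapdss_device_init_output(out);    /* resolves next/bridge/panel */
        if (r < 0)
                return r;

        omapdss_device_register(out);
        return 0;
}

static void foo_uninit_output(struct foo_device *foo)
{
        omapdss_device_unregister(&foo->output);
        omapdss_device_cleanup_output(&foo->output);
}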
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index b2fe2387037a..7aae52984fed 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -37,7 +37,7 @@ struct sdi_device {
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
- struct videomode vm;
+ unsigned long pixelclock;
int datapairs;
struct omap_dss_device output;
@@ -129,27 +129,22 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
-static int sdi_display_enable(struct omap_dss_device *dssdev)
+static void sdi_display_enable(struct omap_dss_device *dssdev)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int r;
- if (!sdi->output.dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- return -ENODEV;
- }
-
r = regulator_enable(sdi->vdds_sdi_reg);
if (r)
- goto err_reg_enable;
+ return;
r = dispc_runtime_get(sdi->dss->dispc);
if (r)
goto err_get_dispc;
- r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, sdi->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -185,7 +180,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_mgr_enable;
- return 0;
+ return;
err_mgr_enable:
dss_sdi_disable(sdi->dss);
@@ -195,8 +190,6 @@ err_calc_clock_div:
dispc_runtime_put(sdi->dss->dispc);
err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
-err_reg_enable:
- return r;
}
static void sdi_display_disable(struct omap_dss_device *dssdev)
@@ -213,36 +206,37 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- sdi->vm = *vm;
+ sdi->pixelclock = mode->clock * 1000;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
struct dispc_clock_info dispc_cinfo;
+ unsigned long pixelclock = mode->clock * 1000;
unsigned long fck;
unsigned long pck;
int r;
- if (vm->pixelclock == 0)
+ if (pixelclock == 0)
return -EINVAL;
- r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
if (r)
return r;
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != vm->pixelclock) {
+ if (pck != pixelclock) {
DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
- vm->pixelclock, pck);
+ pixelclock, pck);
- vm->pixelclock = pck;
+ mode->clock = pck / 1000;
}
return 0;
@@ -251,21 +245,12 @@ static int sdi_check_timings(struct omap_dss_device *dssdev,
static int sdi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void sdi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -287,29 +272,19 @@ static int sdi_init_output(struct sdi_device *sdi)
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
- out->output_type = OMAP_DISPLAY_TYPE_SDI;
+ out->type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
out->of_ports = BIT(1);
out->ops = &sdi_ops;
out->owner = THIS_MODULE;
- out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */
- | DRM_BUS_FLAG_SYNC_POSEDGE;
-
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 1);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -318,9 +293,8 @@ static int sdi_init_output(struct sdi_device *sdi)
static void sdi_uninit_output(struct sdi_device *sdi)
{
- if (sdi->output.next)
- omapdss_device_put(sdi->output.next);
omapdss_device_unregister(&sdi->output);
+ omapdss_device_cleanup_output(&sdi->output);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index b5f52727f8b1..da43b865d973 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -267,63 +267,40 @@ enum venc_videomode {
VENC_MODE_NTSC,
};
-static const struct videomode omap_dss_pal_vm = {
- .hactive = 720,
- .vactive = 574,
- .pixelclock = 13500000,
- .hsync_len = 64,
- .hfront_porch = 12,
- .hback_porch = 68,
- .vsync_len = 5,
- .vfront_porch = 5,
- .vback_porch = 41,
-
- .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
- DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE,
+static const struct drm_display_mode omap_dss_pal_mode = {
+ .hdisplay = 720,
+ .hsync_start = 732,
+ .hsync_end = 796,
+ .htotal = 864,
+ .vdisplay = 574,
+ .vsync_start = 579,
+ .vsync_end = 584,
+ .vtotal = 625,
+ .clock = 13500,
+
+ .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_NVSYNC,
};
-static const struct videomode omap_dss_ntsc_vm = {
- .hactive = 720,
- .vactive = 482,
- .pixelclock = 13500000,
- .hsync_len = 64,
- .hfront_porch = 16,
- .hback_porch = 58,
- .vsync_len = 6,
- .vfront_porch = 6,
- .vback_porch = 31,
-
- .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
- DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE,
+static const struct drm_display_mode omap_dss_ntsc_mode = {
+ .hdisplay = 720,
+ .hsync_start = 736,
+ .hsync_end = 800,
+ .htotal = 858,
+ .vdisplay = 482,
+ .vsync_start = 488,
+ .vsync_end = 494,
+ .vtotal = 525,
+ .clock = 13500,
+
+ .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_NVSYNC,
};
-static enum venc_videomode venc_get_videomode(const struct videomode *vm)
-{
- if (!(vm->flags & DISPLAY_FLAGS_INTERLACED))
- return VENC_MODE_UNKNOWN;
-
- if (vm->pixelclock == omap_dss_pal_vm.pixelclock &&
- vm->hactive == omap_dss_pal_vm.hactive &&
- vm->vactive == omap_dss_pal_vm.vactive)
- return VENC_MODE_PAL;
-
- if (vm->pixelclock == omap_dss_ntsc_vm.pixelclock &&
- vm->hactive == omap_dss_ntsc_vm.hactive &&
- vm->vactive == omap_dss_ntsc_vm.vactive)
- return VENC_MODE_NTSC;
-
- return VENC_MODE_UNKNOWN;
-}
-
struct venc_device {
struct platform_device *pdev;
void __iomem *base;
struct mutex venc_lock;
- u32 wss_data;
struct regulator *vdda_dac_reg;
struct dss_device *dss;
@@ -331,7 +308,7 @@ struct venc_device {
struct clk *tv_dac_clk;
- struct videomode vm;
+ const struct venc_config *config;
enum omap_dss_venc_type type;
bool invert_polarity;
bool requires_tv_dac_clk;
@@ -367,8 +344,7 @@ static void venc_write_config(struct venc_device *venc,
venc_write_reg(venc, VENC_BLACK_LEVEL, config->black_level);
venc_write_reg(venc, VENC_BLANK_LEVEL, config->blank_level);
venc_write_reg(venc, VENC_M_CONTROL, config->m_control);
- venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc->wss_data);
+ venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data);
venc_write_reg(venc, VENC_S_CARR, config->s_carr);
venc_write_reg(venc, VENC_L21__WC_CTL, config->l21__wc_ctl);
venc_write_reg(venc, VENC_SAVID__EAVID, config->savid__eavid);
@@ -452,18 +428,6 @@ static void venc_runtime_put(struct venc_device *venc)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(const struct videomode *vm)
-{
- switch (venc_get_videomode(vm)) {
- default:
- WARN_ON_ONCE(1);
- case VENC_MODE_PAL:
- return &venc_config_pal_trm;
- case VENC_MODE_NTSC:
- return &venc_config_ntsc_trm;
- }
-}
-
static int venc_power_on(struct venc_device *venc)
{
u32 l;
@@ -474,7 +438,7 @@ static int venc_power_on(struct venc_device *venc)
goto err0;
venc_reset(venc);
- venc_write_config(venc, venc_timings_to_config(&venc->vm));
+ venc_write_config(venc, venc->config);
dss_set_venc_output(venc->dss, venc->type);
dss_set_dac_pwrdn_bgz(venc->dss, 1);
@@ -524,33 +488,17 @@ static void venc_power_off(struct venc_device *venc)
venc_runtime_put(venc);
}
-static int venc_display_enable(struct omap_dss_device *dssdev)
+static void venc_display_enable(struct omap_dss_device *dssdev)
{
struct venc_device *venc = dssdev_to_venc(dssdev);
- int r;
DSSDBG("venc_display_enable\n");
mutex_lock(&venc->venc_lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("Failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
- r = venc_power_on(venc);
- if (r)
- goto err0;
-
- venc->wss_data = 0;
+ venc_power_on(venc);
mutex_unlock(&venc->venc_lock);
-
- return 0;
-err0:
- mutex_unlock(&venc->venc_lock);
- return r;
}
static void venc_display_disable(struct omap_dss_device *dssdev)
@@ -566,30 +514,70 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&venc->venc_lock);
}
-static void venc_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int venc_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
- struct venc_device *venc = dssdev_to_venc(dssdev);
+ static const struct drm_display_mode *modes[] = {
+ &omap_dss_pal_mode,
+ &omap_dss_ntsc_mode,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, modes[i]);
+ if (!mode)
+ return i;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
- mutex_lock(&venc->venc_lock);
- *vm = venc->vm;
- mutex_unlock(&venc->venc_lock);
+ return ARRAY_SIZE(modes);
+}
+
+static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
+{
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ return VENC_MODE_UNKNOWN;
+
+ if (mode->clock == omap_dss_pal_mode.clock &&
+ mode->hdisplay == omap_dss_pal_mode.hdisplay &&
+ mode->vdisplay == omap_dss_pal_mode.vdisplay)
+ return VENC_MODE_PAL;
+
+ if (mode->clock == omap_dss_ntsc_mode.clock &&
+ mode->hdisplay == omap_dss_ntsc_mode.hdisplay &&
+ mode->vdisplay == omap_dss_ntsc_mode.vdisplay)
+ return VENC_MODE_NTSC;
+
+ return VENC_MODE_UNKNOWN;
}
static void venc_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct venc_device *venc = dssdev_to_venc(dssdev);
+ enum venc_videomode venc_mode = venc_get_videomode(mode);
DSSDBG("venc_set_timings\n");
mutex_lock(&venc->venc_lock);
- /* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc->vm, vm, sizeof(*vm)))
- venc->wss_data = 0;
+ switch (venc_mode) {
+ default:
+ WARN_ON_ONCE(1);
+ /* Fall-through */
+ case VENC_MODE_PAL:
+ venc->config = &venc_config_pal_trm;
+ break;
- venc->vm = *vm;
+ case VENC_MODE_NTSC:
+ venc->config = &venc_config_ntsc_trm;
+ break;
+ }
dispc_set_tv_pclk(venc->dss->dispc, 13500000);
@@ -597,22 +585,26 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
static int venc_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
DSSDBG("venc_check_timings\n");
- switch (venc_get_videomode(vm)) {
+ switch (venc_get_videomode(mode)) {
case VENC_MODE_PAL:
- *vm = omap_dss_pal_vm;
- return 0;
+ drm_mode_copy(mode, &omap_dss_pal_mode);
+ break;
case VENC_MODE_NTSC:
- *vm = omap_dss_ntsc_vm;
- return 0;
+ drm_mode_copy(mode, &omap_dss_ntsc_mode);
+ break;
default:
return -EINVAL;
}
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_name(mode);
+ return 0;
}
static int venc_dump_regs(struct seq_file *s, void *p)
@@ -695,21 +687,12 @@ static int venc_get_clocks(struct venc_device *venc)
static int venc_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void venc_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -721,8 +704,9 @@ static const struct omap_dss_device_ops venc_ops = {
.disable = venc_display_disable,
.check_timings = venc_check_timings,
- .get_timings = venc_get_timings,
.set_timings = venc_set_timings,
+
+ .get_modes = venc_get_modes,
};
/* -----------------------------------------------------------------------------
@@ -776,26 +760,17 @@ static int venc_init_output(struct venc_device *venc)
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
- out->output_type = OMAP_DISPLAY_TYPE_VENC;
+ out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &venc_ops;
out->owner = THIS_MODULE;
out->of_ports = BIT(0);
+ out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -804,9 +779,8 @@ static int venc_init_output(struct venc_device *venc)
static void venc_uninit_output(struct venc_device *venc)
{
- if (venc->output.next)
- omapdss_device_put(venc->output.next);
omapdss_device_unregister(&venc->output);
+ omapdss_device_cleanup_output(&venc->output);
}
static int venc_probe_of(struct venc_device *venc)
@@ -878,8 +852,7 @@ static int venc_probe(struct platform_device *pdev)
mutex_init(&venc->venc_lock);
- venc->wss_data = 0;
- venc->vm = omap_dss_pal_vm;
+ venc->config = &venc_config_pal_trm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
venc->base = devm_ioremap_resource(&pdev->dev, venc_mem);
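The hard-coded PAL and NTSC drm_display_mode tables above are straight conversions of the removed videomode tables: hsync_start = hactive + hfront_porch, hsync_end = hsync_start + hsync_len and htotal = hsync_end + hback_porch give 732, 796 and 864 for PAL (and 579, 584, 625 on the vertical axis), with the 13500000 Hz pixel clock becoming 13500 kHz. The same result can be reproduced with the standard conversion helper; this illustrative check is not part of the patch:

static void example_check_pal_conversion(void)
{
        static const struct videomode pal_vm = {
                .hactive = 720, .hfront_porch = 12,
                .hsync_len = 64, .hback_porch = 68,
                .vactive = 574, .vfront_porch = 5,
                .vsync_len = 5, .vback_porch = 41,
                .pixelclock = 13500000,
        };
        struct drm_display_mode mode = { { 0 } };

        drm_display_mode_from_videomode(&pal_vm, &mode);
        /* mode.htotal == 864, mode.vtotal == 625, mode.clock == 13500 */
}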
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 9da94d10782a..5967283934e1 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "omap_drv.h"
@@ -30,24 +31,27 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *output;
- struct omap_dss_device *display;
struct omap_dss_device *hpd;
bool hdmi_mode;
};
static void omap_connector_hpd_notify(struct drm_connector *connector,
- struct omap_dss_device *src,
enum drm_connector_status status)
{
- if (status == connector_status_disconnected) {
- /*
- * If the source is an HDMI encoder, notify it of disconnection.
- * This is required to let the HDMI encoder reset any internal
- * state related to connection status, such as the CEC address.
- */
- if (src && src->type == OMAP_DISPLAY_TYPE_HDMI &&
- src->ops->hdmi.lost_hotplug)
- src->ops->hdmi.lost_hotplug(src);
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev;
+
+ if (status != connector_status_disconnected)
+ return;
+
+ /*
+ * Notify all devices in the pipeline of disconnection. This is required
+ * to let the HDMI encoders reset their internal state related to
+ * connection status, such as the CEC address.
+ */
+ for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
+ if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
+ dssdev->ops->hdmi.lost_hotplug(dssdev);
}
}
@@ -67,7 +71,7 @@ static void omap_connector_hpd_cb(void *cb_data,
if (old_status == status)
return;
- omap_connector_hpd_notify(connector, omap_connector->hpd, status);
+ omap_connector_hpd_notify(connector, status);
drm_kms_helper_hotplug_event(dev);
}
@@ -103,20 +107,20 @@ omap_connector_find_device(struct drm_connector *connector,
enum omap_dss_device_ops_flag op)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *d;
- for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
- if (dssdev->ops_flags & op)
- return dssdev;
+ for (d = omap_connector->output; d; d = d->next) {
+ if (d->ops_flags & op)
+ dssdev = d;
}
- return NULL;
+ return dssdev;
}
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
- struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev;
enum drm_connector_status status;
@@ -128,13 +132,12 @@ static enum drm_connector_status omap_connector_detect(
? connector_status_connected
: connector_status_disconnected;
- omap_connector_hpd_notify(connector, dssdev->src, status);
+ omap_connector_hpd_notify(connector, status);
} else {
- switch (omap_connector->display->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- case OMAP_DISPLAY_TYPE_SDI:
- case OMAP_DISPLAY_TYPE_DSI:
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
status = connector_status_connected;
break;
default:
@@ -143,7 +146,7 @@ static enum drm_connector_status omap_connector_detect(
}
}
- VERB("%s: %d (force=%d)", omap_connector->display->name, status, force);
+ VERB("%s: %d (force=%d)", connector->name, status, force);
return status;
}
@@ -152,7 +155,7 @@ static void omap_connector_destroy(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- DBG("%s", omap_connector->display->name);
+ DBG("%s", connector->name);
if (omap_connector->hpd) {
struct omap_dss_device *hpd = omap_connector->hpd;
@@ -166,7 +169,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
omapdss_device_put(omap_connector->output);
- omapdss_device_put(omap_connector->display);
kfree(omap_connector);
}
@@ -212,10 +214,8 @@ static int omap_connector_get_modes(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev;
- struct drm_display_mode *mode;
- struct videomode vm = {0};
- DBG("%s", omap_connector->display->name);
+ DBG("%s", connector->name);
/*
* If display exposes EDID, then we parse that in the normal way to
@@ -227,89 +227,71 @@ static int omap_connector_get_modes(struct drm_connector *connector)
return omap_connector_get_modes_edid(connector, dssdev);
/*
- * Otherwise we have either a fixed resolution panel or an output that
- * doesn't support modes discovery (e.g. DVI or VGA with the DDC bus
- * unconnected, or analog TV). Start by querying the size.
+ * Otherwise if the display pipeline reports modes (e.g. with a fixed
+ * resolution panel or an analog TV output), query it.
*/
- dssdev = omap_connector->display;
- if (dssdev->driver && dssdev->driver->get_size)
- dssdev->driver->get_size(dssdev,
- &connector->display_info.width_mm,
- &connector->display_info.height_mm);
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_MODES);
+ if (dssdev)
+ return dssdev->ops->get_modes(dssdev, connector);
/*
- * Iterate over the pipeline to find the first device that can provide
- * timing information. If we can't find any, we just let the KMS core
- * add the default modes.
+ * Otherwise if the display pipeline uses a drm_panel, we delegate the
+ * operation to the panel API.
*/
- for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
- if (dssdev->ops->get_timings)
- break;
- }
- if (!dssdev)
- return 0;
+ if (omap_connector->output->panel)
+ return drm_panel_get_modes(omap_connector->output->panel);
- /* Add a single mode corresponding to the fixed panel timings. */
- mode = drm_mode_create(connector->dev);
- if (!mode)
- return 0;
+ /*
+ * We can't retrieve modes, which can happen for instance for a DVI or
+ * VGA output with the DDC bus unconnected. The KMS core will add the
+ * default modes.
+ */
+ return 0;
+}
- dssdev->ops->get_timings(dssdev, &vm);
+enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int ret;
- drm_display_mode_from_videomode(&vm, mode);
+ drm_mode_copy(adjusted_mode, mode);
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
+ for (; dssdev; dssdev = dssdev->next) {
+ if (!dssdev->ops->check_timings)
+ continue;
+
+ ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
+ if (ret)
+ return MODE_BAD;
+ }
- return 1;
+ return MODE_OK;
}
-static int omap_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- enum omap_channel channel = omap_connector->output->dispc_channel;
- struct omap_drm_private *priv = connector->dev->dev_private;
- struct omap_dss_device *dssdev;
- struct videomode vm = {0};
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *new_mode;
- int r, ret = MODE_BAD;
-
- drm_display_mode_to_videomode(mode, &vm);
- mode->vrefresh = drm_mode_vrefresh(mode);
+ struct drm_display_mode new_mode = { { 0 } };
+ enum drm_mode_status status;
- r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
- if (r)
+ status = omap_connector_mode_fixup(omap_connector->output, mode,
+ &new_mode);
+ if (status != MODE_OK)
goto done;
- for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
- continue;
-
- r = dssdev->ops->check_timings(dssdev, &vm);
- if (r)
- goto done;
- }
-
- /* check if vrefresh is still valid */
- new_mode = drm_mode_duplicate(dev, mode);
- if (!new_mode)
- return MODE_BAD;
-
- new_mode->clock = vm.pixelclock / 1000;
- new_mode->vrefresh = 0;
- if (mode->vrefresh == drm_mode_vrefresh(new_mode))
- ret = MODE_OK;
- drm_mode_destroy(dev, new_mode);
+ /* Check if vrefresh is still valid. */
+ if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode))
+ status = MODE_NOCLOCK;
done:
DBG("connector: mode %s: " DRM_MODE_FMT,
- (ret == MODE_OK) ? "valid" : "invalid",
+ (status == MODE_OK) ? "valid" : "invalid",
DRM_MODE_ARG(mode));
- return ret;
+ return status;
}
static const struct drm_connector_funcs omap_connector_funcs = {
@@ -326,9 +308,16 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
-static int omap_connector_get_type(struct omap_dss_device *display)
+static int omap_connector_get_type(struct omap_dss_device *output)
{
- switch (display->type) {
+ struct omap_dss_device *display;
+ enum omap_display_type type;
+
+ display = omapdss_display_get(output);
+ type = display->type;
+ omapdss_device_put(display);
+
+ switch (type) {
case OMAP_DISPLAY_TYPE_HDMI:
return DRM_MODE_CONNECTOR_HDMIA;
case OMAP_DISPLAY_TYPE_DVI:
@@ -351,28 +340,26 @@ static int omap_connector_get_type(struct omap_dss_device *display)
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
- struct omap_dss_device *display,
struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
struct omap_dss_device *dssdev;
- DBG("%s", display->name);
+ DBG("%s", output->name);
omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
if (!omap_connector)
goto fail;
omap_connector->output = omapdss_device_get(output);
- omap_connector->display = omapdss_device_get(display);
connector = &omap_connector->base;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- omap_connector_get_type(display));
+ omap_connector_get_type(output));
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
/*
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 854099801649..608085219336 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -22,6 +22,8 @@
#include <linux/types.h>
+enum drm_mode_status;
+
struct drm_connector;
struct drm_device;
struct drm_encoder;
@@ -29,12 +31,12 @@ struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
- struct omap_dss_device *display,
struct drm_encoder *encoder);
-struct drm_encoder *omap_connector_attached_encoder(
- struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
void omap_connector_enable_hpd(struct drm_connector *connector);
void omap_connector_disable_hpd(struct drm_connector *connector);
+enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index d99e24dcc0bf..5a29bf01c0e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
if (WARN_ON(omap_crtc->enabled == enable))
return;
- if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) {
+ if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
@@ -390,6 +390,15 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct videomode vm = {0};
+ int r;
+
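+ /* Reject modes that the DISPC manager cannot generate on this channel. */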
+ drm_display_mode_to_videomode(mode, &vm);
+ r = priv->dispc_ops->mgr_check_timings(priv->dispc, omap_crtc->channel,
+ &vm);
+ if (r)
+ return r;
/* Check for bandwidth limit */
if (priv->max_bandwidth) {
@@ -657,7 +666,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
&omap_crtc_funcs, NULL);
if (ret < 0) {
dev_err(dev->dev, "%s(): could not init crtc for: %s\n",
- __func__, pipe->display->name);
+ __func__, pipe->output->name);
kfree(omap_crtc);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index f8292278f57d..1b9b6f5e48e1 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -23,6 +23,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_panel.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -137,12 +138,13 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ if (pipe->output->panel)
+ drm_panel_detach(pipe->output->panel);
+
omapdss_device_disconnect(NULL, pipe->output);
omapdss_device_put(pipe->output);
- omapdss_device_put(pipe->display);
pipe->output = NULL;
- pipe->display = NULL;
}
memset(&priv->channels, 0, sizeof(priv->channels));
@@ -150,33 +152,17 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
priv->num_pipes = 0;
}
-static int omap_compare_pipes(const void *a, const void *b)
-{
- const struct omap_drm_pipeline *pipe1 = a;
- const struct omap_drm_pipeline *pipe2 = b;
-
- if (pipe1->display->alias_id > pipe2->display->alias_id)
- return 1;
- else if (pipe1->display->alias_id < pipe2->display->alias_id)
- return -1;
- return 0;
-}
-
static int omap_connect_pipelines(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
struct omap_dss_device *output = NULL;
- unsigned int i;
int r;
- if (!omapdss_stack_is_ready())
- return -EPROBE_DEFER;
-
for_each_dss_output(output) {
r = omapdss_device_connect(priv->dss, NULL, output);
if (r == -EPROBE_DEFER) {
omapdss_device_put(output);
- goto cleanup;
+ return r;
} else if (r) {
dev_warn(output->dev, "could not connect output %s\n",
output->name);
@@ -185,7 +171,6 @@ static int omap_connect_pipelines(struct drm_device *ddev)
pipe = &priv->pipes[priv->num_pipes++];
pipe->output = omapdss_device_get(output);
- pipe->display = omapdss_display_get(output);
if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) {
/* To balance the 'for_each_dss_output' loop */
@@ -195,36 +180,19 @@ static int omap_connect_pipelines(struct drm_device *ddev)
}
}
- /* Sort the list by DT aliases */
- sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
- omap_compare_pipes, NULL);
-
- /*
- * Populate the pipeline lookup table by DISPC channel. Only one display
- * is allowed per channel.
- */
- for (i = 0; i < priv->num_pipes; ++i) {
- struct omap_drm_pipeline *pipe = &priv->pipes[i];
- enum omap_channel channel = pipe->output->dispc_channel;
-
- if (WARN_ON(priv->channels[channel] != NULL)) {
- r = -EINVAL;
- goto cleanup;
- }
-
- priv->channels[channel] = pipe;
- }
-
return 0;
+}
-cleanup:
- /*
- * if we are deferring probe, we disconnect the devices we previously
- * connected
- */
- omap_disconnect_pipelines(ddev);
+static int omap_compare_pipelines(const void *a, const void *b)
+{
+ const struct omap_drm_pipeline *pipe1 = a;
+ const struct omap_drm_pipeline *pipe2 = b;
- return r;
+ if (pipe1->alias_id > pipe2->alias_id)
+ return 1;
+ else if (pipe1->alias_id < pipe2->alias_id)
+ return -1;
+ return 0;
}
static int omap_modeset_init_properties(struct drm_device *dev)
@@ -240,6 +208,30 @@ static int omap_modeset_init_properties(struct drm_device *dev)
return 0;
}
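+/*
+ * Return the DT "display" alias ID of the device at the end of the output's
+ * pipeline (display, last bridge, or panel), or a negative error code if no
+ * alias is found.
+ */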
+static int omap_display_id(struct omap_dss_device *output)
+{
+ struct device_node *node = NULL;
+
+ if (output->next) {
+ struct omap_dss_device *display;
+
+ display = omapdss_display_get(output);
+ node = display->dev->of_node;
+ omapdss_device_put(display);
+ } else if (output->bridge) {
+ struct drm_bridge *bridge = output->bridge;
+
+ while (bridge->next)
+ bridge = bridge->next;
+
+ node = bridge->of_node;
+ } else if (output->panel) {
+ node = output->panel->dev->of_node;
+ }
+
+ return node ? of_alias_get_id(node, "display") : -ENODEV;
+}
+
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
@@ -249,6 +241,9 @@ static int omap_modeset_init(struct drm_device *dev)
int ret;
u32 plane_crtc_mask;
+ if (!omapdss_stack_is_ready())
+ return -EPROBE_DEFER;
+
drm_mode_config_init(dev);
ret = omap_modeset_init_properties(dev);
@@ -263,6 +258,10 @@ static int omap_modeset_init(struct drm_device *dev)
* configuration does not match the expectations or exceeds
* the available resources, the configuration is rejected.
*/
+ ret = omap_connect_pipelines(dev);
+ if (ret < 0)
+ return ret;
+
if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) {
dev_err(dev->dev, "%s(): Too many connected displays\n",
__func__);
@@ -288,33 +287,75 @@ static int omap_modeset_init(struct drm_device *dev)
priv->planes[priv->num_planes++] = plane;
}
- /* Create the CRTCs, encoders and connectors. */
+ /*
+ * Create the encoders, attach the bridges and get the pipeline alias
+ * IDs.
+ */
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
- struct omap_dss_device *display = pipe->display;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_crtc *crtc;
+ int id;
- encoder = omap_encoder_init(dev, pipe->output, display);
- if (!encoder)
+ pipe->encoder = omap_encoder_init(dev, pipe->output);
+ if (!pipe->encoder)
return -ENOMEM;
- connector = omap_connector_init(dev, pipe->output, display,
- encoder);
- if (!connector)
- return -ENOMEM;
+ if (pipe->output->bridge) {
+ ret = drm_bridge_attach(pipe->encoder,
+ pipe->output->bridge, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ id = omap_display_id(pipe->output);
+ pipe->alias_id = id >= 0 ? id : i;
+ }
+
+ /* Sort the pipelines by DT aliases. */
+ sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
+ omap_compare_pipelines, NULL);
+
+ /*
+ * Populate the pipeline lookup table by DISPC channel. Only one display
+ * is allowed per channel.
+ */
+ for (i = 0; i < priv->num_pipes; ++i) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ enum omap_channel channel = pipe->output->dispc_channel;
+
+ if (WARN_ON(priv->channels[channel] != NULL))
+ return -EINVAL;
+
+ priv->channels[channel] = pipe;
+ }
+
+ /* Create the connectors and CRTCs. */
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ struct drm_encoder *encoder = pipe->encoder;
+ struct drm_crtc *crtc;
+
+ if (!pipe->output->bridge) {
+ pipe->connector = omap_connector_init(dev, pipe->output,
+ encoder);
+ if (!pipe->connector)
+ return -ENOMEM;
+
+ drm_connector_attach_encoder(pipe->connector, encoder);
+
+ if (pipe->output->panel) {
+ ret = drm_panel_attach(pipe->output->panel,
+ pipe->connector);
+ if (ret < 0)
+ return ret;
+ }
+ }
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
- drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = 1 << i;
-
pipe->crtc = crtc;
- pipe->encoder = encoder;
- pipe->connector = connector;
}
DBG("registered %u planes, %u crtcs/encoders/connectors\n",
@@ -351,10 +392,12 @@ static int omap_modeset_init(struct drm_device *dev)
static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
- int i;
+ unsigned int i;
- for (i = 0; i < priv->num_pipes; i++)
- omap_connector_enable_hpd(priv->pipes[i].connector);
+ for (i = 0; i < priv->num_pipes; i++) {
+ if (priv->pipes[i].connector)
+ omap_connector_enable_hpd(priv->pipes[i].connector);
+ }
}
/*
@@ -363,10 +406,12 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
- int i;
+ unsigned int i;
- for (i = 0; i < priv->num_pipes; i++)
- omap_connector_disable_hpd(priv->pipes[i].connector);
+ for (i = 0; i < priv->num_pipes; i++) {
+ if (priv->pipes[i].connector)
+ omap_connector_disable_hpd(priv->pipes[i].connector);
+ }
}
/*
@@ -551,10 +596,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_crtc_pre_init(priv);
- ret = omap_connect_pipelines(ddev);
- if (ret)
- goto err_crtc_uninit;
-
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
@@ -612,7 +653,6 @@ err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
-err_crtc_uninit:
omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
return ret;
@@ -685,54 +725,12 @@ static int pdev_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int omap_drm_suspend_all_displays(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct omap_dss_device *display = priv->pipes[i].display;
-
- if (display->state == OMAP_DSS_DISPLAY_ACTIVE) {
- display->ops->disable(display);
- display->activate_after_resume = true;
- } else {
- display->activate_after_resume = false;
- }
- }
-
- return 0;
-}
-
-static int omap_drm_resume_all_displays(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct omap_dss_device *display = priv->pipes[i].display;
-
- if (display->activate_after_resume) {
- display->ops->enable(display);
- display->activate_after_resume = false;
- }
- }
-
- return 0;
-}
-
static int omap_drm_suspend(struct device *dev)
{
struct omap_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = priv->ddev;
- drm_kms_helper_poll_disable(drm_dev);
-
- drm_modeset_lock_all(drm_dev);
- omap_drm_suspend_all_displays(drm_dev);
- drm_modeset_unlock_all(drm_dev);
-
- return 0;
+ return drm_mode_config_helper_suspend(drm_dev);
}
static int omap_drm_resume(struct device *dev)
@@ -740,11 +738,7 @@ static int omap_drm_resume(struct device *dev)
struct omap_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = priv->ddev;
- drm_modeset_lock_all(drm_dev);
- omap_drm_resume_all_displays(drm_dev);
- drm_modeset_unlock_all(drm_dev);
-
- drm_kms_helper_poll_enable(drm_dev);
+ drm_mode_config_helper_resume(drm_dev);
return omap_gem_resume(drm_dev);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 0c57d2814c51..3cca45cb25f3 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -49,7 +49,7 @@ struct omap_drm_pipeline {
struct drm_encoder *encoder;
struct drm_connector *connector;
struct omap_dss_device *output;
- struct omap_dss_device *display;
+ unsigned int alias_id;
};
struct omap_drm_private {
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0d85b3a35767..40512419642b 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
#include "omap_drv.h"
@@ -37,7 +38,6 @@
struct omap_encoder {
struct drm_encoder base;
struct omap_dss_device *output;
- struct omap_dss_device *display;
};
static void omap_encoder_destroy(struct drm_encoder *encoder)
@@ -52,22 +52,43 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
-static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
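+/*
+ * Merge the DE, pixel data and sync edge settings from bus_flags into the
+ * videomode, without overriding flags the mode already specifies.
+ */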
+static void omap_encoder_update_videomode_flags(struct videomode *vm,
+ u32 bus_flags)
+{
+ if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW |
+ DISPLAY_FLAGS_DE_HIGH))) {
+ if (bus_flags & DRM_BUS_FLAG_DE_LOW)
+ vm->flags |= DISPLAY_FLAGS_DE_LOW;
+ else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH;
+ }
+
+ if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ }
+
+ if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE)
+ vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE)
+ vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+ }
+}
+
+static void omap_encoder_hdmi_mode_set(struct drm_connector *connector,
+ struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_connector *connector;
bool hdmi_mode;
- hdmi_mode = false;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
- break;
- }
- }
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
if (dssdev->ops->hdmi.set_hdmi_mode)
dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
@@ -88,8 +109,18 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *output = omap_encoder->output;
struct omap_dss_device *dssdev;
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
struct videomode vm = { 0 };
+ u32 bus_flags;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ break;
+ }
drm_display_mode_to_videomode(adjusted_mode, &vm);
@@ -102,66 +133,102 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
- unsigned long bus_flags = dssdev->bus_flags;
-
- if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW |
- DISPLAY_FLAGS_DE_HIGH))) {
- if (bus_flags & DRM_BUS_FLAG_DE_LOW)
- vm.flags |= DISPLAY_FLAGS_DE_LOW;
- else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
- vm.flags |= DISPLAY_FLAGS_DE_HIGH;
- }
-
- if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
- vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
- }
-
- if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE))) {
- if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE)
- vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
- else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE)
- vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
- }
+ for (dssdev = output; dssdev; dssdev = dssdev->next)
+ omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
+
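+ /* Also merge the input bus flags of any bridge in the chain that provides timings. */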
+ for (bridge = output->bridge; bridge; bridge = bridge->next) {
+ if (!bridge->timings)
+ continue;
+
+ bus_flags = bridge->timings->input_bus_flags;
+ omap_encoder_update_videomode_flags(&vm, bus_flags);
}
+ bus_flags = connector->display_info.bus_flags;
+ omap_encoder_update_videomode_flags(&vm, bus_flags);
+
/* Set timings for all devices in the display pipeline. */
- dss_mgr_set_timings(omap_encoder->output, &vm);
+ dss_mgr_set_timings(output, &vm);
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ for (dssdev = output; dssdev; dssdev = dssdev->next) {
if (dssdev->ops->set_timings)
- dssdev->ops->set_timings(dssdev, &vm);
+ dssdev->ops->set_timings(dssdev, adjusted_mode);
}
/* Set the HDMI mode and HDMI infoframe if applicable. */
- if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
- omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
+ if (output->type == OMAP_DISPLAY_TYPE_HDMI)
+ omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->display;
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_device *dev = encoder->dev;
+
+ dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
+
+ /* Disable the panel if present. */
+ if (dssdev->panel) {
+ drm_panel_disable(dssdev->panel);
+ drm_panel_unprepare(dssdev->panel);
+ }
+
+ /*
+ * Disable the chain of external devices, starting at the one at the
+ * internal encoder's output.
+ */
+ omapdss_device_disable(dssdev->next);
+
+ /*
+ * Disable the internal encoder. This will disable the DSS output. The
+ * DSI is treated as an exception as DSI pipelines still use the legacy
+ * flow where the pipeline output controls the encoder.
+ */
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
+ dssdev->ops->disable(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ }
- dssdev->ops->disable(dssdev);
+ /*
+ * Perform the post-disable operations on the chain of external devices
+ * to complete the display pipeline disable.
+ */
+ omapdss_device_post_disable(dssdev->next);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->display;
- int r;
-
- r = dssdev->ops->enable(dssdev);
- if (r)
- dev_err(encoder->dev->dev,
- "Failed to enable display '%s': %d\n",
- dssdev->name, r);
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_device *dev = encoder->dev;
+
+ dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
+
+ /* Prepare the chain of external devices for pipeline enable. */
+ omapdss_device_pre_enable(dssdev->next);
+
+ /*
+ * Enable the internal encoder. This will enable the DSS output. The
+ * DSI is treated as an exception as DSI pipelines still use the legacy
+ * flow where the pipeline output controls the encoder.
+ */
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
+ dssdev->ops->enable(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ }
+
+ /*
+ * Enable the chain of external devices, starting at the one at the
+ * internal encoder's output.
+ */
+ omapdss_device_enable(dssdev->next);
+
+ /* Enable the panel if present. */
+ if (dssdev->panel) {
+ drm_panel_prepare(dssdev->panel);
+ drm_panel_enable(dssdev->panel);
+ }
}
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
@@ -169,35 +236,17 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- enum omap_channel channel = omap_encoder->output->dispc_channel;
- struct drm_device *dev = encoder->dev;
- struct omap_drm_private *priv = dev->dev_private;
- struct omap_dss_device *dssdev;
- struct videomode vm = { 0 };
- int ret;
-
- drm_display_mode_to_videomode(&crtc_state->mode, &vm);
-
- ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
- if (ret)
- goto done;
-
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
- continue;
-
- ret = dssdev->ops->check_timings(dssdev, &vm);
- if (ret)
- goto done;
+ enum drm_mode_status status;
+
+ status = omap_connector_mode_fixup(omap_encoder->output,
+ &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (status != MODE_OK) {
+ dev_err(encoder->dev->dev, "invalid timings: %d\n", status);
+ return -EINVAL;
}
- drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode);
-
-done:
- if (ret)
- dev_err(dev->dev, "invalid timings: %d\n", ret);
-
- return ret;
+ return 0;
}
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
@@ -209,8 +258,7 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct omap_dss_device *display)
+ struct omap_dss_device *output)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
@@ -220,7 +268,6 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
goto fail;
omap_encoder->output = output;
- omap_encoder->display = display;
encoder = &omap_encoder->base;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h
index a7b5dde63ecb..4aefb3142886 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.h
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.h
@@ -25,7 +25,6 @@ struct drm_encoder;
struct omap_dss_device;
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct omap_dss_device *display);
+ struct omap_dss_device *output);
#endif /* __OMAPDRM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 851c59f07eb1..50aabd854f4d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -183,13 +183,9 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- fbi->par = helper;
fbi->fbops = &omap_fb_ops;
- strcpy(fbi->fix.id, MODULE_NAME);
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = dma_addr;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 3e070153ef21..e36dbb4df867 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -38,6 +38,15 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_FEIYANG_FY07024DI26A30D
+ tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for the Feiyang
+ FY07024DI26A30-D MIPI-DSI panel.
+
config DRM_PANEL_ILITEK_IL9322
tristate "Ilitek ILI9322 320x240 QVGA panels"
depends on OF && SPI
@@ -149,6 +158,28 @@ config DRM_PANEL_RAYDIUM_RM68200
Say Y here if you want to enable support for Raydium RM68200
720x1280 DSI video mode panel.
+config DRM_PANEL_ROCKTECH_JH057N00900
+ tristate "Rocktech JH057N00900 MIPI touchscreen panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Rocktech JH057N00900
+ MIPI DSI panel, as used e.g. in the Librem 5 devkit. It has a
+ resolution of 720x1440 pixels, a built-in backlight and a touch
+ controller.
+ Touch input support is provided by the goodix driver and needs to be
+ selected separately.
+
+config DRM_PANEL_RONBO_RB070D30
+ tristate "Ronbo Electronics RB070D30 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Ronbo Electronics
+ RB070D30 1024x600 DSI panel.
+
config DRM_PANEL_SAMSUNG_S6D16D0
tristate "Samsung S6D16D0 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index e7ab71968bbf..78e3dc376bdd 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
@@ -13,6 +14,8 @@ obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
+obj-$(CONFIG_DRM_PANEL_ROCKTECH_JH057N00900) += panel-rocktech-jh057n00900.o
+obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index b428c4678106..a79908dfa3c8 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -191,7 +191,7 @@ static const struct versatile_panel_type versatile_panels[] = {
.vrefresh = 390,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
},
/*
* Sanyo ALR252RGT 240x320 portrait display found on the
@@ -215,7 +215,7 @@ static const struct versatile_panel_type versatile_panels[] = {
.vrefresh = 116,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
.ib2 = true,
},
};
@@ -264,8 +264,6 @@ static int versatile_panel_get_modes(struct drm_panel *panel)
struct versatile_panel *vpanel = to_versatile_panel(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, vpanel->panel_type->name,
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = vpanel->panel_type->width_mm;
connector->display_info.height_mm = vpanel->panel_type->height_mm;
connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
new file mode 100644
index 000000000000..dabf59e0f56f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Amarula Solutions
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#define FEIYANG_INIT_CMD_LEN 2
+
+struct feiyang {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *dvdd;
+ struct regulator *avdd;
+ struct gpio_desc *reset;
+};
+
+static inline struct feiyang *panel_to_feiyang(struct drm_panel *panel)
+{
+ return container_of(panel, struct feiyang, panel);
+}
+
+struct feiyang_init_cmd {
+ u8 data[FEIYANG_INIT_CMD_LEN];
+};
+
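+/* Vendor-supplied init sequence; each entry is sent as a two-byte DCS write during prepare. */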
+static const struct feiyang_init_cmd feiyang_init_cmds[] = {
+ { .data = { 0x80, 0x58 } },
+ { .data = { 0x81, 0x47 } },
+ { .data = { 0x82, 0xD4 } },
+ { .data = { 0x83, 0x88 } },
+ { .data = { 0x84, 0xA9 } },
+ { .data = { 0x85, 0xC3 } },
+ { .data = { 0x86, 0x82 } },
+};
+
+static int feiyang_prepare(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ unsigned int i;
+ int ret;
+
+ ret = regulator_enable(ctx->dvdd);
+ if (ret)
+ return ret;
+
+ /* T1 (dvdd start + dvdd rise) 0 < T1 <= 10ms */
+ msleep(10);
+
+ ret = regulator_enable(ctx->avdd);
+ if (ret)
+ return ret;
+
+ /* T3 (dvdd rise + avdd start + avdd rise) T3 >= 20ms */
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 0);
+
+ /*
+ * T5 + T6 (avdd rise + video & logic signal rise)
+ * T5 >= 10ms, 0 < T6 <= 10ms
+ */
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 1);
+
+ /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */
+ msleep(200);
+
+ for (i = 0; i < ARRAY_SIZE(feiyang_init_cmds); i++) {
+ const struct feiyang_init_cmd *cmd =
+ &feiyang_init_cmds[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data,
+ FEIYANG_INIT_CMD_LEN);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int feiyang_enable(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+
+ /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */
+ msleep(200);
+
+ mipi_dsi_dcs_set_display_on(ctx->dsi);
+ backlight_enable(ctx->backlight);
+
+ return 0;
+}
+
+static int feiyang_disable(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int feiyang_unprepare(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+
+ /* T13 (backlight fall + video & logic signal fall) T13 >= 200ms */
+ msleep(200);
+
+ gpiod_set_value(ctx->reset, 0);
+
+ regulator_disable(ctx->avdd);
+
+ /* T11 (dvdd rise to fall) 0 < T11 <= 10ms */
+ msleep(10);
+
+ regulator_disable(ctx->dvdd);
+
+ return 0;
+}
+
+static const struct drm_display_mode feiyang_default_mode = {
+ .clock = 55000,
+
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 310,
+ .hsync_end = 1024 + 310 + 20,
+ .htotal = 1024 + 310 + 20 + 90,
+
+ .vdisplay = 600,
+ .vsync_start = 600 + 12,
+ .vsync_end = 600 + 12 + 2,
+ .vtotal = 600 + 12 + 2 + 21,
+ .vrefresh = 60,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int feiyang_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &feiyang_default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
+ feiyang_default_mode.hdisplay,
+ feiyang_default_mode.vdisplay,
+ feiyang_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs feiyang_funcs = {
+ .disable = feiyang_disable,
+ .unprepare = feiyang_unprepare,
+ .prepare = feiyang_prepare,
+ .enable = feiyang_enable,
+ .get_modes = feiyang_get_modes,
+};
+
+static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct feiyang *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &feiyang_funcs;
+
+ ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
+ if (IS_ERR(ctx->dvdd)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get dvdd regulator\n");
+ return PTR_ERR(ctx->dvdd);
+ }
+
+ ctx->avdd = devm_regulator_get(&dsi->dev, "avdd");
+ if (IS_ERR(ctx->avdd)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get avdd regulator\n");
+ return PTR_ERR(ctx->avdd);
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ ctx->backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(ctx->backlight))
+ return PTR_ERR(ctx->backlight);
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int feiyang_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct feiyang *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id feiyang_of_match[] = {
+ { .compatible = "feiyang,fy07024di26a30d", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, feiyang_of_match);
+
+static struct mipi_dsi_driver feiyang_driver = {
+ .probe = feiyang_dsi_probe,
+ .remove = feiyang_dsi_remove,
+ .driver = {
+ .name = "feiyang-fy07024di26a30d",
+ .of_match_table = feiyang_of_match,
+ },
+};
+module_mipi_dsi_driver(feiyang_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_DESCRIPTION("Feiyang FY07024DI26A30-D MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index bd38bf4f1ba6..a1c4cd2940fb 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -412,11 +412,11 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
if (ili->conf->dclk_active_high) {
reg = ILI9322_POL_DCLK;
connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
} else {
reg = 0;
connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
}
if (ili->conf->de_active_high) {
reg |= ILI9322_POL_DE;
@@ -662,8 +662,6 @@ static int ili9322_get_modes(struct drm_panel *panel)
struct ili9322 *ili = panel_to_ili9322(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, "ILI9322 TFT LCD driver\0",
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = ili->conf->width_mm;
connector->display_info.height_mm = ili->conf->height_mm;
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 5e8d4523e9ed..a1d8d92fac2b 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -190,7 +190,6 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel)
num++;
}
- memcpy(connector->display_info.name, lcd_info->name, 32);
connector->display_info.width_mm = lcd_info->width_mm;
connector->display_info.height_mm = lcd_info->height_mm;
connector->display_info.bpc = lcd_info->bpc;
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 87fa316e1d7b..f27a7e426574 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -67,15 +67,15 @@ struct otm8009a {
};
static const struct drm_display_mode default_mode = {
- .clock = 32729,
+ .clock = 29700,
.hdisplay = 480,
- .hsync_start = 480 + 120,
- .hsync_end = 480 + 120 + 63,
- .htotal = 480 + 120 + 63 + 120,
+ .hsync_start = 480 + 98,
+ .hsync_end = 480 + 98 + 32,
+ .htotal = 480 + 98 + 32 + 98,
.vdisplay = 800,
- .vsync_start = 800 + 12,
- .vsync_end = 800 + 12 + 12,
- .vtotal = 800 + 12 + 12 + 12,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 14,
.vrefresh = 50,
.flags = 0,
.width_mm = 52,
@@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
/* Send Command GRAM memory write (no parameters) */
dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
+ /* Wait a short while to let the panel be ready before the 1st frame */
+ mdelay(10);
+
return 0;
}
@@ -433,7 +436,8 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->supply = devm_regulator_get(dev, "power");
if (IS_ERR(ctx->supply)) {
ret = PTR_ERR(ctx->supply);
- dev_err(dev, "failed to request regulator: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to request regulator: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 77593533abcd..14186827e591 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -383,7 +383,8 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
ctx->supply = devm_regulator_get(dev, "power");
if (IS_ERR(ctx->supply)) {
ret = PTR_ERR(ctx->supply);
- dev_err(dev, "cannot get regulator: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "cannot get regulator: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
new file mode 100644
index 000000000000..d88ea8da2ec2
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ *
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <linux/backlight.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#define DRV_NAME "panel-rocktech-jh057n00900"
+
+/* Manufacturer-specific commands sent via DSI */
+#define ST7703_CMD_ALL_PIXEL_OFF 0x22
+#define ST7703_CMD_ALL_PIXEL_ON 0x23
+#define ST7703_CMD_SETDISP 0xB2
+#define ST7703_CMD_SETRGBIF 0xB3
+#define ST7703_CMD_SETCYC 0xB4
+#define ST7703_CMD_SETBGP 0xB5
+#define ST7703_CMD_SETVCOM 0xB6
+#define ST7703_CMD_SETOTP 0xB7
+#define ST7703_CMD_SETPOWER_EXT 0xB8
+#define ST7703_CMD_SETEXTC 0xB9
+#define ST7703_CMD_SETMIPI 0xBA
+#define ST7703_CMD_SETVDC 0xBC
+#define ST7703_CMD_SETSCR 0xC0
+#define ST7703_CMD_SETPOWER 0xC1
+#define ST7703_CMD_SETPANEL 0xCC
+#define ST7703_CMD_SETGAMMA 0xE0
+#define ST7703_CMD_SETEQ 0xE3
+#define ST7703_CMD_SETGIP1 0xE9
+#define ST7703_CMD_SETGIP2 0xEA
+
+struct jh057n {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct backlight_device *backlight;
+ bool prepared;
+
+ struct dentry *debugfs;
+};
+
+static inline struct jh057n *panel_to_jh057n(struct drm_panel *panel)
+{
+ return container_of(panel, struct jh057n, panel);
+}
+
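+/* Send a fixed byte sequence as a DSI generic write; returns from the calling function on error. */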
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int jh057n_init_sequence(struct jh057n *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor. Most of the commands
+ * resemble the ST7703, but the number of parameters often doesn't match,
+ * so it's likely a clone.
+ */
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC,
+ 0xF1, 0x12, 0x83);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF,
+ 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00,
+ 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ,
+ 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08);
+ msleep(20);
+
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
+ dsi_generic_write_seq(dsi, 0xBF, 0x02, 0x11, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
+ 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
+ 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
+ 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
+ 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP2,
+ 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
+ 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0A,
+ 0xA5, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA,
+ 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37,
+ 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11,
+ 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41,
+ 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10,
+ 0x11, 0x18);
+ msleep(20);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to exit sleep mode\n");
+ return ret;
+ }
+ /* Panel is operational 120 msec after reset */
+ msleep(60);
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret)
+ return ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int jh057n_enable(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+
+ return backlight_enable(ctx->backlight);
+}
+
+static int jh057n_disable(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+
+ return backlight_disable(ctx->backlight);
+}
+
+static int jh057n_unprepare(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ if (!ctx->prepared)
+ return 0;
+
+ mipi_dsi_dcs_set_display_off(dsi);
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int jh057n_prepare(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(20, 40);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(20);
+
+ ret = jh057n_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->prepared = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 90,
+ .hsync_end = 720 + 90 + 20,
+ .htotal = 720 + 90 + 20 + 20,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 20,
+ .vsync_end = 1440 + 20 + 4,
+ .vtotal = 1440 + 20 + 4 + 12,
+ .vrefresh = 60,
+ .clock = 75276,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 65,
+ .height_mm = 130,
+};
+
+static int jh057n_get_modes(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(panel->connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs jh057n_drm_funcs = {
+ .disable = jh057n_disable,
+ .unprepare = jh057n_unprepare,
+ .prepare = jh057n_prepare,
+ .enable = jh057n_enable,
+ .get_modes = jh057n_get_modes,
+};
+
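+/* debugfs helper: turn all pixels on for 'val' seconds, then re-init the panel to restore video. */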
+static int allpixelson_set(void *data, u64 val)
+{
+ struct jh057n *ctx = data;
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Setting all pixels on\n");
+ dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
+ msleep(val * 1000);
+ /* Reset the panel to get video back */
+ drm_panel_disable(&ctx->panel);
+ drm_panel_unprepare(&ctx->panel);
+ drm_panel_prepare(&ctx->panel);
+ drm_panel_enable(&ctx->panel);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(allpixelson_fops, NULL,
+ allpixelson_set, "%llu\n");
+
+static int jh057n_debugfs_init(struct jh057n *ctx)
+{
+ struct dentry *f;
+
+ ctx->debugfs = debugfs_create_dir(DRV_NAME, NULL);
+ if (!ctx->debugfs)
+ return -ENOMEM;
+
+ f = debugfs_create_file("allpixelson", 0600,
+ ctx->debugfs, ctx, &allpixelson_fops);
+ if (!f)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void jh057n_debugfs_remove(struct jh057n *ctx)
+{
+ debugfs_remove_recursive(ctx->debugfs);
+ ctx->debugfs = NULL;
+}
+
+static int jh057n_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct jh057n *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ ctx->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(ctx->backlight))
+ return PTR_ERR(ctx->backlight);
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &jh057n_drm_funcs;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed. Is host ready?\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ DRM_DEV_INFO(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh,
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+
+ jh057n_debugfs_init(ctx);
+ return 0;
+}
+
+static void jh057n_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct jh057n *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = jh057n_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = jh057n_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int jh057n_remove(struct mipi_dsi_device *dsi)
+{
+ struct jh057n *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ jh057n_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ jh057n_debugfs_remove(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id jh057n_of_match[] = {
+ { .compatible = "rocktech,jh057n00900" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jh057n_of_match);
+
+static struct mipi_dsi_driver jh057n_driver = {
+ .probe = jh057n_probe,
+ .remove = jh057n_remove,
+ .shutdown = jh057n_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = jh057n_of_match,
+ },
+};
+module_mipi_dsi_driver(jh057n_driver);
+
+MODULE_AUTHOR("Guido Günther <agx@sigxcpu.org>");
+MODULE_DESCRIPTION("DRM driver for Rocktech JH057N00900 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
new file mode 100644
index 000000000000..3c15764f0c03
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018-2019, Bridge Systems BV
+ * Copyright (C) 2018-2019, Bootlin
+ * Copyright (C) 2017, Free Electrons
+ *
+ * This file is based on panel-ilitek-ili9881c.c
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct rb070d30_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct backlight_device *backlight;
+ struct regulator *supply;
+
+ struct {
+ struct gpio_desc *power;
+ struct gpio_desc *reset;
+ struct gpio_desc *updn;
+ struct gpio_desc *shlr;
+ } gpios;
+};
+
+static inline struct rb070d30_panel *panel_to_rb070d30_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct rb070d30_panel, panel);
+}
+
+static int rb070d30_panel_prepare(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ int ret;
+
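+ /* Power-up sequence with 20 ms settling delays between supply, power GPIO and reset. */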
+ ret = regulator_enable(ctx->supply);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret);
+ return ret;
+ }
+
+ msleep(20);
+ gpiod_set_value(ctx->gpios.power, 1);
+ msleep(20);
+ gpiod_set_value(ctx->gpios.reset, 1);
+ msleep(20);
+ return 0;
+}
+
+static int rb070d30_panel_unprepare(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+
+ gpiod_set_value(ctx->gpios.reset, 0);
+ gpiod_set_value(ctx->gpios.power, 0);
+ regulator_disable(ctx->supply);
+
+ return 0;
+}
+
+static int rb070d30_panel_enable(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+ if (ret)
+ return ret;
+
+ ret = backlight_enable(ctx->backlight);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ return ret;
+}
+
+static int rb070d30_panel_disable(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+}
+
+/* Default timings */
+static const struct drm_display_mode default_mode = {
+ .clock = 51206,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 160,
+ .hsync_end = 1024 + 160 + 80,
+ .htotal = 1024 + 160 + 80 + 80,
+ .vdisplay = 600,
+ .vsync_start = 600 + 12,
+ .vsync_end = 600 + 12 + 10,
+ .vtotal = 600 + 12 + 10 + 13,
+ .vrefresh = 60,
+
+ .width_mm = 154,
+ .height_mm = 85,
+};
+
+static int rb070d30_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev,
+ "Failed to add mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&default_mode));
+ return -EINVAL;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ panel->connector->display_info.bpc = 8;
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs rb070d30_panel_funcs = {
+ .get_modes = rb070d30_panel_get_modes,
+ .prepare = rb070d30_panel_prepare,
+ .enable = rb070d30_panel_enable,
+ .disable = rb070d30_panel_disable,
+ .unprepare = rb070d30_panel_unprepare,
+};
+
+static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct rb070d30_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
+ if (IS_ERR(ctx->supply))
+ return PTR_ERR(ctx->supply);
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &rb070d30_panel_funcs;
+
+ ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->gpios.reset);
+ }
+
+ ctx->gpios.power = devm_gpiod_get(&dsi->dev, "power", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.power)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our power GPIO\n");
+ return PTR_ERR(ctx->gpios.power);
+ }
+
+ /*
+ * We don't change the state of that GPIO later on but we need
+ * to force it into a low state.
+ */
+ ctx->gpios.updn = devm_gpiod_get(&dsi->dev, "updn", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.updn)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our updn GPIO\n");
+ return PTR_ERR(ctx->gpios.updn);
+ }
+
+ /*
+ * We don't change the state of that GPIO later on but we need
+ * to force it into a low state.
+ */
+ ctx->gpios.shlr = devm_gpiod_get(&dsi->dev, "shlr", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.shlr)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our shlr GPIO\n");
+ return PTR_ERR(ctx->gpios.shlr);
+ }
+
+ ctx->backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(ctx->backlight)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n");
+ return PTR_ERR(ctx->backlight);
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id rb070d30_panel_of_match[] = {
+ { .compatible = "ronbo,rb070d30" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rb070d30_panel_of_match);
+
+static struct mipi_dsi_driver rb070d30_panel_driver = {
+ .probe = rb070d30_panel_dsi_probe,
+ .remove = rb070d30_panel_dsi_remove,
+ .driver = {
+ .name = "panel-ronbo-rb070d30",
+ .of_match_table = rb070d30_panel_of_match,
+ },
+};
+module_mipi_dsi_driver(rb070d30_panel_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_AUTHOR("Konstantin Sudakov <k.sudakov@integrasources.com>");
+MODULE_DESCRIPTION("Ronbo RB070D30 Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 33c22ee036f8..f75bef24e050 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -148,9 +148,6 @@ static int s6d16d0_get_modes(struct drm_panel *panel)
struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, "Samsung S6D16D0\0",
- DRM_DISPLAY_INFO_LEN);
-
mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
if (!mode) {
DRM_ERROR("bad mode or failed to add mode\n");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 2d99e28ff117..bdcc5d80823d 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -328,7 +328,7 @@ static const struct seiko_panel_desc seiko_43wvf1g = {
.height = 57,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct of_device_id platform_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9e8218f6a3f2..569be4efd8d1 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -914,7 +914,7 @@ static const struct panel_desc cdtech_s043wq26h_ct7 = {
.width = 95,
.height = 54,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode cdtech_s070wv95_ct16_mode = {
@@ -1034,7 +1034,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct display_timing dlc_dlc0700yzg_1_timing = {
@@ -1119,7 +1119,7 @@ static const struct panel_desc edt_et057090dhu = {
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct drm_display_mode edt_etm0700g0dh6_mode = {
@@ -1145,7 +1145,7 @@ static const struct panel_desc edt_etm0700g0dh6 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct panel_desc edt_etm0700g0bdh6 = {
@@ -1157,7 +1157,7 @@ static const struct panel_desc edt_etm0700g0bdh6 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
@@ -1311,7 +1311,7 @@ static const struct panel_desc innolux_at043tn24 = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode innolux_at070tn92_mode = {
@@ -1818,7 +1818,7 @@ static const struct panel_desc nec_nl4827hc19_05b = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode netron_dy_e231732_mode = {
@@ -1867,8 +1867,8 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
- DRM_BUS_FLAG_SYNC_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
};
static const struct display_timing nlt_nl192108ac18_02d_timing = {
@@ -2029,7 +2029,33 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
.height = 93,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+};
+
+static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
+ .clock = 33000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 30,
+ .htotal = 800 + 210 + 30 + 16,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 13,
+ .vtotal = 480 + 22 + 13 + 10,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc osddisplays_osd070t1718_19ts = {
+ .modes = &osddisplays_osd070t1718_19ts_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode pda_91_00156_a0_mode = {
@@ -2398,7 +2424,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
.height = 116,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -2421,7 +2447,7 @@ static const struct panel_desc tpk_f07a_0102 = {
.width = 152,
.height = 91,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode tpk_f10a_0102_mode = {
@@ -2737,6 +2763,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ortustech,com43h4m85ulc",
.data = &ortustech_com43h4m85ulc,
}, {
+ .compatible = "osddisplays,osd070t1718-19ts",
+ .data = &osddisplays_osd070t1718_19ts,
+ }, {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
@@ -2996,6 +3025,34 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.lanes = 4,
};
+static const struct drm_display_mode lg_acx467akm_7_mode = {
+ .clock = 150000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 2,
+ .hsync_end = 1080 + 2 + 2,
+ .htotal = 1080 + 2 + 2 + 2,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 2,
+ .vsync_end = 1920 + 2 + 2,
+ .vtotal = 1920 + 2 + 2 + 2,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi lg_acx467akm_7 = {
+ .desc = {
+ .modes = &lg_acx467akm_7_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 62,
+ .height = 110,
+ },
+ },
+ .flags = 0,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct of_device_id dsi_of_match[] = {
{
.compatible = "auo,b080uan01",
@@ -3013,6 +3070,9 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "panasonic,vvx10f004b00",
.data = &panasonic_vvx10f004b00
}, {
+ .compatible = "lg,acx467akm-7",
+ .data = &lg_acx467akm_7
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 5a9f8f4d5d24..71591e5f5938 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -118,7 +118,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 480 + 10 + 1 + 35,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "640x480 RGB",
@@ -135,7 +135,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 480 + 18 + 1 + 27,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "480x272 RGB",
@@ -152,7 +152,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 272 + 2 + 1 + 12,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "480x640 RGB",
@@ -169,7 +169,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 640 + 4 + 1 + 8,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "400x240 RGB",
@@ -186,7 +186,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 240 + 2 + 1 + 20,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
};
@@ -390,8 +390,6 @@ static int tpg110_get_modes(struct drm_panel *panel)
struct tpg110 *tpg = to_tpg110(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, tpg->panel_mode->name,
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = tpg->width;
connector->display_info.height_mm = tpg->height;
connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
new file mode 100644
index 000000000000..591611dc4e34
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DRM_PANFROST
+ tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
+ depends on DRM
+ depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on MMU
+ select DRM_SCHED
+ select IOMMU_SUPPORT
+ select IOMMU_IO_PGTABLE_LPAE
+ select DRM_GEM_SHMEM_HELPER
+ help
+ DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
+ Bifrost (G3x, G5x, G7x) GPUs.
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
new file mode 100644
index 000000000000..6de72d13c58f
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+panfrost-y := \
+ panfrost_drv.o \
+ panfrost_device.o \
+ panfrost_devfreq.o \
+ panfrost_gem.o \
+ panfrost_gpu.o \
+ panfrost_job.o \
+ panfrost_mmu.o
+
+obj-$(CONFIG_DRM_PANFROST) += panfrost.o
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
new file mode 100644
index 000000000000..c2e44add37d8
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -0,0 +1,27 @@
+- Thermal support.
+
+- Bifrost support:
+ - DT bindings (Neil, WIP)
+ - MMU page table format and address space setup
+ - Bifrost specific feature and issue handling
+ - Coherent DMA support
+
+- Support for 2MB pages. The io-pgtable code already supports this. Finishing
+ support involves either copying or adapting the iommu API to handle passing
+ aligned addresses and sizes to the io-pgtable code.
+
+- Per FD address space support. The h/w supports multiple address spaces.
+ The hard part is handling when more address spaces are needed than what
+ the h/w provides.
+
+- Support pinning pages on demand (GPU page faults).
+
+- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
+
+- Support for madvise and a shrinker.
+
+- Compute job support. So-called 'compute only' jobs need to be plumbed up to
+ userspace.
+
+- Performance counter support. (Boris)
+
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
new file mode 100644
index 000000000000..238bd1d89d43
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Collabora ltd. */
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gpu.h"
+#include "panfrost_regs.h"
+
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+
+static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+ struct dev_pm_opp *opp;
+ unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
+ unsigned long target_volt, target_rate;
+ int err;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ target_rate = dev_pm_opp_get_freq(opp);
+ target_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (old_clk_rate == target_rate)
+ return 0;
+
+ /*
+ * When scaling the frequency up, raise the voltage first;
+ * when scaling it down, lower the frequency first.
+ */
+ if (old_clk_rate < target_rate) {
+ err = regulator_set_voltage(pfdev->regulator, target_volt,
+ target_volt);
+ if (err) {
+ dev_err(dev, "Cannot set voltage %lu uV\n",
+ target_volt);
+ return err;
+ }
+ }
+
+ err = clk_set_rate(pfdev->clock, target_rate);
+ if (err) {
+ dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
+ err);
+ regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
+ pfdev->devfreq.cur_volt);
+ return err;
+ }
+
+ if (old_clk_rate > target_rate) {
+ err = regulator_set_voltage(pfdev->regulator, target_volt,
+ target_volt);
+ if (err)
+ dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
+ }
+
+ pfdev->devfreq.cur_freq = target_rate;
+ pfdev->devfreq.cur_volt = target_volt;
+
+ return 0;
+}
+
+static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
+{
+ ktime_t now = ktime_get();
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ pfdev->devfreq.slot[i].busy_time = 0;
+ pfdev->devfreq.slot[i].idle_time = 0;
+ pfdev->devfreq.slot[i].time_last_update = now;
+ }
+}
+
+static int panfrost_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ panfrost_devfreq_update_utilization(pfdev, i);
+ }
+
+ status->current_frequency = clk_get_rate(pfdev->clock);
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
+ pfdev->devfreq.slot[0].idle_time));
+
+ status->busy_time = 0;
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
+ }
+
+ /* We're scheduling only to one core at the moment, so don't divide for now */
+ /* status->busy_time /= NUM_JOB_SLOTS; */
+
+ panfrost_devfreq_reset(pfdev);
+
+ dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", status->busy_time,
+ status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+
+ *freq = pfdev->devfreq.cur_freq;
+
+ return 0;
+}
+
+static struct devfreq_dev_profile panfrost_devfreq_profile = {
+ .polling_ms = 50, /* ~3 frames */
+ .target = panfrost_devfreq_target,
+ .get_dev_status = panfrost_devfreq_get_dev_status,
+ .get_cur_freq = panfrost_devfreq_get_cur_freq,
+};
+
+int panfrost_devfreq_init(struct panfrost_device *pfdev)
+{
+ int ret;
+ struct dev_pm_opp *opp;
+
+ if (!pfdev->regulator)
+ return 0;
+
+ ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
+ if (ret == -ENODEV) /* Optional, continue without devfreq */
+ return 0;
+
+ panfrost_devfreq_reset(pfdev);
+
+ pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+
+ opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ dev_pm_opp_put(opp);
+
+ pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
+ &panfrost_devfreq_profile, "simple_ondemand", NULL);
+ if (IS_ERR(pfdev->devfreq.devfreq)) {
+ DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ ret = PTR_ERR(pfdev->devfreq.devfreq);
+ pfdev->devfreq.devfreq = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void panfrost_devfreq_resume(struct panfrost_device *pfdev)
+{
+ int i;
+
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ panfrost_devfreq_reset(pfdev);
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ pfdev->devfreq.slot[i].busy = false;
+
+ devfreq_resume_device(pfdev->devfreq.devfreq);
+}
+
+void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
+{
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ devfreq_suspend_device(pfdev->devfreq.devfreq);
+}
+
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+{
+ struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ ktime_t now;
+ ktime_t last;
+
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ now = ktime_get();
+ last = pfdev->devfreq.slot[slot].time_last_update;
+
+ /* If we last recorded a transition to busy, we have been busy since */
+ if (devfreq_slot->busy)
+ pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ else
+ pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+
+ pfdev->devfreq.slot[slot].time_last_update = now;
+}
+
+/* The job scheduler is expected to call this at every transition busy <-> idle */
+void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+{
+ struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+
+ panfrost_devfreq_update_utilization(pfdev, slot);
+ devfreq_slot->busy = !devfreq_slot->busy;
+}
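+
+/*
+ * Illustrative call pattern (a sketch, not part of this patch): the job code
+ * is assumed to bracket hardware activity on a slot with two calls to
+ * panfrost_devfreq_record_transition(), keeping the busy/idle accounting
+ * above balanced:
+ *
+ *     panfrost_devfreq_record_transition(pfdev, slot);  (idle -> busy)
+ *     ... the job runs on the hardware slot ...
+ *     panfrost_devfreq_record_transition(pfdev, slot);  (busy -> idle)
+ */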
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
new file mode 100644
index 000000000000..eb999531ed90
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_DEVFREQ_H__
+#define __PANFROST_DEVFREQ_H__
+
+int panfrost_devfreq_init(struct panfrost_device *pfdev);
+
+void panfrost_devfreq_resume(struct panfrost_device *pfdev);
+void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
+
+void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+
+#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
new file mode 100644
index 000000000000..3b2bced1b015
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_features.h"
+#include "panfrost_gpu.h"
+#include "panfrost_job.h"
+#include "panfrost_mmu.h"
+
+static int panfrost_reset_init(struct panfrost_device *pfdev)
+{
+ int err;
+
+ pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
+ if (IS_ERR(pfdev->rstc)) {
+ dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
+ return PTR_ERR(pfdev->rstc);
+ }
+
+ err = reset_control_deassert(pfdev->rstc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void panfrost_reset_fini(struct panfrost_device *pfdev)
+{
+ reset_control_assert(pfdev->rstc);
+}
+
+static int panfrost_clk_init(struct panfrost_device *pfdev)
+{
+ int err;
+ unsigned long rate;
+
+ pfdev->clock = devm_clk_get(pfdev->dev, NULL);
+ if (IS_ERR(pfdev->clock)) {
+ dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
+ return PTR_ERR(pfdev->clock);
+ }
+
+ rate = clk_get_rate(pfdev->clock);
+ dev_info(pfdev->dev, "clock rate = %lu\n", rate);
+
+ err = clk_prepare_enable(pfdev->clock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void panfrost_clk_fini(struct panfrost_device *pfdev)
+{
+ clk_disable_unprepare(pfdev->clock);
+}
+
+static int panfrost_regulator_init(struct panfrost_device *pfdev)
+{
+ int ret;
+
+ pfdev->regulator = devm_regulator_get_optional(pfdev->dev, "mali");
+ if (IS_ERR(pfdev->regulator)) {
+ ret = PTR_ERR(pfdev->regulator);
+ pfdev->regulator = NULL;
+ if (ret == -ENODEV)
+ return 0;
+ dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(pfdev->regulator);
+ if (ret < 0) {
+ dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void panfrost_regulator_fini(struct panfrost_device *pfdev)
+{
+ if (pfdev->regulator)
+ regulator_disable(pfdev->regulator);
+}
+
+int panfrost_device_init(struct panfrost_device *pfdev)
+{
+ int err;
+ struct resource *res;
+
+ mutex_init(&pfdev->sched_lock);
+ mutex_init(&pfdev->reset_lock);
+ INIT_LIST_HEAD(&pfdev->scheduled_jobs);
+
+ spin_lock_init(&pfdev->hwaccess_lock);
+
+ err = panfrost_clk_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "clk init failed %d\n", err);
+ return err;
+ }
+
+ err = panfrost_regulator_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "regulator init failed %d\n", err);
+ goto err_out0;
+ }
+
+ err = panfrost_reset_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "reset init failed %d\n", err);
+ goto err_out1;
+ }
+
+ res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
+ pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
+ if (IS_ERR(pfdev->iomem)) {
+ dev_err(pfdev->dev, "failed to ioremap iomem\n");
+ err = PTR_ERR(pfdev->iomem);
+ goto err_out2;
+ }
+
+ err = panfrost_gpu_init(pfdev);
+ if (err)
+ goto err_out2;
+
+ err = panfrost_mmu_init(pfdev);
+ if (err)
+ goto err_out3;
+
+ err = panfrost_job_init(pfdev);
+ if (err)
+ goto err_out4;
+
+ /* runtime PM will wake us up later */
+ panfrost_gpu_power_off(pfdev);
+
+ pm_runtime_set_active(pfdev->dev);
+ pm_runtime_get_sync(pfdev->dev);
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+
+ return 0;
+err_out4:
+ panfrost_mmu_fini(pfdev);
+err_out3:
+ panfrost_gpu_fini(pfdev);
+err_out2:
+ panfrost_reset_fini(pfdev);
+err_out1:
+ panfrost_regulator_fini(pfdev);
+err_out0:
+ panfrost_clk_fini(pfdev);
+ return err;
+}
+
+void panfrost_device_fini(struct panfrost_device *pfdev)
+{
+ panfrost_job_fini(pfdev);
+ panfrost_mmu_fini(pfdev);
+ panfrost_gpu_fini(pfdev);
+ panfrost_reset_fini(pfdev);
+ panfrost_regulator_fini(pfdev);
+ panfrost_clk_fini(pfdev);
+}
+
+const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code)
+{
+ switch (exception_code) {
+ /* Non-Fault Status code */
+ case 0x00: return "NOT_STARTED/IDLE/OK";
+ case 0x01: return "DONE";
+ case 0x02: return "INTERRUPTED";
+ case 0x03: return "STOPPED";
+ case 0x04: return "TERMINATED";
+ case 0x08: return "ACTIVE";
+ /* Job exceptions */
+ case 0x40: return "JOB_CONFIG_FAULT";
+ case 0x41: return "JOB_POWER_FAULT";
+ case 0x42: return "JOB_READ_FAULT";
+ case 0x43: return "JOB_WRITE_FAULT";
+ case 0x44: return "JOB_AFFINITY_FAULT";
+ case 0x48: return "JOB_BUS_FAULT";
+ case 0x50: return "INSTR_INVALID_PC";
+ case 0x51: return "INSTR_INVALID_ENC";
+ case 0x52: return "INSTR_TYPE_MISMATCH";
+ case 0x53: return "INSTR_OPERAND_FAULT";
+ case 0x54: return "INSTR_TLS_FAULT";
+ case 0x55: return "INSTR_BARRIER_FAULT";
+ case 0x56: return "INSTR_ALIGN_FAULT";
+ case 0x58: return "DATA_INVALID_FAULT";
+ case 0x59: return "TILE_RANGE_FAULT";
+ case 0x5A: return "ADDR_RANGE_FAULT";
+ case 0x60: return "OUT_OF_MEMORY";
+ /* GPU exceptions */
+ case 0x80: return "DELAYED_BUS_FAULT";
+ case 0x88: return "SHAREABILITY_FAULT";
+ /* MMU exceptions */
+ case 0xC1: return "TRANSLATION_FAULT_LEVEL1";
+ case 0xC2: return "TRANSLATION_FAULT_LEVEL2";
+ case 0xC3: return "TRANSLATION_FAULT_LEVEL3";
+ case 0xC4: return "TRANSLATION_FAULT_LEVEL4";
+ case 0xC8: return "PERMISSION_FAULT";
+ case 0xC9 ... 0xCF: return "PERMISSION_FAULT";
+ case 0xD1: return "TRANSTAB_BUS_FAULT_LEVEL1";
+ case 0xD2: return "TRANSTAB_BUS_FAULT_LEVEL2";
+ case 0xD3: return "TRANSTAB_BUS_FAULT_LEVEL3";
+ case 0xD4: return "TRANSTAB_BUS_FAULT_LEVEL4";
+ case 0xD8: return "ACCESS_FLAG";
+ case 0xD9 ... 0xDF: return "ACCESS_FLAG";
+ case 0xE0 ... 0xE7: return "ADDRESS_SIZE_FAULT";
+ case 0xE8 ... 0xEF: return "MEMORY_ATTRIBUTES_FAULT";
+ }
+
+ return "UNKNOWN";
+}
+
+#ifdef CONFIG_PM
+int panfrost_device_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+
+ panfrost_gpu_soft_reset(pfdev);
+
+ /* TODO: Re-enable all other address spaces */
+ panfrost_gpu_power_on(pfdev);
+ panfrost_mmu_enable(pfdev, 0);
+ panfrost_job_enable_interrupts(pfdev);
+ panfrost_devfreq_resume(pfdev);
+
+ return 0;
+}
+
+int panfrost_device_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+
+ if (!panfrost_job_is_idle(pfdev))
+ return -EBUSY;
+
+ panfrost_devfreq_suspend(pfdev);
+ panfrost_gpu_power_off(pfdev);
+
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
new file mode 100644
index 000000000000..56f452dfb490
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_DEVICE_H__
+#define __PANFROST_DEVICE_H__
+
+#include <linux/spinlock.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+struct panfrost_device;
+struct panfrost_mmu;
+struct panfrost_job_slot;
+struct panfrost_job;
+
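+/*
+ * Three hardware job slots are exposed by the Midgard/Bifrost job manager;
+ * by convention they carry fragment, vertex/tiler and compute work. The
+ * slot usage described here is an assumption, not spelled out in this patch.
+ */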
+#define NUM_JOB_SLOTS 3
+
+struct panfrost_features {
+ u16 id;
+ u16 revision;
+
+ u64 shader_present;
+ u64 tiler_present;
+ u64 l2_present;
+ u64 stack_present;
+ u32 as_present;
+ u32 js_present;
+
+ u32 l2_features;
+ u32 core_features;
+ u32 tiler_features;
+ u32 mem_features;
+ u32 mmu_features;
+ u32 thread_features;
+ u32 max_threads;
+ u32 thread_max_workgroup_sz;
+ u32 thread_max_barrier_sz;
+ u32 coherency_features;
+ u32 texture_features[4];
+ u32 js_features[16];
+
+ u32 nr_core_groups;
+
+ unsigned long hw_features[64 / BITS_PER_LONG];
+ unsigned long hw_issues[64 / BITS_PER_LONG];
+};
+
+struct panfrost_devfreq_slot {
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ bool busy;
+};
+
+struct panfrost_device {
+ struct device *dev;
+ struct drm_device *ddev;
+ struct platform_device *pdev;
+
+ spinlock_t hwaccess_lock;
+
+ struct drm_mm mm;
+ spinlock_t mm_lock;
+
+ void __iomem *iomem;
+ struct clk *clock;
+ struct regulator *regulator;
+ struct reset_control *rstc;
+
+ struct panfrost_features features;
+
+ struct panfrost_mmu *mmu;
+ struct panfrost_job_slot *js;
+
+ struct panfrost_job *jobs[NUM_JOB_SLOTS];
+ struct list_head scheduled_jobs;
+
+ struct mutex sched_lock;
+ struct mutex reset_lock;
+
+ struct {
+ struct devfreq *devfreq;
+ struct thermal_cooling_device *cooling;
+ unsigned long cur_freq;
+ unsigned long cur_volt;
+ struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ } devfreq;
+};
+
+struct panfrost_file_priv {
+ struct panfrost_device *pfdev;
+
+ struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+};
+
+static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
+{
+ return ddev->dev_private;
+}
+
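+/*
+ * Compare the probed GPU ID with a model ID from the table in panfrost_gpu.c.
+ * For Bifrost-style IDs (non-zero architecture major in the top nibble) only
+ * the architecture and product major nibbles are kept, so e.g. any G31
+ * revision compares equal to 0x7003. The ID layout is inferred from the
+ * model table rather than stated in this patch.
+ */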
+static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id)
+{
+ s32 match_id = pfdev->features.id;
+
+ if (match_id & 0xf000)
+ match_id &= 0xf00f;
+ return match_id - id;
+}
+
+static inline bool panfrost_model_eq(struct panfrost_device *pfdev, s32 id)
+{
+ return !panfrost_model_cmp(pfdev, id);
+}
+
+int panfrost_device_init(struct panfrost_device *pfdev);
+void panfrost_device_fini(struct panfrost_device *pfdev);
+
+int panfrost_device_resume(struct device *dev);
+int panfrost_device_suspend(struct device *dev);
+
+const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
new file mode 100644
index 000000000000..d11e2281dde6
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pagemap.h>
+#include <linux/pm_runtime.h>
+#include <drm/panfrost_drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+#include "panfrost_job.h"
+#include "panfrost_gpu.h"
+
+static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
+{
+ struct drm_panfrost_get_param *param = data;
+ struct panfrost_device *pfdev = ddev->dev_private;
+
+ if (param->pad != 0)
+ return -EINVAL;
+
+ switch (param->param) {
+ case DRM_PANFROST_PARAM_GPU_PROD_ID:
+ param->value = pfdev->features.id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_panfrost_create_bo *args = data;
+
+ if (!args->size || args->flags || args->pad)
+ return -EINVAL;
+
+ shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
+ &args->handle);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base));
+ if (ret)
+ goto err_free;
+
+ args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT;
+
+ return 0;
+
+err_free:
+ drm_gem_object_put_unlocked(&shmem->base);
+ return ret;
+}
+
+/**
+ * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @args: IOCTL args
+ * @job: job being set up
+ *
+ * Resolve handles from userspace to BOs and attach them to job.
+ *
+ * Note that this function doesn't need to unreference the BOs on
+ * failure, because that will happen at panfrost_job_cleanup() time.
+ */
+static int
+panfrost_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_panfrost_submit *args,
+ struct panfrost_job *job)
+{
+ job->bo_count = args->bo_handle_count;
+
+ if (!job->bo_count)
+ return 0;
+
+ job->implicit_fences = kvmalloc_array(job->bo_count,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->implicit_fences)
+ return -ENOMEM;
+
+ return drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+}
+
+/**
+ * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @args: IOCTL args
+ * @job: job being set up
+ *
+ * Resolve syncobjs from userspace to fences and attach them to job.
+ *
+ * Note that this function doesn't need to unreference the fences on
+ * failure, because that will happen at panfrost_job_cleanup() time.
+ */
+static int
+panfrost_copy_in_sync(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_panfrost_submit *args,
+ struct panfrost_job *job)
+{
+ u32 *handles;
+ int ret = 0;
+ int i;
+
+ job->in_fence_count = args->in_sync_count;
+
+ if (!job->in_fence_count)
+ return 0;
+
+ job->in_fences = kvmalloc_array(job->in_fence_count,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->in_fences) {
+ DRM_DEBUG("Failed to allocate job in fences\n");
+ return -ENOMEM;
+ }
+
+ handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
+ if (!handles) {
+ ret = -ENOMEM;
+ DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
+ goto fail;
+ }
+
+ if (copy_from_user(handles,
+ (void __user *)(uintptr_t)args->in_syncs,
+ job->in_fence_count * sizeof(u32))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy in syncobj handles\n");
+ goto fail;
+ }
+
+ for (i = 0; i < job->in_fence_count; i++) {
+ ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
+ &job->in_fences[i]);
+ if (ret == -EINVAL)
+ goto fail;
+ }
+
+fail:
+ kvfree(handles);
+ return ret;
+}
+
+static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct drm_panfrost_submit *args = data;
+ struct drm_syncobj *sync_out = NULL;
+ struct panfrost_job *job;
+ int ret = 0;
+
+ if (!args->jc)
+ return -EINVAL;
+
+ if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
+ return -EINVAL;
+
+ if (args->out_sync > 0) {
+ sync_out = drm_syncobj_find(file, args->out_sync);
+ if (!sync_out)
+ return -ENODEV;
+ }
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job) {
+ ret = -ENOMEM;
+ goto fail_out_sync;
+ }
+
+ kref_init(&job->refcount);
+
+ job->pfdev = pfdev;
+ job->jc = args->jc;
+ job->requirements = args->requirements;
+ job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
+ job->file_priv = file->driver_priv;
+
+ ret = panfrost_copy_in_sync(dev, file, args, job);
+ if (ret)
+ goto fail_job;
+
+ ret = panfrost_lookup_bos(dev, file, args, job);
+ if (ret)
+ goto fail_job;
+
+ ret = panfrost_job_push(job);
+ if (ret)
+ goto fail_job;
+
+ /* Update the return sync object for the job */
+ if (sync_out)
+ drm_syncobj_replace_fence(sync_out, job->render_done_fence);
+
+fail_job:
+ panfrost_job_put(job);
+fail_out_sync:
+ if (sync_out)
+ drm_syncobj_put(sync_out);
+
+ return ret;
+}
+
+static int
+panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ long ret;
+ struct drm_panfrost_wait_bo *args = data;
+ struct drm_gem_object *gem_obj;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ if (args->pad)
+ return -EINVAL;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+ true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_panfrost_mmap_bo *args = data;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ if (args->flags != 0) {
+ DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret == 0)
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_panfrost_get_bo_offset *args = data;
+ struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+ bo = to_panfrost_bo(gem_obj);
+
+ args->offset = bo->node.start << PAGE_SHIFT;
+
+ drm_gem_object_put_unlocked(gem_obj);
+ return 0;
+}
+
+static int
+panfrost_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_file_priv *panfrost_priv;
+
+ panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
+ if (!panfrost_priv)
+ return -ENOMEM;
+
+ panfrost_priv->pfdev = pfdev;
+ file->driver_priv = panfrost_priv;
+
+ return panfrost_job_open(panfrost_priv);
+}
+
+static void
+panfrost_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+
+ panfrost_job_close(panfrost_priv);
+
+ kfree(panfrost_priv);
+}
+
+/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
+ * address space. Note that render nodes would be able to submit jobs that
+ * could access BOs from clients authenticated with the master node.
+ */
+static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
+#define PANFROST_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
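+/*
+ * For instance, PANFROST_IOCTL(SUBMIT, submit, flags) expands to
+ * DRM_IOCTL_DEF_DRV(PANFROST_SUBMIT, panfrost_ioctl_submit, flags).
+ */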
+
+ PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW | DRM_AUTH),
+ PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
+};
+
+DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+
+static struct drm_driver panfrost_drm_driver = {
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_SYNCOBJ,
+ .open = panfrost_open,
+ .postclose = panfrost_postclose,
+ .ioctls = panfrost_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
+ .fops = &panfrost_drm_driver_fops,
+ .name = "panfrost",
+ .desc = "panfrost DRM",
+ .date = "20180908",
+ .major = 1,
+ .minor = 0,
+
+ .gem_create_object = panfrost_gem_create_object,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+};
+
+static int panfrost_probe(struct platform_device *pdev)
+{
+ struct panfrost_device *pfdev;
+ struct drm_device *ddev;
+ int err;
+
+ pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
+ if (!pfdev)
+ return -ENOMEM;
+
+ pfdev->pdev = pdev;
+ pfdev->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, pfdev);
+
+ /* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ddev->dev_private = pfdev;
+ pfdev->ddev = ddev;
+
+ spin_lock_init(&pfdev->mm_lock);
+
+ /* 4 GB of GPU address space is enough for now; it could be extended to 48-bit */
+ drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+
+ pm_runtime_use_autosuspend(pfdev->dev);
+ pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
+ pm_runtime_enable(pfdev->dev);
+
+ err = panfrost_device_init(pfdev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Fatal error during GPU init\n");
+ goto err_out0;
+ }
+
+ err = panfrost_devfreq_init(pfdev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+ goto err_out1;
+ }
+
+ /*
+ * Register the DRM device with the core so it becomes visible to
+ * userspace (this render-only driver has no connectors to register).
+ */
+ err = drm_dev_register(ddev, 0);
+ if (err < 0)
+ goto err_out1;
+
+ return 0;
+
+err_out1:
+ panfrost_device_fini(pfdev);
+err_out0:
+ pm_runtime_disable(pfdev->dev);
+ drm_dev_put(ddev);
+ return err;
+}
+
+static int panfrost_remove(struct platform_device *pdev)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct drm_device *ddev = pfdev->ddev;
+
+ drm_dev_unregister(ddev);
+ pm_runtime_get_sync(pfdev->dev);
+ pm_runtime_put_sync_autosuspend(pfdev->dev);
+ pm_runtime_disable(pfdev->dev);
+ panfrost_device_fini(pfdev);
+ drm_dev_put(ddev);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,mali-t604" },
+ { .compatible = "arm,mali-t624" },
+ { .compatible = "arm,mali-t628" },
+ { .compatible = "arm,mali-t720" },
+ { .compatible = "arm,mali-t760" },
+ { .compatible = "arm,mali-t820" },
+ { .compatible = "arm,mali-t830" },
+ { .compatible = "arm,mali-t860" },
+ { .compatible = "arm,mali-t880" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static const struct dev_pm_ops panfrost_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
+};
+
+static struct platform_driver panfrost_driver = {
+ .probe = panfrost_probe,
+ .remove = panfrost_remove,
+ .driver = {
+ .name = "panfrost",
+ .pm = &panfrost_pm_ops,
+ .of_match_table = dt_match,
+ },
+};
+module_platform_driver(panfrost_driver);
+
+MODULE_AUTHOR("Panfrost Project Developers");
+MODULE_DESCRIPTION("Panfrost DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
new file mode 100644
index 000000000000..5056777c7744
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved. */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+#ifndef __PANFROST_FEATURES_H__
+#define __PANFROST_FEATURES_H__
+
+#include <linux/bitops.h>
+
+#include "panfrost_device.h"
+
+enum panfrost_hw_feature {
+ HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ HW_FEATURE_XAFFINITY,
+ HW_FEATURE_OUT_OF_ORDER_EXEC,
+ HW_FEATURE_MRT,
+ HW_FEATURE_BRNDOUT_CC,
+ HW_FEATURE_INTERPIPE_REG_ALIASING,
+ HW_FEATURE_LD_ST_TILEBUFFER,
+ HW_FEATURE_MSAA_16X,
+ HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ HW_FEATURE_T7XX_PAIRING_RULES,
+ HW_FEATURE_LD_ST_LEA_TEX,
+ HW_FEATURE_LINEAR_FILTER_FLOAT,
+ HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
+ HW_FEATURE_TEST4_DATUM_MODE,
+ HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ HW_FEATURE_BRNDOUT_KILL,
+ HW_FEATURE_WARPING,
+ HW_FEATURE_V4,
+ HW_FEATURE_FLUSH_REDUCTION,
+ HW_FEATURE_PROTECTED_MODE,
+ HW_FEATURE_COHERENCY_REG,
+ HW_FEATURE_PROTECTED_DEBUG_MODE,
+ HW_FEATURE_AARCH64_MMU,
+ HW_FEATURE_TLS_HASHING,
+ HW_FEATURE_THREAD_GROUP_SPLIT,
+ HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+};
+
+#define hw_features_t600 (\
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+#define hw_features_t620 (\
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+#define hw_features_t720 (\
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_OPTIMIZED_COVERAGE_MASK) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+
+#define hw_features_t760 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+/* T860 */
+#define hw_features_t860 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_t880 hw_features_t860
+
+#define hw_features_t830 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_t820 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_g71 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g72 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g51 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g52 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g76 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
+ BIT_ULL(HW_FEATURE_TLS_HASHING) | \
+ BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+
+#define hw_features_g31 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
+ BIT_ULL(HW_FEATURE_TLS_HASHING) | \
+ BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+
+static inline bool panfrost_has_hw_feature(struct panfrost_device *pfdev,
+ enum panfrost_hw_feature feat)
+{
+ return test_bit(feat, pfdev->features.hw_features);
+}
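+
+/*
+ * Typical use (an illustrative sketch, not taken from this patch):
+ *
+ *     if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
+ *             ... select the AArch64 page table format ...
+ */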
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
new file mode 100644
index 000000000000..a5528a360ef4
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/panfrost_drm.h>
+#include "panfrost_device.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+
+/* Called by the DRM core on the last userspace/kernel unreference of
+ * the BO.
+ */
+static void panfrost_gem_free_object(struct drm_gem_object *obj)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_device *pfdev = obj->dev->dev_private;
+
+ panfrost_mmu_unmap(bo);
+
+ spin_lock(&pfdev->mm_lock);
+ drm_mm_remove_node(&bo->node);
+ spin_unlock(&pfdev->mm_lock);
+
+ drm_gem_shmem_free_object(obj);
+}
+
+static const struct drm_gem_object_funcs panfrost_gem_funcs = {
+ .free = panfrost_gem_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * panfrost_gem_create_object - Implementation of driver->gem_create_object.
+ * @dev: DRM device
+ * @size: Size in bytes of the memory the object will reference
+ *
+ * This lets the GEM helpers allocate object structs for us, and keep
+ * our BO stats correct.
+ */
+struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
+{
+ int ret;
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ obj->base.base.funcs = &panfrost_gem_funcs;
+
+ spin_lock(&pfdev->mm_lock);
+ ret = drm_mm_insert_node(&pfdev->mm, &obj->node,
+ roundup(size, PAGE_SIZE) >> PAGE_SHIFT);
+ spin_unlock(&pfdev->mm_lock);
+ if (ret)
+ goto free_obj;
+
+ return &obj->base.base;
+
+free_obj:
+ kfree(obj);
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *
+panfrost_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_object *obj;
+ struct panfrost_gem_object *pobj;
+
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ pobj = to_panfrost_bo(obj);
+
+ obj->resv = attach->dmabuf->resv;
+
+ panfrost_mmu_map(pobj);
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
new file mode 100644
index 000000000000..045000eb5fcf
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_GEM_H__
+#define __PANFROST_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_mm.h>
+
+struct panfrost_gem_object {
+ struct drm_gem_shmem_object base;
+
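+ /*
+ * GPU VA range for this BO, allocated from the drm_mm manager set up in
+ * panfrost_drv.c and filled in by panfrost_gem_create_object(); the offset
+ * reported to userspace is node.start << PAGE_SHIFT.
+ */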
+ struct drm_mm_node node;
+};
+
+static inline
+struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
+}
+
+struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
+
+struct drm_gem_object *
+panfrost_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
new file mode 100644
index 000000000000..58ef25573cda
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+
+#include "panfrost_device.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gpu.h"
+#include "panfrost_regs.h"
+
+#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
+#define gpu_read(dev, reg) readl(dev->iomem + reg)
+
+static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 state = gpu_read(pfdev, GPU_INT_STAT);
+ u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+
+ if (!state)
+ return IRQ_NONE;
+
+ if (state & GPU_IRQ_MASK_ERROR) {
+ u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
+ address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);
+
+ dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
+ fault_status & 0xFF, panfrost_exception_name(pfdev, fault_status),
+ address);
+
+ if (state & GPU_IRQ_MULTIPLE_FAULT)
+ dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ }
+
+ gpu_write(pfdev, GPU_INT_CLEAR, state);
+
+ return IRQ_HANDLED;
+}
+
+int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
+{
+ int ret;
+ u32 val;
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
+
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
+ val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+
+ if (ret) {
+ dev_err(pfdev->dev, "gpu soft reset timed out\n");
+ return ret;
+ }
+
+ gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
+ gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
+
+ return 0;
+}
+
+static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
+{
+ u32 quirks = 0;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
+ panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
+ quirks |= SC_LS_PAUSEBUFFER_DISABLE;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
+ quirks |= SC_SDC_DISABLE_OQ_DISCARD;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
+ quirks |= SC_ENABLE_TEXGRD_FLAGS;
+
+ if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
+ if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
+ quirks |= SC_LS_ATTR_CHECK_DISABLE;
+ else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
+ quirks |= SC_LS_ALLOW_ATTR_TYPES;
+ }
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
+ quirks |= SC_TLS_HASH_ENABLE;
+
+ if (quirks)
+ gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);
+
+ quirks = gpu_read(pfdev, GPU_TILER_CONFIG);
+
+ /* Set tiler clock gate override if required */
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
+ quirks |= TC_CLOCK_GATE_OVERRIDE;
+
+ gpu_write(pfdev, GPU_TILER_CONFIG, quirks);
+
+ quirks = gpu_read(pfdev, GPU_L2_MMU_CONFIG);
+
+ /* Limit read & write ID width for AXI */
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+ quirks &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS |
+ L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES);
+ else
+ quirks &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS |
+ L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
+
+ gpu_write(pfdev, GPU_L2_MMU_CONFIG, quirks);
+
+ quirks = 0;
+ if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
+ pfdev->features.revision >= 0x2000)
+ quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
+ else if (panfrost_model_eq(pfdev, 0x6000) &&
+ pfdev->features.coherency_features == COHERENCY_ACE)
+ quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
+ JM_FORCE_COHERENCY_FEATURES_SHIFT;
+
+ if (quirks)
+ gpu_write(pfdev, GPU_JM_CONFIG, quirks);
+}
+
+#define MAX_HW_REVS 6
+
+struct panfrost_model {
+ const char *name;
+ u32 id;
+ u32 id_mask;
+ u64 features;
+ u64 issues;
+ struct {
+ u32 revision;
+ u64 issues;
+ } revs[MAX_HW_REVS];
+};
+
+#define GPU_MODEL(_name, _id, ...) \
+{\
+ .name = __stringify(_name), \
+ .id = _id, \
+ .features = hw_features_##_name, \
+ .issues = hw_issues_##_name, \
+ .revs = { __VA_ARGS__ }, \
+}
+
+#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
+{\
+ .revision = (_rev) << 12 | (_p) << 4 | (_s), \
+ .issues = hw_issues_##name##_r##_rev##p##_p##stat, \
+}
+#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )
+
+static const struct panfrost_model gpu_models[] = {
+ /* T60x has an oddball version */
+ GPU_MODEL(t600, 0x600,
+ GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
+ GPU_MODEL(t620, 0x620,
+ GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
+ GPU_MODEL(t720, 0x720),
+ GPU_MODEL(t760, 0x750,
+ GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
+ GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
+ GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
+ GPU_MODEL(t820, 0x820),
+ GPU_MODEL(t830, 0x830),
+ GPU_MODEL(t860, 0x860),
+ GPU_MODEL(t880, 0x880),
+
+ GPU_MODEL(g71, 0x6000,
+ GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
+ GPU_MODEL(g72, 0x6001),
+ GPU_MODEL(g51, 0x7000),
+ GPU_MODEL(g76, 0x7001),
+ GPU_MODEL(g52, 0x7002),
+ GPU_MODEL(g31, 0x7003,
+ GPU_REV(g31, 1, 0)),
+};
+
+static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
+{
+ u32 gpu_id, num_js, major, minor, status, rev;
+ const char *name = "unknown";
+ u64 hw_feat = 0;
+ u64 hw_issues = hw_issues_all;
+ const struct panfrost_model *model;
+ int i;
+
+ pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
+ pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
+ pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
+ pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
+ pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
+ pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+ pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
+ for (i = 0; i < 4; i++)
+ pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
+
+ pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);
+
+ pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
+ num_js = hweight32(pfdev->features.js_present);
+ for (i = 0; i < num_js; i++)
+ pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));
+
+ pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
+ pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;
+
+ pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
+ pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;
+
+ pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
+ pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
+ pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);
+
+ pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
+ pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;
+
+ gpu_id = gpu_read(pfdev, GPU_ID);
+ pfdev->features.revision = gpu_id & 0xffff;
+ pfdev->features.id = gpu_id >> 16;
+
+ /* The T60x has an oddball ID value. Fix it up to the standard Midgard
+ * format so we (and userspace) don't have to special case it.
+ */
+ if (pfdev->features.id == 0x6956)
+ pfdev->features.id = 0x0600;
+
+ major = (pfdev->features.revision >> 12) & 0xf;
+ minor = (pfdev->features.revision >> 4) & 0xff;
+ status = pfdev->features.revision & 0xf;
+ rev = pfdev->features.revision;
+
+ gpu_id = pfdev->features.id;
+
+ for (model = gpu_models; model->name; model++) {
+ int best = -1;
+
+ if (!panfrost_model_eq(pfdev, model->id))
+ continue;
+
+ name = model->name;
+ hw_feat = model->features;
+ hw_issues |= model->issues;
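+ /* Prefer an exact revision match; otherwise fall back to an entry
+ * that matches once the status nibble (low 4 bits) is masked off.
+ */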
+ for (i = 0; i < MAX_HW_REVS; i++) {
+ if (model->revs[i].revision == rev) {
+ best = i;
+ break;
+ } else if (model->revs[i].revision == (rev & ~0xf))
+ best = i;
+ }
+
+ if (best >= 0)
+ hw_issues |= model->revs[best].issues;
+
+ break;
+ }
+
+ bitmap_from_u64(pfdev->features.hw_features, hw_feat);
+ bitmap_from_u64(pfdev->features.hw_issues, hw_issues);
+
+ dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ name, gpu_id, major, minor, status);
+ dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
+ pfdev->features.hw_features,
+ pfdev->features.hw_issues);
+
+ dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
+ pfdev->features.l2_features,
+ pfdev->features.core_features,
+ pfdev->features.tiler_features,
+ pfdev->features.mem_features,
+ pfdev->features.mmu_features,
+ pfdev->features.as_present,
+ pfdev->features.js_present);
+
+ dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
+ pfdev->features.shader_present, pfdev->features.l2_present);
+}
+
+void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+{
+ int ret;
+ u32 val;
+
+ /* Just turn on everything for now */
+ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
+ val, val == pfdev->features.l2_present, 100, 1000);
+
+ gpu_write(pfdev, STACK_PWRON_LO, pfdev->features.stack_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + STACK_READY_LO,
+ val, val == pfdev->features.stack_present, 100, 1000);
+
+ gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+ val, val == pfdev->features.shader_present, 100, 1000);
+
+ gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+ val, val == pfdev->features.tiler_present, 100, 1000);
+
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu");
+}
+
+void panfrost_gpu_power_off(struct panfrost_device *pfdev)
+{
+ gpu_write(pfdev, TILER_PWROFF_LO, 0);
+ gpu_write(pfdev, SHADER_PWROFF_LO, 0);
+ gpu_write(pfdev, STACK_PWROFF_LO, 0);
+ gpu_write(pfdev, L2_PWROFF_LO, 0);
+}
+
+int panfrost_gpu_init(struct panfrost_device *pfdev)
+{
+ int err, irq;
+
+ err = panfrost_gpu_soft_reset(pfdev);
+ if (err)
+ return err;
+
+ panfrost_gpu_init_features(pfdev);
+
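+ /* GPU_MMU_FEATURES packs the input (VA) width in bits [7:0] and the
+ * output (PA) width in bits [15:8]; size the DMA mask to the PA width.
+ */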
+ dma_set_mask_and_coherent(pfdev->dev,
+ DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+ if (irq <= 0)
+ return -ENODEV;
+
+ err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
+ IRQF_SHARED, "gpu", pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "failed to request gpu irq");
+ return err;
+ }
+
+ panfrost_gpu_init_quirks(pfdev);
+ panfrost_gpu_power_on(pfdev);
+
+ return 0;
+}
+
+void panfrost_gpu_fini(struct panfrost_device *pfdev)
+{
+ panfrost_gpu_power_off(pfdev);
+}
+
+u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
+{
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ return gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
+ return 0;
+}
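
For reference, panfrost_gpu_init_features() splits GPU_ID into an ID in the upper 16 bits and a revision packed as (major << 12) | (minor << 4) | status, the same encoding the GPU_REV() macros produce. A standalone sketch of that decode, fed a hypothetical T860 r0p2 value, is:

/* Illustration only: mirrors the decode done by panfrost_gpu_init_features(). */
#include <stdio.h>

int main(void)
{
        unsigned int gpu_id = 0x08600020;       /* hypothetical T860 r0p2 */
        unsigned int id = gpu_id >> 16;
        unsigned int rev = gpu_id & 0xffff;
        unsigned int major = (rev >> 12) & 0xf;
        unsigned int minor = (rev >> 4) & 0xff;
        unsigned int status = rev & 0xf;

        printf("id 0x%x r%up%u status %u\n", id, major, minor, status);
        /* prints: id 0x860 r0p2 status 0 */
        return 0;
}
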
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
new file mode 100644
index 000000000000..4112412087b2
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_GPU_H__
+#define __PANFROST_GPU_H__
+
+struct panfrost_device;
+
+int panfrost_gpu_init(struct panfrost_device *pfdev);
+void panfrost_gpu_fini(struct panfrost_device *pfdev);
+
+u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev);
+
+int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
+void panfrost_gpu_power_on(struct panfrost_device *pfdev);
+void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
new file mode 100644
index 000000000000..cec6dcdadb5c
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved. */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+#ifndef __PANFROST_ISSUES_H__
+#define __PANFROST_ISSUES_H__
+
+#include <linux/bitops.h>
+
+#include "panfrost_device.h"
+
+/*
+ * This is not a complete list of issues, but only the ones the driver needs
+ * to care about.
+ */
+enum panfrost_hw_issue {
+ HW_ISSUE_6367,
+ HW_ISSUE_6787,
+ HW_ISSUE_8186,
+ HW_ISSUE_8245,
+ HW_ISSUE_8316,
+ HW_ISSUE_8394,
+ HW_ISSUE_8401,
+ HW_ISSUE_8408,
+ HW_ISSUE_8443,
+ HW_ISSUE_8987,
+ HW_ISSUE_9435,
+ HW_ISSUE_9510,
+ HW_ISSUE_9630,
+ HW_ISSUE_10327,
+ HW_ISSUE_10649,
+ HW_ISSUE_10676,
+ HW_ISSUE_10797,
+ HW_ISSUE_10817,
+ HW_ISSUE_10883,
+ HW_ISSUE_10959,
+ HW_ISSUE_10969,
+ HW_ISSUE_11020,
+ HW_ISSUE_11024,
+ HW_ISSUE_11035,
+ HW_ISSUE_11056,
+ HW_ISSUE_T76X_3542,
+ HW_ISSUE_T76X_3953,
+ HW_ISSUE_TMIX_8463,
+ GPUCORE_1619,
+ HW_ISSUE_TMIX_8438,
+ HW_ISSUE_TGOX_R1_1234,
+ HW_ISSUE_END
+};
+
+#define hw_issues_all (\
+ BIT_ULL(HW_ISSUE_9435))
+
+#define hw_issues_t600 (\
+ BIT_ULL(HW_ISSUE_6367) | \
+ BIT_ULL(HW_ISSUE_6787) | \
+ BIT_ULL(HW_ISSUE_8408) | \
+ BIT_ULL(HW_ISSUE_9510) | \
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10676) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11035) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t600_r0p0_15dev0 (\
+ BIT_ULL(HW_ISSUE_8186) | \
+ BIT_ULL(HW_ISSUE_8245) | \
+ BIT_ULL(HW_ISSUE_8316) | \
+ BIT_ULL(HW_ISSUE_8394) | \
+ BIT_ULL(HW_ISSUE_8401) | \
+ BIT_ULL(HW_ISSUE_8443) | \
+ BIT_ULL(HW_ISSUE_8987) | \
+ BIT_ULL(HW_ISSUE_9630) | \
+ BIT_ULL(HW_ISSUE_10969) | \
+ BIT_ULL(GPUCORE_1619))
+
+#define hw_issues_t620 (\
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_10959) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t620_r0p1 (\
+ BIT_ULL(HW_ISSUE_10327) | \
+ BIT_ULL(HW_ISSUE_10676) | \
+ BIT_ULL(HW_ISSUE_10817) | \
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_11035))
+
+#define hw_issues_t620_r1p0 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024))
+
+#define hw_issues_t720 (\
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10797) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t760 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t760_r0p0 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p1 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p1_50rel0 (\
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p2 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p3 (\
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t820 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t830 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t860 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t880 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_g31 0
+
+#define hw_issues_g31_r1p0 (\
+ BIT_ULL(HW_ISSUE_TGOX_R1_1234))
+
+#define hw_issues_g51 0
+
+#define hw_issues_g52 0
+
+#define hw_issues_g71 (\
+ BIT_ULL(HW_ISSUE_TMIX_8463) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_g71_r0p0_05dev0 (\
+ BIT_ULL(HW_ISSUE_T76X_3953))
+
+#define hw_issues_g72 0
+
+#define hw_issues_g76 0
+
+static inline bool panfrost_has_hw_issue(struct panfrost_device *pfdev,
+ enum panfrost_hw_issue issue)
+{
+ return test_bit(issue, pfdev->features.hw_issues);
+}
+
+#endif /* __PANFROST_ISSUES_H__ */
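
The per-model and per-revision masks above are plain u64 bit masks; panfrost_gpu_init_features() ORs the applicable ones together and expands the result into a bitmap that panfrost_has_hw_issue() tests one bit at a time. A simplified userspace sketch of the same pattern, using a raw u64 instead of the kernel bitmap helpers, is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum hw_issue { ISSUE_A, ISSUE_B, ISSUE_C, ISSUE_END };

#define BIT_ULL(n)      (1ULL << (n))

static bool has_issue(uint64_t mask, enum hw_issue issue)
{
        return mask & BIT_ULL(issue);
}

int main(void)
{
        uint64_t issues = BIT_ULL(ISSUE_A) | BIT_ULL(ISSUE_C);

        printf("A:%d B:%d C:%d\n", has_issue(issues, ISSUE_A),
               has_issue(issues, ISSUE_B), has_issue(issues, ISSUE_C));
        /* prints: A:1 B:0 C:1 */
        return 0;
}
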
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
new file mode 100644
index 000000000000..a5716c8fe8b3
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reservation.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panfrost_drm.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_job.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gem.h"
+#include "panfrost_regs.h"
+#include "panfrost_gpu.h"
+#include "panfrost_mmu.h"
+
+#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
+#define job_read(dev, reg) readl(dev->iomem + (reg))
+
+struct panfrost_queue_state {
+ struct drm_gpu_scheduler sched;
+
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+struct panfrost_job_slot {
+ struct panfrost_queue_state queue[NUM_JOB_SLOTS];
+ spinlock_t job_lock;
+};
+
+static struct panfrost_job *
+to_panfrost_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct panfrost_job, base);
+}
+
+struct panfrost_fence {
+ struct dma_fence base;
+ struct drm_device *dev;
+ /* panfrost seqno for signaled() test */
+ u64 seqno;
+ int queue;
+};
+
+static inline struct panfrost_fence *
+to_panfrost_fence(struct dma_fence *fence)
+{
+ return (struct panfrost_fence *)fence;
+}
+
+static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "panfrost";
+}
+
+static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct panfrost_fence *f = to_panfrost_fence(fence);
+
+ switch (f->queue) {
+ case 0:
+ return "panfrost-js-0";
+ case 1:
+ return "panfrost-js-1";
+ case 2:
+ return "panfrost-js-2";
+ default:
+ return NULL;
+ }
+}
+
+static const struct dma_fence_ops panfrost_fence_ops = {
+ .get_driver_name = panfrost_fence_get_driver_name,
+ .get_timeline_name = panfrost_fence_get_timeline_name,
+};
+
+static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
+{
+ struct panfrost_fence *fence;
+ struct panfrost_job_slot *js = pfdev->js;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ fence->dev = pfdev->ddev;
+ fence->queue = js_num;
+ fence->seqno = ++js->queue[js_num].emit_seqno;
+ dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
+ js->queue[js_num].fence_context, fence->seqno);
+
+ return &fence->base;
+}
+
+static int panfrost_job_get_slot(struct panfrost_job *job)
+{
+ /* JS0: fragment jobs.
+ * JS1: vertex/tiler jobs
+ * JS2: compute jobs
+ */
+ if (job->requirements & PANFROST_JD_REQ_FS)
+ return 0;
+
+/* Not exposed to userspace yet */
+#if 0
+ if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
+ if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
+ (job->pfdev->features.nr_core_groups == 2))
+ return 2;
+ if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
+ return 2;
+ }
+#endif
+ return 1;
+}
+
+static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
+ u32 requirements,
+ int js)
+{
+ u64 affinity;
+
+ /*
+ * Use all cores for now.
+ * Eventually we may need to support tiler-only jobs and hardware
+ * with multiple (2) coherent core groups.
+ */
+ affinity = pfdev->features.shader_present;
+
+ job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
+ job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
+}
+
+static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
+{
+ struct panfrost_device *pfdev = job->pfdev;
+ unsigned long flags;
+ u32 cfg;
+ u64 jc_head = job->jc;
+ int ret;
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return;
+
+ if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
+ goto end;
+
+ panfrost_devfreq_record_transition(pfdev, js);
+ spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+
+ job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
+ job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
+
+ panfrost_job_write_affinity(pfdev, job->requirements, js);
+
+ /* start MMU, medium priority, cache clean/flush on end, clean/flush on
+ * start */
+ /* TODO: different address spaces */
+ cfg = JS_CONFIG_THREAD_PRI(8) |
+ JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
+ JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
+ cfg |= JS_CONFIG_START_MMU;
+
+ job_write(pfdev, JS_CONFIG_NEXT(js), cfg);
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);
+
+ /* GO ! */
+ dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
+ job, js, jc_head);
+
+ job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
+
+ spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+
+end:
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+}
+
+static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence **implicit_fences)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+}
+
+static void panfrost_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ reservation_object_add_excl_fence(bos[i]->resv, fence);
+}
+
+int panfrost_job_push(struct panfrost_job *job)
+{
+ struct panfrost_device *pfdev = job->pfdev;
+ int slot = panfrost_job_get_slot(job);
+ struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ mutex_lock(&pfdev->sched_lock);
+
+ ret = drm_gem_lock_reservations(job->bos, job->bo_count,
+ &acquire_ctx);
+ if (ret) {
+ mutex_unlock(&pfdev->sched_lock);
+ return ret;
+ }
+
+ ret = drm_sched_job_init(&job->base, entity, NULL);
+ if (ret) {
+ mutex_unlock(&pfdev->sched_lock);
+ goto unlock;
+ }
+
+ job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ panfrost_acquire_object_fences(job->bos, job->bo_count,
+ job->implicit_fences);
+
+ drm_sched_entity_push_job(&job->base, entity);
+
+ mutex_unlock(&pfdev->sched_lock);
+
+ panfrost_attach_object_fences(job->bos, job->bo_count,
+ job->render_done_fence);
+
+unlock:
+ drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
+
+ return ret;
+}
+
+static void panfrost_job_cleanup(struct kref *ref)
+{
+ struct panfrost_job *job = container_of(ref, struct panfrost_job,
+ refcount);
+ unsigned int i;
+
+ if (job->in_fences) {
+ for (i = 0; i < job->in_fence_count; i++)
+ dma_fence_put(job->in_fences[i]);
+ kvfree(job->in_fences);
+ }
+ if (job->implicit_fences) {
+ for (i = 0; i < job->bo_count; i++)
+ dma_fence_put(job->implicit_fences[i]);
+ kvfree(job->implicit_fences);
+ }
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->render_done_fence);
+
+ if (job->bos) {
+ for (i = 0; i < job->bo_count; i++)
+ drm_gem_object_put_unlocked(job->bos[i]);
+ kvfree(job->bos);
+ }
+
+ kfree(job);
+}
+
+void panfrost_job_put(struct panfrost_job *job)
+{
+ kref_put(&job->refcount, panfrost_job_cleanup);
+}
+
+static void panfrost_job_free(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ panfrost_job_put(job);
+}
+
+static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct dma_fence *fence;
+ unsigned int i;
+
+ /* Explicit fences */
+ for (i = 0; i < job->in_fence_count; i++) {
+ if (job->in_fences[i]) {
+ fence = job->in_fences[i];
+ job->in_fences[i] = NULL;
+ return fence;
+ }
+ }
+
+ /* Implicit fences, max. one per BO */
+ for (i = 0; i < job->bo_count; i++) {
+ if (job->implicit_fences[i]) {
+ fence = job->implicit_fences[i];
+ job->implicit_fences[i] = NULL;
+ return fence;
+ }
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct panfrost_device *pfdev = job->pfdev;
+ int slot = panfrost_job_get_slot(job);
+ struct dma_fence *fence = NULL;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ pfdev->jobs[slot] = job;
+
+ fence = panfrost_fence_create(pfdev, slot);
+ if (IS_ERR(fence))
+ return NULL;
+
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ panfrost_job_hw_submit(job, slot);
+
+ return fence;
+}
+
+void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
+{
+ int j;
+ u32 irq_mask = 0;
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++) {
+ irq_mask |= MK_JS_MASK(j);
+ }
+
+ job_write(pfdev, JOB_INT_CLEAR, irq_mask);
+ job_write(pfdev, JOB_INT_MASK, irq_mask);
+}
+
+static void panfrost_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct panfrost_device *pfdev = job->pfdev;
+ int js = panfrost_job_get_slot(job);
+ int i;
+
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (dma_fence_is_signaled(job->done_fence))
+ return;
+
+ dev_err(pfdev->dev, "gpu sched timeout, js=%d, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
+ js,
+ job_read(pfdev, JS_STATUS(js)),
+ job_read(pfdev, JS_HEAD_LO(js)),
+ job_read(pfdev, JS_TAIL_LO(js)),
+ sched_job);
+
+ mutex_lock(&pfdev->reset_lock);
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_stop(&pfdev->js->queue[i].sched);
+
+ if (sched_job)
+ drm_sched_increase_karma(sched_job);
+
+ /* panfrost_core_dump(pfdev); */
+
+ panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_gpu_soft_reset(pfdev);
+
+ /* TODO: Re-enable all other address spaces */
+ panfrost_mmu_enable(pfdev, 0);
+ panfrost_gpu_power_on(pfdev);
+ panfrost_job_enable_interrupts(pfdev);
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
+
+ /* restart scheduler after GPU is usable again */
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_start(&pfdev->js->queue[i].sched, true);
+
+ mutex_unlock(&pfdev->reset_lock);
+}
+
+static const struct drm_sched_backend_ops panfrost_sched_ops = {
+ .dependency = panfrost_job_dependency,
+ .run_job = panfrost_job_run,
+ .timedout_job = panfrost_job_timedout,
+ .free_job = panfrost_job_free
+};
+
+static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 status = job_read(pfdev, JOB_INT_STAT);
+ int j;
+
+ dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+
+ for (j = 0; status; j++) {
+ u32 mask = MK_JS_MASK(j);
+
+ if (!(status & mask))
+ continue;
+
+ job_write(pfdev, JOB_INT_CLEAR, mask);
+
+ if (status & JOB_INT_MASK_ERR(j)) {
+ job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
+
+ dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
+ j,
+ panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
+ job_read(pfdev, JS_HEAD_LO(j)),
+ job_read(pfdev, JS_TAIL_LO(j)));
+
+ drm_sched_fault(&pfdev->js->queue[j].sched);
+ }
+
+ if (status & JOB_INT_MASK_DONE(j)) {
+ panfrost_devfreq_record_transition(pfdev, j);
+ dma_fence_signal(pfdev->jobs[j]->done_fence);
+ }
+
+ status &= ~mask;
+ }
+
+ return IRQ_HANDLED;
+}
+
+int panfrost_job_init(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js;
+ int ret, j, irq;
+
+ pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
+ if (!js)
+ return -ENOMEM;
+
+ spin_lock_init(&js->job_lock);
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
+ if (irq <= 0)
+ return -ENODEV;
+
+ ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
+ IRQF_SHARED, "job", pfdev);
+ if (ret) {
+ dev_err(pfdev->dev, "failed to request job irq");
+ return ret;
+ }
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++) {
+ js->queue[j].fence_context = dma_fence_context_alloc(1);
+
+ ret = drm_sched_init(&js->queue[j].sched,
+ &panfrost_sched_ops,
+ 1, 0, msecs_to_jiffies(500),
+ "pan_js");
+ if (ret) {
+ dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
+ goto err_sched;
+ }
+ }
+
+ panfrost_job_enable_interrupts(pfdev);
+
+ return 0;
+
+err_sched:
+ for (j--; j >= 0; j--)
+ drm_sched_fini(&js->queue[j].sched);
+
+ return ret;
+}
+
+void panfrost_job_fini(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js = pfdev->js;
+ int j;
+
+ job_write(pfdev, JOB_INT_MASK, 0);
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++)
+ drm_sched_fini(&js->queue[j].sched);
+
+}
+
+int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
+{
+ struct panfrost_device *pfdev = panfrost_priv->pfdev;
+ struct panfrost_job_slot *js = pfdev->js;
+ struct drm_sched_rq *rq;
+ int ret, i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+ if (WARN_ON(ret))
+ return ret;
+ }
+ return 0;
+}
+
+void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
+{
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+}
+
+int panfrost_job_is_idle(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js = pfdev->js;
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&js->queue[i].sched.hw_rq_count))
+ return false;
+
+ /* Check whether the hardware is idle */
+ if (pfdev->devfreq.slot[i].busy)
+ return false;
+ }
+
+ return true;
+}
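
The interrupt handler above walks JOB_INT_STAT one slot at a time using the layout MK_JS_MASK() encodes: the DONE bit for slot j sits at bit j and the ERROR bit at bit j + 16 (see panfrost_regs.h below). A small standalone sketch of that decode, with a hypothetical status value and assuming the driver's three job slots, is:

#include <stdint.h>
#include <stdio.h>

#define NUM_JOB_SLOTS   3               /* assumed: JS0/JS1/JS2 as in panfrost_job_get_slot() */
#define MK_JS_MASK(j)   (0x10001 << (j))

int main(void)
{
        uint32_t status = 0x00010002;   /* hypothetical: slot 0 error, slot 1 done */
        int j;

        for (j = 0; status && j < NUM_JOB_SLOTS; j++) {
                uint32_t mask = MK_JS_MASK(j);

                if (!(status & mask))
                        continue;
                if (status & (1u << (j + 16)))
                        printf("slot %d: error\n", j);
                if (status & (1u << j))
                        printf("slot %d: done\n", j);
                status &= ~mask;
        }
        /* prints: slot 0: error, then slot 1: done */
        return 0;
}
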
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
new file mode 100644
index 000000000000..62454128a792
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_JOB_H__
+#define __PANFROST_JOB_H__
+
+#include <uapi/drm/panfrost_drm.h>
+#include <drm/gpu_scheduler.h>
+
+struct panfrost_device;
+struct panfrost_gem_object;
+struct panfrost_file_priv;
+
+struct panfrost_job {
+ struct drm_sched_job base;
+
+ struct kref refcount;
+
+ struct panfrost_device *pfdev;
+ struct panfrost_file_priv *file_priv;
+
+ /* Optional fences userspace can pass in for the job to depend on. */
+ struct dma_fence **in_fences;
+ u32 in_fence_count;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ __u64 jc;
+ __u32 requirements;
+ __u32 flush_id;
+
+ /* Exclusive fences we have taken from the BOs to wait for */
+ struct dma_fence **implicit_fences;
+ struct drm_gem_object **bos;
+ u32 bo_count;
+
+ /* Fence to be signaled by drm-sched once it's done with the job */
+ struct dma_fence *render_done_fence;
+};
+
+int panfrost_job_init(struct panfrost_device *pfdev);
+void panfrost_job_fini(struct panfrost_device *pfdev);
+int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
+void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+int panfrost_job_push(struct panfrost_job *job);
+void panfrost_job_put(struct panfrost_job *job);
+void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
+int panfrost_job_is_idle(struct panfrost_device *pfdev);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
new file mode 100644
index 000000000000..762b1bd2a8c2
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+
+#include "panfrost_device.h"
+#include "panfrost_mmu.h"
+#include "panfrost_gem.h"
+#include "panfrost_features.h"
+#include "panfrost_regs.h"
+
+#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
+#define mmu_read(dev, reg) readl(dev->iomem + reg)
+
+struct panfrost_mmu {
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct mutex lock;
+};
+
+static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
+{
+ int ret;
+ u32 val;
+
+ /* Wait for the MMU status to indicate there is no active command, in
+ * case one is pending. */
+ ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
+ val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);
+
+ if (ret)
+ dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
+
+ return ret;
+}
+
+static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
+{
+ int status;
+
+ /* write AS_COMMAND when MMU is ready to accept another command */
+ status = wait_ready(pfdev, as_nr);
+ if (!status)
+ mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
+
+ return status;
+}
+
+static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
+ u64 iova, size_t size)
+{
+ u8 region_width;
+ u64 region = iova & PAGE_MASK;
+ /*
+ * fls returns:
+ * 1 .. 32
+ *
+ * 10 + fls(num_pages)
+ * results in the range (11 .. 42)
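+ *
+ * e.g. an 8MB region is 2048 pages: fls(2048) = 12, so region_width = 22,
+ * and 2048 == 1 << (22 - 11), so no further round-up is needed.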
+ */
+
+ size = round_up(size, PAGE_SIZE);
+
+ region_width = 10 + fls(size >> PAGE_SHIFT);
+ if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
+ /* not pow2, so must go up to the next pow2 */
+ region_width += 1;
+ }
+ region |= region_width;
+
+ /* Lock the region that needs to be updated */
+ mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
+ mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
+ write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
+}
+
+
+static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
+ u64 iova, size_t size, u32 op)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+
+ if (op != AS_COMMAND_UNLOCK)
+ lock_region(pfdev, as_nr, iova, size);
+
+ /* Run the MMU operation */
+ write_cmd(pfdev, as_nr, op);
+
+ /* Wait for the flush to complete */
+ ret = wait_ready(pfdev, as_nr);
+
+ spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+
+ return ret;
+}
+
+void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
+{
+ struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
+ u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
+ u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
+
+ mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
+
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
+
+ /* Need to revisit mem attrs.
+ * NC is the default, Mali driver is inner WT.
+ */
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
+
+ write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
+}
+
+static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
+{
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
+
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
+
+ write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
+}
+
+static size_t get_pgsize(u64 addr, size_t size)
+{
+ if (addr & (SZ_2M - 1) || size < SZ_2M)
+ return SZ_4K;
+
+ return SZ_2M;
+}
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+ u64 iova = bo->node.start << PAGE_SHIFT;
+ unsigned int count;
+ struct scatterlist *sgl;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (WARN_ON(IS_ERR(sgt)))
+ return PTR_ERR(sgt);
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&pfdev->mmu->lock);
+
+ for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
+ unsigned long paddr = sg_dma_address(sgl);
+ size_t len = sg_dma_len(sgl);
+
+ dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
+
+ while (len) {
+ size_t pgsize = get_pgsize(iova | paddr, len);
+
+ ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+ iova += pgsize;
+ paddr += pgsize;
+ len -= pgsize;
+ }
+ }
+
+ mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
+ bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+
+ mutex_unlock(&pfdev->mmu->lock);
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+
+ return 0;
+}
+
+void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+ u64 iova = bo->node.start << PAGE_SHIFT;
+ size_t len = bo->node.size << PAGE_SHIFT;
+ size_t unmapped_len = 0;
+ int ret;
+
+ dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return;
+
+ mutex_lock(&pfdev->mmu->lock);
+
+ while (unmapped_len < len) {
+ size_t unmapped_page;
+ size_t pgsize = get_pgsize(iova, len - unmapped_len);
+
+ unmapped_page = ops->unmap(ops, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ iova += unmapped_page;
+ unmapped_len += unmapped_page;
+ }
+
+ mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
+ bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+
+ mutex_unlock(&pfdev->mmu->lock);
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+}
+
+static void mmu_tlb_inv_context_s1(void *cookie)
+{
+ struct panfrost_device *pfdev = cookie;
+
+ mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+}
+
+static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{}
+
+static void mmu_tlb_sync_context(void *cookie)
+{
+ //struct panfrost_device *pfdev = cookie;
+ // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
+}
+
+static const struct iommu_gather_ops mmu_tlb_ops = {
+ .tlb_flush_all = mmu_tlb_inv_context_s1,
+ .tlb_add_flush = mmu_tlb_inv_range_nosync,
+ .tlb_sync = mmu_tlb_sync_context,
+};
+
+static const char *access_type_name(struct panfrost_device *pfdev,
+ u32 fault_status)
+{
+ switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+ case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
+ return "ATOMIC";
+ else
+ return "UNKNOWN";
+ case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+ return "READ";
+ case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+ return "WRITE";
+ case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+ return "EXECUTE";
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 status = mmu_read(pfdev, MMU_INT_STAT);
+ int i;
+
+ if (!status)
+ return IRQ_NONE;
+
+ dev_err(pfdev->dev, "mmu irq status=%x\n", status);
+
+ for (i = 0; status; i++) {
+ u32 mask = BIT(i) | BIT(i + 16);
+ u64 addr;
+ u32 fault_status;
+ u32 exception_type;
+ u32 access_type;
+ u32 source_id;
+
+ if (!(status & mask))
+ continue;
+
+ fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
+ addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
+ addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;
+
+ /* decode the fault status */
+ exception_type = fault_status & 0xFF;
+ access_type = (fault_status >> 8) & 0x3;
+ source_id = (fault_status >> 16);
+
+ /* terminal fault, print info about the fault */
+ dev_err(pfdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ i, addr,
+ "TODO",
+ fault_status,
+ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, panfrost_exception_name(pfdev, exception_type),
+ access_type, access_type_name(pfdev, fault_status),
+ source_id);
+
+ mmu_write(pfdev, MMU_INT_CLEAR, mask);
+
+ status &= ~mask;
+ }
+
+ return IRQ_HANDLED;
+}
+
+int panfrost_mmu_init(struct panfrost_device *pfdev)
+{
+ struct io_pgtable_ops *pgtbl_ops;
+ int err, irq;
+
+ pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
+ if (!pfdev->mmu)
+ return -ENOMEM;
+
+ mutex_init(&pfdev->mmu->lock);
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+ if (irq <= 0)
+ return -ENODEV;
+
+ err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ IRQF_SHARED, "mmu", pfdev);
+
+ if (err) {
+ dev_err(pfdev->dev, "failed to request mmu irq");
+ return err;
+ }
+ mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
+
+ pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = SZ_4K | SZ_2M,
+ .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
+ .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .tlb = &mmu_tlb_ops,
+ .iommu_dev = pfdev->dev,
+ };
+
+ pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
+ pfdev);
+ if (!pgtbl_ops)
+ return -ENOMEM;
+
+ pfdev->mmu->pgtbl_ops = pgtbl_ops;
+
+ panfrost_mmu_enable(pfdev, 0);
+
+ return 0;
+}
+
+void panfrost_mmu_fini(struct panfrost_device *pfdev)
+{
+ mmu_write(pfdev, MMU_INT_MASK, 0);
+ mmu_disable(pfdev, 0);
+
+ free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
+}
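
panfrost_mmu_map() above feeds each scatterlist run through get_pgsize() so the io-pgtable code uses 2MB block mappings where alignment and remaining length allow, and 4KB pages otherwise. A standalone sketch of that selection, reusing the same logic:

#include <stddef.h>
#include <stdio.h>

#define SZ_4K   0x1000
#define SZ_2M   0x200000

static size_t get_pgsize(unsigned long long addr, size_t size)
{
        if (addr & (SZ_2M - 1) || size < SZ_2M)
                return SZ_4K;
        return SZ_2M;
}

int main(void)
{
        printf("%zx\n", get_pgsize(0x200000, SZ_2M));      /* 200000: 2MB block */
        printf("%zx\n", get_pgsize(0x201000, SZ_2M));      /* 1000: unaligned address */
        printf("%zx\n", get_pgsize(0x400000, 16 * SZ_4K)); /* 1000: run too short */
        return 0;
}
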
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
new file mode 100644
index 000000000000..f5878d86a5ce
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_MMU_H__
+#define __PANFROST_MMU_H__
+
+struct panfrost_gem_object;
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo);
+void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+
+int panfrost_mmu_init(struct panfrost_device *pfdev);
+void panfrost_mmu_fini(struct panfrost_device *pfdev);
+
+void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
new file mode 100644
index 000000000000..578c5fc2188b
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/*
+ * Register definitions based on mali_midg_regmap.h
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ */
+#ifndef __PANFROST_REGS_H__
+#define __PANFROST_REGS_H__
+
+#define GPU_ID 0x00
+#define GPU_L2_FEATURES 0x004 /* (RO) Level 2 cache features */
+#define GPU_CORE_FEATURES 0x008 /* (RO) Shader Core Features */
+#define GPU_TILER_FEATURES 0x00C /* (RO) Tiler Features */
+#define GPU_MEM_FEATURES 0x010 /* (RO) Memory system features */
+#define GROUPS_L2_COHERENT BIT(0) /* Cores groups are l2 coherent */
+
+#define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */
+#define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */
+#define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */
+
+#define GPU_INT_RAWSTAT 0x20
+#define GPU_INT_CLEAR 0x24
+#define GPU_INT_MASK 0x28
+#define GPU_INT_STAT 0x2c
+#define GPU_IRQ_FAULT BIT(0)
+#define GPU_IRQ_MULTIPLE_FAULT BIT(7)
+#define GPU_IRQ_RESET_COMPLETED BIT(8)
+#define GPU_IRQ_POWER_CHANGED BIT(9)
+#define GPU_IRQ_POWER_CHANGED_ALL BIT(10)
+#define GPU_IRQ_PERFCNT_SAMPLE_COMPLETED BIT(16)
+#define GPU_IRQ_CLEAN_CACHES_COMPLETED BIT(17)
+#define GPU_IRQ_MASK_ALL \
+ (GPU_IRQ_FAULT |\
+ GPU_IRQ_MULTIPLE_FAULT |\
+ GPU_IRQ_RESET_COMPLETED |\
+ GPU_IRQ_POWER_CHANGED |\
+ GPU_IRQ_POWER_CHANGED_ALL |\
+ GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |\
+ GPU_IRQ_CLEAN_CACHES_COMPLETED)
+#define GPU_IRQ_MASK_ERROR \
+ ( \
+ GPU_IRQ_FAULT |\
+ GPU_IRQ_MULTIPLE_FAULT)
+#define GPU_CMD 0x30
+#define GPU_CMD_SOFT_RESET 0x01
+#define GPU_STATUS 0x34
+#define GPU_LATEST_FLUSH_ID 0x38
+#define GPU_FAULT_STATUS 0x3C
+#define GPU_FAULT_ADDRESS_LO 0x40
+#define GPU_FAULT_ADDRESS_HI 0x44
+
+#define GPU_THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */
+#define GPU_THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */
+#define GPU_THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */
+#define GPU_THREAD_FEATURES 0x0AC /* (RO) Thread features */
+#define GPU_THREAD_TLS_ALLOC 0x310 /* (RO) Number of threads per core that
+ * TLS must be allocated for */
+
+#define GPU_TEXTURE_FEATURES(n) (0x0B0 + ((n) * 4))
+#define GPU_JS_FEATURES(n) (0x0C0 + ((n) * 4))
+
+#define GPU_SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */
+#define GPU_SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */
+#define GPU_TILER_PRESENT_LO 0x110 /* (RO) Tiler core present bitmap, low word */
+#define GPU_TILER_PRESENT_HI 0x114 /* (RO) Tiler core present bitmap, high word */
+
+#define GPU_L2_PRESENT_LO 0x120 /* (RO) Level 2 cache present bitmap, low word */
+#define GPU_L2_PRESENT_HI 0x124 /* (RO) Level 2 cache present bitmap, high word */
+
+#define GPU_COHERENCY_FEATURES 0x300 /* (RO) Coherency features present */
+#define COHERENCY_ACE_LITE BIT(0)
+#define COHERENCY_ACE BIT(1)
+
+#define GPU_STACK_PRESENT_LO 0xE00 /* (RO) Core stack present bitmap, low word */
+#define GPU_STACK_PRESENT_HI 0xE04 /* (RO) Core stack present bitmap, high word */
+
+#define SHADER_READY_LO 0x140 /* (RO) Shader core ready bitmap, low word */
+#define SHADER_READY_HI 0x144 /* (RO) Shader core ready bitmap, high word */
+
+#define TILER_READY_LO 0x150 /* (RO) Tiler core ready bitmap, low word */
+#define TILER_READY_HI 0x154 /* (RO) Tiler core ready bitmap, high word */
+
+#define L2_READY_LO 0x160 /* (RO) Level 2 cache ready bitmap, low word */
+#define L2_READY_HI 0x164 /* (RO) Level 2 cache ready bitmap, high word */
+
+#define STACK_READY_LO 0xE10 /* (RO) Core stack ready bitmap, low word */
+#define STACK_READY_HI 0xE14 /* (RO) Core stack ready bitmap, high word */
+
+
+#define SHADER_PWRON_LO 0x180 /* (WO) Shader core power on bitmap, low word */
+#define SHADER_PWRON_HI 0x184 /* (WO) Shader core power on bitmap, high word */
+
+#define TILER_PWRON_LO 0x190 /* (WO) Tiler core power on bitmap, low word */
+#define TILER_PWRON_HI 0x194 /* (WO) Tiler core power on bitmap, high word */
+
+#define L2_PWRON_LO 0x1A0 /* (WO) Level 2 cache power on bitmap, low word */
+#define L2_PWRON_HI 0x1A4 /* (WO) Level 2 cache power on bitmap, high word */
+
+#define STACK_PWRON_LO 0xE20 /* (RO) Core stack power on bitmap, low word */
+#define STACK_PWRON_HI 0xE24 /* (RO) Core stack power on bitmap, high word */
+
+
+#define SHADER_PWROFF_LO 0x1C0 /* (WO) Shader core power off bitmap, low word */
+#define SHADER_PWROFF_HI 0x1C4 /* (WO) Shader core power off bitmap, high word */
+
+#define TILER_PWROFF_LO 0x1D0 /* (WO) Tiler core power off bitmap, low word */
+#define TILER_PWROFF_HI 0x1D4 /* (WO) Tiler core power off bitmap, high word */
+
+#define L2_PWROFF_LO 0x1E0 /* (WO) Level 2 cache power off bitmap, low word */
+#define L2_PWROFF_HI 0x1E4 /* (WO) Level 2 cache power off bitmap, high word */
+
+#define STACK_PWROFF_LO 0xE30 /* (RO) Core stack power off bitmap, low word */
+#define STACK_PWROFF_HI 0xE34 /* (RO) Core stack power off bitmap, high word */
+
+
+#define SHADER_PWRTRANS_LO 0x200 /* (RO) Shader core power transition bitmap, low word */
+#define SHADER_PWRTRANS_HI 0x204 /* (RO) Shader core power transition bitmap, high word */
+
+#define TILER_PWRTRANS_LO 0x210 /* (RO) Tiler core power transition bitmap, low word */
+#define TILER_PWRTRANS_HI 0x214 /* (RO) Tiler core power transition bitmap, high word */
+
+#define L2_PWRTRANS_LO 0x220 /* (RO) Level 2 cache power transition bitmap, low word */
+#define L2_PWRTRANS_HI 0x224 /* (RO) Level 2 cache power transition bitmap, high word */
+
+#define STACK_PWRTRANS_LO 0xE40 /* (RO) Core stack power transition bitmap, low word */
+#define STACK_PWRTRANS_HI 0xE44 /* (RO) Core stack power transition bitmap, high word */
+
+
+#define SHADER_PWRACTIVE_LO 0x240 /* (RO) Shader core active bitmap, low word */
+#define SHADER_PWRACTIVE_HI 0x244 /* (RO) Shader core active bitmap, high word */
+
+#define TILER_PWRACTIVE_LO 0x250 /* (RO) Tiler core active bitmap, low word */
+#define TILER_PWRACTIVE_HI 0x254 /* (RO) Tiler core active bitmap, high word */
+
+#define L2_PWRACTIVE_LO 0x260 /* (RO) Level 2 cache active bitmap, low word */
+#define L2_PWRACTIVE_HI 0x264 /* (RO) Level 2 cache active bitmap, high word */
+
+#define GPU_JM_CONFIG 0xF00 /* (RW) Job Manager configuration register (Implementation specific register) */
+#define GPU_SHADER_CONFIG 0xF04 /* (RW) Shader core configuration settings (Implementation specific register) */
+#define GPU_TILER_CONFIG 0xF08 /* (RW) Tiler core configuration settings (Implementation specific register) */
+#define GPU_L2_MMU_CONFIG 0xF0C /* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */
+
+/* L2_MMU_CONFIG register */
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT 23
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT 24
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT 26
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT 12
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS (0x7 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT 15
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES (0x7 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+/* SHADER_CONFIG register */
+#define SC_ALT_COUNTERS BIT(3)
+#define SC_OVERRIDE_FWD_PIXEL_KILL BIT(4)
+#define SC_SDC_DISABLE_OQ_DISCARD BIT(6)
+#define SC_LS_ALLOW_ATTR_TYPES BIT(16)
+#define SC_LS_PAUSEBUFFER_DISABLE BIT(16)
+#define SC_TLS_HASH_ENABLE BIT(17)
+#define SC_LS_ATTR_CHECK_DISABLE BIT(18)
+#define SC_ENABLE_TEXGRD_FLAGS BIT(25)
+/* End SHADER_CONFIG register */
+
+/* TILER_CONFIG register */
+#define TC_CLOCK_GATE_OVERRIDE BIT(0)
+
+/* JM_CONFIG register */
+#define JM_TIMESTAMP_OVERRIDE BIT(0)
+#define JM_CLOCK_GATE_OVERRIDE BIT(1)
+#define JM_JOB_THROTTLE_ENABLE BIT(2)
+#define JM_JOB_THROTTLE_LIMIT_SHIFT 3
+#define JM_MAX_JOB_THROTTLE_LIMIT 0x3F
+#define JM_FORCE_COHERENCY_FEATURES_SHIFT 2
+#define JM_IDVS_GROUP_SIZE_SHIFT 16
+#define JM_MAX_IDVS_GROUP_SIZE 0x3F
+
+
+/* Job Control regs */
+#define JOB_INT_RAWSTAT 0x1000
+#define JOB_INT_CLEAR 0x1004
+#define JOB_INT_MASK 0x1008
+#define JOB_INT_STAT 0x100c
+#define JOB_INT_JS_STATE 0x1010
+#define JOB_INT_THROTTLE 0x1014
+
+#define MK_JS_MASK(j) (0x10001 << (j))
+#define JOB_INT_MASK_ERR(j) BIT((j) + 16)
+#define JOB_INT_MASK_DONE(j) BIT(j)
+
+#define JS_BASE 0x1800
+#define JS_HEAD_LO(n) (JS_BASE + ((n) * 0x80) + 0x00)
+#define JS_HEAD_HI(n) (JS_BASE + ((n) * 0x80) + 0x04)
+#define JS_TAIL_LO(n) (JS_BASE + ((n) * 0x80) + 0x08)
+#define JS_TAIL_HI(n) (JS_BASE + ((n) * 0x80) + 0x0c)
+#define JS_AFFINITY_LO(n) (JS_BASE + ((n) * 0x80) + 0x10)
+#define JS_AFFINITY_HI(n) (JS_BASE + ((n) * 0x80) + 0x14)
+#define JS_CONFIG(n) (JS_BASE + ((n) * 0x80) + 0x18)
+#define JS_XAFFINITY(n) (JS_BASE + ((n) * 0x80) + 0x1c)
+#define JS_COMMAND(n) (JS_BASE + ((n) * 0x80) + 0x20)
+#define JS_STATUS(n) (JS_BASE + ((n) * 0x80) + 0x24)
+#define JS_HEAD_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x40)
+#define JS_HEAD_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x44)
+#define JS_AFFINITY_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x50)
+#define JS_AFFINITY_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x54)
+#define JS_CONFIG_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x58)
+#define JS_COMMAND_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x60)
+#define JS_FLUSH_ID_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x70)
+
+/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
+#define JS_CONFIG_START_FLUSH_CLEAN BIT(8)
+#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
+#define JS_CONFIG_START_MMU BIT(10)
+#define JS_CONFIG_JOB_CHAIN_FLAG BIT(11)
+#define JS_CONFIG_END_FLUSH_CLEAN BIT(12)
+#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE (3u << 12)
+#define JS_CONFIG_ENABLE_FLUSH_REDUCTION BIT(14)
+#define JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK BIT(15)
+#define JS_CONFIG_THREAD_PRI(n) ((n) << 16)
+
+#define JS_COMMAND_NOP 0x00
+#define JS_COMMAND_START 0x01
+#define JS_COMMAND_SOFT_STOP 0x02 /* Gently stop processing a job chain */
+#define JS_COMMAND_HARD_STOP 0x03 /* Rudely stop processing a job chain */
+#define JS_COMMAND_SOFT_STOP_0 0x04 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_HARD_STOP_0 0x05 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_SOFT_STOP_1 0x06 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
+#define JS_COMMAND_HARD_STOP_1 0x07 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
+
+#define JS_STATUS_EVENT_ACTIVE 0x08
+
+
+/* MMU regs */
+#define MMU_INT_RAWSTAT 0x2000
+#define MMU_INT_CLEAR 0x2004
+#define MMU_INT_MASK 0x2008
+#define MMU_INT_STAT 0x200c
+
+/* AS_COMMAND register commands */
+#define AS_COMMAND_NOP 0x00 /* NOP Operation */
+#define AS_COMMAND_UPDATE 0x01 /* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */
+#define AS_COMMAND_LOCK 0x02 /* Issue a lock region command to all MMUs */
+#define AS_COMMAND_UNLOCK 0x03 /* Issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs
+ (deprecated - only for use with T60x) */
+#define AS_COMMAND_FLUSH_PT 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1 caches, then
+ flush all L2 caches, then issue a flush region command to all MMUs */
+
+#define MMU_AS(as) (0x2400 + ((as) << 6))
+
+#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x00) /* (RW) Translation Table Base Address for address space n, low word */
+#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
+#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */
+#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */
+#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */
+#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */
+#define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */
+#define AS_FAULTSTATUS(as) (MMU_AS(as) + 0x1C) /* (RO) MMU fault status register for address space n */
+#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20) /* (RO) Fault Address for address space n, low word */
+#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24) /* (RO) Fault Address for address space n, high word */
+#define AS_STATUS(as) (MMU_AS(as) + 0x28) /* (RO) Status flags for address space n */
+/* Additional Bifrost AS registers */
+#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */
+#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */
+#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
+#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
+
+/*
+ * Begin LPAE MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK 0xfffffffffffff000
+#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY 0x2
+#define AS_TRANSTAB_LPAE_ADRMODE_TABLE 0x3
+#define AS_TRANSTAB_LPAE_ADRMODE_MASK 0x3
+#define AS_TRANSTAB_LPAE_READ_INNER BIT(2)
+#define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4)
+
+#define AS_STATUS_AS_ACTIVE 0x01
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+
+#endif
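
The job-slot and address-space register blocks above are laid out at fixed strides: 0x80 bytes per job slot starting at JS_BASE, and 0x40 bytes per address space starting at 0x2400. A quick standalone check of a few offsets:

#include <stdio.h>

#define JS_BASE         0x1800
#define JS_HEAD_LO(n)   (JS_BASE + ((n) * 0x80) + 0x00)
#define MMU_AS(as)      (0x2400 + ((as) << 6))
#define AS_STATUS(as)   (MMU_AS(as) + 0x28)

int main(void)
{
        printf("0x%x 0x%x\n", JS_HEAD_LO(0), JS_HEAD_LO(2));    /* 0x1800 0x1900 */
        printf("0x%x 0x%x\n", AS_STATUS(0), AS_STATUS(1));      /* 0x2428 0x2468 */
        return 0;
}
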
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 754f6b25f265..4501597f30ab 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -188,7 +188,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
tim2 |= TIM2_IOE;
if (connector->display_info.bus_flags &
- DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
tim2 |= TIM2_IPC;
}
@@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm)
dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
return PTR_ERR(parent);
}
+
+ spin_lock_init(&priv->tim2_lock);
+
/* If the clock divider is broken, use the parent directly */
if (priv->variant->broken_clockdivider) {
priv->clk = parent;
return 0;
}
parent_name = __clk_get_name(parent);
-
- spin_lock_init(&priv->tim2_lock);
div->init = &init;
ret = devm_clk_hw_register(drm->dev, div);
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index b9baefdba38a..1c318ad32a8c 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
ret = vexpress_muxfpga_init();
if (ret) {
dev_err(dev, "unable to initialize muxfpga driver\n");
+ of_node_put(np);
return ret;
}
@@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
pdev = of_find_device_by_node(np);
if (!pdev) {
dev_err(dev, "can't find the sysreg device, deferring\n");
+ of_node_put(np);
return -EPROBE_DEFER;
}
map = dev_get_drvdata(&pdev->dev);
if (!map) {
dev_err(dev, "sysreg has not yet probed\n");
platform_device_put(pdev);
+ of_node_put(np);
return -EPROBE_DEFER;
}
} else {
map = syscon_node_to_regmap(np);
}
+ of_node_put(np);
if (IS_ERR(map)) {
dev_err(dev, "no Versatile syscon regmap\n");
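
The added of_node_put() calls pair the reference taken when the node was looked up with a release on every exit path. A standalone sketch of that pairing, built around a hypothetical refcounted handle (handle_get()/handle_put() are placeholders, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted handle standing in for a struct device_node. */
struct handle { int refcount; };

static struct handle *handle_get(void)
{
	struct handle *h = calloc(1, sizeof(*h));
	if (h)
		h->refcount = 1;
	return h;
}

static void handle_put(struct handle *h)
{
	if (h && --h->refcount == 0)
		free(h);
}

/* Every early return after handle_get() is paired with handle_put(),
 * just as the patch pairs of_node_put() with the earlier node lookup. */
static int do_setup(int fail_early)
{
	struct handle *h = handle_get();
	if (!h)
		return -1;

	if (fail_early) {
		handle_put(h);	/* error path: drop the reference */
		return -1;
	}

	/* ... use h ... */
	handle_put(h);		/* success path: drop the reference too */
	return 0;
}

int main(void)
{
	printf("%d %d\n", do_setup(0), do_setup(1));
	return 0;
}
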
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 08c725544a2f..8b319ebbb0fb 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -535,7 +535,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
{
struct qxl_device *qdev = plane->dev->dev_private;
struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
- struct qxl_bo *bo_old, *primary;
+ struct qxl_bo *primary;
struct drm_clip_rect norect = {
.x1 = 0,
.y1 = 0,
@@ -544,12 +544,6 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
};
uint32_t dumb_shadow_offset = 0;
- if (old_state->fb) {
- bo_old = gem_to_qxl_bo(old_state->fb->obj[0]);
- } else {
- bo_old = NULL;
- }
-
primary = bo->shadow ? bo->shadow : bo;
if (!primary->is_primary) {
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 578d867a81d5..f33e349c4ec5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -255,10 +255,14 @@ static struct drm_driver qxl_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = qxl_gem_prime_pin,
.gem_prime_unpin = qxl_gem_prime_unpin,
+ .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 4a0331b3ff7d..2896bb6fdbf4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -65,9 +65,6 @@
extern int qxl_num_crtc;
extern int qxl_max_ioctls;
-#define DRM_FILE_OFFSET 0x100000000ULL
-#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
-
#define QXL_INTERRUPT_MASK (\
QXL_INTERRUPT_DISPLAY |\
QXL_INTERRUPT_CURSOR |\
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 8b448eca1cd9..114653b471c6 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -42,6 +42,18 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
qxl_bo_unpin(bo);
}
+struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *qxl_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 92f5db5b296f..0234f8556ada 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -63,15 +63,10 @@ static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct qxl_device *qdev;
int r;
+ struct drm_file *file_priv = filp->private_data;
+ struct qxl_device *qdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- qdev = file_priv->minor->dev->dev_private;
if (qdev == NULL) {
DRM_ERROR(
"filp->private_data->minor->dev->dev_private == NULL\n");
@@ -328,7 +323,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_bo_device_init(&qdev->mman.bdev,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET, 0);
+ false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 53f29a115104..0a9312ea250a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1388,7 +1388,7 @@ int radeon_device_init(struct radeon_device *rdev,
pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
pr_warn("radeon: No coherent DMA available\n");
}
- rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index aa898c699101..433df7036f96 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -922,12 +922,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = min(max(den/post_div, 1u), ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
- *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+ *ref_div = (*ref_div * fb_div_max)/(*fb_div);
*fb_div = fb_div_max;
}
}
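
The hunk above swaps DIV_ROUND_CLOSEST() for truncating integer division when picking the reference divider. A small worked comparison of the two (the DIV_ROUND_CLOSEST below is a local restatement for unsigned operands, only for this demo):

#include <stdio.h>

/* Local restatement of round-to-nearest division for unsigned operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned int den = 7, post_div = 2;

	printf("truncating: %u\n", den / post_div);                   /* 3 */
	printf("closest:    %u\n", DIV_ROUND_CLOSEST(den, post_div)); /* 4 */
	return 0;
}
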
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index afef2d9fccd8..173deb463414 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -35,7 +35,6 @@
#include <linux/platform_device.h>
#include <drm/drm_legacy.h>
-#include <drm/ati_pcigart.h>
#include "radeon_family.h"
/* General customization:
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 1179034024ae..1298b84cb1c7 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -42,7 +42,7 @@
* the helper contains a pointer to radeon framebuffer baseclass.
*/
struct radeon_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct drm_framebuffer fb;
struct radeon_device *rdev;
};
@@ -244,7 +244,8 @@ static int radeonfb_create(struct drm_fb_helper *helper,
goto out;
}
- info->par = rfbdev;
+ /* radeon resume is fragile and needs a vt switch to help it along */
+ info->skip_vt_switch = false;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
if (ret) {
@@ -259,10 +260,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
- strcpy(info->fix.id, "radeondrmfb");
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-
info->fbops = &radeonfb_ops;
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
@@ -271,7 +268,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
info->screen_base = rbo->kptr;
info->screen_size = radeon_bo_size(rbo);
- drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index b3019505065a..c9bd1278f573 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -133,7 +133,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
/* TODO we should be able to split locking for interval tree and
* the tear down.
*/
- if (range->blockable)
+ if (mmu_notifier_range_blockable(range))
mutex_lock(&rmn->lock);
else if (!mutex_trylock(&rmn->lock))
return -EAGAIN;
@@ -144,7 +144,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct radeon_bo *bo;
long r;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
goto out_unlock;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 9920a6fc11bf..5d42f8d8e68d 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -45,8 +45,6 @@
#include "radeon_reg.h"
#include "radeon.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
@@ -253,14 +251,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
- struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
- rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -300,14 +296,12 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
- struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
- rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -792,7 +786,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_bo_device_init(&rdev->mman.bdev,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
rdev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -901,16 +894,10 @@ static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct radeon_device *rdev;
int r;
+ struct drm_file *file_priv = filp->private_data;
+ struct radeon_device *rdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
- return -EINVAL;
- }
-
- file_priv = filp->private_data;
- rdev = file_priv->minor->dev->dev_private;
if (rdev == NULL) {
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 7c36e2777a15..1529849e217e 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -36,3 +36,7 @@ config DRM_RCAR_VSP
depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m)
help
Enable support to expose the R-Car VSP Compositor as KMS planes.
+
+config DRM_RCAR_WRITEBACK
+ bool
+ default y if ARM64
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 2a3b8d7972b5..6c2ed9c46467 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -4,7 +4,7 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_encoder.o \
rcar_du_group.o \
rcar_du_kms.o \
- rcar_du_plane.o
+ rcar_du_plane.o \
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar_du_of_lvds_r8a7790.dtb.o \
@@ -13,6 +13,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar_du_of_lvds_r8a7795.dtb.o \
rcar_du_of_lvds_r8a7796.dtb.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 4cdea14d552f..2da46e3dc4ae 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -32,21 +32,21 @@
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}
static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}
static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +54,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -62,7 +62,7 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
@@ -157,10 +157,9 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
}
done:
- dev_dbg(rcrtc->group->dev->dev,
+ dev_dbg(rcrtc->dev->dev,
"output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
- dpll->output, dpll->fdpll, dpll->n, dpll->m,
- best_diff);
+ dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
}
struct du_clk_params {
@@ -212,7 +211,7 @@ static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
unsigned long mode_clock = mode->clock * 1000;
u32 dsmr;
u32 escr;
@@ -277,7 +276,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_escr_divider(rcrtc->extclock, mode_clock,
ESCR_DCLKSEL_DCLKIN, &params);
- dev_dbg(rcrtc->group->dev->dev, "mode clock %lu %s rate %lu\n",
+ dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
params.rate);
@@ -285,7 +284,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
escr = params.escr;
}
- dev_dbg(rcrtc->group->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
+ dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
@@ -333,7 +332,7 @@ plane_format(struct rcar_du_plane *plane)
static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
unsigned int num_planes = 0;
unsigned int dptsr_planes;
unsigned int hwplanes = 0;
@@ -463,7 +462,7 @@ static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
if (wait_event_timeout(rcrtc->flip_wait,
!rcar_du_crtc_page_flip_pending(rcrtc),
@@ -493,7 +492,7 @@ static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
/* Enable the VSP compositor. */
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_enable(rcrtc);
/* Turn vertical blanking interrupt reporting on. */
@@ -564,7 +563,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
struct drm_crtc *crtc = &rcrtc->crtc;
u32 status;
@@ -617,7 +616,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
drm_crtc_vblank_off(crtc);
/* Disable the VSP compositor. */
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
/*
@@ -627,7 +626,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
* TODO: Find another way to stop the display for DUs that don't support
* TVM sync.
*/
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_TVM_SYNC))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
DSYSR_TVM_SWITCH);
@@ -648,8 +647,13 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
rstate->outputs = 0;
drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ struct rcar_du_encoder *renc;
+ /* Skip the writeback encoder. */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ continue;
+
+ renc = to_rcar_encoder(encoder);
rstate->outputs |= BIT(renc->output);
}
@@ -661,7 +665,7 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_crtc_get(rcrtc);
@@ -689,7 +693,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
@@ -735,7 +739,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
*/
rcar_du_crtc_get(rcrtc);
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
}
@@ -757,15 +761,16 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_flush(rcrtc);
}
-enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
+static enum drm_mode_status
+rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
unsigned int vbp;
@@ -797,7 +802,7 @@ static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
const char **sources;
unsigned int count;
int i = -1;
@@ -981,8 +986,8 @@ static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
return 0;
}
-const char *const *rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc,
- size_t *count)
+static const char *const *
+rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -1079,7 +1084,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
struct rcar_du_crtc *rcrtc = arg;
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
irqreturn_t ret = IRQ_NONE;
u32 status;
@@ -1171,6 +1176,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
init_waitqueue_head(&rcrtc->vblank_wait);
spin_lock_init(&rcrtc->vblank_lock);
+ rcrtc->dev = rcdu;
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[hwindex];
rcrtc->index = hwindex;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index bcb35b0b7612..3b7fc668996f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_writeback.h>
#include <media/vsp1.h>
@@ -24,10 +25,11 @@ struct rcar_du_vsp;
/**
* struct rcar_du_crtc - the CRTC, representing a DU superposition processor
* @crtc: base DRM CRTC
+ * @dev: the DU device
* @clock: the CRTC functional clock
* @extclock: external pixel dot clock (optional)
* @mmio_offset: offset of the CRTC registers in the DU MMIO block
- * @index: CRTC software and hardware index
+ * @index: CRTC hardware index
* @initialized: whether the CRTC has been initialized and clocks enabled
* @dsysr: cached value of the DSYSR register
* @vblank_enable: whether vblank events are enabled on this CRTC
@@ -39,10 +41,12 @@ struct rcar_du_vsp;
* @group: CRTC group this CRTC belongs to
* @vsp: VSP feeding video to this CRTC
* @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
+ * @writeback: the writeback connector
*/
struct rcar_du_crtc {
struct drm_crtc crtc;
+ struct rcar_du_device *dev;
struct clk *clock;
struct clk *extclock;
unsigned int mmio_offset;
@@ -65,9 +69,12 @@ struct rcar_du_crtc {
const char *const *sources;
unsigned int sources_count;
+
+ struct drm_writeback_connector writeback;
};
-#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+#define wb_to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, writeback)
/**
* struct rcar_du_crtc_state - Driver-specific CRTC state
@@ -97,8 +104,6 @@ enum rcar_du_output {
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
unsigned int hwindex);
-void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
-void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc);
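
The wb_to_rcar_crtc() macro added above relies on container_of() to step from the embedded drm_writeback_connector back to its enclosing rcar_du_crtc. A standalone sketch of that idiom with simplified placeholder structs:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
	int id;
	struct inner member;	/* embedded, like 'writeback' in rcar_du_crtc */
};

int main(void)
{
	struct outer o = { .id = 42 };
	struct inner *p = &o.member;

	/* Same idiom as wb_to_rcar_crtc(): walk back from the embedded
	 * member to the structure that contains it. */
	struct outer *back = container_of(p, struct outer, member);

	printf("%d\n", back->id);	/* prints 42 */
	return 0;
}
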
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 8ee4e762f4e5..6c91753af7bc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -28,13 +28,33 @@ static const struct drm_encoder_funcs encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int num_ports = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = of_node_get(node);
+
+ for_each_child_of_node(ports, port) {
+ if (of_node_name_eq(port, "port"))
+ num_ports++;
+ }
+
+ of_node_put(ports);
+
+ return num_ports;
+}
+
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
struct drm_encoder *encoder;
- struct drm_bridge *bridge = NULL;
+ struct drm_bridge *bridge;
int ret;
renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
@@ -48,11 +68,33 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
enc_node, output);
- /* Locate the DRM bridge from the encoder DT node. */
- bridge = of_drm_find_bridge(enc_node);
- if (!bridge) {
- ret = -EPROBE_DEFER;
- goto done;
+ /*
+ * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
+ * DT node has a single port, assume that it describes a panel and
+ * create a panel bridge.
+ */
+ if ((output == RCAR_DU_OUTPUT_DPAD0 ||
+ output == RCAR_DU_OUTPUT_DPAD1) &&
+ rcar_du_encoder_count_ports(enc_node) == 1) {
+ struct drm_panel *panel = of_drm_find_panel(enc_node);
+
+ if (IS_ERR(panel)) {
+ ret = PTR_ERR(panel);
+ goto done;
+ }
+
+ bridge = devm_drm_panel_bridge_add(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto done;
+ }
+ } else {
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge) {
+ ret = -EPROBE_DEFER;
+ goto done;
+ }
}
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 3b7d50a8fb9b..f8f7fff34dff 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -26,6 +26,7 @@
#include "rcar_du_kms.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
+#include "rcar_du_writeback.h"
/* -----------------------------------------------------------------------------
* Format helpers
@@ -34,60 +35,70 @@
static const struct rcar_du_format_info rcar_du_format_infos[] = {
{
.fourcc = DRM_FORMAT_RGB565,
+ .v4l2 = V4L2_PIX_FMT_RGB565,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_ARGB1555,
+ .v4l2 = V4L2_PIX_FMT_ARGB555,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB1555,
+ .v4l2 = V4L2_PIX_FMT_XRGB555,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
.planes = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_RGB888,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
.bpp = 32,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_ARGB8888,
}, {
.fourcc = DRM_FORMAT_UYVY,
+ .v4l2 = V4L2_PIX_FMT_UYVY,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_YUYV,
+ .v4l2 = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV12,
+ .v4l2 = V4L2_PIX_FMT_NV12M,
.bpp = 12,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV21,
+ .v4l2 = V4L2_PIX_FMT_NV21M,
.bpp = 12,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV16,
+ .v4l2 = V4L2_PIX_FMT_NV16M,
.bpp = 16,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
@@ -99,62 +110,77 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
*/
{
.fourcc = DRM_FORMAT_RGB332,
+ .v4l2 = V4L2_PIX_FMT_RGB332,
.bpp = 8,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_ARGB4444,
+ .v4l2 = V4L2_PIX_FMT_ARGB444,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_XRGB4444,
+ .v4l2 = V4L2_PIX_FMT_XRGB444,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGR888,
+ .v4l2 = V4L2_PIX_FMT_RGB24,
.bpp = 24,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
+ .v4l2 = V4L2_PIX_FMT_BGR24,
.bpp = 24,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGRA8888,
+ .v4l2 = V4L2_PIX_FMT_ARGB32,
.bpp = 32,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGRX8888,
+ .v4l2 = V4L2_PIX_FMT_XRGB32,
.bpp = 32,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_YVYU,
+ .v4l2 = V4L2_PIX_FMT_YVYU,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_NV61,
+ .v4l2 = V4L2_PIX_FMT_NV61M,
.bpp = 16,
.planes = 2,
}, {
.fourcc = DRM_FORMAT_YUV420,
+ .v4l2 = V4L2_PIX_FMT_YUV420M,
.bpp = 12,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU420,
+ .v4l2 = V4L2_PIX_FMT_YVU420M,
.bpp = 12,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YUV422,
+ .v4l2 = V4L2_PIX_FMT_YUV422M,
.bpp = 16,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU422,
+ .v4l2 = V4L2_PIX_FMT_YVU422M,
.bpp = 16,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YUV444,
+ .v4l2 = V4L2_PIX_FMT_YUV444M,
.bpp = 24,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU444,
+ .v4l2 = V4L2_PIX_FMT_YVU444M,
.bpp = 24,
.planes = 3,
},
@@ -639,6 +665,17 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
encoder->possible_clones = (1 << num_encoders) - 1;
}
+ /* Create the writeback connectors. */
+ if (rcdu->info->gen >= 3) {
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ struct rcar_du_crtc *rcrtc = &rcdu->crtcs[i];
+
+ ret = rcar_du_writeback_init(rcdu, rcrtc);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
/*
* Initialize the default DPAD0 source to the index of the first DU
* channel that can be connected to DPAD0. The exact value doesn't
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index e171527abdaa..0346504d8c59 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -19,6 +19,7 @@ struct rcar_du_device;
struct rcar_du_format_info {
u32 fourcc;
+ u32 v4l2;
unsigned int bpp;
unsigned int planes;
unsigned int pnmr;
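
With the v4l2 field added to rcar_du_format_info, one table carries both identifiers, and a single lookup replaces the two parallel arrays removed from rcar_du_vsp.c. A standalone sketch of such a lookup (the field values are illustrative placeholders, not the real DRM/V4L2 codes; the driver's own helper is rcar_du_format_info()):

#include <stdint.h>
#include <stdio.h>

/* Unified entry, mirroring struct rcar_du_format_info: one table maps a
 * DRM fourcc to its matching V4L2 pixel format. */
struct fmt_info {
	uint32_t fourcc;	/* DRM_FORMAT_* (placeholder values here) */
	uint32_t v4l2;		/* V4L2_PIX_FMT_* (placeholder values here) */
	unsigned int bpp;
};

static const struct fmt_info fmt_infos[] = {
	{ 0x1001, 0x2001, 16 },
	{ 0x1002, 0x2002, 32 },
};

static const struct fmt_info *fmt_info_lookup(uint32_t fourcc)
{
	for (size_t i = 0; i < sizeof(fmt_infos) / sizeof(fmt_infos[0]); i++)
		if (fmt_infos[i].fourcc == fourcc)
			return &fmt_infos[i];
	return NULL;	/* unsupported format */
}

int main(void)
{
	const struct fmt_info *info = fmt_info_lookup(0x1001);

	if (info)
		printf("v4l2=0x%08x bpp=%u\n", (unsigned int)info->v4l2,
		       info->bpp);
	return 0;
}
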
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 0878accbd134..5e4faf258c31 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
@@ -26,16 +27,19 @@
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
+#include "rcar_du_writeback.h"
-static void rcar_du_vsp_complete(void *private, bool completed, u32 crc)
+static void rcar_du_vsp_complete(void *private, unsigned int status, u32 crc)
{
struct rcar_du_crtc *crtc = private;
if (crtc->vblank_enable)
drm_crtc_handle_vblank(&crtc->crtc);
- if (completed)
+ if (status & VSP1_DU_STATUS_COMPLETE)
rcar_du_crtc_finish_page_flip(crtc);
+ if (status & VSP1_DU_STATUS_WRITEBACK)
+ rcar_du_writeback_complete(crtc);
drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc);
}
@@ -43,7 +47,7 @@ static void rcar_du_vsp_complete(void *private, bool completed, u32 crc)
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
{
const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
- struct rcar_du_device *rcdu = crtc->group->dev;
+ struct rcar_du_device *rcdu = crtc->dev;
struct vsp1_du_lif_config cfg = {
.width = mode->hdisplay,
.height = mode->vdisplay,
@@ -107,11 +111,12 @@ void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
state = to_rcar_crtc_state(crtc->crtc.state);
cfg.crc = state->crc;
+ rcar_du_writeback_setup(crtc, &cfg.writeback);
+
vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
-/* Keep the two tables in sync. */
-static const u32 formats_kms[] = {
+static const u32 rcar_du_vsp_formats[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
@@ -139,40 +144,13 @@ static const u32 formats_kms[] = {
DRM_FORMAT_YVU444,
};
-static const u32 formats_v4l2[] = {
- V4L2_PIX_FMT_RGB332,
- V4L2_PIX_FMT_ARGB444,
- V4L2_PIX_FMT_XRGB444,
- V4L2_PIX_FMT_ARGB555,
- V4L2_PIX_FMT_XRGB555,
- V4L2_PIX_FMT_RGB565,
- V4L2_PIX_FMT_RGB24,
- V4L2_PIX_FMT_BGR24,
- V4L2_PIX_FMT_ARGB32,
- V4L2_PIX_FMT_XRGB32,
- V4L2_PIX_FMT_ABGR32,
- V4L2_PIX_FMT_XBGR32,
- V4L2_PIX_FMT_UYVY,
- V4L2_PIX_FMT_YUYV,
- V4L2_PIX_FMT_YVYU,
- V4L2_PIX_FMT_NV12M,
- V4L2_PIX_FMT_NV21M,
- V4L2_PIX_FMT_NV16M,
- V4L2_PIX_FMT_NV61M,
- V4L2_PIX_FMT_YUV420M,
- V4L2_PIX_FMT_YVU420M,
- V4L2_PIX_FMT_YUV422M,
- V4L2_PIX_FMT_YVU422M,
- V4L2_PIX_FMT_YUV444M,
- V4L2_PIX_FMT_YVU444M,
-};
-
static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
to_rcar_vsp_plane_state(plane->plane.state);
struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc);
struct drm_framebuffer *fb = plane->plane.state->fb;
+ const struct rcar_du_format_info *format;
struct vsp1_du_atomic_config cfg = {
.pixelformat = 0,
.pitch = fb->pitches[0],
@@ -195,37 +173,23 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
- for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) {
- if (formats_kms[i] == state->format->fourcc) {
- cfg.pixelformat = formats_v4l2[i];
- break;
- }
- }
+ format = rcar_du_format_info(state->format->fourcc);
+ cfg.pixelformat = format->v4l2;
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
-static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
+int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
{
- struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
- struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
struct rcar_du_device *rcdu = vsp->dev;
unsigned int i;
int ret;
- /*
- * There's no need to prepare (and unprepare) the framebuffer when the
- * plane is not visible, as it will not be displayed.
- */
- if (!state->visible)
- return 0;
-
- for (i = 0; i < rstate->format->planes; ++i) {
- struct drm_gem_cma_object *gem =
- drm_fb_cma_get_gem_obj(state->fb, i);
- struct sg_table *sgt = &rstate->sg_tables[i];
+ for (i = 0; i < fb->format->num_planes; ++i) {
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+ struct sg_table *sgt = &sg_tables[i];
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr,
gem->base.size);
@@ -240,15 +204,11 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
}
}
- ret = drm_gem_fb_prepare_fb(plane, state);
- if (ret)
- goto fail;
-
return 0;
fail:
while (i--) {
- struct sg_table *sgt = &rstate->sg_tables[i];
+ struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
@@ -257,24 +217,52 @@ fail:
return ret;
}
-static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
- unsigned int i;
+ int ret;
+ /*
+ * There's no need to prepare (and unprepare) the framebuffer when the
+ * plane is not visible, as it will not be displayed.
+ */
if (!state->visible)
- return;
+ return 0;
+
+ ret = rcar_du_vsp_map_fb(vsp, state->fb, rstate->sg_tables);
+ if (ret < 0)
+ return ret;
+
+ return drm_gem_fb_prepare_fb(plane, state);
+}
+
+void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+ unsigned int i;
- for (i = 0; i < rstate->format->planes; ++i) {
- struct sg_table *sgt = &rstate->sg_tables[i];
+ for (i = 0; i < fb->format->num_planes; ++i) {
+ struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
}
}
+static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
+ struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
+
+ if (!state->visible)
+ return;
+
+ rcar_du_vsp_unmap_fb(vsp, state->fb, rstate->sg_tables);
+}
+
static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -395,8 +383,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_vsp_plane_funcs,
- formats_kms,
- ARRAY_SIZE(formats_kms),
+ rcar_du_vsp_formats,
+ ARRAY_SIZE(rcar_du_vsp_formats),
NULL, type, NULL);
if (ret < 0)
return ret;
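
The VSP completion callback now receives a status bitmask instead of a single bool, so page-flip completion and writeback completion can be reported independently. A standalone sketch of dispatching on such flags (the STATUS_* values below are placeholders; the real VSP1_DU_STATUS_* flags come from the VSP1 headers):

#include <stdio.h>

#define STATUS_COMPLETE  (1U << 0)	/* placeholder flag values */
#define STATUS_WRITEBACK (1U << 1)

static void frame_done(unsigned int status)
{
	if (status & STATUS_COMPLETE)
		printf("page flip finished\n");
	if (status & STATUS_WRITEBACK)
		printf("writeback buffer ready\n");
}

int main(void)
{
	frame_done(STATUS_COMPLETE);
	frame_done(STATUS_COMPLETE | STATUS_WRITEBACK);
	return 0;
}
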
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index db232037f24a..9b4724159378 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -12,8 +12,10 @@
#include <drm/drm_plane.h>
+struct drm_framebuffer;
struct rcar_du_format_info;
struct rcar_du_vsp;
+struct sg_table;
struct rcar_du_vsp_plane {
struct drm_plane plane;
@@ -60,6 +62,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
+int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3]);
+void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3]);
#else
static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp,
struct device_node *np,
@@ -71,6 +77,17 @@ static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { };
+static inline int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp,
+ struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+ return -ENXIO;
+}
+static inline void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp,
+ struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+}
#endif
#endif /* __RCAR_DU_VSP_H__ */
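
The #else branch above follows the usual compile-out pattern: when the option is disabled, static inline stubs keep callers building and simply return an error. A standalone sketch of the same pattern, with HAVE_FEATURE standing in for the Kconfig symbol:

#include <errno.h>
#include <stdio.h>

/* Toggle this to emulate the Kconfig option being enabled or disabled. */
#define HAVE_FEATURE 0

#if HAVE_FEATURE
static int feature_init(void)
{
	printf("feature initialised\n");
	return 0;
}
#else
/* Compiled-out variant: callers still build and link, they just get an
 * error code, like the -ENXIO stubs in the header above. */
static inline int feature_init(void)
{
	return -ENXIO;
}
#endif

int main(void)
{
	int ret = feature_init();

	if (ret)
		printf("feature unavailable: %d\n", ret);
	return 0;
}
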
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
new file mode 100644
index 000000000000..989a0be94131
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_writeback.c -- R-Car Display Unit Writeback Support
+ *
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
+#include "rcar_du_crtc.h"
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+
+/**
+ * struct rcar_du_wb_conn_state - Driver-specific writeback connector state
+ * @state: base DRM connector state
+ * @format: format of the writeback framebuffer
+ */
+struct rcar_du_wb_conn_state {
+ struct drm_connector_state state;
+ const struct rcar_du_format_info *format;
+};
+
+#define to_rcar_wb_conn_state(s) \
+ container_of(s, struct rcar_du_wb_conn_state, state)
+
+/**
+ * struct rcar_du_wb_job - Driver-private data for writeback jobs
+ * @sg_tables: scatter-gather tables for the framebuffer memory
+ */
+struct rcar_du_wb_job {
+ struct sg_table sg_tables[3];
+};
+
+static int rcar_du_wb_conn_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static int rcar_du_wb_prepare_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
+ struct rcar_du_wb_job *rjob;
+ int ret;
+
+ if (!job->fb)
+ return 0;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ /* Map the framebuffer to the VSP. */
+ ret = rcar_du_vsp_map_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
+ if (ret < 0) {
+ kfree(rjob);
+ return ret;
+ }
+
+ job->priv = rjob;
+ return 0;
+}
+
+static void rcar_du_wb_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
+ struct rcar_du_wb_job *rjob = job->priv;
+
+ if (!job->fb)
+ return;
+
+ rcar_du_vsp_unmap_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
+ kfree(rjob);
+}
+
+static const struct drm_connector_helper_funcs rcar_du_wb_conn_helper_funcs = {
+ .get_modes = rcar_du_wb_conn_get_modes,
+ .prepare_writeback_job = rcar_du_wb_prepare_job,
+ .cleanup_writeback_job = rcar_du_wb_cleanup_job,
+};
+
+static struct drm_connector_state *
+rcar_du_wb_conn_duplicate_state(struct drm_connector *connector)
+{
+ struct rcar_du_wb_conn_state *copy;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &copy->state);
+
+ return &copy->state;
+}
+
+static void rcar_du_wb_conn_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ __drm_atomic_helper_connector_destroy_state(state);
+ kfree(to_rcar_wb_conn_state(state));
+}
+
+static void rcar_du_wb_conn_reset(struct drm_connector *connector)
+{
+ struct rcar_du_wb_conn_state *state;
+
+ if (connector->state) {
+ rcar_du_wb_conn_destroy_state(connector, connector->state);
+ connector->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return;
+
+ __drm_atomic_helper_connector_reset(connector, &state->state);
+}
+
+static const struct drm_connector_funcs rcar_du_wb_conn_funcs = {
+ .reset = rcar_du_wb_conn_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = rcar_du_wb_conn_duplicate_state,
+ .atomic_destroy_state = rcar_du_wb_conn_destroy_state,
+};
+
+static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rcar_du_wb_conn_state *wb_state =
+ to_rcar_wb_conn_state(conn_state);
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_device *dev = encoder->dev;
+ struct drm_framebuffer *fb;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+
+ /*
+ * Verify that the framebuffer format is supported and that its size
+ * matches the current mode.
+ */
+ if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
+ dev_dbg(dev->dev, "%s: invalid framebuffer size %ux%u\n",
+ __func__, fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ wb_state->format = rcar_du_format_info(fb->format->format);
+ if (wb_state->format == NULL) {
+ dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
+ fb->format->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs rcar_du_wb_enc_helper_funcs = {
+ .atomic_check = rcar_du_wb_enc_atomic_check,
+};
+
+/*
+ * Only RGB formats are currently supported as the VSP outputs RGB to the DU
+ * and can't convert to YUV separately for writeback.
+ */
+static const u32 writeback_formats[] = {
+ DRM_FORMAT_RGB332,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+};
+
+int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc)
+{
+ struct drm_writeback_connector *wb_conn = &rcrtc->writeback;
+
+ wb_conn->encoder.possible_crtcs = 1 << drm_crtc_index(&rcrtc->crtc);
+ drm_connector_helper_add(&wb_conn->base,
+ &rcar_du_wb_conn_helper_funcs);
+
+ return drm_writeback_connector_init(rcdu->ddev, wb_conn,
+ &rcar_du_wb_conn_funcs,
+ &rcar_du_wb_enc_helper_funcs,
+ writeback_formats,
+ ARRAY_SIZE(writeback_formats));
+}
+
+void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg)
+{
+ struct rcar_du_wb_conn_state *wb_state;
+ struct drm_connector_state *state;
+ struct rcar_du_wb_job *rjob;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+
+ state = rcrtc->writeback.base.state;
+ if (!state || !state->writeback_job || !state->writeback_job->fb)
+ return;
+
+ fb = state->writeback_job->fb;
+ rjob = state->writeback_job->priv;
+ wb_state = to_rcar_wb_conn_state(state);
+
+ cfg->pixelformat = wb_state->format->v4l2;
+ cfg->pitch = fb->pitches[0];
+
+ for (i = 0; i < wb_state->format->planes; ++i)
+ cfg->mem[i] = sg_dma_address(rjob->sg_tables[i].sgl)
+ + fb->offsets[i];
+
+ drm_writeback_queue_job(&rcrtc->writeback, state);
+}
+
+void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
+{
+ drm_writeback_signal_completion(&rcrtc->writeback, 0);
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.h b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h
new file mode 100644
index 000000000000..fa87ebf8d21f
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rcar_du_writeback.h -- R-Car Display Unit Writeback Support
+ *
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __RCAR_DU_WRITEBACK_H__
+#define __RCAR_DU_WRITEBACK_H__
+
+#include <drm/drm_plane.h>
+
+struct rcar_du_crtc;
+struct rcar_du_device;
+struct vsp1_du_atomic_pipe_config;
+
+#ifdef CONFIG_DRM_RCAR_WRITEBACK
+int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc);
+void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg);
+void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc);
+#else
+static inline int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc)
+{
+ return -ENXIO;
+}
+static inline void
+rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg)
+{
+}
+static inline void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
+{
+}
+#endif
+
+#endif /* __RCAR_DU_WRITEBACK_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 7ef97b2a6eda..620b51aab291 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -283,7 +283,7 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
* divider.
*/
fout = fvco / (1 << e) / div7;
- div = DIV_ROUND_CLOSEST(fout, target);
+ div = max(1UL, DIV_ROUND_CLOSEST(fout, target));
diff = abs(fout / div - target);
if (diff < pll->diff) {
@@ -485,9 +485,13 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
}
if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
- /* Turn on the LVDS PHY. */
+ /*
+ * Turn on the LVDS PHY. On D3, the LVEN and LVRES bits must be
+ * set at the same time, so don't write the register yet.
+ */
lvdcr0 |= LVDCR0_LVEN;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_PWD))
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
@@ -531,11 +535,16 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ int min_freq;
+
/*
* The internal LVDS encoder has a restricted clock frequency operating
- * range (31MHz to 148.5MHz). Clamp the clock accordingly.
+ * range, from 5MHz to 148.5MHz on D3 and E3, and from 31MHz to
+ * 148.5MHz on all other platforms. Clamp the clock accordingly.
*/
- adjusted_mode->clock = clamp(adjusted_mode->clock, 31000, 148500);
+ min_freq = lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL ? 5000 : 31000;
+ adjusted_mode->clock = clamp(adjusted_mode->clock, min_freq, 148500);
return true;
}
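
The mode_fixup change picks the lower clock bound from the EXT_PLL quirk and then clamps the pixel clock into the supported range. A quick standalone check of how that clamp behaves (the clamp macro below is a plain restatement of the kernel helper, without its type checking):

#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	int ext_pll = 1;			/* pretend the EXT_PLL quirk is set */
	int min_freq = ext_pll ? 5000 : 31000;	/* kHz, as in the patch */
	int clocks[] = { 4000, 25175, 200000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%d -> %d kHz\n", clocks[i],
		       clamp(clocks[i], min_freq, 148500));
	return 0;
}
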
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 1e75196f9659..2cdf3b62d559 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -77,4 +77,12 @@ config ROCKCHIP_RGB
Some Rockchip CRTCs, like rv1108, can directly output parallel
and serial RGB format to panel or connect to a conversion chip.
say Y to enable its driver.
+
+config ROCKCHIP_RK3066_HDMI
+ bool "Rockchip specific extensions for RK3066 HDMI"
+ depends on DRM_ROCKCHIP
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the RK3066 HDMI driver. If you want to enable
+ HDMI on RK3066-based SoCs, you should select this option.
endif
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index f6fc9d5dd0ad..524684ba7f6a 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -15,5 +15,6 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
+rockchipdrm-$(CONFIG_ROCKCHIP_RK3066_HDMI) += rk3066_hdmi.o
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
new file mode 100644
index 000000000000..85fc5f01f761
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Zheng Yang <zhengyang@rock-chips.com>
+ */
+
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "rk3066_hdmi.h"
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define DEFAULT_PLLA_RATE 30000000
+
+struct hdmi_data_info {
+ int vic; /* The CEA Video ID (VIC) of the current drm display mode. */
+ bool sink_is_hdmi;
+ unsigned int enc_out_format;
+ unsigned int colorimetry;
+};
+
+struct rk3066_hdmi_i2c {
+ struct i2c_adapter adap;
+
+ u8 ddc_addr;
+ u8 segment_addr;
+ u8 stat;
+
+ struct mutex i2c_lock; /* For i2c operation. */
+ struct completion cmpltn;
+};
+
+struct rk3066_hdmi {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct regmap *grf_regmap;
+ int irq;
+ struct clk *hclk;
+ void __iomem *regs;
+
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+
+ struct rk3066_hdmi_i2c *i2c;
+ struct i2c_adapter *ddc;
+
+ unsigned int tmdsclk;
+
+ struct hdmi_data_info hdmi_data;
+ struct drm_display_mode previous_mode;
+};
+
+#define to_rk3066_hdmi(x) container_of(x, struct rk3066_hdmi, x)
+
+static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
+{
+ return readl_relaxed(hdmi->regs + offset);
+}
+
+static inline void hdmi_writeb(struct rk3066_hdmi *hdmi, u16 offset, u32 val)
+{
+ writel_relaxed(val, hdmi->regs + offset);
+}
+
+static inline void hdmi_modb(struct rk3066_hdmi *hdmi, u16 offset,
+ u32 msk, u32 val)
+{
+ u8 temp = hdmi_readb(hdmi, offset) & ~msk;
+
+ temp |= val & msk;
+ hdmi_writeb(hdmi, offset, temp);
+}
+
+static void rk3066_hdmi_i2c_init(struct rk3066_hdmi *hdmi)
+{
+ int ddc_bus_freq;
+
+ ddc_bus_freq = (hdmi->tmdsclk >> 2) / HDMI_SCL_RATE;
+
+ hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
+ hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
+
+ /* Clear the EDID interrupt flag and mute the interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_STATUS1, HDMI_INTR_EDID_MASK);
+}
+
+static inline u8 rk3066_hdmi_get_power_mode(struct rk3066_hdmi *hdmi)
+{
+ return hdmi_readb(hdmi, HDMI_SYS_CTRL) & HDMI_SYS_POWER_MODE_MASK;
+}
+
+static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
+{
+ u8 current_mode, next_mode;
+ u8 i = 0;
+
+ current_mode = rk3066_hdmi_get_power_mode(hdmi);
+
+ DRM_DEV_DEBUG(hdmi->dev, "mode :%d\n", mode);
+ DRM_DEV_DEBUG(hdmi->dev, "current_mode :%d\n", current_mode);
+
+ if (current_mode == mode)
+ return;
+
+ do {
+ if (current_mode > mode) {
+ next_mode = current_mode / 2;
+ } else {
+ if (current_mode < HDMI_SYS_POWER_MODE_A)
+ next_mode = HDMI_SYS_POWER_MODE_A;
+ else
+ next_mode = current_mode * 2;
+ }
+
+ DRM_DEV_DEBUG(hdmi->dev, "%d: next_mode :%d\n", i, next_mode);
+
+ if (next_mode != HDMI_SYS_POWER_MODE_D) {
+ hdmi_modb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_MASK, next_mode);
+ } else {
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D |
+ HDMI_SYS_PLL_RESET_MASK);
+ usleep_range(90, 100);
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D |
+ HDMI_SYS_PLLB_RESET);
+ usleep_range(90, 100);
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D);
+ }
+ current_mode = next_mode;
+ i = i + 1;
+ } while ((next_mode != mode) && (i < 5));
+
+ /*
+ * When the IP controller isn't configured with accurate video timing,
+ * DDC_CLK should be equal to the PLLA frequency, which is 30MHz,
+ * so we need to init the TMDS rate to the PCLK rate and reconfigure
+ * the DDC clock.
+ */
+ if (mode < HDMI_SYS_POWER_MODE_D)
+ hdmi->tmdsclk = DEFAULT_PLLA_RATE;
+}
+
+static int
+rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
+ union hdmi_infoframe *frame, u32 frame_index,
+ u32 mask, u32 disable, u32 enable)
+{
+ if (mask)
+ hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);
+
+ hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);
+
+ if (setup_rc >= 0) {
+ u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
+ ssize_t rc, i;
+
+ rc = hdmi_infoframe_pack(frame, packed_frame,
+ sizeof(packed_frame));
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < rc; i++)
+ hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
+ packed_frame[i]);
+
+ if (mask)
+ hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
+ }
+
+ return setup_rc;
+}
+
+static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int rc;
+
+ rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ &hdmi->connector, mode);
+
+ if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+
+ frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
+ frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;
+
+ return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
+ HDMI_INFOFRAME_AVI, 0, 0, 0);
+}
+
+static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ int value, vsync_offset;
+
+ /* Set the details for the external polarity and interlace mode. */
+ value = HDMI_EXT_VIDEO_SET_EN;
+ value |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
+ HDMI_VIDEO_HSYNC_ACTIVE_HIGH : HDMI_VIDEO_HSYNC_ACTIVE_LOW;
+ value |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
+ HDMI_VIDEO_VSYNC_ACTIVE_HIGH : HDMI_VIDEO_VSYNC_ACTIVE_LOW;
+ value |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
+ HDMI_VIDEO_MODE_INTERLACE : HDMI_VIDEO_MODE_PROGRESSIVE;
+
+ if (hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3)
+ vsync_offset = 6;
+ else
+ vsync_offset = 0;
+
+ value |= vsync_offset << HDMI_VIDEO_VSYNC_OFFSET_SHIFT;
+ hdmi_writeb(hdmi, HDMI_EXT_VIDEO_PARA, value);
+
+ /* Set the details for the external video timing. */
+ value = mode->htotal;
+ hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_H, (value >> 8) & 0xFF);
+
+ value = mode->htotal - mode->hdisplay;
+ hdmi_writeb(hdmi, HDMI_EXT_HBLANK_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HBLANK_H, (value >> 8) & 0xFF);
+
+ value = mode->htotal - mode->hsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_HDELAY_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HDELAY_H, (value >> 8) & 0xFF);
+
+ value = mode->hsync_end - mode->hsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_HDURATION_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HDURATION_H, (value >> 8) & 0xFF);
+
+ value = mode->vtotal;
+ hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_H, (value >> 8) & 0xFF);
+
+ value = mode->vtotal - mode->vdisplay;
+ hdmi_writeb(hdmi, HDMI_EXT_VBLANK_L, value & 0xFF);
+
+ value = mode->vtotal - mode->vsync_start + vsync_offset;
+ hdmi_writeb(hdmi, HDMI_EXT_VDELAY, value & 0xFF);
+
+ value = mode->vsync_end - mode->vsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_VDURATION, value & 0xFF);
+
+ return 0;
+}
+
+static void
+rk3066_hdmi_phy_write(struct rk3066_hdmi *hdmi, u16 offset, u8 value)
+{
+ hdmi_writeb(hdmi, offset, value);
+ hdmi_modb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_PLL_RESET_MASK, HDMI_SYS_PLL_RESET);
+ usleep_range(90, 100);
+ hdmi_modb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_PLL_RESET_MASK, 0);
+ usleep_range(900, 1000);
+}
+
+static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
+{
+ /* TMDS uses the same frequency as dclk. */
+ hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x22);
+
+ /*
+ * The semi-public documentation does not describe the hdmi registers
+ * used by the function rk3066_hdmi_phy_write(), so we keep using
+ * these magic values for now.
+ */
+ if (hdmi->tmdsclk > 100000000) {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x0E);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xDA);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA1);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x22);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ } else if (hdmi->tmdsclk > 50000000) {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x06);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xCA);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA3);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ } else {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x02);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xC2);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA2);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ }
+}
+
+static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
+ hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
+
+ if (hdmi->hdmi_data.vic == 6 || hdmi->hdmi_data.vic == 7 ||
+ hdmi->hdmi_data.vic == 21 || hdmi->hdmi_data.vic == 22 ||
+ hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3 ||
+ hdmi->hdmi_data.vic == 17 || hdmi->hdmi_data.vic == 18)
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ else
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+
+ hdmi->tmdsclk = mode->clock * 1000;
+
+ /* Mute video and audio output. */
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2, HDMI_VIDEO_AUDIO_DISABLE_MASK,
+ HDMI_AUDIO_DISABLE | HDMI_VIDEO_DISABLE);
+
+ /* Set power state to mode B. */
+ if (rk3066_hdmi_get_power_mode(hdmi) != HDMI_SYS_POWER_MODE_B)
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
+
+	/* The input video mode is 24-bit RGB; use the external data enable signal. */
+ hdmi_modb(hdmi, HDMI_AV_CTRL1,
+ HDMI_VIDEO_DE_MASK, HDMI_VIDEO_EXTERNAL_DE);
+ hdmi_writeb(hdmi, HDMI_VIDEO_CTRL1,
+ HDMI_VIDEO_OUTPUT_RGB444 |
+ HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT |
+ HDMI_VIDEO_INPUT_COLOR_RGB);
+ hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x20);
+
+ rk3066_hdmi_config_video_timing(hdmi, mode);
+
+ if (hdmi->hdmi_data.sink_is_hdmi) {
+ hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
+ HDMI_VIDEO_MODE_HDMI);
+ rk3066_hdmi_config_avi(hdmi, mode);
+ } else {
+ hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
+ }
+
+ rk3066_hdmi_config_phy(hdmi);
+
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_E);
+
+ /*
+ * When the IP controller is configured with accurate video
+ * timing, the TMDS clock source should be switched to
+	 * DCLK_LCDC, so we need to initialize the TMDS rate to the
+	 * pixel clock rate and reconfigure the DDC clock.
+ */
+ rk3066_hdmi_i2c_init(hdmi);
+
+ /* Unmute video output. */
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_VIDEO_AUDIO_DISABLE_MASK, HDMI_AUDIO_DISABLE);
+ return 0;
+}
+
+static void
+rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+
+	/* Store the display mode for plug-in/DPMS power-on events. */
+ memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
+}
+
+static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+ int mux, val;
+
+ mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
+ if (mux)
+ val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
+ else
+ val = HDMI_VIDEO_SEL << 16;
+
+ regmap_write(hdmi->grf_regmap, GRF_SOC_CON0, val);
+
+ DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
+ (mux) ? "1" : "0");
+
+ rk3066_hdmi_setup(hdmi, &hdmi->previous_mode);
+}
+
+static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+
+ DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");
+
+ if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_E) {
+ hdmi_writeb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_VIDEO_AUDIO_DISABLE_MASK);
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_AUDIO_CP_LOGIC_RESET_MASK,
+ HDMI_AUDIO_CP_LOGIC_RESET);
+ usleep_range(500, 510);
+ }
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
+}
+
+static bool
+rk3066_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ return true;
+}
+
+static int
+rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ return 0;
+}
+
+static const
+struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
+ .enable = rk3066_hdmi_encoder_enable,
+ .disable = rk3066_hdmi_encoder_disable,
+ .mode_fixup = rk3066_hdmi_encoder_mode_fixup,
+ .mode_set = rk3066_hdmi_encoder_mode_set,
+ .atomic_check = rk3066_hdmi_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static enum drm_connector_status
+rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+
+ return (hdmi_readb(hdmi, HDMI_HPG_MENS_STA) & HDMI_HPG_IN_STATUS_HIGH) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ if (!hdmi->ddc)
+ return 0;
+
+ edid = drm_get_edid(connector, hdmi->ddc);
+ if (edid) {
+ hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static enum drm_mode_status
+rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ u32 vic = drm_match_cea_mode(mode);
+
+ if (vic > 1)
+ return MODE_OK;
+ else
+ return MODE_BAD;
+}
+
+static struct drm_encoder *
+rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+
+ return &hdmi->encoder;
+}
+
+static int
+rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ if (maxX > 1920)
+ maxX = 1920;
+ if (maxY > 1080)
+ maxY = 1080;
+
+ return drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+}
+
+static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
+ .fill_modes = rk3066_hdmi_probe_single_connector_modes,
+ .detect = rk3066_hdmi_connector_detect,
+ .destroy = rk3066_hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const
+struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
+ .get_modes = rk3066_hdmi_connector_get_modes,
+ .mode_valid = rk3066_hdmi_connector_mode_valid,
+ .best_encoder = rk3066_hdmi_connector_best_encoder,
+};
+
+static int
+rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder;
+ struct device *dev = hdmi->dev;
+
+ encoder->possible_crtcs =
+ drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_helper_add(&hdmi->connector,
+ &rk3066_hdmi_connector_helper_funcs);
+ drm_connector_init(drm, &hdmi->connector,
+ &rk3066_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
+
+ return 0;
+}
+
+static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
+{
+ struct rk3066_hdmi *hdmi = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ u8 interrupt;
+
+ if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_A)
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_POWER_MODE_B);
+
+ interrupt = hdmi_readb(hdmi, HDMI_INTR_STATUS1);
+ if (interrupt)
+ hdmi_writeb(hdmi, HDMI_INTR_STATUS1, interrupt);
+
+ if (interrupt & HDMI_INTR_EDID_MASK) {
+ hdmi->i2c->stat = interrupt;
+ complete(&hdmi->i2c->cmpltn);
+ }
+
+ if (interrupt & (HDMI_INTR_HOTPLUG | HDMI_INTR_MSENS))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
+{
+ struct rk3066_hdmi *hdmi = dev_id;
+
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3066_hdmi_i2c_read(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
+{
+ int length = msgs->len;
+ u8 *buf = msgs->buf;
+ int ret;
+
+ ret = wait_for_completion_timeout(&hdmi->i2c->cmpltn, HZ / 10);
+ if (!ret || hdmi->i2c->stat & HDMI_INTR_EDID_ERR)
+ return -EAGAIN;
+
+ while (length--)
+ *buf++ = hdmi_readb(hdmi, HDMI_DDC_READ_FIFO_ADDR);
+
+ return 0;
+}
+
+static int rk3066_hdmi_i2c_write(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
+{
+ /*
+	 * The DDC module only supports reading EDID messages, so we
+	 * assume that each write to this I2C adapter carries the
+	 * offset of the EDID word address.
+ */
+ if (msgs->len != 1 ||
+ (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR))
+ return -EINVAL;
+
+ reinit_completion(&hdmi->i2c->cmpltn);
+
+ if (msgs->addr == DDC_SEGMENT_ADDR)
+ hdmi->i2c->segment_addr = msgs->buf[0];
+ if (msgs->addr == DDC_ADDR)
+ hdmi->i2c->ddc_addr = msgs->buf[0];
+
+	/* Set the EDID word address (0x00 or 0x80). */
+ hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
+
+	/* Set the EDID segment pointer. */
+ hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr);
+
+ return 0;
+}
+
+static int rk3066_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct rk3066_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct rk3066_hdmi_i2c *i2c = hdmi->i2c;
+ int i, ret = 0;
+
+ mutex_lock(&i2c->i2c_lock);
+
+ rk3066_hdmi_i2c_init(hdmi);
+
+ /* Unmute HDMI EDID interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1,
+ HDMI_INTR_EDID_MASK, HDMI_INTR_EDID_MASK);
+ i2c->stat = 0;
+
+ for (i = 0; i < num; i++) {
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = rk3066_hdmi_i2c_read(hdmi, &msgs[i]);
+ else
+ ret = rk3066_hdmi_i2c_write(hdmi, &msgs[i]);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute HDMI EDID interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
+
+ mutex_unlock(&i2c->i2c_lock);
+
+ return ret;
+}
+
+static u32 rk3066_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm rk3066_hdmi_algorithm = {
+ .master_xfer = rk3066_hdmi_i2c_xfer,
+ .functionality = rk3066_hdmi_i2c_func,
+};
+
+static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct rk3066_hdmi_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->i2c_lock);
+ init_completion(&i2c->cmpltn);
+
+ adap = &i2c->adap;
+ adap->class = I2C_CLASS_DDC;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->dev.of_node = hdmi->dev->of_node;
+ adap->algo = &rk3066_hdmi_algorithm;
+ strlcpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
+ adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+
+ DRM_DEV_DEBUG(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
+static int rk3066_hdmi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct rk3066_hdmi *hdmi;
+ struct resource *iores;
+ int irq;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+ hdmi->drm_dev = drm;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -ENXIO;
+
+ hdmi->regs = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ hdmi->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(hdmi->hclk)) {
+ DRM_DEV_ERROR(dev, "unable to get HDMI hclk clock\n");
+ return PTR_ERR(hdmi->hclk);
+ }
+
+ ret = clk_prepare_enable(hdmi->hclk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "cannot enable HDMI hclk clock: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->grf_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(hdmi->grf_regmap)) {
+ DRM_DEV_ERROR(dev, "unable to get rockchip,grf\n");
+ ret = PTR_ERR(hdmi->grf_regmap);
+ goto err_disable_hclk;
+ }
+
+ /* internal hclk = hdmi_hclk / 25 */
+ hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);
+
+ hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->ddc)) {
+ ret = PTR_ERR(hdmi->ddc);
+ hdmi->ddc = NULL;
+ goto err_disable_hclk;
+ }
+
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
+ usleep_range(999, 1000);
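+	/* Enable only the hotplug interrupt; keep all other sources masked. */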
+ hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK2, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK3, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK4, 0);
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
+
+ ret = rk3066_hdmi_register(drm, hdmi);
+ if (ret)
+ goto err_disable_i2c;
+
+ dev_set_drvdata(dev, hdmi);
+
+ ret = devm_request_threaded_irq(dev, irq, rk3066_hdmi_hardirq,
+ rk3066_hdmi_irq, IRQF_SHARED,
+ dev_name(dev), hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to request hdmi irq: %d\n", ret);
+ goto err_cleanup_hdmi;
+ }
+
+ return 0;
+
+err_cleanup_hdmi:
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+err_disable_i2c:
+ i2c_put_adapter(hdmi->ddc);
+err_disable_hclk:
+ clk_disable_unprepare(hdmi->hclk);
+
+ return ret;
+}
+
+static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+
+ i2c_put_adapter(hdmi->ddc);
+ clk_disable_unprepare(hdmi->hclk);
+}
+
+static const struct component_ops rk3066_hdmi_ops = {
+ .bind = rk3066_hdmi_bind,
+ .unbind = rk3066_hdmi_unbind,
+};
+
+static int rk3066_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &rk3066_hdmi_ops);
+}
+
+static int rk3066_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &rk3066_hdmi_ops);
+
+ return 0;
+}
+
+static const struct of_device_id rk3066_hdmi_dt_ids[] = {
+ { .compatible = "rockchip,rk3066-hdmi" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids);
+
+struct platform_driver rk3066_hdmi_driver = {
+ .probe = rk3066_hdmi_probe,
+ .remove = rk3066_hdmi_remove,
+ .driver = {
+ .name = "rockchip-rk3066-hdmi",
+ .of_match_table = rk3066_hdmi_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.h b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
new file mode 100644
index 000000000000..39a31c62a428
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Zheng Yang <zhengyang@rock-chips.com>
+ */
+
+#ifndef __RK3066_HDMI_H__
+#define __RK3066_HDMI_H__
+
+#define GRF_SOC_CON0 0x150
+#define HDMI_VIDEO_SEL BIT(14)
+
+#define DDC_SEGMENT_ADDR 0x30
+#define HDMI_SCL_RATE (50 * 1000)
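+/* 0x11 = 17 bytes, the size of a packed AVI infoframe (4-byte header + 13-byte payload). */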
+#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
+
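+/* Audio Clock Regeneration N values recommended by the HDMI specification. */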
+#define N_32K 0x1000
+#define N_441K 0x1880
+#define N_882K 0x3100
+#define N_1764K 0x6200
+#define N_48K 0x1800
+#define N_96K 0x3000
+#define N_192K 0x6000
+
+#define HDMI_SYS_CTRL 0x000
+#define HDMI_LR_SWAP_N3 0x004
+#define HDMI_N2 0x008
+#define HDMI_N1 0x00c
+#define HDMI_SPDIF_FS_CTS_INT3 0x010
+#define HDMI_CTS_INT2 0x014
+#define HDMI_CTS_INT1 0x018
+#define HDMI_CTS_EXT3 0x01c
+#define HDMI_CTS_EXT2 0x020
+#define HDMI_CTS_EXT1 0x024
+#define HDMI_AUDIO_CTRL1 0x028
+#define HDMI_AUDIO_CTRL2 0x02c
+#define HDMI_I2S_AUDIO_CTRL 0x030
+#define HDMI_I2S_SWAP 0x040
+#define HDMI_AUDIO_STA_BIT_CTRL1 0x044
+#define HDMI_AUDIO_STA_BIT_CTRL2 0x048
+#define HDMI_AUDIO_SRC_NUM_AND_LENGTH 0x050
+#define HDMI_AV_CTRL1 0x054
+#define HDMI_VIDEO_CTRL1 0x058
+#define HDMI_DEEP_COLOR_MODE 0x05c
+
+#define HDMI_EXT_VIDEO_PARA 0x0c0
+#define HDMI_EXT_HTOTAL_L 0x0c4
+#define HDMI_EXT_HTOTAL_H 0x0c8
+#define HDMI_EXT_HBLANK_L 0x0cc
+#define HDMI_EXT_HBLANK_H 0x0d0
+#define HDMI_EXT_HDELAY_L 0x0d4
+#define HDMI_EXT_HDELAY_H 0x0d8
+#define HDMI_EXT_HDURATION_L 0x0dc
+#define HDMI_EXT_HDURATION_H 0x0e0
+#define HDMI_EXT_VTOTAL_L 0x0e4
+#define HDMI_EXT_VTOTAL_H 0x0e8
+#define HDMI_AV_CTRL2 0x0ec
+#define HDMI_EXT_VBLANK_L 0x0f4
+#define HDMI_EXT_VBLANK_H 0x10c
+#define HDMI_EXT_VDELAY 0x0f8
+#define HDMI_EXT_VDURATION 0x0fc
+
+#define HDMI_CP_MANU_SEND_CTRL 0x100
+#define HDMI_CP_AUTO_SEND_CTRL 0x104
+#define HDMI_AUTO_CHECKSUM_OPT 0x108
+
+#define HDMI_VIDEO_CTRL2 0x114
+
+#define HDMI_PHY_OPTION 0x144
+
+#define HDMI_CP_BUF_INDEX 0x17c
+#define HDMI_CP_BUF_ACC_HB0 0x180
+#define HDMI_CP_BUF_ACC_HB1 0x184
+#define HDMI_CP_BUF_ACC_HB2 0x188
+#define HDMI_CP_BUF_ACC_PB0 0x18c
+
+#define HDMI_DDC_READ_FIFO_ADDR 0x200
+#define HDMI_DDC_BUS_FREQ_L 0x204
+#define HDMI_DDC_BUS_FREQ_H 0x208
+#define HDMI_DDC_BUS_CTRL 0x2dc
+#define HDMI_DDC_I2C_LEN 0x278
+#define HDMI_DDC_I2C_OFFSET 0x280
+#define HDMI_DDC_I2C_CTRL 0x284
+#define HDMI_DDC_I2C_READ_BUF0 0x288
+#define HDMI_DDC_I2C_READ_BUF1 0x28c
+#define HDMI_DDC_I2C_READ_BUF2 0x290
+#define HDMI_DDC_I2C_READ_BUF3 0x294
+#define HDMI_DDC_I2C_WRITE_BUF0 0x298
+#define HDMI_DDC_I2C_WRITE_BUF1 0x29c
+#define HDMI_DDC_I2C_WRITE_BUF2 0x2a0
+#define HDMI_DDC_I2C_WRITE_BUF3 0x2a4
+#define HDMI_DDC_I2C_WRITE_BUF4 0x2ac
+#define HDMI_DDC_I2C_WRITE_BUF5 0x2b0
+#define HDMI_DDC_I2C_WRITE_BUF6 0x2b4
+
+#define HDMI_INTR_MASK1 0x248
+#define HDMI_INTR_MASK2 0x24c
+#define HDMI_INTR_STATUS1 0x250
+#define HDMI_INTR_STATUS2 0x254
+#define HDMI_INTR_MASK3 0x258
+#define HDMI_INTR_MASK4 0x25c
+#define HDMI_INTR_STATUS3 0x260
+#define HDMI_INTR_STATUS4 0x264
+
+#define HDMI_HDCP_CTRL 0x2bc
+
+#define HDMI_EDID_SEGMENT_POINTER 0x310
+#define HDMI_EDID_WORD_ADDR 0x314
+#define HDMI_EDID_FIFO_ADDR 0x318
+
+#define HDMI_HPG_MENS_STA 0x37c
+
+#define HDMI_INTERNAL_CLK_DIVIDER 0x800
+
+enum {
+ /* HDMI_SYS_CTRL */
+ HDMI_SYS_POWER_MODE_MASK = 0xf0,
+ HDMI_SYS_POWER_MODE_A = 0x10,
+ HDMI_SYS_POWER_MODE_B = 0x20,
+ HDMI_SYS_POWER_MODE_D = 0x40,
+ HDMI_SYS_POWER_MODE_E = 0x80,
+ HDMI_SYS_PLL_RESET_MASK = 0x0c,
+ HDMI_SYS_PLL_RESET = 0x0c,
+ HDMI_SYS_PLLB_RESET = 0x08,
+
+ /* HDMI_LR_SWAP_N3 */
+ HDMI_AUDIO_LR_SWAP_MASK = 0xf0,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET0 = 0x10,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET1 = 0x20,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET2 = 0x40,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET3 = 0x80,
+ HDMI_AUDIO_N_19_16_MASK = 0x0f,
+
+ /* HDMI_AUDIO_CTRL1 */
+ HDMI_AUDIO_EXTERNAL_CTS = BIT(7),
+ HDMI_AUDIO_INPUT_IIS = 0,
+ HDMI_AUDIO_INPUT_SPDIF = 0x08,
+ HDMI_AUDIO_INPUT_MCLK_ACTIVE = 0x04,
+ HDMI_AUDIO_INPUT_MCLK_DEACTIVE = 0,
+ HDMI_AUDIO_INPUT_MCLK_RATE_128X = 0,
+ HDMI_AUDIO_INPUT_MCLK_RATE_256X = 1,
+ HDMI_AUDIO_INPUT_MCLK_RATE_384X = 2,
+ HDMI_AUDIO_INPUT_MCLK_RATE_512X = 3,
+
+ /* HDMI_I2S_AUDIO_CTRL */
+ HDMI_AUDIO_I2S_FORMAT_STANDARD = 0,
+ HDMI_AUDIO_I2S_CHANNEL_1_2 = 0x04,
+ HDMI_AUDIO_I2S_CHANNEL_3_4 = 0x0c,
+ HDMI_AUDIO_I2S_CHANNEL_5_6 = 0x1c,
+ HDMI_AUDIO_I2S_CHANNEL_7_8 = 0x3c,
+
+ /* HDMI_AV_CTRL1 */
+ HDMI_AUDIO_SAMPLE_FRE_MASK = 0xf0,
+ HDMI_AUDIO_SAMPLE_FRE_32000 = 0x30,
+ HDMI_AUDIO_SAMPLE_FRE_44100 = 0,
+ HDMI_AUDIO_SAMPLE_FRE_48000 = 0x20,
+ HDMI_AUDIO_SAMPLE_FRE_88200 = 0x80,
+ HDMI_AUDIO_SAMPLE_FRE_96000 = 0xa0,
+ HDMI_AUDIO_SAMPLE_FRE_176400 = 0xc0,
+ HDMI_AUDIO_SAMPLE_FRE_192000 = 0xe0,
+ HDMI_AUDIO_SAMPLE_FRE_768000 = 0x90,
+
+ HDMI_VIDEO_INPUT_FORMAT_MASK = 0x0e,
+ HDMI_VIDEO_INPUT_RGB_YCBCR444 = 0,
+ HDMI_VIDEO_INPUT_YCBCR422 = 0x02,
+ HDMI_VIDEO_DE_MASK = 0x1,
+ HDMI_VIDEO_INTERNAL_DE = 0,
+ HDMI_VIDEO_EXTERNAL_DE = 0x01,
+
+ /* HDMI_VIDEO_CTRL1 */
+ HDMI_VIDEO_OUTPUT_FORMAT_MASK = 0xc0,
+ HDMI_VIDEO_OUTPUT_RGB444 = 0,
+ HDMI_VIDEO_OUTPUT_YCBCR444 = 0x40,
+ HDMI_VIDEO_OUTPUT_YCBCR422 = 0x80,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_MASK = 0x30,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_12BIT = 0,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_10BIT = 0x10,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT = 0x30,
+ HDMI_VIDEO_INPUT_COLOR_MASK = 1,
+ HDMI_VIDEO_INPUT_COLOR_RGB = 0,
+ HDMI_VIDEO_INPUT_COLOR_YCBCR = 1,
+
+ /* HDMI_EXT_VIDEO_PARA */
+ HDMI_VIDEO_VSYNC_OFFSET_SHIFT = 4,
+ HDMI_VIDEO_VSYNC_ACTIVE_HIGH = BIT(3),
+ HDMI_VIDEO_VSYNC_ACTIVE_LOW = 0,
+ HDMI_VIDEO_HSYNC_ACTIVE_HIGH = BIT(2),
+ HDMI_VIDEO_HSYNC_ACTIVE_LOW = 0,
+ HDMI_VIDEO_MODE_INTERLACE = BIT(1),
+ HDMI_VIDEO_MODE_PROGRESSIVE = 0,
+ HDMI_EXT_VIDEO_SET_EN = BIT(0),
+
+ /* HDMI_CP_AUTO_SEND_CTRL */
+
+ /* HDMI_VIDEO_CTRL2 */
+ HDMI_VIDEO_AV_MUTE_MASK = 0xc0,
+ HDMI_VIDEO_CLR_AV_MUTE = BIT(7),
+ HDMI_VIDEO_SET_AV_MUTE = BIT(6),
+ HDMI_AUDIO_CP_LOGIC_RESET_MASK = BIT(2),
+ HDMI_AUDIO_CP_LOGIC_RESET = BIT(2),
+ HDMI_VIDEO_AUDIO_DISABLE_MASK = 0x3,
+ HDMI_AUDIO_DISABLE = BIT(1),
+ HDMI_VIDEO_DISABLE = BIT(0),
+
+ /* HDMI_CP_BUF_INDEX */
+ HDMI_INFOFRAME_VSI = 0x05,
+ HDMI_INFOFRAME_AVI = 0x06,
+ HDMI_INFOFRAME_AAI = 0x08,
+
+ /* HDMI_INTR_MASK1 */
+ /* HDMI_INTR_STATUS1 */
+ HDMI_INTR_HOTPLUG = BIT(7),
+ HDMI_INTR_MSENS = BIT(6),
+ HDMI_INTR_VSYNC = BIT(5),
+ HDMI_INTR_AUDIO_FIFO_FULL = BIT(4),
+ HDMI_INTR_EDID_MASK = 0x6,
+ HDMI_INTR_EDID_READY = BIT(2),
+ HDMI_INTR_EDID_ERR = BIT(1),
+
+ /* HDMI_HDCP_CTRL */
+ HDMI_VIDEO_MODE_MASK = BIT(1),
+ HDMI_VIDEO_MODE_HDMI = BIT(1),
+
+ /* HDMI_HPG_MENS_STA */
+ HDMI_HPG_IN_STATUS_HIGH = BIT(7),
+ HDMI_MSENS_IN_STATUS_HIGH = BIT(6),
+};
+
+#endif /* __RK3066_HDMI_H__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index d7fa17f12769..cb938d3cd3c2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,6 +448,14 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
+static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm)
+ drm_atomic_helper_shutdown(drm);
+}
+
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -457,6 +465,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
+ .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
@@ -486,6 +495,8 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
CONFIG_ROCKCHIP_DW_MIPI_DSI);
ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
+ ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver,
+ CONFIG_ROCKCHIP_RK3066_HDMI);
ret = platform_register_drivers(rockchip_sub_drivers,
num_rockchip_sub_drivers);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index ce48568ec8a0..e4bc4322bc3f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -73,4 +73,5 @@ extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
extern struct platform_driver rockchip_lvds_driver;
extern struct platform_driver vop_platform_driver;
+extern struct platform_driver rk3066_hdmi_driver;
#endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 8ce68bd508be..30459de66b67 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -90,12 +90,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- fbi->par = helper;
fbi->fbops = &rockchip_drm_fbdev_ops;
fb = helper->fb;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
@@ -110,8 +108,6 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
rk_obj->kvaddr,
offset, size);
- fbi->skip_vt_switch = true;
-
return 0;
out:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index a8db758d523e..a2ebb08990e9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -221,26 +221,13 @@ static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
- unsigned int i, count = obj->size >> PAGE_SHIFT;
+ unsigned int count = obj->size >> PAGE_SHIFT;
unsigned long user_count = vma_pages(vma);
- unsigned long uaddr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff;
- unsigned long end = user_count + offset;
- int ret;
if (user_count == 0)
return -ENXIO;
- if (end > count)
- return -ENXIO;
- for (i = offset; i < end; i++) {
- ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
- if (ret)
- return ret;
- uaddr += PAGE_SIZE;
- }
-
- return 0;
+ return vm_map_pages(vma, rk_obj->pages, count);
}
static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 0d4ade9d4722..20a9c296d027 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1041,6 +1041,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
u16 vact_end = vact_st + vdisplay;
uint32_t pin_pol, val;
+ int dither_bpc = s->output_bpc ? s->output_bpc : 10;
int ret;
mutex_lock(&vop->vop_lock);
@@ -1098,11 +1099,19 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
!(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
s->output_mode = ROCKCHIP_OUT_MODE_P888;
- if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8)
+ if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && dither_bpc <= 8)
VOP_REG_SET(vop, common, pre_dither_down, 1);
else
VOP_REG_SET(vop, common, pre_dither_down, 0);
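+	/* For 6 bpc sinks, dither the 8-bit RGB input down to RGB666. */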
+ if (dither_bpc == 6) {
+ VOP_REG_SET(vop, common, dither_down_sel, DITHER_DOWN_ALLEGRO);
+ VOP_REG_SET(vop, common, dither_down_mode, RGB888_TO_RGB666);
+ VOP_REG_SET(vop, common, dither_down_en, 1);
+ } else {
+ VOP_REG_SET(vop, common, dither_down_en, 0);
+ }
+
VOP_REG_SET(vop, common, out_mode, s->output_mode);
VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 04ed401d2325..e64351dab610 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -71,7 +71,9 @@ struct vop_common {
struct vop_reg dsp_blank;
struct vop_reg data_blank;
struct vop_reg pre_dither_down;
- struct vop_reg dither_down;
+ struct vop_reg dither_down_sel;
+ struct vop_reg dither_down_mode;
+ struct vop_reg dither_down_en;
struct vop_reg dither_up;
struct vop_reg gate_en;
struct vop_reg mmu_en;
@@ -287,6 +289,16 @@ enum scale_down_mode {
SCALE_DOWN_AVG = 0x1
};
+enum dither_down_mode {
+ RGB888_TO_RGB565 = 0x0,
+ RGB888_TO_RGB666 = 0x1
+};
+
+enum dither_down_mode_sel {
+ DITHER_DOWN_ALLEGRO = 0x0,
+ DITHER_DOWN_FRC = 0x1
+};
+
enum vop_pol {
HSYNC_POSITIVE = 0,
VSYNC_POSITIVE = 1,
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index bd76328c0fdb..e732b73033c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -137,6 +137,9 @@ static const struct vop_common rk3036_common = {
.standby = VOP_REG_SYNC(RK3036_SYS_CTRL, 0x1, 30),
.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
.dsp_blank = VOP_REG(RK3036_DSP_CTRL1, 0x1, 24),
+ .dither_down_sel = VOP_REG(RK3036_DSP_CTRL0, 0x1, 27),
+ .dither_down_en = VOP_REG(RK3036_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 10),
.cfg_done = VOP_REG_SYNC(RK3036_REG_CFG_DONE, 0x1, 0),
};
@@ -200,6 +203,9 @@ static const struct vop_common px30_common = {
.standby = VOP_REG_SYNC(PX30_SYS_CTRL2, 0x1, 1),
.out_mode = VOP_REG(PX30_DSP_CTRL2, 0xf, 16),
.dsp_blank = VOP_REG(PX30_DSP_CTRL2, 0x1, 14),
+ .dither_down_en = VOP_REG(PX30_DSP_CTRL2, 0x1, 8),
+ .dither_down_sel = VOP_REG(PX30_DSP_CTRL2, 0x1, 7),
+ .dither_down_mode = VOP_REG(PX30_DSP_CTRL2, 0x1, 6),
.cfg_done = VOP_REG_SYNC(PX30_REG_CFG_DONE, 0x1, 0),
};
@@ -365,6 +371,8 @@ static const struct vop_common rk3066_common = {
.standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
.out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
+ .dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
};
@@ -458,6 +466,9 @@ static const struct vop_common rk3188_common = {
.standby = VOP_REG(RK3188_SYS_CTRL, 0x1, 30),
.out_mode = VOP_REG(RK3188_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3188_REG_CFG_DONE, 0x1, 0),
+ .dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
+ .dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
};
@@ -585,8 +596,10 @@ static const struct vop_common rk3288_common = {
.standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
.mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
+ .dither_down_sel = VOP_REG(RK3288_DSP_CTRL1, 0x1, 4),
+ .dither_down_mode = VOP_REG(RK3288_DSP_CTRL1, 0x1, 3),
+ .dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
- .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
@@ -878,7 +891,10 @@ static const struct vop_misc rk3328_misc = {
static const struct vop_common rk3328_common = {
.standby = VOP_REG_SYNC(RK3328_SYS_CTRL, 0x1, 22),
- .dither_down = VOP_REG(RK3328_DSP_CTRL1, 0xf, 1),
+ .dither_down_sel = VOP_REG(RK3328_DSP_CTRL1, 0x1, 4),
+ .dither_down_mode = VOP_REG(RK3328_DSP_CTRL1, 0x1, 3),
+ .dither_down_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 2),
+ .pre_dither_down = VOP_REG(RK3328_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3328_DSP_CTRL1, 0x1, 6),
.dsp_blank = VOP_REG(RK3328_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3328_DSP_CTRL0, 0xf, 0),
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 19fc601c9eeb..a1bec2779e76 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
EXPORT_SYMBOL(drm_sched_increase_karma);
/**
- * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ * drm_sched_stop - stop the scheduler
*
* @sched: scheduler instance
- * @bad: bad scheduler job
*
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index fbed2c90fd51..286a0eeefcb6 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1615,7 +1615,7 @@ static int igt_topdown(void *ignored)
DRM_RND_STATE(prng, random_seed);
const unsigned int count = 8192;
unsigned int size;
- unsigned long *bitmap = NULL;
+ unsigned long *bitmap;
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int *order, n, m, o = 0;
@@ -1631,8 +1631,7 @@ static int igt_topdown(void *ignored)
if (!nodes)
goto err;
- bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
- GFP_KERNEL);
+ bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
goto err_nodes;
@@ -1717,7 +1716,7 @@ out:
drm_mm_takedown(&mm);
kfree(order);
err_bitmap:
- kfree(bitmap);
+ bitmap_free(bitmap);
err_nodes:
vfree(nodes);
err:
@@ -1745,8 +1744,7 @@ static int igt_bottomup(void *ignored)
if (!nodes)
goto err;
- bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
- GFP_KERNEL);
+ bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
goto err_nodes;
@@ -1818,7 +1816,7 @@ out:
drm_mm_takedown(&mm);
kfree(order);
err_bitmap:
- kfree(bitmap);
+ bitmap_free(bitmap);
err_nodes:
vfree(nodes);
err:
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 61bbe8e8bcc5..e2a6c82c8252 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -4,7 +4,6 @@ config DRM_SHMOBILE
depends on DRM && ARM
depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 35367ada3bc1..d15b10de1da6 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -6,7 +6,7 @@ config DRM_STM
select DRM_KMS_CMA_HELPER
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
- select FB_PROVIDE_GET_FB_UNMAPPED_AREA
+ select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
help
Enable support for the on-chip display controller on
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 0a7f933ab007..5834ef56fbaa 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -129,6 +129,40 @@ static void drv_unload(struct drm_device *ddev)
drm_mode_config_cleanup(ddev);
}
+static __maybe_unused int drv_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct ltdc_device *ldev = ddev->dev_private;
+ struct drm_atomic_state *state;
+
+ drm_kms_helper_poll_disable(ddev);
+ state = drm_atomic_helper_suspend(ddev);
+ if (IS_ERR(state)) {
+ drm_kms_helper_poll_enable(ddev);
+ return PTR_ERR(state);
+ }
+ ldev->suspend_state = state;
+ ltdc_suspend(ddev);
+
+ return 0;
+}
+
+static __maybe_unused int drv_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ ltdc_resume(ddev);
+ drm_atomic_helper_resume(ddev, ldev->suspend_state);
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops drv_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(drv_suspend, drv_resume)
+};
+
static int stm_drm_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -186,6 +220,7 @@ static struct platform_driver stm_drm_platform_driver = {
.driver = {
.name = "stm32-display",
.of_match_table = drv_dt_ids,
+ .pm = &drv_pm_ops,
},
};
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index a672b59a2226..1bef73e8c8fe 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -356,12 +356,40 @@ static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
+{
+ struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ clk_disable_unprepare(dsi->pllref_clk);
+
+ return 0;
+}
+
+static int __maybe_unused dw_mipi_dsi_stm_resume(struct device *dev)
+{
+ struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ clk_prepare_enable(dsi->pllref_clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw_mipi_dsi_stm_suspend,
+ dw_mipi_dsi_stm_resume)
+};
+
static struct platform_driver dw_mipi_dsi_stm_driver = {
.probe = dw_mipi_dsi_stm_probe,
.remove = dw_mipi_dsi_stm_remove,
.driver = {
.of_match_table = dw_mipi_dsi_stm_dt_ids,
.name = "stm32-display-dsi",
+ .pm = &dw_mipi_dsi_stm_pm_ops,
},
};
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index b1741a9d5be2..32fd6a3b37fb 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -1062,6 +1062,30 @@ static int ltdc_get_caps(struct drm_device *ddev)
return 0;
}
+void ltdc_suspend(struct drm_device *ddev)
+{
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("\n");
+ clk_disable_unprepare(ldev->pixel_clk);
+}
+
+int ltdc_resume(struct drm_device *ddev)
+{
+ struct ltdc_device *ldev = ddev->dev_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ ret = clk_prepare_enable(ldev->pixel_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable pixel clock (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ltdc_load(struct drm_device *ddev)
{
struct platform_device *pdev = to_platform_device(ddev->dev);
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index e46f477a8494..a1ad0ae3b006 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -36,6 +36,7 @@ struct ltdc_device {
u32 error_status;
u32 irq_status;
struct fps_info plane_fpsi[LTDC_MAX_LAYER];
+ struct drm_atomic_state *suspend_state;
};
bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
@@ -45,5 +46,7 @@ bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
+void ltdc_suspend(struct drm_device *ddev);
+int ltdc_resume(struct drm_device *ddev);
#endif
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 4c0d51f73237..4e5922c89d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -361,13 +361,6 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
- /*
- * backend DMA accesses DRAM directly, bypassing the system
- * bus. As such, the address range is different and the buffer
- * address needs to be corrected.
- */
- paddr -= PHYS_OFFSET;
-
if (fb->format->is_yuv)
return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
@@ -720,33 +713,22 @@ static int sun4i_backend_free_sat(struct device *dev) {
*/
static int sun4i_backend_of_get_id(struct device_node *node)
{
- struct device_node *port, *ep;
- int ret = -EINVAL;
+ struct device_node *ep, *remote;
+ struct of_endpoint of_ep;
- /* input is port 0 */
- port = of_graph_get_port_by_id(node, 0);
- if (!port)
+ /* Input port is 0, and we want the first endpoint. */
+ ep = of_graph_get_endpoint_by_regs(node, 0, -1);
+ if (!ep)
return -EINVAL;
- /* try finding an upstream endpoint */
- for_each_available_child_of_node(port, ep) {
- struct device_node *remote;
- u32 reg;
-
- remote = of_graph_get_remote_endpoint(ep);
- if (!remote)
- continue;
-
- ret = of_property_read_u32(remote, "reg", &reg);
- if (ret)
- continue;
-
- ret = reg;
- }
-
- of_node_put(port);
+ remote = of_graph_get_remote_endpoint(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- return ret;
+ of_graph_parse_endpoint(remote, &of_ep);
+ of_node_put(remote);
+ return of_ep.id;
}
/* TODO: This needs to take multiple pipelines into account */
@@ -814,6 +796,27 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
dev_set_drvdata(dev, backend);
spin_lock_init(&backend->frontend_lock);
+ if (of_find_property(dev->of_node, "interconnects", NULL)) {
+ /*
+		 * This assumes we have the same DMA constraints for all the
+ * devices in our pipeline (all the backends, but also the
+ * frontends). This sounds bad, but it has always been the case
+ * for us, and DRM doesn't do per-device allocation either, so
+ * we would need to fix DRM first...
+ */
+ ret = of_dma_configure(drm->dev, dev->of_node, true);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If we don't have the interconnect property, most likely
+ * because of an old DT, we need to set the DMA offset by hand
+ * on our device since the RAM mapping is at 0 for the DMA bus,
+ * unlike the CPU.
+ */
+ drm->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+ }
+
backend->engine.node = dev->of_node;
backend->engine.ops = &sun4i_backend_engine_ops;
backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 3ebd9f5e2719..29258b404e54 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -16,6 +16,7 @@
#include <linux/of_reserved_mem.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev)
ret = -ENOMEM;
goto free_drm;
}
+
+ dev_set_drvdata(dev, drm);
drm->dev_private = drv;
INIT_LIST_HEAD(&drv->frontend_list);
INIT_LIST_HEAD(&drv->engine_list);
@@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
+
+ component_unbind_all(dev, NULL);
of_reserved_mem_device_release(dev);
+
drm_dev_put(drm);
}
@@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
static int sun4i_drv_remove(struct platform_device *pdev)
{
+ component_master_del(&pdev->dev, &sun4i_drv_master_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index b685ee11623d..b08c4453d47c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -269,6 +269,7 @@ struct sun4i_hdmi {
struct clk *tmds_clk;
struct i2c_adapter *i2c;
+ struct i2c_adapter *ddc_i2c;
/* Regmap fields for I2C adapter */
struct regmap_field *field_ddc_en;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index d18862629301..8c122e637697 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -217,7 +217,7 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- edid = drm_get_edid(connector, hdmi->i2c);
+ edid = drm_get_edid(connector, hdmi->ddc_i2c ?: hdmi->i2c);
if (!edid)
return 0;
@@ -233,6 +233,28 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
return ret;
}
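+/*
+ * Look up the optional ddc-i2c-bus phandle on the remote node of port 1
+ * (typically the HDMI connector) and return the corresponding I2C adapter.
+ */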
+static struct i2c_adapter *sun4i_hdmi_get_ddc(struct device *dev)
+{
+ struct device_node *phandle, *remote;
+ struct i2c_adapter *ddc;
+
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return ERR_PTR(-EINVAL);
+
+ phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!phandle)
+ return ERR_PTR(-ENODEV);
+
+ ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return ddc;
+}
+
static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = {
.get_modes = sun4i_hdmi_get_modes,
};
@@ -580,6 +602,15 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_mod_clk;
}
+ hdmi->ddc_i2c = sun4i_hdmi_get_ddc(dev);
+ if (IS_ERR(hdmi->ddc_i2c)) {
+ ret = PTR_ERR(hdmi->ddc_i2c);
+ if (ret == -ENODEV)
+ hdmi->ddc_i2c = NULL;
+ else
+ goto err_del_i2c_adapter;
+ }
+
drm_encoder_helper_add(&hdmi->encoder,
&sun4i_hdmi_helper_funcs);
ret = drm_encoder_init(drm,
@@ -589,14 +620,14 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
NULL);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
- goto err_del_i2c_adapter;
+ goto err_put_ddc_i2c;
}
hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
dev->of_node);
if (!hdmi->encoder.possible_crtcs) {
ret = -EPROBE_DEFER;
- goto err_del_i2c_adapter;
+ goto err_put_ddc_i2c;
}
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
@@ -635,6 +666,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
err_cleanup_connector:
cec_delete_adapter(hdmi->cec_adap);
drm_encoder_cleanup(&hdmi->encoder);
+err_put_ddc_i2c:
+ i2c_put_adapter(hdmi->ddc_i2c);
err_del_i2c_adapter:
i2c_del_adapter(hdmi->i2c);
err_disable_mod_clk:
@@ -655,6 +688,7 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
drm_connector_cleanup(&hdmi->connector);
drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
+ i2c_put_adapter(hdmi->ddc_i2c);
clk_disable_unprepare(hdmi->mod_clk);
clk_disable_unprepare(hdmi->bus_clk);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 147b97ed1a09..3a3ba99fed22 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -20,7 +20,7 @@ struct sun4i_lvds {
struct drm_connector connector;
struct drm_encoder encoder;
- struct sun4i_tcon *tcon;
+ struct drm_panel *panel;
};
static inline struct sun4i_lvds *
@@ -41,9 +41,8 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector)
{
struct sun4i_lvds *lvds =
drm_connector_to_sun4i_lvds(connector);
- struct sun4i_tcon *tcon = lvds->tcon;
- return drm_panel_get_modes(tcon->panel);
+ return drm_panel_get_modes(lvds->panel);
}
static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
@@ -54,9 +53,8 @@ static void
sun4i_lvds_connector_destroy(struct drm_connector *connector)
{
struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector);
- struct sun4i_tcon *tcon = lvds->tcon;
- drm_panel_detach(tcon->panel);
+ drm_panel_detach(lvds->panel);
drm_connector_cleanup(connector);
}
@@ -71,26 +69,24 @@ static const struct drm_connector_funcs sun4i_lvds_con_funcs = {
static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
- struct sun4i_tcon *tcon = lvds->tcon;
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
- if (tcon->panel) {
- drm_panel_prepare(tcon->panel);
- drm_panel_enable(tcon->panel);
+ if (lvds->panel) {
+ drm_panel_prepare(lvds->panel);
+ drm_panel_enable(lvds->panel);
}
}
static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
- struct sun4i_tcon *tcon = lvds->tcon;
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
- if (tcon->panel) {
- drm_panel_disable(tcon->panel);
- drm_panel_unprepare(tcon->panel);
+ if (lvds->panel) {
+ drm_panel_disable(lvds->panel);
+ drm_panel_unprepare(lvds->panel);
}
}
@@ -113,11 +109,10 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL);
if (!lvds)
return -ENOMEM;
- lvds->tcon = tcon;
encoder = &lvds->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
- &tcon->panel, &bridge);
+ &lvds->panel, &bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n");
return 0;
@@ -138,7 +133,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
/* The LVDS encoder can only work with the TCON channel 0 */
lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
- if (tcon->panel) {
+ if (lvds->panel) {
drm_connector_helper_add(&lvds->connector,
&sun4i_lvds_con_helper_funcs);
ret = drm_connector_init(drm, &lvds->connector,
@@ -152,7 +147,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
- ret = drm_panel_attach(tcon->panel, &lvds->connector);
+ ret = drm_panel_attach(lvds->panel, &lvds->connector);
if (ret) {
dev_err(drm->dev, "Couldn't attach our panel\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index cae19e7bbeaa..d9e2502b49fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -27,6 +27,8 @@ struct sun4i_rgb {
struct drm_encoder encoder;
struct sun4i_tcon *tcon;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
static inline struct sun4i_rgb *
@@ -47,11 +49,18 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
{
struct sun4i_rgb *rgb =
drm_connector_to_sun4i_rgb(connector);
- struct sun4i_tcon *tcon = rgb->tcon;
- return drm_panel_get_modes(tcon->panel);
+ return drm_panel_get_modes(rgb->panel);
}
+/*
+ * VESA DMT defines a tolerance of 0.5% on the pixel clock, while the
+ * CVT spec reuses that tolerance in its examples, so it looks to be a
+ * good default tolerance for the EDID-based modes. Define it as 5 per
+ * mille to avoid floating point operations.
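+ * For example, a 148.5 MHz dotclock is accepted anywhere between
+ * 147.7575 MHz and 149.2425 MHz.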
+ */
+#define SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE 5
+
static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
const struct drm_display_mode *mode)
{
@@ -59,8 +68,9 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
struct sun4i_tcon *tcon = rgb->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
- unsigned long rate = mode->clock * 1000;
- long rounded_rate;
+ unsigned long long rate = mode->clock * 1000;
+ unsigned long long lowest, highest;
+ unsigned long long rounded_rate;
DRM_DEBUG_DRIVER("Validating modes...\n");
@@ -92,15 +102,39 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+ /*
+	 * TODO: We should use the struct display_timing if available
+	 * and/or try to stretch the timings within that tolerance to
+	 * handle panels for which we cannot get an exact match.
+ */
+ if (rgb->panel) {
+ DRM_DEBUG_DRIVER("RGB panel used, skipping clock rate checks");
+ goto out;
+ }
+
+ /*
+	 * This should never happen unless something is really wrong, but it
+	 * doesn't hurt to check.
+ */
+ if (!rgb->bridge)
+ goto out;
+
tcon->dclk_min_div = 6;
tcon->dclk_max_div = 127;
rounded_rate = clk_round_rate(tcon->dclk, rate);
- if (rounded_rate < rate)
+
+ lowest = rate * (1000 - SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
+ do_div(lowest, 1000);
+ if (rounded_rate < lowest)
return MODE_CLOCK_LOW;
- if (rounded_rate > rate)
+ highest = rate * (1000 + SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
+ do_div(highest, 1000);
+ if (rounded_rate > highest)
return MODE_CLOCK_HIGH;
+out:
DRM_DEBUG_DRIVER("Clock rate OK\n");
return MODE_OK;
@@ -114,9 +148,8 @@ static void
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
- struct sun4i_tcon *tcon = rgb->tcon;
- drm_panel_detach(tcon->panel);
+ drm_panel_detach(rgb->panel);
drm_connector_cleanup(connector);
}
@@ -131,26 +164,24 @@ static const struct drm_connector_funcs sun4i_rgb_con_funcs = {
static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (tcon->panel) {
- drm_panel_prepare(tcon->panel);
- drm_panel_enable(tcon->panel);
+ if (rgb->panel) {
+ drm_panel_prepare(rgb->panel);
+ drm_panel_enable(rgb->panel);
}
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (tcon->panel) {
- drm_panel_disable(tcon->panel);
- drm_panel_unprepare(tcon->panel);
+ if (rgb->panel) {
+ drm_panel_disable(rgb->panel);
+ drm_panel_unprepare(rgb->panel);
}
}
@@ -172,7 +203,6 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
- struct drm_bridge *bridge;
struct sun4i_rgb *rgb;
int ret;
@@ -183,7 +213,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
encoder = &rgb->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
- &tcon->panel, &bridge);
+ &rgb->panel, &rgb->bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
return 0;
@@ -204,7 +234,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
/* The RGB encoder can only work with the TCON channel 0 */
rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
- if (tcon->panel) {
+ if (rgb->panel) {
drm_connector_helper_add(&rgb->connector,
&sun4i_rgb_con_helper_funcs);
ret = drm_connector_init(drm, &rgb->connector,
@@ -218,15 +248,15 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_connector_attach_encoder(&rgb->connector,
&rgb->encoder);
- ret = drm_panel_attach(tcon->panel, &rgb->connector);
+ ret = drm_panel_attach(rgb->panel, &rgb->connector);
if (ret) {
dev_err(drm->dev, "Couldn't attach our panel\n");
goto err_cleanup_connector;
}
}
- if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ if (rgb->bridge) {
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 7136fc91c603..9d8d8124b1f6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -236,8 +236,8 @@ static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm)
return NULL;
}
-void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
- const struct drm_encoder *encoder)
+static void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
+ const struct drm_encoder *encoder)
{
int ret = -ENOTSUPP;
@@ -341,8 +341,8 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
u32 block_space, start_delay;
u32 tcon_div;
- tcon->dclk_min_div = 4;
- tcon->dclk_max_div = 127;
+ tcon->dclk_min_div = SUN6I_DSI_TCON_DIV;
+ tcon->dclk_max_div = SUN6I_DSI_TCON_DIV;
sun4i_tcon0_mode_set_common(tcon, mode);
@@ -561,10 +561,10 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
clk_set_phase(tcon->dclk, 240);
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index b5214d71610f..84cfb1952ff7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -257,8 +257,6 @@ struct sun4i_tcon {
struct reset_control *lcd_rst;
struct reset_control *lvds_rst;
- struct drm_panel *panel;
-
/* Platform adjustments */
const struct sun4i_tcon_quirks *quirks;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 318994cd1b85..6ff585055a07 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -24,7 +24,9 @@
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include "sun4i_crtc.h"
#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
#include <video/mipi_display.h>
@@ -33,6 +35,8 @@
#define SUN6I_DSI_CTL_EN BIT(0)
#define SUN6I_DSI_BASIC_CTL_REG 0x00c
+#define SUN6I_DSI_BASIC_CTL_TRAIL_INV(n) (((n) & 0xf) << 4)
+#define SUN6I_DSI_BASIC_CTL_TRAIL_FILL BIT(3)
#define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2)
#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1)
#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0)
@@ -153,6 +157,8 @@
#define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04)
+#define SUN6I_DSI_SYNC_POINT 40
+
enum sun6i_dsi_start_inst {
DSI_START_LPRX,
DSI_START_LPTX,
@@ -358,7 +364,54 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
+ u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
+ u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+
+ if (delay > mode->vtotal)
+ delay = delay % mode->vtotal;
+
+ return max_t(u16, delay, 1);
+}
+
+static u16 sun6i_dsi_get_line_num(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ struct mipi_dsi_device *device = dsi->device;
+ unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+
+ return mode->htotal * Bpp / device->lanes;
+}
+
+static u16 sun6i_dsi_get_drq_edge0(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode,
+ u16 line_num, u16 edge1)
+{
+ u16 edge0 = edge1;
+
+ edge0 += (mode->hdisplay + 40) * SUN6I_DSI_TCON_DIV / 8;
+
+ if (edge0 > line_num)
+ return edge0 - line_num;
+
+ return 1;
+}
+
+static u16 sun6i_dsi_get_drq_edge1(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode,
+ u16 line_num)
+{
+ struct mipi_dsi_device *device = dsi->device;
+ unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ unsigned int hbp = mode->htotal - mode->hsync_end;
+ u16 edge1;
+
+ edge1 = SUN6I_DSI_SYNC_POINT;
+ edge1 += (mode->hdisplay + hbp + 20) * Bpp / device->lanes;
+
+ if (edge1 > line_num)
+ return line_num;
+
+ return edge1;
}
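
Editor's note: the helpers added above feed the burst-mode DRQ setup in the next hunk. As a quick illustration of the arithmetic only, here is a minimal user-space sketch; the panel timings (720x1280-class, RGB888, 4 lanes) are hypothetical, and the constants mirror SUN6I_DSI_SYNC_POINT (40) and SUN6I_DSI_TCON_DIV (4) introduced by this patch. This is not driver code.

/* Sketch only: reproduces the start-delay and DRQ-edge formulas from this
 * patch for a made-up 720x1280 MIPI DSI panel (RGB888, 4 lanes). */
#include <stdio.h>

#define SYNC_POINT 40   /* SUN6I_DSI_SYNC_POINT */
#define TCON_DIV   4    /* SUN6I_DSI_TCON_DIV */

static unsigned int clamp_uint(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        /* hypothetical timings */
        unsigned int hdisplay = 720, hsync_end = 752, htotal = 800;
        unsigned int vdisplay = 1280, vsync_end = 1300, vtotal = 1312;
        unsigned int bpp = 24 / 8, lanes = 4;

        /* sun6i_dsi_get_video_start_delay() */
        unsigned int start = clamp_uint(vtotal - vdisplay - 10, 8, 100);
        unsigned int delay = vtotal - (vsync_end - vdisplay) + start;
        if (delay > vtotal)
                delay %= vtotal;
        if (delay < 1)
                delay = 1;

        /* sun6i_dsi_get_line_num(): line length in bytes per lane */
        unsigned int line_num = htotal * bpp / lanes;

        /* sun6i_dsi_get_drq_edge1(): sync point + active + back porch + 20 */
        unsigned int hbp = htotal - hsync_end;
        unsigned int edge1 = SYNC_POINT + (hdisplay + hbp + 20) * bpp / lanes;
        if (edge1 > line_num)
                edge1 = line_num;

        /* sun6i_dsi_get_drq_edge0(): wraps around the line if needed */
        unsigned int edge0 = edge1 + (hdisplay + 40) * TCON_DIV / 8;
        edge0 = edge0 > line_num ? edge0 - line_num : 1;

        printf("start=%u delay=%u line_num=%u edge1=%u edge0=%u\n",
               start, delay, line_num, edge1, edge0);
        return 0;
}
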
static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
@@ -367,7 +420,23 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
struct mipi_dsi_device *device = dsi->device;
u32 val = 0;
- if ((mode->hsync_end - mode->hdisplay) > 20) {
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ u16 line_num = sun6i_dsi_get_line_num(dsi, mode);
+ u16 edge0, edge1;
+
+ edge1 = sun6i_dsi_get_drq_edge1(dsi, mode, line_num);
+ edge0 = sun6i_dsi_get_drq_edge0(dsi, mode, line_num, edge1);
+
+ regmap_write(dsi->regs, SUN6I_DSI_BURST_DRQ_REG,
+ SUN6I_DSI_BURST_DRQ_EDGE0(edge0) |
+ SUN6I_DSI_BURST_DRQ_EDGE1(edge1));
+
+ regmap_write(dsi->regs, SUN6I_DSI_BURST_LINE_REG,
+ SUN6I_DSI_BURST_LINE_NUM(line_num) |
+ SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
+
+ val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
+ } else if ((mode->hsync_end - mode->hdisplay) > 20) {
/* Maaaaaagic */
u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
@@ -384,8 +453,19 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
+ struct mipi_dsi_device *device = dsi->device;
u16 delay = 50 - 1;
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ delay = (mode->htotal - mode->hdisplay) * 150;
+ delay /= (mode->clock / 1000) * 8;
+ delay -= 50;
+ }
+
+ regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_SEL_REG,
+ 2 << (4 * DSI_INST_ID_LP11) |
+ 3 << (4 * DSI_INST_ID_DLY));
+
regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0),
SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
SUN6I_DSI_INST_LOOP_NUM_N1(delay));
@@ -451,49 +531,68 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
{
struct mipi_dsi_device *device = dsi->device;
unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
- u16 hbp, hfp, hsa, hblk, vblk;
+ u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
+ u32 basic_ctl = 0;
size_t bytes;
u8 *buffer;
/* Do all timing calculations up front to allocate buffer space */
- /*
- * A sync period is composed of a blanking packet (4 bytes +
- * payload + 2 bytes) and a sync event packet (4 bytes). Its
- * minimal size is therefore 10 bytes
- */
-#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
- (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ hblk = mode->hdisplay * Bpp;
+ basic_ctl = SUN6I_DSI_BASIC_CTL_VIDEO_BURST |
+ SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS |
+ SUN6I_DSI_BASIC_CTL_HBP_DIS;
- /*
- * The backporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
+ if (device->lanes == 4)
+ basic_ctl |= SUN6I_DSI_BASIC_CTL_TRAIL_FILL |
+ SUN6I_DSI_BASIC_CTL_TRAIL_INV(0xc);
+ } else {
+ /*
+ * A sync period is composed of a blanking packet (4
+ * bytes + payload + 2 bytes) and a sync event packet
+ * (4 bytes). Its minimal size is therefore 10 bytes
+ */
+#define HSA_PACKET_OVERHEAD 10
+ hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+
+ /*
+ * The backporch is set using a blanking packet (4
+ * bytes + payload + 2 bytes). Its minimal size is
+ * therefore 6 bytes
+ */
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
- (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
-
- /*
- * The frontporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
+ hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
+
+ /*
+ * The frontporch is set using a blanking packet (4
+ * bytes + payload + 2 bytes). Its minimal size is
+ * therefore 6 bytes
+ */
#define HFP_PACKET_OVERHEAD 6
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
- (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
-
- /*
- * hblk seems to be the line + porches length.
- */
- hblk = mode->htotal * Bpp - hsa;
-
- /*
- * And I'm not entirely sure what vblk is about. The driver in
- * Allwinner BSP is using a rather convoluted calculation
- * there only for 4 lanes. However, using 0 (the !4 lanes
- * case) even with a 4 lanes screen seems to work...
- */
- vblk = 0;
+ hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
+
+ /*
+ * The blanking is set using a sync event (4 bytes)
+ * and a blanking packet (4 bytes + payload + 2
+ * bytes). Its minimal size is therefore 10 bytes.
+ */
+#define HBLK_PACKET_OVERHEAD 10
+ hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
+ HBLK_PACKET_OVERHEAD);
+
+ /*
+ * And I'm not entirely sure what vblk is about. The driver in
+ * Allwinner BSP is using a rather convoluted calculation
+ * there only for 4 lanes. However, using 0 (the !4 lanes
+ * case) even with a 4 lanes screen seems to work...
+ */
+ vblk = 0;
+ }
/* How many bytes do we need to send all payloads? */
bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
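
Editor's note: in the non-burst branch, the patch also swaps the hbp/hfp terms so they follow the usual DRM convention (front porch = hsync_start - hdisplay, back porch = htotal - hsync_end), and sizes hblk as the full line minus the sync pulse with its own packet overhead; the VBP register programmed below gets the same convention fix. A minimal user-space sketch of the sizing, with hypothetical 720x800-class horizontal timings (RGB888):

/* Sketch only: the packet-overhead sizing from sun6i_dsi_setup_timings()
 * for the non-burst case, using made-up horizontal timings. */
#include <stdio.h>

#define HSA_PACKET_OVERHEAD  10 /* blanking pkt (4 + payload + 2) + sync event (4) */
#define HBP_PACKET_OVERHEAD  6  /* blanking pkt: 4 + payload + 2 */
#define HFP_PACKET_OVERHEAD  6
#define HBLK_PACKET_OVERHEAD 10

static unsigned int max_uint(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned int hdisplay = 720, hsync_start = 736, hsync_end = 752, htotal = 800;
        unsigned int bpp = 3;   /* RGB888 */

        unsigned int hsa = max_uint(HSA_PACKET_OVERHEAD,
                                    (hsync_end - hsync_start) * bpp - HSA_PACKET_OVERHEAD);
        unsigned int hbp = max_uint(HBP_PACKET_OVERHEAD,
                                    (htotal - hsync_end) * bpp - HBP_PACKET_OVERHEAD);
        unsigned int hfp = max_uint(HFP_PACKET_OVERHEAD,
                                    (hsync_start - hdisplay) * bpp - HFP_PACKET_OVERHEAD);
        unsigned int hblk = max_uint(HBLK_PACKET_OVERHEAD,
                                     (htotal - (hsync_end - hsync_start)) * bpp -
                                     HBLK_PACKET_OVERHEAD);

        printf("hsa=%u hbp=%u hfp=%u hblk=%u\n", hsa, hbp, hfp, hblk);
        return 0;
}
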
@@ -501,7 +600,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
if (WARN_ON(!buffer))
return;
- regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
+ regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, basic_ctl);
regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG,
sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START,
@@ -526,8 +625,8 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG,
SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end -
mode->vsync_start) |
- SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start -
- mode->vdisplay));
+ SUN6I_DSI_BASIC_SIZE0_VBP(mode->vtotal -
+ mode->vsync_end));
regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG,
SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index a07090579f84..5c3ad5be0690 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -13,6 +13,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_mipi_dsi.h>
+#define SUN6I_DSI_TCON_DIV 4
+
struct sun6i_dsi {
struct drm_connector connector;
struct drm_encoder encoder;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index dc47720c99ba..39d8509d96a0 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -48,8 +48,13 @@ static enum drm_mode_status
sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
- /* This is max for HDMI 2.0b (4K@60Hz) */
- if (mode->clock > 594000)
+ /*
+ * The controller supports a maximum of 594 MHz, which corresponds to
+ * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
+ * 340 MHz scrambling has to be enabled. Because scrambling is
+ * not yet implemented, just limit to 340 MHz for now.
+ */
+ if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
return MODE_OK;
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 30a2eff55687..fd20a928cf4d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -325,38 +325,22 @@ static struct regmap_config sun8i_mixer_regmap_config = {
static int sun8i_mixer_of_get_id(struct device_node *node)
{
- struct device_node *port, *ep;
- int ret = -EINVAL;
+ struct device_node *ep, *remote;
+ struct of_endpoint of_ep;
- /* output is port 1 */
- port = of_graph_get_port_by_id(node, 1);
- if (!port)
+ /* Output port is 1, and we want the first endpoint. */
+ ep = of_graph_get_endpoint_by_regs(node, 1, -1);
+ if (!ep)
return -EINVAL;
- /* try to find downstream endpoint */
- for_each_available_child_of_node(port, ep) {
- struct device_node *remote;
- u32 reg;
-
- remote = of_graph_get_remote_endpoint(ep);
- if (!remote)
- continue;
-
- ret = of_property_read_u32(remote, "reg", &reg);
- if (!ret) {
- of_node_put(remote);
- of_node_put(ep);
- of_node_put(port);
-
- return reg;
- }
-
- of_node_put(remote);
- }
-
- of_node_put(port);
+ remote = of_graph_get_remote_endpoint(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- return ret;
+ of_graph_parse_endpoint(remote, &of_ep);
+ of_node_put(remote);
+ return of_ep.id;
}
static int sun8i_mixer_bind(struct device *dev, struct device *master,
@@ -554,6 +538,7 @@ static int sun8i_mixer_remove(struct platform_device *pdev)
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
.ccsc = 0,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -561,6 +546,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
.ccsc = 1,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -569,6 +555,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 432000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -577,6 +564,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 297000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -585,6 +573,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
.ccsc = 1,
.mod_rate = 297000000,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -593,6 +582,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ccsc = 0,
.mod_rate = 150000000,
};
@@ -601,6 +591,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 297000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
@@ -609,6 +600,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.ccsc = 1,
.mod_rate = 297000000,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -618,6 +610,7 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.is_de3 = true,
.mod_rate = 600000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 913d14ce68b0..80e084caa084 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -159,6 +159,7 @@ struct de2_fmt_info {
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
* @is_de3: true, if this is next gen display engine 3.0, false otherwise.
+ * @scanline_yuv: size of a scanline for VI scaler for YUV formats.
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -167,6 +168,7 @@ struct sun8i_mixer_cfg {
int ccsc;
unsigned long mod_rate;
unsigned int is_de3 : 1;
+ unsigned int scanline_yuv;
};
struct sun8i_mixer {
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index fc36e0c10a37..3267d0f9b9b2 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
err_unregister_gates:
for (i = 0; i < CLK_NUM; i++)
- if (clk_data->hws[i])
+ if (!IS_ERR_OR_NULL(clk_data->hws[i]))
clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
err_assert_reset:
@@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
of_clk_del_provider(dev->of_node);
for (i = 0; i < CLK_NUM; i++)
- clk_hw_unregister_gate(clk_data->hws[i]);
+ if (clk_data->hws[i])
+ clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
reset_control_assert(tcon_top->rst);
@@ -268,12 +269,12 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
return 0;
}
-const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
+static const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
.has_tcon_tv1 = true,
.has_dsi = true,
};
-const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
+static const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
/* Nothing special */
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 8a0616238467..bb8e026d6405 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -80,6 +80,8 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
+ u32 hn = 0, hm = 0;
+ u32 vn = 0, vm = 0;
bool subsampled;
DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
@@ -137,12 +139,41 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
subsampled = format->hsub > 1 || format->vsub > 1;
if (insize != outsize || subsampled || hphase || vphase) {
- u32 hscale, vscale;
+ unsigned int scanline, required;
+ struct drm_display_mode *mode;
+ u32 hscale, vscale, fps;
+ u64 ability;
DRM_DEBUG_DRIVER("HW scaling is enabled\n");
- hscale = state->src_w / state->crtc_w;
- vscale = state->src_h / state->crtc_h;
+ mode = &plane->state->crtc->state->mode;
+ fps = (mode->clock * 1000) / (mode->vtotal * mode->htotal);
+ ability = clk_get_rate(mixer->mod_clk);
+ /* BSP algorithm assumes 80% efficiency of VI scaler unit */
+ ability *= 80;
+ do_div(ability, mode->vdisplay * fps * max(src_w, dst_w));
+
+ required = src_h * 100 / dst_h;
+
+ if (ability < required) {
+ DRM_DEBUG_DRIVER("Using vertical coarse scaling\n");
+ vm = src_h;
+ vn = (u32)ability * dst_h / 100;
+ src_h = vn;
+ }
+
+ /* it seems that every RGB scaler has a buffer for 2048 pixels */
+ scanline = subsampled ? mixer->cfg->scanline_yuv : 2048;
+
+ if (src_w > scanline) {
+ DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n");
+ hm = src_w;
+ hn = scanline;
+ src_w = hn;
+ }
+
+ hscale = (src_w << 16) / dst_w;
+ vscale = (src_h << 16) / dst_h;
sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w,
dst_h, hscale, vscale, hphase, vphase,
@@ -153,6 +184,23 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
sun8i_vi_scaler_enable(mixer, channel, false);
}
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(hn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(hm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(hn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(hm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(vn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(vm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(vn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(vm));
+
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
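
Editor's note: to illustrate the coarse-scaling decision added above, here is a minimal user-space sketch of the same arithmetic. All numbers are hypothetical (a 1080p CRTC mode, a 297 MHz mixer module clock, a 4K source downscaled to 1080p, a 2048-pixel YUV line buffer); the 80 % efficiency factor and the DS_N/DS_M split mirror the patch.

/* Sketch only: the vertical/horizontal coarse-scaling decision from
 * sun8i_vi_layer_update_coord(), with made-up numbers. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical CRTC mode: 1920x1080@60 */
        unsigned int clock_khz = 148500, htotal = 2200, vtotal = 1125, vdisplay = 1080;
        /* hypothetical plane: 3840x2160 source scaled to 1920x1080 */
        unsigned int src_w = 3840, src_h = 2160, dst_w = 1920, dst_h = 1080;
        unsigned int scanline_yuv = 2048;       /* per-SoC line buffer limit */
        uint64_t mod_clk = 297000000;           /* mixer module clock in Hz */
        unsigned int hn = 0, hm = 0, vn = 0, vm = 0;

        unsigned int fps = (clock_khz * 1000) / (vtotal * htotal);
        /* BSP algorithm assumes 80% efficiency of the VI scaler unit */
        uint64_t ability = mod_clk * 80;
        ability /= (uint64_t)vdisplay * fps * (src_w > dst_w ? src_w : dst_w);

        unsigned int required = src_h * 100 / dst_h;

        if (ability < required) {               /* vertical coarse scaling */
                vm = src_h;
                vn = (unsigned int)ability * dst_h / 100;
                src_h = vn;
        }
        if (src_w > scanline_yuv) {             /* horizontal coarse scaling */
                hm = src_w;
                hn = scanline_yuv;
                src_w = hn;
        }

        unsigned int hscale = (src_w << 16) / dst_w;
        unsigned int vscale = (src_h << 16) / dst_h;

        printf("ability=%llu required=%u vn/vm=%u/%u hn/hm=%u/%u hscale=0x%x vscale=0x%x\n",
               (unsigned long long)ability, required, vn, vm, hn, hm, hscale, vscale);
        return 0;
}
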
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index 8a5e6d01c85d..a223a4839f45 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -24,6 +24,14 @@
((base) + 0x30 * (layer) + 0x18 + 4 * (plane))
#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \
((base) + 0xe8)
+#define SUN8I_MIXER_CHAN_VI_HDS_Y(base) \
+ ((base) + 0xf0)
+#define SUN8I_MIXER_CHAN_VI_HDS_UV(base) \
+ ((base) + 0xf4)
+#define SUN8I_MIXER_CHAN_VI_VDS_Y(base) \
+ ((base) + 0xf8)
+#define SUN8I_MIXER_CHAN_VI_VDS_UV(base) \
+ ((base) + 0xfc)
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0)
/* RGB mode should be set for RGB formats and cleared for YCbCr */
@@ -33,6 +41,9 @@
#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24)
+#define SUN8I_MIXER_CHAN_VI_DS_N(x) ((x) << 16)
+#define SUN8I_MIXER_CHAN_VI_DS_M(x) ((x) << 0)
+
struct sun8i_mixer;
struct sun8i_vi_layer {
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 0a4ce05e00ab..1dd83a757dba 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -255,11 +255,9 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
helper->fb = fb;
helper->fbdev = info;
- info->par = helper;
info->fbops = &tegra_fb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
+ drm_fb_helper_fill_info(info, helper, sizes);
offset = info->var.xoffset * bytes_per_pixel +
info->var.yoffset * fb->pitches[0];
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 4f80100ff5f3..4cce11fd8836 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -204,7 +204,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
if (bo->pages) {
dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
sg_free_table(bo->sgt);
kfree(bo->sgt);
@@ -230,7 +230,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
}
err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (err == 0) {
err = -EFAULT;
goto free_sgt;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 47c55974756d..d23c4bfde790 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
hdmi->dvi = !tegra_output_is_hdmi(output);
if (!hdmi->dvi) {
- err = tegra_hdmi_setup_audio(hdmi);
- if (err < 0)
- hdmi->dvi = true;
+ /*
+ * Make sure that the audio format has been configured before
+ * enabling audio, otherwise we may try to divide by zero.
+ */
+ if (hdmi->format.sample_rate > 0) {
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0)
+ hdmi->dvi = true;
+ }
}
if (hdmi->config->has_hda)
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 40057106f5f3..5be5a0817dfe 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2871,6 +2871,13 @@ static int tegra_sor_init(struct host1x_client *client)
* kernel is possible.
*/
if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
+ err);
+ return err;
+ }
+
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to assert SOR reset: %d\n",
@@ -2894,6 +2901,8 @@ static int tegra_sor_init(struct host1x_client *client)
err);
return err;
}
+
+ reset_control_release(sor->rst);
}
err = clk_prepare_enable(sor->clk_safe);
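
Editor's note: the SOR reset is now requested in "released" state and only acquired around the windows where the driver actually asserts or deasserts it, which lets another consumer control the same reset line the rest of the time. A condensed sketch of the consumer-side sequence follows; error handling is trimmed, and the function name is a placeholder. The reset_control_* and devm_reset_control_get_exclusive_released() calls are the real kernel APIs used in this patch.

/* Sketch: the acquire -> assert/deassert -> release window used above.
 * "rst" is assumed to come from
 * devm_reset_control_get_exclusive_released(dev, "sor") at probe time. */
#include <linux/delay.h>
#include <linux/reset.h>

static int sor_reset_window(struct reset_control *rst)
{
        int err;

        err = reset_control_acquire(rst);       /* take temporary ownership */
        if (err < 0)
                return err;

        reset_control_assert(rst);
        usleep_range(1000, 2000);
        reset_control_deassert(rst);

        reset_control_release(rst);             /* hand the reset line back */

        return 0;
}
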
@@ -3331,7 +3340,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ sor->rst = devm_reset_control_get_exclusive_released(&pdev->dev, "sor");
if (IS_ERR(sor->rst)) {
err = PTR_ERR(sor->rst);
@@ -3519,6 +3528,8 @@ static int tegra_sor_suspend(struct device *dev)
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
+
+ reset_control_release(sor->rst);
}
usleep_range(1000, 2000);
@@ -3542,9 +3553,17 @@ static int tegra_sor_resume(struct device *dev)
usleep_range(1000, 2000);
if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ clk_disable_unprepare(sor->clk);
+ return err;
+ }
+
err = reset_control_deassert(sor->rst);
if (err < 0) {
dev_err(dev, "failed to deassert reset: %d\n", err);
+ reset_control_release(sor->rst);
clk_disable_unprepare(sor->clk);
return err;
}
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 52598049c096..cb7df2086aee 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -8,7 +8,6 @@ config DRM_TILCDC
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
help
Choose this option if you have a TI SoC with LCDC display
controller, for example AM33xx in beagle-bone, DA8xx, or
diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile
index fb221e6f8885..6f8f764560e0 100644
--- a/drivers/gpu/drm/tinydrm/core/Makefile
+++ b/drivers/gpu/drm/tinydrm/core/Makefile
@@ -1,3 +1,3 @@
-tinydrm-y := tinydrm-core.o tinydrm-pipe.o tinydrm-helpers.o
+tinydrm-y := tinydrm-pipe.o tinydrm-helpers.o
obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
deleted file mode 100644
index 554abd5d3b53..000000000000
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (C) 2016 Noralf Trønnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-#include <drm/tinydrm/tinydrm.h>
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/module.h>
-
-/**
- * DOC: overview
- *
- * This library provides driver helpers for very simple display hardware.
- *
- * It is based on &drm_simple_display_pipe coupled with a &drm_connector which
- * has only one fixed &drm_display_mode. The framebuffers are backed by the
- * cma helper and have support for framebuffer flushing (dirty).
- * fbdev support is also included.
- *
- */
-
-/**
- * DOC: core
- *
- * The driver allocates &tinydrm_device, initializes it using
- * devm_tinydrm_init(), sets up the pipeline using tinydrm_display_pipe_init()
- * and registers the DRM device using devm_tinydrm_register().
- */
-
-static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
-static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
- struct drm_driver *driver)
-{
- struct drm_device *drm;
-
- /*
- * We don't embed drm_device, because that prevent us from using
- * devm_kzalloc() to allocate tinydrm_device in the driver since
- * drm_dev_put() frees the structure. The devm_ functions provide
- * for easy error handling.
- */
- drm = drm_dev_alloc(driver, parent);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- tdev->drm = drm;
- drm->dev_private = tdev;
- drm_mode_config_init(drm);
- drm->mode_config.funcs = &tinydrm_mode_config_funcs;
- drm->mode_config.allow_fb_modifiers = true;
-
- return 0;
-}
-
-static void tinydrm_fini(struct tinydrm_device *tdev)
-{
- drm_mode_config_cleanup(tdev->drm);
- tdev->drm->dev_private = NULL;
- drm_dev_put(tdev->drm);
-}
-
-static void devm_tinydrm_release(void *data)
-{
- tinydrm_fini(data);
-}
-
-/**
- * devm_tinydrm_init - Initialize tinydrm device
- * @parent: Parent device object
- * @tdev: tinydrm device
- * @driver: DRM driver
- *
- * This function initializes @tdev, the underlying DRM device and it's
- * mode_config. Resources will be automatically freed on driver detach (devres)
- * using drm_mode_config_cleanup() and drm_dev_put().
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
- struct drm_driver *driver)
-{
- int ret;
-
- ret = tinydrm_init(parent, tdev, driver);
- if (ret)
- return ret;
-
- ret = devm_add_action(parent, devm_tinydrm_release, tdev);
- if (ret)
- tinydrm_fini(tdev);
-
- return ret;
-}
-EXPORT_SYMBOL(devm_tinydrm_init);
-
-static int tinydrm_register(struct tinydrm_device *tdev)
-{
- struct drm_device *drm = tdev->drm;
- int ret;
-
- ret = drm_dev_register(tdev->drm, 0);
- if (ret)
- return ret;
-
- ret = drm_fbdev_generic_setup(drm, 0);
- if (ret)
- DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
-
- return 0;
-}
-
-static void tinydrm_unregister(struct tinydrm_device *tdev)
-{
- drm_atomic_helper_shutdown(tdev->drm);
- drm_dev_unregister(tdev->drm);
-}
-
-static void devm_tinydrm_register_release(void *data)
-{
- tinydrm_unregister(data);
-}
-
-/**
- * devm_tinydrm_register - Register tinydrm device
- * @tdev: tinydrm device
- *
- * This function registers the underlying DRM device and fbdev.
- * These resources will be automatically unregistered on driver detach (devres)
- * and the display pipeline will be disabled.
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int devm_tinydrm_register(struct tinydrm_device *tdev)
-{
- struct device *dev = tdev->drm->dev;
- int ret;
-
- ret = tinydrm_register(tdev);
- if (ret)
- return ret;
-
- ret = devm_add_action(dev, devm_tinydrm_register_release, tdev);
- if (ret)
- tinydrm_unregister(tdev);
-
- return ret;
-}
-EXPORT_SYMBOL(devm_tinydrm_register);
-
-/**
- * tinydrm_shutdown - Shutdown tinydrm
- * @tdev: tinydrm device
- *
- * This function makes sure that the display pipeline is disabled.
- * Used by drivers in their shutdown callback to turn off the display
- * on machine shutdown and reboot.
- */
-void tinydrm_shutdown(struct tinydrm_device *tdev)
-{
- drm_atomic_helper_shutdown(tdev->drm);
-}
-EXPORT_SYMBOL(tinydrm_shutdown);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index 2737b6fdadc8..6d540d93758f 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -26,164 +26,6 @@ static unsigned int spi_max;
module_param(spi_max, uint, 0400);
MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size");
-/**
- * tinydrm_memcpy - Copy clip buffer
- * @dst: Destination buffer
- * @vaddr: Source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- */
-void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
- unsigned int pitch = fb->pitches[0];
- void *src = vaddr + (clip->y1 * pitch) + (clip->x1 * cpp);
- size_t len = (clip->x2 - clip->x1) * cpp;
- unsigned int y;
-
- for (y = clip->y1; y < clip->y2; y++) {
- memcpy(dst, src, len);
- src += pitch;
- dst += len;
- }
-}
-EXPORT_SYMBOL(tinydrm_memcpy);
-
-/**
- * tinydrm_swab16 - Swap bytes into clip buffer
- * @dst: RGB565 destination buffer
- * @vaddr: RGB565 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- */
-void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- size_t len = (clip->x2 - clip->x1) * sizeof(u16);
- unsigned int x, y;
- u16 *src, *buf;
-
- /*
- * The cma memory is write-combined so reads are uncached.
- * Speed up by fetching one line at a time.
- */
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++)
- *dst++ = swab16(*src++);
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_swab16);
-
-/**
- * tinydrm_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
- * @dst: RGB565 destination buffer
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- * @swap: Swap bytes
- *
- * Drivers can use this function for RGB565 devices that don't natively
- * support XRGB8888.
- */
-void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap)
-{
- size_t len = (clip->x2 - clip->x1) * sizeof(u32);
- unsigned int x, y;
- u32 *src, *buf;
- u16 val16;
-
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++) {
- val16 = ((*src & 0x00F80000) >> 8) |
- ((*src & 0x0000FC00) >> 5) |
- ((*src & 0x000000F8) >> 3);
- src++;
- if (swap)
- *dst++ = swab16(val16);
- else
- *dst++ = val16;
- }
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
-
-/**
- * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
- * @dst: 8-bit grayscale destination buffer
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- *
- * Drm doesn't have native monochrome or grayscale support.
- * Such drivers can announce the commonly supported XR24 format to userspace
- * and use this function to convert to the native format.
- *
- * Monochrome drivers will use the most significant bit,
- * where 1 means foreground color and 0 background color.
- *
- * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
- */
-void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
- unsigned int x, y;
- void *buf;
- u32 *src;
-
- if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
- return;
- /*
- * The cma memory is write-combined so reads are uncached.
- * Speed up by fetching one line at a time.
- */
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++) {
- u8 r = (*src & 0x00ff0000) >> 16;
- u8 g = (*src & 0x0000ff00) >> 8;
- u8 b = *src & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dst++ = (3 * r + 6 * g + b) / 10;
- src++;
- }
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8);
-
#if IS_ENABLED(CONFIG_SPI)
/**
@@ -365,3 +207,5 @@ int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
EXPORT_SYMBOL(tinydrm_spi_transfer);
#endif /* CONFIG_SPI */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index bb5b1c1e21ba..bb8a7ed8ddf6 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -13,7 +13,7 @@
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
-#include <drm/tinydrm/tinydrm.h>
+#include <drm/drm_simple_kms_helper.h>
struct tinydrm_connector {
struct drm_connector base;
@@ -129,7 +129,8 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode,
/**
* tinydrm_display_pipe_init - Initialize display pipe
- * @tdev: tinydrm device
+ * @drm: DRM device
+ * @pipe: Display pipe
* @funcs: Display pipe functions
* @connector_type: Connector type
* @formats: Array of supported formats (DRM_FORMAT\_\*)
@@ -143,16 +144,15 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode,
* Returns:
* Zero on success, negative error code on failure.
*/
-int
-tinydrm_display_pipe_init(struct tinydrm_device *tdev,
- const struct drm_simple_display_pipe_funcs *funcs,
- int connector_type,
- const uint32_t *formats,
- unsigned int format_count,
- const struct drm_display_mode *mode,
- unsigned int rotation)
+int tinydrm_display_pipe_init(struct drm_device *drm,
+ struct drm_simple_display_pipe *pipe,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ int connector_type,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation)
{
- struct drm_device *drm = tdev->drm;
struct drm_display_mode mode_copy;
struct drm_connector *connector;
int ret;
@@ -177,7 +177,7 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
if (IS_ERR(connector))
return PTR_ERR(connector);
- return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
+ return drm_simple_display_pipe_init(drm, pipe, funcs, formats,
format_count, modifiers, connector);
}
EXPORT_SYMBOL(tinydrm_display_pipe_init);
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
index 8bbd0beafc6a..5773d0fb6ca1 100644
--- a/drivers/gpu/drm/tinydrm/hx8357d.c
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -16,7 +16,9 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -46,16 +48,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -171,6 +175,8 @@ out_enable:
}
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
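
Editor's note: all of the converted pipe and dirty callbacks in this series use the same hotplug guard so that hardware access stops once the device has been unplugged. A minimal sketch of the pattern, with a placeholder function name:

/* Sketch: the drm_dev_enter()/drm_dev_exit() guard used in the callbacks
 * above; "drm" is the drm_device embedded in struct mipi_dbi. */
#include <drm/drm_drv.h>

static void example_hw_access(struct drm_device *drm)
{
        int idx;

        if (!drm_dev_enter(drm, &idx))  /* device already unplugged */
                return;

        /* ... touch the hardware only inside the enter/exit section ... */

        drm_dev_exit(idx);
}
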
static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
@@ -181,7 +187,7 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
};
static const struct drm_display_mode yx350hv15_mode = {
- TINYDRM_MODE(320, 480, 60, 75),
+ DRM_SIMPLE_MODE(320, 480, 60, 75),
};
DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
@@ -189,6 +195,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
@@ -213,15 +220,25 @@ MODULE_DEVICE_TABLE(spi, hx8357d_id);
static int hx8357d_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
@@ -238,21 +255,36 @@ static int hx8357d_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs,
- &hx8357d_driver, &yx350hv15_mode, rotation);
+ ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void hx8357d_shutdown(struct spi_device *spi)
+static int hx8357d_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void hx8357d_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver hx8357d_spi_driver = {
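
Editor's note: every tinydrm driver converted in this series follows the same probe/remove shape: allocate the mipi_dbi structure with the drm_device embedded, tie its lifetime to devres with devm_drm_dev_init(), register, and unplug rather than unregister-and-free on remove. A condensed sketch of that flow, with "foo_*" placeholder names and panel-specific setup omitted:

/* Sketch: probe/remove skeleton shared by the converted drivers. */
static int foo_probe(struct spi_device *spi)
{
        struct mipi_dbi *mipi;
        struct drm_device *drm;
        int ret;

        mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);      /* freed in ->release */
        if (!mipi)
                return -ENOMEM;

        drm = &mipi->drm;
        ret = devm_drm_dev_init(&spi->dev, drm, &foo_driver);
        if (ret) {
                kfree(mipi);
                return ret;
        }
        drm_mode_config_init(drm);

        /* gpios, regulator, backlight, mipi_dbi_spi_init(), ... */

        ret = mipi_dbi_init(mipi, &foo_pipe_funcs, &foo_mode, 0);
        if (ret)
                return ret;     /* devres + ->release clean up from here on */

        drm_mode_config_reset(drm);

        ret = drm_dev_register(drm, 0);
        if (ret)
                return ret;

        spi_set_drvdata(spi, drm);
        drm_fbdev_generic_setup(drm, 0);
        return 0;
}

static int foo_remove(struct spi_device *spi)
{
        struct drm_device *drm = spi_get_drvdata(spi);

        drm_dev_unplug(drm);                    /* device gone, object stays */
        drm_atomic_helper_shutdown(drm);        /* final free via ->release */
        return 0;
}
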
@@ -262,6 +294,7 @@ static struct spi_driver hx8357d_spi_driver = {
},
.id_table = hx8357d_id,
.probe = hx8357d_probe,
+ .remove = hx8357d_remove,
.shutdown = hx8357d_shutdown,
};
module_spi_driver(hx8357d_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 43a3b68d90a2..4b1a587c0134 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,9 +20,11 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -81,20 +83,22 @@ static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data)
static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
bool swap = mipi->swap_bytes;
u16 x_start, y_start;
u16 x1, x2, y1, y2;
- int ret = 0;
+ int idx, ret = 0;
bool full;
void *tr;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -157,6 +161,8 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -181,19 +187,21 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
- struct device *dev = tdev->drm->dev;
+ struct device *dev = pipe->crtc.dev->dev;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
.y1 = 0,
.y2 = fb->height,
};
- int ret;
+ int ret, idx;
u8 am_id;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
mipi_dbi_hw_reset(mipi);
@@ -207,7 +215,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000);
if (ret) {
DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
- return;
+ goto out_exit;
}
ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000);
ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000);
@@ -280,15 +288,23 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
mipi->enabled = true;
ili9225_fb_dirty(fb, &rect);
+out_exit:
+ drm_dev_exit(idx);
}
static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
DRM_DEBUG_KMS("\n");
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
+
if (!mipi->enabled)
return;
@@ -301,7 +317,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
mipi->enabled = false;
}
-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
+static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
size_t num)
{
struct spi_device *spi = mipi->spi;
@@ -311,11 +327,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
gpiod_set_value_cansleep(mipi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
if (ret || !num)
return ret;
- if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+ if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
bpw = 16;
gpiod_set_value_cansleep(mipi->dc, 1);
@@ -332,7 +348,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
};
static const struct drm_display_mode ili9225_mode = {
- TINYDRM_MODE(176, 220, 35, 44),
+ DRM_SIMPLE_MODE(176, 220, 35, 44),
};
DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
@@ -341,6 +357,7 @@ static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &ili9225_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
@@ -364,15 +381,25 @@ MODULE_DEVICE_TABLE(spi, ili9225_id);
static int ili9225_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *rs;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -394,21 +421,36 @@ static int ili9225_probe(struct spi_device *spi)
/* override the command function set in mipi_dbi_spi_init() */
mipi->command = ili9225_dbi_command;
- ret = mipi_dbi_init(&spi->dev, mipi, &ili9225_pipe_funcs,
- &ili9225_driver, &ili9225_mode, rotation);
+ ret = mipi_dbi_init(mipi, &ili9225_pipe_funcs, &ili9225_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
- return devm_tinydrm_register(&mipi->tinydrm);
+ return 0;
}
-static void ili9225_shutdown(struct spi_device *spi)
+static int ili9225_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(&mipi->tinydrm);
+static void ili9225_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9225_spi_driver = {
@@ -419,6 +461,7 @@ static struct spi_driver ili9225_spi_driver = {
},
.id_table = ili9225_id,
.probe = ili9225_probe,
+ .remove = ili9225_remove,
.shutdown = ili9225_shutdown,
};
module_spi_driver(ili9225_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 713bb2dd7e04..4ade9e4b924f 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,9 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -52,16 +54,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -127,6 +131,8 @@ out_enable:
addr_mode |= ILI9341_MADCTL_BGR;
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
@@ -137,7 +143,7 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
};
static const struct drm_display_mode yx240qv29_mode = {
- TINYDRM_MODE(240, 320, 37, 49),
+ DRM_SIMPLE_MODE(240, 320, 37, 49),
};
DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
@@ -145,6 +151,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &ili9341_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
@@ -169,15 +176,25 @@ MODULE_DEVICE_TABLE(spi, ili9341_id);
static int ili9341_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -200,21 +217,36 @@ static int ili9341_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
- &ili9341_driver, &yx240qv29_mode, rotation);
+ ret = mipi_dbi_init(mipi, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void ili9341_shutdown(struct spi_device *spi)
+static int ili9341_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9341_spi_driver = {
@@ -224,6 +256,7 @@ static struct spi_driver ili9341_spi_driver = {
},
.id_table = ili9341_id,
.probe = ili9341_probe,
+ .remove = ili9341_remove,
.shutdown = ili9341_shutdown,
};
module_spi_driver(ili9341_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 82a92ec9ae3c..8e169846fbd8 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,7 +17,9 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -54,16 +56,18 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -135,6 +139,8 @@ out_enable:
addr_mode |= ILI9341_MADCTL_BGR;
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
@@ -145,7 +151,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
};
static const struct drm_display_mode mi0283qt_mode = {
- TINYDRM_MODE(320, 240, 58, 43),
+ DRM_SIMPLE_MODE(320, 240, 58, 43),
};
DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
@@ -154,6 +160,7 @@ static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
@@ -178,15 +185,25 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id);
static int mi0283qt_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -213,35 +230,46 @@ static int mi0283qt_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &mi0283qt_pipe_funcs,
- &mi0283qt_driver, &mi0283qt_mode, rotation);
+ ret = mipi_dbi_init(mipi, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ spi_set_drvdata(spi, drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void mi0283qt_shutdown(struct spi_device *spi)
+static int mi0283qt_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
- tinydrm_shutdown(&mipi->tinydrm);
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
}
-static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+static void mi0283qt_shutdown(struct spi_device *spi)
{
- struct mipi_dbi *mipi = dev_get_drvdata(dev);
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
- return drm_mode_config_helper_suspend(mipi->tinydrm.drm);
+static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+{
+ return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
}
static int __maybe_unused mi0283qt_pm_resume(struct device *dev)
{
- struct mipi_dbi *mipi = dev_get_drvdata(dev);
-
- drm_mode_config_helper_resume(mipi->tinydrm.drm);
+ drm_mode_config_helper_resume(dev_get_drvdata(dev));
return 0;
}
@@ -259,6 +287,7 @@ static struct spi_driver mi0283qt_spi_driver = {
},
.id_table = mi0283qt_id,
.probe = mi0283qt_probe,
+ .remove = mi0283qt_remove,
.shutdown = mi0283qt_shutdown,
};
module_spi_driver(mi0283qt_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 918f77c7de34..85761b4abb83 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -21,6 +21,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
@@ -153,16 +154,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
*/
int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
{
+ u8 *cmdbuf;
int ret;
+ /* SPI requires dma-safe buffers */
+ cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
+ if (!cmdbuf)
+ return -ENOMEM;
+
mutex_lock(&mipi->cmdlock);
- ret = mipi->command(mipi, cmd, data, len);
+ ret = mipi->command(mipi, cmdbuf, data, len);
mutex_unlock(&mipi->cmdlock);
+ kfree(cmdbuf);
+
return ret;
}
EXPORT_SYMBOL(mipi_dbi_command_buf);
+/* This should only be used by mipi_dbi_command() */
+int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
+
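
Editor's note: the command byte is now duplicated to the heap because the SPI core may DMA-map the buffer, and stack (or rodata) memory is not guaranteed to be DMA-capable. A minimal sketch of the general rule; the helper name is a placeholder, and spi_write() stands in for the driver's actual transfer path.

/* Sketch: never hand a stack variable to the SPI core when the transfer
 * may be DMA-mapped; duplicate it to kmalloc'ed memory first. */
#include <linux/slab.h>
#include <linux/spi/spi.h>

static int send_cmd_dma_safe(struct spi_device *spi, u8 cmd)
{
        u8 *buf = kmemdup(&cmd, 1, GFP_KERNEL); /* heap copy is DMA-safe */
        int ret;

        if (!buf)
                return -ENOMEM;

        ret = spi_write(spi, buf, 1);
        kfree(buf);

        return ret;
}
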
/**
* mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
* @dst: The destination buffer
@@ -192,12 +219,12 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- tinydrm_swab16(dst, src, fb, clip);
+ drm_fb_swab16(dst, src, fb, clip);
else
- tinydrm_memcpy(dst, src, fb, clip);
+ drm_fb_memcpy(dst, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- tinydrm_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
break;
default:
dev_err_once(fb->dev->dev, "Format is not supported: %s\n",
@@ -216,18 +243,20 @@ EXPORT_SYMBOL(mipi_dbi_buf_copy);
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
bool swap = mipi->swap_bytes;
- int ret = 0;
+ int idx, ret = 0;
bool full;
void *tr;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -254,6 +283,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
/**
@@ -308,19 +339,29 @@ void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
.y1 = 0,
.y2 = fb->height,
};
+ int idx;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return;
mipi->enabled = true;
mipi_dbi_fb_dirty(fb, &rect);
backlight_enable(mipi->backlight);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_enable_flush);
static void mipi_dbi_blank(struct mipi_dbi *mipi)
{
- struct drm_device *drm = mipi->tinydrm.drm;
+ struct drm_device *drm = &mipi->drm;
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
size_t len = width * height * 2;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
memset(mipi->tx_buf, 0, len);
@@ -330,6 +371,8 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
(height >> 8) & 0xFF, (height - 1) & 0xFF);
mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)mipi->tx_buf, len);
+
+ drm_dev_exit(idx);
}
/**
@@ -342,8 +385,10 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
*/
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+
+ if (!mipi->enabled)
+ return;
DRM_DEBUG_KMS("\n");
@@ -359,6 +404,12 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
+static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static const uint32_t mipi_dbi_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -366,31 +417,27 @@ static const uint32_t mipi_dbi_formats[] = {
/**
* mipi_dbi_init - MIPI DBI initialization
- * @dev: Parent device
* @mipi: &mipi_dbi structure to initialize
- * @pipe_funcs: Display pipe functions
- * @driver: DRM driver
+ * @funcs: Display pipe functions
* @mode: Display mode
* @rotation: Initial rotation in degrees Counter Clock Wise
*
- * This function initializes a &mipi_dbi structure and it's underlying
- * @tinydrm_device. It also sets up the display pipeline.
+ * This function sets up a &drm_simple_display_pipe with a &drm_connector that
+ * has one fixed &drm_display_mode which is rotated according to @rotation.
+ * This mode is used to set the mode config min/max width/height properties.
+ * Additionally &mipi_dbi.tx_buf is allocated.
*
* Supported formats: Native RGB565 and emulated XRGB8888.
*
- * Objects created by this function will be automatically freed on driver
- * detach (devres).
- *
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver,
+int mipi_dbi_init(struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation)
{
size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
- struct tinydrm_device *tdev = &mipi->tinydrm;
+ struct drm_device *drm = &mipi->drm;
int ret;
if (!mipi->command)
@@ -398,16 +445,12 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
mutex_init(&mipi->cmdlock);
- mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ mipi->tx_buf = devm_kmalloc(drm->dev, bufsize, GFP_KERNEL);
if (!mipi->tx_buf)
return -ENOMEM;
- ret = devm_tinydrm_init(dev, tdev, driver);
- if (ret)
- return ret;
-
/* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
- ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
+ ret = tinydrm_display_pipe_init(drm, &mipi->pipe, funcs,
DRM_MODE_CONNECTOR_VIRTUAL,
mipi_dbi_formats,
ARRAY_SIZE(mipi_dbi_formats), mode,
@@ -415,21 +458,40 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
if (ret)
return ret;
- drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
+ drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
- tdev->drm->mode_config.preferred_depth = 16;
+ drm->mode_config.funcs = &mipi_dbi_mode_config_funcs;
+ drm->mode_config.preferred_depth = 16;
mipi->rotation = rotation;
- drm_mode_config_reset(tdev->drm);
-
DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- tdev->drm->mode_config.preferred_depth, rotation);
+ drm->mode_config.preferred_depth, rotation);
return 0;
}
EXPORT_SYMBOL(mipi_dbi_init);
/**
+ * mipi_dbi_release - DRM driver release helper
+ * @drm: DRM device
+ *
+ * This function finalizes and frees &mipi_dbi.
+ *
+ * Drivers can use this as their &drm_driver->release callback.
+ */
+void mipi_dbi_release(struct drm_device *drm)
+{
+ struct mipi_dbi *dbi = drm_to_mipi_dbi(drm);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_mode_config_cleanup(drm);
+ drm_dev_fini(drm);
+ kfree(dbi);
+}
+EXPORT_SYMBOL(mipi_dbi_release);
+
+/**
* mipi_dbi_hw_reset - Hardware reset of controller
* @mipi: MIPI DBI structure
*
@@ -481,7 +543,7 @@ EXPORT_SYMBOL(mipi_dbi_display_is_on);
static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
{
- struct device *dev = mipi->tinydrm.drm->dev;
+ struct device *dev = mipi->drm.dev;
int ret;
if (mipi->regulator) {
@@ -774,18 +836,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
return 0;
}
-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
u8 *parameters, size_t num)
{
- unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
+ unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
int ret;
- if (mipi_dbi_command_is_read(mipi, cmd))
+ if (mipi_dbi_command_is_read(mipi, *cmd))
return -ENOTSUPP;
- MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
- ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
+ ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
if (ret || !num)
return ret;
@@ -794,7 +856,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
/* MIPI DBI Type C Option 3 */
-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
u8 *data, size_t len)
{
struct spi_device *spi = mipi->spi;
@@ -803,7 +865,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
struct spi_transfer tr[2] = {
{
.speed_hz = speed_hz,
- .tx_buf = &cmd,
+ .tx_buf = cmd,
.len = 1,
}, {
.speed_hz = speed_hz,
@@ -821,8 +883,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
* Support non-standard 24-bit and 32-bit Nokia read commands which
* start with a dummy clock, so we need to read an extra byte.
*/
- if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
- cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
+ if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
+ *cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
if (!(len == 3 || len == 4))
return -EINVAL;
@@ -852,7 +914,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
}
- MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
err_free:
kfree(buf);
@@ -860,7 +922,7 @@ err_free:
return ret;
}
-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
u8 *par, size_t num)
{
struct spi_device *spi = mipi->spi;
@@ -868,18 +930,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
u32 speed_hz;
int ret;
- if (mipi_dbi_command_is_read(mipi, cmd))
+ if (mipi_dbi_command_is_read(mipi, *cmd))
return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
- MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
gpiod_set_value_cansleep(mipi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
if (ret || !num)
return ret;
- if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
bpw = 16;
gpiod_set_value_cansleep(mipi->dc, 1);
@@ -926,7 +988,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in
* drm_gem_cma_create(). The dma_addr returned will be a physical
- * adddress which might be different from the bus address, but this is
+ * address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
* re-maps it on the SPI master device using the DMA streaming API
@@ -976,11 +1038,16 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
unsigned int i;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return -ENODEV;
buf = memdup_user_nul(ubuf, count);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto err_exit;
+ }
/* strip trailing whitespace */
for (i = count - 1; i > 0; i--)
@@ -1016,6 +1083,8 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
err_free:
kfree(buf);
+err_exit:
+ drm_dev_exit(idx);
return ret < 0 ? ret : count;
}
@@ -1024,8 +1093,11 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
{
struct mipi_dbi *mipi = m->private;
u8 cmd, val[4];
+ int ret, idx;
size_t len;
- int ret;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return -ENODEV;
for (cmd = 0; cmd < 255; cmd++) {
if (!mipi_dbi_command_is_read(mipi, cmd))
@@ -1056,6 +1128,8 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
seq_printf(m, "%*phN\n", (int)len, val);
}
+ drm_dev_exit(idx);
+
return 0;
}
@@ -1088,8 +1162,7 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
*/
int mipi_dbi_debugfs_init(struct drm_minor *minor)
{
- struct tinydrm_device *tdev = minor->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
if (mipi->read_commands)
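A minimal sketch, not part of this patch, of how a panel driver is expected to use the new mipi_dbi_init() signature together with mipi_dbi_release(); the my_* names, the pipe funcs and the display mode are placeholders:

static const struct drm_display_mode my_mode = {
        DRM_SIMPLE_MODE(240, 240, 37, 37),
};

static struct drm_driver my_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        /* Frees the mipi_dbi that embeds the drm_device. */
        .release = mipi_dbi_release,
        /* fops, GEM ops, debugfs_init, name/desc elided */
};

static int my_probe_tail(struct mipi_dbi *mipi)
{
        int ret;

        /* No dev/driver arguments any more: the drm_device is embedded. */
        ret = mipi_dbi_init(mipi, &my_pipe_funcs, &my_mode, 0 /* rotation */);
        if (ret)
                return ret;

        drm_mode_config_reset(&mipi->drm);

        return drm_dev_register(&mipi->drm, 0);
}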
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index b037c6540cf3..370629e2de94 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,14 +26,17 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/tinydrm/tinydrm.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#define REPAPER_RID_G2_COG_ID 0x12
@@ -59,7 +62,8 @@ enum repaper_epd_border_byte {
};
struct repaper_epd {
- struct tinydrm_device tinydrm;
+ struct drm_device drm;
+ struct drm_simple_display_pipe pipe;
struct spi_device *spi;
struct gpio_desc *panel_on;
@@ -88,10 +92,9 @@ struct repaper_epd {
bool partial;
};
-static inline struct repaper_epd *
-epd_from_tinydrm(struct tinydrm_device *tdev)
+static inline struct repaper_epd *drm_to_epd(struct drm_device *drm)
{
- return container_of(tdev, struct repaper_epd, tinydrm);
+ return container_of(drm, struct repaper_epd, drm);
}
static int repaper_spi_transfer(struct spi_device *spi, u8 header,
@@ -529,11 +532,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(fb->dev);
struct drm_rect clip;
+ int idx, ret = 0;
u8 *buf = NULL;
- int ret = 0;
+
+ if (!epd->enabled)
+ return 0;
+
+ if (!drm_dev_enter(fb->dev, &idx))
+ return -ENODEV;
/* repaper can't do partial updates */
clip.x1 = 0;
@@ -541,17 +549,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
clip.y1 = 0;
clip.y2 = fb->height;
- if (!epd->enabled)
- return 0;
-
repaper_get_temperature(epd);
DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
epd->factored_stage_time);
buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_exit;
+ }
if (import_attach) {
ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
@@ -560,7 +567,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
goto out_free;
}
- tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
+ drm_fb_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
if (import_attach) {
ret = dma_buf_end_cpu_access(import_attach->dmabuf,
@@ -620,6 +627,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
out_free:
kfree(buf);
+out_exit:
+ drm_dev_exit(idx);
return ret;
}
@@ -645,12 +654,14 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
struct spi_device *spi = epd->spi;
struct device *dev = &spi->dev;
bool dc_ok = false;
- int i, ret;
+ int i, ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_DRIVER("\n");
@@ -689,7 +700,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (!i) {
DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n");
power_off(epd);
- return;
+ goto out_exit;
}
repaper_read_id(spi);
@@ -700,7 +711,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
else
dev_err(dev, "wrong COG ID 0x%02x\n", ret);
power_off(epd);
- return;
+ goto out_exit;
}
/* Disable OE */
@@ -713,7 +724,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
else
DRM_DEV_ERROR(dev, "panel is reported broken\n");
power_off(epd);
- return;
+ goto out_exit;
}
/* Power saving mode */
@@ -753,7 +764,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
power_off(epd);
- return;
+ goto out_exit;
}
if (ret & 0x40) {
@@ -765,7 +776,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (!dc_ok) {
DRM_DEV_ERROR(dev, "dc/dc failed\n");
power_off(epd);
- return;
+ goto out_exit;
}
/*
@@ -776,15 +787,26 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
epd->enabled = true;
epd->partial = false;
+out_exit:
+ drm_dev_exit(idx);
}
static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
struct spi_device *spi = epd->spi;
unsigned int line;
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
+
+ if (!epd->enabled)
+ return;
+
DRM_DEBUG_DRIVER("\n");
epd->enabled = false;
@@ -855,33 +877,50 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
+static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void repaper_release(struct drm_device *drm)
+{
+ struct repaper_epd *epd = drm_to_epd(drm);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_mode_config_cleanup(drm);
+ drm_dev_fini(drm);
+ kfree(epd);
+}
+
static const uint32_t repaper_formats[] = {
DRM_FORMAT_XRGB8888,
};
static const struct drm_display_mode repaper_e1144cs021_mode = {
- TINYDRM_MODE(128, 96, 29, 22),
+ DRM_SIMPLE_MODE(128, 96, 29, 22),
};
static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
0x00, 0x0f, 0xff, 0x00 };
static const struct drm_display_mode repaper_e1190cs021_mode = {
- TINYDRM_MODE(144, 128, 36, 32),
+ DRM_SIMPLE_MODE(144, 128, 36, 32),
};
static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03,
0xfc, 0x00, 0x00, 0xff };
static const struct drm_display_mode repaper_e2200cs021_mode = {
- TINYDRM_MODE(200, 96, 46, 22),
+ DRM_SIMPLE_MODE(200, 96, 46, 22),
};
static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
0x01, 0xff, 0xe0, 0x00 };
static const struct drm_display_mode repaper_e2271cs021_mode = {
- TINYDRM_MODE(264, 176, 57, 38),
+ DRM_SIMPLE_MODE(264, 176, 57, 38),
};
static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
@@ -893,6 +932,7 @@ static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &repaper_fops,
+ .release = repaper_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
@@ -925,11 +965,11 @@ static int repaper_probe(struct spi_device *spi)
const struct spi_device_id *spi_id;
const struct of_device_id *match;
struct device *dev = &spi->dev;
- struct tinydrm_device *tdev;
enum repaper_model model;
const char *thermal_zone;
struct repaper_epd *epd;
size_t line_buffer_size;
+ struct drm_device *drm;
int ret;
match = of_match_device(repaper_of_match, dev);
@@ -949,10 +989,21 @@ static int repaper_probe(struct spi_device *spi)
}
}
- epd = devm_kzalloc(dev, sizeof(*epd), GFP_KERNEL);
+ epd = kzalloc(sizeof(*epd), GFP_KERNEL);
if (!epd)
return -ENOMEM;
+ drm = &epd->drm;
+
+ ret = devm_drm_dev_init(dev, drm, &repaper_driver);
+ if (ret) {
+ kfree(epd);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+ drm->mode_config.funcs = &repaper_mode_config_funcs;
+
epd->spi = spi;
epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW);
@@ -1063,32 +1114,41 @@ static int repaper_probe(struct spi_device *spi)
if (!epd->current_frame)
return -ENOMEM;
- tdev = &epd->tinydrm;
-
- ret = devm_tinydrm_init(dev, tdev, &repaper_driver);
- if (ret)
- return ret;
-
- ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
+ ret = tinydrm_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
DRM_MODE_CONNECTOR_VIRTUAL,
repaper_formats,
ARRAY_SIZE(repaper_formats), mode, 0);
if (ret)
return ret;
- drm_mode_config_reset(tdev->drm);
- spi_set_drvdata(spi, tdev);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
- return devm_tinydrm_register(tdev);
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void repaper_shutdown(struct spi_device *spi)
+static int repaper_remove(struct spi_device *spi)
{
- struct tinydrm_device *tdev = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(tdev);
+static void repaper_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver repaper_spi_driver = {
@@ -1099,6 +1159,7 @@ static struct spi_driver repaper_spi_driver = {
},
.id_table = repaper_id,
.probe = repaper_probe,
+ .remove = repaper_remove,
.shutdown = repaper_shutdown,
};
module_spi_driver(repaper_spi_driver);
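A hedged sketch of the probe/remove shape this conversion produces; struct my_epd (which embeds the drm_device), my_driver and my_mode_config_funcs are placeholders and the error paths are trimmed:

static int my_probe(struct spi_device *spi)
{
        struct my_epd *epd;
        struct drm_device *drm;
        int ret;

        epd = kzalloc(sizeof(*epd), GFP_KERNEL);
        if (!epd)
                return -ENOMEM;

        drm = &epd->drm;
        ret = devm_drm_dev_init(&spi->dev, drm, &my_driver);
        if (ret) {
                kfree(epd);     /* release callback not reachable yet */
                return ret;
        }

        drm_mode_config_init(drm);
        drm->mode_config.funcs = &my_mode_config_funcs;

        /* gpios, regulators and display pipe setup elided */

        drm_mode_config_reset(drm);

        ret = drm_dev_register(drm, 0);
        if (ret)
                return ret;

        spi_set_drvdata(spi, drm);
        drm_fbdev_generic_setup(drm, 0);

        return 0;
}

static int my_remove(struct spi_device *spi)
{
        struct drm_device *drm = spi_get_drvdata(spi);

        drm_dev_unplug(drm);            /* later device access bails out */
        drm_atomic_helper_shutdown(drm);

        return 0;
}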
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 01a8077954b3..36bb16a15f7e 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,9 +17,12 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
@@ -75,7 +78,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
if (!buf)
return;
- tinydrm_xrgb8888_to_gray8(buf, vaddr, fb, clip);
+ drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, clip);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
@@ -116,14 +119,15 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
- int start, end;
- int ret = 0;
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ int start, end, idx, ret = 0;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
/* 3 pixels per byte, so grow clip to nearest multiple of 3 */
rect->x1 = rounddown(rect->x1, 3);
rect->x2 = roundup(rect->x2, 3);
@@ -151,6 +155,8 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -175,8 +181,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_rect rect = {
.x1 = 0,
@@ -184,14 +189,17 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
.y1 = 0,
.y2 = fb->height,
};
- int ret;
+ int idx, ret;
u8 addr_mode;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_reset(mipi);
if (ret)
- return;
+ goto out_exit;
mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
@@ -244,12 +252,20 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
st7586_fb_dirty(fb, &rect);
mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+out_exit:
+ drm_dev_exit(idx);
}
static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
DRM_DEBUG_KMS("\n");
@@ -264,46 +280,6 @@ static const u32 st7586_formats[] = {
DRM_FORMAT_XRGB8888,
};
-static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver, const struct drm_display_mode *mode,
- unsigned int rotation)
-{
- size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay;
- struct tinydrm_device *tdev = &mipi->tinydrm;
- int ret;
-
- mutex_init(&mipi->cmdlock);
-
- mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
- if (!mipi->tx_buf)
- return -ENOMEM;
-
- ret = devm_tinydrm_init(dev, tdev, driver);
- if (ret)
- return ret;
-
- ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- st7586_formats,
- ARRAY_SIZE(st7586_formats),
- mode, rotation);
- if (ret)
- return ret;
-
- drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
-
- tdev->drm->mode_config.preferred_depth = 32;
- mipi->rotation = rotation;
-
- drm_mode_config_reset(tdev->drm);
-
- DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- tdev->drm->mode_config.preferred_depth, rotation);
-
- return 0;
-}
-
static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.enable = st7586_pipe_enable,
.disable = st7586_pipe_disable,
@@ -311,8 +287,14 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
+static const struct drm_mode_config_funcs st7586_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static const struct drm_display_mode st7586_mode = {
- TINYDRM_MODE(178, 128, 37, 27),
+ DRM_SIMPLE_MODE(178, 128, 37, 27),
};
DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
@@ -321,6 +303,7 @@ static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7586_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
@@ -345,15 +328,35 @@ MODULE_DEVICE_TABLE(spi, st7586_id);
static int st7586_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *a0;
u32 rotation = 0;
+ size_t bufsize;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &st7586_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+ drm->mode_config.preferred_depth = 32;
+ drm->mode_config.funcs = &st7586_mode_config_funcs;
+
+ mutex_init(&mipi->cmdlock);
+
+ bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
+ mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ if (!mipi->tx_buf)
+ return -ENOMEM;
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -367,6 +370,7 @@ static int st7586_probe(struct spi_device *spi)
}
device_property_read_u32(dev, "rotation", &rotation);
+ mipi->rotation = rotation;
ret = mipi_dbi_spi_init(spi, mipi, a0);
if (ret)
@@ -384,21 +388,44 @@ static int st7586_probe(struct spi_device *spi)
*/
mipi->swap_bytes = true;
- ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver,
- &st7586_mode, rotation);
+ ret = tinydrm_display_pipe_init(drm, &mipi->pipe, &st7586_pipe_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ st7586_formats, ARRAY_SIZE(st7586_formats),
+ &st7586_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
- return devm_tinydrm_register(&mipi->tinydrm);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
+ drm->mode_config.preferred_depth, rotation);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void st7586_shutdown(struct spi_device *spi)
+static int st7586_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(&mipi->tinydrm);
+static void st7586_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver st7586_spi_driver = {
@@ -409,6 +436,7 @@ static struct spi_driver st7586_spi_driver = {
},
.id_table = st7586_id,
.probe = st7586_probe,
+ .remove = st7586_remove,
.shutdown = st7586_shutdown,
};
module_spi_driver(st7586_spi_driver);
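A hedged sketch of the drm_dev_enter/drm_dev_exit guard used throughout the display-pipe callbacks above; my_fb_dirty is a placeholder name, not a function from this patch:

static void my_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
        int idx;

        /* Do nothing once the device has been unplugged. */
        if (!drm_dev_enter(fb->dev, &idx))
                return;

        /* ... hardware access goes here ... */

        drm_dev_exit(idx);
}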
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3bab9a9569a6..ce9109e613e0 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,9 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
@@ -41,16 +43,18 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
- int ret;
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ int ret, idx;
u8 addr_mode;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_reset(mipi);
if (ret)
- return;
+ goto out_exit;
msleep(150);
@@ -101,6 +105,8 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
msleep(20);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
@@ -111,7 +117,7 @@ static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
};
static const struct drm_display_mode jd_t18003_t01_mode = {
- TINYDRM_MODE(128, 160, 28, 35),
+ DRM_SIMPLE_MODE(128, 160, 28, 35),
};
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -120,6 +126,7 @@ static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7735r_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
@@ -144,15 +151,25 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -178,21 +195,36 @@ static int st7735r_probe(struct spi_device *spi)
/* Cannot read from Adafruit 1.8" display via SPI */
mipi->read_commands = NULL;
- ret = mipi_dbi_init(&spi->dev, mipi, &jd_t18003_t01_pipe_funcs,
- &st7735r_driver, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_init(mipi, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void st7735r_shutdown(struct spi_device *spi)
+static int st7735r_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void st7735r_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver st7735r_spi_driver = {
@@ -203,6 +235,7 @@ static struct spi_driver st7735r_spi_driver = {
},
.id_table = st7735r_id,
.probe = st7735r_probe,
+ .remove = st7735r_remove,
.shutdown = st7735r_shutdown,
};
module_spi_driver(st7735r_spi_driver);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3f56647cdb35..2845fceb2fbd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
* ttm_global_mutex - protecting the global BO state
*/
DEFINE_MUTEX(ttm_global_mutex);
-struct ttm_bo_global ttm_bo_glob = {
- .use_count = 0
-};
+unsigned ttm_bo_glob_use_count;
+struct ttm_bo_global ttm_bo_glob;
static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -876,8 +875,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
reservation_object_add_shared_fence(bo->resv, fence);
ret = reservation_object_reserve_shared(bo->resv, 1);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ dma_fence_put(fence);
return ret;
+ }
dma_fence_put(bo->moving);
bo->moving = fence;
@@ -1529,12 +1530,13 @@ static void ttm_bo_global_release(void)
struct ttm_bo_global *glob = &ttm_bo_glob;
mutex_lock(&ttm_global_mutex);
- if (--glob->use_count > 0)
+ if (--ttm_bo_glob_use_count > 0)
goto out;
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
ttm_mem_global_release(&ttm_mem_glob);
+ memset(glob, 0, sizeof(*glob));
out:
mutex_unlock(&ttm_global_mutex);
}
@@ -1546,7 +1548,7 @@ static int ttm_bo_global_init(void)
unsigned i;
mutex_lock(&ttm_global_mutex);
- if (++glob->use_count > 1)
+ if (++ttm_bo_glob_use_count > 1)
goto out;
ret = ttm_mem_global_init(&ttm_mem_glob);
@@ -1624,7 +1626,6 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
- uint64_t file_page_offset,
bool need_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
@@ -1646,8 +1647,9 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
- 0x10000000);
+ drm_vma_offset_manager_init(&bdev->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
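A hedged sketch of a driver-side call after this signature change; my_priv, my_bo_driver and need_dma32 stand in for driver-specific state:

static int my_ttm_init(struct my_priv *priv, struct drm_device *drm)
{
        /*
         * The file_page_offset argument is gone; TTM now always uses
         * DRM_FILE_PAGE_OFFSET_START/SIZE for its VMA offset manager.
         */
        return ttm_bo_device_init(&priv->bdev, &my_bo_driver,
                                  drm->anon_inode->i_mapping,
                                  priv->need_dma32);
}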
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index e86a29a1e51f..6dacff49c1cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -432,6 +432,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_buffer_object *bo;
int ret;
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
+ return -EINVAL;
+
bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
if (unlikely(!bo))
return -EINVAL;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 93860346c426..0075eb9a0b52 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -188,13 +188,11 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
- struct ttm_bo_device *bdev;
if (list_empty(list))
return;
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index f1567c353b54..8617958b7ae6 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -81,7 +81,7 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj)
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
- pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
+ pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
zone->name, (unsigned long long)zone->used_mem >> 10);
kfree(zone);
}
@@ -448,7 +448,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
#endif
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
- pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
+ pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
zone->name, (unsigned long long)zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
@@ -461,8 +461,8 @@ out_no_zone:
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
- unsigned int i;
struct ttm_mem_zone *zone;
+ unsigned int i;
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
+ memset(glob, 0, sizeof(*glob));
}
static void ttm_check_swapping(struct ttm_mem_global *glob)
@@ -522,7 +523,7 @@ static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
- return ttm_mem_global_free_zone(glob, NULL, amount);
+ return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
@@ -621,10 +622,10 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
{
/**
* Normal allocations of kernel memory are registered in
- * all zones.
+ * the kernel zone.
*/
- return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
+ return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index f841accc2c00..627f8dc91d0e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (!(flags & TTM_PAGE_FLAG_DMA32)) {
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- if (p++ != pages[i + j])
+ if (!(flags & TTM_PAGE_FLAG_DMA32) &&
+ (npages - i) >= HPAGE_PMD_NR) {
+ for (j = 1; j < HPAGE_PMD_NR; ++j)
+ if (++p != pages[i + j])
break;
if (j == HPAGE_PMD_NR)
@@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
unsigned max_size, n2free;
spin_lock_irqsave(&huge->lock, irq_flags);
- while (i < npages) {
+ while ((npages - i) >= HPAGE_PMD_NR) {
struct page *p = pages[i];
unsigned j;
if (!p)
break;
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- if (p++ != pages[i + j])
+ for (j = 1; j < HPAGE_PMD_NR; ++j)
+ if (++p != pages[i + j])
break;
if (j != HPAGE_PMD_NR)
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index e8723a2412a6..d775d10dbe6a 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -149,7 +149,8 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
/* Vsync IRQ at start of Vsync at first */
ctrl1 |= TVE200_VSTSTYPE_VSYNC;
- if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ if (connector->display_info.bus_flags &
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
ctrl1 |= TVE200_CTRL_TVCLKP;
if ((mode->hdisplay == 352 && mode->vdisplay == 240) || /* SIF(525) */
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 22cd2d13e272..312bf324841a 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -48,10 +48,17 @@ static const struct file_operations udl_driver_fops = {
.llseek = noop_llseek,
};
+static void udl_driver_release(struct drm_device *dev)
+{
+ udl_fini(dev);
+ udl_modeset_cleanup(dev);
+ drm_dev_fini(dev);
+ kfree(dev);
+}
+
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
- .load = udl_driver_load,
- .unload = udl_driver_unload,
+ .release = udl_driver_release,
/* gem hooks */
.gem_free_object_unlocked = udl_gem_free_object,
@@ -74,28 +81,56 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct udl_device *udl_driver_create(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct udl_device *udl;
+ int r;
+
+ udl = kzalloc(sizeof(*udl), GFP_KERNEL);
+ if (!udl)
+ return ERR_PTR(-ENOMEM);
+
+ r = drm_dev_init(&udl->drm, &driver, &interface->dev);
+ if (r) {
+ kfree(udl);
+ return ERR_PTR(r);
+ }
+
+ udl->udev = udev;
+ udl->drm.dev_private = udl;
+
+ r = udl_init(udl);
+ if (r) {
+ drm_dev_fini(&udl->drm);
+ kfree(udl);
+ return ERR_PTR(r);
+ }
+
+ usb_set_intfdata(interface, udl);
+ return udl;
+}
+
static int udl_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- struct usb_device *udev = interface_to_usbdev(interface);
- struct drm_device *dev;
int r;
+ struct udl_device *udl;
- dev = drm_dev_alloc(&driver, &interface->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ udl = udl_driver_create(interface);
+ if (IS_ERR(udl))
+ return PTR_ERR(udl);
- r = drm_dev_register(dev, (unsigned long)udev);
+ r = drm_dev_register(&udl->drm, 0);
if (r)
goto err_free;
- usb_set_intfdata(interface, dev);
- DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
+ DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
return 0;
err_free:
- drm_dev_put(dev);
+ drm_dev_put(&udl->drm);
return r;
}
@@ -107,6 +142,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
+ drm_dev_put(dev);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index e9e9b1ff678e..35c1f33fbc1a 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -50,8 +50,8 @@ struct urb_list {
struct udl_fbdev;
struct udl_device {
+ struct drm_device drm;
struct device *dev;
- struct drm_device *ddev;
struct usb_device *udev;
struct drm_crtc *crtc;
@@ -71,6 +71,8 @@ struct udl_device {
atomic_t cpu_kcycles_used; /* transpired during pixel processing */
};
+#define to_udl(x) container_of(x, struct udl_device, drm)
+
struct udl_gem_object {
struct drm_gem_object base;
struct page **pages;
@@ -102,8 +104,8 @@ struct urb *udl_get_urb(struct drm_device *dev);
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
void udl_urb_completion(struct urb *urb);
-int udl_driver_load(struct drm_device *dev, unsigned long flags);
-void udl_driver_unload(struct drm_device *dev);
+int udl_init(struct udl_device *udl);
+void udl_fini(struct drm_device *dev);
int udl_fbdev_init(struct drm_device *dev);
void udl_fbdev_cleanup(struct drm_device *dev);
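A short sketch, assuming the to_udl() macro defined above, of the embedded-device pattern: the drm_device lives inside the driver structure, so upcasting is a container_of() and dev_private is no longer needed for this purpose:

static void my_helper(struct drm_device *dev)
{
        struct udl_device *udl = to_udl(dev);   /* drm_device -> driver struct */
        struct drm_device *drm = &udl->drm;     /* driver struct -> drm_device */

        (void)drm;      /* placeholder body */
}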
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dd9ffded223b..b9b67a546d4c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -32,7 +32,7 @@ module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
struct udl_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct udl_framebuffer ufb;
int fb_count;
};
@@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height)
{
struct drm_device *dev = fb->base.dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int i, ret;
char *cmd;
cycles_t start_cycles, end_cycles;
@@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user)
{
struct udl_fbdev *ufbdev = info->par;
struct drm_device *dev = ufbdev->ufb.base.dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
/* If the USB device is gone, we don't accept new opens */
- if (drm_dev_is_unplugged(udl->ddev))
+ if (drm_dev_is_unplugged(&udl->drm))
return -ENODEV;
ufbdev->fb_count++;
@@ -392,7 +392,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out_gfree;
}
- info->par = ufbdev;
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
if (ret)
@@ -402,15 +401,12 @@ static int udlfb_create(struct drm_fb_helper *helper,
ufbdev->helper.fb = fb;
- strcpy(info->fix.id, "udldrmfb");
-
info->screen_base = ufbdev->ufb.obj->vmapping;
info->fix.smem_len = size;
info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
info->fbops = &udlfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
fb->width, fb->height,
@@ -441,7 +437,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
int udl_fbdev_init(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int bpp_sel = fb_bpp;
struct udl_fbdev *ufbdev;
int ret;
@@ -480,7 +476,7 @@ free:
void udl_fbdev_cleanup(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
if (!udl->fbdev)
return;
@@ -491,7 +487,7 @@ void udl_fbdev_cleanup(struct drm_device *dev)
void udl_fbdev_unplug(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct udl_fbdev *ufbdev;
if (!udl->fbdev)
return;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index bb7b58407039..3b3e17652bb2 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -203,7 +203,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
{
struct udl_gem_object *gobj;
struct drm_gem_object *obj;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret = 0;
mutex_lock(&udl->gem_lock);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 9086d0d1b880..6743eaef4594 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -30,7 +30,7 @@
static int udl_parse_vendor_descriptor(struct drm_device *dev,
struct usb_device *usbdev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
char *desc;
char *buf;
char *desc_end;
@@ -166,7 +166,7 @@ void udl_urb_completion(struct urb *urb)
static void udl_free_urb_list(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int count = udl->urbs.count;
struct list_head *node;
struct urb_node *unode;
@@ -199,7 +199,7 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
struct urb_node *unode;
char *buf;
@@ -263,7 +263,7 @@ retry:
struct urb *udl_get_urb(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret = 0;
struct list_head *entry;
struct urb_node *unode;
@@ -296,7 +296,7 @@ error:
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret;
BUG_ON(len > udl->urbs.size);
@@ -311,20 +311,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
return ret;
}
-int udl_driver_load(struct drm_device *dev, unsigned long flags)
+int udl_init(struct udl_device *udl)
{
- struct usb_device *udev = (void*)flags;
- struct udl_device *udl;
+ struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
DRM_DEBUG("\n");
- udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
- if (!udl)
- return -ENOMEM;
-
- udl->udev = udev;
- udl->ddev = dev;
- dev->dev_private = udl;
mutex_init(&udl->gem_lock);
@@ -358,7 +350,6 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
- kfree(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
@@ -369,9 +360,9 @@ int udl_drop_usb(struct drm_device *dev)
return 0;
}
-void udl_driver_unload(struct drm_device *dev)
+void udl_fini(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
drm_kms_helper_poll_fini(dev);
@@ -379,6 +370,4 @@ void udl_driver_unload(struct drm_device *dev)
udl_free_urb_list(dev);
udl_fbdev_cleanup(dev);
- udl_modeset_cleanup(dev);
- kfree(udl);
}
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index 1552bf552c94..75a74c45f109 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -5,6 +5,7 @@ config DRM_V3D
depends on COMMON_CLK
depends on MMU
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
Choose this option if you have a system that has a Broadcom
V3D 3.x or newer GPU, such as BCM7268.
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a08766d39eab..a22b75a3a533 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -25,162 +25,6 @@
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
-/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
- * it for DMA.
- */
-static int
-v3d_bo_get_pages(struct v3d_bo *bo)
-{
- struct drm_gem_object *obj = &bo->base;
- struct drm_device *dev = obj->dev;
- int npages = obj->size >> PAGE_SHIFT;
- int ret = 0;
-
- mutex_lock(&bo->lock);
- if (bo->pages_refcount++ != 0)
- goto unlock;
-
- if (!obj->import_attach) {
- bo->pages = drm_gem_get_pages(obj);
- if (IS_ERR(bo->pages)) {
- ret = PTR_ERR(bo->pages);
- goto unlock;
- }
-
- bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
- if (IS_ERR(bo->sgt)) {
- ret = PTR_ERR(bo->sgt);
- goto put_pages;
- }
-
- /* Map the pages for use by the GPU. */
- dma_map_sg(dev->dev, bo->sgt->sgl,
- bo->sgt->nents, DMA_BIDIRECTIONAL);
- } else {
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages)
- goto put_pages;
-
- drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
- NULL, npages);
-
- /* Note that dma-bufs come in mapped. */
- }
-
- mutex_unlock(&bo->lock);
-
- return 0;
-
-put_pages:
- drm_gem_put_pages(obj, bo->pages, true, true);
- bo->pages = NULL;
-unlock:
- bo->pages_refcount--;
- mutex_unlock(&bo->lock);
- return ret;
-}
-
-static void
-v3d_bo_put_pages(struct v3d_bo *bo)
-{
- struct drm_gem_object *obj = &bo->base;
-
- mutex_lock(&bo->lock);
- if (--bo->pages_refcount == 0) {
- if (!obj->import_attach) {
- dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
- bo->sgt->nents, DMA_BIDIRECTIONAL);
- sg_free_table(bo->sgt);
- kfree(bo->sgt);
- drm_gem_put_pages(obj, bo->pages, true, true);
- } else {
- kfree(bo->pages);
- }
- }
- mutex_unlock(&bo->lock);
-}
-
-static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
- size_t unaligned_size)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_gem_object *obj;
- struct v3d_bo *bo;
- size_t size = roundup(unaligned_size, PAGE_SIZE);
- int ret;
-
- if (size == 0)
- return ERR_PTR(-EINVAL);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
- obj = &bo->base;
-
- INIT_LIST_HEAD(&bo->vmas);
- INIT_LIST_HEAD(&bo->unref_head);
- mutex_init(&bo->lock);
-
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto free_bo;
-
- spin_lock(&v3d->mm_lock);
- ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
- obj->size >> PAGE_SHIFT,
- GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
- spin_unlock(&v3d->mm_lock);
- if (ret)
- goto free_obj;
-
- return bo;
-
-free_obj:
- drm_gem_object_release(obj);
-free_bo:
- kfree(bo);
- return ERR_PTR(ret);
-}
-
-struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
- size_t unaligned_size)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_gem_object *obj;
- struct v3d_bo *bo;
- int ret;
-
- bo = v3d_bo_create_struct(dev, unaligned_size);
- if (IS_ERR(bo))
- return bo;
- obj = &bo->base;
-
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
-
- ret = v3d_bo_get_pages(bo);
- if (ret)
- goto free_mm;
-
- v3d_mmu_insert_ptes(bo);
-
- mutex_lock(&v3d->bo_lock);
- v3d->bo_stats.num_allocated++;
- v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
- mutex_unlock(&v3d->bo_lock);
-
- return bo;
-
-free_mm:
- spin_lock(&v3d->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&v3d->mm_lock);
-
- drm_gem_object_release(obj);
- kfree(bo);
- return ERR_PTR(ret);
-}
-
/* Called by DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -189,92 +33,116 @@ void v3d_free_object(struct drm_gem_object *obj)
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
+ v3d_mmu_remove_ptes(bo);
+
mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated--;
v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
- reservation_object_fini(&bo->_resv);
-
- v3d_bo_put_pages(bo);
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, bo->sgt);
-
- v3d_mmu_remove_ptes(bo);
spin_lock(&v3d->mm_lock);
drm_mm_remove_node(&bo->node);
spin_unlock(&v3d->mm_lock);
- mutex_destroy(&bo->lock);
+ /* GPU execution may have dirtied any pages in the BO. */
+ bo->base.pages_mark_dirty_on_put = true;
- drm_gem_object_release(obj);
- kfree(bo);
+ drm_gem_shmem_free_object(obj);
}
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
+static const struct drm_gem_object_funcs v3d_gem_funcs = {
+ .free = v3d_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/* gem_create_object function for allocating a BO struct and doing
+ * early setup.
+ */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
{
- struct v3d_bo *bo = to_v3d_bo(obj);
+ struct v3d_bo *bo;
+ struct drm_gem_object *obj;
- return bo->resv;
-}
+ if (size == 0)
+ return NULL;
-static void
-v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
-{
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-}
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+ obj = &bo->base.base;
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct v3d_bo *bo = to_v3d_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
+ obj->funcs = &v3d_gem_funcs;
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+ INIT_LIST_HEAD(&bo->unref_head);
- return vmf_insert_mixed(vma, vmf->address, pfn);
+ return &bo->base.base;
}
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+v3d_bo_create_finish(struct drm_gem_object *obj)
{
+ struct v3d_dev *v3d = to_v3d_dev(obj->dev);
+ struct v3d_bo *bo = to_v3d_bo(obj);
+ struct sg_table *sgt;
int ret;
- ret = drm_gem_mmap(filp, vma);
+ /* So far we pin the BO in the MMU for its lifetime, so use
+ * shmem's helper for getting a lifetime sgt.
+ */
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ spin_lock(&v3d->mm_lock);
+ /* Allocate the object's space in the GPU's page tables.
+ * Inserting PTEs will happen later, but the offset is for the
+ * lifetime of the BO.
+ */
+ ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
+ obj->size >> PAGE_SHIFT,
+ GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+ spin_unlock(&v3d->mm_lock);
if (ret)
return ret;
- v3d_set_mmap_vma_flags(vma);
+ /* Track stats for /debug/dri/n/bo_stats. */
+ mutex_lock(&v3d->bo_lock);
+ v3d->bo_stats.num_allocated++;
+ v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+ mutex_unlock(&v3d->bo_lock);
- return ret;
+ v3d_mmu_insert_ptes(bo);
+
+ return 0;
}
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+ size_t unaligned_size)
{
+ struct drm_gem_shmem_object *shmem_obj;
+ struct v3d_bo *bo;
int ret;
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- v3d_set_mmap_vma_flags(vma);
+ shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+ if (IS_ERR(shmem_obj))
+ return ERR_CAST(shmem_obj);
+ bo = to_v3d_bo(&shmem_obj->base);
- return 0;
-}
+ ret = v3d_bo_create_finish(&shmem_obj->base);
+ if (ret)
+ goto free_obj;
-struct sg_table *
-v3d_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct v3d_bo *bo = to_v3d_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
+ return bo;
- return drm_prime_pages_to_sg(bo->pages, npages);
+free_obj:
+ drm_gem_shmem_free_object(&shmem_obj->base);
+ return ERR_PTR(ret);
}
struct drm_gem_object *
@@ -283,20 +151,17 @@ v3d_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct v3d_bo *bo;
-
- bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
- obj = &bo->base;
-
- bo->resv = attach->dmabuf->resv;
+ int ret;
- bo->sgt = sgt;
- obj->import_attach = attach;
- v3d_bo_get_pages(bo);
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj))
+ return obj;
- v3d_mmu_insert_ptes(bo);
+ ret = v3d_bo_create_finish(obj);
+ if (ret) {
+ drm_gem_shmem_free_object(obj);
+ return ERR_PTR(ret);
+ }
return obj;
}
@@ -319,8 +184,8 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
args->offset = bo->node.start << PAGE_SHIFT;
- ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
- drm_gem_object_put_unlocked(&bo->base);
+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -330,7 +195,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
{
struct drm_v3d_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
- int ret;
if (args->flags != 0) {
DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
@@ -343,12 +207,10 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
- ret = drm_gem_create_mmap_offset(gem_obj);
- if (ret == 0)
- args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
drm_gem_object_put_unlocked(gem_obj);
- return ret;
+ return 0;
}
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
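A hedged sketch of the shmem-helper wiring used above, reduced to the pieces a driver must supply; the my_* names are placeholders, and my_create_object/my_free_object stand for driver hooks that set obj->funcs and end in drm_gem_shmem_free_object():

DEFINE_DRM_GEM_SHMEM_FOPS(my_fops);

static const struct drm_gem_object_funcs my_gem_funcs = {
        .free           = my_free_object,
        .print_info     = drm_gem_shmem_print_info,
        .pin            = drm_gem_shmem_pin,
        .unpin          = drm_gem_shmem_unpin,
        .get_sg_table   = drm_gem_shmem_get_sg_table,
        .vmap           = drm_gem_shmem_vmap,
        .vunmap         = drm_gem_shmem_vunmap,
        .vm_ops         = &drm_gem_shmem_vm_ops,
};

static struct drm_driver my_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_RENDER,
        .fops                   = &my_fops,
        .gem_create_object      = my_create_object,
        .gem_prime_mmap         = drm_gem_prime_mmap,
        /* prime handle/fd helpers, ioctls, name/desc elided */
};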
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index eb2b2d2f8553..a24af2d2f574 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -187,6 +187,11 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
uint32_t cycles;
int core = 0;
int measure_ms = 1000;
+ int ret;
+
+ ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0)
+ return ret;
if (v3d->ver >= 40) {
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
@@ -210,6 +215,9 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
+ pm_runtime_mark_last_busy(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->dev);
+
return 0;
}
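A hedged sketch of the runtime-PM guard added above: wake the device before touching registers from debugfs, then drop the reference with autosuspend. my_priv is a placeholder for the driver's private structure:

static int my_debugfs_show(struct seq_file *m, void *unused)
{
        struct my_priv *priv = m->private;
        int ret;

        ret = pm_runtime_get_sync(priv->dev);
        if (ret < 0)
                return ret;

        /* ... register reads ... */

        pm_runtime_mark_last_busy(priv->dev);
        pm_runtime_put_autosuspend(priv->dev);

        return 0;
}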
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index f0afcec72c34..a06b05f714a5 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -7,9 +7,9 @@
* This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
* For V3D 2.x support, see the VC4 driver.
*
- * Currently only single-core rendering using the binner and renderer
- * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD
- * (compute shader dispatch) are not yet supported.
+ * Currently only single-core rendering using the binner and renderer,
+ * along with TFU (texture formatting unit) rendering, is supported.
+ * V3D 4.x's CSD (compute shader dispatch) is not yet supported.
*/
#include <linux/clk.h>
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
@@ -101,6 +102,8 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0)
+ return ret;
if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) {
args->value = V3D_CORE_READ(0, offset);
@@ -160,17 +163,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-static const struct file_operations v3d_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = v3d_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
@@ -188,12 +181,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
-static const struct vm_operations_struct v3d_vm_ops = {
- .fault = v3d_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver v3d_drm_driver = {
.driver_features = (DRIVER_GEM |
DRIVER_RENDER |
@@ -207,17 +194,11 @@ static struct drm_driver v3d_drm_driver = {
.debugfs_init = v3d_debugfs_init,
#endif
- .gem_free_object_unlocked = v3d_free_object,
- .gem_vm_ops = &v3d_vm_ops,
-
+ .gem_create_object = v3d_create_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_res_obj = v3d_prime_res_obj,
- .gem_prime_get_sg_table = v3d_prime_get_sg_table,
.gem_prime_import_sg_table = v3d_prime_import_sg_table,
- .gem_prime_mmap = v3d_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.ioctls = v3d_drm_ioctls,
.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
@@ -265,10 +246,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->pdev = pdev;
drm = &v3d->drm;
- ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
- if (ret)
- goto dev_free;
-
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
goto dev_free;
@@ -283,6 +260,22 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
+ v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(v3d->reset)) {
+ ret = PTR_ERR(v3d->reset);
+
+ if (ret == -EPROBE_DEFER)
+ goto dev_free;
+
+ v3d->reset = NULL;
+ ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
+ if (ret) {
+ dev_err(dev,
+ "Failed to get reset control or bridge regs\n");
+ goto dev_free;
+ }
+ }
+
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
@@ -312,14 +305,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
goto dev_destroy;
- v3d_irq_init(v3d);
+ ret = v3d_irq_init(v3d);
+ if (ret)
+ goto gem_destroy;
ret = drm_dev_register(drm, 0);
if (ret)
- goto gem_destroy;
+ goto irq_disable;
return 0;
+irq_disable:
+ v3d_irq_disable(v3d);
gem_destroy:
v3d_gem_destroy(drm);
dev_destroy:
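The probe hunk above makes the bridge register window optional: if the DT describes a reset controller the driver uses it, and the legacy bridge registers are only mapped as a fallback (deferring the probe if the reset provider is not ready yet). A sketch of that decision, assuming hypothetical my_dev/my_map_regs() helpers in place of the v3d ones:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* Sketch only: my_dev and my_map_regs() stand in for struct v3d_dev and the
 * driver's map_regs() helper used in the hunk above.
 */
struct my_dev {
	struct reset_control *reset;
	void __iomem *bridge_regs;
};

static int my_map_regs(struct platform_device *pdev, void __iomem **regs,
		       const char *name)
{
	return 0;	/* stand-in for ioremapping the named resource */
}

static int my_get_reset_or_bridge(struct platform_device *pdev,
				  struct my_dev *priv)
{
	priv->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (!IS_ERR(priv->reset))
		return 0;		/* preferred: dedicated reset line */

	if (PTR_ERR(priv->reset) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* provider not bound yet */

	/* No reset controller in DT: fall back to the legacy bridge
	 * register window, as older platforms require.
	 */
	priv->reset = NULL;
	return my_map_regs(pdev, &priv->bridge_regs, "bridge");
}

At reset time this becomes a two-way branch, which is exactly what the new v3d_reset_v3d() in v3d_gem.c below does: reset_control_reset() when a reset line exists, the bridge write sequence otherwise.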
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index fdda3037f7af..e9d4a2fdcf44 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */
-#include <linux/reservation.h>
#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include "uapi/drm/v3d_drm.h"
@@ -34,6 +34,7 @@ struct v3d_dev {
* and revision.
*/
int ver;
+ bool single_irq_line;
struct device *dev;
struct platform_device *pdev;
@@ -42,6 +43,7 @@ struct v3d_dev {
void __iomem *bridge_regs;
void __iomem *gca_regs;
struct clk *clk;
+ struct reset_control *reset;
/* Virtual and DMA addresses of the single shared page table. */
volatile u32 *pt;
@@ -109,34 +111,15 @@ struct v3d_file_priv {
struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};
-/* Tracks a mapping of a BO into a per-fd address space */
-struct v3d_vma {
- struct v3d_page_table *pt;
- struct list_head list; /* entry in v3d_bo.vmas */
-};
-
struct v3d_bo {
- struct drm_gem_object base;
-
- struct mutex lock;
+ struct drm_gem_shmem_object base;
struct drm_mm_node node;
- u32 pages_refcount;
- struct page **pages;
- struct sg_table *sgt;
- void *vaddr;
-
- struct list_head vmas; /* list of v3d_vma */
-
/* List entry for the BO's position in
* v3d_exec_info->unref_list
*/
struct list_head unref_head;
-
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
};
static inline struct v3d_bo *
@@ -180,7 +163,7 @@ struct v3d_job {
struct dma_fence *in_fence;
/* v3d fence to be signaled by IRQ handler when the job is complete. */
- struct dma_fence *done_fence;
+ struct dma_fence *irq_fence;
/* GPU virtual addresses of the start/end of the CL job. */
u32 start, end;
@@ -227,7 +210,7 @@ struct v3d_tfu_job {
struct dma_fence *in_fence;
/* v3d fence to be signaled by IRQ handler when the job is complete. */
- struct dma_fence *done_fence;
+ struct dma_fence *irq_fence;
struct v3d_dev *v3d;
@@ -270,6 +253,7 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
}
/* v3d_bo.c */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t size);
@@ -279,11 +263,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
@@ -310,7 +289,7 @@ void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
/* v3d_irq.c */
-void v3d_irq_init(struct v3d_dev *v3d);
+int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 803f31467ec1..93ff8fcbe475 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
@@ -24,7 +25,8 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
+ if (v3d->ver < 40)
+ V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
* the whole thing.
@@ -69,7 +71,7 @@ v3d_idle_gca(struct v3d_dev *v3d)
}
static void
-v3d_reset_v3d(struct v3d_dev *v3d)
+v3d_reset_by_bridge(struct v3d_dev *v3d)
{
int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);
@@ -89,6 +91,15 @@ v3d_reset_v3d(struct v3d_dev *v3d)
V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
}
+}
+
+static void
+v3d_reset_v3d(struct v3d_dev *v3d)
+{
+ if (v3d->reset)
+ reset_control_reset(v3d->reset);
+ else
+ v3d_reset_by_bridge(v3d);
v3d_init_hw_state(v3d);
}
@@ -190,7 +201,8 @@ v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
for (i = 0; i < bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bos[i]->resv, fence);
+ reservation_object_add_excl_fence(bos[i]->base.base.resv,
+ fence);
}
}
@@ -199,12 +211,8 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
- int i;
-
- for (i = 0; i < bo_count; i++)
- ww_mutex_unlock(&bos[i]->resv->lock);
-
- ww_acquire_fini(acquire_ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
+ acquire_ctx);
}
/* Takes the reservation lock on all the BOs being referenced, so that
@@ -219,58 +227,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
- int contended_lock = -1;
int i, ret;
- ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
- if (contended_lock != -1) {
- struct v3d_bo *bo = bos[contended_lock];
-
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- acquire_ctx);
- if (ret) {
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- for (i = 0; i < bo_count; i++) {
- if (i == contended_lock)
- continue;
-
- ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
- acquire_ctx);
- if (ret) {
- int j;
-
- for (j = 0; j < i; j++)
- ww_mutex_unlock(&bos[j]->resv->lock);
-
- if (contended_lock != -1 && contended_lock >= i) {
- struct v3d_bo *bo = bos[contended_lock];
-
- ww_mutex_unlock(&bo->resv->lock);
- }
-
- if (ret == -EDEADLK) {
- contended_lock = i;
- goto retry;
- }
-
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- ww_acquire_done(acquire_ctx);
+ ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ bo_count, acquire_ctx);
+ if (ret)
+ return ret;
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
for (i = 0; i < bo_count; i++) {
- ret = reservation_object_reserve_shared(bos[i]->resv, 1);
+ ret = reservation_object_reserve_shared(bos[i]->base.base.resv,
+ 1);
if (ret) {
v3d_unlock_bo_reservations(bos, bo_count,
acquire_ctx);
@@ -371,18 +340,18 @@ v3d_exec_cleanup(struct kref *ref)
dma_fence_put(exec->bin.in_fence);
dma_fence_put(exec->render.in_fence);
- dma_fence_put(exec->bin.done_fence);
- dma_fence_put(exec->render.done_fence);
+ dma_fence_put(exec->bin.irq_fence);
+ dma_fence_put(exec->render.irq_fence);
dma_fence_put(exec->bin_done_fence);
dma_fence_put(exec->render_done_fence);
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_put_unlocked(&exec->bo[i]->base);
+ drm_gem_object_put_unlocked(&exec->bo[i]->base.base);
kvfree(exec->bo);
list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put_unlocked(&bo->base.base);
}
pm_runtime_mark_last_busy(v3d->dev);
@@ -405,11 +374,11 @@ v3d_tfu_job_cleanup(struct kref *ref)
unsigned int i;
dma_fence_put(job->in_fence);
- dma_fence_put(job->done_fence);
+ dma_fence_put(job->irq_fence);
for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
if (job->bo[i])
- drm_gem_object_put_unlocked(&job->bo[i]->base);
+ drm_gem_object_put_unlocked(&job->bo[i]->base.base);
}
pm_runtime_mark_last_busy(v3d->dev);
@@ -429,8 +398,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
{
int ret;
struct drm_v3d_wait_bo *args = data;
- struct drm_gem_object *gem_obj;
- struct v3d_bo *bo;
ktime_t start = ktime_get();
u64 delta_ns;
unsigned long timeout_jiffies =
@@ -439,21 +406,8 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- gem_obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!gem_obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
- return -EINVAL;
- }
- bo = to_v3d_bo(gem_obj);
-
- ret = reservation_object_wait_timeout_rcu(bo->resv,
- true, true,
- timeout_jiffies);
-
- if (ret == 0)
- ret = -ETIME;
- else if (ret > 0)
- ret = 0;
+ ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+ true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
@@ -468,8 +422,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
- drm_gem_object_put_unlocked(gem_obj);
-
return ret;
}
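The v3d_gem.c hunks above replace the open-coded ww_mutex retry loop and the per-BO reservation wait with the shared GEM helpers. The per-submit sequence they implement is: lock every BO's reservation, reserve a shared-fence slot, attach the job's fence, unlock. A condensed sketch, with job and fence creation elided and attach_job_fence() purely illustrative:

#include <linux/dma-fence.h>
#include <linux/reservation.h>
#include <drm/drm_gem.h>

/* Sketch of the lock/reserve/attach/unlock sequence for a job's buffer
 * list; the job and fence plumbing is elided.
 */
static int attach_job_fence(struct drm_gem_object **objs, int count,
			    struct dma_fence *done_fence)
{
	struct ww_acquire_ctx ctx;
	int i, ret;

	/* Handles the ww_mutex back-off internally, replacing the
	 * hand-rolled retry loop removed above.
	 */
	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	/* Reserve a shared-fence slot per BO before committing the job. */
	for (i = 0; i < count; i++) {
		ret = reservation_object_reserve_shared(objs[i]->resv, 1);
		if (ret)
			goto out_unlock;
	}

	/* XXX: like v3d, use an exclusive fence even for read-only BOs. */
	for (i = 0; i < count; i++)
		reservation_object_add_excl_fence(objs[i]->resv, done_fence);

out_unlock:
	drm_gem_unlock_reservations(objs, count, &ctx);
	return ret;
}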
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 69338da70ddc..aa0a180ae700 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -27,6 +27,9 @@
V3D_HUB_INT_MMU_CAP | \
V3D_HUB_INT_TFUC))
+static irqreturn_t
+v3d_hub_irq(int irq, void *arg);
+
static void
v3d_overflow_mem_work(struct work_struct *work)
{
@@ -34,12 +37,14 @@ v3d_overflow_mem_work(struct work_struct *work)
container_of(work, struct v3d_dev, overflow_mem_work);
struct drm_device *dev = &v3d->drm;
struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+ struct drm_gem_object *obj;
unsigned long irqflags;
if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
return;
}
+ obj = &bo->base.base;
/* We lost a race, and our work task came in after the bin job
* completed and exited. This can happen because the HW
@@ -56,15 +61,15 @@ v3d_overflow_mem_work(struct work_struct *work)
goto out;
}
- drm_gem_object_get(&bo->base);
+ drm_gem_object_get(obj);
list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
- V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);
+ V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
out:
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put_unlocked(obj);
}
static irqreturn_t
@@ -82,7 +87,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_OUTOMEM) {
/* Note that the OOM status is edge signaled, so the
* interrupt won't happen again until we actually
- * add more memory.
+ * add more memory. Also, as of V3D 4.1, FLDONE won't
+ * be reported until any OOM state has been cleared.
*/
schedule_work(&v3d->overflow_mem_work);
status = IRQ_HANDLED;
@@ -90,7 +96,7 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->bin_job->bin.done_fence);
+ to_v3d_fence(v3d->bin_job->bin.irq_fence);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -99,7 +105,7 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->render_job->render.done_fence);
+ to_v3d_fence(v3d->render_job->render.irq_fence);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -112,6 +118,12 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_GMPV)
dev_err(v3d->dev, "GMP violation\n");
+ /* V3D 4.2 wires the hub and core IRQs together, so if we
+ * didn't see the common one, then check the hub for MMU IRQs.
+ */
+ if (v3d->single_irq_line && status == IRQ_NONE)
+ return v3d_hub_irq(irq, arg);
+
return status;
}
@@ -129,7 +141,7 @@ v3d_hub_irq(int irq, void *arg)
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->tfu_job->done_fence);
+ to_v3d_fence(v3d->tfu_job->irq_fence);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -156,10 +168,10 @@ v3d_hub_irq(int irq, void *arg)
return status;
}
-void
+int
v3d_irq_init(struct v3d_dev *v3d)
{
- int ret, core;
+ int irq1, ret, core;
INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
@@ -170,16 +182,37 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
- v3d_hub_irq, IRQF_SHARED,
- "v3d_hub", v3d);
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
- v3d_irq, IRQF_SHARED,
- "v3d_core0", v3d);
- if (ret)
- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ irq1 = platform_get_irq(v3d->pdev, 1);
+ if (irq1 == -EPROBE_DEFER)
+ return irq1;
+ if (irq1 > 0) {
+ ret = devm_request_irq(v3d->dev, irq1,
+ v3d_irq, IRQF_SHARED,
+ "v3d_core0", v3d);
+ if (ret)
+ goto fail;
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_hub_irq, IRQF_SHARED,
+ "v3d_hub", v3d);
+ if (ret)
+ goto fail;
+ } else {
+ v3d->single_irq_line = true;
+
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_irq, IRQF_SHARED,
+ "v3d", v3d);
+ if (ret)
+ goto fail;
+ }
v3d_irq_enable(v3d);
+ return 0;
+
+fail:
+ if (ret != -EPROBE_DEFER)
+ dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ return ret;
}
void
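v3d_irq_init() above now handles both interrupt wirings: older parts expose separate hub and core lines, while V3D 4.2 folds them into one, in which case the core handler chains into the hub handler when nothing of its own fired. A generic sketch of that probe-time pattern, with hypothetical handler and device names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Sketch: request a dedicated second interrupt when the DT provides one,
 * otherwise fall back to a single shared line and let the primary handler
 * chain into the secondary one (which is what v3d_irq() now does above).
 */
static int my_irq_init(struct platform_device *pdev, void *priv,
		       irq_handler_t hub_handler, irq_handler_t core_handler,
		       bool *single_irq_line)
{
	int irq1 = platform_get_irq(pdev, 1);
	int ret;

	if (irq1 == -EPROBE_DEFER)
		return irq1;

	if (irq1 > 0) {
		ret = devm_request_irq(&pdev->dev, irq1, core_handler,
				       IRQF_SHARED, "my-core0", priv);
		if (ret)
			return ret;
		return devm_request_irq(&pdev->dev,
					platform_get_irq(pdev, 0),
					hub_handler, IRQF_SHARED,
					"my-hub", priv);
	}

	/* Only one line: the core handler must also poll the hub status
	 * when none of its own sources fired.
	 */
	*single_irq_line = true;
	return devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
				core_handler, IRQF_SHARED, "my-irq", priv);
}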
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index b00f97c31b70..7a21f1787ab1 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -83,13 +83,14 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d)
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
- struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
+ struct drm_gem_shmem_object *shmem_obj = &bo->base;
+ struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
u32 page = bo->node.start;
u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
unsigned int count;
struct scatterlist *sgl;
- for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) {
+ for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
u32 pte = page_prot | page_address;
u32 i;
@@ -102,7 +103,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
}
WARN_ON_ONCE(page - bo->node.start !=
- bo->base.size >> V3D_MMU_PAGE_SHIFT);
+ shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
if (v3d_mmu_flush_all(v3d))
dev_err(v3d->dev, "MMU flush timeout\n");
@@ -110,8 +111,8 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
- struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
- u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT;
+ struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
+ u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
u32 page;
for (page = bo->node.start; page < bo->node.start + npages; page++)
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6ccdee9d47bd..8e88af237610 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -216,6 +216,8 @@
# define V3D_IDENT2_BCG_INT BIT(28)
#define V3D_CTL_MISCCFG 0x00018
+# define V3D_CTL_MISCCFG_QRMAXCNT_MASK V3D_MASK(3, 1)
+# define V3D_CTL_MISCCFG_QRMAXCNT_SHIFT 1
# define V3D_MISCCFG_OVRTMUOUT BIT(0)
#define V3D_CTL_L2CACTL 0x00020
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 4704b2df3688..e740f3b99aa5 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -156,9 +156,9 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
if (IS_ERR(fence))
return NULL;
- if (job->done_fence)
- dma_fence_put(job->done_fence);
- job->done_fence = dma_fence_get(fence);
+ if (job->irq_fence)
+ dma_fence_put(job->irq_fence);
+ job->irq_fence = dma_fence_get(fence);
trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
job->start, job->end);
@@ -199,9 +199,9 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
return NULL;
v3d->tfu_job = job;
- if (job->done_fence)
- dma_fence_put(job->done_fence);
- job->done_fence = dma_fence_get(fence);
+ if (job->irq_fence)
+ dma_fence_put(job->irq_fence);
+ job->irq_fence = dma_fence_get(fence);
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
@@ -231,20 +231,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
mutex_lock(&v3d->reset_lock);
/* block scheduler */
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
-
- drm_sched_stop(sched);
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
+ drm_sched_stop(&v3d->queue[q].sched);
- if(sched_job)
- drm_sched_increase_karma(sched_job);
- }
+ if (sched_job)
+ drm_sched_increase_karma(sched_job);
/* get the GPU back into the init state */
v3d_reset(v3d);
for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_resubmit_jobs(sched_job->sched);
+ drm_sched_resubmit_jobs(&v3d->queue[q].sched);
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
index 1f4182e2e980..d6ab955c0768 100644
--- a/drivers/staging/vboxvideo/Kconfig
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DRM_VBOXVIDEO
tristate "Virtual Box Graphics Card"
depends on DRM && X86 && PCI
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
index 1224f313af0c..1224f313af0c 100644
--- a/drivers/staging/vboxvideo/Makefile
+++ b/drivers/gpu/drm/vboxvideo/Makefile
diff --git a/drivers/staging/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
index 361d3193258e..361d3193258e 100644
--- a/drivers/staging/vboxvideo/hgsmi_base.c
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
diff --git a/drivers/staging/vboxvideo/hgsmi_ch_setup.h b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h
index 4e93418d6a13..4e93418d6a13 100644
--- a/drivers/staging/vboxvideo/hgsmi_ch_setup.h
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h
diff --git a/drivers/staging/vboxvideo/hgsmi_channels.h b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h
index 9b83f4ff3faf..9b83f4ff3faf 100644
--- a/drivers/staging/vboxvideo/hgsmi_channels.h
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h
diff --git a/drivers/staging/vboxvideo/hgsmi_defs.h b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h
index 6c8df1cdb087..6c8df1cdb087 100644
--- a/drivers/staging/vboxvideo/hgsmi_defs.h
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h
diff --git a/drivers/staging/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c
index 7580b9002379..7580b9002379 100644
--- a/drivers/staging/vboxvideo/modesetting.c
+++ b/drivers/gpu/drm/vboxvideo/modesetting.c
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index e7755a179850..fb6a0f0b8167 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -200,36 +200,11 @@ static const struct file_operations vbox_fops = {
.read = drm_read,
};
-static int vbox_master_set(struct drm_device *dev,
- struct drm_file *file_priv, bool from_open)
-{
- struct vbox_private *vbox = dev->dev_private;
-
- /*
- * We do not yet know whether the new owner can handle hotplug, so we
- * do not advertise dynamic modes on the first query and send a
- * tentative hotplug notification after that to see if they query again.
- */
- vbox->initial_mode_queried = false;
-
- return 0;
-}
-
-static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct vbox_private *vbox = dev->dev_private;
-
- /* See vbox_master_set() */
- vbox->initial_mode_queried = false;
-}
-
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
- .master_set = vbox_master_set,
- .master_drop = vbox_master_drop,
.fops = &vbox_fops,
.irq_handler = vbox_irq_handler,
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index aa40e5cc2861..ece31f395540 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
@@ -83,12 +82,6 @@ struct vbox_private {
} ttm;
struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
- /*
- * We decide whether or not user-space supports display hot-plug
- * depending on whether they react to a hot-plug event after the initial
- * mode query.
- */
- bool initial_mode_queried;
struct work_struct hotplug_work;
u32 input_mapping_width;
u32 input_mapping_height;
@@ -209,8 +202,6 @@ int vbox_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
u32 handle, u64 *offset);
-#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
-
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
index 83a04afd1766..b724fe7c0c30 100644
--- a/drivers/staging/vboxvideo/vbox_fb.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_fb.c
@@ -90,13 +90,9 @@ int vboxfb_create(struct drm_fb_helper *helper,
if (IS_ERR(info->screen_base))
return PTR_ERR(info->screen_base);
- info->par = helper;
-
fb = &vbox->afb.base;
helper->fb = fb;
- strcpy(info->fix.id, "vboxdrmfb");
-
info->fbops = &vboxfb_ops;
/*
@@ -106,9 +102,7 @@ int vboxfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, helper, sizes->fb_width,
- sizes->fb_height);
+ drm_fb_helper_fill_info(info, helper, sizes);
gpu_addr = vbox_bo_gpu_offset(bo);
info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
diff --git a/drivers/staging/vboxvideo/vbox_hgsmi.c b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
index 94b60654a012..94b60654a012 100644
--- a/drivers/staging/vboxvideo/vbox_hgsmi.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 195484713365..16a1e29f5292 100644
--- a/drivers/staging/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -105,6 +105,7 @@ static void validate_or_set_position_hints(struct vbox_private *vbox)
/* Query the host for the most recent video mode hints. */
static void vbox_update_mode_hints(struct vbox_private *vbox)
{
+ struct drm_connector_list_iter conn_iter;
struct drm_device *dev = &vbox->ddev;
struct drm_connector *connector;
struct vbox_connector *vbox_conn;
@@ -122,8 +123,10 @@ static void vbox_update_mode_hints(struct vbox_private *vbox)
}
validate_or_set_position_hints(vbox);
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
vbox_conn = to_vbox_connector(connector);
hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
@@ -152,7 +155,8 @@ static void vbox_update_mode_hints(struct vbox_private *vbox)
vbox_conn->vbox_crtc->disconnected = disconnected;
}
- drm_modeset_unlock_all(dev);
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
static void vbox_hotplug_worker(struct work_struct *work)
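The vbox_irq.c hunk above drops drm_modeset_lock_all() plus a raw walk of mode_config.connector_list in favour of the connector-list iterator, which is safe against concurrent hotplug and only needs connection_mutex around the hint updates. The same pattern in isolation:

#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

/* Sketch: walk every connector safely while hotplug may add or remove
 * entries concurrently; only connection_mutex is held, not the whole
 * modeset lock set.
 */
static void update_all_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* per-connector work goes here, e.g. refreshing the
		 * host-provided mode hints as vbox does above
		 */
	}
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}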
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index e1fb70a42d32..f4d02de5518a 100644
--- a/drivers/staging/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -32,9 +32,9 @@ void vbox_report_caps(struct vbox_private *vbox)
u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
- if (vbox->initial_mode_queried)
- caps |= VBVACAPS_VIDEO_MODE_HINTS;
-
+ /* The host only accepts VIDEO_MODE_HINTS if it is sent separately. */
+ hgsmi_send_caps_info(vbox->guest_pool, caps);
+ caps |= VBVACAPS_VIDEO_MODE_HINTS;
hgsmi_send_caps_info(vbox->guest_pool, caps);
}
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 213551394495..58cea131470e 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -57,8 +57,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp);
vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
- vbox_write_ioport(
- VBE_DISPI_INDEX_X_OFFSET,
+ vbox_write_ioport(VBE_DISPI_INDEX_X_OFFSET,
vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x);
vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
vbox_crtc->fb_offset / pitch + vbox_crtc->y);
@@ -736,29 +735,12 @@ static int vbox_get_modes(struct drm_connector *connector)
vbox_connector = to_vbox_connector(connector);
vbox = connector->dev->dev_private;
- /*
- * Heuristic: we do not want to tell the host that we support dynamic
- * resizing unless we feel confident that the user space client using
- * the video driver can handle hot-plug events. So the first time modes
- * are queried after a "master" switch we tell the host that we do not,
- * and immediately after we send the client a hot-plug notification as
- * a test to see if they will respond and query again.
- * That is also the reason why capabilities are reported to the host at
- * this place in the code rather than elsewhere.
- * We need to report the flags location before reporting the IRQ
- * capability.
- */
+
hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
HOST_FLAGS_OFFSET);
if (vbox_connector->vbox_crtc->crtc_id == 0)
vbox_report_caps(vbox);
- if (!vbox->initial_mode_queried) {
- if (vbox_connector->vbox_crtc->crtc_id == 0) {
- vbox->initial_mode_queried = true;
- vbox_report_hotplug(vbox);
- }
- return drm_add_modes_noedid(connector, 800, 600);
- }
+
num_modes = drm_add_modes_noedid(connector, 2560, 1600);
preferred_width = vbox_connector->mode_hint.width ?
vbox_connector->mode_hint.width : 1024;
diff --git a/drivers/staging/vboxvideo/vbox_prime.c b/drivers/gpu/drm/vboxvideo/vbox_prime.c
index d61985b0c6eb..702b1aa53494 100644
--- a/drivers/staging/vboxvideo/vbox_prime.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_prime.c
@@ -16,7 +16,7 @@
int vbox_gem_prime_pin(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
- return -ENOSYS;
+ return -ENODEV;
}
void vbox_gem_prime_unpin(struct drm_gem_object *obj)
@@ -27,7 +27,7 @@ void vbox_gem_prime_unpin(struct drm_gem_object *obj)
struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENODEV);
}
struct drm_gem_object *vbox_gem_prime_import_sg_table(
@@ -35,13 +35,13 @@ struct drm_gem_object *vbox_gem_prime_import_sg_table(
struct sg_table *table)
{
WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENODEV);
}
void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENODEV);
}
void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -52,5 +52,5 @@ void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
{
WARN_ONCE(1, "not implemented");
- return -ENOSYS;
+ return -ENODEV;
}
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index 30f270027acf..9d78438c2877 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -156,7 +156,7 @@ int vbox_mm_init(struct vbox_private *vbox)
ret = ttm_bo_device_init(&vbox->ttm.bdev,
&vbox_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET, true);
+ true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
return ret;
@@ -357,14 +357,8 @@ int vbox_bo_push_sysram(struct vbox_bo *bo)
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct vbox_private *vbox;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- vbox = file_priv->minor->dev->dev_private;
+ struct drm_file *file_priv = filp->private_data;
+ struct vbox_private *vbox = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}
diff --git a/drivers/staging/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index 0592004f71aa..0592004f71aa 100644
--- a/drivers/staging/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
diff --git a/drivers/staging/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
index 55fcee3a6470..55fcee3a6470 100644
--- a/drivers/staging/vboxvideo/vboxvideo_guest.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
diff --git a/drivers/staging/vboxvideo/vboxvideo_vbe.h b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h
index 427235869297..427235869297 100644
--- a/drivers/staging/vboxvideo/vboxvideo_vbe.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h
diff --git a/drivers/staging/vboxvideo/vbva_base.c b/drivers/gpu/drm/vboxvideo/vbva_base.c
index 36bc9824ec3f..36bc9824ec3f 100644
--- a/drivers/staging/vboxvideo/vbva_base.c
+++ b/drivers/gpu/drm/vboxvideo/vbva_base.c
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 8dcce7182bb7..88ebd681d7eb 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -40,7 +40,7 @@ static bool is_user_label(int label)
return label >= VC4_BO_TYPE_COUNT;
}
-static void vc4_bo_stats_dump(struct vc4_dev *vc4)
+static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
int i;
@@ -48,58 +48,35 @@ static void vc4_bo_stats_dump(struct vc4_dev *vc4)
if (!vc4->bo_labels[i].num_allocated)
continue;
- DRM_INFO("%30s: %6dkb BOs (%d)\n",
- vc4->bo_labels[i].name,
- vc4->bo_labels[i].size_allocated / 1024,
- vc4->bo_labels[i].num_allocated);
+ drm_printf(p, "%30s: %6dkb BOs (%d)\n",
+ vc4->bo_labels[i].name,
+ vc4->bo_labels[i].size_allocated / 1024,
+ vc4->bo_labels[i].num_allocated);
}
mutex_lock(&vc4->purgeable.lock);
if (vc4->purgeable.num)
- DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
- vc4->purgeable.size / 1024, vc4->purgeable.num);
+ drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
+ vc4->purgeable.size / 1024, vc4->purgeable.num);
if (vc4->purgeable.purged_num)
- DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
- vc4->purgeable.purged_size / 1024,
- vc4->purgeable.purged_num);
+ drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
+ vc4->purgeable.purged_size / 1024,
+ vc4->purgeable.purged_num);
mutex_unlock(&vc4->purgeable.lock);
}
-#ifdef CONFIG_DEBUG_FS
-int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
+static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- mutex_lock(&vc4->bo_lock);
- for (i = 0; i < vc4->num_labels; i++) {
- if (!vc4->bo_labels[i].num_allocated)
- continue;
-
- seq_printf(m, "%30s: %6dkb BOs (%d)\n",
- vc4->bo_labels[i].name,
- vc4->bo_labels[i].size_allocated / 1024,
- vc4->bo_labels[i].num_allocated);
- }
- mutex_unlock(&vc4->bo_lock);
-
- mutex_lock(&vc4->purgeable.lock);
- if (vc4->purgeable.num)
- seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
- vc4->purgeable.size / 1024, vc4->purgeable.num);
+ struct drm_printer p = drm_seq_file_printer(m);
- if (vc4->purgeable.purged_num)
- seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
- vc4->purgeable.purged_size / 1024,
- vc4->purgeable.purged_num);
- mutex_unlock(&vc4->purgeable.lock);
+ vc4_bo_stats_print(&p, vc4);
return 0;
}
-#endif
/* Takes ownership of *name and returns the appropriate slot for it in
* the bo_labels[] array, extending it as necessary.
@@ -201,8 +178,6 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- reservation_object_fini(&bo->_resv);
-
drm_gem_cma_free_object(obj);
}
@@ -427,8 +402,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
mutex_unlock(&vc4->bo_lock);
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
return &bo->base.base;
}
@@ -479,8 +452,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
}
if (IS_ERR(cma_obj)) {
+ struct drm_printer p = drm_info_printer(vc4->dev->dev);
DRM_ERROR("Failed to allocate from CMA:\n");
- vc4_bo_stats_dump(vc4);
+ vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
bo = to_vc4_bo(&cma_obj->base);
@@ -684,13 +658,6 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
schedule_work(&vc4->bo_cache.time_work);
}
-struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- return bo->resv;
-}
-
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
@@ -822,14 +789,12 @@ vc4_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct vc4_bo *bo;
obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj))
return obj;
- bo = to_vc4_bo(obj);
- bo->resv = attach->dmabuf->resv;
+ obj->resv = attach->dmabuf->resv;
return obj;
}
@@ -1038,6 +1003,8 @@ int vc4_bo_cache_init(struct drm_device *dev)
mutex_init(&vc4->bo_lock);
+ vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);
+
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
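vc4_bo_stats_print() above shows the drm_printer idea: one formatting routine can feed both the debugfs seq_file and the kernel log, so the DRM_INFO and seq_printf() variants no longer have to be kept in sync. A minimal sketch with made-up stats:

#include <linux/device.h>
#include <linux/seq_file.h>
#include <drm/drm_print.h>

/* One printing routine, two sinks. */
static void my_stats_print(struct drm_printer *p, int allocated_kb)
{
	drm_printf(p, "BOs allocated: %dkb\n", allocated_kb);
}

/* debugfs path: output lands in the seq_file. */
static int my_stats_debugfs(struct seq_file *m, int allocated_kb)
{
	struct drm_printer p = drm_seq_file_printer(m);

	my_stats_print(&p, allocated_kb);
	return 0;
}

/* error path: the same output goes to the kernel log via dev_info(). */
static void my_stats_dump(struct device *dev, int allocated_kb)
{
	struct drm_printer p = drm_info_printer(dev);

	my_stats_print(&p, allocated_kb);
}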
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 730008d3da76..5e09389e1514 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -35,6 +35,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
#include <drm/drm_fb_cma_helper.h>
@@ -67,67 +68,22 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
-#define CRTC_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} crtc_regs[] = {
- CRTC_REG(PV_CONTROL),
- CRTC_REG(PV_V_CONTROL),
- CRTC_REG(PV_VSYNCD_EVEN),
- CRTC_REG(PV_HORZA),
- CRTC_REG(PV_HORZB),
- CRTC_REG(PV_VERTA),
- CRTC_REG(PV_VERTB),
- CRTC_REG(PV_VERTA_EVEN),
- CRTC_REG(PV_VERTB_EVEN),
- CRTC_REG(PV_INTEN),
- CRTC_REG(PV_INTSTAT),
- CRTC_REG(PV_STAT),
- CRTC_REG(PV_HACT_ACT),
+static const struct debugfs_reg32 crtc_regs[] = {
+ VC4_REG32(PV_CONTROL),
+ VC4_REG32(PV_V_CONTROL),
+ VC4_REG32(PV_VSYNCD_EVEN),
+ VC4_REG32(PV_HORZA),
+ VC4_REG32(PV_HORZB),
+ VC4_REG32(PV_VERTA),
+ VC4_REG32(PV_VERTB),
+ VC4_REG32(PV_VERTA_EVEN),
+ VC4_REG32(PV_VERTB_EVEN),
+ VC4_REG32(PV_INTEN),
+ VC4_REG32(PV_INTSTAT),
+ VC4_REG32(PV_STAT),
+ VC4_REG32(PV_HACT_ACT),
};
-static void vc4_crtc_dump_regs(struct vc4_crtc *vc4_crtc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- crtc_regs[i].reg, crtc_regs[i].name,
- CRTC_READ(crtc_regs[i].reg));
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- int crtc_index = (uintptr_t)node->info_ent->data;
- struct drm_crtc *crtc;
- struct vc4_crtc *vc4_crtc;
- int i;
-
- i = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (i == crtc_index)
- break;
- i++;
- }
- if (!crtc)
- return 0;
- vc4_crtc = to_vc4_crtc(crtc);
-
- for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- crtc_regs[i].name, crtc_regs[i].reg,
- CRTC_READ(crtc_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
@@ -434,8 +390,10 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
bool debug_dump_regs = false;
if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
+ struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+ dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n",
+ drm_crtc_index(crtc));
+ drm_print_regset32(&p, &vc4_crtc->regset);
}
if (vc4_crtc->channel == 2) {
@@ -476,8 +434,10 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
vc4_crtc_lut_load(crtc);
if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
+ struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+ dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs after:\n",
+ drm_crtc_index(crtc));
+ drm_print_regset32(&p, &vc4_crtc->regset);
}
}
@@ -834,6 +794,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
+
+ /* Wait for the page flip to unmask the underrun to ensure that
+ * the display list was updated by the hardware. Before that
+ * happens, the HVS will be using the previous display list with
+ * the CRTC and encoder already reconfigured, leading to
+ * underruns. This can be seen when reconfiguring the CRTC.
+ */
+ vc4_hvs_unmask_underrun(dev, vc4_crtc->channel);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -1042,7 +1010,7 @@ static void
vc4_crtc_reset(struct drm_crtc *crtc)
{
if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ vc4_crtc_destroy_state(crtc, crtc->state);
crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
if (crtc->state)
@@ -1075,6 +1043,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_crtc_data pv0_data = {
.hvs_channel = 0,
+ .debugfs_name = "crtc0_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
@@ -1083,6 +1052,7 @@ static const struct vc4_crtc_data pv0_data = {
static const struct vc4_crtc_data pv1_data = {
.hvs_channel = 2,
+ .debugfs_name = "crtc1_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
@@ -1091,6 +1061,7 @@ static const struct vc4_crtc_data pv1_data = {
static const struct vc4_crtc_data pv2_data = {
.hvs_channel = 1,
+ .debugfs_name = "crtc2_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
@@ -1169,11 +1140,16 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
if (!match)
return -ENODEV;
vc4_crtc->data = match->data;
+ vc4_crtc->pdev = pdev;
vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vc4_crtc->regs))
return PTR_ERR(vc4_crtc->regs);
+ vc4_crtc->regset.base = vc4_crtc->regs;
+ vc4_crtc->regset.regs = crtc_regs;
+ vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs);
+
/* For now, we create just the primary and the legacy cursor
* planes. We should be able to stack more planes on easily,
* but to do that we would need to compute the bandwidth
@@ -1247,6 +1223,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
platform_set_drvdata(pdev, vc4_crtc);
+ vc4_debugfs_add_regset32(drm, vc4_crtc->data->debugfs_name,
+ &vc4_crtc->regset);
+
return 0;
err_destroy_planes:
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 7a0003de71ab..f9dec08267dc 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -15,26 +15,82 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-static const struct drm_info_list vc4_debugfs_list[] = {
- {"bo_stats", vc4_bo_stats_debugfs, 0},
- {"dpi_regs", vc4_dpi_debugfs_regs, 0},
- {"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
- {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
- {"vec_regs", vc4_vec_debugfs_regs, 0},
- {"txp_regs", vc4_txp_debugfs_regs, 0},
- {"hvs_regs", vc4_hvs_debugfs_regs, 0},
- {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
- {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
- {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
- {"v3d_ident", vc4_v3d_debugfs_ident, 0},
- {"v3d_regs", vc4_v3d_debugfs_regs, 0},
+struct vc4_debugfs_info_entry {
+ struct list_head link;
+ struct drm_info_list info;
};
-#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
-
+/**
+ * Called at drm_dev_register() time on each of the minors registered
+ * by the DRM device, to attach the debugfs files.
+ */
int
vc4_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
+ struct vc4_debugfs_info_entry *entry;
+ struct dentry *dentry;
+
+ dentry = debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ &vc4->load_tracker_enabled);
+ if (!dentry)
+ return -ENOMEM;
+
+ list_for_each_entry(entry, &vc4->debugfs_list, link) {
+ int ret = drm_debugfs_create_files(&entry->info, 1,
+ minor->debugfs_root, minor);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct debugfs_regset32 *regset = node->info_ent->data;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_print_regset32(&p, regset);
+
+ return 0;
+}
+
+/**
+ * Registers a debugfs file with a callback function for a vc4 component.
+ *
+ * This is like drm_debugfs_create_files(), but that can only be
+ * called on a given DRM minor, while the various VC4 components want to
+ * register their debugfs files during the component bind process. We
+ * track the request and delay it to be called on each minor during
+ * vc4_debugfs_init().
+ */
+void vc4_debugfs_add_file(struct drm_device *dev,
+ const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ struct vc4_debugfs_info_entry *entry =
+ devm_kzalloc(dev->dev, sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return;
+
+ entry->info.name = name;
+ entry->info.show = show;
+ entry->info.data = data;
+
+ list_add(&entry->link, &vc4->debugfs_list);
+}
+
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *name,
+ struct debugfs_regset32 *regset)
+{
+ vc4_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
}
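The new vc4_debugfs_add_file()/vc4_debugfs_add_regset32() helpers queue entries on vc4->debugfs_list at component bind time and only pass them to drm_debugfs_create_files() once the minor exists, in vc4_debugfs_init(). From a component's side the usage reduces to filling a debugfs_regset32 and one call, as the dpi and crtc hunks below do; a rough sketch with a hypothetical component (assumes vc4_drv.h/vc4_regs.h for VC4_REG32() and the register names):

#include <linux/debugfs.h>
#include <linux/kernel.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

/* Sketch only: "my_comp" and the debugfs file name are hypothetical. */
struct my_comp {
	void __iomem *regs;
	struct debugfs_regset32 regset;
};

static const struct debugfs_reg32 my_comp_regs[] = {
	VC4_REG32(DPI_C),
	VC4_REG32(DPI_ID),
};

static void my_comp_register_debugfs(struct drm_device *drm,
				     struct my_comp *comp)
{
	comp->regset.base = comp->regs;
	comp->regset.regs = my_comp_regs;
	comp->regset.nregs = ARRAY_SIZE(my_comp_regs);

	/* Deferred: the file only shows up once vc4_debugfs_init() runs
	 * for the registered minor.
	 */
	vc4_debugfs_add_regset32(drm, "my_comp_regs", &comp->regset);
}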
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 169521e547ba..34f90ca8f479 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -101,6 +101,8 @@ struct vc4_dpi {
struct clk *pixel_clock;
struct clk *core_clock;
+
+ struct debugfs_regset32 regset;
};
#define DPI_READ(offset) readl(dpi->regs + (offset))
@@ -118,37 +120,11 @@ to_vc4_dpi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dpi_encoder, base.base);
}
-#define DPI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} dpi_regs[] = {
- DPI_REG(DPI_C),
- DPI_REG(DPI_ID),
+static const struct debugfs_reg32 dpi_regs[] = {
+ VC4_REG32(DPI_C),
+ VC4_REG32(DPI_ID),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_dpi *dpi = vc4->dpi;
- int i;
-
- if (!dpi)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- dpi_regs[i].name, dpi_regs[i].reg,
- DPI_READ(dpi_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -314,6 +290,9 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
dpi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dpi->regs))
return PTR_ERR(dpi->regs);
+ dpi->regset.base = dpi->regs;
+ dpi->regset.regs = dpi_regs;
+ dpi->regset.nregs = ARRAY_SIZE(dpi_regs);
if (DPI_READ(DPI_ID) != DPI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -352,6 +331,8 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
vc4->dpi = dpi;
+ vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
+
return 0;
err_destroy_encoder:
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5fcd2f0da7f7..6d9be20a32be 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -72,30 +72,30 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
+ if (!vc4->v3d)
+ return -ENODEV;
+
switch (args->param) {
case DRM_VC4_PARAM_V3D_IDENT0:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT0);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT1);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT2);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
case DRM_VC4_PARAM_SUPPORTS_ETC1:
@@ -200,7 +200,6 @@ static struct drm_driver vc4_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = vc4_prime_export,
- .gem_prime_res_obj = vc4_prime_res_obj,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = vc4_prime_import_sg_table,
.gem_prime_vmap = vc4_prime_vmap,
@@ -252,6 +251,7 @@ static int vc4_drm_bind(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm;
struct vc4_dev *vc4;
+ struct device_node *node;
int ret = 0;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
@@ -260,12 +260,19 @@ static int vc4_drm_bind(struct device *dev)
if (!vc4)
return -ENOMEM;
+ /* If VC4 V3D is missing, don't advertise render nodes. */
+ node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
+ if (!node || !of_device_is_available(node))
+ vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
+ of_node_put(node);
+
drm = drm_dev_alloc(&vc4_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
platform_set_drvdata(pdev, drm);
vc4->dev = drm;
drm->dev_private = vc4;
+ INIT_LIST_HEAD(&vc4->debugfs_list);
ret = vc4_bo_cache_init(drm);
if (ret)
@@ -281,13 +288,15 @@ static int vc4_drm_bind(struct device *dev)
drm_fb_helper_remove_conflicting_framebuffers(NULL, "vc4drmfb", false);
- ret = drm_dev_register(drm, 0);
+ ret = vc4_kms_load(drm);
if (ret < 0)
goto unbind_all;
- vc4_kms_load(drm);
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto unbind_all;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_generic_setup(drm, 16);
return 0;
@@ -312,6 +321,7 @@ static void vc4_drm_unbind(struct device *dev)
drm_mode_config_cleanup(drm);
+ drm_atomic_private_obj_fini(&vc4->load_tracker);
drm_atomic_private_obj_fini(&vc4->ctm_manager);
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 2c635f001c71..4f13f6262491 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -7,7 +7,6 @@
*/
#include <linux/mm_types.h>
-#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_util.h>
#include <drm/drm_encoder.h>
@@ -185,10 +184,20 @@ struct vc4_dev {
/* Bitmask of the current bin_alloc used for overflow memory. */
uint32_t bin_alloc_overflow;
+ /* Incremented when an underrun error happened after an atomic commit.
+ * This is particularly useful to detect when a specific modeset is too
+ * demanding in terms of memory or HVS bandwidth, which is hard to guess
+ * at atomic check time.
+ */
+ atomic_t underrun;
+
struct work_struct overflow_mem_work;
int power_refcount;
+ /* Set to true when the load tracker is active. */
+ bool load_tracker_enabled;
+
/* Mutex controlling the power refcount. */
struct mutex power_lock;
@@ -201,6 +210,12 @@ struct vc4_dev {
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
+ struct drm_private_obj load_tracker;
+
+ /* List of vc4_debugfs_info_entry for adding to debugfs once
+ * the minor is available (after drm_dev_register()).
+ */
+ struct list_head debugfs_list;
};
static inline struct vc4_dev *
@@ -240,10 +255,6 @@ struct vc4_bo {
*/
struct vc4_validated_shader_info *validated_shader;
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
-
/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
* for user-allocated labels.
*/
@@ -290,6 +301,7 @@ struct vc4_v3d {
struct platform_device *pdev;
void __iomem *regs;
struct clk *clk;
+ struct debugfs_regset32 regset;
};
struct vc4_hvs {
@@ -306,6 +318,7 @@ struct vc4_hvs {
spinlock_t mm_lock;
struct drm_mm_node mitchell_netravali_filter;
+ struct debugfs_regset32 regset;
};
struct vc4_plane {
@@ -376,6 +389,16 @@ struct vc4_plane_state {
* when async update is not possible.
*/
bool dlist_initialized;
+
+ /* Load of this plane on the HVS block. The load is expressed in HVS
+ * cycles/sec.
+ */
+ u64 hvs_load;
+
+ /* Memory bandwidth needed for this plane. This is expressed in
+ * bytes/sec.
+ */
+ u64 membus_load;
};
static inline struct vc4_plane_state *
@@ -411,10 +434,12 @@ struct vc4_crtc_data {
int hvs_channel;
enum vc4_encoder_type encoder_types[4];
+ const char *debugfs_name;
};
struct vc4_crtc {
struct drm_crtc base;
+ struct platform_device *pdev;
const struct vc4_crtc_data *data;
void __iomem *regs;
@@ -431,6 +456,8 @@ struct vc4_crtc {
u32 cob_size;
struct drm_pending_vblank_event *event;
+
+ struct debugfs_regset32 regset;
};
static inline struct vc4_crtc *
@@ -444,6 +471,8 @@ to_vc4_crtc(struct drm_crtc *crtc)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
+#define VC4_REG32(reg) { .name = #reg, .offset = reg }
+
struct vc4_exec_info {
/* Sequence number for this bin/render job. */
uint64_t seqno;
@@ -685,7 +714,6 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -693,7 +721,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
-int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
@@ -701,7 +728,6 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
-int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
@@ -714,17 +740,37 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void vc4_debugfs_add_file(struct drm_device *drm,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data);
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset);
+#else
+static inline void vc4_debugfs_add_file(struct drm_device *drm,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{
+}
+
+static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset)
+{
+}
+#endif
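These helpers let each sub-driver register its debugfs entries from its bind() callback; the entries are presumably parked on the new debugfs_list in struct vc4_dev and only turned into real drm_debugfs files from vc4_debugfs_init(), once the DRM minor exists. The !CONFIG_DEBUG_FS stubs keep the call sites free of #ifdefs. A hedged sketch of the calling convention used by the HVS, DSI, VEC, TXP and V3D hunks below (the "foo" names are placeholders):

static struct debugfs_regset32 foo_regset;	/* filled in at bind time */

static int foo_debugfs_show(struct seq_file *m, void *data)
{
	seq_puts(m, "example entry\n");
	return 0;
}

static void foo_register_debugfs(struct drm_device *drm)
{
	vc4_debugfs_add_regset32(drm, "foo_regs", &foo_regset);
	vc4_debugfs_add_file(drm, "foo_state", foo_debugfs_show, NULL);
}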
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
-int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
-int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;
@@ -752,15 +798,12 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
-int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
-int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;
-int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
@@ -772,7 +815,8 @@ void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
-int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);
+void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
+void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);
@@ -787,9 +831,10 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
-int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
-int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
+extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
+int vc4_v3d_pm_get(struct vc4_dev *vc4);
+void vc4_v3d_pm_put(struct vc4_dev *vc4);
/* vc4_validate.c */
int
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 11702e1d9011..9412709067f5 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -545,6 +545,8 @@ struct vc4_dsi {
struct completion xfer_completion;
int xfer_result;
+
+ struct debugfs_regset32 regset;
};
#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
@@ -605,113 +607,56 @@ to_vc4_dsi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dsi_encoder, base.base);
}
-#define DSI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} dsi0_regs[] = {
- DSI_REG(DSI0_CTRL),
- DSI_REG(DSI0_STAT),
- DSI_REG(DSI0_HSTX_TO_CNT),
- DSI_REG(DSI0_LPRX_TO_CNT),
- DSI_REG(DSI0_TA_TO_CNT),
- DSI_REG(DSI0_PR_TO_CNT),
- DSI_REG(DSI0_DISP0_CTRL),
- DSI_REG(DSI0_DISP1_CTRL),
- DSI_REG(DSI0_INT_STAT),
- DSI_REG(DSI0_INT_EN),
- DSI_REG(DSI0_PHYC),
- DSI_REG(DSI0_HS_CLT0),
- DSI_REG(DSI0_HS_CLT1),
- DSI_REG(DSI0_HS_CLT2),
- DSI_REG(DSI0_HS_DLT3),
- DSI_REG(DSI0_HS_DLT4),
- DSI_REG(DSI0_HS_DLT5),
- DSI_REG(DSI0_HS_DLT6),
- DSI_REG(DSI0_HS_DLT7),
- DSI_REG(DSI0_PHY_AFEC0),
- DSI_REG(DSI0_PHY_AFEC1),
- DSI_REG(DSI0_ID),
+static const struct debugfs_reg32 dsi0_regs[] = {
+ VC4_REG32(DSI0_CTRL),
+ VC4_REG32(DSI0_STAT),
+ VC4_REG32(DSI0_HSTX_TO_CNT),
+ VC4_REG32(DSI0_LPRX_TO_CNT),
+ VC4_REG32(DSI0_TA_TO_CNT),
+ VC4_REG32(DSI0_PR_TO_CNT),
+ VC4_REG32(DSI0_DISP0_CTRL),
+ VC4_REG32(DSI0_DISP1_CTRL),
+ VC4_REG32(DSI0_INT_STAT),
+ VC4_REG32(DSI0_INT_EN),
+ VC4_REG32(DSI0_PHYC),
+ VC4_REG32(DSI0_HS_CLT0),
+ VC4_REG32(DSI0_HS_CLT1),
+ VC4_REG32(DSI0_HS_CLT2),
+ VC4_REG32(DSI0_HS_DLT3),
+ VC4_REG32(DSI0_HS_DLT4),
+ VC4_REG32(DSI0_HS_DLT5),
+ VC4_REG32(DSI0_HS_DLT6),
+ VC4_REG32(DSI0_HS_DLT7),
+ VC4_REG32(DSI0_PHY_AFEC0),
+ VC4_REG32(DSI0_PHY_AFEC1),
+ VC4_REG32(DSI0_ID),
};
-static const struct {
- u32 reg;
- const char *name;
-} dsi1_regs[] = {
- DSI_REG(DSI1_CTRL),
- DSI_REG(DSI1_STAT),
- DSI_REG(DSI1_HSTX_TO_CNT),
- DSI_REG(DSI1_LPRX_TO_CNT),
- DSI_REG(DSI1_TA_TO_CNT),
- DSI_REG(DSI1_PR_TO_CNT),
- DSI_REG(DSI1_DISP0_CTRL),
- DSI_REG(DSI1_DISP1_CTRL),
- DSI_REG(DSI1_INT_STAT),
- DSI_REG(DSI1_INT_EN),
- DSI_REG(DSI1_PHYC),
- DSI_REG(DSI1_HS_CLT0),
- DSI_REG(DSI1_HS_CLT1),
- DSI_REG(DSI1_HS_CLT2),
- DSI_REG(DSI1_HS_DLT3),
- DSI_REG(DSI1_HS_DLT4),
- DSI_REG(DSI1_HS_DLT5),
- DSI_REG(DSI1_HS_DLT6),
- DSI_REG(DSI1_HS_DLT7),
- DSI_REG(DSI1_PHY_AFEC0),
- DSI_REG(DSI1_PHY_AFEC1),
- DSI_REG(DSI1_ID),
+static const struct debugfs_reg32 dsi1_regs[] = {
+ VC4_REG32(DSI1_CTRL),
+ VC4_REG32(DSI1_STAT),
+ VC4_REG32(DSI1_HSTX_TO_CNT),
+ VC4_REG32(DSI1_LPRX_TO_CNT),
+ VC4_REG32(DSI1_TA_TO_CNT),
+ VC4_REG32(DSI1_PR_TO_CNT),
+ VC4_REG32(DSI1_DISP0_CTRL),
+ VC4_REG32(DSI1_DISP1_CTRL),
+ VC4_REG32(DSI1_INT_STAT),
+ VC4_REG32(DSI1_INT_EN),
+ VC4_REG32(DSI1_PHYC),
+ VC4_REG32(DSI1_HS_CLT0),
+ VC4_REG32(DSI1_HS_CLT1),
+ VC4_REG32(DSI1_HS_CLT2),
+ VC4_REG32(DSI1_HS_DLT3),
+ VC4_REG32(DSI1_HS_DLT4),
+ VC4_REG32(DSI1_HS_DLT5),
+ VC4_REG32(DSI1_HS_DLT6),
+ VC4_REG32(DSI1_HS_DLT7),
+ VC4_REG32(DSI1_PHY_AFEC0),
+ VC4_REG32(DSI1_PHY_AFEC1),
+ VC4_REG32(DSI1_ID),
};
-static void vc4_dsi_dump_regs(struct vc4_dsi *dsi)
-{
- int i;
-
- if (dsi->port == 0) {
- for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- dsi0_regs[i].reg, dsi0_regs[i].name,
- DSI_READ(dsi0_regs[i].reg));
- }
- } else {
- for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- dsi1_regs[i].reg, dsi1_regs[i].name,
- DSI_READ(dsi1_regs[i].reg));
- }
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- int dsi_index = (uintptr_t)node->info_ent->data;
- struct vc4_dsi *dsi = (dsi_index == 1 ? vc4->dsi1 : NULL);
- int i;
-
- if (!dsi)
- return 0;
-
- if (dsi->port == 0) {
- for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
- seq_printf(m, "0x%04x (%s): 0x%08x\n",
- dsi0_regs[i].reg, dsi0_regs[i].name,
- DSI_READ(dsi0_regs[i].reg));
- }
- } else {
- for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
- seq_printf(m, "0x%04x (%s): 0x%08x\n",
- dsi1_regs[i].reg, dsi1_regs[i].name,
- DSI_READ(dsi1_regs[i].reg));
- }
- }
-
- return 0;
-}
-#endif
-
static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -900,8 +845,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
}
if (debug_dump_regs) {
- DRM_INFO("DSI regs before:\n");
- vc4_dsi_dump_regs(dsi);
+ struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
+ dev_info(&dsi->pdev->dev, "DSI regs before:\n");
+ drm_print_regset32(&p, &dsi->regset);
}
/* Round up the clk_set_rate() request slightly, since
@@ -1135,8 +1081,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
drm_bridge_enable(dsi->bridge);
if (debug_dump_regs) {
- DRM_INFO("DSI regs after:\n");
- vc4_dsi_dump_regs(dsi);
+ struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
+ dev_info(&dsi->pdev->dev, "DSI regs after:\n");
+ drm_print_regset32(&p, &dsi->regset);
}
}
@@ -1527,6 +1474,15 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
+ dsi->regset.base = dsi->regs;
+ if (dsi->port == 0) {
+ dsi->regset.regs = dsi0_regs;
+ dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
+ } else {
+ dsi->regset.regs = dsi1_regs;
+ dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
+ }
+
if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
DSI_PORT_READ(ID), DSI_ID_VALUE);
@@ -1662,6 +1618,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
*/
dsi->encoder->bridge = NULL;
+ if (dsi->port == 0)
+ vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
+ else
+ vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index aea2b8dfec17..d9311be32a4f 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -74,6 +74,11 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
u32 i;
int ret = 0;
+ if (!vc4->v3d) {
+ DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
spin_lock_irqsave(&vc4->job_lock, irqflags);
kernel_state = vc4->hang_state;
if (!kernel_state) {
@@ -536,7 +541,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- reservation_object_add_shared_fence(bo->resv, exec->fence);
+ reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -547,7 +552,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
bo->write_seqno = seqno;
- reservation_object_add_excl_fence(bo->resv, exec->fence);
+ reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
}
}
@@ -559,7 +564,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
int i;
for (i = 0; i < exec->bo_count; i++) {
- struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+ struct drm_gem_object *bo = &exec->bo[i]->base;
ww_mutex_unlock(&bo->resv->lock);
}
@@ -581,13 +586,13 @@ vc4_lock_bo_reservations(struct drm_device *dev,
{
int contended_lock = -1;
int i, ret;
- struct vc4_bo *bo;
+ struct drm_gem_object *bo;
ww_acquire_init(acquire_ctx, &reservation_ww_class);
retry:
if (contended_lock != -1) {
- bo = to_vc4_bo(&exec->bo[contended_lock]->base);
+ bo = &exec->bo[contended_lock]->base;
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
acquire_ctx);
if (ret) {
@@ -600,19 +605,19 @@ retry:
if (i == contended_lock)
continue;
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = &exec->bo[i]->base;
ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
- bo = to_vc4_bo(&exec->bo[j]->base);
+ bo = &exec->bo[j]->base;
ww_mutex_unlock(&bo->resv->lock);
}
if (contended_lock != -1 && contended_lock >= i) {
- bo = to_vc4_bo(&exec->bo[contended_lock]->base);
+ bo = &exec->bo[contended_lock]->base;
ww_mutex_unlock(&bo->resv->lock);
}
@@ -633,7 +638,7 @@ retry:
* before we commit the CL to the hardware.
*/
for (i = 0; i < exec->bo_count; i++) {
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = &exec->bo[i]->base;
ret = reservation_object_reserve_shared(bo->resv, 1);
if (ret) {
@@ -964,12 +969,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
/* Release the reference we had on the perf monitor. */
vc4_perfmon_put(exec->perfmon);
- mutex_lock(&vc4->power_lock);
- if (--vc4->power_refcount == 0) {
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
- }
- mutex_unlock(&vc4->power_lock);
+ vc4_v3d_pm_put(vc4);
kfree(exec);
}
@@ -1124,6 +1124,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct dma_fence *in_fence;
int ret = 0;
+ if (!vc4->v3d) {
+ DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
VC4_SUBMIT_CL_FIXED_RCL_ORDER |
VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
@@ -1143,17 +1148,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
- mutex_lock(&vc4->power_lock);
- if (vc4->power_refcount++ == 0) {
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0) {
- mutex_unlock(&vc4->power_lock);
- vc4->power_refcount--;
- kfree(exec);
- return ret;
- }
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret) {
+ kfree(exec);
+ return ret;
}
- mutex_unlock(&vc4->power_lock);
exec->args = args;
INIT_LIST_HEAD(&exec->unref_list);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 88fd5df7e7dc..99fc8569e0f5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -97,6 +97,9 @@ struct vc4_hdmi {
struct clk *pixel_clock;
struct clk *hsm_clock;
+
+ struct debugfs_regset32 hdmi_regset;
+ struct debugfs_regset32 hd_regset;
};
#define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + offset)
@@ -134,103 +137,69 @@ to_vc4_hdmi_connector(struct drm_connector *connector)
return container_of(connector, struct vc4_hdmi_connector, base);
}
-#define HDMI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} hdmi_regs[] = {
- HDMI_REG(VC4_HDMI_CORE_REV),
- HDMI_REG(VC4_HDMI_SW_RESET_CONTROL),
- HDMI_REG(VC4_HDMI_HOTPLUG_INT),
- HDMI_REG(VC4_HDMI_HOTPLUG),
- HDMI_REG(VC4_HDMI_MAI_CHANNEL_MAP),
- HDMI_REG(VC4_HDMI_MAI_CONFIG),
- HDMI_REG(VC4_HDMI_MAI_FORMAT),
- HDMI_REG(VC4_HDMI_AUDIO_PACKET_CONFIG),
- HDMI_REG(VC4_HDMI_RAM_PACKET_CONFIG),
- HDMI_REG(VC4_HDMI_HORZA),
- HDMI_REG(VC4_HDMI_HORZB),
- HDMI_REG(VC4_HDMI_FIFO_CTL),
- HDMI_REG(VC4_HDMI_SCHEDULER_CONTROL),
- HDMI_REG(VC4_HDMI_VERTA0),
- HDMI_REG(VC4_HDMI_VERTA1),
- HDMI_REG(VC4_HDMI_VERTB0),
- HDMI_REG(VC4_HDMI_VERTB1),
- HDMI_REG(VC4_HDMI_TX_PHY_RESET_CTL),
- HDMI_REG(VC4_HDMI_TX_PHY_CTL0),
-
- HDMI_REG(VC4_HDMI_CEC_CNTRL_1),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_2),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_3),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_4),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_5),
- HDMI_REG(VC4_HDMI_CPU_STATUS),
- HDMI_REG(VC4_HDMI_CPU_MASK_STATUS),
-
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_1),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_2),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_3),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_4),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_1),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_2),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_3),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_4),
+static const struct debugfs_reg32 hdmi_regs[] = {
+ VC4_REG32(VC4_HDMI_CORE_REV),
+ VC4_REG32(VC4_HDMI_SW_RESET_CONTROL),
+ VC4_REG32(VC4_HDMI_HOTPLUG_INT),
+ VC4_REG32(VC4_HDMI_HOTPLUG),
+ VC4_REG32(VC4_HDMI_MAI_CHANNEL_MAP),
+ VC4_REG32(VC4_HDMI_MAI_CONFIG),
+ VC4_REG32(VC4_HDMI_MAI_FORMAT),
+ VC4_REG32(VC4_HDMI_AUDIO_PACKET_CONFIG),
+ VC4_REG32(VC4_HDMI_RAM_PACKET_CONFIG),
+ VC4_REG32(VC4_HDMI_HORZA),
+ VC4_REG32(VC4_HDMI_HORZB),
+ VC4_REG32(VC4_HDMI_FIFO_CTL),
+ VC4_REG32(VC4_HDMI_SCHEDULER_CONTROL),
+ VC4_REG32(VC4_HDMI_VERTA0),
+ VC4_REG32(VC4_HDMI_VERTA1),
+ VC4_REG32(VC4_HDMI_VERTB0),
+ VC4_REG32(VC4_HDMI_VERTB1),
+ VC4_REG32(VC4_HDMI_TX_PHY_RESET_CTL),
+ VC4_REG32(VC4_HDMI_TX_PHY_CTL0),
+
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_1),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_2),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_3),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_4),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_5),
+ VC4_REG32(VC4_HDMI_CPU_STATUS),
+ VC4_REG32(VC4_HDMI_CPU_MASK_STATUS),
+
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_1),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_2),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_3),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_4),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_1),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_2),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_3),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_4),
};
-static const struct {
- u32 reg;
- const char *name;
-} hd_regs[] = {
- HDMI_REG(VC4_HD_M_CTL),
- HDMI_REG(VC4_HD_MAI_CTL),
- HDMI_REG(VC4_HD_MAI_THR),
- HDMI_REG(VC4_HD_MAI_FMT),
- HDMI_REG(VC4_HD_MAI_SMP),
- HDMI_REG(VC4_HD_VID_CTL),
- HDMI_REG(VC4_HD_CSC_CTL),
- HDMI_REG(VC4_HD_FRAME_COUNT),
+static const struct debugfs_reg32 hd_regs[] = {
+ VC4_REG32(VC4_HD_M_CTL),
+ VC4_REG32(VC4_HD_MAI_CTL),
+ VC4_REG32(VC4_HD_MAI_THR),
+ VC4_REG32(VC4_HD_MAI_FMT),
+ VC4_REG32(VC4_HD_MAI_SMP),
+ VC4_REG32(VC4_HD_VID_CTL),
+ VC4_REG32(VC4_HD_CSC_CTL),
+ VC4_REG32(VC4_HD_FRAME_COUNT),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hdmi_regs[i].name, hdmi_regs[i].reg,
- HDMI_READ(hdmi_regs[i].reg));
- }
+ struct vc4_hdmi *hdmi = vc4->hdmi;
+ struct drm_printer p = drm_seq_file_printer(m);
- for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hd_regs[i].name, hd_regs[i].reg,
- HD_READ(hd_regs[i].reg));
- }
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
return 0;
}
-#endif /* CONFIG_DEBUG_FS */
-
-static void vc4_hdmi_dump_regs(struct drm_device *dev)
-{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hdmi_regs[i].reg, hdmi_regs[i].name,
- HDMI_READ(hdmi_regs[i].reg));
- }
- for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hd_regs[i].reg, hd_regs[i].name,
- HD_READ(hd_regs[i].reg));
- }
-}
static enum drm_connector_status
vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
@@ -561,8 +530,11 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0);
if (debug_dump_regs) {
- DRM_INFO("HDMI regs before:\n");
- vc4_hdmi_dump_regs(dev);
+ struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
+
+ dev_info(&hdmi->pdev->dev, "HDMI regs before:\n");
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
}
HD_WRITE(VC4_HD_VID_CTL, 0);
@@ -637,8 +609,11 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
if (debug_dump_regs) {
- DRM_INFO("HDMI regs after:\n");
- vc4_hdmi_dump_regs(dev);
+ struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
+
+ dev_info(&hdmi->pdev->dev, "HDMI regs after:\n");
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
}
HD_WRITE(VC4_HD_VID_CTL,
@@ -1333,6 +1308,13 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hdmi->hd_regs))
return PTR_ERR(hdmi->hd_regs);
+ hdmi->hdmi_regset.base = hdmi->hdmicore_regs;
+ hdmi->hdmi_regset.regs = hdmi_regs;
+ hdmi->hdmi_regset.nregs = ARRAY_SIZE(hdmi_regs);
+ hdmi->hd_regset.base = hdmi->hd_regs;
+ hdmi->hd_regset.regs = hd_regs;
+ hdmi->hd_regset.nregs = ARRAY_SIZE(hd_regs);
+
hdmi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(hdmi->pixel_clock)) {
DRM_ERROR("Failed to get pixel clock\n");
@@ -1448,6 +1430,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_destroy_encoder;
+ vc4_debugfs_add_file(drm, "hdmi_regs", vc4_hdmi_debugfs_regs, hdmi);
+
return 0;
#ifdef CONFIG_DRM_VC4_HDMI_CEC
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 5d8c749c9749..f746e9a7a88c 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -22,58 +22,52 @@
* each CRTC.
*/
+#include <drm/drm_atomic_helper.h>
#include <linux/component.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
-#define HVS_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} hvs_regs[] = {
- HVS_REG(SCALER_DISPCTRL),
- HVS_REG(SCALER_DISPSTAT),
- HVS_REG(SCALER_DISPID),
- HVS_REG(SCALER_DISPECTRL),
- HVS_REG(SCALER_DISPPROF),
- HVS_REG(SCALER_DISPDITHER),
- HVS_REG(SCALER_DISPEOLN),
- HVS_REG(SCALER_DISPLIST0),
- HVS_REG(SCALER_DISPLIST1),
- HVS_REG(SCALER_DISPLIST2),
- HVS_REG(SCALER_DISPLSTAT),
- HVS_REG(SCALER_DISPLACT0),
- HVS_REG(SCALER_DISPLACT1),
- HVS_REG(SCALER_DISPLACT2),
- HVS_REG(SCALER_DISPCTRL0),
- HVS_REG(SCALER_DISPBKGND0),
- HVS_REG(SCALER_DISPSTAT0),
- HVS_REG(SCALER_DISPBASE0),
- HVS_REG(SCALER_DISPCTRL1),
- HVS_REG(SCALER_DISPBKGND1),
- HVS_REG(SCALER_DISPSTAT1),
- HVS_REG(SCALER_DISPBASE1),
- HVS_REG(SCALER_DISPCTRL2),
- HVS_REG(SCALER_DISPBKGND2),
- HVS_REG(SCALER_DISPSTAT2),
- HVS_REG(SCALER_DISPBASE2),
- HVS_REG(SCALER_DISPALPHA2),
- HVS_REG(SCALER_OLEDOFFS),
- HVS_REG(SCALER_OLEDCOEF0),
- HVS_REG(SCALER_OLEDCOEF1),
- HVS_REG(SCALER_OLEDCOEF2),
+static const struct debugfs_reg32 hvs_regs[] = {
+ VC4_REG32(SCALER_DISPCTRL),
+ VC4_REG32(SCALER_DISPSTAT),
+ VC4_REG32(SCALER_DISPID),
+ VC4_REG32(SCALER_DISPECTRL),
+ VC4_REG32(SCALER_DISPPROF),
+ VC4_REG32(SCALER_DISPDITHER),
+ VC4_REG32(SCALER_DISPEOLN),
+ VC4_REG32(SCALER_DISPLIST0),
+ VC4_REG32(SCALER_DISPLIST1),
+ VC4_REG32(SCALER_DISPLIST2),
+ VC4_REG32(SCALER_DISPLSTAT),
+ VC4_REG32(SCALER_DISPLACT0),
+ VC4_REG32(SCALER_DISPLACT1),
+ VC4_REG32(SCALER_DISPLACT2),
+ VC4_REG32(SCALER_DISPCTRL0),
+ VC4_REG32(SCALER_DISPBKGND0),
+ VC4_REG32(SCALER_DISPSTAT0),
+ VC4_REG32(SCALER_DISPBASE0),
+ VC4_REG32(SCALER_DISPCTRL1),
+ VC4_REG32(SCALER_DISPBKGND1),
+ VC4_REG32(SCALER_DISPSTAT1),
+ VC4_REG32(SCALER_DISPBASE1),
+ VC4_REG32(SCALER_DISPCTRL2),
+ VC4_REG32(SCALER_DISPBKGND2),
+ VC4_REG32(SCALER_DISPSTAT2),
+ VC4_REG32(SCALER_DISPBASE2),
+ VC4_REG32(SCALER_DISPALPHA2),
+ VC4_REG32(SCALER_OLEDOFFS),
+ VC4_REG32(SCALER_OLEDCOEF0),
+ VC4_REG32(SCALER_OLEDCOEF1),
+ VC4_REG32(SCALER_OLEDCOEF2),
};
void vc4_hvs_dump_state(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_printer p = drm_info_printer(&vc4->hvs->pdev->dev);
int i;
- for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hvs_regs[i].reg, hvs_regs[i].name,
- HVS_READ(hvs_regs[i].reg));
- }
+ drm_print_regset32(&p, &vc4->hvs->regset);
DRM_INFO("HVS ctx:\n");
for (i = 0; i < 64; i += 4) {
@@ -86,23 +80,17 @@ void vc4_hvs_dump_state(struct drm_device *dev)
}
}
-#ifdef CONFIG_DEBUG_FS
-int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
+ struct drm_printer p = drm_seq_file_printer(m);
- for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hvs_regs[i].name, hvs_regs[i].reg,
- HVS_READ(hvs_regs[i].reg));
- }
+ drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));
return 0;
}
-#endif
/* The filter kernel is composed of dwords each containing 3 9-bit
* signed integers packed next to each other.
@@ -166,6 +154,67 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
return 0;
}
+void vc4_hvs_mask_underrun(struct drm_device *dev, int channel)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+
+ dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
+
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+}
+
+void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+
+ dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
+
+ HVS_WRITE(SCALER_DISPSTAT,
+ SCALER_DISPSTAT_EUFLOW(channel));
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+}
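Masking and unmasking are split so that the callers added elsewhere in this patch can each do half the job: vc4_atomic_complete_commit() masks the interrupt for every channel it is about to reconfigure, and the IRQ handler below masks a channel that just reported an underrun. Re-arming is expected to happen from the CRTC side once the new display list is latched (that hunk is not shown here); a sketch of what such a call site would look like, with the function name invented for illustration:

/* Hypothetical re-arm site -- the real one lives in vc4_crtc.c. */
static void foo_crtc_rearm_underrun(struct drm_crtc *crtc)
{
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

	/* Clears the stale EUFLOW status, then sets DSPEISLUR again. */
	vc4_hvs_unmask_underrun(crtc->dev, vc4_crtc->channel);
}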
+
+static void vc4_hvs_report_underrun(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ atomic_inc(&vc4->underrun);
+ DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
+}
+
+static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
+{
+ struct drm_device *dev = data;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ irqreturn_t irqret = IRQ_NONE;
+ int channel;
+ u32 control;
+ u32 status;
+
+ status = HVS_READ(SCALER_DISPSTAT);
+ control = HVS_READ(SCALER_DISPCTRL);
+
+ for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
+ /* Interrupt masking is not always honored, so check it here. */
+ if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
+ control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
+ vc4_hvs_mask_underrun(dev, channel);
+ vc4_hvs_report_underrun(dev);
+
+ irqret = IRQ_HANDLED;
+ }
+ }
+
+ /* Clear every per-channel interrupt flag. */
+ HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
+ SCALER_DISPSTAT_IRQMASK(1) |
+ SCALER_DISPSTAT_IRQMASK(2));
+
+ return irqret;
+}
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -185,6 +234,10 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hvs->regs))
return PTR_ERR(hvs->regs);
+ hvs->regset.base = hvs->regs;
+ hvs->regset.regs = hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
+
hvs->dlist = hvs->regs + SCALER_DLIST_START;
spin_lock_init(&hvs->mm_lock);
@@ -219,15 +272,40 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_ENABLE;
+ dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
+ SCALER_DISPCTRL_DISPEIRQ(1) |
+ SCALER_DISPCTRL_DISPEIRQ(2);
/* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
* be unused.
*/
dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+ SCALER_DISPCTRL_SLVWREIRQ |
+ SCALER_DISPCTRL_SLVRDEIRQ |
+ SCALER_DISPCTRL_DSPEIEOF(0) |
+ SCALER_DISPCTRL_DSPEIEOF(1) |
+ SCALER_DISPCTRL_DSPEIEOF(2) |
+ SCALER_DISPCTRL_DSPEIEOLN(0) |
+ SCALER_DISPCTRL_DSPEIEOLN(1) |
+ SCALER_DISPCTRL_DSPEIEOLN(2) |
+ SCALER_DISPCTRL_DSPEISLUR(0) |
+ SCALER_DISPCTRL_DSPEISLUR(1) |
+ SCALER_DISPCTRL_DSPEISLUR(2) |
+ SCALER_DISPCTRL_SCLEIRQ);
dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
+ if (ret)
+ return ret;
+
+ vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
+ vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
+ NULL);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 4cd2ccfe15f4..ffd0a4388752 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -229,6 +229,9 @@ vc4_irq_preinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return;
+
init_waitqueue_head(&vc4->job_wait_queue);
INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
@@ -243,6 +246,9 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return 0;
+
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
@@ -254,6 +260,9 @@ vc4_irq_uninstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return;
+
/* Disable sending interrupts for our driver's IRQs. */
V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 91b8c72ff361..295dacc8bcb9 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -34,6 +34,18 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
return container_of(priv, struct vc4_ctm_state, base);
}
+struct vc4_load_tracker_state {
+ struct drm_private_state base;
+ u64 hvs_load;
+ u64 membus_load;
+};
+
+static struct vc4_load_tracker_state *
+to_vc4_load_tracker_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct vc4_load_tracker_state, base);
+}
+
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
struct drm_private_obj *manager)
{
@@ -138,6 +150,16 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc;
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
+ continue;
+
+ vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
+ vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
+ }
drm_atomic_helper_wait_for_fences(dev, state, false);
@@ -385,6 +407,85 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
return 0;
}
+static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct vc4_load_tracker_state *load_state;
+ struct drm_private_state *priv_state;
+ struct drm_plane *plane;
+ int i;
+
+ priv_state = drm_atomic_get_private_obj_state(state,
+ &vc4->load_tracker);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ load_state = to_vc4_load_tracker_state(priv_state);
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct vc4_plane_state *vc4_plane_state;
+
+ if (old_plane_state->fb && old_plane_state->crtc) {
+ vc4_plane_state = to_vc4_plane_state(old_plane_state);
+ load_state->membus_load -= vc4_plane_state->membus_load;
+ load_state->hvs_load -= vc4_plane_state->hvs_load;
+ }
+
+ if (new_plane_state->fb && new_plane_state->crtc) {
+ vc4_plane_state = to_vc4_plane_state(new_plane_state);
+ load_state->membus_load += vc4_plane_state->membus_load;
+ load_state->hvs_load += vc4_plane_state->hvs_load;
+ }
+ }
+
+ /* Don't check the load when the tracker is disabled. */
+ if (!vc4->load_tracker_enabled)
+ return 0;
+
+ /* The absolute limit is 2 Gbytes/sec, but keep a margin so the system
+ * still works while other blocks are accessing the memory.
+ */
+ if (load_state->membus_load > SZ_1G + SZ_512M)
+ return -ENOSPC;
+
+ /* The HVS clock is supposed to run at 250 MHz, so take a margin and
+ * consider the maximum number of cycles to be 240M.
+ */
+ if (load_state->hvs_load > 240000000ULL)
+ return -ENOSPC;
+
+ return 0;
+}
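For a sense of scale (numbers derived from the per-plane formula in vc4_plane_calc_load() further down): a single unscaled 1920x1080 ARGB8888 plane refreshed at 60 Hz contributes roughly 1920 * 1080 * 4 * 60 ≈ 498 MB/s of memory bandwidth and (1920 * 1080 * 60) / 4 ≈ 31 M HVS cycles/s, comfortably below both thresholds; the check only starts rejecting configurations once several large or heavily downscaled planes are stacked.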
+
+static struct drm_private_state *
+vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
+{
+ struct vc4_load_tracker_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct vc4_load_tracker_state *load_state;
+
+ load_state = to_vc4_load_tracker_state(state);
+ kfree(load_state);
+}
+
+static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
+ .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
+ .atomic_destroy_state = vc4_load_tracker_destroy_state,
+};
+
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
@@ -394,7 +495,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
if (ret < 0)
return ret;
- return drm_atomic_helper_check(dev, state);
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ return vc4_load_tracker_atomic_check(state);
}
static const struct drm_mode_config_funcs vc4_mode_funcs = {
@@ -407,13 +512,20 @@ int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_ctm_state *ctm_state;
+ struct vc4_load_tracker_state *load_state;
int ret;
+ /* Start with the load tracker enabled. Can be disabled through the
+ * debugfs load_tracker file.
+ */
+ vc4->load_tracker_enabled = true;
+
sema_init(&vc4->async_modeset, 1);
/* Set support for vblank irq fast disable, before drm_vblank_init() */
dev->vblank_disable_immediate = true;
+ dev->irq_enabled = true;
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
@@ -436,6 +548,15 @@ int vc4_kms_load(struct drm_device *dev)
drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
&vc4_ctm_state_funcs);
+ load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+ if (!load_state) {
+ drm_atomic_private_obj_fini(&vc4->ctm_manager);
+ return -ENOMEM;
+ }
+
+ drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
+ &vc4_load_tracker_state_funcs);
+
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index 495150415020..f4aa75efd16b 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -100,12 +100,18 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_create *req = data;
struct vc4_perfmon *perfmon;
unsigned int i;
int ret;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
/* Number of monitored counters cannot exceed HW limits. */
if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS ||
!req->ncounters)
@@ -146,10 +152,16 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_remove(&vc4file->perfmon.idr, req->id);
mutex_unlock(&vc4file->perfmon.lock);
@@ -164,11 +176,17 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_get_values *req = data;
struct vc4_perfmon *perfmon;
int ret;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_find(&vc4file->perfmon.idr, req->id);
vc4_perfmon_get(perfmon);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d098337c10e9..4d918d3e4858 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -488,6 +488,61 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
}
}
+static void vc4_plane_calc_load(struct drm_plane_state *state)
+{
+ unsigned int hvs_load_shift, vrefresh, i;
+ struct drm_framebuffer *fb = state->fb;
+ struct vc4_plane_state *vc4_state;
+ struct drm_crtc_state *crtc_state;
+ unsigned int vscale_factor;
+
+ vc4_state = to_vc4_plane_state(state);
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);
+
+ /* The HVS is able to process 2 pixels/cycle when scaling the source,
+ * 4 pixels/cycle otherwise.
+ * The alpha blending step appears to be pipelined and always operates
+ * at 4 pixels/cycle, so the limiting factor here seems to be the
+ * scaler block.
+ * HVS load is expressed in clk-cycles/sec (AKA Hz).
+ */
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE)
+ hvs_load_shift = 1;
+ else
+ hvs_load_shift = 2;
+
+ vc4_state->membus_load = 0;
+ vc4_state->hvs_load = 0;
+ for (i = 0; i < fb->format->num_planes; i++) {
+ /* Even if the bandwidth/plane required for a single frame is
+ *
+ * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
+ *
+ * when downscaling, we have to read more pixels per line in
+ * the time frame reserved for a single line, so the bandwidth
+ * demand can be momentarily higher. To account for that, we
+ * calculate the down-scaling factor and multiply the plane
+ * load by this number. We're likely over-estimating the read
+ * demand, but that's better than under-estimating it.
+ */
+ vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
+ vc4_state->crtc_h);
+ vc4_state->membus_load += vc4_state->src_w[i] *
+ vc4_state->src_h[i] * vscale_factor *
+ fb->format->cpp[i];
+ vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
+ }
+
+ vc4_state->hvs_load *= vrefresh;
+ vc4_state->hvs_load >>= hvs_load_shift;
+ vc4_state->membus_load *= vrefresh;
+}
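The arithmetic above is easy to sanity-check outside the kernel. A small stand-alone program (user space, plane parameters invented for illustration) mirrors the per-plane formula and compares the result against the thresholds used by vc4_load_tracker_atomic_check() earlier in this patch:

#include <stdint.h>
#include <stdio.h>

#define MEMBUS_LIMIT	(1536ULL * 1024 * 1024)	/* SZ_1G + SZ_512M */
#define HVS_LIMIT	240000000ULL		/* cycles/sec */

int main(void)
{
	/* Hypothetical plane: 1080p ARGB8888 at 60 Hz, downscaled 2x vertically. */
	uint64_t src_w = 1920, src_h = 2160, crtc_w = 1920, crtc_h = 1080;
	uint64_t cpp = 4, vrefresh = 60;
	unsigned int hvs_load_shift = 1;	/* scaling in use: 2 pixels/cycle */

	uint64_t vscale = (src_h + crtc_h - 1) / crtc_h;	/* DIV_ROUND_UP */
	uint64_t membus = src_w * src_h * vscale * cpp * vrefresh;
	uint64_t hvs = (crtc_w * crtc_h * vrefresh) >> hvs_load_shift;

	printf("membus load: %llu B/s (%s)\n", (unsigned long long)membus,
	       membus > MEMBUS_LIMIT ? "over the 1.5 GB/s budget" : "ok");
	printf("hvs load: %llu cycles/s (%s)\n", (unsigned long long)hvs,
	       hvs > HVS_LIMIT ? "over budget" : "ok");
	return 0;
}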
+
static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
@@ -875,6 +930,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
*/
vc4_state->dlist_initialized = 1;
+ vc4_plane_calc_load(state);
+
return 0;
}
@@ -1082,7 +1139,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
- fence = reservation_object_get_excl_rcu(bo->resv);
+ fence = reservation_object_get_excl_rcu(bo->base.base.resv);
drm_atomic_set_fence_for_plane(state, fence);
if (plane->state->fb == state->fb)
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 931088014272..c0c5fadaf7e3 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -212,11 +212,11 @@
#define PV_HACT_ACT 0x30
+#define SCALER_CHANNELS_COUNT 3
+
#define SCALER_DISPCTRL 0x00000000
/* Global register for clock gating the HVS */
# define SCALER_DISPCTRL_ENABLE BIT(31)
-# define SCALER_DISPCTRL_DSP2EISLUR BIT(15)
-# define SCALER_DISPCTRL_DSP1EISLUR BIT(14)
# define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18)
# define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18
@@ -224,45 +224,25 @@
* SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are
* always enabled.
*/
-# define SCALER_DISPCTRL_DSP0EISLUR BIT(13)
-# define SCALER_DISPCTRL_DSP2EIEOLN BIT(12)
-# define SCALER_DISPCTRL_DSP2EIEOF BIT(11)
-# define SCALER_DISPCTRL_DSP1EIEOLN BIT(10)
-# define SCALER_DISPCTRL_DSP1EIEOF BIT(9)
+# define SCALER_DISPCTRL_DSPEISLUR(x) BIT(13 + (x))
/* Enables Display 0 end-of-line-N contribution to
* SCALER_DISPSTAT_IRQDISP0
*/
-# define SCALER_DISPCTRL_DSP0EIEOLN BIT(8)
+# define SCALER_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 2))
/* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
-# define SCALER_DISPCTRL_DSP0EIEOF BIT(7)
+# define SCALER_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 2))
# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6)
# define SCALER_DISPCTRL_SLVWREIRQ BIT(5)
# define SCALER_DISPCTRL_DMAEIRQ BIT(4)
-# define SCALER_DISPCTRL_DISP2EIRQ BIT(3)
-# define SCALER_DISPCTRL_DISP1EIRQ BIT(2)
/* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
* bits and short frames.
*/
-# define SCALER_DISPCTRL_DISP0EIRQ BIT(1)
+# define SCALER_DISPCTRL_DISPEIRQ(x) BIT(1 + (x))
/* Enables interrupt generation on scaler profiler interrupt. */
# define SCALER_DISPCTRL_SCLEIRQ BIT(0)
#define SCALER_DISPSTAT 0x00000004
-# define SCALER_DISPSTAT_COBLOW2 BIT(29)
-# define SCALER_DISPSTAT_EOLN2 BIT(28)
-# define SCALER_DISPSTAT_ESFRAME2 BIT(27)
-# define SCALER_DISPSTAT_ESLINE2 BIT(26)
-# define SCALER_DISPSTAT_EUFLOW2 BIT(25)
-# define SCALER_DISPSTAT_EOF2 BIT(24)
-
-# define SCALER_DISPSTAT_COBLOW1 BIT(21)
-# define SCALER_DISPSTAT_EOLN1 BIT(20)
-# define SCALER_DISPSTAT_ESFRAME1 BIT(19)
-# define SCALER_DISPSTAT_ESLINE1 BIT(18)
-# define SCALER_DISPSTAT_EUFLOW1 BIT(17)
-# define SCALER_DISPSTAT_EOF1 BIT(16)
-
# define SCALER_DISPSTAT_RESP_MASK VC4_MASK(15, 14)
# define SCALER_DISPSTAT_RESP_SHIFT 14
# define SCALER_DISPSTAT_RESP_OKAY 0
@@ -270,23 +250,26 @@
# define SCALER_DISPSTAT_RESP_SLVERR 2
# define SCALER_DISPSTAT_RESP_DECERR 3
-# define SCALER_DISPSTAT_COBLOW0 BIT(13)
+# define SCALER_DISPSTAT_COBLOW(x) BIT(13 + ((x) * 8))
/* Set when the DISPEOLN line is done compositing. */
-# define SCALER_DISPSTAT_EOLN0 BIT(12)
+# define SCALER_DISPSTAT_EOLN(x) BIT(12 + ((x) * 8))
/* Set when VSTART is seen but there are still pixels in the current
* output line.
*/
-# define SCALER_DISPSTAT_ESFRAME0 BIT(11)
+# define SCALER_DISPSTAT_ESFRAME(x) BIT(11 + ((x) * 8))
/* Set when HSTART is seen but there are still pixels in the current
* output line.
*/
-# define SCALER_DISPSTAT_ESLINE0 BIT(10)
+# define SCALER_DISPSTAT_ESLINE(x) BIT(10 + ((x) * 8))
/* Set when the downstream tries to read from the display FIFO
* while it's empty.
*/
-# define SCALER_DISPSTAT_EUFLOW0 BIT(9)
+# define SCALER_DISPSTAT_EUFLOW(x) BIT(9 + ((x) * 8))
/* Set when the display mode changes from RUN to EOF */
-# define SCALER_DISPSTAT_EOF0 BIT(8)
+# define SCALER_DISPSTAT_EOF(x) BIT(8 + ((x) * 8))
+
+# define SCALER_DISPSTAT_IRQMASK(x) VC4_MASK(13 + ((x) * 8), \
+ 8 + ((x) * 8))
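The parameterized macros above replace the hand-enumerated DSP0/DSP1/DSP2 and *0/*1/*2 bits that this hunk deletes, and the substitution can be checked mechanically. A stand-alone program (BIT()/GENMASK() re-implemented locally for user space; the macro bodies are copied from this hunk) prints where each channel's bits land:

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))
#define VC4_MASK(high, low)	GENMASK(high, low)

#define SCALER_DISPCTRL_DSPEISLUR(x)	BIT(13 + (x))
#define SCALER_DISPSTAT_EUFLOW(x)	BIT(9 + ((x) * 8))
#define SCALER_DISPSTAT_IRQMASK(x)	VC4_MASK(13 + ((x) * 8), 8 + ((x) * 8))

int main(void)
{
	/* Channel 0 prints 0x00002000 / 0x00000200 / 0x00003f00, matching the
	 * removed DSP0EISLUR (bit 13), EUFLOW0 (bit 9) and the DISPSTAT byte
	 * for display 0; channels 1 and 2 land on the old DSP1/DSP2 and
	 * *1/*2 positions.
	 */
	for (int x = 0; x < 3; x++)
		printf("ch%d: DSPEISLUR=0x%08x EUFLOW=0x%08x IRQMASK=0x%08x\n",
		       x, SCALER_DISPCTRL_DSPEISLUR(x),
		       SCALER_DISPSTAT_EUFLOW(x), SCALER_DISPSTAT_IRQMASK(x));
	return 0;
}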
/* Set on AXI invalid DMA ID error. */
# define SCALER_DISPSTAT_DMA_ERROR BIT(7)
@@ -298,12 +281,10 @@
* SCALER_DISPSTAT_RESP_ERROR is not SCALER_DISPSTAT_RESP_OKAY.
*/
# define SCALER_DISPSTAT_IRQDMA BIT(4)
-# define SCALER_DISPSTAT_IRQDISP2 BIT(3)
-# define SCALER_DISPSTAT_IRQDISP1 BIT(2)
/* Set when any of the EOF/EOLN/ESFRAME/ESLINE bits are set and their
* corresponding interrupt bit is enabled in DISPCTRL.
*/
-# define SCALER_DISPSTAT_IRQDISP0 BIT(1)
+# define SCALER_DISPSTAT_IRQDISP(x) BIT(1 + (x))
/* On read, the profiler interrupt. On write, clear *all* interrupt bits. */
# define SCALER_DISPSTAT_IRQSCL BIT(0)
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 273984f71ae2..3c918eeaf56e 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -148,6 +148,12 @@ static void emit_tile(struct vc4_exec_info *exec,
}
if (setup->zs_read) {
+ if (setup->color_read) {
+ /* Exec previous load. */
+ vc4_tile_coordinates(setup, x, y);
+ vc4_store_before_load(setup);
+ }
+
if (args->zs_read.flags &
VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
@@ -156,12 +162,6 @@ static void emit_tile(struct vc4_exec_info *exec,
&args->zs_read, x, y) |
VC4_LOADSTORE_FULL_RES_DISABLE_COLOR);
} else {
- if (setup->color_read) {
- /* Exec previous load. */
- vc4_tile_coordinates(setup, x, y);
- vc4_store_before_load(setup);
- }
-
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->zs_read.bits);
rcl_u32(setup, setup->zs_read->paddr +
@@ -291,16 +291,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
}
}
if (setup->zs_read) {
+ if (setup->color_read) {
+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ }
+
if (args->zs_read.flags &
VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
} else {
- if (setup->color_read &&
- !(args->color_read.flags &
- VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) {
- loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
- loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
- }
loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index aa279b5b0de7..c8b89a78f9f4 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -148,6 +148,7 @@ struct vc4_txp {
struct drm_writeback_connector connector;
void __iomem *regs;
+ struct debugfs_regset32 regset;
};
static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
@@ -160,40 +161,14 @@ static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
return container_of(conn, struct vc4_txp, connector.base);
}
-#define TXP_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} txp_regs[] = {
- TXP_REG(TXP_DST_PTR),
- TXP_REG(TXP_DST_PITCH),
- TXP_REG(TXP_DIM),
- TXP_REG(TXP_DST_CTRL),
- TXP_REG(TXP_PROGRESS),
+static const struct debugfs_reg32 txp_regs[] = {
+ VC4_REG32(TXP_DST_PTR),
+ VC4_REG32(TXP_DST_PITCH),
+ VC4_REG32(TXP_DIM),
+ VC4_REG32(TXP_DST_CTRL),
+ VC4_REG32(TXP_PROGRESS),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_txp_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_txp *txp = vc4->txp;
- int i;
-
- if (!txp)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(txp_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- txp_regs[i].name, txp_regs[i].reg,
- TXP_READ(txp_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static int vc4_txp_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -249,7 +224,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
struct drm_connector_state *conn_state)
{
struct drm_crtc_state *crtc_state;
- struct drm_gem_cma_object *gem;
struct drm_framebuffer *fb;
int i;
@@ -275,8 +249,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
if (i == ARRAY_SIZE(drm_fmts))
return -EINVAL;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
-
/* Pitch must be aligned on 16 bytes. */
if (fb->pitches[0] & GENMASK(3, 0))
return -EINVAL;
@@ -327,7 +299,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
TXP_WRITE(TXP_DST_CTRL, ctrl);
- drm_writeback_queue_job(&txp->connector, conn_state->writeback_job);
+ drm_writeback_queue_job(&txp->connector, conn_state);
}
static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
@@ -413,6 +385,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
+ txp->regset.base = txp->regs;
+ txp->regset.regs = txp_regs;
+ txp->regset.nregs = ARRAY_SIZE(txp_regs);
drm_connector_helper_add(&txp->connector.base,
&vc4_txp_connector_helper_funcs);
@@ -431,6 +406,8 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
dev_set_drvdata(dev, txp);
vc4->txp = txp;
+ vc4_debugfs_add_regset32(drm, "txp_regs", &txp->regset);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e47e29426078..a4b6859e3af6 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -22,129 +22,145 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-#ifdef CONFIG_DEBUG_FS
-#define REGDEF(reg) { reg, #reg }
-static const struct {
- uint32_t reg;
- const char *name;
-} vc4_reg_defs[] = {
- REGDEF(V3D_IDENT0),
- REGDEF(V3D_IDENT1),
- REGDEF(V3D_IDENT2),
- REGDEF(V3D_SCRATCH),
- REGDEF(V3D_L2CACTL),
- REGDEF(V3D_SLCACTL),
- REGDEF(V3D_INTCTL),
- REGDEF(V3D_INTENA),
- REGDEF(V3D_INTDIS),
- REGDEF(V3D_CT0CS),
- REGDEF(V3D_CT1CS),
- REGDEF(V3D_CT0EA),
- REGDEF(V3D_CT1EA),
- REGDEF(V3D_CT0CA),
- REGDEF(V3D_CT1CA),
- REGDEF(V3D_CT00RA0),
- REGDEF(V3D_CT01RA0),
- REGDEF(V3D_CT0LC),
- REGDEF(V3D_CT1LC),
- REGDEF(V3D_CT0PC),
- REGDEF(V3D_CT1PC),
- REGDEF(V3D_PCS),
- REGDEF(V3D_BFC),
- REGDEF(V3D_RFC),
- REGDEF(V3D_BPCA),
- REGDEF(V3D_BPCS),
- REGDEF(V3D_BPOA),
- REGDEF(V3D_BPOS),
- REGDEF(V3D_BXCF),
- REGDEF(V3D_SQRSV0),
- REGDEF(V3D_SQRSV1),
- REGDEF(V3D_SQCNTL),
- REGDEF(V3D_SRQPC),
- REGDEF(V3D_SRQUA),
- REGDEF(V3D_SRQUL),
- REGDEF(V3D_SRQCS),
- REGDEF(V3D_VPACNTL),
- REGDEF(V3D_VPMBASE),
- REGDEF(V3D_PCTRC),
- REGDEF(V3D_PCTRE),
- REGDEF(V3D_PCTR(0)),
- REGDEF(V3D_PCTRS(0)),
- REGDEF(V3D_PCTR(1)),
- REGDEF(V3D_PCTRS(1)),
- REGDEF(V3D_PCTR(2)),
- REGDEF(V3D_PCTRS(2)),
- REGDEF(V3D_PCTR(3)),
- REGDEF(V3D_PCTRS(3)),
- REGDEF(V3D_PCTR(4)),
- REGDEF(V3D_PCTRS(4)),
- REGDEF(V3D_PCTR(5)),
- REGDEF(V3D_PCTRS(5)),
- REGDEF(V3D_PCTR(6)),
- REGDEF(V3D_PCTRS(6)),
- REGDEF(V3D_PCTR(7)),
- REGDEF(V3D_PCTRS(7)),
- REGDEF(V3D_PCTR(8)),
- REGDEF(V3D_PCTRS(8)),
- REGDEF(V3D_PCTR(9)),
- REGDEF(V3D_PCTRS(9)),
- REGDEF(V3D_PCTR(10)),
- REGDEF(V3D_PCTRS(10)),
- REGDEF(V3D_PCTR(11)),
- REGDEF(V3D_PCTRS(11)),
- REGDEF(V3D_PCTR(12)),
- REGDEF(V3D_PCTRS(12)),
- REGDEF(V3D_PCTR(13)),
- REGDEF(V3D_PCTRS(13)),
- REGDEF(V3D_PCTR(14)),
- REGDEF(V3D_PCTRS(14)),
- REGDEF(V3D_PCTR(15)),
- REGDEF(V3D_PCTRS(15)),
- REGDEF(V3D_DBGE),
- REGDEF(V3D_FDBGO),
- REGDEF(V3D_FDBGB),
- REGDEF(V3D_FDBGR),
- REGDEF(V3D_FDBGS),
- REGDEF(V3D_ERRSTAT),
+static const struct debugfs_reg32 v3d_regs[] = {
+ VC4_REG32(V3D_IDENT0),
+ VC4_REG32(V3D_IDENT1),
+ VC4_REG32(V3D_IDENT2),
+ VC4_REG32(V3D_SCRATCH),
+ VC4_REG32(V3D_L2CACTL),
+ VC4_REG32(V3D_SLCACTL),
+ VC4_REG32(V3D_INTCTL),
+ VC4_REG32(V3D_INTENA),
+ VC4_REG32(V3D_INTDIS),
+ VC4_REG32(V3D_CT0CS),
+ VC4_REG32(V3D_CT1CS),
+ VC4_REG32(V3D_CT0EA),
+ VC4_REG32(V3D_CT1EA),
+ VC4_REG32(V3D_CT0CA),
+ VC4_REG32(V3D_CT1CA),
+ VC4_REG32(V3D_CT00RA0),
+ VC4_REG32(V3D_CT01RA0),
+ VC4_REG32(V3D_CT0LC),
+ VC4_REG32(V3D_CT1LC),
+ VC4_REG32(V3D_CT0PC),
+ VC4_REG32(V3D_CT1PC),
+ VC4_REG32(V3D_PCS),
+ VC4_REG32(V3D_BFC),
+ VC4_REG32(V3D_RFC),
+ VC4_REG32(V3D_BPCA),
+ VC4_REG32(V3D_BPCS),
+ VC4_REG32(V3D_BPOA),
+ VC4_REG32(V3D_BPOS),
+ VC4_REG32(V3D_BXCF),
+ VC4_REG32(V3D_SQRSV0),
+ VC4_REG32(V3D_SQRSV1),
+ VC4_REG32(V3D_SQCNTL),
+ VC4_REG32(V3D_SRQPC),
+ VC4_REG32(V3D_SRQUA),
+ VC4_REG32(V3D_SRQUL),
+ VC4_REG32(V3D_SRQCS),
+ VC4_REG32(V3D_VPACNTL),
+ VC4_REG32(V3D_VPMBASE),
+ VC4_REG32(V3D_PCTRC),
+ VC4_REG32(V3D_PCTRE),
+ VC4_REG32(V3D_PCTR(0)),
+ VC4_REG32(V3D_PCTRS(0)),
+ VC4_REG32(V3D_PCTR(1)),
+ VC4_REG32(V3D_PCTRS(1)),
+ VC4_REG32(V3D_PCTR(2)),
+ VC4_REG32(V3D_PCTRS(2)),
+ VC4_REG32(V3D_PCTR(3)),
+ VC4_REG32(V3D_PCTRS(3)),
+ VC4_REG32(V3D_PCTR(4)),
+ VC4_REG32(V3D_PCTRS(4)),
+ VC4_REG32(V3D_PCTR(5)),
+ VC4_REG32(V3D_PCTRS(5)),
+ VC4_REG32(V3D_PCTR(6)),
+ VC4_REG32(V3D_PCTRS(6)),
+ VC4_REG32(V3D_PCTR(7)),
+ VC4_REG32(V3D_PCTRS(7)),
+ VC4_REG32(V3D_PCTR(8)),
+ VC4_REG32(V3D_PCTRS(8)),
+ VC4_REG32(V3D_PCTR(9)),
+ VC4_REG32(V3D_PCTRS(9)),
+ VC4_REG32(V3D_PCTR(10)),
+ VC4_REG32(V3D_PCTRS(10)),
+ VC4_REG32(V3D_PCTR(11)),
+ VC4_REG32(V3D_PCTRS(11)),
+ VC4_REG32(V3D_PCTR(12)),
+ VC4_REG32(V3D_PCTRS(12)),
+ VC4_REG32(V3D_PCTR(13)),
+ VC4_REG32(V3D_PCTRS(13)),
+ VC4_REG32(V3D_PCTR(14)),
+ VC4_REG32(V3D_PCTRS(14)),
+ VC4_REG32(V3D_PCTR(15)),
+ VC4_REG32(V3D_PCTRS(15)),
+ VC4_REG32(V3D_DBGE),
+ VC4_REG32(V3D_FDBGO),
+ VC4_REG32(V3D_FDBGB),
+ VC4_REG32(V3D_FDBGR),
+ VC4_REG32(V3D_FDBGS),
+ VC4_REG32(V3D_ERRSTAT),
};
-int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
- V3D_READ(vc4_reg_defs[i].reg));
+ int ret = vc4_v3d_pm_get(vc4);
+
+ if (ret == 0) {
+ uint32_t ident1 = V3D_READ(V3D_IDENT1);
+ uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
+ uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
+ uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
+
+ seq_printf(m, "Revision: %d\n",
+ VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
+ seq_printf(m, "Slices: %d\n", nslc);
+ seq_printf(m, "TMUs: %d\n", nslc * tups);
+ seq_printf(m, "QPUs: %d\n", nslc * qups);
+ seq_printf(m, "Semaphores: %d\n",
+ VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+ vc4_v3d_pm_put(vc4);
}
return 0;
}
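The derived counts follow from how IDENT1 reports resources per slice: TMUs and QPUs are per-slice figures, so they are multiplied by the slice count before printing. As a purely illustrative reading (not taken from real hardware), an IDENT1 that reports 3 slices with 4 QPUs and 2 TMUs per slice would produce "Slices: 3", "TMUs: 6" and "QPUs: 12".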
-int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
+/**
+ * Wraps pm_runtime_get_sync() in a refcount, so that we can reliably
+ * get the pm_runtime refcount to 0 in vc4_reset().
+ */
+int
+vc4_v3d_pm_get(struct vc4_dev *vc4)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- uint32_t ident1 = V3D_READ(V3D_IDENT1);
- uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
- uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
- uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
-
- seq_printf(m, "Revision: %d\n",
- VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
- seq_printf(m, "Slices: %d\n", nslc);
- seq_printf(m, "TMUs: %d\n", nslc * tups);
- seq_printf(m, "QPUs: %d\n", nslc * qups);
- seq_printf(m, "Semaphores: %d\n",
- VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+ mutex_lock(&vc4->power_lock);
+ if (vc4->power_refcount++ == 0) {
+ int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+
+ if (ret < 0) {
+ vc4->power_refcount--;
+ mutex_unlock(&vc4->power_lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&vc4->power_lock);
return 0;
}
-#endif /* CONFIG_DEBUG_FS */
+
+void
+vc4_v3d_pm_put(struct vc4_dev *vc4)
+{
+ mutex_lock(&vc4->power_lock);
+ if (--vc4->power_refcount == 0) {
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ }
+ mutex_unlock(&vc4->power_lock);
+}
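Callers are now expected to bracket any V3D access with this pair instead of open-coding the power_refcount dance, as the vc4_gem.c hunks above already do for job submission and completion. A minimal sketch of the pairing (the foo name is invented for illustration):

static int foo_touch_v3d(struct vc4_dev *vc4)
{
	int ret;

	ret = vc4_v3d_pm_get(vc4);	/* powers V3D up on the 0 -> 1 edge */
	if (ret)
		return ret;

	/* ... read or write V3D registers, queue work, etc. ... */

	vc4_v3d_pm_put(vc4);		/* autosuspends again on the 1 -> 0 edge */
	return 0;
}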
static void vc4_v3d_init_hw(struct drm_device *dev)
{
@@ -354,6 +370,9 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
v3d->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(v3d->regs))
return PTR_ERR(v3d->regs);
+ v3d->regset.base = v3d->regs;
+ v3d->regset.regs = v3d_regs;
+ v3d->regset.nregs = ARRAY_SIZE(v3d_regs);
vc4->v3d = v3d;
v3d->vc4 = vc4;
@@ -409,6 +428,9 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);
+ vc4_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
+ vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);
+
return 0;
}
@@ -452,7 +474,7 @@ static int vc4_v3d_dev_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id vc4_v3d_dt_match[] = {
+const struct of_device_id vc4_v3d_dt_match[] = {
{ .compatible = "brcm,bcm2835-v3d" },
{ .compatible = "brcm,cygnus-v3d" },
{ .compatible = "brcm,vc4-v3d" },
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 858c3a483229..0a27e48fab31 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -176,6 +176,8 @@ struct vc4_vec {
struct clk *clock;
const struct vc4_vec_tv_mode *tv_mode;
+
+ struct debugfs_regset32 regset;
};
#define VEC_READ(offset) readl(vec->regs + (offset))
@@ -223,59 +225,33 @@ struct vc4_vec_tv_mode {
void (*mode_set)(struct vc4_vec *vec);
};
-#define VEC_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} vec_regs[] = {
- VEC_REG(VEC_WSE_CONTROL),
- VEC_REG(VEC_WSE_WSS_DATA),
- VEC_REG(VEC_WSE_VPS_DATA1),
- VEC_REG(VEC_WSE_VPS_CONTROL),
- VEC_REG(VEC_REVID),
- VEC_REG(VEC_CONFIG0),
- VEC_REG(VEC_SCHPH),
- VEC_REG(VEC_CLMP0_START),
- VEC_REG(VEC_CLMP0_END),
- VEC_REG(VEC_FREQ3_2),
- VEC_REG(VEC_FREQ1_0),
- VEC_REG(VEC_CONFIG1),
- VEC_REG(VEC_CONFIG2),
- VEC_REG(VEC_INTERRUPT_CONTROL),
- VEC_REG(VEC_INTERRUPT_STATUS),
- VEC_REG(VEC_FCW_SECAM_B),
- VEC_REG(VEC_SECAM_GAIN_VAL),
- VEC_REG(VEC_CONFIG3),
- VEC_REG(VEC_STATUS0),
- VEC_REG(VEC_MASK0),
- VEC_REG(VEC_CFG),
- VEC_REG(VEC_DAC_TEST),
- VEC_REG(VEC_DAC_CONFIG),
- VEC_REG(VEC_DAC_MISC),
+static const struct debugfs_reg32 vec_regs[] = {
+ VC4_REG32(VEC_WSE_CONTROL),
+ VC4_REG32(VEC_WSE_WSS_DATA),
+ VC4_REG32(VEC_WSE_VPS_DATA1),
+ VC4_REG32(VEC_WSE_VPS_CONTROL),
+ VC4_REG32(VEC_REVID),
+ VC4_REG32(VEC_CONFIG0),
+ VC4_REG32(VEC_SCHPH),
+ VC4_REG32(VEC_CLMP0_START),
+ VC4_REG32(VEC_CLMP0_END),
+ VC4_REG32(VEC_FREQ3_2),
+ VC4_REG32(VEC_FREQ1_0),
+ VC4_REG32(VEC_CONFIG1),
+ VC4_REG32(VEC_CONFIG2),
+ VC4_REG32(VEC_INTERRUPT_CONTROL),
+ VC4_REG32(VEC_INTERRUPT_STATUS),
+ VC4_REG32(VEC_FCW_SECAM_B),
+ VC4_REG32(VEC_SECAM_GAIN_VAL),
+ VC4_REG32(VEC_CONFIG3),
+ VC4_REG32(VEC_STATUS0),
+ VC4_REG32(VEC_MASK0),
+ VC4_REG32(VEC_CFG),
+ VC4_REG32(VEC_DAC_TEST),
+ VC4_REG32(VEC_DAC_CONFIG),
+ VC4_REG32(VEC_DAC_MISC),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_vec_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_vec *vec = vc4->vec;
- int i;
-
- if (!vec)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(vec_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- vec_regs[i].name, vec_regs[i].reg,
- VEC_READ(vec_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec)
{
VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN);
@@ -587,6 +563,9 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
vec->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vec->regs))
return PTR_ERR(vec->regs);
+ vec->regset.base = vec->regs;
+ vec->regset.regs = vec_regs;
+ vec->regset.nregs = ARRAY_SIZE(vec_regs);
vec->clock = devm_clk_get(dev, NULL);
if (IS_ERR(vec->clock)) {
@@ -612,6 +591,8 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
vc4->vec = vec;
+ vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset);
+
return 0;
err_destroy_encoder:
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 8bf3a7c23ed3..062067438f1d 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -243,7 +243,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
if (NULL == vsg->pages)
return -ENOMEM;
ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
- vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+ vsg->num_pages,
+ vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
vsg->pages);
if (ret != vsg->num_pages) {
if (ret < 0)
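
The via_dmablit.c hunk reflects the get_user_pages_fast() signature change: the third argument is now a gup_flags bitmask rather than a write boolean. A sketch of the converted call pattern under that assumption (the wrapper function is illustrative):

#include <linux/mm.h>

static int pin_user_buffer(unsigned long uaddr, int npages,
			   bool write, struct page **pages)
{
	unsigned int gup_flags = write ? FOLL_WRITE : 0;

	/* Returns the number of pages pinned, or a negative error code. */
	return get_user_pages_fast(uaddr, npages, gup_flags, pages);
}
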
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 73dc99046c43..ed0fcda713c3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -28,6 +28,30 @@
#include "virtgpu_drv.h"
+static void virtio_add_bool(struct seq_file *m, const char *name,
+ bool value)
+{
+ seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
+}
+
+static void virtio_add_int(struct seq_file *m, const char *name,
+ int value)
+{
+ seq_printf(m, "%-16s : %d\n", name, value);
+}
+
+static int virtio_gpu_features(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+
+ virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
+ virtio_add_bool(m, "edid", vgdev->has_edid);
+ virtio_add_int(m, "cap sets", vgdev->num_capsets);
+ virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+ return 0;
+}
+
static int
virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
{
@@ -41,7 +65,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
}
static struct drm_info_list virtio_gpu_debugfs_list[] = {
- { "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+ { "virtio-gpu-features", virtio_gpu_features },
+ { "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
};
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
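
The new virtio-gpu-features file follows the standard drm_info_list pattern. A sketch of how another read-only entry could be added in the same style; the virtio-gpu-scanouts name and callback are illustrative, not part of the patch.

static int virtio_gpu_debugfs_scanouts(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;

	seq_printf(m, "scanouts: %d\n", vgdev->num_scanouts);
	return 0;
}

/* Listed alongside the existing entries:
 *	{ "virtio-gpu-scanouts", virtio_gpu_debugfs_scanouts, 0, NULL },
 */
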
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 653ec7d0bf4d..86843a4d6102 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -385,5 +385,6 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
+ drm_atomic_helper_shutdown(vgdev->ddev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index b996ac1d4fcc..c50868753132 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -205,10 +205,12 @@ static struct drm_driver driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = virtgpu_gem_prime_pin,
- .gem_prime_unpin = virtgpu_gem_prime_unpin,
+ .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3238fdf58eb4..b69ae10ca238 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -50,6 +50,23 @@
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
+struct virtio_gpu_object_params {
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+ unsigned long size;
+ bool dumb;
+ /* 3d */
+ bool virgl;
+ uint32_t target;
+ uint32_t bind;
+ uint32_t depth;
+ uint32_t array_size;
+ uint32_t last_level;
+ uint32_t nr_samples;
+ uint32_t flags;
+};
+
struct virtio_gpu_object {
struct drm_gem_object gem_base;
uint32_t hw_res_handle;
@@ -204,6 +221,9 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+ struct list_head *head);
+void virtio_gpu_unref_list(struct list_head *head);
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
@@ -217,16 +237,17 @@ int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
- uint64_t size,
+ struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p);
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file);
-struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
- size_t size, bool kernel,
- bool pinned);
+struct virtio_gpu_object*
+virtio_gpu_alloc_object(struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -243,9 +264,8 @@ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- uint32_t format,
- uint32_t width,
- uint32_t height);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
@@ -304,7 +324,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- struct virtio_gpu_resource_create_3d *rc_3d);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -332,6 +353,7 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
/* virtio_gpu_fence.c */
+bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -342,8 +364,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
/* virtio_gpu_object */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
- unsigned long size, bool kernel, bool pinned,
- struct virtio_gpu_object **bo_ptr);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
@@ -352,8 +375,10 @@ void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
-int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
-void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 21bd4c4a32d1..87d1966192f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -36,7 +36,7 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
return "controlq";
}
-static bool virtio_signaled(struct dma_fence *f)
+bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -62,7 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
- .signaled = virtio_signaled,
+ .signaled = virtio_fence_signaled,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index f06586393974..1e49e08dd545 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -34,15 +34,16 @@ void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
virtio_gpu_object_unref(&obj);
}
-struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
- size_t size, bool kernel,
- bool pinned)
+struct virtio_gpu_object*
+virtio_gpu_alloc_object(struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
- ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
+ ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
if (ret)
return ERR_PTR(ret);
@@ -51,7 +52,7 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
- uint64_t size,
+ struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
@@ -59,7 +60,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, size, false, false);
+ obj = virtio_gpu_alloc_object(dev, params, NULL);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -82,12 +83,10 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_gem_object *gobj;
- struct virtio_gpu_object *obj;
+ struct virtio_gpu_object_params params = { 0 };
int ret;
uint32_t pitch;
- uint32_t format;
if (args->bpp != 32)
return -EINVAL;
@@ -96,22 +95,16 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
- ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
+ params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
+ params.width = args->width;
+ params.height = args->height;
+ params.size = args->size;
+ params.dumb = true;
+ ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
&args->handle);
if (ret)
goto fail;
- format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
- obj = gem_to_virtio_gpu_obj(gobj);
- virtio_gpu_cmd_create_resource(vgdev, obj, format,
- args->width, args->height);
-
- /* attach the object to the resource */
- ret = virtio_gpu_object_attach(vgdev, obj, NULL);
- if (ret)
- goto fail;
-
- obj->dumb = true;
args->pitch = pitch;
return ret;
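
With the conversion above, callers describe a buffer through virtio_gpu_object_params rather than a bare size. A sketch of a 2D dumb-style allocation under that assumption; the wrapper function is illustrative and mirrors the dumb-create path just converted.

static int example_create_xrgb_bo(struct drm_file *file_priv,
				  struct drm_device *dev,
				  u32 width, u32 height,
				  struct drm_gem_object **gobj, u32 *handle)
{
	struct virtio_gpu_object_params params = { 0 };

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = width;
	params.height = height;
	params.size = PAGE_ALIGN(width * height * 4);
	params.dumb = true;

	/* Resource creation is now queued as part of object creation. */
	return virtio_gpu_gem_create(file_priv, dev, &params, gobj, handle);
}
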
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 14ce8188c052..949a264985fc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -54,8 +54,8 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
&virtio_gpu_map->offset);
}
-static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head)
+int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+ struct list_head *head)
{
struct ttm_operation_ctx ctx = { false, false };
struct ttm_validate_buffer *buf;
@@ -79,7 +79,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
return 0;
}
-static void virtio_gpu_unref_list(struct list_head *head)
+void virtio_gpu_unref_list(struct list_head *head)
{
struct ttm_validate_buffer *buf;
struct ttm_buffer_object *bo;
@@ -275,16 +275,12 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
+ struct virtio_gpu_fence *fence;
int ret;
struct virtio_gpu_object *qobj;
struct drm_gem_object *obj;
uint32_t handle = 0;
- uint32_t size;
- struct list_head validate_list;
- struct ttm_validate_buffer mainbuf;
- struct virtio_gpu_fence *fence = NULL;
- struct ww_acquire_ctx ticket;
- struct virtio_gpu_resource_create_3d rc_3d;
+ struct virtio_gpu_object_params params = { 0 };
if (vgdev->has_virgl_3d == false) {
if (rc->depth > 1)
@@ -299,94 +295,43 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- INIT_LIST_HEAD(&validate_list);
- memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
- size = rc->size;
-
+ params.format = rc->format;
+ params.width = rc->width;
+ params.height = rc->height;
+ params.size = rc->size;
+ if (vgdev->has_virgl_3d) {
+ params.virgl = true;
+ params.target = rc->target;
+ params.bind = rc->bind;
+ params.depth = rc->depth;
+ params.array_size = rc->array_size;
+ params.last_level = rc->last_level;
+ params.nr_samples = rc->nr_samples;
+ params.flags = rc->flags;
+ }
/* allocate a single page size object */
- if (size == 0)
- size = PAGE_SIZE;
+ if (params.size == 0)
+ params.size = PAGE_SIZE;
- qobj = virtio_gpu_alloc_object(dev, size, false, false);
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence)
+ return -ENOMEM;
+ qobj = virtio_gpu_alloc_object(dev, &params, fence);
+ dma_fence_put(&fence->f);
if (IS_ERR(qobj))
return PTR_ERR(qobj);
obj = &qobj->gem_base;
- if (!vgdev->has_virgl_3d) {
- virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
- rc->width, rc->height);
-
- ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
- } else {
- /* use a gem reference since unref list undoes them */
- drm_gem_object_get(&qobj->gem_base);
- mainbuf.bo = &qobj->tbo;
- list_add(&mainbuf.head, &validate_list);
-
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret) {
- DRM_DEBUG("failed to validate\n");
- goto fail_unref;
- }
-
- rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
- rc_3d.target = cpu_to_le32(rc->target);
- rc_3d.format = cpu_to_le32(rc->format);
- rc_3d.bind = cpu_to_le32(rc->bind);
- rc_3d.width = cpu_to_le32(rc->width);
- rc_3d.height = cpu_to_le32(rc->height);
- rc_3d.depth = cpu_to_le32(rc->depth);
- rc_3d.array_size = cpu_to_le32(rc->array_size);
- rc_3d.last_level = cpu_to_le32(rc->last_level);
- rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
- rc_3d.flags = cpu_to_le32(rc->flags);
-
- fence = virtio_gpu_fence_alloc(vgdev);
- if (!fence) {
- ret = -ENOMEM;
- goto fail_backoff;
- }
-
- virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
- ret = virtio_gpu_object_attach(vgdev, qobj, fence);
- if (ret) {
- dma_fence_put(&fence->f);
- goto fail_backoff;
- }
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
- }
-
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
-
drm_gem_object_release(obj);
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
return ret;
}
drm_gem_object_put_unlocked(obj);
rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
-
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
return 0;
-fail_backoff:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
-fail_unref:
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
-//fail_obj:
-// drm_gem_object_handle_unreference_unlocked(obj);
- return ret;
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index e7e946035027..b2da31310d24 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,8 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/ttm/ttm_execbuf_util.h>
+
#include "virtgpu_drv.h"
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
@@ -74,39 +76,34 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
- bool pinned)
+static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
{
u32 c = 1;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
vgbo->placement.placement = &vgbo->placement_code;
vgbo->placement.busy_placement = &vgbo->placement_code;
vgbo->placement_code.fpfn = 0;
vgbo->placement_code.lpfn = 0;
vgbo->placement_code.flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
+ TTM_PL_FLAG_NO_EVICT;
vgbo->placement.num_placement = c;
vgbo->placement.num_busy_placement = c;
}
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
- unsigned long size, bool kernel, bool pinned,
- struct virtio_gpu_object **bo_ptr)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_object *bo;
- enum ttm_bo_type type;
size_t acc_size;
int ret;
- if (kernel)
- type = ttm_bo_type_kernel;
- else
- type = ttm_bo_type_device;
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
+ acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
sizeof(struct virtio_gpu_object));
bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
@@ -117,23 +114,62 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
kfree(bo);
return ret;
}
- size = roundup(size, PAGE_SIZE);
- ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
+ params->size = roundup(params->size, PAGE_SIZE);
+ ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
if (ret != 0) {
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
kfree(bo);
return ret;
}
- bo->dumb = false;
- virtio_gpu_init_ttm_placement(bo, pinned);
+ bo->dumb = params->dumb;
+
+ if (params->virgl) {
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+ } else {
+ virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+ }
- ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, !kernel, acc_size,
- NULL, NULL, &virtio_gpu_ttm_bo_destroy);
+ virtio_gpu_init_ttm_placement(bo);
+ ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
+ ttm_bo_type_device, &bo->placement, 0,
+ true, acc_size, NULL, NULL,
+ &virtio_gpu_ttm_bo_destroy);
/* ttm_bo_init failure will call the destroy */
if (ret != 0)
return ret;
+ if (fence) {
+ struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+ struct list_head validate_list;
+ struct ttm_validate_buffer mainbuf;
+ struct ww_acquire_ctx ticket;
+ unsigned long irq_flags;
+ bool signaled;
+
+ INIT_LIST_HEAD(&validate_list);
+ memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
+
+ /* use a gem reference since unref list undoes them */
+ drm_gem_object_get(&bo->gem_base);
+ mainbuf.bo = &bo->tbo;
+ list_add(&mainbuf.head, &validate_list);
+
+ ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+ if (ret == 0) {
+ spin_lock_irqsave(&drv->lock, irq_flags);
+ signaled = virtio_fence_signaled(&fence->f);
+ if (!signaled)
+ /* virtio create command still in flight */
+ ttm_eu_fence_buffer_objects(&ticket, &validate_list,
+ &fence->f);
+ spin_unlock_irqrestore(&drv->lock, irq_flags);
+ if (signaled)
+ /* virtio create command finished */
+ ttm_eu_backoff_reservation(&ticket, &validate_list);
+ }
+ virtio_gpu_unref_list(&validate_list);
+ }
+
*bo_ptr = bo;
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index c59ec34c80a5..8fbf71bd0c5e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -28,15 +28,23 @@
* device that might share buffers with virtgpu
*/
-int virtgpu_gem_prime_pin(struct drm_gem_object *obj)
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- WARN_ONCE(1, "not implemented");
- return -ENODEV;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
+ /* should not happen */
+ return ERR_PTR(-EINVAL);
+
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+ bo->tbo.ttm->num_pages);
}
-void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
{
- WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENODEV);
}
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
@@ -56,7 +64,10 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
}
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *area)
+ struct vm_area_struct *vma)
{
- return -ENODEV;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
+ return drm_gem_prime_mmap(obj, vma);
}
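
With prime_handle_to_fd/prime_fd_to_handle wired up in virtgpu_drv.c and get_sg_table implemented above, a GEM handle can now be exported as a dma-buf fd through the generic PRIME path. A userspace-side sketch using libdrm (illustrative, not from the patch):

#include <stdint.h>
#include <xf86drm.h>	/* libdrm: drmPrimeHandleToFD() */

static int export_bo_as_dmabuf(int drm_fd, uint32_t gem_handle)
{
	int dmabuf_fd = -1;

	if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, &dmabuf_fd))
		return -1;	/* export failed */

	return dmabuf_fd;	/* can be mmap()ed or imported by another device */
}
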
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 4bfbf25fabff..300ef3a83538 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -37,8 +37,6 @@
#include <linux/delay.h>
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static struct
virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
{
@@ -116,10 +114,6 @@ static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct virtio_gpu_device *vgdev;
-
- vgdev = virtio_gpu_get_vgdev(bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
@@ -194,42 +188,45 @@ static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
*/
struct virtio_gpu_ttm_tt {
struct ttm_dma_tt ttm;
- struct virtio_gpu_device *vgdev;
- u64 offset;
+ struct virtio_gpu_object *obj;
};
-static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
+static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
{
- struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+ struct virtio_gpu_device *vgdev =
+ virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
- if (!ttm->num_pages)
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- ttm->num_pages, bo_mem, ttm);
-
- /* Not implemented */
+ virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
return 0;
}
-static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
{
- /* Not implemented */
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+ struct virtio_gpu_device *vgdev =
+ virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
+
+ virtio_gpu_object_detach(vgdev, gtt->obj);
return 0;
}
-static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
{
- struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
ttm_dma_tt_fini(&gtt->ttm);
kfree(gtt);
}
-static struct ttm_backend_func virtio_gpu_backend_func = {
- .bind = &virtio_gpu_ttm_backend_bind,
- .unbind = &virtio_gpu_ttm_backend_unbind,
- .destroy = &virtio_gpu_ttm_backend_destroy,
+static struct ttm_backend_func virtio_gpu_tt_func = {
+ .bind = &virtio_gpu_ttm_tt_bind,
+ .unbind = &virtio_gpu_ttm_tt_unbind,
+ .destroy = &virtio_gpu_ttm_tt_destroy,
};
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
@@ -242,8 +239,8 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
- gtt->ttm.ttm.func = &virtio_gpu_backend_func;
- gtt->vgdev = vgdev;
+ gtt->ttm.ttm.func = &virtio_gpu_tt_func;
+ gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
@@ -251,58 +248,11 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
return &gtt->ttm.ttm;
}
-static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
-static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
-{
- int ret;
-
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
- if (ret)
- return ret;
-
- virtio_gpu_move_null(bo, new_mem);
- return 0;
-}
-
-static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
- bool evict,
- struct ttm_mem_reg *new_mem)
-{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
- if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
- if (bo->hw_res_handle)
- virtio_gpu_object_detach(vgdev, bo);
-
- } else if (new_mem->placement & TTM_PL_FLAG_TT) {
- if (bo->hw_res_handle) {
- virtio_gpu_object_attach(vgdev, bo, NULL);
- }
- }
-}
-
static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
{
struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
if (bo->pages)
virtio_gpu_object_free_sg_table(bo);
@@ -314,11 +264,9 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.init_mem_type = &virtio_gpu_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &virtio_gpu_evict_flags,
- .move = &virtio_gpu_bo_move,
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
@@ -330,7 +278,7 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
r = ttm_bo_device_init(&vgdev->mman.bdev,
&virtio_gpu_bo_driver,
vgdev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET, 0);
+ false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
goto err_dev_init;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 6bc2008b0d0d..e62fe24b1a2e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -376,9 +376,8 @@ retry:
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- uint32_t format,
- uint32_t width,
- uint32_t height)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -388,11 +387,11 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->format = cpu_to_le32(format);
- cmd_p->width = cpu_to_le32(width);
- cmd_p->height = cpu_to_le32(height);
+ cmd_p->format = cpu_to_le32(params->format);
+ cmd_p->width = cpu_to_le32(params->width);
+ cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
bo->created = true;
}
@@ -828,7 +827,8 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- struct virtio_gpu_resource_create_3d *rc_3d)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -836,11 +836,21 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
- *cmd_p = *rc_3d;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
- cmd_p->hdr.flags = 0;
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->format = cpu_to_le32(params->format);
+ cmd_p->width = cpu_to_le32(params->width);
+ cmd_p->height = cpu_to_le32(params->height);
+
+ cmd_p->target = cpu_to_le32(params->target);
+ cmd_p->bind = cpu_to_le32(params->bind);
+ cmd_p->depth = cpu_to_le32(params->depth);
+ cmd_p->array_size = cpu_to_le32(params->array_size);
+ cmd_p->last_level = cpu_to_le32(params->last_level);
+ cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
+ cmd_p->flags = cpu_to_le32(params->flags);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
bo->created = true;
}
@@ -924,8 +934,8 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct scatterlist *sg;
int si, nents;
- if (!obj->created)
- return 0;
+ if (WARN_ON_ONCE(!obj->created))
+ return -EINVAL;
if (!obj->pages) {
int ret;
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 8a9aeb0a9ea8..bb66dbcd5e3f 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -219,6 +219,8 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
spin_lock_init(&vkms_out->state_lock);
vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
+ if (!vkms_out->crc_workq)
+ return -ENOMEM;
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 0b9ee7fb45d6..66e14e38d5e8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -499,12 +499,9 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdSetShader body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -534,12 +531,9 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
SVGA3dCmdSetRenderTarget body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for render target "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -576,12 +570,9 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
} body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for texture "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
cmd->header.size = sizeof(cmd->body);
@@ -610,12 +601,10 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetShader body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
@@ -641,12 +630,9 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
cmd->header.size = sizeof(cmd->body);
@@ -768,12 +754,9 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader"
- " resource binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
cmd->header.size = sizeof(cmd->body) + view_id_size;
@@ -807,12 +790,9 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX render-target"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
cmd->header.size = sizeof(cmd->body) + view_id_size;
@@ -894,12 +874,9 @@ static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
cmd_size = sizeof(*cmd) + so_target_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX SO target"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
cmd->header.size = sizeof(cmd->body) + so_target_size;
@@ -1011,12 +988,9 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
cmd_size = sizeof(*cmd) + set_vb_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
cmd->header.size = sizeof(cmd->body) + set_vb_size;
@@ -1167,12 +1141,10 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetIndexBuffer body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX index buffer "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
cmd->header.size = sizeof(cmd->body);
if (rebind) {
@@ -1269,6 +1241,32 @@ void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
vmw_binding_drop(entry);
}
+/**
+ * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
+ * @binding_type: The binding type
+ *
+ * Each time a resource is put on the validation list as the result of a
+ * context binding referencing it, we need to determine whether that resource
+ * will be dirtied (written to by the GPU) as a result of the corresponding
+ * GPU operation. Currently rendertarget-, depth-stencil-, and
+ * stream-output-target bindings are capable of dirtying their resource.
+ *
+ * Return: Whether the binding type dirties the resource its binding points to.
+ */
+u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
+{
+ static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
+ [vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_so] = VMW_RES_DIRTY_SET,
+ };
+
+ /* Review this function as new bindings are added. */
+ BUILD_BUG_ON(vmw_ctx_binding_max != 11);
+ return is_binding_dirtying[binding_type];
+}
+
/*
* This function is unused at run-time, and only used to hold various build
* asserts important for code optimization assumptions.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index 6a2a9d69043b..f6ab79d23923 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -205,5 +205,7 @@ extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
extern struct list_head *
vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+extern u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type);
+
#endif
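
A sketch of how the new vmw_binding_dirtying() helper is meant to be consumed: its return value feeds the dirty argument that vmw_validation_add_resource() gains later in this diff. The wrapper below is illustrative; the call matches the execbuf usage shown further down.

static int example_add_binding_res(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   enum vmw_ctx_binding_type bt)
{
	/* VMW_RES_DIRTY_SET for RT/DS/SO bindings, 0 for everything else. */
	u32 dirty = vmw_binding_dirtying(bt);
	void *ptr;

	return vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					   &ptr, NULL);
}
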
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 70dab55e7888..56979e412ca8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -393,6 +393,7 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
+ WARN_ONCE(true, "Command buffer error.\n");
entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
@@ -511,17 +512,14 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
uint32_t dummy;
- bool restart[SVGA_CB_CONTEXT_MAX];
bool send_fence = false;
struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
int i;
struct vmw_cmdbuf_context *ctx;
bool global_block = false;
- for_each_cmdbuf_ctx(man, i, ctx) {
+ for_each_cmdbuf_ctx(man, i, ctx)
INIT_LIST_HEAD(&restart_head[i]);
- restart[i] = false;
- }
mutex_lock(&man->error_mutex);
spin_lock(&man->lock);
@@ -533,23 +531,23 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
const char *cmd_name;
list_del_init(&entry->list);
- restart[entry->cb_context] = true;
global_block = true;
if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
- DRM_ERROR("Unknown command causing device error.\n");
- DRM_ERROR("Command buffer offset is %lu\n",
- (unsigned long) cb_hdr->errorOffset);
+ VMW_DEBUG_USER("Unknown command causing device error.\n");
+ VMW_DEBUG_USER("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
__vmw_cmdbuf_header_free(entry);
send_fence = true;
continue;
}
- DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
- DRM_ERROR("Command buffer offset is %lu\n",
- (unsigned long) cb_hdr->errorOffset);
- DRM_ERROR("Command size is %lu\n",
- (unsigned long) error_cmd_size);
+ VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
+ cmd_name);
+ VMW_DEBUG_USER("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ VMW_DEBUG_USER("Command size is %lu\n",
+ (unsigned long) error_cmd_size);
new_start_offset = cb_hdr->errorOffset + error_cmd_size;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 14bd760a62fd..63f111068a44 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -156,12 +156,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
}
vmw_execbuf_release_pinned_bo(dev_priv);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return;
- }
cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
cmd->header.size = sizeof(cmd->body);
@@ -210,7 +207,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
- if (unlikely(IS_ERR(uctx->cotables[i]))) {
+ if (IS_ERR(uctx->cotables[i])) {
ret = PTR_ERR(uctx->cotables[i]);
goto out_cotables;
}
@@ -259,9 +256,8 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
vmw_resource_unreference(&res);
return -ENOMEM;
}
@@ -311,10 +307,8 @@ static int vmw_gb_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -345,12 +339,10 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
@@ -391,10 +383,8 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "unbinding.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -441,12 +431,9 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -487,10 +474,8 @@ static int vmw_dx_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -521,12 +506,9 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -615,10 +597,8 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "unbinding.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -665,12 +645,9 @@ static int vmw_dx_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -751,7 +728,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
int ret;
if (!dev_priv->has_dx && dx) {
- DRM_ERROR("DX contexts not supported by device.\n");
+ VMW_DEBUG_USER("DX contexts not supported by device.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 44f3f6f107d3..b4f6e1217c9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -171,12 +171,9 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
lockdep_assert_held(&bo->resv->lock.base);
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
@@ -262,12 +259,9 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
if (readback)
submit_size += sizeof(*cmd0);
- cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
- if (!cmd1) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "unbinding.\n");
+ cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (!cmd1)
return -ENOMEM;
- }
vcotbl->size_read_back = 0;
if (readback) {
@@ -351,13 +345,10 @@ static int vmw_cotable_readback(struct vmw_resource *res)
struct vmw_fence_obj *fence;
if (!vcotbl->scrubbed) {
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
- SVGA3D_INVALID_ID);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "readback.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6165fe2c4504..bf6c3500d363 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -546,29 +546,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
}
/**
- * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
- * taking place.
- * @dev: Pointer to the struct drm_device.
- *
- * Return: true if iommu present, false otherwise.
- */
-static bool vmw_assume_iommu(struct drm_device *dev)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev->dev);
-
- return !dma_is_direct(ops) && ops &&
- ops->map_page != dma_direct_map_page;
-}
-
-/**
* vmw_dma_select_mode - Determine how DMA mappings should be set up for this
* system.
*
* @dev_priv: Pointer to a struct vmw_private
*
- * This functions tries to determine the IOMMU setup and what actions
- * need to be taken by the driver to make system pages visible to the
- * device.
+ * This function tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
* If this function decides that DMA is not possible, it returns -EINVAL.
* The driver may then try to disable features of the device that require
* DMA.
@@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
static const char *names[vmw_dma_map_max] = {
[vmw_dma_phys] = "Using physical TTM page addresses.",
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
- [vmw_dma_map_populate] = "Keeping DMA mappings.",
+ [vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
if (vmw_force_coherent)
dev_priv->map_mode = vmw_dma_alloc_coherent;
- else if (vmw_assume_iommu(dev_priv->dev))
- dev_priv->map_mode = vmw_dma_map_populate;
- else if (!vmw_force_iommu)
- dev_priv->map_mode = vmw_dma_phys;
- else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
- dev_priv->map_mode = vmw_dma_alloc_coherent;
+ else if (vmw_restrict_iommu)
+ dev_priv->map_mode = vmw_dma_map_bind;
else
dev_priv->map_mode = vmw_dma_map_populate;
- if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
- dev_priv->map_mode = vmw_dma_map_bind;
-
/* No TTM coherent page pool? FIXME: Ask TTM instead! */
if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
(dev_priv->map_mode == vmw_dma_alloc_coherent))
@@ -851,7 +828,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ret = ttm_bo_device_init(&dev_priv->bdev,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
- VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index accb2fafe2f1..96983c47fb40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -48,7 +48,6 @@
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 15
#define VMWGFX_DRIVER_PATCHLEVEL 0
-#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
@@ -700,6 +699,8 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
uint32_t *inout_id,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
+ bool dirty_set,
+ bool dirty,
bool switch_backup,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
@@ -812,7 +813,6 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
-extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
@@ -828,6 +828,18 @@ extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
bool interruptible);
+#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \
+({ \
+ vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({ \
+ DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \
+ __func__, (unsigned int) __bytes); \
+ NULL; \
+ }); \
+})
+
+#define VMW_FIFO_RESERVE(__priv, __bytes) \
+ VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
+
/**
* TTM glue - vmwgfx_ttm_glue.c
*/
@@ -1312,6 +1324,20 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
int vmw_host_log(const char *log);
+/* VMW logging */
+
+/**
+ * VMW_DEBUG_USER - Debug output for user-space debugging.
+ *
+ * @fmt: printf() like format string.
+ *
+ * This macro is for logging user-space error and debugging messages, e.g.
+ * command buffer execution errors due to malformed commands, invalid context,
+ * etc.
+ */
+#define VMW_DEBUG_USER(fmt, ...) \
+ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
/**
* Inline helper functions
*/
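
The VMW_FIFO_RESERVE()/VMW_FIFO_RESERVE_DX() wrappers above centralize the "FIFO reserve failed" error print, which is why the per-call DRM_ERROR blocks disappear throughout this diff. A sketch of the resulting caller pattern, with a placeholder command body:

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;	/* placeholder command body */
	} *cmd;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;		/* the macro already logged the failure */

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	/* ... fill cmd->body ... */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
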
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 88b8178d4687..2ff7ba04d8c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -36,6 +36,25 @@
#define VMW_RES_HT_ORDER 12
/*
+ * Helper macro to get dx_ctx_node if available, otherwise print an error
+ * message. This is for use in command verifier functions where, if dx_ctx_node
+ * is not set, the command is invalid.
+ */
+#define VMW_GET_CTX_NODE(__sw_context) \
+({ \
+ __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
+ VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
+ __sw_context->dx_ctx_node; \
+ }); \
+})
+
+#define VMW_DECLARE_CMD_VAR(__var, __type) \
+ struct { \
+ SVGA3dCmdHeader header; \
+ __type body; \
+ } __var
+
+/**
* struct vmw_relocation - Buffer object relocation
*
* @head: List head for the command submission context's relocation list
@@ -59,9 +78,8 @@ struct vmw_relocation {
* command stream is replaced with the actual id after validation.
* @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
* with a NOP.
- * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
- * after validation is -1, the command is replaced with a NOP. Otherwise no
- * action.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
+ * validation is -1, the command is replaced with a NOP. Otherwise no action.
*/
enum vmw_resource_relocation_type {
vmw_res_rel_normal,
@@ -75,8 +93,8 @@ enum vmw_resource_relocation_type {
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of single byte entries into the command buffer where the
- * id that needs fixup is located.
+ * @offset: Offset of single byte entries into the command buffer where the id
+ * that needs fixup is located.
* @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
@@ -86,8 +104,9 @@ struct vmw_resource_relocation {
enum vmw_resource_relocation_type rel_type:3;
};
-/*
+/**
* struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ *
* @head: List head of context list
* @ctx: The context resource
* @cur: The context's persistent binding state
@@ -142,9 +161,10 @@ static size_t vmw_ptr_diff(void *a, void *b)
/**
* vmw_execbuf_bindings_commit - Commit modified binding state
+ *
* @sw_context: The command submission context
- * @backoff: Whether this is part of the error path and binding state
- * changes should be ignored
+ * @backoff: Whether this is part of the error path and binding state changes
+ * should be ignored
*/
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
bool backoff)
@@ -154,6 +174,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
list_for_each_entry(entry, &sw_context->ctx_list, head) {
if (!backoff)
vmw_binding_state_commit(entry->cur, entry->staged);
+
if (entry->staged != sw_context->staged_bindings)
vmw_binding_state_free(entry->staged);
else
@@ -166,6 +187,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
/**
* vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ *
* @sw_context: The command submission context
*/
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
@@ -176,8 +198,8 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
- * added to the validate list.
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
+ * the validate list.
*
* @dev_priv: Pointer to the device private:
* @sw_context: The command submission context
@@ -195,11 +217,8 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
goto out_err;
if (!sw_context->staged_bindings) {
- sw_context->staged_bindings =
- vmw_binding_state_alloc(dev_priv);
+ sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(sw_context->staged_bindings)) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
ret = PTR_ERR(sw_context->staged_bindings);
sw_context->staged_bindings = NULL;
goto out_err;
@@ -209,8 +228,6 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
if (sw_context->staged_bindings_inuse) {
node->staged = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(node->staged)) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
ret = PTR_ERR(node->staged);
node->staged = NULL;
goto out_err;
@@ -225,19 +242,20 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
list_add_tail(&node->head, &sw_context->ctx_list);
return 0;
+
out_err:
return ret;
}
/**
- * vmw_execbuf_res_size - calculate extra size fore the resource validation
- * node
+ * vmw_execbuf_res_size - calculate extra size for the resource validation node
+ *
* @dev_priv: Pointer to the device private struct.
* @res_type: The resource type.
*
- * Guest-backed contexts and DX contexts require extra size to store
- * execbuf private information in the validation node. Typically the
- * binding manager associated data structures.
+ * Guest-backed contexts and DX contexts require extra size to store execbuf
+ * private information in the validation node. Typically the binding manager
+ * associated data structures.
*
* Returns: The extra size requirement based on resource type.
*/
@@ -254,8 +272,8 @@ static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
*
* @rcache: Pointer to the entry to update.
* @res: Pointer to the resource.
- * @private: Pointer to the execbuf-private space in the resource
- * validation node.
+ * @private: Pointer to the execbuf-private space in the resource validation
+ * node.
*/
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
struct vmw_resource *res,
@@ -268,17 +286,19 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
}
/**
- * vmw_execbuf_res_noref_val_add - Add a resource described by an
- * unreferenced rcu-protected pointer to the validation list.
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
+ * rcu-protected pointer to the validation list.
+ *
* @sw_context: Pointer to the software context.
* @res: Unreferenced rcu-protected pointer to the resource.
+ * @dirty: Whether to change dirty status.
*
- * Returns: 0 on success. Negative error code on failure. Typical error
- * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
- * doomed.
+ * Returns: 0 on success. Negative error code on failure. Typical error codes
+ * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
*/
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res)
+ struct vmw_resource *res,
+ u32 dirty)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -290,13 +310,17 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
rcache = &sw_context->res_cache[res_type];
if (likely(rcache->valid && rcache->res == res)) {
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
vmw_user_resource_noref_release();
return 0;
}
priv_size = vmw_execbuf_res_size(dev_priv, res_type);
ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
- (void **)&ctx_info, &first_usage);
+ dirty, (void **)&ctx_info,
+ &first_usage);
vmw_user_resource_noref_release();
if (ret)
return ret;
@@ -304,8 +328,10 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
if (priv_size && first_usage) {
ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
ctx_info);
- if (ret)
+ if (ret) {
+ VMW_DEBUG_USER("Failed first usage context setup.\n");
return ret;
+ }
}
vmw_execbuf_rcache_update(rcache, res, ctx_info);
@@ -315,13 +341,16 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
/**
* vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
* validation list if it's not already on it
+ *
* @sw_context: Pointer to the software context.
* @res: Pointer to the resource.
+ * @dirty: Whether to change dirty status.
*
* Returns: Zero on success. Negative error code on failure.
*/
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res)
+ struct vmw_resource *res,
+ u32 dirty)
{
struct vmw_res_cache_entry *rcache;
enum vmw_res_type res_type = vmw_res_type(res);
@@ -329,10 +358,15 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
int ret;
rcache = &sw_context->res_cache[res_type];
- if (likely(rcache->valid && rcache->res == res))
+ if (likely(rcache->valid && rcache->res == res)) {
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
return 0;
+ }
- ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
+ ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+ &ptr, NULL);
if (ret)
return ret;
@@ -342,8 +376,8 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
}
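
The new @dirty argument threaded through vmw_execbuf_res_noref_val_add(), vmw_execbuf_res_noctx_val_add() and vmw_cmd_res_check() tells the validation code whether the command being checked will write to the resource. As a hedged sketch, the flag values are assumed to be simple bits defined in the driver header (the exact definitions may differ), and a copy-style checker would then mark only its destination dirty:

/* Assumed flag values -- sketch only, see the driver header for the real ones. */
#define VMW_RES_DIRTY_NONE  0       /* command only reads the resource */
#define VMW_RES_DIRTY_SET   BIT(0)  /* command writes the resource */
#define VMW_RES_DIRTY_CLEAR BIT(1)  /* command brings the backing store up to date */

/* Usage pattern, taken from the surface-copy checker further down: the
 * source surface is added clean, the destination is marked dirty.
 */
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
			VMW_RES_DIRTY_NONE, user_surface_converter,
			&cmd->body.src.sid, NULL);
if (ret)
	return ret;

return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
			 VMW_RES_DIRTY_SET, user_surface_converter,
			 &cmd->body.dest.sid, NULL);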
/**
- * vmw_view_res_val_add - Add a view and the surface it's pointing to
- * to the validation list
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to, to the
+ * validation list
*
* @sw_context: The software context holding the validation list.
* @view: Pointer to the view resource.
@@ -356,27 +390,29 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
int ret;
/*
- * First add the resource the view is pointing to, otherwise
- * it may be swapped out when the view is validated.
+ * First add the resource the view is pointing to, otherwise it may be
+ * swapped out when the view is validated.
*/
- ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
+ vmw_view_dirtying(view));
if (ret)
return ret;
- return vmw_execbuf_res_noctx_val_add(sw_context, view);
+ return vmw_execbuf_res_noctx_val_add(sw_context, view,
+ VMW_RES_DIRTY_NONE);
}
/**
- * vmw_view_id_val_add - Look up a view and add it and the surface it's
- * pointing to to the validation list.
+ * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
+ * to, to the validation list.
*
* @sw_context: The software context holding the validation list.
* @view_type: The view type to look up.
* @id: view id of the view.
*
- * The view is represented by a view id and the DX context it's created on,
- * or scheduled for creation on. If there is no DX context set, the function
- * will return an -EINVAL error pointer.
+ * The view is represented by a view id and the DX context it's created on, or
+ * scheduled for creation on. If there is no DX context set, the function will
+ * return an -EINVAL error pointer.
*
* Returns: Unreferenced pointer to the resource on success, negative error
* pointer on failure.
@@ -389,10 +425,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *view;
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return ERR_PTR(-EINVAL);
- }
view = vmw_view_lookup(sw_context->man, view_type, id);
if (IS_ERR(view))
@@ -413,8 +447,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
* @sw_context: Pointer to a software context used for this command submission
* @ctx: Pointer to the context resource
*
- * This function puts all resources that were previously bound to @ctx on
- * the resource validation list. This is part of the context state reemission
+ * This function puts all resources that were previously bound to @ctx on the
+ * resource validation list. This is part of the context state reemission.
*/
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -433,13 +467,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (IS_ERR(res))
continue;
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_SET);
if (unlikely(ret != 0))
return ret;
}
}
-
/* Add all resources bound to the context to the validation list */
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
@@ -448,8 +482,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
- ret = vmw_execbuf_res_noctx_val_add(sw_context,
- entry->res);
+ ret = vmw_execbuf_res_noctx_val_add
+ (sw_context, entry->res,
+ vmw_binding_dirtying(entry->bt));
if (unlikely(ret != 0))
break;
}
@@ -472,8 +507,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
*
* @list: Pointer to head of relocation list.
* @res: The resource.
- * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is one byte.
+ * @offset: Offset into the command buffer currently being parsed where the id
+ * that needs fixup is located. Granularity is one byte.
* @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
@@ -486,7 +521,7 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
if (unlikely(!rel)) {
- DRM_ERROR("Failed to allocate a resource relocation.\n");
+ VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
@@ -506,17 +541,15 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
static void vmw_resource_relocations_free(struct list_head *list)
{
/* Memory is validation context memory, so no need to free it */
-
INIT_LIST_HEAD(list);
}
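
Throughout this patch, DRM_ERROR() calls that can be triggered by a malformed user-space command stream are converted to VMW_DEBUG_USER(). A minimal sketch of what such a wrapper is assumed to look like, routing the message to the driver debug class instead of the error log level so that user space cannot flood dmesg (the real definition lives in the driver header and may differ):

/* Sketch only: assumed to forward to the DRM driver debug class. */
#define VMW_DEBUG_USER(fmt, ...) \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)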
/**
* vmw_resource_relocations_apply - Apply all relocations on a list
*
- * @cb: Pointer to the start of the command buffer bein patch. This need
- * not be the same buffer as the one being parsed when the relocation
- * list was built, but the contents must be the same modulo the
- * resource ids.
+ * @cb: Pointer to the start of the command buffer being patched. This need not be
+ * the same buffer as the one being parsed when the relocation list was built,
+ * but the contents must be the same modulo the resource ids.
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_apply(uint32_t *cb,
@@ -560,14 +593,14 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
}
/**
- * vmw_resources_reserve - Reserve all resources on the sw_context's
- * resource list.
+ * vmw_resources_reserve - Reserve all resources on the sw_context's resource
+ * list.
*
* @sw_context: Pointer to the software context.
*
- * Note that since vmware's command submission currently is protected by
- * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
- * since only a single thread at once will attempt this.
+ * Note that since VMware's command submission is currently protected by the
+ * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
+ * only a single thread at once will attempt this.
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
@@ -592,22 +625,24 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
+ * resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
+ * @dirty: Whether to change dirty status.
 * @converter: User-space visible type-specific information.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
+ * @id_loc: Pointer to the location in the command buffer currently being parsed
+ * from where the user-space resource id handle is located.
+ * @p_res: Pointer to pointer to the resource validation node. Populated on
+ * exit.
*/
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
+ u32 dirty,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
struct vmw_resource **p_res)
@@ -621,7 +656,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
if (*id_loc == SVGA3D_INVALID_ID) {
if (res_type == vmw_res_context) {
- DRM_ERROR("Illegal context invalid id.\n");
+ VMW_DEBUG_USER("Illegal context invalid id.\n");
return -EINVAL;
}
return 0;
@@ -629,6 +664,9 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
res = rcache->res;
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
} else {
unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
@@ -638,13 +676,13 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
res = vmw_user_resource_noref_lookup_handle
(dev_priv, sw_context->fp->tfile, *id_loc, converter);
- if (unlikely(IS_ERR(res))) {
- DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned int) *id_loc);
+ if (IS_ERR(res)) {
+ VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
+ (unsigned int) *id_loc);
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
if (unlikely(ret != 0))
return ret;
@@ -675,23 +713,16 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
struct vmw_buffer_object *dx_query_mob;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindAllQuery body;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
if (!dx_query_mob || dx_query_mob->dx_query_ctx)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
-
- if (cmd == NULL) {
- DRM_ERROR("Failed to rebind queries.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+ if (cmd == NULL)
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -705,8 +736,8 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
}
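
The open-coded per-command structs (an SVGA3dCmdHeader followed by a typed body) are replaced by the VMW_DECLARE_CMD_VAR() helper, and the reserve-then-print-on-failure pattern by VMW_FIFO_RESERVE_DX(). A sketch of what these helpers are assumed to expand to; the actual macros live elsewhere in the driver, and the failure message in particular is an assumption:

/* Sketch: declare a variable (or pointer) of the common "header + body" shape. */
#define VMW_DECLARE_CMD_VAR(__var, __type)	\
	struct {				\
		SVGA3dCmdHeader header;		\
		__type body;			\
	} __var

/* Sketch: reserve FIFO space for a DX context and log on failure, so that
 * callers only keep the NULL check.
 */
#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id)			\
({									\
	void *__ptr = vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id);	\
	if (!__ptr)							\
		VMW_DEBUG_USER("FIFO reserve failed at %s.\n", __func__); \
	__ptr;								\
})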
/**
- * vmw_rebind_contexts - Rebind all resources previously bound to
- * referenced contexts.
+ * vmw_rebind_contexts - Rebind all resources previously bound to referenced
+ * contexts.
*
* @sw_context: Pointer to the software context.
*
@@ -721,21 +752,23 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
ret = vmw_binding_rebind_all(val->cur);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to rebind context.\n");
+ VMW_DEBUG_USER("Failed to rebind context.\n");
return ret;
}
ret = vmw_rebind_all_dx_query(val->ctx);
- if (ret != 0)
+ if (ret != 0) {
+ VMW_DEBUG_USER("Failed to rebind queries.\n");
return ret;
+ }
}
return 0;
}
/**
- * vmw_view_bindings_add - Add an array of view bindings to a context
- * binding state tracker.
+ * vmw_view_bindings_add - Add an array of view bindings to a context binding
+ * state tracker.
*
* @sw_context: The execbuf state used for this command.
* @view_type: View type for the bindings.
@@ -752,13 +785,11 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
uint32 view_ids[], u32 num_views,
u32 first_slot)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
u32 i;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
for (i = 0; i < num_views; ++i) {
struct vmw_ctx_bindinfo_view binding;
@@ -768,7 +799,7 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
view = vmw_view_id_val_add(sw_context, view_type,
view_ids[i]);
if (IS_ERR(view)) {
- DRM_ERROR("View not found.\n");
+ VMW_DEBUG_USER("View not found.\n");
return PTR_ERR(view);
}
}
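
The repeated "DX Context not set" checks collapse into VMW_GET_CTX_NODE(); callers now only test the returned pointer. A functionally equivalent sketch, assuming the helper returns sw_context->dx_ctx_node and emits the debug message itself when it is NULL (the real helper is a macro, so __func__ would name the calling command checker rather than the helper):

/* Sketch only -- see the driver header for the real macro. */
static inline struct vmw_ctx_validation_info *
vmw_get_ctx_node_sketch(struct vmw_sw_context *sw_context)
{
	if (!sw_context->dx_ctx_node)
		VMW_DEBUG_USER("DX Context not set at %s.\n", __func__);

	return sw_context->dx_ctx_node;
}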
@@ -798,19 +829,18 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- uint32_t cid;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
+ container_of(header, typeof(*cmd), header);
- cmd = container_of(header, struct vmw_cid_cmd, header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->cid, NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body, NULL);
}
/**
* vmw_execbuf_info_from_res - Get the private validation metadata for a
* recently validated resource
+ *
* @sw_context: Pointer to the command submission context
* @res: The resource
*
@@ -818,8 +848,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
* context's resource cache and hence the last resource of that type to be
* processed by the validation code.
*
- * Return: a pointer to the private metadata of the resource, or NULL
- * if it wasn't found
+ * Return: a pointer to the private metadata of the resource, or NULL if it
+ * wasn't found
*/
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
@@ -835,36 +865,32 @@ vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
return NULL;
}
-
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetRenderTarget body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
struct vmw_resource *ctx;
struct vmw_resource *res;
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_RT_MAX) {
- DRM_ERROR("Illegal render target type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal render target type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->body.target.sid,
- &res);
+ VMW_RES_DIRTY_SET, user_surface_converter,
+ &cmd->body.target.sid, &res);
if (unlikely(ret))
return ret;
@@ -890,44 +916,38 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceCopy body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.src.sid, NULL);
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (ret)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL);
}
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBufferCopy body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
int ret;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest, NULL);
}
@@ -935,21 +955,18 @@ static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXPredCopyRegion body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
int ret;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcSid, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dstSid, NULL);
}
@@ -957,20 +974,18 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceStretchBlt body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL);
}
@@ -978,15 +993,11 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcImage.sid, NULL);
}
@@ -994,17 +1005,12 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdPresent body;
- } *cmd;
-
-
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->body.sid,
- NULL);
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.sid, NULL);
}
/**
@@ -1014,11 +1020,10 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
- * This function checks whether @new_query_bo is suitable for holding
- * query results, and if another buffer currently is pinned for query
- * results. If so, the function prepares the state of @sw_context for
- * switching pinned buffers after successful submission of the current
- * command batch.
+ * This function checks whether @new_query_bo is suitable for holding query
+ * results, and whether another buffer is currently pinned for query results. If so,
+ * the function prepares the state of @sw_context for switching pinned buffers
+ * after successful submission of the current command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct vmw_buffer_object *new_query_bo,
@@ -1034,7 +1039,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
if (unlikely(new_query_bo->base.num_pages > 4)) {
- DRM_ERROR("Query buffer too large.\n");
+ VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -1053,13 +1058,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
-
}
return 0;
}
-
/**
* vmw_query_bo_switch_commit - Finalize switching pinned query buffer
*
@@ -1068,11 +1071,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
*
 * This function will check if we're switching query buffers, and will then
* issue a dummy occlusion query wait used as a query barrier. When the fence
- * object following that query wait has signaled, we are sure that all
- * preceding queries have finished, and the old query buffer can be unpinned.
- * However, since both the new query buffer and the old one are fenced with
- * that fence, we can do an asynchronus unpin now, and be sure that the
- * old query buffer won't be moved until the fence has signaled.
+ * object following that query wait has signaled, we are sure that all preceding
+ * queries have finished, and the old query buffer can be unpinned. However,
+ * since both the new query buffer and the old one are fenced with that fence,
+ * we can do an asynchronous unpin now, and be sure that the old query buffer
+ * won't be moved until the fence has signaled.
*
 * As mentioned above, both the new and old query buffers need to be fenced
* using a sequence emitted *after* calling this function.
@@ -1084,7 +1087,6 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* The validate list should still hold references to all
* contexts here.
*/
-
if (sw_context->needs_post_query_barrier) {
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
@@ -1097,7 +1099,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
}
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
@@ -1111,10 +1113,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
/*
* We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
+ * don't need to validate it when emitting dummy queries
+ * in context destroy paths.
*/
-
if (!dev_priv->dummy_query_bo_pinned) {
vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
true);
@@ -1131,22 +1132,24 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
}
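
The prepare/commit pair described above boils down to the following ordering around a command batch that switches query buffers. This is a conceptual summary built only from the calls visible in these two functions and their comments, not additional API:

/*
 * Conceptual ordering (sketch):
 *
 *  1. vmw_query_bo_switch_prepare() notices that @new_query_bo differs from
 *     the currently pinned buffer and validates what is needed for the switch.
 *  2. The command batch is submitted.
 *  3. vmw_query_bo_switch_commit() emits a dummy occlusion query wait
 *     (vmw_fifo_emit_dummy_query()) as a barrier when a post-query barrier is
 *     needed, and prepares the pinned-buffer switch, keeping the dummy query
 *     buffer pinned (vmw_bo_pin_reserved()) so context-destroy paths never
 *     have to validate it.
 *  4. The caller emits a fence after this point; both the old and the new
 *     query buffer are fenced by it, so the asynchronous unpin cannot let the
 *     old buffer move before all preceding queries have finished.
 */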
/**
- * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
- * handle to a MOB id.
+ * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
+ * to a MOB id.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a non-reference-counted pointer to the buffer object identified by the
+ * @vmw_bo_p: Points to a location that, on successful return, will carry a
+ * non-reference-counted pointer to the buffer object identified by the
* user-space handle in @id.
*
* This function saves information needed to translate a user-space buffer
* handle to a MOB id. The translation does not take place immediately, but
- * during a call to vmw_apply_relocations(). This function builds a relocation
- * list and a list of buffers to validate. The former needs to be freed using
- * either vmw_apply_relocations() or vmw_free_relocations(). The latter
- * needs to be freed using vmw_clear_validations.
+ * during a call to vmw_apply_relocations().
+ *
+ * This function builds a relocation list and a list of buffers to validate. The
+ * former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations.
*/
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -1161,7 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
- DRM_ERROR("Could not find or use MOB buffer.\n");
+ VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
@@ -1184,19 +1187,20 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
- * handle to a valid SVGAGuestPtr
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
+ * to a valid SVGAGuestPtr
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a non-reference-counted pointer to the DMA buffer identified by the
- * user-space handle in @id.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry a
+ * non-reference-counted pointer to the DMA buffer identified by the user-space
+ * handle in @ptr.
*
* This function saves information needed to translate a user-space buffer
* handle to a valid SVGAGuestPtr. The translation does not take place
* immediately, but during a call to vmw_apply_relocations().
+ *
* This function builds a relocation list and a list of buffers to validate.
* The former needs to be freed using either vmw_apply_relocations() or
* vmw_free_relocations(). The latter needs to be freed using
@@ -1215,7 +1219,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
- DRM_ERROR("Could not find or use GMR region.\n");
+ VMW_DEBUG_USER("Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
@@ -1236,10 +1240,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return 0;
}
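
Both translate helpers only record bookkeeping at parse time: the buffer is looked up without taking a reference, put on the validation list, and a relocation is remembered against the affected command field; the actual MOB id or guest address is patched in later by vmw_apply_relocations(). A sketch of the kind of record assumed to be kept per relocation; the type and field names here are illustrative, not the driver's real structure:

/* Illustrative relocation record (names are assumptions). */
struct vmw_bo_relocation_sketch {
	struct list_head head;           /* entry on the sw_context relocation list */
	struct vmw_buffer_object *vbo;   /* buffer that must be validated */
	union {
		SVGAMobId *mob_loc;      /* command field to receive the MOB id */
		SVGAGuestPtr *location;  /* or the guest pointer to fix up */
	};
};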
-
-
/**
- * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1251,67 +1253,52 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dx_define_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDefineQuery q;
- } *cmd;
-
- int ret;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *cotable_res;
+ int ret;
-
- if (ctx_node == NULL) {
- DRM_ERROR("DX Context not set for query.\n");
+ if (!ctx_node)
return -EINVAL;
- }
- cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
- if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
- cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+ if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
+ cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
- ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+ ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
return ret;
}
-
-
/**
- * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*
- * The query bind operation will eventually associate the query ID
- * with its backing MOB. In this function, we take the user mode
- * MOB ID and use vmw_translate_mob_ptr() to translate it to its
- * kernel mode equivalent.
+ * The query bind operation will eventually associate the query ID with its
+ * backing MOB. In this function, we take the user mode MOB ID and use
+ * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
*/
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dx_bind_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindQuery q;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
struct vmw_buffer_object *vmw_bo;
- int ret;
-
+ int ret;
- cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
/*
* Look up the buffer pointed to by q.mobid, put it on the relocation
* list so its kernel mode MOB ID can be filled in later
*/
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (ret != 0)
@@ -1322,10 +1309,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
return 0;
}
-
-
/**
- * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1335,21 +1320,16 @@ static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_begin_gb_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginGBQuery q;
- } *cmd;
-
- cmd = container_of(header, struct vmw_begin_gb_query_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->q.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
}
/**
- * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1359,38 +1339,30 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_begin_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginQuery q;
- } *cmd;
-
- cmd = container_of(header, struct vmw_begin_query_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
+ container_of(header, typeof(*cmd), header);
if (unlikely(dev_priv->has_mob)) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
}
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->q.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
}
/**
- * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1401,19 +1373,15 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndGBQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context,
- &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1424,7 +1392,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1435,27 +1403,21 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (dev_priv->has_mob) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
- gb_cmd.q.offset = cmd->q.guestResult.offset;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
+ gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
+ gb_cmd.body.offset = cmd->body.guestResult.offset;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
@@ -1466,8 +1428,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->q.guestResult,
- &vmw_bo);
+ &cmd->body.guestResult, &vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1477,7 +1438,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1488,19 +1449,15 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForGBQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context,
- &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1509,7 +1466,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1520,27 +1477,21 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (dev_priv->has_mob) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
- gb_cmd.q.offset = cmd->q.guestResult.offset;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
+ gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
+ gb_cmd.body.offset = cmd->body.guestResult.offset;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
@@ -1551,8 +1502,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->q.guestResult,
- &vmw_bo);
+ &cmd->body.guestResult, &vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1565,54 +1515,52 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
{
struct vmw_buffer_object *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
SVGA3dCmdSurfaceDMASuffix *suffix;
uint32_t bo_size;
+ bool dirty;
- cmd = container_of(header, struct vmw_dma_cmd, header);
- suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ cmd = container_of(header, typeof(*cmd), header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
header->size - sizeof(*suffix));
/* Make sure device and verifier stays in sync. */
if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
- DRM_ERROR("Invalid DMA suffix size.\n");
+ VMW_DEBUG_USER("Invalid DMA suffix size.\n");
return -EINVAL;
}
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->dma.guest.ptr,
- &vmw_bo);
+ &cmd->body.guest.ptr, &vmw_bo);
if (unlikely(ret != 0))
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
- if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
- DRM_ERROR("Invalid DMA offset.\n");
+ if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
+ VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
}
- bo_size -= cmd->dma.guest.ptr.offset;
+ bo_size -= cmd->body.guest.ptr.offset;
if (unlikely(suffix->maximumOffset > bo_size))
suffix->maximumOffset = bo_size;
+ dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
+ VMW_RES_DIRTY_SET : 0;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->dma.host.sid,
- NULL);
+ dirty, user_surface_converter,
+ &cmd->body.host.sid, NULL);
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
- DRM_ERROR("could not find surface for DMA.\n");
+ VMW_DEBUG_USER("could not find surface for DMA.\n");
return ret;
}
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
- header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
return 0;
}
@@ -1621,10 +1569,7 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_draw_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDrawPrimitives body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
(unsigned long)header + sizeof(*cmd));
SVGA3dPrimitiveRange *range;
@@ -1636,16 +1581,17 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- cmd = container_of(header, struct vmw_draw_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
if (unlikely(cmd->body.numVertexDecls > maxnum)) {
- DRM_ERROR("Illegal number of vertex declarations.\n");
+ VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
return -EINVAL;
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
@@ -1655,13 +1601,14 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
maxnum = (header->size - sizeof(cmd->body) -
cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
if (unlikely(cmd->body.numRanges > maxnum)) {
- DRM_ERROR("Illegal number of index ranges.\n");
+ VMW_DEBUG_USER("Illegal number of index ranges.\n");
return -EINVAL;
}
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
@@ -1670,30 +1617,24 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
return 0;
}
-
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_tex_state_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetTextureState state;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
- ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ ((unsigned long) header + sizeof(*cmd));
struct vmw_resource *ctx;
struct vmw_resource *res;
int ret;
- cmd = container_of(header, struct vmw_tex_state_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->state.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -1702,12 +1643,13 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
continue;
if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
- DRM_ERROR("Illegal texture/sampler unit %u.\n",
- (unsigned) cur_state->stage);
+ VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
+ (unsigned int) cur_state->stage);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&cur_state->value, &res);
if (unlikely(ret != 0))
@@ -1744,12 +1686,10 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
SVGAFifoCmdDefineGMRFB body;
} *cmd = buf;
- return vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->body.ptr,
+ return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
&vmw_bo);
}
-
/**
* vmw_cmd_res_switch_backup - Utility function to handle backup buffer
* switching
@@ -1761,14 +1701,13 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
* stream.
* @backup_offset: Offset of backup into MOB.
*
- * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving. It's basically a wrapper
- * around vmw_cmd_res_switch_backup with a different interface.
+ * This function prepares for registering a switch of backup buffers in the
+ * resource metadata just prior to unreserving. It's basically a wrapper around
+ * vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- uint32_t *buf_id,
+ struct vmw_resource *res, uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_buffer_object *vbo;
@@ -1788,7 +1727,6 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
return 0;
}
-
/**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching
*
@@ -1801,34 +1739,31 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
* stream.
* @backup_offset: Offset of backup into MOB.
*
- * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving. It's basically a wrapper
- * around vmw_cmd_res_switch_backup with a different interface.
+ * This function prepares for registering a switch of backup buffers in the
+ * resource metadata just prior to unreserving. It's basically a wrapper around
+ * vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv
- *converter,
- uint32_t *res_id,
- uint32_t *buf_id,
+ *converter, uint32_t *res_id, uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_resource *res;
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
- converter, res_id, &res);
+ VMW_RES_DIRTY_NONE, converter, res_id, &res);
if (ret)
return ret;
- return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
- buf_id, backup_offset);
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
+ backup_offset);
}
/**
- * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
- * command
+ * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1838,22 +1773,16 @@ static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_bind_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBindGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.sid, &cmd->body.mobid,
- 0);
+ user_surface_converter, &cmd->body.sid,
+ &cmd->body.mobid, 0);
}
/**
- * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
- * command
+ * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1863,21 +1792,16 @@ static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
- * command
+ * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1887,21 +1811,16 @@ static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
/**
- * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
- * command
+ * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1911,20 +1830,16 @@ static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdReadbackGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
* command
*
* @dev_priv: Pointer to a device private struct.
@@ -1935,20 +1850,16 @@ static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdReadbackGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
/**
- * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
* command
*
* @dev_priv: Pointer to a device private struct.
@@ -1959,21 +1870,17 @@ static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdInvalidateGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_invalidate_gb_surface - Validate an
- * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1983,22 +1890,16 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdInvalidateGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
-
/**
- * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
- * command
+ * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2008,20 +1909,16 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_shader_define_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
int ret;
size_t size;
struct vmw_resource *ctx;
- cmd = container_of(header, struct vmw_shader_define_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2029,24 +1926,20 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return 0;
size = cmd->header.size - sizeof(cmd->body);
- ret = vmw_compat_shader_add(dev_priv,
- vmw_context_res_man(ctx),
- cmd->body.shid, cmd + 1,
- cmd->body.type, size,
- &sw_context->staged_cmd_res);
+ ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
+ cmd->body.shid, cmd + 1, cmd->body.type,
+ size, &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(sw_context,
- NULL,
+ return vmw_resource_relocation_add(sw_context, NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_nop);
}
/**
- * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
- * command
+ * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2056,42 +1949,34 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_shader_destroy_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
int ret;
struct vmw_resource *ctx;
- cmd = container_of(header, struct vmw_shader_destroy_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_shader_remove(vmw_context_res_man(ctx),
- cmd->body.shid,
- cmd->body.type,
- &sw_context->staged_cmd_res);
+ ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
+ cmd->body.type, &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(sw_context,
- NULL,
+ return vmw_resource_relocation_add(sw_context, NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_nop);
}
/**
- * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
- * command
+ * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2101,27 +1986,23 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_set_shader_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *ctx, *res = NULL;
struct vmw_ctx_validation_info *ctx_info;
int ret;
- cmd = container_of(header, struct vmw_set_shader_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal shader type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2130,21 +2011,20 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(vmw_context_res_man(ctx),
- cmd->body.shid,
- cmd->body.type);
-
+ cmd->body.shid, cmd->body.type);
if (!IS_ERR(res)) {
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (unlikely(ret != 0))
return ret;
}
}
if (IS_ERR_OR_NULL(res)) {
- ret = vmw_cmd_res_check(dev_priv, sw_context,
- vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &res);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
+ VMW_RES_DIRTY_NONE,
+ user_shader_converter, &cmd->body.shid,
+ &res);
if (unlikely(ret != 0))
return ret;
}
@@ -2157,14 +2037,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_info->staged, &binding.bi,
- binding.shader_slot, 0);
+ vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
+
return 0;
}
/**
- * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
- * command
+ * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2174,18 +2053,14 @@ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_set_shader_const_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShaderConst body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
int ret;
- cmd = container_of(header, struct vmw_set_shader_const_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
if (unlikely(ret != 0))
return ret;
@@ -2196,8 +2071,7 @@ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
- * command
+ * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2207,22 +2081,16 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_bind_gb_shader_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBindGBShader body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &cmd->body.mobid,
- cmd->body.offsetInBytes);
+ user_shader_converter, &cmd->body.shid,
+ &cmd->body.mobid, cmd->body.offsetInBytes);
}
/**
- * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * vmw_cmd_dx_set_single_constant_buffer - Validate
* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
@@ -2234,23 +2102,18 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetSingleConstantBuffer body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
struct vmw_resource *res = NULL;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_cb binding;
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
@@ -2265,21 +2128,21 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
- DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
- (unsigned) cmd->body.type,
- (unsigned) binding.slot);
+ VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+ (unsigned int) cmd->body.type,
+ (unsigned int) binding.slot);
return -EINVAL;
}
- vmw_binding_add(ctx_node->staged, &binding.bi,
- binding.shader_slot, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
+ binding.slot);
return 0;
}
/**
- * vmw_cmd_dx_set_shader_res - Validate an
- * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2289,17 +2152,15 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetShaderResources body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
+ container_of(header, typeof(*cmd), header);
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
- DRM_ERROR("Invalid shader binding.\n");
+ VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
@@ -2311,8 +2172,7 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
}
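
Several DX validators in this patch derive the number of trailing ids from header.size minus sizeof(body) and range-check the result in 64-bit arithmetic so that start + count cannot wrap. A minimal standalone sketch of that guard, with an invented limit and field names:

#include <stdint.h>
#include <stdio.h>

#define MAX_SRVIEWS 128u	/* stand-in for SVGA3D_DX_MAX_SRVIEWS */

struct cmd_body { uint32_t type; uint32_t startView; /* view ids follow */ };

/* Return the number of trailing 32-bit view ids, or -1 if out of range. */
static int checked_view_count(uint32_t payload_size, const struct cmd_body *body)
{
	uint32_t num = (payload_size - sizeof(*body)) / sizeof(uint32_t);

	/* Do the sum in 64 bits so startView + num cannot wrap around. */
	if ((uint64_t)body->startView + (uint64_t)num > (uint64_t)MAX_SRVIEWS)
		return -1;

	return (int)num;
}

int main(void)
{
	struct cmd_body ok  = { .type = 0, .startView = 4 };
	struct cmd_body bad = { .type = 0, .startView = UINT32_MAX };

	printf("ok:  %d views\n", checked_view_count(sizeof(ok) + 8 * 4, &ok));
	printf("bad: %d views\n", checked_view_count(sizeof(bad) + 8 * 4, &bad));
	return 0;
}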
/**
- * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
- * command
+ * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2322,36 +2182,32 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
struct vmw_resource *res = NULL;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
int ret = 0;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal shader type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
if (IS_ERR(res)) {
- DRM_ERROR("Could not find shader for binding.\n");
+ VMW_DEBUG_USER("Could not find shader for binding.\n");
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (ret)
return ret;
}
@@ -2361,15 +2217,14 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- binding.shader_slot, 0);
+ vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
return 0;
}
/**
- * vmw_cmd_dx_set_vertex_buffers - Validates an
- * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2379,7 +2234,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_vb binding;
struct vmw_resource *res;
struct {
@@ -2389,22 +2244,21 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
} *cmd;
int i, ret, num;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
num = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dVertexBuffer);
if ((u64)num + (u64)cmd->body.startBuffer >
(u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
- DRM_ERROR("Invalid number of vertex buffers.\n");
+ VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
return -EINVAL;
}
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&cmd->buf[i].sid, &res);
if (unlikely(ret != 0))
@@ -2417,15 +2271,14 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- 0, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
}
return 0;
}
/**
- * vmw_cmd_dx_ia_set_vertex_buffers - Validate an
+ * vmw_cmd_dx_set_index_buffer - Validate
* SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
@@ -2436,23 +2289,18 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_ib binding;
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetIndexBuffer body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
@@ -2469,8 +2317,8 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_set_rendertarget - Validate an
- * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2480,32 +2328,29 @@ static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetRenderTargets body;
- } *cmd = container_of(header, typeof(*cmd), header);
- int ret;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
+ container_of(header, typeof(*cmd), header);
u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dRenderTargetViewId);
+ int ret;
if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
- DRM_ERROR("Invalid DX Rendertarget binding.\n");
+ VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
return -EINVAL;
}
- ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
- vmw_ctx_binding_ds, 0,
- &cmd->body.depthStencilViewId, 1, 0);
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
+ 0, &cmd->body.depthStencilViewId, 1, 0);
if (ret)
return ret;
return vmw_view_bindings_add(sw_context, vmw_view_rt,
- vmw_ctx_binding_dx_rt, 0,
- (void *)&cmd[1], num_rt_view, 0);
+ vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
+ num_rt_view, 0);
}
/**
- * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * vmw_cmd_dx_clear_rendertarget_view - Validate
* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
*
* @dev_priv: Pointer to a device private struct.
@@ -2516,17 +2361,15 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXClearRenderTargetView body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
cmd->body.renderTargetViewId));
}
/**
- * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * vmw_cmd_dx_clear_depthstencil_view - Validate
* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
*
* @dev_priv: Pointer to a device private struct.
@@ -2537,10 +2380,8 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXClearDepthStencilView body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
cmd->body.depthStencilViewId));
@@ -2550,14 +2391,14 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *srf;
struct vmw_resource *res;
enum vmw_view_type view_type;
int ret;
/*
- * This is based on the fact that all affected define commands have
- * the same initial command body layout.
+ * This is based on the fact that all affected define commands have the
+ * same initial command body layout.
*/
struct {
SVGA3dCmdHeader header;
@@ -2565,17 +2406,16 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
uint32 sid;
} *cmd;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
view_type = vmw_view_cmd_to_type(header->id);
if (view_type == vmw_view_max)
return -EINVAL;
+
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, &srf);
if (unlikely(ret != 0))
return ret;
@@ -2585,19 +2425,14 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- return vmw_view_add(sw_context->man,
- ctx_node->ctx,
- srf,
- view_type,
- cmd->defined_id,
- header,
+ return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
+ cmd->defined_id, header,
header->size + sizeof(*header),
&sw_context->staged_cmd_res);
}
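
The comment above relies on all affected define commands beginning with the same two fields, so one validator can read just that prefix regardless of the concrete command. A standalone sketch of the same trick, with invented command layouts rather than the SVGA ones:

#include <stdint.h>
#include <stdio.h>

/* Two invented define commands that share the same two leading fields. */
struct define_rt_view { uint32_t defined_id; uint32_t sid; uint32_t rt_extra; };
struct define_ds_view { uint32_t defined_id; uint32_t sid; uint32_t ds_extra[2]; };

/* The prefix every define command is assumed to start with. */
struct define_prefix { uint32_t defined_id; uint32_t sid; };

/* One generic validator reads only the shared prefix. */
static void validate_define(const void *body)
{
	const struct define_prefix *p = body;

	printf("defining view %u on surface %u\n",
	       (unsigned int)p->defined_id, (unsigned int)p->sid);
}

int main(void)
{
	struct define_rt_view rt = { .defined_id = 1, .sid = 10, .rt_extra = 0 };
	struct define_ds_view ds = { .defined_id = 2, .sid = 20, .ds_extra = { 0, 0 } };

	validate_define(&rt);
	validate_define(&ds);
	return 0;
}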
/**
- * vmw_cmd_dx_set_so_targets - Validate an
- * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2607,7 +2442,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_so binding;
struct vmw_resource *res;
struct {
@@ -2617,22 +2452,20 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
} *cmd;
int i, ret, num;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
- num = (cmd->header.size - sizeof(cmd->body)) /
- sizeof(SVGA3dSoTarget);
+ num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
if (num > SVGA3D_DX_MAX_SOTARGETS) {
- DRM_ERROR("Invalid DX SO binding.\n");
+ VMW_DEBUG_USER("Invalid DX SO binding.\n");
return -EINVAL;
}
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_SET,
user_surface_converter,
&cmd->targets[i].sid, &res);
if (unlikely(ret != 0))
@@ -2645,8 +2478,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- 0, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
}
return 0;
@@ -2656,7 +2488,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *res;
/*
* This is based on the fact that all affected define commands have
@@ -2669,10 +2501,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
enum vmw_so_type so_type;
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
@@ -2683,8 +2513,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_check_subresource - Validate an
- * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2714,7 +2544,7 @@ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, NULL);
}
@@ -2722,32 +2552,30 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
return 0;
}
/**
- * vmw_cmd_dx_view_remove - validate a view remove command and
- * schedule the view resource for removal.
+ * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
+ * resource for removal.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*
- * Check that the view exists, and if it was not created using this
- * command batch, conditionally make this command a NOP.
+ * Check that the view exists, and if it was not created using this command
+ * batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
@@ -2756,15 +2584,11 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_resource *view;
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
- ret = vmw_view_remove(sw_context->man,
- cmd->body.view_id, view_type,
- &sw_context->staged_cmd_res,
- &view);
+ ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
+ &sw_context->staged_cmd_res, &view);
if (ret || !view)
return ret;
@@ -2774,16 +2598,14 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
* relocation to conditionally make this command a NOP to avoid
* device errors.
*/
- return vmw_resource_relocation_add(sw_context,
- view,
+ return vmw_resource_relocation_add(sw_context, view,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_cond_nop);
}
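
The vmw_res_rel_cond_nop relocation recorded above stores where the command id sits in the batch so the command can later be rewritten into a no-op when the view was not created in this batch. A rough standalone sketch of such a patch-later step — the NOP id, list layout and names below are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_DESTROY_VIEW 0x100u
#define CMD_NOP          0x0u	/* stand-in for the device's NOP command id */

struct reloc { size_t offset; int make_nop; };

/* Patch recorded command ids in place just before handing the batch over. */
static void apply_relocs(uint8_t *batch, const struct reloc *r, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		uint32_t nop = CMD_NOP;

		if (!r[i].make_nop)
			continue;
		memcpy(batch + r[i].offset, &nop, sizeof(nop));
	}
}

int main(void)
{
	uint32_t batch[2] = { CMD_DESTROY_VIEW, 42 /* view id */ };
	/* Validation decided this destroy must become a NOP. */
	struct reloc r = { .offset = 0, .make_nop = 1 };

	apply_relocs((uint8_t *)batch, &r, 1);
	printf("patched command id: 0x%x\n", (unsigned int)batch[0]);
	return 0;
}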
/**
- * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
- * command
+ * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2793,18 +2615,14 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDefineShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId);
@@ -2817,8 +2635,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
- * command
+ * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2828,29 +2645,22 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDestroyShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
&sw_context->staged_cmd_res);
- if (ret)
- DRM_ERROR("Could not find shader to remove.\n");
return ret;
}
/**
- * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
- * command
+ * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2862,36 +2672,37 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
{
struct vmw_resource *ctx;
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter,
- &cmd->body.cid, &ctx);
+ VMW_RES_DIRTY_SET,
+ user_context_converter, &cmd->body.cid,
+ &ctx);
if (ret)
return ret;
} else {
- if (!sw_context->dx_ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ struct vmw_ctx_validation_info *ctx_node =
+ VMW_GET_CTX_NODE(sw_context);
+
+ if (!ctx_node)
return -EINVAL;
- }
- ctx = sw_context->dx_ctx_node->ctx;
+
+ ctx = ctx_node->ctx;
}
- res = vmw_shader_lookup(vmw_context_res_man(ctx),
- cmd->body.shid, 0);
+ res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
if (IS_ERR(res)) {
- DRM_ERROR("Could not find shader to bind.\n");
+ VMW_DEBUG_USER("Could not find shader to bind.\n");
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (ret) {
- DRM_ERROR("Error creating resource validation node.\n");
+ VMW_DEBUG_USER("Error creating resource validation node.\n");
return ret;
}
@@ -2901,7 +2712,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
+ * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2911,18 +2722,16 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXGenMips body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
cmd->body.shaderResourceViewId));
}
/**
- * vmw_cmd_dx_transfer_from_buffer -
- * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ * vmw_cmd_dx_transfer_from_buffer - Validate
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2932,26 +2741,23 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXTransferFromBuffer body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
+ container_of(header, typeof(*cmd), header);
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcSid, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.destSid, NULL);
}
/**
- * vmw_cmd_intra_surface_copy -
- * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
+ * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2961,20 +2767,17 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdIntraSurfaceCopy body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
+ container_of(header, typeof(*cmd), header);
if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
return -EINVAL;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.surface.sid, NULL);
+ VMW_RES_DIRTY_SET, user_surface_converter,
+ &cmd->body.surface.sid, NULL);
}
-
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -2997,18 +2800,18 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
break;
default:
- DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+ VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
return -EINVAL;
}
if (*size > size_remaining) {
- DRM_ERROR("Invalid SVGA command (size mismatch):"
- " %u.\n", cmd_id);
+ VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
+ cmd_id);
return -EINVAL;
}
if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+ VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
return -EPERM;
}
@@ -3196,9 +2999,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
false, false, true),
- /*
- * DX commands
- */
+ /* SM commands */
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
@@ -3380,8 +3181,8 @@ bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
}
static int vmw_cmd_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf, uint32_t *size)
+ struct vmw_sw_context *sw_context, void *buf,
+ uint32_t *size)
{
uint32_t cmd_id;
uint32_t size_remaining = *size;
@@ -3420,31 +3221,33 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
goto out_new;
ret = entry->func(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- goto out_invalid;
+ if (unlikely(ret != 0)) {
+ VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
+ cmd_id + SVGA_3D_CMD_BASE, ret);
+ return ret;
+ }
return 0;
out_invalid:
- DRM_ERROR("Invalid SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
out_privileged:
- DRM_ERROR("Privileged SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EPERM;
out_old:
- DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
out_new:
- DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf,
+ struct vmw_sw_context *sw_context, void *buf,
uint32_t size)
{
int32_t cur_size = size;
@@ -3462,7 +3265,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
}
if (unlikely(cur_size != 0)) {
- DRM_ERROR("Command verifier out of sync.\n");
+ VMW_DEBUG_USER("Command verifier out of sync.\n");
return -EINVAL;
}
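
vmw_cmd_check_all() drives vmw_cmd_check() over the packed batch, subtracting each command's reported size and treating leftover bytes as the verifier being out of sync. A condensed standalone sketch of that loop, using an invented toy command encoding:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy encoding: each command is a 32-bit id followed by a 32-bit payload size. */
static int check_one(const uint8_t *buf, uint32_t remaining, uint32_t *size)
{
	uint32_t hdr[2];

	if (remaining < sizeof(hdr))
		return -1;
	memcpy(hdr, buf, sizeof(hdr));

	*size = sizeof(hdr) + hdr[1];
	if (*size > remaining)		/* declared size must fit what is left */
		return -1;
	return 0;
}

static int check_all(const uint8_t *buf, uint32_t size)
{
	int32_t cur_size = (int32_t)size;

	while (cur_size > 0) {
		uint32_t consumed = 0;

		if (check_one(buf, (uint32_t)cur_size, &consumed))
			return -1;
		buf += consumed;
		cur_size -= (int32_t)consumed;
	}

	/* Leftover or overshot bytes mean the verifier is out of sync. */
	return cur_size != 0 ? -1 : 0;
}

int main(void)
{
	uint8_t batch[16] = { 0 };	/* two zero-payload commands */

	printf("check_all: %d\n", check_all(batch, sizeof(batch)));
	return 0;
}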
@@ -3472,7 +3275,6 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
/* Memory is validation context memory, so no need to free it */
-
INIT_LIST_HEAD(&sw_context->bo_relocations);
}
@@ -3520,7 +3322,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
if (sw_context->cmd_bounce == NULL) {
- DRM_ERROR("Failed to allocate command bounce buffer.\n");
+ VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
sw_context->cmd_bounce_size = 0;
return -ENOMEM;
}
@@ -3535,8 +3337,8 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
* If this fails for some reason, we sync the fifo and return NULL.
* It is then safe to fence buffers with a NULL pointer.
*
- * If @p_handle is not NULL @file_priv must also not be NULL. Creates
- * a userspace handle if @p_handle is not NULL, otherwise not.
+ * If @p_handle is not NULL, @file_priv must also not be NULL. Creates a
+ * userspace handle if @p_handle is not NULL, otherwise not.
*/
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
@@ -3553,7 +3355,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
ret = vmw_fifo_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
- DRM_ERROR("Fence submission error. Syncing.\n");
+ VMW_DEBUG_USER("Fence submission error. Syncing.\n");
synced = true;
}
@@ -3564,9 +3366,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
if (unlikely(ret != 0 && !synced)) {
- (void) vmw_fallback_wait(dev_priv, false, false,
- sequence, false,
- VMW_FENCE_WAIT_TIMEOUT);
+ (void) vmw_fallback_wait(dev_priv, false, false, sequence,
+ false, VMW_FENCE_WAIT_TIMEOUT);
*p_fence = NULL;
}
@@ -3574,36 +3375,32 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
}
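
The flow in vmw_execbuf_fence_commands() above: if emitting the fence command fails, sync instead; if creating the fence object then also fails on a path that has not synced, do a fallback wait and hand back a NULL fence. A compressed standalone sketch of that decision logic — every helper below is a stub standing in for the real driver call:

#include <stdio.h>
#include <stdbool.h>

struct fence { int seqno; };

/* Stubs standing in for fifo fence submission, fence creation and waits. */
static int  send_fence(int *seqno, bool fail)  { *seqno = 1; return fail ? -1 : 0; }
static int  create_fence(struct fence *f, int seqno, bool fail)
{
	f->seqno = seqno;
	return fail ? -1 : 0;
}
static void sync_hw(void)        { puts("  syncing hardware"); }
static void fallback_wait(int s) { printf("  fallback wait for seqno %d\n", s); }

static void fence_commands(struct fence **out, struct fence *storage,
			   bool send_fails, bool create_fails)
{
	int seqno;
	bool synced = false;

	if (send_fence(&seqno, send_fails)) {
		sync_hw();		/* submission failed: sync instead */
		synced = true;
	}

	if (create_fence(storage, seqno, create_fails)) {
		if (!synced)		/* nobody will ever signal this fence */
			fallback_wait(seqno);
		*out = NULL;		/* callers can fence with a NULL pointer */
		return;
	}

	*out = storage;
}

int main(void)
{
	struct fence storage, *f;

	puts("normal path:");
	fence_commands(&f, &storage, false, false);
	puts("fence creation fails, not yet synced:");
	fence_commands(&f, &storage, false, true);
	return 0;
}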
/**
- * vmw_execbuf_copy_fence_user - copy fence object information to
- * user-space.
+ * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
*
* @dev_priv: Pointer to a vmw_private struct.
* @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
* @ret: Return value from fence object creation.
- * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
- * which the information should be copied.
+ * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
+ * the information should be copied.
* @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
* @out_fence_fd: exported file descriptor for the fence. -1 if not used
* @sync_file: Only used to clean up in case of an error in this function.
*
- * This function copies fence information to user-space. If copying fails,
- * The user-space struct drm_vmw_fence_rep::error member is hopefully
- * left untouched, and if it's preloaded with an -EFAULT by user-space,
- * the error will hopefully be detected.
- * Also if copying fails, user-space will be unable to signal the fence
- * object so we wait for it immediately, and then unreference the
- * user-space reference.
+ * This function copies fence information to user-space. If copying fails, the
+ * user-space struct drm_vmw_fence_rep::error member is hopefully left
+ * untouched, and if it's preloaded with -EFAULT by user-space, the error
+ * will hopefully be detected.
+ *
+ * Also if copying fails, user-space will be unable to signal the fence object
+ * so we wait for it immediately, and then unreference the user-space reference.
*/
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
- struct vmw_fpriv *vmw_fp,
- int ret,
+ struct vmw_fpriv *vmw_fp, int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj *fence,
- uint32_t fence_handle,
- int32_t out_fence_fd,
- struct sync_file *sync_file)
+ struct vmw_fence_obj *fence, uint32_t fence_handle,
+ int32_t out_fence_fd, struct sync_file *sync_file)
{
struct drm_vmw_fence_rep fence_rep;
@@ -3624,16 +3421,16 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
/*
- * copy_to_user errors will be detected by user space not
- * seeing fence_rep::error filled in. Typically
- * user-space would have pre-set that member to -EFAULT.
+ * copy_to_user errors will be detected by user space not seeing
+ * fence_rep::error filled in. Typically user-space would have pre-set
+ * that member to -EFAULT.
*/
ret = copy_to_user(user_fence_rep, &fence_rep,
sizeof(fence_rep));
/*
- * User-space lost the fence object. We need to sync
- * and unreference the handle.
+ * User-space lost the fence object. We need to sync and unreference the
+ * handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
if (sync_file)
@@ -3644,42 +3441,39 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.fd = -1;
}
- ttm_ref_object_base_unref(vmw_fp->tfile,
- fence_handle, TTM_REF_USAGE);
- DRM_ERROR("Fence copy error. Syncing.\n");
+ ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
+ TTM_REF_USAGE);
+ VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
}
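
The kernel-doc above describes a convention worth spelling out: user space pre-loads fence_rep.error with -EFAULT, so a copy-out that never lands is visible as the stale -EFAULT, and when the copy fails after error was already cleared the kernel waits and drops the handle itself. A small standalone sketch of that convention; copy_out() is a stand-in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct fence_rep { int error; unsigned int handle; };

/* Stand-in for copy_to_user(): returns nonzero when the copy "fails". */
static int copy_out(struct fence_rep *dst, const struct fence_rep *src, int fail)
{
	if (fail)
		return 1;
	memcpy(dst, src, sizeof(*src));
	return 0;
}

int main(void)
{
	/* User space pre-loads error with -EFAULT before issuing the ioctl. */
	struct fence_rep user_rep = { .error = -EFAULT, .handle = 0 };
	struct fence_rep kernel_rep = { .error = 0, .handle = 1234 };

	if (copy_out(&user_rep, &kernel_rep, /*fail=*/1) && kernel_rep.error == 0) {
		/* User space lost the fence: wait on it and drop the handle here. */
		puts("kernel: fence copy error, syncing and unreferencing handle");
	}

	/* The stale -EFAULT tells user space the reply never arrived. */
	printf("user space sees error = %d\n", user_rep.error);
	return 0;
}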
/**
- * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
- * the fifo.
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
*
* @dev_priv: Pointer to a device private structure.
* @kernel_commands: Pointer to the unpatched command batch.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
- * Side effects: If this function returns 0, then the command batch
- * pointed to by @kernel_commands will have been modified.
+ * Side effects: If this function returns 0, then the command batch pointed to
+ * by @kernel_commands will have been modified.
*/
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
- void *kernel_commands,
- u32 command_size,
+ void *kernel_commands, u32 command_size,
struct vmw_sw_context *sw_context)
{
void *cmd;
if (sw_context->dx_ctx_node)
- cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
sw_context->dx_ctx_node->ctx->id);
else
- cmd = vmw_fifo_reserve(dev_priv, command_size);
- if (!cmd) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
+
+ if (!cmd)
return -ENOMEM;
- }
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
@@ -3691,16 +3485,16 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
}
/**
- * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
- * the command buffer manager.
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
+ * command buffer manager.
*
* @dev_priv: Pointer to a device private structure.
* @header: Opaque handle to the command buffer allocation.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
- * Side effects: If this function returns 0, then the command buffer
- * represented by @header will have been modified.
+ * Side effects: If this function returns 0, then the command buffer represented
+ * by @header will have been modified.
*/
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
struct vmw_cmdbuf_header *header,
@@ -3709,8 +3503,8 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
{
u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID);
- void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
- id, false, header);
+ void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
+ header);
vmw_apply_relocations(sw_context);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
@@ -3730,22 +3524,23 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
* @header: Out parameter returning the opaque pointer to the command buffer.
*
* This function checks whether we can use the command buffer manager for
- * submission and if so, creates a command buffer of suitable size and
- * copies the user data into that buffer.
+ * submission and if so, creates a command buffer of suitable size and copies
+ * the user data into that buffer.
*
* On successful return, the function returns a pointer to the data in the
* command buffer and *@header is set to non-NULL.
- * If command buffers could not be used, the function will return the value
- * of @kernel_commands on function call. That value may be NULL. In that case,
- * the value of *@header will be set to NULL.
+ *
+ * If command buffers could not be used, the function will return the value of
+ * @kernel_commands on function call. That value may be NULL. In that case, the
+ * value of *@header will be set to NULL.
+ *
* If an error is encountered, the function will return a pointer error value.
* If the function is interrupted by a signal while sleeping, it will return
* -ERESTARTSYS casted to a pointer error value.
*/
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
void __user *user_commands,
- void *kernel_commands,
- u32 command_size,
+ void *kernel_commands, u32 command_size,
struct vmw_cmdbuf_header **header)
{
size_t cmdbuf_size;
@@ -3753,7 +3548,7 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
*header = NULL;
if (command_size > SVGA_CB_MAX_SIZE) {
- DRM_ERROR("Command buffer is too large.\n");
+ VMW_DEBUG_USER("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
@@ -3763,15 +3558,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
- kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
- true, header);
+ kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
+ header);
if (IS_ERR(kernel_commands))
return kernel_commands;
- ret = copy_from_user(kernel_commands, user_commands,
- command_size);
+ ret = copy_from_user(kernel_commands, user_commands, command_size);
if (ret) {
- DRM_ERROR("Failed copying commands.\n");
+ VMW_DEBUG_USER("Failed copying commands.\n");
vmw_cmdbuf_header_free(*header);
*header = NULL;
return ERR_PTR(-EFAULT);
@@ -3799,13 +3593,13 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
res = vmw_user_resource_noref_lookup_handle
(dev_priv, sw_context->fp->tfile, handle,
user_context_converter);
- if (unlikely(IS_ERR(res))) {
- DRM_ERROR("Could not find or user DX context 0x%08x.\n",
- (unsigned) handle);
+ if (IS_ERR(res)) {
+ VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
+ (unsigned int) handle);
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
if (unlikely(ret != 0))
return ret;
@@ -3817,19 +3611,16 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
- void __user *user_commands,
- void *kernel_commands,
- uint32_t command_size,
- uint64_t throttle_us,
+ void __user *user_commands, void *kernel_commands,
+ uint32_t command_size, uint64_t throttle_us,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj **out_fence,
- uint32_t flags)
+ struct vmw_fence_obj **out_fence, uint32_t flags)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
struct vmw_cmdbuf_header *header;
- uint32_t handle;
+ uint32_t handle = 0;
int ret;
int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL;
@@ -3840,7 +3631,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
- DRM_ERROR("Failed to get a fence file descriptor.\n");
+ VMW_DEBUG_USER("Failed to get a fence fd.\n");
return out_fence_fd;
}
}
@@ -3873,18 +3664,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_unlock;
-
- ret = copy_from_user(sw_context->cmd_bounce,
- user_commands, command_size);
-
+ ret = copy_from_user(sw_context->cmd_bounce, user_commands,
+ command_size);
if (unlikely(ret != 0)) {
ret = -EFAULT;
- DRM_ERROR("Failed copying commands.\n");
+ VMW_DEBUG_USER("Failed copying commands.\n");
goto out_unlock;
}
+
kernel_commands = sw_context->cmd_bounce;
- } else if (!header)
+ } else if (!header) {
sw_context->kernel = true;
+ }
sw_context->fp = vmw_fpriv(file_priv);
INIT_LIST_HEAD(&sw_context->ctx_list);
@@ -3897,6 +3688,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->res_relocations);
INIT_LIST_HEAD(&sw_context->bo_relocations);
+
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
@@ -3904,8 +3696,10 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
goto out_unlock;
+
sw_context->res_ht_initialized = true;
}
+
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
@@ -3932,6 +3726,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
+
vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
@@ -3959,17 +3754,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
vmw_query_bo_switch_commit(dev_priv, sw_context);
- ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
- &fence,
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
(user_fence_rep) ? &handle : NULL);
/*
* This error is harmless, because if fence submission fails,
* vmw_fifo_send_fence will sync. The error will be propagated to
* user-space in @fence_rep
*/
-
if (ret != 0)
- DRM_ERROR("Fence submission error. Syncing.\n");
+ VMW_DEBUG_USER("Fence submission error. Syncing.\n");
vmw_execbuf_bindings_commit(sw_context, false);
vmw_bind_dx_query_mob(sw_context);
@@ -3977,21 +3770,19 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_validation_bo_fence(sw_context->ctx, fence);
- if (unlikely(dev_priv->pinned_bo != NULL &&
- !dev_priv->query_cid_valid))
+ if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
/*
- * If anything fails here, give up trying to export the fence
- * and do a sync since the user mode will not be able to sync
- * the fence itself. This ensures we are still functionally
- * correct.
+ * If anything fails here, give up trying to export the fence and do a
+ * sync since the user mode will not be able to sync the fence itself.
+ * This ensures we are still functionally correct.
*/
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
sync_file = sync_file_create(&fence->base);
if (!sync_file) {
- DRM_ERROR("Unable to create sync file for fence\n");
+ VMW_DEBUG_USER("Sync file create failed for fence\n");
put_unused_fd(out_fence_fd);
out_fence_fd = -1;
@@ -4004,8 +3795,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle,
- out_fence_fd, sync_file);
+ user_fence_rep, fence, handle, out_fence_fd,
+ sync_file);
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
@@ -4019,8 +3810,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
- * Unreference resources outside of the cmdbuf_mutex to
- * avoid deadlocks in resource destruction paths.
+ * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
+ * in resource destruction paths.
*/
vmw_validation_unref_lists(&val_ctx);
@@ -4035,8 +3826,7 @@ out_err_nores:
vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
- if (unlikely(dev_priv->pinned_bo != NULL &&
- !dev_priv->query_cid_valid))
+ if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
@@ -4045,8 +3835,8 @@ out_unlock:
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
- * Unreference resources outside of the cmdbuf_mutex to
- * avoid deadlocks in resource destruction paths.
+ * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
+ * in resource destruction paths.
*/
vmw_validation_unref_lists(&val_ctx);
out_free_header:
@@ -4064,13 +3854,13 @@ out_free_fence_fd:
*
* @dev_priv: The device private structure.
*
- * This function is called to idle the fifo and unpin the query buffer
- * if the normal way to do this hits an error, which should typically be
- * extremely rare.
+ * This function is called to idle the fifo and unpin the query buffer if the
+ * normal way to do this hits an error, which should typically be extremely
+ * rare.
*/
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
- DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
+ VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
@@ -4082,28 +3872,27 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/**
- * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
+ * bo.
*
* @dev_priv: The device private structure.
- * @fence: If non-NULL should point to a struct vmw_fence_obj issued
- * _after_ a query barrier that flushes all queries touching the current
- * buffer pointed to by @dev_priv->pinned_bo
+ * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
+ * query barrier that flushes all queries touching the current buffer pointed to
+ * by @dev_priv->pinned_bo
*
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
+ * This function should be used to unpin the pinned query bo, or as a query
+ * barrier when we need to make sure that all queries have finished before the
+ * next fifo command. (For example on hardware context destructions where the
+ * hardware may otherwise leak unfinished queries).
*
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
+ * This function does not return any failure codes, but makes attempts to do safe
+ * unpinning in case of errors.
*
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
+ * The function will synchronize on the previous query barrier, and will thus
+ * not finish until that barrier has executed.
*
- * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
- * before calling this function.
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
+ * calling this function.
*/
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence)
@@ -4153,35 +3942,32 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
vmw_validation_unref_lists(&val_ctx);
vmw_bo_unreference(&dev_priv->pinned_bo);
+
out_unlock:
return;
-
out_no_emit:
vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
vmw_validation_unref_lists(&val_ctx);
vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo);
-
}
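
The out_unlock/out_no_emit/out_no_reserve labels above follow the usual unwind-ladder idiom: each failure jumps to the label that undoes exactly what had been set up so far. A generic standalone sketch of the idiom, with invented resources and helpers:

#include <stdio.h>

static int reserve(void)    { puts("reserve");   return 0; }
static void unreserve(void) { puts("unreserve"); }
static int emit(int fail)   { puts("emit");      return fail ? -1 : 0; }
static void backoff(void)   { puts("backoff");   }

/* Unwind ladder: each label undoes only the steps that already succeeded. */
static int do_release(int emit_fails)
{
	int ret;

	ret = reserve();
	if (ret)
		goto out;		/* nothing staged yet */

	ret = emit(emit_fails);
	if (ret)
		goto out_unreserve;	/* undo the reservation only */

	unreserve();			/* success path does its own teardown */
	return 0;

out_unreserve:
	backoff();
	unreserve();
out:
	return ret;
}

int main(void)
{
	puts("-- emit fails --");
	do_release(1);
	puts("-- success --");
	do_release(0);
	return 0;
}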
/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
*
* @dev_priv: The device private structure.
*
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
+ * This function should be used to unpin the pinned query bo, or as a query
+ * barrier when we need to make sure that all queries have finished before the
+ * next fifo command. (For example on hardware context destructions where the
+ * hardware may otherwise leak unfinished queries).
*
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
+ * This function does not return any failure codes, but makes attempts to do safe
+ * unpinning in case of errors.
*
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
+ * The function will synchronize on the previous query barrier, and will thus
+ * not finish until that barrier has executed.
*/
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
@@ -4203,8 +3989,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
struct dma_fence *in_fence = NULL;
if (unlikely(size < copy_offset[0])) {
- DRM_ERROR("Invalid command size, ioctl %d\n",
- DRM_VMW_EXECBUF);
+ VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
+ DRM_VMW_EXECBUF);
return -EINVAL;
}
@@ -4212,23 +3998,19 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
return -EFAULT;
/*
- * Extend the ioctl argument while
- * maintaining backwards compatibility:
- * We take different code paths depending on the value of
- * arg.version.
+ * Extend the ioctl argument while maintaining backwards compatibility:
+ * We take different code paths depending on the value of arg.version.
*/
-
if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
arg.version == 0)) {
- DRM_ERROR("Incorrect execbuf version.\n");
+ VMW_DEBUG_USER("Incorrect execbuf version.\n");
return -EINVAL;
}
if (arg.version > 1 &&
copy_from_user(&arg.context_handle,
(void __user *) (data + copy_offset[0]),
- copy_offset[arg.version - 1] -
- copy_offset[0]) != 0)
+ copy_offset[arg.version - 1] - copy_offset[0]) != 0)
return -EFAULT;
switch (arg.version) {
@@ -4240,13 +4022,12 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
break;
}
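
The version handling above extends the execbuf ioctl argument without breaking old binaries: version 1 callers supply only the original struct, and for newer versions the kernel copies just the tail past the version-1 size using a per-version offset table. A standalone sketch of that scheme, with invented struct layouts and offsets:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* v1 argument, and the v2 argument that appends one field to it. */
struct arg_v1 { uint32_t command_size; uint32_t version; };
struct arg_v2 { uint32_t command_size; uint32_t version; uint32_t context_handle; };

/* Size of the argument as of each version (index = version - 1). */
static const size_t copy_offset[] = { sizeof(struct arg_v1), sizeof(struct arg_v2) };

static int fetch_arg(struct arg_v2 *arg, const void *user, uint32_t version)
{
	if (version == 0 || version > 2)
		return -1;

	/* Every caller supplies at least the version-1 layout. */
	memcpy(arg, user, copy_offset[0]);

	if (version > 1)	/* newer callers: copy only the appended tail */
		memcpy((char *)arg + copy_offset[0],
		       (const char *)user + copy_offset[0],
		       copy_offset[version - 1] - copy_offset[0]);
	else
		arg->context_handle = 0;	/* sane default for old callers */

	return 0;
}

int main(void)
{
	struct arg_v2 user = { .command_size = 64, .version = 2, .context_handle = 9 };
	struct arg_v2 arg;

	if (fetch_arg(&arg, &user, user.version) == 0)
		printf("size=%u ctx=%u\n", (unsigned int)arg.command_size,
		       (unsigned int)arg.context_handle);
	return 0;
}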
-
/* If imported a fence FD from elsewhere, then wait on it */
if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
in_fence = sync_file_get_fence(arg.imported_fence_fd);
if (!in_fence) {
- DRM_ERROR("Cannot get imported fence\n");
+ VMW_DEBUG_USER("Cannot get imported fence\n");
return -EINVAL;
}
@@ -4264,8 +4045,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
NULL, arg.command_size, arg.throttle_us,
arg.context_handle,
(void __user *)(unsigned long)arg.fence_rep,
- NULL,
- arg.flags);
+ NULL, arg.flags);
+
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 2a9112515f46..972e8fda6d35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -642,12 +642,11 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
struct vmw_fb_par *par;
struct fb_info *info;
unsigned fb_width, fb_height;
- unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+ unsigned int fb_bpp, fb_pitch, fb_size;
struct drm_display_mode *init_mode;
int ret;
fb_bpp = 32;
- fb_depth = 24;
/* XXX As shouldn't these be as well. */
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
@@ -655,7 +654,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
- fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index d0fd147ef75f..ff3586cb6851 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -395,12 +395,8 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
WARN(1, "Command buffer has not been allocated.\n");
ret = NULL;
}
- if (IS_ERR_OR_NULL(ret)) {
- DRM_ERROR("Fifo reserve failure of %u bytes.\n",
- (unsigned) bytes);
- dump_stack();
+ if (IS_ERR_OR_NULL(ret))
return NULL;
- }
return ret;
}
@@ -544,7 +540,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
int ret = 0;
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
- fm = vmw_fifo_reserve(dev_priv, bytes);
+ fm = VMW_FIFO_RESERVE(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
@@ -603,12 +599,9 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -652,12 +645,9 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForGBQuery body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -699,8 +689,3 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
-
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
-{
- return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 007a0cc7f232..ae7acc6f3dda 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -51,7 +51,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+ cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -110,11 +110,10 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t *cmd;
- cmd = vmw_fifo_reserve(dev_priv, define_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("GMR2 unbind failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
+ if (unlikely(cmd == NULL))
return;
- }
+
define_cmd.gmrId = gmr_id;
define_cmd.numPages = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 172a6ba6539c..a15375eb476e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -188,7 +188,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
- DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+ VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
@@ -268,7 +268,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
return 0;
if (clips_ptr == NULL) {
- DRM_ERROR("Variable clips_ptr must be specified.\n");
+ VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
ret = -EINVAL;
goto out_clips;
}
@@ -291,7 +291,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
- DRM_ERROR("Invalid framebuffer id.\n");
+ VMW_DEBUG_USER("Invalid framebuffer id.\n");
ret = -ENOENT;
goto out_no_fb;
}
@@ -351,7 +351,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
return 0;
if (clips_ptr == NULL) {
- DRM_ERROR("Argument clips_ptr must be specified.\n");
+ VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
ret = -EINVAL;
goto out_clips;
}
@@ -374,14 +374,14 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
- DRM_ERROR("Invalid framebuffer id.\n");
+ VMW_DEBUG_USER("Invalid framebuffer id.\n");
ret = -ENOENT;
goto out_no_fb;
}
vfb = vmw_framebuffer_to_vfb(fb);
if (!vfb->bo) {
- DRM_ERROR("Framebuffer not buffer backed.\n");
+ VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
ret = -EINVAL;
goto out_no_ttm_lock;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ed2f67822f45..b97bc8e5944b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -64,11 +64,9 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
if (!image)
return -EINVAL;
- cmd = vmw_fifo_reserve(dev_priv, cmd_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, sizeof(*cmd));
@@ -1202,7 +1200,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
vmw_bo_unreference(&res->backup);
res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0;
- vmw_resource_unreserve(res, false, NULL, 0);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
return 0;
@@ -2468,13 +2466,11 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
dirty->unit = unit;
if (dirty->fifo_reserve_size > 0) {
- dirty->cmd = vmw_fifo_reserve(dev_priv,
+ dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
dirty->fifo_reserve_size);
- if (!dirty->cmd) {
- DRM_ERROR("Couldn't reserve fifo space "
- "for dirty blits.\n");
+ if (!dirty->cmd)
return -ENOMEM;
- }
+
memset(dirty->cmd, 0, dirty->fifo_reserve_size);
}
dirty->num_hits = 0;
@@ -2604,12 +2600,9 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
if (!clips)
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
- if (!cmd) {
- DRM_ERROR("Couldn't reserve fifo space for proxy surface "
- "update.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+ if (!cmd)
return -ENOMEM;
- }
for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
box = &cmd->body.box;
@@ -2827,7 +2820,8 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
container_of(update->vfb, typeof(*vfbs), base);
ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
- 0, NULL, NULL);
+ 0, VMW_RES_DIRTY_NONE, NULL,
+ NULL);
}
if (ret)
@@ -2838,7 +2832,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
goto out_unref;
reserved_size = update->calc_fifo_size(update, num_hits);
- cmd_start = vmw_fifo_reserve(update->dev_priv, reserved_size);
+ cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
if (!cmd_start) {
ret = -ENOMEM;
goto out_revert;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 16be515c4c0f..25e6343bcf21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -554,11 +554,9 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
} *cmd;
fifo_size = sizeof(*cmd) * num_clips;
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d83cc66e1210..406edc8cef35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -146,9 +146,8 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -202,12 +201,9 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
return;
bo = otable->page_table->pt_bo;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for OTable "
- "takedown.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return;
- }
memset(cmd, 0, sizeof(*cmd));
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
@@ -614,16 +610,14 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
BUG_ON(ret != 0);
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for Memory "
- "Object unbinding.\n");
- } else {
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (cmd) {
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
+
if (bo) {
vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
@@ -683,12 +677,9 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
vmw_fifo_resource_inc(dev_priv);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for Memory "
- "Object binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
goto out_no_cmd_space;
- }
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
cmd->header.size = sizeof(cmd->body);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 9f1b9d289bec..d5ef8cf802de 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -124,7 +124,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
- cmds = vmw_fifo_reserve(dev_priv, fifo_size);
+ cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
/* hardware has hung, can't do anything here */
if (!cmds)
return -ENOMEM;
@@ -194,7 +194,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
int ret;
for (;;) {
- cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
if (cmds)
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a7c30e567f09..711f8fd0dd45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -365,14 +365,6 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
list_add_tail(&res->mob_head, &res->backup->res_list);
}
- /*
- * Only do this on write operations, and move to
- * vmw_resource_unreserve if it can be called after
- * backup buffers have been unreserved. Otherwise
- * sort out locking.
- */
- res->res_dirty = true;
-
return 0;
out_bind_failed:
@@ -386,6 +378,8 @@ out_bind_failed:
* command submission.
*
* @res: Pointer to the struct vmw_resource to unreserve.
+ * @dirty_set: Change dirty status of the resource.
+ * @dirty: When changing dirty status indicates the new status.
* @switch_backup: Backup buffer has been switched.
* @new_backup: Pointer to new backup buffer if command submission
* switched. May be NULL.
@@ -395,6 +389,8 @@ out_bind_failed:
* resource lru list, so that it can be evicted if necessary.
*/
void vmw_resource_unreserve(struct vmw_resource *res,
+ bool dirty_set,
+ bool dirty,
bool switch_backup,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset)
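(Editorial aside, not part of the patch: a hedged sketch of how the two new parameters are meant to be used. The in-tree call sites in this series pass false/false to leave the dirty state alone; the variable names below are illustrative.)

	/* Leave res->res_dirty untouched, e.g. on the pin/unpin paths: */
	vmw_resource_unreserve(res, false, false, false, NULL, 0);

	/* Mark the resource clean once its contents have been read back: */
	vmw_resource_unreserve(res, true, false, false, NULL, 0);

	/* Mark it dirty and switch to a new backup MOB at the same time: */
	vmw_resource_unreserve(res, true, true, true, new_backup, new_backup_offset);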
@@ -422,6 +418,9 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (switch_backup)
res->backup_offset = new_backup_offset;
+ if (dirty_set)
+ res->res_dirty = dirty;
+
if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
@@ -696,7 +695,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
if (!res->func->unbind)
continue;
- (void) res->func->unbind(res, true, &val_buf);
+ (void) res->func->unbind(res, res->res_dirty, &val_buf);
res->backup_dirty = true;
res->res_dirty = false;
list_del_init(&res->mob_head);
@@ -731,12 +730,9 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for "
- "query MOB read back.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -932,7 +928,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
res->pin_count++;
out_no_validate:
- vmw_resource_unreserve(res, false, NULL, 0UL);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
mutex_unlock(&dev_priv->cmdbuf_mutex);
ttm_write_unlock(&dev_priv->reservation_sem);
@@ -968,7 +964,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
ttm_bo_unreserve(&vbo->base);
}
- vmw_resource_unreserve(res, false, NULL, 0UL);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
ttm_read_unlock(&dev_priv->reservation_sem);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index cd586c52af7e..9a2a3836d89a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -130,12 +130,9 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
BUG_ON(!sou->buffer);
fifo_size = sizeof(*cmd);
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- /* The hardware has hung, nothing we can do about it here. */
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
@@ -182,12 +179,9 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
return 0;
fifo_size = sizeof(*cmd);
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- /* the hardware has hung, nothing we can do about it here */
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
@@ -998,11 +992,9 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
if (depth == 32)
depth = 24;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (!cmd) {
- DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
cmd->header = SVGA_CMD_DEFINE_GMRFB;
cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
@@ -1148,7 +1140,8 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+ NULL, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index bf32fe446219..d310d21f0d54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -218,10 +218,8 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -254,12 +252,9 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -285,12 +280,9 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -328,10 +320,8 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -400,13 +390,9 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
if (!list_empty(&shader->cotable_head) || !shader->committed)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
- shader->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "scrubbing.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -491,12 +477,9 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
return 0;
WARN_ON_ONCE(!shader->committed);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "scrubbing.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -865,14 +848,13 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
ret = vmw_user_bo_lookup(tfile, buffer_handle,
&buffer, NULL);
if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find buffer for shader "
- "creation.\n");
+ VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
return ret;
}
if ((u64)buffer->base.num_pages * PAGE_SIZE <
(u64)size + (u64)offset) {
- DRM_ERROR("Illegal buffer- or shader size.\n");
+ VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
}
@@ -886,7 +868,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
shader_type = SVGA3D_SHADERTYPE_PS;
break;
default:
- DRM_ERROR("Illegal shader type.\n");
+ VMW_DEBUG_USER("Illegal shader type.\n");
ret = -EINVAL;
goto out_bad_arg;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 6a6865384e91..73e9a487e659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -239,17 +239,17 @@ vmw_simple_resource_lookup(struct ttm_object_file *tfile,
base = ttm_base_object_lookup(tfile, handle);
if (!base) {
- DRM_ERROR("Invalid %s handle 0x%08lx.\n",
- func->res_func.type_name,
- (unsigned long) handle);
+ VMW_DEBUG_USER("Invalid %s handle 0x%08lx.\n",
+ func->res_func.type_name,
+ (unsigned long) handle);
return ERR_PTR(-ESRCH);
}
if (ttm_base_object_type(base) != func->ttm_res_type) {
ttm_base_object_unref(&base);
- DRM_ERROR("Invalid type of %s handle 0x%08lx.\n",
- func->res_func.type_name,
- (unsigned long) handle);
+ VMW_DEBUG_USER("Invalid type of %s handle 0x%08lx.\n",
+ func->res_func.type_name,
+ (unsigned long) handle);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index bc8bb690f1ea..63807361e16f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -170,13 +170,12 @@ static int vmw_view_create(struct vmw_resource *res)
return 0;
}
- cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
- view->ctx->id);
+ cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id);
if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for view creation.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
+
memcpy(cmd, &view->cmd, view->cmd_size);
WARN_ON(cmd->body.view_id != view->view_id);
/* Sid may have changed due to surface eviction. */
@@ -214,12 +213,9 @@ static int vmw_view_destroy(struct vmw_resource *res)
if (!view->committed || res->id == -1)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for view "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id);
+ if (!cmd)
return -ENOMEM;
- }
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
cmd->header.size = sizeof(cmd->body);
@@ -338,12 +334,12 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
if (cmd_size != vmw_view_define_sizes[view_type] +
sizeof(SVGA3dCmdHeader)) {
- DRM_ERROR("Illegal view create command size.\n");
+ VMW_DEBUG_USER("Illegal view create command size.\n");
return -EINVAL;
}
if (!vmw_view_id_ok(user_key, view_type)) {
- DRM_ERROR("Illegal view add view id.\n");
+ VMW_DEBUG_USER("Illegal view add view id.\n");
return -EINVAL;
}
@@ -352,8 +348,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for view"
- " creation.\n");
+ DRM_ERROR("Out of graphics memory for view creation\n");
return ret;
}
@@ -413,7 +408,7 @@ int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource **res_p)
{
if (!vmw_view_id_ok(user_key, view_type)) {
- DRM_ERROR("Illegal view remove view id.\n");
+ VMW_DEBUG_USER("Illegal view remove view id.\n");
return -EINVAL;
}
@@ -497,6 +492,30 @@ struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
vmw_view_key(user_key, view_type));
}
+/**
+ * vmw_view_dirtying - Return whether a view type is dirtying its resource
+ * @res: Pointer to the view
+ *
+ * Each time a resource is put on the validation list as the result of a
+ * view pointing to it, we need to determine whether that resource will
+ * be dirtied (written to by the GPU) as a result of the corresponding
+ * GPU operation. Currently only rendertarget- and depth-stencil views are
+ * capable of dirtying their resource.
+ *
+ * Return: Whether the view type of @res dirties the resource it points to.
+ */
+u32 vmw_view_dirtying(struct vmw_resource *res)
+{
+ static u32 view_is_dirtying[vmw_view_max] = {
+ [vmw_view_rt] = VMW_RES_DIRTY_SET,
+ [vmw_view_ds] = VMW_RES_DIRTY_SET,
+ };
+
+ /* Update this function as we add more view types */
+ BUILD_BUG_ON(vmw_view_max != 3);
+ return view_is_dirtying[vmw_view(res)->view_type];
+}
+
const u32 vmw_view_destroy_cmds[] = {
[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
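(Editorial aside: a minimal sketch, assuming a validation context val_ctx and a view resource view_res, of how vmw_view_dirtying() is intended to feed the new dirty argument of vmw_validation_add_resource(); the exact call site is not part of this hunk.)

	/* Dirty the surface a view points at only for rendertarget and
	 * depth-stencil views, as reported by vmw_view_dirtying() above. */
	u32 dirty = vmw_view_dirtying(view_res);
	int ret = vmw_validation_add_resource(val_ctx, vmw_view_srf(view_res),
					      0, dirty, NULL, NULL);
	if (ret)
		return ret;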
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index b80c7252f2fd..12565047bc55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -157,4 +157,5 @@ extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_view_type view_type,
u32 user_key);
+extern u32 vmw_view_dirtying(struct vmw_resource *res);
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 096c2941a8e4..f803bb5e782b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -111,7 +111,7 @@ struct vmw_stdu_update_gb_image {
*/
struct vmw_screen_target_display_unit {
struct vmw_display_unit base;
- const struct vmw_surface *display_srf;
+ struct vmw_surface *display_srf;
enum stdu_content_type content_fb_type;
s32 display_width, display_height;
@@ -167,12 +167,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
SVGA3dCmdDefineGBScreenTarget body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space defining Screen Target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -229,12 +226,9 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
memset(&image, 0, sizeof(image));
image.sid = res ? res->id : SVGA3D_INVALID_ID;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space binding a screen target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -296,12 +290,9 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
return -EINVAL;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
vmw_stdu_populate_update(cmd, stdu->base.unit,
0, stdu->display_width,
@@ -335,12 +326,9 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
if (unlikely(!stdu->defined))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -533,6 +521,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+ stdu->display_srf->res.res_dirty = true;
ddirty->left = ddirty->top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
}
@@ -629,20 +618,16 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
region.x2 = diff.rect.x2;
region.y1 = diff.rect.y1;
region.y2 = diff.rect.y2;
- ret = vmw_kms_update_proxy(
- (struct vmw_resource *) &stdu->display_srf->res,
- (const struct drm_clip_rect *) &region, 1, 1);
+ ret = vmw_kms_update_proxy(&stdu->display_srf->res, &region,
+ 1, 1);
if (ret)
goto out_cleanup;
dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (!cmd) {
- DRM_ERROR("Cannot reserve FIFO space to update STDU");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
goto out_cleanup;
- }
vmw_stdu_populate_update(cmd, stdu->base.unit,
region.x1, region.x2,
@@ -820,6 +805,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
cmd->body.dest.sid = stdu->display_srf->res.id;
update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+ stdu->display_srf->res.res_dirty = true;
} else {
update = dirty->cmd;
commit_size = sizeof(*update);
@@ -876,7 +862,8 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+ NULL, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index ef09f7edf931..219471903bc1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -342,12 +342,9 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
- cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(!cmd))
return;
- }
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
@@ -414,10 +411,8 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
*/
submit_size = vmw_surface_define_size(srf);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -468,12 +463,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "DMA.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
+
vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
vmw_surface_dma_encode(srf, cmd, &ptr, bind);
@@ -556,12 +549,9 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
*/
submit_size = vmw_surface_destroy_size();
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "eviction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, submit_size);
@@ -748,11 +738,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
desc = svga3dsurface_get_desc(req->format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- DRM_ERROR("Invalid surface format for surface creation.\n");
- DRM_ERROR("Format requested is: %d\n", req->format);
+ VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
+ req->format);
return -EINVAL;
}
@@ -764,8 +753,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
size, &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
+ DRM_ERROR("Out of graphics memory for surface.\n");
goto out_unlock;
}
@@ -939,12 +927,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
if (unlikely(!base)) {
- DRM_ERROR("Could not find surface to reference.\n");
+ VMW_DEBUG_USER("Could not find surface to reference.\n");
goto out_no_lookup;
}
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
- DRM_ERROR("Referenced object is not a surface.\n");
+ VMW_DEBUG_USER("Referenced object is not a surface.\n");
goto out_bad_resource;
}
@@ -1022,8 +1010,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
ret = copy_to_user(user_sizes, &srf->base_size,
sizeof(srf->base_size));
if (unlikely(ret != 0)) {
- DRM_ERROR("copy_to_user failed %p %u\n",
- user_sizes, srf->num_sizes);
+ VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
+ srf->num_sizes);
ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT;
}
@@ -1088,12 +1076,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
submit_len = sizeof(*cmd);
}
- cmd = vmw_fifo_reserve(dev_priv, submit_len);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -1171,12 +1157,9 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
- cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd1)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "binding.\n");
+ cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd1))
return -ENOMEM;
- }
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
@@ -1221,12 +1204,9 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
if (readback) {
cmd1 = (void *) cmd;
@@ -1280,10 +1260,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -1405,16 +1383,16 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
if (for_scanout) {
if (!svga3dsurface_is_screen_target_format(format)) {
- DRM_ERROR("Invalid Screen Target surface format.");
+ VMW_DEBUG_USER("Invalid Screen Target surface format.");
return -EINVAL;
}
if (size.width > dev_priv->texture_max_width ||
size.height > dev_priv->texture_max_height) {
- DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
- size.width, size.height,
- dev_priv->texture_max_width,
- dev_priv->texture_max_height);
+ VMW_DEBUG_USER("%ux%u\n, exceeds max surface size %ux%u",
+ size.width, size.height,
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
return -EINVAL;
}
} else {
@@ -1422,14 +1400,14 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
desc = svga3dsurface_get_desc(format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- DRM_ERROR("Invalid surface format.\n");
+ VMW_DEBUG_USER("Invalid surface format.\n");
return -EINVAL;
}
}
/* array_size must be null for non-GL3 host. */
if (array_size > 0 && !dev_priv->has_dx) {
- DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+ VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
return -EINVAL;
}
@@ -1651,7 +1629,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (ret == 0) {
if (res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) {
- DRM_ERROR("Surface backup buffer too small.\n");
+ VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_bo_unreference(&res->backup);
ret = -EINVAL;
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e6d75e377dd8..8bafa6eac5a8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -30,16 +30,9 @@
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct vmw_private *dev_priv;
+ struct drm_file *file_priv = filp->private_data;
+ struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
- DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
- return -EINVAL;
- }
-
- file_priv = filp->private_data;
- dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e9944ac2e057..f611b2290a1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -76,6 +76,8 @@ struct vmw_validation_res_node {
u32 switching_backup : 1;
u32 first_usage : 1;
u32 reserved : 1;
+ u32 dirty : 1;
+ u32 dirty_set : 1;
unsigned long private[0];
};
@@ -299,6 +301,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
* @ctx: The validation context.
* @res: The resource.
* @priv_size: Size of private, additional metadata.
+ * @dirty: Whether to change dirty status.
* @p_node: Output pointer of additional metadata address.
* @first_usage: Whether this was the first time this resource was seen.
*
@@ -307,6 +310,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
struct vmw_resource *res,
size_t priv_size,
+ u32 dirty,
void **p_node,
bool *first_usage)
{
@@ -321,8 +325,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
if (!node) {
- DRM_ERROR("Failed to allocate a resource validation "
- "entry.\n");
+ VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
return -ENOMEM;
}
@@ -358,6 +361,11 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
}
out_fill:
+ if (dirty) {
+ node->dirty_set = 1;
+ /* Overwriting previous information here is intentional! */
+ node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
+ }
if (first_usage)
*first_usage = node->first_usage;
if (p_node)
@@ -367,6 +375,29 @@ out_fill:
}
/**
+ * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @dirty: Dirty information VMW_RES_DIRTY_XX
+ */
+void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
+ void *val_private, u32 dirty)
+{
+ struct vmw_validation_res_node *val;
+
+ if (!dirty)
+ return;
+
+ val = container_of(val_private, typeof(*val), private);
+ val->dirty_set = 1;
+ /* Overwriting previous information here is intentional! */
+ val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
+}
+
+/**
* vmw_validation_res_switch_backup - Register a backup MOB switch during
* validation.
* @ctx: The validation context.
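(Editorial aside: a hedged sketch of the intended call pattern, with placeholder names; the resource is first added with VMW_RES_DIRTY_NONE and the bit is upgraded later, via the per-node metadata pointer, once a command is known to write to it.)

	void *val_private;
	int ret;

	ret = vmw_validation_add_resource(ctx, res, 0, VMW_RES_DIRTY_NONE,
					  &val_private, NULL);
	if (ret)
		return ret;

	/* Later, when the parsed command turns out to write the resource: */
	vmw_validation_res_set_dirty(ctx, val_private, VMW_RES_DIRTY_SET);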
@@ -450,15 +481,23 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
struct vmw_validation_res_node *val;
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
-
- list_for_each_entry(val, &ctx->resource_list, head) {
- if (val->reserved)
- vmw_resource_unreserve(val->res,
- !backoff &&
- val->switching_backup,
- val->new_backup,
- val->new_backup_offset);
- }
+ if (backoff)
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ if (val->reserved)
+ vmw_resource_unreserve(val->res,
+ false, false, false,
+ NULL, 0);
+ }
+ else
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ if (val->reserved)
+ vmw_resource_unreserve(val->res,
+ val->dirty_set,
+ val->dirty,
+ val->switching_backup,
+ val->new_backup,
+ val->new_backup_offset);
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 3b396fea40d7..523f6ac5c335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -33,6 +33,10 @@
#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#define VMW_RES_DIRTY_NONE 0
+#define VMW_RES_DIRTY_SET BIT(0)
+#define VMW_RES_DIRTY_CLEAR BIT(1)
+
/**
* struct vmw_validation_mem - Custom interface to provide memory reservations
* for the validation code.
@@ -237,6 +241,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
struct vmw_resource *res,
size_t priv_size,
+ u32 dirty,
void **p_node,
bool *first_usage);
void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
@@ -261,4 +266,6 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
unsigned int size);
+void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
+ void *val_private, u32 dirty);
#endif
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 3e78a832d7f9..84aa4d61dc42 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -582,6 +582,7 @@ static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
drm_kms_helper_poll_fini(dev);
drm_dev_unplug(dev);
+ drm_dev_put(dev);
front_info->drm_info = NULL;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 53c376d55fcf..a24548489dde 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -224,8 +224,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
struct vm_area_struct *vma)
{
- unsigned long addr = vma->vm_start;
- int i;
+ int ret;
/*
* clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -252,18 +251,11 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
* FIXME: as we insert all the pages now then no .fault handler must
* be called, so don't provide one
*/
- for (i = 0; i < xen_obj->num_pages; i++) {
- int ret;
-
- ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
- if (ret < 0) {
- DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
- return ret;
- }
+ ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+ if (ret < 0)
+ DRM_ERROR("Failed to map pages into vma: %d\n", ret);
- addr += PAGE_SIZE;
- }
- return 0;
+ return ret;
}
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 27101c04a827..0c0eb43abf65 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -115,8 +115,12 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
static void host1x_channel_set_streamid(struct host1x_channel *channel)
{
#if HOST1X_HW >= 6
+ u32 sid = 0x7f;
+#ifdef CONFIG_IOMMU_API
struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
- u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
+ if (spec)
+ sid = spec->ids[0] & 0xffff;
+#endif
host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
#endif
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index 9b2b3fa479c4..5e44ff1f2085 100644
--- a/drivers/gpu/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
DP_COM_CONF_CSC_DEF_BOTH);
} else {
- if (flow->foreground.in_cs == flow->out_cs)
+ if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+ flow->foreground.in_cs == flow->out_cs)
/*
* foreground identical to output, apply color
* conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
struct ipu_dp_priv *priv = flow->priv;
u32 reg, csc;
+ dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
if (!dp->foreground)
return;
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
reg = readl(flow->base + DP_COM_CONF);
csc = reg & DP_COM_CONF_CSC_DEF_MASK;
- if (csc == DP_COM_CONF_CSC_DEF_FG)
- reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+ if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+ reg |= DP_COM_CONF_CSC_DEF_BG;
reg &= ~DP_COM_CONF_FG_EN;
writel(reg, flow->base + DP_COM_CONF);
@@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
mutex_init(&priv->mutex);
for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+ priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+ priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
priv->flow[i].foreground.foreground = true;
priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
priv->flow[i].priv = priv;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ca8d322b487..c3c390ca3690 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,6 +150,7 @@ config HID_ASUS
tristate "Asus"
depends on LEDS_CLASS
depends on ASUS_WMI || ASUS_WMI=n
+ select POWER_SUPPLY
---help---
Support for Asus notebook built-in keyboard and touchpad via i2c, and
the Asus Republic of Gamers laptop keyboard special keys.
@@ -230,6 +231,16 @@ config HID_COUGAR
Supported devices:
- Cougar 500k Gaming Keyboard
+config HID_MACALLY
+ tristate "Macally devices"
+ depends on HID
+ help
+ Support for Macally devices that are not fully compliant with the
+ HID standard.
+
+ Supported devices:
+ - Macally ikey keyboard
+
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
depends on HID && SND
@@ -510,6 +521,7 @@ config HID_LOGITECH
config HID_LOGITECH_DJ
tristate "Logitech Unifying receivers full support"
+ depends on USB_HID
depends on HIDRAW
depends on HID_LOGITECH
select HID_LOGITECH_HIDPP
@@ -1002,6 +1014,22 @@ config HID_UDRAW_PS3
Say Y here if you want to use the THQ uDraw gaming tablet for
the PS3.
+config HID_U2FZERO
+ tristate "U2F Zero LED and RNG support"
+ depends on USB_HID
+ depends on LEDS_CLASS
+ depends on HW_RANDOM
+ help
+ Support for the LED of the U2F Zero device.
+
+ U2F Zero supports custom commands for blinking the LED
+ and getting data from the internal hardware RNG.
+ The internal hardware can be used to feed the entropy pool.
+
+ U2F Zero only supports blinking its LED, so this driver doesn't
+ allow setting the brightness to anything but 1, which will
+ trigger a single blink and immediately reset to back 0.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 170163b41303..cc5d827c9164 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
+obj-$(CONFIG_HID_MACALLY) += hid-macally.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MALTRON) += hid-maltron.o
obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
@@ -109,6 +110,7 @@ obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
+obj-$(CONFIG_HID_U2FZERO) += hid-u2fzero.o
hid-uclogic-objs := hid-uclogic-core.o \
hid-uclogic-rdesc.o \
hid-uclogic-params.o
@@ -134,3 +136,4 @@ obj-$(CONFIG_USB_KBD) += usbhid/
obj-$(CONFIG_I2C_HID) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
+obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9993b692598f..92387fc0bf18 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -30,6 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
+#include <linux/async.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -218,13 +219,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
* Add a usage to the temporary parser table.
*/
-static int hid_add_usage(struct hid_parser *parser, unsigned usage)
+static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
hid_err(parser->device, "usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
+ parser->local.usage_size[parser->local.usage_index] = size;
parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ?
parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
@@ -486,10 +488,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
- return hid_add_usage(parser, data);
+ return hid_add_usage(parser, data, item->size);
case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
@@ -498,9 +497,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
parser->local.usage_minimum = data;
return 0;
@@ -511,9 +507,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
count = data - parser->local.usage_minimum;
if (count + parser->local.usage_index >= HID_MAX_USAGES) {
/*
@@ -533,7 +526,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
for (n = parser->local.usage_minimum; n <= data; n++)
- if (hid_add_usage(parser, n)) {
+ if (hid_add_usage(parser, n, item->size)) {
dbg_hid("hid_add_usage failed\n");
return -1;
}
@@ -548,6 +541,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
/*
+ * Concatenate Usage Pages into Usages where relevant:
+ * As per specification, 6.2.2.8: "When the parser encounters a main item it
+ * concatenates the last declared Usage Page with a Usage to form a complete
+ * usage value."
+ */
+
+static void hid_concatenate_usage_page(struct hid_parser *parser)
+{
+ int i;
+
+ for (i = 0; i < parser->local.usage_index; i++)
+ if (parser->local.usage_size[i] <= 2)
+ parser->local.usage[i] += parser->global.usage_page << 16;
+}
+
+/*
* Process a main item.
*/
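(Editorial note: the deferred concatenation computes a 32-bit extended usage, with the 16-bit Usage Page in the upper half and the 16-bit Usage ID in the lower half; 4-byte usage items already carry their own page and are left alone. A stand-alone sketch, kernel context assumed:)

	/* Compose an extended usage the way hid_concatenate_usage_page() does
	 * for 1- and 2-byte local usage items. */
	static u32 hid_extended_usage(u16 usage_page, u16 usage_id)
	{
		return ((u32)usage_page << 16) | usage_id;
	}

	/* e.g. Usage Page 0x000C (Consumer) + Usage 0x6F (Brightness Up)
	 * gives 0x000C006F, which is what the mapping tables match on. */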
@@ -556,6 +565,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int ret;
+ hid_concatenate_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -765,6 +776,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int i;
+ hid_concatenate_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -1301,10 +1314,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
unsigned offset, unsigned n)
{
- if (n > 32) {
- hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+ if (n > 256) {
+ hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
n, current->comm);
- n = 32;
+ n = 256;
}
return __extract(report, offset, n);
@@ -1624,7 +1637,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
* Implement a generic .request() callback, using .raw_request()
* DO NOT USE in hid drivers directly, but through hid_hw_request instead.
*/
-void __hid_request(struct hid_device *hid, struct hid_report *report,
+int __hid_request(struct hid_device *hid, struct hid_report *report,
int reqtype)
{
char *buf;
@@ -1633,7 +1646,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
- return;
+ return -ENOMEM;
len = hid_report_len(report);
@@ -1650,8 +1663,11 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
if (reqtype == HID_REQ_GET_REPORT)
hid_input_report(hid, report->type, buf, ret, 0);
+ ret = 0;
+
out:
kfree(buf);
+ return ret;
}
EXPORT_SYMBOL_GPL(__hid_request);
@@ -2349,6 +2365,15 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
+ /*
+ * Try loading the module for the device before the add, so that we do
+ * not first have hid-generic binding only to have it replaced
+ * immediately afterwards with a specialized driver.
+ */
+ if (!current_is_async())
+ request_module("hid:b%04Xg%04Xv%08Xp%08X", hdev->bus,
+ hdev->group, hdev->vendor, hdev->product);
+
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
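(Editorial aside: the format string above is the HID modalias, so the request matches the MODULE_DEVICE_TABLE(hid, ...) aliases of specialized drivers. A hedged sketch of the resulting string, with illustrative IDs; bus 0x0003 is USB and group 0x0001 is HID_GROUP_GENERIC.)

	char alias[64];

	snprintf(alias, sizeof(alias), "hid:b%04Xg%04Xv%08Xp%08X",
		 0x0003, 0x0001, 0x046D, 0xC52B);
	/* -> "hid:b0003g0001v0000046Dp0000C52B" */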
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ac9fda1b5a72..1384e57182af 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
seq_printf(f, "\n\n");
/* dump parsed data and input mappings */
+ if (down_interruptible(&hdev->driver_input_lock))
+ return 0;
+
hid_dump_device(hdev, f);
seq_printf(f, "\n");
hid_dump_input_mapping(hdev, f);
+ up(&hdev->driver_input_lock);
+
return 0;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6d93f4ad037..1c8c72093b5a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -323,6 +323,7 @@
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
#define USB_DEVICE_ID_CYGNAL_CP2112 0xea90
+#define USB_DEVICE_ID_U2F_ZERO 0x8acf
#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
@@ -762,8 +763,12 @@
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
+#define USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER 0xc51b
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING 0xc539
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -1034,6 +1039,7 @@
#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
#define USB_VENDOR_ID_SOLID_YEAR 0x060b
+#define USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD 0x0001
#define USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD 0x500a
#define USB_DEVICE_ID_COUGAR_700K_GAMING_KEYBOARD 0x700a
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b10b1922c5bd..abdb01879caa 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
+ if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */
+ switch (usage->hid & 0xf) {
+ case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+ default: goto ignore;
+ }
+ break;
+ }
+
/*
* Some lazy vendors declare 255 usages for System Control,
* leading to the creation of ABS_X|Y axis and too many others.
@@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x06a: map_key_clear(KEY_GREEN); break;
case 0x06b: map_key_clear(KEY_BLUE); break;
case 0x06c: map_key_clear(KEY_YELLOW); break;
- case 0x06d: map_key_clear(KEY_ZOOM); break;
+ case 0x06d: map_key_clear(KEY_ASPECT_RATIO); break;
case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break;
case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break;
@@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break;
case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break;
+ case 0x079: map_key_clear(KEY_KBDILLUMUP); break;
+ case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break;
+ case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break;
+
case 0x082: map_key_clear(KEY_VIDEO_NEXT); break;
case 0x083: map_key_clear(KEY_LAST); break;
case 0x084: map_key_clear(KEY_ENTER); break;
@@ -998,6 +1010,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x1b8: map_key_clear(KEY_VIDEO); break;
case 0x1bc: map_key_clear(KEY_MESSENGER); break;
case 0x1bd: map_key_clear(KEY_INFO); break;
+ case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
case 0x201: map_key_clear(KEY_NEW); break;
case 0x202: map_key_clear(KEY_OPEN); break;
case 0x203: map_key_clear(KEY_CLOSE); break;
@@ -1021,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x22d: map_key_clear(KEY_ZOOMIN); break;
case 0x22e: map_key_clear(KEY_ZOOMOUT); break;
case 0x22f: map_key_clear(KEY_ZOOMRESET); break;
+ case 0x232: map_key_clear(KEY_FULL_SCREEN); break;
case 0x233: map_key_clear(KEY_SCROLLUP); break;
case 0x234: map_key_clear(KEY_SCROLLDOWN); break;
case 0x238: /* AC Pan */
@@ -1037,6 +1051,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x28b: map_key_clear(KEY_FORWARDMAIL); break;
case 0x28c: map_key_clear(KEY_SEND); break;
+ case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
+
case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
@@ -1044,6 +1060,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
+ case 0x29f: map_key_clear(KEY_SCALE); break;
+
default: map_key_clear(KEY_UNKNOWN);
}
break;
@@ -1541,52 +1559,71 @@ static void hidinput_close(struct input_dev *dev)
hid_hw_close(hid);
}
-static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+static bool __hidinput_change_resolution_multipliers(struct hid_device *hid,
+ struct hid_report *report, bool use_logical_max)
{
- struct hid_report_enum *rep_enum;
- struct hid_report *rep;
struct hid_usage *usage;
+ bool update_needed = false;
int i, j;
- rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
- list_for_each_entry(rep, &rep_enum->report_list, list) {
- bool update_needed = false;
+ if (report->maxfield == 0)
+ return false;
- if (rep->maxfield == 0)
- continue;
+ /*
+ * If we have more than one feature within this report we
+ * need to fill in the bits from the others before we can
+ * overwrite the ones for the Resolution Multiplier.
+ */
+ if (report->maxfield > 1) {
+ hid_hw_request(hid, report, HID_REQ_GET_REPORT);
+ hid_hw_wait(hid);
+ }
- /*
- * If we have more than one feature within this report we
- * need to fill in the bits from the others before we can
- * overwrite the ones for the Resolution Multiplier.
+ for (i = 0; i < report->maxfield; i++) {
+ __s32 value = use_logical_max ?
+ report->field[i]->logical_maximum :
+ report->field[i]->logical_minimum;
+
+ /* There is no good reason for a Resolution
+ * Multiplier to have a count other than 1.
+ * Ignore that case.
*/
- if (rep->maxfield > 1) {
- hid_hw_request(hid, rep, HID_REQ_GET_REPORT);
- hid_hw_wait(hid);
- }
+ if (report->field[i]->report_count != 1)
+ continue;
- for (i = 0; i < rep->maxfield; i++) {
- __s32 logical_max = rep->field[i]->logical_maximum;
+ for (j = 0; j < report->field[i]->maxusage; j++) {
+ usage = &report->field[i]->usage[j];
- /* There is no good reason for a Resolution
- * Multiplier to have a count other than 1.
- * Ignore that case.
- */
- if (rep->field[i]->report_count != 1)
+ if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
continue;
- for (j = 0; j < rep->field[i]->maxusage; j++) {
- usage = &rep->field[i]->usage[j];
+ report->field[i]->value[j] = value;
+ update_needed = true;
+ }
+ }
- if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
- continue;
+ return update_needed;
+}
- *rep->field[i]->value = logical_max;
- update_needed = true;
+static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ int ret;
+
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ bool update_needed = __hidinput_change_resolution_multipliers(hid,
+ rep, true);
+
+ if (update_needed) {
+ ret = __hid_request(hid, rep, HID_REQ_SET_REPORT);
+ if (ret) {
+ __hidinput_change_resolution_multipliers(hid,
+ rep, false);
+ return;
}
}
- if (update_needed)
- hid_hw_request(hid, rep, HID_REQ_SET_REPORT);
}
/* refresh our structs */
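A minimal userspace sketch of the pattern the hid-input rework above introduces: program every Resolution Multiplier field to its logical maximum and, if pushing the feature report to the device fails, fall back to the logical minimum so driver and device state stay in sync. The structures and the push_to_device() stub are illustrative, not the kernel API.

/* Minimal sketch (not the kernel code) of the set-then-revert pattern:
 * try the logical maximum first, revert to the minimum on failure. */
#include <stdbool.h>
#include <stdio.h>

struct field {
	int logical_minimum;
	int logical_maximum;
	int value;
};

/* Pretend transport: returns nonzero on failure, like __hid_request(). */
static int push_to_device(struct field *f, int nfields)
{
	(void)f;
	(void)nfields;
	return -1;		/* simulate a device that rejects the request */
}

static bool fill_multipliers(struct field *f, int nfields, bool use_max)
{
	bool update_needed = false;
	int i;

	for (i = 0; i < nfields; i++) {
		f[i].value = use_max ? f[i].logical_maximum
				     : f[i].logical_minimum;
		update_needed = true;
	}
	return update_needed;
}

int main(void)
{
	struct field fields[] = { { 1, 8, 1 }, { 1, 4, 1 } };
	int n = 2;

	if (fill_multipliers(fields, n, true) && push_to_device(fields, n)) {
		/* device refused: revert to the minimum, as the patch does */
		fill_multipliers(fields, n, false);
	}

	printf("multiplier values: %d %d\n", fields[0].value, fields[1].value);
	return 0;
}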
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 5d419a95b6c2..36d725fdb199 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -876,8 +876,6 @@ static const struct hid_device_id lg_devices[] = {
.driver_data = LG_RDESC | LG_WIRELESS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER),
.driver_data = LG_RDESC | LG_WIRELESS },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2),
- .driver_data = LG_RDESC | LG_WIRELESS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER),
.driver_data = LG_BAD_RELATIVE_KEYS },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 826fa1e1c8d9..b1e894618eed 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -25,13 +25,14 @@
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
-#include <linux/usb.h>
#include <linux/kfifo.h>
+#include <linux/delay.h>
+#include <linux/usb.h> /* For to_usb_interface for kvm extra intf check */
#include <asm/unaligned.h>
#include "hid-ids.h"
#define DJ_MAX_PAIRED_DEVICES 6
-#define DJ_MAX_NUMBER_NOTIFICATIONS 8
+#define DJ_MAX_NUMBER_NOTIFS 8
#define DJ_RECEIVER_INDEX 0
#define DJ_DEVICE_INDEX_MIN 1
#define DJ_DEVICE_INDEX_MAX 6
@@ -74,7 +75,6 @@
/* Device Un-Paired Notification */
#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
-
/* Connection Status Notification */
#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
#define CONNECTION_STATUS_PARAM_STATUS 0x00
@@ -94,12 +94,43 @@
#define REPORT_TYPE_LEDS 0x0E
/* RF Report types bitfield */
-#define STD_KEYBOARD 0x00000002
-#define STD_MOUSE 0x00000004
-#define MULTIMEDIA 0x00000008
-#define POWER_KEYS 0x00000010
-#define MEDIA_CENTER 0x00000100
-#define KBD_LEDS 0x00004000
+#define STD_KEYBOARD BIT(1)
+#define STD_MOUSE BIT(2)
+#define MULTIMEDIA BIT(3)
+#define POWER_KEYS BIT(4)
+#define MEDIA_CENTER BIT(8)
+#define KBD_LEDS BIT(14)
+/* Fake (bitnr > NUMBER_OF_HID_REPORTS) bit to track HID++ capability */
+#define HIDPP BIT_ULL(63)
+
+/* HID++ Device Connected Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_CONNECTED 0x41
+#define HIDPP_PARAM_PROTO_TYPE 0x00
+#define HIDPP_PARAM_DEVICE_INFO 0x01
+#define HIDPP_PARAM_EQUAD_LSB 0x02
+#define HIDPP_PARAM_EQUAD_MSB 0x03
+#define HIDPP_PARAM_27MHZ_DEVID 0x03
+#define HIDPP_DEVICE_TYPE_MASK GENMASK(3, 0)
+#define HIDPP_LINK_STATUS_MASK BIT(6)
+#define HIDPP_MANUFACTURER_MASK BIT(7)
+
+#define HIDPP_DEVICE_TYPE_KEYBOARD 1
+#define HIDPP_DEVICE_TYPE_MOUSE 2
+
+#define HIDPP_SET_REGISTER 0x80
+#define HIDPP_GET_LONG_REGISTER 0x83
+#define HIDPP_REG_CONNECTION_STATE 0x02
+#define HIDPP_REG_PAIRING_INFORMATION 0xB5
+#define HIDPP_PAIRING_INFORMATION 0x20
+#define HIDPP_FAKE_DEVICE_ARRIVAL 0x02
+
+enum recvr_type {
+ recvr_type_dj,
+ recvr_type_hidpp,
+ recvr_type_gaming_hidpp,
+ recvr_type_27mhz,
+ recvr_type_bluetooth,
+};
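The switch to BIT()/BIT_ULL() above also parks a synthetic HID++ capability flag in bit 63, which is what later forces reports_supported to grow from u32 to u64. A small standalone sketch of that layout, with the macros redefined locally so it builds outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)     (1UL << (n))
#define BIT_ULL(n) (1ULL << (n))

#define STD_KEYBOARD BIT(1)
#define STD_MOUSE    BIT(2)
#define KBD_LEDS     BIT(14)
#define HIDPP        BIT_ULL(63)	/* fake bit, not an RF report number */

int main(void)
{
	uint64_t reports_supported = STD_KEYBOARD | KBD_LEDS | HIDPP;
	unsigned int report = 1;	/* incoming RF report number */

	if (reports_supported & BIT(report))
		printf("forward report %u to this device\n", report);
	if (reports_supported & HIDPP)
		printf("device also speaks HID++\n");
	return 0;
}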
struct dj_report {
u8 report_id;
@@ -108,23 +139,51 @@ struct dj_report {
u8 report_params[DJREPORT_SHORT_LENGTH - 3];
};
+struct hidpp_event {
+ u8 report_id;
+ u8 device_index;
+ u8 sub_id;
+ u8 params[HIDPP_REPORT_LONG_LENGTH - 3U];
+} __packed;
+
struct dj_receiver_dev {
- struct hid_device *hdev;
+ struct hid_device *mouse;
+ struct hid_device *keyboard;
+ struct hid_device *hidpp;
struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
DJ_DEVICE_INDEX_MIN];
+ struct list_head list;
+ struct kref kref;
struct work_struct work;
struct kfifo notif_fifo;
+ unsigned long last_query; /* in jiffies */
+ bool ready;
+ enum recvr_type type;
+ unsigned int unnumbered_application;
spinlock_t lock;
- bool querying_devices;
};
struct dj_device {
struct hid_device *hdev;
struct dj_receiver_dev *dj_receiver_dev;
- u32 reports_supported;
+ u64 reports_supported;
u8 device_index;
};
+#define WORKITEM_TYPE_EMPTY 0
+#define WORKITEM_TYPE_PAIRED 1
+#define WORKITEM_TYPE_UNPAIRED 2
+#define WORKITEM_TYPE_UNKNOWN 255
+
+struct dj_workitem {
+ u8 type; /* WORKITEM_TYPE_* */
+ u8 device_index;
+ u8 device_type;
+ u8 quad_id_msb;
+ u8 quad_id_lsb;
+ u64 reports_supported;
+};
+
/* Keyboard descriptor (1) */
static const char kbd_descriptor[] = {
0x05, 0x01, /* USAGE_PAGE (generic Desktop) */
@@ -200,6 +259,131 @@ static const char mse_descriptor[] = {
0xC0, /* END_COLLECTION */
};
+/* Mouse descriptor (2) for 27 MHz receiver, only 8 buttons */
+static const char mse_27mhz_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x08, /* USAGE_MAX (8) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Mouse descriptor (2) for Bluetooth receiver, low-res hwheel, 12 buttons */
+static const char mse_bluetooth_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x08, /* USAGE_MAX (8) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x15, 0xF9, /* LOGICAL_MIN (-7) */
+ 0x25, 0x07, /* LOGICAL_MAX (7) */
+ 0x75, 0x04, /* REPORT_SIZE (4) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x09, /* USAGE_MIN (9) */
+ 0x29, 0x0C, /* USAGE_MAX (12) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x95, 0x04, /* REPORT_COUNT (4) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Gaming Mouse descriptor (2) */
+static const char mse_high_res_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x10, /* USAGE_MAX (16) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x10, /* REPORT_COUNT (16) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0x80, /* LOGICAL_MIN (-32767) */
+ 0x26, 0xFF, 0x7F, /* LOGICAL_MAX (32767) */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
/* Consumer Control descriptor (3) */
static const char consumer_descriptor[] = {
0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
@@ -308,7 +492,7 @@ static const char hidpp_descriptor[] = {
/* Make sure all descriptors are present here */
#define MAX_RDESC_SIZE \
(sizeof(kbd_descriptor) + \
- sizeof(mse_descriptor) + \
+ sizeof(mse_bluetooth_descriptor) + \
sizeof(consumer_descriptor) + \
sizeof(syscontrol_descriptor) + \
sizeof(media_descriptor) + \
@@ -341,51 +525,160 @@ static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
static struct hid_ll_driver logi_dj_ll_driver;
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
+static void delayedwork_callback(struct work_struct *work);
+
+static LIST_HEAD(dj_hdev_list);
+static DEFINE_MUTEX(dj_hdev_list_lock);
+
+/*
+ * dj/HID++ receivers are really a single logical entity, but for BIOS/Windows
+ * compatibility they have multiple USB interfaces. On HID++ receivers we need
+ * to listen for input reports on both interfaces. The functions below are used
+ * to create a single struct dj_receiver_dev for all interfaces belonging to
+ * a single USB-device / receiver.
+ */
+static struct dj_receiver_dev *dj_find_receiver_dev(struct hid_device *hdev,
+ enum recvr_type type)
+{
+ struct dj_receiver_dev *djrcv_dev;
+ char sep;
+
+ /*
+ * The bluetooth receiver contains a built-in hub and has separate
+ * USB-devices for the keyboard and mouse interfaces.
+ */
+ sep = (type == recvr_type_bluetooth) ? '.' : '/';
+
+ /* Try to find an already-probed interface from the same device */
+ list_for_each_entry(djrcv_dev, &dj_hdev_list, list) {
+ if (djrcv_dev->mouse &&
+ hid_compare_device_paths(hdev, djrcv_dev->mouse, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ if (djrcv_dev->keyboard &&
+ hid_compare_device_paths(hdev, djrcv_dev->keyboard, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ if (djrcv_dev->hidpp &&
+ hid_compare_device_paths(hdev, djrcv_dev->hidpp, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ }
+
+ return NULL;
+}
+
+static void dj_release_receiver_dev(struct kref *kref)
+{
+ struct dj_receiver_dev *djrcv_dev = container_of(kref, struct dj_receiver_dev, kref);
+
+ list_del(&djrcv_dev->list);
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+}
+
+static void dj_put_receiver_dev(struct hid_device *hdev)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
+ mutex_lock(&dj_hdev_list_lock);
+
+ if (djrcv_dev->mouse == hdev)
+ djrcv_dev->mouse = NULL;
+ if (djrcv_dev->keyboard == hdev)
+ djrcv_dev->keyboard = NULL;
+ if (djrcv_dev->hidpp == hdev)
+ djrcv_dev->hidpp = NULL;
+
+ kref_put(&djrcv_dev->kref, dj_release_receiver_dev);
+
+ mutex_unlock(&dj_hdev_list_lock);
+}
+
+static struct dj_receiver_dev *dj_get_receiver_dev(struct hid_device *hdev,
+ enum recvr_type type,
+ unsigned int application,
+ bool is_hidpp)
+{
+ struct dj_receiver_dev *djrcv_dev;
+
+ mutex_lock(&dj_hdev_list_lock);
+
+ djrcv_dev = dj_find_receiver_dev(hdev, type);
+ if (!djrcv_dev) {
+ djrcv_dev = kzalloc(sizeof(*djrcv_dev), GFP_KERNEL);
+ if (!djrcv_dev)
+ goto out;
+
+ INIT_WORK(&djrcv_dev->work, delayedwork_callback);
+ spin_lock_init(&djrcv_dev->lock);
+ if (kfifo_alloc(&djrcv_dev->notif_fifo,
+ DJ_MAX_NUMBER_NOTIFS * sizeof(struct dj_workitem),
+ GFP_KERNEL)) {
+ kfree(djrcv_dev);
+ djrcv_dev = NULL;
+ goto out;
+ }
+ kref_init(&djrcv_dev->kref);
+ list_add_tail(&djrcv_dev->list, &dj_hdev_list);
+ djrcv_dev->last_query = jiffies;
+ djrcv_dev->type = type;
+ }
+
+ if (application == HID_GD_KEYBOARD)
+ djrcv_dev->keyboard = hdev;
+ if (application == HID_GD_MOUSE)
+ djrcv_dev->mouse = hdev;
+ if (is_hidpp)
+ djrcv_dev->hidpp = hdev;
+
+ hid_set_drvdata(hdev, djrcv_dev);
+out:
+ mutex_unlock(&dj_hdev_list_lock);
+ return djrcv_dev;
+}
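The helpers above implement a fairly generic "one shared object per physical receiver, found via a global list and reference counted" scheme, so that several probed interfaces end up with the same dj_receiver_dev. A userspace approximation of the same idea, using pthreads and a plain integer in place of the kernel mutex and kref; all names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct receiver {
	char phys[32];		/* shared key, like the USB path */
	int refcount;
	struct receiver *next;
};

static struct receiver *receivers;
static pthread_mutex_t receivers_lock = PTHREAD_MUTEX_INITIALIZER;

static struct receiver *get_receiver(const char *phys)
{
	struct receiver *r;

	pthread_mutex_lock(&receivers_lock);
	for (r = receivers; r; r = r->next) {
		if (!strcmp(r->phys, phys)) {
			r->refcount++;		/* like kref_get() */
			goto out;
		}
	}
	r = calloc(1, sizeof(*r));
	if (r) {
		snprintf(r->phys, sizeof(r->phys), "%s", phys);
		r->refcount = 1;		/* like kref_init() */
		r->next = receivers;
		receivers = r;
	}
out:
	pthread_mutex_unlock(&receivers_lock);
	return r;
}

static void put_receiver(struct receiver *r)
{
	struct receiver **p;

	pthread_mutex_lock(&receivers_lock);
	if (--r->refcount == 0) {	/* like kref_put() hitting zero */
		for (p = &receivers; *p; p = &(*p)->next) {
			if (*p == r) {
				*p = r->next;
				break;
			}
		}
		free(r);
	}
	pthread_mutex_unlock(&receivers_lock);
}

int main(void)
{
	struct receiver *kbd = get_receiver("usb-1.2");	/* keyboard intf */
	struct receiver *mse = get_receiver("usb-1.2");	/* mouse intf */

	printf("shared object: %s, refcount %d\n", kbd->phys, kbd->refcount);
	put_receiver(mse);
	put_receiver(kbd);	/* last put frees the shared object */
	return 0;
}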
static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+ struct dj_workitem *workitem)
{
/* Called in delayed work context */
struct dj_device *dj_dev;
unsigned long flags;
spin_lock_irqsave(&djrcv_dev->lock, flags);
- dj_dev = djrcv_dev->paired_dj_devices[dj_report->device_index];
- djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ dj_dev = djrcv_dev->paired_dj_devices[workitem->device_index];
+ djrcv_dev->paired_dj_devices[workitem->device_index] = NULL;
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (dj_dev != NULL) {
hid_destroy_device(dj_dev->hdev);
kfree(dj_dev);
} else {
- dev_err(&djrcv_dev->hdev->dev, "%s: can't destroy a NULL device\n",
+ hid_err(djrcv_dev->hidpp, "%s: can't destroy a NULL device\n",
__func__);
}
}
static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+ struct dj_workitem *workitem)
{
/* Called in delayed work context */
- struct hid_device *djrcv_hdev = djrcv_dev->hdev;
- struct usb_interface *intf = to_usb_interface(djrcv_hdev->dev.parent);
- struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct hid_device *djrcv_hdev = djrcv_dev->hidpp;
struct hid_device *dj_hiddev;
struct dj_device *dj_dev;
+ u8 device_index = workitem->device_index;
+ unsigned long flags;
/* Device index goes from 1 to 6, we need 3 bytes to store the
* colon, the index, and a null terminator
*/
unsigned char tmpstr[3];
- if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
- SPFUNCTION_DEVICE_LIST_EMPTY) {
- dbg_hid("%s: device list is empty\n", __func__);
- djrcv_dev->querying_devices = false;
- return;
- }
-
- if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+ /* We are the only one ever adding a device, no need to lock */
+ if (djrcv_dev->paired_dj_devices[device_index]) {
/* The device is already known. No need to reallocate it. */
dbg_hid("%s: device is already known\n", __func__);
return;
@@ -393,8 +686,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
dj_hiddev = hid_allocate_device();
if (IS_ERR(dj_hiddev)) {
- dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: hid_allocate_dev failed\n", __func__);
return;
}
@@ -402,48 +694,67 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
dj_hiddev->dev.parent = &djrcv_hdev->dev;
dj_hiddev->bus = BUS_USB;
- dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
- dj_hiddev->product =
- (dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB]
- << 8) |
- dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB];
- snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
- "Logitech Unifying Device. Wireless PID:%04x",
- dj_hiddev->product);
-
- dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE;
-
- usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
- snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
+ dj_hiddev->vendor = djrcv_hdev->vendor;
+ dj_hiddev->product = (workitem->quad_id_msb << 8) |
+ workitem->quad_id_lsb;
+ if (workitem->device_type) {
+ const char *type_str = "Device";
+
+ switch (workitem->device_type) {
+ case 0x01: type_str = "Keyboard"; break;
+ case 0x02: type_str = "Mouse"; break;
+ case 0x03: type_str = "Numpad"; break;
+ case 0x04: type_str = "Presenter"; break;
+ case 0x07: type_str = "Remote Control"; break;
+ case 0x08: type_str = "Trackball"; break;
+ case 0x09: type_str = "Touchpad"; break;
+ }
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Wireless %s PID:%04x",
+ type_str, dj_hiddev->product);
+ } else {
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Unifying Device. Wireless PID:%04x",
+ dj_hiddev->product);
+ }
+
+ if (djrcv_dev->type == recvr_type_27mhz)
+ dj_hiddev->group = HID_GROUP_LOGITECH_27MHZ_DEVICE;
+ else
+ dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE;
+
+ memcpy(dj_hiddev->phys, djrcv_hdev->phys, sizeof(djrcv_hdev->phys));
+ snprintf(tmpstr, sizeof(tmpstr), ":%d", device_index);
strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys));
dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL);
if (!dj_dev) {
- dev_err(&djrcv_hdev->dev, "%s: failed allocating dj_device\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: failed allocating dj_dev\n", __func__);
goto dj_device_allocate_fail;
}
- dj_dev->reports_supported = get_unaligned_le32(
- dj_report->report_params + DEVICE_PAIRED_RF_REPORT_TYPE);
+ dj_dev->reports_supported = workitem->reports_supported;
dj_dev->hdev = dj_hiddev;
dj_dev->dj_receiver_dev = djrcv_dev;
- dj_dev->device_index = dj_report->device_index;
+ dj_dev->device_index = device_index;
dj_hiddev->driver_data = dj_dev;
- djrcv_dev->paired_dj_devices[dj_report->device_index] = dj_dev;
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->paired_dj_devices[device_index] = dj_dev;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (hid_add_device(dj_hiddev)) {
- dev_err(&djrcv_hdev->dev, "%s: failed adding dj_device\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: failed adding dj_device\n", __func__);
goto hid_add_device_fail;
}
return;
hid_add_device_fail:
- djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->paired_dj_devices[device_index] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
kfree(dj_dev);
dj_device_allocate_fail:
hid_destroy_device(dj_hiddev);
@@ -454,7 +765,7 @@ static void delayedwork_callback(struct work_struct *work)
struct dj_receiver_dev *djrcv_dev =
container_of(work, struct dj_receiver_dev, work);
- struct dj_report dj_report;
+ struct dj_workitem workitem;
unsigned long flags;
int count;
int retval;
@@ -463,69 +774,231 @@ static void delayedwork_callback(struct work_struct *work)
spin_lock_irqsave(&djrcv_dev->lock, flags);
- count = kfifo_out(&djrcv_dev->notif_fifo, &dj_report,
- sizeof(struct dj_report));
-
- if (count != sizeof(struct dj_report)) {
- dev_err(&djrcv_dev->hdev->dev, "%s: workitem triggered without "
- "notifications available\n", __func__);
+ /*
+ * Since we attach to multiple interfaces, we may get scheduled before
+ * we are bound to the HID++ interface, catch this.
+ */
+ if (!djrcv_dev->ready) {
+ pr_warn("%s: delayedwork queued before hidpp interface was enumerated\n",
+ __func__);
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return;
}
- if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) {
- if (schedule_work(&djrcv_dev->work) == 0) {
- dbg_hid("%s: did not schedule the work item, was "
- "already queued\n", __func__);
- }
+ count = kfifo_out(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+
+ if (count != sizeof(workitem)) {
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
}
+ if (!kfifo_is_empty(&djrcv_dev->notif_fifo))
+ schedule_work(&djrcv_dev->work);
+
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
- switch (dj_report.report_type) {
- case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
- logi_dj_recv_add_djhid_device(djrcv_dev, &dj_report);
+ switch (workitem.type) {
+ case WORKITEM_TYPE_PAIRED:
+ logi_dj_recv_add_djhid_device(djrcv_dev, &workitem);
break;
- case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
- logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+ case WORKITEM_TYPE_UNPAIRED:
+ logi_dj_recv_destroy_djhid_device(djrcv_dev, &workitem);
break;
- default:
- /* A normal report (i. e. not belonging to a pair/unpair notification)
- * arriving here, means that the report arrived but we did not have a
- * paired dj_device associated to the report's device_index, this
- * means that the original "device paired" notification corresponding
- * to this dj_device never arrived to this driver. The reason is that
- * hid-core discards all packets coming from a device while probe() is
- * executing. */
- if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
- /* ok, we don't know the device, just re-ask the
- * receiver for the list of connected devices. */
+ case WORKITEM_TYPE_UNKNOWN:
retval = logi_dj_recv_query_paired_devices(djrcv_dev);
- if (!retval) {
- /* everything went fine, so just leave */
- break;
- }
- dev_err(&djrcv_dev->hdev->dev,
- "%s:logi_dj_recv_query_paired_devices "
- "error:%d\n", __func__, retval);
+ if (retval) {
+ hid_err(djrcv_dev->hidpp, "%s: logi_dj_recv_query_paired_devices error: %d\n",
+ __func__, retval);
}
- dbg_hid("%s: unexpected report type\n", __func__);
+ break;
+ case WORKITEM_TYPE_EMPTY:
+ dbg_hid("%s: device list is empty\n", __func__);
+ break;
}
}
+/*
+ * Sometimes we receive reports for which we do not have a paired dj_device
+ * associated with the device_index or report-type to forward the report to.
+ * This means that the original "device paired" notification corresponding
+ * to the dj_device never reached this driver. Possible reasons for this are:
+ * 1) hid-core discards all packets coming from a device during probe().
+ * 2) if the receiver is plugged into a KVM switch then the pairing reports
+ * are only forwarded to it if the focus is on this PC.
+ * This function deals with this by re-asking the receiver for the list of
+ * connected devices in the delayed work callback.
+ * This function MUST be called with djrcv->lock held.
+ */
+static void logi_dj_recv_queue_unknown_work(struct dj_receiver_dev *djrcv_dev)
+{
+ struct dj_workitem workitem = { .type = WORKITEM_TYPE_UNKNOWN };
+
+ /* Rate limit queries done because of unhandled reports to 2/sec */
+ if (time_before(jiffies, djrcv_dev->last_query + HZ / 2))
+ return;
+
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
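The HZ/2 check above is the usual wrap-safe jiffies rate limit. A runnable userspace sketch of the same comparison, with a fake tick counter and an assumed HZ value:

#include <stdbool.h>
#include <stdio.h>

#define HZ 250UL
#define time_before(a, b) ((long)((a) - (b)) < 0)	/* wrap-safe compare */

static unsigned long jiffies;		/* fake tick counter */
static unsigned long last_query;

static bool query_allowed(void)
{
	/* no more than one query every HZ/2 ticks, i.e. 2 per second */
	if (time_before(jiffies, last_query + HZ / 2))
		return false;
	last_query = jiffies;
	return true;
}

int main(void)
{
	for (jiffies = 0; jiffies < 2 * HZ; jiffies += 50)
		printf("tick %4lu: query %s\n", jiffies,
		       query_allowed() ? "sent" : "rate-limited");
	return 0;
}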
+
static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_workitem workitem = {
+ .device_index = dj_report->device_index,
+ };
+
+ switch (dj_report->report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ workitem.type = WORKITEM_TYPE_PAIRED;
+ if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+ SPFUNCTION_DEVICE_LIST_EMPTY) {
+ workitem.type = WORKITEM_TYPE_EMPTY;
+ break;
+ }
+ /* fall-through */
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ workitem.quad_id_msb =
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB];
+ workitem.quad_id_lsb =
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB];
+ workitem.reports_supported = get_unaligned_le32(
+ dj_report->report_params +
+ DEVICE_PAIRED_RF_REPORT_TYPE);
+ workitem.reports_supported |= HIDPP;
+ if (dj_report->report_type == REPORT_TYPE_NOTIF_DEVICE_UNPAIRED)
+ workitem.type = WORKITEM_TYPE_UNPAIRED;
+ break;
+ default:
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ return;
+ }
- kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
- if (schedule_work(&djrcv_dev->work) == 0) {
- dbg_hid("%s: did not schedule the work item, was already "
- "queued\n", __func__);
+static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
+ struct dj_workitem *workitem)
+{
+ workitem->type = WORKITEM_TYPE_PAIRED;
+ workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
+ HIDPP_DEVICE_TYPE_MASK;
+ workitem->quad_id_msb = hidpp_report->params[HIDPP_PARAM_EQUAD_MSB];
+ workitem->quad_id_lsb = hidpp_report->params[HIDPP_PARAM_EQUAD_LSB];
+ switch (workitem->device_type) {
+ case REPORT_TYPE_KEYBOARD:
+ workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
+ POWER_KEYS | MEDIA_CENTER |
+ HIDPP;
+ break;
+ case REPORT_TYPE_MOUSE:
+ workitem->reports_supported |= STD_MOUSE | HIDPP;
+ break;
+ }
+}
+
+static void logi_hidpp_dev_conn_notif_27mhz(struct hid_device *hdev,
+ struct hidpp_event *hidpp_report,
+ struct dj_workitem *workitem)
+{
+ workitem->type = WORKITEM_TYPE_PAIRED;
+ workitem->quad_id_lsb = hidpp_report->params[HIDPP_PARAM_27MHZ_DEVID];
+ switch (hidpp_report->device_index) {
+ case 1: /* Index 1 is always a mouse */
+ case 2: /* Index 2 is always a mouse */
+ workitem->device_type = HIDPP_DEVICE_TYPE_MOUSE;
+ workitem->reports_supported |= STD_MOUSE | HIDPP;
+ break;
+ case 3: /* Index 3 is always the keyboard */
+ case 4: /* Index 4 is used for an optional separate numpad */
+ workitem->device_type = HIDPP_DEVICE_TYPE_KEYBOARD;
+ workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
+ POWER_KEYS | HIDPP;
+ break;
+ default:
+ hid_warn(hdev, "%s: unexpected device-index %d", __func__,
+ hidpp_report->device_index);
}
}
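Both notification paths above only translate the incoming report into a small work item, push it into a FIFO and defer the heavy lifting (adding or removing child devices) to a worker. A simplified userspace rendering of that producer/consumer split, with a plain array standing in for the kfifo and the worker called directly instead of being scheduled:

#include <stdio.h>

enum workitem_type { WORK_EMPTY, WORK_PAIRED, WORK_UNPAIRED };

struct workitem {
	enum workitem_type type;
	unsigned char device_index;
};

#define FIFO_SLOTS 8
static struct workitem fifo[FIFO_SLOTS];
static unsigned int fifo_head, fifo_tail;

static void queue_notification(enum workitem_type type, unsigned char index)
{
	/* "atomic" side: just enqueue, never sleep */
	if (fifo_head - fifo_tail < FIFO_SLOTS)
		fifo[fifo_head++ % FIFO_SLOTS] =
			(struct workitem){ .type = type, .device_index = index };
}

static void worker(void)
{
	/* process side: may sleep, talks to the device, etc. */
	while (fifo_tail != fifo_head) {
		struct workitem w = fifo[fifo_tail++ % FIFO_SLOTS];

		printf("workitem: type %d, device %u\n", w.type, w.device_index);
	}
}

int main(void)
{
	queue_notification(WORK_PAIRED, 1);
	queue_notification(WORK_UNPAIRED, 2);
	worker();
	return 0;
}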
+static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
+ struct hidpp_event *hidpp_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ const char *device_type = "UNKNOWN";
+ struct dj_workitem workitem = {
+ .type = WORKITEM_TYPE_EMPTY,
+ .device_index = hidpp_report->device_index,
+ };
+
+ switch (hidpp_report->params[HIDPP_PARAM_PROTO_TYPE]) {
+ case 0x01:
+ device_type = "Bluetooth";
+ /* Bluetooth connect packet contents is the same as (e)QUAD */
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
+ HIDPP_MANUFACTURER_MASK)) {
+ hid_info(hdev, "Non Logitech device connected on slot %d\n",
+ hidpp_report->device_index);
+ workitem.reports_supported &= ~HIDPP;
+ }
+ break;
+ case 0x02:
+ device_type = "27 Mhz";
+ logi_hidpp_dev_conn_notif_27mhz(hdev, hidpp_report, &workitem);
+ break;
+ case 0x03:
+ device_type = "QUAD or eQUAD";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x04:
+ device_type = "eQUAD step 4 DJ";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x05:
+ device_type = "DFU Lite";
+ break;
+ case 0x06:
+ device_type = "eQUAD step 4 Lite";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x07:
+ device_type = "eQUAD step 4 Gaming";
+ break;
+ case 0x08:
+ device_type = "eQUAD step 4 for gamepads";
+ break;
+ case 0x0a:
+ device_type = "eQUAD nano Lite";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x0c:
+ device_type = "eQUAD Lightspeed";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ workitem.reports_supported |= STD_KEYBOARD;
+ break;
+ }
+
+ if (workitem.type == WORKITEM_TYPE_EMPTY) {
+ hid_warn(hdev,
+ "unusable device of type %s (0x%02x) connected on slot %d",
+ device_type,
+ hidpp_report->params[HIDPP_PARAM_PROTO_TYPE],
+ hidpp_report->device_index);
+ return;
+ }
+
+ hid_info(hdev, "device of type %s (0x%02x) connected on slot %d",
+ device_type, hidpp_report->params[HIDPP_PARAM_PROTO_TYPE],
+ hidpp_report->device_index);
+
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
+
static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
@@ -552,8 +1025,8 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
}
}
-static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+static void logi_dj_recv_forward_dj(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
struct dj_device *dj_device;
@@ -573,18 +1046,48 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
}
}
-static void logi_dj_recv_forward_hidpp(struct dj_device *dj_dev, u8 *data,
- int size)
+static void logi_dj_recv_forward_report(struct dj_device *dj_dev, u8 *data,
+ int size)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
if (hid_input_report(dj_dev->hdev, HID_INPUT_REPORT, data, size, 1))
dbg_hid("hid_input_report error\n");
}
+static void logi_dj_recv_forward_input_report(struct hid_device *hdev,
+ u8 *data, int size)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_device *dj_dev;
+ unsigned long flags;
+ u8 report = data[0];
+ int i;
+
+ if (report > REPORT_TYPE_RFREPORT_LAST) {
+ hid_err(hdev, "Unexpected input report number %d\n", report);
+ return;
+ }
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ dj_dev = djrcv_dev->paired_dj_devices[i];
+ if (dj_dev && (dj_dev->reports_supported & BIT(report))) {
+ logi_dj_recv_forward_report(dj_dev, data, size);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
+ }
+ }
+
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ dbg_hid("No dj-devs handling input report number %d\n", report);
+}
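The forwarding loop above amounts to "deliver the report to the first paired device whose capability mask contains the report number, otherwise re-query the receiver". A toy standalone version of that routing decision, with made-up devices and masks:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

#define STD_KEYBOARD BIT(1)
#define STD_MOUSE    BIT(2)
#define MULTIMEDIA   BIT(3)

struct child_dev {
	const char *name;
	uint64_t reports_supported;
};

static struct child_dev paired[] = {
	{ "keyboard", STD_KEYBOARD | MULTIMEDIA },
	{ "mouse",    STD_MOUSE },
};

static void forward_input_report(const uint8_t *data, int size)
{
	uint8_t report = data[0];	/* first byte is the RF report number */
	unsigned int i;

	for (i = 0; i < sizeof(paired) / sizeof(paired[0]); i++) {
		if (paired[i].reports_supported & BIT(report)) {
			printf("report %u (%d bytes) -> %s\n",
			       report, size, paired[i].name);
			return;
		}
	}
	printf("report %u unhandled, re-query the receiver\n", report);
}

int main(void)
{
	uint8_t mouse_report[7]   = { 0x02, 0x01, 0x10, 0x00, 0x00, 0x00, 0x00 };
	uint8_t unknown_report[7] = { 0x0e, 0x00 };

	forward_input_report(mouse_report, sizeof(mouse_report));
	forward_input_report(unknown_report, sizeof(unknown_report));
	return 0;
}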
+
static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
- struct hid_device *hdev = djrcv_dev->hdev;
+ struct hid_device *hdev = djrcv_dev->hidpp;
struct hid_report *report;
struct hid_report_enum *output_report_enum;
u8 *data = (u8 *)(&dj_report->device_index);
@@ -594,7 +1097,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
if (!report) {
- dev_err(&hdev->dev, "%s: unable to find dj report\n", __func__);
+ hid_err(hdev, "%s: unable to find dj report\n", __func__);
return -ENODEV;
}
@@ -606,14 +1109,40 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
return 0;
}
+static int logi_dj_recv_query_hidpp_devices(struct dj_receiver_dev *djrcv_dev)
+{
+ const u8 template[] = {REPORT_ID_HIDPP_SHORT,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_SET_REGISTER,
+ HIDPP_REG_CONNECTION_STATE,
+ HIDPP_FAKE_DEVICE_ARRIVAL,
+ 0x00, 0x00};
+ u8 *hidpp_report;
+ int retval;
+
+ hidpp_report = kmemdup(template, sizeof(template), GFP_KERNEL);
+ if (!hidpp_report)
+ return -ENOMEM;
+
+ retval = hid_hw_raw_request(djrcv_dev->hidpp,
+ REPORT_ID_HIDPP_SHORT,
+ hidpp_report, sizeof(template),
+ HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
+
+ kfree(hidpp_report);
+ return 0;
+}
+
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
{
struct dj_report *dj_report;
int retval;
- /* no need to protect djrcv_dev->querying_devices */
- if (djrcv_dev->querying_devices)
- return 0;
+ djrcv_dev->last_query = jiffies;
+
+ if (djrcv_dev->type != recvr_type_dj)
+ return logi_dj_recv_query_hidpp_devices(djrcv_dev);
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
@@ -630,27 +1159,33 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
- struct hid_device *hdev = djrcv_dev->hdev;
+ struct hid_device *hdev = djrcv_dev->hidpp;
struct dj_report *dj_report;
u8 *buf;
- int retval;
+ int retval = 0;
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
- dj_report->report_id = REPORT_ID_DJ_SHORT;
- dj_report->device_index = 0xFF;
- dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
- dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
- dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
- retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
- /*
- * Ugly sleep to work around a USB 3.0 bug when the receiver is still
- * processing the "switch-to-dj" command while we send an other command.
- * 50 msec should gives enough time to the receiver to be ready.
- */
- msleep(50);
+ if (djrcv_dev->type == recvr_type_dj) {
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+ dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] =
+ (u8)timeout;
+
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+
+ /*
+ * Ugly sleep to work around a USB 3.0 bug when the receiver is
+ * still processing the "switch-to-dj" command while we send
+ * another command.
+ * 50 msec should give the receiver enough time to be ready.
+ */
+ msleep(50);
+ }
/*
* Magical bits to set up hidpp notifications when the dj devices
@@ -682,22 +1217,28 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
static int logi_dj_ll_open(struct hid_device *hid)
{
- dbg_hid("%s:%s\n", __func__, hid->phys);
+ dbg_hid("%s: %s\n", __func__, hid->phys);
return 0;
}
static void logi_dj_ll_close(struct hid_device *hid)
{
- dbg_hid("%s:%s\n", __func__, hid->phys);
+ dbg_hid("%s: %s\n", __func__, hid->phys);
}
/*
* Register 0xB5 is "pairing information". It is solely intended for the
* receiver, so do not overwrite the device index.
*/
-static u8 unifying_pairing_query[] = {0x10, 0xff, 0x83, 0xb5};
-static u8 unifying_pairing_answer[] = {0x11, 0xff, 0x83, 0xb5};
+static u8 unifying_pairing_query[] = { REPORT_ID_HIDPP_SHORT,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_GET_LONG_REGISTER,
+ HIDPP_REG_PAIRING_INFORMATION };
+static u8 unifying_pairing_answer[] = { REPORT_ID_HIDPP_LONG,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_GET_LONG_REGISTER,
+ HIDPP_REG_PAIRING_INFORMATION };
static int logi_dj_ll_raw_request(struct hid_device *hid,
unsigned char reportnum, __u8 *buf,
@@ -721,13 +1262,23 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
buf[4] = (buf[4] & 0xf0) | (djdev->device_index - 1);
else
buf[1] = djdev->device_index;
- return hid_hw_raw_request(djrcv_dev->hdev, reportnum, buf,
+ return hid_hw_raw_request(djrcv_dev->hidpp, reportnum, buf,
count, report_type, reqtype);
}
if (buf[0] != REPORT_TYPE_LEDS)
return -EINVAL;
+ if (djrcv_dev->type != recvr_type_dj && count >= 2) {
+ if (!djrcv_dev->keyboard) {
+ hid_warn(hid, "Received REPORT_TYPE_LEDS request before the keyboard interface was enumerated\n");
+ return 0;
+ }
+ /* usbhid overrides the report ID and ignores the first byte */
+ return hid_hw_raw_request(djrcv_dev->keyboard, 0, buf, count,
+ report_type, reqtype);
+ }
+
out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC);
if (!out_buf)
return -ENOMEM;
@@ -739,7 +1290,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
out_buf[1] = djdev->device_index;
memcpy(out_buf + 2, buf, count);
- ret = hid_hw_raw_request(djrcv_dev->hdev, out_buf[0], out_buf,
+ ret = hid_hw_raw_request(djrcv_dev->hidpp, out_buf[0], out_buf,
DJREPORT_SHORT_LENGTH, report_type, reqtype);
kfree(out_buf);
@@ -769,41 +1320,55 @@ static int logi_dj_ll_parse(struct hid_device *hid)
return -ENOMEM;
if (djdev->reports_supported & STD_KEYBOARD) {
- dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
+ dbg_hid("%s: sending a kbd descriptor, reports_supported: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
}
if (djdev->reports_supported & STD_MOUSE) {
- dbg_hid("%s: sending a mouse descriptor, reports_supported: "
- "%x\n", __func__, djdev->reports_supported);
- rdcat(rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
+ dbg_hid("%s: sending a mouse descriptor, reports_supported: %llx\n",
+ __func__, djdev->reports_supported);
+ if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp)
+ rdcat(rdesc, &rsize, mse_high_res_descriptor,
+ sizeof(mse_high_res_descriptor));
+ else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
+ rdcat(rdesc, &rsize, mse_27mhz_descriptor,
+ sizeof(mse_27mhz_descriptor));
+ else if (djdev->dj_receiver_dev->type == recvr_type_bluetooth)
+ rdcat(rdesc, &rsize, mse_bluetooth_descriptor,
+ sizeof(mse_bluetooth_descriptor));
+ else
+ rdcat(rdesc, &rsize, mse_descriptor,
+ sizeof(mse_descriptor));
}
if (djdev->reports_supported & MULTIMEDIA) {
- dbg_hid("%s: sending a multimedia report descriptor: %x\n",
+ dbg_hid("%s: sending a multimedia report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
}
if (djdev->reports_supported & POWER_KEYS) {
- dbg_hid("%s: sending a power keys report descriptor: %x\n",
+ dbg_hid("%s: sending a power keys report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
}
if (djdev->reports_supported & MEDIA_CENTER) {
- dbg_hid("%s: sending a media center report descriptor: %x\n",
+ dbg_hid("%s: sending a media center report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
}
if (djdev->reports_supported & KBD_LEDS) {
- dbg_hid("%s: need to send kbd leds report descriptor: %x\n",
+ dbg_hid("%s: need to send kbd leds report descriptor: %llx\n",
__func__, djdev->reports_supported);
}
- rdcat(rdesc, &rsize, hidpp_descriptor, sizeof(hidpp_descriptor));
+ if (djdev->reports_supported & HIDPP) {
+ rdcat(rdesc, &rsize, hidpp_descriptor,
+ sizeof(hidpp_descriptor));
+ }
retval = hid_parse_report(hid, rdesc, rsize);
kfree(rdesc);
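logi_dj_ll_parse() above builds each child device's report descriptor by concatenating only the fragments its capability bits call for. A self-contained sketch of that assembly step; the fragment bytes are dummies, not real HID descriptors:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STD_KEYBOARD (1ULL << 1)
#define STD_MOUSE    (1ULL << 2)
#define HIDPP        (1ULL << 63)

static const uint8_t kbd_fragment[]   = { 0x05, 0x01, 0x09, 0x06 };
static const uint8_t mouse_fragment[] = { 0x05, 0x01, 0x09, 0x02 };
static const uint8_t hidpp_fragment[] = { 0x06, 0x00, 0xff };

/* append one fragment to the descriptor buffer, like the driver's rdcat() */
static void rdcat(uint8_t *rdesc, unsigned int *rsize,
		  const uint8_t *data, unsigned int size)
{
	memcpy(rdesc + *rsize, data, size);
	*rsize += size;
}

int main(void)
{
	uint8_t rdesc[64];
	unsigned int rsize = 0;
	uint64_t reports_supported = STD_KEYBOARD | HIDPP;

	if (reports_supported & STD_KEYBOARD)
		rdcat(rdesc, &rsize, kbd_fragment, sizeof(kbd_fragment));
	if (reports_supported & STD_MOUSE)
		rdcat(rdesc, &rsize, mouse_fragment, sizeof(mouse_fragment));
	if (reports_supported & HIDPP)
		rdcat(rdesc, &rsize, hidpp_fragment, sizeof(hidpp_fragment));

	printf("built a %u byte descriptor\n", rsize);
	return 0;
}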
@@ -866,7 +1431,7 @@ static int logi_dj_dj_event(struct hid_device *hdev,
* so ignore those reports too.
*/
if (dj_report->device_index != DJ_RECEIVER_INDEX)
- dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+ hid_err(hdev, "%s: invalid device index:%d\n",
__func__, dj_report->device_index);
return false;
}
@@ -893,7 +1458,7 @@ static int logi_dj_dj_event(struct hid_device *hdev,
}
break;
default:
- logi_dj_recv_forward_report(djrcv_dev, dj_report);
+ logi_dj_recv_forward_dj(djrcv_dev, dj_report);
}
out:
@@ -907,9 +1472,10 @@ static int logi_dj_hidpp_event(struct hid_device *hdev,
int size)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
- struct dj_report *dj_report = (struct dj_report *) data;
+ struct hidpp_event *hidpp_report = (struct hidpp_event *) data;
+ struct dj_device *dj_dev;
unsigned long flags;
- u8 device_index = dj_report->device_index;
+ u8 device_index = hidpp_report->device_index;
if (device_index == HIDPP_RECEIVER_INDEX) {
/* special case were the device wants to know its unifying
@@ -937,21 +1503,42 @@ static int logi_dj_hidpp_event(struct hid_device *hdev,
* This driver can ignore safely the receiver notifications,
* so ignore those reports too.
*/
- dev_err(&hdev->dev, "%s: invalid device index:%d\n",
- __func__, dj_report->device_index);
+ hid_err(hdev, "%s: invalid device index:%d\n", __func__,
+ hidpp_report->device_index);
return false;
}
spin_lock_irqsave(&djrcv_dev->lock, flags);
- if (!djrcv_dev->paired_dj_devices[device_index])
- /* received an event for an unknown device, bail out */
- goto out;
+ dj_dev = djrcv_dev->paired_dj_devices[device_index];
+
+ /*
+ * With 27 MHz receivers, we do not get an explicit unpair event,
+ * remove the old device if the user has paired a *different* device.
+ */
+ if (djrcv_dev->type == recvr_type_27mhz && dj_dev &&
+ hidpp_report->sub_id == REPORT_TYPE_NOTIF_DEVICE_CONNECTED &&
+ hidpp_report->params[HIDPP_PARAM_PROTO_TYPE] == 0x02 &&
+ hidpp_report->params[HIDPP_PARAM_27MHZ_DEVID] !=
+ dj_dev->hdev->product) {
+ struct dj_workitem workitem = {
+ .device_index = hidpp_report->device_index,
+ .type = WORKITEM_TYPE_UNPAIRED,
+ };
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ /* logi_hidpp_recv_queue_notif will queue the work */
+ dj_dev = NULL;
+ }
- logi_dj_recv_forward_hidpp(djrcv_dev->paired_dj_devices[device_index],
- data, size);
+ if (dj_dev) {
+ logi_dj_recv_forward_report(dj_dev, data, size);
+ } else {
+ if (hidpp_report->sub_id == REPORT_TYPE_NOTIF_DEVICE_CONNECTED)
+ logi_hidpp_recv_queue_notif(hdev, hidpp_report);
+ else
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ }
-out:
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return false;
@@ -961,112 +1548,176 @@ static int logi_dj_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data,
int size)
{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
dbg_hid("%s, size:%d\n", __func__, size);
+ if (!djrcv_dev)
+ return 0;
+
+ if (!hdev->report_enum[HID_INPUT_REPORT].numbered) {
+
+ if (djrcv_dev->unnumbered_application == HID_GD_KEYBOARD) {
+ /*
+ * For the keyboard, we can reuse the same report by
+ * using the second byte which is constant in the USB
+ * HID report descriptor.
+ */
+ data[1] = data[0];
+ data[0] = REPORT_TYPE_KEYBOARD;
+
+ logi_dj_recv_forward_input_report(hdev, data, size);
+
+ /* restore previous state */
+ data[0] = data[1];
+ data[1] = 0;
+ }
+ /* The 27 MHz mouse-only receiver sends unnumbered mouse data */
+ if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
+ size == 6) {
+ u8 mouse_report[7];
+
+ /* Prepend report id */
+ mouse_report[0] = REPORT_TYPE_MOUSE;
+ memcpy(mouse_report + 1, data, 6);
+ logi_dj_recv_forward_input_report(hdev, mouse_report, 7);
+ }
+
+ return false;
+ }
+
switch (data[0]) {
case REPORT_ID_DJ_SHORT:
if (size != DJREPORT_SHORT_LENGTH) {
- dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+ hid_err(hdev, "Short DJ report bad size (%d)", size);
+ return false;
+ }
+ return logi_dj_dj_event(hdev, report, data, size);
+ case REPORT_ID_DJ_LONG:
+ if (size != DJREPORT_LONG_LENGTH) {
+ hid_err(hdev, "Long DJ report bad size (%d)", size);
return false;
}
return logi_dj_dj_event(hdev, report, data, size);
case REPORT_ID_HIDPP_SHORT:
if (size != HIDPP_REPORT_SHORT_LENGTH) {
- dev_err(&hdev->dev,
- "Short HID++ report of bad size (%d)", size);
+ hid_err(hdev, "Short HID++ report bad size (%d)", size);
return false;
}
return logi_dj_hidpp_event(hdev, report, data, size);
case REPORT_ID_HIDPP_LONG:
if (size != HIDPP_REPORT_LONG_LENGTH) {
- dev_err(&hdev->dev,
- "Long HID++ report of bad size (%d)", size);
+ hid_err(hdev, "Long HID++ report bad size (%d)", size);
return false;
}
return logi_dj_hidpp_event(hdev, report, data, size);
}
+ logi_dj_recv_forward_input_report(hdev, data, size);
+
return false;
}
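The unnumbered-report handling above rests on two small buffer tricks: the keyboard path temporarily writes the expected report ID into byte 0 (stashing the old byte 0 in byte 1, which is constant in the descriptor), and the 27 MHz mouse-only path copies the data into a one-byte-larger buffer with the ID prepended. A runnable illustration with made-up report contents; the constants mirror the report IDs assumed here, not authoritative values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REPORT_TYPE_KEYBOARD 0x01
#define REPORT_TYPE_MOUSE    0x02

static void forward(const uint8_t *data, int size)
{
	int i;

	printf("forwarding %d bytes:", size);
	for (i = 0; i < size; i++)
		printf(" %02x", data[i]);
	printf("\n");
}

int main(void)
{
	uint8_t kbd[8]   = { 0x04, 0x00, 0x1e, 0, 0, 0, 0, 0 };  /* no ID */
	uint8_t mouse[6] = { 0x01, 0x10, 0x00, 0xff, 0x0f, 0x00 };
	uint8_t mouse_report[7];

	/* keyboard: reuse the buffer in place, then restore it */
	kbd[1] = kbd[0];
	kbd[0] = REPORT_TYPE_KEYBOARD;
	forward(kbd, sizeof(kbd));
	kbd[0] = kbd[1];
	kbd[1] = 0;

	/* mouse: prepend the report ID into a slightly larger buffer */
	mouse_report[0] = REPORT_TYPE_MOUSE;
	memcpy(mouse_report + 1, mouse, sizeof(mouse));
	forward(mouse_report, sizeof(mouse_report));
	return 0;
}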
static int logi_dj_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
struct dj_receiver_dev *djrcv_dev;
+ struct usb_interface *intf;
+ unsigned int no_dj_interfaces = 0;
+ bool has_hidpp = false;
+ unsigned long flags;
int retval;
- dbg_hid("%s called for ifnum %d\n", __func__,
- intf->cur_altsetting->desc.bInterfaceNumber);
+ /*
+ * Call usbhid to fetch the HID descriptors of the current
+ * interface and then call hid-core to parse the fetched
+ * descriptors.
+ */
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "%s: parse failed\n", __func__);
+ return retval;
+ }
- /* Ignore interfaces 0 and 1, they will not carry any data, dont create
- * any hid_device for them */
- if (intf->cur_altsetting->desc.bInterfaceNumber !=
- LOGITECH_DJ_INTERFACE_NUMBER) {
- dbg_hid("%s: ignoring ifnum %d\n", __func__,
- intf->cur_altsetting->desc.bInterfaceNumber);
+ /*
+ * Some KVMs add an extra interface for e.g. mouse emulation. If we
+ * treat these as logitech-dj interfaces then this causes input events
+ * reported through this extra interface to not be reported correctly.
+ * To avoid this, we treat these as generic-hid devices.
+ */
+ switch (id->driver_data) {
+ case recvr_type_dj: no_dj_interfaces = 3; break;
+ case recvr_type_hidpp: no_dj_interfaces = 2; break;
+ case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break;
+ case recvr_type_27mhz: no_dj_interfaces = 2; break;
+ case recvr_type_bluetooth: no_dj_interfaces = 2; break;
+ }
+ if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ intf = to_usb_interface(hdev->dev.parent);
+ if (intf && intf->altsetting->desc.bInterfaceNumber >=
+ no_dj_interfaces) {
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ }
+ }
+
+ rep_enum = &hdev->report_enum[HID_INPUT_REPORT];
+
+ /* no input reports, bail out */
+ if (list_empty(&rep_enum->report_list))
return -ENODEV;
+
+ /*
+ * Check for the HID++ application.
+ * Note: we should theoretically check for HID++ and DJ
+ * collections, but this will do.
+ */
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ if (rep->application == 0xff000001)
+ has_hidpp = true;
}
- /* Treat interface 2 */
+ /*
+ * Ignore interfaces without a DJ/HID++ collection; they will not carry
+ * any data, so don't create any hid_device for them.
+ */
+ if (!has_hidpp && id->driver_data == recvr_type_dj)
+ return -ENODEV;
- djrcv_dev = kzalloc(sizeof(struct dj_receiver_dev), GFP_KERNEL);
+ /* get the current application attached to the node */
+ rep = list_first_entry(&rep_enum->report_list, struct hid_report, list);
+ djrcv_dev = dj_get_receiver_dev(hdev, id->driver_data,
+ rep->application, has_hidpp);
if (!djrcv_dev) {
- dev_err(&hdev->dev,
- "%s:failed allocating dj_receiver_dev\n", __func__);
+ hid_err(hdev, "%s: dj_get_receiver_dev failed\n", __func__);
return -ENOMEM;
}
- djrcv_dev->hdev = hdev;
- INIT_WORK(&djrcv_dev->work, delayedwork_callback);
- spin_lock_init(&djrcv_dev->lock);
- if (kfifo_alloc(&djrcv_dev->notif_fifo,
- DJ_MAX_NUMBER_NOTIFICATIONS * sizeof(struct dj_report),
- GFP_KERNEL)) {
- dev_err(&hdev->dev,
- "%s:failed allocating notif_fifo\n", __func__);
- kfree(djrcv_dev);
- return -ENOMEM;
- }
- hid_set_drvdata(hdev, djrcv_dev);
- /* Call to usbhid to fetch the HID descriptors of interface 2 and
- * subsequently call to the hid/hid-core to parse the fetched
- * descriptors, this will in turn create the hidraw and hiddev nodes
- * for interface 2 of the receiver */
- retval = hid_parse(hdev);
- if (retval) {
- dev_err(&hdev->dev,
- "%s:parse of interface 2 failed\n", __func__);
- goto hid_parse_fail;
- }
-
- if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
- 0, DJREPORT_SHORT_LENGTH - 1)) {
- retval = -ENODEV;
- goto hid_parse_fail;
- }
+ if (!rep_enum->numbered)
+ djrcv_dev->unnumbered_application = rep->application;
/* Starts the usb device and connects to upper interfaces hiddev and
* hidraw */
- retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ retval = hid_hw_start(hdev, HID_CONNECT_HIDRAW|HID_CONNECT_HIDDEV);
if (retval) {
- dev_err(&hdev->dev,
- "%s:hid_hw_start returned error\n", __func__);
+ hid_err(hdev, "%s: hid_hw_start returned error\n", __func__);
goto hid_hw_start_fail;
}
- retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
- if (retval < 0) {
- dev_err(&hdev->dev,
- "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
- __func__, retval);
- goto switch_to_dj_mode_fail;
+ if (has_hidpp) {
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ goto switch_to_dj_mode_fail;
+ }
}
/* This is enabling the polling urb on the IN endpoint */
retval = hid_hw_open(hdev);
if (retval < 0) {
- dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+ hid_err(hdev, "%s: hid_hw_open returned error:%d\n",
__func__, retval);
goto llopen_failed;
}
@@ -1074,11 +1725,16 @@ static int logi_dj_probe(struct hid_device *hdev,
/* Allow incoming packets to arrive: */
hid_device_io_start(hdev);
- retval = logi_dj_recv_query_paired_devices(djrcv_dev);
- if (retval < 0) {
- dev_err(&hdev->dev, "%s:logi_dj_recv_query_paired_devices "
- "error:%d\n", __func__, retval);
- goto logi_dj_recv_query_paired_devices_failed;
+ if (has_hidpp) {
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->ready = true;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+ if (retval < 0) {
+ hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n",
+ __func__, retval);
+ goto logi_dj_recv_query_paired_devices_failed;
+ }
}
return retval;
@@ -1091,12 +1747,8 @@ switch_to_dj_mode_fail:
hid_hw_stop(hdev);
hid_hw_start_fail:
-hid_parse_fail:
- kfifo_free(&djrcv_dev->notif_fifo);
- kfree(djrcv_dev);
- hid_set_drvdata(hdev, NULL);
+ dj_put_receiver_dev(hdev);
return retval;
-
}
#ifdef CONFIG_PM
@@ -1105,10 +1757,12 @@ static int logi_dj_reset_resume(struct hid_device *hdev)
int retval;
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ if (!djrcv_dev || djrcv_dev->hidpp != hdev)
+ return 0;
+
retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
if (retval < 0) {
- dev_err(&hdev->dev,
- "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n",
__func__, retval);
}
@@ -1120,39 +1774,83 @@ static void logi_dj_remove(struct hid_device *hdev)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
struct dj_device *dj_dev;
+ unsigned long flags;
int i;
dbg_hid("%s\n", __func__);
+ if (!djrcv_dev)
+ return hid_hw_stop(hdev);
+
+ /*
+ * This ensures that if the work gets requeued from another
+ * interface of the same receiver it will be a no-op.
+ */
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->ready = false;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
cancel_work_sync(&djrcv_dev->work);
hid_hw_close(hdev);
hid_hw_stop(hdev);
- /* I suppose that at this point the only context that can access
- * the djrecv_data is this thread as the work item is guaranteed to
- * have finished and no more raw_event callbacks should arrive after
- * the remove callback was triggered so no locks are put around the
- * code below */
+ /*
+ * For proper operation we need access to all interfaces, so we destroy
+ * the paired devices when we're unbound from any interface.
+ *
+ * Note we may still be bound to other interfaces, sharing the same
+ * djrcv_dev, so we need locking here.
+ */
for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
dj_dev = djrcv_dev->paired_dj_devices[i];
+ djrcv_dev->paired_dj_devices[i] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (dj_dev != NULL) {
hid_destroy_device(dj_dev->hdev);
kfree(dj_dev);
- djrcv_dev->paired_dj_devices[i] = NULL;
}
}
- kfifo_free(&djrcv_dev->notif_fifo);
- kfree(djrcv_dev);
- hid_set_drvdata(hdev, NULL);
+ dj_put_receiver_dev(hdev);
}
static const struct hid_device_id logi_dj_receivers[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER),
+ .driver_data = recvr_type_dj},
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
+ .driver_data = recvr_type_dj},
+ { /* Logitech Nano (non DJ) receiver */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
+ .driver_data = recvr_type_hidpp},
+ { /* Logitech Nano (non DJ) receiver */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
+ .driver_data = recvr_type_hidpp},
+ { /* Logitech gaming receiver (0xc539) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING),
+ .driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_S510_RECEIVER_2),
+ .driver_data = recvr_type_27mhz},
+ { /* Logitech 27 MHz HID++ 1.0 mouse-only receiver (0xc51b) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER),
+ .driver_data = recvr_type_27mhz},
+ { /* Logitech MX5000 HID++ / bluetooth receiver keyboard intf. */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc70e),
+ .driver_data = recvr_type_bluetooth},
+ { /* Logitech MX5000 HID++ / bluetooth receiver mouse intf. */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc70a),
+ .driver_data = recvr_type_bluetooth},
{}
};
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 15ed6177a7a3..72fc9c0566db 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -51,7 +51,11 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_REPORT_SHORT_LENGTH 7
#define HIDPP_REPORT_LONG_LENGTH 20
-#define HIDPP_REPORT_VERY_LONG_LENGTH 64
+#define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64
+
+#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03
+#define HIDPP_SUB_ID_ROLLER 0x05
+#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06
#define HIDPP_QUIRK_CLASS_WTP BIT(0)
#define HIDPP_QUIRK_CLASS_M560 BIT(1)
@@ -68,6 +72,13 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
+#define HIDPP_QUIRK_HIDPP_WHEELS BIT(29)
+#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(30)
+#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(31)
+
+/* These are just aliases for now */
+#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
+#define HIDPP_QUIRK_KBD_ZOOM_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
/* Convenience constant to check for any high-res support. */
#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
@@ -106,13 +117,13 @@ MODULE_PARM_DESC(disable_tap_to_click,
struct fap {
u8 feature_index;
u8 funcindex_clientid;
- u8 params[HIDPP_REPORT_VERY_LONG_LENGTH - 4U];
+ u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U];
};
struct rap {
u8 sub_id;
u8 reg_address;
- u8 params[HIDPP_REPORT_VERY_LONG_LENGTH - 4U];
+ u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U];
};
struct hidpp_report {
@@ -149,7 +160,6 @@ struct hidpp_battery {
* @last_time: last event time, used to reset remainder after inactivity
*/
struct hidpp_scroll_counter {
- struct input_dev *dev;
int wheel_multiplier;
int remainder;
int direction;
@@ -158,10 +168,12 @@ struct hidpp_scroll_counter {
struct hidpp_device {
struct hid_device *hid_dev;
+ struct input_dev *input;
struct mutex send_mutex;
void *send_receive_buf;
char *name; /* will never be NULL and should not be freed */
wait_queue_head_t wait;
+ int very_long_report_length;
bool answer_available;
u8 protocol_major;
u8 protocol_minor;
@@ -206,8 +218,6 @@ static int __hidpp_send_report(struct hid_device *hdev,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
int fields_count, ret;
- hidpp = hid_get_drvdata(hdev);
-
switch (hidpp_report->report_id) {
case REPORT_ID_HIDPP_SHORT:
fields_count = HIDPP_REPORT_SHORT_LENGTH;
@@ -216,7 +226,7 @@ static int __hidpp_send_report(struct hid_device *hdev,
fields_count = HIDPP_REPORT_LONG_LENGTH;
break;
case REPORT_ID_HIDPP_VERY_LONG:
- fields_count = HIDPP_REPORT_VERY_LONG_LENGTH;
+ fields_count = hidpp->very_long_report_length;
break;
default:
return -ENODEV;
@@ -342,7 +352,7 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
max_count = HIDPP_REPORT_LONG_LENGTH - 4;
break;
case REPORT_ID_HIDPP_VERY_LONG:
- max_count = HIDPP_REPORT_VERY_LONG_LENGTH - 4;
+ max_count = hidpp_dev->very_long_report_length - 4;
break;
default:
return -EINVAL;
@@ -434,14 +444,15 @@ static void hidpp_prefix_name(char **name, int name_length)
* emit low-resolution scroll events when appropriate for
* backwards-compatibility with userspace input libraries.
*/
-static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *counter,
+static void hidpp_scroll_counter_handle_scroll(struct input_dev *input_dev,
+ struct hidpp_scroll_counter *counter,
int hi_res_value)
{
int low_res_value, remainder, direction;
unsigned long long now, previous;
hi_res_value = hi_res_value * 120/counter->wheel_multiplier;
- input_report_rel(counter->dev, REL_WHEEL_HI_RES, hi_res_value);
+ input_report_rel(input_dev, REL_WHEEL_HI_RES, hi_res_value);
remainder = counter->remainder;
direction = hi_res_value > 0 ? 1 : -1;
@@ -475,7 +486,7 @@ static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *coun
low_res_value = remainder / 120;
if (low_res_value == 0)
low_res_value = (hi_res_value > 0 ? 1 : -1);
- input_report_rel(counter->dev, REL_WHEEL, low_res_value);
+ input_report_rel(input_dev, REL_WHEEL, low_res_value);
remainder -= low_res_value * 120;
}
counter->remainder = remainder;
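A minimal standalone sketch, not part of the patch, of the remainder bookkeeping the hunk above performs, assuming a wheel_multiplier of 8 and ignoring the direction-change and inactivity handling the driver also does:

static int example_accumulate_scroll(int *remainder, int hw_ticks)
{
	int hi_res_value = hw_ticks * 120 / 8;		/* 15 hi-res units per hardware tick */
	int low_res_value = 0;

	*remainder += hi_res_value;			/* REL_WHEEL_HI_RES reports hi_res_value directly */
	if (*remainder >= 120 || *remainder <= -120) {
		low_res_value = *remainder / 120;	/* one REL_WHEEL step per accumulated detent */
		*remainder -= low_res_value * 120;
	}
	return low_res_value;
}

Eight consecutive +1 ticks thus produce eight REL_WHEEL_HI_RES events of 15 units each but only a single REL_WHEEL event, which is the backwards-compatible behaviour described in the comment above.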
@@ -491,14 +502,16 @@ static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *coun
#define HIDPP_GET_LONG_REGISTER 0x83
/**
- * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
+ * hidpp10_set_register - Modify a HID++ 1.0 register.
* @hidpp_dev: the device to set the register on.
* @register_address: the address of the register to modify.
* @byte: the byte of the register to modify. Should be less than 3.
+ * @mask: mask of the bits to modify
+ * @value: new values for the bits in mask
* Return: 0 if successful, otherwise a negative error code.
*/
-static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
- u8 register_address, u8 byte, u8 bit)
+static int hidpp10_set_register(struct hidpp_device *hidpp_dev,
+ u8 register_address, u8 byte, u8 mask, u8 value)
{
struct hidpp_report response;
int ret;
@@ -514,7 +527,8 @@ static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
memcpy(params, response.rap.params, 3);
- params[byte] |= BIT(bit);
+ params[byte] &= ~mask;
+ params[byte] |= value & mask;
return hidpp_send_rap_command_sync(hidpp_dev,
REPORT_ID_HIDPP_SHORT,
@@ -523,20 +537,28 @@ static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
params, 3, &response);
}
-
-#define HIDPP_REG_GENERAL 0x00
+#define HIDPP_REG_ENABLE_REPORTS 0x00
+#define HIDPP_ENABLE_CONSUMER_REPORT BIT(0)
+#define HIDPP_ENABLE_WHEEL_REPORT BIT(2)
+#define HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT BIT(3)
+#define HIDPP_ENABLE_BAT_REPORT BIT(4)
+#define HIDPP_ENABLE_HWHEEL_REPORT BIT(5)
static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
{
- return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
+ return hidpp10_set_register(hidpp_dev, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_BAT_REPORT, HIDPP_ENABLE_BAT_REPORT);
}
#define HIDPP_REG_FEATURES 0x01
+#define HIDPP_ENABLE_SPECIAL_BUTTON_FUNC BIT(1)
+#define HIDPP_ENABLE_FAST_SCROLL BIT(6)
/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
{
- return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
+ return hidpp10_set_register(hidpp_dev, HIDPP_REG_FEATURES, 0,
+ HIDPP_ENABLE_FAST_SCROLL, HIDPP_ENABLE_FAST_SCROLL);
}
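The (mask, value) pair turns the old single-bit setter into a general read-modify-write of one register byte. A minimal sketch of that byte update, with the helper name chosen here purely for illustration (u8 comes from <linux/types.h>):

static u8 example_apply_register_bits(u8 reg_byte, u8 mask, u8 value)
{
	reg_byte &= ~mask;		/* clear every bit covered by the mask */
	reg_byte |= value & mask;	/* then set only the requested bits within it */
	return reg_byte;
}

Passing the same constant for both mask and value, as the two callers above do, reduces to the old set-a-bit behaviour, while a caller can now also clear bits by passing value == 0.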
#define HIDPP_REG_BATTERY_STATUS 0x07
@@ -741,6 +763,9 @@ static char *hidpp_unifying_get_name(struct hidpp_device *hidpp_dev)
if (2 + len > sizeof(response.rap.params))
return NULL;
+ if (len < 4) /* logitech devices are usually at least Xddd */
+ return NULL;
+
name = kzalloc(len + 1, GFP_KERNEL);
if (!name)
return NULL;
@@ -836,18 +861,21 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
{
+ const u8 ping_byte = 0x5a;
+ u8 ping_data[3] = { 0, 0, ping_byte };
struct hidpp_report response;
int ret;
- ret = hidpp_send_fap_command_sync(hidpp,
+ ret = hidpp_send_rap_command_sync(hidpp,
+ REPORT_ID_HIDPP_SHORT,
HIDPP_PAGE_ROOT_IDX,
CMD_ROOT_GET_PROTOCOL_VERSION,
- NULL, 0, &response);
+ ping_data, sizeof(ping_data), &response);
if (ret == HIDPP_ERROR_INVALID_SUBID) {
hidpp->protocol_major = 1;
hidpp->protocol_minor = 0;
- return 0;
+ goto print_version;
}
/* the device might not be connected */
@@ -862,21 +890,19 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
if (ret)
return ret;
- hidpp->protocol_major = response.fap.params[0];
- hidpp->protocol_minor = response.fap.params[1];
-
- return ret;
-}
+ if (response.rap.params[2] != ping_byte) {
+ hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n",
+ __func__, response.rap.params[2], ping_byte);
+ return -EPROTO;
+ }
-static bool hidpp_is_connected(struct hidpp_device *hidpp)
-{
- int ret;
+ hidpp->protocol_major = response.rap.params[0];
+ hidpp->protocol_minor = response.rap.params[1];
- ret = hidpp_root_get_protocol_version(hidpp);
- if (!ret)
- hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
- return ret == 0;
+print_version:
+ hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ return 0;
}
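A hedged sketch of how the ping reply is interpreted; HIDPP_ERROR_INVALID_SUBID is the error a HID++ 1.0 device returns to this query (as the code above relies on), everything else here is illustrative:

static int example_interpret_ping(int err, const u8 params[3], u8 ping_byte,
				  u8 *major, u8 *minor)
{
	if (err == HIDPP_ERROR_INVALID_SUBID) {	/* pre-HID++ 2.0 firmware */
		*major = 1;
		*minor = 0;
		return 0;
	}
	if (err)
		return err;
	if (params[2] != ping_byte)		/* answer does not match our ping */
		return -EPROTO;
	*major = params[0];
	*minor = params[1];
	return 0;
}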
/* -------------------------------------------------------------------------- */
@@ -932,7 +958,7 @@ static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp,
switch (response.report_id) {
case REPORT_ID_HIDPP_VERY_LONG:
- count = HIDPP_REPORT_VERY_LONG_LENGTH - 4;
+ count = hidpp->very_long_report_length - 4;
break;
case REPORT_ID_HIDPP_LONG:
count = HIDPP_REPORT_LONG_LENGTH - 4;
@@ -1012,7 +1038,11 @@ static int hidpp_map_battery_level(int capacity)
{
if (capacity < 11)
return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
- else if (capacity < 31)
+ /*
+ * The spec says this should be < 31 but some devices report 30
+ * with brand new batteries and Windows reports 30 as "Good".
+ */
+ else if (capacity < 30)
return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
else if (capacity < 81)
return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
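With the lowered threshold, a device that reports 30% on a fresh battery now maps to NORMAL rather than LOW. The resulting bands, for illustration (anything above 80 is handled by the rest of the function, outside this hunk):

/*
 *   0..10  ->  POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL
 *  11..29  ->  POWER_SUPPLY_CAPACITY_LEVEL_LOW      (11..30 before this change)
 *  30..80  ->  POWER_SUPPLY_CAPACITY_LEVEL_NORMAL
 */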
@@ -2111,6 +2141,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
kfree(data);
return -ENOMEM;
}
+ data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+ if (!data->wq) {
+ kfree(data->effect_ids);
+ kfree(data);
+ return -ENOMEM;
+ }
+
data->hidpp = hidpp;
data->feature_index = feature_index;
data->version = version;
@@ -2155,7 +2192,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* ignore boost value at response.fap.params[2] */
/* init the hardware command queue */
- data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
atomic_set(&data->workqueue_size, 0);
/* initialize with zero autocenter to get wheel in usable state */
@@ -2205,7 +2241,6 @@ static int hidpp_ff_deinit(struct hid_device *hid)
#define WTP_MANUAL_RESOLUTION 39
struct wtp_data {
- struct input_dev *input;
u16 x_size, y_size;
u8 finger_count;
u8 mt_feature_index;
@@ -2223,7 +2258,7 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
static void wtp_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input_dev, bool origin_is_hid_core)
+ struct input_dev *input_dev)
{
struct wtp_data *wd = hidpp->private_data;
@@ -2249,31 +2284,30 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
input_mt_init_slots(input_dev, wd->maxcontacts, INPUT_MT_POINTER |
INPUT_MT_DROP_UNUSED);
-
- wd->input = input_dev;
}
-static void wtp_touch_event(struct wtp_data *wd,
+static void wtp_touch_event(struct hidpp_device *hidpp,
struct hidpp_touchpad_raw_xy_finger *touch_report)
{
+ struct wtp_data *wd = hidpp->private_data;
int slot;
if (!touch_report->finger_id || touch_report->contact_type)
/* no actual data */
return;
- slot = input_mt_get_slot_by_key(wd->input, touch_report->finger_id);
+ slot = input_mt_get_slot_by_key(hidpp->input, touch_report->finger_id);
- input_mt_slot(wd->input, slot);
- input_mt_report_slot_state(wd->input, MT_TOOL_FINGER,
+ input_mt_slot(hidpp->input, slot);
+ input_mt_report_slot_state(hidpp->input, MT_TOOL_FINGER,
touch_report->contact_status);
if (touch_report->contact_status) {
- input_event(wd->input, EV_ABS, ABS_MT_POSITION_X,
+ input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_X,
touch_report->x);
- input_event(wd->input, EV_ABS, ABS_MT_POSITION_Y,
+ input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_Y,
wd->flip_y ? wd->y_size - touch_report->y :
touch_report->y);
- input_event(wd->input, EV_ABS, ABS_MT_PRESSURE,
+ input_event(hidpp->input, EV_ABS, ABS_MT_PRESSURE,
touch_report->area);
}
}
@@ -2281,19 +2315,18 @@ static void wtp_touch_event(struct wtp_data *wd,
static void wtp_send_raw_xy_event(struct hidpp_device *hidpp,
struct hidpp_touchpad_raw_xy *raw)
{
- struct wtp_data *wd = hidpp->private_data;
int i;
for (i = 0; i < 2; i++)
- wtp_touch_event(wd, &(raw->fingers[i]));
+ wtp_touch_event(hidpp, &(raw->fingers[i]));
if (raw->end_of_frame &&
!(hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS))
- input_event(wd->input, EV_KEY, BTN_LEFT, raw->button);
+ input_event(hidpp->input, EV_KEY, BTN_LEFT, raw->button);
if (raw->end_of_frame || raw->finger_count <= 2) {
- input_mt_sync_frame(wd->input);
- input_sync(wd->input);
+ input_mt_sync_frame(hidpp->input);
+ input_sync(hidpp->input);
}
}
@@ -2343,7 +2376,7 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
struct hidpp_report *report = (struct hidpp_report *)data;
struct hidpp_touchpad_raw_xy raw;
- if (!wd || !wd->input)
+ if (!wd || !hidpp->input)
return 1;
switch (data[0]) {
@@ -2354,11 +2387,11 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
return 1;
}
if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
- input_event(wd->input, EV_KEY, BTN_LEFT,
+ input_event(hidpp->input, EV_KEY, BTN_LEFT,
!!(data[1] & 0x01));
- input_event(wd->input, EV_KEY, BTN_RIGHT,
+ input_event(hidpp->input, EV_KEY, BTN_RIGHT,
!!(data[1] & 0x02));
- input_sync(wd->input);
+ input_sync(hidpp->input);
return 0;
} else {
if (size < 21)
@@ -2476,10 +2509,6 @@ static int wtp_connect(struct hid_device *hdev, bool connected)
static const u8 m560_config_parameter[] = {0x00, 0xaf, 0x03};
-struct m560_private_data {
- struct input_dev *input;
-};
-
/* how buttons are mapped in the report */
#define M560_MOUSE_BTN_LEFT 0x01
#define M560_MOUSE_BTN_RIGHT 0x02
@@ -2507,28 +2536,12 @@ static int m560_send_config_command(struct hid_device *hdev, bool connected)
);
}
-static int m560_allocate(struct hid_device *hdev)
-{
- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct m560_private_data *d;
-
- d = devm_kzalloc(&hdev->dev, sizeof(struct m560_private_data),
- GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- hidpp->private_data = d;
-
- return 0;
-};
-
static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct m560_private_data *mydata = hidpp->private_data;
/* sanity check */
- if (!mydata || !mydata->input) {
+ if (!hidpp->input) {
hid_err(hdev, "error in parameter\n");
return -EINVAL;
}
@@ -2555,24 +2568,24 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
switch (data[5]) {
case 0xaf:
- input_report_key(mydata->input, BTN_MIDDLE, 1);
+ input_report_key(hidpp->input, BTN_MIDDLE, 1);
break;
case 0xb0:
- input_report_key(mydata->input, BTN_FORWARD, 1);
+ input_report_key(hidpp->input, BTN_FORWARD, 1);
break;
case 0xae:
- input_report_key(mydata->input, BTN_BACK, 1);
+ input_report_key(hidpp->input, BTN_BACK, 1);
break;
case 0x00:
- input_report_key(mydata->input, BTN_BACK, 0);
- input_report_key(mydata->input, BTN_FORWARD, 0);
- input_report_key(mydata->input, BTN_MIDDLE, 0);
+ input_report_key(hidpp->input, BTN_BACK, 0);
+ input_report_key(hidpp->input, BTN_FORWARD, 0);
+ input_report_key(hidpp->input, BTN_MIDDLE, 0);
break;
default:
hid_err(hdev, "error in report\n");
return 0;
}
- input_sync(mydata->input);
+ input_sync(hidpp->input);
} else if (data[0] == 0x02) {
/*
@@ -2586,58 +2599,55 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
int v;
- input_report_key(mydata->input, BTN_LEFT,
+ input_report_key(hidpp->input, BTN_LEFT,
!!(data[1] & M560_MOUSE_BTN_LEFT));
- input_report_key(mydata->input, BTN_RIGHT,
+ input_report_key(hidpp->input, BTN_RIGHT,
!!(data[1] & M560_MOUSE_BTN_RIGHT));
if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) {
- input_report_rel(mydata->input, REL_HWHEEL, -1);
- input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+ input_report_rel(hidpp->input, REL_HWHEEL, -1);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES,
-120);
} else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) {
- input_report_rel(mydata->input, REL_HWHEEL, 1);
- input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+ input_report_rel(hidpp->input, REL_HWHEEL, 1);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES,
120);
}
v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12);
- input_report_rel(mydata->input, REL_X, v);
+ input_report_rel(hidpp->input, REL_X, v);
v = hid_snto32(hid_field_extract(hdev, data+3, 12, 12), 12);
- input_report_rel(mydata->input, REL_Y, v);
+ input_report_rel(hidpp->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- hidpp_scroll_counter_handle_scroll(
- &hidpp->vertical_wheel_counter, v);
+ if (v != 0)
+ hidpp_scroll_counter_handle_scroll(hidpp->input,
+ &hidpp->vertical_wheel_counter, v);
- input_sync(mydata->input);
+ input_sync(hidpp->input);
}
return 1;
}
static void m560_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input_dev, bool origin_is_hid_core)
+ struct input_dev *input_dev)
{
- struct m560_private_data *mydata = hidpp->private_data;
-
- mydata->input = input_dev;
-
- __set_bit(EV_KEY, mydata->input->evbit);
- __set_bit(BTN_MIDDLE, mydata->input->keybit);
- __set_bit(BTN_RIGHT, mydata->input->keybit);
- __set_bit(BTN_LEFT, mydata->input->keybit);
- __set_bit(BTN_BACK, mydata->input->keybit);
- __set_bit(BTN_FORWARD, mydata->input->keybit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_BACK, input_dev->keybit);
+ __set_bit(BTN_FORWARD, input_dev->keybit);
- __set_bit(EV_REL, mydata->input->evbit);
- __set_bit(REL_X, mydata->input->relbit);
- __set_bit(REL_Y, mydata->input->relbit);
- __set_bit(REL_WHEEL, mydata->input->relbit);
- __set_bit(REL_HWHEEL, mydata->input->relbit);
- __set_bit(REL_WHEEL_HI_RES, mydata->input->relbit);
- __set_bit(REL_HWHEEL_HI_RES, mydata->input->relbit);
+ __set_bit(EV_REL, input_dev->evbit);
+ __set_bit(REL_X, input_dev->relbit);
+ __set_bit(REL_Y, input_dev->relbit);
+ __set_bit(REL_WHEEL, input_dev->relbit);
+ __set_bit(REL_HWHEEL, input_dev->relbit);
+ __set_bit(REL_WHEEL_HI_RES, input_dev->relbit);
+ __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit);
}
static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -2740,6 +2750,175 @@ static int g920_get_config(struct hidpp_device *hidpp)
}
/* -------------------------------------------------------------------------- */
+/* HID++1.0 devices which use HID++ reports for their wheels */
+/* -------------------------------------------------------------------------- */
+static int hidpp10_wheel_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT,
+ HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT);
+}
+
+static int hidpp10_wheel_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ s8 value, hvalue;
+
+ if (!hidpp->input)
+ return -EINVAL;
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_ROLLER)
+ return 0;
+
+ value = data[3];
+ hvalue = data[4];
+
+ input_report_rel(hidpp->input, REL_WHEEL, value);
+ input_report_rel(hidpp->input, REL_WHEEL_HI_RES, value * 120);
+ input_report_rel(hidpp->input, REL_HWHEEL, hvalue);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, hvalue * 120);
+ input_sync(hidpp->input);
+
+ return 1;
+}
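Since REL_WHEEL_HI_RES is defined as 120 units per detent, each notch reported by the receiver is simply scaled up; a brief illustration:

/*
 * data[3] == +2 (two vertical notches)  ->  REL_WHEEL +2, REL_WHEEL_HI_RES +240
 * data[4] == -1 (one horizontal notch)  ->  REL_HWHEEL -1, REL_HWHEEL_HI_RES -120
 */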
+
+static void hidpp10_wheel_populate_input(struct hidpp_device *hidpp,
+ struct input_dev *input_dev)
+{
+ __set_bit(EV_REL, input_dev->evbit);
+ __set_bit(REL_WHEEL, input_dev->relbit);
+ __set_bit(REL_WHEEL_HI_RES, input_dev->relbit);
+ __set_bit(REL_HWHEEL, input_dev->relbit);
+ __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit);
+}
+
+/* -------------------------------------------------------------------------- */
+/* HID++1.0 mice which use HID++ reports for extra mouse buttons */
+/* -------------------------------------------------------------------------- */
+static int hidpp10_extra_mouse_buttons_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT,
+ HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT);
+}
+
+static int hidpp10_extra_mouse_buttons_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ int i;
+
+ if (!hidpp->input)
+ return -EINVAL;
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT ||
+ data[2] != HIDPP_SUB_ID_MOUSE_EXTRA_BTNS)
+ return 0;
+
+ /*
+ * Buttons are either delivered through the regular mouse report *or*
+ * through the extra buttons report. At least for button 6, how it is
+ * delivered differs per receiver firmware version. Even receivers with
+ * the same usb-id show different behavior, so we handle both cases.
+ */
+ for (i = 0; i < 8; i++)
+ input_report_key(hidpp->input, BTN_MOUSE + i,
+ (data[3] & (1 << i)));
+
+ /* Some mice report events on button 9+, use BTN_MISC */
+ for (i = 0; i < 8; i++)
+ input_report_key(hidpp->input, BTN_MISC + i,
+ (data[4] & (1 << i)));
+
+ input_sync(hidpp->input);
+ return 1;
+}
+
+static void hidpp10_extra_mouse_buttons_populate_input(
+ struct hidpp_device *hidpp, struct input_dev *input_dev)
+{
+ /* BTN_MOUSE - BTN_MOUSE+7 are set already by the descriptor */
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
+ __set_bit(BTN_4, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+ __set_bit(BTN_6, input_dev->keybit);
+ __set_bit(BTN_7, input_dev->keybit);
+}
+
+/* -------------------------------------------------------------------------- */
+/* HID++1.0 kbds which only report 0x10xx consumer usages through sub-id 0x03 */
+/* -------------------------------------------------------------------------- */
+
+/* Find the consumer-page input report desc and change Maximums to 0x107f */
+static u8 *hidpp10_consumer_keys_report_fixup(struct hidpp_device *hidpp,
+ u8 *_rdesc, unsigned int *rsize)
+{
+ /* Note 0 terminated so we can use strnstr to search for this. */
+ const char consumer_rdesc_start[] = {
+ 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
+ 0x09, 0x01, /* USAGE (Consumer Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x03, /* REPORT_ID = 3 */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x26, 0x00 /* LOGICAL_MAX (... */
+ };
+ char *consumer_rdesc, *rdesc = (char *)_rdesc;
+ unsigned int size;
+
+ consumer_rdesc = strnstr(rdesc, consumer_rdesc_start, *rsize);
+ size = *rsize - (consumer_rdesc - rdesc);
+ if (consumer_rdesc && size >= 25) {
+ consumer_rdesc[15] = 0x7f;
+ consumer_rdesc[16] = 0x10;
+ consumer_rdesc[20] = 0x7f;
+ consumer_rdesc[21] = 0x10;
+ }
+ return _rdesc;
+}
+
+static int hidpp10_consumer_keys_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_CONSUMER_REPORT,
+ HIDPP_ENABLE_CONSUMER_REPORT);
+}
+
+static int hidpp10_consumer_keys_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ u8 consumer_report[5];
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT ||
+ data[2] != HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS)
+ return 0;
+
+ /*
+ * Build a normal consumer report (3) out of the data; this detour
+ * is necessary to get some keyboards to report their 0x10xx usages.
+ */
+ consumer_report[0] = 0x03;
+ memcpy(&consumer_report[1], &data[3], 4);
+ /* We are called from atomic context */
+ hid_report_raw_event(hidpp->hid_dev, HID_INPUT_REPORT,
+ consumer_report, 5, 1);
+
+ return 1;
+}
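In effect the function above re-frames the HID++ notification as the consumer-control input report that the (fixed-up) descriptor already describes, so the usual HID parsing path can emit the key events. A sketch of the byte shuffle, assuming data[] holds the incoming HID++ short report:

/*
 * incoming HID++ short report:    0x10 <dev-index> 0x03 k0 k1 k2 k3
 * rebuilt consumer report (id 3): 0x03 k0 k1 k2 k3
 * hid_report_raw_event() then parses k0..k3 as consumer-page usages,
 * including the 0x10xx vendor range enabled by the descriptor fixup.
 */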
+
+/* -------------------------------------------------------------------------- */
/* High-resolution scroll wheels */
/* -------------------------------------------------------------------------- */
@@ -2774,12 +2953,31 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
+static u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc,
+ unsigned int *rsize)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+
+ if (!hidpp)
+ return rdesc;
+
+ /* For 27 MHz keyboards the quirk gets set after hid_parse. */
+ if (hdev->group == HID_GROUP_LOGITECH_27MHZ_DEVICE ||
+ (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS))
+ rdesc = hidpp10_consumer_keys_report_fixup(hidpp, rdesc, rsize);
+
+ return rdesc;
+}
+
static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return 0;
+
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
return wtp_input_mapping(hdev, hi, field, usage, bit, max);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560 &&
@@ -2795,6 +2993,9 @@ static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return 0;
+
/* Ensure that Logitech G920 is not given a default fuzz/flat value */
if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
if (usage->type == EV_ABS && (usage->code == ABS_X ||
@@ -2809,15 +3010,20 @@ static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static void hidpp_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input, bool origin_is_hid_core)
+ struct input_dev *input)
{
+ hidpp->input = input;
+
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
- wtp_populate_input(hidpp, input, origin_is_hid_core);
+ wtp_populate_input(hidpp, input);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
- m560_populate_input(hidpp, input, origin_is_hid_core);
+ m560_populate_input(hidpp, input);
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
- hidpp->vertical_wheel_counter.dev = input;
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS)
+ hidpp10_wheel_populate_input(hidpp, input);
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS)
+ hidpp10_extra_mouse_buttons_populate_input(hidpp, input);
}
static int hidpp_input_configured(struct hid_device *hdev,
@@ -2826,7 +3032,10 @@ static int hidpp_input_configured(struct hid_device *hdev,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
struct input_dev *input = hidinput->input;
- hidpp_populate_input(hidpp, input, true);
+ if (!hidpp)
+ return 0;
+
+ hidpp_populate_input(hidpp, input);
return 0;
}
@@ -2886,6 +3095,24 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
return ret;
}
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) {
+ ret = hidpp10_wheel_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) {
+ ret = hidpp10_extra_mouse_buttons_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) {
+ ret = hidpp10_consumer_keys_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
return 0;
}
@@ -2895,10 +3122,13 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
int ret = 0;
+ if (!hidpp)
+ return 0;
+
/* Generic HID++ processing. */
switch (data[0]) {
case REPORT_ID_HIDPP_VERY_LONG:
- if (size != HIDPP_REPORT_VERY_LONG_LENGTH) {
+ if (size != hidpp->very_long_report_length) {
hid_err(hdev, "received hid++ report of bad size (%d)",
size);
return 1;
@@ -2943,17 +3173,22 @@ static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
* restriction imposed in hidpp_usages.
*/
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct hidpp_scroll_counter *counter = &hidpp->vertical_wheel_counter;
+ struct hidpp_scroll_counter *counter;
+
+ if (!hidpp)
+ return 0;
+
+ counter = &hidpp->vertical_wheel_counter;
/* A scroll event may occur before the multiplier has been retrieved or
* the input device set, or high-res scroll enabling may fail. In such
* cases we must return early (falling back to default behaviour) to
* avoid a crash in hidpp_scroll_counter_handle_scroll.
*/
if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
- || counter->dev == NULL || counter->wheel_multiplier == 0)
+ || hidpp->input == NULL || counter->wheel_multiplier == 0)
return 0;
- hidpp_scroll_counter_handle_scroll(counter, value);
+ hidpp_scroll_counter_handle_scroll(hidpp->input, counter, value);
return 1;
}
@@ -3125,32 +3360,45 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
return;
}
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) {
+ ret = hidpp10_wheel_connect(hidpp);
+ if (ret)
+ return;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) {
+ ret = hidpp10_extra_mouse_buttons_connect(hidpp);
+ if (ret)
+ return;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) {
+ ret = hidpp10_consumer_keys_connect(hidpp);
+ if (ret)
+ return;
+ }
+
/* the device is already connected, we can ask for its name and
* protocol */
if (!hidpp->protocol_major) {
- ret = !hidpp_is_connected(hidpp);
+ ret = hidpp_root_get_protocol_version(hidpp);
if (ret) {
hid_err(hdev, "Can not get the protocol version.\n");
return;
}
- hid_info(hdev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
}
if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
name = hidpp_get_device_name(hidpp);
- if (!name) {
- hid_err(hdev,
- "unable to retrieve the name of the device");
- return;
- }
-
- devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name);
- kfree(name);
- if (!devm_name)
- return;
+ if (name) {
+ devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s", name);
+ kfree(name);
+ if (!devm_name)
+ return;
- hidpp->name = devm_name;
+ hidpp->name = devm_name;
+ }
}
hidpp_initialize_battery(hidpp);
@@ -3181,7 +3429,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
return;
}
- hidpp_populate_input(hidpp, input, false);
+ hidpp_populate_input(hidpp, input);
ret = input_register_device(input);
if (ret)
@@ -3201,6 +3449,60 @@ static const struct attribute_group ps_attribute_group = {
.attrs = sysfs_attrs
};
+static int hidpp_get_report_length(struct hid_device *hdev, int id)
+{
+ struct hid_report_enum *re;
+ struct hid_report *report;
+
+ re = &(hdev->report_enum[HID_OUTPUT_REPORT]);
+ report = re->report_id_hash[id];
+ if (!report)
+ return 0;
+
+ return report->field[0]->report_count + 1;
+}
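The + 1 is presumably the leading report-ID byte that precedes the report_count payload bytes, so the helper returns the full on-the-wire length. A trivial illustration under that assumption:

static int example_hidpp_wire_length(int report_count)
{
	/* payload bytes described by the descriptor plus the report-ID byte */
	return report_count + 1;
}

For the short HID++ report a descriptor exposing report_count == 6 thus yields 7, matching HIDPP_REPORT_SHORT_LENGTH.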
+
+static bool hidpp_validate_report(struct hid_device *hdev, int id,
+ int expected_length, bool optional)
+{
+ int report_length;
+
+ if (id >= HID_MAX_IDS || id < 0) {
+ hid_err(hdev, "invalid HID report id %u\n", id);
+ return false;
+ }
+
+ report_length = hidpp_get_report_length(hdev, id);
+ if (!report_length)
+ return optional;
+
+ if (report_length < expected_length) {
+ hid_warn(hdev, "not enough values in hidpp report %d\n", id);
+ return false;
+ }
+
+ return true;
+}
+
+static bool hidpp_validate_device(struct hid_device *hdev)
+{
+ return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT,
+ HIDPP_REPORT_SHORT_LENGTH, false) &&
+ hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG,
+ HIDPP_REPORT_LONG_LENGTH, true);
+}
+
+static bool hidpp_application_equals(struct hid_device *hdev,
+ unsigned int application)
+{
+ struct list_head *report_list;
+ struct hid_report *report;
+
+ report_list = &hdev->report_enum[HID_INPUT_REPORT].report_list;
+ report = list_first_entry_or_null(report_list, struct hid_report, list);
+ return report && report->application == application;
+}
+
static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct hidpp_device *hidpp;
@@ -3208,20 +3510,48 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
bool connected;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
- hidpp = devm_kzalloc(&hdev->dev, sizeof(struct hidpp_device),
- GFP_KERNEL);
+ /* report_fixup needs drvdata to be set before we call hid_parse */
+ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
if (!hidpp)
return -ENOMEM;
hidpp->hid_dev = hdev;
hidpp->name = hdev->name;
+ hidpp->quirks = id->driver_data;
hid_set_drvdata(hdev, hidpp);
- hidpp->quirks = id->driver_data;
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "%s:parse failed\n", __func__);
+ return ret;
+ }
+
+ /*
+ * Make sure the device is HID++ capable, otherwise treat as generic HID
+ */
+ if (!hidpp_validate_device(hdev)) {
+ hid_set_drvdata(hdev, NULL);
+ devm_kfree(&hdev->dev, hidpp);
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ }
+
+ hidpp->very_long_report_length =
+ hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG);
+ if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
+ hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH;
if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE)
hidpp->quirks |= HIDPP_QUIRK_UNIFYING;
+ if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE &&
+ hidpp_application_equals(hdev, HID_GD_MOUSE))
+ hidpp->quirks |= HIDPP_QUIRK_HIDPP_WHEELS |
+ HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS;
+
+ if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE &&
+ hidpp_application_equals(hdev, HID_GD_KEYBOARD))
+ hidpp->quirks |= HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS;
+
if (disable_raw_mode) {
hidpp->quirks &= ~HIDPP_QUIRK_CLASS_WTP;
hidpp->quirks &= ~HIDPP_QUIRK_NO_HIDINPUT;
@@ -3230,15 +3560,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
ret = wtp_allocate(hdev, id);
if (ret)
- goto allocate_fail;
- } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) {
- ret = m560_allocate(hdev);
- if (ret)
- goto allocate_fail;
+ return ret;
} else if (hidpp->quirks & HIDPP_QUIRK_CLASS_K400) {
ret = k400_allocate(hdev);
if (ret)
- goto allocate_fail;
+ return ret;
}
INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -3251,93 +3577,79 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hid_warn(hdev, "Cannot allocate sysfs group for %s\n",
hdev->name);
- ret = hid_parse(hdev);
+ /*
+ * Plain USB connections need to actually call start and open
+ * on the transport driver to allow incoming data.
+ */
+ ret = hid_hw_start(hdev, 0);
if (ret) {
- hid_err(hdev, "%s:parse failed\n", __func__);
- goto hid_parse_fail;
+ hid_err(hdev, "hw start failed\n");
+ goto hid_hw_start_fail;
}
- if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
- connect_mask &= ~HID_CONNECT_HIDINPUT;
-
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- goto hid_hw_start_fail;
- }
- ret = hid_hw_open(hdev);
- if (ret < 0) {
- dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
- __func__, ret);
- hid_hw_stop(hdev);
- goto hid_hw_start_fail;
- }
+ ret = hid_hw_open(hdev);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+ __func__, ret);
+ hid_hw_stop(hdev);
+ goto hid_hw_open_fail;
}
-
/* Allow incoming packets */
hid_device_io_start(hdev);
if (hidpp->quirks & HIDPP_QUIRK_UNIFYING)
hidpp_unifying_init(hidpp);
- connected = hidpp_is_connected(hidpp);
+ connected = hidpp_root_get_protocol_version(hidpp) == 0;
atomic_set(&hidpp->connected, connected);
if (!(hidpp->quirks & HIDPP_QUIRK_UNIFYING)) {
if (!connected) {
ret = -ENODEV;
hid_err(hdev, "Device not connected");
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
}
- hid_info(hdev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
-
hidpp_overwrite_name(hdev);
}
if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
ret = wtp_get_config(hidpp);
if (ret)
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
} else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
ret = g920_get_config(hidpp);
if (ret)
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
}
- /* Block incoming packets */
- hid_device_io_stop(hdev);
+ hidpp_connect_event(hidpp);
- if (!(hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
- goto hid_hw_start_fail;
- }
- }
+ /* Reset the HID node state */
+ hid_device_io_stop(hdev);
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
- /* Allow incoming packets */
- hid_device_io_start(hdev);
+ if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
+ connect_mask &= ~HID_CONNECT_HIDINPUT;
- hidpp_connect_event(hidpp);
+ /* Now export the actual inputs and hidraw nodes to the world */
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+ goto hid_hw_start_fail;
+ }
return ret;
-hid_hw_open_failed:
- hid_device_io_stop(hdev);
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- hid_hw_close(hdev);
- hid_hw_stop(hdev);
- }
+hid_hw_init_fail:
+ hid_hw_close(hdev);
+hid_hw_open_fail:
+ hid_hw_stop(hdev);
hid_hw_start_fail:
-hid_parse_fail:
sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
-allocate_fail:
- hid_set_drvdata(hdev, NULL);
return ret;
}
@@ -3345,12 +3657,14 @@ static void hidpp_remove(struct hid_device *hdev)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return hid_hw_stop(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)
hidpp_ff_deinit(hdev);
- hid_hw_close(hdev);
- }
+
hid_hw_stop(hdev);
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
@@ -3360,6 +3674,10 @@ static void hidpp_remove(struct hid_device *hdev)
HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
USB_VENDOR_ID_LOGITECH, (product))
+#define L27MHZ_DEVICE(product) \
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_27MHZ_DEVICE, \
+ USB_VENDOR_ID_LOGITECH, (product))
+
static const struct hid_device_id hidpp_devices[] = {
{ /* wireless touchpad */
LDJ_DEVICE(0x4011),
@@ -3411,11 +3729,37 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Solar Keyboard Logitech K750 */
LDJ_DEVICE(0x4002),
.driver_data = HIDPP_QUIRK_CLASS_K750 },
+ { /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
+ LDJ_DEVICE(0xb305),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ LDJ_DEVICE(HID_ANY_ID) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
+ { /* Keyboard LX501 (Y-RR53) */
+ L27MHZ_DEVICE(0x0049),
+ .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
+ { /* Keyboard MX3000 (Y-RAM74) */
+ L27MHZ_DEVICE(0x0057),
+ .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL },
+ { /* Keyboard MX3200 (Y-RAV80) */
+ L27MHZ_DEVICE(0x005c),
+ .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
+
+ { L27MHZ_DEVICE(HID_ANY_ID) },
+
+ { /* Logitech G403 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
+ { /* Logitech G700 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
+ { /* Logitech G900 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
+ { /* Logitech G920 Wheel over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+
+ { /* MX5000 keyboard over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{}
};
@@ -3429,6 +3773,7 @@ static const struct hid_usage_id hidpp_usages[] = {
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
+ .report_fixup = hidpp_report_fixup,
.probe = hidpp_probe,
.remove = hidpp_remove,
.raw_event = hidpp_raw_event,
diff --git a/drivers/hid/hid-macally.c b/drivers/hid/hid-macally.c
new file mode 100644
index 000000000000..9a4fc7dffb14
--- /dev/null
+++ b/drivers/hid/hid-macally.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for quirky Macally devices
+ *
+ * Copyright (c) 2019 Alex Henrie <alexhenrie24@gmail.com>
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Alex Henrie <alexhenrie24@gmail.com>");
+MODULE_DESCRIPTION("Macally devices");
+MODULE_LICENSE("GPL");
+
+/*
+ * The Macally ikey keyboard says that its logical and usage maximums are both
+ * 101, but the power key is 102 and the equals key is 103
+ */
+static __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+ hid_info(hdev,
+ "fixing up Macally ikey keyboard report descriptor\n");
+ rdesc[53] = rdesc[59] = 0x67;
+ }
+ return rdesc;
+}
+
+static struct hid_device_id macally_id_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
+ USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, macally_id_table);
+
+static struct hid_driver macally_driver = {
+ .name = "macally",
+ .id_table = macally_id_table,
+ .report_fixup = macally_report_fixup,
+};
+
+module_hid_driver(macally_driver);
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index c1b29a9eb41a..482c24f0e078 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -28,6 +28,7 @@
#include <linux/completion.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/string.h>
#include "hid-picolcd.h"
@@ -275,27 +276,20 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
{
struct picolcd_data *data = dev_get_drvdata(dev);
struct hid_report *report = NULL;
- size_t cnt = count;
int timeout = data->opmode_delay;
unsigned long flags;
- if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
+ if (sysfs_streq(buf, "lcd")) {
if (data->status & PICOLCD_BOOTLOADER)
report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
- buf += 3;
- cnt -= 3;
- } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
+ } else if (sysfs_streq(buf, "bootloader")) {
if (!(data->status & PICOLCD_BOOTLOADER))
report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
- buf += 10;
- cnt -= 10;
- }
- if (!report || report->maxfield != 1)
+ } else {
return -EINVAL;
+ }
- while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
- cnt--;
- if (cnt != 0)
+ if (!report || report->maxfield != 1)
return -EINVAL;
spin_lock_irqsave(&data->lock, flags);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 953908f2267c..fea7f7ff5ab1 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -432,7 +432,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_LOGITECH)
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
@@ -464,13 +463,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
#endif
#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
@@ -715,7 +709,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +848,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ }
};
-/**
+/*
* hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
*
* There are composite devices for which we want to ignore only a certain
@@ -996,6 +989,10 @@ bool hid_ignore(struct hid_device *hdev)
if (hdev->product == 0x0401 &&
strncmp(hdev->name, "ELAN0800", 8) != 0)
return true;
+ /* Same with product id 0x0400 */
+ if (hdev->product == 0x0400 &&
+ strncmp(hdev->name, "QTEC0001", 8) != 0)
+ return true;
break;
}
@@ -1042,7 +1039,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
}
if (bl_entry != NULL)
- dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
bl_entry->driver_data, bl_entry->vendor,
bl_entry->product);
@@ -1209,7 +1206,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
quirks |= bl_entry->driver_data;
if (quirks)
- dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
quirks, hdev->vendor, hdev->product);
return quirks;
}
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index bb012bc032e0..462e653a7bbb 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -157,8 +157,7 @@ static int usage_id_cmp(const void *p1, const void *p2)
static ssize_t enable_sensor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", sensor_inst->enable);
}
@@ -237,8 +236,7 @@ static ssize_t enable_sensor_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
int value;
int ret = -EINVAL;
@@ -283,8 +281,7 @@ static const struct attribute_group enable_sensor_attr_group = {
static ssize_t show_value(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
struct hid_sensor_hub_attribute_info *attribute;
int index, usage, field_index;
char name[HID_CUSTOM_NAME_LENGTH];
@@ -392,8 +389,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
static ssize_t store_value(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
int index, field_index, usage;
char name[HID_CUSTOM_NAME_LENGTH];
int value;
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8141cadfca0e..8dae0f9b819e 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
static int steam_register(struct steam_device *steam)
{
int ret;
+ bool client_opened;
/*
* This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
* Unlikely, but getting the serial could fail, and it is not so
* important, so make up a serial number and go on.
*/
+ mutex_lock(&steam->mutex);
if (steam_get_serial(steam) < 0)
strlcpy(steam->serial_no, "XXXXXXXXXX",
sizeof(steam->serial_no));
+ mutex_unlock(&steam->mutex);
hid_info(steam->hdev, "Steam Controller '%s' connected",
steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
}
mutex_lock(&steam->mutex);
- if (!steam->client_opened) {
+ client_opened = steam->client_opened;
+ if (!client_opened)
steam_set_lizard_mode(steam, lizard_mode);
+ mutex_unlock(&steam->mutex);
+
+ if (!client_opened)
ret = steam_input_register(steam);
- } else {
+ else
ret = 0;
- }
- mutex_unlock(&steam->mutex);
return ret;
}
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
+ unsigned long flags;
+ bool connected;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ connected = steam->connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
mutex_lock(&steam->mutex);
steam->client_opened = false;
+ if (connected)
+ steam_set_lizard_mode(steam, lizard_mode);
mutex_unlock(&steam->mutex);
- if (steam->connected) {
- steam_set_lizard_mode(steam, lizard_mode);
+ if (connected)
steam_input_register(steam);
- }
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
new file mode 100644
index 000000000000..95e0807878c7
--- /dev/null
+++ b/drivers/hid/hid-u2fzero.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * U2F Zero LED and RNG driver
+ *
+ * Copyright 2018 Andrej Shadura <andrew@shadura.me>
+ * Loosely based on drivers/hid/hid-led.c
+ * and drivers/usb/misc/chaoskey.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/hid.h>
+#include <linux/hidraw.h>
+#include <linux/hw_random.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+#define DRIVER_SHORT "u2fzero"
+
+#define HID_REPORT_SIZE 64
+
+/* We only use broadcast (CID-less) messages */
+#define CID_BROADCAST 0xffffffff
+
+struct u2f_hid_msg {
+ u32 cid;
+ union {
+ struct {
+ u8 cmd;
+ u8 bcnth;
+ u8 bcntl;
+ u8 data[HID_REPORT_SIZE - 7];
+ } init;
+ struct {
+ u8 seq;
+ u8 data[HID_REPORT_SIZE - 5];
+ } cont;
+ };
+} __packed;
+
+struct u2f_hid_report {
+ u8 report_type;
+ struct u2f_hid_msg msg;
+} __packed;
+
+#define U2F_HID_MSG_LEN(f) (size_t)(((f).init.bcnth << 8) + (f).init.bcntl)
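The byte count of an initialization packet is carried big-endian in bcnth/bcntl; a quick worked example of the macro (values chosen purely for illustration):

static size_t example_msg_len(void)
{
	struct u2f_hid_msg m = { .init = { .bcnth = 0x01, .bcntl = 0x20 } };

	return U2F_HID_MSG_LEN(m);	/* (0x01 << 8) + 0x20 == 288 payload bytes */
}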
+
+/* Custom extensions to the U2FHID protocol */
+#define U2F_CUSTOM_GET_RNG 0x21
+#define U2F_CUSTOM_WINK 0x24
+
+struct u2fzero_device {
+ struct hid_device *hdev;
+ struct urb *urb; /* URB for the RNG data */
+ struct led_classdev ldev; /* Embedded struct for led */
+ struct hwrng hwrng; /* Embedded struct for hwrng */
+ char *led_name;
+ char *rng_name;
+ u8 *buf_out;
+ u8 *buf_in;
+ struct mutex lock;
+ bool present;
+};
+
+static int u2fzero_send(struct u2fzero_device *dev, struct u2f_hid_report *req)
+{
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report));
+
+ ret = hid_hw_output_report(dev->hdev, dev->buf_out,
+ sizeof(struct u2f_hid_msg));
+
+ mutex_unlock(&dev->lock);
+
+ if (ret < 0)
+ return ret;
+
+ return ret == sizeof(struct u2f_hid_msg) ? 0 : -EMSGSIZE;
+}
+
+struct u2fzero_transfer_context {
+ struct completion done;
+ int status;
+};
+
+static void u2fzero_read_callback(struct urb *urb)
+{
+ struct u2fzero_transfer_context *ctx = urb->context;
+
+ ctx->status = urb->status;
+ complete(&ctx->done);
+}
+
+static int u2fzero_recv(struct u2fzero_device *dev,
+ struct u2f_hid_report *req,
+ struct u2f_hid_msg *resp)
+{
+ int ret;
+ struct hid_device *hdev = dev->hdev;
+ struct u2fzero_transfer_context ctx;
+
+ mutex_lock(&dev->lock);
+
+ memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report));
+
+ dev->urb->context = &ctx;
+ init_completion(&ctx.done);
+
+ ret = usb_submit_urb(dev->urb, GFP_NOIO);
+ if (unlikely(ret)) {
+ hid_err(hdev, "usb_submit_urb failed: %d", ret);
+ goto err;
+ }
+
+ ret = hid_hw_output_report(dev->hdev, dev->buf_out,
+ sizeof(struct u2f_hid_msg));
+
+ if (ret < 0) {
+ hid_err(hdev, "hid_hw_output_report failed: %d", ret);
+ goto err;
+ }
+
+ ret = (wait_for_completion_timeout(
+ &ctx.done, msecs_to_jiffies(USB_CTRL_SET_TIMEOUT)));
+ if (ret < 0) {
+ usb_kill_urb(dev->urb);
+ hid_err(hdev, "urb submission timed out");
+ } else {
+ ret = dev->urb->actual_length;
+ memcpy(resp, dev->buf_in, ret);
+ }
+
+err:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static int u2fzero_blink(struct led_classdev *ldev)
+{
+ struct u2fzero_device *dev = container_of(ldev,
+ struct u2fzero_device, ldev);
+ struct u2f_hid_report req = {
+ .report_type = 0,
+ .msg.cid = CID_BROADCAST,
+ .msg.init = {
+ .cmd = U2F_CUSTOM_WINK,
+ .bcnth = 0,
+ .bcntl = 0,
+ .data = {0},
+ }
+ };
+ return u2fzero_send(dev, &req);
+}
+
+static int u2fzero_brightness_set(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ ldev->brightness = LED_OFF;
+ if (brightness)
+ return u2fzero_blink(ldev);
+ else
+ return 0;
+}
+
+static int u2fzero_rng_read(struct hwrng *rng, void *data,
+ size_t max, bool wait)
+{
+ struct u2fzero_device *dev = container_of(rng,
+ struct u2fzero_device, hwrng);
+ struct u2f_hid_report req = {
+ .report_type = 0,
+ .msg.cid = CID_BROADCAST,
+ .msg.init = {
+ .cmd = U2F_CUSTOM_GET_RNG,
+ .bcnth = 0,
+ .bcntl = 0,
+ .data = {0},
+ }
+ };
+ struct u2f_hid_msg resp;
+ int ret;
+ size_t actual_length;
+
+ if (!dev->present) {
+ hid_dbg(dev->hdev, "device not present");
+ return 0;
+ }
+
+ ret = u2fzero_recv(dev, &req, &resp);
+ if (ret < 0)
+ return 0;
+
+ /* only take the minimum amount of data it is safe to take */
+ actual_length = min3((size_t)ret - offsetof(struct u2f_hid_msg,
+ init.data), U2F_HID_MSG_LEN(resp), max);
+
+ memcpy(data, resp.init.data, actual_length);
+
+ return actual_length;
+}
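The three bounds clamped by min3() are: the bytes the URB actually returned minus the U2FHID header, the payload length the device claims, and the space the hwrng core asked for. A worked example with assumed numbers:

/*
 * Suppose the URB returned ret == 64 bytes, the device claims
 * U2F_HID_MSG_LEN(resp) == 32, and the caller passed max == 16.
 * offsetof(struct u2f_hid_msg, init.data) is 7 (4-byte cid + cmd,
 * bcnth, bcntl), so the candidates are 57, 32 and 16, and 16 bytes
 * are copied out.
 */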
+
+static int u2fzero_init_led(struct u2fzero_device *dev,
+ unsigned int minor)
+{
+ dev->led_name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
+ "%s%u", DRIVER_SHORT, minor);
+ if (dev->led_name == NULL)
+ return -ENOMEM;
+
+ dev->ldev.name = dev->led_name;
+ dev->ldev.max_brightness = LED_ON;
+ dev->ldev.flags = LED_HW_PLUGGABLE;
+ dev->ldev.brightness_set_blocking = u2fzero_brightness_set;
+
+ return devm_led_classdev_register(&dev->hdev->dev, &dev->ldev);
+}
+
+static int u2fzero_init_hwrng(struct u2fzero_device *dev,
+ unsigned int minor)
+{
+ dev->rng_name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
+ "%s-rng%u", DRIVER_SHORT, minor);
+ if (dev->rng_name == NULL)
+ return -ENOMEM;
+
+ dev->hwrng.name = dev->rng_name;
+ dev->hwrng.read = u2fzero_rng_read;
+ dev->hwrng.quality = 1;
+
+ return devm_hwrng_register(&dev->hdev->dev, &dev->hwrng);
+}
+
+static int u2fzero_fill_in_urb(struct u2fzero_device *dev)
+{
+ struct hid_device *hdev = dev->hdev;
+ struct usb_device *udev;
+ struct usbhid_device *usbhid = hdev->driver_data;
+ unsigned int pipe_in;
+ struct usb_host_endpoint *ep;
+
+ if (dev->hdev->bus != BUS_USB)
+ return -EINVAL;
+
+ udev = hid_to_usb_dev(hdev);
+
+ if (!usbhid->urbout || !usbhid->urbin)
+ return -ENODEV;
+
+ ep = usb_pipe_endpoint(udev, usbhid->urbin->pipe);
+ if (!ep)
+ return -ENODEV;
+
+ dev->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->urb)
+ return -ENOMEM;
+
+ pipe_in = (usbhid->urbin->pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
+
+ usb_fill_int_urb(dev->urb,
+ udev,
+ pipe_in,
+ dev->buf_in,
+ HID_REPORT_SIZE,
+ u2fzero_read_callback,
+ NULL,
+ ep->desc.bInterval);
+
+ return 0;
+}
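The pipe arithmetic above relies on the USB core keeping the pipe type in the top two bits of the pipe value; clearing those bits and OR-ing in PIPE_INTERRUPT re-labels the existing IN endpoint as an interrupt pipe. The same operation with the steps spelled out (illustrative only):

static unsigned int example_force_interrupt_pipe(unsigned int pipe)
{
	pipe &= ~(3u << 30);		/* drop the current pipe-type field */
	pipe |= PIPE_INTERRUPT << 30;	/* PIPE_INTERRUPT is 1 in <linux/usb.h> */
	return pipe;
}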
+
+static int u2fzero_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct u2fzero_device *dev;
+ unsigned int minor;
+ int ret;
+
+ if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+ return -EINVAL;
+
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
+ dev->buf_out = devm_kmalloc(&hdev->dev,
+ sizeof(struct u2f_hid_report), GFP_KERNEL);
+ if (dev->buf_out == NULL)
+ return -ENOMEM;
+
+ dev->buf_in = devm_kmalloc(&hdev->dev,
+ sizeof(struct u2f_hid_msg), GFP_KERNEL);
+ if (dev->buf_in == NULL)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ dev->hdev = hdev;
+ hid_set_drvdata(hdev, dev);
+ mutex_init(&dev->lock);
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret)
+ return ret;
+
+ u2fzero_fill_in_urb(dev);
+
+ dev->present = true;
+
+ minor = ((struct hidraw *) hdev->hidraw)->minor;
+
+ ret = u2fzero_init_led(dev, minor);
+ if (ret) {
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ hid_info(hdev, "U2F Zero LED initialised\n");
+
+ ret = u2fzero_init_hwrng(dev, minor);
+ if (ret) {
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ hid_info(hdev, "U2F Zero RNG initialised\n");
+
+ return 0;
+}
+
+static void u2fzero_remove(struct hid_device *hdev)
+{
+ struct u2fzero_device *dev = hid_get_drvdata(hdev);
+
+ mutex_lock(&dev->lock);
+ dev->present = false;
+ mutex_unlock(&dev->lock);
+
+ hid_hw_stop(hdev);
+ usb_poison_urb(dev->urb);
+ usb_free_urb(dev->urb);
+}
+
+static const struct hid_device_id u2fzero_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
+ USB_DEVICE_ID_U2F_ZERO) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, u2fzero_table);
+
+static struct hid_driver u2fzero_driver = {
+ .name = "hid-" DRIVER_SHORT,
+ .probe = u2fzero_probe,
+ .remove = u2fzero_remove,
+ .id_table = u2fzero_table,
+};
+
+module_hid_driver(u2fzero_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrej Shadura <andrew@shadura.me>");
+MODULE_DESCRIPTION("U2F Zero LED and RNG driver");
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7710d9f957da..0187c9f8fc22 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
goto cleanup;
}
rc = usb_string(udev, 201, ver_ptr, ver_len);
- if (ver_ptr == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
if (rc == -EPIPE) {
*ver_ptr = '\0';
} else if (rc < 0) {
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
index 519e4c8b53c4..786adbc97ac5 100644
--- a/drivers/hid/intel-ish-hid/Kconfig
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -14,4 +14,19 @@ config INTEL_ISH_HID
Broxton and Kaby Lake.
Say Y here if you want to support Intel ISH. If unsure, say N.
+
+config INTEL_ISH_FIRMWARE_DOWNLOADER
+ tristate "Host Firmware Load feature for Intel ISH"
+ depends on INTEL_ISH_HID
+ depends on X86
+ help
+ The Integrated Sensor Hub (ISH) enables the kernel to offload
+ sensor polling and algorithm processing to a dedicated low power
+ processor in the chipset.
+
+ The Host Firmware Load feature adds support for loading the ISH
+ firmware from the host file system at boot.
+
+ Say M here if you want to support the Host Firmware Load feature
+ for Intel ISH. If unsure, say N.
endmenu
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
index 825b70af672f..2de97e4b7740 100644
--- a/drivers/hid/intel-ish-hid/Makefile
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -20,4 +20,7 @@ obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp-hid.o
intel-ishtp-hid-objs := ishtp-hid.o
intel-ishtp-hid-objs += ishtp-hid-client.o
+obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ishtp-loader.o
+intel-ishtp-loader-objs += ishtp-fw-loader.o
+
ccflags-y += -Idrivers/hid/intel-ish-hid/ishtp
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 08a8327dfd22..523c0cbd44a4 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -31,6 +31,7 @@
#define CNL_H_DEVICE_ID 0xA37C
#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
+#define CML_LP_DEVICE_ID 0x02FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a6e1ee744f4d..ac0a179daf23 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -40,6 +40,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
new file mode 100644
index 000000000000..22ba21457035
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -0,0 +1,1085 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISH-TP client driver for ISH firmware loading
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/intel-ish-client-if.h>
+#include <linux/property.h>
+#include <asm/cacheflush.h>
+
+/* Number of times we attempt to load the firmware before giving up */
+#define MAX_LOAD_ATTEMPTS 3
+
+/* ISH TX/RX ring buffer pool size */
+#define LOADER_CL_RX_RING_SIZE 1
+#define LOADER_CL_TX_RING_SIZE 1
+
+/*
+ * ISH Shim firmware loader reserves 4 Kb buffer in SRAM. The buffer is
+ * used to temporarily hold the data transferred from host to Shim
+ * firmware loader. Reason for the odd size of 3968 bytes? Each IPC
+ * transfer is 128 bytes (= 4 bytes header + 124 bytes payload). So the
+ * 4 Kb buffer can hold maximum of 32 IPC transfers, which means we can
+ * have a max payload of 3968 bytes (= 32 x 124 payload).
+ */
+#define LOADER_SHIM_IPC_BUF_SIZE 3968
+
+/**
+ * enum ish_loader_commands - ISH loader host commands.
+ * @LOADER_CMD_XFER_QUERY: Query the Shim firmware loader for
+ * capabilities
+ * @LOADER_CMD_XFER_FRAGMENT: Transfer one firmware image fragment at a
+ * time. The command may be executed
+ * multiple times until the entire firmware
+ * image is downloaded to SRAM.
+ * @LOADER_CMD_START: Start executing the main firmware.
+ */
+enum ish_loader_commands {
+ LOADER_CMD_XFER_QUERY = 0,
+ LOADER_CMD_XFER_FRAGMENT,
+ LOADER_CMD_START,
+};
+
+/* Command bit mask */
+#define CMD_MASK GENMASK(6, 0)
+#define IS_RESPONSE BIT(7)
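+
+/*
+ * The command byte of struct loader_msg_hdr (below) encodes the
+ * LOADER_CMD_* code in bits 6:0; the firmware sets bit 7 in its reply,
+ * so a reply to LOADER_CMD_XFER_QUERY arrives as
+ * (LOADER_CMD_XFER_QUERY | IS_RESPONSE).
+ */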
+
+/*
+ * The ISH firmware's max delay for one transmit failure is one second,
+ * and the firmware will retry twice, so a three second timeout is used.
+ */
+#define ISHTP_SEND_TIMEOUT (3 * HZ)
+
+/*
+ * Loader transfer modes:
+ *
+ * LOADER_XFER_MODE_ISHTP mode uses the existing ISH-TP mechanism to
+ * transfer data. This may use IPC or DMA if supported in firmware.
+ * The buffer size is limited to 4 Kb by the IPC/ISH-TP protocol for
+ * both IPC & DMA (legacy).
+ *
+ * LOADER_XFER_MODE_DIRECT_DMA - firmware loading is a bit different
+ * from the sensor data streaming. Here we download a large (300+ Kb)
+ * image directly to ISH SRAM memory. There is little benefit in
+ * DMA'ing a 300 Kb image in 4 Kb chunks. Hence, we introduce this
+ * "direct dma" mode, where we do not use ISH-TP for DMA, but instead
+ * manage the DMA directly in the kernel driver and the Shim firmware
+ * loader (allocate a buffer, break it into chunks and transfer). This
+ * allows us to overcome the 4 Kb limit and optimize the data flow path
+ * in firmware.
+ */
+#define LOADER_XFER_MODE_DIRECT_DMA BIT(0)
+#define LOADER_XFER_MODE_ISHTP BIT(1)
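+
+/*
+ * The Shim firmware loader advertises the modes it actually supports in
+ * loader_capability.xfer_mode (queried in ish_query_loader_prop());
+ * load_fw_from_host() prefers direct DMA when both modes are offered.
+ */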
+
+/* ISH Transport Loader client unique GUID */
+static const guid_t loader_ishtp_guid =
+ GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
+ 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc);
+
+#define FILENAME_SIZE 256
+
+/*
+ * The firmware loading latency will be minimal if we can DMA the
+ * entire ISH firmware image in one go. This requires that we allocate
+ * a large DMA buffer in the kernel, which could be problematic on some
+ * platforms. So here we limit the DMA buffer size via a module_param.
+ * We default to 4 pages, but a customer can set it to a higher limit
+ * if deemed appropriate for their platform.
+ */
+static int dma_buf_size_limit = 4 * PAGE_SIZE;
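+
+/*
+ * For example (illustrative values only), the limit could be raised at
+ * module load time with
+ *
+ *   modprobe intel-ishtp-loader dma_buf_size_limit=65536
+ *
+ * or, since the parameter is registered with mode 0644 (see
+ * module_param() at the end of this file), adjusted at run time via
+ * /sys/module/intel_ishtp_loader/parameters/dma_buf_size_limit.
+ */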
+
+/**
+ * struct loader_msg_hdr - Header for ISH Loader commands.
+ * @command: LOADER_CMD* commands. Bit 7 is the response.
+ * @status: Command response status. Non-zero indicates an error
+ * condition.
+ *
+ * This structure is used as header for every command/data sent/received
+ * between Host driver and ISH Shim firmware loader.
+ */
+struct loader_msg_hdr {
+ u8 command;
+ u8 reserved[2];
+ u8 status;
+} __packed;
+
+struct loader_xfer_query {
+ struct loader_msg_hdr hdr;
+ u32 image_size;
+} __packed;
+
+struct ish_fw_version {
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+} __packed;
+
+union loader_version {
+ u32 value;
+ struct {
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u8 build;
+ };
+} __packed;
+
+struct loader_capability {
+ u32 max_fw_image_size;
+ u32 xfer_mode;
+ u32 max_dma_buf_size; /* only for dma mode, multiples of cacheline */
+} __packed;
+
+struct shim_fw_info {
+ struct ish_fw_version ish_fw_version;
+ u32 protocol_version;
+ union loader_version ldr_version;
+ struct loader_capability ldr_capability;
+} __packed;
+
+struct loader_xfer_query_response {
+ struct loader_msg_hdr hdr;
+ struct shim_fw_info fw_info;
+} __packed;
+
+struct loader_xfer_fragment {
+ struct loader_msg_hdr hdr;
+ u32 xfer_mode;
+ u32 offset;
+ u32 size;
+ u32 is_last;
+} __packed;
+
+struct loader_xfer_ipc_fragment {
+ struct loader_xfer_fragment fragment;
+ u8 data[] ____cacheline_aligned; /* variable length payload here */
+} __packed;
+
+struct loader_xfer_dma_fragment {
+ struct loader_xfer_fragment fragment;
+ u64 ddr_phys_addr;
+} __packed;
+
+struct loader_start {
+ struct loader_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct response_info - Encapsulate firmware response related
+ * information for passing between function
+ * loader_cl_send() and process_recv() callback.
+ * @data: Copy the data received from firmware here.
+ * @max_size: Max size allocated for the @data buffer. If the
+ * received data exceeds this value, we log an
+ * error.
+ * @size: Actual size of data received from firmware.
+ * @error: Returns 0 for success, negative error code for a
+ * failure in function process_recv().
+ * @received: Set to true on receiving a valid firmware
+ * response to a host command.
+ * @wait_queue: Wait queue for Host firmware loading where the
+ * client sends a message to ISH firmware and waits
+ * for a response.
+ */
+struct response_info {
+ void *data;
+ size_t max_size;
+ size_t size;
+ int error;
+ bool received;
+ wait_queue_head_t wait_queue;
+};
+
+/**
+ * struct ishtp_cl_data - Encapsulate per ISH-TP Client Data.
+ * @work_ishtp_reset: Work queue for reset handling.
+ * @work_fw_load: Work queue for host firmware loading.
+ * @flag_retry: Flag indicating that host firmware loading should
+ * be retried.
+ * @retry_count: Count of the number of retries.
+ *
+ * This structure is used to store data per client.
+ */
+struct ishtp_cl_data {
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+
+ /*
+ * Used for passing firmware response information between
+ * loader_cl_send() and process_recv() callback.
+ */
+ struct response_info response;
+
+ struct work_struct work_ishtp_reset;
+ struct work_struct work_fw_load;
+
+ /*
+ * In certain failure scenrios, it makes sense to reset the ISH
+ * subsystem and retry Host firmware loading (e.g. bad message
+ * packet, ENOMEM, etc.). On the other hand, failures due to
+ * protocol mismatch, etc., are not recoverable. We do not
+ * retry them.
+ *
+ * If set, the flag indicates that we should re-try the
+ * particular failure.
+ */
+ bool flag_retry;
+ int retry_count;
+};
+
+#define IPC_FRAGMENT_DATA_PREAMBLE \
+ offsetof(struct loader_xfer_ipc_fragment, data)
+
+#define cl_data_to_dev(client_data) ishtp_device((client_data)->cl_device)
+
+/**
+ * get_firmware_variant() - Gets the filename of firmware image to be
+ * loaded based on platform variant.
+ * @client_data: Client data instance.
+ * @filename: Returns firmware filename.
+ *
+ * Queries the firmware-name device property string.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int get_firmware_variant(struct ishtp_cl_data *client_data,
+ char *filename)
+{
+ int rv;
+ const char *val;
+ struct device *devc = ishtp_get_pci_device(client_data->cl_device);
+
+ rv = device_property_read_string(devc, "firmware-name", &val);
+ if (rv < 0) {
+ dev_err(devc,
+ "Error: ISH firmware-name device property required\n");
+ return rv;
+ }
+ return snprintf(filename, FILENAME_SIZE, "intel/%s", val);
+}
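+
+/*
+ * For illustration (the property value here is hypothetical): with a
+ * "firmware-name" device property of "ish_fw.bin", the function above
+ * produces the filename "intel/ish_fw.bin", which load_fw_from_host()
+ * then passes to request_firmware().
+ */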
+
+/**
+ * loader_cl_send() - Send message from host to firmware
+ * @client_data: Client data instance
+ * @out_msg: Message buffer to be sent to firmware
+ * @out_size: Size of the outgoing message
+ * @in_msg: Message buffer where the incoming data is copied.
+ * This buffer is allocated by the calling function.
+ * @in_size: Max size of the incoming message
+ *
+ * Return: Number of bytes copied in the in_msg on success, negative
+ * error code on failure.
+ */
+static int loader_cl_send(struct ishtp_cl_data *client_data,
+ u8 *out_msg, size_t out_size,
+ u8 *in_msg, size_t in_size)
+{
+ int rv;
+ struct loader_msg_hdr *out_hdr = (struct loader_msg_hdr *)out_msg;
+ struct ishtp_cl *loader_ishtp_cl = client_data->loader_ishtp_cl;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: command=%02lx is_response=%u status=%02x\n",
+ __func__,
+ out_hdr->command & CMD_MASK,
+ out_hdr->command & IS_RESPONSE ? 1 : 0,
+ out_hdr->status);
+
+ /* Set up the incoming buffer & size */
+ client_data->response.data = in_msg;
+ client_data->response.max_size = in_size;
+ client_data->response.error = 0;
+ client_data->response.received = false;
+
+ rv = ishtp_cl_send(loader_ishtp_cl, out_msg, out_size);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data),
+ "ishtp_cl_send error %d\n", rv);
+ return rv;
+ }
+
+ wait_event_interruptible_timeout(client_data->response.wait_queue,
+ client_data->response.received,
+ ISHTP_SEND_TIMEOUT);
+ if (!client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Timed out for response to command=%02lx",
+ out_hdr->command & CMD_MASK);
+ return -ETIMEDOUT;
+ }
+
+ if (client_data->response.error < 0)
+ return client_data->response.error;
+
+ return client_data->response.size;
+}
+
+/**
+ * process_recv() - Receive and parse incoming packet
+ * @loader_ishtp_cl: Client instance to get stats
+ * @rb_in_proc: ISH received message buffer
+ *
+ * Parse the incoming packet. If it is a response packet then it will
+ * update the received flag and wake up the caller waiting for the response.
+ */
+static void process_recv(struct ishtp_cl *loader_ishtp_cl,
+ struct ishtp_cl_rb *rb_in_proc)
+{
+ struct loader_msg_hdr *hdr;
+ size_t data_len = rb_in_proc->buf_idx;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(loader_ishtp_cl);
+
+ /* Sanity check */
+ if (!client_data->response.data) {
+ dev_err(cl_data_to_dev(client_data),
+ "Receiving buffer is null. Should be allocated by calling function\n");
+ client_data->response.error = -EINVAL;
+ goto end;
+ }
+
+ if (client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Previous firmware message not yet processed\n");
+ client_data->response.error = -EINVAL;
+ goto end;
+ }
+ /*
+ * All firmware messages have a header. Check buffer size
+ * before accessing elements inside.
+ */
+ if (!rb_in_proc->buffer.data) {
+ dev_warn(cl_data_to_dev(client_data),
+ "rb_in_proc->buffer.data returned null");
+ client_data->response.error = -EBADMSG;
+ goto end;
+ }
+
+ if (data_len < sizeof(struct loader_msg_hdr)) {
+ dev_err(cl_data_to_dev(client_data),
+ "data size %zu is less than header %zu\n",
+ data_len, sizeof(struct loader_msg_hdr));
+ client_data->response.error = -EMSGSIZE;
+ goto end;
+ }
+
+ hdr = (struct loader_msg_hdr *)rb_in_proc->buffer.data;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: command=%02lx is_response=%u status=%02x\n",
+ __func__,
+ hdr->command & CMD_MASK,
+ hdr->command & IS_RESPONSE ? 1 : 0,
+ hdr->status);
+
+ if (((hdr->command & CMD_MASK) != LOADER_CMD_XFER_QUERY) &&
+ ((hdr->command & CMD_MASK) != LOADER_CMD_XFER_FRAGMENT) &&
+ ((hdr->command & CMD_MASK) != LOADER_CMD_START)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Invalid command=%02lx\n",
+ hdr->command & CMD_MASK);
+ client_data->response.error = -EPROTO;
+ goto end;
+ }
+
+ if (data_len > client_data->response.max_size) {
+ dev_err(cl_data_to_dev(client_data),
+ "Received buffer size %zu is larger than allocated buffer %zu\n",
+ data_len, client_data->response.max_size);
+ client_data->response.error = -EMSGSIZE;
+ goto end;
+ }
+
+ /* We expect only "response" messages from firmware */
+ if (!(hdr->command & IS_RESPONSE)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Invalid response to command\n");
+ client_data->response.error = -EIO;
+ goto end;
+ }
+
+ if (hdr->status) {
+ dev_err(cl_data_to_dev(client_data),
+ "Loader returned status %d\n",
+ hdr->status);
+ client_data->response.error = -EIO;
+ goto end;
+ }
+
+ /* Update the actual received buffer size */
+ client_data->response.size = data_len;
+
+ /*
+ * Copy the buffer received in firmware response for the
+ * calling thread.
+ */
+ memcpy(client_data->response.data,
+ rb_in_proc->buffer.data, data_len);
+
+ /* Set flag before waking up the caller */
+ client_data->response.received = true;
+
+end:
+ /* Free the buffer */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
+
+ /* Wake the calling thread */
+ wake_up_interruptible(&client_data->response.wait_queue);
+}
+
+/**
+ * loader_cl_event_cb() - bus driver callback for incoming message
+ * @cl_device: Pointer to the ishtp client device for which this
+ * message is targeted
+ *
+ * Remove the packet from the list and process the message by calling
+ * process_recv().
+ */
+static void loader_cl_event_cb(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_rb *rb_in_proc;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(loader_ishtp_cl)) != NULL) {
+ /* Process the data packet from firmware */
+ process_recv(loader_ishtp_cl, rb_in_proc);
+ }
+}
+
+/**
+ * ish_query_loader_prop() - Query ISH Shim firmware loader
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ * @fw_info: Loader firmware properties
+ *
+ * This function queries the ISH Shim firmware loader for capabilities.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ struct shim_fw_info *fw_info)
+{
+ int rv;
+ struct loader_xfer_query ldr_xfer_query;
+ struct loader_xfer_query_response ldr_xfer_query_resp;
+
+ memset(&ldr_xfer_query, 0, sizeof(ldr_xfer_query));
+ ldr_xfer_query.hdr.command = LOADER_CMD_XFER_QUERY;
+ ldr_xfer_query.image_size = fw->size;
+ rv = loader_cl_send(client_data,
+ (u8 *)&ldr_xfer_query,
+ sizeof(ldr_xfer_query),
+ (u8 *)&ldr_xfer_query_resp,
+ sizeof(ldr_xfer_query_resp));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ return rv;
+ }
+
+ /* On success, the return value is the received buffer size */
+ if (rv != sizeof(struct loader_xfer_query_response)) {
+ dev_err(cl_data_to_dev(client_data),
+ "data size %d is not equal to size of loader_xfer_query_response %zu\n",
+ rv, sizeof(struct loader_xfer_query_response));
+ client_data->flag_retry = true;
+ return -EMSGSIZE;
+ }
+
+ /* Save fw_info for use outside this function */
+ *fw_info = ldr_xfer_query_resp.fw_info;
+
+ /* Loader firmware properties */
+ dev_dbg(cl_data_to_dev(client_data),
+ "ish_fw_version: major=%d minor=%d hotfix=%d build=%d protocol_version=0x%x loader_version=%d\n",
+ fw_info->ish_fw_version.major,
+ fw_info->ish_fw_version.minor,
+ fw_info->ish_fw_version.hotfix,
+ fw_info->ish_fw_version.build,
+ fw_info->protocol_version,
+ fw_info->ldr_version.value);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "loader_capability: max_fw_image_size=0x%x xfer_mode=%d max_dma_buf_size=0x%x dma_buf_size_limit=0x%x\n",
+ fw_info->ldr_capability.max_fw_image_size,
+ fw_info->ldr_capability.xfer_mode,
+ fw_info->ldr_capability.max_dma_buf_size,
+ dma_buf_size_limit);
+
+ /* Sanity checks */
+ if (fw_info->ldr_capability.max_fw_image_size < fw->size) {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH firmware size %zu is greater than Shim firmware loader max supported %d\n",
+ fw->size,
+ fw_info->ldr_capability.max_fw_image_size);
+ return -ENOSPC;
+ }
+
+ /* For DMA the buffer size should be multiple of cacheline size */
+ if ((fw_info->ldr_capability.xfer_mode & LOADER_XFER_MODE_DIRECT_DMA) &&
+ (fw_info->ldr_capability.max_dma_buf_size % L1_CACHE_BYTES)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Shim firmware loader buffer size %d should be multiple of cacheline\n",
+ fw_info->ldr_capability.max_dma_buf_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_fw_xfer_ishtp() - Loads ISH firmware using ishtp interface
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ *
+ * This function uses ISH-TP to transfer ISH firmware from host to
+ * ISH SRAM. Lower layers may use IPC or DMA depending on firmware
+ * support.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_xfer_ishtp(struct ishtp_cl_data *client_data,
+ const struct firmware *fw)
+{
+ int rv;
+ u32 fragment_offset, fragment_size, payload_max_size;
+ struct loader_xfer_ipc_fragment *ldr_xfer_ipc_frag;
+ struct loader_msg_hdr ldr_xfer_ipc_ack;
+
+ payload_max_size =
+ LOADER_SHIM_IPC_BUF_SIZE - IPC_FRAGMENT_DATA_PREAMBLE;
+
+ ldr_xfer_ipc_frag = kzalloc(LOADER_SHIM_IPC_BUF_SIZE, GFP_KERNEL);
+ if (!ldr_xfer_ipc_frag) {
+ client_data->flag_retry = true;
+ return -ENOMEM;
+ }
+
+ ldr_xfer_ipc_frag->fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
+ ldr_xfer_ipc_frag->fragment.xfer_mode = LOADER_XFER_MODE_ISHTP;
+
+ /* Break the firmware image into fragments and send as ISH-TP payload */
+ fragment_offset = 0;
+ while (fragment_offset < fw->size) {
+ if (fragment_offset + payload_max_size < fw->size) {
+ fragment_size = payload_max_size;
+ ldr_xfer_ipc_frag->fragment.is_last = 0;
+ } else {
+ fragment_size = fw->size - fragment_offset;
+ ldr_xfer_ipc_frag->fragment.is_last = 1;
+ }
+
+ ldr_xfer_ipc_frag->fragment.offset = fragment_offset;
+ ldr_xfer_ipc_frag->fragment.size = fragment_size;
+ memcpy(ldr_xfer_ipc_frag->data,
+ &fw->data[fragment_offset],
+ fragment_size);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "xfer_mode=ipc offset=0x%08x size=0x%08x is_last=%d\n",
+ ldr_xfer_ipc_frag->fragment.offset,
+ ldr_xfer_ipc_frag->fragment.size,
+ ldr_xfer_ipc_frag->fragment.is_last);
+
+ rv = loader_cl_send(client_data,
+ (u8 *)ldr_xfer_ipc_frag,
+ IPC_FRAGMENT_DATA_PREAMBLE + fragment_size,
+ (u8 *)&ldr_xfer_ipc_ack,
+ sizeof(ldr_xfer_ipc_ack));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ goto end_err_resp_buf_release;
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+ kfree(ldr_xfer_ipc_frag);
+ return 0;
+
+end_err_resp_buf_release:
+ /* Free ISH buffer if not done already, in error case */
+ kfree(ldr_xfer_ipc_frag);
+ return rv;
+}
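+
+/*
+ * A worked example (numbers for illustration only): with the 3968-byte
+ * IPC payload above, a 512000-byte firmware image is sent as
+ * DIV_ROUND_UP(512000, 3968) = 130 fragments, the last one carrying
+ * the remaining 128 bytes with is_last set.
+ */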
+
+/**
+ * ish_fw_xfer_direct_dma() - Loads ISH firmware using direct dma
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ * @fw_info: Loader firmware properties
+ *
+ * Host firmware load is a unique case where we need to download
+ * a large firmware image (200+ Kb). This function implements
+ * direct DMA transfer in kernel and ISH firmware. This allows
+ * us to overcome the ISH-TP 4 Kb limit, and allows us to DMA
+ * directly to ISH UMA at a location of choice.
+ * This function depends on corresponding support in the ISH firmware.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ const struct shim_fw_info fw_info)
+{
+ int rv;
+ void *dma_buf;
+ dma_addr_t dma_buf_phy;
+ u32 fragment_offset, fragment_size, payload_max_size;
+ struct loader_msg_hdr ldr_xfer_dma_frag_ack;
+ struct loader_xfer_dma_fragment ldr_xfer_dma_frag;
+ struct device *devc = ishtp_get_pci_device(client_data->cl_device);
+ u32 shim_fw_buf_size =
+ fw_info.ldr_capability.max_dma_buf_size;
+
+ /*
+ * payload_max_size should be set to minimum of
+ * (1) Size of firmware to be loaded,
+ * (2) Max DMA buffer size supported by Shim firmware,
+ * (3) DMA buffer size limit set by the module parameter dma_buf_size_limit.
+ */
+ payload_max_size = min3(fw->size,
+ (size_t)shim_fw_buf_size,
+ (size_t)dma_buf_size_limit);
+
+ /*
+ * The buffer size should be a multiple of the cacheline size;
+ * if it is not, round down to the previous cacheline boundary.
+ */
+ payload_max_size &= ~(L1_CACHE_BYTES - 1);
+
+ dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32);
+ if (!dma_buf) {
+ client_data->flag_retry = true;
+ return -ENOMEM;
+ }
+
+ dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(devc, dma_buf_phy)) {
+ dev_err(cl_data_to_dev(client_data), "DMA map failed\n");
+ client_data->flag_retry = true;
+ rv = -ENOMEM;
+ goto end_err_dma_buf_release;
+ }
+
+ ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
+ ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA;
+ ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy;
+
+ /* Send the firmware image in chunks of payload_max_size */
+ fragment_offset = 0;
+ while (fragment_offset < fw->size) {
+ if (fragment_offset + payload_max_size < fw->size) {
+ fragment_size = payload_max_size;
+ ldr_xfer_dma_frag.fragment.is_last = 0;
+ } else {
+ fragment_size = fw->size - fragment_offset;
+ ldr_xfer_dma_frag.fragment.is_last = 1;
+ }
+
+ ldr_xfer_dma_frag.fragment.offset = fragment_offset;
+ ldr_xfer_dma_frag.fragment.size = fragment_size;
+ memcpy(dma_buf, &fw->data[fragment_offset], fragment_size);
+
+ dma_sync_single_for_device(devc, dma_buf_phy,
+ payload_max_size,
+ DMA_TO_DEVICE);
+
+ /*
+ * Flush the cache here because dma_sync_single_for_device()
+ * does not do this on x86.
+ */
+ clflush_cache_range(dma_buf, payload_max_size);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "xfer_mode=dma offset=0x%08x size=0x%x is_last=%d ddr_phys_addr=0x%016llx\n",
+ ldr_xfer_dma_frag.fragment.offset,
+ ldr_xfer_dma_frag.fragment.size,
+ ldr_xfer_dma_frag.fragment.is_last,
+ ldr_xfer_dma_frag.ddr_phys_addr);
+
+ rv = loader_cl_send(client_data,
+ (u8 *)&ldr_xfer_dma_frag,
+ sizeof(ldr_xfer_dma_frag),
+ (u8 *)&ldr_xfer_dma_frag_ack,
+ sizeof(ldr_xfer_dma_frag_ack));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ goto end_err_resp_buf_release;
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+ dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
+ kfree(dma_buf);
+ return 0;
+
+end_err_resp_buf_release:
+ /* Free ISH buffer if not done already, in error case */
+ dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
+end_err_dma_buf_release:
+ kfree(dma_buf);
+ return rv;
+}
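+
+/*
+ * A similar worked example (assuming 4 KiB pages and the default
+ * dma_buf_size_limit of 4 * PAGE_SIZE = 16384 bytes): the same
+ * 512000-byte image would be copied and DMA'd in
+ * DIV_ROUND_UP(512000, 16384) = 32 chunks of at most 16384 bytes each.
+ */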
+
+/**
+ * ish_fw_start() - Start executing ISH main firmware
+ * @client_data: Client data instance
+ *
+ * This function sends a message to the Shim firmware loader to start
+ * the execution of the ISH main firmware.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_start(struct ishtp_cl_data *client_data)
+{
+ struct loader_start ldr_start;
+ struct loader_msg_hdr ldr_start_ack;
+
+ memset(&ldr_start, 0, sizeof(ldr_start));
+ ldr_start.hdr.command = LOADER_CMD_START;
+ return loader_cl_send(client_data,
+ (u8 *)&ldr_start,
+ sizeof(ldr_start),
+ (u8 *)&ldr_start_ack,
+ sizeof(ldr_start_ack));
+}
+
+/**
+ * load_fw_from_host() - Loads ISH firmware from host
+ * @client_data: Client data instance
+ *
+ * This function loads the ISH firmware to ISH SRAM and starts its execution.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int load_fw_from_host(struct ishtp_cl_data *client_data)
+{
+ int rv;
+ u32 xfer_mode;
+ char *filename;
+ const struct firmware *fw;
+ struct shim_fw_info fw_info;
+ struct ishtp_cl *loader_ishtp_cl = client_data->loader_ishtp_cl;
+
+ client_data->flag_retry = false;
+
+ filename = kzalloc(FILENAME_SIZE, GFP_KERNEL);
+ if (!filename) {
+ client_data->flag_retry = true;
+ rv = -ENOMEM;
+ goto end_error;
+ }
+
+ /* Get filename of the ISH firmware to be loaded */
+ rv = get_firmware_variant(client_data, filename);
+ if (rv < 0)
+ goto end_err_filename_buf_release;
+
+ rv = request_firmware(&fw, filename, cl_data_to_dev(client_data));
+ if (rv < 0)
+ goto end_err_filename_buf_release;
+
+ /* Step 1: Query Shim firmware loader properties */
+
+ rv = ish_query_loader_prop(client_data, fw, &fw_info);
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ /* Step 2: Send the main firmware image to be loaded, to ISH SRAM */
+
+ xfer_mode = fw_info.ldr_capability.xfer_mode;
+ if (xfer_mode & LOADER_XFER_MODE_DIRECT_DMA) {
+ rv = ish_fw_xfer_direct_dma(client_data, fw, fw_info);
+ } else if (xfer_mode & LOADER_XFER_MODE_ISHTP) {
+ rv = ish_fw_xfer_ishtp(client_data, fw);
+ } else {
+ dev_err(cl_data_to_dev(client_data),
+ "No transfer mode selected in firmware\n");
+ rv = -EINVAL;
+ }
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ /* Step 3: Start ISH main firmware execution */
+
+ rv = ish_fw_start(client_data);
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ /* Log while @filename is still valid, then release resources */
+ dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n",
+ filename);
+
+ release_firmware(fw);
+ kfree(filename);
+ return 0;
+
+end_err_fw_release:
+ release_firmware(fw);
+end_err_filename_buf_release:
+ kfree(filename);
+end_error:
+ /* Keep a count of retries, and give up after 3 attempts */
+ if (client_data->flag_retry &&
+ client_data->retry_count++ < MAX_LOAD_ATTEMPTS) {
+ dev_warn(cl_data_to_dev(client_data),
+ "ISH host firmware load failed %d. Resetting ISH, and trying again..\n",
+ rv);
+ ish_hw_reset(ishtp_get_ishtp_device(loader_ishtp_cl));
+ } else {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH host firmware load failed %d\n", rv);
+ }
+ return rv;
+}
+
+static void load_fw_from_host_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data;
+
+ client_data = container_of(work, struct ishtp_cl_data,
+ work_fw_load);
+ load_fw_from_host(client_data);
+}
+
+/**
+ * loader_init() - Init function for ISH-TP client
+ * @loader_ishtp_cl: ISH-TP client instance
+ * @reset: true if called for init after reset
+ *
+ * Return: 0 for success, negative error code for failure
+ */
+static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
+{
+ int rv;
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(loader_ishtp_cl);
+
+ dev_dbg(cl_data_to_dev(client_data), "reset flag: %d\n", reset);
+
+ rv = ishtp_cl_link(loader_ishtp_cl);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data), "ishtp_cl_link failed\n");
+ return rv;
+ }
+
+ /* Connect to firmware client */
+ ishtp_set_tx_ring_size(loader_ishtp_cl, LOADER_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(loader_ishtp_cl, LOADER_CL_RX_RING_SIZE);
+
+ fw_client =
+ ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl),
+ &loader_ishtp_guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH client uuid not found\n");
+ rv = -ENOENT;
+ goto err_cl_unlink;
+ }
+
+ ishtp_cl_set_fw_client_id(loader_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(loader_ishtp_cl);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data), "Client connect fail\n");
+ goto err_cl_unlink;
+ }
+
+ dev_dbg(cl_data_to_dev(client_data), "Client connected\n");
+
+ ishtp_register_event_cb(client_data->cl_device, loader_cl_event_cb);
+
+ return 0;
+
+err_cl_unlink:
+ ishtp_cl_unlink(loader_ishtp_cl);
+ return rv;
+}
+
+static void loader_deinit(struct ishtp_cl *loader_ishtp_cl)
+{
+ ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(loader_ishtp_cl);
+ ishtp_cl_unlink(loader_ishtp_cl);
+ ishtp_cl_flush_queues(loader_ishtp_cl);
+
+ /* Disband and free all Tx and Rx client-level rings */
+ ishtp_cl_free(loader_ishtp_cl);
+}
+
+static void reset_handler(struct work_struct *work)
+{
+ int rv;
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+
+ client_data = container_of(work, struct ishtp_cl_data,
+ work_ishtp_reset);
+
+ loader_ishtp_cl = client_data->loader_ishtp_cl;
+ cl_device = client_data->cl_device;
+
+ /* Unlink, flush queues & start again */
+ ishtp_cl_unlink(loader_ishtp_cl);
+ ishtp_cl_flush_queues(loader_ishtp_cl);
+ ishtp_cl_free(loader_ishtp_cl);
+
+ loader_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!loader_ishtp_cl)
+ return;
+
+ ishtp_set_drvdata(cl_device, loader_ishtp_cl);
+ ishtp_set_client_data(loader_ishtp_cl, client_data);
+ client_data->loader_ishtp_cl = loader_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ rv = loader_init(loader_ishtp_cl, 1);
+ if (rv < 0) {
+ dev_err(ishtp_device(cl_device), "Reset Failed\n");
+ return;
+ }
+
+ /* ISH firmware loading from host */
+ load_fw_from_host(client_data);
+}
+
+/**
+ * loader_ishtp_cl_probe() - ISH-TP client driver probe
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device create on ISH-TP bus
+ *
+ * Return: 0 for success, negative error code for failure
+ */
+static int loader_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_data *client_data;
+ int rv;
+
+ client_data = devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data),
+ GFP_KERNEL);
+ if (!client_data)
+ return -ENOMEM;
+
+ loader_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!loader_ishtp_cl)
+ return -ENOMEM;
+
+ ishtp_set_drvdata(cl_device, loader_ishtp_cl);
+ ishtp_set_client_data(loader_ishtp_cl, client_data);
+ client_data->loader_ishtp_cl = loader_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ init_waitqueue_head(&client_data->response.wait_queue);
+
+ INIT_WORK(&client_data->work_ishtp_reset,
+ reset_handler);
+ INIT_WORK(&client_data->work_fw_load,
+ load_fw_from_host_handler);
+
+ rv = loader_init(loader_ishtp_cl, 0);
+ if (rv < 0) {
+ ishtp_cl_free(loader_ishtp_cl);
+ return rv;
+ }
+ ishtp_get_device(cl_device);
+
+ client_data->retry_count = 0;
+
+ /* ISH firmware loading from host */
+ schedule_work(&client_data->work_fw_load);
+
+ return 0;
+}
+
+/**
+ * loader_ishtp_cl_remove() - ISH-TP client driver remove
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device remove on ISH-TP bus
+ *
+ * Return: 0
+ */
+static int loader_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ client_data = ishtp_get_client_data(loader_ishtp_cl);
+
+ /*
+ * The order of the following two cancel_work_sync() calls is
+ * important: work_fw_load can in turn schedule
+ * work_ishtp_reset, so first cancel work_fw_load and then
+ * cancel work_ishtp_reset.
+ */
+ cancel_work_sync(&client_data->work_fw_load);
+ cancel_work_sync(&client_data->work_ishtp_reset);
+ loader_deinit(loader_ishtp_cl);
+ ishtp_put_device(cl_device);
+
+ return 0;
+}
+
+/**
+ * loader_ishtp_cl_reset() - ISH-TP client driver reset
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device reset on ISH-TP bus
+ *
+ * Return: 0
+ */
+static int loader_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ client_data = ishtp_get_client_data(loader_ishtp_cl);
+
+ schedule_work(&client_data->work_ishtp_reset);
+
+ return 0;
+}
+
+static struct ishtp_cl_driver loader_ishtp_cl_driver = {
+ .name = "ish-loader",
+ .guid = &loader_ishtp_guid,
+ .probe = loader_ishtp_cl_probe,
+ .remove = loader_ishtp_cl_remove,
+ .reset = loader_ishtp_cl_reset,
+};
+
+static int __init ish_loader_init(void)
+{
+ return ishtp_cl_driver_register(&loader_ishtp_cl_driver, THIS_MODULE);
+}
+
+static void __exit ish_loader_exit(void)
+{
+ ishtp_cl_driver_unregister(&loader_ishtp_cl_driver);
+}
+
+late_initcall(ish_loader_init);
+module_exit(ish_loader_exit);
+
+module_param(dma_buf_size_limit, int, 0644);
+MODULE_PARM_DESC(dma_buf_size_limit, "Limit the DMA buf size to this value in bytes");
+
+MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver");
+MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 30fe0c5e6fad..56777a43e69c 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -15,15 +15,16 @@
#include <linux/module.h>
#include <linux/hid.h>
+#include <linux/intel-ish-client-if.h>
#include <linux/sched.h>
-#include "ishtp/ishtp-dev.h"
-#include "ishtp/client.h"
#include "ishtp-hid.h"
/* Rx ring buffer pool size */
#define HID_CL_RX_RING_SIZE 32
#define HID_CL_TX_RING_SIZE 16
+#define cl_data_to_dev(client_data) ishtp_device(client_data->cl_device)
+
/**
* report_bad_packets() - Report bad packets
* @hid_ishtp_cl: Client instance to get stats
@@ -37,9 +38,9 @@ static void report_bad_packet(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
size_t cur_pos, size_t payload_len)
{
struct hostif_msg *recv_msg = recv_buf;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
- dev_err(&client_data->cl_device->dev, "[hid-ish]: BAD packet %02X\n"
+ dev_err(cl_data_to_dev(client_data), "[hid-ish]: BAD packet %02X\n"
"total_bad=%u cur_pos=%u\n"
"[%02X %02X %02X %02X]\n"
"payload_len=%u\n"
@@ -69,13 +70,15 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
unsigned char *payload;
struct device_info *dev_info;
int i, j;
- size_t payload_len, total_len, cur_pos;
+ size_t payload_len, total_len, cur_pos, raw_len;
int report_type;
struct report_list *reports_list;
char *reports;
size_t report_len;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int curr_hid_dev = client_data->cur_hid_dev;
+ struct ishtp_hid_data *hid_data = NULL;
+ struct hid_device *hid = NULL;
payload = recv_buf + sizeof(struct hostif_msg_hdr);
total_len = data_len;
@@ -83,12 +86,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
do {
if (cur_pos + sizeof(struct hostif_msg) > total_len) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: error, received %u which is less than data header %u\n",
(unsigned int)data_len,
(unsigned int)sizeof(struct hostif_msg_hdr));
++client_data->bad_recv_cnt;
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -101,7 +104,7 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
++client_data->bad_recv_cnt;
report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -116,18 +119,18 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
client_data->hid_dev_count = (unsigned int)*payload;
if (!client_data->hid_devices)
client_data->hid_devices = devm_kcalloc(
- &client_data->cl_device->dev,
+ cl_data_to_dev(client_data),
client_data->hid_dev_count,
sizeof(struct device_info),
GFP_KERNEL);
if (!client_data->hid_devices) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"Mem alloc failed for hid device info\n");
wake_up_interruptible(&client_data->init_wait);
break;
@@ -135,7 +138,7 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
for (i = 0; i < client_data->hid_dev_count; ++i) {
if (1 + sizeof(struct device_info) * i >=
payload_len) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: [ENUM_DEVICES]: content size %zu is bigger than payload_len %zu\n",
1 + sizeof(struct device_info)
* i, payload_len);
@@ -165,12 +168,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
if (!client_data->hid_descr[curr_hid_dev])
client_data->hid_descr[curr_hid_dev] =
- devm_kmalloc(&client_data->cl_device->dev,
+ devm_kmalloc(cl_data_to_dev(client_data),
payload_len, GFP_KERNEL);
if (client_data->hid_descr[curr_hid_dev]) {
memcpy(client_data->hid_descr[curr_hid_dev],
@@ -190,12 +193,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
if (!client_data->report_descr[curr_hid_dev])
client_data->report_descr[curr_hid_dev] =
- devm_kmalloc(&client_data->cl_device->dev,
+ devm_kmalloc(cl_data_to_dev(client_data),
payload_len, GFP_KERNEL);
if (client_data->report_descr[curr_hid_dev]) {
memcpy(client_data->report_descr[curr_hid_dev],
@@ -219,18 +222,31 @@ do_get_report:
/* Get index of device that matches this id */
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (recv_msg->hdr.device_id ==
- client_data->hid_devices[i].dev_id)
- if (client_data->hid_sensor_hubs[i]) {
- hid_input_report(
- client_data->hid_sensor_hubs[
- i],
- report_type, payload,
- payload_len, 0);
- ishtp_hid_wakeup(
- client_data->hid_sensor_hubs[
- i]);
+ client_data->hid_devices[i].dev_id) {
+ hid = client_data->hid_sensor_hubs[i];
+ if (!hid)
break;
+
+ hid_data = hid->driver_data;
+ if (hid_data->raw_get_req) {
+ raw_len = min(hid_data->raw_buf_size,
+ payload_len);
+
+ memcpy(hid_data->raw_buf,
+ payload, raw_len);
+ } else {
+ hid_input_report
+ (hid, report_type,
+ payload, payload_len,
+ 0);
}
+
+ ishtp_hid_wakeup(hid);
+ break;
+ }
}
break;
@@ -295,7 +311,7 @@ do_get_report:
++client_data->bad_recv_cnt;
report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -475,7 +491,7 @@ int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data)
static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int retry_count;
int rv;
@@ -501,18 +517,18 @@ static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
sizeof(struct hostif_msg));
}
if (!client_data->enum_devices_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out waiting for enum_devices\n");
return -ETIMEDOUT;
}
if (!client_data->hid_devices) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: failed to allocate HID dev structures\n");
return -ENOMEM;
}
client_data->num_hid_devices = client_data->hid_dev_count;
- dev_info(&hid_ishtp_cl->device->dev,
+ dev_info(ishtp_device(client_data->cl_device),
"[hid-ish]: enum_devices_done OK, num_hid_devices=%d\n",
client_data->num_hid_devices);
@@ -531,7 +547,7 @@ static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int rv;
/* Get HID descriptor */
@@ -549,13 +565,13 @@ static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
client_data->hid_descr_done,
3 * HZ);
if (!client_data->hid_descr_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out for hid_descr_done\n");
return -EIO;
}
if (!client_data->hid_descr[index]) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: allocation HID desc fail\n");
return -ENOMEM;
}
@@ -578,7 +594,7 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
int index)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int rv;
/* Get report descriptor */
@@ -596,12 +612,12 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
client_data->report_descr_done,
3 * HZ);
if (!client_data->report_descr_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out for report descr\n");
return -EIO;
}
if (!client_data->report_descr[index]) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: failed to alloc report descr\n");
return -ENOMEM;
}
@@ -626,42 +642,42 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
{
struct ishtp_device *dev;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
struct ishtp_fw_client *fw_client;
int i;
int rv;
- dev_dbg(&client_data->cl_device->dev, "%s\n", __func__);
+ dev_dbg(cl_data_to_dev(client_data), "%s\n", __func__);
hid_ishtp_trace(client_data, "%s reset flag: %d\n", __func__, reset);
- rv = ishtp_cl_link(hid_ishtp_cl, ISHTP_HOST_CLIENT_ID_ANY);
+ rv = ishtp_cl_link(hid_ishtp_cl);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"ishtp_cl_link failed\n");
return -ENOMEM;
}
client_data->init_done = 0;
- dev = hid_ishtp_cl->dev;
+ dev = ishtp_get_ishtp_device(hid_ishtp_cl);
/* Connect to FW client */
- hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE;
- hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE;
+ ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE);
fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
if (!fw_client) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"ish client uuid not found\n");
return -ENOENT;
}
-
- hid_ishtp_cl->fw_client_id = fw_client->client_id;
- hid_ishtp_cl->state = ISHTP_CL_CONNECTING;
+ ishtp_cl_set_fw_client_id(hid_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_CONNECTING);
rv = ishtp_cl_connect(hid_ishtp_cl);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"client connect fail\n");
goto err_cl_unlink;
}
@@ -669,7 +685,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
hid_ishtp_trace(client_data, "%s client connected\n", __func__);
/* Register read callback */
- ishtp_register_event_cb(hid_ishtp_cl->device, ish_cl_event_cb);
+ ishtp_register_event_cb(client_data->cl_device, ish_cl_event_cb);
rv = ishtp_enum_enum_devices(hid_ishtp_cl);
if (rv)
@@ -692,7 +708,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
if (!reset) {
rv = ishtp_hid_probe(i, client_data);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: HID probe for #%u failed: %d\n",
i, rv);
goto err_cl_disconnect;
@@ -707,7 +723,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
return 0;
err_cl_disconnect:
- hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
ishtp_cl_disconnect(hid_ishtp_cl);
err_cl_unlink:
ishtp_cl_unlink(hid_ishtp_cl);
@@ -744,16 +760,16 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- dev_dbg(&cl_device->dev, "%s\n", __func__);
+ dev_dbg(ishtp_device(client_data->cl_device), "%s\n", __func__);
hid_ishtp_cl_deinit(hid_ishtp_cl);
- hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device);
if (!hid_ishtp_cl)
return;
ishtp_set_drvdata(cl_device, hid_ishtp_cl);
- hid_ishtp_cl->client_data = client_data;
+ ishtp_set_client_data(hid_ishtp_cl, client_data);
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->num_hid_devices = 0;
@@ -762,15 +778,17 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
rv = hid_ishtp_cl_init(hid_ishtp_cl, 1);
if (!rv)
break;
- dev_err(&client_data->cl_device->dev, "Retry reset init\n");
+ dev_err(cl_data_to_dev(client_data), "Retry reset init\n");
}
if (rv) {
- dev_err(&client_data->cl_device->dev, "Reset Failed\n");
+ dev_err(cl_data_to_dev(client_data), "Reset Failed\n");
hid_ishtp_trace(client_data, "%s Failed hid_ishtp_cl %p\n",
__func__, hid_ishtp_cl);
}
}
+void (*hid_print_trace)(void *unused, const char *format, ...);
+
/**
* hid_ishtp_cl_probe() - ISHTP client driver probe
* @cl_device: ISHTP client device instance
@@ -788,21 +806,18 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
if (!cl_device)
return -ENODEV;
- if (!guid_equal(&hid_ishtp_guid,
- &cl_device->fw_client->props.protocol_name))
- return -ENODEV;
-
- client_data = devm_kzalloc(&cl_device->dev, sizeof(*client_data),
+ client_data = devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data),
GFP_KERNEL);
if (!client_data)
return -ENOMEM;
- hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device);
if (!hid_ishtp_cl)
return -ENOMEM;
ishtp_set_drvdata(cl_device, hid_ishtp_cl);
- hid_ishtp_cl->client_data = client_data;
+ ishtp_set_client_data(hid_ishtp_cl, client_data);
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->cl_device = cl_device;
@@ -811,6 +826,8 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+ hid_print_trace = ishtp_trace_callback(cl_device);
+
rv = hid_ishtp_cl_init(hid_ishtp_cl, 0);
if (rv) {
ishtp_cl_free(hid_ishtp_cl);
@@ -832,13 +849,13 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- dev_dbg(&cl_device->dev, "%s\n", __func__);
- hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
ishtp_cl_disconnect(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
@@ -862,7 +879,7 @@ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -872,8 +889,6 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
return 0;
}
-#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
-
/**
* hid_ishtp_cl_suspend() - ISHTP client driver suspend
* @device: device instance
@@ -884,9 +899,9 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_suspend(struct device *device)
{
- struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -905,9 +920,9 @@ static int hid_ishtp_cl_suspend(struct device *device)
*/
static int hid_ishtp_cl_resume(struct device *device)
{
- struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -922,6 +937,7 @@ static const struct dev_pm_ops hid_ishtp_pm_ops = {
static struct ishtp_cl_driver hid_ishtp_cl_driver = {
.name = "ish-hid",
+ .guid = &hid_ishtp_guid,
.probe = hid_ishtp_cl_probe,
.remove = hid_ishtp_cl_remove,
.reset = hid_ishtp_cl_reset,
@@ -933,7 +949,7 @@ static int __init ish_hid_init(void)
int rv;
/* Register ISHTP client device driver with ISHTP Bus */
- rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver);
+ rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver, THIS_MODULE);
return rv;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index bc4c536f3c0d..62c03561adaa 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -14,8 +14,8 @@
*/
#include <linux/hid.h>
+#include <linux/intel-ish-client-if.h>
#include <uapi/linux/input.h>
-#include "ishtp/client.h"
#include "ishtp-hid.h"
/**
@@ -59,10 +59,46 @@ static void ishtp_hid_close(struct hid_device *hid)
{
}
-static int ishtp_raw_request(struct hid_device *hdev, unsigned char reportnum,
- __u8 *buf, size_t len, unsigned char rtype, int reqtype)
+static int ishtp_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
{
- return 0;
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ char *ishtp_buf = NULL;
+ size_t ishtp_buf_len;
+ unsigned int header_size = sizeof(struct hostif_msg);
+
+ if (rtype == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ hid_data->request_done = false;
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ hid_data->raw_buf = buf;
+ hid_data->raw_buf_size = len;
+ hid_data->raw_get_req = true;
+
+ hid_ishtp_get_report(hid, reportnum, rtype);
+ break;
+ case HID_REQ_SET_REPORT:
+ /*
+ * Spare 7 bytes for 64b accesses through
+ * get/put_unaligned_le64()
+ */
+ ishtp_buf_len = len + header_size;
+ ishtp_buf = kzalloc(ishtp_buf_len + 7, GFP_KERNEL);
+ if (!ishtp_buf)
+ return -ENOMEM;
+
+ memcpy(ishtp_buf + header_size, buf, len);
+ hid_ishtp_set_feature(hid, ishtp_buf, ishtp_buf_len, reportnum);
+ kfree(ishtp_buf);
+ break;
+ }
+
+ hid_hw_wait(hid);
+
+ return len;
}
/**
@@ -87,6 +123,7 @@ static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
hid_data->request_done = false;
switch (reqtype) {
case HID_REQ_GET_REPORT:
+ hid_data->raw_get_req = false;
hid_ishtp_get_report(hid, rep->id, rep->type);
break;
case HID_REQ_SET_REPORT:
@@ -116,7 +153,6 @@ static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
static int ishtp_wait_for_response(struct hid_device *hid)
{
struct ishtp_hid_data *hid_data = hid->driver_data;
- struct ishtp_cl_data *client_data = hid_data->client_data;
int rv;
hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
@@ -204,7 +240,8 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
hid->ll_driver = &ishtp_hid_ll_driver;
hid->bus = BUS_INTEL_ISHTP;
- hid->dev.parent = &client_data->cl_device->dev;
+ hid->dev.parent = ishtp_device(client_data->cl_device);
+
hid->version = le16_to_cpu(ISH_HID_VERSION);
hid->vendor = le16_to_cpu(client_data->hid_devices[cur_hid_dev].vid);
hid->product = le16_to_cpu(client_data->hid_devices[cur_hid_dev].pid);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 1cd07a441cd4..e27d3d6c1920 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -24,9 +24,9 @@
#define IS_RESPONSE 0x80
/* Used to dump to Linux trace buffer, if enabled */
-#define hid_ishtp_trace(client, ...) \
- client->cl_device->ishtp_dev->print_log(\
- client->cl_device->ishtp_dev, __VA_ARGS__)
+extern void (*hid_print_trace)(void *unused, const char *format, ...);
+#define hid_ishtp_trace(client, ...) \
+ (hid_print_trace)(NULL, __VA_ARGS__)
/* ISH Transport protocol (ISHTP in short) GUID */
static const guid_t hid_ishtp_guid =
@@ -159,6 +159,9 @@ struct ishtp_cl_data {
* @client_data: Link to the client instance
* @hid_wait: Completion waitq
*
+ * @raw_get_req: Flag indicating raw get request ongoing
+ * @raw_buf: raw request buffer filled on receiving get report
+ * @raw_buf_size: raw request buffer size
* Used to tie hid hid->driver data to driver client instance
*/
struct ishtp_hid_data {
@@ -166,6 +169,11 @@ struct ishtp_hid_data {
bool request_done;
struct ishtp_cl_data *client_data;
wait_queue_head_t hid_wait;
+
+ /* raw request */
+ bool raw_get_req;
+ u8 *raw_buf;
+ size_t raw_buf_size;
};
/* Interface functions between HID LL driver and ISH TP client */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index d5f4b6438d86..fb8ca12955b4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -171,6 +171,19 @@ struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
EXPORT_SYMBOL(ishtp_fw_cl_get_client);
/**
+ * ishtp_get_fw_client_id() - Get fw client id
+ * @fw_client: firmware client instance
+ *
+ * This interface is used to get the firmware client id.
+ *
+ * Return: firmware client id.
+ */
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client)
+{
+ return fw_client->client_id;
+}
+EXPORT_SYMBOL(ishtp_get_fw_client_id);
+
+/**
* ishtp_fw_cl_by_id() - return index to fw_clients for client_id
* @dev: the ishtp device structure
* @client_id: fw client id to search
@@ -220,6 +233,26 @@ static int ishtp_cl_device_probe(struct device *dev)
}
/**
+ * ishtp_cl_bus_match() - Bus match() callback
+ * @dev: the device structure
+ * @drv: the driver structure
+ *
+ * This is a bus match callback, called when a new ishtp_cl_device is
+ * registered during ishtp bus client enumeration. Use the guid_t in
+ * drv and dev to decide whether they match or not.
+ *
+ * Return: 1 if dev & drv match, 0 otherwise.
+ */
+static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
+
+ return guid_equal(driver->guid,
+ &device->fw_client->props.protocol_name);
+}
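+
+/*
+ * Client drivers opt in to this matching by setting the .guid field of
+ * their struct ishtp_cl_driver, as the ish-hid and ish-loader drivers
+ * in this series do.
+ */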
+
+/**
* ishtp_cl_device_remove() - Bus remove() callback
* @dev: the device structure
*
@@ -372,6 +405,7 @@ static struct bus_type ishtp_cl_bus_type = {
.name = "ishtp",
.dev_groups = ishtp_cl_dev_groups,
.probe = ishtp_cl_device_probe,
+ .match = ishtp_cl_bus_match,
.remove = ishtp_cl_device_remove,
.pm = &ishtp_cl_bus_dev_pm_ops,
.uevent = ishtp_cl_uevent,
@@ -445,6 +479,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
}
ishtp_device_ready = true;
+ dev_set_drvdata(&device->dev, device);
return device;
}
@@ -464,7 +499,7 @@ static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
}
/**
- * __ishtp_cl_driver_register() - Client driver register
+ * ishtp_cl_driver_register() - Client driver register
* @driver: the client driver instance
* @owner: Owner of this driver module
*
@@ -473,8 +508,8 @@ static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
*
* Return: Return value of driver_register or -ENODEV if not ready
*/
-int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
- struct module *owner)
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner)
{
int err;
@@ -491,7 +526,7 @@ int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
return 0;
}
-EXPORT_SYMBOL(__ishtp_cl_driver_register);
+EXPORT_SYMBOL(ishtp_cl_driver_register);
/**
* ishtp_cl_driver_unregister() - Client driver unregister
@@ -807,6 +842,59 @@ int ishtp_use_dma_transfer(void)
}
/**
+ * ishtp_device() - Return device pointer
+ * @device: ISH-TP client device instance
+ *
+ * This interface is used to return the device pointer from an
+ * ishtp_cl_device instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_device(struct ishtp_cl_device *device)
+{
+ return &device->dev;
+}
+EXPORT_SYMBOL(ishtp_device);
+
+/**
+ * ishtp_get_pci_device() - Return PCI device dev pointer
+ * @device: ISH-TP client device instance
+ *
+ * This interface is used to return the PCI device pointer
+ * from an ishtp_cl_device instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *device)
+{
+ return device->ishtp_dev->devc;
+}
+EXPORT_SYMBOL(ishtp_get_pci_device);
+
+/**
+ * ishtp_trace_callback() - Return trace callback
+ *
+ * This interface is used to return the trace callback function pointer.
+ *
+ * Return: void *.
+ */
+void *ishtp_trace_callback(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->ishtp_dev->print_log;
+}
+EXPORT_SYMBOL(ishtp_trace_callback);
+
+/**
+ * ish_hw_reset() - Call HW reset IPC callback
+ *
+ * This interface is used to reset HW in case of error.
+ *
+ * Return: value from IPC hw_reset callback
+ */
+int ish_hw_reset(struct ishtp_device *dev)
+{
+ return dev->ops->hw_reset(dev);
+}
+EXPORT_SYMBOL(ish_hw_reset);
+
+/**
* ishtp_bus_register() - Function to register bus
*
* This register ishtp bus
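
Illustrative sketch only, not part of the patch: with ishtp_cl_driver_register()/ishtp_cl_driver_unregister() exported directly and the GUID-based match callback added above, a client driver can bind to the ISHTP bus without touching bus.h internals. The example_* names and the GUID value are placeholders, and the sketch assumes the driver structure and helpers are exposed through <linux/intel-ish-client-if.h>.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <linux/intel-ish-client-if.h>

/* Placeholder GUID; a real client matches its firmware client's protocol GUID */
static const guid_t example_guid =
	GUID_INIT(0x12345678, 0x1234, 0x5678,
		  0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78);

static int example_ishtp_probe(struct ishtp_cl_device *cl_device)
{
	/* Accessors replace direct access to the bus-internal structures */
	dev_info(ishtp_device(cl_device), "example ISHTP client bound\n");
	return 0;
}

static int example_ishtp_remove(struct ishtp_cl_device *cl_device)
{
	return 0;
}

static struct ishtp_cl_driver example_ishtp_driver = {
	.name = "example-ishtp-client",
	.guid = &example_guid,		/* consumed by ishtp_cl_bus_match() */
	.probe = example_ishtp_probe,
	.remove = example_ishtp_remove,
};

static int __init example_ishtp_init(void)
{
	/* The module owner is now passed explicitly; the old wrapper macro is gone */
	return ishtp_cl_driver_register(&example_ishtp_driver, THIS_MODULE);
}
module_init(example_ishtp_init);

static void __exit example_ishtp_exit(void)
{
	ishtp_cl_driver_unregister(&example_ishtp_driver);
}
module_exit(example_ishtp_exit);

MODULE_LICENSE("GPL");
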
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
index 4cf7ad586c37..93d516f5a853 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.h
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/intel-ish-client-if.h>
struct ishtp_cl;
struct ishtp_cl_device;
@@ -52,25 +53,6 @@ struct ishtp_cl_device {
void (*event_cb)(struct ishtp_cl_device *device);
};
-/**
- * struct ishtp_cl_device - ISHTP device handle
- * @driver: driver instance on a bus
- * @name: Name of the device for probe
- * @probe: driver callback for device probe
- * @remove: driver callback on device removal
- *
- * Client drivers defines to get probed/removed for ISHTP client device.
- */
-struct ishtp_cl_driver {
- struct device_driver driver;
- const char *name;
- int (*probe)(struct ishtp_cl_device *dev);
- int (*remove)(struct ishtp_cl_device *dev);
- int (*reset)(struct ishtp_cl_device *dev);
- const struct dev_pm_ops *pm;
-};
-
-
int ishtp_bus_new_client(struct ishtp_device *dev);
void ishtp_remove_all_clients(struct ishtp_device *dev);
int ishtp_cl_device_bind(struct ishtp_cl *cl);
@@ -98,22 +80,5 @@ void ishtp_recv(struct ishtp_device *dev);
void ishtp_reset_handler(struct ishtp_device *dev);
void ishtp_reset_compl_handler(struct ishtp_device *dev);
-void ishtp_put_device(struct ishtp_cl_device *);
-void ishtp_get_device(struct ishtp_cl_device *);
-
-void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
-void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
-
-int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
- struct module *owner);
-#define ishtp_cl_driver_register(driver) \
- __ishtp_cl_driver_register(driver, THIS_MODULE)
-void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
-
-int ishtp_register_event_cb(struct ishtp_cl_device *device,
- void (*read_cb)(struct ishtp_cl_device *));
int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const guid_t *cuuid);
-struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
- const guid_t *uuid);
-
#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index faeccdb1475b..b7ac5e3b1e82 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -126,7 +126,7 @@ static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
*
* Return: The allocated client instance or NULL on failure
*/
-struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *cl;
@@ -134,7 +134,7 @@ struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
if (!cl)
return NULL;
- ishtp_cl_init(cl, dev);
+ ishtp_cl_init(cl, cl_device->ishtp_dev);
return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);
@@ -168,9 +168,6 @@ EXPORT_SYMBOL(ishtp_cl_free);
/**
* ishtp_cl_link() - Reserve a host id and link the client instance
* @cl: client device instance
- * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
- * id from the available can be used
- *
*
* This allocates a single bit in the hostmap. This function will make sure
* that not many client sessions are opened at the same time. Once allocated
@@ -179,11 +176,11 @@ EXPORT_SYMBOL(ishtp_cl_free);
*
* Return: 0 or error code on failure
*/
-int ishtp_cl_link(struct ishtp_cl *cl, int id)
+int ishtp_cl_link(struct ishtp_cl *cl)
{
struct ishtp_device *dev;
- unsigned long flags, flags_cl;
- int ret = 0;
+ unsigned long flags, flags_cl;
+ int id, ret = 0;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
@@ -197,10 +194,7 @@ int ishtp_cl_link(struct ishtp_cl *cl, int id)
goto unlock_dev;
}
- /* If Id is not assigned get one*/
- if (id == ISHTP_HOST_CLIENT_ID_ANY)
- id = find_first_zero_bit(dev->host_clients_map,
- ISHTP_CLIENTS_MAX);
+ id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);
if (id >= ISHTP_CLIENTS_MAX) {
spin_unlock_irqrestore(&dev->device_lock, flags);
@@ -1069,3 +1063,45 @@ void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
eoi:
return;
}
+
+void *ishtp_get_client_data(struct ishtp_cl *cl)
+{
+ return cl->client_data;
+}
+EXPORT_SYMBOL(ishtp_get_client_data);
+
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
+{
+ cl->client_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_client_data);
+
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
+{
+ return cl->dev;
+}
+EXPORT_SYMBOL(ishtp_get_ishtp_device);
+
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->tx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_tx_ring_size);
+
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->rx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_rx_ring_size);
+
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
+{
+ cl->state = state;
+}
+EXPORT_SYMBOL(ishtp_set_connection_state);
+
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
+{
+ cl->fw_client_id = fw_client_id;
+}
+EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
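
Illustrative sketch only, not part of the patch: a hypothetical connect path showing how a client driver would use the revised ishtp_cl_allocate()/ishtp_cl_link() signatures together with the accessor helpers added above. The example_connect() name and the ring sizes are placeholders, and the sketch assumes ISHTP_CL_CONNECTING and the remaining client calls are exposed via <linux/intel-ish-client-if.h>.

#include <linux/errno.h>
#include <linux/intel-ish-client-if.h>

static int example_connect(struct ishtp_cl_device *cl_device,
			   const guid_t *fw_guid)
{
	struct ishtp_fw_client *fw_client;
	struct ishtp_cl *cl;
	int rv;

	/* ishtp_cl_allocate() now takes the client device, not the ishtp_device */
	cl = ishtp_cl_allocate(cl_device);
	if (!cl)
		return -ENOMEM;

	/* ishtp_cl_link() now picks a free host client id internally */
	rv = ishtp_cl_link(cl);
	if (rv)
		goto out_free;

	/* Resolve the firmware client and record its id through the helpers */
	fw_client = ishtp_fw_cl_get_client(ishtp_get_ishtp_device(cl), fw_guid);
	if (!fw_client) {
		rv = -ENOENT;
		goto out_unlink;
	}
	ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));
	ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

	/* Ring sizes are configured through setters instead of direct writes */
	ishtp_set_tx_ring_size(cl, 8);
	ishtp_set_rx_ring_size(cl, 8);

	rv = ishtp_cl_connect(cl);
	if (rv)
		goto out_unlink;

	ishtp_set_drvdata(cl_device, cl);
	return 0;

out_unlink:
	ishtp_cl_unlink(cl);
out_free:
	ishtp_cl_free(cl);
	return rv;
}
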
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index e0df3eb611e6..6ed00947d6bc 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -19,15 +19,6 @@
#include <linux/types.h>
#include "ishtp-dev.h"
-/* Client state */
-enum cl_state {
- ISHTP_CL_INITIALIZING = 0,
- ISHTP_CL_CONNECTING,
- ISHTP_CL_CONNECTED,
- ISHTP_CL_DISCONNECTING,
- ISHTP_CL_DISCONNECTED
-};
-
/* Tx and Rx ring size */
#define CL_DEF_RX_RING_SIZE 2
#define CL_DEF_TX_RING_SIZE 2
@@ -169,19 +160,4 @@ static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
(cl1->fw_client_id == cl2->fw_client_id);
}
-/* exported functions from ISHTP under client management scope */
-struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev);
-void ishtp_cl_free(struct ishtp_cl *cl);
-int ishtp_cl_link(struct ishtp_cl *cl, int id);
-void ishtp_cl_unlink(struct ishtp_cl *cl);
-int ishtp_cl_disconnect(struct ishtp_cl *cl);
-int ishtp_cl_connect(struct ishtp_cl *cl);
-int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
-int ishtp_cl_flush_queues(struct ishtp_cl *cl);
-
-/* exported functions from ISHTP client buffer management scope */
-int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
-bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
-struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
-
#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index e54ce1ef27dd..3cfef084b9fc 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -79,32 +79,6 @@ struct ishtp_fw_client {
uint8_t client_id;
};
-/**
- * struct ishtp_msg_data - ISHTP message data struct
- * @size: Size of data in the *data
- * @data: Pointer to data
- */
-struct ishtp_msg_data {
- uint32_t size;
- unsigned char *data;
-};
-
-/*
- * struct ishtp_cl_rb - request block structure
- * @list: Link to list members
- * @cl: ISHTP client instance
- * @buffer: message header
- * @buf_idx: Index into buffer
- * @read_time: unused at this time
- */
-struct ishtp_cl_rb {
- struct list_head list;
- struct ishtp_cl *cl;
- struct ishtp_msg_data buffer;
- unsigned long buf_idx;
- unsigned long read_time;
-};
-
/*
* Control info for IPC messages ISHTP/IPC sending FIFO -
* list with inline data buffer
@@ -264,11 +238,6 @@ static inline int ish_ipc_reset(struct ishtp_device *dev)
return dev->ops->ipc_reset(dev);
}
-static inline int ish_hw_reset(struct ishtp_device *dev)
-{
- return dev->ops->hw_reset(dev);
-}
-
/* Exported function */
void ishtp_device_init(struct ishtp_device *dev);
int ishtp_start(struct ishtp_device *dev);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 840634e0f1e3..dbaead0a5371 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -632,7 +632,7 @@ static int uhid_char_open(struct inode *inode, struct file *file)
INIT_WORK(&uhid->worker, uhid_device_add_worker);
file->private_data = uhid;
- nonseekable_open(inode, file);
+ stream_open(inode, file);
return 0;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 62703b354d6d..3fc0b247a807 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -336,6 +336,8 @@ static struct vmbus_channel *alloc_channel(void)
tasklet_init(&channel->callback_event,
vmbus_on_event, (unsigned long)channel);
+ hv_ringbuffer_pre_init(channel);
+
return channel;
}
@@ -345,6 +347,7 @@ static struct vmbus_channel *alloc_channel(void)
static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
+ vmbus_remove_channel_attr_group(channel);
kobject_put(&channel->kobj);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 632d25674e7f..45653029ee18 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -408,7 +408,6 @@ int hv_synic_cleanup(unsigned int cpu)
clockevents_unbind_device(hv_cpu->clk_evt, cpu);
hv_ce_shutdown(hv_cpu->clk_evt);
- put_cpu_ptr(hv_cpu);
}
hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index cb86b133eb4d..e5467b821f41 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -193,6 +193,7 @@ extern void hv_synic_clockevents_cleanup(void);
/* Interface */
+void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
struct page *pages, u32 pagecnt);
@@ -321,6 +322,8 @@ void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
struct vmbus_channel *channel);
+void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);
+
struct vmbus_channel *relid2channel(u32 relid);
void vmbus_free_channels(void);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 9e8b31ccc142..121a01c43298 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -166,14 +166,18 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
}
/* Get various debug metrics for the specified ring buffer. */
-int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
- if (!ring_info->ring_buffer)
+ mutex_lock(&ring_info->ring_buffer_mutex);
+
+ if (!ring_info->ring_buffer) {
+ mutex_unlock(&ring_info->ring_buffer_mutex);
return -EINVAL;
+ }
hv_get_ringbuffer_availbytes(ring_info,
&bytes_avail_toread,
@@ -184,10 +188,19 @@ int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
debug_info->current_write_index = ring_info->ring_buffer->write_index;
debug_info->current_interrupt_mask
= ring_info->ring_buffer->interrupt_mask;
+ mutex_unlock(&ring_info->ring_buffer_mutex);
+
return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
+/* Initialize a channel's ring buffer info mutex locks */
+void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
+{
+ mutex_init(&channel->inbound.ring_buffer_mutex);
+ mutex_init(&channel->outbound.ring_buffer_mutex);
+}
+
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
struct page *pages, u32 page_cnt)
@@ -197,8 +210,6 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
- memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
-
/*
* First page holds struct hv_ring_buffer, do wraparound mapping for
* the rest.
@@ -232,6 +243,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
reciprocal_value(ring_info->ring_size / 10);
ring_info->ring_datasize = ring_info->ring_size -
sizeof(struct hv_ring_buffer);
+ ring_info->priv_read_index = 0;
spin_lock_init(&ring_info->ring_lock);
@@ -241,8 +253,10 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
+ mutex_lock(&ring_info->ring_buffer_mutex);
vunmap(ring_info->ring_buffer);
ring_info->ring_buffer = NULL;
+ mutex_unlock(&ring_info->ring_buffer_mutex);
}
/* Write to the ring buffer. */
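
Illustrative sketch only, not part of the patch: with ring_buffer_mutex now guarding the mapping, hv_ringbuffer_get_debuginfo() can legitimately fail with -EINVAL once the ring buffer has been torn down, so callers should propagate the error instead of assuming success. The attribute name below is hypothetical.

#include <linux/hyperv.h>

static ssize_t example_out_read_index_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info dbg;
	int ret;

	/* May return -EINVAL after hv_ringbuffer_cleanup() has run */
	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &dbg);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%u\n", dbg.current_read_index);
}
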
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 000b53e5a17a..aa25f3bcbdea 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -630,7 +630,36 @@ static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_driver_override.attr,
NULL,
};
-ATTRIBUTE_GROUPS(vmbus_dev);
+
+/*
+ * Device-level attribute_group callback function. Returns the permission for
+ * each attribute, and returns 0 if an attribute is not visible.
+ */
+static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ const struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ /* Hide the monitor attributes if the monitor mechanism is not used. */
+ if (!hv_dev->channel->offermsg.monitor_allocated &&
+ (attr == &dev_attr_monitor_id.attr ||
+ attr == &dev_attr_server_monitor_pending.attr ||
+ attr == &dev_attr_client_monitor_pending.attr ||
+ attr == &dev_attr_server_monitor_latency.attr ||
+ attr == &dev_attr_client_monitor_latency.attr ||
+ attr == &dev_attr_server_monitor_conn_id.attr ||
+ attr == &dev_attr_client_monitor_conn_id.attr))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group vmbus_dev_group = {
+ .attrs = vmbus_dev_attrs,
+ .is_visible = vmbus_dev_attr_is_visible
+};
+__ATTRIBUTE_GROUPS(vmbus_dev);
/*
* vmbus_uevent - add uevent for our device
@@ -1381,7 +1410,7 @@ static void vmbus_chan_release(struct kobject *kobj)
struct vmbus_chan_attribute {
struct attribute attr;
- ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
+ ssize_t (*show)(struct vmbus_channel *chan, char *buf);
ssize_t (*store)(struct vmbus_channel *chan,
const char *buf, size_t count);
};
@@ -1400,15 +1429,12 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
{
const struct vmbus_chan_attribute *attribute
= container_of(attr, struct vmbus_chan_attribute, attr);
- const struct vmbus_channel *chan
+ struct vmbus_channel *chan
= container_of(kobj, struct vmbus_channel, kobj);
if (!attribute->show)
return -EIO;
- if (chan->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
-
return attribute->show(chan, buf);
}
@@ -1416,45 +1442,81 @@ static const struct sysfs_ops vmbus_chan_sysfs_ops = {
.show = vmbus_chan_attr_show,
};
-static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
- const struct hv_ring_buffer_info *rbi = &channel->outbound;
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
- return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);
-static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
- const struct hv_ring_buffer_info *rbi = &channel->inbound;
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
- return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);
-static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
- const struct hv_ring_buffer_info *rbi = &channel->inbound;
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
- return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+ ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);
-static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
- const struct hv_ring_buffer_info *rbi = &channel->outbound;
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
- return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+ ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
-static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
+static ssize_t show_target_cpu(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
-static ssize_t channel_pending_show(const struct vmbus_channel *channel,
+static ssize_t channel_pending_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%d\n",
@@ -1463,7 +1525,7 @@ static ssize_t channel_pending_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
-static ssize_t channel_latency_show(const struct vmbus_channel *channel,
+static ssize_t channel_latency_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%d\n",
@@ -1472,19 +1534,19 @@ static ssize_t channel_latency_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
-static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
-static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
-static ssize_t channel_intr_in_full_show(const struct vmbus_channel *channel,
+static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -1492,7 +1554,7 @@ static ssize_t channel_intr_in_full_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
-static ssize_t channel_intr_out_empty_show(const struct vmbus_channel *channel,
+static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -1500,7 +1562,7 @@ static ssize_t channel_intr_out_empty_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
-static ssize_t channel_out_full_first_show(const struct vmbus_channel *channel,
+static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -1508,7 +1570,7 @@ static ssize_t channel_out_full_first_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
-static ssize_t channel_out_full_total_show(const struct vmbus_channel *channel,
+static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -1516,14 +1578,14 @@ static ssize_t channel_out_full_total_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
-static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
+static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);
-static ssize_t subchannel_id_show(const struct vmbus_channel *channel,
+static ssize_t subchannel_id_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%u\n",
@@ -1550,10 +1612,34 @@ static struct attribute *vmbus_chan_attrs[] = {
NULL
};
+/*
+ * Channel-level attribute_group callback function. Returns the permission for
+ * each attribute, and returns 0 if an attribute is not visible.
+ */
+static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ const struct vmbus_channel *channel =
+ container_of(kobj, struct vmbus_channel, kobj);
+
+ /* Hide the monitor attributes if the monitor mechanism is not used. */
+ if (!channel->offermsg.monitor_allocated &&
+ (attr == &chan_attr_pending.attr ||
+ attr == &chan_attr_latency.attr ||
+ attr == &chan_attr_monitor_id.attr))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group vmbus_chan_group = {
+ .attrs = vmbus_chan_attrs,
+ .is_visible = vmbus_chan_attr_is_visible
+};
+
static struct kobj_type vmbus_chan_ktype = {
.sysfs_ops = &vmbus_chan_sysfs_ops,
.release = vmbus_chan_release,
- .default_attrs = vmbus_chan_attrs,
};
/*
@@ -1561,6 +1647,7 @@ static struct kobj_type vmbus_chan_ktype = {
*/
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
+ const struct device *device = &dev->device;
struct kobject *kobj = &channel->kobj;
u32 relid = channel->offermsg.child_relid;
int ret;
@@ -1571,12 +1658,31 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
if (ret)
return ret;
+ ret = sysfs_create_group(kobj, &vmbus_chan_group);
+
+ if (ret) {
+ /*
+ * The calling functions' error handling paths will cleanup the
+ * empty channel directory.
+ */
+ dev_err(device, "Unable to set up channel sysfs files\n");
+ return ret;
+ }
+
kobject_uevent(kobj, KOBJ_ADD);
return 0;
}
/*
+ * vmbus_remove_channel_attr_group - remove the channel's attribute group
+ */
+void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
+{
+ sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
+}
+
+/*
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
*/
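
Illustrative sketch only, not part of the patch: the general idiom used by both vmbus groups above is to declare every attribute statically and let an .is_visible() callback hide the ones that do not apply, rather than creating sysfs files conditionally. All names below (example_*, has_feature) are placeholders.

#include <linux/device.h>
#include <linux/sysfs.h>

struct example_data {
	bool has_feature;
};

static ssize_t always_present_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(always_present);

static ssize_t optional_feature_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(optional_feature);

static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct example_data *data = dev_get_drvdata(dev);

	/* Returning 0 hides the attribute; attr->mode keeps its default mode */
	if (attr == &dev_attr_optional_feature.attr && !data->has_feature)
		return 0;

	return attr->mode;
}

static struct attribute *example_attrs[] = {
	&dev_attr_always_present.attr,
	&dev_attr_optional_feature.attr,
	NULL
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_attr_is_visible,
};
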
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f929bfa9fcd..1915a18b537b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -17,7 +17,7 @@ menuconfig HWMON
To find out which specific driver(s) you need, use the
sensors-detect script from the lm_sensors package. Read
- <file:Documentation/hwmon/userspace-tools> for details.
+ <file:Documentation/hwmon/userspace-tools.rst> for details.
This support can also be built as a module. If so, the module
will be called hwmon.
@@ -59,7 +59,7 @@ config SENSORS_ABITUGURU
chip can be found on Abit uGuru featuring motherboards (most modern
Abit motherboards from before end 2005). For more info and a list
of which motherboards have which revision see
- Documentation/hwmon/abituguru
+ Documentation/hwmon/abituguru.rst
This driver can also be built as a module. If so, the module
will be called abituguru.
@@ -73,7 +73,7 @@ config SENSORS_ABITUGURU3
and their settings is supported. The third revision of the Abit
uGuru chip can be found on recent Abit motherboards (since end
2005). For more info and a list of which motherboards have which
- revision see Documentation/hwmon/abituguru3
+ revision see Documentation/hwmon/abituguru3.rst
This driver can also be built as a module. If so, the module
will be called abituguru3.
@@ -643,7 +643,7 @@ config SENSORS_CORETEMP
help
If you say yes here you get support for the temperature
sensor inside your CPU. Most of the family 6 CPUs
- are supported. Check Documentation/hwmon/coretemp for details.
+ are supported. Check Documentation/hwmon/coretemp.rst for details.
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
@@ -705,6 +705,16 @@ config SENSORS_LINEAGE
This driver can also be built as a module. If so, the module
will be called lineage-pem.
+config SENSORS_LOCHNAGAR
+ tristate "Lochnagar Hardware Monitor"
+ depends on MFD_LOCHNAGAR
+ help
+ If you say yes here you get support for Lochnagar 2 temperature,
+ voltage and current sensing capabilities.
+
+ This driver can also be built as a module. If so, the module
+ will be called lochnagar-hwmon.
+
config SENSORS_LTC2945
tristate "Linear Technology LTC2945"
depends on I2C
@@ -898,6 +908,7 @@ config SENSORS_MAX6642
config SENSORS_MAX6650
tristate "Maxim MAX6650 sensor chip"
depends on I2C
+ depends on THERMAL || THERMAL=n
help
If you say yes here you get support for the MAX6650 / MAX6651
sensor chips.
@@ -1759,6 +1770,7 @@ config SENSORS_VT8231
config SENSORS_W83773G
tristate "Nuvoton W83773G"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the Nuvoton W83773G hardware
monitoring chip.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index f5c7b442e69e..8db472ea04f0 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
+obj-$(CONFIG_SENSORS_LOCHNAGAR) += lochnagar-hwmon.o
obj-$(CONFIG_SENSORS_LM63) += lm63.o
obj-$(CONFIG_SENSORS_LM70) += lm70.o
obj-$(CONFIG_SENSORS_LM73) += lm73.o
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index f13806d731fa..b176da8e92a7 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -215,7 +215,7 @@ static const struct i2c_device_id ad7414_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ad7414_id);
-static const struct of_device_id ad7414_of_match[] = {
+static const struct of_device_id __maybe_unused ad7414_of_match[] = {
{ .compatible = "ad,ad7414" },
{ },
};
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index ca794bf904de..e640be442dae 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -521,7 +521,7 @@ static const struct i2c_device_id adc128_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adc128_id);
-static const struct of_device_id adc128_of_match[] = {
+static const struct of_device_id __maybe_unused adc128_of_match[] = {
{ .compatible = "ti,adc128d818" },
{ },
};
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 1e4dad36f5ef..ec437cc9e739 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -174,7 +174,7 @@ static struct adm1025_data *adm1025_update_device(struct device *dev)
*/
static ssize_t
-show_in(struct device *dev, struct device_attribute *attr, char *buf)
+in_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -183,7 +183,7 @@ show_in(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_in_min(struct device *dev, struct device_attribute *attr, char *buf)
+in_min_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -192,7 +192,7 @@ show_in_min(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_in_max(struct device *dev, struct device_attribute *attr, char *buf)
+in_max_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -201,7 +201,7 @@ show_in_max(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp(struct device *dev, struct device_attribute *attr, char *buf)
+temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -209,7 +209,7 @@ show_temp(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp_min(struct device *dev, struct device_attribute *attr, char *buf)
+temp_min_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -217,15 +217,15 @@ show_temp_min(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp_max(struct device *dev, struct device_attribute *attr, char *buf)
+temp_max_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[index]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -245,8 +245,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -266,22 +266,28 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_in(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IWUSR | S_IRUGO, \
- show_in_max, set_in_max, offset)
-set_in(0);
-set_in(1);
-set_in(2);
-set_in(3);
-set_in(4);
-set_in(5);
-
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -301,8 +307,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -322,15 +329,12 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_temp(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \
- show_temp_max, set_temp_max, offset - 1)
-set_temp(1);
-set_temp(2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
static ssize_t
alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -341,21 +345,21 @@ alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(alarms);
static ssize_t
-show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, alarm, 14);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
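
Illustrative sketch only, not part of the patch: the conversion above relies on the SENSOR_DEVICE_ATTR_RO/RW macros from <linux/hwmon-sysfs.h>, which derive the callbacks from their second argument by appending _show and _store; this is why show_in()/set_in() were renamed to in_show()/in_store(). The example_* names and attribute names below are placeholders.

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* The channel index comes from the macro's third argument */
	return sprintf(buf, "%d\n", to_sensor_dev_attr(attr)->index);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	/* Parse and apply the value for to_sensor_dev_attr(attr)->index here */
	return count;
}

/* Creates "example1_input" (0444, example_show) and "example1_target"
 * (0644, example_show/example_store).
 */
static SENSOR_DEVICE_ATTR_RO(example1_input, example, 0);
static SENSOR_DEVICE_ATTR_RW(example1_target, example, 0);
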
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index e43f09a07cd0..d34a68a11036 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -477,24 +477,24 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
return data;
}
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -513,16 +513,16 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -542,48 +542,72 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define in_reg(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in, \
- NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-
-in_reg(0);
-in_reg(1);
-in_reg(2);
-in_reg(3);
-in_reg(4);
-in_reg(5);
-in_reg(6);
-in_reg(7);
-in_reg(8);
-in_reg(9);
-in_reg(10);
-in_reg(11);
-in_reg(12);
-in_reg(13);
-in_reg(14);
-in_reg(15);
-
-static ssize_t show_in16(struct device *dev, struct device_attribute *attr,
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
+static SENSOR_DEVICE_ATTR_RO(in9_input, in, 9);
+static SENSOR_DEVICE_ATTR_RW(in9_min, in_min, 9);
+static SENSOR_DEVICE_ATTR_RW(in9_max, in_max, 9);
+static SENSOR_DEVICE_ATTR_RO(in10_input, in, 10);
+static SENSOR_DEVICE_ATTR_RW(in10_min, in_min, 10);
+static SENSOR_DEVICE_ATTR_RW(in10_max, in_max, 10);
+static SENSOR_DEVICE_ATTR_RO(in11_input, in, 11);
+static SENSOR_DEVICE_ATTR_RW(in11_min, in_min, 11);
+static SENSOR_DEVICE_ATTR_RW(in11_max, in_max, 11);
+static SENSOR_DEVICE_ATTR_RO(in12_input, in, 12);
+static SENSOR_DEVICE_ATTR_RW(in12_min, in_min, 12);
+static SENSOR_DEVICE_ATTR_RW(in12_max, in_max, 12);
+static SENSOR_DEVICE_ATTR_RO(in13_input, in, 13);
+static SENSOR_DEVICE_ATTR_RW(in13_min, in_min, 13);
+static SENSOR_DEVICE_ATTR_RW(in13_max, in_max, 13);
+static SENSOR_DEVICE_ATTR_RO(in14_input, in, 14);
+static SENSOR_DEVICE_ATTR_RW(in14_min, in_min, 14);
+static SENSOR_DEVICE_ATTR_RW(in14_max, in_max, 14);
+static SENSOR_DEVICE_ATTR_RO(in15_input, in, 15);
+static SENSOR_DEVICE_ATTR_RW(in15_min, in_min, 15);
+static SENSOR_DEVICE_ATTR_RW(in15_max, in_max, 15);
+
+static ssize_t in16_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in[16]) -
NEG12_OFFSET);
}
-static ssize_t show_in16_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in16_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in_min[16])
- NEG12_OFFSET);
}
-static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in16_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -603,15 +627,16 @@ static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_in16_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in16_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in_max[16])
- NEG12_OFFSET);
}
-static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in16_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -632,17 +657,14 @@ static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in16, NULL, 16);
-static SENSOR_DEVICE_ATTR(in16_min, S_IRUGO | S_IWUSR, show_in16_min,
- set_in16_min, 16);
-static SENSOR_DEVICE_ATTR(in16_max, S_IRUGO | S_IWUSR, show_in16_max,
- set_in16_max, 16);
-
+static SENSOR_DEVICE_ATTR_RO(in16_input, in16, 16);
+static SENSOR_DEVICE_ATTR_RW(in16_min, in16_min, 16);
+static SENSOR_DEVICE_ATTR_RW(in16_max, in16_max, 16);
/* Now add fan read/write functions */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -650,8 +672,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
data->fan_div[nr]));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -659,8 +681,9 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -681,20 +704,22 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan, NULL, \
- offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1);
-
-fan_offset(1);
-fan_offset(2);
-fan_offset(3);
-fan_offset(4);
-fan_offset(5);
-fan_offset(6);
-fan_offset(7);
-fan_offset(8);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
+static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
+static SENSOR_DEVICE_ATTR_RO(fan5_input, fan, 4);
+static SENSOR_DEVICE_ATTR_RW(fan5_min, fan_min, 4);
+static SENSOR_DEVICE_ATTR_RO(fan6_input, fan, 5);
+static SENSOR_DEVICE_ATTR_RW(fan6_min, fan_min, 5);
+static SENSOR_DEVICE_ATTR_RO(fan7_input, fan, 6);
+static SENSOR_DEVICE_ATTR_RW(fan7_min, fan_min, 6);
+static SENSOR_DEVICE_ATTR_RO(fan8_input, fan, 7);
+static SENSOR_DEVICE_ATTR_RW(fan8_min, fan_min, 7);
/* Adjust fan_min to account for new fan divisor */
static void fixup_fan_min(struct device *dev, int fan, int old_div)
@@ -715,16 +740,17 @@ static void fixup_fan_min(struct device *dev, int fan, int old_div)
}
/* Now add fan_div read/write functions */
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", data->fan_div[nr]);
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -765,38 +791,35 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset_div(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-fan_offset_div(1);
-fan_offset_div(2);
-fan_offset_div(3);
-fan_offset_div(4);
-fan_offset_div(5);
-fan_offset_div(6);
-fan_offset_div(7);
-fan_offset_div(8);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
+static SENSOR_DEVICE_ATTR_RW(fan4_div, fan_div, 3);
+static SENSOR_DEVICE_ATTR_RW(fan5_div, fan_div, 4);
+static SENSOR_DEVICE_ATTR_RW(fan6_div, fan_div, 5);
+static SENSOR_DEVICE_ATTR_RW(fan7_div, fan_div, 6);
+static SENSOR_DEVICE_ATTR_RW(fan8_div, fan_div, 7);
/* Temps */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -816,16 +839,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -846,30 +870,27 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1);
-
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
-
-static ssize_t show_temp_offset(struct device *dev,
- struct device_attribute *attr, char *buf)
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+
+static ssize_t temp_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t set_temp_offset(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -890,16 +911,13 @@ static ssize_t set_temp_offset(struct device *dev,
return count;
}
-#define temp_offset_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, offset - 1);
-
-temp_offset_reg(1);
-temp_offset_reg(2);
-temp_offset_reg(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
-static ssize_t show_temp_auto_point1_temp_hyst(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point1_temp_hyst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -907,8 +925,9 @@ static ssize_t show_temp_auto_point1_temp_hyst(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(
ADM1026_FAN_ACTIVATION_TEMP_HYST + data->temp_tmin[nr]));
}
-static ssize_t show_temp_auto_point2_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point2_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -916,16 +935,18 @@ static ssize_t show_temp_auto_point2_temp(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr] +
ADM1026_FAN_CONTROL_TEMP_RANGE));
}
-static ssize_t show_temp_auto_point1_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point1_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr]));
}
-static ssize_t set_temp_auto_point1_temp(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_point1_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -946,18 +967,18 @@ static ssize_t set_temp_auto_point1_temp(struct device *dev,
return count;
}
-#define temp_auto_point(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point1_temp, \
- S_IRUGO | S_IWUSR, show_temp_auto_point1_temp, \
- set_temp_auto_point1_temp, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point1_temp_hyst, S_IRUGO,\
- show_temp_auto_point1_temp_hyst, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point2_temp, S_IRUGO, \
- show_temp_auto_point2_temp, NULL, offset - 1);
-
-temp_auto_point(1);
-temp_auto_point(2);
-temp_auto_point(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_point1_temp, temp_auto_point1_temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_auto_point2_temp, temp_auto_point2_temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_point1_temp, temp_auto_point1_temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_auto_point2_temp, temp_auto_point2_temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_point1_temp, temp_auto_point1_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_auto_point2_temp, temp_auto_point2_temp, 2);
static ssize_t show_temp_crit_enable(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -988,24 +1009,24 @@ static ssize_t set_temp_crit_enable(struct device *dev,
return count;
}
-#define temp_crit_enable(offset) \
-static DEVICE_ATTR(temp##offset##_crit_enable, S_IRUGO | S_IWUSR, \
- show_temp_crit_enable, set_temp_crit_enable);
+static DEVICE_ATTR(temp1_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
+static DEVICE_ATTR(temp2_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
+static DEVICE_ATTR(temp3_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
-temp_crit_enable(1);
-temp_crit_enable(2);
-temp_crit_enable(3);
-
-static ssize_t show_temp_crit(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_crit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr]));
}
-static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_crit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -1026,13 +1047,9 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_crit_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO | S_IWUSR, \
- show_temp_crit, set_temp_crit, offset - 1);
-
-temp_crit_reg(1);
-temp_crit_reg(2);
-temp_crit_reg(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp_crit, 2);
static ssize_t analog_out_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1110,7 +1127,7 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
@@ -1118,34 +1135,34 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%ld\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(in14_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(in15_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(in16_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 18);
-static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 19);
-static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 20);
-static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 21);
-static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL, 22);
-static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_alarm, NULL, 23);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 24);
-static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 25);
-static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 26);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in9_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in11_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in12_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in13_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(in14_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(in15_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(in16_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 13);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 18);
+static SENSOR_DEVICE_ATTR_RO(fan4_alarm, alarm, 19);
+static SENSOR_DEVICE_ATTR_RO(fan5_alarm, alarm, 20);
+static SENSOR_DEVICE_ATTR_RO(fan6_alarm, alarm, 21);
+static SENSOR_DEVICE_ATTR_RO(fan7_alarm, alarm, 22);
+static SENSOR_DEVICE_ATTR_RO(fan8_alarm, alarm, 23);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 24);
+static SENSOR_DEVICE_ATTR_RO(in10_alarm, alarm, 25);
+static SENSOR_DEVICE_ATTR_RO(in8_alarm, alarm, 26);
static ssize_t alarm_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1188,7 +1205,6 @@ static ssize_t alarm_mask_store(struct device *dev,
static DEVICE_ATTR_RW(alarm_mask);
-
static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1371,23 +1387,23 @@ static ssize_t pwm1_enable_store(struct device *dev,
/* enable PWM fan control */
static DEVICE_ATTR_RW(pwm1);
-static DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
-static DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
+static DEVICE_ATTR(pwm2, 0644, pwm1_show, pwm1_store);
+static DEVICE_ATTR(pwm3, 0644, pwm1_show, pwm1_store);
static DEVICE_ATTR_RW(pwm1_enable);
-static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+static DEVICE_ATTR(pwm2_enable, 0644, pwm1_enable_show,
pwm1_enable_store);
-static DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+static DEVICE_ATTR(pwm3_enable, 0644, pwm1_enable_show,
pwm1_enable_store);
static DEVICE_ATTR_RW(temp1_auto_point1_pwm);
-static DEVICE_ATTR(temp2_auto_point1_pwm, S_IRUGO | S_IWUSR,
- temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
-static DEVICE_ATTR(temp3_auto_point1_pwm, S_IRUGO | S_IWUSR,
- temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
+static DEVICE_ATTR(temp2_auto_point1_pwm, 0644,
+ temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
+static DEVICE_ATTR(temp3_auto_point1_pwm, 0644,
+ temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
static DEVICE_ATTR_RO(temp1_auto_point2_pwm);
-static DEVICE_ATTR(temp2_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+static DEVICE_ATTR(temp2_auto_point2_pwm, 0444, temp1_auto_point2_pwm_show,
NULL);
-static DEVICE_ATTR(temp3_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+static DEVICE_ATTR(temp3_auto_point2_pwm, 0444, temp1_auto_point2_pwm_show,
NULL);
static struct attribute *adm1026_attributes[] = {
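The conversions above work because SENSOR_DEVICE_ATTR_RO()/_RW() derive the callback names from their second argument, which is why the show_*/set_* handlers are renamed to the *_show/*_store form first. A minimal sketch of the two macros, approximately as defined in <linux/hwmon-sysfs.h> at the time of this series (illustrative, not authoritative):

#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index)		\
	SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)

#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index)		\
	SENSOR_DEVICE_ATTR(_name, 0644, _func##_show,		\
			   _func##_store, _index)

For example, SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0) expands to a 0644 attribute backed by temp_crit_show()/temp_crit_store() with index 0, matching the handlers renamed above.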
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index e561279aea21..388060ff85e7 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -166,7 +166,7 @@ static struct adm1029_data *adm1029_update_device(struct device *dev)
*/
static ssize_t
-show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -175,7 +175,7 @@ show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_fan(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -193,7 +193,7 @@ show_fan(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -203,8 +203,9 @@ show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adm1029_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -254,26 +255,26 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *devattr,
}
/* Access rights on sysfs. */
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, 0444, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, 0444, show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static SENSOR_DEVICE_ATTR(temp1_max, 0444, show_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_max, 0444, show_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp3_max, 0444, show_temp, NULL, 5);
+static SENSOR_DEVICE_ATTR_RO(temp1_max, temp, 3);
+static SENSOR_DEVICE_ATTR_RO(temp2_max, temp, 4);
+static SENSOR_DEVICE_ATTR_RO(temp3_max, temp, 5);
-static SENSOR_DEVICE_ATTR(temp1_min, 0444, show_temp, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp2_min, 0444, show_temp, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp3_min, 0444, show_temp, NULL, 8);
+static SENSOR_DEVICE_ATTR_RO(temp1_min, temp, 6);
+static SENSOR_DEVICE_ATTR_RO(temp2_min, temp, 7);
+static SENSOR_DEVICE_ATTR_RO(temp3_min, temp, 8);
-static SENSOR_DEVICE_ATTR(fan1_input, 0444, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, 0444, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
-static SENSOR_DEVICE_ATTR(fan1_min, 0444, show_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan2_min, 0444, show_fan, NULL, 3);
+static SENSOR_DEVICE_ATTR_RO(fan1_min, fan, 2);
+static SENSOR_DEVICE_ATTR_RO(fan2_min, fan, 3);
-static SENSOR_DEVICE_ATTR(fan1_div, 0644, show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, 0644, show_fan_div, set_fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static struct attribute *adm1029_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index bcf508269fd6..6b46d8eaa775 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -331,7 +331,7 @@ get_fan_auto_nearest(struct adm1031_data *data, int chan, u8 val, u8 reg)
return -EINVAL;
}
-static ssize_t show_fan_auto_channel(struct device *dev,
+static ssize_t fan_auto_channel_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -340,8 +340,8 @@ static ssize_t show_fan_auto_channel(struct device *dev,
}
static ssize_t
-set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+fan_auto_channel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -392,13 +392,11 @@ set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(auto_fan1_channel, S_IRUGO | S_IWUSR,
- show_fan_auto_channel, set_fan_auto_channel, 0);
-static SENSOR_DEVICE_ATTR(auto_fan2_channel, S_IRUGO | S_IWUSR,
- show_fan_auto_channel, set_fan_auto_channel, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_fan1_channel, fan_auto_channel, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_fan2_channel, fan_auto_channel, 1);
/* Auto Temps */
-static ssize_t show_auto_temp_off(struct device *dev,
+static ssize_t auto_temp_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -406,7 +404,7 @@ static ssize_t show_auto_temp_off(struct device *dev,
return sprintf(buf, "%d\n",
AUTO_TEMP_OFF_FROM_REG(data->auto_temp[nr]));
}
-static ssize_t show_auto_temp_min(struct device *dev,
+static ssize_t auto_temp_min_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -415,8 +413,8 @@ static ssize_t show_auto_temp_min(struct device *dev,
AUTO_TEMP_MIN_FROM_REG(data->auto_temp[nr]));
}
static ssize_t
-set_auto_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+auto_temp_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -436,7 +434,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_auto_temp_max(struct device *dev,
+static ssize_t auto_temp_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -445,8 +443,8 @@ static ssize_t show_auto_temp_max(struct device *dev,
AUTO_TEMP_MAX_FROM_REG(data->auto_temp[nr]));
}
static ssize_t
-set_auto_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+auto_temp_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -468,28 +466,26 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define auto_temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_off, S_IRUGO, \
- show_auto_temp_off, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_auto_temp_min, set_auto_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_auto_temp_max, set_auto_temp_max, offset - 1)
-
-auto_temp_reg(1);
-auto_temp_reg(2);
-auto_temp_reg(3);
+static SENSOR_DEVICE_ATTR_RO(auto_temp1_off, auto_temp_off, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_temp1_min, auto_temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_temp1_max, auto_temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(auto_temp2_off, auto_temp_off, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_temp2_min, auto_temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_temp2_max, auto_temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(auto_temp3_off, auto_temp_off, 2);
+static SENSOR_DEVICE_ATTR_RW(auto_temp3_min, auto_temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(auto_temp3_max, auto_temp_max, 2);
/* pwm */
-static ssize_t show_pwm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[nr]));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -517,12 +513,10 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
-static SENSOR_DEVICE_ATTR(auto_fan1_min_pwm, S_IRUGO | S_IWUSR,
- show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(auto_fan2_min_pwm, S_IRUGO | S_IWUSR,
- show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_fan1_min_pwm, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_fan2_min_pwm, pwm, 1);
/* Fans */
@@ -572,9 +566,8 @@ static int trust_fan_readings(struct adm1031_data *data, int chan)
return res;
}
-
-static ssize_t show_fan(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -585,15 +578,15 @@ static ssize_t show_fan(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", FAN_DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -601,8 +594,9 @@ static ssize_t show_fan_min(struct device *dev,
FAN_FROM_REG(data->fan_min[nr],
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -625,8 +619,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -673,21 +668,16 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1)
-
-fan_offset(1);
-fan_offset(2);
-
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Temps */
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -697,7 +687,7 @@ static ssize_t show_temp(struct device *dev,
(((data->ext_temp[nr] >> ((nr - 1) * 3)) & 7));
return sprintf(buf, "%d\n", TEMP_FROM_REG_EXT(data->temp[nr], ext));
}
-static ssize_t show_temp_offset(struct device *dev,
+static ssize_t temp_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -705,30 +695,30 @@ static ssize_t show_temp_offset(struct device *dev,
return sprintf(buf, "%d\n",
TEMP_OFFSET_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t show_temp_min(struct device *dev,
+static ssize_t temp_min_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t show_temp_max(struct device *dev,
+static ssize_t temp_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t show_temp_crit(struct device *dev,
+static ssize_t temp_crit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr]));
}
-static ssize_t set_temp_offset(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -748,8 +738,9 @@ static ssize_t set_temp_offset(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -769,8 +760,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -790,8 +782,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_crit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -812,21 +805,21 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO | S_IWUSR, \
- show_temp_crit, set_temp_crit, offset - 1)
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp_crit, 2);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -838,29 +831,29 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", (data->alarm >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_fault, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(fan2_fault, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(temp3_min_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 13);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 14);
/* Update Interval */
static const unsigned int update_intervals[] = {
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 255413fdbde9..000b20f1db71 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -269,16 +269,16 @@ static ssize_t temp1_input_show(struct device *dev,
return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
-static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t max_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%d\n", data->temp_max[attr->index] * 1000);
}
-static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t max_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -299,14 +299,12 @@ static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
}
static DEVICE_ATTR_RO(temp1_input);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
- show_max, set_max, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
- show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, max, 1);
/* voltage */
-static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -314,8 +312,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
attr->index));
}
-static ssize_t show_in_min(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t in_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -323,8 +321,8 @@ static ssize_t show_in_min(struct device *dev,
attr->index));
}
-static ssize_t show_in_max(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t in_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -332,9 +330,9 @@ static ssize_t show_in_max(struct device *dev,
attr->index));
}
-static ssize_t set_in_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -354,9 +352,9 @@ static ssize_t set_in_min(struct device *dev,
return count;
}
-static ssize_t set_in_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -376,24 +374,28 @@ static ssize_t set_in_max(struct device *dev,
return count;
}
-#define vin(nr) \
-static SENSOR_DEVICE_ATTR(in##nr##_input, S_IRUGO, \
- show_in, NULL, nr); \
-static SENSOR_DEVICE_ATTR(in##nr##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, nr); \
-static SENSOR_DEVICE_ATTR(in##nr##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, nr);
-
-vin(0);
-vin(1);
-vin(2);
-vin(3);
-vin(4);
-vin(5);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
/* fans */
-static ssize_t show_fan(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -401,8 +403,8 @@ static ssize_t show_fan(struct device *dev,
1 << data->fan_div[attr->index]));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -410,8 +412,8 @@ static ssize_t show_fan_min(struct device *dev,
1 << data->fan_div[attr->index]));
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -429,9 +431,9 @@ static ssize_t show_fan_div(struct device *dev,
* - otherwise: select fan clock divider to suit fan speed low limit,
* measurement code may adjust registers to ensure fan speed reading
*/
-static ssize_t set_fan_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -489,16 +491,12 @@ static ssize_t set_fan_min(struct device *dev,
return count;
}
-#define fan(nr) \
-static SENSOR_DEVICE_ATTR(fan##nr##_input, S_IRUGO, \
- show_fan, NULL, nr - 1); \
-static SENSOR_DEVICE_ATTR(fan##nr##_div, S_IRUGO, \
- show_fan_div, NULL, nr - 1); \
-static SENSOR_DEVICE_ATTR(fan##nr##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, nr - 1);
-
-fan(1);
-fan(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan2_div, fan_div, 1);
/* alarms */
static ssize_t alarms_show(struct device *dev,
@@ -509,22 +507,22 @@ static ssize_t alarms_show(struct device *dev,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
/* vid */
static ssize_t cpu0_vid_show(struct device *dev,
@@ -564,9 +562,8 @@ static ssize_t aout_output_store(struct device *dev,
}
static DEVICE_ATTR_RW(aout_output);
-static ssize_t chassis_clear(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t alarm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm9240_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -583,8 +580,7 @@ static ssize_t chassis_clear(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm,
- chassis_clear, 12);
+static SENSOR_DEVICE_ATTR_RW(intrusion0_alarm, alarm, 12);
static struct attribute *adm9240_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
@@ -632,7 +628,6 @@ static struct attribute *adm9240_attrs[] = {
ATTRIBUTE_GROUPS(adm9240);
-
/*** sensor chip detect and driver install ***/
/* Return 0 if detection is successful, -ENODEV otherwise */
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index c21b0529adb2..412ab7015d75 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -307,7 +307,7 @@ static const struct i2c_device_id ads1015_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
-static const struct of_device_id ads1015_of_match[] = {
+static const struct of_device_id __maybe_unused ads1015_of_match[] = {
{
.compatible = "ti,ads1015",
.data = (void *)ads1015
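The __maybe_unused annotation added to this and the other OF match tables below accounts for builds with CONFIG_OF disabled: the table is typically referenced only through of_match_ptr(), which then evaluates to NULL and leaves the table unreferenced, triggering a -Wunused warning. A hedged sketch of the general pattern (the "foo"/"vendor,foo" names are hypothetical):

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id __maybe_unused foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct i2c_driver foo_driver = {
	.driver = {
		.name		= "foo",
		/* of_match_ptr() is NULL when CONFIG_OF=n */
		.of_match_table	= of_match_ptr(foo_of_match),
	},
};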
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 12c56d3783ed..03d6e782777a 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -8,7 +8,7 @@
*
* ADS7830 support, by Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
*
- * For further information, see the Documentation/hwmon/ads7828 file.
+ * For further information, see the Documentation/hwmon/ads7828.rst file.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -200,7 +200,7 @@ static const struct i2c_device_id ads7828_device_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, ads7828_device_ids);
-static const struct of_device_id ads7828_of_match[] = {
+static const struct of_device_id __maybe_unused ads7828_of_match[] = {
{
.compatible = "ti,ads7828",
.data = (void *)ads7828
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index b939f8a115ba..44a827b031cb 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -639,40 +639,22 @@ static int adt7411_init_device(struct adt7411_data *data)
return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
}
-static const u32 adt7411_in_config[] = {
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info adt7411_in = {
- .type = hwmon_in,
- .config = adt7411_in_config,
-};
-
-static const u32 adt7411_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
- HWMON_T_MAX | HWMON_T_MAX_ALARM,
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
- HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info adt7411_temp = {
- .type = hwmon_temp,
- .config = adt7411_temp_config,
-};
-
static const struct hwmon_channel_info *adt7411_info[] = {
- &adt7411_in,
- &adt7411_temp,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_FAULT),
NULL
};
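This adt7411 hunk (and the ina3221, jc42 and lm75 hunks further down) replaces the open-coded channel-config arrays and hwmon_channel_info objects with the HWMON_CHANNEL_INFO() helper, which builds the same pair as compound literals. A sketch of the macro, approximately as defined in <linux/hwmon.h>:

#define HWMON_CHANNEL_INFO(stype, ...)			\
	(&(struct hwmon_channel_info) {			\
		.type = hwmon_##stype,			\
		.config = (u32 []) {			\
			__VA_ARGS__, 0			\
		}					\
	})

The trailing 0 reproduces the terminator that the removed *_config[] arrays carried explicitly.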
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 0dbb8df74e44..7caec127df86 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -168,7 +168,7 @@ static const struct i2c_device_id adt7475_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adt7475_id);
-static const struct of_device_id adt7475_of_match[] = {
+static const struct of_device_id __maybe_unused adt7475_of_match[] = {
{
.compatible = "adi,adt7473",
.data = (void *)adt7473
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index c4dd6301e7c8..0daf0b32aa4a 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -830,10 +830,8 @@ static int aspeed_create_pwm_cooling(struct device *dev,
}
snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%pOFn%d", child, pwm_port);
- cdev->tcdev = thermal_of_cooling_device_register(child,
- cdev->name,
- cdev,
- &aspeed_pwm_cool_ops);
+ cdev->tcdev = devm_thermal_of_cooling_device_register(dev, child,
+ cdev->name, cdev, &aspeed_pwm_cool_ops);
if (IS_ERR(cdev->tcdev))
return PTR_ERR(cdev->tcdev);
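devm_thermal_of_cooling_device_register() is the device-managed variant of thermal_of_cooling_device_register(): the cooling device is unregistered automatically when the owning device is unbound, so no explicit unregister call is needed (the gpio-fan conversion below relies on the same device-managed cleanup). Its prototype, quoted from memory and best verified against <linux/thermal.h>:

struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
					struct device_node *np,
					char *type, void *devdata,
					const struct thermal_cooling_device_ops *ops);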
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 73c681162653..623736d2a7c1 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -96,17 +96,23 @@ superio_select(int base, int ld)
outb(ld, base + 1);
}
-static inline void
+static inline int
superio_enter(int base)
{
+ if (!request_muxed_region(base, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, base);
outb(0x87, base);
+
+ return 0;
}
static inline void
superio_exit(int base)
{
outb(0xaa, base);
+ release_region(base, 2);
}
/*
@@ -1561,7 +1567,7 @@ exit:
static int __init f71805f_find(int sioaddr, unsigned short *address,
struct f71805f_sio_data *sio_data)
{
- int err = -ENODEV;
+ int err;
u16 devid;
static const char * const names[] = {
@@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
"F71872F/FG or F71806F/FG",
};
- superio_enter(sioaddr);
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
+ err = -ENODEV;
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID)
goto exit;
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 042a166e1858..8fb54079fac8 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -837,7 +837,7 @@ static int watchdog_open(struct inode *inode, struct file *filp)
watchdog_trigger(data);
filp->private_data = data;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int watchdog_release(struct inode *inode, struct file *filp)
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index f1bf67aca9e8..3f6e5b4e3997 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -498,6 +498,11 @@ static const struct of_device_id of_gpio_fan_match[] = {
};
MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
+static void gpio_fan_stop(void *data)
+{
+ set_fan_speed(data, 0);
+}
+
static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
@@ -532,6 +537,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
err = fan_ctrl_init(fan_data);
if (err)
return err;
+ devm_add_action_or_reset(dev, gpio_fan_stop, fan_data);
}
/* Make this driver part of hwmon class. */
@@ -543,32 +549,20 @@ static int gpio_fan_probe(struct platform_device *pdev)
return PTR_ERR(fan_data->hwmon_dev);
/* Optional cooling device register for Device tree platforms */
- fan_data->cdev = thermal_of_cooling_device_register(np,
- "gpio-fan",
- fan_data,
- &gpio_fan_cool_ops);
+ fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
+ "gpio-fan", fan_data, &gpio_fan_cool_ops);
dev_info(dev, "GPIO fan initialized\n");
return 0;
}
-static int gpio_fan_remove(struct platform_device *pdev)
+static void gpio_fan_shutdown(struct platform_device *pdev)
{
struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
- if (!IS_ERR(fan_data->cdev))
- thermal_cooling_device_unregister(fan_data->cdev);
-
if (fan_data->gpios)
set_fan_speed(fan_data, 0);
-
- return 0;
-}
-
-static void gpio_fan_shutdown(struct platform_device *pdev)
-{
- gpio_fan_remove(pdev);
}
#ifdef CONFIG_PM_SLEEP
@@ -602,7 +596,6 @@ static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
- .remove = gpio_fan_remove,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index d167fcfec765..2bf9599b34a1 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -252,7 +252,7 @@ static const struct i2c_device_id hih6130_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hih6130_id);
-static const struct of_device_id hih6130_of_match[] = {
+static const struct of_device_id __maybe_unused hih6130_of_match[] = {
{ .compatible = "honeywell,hih6130", },
{ }
};
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c22dc1e07911..cd91510a5387 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -324,6 +324,11 @@ static const char * const hwmon_chip_attrs[] = {
[hwmon_chip_power_reset_history] = "power_reset_history",
[hwmon_chip_update_interval] = "update_interval",
[hwmon_chip_alarms] = "alarms",
+ [hwmon_chip_samples] = "samples",
+ [hwmon_chip_curr_samples] = "curr_samples",
+ [hwmon_chip_in_samples] = "in_samples",
+ [hwmon_chip_power_samples] = "power_samples",
+ [hwmon_chip_temp_samples] = "temp_samples",
};
static const char * const hwmon_temp_attr_templates[] = {
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 5c3c08449de7..1770423f7a80 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -92,6 +92,9 @@ static int iio_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < st->num_channels; i++) {
+ const char *prefix;
+ int n;
+
a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
if (a == NULL)
return -ENOMEM;
@@ -103,28 +106,28 @@ static int iio_hwmon_probe(struct platform_device *pdev)
switch (type) {
case IIO_VOLTAGE:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "in%d_input",
- in_i++);
+ n = in_i++;
+ prefix = "in";
break;
case IIO_TEMP:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "temp%d_input",
- temp_i++);
+ n = temp_i++;
+ prefix = "temp";
break;
case IIO_CURRENT:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "curr%d_input",
- curr_i++);
+ n = curr_i++;
+ prefix = "curr";
break;
case IIO_HUMIDITYRELATIVE:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "humidity%d_input",
- humidity_i++);
+ n = humidity_i++;
+ prefix = "humidity";
break;
default:
return -EINVAL;
}
+
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%d_input",
+ prefix, n);
if (a->dev_attr.attr.name == NULL)
return -ENOMEM;
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index e3854463db84..6a4ec2f2ddb2 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -587,7 +587,7 @@ static const struct i2c_device_id ina209_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ina209_id);
-static const struct of_device_id ina209_of_match[] = {
+static const struct of_device_id __maybe_unused ina209_of_match[] = {
{ .compatible = "ti,ina209" },
{ },
};
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 290379c49be9..42df51f8cdf2 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -507,7 +507,7 @@ static const struct i2c_device_id ina2xx_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
-static const struct of_device_id ina2xx_of_match[] = {
+static const struct of_device_id __maybe_unused ina2xx_of_match[] = {
{
.compatible = "ti,ina219",
.data = (void *)ina219
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 3626b87a5fd2..e0637fed9585 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/util_macros.h>
#define INA3221_DRIVER_NAME "ina3221"
@@ -51,6 +52,9 @@
#define INA3221_CONFIG_VBUS_CT_SHIFT 6
#define INA3221_CONFIG_VBUS_CT_MASK GENMASK(8, 6)
#define INA3221_CONFIG_VBUS_CT(x) (((x) & GENMASK(8, 6)) >> 6)
+#define INA3221_CONFIG_AVG_SHIFT 9
+#define INA3221_CONFIG_AVG_MASK GENMASK(11, 9)
+#define INA3221_CONFIG_AVG(x) (((x) & GENMASK(11, 9)) >> 9)
#define INA3221_CONFIG_CHs_EN_MASK GENMASK(14, 12)
#define INA3221_CONFIG_CHx_EN(x) BIT(14 - (x))
@@ -135,17 +139,42 @@ static const u16 ina3221_conv_time[] = {
140, 204, 332, 588, 1100, 2116, 4156, 8244,
};
-static inline int ina3221_wait_for_data(struct ina3221_data *ina)
+/* Lookup table for the number of samples used in averaging mode */
+static const int ina3221_avg_samples[] = {
+ 1, 4, 16, 64, 128, 256, 512, 1024,
+};
+
+/* Converting update_interval in msec to conversion time in usec */
+static inline u32 ina3221_interval_ms_to_conv_time(u16 config, int interval)
+{
+ u32 channels = hweight16(config & INA3221_CONFIG_CHs_EN_MASK);
+ u32 samples_idx = INA3221_CONFIG_AVG(config);
+ u32 samples = ina3221_avg_samples[samples_idx];
+
+	/* Split the result evenly between Bus and Shunt conversion times */
+ return DIV_ROUND_CLOSEST(interval * 1000 / 2, channels * samples);
+}
+
+/* Converting CONFIG register value to update_interval in usec */
+static inline u32 ina3221_reg_to_interval_us(u16 config)
{
- u32 channels = hweight16(ina->reg_config & INA3221_CONFIG_CHs_EN_MASK);
- u32 vbus_ct_idx = INA3221_CONFIG_VBUS_CT(ina->reg_config);
- u32 vsh_ct_idx = INA3221_CONFIG_VSH_CT(ina->reg_config);
+ u32 channels = hweight16(config & INA3221_CONFIG_CHs_EN_MASK);
+ u32 vbus_ct_idx = INA3221_CONFIG_VBUS_CT(config);
+ u32 vsh_ct_idx = INA3221_CONFIG_VSH_CT(config);
+ u32 samples_idx = INA3221_CONFIG_AVG(config);
+ u32 samples = ina3221_avg_samples[samples_idx];
u32 vbus_ct = ina3221_conv_time[vbus_ct_idx];
u32 vsh_ct = ina3221_conv_time[vsh_ct_idx];
- u32 wait, cvrf;
/* Calculate total conversion time */
- wait = channels * (vbus_ct + vsh_ct);
+ return channels * (vbus_ct + vsh_ct) * samples;
+}
+
+static inline int ina3221_wait_for_data(struct ina3221_data *ina)
+{
+ u32 wait, cvrf;
+
+ wait = ina3221_reg_to_interval_us(ina->reg_config);
/* Polling the CVRF bit to make sure read data is ready */
return regmap_field_read_poll_timeout(ina->fields[F_CVRF],
@@ -176,6 +205,26 @@ static const u8 ina3221_in_reg[] = {
INA3221_SHUNT3,
};
+static int ina3221_read_chip(struct device *dev, u32 attr, long *val)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ int regval;
+
+ switch (attr) {
+ case hwmon_chip_samples:
+ regval = INA3221_CONFIG_AVG(ina->reg_config);
+ *val = ina3221_avg_samples[regval];
+ return 0;
+ case hwmon_chip_update_interval:
+ /* Return in msec */
+ *val = ina3221_reg_to_interval_us(ina->reg_config);
+ *val = DIV_ROUND_CLOSEST(*val, 1000);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
{
const bool is_shunt = channel > INA3221_CHANNEL3;
@@ -279,6 +328,48 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
}
}
+static int ina3221_write_chip(struct device *dev, u32 attr, long val)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ int ret, idx;
+ u32 tmp;
+
+ switch (attr) {
+ case hwmon_chip_samples:
+ idx = find_closest(val, ina3221_avg_samples,
+ ARRAY_SIZE(ina3221_avg_samples));
+
+ tmp = (ina->reg_config & ~INA3221_CONFIG_AVG_MASK) |
+ (idx << INA3221_CONFIG_AVG_SHIFT);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
+ if (ret)
+ return ret;
+
+ /* Update reg_config accordingly */
+ ina->reg_config = tmp;
+ return 0;
+ case hwmon_chip_update_interval:
+ tmp = ina3221_interval_ms_to_conv_time(ina->reg_config, val);
+ idx = find_closest(tmp, ina3221_conv_time,
+ ARRAY_SIZE(ina3221_conv_time));
+
+ /* Update Bus and Shunt voltage conversion times */
+ tmp = INA3221_CONFIG_VBUS_CT_MASK | INA3221_CONFIG_VSH_CT_MASK;
+ tmp = (ina->reg_config & ~tmp) |
+ (idx << INA3221_CONFIG_VBUS_CT_SHIFT) |
+ (idx << INA3221_CONFIG_VSH_CT_SHIFT);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
+ if (ret)
+ return ret;
+
+ /* Update reg_config accordingly */
+ ina->reg_config = tmp;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ina3221_write_curr(struct device *dev, u32 attr,
int channel, long val)
{
@@ -309,6 +400,7 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable)
struct ina3221_data *ina = dev_get_drvdata(dev);
u16 config, mask = INA3221_CONFIG_CHx_EN(channel);
u16 config_old = ina->reg_config & mask;
+ u32 tmp;
int ret;
config = enable ? mask : 0;
@@ -327,14 +419,13 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable)
}
/* Enable or disable the channel */
- ret = regmap_update_bits(ina->regmap, INA3221_CONFIG, mask, config);
+ tmp = (ina->reg_config & ~mask) | (config & mask);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
if (ret)
goto fail;
/* Cache the latest config register value */
- ret = regmap_read(ina->regmap, INA3221_CONFIG, &ina->reg_config);
- if (ret)
- goto fail;
+ ina->reg_config = tmp;
/* For disabling routine, decrease refcount or suspend() at last */
if (!enable)
@@ -361,6 +452,9 @@ static int ina3221_read(struct device *dev, enum hwmon_sensor_types type,
mutex_lock(&ina->lock);
switch (type) {
+ case hwmon_chip:
+ ret = ina3221_read_chip(dev, attr, val);
+ break;
case hwmon_in:
/* 0-align channel ID */
ret = ina3221_read_in(dev, attr, channel - 1, val);
@@ -387,6 +481,9 @@ static int ina3221_write(struct device *dev, enum hwmon_sensor_types type,
mutex_lock(&ina->lock);
switch (type) {
+ case hwmon_chip:
+ ret = ina3221_write_chip(dev, attr, val);
+ break;
case hwmon_in:
/* 0-align channel ID */
ret = ina3221_write_enable(dev, channel - 1, val);
@@ -423,6 +520,14 @@ static umode_t ina3221_is_visible(const void *drvdata,
const struct ina3221_input *input = NULL;
switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_samples:
+ case hwmon_chip_update_interval:
+ return 0644;
+ default:
+ return 0;
+ }
case hwmon_in:
/* Ignore in0_ */
if (channel == 0)
@@ -458,44 +563,29 @@ static umode_t ina3221_is_visible(const void *drvdata,
}
}
-static const u32 ina3221_in_config[] = {
- /* 0: dummy, skipped in is_visible */
- HWMON_I_INPUT,
- /* 1-3: input voltage Channels */
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- /* 4-6: shunt voltage Channels */
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ina3221_in = {
- .type = hwmon_in,
- .config = ina3221_in_config,
-};
-
#define INA3221_HWMON_CURR_CONFIG (HWMON_C_INPUT | \
HWMON_C_CRIT | HWMON_C_CRIT_ALARM | \
HWMON_C_MAX | HWMON_C_MAX_ALARM)
-static const u32 ina3221_curr_config[] = {
- INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG,
- 0
-};
-
-static const struct hwmon_channel_info ina3221_curr = {
- .type = hwmon_curr,
- .config = ina3221_curr_config,
-};
-
static const struct hwmon_channel_info *ina3221_info[] = {
- &ina3221_in,
- &ina3221_curr,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_SAMPLES,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(in,
+ /* 0: dummy, skipped in is_visible */
+ HWMON_I_INPUT,
+ /* 1-3: input voltage Channels */
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ /* 4-6: shunt voltage Channels */
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(curr,
+ INA3221_HWMON_CURR_CONFIG,
+ INA3221_HWMON_CURR_CONFIG,
+ INA3221_HWMON_CURR_CONFIG),
NULL
};
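As a rough worked example of the conversion-time arithmetic introduced above (numbers illustrative, not taken from the patch): with all three channels enabled, 1100 us bus and shunt conversion times and 4-sample averaging, ina3221_reg_to_interval_us() yields 3 * (1100 + 1100) * 4 = 26400 us, which ina3221_read_chip() rounds to an update_interval of about 26 ms; ina3221_interval_ms_to_conv_time() inverts this by splitting the per-channel, per-sample budget evenly between the bus and shunt phases. A standalone userspace sketch of the same arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int channels = 3, vbus_ct = 1100, vsh_ct = 1100, samples = 4;
	unsigned int interval_us = channels * (vbus_ct + vsh_ct) * samples;
	unsigned int per_phase_us;

	/* forward: register settings -> update interval (~26 ms) */
	printf("update_interval ~ %u ms\n", interval_us / 1000);

	/* inverse: a requested 26 ms interval -> per-phase conversion time (~1083 us) */
	per_phase_us = (26 * 1000 / 2) / (channels * samples);
	printf("conversion time ~ %u us per phase\n", per_phase_us);

	return 0;
}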
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 4fa482ae0eb5..6b57a6d4e626 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -451,20 +451,12 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
-static const u32 jc42_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | HWMON_T_CRIT |
- HWMON_T_MAX_HYST | HWMON_T_CRIT_HYST |
- HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info jc42_temp = {
- .type = hwmon_temp,
- .config = jc42_temp_config,
-};
-
static const struct hwmon_channel_info *jc42_info[] = {
- &jc42_temp,
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM),
NULL
};
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 2d40a2e771d7..7d5947595b45 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -94,7 +94,6 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct jz4740_hwmon *hwmon;
struct device *hwmon_dev;
- struct resource *mem;
hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
@@ -109,8 +108,7 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
return hwmon->irq;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hwmon->base = devm_ioremap_resource(&pdev->dev, mem);
+ hwmon->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hwmon->base))
return PTR_ERR(hwmon->base);
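devm_platform_ioremap_resource() used above is a convenience wrapper combining platform_get_resource(pdev, IORESOURCE_MEM, index) with devm_ioremap_resource(); its prototype, from memory and worth double-checking against <linux/platform_device.h>:

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index);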
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index eac54b9cdeec..8848fbe5fd16 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1153,7 +1153,7 @@ static const struct i2c_device_id lm63_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
-static const struct of_device_id lm63_of_match[] = {
+static const struct of_device_id __maybe_unused lm63_of_match[] = {
{
.compatible = "national,lm63",
.data = (void *)lm63
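
The __maybe_unused added here (and to the lm75, lm85, lm90 and lm95245 tables below) exists because these drivers reference their OF table through of_match_ptr(), which compiles the reference away without CONFIG_OF and leaves the table unused. Roughly, from include/linux/of.h:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL	/* table is never referenced */
#endif

Marking the tables __maybe_unused silences the resulting -Wunused-const-variable warning in !CONFIG_OF builds without wrapping each table in an #ifdef.
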
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 447af07450f1..423a382420b9 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -59,6 +59,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
tmp175,
tmp275,
tmp75,
+ tmp75b,
tmp75c,
};
@@ -194,35 +195,11 @@ static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-/*-----------------------------------------------------------------------*/
-
-/* device probe and removal */
-
-/* chip configuration */
-
-static const u32 lm75_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm75_chip = {
- .type = hwmon_chip,
- .config = lm75_chip_config,
-};
-
-static const u32 lm75_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
- 0
-};
-
-static const struct hwmon_channel_info lm75_temp = {
- .type = hwmon_temp,
- .config = lm75_temp_config,
-};
-
static const struct hwmon_channel_info *lm75_info[] = {
- &lm75_chip,
- &lm75_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
};
@@ -378,6 +355,11 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->resolution = 12;
data->sample_time = MSEC_PER_SEC / 2;
break;
+	case tmp75b: /* not one-shot mode, conversion rate 37 Hz */
+ clr_mask |= 1 << 15 | 0x3 << 13;
+ data->resolution = 12;
+ data->sample_time = MSEC_PER_SEC / 37;
+ break;
case tmp75c:
clr_mask |= 1 << 5; /* not one-shot mode */
data->resolution = 12;
@@ -438,12 +420,13 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tmp175", tmp175, },
{ "tmp275", tmp275, },
{ "tmp75", tmp75, },
+ { "tmp75b", tmp75b, },
{ "tmp75c", tmp75c, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm75_ids);
-static const struct of_device_id lm75_of_match[] = {
+static const struct of_device_id __maybe_unused lm75_of_match[] = {
{
.compatible = "adi,adt75",
.data = (void *)adt75
@@ -537,6 +520,10 @@ static const struct of_device_id lm75_of_match[] = {
.data = (void *)tmp75
},
{
+ .compatible = "ti,tmp75b",
+ .data = (void *)tmp75b
+ },
+ {
.compatible = "ti,tmp75c",
.data = (void *)tmp75c
},
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 0cb7ff613b80..eb95947673de 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -73,7 +73,6 @@ enum chips { lm78, lm79 };
#define LM78_REG_CHIPID 0x49
#define LM78_REG_I2C_ADDR 0x48
-
/*
* Conversions. Rounding and limit checking is only done on the TO_REG
* variants.
@@ -147,15 +146,13 @@ struct lm78_data {
u16 alarms; /* Register encoding, combined */
};
-
static int lm78_read_value(struct lm78_data *data, u8 reg);
static int lm78_write_value(struct lm78_data *data, u8 reg, u8 value);
static struct lm78_data *lm78_update_device(struct device *dev);
static void lm78_init_device(struct lm78_data *data);
-
/* 7 Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -163,7 +160,7 @@ static ssize_t show_in(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[attr->index]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -171,7 +168,7 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[attr->index]));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -179,8 +176,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[attr->index]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -199,8 +196,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -219,21 +216,27 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
-show_in_offset(5);
-show_in_offset(6);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
/* Temperature */
static ssize_t temp1_input_show(struct device *dev,
@@ -300,7 +303,7 @@ static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
/* 3 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -310,7 +313,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -320,8 +323,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -340,7 +343,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -354,8 +357,8 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -413,22 +416,17 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
/* Fan 3 divisor is locked in H/W */
-static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
- show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
- show_fan_div, set_fan_div, 1);
-static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_div, fan_div, 2);
/* VID */
static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *da,
@@ -448,24 +446,24 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *da,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct lm78_data *data = lm78_update_device(dev);
int nr = to_sensor_dev_attr(da)->index;
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
static struct attribute *lm78_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
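
The show_*/set_* renames in lm78 (and in lm85 and lm87 below) follow the naming contract of the SENSOR_DEVICE_ATTR_RO/RW helpers, which paste the callback names from their second argument; approximately, from include/linux/hwmon-sysfs.h:

#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index)	\
	SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)

#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index)	\
	SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)

Hence SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0) expects in_min_show() and in_min_store(), which is why show_in_min()/set_in_min() and friends are renamed, and the explicit S_IRUGO | S_IWUSR permissions become the helpers' fixed 0444/0644.
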
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index a95d48316f06..80db367b4c54 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -165,7 +165,6 @@ static inline u16 FAN_TO_REG(unsigned long val)
#define PWM_TO_REG(val) clamp_val(val, 0, 255)
#define PWM_FROM_REG(val) (val)
-
/*
* ZONEs have the following parameters:
* Limit (low) temp, 1. degC
@@ -563,24 +562,25 @@ static struct lm85_data *lm85_update_device(struct device *dev)
}
/* 4 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr]));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -599,16 +599,14 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1)
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
-show_fan_offset(4);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
+static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
/* vid, vrm, alarms */
@@ -667,44 +665,44 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 18);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 18);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(fan4_alarm, alarm, 13);
/* pwm */
-static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[nr]));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -723,8 +721,8 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
- *attr, char *buf)
+static ssize_t pwm_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -745,8 +743,9 @@ static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", enable);
}
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t pwm_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -788,8 +787,8 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
return count;
}
-static ssize_t show_pwm_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -804,8 +803,9 @@ static ssize_t show_pwm_freq(struct device *dev,
return sprintf(buf, "%d\n", freq);
}
-static ssize_t set_pwm_freq(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_freq_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -841,22 +841,20 @@ static ssize_t set_pwm_freq(struct device *dev,
return count;
}
-#define show_pwm_reg(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- show_pwm, set_pwm, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- show_pwm_enable, set_pwm_enable, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_freq, S_IRUGO | S_IWUSR, \
- show_pwm_freq, set_pwm_freq, offset - 1)
-
-show_pwm_reg(1);
-show_pwm_reg(2);
-show_pwm_reg(3);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
/* Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -864,16 +862,16 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
data->in_ext[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -892,16 +890,16 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -920,27 +918,35 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_in_reg(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-
-show_in_reg(0);
-show_in_reg(1);
-show_in_reg(2);
-show_in_reg(3);
-show_in_reg(4);
-show_in_reg(5);
-show_in_reg(6);
-show_in_reg(7);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
/* Temps */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -948,16 +954,17 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
data->temp_ext[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -979,16 +986,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1010,31 +1018,30 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1);
-
-show_temp_reg(1);
-show_temp_reg(2);
-show_temp_reg(3);
-
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
/* Automatic PWM control */
-static ssize_t show_pwm_auto_channels(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_channels_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", ZONE_FROM_REG(data->autofan[nr].config));
}
-static ssize_t set_pwm_auto_channels(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_channels_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1055,16 +1062,17 @@ static ssize_t set_pwm_auto_channels(struct device *dev,
return count;
}
-static ssize_t show_pwm_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_pwm_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->autofan[nr].min_pwm));
}
-static ssize_t set_pwm_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_pwm_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1084,16 +1092,18 @@ static ssize_t set_pwm_auto_pwm_min(struct device *dev,
return count;
}
-static ssize_t show_pwm_auto_pwm_minctl(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_pwm_minctl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", data->autofan[nr].min_off);
}
-static ssize_t set_pwm_auto_pwm_minctl(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_pwm_minctl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1117,25 +1127,21 @@ static ssize_t set_pwm_auto_pwm_minctl(struct device *dev,
return count;
}
-#define pwm_auto(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_channels, \
- S_IRUGO | S_IWUSR, show_pwm_auto_channels, \
- set_pwm_auto_channels, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_pwm_min, \
- S_IRUGO | S_IWUSR, show_pwm_auto_pwm_min, \
- set_pwm_auto_pwm_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_pwm_minctl, \
- S_IRUGO | S_IWUSR, show_pwm_auto_pwm_minctl, \
- set_pwm_auto_pwm_minctl, offset - 1)
-
-pwm_auto(1);
-pwm_auto(2);
-pwm_auto(3);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_channels, pwm_auto_channels, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_pwm_min, pwm_auto_pwm_min, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_pwm_minctl, pwm_auto_pwm_minctl, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_channels, pwm_auto_channels, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_pwm_min, pwm_auto_pwm_min, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_pwm_minctl, pwm_auto_pwm_minctl, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_channels, pwm_auto_channels, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_pwm_min, pwm_auto_pwm_min, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_pwm_minctl, pwm_auto_pwm_minctl, 2);
/* Temperature settings for automatic PWM control */
-static ssize_t show_temp_auto_temp_off(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_off_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -1143,8 +1149,9 @@ static ssize_t show_temp_auto_temp_off(struct device *dev,
HYST_FROM_REG(data->zone[nr].hyst));
}
-static ssize_t set_temp_auto_temp_off(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1172,16 +1179,18 @@ static ssize_t set_temp_auto_temp_off(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit));
}
-static ssize_t set_temp_auto_temp_min(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1210,8 +1219,9 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -1219,8 +1229,9 @@ static ssize_t show_temp_auto_temp_max(struct device *dev,
RANGE_FROM_REG(data->zone[nr].range));
}
-static ssize_t set_temp_auto_temp_max(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_max_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1245,16 +1256,18 @@ static ssize_t set_temp_auto_temp_max(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_crit(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_crit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].critical));
}
-static ssize_t set_temp_auto_temp_crit(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_crit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1274,23 +1287,18 @@ static ssize_t set_temp_auto_temp_crit(struct device *dev,
return count;
}
-#define temp_auto(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_off, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_off, \
- set_temp_auto_temp_off, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_min, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_min, \
- set_temp_auto_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_max, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_max, \
- set_temp_auto_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_crit, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_crit, \
- set_temp_auto_temp_crit, offset - 1);
-
-temp_auto(1);
-temp_auto(2);
-temp_auto(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_off, temp_auto_temp_off, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_min, temp_auto_temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_max, temp_auto_temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_crit, temp_auto_temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_off, temp_auto_temp_off, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_min, temp_auto_temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_max, temp_auto_temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_crit, temp_auto_temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_off, temp_auto_temp_off, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_min, temp_auto_temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_max, temp_auto_temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_crit, temp_auto_temp_crit, 2);
static struct attribute *lm85_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -1642,7 +1650,7 @@ static const struct i2c_device_id lm85_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
-static const struct of_device_id lm85_of_match[] = {
+static const struct of_device_id __maybe_unused lm85_of_match[] = {
{
.compatible = "adi,adm1027",
.data = (void *)adm1027
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index b48d30760388..644e61eae574 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -276,8 +276,8 @@ static struct lm87_data *lm87_update_device(struct device *dev)
* Sysfs stuff
*/
-static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -286,8 +286,8 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
data->in_scale[nr]));
}
-static ssize_t show_in_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -296,8 +296,8 @@ static ssize_t show_in_min(struct device *dev,
data->in_scale[nr]));
}
-static ssize_t show_in_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -306,8 +306,8 @@ static ssize_t show_in_max(struct device *dev,
data->in_scale[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -327,8 +327,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -348,23 +348,32 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_in(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in_input, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-set_in(0);
-set_in(1);
-set_in(2);
-set_in(3);
-set_in(4);
-set_in(5);
-set_in(6);
-set_in(7);
-
-static ssize_t show_temp_input(struct device *dev,
+static SENSOR_DEVICE_ATTR_RO(in0_input, in_input, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+
+static ssize_t temp_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -373,7 +382,7 @@ static ssize_t show_temp_input(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_low(struct device *dev,
+static ssize_t temp_low_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -383,7 +392,7 @@ static ssize_t show_temp_low(struct device *dev,
TEMP_FROM_REG(data->temp_low[nr]));
}
-static ssize_t show_temp_high(struct device *dev,
+static ssize_t temp_high_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -393,8 +402,9 @@ static ssize_t show_temp_high(struct device *dev,
TEMP_FROM_REG(data->temp_high[nr]));
}
-static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_low_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -413,8 +423,9 @@ static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_high_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -433,16 +444,15 @@ static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_temp(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_high, set_temp_high, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_low, set_temp_low, offset - 1)
-set_temp(1);
-set_temp(2);
-set_temp(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_low, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_high, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_low, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_high, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp_input, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_low, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_high, 2);
static ssize_t temp1_crit_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -460,9 +470,9 @@ static ssize_t temp2_crit_show(struct device *dev,
static DEVICE_ATTR_RO(temp1_crit);
static DEVICE_ATTR_RO(temp2_crit);
-static DEVICE_ATTR(temp3_crit, S_IRUGO, temp2_crit_show, NULL);
+static DEVICE_ATTR(temp3_crit, 0444, temp2_crit_show, NULL);
-static ssize_t show_fan_input(struct device *dev,
+static ssize_t fan_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -472,8 +482,8 @@ static ssize_t show_fan_input(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -482,8 +492,8 @@ static ssize_t show_fan_min(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -492,8 +502,9 @@ static ssize_t show_fan_div(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -519,8 +530,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
* of least surprise; the user doesn't expect the fan minimum to change just
* because the divider changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -575,15 +587,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_fan(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1)
-set_fan(1);
-set_fan(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -653,28 +662,28 @@ static ssize_t aout_output_store(struct device *dev,
}
static DEVICE_ATTR_RW(aout_output);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 15);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 15);
/*
* Real code
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 480d70a51778..b99eda01696e 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -236,7 +236,7 @@ static const struct i2c_device_id lm90_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
-static const struct of_device_id lm90_of_match[] = {
+static const struct of_device_id __maybe_unused lm90_of_match[] = {
{
.compatible = "adi,adm1032",
.data = (void *)adm1032
@@ -1720,16 +1720,6 @@ static void lm90_regulator_disable(void *regulator)
regulator_disable(regulator);
}
-static const u32 lm90_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS,
- 0
-};
-
-static const struct hwmon_channel_info lm90_chip_info = {
- .type = hwmon_chip,
- .config = lm90_chip_config,
-};
-
static const struct hwmon_ops lm90_ops = {
.is_visible = lm90_is_visible,
@@ -1792,7 +1782,8 @@ static int lm90_probe(struct i2c_client *client,
data->chip.ops = &lm90_ops;
data->chip.info = data->info;
- data->info[0] = &lm90_chip_info;
+ data->info[0] = HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS);
data->info[1] = &data->temp_info;
info = &data->temp_info;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 3ff188937158..6c5215e6d448 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -418,33 +418,15 @@ static void lm95241_init_client(struct i2c_client *client,
data->model);
}
-static const u32 lm95241_chip_config[] = {
- HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm95241_chip = {
- .type = hwmon_chip,
- .config = lm95241_chip_config,
-};
-
-static const u32 lm95241_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_TYPE |
- HWMON_T_FAULT,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_TYPE |
- HWMON_T_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info lm95241_temp = {
- .type = hwmon_temp,
- .config = lm95241_temp_config,
-};
-
static const struct hwmon_channel_info *lm95241_info[] = {
- &lm95241_chip,
- &lm95241_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_TYPE | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_TYPE | HWMON_T_FAULT),
NULL
};
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index e4cac3a04536..8d08ca8bbdf8 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -92,19 +92,6 @@ static const unsigned short normal_i2c[] = {
#define LM95235_REVISION 0xB1
#define LM95245_REVISION 0xB3
-static const u8 lm95245_reg_address[] = {
- LM95245_REG_R_LOCAL_TEMPH_S,
- LM95245_REG_R_LOCAL_TEMPL_S,
- LM95245_REG_R_REMOTE_TEMPH_S,
- LM95245_REG_R_REMOTE_TEMPL_S,
- LM95245_REG_R_REMOTE_TEMPH_U,
- LM95245_REG_R_REMOTE_TEMPL_U,
- LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
- LM95245_REG_RW_REMOTE_TCRIT_LIMIT,
- LM95245_REG_RW_COMMON_HYSTERESIS,
- LM95245_REG_R_STATUS1,
-};
-
/* Client data (each client gets its own) */
struct lm95245_data {
struct regmap *regmap;
@@ -545,32 +532,16 @@ static const struct regmap_config lm95245_regmap_config = {
.use_single_write = true,
};
-static const u32 lm95245_chip_config[] = {
- HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm95245_chip = {
- .type = hwmon_chip,
- .config = lm95245_chip_config,
-};
-
-static const u32 lm95245_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_CRIT_ALARM,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_CRIT |
- HWMON_T_CRIT_HYST | HWMON_T_FAULT | HWMON_T_MAX_ALARM |
- HWMON_T_CRIT_ALARM | HWMON_T_TYPE | HWMON_T_OFFSET,
- 0
-};
-
-static const struct hwmon_channel_info lm95245_temp = {
- .type = hwmon_temp,
- .config = lm95245_temp_config,
-};
-
static const struct hwmon_channel_info *lm95245_info[] = {
- &lm95245_chip,
- &lm95245_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_FAULT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_TYPE | HWMON_T_OFFSET),
NULL
};
@@ -623,7 +594,7 @@ static const struct i2c_device_id lm95245_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm95245_id);
-static const struct of_device_id lm95245_of_match[] = {
+static const struct of_device_id __maybe_unused lm95245_of_match[] = {
{ .compatible = "national,lm95235" },
{ .compatible = "national,lm95245" },
{ },
diff --git a/drivers/hwmon/lochnagar-hwmon.c b/drivers/hwmon/lochnagar-hwmon.c
new file mode 100644
index 000000000000..8b19adf2eeb0
--- /dev/null
+++ b/drivers/hwmon/lochnagar-hwmon.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lochnagar hardware monitoring features
+ *
+ * Copyright (c) 2016-2019 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Lucas Tanure <tanureal@opensource.cirrus.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/math64.h>
+#include <linux/mfd/lochnagar.h>
+#include <linux/mfd/lochnagar2_regs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define LN2_MAX_NSAMPLE 1023
+#define LN2_SAMPLE_US 1670
+
+#define LN2_CURR_UNITS 1000
+#define LN2_VOLT_UNITS 1000
+#define LN2_TEMP_UNITS 1000
+#define LN2_PWR_UNITS 1000000
+
+static const char * const lochnagar_chan_names[] = {
+ "DBVDD1",
+ "1V8 DSP",
+ "1V8 CDC",
+ "VDDCORE DSP",
+ "AVDD 1V8",
+ "SYSVDD",
+ "VDDCORE CDC",
+ "MICVDD",
+};
+
+struct lochnagar_hwmon {
+ struct regmap *regmap;
+
+ long power_nsamples[ARRAY_SIZE(lochnagar_chan_names)];
+
+ /* Lock to ensure only a single sensor is read at a time */
+ struct mutex sensor_lock;
+};
+
+enum lochnagar_measure_mode {
+ LN2_CURR = 0,
+ LN2_VOLT,
+ LN2_TEMP,
+};
+
+/**
+ * float_to_long - Convert an IEEE-754 reading from hardware to an integer
+ *
+ * @data: Value read from the hardware
+ * @precision: Units to multiply up to, e.g. 1000 = milli, 1000000 = micro
+ *
+ * Return: Converted integer reading
+ *
+ * Depending on the measurement type the hardware returns an IEEE-754
+ * floating point value in either volts, amps or Celsius. This function
+ * will convert that into an integer in a smaller unit such as micro-amps
+ * or milli-Celsius. The hardware does not return NaN, so consideration of
+ * that is not required.
+ */
+static long float_to_long(u32 data, u32 precision)
+{
+ u64 man = data & 0x007FFFFF;
+ int exp = ((data & 0x7F800000) >> 23) - 127 - 23;
+ bool negative = data & 0x80000000;
+ long result;
+
+ man = (man + (1 << 23)) * precision;
+
+ if (fls64(man) + exp > (int)sizeof(long) * 8 - 1)
+ result = LONG_MAX;
+ else if (exp < 0)
+ result = (man + (1ull << (-exp - 1))) >> -exp;
+ else
+ result = man << exp;
+
+ return negative ? -result : result;
+}
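
A worked example of the conversion, using an illustrative input value:

/*
 * 0x3F800000 is the IEEE-754 encoding of 1.0f.  With precision set to
 * LN2_VOLT_UNITS (1000):
 *
 *   man    = (0 + (1 << 23)) * 1000          = 8388608000
 *   exp    = 127 - 127 - 23                  = -23
 *   result = (man + (1ull << 22)) >> 23      = 1000
 *
 * so a 1.0 V reading is reported as 1000 mV.
 */
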
+
+static int do_measurement(struct regmap *regmap, int chan,
+ enum lochnagar_measure_mode mode, int nsamples)
+{
+ unsigned int val;
+ int ret;
+
+ chan = 1 << (chan + LOCHNAGAR2_IMON_MEASURED_CHANNELS_SHIFT);
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL1,
+ LOCHNAGAR2_IMON_ENA_MASK | chan | mode);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL2, nsamples);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3,
+ LOCHNAGAR2_IMON_CONFIGURE_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL3, val,
+ val & LOCHNAGAR2_IMON_DONE_MASK,
+ 1000, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3,
+ LOCHNAGAR2_IMON_MEASURE_MASK);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * Actual measurement time is ~1.67 ms per sample; approximate this
+	 * with a 1.5 ms per sample msleep and then poll for success up to
+	 * ~0.17 ms * 1023 (LN2_MAX_NSAMPLE). Normally for smaller values
+ * of nsamples the poll will complete on the first loop due to
+ * other latency in the system.
+ */
+ msleep((nsamples * 3) / 2);
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL3, val,
+ val & LOCHNAGAR2_IMON_DONE_MASK,
+ 5000, 200000);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3, 0);
+}
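
To make the timing above concrete: at the maximum of LN2_MAX_NSAMPLE (1023) samples, msleep((1023 * 3) / 2) waits 1534 ms against an actual conversion time of roughly 1023 * 1.67 ms = 1708 ms, and the remaining ~174 ms is covered by the 200 ms budget of the regmap_read_poll_timeout() that follows.
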
+
+static int request_data(struct regmap *regmap, int chan, u32 *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL4,
+ LOCHNAGAR2_IMON_DATA_REQ_MASK |
+ chan << LOCHNAGAR2_IMON_CH_SEL_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL4, val,
+ val & LOCHNAGAR2_IMON_DATA_RDY_MASK,
+ 1000, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, LOCHNAGAR2_IMON_DATA1, &val);
+ if (ret < 0)
+ return ret;
+
+ *data = val << 16;
+
+ ret = regmap_read(regmap, LOCHNAGAR2_IMON_DATA2, &val);
+ if (ret < 0)
+ return ret;
+
+ *data |= val;
+
+ return regmap_write(regmap, LOCHNAGAR2_IMON_CTRL4, 0);
+}
+
+static int read_sensor(struct device *dev, int chan,
+ enum lochnagar_measure_mode mode, int nsamples,
+ unsigned int precision, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ struct regmap *regmap = priv->regmap;
+ u32 data;
+ int ret;
+
+ mutex_lock(&priv->sensor_lock);
+
+ ret = do_measurement(regmap, chan, mode, nsamples);
+ if (ret < 0) {
+ dev_err(dev, "Failed to perform measurement: %d\n", ret);
+ goto error;
+ }
+
+ ret = request_data(regmap, chan, &data);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read measurement: %d\n", ret);
+ goto error;
+ }
+
+ *val = float_to_long(data, precision);
+
+error:
+ mutex_unlock(&priv->sensor_lock);
+
+ return ret;
+}
+
+static int read_power(struct device *dev, int chan, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ int nsamples = priv->power_nsamples[chan];
+ u64 power;
+ int ret;
+
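+ /*
+ * The voltage on SYSVDD is not measured (it is hidden in
+ * lochnagar_is_visible); assume a fixed 5V supply and only measure
+ * the current for that channel.
+ */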
+ if (!strcmp("SYSVDD", lochnagar_chan_names[chan])) {
+ power = 5 * LN2_PWR_UNITS;
+ } else {
+ ret = read_sensor(dev, chan, LN2_VOLT, 1, LN2_PWR_UNITS, val);
+ if (ret < 0)
+ return ret;
+
+ power = abs(*val);
+ }
+
+ ret = read_sensor(dev, chan, LN2_CURR, nsamples, LN2_PWR_UNITS, val);
+ if (ret < 0)
+ return ret;
+
+ power *= abs(*val);
+ power = DIV_ROUND_CLOSEST_ULL(power, LN2_PWR_UNITS);
+
+ if (power > LONG_MAX)
+ *val = LONG_MAX;
+ else
+ *val = power;
+
+ return 0;
+}
+
+static umode_t lochnagar_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int chan)
+{
+ switch (type) {
+ case hwmon_in:
+ if (!strcmp("SYSVDD", lochnagar_chan_names[chan]))
+ return 0;
+ break;
+ case hwmon_power:
+ if (attr == hwmon_power_average_interval)
+ return 0644;
+ break;
+ default:
+ break;
+ }
+
+ return 0444;
+}
+
+static int lochnagar_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int chan, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ int interval;
+
+ switch (type) {
+ case hwmon_in:
+ return read_sensor(dev, chan, LN2_VOLT, 1, LN2_VOLT_UNITS, val);
+ case hwmon_curr:
+ return read_sensor(dev, chan, LN2_CURR, 1, LN2_CURR_UNITS, val);
+ case hwmon_temp:
+ return read_sensor(dev, chan, LN2_TEMP, 1, LN2_TEMP_UNITS, val);
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_average:
+ return read_power(dev, chan, val);
+ case hwmon_power_average_interval:
+ interval = priv->power_nsamples[chan] * LN2_SAMPLE_US;
+ *val = DIV_ROUND_CLOSEST(interval, 1000);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lochnagar_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int chan, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ case hwmon_curr:
+ case hwmon_power:
+ *str = lochnagar_chan_names[chan];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lochnagar_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int chan, long val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+
+ if (type != hwmon_power || attr != hwmon_power_average_interval)
+ return -EOPNOTSUPP;
+
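+ /*
+ * Convert the requested interval in milliseconds to a sample count,
+ * capped at LN2_MAX_NSAMPLE samples.
+ */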
+ val = clamp_t(long, val, 1, (LN2_MAX_NSAMPLE * LN2_SAMPLE_US) / 1000);
+ val = DIV_ROUND_CLOSEST(val * 1000, LN2_SAMPLE_US);
+
+ priv->power_nsamples[chan] = val;
+
+ return 0;
+}
+
+static const struct hwmon_ops lochnagar_ops = {
+ .is_visible = lochnagar_is_visible,
+ .read = lochnagar_read,
+ .read_string = lochnagar_read_string,
+ .write = lochnagar_write,
+};
+
+static const struct hwmon_channel_info *lochnagar_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL),
+ NULL
+};
+
+static const struct hwmon_chip_info lochnagar_chip_info = {
+ .ops = &lochnagar_ops,
+ .info = lochnagar_info,
+};
+
+static const struct of_device_id lochnagar_of_match[] = {
+ { .compatible = "cirrus,lochnagar2-hwmon" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lochnagar_of_match);
+
+static int lochnagar_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *hwmon_dev;
+ struct lochnagar_hwmon *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->sensor_lock);
+
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->regmap) {
+ dev_err(dev, "No register map found\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->power_nsamples); i++)
+ priv->power_nsamples[i] = 96;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "Lochnagar", priv,
+ &lochnagar_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver lochnagar_hwmon_driver = {
+ .driver = {
+ .name = "lochnagar-hwmon",
+ .of_match_table = lochnagar_of_match,
+ },
+ .probe = lochnagar_hwmon_probe,
+};
+module_platform_driver(lochnagar_hwmon_driver);
+
+MODULE_AUTHOR("Lucas Tanure <tanureal@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Lochnagar hardware monitoring features");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index 76c6fda76d95..2b5cd37e6dc3 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -208,7 +208,7 @@ static const struct i2c_device_id ltc4151_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ltc4151_id);
-static const struct of_device_id ltc4151_match[] = {
+static const struct of_device_id __maybe_unused ltc4151_match[] = {
{ .compatible = "lltc,ltc4151" },
{},
};
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 34d0653ca607..26542635de9b 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -390,57 +390,30 @@ static umode_t ltc4245_is_visible(const void *_data,
}
}
-static const u32 ltc4245_in_config[] = {
- HWMON_I_INPUT, /* dummy, skipped in is_visible */
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_in = {
- .type = hwmon_in,
- .config = ltc4245_in_config,
-};
-
-static const u32 ltc4245_curr_config[] = {
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_curr = {
- .type = hwmon_curr,
- .config = ltc4245_curr_config,
-};
-
-static const u32 ltc4245_power_config[] = {
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_power = {
- .type = hwmon_power,
- .config = ltc4245_power_config,
-};
-
static const struct hwmon_channel_info *ltc4245_info[] = {
- &ltc4245_in,
- &ltc4245_curr,
- &ltc4245_power,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
diff --git a/drivers/hwmon/ltq-cputemp.c b/drivers/hwmon/ltq-cputemp.c
index 1d33f94594c1..570791f0e024 100644
--- a/drivers/hwmon/ltq-cputemp.c
+++ b/drivers/hwmon/ltq-cputemp.c
@@ -77,29 +77,11 @@ static umode_t ltq_is_visible(const void *_data, enum hwmon_sensor_types type,
}
}
-static const u32 ltq_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info ltq_chip = {
- .type = hwmon_chip,
- .config = ltq_chip_config,
-};
-
-static const u32 ltq_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltq_temp = {
- .type = hwmon_temp,
- .config = ltq_temp_config,
-};
-
static const struct hwmon_channel_info *ltq_info[] = {
- &ltq_chip,
- &ltq_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT),
NULL
};
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 3d9e210beedf..dd6a35219a18 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * For further information, see the Documentation/hwmon/max197 file.
+ * For further information, see the Documentation/hwmon/max197.rst file.
*/
#include <linux/kernel.h>
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 722bcbb9865a..0b0b04d36931 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -400,45 +400,27 @@ static umode_t max31790_is_visible(const void *data,
}
}
-static const u32 max31790_fan_config[] = {
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info max31790_fan = {
- .type = hwmon_fan,
- .config = max31790_fan_config,
-};
-
-static const u32 max31790_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
-};
-
-static const struct hwmon_channel_info max31790_pwm = {
- .type = hwmon_pwm,
- .config = max31790_pwm_config,
-};
-
static const struct hwmon_channel_info *max31790_info[] = {
- &max31790_fan,
- &max31790_pwm,
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
NULL
};
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index 35555f0eefb9..1810a753f1a3 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -458,37 +458,19 @@ static const struct regmap_config max6621_regmap_config = {
.num_reg_defaults = ARRAY_SIZE(max6621_regmap_default),
};
-static u32 max6621_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info max6621_chip = {
- .type = hwmon_chip,
- .config = max6621_chip_config,
-};
-
-static const u32 max6621_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0
-};
-
-static const struct hwmon_channel_info max6621_temp = {
- .type = hwmon_temp,
- .config = max6621_temp_config,
-};
-
static const struct hwmon_channel_info *max6621_info[] = {
- &max6621_chip,
- &max6621_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL
};
@@ -570,7 +552,7 @@ static const struct i2c_device_id max6621_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max6621_id);
-static const struct of_device_id max6621_of_match[] = {
+static const struct of_device_id __maybe_unused max6621_of_match[] = {
{ .compatible = "maxim,max6621" },
{ }
};
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 61135a2d0cff..939953240827 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -40,6 +40,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/of_device.h>
+#include <linux/thermal.h>
/*
* Insmod parameters
@@ -113,6 +114,7 @@ module_param(clock, int, 0444);
struct max6650_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
+ struct thermal_cooling_device *cooling_dev;
struct mutex update_lock;
int nr_fans;
char valid; /* zero until following fields are valid */
@@ -125,6 +127,7 @@ struct max6650_data {
u8 count;
u8 dac;
u8 alarm;
+ unsigned long cooling_dev_state;
};
static const u8 tach_reg[] = {
@@ -134,7 +137,7 @@ static const u8 tach_reg[] = {
MAX6650_REG_TACH3,
};
-static const struct of_device_id max6650_dt_match[] = {
+static const struct of_device_id __maybe_unused max6650_dt_match[] = {
{
.compatible = "maxim,max6650",
.data = (void *)1
@@ -694,6 +697,63 @@ static int max6650_init_client(struct max6650_data *data,
return 0;
}
+#if IS_ENABLED(CONFIG_THERMAL)
+
+static int max6650_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 255;
+
+ return 0;
+}
+
+static int max6650_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct max6650_data *data = cdev->devdata;
+
+ *state = data->cooling_dev_state;
+
+ return 0;
+}
+
+static int max6650_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct max6650_data *data = cdev->devdata;
+ struct i2c_client *client = data->client;
+ int err;
+
+ state = clamp_val(state, 0, 255);
+
+ mutex_lock(&data->update_lock);
+
+ if (data->config & MAX6650_CFG_V12)
+ data->dac = 180 - (180 * state)/255;
+ else
+ data->dac = 76 - (76 * state)/255;
+
+ err = i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);
+
+ if (!err) {
+ max6650_set_operating_mode(data, state ?
+ MAX6650_CFG_MODE_OPEN_LOOP :
+ MAX6650_CFG_MODE_OFF);
+ data->cooling_dev_state = state;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return err < 0 ? err : 0;
+}
+
+static const struct thermal_cooling_device_ops max6650_cooling_ops = {
+ .get_max_state = max6650_get_max_state,
+ .get_cur_state = max6650_get_cur_state,
+ .set_cur_state = max6650_set_cur_state,
+};
+#endif
+
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -709,6 +769,7 @@ static int max6650_probe(struct i2c_client *client,
return -ENOMEM;
data->client = client;
+ i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->nr_fans = of_id ? (int)(uintptr_t)of_id->data : id->driver_data;
@@ -727,7 +788,31 @@ static int max6650_probe(struct i2c_client *client,
hwmon_dev = devm_hwmon_device_register_with_groups(dev,
client->name, data,
data->groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ err = PTR_ERR_OR_ZERO(hwmon_dev);
+ if (err)
+ return err;
+
+#if IS_ENABLED(CONFIG_THERMAL)
+ data->cooling_dev =
+ thermal_of_cooling_device_register(client->dev.of_node,
+ client->name, data,
+ &max6650_cooling_ops);
+ if (IS_ERR(data->cooling_dev))
+ dev_warn(&client->dev,
+ "thermal cooling device register failed: %ld\n",
+ PTR_ERR(data->cooling_dev));
+#endif
+ return 0;
+}
+
+static int max6650_remove(struct i2c_client *client)
+{
+ struct max6650_data *data = i2c_get_clientdata(client);
+
+ if (!IS_ERR(data->cooling_dev))
+ thermal_cooling_device_unregister(data->cooling_dev);
+
+ return 0;
}
static const struct i2c_device_id max6650_id[] = {
@@ -743,6 +828,7 @@ static struct i2c_driver max6650_driver = {
.of_match_table = of_match_ptr(max6650_dt_match),
},
.probe = max6650_probe,
+ .remove = max6650_remove,
.id_table = max6650_id,
};
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index da43f7ae3de1..328793eaee3c 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -650,7 +650,7 @@ static const struct i2c_device_id max6697_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max6697_id);
-static const struct of_device_id max6697_of_match[] = {
+static const struct of_device_id __maybe_unused max6697_of_match[] = {
{
.compatible = "maxim,max6581",
.data = (void *)max6581
diff --git a/drivers/hwmon/menf21bmc_hwmon.c b/drivers/hwmon/menf21bmc_hwmon.c
index c29a4c3c6b9e..f540f938fcd9 100644
--- a/drivers/hwmon/menf21bmc_hwmon.c
+++ b/drivers/hwmon/menf21bmc_hwmon.c
@@ -101,7 +101,7 @@ static int menf21bmc_hwmon_get_volt_limits(struct menf21bmc_hwmon *drv_data)
}
static ssize_t
-show_label(struct device *dev, struct device_attribute *devattr, char *buf)
+label_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -109,7 +109,7 @@ show_label(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_in(struct device *dev, struct device_attribute *devattr, char *buf)
+in_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = menf21bmc_hwmon_update(dev);
@@ -121,7 +121,7 @@ show_in(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_min(struct device *dev, struct device_attribute *devattr, char *buf)
+min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
@@ -130,7 +130,7 @@ show_min(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_max(struct device *dev, struct device_attribute *devattr, char *buf)
+max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
@@ -138,21 +138,26 @@ show_max(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%d\n", drv_data->in_max[attr->index]);
}
-#define create_voltage_sysfs(idx) \
-static SENSOR_DEVICE_ATTR(in##idx##_input, S_IRUGO, \
- show_in, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_min, S_IRUGO, \
- show_min, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_max, S_IRUGO, \
- show_max, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_label, S_IRUGO, \
- show_label, NULL, idx);
-
-create_voltage_sysfs(0);
-create_voltage_sysfs(1);
-create_voltage_sysfs(2);
-create_voltage_sysfs(3);
-create_voltage_sysfs(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_min, min, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_max, max, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_label, label, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_min, min, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_max, max, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_label, label, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_min, min, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_max, max, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_label, label, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_min, min, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_max, max, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_label, label, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_min, min, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_max, max, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_label, label, 4);
static struct attribute *menf21bmc_hwmon_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index db8c6de0b6a0..ed8d59d4eecb 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -27,7 +27,9 @@
#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
-#define MLXREG_FAN_TACHO_DIVIDER_DEF 1132
+#define MLXREG_FAN_TACHO_DIV_MIN 283
+#define MLXREG_FAN_TACHO_DIV_DEF (MLXREG_FAN_TACHO_DIV_MIN * 4)
+#define MLXREG_FAN_TACHO_DIV_SCALE_MAX 64
/*
* FAN datasheet defines the formula for RPM calculations as RPM = 15/t-high.
* The logic in a programmable device measures the time t-high by sampling the
@@ -227,40 +229,22 @@ mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static const u32 mlxreg_fan_hwmon_fan_config[] = {
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = {
- .type = hwmon_fan,
- .config = mlxreg_fan_hwmon_fan_config,
-};
-
-static const u32 mlxreg_fan_hwmon_pwm_config[] = {
- HWMON_PWM_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = {
- .type = hwmon_pwm,
- .config = mlxreg_fan_hwmon_pwm_config,
-};
-
static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
- &mlxreg_fan_hwmon_fan,
- &mlxreg_fan_hwmon_pwm,
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT),
NULL
};
@@ -360,15 +344,57 @@ static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
.set_cur_state = mlxreg_fan_set_cur_state,
};
+static int mlxreg_fan_connect_verify(struct mlxreg_fan *fan,
+ struct mlxreg_core_data *data)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, data->capability, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query capability register 0x%08x\n",
+ data->capability);
+ return err;
+ }
+
+ return !!(regval & data->bit);
+}
+
+static int mlxreg_fan_speed_divider_get(struct mlxreg_fan *fan,
+ struct mlxreg_core_data *data)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, data->capability, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query capability register 0x%08x\n",
+ data->capability);
+ return err;
+ }
+
+ /*
+ * Set the divider value according to the capability register, in case
+ * it contains a valid value. Otherwise use the default value. This
+ * validation protects against older hardware, in which this register
+ * can return zero.
+ */
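+ /* e.g. a capability value of 4 selects 4 * 283 = 1132, the old fixed default divider */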
+ if (regval > 0 && regval <= MLXREG_FAN_TACHO_DIV_SCALE_MAX)
+ fan->divider = regval * MLXREG_FAN_TACHO_DIV_MIN;
+
+ return 0;
+}
+
static int mlxreg_fan_config(struct mlxreg_fan *fan,
struct mlxreg_core_platform_data *pdata)
{
struct mlxreg_core_data *data = pdata->data;
bool configured = false;
int tacho_num = 0, i;
+ int err;
fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF;
- fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF;
+ fan->divider = MLXREG_FAN_TACHO_DIV_DEF;
for (i = 0; i < pdata->counter; i++, data++) {
if (strnstr(data->label, "tacho", sizeof(data->label))) {
if (tacho_num == MLXREG_FAN_MAX_TACHO) {
@@ -376,6 +402,17 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
data->label);
return -EINVAL;
}
+
+ if (data->capability) {
+ err = mlxreg_fan_connect_verify(fan, data);
+ if (err < 0)
+ return err;
+ else if (!err) {
+ tacho_num++;
+ continue;
+ }
+ }
+
fan->tacho[tacho_num].reg = data->reg;
fan->tacho[tacho_num].mask = data->mask;
fan->tacho[tacho_num++].connected = true;
@@ -394,13 +431,21 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
return -EINVAL;
}
/* Validate that conf parameters are not zeros. */
- if (!data->mask || !data->bit) {
+ if (!data->mask && !data->bit && !data->capability) {
dev_err(fan->dev, "invalid conf entry params: %s\n",
data->label);
return -EINVAL;
}
- fan->samples = data->mask;
- fan->divider = data->bit;
+ if (data->capability) {
+ err = mlxreg_fan_speed_divider_get(fan, data);
+ if (err)
+ return err;
+ } else {
+ if (data->mask)
+ fan->samples = data->mask;
+ if (data->bit)
+ fan->divider = data->bit;
+ }
configured = true;
} else {
dev_err(fan->dev, "invalid label: %s\n", data->label);
@@ -420,42 +465,42 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
static int mlxreg_fan_probe(struct platform_device *pdev)
{
struct mlxreg_core_platform_data *pdata;
+ struct device *dev = &pdev->dev;
struct mlxreg_fan *fan;
struct device *hwm;
int err;
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "Failed to get platform data.\n");
+ dev_err(dev, "Failed to get platform data.\n");
return -EINVAL;
}
- fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ fan = devm_kzalloc(dev, sizeof(*fan), GFP_KERNEL);
if (!fan)
return -ENOMEM;
- fan->dev = &pdev->dev;
+ fan->dev = dev;
fan->regmap = pdata->regmap;
- platform_set_drvdata(pdev, fan);
err = mlxreg_fan_config(fan, pdata);
if (err)
return err;
- hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+ hwm = devm_hwmon_device_register_with_info(dev, "mlxreg_fan",
fan,
&mlxreg_fan_hwmon_chip_info,
NULL);
if (IS_ERR(hwm)) {
- dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ dev_err(dev, "Failed to register hwmon device\n");
return PTR_ERR(hwm);
}
if (IS_REACHABLE(CONFIG_THERMAL)) {
- fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
- &mlxreg_fan_cooling_ops);
+ fan->cdev = devm_thermal_of_cooling_device_register(dev,
+ NULL, "mlxreg_fan", fan, &mlxreg_fan_cooling_ops);
if (IS_ERR(fan->cdev)) {
- dev_err(&pdev->dev, "Failed to register cooling device\n");
+ dev_err(dev, "Failed to register cooling device\n");
return PTR_ERR(fan->cdev);
}
}
@@ -463,22 +508,11 @@ static int mlxreg_fan_probe(struct platform_device *pdev)
return 0;
}
-static int mlxreg_fan_remove(struct platform_device *pdev)
-{
- struct mlxreg_fan *fan = platform_get_drvdata(pdev);
-
- if (IS_REACHABLE(CONFIG_THERMAL))
- thermal_cooling_device_unregister(fan->cdev);
-
- return 0;
-}
-
static struct platform_driver mlxreg_fan_driver = {
.driver = {
.name = "mlxreg-fan",
},
.probe = mlxreg_fan_probe,
- .remove = mlxreg_fan_remove,
};
module_platform_driver(mlxreg_fan_driver);
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 82c7de7b4639..04516789b070 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -400,89 +400,53 @@ static int nct7904_detect(struct i2c_client *client,
return 0;
}
-static const u32 nct7904_in_config[] = {
- HWMON_I_INPUT, /* dummy, skipped in is_visible */
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_in = {
- .type = hwmon_in,
- .config = nct7904_in_config,
-};
-
-static const u32 nct7904_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_fan = {
- .type = hwmon_fan,
- .config = nct7904_fan_config,
-};
-
-static const u32 nct7904_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_pwm = {
- .type = hwmon_pwm,
- .config = nct7904_pwm_config,
-};
-
-static const u32 nct7904_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_temp = {
- .type = hwmon_temp,
- .config = nct7904_temp_config,
-};
-
static const struct hwmon_channel_info *nct7904_info[] = {
- &nct7904_in,
- &nct7904_fan,
- &nct7904_pwm,
- &nct7904_temp,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT, /* dummy, skipped in is_visible */
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT),
NULL
};
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index b3b907bdfb63..09aaefa6fdb8 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -629,51 +629,33 @@ static umode_t npcm7xx_is_visible(const void *data,
}
}
-static const u32 npcm7xx_pwm_config[] = {
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info npcm7xx_pwm = {
- .type = hwmon_pwm,
- .config = npcm7xx_pwm_config,
-};
-
-static const u32 npcm7xx_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info npcm7xx_fan = {
- .type = hwmon_fan,
- .config = npcm7xx_fan_config,
-};
-
static const struct hwmon_channel_info *npcm7xx_info[] = {
- &npcm7xx_pwm,
- &npcm7xx_fan,
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT),
NULL
};
@@ -864,10 +846,8 @@ static int npcm7xx_create_pwm_cooling(struct device *dev,
snprintf(cdev->name, THERMAL_NAME_LENGTH, "%pOFn%d", child,
pwm_port);
- cdev->tcdev = thermal_of_cooling_device_register(child,
- cdev->name,
- cdev,
- &npcm7xx_pwm_cool_ops);
+ cdev->tcdev = devm_thermal_of_cooling_device_register(dev, child,
+ cdev->name, cdev, &npcm7xx_pwm_cool_ops);
if (IS_ERR(cdev->tcdev))
return PTR_ERR(cdev->tcdev);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e4f9f7ce92fa..fd47c36a52bc 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -629,29 +629,9 @@ static umode_t ntc_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-static const u32 ntc_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info ntc_chip = {
- .type = hwmon_chip,
- .config = ntc_chip_config,
-};
-
-static const u32 ntc_temp_config[] = {
- HWMON_T_INPUT, HWMON_T_TYPE,
- 0
-};
-
-static const struct hwmon_channel_info ntc_temp = {
- .type = hwmon_temp,
- .config = ntc_temp_config,
-};
-
static const struct hwmon_channel_info *ntc_info[] = {
- &ntc_chip,
- &ntc_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_TYPE),
NULL
};
diff --git a/drivers/hwmon/occ/Kconfig b/drivers/hwmon/occ/Kconfig
index 66686628fb53..1658634a053e 100644
--- a/drivers/hwmon/occ/Kconfig
+++ b/drivers/hwmon/occ/Kconfig
@@ -5,11 +5,14 @@
config SENSORS_OCC_P8_I2C
tristate "POWER8 OCC through I2C"
depends on I2C
+ depends on ARM || ARM64 || COMPILE_TEST
select SENSORS_OCC
help
This option enables support for monitoring sensors provided by the
- On-Chip Controller (OCC) on a POWER8 processor. Communications with
- the OCC are established through I2C bus.
+ On-Chip Controller (OCC) on a POWER8 processor. However, this driver
+ can only run on a baseboard management controller (BMC) connected to
+ the P8, not the POWER processor itself. Communications with the OCC are
+ established over an I2C bus.
This driver can also be built as a module. If so, the module will be
called occ-p8-hwmon.
@@ -17,15 +20,17 @@ config SENSORS_OCC_P8_I2C
config SENSORS_OCC_P9_SBE
tristate "POWER9 OCC through SBE"
depends on FSI_OCC
+ depends on ARM || ARM64 || COMPILE_TEST
select SENSORS_OCC
help
This option enables support for monitoring sensors provided by the
- On-Chip Controller (OCC) on a POWER9 processor. Communications with
- the OCC are established through SBE fifo on an FSI bus.
+ On-Chip Controller (OCC) on a POWER9 processor. However, this driver
+ can only run on a baseboard management controller (BMC) connected to
+ the P9, not the POWER processor itself. Communications with the OCC are
+ established through the SBE FIFO on an FSI bus.
This driver can also be built as a module. If so, the module will be
called occ-p9-hwmon.
config SENSORS_OCC
- bool "POWER On-Chip Controller"
- depends on SENSORS_OCC_P8_I2C || SENSORS_OCC_P9_SBE
+ tristate
diff --git a/drivers/hwmon/occ/Makefile b/drivers/hwmon/occ/Makefile
index 3fec12ddc7e7..493588d5a9d3 100644
--- a/drivers/hwmon/occ/Makefile
+++ b/drivers/hwmon/occ/Makefile
@@ -1,5 +1,7 @@
-occ-p8-hwmon-objs := common.o sysfs.o p8_i2c.o
-occ-p9-hwmon-objs := common.o sysfs.o p9_sbe.o
+occ-hwmon-common-objs := common.o sysfs.o
+occ-p8-hwmon-objs := p8_i2c.o
+occ-p9-hwmon-objs := p9_sbe.o
+obj-$(CONFIG_SENSORS_OCC) += occ-hwmon-common.o
obj-$(CONFIG_SENSORS_OCC_P8_I2C) += occ-p8-hwmon.o
obj-$(CONFIG_SENSORS_OCC_P9_SBE) += occ-p9-hwmon.o
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index b91a80abf724..13a6290c8d25 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -2,11 +2,13 @@
// Copyright IBM Corp 2019
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <asm/unaligned.h>
@@ -139,6 +141,7 @@ static int occ_poll(struct occ *occ)
/* mutex should already be locked if necessary */
rc = occ->send_cmd(occ, cmd);
if (rc) {
+ occ->last_error = rc;
if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD)
occ->error = rc;
@@ -147,6 +150,7 @@ static int occ_poll(struct occ *occ)
/* clear error since communication was successful */
occ->error_count = 0;
+ occ->last_error = 0;
occ->error = 0;
/* check for safe state */
@@ -208,6 +212,8 @@ int occ_update_response(struct occ *occ)
if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
rc = occ_poll(occ);
occ->last_update = jiffies;
+ } else {
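+ /* Not yet time to poll; report the most recent transfer error, if any */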
+ rc = occ->last_error;
}
mutex_unlock(&occ->lock);
@@ -890,6 +896,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
s++;
}
}
+
+ s = (sensors->power.num_sensors * 4) + 1;
} else {
for (i = 0; i < sensors->power.num_sensors; ++i) {
s = i + 1;
@@ -918,11 +926,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
show_power, NULL, 3, i);
attr++;
}
- }
- if (sensors->caps.num_sensors >= 1) {
s = sensors->power.num_sensors + 1;
+ }
+ if (sensors->caps.num_sensors >= 1) {
snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
0, 0);
@@ -1097,3 +1105,8 @@ int occ_setup(struct occ *occ, const char *name)
return rc;
}
+EXPORT_SYMBOL_GPL(occ_setup);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("Common OCC hwmon code");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index ed2cf4245295..fc13f3c73c47 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -106,7 +106,8 @@ struct occ {
struct attribute_group group;
const struct attribute_group *groups[2];
- int error; /* latest transfer error */
+ int error; /* final transfer error after retry */
+ int last_error; /* latest transfer error */
unsigned int error_count; /* number of xfr errors observed */
unsigned long last_safe; /* time OCC entered "safe" state */
diff --git a/drivers/hwmon/occ/sysfs.c b/drivers/hwmon/occ/sysfs.c
index fe3d15e416e7..c73be0747e66 100644
--- a/drivers/hwmon/occ/sysfs.c
+++ b/drivers/hwmon/occ/sysfs.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
@@ -42,16 +43,16 @@ static ssize_t occ_sysfs_show(struct device *dev,
val = !!(header->status & OCC_STAT_ACTIVE);
break;
case 2:
- val = !!(header->status & OCC_EXT_STAT_DVFS_OT);
+ val = !!(header->ext_status & OCC_EXT_STAT_DVFS_OT);
break;
case 3:
- val = !!(header->status & OCC_EXT_STAT_DVFS_POWER);
+ val = !!(header->ext_status & OCC_EXT_STAT_DVFS_POWER);
break;
case 4:
- val = !!(header->status & OCC_EXT_STAT_MEM_THROTTLE);
+ val = !!(header->ext_status & OCC_EXT_STAT_MEM_THROTTLE);
break;
case 5:
- val = !!(header->status & OCC_EXT_STAT_QUICK_DROP);
+ val = !!(header->ext_status & OCC_EXT_STAT_QUICK_DROP);
break;
case 6:
val = header->occ_state;
@@ -62,9 +63,6 @@ static ssize_t occ_sysfs_show(struct device *dev,
else
val = 1;
break;
- case 8:
- val = occ->error;
- break;
default:
return -EINVAL;
}
@@ -72,6 +70,16 @@ static ssize_t occ_sysfs_show(struct device *dev,
return snprintf(buf, PAGE_SIZE - 1, "%d\n", val);
}
+static ssize_t occ_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct occ *occ = dev_get_drvdata(dev);
+
+ occ_update_response(occ);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", occ->error);
+}
+
static SENSOR_DEVICE_ATTR(occ_master, 0444, occ_sysfs_show, NULL, 0);
static SENSOR_DEVICE_ATTR(occ_active, 0444, occ_sysfs_show, NULL, 1);
static SENSOR_DEVICE_ATTR(occ_dvfs_overtemp, 0444, occ_sysfs_show, NULL, 2);
@@ -80,7 +88,7 @@ static SENSOR_DEVICE_ATTR(occ_mem_throttle, 0444, occ_sysfs_show, NULL, 4);
static SENSOR_DEVICE_ATTR(occ_quick_pwr_drop, 0444, occ_sysfs_show, NULL, 5);
static SENSOR_DEVICE_ATTR(occ_state, 0444, occ_sysfs_show, NULL, 6);
static SENSOR_DEVICE_ATTR(occs_present, 0444, occ_sysfs_show, NULL, 7);
-static SENSOR_DEVICE_ATTR(occ_error, 0444, occ_sysfs_show, NULL, 8);
+static DEVICE_ATTR_RO(occ_error);
static struct attribute *occ_attributes[] = {
&sensor_dev_attr_occ_master.dev_attr.attr,
@@ -91,7 +99,7 @@ static struct attribute *occ_attributes[] = {
&sensor_dev_attr_occ_quick_pwr_drop.dev_attr.attr,
&sensor_dev_attr_occ_state.dev_attr.attr,
&sensor_dev_attr_occs_present.dev_attr.attr,
- &sensor_dev_attr_occ_error.dev_attr.attr,
+ &dev_attr_occ_error.attr,
NULL
};
@@ -155,7 +163,7 @@ void occ_sysfs_poll_done(struct occ *occ)
}
if (occ->error && occ->error != occ->prev_error) {
- name = sensor_dev_attr_occ_error.dev_attr.attr.name;
+ name = dev_attr_occ_error.attr.name;
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
}
@@ -177,3 +185,4 @@ void occ_shutdown(struct occ *occ)
{
sysfs_remove_group(&occ->bus_dev->kobj, &occ_sysfs);
}
+EXPORT_SYMBOL_GPL(occ_shutdown);
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index d1a3f2040c00..58eee8fa3e6d 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
#define LD_IN 1
#define LD_TEMP 1
+static inline int superio_enter(int sioaddr)
+{
+ if (!request_muxed_region(sioaddr, 2, DRVNAME))
+ return -EBUSY;
+ return 0;
+}
+
static inline void superio_outb(int sioaddr, int reg, int val)
{
outb(reg, sioaddr);
@@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr)
{
outb(0x02, sioaddr);
outb(0x02, sioaddr + 1);
+ release_region(sioaddr, 2);
}
/*
@@ -1195,7 +1203,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
{
u16 val;
u8 cfg, cfg_b;
- int i, err = 0;
+ int i, err;
+
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
/* Identify device */
val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 629cb45f8557..7edab7e30eaf 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -54,6 +54,24 @@ config SENSORS_IR35221
This driver can also be built as a module. If so, the module will
be called ir35221.
+config SENSORS_IR38064
+ tristate "Infineon IR38064"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ IR38064.
+
+ This driver can also be built as a module. If so, the module will
+ be called ir38064.
+
+config SENSORS_ISL68137
+ tristate "Intersil ISL68137"
+ help
+ If you say yes here you get hardware monitoring support for Intersil
+ ISL68137.
+
+ This driver can also be built as a module. If so, the module will
+ be called isl68137.
+
config SENSORS_LM25066
tristate "National Semiconductor LM25066 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index ea0e39518c21..2219b9300316 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
+obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
+obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
new file mode 100644
index 000000000000..1820f5077f66
--- /dev/null
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Infineon IR38064
+ *
+ * Copyright (c) 2017 Google Inc
+ *
+ * VOUT_MODE is not supported by the device. The driver fakes VOUT linear16
+ * mode with exponent value -8 as direct mode with m=256/b=0/R=0.
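+ * A raw VOUT reading of 0x0180 (384), for example, decodes to 384 / 256
+ * = 1.5V under either interpretation, since the 2^-8 scaling and m=256
+ * are equivalent.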
+ *
+ * The device supports VOUT_PEAK, IOUT_PEAK, and TEMPERATURE_PEAK; however,
+ * this driver does not currently support them.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info ir38064_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .m[PSC_VOLTAGE_OUT] = 256,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT,
+};
+
+static int ir38064_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &ir38064_info);
+}
+
+static const struct i2c_device_id ir38064_id[] = {
+ {"ir38064", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ir38064_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ir38064_driver = {
+ .driver = {
+ .name = "ir38064",
+ },
+ .probe = ir38064_probe,
+ .remove = pmbus_do_remove,
+ .id_table = ir38064_id,
+};
+
+module_i2c_driver(ir38064_driver);
+
+MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon IR38064");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
new file mode 100644
index 000000000000..515596c92fe1
--- /dev/null
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Intersil ISL68137
+ *
+ * Copyright (c) 2017 Google Inc
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "pmbus.h"
+
+#define ISL68137_VOUT_AVS 0x30
+
+static ssize_t isl68137_avs_enable_show_page(struct i2c_client *client,
+ int page,
+ char *buf)
+{
+ int val = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
+
+ return sprintf(buf, "%d\n",
+ (val & ISL68137_VOUT_AVS) == ISL68137_VOUT_AVS ? 1 : 0);
+}
+
+static ssize_t isl68137_avs_enable_store_page(struct i2c_client *client,
+ int page,
+ const char *buf, size_t count)
+{
+ int rc, op_val;
+ bool result;
+
+ rc = kstrtobool(buf, &result);
+ if (rc)
+ return rc;
+
+ op_val = result ? ISL68137_VOUT_AVS : 0;
+
+ /*
+ * Writes to the VOUT setpoint over AVSBus will persist after the VRM is
+ * switched to PMBus control. Switching back to AVSBus control
+ * restores this persisted setpoint rather than re-initializing to
+ * PMBus VOUT_COMMAND. Writing VOUT_COMMAND first over PMBus before
+ * enabling AVS control is the workaround.
+ */
+ if (op_val == ISL68137_VOUT_AVS) {
+ rc = pmbus_read_word_data(client, page, PMBUS_VOUT_COMMAND);
+ if (rc < 0)
+ return rc;
+
+ rc = pmbus_write_word_data(client, page, PMBUS_VOUT_COMMAND,
+ rc);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = pmbus_update_byte_data(client, page, PMBUS_OPERATION,
+ ISL68137_VOUT_AVS, op_val);
+
+ return (rc < 0) ? rc : count;
+}
+
+static ssize_t isl68137_avs_enable_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return isl68137_avs_enable_show_page(client, attr->index, buf);
+}
+
+static ssize_t isl68137_avs_enable_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return isl68137_avs_enable_store_page(client, attr->index, buf, count);
+}
+
+static SENSOR_DEVICE_ATTR_RW(avs0_enable, isl68137_avs_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(avs1_enable, isl68137_avs_enable, 1);
+
+static struct attribute *enable_attrs[] = {
+ &sensor_dev_attr_avs0_enable.dev_attr.attr,
+ &sensor_dev_attr_avs1_enable.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group enable_group = {
+ .attrs = enable_attrs,
+};
+
+static const struct attribute_group *attribute_groups[] = {
+ &enable_group,
+ NULL,
+};
+
+static struct pmbus_driver_info isl68137_info = {
+ .pages = 2,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 2,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 1,
+ .m[PSC_POWER] = 1,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 0,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2
+ | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
+ .groups = attribute_groups,
+};
+
+static int isl68137_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &isl68137_info);
+}
+
+static const struct i2c_device_id isl68137_id[] = {
+ {"isl68137", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, isl68137_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver isl68137_driver = {
+ .driver = {
+ .name = "isl68137",
+ },
+ .probe = isl68137_probe,
+ .remove = pmbus_do_remove,
+ .id_table = isl68137_id,
+};
+
+module_i2c_driver(isl68137_driver);
+
+MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
+MODULE_DESCRIPTION("PMBus driver for Intersil ISL68137");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 53db78753a0d..715d4ab57516 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -26,6 +26,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/log2.h>
#include "pmbus.h"
enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
@@ -39,12 +40,15 @@ enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_CLEAR_PIN_PEAK 0xd6
#define LM25066_DEVICE_SETUP 0xd9
#define LM25066_READ_AVG_VIN 0xdc
+#define LM25066_SAMPLES_FOR_AVG 0xdb
#define LM25066_READ_AVG_VOUT 0xdd
#define LM25066_READ_AVG_IIN 0xde
#define LM25066_READ_AVG_PIN 0xdf
#define LM25066_DEV_SETUP_CL BIT(4) /* Current limit */
+#define LM25066_SAMPLES_FOR_AVG_MAX 4096
+
/* LM25056 only */
#define LM25056_VAUX_OV_WARN_LIMIT 0xe3
@@ -284,6 +288,12 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = 0;
break;
+ case PMBUS_VIRT_SAMPLES:
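+ /* The register value is the log2 of the sample count */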
+ ret = pmbus_read_byte_data(client, 0, LM25066_SAMPLES_FOR_AVG);
+ if (ret < 0)
+ break;
+ ret = 1 << ret;
+ break;
default:
ret = -ENODATA;
break;
@@ -398,6 +408,11 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
break;
+ case PMBUS_VIRT_SAMPLES:
+ word = clamp_val(word, 1, LM25066_SAMPLES_FOR_AVG_MAX);
+ ret = pmbus_write_byte_data(client, 0, LM25066_SAMPLES_FOR_AVG,
+ ilog2(word));
+ break;
default:
ret = -ENODATA;
break;
@@ -438,7 +453,7 @@ static int lm25066_probe(struct i2c_client *client,
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VMON
| PMBUS_HAVE_PIN | PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT
- | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_SAMPLES;
if (data->id == lm25056) {
info->func[0] |= PMBUS_HAVE_STATUS_VMON;
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 1d24397d36ec..59f85658313c 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -217,6 +217,20 @@ enum pmbus_regs {
PMBUS_VIRT_PWM_ENABLE_2,
PMBUS_VIRT_PWM_ENABLE_3,
PMBUS_VIRT_PWM_ENABLE_4,
+
+ /* Samples for average
+ *
+ * Drivers wanting to let users change the number of samples used for
+ * averaged values should implement support in the {read,write}_word_data
+ * callbacks, either for PMBUS_VIRT_SAMPLES if one setting applies to all
+ * types of measurements, or for any number of the specific
+ * PMBUS_VIRT_*_SAMPLES registers to allow individual control.
+ */
+ PMBUS_VIRT_SAMPLES,
+ PMBUS_VIRT_IN_SAMPLES,
+ PMBUS_VIRT_CURR_SAMPLES,
+ PMBUS_VIRT_POWER_SAMPLES,
+ PMBUS_VIRT_TEMP_SAMPLES,
};
/*
@@ -371,6 +385,7 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_VMON BIT(19)
#define PMBUS_HAVE_PWM12 BIT(20)
#define PMBUS_HAVE_PWM34 BIT(21)
+#define PMBUS_HAVE_SAMPLES BIT(22)
#define PMBUS_PAGE_VIRTUAL BIT(31)
@@ -417,6 +432,9 @@ struct pmbus_driver_info {
/* Regulator functionality, if supported by this chip driver. */
int num_regulators;
const struct regulator_desc *reg_desc;
+
+ /* custom attributes */
+ const struct attribute_group **groups;
};
/* Regulator ops */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 2e2b5851139c..32a74b8be6bd 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -103,7 +103,7 @@ struct pmbus_data {
int max_attributes;
int num_attributes;
struct attribute_group group;
- const struct attribute_group *groups[2];
+ const struct attribute_group **groups;
struct dentry *debugfs; /* debugfs device directory */
struct pmbus_sensor *sensors;
@@ -1073,7 +1073,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
name, seq, type);
boolean->s1 = s1;
boolean->s2 = s2;
- pmbus_attr_init(a, boolean->name, S_IRUGO, pmbus_show_boolean, NULL,
+ pmbus_attr_init(a, boolean->name, 0444, pmbus_show_boolean, NULL,
(reg << 16) | mask);
return pmbus_add_attribute(data, &a->dev_attr.attr);
@@ -1107,7 +1107,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
sensor->update = update;
sensor->convert = convert;
pmbus_dev_attr_init(a, sensor->name,
- readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
+ readonly ? 0444 : 0644,
pmbus_show_sensor, pmbus_set_sensor);
if (pmbus_add_attribute(data, &a->attr))
@@ -1139,7 +1139,7 @@ static int pmbus_add_label(struct pmbus_data *data,
snprintf(label->label, sizeof(label->label), "%s%d", lstring,
index);
- pmbus_dev_attr_init(a, label->name, S_IRUGO, pmbus_show_label, NULL);
+ pmbus_dev_attr_init(a, label->name, 0444, pmbus_show_label, NULL);
return pmbus_add_attribute(data, &a->attr);
}
@@ -1901,6 +1901,112 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
return 0;
}
+struct pmbus_samples_attr {
+ int reg;
+ char *name;
+};
+
+struct pmbus_samples_reg {
+ int page;
+ struct pmbus_samples_attr *attr;
+ struct device_attribute dev_attr;
+};
+
+static struct pmbus_samples_attr pmbus_samples_registers[] = {
+ {
+ .reg = PMBUS_VIRT_SAMPLES,
+ .name = "samples",
+ }, {
+ .reg = PMBUS_VIRT_IN_SAMPLES,
+ .name = "in_samples",
+ }, {
+ .reg = PMBUS_VIRT_CURR_SAMPLES,
+ .name = "curr_samples",
+ }, {
+ .reg = PMBUS_VIRT_POWER_SAMPLES,
+ .name = "power_samples",
+ }, {
+ .reg = PMBUS_VIRT_TEMP_SAMPLES,
+ .name = "temp_samples",
+ }
+};
+
+#define to_samples_reg(x) container_of(x, struct pmbus_samples_reg, dev_attr)
+
+static ssize_t pmbus_show_samples(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int val;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+
+ val = _pmbus_read_word_data(client, reg->page, reg->attr->reg);
+ if (val < 0)
+ return val;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t pmbus_set_samples(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int ret;
+ long val;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+
+ if (kstrtol(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ ret = _pmbus_write_word_data(client, reg->page, reg->attr->reg, val);
+
+ return ret ? : count;
+}
+
+static int pmbus_add_samples_attr(struct pmbus_data *data, int page,
+ struct pmbus_samples_attr *attr)
+{
+ struct pmbus_samples_reg *reg;
+
+ reg = devm_kzalloc(data->dev, sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ reg->attr = attr;
+ reg->page = page;
+
+ pmbus_dev_attr_init(&reg->dev_attr, attr->name, 0644,
+ pmbus_show_samples, pmbus_set_samples);
+
+ return pmbus_add_attribute(data, &reg->dev_attr.attr);
+}
+
+static int pmbus_add_samples_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int s;
+
+ if (!(info->func[0] & PMBUS_HAVE_SAMPLES))
+ return 0;
+
+ for (s = 0; s < ARRAY_SIZE(pmbus_samples_registers); s++) {
+ struct pmbus_samples_attr *attr;
+ int ret;
+
+ attr = &pmbus_samples_registers[s];
+ if (!pmbus_check_word_register(client, 0, attr->reg))
+ continue;
+
+ ret = pmbus_add_samples_attr(data, 0, attr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int pmbus_find_attributes(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -1932,6 +2038,10 @@ static int pmbus_find_attributes(struct i2c_client *client,
/* Fans */
ret = pmbus_add_fan_attributes(client, data);
+ if (ret)
+ return ret;
+
+ ret = pmbus_add_samples_attributes(client, data);
return ret;
}
@@ -2305,6 +2415,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
struct device *dev = &client->dev;
const struct pmbus_platform_data *pdata = dev_get_platdata(dev);
struct pmbus_data *data;
+ size_t groups_num = 0;
int ret;
if (!info)
@@ -2319,6 +2430,15 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
if (!data)
return -ENOMEM;
+ if (info->groups)
+ while (info->groups[groups_num])
+ groups_num++;
+
+ data->groups = devm_kcalloc(dev, groups_num + 2, sizeof(void *),
+ GFP_KERNEL);
+ if (!data->groups)
+ return -ENOMEM;
+
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->dev = dev;
@@ -2346,6 +2466,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
}
data->groups[0] = &data->group;
+ memcpy(data->groups + 1, info->groups, sizeof(void *) * groups_num);
data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
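With the info->groups plumbing added above, a chip driver can hang extra sysfs attributes off the hwmon device without touching the core. A rough sketch; the foo_ names and the 0x99 register are assumptions, and note that the hwmon device's parent is the I2C client, as in pmbus_show_samples() above:

static ssize_t foo_vendor_rev_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i2c_client *client = to_i2c_client(dev->parent);
	int ret = i2c_smbus_read_byte_data(client, 0x99);	/* assumed reg */

	if (ret < 0)
		return ret;
	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static DEVICE_ATTR_RO(foo_vendor_rev);

static struct attribute *foo_attrs[] = {
	&dev_attr_foo_vendor_rev.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);		/* provides the NULL-terminated foo_groups[] */

static struct pmbus_driver_info foo_info = {
	.pages = 1,
	.groups = foo_groups,	/* appended after the core group in probe */
	/* ... */
};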
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 2bc352c5357f..3fd5105ee9ae 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -97,7 +97,7 @@ static const struct i2c_device_id tps53679_id[] = {
MODULE_DEVICE_TABLE(i2c, tps53679_id);
-static const struct of_device_id tps53679_of_match[] = {
+static const struct of_device_id __maybe_unused tps53679_of_match[] = {
{.compatible = "ti,tps53679"},
{}
};
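The __maybe_unused annotation here (and on the ucd9000, ucd9200, stts751, tmp102, tmp103 and tmp421 tables below) covers the configuration where the table is referenced only through of_match_ptr(), the usual pattern in these drivers: with CONFIG_OF disabled that macro evaluates to NULL, the table becomes unreferenced, and W=1 builds warn about it. A sketch of the wiring involved; foo_probe and foo_id are placeholders:

static const struct of_device_id __maybe_unused foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when !CONFIG_OF, leaving foo_of_match otherwise unused */
		.of_match_table = of_match_ptr(foo_of_match),
	},
	.probe = foo_probe,
	.id_table = foo_id,
};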
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ae93885fccd8..40a3e3d0e661 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -151,7 +151,7 @@ static const struct i2c_device_id ucd9000_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ucd9000_id);
-static const struct of_device_id ucd9000_of_match[] = {
+static const struct of_device_id __maybe_unused ucd9000_of_match[] = {
{
.compatible = "ti,ucd9000",
.data = (void *)ucd9000
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index 3ed94585837a..fec7656700db 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -47,7 +47,7 @@ static const struct i2c_device_id ucd9200_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ucd9200_id);
-static const struct of_device_id ucd9200_of_match[] = {
+static const struct of_device_id __maybe_unused ucd9200_of_match[] = {
{
.compatible = "ti,cd9200",
.data = (void *)ucd9200
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 167221c7628a..5fb2745f0226 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -18,6 +18,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -26,6 +27,7 @@
#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
+#include <linux/timer.h>
#define MAX_PWM 255
@@ -33,6 +35,14 @@ struct pwm_fan_ctx {
struct mutex lock;
struct pwm_device *pwm;
struct regulator *reg_en;
+
+ int irq;
+ atomic_t pulses;
+ unsigned int rpm;
+ u8 pulses_per_revolution;
+ ktime_t sample_start;
+ struct timer_list rpm_timer;
+
unsigned int pwm_value;
unsigned int pwm_fan_state;
unsigned int pwm_fan_max_state;
@@ -40,6 +50,32 @@ struct pwm_fan_ctx {
struct thermal_cooling_device *cdev;
};
+/* This handler assumes a self-resetting, edge-triggered interrupt. */
+static irqreturn_t pulse_handler(int irq, void *dev_id)
+{
+ struct pwm_fan_ctx *ctx = dev_id;
+
+ atomic_inc(&ctx->pulses);
+
+ return IRQ_HANDLED;
+}
+
+static void sample_timer(struct timer_list *t)
+{
+ struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
+ int pulses;
+ u64 tmp;
+
+ pulses = atomic_read(&ctx->pulses);
+ atomic_sub(pulses, &ctx->pulses);
+ tmp = (u64)pulses * 60 * 1000;
+ /* pulses / pulses-per-rev revolutions over the elapsed-ms window */
+ do_div(tmp, ctx->pulses_per_revolution *
+ (unsigned int)ktime_ms_delta(ktime_get(), ctx->sample_start));
+ ctx->rpm = tmp;
+
+ ctx->sample_start = ktime_get();
+ mod_timer(&ctx->rpm_timer, jiffies + HZ);
+}
+
static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
{
unsigned long period;
@@ -100,15 +136,45 @@ static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", ctx->pwm_value);
}
+static ssize_t rpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ctx->rpm);
+}
static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, rpm, 0);
static struct attribute *pwm_fan_attrs[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
NULL,
};
-ATTRIBUTE_GROUPS(pwm_fan);
+static umode_t pwm_fan_attrs_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ /* Hide fan1_input if no tachometer interrupt is available */
+ if (n == 1 && ctx->irq <= 0)
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group pwm_fan_group = {
+ .attrs = pwm_fan_attrs,
+ .is_visible = pwm_fan_attrs_visible,
+};
+
+static const struct attribute_group *pwm_fan_groups[] = {
+ &pwm_fan_group,
+ NULL,
+};
/* thermal cooling device callbacks */
static int pwm_fan_get_max_state(struct thermal_cooling_device *cdev,
@@ -207,33 +273,51 @@ static int pwm_fan_of_get_cooling_data(struct device *dev,
return 0;
}
+static void pwm_fan_regulator_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static void pwm_fan_pwm_disable(void *__ctx)
+{
+ struct pwm_fan_ctx *ctx = __ctx;
+ pwm_disable(ctx->pwm);
+ del_timer_sync(&ctx->rpm_timer);
+}
+
static int pwm_fan_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
+ struct device *dev = &pdev->dev;
struct pwm_fan_ctx *ctx;
struct device *hwmon;
int ret;
struct pwm_state state = { };
+ u32 ppr = 2;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
mutex_init(&ctx->lock);
- ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
+ ctx->pwm = devm_of_pwm_get(dev, dev->of_node, NULL);
if (IS_ERR(ctx->pwm)) {
ret = PTR_ERR(ctx->pwm);
if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get PWM: %d\n", ret);
+ dev_err(dev, "Could not get PWM: %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, ctx);
- ctx->reg_en = devm_regulator_get_optional(&pdev->dev, "fan");
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq == -EPROBE_DEFER)
+ return ctx->irq;
+
+ ctx->reg_en = devm_regulator_get_optional(dev, "fan");
if (IS_ERR(ctx->reg_en)) {
if (PTR_ERR(ctx->reg_en) != -ENODEV)
return PTR_ERR(ctx->reg_en);
@@ -242,10 +326,11 @@ static int pwm_fan_probe(struct platform_device *pdev)
} else {
ret = regulator_enable(ctx->reg_en);
if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable fan supply: %d\n", ret);
+ dev_err(dev, "Failed to enable fan supply: %d\n", ret);
return ret;
}
+ devm_add_action_or_reset(dev, pwm_fan_regulator_disable,
+ ctx->reg_en);
}
ctx->pwm_value = MAX_PWM;
@@ -257,62 +342,57 @@ static int pwm_fan_probe(struct platform_device *pdev)
ret = pwm_apply_state(ctx->pwm, &state);
if (ret) {
- dev_err(&pdev->dev, "Failed to configure PWM\n");
- goto err_reg_disable;
+ dev_err(dev, "Failed to configure PWM: %d\n", ret);
+ return ret;
+ }
+ timer_setup(&ctx->rpm_timer, sample_timer, 0);
+ devm_add_action_or_reset(dev, pwm_fan_pwm_disable, ctx);
+
+ of_property_read_u32(dev->of_node, "pulses-per-revolution", &ppr);
+ ctx->pulses_per_revolution = ppr;
+ if (!ctx->pulses_per_revolution) {
+ dev_err(dev, "pulses-per-revolution can't be zero.\n");
+ return -EINVAL;
+ }
+
+ if (ctx->irq > 0) {
+ ret = devm_request_irq(dev, ctx->irq, pulse_handler, 0,
+ pdev->name, ctx);
+ if (ret) {
+ dev_err(dev, "Failed to request interrupt: %d\n", ret);
+ return ret;
+ }
+ ctx->sample_start = ktime_get();
+ mod_timer(&ctx->rpm_timer, jiffies + HZ);
}
- hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, "pwmfan",
+ hwmon = devm_hwmon_device_register_with_groups(dev, "pwmfan",
ctx, pwm_fan_groups);
if (IS_ERR(hwmon)) {
- dev_err(&pdev->dev, "Failed to register hwmon device\n");
- ret = PTR_ERR(hwmon);
- goto err_pwm_disable;
+ dev_err(dev, "Failed to register hwmon device\n");
+ return PTR_ERR(hwmon);
}
- ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx);
+ ret = pwm_fan_of_get_cooling_data(dev, ctx);
if (ret)
return ret;
ctx->pwm_fan_state = ctx->pwm_fan_max_state;
if (IS_ENABLED(CONFIG_THERMAL)) {
- cdev = thermal_of_cooling_device_register(pdev->dev.of_node,
- "pwm-fan", ctx,
- &pwm_fan_cooling_ops);
+ cdev = devm_thermal_of_cooling_device_register(dev,
+ dev->of_node, "pwm-fan", ctx, &pwm_fan_cooling_ops);
if (IS_ERR(cdev)) {
- dev_err(&pdev->dev,
- "Failed to register pwm-fan as cooling device");
ret = PTR_ERR(cdev);
- goto err_pwm_disable;
+ dev_err(dev,
+ "Failed to register pwm-fan as cooling device: %d\n",
+ ret);
+ return ret;
}
ctx->cdev = cdev;
thermal_cdev_update(cdev);
}
return 0;
-
-err_pwm_disable:
- state.enabled = false;
- pwm_apply_state(ctx->pwm, &state);
-
-err_reg_disable:
- if (ctx->reg_en)
- regulator_disable(ctx->reg_en);
-
- return ret;
-}
-
-static int pwm_fan_remove(struct platform_device *pdev)
-{
- struct pwm_fan_ctx *ctx = platform_get_drvdata(pdev);
-
- thermal_cooling_device_unregister(ctx->cdev);
- if (ctx->pwm_value)
- pwm_disable(ctx->pwm);
-
- if (ctx->reg_en)
- regulator_disable(ctx->reg_en);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -380,7 +460,6 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
- .remove = pwm_fan_remove,
.driver = {
.name = "pwm-fan",
.pm = &pwm_fan_pm,
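For the tach path added above: pulse_handler() only counts edges, and sample_timer() converts the count into RPM roughly once per second, so with P pulses counted over t milliseconds and R pulses per revolution the fan speed is P * 60000 / (R * t); for example, 100 pulses in a 1000 ms window at 2 pulses per revolution gives 3000 RPM. The same conversion in isolation, as a standalone sketch rather than driver code:

#include <linux/math64.h>

/* Sketch only: mirrors the arithmetic done in sample_timer(). */
static unsigned int pulses_to_rpm(unsigned int pulses, unsigned int ppr,
				  unsigned int elapsed_ms)
{
	u64 tmp = (u64)pulses * 60 * 1000;

	if (!ppr || !elapsed_ms)
		return 0;
	/* ppr * elapsed_ms fits in 32 bits for a ~1 s sample window */
	do_div(tmp, ppr * elapsed_ms);
	return tmp;
}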
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 0d0457245e7d..efe4bb1ff221 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -86,18 +86,9 @@ static umode_t rpi_is_visible(const void *_data, enum hwmon_sensor_types type,
return 0444;
}
-static const u32 rpi_in_config[] = {
- HWMON_I_LCRIT_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info rpi_in = {
- .type = hwmon_in,
- .config = rpi_in_config,
-};
-
static const struct hwmon_channel_info *rpi_info[] = {
- &rpi_in,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_LCRIT_ALARM),
NULL
};
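HWMON_CHANNEL_INFO() removes the per-channel config array and hwmon_channel_info boilerplate deleted above by building both through compound literals. Roughly, the in channel above could also be written out by hand as follows; this is a sketch of what the macro boils down to, not its exact text:

static const struct hwmon_channel_info *example_info[] = {
	/* what HWMON_CHANNEL_INFO(in, HWMON_I_LCRIT_ALARM) amounts to */
	&(struct hwmon_channel_info) {
		.type = hwmon_in,
		.config = (u32 []) { HWMON_I_LCRIT_ALARM, 0 },
	},
	NULL
};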
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 0c4710d35d16..0d65aa5985e2 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -98,7 +98,7 @@ static int s3c_hwmon_read_ch(struct device *dev,
static ssize_t s3c_hwmon_show_raw(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct s3c_hwmon *adc = platform_get_drvdata(to_platform_device(dev));
+ struct s3c_hwmon *adc = dev_get_drvdata(dev);
struct sensor_device_attribute *sa = to_sensor_dev_attr(attr);
int ret;
@@ -164,7 +164,7 @@ static ssize_t s3c_hwmon_ch_show(struct device *dev,
char *buf)
{
struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
- struct s3c_hwmon *hwmon = platform_get_drvdata(to_platform_device(dev));
+ struct s3c_hwmon *hwmon = dev_get_drvdata(dev);
struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
struct s3c_hwmon_chcfg *cfg;
int ret;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 39b41e35c2bf..7f4a63959730 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -10,7 +10,7 @@
*
* Copyright (c) 2007 Wouter Horre
*
- * For further information, see the Documentation/hwmon/sht15 file.
+ * For further information, see the Documentation/hwmon/sht15.rst file.
*/
#include <linux/interrupt.h>
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 6d789aab54c9..44451b913292 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -67,7 +67,6 @@
#include <linux/acpi.h>
#include <linux/io.h>
-
/*
* If force_addr is set to anything different from 0, we forcibly enable
* the device at the given address.
@@ -222,7 +221,7 @@ static struct platform_driver sis5595_driver = {
};
/* 4 Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -231,7 +230,7 @@ static ssize_t show_in(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -240,7 +239,7 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr]));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -249,8 +248,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -269,8 +268,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -289,19 +288,21 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
/* Temperature */
static ssize_t temp1_input_show(struct device *dev,
@@ -368,7 +369,7 @@ static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
/* 2 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -378,7 +379,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -388,8 +389,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -408,7 +409,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -423,8 +424,8 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -480,16 +481,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -500,21 +497,21 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
int nr = to_sensor_dev_attr(da)->index;
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 15);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 15);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -673,7 +670,6 @@ static int sis5595_remove(struct platform_device *pdev)
return 0;
}
-
/* ISA access must be locked explicitly. */
static int sis5595_read_value(struct sis5595_data *data, u8 reg)
{
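The sis5595 conversion above (and the smsc47m1, smsc47m192, thmc50 and via686a ones below) relies on the fixed naming scheme of SENSOR_DEVICE_ATTR_RO/_RW: the second argument is a function prefix, and the macros wire up <prefix>_show and, for _RW, <prefix>_store with 0444/0644 permissions, which is why the callbacks are renamed from the show_in/set_in style to in_show/in_store. Roughly, SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0) stands for something like:

static struct sensor_device_attribute sensor_dev_attr_in0_min = {
	.dev_attr = __ATTR(in0_min, 0644, in_min_show, in_min_store),
	.index = 0,
};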
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index c0775084dde0..60e193f2e970 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -72,14 +72,19 @@ static inline void superio_select(int ld)
superio_outb(0x07, ld);
}
-static inline void superio_enter(void)
+static inline int superio_enter(void)
{
+ if (!request_muxed_region(REG, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x55, REG);
+ return 0;
}
static inline void superio_exit(void)
{
outb(0xAA, REG);
+ release_region(REG, 2);
}
#define SUPERIO_REG_DEVID 0x20
@@ -300,8 +305,12 @@ static int __init smsc47b397_find(void)
u8 id, rev;
char *name;
unsigned short addr;
+ int err;
+
+ err = superio_enter();
+ if (err)
+ return err;
- superio_enter();
id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
switch (id) {
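superio_enter() now claims the two Super-I/O index/data ports with request_muxed_region(), so it can fail (or wait for another driver using the same ports), and every successful call must be balanced by superio_exit(), which releases the region. A sketch of the calling pattern the detection code follows; FOO_DEVID and the foo_ names are placeholders:

static int __init foo_find(unsigned short *addr)
{
	int err;
	u8 id;

	err = superio_enter();	/* -EBUSY if the region cannot be claimed */
	if (err)
		return err;

	id = superio_inb(SUPERIO_REG_DEVID);
	if (id != FOO_DEVID) {
		err = -ENODEV;
		goto out;
	}
	/* ... read the logical device's base address into *addr ... */
out:
	superio_exit();		/* always drop the muxed region */
	return err;
}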
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index c7b6a425e2c0..5f92eab24c62 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -73,16 +73,21 @@ superio_inb(int reg)
/* logical device for fans is 0x0A */
#define superio_select() superio_outb(0x07, 0x0A)
-static inline void
+static inline int
superio_enter(void)
{
+ if (!request_muxed_region(REG, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x55, REG);
+ return 0;
}
static inline void
superio_exit(void)
{
outb(0xAA, REG);
+ release_region(REG, 2);
}
#define SUPERIO_REG_ACT 0x30
@@ -202,8 +207,8 @@ static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
return data;
}
-static ssize_t get_fan(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -221,8 +226,8 @@ static ssize_t get_fan(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t get_fan_min(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -232,32 +237,32 @@ static ssize_t get_fan_min(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t get_fan_div(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
}
-static ssize_t get_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
int bitnr = to_sensor_dev_attr(devattr)->index;
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static ssize_t get_pwm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[attr->index]));
}
-static ssize_t get_pwm_en(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t pwm_en_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -271,8 +276,9 @@ static ssize_t alarms_show(struct device *dev,
return sprintf(buf, "%d\n", data->alarms);
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -307,8 +313,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
* of least surprise; the user doesn't expect the fan minimum to change just
* because the divider changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -370,8 +377,8 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute
return count;
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -396,8 +403,9 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute
return count;
}
-static ssize_t set_pwm_en(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t pwm_en_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -422,23 +430,24 @@ static ssize_t set_pwm_en(struct device *dev, struct device_attribute
return count;
}
-#define fan_present(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, get_fan, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- get_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- get_fan_div, set_fan_div, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_alarm, S_IRUGO, get_fan_alarm, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- get_pwm, set_pwm, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- get_pwm_en, set_pwm_en, offset - 1)
-
-fan_present(1);
-fan_present(2);
-fan_present(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, fan_alarm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_en, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, fan_alarm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_en, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, fan_alarm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_en, 2);
static DEVICE_ATTR_RO(alarms);
@@ -531,8 +540,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
{
u8 val;
unsigned short addr;
+ int err;
+
+ err = superio_enter();
+ if (err)
+ return err;
- superio_enter();
val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
/*
@@ -608,13 +621,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
{
if ((sio_data->activate & 0x01) == 0) {
- superio_enter();
- superio_select();
-
- pr_info("Disabling device\n");
- superio_outb(SUPERIO_REG_ACT, sio_data->activate);
-
- superio_exit();
+ if (!superio_enter()) {
+ superio_select();
+ pr_info("Disabling device\n");
+ superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+ superio_exit();
+ } else {
+ pr_warn("Failed to disable device\n");
+ }
}
}
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 6989408033ec..e5d9222b22f1 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -179,8 +179,8 @@ static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
}
/* Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -188,8 +188,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr], nr));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -197,8 +197,8 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr], nr));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -206,8 +206,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr], nr));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -228,8 +228,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -250,26 +250,34 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0)
-show_in_offset(1)
-show_in_offset(2)
-show_in_offset(3)
-show_in_offset(4)
-show_in_offset(5)
-show_in_offset(6)
-show_in_offset(7)
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
/* Temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -277,8 +285,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -286,8 +294,8 @@ static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -295,8 +303,9 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -317,8 +326,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -339,8 +349,8 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_offset(struct device *dev, struct device_attribute
- *attr, char *buf)
+static ssize_t temp_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -348,8 +358,9 @@ static ssize_t show_temp_offset(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t set_temp_offset(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -385,19 +396,18 @@ static ssize_t set_temp_offset(struct device *dev, struct device_attribute
return count;
}
-#define show_temp_index(index) \
-static SENSOR_DEVICE_ATTR(temp##index##_input, S_IRUGO, \
- show_temp, NULL, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, index-1);
-
-show_temp_index(1)
-show_temp_index(2)
-show_temp_index(3)
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
/* VID */
static ssize_t cpu0_vid_show(struct device *dev,
@@ -434,8 +444,8 @@ static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vrm);
/* Alarms */
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -443,19 +453,19 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms & nr) ? 1 : 0);
}
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 0x0008);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 0x0100);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 0x0200);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 0x0400);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 0x0800);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 0x0010);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 0x0020);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 0x0040);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 0x4000);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 0x8000);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0x0001);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 0x0002);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 0x0004);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 0x0008);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 0x0100);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 0x0200);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 0x0400);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 0x0800);
static struct attribute *smsc47m192_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 90b60297f2f7..f670796b609a 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -85,7 +85,7 @@ static const struct i2c_device_id stts751_id[] = {
{ }
};
-static const struct of_device_id stts751_of_match[] = {
+static const struct of_device_id __maybe_unused stts751_of_match[] = {
{ .compatible = "stts751" },
{ },
};
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 6a0ee903127e..ae9942331cae 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -128,16 +128,16 @@ static struct thmc50_data *thmc50_update_device(struct device *dev)
return data;
}
-static ssize_t show_analog_out(struct device *dev,
+static ssize_t analog_out_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->analog_out);
}
-static ssize_t set_analog_out(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t analog_out_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct thmc50_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -166,14 +166,14 @@ static ssize_t set_analog_out(struct device *dev,
}
/* There is only one PWM mode = DC */
-static ssize_t show_pwm_mode(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t pwm_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "0\n");
}
/* Temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -181,16 +181,17 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", data->temp_input[nr] * 1000);
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_min[nr] * 1000);
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = dev_get_drvdata(dev);
@@ -210,16 +211,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_max[nr] * 1000);
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = dev_get_drvdata(dev);
@@ -239,16 +241,15 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_critical(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t temp_critical_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_critical[nr] * 1000);
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
@@ -257,29 +258,27 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
}
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO, \
- show_temp_critical, NULL, offset - 1);
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
-
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
-
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_analog_out,
- set_analog_out, 0);
-static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_critical, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit, temp_critical, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit, temp_critical, 2);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
+
+static SENSOR_DEVICE_ATTR_RW(pwm1, analog_out, 0);
+static SENSOR_DEVICE_ATTR_RO(pwm1_mode, pwm_mode, 0);
static struct attribute *thmc50_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 35523d315f25..f4ee55615dea 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -150,29 +150,11 @@ static umode_t tmp102_is_visible(const void *data, enum hwmon_sensor_types type,
}
}
-static u32 tmp102_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info tmp102_chip = {
- .type = hwmon_chip,
- .config = tmp102_chip_config,
-};
-
-static u32 tmp102_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
- 0
-};
-
-static const struct hwmon_channel_info tmp102_temp = {
- .type = hwmon_temp,
- .config = tmp102_temp_config,
-};
-
static const struct hwmon_channel_info *tmp102_info[] = {
- &tmp102_chip,
- &tmp102_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
};
@@ -321,7 +303,7 @@ static const struct i2c_device_id tmp102_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp102_id);
-static const struct of_device_id tmp102_of_match[] = {
+static const struct of_device_id __maybe_unused tmp102_of_match[] = {
{ .compatible = "ti,tmp102" },
{ },
};
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index bda0fdc1eb53..a91726d33da8 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -170,7 +170,7 @@ static const struct i2c_device_id tmp103_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp103_id);
-static const struct of_device_id tmp103_of_match[] = {
+static const struct of_device_id __maybe_unused tmp103_of_match[] = {
{ .compatible = "ti,tmp103" },
{ },
};
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index 429bfeae4ca8..2447af704424 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -281,30 +281,13 @@ static umode_t tmp108_is_visible(const void *data, enum hwmon_sensor_types type,
}
}
-static u32 tmp108_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info tmp108_chip = {
- .type = hwmon_chip,
- .config = tmp108_chip_config,
-};
-
-static u32 tmp108_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_HYST
- | HWMON_T_MAX_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info tmp108_temp = {
- .type = hwmon_temp,
- .config = tmp108_temp_config,
-};
-
static const struct hwmon_channel_info *tmp108_info[] = {
- &tmp108_chip,
- &tmp108_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM),
NULL
};
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 2732a71f3b39..5e63010dd3a0 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -70,7 +70,7 @@ static const struct i2c_device_id tmp421_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp421_id);
-static const struct of_device_id tmp421_of_match[] = {
+static const struct of_device_id __maybe_unused tmp421_of_match[] = {
{
.compatible = "ti,tmp421",
.data = (void *)2
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 81f35e3a06b8..259725dec37e 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -47,7 +47,6 @@
#include <linux/acpi.h>
#include <linux/io.h>
-
/*
* If force_addr is set to anything different from 0, we forcibly enable
* the device at the given address.
@@ -355,32 +354,32 @@ static void via686a_init_device(struct via686a_data *data);
/* following are the sysfs callback functions */
/* 7 voltage sensors */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in[nr], nr));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_min[nr], nr));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_max[nr], nr));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -398,8 +397,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -417,44 +416,48 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
/* 3 temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG10(data->temp[nr]));
}
-static ssize_t show_temp_over(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_over_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_over[nr]));
}
-static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_hyst_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_hyst[nr]));
}
-static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t temp_over_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -472,8 +475,9 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t temp_hyst_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -491,29 +495,28 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-#define show_temp_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_over, set_temp_over, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
- show_temp_hyst, set_temp_hyst, offset - 1);
-show_temp_offset(1);
-show_temp_offset(2);
-show_temp_offset(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_over, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_over, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_over, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_hyst, 2);
/* 2 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -521,15 +524,15 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
FAN_FROM_REG(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -546,8 +549,8 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -568,16 +571,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -589,23 +588,23 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct via686a_data *data = via686a_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
@@ -676,7 +675,6 @@ static struct platform_driver via686a_driver = {
.remove = via686a_remove,
};
-
/* This is called when the module is loaded */
static int via686a_probe(struct platform_device *pdev)
{
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 3a6bfa51cb94..95d5e8ec8b7f 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn)
outb(ldn, sio_cip + 1);
}
-static inline void superio_enter(int sio_cip)
+static inline int superio_enter(int sio_cip)
{
+ if (!request_muxed_region(sio_cip, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, sio_cip);
outb(0x87, sio_cip);
+
+ return 0;
}
static inline void superio_exit(int sio_cip)
{
outb(0xaa, sio_cip);
+ release_region(sio_cip, 2);
}
/* ---------------------------------------------------------------------
@@ -1282,11 +1288,14 @@ EXIT:
static int __init vt1211_find(int sio_cip, unsigned short *address)
{
- int err = -ENODEV;
+ int err;
int devid;
- superio_enter(sio_cip);
+ err = superio_enter(sio_cip);
+ if (err)
+ return err;
+ err = -ENODEV;
devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
if (devid != SIO_VT1211_ID)
goto EXIT;
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 367b5eb53fb6..e2f1a80367e2 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -192,8 +192,8 @@ static inline void vt8231_write_value(struct vt8231_data *data, u8 reg,
}
/* following are the sysfs callback functions */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -202,8 +202,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", ((data->in[nr] - 3) * 10000) / 958);
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -212,8 +212,8 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", ((data->in_min[nr] - 3) * 10000) / 958);
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -222,8 +222,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (((data->in_max[nr] - 3) * 10000) / 958));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -242,8 +242,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -330,19 +330,21 @@ static ssize_t in5_max_store(struct device *dev,
return count;
}
-#define define_voltage_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-
-define_voltage_sysfs(0);
-define_voltage_sysfs(1);
-define_voltage_sysfs(2);
-define_voltage_sysfs(3);
-define_voltage_sysfs(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
static DEVICE_ATTR_RO(in5_input);
static DEVICE_ATTR_RW(in5_min);
@@ -407,8 +409,8 @@ static ssize_t temp1_max_hyst_store(struct device *dev,
return count;
}
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -416,8 +418,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -425,8 +427,8 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_max[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -434,8 +436,9 @@ static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -453,8 +456,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -477,27 +481,30 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
* Note that these map the Linux temperature sensor numbering (1-6) to the VIA
* temperature sensor numbering (0-5)
*/
-#define define_temperature_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1)
static DEVICE_ATTR_RO(temp1_input);
static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
-define_temperature_sysfs(2);
-define_temperature_sysfs(3);
-define_temperature_sysfs(4);
-define_temperature_sysfs(5);
-define_temperature_sysfs(6);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
+static SENSOR_DEVICE_ATTR_RW(temp4_max, temp_max, 3);
+static SENSOR_DEVICE_ATTR_RW(temp4_max_hyst, temp_min, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_input, temp, 4);
+static SENSOR_DEVICE_ATTR_RW(temp5_max, temp_max, 4);
+static SENSOR_DEVICE_ATTR_RW(temp5_max_hyst, temp_min, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_input, temp, 5);
+static SENSOR_DEVICE_ATTR_RW(temp6_max, temp_max, 5);
+static SENSOR_DEVICE_ATTR_RW(temp6_max_hyst, temp_min, 5);
/* Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -506,8 +513,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -516,8 +523,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -525,8 +532,9 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -545,8 +553,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct vt8231_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
@@ -593,17 +602,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-
-#define define_fan_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1)
-
-define_fan_sysfs(1);
-define_fan_sysfs(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -614,27 +618,27 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct vt8231_data *data = vt8231_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp6_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp4_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp5_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp6_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 8ac89d0781cc..7ca53a28c305 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -130,17 +130,23 @@ superio_select(struct w83627hf_sio_data *sio, int ld)
outb(ld, sio->sioaddr + 1);
}
-static inline void
+static inline int
superio_enter(struct w83627hf_sio_data *sio)
{
+ if (!request_muxed_region(sio->sioaddr, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, sio->sioaddr);
outb(0x87, sio->sioaddr);
+
+ return 0;
}
static inline void
superio_exit(struct w83627hf_sio_data *sio)
{
outb(0xAA, sio->sioaddr);
+ release_region(sio->sioaddr, 2);
}
#define W627_DEVID 0x52
@@ -396,7 +402,6 @@ struct w83627hf_data {
#endif
};
-
static int w83627hf_probe(struct platform_device *pdev);
static int w83627hf_remove(struct platform_device *pdev);
@@ -482,28 +487,28 @@ static struct platform_driver w83627hf_driver = {
};
static ssize_t
-show_in_input(struct device *dev, struct device_attribute *devattr, char *buf)
+in_input_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
}
static ssize_t
-show_in_min(struct device *dev, struct device_attribute *devattr, char *buf)
+in_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
}
static ssize_t
-show_in_max(struct device *dev, struct device_attribute *devattr, char *buf)
+in_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
}
static ssize_t
-store_in_min(struct device *dev, struct device_attribute *devattr,
+in_min_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -522,7 +527,7 @@ store_in_min(struct device *dev, struct device_attribute *devattr,
return count;
}
static ssize_t
-store_in_max(struct device *dev, struct device_attribute *devattr,
+in_max_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -540,22 +545,31 @@ store_in_max(struct device *dev, struct device_attribute *devattr,
mutex_unlock(&data->update_lock);
return count;
}
-#define sysfs_vin_decl(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in_input, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO|S_IWUSR, \
- show_in_min, store_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO|S_IWUSR, \
- show_in_max, store_in_max, offset);
-
-sysfs_vin_decl(1);
-sysfs_vin_decl(2);
-sysfs_vin_decl(3);
-sysfs_vin_decl(4);
-sysfs_vin_decl(5);
-sysfs_vin_decl(6);
-sysfs_vin_decl(7);
-sysfs_vin_decl(8);
+
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in_input, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
/* use a different set of functions for in0 */
static ssize_t show_in_0(struct w83627hf_data *data, char *buf, u8 reg)
@@ -661,7 +675,8 @@ static DEVICE_ATTR_RW(in0_min);
static DEVICE_ATTR_RW(in0_max);
static ssize_t
-show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_input_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -669,7 +684,7 @@ show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
(long)DIV_FROM_REG(data->fan_div[nr])));
}
static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -677,7 +692,7 @@ show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf)
(long)DIV_FROM_REG(data->fan_div[nr])));
}
static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *devattr,
+fan_min_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -697,18 +712,16 @@ store_fan_min(struct device *dev, struct device_attribute *devattr,
mutex_unlock(&data->update_lock);
return count;
}
-#define sysfs_fan_decl(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, store_fan_min, offset - 1);
-sysfs_fan_decl(1);
-sysfs_fan_decl(2);
-sysfs_fan_decl(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
static ssize_t
-show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -719,8 +732,7 @@ show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_temp_max(struct device *dev, struct device_attribute *devattr,
- char *buf)
+temp_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -731,7 +743,7 @@ show_temp_max(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-show_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
+temp_max_hyst_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -743,7 +755,7 @@ show_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_max(struct device *dev, struct device_attribute *devattr,
+temp_max_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -765,7 +777,7 @@ store_temp_max(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
+temp_max_hyst_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -786,17 +798,15 @@ store_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
return count;
}
-#define sysfs_temp_decl(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO|S_IWUSR, \
- show_temp_max, store_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO|S_IWUSR, \
- show_temp_max_hyst, store_temp_max_hyst, offset - 1);
-
-sysfs_temp_decl(1);
-sysfs_temp_decl(2);
-sysfs_temp_decl(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_max_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_max_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_max_hyst, 2);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -841,27 +851,27 @@ alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(alarms);
static ssize_t
-show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(in8_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 13);
static ssize_t
beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -902,7 +912,7 @@ beep_mask_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(beep_mask);
static ssize_t
-show_beep(struct device *dev, struct device_attribute *attr, char *buf)
+beep_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -910,8 +920,8 @@ show_beep(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-store_beep(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+beep_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -959,41 +969,25 @@ store_beep(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 0);
-static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 1);
-static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 2);
-static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 3);
-static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 8);
-static SENSOR_DEVICE_ATTR(in5_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 9);
-static SENSOR_DEVICE_ATTR(in6_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 10);
-static SENSOR_DEVICE_ATTR(in7_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 16);
-static SENSOR_DEVICE_ATTR(in8_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 17);
-static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 6);
-static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 7);
-static SENSOR_DEVICE_ATTR(fan3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 11);
-static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 4);
-static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 5);
-static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 13);
-static SENSOR_DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 15);
+static SENSOR_DEVICE_ATTR_RW(in0_beep, beep, 0);
+static SENSOR_DEVICE_ATTR_RW(in1_beep, beep, 1);
+static SENSOR_DEVICE_ATTR_RW(in2_beep, beep, 2);
+static SENSOR_DEVICE_ATTR_RW(in3_beep, beep, 3);
+static SENSOR_DEVICE_ATTR_RW(in4_beep, beep, 8);
+static SENSOR_DEVICE_ATTR_RW(in5_beep, beep, 9);
+static SENSOR_DEVICE_ATTR_RW(in6_beep, beep, 10);
+static SENSOR_DEVICE_ATTR_RW(in7_beep, beep, 16);
+static SENSOR_DEVICE_ATTR_RW(in8_beep, beep, 17);
+static SENSOR_DEVICE_ATTR_RW(fan1_beep, beep, 6);
+static SENSOR_DEVICE_ATTR_RW(fan2_beep, beep, 7);
+static SENSOR_DEVICE_ATTR_RW(fan3_beep, beep, 11);
+static SENSOR_DEVICE_ATTR_RW(temp1_beep, beep, 4);
+static SENSOR_DEVICE_ATTR_RW(temp2_beep, beep, 5);
+static SENSOR_DEVICE_ATTR_RW(temp3_beep, beep, 13);
+static SENSOR_DEVICE_ATTR_RW(beep_enable, beep, 15);
static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1007,7 +1001,7 @@ show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
* because the divisor changed.
*/
static ssize_t
-store_fan_div(struct device *dev, struct device_attribute *devattr,
+fan_div_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1047,15 +1041,12 @@ store_fan_div(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 1);
-static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 2);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
static ssize_t
-show_pwm(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1063,7 +1054,7 @@ show_pwm(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm(struct device *dev, struct device_attribute *devattr,
+pwm_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1096,12 +1087,13 @@ store_pwm(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
static ssize_t
-show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_enable_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1109,8 +1101,8 @@ show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+pwm_enable_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = dev_get_drvdata(dev);
@@ -1134,15 +1126,12 @@ store_pwm_enable(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
static ssize_t
-show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_freq_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1155,7 +1144,7 @@ show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm_freq(struct device *dev, struct device_attribute *devattr,
+pwm_freq_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1186,15 +1175,12 @@ store_pwm_freq(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 0);
-static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 1);
-static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *devattr,
+temp_type_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1203,7 +1189,7 @@ show_temp_type(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_type(struct device *dev, struct device_attribute *devattr,
+temp_type_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1258,13 +1244,9 @@ store_temp_type(struct device *dev, struct device_attribute *devattr,
return count;
}
-#define sysfs_temp_type(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \
- show_temp_type, store_temp_type, offset - 1);
-
-sysfs_temp_type(1);
-sysfs_temp_type(2);
-sysfs_temp_type(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_type, temp_type, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_type, temp_type, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_type, temp_type, 2);
static ssize_t
name_show(struct device *dev, struct device_attribute *devattr, char *buf)
@@ -1278,7 +1260,7 @@ static DEVICE_ATTR_RO(name);
static int __init w83627hf_find(int sioaddr, unsigned short *addr,
struct w83627hf_sio_data *sio_data)
{
- int err = -ENODEV;
+ int err;
u16 val;
static __initconst char *const names[] = {
@@ -1290,7 +1272,11 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
};
sio_data->sioaddr = sioaddr;
- superio_enter(sio_data);
+ err = superio_enter(sio_data);
+ if (err)
+ return err;
+
+ err = -ENODEV;
val = force_id ? force_id : superio_inb(sio_data, DEVID);
switch (val) {
case W627_DEVID:
@@ -1595,7 +1581,6 @@ static int w83627hf_remove(struct platform_device *pdev)
return 0;
}
-
/* Registers 0x50-0x5f are banked */
static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg)
{
@@ -1644,9 +1629,21 @@ static int w83627thf_read_gpio5(struct platform_device *pdev)
struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff, sel;
- superio_enter(sio_data);
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+			 "Cannot read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
superio_select(sio_data, W83627HF_LD_GPIO5);
+ res = 0xff;
+
/* Make sure these GPIO pins are enabled */
if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
@@ -1677,7 +1674,17 @@ static int w83687thf_read_vid(struct platform_device *pdev)
struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff;
- superio_enter(sio_data);
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+			 "Cannot read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
superio_select(sio_data, W83627HF_LD_HWM);
/* Make sure these GPIO pins are enabled */
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
index e858093ac806..d4105321e462 100644
--- a/drivers/hwmon/w83773g.c
+++ b/drivers/hwmon/w83773g.c
@@ -44,7 +44,7 @@ static const struct i2c_device_id w83773_id[] = {
MODULE_DEVICE_TABLE(i2c, w83773_id);
-static const struct of_device_id w83773_of_match[] = {
+static const struct of_device_id __maybe_unused w83773_of_match[] = {
{
.compatible = "nuvoton,w83773g"
},
@@ -237,31 +237,13 @@ static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-static const u32 w83773_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info w83773_chip = {
- .type = hwmon_chip,
- .config = w83773_chip_config,
-};
-
-static const u32 w83773_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
- HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
- 0
-};
-
-static const struct hwmon_channel_info w83773_temp = {
- .type = hwmon_temp,
- .config = w83773_temp_config,
-};
-
static const struct hwmon_channel_info *w83773_info[] = {
- &w83773_chip,
- &w83773_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET),
NULL
};
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 0af0f6283b35..e94ae1bb3cf0 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1341,7 +1341,7 @@ static int watchdog_open(struct inode *inode, struct file *filp)
/* Store pointer to data into filp's private data */
filp->private_data = data;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int watchdog_close(struct inode *inode, struct file *filp)
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ad34380cac49..18e8d03321d6 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -75,20 +75,13 @@ config CORESIGHT_SOURCE_ETM4X
bool "CoreSight Embedded Trace Macrocell 4.x driver"
depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
+ select PID_IN_CONTEXTIDR
help
This driver provides support for the ETM4.x tracer module, tracing the
instructions that a processor is executing. This is primarily useful
for instruction level tracing. Depending on the implemented version
data tracing may also be available.
-config CORESIGHT_DYNAMIC_REPLICATOR
- bool "CoreSight Programmable Replicator driver"
- depends on CORESIGHT_LINKS_AND_SINKS
- help
- This enables support for dynamic CoreSight replicator link driver.
- The programmable ATB replicator allows independent filtering of the
- trace data based on the traceid.
-
config CORESIGHT_STM
bool "CoreSight System Trace Macrocell driver"
depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 41870ded51a3..3b435aa42af5 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
coresight-etm3x-sysfs.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
coresight-etm4x-sysfs.o
-obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 170fbb66bda2..4ea68a3522e9 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -485,12 +485,12 @@ static int catu_disable(struct coresight_device *csdev, void *__unused)
return rc;
}
-const struct coresight_ops_helper catu_helper_ops = {
+static const struct coresight_ops_helper catu_helper_ops = {
.enable = catu_enable,
.disable = catu_disable,
};
-const struct coresight_ops catu_ops = {
+static const struct coresight_ops catu_ops = {
.helper_ops = &catu_helper_ops,
};
@@ -557,8 +557,9 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->csdev = coresight_register(&catu_desc);
if (IS_ERR(drvdata->csdev))
ret = PTR_ERR(drvdata->csdev);
+ else
+ pm_runtime_put(&adev->dev);
out:
- pm_runtime_put(&adev->dev);
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 1b281f0dcccc..1d2ad183fd92 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -109,11 +109,6 @@ static inline bool coresight_is_catu_device(struct coresight_device *csdev)
return true;
}
-#ifdef CONFIG_CORESIGHT_CATU
extern const struct etr_buf_operations etr_catu_buf_ops;
-#else
-/* Dummy declaration for the CATU ops */
-static const struct etr_buf_operations etr_catu_buf_ops;
-#endif
#endif
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
deleted file mode 100644
index 299667b887fc..000000000000
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/amba/bus.h>
-#include <linux/clk.h>
-#include <linux/coresight.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-
-#include "coresight-priv.h"
-
-#define REPLICATOR_IDFILTER0 0x000
-#define REPLICATOR_IDFILTER1 0x004
-
-/**
- * struct replicator_state - specifics associated to a replicator component
- * @base: memory mapped base address for this component.
- * @dev: the device entity associated with this component
- * @atclk: optional clock for the core parts of the replicator.
- * @csdev: component vitals needed by the framework
- */
-struct replicator_state {
- void __iomem *base;
- struct device *dev;
- struct clk *atclk;
- struct coresight_device *csdev;
-};
-
-/*
- * replicator_reset : Reset the replicator configuration to sane values.
- */
-static void replicator_reset(struct replicator_state *drvdata)
-{
- CS_UNLOCK(drvdata->base);
-
- if (!coresight_claim_device_unlocked(drvdata->base)) {
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
- coresight_disclaim_device_unlocked(drvdata->base);
- }
-
- CS_LOCK(drvdata->base);
-}
-
-static int replicator_enable(struct coresight_device *csdev, int inport,
- int outport)
-{
- int rc = 0;
- u32 reg;
- struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- switch (outport) {
- case 0:
- reg = REPLICATOR_IDFILTER0;
- break;
- case 1:
- reg = REPLICATOR_IDFILTER1;
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- CS_UNLOCK(drvdata->base);
-
- if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
- (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
- rc = coresight_claim_device_unlocked(drvdata->base);
-
- /* Ensure that the outport is enabled. */
- if (!rc) {
- writel_relaxed(0x00, drvdata->base + reg);
- dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
- }
-
- CS_LOCK(drvdata->base);
-
- return rc;
-}
-
-static void replicator_disable(struct coresight_device *csdev, int inport,
- int outport)
-{
- u32 reg;
- struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- switch (outport) {
- case 0:
- reg = REPLICATOR_IDFILTER0;
- break;
- case 1:
- reg = REPLICATOR_IDFILTER1;
- break;
- default:
- WARN_ON(1);
- return;
- }
-
- CS_UNLOCK(drvdata->base);
-
- /* disable the flow of ATB data through port */
- writel_relaxed(0xff, drvdata->base + reg);
-
- if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
- (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
- coresight_disclaim_device_unlocked(drvdata->base);
- CS_LOCK(drvdata->base);
-
- dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
-}
-
-static const struct coresight_ops_link replicator_link_ops = {
- .enable = replicator_enable,
- .disable = replicator_disable,
-};
-
-static const struct coresight_ops replicator_cs_ops = {
- .link_ops = &replicator_link_ops,
-};
-
-#define coresight_replicator_reg(name, offset) \
- coresight_simple_reg32(struct replicator_state, name, offset)
-
-coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
-coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
-
-static struct attribute *replicator_mgmt_attrs[] = {
- &dev_attr_idfilter0.attr,
- &dev_attr_idfilter1.attr,
- NULL,
-};
-
-static const struct attribute_group replicator_mgmt_group = {
- .attrs = replicator_mgmt_attrs,
- .name = "mgmt",
-};
-
-static const struct attribute_group *replicator_groups[] = {
- &replicator_mgmt_group,
- NULL,
-};
-
-static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
-{
- int ret;
- struct device *dev = &adev->dev;
- struct resource *res = &adev->res;
- struct coresight_platform_data *pdata = NULL;
- struct replicator_state *drvdata;
- struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
- void __iomem *base;
-
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
- }
-
- drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- drvdata->dev = &adev->dev;
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
-
- /* Validity for the resource is already checked by the AMBA core */
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- drvdata->base = base;
- dev_set_drvdata(dev, drvdata);
- pm_runtime_put(&adev->dev);
-
- desc.type = CORESIGHT_DEV_TYPE_LINK;
- desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
- desc.ops = &replicator_cs_ops;
- desc.pdata = adev->dev.platform_data;
- desc.dev = &adev->dev;
- desc.groups = replicator_groups;
- drvdata->csdev = coresight_register(&desc);
-
- if (!IS_ERR(drvdata->csdev)) {
- replicator_reset(drvdata);
- return 0;
- }
- return PTR_ERR(drvdata->csdev);
-}
-
-#ifdef CONFIG_PM
-static int replicator_runtime_suspend(struct device *dev)
-{
- struct replicator_state *drvdata = dev_get_drvdata(dev);
-
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
-
- return 0;
-}
-
-static int replicator_runtime_resume(struct device *dev)
-{
- struct replicator_state *drvdata = dev_get_drvdata(dev);
-
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops replicator_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(replicator_runtime_suspend,
- replicator_runtime_resume,
- NULL)
-};
-
-static const struct amba_id replicator_ids[] = {
- {
- .id = 0x000bb909,
- .mask = 0x000fffff,
- },
- {
- /* Coresight SoC-600 */
- .id = 0x000bb9ec,
- .mask = 0x000fffff,
- },
- { 0, 0 },
-};
-
-static struct amba_driver replicator_driver = {
- .drv = {
- .name = "coresight-dynamic-replicator",
- .pm = &replicator_dev_pm_ops,
- .suppress_bind_attrs = true,
- },
- .probe = replicator_probe,
- .id_table = replicator_ids,
-};
-builtin_amba_driver(replicator_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 105782ea64c7..4ee4c80a4354 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -5,6 +5,7 @@
* Description: CoreSight Embedded Trace Buffer driver
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -71,6 +72,8 @@
* @miscdev: specifics to handle "/dev/xyz.etb" entry.
* @spinlock: only one at a time pls.
* @reading: synchronise user space access to etb buffer.
+ * @pid: Process ID of the process being monitored by the session
+ * that is using this component.
* @buf: area of memory where ETB buffer content gets sent.
* @mode: this ETB is being used.
* @buffer_depth: size of @buf.
@@ -84,6 +87,7 @@ struct etb_drvdata {
struct miscdevice miscdev;
spinlock_t spinlock;
local_t reading;
+ pid_t pid;
u8 *buf;
u32 mode;
u32 buffer_depth;
@@ -93,17 +97,9 @@ struct etb_drvdata {
static int etb_set_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle);
-static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
+static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
- u32 depth = 0;
-
- pm_runtime_get_sync(drvdata->dev);
-
- /* RO registers don't need locking */
- depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
-
- pm_runtime_put(drvdata->dev);
- return depth;
+ return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
}
static void __etb_enable_hw(struct etb_drvdata *drvdata)
@@ -159,14 +155,15 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
goto out;
}
- /* Nothing to do, the tracer is already enabled. */
- if (drvdata->mode == CS_MODE_SYSFS)
- goto out;
+ if (drvdata->mode == CS_MODE_DISABLED) {
+ ret = etb_enable_hw(drvdata);
+ if (ret)
+ goto out;
- ret = etb_enable_hw(drvdata);
- if (!ret)
drvdata->mode = CS_MODE_SYSFS;
+ }
+ atomic_inc(csdev->refcnt);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
@@ -175,29 +172,52 @@ out:
static int etb_enable_perf(struct coresight_device *csdev, void *data)
{
int ret = 0;
+ pid_t pid;
unsigned long flags;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct perf_output_handle *handle = data;
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* No need to continue if the component is already in use. */
- if (drvdata->mode != CS_MODE_DISABLED) {
+ /* No need to continue if the component is already in use by sysFS. */
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Get a handle on the pid of the process to monitor */
+ pid = task_pid_nr(handle->event->owner);
+
+ if (drvdata->pid != -1 && drvdata->pid != pid) {
ret = -EBUSY;
goto out;
}
/*
+ * No HW configuration is needed if the sink is already in
+ * use for this session.
+ */
+ if (drvdata->pid == pid) {
+ atomic_inc(csdev->refcnt);
+ goto out;
+ }
+
+ /*
* We don't have an internal state to clean up if we fail to setup
* the perf buffer. So we can perform the step before we turn the
* ETB on and leave without cleaning up.
*/
- ret = etb_set_buffer(csdev, (struct perf_output_handle *)data);
+ ret = etb_set_buffer(csdev, handle);
if (ret)
goto out;
ret = etb_enable_hw(drvdata);
- if (!ret)
+ if (!ret) {
+ /* Associate with monitored process. */
+ drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
+ atomic_inc(csdev->refcnt);
+ }
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -325,27 +345,35 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
coresight_disclaim_device(drvdata->base);
}
-static void etb_disable(struct coresight_device *csdev)
+static int etb_disable(struct coresight_device *csdev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* Disable the ETB only if it needs to */
- if (drvdata->mode != CS_MODE_DISABLED) {
- etb_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(csdev->refcnt)) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EBUSY;
}
+
+ /* Complain if we (somehow) got out of sync */
+ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ etb_disable_hw(drvdata);
+ /* Dissociate from monitored process. */
+ drvdata->pid = -1;
+ drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(drvdata->dev, "ETB disabled\n");
+ return 0;
}
-static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
- void **pages, int nr_pages, bool overwrite)
+static void *etb_alloc_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
{
- int node;
+ int node, cpu = event->cpu;
struct cs_buffers *buf;
if (cpu == -1)
@@ -404,7 +432,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
const u32 *barrier;
u32 read_ptr, write_ptr, capacity;
u32 status, read_data;
- unsigned long offset, to_read;
+ unsigned long offset, to_read = 0, flags;
struct cs_buffers *buf = sink_config;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -413,6 +441,12 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Don't do anything if another tracer is using this sink */
+ if (atomic_read(csdev->refcnt) != 1)
+ goto out;
+
__etb_disable_hw(drvdata);
CS_UNLOCK(drvdata->base);
@@ -523,6 +557,8 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
}
__etb_enable_hw(drvdata);
CS_LOCK(drvdata->base);
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read;
}
@@ -720,7 +756,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
- pm_runtime_put(&adev->dev);
if (drvdata->buffer_depth & 0x80000000)
return -EINVAL;
@@ -730,6 +765,9 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (!drvdata->buf)
return -ENOMEM;
+ /* This device is not associated with a session */
+ drvdata->pid = -1;
+
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &etb_cs_ops;
@@ -747,6 +785,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
goto err_misc_register;
+ pm_runtime_put(&adev->dev);
return 0;
err_misc_register:
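
The hunks above change the ETB from a single-owner sink into one shared by reference count and owning PID: the first perf event of a session programs the hardware, further events of the same session only bump the count, a different session gets -EBUSY, and only the last disable tears the sink down. A minimal user-space sketch of those rules follows (illustration only, not part of the patch; the helper names are made up and locking is omitted, whereas the driver does all of this under drvdata->spinlock):

#include <stdio.h>

struct sink {
	int refcnt;
	int pid;	/* -1 while the sink is free */
};

static int sink_enable_perf(struct sink *s, int pid)
{
	if (s->pid != -1 && s->pid != pid)
		return -1;		/* owned by another session: -EBUSY */
	if (s->pid == -1)
		s->pid = pid;		/* first user programs the hardware */
	s->refcnt++;
	return 0;
}

static int sink_disable(struct sink *s)
{
	if (--s->refcnt)
		return -1;		/* other events still use the sink */
	s->pid = -1;			/* last user: disable HW, dissociate */
	return 0;
}

int main(void)
{
	struct sink s = { .refcnt = 0, .pid = -1 };

	printf("%d\n", sink_enable_perf(&s, 42));	/* 0: first event claims it */
	printf("%d\n", sink_enable_perf(&s, 42));	/* 0: same session shares it */
	printf("%d\n", sink_enable_perf(&s, 99));	/* -1: another session, busy */
	printf("%d\n", sink_disable(&s));		/* -1: one user still left */
	printf("%d\n", sink_disable(&s));		/* 0: last user, torn down */
	return 0;
}
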
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 4d5a2b9f9d6a..3c6294432748 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -29,6 +29,7 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
+PMU_FORMAT_ATTR(contextid, "config:" __stringify(ETM_OPT_CTXTID));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
@@ -36,6 +37,7 @@ PMU_FORMAT_ATTR(sinkid, "config2:0-31");
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
+ &format_attr_contextid.attr,
&format_attr_timestamp.attr,
&format_attr_retstack.attr,
&format_attr_sinkid.attr,
@@ -118,23 +120,34 @@ out:
return ret;
}
+static void free_sink_buffer(struct etm_event_data *event_data)
+{
+ int cpu;
+ cpumask_t *mask = &event_data->mask;
+ struct coresight_device *sink;
+
+ if (WARN_ON(cpumask_empty(mask)))
+ return;
+
+ if (!event_data->snk_config)
+ return;
+
+ cpu = cpumask_first(mask);
+ sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
+ sink_ops(sink)->free_buffer(event_data->snk_config);
+}
+
static void free_event_data(struct work_struct *work)
{
int cpu;
cpumask_t *mask;
struct etm_event_data *event_data;
- struct coresight_device *sink;
event_data = container_of(work, struct etm_event_data, work);
mask = &event_data->mask;
/* Free the sink buffers, if there are any */
- if (event_data->snk_config && !WARN_ON(cpumask_empty(mask))) {
- cpu = cpumask_first(mask);
- sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
- if (sink_ops(sink)->free_buffer)
- sink_ops(sink)->free_buffer(event_data->snk_config);
- }
+ free_sink_buffer(event_data);
for_each_cpu(cpu, mask) {
struct list_head **ppath;
@@ -213,7 +226,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = coresight_get_enabled_sink(true);
}
- if (!sink || !sink_ops(sink)->alloc_buffer)
+ if (!sink)
goto err;
mask = &event_data->mask;
@@ -259,9 +272,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
if (cpu >= nr_cpu_ids)
goto err;
+ if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
+ goto err;
+
/* Allocate the sink buffer for this session */
event_data->snk_config =
- sink_ops(sink)->alloc_buffer(sink, cpu, pages,
+ sink_ops(sink)->alloc_buffer(sink, event, pages,
nr_pages, overwrite);
if (!event_data->snk_config)
goto err;
@@ -566,7 +582,8 @@ static int __init etm_perf_init(void)
{
int ret;
- etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;
+ etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
+ PERF_PMU_CAP_ITRACE);
etm_pmu.attr_groups = etm_pmu_attr_groups;
etm_pmu.task_ctx_nr = perf_sw_context;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 08ce37c9475d..8bb0092c7ec2 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -138,8 +138,11 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
drvdata->base + TRCCNTVRn(i));
}
- /* Resource selector pair 0 is always implemented and reserved */
- for (i = 0; i < drvdata->nr_resource * 2; i++)
+ /*
+ * Resource selector pair 0 is always implemented and reserved. As
+ * such, start at 2.
+ */
+ for (i = 2; i < drvdata->nr_resource * 2; i++)
writel_relaxed(config->res_ctrl[i],
drvdata->base + TRCRSCTLRn(i));
@@ -201,6 +204,91 @@ static void etm4_enable_hw_smp_call(void *info)
arg->rc = etm4_enable_hw(arg->drvdata);
}
+/*
+ * The goal of function etm4_config_timestamp_event() is to configure a
+ * counter that will tell the tracer to emit a timestamp packet when it
+ * reaches zero. This is done in order to get a more fine grained idea
+ * of when instructions are executed so that they can be correlated
+ * with execution on other CPUs.
+ *
+ * To do this the counter itself is configured to self reload and
+ * TRCRSCTLR1 (always true) is used to get the counter to decrement. From
+ * there a resource selector is configured with the counter and the
+ * timestamp control register to use the resource selector to trigger the
+ * event that will insert a timestamp packet in the stream.
+ */
+static int etm4_config_timestamp_event(struct etmv4_drvdata *drvdata)
+{
+ int ctridx, ret = -EINVAL;
+ int counter, rselector;
+ u32 val = 0;
+ struct etmv4_config *config = &drvdata->config;
+
+ /* No point in trying if we don't have at least one counter */
+ if (!drvdata->nr_cntr)
+ goto out;
+
+ /* Find a counter that hasn't been initialised */
+ for (ctridx = 0; ctridx < drvdata->nr_cntr; ctridx++)
+ if (config->cntr_val[ctridx] == 0)
+ break;
+
+ /* All the counters have been configured already, bail out */
+ if (ctridx == drvdata->nr_cntr) {
+ pr_debug("%s: no available counter found\n", __func__);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /*
+ * Searching for an available resource selector to use, starting at
+ * '2' since every implementation has at least 2 resource selectors.
+ * ETMIDR4 gives the number of resource selector _pairs_,
+ * hence multiply by 2.
+ */
+ for (rselector = 2; rselector < drvdata->nr_resource * 2; rselector++)
+ if (!config->res_ctrl[rselector])
+ break;
+
+ if (rselector == drvdata->nr_resource * 2) {
+ pr_debug("%s: no available resource selector found\n",
+ __func__);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Remember what counter we used */
+ counter = 1 << ctridx;
+
+ /*
+ * Initialise original and reload counter value to the smallest
+ * possible value in order to get as much precision as we can.
+ */
+ config->cntr_val[ctridx] = 1;
+ config->cntrldvr[ctridx] = 1;
+
+ /* Set the trace counter control register */
+ val = 0x1 << 16 | /* Bit 16, reload counter automatically */
+ 0x0 << 7 | /* Select single resource selector */
+ 0x1; /* Resource selector 1, i.e always true */
+
+ config->cntr_ctrl[ctridx] = val;
+
+ val = 0x2 << 16 | /* Group 0b0010 - Counter and sequencers */
+ counter << 0; /* Counter to use */
+
+ config->res_ctrl[rselector] = val;
+
+ val = 0x0 << 7 | /* Select single resource selector */
+ rselector; /* Resource selector */
+
+ config->ts_ctrl = val;
+
+ ret = 0;
+out:
+ return ret;
+}
+
static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
struct perf_event *event)
{
@@ -236,9 +324,29 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
/* TRM: Must program this for cycacc to work */
config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
}
- if (attr->config & BIT(ETM_OPT_TS))
+ if (attr->config & BIT(ETM_OPT_TS)) {
+ /*
+ * Configure timestamps to be emitted at regular intervals in
+ * order to correlate instructions executed on different CPUs
+ * (CPU-wide trace scenarios).
+ */
+ ret = etm4_config_timestamp_event(drvdata);
+
+ /*
+ * No need to go further if timestamp intervals can't
+ * be configured.
+ */
+ if (ret)
+ goto out;
+
/* bit[11], Global timestamp tracing bit */
config->cfg |= BIT(11);
+ }
+
+ if (attr->config & BIT(ETM_OPT_CTXTID))
+ /* bit[6], Context ID tracing bit */
+ config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+
/* return stack - enable if selected and supported */
if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
/* bit[12], Return stack enable bit */
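
The etm4_config_timestamp_event() hunk above is mostly register arithmetic: a free counter is set to self-reload off resource selector 1 (always true), a free resource selector is pointed at that counter, and TRCTSCTLR fires the timestamp event from that selector. A standalone sketch of the same bit packing, using the field positions shown in the hunk (illustration only; it simply prints the values the patch would program for counter 0 and resource selector 2):

#include <stdio.h>

int main(void)
{
	int ctridx = 0;		/* first unused counter */
	int rselector = 2;	/* first usable resource selector */
	unsigned int cntr_ctrl, res_ctrl, ts_ctrl;

	/* TRCCNTCTLRn: bit 16 reloads the counter automatically, bit 7 clear
	 * selects the single-selector form, low bits pick resource selector 1
	 * (always true) as the decrement event. */
	cntr_ctrl = (0x1 << 16) | (0x0 << 7) | 0x1;

	/* TRCRSCTLRn: group 0b0010 (counters and sequencers), with the bit
	 * for the chosen counter set in the low half. */
	res_ctrl = (0x2 << 16) | (1u << ctridx);

	/* TRCTSCTLR: emit the timestamp event off that resource selector. */
	ts_ctrl = (0x0 << 7) | rselector;

	printf("cntr_ctrl=%#x res_ctrl=%#x ts_ctrl=%#x\n",
	       cntr_ctrl, res_ctrl, ts_ctrl);	/* 0x10001 0x20001 0x2 */
	return 0;
}
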
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 927925151509..16b0c0e1e43a 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -12,6 +12,8 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
@@ -43,7 +45,7 @@ struct funnel_drvdata {
unsigned long priority;
};
-static int funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
+static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
u32 functl;
int rc = 0;
@@ -71,17 +73,19 @@ done:
static int funnel_enable(struct coresight_device *csdev, int inport,
int outport)
{
- int rc;
+ int rc = 0;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- rc = funnel_enable_hw(drvdata, inport);
+ if (drvdata->base)
+ rc = dynamic_funnel_enable_hw(drvdata, inport);
if (!rc)
dev_dbg(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
return rc;
}
-static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
+static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
+ int inport)
{
u32 functl;
@@ -103,7 +107,8 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- funnel_disable_hw(drvdata, inport);
+ if (drvdata->base)
+ dynamic_funnel_disable_hw(drvdata, inport);
dev_dbg(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
}
@@ -177,54 +182,70 @@ static struct attribute *coresight_funnel_attrs[] = {
};
ATTRIBUTE_GROUPS(coresight_funnel);
-static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
+static int funnel_probe(struct device *dev, struct resource *res)
{
int ret;
void __iomem *base;
- struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
- struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
+ struct device_node *np = dev->of_node;
if (np) {
pdata = of_get_coresight_platform_data(dev, np);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
}
+ if (of_device_is_compatible(np, "arm,coresight-funnel"))
+ pr_warn_once("Uses OBSOLETE CoreSight funnel binding\n");
+
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &adev->dev;
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+ drvdata->dev = dev;
+ drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
- dev_set_drvdata(dev, drvdata);
- /* Validity for the resource is already checked by the AMBA core */
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ /*
+ * Map the device base for dynamic-funnel, which has been
+ * validated by AMBA core.
+ */
+ if (res) {
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_disable_clk;
+ }
+ drvdata->base = base;
+ desc.groups = coresight_funnel_groups;
+ }
- drvdata->base = base;
- pm_runtime_put(&adev->dev);
+ dev_set_drvdata(dev, drvdata);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
desc.pdata = pdata;
desc.dev = dev;
- desc.groups = coresight_funnel_groups;
drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto out_disable_clk;
+ }
+
+ pm_runtime_put(dev);
- return PTR_ERR_OR_ZERO(drvdata->csdev);
+out_disable_clk:
+ if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+ return ret;
}
#ifdef CONFIG_PM
@@ -253,7 +274,48 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
};
-static const struct amba_id funnel_ids[] = {
+static int static_funnel_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* Static funnels do not have a programming base */
+ ret = funnel_probe(&pdev->dev, NULL);
+
+ if (ret) {
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+
+ return ret;
+}
+
+static const struct of_device_id static_funnel_match[] = {
+ {.compatible = "arm,coresight-static-funnel"},
+ {}
+};
+
+static struct platform_driver static_funnel_driver = {
+ .probe = static_funnel_probe,
+ .driver = {
+ .name = "coresight-static-funnel",
+ .of_match_table = static_funnel_match,
+ .pm = &funnel_dev_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(static_funnel_driver);
+
+static int dynamic_funnel_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ return funnel_probe(&adev->dev, &adev->res);
+}
+
+static const struct amba_id dynamic_funnel_ids[] = {
{
.id = 0x000bb908,
.mask = 0x000fffff,
@@ -266,14 +328,14 @@ static const struct amba_id funnel_ids[] = {
{ 0, 0},
};
-static struct amba_driver funnel_driver = {
+static struct amba_driver dynamic_funnel_driver = {
.drv = {
- .name = "coresight-funnel",
+ .name = "coresight-dynamic-funnel",
.owner = THIS_MODULE,
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
- .probe = funnel_probe,
- .id_table = funnel_ids,
+ .probe = dynamic_funnel_probe,
+ .id_table = dynamic_funnel_ids,
};
-builtin_amba_driver(funnel_driver);
+builtin_amba_driver(dynamic_funnel_driver);
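
The funnel rework above keeps one probe body and lets the two front ends differ only in whether a register resource is handed in: the AMBA (dynamic) path maps and programs the component, while the platform (static) path registers it without any MMIO. A toy model of that pattern (illustration only; funnel_probe_model() is a made-up stand-in for funnel_probe()):

#include <stddef.h>
#include <stdio.h>

struct resource {
	unsigned long start, end;
};

static int funnel_probe_model(const char *bus, const struct resource *res)
{
	if (res)
		printf("%s: dynamic funnel, program registers at %#lx-%#lx\n",
		       bus, res->start, res->end);
	else
		printf("%s: static funnel, nothing to program\n", bus);
	return 0;
}

int main(void)
{
	struct resource r = { 0x20010000, 0x20010fff };

	funnel_probe_model("amba", &r);		/* dynamic: has an MMIO window */
	funnel_probe_model("platform", NULL);	/* static: no MMIO window */
	return 0;
}
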
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index feac98315471..8c9ce74498e1 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Replicator driver
*/
+#include <linux/amba/bus.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -18,25 +19,117 @@
#include "coresight-priv.h"
+#define REPLICATOR_IDFILTER0 0x000
+#define REPLICATOR_IDFILTER1 0x004
+
/**
* struct replicator_drvdata - specifics associated to a replicator component
+ * @base: memory mapped base address for this component. Also indicates
+ * whether this one is programmable or not.
* @dev: the device entity associated with this component
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
*/
struct replicator_drvdata {
+ void __iomem *base;
struct device *dev;
struct clk *atclk;
struct coresight_device *csdev;
};
+static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ if (!coresight_claim_device_unlocked(drvdata->base)) {
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+ coresight_disclaim_device_unlocked(drvdata->base);
+ }
+
+ CS_LOCK(drvdata->base);
+}
+
+/*
+ * replicator_reset : Reset the replicator configuration to sane values.
+ */
+static inline void replicator_reset(struct replicator_drvdata *drvdata)
+{
+ if (drvdata->base)
+ dynamic_replicator_reset(drvdata);
+}
+
+static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
+ int inport, int outport)
+{
+ int rc = 0;
+ u32 reg;
+
+ switch (outport) {
+ case 0:
+ reg = REPLICATOR_IDFILTER0;
+ break;
+ case 1:
+ reg = REPLICATOR_IDFILTER1;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ CS_UNLOCK(drvdata->base);
+
+ if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+ (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ rc = coresight_claim_device_unlocked(drvdata->base);
+
+ /* Ensure that the outport is enabled. */
+ if (!rc)
+ writel_relaxed(0x00, drvdata->base + reg);
+ CS_LOCK(drvdata->base);
+
+ return rc;
+}
+
static int replicator_enable(struct coresight_device *csdev, int inport,
int outport)
{
+ int rc = 0;
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
- return 0;
+ if (drvdata->base)
+ rc = dynamic_replicator_enable(drvdata, inport, outport);
+ if (!rc)
+ dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
+ return rc;
+}
+
+static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
+ int inport, int outport)
+{
+ u32 reg;
+
+ switch (outport) {
+ case 0:
+ reg = REPLICATOR_IDFILTER0;
+ break;
+ case 1:
+ reg = REPLICATOR_IDFILTER1;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ CS_UNLOCK(drvdata->base);
+
+ /* disable the flow of ATB data through port */
+ writel_relaxed(0xff, drvdata->base + reg);
+
+ if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+ (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ coresight_disclaim_device_unlocked(drvdata->base);
+ CS_LOCK(drvdata->base);
}
static void replicator_disable(struct coresight_device *csdev, int inport,
@@ -44,6 +137,8 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ if (drvdata->base)
+ dynamic_replicator_disable(drvdata, inport, outport);
dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
}
@@ -56,58 +151,110 @@ static const struct coresight_ops replicator_cs_ops = {
.link_ops = &replicator_link_ops,
};
-static int replicator_probe(struct platform_device *pdev)
+#define coresight_replicator_reg(name, offset) \
+ coresight_simple_reg32(struct replicator_drvdata, name, offset)
+
+coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
+coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
+
+static struct attribute *replicator_mgmt_attrs[] = {
+ &dev_attr_idfilter0.attr,
+ &dev_attr_idfilter1.attr,
+ NULL,
+};
+
+static const struct attribute_group replicator_mgmt_group = {
+ .attrs = replicator_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *replicator_groups[] = {
+ &replicator_mgmt_group,
+ NULL,
+};
+
+static int replicator_probe(struct device *dev, struct resource *res)
{
- int ret;
- struct device *dev = &pdev->dev;
+ int ret = 0;
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
struct coresight_desc desc = { 0 };
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
if (np) {
pdata = of_get_coresight_platform_data(dev, np);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- pdev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
}
+ if (of_device_is_compatible(np, "arm,coresight-replicator"))
+ pr_warn_once("Uses OBSOLETE CoreSight replicator binding\n");
+
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &pdev->dev;
- drvdata->atclk = devm_clk_get(&pdev->dev, "atclk"); /* optional */
+ drvdata->dev = dev;
+ drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
- pm_runtime_get_noresume(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- platform_set_drvdata(pdev, drvdata);
+
+ /*
+ * Map the device base for dynamic-replicator, which has been
+ * validated by AMBA core
+ */
+ if (res) {
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_disable_clk;
+ }
+ drvdata->base = base;
+ desc.groups = replicator_groups;
+ }
+
+ dev_set_drvdata(dev, drvdata);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
- desc.pdata = pdev->dev.platform_data;
- desc.dev = &pdev->dev;
+ desc.pdata = dev->platform_data;
+ desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto out_disable_pm;
+ goto out_disable_clk;
}
- pm_runtime_put(&pdev->dev);
-
- return 0;
+ replicator_reset(drvdata);
+ pm_runtime_put(dev);
-out_disable_pm:
- if (!IS_ERR(drvdata->atclk))
+out_disable_clk:
+ if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int static_replicator_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* Static replicators do not have a programming base */
+ ret = replicator_probe(&pdev->dev, NULL);
+
+ if (ret) {
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
return ret;
}
@@ -139,18 +286,49 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
replicator_runtime_resume, NULL)
};
-static const struct of_device_id replicator_match[] = {
+static const struct of_device_id static_replicator_match[] = {
{.compatible = "arm,coresight-replicator"},
+ {.compatible = "arm,coresight-static-replicator"},
{}
};
-static struct platform_driver replicator_driver = {
- .probe = replicator_probe,
+static struct platform_driver static_replicator_driver = {
+ .probe = static_replicator_probe,
.driver = {
- .name = "coresight-replicator",
- .of_match_table = replicator_match,
+ .name = "coresight-static-replicator",
+ .of_match_table = static_replicator_match,
+ .pm = &replicator_dev_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(static_replicator_driver);
+
+static int dynamic_replicator_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ return replicator_probe(&adev->dev, &adev->res);
+}
+
+static const struct amba_id dynamic_replicator_ids[] = {
+ {
+ .id = 0x000bb909,
+ .mask = 0x000fffff,
+ },
+ {
+ /* Coresight SoC-600 */
+ .id = 0x000bb9ec,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver dynamic_replicator_driver = {
+ .drv = {
+ .name = "coresight-dynamic-replicator",
.pm = &replicator_dev_pm_ops,
.suppress_bind_attrs = true,
},
+ .probe = dynamic_replicator_probe,
+ .id_table = dynamic_replicator_ids,
};
-builtin_platform_driver(replicator_driver);
+builtin_amba_driver(dynamic_replicator_driver);
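
The dynamic replicator hunks above drive everything off the two ID filter registers: 0xff on a port blocks all trace IDs, 0x00 lets them through, and the component is claimed only while at least one port is open. A small sketch of that bookkeeping (illustration only, not the driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t idfilter[2] = { 0xff, 0xff };	/* both ports blocked at reset */
static bool claimed;

static void port_enable(int port)
{
	if (idfilter[0] == 0xff && idfilter[1] == 0xff)
		claimed = true;		/* first open port claims the device */
	idfilter[port] = 0x00;		/* let all trace IDs through */
}

static void port_disable(int port)
{
	idfilter[port] = 0xff;		/* block the ATB flow on this port */
	if (idfilter[0] == 0xff && idfilter[1] == 0xff)
		claimed = false;	/* last port closed: disclaim */
}

int main(void)
{
	port_enable(0);
	port_enable(1);
	port_disable(0);
	printf("claimed=%d\n", claimed);	/* 1: port 1 is still open */
	port_disable(1);
	printf("claimed=%d\n", claimed);	/* 0: device idle again */
	return 0;
}
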
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index a5f053f2db2c..2527b5d3b65e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
@@ -180,8 +181,10 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ atomic_inc(csdev->refcnt);
goto out;
+ }
/*
* If drvdata::buf isn't NULL, memory was allocated for a previous
@@ -200,11 +203,13 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
}
ret = tmc_etb_enable_hw(drvdata);
- if (!ret)
+ if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
- else
+ atomic_inc(csdev->refcnt);
+ } else {
/* Free up the buffer if we failed to enable */
used = false;
+ }
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -218,6 +223,7 @@ out:
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
int ret = 0;
+ pid_t pid;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct perf_output_handle *handle = data;
@@ -228,19 +234,42 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
if (drvdata->reading)
break;
/*
- * In Perf mode there can be only one writer per sink. There
- * is also no need to continue if the ETB/ETF is already
- * operated from sysFS.
+ * No need to continue if the ETB/ETF is already operated
+ * from sysFS.
*/
- if (drvdata->mode != CS_MODE_DISABLED)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Get a handle on the pid of the process to monitor */
+ pid = task_pid_nr(handle->event->owner);
+
+ if (drvdata->pid != -1 && drvdata->pid != pid) {
+ ret = -EBUSY;
break;
+ }
ret = tmc_set_etf_buffer(csdev, handle);
if (ret)
break;
+
+ /*
+ * No HW configuration is needed if the sink is already in
+ * use for this session.
+ */
+ if (drvdata->pid == pid) {
+ atomic_inc(csdev->refcnt);
+ break;
+ }
+
ret = tmc_etb_enable_hw(drvdata);
- if (!ret)
+ if (!ret) {
+ /* Associate with monitored process. */
+ drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
+ atomic_inc(csdev->refcnt);
+ }
} while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -273,26 +302,34 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
return 0;
}
-static void tmc_disable_etf_sink(struct coresight_device *csdev)
+static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
+
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return;
+ return -EBUSY;
}
- /* Disable the TMC only if it needs to */
- if (drvdata->mode != CS_MODE_DISABLED) {
- tmc_etb_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(csdev->refcnt)) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EBUSY;
}
+ /* Complain if we (somehow) got out of sync */
+ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ tmc_etb_disable_hw(drvdata);
+ /* Dissociate from monitored process. */
+ drvdata->pid = -1;
+ drvdata->mode = CS_MODE_DISABLED;
+
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
+ return 0;
}
static int tmc_enable_etf_link(struct coresight_device *csdev,
@@ -337,10 +374,11 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
}
-static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
- void **pages, int nr_pages, bool overwrite)
+static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
{
- int node;
+ int node, cpu = event->cpu;
struct cs_buffers *buf;
if (cpu == -1)
@@ -400,7 +438,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
u32 *buf_ptr;
u64 read_ptr, write_ptr;
u32 status;
- unsigned long offset, to_read;
+ unsigned long offset, to_read = 0, flags;
struct cs_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -411,6 +449,12 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
return 0;
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Don't do anything if another tracer is using this sink */
+ if (atomic_read(csdev->refcnt) != 1)
+ goto out;
+
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
@@ -504,6 +548,8 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
to_read = buf->nr_pages << PAGE_SHIFT;
}
CS_LOCK(drvdata->base);
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index f684283890d3..df6e4b0b84e9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -4,10 +4,15 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/atomic.h>
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
+#include <linux/idr.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-etm-perf.h"
@@ -23,14 +28,18 @@ struct etr_flat_buf {
/*
* etr_perf_buffer - Perf buffer used for ETR
+ * @drvdata - The ETR drvdata this buffer has been allocated for.
* @etr_buf - Actual buffer used by the ETR
+ * @pid - The PID this etr_perf_buffer belongs to.
* @snapshot - Perf session mode
* @head - handle->head at the beginning of the session.
* @nr_pages - Number of pages in the ring buffer.
* @pages - Array of Pages in the ring buffer.
*/
struct etr_perf_buffer {
+ struct tmc_drvdata *drvdata;
struct etr_buf *etr_buf;
+ pid_t pid;
bool snapshot;
unsigned long head;
int nr_pages;
@@ -772,7 +781,8 @@ static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
static const struct etr_buf_operations *etr_buf_ops[] = {
[ETR_MODE_FLAT] = &etr_flat_buf_ops,
[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
- [ETR_MODE_CATU] = &etr_catu_buf_ops,
+ [ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
+ ? &etr_catu_buf_ops : NULL,
};
static inline int tmc_etr_mode_alloc_buf(int mode,
@@ -786,7 +796,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
case ETR_MODE_FLAT:
case ETR_MODE_ETR_SG:
case ETR_MODE_CATU:
- if (etr_buf_ops[mode]->alloc)
+ if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
node, pages);
if (!rc)
@@ -1124,8 +1134,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* sink is already enabled no memory is needed and the HW need not be
* touched, even if the buffer size has changed.
*/
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ atomic_inc(csdev->refcnt);
goto out;
+ }
/*
* If we don't have a buffer or it doesn't match the requested size,
@@ -1138,8 +1150,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
}
ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
- if (!ret)
+ if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
+ atomic_inc(csdev->refcnt);
+ }
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1154,23 +1168,23 @@ out:
}
/*
- * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf.
+ * alloc_etr_buf: Allocate ETR buffer for use by perf.
* The size of the hardware buffer is dependent on the size configured
* via sysfs and the perf ring buffer size. We prefer to allocate the
* largest possible size, scaling down the size by half until it
* reaches a minimum limit (1M), beyond which we give up.
*/
-static struct etr_perf_buffer *
-tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
- void **pages, bool snapshot)
+static struct etr_buf *
+alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ int nr_pages, void **pages, bool snapshot)
{
+ int node, cpu = event->cpu;
struct etr_buf *etr_buf;
- struct etr_perf_buffer *etr_perf;
unsigned long size;
- etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
- if (!etr_perf)
- return ERR_PTR(-ENOMEM);
+ if (cpu == -1)
+ cpu = smp_processor_id();
+ node = cpu_to_node(cpu);
/*
* Try to match the perf ring buffer size if it is larger
@@ -1195,32 +1209,160 @@ tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
size /= 2;
} while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
+ return ERR_PTR(-ENOMEM);
+
+done:
+ return etr_buf;
+}
+
+static struct etr_buf *
+get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata,
+ struct perf_event *event, int nr_pages,
+ void **pages, bool snapshot)
+{
+ int ret;
+ pid_t pid = task_pid_nr(event->owner);
+ struct etr_buf *etr_buf;
+
+retry:
+ /*
+ * An etr_perf_buffer is associated with an event and holds a reference
+ * to the AUX ring buffer that was created for that event. In CPU-wide
+ * N:1 mode multiple events (one per CPU), each with its own AUX ring
+ * buffer, share a sink. As such an etr_perf_buffer is created for each
+ * event but a single etr_buf associated with the ETR is shared between
+ * them. The last event in a trace session will copy the content of the
+ * etr_buf to its AUX ring buffer. Ring buffers associated with other
+ * events are simply not used and are freed as events are destroyed. We still
+ * need to allocate a ring buffer for each event since we don't know
+ * which event will be last.
+ */
+
+ /*
+ * The first thing to do here is check if an etr_buf has already been
+ * allocated for this session. If so it is shared with this event,
+ * otherwise it is created.
+ */
+ mutex_lock(&drvdata->idr_mutex);
+ etr_buf = idr_find(&drvdata->idr, pid);
+ if (etr_buf) {
+ refcount_inc(&etr_buf->refcount);
+ mutex_unlock(&drvdata->idr_mutex);
+ return etr_buf;
+ }
+
+ /* If we made it here no buffer has been allocated, do so now. */
+ mutex_unlock(&drvdata->idr_mutex);
+
+ etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
+ if (IS_ERR(etr_buf))
+ return etr_buf;
+
+ refcount_set(&etr_buf->refcount, 1);
+
+ /* Now that we have a buffer, add it to the IDR. */
+ mutex_lock(&drvdata->idr_mutex);
+ ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
+ mutex_unlock(&drvdata->idr_mutex);
+
+ /* Another event with this session ID has allocated this buffer. */
+ if (ret == -ENOSPC) {
+ tmc_free_etr_buf(etr_buf);
+ goto retry;
+ }
+
+ /* The IDR can't allocate room for a new session, abandon ship. */
+ if (ret == -ENOMEM) {
+ tmc_free_etr_buf(etr_buf);
+ return ERR_PTR(ret);
+ }
+
+
+ return etr_buf;
+}
+
+static struct etr_buf *
+get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
+ struct perf_event *event, int nr_pages,
+ void **pages, bool snapshot)
+{
+ struct etr_buf *etr_buf;
+
+ /*
+ * In per-thread mode the etr_buf isn't shared, so just go ahead
+ * with memory allocation.
+ */
+ etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
+ if (IS_ERR(etr_buf))
+ goto out;
+
+ refcount_set(&etr_buf->refcount, 1);
+out:
+ return etr_buf;
+}
+
+static struct etr_buf *
+get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ int nr_pages, void **pages, bool snapshot)
+{
+ if (event->cpu == -1)
+ return get_perf_etr_buf_per_thread(drvdata, event, nr_pages,
+ pages, snapshot);
+
+ return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages,
+ pages, snapshot);
+}
+
+static struct etr_perf_buffer *
+tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ int nr_pages, void **pages, bool snapshot)
+{
+ int node, cpu = event->cpu;
+ struct etr_buf *etr_buf;
+ struct etr_perf_buffer *etr_perf;
+
+ if (cpu == -1)
+ cpu = smp_processor_id();
+ node = cpu_to_node(cpu);
+
+ etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
+ if (!etr_perf)
+ return ERR_PTR(-ENOMEM);
+
+ etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot);
+ if (!IS_ERR(etr_buf))
+ goto done;
+
kfree(etr_perf);
return ERR_PTR(-ENOMEM);
done:
+ /*
+ * Keep a reference to the ETR this buffer has been allocated for
+ * in order to have access to the IDR in tmc_free_etr_buffer().
+ */
+ etr_perf->drvdata = drvdata;
etr_perf->etr_buf = etr_buf;
+
return etr_perf;
}
static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
- int cpu, void **pages, int nr_pages,
- bool snapshot)
+ struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
{
struct etr_perf_buffer *etr_perf;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- if (cpu == -1)
- cpu = smp_processor_id();
-
- etr_perf = tmc_etr_setup_perf_buf(drvdata, cpu_to_node(cpu),
+ etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
nr_pages, pages, snapshot);
if (IS_ERR(etr_perf)) {
dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
return NULL;
}
+ etr_perf->pid = task_pid_nr(event->owner);
etr_perf->snapshot = snapshot;
etr_perf->nr_pages = nr_pages;
etr_perf->pages = pages;
@@ -1231,9 +1373,33 @@ static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
static void tmc_free_etr_buffer(void *config)
{
struct etr_perf_buffer *etr_perf = config;
+ struct tmc_drvdata *drvdata = etr_perf->drvdata;
+ struct etr_buf *buf, *etr_buf = etr_perf->etr_buf;
+
+ if (!etr_buf)
+ goto free_etr_perf_buffer;
+
+ mutex_lock(&drvdata->idr_mutex);
+ /* If we are not the last one to use the buffer, don't touch it. */
+ if (!refcount_dec_and_test(&etr_buf->refcount)) {
+ mutex_unlock(&drvdata->idr_mutex);
+ goto free_etr_perf_buffer;
+ }
+
+ /* We are the last one, remove from the IDR and free the buffer. */
+ buf = idr_remove(&drvdata->idr, etr_perf->pid);
+ mutex_unlock(&drvdata->idr_mutex);
+
+ /*
+ * Something went very wrong if the buffer associated with this ID
+ * is not the same in the IDR. Leak to avoid use after free.
+ */
+ if (buf && WARN_ON(buf != etr_buf))
+ goto free_etr_perf_buffer;
+
+ tmc_free_etr_buf(etr_perf->etr_buf);
- if (etr_perf->etr_buf)
- tmc_free_etr_buf(etr_perf->etr_buf);
+free_etr_perf_buffer:
kfree(etr_perf);
}
@@ -1308,6 +1474,13 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
struct etr_buf *etr_buf = etr_perf->etr_buf;
spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Don't do anything if another tracer is using this sink */
+ if (atomic_read(csdev->refcnt) != 1) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ goto out;
+ }
+
if (WARN_ON(drvdata->perf_data != etr_perf)) {
lost = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1347,17 +1520,15 @@ out:
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
{
int rc = 0;
+ pid_t pid;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct perf_output_handle *handle = data;
struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
spin_lock_irqsave(&drvdata->spinlock, flags);
- /*
- * There can be only one writer per sink in perf mode. If the sink
- * is already open in SYSFS mode, we can't use it.
- */
- if (drvdata->mode != CS_MODE_DISABLED || WARN_ON(drvdata->perf_data)) {
+ /* Don't use this sink if it is already claimed by sysFS */
+ if (drvdata->mode == CS_MODE_SYSFS) {
rc = -EBUSY;
goto unlock_out;
}
@@ -1367,11 +1538,34 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
goto unlock_out;
}
+ /* Get a handle on the pid of the process to monitor */
+ pid = etr_perf->pid;
+
+ /* Do not proceed if this device is associated with another session */
+ if (drvdata->pid != -1 && drvdata->pid != pid) {
+ rc = -EBUSY;
+ goto unlock_out;
+ }
+
etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
drvdata->perf_data = etr_perf;
+
+ /*
+ * No HW configuration is needed if the sink is already in
+ * use for this session.
+ */
+ if (drvdata->pid == pid) {
+ atomic_inc(csdev->refcnt);
+ goto unlock_out;
+ }
+
rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
- if (!rc)
+ if (!rc) {
+ /* Associate with monitored process. */
+ drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
+ atomic_inc(csdev->refcnt);
+ }
unlock_out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1392,26 +1586,34 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev,
return -EINVAL;
}
-static void tmc_disable_etr_sink(struct coresight_device *csdev)
+static int tmc_disable_etr_sink(struct coresight_device *csdev)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
+
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return;
+ return -EBUSY;
}
- /* Disable the TMC only if it needs to */
- if (drvdata->mode != CS_MODE_DISABLED) {
- tmc_etr_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(csdev->refcnt)) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EBUSY;
}
+ /* Complain if we (somehow) got out of sync */
+ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ tmc_etr_disable_hw(drvdata);
+ /* Dissociate from monitored process. */
+ drvdata->pid = -1;
+ drvdata->mode = CS_MODE_DISABLED;
+
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
+ return 0;
}
static const struct coresight_ops_sink tmc_etr_sink_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 2a02da3d630f..3f718729d741 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -8,10 +8,12 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -340,6 +342,8 @@ static inline bool tmc_etr_can_use_sg(struct tmc_drvdata *drvdata)
static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
u32 devid, void *dev_caps)
{
+ int rc;
+
u32 dma_mask = 0;
/* Set the unadvertised capabilities */
@@ -369,7 +373,10 @@ static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
dma_mask = 40;
}
- return dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
+ rc = dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
+ if (rc)
+ dev_err(drvdata->dev, "Failed to setup DMA mask: %d\n", rc);
+ return rc;
}
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
@@ -415,6 +422,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
drvdata->memwidth = tmc_get_memwidth(devid);
+ /* This device is not associated with a session */
+ drvdata->pid = -1;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
if (np)
@@ -427,8 +436,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
- pm_runtime_put(&adev->dev);
-
desc.pdata = pdata;
desc.dev = dev;
desc.groups = coresight_tmc_groups;
@@ -447,6 +454,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
coresight_get_uci_data(id));
if (ret)
goto out;
+ idr_init(&drvdata->idr);
+ mutex_init(&drvdata->idr_mutex);
break;
case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
@@ -471,6 +480,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
ret = misc_register(&drvdata->miscdev);
if (ret)
coresight_unregister(drvdata->csdev);
+ else
+ pm_runtime_put(&adev->dev);
out:
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 487c53701e9c..503f1b3a3741 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -8,7 +8,10 @@
#define _CORESIGHT_TMC_H
#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
@@ -133,6 +136,7 @@ struct etr_buf_operations;
/**
* struct etr_buf - Details of the buffer used by ETR
+ * @refcount : Number of sources currently using this etr_buf.
* @mode : Mode of the ETR buffer, contiguous, Scatter Gather etc.
* @full : Trace data overflow
* @size : Size of the buffer.
@@ -143,6 +147,7 @@ struct etr_buf_operations;
* @private : Backend specific information for the buf
*/
struct etr_buf {
+ refcount_t refcount;
enum etr_mode mode;
bool full;
ssize_t size;
@@ -160,6 +165,8 @@ struct etr_buf {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
+ * @pid: Process ID of the process being monitored by the session
+ * that is using this component.
* @buf: Snapshot of the trace data for ETF/ETB.
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.
@@ -170,6 +177,8 @@ struct etr_buf {
* @trigger_cntr: amount of words to store after a trigger.
* @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the
* device configuration register (DEVID)
+ * @idr: Holds etr_bufs allocated for this ETR.
+ * @idr_mutex: Access serialisation for idr.
* @perf_data: PERF buffer for ETR.
* @sysfs_data: SYSFS buffer for ETR.
*/
@@ -179,6 +188,7 @@ struct tmc_drvdata {
struct coresight_device *csdev;
struct miscdevice miscdev;
spinlock_t spinlock;
+ pid_t pid;
bool reading;
union {
char *buf; /* TMC ETB */
@@ -191,6 +201,8 @@ struct tmc_drvdata {
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
u32 etr_caps;
+ struct idr idr;
+ struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
void *perf_data;
};
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index b2f72a1fa402..63d9af31f57f 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -5,6 +5,7 @@
* Description: CoreSight Trace Port Interface Unit driver
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -73,7 +74,7 @@ static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
tpiu_enable_hw(drvdata);
-
+ atomic_inc(csdev->refcnt);
dev_dbg(drvdata->dev, "TPIU enabled\n");
return 0;
}
@@ -94,13 +95,17 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void tpiu_disable(struct coresight_device *csdev)
+static int tpiu_disable(struct coresight_device *csdev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ if (atomic_dec_return(csdev->refcnt))
+ return -EBUSY;
+
tpiu_disable_hw(drvdata);
dev_dbg(drvdata->dev, "TPIU disabled\n");
+ return 0;
}
static const struct coresight_ops_sink tpiu_sink_ops = {
@@ -153,8 +158,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
/* Disable tpiu to support older devices */
tpiu_disable_hw(drvdata);
- pm_runtime_put(&adev->dev);
-
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
desc.ops = &tpiu_cs_ops;
@@ -162,7 +165,12 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- return PTR_ERR_OR_ZERO(drvdata->csdev);
+ if (!IS_ERR(drvdata->csdev)) {
+ pm_runtime_put(&adev->dev);
+ return 0;
+ }
+
+ return PTR_ERR(drvdata->csdev);
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 29cef898afba..4b130281236a 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -225,26 +225,28 @@ static int coresight_enable_sink(struct coresight_device *csdev,
* We need to make sure the "new" session is compatible with the
* existing "mode" of operation.
*/
- if (sink_ops(csdev)->enable) {
- ret = sink_ops(csdev)->enable(csdev, mode, data);
- if (ret)
- return ret;
- csdev->enable = true;
- }
+ if (!sink_ops(csdev)->enable)
+ return -EINVAL;
- atomic_inc(csdev->refcnt);
+ ret = sink_ops(csdev)->enable(csdev, mode, data);
+ if (ret)
+ return ret;
+ csdev->enable = true;
return 0;
}
static void coresight_disable_sink(struct coresight_device *csdev)
{
- if (atomic_dec_return(csdev->refcnt) == 0) {
- if (sink_ops(csdev)->disable) {
- sink_ops(csdev)->disable(csdev);
- csdev->enable = false;
- }
- }
+ int ret;
+
+ if (!sink_ops(csdev)->disable)
+ return;
+
+ ret = sink_ops(csdev)->disable(csdev);
+ if (ret)
+ return;
+ csdev->enable = false;
}
static int coresight_enable_link(struct coresight_device *csdev,
@@ -973,7 +975,6 @@ static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
- kfree(csdev->conns);
kfree(csdev->refcnt);
kfree(csdev);
}
diff --git a/drivers/hwtracing/intel_th/acpi.c b/drivers/hwtracing/intel_th/acpi.c
index 87bc3744755f..87f9024e4bbb 100644
--- a/drivers/hwtracing/intel_th/acpi.c
+++ b/drivers/hwtracing/intel_th/acpi.c
@@ -37,15 +37,21 @@ MODULE_DEVICE_TABLE(acpi, intel_th_acpi_ids);
static int intel_th_acpi_probe(struct platform_device *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct resource resource[TH_MMIO_END];
const struct acpi_device_id *id;
struct intel_th *th;
+ int i, r;
id = acpi_match_device(intel_th_acpi_ids, &pdev->dev);
if (!id)
return -ENODEV;
- th = intel_th_alloc(&pdev->dev, (void *)id->driver_data,
- pdev->resource, pdev->num_resources, -1);
+ for (i = 0, r = 0; i < pdev->num_resources && r < TH_MMIO_END; i++)
+ if (pdev->resource[i].flags &
+ (IORESOURCE_IRQ | IORESOURCE_MEM))
+ resource[r++] = pdev->resource[i];
+
+ th = intel_th_alloc(&pdev->dev, (void *)id->driver_data, resource, r);
if (IS_ERR(th))
return PTR_ERR(th);
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 7c1acc2f801c..033dce563c99 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -430,9 +430,9 @@ static const struct intel_th_subdevice {
.nres = 1,
.res = {
{
- /* Handle TSCU from GTH driver */
+ /* Handle TSCU and CTS from GTH driver */
.start = REG_GTH_OFFSET,
- .end = REG_TSCU_OFFSET + REG_TSCU_LENGTH - 1,
+ .end = REG_CTS_OFFSET + REG_CTS_LENGTH - 1,
.flags = IORESOURCE_MEM,
},
},
@@ -491,7 +491,7 @@ static const struct intel_th_subdevice {
.flags = IORESOURCE_MEM,
},
{
- .start = 1, /* use resource[1] */
+ .start = TH_MMIO_SW,
.end = 0,
.flags = IORESOURCE_MEM,
},
@@ -501,6 +501,24 @@ static const struct intel_th_subdevice {
.type = INTEL_TH_SOURCE,
},
{
+ .nres = 2,
+ .res = {
+ {
+ .start = REG_STH_OFFSET,
+ .end = REG_STH_OFFSET + REG_STH_LENGTH - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TH_MMIO_RTIT,
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ },
+ },
+ .id = -1,
+ .name = "rtit",
+ .type = INTEL_TH_SOURCE,
+ },
+ {
.nres = 1,
.res = {
{
@@ -584,7 +602,6 @@ intel_th_subdevice_alloc(struct intel_th *th,
struct intel_th_device *thdev;
struct resource res[3];
unsigned int req = 0;
- bool is64bit = false;
int r, err;
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -594,18 +611,12 @@ intel_th_subdevice_alloc(struct intel_th *th,
thdev->drvdata = th->drvdata;
- for (r = 0; r < th->num_resources; r++)
- if (th->resource[r].flags & IORESOURCE_MEM_64) {
- is64bit = true;
- break;
- }
-
memcpy(res, subdev->res,
sizeof(struct resource) * subdev->nres);
for (r = 0; r < subdev->nres; r++) {
struct resource *devres = th->resource;
- int bar = 0; /* cut subdevices' MMIO from resource[0] */
+ int bar = TH_MMIO_CONFIG;
/*
* Take .end == 0 to mean 'take the whole bar',
@@ -614,8 +625,9 @@ intel_th_subdevice_alloc(struct intel_th *th,
*/
if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
bar = res[r].start;
- if (is64bit)
- bar *= 2;
+ err = -ENODEV;
+ if (bar >= th->num_resources)
+ goto fail_put_device;
res[r].start = 0;
res[r].end = resource_size(&devres[bar]) - 1;
}
@@ -627,7 +639,12 @@ intel_th_subdevice_alloc(struct intel_th *th,
dev_dbg(th->dev, "%s:%d @ %pR\n",
subdev->name, r, &res[r]);
} else if (res[r].flags & IORESOURCE_IRQ) {
- res[r].start = th->irq;
+ /*
+ * Only pass on the IRQ if we have useful interrupts:
+ * the ones that can be configured via MINTCTL.
+ */
+ if (INTEL_TH_CAP(th, has_mintctl) && th->irq != -1)
+ res[r].start = th->irq;
}
}
@@ -758,8 +775,13 @@ static int intel_th_populate(struct intel_th *th)
thdev = intel_th_subdevice_alloc(th, subdev);
/* note: caller should free subdevices from th::thdev[] */
- if (IS_ERR(thdev))
+ if (IS_ERR(thdev)) {
+ /* ENODEV for individual subdevices is allowed */
+ if (PTR_ERR(thdev) == -ENODEV)
+ continue;
+
return PTR_ERR(thdev);
+ }
th->thdev[th->num_thdevs++] = thdev;
}
@@ -809,26 +831,40 @@ static const struct file_operations intel_th_output_fops = {
.llseek = noop_llseek,
};
+static irqreturn_t intel_th_irq(int irq, void *data)
+{
+ struct intel_th *th = data;
+ irqreturn_t ret = IRQ_NONE;
+ struct intel_th_driver *d;
+ int i;
+
+ for (i = 0; i < th->num_thdevs; i++) {
+ if (th->thdev[i]->type != INTEL_TH_OUTPUT)
+ continue;
+
+ d = to_intel_th_driver(th->thdev[i]->dev.driver);
+ if (d && d->irq)
+ ret |= d->irq(th->thdev[i]);
+ }
+
+ if (ret == IRQ_NONE)
+ pr_warn_ratelimited("nobody cared for irq\n");
+
+ return ret;
+}
+
/**
* intel_th_alloc() - allocate a new Intel TH device and its subdevices
* @dev: parent device
- * @devres: parent's resources
- * @ndevres: number of resources
+ * @devres: resources indexed by th_mmio_idx
* @irq: irq number
*/
struct intel_th *
intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
- struct resource *devres, unsigned int ndevres, int irq)
+ struct resource *devres, unsigned int ndevres)
{
+ int err, r, nr_mmios = 0;
struct intel_th *th;
- int err, r;
-
- if (irq == -1)
- for (r = 0; r < ndevres; r++)
- if (devres[r].flags & IORESOURCE_IRQ) {
- irq = devres[r].start;
- break;
- }
th = kzalloc(sizeof(*th), GFP_KERNEL);
if (!th)
@@ -846,12 +882,32 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
err = th->major;
goto err_ida;
}
+ th->irq = -1;
th->dev = dev;
th->drvdata = drvdata;
- th->resource = devres;
- th->num_resources = ndevres;
- th->irq = irq;
+ for (r = 0; r < ndevres; r++)
+ switch (devres[r].flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_MEM:
+ th->resource[nr_mmios++] = devres[r];
+ break;
+ case IORESOURCE_IRQ:
+ err = devm_request_irq(dev, devres[r].start,
+ intel_th_irq, IRQF_SHARED,
+ dev_name(dev), th);
+ if (err)
+ goto err_chrdev;
+
+ if (th->irq == -1)
+ th->irq = devres[r].start;
+ break;
+ default:
+ dev_warn(dev, "Unknown resource type %lx\n",
+ devres[r].flags);
+ break;
+ }
+
+ th->num_resources = nr_mmios;
dev_set_drvdata(dev, th);
@@ -868,6 +924,10 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
return th;
+err_chrdev:
+ __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
+ "intel_th/output");
+
err_ida:
ida_simple_remove(&intel_th_ida, th->id);
@@ -928,6 +988,27 @@ int intel_th_trace_enable(struct intel_th_device *thdev)
EXPORT_SYMBOL_GPL(intel_th_trace_enable);
/**
+ * intel_th_trace_switch() - execute a switch sequence
+ * @thdev: output device that requests tracing switch
+ */
+int intel_th_trace_switch(struct intel_th_device *thdev)
+{
+ struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
+ struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
+
+ if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
+ return -EINVAL;
+
+ hubdrv->trig_switch(hub, &thdev->output);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_th_trace_switch);
+
+/**
* intel_th_trace_disable() - disable tracing for an output device
* @thdev: output device that requests tracing be disabled
*/
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index edc52d75e6bd..fa9d34af87ac 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -308,6 +308,11 @@ static int intel_th_gth_reset(struct gth_device *gth)
iowrite32(0, gth->base + REG_GTH_SCR);
iowrite32(0xfc, gth->base + REG_GTH_SCR2);
+ /* setup CTS for single trigger */
+ iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN);
+ iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
+ CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT);
+
return 0;
}
@@ -457,6 +462,68 @@ static int intel_th_output_attributes(struct gth_device *gth)
}
/**
+ * intel_th_gth_stop() - stop tracing to an output device
+ * @gth: GTH device
+ * @output: output device's descriptor
+ * @capture_done: set when no more traces will be captured
+ *
+ * This will stop tracing using the force storeEn off signal and wait for the
+ * pipelines to be empty for the corresponding output port.
+ */
+static void intel_th_gth_stop(struct gth_device *gth,
+ struct intel_th_output *output,
+ bool capture_done)
+{
+ struct intel_th_device *outdev =
+ container_of(output, struct intel_th_device, output);
+ struct intel_th_driver *outdrv =
+ to_intel_th_driver(outdev->dev.driver);
+ unsigned long count;
+ u32 reg;
+ u32 scr2 = 0xfc | (capture_done ? 1 : 0);
+
+ iowrite32(0, gth->base + REG_GTH_SCR);
+ iowrite32(scr2, gth->base + REG_GTH_SCR2);
+
+ /* wait on pipeline empty for the given port */
+ for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
+ count && !(reg & BIT(output->port)); count--) {
+ reg = ioread32(gth->base + REG_GTH_STAT);
+ cpu_relax();
+ }
+
+ if (!count)
+ dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n",
+ output->port);
+
+ /* wait on output pipeline empty */
+ if (outdrv->wait_empty)
+ outdrv->wait_empty(outdev);
+
+ /* clear force capture done for next captures */
+ iowrite32(0xfc, gth->base + REG_GTH_SCR2);
+}
+
+/**
+ * intel_th_gth_start() - start tracing to an output device
+ * @gth: GTH device
+ * @output: output device's descriptor
+ *
+ * This will start tracing using the force storeEn signal.
+ */
+static void intel_th_gth_start(struct gth_device *gth,
+ struct intel_th_output *output)
+{
+ u32 scr = 0xfc0000;
+
+ if (output->multiblock)
+ scr |= 0xff;
+
+ iowrite32(scr, gth->base + REG_GTH_SCR);
+ iowrite32(0, gth->base + REG_GTH_SCR2);
+}
+
+/**
* intel_th_gth_disable() - disable tracing to an output device
* @thdev: GTH device
* @output: output device's descriptor
@@ -469,7 +536,6 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
struct intel_th_output *output)
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
- unsigned long count;
int master;
u32 reg;
@@ -482,22 +548,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
}
spin_unlock(&gth->gth_lock);
- iowrite32(0, gth->base + REG_GTH_SCR);
- iowrite32(0xfd, gth->base + REG_GTH_SCR2);
-
- /* wait on pipeline empty for the given port */
- for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
- count && !(reg & BIT(output->port)); count--) {
- reg = ioread32(gth->base + REG_GTH_STAT);
- cpu_relax();
- }
-
- /* clear force capture done for next captures */
- iowrite32(0xfc, gth->base + REG_GTH_SCR2);
-
- if (!count)
- dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
- output->port);
+ intel_th_gth_stop(gth, output, true);
reg = ioread32(gth->base + REG_GTH_SCRPD0);
reg &= ~output->scratchpad;
@@ -526,8 +577,8 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
struct intel_th *th = to_intel_th(thdev);
- u32 scr = 0xfc0000, scrpd;
int master;
+ u32 scrpd;
spin_lock(&gth->gth_lock);
for_each_set_bit(master, gth->output[output->port].master,
@@ -535,9 +586,6 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
gth_master_set(gth, master, output->port);
}
- if (output->multiblock)
- scr |= 0xff;
-
output->active = true;
spin_unlock(&gth->gth_lock);
@@ -548,8 +596,38 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
scrpd |= output->scratchpad;
iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
- iowrite32(scr, gth->base + REG_GTH_SCR);
- iowrite32(0, gth->base + REG_GTH_SCR2);
+ intel_th_gth_start(gth, output);
+}
+
+/**
+ * intel_th_gth_switch() - execute a switch sequence
+ * @thdev: GTH device
+ * @output: output device's descriptor
+ *
+ * This will execute a switch sequence that will trigger a switch window
+ * when tracing to MSC in multi-block mode.
+ */
+static void intel_th_gth_switch(struct intel_th_device *thdev,
+ struct intel_th_output *output)
+{
+ struct gth_device *gth = dev_get_drvdata(&thdev->dev);
+ unsigned long count;
+ u32 reg;
+
+ /* trigger */
+ iowrite32(0, gth->base + REG_CTS_CTL);
+ iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL);
+ /* wait on trigger status */
+ for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH;
+ count && !(reg & BIT(4)); count--) {
+ reg = ioread32(gth->base + REG_CTS_STAT);
+ cpu_relax();
+ }
+ if (!count)
+ dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");
+
+ intel_th_gth_stop(gth, output, false);
+ intel_th_gth_start(gth, output);
}
/**
@@ -735,6 +813,7 @@ static struct intel_th_driver intel_th_gth_driver = {
.unassign = intel_th_gth_unassign,
.set_output = intel_th_gth_set_output,
.enable = intel_th_gth_enable,
+ .trig_switch = intel_th_gth_switch,
.disable = intel_th_gth_disable,
.driver = {
.name = "gth",
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 6f2b0b930875..bfcc0fd01177 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -49,6 +49,12 @@ enum {
REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */
REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */
+
+ /* Common Capture Sequencer (CTS) registers */
+ REG_CTS_C0S0_EN = 0x30c0, /* clause_event_enable_c0s0 */
+ REG_CTS_C0S0_ACT = 0x3180, /* clause_action_control_c0s0 */
+ REG_CTS_STAT = 0x32a0, /* cts_status */
+ REG_CTS_CTL = 0x32a4, /* cts_control */
};
/* waiting for Pipeline Empty bit(s) to assert for GTH */
@@ -57,4 +63,17 @@ enum {
#define TSUCTRL_CTCRESYNC BIT(0)
#define TSCUSTAT_CTCSYNCING BIT(1)
+/* waiting for Trigger status to assert for CTS */
+#define CTS_TRIG_WAITLOOP_DEPTH 10000
+
+#define CTS_EVENT_ENABLE_IF_ANYTHING BIT(31)
+#define CTS_ACTION_CONTROL_STATE_OFF 27
+#define CTS_ACTION_CONTROL_SET_STATE(x) \
+ (((x) & 0x1f) << CTS_ACTION_CONTROL_STATE_OFF)
+#define CTS_ACTION_CONTROL_TRIGGER BIT(4)
+
+#define CTS_STATE_IDLE 0x10u
+
+#define CTS_CTL_SEQUENCER_ENABLE BIT(0)
+
#endif /* __INTEL_TH_GTH_H__ */
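The CTS macros added above can be sanity-checked in isolation. A small, self-contained sketch (assuming the same 32-bit register layout) reproduces the value that intel_th_gth_reset() writes to REG_CTS_C0S0_ACT:

/*
 * Standalone illustration only: setting the IDLE state plus the
 * TRIGGER action bit should encode to 0x80000010.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define CTS_ACTION_CONTROL_STATE_OFF	27
#define CTS_ACTION_CONTROL_SET_STATE(x)	\
	(((x) & 0x1f) << CTS_ACTION_CONTROL_STATE_OFF)
#define CTS_ACTION_CONTROL_TRIGGER	BIT(4)
#define CTS_STATE_IDLE			0x10u

int main(void)
{
	uint32_t act = CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
		       CTS_ACTION_CONTROL_TRIGGER;

	printf("C0S0 action control: %#010x\n", (unsigned int)act);
	return act == 0x80000010u ? 0 : 1;	/* expect 0x80000010 */
}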
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 780206dc9012..0df480072b6c 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -8,6 +8,8 @@
#ifndef __INTEL_TH_H__
#define __INTEL_TH_H__
+#include <linux/irqreturn.h>
+
/* intel_th_device device types */
enum {
/* Devices that generate trace data */
@@ -18,6 +20,8 @@ enum {
INTEL_TH_SWITCH,
};
+struct intel_th_device;
+
/**
* struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
* @port: output port number, assigned by the switch
@@ -25,6 +29,7 @@ enum {
* @scratchpad: scratchpad bits to flag when this output is enabled
* @multiblock: true for multiblock output configuration
* @active: true when this output is enabled
+ * @wait_empty: wait for device pipeline to be empty
*
* Output port descriptor, used by switch driver to tell which output
* port this output device corresponds to. Filled in at output device's
@@ -42,10 +47,12 @@ struct intel_th_output {
/**
* struct intel_th_drvdata - describes hardware capabilities and quirks
* @tscu_enable: device needs SW to enable time stamping unit
+ * @has_mintctl: device has interrupt control (MINTCTL) register
* @host_mode_only: device can only operate in 'host debugger' mode
*/
struct intel_th_drvdata {
unsigned int tscu_enable : 1,
+ has_mintctl : 1,
host_mode_only : 1;
};
@@ -157,10 +164,13 @@ struct intel_th_driver {
struct intel_th_device *othdev);
void (*enable)(struct intel_th_device *thdev,
struct intel_th_output *output);
+ void (*trig_switch)(struct intel_th_device *thdev,
+ struct intel_th_output *output);
void (*disable)(struct intel_th_device *thdev,
struct intel_th_output *output);
/* output ops */
- void (*irq)(struct intel_th_device *thdev);
+ irqreturn_t (*irq)(struct intel_th_device *thdev);
+ void (*wait_empty)(struct intel_th_device *thdev);
int (*activate)(struct intel_th_device *thdev);
void (*deactivate)(struct intel_th_device *thdev);
/* file_operations for those who want a device node */
@@ -213,21 +223,23 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
struct intel_th *
intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
- struct resource *devres, unsigned int ndevres, int irq);
+ struct resource *devres, unsigned int ndevres);
void intel_th_free(struct intel_th *th);
int intel_th_driver_register(struct intel_th_driver *thdrv);
void intel_th_driver_unregister(struct intel_th_driver *thdrv);
int intel_th_trace_enable(struct intel_th_device *thdev);
+int intel_th_trace_switch(struct intel_th_device *thdev);
int intel_th_trace_disable(struct intel_th_device *thdev);
int intel_th_set_output(struct intel_th_device *thdev,
unsigned int master);
int intel_th_output_enable(struct intel_th *th, unsigned int otype);
-enum {
+enum th_mmio_idx {
TH_MMIO_CONFIG = 0,
- TH_MMIO_SW = 2,
+ TH_MMIO_SW = 1,
+ TH_MMIO_RTIT = 2,
TH_MMIO_END,
};
@@ -237,6 +249,9 @@ enum {
#define TH_CONFIGURABLE_MASTERS 256
#define TH_MSC_MAX 2
+/* Maximum IRQ vectors */
+#define TH_NVEC_MAX 8
+
/**
* struct intel_th - Intel TH controller
* @dev: driver core's device
@@ -244,7 +259,7 @@ enum {
* @hub: "switch" subdevice (GTH)
* @resource: resources of the entire controller
* @num_thdevs: number of devices in the @thdev array
- * @num_resources: number or resources in the @resource array
+ * @num_resources: number of resources in the @resource array
* @irq: irq number
* @id: this Intel TH controller's device ID in the system
* @major: device node major for output devices
@@ -256,7 +271,7 @@ struct intel_th {
struct intel_th_device *hub;
struct intel_th_drvdata *drvdata;
- struct resource *resource;
+ struct resource resource[TH_MMIO_END];
int (*activate)(struct intel_th *);
void (*deactivate)(struct intel_th *);
unsigned int num_thdevs;
@@ -296,6 +311,9 @@ enum {
REG_TSCU_OFFSET = 0x2000,
REG_TSCU_LENGTH = 0x1000,
+ REG_CTS_OFFSET = 0x3000,
+ REG_CTS_LENGTH = 0x1000,
+
/* Software Trace Hub (STH) [0x4000..0x4fff] */
REG_STH_OFFSET = 0x4000,
REG_STH_LENGTH = 0x2000,
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index ba7aaf421f36..81bb54fa3ce8 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -29,28 +29,18 @@
#define msc_dev(x) (&(x)->thdev->dev)
/**
- * struct msc_block - multiblock mode block descriptor
- * @bdesc: pointer to hardware descriptor (beginning of the block)
- * @addr: physical address of the block
- */
-struct msc_block {
- struct msc_block_desc *bdesc;
- dma_addr_t addr;
-};
-
-/**
* struct msc_window - multiblock mode window descriptor
* @entry: window list linkage (msc::win_list)
* @pgoff: page offset into the buffer that this window starts at
* @nr_blocks: number of blocks (pages) in this window
- * @block: array of block descriptors
+ * @sgt: sg_table holding the window's block descriptors
*/
struct msc_window {
struct list_head entry;
unsigned long pgoff;
unsigned int nr_blocks;
struct msc *msc;
- struct msc_block block[0];
+ struct sg_table sgt;
};
/**
@@ -84,6 +74,8 @@ struct msc_iter {
* @reg_base: register window base address
* @thdev: intel_th_device pointer
* @win_list: list of windows in multiblock mode
+ * @single_sgt: single mode buffer
+ * @cur_win: current window
* @nr_pages: total number of pages allocated for this buffer
* @single_sz: amount of data in single mode
* @single_wrap: single mode wrap occurred
@@ -101,9 +93,12 @@ struct msc_iter {
*/
struct msc {
void __iomem *reg_base;
+ void __iomem *msu_base;
struct intel_th_device *thdev;
struct list_head win_list;
+ struct sg_table single_sgt;
+ struct msc_window *cur_win;
unsigned long nr_pages;
unsigned long single_sz;
unsigned int single_wrap : 1;
@@ -120,7 +115,8 @@ struct msc {
/* config */
unsigned int enabled : 1,
- wrap : 1;
+ wrap : 1,
+ do_irq : 1;
unsigned int mode;
unsigned int burst_len;
unsigned int index;
@@ -139,6 +135,49 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
return false;
}
+static inline struct msc_block_desc *
+msc_win_block(struct msc_window *win, unsigned int block)
+{
+ return sg_virt(&win->sgt.sgl[block]);
+}
+
+static inline dma_addr_t
+msc_win_baddr(struct msc_window *win, unsigned int block)
+{
+ return sg_dma_address(&win->sgt.sgl[block]);
+}
+
+static inline unsigned long
+msc_win_bpfn(struct msc_window *win, unsigned int block)
+{
+ return msc_win_baddr(win, block) >> PAGE_SHIFT;
+}
+
+/**
+ * msc_is_last_win() - check if a window is the last one for a given MSC
+ * @win: window
+ * Return: true if @win is the last window in MSC's multiblock buffer
+ */
+static inline bool msc_is_last_win(struct msc_window *win)
+{
+ return win->entry.next == &win->msc->win_list;
+}
+
+/**
+ * msc_next_window() - return next window in the multiblock buffer
+ * @win: current window
+ *
+ * Return: window following the current one
+ */
+static struct msc_window *msc_next_window(struct msc_window *win)
+{
+ if (msc_is_last_win(win))
+ return list_first_entry(&win->msc->win_list, struct msc_window,
+ entry);
+
+ return list_next_entry(win, entry);
+}
+
/**
* msc_oldest_window() - locate the window with oldest data
* @msc: MSC device
@@ -150,9 +189,7 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
*/
static struct msc_window *msc_oldest_window(struct msc *msc)
{
- struct msc_window *win;
- u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
- unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
+ struct msc_window *win, *next = msc_next_window(msc->cur_win);
unsigned int found = 0;
if (list_empty(&msc->win_list))
@@ -164,18 +201,18 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
* something like 2, in which case we're good
*/
list_for_each_entry(win, &msc->win_list, entry) {
- if (win->block[0].addr == win_addr)
+ if (win == next)
found++;
/* skip the empty ones */
- if (msc_block_is_empty(win->block[0].bdesc))
+ if (msc_block_is_empty(msc_win_block(win, 0)))
continue;
if (found)
return win;
}
- return list_entry(msc->win_list.next, struct msc_window, entry);
+ return list_first_entry(&msc->win_list, struct msc_window, entry);
}
/**
@@ -187,7 +224,7 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
static unsigned int msc_win_oldest_block(struct msc_window *win)
{
unsigned int blk;
- struct msc_block_desc *bdesc = win->block[0].bdesc;
+ struct msc_block_desc *bdesc = msc_win_block(win, 0);
/* without wrapping, first block is the oldest */
if (!msc_block_wrapped(bdesc))
@@ -198,7 +235,7 @@ static unsigned int msc_win_oldest_block(struct msc_window *win)
* oldest data for this window.
*/
for (blk = 0; blk < win->nr_blocks; blk++) {
- bdesc = win->block[blk].bdesc;
+ bdesc = msc_win_block(win, blk);
if (msc_block_last_written(bdesc))
return blk;
@@ -207,34 +244,9 @@ static unsigned int msc_win_oldest_block(struct msc_window *win)
return 0;
}
-/**
- * msc_is_last_win() - check if a window is the last one for a given MSC
- * @win: window
- * Return: true if @win is the last window in MSC's multiblock buffer
- */
-static inline bool msc_is_last_win(struct msc_window *win)
-{
- return win->entry.next == &win->msc->win_list;
-}
-
-/**
- * msc_next_window() - return next window in the multiblock buffer
- * @win: current window
- *
- * Return: window following the current one
- */
-static struct msc_window *msc_next_window(struct msc_window *win)
-{
- if (msc_is_last_win(win))
- return list_entry(win->msc->win_list.next, struct msc_window,
- entry);
-
- return list_entry(win->entry.next, struct msc_window, entry);
-}
-
static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
- return iter->win->block[iter->block].bdesc;
+ return msc_win_block(iter->win, iter->block);
}
static void msc_iter_init(struct msc_iter *iter)
@@ -467,13 +479,47 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
offsetof(struct msc_block_desc, hw_tag);
for (blk = 0; blk < win->nr_blocks; blk++) {
- struct msc_block_desc *bdesc = win->block[blk].bdesc;
+ struct msc_block_desc *bdesc = msc_win_block(win, blk);
memset(&bdesc->hw_tag, 0, hw_sz);
}
}
}
+static int intel_th_msu_init(struct msc *msc)
+{
+ u32 mintctl, msusts;
+
+ if (!msc->do_irq)
+ return 0;
+
+ mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
+ mintctl |= msc->index ? M1BLIE : M0BLIE;
+ iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
+ if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
+ dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
+ msc->do_irq = 0;
+ return 0;
+ }
+
+ msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
+ iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
+
+ return 0;
+}
+
+static void intel_th_msu_deinit(struct msc *msc)
+{
+ u32 mintctl;
+
+ if (!msc->do_irq)
+ return;
+
+ mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
+ mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
+ iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
+}
+
/**
* msc_configure() - set up MSC hardware
* @msc: the MSC device to configure
@@ -531,23 +577,14 @@ static int msc_configure(struct msc *msc)
*/
static void msc_disable(struct msc *msc)
{
- unsigned long count;
u32 reg;
lockdep_assert_held(&msc->buf_mutex);
intel_th_trace_disable(msc->thdev);
- for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
- count && !(reg & MSCSTS_PLE); count--) {
- reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
- cpu_relax();
- }
-
- if (!count)
- dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
-
if (msc->mode == MSC_MODE_SINGLE) {
+ reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);
reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
@@ -617,22 +654,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
*/
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
+ unsigned long nr_pages = size >> PAGE_SHIFT;
unsigned int order = get_order(size);
struct page *page;
+ int ret;
if (!size)
return 0;
+ ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
+ if (ret)
+ goto err_out;
+
+ ret = -ENOMEM;
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
- return -ENOMEM;
+ goto err_free_sgt;
split_page(page, order);
- msc->nr_pages = size >> PAGE_SHIFT;
+ sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
+
+ ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
+ DMA_FROM_DEVICE);
+ if (ret < 0)
+ goto err_free_pages;
+
+ msc->nr_pages = nr_pages;
msc->base = page_address(page);
- msc->base_addr = page_to_phys(page);
+ msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
return 0;
+
+err_free_pages:
+ __free_pages(page, order);
+
+err_free_sgt:
+ sg_free_table(&msc->single_sgt);
+
+err_out:
+ return ret;
}
/**
@@ -643,6 +703,10 @@ static void msc_buffer_contig_free(struct msc *msc)
{
unsigned long off;
+ dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
+ 1, DMA_FROM_DEVICE);
+ sg_free_table(&msc->single_sgt);
+
for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
struct page *page = virt_to_page(msc->base + off);
@@ -669,6 +733,40 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc,
return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}
+static int __msc_buffer_win_alloc(struct msc_window *win,
+ unsigned int nr_blocks)
+{
+ struct scatterlist *sg_ptr;
+ void *block;
+ int i, ret;
+
+ ret = sg_alloc_table(&win->sgt, nr_blocks, GFP_KERNEL);
+ if (ret)
+ return -ENOMEM;
+
+ for_each_sg(win->sgt.sgl, sg_ptr, nr_blocks, i) {
+ block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
+ PAGE_SIZE, &sg_dma_address(sg_ptr),
+ GFP_KERNEL);
+ if (!block)
+ goto err_nomem;
+
+ sg_set_buf(sg_ptr, block, PAGE_SIZE);
+ }
+
+ return nr_blocks;
+
+err_nomem:
+ for (i--; i >= 0; i--)
+ dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+ msc_win_block(win, i),
+ msc_win_baddr(win, i));
+
+ sg_free_table(&win->sgt);
+
+ return -ENOMEM;
+}
+
/**
* msc_buffer_win_alloc() - alloc a window for a multiblock mode
* @msc: MSC device
@@ -682,44 +780,49 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc,
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
struct msc_window *win;
- unsigned long size = PAGE_SIZE;
- int i, ret = -ENOMEM;
+ int ret = -ENOMEM, i;
if (!nr_blocks)
return 0;
- win = kzalloc(offsetof(struct msc_window, block[nr_blocks]),
- GFP_KERNEL);
+ /*
+ * This limitation holds as long as we need random access to the
+ * blocks. When that changes, it can go away.
+ */
+ if (nr_blocks > SG_MAX_SINGLE_ALLOC)
+ return -EINVAL;
+
+ win = kzalloc(sizeof(*win), GFP_KERNEL);
if (!win)
return -ENOMEM;
+ win->msc = msc;
+
if (!list_empty(&msc->win_list)) {
- struct msc_window *prev = list_entry(msc->win_list.prev,
- struct msc_window, entry);
+ struct msc_window *prev = list_last_entry(&msc->win_list,
+ struct msc_window,
+ entry);
+ /* This works as long as blocks are page-sized */
win->pgoff = prev->pgoff + prev->nr_blocks;
}
- for (i = 0; i < nr_blocks; i++) {
- win->block[i].bdesc =
- dma_alloc_coherent(msc_dev(msc)->parent->parent, size,
- &win->block[i].addr, GFP_KERNEL);
-
- if (!win->block[i].bdesc)
- goto err_nomem;
+ ret = __msc_buffer_win_alloc(win, nr_blocks);
+ if (ret < 0)
+ goto err_nomem;
#ifdef CONFIG_X86
+ for (i = 0; i < ret; i++)
/* Set the page as uncached */
- set_memory_uc((unsigned long)win->block[i].bdesc, 1);
+ set_memory_uc((unsigned long)msc_win_block(win, i), 1);
#endif
- }
- win->msc = msc;
- win->nr_blocks = nr_blocks;
+ win->nr_blocks = ret;
if (list_empty(&msc->win_list)) {
- msc->base = win->block[0].bdesc;
- msc->base_addr = win->block[0].addr;
+ msc->base = msc_win_block(win, 0);
+ msc->base_addr = msc_win_baddr(win, 0);
+ msc->cur_win = win;
}
list_add_tail(&win->entry, &msc->win_list);
@@ -728,19 +831,25 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
return 0;
err_nomem:
- for (i--; i >= 0; i--) {
-#ifdef CONFIG_X86
- /* Reset the page to write-back before releasing */
- set_memory_wb((unsigned long)win->block[i].bdesc, 1);
-#endif
- dma_free_coherent(msc_dev(msc)->parent->parent, size,
- win->block[i].bdesc, win->block[i].addr);
- }
kfree(win);
return ret;
}
+static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+{
+ int i;
+
+ for (i = 0; i < win->nr_blocks; i++) {
+ struct page *page = sg_page(&win->sgt.sgl[i]);
+
+ page->mapping = NULL;
+ dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+ msc_win_block(win, i), msc_win_baddr(win, i));
+ }
+ sg_free_table(&win->sgt);
+}
+
/**
* msc_buffer_win_free() - free a window from MSC's window list
* @msc: MSC device
@@ -761,17 +870,13 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
msc->base_addr = 0;
}
- for (i = 0; i < win->nr_blocks; i++) {
- struct page *page = virt_to_page(win->block[i].bdesc);
-
- page->mapping = NULL;
#ifdef CONFIG_X86
- /* Reset the page to write-back before releasing */
- set_memory_wb((unsigned long)win->block[i].bdesc, 1);
+ for (i = 0; i < win->nr_blocks; i++)
+ /* Reset the page to write-back */
+ set_memory_wb((unsigned long)msc_win_block(win, i), 1);
#endif
- dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
- win->block[i].bdesc, win->block[i].addr);
- }
+
+ __msc_buffer_win_free(msc, win);
kfree(win);
}
@@ -798,19 +903,18 @@ static void msc_buffer_relink(struct msc *msc)
*/
if (msc_is_last_win(win)) {
sw_tag |= MSC_SW_TAG_LASTWIN;
- next_win = list_entry(msc->win_list.next,
- struct msc_window, entry);
+ next_win = list_first_entry(&msc->win_list,
+ struct msc_window, entry);
} else {
- next_win = list_entry(win->entry.next,
- struct msc_window, entry);
+ next_win = list_next_entry(win, entry);
}
for (blk = 0; blk < win->nr_blocks; blk++) {
- struct msc_block_desc *bdesc = win->block[blk].bdesc;
+ struct msc_block_desc *bdesc = msc_win_block(win, blk);
memset(bdesc, 0, sizeof(*bdesc));
- bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT;
+ bdesc->next_win = msc_win_bpfn(next_win, 0);
/*
* Similarly to last window, last block should point
@@ -818,11 +922,9 @@ static void msc_buffer_relink(struct msc *msc)
*/
if (blk == win->nr_blocks - 1) {
sw_tag |= MSC_SW_TAG_LASTBLK;
- bdesc->next_blk =
- win->block[0].addr >> PAGE_SHIFT;
+ bdesc->next_blk = msc_win_bpfn(win, 0);
} else {
- bdesc->next_blk =
- win->block[blk + 1].addr >> PAGE_SHIFT;
+ bdesc->next_blk = msc_win_bpfn(win, blk + 1);
}
bdesc->sw_tag = sw_tag;
@@ -997,7 +1099,7 @@ static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
found:
pgoff -= win->pgoff;
- return virt_to_page(win->block[pgoff].bdesc);
+ return sg_page(&win->sgt.sgl[pgoff]);
}
/**
@@ -1250,6 +1352,22 @@ static const struct file_operations intel_th_msc_fops = {
.owner = THIS_MODULE,
};
+static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
+{
+ struct msc *msc = dev_get_drvdata(&thdev->dev);
+ unsigned long count;
+ u32 reg;
+
+ for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
+ count && !(reg & MSCSTS_PLE); count--) {
+ reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
+ cpu_relax();
+ }
+
+ if (!count)
+ dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
+}
+
static int intel_th_msc_init(struct msc *msc)
{
atomic_set(&msc->user_count, -1);
@@ -1266,6 +1384,39 @@ static int intel_th_msc_init(struct msc *msc)
return 0;
}
+static void msc_win_switch(struct msc *msc)
+{
+ struct msc_window *last, *first;
+
+ first = list_first_entry(&msc->win_list, struct msc_window, entry);
+ last = list_last_entry(&msc->win_list, struct msc_window, entry);
+
+ if (msc_is_last_win(msc->cur_win))
+ msc->cur_win = first;
+ else
+ msc->cur_win = list_next_entry(msc->cur_win, entry);
+
+ msc->base = msc_win_block(msc->cur_win, 0);
+ msc->base_addr = msc_win_baddr(msc->cur_win, 0);
+
+ intel_th_trace_switch(msc->thdev);
+}
+
+static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
+{
+ struct msc *msc = dev_get_drvdata(&thdev->dev);
+ u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
+ u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+
+ if (!(msusts & mask)) {
+ if (msc->enabled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
static const char * const msc_mode[] = {
[MSC_MODE_SINGLE] = "single",
[MSC_MODE_MULTI] = "multi",
@@ -1440,10 +1591,38 @@ free_win:
static DEVICE_ATTR_RW(nr_pages);
+static ssize_t
+win_switch_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val != 1)
+ return -EINVAL;
+
+ mutex_lock(&msc->buf_mutex);
+ if (msc->mode != MSC_MODE_MULTI)
+ ret = -ENOTSUPP;
+ else
+ msc_win_switch(msc);
+ mutex_unlock(&msc->buf_mutex);
+
+ return ret ? ret : size;
+}
+
+static DEVICE_ATTR_WO(win_switch);
+
static struct attribute *msc_output_attrs[] = {
&dev_attr_wrap.attr,
&dev_attr_mode.attr,
&dev_attr_nr_pages.attr,
+ &dev_attr_win_switch.attr,
NULL,
};
@@ -1471,10 +1650,19 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
if (!msc)
return -ENOMEM;
+ res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
+ if (!res)
+ msc->do_irq = 1;
+
msc->index = thdev->id;
msc->thdev = thdev;
msc->reg_base = base + msc->index * 0x100;
+ msc->msu_base = base;
+
+ err = intel_th_msu_init(msc);
+ if (err)
+ return err;
err = intel_th_msc_init(msc);
if (err)
@@ -1491,6 +1679,7 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
int ret;
intel_th_msc_deactivate(thdev);
+ intel_th_msu_deinit(msc);
/*
* Buffers should not be used at this point except if the
@@ -1504,6 +1693,8 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
static struct intel_th_driver intel_th_msc_driver = {
.probe = intel_th_msc_probe,
.remove = intel_th_msc_remove,
+ .irq = intel_th_msc_interrupt,
+ .wait_empty = intel_th_msc_wait_empty,
.activate = intel_th_msc_activate,
.deactivate = intel_th_msc_deactivate,
.fops = &intel_th_msc_fops,
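For context on the msu.c changes above: msc_oldest_window() no longer reads MSC0NWSA but starts from the software-tracked current window, skipping empty windows and falling back to the first one. A simplified, array-based sketch of that search (illustrative only; the driver uses a linked list of windows):

#include <stdbool.h>
#include <stdio.h>

#define NR_WINDOWS 4

static int oldest_window(const bool empty[NR_WINDOWS], int cur)
{
	int next = (cur + 1) % NR_WINDOWS;	/* window after the current one */
	int win;

	/* scan from that window to the end of the list */
	for (win = next; win < NR_WINDOWS; win++)
		if (!empty[win])
			return win;

	return 0;	/* nothing found: fall back to the first window */
}

int main(void)
{
	const bool empty[NR_WINDOWS] = { false, true, true, false };

	/* current window is 1; window 2 is empty, so the search lands on 3 */
	printf("oldest window: %d\n", oldest_window(empty, 1));
	return 0;
}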
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 9cc8aced6116..574c16004cb2 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -11,6 +11,7 @@
enum {
REG_MSU_MSUPARAMS = 0x0000,
REG_MSU_MSUSTS = 0x0008,
+ REG_MSU_MINTCTL = 0x0004, /* MSU-global interrupt control */
REG_MSU_MSC0CTL = 0x0100, /* MSC0 control */
REG_MSU_MSC0STS = 0x0104, /* MSC0 status */
REG_MSU_MSC0BAR = 0x0108, /* MSC0 output base address */
@@ -28,6 +29,8 @@ enum {
/* MSUSTS bits */
#define MSUSTS_MSU_INT BIT(0)
+#define MSUSTS_MSC0BLAST BIT(16)
+#define MSUSTS_MSC1BLAST BIT(24)
/* MSCnCTL bits */
#define MSC_EN BIT(0)
@@ -36,6 +39,11 @@ enum {
#define MSC_MODE (BIT(4) | BIT(5))
#define MSC_LEN (BIT(8) | BIT(9) | BIT(10))
+/* MINTCTL bits */
+#define MICDE BIT(0)
+#define M0BLIE BIT(16)
+#define M1BLIE BIT(24)
+
/* MSC operating modes (MSC_MODE) */
enum {
MSC_MODE_SINGLE = 0,
@@ -87,7 +95,7 @@ static inline unsigned long msc_data_sz(struct msc_block_desc *bdesc)
static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
{
- if (bdesc->hw_tag & MSC_HW_TAG_BLOCKWRAP)
+ if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP))
return true;
return false;
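The MINTCTL/MSUSTS bits added above are selected per MSC index in intel_th_msu_init() and intel_th_msc_interrupt(). A standalone sketch of the bit selection (values only, no hardware access):

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define M0BLIE			BIT(16)
#define M1BLIE			BIT(24)
#define MSUSTS_MSC0BLAST	BIT(16)
#define MSUSTS_MSC1BLAST	BIT(24)

int main(void)
{
	unsigned int index;

	for (index = 0; index < 2; index++) {
		unsigned int enable = index ? M1BLIE : M0BLIE;
		unsigned int status = index ? MSUSTS_MSC1BLAST :
					      MSUSTS_MSC0BLAST;

		printf("MSC%u: MINTCTL enable %#010x, MSUSTS mask %#010x\n",
		       index, enable, status);
	}
	return 0;
}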
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 1cf6290d6435..f1228708f2a2 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -17,7 +17,13 @@
#define DRIVER_NAME "intel_th_pci"
-#define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW))
+enum {
+ TH_PCI_CONFIG_BAR = 0,
+ TH_PCI_STH_SW_BAR = 2,
+ TH_PCI_RTIT_BAR = 4,
+};
+
+#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR))
#define PCI_REG_NPKDSC 0x80
#define NPKDSC_TSACT BIT(5)
@@ -66,8 +72,12 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_th_drvdata *drvdata = (void *)id->driver_data;
+ struct resource resource[TH_MMIO_END + TH_NVEC_MAX] = {
+ [TH_MMIO_CONFIG] = pdev->resource[TH_PCI_CONFIG_BAR],
+ [TH_MMIO_SW] = pdev->resource[TH_PCI_STH_SW_BAR],
+ };
+ int err, r = TH_MMIO_SW + 1, i;
struct intel_th *th;
- int err;
err = pcim_enable_device(pdev);
if (err)
@@ -77,8 +87,19 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
if (err)
return err;
- th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource,
- DEVICE_COUNT_RESOURCE, pdev->irq);
+ if (pdev->resource[TH_PCI_RTIT_BAR].start) {
+ resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR];
+ r++;
+ }
+
+ err = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
+ if (err > 0)
+ for (i = 0; i < err; i++, r++) {
+ resource[r].flags = IORESOURCE_IRQ;
+ resource[r].start = pci_irq_vector(pdev, i);
+ }
+
+ th = intel_th_alloc(&pdev->dev, drvdata, resource, r);
if (IS_ERR(th))
return PTR_ERR(th);
@@ -95,10 +116,13 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
struct intel_th *th = pci_get_drvdata(pdev);
intel_th_free(th);
+
+ pci_free_irq_vectors(pdev);
}
static const struct intel_th_drvdata intel_th_2x = {
.tscu_enable = 1,
+ .has_mintctl = 1,
};
static const struct pci_device_id intel_th_pci_id_table[] = {
@@ -165,6 +189,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Comet Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
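To summarize the pci.c rework above: the probe path now builds a resource array with the MMIO BARs at fixed th_mmio_idx slots, followed by one IRQ entry per allocated vector, and passes its length to intel_th_alloc(). A sketch of that layout with made-up addresses and vector numbers:

#include <stdio.h>

enum { RES_MEM, RES_IRQ };
enum { TH_MMIO_CONFIG = 0, TH_MMIO_SW = 1, TH_MMIO_RTIT = 2, TH_MMIO_END };
#define TH_NVEC_MAX 8

struct res {
	int type;
	unsigned long start;
};

int main(void)
{
	struct res resource[TH_MMIO_END + TH_NVEC_MAX] = {
		[TH_MMIO_CONFIG] = { RES_MEM, 0xfe000000 },	/* BAR 0 */
		[TH_MMIO_SW]	 = { RES_MEM, 0xfc000000 },	/* BAR 2 */
	};
	int r = TH_MMIO_SW + 1, nvec = 2, i;

	/* if the RTIT BAR were present it would fill resource[TH_MMIO_RTIT], r++ */

	for (i = 0; i < nvec; i++, r++) {
		resource[r].type = RES_IRQ;
		resource[r].start = 24 + i;	/* made-up vector numbers */
	}

	for (i = 0; i < r; i++)
		printf("resource[%d]: %s %#lx\n", i,
		       resource[i].type == RES_MEM ? "MEM" : "IRQ",
		       resource[i].start);
	return 0;
}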
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index c7ba8acfd4d5..e55b902560de 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -166,11 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx)
static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
{
struct stp_master *master;
- size_t size;
- size = ALIGN(stm->data->sw_nchannels, 8) / 8;
- size += sizeof(struct stp_master);
- master = kzalloc(size, GFP_ATOMIC);
+ master = kzalloc(struct_size(master, chan_map,
+ BITS_TO_LONGS(stm->data->sw_nchannels)),
+ GFP_ATOMIC);
if (!master)
return -ENOMEM;
@@ -218,8 +217,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
- output->nr_chans = 0;
master->nr_free += output->nr_chans;
+ output->nr_chans = 0;
}
/*
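The stm core hunk above switches stp_master_alloc() to struct_size() with a BITS_TO_LONGS()-sized channel bitmap. A userspace approximation of that sizing (illustrative; the kernel helpers are replaced by plain C):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

struct master {
	unsigned int nr_free;
	unsigned long chan_map[];	/* one bit per channel */
};

static struct master *master_alloc(unsigned int nr_channels)
{
	size_t size = sizeof(struct master) +
		      BITS_TO_LONGS(nr_channels) * sizeof(unsigned long);
	struct master *m = calloc(1, size);

	if (m)
		m->nr_free = nr_channels;
	return m;
}

int main(void)
{
	struct master *m = master_alloc(100);

	if (!m)
		return 1;
	printf("bitmap words for 100 channels: %zu\n", BITS_TO_LONGS(100));
	free(m);
	return 0;
}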
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 5e5990a83da5..913db013fe90 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -603,6 +603,23 @@ bailout:
return ret;
}
+/*
+ * We print a warning when we are not flagged to support atomic transfers but
+ * will try anyhow. That's what the I2C core would do as well. Sadly, we can't
+ * modify the algorithm struct at probe time because this struct is exported
+ * 'const'.
+ */
+static int bit_xfer_atomic(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+
+ if (!adap->can_do_atomic)
+ dev_warn(&i2c_adap->dev, "not flagged for atomic transfers\n");
+
+ return bit_xfer(i2c_adap, msgs, num);
+}
+
static u32 bit_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
@@ -615,8 +632,9 @@ static u32 bit_func(struct i2c_adapter *adap)
/* -----exported algorithm data: ------------------------------------- */
const struct i2c_algorithm i2c_bit_algo = {
- .master_xfer = bit_xfer,
- .functionality = bit_func,
+ .master_xfer = bit_xfer,
+ .master_xfer_atomic = bit_xfer_atomic,
+ .functionality = bit_func,
};
EXPORT_SYMBOL(i2c_bit_algo);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f8979abb9a19..26186439db6b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -77,6 +77,16 @@ config I2C_AMD8111
This driver can also be built as a module. If so, the module
will be called i2c-amd8111.
+config I2C_AMD_MP2
+ tristate "AMD MP2 PCIe"
+ depends on PCI && ACPI
+ help
+ If you say yes to this option, support will be included for the AMD
+ MP2 PCIe I2C adapter.
+
+ This driver can also be built as modules. If so, the modules will
+ be called i2c-amd-mp2-pci and i2c-amd-mp2-plat.
+
config I2C_HIX5HD2
tristate "Hix5hd2 high-speed I2C driver"
depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
@@ -176,6 +186,7 @@ config I2C_PIIX4
AMD Hudson-2
AMD ML
AMD CZ
+ Hygon CZ
Serverworks OSB4
Serverworks CSB5
Serverworks CSB6
@@ -388,6 +399,19 @@ config I2C_AT91
the latency to fill the transmission register is too long. If you
are facing this situation, use the i2c-gpio driver.
+config I2C_AT91_SLAVE_EXPERIMENTAL
+ tristate "Microchip AT91 I2C experimental slave mode"
+ depends on I2C_AT91
+ select I2C_SLAVE
+ help
+ If you say yes to this option, support for the slave mode will be
+ added. Caution: do not use it in production. This feature has not
+ been heavily tested; help is wanted.
+ There are known bugs:
+ - It can hang, on a SAMA5D4, after several transfers.
+ - There are some mismatches with a SAMA5D4 as slave and a SAMA5D2 as
+ master.
+
config I2C_AU1550
tristate "Au1550/Au1200/Au1300 SMBus interface"
depends on MIPS_ALCHEMY
@@ -425,6 +449,7 @@ config I2C_BCM_IPROC
tristate "Broadcom iProc I2C controller"
depends on ARCH_BCM_IPROC || COMPILE_TEST
default ARCH_BCM_IPROC
+ select I2C_SLAVE
help
If you say yes to this option, support will be included for the
Broadcom iProc I2C controller.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 5f0cb6915969..a3245231b0b7 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -33,8 +33,13 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
# Embedded system I2C/SMBus host controller drivers
obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o
+obj-$(CONFIG_I2C_AMD_MP2) += i2c-amd-mp2-pci.o i2c-amd-mp2-plat.o
obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
+i2c-at91-objs := i2c-at91-core.o i2c-at91-master.o
+ifeq ($(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL),y)
+ i2c-at91-objs += i2c-at91-slave.o
+endif
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o
obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
new file mode 100644
index 000000000000..455e1f36a2a3
--- /dev/null
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * AMD MP2 PCIe communication driver
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Elie Morisse <syniurge@gmail.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "i2c-amd-mp2.h"
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+static void amd_mp2_c2p_mutex_lock(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ /* there is only one data mailbox for two i2c adapters */
+ mutex_lock(&privdata->c2p_lock);
+ privdata->c2p_lock_busid = i2c_common->bus_id;
+}
+
+static void amd_mp2_c2p_mutex_unlock(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ if (unlikely(privdata->c2p_lock_busid != i2c_common->bus_id)) {
+ dev_warn(ndev_dev(privdata),
+ "bus %d attempting to unlock C2P locked by bus %d\n",
+ i2c_common->bus_id, privdata->c2p_lock_busid);
+ return;
+ }
+
+ mutex_unlock(&privdata->c2p_lock);
+}
+
+static int amd_mp2_cmd(struct amd_i2c_common *i2c_common,
+ union i2c_cmd_base i2c_cmd_base)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ void __iomem *reg;
+
+ i2c_common->reqcmd = i2c_cmd_base.s.i2c_cmd;
+
+ reg = privdata->mmio + ((i2c_cmd_base.s.bus_id == 1) ?
+ AMD_C2P_MSG1 : AMD_C2P_MSG0);
+ writel(i2c_cmd_base.ul, reg);
+
+ return 0;
+}
+
+int amd_mp2_bus_enable_set(struct amd_i2c_common *i2c_common, bool enable)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ union i2c_cmd_base i2c_cmd_base;
+
+ dev_dbg(ndev_dev(privdata), "%s id: %d\n", __func__,
+ i2c_common->bus_id);
+
+ i2c_cmd_base.ul = 0;
+ i2c_cmd_base.s.i2c_cmd = enable ? i2c_enable : i2c_disable;
+ i2c_cmd_base.s.bus_id = i2c_common->bus_id;
+ i2c_cmd_base.s.i2c_speed = i2c_common->i2c_speed;
+
+ amd_mp2_c2p_mutex_lock(i2c_common);
+
+ return amd_mp2_cmd(i2c_common, i2c_cmd_base);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_bus_enable_set);
+
+static void amd_mp2_cmd_rw_fill(struct amd_i2c_common *i2c_common,
+ union i2c_cmd_base *i2c_cmd_base,
+ enum i2c_cmd reqcmd)
+{
+ i2c_cmd_base->s.i2c_cmd = reqcmd;
+ i2c_cmd_base->s.bus_id = i2c_common->bus_id;
+ i2c_cmd_base->s.i2c_speed = i2c_common->i2c_speed;
+ i2c_cmd_base->s.slave_addr = i2c_common->msg->addr;
+ i2c_cmd_base->s.length = i2c_common->msg->len;
+}
+
+int amd_mp2_rw(struct amd_i2c_common *i2c_common, enum i2c_cmd reqcmd)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ union i2c_cmd_base i2c_cmd_base;
+
+ amd_mp2_cmd_rw_fill(i2c_common, &i2c_cmd_base, reqcmd);
+ amd_mp2_c2p_mutex_lock(i2c_common);
+
+ if (i2c_common->msg->len <= 32) {
+ i2c_cmd_base.s.mem_type = use_c2pmsg;
+ if (reqcmd == i2c_write)
+ memcpy_toio(privdata->mmio + AMD_C2P_MSG2,
+ i2c_common->msg->buf,
+ i2c_common->msg->len);
+ } else {
+ i2c_cmd_base.s.mem_type = use_dram;
+ writeq((u64)i2c_common->dma_addr,
+ privdata->mmio + AMD_C2P_MSG2);
+ }
+
+ return amd_mp2_cmd(i2c_common, i2c_cmd_base);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_rw);
+
+static void amd_mp2_pci_check_rw_event(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ int len = i2c_common->eventval.r.length;
+ u32 slave_addr = i2c_common->eventval.r.slave_addr;
+ bool err = false;
+
+ if (unlikely(len != i2c_common->msg->len)) {
+ dev_err(ndev_dev(privdata),
+ "length %d in event doesn't match buffer length %d!\n",
+ len, i2c_common->msg->len);
+ err = true;
+ }
+
+ if (unlikely(slave_addr != i2c_common->msg->addr)) {
+ dev_err(ndev_dev(privdata),
+ "unexpected slave address %x (expected: %x)!\n",
+ slave_addr, i2c_common->msg->addr);
+ err = true;
+ }
+
+ if (!err)
+ i2c_common->cmd_success = true;
+}
+
+static void __amd_mp2_process_event(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ enum status_type sts = i2c_common->eventval.r.status;
+ enum response_type res = i2c_common->eventval.r.response;
+ int len = i2c_common->eventval.r.length;
+
+ if (res != command_success) {
+ if (res != command_failed)
+ dev_err(ndev_dev(privdata), "invalid response to i2c command!\n");
+ return;
+ }
+
+ switch (i2c_common->reqcmd) {
+ case i2c_read:
+ if (sts == i2c_readcomplete_event) {
+ amd_mp2_pci_check_rw_event(i2c_common);
+ if (len <= 32)
+ memcpy_fromio(i2c_common->msg->buf,
+ privdata->mmio + AMD_C2P_MSG2,
+ len);
+ } else if (sts != i2c_readfail_event) {
+ dev_err(ndev_dev(privdata),
+ "invalid i2c status after read (%d)!\n", sts);
+ }
+ break;
+ case i2c_write:
+ if (sts == i2c_writecomplete_event)
+ amd_mp2_pci_check_rw_event(i2c_common);
+ else if (sts != i2c_writefail_event)
+ dev_err(ndev_dev(privdata),
+ "invalid i2c status after write (%d)!\n", sts);
+ break;
+ case i2c_enable:
+ if (sts == i2c_busenable_complete)
+ i2c_common->cmd_success = true;
+ else if (sts != i2c_busenable_failed)
+ dev_err(ndev_dev(privdata),
+ "invalid i2c status after bus enable (%d)!\n",
+ sts);
+ break;
+ case i2c_disable:
+ if (sts == i2c_busdisable_complete)
+ i2c_common->cmd_success = true;
+ else if (sts != i2c_busdisable_failed)
+ dev_err(ndev_dev(privdata),
+ "invalid i2c status after bus disable (%d)!\n",
+ sts);
+ break;
+ default:
+ break;
+ }
+}
+
+void amd_mp2_process_event(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ if (unlikely(i2c_common->reqcmd == i2c_none)) {
+ dev_warn(ndev_dev(privdata),
+ "received msg but no cmd was sent (bus = %d)!\n",
+ i2c_common->bus_id);
+ return;
+ }
+
+ __amd_mp2_process_event(i2c_common);
+
+ i2c_common->reqcmd = i2c_none;
+ amd_mp2_c2p_mutex_unlock(i2c_common);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_process_event);
+
+static irqreturn_t amd_mp2_irq_isr(int irq, void *dev)
+{
+ struct amd_mp2_dev *privdata = dev;
+ struct amd_i2c_common *i2c_common;
+ u32 val;
+ unsigned int bus_id;
+ void __iomem *reg;
+ enum irqreturn ret = IRQ_NONE;
+
+ for (bus_id = 0; bus_id < 2; bus_id++) {
+ i2c_common = privdata->busses[bus_id];
+ if (!i2c_common)
+ continue;
+
+ reg = privdata->mmio + ((bus_id == 0) ?
+ AMD_P2C_MSG1 : AMD_P2C_MSG2);
+ val = readl(reg);
+ if (val != 0) {
+ writel(0, reg);
+ writel(0, privdata->mmio + AMD_P2C_MSG_INTEN);
+ i2c_common->eventval.ul = val;
+ i2c_common->cmd_completion(i2c_common);
+
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ if (ret != IRQ_HANDLED) {
+ val = readl(privdata->mmio + AMD_P2C_MSG_INTEN);
+ if (val != 0) {
+ writel(0, privdata->mmio + AMD_P2C_MSG_INTEN);
+ dev_warn(ndev_dev(privdata),
+ "received irq without message\n");
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+void amd_mp2_rw_timeout(struct amd_i2c_common *i2c_common)
+{
+ i2c_common->reqcmd = i2c_none;
+ amd_mp2_c2p_mutex_unlock(i2c_common);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_rw_timeout);
+
+int amd_mp2_register_cb(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ if (i2c_common->bus_id > 1)
+ return -EINVAL;
+
+ if (privdata->busses[i2c_common->bus_id]) {
+ dev_err(ndev_dev(privdata),
+ "Bus %d already taken!\n", i2c_common->bus_id);
+ return -EINVAL;
+ }
+
+ privdata->busses[i2c_common->bus_id] = i2c_common;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd_mp2_register_cb);
+
+int amd_mp2_unregister_cb(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ privdata->busses[i2c_common->bus_id] = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd_mp2_unregister_cb);
+
+static void amd_mp2_clear_reg(struct amd_mp2_dev *privdata)
+{
+ int reg;
+
+ for (reg = AMD_C2P_MSG0; reg <= AMD_C2P_MSG9; reg += 4)
+ writel(0, privdata->mmio + reg);
+
+ for (reg = AMD_P2C_MSG1; reg <= AMD_P2C_MSG2; reg += 4)
+ writel(0, privdata->mmio + reg);
+}
+
+static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
+ struct pci_dev *pci_dev)
+{
+ int rc;
+
+ pci_set_drvdata(pci_dev, privdata);
+
+ rc = pcim_enable_device(pci_dev);
+ if (rc) {
+ dev_err(ndev_dev(privdata), "Failed to enable MP2 PCI device\n");
+ goto err_pci_enable;
+ }
+
+ rc = pcim_iomap_regions(pci_dev, 1 << 2, pci_name(pci_dev));
+ if (rc) {
+ dev_err(ndev_dev(privdata), "I/O memory remapping failed\n");
+ goto err_pci_enable;
+ }
+ privdata->mmio = pcim_iomap_table(pci_dev)[2];
+
+ pci_set_master(pci_dev);
+
+ rc = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (rc)
+ goto err_dma_mask;
+ }
+
+ /* Set up intx irq */
+ writel(0, privdata->mmio + AMD_P2C_MSG_INTEN);
+ pci_intx(pci_dev, 1);
+ rc = devm_request_irq(&pci_dev->dev, pci_dev->irq, amd_mp2_irq_isr,
+ IRQF_SHARED, dev_name(&pci_dev->dev), privdata);
+ if (rc)
+ dev_err(&pci_dev->dev, "Failure requesting irq %i: %d\n",
+ pci_dev->irq, rc);
+
+ return rc;
+
+err_dma_mask:
+ pci_clear_master(pci_dev);
+err_pci_enable:
+ pci_set_drvdata(pci_dev, NULL);
+ return rc;
+}
+
+static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ struct amd_mp2_dev *privdata;
+ int rc;
+
+ privdata = devm_kzalloc(&pci_dev->dev, sizeof(*privdata), GFP_KERNEL);
+ if (!privdata)
+ return -ENOMEM;
+
+ rc = amd_mp2_pci_init(privdata, pci_dev);
+ if (rc)
+ return rc;
+
+ mutex_init(&privdata->c2p_lock);
+ privdata->pci_dev = pci_dev;
+
+ pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
+ pm_runtime_use_autosuspend(&pci_dev->dev);
+ pm_runtime_put_autosuspend(&pci_dev->dev);
+ pm_runtime_allow(&pci_dev->dev);
+
+ privdata->probed = true;
+
+ dev_info(&pci_dev->dev, "MP2 device registered.\n");
+ return 0;
+}
+
+static void amd_mp2_pci_remove(struct pci_dev *pci_dev)
+{
+ struct amd_mp2_dev *privdata = pci_get_drvdata(pci_dev);
+
+ pm_runtime_forbid(&pci_dev->dev);
+ pm_runtime_get_noresume(&pci_dev->dev);
+
+ pci_intx(pci_dev, 0);
+ pci_clear_master(pci_dev);
+
+ amd_mp2_clear_reg(privdata);
+}
+
+#ifdef CONFIG_PM
+static int amd_mp2_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct amd_mp2_dev *privdata = pci_get_drvdata(pci_dev);
+ struct amd_i2c_common *i2c_common;
+ unsigned int bus_id;
+ int ret = 0;
+
+ for (bus_id = 0; bus_id < 2; bus_id++) {
+ i2c_common = privdata->busses[bus_id];
+ if (i2c_common)
+ i2c_common->suspend(i2c_common);
+ }
+
+ ret = pci_save_state(pci_dev);
+ if (ret) {
+ dev_err(ndev_dev(privdata),
+ "pci_save_state failed = %d\n", ret);
+ return ret;
+ }
+
+ pci_disable_device(pci_dev);
+ return ret;
+}
+
+static int amd_mp2_pci_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct amd_mp2_dev *privdata = pci_get_drvdata(pci_dev);
+ struct amd_i2c_common *i2c_common;
+ unsigned int bus_id;
+ int ret = 0;
+
+ pci_restore_state(pci_dev);
+ ret = pci_enable_device(pci_dev);
+ if (ret < 0) {
+ dev_err(ndev_dev(privdata),
+ "pci_enable_device failed = %d\n", ret);
+ return ret;
+ }
+
+ for (bus_id = 0; bus_id < 2; bus_id++) {
+ i2c_common = privdata->busses[bus_id];
+ if (i2c_common) {
+ ret = i2c_common->resume(i2c_common);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static UNIVERSAL_DEV_PM_OPS(amd_mp2_pci_pm_ops, amd_mp2_pci_suspend,
+ amd_mp2_pci_resume, NULL);
+#endif /* CONFIG_PM */
+
+static const struct pci_device_id amd_mp2_pci_tbl[] = {
+ {PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_MP2)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, amd_mp2_pci_tbl);
+
+static struct pci_driver amd_mp2_pci_driver = {
+ .name = "i2c_amd_mp2",
+ .id_table = amd_mp2_pci_tbl,
+ .probe = amd_mp2_pci_probe,
+ .remove = amd_mp2_pci_remove,
+#ifdef CONFIG_PM
+ .driver = {
+ .pm = &amd_mp2_pci_pm_ops,
+ },
+#endif
+};
+module_pci_driver(amd_mp2_pci_driver);
+
+static int amd_mp2_device_match(struct device *dev, void *data)
+{
+ return 1;
+}
+
+struct amd_mp2_dev *amd_mp2_find_device(void)
+{
+ struct device *dev;
+ struct pci_dev *pci_dev;
+
+ dev = driver_find_device(&amd_mp2_pci_driver.driver, NULL, NULL,
+ amd_mp2_device_match);
+ if (!dev)
+ return NULL;
+
+ pci_dev = to_pci_dev(dev);
+ return (struct amd_mp2_dev *)pci_get_drvdata(pci_dev);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_find_device);
+
+MODULE_DESCRIPTION("AMD(R) PCI-E MP2 I2C Controller Driver");
+MODULE_AUTHOR("Shyam Sundar S K <Shyam-sundar.S-k@amd.com>");
+MODULE_AUTHOR("Elie Morisse <syniurge@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
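One detail of the PCI driver above worth calling out: amd_mp2_rw() places payloads of up to 32 bytes directly in the C2P message registers and hands the MP2 firmware a DMA address for anything larger. A trivial sketch of that decision (illustration only, plain C):

#include <stdio.h>

enum mem_type { USE_C2PMSG, USE_DRAM };

static enum mem_type pick_mem_type(unsigned int len)
{
	return len <= 32 ? USE_C2PMSG : USE_DRAM;	/* 32-byte mailbox limit */
}

int main(void)
{
	printf("16-byte msg: %s\n",
	       pick_mem_type(16) == USE_C2PMSG ? "mailbox" : "dma");
	printf("64-byte msg: %s\n",
	       pick_mem_type(64) == USE_C2PMSG ? "mailbox" : "dma");
	return 0;
}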
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
new file mode 100644
index 000000000000..f5b3f00c6559
--- /dev/null
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * AMD MP2 platform driver
+ *
+ * Set up the I2C adapters enumerated in the ACPI namespace.
+ * MP2 controllers have 2 separate busses, so up to 2 I2C adapters may be listed.
+ *
+ * Authors: Nehal Bakulchandra Shah <Nehal-bakulchandra.shah@amd.com>
+ * Elie Morisse <syniurge@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "i2c-amd-mp2.h"
+
+#define AMD_MP2_I2C_MAX_RW_LENGTH ((1 << 12) - 1)
+#define AMD_I2C_TIMEOUT (msecs_to_jiffies(250))
+
+/**
+ * struct amd_i2c_dev - MP2 bus/i2c adapter context
+ * @common: shared context with the MP2 PCI driver
+ * @pdev: platform driver node
+ * @adap: i2c adapter
+ * @cmd_complete: xfer completion object
+ */
+struct amd_i2c_dev {
+ struct amd_i2c_common common;
+ struct platform_device *pdev;
+ struct i2c_adapter adap;
+ struct completion cmd_complete;
+};
+
+#define amd_i2c_dev_common(__common) \
+ container_of(__common, struct amd_i2c_dev, common)
+
+static int i2c_amd_dma_map(struct amd_i2c_common *i2c_common)
+{
+ struct device *dev_pci = &i2c_common->mp2_dev->pci_dev->dev;
+ struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common);
+ enum dma_data_direction dma_direction =
+ i2c_common->msg->flags & I2C_M_RD ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ i2c_common->dma_buf = i2c_get_dma_safe_msg_buf(i2c_common->msg, 0);
+ i2c_common->dma_addr = dma_map_single(dev_pci, i2c_common->dma_buf,
+ i2c_common->msg->len,
+ dma_direction);
+
+ if (unlikely(dma_mapping_error(dev_pci, i2c_common->dma_addr))) {
+ dev_err(&i2c_dev->pdev->dev,
+ "Error while mapping dma buffer %p\n",
+ i2c_common->dma_buf);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void i2c_amd_dma_unmap(struct amd_i2c_common *i2c_common)
+{
+ struct device *dev_pci = &i2c_common->mp2_dev->pci_dev->dev;
+ enum dma_data_direction dma_direction =
+ i2c_common->msg->flags & I2C_M_RD ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ dma_unmap_single(dev_pci, i2c_common->dma_addr,
+ i2c_common->msg->len, dma_direction);
+
+ i2c_put_dma_safe_msg_buf(i2c_common->dma_buf, i2c_common->msg, true);
+}
+
+static void i2c_amd_start_cmd(struct amd_i2c_dev *i2c_dev)
+{
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+
+ reinit_completion(&i2c_dev->cmd_complete);
+ i2c_common->cmd_success = false;
+}
+
+static void i2c_amd_cmd_completion(struct amd_i2c_common *i2c_common)
+{
+ struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common);
+ union i2c_event *event = &i2c_common->eventval;
+
+ if (event->r.status == i2c_readcomplete_event)
+ dev_dbg(&i2c_dev->pdev->dev, "%s readdata:%*ph\n",
+ __func__, event->r.length,
+ i2c_common->msg->buf);
+
+ complete(&i2c_dev->cmd_complete);
+}
+
+static int i2c_amd_check_cmd_completion(struct amd_i2c_dev *i2c_dev)
+{
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+ unsigned long timeout;
+
+ timeout = wait_for_completion_timeout(&i2c_dev->cmd_complete,
+ i2c_dev->adap.timeout);
+
+ if ((i2c_common->reqcmd == i2c_read ||
+ i2c_common->reqcmd == i2c_write) &&
+ i2c_common->msg->len > 32)
+ i2c_amd_dma_unmap(i2c_common);
+
+ if (timeout == 0) {
+ amd_mp2_rw_timeout(i2c_common);
+ return -ETIMEDOUT;
+ }
+
+ amd_mp2_process_event(i2c_common);
+
+ if (!i2c_common->cmd_success)
+ return -EIO;
+
+ return 0;
+}
+
+static int i2c_amd_enable_set(struct amd_i2c_dev *i2c_dev, bool enable)
+{
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+
+ i2c_amd_start_cmd(i2c_dev);
+ amd_mp2_bus_enable_set(i2c_common, enable);
+
+ return i2c_amd_check_cmd_completion(i2c_dev);
+}
+
+static int i2c_amd_xfer_msg(struct amd_i2c_dev *i2c_dev, struct i2c_msg *pmsg)
+{
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+
+ i2c_amd_start_cmd(i2c_dev);
+ i2c_common->msg = pmsg;
+
+ if (pmsg->len > 32)
+ if (i2c_amd_dma_map(i2c_common))
+ return -EIO;
+
+ if (pmsg->flags & I2C_M_RD)
+ amd_mp2_rw(i2c_common, i2c_read);
+ else
+ amd_mp2_rw(i2c_common, i2c_write);
+
+ return i2c_amd_check_cmd_completion(i2c_dev);
+}
+
+static int i2c_amd_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct amd_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int i;
+ struct i2c_msg *pmsg;
+ int err;
+
+ /* the adapter might have been deleted while waiting for the bus lock */
+ if (unlikely(!i2c_dev->common.mp2_dev))
+ return -EINVAL;
+
+ amd_mp2_pm_runtime_get(i2c_dev->common.mp2_dev);
+
+ for (i = 0; i < num; i++) {
+ pmsg = &msgs[i];
+ err = i2c_amd_xfer_msg(i2c_dev, pmsg);
+ if (err)
+ break;
+ }
+
+ amd_mp2_pm_runtime_put(i2c_dev->common.mp2_dev);
+ return err ? err : num;
+}
+
+static u32 i2c_amd_func(struct i2c_adapter *a)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm i2c_amd_algorithm = {
+ .master_xfer = i2c_amd_xfer,
+ .functionality = i2c_amd_func,
+};
+
+#ifdef CONFIG_PM
+static int i2c_amd_suspend(struct amd_i2c_common *i2c_common)
+{
+ struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common);
+
+ i2c_amd_enable_set(i2c_dev, false);
+ return 0;
+}
+
+static int i2c_amd_resume(struct amd_i2c_common *i2c_common)
+{
+ struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common);
+
+ return i2c_amd_enable_set(i2c_dev, true);
+}
+#endif
+
+static enum speed_enum i2c_amd_get_bus_speed(struct platform_device *pdev)
+{
+ u32 acpi_speed;
+ int i;
+ static const u32 supported_speeds[] = {
+ 0, 100000, 400000, 1000000, 1400000, 3400000
+ };
+
+ acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+ /* round down to the nearest supported bus speed */
+ for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed < supported_speeds[i])
+ break;
+ }
+ acpi_speed = supported_speeds[i - 1];
+
+ switch (acpi_speed) {
+ case 100000:
+ return speed100k;
+ case 400000:
+ return speed400k;
+ case 1000000:
+ return speed1000k;
+ case 1400000:
+ return speed1400k;
+ case 3400000:
+ return speed3400k;
+ default:
+ return speed400k;
+ }
+}
+
+static const struct i2c_adapter_quirks amd_i2c_dev_quirks = {
+ .max_read_len = AMD_MP2_I2C_MAX_RW_LENGTH,
+ .max_write_len = AMD_MP2_I2C_MAX_RW_LENGTH,
+};
+
+static int i2c_amd_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct amd_i2c_dev *i2c_dev;
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ struct acpi_device *adev;
+ struct amd_mp2_dev *mp2_dev;
+ const char *uid;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return -ENODEV;
+
+ /* The ACPI namespace doesn't contain information about which MP2 PCI
+ * device an AMDI0011 ACPI device is related to, so assume that there's
+ * only one MP2 PCI device per system.
+ */
+ mp2_dev = amd_mp2_find_device();
+ if (!mp2_dev || !mp2_dev->probed)
+ /* The MP2 PCI device should get probed later */
+ return -EPROBE_DEFER;
+
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev)
+ return -ENOMEM;
+
+ i2c_dev->common.mp2_dev = mp2_dev;
+ i2c_dev->pdev = pdev;
+ platform_set_drvdata(pdev, i2c_dev);
+
+ i2c_dev->common.cmd_completion = &i2c_amd_cmd_completion;
+#ifdef CONFIG_PM
+ i2c_dev->common.suspend = &i2c_amd_suspend;
+ i2c_dev->common.resume = &i2c_amd_resume;
+#endif
+
+ uid = adev->pnp.unique_id;
+ if (!uid) {
+ dev_err(&pdev->dev, "missing UID/bus id!\n");
+ return -EINVAL;
+ } else if (strcmp(uid, "0") == 0) {
+ i2c_dev->common.bus_id = 0;
+ } else if (strcmp(uid, "1") == 0) {
+ i2c_dev->common.bus_id = 1;
+ } else {
+ dev_err(&pdev->dev, "incorrect UID/bus id \"%s\"!\n", uid);
+ return -EINVAL;
+ }
+ dev_dbg(&pdev->dev, "bus id is %u\n", i2c_dev->common.bus_id);
+
+ /* Register the adapter */
+ amd_mp2_pm_runtime_get(mp2_dev);
+
+ i2c_dev->common.reqcmd = i2c_none;
+ if (amd_mp2_register_cb(&i2c_dev->common))
+ return -EINVAL;
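+ /* link this adapter to the MP2 PCI device so PM and unbinding stay ordered */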
+ device_link_add(&i2c_dev->pdev->dev, &mp2_dev->pci_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ i2c_dev->common.i2c_speed = i2c_amd_get_bus_speed(pdev);
+
+ /* Setup i2c adapter description */
+ i2c_dev->adap.owner = THIS_MODULE;
+ i2c_dev->adap.algo = &i2c_amd_algorithm;
+ i2c_dev->adap.quirks = &amd_i2c_dev_quirks;
+ i2c_dev->adap.dev.parent = &pdev->dev;
+ i2c_dev->adap.algo_data = i2c_dev;
+ i2c_dev->adap.timeout = AMD_I2C_TIMEOUT;
+ ACPI_COMPANION_SET(&i2c_dev->adap.dev, ACPI_COMPANION(&pdev->dev));
+ i2c_dev->adap.dev.of_node = pdev->dev.of_node;
+ snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name),
+ "AMD MP2 i2c bus %u", i2c_dev->common.bus_id);
+ i2c_set_adapdata(&i2c_dev->adap, i2c_dev);
+
+ init_completion(&i2c_dev->cmd_complete);
+
+ /* Enable the bus */
+ if (i2c_amd_enable_set(i2c_dev, true))
+ dev_err(&pdev->dev, "initial bus enable failed\n");
+
+ /* Attach to the i2c layer */
+ ret = i2c_add_adapter(&i2c_dev->adap);
+
+ amd_mp2_pm_runtime_put(mp2_dev);
+
+ if (ret < 0)
+ dev_err(&pdev->dev, "i2c add adapter failed = %d\n", ret);
+
+ return ret;
+}
+
+static int i2c_amd_remove(struct platform_device *pdev)
+{
+ struct amd_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+
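+ /* hold the bus lock so no transfer is in flight while the bus is torn down */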
+ i2c_lock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
+
+ i2c_amd_enable_set(i2c_dev, false);
+ amd_mp2_unregister_cb(i2c_common);
+ i2c_common->mp2_dev = NULL;
+
+ i2c_unlock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
+
+ i2c_del_adapter(&i2c_dev->adap);
+ return 0;
+}
+
+static const struct acpi_device_id i2c_amd_acpi_match[] = {
+ { "AMDI0011" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match);
+
+static struct platform_driver i2c_amd_plat_driver = {
+ .probe = i2c_amd_probe,
+ .remove = i2c_amd_remove,
+ .driver = {
+ .name = "i2c_amd_mp2",
+ .acpi_match_table = ACPI_PTR(i2c_amd_acpi_match),
+ },
+};
+module_platform_driver(i2c_amd_plat_driver);
+
+MODULE_DESCRIPTION("AMD(R) MP2 I2C Platform Driver");
+MODULE_AUTHOR("Nehal Shah <nehal-bakulchandra.shah@amd.com>");
+MODULE_AUTHOR("Elie Morisse <syniurge@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/i2c/busses/i2c-amd-mp2.h b/drivers/i2c/busses/i2c-amd-mp2.h
new file mode 100644
index 000000000000..058362edebaa
--- /dev/null
+++ b/drivers/i2c/busses/i2c-amd-mp2.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * AMD MP2 I2C adapter driver
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Elie Morisse <syniurge@gmail.com>
+ */
+
+#ifndef I2C_AMD_PCI_MP2_H
+#define I2C_AMD_PCI_MP2_H
+
+#include <linux/i2c.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#define PCI_DEVICE_ID_AMD_MP2 0x15E6
+
+struct amd_i2c_common;
+struct amd_mp2_dev;
+
+enum {
+ /* MP2 C2P Message Registers */
+ AMD_C2P_MSG0 = 0x10500, /* MP2 Message for I2C0 */
+ AMD_C2P_MSG1 = 0x10504, /* MP2 Message for I2C1 */
+ AMD_C2P_MSG2 = 0x10508, /* DRAM Address Lo / Data 0 */
+ AMD_C2P_MSG3 = 0x1050c, /* DRAM Address HI / Data 1 */
+ AMD_C2P_MSG4 = 0x10510, /* Data 2 */
+ AMD_C2P_MSG5 = 0x10514, /* Data 3 */
+ AMD_C2P_MSG6 = 0x10518, /* Data 4 */
+ AMD_C2P_MSG7 = 0x1051c, /* Data 5 */
+ AMD_C2P_MSG8 = 0x10520, /* Data 6 */
+ AMD_C2P_MSG9 = 0x10524, /* Data 7 */
+
+ /* MP2 P2C Message Registers */
+ AMD_P2C_MSG0 = 0x10680, /* Do not use */
+ AMD_P2C_MSG1 = 0x10684, /* I2C0 interrupt register */
+ AMD_P2C_MSG2 = 0x10688, /* I2C1 interrupt register */
+ AMD_P2C_MSG3 = 0x1068C, /* MP2 debug info */
+ AMD_P2C_MSG_INTEN = 0x10690, /* MP2 interrupt gen register */
+ AMD_P2C_MSG_INTSTS = 0x10694, /* Interrupt status */
+};
+
+/* Command register data structures */
+
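+/* reqcmd value used while no command is outstanding */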
+#define i2c_none (-1)
+enum i2c_cmd {
+ i2c_read = 0,
+ i2c_write,
+ i2c_enable,
+ i2c_disable,
+ number_of_sensor_discovered,
+ is_mp2_active,
+ invalid_cmd = 0xF,
+};
+
+enum speed_enum {
+ speed100k = 0,
+ speed400k = 1,
+ speed1000k = 2,
+ speed1400k = 3,
+ speed3400k = 4
+};
+
+enum mem_type {
+ use_dram = 0,
+ use_c2pmsg = 1,
+};
+
+/**
+ * union i2c_cmd_base : bit access of C2P commands
+ * @i2c_cmd: bit 0..3 i2c R/W command
+ * @bus_id: bit 4..7 i2c bus index
+ * @slave_addr: bit 8..15 slave address
+ * @length: bit 16..27 read/write length
+ * @i2c_speed: bit 28..30 bus speed
+ * @mem_type: bit 31 0-DRAM; 1-C2P msg o/p
+ */
+union i2c_cmd_base {
+ u32 ul;
+ struct {
+ enum i2c_cmd i2c_cmd : 4;
+ u8 bus_id : 4;
+ u32 slave_addr : 8;
+ u32 length : 12;
+ enum speed_enum i2c_speed : 3;
+ enum mem_type mem_type : 1;
+ } s;
+};
+
+enum response_type {
+ invalid_response = 0,
+ command_success = 1,
+ command_failed = 2,
+};
+
+enum status_type {
+ i2c_readcomplete_event = 0,
+ i2c_readfail_event = 1,
+ i2c_writecomplete_event = 2,
+ i2c_writefail_event = 3,
+ i2c_busenable_complete = 4,
+ i2c_busenable_failed = 5,
+ i2c_busdisable_complete = 6,
+ i2c_busdisable_failed = 7,
+ invalid_data_length = 8,
+ invalid_slave_address = 9,
+ invalid_i2cbus_id = 10,
+ invalid_dram_addr = 11,
+ invalid_command = 12,
+ mp2_active = 13,
+ numberof_sensors_discovered_resp = 14,
+ i2c_bus_notinitialized
+};
+
+/**
+ * union i2c_event - bit access of P2C events
+ * @response: bit 0..1 i2c response type
+ * @status: bit 2..6 status_type
+ * @mem_type: bit 7 0-DRAM; 1-C2P msg o/p
+ * @bus_id: bit 8..11 i2c bus id
+ * @length: bit 12..23 message length
+ * @slave_addr: bit 24-31 slave address
+ */
+union i2c_event {
+ u32 ul;
+ struct {
+ enum response_type response : 2;
+ enum status_type status : 5;
+ enum mem_type mem_type : 1;
+ u8 bus_id : 4;
+ u32 length : 12;
+ u32 slave_addr : 8;
+ } r;
+};
+
+/**
+ * struct amd_i2c_common - per bus/i2c adapter context, shared
+ * between the pci and the platform driver
+ * @eventval: MP2 event value set by the IRQ handler
+ * @mp2_dev: MP2 pci device this adapter is part of
+ * @msg: i2c message
+ * @cmd_completion: function called by the IRQ handler to signal
+ * the platform driver
+ * @reqcmd: requested i2c command type
+ * @cmd_success: set to true if the MP2 responded to a command with
+ * the expected status and response type
+ * @bus_id: bus index
+ * @i2c_speed: i2c bus speed determined by the slowest slave
+ * @dma_buf: if msg length > 32, holds the DMA buffer virtual address
+ * @dma_addr: if msg length > 32, holds the DMA buffer address
+ */
+struct amd_i2c_common {
+ union i2c_event eventval;
+ struct amd_mp2_dev *mp2_dev;
+ struct i2c_msg *msg;
+ void (*cmd_completion)(struct amd_i2c_common *i2c_common);
+ enum i2c_cmd reqcmd;
+ u8 cmd_success;
+ u8 bus_id;
+ enum speed_enum i2c_speed;
+ u8 *dma_buf;
+ dma_addr_t dma_addr;
+#ifdef CONFIG_PM
+ int (*suspend)(struct amd_i2c_common *i2c_common);
+ int (*resume)(struct amd_i2c_common *i2c_common);
+#endif /* CONFIG_PM */
+};
+
+/**
+ * struct amd_mp2_dev - per PCI device context
+ * @pci_dev: PCI driver node
+ * @busses: MP2 devices may have up to two busses,
+ * each bus corresponding to an i2c adapter
+ * @mmio: ioremapped registers
+ * @c2p_lock: controls access to the C2P mailbox shared between
+ * the two adapters
+ * @c2p_lock_busid: id of the adapter which locked c2p_lock
+ */
+struct amd_mp2_dev {
+ struct pci_dev *pci_dev;
+ struct amd_i2c_common *busses[2];
+ void __iomem *mmio;
+ struct mutex c2p_lock;
+ u8 c2p_lock_busid;
+ unsigned int probed;
+};
+
+#define ndev_pdev(ndev) ((ndev)->pci_dev)
+#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
+#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
+#define work_amd_i2c_common(__work) \
+ container_of(__work, struct amd_i2c_common, work.work)
+
+/* PCIe communication driver */
+
+int amd_mp2_rw(struct amd_i2c_common *i2c_common, enum i2c_cmd reqcmd);
+int amd_mp2_bus_enable_set(struct amd_i2c_common *i2c_common, bool enable);
+
+void amd_mp2_process_event(struct amd_i2c_common *i2c_common);
+
+void amd_mp2_rw_timeout(struct amd_i2c_common *i2c_common);
+
+int amd_mp2_register_cb(struct amd_i2c_common *i2c_common);
+int amd_mp2_unregister_cb(struct amd_i2c_common *i2c_common);
+
+struct amd_mp2_dev *amd_mp2_find_device(void);
+
+static inline void amd_mp2_pm_runtime_get(struct amd_mp2_dev *mp2_dev)
+{
+ pm_runtime_get_sync(&mp2_dev->pci_dev->dev);
+}
+
+static inline void amd_mp2_pm_runtime_put(struct amd_mp2_dev *mp2_dev)
+{
+ pm_runtime_mark_last_busy(&mp2_dev->pci_dev->dev);
+ pm_runtime_put_autosuspend(&mp2_dev->pci_dev->dev);
+}
+
+#endif
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
new file mode 100644
index 000000000000..8d55cdd69ff4
--- /dev/null
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
+ *
+ * Copyright (C) 2011 Weinmann Medical GmbH
+ * Author: Nikolaus Voss <n.voss@weinmann.de>
+ *
+ * Evolved from original work by:
+ * Copyright (C) 2004 Rick Bronson
+ * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
+ *
+ * Borrowed heavily from original work by:
+ * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "i2c-at91.h"
+
+unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
+{
+ return readl_relaxed(dev->base + reg);
+}
+
+void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
+{
+ writel_relaxed(val, dev->base + reg);
+}
+
+void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
+}
+
+void at91_twi_irq_save(struct at91_twi_dev *dev)
+{
+ dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
+ at91_disable_twi_interrupts(dev);
+}
+
+void at91_twi_irq_restore(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_IER, dev->imr);
+}
+
+void at91_init_twi_bus(struct at91_twi_dev *dev)
+{
+ at91_disable_twi_interrupts(dev);
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
+ if (dev->slave_detected)
+ at91_init_twi_bus_slave(dev);
+ else
+ at91_init_twi_bus_master(dev);
+}
+
+static struct at91_twi_pdata at91rm9200_config = {
+ .clk_max_div = 5,
+ .clk_offset = 3,
+ .has_unre_flag = true,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata at91sam9261_config = {
+ .clk_max_div = 5,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata at91sam9260_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata at91sam9g20_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata at91sam9g10_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static const struct platform_device_id at91_twi_devtypes[] = {
+ {
+ .name = "i2c-at91rm9200",
+ .driver_data = (unsigned long) &at91rm9200_config,
+ }, {
+ .name = "i2c-at91sam9261",
+ .driver_data = (unsigned long) &at91sam9261_config,
+ }, {
+ .name = "i2c-at91sam9260",
+ .driver_data = (unsigned long) &at91sam9260_config,
+ }, {
+ .name = "i2c-at91sam9g20",
+ .driver_data = (unsigned long) &at91sam9g20_config,
+ }, {
+ .name = "i2c-at91sam9g10",
+ .driver_data = (unsigned long) &at91sam9g10_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+#if defined(CONFIG_OF)
+static struct at91_twi_pdata at91sam9x5_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata sama5d4_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = true,
+};
+
+static struct at91_twi_pdata sama5d2_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = true,
+ .has_alt_cmd = true,
+ .has_hold_field = true,
+};
+
+static const struct of_device_id atmel_twi_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-i2c",
+ .data = &at91rm9200_config,
+ }, {
+ .compatible = "atmel,at91sam9260-i2c",
+ .data = &at91sam9260_config,
+ }, {
+ .compatible = "atmel,at91sam9261-i2c",
+ .data = &at91sam9261_config,
+ }, {
+ .compatible = "atmel,at91sam9g20-i2c",
+ .data = &at91sam9g20_config,
+ }, {
+ .compatible = "atmel,at91sam9g10-i2c",
+ .data = &at91sam9g10_config,
+ }, {
+ .compatible = "atmel,at91sam9x5-i2c",
+ .data = &at91sam9x5_config,
+ }, {
+ .compatible = "atmel,sama5d4-i2c",
+ .data = &sama5d4_config,
+ }, {
+ .compatible = "atmel,sama5d2-i2c",
+ .data = &sama5d2_config,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
+#endif
+
+static struct at91_twi_pdata *at91_twi_get_driver_data(
+ struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
+ if (!match)
+ return NULL;
+ return (struct at91_twi_pdata *)match->data;
+ }
+ return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
+}
+
+static int at91_twi_probe(struct platform_device *pdev)
+{
+ struct at91_twi_dev *dev;
+ struct resource *mem;
+ int rc;
+ u32 phy_addr;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+ phy_addr = mem->start;
+
+ dev->pdata = at91_twi_get_driver_data(pdev);
+ if (!dev->pdata)
+ return -ENODEV;
+
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
+
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0)
+ return dev->irq;
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->clk = devm_clk_get(dev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ dev_err(dev->dev, "no clock defined\n");
+ return -ENODEV;
+ }
+ clk_prepare_enable(dev->clk);
+
+ snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
+ i2c_set_adapdata(&dev->adapter, dev);
+ dev->adapter.owner = THIS_MODULE;
+ dev->adapter.class = I2C_CLASS_DEPRECATED;
+ dev->adapter.dev.parent = dev->dev;
+ dev->adapter.nr = pdev->id;
+ dev->adapter.timeout = AT91_I2C_TIMEOUT;
+ dev->adapter.dev.of_node = pdev->dev.of_node;
+
+ dev->slave_detected = i2c_detect_slave_mode(&pdev->dev);
+
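+ /* the role-specific probe requests the IRQ and sets the adapter algorithm */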
+ if (dev->slave_detected)
+ rc = at91_twi_probe_slave(pdev, phy_addr, dev);
+ else
+ rc = at91_twi_probe_master(pdev, phy_addr, dev);
+ if (rc)
+ return rc;
+
+ at91_init_twi_bus(dev);
+
+ pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_enable(dev->dev);
+
+ rc = i2c_add_numbered_adapter(&dev->adapter);
+ if (rc) {
+ clk_disable_unprepare(dev->clk);
+
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
+
+ return rc;
+ }
+
+ dev_info(dev->dev, "AT91 i2c bus driver (hw version: %#x).\n",
+ at91_twi_read(dev, AT91_TWI_VER));
+ return 0;
+}
+
+static int at91_twi_remove(struct platform_device *pdev)
+{
+ struct at91_twi_dev *dev = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&dev->adapter);
+ clk_disable_unprepare(dev->clk);
+
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int at91_twi_runtime_suspend(struct device *dev)
+{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(twi_dev->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int at91_twi_runtime_resume(struct device *dev)
+{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(twi_dev->clk);
+}
+
+static int at91_twi_suspend_noirq(struct device *dev)
+{
+ if (!pm_runtime_status_suspended(dev))
+ at91_twi_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int at91_twi_resume_noirq(struct device *dev)
+{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_status_suspended(dev)) {
+ ret = at91_twi_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+
+ at91_init_twi_bus(twi_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops at91_twi_pm = {
+ .suspend_noirq = at91_twi_suspend_noirq,
+ .resume_noirq = at91_twi_resume_noirq,
+ .runtime_suspend = at91_twi_runtime_suspend,
+ .runtime_resume = at91_twi_runtime_resume,
+};
+
+#define at91_twi_pm_ops (&at91_twi_pm)
+#else
+#define at91_twi_pm_ops NULL
+#endif
+
+static struct platform_driver at91_twi_driver = {
+ .probe = at91_twi_probe,
+ .remove = at91_twi_remove,
+ .id_table = at91_twi_devtypes,
+ .driver = {
+ .name = "at91_i2c",
+ .of_match_table = of_match_ptr(atmel_twi_dt_ids),
+ .pm = at91_twi_pm_ops,
+ },
+};
+
+static int __init at91_twi_init(void)
+{
+ return platform_driver_register(&at91_twi_driver);
+}
+
+static void __exit at91_twi_exit(void)
+{
+ platform_driver_unregister(&at91_twi_driver);
+}
+
+subsys_initcall(at91_twi_init);
+module_exit(at91_twi_exit);
+
+MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
+MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_i2c");
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91-master.c
index 3f3e8b3bf5ff..e87232f2e708 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
*
@@ -10,11 +11,6 @@
*
* Borrowed heavily from original work by:
* Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/clk.h>
@@ -25,159 +21,16 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>
-#include <linux/pinctrl/consumer.h>
-
-#define DEFAULT_TWI_CLK_HZ 100000 /* max 400 Kbits/s */
-#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
-#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
-#define AUTOSUSPEND_TIMEOUT 2000
-#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256
-
-/* AT91 TWI register definitions */
-#define AT91_TWI_CR 0x0000 /* Control Register */
-#define AT91_TWI_START BIT(0) /* Send a Start Condition */
-#define AT91_TWI_STOP BIT(1) /* Send a Stop Condition */
-#define AT91_TWI_MSEN BIT(2) /* Master Transfer Enable */
-#define AT91_TWI_MSDIS BIT(3) /* Master Transfer Disable */
-#define AT91_TWI_SVEN BIT(4) /* Slave Transfer Enable */
-#define AT91_TWI_SVDIS BIT(5) /* Slave Transfer Disable */
-#define AT91_TWI_QUICK BIT(6) /* SMBus quick command */
-#define AT91_TWI_SWRST BIT(7) /* Software Reset */
-#define AT91_TWI_ACMEN BIT(16) /* Alternative Command Mode Enable */
-#define AT91_TWI_ACMDIS BIT(17) /* Alternative Command Mode Disable */
-#define AT91_TWI_THRCLR BIT(24) /* Transmit Holding Register Clear */
-#define AT91_TWI_RHRCLR BIT(25) /* Receive Holding Register Clear */
-#define AT91_TWI_LOCKCLR BIT(26) /* Lock Clear */
-#define AT91_TWI_FIFOEN BIT(28) /* FIFO Enable */
-#define AT91_TWI_FIFODIS BIT(29) /* FIFO Disable */
-
-#define AT91_TWI_MMR 0x0004 /* Master Mode Register */
-#define AT91_TWI_IADRSZ_1 0x0100 /* Internal Device Address Size */
-#define AT91_TWI_MREAD BIT(12) /* Master Read Direction */
-
-#define AT91_TWI_IADR 0x000c /* Internal Address Register */
-
-#define AT91_TWI_CWGR 0x0010 /* Clock Waveform Generator Reg */
-#define AT91_TWI_CWGR_HOLD_MAX 0x1f
-#define AT91_TWI_CWGR_HOLD(x) (((x) & AT91_TWI_CWGR_HOLD_MAX) << 24)
-
-#define AT91_TWI_SR 0x0020 /* Status Register */
-#define AT91_TWI_TXCOMP BIT(0) /* Transmission Complete */
-#define AT91_TWI_RXRDY BIT(1) /* Receive Holding Register Ready */
-#define AT91_TWI_TXRDY BIT(2) /* Transmit Holding Register Ready */
-#define AT91_TWI_OVRE BIT(6) /* Overrun Error */
-#define AT91_TWI_UNRE BIT(7) /* Underrun Error */
-#define AT91_TWI_NACK BIT(8) /* Not Acknowledged */
-#define AT91_TWI_LOCK BIT(23) /* TWI Lock due to Frame Errors */
-
-#define AT91_TWI_INT_MASK \
- (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
-
-#define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
-#define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
-#define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
-#define AT91_TWI_RHR 0x0030 /* Receive Holding Register */
-#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
-
-#define AT91_TWI_ACR 0x0040 /* Alternative Command Register */
-#define AT91_TWI_ACR_DATAL(len) ((len) & 0xff)
-#define AT91_TWI_ACR_DIR BIT(8)
-
-#define AT91_TWI_FMR 0x0050 /* FIFO Mode Register */
-#define AT91_TWI_FMR_TXRDYM(mode) (((mode) & 0x3) << 0)
-#define AT91_TWI_FMR_TXRDYM_MASK (0x3 << 0)
-#define AT91_TWI_FMR_RXRDYM(mode) (((mode) & 0x3) << 4)
-#define AT91_TWI_FMR_RXRDYM_MASK (0x3 << 4)
-#define AT91_TWI_ONE_DATA 0x0
-#define AT91_TWI_TWO_DATA 0x1
-#define AT91_TWI_FOUR_DATA 0x2
-
-#define AT91_TWI_FLR 0x0054 /* FIFO Level Register */
-
-#define AT91_TWI_FSR 0x0060 /* FIFO Status Register */
-#define AT91_TWI_FIER 0x0064 /* FIFO Interrupt Enable Register */
-#define AT91_TWI_FIDR 0x0068 /* FIFO Interrupt Disable Register */
-#define AT91_TWI_FIMR 0x006c /* FIFO Interrupt Mask Register */
-
-#define AT91_TWI_VER 0x00fc /* Version Register */
-
-struct at91_twi_pdata {
- unsigned clk_max_div;
- unsigned clk_offset;
- bool has_unre_flag;
- bool has_alt_cmd;
- bool has_hold_field;
- struct at_dma_slave dma_slave;
-};
-
-struct at91_twi_dma {
- struct dma_chan *chan_rx;
- struct dma_chan *chan_tx;
- struct scatterlist sg[2];
- struct dma_async_tx_descriptor *data_desc;
- enum dma_data_direction direction;
- bool buf_mapped;
- bool xfer_in_progress;
-};
-struct at91_twi_dev {
- struct device *dev;
- void __iomem *base;
- struct completion cmd_complete;
- struct clk *clk;
- u8 *buf;
- size_t buf_len;
- struct i2c_msg *msg;
- int irq;
- unsigned imr;
- unsigned transfer_status;
- struct i2c_adapter adapter;
- unsigned twi_cwgr_reg;
- struct at91_twi_pdata *pdata;
- bool use_dma;
- bool use_alt_cmd;
- bool recv_len_abort;
- u32 fifo_size;
- struct at91_twi_dma dma;
-};
-
-static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
-{
- return readl_relaxed(dev->base + reg);
-}
-
-static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
-{
- writel_relaxed(val, dev->base + reg);
-}
-
-static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
-{
- at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
-}
+#include "i2c-at91.h"
-static void at91_twi_irq_save(struct at91_twi_dev *dev)
+void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
- dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
- at91_disable_twi_interrupts(dev);
-}
-
-static void at91_twi_irq_restore(struct at91_twi_dev *dev)
-{
- at91_twi_write(dev, AT91_TWI_IER, dev->imr);
-}
-
-static void at91_init_twi_bus(struct at91_twi_dev *dev)
-{
- at91_disable_twi_interrupts(dev);
- at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
/* FIFO should be enabled immediately after the software reset */
if (dev->fifo_size)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
@@ -190,16 +43,18 @@ static void at91_init_twi_bus(struct at91_twi_dev *dev)
* Calculate symmetric clock as stated in datasheet:
* twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
*/
-static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
+static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
int ckdiv, cdiv, div, hold = 0;
struct at91_twi_pdata *pdata = dev->pdata;
int offset = pdata->clk_offset;
int max_ckdiv = pdata->clk_max_div;
- u32 twd_hold_time_ns = 0;
+ struct i2c_timings timings, *t = &timings;
+
+ i2c_parse_fw_timings(dev->dev, t, true);
div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
- 2 * twi_clk) - offset);
+ 2 * t->bus_freq_hz) - offset);
ckdiv = fls(div >> 8);
cdiv = div >> ckdiv;
@@ -211,15 +66,12 @@ static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
}
if (pdata->has_hold_field) {
- of_property_read_u32(dev->dev->of_node, "i2c-sda-hold-time-ns",
- &twd_hold_time_ns);
-
/*
* hold time = HOLD + 3 x T_peripheral_clock
* Use clk rate in kHz to prevent overflows when computing
* hold.
*/
- hold = DIV_ROUND_UP(twd_hold_time_ns
+ hold = DIV_ROUND_UP(t->sda_hold_ns
* (clk_get_rate(dev->clk) / 1000), 1000000);
hold -= 3;
if (hold < 0)
@@ -236,7 +88,7 @@ static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
| AT91_TWI_CWGR_HOLD(hold);
dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns)\n",
- cdiv, ckdiv, hold, twd_hold_time_ns);
+ cdiv, ckdiv, hold, t->sda_hold_ns);
}
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
@@ -833,124 +685,6 @@ static const struct i2c_algorithm at91_twi_algorithm = {
.functionality = at91_twi_func,
};
-static struct at91_twi_pdata at91rm9200_config = {
- .clk_max_div = 5,
- .clk_offset = 3,
- .has_unre_flag = true,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata at91sam9261_config = {
- .clk_max_div = 5,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata at91sam9260_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata at91sam9g20_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata at91sam9g10_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static const struct platform_device_id at91_twi_devtypes[] = {
- {
- .name = "i2c-at91rm9200",
- .driver_data = (unsigned long) &at91rm9200_config,
- }, {
- .name = "i2c-at91sam9261",
- .driver_data = (unsigned long) &at91sam9261_config,
- }, {
- .name = "i2c-at91sam9260",
- .driver_data = (unsigned long) &at91sam9260_config,
- }, {
- .name = "i2c-at91sam9g20",
- .driver_data = (unsigned long) &at91sam9g20_config,
- }, {
- .name = "i2c-at91sam9g10",
- .driver_data = (unsigned long) &at91sam9g10_config,
- }, {
- /* sentinel */
- }
-};
-
-#if defined(CONFIG_OF)
-static struct at91_twi_pdata at91sam9x5_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata sama5d4_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = true,
-};
-
-static struct at91_twi_pdata sama5d2_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = true,
- .has_alt_cmd = true,
- .has_hold_field = true,
-};
-
-static const struct of_device_id atmel_twi_dt_ids[] = {
- {
- .compatible = "atmel,at91rm9200-i2c",
- .data = &at91rm9200_config,
- } , {
- .compatible = "atmel,at91sam9260-i2c",
- .data = &at91sam9260_config,
- } , {
- .compatible = "atmel,at91sam9261-i2c",
- .data = &at91sam9261_config,
- } , {
- .compatible = "atmel,at91sam9g20-i2c",
- .data = &at91sam9g20_config,
- } , {
- .compatible = "atmel,at91sam9g10-i2c",
- .data = &at91sam9g10_config,
- }, {
- .compatible = "atmel,at91sam9x5-i2c",
- .data = &at91sam9x5_config,
- }, {
- .compatible = "atmel,sama5d4-i2c",
- .data = &sama5d4_config,
- }, {
- .compatible = "atmel,sama5d2-i2c",
- .data = &sama5d2_config,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
-#endif
-
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
int ret = 0;
@@ -1033,74 +767,24 @@ error:
return ret;
}
-static struct at91_twi_pdata *at91_twi_get_driver_data(
- struct platform_device *pdev)
+int at91_twi_probe_master(struct platform_device *pdev,
+ u32 phy_addr, struct at91_twi_dev *dev)
{
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
- if (!match)
- return NULL;
- return (struct at91_twi_pdata *)match->data;
- }
- return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
-}
-
-static int at91_twi_probe(struct platform_device *pdev)
-{
- struct at91_twi_dev *dev;
- struct resource *mem;
int rc;
- u32 phy_addr;
- u32 bus_clk_rate;
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
init_completion(&dev->cmd_complete);
- dev->dev = &pdev->dev;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENODEV;
- phy_addr = mem->start;
-
- dev->pdata = at91_twi_get_driver_data(pdev);
- if (!dev->pdata)
- return -ENODEV;
-
- dev->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(dev->base))
- return PTR_ERR(dev->base);
-
- dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq < 0)
- return dev->irq;
rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
- dev_name(dev->dev), dev);
+ dev_name(dev->dev), dev);
if (rc) {
dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
return rc;
}
- platform_set_drvdata(pdev, dev);
-
- dev->clk = devm_clk_get(dev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- dev_err(dev->dev, "no clock defined\n");
- return -ENODEV;
- }
- rc = clk_prepare_enable(dev->clk);
- if (rc)
- return rc;
-
if (dev->dev->of_node) {
rc = at91_twi_configure_dma(dev, phy_addr);
- if (rc == -EPROBE_DEFER) {
- clk_disable_unprepare(dev->clk);
+ if (rc == -EPROBE_DEFER)
return rc;
- }
}
if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
@@ -1108,144 +792,10 @@ static int at91_twi_probe(struct platform_device *pdev)
dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
}
- rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
- &bus_clk_rate);
- if (rc)
- bus_clk_rate = DEFAULT_TWI_CLK_HZ;
-
- at91_calc_twi_clock(dev, bus_clk_rate);
- at91_init_twi_bus(dev);
+ at91_calc_twi_clock(dev);
- snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
- i2c_set_adapdata(&dev->adapter, dev);
- dev->adapter.owner = THIS_MODULE;
- dev->adapter.class = I2C_CLASS_DEPRECATED;
dev->adapter.algo = &at91_twi_algorithm;
dev->adapter.quirks = &at91_twi_quirks;
- dev->adapter.dev.parent = dev->dev;
- dev->adapter.nr = pdev->id;
- dev->adapter.timeout = AT91_I2C_TIMEOUT;
- dev->adapter.dev.of_node = pdev->dev.of_node;
-
- pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(dev->dev);
- pm_runtime_set_active(dev->dev);
- pm_runtime_enable(dev->dev);
-
- rc = i2c_add_numbered_adapter(&dev->adapter);
- if (rc) {
- clk_disable_unprepare(dev->clk);
-
- pm_runtime_disable(dev->dev);
- pm_runtime_set_suspended(dev->dev);
-
- return rc;
- }
-
- dev_info(dev->dev, "AT91 i2c bus driver (hw version: %#x).\n",
- at91_twi_read(dev, AT91_TWI_VER));
- return 0;
-}
-
-static int at91_twi_remove(struct platform_device *pdev)
-{
- struct at91_twi_dev *dev = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&dev->adapter);
- clk_disable_unprepare(dev->clk);
-
- pm_runtime_disable(dev->dev);
- pm_runtime_set_suspended(dev->dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int at91_twi_runtime_suspend(struct device *dev)
-{
- struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
-
- clk_disable_unprepare(twi_dev->clk);
-
- pinctrl_pm_select_sleep_state(dev);
-
- return 0;
-}
-
-static int at91_twi_runtime_resume(struct device *dev)
-{
- struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
-
- pinctrl_pm_select_default_state(dev);
-
- return clk_prepare_enable(twi_dev->clk);
-}
-
-static int at91_twi_suspend_noirq(struct device *dev)
-{
- if (!pm_runtime_status_suspended(dev))
- at91_twi_runtime_suspend(dev);
-
- return 0;
-}
-
-static int at91_twi_resume_noirq(struct device *dev)
-{
- struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
- int ret;
-
- if (!pm_runtime_status_suspended(dev)) {
- ret = at91_twi_runtime_resume(dev);
- if (ret)
- return ret;
- }
-
- pm_runtime_mark_last_busy(dev);
- pm_request_autosuspend(dev);
-
- at91_init_twi_bus(twi_dev);
return 0;
}
-
-static const struct dev_pm_ops at91_twi_pm = {
- .suspend_noirq = at91_twi_suspend_noirq,
- .resume_noirq = at91_twi_resume_noirq,
- .runtime_suspend = at91_twi_runtime_suspend,
- .runtime_resume = at91_twi_runtime_resume,
-};
-
-#define at91_twi_pm_ops (&at91_twi_pm)
-#else
-#define at91_twi_pm_ops NULL
-#endif
-
-static struct platform_driver at91_twi_driver = {
- .probe = at91_twi_probe,
- .remove = at91_twi_remove,
- .id_table = at91_twi_devtypes,
- .driver = {
- .name = "at91_i2c",
- .of_match_table = of_match_ptr(atmel_twi_dt_ids),
- .pm = at91_twi_pm_ops,
- },
-};
-
-static int __init at91_twi_init(void)
-{
- return platform_driver_register(&at91_twi_driver);
-}
-
-static void __exit at91_twi_exit(void)
-{
- platform_driver_unregister(&at91_twi_driver);
-}
-
-subsys_initcall(at91_twi_init);
-module_exit(at91_twi_exit);
-
-MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
-MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:at91_i2c");
diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
new file mode 100644
index 000000000000..d6eeea5166c0
--- /dev/null
+++ b/drivers/i2c/busses/i2c-at91-slave.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * i2c slave support for Atmel's AT91 Two-Wire Interface (TWI)
+ *
+ * Copyright (C) 2017 Juergen Fitschen <me@jue.yt>
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+
+#include "i2c-at91.h"
+
+static irqreturn_t atmel_twi_interrupt_slave(int irq, void *dev_id)
+{
+ struct at91_twi_dev *dev = dev_id;
+ const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
+ const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);
+ u8 value;
+
+ if (!irqstatus)
+ return IRQ_NONE;
+
+ /* slave address has been detected on I2C bus */
+ if (irqstatus & AT91_TWI_SVACC) {
+ if (status & AT91_TWI_SVREAD) {
+ i2c_slave_event(dev->slave,
+ I2C_SLAVE_READ_REQUESTED, &value);
+ writeb_relaxed(value, dev->base + AT91_TWI_THR);
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_TXRDY | AT91_TWI_EOSACC);
+ } else {
+ i2c_slave_event(dev->slave,
+ I2C_SLAVE_WRITE_REQUESTED, &value);
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_RXRDY | AT91_TWI_EOSACC);
+ }
+ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_SVACC);
+ }
+
+ /* byte transmitted to remote master */
+ if (irqstatus & AT91_TWI_TXRDY) {
+ i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, &value);
+ writeb_relaxed(value, dev->base + AT91_TWI_THR);
+ }
+
+ /* byte received from remote master */
+ if (irqstatus & AT91_TWI_RXRDY) {
+ value = readb_relaxed(dev->base + AT91_TWI_RHR);
+ i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+ }
+
+ /* master sent stop */
+ if (irqstatus & AT91_TWI_EOSACC) {
+ at91_twi_write(dev, AT91_TWI_IDR,
+ AT91_TWI_TXRDY | AT91_TWI_RXRDY | AT91_TWI_EOSACC);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_SVACC);
+ i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &value);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int at91_reg_slave(struct i2c_client *slave)
+{
+ struct at91_twi_dev *dev = i2c_get_adapdata(slave->adapter);
+
+ if (dev->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ /* Make sure twi_clk doesn't get turned off! */
+ pm_runtime_get_sync(dev->dev);
+
+ dev->slave = slave;
+ dev->smr = AT91_TWI_SMR_SADR(slave->addr);
+
+ at91_init_twi_bus(dev);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_SVACC);
+
+ dev_info(dev->dev, "entered slave mode (ADR=%d)\n", slave->addr);
+
+ return 0;
+}
+
+static int at91_unreg_slave(struct i2c_client *slave)
+{
+ struct at91_twi_dev *dev = i2c_get_adapdata(slave->adapter);
+
+ WARN_ON(!dev->slave);
+
+ dev_info(dev->dev, "leaving slave mode\n");
+
+ dev->slave = NULL;
+ dev->smr = 0;
+
+ at91_init_twi_bus(dev);
+
+ pm_runtime_put(dev->dev);
+
+ return 0;
+}
+
+static u32 at91_twi_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
+ | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm at91_twi_algorithm_slave = {
+ .reg_slave = at91_reg_slave,
+ .unreg_slave = at91_unreg_slave,
+ .functionality = at91_twi_func,
+};
+
+int at91_twi_probe_slave(struct platform_device *pdev,
+ u32 phy_addr, struct at91_twi_dev *dev)
+{
+ int rc;
+
+ rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt_slave,
+ 0, dev_name(dev->dev), dev);
+ if (rc) {
+ dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
+ return rc;
+ }
+
+ dev->adapter.algo = &at91_twi_algorithm_slave;
+
+ return 0;
+}
+
+void at91_init_twi_bus_slave(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSDIS);
+ if (dev->slave_detected && dev->smr) {
+ at91_twi_write(dev, AT91_TWI_SMR, dev->smr);
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVEN);
+ }
+}
diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h
new file mode 100644
index 000000000000..499b506f6128
--- /dev/null
+++ b/drivers/i2c/busses/i2c-at91.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
+ *
+ * Copyright (C) 2011 Weinmann Medical GmbH
+ * Author: Nikolaus Voss <n.voss@weinmann.de>
+ *
+ * Evolved from original work by:
+ * Copyright (C) 2004 Rick Bronson
+ * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
+ *
+ * Borrowed heavily from original work by:
+ * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/dma-atmel.h>
+#include <linux/platform_device.h>
+
+#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
+#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
+#define AUTOSUSPEND_TIMEOUT 2000
+#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256
+
+/* AT91 TWI register definitions */
+#define AT91_TWI_CR 0x0000 /* Control Register */
+#define AT91_TWI_START BIT(0) /* Send a Start Condition */
+#define AT91_TWI_STOP BIT(1) /* Send a Stop Condition */
+#define AT91_TWI_MSEN BIT(2) /* Master Transfer Enable */
+#define AT91_TWI_MSDIS BIT(3) /* Master Transfer Disable */
+#define AT91_TWI_SVEN BIT(4) /* Slave Transfer Enable */
+#define AT91_TWI_SVDIS BIT(5) /* Slave Transfer Disable */
+#define AT91_TWI_QUICK BIT(6) /* SMBus quick command */
+#define AT91_TWI_SWRST BIT(7) /* Software Reset */
+#define AT91_TWI_ACMEN BIT(16) /* Alternative Command Mode Enable */
+#define AT91_TWI_ACMDIS BIT(17) /* Alternative Command Mode Disable */
+#define AT91_TWI_THRCLR BIT(24) /* Transmit Holding Register Clear */
+#define AT91_TWI_RHRCLR BIT(25) /* Receive Holding Register Clear */
+#define AT91_TWI_LOCKCLR BIT(26) /* Lock Clear */
+#define AT91_TWI_FIFOEN BIT(28) /* FIFO Enable */
+#define AT91_TWI_FIFODIS BIT(29) /* FIFO Disable */
+
+#define AT91_TWI_MMR 0x0004 /* Master Mode Register */
+#define AT91_TWI_IADRSZ_1 0x0100 /* Internal Device Address Size */
+#define AT91_TWI_MREAD BIT(12) /* Master Read Direction */
+
+#define AT91_TWI_SMR 0x0008 /* Slave Mode Register */
+#define AT91_TWI_SMR_SADR_MAX 0x007f
+#define AT91_TWI_SMR_SADR(x) (((x) & AT91_TWI_SMR_SADR_MAX) << 16)
+
+#define AT91_TWI_IADR 0x000c /* Internal Address Register */
+
+#define AT91_TWI_CWGR 0x0010 /* Clock Waveform Generator Reg */
+#define AT91_TWI_CWGR_HOLD_MAX 0x1f
+#define AT91_TWI_CWGR_HOLD(x) (((x) & AT91_TWI_CWGR_HOLD_MAX) << 24)
+
+#define AT91_TWI_SR 0x0020 /* Status Register */
+#define AT91_TWI_TXCOMP BIT(0) /* Transmission Complete */
+#define AT91_TWI_RXRDY BIT(1) /* Receive Holding Register Ready */
+#define AT91_TWI_TXRDY BIT(2) /* Transmit Holding Register Ready */
+#define AT91_TWI_SVREAD BIT(3) /* Slave Read */
+#define AT91_TWI_SVACC BIT(4) /* Slave Access */
+#define AT91_TWI_OVRE BIT(6) /* Overrun Error */
+#define AT91_TWI_UNRE BIT(7) /* Underrun Error */
+#define AT91_TWI_NACK BIT(8) /* Not Acknowledged */
+#define AT91_TWI_EOSACC BIT(11) /* End Of Slave Access */
+#define AT91_TWI_LOCK BIT(23) /* TWI Lock due to Frame Errors */
+
+#define AT91_TWI_INT_MASK \
+ (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK \
+ | AT91_TWI_SVACC | AT91_TWI_EOSACC)
+
+#define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
+#define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
+#define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
+#define AT91_TWI_RHR 0x0030 /* Receive Holding Register */
+#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
+
+#define AT91_TWI_ACR 0x0040 /* Alternative Command Register */
+#define AT91_TWI_ACR_DATAL(len) ((len) & 0xff)
+#define AT91_TWI_ACR_DIR BIT(8)
+
+#define AT91_TWI_FMR 0x0050 /* FIFO Mode Register */
+#define AT91_TWI_FMR_TXRDYM(mode) (((mode) & 0x3) << 0)
+#define AT91_TWI_FMR_TXRDYM_MASK (0x3 << 0)
+#define AT91_TWI_FMR_RXRDYM(mode) (((mode) & 0x3) << 4)
+#define AT91_TWI_FMR_RXRDYM_MASK (0x3 << 4)
+#define AT91_TWI_ONE_DATA 0x0
+#define AT91_TWI_TWO_DATA 0x1
+#define AT91_TWI_FOUR_DATA 0x2
+
+#define AT91_TWI_FLR 0x0054 /* FIFO Level Register */
+
+#define AT91_TWI_FSR 0x0060 /* FIFO Status Register */
+#define AT91_TWI_FIER 0x0064 /* FIFO Interrupt Enable Register */
+#define AT91_TWI_FIDR 0x0068 /* FIFO Interrupt Disable Register */
+#define AT91_TWI_FIMR 0x006c /* FIFO Interrupt Mask Register */
+
+#define AT91_TWI_VER 0x00fc /* Version Register */
+
+struct at91_twi_pdata {
+ unsigned clk_max_div;
+ unsigned clk_offset;
+ bool has_unre_flag;
+ bool has_alt_cmd;
+ bool has_hold_field;
+ struct at_dma_slave dma_slave;
+};
+
+struct at91_twi_dma {
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct scatterlist sg[2];
+ struct dma_async_tx_descriptor *data_desc;
+ enum dma_data_direction direction;
+ bool buf_mapped;
+ bool xfer_in_progress;
+};
+
+struct at91_twi_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct clk *clk;
+ u8 *buf;
+ size_t buf_len;
+ struct i2c_msg *msg;
+ int irq;
+ unsigned imr;
+ unsigned transfer_status;
+ struct i2c_adapter adapter;
+ unsigned twi_cwgr_reg;
+ struct at91_twi_pdata *pdata;
+ bool use_dma;
+ bool use_alt_cmd;
+ bool recv_len_abort;
+ u32 fifo_size;
+ struct at91_twi_dma dma;
+ bool slave_detected;
+#ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL
+ unsigned smr;
+ struct i2c_client *slave;
+#endif
+};
+
+unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg);
+void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val);
+void at91_disable_twi_interrupts(struct at91_twi_dev *dev);
+void at91_twi_irq_save(struct at91_twi_dev *dev);
+void at91_twi_irq_restore(struct at91_twi_dev *dev);
+void at91_init_twi_bus(struct at91_twi_dev *dev);
+
+void at91_init_twi_bus_master(struct at91_twi_dev *dev);
+int at91_twi_probe_master(struct platform_device *pdev, u32 phy_addr,
+ struct at91_twi_dev *dev);
+
+#ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL
+void at91_init_twi_bus_slave(struct at91_twi_dev *dev);
+int at91_twi_probe_slave(struct platform_device *pdev, u32 phy_addr,
+ struct at91_twi_dev *dev);
+
+#else
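+/* slave support not compiled in: bus init is a no-op and probing in slave mode fails */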
+static inline void at91_init_twi_bus_slave(struct at91_twi_dev *dev) {}
+static inline int at91_twi_probe_slave(struct platform_device *pdev,
+ u32 phy_addr, struct at91_twi_dev *dev)
+{
+ return -EINVAL;
+}
+
+#endif
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index bf564391091f..1c7b41f45c83 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -99,6 +99,7 @@
* @adapter: core i2c abstraction
* @i2c_clk: clock reference for i2c input clock
* @bus_clk_rate: current i2c bus clock rate
+ * @last: a flag indicating whether this is the last message in the transfer
*/
struct axxia_i2c_dev {
void __iomem *base;
@@ -112,6 +113,7 @@ struct axxia_i2c_dev {
struct i2c_adapter adapter;
struct clk *i2c_clk;
u32 bus_clk_rate;
+ bool last;
};
static void i2c_int_disable(struct axxia_i2c_dev *idev, u32 mask)
@@ -324,15 +326,14 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
/* Stop completed */
i2c_int_disable(idev, ~MST_STATUS_TSS);
complete(&idev->msg_complete);
- } else if (status & MST_STATUS_SNS) {
+ } else if (status & (MST_STATUS_SNS | MST_STATUS_SS)) {
/* Transfer done */
- i2c_int_disable(idev, ~MST_STATUS_TSS);
+ int mask = idev->last ? ~0 : ~MST_STATUS_TSS;
+
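+ /* on the last message all interrupts can be disabled; otherwise keep the transfer timeout armed */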
+ i2c_int_disable(idev, mask);
if (i2c_m_rd(idev->msg_r) && idev->msg_xfrd_r < idev->msg_r->len)
axxia_i2c_empty_rx_fifo(idev);
complete(&idev->msg_complete);
- } else if (status & MST_STATUS_SS) {
- /* Auto/Sequence transfer done */
- complete(&idev->msg_complete);
} else if (status & MST_STATUS_TSS) {
/* Transfer timeout */
idev->msg_err = -ETIMEDOUT;
@@ -405,6 +406,7 @@ static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[])
idev->msg_r = &msgs[1];
idev->msg_xfrd = 0;
idev->msg_xfrd_r = 0;
+ idev->last = true;
axxia_i2c_fill_tx_fifo(idev);
writel(CMD_SEQUENCE, idev->base + MST_COMMAND);
@@ -415,10 +417,6 @@ static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[])
time_left = wait_for_completion_timeout(&idev->msg_complete,
I2C_XFER_TIMEOUT);
- i2c_int_disable(idev, int_mask);
-
- axxia_i2c_empty_rx_fifo(idev);
-
if (idev->msg_err == -ENXIO) {
if (axxia_i2c_handle_seq_nak(idev))
axxia_i2c_init(idev);
@@ -438,9 +436,10 @@ static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[])
return idev->msg_err;
}
-static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg,
+ bool last)
{
- u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS;
+ u32 int_mask = MST_STATUS_ERR;
u32 rx_xfer, tx_xfer;
unsigned long time_left;
unsigned int wt_value;
@@ -449,6 +448,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
idev->msg_r = msg;
idev->msg_xfrd = 0;
idev->msg_xfrd_r = 0;
+ idev->last = last;
reinit_completion(&idev->msg_complete);
axxia_i2c_set_addr(idev, msg);
@@ -478,8 +478,13 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
if (idev->msg_err)
goto out;
- /* Start manual mode */
- writel(CMD_MANUAL, idev->base + MST_COMMAND);
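+ /* intermediate messages use manual mode (no stop); the last one uses auto mode, which issues the stop itself */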
+ if (!last) {
+ writel(CMD_MANUAL, idev->base + MST_COMMAND);
+ int_mask |= MST_STATUS_SNS;
+ } else {
+ writel(CMD_AUTO, idev->base + MST_COMMAND);
+ int_mask |= MST_STATUS_SS;
+ }
writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
@@ -507,28 +512,6 @@ out:
return idev->msg_err;
}
-static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
-{
- u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
- unsigned long time_left;
-
- reinit_completion(&idev->msg_complete);
-
- /* Issue stop */
- writel(0xb, idev->base + MST_COMMAND);
- i2c_int_enable(idev, int_mask);
- time_left = wait_for_completion_timeout(&idev->msg_complete,
- I2C_STOP_TIMEOUT);
- i2c_int_disable(idev, int_mask);
- if (time_left == 0)
- return -ETIMEDOUT;
-
- if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
- dev_warn(idev->dev, "busy after stop\n");
-
- return 0;
-}
-
/* This function checks if the msgs[] array contains messages compatible with
* Sequence mode of operation. This mode assumes there will be exactly one
* write of non-zero length followed by exactly one read of non-zero length,
@@ -558,9 +541,7 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
i2c_int_enable(idev, MST_STATUS_TSS);
for (i = 0; ret == 0 && i < num; ++i)
- ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
-
- axxia_i2c_stop(idev);
+ ret = axxia_i2c_xfer_msg(idev, &msgs[i], i == (num - 1));
return ret ? : i;
}
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 4c8c3bc4669c..a845b8decac8 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -17,17 +17,38 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#define IDM_CTRL_DIRECT_OFFSET 0x00
#define CFG_OFFSET 0x00
#define CFG_RESET_SHIFT 31
#define CFG_EN_SHIFT 30
+#define CFG_SLAVE_ADDR_0_SHIFT 28
#define CFG_M_RETRY_CNT_SHIFT 16
#define CFG_M_RETRY_CNT_MASK 0x0f
#define TIM_CFG_OFFSET 0x04
#define TIM_CFG_MODE_400_SHIFT 31
+#define TIM_RAND_SLAVE_STRETCH_SHIFT 24
+#define TIM_RAND_SLAVE_STRETCH_MASK 0x7f
+#define TIM_PERIODIC_SLAVE_STRETCH_SHIFT 16
+#define TIM_PERIODIC_SLAVE_STRETCH_MASK 0x7f
+
+#define S_CFG_SMBUS_ADDR_OFFSET 0x08
+#define S_CFG_EN_NIC_SMB_ADDR3_SHIFT 31
+#define S_CFG_NIC_SMB_ADDR3_SHIFT 24
+#define S_CFG_NIC_SMB_ADDR3_MASK 0x7f
+#define S_CFG_EN_NIC_SMB_ADDR2_SHIFT 23
+#define S_CFG_NIC_SMB_ADDR2_SHIFT 16
+#define S_CFG_NIC_SMB_ADDR2_MASK 0x7f
+#define S_CFG_EN_NIC_SMB_ADDR1_SHIFT 15
+#define S_CFG_NIC_SMB_ADDR1_SHIFT 8
+#define S_CFG_NIC_SMB_ADDR1_MASK 0x7f
+#define S_CFG_EN_NIC_SMB_ADDR0_SHIFT 7
+#define S_CFG_NIC_SMB_ADDR0_SHIFT 0
+#define S_CFG_NIC_SMB_ADDR0_MASK 0x7f
#define M_FIFO_CTRL_OFFSET 0x0c
#define M_FIFO_RX_FLUSH_SHIFT 31
@@ -37,6 +58,14 @@
#define M_FIFO_RX_THLD_SHIFT 8
#define M_FIFO_RX_THLD_MASK 0x3f
+#define S_FIFO_CTRL_OFFSET 0x10
+#define S_FIFO_RX_FLUSH_SHIFT 31
+#define S_FIFO_TX_FLUSH_SHIFT 30
+#define S_FIFO_RX_CNT_SHIFT 16
+#define S_FIFO_RX_CNT_MASK 0x7f
+#define S_FIFO_RX_THLD_SHIFT 8
+#define S_FIFO_RX_THLD_MASK 0x3f
+
#define M_CMD_OFFSET 0x30
#define M_CMD_START_BUSY_SHIFT 31
#define M_CMD_STATUS_SHIFT 25
@@ -46,6 +75,8 @@
#define M_CMD_STATUS_NACK_ADDR 0x2
#define M_CMD_STATUS_NACK_DATA 0x3
#define M_CMD_STATUS_TIMEOUT 0x4
+#define M_CMD_STATUS_FIFO_UNDERRUN 0x5
+#define M_CMD_STATUS_RX_FIFO_FULL 0x6
#define M_CMD_PROTOCOL_SHIFT 9
#define M_CMD_PROTOCOL_MASK 0xf
#define M_CMD_PROTOCOL_BLK_WR 0x7
@@ -54,17 +85,36 @@
#define M_CMD_RD_CNT_SHIFT 0
#define M_CMD_RD_CNT_MASK 0xff
+#define S_CMD_OFFSET 0x34
+#define S_CMD_START_BUSY_SHIFT 31
+#define S_CMD_STATUS_SHIFT 23
+#define S_CMD_STATUS_MASK 0x07
+#define S_CMD_STATUS_SUCCESS 0x0
+#define S_CMD_STATUS_TIMEOUT 0x5
+
#define IE_OFFSET 0x38
#define IE_M_RX_FIFO_FULL_SHIFT 31
#define IE_M_RX_THLD_SHIFT 30
#define IE_M_START_BUSY_SHIFT 28
#define IE_M_TX_UNDERRUN_SHIFT 27
+#define IE_S_RX_FIFO_FULL_SHIFT 26
+#define IE_S_RX_THLD_SHIFT 25
+#define IE_S_RX_EVENT_SHIFT 24
+#define IE_S_START_BUSY_SHIFT 23
+#define IE_S_TX_UNDERRUN_SHIFT 22
+#define IE_S_RD_EVENT_SHIFT 21
#define IS_OFFSET 0x3c
#define IS_M_RX_FIFO_FULL_SHIFT 31
#define IS_M_RX_THLD_SHIFT 30
#define IS_M_START_BUSY_SHIFT 28
#define IS_M_TX_UNDERRUN_SHIFT 27
+#define IS_S_RX_FIFO_FULL_SHIFT 26
+#define IS_S_RX_THLD_SHIFT 25
+#define IS_S_RX_EVENT_SHIFT 24
+#define IS_S_START_BUSY_SHIFT 23
+#define IS_S_TX_UNDERRUN_SHIFT 22
+#define IS_S_RD_EVENT_SHIFT 21
#define M_TX_OFFSET 0x40
#define M_TX_WR_STATUS_SHIFT 31
@@ -78,19 +128,71 @@
#define M_RX_DATA_SHIFT 0
#define M_RX_DATA_MASK 0xff
+#define S_TX_OFFSET 0x48
+#define S_TX_WR_STATUS_SHIFT 31
+#define S_TX_DATA_SHIFT 0
+#define S_TX_DATA_MASK 0xff
+
+#define S_RX_OFFSET 0x4c
+#define S_RX_STATUS_SHIFT 30
+#define S_RX_STATUS_MASK 0x03
+#define S_RX_PEC_ERR_SHIFT 29
+#define S_RX_DATA_SHIFT 0
+#define S_RX_DATA_MASK 0xff
+
#define I2C_TIMEOUT_MSEC 50000
#define M_TX_RX_FIFO_SIZE 64
+#define M_RX_FIFO_MAX_THLD_VALUE (M_TX_RX_FIFO_SIZE - 1)
+
+#define M_RX_MAX_READ_LEN 255
+#define M_RX_FIFO_THLD_VALUE 50
+
+#define IE_M_ALL_INTERRUPT_SHIFT 27
+#define IE_M_ALL_INTERRUPT_MASK 0x1e
+
+#define SLAVE_READ_WRITE_BIT_MASK 0x1
+#define SLAVE_READ_WRITE_BIT_SHIFT 0x1
+#define SLAVE_MAX_SIZE_TRANSACTION 64
+#define SLAVE_CLOCK_STRETCH_TIME 25
+
+#define IE_S_ALL_INTERRUPT_SHIFT 21
+#define IE_S_ALL_INTERRUPT_MASK 0x3f
+
+enum i2c_slave_read_status {
+ I2C_SLAVE_RX_FIFO_EMPTY = 0,
+ I2C_SLAVE_RX_START,
+ I2C_SLAVE_RX_DATA,
+ I2C_SLAVE_RX_END,
+};
+
+enum i2c_slave_xfer_dir {
+ I2C_SLAVE_DIR_READ = 0,
+ I2C_SLAVE_DIR_WRITE,
+ I2C_SLAVE_DIR_NONE,
+};
enum bus_speed_index {
I2C_SPD_100K = 0,
I2C_SPD_400K,
};
+enum bcm_iproc_i2c_type {
+ IPROC_I2C,
+ IPROC_I2C_NIC
+};
+
struct bcm_iproc_i2c_dev {
struct device *device;
+ enum bcm_iproc_i2c_type type;
int irq;
void __iomem *base;
+ void __iomem *idm_base;
+
+ u32 ape_addr_mask;
+
+ /* lock for indirect access through IDM */
+ spinlock_t idm_lock;
struct i2c_adapter adapter;
unsigned int bus_speed;
@@ -100,68 +202,332 @@ struct bcm_iproc_i2c_dev {
struct i2c_msg *msg;
+ struct i2c_client *slave;
+ enum i2c_slave_xfer_dir xfer_dir;
+
/* bytes that have been transferred */
unsigned int tx_bytes;
+ /* bytes that have been read */
+ unsigned int rx_bytes;
+ unsigned int thld_bytes;
};
/*
* Can be expanded in the future if more interrupt status bits are utilized
*/
-#define ISR_MASK (BIT(IS_M_START_BUSY_SHIFT) | BIT(IS_M_TX_UNDERRUN_SHIFT))
+#define ISR_MASK (BIT(IS_M_START_BUSY_SHIFT) | BIT(IS_M_TX_UNDERRUN_SHIFT)\
+ | BIT(IS_M_RX_THLD_SHIFT))
-static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+#define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\
+ | BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT))
+
+static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
+static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool enable);
+
+static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset)
{
- struct bcm_iproc_i2c_dev *iproc_i2c = data;
- u32 status = readl(iproc_i2c->base + IS_OFFSET);
+ u32 val;
- status &= ISR_MASK;
+ if (iproc_i2c->idm_base) {
+ spin_lock(&iproc_i2c->idm_lock);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ val = readl(iproc_i2c->base + offset);
+ spin_unlock(&iproc_i2c->idm_lock);
+ } else {
+ val = readl(iproc_i2c->base + offset);
+ }
- if (!status)
- return IRQ_NONE;
+ return val;
+}
- /* TX FIFO is empty and we have more data to send */
- if (status & BIT(IS_M_TX_UNDERRUN_SHIFT)) {
- struct i2c_msg *msg = iproc_i2c->msg;
- unsigned int tx_bytes = msg->len - iproc_i2c->tx_bytes;
- unsigned int i;
- u32 val;
-
- /* can only fill up to the FIFO size */
- tx_bytes = min_t(unsigned int, tx_bytes, M_TX_RX_FIFO_SIZE);
- for (i = 0; i < tx_bytes; i++) {
- /* start from where we left over */
- unsigned int idx = iproc_i2c->tx_bytes + i;
+static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset, u32 val)
+{
+ if (iproc_i2c->idm_base) {
+ spin_lock(&iproc_i2c->idm_lock);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ writel(val, iproc_i2c->base + offset);
+ spin_unlock(&iproc_i2c->idm_lock);
+ } else {
+ writel(val, iproc_i2c->base + offset);
+ }
+}
- val = msg->buf[idx];
+static void bcm_iproc_i2c_slave_init(
+ struct bcm_iproc_i2c_dev *iproc_i2c, bool need_reset)
+{
+ u32 val;
- /* mark the last byte */
- if (idx == msg->len - 1) {
- u32 tmp;
+ if (need_reset) {
+ /* put controller in reset */
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+ val |= BIT(CFG_RESET_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- val |= BIT(M_TX_WR_STATUS_SHIFT);
+ /* wait 100 usec per spec */
+ udelay(100);
+
+ /* bring controller out of reset */
+ val &= ~(BIT(CFG_RESET_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
+ }
+
+ /* flush TX/RX FIFOs */
+ val = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val);
+
+ /* Maximum slave stretch time */
+ val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
+ val &= ~(TIM_RAND_SLAVE_STRETCH_MASK << TIM_RAND_SLAVE_STRETCH_SHIFT);
+ val |= (SLAVE_CLOCK_STRETCH_TIME << TIM_RAND_SLAVE_STRETCH_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
+
+ /* Configure the slave address */
+ val = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ val |= BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ val &= ~(S_CFG_NIC_SMB_ADDR3_MASK << S_CFG_NIC_SMB_ADDR3_SHIFT);
+ val |= (iproc_i2c->slave->addr << S_CFG_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, val);
+
+ /* clear all pending slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
+
+ /* Enable the READ event interrupt */
+ val = BIT(IE_S_RD_EVENT_SHIFT);
+ /* Enable the RX event interrupt (valid byte in the receive FIFO) */
+ val |= BIT(IE_S_RX_EVENT_SHIFT);
+ /* Enable the slave START_BUSY interrupt */
+ val |= BIT(IE_S_START_BUSY_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+ iproc_i2c->xfer_dir = I2C_SLAVE_DIR_NONE;
+}
+
+static void bcm_iproc_i2c_check_slave_status(
+ struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ u32 val;
+
+ val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+ val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+
+ if (val == S_CMD_STATUS_TIMEOUT) {
+ dev_err(iproc_i2c->device, "slave random stretch time timeout\n");
+
+ /* re-initialize i2c for recovery */
+ bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ bcm_iproc_i2c_slave_init(iproc_i2c, true);
+ bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+ }
+}
+
+static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 status)
+{
+ u8 value;
+ u32 val;
+ u32 rd_status;
+ u32 tmp;
+
+ /* Start of a transaction: determine and record the transfer direction */
+ if (iproc_i2c->xfer_dir == I2C_SLAVE_DIR_NONE) {
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+ rd_status = (tmp >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
+ /* Check whether this is the start of a new request */
+ if (((rd_status == I2C_SLAVE_RX_START) &&
+ (status & BIT(IS_S_RX_EVENT_SHIFT))) ||
+ ((rd_status == I2C_SLAVE_RX_END) &&
+ (status & BIT(IS_S_RD_EVENT_SHIFT)))) {
+
+ /* The last bit is the R/W bit.
+ * If it is 1, the master is requesting a read,
+ * i.e. the slave must transmit.
+ */
+ iproc_i2c->xfer_dir = tmp & SLAVE_READ_WRITE_BIT_MASK;
+ if (iproc_i2c->xfer_dir == I2C_SLAVE_DIR_WRITE)
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_READ_REQUESTED, &value);
+ else
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_WRITE_REQUESTED, &value);
+ }
+ }
+
+ /* read request from master */
+ if ((status & BIT(IS_S_RD_EVENT_SHIFT)) &&
+ (iproc_i2c->xfer_dir == I2C_SLAVE_DIR_WRITE)) {
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_READ_PROCESSED, &value);
+ iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
+
+ val = BIT(S_CMD_START_BUSY_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
+ }
+
+ /* write request from master */
+ if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
+ (iproc_i2c->xfer_dir == I2C_SLAVE_DIR_READ)) {
+ val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+ /* This is a write request from the master to the slave.
+ * Read the data present in the receive FIFO.
+ */
+ value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_WRITE_RECEIVED, &value);
+
+ /* check the status for the last byte of the transaction */
+ rd_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
+ if (rd_status == I2C_SLAVE_RX_END)
+ iproc_i2c->xfer_dir = I2C_SLAVE_DIR_NONE;
+
+ dev_dbg(iproc_i2c->device, "\nread value = 0x%x\n", value);
+ }
+
+ /* Stop */
+ if (status & BIT(IS_S_START_BUSY_SHIFT)) {
+ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
+ iproc_i2c->xfer_dir = I2C_SLAVE_DIR_NONE;
+ }
+
+ /* clear interrupt status */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
+
+ bcm_iproc_i2c_check_slave_status(iproc_i2c);
+ return true;
+}
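/*
 * Editor's aside (illustrative, not part of this patch): the
 * i2c_slave_event() calls in bcm_iproc_i2c_slave_isr() above are consumed
 * by a slave backend registered on this adapter via i2c_slave_register().
 * The sketch below only illustrates that event flow for a hypothetical
 * single-byte backend; all "sketch_*" names are invented.
 */
struct sketch_slave {
	u8 byte;
};

static int sketch_slave_cb(struct i2c_client *client,
			   enum i2c_slave_event event, u8 *val)
{
	struct sketch_slave *s = i2c_get_clientdata(client);

	switch (event) {
	case I2C_SLAVE_WRITE_RECEIVED:
		/* byte received from the remote master (IS_S_RX_EVENT path) */
		s->byte = *val;
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		/* byte to transmit to the remote master (IS_S_RD_EVENT path) */
		*val = s->byte;
		break;
	case I2C_SLAVE_WRITE_REQUESTED:
	case I2C_SLAVE_STOP:
	default:
		break;
	}

	return 0;
}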
+
+static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ struct i2c_msg *msg = iproc_i2c->msg;
+
+ /* Read valid data from RX FIFO */
+ while (iproc_i2c->rx_bytes < msg->len) {
+ if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT)
+ & M_FIFO_RX_CNT_MASK))
+ break;
+
+ msg->buf[iproc_i2c->rx_bytes] =
+ (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >>
+ M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+ iproc_i2c->rx_bytes++;
+ }
+}
+
+static void bcm_iproc_i2c_send(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ struct i2c_msg *msg = iproc_i2c->msg;
+ unsigned int tx_bytes = msg->len - iproc_i2c->tx_bytes;
+ unsigned int i;
+ u32 val;
+
+ /* can only fill up to the FIFO size */
+ tx_bytes = min_t(unsigned int, tx_bytes, M_TX_RX_FIFO_SIZE);
+ for (i = 0; i < tx_bytes; i++) {
+ /* start from where we left over */
+ unsigned int idx = iproc_i2c->tx_bytes + i;
+
+ val = msg->buf[idx];
+
+ /* mark the last byte */
+ if (idx == msg->len - 1) {
+ val |= BIT(M_TX_WR_STATUS_SHIFT);
+
+ if (iproc_i2c->irq) {
+ u32 tmp;
/*
- * Since this is the last byte, we should
- * now disable TX FIFO underrun interrupt
+ * Since this is the last byte, we should now
+ * disable TX FIFO underrun interrupt
*/
- tmp = readl(iproc_i2c->base + IE_OFFSET);
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
tmp &= ~BIT(IE_M_TX_UNDERRUN_SHIFT);
- writel(tmp, iproc_i2c->base + IE_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+ tmp);
}
+ }
+
+ /* load data into TX FIFO */
+ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
+ }
- /* load data into TX FIFO */
- writel(val, iproc_i2c->base + M_TX_OFFSET);
+ /* update number of transferred bytes */
+ iproc_i2c->tx_bytes += tx_bytes;
+}
+
+static void bcm_iproc_i2c_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ struct i2c_msg *msg = iproc_i2c->msg;
+ u32 bytes_left, val;
+
+ bcm_iproc_i2c_read_valid_bytes(iproc_i2c);
+ bytes_left = msg->len - iproc_i2c->rx_bytes;
+ if (bytes_left == 0) {
+ if (iproc_i2c->irq) {
+ /* finished reading all data, disable rx thld event */
+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ val &= ~BIT(IS_M_RX_THLD_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
- /* update number of transferred bytes */
- iproc_i2c->tx_bytes += tx_bytes;
+ } else if (bytes_left < iproc_i2c->thld_bytes) {
+ /* set bytes left as threshold */
+ val = iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET);
+ val &= ~(M_FIFO_RX_THLD_MASK << M_FIFO_RX_THLD_SHIFT);
+ val |= (bytes_left << M_FIFO_RX_THLD_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
+ iproc_i2c->thld_bytes = bytes_left;
}
+ /*
+ * Otherwise bytes_left >= iproc_i2c->thld_bytes, so the RX FIFO
+ * threshold that is already programmed (iproc_i2c->thld_bytes)
+ * can be left unchanged.
+ */
+}
+
+static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 status)
+{
+ /* TX FIFO is empty and we have more data to send */
+ if (status & BIT(IS_M_TX_UNDERRUN_SHIFT))
+ bcm_iproc_i2c_send(iproc_i2c);
+ /* RX FIFO threshold is reached and data needs to be read out */
+ if (status & BIT(IS_M_RX_THLD_SHIFT))
+ bcm_iproc_i2c_read(iproc_i2c);
+
+ /* transfer is done */
if (status & BIT(IS_M_START_BUSY_SHIFT)) {
iproc_i2c->xfer_is_done = 1;
- complete(&iproc_i2c->done);
+ if (iproc_i2c->irq)
+ complete(&iproc_i2c->done);
}
+}
- writel(status, iproc_i2c->base + IS_OFFSET);
+static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = data;
+ u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
+ bool ret;
+ u32 sl_status = status & ISR_MASK_SLAVE;
+
+ if (sl_status) {
+ ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status);
+ if (ret)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+ }
+
+ status &= ISR_MASK;
+ if (!status)
+ return IRQ_NONE;
+
+ /* process all master-mode events */
+ bcm_iproc_i2c_process_m_event(iproc_i2c, status);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
return IRQ_HANDLED;
}
@@ -171,26 +537,29 @@ static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
u32 val;
/* put controller in reset */
- val = readl(iproc_i2c->base + CFG_OFFSET);
- val |= 1 << CFG_RESET_SHIFT;
- val &= ~(1 << CFG_EN_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+ val |= BIT(CFG_RESET_SHIFT);
+ val &= ~(BIT(CFG_EN_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
/* wait 100 usec per spec */
udelay(100);
/* bring controller out of reset */
- val &= ~(1 << CFG_RESET_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
+ val &= ~(BIT(CFG_RESET_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
/* flush TX/RX FIFOs and set RX FIFO threshold to zero */
- val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
- writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+ val = (BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
/* disable all interrupts */
- writel(0, iproc_i2c->base + IE_OFFSET);
+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ val &= ~(IE_M_ALL_INTERRUPT_MASK <<
+ IE_M_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
/* clear all pending interrupts */
- writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff);
return 0;
}
@@ -200,12 +569,12 @@ static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
{
u32 val;
- val = readl(iproc_i2c->base + CFG_OFFSET);
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
if (enable)
val |= BIT(CFG_EN_SHIFT);
else
val &= ~BIT(CFG_EN_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
}
static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
@@ -213,7 +582,7 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
{
u32 val;
- val = readl(iproc_i2c->base + M_CMD_OFFSET);
+ val = iproc_i2c_rd_reg(iproc_i2c, M_CMD_OFFSET);
val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK;
switch (val) {
@@ -236,6 +605,14 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
dev_dbg(iproc_i2c->device, "bus timeout\n");
return -ETIMEDOUT;
+ case M_CMD_STATUS_FIFO_UNDERRUN:
+ dev_dbg(iproc_i2c->device, "FIFO under-run\n");
+ return -ENXIO;
+
+ case M_CMD_STATUS_RX_FIFO_FULL:
+ dev_dbg(iproc_i2c->device, "RX FIFO full\n");
+ return -ETIMEDOUT;
+
default:
dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
@@ -248,18 +625,76 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
}
}
+static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
+ struct i2c_msg *msg,
+ u32 cmd)
+{
+ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC);
+ u32 val, status;
+ int ret;
+
+ iproc_i2c_wr_reg(iproc_i2c, M_CMD_OFFSET, cmd);
+
+ if (iproc_i2c->irq) {
+ time_left = wait_for_completion_timeout(&iproc_i2c->done,
+ time_left);
+ /* disable all interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0);
+ /* read it back to flush the write */
+ iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ /* make sure the interrupt handler isn't running */
+ synchronize_irq(iproc_i2c->irq);
+
+ } else { /* polling mode */
+ unsigned long timeout = jiffies + time_left;
+
+ do {
+ status = iproc_i2c_rd_reg(iproc_i2c,
+ IS_OFFSET) & ISR_MASK;
+ bcm_iproc_i2c_process_m_event(iproc_i2c, status);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
+
+ if (time_after(jiffies, timeout)) {
+ time_left = 0;
+ break;
+ }
+
+ cpu_relax();
+ cond_resched();
+ } while (!iproc_i2c->xfer_is_done);
+ }
+
+ if (!time_left && !iproc_i2c->xfer_is_done) {
+ dev_err(iproc_i2c->device, "transaction timed out\n");
+
+ /* flush both TX/RX FIFOs */
+ val = BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
+ return -ETIMEDOUT;
+ }
+
+ ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
+ if (ret) {
+ /* flush both TX/RX FIFOs */
+ val = BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
+ return ret;
+ }
+
+ return 0;
+}
+
static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg)
{
- int ret, i;
+ int i;
u8 addr;
- u32 val;
+ u32 val, tmp, val_intr_en;
unsigned int tx_bytes;
- unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC);
/* check if bus is busy */
- if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
- BIT(M_CMD_START_BUSY_SHIFT))) {
+ if (!!(iproc_i2c_rd_reg(iproc_i2c,
+ M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) {
dev_warn(iproc_i2c->device, "bus is busy\n");
return -EBUSY;
}
@@ -268,7 +703,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
/* format and load slave address into the TX FIFO */
addr = i2c_8bit_addr_from_msg(msg);
- writel(addr, iproc_i2c->base + M_TX_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, addr);
/*
* For a write transaction, load data into the TX FIFO. Only allow
@@ -282,15 +717,17 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
/* mark the last byte */
if (i == msg->len - 1)
- val |= 1 << M_TX_WR_STATUS_SHIFT;
+ val |= BIT(M_TX_WR_STATUS_SHIFT);
- writel(val, iproc_i2c->base + M_TX_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
}
iproc_i2c->tx_bytes = tx_bytes;
}
/* mark as incomplete before starting the transaction */
- reinit_completion(&iproc_i2c->done);
+ if (iproc_i2c->irq)
+ reinit_completion(&iproc_i2c->done);
+
iproc_i2c->xfer_is_done = 0;
/*
@@ -298,7 +735,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
* transaction is done, i.e., the internal start_busy bit, transitions
* from 1 to 0.
*/
- val = BIT(IE_M_START_BUSY_SHIFT);
+ val_intr_en = BIT(IE_M_START_BUSY_SHIFT);
/*
* If TX data size is larger than the TX FIFO, need to enable TX
@@ -307,9 +744,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
*/
if (!(msg->flags & I2C_M_RD) &&
msg->len > iproc_i2c->tx_bytes)
- val |= BIT(IE_M_TX_UNDERRUN_SHIFT);
-
- writel(val, iproc_i2c->base + IE_OFFSET);
+ val_intr_en |= BIT(IE_M_TX_UNDERRUN_SHIFT);
/*
* Now we can activate the transfer. For a read operation, specify the
@@ -317,54 +752,31 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
*/
val = BIT(M_CMD_START_BUSY_SHIFT);
if (msg->flags & I2C_M_RD) {
+ iproc_i2c->rx_bytes = 0;
+ if (msg->len > M_RX_FIFO_MAX_THLD_VALUE)
+ iproc_i2c->thld_bytes = M_RX_FIFO_THLD_VALUE;
+ else
+ iproc_i2c->thld_bytes = msg->len;
+
+ /* set threshold value */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET);
+ tmp &= ~(M_FIFO_RX_THLD_MASK << M_FIFO_RX_THLD_SHIFT);
+ tmp |= iproc_i2c->thld_bytes << M_FIFO_RX_THLD_SHIFT;
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, tmp);
+
+ /* enable the RX threshold interrupt */
+ val_intr_en |= BIT(IE_M_RX_THLD_SHIFT);
+
val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
(msg->len << M_CMD_RD_CNT_SHIFT);
} else {
val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
}
- writel(val, iproc_i2c->base + M_CMD_OFFSET);
-
- time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left);
-
- /* disable all interrupts */
- writel(0, iproc_i2c->base + IE_OFFSET);
- /* read it back to flush the write */
- readl(iproc_i2c->base + IE_OFFSET);
-
- /* make sure the interrupt handler isn't running */
- synchronize_irq(iproc_i2c->irq);
- if (!time_left && !iproc_i2c->xfer_is_done) {
- dev_err(iproc_i2c->device, "transaction timed out\n");
+ if (iproc_i2c->irq)
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val_intr_en);
- /* flush FIFOs */
- val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
- (1 << M_FIFO_TX_FLUSH_SHIFT);
- writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
- return -ETIMEDOUT;
- }
-
- ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
- if (ret) {
- /* flush both TX/RX FIFOs */
- val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
- (1 << M_FIFO_TX_FLUSH_SHIFT);
- writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
- return ret;
- }
-
- /*
- * For a read operation, we now need to load the data from FIFO
- * into the memory buffer
- */
- if (msg->flags & I2C_M_RD) {
- for (i = 0; i < msg->len; i++) {
- msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >>
- M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
- }
- }
-
- return 0;
+ return bcm_iproc_i2c_xfer_wait(iproc_i2c, msg, val);
}
static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
@@ -387,17 +799,23 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+
+ if (adap->algo->reg_slave)
+ val |= I2C_FUNC_SLAVE;
+
+ return val;
}
-static const struct i2c_algorithm bcm_iproc_algo = {
+static struct i2c_algorithm bcm_iproc_algo = {
.master_xfer = bcm_iproc_i2c_xfer,
.functionality = bcm_iproc_i2c_functionality,
+ .reg_slave = bcm_iproc_i2c_reg_slave,
+ .unreg_slave = bcm_iproc_i2c_unreg_slave,
};
-static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
- /* need to reserve one byte in the FIFO for the slave address */
- .max_read_len = M_TX_RX_FIFO_SIZE - 1,
+static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+ .max_read_len = M_RX_MAX_READ_LEN,
};
static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
@@ -425,10 +843,10 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
}
iproc_i2c->bus_speed = bus_speed;
- val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
- val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
+ val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
+ val &= ~BIT(TIM_CFG_MODE_400_SHIFT);
val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
- writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
@@ -449,6 +867,8 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iproc_i2c);
iproc_i2c->device = &pdev->dev;
+ iproc_i2c->type =
+ (enum bcm_iproc_i2c_type)of_device_get_match_data(&pdev->dev);
init_completion(&iproc_i2c->done);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -456,6 +876,29 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
if (IS_ERR(iproc_i2c->base))
return PTR_ERR(iproc_i2c->base);
+ if (iproc_i2c->type == IPROC_I2C_NIC) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ iproc_i2c->idm_base = devm_ioremap_resource(iproc_i2c->device,
+ res);
+ if (IS_ERR(iproc_i2c->idm_base))
+ return PTR_ERR(iproc_i2c->idm_base);
+
+ ret = of_property_read_u32(iproc_i2c->device->of_node,
+ "brcm,ape-hsls-addr-mask",
+ &iproc_i2c->ape_addr_mask);
+ if (ret < 0) {
+ dev_err(iproc_i2c->device,
+ "'brcm,ape-hsls-addr-mask' missing\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&iproc_i2c->idm_lock);
+
+ /* no slave support */
+ bcm_iproc_algo.reg_slave = NULL;
+ bcm_iproc_algo.unreg_slave = NULL;
+ }
+
ret = bcm_iproc_i2c_init(iproc_i2c);
if (ret)
return ret;
@@ -465,17 +908,20 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
return ret;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(iproc_i2c->device, "no irq resource\n");
- return irq;
- }
- iproc_i2c->irq = irq;
+ if (irq > 0) {
+ ret = devm_request_irq(iproc_i2c->device, irq,
+ bcm_iproc_i2c_isr, 0, pdev->name,
+ iproc_i2c);
+ if (ret < 0) {
+ dev_err(iproc_i2c->device,
+ "unable to request irq %i\n", irq);
+ return ret;
+ }
- ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
- pdev->name, iproc_i2c);
- if (ret < 0) {
- dev_err(iproc_i2c->device, "unable to request irq %i\n", irq);
- return ret;
+ iproc_i2c->irq = irq;
+ } else {
+ dev_warn(iproc_i2c->device,
+ "no irq resource, falling back to poll mode\n");
}
bcm_iproc_i2c_enable_disable(iproc_i2c, true);
@@ -495,10 +941,15 @@ static int bcm_iproc_i2c_remove(struct platform_device *pdev)
{
struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
- /* make sure there's no pending interrupt when we remove the adapter */
- writel(0, iproc_i2c->base + IE_OFFSET);
- readl(iproc_i2c->base + IE_OFFSET);
- synchronize_irq(iproc_i2c->irq);
+ if (iproc_i2c->irq) {
+ /*
+ * Make sure there's no pending interrupt when we remove the
+ * adapter
+ */
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0);
+ iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ synchronize_irq(iproc_i2c->irq);
+ }
i2c_del_adapter(&iproc_i2c->adapter);
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
@@ -512,10 +963,15 @@ static int bcm_iproc_i2c_suspend(struct device *dev)
{
struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev);
- /* make sure there's no pending interrupt when we go into suspend */
- writel(0, iproc_i2c->base + IE_OFFSET);
- readl(iproc_i2c->base + IE_OFFSET);
- synchronize_irq(iproc_i2c->irq);
+ if (iproc_i2c->irq) {
+ /*
+ * Make sure there's no pending interrupt when we go into
+ * suspend
+ */
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0);
+ iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ synchronize_irq(iproc_i2c->irq);
+ }
/* now disable the controller */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
@@ -538,10 +994,10 @@ static int bcm_iproc_i2c_resume(struct device *dev)
return ret;
/* configure to the desired bus speed */
- val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
- val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
+ val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
+ val &= ~BIT(TIM_CFG_MODE_400_SHIFT);
val |= (iproc_i2c->bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
- writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
bcm_iproc_i2c_enable_disable(iproc_i2c, true);
@@ -558,8 +1014,54 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = {
#define BCM_IPROC_I2C_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
+
+static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+
+ if (iproc_i2c->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ iproc_i2c->slave = slave;
+ bcm_iproc_i2c_slave_init(iproc_i2c, false);
+ return 0;
+}
+
+static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+{
+ u32 tmp;
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+
+ if (!iproc_i2c->slave)
+ return -EINVAL;
+
+ iproc_i2c->slave = NULL;
+
+ /* disable all slave interrupts */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
+ IE_S_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+
+ /* Erase the programmed slave address */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
+
+ return 0;
+}
+
static const struct of_device_id bcm_iproc_i2c_of_match[] = {
- { .compatible = "brcm,iproc-i2c" },
+ {
+ .compatible = "brcm,iproc-i2c",
+ .data = (int *)IPROC_I2C,
+ }, {
+ .compatible = "brcm,iproc-nic-i2c",
+ .data = (int *)IPROC_I2C_NIC,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match);
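/*
 * Editor's aside (illustrative, not part of this patch): storing an enum in
 * of_device_id.data, as done above, round-trips the value through a pointer.
 * A common equivalent spelling casts through uintptr_t to avoid
 * pointer-width warnings; every "sketch_*" and "vendor,*" name below is
 * invented for illustration only.
 */
enum sketch_type { SKETCH_PLAIN, SKETCH_NIC };

static const struct of_device_id sketch_of_match[] = {
	{ .compatible = "vendor,sketch-i2c", .data = (void *)SKETCH_PLAIN },
	{ .compatible = "vendor,sketch-nic-i2c", .data = (void *)SKETCH_NIC },
	{ /* sentinel */ }
};

static int sketch_probe(struct platform_device *pdev)
{
	enum sketch_type type;

	/* recover the enum stored in .data above */
	type = (enum sketch_type)(uintptr_t)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "probed variant %d\n", type);
	return 0;
}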
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index f4d862234980..506991596b68 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -165,7 +165,6 @@ static const struct bsc_clk_param bsc_clk[] = {
struct brcmstb_i2c_dev {
struct device *device;
void __iomem *base;
- void __iomem *irq_base;
int irq;
struct bsc_regs *bsc_regmap;
struct i2c_adapter adapter;
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index a4730111d290..2de7452fcd6d 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -251,13 +251,27 @@ unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare)
{
+ int ret;
+
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
- if (prepare)
- return clk_prepare_enable(dev->clk);
+ if (prepare) {
+ /* Optional interface clock */
+ ret = clk_prepare_enable(dev->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret)
+ clk_disable_unprepare(dev->pclk);
+
+ return ret;
+ }
clk_disable_unprepare(dev->clk);
+ clk_disable_unprepare(dev->pclk);
+
return 0;
}
EXPORT_SYMBOL_GPL(i2c_dw_prepare_clk);
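/*
 * Editor's aside (illustrative, not part of this patch): dev->pclk can be
 * absent because devm_clk_get_optional() returns NULL, rather than an error,
 * when the clock is not described, and the clk API treats a NULL clk as a
 * no-op. A generic sketch of that pattern, with invented "sketch_*" names:
 */
static int sketch_get_and_enable_clocks(struct device *dev,
					struct clk **bus_clk,
					struct clk **iface_clk)
{
	int ret;

	/* optional interface clock: NULL if the firmware omits it */
	*iface_clk = devm_clk_get_optional(dev, "pclk");
	if (IS_ERR(*iface_clk))
		return PTR_ERR(*iface_clk);

	*bus_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*bus_clk))
		return PTR_ERR(*bus_clk);

	ret = clk_prepare_enable(*iface_clk);	/* no-op when NULL */
	if (ret)
		return ret;

	ret = clk_prepare_enable(*bus_clk);
	if (ret)
		clk_disable_unprepare(*iface_clk);

	return ret;
}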
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 6b4ef1d38fb2..67edbbde1070 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -177,6 +177,7 @@
* @base: IO registers pointer
* @cmd_complete: tx completion indicator
* @clk: input reference clock
+ * @pclk: clock required to access the registers
* @slave: represent an I2C slave device
* @cmd_err: run time hardware error code
* @msgs: points to an array of messages currently being transferred
@@ -227,6 +228,7 @@ struct dw_i2c_dev {
void __iomem *ext;
struct completion cmd_complete;
struct clk *clk;
+ struct clk *pclk;
struct reset_control *rst;
struct i2c_client *slave;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index bb8e3f149979..d464799e40a3 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -426,8 +426,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
pm_runtime_get_sync(dev->dev);
- if (dev->suspended) {
- dev_err(dev->dev, "Error %s call while suspended\n", __func__);
+ if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
ret = -ESHUTDOWN;
goto done_nolock;
}
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 416f89b8f881..ddfb81872906 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -344,6 +344,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
else
i2c_dw_configure_master(dev);
+ /* Optional interface clock */
+ dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(dev->pclk))
+ return PTR_ERR(dev->pclk);
+
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (!i2c_dw_prepare_clk(dev, true)) {
u64 clk_khz;
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index bba5c4627de3..9684a0ac2a6d 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -413,6 +413,8 @@ static int i2c_gpio_probe(struct platform_device *pdev)
if (gpiod_cansleep(priv->sda) || gpiod_cansleep(priv->scl))
dev_warn(dev, "Slow GPIO pins might wreak havoc into I2C/SMBus bus timing");
+ else
+ bit_data->can_do_atomic = true;
bit_data->setsda = i2c_gpio_setsda_val;
bit_data->setscl = i2c_gpio_setscl_val;
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 06c4c767af32..dc00fabc919a 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -639,8 +639,7 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int lpi2c_runtime_suspend(struct device *dev)
+static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
@@ -650,7 +649,7 @@ static int lpi2c_runtime_suspend(struct device *dev)
return 0;
}
-static int lpi2c_runtime_resume(struct device *dev)
+static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
int ret;
@@ -671,10 +670,6 @@ static const struct dev_pm_ops lpi2c_pm_ops = {
SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
lpi2c_runtime_resume, NULL)
};
-#define IMX_LPI2C_PM (&lpi2c_pm_ops)
-#else
-#define IMX_LPI2C_PM NULL
-#endif
static struct platform_driver lpi2c_imx_driver = {
.probe = lpi2c_imx_probe,
@@ -682,7 +677,7 @@ static struct platform_driver lpi2c_imx_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = lpi2c_imx_of_match,
- .pm = IMX_LPI2C_PM,
+ .pm = &lpi2c_pm_ops,
},
};
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 42fed40198a0..fd70b110e8f4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -515,9 +515,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
{
struct clk_notifier_data *ndata = data;
- struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk,
+ struct imx_i2c_struct *i2c_imx = container_of(nb,
struct imx_i2c_struct,
- clk);
+ clk_change_nb);
if (action & POST_RATE_CHANGE)
i2c_imx_set_clk(i2c_imx, ndata->new_rate);
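/*
 * Editor's aside (illustrative, not part of this patch): the fix above uses
 * the usual pattern of recovering the driver structure from the embedded
 * notifier_block passed to the callback, instead of taking container_of()
 * of a field inside the notifier data. Generic sketch with invented
 * "sketch_*" names:
 */
struct sketch_dev {
	struct clk *clk;
	unsigned long cur_rate;
	struct notifier_block clk_nb;	/* embedded in the private struct */
};

static int sketch_clk_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* recover the enclosing structure from the embedded member */
	struct sketch_dev *sd = container_of(nb, struct sketch_dev, clk_nb);
	struct clk_notifier_data *ndata = data;

	if (action & POST_RATE_CHANGE)
		sd->cur_rate = ndata->new_rate;

	return NOTIFY_OK;
}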
@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
/* Init DMA config if supported */
ret = i2c_imx_dma_request(i2c_imx, phy_addr);
if (ret < 0)
- goto clk_notifier_unregister;
+ goto del_adapter;
dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
return 0; /* Return OK */
+del_adapter:
+ i2c_del_adapter(&i2c_imx->adapter);
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
rpm_disable:
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 5c754bf659e2..f64c1d72f73f 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -30,7 +30,6 @@
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/io.h>
-#include <linux/acpi.h>
/* SCH SMBus address offsets */
#define SMBHSTCNT (0 + sch_smba)
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 684d651612b3..745b0d03e883 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#define I2C_RS_TRANSFER (1 << 4)
+#define I2C_ARB_LOST (1 << 3)
#define I2C_HS_NACKERR (1 << 2)
#define I2C_ACKERR (1 << 1)
#define I2C_TRANSAC_COMP (1 << 0)
@@ -76,6 +77,8 @@
#define I2C_CONTROL_DIR_CHANGE (0x1 << 4)
#define I2C_CONTROL_ACKERR_DET_EN (0x1 << 5)
#define I2C_CONTROL_TRANSFER_LEN_CHANGE (0x1 << 6)
+#define I2C_CONTROL_DMAACK_EN (0x1 << 8)
+#define I2C_CONTROL_ASYNC_MODE (0x1 << 9)
#define I2C_CONTROL_WRAPPER (0x1 << 0)
#define I2C_DRV_NAME "i2c-mt65xx"
@@ -106,40 +109,97 @@ enum mtk_trans_op {
};
enum I2C_REGS_OFFSET {
- OFFSET_DATA_PORT = 0x0,
- OFFSET_SLAVE_ADDR = 0x04,
- OFFSET_INTR_MASK = 0x08,
- OFFSET_INTR_STAT = 0x0c,
- OFFSET_CONTROL = 0x10,
- OFFSET_TRANSFER_LEN = 0x14,
- OFFSET_TRANSAC_LEN = 0x18,
- OFFSET_DELAY_LEN = 0x1c,
- OFFSET_TIMING = 0x20,
- OFFSET_START = 0x24,
- OFFSET_EXT_CONF = 0x28,
- OFFSET_FIFO_STAT = 0x30,
- OFFSET_FIFO_THRESH = 0x34,
- OFFSET_FIFO_ADDR_CLR = 0x38,
- OFFSET_IO_CONFIG = 0x40,
- OFFSET_RSV_DEBUG = 0x44,
- OFFSET_HS = 0x48,
- OFFSET_SOFTRESET = 0x50,
- OFFSET_DCM_EN = 0x54,
- OFFSET_PATH_DIR = 0x60,
- OFFSET_DEBUGSTAT = 0x64,
- OFFSET_DEBUGCTRL = 0x68,
- OFFSET_TRANSFER_LEN_AUX = 0x6c,
- OFFSET_CLOCK_DIV = 0x70,
+ OFFSET_DATA_PORT,
+ OFFSET_SLAVE_ADDR,
+ OFFSET_INTR_MASK,
+ OFFSET_INTR_STAT,
+ OFFSET_CONTROL,
+ OFFSET_TRANSFER_LEN,
+ OFFSET_TRANSAC_LEN,
+ OFFSET_DELAY_LEN,
+ OFFSET_TIMING,
+ OFFSET_START,
+ OFFSET_EXT_CONF,
+ OFFSET_FIFO_STAT,
+ OFFSET_FIFO_THRESH,
+ OFFSET_FIFO_ADDR_CLR,
+ OFFSET_IO_CONFIG,
+ OFFSET_RSV_DEBUG,
+ OFFSET_HS,
+ OFFSET_SOFTRESET,
+ OFFSET_DCM_EN,
+ OFFSET_PATH_DIR,
+ OFFSET_DEBUGSTAT,
+ OFFSET_DEBUGCTRL,
+ OFFSET_TRANSFER_LEN_AUX,
+ OFFSET_CLOCK_DIV,
+ OFFSET_LTIMING,
+};
+
+static const u16 mt_i2c_regs_v1[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+ [OFFSET_SLAVE_ADDR] = 0x4,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+ [OFFSET_TRANSFER_LEN] = 0x14,
+ [OFFSET_TRANSAC_LEN] = 0x18,
+ [OFFSET_DELAY_LEN] = 0x1c,
+ [OFFSET_TIMING] = 0x20,
+ [OFFSET_START] = 0x24,
+ [OFFSET_EXT_CONF] = 0x28,
+ [OFFSET_FIFO_STAT] = 0x30,
+ [OFFSET_FIFO_THRESH] = 0x34,
+ [OFFSET_FIFO_ADDR_CLR] = 0x38,
+ [OFFSET_IO_CONFIG] = 0x40,
+ [OFFSET_RSV_DEBUG] = 0x44,
+ [OFFSET_HS] = 0x48,
+ [OFFSET_SOFTRESET] = 0x50,
+ [OFFSET_DCM_EN] = 0x54,
+ [OFFSET_PATH_DIR] = 0x60,
+ [OFFSET_DEBUGSTAT] = 0x64,
+ [OFFSET_DEBUGCTRL] = 0x68,
+ [OFFSET_TRANSFER_LEN_AUX] = 0x6c,
+ [OFFSET_CLOCK_DIV] = 0x70,
+};
+
+static const u16 mt_i2c_regs_v2[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+ [OFFSET_SLAVE_ADDR] = 0x4,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+ [OFFSET_TRANSFER_LEN] = 0x14,
+ [OFFSET_TRANSAC_LEN] = 0x18,
+ [OFFSET_DELAY_LEN] = 0x1c,
+ [OFFSET_TIMING] = 0x20,
+ [OFFSET_START] = 0x24,
+ [OFFSET_EXT_CONF] = 0x28,
+ [OFFSET_LTIMING] = 0x2c,
+ [OFFSET_HS] = 0x30,
+ [OFFSET_IO_CONFIG] = 0x34,
+ [OFFSET_FIFO_ADDR_CLR] = 0x38,
+ [OFFSET_TRANSFER_LEN_AUX] = 0x44,
+ [OFFSET_CLOCK_DIV] = 0x48,
+ [OFFSET_SOFTRESET] = 0x50,
+ [OFFSET_DEBUGSTAT] = 0xe0,
+ [OFFSET_DEBUGCTRL] = 0xe8,
+ [OFFSET_FIFO_STAT] = 0xf4,
+ [OFFSET_FIFO_THRESH] = 0xf8,
+ [OFFSET_DCM_EN] = 0xf88,
};
struct mtk_i2c_compatible {
const struct i2c_adapter_quirks *quirks;
+ const u16 *regs;
unsigned char pmic_i2c: 1;
unsigned char dcm: 1;
unsigned char auto_restart: 1;
unsigned char aux_len_reg: 1;
unsigned char support_33bits: 1;
unsigned char timing_adjust: 1;
+ unsigned char dma_sync: 1;
+ unsigned char ltiming_adjust: 1;
};
struct mtk_i2c {
@@ -153,6 +213,7 @@ struct mtk_i2c {
struct clk *clk_main; /* main clock for i2c bus */
struct clk *clk_dma; /* DMA clock for i2c via DMA */
struct clk *clk_pmic; /* PMIC clock for i2c from PMIC */
+ struct clk *clk_arb; /* Arbitrator clock for i2c */
bool have_pmic; /* can use i2c pins from PMIC */
bool use_push_pull; /* IO config push-pull mode */
@@ -162,6 +223,7 @@ struct mtk_i2c {
enum mtk_trans_op op;
u16 timing_reg;
u16 high_speed_reg;
+ u16 ltiming_reg;
unsigned char auto_restart;
bool ignore_restart_irq;
const struct mtk_i2c_compatible *dev_comp;
@@ -181,51 +243,78 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
};
static const struct mtk_i2c_compatible mt2712_compat = {
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
.support_33bits = 1,
.timing_adjust = 1,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
};
static const struct mtk_i2c_compatible mt6577_compat = {
.quirks = &mt6577_i2c_quirks,
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 0,
.aux_len_reg = 0,
.support_33bits = 0,
.timing_adjust = 0,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
};
static const struct mtk_i2c_compatible mt6589_compat = {
.quirks = &mt6577_i2c_quirks,
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 1,
.dcm = 0,
.auto_restart = 0,
.aux_len_reg = 0,
.support_33bits = 0,
.timing_adjust = 0,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
};
static const struct mtk_i2c_compatible mt7622_compat = {
.quirks = &mt7622_i2c_quirks,
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
.support_33bits = 0,
.timing_adjust = 0,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
};
static const struct mtk_i2c_compatible mt8173_compat = {
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
.support_33bits = 1,
.timing_adjust = 0,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
+};
+
+static const struct mtk_i2c_compatible mt8183_compat = {
+ .regs = mt_i2c_regs_v2,
+ .pmic_i2c = 0,
+ .dcm = 0,
+ .auto_restart = 1,
+ .aux_len_reg = 1,
+ .support_33bits = 1,
+ .timing_adjust = 1,
+ .dma_sync = 1,
+ .ltiming_adjust = 1,
};
static const struct of_device_id mtk_i2c_of_match[] = {
@@ -234,10 +323,22 @@ static const struct of_device_id mtk_i2c_of_match[] = {
{ .compatible = "mediatek,mt6589-i2c", .data = &mt6589_compat },
{ .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat },
{ .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
+ { .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
{}
};
MODULE_DEVICE_TABLE(of, mtk_i2c_of_match);
+static u16 mtk_i2c_readw(struct mtk_i2c *i2c, enum I2C_REGS_OFFSET reg)
+{
+ return readw(i2c->base + i2c->dev_comp->regs[reg]);
+}
+
+static void mtk_i2c_writew(struct mtk_i2c *i2c, u16 val,
+ enum I2C_REGS_OFFSET reg)
+{
+ writew(val, i2c->base + i2c->dev_comp->regs[reg]);
+}
+
static int mtk_i2c_clock_enable(struct mtk_i2c *i2c)
{
int ret;
@@ -255,8 +356,18 @@ static int mtk_i2c_clock_enable(struct mtk_i2c *i2c)
if (ret)
goto err_pmic;
}
+
+ if (i2c->clk_arb) {
+ ret = clk_prepare_enable(i2c->clk_arb);
+ if (ret)
+ goto err_arb;
+ }
+
return 0;
+err_arb:
+ if (i2c->have_pmic)
+ clk_disable_unprepare(i2c->clk_pmic);
err_pmic:
clk_disable_unprepare(i2c->clk_main);
err_main:
@@ -267,6 +378,9 @@ err_main:
static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
{
+ if (i2c->clk_arb)
+ clk_disable_unprepare(i2c->clk_arb);
+
if (i2c->have_pmic)
clk_disable_unprepare(i2c->clk_pmic);
@@ -278,31 +392,36 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- writew(I2C_SOFT_RST, i2c->base + OFFSET_SOFTRESET);
+ mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
/* Set ioconfig */
if (i2c->use_push_pull)
- writew(I2C_IO_CONFIG_PUSH_PULL, i2c->base + OFFSET_IO_CONFIG);
+ mtk_i2c_writew(i2c, I2C_IO_CONFIG_PUSH_PULL, OFFSET_IO_CONFIG);
else
- writew(I2C_IO_CONFIG_OPEN_DRAIN, i2c->base + OFFSET_IO_CONFIG);
+ mtk_i2c_writew(i2c, I2C_IO_CONFIG_OPEN_DRAIN, OFFSET_IO_CONFIG);
if (i2c->dev_comp->dcm)
- writew(I2C_DCM_DISABLE, i2c->base + OFFSET_DCM_EN);
+ mtk_i2c_writew(i2c, I2C_DCM_DISABLE, OFFSET_DCM_EN);
if (i2c->dev_comp->timing_adjust)
- writew(I2C_DEFAULT_CLK_DIV - 1, i2c->base + OFFSET_CLOCK_DIV);
+ mtk_i2c_writew(i2c, I2C_DEFAULT_CLK_DIV - 1, OFFSET_CLOCK_DIV);
- writew(i2c->timing_reg, i2c->base + OFFSET_TIMING);
- writew(i2c->high_speed_reg, i2c->base + OFFSET_HS);
+ mtk_i2c_writew(i2c, i2c->timing_reg, OFFSET_TIMING);
+ mtk_i2c_writew(i2c, i2c->high_speed_reg, OFFSET_HS);
+ if (i2c->dev_comp->ltiming_adjust)
+ mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
/* If the i2c pins come from the PMIC (mt6397) side, PATH_DIR must be set first */
if (i2c->have_pmic)
- writew(I2C_CONTROL_WRAPPER, i2c->base + OFFSET_PATH_DIR);
+ mtk_i2c_writew(i2c, I2C_CONTROL_WRAPPER, OFFSET_PATH_DIR);
control_reg = I2C_CONTROL_ACKERR_DET_EN |
I2C_CONTROL_CLK_EXT_EN | I2C_CONTROL_DMA_EN;
- writew(control_reg, i2c->base + OFFSET_CONTROL);
- writew(I2C_DELAY_LEN, i2c->base + OFFSET_DELAY_LEN);
+ if (i2c->dev_comp->dma_sync)
+ control_reg |= I2C_CONTROL_DMAACK_EN | I2C_CONTROL_ASYNC_MODE;
+
+ mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
+ mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN);
writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
udelay(50);
@@ -390,6 +509,8 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
unsigned int clk_src;
unsigned int step_cnt;
unsigned int sample_cnt;
+ unsigned int l_step_cnt;
+ unsigned int l_sample_cnt;
unsigned int target_speed;
int ret;
@@ -399,11 +520,11 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
if (target_speed > MAX_FS_MODE_SPEED) {
/* Set master code speed register */
ret = mtk_i2c_calculate_speed(i2c, clk_src, MAX_FS_MODE_SPEED,
- &step_cnt, &sample_cnt);
+ &l_step_cnt, &l_sample_cnt);
if (ret < 0)
return ret;
- i2c->timing_reg = (sample_cnt << 8) | step_cnt;
+ i2c->timing_reg = (l_sample_cnt << 8) | l_step_cnt;
/* Set the high speed mode register */
ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed,
@@ -413,6 +534,10 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
i2c->high_speed_reg = I2C_TIME_DEFAULT_VALUE |
(sample_cnt << 12) | (step_cnt << 8);
+
+ if (i2c->dev_comp->ltiming_adjust)
+ i2c->ltiming_reg = (l_sample_cnt << 6) | l_step_cnt |
+ (sample_cnt << 12) | (step_cnt << 9);
} else {
ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed,
&step_cnt, &sample_cnt);
@@ -423,6 +548,9 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
/* Disable the high speed transaction */
i2c->high_speed_reg = I2C_TIME_CLR_VALUE;
+
+ if (i2c->dev_comp->ltiming_adjust)
+ i2c->ltiming_reg = (sample_cnt << 6) | step_cnt;
}
return 0;
@@ -454,7 +582,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
reinit_completion(&i2c->msg_complete);
- control_reg = readw(i2c->base + OFFSET_CONTROL) &
+ control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
if ((i2c->speed_hz > MAX_FS_MODE_SPEED) || (left_num >= 1))
control_reg |= I2C_CONTROL_RS;
@@ -462,40 +590,41 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
if (i2c->op == I2C_MASTER_WRRD)
control_reg |= I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS;
- writew(control_reg, i2c->base + OFFSET_CONTROL);
+ mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
/* set start condition */
if (i2c->speed_hz <= I2C_DEFAULT_SPEED)
- writew(I2C_ST_START_CON, i2c->base + OFFSET_EXT_CONF);
+ mtk_i2c_writew(i2c, I2C_ST_START_CON, OFFSET_EXT_CONF);
else
- writew(I2C_FS_START_CON, i2c->base + OFFSET_EXT_CONF);
+ mtk_i2c_writew(i2c, I2C_FS_START_CON, OFFSET_EXT_CONF);
addr_reg = i2c_8bit_addr_from_msg(msgs);
- writew(addr_reg, i2c->base + OFFSET_SLAVE_ADDR);
+ mtk_i2c_writew(i2c, addr_reg, OFFSET_SLAVE_ADDR);
/* Clear interrupt status */
- writew(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
- I2C_TRANSAC_COMP, i2c->base + OFFSET_INTR_STAT);
- writew(I2C_FIFO_ADDR_CLR, i2c->base + OFFSET_FIFO_ADDR_CLR);
+ mtk_i2c_writew(i2c, restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+ I2C_ARB_LOST | I2C_TRANSAC_COMP, OFFSET_INTR_STAT);
+
+ mtk_i2c_writew(i2c, I2C_FIFO_ADDR_CLR, OFFSET_FIFO_ADDR_CLR);
/* Enable interrupt */
- writew(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
- I2C_TRANSAC_COMP, i2c->base + OFFSET_INTR_MASK);
+ mtk_i2c_writew(i2c, restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+ I2C_ARB_LOST | I2C_TRANSAC_COMP, OFFSET_INTR_MASK);
/* Set transfer and transaction len */
if (i2c->op == I2C_MASTER_WRRD) {
if (i2c->dev_comp->aux_len_reg) {
- writew(msgs->len, i2c->base + OFFSET_TRANSFER_LEN);
- writew((msgs + 1)->len, i2c->base +
- OFFSET_TRANSFER_LEN_AUX);
+ mtk_i2c_writew(i2c, msgs->len, OFFSET_TRANSFER_LEN);
+ mtk_i2c_writew(i2c, (msgs + 1)->len,
+ OFFSET_TRANSFER_LEN_AUX);
} else {
- writew(msgs->len | ((msgs + 1)->len) << 8,
- i2c->base + OFFSET_TRANSFER_LEN);
+ mtk_i2c_writew(i2c, msgs->len | ((msgs + 1)->len) << 8,
+ OFFSET_TRANSFER_LEN);
}
- writew(I2C_WRRD_TRANAC_VALUE, i2c->base + OFFSET_TRANSAC_LEN);
+ mtk_i2c_writew(i2c, I2C_WRRD_TRANAC_VALUE, OFFSET_TRANSAC_LEN);
} else {
- writew(msgs->len, i2c->base + OFFSET_TRANSFER_LEN);
- writew(num, i2c->base + OFFSET_TRANSAC_LEN);
+ mtk_i2c_writew(i2c, msgs->len, OFFSET_TRANSFER_LEN);
+ mtk_i2c_writew(i2c, num, OFFSET_TRANSAC_LEN);
}
/* Prepare buffer data to start transfer */
@@ -607,14 +736,14 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
if (left_num >= 1)
start_reg |= I2C_RS_MUL_CNFG;
}
- writew(start_reg, i2c->base + OFFSET_START);
+ mtk_i2c_writew(i2c, start_reg, OFFSET_START);
ret = wait_for_completion_timeout(&i2c->msg_complete,
i2c->adap.timeout);
/* Clear interrupt mask */
- writew(~(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
- I2C_TRANSAC_COMP), i2c->base + OFFSET_INTR_MASK);
+ mtk_i2c_writew(i2c, ~(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+ I2C_ARB_LOST | I2C_TRANSAC_COMP), OFFSET_INTR_MASK);
if (i2c->op == I2C_MASTER_WR) {
dma_unmap_single(i2c->dev, wpaddr,
@@ -724,8 +853,8 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
if (i2c->auto_restart)
restart_flag = I2C_RS_TRANSFER;
- intr_stat = readw(i2c->base + OFFSET_INTR_STAT);
- writew(intr_stat, i2c->base + OFFSET_INTR_STAT);
+ intr_stat = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+ mtk_i2c_writew(i2c, intr_stat, OFFSET_INTR_STAT);
/*
* when an ACK error occurs, the i2c controller generates two interrupts
@@ -737,8 +866,8 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
if (i2c->ignore_restart_irq && (i2c->irq_stat & restart_flag)) {
i2c->ignore_restart_irq = false;
i2c->irq_stat = 0;
- writew(I2C_RS_MUL_CNFG | I2C_RS_MUL_TRIG | I2C_TRANSAC_START,
- i2c->base + OFFSET_START);
+ mtk_i2c_writew(i2c, I2C_RS_MUL_CNFG | I2C_RS_MUL_TRIG |
+ I2C_TRANSAC_START, OFFSET_START);
} else {
if (i2c->irq_stat & (I2C_TRANSAC_COMP | restart_flag))
complete(&i2c->msg_complete);
@@ -839,6 +968,10 @@ static int mtk_i2c_probe(struct platform_device *pdev)
return PTR_ERR(i2c->clk_dma);
}
+ i2c->clk_arb = devm_clk_get(&pdev->dev, "arb");
+ if (IS_ERR(i2c->clk_arb))
+ i2c->clk_arb = NULL;
+
clk = i2c->clk_main;
if (i2c->have_pmic) {
i2c->clk_pmic = devm_clk_get(&pdev->dev, "pmic");
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 0ed5a41804dc..4f30a43b63da 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1070,8 +1070,7 @@ static int nmk_i2c_remove(struct amba_device *adev)
/* disable the controller */
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
clk_disable_unprepare(dev->clk);
- if (res)
- release_mem_region(res->start, resource_size(res));
+ release_mem_region(res->start, resource_size(res));
return 0;
}
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 4e67d5ed480e..1c8f708f212b 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -253,6 +253,12 @@ static const struct pci_device_id gpu_i2c_ids[] = {
};
MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+static const struct property_entry ccgx_props[] = {
+ /* Use FW built for NVIDIA (nv) only */
+ PROPERTY_ENTRY_U16("ccgx,firmware-build", ('n' << 8) | 'v'),
+ { }
+};
+
static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
{
struct i2c_client *ccgx_client;
@@ -267,6 +273,7 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
sizeof(i2cd->gpu_ccgx_ucsi->type));
i2cd->gpu_ccgx_ucsi->addr = 0x8;
i2cd->gpu_ccgx_ucsi->irq = irq;
+ i2cd->gpu_ccgx_ucsi->properties = ccgx_props;
ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
if (!ccgx_client)
return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 4e1a077fb688..c3dabee0aa35 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -26,8 +26,6 @@
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-#define OCORES_FLAG_POLL BIT(0)
-
/*
* 'process_lock' exists because ocores_process() and ocores_process_timeout()
* can't run in parallel.
@@ -37,7 +35,6 @@ struct ocores_i2c {
int iobase;
u32 reg_shift;
u32 reg_io_width;
- unsigned long flags;
wait_queue_head_t wait;
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -403,11 +400,7 @@ static int ocores_xfer_polling(struct i2c_adapter *adap,
static int ocores_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
- struct ocores_i2c *i2c = i2c_get_adapdata(adap);
-
- if (i2c->flags & OCORES_FLAG_POLL)
- return ocores_xfer_polling(adap, msgs, num);
- return ocores_xfer_core(i2c, msgs, num, false);
+ return ocores_xfer_core(i2c_get_adapdata(adap), msgs, num, false);
}
static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
@@ -447,8 +440,9 @@ static u32 ocores_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static const struct i2c_algorithm ocores_algorithm = {
+static struct i2c_algorithm ocores_algorithm = {
.master_xfer = ocores_xfer,
+ .master_xfer_atomic = ocores_xfer_polling,
.functionality = ocores_func,
};
@@ -673,13 +667,13 @@ static int ocores_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq == -ENXIO) {
- i2c->flags |= OCORES_FLAG_POLL;
+ ocores_algorithm.master_xfer = ocores_xfer_polling;
} else {
if (irq < 0)
return irq;
}
- if (!(i2c->flags & OCORES_FLAG_POLL)) {
+ if (ocores_algorithm.master_xfer != ocores_xfer_polling) {
ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
pdev->name, i2c);
if (ret) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index cd9c65f3d404..faa0394048a0 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -269,6 +269,8 @@ static const u8 reg_map_ip_v2[] = {
[OMAP_I2C_IP_V2_IRQENABLE_CLR] = 0x30,
};
+static int omap_i2c_xfer_data(struct omap_i2c_dev *omap);
+
static inline void omap_i2c_write_reg(struct omap_i2c_dev *omap,
int reg, u16 val)
{
@@ -648,15 +650,28 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *omap, u8 size, bool is_rx)
(1000 * omap->speed / 8);
}
+static void omap_i2c_wait(struct omap_i2c_dev *omap)
+{
+ u16 stat;
+ u16 mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+ int count = 0;
+
+ do {
+ stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+ count++;
+ } while (!(stat & mask) && count < 5);
+}
+
/*
* Low level master read/write transaction.
*/
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
- struct i2c_msg *msg, int stop)
+ struct i2c_msg *msg, int stop, bool polling)
{
struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
unsigned long timeout;
u16 w;
+ int ret;
dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
msg->addr, msg->len, msg->flags, stop);
@@ -680,7 +695,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, w);
- reinit_completion(&omap->cmd_complete);
+ if (!polling)
+ reinit_completion(&omap->cmd_complete);
omap->cmd_err = 0;
w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
@@ -732,8 +748,18 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* REVISIT: We should abort the transfer on signals, but the bus goes
* into arbitration and we're currently unable to recover from it.
*/
- timeout = wait_for_completion_timeout(&omap->cmd_complete,
- OMAP_I2C_TIMEOUT);
+ if (!polling) {
+ timeout = wait_for_completion_timeout(&omap->cmd_complete,
+ OMAP_I2C_TIMEOUT);
+ } else {
+ do {
+ omap_i2c_wait(omap);
+ ret = omap_i2c_xfer_data(omap);
+ } while (ret == -EAGAIN);
+
+ timeout = !ret;
+ }
+
if (timeout == 0) {
dev_err(omap->dev, "controller timed out\n");
omap_i2c_reset(omap);
@@ -772,7 +798,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* to do the work during IRQ processing.
*/
static int
-omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+omap_i2c_xfer_common(struct i2c_adapter *adap, struct i2c_msg msgs[], int num,
+ bool polling)
{
struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
int i;
@@ -794,7 +821,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
omap->set_mpu_wkup_lat(omap->dev, omap->latency);
for (i = 0; i < num; i++) {
- r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
+ r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)),
+ polling);
if (r != 0)
break;
}
@@ -813,6 +841,18 @@ out:
return r;
}
+static int
+omap_i2c_xfer_irq(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ return omap_i2c_xfer_common(adap, msgs, num, false);
+}
+
+static int
+omap_i2c_xfer_polling(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ return omap_i2c_xfer_common(adap, msgs, num, true);
+}
+
static u32
omap_i2c_func(struct i2c_adapter *adap)
{
@@ -1035,10 +1075,8 @@ omap_i2c_isr(int irq, void *dev_id)
return ret;
}
-static irqreturn_t
-omap_i2c_isr_thread(int this_irq, void *dev_id)
+static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
{
- struct omap_i2c_dev *omap = dev_id;
u16 bits;
u16 stat;
int err = 0, count = 0;
@@ -1056,7 +1094,8 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
if (!stat) {
/* my work here is done */
- goto out;
+ err = -EAGAIN;
+ break;
}
dev_dbg(omap->dev, "IRQ (ISR = 0x%04x)\n", stat);
@@ -1165,14 +1204,25 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
}
} while (stat);
- omap_i2c_complete_cmd(omap, err);
+ return err;
+}
+
+static irqreturn_t
+omap_i2c_isr_thread(int this_irq, void *dev_id)
+{
+ int ret;
+ struct omap_i2c_dev *omap = dev_id;
+
+ ret = omap_i2c_xfer_data(omap);
+ if (ret != -EAGAIN)
+ omap_i2c_complete_cmd(omap, ret);
-out:
return IRQ_HANDLED;
}
static const struct i2c_algorithm omap_i2c_algo = {
- .master_xfer = omap_i2c_xfer,
+ .master_xfer = omap_i2c_xfer_irq,
+ .master_xfer_atomic = omap_i2c_xfer_polling,
.functionality = omap_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 90946a8b9a75..e9a0514ae166 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -19,6 +19,7 @@
Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
AMD Hudson-2, ML, CZ
+ Hygon CZ
SMSC Victory66
Note: we assume there can only be one device, with one or more
@@ -289,7 +290,9 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
PIIX4_dev->revision >= 0x41) ||
(PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
- PIIX4_dev->revision >= 0x49))
+ PIIX4_dev->revision >= 0x49) ||
+ (PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON &&
+ PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS))
smb_en = 0x00;
else
smb_en = (aux) ? 0x28 : 0x2c;
@@ -361,7 +364,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
piix4_smba, i2ccfg >> 4);
/* Find which register is used for port selection */
- if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
+ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
+ PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
switch (PIIX4_dev->device) {
case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
@@ -794,6 +798,7 @@ static const struct pci_device_id piix4_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_OSB4) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
@@ -904,11 +909,13 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
if ((dev->vendor == PCI_VENDOR_ID_ATI &&
dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
dev->revision >= 0x40) ||
- dev->vendor == PCI_VENDOR_ID_AMD) {
+ dev->vendor == PCI_VENDOR_ID_AMD ||
+ dev->vendor == PCI_VENDOR_ID_HYGON) {
bool notify_imc = false;
is_sb800 = true;
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ if ((dev->vendor == PCI_VENDOR_ID_AMD ||
+ dev->vendor == PCI_VENDOR_ID_HYGON) &&
dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
u8 imc;
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a7578f6da979..d39a4606f72d 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -85,6 +85,7 @@
/* ICFBSCR */
#define TCYC17 0x0f /* 17*Tcyc delay 1st bit between SDA and SCL */
+#define RCAR_MIN_DMA_LEN 8
#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
@@ -398,7 +399,7 @@ static void rcar_i2c_dma_callback(void *data)
rcar_i2c_dma_unmap(priv);
}
-static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
+static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
{
struct device *dev = rcar_i2c_priv_to_dev(priv);
struct i2c_msg *msg = priv->msg;
@@ -412,9 +413,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
int len;
/* Do various checks to see if DMA is feasible at all */
- if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
- (read && priv->flags & ID_P_NO_RXDMA))
- return;
+ if (IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
+ !(msg->flags & I2C_M_DMA_SAFE) || (read && priv->flags & ID_P_NO_RXDMA))
+ return false;
if (read) {
/*
@@ -434,7 +435,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
if (dma_mapping_error(chan->device->dev, dma_addr)) {
dev_dbg(dev, "dma map failed, using PIO\n");
- return;
+ return false;
}
sg_dma_len(&priv->sg) = len;
@@ -448,7 +449,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
if (!txdesc) {
dev_dbg(dev, "dma prep slave sg failed, using PIO\n");
rcar_i2c_cleanup_dma(priv);
- return;
+ return false;
}
txdesc->callback = rcar_i2c_dma_callback;
@@ -458,7 +459,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
if (dma_submit_error(cookie)) {
dev_dbg(dev, "submitting dma failed, using PIO\n");
rcar_i2c_cleanup_dma(priv);
- return;
+ return false;
}
/* Enable DMA Master Received/Transmitted */
@@ -468,6 +469,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICDMAER, TMDMAE);
dma_async_issue_pending(chan);
+ return true;
}
static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
@@ -478,6 +480,10 @@ static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
if (!(msr & MDE))
return;
+ /* Check if DMA can be enabled and take over */
+ if (priv->pos == 1 && rcar_i2c_dma(priv))
+ return;
+
if (priv->pos < msg->len) {
/*
* Prepare next data to ICRXTX register.
@@ -488,13 +494,6 @@ static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
*/
rcar_i2c_write(priv, ICRXTX, msg->buf[priv->pos]);
priv->pos++;
-
- /*
- * Try to use DMA to transmit the rest of the data if
- * address transfer phase just finished.
- */
- if (msr & MAT)
- rcar_i2c_dma(priv);
} else {
/*
* The last data was pushed to ICRXTX on _PREV_ empty irq.
@@ -921,6 +920,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
struct i2c_timings i2c_t;
int irq, ret;
+ /* Otherwise logic will break because some bytes must always use PIO */
+ BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
+
priv = devm_kzalloc(dev, sizeof(struct rcar_i2c_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
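
A note on the BUILD_BUG_ON_MSG() added in probe above: the driver always moves a few bytes of each message by PIO around the DMA portion, so the DMA length threshold must stay above a small minimum; the macro turns that assumption into a compile-time failure instead of a runtime surprise. A tiny hedged illustration (the constant below is made up):

/* Illustration only: BUILD_BUG_ON_MSG(), from <linux/build_bug.h>,
 * aborts the build with the given message when its condition is a
 * non-zero compile-time constant.
 */
#include <linux/build_bug.h>

#define EXAMPLE_MIN_DMA_LEN     8       /* hypothetical threshold */

static void example_check_threshold(void)
{
        /* Compiles: 8 is not smaller than 3, so the condition is false.
         * Changing the define to 2 would turn this into a build error.
         */
        BUILD_BUG_ON_MSG(EXAMPLE_MIN_DMA_LEN < 3, "Invalid min DMA length");
}
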
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index b75ff144b570..f31413fd9521 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -43,6 +43,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define RIIC_ICCR1 0x00
#define RIIC_ICCR2 0x04
@@ -112,12 +113,10 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct riic_dev *riic = i2c_get_adapdata(adap);
unsigned long time_left;
- int i, ret;
+ int i;
u8 start_bit;
- ret = clk_prepare_enable(riic->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(adap->dev.parent);
if (readb(riic->base + RIIC_ICCR2) & ICCR2_BBSY) {
riic->err = -EBUSY;
@@ -150,7 +149,7 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
out:
- clk_disable_unprepare(riic->clk);
+ pm_runtime_put(adap->dev.parent);
return riic->err ?: num;
}
@@ -281,20 +280,18 @@ static const struct i2c_algorithm riic_algo = {
static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
{
- int ret;
+ int ret = 0;
unsigned long rate;
int total_ticks, cks, brl, brh;
- ret = clk_prepare_enable(riic->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(riic->adapter.dev.parent);
if (t->bus_freq_hz > 400000) {
dev_err(&riic->adapter.dev,
"unsupported bus speed (%dHz). 400000 max\n",
t->bus_freq_hz);
- clk_disable_unprepare(riic->clk);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
rate = clk_get_rate(riic->clk);
@@ -332,8 +329,8 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
if (brl > (0x1F + 3)) {
dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
(unsigned long)t->bus_freq_hz);
- clk_disable_unprepare(riic->clk);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
brh = total_ticks - brl;
@@ -378,9 +375,9 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
- clk_disable_unprepare(riic->clk);
-
- return 0;
+out:
+ pm_runtime_put(riic->adapter.dev.parent);
+ return ret;
}
static struct riic_irq_desc riic_irqs[] = {
@@ -439,28 +436,36 @@ static int riic_i2c_probe(struct platform_device *pdev)
i2c_parse_fw_timings(&pdev->dev, &i2c_t, true);
+ pm_runtime_enable(&pdev->dev);
+
ret = riic_init_hw(riic, &i2c_t);
if (ret)
- return ret;
-
+ goto out;
ret = i2c_add_adapter(adap);
if (ret)
- return ret;
+ goto out;
platform_set_drvdata(pdev, riic);
dev_info(&pdev->dev, "registered with %dHz bus speed\n",
i2c_t.bus_freq_hz);
return 0;
+
+out:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
}
static int riic_i2c_remove(struct platform_device *pdev)
{
struct riic_dev *riic = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
writeb(0, riic->base + RIIC_ICIER);
+ pm_runtime_put(&pdev->dev);
i2c_del_adapter(&riic->adapter);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
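
The riic hunks above replace per-call clk_prepare_enable()/clk_disable_unprepare() with pm_runtime_get_sync()/pm_runtime_put(); the clock handling itself presumably moves into runtime-PM callbacks elsewhere in the patch. A hedged sketch of that general pattern, using a hypothetical baz_* driver:

/* Hypothetical baz_* driver: clock gating lives in the runtime-PM
 * callbacks, so code that touches the hardware only brackets the
 * access with get/put and never handles the clock directly.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct baz_dev {
        struct clk *clk;
};

static int baz_runtime_suspend(struct device *dev)
{
        struct baz_dev *priv = dev_get_drvdata(dev);

        clk_disable_unprepare(priv->clk);
        return 0;
}

static int baz_runtime_resume(struct device *dev)
{
        struct baz_dev *priv = dev_get_drvdata(dev);

        return clk_prepare_enable(priv->clk);
}

static const struct dev_pm_ops baz_pm_ops = {
        SET_RUNTIME_PM_OPS(baz_runtime_suspend, baz_runtime_resume, NULL)
};

static int baz_touch_hardware(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* clock is guaranteed on here */
        /* ... register accesses ... */
        pm_runtime_put(dev);            /* clock may be gated again */

        return 0;
}

As in the riic probe above, the device must have called pm_runtime_enable() beforehand, and pm_runtime_disable() on the error and remove paths.
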
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 4284fc991cfd..48337bef5b87 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -476,8 +476,12 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
list_add_tail(&v->node,
&solutions);
+ break;
}
}
+
+ if (p_prev == p)
+ break;
}
}
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 5503fa171df0..743c161b22c5 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -328,12 +328,6 @@ static int stu300_start_and_await_event(struct stu300_dev *dev,
{
int ret;
- if (unlikely(irqs_disabled())) {
- /* TODO: implement polling for this case if need be. */
- WARN(1, "irqs are disabled, cannot poll for event\n");
- return -EIO;
- }
-
/* Lock command issue, fill in an event we wait for */
spin_lock_irq(&dev->cmd_issue_lock);
init_completion(&dev->cmd_complete);
@@ -380,13 +374,6 @@ static int stu300_await_event(struct stu300_dev *dev,
{
int ret;
- if (unlikely(irqs_disabled())) {
- /* TODO: implement polling for this case if need be. */
- dev_err(&dev->pdev->dev, "irqs are disabled on this "
- "system!\n");
- return -EIO;
- }
-
/* Is it already here? */
spin_lock_irq(&dev->cmd_issue_lock);
dev->cmd_err = STU300_ERROR_NONE;
@@ -846,6 +833,13 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
return num;
}
+static int stu300_xfer_todo(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ /* TODO: implement polling for this case if need be. */
+ WARN(1, "%s: atomic transfers not implemented\n", dev_name(&adap->dev));
+ return -EOPNOTSUPP;
+}
+
static u32 stu300_func(struct i2c_adapter *adap)
{
/* This is the simplest thing you can think of... */
@@ -853,8 +847,9 @@ static u32 stu300_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm stu300_algo = {
- .master_xfer = stu300_xfer,
- .functionality = stu300_func,
+ .master_xfer = stu300_xfer,
+ .master_xfer_atomic = stu300_xfer_todo,
+ .functionality = stu300_func,
};
static const struct i2c_adapter_quirks stu300_quirks = {
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index d18b0941b71a..f14d4b3fab44 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -597,6 +597,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
i2c->adapter = synquacer_i2c_ops;
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.dev.parent = &pdev->dev;
+ i2c->adapter.dev.of_node = pdev->dev.of_node;
+ ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev));
i2c->adapter.nr = pdev->id;
init_completion(&i2c->completion);
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index f6cd35d0a2ac..9bb085793a0c 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -207,7 +207,8 @@ static int tegra_bpmp_i2c_msg_len_check(struct i2c_msg *msgs, unsigned int num)
static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c,
struct mrq_i2c_request *request,
- struct mrq_i2c_response *response)
+ struct mrq_i2c_response *response,
+ bool atomic)
{
struct tegra_bpmp_message msg;
int err;
@@ -222,7 +223,7 @@ static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c,
msg.rx.data = response;
msg.rx.size = sizeof(*response);
- if (irqs_disabled())
+ if (atomic)
err = tegra_bpmp_transfer_atomic(i2c->bpmp, &msg);
else
err = tegra_bpmp_transfer(i2c->bpmp, &msg);
@@ -230,8 +231,9 @@ static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c,
return err;
}
-static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs, int num)
+static int tegra_bpmp_i2c_xfer_common(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num,
+ bool atomic)
{
struct tegra_bpmp_i2c *i2c = i2c_get_adapdata(adapter);
struct mrq_i2c_response response;
@@ -253,7 +255,7 @@ static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter,
return err;
}
- err = tegra_bpmp_i2c_msg_xfer(i2c, &request, &response);
+ err = tegra_bpmp_i2c_msg_xfer(i2c, &request, &response, atomic);
if (err < 0) {
dev_err(i2c->dev, "failed to transfer message: %d\n", err);
return err;
@@ -268,6 +270,18 @@ static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter,
return num;
}
+static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ return tegra_bpmp_i2c_xfer_common(adapter, msgs, num, false);
+}
+
+static int tegra_bpmp_i2c_xfer_atomic(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ return tegra_bpmp_i2c_xfer_common(adapter, msgs, num, true);
+}
+
static u32 tegra_bpmp_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
@@ -276,6 +290,7 @@ static u32 tegra_bpmp_i2c_func(struct i2c_adapter *adapter)
static const struct i2c_algorithm tegra_bpmp_i2c_algo = {
.master_xfer = tegra_bpmp_i2c_xfer,
+ .master_xfer_atomic = tegra_bpmp_i2c_xfer_atomic,
.functionality = tegra_bpmp_i2c_func,
};
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 38af18645133..9732a81bb7dd 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- int i = 0, scl = 1, ret;
+ int i = 0, scl = 1, ret = 0;
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
@@ -327,6 +327,8 @@ static int i2c_device_probe(struct device *dev)
if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
dev_dbg(dev, "Using Host Notify IRQ\n");
+ /* Keep adapter active when Host Notify is required */
+ pm_runtime_get_sync(&client->adapter->dev);
irq = i2c_smbus_host_notify_to_irq(client);
} else if (dev->of_node) {
irq = of_irq_get_byname(dev->of_node, "irq");
@@ -431,6 +433,8 @@ static int i2c_device_remove(struct device *dev)
device_init_wakeup(&client->dev, false);
client->irq = client->init_irq;
+ if (client->flags & I2C_CLIENT_HOST_NOTIFY)
+ pm_runtime_put(&client->adapter->dev);
return status;
}
@@ -1867,8 +1871,10 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
- if (WARN_ON(test_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags)))
- return -ESHUTDOWN;
+
+ ret = __i2c_check_suspended(adap);
+ if (ret)
+ return ret;
if (adap->quirks && i2c_check_for_quirks(adap, msgs, num))
return -EOPNOTSUPP;
@@ -1890,7 +1896,11 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
for (ret = 0, try = 0; try <= adap->retries; try++) {
- ret = adap->algo->master_xfer(adap, msgs, num);
+ if (i2c_in_atomic_xfer_mode() && adap->algo->master_xfer_atomic)
+ ret = adap->algo->master_xfer_atomic(adap, msgs, num);
+ else
+ ret = adap->algo->master_xfer(adap, msgs, num);
+
if (ret != -EAGAIN)
break;
if (time_after(jiffies, orig_jiffies + adap->timeout))
@@ -1946,14 +1956,9 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
* one (discarding status on the second message) or errno
* (discarding status on the first one).
*/
- if (in_atomic() || irqs_disabled()) {
- ret = i2c_trylock_bus(adap, I2C_LOCK_SEGMENT);
- if (!ret)
- /* I2C activity is ongoing. */
- return -EAGAIN;
- } else {
- i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
- }
+ ret = __i2c_lock_bus_helper(adap);
+ if (ret)
+ return ret;
ret = __i2c_transfer(adap, msgs, num);
i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 132119112596..788d42f2aad9 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -20,6 +20,8 @@
#include <linux/i2c-smbus.h>
#include <linux/slab.h>
+#include "i2c-core.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/smbus.h>
@@ -530,7 +532,10 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
{
s32 res;
- i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
+ res = __i2c_lock_bus_helper(adapter);
+ if (res)
+ return res;
+
res = __i2c_smbus_xfer(adapter, addr, flags, read_write,
command, protocol, data);
i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
@@ -543,10 +548,17 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
unsigned short flags, char read_write,
u8 command, int protocol, union i2c_smbus_data *data)
{
+ int (*xfer_func)(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
unsigned long orig_jiffies;
int try;
s32 res;
+ res = __i2c_check_suspended(adapter);
+ if (res)
+ return res;
+
/* If enabled, the following two tracepoints are conditional on
* read_write and protocol.
*/
@@ -557,13 +569,20 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
- if (adapter->algo->smbus_xfer) {
+ xfer_func = adapter->algo->smbus_xfer;
+ if (i2c_in_atomic_xfer_mode()) {
+ if (adapter->algo->smbus_xfer_atomic)
+ xfer_func = adapter->algo->smbus_xfer_atomic;
+ else if (adapter->algo->master_xfer_atomic)
+ xfer_func = NULL; /* fallback to I2C emulation */
+ }
+
+ if (xfer_func) {
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
for (res = 0, try = 0; try <= adapter->retries; try++) {
- res = adapter->algo->smbus_xfer(adapter, addr, flags,
- read_write, command,
- protocol, data);
+ res = xfer_func(adapter, addr, flags, read_write,
+ command, protocol, data);
if (res != -EAGAIN)
break;
if (time_after(jiffies,
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 37576f50fe20..c88cfef81343 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -29,6 +29,42 @@ extern int __i2c_first_dynamic_bus_num;
int i2c_check_7bit_addr_validity_strict(unsigned short addr);
+/*
+ * We only allow atomic transfers for very late communication, e.g. to send
+ * the powerdown command to a PMIC. Atomic transfers are a corner case and not
+ * for generic use!
+ */
+static inline bool i2c_in_atomic_xfer_mode(void)
+{
+ return system_state > SYSTEM_RUNNING && irqs_disabled();
+}
+
+static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
+{
+ int ret = 0;
+
+ if (i2c_in_atomic_xfer_mode()) {
+ WARN(!adap->algo->master_xfer_atomic && !adap->algo->smbus_xfer_atomic,
+ "No atomic I2C transfer handler for '%s'\n", dev_name(&adap->dev));
+ ret = i2c_trylock_bus(adap, I2C_LOCK_SEGMENT) ? 0 : -EAGAIN;
+ } else {
+ i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
+ }
+
+ return ret;
+}
+
+static inline int __i2c_check_suspended(struct i2c_adapter *adap)
+{
+ if (test_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags)) {
+ if (!test_and_set_bit(I2C_ALF_SUSPEND_REPORTED, &adap->locked_flags))
+ dev_WARN(&adap->dev, "Transfer while suspended\n");
+ return -ESHUTDOWN;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_ACPI
const struct acpi_device_id *
i2c_acpi_match_device(const struct acpi_device_id *matches,
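
The helpers above exist for exactly the scenario named in the comment: a very late, interrupts-off transfer such as telling a PMIC to cut power. A hedged sketch of such a caller follows; the pmic_* names and register constants are hypothetical, while i2c_smbus_write_byte_data() and pm_power_off are real kernel interfaces, and i2c_smbus_write_byte_data() ends up in i2c_smbus_xfer(), which now uses __i2c_lock_bus_helper() and the atomic callbacks.

/*
 * Hypothetical PMIC poweroff hook. At this point the system is past
 * SYSTEM_RUNNING and interrupts are off, so i2c_in_atomic_xfer_mode()
 * is true and the core routes the access through the adapter's
 * *_xfer_atomic callback (or warns if none is available).
 */
#include <linux/i2c.h>
#include <linux/pm.h>

#define PMIC_REG_POWER_CTRL     0x10    /* hypothetical register */
#define PMIC_POWER_OFF_CMD      0x01    /* hypothetical value */

static struct i2c_client *pmic_client;

static void pmic_power_off(void)
{
        i2c_smbus_write_byte_data(pmic_client, PMIC_REG_POWER_CTRL,
                                  PMIC_POWER_OFF_CMD);
}

static int pmic_probe(struct i2c_client *client,
                      const struct i2c_device_id *id)
{
        pmic_client = client;
        pm_power_off = pmic_power_off;
        return 0;
}
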
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index f330690b4125..603252fa1284 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -310,12 +310,18 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
else
priv->algo.master_xfer = __i2c_mux_master_xfer;
}
+ if (parent->algo->master_xfer_atomic)
+ priv->algo.master_xfer_atomic = priv->algo.master_xfer;
+
if (parent->algo->smbus_xfer) {
if (muxc->mux_locked)
priv->algo.smbus_xfer = i2c_mux_smbus_xfer;
else
priv->algo.smbus_xfer = __i2c_mux_smbus_xfer;
}
+ if (parent->algo->smbus_xfer_atomic)
+ priv->algo.smbus_xfer_atomic = priv->algo.smbus_xfer;
+
priv->algo.functionality = i2c_mux_functionality;
/* Now fill out new adapter structure */
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 035032e20327..4eecffc26527 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -99,6 +99,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
/* Now fill out current adapter structure. cur_chan must be up to date */
priv->algo.master_xfer = i2c_demux_master_xfer;
+ if (adap->algo->master_xfer_atomic)
+ priv->algo.master_xfer_atomic = i2c_demux_master_xfer;
priv->algo.functionality = i2c_demux_functionality;
snprintf(priv->cur_adap.name, sizeof(priv->cur_adap.name),
@@ -219,8 +221,8 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
- + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, struct_size(priv, chan, num_chan),
+ GFP_KERNEL);
props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
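
The devm_kzalloc() conversion above uses struct_size() from <linux/overflow.h>, which computes the size of a structure with a trailing flexible array and saturates on overflow instead of silently wrapping into a short allocation. A minimal hedged sketch with a hypothetical bar_* structure:

/* Hypothetical bar_* example of the struct_size() pattern. */
#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct bar_chan {
        u32 id;
};

struct bar_priv {
        int num_chan;
        struct bar_chan chan[];         /* flexible array member */
};

static struct bar_priv *bar_alloc(struct device *dev, int num_chan)
{
        struct bar_priv *priv;

        /* sizeof(*priv) + num_chan * sizeof(priv->chan[0]), with the
         * multiplication checked for overflow.
         */
        priv = devm_kzalloc(dev, struct_size(priv, chan, num_chan),
                            GFP_KERNEL);
        if (!priv)
                return NULL;

        priv->num_chan = num_chan;
        return priv;
}
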
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 9e75d6b9140b..50e1fb4aedf5 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -22,7 +22,6 @@
#include <linux/i2c-mux.h>
#include <linux/jiffies.h>
#include <linux/module.h>
-#include <linux/platform_data/pca954x.h>
#include <linux/slab.h>
/*
@@ -287,10 +286,8 @@ static int pca9541_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adap = client->adapter;
- struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
struct i2c_mux_core *muxc;
struct pca9541 *data;
- int force;
int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -306,9 +303,6 @@ static int pca9541_probe(struct i2c_client *client,
/* Create mux adapter */
- force = 0;
- if (pdata)
- force = pdata->modes[0].adap_id;
muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data),
I2C_MUX_ARBITRATOR,
pca9541_select_chan, pca9541_release_chan);
@@ -320,7 +314,7 @@ static int pca9541_probe(struct i2c_client *client,
i2c_set_clientdata(client, muxc);
- ret = i2c_mux_add_adapter(muxc, force, 0, 0);
+ ret = i2c_mux_add_adapter(muxc, 0, 0, 0);
if (ret)
return ret;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index bfabf985e830..923aa3a5a3dc 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -46,10 +46,10 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/platform_data/pca954x.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <dt-bindings/mux/mux.h>
#define PCA954X_MAX_NCHANS 8
@@ -85,7 +85,9 @@ struct pca954x {
const struct chip_desc *chip;
u8 last_chan; /* last register value */
- u8 deselect;
+ /* MUX_IDLE_AS_IS, MUX_IDLE_DISCONNECT or >= 0 for channel */
+ s8 idle_state;
+
struct i2c_client *client;
struct irq_domain *irq;
@@ -254,15 +256,71 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
+ s8 idle_state;
+
+ idle_state = READ_ONCE(data->idle_state);
+ if (idle_state >= 0)
+ /* Set the mux back to a predetermined channel */
+ return pca954x_select_chan(muxc, idle_state);
+
+ if (idle_state == MUX_IDLE_DISCONNECT) {
+ /* Deselect active channel */
+ data->last_chan = 0;
+ return pca954x_reg_write(muxc->parent, client,
+ data->last_chan);
+ }
- if (!(data->deselect & (1 << chan)))
- return 0;
+ /* otherwise leave as-is */
- /* Deselect active channel */
- data->last_chan = 0;
- return pca954x_reg_write(muxc->parent, client, data->last_chan);
+ return 0;
+}
+
+static ssize_t idle_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct pca954x *data = i2c_mux_priv(muxc);
+
+ return sprintf(buf, "%d\n", READ_ONCE(data->idle_state));
}
+static ssize_t idle_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct pca954x *data = i2c_mux_priv(muxc);
+ int val;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != MUX_IDLE_AS_IS && val != MUX_IDLE_DISCONNECT &&
+ (val < 0 || val >= data->chip->nchans))
+ return -EINVAL;
+
+ i2c_lock_bus(muxc->parent, I2C_LOCK_SEGMENT);
+
+ WRITE_ONCE(data->idle_state, val);
+ /*
+ * Set the mux into a state consistent with the new
+ * idle_state.
+ */
+ if (data->last_chan || val != MUX_IDLE_DISCONNECT)
+ ret = pca954x_deselect_mux(muxc, 0);
+
+ i2c_unlock_bus(muxc->parent, I2C_LOCK_SEGMENT);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(idle_state);
+
static irqreturn_t pca954x_irq_handler(int irq, void *dev_id)
{
struct pca954x *data = dev_id;
@@ -329,8 +387,11 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
static void pca954x_cleanup(struct i2c_mux_core *muxc)
{
struct pca954x *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
int c, irq;
+ device_remove_file(&client->dev, &dev_attr_idle_state);
+
if (data->irq) {
for (c = 0; c < data->chip->nchans; c++) {
irq = irq_find_mapping(data->irq, c);
@@ -348,14 +409,13 @@ static int pca954x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adap = client->adapter;
- struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
bool idle_disconnect_dt;
struct gpio_desc *gpio;
- int num, force, class;
struct i2c_mux_core *muxc;
struct pca954x *data;
+ int num;
int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
@@ -412,9 +472,12 @@ static int pca954x_probe(struct i2c_client *client,
}
data->last_chan = 0; /* force the first selection */
+ data->idle_state = MUX_IDLE_AS_IS;
idle_disconnect_dt = np &&
of_property_read_bool(np, "i2c-mux-idle-disconnect");
+ if (idle_disconnect_dt)
+ data->idle_state = MUX_IDLE_DISCONNECT;
ret = pca954x_irq_setup(muxc);
if (ret)
@@ -422,24 +485,7 @@ static int pca954x_probe(struct i2c_client *client,
/* Now create an adapter for each channel */
for (num = 0; num < data->chip->nchans; num++) {
- bool idle_disconnect_pd = false;
-
- force = 0; /* dynamic adap number */
- class = 0; /* no class by default */
- if (pdata) {
- if (num < pdata->num_modes) {
- /* force static number */
- force = pdata->modes[num].adap_id;
- class = pdata->modes[num].class;
- } else
- /* discard unconfigured channels */
- break;
- idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
- }
- data->deselect |= (idle_disconnect_pd ||
- idle_disconnect_dt) << num;
-
- ret = i2c_mux_add_adapter(muxc, force, num, class);
+ ret = i2c_mux_add_adapter(muxc, 0, num, 0);
if (ret)
goto fail_cleanup;
}
@@ -453,6 +499,12 @@ static int pca954x_probe(struct i2c_client *client,
goto fail_cleanup;
}
+ /*
+ * The attr probably isn't going to be needed in most cases,
+ * so don't fail completely on error.
+ */
+ device_create_file(dev, &dev_attr_idle_state);
+
dev_info(dev, "registered %d multiplexed busses for I2C %s %s\n",
num, data->chip->muxtype == pca954x_ismux
? "mux" : "switch", client->name);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 2dc628d4f1ae..5f4bd52121fe 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -385,8 +385,9 @@ static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
return;
ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
- *ptr &= ~(I3C_ADDR_SLOT_STATUS_MASK << (bitpos % BITS_PER_LONG));
- *ptr |= status << (bitpos % BITS_PER_LONG);
+ *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
+ (bitpos % BITS_PER_LONG));
+ *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
}
static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
@@ -1980,7 +1981,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
{
struct i3c_dev_boardinfo *boardinfo;
struct device *dev = &master->dev;
- struct i3c_device_info info = { };
enum i3c_addr_slot_status addrstatus;
u32 init_dyn_addr = 0;
@@ -2012,8 +2012,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
- if ((info.pid & GENMASK_ULL(63, 48)) ||
- I3C_PID_RND_LOWER_32BITS(info.pid))
+ if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+ I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
return -EINVAL;
boardinfo->init_dyn_addr = init_dyn_addr;
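
The casts added in i3c_bus_set_addr_slot_status() matter because addrslots is an array of unsigned long: on 64-bit builds, bitpos % BITS_PER_LONG can be as large as 63, and shifting a plain 32-bit value that far is undefined, so the status bits for slots in the upper half of each word would be lost. A small standalone demonstration (userspace C, not kernel code):

/* Widen before shifting when the shift count can exceed 31. */
#include <stdio.h>

int main(void)
{
        unsigned long slot = 0;
        unsigned int status = 0x3;      /* 2-bit slot status */
        unsigned int bitpos = 40;       /* lands in the upper 32 bits */

        /* Wrong: the shift happens in 32 bits before widening. */
        /* slot |= status << bitpos; */

        /* Right: widen first, then shift in 64 bits. */
        slot |= (unsigned long)status << bitpos;

        printf("slot = %#lx\n", slot);
        return 0;
}
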
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 59279224e07f..1d83c97431c7 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master)
static void dw_i3c_master_disable(struct dw_i3c_master *master)
{
- writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE,
+ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
master->regs + DEVICE_CTRL);
}
@@ -841,11 +841,6 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
return -ENOTSUPP;
for (i = 0; i < i3c_nxfers; i++) {
- if (i3c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
- return -ENOTSUPP;
- }
-
- for (i = 0; i < i3c_nxfers; i++) {
if (i3c_xfers[i].rnw)
nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
else
@@ -974,11 +969,6 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
return -ENOTSUPP;
for (i = 0; i < i2c_nxfers; i++) {
- if (i2c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
- return -ENOTSUPP;
- }
-
- for (i = 0; i < i2c_nxfers; i++) {
if (i2c_xfers[i].flags & I2C_M_RD)
nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
else
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1f03884a6808..3b15adc6ce98 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1797,6 +1797,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g, NULL);
return 0;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 4a6e1a413ead..46f2df288c6a 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -82,8 +82,9 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
/*
* ide-cd always generates media changed event if media is missing, which
- * makes it impossible to use for proper event reporting, so disk->events
- * is cleared to 0 and the following function is used only to trigger
+ * makes it impossible to use for proper event reporting, so
+ * DISK_EVENT_FLAG_UEVENT is cleared in disk->event_flags
+ * and the following function is used only to trigger
* revalidation and never propagated to userland.
*/
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 04e008e8f6f9..f233b34ea0c0 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -299,8 +299,9 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
/*
* The following is used to force revalidation on the first open on
* removeable devices, and never gets reported to userland as
- * genhd->events is 0. This is intended as removeable ide disk
- * can't really detect MEDIA_CHANGE events.
+ * DISK_EVENT_FLAG_UEVENT isn't set in genhd->event_flags.
+ * This is intended as removable ide disk can't really detect
+ * MEDIA_CHANGE events.
*/
ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
@@ -416,6 +417,7 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g, NULL);
return 0;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 5aeaca24a28f..4ad824984581 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1437,6 +1437,9 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
ide_hwif_t *hwif, *mate = NULL;
int i, j = 0;
+ pr_warn("legacy IDE will be removed in 2021, please switch to libata\n"
+ "Report any missing HW support to linux-ide@vger.kernel.org\n");
+
ide_host_for_each_port(i, hwif, host) {
if (hwif == NULL) {
mate = NULL;
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 67d4a7d4acc8..88d132edc4e3 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -156,7 +156,6 @@ static u16 tx4939ide_check_error_ints(ide_hwif_t *hwif)
u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl);
tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl);
- mmiowb();
/* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */
ndelay(270);
tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
@@ -396,7 +395,6 @@ static void tx4939ide_init_hwif(ide_hwif_t *hwif)
/* Soft Reset */
tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl);
- mmiowb();
/* at least 20 GBUSCLK (typ. 100ns @ GBUS200MHz, max 450ns) */
ndelay(450);
tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl);
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index d08aeb41cd07..a22cbee593fe 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -4,7 +4,6 @@
menuconfig IIO
tristate "Industrial I/O support"
- select ANON_INODES
help
The industrial I/O subsystem provides a unified framework for
drivers for many different types of embedded sensors using a
@@ -39,28 +38,28 @@ config IIO_TRIGGER
data now' interrupt.
config IIO_CONSUMERS_PER_TRIGGER
- int "Maximum number of consumers per trigger"
- depends on IIO_TRIGGER
- default "2"
- help
- This value controls the maximum number of consumers that a
- given trigger may handle. Default is 2.
+ int "Maximum number of consumers per trigger"
+ depends on IIO_TRIGGER
+ default "2"
+ help
+ This value controls the maximum number of consumers that a
+ given trigger may handle. Default is 2.
config IIO_SW_DEVICE
tristate "Enable software IIO device support"
select IIO_CONFIGFS
help
- Provides IIO core support for software devices. A software
- device can be created via configfs or directly by a driver
- using the API provided.
+ Provides IIO core support for software devices. A software
+ device can be created via configfs or directly by a driver
+ using the API provided.
config IIO_SW_TRIGGER
tristate "Enable software triggers support"
select IIO_CONFIGFS
help
- Provides IIO core support for software triggers. A software
- trigger can be created via configfs or directly by a driver
- using the API provided.
+ Provides IIO core support for software triggers. A software
+ trigger can be created via configfs or directly by a driver
+ using the API provided.
config IIO_TRIGGERED_EVENT
tristate
@@ -74,7 +73,6 @@ source "drivers/iio/afe/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/chemical/Kconfig"
source "drivers/iio/common/Kconfig"
-source "drivers/iio/counter/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/dummy/Kconfig"
source "drivers/iio/frequency/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index cb5993251381..bff682ad1cfb 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -20,7 +20,6 @@ obj-y += amplifiers/
obj-y += buffer/
obj-y += chemical/
obj-y += common/
-obj-y += counter/
obj-y += dac/
obj-y += dummy/
obj-y += gyro/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 898839ca164a..62a970a20219 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -6,28 +6,28 @@
menu "Accelerometers"
config ADIS16201
- tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16201 dual-axis
- digital inclinometer and accelerometer.
+ tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say Y here to build support for Analog Devices adis16201 dual-axis
+ digital inclinometer and accelerometer.
- To compile this driver as a module, say M here: the module will
- be called adis16201.
+ To compile this driver as a module, say M here: the module will
+ be called adis16201.
config ADIS16209
- tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
- and accelerometer.
+ tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
+ and accelerometer.
- To compile this driver as a module, say M here: the module will be
- called adis16209.
+ To compile this driver as a module, say M here: the module will be
+ called adis16209.
config ADXL345
tristate
@@ -100,16 +100,16 @@ config BMA180
module will be called bma180.
config BMA220
- tristate "Bosch BMA220 3-Axis Accelerometer Driver"
+ tristate "Bosch BMA220 3-Axis Accelerometer Driver"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
- help
- Say yes here to add support for the Bosch BMA220 triaxial
- acceleration sensor.
+ help
+ Say yes here to add support for the Bosch BMA220 triaxial
+ acceleration sensor.
- To compile this driver as a module, choose M here: the
- module will be called bma220_spi.
+ To compile this driver as a module, choose M here: the
+ module will be called bma220_spi.
config BMC150_ACCEL
tristate "Bosch BMC150 Accelerometer Driver"
@@ -223,7 +223,7 @@ config IIO_ST_ACCEL_3AXIS
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
- LNG2DM, LIS3DE
+ LNG2DM, LIS3DE, LIS2DE12
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index cb9765a3de60..f9720a1e8a7c 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -116,6 +116,7 @@ struct bma180_data {
struct i2c_client *client;
struct iio_trigger *trig;
const struct bma180_part_info *part_info;
+ struct iio_mount_matrix orientation;
struct mutex mutex;
bool sleep_state;
int scale;
@@ -561,6 +562,15 @@ static int bma180_set_power_mode(struct iio_dev *indio_dev,
return ret;
}
+static const struct iio_mount_matrix *
+bma180_accel_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bma180_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
static const struct iio_enum bma180_power_mode_enum = {
.items = bma180_power_modes,
.num_items = ARRAY_SIZE(bma180_power_modes),
@@ -571,7 +581,8 @@ static const struct iio_enum bma180_power_mode_enum = {
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
- { },
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix),
+ { }
};
#define BMA180_ACC_CHANNEL(_axis, _bits) { \
@@ -722,6 +733,11 @@ static int bma180_probe(struct i2c_client *client,
chip = id->driver_data;
data->part_info = &bma180_part_info[chip];
+ ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+
ret = data->part_info->chip_config(data);
if (ret < 0)
goto err_chip_disable;
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 383c802eb5b8..44d7c49fe2a1 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -204,6 +204,7 @@ struct bmc150_accel_data {
int ev_enable_state;
int64_t timestamp, old_timestamp; /* Only used in hw fifo mode. */
const struct bmc150_accel_chip_info *chip_info;
+ struct iio_mount_matrix orientation;
};
static const struct {
@@ -393,7 +394,7 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
if (ret < 0) {
dev_err(dev,
- "Failed: bmc150_accel_set_power_state for %d\n", on);
+ "Failed: %s for %d\n", __func__, on);
if (on)
pm_runtime_put_noidle(dev);
@@ -796,6 +797,20 @@ static ssize_t bmc150_accel_get_fifo_state(struct device *dev,
return sprintf(buf, "%d\n", state);
}
+static const struct iio_mount_matrix *
+bmc150_accel_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bmc150_accel_get_mount_matrix),
+ { }
+};
+
static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
static IIO_CONST_ATTR(hwfifo_watermark_max,
__stringify(BMC150_ACCEL_FIFO_LENGTH));
@@ -978,6 +993,7 @@ static const struct iio_event_spec bmc150_accel_event = {
.shift = 16 - (bits), \
.endianness = IIO_LE, \
}, \
+ .ext_info = bmc150_accel_ext_info, \
.event_spec = &bmc150_accel_event, \
.num_event_specs = 1 \
}
@@ -1555,6 +1571,11 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
data->regmap = regmap;
+ ret = iio_read_mount_matrix(dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+
ret = bmc150_accel_chip_init(data);
if (ret < 0)
return ret;
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 063e89eff791..46bb2e421bb9 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for older Chrome OS EC accelerometer
*
* Copyright 2017 Google, Inc
*
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This driver uses the memory mapper cros-ec interface to communicate
* with the Chrome OS EC about accelerometer data.
* Accelerometer access is presented through iio sysfs.
@@ -29,7 +21,6 @@
#include <linux/mfd/cros_ec_commands.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/platform_device.h>
#define DRV_NAME "cros-ec-accel-legacy"
@@ -353,7 +344,7 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
struct iio_dev *indio_dev;
struct cros_ec_accel_legacy_state *state;
- int ret, i;
+ int ret;
if (!ec || !ec->ec_dev) {
dev_warn(&pdev->dev, "No EC device found.\n");
@@ -381,20 +372,17 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
* Present the channel using HTML5 standard:
* need to invert X and Y and invert some lid axis.
*/
- for (i = X ; i < MAX_AXIS; i++) {
- switch (i) {
- case X:
- ec_accel_channels[X].scan_index = Y;
- case Y:
- ec_accel_channels[Y].scan_index = X;
- case Z:
- ec_accel_channels[Z].scan_index = Z;
- }
- if (state->sensor_num == MOTIONSENSE_LOC_LID && i != Y)
- state->sign[i] = -1;
- else
- state->sign[i] = 1;
- }
+ ec_accel_channels[X].scan_index = Y;
+ ec_accel_channels[Y].scan_index = X;
+ ec_accel_channels[Z].scan_index = Z;
+
+ state->sign[Y] = 1;
+
+ if (state->sensor_num == MOTIONSENSE_LOC_LID)
+ state->sign[X] = state->sign[Z] = -1;
+ else
+ state->sign[X] = state->sign[Z] = 1;
+
indio_dev->num_channels = ARRAY_SIZE(ec_accel_channels);
indio_dev->dev.parent = &pdev->dev;
indio_dev->info = &cros_ec_accel_legacy_info;
@@ -419,5 +407,5 @@ module_platform_driver(cros_ec_accel_platform_driver);
MODULE_DESCRIPTION("ChromeOS EC legacy accelerometer driver");
MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
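
The loop removed above relied on a switch whose cases had no break statements, so every case below the matching one also ran on each iteration; the end result happened to be the same, which is presumably why the fix simply flattens the loop into direct assignments. A tiny standalone illustration of the fallthrough pitfall (userspace C, hypothetical values):

/* Count how often each element is written by a switch with missing
 * breaks inside a loop.
 */
#include <stdio.h>

int main(void)
{
        int writes[3] = { 0, 0, 0 };
        int i;

        for (i = 0; i < 3; i++) {
                switch (i) {
                case 0:
                        writes[0]++;    /* missing break: falls through */
                case 1:
                        writes[1]++;    /* missing break: falls through */
                case 2:
                        writes[2]++;
                }
        }

        /* Prints "1 2 3": later elements were written several times
         * because every case below the matching one also executed.
         */
        printf("%d %d %d\n", writes[0], writes[1], writes[2]);
        return 0;
}
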
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 7096e577b23f..2922a2e88a1b 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -451,7 +451,7 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
}
if (ret < 0) {
dev_err(&data->client->dev,
- "Failed: kxcjk1013_set_power_state for %d\n", on);
+ "Failed: %s for %d\n", __func__, on);
if (on)
pm_runtime_put_noidle(&data->client->dev);
return ret;
@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
mutex_lock(&data->mutex);
ret = kxcjk1013_set_mode(data, OPERATION);
+ if (ret == 0)
+ ret = kxcjk1013_set_range(data, data->range);
mutex_unlock(&data->mutex);
return ret;
@@ -1489,6 +1491,7 @@ static const struct acpi_device_id kx_acpi_match[] = {
{"KXCJ1013", KXCJK1013},
{"KXCJ1008", KXCJ91008},
{"KXCJ9000", KXCJ91008},
+ {"KIOX0008", KXCJ91008},
{"KIOX0009", KXTJ21009},
{"KIOX000A", KXCJ91008},
{"KIOX010A", KXCJ91008}, /* KXCJ91008 inside the display of a 2-in-1 */
@@ -1510,10 +1513,20 @@ static const struct i2c_device_id kxcjk1013_id[] = {
MODULE_DEVICE_TABLE(i2c, kxcjk1013_id);
+static const struct of_device_id kxcjk1013_of_match[] = {
+ { .compatible = "kionix,kxcjk1013", },
+ { .compatible = "kionix,kxcj91008", },
+ { .compatible = "kionix,kxtj21009", },
+ { .compatible = "kionix,kxtf9", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, kxcjk1013_of_match);
+
static struct i2c_driver kxcjk1013_driver = {
.driver = {
.name = KXCJK1013_DRV_NAME,
.acpi_match_table = ACPI_PTR(kx_acpi_match),
+ .of_match_table = kxcjk1013_of_match,
.pm = &kxcjk1013_pm_ops,
},
.probe = kxcjk1013_probe,
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 0c0df4fce420..70c60db62247 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -420,9 +420,7 @@ int kxsd9_common_probe(struct device *dev,
indio_dev->available_scan_masks = kxsd9_scan_masks;
/* Read the mounting matrix, if present */
- ret = of_iio_read_mount_matrix(dev,
- "mount-matrix",
- &st->orientation);
+ ret = iio_read_mount_matrix(dev, "mount-matrix", &st->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 302781126bc6..00e100fc845a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1580,7 +1580,7 @@ static int mma8452_probe(struct i2c_client *client,
case FXLS8471_DEVICE_ID:
if (ret == data->chip_info->chip_id)
break;
- /* else: fall through */
+ /* fall through */
default:
ret = -ENODEV;
goto disable_regulators;
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index fd53258656ca..9d25955af58e 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -34,6 +34,7 @@ enum st_accel_type {
LIS3LV02DL,
LIS2DW12,
LIS3DHH,
+ LIS2DE12,
ST_ACCEL_MAX,
};
@@ -57,6 +58,7 @@ enum st_accel_type {
#define LIS2DW12_ACCEL_DEV_NAME "lis2dw12"
#define LIS3DHH_ACCEL_DEV_NAME "lis3dhh"
#define LIS3DE_ACCEL_DEV_NAME "lis3de"
+#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index a3c0916479fa..5ff04d9755d5 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -831,6 +831,82 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = false,
.bootime = 2,
},
+ {
+ .wai = 0x33,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS2DE12_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_8bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01, },
+ { .hz = 10, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ { .hz = 400, .value = 0x07, },
+ { .hz = 1620, .value = 0x08, },
+ { .hz = 5376, .value = 0x09, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(15600),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(31200),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(62500),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(187500),
+ },
+ },
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ },
+ .addr_ihl = 0x25,
+ .mask_ihl = 0x02,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -992,7 +1068,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev,
goto out;
val = elements[i].integer.value;
- if (val < 0 || val > 2)
+ if (val > 2)
goto out;
/* Avoiding full matrix multiplication, we simply reorder the
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index de8ae4327094..4e54477ccf24 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -102,6 +102,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis3de",
.data = LIS3DE_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2de12",
+ .data = LIS2DE12_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -140,6 +144,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS3LV02DL_ACCEL_DEV_NAME },
{ LIS2DW12_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
+ { LIS2DE12_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 76db6e5cc296..2036eca546fd 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -124,6 +124,18 @@ config AD7768_1
To compile this driver as a module, choose M here: the module will be
called ad7768-1.
+config AD7780
+ tristate "Analog Devices AD7780 and similar ADCs driver"
+ depends on SPI
+ depends on GPIOLIB || COMPILE_TEST
+ select AD_SIGMA_DELTA
+ help
+ Say yes here to build support for Analog Devices AD7170, AD7171,
+ AD7780 and AD7781 SPI analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7780.
+
config AD7791
tristate "Analog Devices AD7791 ADC driver"
depends on SPI
@@ -390,7 +402,7 @@ config HX711
This driver uses two GPIOs, one acts as the clock and controls the
channel selection and gain, the other one is used for the measurement
- data
+ data
Currently the raw value is read from the chip and delivered.
To get an actual weight one needs to subtract the
@@ -541,7 +553,7 @@ config MAX1363
To compile this driver as a module, choose M here: the module will be
called max1363.
-config MAX9611
+config MAX9611
tristate "Maxim max9611/max9612 ADC driver"
depends on I2C
help
@@ -585,17 +597,17 @@ config MCP3911
called mcp3911.
config MEDIATEK_MT6577_AUXADC
- tristate "MediaTek AUXADC driver"
- depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Say yes here to enable support for MediaTek mt65xx AUXADC.
+ tristate "MediaTek AUXADC driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to enable support for MediaTek mt65xx AUXADC.
- The driver supports immediate mode operation to read from one of sixteen
- channels (external or internal).
+ The driver supports immediate mode operation to read from one of sixteen
+ channels (external or internal).
- This driver can also be built as a module. If so, the module will be
- called mt6577_auxadc.
+ This driver can also be built as a module. If so, the module will be
+ called mt6577_auxadc.
config MEN_Z188_ADC
tristate "MEN 16z188 ADC IP Core support"
@@ -809,7 +821,9 @@ config STM32_DFSDM_ADC
depends on (ARCH_STM32 && OF) || COMPILE_TEST
select STM32_DFSDM_CORE
select REGMAP_MMIO
+ select IIO_BUFFER
select IIO_BUFFER_HW_CONSUMER
+ select IIO_TRIGGERED_BUFFER
help
Select this option to support ADCSigma delta modulator for
STMicroelectronics STM32 digital filter for sigma delta converter.
@@ -956,7 +970,7 @@ config TI_ADS1015
config TI_ADS7950
tristate "Texas Instruments ADS7950 ADC driver"
- depends on SPI
+ depends on SPI && GPIOLIB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -967,6 +981,16 @@ config TI_ADS7950
To compile this driver as a module, choose M here: the
module will be called ti-ads7950.
+config TI_ADS8344
+ tristate "Texas Instruments ADS8344"
+ depends on SPI && OF
+ help
+ If you say yes here you get support for Texas Instruments ADS8344
+ ADC chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads8344.
+
config TI_ADS8688
tristate "Texas Instruments ADS8688"
depends on SPI && OF
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 6fcebd167524..ef9cc485fb67 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
obj-$(CONFIG_AD7606) += ad7606.o
obj-$(CONFIG_AD7766) += ad7766.o
obj-$(CONFIG_AD7768_1) += ad7768-1.o
+obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
@@ -87,6 +88,7 @@ obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
+obj-$(CONFIG_TI_ADS8344) += ti-ads8344.o
obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 7d5e5311d8de..659ef37d5fe8 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -411,7 +411,7 @@ static int ad7124_init_channel_vref(struct ad7124_state *st,
dev_err(&st->sd.spi->dev,
"Error, trying to use external voltage reference without a %s regulator.\n",
ad7124_ref_names[refsel]);
- return PTR_ERR(st->vref[refsel]);
+ return PTR_ERR(st->vref[refsel]);
}
st->channel_config[channel_number].vref_mv =
regulator_get_voltage(st->vref[refsel]);
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index ebb8de03bbce..24c70c3cefb4 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -31,7 +31,7 @@
* Scales are computed as 5000/32768 and 10000/32768 respectively,
* so that when applied to the raw values they provide mV values
*/
-static const unsigned int scale_avail[2] = {
+static const unsigned int ad7606_scale_avail[2] = {
152588, 305176
};
@@ -39,6 +39,10 @@ static const unsigned int ad7606_oversampling_avail[7] = {
1, 2, 4, 8, 16, 32, 64,
};
+static const unsigned int ad7616_oversampling_avail[8] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+};
+
static int ad7606_reset(struct ad7606_state *st)
{
if (st->gpio_reset) {
@@ -154,7 +158,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = scale_avail[st->range];
+ *val2 = st->scale_avail[st->range];
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
*val = st->oversampling;
@@ -163,21 +167,31 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static ssize_t in_voltage_scale_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ad7606_show_avail(char *buf, const unsigned int *vals,
+ unsigned int n, bool micros)
{
- int i, len = 0;
-
- for (i = 0; i < ARRAY_SIZE(scale_avail); i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- scale_avail[i]);
+ size_t len = 0;
+ int i;
+ for (i = 0; i < n; i++) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ micros ? "0.%06u " : "%u ", vals[i]);
+ }
buf[len - 1] = '\n';
return len;
}
+static ssize_t in_voltage_scale_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_show_avail(buf, st->scale_avail, st->num_scales, true);
+}
+
static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
static int ad7606_write_raw(struct iio_dev *indio_dev,
@@ -193,7 +207,7 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
mutex_lock(&st->lock);
- i = find_closest(val2, scale_avail, ARRAY_SIZE(scale_avail));
+ i = find_closest(val2, st->scale_avail, st->num_scales);
gpiod_set_value(st->gpio_range, i);
st->range = i;
mutex_unlock(&st->lock);
@@ -202,15 +216,20 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
if (val2)
return -EINVAL;
- i = find_closest(val, ad7606_oversampling_avail,
- ARRAY_SIZE(ad7606_oversampling_avail));
+ i = find_closest(val, st->oversampling_avail,
+ st->num_os_ratios);
values[0] = i;
mutex_lock(&st->lock);
gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
st->gpio_os->info, values);
- st->oversampling = ad7606_oversampling_avail[i];
+
+ /* AD7616 requires a reset to update value */
+ if (st->chip_info->os_req_reset)
+ ad7606_reset(st);
+
+ st->oversampling = st->oversampling_avail[i];
mutex_unlock(&st->lock);
return 0;
@@ -219,11 +238,23 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
}
}
-static IIO_CONST_ATTR(oversampling_ratio_available, "1 2 4 8 16 32 64");
+static ssize_t ad7606_oversampling_ratio_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_show_avail(buf, st->oversampling_avail,
+ st->num_os_ratios, false);
+}
+
+static IIO_DEVICE_ATTR(oversampling_ratio_available, 0444,
+ ad7606_oversampling_ratio_avail, NULL, 0);
static struct attribute *ad7606_attributes_os_and_range[] = {
&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
};
@@ -232,7 +263,7 @@ static const struct attribute_group ad7606_attribute_group_os_and_range = {
};
static struct attribute *ad7606_attributes_os[] = {
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
};
@@ -292,6 +323,36 @@ static const struct iio_chan_spec ad7606_channels[] = {
AD7606_CHANNEL(7),
};
+/*
+ * The current assumption that this driver makes for the AD7616 is that it is
+ * working in Hardware Mode with Serial, Burst and Sequencer modes activated.
+ * To activate them, the following pins must be pulled high:
+ * -SER/PAR
+ * -SEQEN
+ * And the following pins must be pulled low:
+ * -WR/BURST
+ * -DB4/SER1W
+ */
+static const struct iio_chan_spec ad7616_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(16),
+ AD7606_CHANNEL(0),
+ AD7606_CHANNEL(1),
+ AD7606_CHANNEL(2),
+ AD7606_CHANNEL(3),
+ AD7606_CHANNEL(4),
+ AD7606_CHANNEL(5),
+ AD7606_CHANNEL(6),
+ AD7606_CHANNEL(7),
+ AD7606_CHANNEL(8),
+ AD7606_CHANNEL(9),
+ AD7606_CHANNEL(10),
+ AD7606_CHANNEL(11),
+ AD7606_CHANNEL(12),
+ AD7606_CHANNEL(13),
+ AD7606_CHANNEL(14),
+ AD7606_CHANNEL(15),
+};
+
static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
/* More devices added in future */
[ID_AD7605_4] = {
@@ -301,17 +362,27 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
[ID_AD7606_8] = {
.channels = ad7606_channels,
.num_channels = 9,
- .has_oversampling = true,
+ .oversampling_avail = ad7606_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
},
[ID_AD7606_6] = {
.channels = ad7606_channels,
.num_channels = 7,
- .has_oversampling = true,
+ .oversampling_avail = ad7606_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
},
[ID_AD7606_4] = {
.channels = ad7606_channels,
.num_channels = 5,
- .has_oversampling = true,
+ .oversampling_avail = ad7606_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
+ },
+ [ID_AD7616] = {
+ .channels = ad7616_channels,
+ .num_channels = 17,
+ .oversampling_avail = ad7616_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7616_oversampling_avail),
+ .os_req_reset = true,
},
};
@@ -343,7 +414,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
if (IS_ERR(st->gpio_frstdata))
return PTR_ERR(st->gpio_frstdata);
- if (!st->chip_info->has_oversampling)
+ if (!st->chip_info->oversampling_num)
return 0;
st->gpio_os = devm_gpiod_get_array_optional(dev,
@@ -467,6 +538,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
/* tied to logic low, analog input range is +/- 5V */
st->range = 0;
st->oversampling = 1;
+ st->scale_avail = ad7606_scale_avail;
+ st->num_scales = ARRAY_SIZE(ad7606_scale_avail);
st->reg = devm_regulator_get(dev, "avcc");
if (IS_ERR(st->reg))
@@ -484,6 +557,11 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
st->chip_info = &ad7606_chip_info_tbl[id];
+ if (st->chip_info->oversampling_num) {
+ st->oversampling_avail = st->chip_info->oversampling_avail;
+ st->num_os_ratios = st->chip_info->oversampling_num;
+ }
+
ret = ad7606_request_gpios(st);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 5d12410f68e1..f9ef52131e74 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -12,12 +12,17 @@
* struct ad7606_chip_info - chip specific information
* @channels: channel specification
* @num_channels: number of channels
- * @has_oversampling: whether the device has oversampling support
+ * @oversampling_avail: pointer to the array which stores the available
+ * oversampling ratios.
+ * @oversampling_num: number of elements stored in oversampling_avail array
+ * @os_req_reset: some devices require a reset to update oversampling
*/
struct ad7606_chip_info {
const struct iio_chan_spec *channels;
unsigned int num_channels;
- bool has_oversampling;
+ const unsigned int *oversampling_avail;
+ unsigned int oversampling_num;
+ bool os_req_reset;
};
/**
@@ -29,6 +34,11 @@ struct ad7606_chip_info {
* @range voltage range selection, selects which scale to apply
* @oversampling oversampling selection
* @base_address address from where to read data in parallel operation
+ * @scale_avail: pointer to the array which stores the available scales
+ * @num_scales: number of elements stored in the scale_avail array
+ * @oversampling_avail: pointer to the array which stores the available
+ * oversampling ratios.
+ * @num_os_ratios: number of elements stored in oversampling_avail array
* @lock protect sensor state from concurrent accesses to GPIOs
* @gpio_convst GPIO descriptor for conversion start signal (CONVST)
* @gpio_reset GPIO descriptor for device hard-reset
@@ -50,6 +60,10 @@ struct ad7606_state {
unsigned int range;
unsigned int oversampling;
void __iomem *base_address;
+ const unsigned int *scale_avail;
+ unsigned int num_scales;
+ const unsigned int *oversampling_avail;
+ unsigned int num_os_ratios;
struct mutex lock; /* protect sensor state */
struct gpio_desc *gpio_convst;
@@ -64,9 +78,9 @@ struct ad7606_state {
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
- * 8 * 16-bit samples + 64-bit timestamp
+ * 16 * 16-bit samples + 64-bit timestamp
*/
- unsigned short data[12] ____cacheline_aligned;
+ unsigned short data[20] ____cacheline_aligned;
};
/**
@@ -86,7 +100,8 @@ enum ad7606_supported_device_ids {
ID_AD7605_4,
ID_AD7606_8,
ID_AD7606_6,
- ID_AD7606_4
+ ID_AD7606_4,
+ ID_AD7616,
};
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index 4fd0ec36a086..b7faef69a58f 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -53,6 +53,7 @@ static const struct spi_device_id ad7606_id_table[] = {
{ "ad7606-4", ID_AD7606_4 },
{ "ad7606-6", ID_AD7606_6 },
{ "ad7606-8", ID_AD7606_8 },
+ { "ad7616", ID_AD7616 },
{}
};
MODULE_DEVICE_TABLE(spi, ad7606_id_table);
@@ -62,6 +63,7 @@ static const struct of_device_id ad7606_of_match[] = {
{ .compatible = "adi,ad7606-4" },
{ .compatible = "adi,ad7606-6" },
{ .compatible = "adi,ad7606-8" },
+ { .compatible = "adi,ad7616" },
{ },
};
MODULE_DEVICE_TABLE(of, ad7606_of_match);
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index c4a85789c2db..217a5a5c3c6d 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD7170/AD7171 and AD7780/AD7781 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
+ * Copyright 2019 Renato Lui Geh
*/
#include <linux/interrupt.h>
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/bits.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -28,16 +29,25 @@
#define AD7780_ID1 BIT(4)
#define AD7780_ID0 BIT(3)
#define AD7780_GAIN BIT(2)
-#define AD7780_PAT1 BIT(1)
-#define AD7780_PAT0 BIT(0)
-#define AD7780_PATTERN (AD7780_PAT0)
-#define AD7780_PATTERN_MASK (AD7780_PAT0 | AD7780_PAT1)
+#define AD7170_ID 0
+#define AD7171_ID 1
+#define AD7780_ID 1
+#define AD7781_ID 0
+
+#define AD7780_ID_MASK (AD7780_ID0 | AD7780_ID1)
+
+#define AD7780_PATTERN_GOOD 1
+#define AD7780_PATTERN_MASK GENMASK(1, 0)
-#define AD7170_PAT2 BIT(2)
+#define AD7170_PATTERN_GOOD 5
+#define AD7170_PATTERN_MASK GENMASK(2, 0)
-#define AD7170_PATTERN (AD7780_PAT0 | AD7170_PAT2)
-#define AD7170_PATTERN_MASK (AD7780_PAT0 | AD7780_PAT1 | AD7170_PAT2)
+#define AD7780_GAIN_MIDPOINT 64
+#define AD7780_FILTER_MIDPOINT 13350
+
+static const unsigned int ad778x_gain[2] = { 1, 128 };
+static const unsigned int ad778x_odr_avail[2] = { 10000, 16700 };
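
A small userspace sketch (not part of the patch) showing what the new GENMASK()-based pattern macros evaluate to and how a status word is validated against them, mirroring the check done in the driver's postprocess callback. GENMASK() is re-declared locally as a 32-bit approximation and the sample status byte is an assumption.

#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define AD7780_PATTERN_GOOD	1
#define AD7780_PATTERN_MASK	GENMASK(1, 0)	/* 0x3 */
#define AD7170_PATTERN_GOOD	5
#define AD7170_PATTERN_MASK	GENMASK(2, 0)	/* 0x7 */

int main(void)
{
	unsigned int raw_sample = 0x2d;		/* assumed status byte */

	printf("ad778x pattern ok: %d\n",
	       (raw_sample & AD7780_PATTERN_MASK) == AD7780_PATTERN_GOOD);
	printf("ad717x pattern ok: %d\n",
	       (raw_sample & AD7170_PATTERN_MASK) == AD7170_PATTERN_GOOD);
	return 0;
}
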
struct ad7780_chip_info {
struct iio_chan_spec channel;
@@ -50,7 +60,11 @@ struct ad7780_state {
const struct ad7780_chip_info *chip_info;
struct regulator *reg;
struct gpio_desc *powerdown_gpio;
- unsigned int gain;
+ struct gpio_desc *gain_gpio;
+ struct gpio_desc *filter_gpio;
+ unsigned int gain;
+ unsigned int odr;
+ unsigned int int_vref_mv;
struct ad_sigma_delta sd;
};
@@ -104,17 +118,69 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
voltage_uv = regulator_get_voltage(st->reg);
if (voltage_uv < 0)
return voltage_uv;
- *val = (voltage_uv / 1000) * st->gain;
+ voltage_uv /= 1000;
+ *val = voltage_uv * st->gain;
*val2 = chan->scan_type.realbits - 1;
+ st->int_vref_mv = voltage_uv;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
*val = -(1 << (chan->scan_type.realbits - 1));
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->odr;
+ return IIO_VAL_INT;
+ default:
+ break;
}
return -EINVAL;
}
+static int ad7780_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long m)
+{
+ struct ad7780_state *st = iio_priv(indio_dev);
+ const struct ad7780_chip_info *chip_info = st->chip_info;
+ unsigned long long vref;
+ unsigned int full_scale, gain;
+
+ if (!chip_info->is_ad778x)
+ return -EINVAL;
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val != 0)
+ return -EINVAL;
+
+ vref = st->int_vref_mv * 1000000LL;
+ full_scale = 1 << (chip_info->channel.scan_type.realbits - 1);
+ gain = DIV_ROUND_CLOSEST_ULL(vref, full_scale);
+ gain = DIV_ROUND_CLOSEST(gain, val2);
+ st->gain = gain;
+ if (gain < AD7780_GAIN_MIDPOINT)
+ gain = 0;
+ else
+ gain = 1;
+ gpiod_set_value(st->gain_gpio, gain);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (1000 * val + val2 / 1000 < AD7780_FILTER_MIDPOINT)
+ val = 0;
+ else
+ val = 1;
+ st->odr = ad778x_odr_avail[val];
+ gpiod_set_value(st->filter_gpio, val);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int ad7780_postprocess_sample(struct ad_sigma_delta *sigma_delta,
unsigned int raw_sample)
{
@@ -126,10 +192,8 @@ static int ad7780_postprocess_sample(struct ad_sigma_delta *sigma_delta,
return -EIO;
if (chip_info->is_ad778x) {
- if (raw_sample & AD7780_GAIN)
- st->gain = 1;
- else
- st->gain = 128;
+ st->gain = ad778x_gain[raw_sample & AD7780_GAIN];
+ st->odr = ad778x_odr_avail[raw_sample & AD7780_FILTER];
}
return 0;
@@ -142,30 +206,32 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
};
#define AD7780_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL_NO_SAMP_FREQ(1, 0, 0, bits, 32, wordsize - bits)
+ AD_SD_CHANNEL(1, 0, 0, bits, 32, (wordsize) - (bits))
+#define AD7170_CHANNEL(bits, wordsize) \
+ AD_SD_CHANNEL_NO_SAMP_FREQ(1, 0, 0, bits, 32, (wordsize) - (bits))
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7170] = {
- .channel = AD7780_CHANNEL(12, 24),
- .pattern = AD7170_PATTERN,
+ .channel = AD7170_CHANNEL(12, 24),
+ .pattern = AD7170_PATTERN_GOOD,
.pattern_mask = AD7170_PATTERN_MASK,
.is_ad778x = false,
},
[ID_AD7171] = {
- .channel = AD7780_CHANNEL(16, 24),
- .pattern = AD7170_PATTERN,
+ .channel = AD7170_CHANNEL(16, 24),
+ .pattern = AD7170_PATTERN_GOOD,
.pattern_mask = AD7170_PATTERN_MASK,
.is_ad778x = false,
},
[ID_AD7780] = {
.channel = AD7780_CHANNEL(24, 32),
- .pattern = AD7780_PATTERN,
+ .pattern = AD7780_PATTERN_GOOD,
.pattern_mask = AD7780_PATTERN_MASK,
.is_ad778x = true,
},
[ID_AD7781] = {
.channel = AD7780_CHANNEL(20, 32),
- .pattern = AD7780_PATTERN,
+ .pattern = AD7780_PATTERN_GOOD,
.pattern_mask = AD7780_PATTERN_MASK,
.is_ad778x = true,
},
@@ -173,8 +239,47 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
static const struct iio_info ad7780_info = {
.read_raw = ad7780_read_raw,
+ .write_raw = ad7780_write_raw,
};
+static int ad7780_init_gpios(struct device *dev, struct ad7780_state *st)
+{
+ int ret;
+
+ st->powerdown_gpio = devm_gpiod_get_optional(dev,
+ "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->powerdown_gpio)) {
+ ret = PTR_ERR(st->powerdown_gpio);
+ dev_err(dev, "Failed to request powerdown GPIO: %d\n", ret);
+ return ret;
+ }
+
+ if (!st->chip_info->is_ad778x)
+ return 0;
+
+ st->gain_gpio = devm_gpiod_get_optional(dev,
+ "adi,gain",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->gain_gpio)) {
+ ret = PTR_ERR(st->gain_gpio);
+ dev_err(dev, "Failed to request gain GPIO: %d\n", ret);
+ return ret;
+ }
+
+ st->filter_gpio = devm_gpiod_get_optional(dev,
+ "adi,filter",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->filter_gpio)) {
+ ret = PTR_ERR(st->filter_gpio);
+ dev_err(dev, "Failed to request filter GPIO: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ad7780_probe(struct spi_device *spi)
{
struct ad7780_state *st;
@@ -190,16 +295,6 @@ static int ad7780_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info);
- st->reg = devm_regulator_get(&spi->dev, "avdd");
- if (IS_ERR(st->reg))
- return PTR_ERR(st->reg);
-
- ret = regulator_enable(st->reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
- return ret;
- }
-
st->chip_info =
&ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -212,14 +307,18 @@ static int ad7780_probe(struct spi_device *spi)
indio_dev->num_channels = 1;
indio_dev->info = &ad7780_info;
- st->powerdown_gpio = devm_gpiod_get_optional(&spi->dev,
- "powerdown",
- GPIOD_OUT_LOW);
- if (IS_ERR(st->powerdown_gpio)) {
- ret = PTR_ERR(st->powerdown_gpio);
- dev_err(&spi->dev, "Failed to request powerdown GPIO: %d\n",
- ret);
- goto error_disable_reg;
+ ret = ad7780_init_gpios(&spi->dev, st);
+ if (ret)
+ goto error_cleanup_buffer_and_trigger;
+
+ st->reg = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
+ return ret;
}
ret = ad_sd_setup_buffer_and_trigger(indio_dev);
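
Plain-C illustration (not part of the patch) of the gain-selection arithmetic the new ad7780_write_raw() performs before driving the "adi,gain" GPIO: derive the scale implied by the reference and full-scale code, divide by the requested micro-scale, and compare against AD7780_GAIN_MIDPOINT. The reference voltage, resolution and requested scale are assumed values; DIV_ROUND_CLOSEST is re-declared locally.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define AD7780_GAIN_MIDPOINT	64

int main(void)
{
	unsigned int int_vref_mv = 2500;		/* assumed reference, mV */
	unsigned int realbits = 24;			/* AD7780 resolution */
	unsigned long long vref = int_vref_mv * 1000000ULL;
	unsigned int full_scale = 1u << (realbits - 1);
	unsigned int val2 = 2;				/* requested micro-scale */
	unsigned int gain;

	gain = DIV_ROUND_CLOSEST(vref, full_scale);	/* scale at gain 1 */
	gain = DIV_ROUND_CLOSEST(gain, val2);		/* implied gain */
	printf("implied gain %u -> gpiod_set_value(gain_gpio, %u)\n",
	       gain, gain < AD7780_GAIN_MIDPOINT ? 0 : 1);
	return 0;
}
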
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index d62dbb62be45..cb7b854df00c 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -24,9 +24,9 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#define AD7923_WRITE_CR (1 << 11) /* write control register */
-#define AD7923_RANGE (1 << 1) /* range to REFin */
-#define AD7923_CODING (1 << 0) /* coding is straight binary */
+#define AD7923_WRITE_CR BIT(11) /* write control register */
+#define AD7923_RANGE BIT(1) /* range to REFin */
+#define AD7923_CODING BIT(0) /* coding is straight binary */
#define AD7923_PM_MODE_AS (1) /* auto shutdown */
#define AD7923_PM_MODE_FS (2) /* full shutdown */
#define AD7923_PM_MODE_OPS (3) /* normal operation */
@@ -40,16 +40,16 @@
#define AD7923_MAX_CHAN 4
-#define AD7923_PM_MODE_WRITE(mode) (mode << 4) /* write mode */
-#define AD7923_CHANNEL_WRITE(channel) (channel << 6) /* write channel */
-#define AD7923_SEQUENCE_WRITE(sequence) (((sequence & 1) << 3) \
- + ((sequence & 2) << 9))
+#define AD7923_PM_MODE_WRITE(mode) ((mode) << 4) /* write mode */
+#define AD7923_CHANNEL_WRITE(channel) ((channel) << 6) /* write channel */
+#define AD7923_SEQUENCE_WRITE(sequence) ((((sequence) & 1) << 3) \
+ + (((sequence) & 2) << 9))
/* write sequence function */
/* left shift for CR: bit 11 is transmitted first */
#define AD7923_SHIFT_REGISTER 4
/* val = value, dec = left shift, bits = number of bits of the mask */
-#define EXTRACT(val, dec, bits) ((val >> dec) & ((1 << bits) - 1))
+#define EXTRACT(val, dec, bits) (((val) >> (dec)) & ((1 << (bits)) - 1))
struct ad7923_state {
struct spi_device *spi;
@@ -130,7 +130,7 @@ static const struct ad7923_chip_info ad7923_chip_info[] = {
* ad7923_update_scan_mode() setup the spi transfer buffer for the new scan mask
**/
static int ad7923_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *active_scan_mask)
+ const unsigned long *active_scan_mask)
{
struct ad7923_state *st = iio_priv(indio_dev);
int i, cmd, len;
@@ -181,7 +181,7 @@ static irqreturn_t ad7923_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns(indio_dev));
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -272,7 +272,7 @@ static int ad7923_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (indio_dev == NULL)
+ if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
@@ -314,7 +314,7 @@ static int ad7923_probe(struct spi_device *spi)
return ret;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
- &ad7923_trigger_handler, NULL);
+ &ad7923_trigger_handler, NULL);
if (ret)
goto error_disable_reg;
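
The macro changes above add parentheses around macro parameters. A tiny standalone example (not from the driver) of why that matters when an argument is itself an expression:

#include <stdio.h>

#define EXTRACT_OLD(val, dec, bits)	((val >> dec) & ((1 << bits) - 1))
#define EXTRACT_NEW(val, dec, bits)	(((val) >> (dec)) & ((1 << (bits)) - 1))

int main(void)
{
	unsigned int hi = 0x0A00, lo = 0xBC;

	/* old form expands to (hi | (lo >> 4)) & 0xFF: prints 0xb */
	printf("old: 0x%x\n", EXTRACT_OLD(hi | lo, 4, 8));
	/* new form extracts from the full value: prints 0xab */
	printf("new: 0x%x\n", EXTRACT_NEW(hi | lo, 4, 8));
	return 0;
}
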
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index ff5f2da2e1b1..a4310600a853 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -62,7 +62,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
struct spi_transfer t = {
.tx_buf = data,
.len = size + 1,
- .cs_change = sigma_delta->bus_locked,
+ .cs_change = sigma_delta->keep_cs_asserted,
};
struct spi_message m;
int ret;
@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
if (sigma_delta->info->has_registers) {
data[0] = reg << sigma_delta->info->addr_shift;
data[0] |= sigma_delta->info->read_mask;
+ data[0] |= sigma_delta->comm;
spi_message_add_tail(&t[0], &m);
}
spi_message_add_tail(&t[1], &m);
@@ -217,6 +218,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
+ sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
ret = ad_sigma_delta_set_mode(sigma_delta, mode);
@@ -234,9 +236,10 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
ret = 0;
}
out:
+ sigma_delta->keep_cs_asserted = false;
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->master);
- ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
return ret;
}
@@ -289,6 +292,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
+ sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
@@ -298,9 +302,6 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
ret = wait_for_completion_interruptible_timeout(
&sigma_delta->completion, HZ);
- sigma_delta->bus_locked = false;
- spi_bus_unlock(sigma_delta->spi->master);
-
if (ret == 0)
ret = -EIO;
if (ret < 0)
@@ -321,7 +322,10 @@ out:
sigma_delta->irq_dis = true;
}
+ sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ sigma_delta->bus_locked = false;
+ spi_bus_unlock(sigma_delta->spi->master);
mutex_unlock(&indio_dev->mlock);
if (ret)
@@ -358,6 +362,8 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
+ sigma_delta->keep_cs_asserted = true;
+
ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
if (ret)
goto err_unlock;
@@ -386,6 +392,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
sigma_delta->irq_dis = true;
}
+ sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
sigma_delta->bus_locked = false;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 75d2f73582a3..596841a3c4db 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
ret = wait_event_interruptible_timeout(st->wq_data_avail,
st->done,
msecs_to_jiffies(1000));
- if (ret == 0)
- ret = -ETIMEDOUT;
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
-
- *val = st->last_value;
+ /* Disable interrupts, regardless of whether the ADC
+ * conversion was successful or not
+ */
at91_adc_writel(st, AT91_ADC_CHDR,
AT91_ADC_CH(chan->channel));
at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
- st->last_value = 0;
- st->done = false;
+ if (ret > 0) {
+ /* a valid conversion took place */
+ *val = st->last_value;
+ st->last_value = 0;
+ st->done = false;
+ ret = IIO_VAL_INT;
+ } else if (ret == 0) {
+ /* conversion timeout */
+ dev_err(&idev->dev, "ADC Channel %d timeout.\n",
+ chan->channel);
+ ret = -ETIMEDOUT;
+ }
+
mutex_unlock(&st->lock);
- return IIO_VAL_INT;
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_mv;
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index ad6764fb2a23..958a34dd88ac 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -388,8 +388,9 @@ static irqreturn_t imx7d_adc_isr(int irq, void *dev_id)
* timeout flags.
*/
if (status & IMX7D_REG_ADC_INT_STATUS_CHANNEL_CONV_TIME_OUT) {
- pr_err("%s: ADC got conversion time out interrupt: 0x%08x\n",
- dev_name(info->dev), status);
+ dev_err(info->dev,
+ "ADC got conversion time out interrupt: 0x%08x\n",
+ status);
status &= ~IMX7D_REG_ADC_INT_STATUS_CHANNEL_CONV_TIME_OUT;
writel(status, info->regs + IMX7D_REG_ADC_INT_STATUS);
}
@@ -433,167 +434,139 @@ static void imx7d_adc_power_down(struct imx7d_adc *info)
writel(adc_cfg, info->regs + IMX7D_REG_ADC_ADC_CFG);
}
+static int imx7d_adc_enable(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct imx7d_adc *info = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(info->vref);
+ if (ret) {
+ dev_err(info->dev,
+ "Can't enable adc reference top voltage, err = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret) {
+ dev_err(info->dev,
+ "Could not prepare or enable clock.\n");
+ regulator_disable(info->vref);
+ return ret;
+ }
+
+ imx7d_adc_hw_init(info);
+
+ return 0;
+}
+
+static int imx7d_adc_disable(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct imx7d_adc *info = iio_priv(indio_dev);
+
+ imx7d_adc_power_down(info);
+
+ clk_disable_unprepare(info->clk);
+ regulator_disable(info->vref);
+
+ return 0;
+}
+
+static void __imx7d_adc_disable(void *data)
+{
+ imx7d_adc_disable(data);
+}
+
static int imx7d_adc_probe(struct platform_device *pdev)
{
struct imx7d_adc *info;
struct iio_dev *indio_dev;
- struct resource *mem;
+ struct device *dev = &pdev->dev;
int irq;
int ret;
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
if (!indio_dev) {
dev_err(&pdev->dev, "Failed allocating iio device\n");
return -ENOMEM;
}
info = iio_priv(indio_dev);
- info->dev = &pdev->dev;
+ info->dev = dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs)) {
ret = PTR_ERR(info->regs);
- dev_err(&pdev->dev,
- "Failed to remap adc memory, err = %d\n", ret);
+ dev_err(dev, "Failed to remap adc memory, err = %d\n", ret);
return ret;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "No irq resource?\n");
+ dev_err(dev, "No irq resource?\n");
return irq;
}
- info->clk = devm_clk_get(&pdev->dev, "adc");
+ info->clk = devm_clk_get(dev, "adc");
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
- dev_err(&pdev->dev, "Failed getting clock, err = %d\n", ret);
+ dev_err(dev, "Failed getting clock, err = %d\n", ret);
return ret;
}
- info->vref = devm_regulator_get(&pdev->dev, "vref");
+ info->vref = devm_regulator_get(dev, "vref");
if (IS_ERR(info->vref)) {
ret = PTR_ERR(info->vref);
- dev_err(&pdev->dev,
+ dev_err(dev,
"Failed getting reference voltage, err = %d\n", ret);
return ret;
}
- ret = regulator_enable(info->vref);
- if (ret) {
- dev_err(&pdev->dev,
- "Can't enable adc reference top voltage, err = %d\n",
- ret);
- return ret;
- }
-
platform_set_drvdata(pdev, indio_dev);
init_completion(&info->completion);
- indio_dev->name = dev_name(&pdev->dev);
- indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
indio_dev->info = &imx7d_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = imx7d_adc_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(imx7d_adc_iio_channels);
- ret = clk_prepare_enable(info->clk);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not prepare or enable the clock.\n");
- goto error_adc_clk_enable;
- }
-
- ret = devm_request_irq(info->dev, irq,
- imx7d_adc_isr, 0,
- dev_name(&pdev->dev), info);
+ ret = devm_request_irq(dev, irq,
+ imx7d_adc_isr, 0,
+ dev_name(dev), info);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed requesting irq, irq = %d\n", irq);
- goto error_iio_device_register;
+ dev_err(dev, "Failed requesting irq, irq = %d\n", irq);
+ return ret;
}
imx7d_adc_feature_config(info);
- imx7d_adc_hw_init(info);
-
- ret = iio_device_register(indio_dev);
- if (ret) {
- imx7d_adc_power_down(info);
- dev_err(&pdev->dev, "Couldn't register the device.\n");
- goto error_iio_device_register;
- }
-
- return 0;
-
-error_iio_device_register:
- clk_disable_unprepare(info->clk);
-error_adc_clk_enable:
- regulator_disable(info->vref);
-
- return ret;
-}
-
-static int imx7d_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct imx7d_adc *info = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- imx7d_adc_power_down(info);
-
- clk_disable_unprepare(info->clk);
- regulator_disable(info->vref);
-
- return 0;
-}
-
-static int __maybe_unused imx7d_adc_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct imx7d_adc *info = iio_priv(indio_dev);
-
- imx7d_adc_power_down(info);
-
- clk_disable_unprepare(info->clk);
- regulator_disable(info->vref);
-
- return 0;
-}
-static int __maybe_unused imx7d_adc_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct imx7d_adc *info = iio_priv(indio_dev);
- int ret;
+ ret = imx7d_adc_enable(&indio_dev->dev);
+ if (ret)
+ return ret;
- ret = regulator_enable(info->vref);
- if (ret) {
- dev_err(info->dev,
- "Can't enable adc reference top voltage, err = %d\n",
- ret);
+ ret = devm_add_action_or_reset(dev, __imx7d_adc_disable,
+ &indio_dev->dev);
+ if (ret)
return ret;
- }
- ret = clk_prepare_enable(info->clk);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(info->dev,
- "Could not prepare or enable clock.\n");
- regulator_disable(info->vref);
+ dev_err(&pdev->dev, "Couldn't register the device.\n");
return ret;
}
- imx7d_adc_hw_init(info);
-
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx7d_adc_pm_ops, imx7d_adc_suspend, imx7d_adc_resume);
+static SIMPLE_DEV_PM_OPS(imx7d_adc_pm_ops, imx7d_adc_disable, imx7d_adc_enable);
static struct platform_driver imx7d_adc_driver = {
.probe = imx7d_adc_probe,
- .remove = imx7d_adc_remove,
.driver = {
.name = "imx7d_adc",
.of_match_table = imx7d_adc_match,
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index 6ee1673deb0d..92b1d5037ac9 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -302,10 +302,8 @@ static int ingenic_adc_probe(struct platform_device *pdev)
mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
adc->base = devm_ioremap_resource(dev, mem_base);
- if (IS_ERR(adc->base)) {
- dev_err(dev, "Unable to ioremap mmio resource\n");
+ if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
- }
adc->clk = devm_clk_get(dev, "adc");
if (IS_ERR(adc->clk)) {
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index e361c1532a75..a6ee1c3a9064 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -7,20 +7,15 @@
* Copyright (C) 2011, 2012 Roland Stigge <stigge@antcom.de>
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/err.h>
#include <linux/completion.h>
-#include <linux/of.h>
-
+#include <linux/err.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
/*
* LPC32XX registers definitions
@@ -52,6 +47,7 @@ struct lpc32xx_adc_state {
void __iomem *adc_base;
struct clk *clk;
struct completion completion;
+ struct regulator *vref;
u32 value;
};
@@ -64,7 +60,9 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
{
struct lpc32xx_adc_state *st = iio_priv(indio_dev);
int ret;
- if (mask == IIO_CHAN_INFO_RAW) {
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
mutex_lock(&indio_dev->mlock);
ret = clk_prepare_enable(st->clk);
if (ret) {
@@ -84,22 +82,36 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- }
- return -EINVAL;
+ case IIO_CHAN_INFO_SCALE:
+ *val = regulator_get_voltage(st->vref) / 1000;
+ *val2 = 10;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
}
static const struct iio_info lpc32xx_adc_iio_info = {
.read_raw = &lpc32xx_read_raw,
};
-#define LPC32XX_ADC_CHANNEL(_index) { \
+#define LPC32XX_ADC_CHANNEL_BASE(_index) \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = _index, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.address = LPC32XXAD_IN * _index, \
- .scan_index = _index, \
+ .scan_index = _index,
+
+#define LPC32XX_ADC_CHANNEL(_index) { \
+ LPC32XX_ADC_CHANNEL_BASE(_index) \
+}
+
+#define LPC32XX_ADC_SCALE_CHANNEL(_index) { \
+ LPC32XX_ADC_CHANNEL_BASE(_index) \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
static const struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
@@ -108,6 +120,12 @@ static const struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
LPC32XX_ADC_CHANNEL(2),
};
+static const struct iio_chan_spec lpc32xx_adc_iio_scale_channels[] = {
+ LPC32XX_ADC_SCALE_CHANNEL(0),
+ LPC32XX_ADC_SCALE_CHANNEL(1),
+ LPC32XX_ADC_SCALE_CHANNEL(2),
+};
+
static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
{
struct lpc32xx_adc_state *st = dev_id;
@@ -166,6 +184,15 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
return retval;
}
+ st->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(st->vref)) {
+ iodev->channels = lpc32xx_adc_iio_channels;
+ dev_info(&pdev->dev,
+ "Missing vref regulator: No scaling available\n");
+ } else {
+ iodev->channels = lpc32xx_adc_iio_scale_channels;
+ }
+
platform_set_drvdata(pdev, iodev);
init_completion(&st->completion);
@@ -174,7 +201,6 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
iodev->dev.parent = &pdev->dev;
iodev->info = &lpc32xx_adc_iio_info;
iodev->modes = INDIO_DIRECT_MODE;
- iodev->channels = lpc32xx_adc_iio_channels;
iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
retval = devm_iio_device_register(&pdev->dev, iodev);
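
What the new IIO_CHAN_INFO_SCALE case reports: with IIO_VAL_FRACTIONAL_LOG2, val = vref in mV and val2 = 10, the scale is vref_mv / 2^10 mV per LSB for this 10-bit converter. A standalone sketch (not part of the patch) with assumed vref and raw values:

#include <stdio.h>

int main(void)
{
	unsigned int vref_mv = 3300;	/* assumed vref regulator voltage */
	unsigned int raw = 512;		/* assumed in_voltage_raw reading */
	double scale_mv = (double)vref_mv / (1 << 10);

	printf("scale = %.6f mV/LSB, input = %.3f mV\n",
	       scale_mv, raw * scale_mv);
	return 0;
}
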
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index f8600fbcdfe3..510d8b7ef3a0 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1150,6 +1150,11 @@ static const struct meson_sar_adc_data meson_sar_adc_axg_data = {
.name = "meson-axg-saradc",
};
+static const struct meson_sar_adc_data meson_sar_adc_g12a_data = {
+ .param = &meson_sar_adc_gxl_param,
+ .name = "meson-g12a-saradc",
+};
+
static const struct of_device_id meson_sar_adc_of_match[] = {
{
.compatible = "amlogic,meson8-saradc",
@@ -1175,6 +1180,9 @@ static const struct of_device_id meson_sar_adc_of_match[] = {
}, {
.compatible = "amlogic,meson-axg-saradc",
.data = &meson_sar_adc_axg_data,
+ }, {
+ .compatible = "amlogic,meson-g12a-saradc",
+ .data = &meson_sar_adc_g12a_data,
},
{},
};
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index c627513d9f0f..5384472b6c4d 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -465,6 +465,8 @@ static int mxs_lradc_adc_trigger_init(struct iio_dev *iio)
trig = devm_iio_trigger_alloc(&iio->dev, "%s-dev%i", iio->name,
iio->id);
+ if (!trig)
+ return -ENOMEM;
trig->dev.parent = adc->dev;
iio_trigger_set_drvdata(trig, iio);
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 6a866cc187f7..21fdcde77883 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -664,6 +664,7 @@ static const struct of_device_id adc5_match_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, adc5_match_table);
static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
{
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index fcd4a1c00ca0..19adc2b23472 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -12,6 +12,11 @@
#include <linux/iio/buffer.h>
#include <linux/iio/hw-consumer.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/timer/stm32-lptim-trigger.h>
+#include <linux/iio/timer/stm32-timer-trigger.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -38,6 +43,11 @@
#define DFSDM_MAX_RES BIT(31)
#define DFSDM_DATA_RES BIT(23)
+/* Filter configuration */
+#define DFSDM_CR1_CFG_MASK (DFSDM_CR1_RCH_MASK | DFSDM_CR1_RCONT_MASK | \
+ DFSDM_CR1_RSYNC_MASK | DFSDM_CR1_JSYNC_MASK | \
+ DFSDM_CR1_JSCAN_MASK)
+
enum sd_converter_type {
DFSDM_AUDIO,
DFSDM_IIO,
@@ -54,6 +64,8 @@ struct stm32_dfsdm_adc {
struct stm32_dfsdm *dfsdm;
const struct stm32_dfsdm_dev_data *dev_data;
unsigned int fl_id;
+ unsigned int nconv;
+ unsigned long smask;
/* ADC specific */
unsigned int oversamp;
@@ -114,6 +126,61 @@ static int stm32_dfsdm_str2val(const char *str,
return -EINVAL;
}
+/**
+ * struct stm32_dfsdm_trig_info - DFSDM trigger info
+ * @name: name of the trigger, corresponding to its source
+ * @jextsel: trigger signal selection
+ */
+struct stm32_dfsdm_trig_info {
+ const char *name;
+ unsigned int jextsel;
+};
+
+/* hardware injected trigger enable, edge selection */
+enum stm32_dfsdm_jexten {
+ STM32_DFSDM_JEXTEN_DISABLED,
+ STM32_DFSDM_JEXTEN_RISING_EDGE,
+ STM32_DFSDM_JEXTEN_FALLING_EDGE,
+ STM32_DFSDM_EXTEN_BOTH_EDGES,
+};
+
+static const struct stm32_dfsdm_trig_info stm32_dfsdm_trigs[] = {
+ { TIM1_TRGO, 0 },
+ { TIM1_TRGO2, 1 },
+ { TIM8_TRGO, 2 },
+ { TIM8_TRGO2, 3 },
+ { TIM3_TRGO, 4 },
+ { TIM4_TRGO, 5 },
+ { TIM16_OC1, 6 },
+ { TIM6_TRGO, 7 },
+ { TIM7_TRGO, 8 },
+ { LPTIM1_OUT, 26 },
+ { LPTIM2_OUT, 27 },
+ { LPTIM3_OUT, 28 },
+ {},
+};
+
+static int stm32_dfsdm_get_jextsel(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ int i;
+
+ /* lookup triggers registered by stm32 timer trigger driver */
+ for (i = 0; stm32_dfsdm_trigs[i].name; i++) {
+ /*
+ * Checking both stm32 timer trigger type and trig name
+ * should be safe against arbitrary trigger names.
+ */
+ if ((is_stm32_timer_trigger(trig) ||
+ is_stm32_lptim_trigger(trig)) &&
+ !strcmp(stm32_dfsdm_trigs[i].name, trig->name)) {
+ return stm32_dfsdm_trigs[i].jextsel;
+ }
+ }
+
+ return -EINVAL;
+}
+
static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
unsigned int fast, unsigned int oversamp)
{
@@ -200,19 +267,39 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
return 0;
}
-static int stm32_dfsdm_start_channel(struct stm32_dfsdm *dfsdm,
- unsigned int ch_id)
+static int stm32_dfsdm_start_channel(struct stm32_dfsdm_adc *adc)
{
- return regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
- DFSDM_CHCFGR1_CHEN_MASK,
- DFSDM_CHCFGR1_CHEN(1));
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ const struct iio_chan_spec *chan;
+ unsigned int bit;
+ int ret;
+
+ for_each_set_bit(bit, &adc->smask, sizeof(adc->smask) * BITS_PER_BYTE) {
+ chan = indio_dev->channels + bit;
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(chan->channel),
+ DFSDM_CHCFGR1_CHEN_MASK,
+ DFSDM_CHCFGR1_CHEN(1));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
-static void stm32_dfsdm_stop_channel(struct stm32_dfsdm *dfsdm,
- unsigned int ch_id)
+static void stm32_dfsdm_stop_channel(struct stm32_dfsdm_adc *adc)
{
- regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
- DFSDM_CHCFGR1_CHEN_MASK, DFSDM_CHCFGR1_CHEN(0));
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ const struct iio_chan_spec *chan;
+ unsigned int bit;
+
+ for_each_set_bit(bit, &adc->smask, sizeof(adc->smask) * BITS_PER_BYTE) {
+ chan = indio_dev->channels + bit;
+ regmap_update_bits(regmap, DFSDM_CHCFGR1(chan->channel),
+ DFSDM_CHCFGR1_CHEN_MASK,
+ DFSDM_CHCFGR1_CHEN(0));
+ }
}
static int stm32_dfsdm_chan_configure(struct stm32_dfsdm *dfsdm,
@@ -237,9 +324,11 @@ static int stm32_dfsdm_chan_configure(struct stm32_dfsdm *dfsdm,
DFSDM_CHCFGR1_CHINSEL(ch->alt_si));
}
-static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
- unsigned int fl_id)
+static int stm32_dfsdm_start_filter(struct stm32_dfsdm_adc *adc,
+ unsigned int fl_id,
+ struct iio_trigger *trig)
{
+ struct stm32_dfsdm *dfsdm = adc->dfsdm;
int ret;
/* Enable filter */
@@ -248,7 +337,11 @@ static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
if (ret < 0)
return ret;
- /* Start conversion */
+ /* Nothing more to do for injected (scan mode/triggered) conversions */
+ if (adc->nconv > 1 || trig)
+ return 0;
+
+ /* Software start (single or continuous) regular conversion */
return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
DFSDM_CR1_RSWSTART_MASK,
DFSDM_CR1_RSWSTART(1));
@@ -262,11 +355,45 @@ static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm,
DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(0));
}
-static int stm32_dfsdm_filter_configure(struct stm32_dfsdm *dfsdm,
- unsigned int fl_id, unsigned int ch_id)
+static int stm32_dfsdm_filter_set_trig(struct stm32_dfsdm_adc *adc,
+ unsigned int fl_id,
+ struct iio_trigger *trig)
{
- struct regmap *regmap = dfsdm->regmap;
- struct stm32_dfsdm_filter *fl = &dfsdm->fl_list[fl_id];
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ u32 jextsel = 0, jexten = STM32_DFSDM_JEXTEN_DISABLED;
+ int ret;
+
+ if (trig) {
+ ret = stm32_dfsdm_get_jextsel(indio_dev, trig);
+ if (ret < 0)
+ return ret;
+
+ /* set trigger source and polarity (default to rising edge) */
+ jextsel = ret;
+ jexten = STM32_DFSDM_JEXTEN_RISING_EDGE;
+ }
+
+ ret = regmap_update_bits(regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_JEXTSEL_MASK | DFSDM_CR1_JEXTEN_MASK,
+ DFSDM_CR1_JEXTSEL(jextsel) |
+ DFSDM_CR1_JEXTEN(jexten));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int stm32_dfsdm_filter_configure(struct stm32_dfsdm_adc *adc,
+ unsigned int fl_id,
+ struct iio_trigger *trig)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
+ u32 cr1;
+ const struct iio_chan_spec *chan;
+ unsigned int bit, jchg = 0;
int ret;
/* Average integrator oversampling */
@@ -286,15 +413,68 @@ static int stm32_dfsdm_filter_configure(struct stm32_dfsdm *dfsdm,
if (ret)
return ret;
- /* No scan mode supported for the moment */
- ret = regmap_update_bits(regmap, DFSDM_CR1(fl_id), DFSDM_CR1_RCH_MASK,
- DFSDM_CR1_RCH(ch_id));
+ ret = stm32_dfsdm_filter_set_trig(adc, fl_id, trig);
if (ret)
return ret;
- return regmap_update_bits(regmap, DFSDM_CR1(fl_id),
- DFSDM_CR1_RSYNC_MASK,
- DFSDM_CR1_RSYNC(fl->sync_mode));
+ /*
+ * DFSDM modes configuration W.R.T audio/iio type modes
+ * ----------------------------------------------------------------
+ * Modes | regular | regular | injected | injected |
+ * | | continuous | | + scan |
+ * --------------|---------|--------------|----------|------------|
+ * single conv | x | | | |
+ * (1 chan) | | | | |
+ * --------------|---------|--------------|----------|------------|
+ * 1 Audio chan | | sample freq | | |
+ * | | or sync_mode | | |
+ * --------------|---------|--------------|----------|------------|
+ * 1 IIO chan | | sample freq | trigger | |
+ * | | or sync_mode | | |
+ * --------------|---------|--------------|----------|------------|
+ * 2+ IIO chans | | | | trigger or |
+ * | | | | sync_mode |
+ * ----------------------------------------------------------------
+ */
+ if (adc->nconv == 1 && !trig) {
+ bit = __ffs(adc->smask);
+ chan = indio_dev->channels + bit;
+
+ /* Use regular conversion for single channel without trigger */
+ cr1 = DFSDM_CR1_RCH(chan->channel);
+
+ /* Continuous conversions triggered by SPI clk in buffer mode */
+ if (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE)
+ cr1 |= DFSDM_CR1_RCONT(1);
+
+ cr1 |= DFSDM_CR1_RSYNC(fl->sync_mode);
+ } else {
+ /* Use injected conversion for multiple channels */
+ for_each_set_bit(bit, &adc->smask,
+ sizeof(adc->smask) * BITS_PER_BYTE) {
+ chan = indio_dev->channels + bit;
+ jchg |= BIT(chan->channel);
+ }
+ ret = regmap_write(regmap, DFSDM_JCHGR(fl_id), jchg);
+ if (ret < 0)
+ return ret;
+
+ /* Use scan mode for multiple channels */
+ cr1 = DFSDM_CR1_JSCAN((adc->nconv > 1) ? 1 : 0);
+
+ /*
+ * Continuous conversions not supported in injected mode,
+ * either use:
+ * - conversions in sync with filter 0
+ * - triggered conversions
+ */
+ if (!fl->sync_mode && !trig)
+ return -EINVAL;
+ cr1 |= DFSDM_CR1_JSYNC(fl->sync_mode);
+ }
+
+ return regmap_update_bits(regmap, DFSDM_CR1(fl_id), DFSDM_CR1_CFG_MASK,
+ cr1);
}
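
A standalone sketch (not part of the patch) of the decision the table above describes, reduced to plain logic and ignoring the sync_mode constraint on injected conversions; it mirrors how the filter configuration chooses between regular and injected/scan modes from the conversion count and the trigger:

#include <stdbool.h>
#include <stdio.h>

static const char *dfsdm_mode(unsigned int nconv, bool trig, bool buffered)
{
	/* one untriggered channel: regular conversion, continuous if buffered */
	if (nconv == 1 && !trig)
		return buffered ? "regular continuous" : "regular single";
	/* otherwise injected conversions, with scan mode for several channels */
	return nconv > 1 ? "injected + scan" : "injected (triggered)";
}

int main(void)
{
	printf("%s\n", dfsdm_mode(1, false, false));
	printf("%s\n", dfsdm_mode(1, true, true));
	printf("%s\n", dfsdm_mode(3, false, true));
	return 0;
}
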
static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
@@ -378,13 +558,38 @@ static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
return snprintf(buf, PAGE_SIZE, "%d\n", adc->spi_freq);
}
+static int dfsdm_adc_set_samp_freq(struct iio_dev *indio_dev,
+ unsigned int sample_freq,
+ unsigned int spi_freq)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ unsigned int oversamp;
+ int ret;
+
+ oversamp = DIV_ROUND_CLOSEST(spi_freq, sample_freq);
+ if (spi_freq % sample_freq)
+ dev_dbg(&indio_dev->dev,
+ "Rate not accurate. requested (%u), actual (%u)\n",
+ sample_freq, spi_freq / oversamp);
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, oversamp);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "No filter parameters that match!\n");
+ return ret;
+ }
+ adc->sample_freq = spi_freq / oversamp;
+ adc->oversamp = oversamp;
+
+ return 0;
+}
+
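
Standalone illustration (not part of the patch) of the arithmetic in dfsdm_adc_set_samp_freq() above: the oversampling ratio is the SPI clock divided by the requested rate, rounded to the closest integer, and the achieved rate is what the debug message reports. Clock and rate values are assumptions; DIV_ROUND_CLOSEST is re-declared locally.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int spi_freq = 2048000;	/* assumed SPI clock, Hz */
	unsigned int sample_freq = 44100;	/* requested rate, Hz */
	unsigned int oversamp = DIV_ROUND_CLOSEST(spi_freq, sample_freq);

	printf("oversampling = %u, actual rate = %u Hz%s\n",
	       oversamp, spi_freq / oversamp,
	       spi_freq % sample_freq ? " (not exact)" : "");
	return 0;
}
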
static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
uintptr_t priv,
const struct iio_chan_spec *chan,
const char *buf, size_t len)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
unsigned int sample_freq = adc->sample_freq;
unsigned int spi_freq;
@@ -403,17 +608,9 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
return -EINVAL;
if (sample_freq) {
- if (spi_freq % sample_freq)
- dev_warn(&indio_dev->dev,
- "Sampling rate not accurate (%d)\n",
- spi_freq / (spi_freq / sample_freq));
-
- ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / sample_freq));
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- "No filter parameters that match!\n");
+ ret = dfsdm_adc_set_samp_freq(indio_dev, sample_freq, spi_freq);
+ if (ret < 0)
return ret;
- }
}
adc->spi_freq = spi_freq;
@@ -421,72 +618,44 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
}
static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc,
- const struct iio_chan_spec *chan,
- bool dma)
+ struct iio_trigger *trig)
{
struct regmap *regmap = adc->dfsdm->regmap;
int ret;
- unsigned int dma_en = 0, cont_en = 0;
- ret = stm32_dfsdm_start_channel(adc->dfsdm, chan->channel);
+ ret = stm32_dfsdm_start_channel(adc);
if (ret < 0)
return ret;
- ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
- chan->channel);
+ ret = stm32_dfsdm_filter_configure(adc, adc->fl_id, trig);
if (ret < 0)
goto stop_channels;
- if (dma) {
- /* Enable DMA transfer*/
- dma_en = DFSDM_CR1_RDMAEN(1);
- /* Enable conversion triggered by SPI clock*/
- cont_en = DFSDM_CR1_RCONT(1);
- }
- /* Enable DMA transfer*/
- ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_RDMAEN_MASK, dma_en);
+ ret = stm32_dfsdm_start_filter(adc, adc->fl_id, trig);
if (ret < 0)
- goto stop_channels;
-
- /* Enable conversion triggered by SPI clock*/
- ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_RCONT_MASK, cont_en);
- if (ret < 0)
- goto stop_channels;
-
- ret = stm32_dfsdm_start_filter(adc->dfsdm, adc->fl_id);
- if (ret < 0)
- goto stop_channels;
+ goto filter_unconfigure;
return 0;
-stop_channels:
- regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_RDMAEN_MASK, 0);
-
+filter_unconfigure:
regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_RCONT_MASK, 0);
- stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
+ DFSDM_CR1_CFG_MASK, 0);
+stop_channels:
+ stm32_dfsdm_stop_channel(adc);
return ret;
}
-static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc,
- const struct iio_chan_spec *chan)
+static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
{
struct regmap *regmap = adc->dfsdm->regmap;
stm32_dfsdm_stop_filter(adc->dfsdm, adc->fl_id);
- /* Clean conversion options */
- regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_RDMAEN_MASK, 0);
-
regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_RCONT_MASK, 0);
+ DFSDM_CR1_CFG_MASK, 0);
- stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
+ stm32_dfsdm_stop_channel(adc);
}
static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
@@ -494,6 +663,7 @@ static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
unsigned int watermark = DFSDM_DMA_BUFFER_SIZE / 2;
+ unsigned int rx_buf_sz = DFSDM_DMA_BUFFER_SIZE;
/*
* DMA cyclic transfers are used, buffer is split into two periods.
@@ -502,7 +672,7 @@ static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
* - one buffer (period) driver pushed to ASoC side.
*/
watermark = min(watermark, val * (unsigned int)(sizeof(u32)));
- adc->buf_sz = watermark * 2;
+ adc->buf_sz = min(rx_buf_sz, watermark * 2 * adc->nconv);
return 0;
}
@@ -532,13 +702,41 @@ static unsigned int stm32_dfsdm_adc_dma_residue(struct stm32_dfsdm_adc *adc)
return 0;
}
-static void stm32_dfsdm_audio_dma_buffer_done(void *data)
+static irqreturn_t stm32_dfsdm_adc_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int available = stm32_dfsdm_adc_dma_residue(adc);
+
+ while (available >= indio_dev->scan_bytes) {
+ u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ pf->timestamp);
+ available -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->buf_sz)
+ adc->bufi = 0;
+ }
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_dfsdm_dma_buffer_done(void *data)
{
struct iio_dev *indio_dev = data;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
int available = stm32_dfsdm_adc_dma_residue(adc);
size_t old_pos;
+ if (indio_dev->currentmode & INDIO_BUFFER_TRIGGERED) {
+ iio_trigger_poll_chained(indio_dev->trig);
+ return;
+ }
+
/*
* FIXME: The in-kernel interface does not support cyclic DMA buffers, and
* offers only an interface to push data sample by sample.
@@ -566,6 +764,9 @@ static void stm32_dfsdm_audio_dma_buffer_done(void *data)
adc->bufi = 0;
old_pos = 0;
}
+ /* regular iio buffer without trigger */
+ if (adc->dev_data->type == DFSDM_IIO)
+ iio_push_to_buffers(indio_dev, buffer);
}
if (adc->cb)
adc->cb(&adc->rx_buf[old_pos], adc->bufi - old_pos,
@@ -575,6 +776,10 @@ static void stm32_dfsdm_audio_dma_buffer_done(void *data)
static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_slave_config config = {
+ .src_addr = (dma_addr_t)adc->dfsdm->phys_base,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
int ret;
@@ -585,6 +790,14 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "%s size=%d watermark=%d\n", __func__,
adc->buf_sz, adc->buf_sz / 2);
+ if (adc->nconv == 1 && !indio_dev->trig)
+ config.src_addr += DFSDM_RDATAR(adc->fl_id);
+ else
+ config.src_addr += DFSDM_JDATAR(adc->fl_id);
+ ret = dmaengine_slave_config(adc->dma_chan, &config);
+ if (ret)
+ return ret;
+
/* Prepare a DMA cyclic transaction */
desc = dmaengine_prep_dma_cyclic(adc->dma_chan,
adc->dma_buf,
@@ -594,71 +807,154 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
if (!desc)
return -EBUSY;
- desc->callback = stm32_dfsdm_audio_dma_buffer_done;
+ desc->callback = stm32_dfsdm_dma_buffer_done;
desc->callback_param = indio_dev;
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
- if (ret) {
- dmaengine_terminate_all(adc->dma_chan);
- return ret;
- }
+ if (ret)
+ goto err_stop_dma;
/* Issue pending DMA requests */
dma_async_issue_pending(adc->dma_chan);
+ if (adc->nconv == 1 && !indio_dev->trig) {
+ /* Enable regular DMA transfer */
+ ret = regmap_update_bits(adc->dfsdm->regmap,
+ DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK,
+ DFSDM_CR1_RDMAEN_MASK);
+ } else {
+ /* Enable injected DMA transfer */
+ ret = regmap_update_bits(adc->dfsdm->regmap,
+ DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_JDMAEN_MASK,
+ DFSDM_CR1_JDMAEN_MASK);
+ }
+
+ if (ret < 0)
+ goto err_stop_dma;
+
return 0;
+
+err_stop_dma:
+ dmaengine_terminate_all(adc->dma_chan);
+
+ return ret;
}
-static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
+static void stm32_dfsdm_adc_dma_stop(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (!adc->dma_chan)
+ return;
+
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK | DFSDM_CR1_JDMAEN_MASK, 0);
+ dmaengine_terminate_all(adc->dma_chan);
+}
+
+static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ adc->nconv = bitmap_weight(scan_mask, indio_dev->masklength);
+ adc->smask = *scan_mask;
+
+ dev_dbg(&indio_dev->dev, "nconv=%d mask=%lx\n", adc->nconv, *scan_mask);
+
+ return 0;
+}
+
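
Standalone sketch (not part of the patch) of the bookkeeping introduced here: update_scan_mode() derives the number of enabled conversions from the scan mask, and stm32_dfsdm_set_watermark() (earlier hunk) scales the DMA buffer size with it. The buffer size, mask and watermark are assumed; bitmap_weight() is approximated with a compiler builtin.

#include <stdio.h>

#define DFSDM_DMA_BUFFER_SIZE	(16 * 1024)	/* assumed size, bytes */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long scan_mask = 0x5;		/* channels 0 and 2 enabled */
	unsigned int nconv = __builtin_popcountl(scan_mask);
	unsigned int val = 128;			/* requested watermark, samples */
	unsigned int watermark = min_u(DFSDM_DMA_BUFFER_SIZE / 2,
				       val * (unsigned int)sizeof(unsigned int));
	unsigned int buf_sz = min_u(DFSDM_DMA_BUFFER_SIZE,
				    watermark * 2 * nconv);

	printf("nconv=%u watermark=%u buf_sz=%u\n", nconv, watermark, buf_sz);
	return 0;
}
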
+static int __stm32_dfsdm_postenable(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- const struct iio_chan_spec *chan = &indio_dev->channels[0];
int ret;
/* Reset adc buffer index */
adc->bufi = 0;
+ if (adc->hwc) {
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (ret < 0)
+ return ret;
+ }
+
ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
if (ret < 0)
- return ret;
+ goto err_stop_hwc;
- ret = stm32_dfsdm_start_conv(adc, chan, true);
+ ret = stm32_dfsdm_adc_dma_start(indio_dev);
if (ret) {
- dev_err(&indio_dev->dev, "Can't start conversion\n");
+ dev_err(&indio_dev->dev, "Can't start DMA\n");
goto stop_dfsdm;
}
- if (adc->dma_chan) {
- ret = stm32_dfsdm_adc_dma_start(indio_dev);
- if (ret) {
- dev_err(&indio_dev->dev, "Can't start DMA\n");
- goto err_stop_conv;
- }
+ ret = stm32_dfsdm_start_conv(adc, indio_dev->trig);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start conversion\n");
+ goto err_stop_dma;
}
return 0;
-err_stop_conv:
- stm32_dfsdm_stop_conv(adc, chan);
+err_stop_dma:
+ stm32_dfsdm_adc_dma_stop(indio_dev);
stop_dfsdm:
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+err_stop_hwc:
+ if (adc->hwc)
+ iio_hw_consumer_disable(adc->hwc);
return ret;
}
-static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
+static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
+{
+ int ret;
+
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = __stm32_dfsdm_postenable(indio_dev);
+ if (ret < 0)
+ goto err_predisable;
+
+ return 0;
+
+err_predisable:
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
+}
+
+static void __stm32_dfsdm_predisable(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- const struct iio_chan_spec *chan = &indio_dev->channels[0];
- if (adc->dma_chan)
- dmaengine_terminate_all(adc->dma_chan);
+ stm32_dfsdm_stop_conv(adc);
- stm32_dfsdm_stop_conv(adc, chan);
+ stm32_dfsdm_adc_dma_stop(indio_dev);
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+ if (adc->hwc)
+ iio_hw_consumer_disable(adc->hwc);
+}
+
+static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
+{
+ __stm32_dfsdm_predisable(indio_dev);
+
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+ iio_triggered_buffer_predisable(indio_dev);
+
return 0;
}
@@ -736,7 +1032,9 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
if (ret < 0)
goto stop_dfsdm;
- ret = stm32_dfsdm_start_conv(adc, chan, false);
+ adc->nconv = 1;
+ adc->smask = BIT(chan->scan_index);
+ ret = stm32_dfsdm_start_conv(adc, NULL);
if (ret < 0) {
regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
@@ -757,7 +1055,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
else
ret = IIO_VAL_INT;
- stm32_dfsdm_stop_conv(adc, chan);
+ stm32_dfsdm_stop_conv(adc);
stop_dfsdm:
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
@@ -777,16 +1075,23 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = stm32_dfsdm_set_osrs(fl, 0, val);
if (!ret)
adc->oversamp = val;
-
+ iio_device_release_direct_mode(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
if (!val)
return -EINVAL;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
switch (ch->src) {
case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
spi_freq = adc->dfsdm->spi_master_freq;
@@ -799,20 +1104,9 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
spi_freq = adc->spi_freq;
}
- if (spi_freq % val)
- dev_warn(&indio_dev->dev,
- "Sampling rate not accurate (%d)\n",
- spi_freq / (spi_freq / val));
-
- ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / val));
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- "Not able to find parameter that match!\n");
- return ret;
- }
- adc->sample_freq = val;
-
- return 0;
+ ret = dfsdm_adc_set_samp_freq(indio_dev, val, spi_freq);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
return -EINVAL;
@@ -827,11 +1121,15 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = iio_hw_consumer_enable(adc->hwc);
if (ret < 0) {
dev_err(&indio_dev->dev,
"%s: IIO enable failed (channel %d)\n",
__func__, chan->channel);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
@@ -840,8 +1138,10 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
dev_err(&indio_dev->dev,
"%s: Conversion failed (channel %d)\n",
__func__, chan->channel);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
+ iio_device_release_direct_mode(indio_dev);
return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
@@ -858,15 +1158,25 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int stm32_dfsdm_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ return stm32_dfsdm_get_jextsel(indio_dev, trig) < 0 ? -EINVAL : 0;
+}
+
static const struct iio_info stm32_dfsdm_info_audio = {
.hwfifo_set_watermark = stm32_dfsdm_set_watermark,
.read_raw = stm32_dfsdm_read_raw,
.write_raw = stm32_dfsdm_write_raw,
+ .update_scan_mode = stm32_dfsdm_update_scan_mode,
};
static const struct iio_info stm32_dfsdm_info_adc = {
+ .hwfifo_set_watermark = stm32_dfsdm_set_watermark,
.read_raw = stm32_dfsdm_read_raw,
.write_raw = stm32_dfsdm_write_raw,
+ .update_scan_mode = stm32_dfsdm_update_scan_mode,
+ .validate_trigger = stm32_dfsdm_validate_trigger,
};
static irqreturn_t stm32_dfsdm_irq(int irq, void *arg)
@@ -926,12 +1236,6 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- struct dma_slave_config config = {
- .src_addr = (dma_addr_t)adc->dfsdm->phys_base +
- DFSDM_RDATAR(adc->fl_id),
- .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- };
- int ret;
adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
if (!adc->dma_chan)
@@ -941,23 +1245,14 @@ static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
DFSDM_DMA_BUFFER_SIZE,
&adc->dma_buf, GFP_KERNEL);
if (!adc->rx_buf) {
- ret = -ENOMEM;
- goto err_release;
+ dma_release_channel(adc->dma_chan);
+ return -ENOMEM;
}
- ret = dmaengine_slave_config(adc->dma_chan, &config);
- if (ret)
- goto err_free;
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+ indio_dev->setup_ops = &stm32_dfsdm_buffer_setup_ops;
return 0;
-
-err_free:
- dma_free_coherent(adc->dma_chan->device->dev, DFSDM_DMA_BUFFER_SIZE,
- adc->rx_buf, adc->dma_buf);
-err_release:
- dma_release_channel(adc->dma_chan);
-
- return ret;
}
static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
@@ -978,7 +1273,8 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
* IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
*/
ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
- ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
if (adc->dev_data->type == DFSDM_AUDIO) {
ch->scan_type.sign = 's';
@@ -1000,9 +1296,6 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
struct stm32_dfsdm_channel *d_ch;
int ret;
- indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
- indio_dev->setup_ops = &stm32_dfsdm_buffer_setup_ops;
-
ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
if (!ch)
return -ENOMEM;
@@ -1070,6 +1363,25 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
init_completion(&adc->completion);
+ /* Optionally request DMA */
+ if (stm32_dfsdm_dma_request(indio_dev)) {
+ dev_dbg(&indio_dev->dev, "No DMA support\n");
+ return 0;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev,
+ &iio_pollfunc_store_time,
+ &stm32_dfsdm_adc_trigger_handler,
+ &stm32_dfsdm_buffer_setup_ops);
+ if (ret) {
+ stm32_dfsdm_dma_release(indio_dev);
+ dev_err(&indio_dev->dev, "buffer setup failed\n");
+ return ret;
+ }
+
+ /* lptimer/timer hardware triggers */
+ indio_dev->modes |= INDIO_HARDWARE_TRIGGERED;
+
return 0;
}
@@ -1117,7 +1429,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
iio->dev.parent = dev;
iio->dev.of_node = np;
- iio->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+ iio->modes = INDIO_DIRECT_MODE;
platform_set_drvdata(pdev, adc);
@@ -1203,10 +1515,48 @@ static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused stm32_dfsdm_adc_suspend(struct device *dev)
+{
+ struct stm32_dfsdm_adc *adc = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ if (iio_buffer_enabled(indio_dev))
+ __stm32_dfsdm_predisable(indio_dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dfsdm_adc_resume(struct device *dev)
+{
+ struct stm32_dfsdm_adc *adc = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ const struct iio_chan_spec *chan;
+ struct stm32_dfsdm_channel *ch;
+ int i, ret;
+
+ /* restore channels configuration */
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ chan = indio_dev->channels + i;
+ ch = &adc->dfsdm->ch_list[chan->channel];
+ ret = stm32_dfsdm_chan_configure(adc->dfsdm, ch);
+ if (ret)
+ return ret;
+ }
+
+ if (iio_buffer_enabled(indio_dev))
+ __stm32_dfsdm_postenable(indio_dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_dfsdm_adc_pm_ops,
+ stm32_dfsdm_adc_suspend, stm32_dfsdm_adc_resume);
+
static struct platform_driver stm32_dfsdm_adc_driver = {
.driver = {
.name = "stm32-dfsdm-adc",
.of_match_table = stm32_dfsdm_adc_match,
+ .pm = &stm32_dfsdm_adc_pm_ops,
},
.probe = stm32_dfsdm_adc_probe,
.remove = stm32_dfsdm_adc_remove,
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index bf089f5d6225..0a4d3746d21c 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -12,6 +12,8 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -90,6 +92,36 @@ struct dfsdm_priv {
struct clk *aclk; /* audio clock */
};
+static inline struct dfsdm_priv *to_stm32_dfsdm_priv(struct stm32_dfsdm *dfsdm)
+{
+ return container_of(dfsdm, struct dfsdm_priv, dfsdm);
+}
+
+static int stm32_dfsdm_clk_prepare_enable(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = to_stm32_dfsdm_priv(dfsdm);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret || !priv->aclk)
+ return ret;
+
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret)
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static void stm32_dfsdm_clk_disable_unprepare(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = to_stm32_dfsdm_priv(dfsdm);
+
+ if (priv->aclk)
+ clk_disable_unprepare(priv->aclk);
+ clk_disable_unprepare(priv->clk);
+}
+
/**
* stm32_dfsdm_start_dfsdm - start global dfsdm interface.
*
@@ -98,24 +130,17 @@ struct dfsdm_priv {
*/
int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
{
- struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ struct dfsdm_priv *priv = to_stm32_dfsdm_priv(dfsdm);
struct device *dev = &priv->pdev->dev;
unsigned int clk_div = priv->spi_clk_out_div, clk_src;
int ret;
if (atomic_inc_return(&priv->n_active_ch) == 1) {
- ret = clk_prepare_enable(priv->clk);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "Failed to start clock\n");
+ pm_runtime_put_noidle(dev);
goto error_ret;
}
- if (priv->aclk) {
- ret = clk_prepare_enable(priv->aclk);
- if (ret < 0) {
- dev_err(dev, "Failed to start audio clock\n");
- goto disable_clk;
- }
- }
/* select clock source, e.g. 0 for "dfsdm" or 1 for "audio" */
clk_src = priv->aclk ? 1 : 0;
@@ -123,21 +148,21 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
DFSDM_CHCFGR1_CKOUTSRC_MASK,
DFSDM_CHCFGR1_CKOUTSRC(clk_src));
if (ret < 0)
- goto disable_aclk;
+ goto pm_put;
/* Output the SPI CLKOUT (if clk_div == 0 the clock is OFF) */
ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
DFSDM_CHCFGR1_CKOUTDIV_MASK,
DFSDM_CHCFGR1_CKOUTDIV(clk_div));
if (ret < 0)
- goto disable_aclk;
+ goto pm_put;
/* Global enable of DFSDM interface */
ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
DFSDM_CHCFGR1_DFSDMEN_MASK,
DFSDM_CHCFGR1_DFSDMEN(1));
if (ret < 0)
- goto disable_aclk;
+ goto pm_put;
}
dev_dbg(dev, "%s: n_active_ch %d\n", __func__,
@@ -145,11 +170,8 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
return 0;
-disable_aclk:
- clk_disable_unprepare(priv->aclk);
-disable_clk:
- clk_disable_unprepare(priv->clk);
-
+pm_put:
+ pm_runtime_put_sync(dev);
error_ret:
atomic_dec(&priv->n_active_ch);
@@ -165,7 +187,7 @@ EXPORT_SYMBOL_GPL(stm32_dfsdm_start_dfsdm);
*/
int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm)
{
- struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ struct dfsdm_priv *priv = to_stm32_dfsdm_priv(dfsdm);
int ret;
if (atomic_dec_and_test(&priv->n_active_ch)) {
@@ -183,9 +205,7 @@ int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm)
if (ret < 0)
return ret;
- clk_disable_unprepare(priv->clk);
- if (priv->aclk)
- clk_disable_unprepare(priv->aclk);
+ pm_runtime_put_sync(&priv->pdev->dev);
}
dev_dbg(&priv->pdev->dev, "%s: n_active_ch %d\n", __func__,
atomic_read(&priv->n_active_ch));
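
The start/stop helpers above keep a usage count so that only the first user powers the interface up (now through runtime PM) and only the last one powers it down. A minimal user-space sketch of that pattern, with hypothetical enable_hw()/disable_hw() helpers standing in for the pm_runtime_get_sync()/pm_runtime_put_sync() calls:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int n_active_ch;  /* usage count, mirrors dfsdm_priv */

/* stand-ins for pm_runtime_get_sync() + regmap setup / pm_runtime_put_sync() */
static void enable_hw(void)  { puts("interface enabled"); }
static void disable_hw(void) { puts("interface disabled"); }

static void start_dfsdm_user(void)
{
        /* only the first user really powers the block up */
        if (atomic_fetch_add(&n_active_ch, 1) == 0)
                enable_hw();
}

static void stop_dfsdm_user(void)
{
        /* only the last user powers it back down */
        if (atomic_fetch_sub(&n_active_ch, 1) == 1)
                disable_hw();
}

int main(void)
{
        start_dfsdm_user();     /* enables  */
        start_dfsdm_user();     /* no-op    */
        stop_dfsdm_user();      /* no-op    */
        stop_dfsdm_user();      /* disables */
        return 0;
}
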
@@ -199,7 +219,7 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
{
struct device_node *node = pdev->dev.of_node;
struct resource *res;
- unsigned long clk_freq;
+ unsigned long clk_freq, divider;
unsigned int spi_freq, rem;
int ret;
@@ -243,13 +263,20 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
return 0;
}
- priv->spi_clk_out_div = div_u64_rem(clk_freq, spi_freq, &rem) - 1;
- if (!priv->spi_clk_out_div) {
- /* spi_clk_out_div == 0 means ckout is OFF */
+ divider = div_u64_rem(clk_freq, spi_freq, &rem);
+ /* Round the divider up when ckout is not exact, so as not to exceed spi_freq */
+ if (rem)
+ divider++;
+
+ /* The programmable divider is in the range [2:256] */
+ if (divider < 2 || divider > 256) {
dev_err(&pdev->dev, "spi-max-frequency not achievable\n");
return -EINVAL;
}
- priv->dfsdm.spi_master_freq = spi_freq;
+
+ /* SPI clock output divider is: divider = CKOUTDIV + 1 */
+ priv->spi_clk_out_div = divider - 1;
+ priv->dfsdm.spi_master_freq = clk_freq / (priv->spi_clk_out_div + 1);
if (rem) {
dev_warn(&pdev->dev, "SPI clock not accurate\n");
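
A worked example of the divider computation above, as a stand-alone C program (the 12.288 MHz source clock and 2 MHz spi-max-frequency are made-up values for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t clk_freq = 12288000;   /* e.g. dfsdm clock rate (Hz)  */
        uint32_t spi_freq = 2000000;    /* e.g. spi-max-frequency (Hz) */
        uint64_t divider = clk_freq / spi_freq;
        uint32_t rem = clk_freq % spi_freq;

        /* round up so the generated ckout never exceeds spi_freq */
        if (rem)
                divider++;

        /* the programmable divider only covers [2:256] */
        if (divider < 2 || divider > 256) {
                fprintf(stderr, "spi-max-frequency not achievable\n");
                return 1;
        }

        /* hardware field CKOUTDIV = divider - 1; report the real rate */
        printf("CKOUTDIV=%llu, spi_master_freq=%llu Hz\n",
               (unsigned long long)(divider - 1),
               (unsigned long long)(clk_freq / divider));
        return 0;
}
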
@@ -318,14 +345,111 @@ static int stm32_dfsdm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dfsdm);
- return devm_of_platform_populate(&pdev->dev);
+ ret = stm32_dfsdm_clk_prepare_enable(dfsdm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to start clock\n");
+ return ret;
+ }
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret)
+ goto pm_put;
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+pm_put:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ stm32_dfsdm_clk_disable_unprepare(dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_core_remove(struct platform_device *pdev)
+{
+ struct stm32_dfsdm *dfsdm = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ stm32_dfsdm_clk_disable_unprepare(dfsdm);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dfsdm_core_suspend(struct device *dev)
+{
+ struct stm32_dfsdm *dfsdm = dev_get_drvdata(dev);
+ struct dfsdm_priv *priv = to_stm32_dfsdm_priv(dfsdm);
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ /* Balance devm_regmap_init_mmio_clk() clk_prepare() */
+ clk_unprepare(priv->clk);
+
+ return pinctrl_pm_select_sleep_state(dev);
}
+static int __maybe_unused stm32_dfsdm_core_resume(struct device *dev)
+{
+ struct stm32_dfsdm *dfsdm = dev_get_drvdata(dev);
+ struct dfsdm_priv *priv = to_stm32_dfsdm_priv(dfsdm);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare(priv->clk);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_resume(dev);
+}
+
+static int __maybe_unused stm32_dfsdm_core_runtime_suspend(struct device *dev)
+{
+ struct stm32_dfsdm *dfsdm = dev_get_drvdata(dev);
+
+ stm32_dfsdm_clk_disable_unprepare(dfsdm);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dfsdm_core_runtime_resume(struct device *dev)
+{
+ struct stm32_dfsdm *dfsdm = dev_get_drvdata(dev);
+
+ return stm32_dfsdm_clk_prepare_enable(dfsdm);
+}
+
+static const struct dev_pm_ops stm32_dfsdm_core_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dfsdm_core_suspend,
+ stm32_dfsdm_core_resume)
+ SET_RUNTIME_PM_OPS(stm32_dfsdm_core_runtime_suspend,
+ stm32_dfsdm_core_runtime_resume,
+ NULL)
+};
+
static struct platform_driver stm32_dfsdm_driver = {
.probe = stm32_dfsdm_probe,
+ .remove = stm32_dfsdm_core_remove,
.driver = {
.name = "stm32-dfsdm",
.of_match_table = stm32_dfsdm_of_match,
+ .pm = &stm32_dfsdm_core_pm_ops,
},
};
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index 37f4b74a5d32..7921f827c6ec 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -184,9 +184,6 @@ static irqreturn_t stmpe_adc_isr(int irq, void *dev_id)
struct stmpe_adc *info = (struct stmpe_adc *)dev_id;
u16 data;
- if (info->channel > STMPE_TEMP_CHANNEL)
- return IRQ_NONE;
-
if (info->channel <= STMPE_ADC_LAST_NR) {
int int_sta;
@@ -205,6 +202,8 @@ static irqreturn_t stmpe_adc_isr(int irq, void *dev_id)
/* Read value */
stmpe_block_read(info->stmpe, STMPE_REG_TEMP_DATA, 2,
(u8 *) &data);
+ } else {
+ return IRQ_NONE;
}
info->value = (u32) be16_to_cpu(data);
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 0ad63592cc3c..2e66e4d586ff 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -36,12 +37,15 @@
*/
#define TI_ADS7950_VA_MV_ACPI_DEFAULT 5000
+#define TI_ADS7950_CR_GPIO BIT(14)
#define TI_ADS7950_CR_MANUAL BIT(12)
#define TI_ADS7950_CR_WRITE BIT(11)
#define TI_ADS7950_CR_CHAN(ch) ((ch) << 7)
#define TI_ADS7950_CR_RANGE_5V BIT(6)
+#define TI_ADS7950_CR_GPIO_DATA BIT(4)
#define TI_ADS7950_MAX_CHAN 16
+#define TI_ADS7950_NUM_GPIOS 4
#define TI_ADS7950_TIMESTAMP_SIZE (sizeof(int64_t) / sizeof(__be16))
@@ -49,6 +53,16 @@
#define TI_ADS7950_EXTRACT(val, dec, bits) \
(((val) >> (dec)) & ((1 << (bits)) - 1))
+#define TI_ADS7950_MAN_CMD(cmd) (TI_ADS7950_CR_MANUAL | (cmd))
+#define TI_ADS7950_GPIO_CMD(cmd) (TI_ADS7950_CR_GPIO | (cmd))
+
+/* Manual mode configuration */
+#define TI_ADS7950_MAN_CMD_SETTINGS(st) \
+ (TI_ADS7950_MAN_CMD(TI_ADS7950_CR_WRITE | st->cmd_settings_bitmask))
+/* GPIO mode configuration */
+#define TI_ADS7950_GPIO_CMD_SETTINGS(st) \
+ (TI_ADS7950_GPIO_CMD(st->gpio_cmd_settings_bitmask))
+
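
To make the command layout concrete, here is a small stand-alone C sketch that rebuilds the 16-bit frames from the bit definitions above; the channel number, settings and response word are arbitrary examples, not values taken from real hardware:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)          (1u << (n))
#define CR_GPIO         BIT(14)
#define CR_MANUAL       BIT(12)
#define CR_WRITE        BIT(11)
#define CR_CHAN(ch)     ((ch) << 7)
#define CR_RANGE_5V     BIT(6)

int main(void)
{
        unsigned int cmd_settings = CR_RANGE_5V;  /* as programmed by ti_ads7950_init_hw() */
        unsigned int ch = 5;                      /* arbitrary channel */

        /* per-channel conversion, cf. TI_ADS7950_MAN_CMD(TI_ADS7950_CR_CHAN(ch)) */
        uint16_t conv = CR_MANUAL | CR_CHAN(ch);

        /* settings write, cf. TI_ADS7950_MAN_CMD_SETTINGS(st) */
        uint16_t settings = CR_MANUAL | CR_WRITE | cmd_settings;

        /* GPIO command with GPIO0 configured as output, cf. TI_ADS7950_GPIO_CMD_SETTINGS(st) */
        uint16_t gpio = CR_GPIO | BIT(0);

        printf("conv=0x%04x settings=0x%04x gpio=0x%04x\n", conv, settings, gpio);

        /* a response frame carries the GPIO input levels in bits 12-15,
         * extracted the same way as in ti_ads7950_get()
         */
        uint16_t rx = 0x3abc;   /* made-up response */
        printf("GPIO1 level = %u\n", ((rx >> 12) & BIT(1)) ? 1 : 0);
        return 0;
}
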
struct ti_ads7950_state {
struct spi_device *spi;
struct spi_transfer ring_xfer;
@@ -56,10 +70,36 @@ struct ti_ads7950_state {
struct spi_message ring_msg;
struct spi_message scan_single_msg;
+ /* Lock to protect the spi xfer buffers */
+ struct mutex slock;
+ struct gpio_chip chip;
+
struct regulator *reg;
unsigned int vref_mv;
- unsigned int settings;
+ /*
+ * Bitmask of the lower 7 bits used for configuration.
+ * These bits can only be written when TI_ADS7950_CR_WRITE
+ * is set; otherwise they retain their previous state.
+ * [0-3] GPIO signal
+ * [4] Set following frame to return GPIO signal values
+ * [5] Powers down device
+ * [6] Sets Vref range 1 (2.5 V) or range 2 (5 V)
+ *
+ * Bits present on Manual/Auto1/Auto2 commands
+ */
+ unsigned int cmd_settings_bitmask;
+
+ /*
+ * Bitmask of GPIO command
+ * [0-3] GPIO direction
+ * [4-6] Different GPIO alarm mode configurations
+ * [7] GPIO 2 as device range input
+ * [8] GPIO 3 as device power down input
+ * [9] Reset all registers
+ * [10-11] N/A
+ */
+ unsigned int gpio_cmd_settings_bitmask;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -248,7 +288,7 @@ static int ti_ads7950_update_scan_mode(struct iio_dev *indio_dev,
len = 0;
for_each_set_bit(i, active_scan_mask, indio_dev->num_channels) {
- cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(i) | st->settings;
+ cmd = TI_ADS7950_MAN_CMD(TI_ADS7950_CR_CHAN(i));
st->tx_buf[len++] = cmd;
}
@@ -268,6 +308,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
struct ti_ads7950_state *st = iio_priv(indio_dev);
int ret;
+ mutex_lock(&st->slock);
ret = spi_sync(st->spi, &st->ring_msg);
if (ret < 0)
goto out;
@@ -276,6 +317,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
iio_get_time_ns(indio_dev));
out:
+ mutex_unlock(&st->slock);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -286,9 +328,8 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
struct ti_ads7950_state *st = iio_priv(indio_dev);
int ret, cmd;
- mutex_lock(&indio_dev->mlock);
-
- cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings;
+ mutex_lock(&st->slock);
+ cmd = TI_ADS7950_MAN_CMD(TI_ADS7950_CR_CHAN(ch));
st->single_tx = cmd;
ret = spi_sync(st->spi, &st->scan_single_msg);
@@ -298,7 +339,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
ret = st->single_rx;
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->slock);
return ret;
}
@@ -317,7 +358,7 @@ static int ti_ads7950_get_range(struct ti_ads7950_state *st)
vref /= 1000;
}
- if (st->settings & TI_ADS7950_CR_RANGE_5V)
+ if (st->cmd_settings_bitmask & TI_ADS7950_CR_RANGE_5V)
vref *= 2;
return vref;
@@ -362,6 +403,132 @@ static const struct iio_info ti_ads7950_info = {
.update_scan_mode = ti_ads7950_update_scan_mode,
};
+static void ti_ads7950_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct ti_ads7950_state *st = gpiochip_get_data(chip);
+
+ mutex_lock(&st->slock);
+
+ if (value)
+ st->cmd_settings_bitmask |= BIT(offset);
+ else
+ st->cmd_settings_bitmask &= ~BIT(offset);
+
+ st->single_tx = TI_ADS7950_MAN_CMD_SETTINGS(st);
+ spi_sync(st->spi, &st->scan_single_msg);
+
+ mutex_unlock(&st->slock);
+}
+
+static int ti_ads7950_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ti_ads7950_state *st = gpiochip_get_data(chip);
+ int ret, value;
+
+ mutex_lock(&st->slock);
+
+ /* If set as output, return the output */
+ if (st->gpio_cmd_settings_bitmask & BIT(offset)) {
+ ret = st->cmd_settings_bitmask & BIT(offset);
+ goto out;
+ }
+
+ /* GPIO data bit sets SDO bits 12-15 to GPIO input */
+ st->cmd_settings_bitmask |= TI_ADS7950_CR_GPIO_DATA;
+ st->single_tx = TI_ADS7950_MAN_CMD_SETTINGS(st);
+ ret = spi_sync(st->spi, &st->scan_single_msg);
+ if (ret)
+ goto out;
+
+ value = ((st->single_rx >> 12) & BIT(offset)) ? 1 : 0;
+
+ /* Revert back to original settings */
+ st->cmd_settings_bitmask &= ~TI_ADS7950_CR_GPIO_DATA;
+ st->single_tx = TI_ADS7950_MAN_CMD_SETTINGS(st);
+ ret = spi_sync(st->spi, &st->scan_single_msg);
+ if (ret)
+ goto out;
+
+ /* On success return the sampled input level, not the spi_sync() result */
+ ret = value;
+
+out:
+ mutex_unlock(&st->slock);
+
+ return ret;
+}
+
+static int ti_ads7950_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct ti_ads7950_state *st = gpiochip_get_data(chip);
+
+ /* Bitmask uses 1 for output; the GPIO framework expects 0 = output, 1 = input */
+ return !(st->gpio_cmd_settings_bitmask & BIT(offset));
+}
+
+static int _ti_ads7950_set_direction(struct gpio_chip *chip, int offset,
+ int input)
+{
+ struct ti_ads7950_state *st = gpiochip_get_data(chip);
+ int ret = 0;
+
+ mutex_lock(&st->slock);
+
+ /* Only change direction if needed */
+ if (input && (st->gpio_cmd_settings_bitmask & BIT(offset)))
+ st->gpio_cmd_settings_bitmask &= ~BIT(offset);
+ else if (!input && !(st->gpio_cmd_settings_bitmask & BIT(offset)))
+ st->gpio_cmd_settings_bitmask |= BIT(offset);
+ else
+ goto out;
+
+ st->single_tx = TI_ADS7950_GPIO_CMD_SETTINGS(st);
+ ret = spi_sync(st->spi, &st->scan_single_msg);
+
+out:
+ mutex_unlock(&st->slock);
+
+ return ret;
+}
+
+static int ti_ads7950_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return _ti_ads7950_set_direction(chip, offset, 1);
+}
+
+static int ti_ads7950_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ ti_ads7950_set(chip, offset, value);
+
+ return _ti_ads7950_set_direction(chip, offset, 0);
+}
+
+static int ti_ads7950_init_hw(struct ti_ads7950_state *st)
+{
+ int ret = 0;
+
+ mutex_lock(&st->slock);
+
+ /* Settings for Manual/Auto1/Auto2 commands */
+ /* Default to 5v ref */
+ st->cmd_settings_bitmask = TI_ADS7950_CR_RANGE_5V;
+ st->single_tx = TI_ADS7950_MAN_CMD_SETTINGS(st);
+ ret = spi_sync(st->spi, &st->scan_single_msg);
+ if (ret)
+ goto out;
+
+ /* Settings for GPIO command */
+ st->gpio_cmd_settings_bitmask = 0x0;
+ st->single_tx = TI_ADS7950_GPIO_CMD_SETTINGS(st);
+ ret = spi_sync(st->spi, &st->scan_single_msg);
+
+out:
+ mutex_unlock(&st->slock);
+
+ return ret;
+}
+
static int ti_ads7950_probe(struct spi_device *spi)
{
struct ti_ads7950_state *st;
@@ -386,7 +553,6 @@ static int ti_ads7950_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
- st->settings = TI_ADS7950_CR_MANUAL | TI_ADS7950_CR_RANGE_5V;
info = &ti_ads7950_chip_info[spi_get_device_id(spi)->driver_data];
@@ -432,16 +598,19 @@ static int ti_ads7950_probe(struct spi_device *spi)
if (ACPI_COMPANION(&spi->dev))
st->vref_mv = TI_ADS7950_VA_MV_ACPI_DEFAULT;
+ mutex_init(&st->slock);
+
st->reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(st->reg)) {
dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
- return PTR_ERR(st->reg);
+ ret = PTR_ERR(st->reg);
+ goto error_destroy_mutex;
}
ret = regulator_enable(st->reg);
if (ret) {
dev_err(&spi->dev, "Failed to enable regulator \"vref\"\n");
- return ret;
+ goto error_destroy_mutex;
}
ret = iio_triggered_buffer_setup(indio_dev, NULL,
@@ -451,18 +620,46 @@ static int ti_ads7950_probe(struct spi_device *spi)
goto error_disable_reg;
}
+ ret = ti_ads7950_init_hw(st);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to init adc chip\n");
+ goto error_cleanup_ring;
+ }
+
ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&spi->dev, "Failed to register iio device\n");
goto error_cleanup_ring;
}
+ /* Add GPIO chip */
+ st->chip.label = dev_name(&st->spi->dev);
+ st->chip.parent = &st->spi->dev;
+ st->chip.owner = THIS_MODULE;
+ st->chip.base = -1;
+ st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
+ st->chip.get_direction = ti_ads7950_get_direction;
+ st->chip.direction_input = ti_ads7950_direction_input;
+ st->chip.direction_output = ti_ads7950_direction_output;
+ st->chip.get = ti_ads7950_get;
+ st->chip.set = ti_ads7950_set;
+
+ ret = gpiochip_add_data(&st->chip, st);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to init GPIOs\n");
+ goto error_iio_device;
+ }
+
return 0;
+error_iio_device:
+ iio_device_unregister(indio_dev);
error_cleanup_ring:
iio_triggered_buffer_cleanup(indio_dev);
error_disable_reg:
regulator_disable(st->reg);
+error_destroy_mutex:
+ mutex_destroy(&st->slock);
return ret;
}
@@ -472,9 +669,11 @@ static int ti_ads7950_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ti_ads7950_state *st = iio_priv(indio_dev);
+ gpiochip_remove(&st->chip);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(st->reg);
+ mutex_destroy(&st->slock);
return 0;
}
diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
new file mode 100644
index 000000000000..9a460807d46d
--- /dev/null
+++ b/drivers/iio/adc/ti-ads8344.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADS8344 16-bit 8-Channel ADC driver
+ *
+ * Author: Gregory CLEMENT <gregory.clement@bootlin.com>
+ *
+ * Datasheet: http://www.ti.com/lit/ds/symlink/ads8344.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#define ADS8344_START BIT(7)
+#define ADS8344_SINGLE_END BIT(2)
+#define ADS8344_CHANNEL(channel) ((channel) << 4)
+#define ADS8344_CLOCK_INTERNAL 0x2 /* PD1 = 1 and PD0 = 0 */
+
+struct ads8344 {
+ struct spi_device *spi;
+ struct regulator *reg;
+ /*
+ * Lock protecting access to adc->tx_buf and adc->rx_buf,
+ * especially against concurrent reads from sysfs.
+ */
+ struct mutex lock;
+
+ u8 tx_buf ____cacheline_aligned;
+ u16 rx_buf;
+};
+
+#define ADS8344_VOLTAGE_CHANNEL(chan, si) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = si, \
+ }
+
+#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (chan1), \
+ .channel2 = (chan2), \
+ .differential = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = si, \
+ }
+
+static const struct iio_chan_spec ads8344_channels[] = {
+ ADS8344_VOLTAGE_CHANNEL(0, 0),
+ ADS8344_VOLTAGE_CHANNEL(1, 4),
+ ADS8344_VOLTAGE_CHANNEL(2, 1),
+ ADS8344_VOLTAGE_CHANNEL(3, 5),
+ ADS8344_VOLTAGE_CHANNEL(4, 2),
+ ADS8344_VOLTAGE_CHANNEL(5, 6),
+ ADS8344_VOLTAGE_CHANNEL(6, 3),
+ ADS8344_VOLTAGE_CHANNEL(7, 7),
+ ADS8344_VOLTAGE_CHANNEL_DIFF(0, 1, 8),
+ ADS8344_VOLTAGE_CHANNEL_DIFF(2, 3, 9),
+ ADS8344_VOLTAGE_CHANNEL_DIFF(4, 5, 10),
+ ADS8344_VOLTAGE_CHANNEL_DIFF(6, 7, 11),
+ ADS8344_VOLTAGE_CHANNEL_DIFF(1, 0, 12),
+ ADS8344_VOLTAGE_CHANNEL_DIFF(3, 2, 13),
+ ADS8344_VOLTAGE_CHANNEL_DIFF(5, 4, 14),
+ ADS8344_VOLTAGE_CHANNEL_DIFF(7, 6, 15),
+};
+
+static int ads8344_adc_conversion(struct ads8344 *adc, int channel,
+ bool differential)
+{
+ struct spi_device *spi = adc->spi;
+ int ret;
+
+ adc->tx_buf = ADS8344_START;
+ if (!differential)
+ adc->tx_buf |= ADS8344_SINGLE_END;
+ adc->tx_buf |= ADS8344_CHANNEL(channel);
+ adc->tx_buf |= ADS8344_CLOCK_INTERNAL;
+
+ ret = spi_write(spi, &adc->tx_buf, 1);
+ if (ret)
+ return ret;
+
+ udelay(9);
+
+ ret = spi_read(spi, &adc->rx_buf, 2);
+ if (ret)
+ return ret;
+
+ return adc->rx_buf;
+}
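
The control byte written before each conversion is just the four fields OR'ed together. A tiny stand-alone illustration (the mux field value is arbitrary; the channel-to-mux shuffle encoded in the channel table above is not repeated here):

#include <stdio.h>

#define ADS8344_START           (1u << 7)
#define ADS8344_SINGLE_END      (1u << 2)
#define ADS8344_CHANNEL(ch)     ((ch) << 4)
#define ADS8344_CLOCK_INTERNAL  0x2     /* PD1 = 1, PD0 = 0 */

int main(void)
{
        unsigned int mux = 3;   /* arbitrary mux field value */
        unsigned int ctrl = ADS8344_START | ADS8344_SINGLE_END |
                            ADS8344_CHANNEL(mux) | ADS8344_CLOCK_INTERNAL;

        /* the driver writes this single byte, waits ~9 us, then reads the
         * 16-bit conversion result back over SPI
         */
        printf("control byte = 0x%02x\n", ctrl);        /* prints 0xb6 */
        return 0;
}
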
+
+static int ads8344_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int *value,
+ int *shift, long mask)
+{
+ struct ads8344 *adc = iio_priv(iio);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ *value = ads8344_adc_conversion(adc, channel->scan_index,
+ channel->differential);
+ mutex_unlock(&adc->lock);
+ if (*value < 0)
+ return *value;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *value = regulator_get_voltage(adc->reg);
+ if (*value < 0)
+ return *value;
+
+ /* convert regulator output voltage to mV */
+ *value /= 1000;
+ *shift = 16;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ads8344_info = {
+ .read_raw = ads8344_read_raw,
+};
+
+static int ads8344_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ads8344 *adc;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+ mutex_init(&adc->lock);
+
+ indio_dev->name = dev_name(&spi->dev);
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
+ indio_dev->info = &ads8344_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ads8344_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads8344_channels);
+
+ adc->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(adc->reg))
+ return PTR_ERR(adc->reg);
+
+ ret = regulator_enable(adc->reg);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ regulator_disable(adc->reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ads8344_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ads8344 *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(adc->reg);
+
+ return 0;
+}
+
+static const struct of_device_id ads8344_of_match[] = {
+ { .compatible = "ti,ads8344", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ads8344_of_match);
+
+static struct spi_driver ads8344_driver = {
+ .driver = {
+ .name = "ads8344",
+ .of_match_table = ads8344_of_match,
+ },
+ .probe = ads8344_probe,
+ .remove = ads8344_remove,
+};
+module_spi_driver(ads8344_driver);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@bootlin.com>");
+MODULE_DESCRIPTION("ADS8344 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 8b4568edd5cb..f9461070a74a 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -523,6 +523,6 @@ static struct spi_driver ads8688_driver = {
};
module_spi_driver(ads8688_driver);
-MODULE_AUTHOR("Sean Nyekjaer <sean.nyekjaer@prevas.dk>");
+MODULE_AUTHOR("Sean Nyekjaer <sean@geanix.dk>");
MODULE_DESCRIPTION("Texas Instruments ADS8688 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index b13c61539d46..6401ca7a9a20 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev)
err_free_irq:
free_irq(xadc->irq, indio_dev);
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work);
err_clk_disable_unprepare:
clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
@@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev)
iio_triggered_buffer_cleanup(indio_dev);
}
free_irq(xadc->irq, indio_dev);
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work);
clk_disable_unprepare(xadc->clk);
- cancel_delayed_work(&xadc->zynq_unmask_work);
kfree(xadc->data);
kfree(indio_dev->channels);
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index ea63c838eeae..df21e7dbec40 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -36,7 +36,8 @@ static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
- kfree(cb_buff->buffer.scan_mask);
+
+ bitmap_free(cb_buff->buffer.scan_mask);
kfree(cb_buff);
}
@@ -74,9 +75,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
}
cb_buff->indio_dev = cb_buff->channels[0].indio_dev;
- cb_buff->buffer.scan_mask
- = kcalloc(BITS_TO_LONGS(cb_buff->indio_dev->masklength),
- sizeof(long), GFP_KERNEL);
+ cb_buff->buffer.scan_mask = bitmap_zalloc(cb_buff->indio_dev->masklength,
+ GFP_KERNEL);
if (cb_buff->buffer.scan_mask == NULL) {
ret = -ENOMEM;
goto error_release_channels;
@@ -95,7 +95,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
return cb_buff;
error_free_scan_mask:
- kfree(cb_buff->buffer.scan_mask);
+ bitmap_free(cb_buff->buffer.scan_mask);
error_release_channels:
iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index d5d146e9e372..5dc11a359444 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -12,14 +12,14 @@ config ATLAS_PH_SENSOR
select IIO_TRIGGERED_BUFFER
select IRQ_WORK
help
- Say Y here to build I2C interface support for the following
- Atlas Scientific OEM SM sensors:
+ Say Y here to build I2C interface support for the following
+ Atlas Scientific OEM SM sensors:
* pH SM sensor
* EC SM sensor
* ORP SM sensor
- To compile this driver as module, choose M here: the
- module will be called atlas-ph-sensor.
+ To compile this driver as module, choose M here: the
+ module will be called atlas-ph-sensor.
config BME680
tristate "Bosch Sensortec BME680 sensor driver"
@@ -47,8 +47,8 @@ config BME680_SPI
config CCS811
tristate "AMS CCS811 VOC sensor"
depends on I2C
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here to build I2C interface support for the AMS
CCS811 VOC (Volatile Organic Compounds) sensor
@@ -64,6 +64,7 @@ config IAQCORE
config PMS7003
tristate "Plantower PMS7003 particulate matter sensor"
depends on SERIAL_DEV_BUS
+ select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Plantower PMS7003 particulate
matter sensor.
@@ -71,6 +72,19 @@ config PMS7003
To compile this driver as a module, choose M here: the module will
be called pms7003.
+config SENSIRION_SGP30
+ tristate "Sensirion SGPxx gas sensors"
+ depends on I2C
+ select CRC8
+ help
+ Say Y here to build I2C interface support for the following
+ Sensirion SGP gas sensors:
+ * SGP30 gas sensor
+ * SGPC3 low power gas sensor
+
+ To compile this driver as module, choose M here: the
+ module will be called sgp30.
+
config SPS30
tristate "SPS30 particulate matter sensor"
depends on I2C
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index 0ae89b87e2d6..4edc5d21cb9f 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -2,11 +2,9 @@
#ifndef BME680_H_
#define BME680_H_
-#define BME680_REG_CHIP_I2C_ID 0xD0
-#define BME680_REG_CHIP_SPI_ID 0x50
+#define BME680_REG_CHIP_ID 0xD0
#define BME680_CHIP_ID_VAL 0x61
-#define BME680_REG_SOFT_RESET_I2C 0xE0
-#define BME680_REG_SOFT_RESET_SPI 0x60
+#define BME680_REG_SOFT_RESET 0xE0
#define BME680_CMD_SOFTRESET 0xB6
#define BME680_REG_STATUS 0x73
#define BME680_SPI_MEM_PAGE_BIT BIT(4)
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 70c1fe4366f4..ccde4c65ff93 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -63,9 +63,23 @@ struct bme680_data {
s32 t_fine;
};
+static const struct regmap_range bme680_volatile_ranges[] = {
+ regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
+ regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
+ regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
+};
+
+static const struct regmap_access_table bme680_volatile_table = {
+ .yes_ranges = bme680_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges),
+};
+
const struct regmap_config bme680_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0xef,
+ .volatile_table = &bme680_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL(bme680_regmap_config);
@@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
s64 var1, var2, var3;
s16 calc_temp;
+ /* If the calibration is invalid, attempt to reload it */
+ if (!calib->par_t2)
+ bme680_read_calib(data, calib);
+
var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
@@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data)
return ret;
}
-static int bme680_read_temp(struct bme680_data *data,
- int *val, int *val2)
+static int bme680_read_temp(struct bme680_data *data, int *val)
{
struct device *dev = regmap_get_device(data->regmap);
int ret;
@@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data,
* compensate_press/compensate_humid to get compensated
* pressure/humidity readings.
*/
- if (val && val2) {
- *val = comp_temp;
- *val2 = 100;
- return IIO_VAL_FRACTIONAL;
+ if (val) {
+ *val = comp_temp * 10; /* Centidegrees to millidegrees */
+ return IIO_VAL_INT;
}
return ret;
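
The switch from IIO_VAL_FRACTIONAL to IIO_VAL_INT changes the reported unit from plain degrees to the millidegrees Celsius that the IIO processed-temperature ABI expects. A quick stand-alone arithmetic check, using a made-up compensated reading of 2537 (hundredths of a degree):

#include <stdio.h>

int main(void)
{
        int comp_temp = 2537;   /* hypothetical reading, 1/100 degC */

        /* old path: IIO_VAL_FRACTIONAL with val=2537, val2=100 -> 25.37 */
        printf("fractional: %d/%d = %.2f\n", comp_temp, 100, comp_temp / 100.0);

        /* new path: IIO_VAL_INT in millidegrees -> 25370 */
        printf("integer:    %d millidegC\n", comp_temp * 10);
        return 0;
}
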
@@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data,
s32 adc_press;
/* Read and compensate temperature to get a reading of t_fine */
- ret = bme680_read_temp(data, NULL, NULL);
+ ret = bme680_read_temp(data, NULL);
if (ret < 0)
return ret;
@@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data,
u32 comp_humidity;
/* Read and compensate temperature to get a reading of t_fine */
- ret = bme680_read_temp(data, NULL, NULL);
+ ret = bme680_read_temp(data, NULL);
if (ret < 0)
return ret;
@@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_TEMP:
- return bme680_read_temp(data, val, val2);
+ return bme680_read_temp(data, val);
case IIO_PRESSURE:
return bme680_read_press(data, val, val2);
case IIO_HUMIDITYRELATIVE:
@@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
{
struct iio_dev *indio_dev;
struct bme680_data *data;
+ unsigned int val;
int ret;
+ ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
+ BME680_CMD_SOFTRESET);
+ if (ret < 0) {
+ dev_err(dev, "Failed to reset chip\n");
+ return ret;
+ }
+
+ ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(dev, "Error reading chip ID\n");
+ return ret;
+ }
+
+ if (val != BME680_CHIP_ID_VAL) {
+ dev_err(dev, "Wrong chip ID, got %x expected %x\n",
+ val, BME680_CHIP_ID_VAL);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index b2f805b6b36a..de9c9e3d23ea 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
{
struct regmap *regmap;
const char *name = NULL;
- unsigned int val;
- int ret;
regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
if (IS_ERR(regmap)) {
@@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
return PTR_ERR(regmap);
}
- ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
- BME680_CMD_SOFTRESET);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to reset chip\n");
- return ret;
- }
-
- ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
- if (ret < 0) {
- dev_err(&client->dev, "Error reading I2C chip ID\n");
- return ret;
- }
-
- if (val != BME680_CHIP_ID_VAL) {
- dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
- val, BME680_CHIP_ID_VAL);
- return -ENODEV;
- }
-
if (id)
name = id->name;
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index d0b7bdd3f066..3b838068a7e4 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -12,28 +12,93 @@
#include "bme680.h"
+struct bme680_spi_bus_context {
+ struct spi_device *spi;
+ u8 current_page;
+};
+
+/*
+ * In SPI mode there are only 7 address bits; a "page" register determines
+ * which part of the 8-bit register range is active. This function looks at
+ * the address and writes the page selection bit if needed.
+ */
+static int bme680_regmap_spi_select_page(
+ struct bme680_spi_bus_context *ctx, u8 reg)
+{
+ struct spi_device *spi = ctx->spi;
+ int ret;
+ u8 buf[2];
+ u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
+
+ if (page == ctx->current_page)
+ return 0;
+
+ /*
+ * Data sheet claims we're only allowed to change bit 4, so we must do
+ * a read-modify-write on each and every page select
+ */
+ buf[0] = BME680_REG_STATUS;
+ ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to set page %u\n", page);
+ return ret;
+ }
+
+ buf[0] = BME680_REG_STATUS;
+ if (page)
+ buf[1] |= BME680_SPI_MEM_PAGE_BIT;
+ else
+ buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
+
+ ret = spi_write(spi, buf, 2);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to set page %u\n", page);
+ return ret;
+ }
+
+ ctx->current_page = page;
+
+ return 0;
+}
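
The address/page arithmetic used above can be summarised in a few lines of plain C: bit 7 of the register address selects the memory page, and the same bit doubles as the read/write flag on the wire. This is only an illustration of the bit manipulation, not kernel code:

#include <stdio.h>

int main(void)
{
        unsigned int reg = 0xd0;        /* e.g. the chip-ID register */

        /* registers 0x80-0xff live on page 0, 0x00-0x7f on page 1 */
        unsigned int page = (reg & 0x80) ? 0 : 1;

        /* only 7 address bits go on the bus; bit 7 becomes the RW flag */
        unsigned int read_addr  = reg | 0x80;   /* RW = 1 for reads  */
        unsigned int write_addr = reg & 0x7f;   /* RW = 0 for writes */

        printf("page=%u read=0x%02x write=0x%02x\n", page, read_addr, write_addr);
        return 0;
}
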
+
static int bme680_regmap_spi_write(void *context, const void *data,
size_t count)
{
- struct spi_device *spi = context;
+ struct bme680_spi_bus_context *ctx = context;
+ struct spi_device *spi = ctx->spi;
+ int ret;
u8 buf[2];
memcpy(buf, data, 2);
+
+ ret = bme680_regmap_spi_select_page(ctx, buf[0]);
+ if (ret)
+ return ret;
+
/*
* The SPI register address (= full register address without bit 7)
* and the write command (bit7 = RW = '0')
*/
buf[0] &= ~0x80;
- return spi_write_then_read(spi, buf, 2, NULL, 0);
+ return spi_write(spi, buf, 2);
}
static int bme680_regmap_spi_read(void *context, const void *reg,
size_t reg_size, void *val, size_t val_size)
{
- struct spi_device *spi = context;
+ struct bme680_spi_bus_context *ctx = context;
+ struct spi_device *spi = ctx->spi;
+ int ret;
+ u8 addr = *(const u8 *)reg;
+
+ ret = bme680_regmap_spi_select_page(ctx, addr);
+ if (ret)
+ return ret;
- return spi_write_then_read(spi, reg, reg_size, val, val_size);
+ addr |= 0x80; /* bit7 = RW = '1' */
+
+ return spi_write_then_read(spi, &addr, 1, val, val_size);
}
static struct regmap_bus bme680_regmap_bus = {
@@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = {
static int bme680_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
+ struct bme680_spi_bus_context *bus_context;
struct regmap *regmap;
- unsigned int val;
int ret;
spi->bits_per_word = 8;
@@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi)
return ret;
}
+ bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
+ if (!bus_context)
+ return -ENOMEM;
+
+ bus_context->spi = spi;
+ bus_context->current_page = 0xff; /* Undefined on warm boot */
+
regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
- &spi->dev, &bme680_regmap_config);
+ bus_context, &bme680_regmap_config);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Failed to register spi regmap %d\n",
(int)PTR_ERR(regmap));
return PTR_ERR(regmap);
}
- ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
- BME680_CMD_SOFTRESET);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to reset chip\n");
- return ret;
- }
-
- /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
- ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
- if (ret < 0) {
- dev_err(&spi->dev, "Error reading SPI chip ID\n");
- return ret;
- }
-
- if (val != BME680_CHIP_ID_VAL) {
- dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
- val, BME680_CHIP_ID_VAL);
- return -ENODEV;
- }
- /*
- * select Page 1 of spi_mem_page to enable access to
- * to registers from address 0x00 to 0x7F.
- */
- ret = regmap_write_bits(regmap, BME680_REG_STATUS,
- BME680_SPI_MEM_PAGE_BIT,
- BME680_SPI_MEM_PAGE_1_VAL);
- if (ret < 0) {
- dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
- return ret;
- }
-
return bme680_core_probe(&spi->dev, regmap, id->name);
}
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index db8e7b2327b3..23c9ab252470 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -321,7 +321,12 @@ static int pms7003_probe(struct serdev_device *serdev)
}
static const struct of_device_id pms7003_of_match[] = {
+ { .compatible = "plantower,pms1003" },
+ { .compatible = "plantower,pms3003" },
+ { .compatible = "plantower,pms5003" },
+ { .compatible = "plantower,pms6003" },
{ .compatible = "plantower,pms7003" },
+ { .compatible = "plantower,pmsa003" },
{ }
};
MODULE_DEVICE_TABLE(of, pms7003_of_match);
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 89cb0066a6e0..17af4e0fd5f8 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -1,22 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cros_ec_sensors - Driver for Chrome OS Embedded Controller sensors.
*
* Copyright (C) 2016 Google, Inc
*
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This driver uses the cros-ec interface to communicate with the Chrome OS
* EC about sensors data. Data access is presented through iio sysfs.
*/
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
#include <linux/iio/common/cros_ec_sensors_core.h>
@@ -30,7 +21,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#define CROS_EC_SENSORS_MAX_CHANNELS 4
@@ -103,9 +93,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
* Do not use IIO_DEGREE_TO_RAD to avoid precision
* loss. Round to the nearest integer.
*/
- *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
- *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
- ret = IIO_VAL_FRACTIONAL;
+ *val = 0;
+ *val2 = div_s64(val64 * 3141592653ULL,
+ 180 << (CROS_EC_SENSOR_BITS - 1));
+ ret = IIO_VAL_INT_PLUS_NANO;
break;
case MOTIONSENSE_TYPE_MAG:
/*
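
The new angular-velocity scaling can be checked with ordinary 64-bit arithmetic. By my reading, val64 here is the EC-reported gyroscope range in degrees per second; the 2000 dps value below is only an example, and CROS_EC_SENSOR_BITS is assumed to be 16 as in the driver:

#include <inttypes.h>
#include <stdio.h>

#define CROS_EC_SENSOR_BITS     16

int main(void)
{
        int64_t val64 = 2000;   /* example range, degrees per second */

        /* pi expressed as 3141592653e-9, avoiding IIO_DEGREE_TO_RAD */
        int64_t nano = val64 * 3141592653LL /
                       (180LL << (CROS_EC_SENSOR_BITS - 1));

        /* reported as IIO_VAL_INT_PLUS_NANO: val = 0, val2 = nano */
        printf("scale = 0.%09" PRId64 " rad/s per count\n", nano);
        return 0;
}
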
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 414cc43c287e..719a0df5aeeb 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cros_ec_sensors_core - Common function for Chrome OS EC sensor driver.
*
* Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
@@ -25,7 +17,6 @@
#include <linux/mfd/cros_ec_commands.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/platform_device.h>
static char *cros_ec_loc[] = {
@@ -269,6 +260,17 @@ static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
return 0;
}
+/**
+ * cros_ec_sensors_read_lpc() - read acceleration data from EC shared memory.
+ * @indio_dev: pointer to IIO device.
+ * @scan_mask: bitmap of the sensor indices to scan.
+ * @data: location to store data.
+ *
+ * Note: this is the safe function for reading the EC data. It guarantees
+ * that the data sampled was not modified by the EC while being read.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data)
{
diff --git a/drivers/iio/common/ms_sensors/Kconfig b/drivers/iio/common/ms_sensors/Kconfig
index b28a92b97cf9..89398d0afc0d 100644
--- a/drivers/iio/common/ms_sensors/Kconfig
+++ b/drivers/iio/common/ms_sensors/Kconfig
@@ -3,4 +3,4 @@
#
config IIO_MS_SENSORS_I2C
- tristate
+ tristate
diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
index 645f2e3975db..e38f704d88b7 100644
--- a/drivers/iio/common/ssp_sensors/ssp_iio.c
+++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
@@ -81,7 +81,7 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
unsigned int len, int64_t timestamp)
{
__le32 time;
- int64_t calculated_time;
+ int64_t calculated_time = 0;
struct ssp_sensor_data *spd = iio_priv(indio_dev);
if (indio_dev->scan_bytes == 0)
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
deleted file mode 100644
index 92be8d0f7735..000000000000
--- a/drivers/iio/counter/104-quad-8.c
+++ /dev/null
@@ -1,631 +0,0 @@
-/*
- * IIO driver for the ACCES 104-QUAD-8
- * Copyright (C) 2016 William Breathitt Gray
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/types.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/isa.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-
-#define QUAD8_EXTENT 32
-
-static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
-static unsigned int num_quad8;
-module_param_array(base, uint, &num_quad8, 0);
-MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
-
-#define QUAD8_NUM_COUNTERS 8
-
-/**
- * struct quad8_iio - IIO device private data structure
- * @preset: array of preset values
- * @count_mode: array of count mode configurations
- * @quadrature_mode: array of quadrature mode configurations
- * @quadrature_scale: array of quadrature mode scale configurations
- * @ab_enable: array of A and B inputs enable configurations
- * @preset_enable: array of set_to_preset_on_index attribute configurations
- * @synchronous_mode: array of index function synchronous mode configurations
- * @index_polarity: array of index function polarity configurations
- * @base: base port address of the IIO device
- */
-struct quad8_iio {
- unsigned int preset[QUAD8_NUM_COUNTERS];
- unsigned int count_mode[QUAD8_NUM_COUNTERS];
- unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
- unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
- unsigned int ab_enable[QUAD8_NUM_COUNTERS];
- unsigned int preset_enable[QUAD8_NUM_COUNTERS];
- unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
- unsigned int index_polarity[QUAD8_NUM_COUNTERS];
- unsigned int base;
-};
-
-#define QUAD8_REG_CHAN_OP 0x11
-#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
-/* Error flag */
-#define QUAD8_FLAG_E BIT(4)
-/* Up/Down flag */
-#define QUAD8_FLAG_UD BIT(5)
-/* Reset and Load Signal Decoders */
-#define QUAD8_CTR_RLD 0x00
-/* Counter Mode Register */
-#define QUAD8_CTR_CMR 0x20
-/* Input / Output Control Register */
-#define QUAD8_CTR_IOR 0x40
-/* Index Control Register */
-#define QUAD8_CTR_IDR 0x60
-/* Reset Byte Pointer (three byte data pointer) */
-#define QUAD8_RLD_RESET_BP 0x01
-/* Reset Counter */
-#define QUAD8_RLD_RESET_CNTR 0x02
-/* Reset Borrow Toggle, Carry Toggle, Compare Toggle, and Sign flags */
-#define QUAD8_RLD_RESET_FLAGS 0x04
-/* Reset Error flag */
-#define QUAD8_RLD_RESET_E 0x06
-/* Preset Register to Counter */
-#define QUAD8_RLD_PRESET_CNTR 0x08
-/* Transfer Counter to Output Latch */
-#define QUAD8_RLD_CNTR_OUT 0x10
-#define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00
-#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
-
-static int quad8_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int *val, int *val2, long mask)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
- int i;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- if (chan->type == IIO_INDEX) {
- *val = !!(inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
- & BIT(chan->channel));
- return IIO_VAL_INT;
- }
-
- flags = inb(base_offset + 1);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- *val = (borrow ^ carry) << 24;
-
- /* Reset Byte Pointer; transfer Counter to Output Latch */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
- base_offset + 1);
-
- for (i = 0; i < 3; i++)
- *val |= (unsigned int)inb(base_offset) << (8 * i);
-
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_ENABLE:
- *val = priv->ab_enable[chan->channel];
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 1;
- *val2 = priv->quadrature_scale[chan->channel];
- return IIO_VAL_FRACTIONAL_LOG2;
- }
-
- return -EINVAL;
-}
-
-static int quad8_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val, int val2, long mask)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel;
- int i;
- unsigned int ior_cfg;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- if (chan->type == IIO_INDEX)
- return -EINVAL;
-
- /* Only 24-bit values are supported */
- if ((unsigned int)val > 0xFFFFFF)
- return -EINVAL;
-
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
-
- /* Counter can only be set via Preset Register */
- for (i = 0; i < 3; i++)
- outb(val >> (8 * i), base_offset);
-
- /* Transfer Preset Register to Counter */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
-
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
-
- /* Set Preset Register back to original value */
- val = priv->preset[chan->channel];
- for (i = 0; i < 3; i++)
- outb(val >> (8 * i), base_offset);
-
- /* Reset Borrow, Carry, Compare, and Sign flags */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
- /* Reset Error flag */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
-
- return 0;
- case IIO_CHAN_INFO_ENABLE:
- /* only boolean values accepted */
- if (val < 0 || val > 1)
- return -EINVAL;
-
- priv->ab_enable[chan->channel] = val;
-
- ior_cfg = val | priv->preset_enable[chan->channel] << 1;
-
- /* Load I/O control configuration */
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
-
- return 0;
- case IIO_CHAN_INFO_SCALE:
- /* Quadrature scaling only available in quadrature mode */
- if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
- return -EINVAL;
-
- /* Only three gain states (1, 0.5, 0.25) */
- if (val == 1 && !val2)
- priv->quadrature_scale[chan->channel] = 0;
- else if (!val)
- switch (val2) {
- case 500000:
- priv->quadrature_scale[chan->channel] = 1;
- break;
- case 250000:
- priv->quadrature_scale[chan->channel] = 2;
- break;
- default:
- return -EINVAL;
- }
- else
- return -EINVAL;
-
- return 0;
- }
-
- return -EINVAL;
-}
-
-static const struct iio_info quad8_info = {
- .read_raw = quad8_read_raw,
- .write_raw = quad8_write_raw
-};
-
-static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
- const struct iio_chan_spec *chan, char *buf)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
-}
-
-static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
- const struct iio_chan_spec *chan, const char *buf, size_t len)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel;
- unsigned int preset;
- int ret;
- int i;
-
- ret = kstrtouint(buf, 0, &preset);
- if (ret)
- return ret;
-
- /* Only 24-bit values are supported */
- if (preset > 0xFFFFFF)
- return -EINVAL;
-
- priv->preset[chan->channel] = preset;
-
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
-
- /* Set Preset Register */
- for (i = 0; i < 3; i++)
- outb(preset >> (8 * i), base_offset);
-
- return len;
-}
-
-static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan, char *buf)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n",
- !priv->preset_enable[chan->channel]);
-}
-
-static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
- size_t len)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
- bool preset_enable;
- int ret;
- unsigned int ior_cfg;
-
- ret = kstrtobool(buf, &preset_enable);
- if (ret)
- return ret;
-
- /* Preset enable is active low in Input/Output Control register */
- preset_enable = !preset_enable;
-
- priv->preset_enable[chan->channel] = preset_enable;
-
- ior_cfg = priv->ab_enable[chan->channel] |
- (unsigned int)preset_enable << 1;
-
- /* Load I/O control configuration to Input / Output Control Register */
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
-
- return len;
-}
-
-static const char *const quad8_noise_error_states[] = {
- "No excessive noise is present at the count inputs",
- "Excessive noise is present at the count inputs"
-};
-
-static int quad8_get_noise_error(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- return !!(inb(base_offset) & QUAD8_FLAG_E);
-}
-
-static const struct iio_enum quad8_noise_error_enum = {
- .items = quad8_noise_error_states,
- .num_items = ARRAY_SIZE(quad8_noise_error_states),
- .get = quad8_get_noise_error
-};
-
-static const char *const quad8_count_direction_states[] = {
- "down",
- "up"
-};
-
-static int quad8_get_count_direction(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- return !!(inb(base_offset) & QUAD8_FLAG_UD);
-}
-
-static const struct iio_enum quad8_count_direction_enum = {
- .items = quad8_count_direction_states,
- .num_items = ARRAY_SIZE(quad8_count_direction_states),
- .get = quad8_get_count_direction
-};
-
-static const char *const quad8_count_modes[] = {
- "normal",
- "range limit",
- "non-recycle",
- "modulo-n"
-};
-
-static int quad8_set_count_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int count_mode)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- unsigned int mode_cfg = count_mode << 1;
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- priv->count_mode[chan->channel] = count_mode;
-
- /* Add quadrature mode configuration */
- if (priv->quadrature_mode[chan->channel])
- mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
-
- /* Load mode configuration to Counter Mode Register */
- outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
-
- return 0;
-}
-
-static int quad8_get_count_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return priv->count_mode[chan->channel];
-}
-
-static const struct iio_enum quad8_count_mode_enum = {
- .items = quad8_count_modes,
- .num_items = ARRAY_SIZE(quad8_count_modes),
- .set = quad8_set_count_mode,
- .get = quad8_get_count_mode
-};
-
-static const char *const quad8_synchronous_modes[] = {
- "non-synchronous",
- "synchronous"
-};
-
-static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int synchronous_mode)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const unsigned int idr_cfg = synchronous_mode |
- priv->index_polarity[chan->channel] << 1;
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- /* Index function must be non-synchronous in non-quadrature mode */
- if (synchronous_mode && !priv->quadrature_mode[chan->channel])
- return -EINVAL;
-
- priv->synchronous_mode[chan->channel] = synchronous_mode;
-
- /* Load Index Control configuration to Index Control Register */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
-
- return 0;
-}
-
-static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return priv->synchronous_mode[chan->channel];
-}
-
-static const struct iio_enum quad8_synchronous_mode_enum = {
- .items = quad8_synchronous_modes,
- .num_items = ARRAY_SIZE(quad8_synchronous_modes),
- .set = quad8_set_synchronous_mode,
- .get = quad8_get_synchronous_mode
-};
-
-static const char *const quad8_quadrature_modes[] = {
- "non-quadrature",
- "quadrature"
-};
-
-static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int quadrature_mode)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- if (quadrature_mode)
- mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
- else {
- /* Quadrature scaling only available in quadrature mode */
- priv->quadrature_scale[chan->channel] = 0;
-
- /* Synchronous function not supported in non-quadrature mode */
- if (priv->synchronous_mode[chan->channel])
- quad8_set_synchronous_mode(indio_dev, chan, 0);
- }
-
- priv->quadrature_mode[chan->channel] = quadrature_mode;
-
- /* Load mode configuration to Counter Mode Register */
- outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
-
- return 0;
-}
-
-static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return priv->quadrature_mode[chan->channel];
-}
-
-static const struct iio_enum quad8_quadrature_mode_enum = {
- .items = quad8_quadrature_modes,
- .num_items = ARRAY_SIZE(quad8_quadrature_modes),
- .set = quad8_set_quadrature_mode,
- .get = quad8_get_quadrature_mode
-};
-
-static const char *const quad8_index_polarity_modes[] = {
- "negative",
- "positive"
-};
-
-static int quad8_set_index_polarity(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int index_polarity)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
- index_polarity << 1;
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- priv->index_polarity[chan->channel] = index_polarity;
-
- /* Load Index Control configuration to Index Control Register */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
-
- return 0;
-}
-
-static int quad8_get_index_polarity(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return priv->index_polarity[chan->channel];
-}
-
-static const struct iio_enum quad8_index_polarity_enum = {
- .items = quad8_index_polarity_modes,
- .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
- .set = quad8_set_index_polarity,
- .get = quad8_get_index_polarity
-};
-
-static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
- {
- .name = "preset",
- .shared = IIO_SEPARATE,
- .read = quad8_read_preset,
- .write = quad8_write_preset
- },
- {
- .name = "set_to_preset_on_index",
- .shared = IIO_SEPARATE,
- .read = quad8_read_set_to_preset_on_index,
- .write = quad8_write_set_to_preset_on_index
- },
- IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
- IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
- IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
- IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
- IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
- IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
- IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
- IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
- {}
-};
-
-static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
- IIO_ENUM("synchronous_mode", IIO_SEPARATE,
- &quad8_synchronous_mode_enum),
- IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
- IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
- IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
- {}
-};
-
-#define QUAD8_COUNT_CHAN(_chan) { \
- .type = IIO_COUNT, \
- .channel = (_chan), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE), \
- .ext_info = quad8_count_ext_info, \
- .indexed = 1 \
-}
-
-#define QUAD8_INDEX_CHAN(_chan) { \
- .type = IIO_INDEX, \
- .channel = (_chan), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .ext_info = quad8_index_ext_info, \
- .indexed = 1 \
-}
-
-static const struct iio_chan_spec quad8_channels[] = {
- QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
- QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
- QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
- QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
- QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
- QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
- QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
- QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
-};
-
-static int quad8_probe(struct device *dev, unsigned int id)
-{
- struct iio_dev *indio_dev;
- struct quad8_iio *priv;
- int i, j;
- unsigned int base_offset;
-
- indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
- if (!indio_dev)
- return -ENOMEM;
-
- if (!devm_request_region(dev, base[id], QUAD8_EXTENT,
- dev_name(dev))) {
- dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
- base[id], base[id] + QUAD8_EXTENT);
- return -EBUSY;
- }
-
- indio_dev->info = &quad8_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
- indio_dev->channels = quad8_channels;
- indio_dev->name = dev_name(dev);
- indio_dev->dev.parent = dev;
-
- priv = iio_priv(indio_dev);
- priv->base = base[id];
-
- /* Reset all counters and disable interrupt function */
- outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
- /* Set initial configuration for all counters */
- for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
- base_offset = base[id] + 2 * i;
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
- /* Reset Preset Register */
- for (j = 0; j < 3; j++)
- outb(0x00, base_offset);
- /* Reset Borrow, Carry, Compare, and Sign flags */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
- /* Reset Error flag */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
- /* Binary encoding; Normal count; non-quadrature mode */
- outb(QUAD8_CTR_CMR, base_offset + 1);
- /* Disable A and B inputs; preset on index; FLG1 as Carry */
- outb(QUAD8_CTR_IOR, base_offset + 1);
- /* Disable index function; negative index polarity */
- outb(QUAD8_CTR_IDR, base_offset + 1);
- }
- /* Enable all counters */
- outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
-
- return devm_iio_device_register(dev, indio_dev);
-}
-
-static struct isa_driver quad8_driver = {
- .probe = quad8_probe,
- .driver = {
- .name = "104-quad-8"
- }
-};
-
-module_isa_driver(quad8_driver, num_quad8);
-
-MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
-MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig
deleted file mode 100644
index bf1e559ad7cd..000000000000
--- a/drivers/iio/counter/Kconfig
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-# Counter devices
-#
-# When adding new entries keep the list in alphabetical order
-
-menu "Counters"
-
-config 104_QUAD_8
- tristate "ACCES 104-QUAD-8 driver"
- depends on PC104 && X86
- select ISA_BUS_API
- help
- Say yes here to build support for the ACCES 104-QUAD-8 quadrature
- encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
-
- Performing a write to a counter's IIO_CHAN_INFO_RAW sets the counter and
- also clears the counter's respective error flag. Although the counters
- have a 25-bit range, only the lower 24 bits may be set, either directly
- or via a counter's preset attribute. Interrupts are not supported by
- this driver.
-
- The base port addresses for the devices may be configured via the base
- array module parameter.
-
-config STM32_LPTIMER_CNT
- tristate "STM32 LP Timer encoder counter driver"
- depends on MFD_STM32_LPTIMER || COMPILE_TEST
- help
- Select this option to enable STM32 Low-Power Timer quadrature encoder
- and counter driver.
-
- To compile this driver as a module, choose M here: the
- module will be called stm32-lptimer-cnt.
-endmenu
diff --git a/drivers/iio/counter/Makefile b/drivers/iio/counter/Makefile
deleted file mode 100644
index 1b9a896eb488..000000000000
--- a/drivers/iio/counter/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for IIO counter devices
-#
-
-# When adding new entries keep the list in alphabetical order
-
-obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
-obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
diff --git a/drivers/iio/counter/stm32-lptimer-cnt.c b/drivers/iio/counter/stm32-lptimer-cnt.c
deleted file mode 100644
index 42fb8ba67090..000000000000
--- a/drivers/iio/counter/stm32-lptimer-cnt.c
+++ /dev/null
@@ -1,382 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * STM32 Low-Power Timer Encoder and Counter driver
- *
- * Copyright (C) STMicroelectronics 2017
- *
- * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
- * Inspired by 104-quad-8 and stm32-timer-trigger drivers.
- *
- */
-
-#include <linux/bitfield.h>
-#include <linux/iio/iio.h>
-#include <linux/mfd/stm32-lptimer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-struct stm32_lptim_cnt {
- struct device *dev;
- struct regmap *regmap;
- struct clk *clk;
- u32 preset;
- u32 polarity;
- u32 quadrature_mode;
-};
-
-static int stm32_lptim_is_enabled(struct stm32_lptim_cnt *priv)
-{
- u32 val;
- int ret;
-
- ret = regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
- if (ret)
- return ret;
-
- return FIELD_GET(STM32_LPTIM_ENABLE, val);
-}
-
-static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
- int enable)
-{
- int ret;
- u32 val;
-
- val = FIELD_PREP(STM32_LPTIM_ENABLE, enable);
- ret = regmap_write(priv->regmap, STM32_LPTIM_CR, val);
- if (ret)
- return ret;
-
- if (!enable) {
- clk_disable(priv->clk);
- return 0;
- }
-
- /* LP timer must be enabled before writing CMP & ARR */
- ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->preset);
- if (ret)
- return ret;
-
- ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
- if (ret)
- return ret;
-
- /* ensure CMP & ARR registers are properly written */
- ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
- (val & STM32_LPTIM_CMPOK_ARROK),
- 100, 1000);
- if (ret)
- return ret;
-
- ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
- STM32_LPTIM_CMPOKCF_ARROKCF);
- if (ret)
- return ret;
-
- ret = clk_enable(priv->clk);
- if (ret) {
- regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
- return ret;
- }
-
- /* Start LP timer in continuous mode */
- return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
- STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
-}
-
-static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
-{
- u32 mask = STM32_LPTIM_ENC | STM32_LPTIM_COUNTMODE |
- STM32_LPTIM_CKPOL | STM32_LPTIM_PRESC;
- u32 val;
-
- /* Setup LP timer encoder/counter and polarity, without prescaler */
- if (priv->quadrature_mode)
- val = enable ? STM32_LPTIM_ENC : 0;
- else
- val = enable ? STM32_LPTIM_COUNTMODE : 0;
- val |= FIELD_PREP(STM32_LPTIM_CKPOL, enable ? priv->polarity : 0);
-
- return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val);
-}
-
-static int stm32_lptim_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_ENABLE:
- if (val < 0 || val > 1)
- return -EINVAL;
-
- /* Check nobody uses the timer, or already disabled/enabled */
- ret = stm32_lptim_is_enabled(priv);
- if ((ret < 0) || (!ret && !val))
- return ret;
- if (val && ret)
- return -EBUSY;
-
- ret = stm32_lptim_setup(priv, val);
- if (ret)
- return ret;
- return stm32_lptim_set_enable_state(priv, val);
-
- default:
- return -EINVAL;
- }
-}
-
-static int stm32_lptim_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
- u32 dat;
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &dat);
- if (ret)
- return ret;
- *val = dat;
- return IIO_VAL_INT;
-
- case IIO_CHAN_INFO_ENABLE:
- ret = stm32_lptim_is_enabled(priv);
- if (ret < 0)
- return ret;
- *val = ret;
- return IIO_VAL_INT;
-
- case IIO_CHAN_INFO_SCALE:
- /* Non-quadrature mode: scale = 1 */
- *val = 1;
- *val2 = 0;
- if (priv->quadrature_mode) {
- /*
- * Quadrature encoder mode:
- * - both edges, quarter cycle, scale is 0.25
- * - either rising/falling edge scale is 0.5
- */
- if (priv->polarity > 1)
- *val2 = 2;
- else
- *val2 = 1;
- }
- return IIO_VAL_FRACTIONAL_LOG2;
-
- default:
- return -EINVAL;
- }
-}
-
-static const struct iio_info stm32_lptim_cnt_iio_info = {
- .read_raw = stm32_lptim_read_raw,
- .write_raw = stm32_lptim_write_raw,
-};
-
-static const char *const stm32_lptim_quadrature_modes[] = {
- "non-quadrature",
- "quadrature",
-};
-
-static int stm32_lptim_get_quadrature_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
-
- return priv->quadrature_mode;
-}
-
-static int stm32_lptim_set_quadrature_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- unsigned int type)
-{
- struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
-
- if (stm32_lptim_is_enabled(priv))
- return -EBUSY;
-
- priv->quadrature_mode = type;
-
- return 0;
-}
-
-static const struct iio_enum stm32_lptim_quadrature_mode_en = {
- .items = stm32_lptim_quadrature_modes,
- .num_items = ARRAY_SIZE(stm32_lptim_quadrature_modes),
- .get = stm32_lptim_get_quadrature_mode,
- .set = stm32_lptim_set_quadrature_mode,
-};
-
-static const char * const stm32_lptim_cnt_polarity[] = {
- "rising-edge", "falling-edge", "both-edges",
-};
-
-static int stm32_lptim_cnt_get_polarity(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
-
- return priv->polarity;
-}
-
-static int stm32_lptim_cnt_set_polarity(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- unsigned int type)
-{
- struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
-
- if (stm32_lptim_is_enabled(priv))
- return -EBUSY;
-
- priv->polarity = type;
-
- return 0;
-}
-
-static const struct iio_enum stm32_lptim_cnt_polarity_en = {
- .items = stm32_lptim_cnt_polarity,
- .num_items = ARRAY_SIZE(stm32_lptim_cnt_polarity),
- .get = stm32_lptim_cnt_get_polarity,
- .set = stm32_lptim_cnt_set_polarity,
-};
-
-static ssize_t stm32_lptim_cnt_get_preset(struct iio_dev *indio_dev,
- uintptr_t private,
- const struct iio_chan_spec *chan,
- char *buf)
-{
- struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset);
-}
-
-static ssize_t stm32_lptim_cnt_set_preset(struct iio_dev *indio_dev,
- uintptr_t private,
- const struct iio_chan_spec *chan,
- const char *buf, size_t len)
-{
- struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
- int ret;
-
- if (stm32_lptim_is_enabled(priv))
- return -EBUSY;
-
- ret = kstrtouint(buf, 0, &priv->preset);
- if (ret)
- return ret;
-
- if (priv->preset > STM32_LPTIM_MAX_ARR)
- return -EINVAL;
-
- return len;
-}
-
-/* LP timer with encoder */
-static const struct iio_chan_spec_ext_info stm32_lptim_enc_ext_info[] = {
- {
- .name = "preset",
- .shared = IIO_SEPARATE,
- .read = stm32_lptim_cnt_get_preset,
- .write = stm32_lptim_cnt_set_preset,
- },
- IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en),
- IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en),
- IIO_ENUM("quadrature_mode", IIO_SEPARATE,
- &stm32_lptim_quadrature_mode_en),
- IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_lptim_quadrature_mode_en),
- {}
-};
-
-static const struct iio_chan_spec stm32_lptim_enc_channels = {
- .type = IIO_COUNT,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_ENABLE) |
- BIT(IIO_CHAN_INFO_SCALE),
- .ext_info = stm32_lptim_enc_ext_info,
- .indexed = 1,
-};
-
-/* LP timer without encoder (counter only) */
-static const struct iio_chan_spec_ext_info stm32_lptim_cnt_ext_info[] = {
- {
- .name = "preset",
- .shared = IIO_SEPARATE,
- .read = stm32_lptim_cnt_get_preset,
- .write = stm32_lptim_cnt_set_preset,
- },
- IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en),
- IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en),
- {}
-};
-
-static const struct iio_chan_spec stm32_lptim_cnt_channels = {
- .type = IIO_COUNT,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_ENABLE) |
- BIT(IIO_CHAN_INFO_SCALE),
- .ext_info = stm32_lptim_cnt_ext_info,
- .indexed = 1,
-};
-
-static int stm32_lptim_cnt_probe(struct platform_device *pdev)
-{
- struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
- struct stm32_lptim_cnt *priv;
- struct iio_dev *indio_dev;
-
- if (IS_ERR_OR_NULL(ddata))
- return -EINVAL;
-
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
- if (!indio_dev)
- return -ENOMEM;
-
- priv = iio_priv(indio_dev);
- priv->dev = &pdev->dev;
- priv->regmap = ddata->regmap;
- priv->clk = ddata->clk;
- priv->preset = STM32_LPTIM_MAX_ARR;
-
- indio_dev->name = dev_name(&pdev->dev);
- indio_dev->dev.parent = &pdev->dev;
- indio_dev->dev.of_node = pdev->dev.of_node;
- indio_dev->info = &stm32_lptim_cnt_iio_info;
- if (ddata->has_encoder)
- indio_dev->channels = &stm32_lptim_enc_channels;
- else
- indio_dev->channels = &stm32_lptim_cnt_channels;
- indio_dev->num_channels = 1;
-
- platform_set_drvdata(pdev, priv);
-
- return devm_iio_device_register(&pdev->dev, indio_dev);
-}
-
-static const struct of_device_id stm32_lptim_cnt_of_match[] = {
- { .compatible = "st,stm32-lptimer-counter", },
- {},
-};
-MODULE_DEVICE_TABLE(of, stm32_lptim_cnt_of_match);
-
-static struct platform_driver stm32_lptim_cnt_driver = {
- .probe = stm32_lptim_cnt_probe,
- .driver = {
- .name = "stm32-lptimer-counter",
- .of_match_table = stm32_lptim_cnt_of_match,
- },
-};
-module_platform_driver(stm32_lptim_cnt_driver);
-
-MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
-MODULE_ALIAS("platform:stm32-lptimer-counter");
-MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 2f98cb2a3b96..6c3ba143839b 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -112,6 +112,8 @@ struct ad5064_state {
bool use_internal_vref;
ad5064_write_func write;
+ /* Lock used to maintain consistency between cached and dev state */
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -248,11 +250,11 @@ static int ad5064_set_powerdown_mode(struct iio_dev *indio_dev,
struct ad5064_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->pwr_down_mode[chan->channel] = mode + 1;
ret = ad5064_sync_powerdown_mode(st, chan);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -291,11 +293,11 @@ static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->pwr_down[chan->channel] = pwr_down;
ret = ad5064_sync_powerdown_mode(st, chan);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -349,12 +351,12 @@ static int ad5064_write_raw(struct iio_dev *indio_dev,
if (val >= (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5064_write(st, AD5064_CMD_WRITE_INPUT_N_UPDATE_N,
chan->address, val, chan->scan_type.shift);
if (ret == 0)
st->dac_cache[chan->channel] = val;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -856,6 +858,7 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
return -ENOMEM;
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
dev_set_drvdata(dev, indio_dev);
st->chip_info = &ad5064_chip_info_tbl[type];
diff --git a/drivers/iio/dac/ad5758.c b/drivers/iio/dac/ad5758.c
index 2bdf1b0aee06..a513c70faefa 100644
--- a/drivers/iio/dac/ad5758.c
+++ b/drivers/iio/dac/ad5758.c
@@ -72,8 +72,6 @@
#define AD5758_DCDC_CONFIG1_DCDC_VPROG_MODE(x) (((x) & 0x1F) << 0)
#define AD5758_DCDC_CONFIG1_DCDC_MODE_MSK GENMASK(6, 5)
#define AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(x) (((x) & 0x3) << 5)
-#define AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK BIT(7)
-#define AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(x) (((x) & 0x1) << 7)
/* AD5758_DCDC_CONFIG2 */
#define AD5758_DCDC_CONFIG2_ILIMIT_MSK GENMASK(3, 1)
@@ -84,6 +82,10 @@
/* AD5758_DIGITAL_DIAG_RESULTS */
#define AD5758_CAL_MEM_UNREFRESHED_MSK BIT(15)
+/* AD5758_ADC_CONFIG */
+#define AD5758_ADC_CONFIG_PPC_BUF_EN(x) (((x) & 0x1) << 11)
+#define AD5758_ADC_CONFIG_PPC_BUF_MSK BIT(11)
+
#define AD5758_WR_FLAG_MSK(x) (0x80 | ((x) & 0x1F))
#define AD5758_FULL_SCALE_MICRO 65535000000ULL
@@ -315,6 +317,18 @@ static int ad5758_set_dc_dc_conv_mode(struct ad5758_state *st,
{
int ret;
+ /*
+ * The ENABLE_PPC_BUFFERS bit must be set prior to enabling PPC current
+ * mode.
+ */
+ if (mode == AD5758_DCDC_MODE_PPC_CURRENT) {
+ ret = ad5758_spi_write_mask(st, AD5758_ADC_CONFIG,
+ AD5758_ADC_CONFIG_PPC_BUF_MSK,
+ AD5758_ADC_CONFIG_PPC_BUF_EN(1));
+ if (ret < 0)
+ return ret;
+ }
+
ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
AD5758_DCDC_CONFIG1_DCDC_MODE_MSK,
AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(mode));
@@ -444,23 +458,6 @@ static int ad5758_set_out_range(struct ad5758_state *st, int range)
AD5758_CAL_MEM_UNREFRESHED_MSK);
}
-static int ad5758_fault_prot_switch_en(struct ad5758_state *st, bool enable)
-{
- int ret;
-
- ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
- AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK,
- AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(enable));
- if (ret < 0)
- return ret;
- /*
- * Poll the BUSY_3WI bit in the DCDC_CONFIG2 register until it is 0.
- * This allows the 3-wire interface communication to complete.
- */
- return ad5758_wait_for_task_complete(st, AD5758_DCDC_CONFIG2,
- AD5758_DCDC_CONFIG2_BUSY_3WI_MSK);
-}
-
static int ad5758_internal_buffers_en(struct ad5758_state *st, bool enable)
{
int ret;
@@ -585,8 +582,8 @@ static ssize_t ad5758_write_powerdown(struct iio_dev *indio_dev,
{
struct ad5758_state *st = iio_priv(indio_dev);
bool pwr_down;
- unsigned int dcdc_config1_mode, dc_dc_mode, dac_config_mode, val;
- unsigned long int dcdc_config1_msk, dac_config_msk;
+ unsigned int dc_dc_mode, dac_config_mode, val;
+ unsigned long int dac_config_msk;
int ret;
ret = kstrtobool(buf, &pwr_down);
@@ -602,17 +599,6 @@ static ssize_t ad5758_write_powerdown(struct iio_dev *indio_dev,
val = 1;
}
- dcdc_config1_mode = AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(dc_dc_mode) |
- AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(val);
- dcdc_config1_msk = AD5758_DCDC_CONFIG1_DCDC_MODE_MSK |
- AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK;
-
- ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
- dcdc_config1_msk,
- dcdc_config1_mode);
- if (ret < 0)
- goto err_unlock;
-
dac_config_mode = AD5758_DAC_CONFIG_OUT_EN_MODE(val) |
AD5758_DAC_CONFIG_INT_EN_MODE(val);
dac_config_msk = AD5758_DAC_CONFIG_OUT_EN_MSK |
@@ -841,11 +827,6 @@ static int ad5758_init(struct ad5758_state *st)
return ret;
}
- /* Enable the VIOUT fault protection switch (FPS is closed) */
- ret = ad5758_fault_prot_switch_en(st, 1);
- if (ret < 0)
- return ret;
-
/* Power up the DAC and internal (INT) amplifiers */
ret = ad5758_internal_buffers_en(st, 1);
if (ret < 0)
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 6d71fd905e29..c701a45469f6 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
inoutbuf[0] = 0x60; /* write EEPROM */
inoutbuf[0] |= data->ref_mode << 3;
+ inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
inoutbuf[1] = data->dac_value >> 4;
inoutbuf[2] = (data->dac_value & 0xf) << 4;
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index f6dcd8bce2b0..891e9cac019e 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -429,6 +429,6 @@ static struct i2c_driver dac5571_driver = {
};
module_i2c_driver(dac5571_driver);
-MODULE_AUTHOR("Sean Nyekjaer <sean.nyekjaer@prevas.dk>");
+MODULE_AUTHOR("Sean Nyekjaer <sean@geanix.dk>");
MODULE_DESCRIPTION("Texas Instruments 8/10/12-bit 1/4-channel DAC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index efd0005f59b4..c6033e341963 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -196,7 +196,10 @@ static __init int iio_dummy_evgen_init(void)
return ret;
device_initialize(&iio_evgen_dev);
dev_set_name(&iio_evgen_dev, "iio_evgen");
- return device_add(&iio_evgen_dev);
+ ret = device_add(&iio_evgen_dev);
+ if (ret)
+ put_device(&iio_evgen_dev);
+ return ret;
}
module_init(iio_dummy_evgen_init);
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 3f9be69499ec..9b9eee27176c 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -872,22 +872,22 @@ static int ad9523_setup(struct iio_dev *indio_dev)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL2_VCO_DIVIDER,
- AD9523_PLL2_VCO_DIV_M1(pdata->pll2_vco_diff_m1) |
- AD9523_PLL2_VCO_DIV_M2(pdata->pll2_vco_diff_m2) |
- AD_IFE(pll2_vco_diff_m1, 0,
+ AD9523_PLL2_VCO_DIV_M1(pdata->pll2_vco_div_m1) |
+ AD9523_PLL2_VCO_DIV_M2(pdata->pll2_vco_div_m2) |
+ AD_IFE(pll2_vco_div_m1, 0,
AD9523_PLL2_VCO_DIV_M1_PWR_DOWN_EN) |
- AD_IFE(pll2_vco_diff_m2, 0,
+ AD_IFE(pll2_vco_div_m2, 0,
AD9523_PLL2_VCO_DIV_M2_PWR_DOWN_EN));
if (ret < 0)
return ret;
- if (pdata->pll2_vco_diff_m1)
+ if (pdata->pll2_vco_div_m1)
st->vco_out_freq[AD9523_VCO1] =
- st->vco_freq / pdata->pll2_vco_diff_m1;
+ st->vco_freq / pdata->pll2_vco_div_m1;
- if (pdata->pll2_vco_diff_m2)
+ if (pdata->pll2_vco_div_m2)
st->vco_out_freq[AD9523_VCO2] =
- st->vco_freq / pdata->pll2_vco_diff_m2;
+ st->vco_freq / pdata->pll2_vco_div_m2;
st->vco_out_freq[AD9523_VCXO] = pdata->vcxo_freq;
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 3126cf05e6b9..61c00cee037d 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -73,6 +73,28 @@ config BMG160_SPI
tristate
select REGMAP_SPI
+config FXAS21002C
+ tristate "NXP FXAS21002C Gyro Sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select FXAS21002C_I2C if (I2C)
+ select FXAS21002C_SPI if (SPI)
+ depends on (I2C || SPI_MASTER)
+ help
+ Say yes here to build support for the NXP FXAS21002C Tri-axis Gyro
+ Sensor driver, connected via I2C or SPI.
+
+ This driver can also be built as a module. If so, the module
+ will be called fxas21002c_i2c or fxas21002c_spi.
+
+config FXAS21002C_I2C
+ tristate
+ select REGMAP_I2C
+
+config FXAS21002C_SPI
+ tristate
+ select REGMAP_SPI
+
config HID_SENSOR_GYRO_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index 295ec780c4eb..45cbd5dc644e 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -12,6 +12,9 @@ obj-$(CONFIG_ADXRS450) += adxrs450.o
obj-$(CONFIG_BMG160) += bmg160_core.o
obj-$(CONFIG_BMG160_I2C) += bmg160_i2c.o
obj-$(CONFIG_BMG160_SPI) += bmg160_spi.o
+obj-$(CONFIG_FXAS21002C) += fxas21002c_core.o
+obj-$(CONFIG_FXAS21002C_I2C) += fxas21002c_i2c.o
+obj-$(CONFIG_FXAS21002C_SPI) += fxas21002c_spi.o
obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 63ca31628a93..f26041e26c65 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -102,6 +102,7 @@ struct bmg160_data {
struct regmap *regmap;
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
+ struct iio_mount_matrix orientation;
struct mutex mutex;
s16 buffer[8];
u32 dps_range;
@@ -582,11 +583,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
return bmg160_get_filter(data, val);
case IIO_CHAN_INFO_SCALE:
- *val = 0;
switch (chan->type) {
case IIO_TEMP:
- *val2 = 500000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = 500;
+ return IIO_VAL_INT;
case IIO_ANGL_VEL:
{
int i;
@@ -594,6 +594,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
if (bmg160_scale_table[i].dps_range ==
data->dps_range) {
+ *val = 0;
*val2 = bmg160_scale_table[i].scale;
return IIO_VAL_INT_PLUS_MICRO;
}
@@ -794,6 +795,20 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev,
return 0;
}
+static const struct iio_mount_matrix *
+bmg160_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bmg160_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bmg160_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bmg160_get_mount_matrix),
+ { }
+};
+
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 400 1000 2000");
static IIO_CONST_ATTR(in_anglvel_scale_available,
@@ -831,6 +846,7 @@ static const struct iio_event_spec bmg160_event = {
.storagebits = 16, \
.endianness = IIO_LE, \
}, \
+ .ext_info = bmg160_ext_info, \
.event_spec = &bmg160_event, \
.num_event_specs = 1 \
}
@@ -1075,6 +1091,11 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
data->irq = irq;
data->regmap = regmap;
+ ret = iio_read_mount_matrix(dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+
ret = bmg160_chip_init(data);
if (ret < 0)
return ret;
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 90126a5a7663..934a092134f0 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -54,10 +54,19 @@ static const struct i2c_device_id bmg160_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, bmg160_i2c_id);
+static const struct of_device_id bmg160_of_match[] = {
+ { .compatible = "bosch,bmg160" },
+ { .compatible = "bosch,bmi055_gyro" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, bmg160_of_match);
+
static struct i2c_driver bmg160_i2c_driver = {
.driver = {
.name = "bmg160_i2c",
.acpi_match_table = ACPI_PTR(bmg160_acpi_match),
+ .of_match_table = bmg160_of_match,
.pm = &bmg160_pm_ops,
},
.probe = bmg160_i2c_probe,
diff --git a/drivers/iio/gyro/fxas21002c.h b/drivers/iio/gyro/fxas21002c.h
new file mode 100644
index 000000000000..566d92de2676
--- /dev/null
+++ b/drivers/iio/gyro/fxas21002c.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver for NXP FXAS21002C Gyroscope - Header
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef FXAS21002C_H_
+#define FXAS21002C_H_
+
+#include <linux/regmap.h>
+
+#define FXAS21002C_REG_STATUS 0x00
+#define FXAS21002C_REG_OUT_X_MSB 0x01
+#define FXAS21002C_REG_OUT_X_LSB 0x02
+#define FXAS21002C_REG_OUT_Y_MSB 0x03
+#define FXAS21002C_REG_OUT_Y_LSB 0x04
+#define FXAS21002C_REG_OUT_Z_MSB 0x05
+#define FXAS21002C_REG_OUT_Z_LSB 0x06
+#define FXAS21002C_REG_DR_STATUS 0x07
+#define FXAS21002C_REG_F_STATUS 0x08
+#define FXAS21002C_REG_F_SETUP 0x09
+#define FXAS21002C_REG_F_EVENT 0x0A
+#define FXAS21002C_REG_INT_SRC_FLAG 0x0B
+#define FXAS21002C_REG_WHO_AM_I 0x0C
+#define FXAS21002C_REG_CTRL0 0x0D
+#define FXAS21002C_REG_RT_CFG 0x0E
+#define FXAS21002C_REG_RT_SRC 0x0F
+#define FXAS21002C_REG_RT_THS 0x10
+#define FXAS21002C_REG_RT_COUNT 0x11
+#define FXAS21002C_REG_TEMP 0x12
+#define FXAS21002C_REG_CTRL1 0x13
+#define FXAS21002C_REG_CTRL2 0x14
+#define FXAS21002C_REG_CTRL3 0x15
+
+enum fxas21002c_fields {
+ F_DR_STATUS,
+ F_OUT_X_MSB,
+ F_OUT_X_LSB,
+ F_OUT_Y_MSB,
+ F_OUT_Y_LSB,
+ F_OUT_Z_MSB,
+ F_OUT_Z_LSB,
+ /* DR_STATUS */
+ F_ZYX_OW, F_Z_OW, F_Y_OW, F_X_OW, F_ZYX_DR, F_Z_DR, F_Y_DR, F_X_DR,
+ /* F_STATUS */
+ F_OVF, F_WMKF, F_CNT,
+ /* F_SETUP */
+ F_MODE, F_WMRK,
+ /* F_EVENT */
+ F_EVENT, FE_TIME,
+ /* INT_SOURCE_FLAG */
+ F_BOOTEND, F_SRC_FIFO, F_SRC_RT, F_SRC_DRDY,
+ /* WHO_AM_I */
+ F_WHO_AM_I,
+ /* CTRL_REG0 */
+ F_BW, F_SPIW, F_SEL, F_HPF_EN, F_FS,
+ /* RT_CFG */
+ F_ELE, F_ZTEFE, F_YTEFE, F_XTEFE,
+ /* RT_SRC */
+ F_EA, F_ZRT, F_ZRT_POL, F_YRT, F_YRT_POL, F_XRT, F_XRT_POL,
+ /* RT_THS */
+ F_DBCNTM, F_THS,
+ /* RT_COUNT */
+ F_RT_COUNT,
+ /* TEMP */
+ F_TEMP,
+ /* CTRL_REG1 */
+ F_RST, F_ST, F_DR, F_ACTIVE, F_READY,
+ /* CTRL_REG2 */
+ F_INT_CFG_FIFO, F_INT_EN_FIFO, F_INT_CFG_RT, F_INT_EN_RT,
+ F_INT_CFG_DRDY, F_INT_EN_DRDY, F_IPOL, F_PP_OD,
+ /* CTRL_REG3 */
+ F_WRAPTOONE, F_EXTCTRLEN, F_FS_DOUBLE,
+ /* MAX FIELDS */
+ F_MAX_FIELDS,
+};
+
+static const struct reg_field fxas21002c_reg_fields[] = {
+ [F_DR_STATUS] = REG_FIELD(FXAS21002C_REG_STATUS, 0, 7),
+ [F_OUT_X_MSB] = REG_FIELD(FXAS21002C_REG_OUT_X_MSB, 0, 7),
+ [F_OUT_X_LSB] = REG_FIELD(FXAS21002C_REG_OUT_X_LSB, 0, 7),
+ [F_OUT_Y_MSB] = REG_FIELD(FXAS21002C_REG_OUT_Y_MSB, 0, 7),
+ [F_OUT_Y_LSB] = REG_FIELD(FXAS21002C_REG_OUT_Y_LSB, 0, 7),
+ [F_OUT_Z_MSB] = REG_FIELD(FXAS21002C_REG_OUT_Z_MSB, 0, 7),
+ [F_OUT_Z_LSB] = REG_FIELD(FXAS21002C_REG_OUT_Z_LSB, 0, 7),
+ [F_ZYX_OW] = REG_FIELD(FXAS21002C_REG_DR_STATUS, 7, 7),
+ [F_Z_OW] = REG_FIELD(FXAS21002C_REG_DR_STATUS, 6, 6),
+ [F_Y_OW] = REG_FIELD(FXAS21002C_REG_DR_STATUS, 5, 5),
+ [F_X_OW] = REG_FIELD(FXAS21002C_REG_DR_STATUS, 4, 4),
+ [F_ZYX_DR] = REG_FIELD(FXAS21002C_REG_DR_STATUS, 3, 3),
+ [F_Z_DR] = REG_FIELD(FXAS21002C_REG_DR_STATUS, 2, 2),
+ [F_Y_DR] = REG_FIELD(FXAS21002C_REG_DR_STATUS, 1, 1),
+ [F_X_DR] = REG_FIELD(FXAS21002C_REG_DR_STATUS, 0, 0),
+ [F_OVF] = REG_FIELD(FXAS21002C_REG_F_STATUS, 7, 7),
+ [F_WMKF] = REG_FIELD(FXAS21002C_REG_F_STATUS, 6, 6),
+ [F_CNT] = REG_FIELD(FXAS21002C_REG_F_STATUS, 0, 5),
+ [F_MODE] = REG_FIELD(FXAS21002C_REG_F_SETUP, 6, 7),
+ [F_WMRK] = REG_FIELD(FXAS21002C_REG_F_SETUP, 0, 5),
+ [F_EVENT] = REG_FIELD(FXAS21002C_REG_F_EVENT, 5, 5),
+ [FE_TIME] = REG_FIELD(FXAS21002C_REG_F_EVENT, 0, 4),
+ [F_BOOTEND] = REG_FIELD(FXAS21002C_REG_INT_SRC_FLAG, 3, 3),
+ [F_SRC_FIFO] = REG_FIELD(FXAS21002C_REG_INT_SRC_FLAG, 2, 2),
+ [F_SRC_RT] = REG_FIELD(FXAS21002C_REG_INT_SRC_FLAG, 1, 1),
+ [F_SRC_DRDY] = REG_FIELD(FXAS21002C_REG_INT_SRC_FLAG, 0, 0),
+ [F_WHO_AM_I] = REG_FIELD(FXAS21002C_REG_WHO_AM_I, 0, 7),
+ [F_BW] = REG_FIELD(FXAS21002C_REG_CTRL0, 6, 7),
+ [F_SPIW] = REG_FIELD(FXAS21002C_REG_CTRL0, 5, 5),
+ [F_SEL] = REG_FIELD(FXAS21002C_REG_CTRL0, 3, 4),
+ [F_HPF_EN] = REG_FIELD(FXAS21002C_REG_CTRL0, 2, 2),
+ [F_FS] = REG_FIELD(FXAS21002C_REG_CTRL0, 0, 1),
+ [F_ELE] = REG_FIELD(FXAS21002C_REG_RT_CFG, 3, 3),
+ [F_ZTEFE] = REG_FIELD(FXAS21002C_REG_RT_CFG, 2, 2),
+ [F_YTEFE] = REG_FIELD(FXAS21002C_REG_RT_CFG, 1, 1),
+ [F_XTEFE] = REG_FIELD(FXAS21002C_REG_RT_CFG, 0, 0),
+ [F_EA] = REG_FIELD(FXAS21002C_REG_RT_SRC, 6, 6),
+ [F_ZRT] = REG_FIELD(FXAS21002C_REG_RT_SRC, 5, 5),
+ [F_ZRT_POL] = REG_FIELD(FXAS21002C_REG_RT_SRC, 4, 4),
+ [F_YRT] = REG_FIELD(FXAS21002C_REG_RT_SRC, 3, 3),
+ [F_YRT_POL] = REG_FIELD(FXAS21002C_REG_RT_SRC, 2, 2),
+ [F_XRT] = REG_FIELD(FXAS21002C_REG_RT_SRC, 1, 1),
+ [F_XRT_POL] = REG_FIELD(FXAS21002C_REG_RT_SRC, 0, 0),
+ [F_DBCNTM] = REG_FIELD(FXAS21002C_REG_RT_THS, 7, 7),
+ [F_THS] = REG_FIELD(FXAS21002C_REG_RT_THS, 0, 6),
+ [F_RT_COUNT] = REG_FIELD(FXAS21002C_REG_RT_COUNT, 0, 7),
+ [F_TEMP] = REG_FIELD(FXAS21002C_REG_TEMP, 0, 7),
+ [F_RST] = REG_FIELD(FXAS21002C_REG_CTRL1, 6, 6),
+ [F_ST] = REG_FIELD(FXAS21002C_REG_CTRL1, 5, 5),
+ [F_DR] = REG_FIELD(FXAS21002C_REG_CTRL1, 2, 4),
+ [F_ACTIVE] = REG_FIELD(FXAS21002C_REG_CTRL1, 1, 1),
+ [F_READY] = REG_FIELD(FXAS21002C_REG_CTRL1, 0, 0),
+ [F_INT_CFG_FIFO] = REG_FIELD(FXAS21002C_REG_CTRL2, 7, 7),
+ [F_INT_EN_FIFO] = REG_FIELD(FXAS21002C_REG_CTRL2, 6, 6),
+ [F_INT_CFG_RT] = REG_FIELD(FXAS21002C_REG_CTRL2, 5, 5),
+ [F_INT_EN_RT] = REG_FIELD(FXAS21002C_REG_CTRL2, 4, 4),
+ [F_INT_CFG_DRDY] = REG_FIELD(FXAS21002C_REG_CTRL2, 3, 3),
+ [F_INT_EN_DRDY] = REG_FIELD(FXAS21002C_REG_CTRL2, 2, 2),
+ [F_IPOL] = REG_FIELD(FXAS21002C_REG_CTRL2, 1, 1),
+ [F_PP_OD] = REG_FIELD(FXAS21002C_REG_CTRL2, 0, 0),
+ [F_WRAPTOONE] = REG_FIELD(FXAS21002C_REG_CTRL3, 3, 3),
+ [F_EXTCTRLEN] = REG_FIELD(FXAS21002C_REG_CTRL3, 2, 2),
+ [F_FS_DOUBLE] = REG_FIELD(FXAS21002C_REG_CTRL3, 0, 0),
+};
+
+extern const struct dev_pm_ops fxas21002c_pm_ops;
+
+int fxas21002c_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name);
+void fxas21002c_core_remove(struct device *dev);
+#endif
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
new file mode 100644
index 000000000000..89d2bb2282ea
--- /dev/null
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -0,0 +1,1004 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for NXP FXAS21002C Gyroscope - Core
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "fxas21002c.h"
+
+#define FXAS21002C_CHIP_ID_1 0xD6
+#define FXAS21002C_CHIP_ID_2 0xD7
+
+enum fxas21002c_mode_state {
+ FXAS21002C_MODE_STANDBY,
+ FXAS21002C_MODE_READY,
+ FXAS21002C_MODE_ACTIVE,
+};
+
+#define FXAS21002C_STANDBY_ACTIVE_TIME_MS 62
+#define FXAS21002C_READY_ACTIVE_TIME_MS 7
+
+#define FXAS21002C_ODR_LIST_MAX 10
+
+#define FXAS21002C_SCALE_FRACTIONAL 32
+#define FXAS21002C_RANGE_LIMIT_DOUBLE 2000
+
+#define FXAS21002C_AXIS_TO_REG(axis) (FXAS21002C_REG_OUT_X_MSB + ((axis) * 2))
+
+static const int fxas21002c_odr_values[] = {
+ 800, 400, 200, 100, 50, 25, 12, 12
+};
+
+/*
+ * These values give the low-pass filter cutoff frequency as a fraction of
+ * the ODR: cutoff = ODR * 0.<lpf_value>. For example, at ODR = 800 Hz with
+ * an lpf value of 0.32, the LPF cutoff frequency is 800 * 0.32 = 256 Hz.
+ */
+static const int fxas21002c_lpf_values[] = {
+ 32, 16, 8
+};
+
+/*
+ * These values give the high-pass filter cutoff frequency as a fraction of
+ * the ODR: cutoff = ODR * 0.0<hpf_value>. For example, at ODR = 800 Hz with
+ * an hpf value of 0.018750, the HPF cutoff frequency is 800 * 0.018750 = 15 Hz.
+ */
+static const int fxas21002c_hpf_values[] = {
+ 18750, 9625, 4875, 2475
+};
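For illustration only (the helper names below are assumptions, not part of this patch): the two tables above store the cutoffs as fixed-point fractions of the ODR, lpf entries in hundredths and hpf entries in millionths, so the resulting frequencies could be computed as:

static unsigned int lpf_cutoff_mhz(unsigned int odr_hz, unsigned int lpf_val)
{
	/* e.g. odr_hz = 800, lpf_val = 32 -> 256000 mHz = 256 Hz */
	return odr_hz * lpf_val * 10;
}

static unsigned int hpf_cutoff_mhz(unsigned int odr_hz, unsigned int hpf_val)
{
	/* e.g. odr_hz = 800, hpf_val = 18750 -> 15000 mHz = 15 Hz */
	return odr_hz * hpf_val / 1000;
}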
+
+static const int fxas21002c_range_values[] = {
+ 4000, 2000, 1000, 500, 250
+};
+
+struct fxas21002c_data {
+ u8 chip_id;
+ enum fxas21002c_mode_state mode;
+ enum fxas21002c_mode_state prev_mode;
+
+ struct mutex lock; /* serialize data access */
+ struct regmap *regmap;
+ struct regmap_field *regmap_fields[F_MAX_FIELDS];
+ struct iio_trigger *dready_trig;
+ s64 timestamp;
+ int irq;
+
+ struct regulator *vdd;
+ struct regulator *vddio;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ s16 buffer[8] ____cacheline_aligned;
+};
+
+enum fxas21002c_channel_index {
+ CHANNEL_SCAN_INDEX_X,
+ CHANNEL_SCAN_INDEX_Y,
+ CHANNEL_SCAN_INDEX_Z,
+ CHANNEL_SCAN_MAX,
+};
+
+static int fxas21002c_odr_hz_from_value(struct fxas21002c_data *data, u8 value)
+{
+ int odr_value_max = ARRAY_SIZE(fxas21002c_odr_values) - 1;
+
+ value = min_t(u8, value, odr_value_max);
+
+ return fxas21002c_odr_values[value];
+}
+
+static int fxas21002c_odr_value_from_hz(struct fxas21002c_data *data,
+ unsigned int hz)
+{
+ int odr_table_size = ARRAY_SIZE(fxas21002c_odr_values);
+ int i;
+
+ for (i = 0; i < odr_table_size; i++)
+ if (fxas21002c_odr_values[i] == hz)
+ return i;
+
+ return -EINVAL;
+}
+
+static int fxas21002c_lpf_bw_from_value(struct fxas21002c_data *data, u8 value)
+{
+ int lpf_value_max = ARRAY_SIZE(fxas21002c_lpf_values) - 1;
+
+ value = min_t(u8, value, lpf_value_max);
+
+ return fxas21002c_lpf_values[value];
+}
+
+static int fxas21002c_lpf_value_from_bw(struct fxas21002c_data *data,
+ unsigned int hz)
+{
+ int lpf_table_size = ARRAY_SIZE(fxas21002c_lpf_values);
+ int i;
+
+ for (i = 0; i < lpf_table_size; i++)
+ if (fxas21002c_lpf_values[i] == hz)
+ return i;
+
+ return -EINVAL;
+}
+
+static int fxas21002c_hpf_sel_from_value(struct fxas21002c_data *data, u8 value)
+{
+ int hpf_value_max = ARRAY_SIZE(fxas21002c_hpf_values) - 1;
+
+ value = min_t(u8, value, hpf_value_max);
+
+ return fxas21002c_hpf_values[value];
+}
+
+static int fxas21002c_hpf_value_from_sel(struct fxas21002c_data *data,
+ unsigned int hz)
+{
+ int hpf_table_size = ARRAY_SIZE(fxas21002c_hpf_values);
+ int i;
+
+ for (i = 0; i < hpf_table_size; i++)
+ if (fxas21002c_hpf_values[i] == hz)
+ return i;
+
+ return -EINVAL;
+}
+
+static int fxas21002c_range_fs_from_value(struct fxas21002c_data *data,
+ u8 value)
+{
+ int range_value_max = ARRAY_SIZE(fxas21002c_range_values) - 1;
+ unsigned int fs_double;
+ int ret;
+
+ /* We need to check if FS_DOUBLE is enabled to offset the value */
+ ret = regmap_field_read(data->regmap_fields[F_FS_DOUBLE], &fs_double);
+ if (ret < 0)
+ return ret;
+
+ if (!fs_double)
+ value += 1;
+
+ value = min_t(u8, value, range_value_max);
+
+ return fxas21002c_range_values[value];
+}
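For reference, a sketch of the resulting mapping (derived from the range table and the FS_DOUBLE offset above; illustrative, not part of the patch):

/*
 *  FS bits:        00     01     10    11
 *  FS_DOUBLE = 0:  2000   1000   500   250   (dps)
 *  FS_DOUBLE = 1:  4000   2000   1000  500   (dps)
 */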
+
+static int fxas21002c_range_value_from_fs(struct fxas21002c_data *data,
+ unsigned int range)
+{
+ int range_table_size = ARRAY_SIZE(fxas21002c_range_values);
+ bool found = false;
+ int fs_double = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < range_table_size; i++)
+ if (fxas21002c_range_values[i] == range) {
+ found = true;
+ break;
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ if (range > FXAS21002C_RANGE_LIMIT_DOUBLE)
+ fs_double = 1;
+
+ ret = regmap_field_write(data->regmap_fields[F_FS_DOUBLE], fs_double);
+ if (ret < 0)
+ return ret;
+
+ return i;
+}
+
+static int fxas21002c_mode_get(struct fxas21002c_data *data)
+{
+ unsigned int active;
+ unsigned int ready;
+ int ret;
+
+ ret = regmap_field_read(data->regmap_fields[F_ACTIVE], &active);
+ if (ret < 0)
+ return ret;
+ if (active)
+ return FXAS21002C_MODE_ACTIVE;
+
+ ret = regmap_field_read(data->regmap_fields[F_READY], &ready);
+ if (ret < 0)
+ return ret;
+ if (ready)
+ return FXAS21002C_MODE_READY;
+
+ return FXAS21002C_MODE_STANDBY;
+}
+
+static int fxas21002c_mode_set(struct fxas21002c_data *data,
+ enum fxas21002c_mode_state mode)
+{
+ int ret;
+
+ if (mode == data->mode)
+ return 0;
+
+ if (mode == FXAS21002C_MODE_READY)
+ ret = regmap_field_write(data->regmap_fields[F_READY], 1);
+ else
+ ret = regmap_field_write(data->regmap_fields[F_READY], 0);
+ if (ret < 0)
+ return ret;
+
+ if (mode == FXAS21002C_MODE_ACTIVE)
+ ret = regmap_field_write(data->regmap_fields[F_ACTIVE], 1);
+ else
+ ret = regmap_field_write(data->regmap_fields[F_ACTIVE], 0);
+ if (ret < 0)
+ return ret;
+
+ /* If transitioning to active, wait for the required setup time */
+ if (mode == FXAS21002C_MODE_ACTIVE &&
+ data->mode == FXAS21002C_MODE_STANDBY)
+ msleep_interruptible(FXAS21002C_STANDBY_ACTIVE_TIME_MS);
+
+ if (data->mode == FXAS21002C_MODE_READY)
+ msleep_interruptible(FXAS21002C_READY_ACTIVE_TIME_MS);
+
+ data->prev_mode = data->mode;
+ data->mode = mode;
+
+ return ret;
+}
+
+static int fxas21002c_write(struct fxas21002c_data *data,
+ enum fxas21002c_fields field, int bits)
+{
+ int actual_mode;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ actual_mode = fxas21002c_mode_get(data);
+ if (actual_mode < 0) {
+ ret = actual_mode;
+ goto out_unlock;
+ }
+
+ ret = fxas21002c_mode_set(data, FXAS21002C_MODE_READY);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = regmap_field_write(data->regmap_fields[field], bits);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = fxas21002c_mode_set(data, data->prev_mode);
+
+out_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int fxas21002c_pm_get(struct fxas21002c_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ pm_runtime_put_noidle(dev);
+
+ return ret;
+}
+
+static int fxas21002c_pm_put(struct fxas21002c_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+
+ pm_runtime_mark_last_busy(dev);
+
+ return pm_runtime_put_autosuspend(dev);
+}
+
+static int fxas21002c_temp_get(struct fxas21002c_data *data, int *val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int temp;
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = fxas21002c_pm_get(data);
+ if (ret < 0)
+ goto data_unlock;
+
+ ret = regmap_field_read(data->regmap_fields[F_TEMP], &temp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read temp: %d\n", ret);
+ goto data_unlock;
+ }
+
+ *val = sign_extend32(temp, 7);
+
+ ret = fxas21002c_pm_put(data);
+ if (ret < 0)
+ goto data_unlock;
+
+ ret = IIO_VAL_INT;
+
+data_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int fxas21002c_axis_get(struct fxas21002c_data *data,
+ int index, int *val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ __be16 axis_be;
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = fxas21002c_pm_get(data);
+ if (ret < 0)
+ goto data_unlock;
+
+ ret = regmap_bulk_read(data->regmap, FXAS21002C_AXIS_TO_REG(index),
+ &axis_be, sizeof(axis_be));
+ if (ret < 0) {
+ dev_err(dev, "failed to read axis: %d: %d\n", index, ret);
+ goto data_unlock;
+ }
+
+ *val = sign_extend32(be16_to_cpu(axis_be), 15);
+
+ ret = fxas21002c_pm_put(data);
+ if (ret < 0)
+ goto data_unlock;
+
+ ret = IIO_VAL_INT;
+
+data_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int fxas21002c_odr_get(struct fxas21002c_data *data, int *odr)
+{
+ unsigned int odr_bits;
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_field_read(data->regmap_fields[F_DR], &odr_bits);
+ if (ret < 0)
+ goto data_unlock;
+
+ *odr = fxas21002c_odr_hz_from_value(data, odr_bits);
+
+ ret = IIO_VAL_INT;
+
+data_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int fxas21002c_odr_set(struct fxas21002c_data *data, int odr)
+{
+ int odr_bits;
+
+ odr_bits = fxas21002c_odr_value_from_hz(data, odr);
+ if (odr_bits < 0)
+ return odr_bits;
+
+ return fxas21002c_write(data, F_DR, odr_bits);
+}
+
+static int fxas21002c_lpf_get(struct fxas21002c_data *data, int *val2)
+{
+ unsigned int bw_bits;
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_field_read(data->regmap_fields[F_BW], &bw_bits);
+ if (ret < 0)
+ goto data_unlock;
+
+ *val2 = fxas21002c_lpf_bw_from_value(data, bw_bits) * 10000;
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+
+data_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int fxas21002c_lpf_set(struct fxas21002c_data *data, int bw)
+{
+ int bw_bits;
+ int odr;
+ int ret;
+
+ bw_bits = fxas21002c_lpf_value_from_bw(data, bw);
+ if (bw_bits < 0)
+ return bw_bits;
+
+ /*
+ * Per table 33 of the device spec, an lpf value of 0.08 is not allowed for
+ * ODR = 25 Hz or 12.5 Hz; 0.16 is additionally not allowed for ODR = 12.5 Hz.
+ */
+ ret = fxas21002c_odr_get(data, &odr);
+ if (ret < 0)
+ return -EINVAL;
+
+ if ((odr == 25 && bw_bits > 0x01) || (odr == 12 && bw_bits > 0))
+ return -EINVAL;
+
+ return fxas21002c_write(data, F_BW, bw_bits);
+}
+
+static int fxas21002c_hpf_get(struct fxas21002c_data *data, int *val2)
+{
+ unsigned int sel_bits;
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_field_read(data->regmap_fields[F_SEL], &sel_bits);
+ if (ret < 0)
+ goto data_unlock;
+
+ *val2 = fxas21002c_hpf_sel_from_value(data, sel_bits);
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+
+data_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int fxas21002c_hpf_set(struct fxas21002c_data *data, int sel)
+{
+ int sel_bits;
+
+ sel_bits = fxas21002c_hpf_value_from_sel(data, sel);
+ if (sel_bits < 0)
+ return sel_bits;
+
+ return fxas21002c_write(data, F_SEL, sel_bits);
+}
+
+static int fxas21002c_scale_get(struct fxas21002c_data *data, int *val)
+{
+ int fs_bits;
+ int scale;
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_field_read(data->regmap_fields[F_FS], &fs_bits);
+ if (ret < 0)
+ goto data_unlock;
+
+ scale = fxas21002c_range_fs_from_value(data, fs_bits);
+ if (scale < 0) {
+ ret = scale;
+ goto data_unlock;
+ }
+
+ *val = scale;
+
+data_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int fxas21002c_scale_set(struct fxas21002c_data *data, int range)
+{
+ int fs_bits;
+
+ fs_bits = fxas21002c_range_value_from_fs(data, range);
+ if (fs_bits < 0)
+ return fs_bits;
+
+ return fxas21002c_write(data, F_FS, fs_bits);
+}
+
+static int fxas21002c_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct fxas21002c_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_TEMP:
+ return fxas21002c_temp_get(data, val);
+ case IIO_ANGL_VEL:
+ return fxas21002c_axis_get(data, chan->scan_index, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val2 = FXAS21002C_SCALE_FRACTIONAL;
+ ret = fxas21002c_scale_get(data, val);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *val = 0;
+ return fxas21002c_lpf_get(data, val2);
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ *val = 0;
+ return fxas21002c_hpf_get(data, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val2 = 0;
+ return fxas21002c_odr_get(data, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxas21002c_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct fxas21002c_data *data = iio_priv(indio_dev);
+ int range;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+
+ return fxas21002c_odr_set(data, val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val)
+ return -EINVAL;
+
+ val2 = val2 / 10000;
+ return fxas21002c_lpf_set(data, val2);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
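+ /*
+ * Scale is reported to userspace as range / FXAS21002C_SCALE_FRACTIONAL
+ * (IIO_VAL_FRACTIONAL), so convert the requested val.val2 back into a
+ * full-scale range in dps, e.g. a scale of 62.5 maps to 2000 dps.
+ */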
+ range = (((val * 1000 + val2 / 1000) *
+ FXAS21002C_SCALE_FRACTIONAL) / 1000);
+ return fxas21002c_scale_set(data, range);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ return fxas21002c_hpf_set(data, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("12.5 25 50 100 200 400 800");
+
+static IIO_CONST_ATTR(in_anglvel_filter_low_pass_3db_frequency_available,
+ "0.32 0.16 0.08");
+
+static IIO_CONST_ATTR(in_anglvel_filter_high_pass_3db_frequency_available,
+ "0.018750 0.009625 0.004875 0.002475");
+
+static IIO_CONST_ATTR(in_anglvel_scale_available,
+ "125.0 62.5 31.25 15.625 7.8125");
+
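As an aside (illustrative, not part of the patch), each entry in the scale list above is a supported full-scale range divided by FXAS21002C_SCALE_FRACTIONAL:

/*
 *  4000 / 32 = 125.0     2000 / 32 = 62.5      1000 / 32 = 31.25
 *   500 / 32 = 15.625     250 / 32 = 7.8125
 */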
+static struct attribute *fxas21002c_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_anglvel_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_anglvel_filter_high_pass_3db_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_anglvel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fxas21002c_attrs_group = {
+ .attrs = fxas21002c_attributes,
+};
+
+#define FXAS21002C_CHANNEL(_axis) { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = CHANNEL_SCAN_INDEX_##_axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+static const struct iio_chan_spec fxas21002c_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_index = -1,
+ },
+ FXAS21002C_CHANNEL(X),
+ FXAS21002C_CHANNEL(Y),
+ FXAS21002C_CHANNEL(Z),
+};
+
+static const struct iio_info fxas21002c_info = {
+ .attrs = &fxas21002c_attrs_group,
+ .read_raw = &fxas21002c_read_raw,
+ .write_raw = &fxas21002c_write_raw,
+};
+
+static irqreturn_t fxas21002c_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct fxas21002c_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_bulk_read(data->regmap, FXAS21002C_REG_OUT_X_MSB,
+ data->buffer, CHANNEL_SCAN_MAX * sizeof(s16));
+ if (ret < 0)
+ goto out_unlock;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ data->timestamp);
+
+out_unlock:
+ mutex_unlock(&data->lock);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int fxas21002c_chip_init(struct fxas21002c_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int chip_id;
+ int ret;
+
+ ret = regmap_field_read(data->regmap_fields[F_WHO_AM_I], &chip_id);
+ if (ret < 0)
+ return ret;
+
+ if (chip_id != FXAS21002C_CHIP_ID_1 &&
+ chip_id != FXAS21002C_CHIP_ID_2) {
+ dev_err(dev, "chip id 0x%02x is not supported\n", chip_id);
+ return -EINVAL;
+ }
+
+ data->chip_id = chip_id;
+
+ ret = fxas21002c_mode_set(data, FXAS21002C_MODE_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ /* Set the default ODR to 200 Hz */
+ ret = fxas21002c_odr_set(data, 200);
+ if (ret < 0)
+ dev_err(dev, "failed to set ODR: %d\n", ret);
+
+ return ret;
+}
+
+static int fxas21002c_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct fxas21002c_data *data = iio_priv(indio_dev);
+
+ return regmap_field_write(data->regmap_fields[F_INT_EN_DRDY], state);
+}
+
+static const struct iio_trigger_ops fxas21002c_trigger_ops = {
+ .set_trigger_state = &fxas21002c_data_rdy_trigger_set_state,
+};
+
+static irqreturn_t fxas21002c_data_rdy_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct fxas21002c_data *data = iio_priv(indio_dev);
+
+ data->timestamp = iio_get_time_ns(indio_dev);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t fxas21002c_data_rdy_thread(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct fxas21002c_data *data = iio_priv(indio_dev);
+ unsigned int data_ready;
+ int ret;
+
+ ret = regmap_field_read(data->regmap_fields[F_SRC_DRDY], &data_ready);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ if (!data_ready)
+ return IRQ_NONE;
+
+ iio_trigger_poll_chained(data->dready_trig);
+
+ return IRQ_HANDLED;
+}
+
+static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct device_node *np = indio_dev->dev.of_node;
+ unsigned long irq_trig;
+ bool irq_open_drain;
+ int irq1;
+ int ret;
+
+ if (!data->irq)
+ return 0;
+
+ irq1 = of_irq_get_byname(np, "INT1");
+
+ if (irq1 == data->irq) {
+ dev_info(dev, "using interrupt line INT1\n");
+ ret = regmap_field_write(data->regmap_fields[F_INT_CFG_DRDY],
+ 1);
+ if (ret < 0)
+ return ret;
+ } else {
+ dev_info(dev, "using interrupt line INT2\n");
+ }
+
+ irq_open_drain = of_property_read_bool(np, "drive-open-drain");
+
+ data->dready_trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!data->dready_trig)
+ return -ENOMEM;
+
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(data->irq));
+
+ if (irq_trig == IRQF_TRIGGER_RISING) {
+ ret = regmap_field_write(data->regmap_fields[F_IPOL], 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (irq_open_drain)
+ irq_trig |= IRQF_SHARED;
+
+ ret = devm_request_threaded_irq(dev, data->irq,
+ fxas21002c_data_rdy_handler,
+ fxas21002c_data_rdy_thread,
+ irq_trig, "fxas21002c_data_ready",
+ indio_dev);
+ if (ret < 0)
+ return ret;
+
+ data->dready_trig->dev.parent = dev;
+ data->dready_trig->ops = &fxas21002c_trigger_ops;
+ iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+
+ return devm_iio_trigger_register(dev, data->dready_trig);
+}
+
+static int fxas21002c_power_enable(struct fxas21002c_data *data)
+{
+ int ret;
+
+ ret = regulator_enable(data->vdd);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_enable(data->vddio);
+ if (ret < 0) {
+ regulator_disable(data->vdd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void fxas21002c_power_disable(struct fxas21002c_data *data)
+{
+ regulator_disable(data->vdd);
+ regulator_disable(data->vddio);
+}
+
+static void fxas21002c_power_disable_action(void *_data)
+{
+ struct fxas21002c_data *data = _data;
+
+ fxas21002c_power_disable(data);
+}
+
+static int fxas21002c_regulators_get(struct fxas21002c_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+
+ data->vdd = devm_regulator_get(dev->parent, "vdd");
+ if (IS_ERR(data->vdd))
+ return PTR_ERR(data->vdd);
+
+ data->vddio = devm_regulator_get(dev->parent, "vddio");
+
+ return PTR_ERR_OR_ZERO(data->vddio);
+}
+
+int fxas21002c_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name)
+{
+ struct fxas21002c_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap_field *f;
+ int i;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->irq = irq;
+ data->regmap = regmap;
+
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ f = devm_regmap_field_alloc(dev, data->regmap,
+ fxas21002c_reg_fields[i]);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ data->regmap_fields[i] = f;
+ }
+
+ mutex_init(&data->lock);
+
+ ret = fxas21002c_regulators_get(data);
+ if (ret < 0)
+ return ret;
+
+ ret = fxas21002c_power_enable(data);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, fxas21002c_power_disable_action,
+ data);
+ if (ret < 0)
+ return ret;
+
+ ret = fxas21002c_chip_init(data);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = fxas21002c_channels;
+ indio_dev->num_channels = ARRAY_SIZE(fxas21002c_channels);
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &fxas21002c_info;
+
+ ret = fxas21002c_trigger_probe(data);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ fxas21002c_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
+ pm_runtime_use_autosuspend(dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto pm_disable;
+
+ return 0;
+
+pm_disable:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fxas21002c_core_probe);
+
+void fxas21002c_core_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+ iio_device_unregister(indio_dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+}
+EXPORT_SYMBOL_GPL(fxas21002c_core_remove);
+
+static int __maybe_unused fxas21002c_suspend(struct device *dev)
+{
+ struct fxas21002c_data *data = iio_priv(dev_get_drvdata(dev));
+
+ fxas21002c_mode_set(data, FXAS21002C_MODE_STANDBY);
+ fxas21002c_power_disable(data);
+
+ return 0;
+}
+
+static int __maybe_unused fxas21002c_resume(struct device *dev)
+{
+ struct fxas21002c_data *data = iio_priv(dev_get_drvdata(dev));
+ int ret;
+
+ ret = fxas21002c_power_enable(data);
+ if (ret < 0)
+ return ret;
+
+ return fxas21002c_mode_set(data, data->prev_mode);
+}
+
+static int __maybe_unused fxas21002c_runtime_suspend(struct device *dev)
+{
+ struct fxas21002c_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return fxas21002c_mode_set(data, FXAS21002C_MODE_READY);
+}
+
+static int __maybe_unused fxas21002c_runtime_resume(struct device *dev)
+{
+ struct fxas21002c_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return fxas21002c_mode_set(data, FXAS21002C_MODE_ACTIVE);
+}
+
+const struct dev_pm_ops fxas21002c_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fxas21002c_suspend, fxas21002c_resume)
+ SET_RUNTIME_PM_OPS(fxas21002c_runtime_suspend,
+ fxas21002c_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_GPL(fxas21002c_pm_ops);
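
The runtime-PM callbacks above drop the device to READY when idle and return it to ACTIVE on demand, while probe arms a 2-second autosuspend. A hedged sketch of the access pattern that pairs with this setup; the function name is a placeholder (not part of this patch) and <linux/pm_runtime.h> is assumed to be included.

static int fxas21002c_example_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* triggers runtime_resume -> ACTIVE */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... touch the sample/configuration registers here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* back to READY after the 2 s timeout */
	return 0;
}
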
+
+MODULE_AUTHOR("Rui Miguel Silva <rui.silva@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("FXAS21002C Gyro driver");
diff --git a/drivers/iio/gyro/fxas21002c_i2c.c b/drivers/iio/gyro/fxas21002c_i2c.c
new file mode 100644
index 000000000000..a7807fd97483
--- /dev/null
+++ b/drivers/iio/gyro/fxas21002c_i2c.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for NXP FXAS21002C Gyroscope - I2C
+ *
+ * Copyright (C) 2018 Linaro Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "fxas21002c.h"
+
+static const struct regmap_config fxas21002c_regmap_i2c_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FXAS21002C_REG_CTRL3,
+};
+
+static int fxas21002c_i2c_probe(struct i2c_client *i2c)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &fxas21002c_regmap_i2c_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&i2c->dev, "Failed to register i2c regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return fxas21002c_core_probe(&i2c->dev, regmap, i2c->irq, i2c->name);
+}
+
+static int fxas21002c_i2c_remove(struct i2c_client *i2c)
+{
+ fxas21002c_core_remove(&i2c->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id fxas21002c_i2c_id[] = {
+ { "fxas21002c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fxas21002c_i2c_id);
+
+static const struct of_device_id fxas21002c_i2c_of_match[] = {
+ { .compatible = "nxp,fxas21002c", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxas21002c_i2c_of_match);
+
+static struct i2c_driver fxas21002c_i2c_driver = {
+ .driver = {
+ .name = "fxas21002c_i2c",
+ .pm = &fxas21002c_pm_ops,
+ .of_match_table = fxas21002c_i2c_of_match,
+ },
+ .probe_new = fxas21002c_i2c_probe,
+ .remove = fxas21002c_i2c_remove,
+ .id_table = fxas21002c_i2c_id,
+};
+module_i2c_driver(fxas21002c_i2c_driver);
+
+MODULE_AUTHOR("Rui Miguel Silva <rui.silva@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("FXAS21002C I2C Gyro driver");
diff --git a/drivers/iio/gyro/fxas21002c_spi.c b/drivers/iio/gyro/fxas21002c_spi.c
new file mode 100644
index 000000000000..77ceebef4e34
--- /dev/null
+++ b/drivers/iio/gyro/fxas21002c_spi.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for NXP FXAS21002C Gyroscope - SPI
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "fxas21002c.h"
+
+static const struct regmap_config fxas21002c_regmap_spi_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FXAS21002C_REG_CTRL3,
+};
+
+static int fxas21002c_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &fxas21002c_regmap_spi_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return fxas21002c_core_probe(&spi->dev, regmap, spi->irq, id->name);
+}
+
+static int fxas21002c_spi_remove(struct spi_device *spi)
+{
+ fxas21002c_core_remove(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id fxas21002c_spi_id[] = {
+ { "fxas21002c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, fxas21002c_spi_id);
+
+static const struct of_device_id fxas21002c_spi_of_match[] = {
+ { .compatible = "nxp,fxas21002c", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxas21002c_spi_of_match);
+
+static struct spi_driver fxas21002c_spi_driver = {
+ .driver = {
+ .name = "fxas21002c_spi",
+ .pm = &fxas21002c_pm_ops,
+ .of_match_table = fxas21002c_spi_of_match,
+ },
+ .probe = fxas21002c_spi_probe,
+ .remove = fxas21002c_spi_remove,
+ .id_table = fxas21002c_spi_id,
+};
+module_spi_driver(fxas21002c_spi_driver);
+
+MODULE_AUTHOR("Rui Miguel Silva <rui.silva@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("FXAS21002C SPI Gyro driver");
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 7adecb562c81..203a6be33b70 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -242,6 +242,20 @@ err_ret:
return ret;
}
+static const struct iio_mount_matrix *
+itg3200_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct itg3200 *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info itg3200_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, itg3200_get_mount_matrix),
+ { }
+};
+
#define ITG3200_ST \
{ .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_BE }
@@ -255,6 +269,7 @@ err_ret:
.address = ITG3200_REG_GYRO_ ## _mod ## OUT_H, \
.scan_index = ITG3200_SCAN_GYRO_ ## _mod, \
.scan_type = ITG3200_ST, \
+ .ext_info = itg3200_ext_info, \
}
static const struct iio_chan_spec itg3200_channels[] = {
@@ -297,6 +312,11 @@ static int itg3200_probe(struct i2c_client *client,
st = iio_priv(indio_dev);
+ ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+ &st->orientation);
+ if (ret)
+ return ret;
+
i2c_set_clientdata(client, indio_dev);
st->i2c = client;
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 77fac81a3adc..0a406163d775 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -29,7 +29,8 @@
#include "mpu3050.h"
-#define MPU3050_CHIP_ID 0x69
+#define MPU3050_CHIP_ID 0x68
+#define MPU3050_CHIP_ID_MASK 0x7E
/*
* Register map: anything suffixed *_H is a big-endian high byte and always
@@ -864,7 +865,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050)
dev_err(mpu3050->dev, "error setting power mode\n");
return ret;
}
- msleep(10);
+ usleep_range(10000, 20000);
return 0;
}
@@ -1149,8 +1150,7 @@ int mpu3050_common_probe(struct device *dev,
mpu3050->divisor = 99;
/* Read the mounting matrix, if present */
- ret = of_iio_read_mount_matrix(dev, "mount-matrix",
- &mpu3050->orientation);
+ ret = iio_read_mount_matrix(dev, "mount-matrix", &mpu3050->orientation);
if (ret)
return ret;
@@ -1176,8 +1176,9 @@ int mpu3050_common_probe(struct device *dev,
goto err_power_down;
}
- if (val != MPU3050_CHIP_ID) {
- dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+ if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
+ dev_err(dev, "unsupported chip id %02x\n",
+ (u8)(val & MPU3050_CHIP_ID_MASK));
ret = -ENODEV;
goto err_power_down;
}
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 1a0d458e4f4e..f1a8ec9d637b 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -4,16 +4,16 @@
menu "Humidity sensors"
config AM2315
- tristate "Aosong AM2315 relative humidity and temperature sensor"
- depends on I2C
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- If you say yes here you get support for the Aosong AM2315
- relative humidity and ambient temperature sensor.
+ tristate "Aosong AM2315 relative humidity and temperature sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the Aosong AM2315
+ relative humidity and ambient temperature sensor.
- This driver can also be built as a module. If so, the module will
- be called am2315.
+ This driver can also be built as a module. If so, the module will
+ be called am2315.
config DHT11
tristate "DHT11 (and compatible sensors) driver"
@@ -78,7 +78,7 @@ config HTS221_SPI
config HTU21
tristate "Measurement Specialties HTU21 humidity & temperature sensor"
depends on I2C
- select IIO_MS_SENSORS_I2C
+ select IIO_MS_SENSORS_I2C
help
If you say yes here you get support for the Measurement Specialties
HTU21 humidity and temperature sensor.
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 68629c68b19b..9e452fce1aaf 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -4,8 +4,6 @@
#
# When adding new entries keep the list in alphabetical order
-adis16400-y := adis16400_core.o
-adis16400-$(CONFIG_IIO_BUFFER) += adis16400_buffer.o
obj-$(CONFIG_ADIS16400) += adis16400.o
obj-$(CONFIG_ADIS16480) += adis16480.o
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400.c
index 46a569005a13..beb6919e7180 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400.c
@@ -31,8 +31,183 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS16400_STARTUP_DELAY 290 /* ms */
+#define ADIS16400_MTEST_DELAY 90 /* ms */
+
+#define ADIS16400_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16400_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16400_XGYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16400_YGYRO_OUT 0x06 /* Y-axis gyroscope output */
+#define ADIS16400_ZGYRO_OUT 0x08 /* Z-axis gyroscope output */
+#define ADIS16400_XACCL_OUT 0x0A /* X-axis accelerometer output */
+#define ADIS16400_YACCL_OUT 0x0C /* Y-axis accelerometer output */
+#define ADIS16400_ZACCL_OUT 0x0E /* Z-axis accelerometer output */
+#define ADIS16400_XMAGN_OUT 0x10 /* X-axis magnetometer measurement */
+#define ADIS16400_YMAGN_OUT 0x12 /* Y-axis magnetometer measurement */
+#define ADIS16400_ZMAGN_OUT 0x14 /* Z-axis magnetometer measurement */
+#define ADIS16400_TEMP_OUT 0x16 /* Temperature output */
+#define ADIS16400_AUX_ADC 0x18 /* Auxiliary ADC measurement */
+
+#define ADIS16350_XTEMP_OUT 0x10 /* X-axis gyroscope temperature measurement */
+#define ADIS16350_YTEMP_OUT 0x12 /* Y-axis gyroscope temperature measurement */
+#define ADIS16350_ZTEMP_OUT 0x14 /* Z-axis gyroscope temperature measurement */
+
+#define ADIS16300_PITCH_OUT 0x12 /* X axis inclinometer output measurement */
+#define ADIS16300_ROLL_OUT 0x14 /* Y axis inclinometer output measurement */
+#define ADIS16300_AUX_ADC 0x16 /* Auxiliary ADC measurement */
+
+#define ADIS16448_BARO_OUT 0x16 /* Barometric pressure output */
+#define ADIS16448_TEMP_OUT 0x18 /* Temperature output */
+
+/* Calibration parameters */
+#define ADIS16400_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
+#define ADIS16400_YGYRO_OFF 0x1C /* Y-axis gyroscope bias offset factor */
+#define ADIS16400_ZGYRO_OFF 0x1E /* Z-axis gyroscope bias offset factor */
+#define ADIS16400_XACCL_OFF 0x20 /* X-axis acceleration bias offset factor */
+#define ADIS16400_YACCL_OFF 0x22 /* Y-axis acceleration bias offset factor */
+#define ADIS16400_ZACCL_OFF 0x24 /* Z-axis acceleration bias offset factor */
+#define ADIS16400_XMAGN_HIF 0x26 /* X-axis magnetometer, hard-iron factor */
+#define ADIS16400_YMAGN_HIF 0x28 /* Y-axis magnetometer, hard-iron factor */
+#define ADIS16400_ZMAGN_HIF 0x2A /* Z-axis magnetometer, hard-iron factor */
+#define ADIS16400_XMAGN_SIF 0x2C /* X-axis magnetometer, soft-iron factor */
+#define ADIS16400_YMAGN_SIF 0x2E /* Y-axis magnetometer, soft-iron factor */
+#define ADIS16400_ZMAGN_SIF 0x30 /* Z-axis magnetometer, soft-iron factor */
+
+#define ADIS16400_GPIO_CTRL 0x32 /* Auxiliary digital input/output control */
+#define ADIS16400_MSC_CTRL 0x34 /* Miscellaneous control */
+#define ADIS16400_SMPL_PRD 0x36 /* Internal sample period (rate) control */
+#define ADIS16400_SENS_AVG 0x38 /* Dynamic range and digital filter control */
+#define ADIS16400_SLP_CNT 0x3A /* Sleep mode control */
+#define ADIS16400_DIAG_STAT 0x3C /* System status */
+
+/* Alarm functions */
+#define ADIS16400_GLOB_CMD 0x3E /* System command */
+#define ADIS16400_ALM_MAG1 0x40 /* Alarm 1 amplitude threshold */
+#define ADIS16400_ALM_MAG2 0x42 /* Alarm 2 amplitude threshold */
+#define ADIS16400_ALM_SMPL1 0x44 /* Alarm 1 sample size */
+#define ADIS16400_ALM_SMPL2 0x46 /* Alarm 2 sample size */
+#define ADIS16400_ALM_CTRL 0x48 /* Alarm control */
+#define ADIS16400_AUX_DAC 0x4A /* Auxiliary DAC data */
+
+#define ADIS16334_LOT_ID1 0x52 /* Lot identification code 1 */
+#define ADIS16334_LOT_ID2 0x54 /* Lot identification code 2 */
+#define ADIS16400_PRODUCT_ID 0x56 /* Product identifier */
+#define ADIS16334_SERIAL_NUMBER 0x58 /* Serial number, lot specific */
+
+#define ADIS16400_ERROR_ACTIVE (1<<14)
+#define ADIS16400_NEW_DATA (1<<14)
+
+/* MSC_CTRL */
+#define ADIS16400_MSC_CTRL_MEM_TEST (1<<11)
+#define ADIS16400_MSC_CTRL_INT_SELF_TEST (1<<10)
+#define ADIS16400_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16400_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16400_MSC_CTRL_GYRO_BIAS (1<<7)
+#define ADIS16400_MSC_CTRL_ACCL_ALIGN (1<<6)
+#define ADIS16400_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16400_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16400_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+#define ADIS16400_SMPL_PRD_TIME_BASE (1<<7)
+#define ADIS16400_SMPL_PRD_DIV_MASK 0x7F
+
+/* DIAG_STAT */
+#define ADIS16400_DIAG_STAT_ZACCL_FAIL 15
+#define ADIS16400_DIAG_STAT_YACCL_FAIL 14
+#define ADIS16400_DIAG_STAT_XACCL_FAIL 13
+#define ADIS16400_DIAG_STAT_XGYRO_FAIL 12
+#define ADIS16400_DIAG_STAT_YGYRO_FAIL 11
+#define ADIS16400_DIAG_STAT_ZGYRO_FAIL 10
+#define ADIS16400_DIAG_STAT_ALARM2 9
+#define ADIS16400_DIAG_STAT_ALARM1 8
+#define ADIS16400_DIAG_STAT_FLASH_CHK 6
+#define ADIS16400_DIAG_STAT_SELF_TEST 5
+#define ADIS16400_DIAG_STAT_OVERFLOW 4
+#define ADIS16400_DIAG_STAT_SPI_FAIL 3
+#define ADIS16400_DIAG_STAT_FLASH_UPT 2
+#define ADIS16400_DIAG_STAT_POWER_HIGH 1
+#define ADIS16400_DIAG_STAT_POWER_LOW 0
+
+/* GLOB_CMD */
+#define ADIS16400_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16400_GLOB_CMD_P_AUTO_NULL (1<<4)
+#define ADIS16400_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16400_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16400_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16400_GLOB_CMD_AUTO_NULL (1<<0)
+
+/* SLP_CNT */
+#define ADIS16400_SLP_CNT_POWER_OFF (1<<8)
+
+#define ADIS16334_RATE_DIV_SHIFT 8
+#define ADIS16334_RATE_INT_CLK BIT(0)
+
+#define ADIS16400_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16400_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16400_SPI_FAST (u32)(2000 * 1000)
+
+#define ADIS16400_HAS_PROD_ID BIT(0)
+#define ADIS16400_NO_BURST BIT(1)
+#define ADIS16400_HAS_SLOW_MODE BIT(2)
+#define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
+#define ADIS16400_BURST_DIAG_STAT BIT(4)
+
+struct adis16400_state;
+
+struct adis16400_chip_info {
+ const struct iio_chan_spec *channels;
+ const int num_channels;
+ const long flags;
+ unsigned int gyro_scale_micro;
+ unsigned int accel_scale_micro;
+ int temp_scale_nano;
+ int temp_offset;
+ int (*set_freq)(struct adis16400_state *st, unsigned int freq);
+ int (*get_freq)(struct adis16400_state *st);
+};
+
+/**
+ * struct adis16400_state - device instance specific data
+ * @variant: chip variant info
+ * @filt_int: integer part of requested filter frequency
+ * @adis: adis device
+ **/
+struct adis16400_state {
+ struct adis16400_chip_info *variant;
+ int filt_int;
+
+ struct adis adis;
+ unsigned long avail_scan_mask[2];
+};
-#include "adis16400.h"
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum {
+ ADIS16400_SCAN_SUPPLY,
+ ADIS16400_SCAN_GYRO_X,
+ ADIS16400_SCAN_GYRO_Y,
+ ADIS16400_SCAN_GYRO_Z,
+ ADIS16400_SCAN_ACC_X,
+ ADIS16400_SCAN_ACC_Y,
+ ADIS16400_SCAN_ACC_Z,
+ ADIS16400_SCAN_MAGN_X,
+ ADIS16400_SCAN_MAGN_Y,
+ ADIS16400_SCAN_MAGN_Z,
+ ADIS16400_SCAN_BARO,
+ ADIS16350_SCAN_TEMP_X,
+ ADIS16350_SCAN_TEMP_Y,
+ ADIS16350_SCAN_TEMP_Z,
+ ADIS16300_SCAN_INCLI_X,
+ ADIS16300_SCAN_INCLI_Y,
+ ADIS16400_SCAN_ADC,
+ ADIS16400_SCAN_TIMESTAMP,
+};
#ifdef CONFIG_DEBUG_FS
@@ -145,6 +320,11 @@ enum adis16400_chip_variant {
ADIS16448,
};
+static struct adis_burst adis16400_burst = {
+ .en = true,
+ .reg_cmd = ADIS16400_GLOB_CMD,
+};
+
static int adis16334_get_freq(struct adis16400_state *st)
{
int ret;
@@ -465,6 +645,51 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
}
}
+#if IS_ENABLED(CONFIG_IIO_BUFFER)
+static irqreturn_t adis16400_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16400_state *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ u32 old_speed_hz = st->adis.spi->max_speed_hz;
+ void *buffer;
+ int ret;
+
+ if (!adis->buffer)
+ return -ENOMEM;
+
+ if (!(st->variant->flags & ADIS16400_NO_BURST) &&
+ st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
+ spi_setup(st->adis.spi);
+ }
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret)
+ dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
+
+ if (!(st->variant->flags & ADIS16400_NO_BURST)) {
+ st->adis.spi->max_speed_hz = old_speed_hz;
+ spi_setup(st->adis.spi);
+ }
+
+ if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+ buffer = adis->buffer + sizeof(u16);
+ else
+ buffer = adis->buffer;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ pf->timestamp);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+#else
+#define adis16400_trigger_handler NULL
+#endif /* IS_ENABLED(CONFIG_IIO_BUFFER) */
+
#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -835,7 +1060,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
static const struct iio_info adis16400_info = {
.read_raw = &adis16400_read_raw,
.write_raw = &adis16400_write_raw,
- .update_scan_mode = adis16400_update_scan_mode,
+ .update_scan_mode = adis_update_scan_mode,
.debugfs_reg_access = adis_debugfs_reg_access,
};
@@ -926,6 +1151,9 @@ static int adis16400_probe(struct spi_device *spi)
if (!(st->variant->flags & ADIS16400_NO_BURST)) {
adis16400_setup_chan_mask(st);
indio_dev->available_scan_masks = st->avail_scan_mask;
+ st->adis.burst = &adis16400_burst;
+ if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+ st->adis.burst->extra_len = sizeof(u16);
}
ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
deleted file mode 100644
index 73b189c1c0fb..000000000000
--- a/drivers/iio/imu/adis16400.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * adis16400.h support Analog Devices ADIS16400
- * 3d 18g accelerometers,
- * 3d gyroscopes,
- * 3d 2.5gauss magnetometers via SPI
- *
- * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * Loosely based upon lis3l02dq.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SPI_ADIS16400_H_
-#define SPI_ADIS16400_H_
-
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16400_STARTUP_DELAY 290 /* ms */
-#define ADIS16400_MTEST_DELAY 90 /* ms */
-
-#define ADIS16400_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16400_SUPPLY_OUT 0x02 /* Power supply measurement */
-#define ADIS16400_XGYRO_OUT 0x04 /* X-axis gyroscope output */
-#define ADIS16400_YGYRO_OUT 0x06 /* Y-axis gyroscope output */
-#define ADIS16400_ZGYRO_OUT 0x08 /* Z-axis gyroscope output */
-#define ADIS16400_XACCL_OUT 0x0A /* X-axis accelerometer output */
-#define ADIS16400_YACCL_OUT 0x0C /* Y-axis accelerometer output */
-#define ADIS16400_ZACCL_OUT 0x0E /* Z-axis accelerometer output */
-#define ADIS16400_XMAGN_OUT 0x10 /* X-axis magnetometer measurement */
-#define ADIS16400_YMAGN_OUT 0x12 /* Y-axis magnetometer measurement */
-#define ADIS16400_ZMAGN_OUT 0x14 /* Z-axis magnetometer measurement */
-#define ADIS16400_TEMP_OUT 0x16 /* Temperature output */
-#define ADIS16400_AUX_ADC 0x18 /* Auxiliary ADC measurement */
-
-#define ADIS16350_XTEMP_OUT 0x10 /* X-axis gyroscope temperature measurement */
-#define ADIS16350_YTEMP_OUT 0x12 /* Y-axis gyroscope temperature measurement */
-#define ADIS16350_ZTEMP_OUT 0x14 /* Z-axis gyroscope temperature measurement */
-
-#define ADIS16300_PITCH_OUT 0x12 /* X axis inclinometer output measurement */
-#define ADIS16300_ROLL_OUT 0x14 /* Y axis inclinometer output measurement */
-#define ADIS16300_AUX_ADC 0x16 /* Auxiliary ADC measurement */
-
-#define ADIS16448_BARO_OUT 0x16 /* Barometric pressure output */
-#define ADIS16448_TEMP_OUT 0x18 /* Temperature output */
-
-/* Calibration parameters */
-#define ADIS16400_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
-#define ADIS16400_YGYRO_OFF 0x1C /* Y-axis gyroscope bias offset factor */
-#define ADIS16400_ZGYRO_OFF 0x1E /* Z-axis gyroscope bias offset factor */
-#define ADIS16400_XACCL_OFF 0x20 /* X-axis acceleration bias offset factor */
-#define ADIS16400_YACCL_OFF 0x22 /* Y-axis acceleration bias offset factor */
-#define ADIS16400_ZACCL_OFF 0x24 /* Z-axis acceleration bias offset factor */
-#define ADIS16400_XMAGN_HIF 0x26 /* X-axis magnetometer, hard-iron factor */
-#define ADIS16400_YMAGN_HIF 0x28 /* Y-axis magnetometer, hard-iron factor */
-#define ADIS16400_ZMAGN_HIF 0x2A /* Z-axis magnetometer, hard-iron factor */
-#define ADIS16400_XMAGN_SIF 0x2C /* X-axis magnetometer, soft-iron factor */
-#define ADIS16400_YMAGN_SIF 0x2E /* Y-axis magnetometer, soft-iron factor */
-#define ADIS16400_ZMAGN_SIF 0x30 /* Z-axis magnetometer, soft-iron factor */
-
-#define ADIS16400_GPIO_CTRL 0x32 /* Auxiliary digital input/output control */
-#define ADIS16400_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16400_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16400_SENS_AVG 0x38 /* Dynamic range and digital filter control */
-#define ADIS16400_SLP_CNT 0x3A /* Sleep mode control */
-#define ADIS16400_DIAG_STAT 0x3C /* System status */
-
-/* Alarm functions */
-#define ADIS16400_GLOB_CMD 0x3E /* System command */
-#define ADIS16400_ALM_MAG1 0x40 /* Alarm 1 amplitude threshold */
-#define ADIS16400_ALM_MAG2 0x42 /* Alarm 2 amplitude threshold */
-#define ADIS16400_ALM_SMPL1 0x44 /* Alarm 1 sample size */
-#define ADIS16400_ALM_SMPL2 0x46 /* Alarm 2 sample size */
-#define ADIS16400_ALM_CTRL 0x48 /* Alarm control */
-#define ADIS16400_AUX_DAC 0x4A /* Auxiliary DAC data */
-
-#define ADIS16334_LOT_ID1 0x52 /* Lot identification code 1 */
-#define ADIS16334_LOT_ID2 0x54 /* Lot identification code 2 */
-#define ADIS16400_PRODUCT_ID 0x56 /* Product identifier */
-#define ADIS16334_SERIAL_NUMBER 0x58 /* Serial number, lot specific */
-
-#define ADIS16400_ERROR_ACTIVE (1<<14)
-#define ADIS16400_NEW_DATA (1<<14)
-
-/* MSC_CTRL */
-#define ADIS16400_MSC_CTRL_MEM_TEST (1<<11)
-#define ADIS16400_MSC_CTRL_INT_SELF_TEST (1<<10)
-#define ADIS16400_MSC_CTRL_NEG_SELF_TEST (1<<9)
-#define ADIS16400_MSC_CTRL_POS_SELF_TEST (1<<8)
-#define ADIS16400_MSC_CTRL_GYRO_BIAS (1<<7)
-#define ADIS16400_MSC_CTRL_ACCL_ALIGN (1<<6)
-#define ADIS16400_MSC_CTRL_DATA_RDY_EN (1<<2)
-#define ADIS16400_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
-#define ADIS16400_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
-
-/* SMPL_PRD */
-#define ADIS16400_SMPL_PRD_TIME_BASE (1<<7)
-#define ADIS16400_SMPL_PRD_DIV_MASK 0x7F
-
-/* DIAG_STAT */
-#define ADIS16400_DIAG_STAT_ZACCL_FAIL 15
-#define ADIS16400_DIAG_STAT_YACCL_FAIL 14
-#define ADIS16400_DIAG_STAT_XACCL_FAIL 13
-#define ADIS16400_DIAG_STAT_XGYRO_FAIL 12
-#define ADIS16400_DIAG_STAT_YGYRO_FAIL 11
-#define ADIS16400_DIAG_STAT_ZGYRO_FAIL 10
-#define ADIS16400_DIAG_STAT_ALARM2 9
-#define ADIS16400_DIAG_STAT_ALARM1 8
-#define ADIS16400_DIAG_STAT_FLASH_CHK 6
-#define ADIS16400_DIAG_STAT_SELF_TEST 5
-#define ADIS16400_DIAG_STAT_OVERFLOW 4
-#define ADIS16400_DIAG_STAT_SPI_FAIL 3
-#define ADIS16400_DIAG_STAT_FLASH_UPT 2
-#define ADIS16400_DIAG_STAT_POWER_HIGH 1
-#define ADIS16400_DIAG_STAT_POWER_LOW 0
-
-/* GLOB_CMD */
-#define ADIS16400_GLOB_CMD_SW_RESET (1<<7)
-#define ADIS16400_GLOB_CMD_P_AUTO_NULL (1<<4)
-#define ADIS16400_GLOB_CMD_FLASH_UPD (1<<3)
-#define ADIS16400_GLOB_CMD_DAC_LATCH (1<<2)
-#define ADIS16400_GLOB_CMD_FAC_CALIB (1<<1)
-#define ADIS16400_GLOB_CMD_AUTO_NULL (1<<0)
-
-/* SLP_CNT */
-#define ADIS16400_SLP_CNT_POWER_OFF (1<<8)
-
-#define ADIS16334_RATE_DIV_SHIFT 8
-#define ADIS16334_RATE_INT_CLK BIT(0)
-
-#define ADIS16400_SPI_SLOW (u32)(300 * 1000)
-#define ADIS16400_SPI_BURST (u32)(1000 * 1000)
-#define ADIS16400_SPI_FAST (u32)(2000 * 1000)
-
-#define ADIS16400_HAS_PROD_ID BIT(0)
-#define ADIS16400_NO_BURST BIT(1)
-#define ADIS16400_HAS_SLOW_MODE BIT(2)
-#define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
-#define ADIS16400_BURST_DIAG_STAT BIT(4)
-
-struct adis16400_state;
-
-struct adis16400_chip_info {
- const struct iio_chan_spec *channels;
- const int num_channels;
- const long flags;
- unsigned int gyro_scale_micro;
- unsigned int accel_scale_micro;
- int temp_scale_nano;
- int temp_offset;
- int (*set_freq)(struct adis16400_state *st, unsigned int freq);
- int (*get_freq)(struct adis16400_state *st);
-};
-
-/**
- * struct adis16400_state - device instance specific data
- * @variant: chip variant info
- * @filt_int: integer part of requested filter frequency
- * @adis: adis device
- **/
-struct adis16400_state {
- struct adis16400_chip_info *variant;
- int filt_int;
-
- struct adis adis;
- unsigned long avail_scan_mask[2];
-};
-
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-enum {
- ADIS16400_SCAN_SUPPLY,
- ADIS16400_SCAN_GYRO_X,
- ADIS16400_SCAN_GYRO_Y,
- ADIS16400_SCAN_GYRO_Z,
- ADIS16400_SCAN_ACC_X,
- ADIS16400_SCAN_ACC_Y,
- ADIS16400_SCAN_ACC_Z,
- ADIS16400_SCAN_MAGN_X,
- ADIS16400_SCAN_MAGN_Y,
- ADIS16400_SCAN_MAGN_Z,
- ADIS16400_SCAN_BARO,
- ADIS16350_SCAN_TEMP_X,
- ADIS16350_SCAN_TEMP_Y,
- ADIS16350_SCAN_TEMP_Z,
- ADIS16300_SCAN_INCLI_X,
- ADIS16300_SCAN_INCLI_Y,
- ADIS16400_SCAN_ADC,
- ADIS16400_SCAN_TIMESTAMP,
-};
-
-#ifdef CONFIG_IIO_BUFFER
-
-ssize_t adis16400_read_data_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-
-
-int adis16400_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask);
-irqreturn_t adis16400_trigger_handler(int irq, void *p);
-
-#else /* CONFIG_IIO_BUFFER */
-
-#define adis16400_update_scan_mode NULL
-#define adis16400_trigger_handler NULL
-
-#endif /* CONFIG_IIO_BUFFER */
-
-#endif /* SPI_ADIS16400_H_ */
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
deleted file mode 100644
index e70a5339acb1..000000000000
--- a/drivers/iio/imu/adis16400_buffer.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/export.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/trigger_consumer.h>
-
-#include "adis16400.h"
-
-int adis16400_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask)
-{
- struct adis16400_state *st = iio_priv(indio_dev);
- struct adis *adis = &st->adis;
- unsigned int burst_length;
- u8 *tx;
-
- if (st->variant->flags & ADIS16400_NO_BURST)
- return adis_update_scan_mode(indio_dev, scan_mask);
-
- kfree(adis->xfer);
- kfree(adis->buffer);
-
- /* All but the timestamp channel */
- burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
- if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
- burst_length += sizeof(u16);
-
- adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
- if (!adis->xfer)
- return -ENOMEM;
-
- adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
- if (!adis->buffer)
- return -ENOMEM;
-
- tx = adis->buffer + burst_length;
- tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
- tx[1] = 0;
-
- adis->xfer[0].tx_buf = tx;
- adis->xfer[0].bits_per_word = 8;
- adis->xfer[0].len = 2;
- adis->xfer[1].rx_buf = adis->buffer;
- adis->xfer[1].bits_per_word = 8;
- adis->xfer[1].len = burst_length;
-
- spi_message_init(&adis->msg);
- spi_message_add_tail(&adis->xfer[0], &adis->msg);
- spi_message_add_tail(&adis->xfer[1], &adis->msg);
-
- return 0;
-}
-
-irqreturn_t adis16400_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct adis16400_state *st = iio_priv(indio_dev);
- struct adis *adis = &st->adis;
- u32 old_speed_hz = st->adis.spi->max_speed_hz;
- void *buffer;
- int ret;
-
- if (!adis->buffer)
- return -ENOMEM;
-
- if (!(st->variant->flags & ADIS16400_NO_BURST) &&
- st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
- st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
- spi_setup(st->adis.spi);
- }
-
- ret = spi_sync(adis->spi, &adis->msg);
- if (ret)
- dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
-
- if (!(st->variant->flags & ADIS16400_NO_BURST)) {
- st->adis.spi->max_speed_hz = old_speed_hz;
- spi_setup(st->adis.spi);
- }
-
- if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
- buffer = adis->buffer + sizeof(u16);
- else
- buffer = adis->buffer;
-
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- pf->timestamp);
-
- iio_trigger_notify_done(indio_dev->trig);
-
- return IRQ_HANDLED;
-}
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index a27fe208f3ae..ab137c1bbe7b 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -9,6 +9,9 @@
*
*/
+#include <linux/clk.h>
+#include <linux/bitfield.h>
+#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -97,6 +100,12 @@
#define ADIS16480_REG_FIRM_DM ADIS16480_REG(0x03, 0x7A)
#define ADIS16480_REG_FIRM_Y ADIS16480_REG(0x03, 0x7C)
+/*
+ * External clock scaling in PPS mode.
+ * Available only for ADIS1649x devices
+ */
+#define ADIS16495_REG_SYNC_SCALE ADIS16480_REG(0x03, 0x10)
+
#define ADIS16480_REG_SERIAL_NUM ADIS16480_REG(0x04, 0x20)
 /* Each filter coefficient bank spans two pages */
@@ -107,6 +116,20 @@
#define ADIS16480_FIR_COEF_C(x) ADIS16480_FIR_COEF(0x09, (x))
#define ADIS16480_FIR_COEF_D(x) ADIS16480_FIR_COEF(0x0B, (x))
+/* ADIS16480_REG_FNCTIO_CTRL */
+#define ADIS16480_DRDY_SEL_MSK GENMASK(1, 0)
+#define ADIS16480_DRDY_SEL(x) FIELD_PREP(ADIS16480_DRDY_SEL_MSK, x)
+#define ADIS16480_DRDY_POL_MSK BIT(2)
+#define ADIS16480_DRDY_POL(x) FIELD_PREP(ADIS16480_DRDY_POL_MSK, x)
+#define ADIS16480_DRDY_EN_MSK BIT(3)
+#define ADIS16480_DRDY_EN(x) FIELD_PREP(ADIS16480_DRDY_EN_MSK, x)
+#define ADIS16480_SYNC_SEL_MSK GENMASK(5, 4)
+#define ADIS16480_SYNC_SEL(x) FIELD_PREP(ADIS16480_SYNC_SEL_MSK, x)
+#define ADIS16480_SYNC_EN_MSK BIT(7)
+#define ADIS16480_SYNC_EN(x) FIELD_PREP(ADIS16480_SYNC_EN_MSK, x)
+#define ADIS16480_SYNC_MODE_MSK BIT(8)
+#define ADIS16480_SYNC_MODE(x) FIELD_PREP(ADIS16480_SYNC_MODE_MSK, x)
+
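
To make the bit packing above concrete, here is a tiny standalone sketch (plain C, with the shifts written out by hand instead of FIELD_PREP) of the FNCTIO_CTRL value for data ready on DIO2 with positive polarity; the pin choice is just an example.

#include <stdio.h>

int main(void)
{
	unsigned int drdy_sel = 1;	/* DIO2 -> bits 1:0 */
	unsigned int drdy_pol = 1;	/* active high -> bit 2 */
	unsigned int drdy_en = 1;	/* enable -> bit 3 */
	unsigned int val = (drdy_sel << 0) | (drdy_pol << 2) | (drdy_en << 3);

	printf("FNCTIO_CTRL = 0x%04x\n", val);	/* prints 0x000d */
	return 0;
}
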
struct adis16480_chip_info {
unsigned int num_channels;
const struct iio_chan_spec *channels;
@@ -114,12 +137,40 @@ struct adis16480_chip_info {
unsigned int gyro_max_scale;
unsigned int accel_max_val;
unsigned int accel_max_scale;
+ unsigned int temp_scale;
+ unsigned int int_clk;
+ unsigned int max_dec_rate;
+ const unsigned int *filter_freqs;
+ bool has_pps_clk_mode;
+};
+
+enum adis16480_int_pin {
+ ADIS16480_PIN_DIO1,
+ ADIS16480_PIN_DIO2,
+ ADIS16480_PIN_DIO3,
+ ADIS16480_PIN_DIO4
+};
+
+enum adis16480_clock_mode {
+ ADIS16480_CLK_SYNC,
+ ADIS16480_CLK_PPS,
+ ADIS16480_CLK_INT
};
struct adis16480 {
const struct adis16480_chip_info *chip_info;
struct adis adis;
+ struct clk *ext_clk;
+ enum adis16480_clock_mode clk_mode;
+ unsigned int clk_freq;
+};
+
+static const char * const adis16480_int_pin_names[4] = {
+ [ADIS16480_PIN_DIO1] = "DIO1",
+ [ADIS16480_PIN_DIO2] = "DIO2",
+ [ADIS16480_PIN_DIO3] = "DIO3",
+ [ADIS16480_PIN_DIO4] = "DIO4",
};
#ifdef CONFIG_DEBUG_FS
@@ -268,20 +319,34 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
{
struct adis16480 *st = iio_priv(indio_dev);
- unsigned int t;
+ unsigned int t, reg;
t = val * 1000 + val2 / 1000;
if (t <= 0)
return -EINVAL;
- t = 2460000 / t;
- if (t > 2048)
- t = 2048;
+ /*
+ * When using PPS mode, the rate of data collection is equal to the
+ * product of the external clock frequency and the scale factor in the
+ * SYNC_SCALE register.
+ * When using sync mode or the internal clock, the output data rate is
+ * equal to the clock frequency divided by (DEC_RATE + 1).
+ */
+ if (st->clk_mode == ADIS16480_CLK_PPS) {
+ t = t / st->clk_freq;
+ reg = ADIS16495_REG_SYNC_SCALE;
+ } else {
+ t = st->clk_freq / t;
+ reg = ADIS16480_REG_DEC_RATE;
+ }
+
+ if (t > st->chip_info->max_dec_rate)
+ t = st->chip_info->max_dec_rate;
- if (t != 0)
+ if ((t != 0) && (st->clk_mode != ADIS16480_CLK_PPS))
t--;
- return adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
+ return adis_write_reg_16(&st->adis, reg, t);
}
static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
@@ -290,12 +355,29 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
uint16_t t;
int ret;
unsigned freq;
+ unsigned int reg;
- ret = adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
+ if (st->clk_mode == ADIS16480_CLK_PPS)
+ reg = ADIS16495_REG_SYNC_SCALE;
+ else
+ reg = ADIS16480_REG_DEC_RATE;
+
+ ret = adis_read_reg_16(&st->adis, reg, &t);
if (ret < 0)
return ret;
- freq = 2460000 / (t + 1);
+ /*
+ * When using PPS mode, the rate of data collection is equal to the
+ * product of the external clock frequency and the scale factor in the
+ * SYNC_SCALE register.
+ * When using sync mode or the internal clock, the output data rate is
+ * equal to the clock frequency divided by (DEC_RATE + 1).
+ */
+ if (st->clk_mode == ADIS16480_CLK_PPS)
+ freq = st->clk_freq * t;
+ else
+ freq = st->clk_freq / (t + 1);
+
*val = freq / 1000;
*val2 = (freq % 1000) * 1000;
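
As a worked example of the DEC_RATE path above (internal clock, no PPS), take the ADIS16480's 2.46 kHz internal rate and a requested 100 Hz. The standalone sketch below mirrors the milli-Hz arithmetic used by set_freq()/get_freq(); it is illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned int clk_freq = 2460 * 1000;	/* internal clock in milli-Hz (2.46 kHz) */
	unsigned int req = 100 * 1000;		/* requested 100 Hz, also in milli-Hz */
	unsigned int t = clk_freq / req;	/* 24 */

	if (t != 0)
		t--;				/* DEC_RATE register value: 23 */

	/* Effective rate read back: clk / (DEC_RATE + 1) = 102500 mHz = 102.5 Hz */
	printf("DEC_RATE = %u, rate = %u mHz\n", t, clk_freq / (t + 1));
	return 0;
}
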
@@ -425,6 +507,13 @@ static const unsigned int adis16480_def_filter_freqs[] = {
63,
};
+static const unsigned int adis16495_def_filter_freqs[] = {
+ 300,
+ 100,
+ 300,
+ 100,
+};
+
static const unsigned int ad16480_filter_data[][2] = {
[ADIS16480_SCAN_GYRO_X] = { ADIS16480_REG_FILTER_BNK0, 0 },
[ADIS16480_SCAN_GYRO_Y] = { ADIS16480_REG_FILTER_BNK0, 3 },
@@ -456,7 +545,7 @@ static int adis16480_get_filter_freq(struct iio_dev *indio_dev,
if (!(val & enable_mask))
*freq = 0;
else
- *freq = adis16480_def_filter_freqs[(val >> offset) & 0x3];
+ *freq = st->chip_info->filter_freqs[(val >> offset) & 0x3];
return IIO_VAL_INT;
}
@@ -483,10 +572,10 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
val &= ~enable_mask;
} else {
best_freq = 0;
- best_diff = 310;
+ best_diff = st->chip_info->filter_freqs[0];
for (i = 0; i < ARRAY_SIZE(adis16480_def_filter_freqs); i++) {
- if (adis16480_def_filter_freqs[i] >= freq) {
- diff = adis16480_def_filter_freqs[i] - freq;
+ if (st->chip_info->filter_freqs[i] >= freq) {
+ diff = st->chip_info->filter_freqs[i] - freq;
if (diff < best_diff) {
best_diff = diff;
best_freq = i;
@@ -506,6 +595,7 @@ static int adis16480_read_raw(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val, int *val2, long info)
{
struct adis16480 *st = iio_priv(indio_dev);
+ unsigned int temp;
switch (info) {
case IIO_CHAN_INFO_RAW:
@@ -525,8 +615,13 @@ static int adis16480_read_raw(struct iio_dev *indio_dev,
*val2 = 100; /* 0.0001 gauss */
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
- *val = 5;
- *val2 = 650000; /* 5.65 milli degree Celsius */
+ /*
+ * +85 degrees Celsius = temp_max_scale
+ * +25 degrees Celsius = 0
+ * LSB, 25 degrees Celsius = 60 / temp_max_scale
+ */
+ *val = st->chip_info->temp_scale / 1000;
+ *val2 = (st->chip_info->temp_scale % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_PRESSURE:
*val = 0;
@@ -537,7 +632,8 @@ static int adis16480_read_raw(struct iio_dev *indio_dev,
}
case IIO_CHAN_INFO_OFFSET:
 /* Only the temperature channel has an offset */
- *val = 4425; /* 25 degree Celsius = 0x0000 */
+ temp = 25 * 1000000LL; /* 25 degree Celsius = 0x0000 */
+ *val = DIV_ROUND_CLOSEST_ULL(temp, st->chip_info->temp_scale);
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
return adis16480_get_calibbias(indio_dev, chan, val);
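
A quick sanity check of the temperature offset formula introduced above, done as a standalone sketch; the rounding mirrors DIV_ROUND_CLOSEST_ULL.

#include <stdio.h>

int main(void)
{
	unsigned long long temp = 25 * 1000000ULL;	/* 25 degrees C in micro-degrees */

	/* ADIS16480 family: temp_scale = 5650 -> 4425, the previously hard-coded value */
	printf("%llu\n", (temp + 5650 / 2) / 5650);
	/* ADIS16495/16497: temp_scale = 12500 -> 2000 */
	printf("%llu\n", (temp + 12500 / 2) / 12500);
	return 0;
}
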
@@ -678,6 +774,12 @@ enum adis16480_variant {
ADIS16480,
ADIS16485,
ADIS16488,
+ ADIS16495_1,
+ ADIS16495_2,
+ ADIS16495_3,
+ ADIS16497_1,
+ ADIS16497_2,
+ ADIS16497_3,
};
static const struct adis16480_chip_info adis16480_chip_info[] = {
@@ -693,6 +795,10 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_scale = 300,
.accel_max_val = IIO_M_S_2_TO_G(21973),
.accel_max_scale = 18,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .filter_freqs = adis16480_def_filter_freqs,
},
[ADIS16480] = {
.channels = adis16480_channels,
@@ -701,6 +807,10 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(12500),
.accel_max_scale = 10,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .filter_freqs = adis16480_def_filter_freqs,
},
[ADIS16485] = {
.channels = adis16485_channels,
@@ -709,6 +819,10 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(20000),
.accel_max_scale = 5,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .filter_freqs = adis16480_def_filter_freqs,
},
[ADIS16488] = {
.channels = adis16480_channels,
@@ -717,6 +831,88 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(22500),
.accel_max_scale = 18,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .filter_freqs = adis16480_def_filter_freqs,
+ },
+ [ADIS16495_1] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
+ .gyro_max_scale = 125,
+ .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .accel_max_scale = 8,
+ .temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ },
+ [ADIS16495_2] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(18000),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .accel_max_scale = 8,
+ .temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ },
+ [ADIS16495_3] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
+ .gyro_max_scale = 2000,
+ .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .accel_max_scale = 8,
+ .temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ },
+ [ADIS16497_1] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
+ .gyro_max_scale = 125,
+ .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .accel_max_scale = 40,
+ .temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ },
+ [ADIS16497_2] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(18000),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .accel_max_scale = 40,
+ .temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ },
+ [ADIS16497_3] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
+ .gyro_max_scale = 2000,
+ .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .accel_max_scale = 40,
+ .temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
},
};
@@ -741,8 +937,17 @@ static int adis16480_stop_device(struct iio_dev *indio_dev)
static int adis16480_enable_irq(struct adis *adis, bool enable)
{
- return adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL,
- enable ? BIT(3) : 0);
+ uint16_t val;
+ int ret;
+
+ ret = adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~ADIS16480_DRDY_EN_MSK;
+ val |= ADIS16480_DRDY_EN(enable);
+
+ return adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
}
static int adis16480_initial_setup(struct iio_dev *indio_dev)
@@ -826,6 +1031,156 @@ static const struct adis_data adis16480_data = {
.enable_irq = adis16480_enable_irq,
};
+static int adis16480_config_irq_pin(struct device_node *of_node,
+ struct adis16480 *st)
+{
+ struct irq_data *desc;
+ enum adis16480_int_pin pin;
+ unsigned int irq_type;
+ uint16_t val;
+ int i, irq = 0;
+
+ desc = irq_get_irq_data(st->adis.spi->irq);
+ if (!desc) {
+ dev_err(&st->adis.spi->dev, "Could not find IRQ %d\n",
+ st->adis.spi->irq);
+ return -EINVAL;
+ }
+
+ /* Disable data ready since the default after reset is on */
+ val = ADIS16480_DRDY_EN(0);
+
+ /*
+ * Get the interrupt from the devicetree by reading the interrupt-names
+ * property. If it is not specified, use the DIO1 pin as default.
+ * According to the datasheet, the factory default assigns DIO2 as the
+ * data ready signal. However, previous versions of this driver used
+ * DIO1, so keep DIO1 as the default to avoid breaking setups that still
+ * expect the interrupt on that pin.
+ */
+ pin = ADIS16480_PIN_DIO1;
+ for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) {
+ irq = of_irq_get_byname(of_node, adis16480_int_pin_names[i]);
+ if (irq > 0) {
+ pin = i;
+ break;
+ }
+ }
+
+ val |= ADIS16480_DRDY_SEL(pin);
+
+ /*
+ * Get the interrupt line behaviour. The data ready polarity can be
+ * configured as positive or negative, corresponding to
+ * IRQF_TRIGGER_RISING or IRQF_TRIGGER_FALLING respectively.
+ */
+ irq_type = irqd_get_trigger_type(desc);
+ if (irq_type == IRQF_TRIGGER_RISING) { /* Default */
+ val |= ADIS16480_DRDY_POL(1);
+ } else if (irq_type == IRQF_TRIGGER_FALLING) {
+ val |= ADIS16480_DRDY_POL(0);
+ } else {
+ dev_err(&st->adis.spi->dev,
+ "Invalid interrupt type 0x%x specified\n", irq_type);
+ return -EINVAL;
+ }
+ /* Write the data ready configuration to the FNCTIO_CTRL register */
+ return adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val);
+}
+
+static int adis16480_of_get_ext_clk_pin(struct adis16480 *st,
+ struct device_node *of_node)
+{
+ const char *ext_clk_pin;
+ enum adis16480_int_pin pin;
+ int i;
+
+ pin = ADIS16480_PIN_DIO2;
+ if (of_property_read_string(of_node, "adi,ext-clk-pin", &ext_clk_pin))
+ goto clk_input_not_found;
+
+ for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) {
+ if (strcasecmp(ext_clk_pin, adis16480_int_pin_names[i]) == 0)
+ return i;
+ }
+
+clk_input_not_found:
+ dev_info(&st->adis.spi->dev,
+ "clk input line not specified, using DIO2\n");
+ return pin;
+}
+
+static int adis16480_ext_clk_config(struct adis16480 *st,
+ struct device_node *of_node,
+ bool enable)
+{
+ unsigned int mode, mask;
+ enum adis16480_int_pin pin;
+ uint16_t val;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, &val);
+ if (ret < 0)
+ return ret;
+
+ pin = adis16480_of_get_ext_clk_pin(st, of_node);
+ /*
+ * Each DIOx pin supports only one function at a time. When a single pin
+ * has two assignments, the enable bit for a lower priority function
+ * automatically resets to zero (disabling the lower priority function).
+ */
+ if (pin == ADIS16480_DRDY_SEL(val))
+ dev_warn(&st->adis.spi->dev,
+ "DIO%x pin supports only one function at a time\n",
+ pin + 1);
+
+ mode = ADIS16480_SYNC_EN(enable) | ADIS16480_SYNC_SEL(pin);
+ mask = ADIS16480_SYNC_EN_MSK | ADIS16480_SYNC_SEL_MSK;
+ /* Only ADIS1649x devices support pps ext clock mode */
+ if (st->chip_info->has_pps_clk_mode) {
+ mode |= ADIS16480_SYNC_MODE(st->clk_mode);
+ mask |= ADIS16480_SYNC_MODE_MSK;
+ }
+
+ val &= ~mask;
+ val |= mode;
+
+ ret = adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val);
+ if (ret < 0)
+ return ret;
+
+ return clk_prepare_enable(st->ext_clk);
+}
+
+static int adis16480_get_ext_clocks(struct adis16480 *st)
+{
+ st->clk_mode = ADIS16480_CLK_INT;
+ st->ext_clk = devm_clk_get(&st->adis.spi->dev, "sync");
+ if (!IS_ERR_OR_NULL(st->ext_clk)) {
+ st->clk_mode = ADIS16480_CLK_SYNC;
+ return 0;
+ }
+
+ if (PTR_ERR(st->ext_clk) != -ENOENT) {
+ dev_err(&st->adis.spi->dev, "failed to get ext clk\n");
+ return PTR_ERR(st->ext_clk);
+ }
+
+ if (st->chip_info->has_pps_clk_mode) {
+ st->ext_clk = devm_clk_get(&st->adis.spi->dev, "pps");
+ if (!IS_ERR_OR_NULL(st->ext_clk)) {
+ st->clk_mode = ADIS16480_CLK_PPS;
+ return 0;
+ }
+
+ if (PTR_ERR(st->ext_clk) != -ENOENT) {
+ dev_err(&st->adis.spi->dev, "failed to get ext clk\n");
+ return PTR_ERR(st->ext_clk);
+ }
+ }
+
+ return 0;
+}
+
static int adis16480_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -853,10 +1208,29 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+ ret = adis16480_config_irq_pin(spi->dev.of_node, st);
+ if (ret)
+ return ret;
+
+ ret = adis16480_get_ext_clocks(st);
if (ret)
return ret;
+ if (!IS_ERR_OR_NULL(st->ext_clk)) {
+ ret = adis16480_ext_clk_config(st, spi->dev.of_node, true);
+ if (ret)
+ return ret;
+
+ st->clk_freq = clk_get_rate(st->ext_clk);
+ st->clk_freq *= 1000; /* Hz to milli-Hz, to match set_freq()/get_freq() */
+ } else {
+ st->clk_freq = st->chip_info->int_clk;
+ }
+
+ ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+ if (ret)
+ goto error_clk_disable_unprepare;
+
ret = adis16480_initial_setup(indio_dev);
if (ret)
goto error_cleanup_buffer;
@@ -873,6 +1247,8 @@ error_stop_device:
adis16480_stop_device(indio_dev);
error_cleanup_buffer:
adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+error_clk_disable_unprepare:
+ clk_disable_unprepare(st->ext_clk);
return ret;
}
@@ -885,6 +1261,7 @@ static int adis16480_remove(struct spi_device *spi)
adis16480_stop_device(indio_dev);
adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+ clk_disable_unprepare(st->ext_clk);
return 0;
}
@@ -894,13 +1271,35 @@ static const struct spi_device_id adis16480_ids[] = {
{ "adis16480", ADIS16480 },
{ "adis16485", ADIS16485 },
{ "adis16488", ADIS16488 },
+ { "adis16495-1", ADIS16495_1 },
+ { "adis16495-2", ADIS16495_2 },
+ { "adis16495-3", ADIS16495_3 },
+ { "adis16497-1", ADIS16497_1 },
+ { "adis16497-2", ADIS16497_2 },
+ { "adis16497-3", ADIS16497_3 },
{ }
};
MODULE_DEVICE_TABLE(spi, adis16480_ids);
+static const struct of_device_id adis16480_of_match[] = {
+ { .compatible = "adi,adis16375" },
+ { .compatible = "adi,adis16480" },
+ { .compatible = "adi,adis16485" },
+ { .compatible = "adi,adis16488" },
+ { .compatible = "adi,adis16495-1" },
+ { .compatible = "adi,adis16495-2" },
+ { .compatible = "adi,adis16495-3" },
+ { .compatible = "adi,adis16497-1" },
+ { .compatible = "adi,adis16497-2" },
+ { .compatible = "adi,adis16497-3" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adis16480_of_match);
+
static struct spi_driver adis16480_driver = {
.driver = {
.name = "adis16480",
+ .of_match_table = adis16480_of_match,
},
.id_table = adis16480_ids,
.probe = adis16480_probe,
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 76643c5571aa..3a7c970568dc 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -20,6 +20,43 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/imu/adis.h>
+static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+ unsigned int burst_length;
+ u8 *tx;
+
+ /* All but the timestamp channel */
+ burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
+ burst_length += adis->burst->extra_len;
+
+ adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
+ if (!adis->xfer)
+ return -ENOMEM;
+
+ adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
+ if (!adis->buffer)
+ return -ENOMEM;
+
+ tx = adis->buffer + burst_length;
+ tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
+ tx[1] = 0;
+
+ adis->xfer[0].tx_buf = tx;
+ adis->xfer[0].bits_per_word = 8;
+ adis->xfer[0].len = 2;
+ adis->xfer[1].rx_buf = adis->buffer;
+ adis->xfer[1].bits_per_word = 8;
+ adis->xfer[1].len = burst_length;
+
+ spi_message_init(&adis->msg);
+ spi_message_add_tail(&adis->xfer[0], &adis->msg);
+ spi_message_add_tail(&adis->xfer[1], &adis->msg);
+
+ return 0;
+}
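
For reference, the SPI message built above is one 2-byte command transfer followed by a single burst-length read. The buffer sizing works out as in this standalone sketch; the channel count is a hypothetical example rather than a specific adis part.

#include <stdio.h>

int main(void)
{
	unsigned int num_channels = 12;	/* hypothetical: 11 data channels + timestamp */
	unsigned int extra_len = 2;	/* e.g. DIAG_STAT prepended with BURST_DIAG_STAT */
	unsigned int burst_length = (num_channels - 1) * 2 + extra_len;

	printf("rx transfer: %u bytes, allocation: %u bytes (rx + 2-byte tx command)\n",
	       burst_length, burst_length + 2);
	return 0;
}
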
+
int adis_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
@@ -32,6 +69,9 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
kfree(adis->xfer);
kfree(adis->buffer);
+ if (adis->burst && adis->burst->en)
+ return adis_update_scan_mode_burst(indio_dev, scan_mask);
+
scan_count = indio_dev->scan_bytes / 2;
adis->xfer = kcalloc(scan_count + 1, sizeof(*adis->xfer), GFP_KERNEL);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 650de0fefb7b..6138a6d86afb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -796,12 +796,14 @@ static const struct iio_mount_matrix *
inv_get_mount_matrix(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
- return &((struct inv_mpu6050_state *)iio_priv(indio_dev))->orientation;
+ struct inv_mpu6050_state *data = iio_priv(indio_dev);
+
+ return &data->orientation;
}
static const struct iio_chan_spec_ext_info inv_ext_info[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, inv_get_mount_matrix),
- { },
+ { }
};
#define INV_MPU6050_CHAN(_type, _channel2, _index) \
@@ -1021,8 +1023,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
pdata = dev_get_platdata(dev);
if (!pdata) {
- result = of_iio_read_mount_matrix(dev, "mount-matrix",
- &st->orientation);
+ result = iio_read_mount_matrix(dev, "mount-matrix",
+ &st->orientation);
if (result) {
dev_err(dev, "Failed to retrieve mounting matrix %d\n",
result);
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 094fd006b63d..9e592973a8a6 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -9,7 +9,7 @@ config IIO_ST_LSM6DSX
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
- ism330dlc, lsm6dso
+ ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index d1d8d07a0714..004a8a1a0027 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -20,6 +20,9 @@
#define ST_LSM6DSM_DEV_NAME "lsm6dsm"
#define ST_ISM330DLC_DEV_NAME "ism330dlc"
#define ST_LSM6DSO_DEV_NAME "lsm6dso"
+#define ST_ASM330LHH_DEV_NAME "asm330lhh"
+#define ST_LSM6DSOX_DEV_NAME "lsm6dsox"
+#define ST_LSM6DSR_DEV_NAME "lsm6dsr"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -28,6 +31,9 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DSM_ID,
ST_ISM330DLC_ID,
ST_LSM6DSO_ID,
+ ST_ASM330LHH_ID,
+ ST_LSM6DSOX_ID,
+ ST_LSM6DSR_ID,
ST_LSM6DSX_MAX_ID,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 2c0d3763405a..793598ee960a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -13,9 +13,9 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO: The FIFO buffer can be configured to store data from gyroscope and
- * accelerometer. Each sample is queued with a tag (1B) indicating data source
- * (gyroscope, accelerometer, hw timer).
+ * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR: The FIFO buffer can be configured to
+ * store data from gyroscope and accelerometer. Each sample is queued with
+ * a tag (1B) indicating data source (gyroscope, accelerometer, hw timer).
*
* FIFO supported modes:
* - BYPASS: FIFO disabled
@@ -506,7 +506,7 @@ st_lsm6dsx_push_tagged_data(struct st_lsm6dsx_hw *hw, u8 tag,
}
/**
- * st_lsm6dsx_read_tagged_fifo() - LSM6DSO read FIFO routine
+ * st_lsm6dsx_read_tagged_fifo() - tagged hw FIFO read routine
* @hw: Pointer to instance of struct st_lsm6dsx_hw.
*
* Read samples from the hw FIFO and push them to IIO buffers.
@@ -517,7 +517,6 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
{
u16 pattern_len = hw->sip * ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
u16 fifo_len, fifo_diff_mask;
- struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
u8 iio_buff[ST_LSM6DSX_IIO_BUFF_SIZE], tag;
bool reset_ts = false;
int i, err, read_len;
@@ -539,9 +538,6 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
if (!fifo_len)
return 0;
- acc_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
- gyro_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_GYRO]);
-
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw,
ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 12e29dda9b98..cf82c9049945 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -23,7 +23,7 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
- * - LSM6DSO
+ * - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR
* - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
@@ -62,37 +62,19 @@
#define ST_LSM6DSX_REG_INT2_ON_INT1_ADDR 0x13
#define ST_LSM6DSX_REG_INT2_ON_INT1_MASK BIT(5)
-#define ST_LSM6DSX_REG_ACC_ODR_ADDR 0x10
-#define ST_LSM6DSX_REG_ACC_ODR_MASK GENMASK(7, 4)
-#define ST_LSM6DSX_REG_ACC_FS_ADDR 0x10
-#define ST_LSM6DSX_REG_ACC_FS_MASK GENMASK(3, 2)
#define ST_LSM6DSX_REG_ACC_OUT_X_L_ADDR 0x28
#define ST_LSM6DSX_REG_ACC_OUT_Y_L_ADDR 0x2a
#define ST_LSM6DSX_REG_ACC_OUT_Z_L_ADDR 0x2c
-#define ST_LSM6DSX_REG_GYRO_ODR_ADDR 0x11
-#define ST_LSM6DSX_REG_GYRO_ODR_MASK GENMASK(7, 4)
-#define ST_LSM6DSX_REG_GYRO_FS_ADDR 0x11
-#define ST_LSM6DSX_REG_GYRO_FS_MASK GENMASK(3, 2)
#define ST_LSM6DSX_REG_GYRO_OUT_X_L_ADDR 0x22
#define ST_LSM6DSX_REG_GYRO_OUT_Y_L_ADDR 0x24
#define ST_LSM6DSX_REG_GYRO_OUT_Z_L_ADDR 0x26
-#define ST_LSM6DSX_ACC_FS_2G_GAIN IIO_G_TO_M_S_2(61)
-#define ST_LSM6DSX_ACC_FS_4G_GAIN IIO_G_TO_M_S_2(122)
-#define ST_LSM6DSX_ACC_FS_8G_GAIN IIO_G_TO_M_S_2(244)
-#define ST_LSM6DSX_ACC_FS_16G_GAIN IIO_G_TO_M_S_2(488)
-
-#define ST_LSM6DSX_GYRO_FS_245_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_LSM6DSX_GYRO_FS_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_LSM6DSX_GYRO_FS_1000_GAIN IIO_DEGREE_TO_RAD(35000)
-#define ST_LSM6DSX_GYRO_FS_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-
static const struct st_lsm6dsx_odr_table_entry st_lsm6dsx_odr_table[] = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
- .addr = ST_LSM6DSX_REG_ACC_ODR_ADDR,
- .mask = ST_LSM6DSX_REG_ACC_ODR_MASK,
+ .addr = 0x10,
+ .mask = GENMASK(7, 4),
},
.odr_avl[0] = { 13, 0x01 },
.odr_avl[1] = { 26, 0x02 },
@@ -103,8 +85,8 @@ static const struct st_lsm6dsx_odr_table_entry st_lsm6dsx_odr_table[] = {
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
- .addr = ST_LSM6DSX_REG_GYRO_ODR_ADDR,
- .mask = ST_LSM6DSX_REG_GYRO_ODR_MASK,
+ .addr = 0x11,
+ .mask = GENMASK(7, 4),
},
.odr_avl[0] = { 13, 0x01 },
.odr_avl[1] = { 26, 0x02 },
@@ -118,23 +100,23 @@ static const struct st_lsm6dsx_odr_table_entry st_lsm6dsx_odr_table[] = {
static const struct st_lsm6dsx_fs_table_entry st_lsm6dsx_fs_table[] = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
- .addr = ST_LSM6DSX_REG_ACC_FS_ADDR,
- .mask = ST_LSM6DSX_REG_ACC_FS_MASK,
+ .addr = 0x10,
+ .mask = GENMASK(3, 2),
},
- .fs_avl[0] = { ST_LSM6DSX_ACC_FS_2G_GAIN, 0x0 },
- .fs_avl[1] = { ST_LSM6DSX_ACC_FS_4G_GAIN, 0x2 },
- .fs_avl[2] = { ST_LSM6DSX_ACC_FS_8G_GAIN, 0x3 },
- .fs_avl[3] = { ST_LSM6DSX_ACC_FS_16G_GAIN, 0x1 },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
- .addr = ST_LSM6DSX_REG_GYRO_FS_ADDR,
- .mask = ST_LSM6DSX_REG_GYRO_FS_MASK,
+ .addr = 0x11,
+ .mask = GENMASK(3, 2),
},
- .fs_avl[0] = { ST_LSM6DSX_GYRO_FS_245_GAIN, 0x0 },
- .fs_avl[1] = { ST_LSM6DSX_GYRO_FS_500_GAIN, 0x1 },
- .fs_avl[2] = { ST_LSM6DSX_GYRO_FS_1000_GAIN, 0x2 },
- .fs_avl[3] = { ST_LSM6DSX_GYRO_FS_2000_GAIN, 0x3 },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
}
};
@@ -287,6 +269,111 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.max_fifo_size = 512,
.id = {
[0] = ST_LSM6DSO_ID,
+ [1] = ST_LSM6DSOX_ID,
+ },
+ .batch = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .addr = 0x09,
+ .mask = GENMASK(3, 0),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .addr = 0x09,
+ .mask = GENMASK(7, 4),
+ },
+ },
+ .fifo_ops = {
+ .read_fifo = st_lsm6dsx_read_tagged_fifo,
+ .fifo_th = {
+ .addr = 0x07,
+ .mask = GENMASK(8, 0),
+ },
+ .fifo_diff = {
+ .addr = 0x3a,
+ .mask = GENMASK(8, 0),
+ },
+ .th_wl = 1,
+ },
+ .ts_settings = {
+ .timer_en = {
+ .addr = 0x19,
+ .mask = BIT(5),
+ },
+ .decimator = {
+ .addr = 0x0a,
+ .mask = GENMASK(7, 6),
+ },
+ },
+ .shub_settings = {
+ .page_mux = {
+ .addr = 0x01,
+ .mask = BIT(6),
+ },
+ .master_en = {
+ .addr = 0x14,
+ .mask = BIT(2),
+ },
+ .pullup_en = {
+ .addr = 0x14,
+ .mask = BIT(3),
+ },
+ .aux_sens = {
+ .addr = 0x14,
+ .mask = GENMASK(1, 0),
+ },
+ .wr_once = {
+ .addr = 0x14,
+ .mask = BIT(6),
+ },
+ .shub_out = 0x02,
+ .slv0_addr = 0x15,
+ .dw_slv0_addr = 0x21,
+ .batch_en = BIT(3),
+ }
+ },
+ {
+ .wai = 0x6b,
+ .max_fifo_size = 512,
+ .id = {
+ [0] = ST_ASM330LHH_ID,
+ },
+ .batch = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .addr = 0x09,
+ .mask = GENMASK(3, 0),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .addr = 0x09,
+ .mask = GENMASK(7, 4),
+ },
+ },
+ .fifo_ops = {
+ .read_fifo = st_lsm6dsx_read_tagged_fifo,
+ .fifo_th = {
+ .addr = 0x07,
+ .mask = GENMASK(8, 0),
+ },
+ .fifo_diff = {
+ .addr = 0x3a,
+ .mask = GENMASK(8, 0),
+ },
+ .th_wl = 1,
+ },
+ .ts_settings = {
+ .timer_en = {
+ .addr = 0x19,
+ .mask = BIT(5),
+ },
+ .decimator = {
+ .addr = 0x0a,
+ .mask = GENMASK(7, 6),
+ },
+ },
+ },
+ {
+ .wai = 0x6b,
+ .max_fifo_size = 512,
+ .id = {
+ [0] = ST_LSM6DSR_ID,
},
.batch = {
[ST_LSM6DSX_ID_ACC] = {
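The tables above now carry the register address and bit mask inline (for example, accel ODR at 0x10 with GENMASK(7, 4)). As a hedged sketch of how such an addr/mask/value triple is typically applied, with the generic regmap_update_bits() standing in for the driver's own register helpers and a made-up function name:

#include <linux/bitops.h>
#include <linux/regmap.h>

/*
 * e.g. accel 13 Hz from the table above: addr 0x10, mask GENMASK(7, 4),
 * val 0x01 -> read-modify-write only the masked ODR bits
 */
static int st_lsm6dsx_apply_field_sketch(struct regmap *map, unsigned int addr,
					 unsigned int mask, unsigned int val)
{
	return regmap_update_bits(map, addr, mask, val << __ffs(mask));
}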
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 448b7bc1e578..f54370196098 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -65,6 +65,18 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,lsm6dso",
.data = (void *)ST_LSM6DSO_ID,
},
+ {
+ .compatible = "st,asm330lhh",
+ .data = (void *)ST_ASM330LHH_ID,
+ },
+ {
+ .compatible = "st,lsm6dsox",
+ .data = (void *)ST_LSM6DSOX_ID,
+ },
+ {
+ .compatible = "st,lsm6dsr",
+ .data = (void *)ST_LSM6DSR_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -76,6 +88,9 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DSM_DEV_NAME, ST_LSM6DSM_ID },
{ ST_ISM330DLC_DEV_NAME, ST_ISM330DLC_ID },
{ ST_LSM6DSO_DEV_NAME, ST_LSM6DSO_ID },
+ { ST_ASM330LHH_DEV_NAME, ST_ASM330LHH_ID },
+ { ST_LSM6DSOX_DEV_NAME, ST_LSM6DSOX_ID },
+ { ST_LSM6DSR_DEV_NAME, ST_LSM6DSR_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index b1df8a6973e6..4a4abb2935da 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -65,6 +65,18 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,lsm6dso",
.data = (void *)ST_LSM6DSO_ID,
},
+ {
+ .compatible = "st,asm330lhh",
+ .data = (void *)ST_ASM330LHH_ID,
+ },
+ {
+ .compatible = "st,lsm6dsox",
+ .data = (void *)ST_LSM6DSOX_ID,
+ },
+ {
+ .compatible = "st,lsm6dsr",
+ .data = (void *)ST_LSM6DSR_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -76,6 +88,9 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DSM_DEV_NAME, ST_LSM6DSM_ID },
{ ST_ISM330DLC_DEV_NAME, ST_ISM330DLC_ID },
{ ST_LSM6DSO_DEV_NAME, ST_LSM6DSO_ID },
+ { ST_ASM330LHH_DEV_NAME, ST_ASM330LHH_ID },
+ { ST_LSM6DSOX_DEV_NAME, ST_LSM6DSOX_ID },
+ { ST_LSM6DSR_DEV_NAME, ST_LSM6DSR_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index cd5bfe39591b..4fa273002c03 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;
- trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
- sizeof(*trialmask),
- GFP_KERNEL);
+ trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*trialmask), GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
@@ -344,12 +343,12 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
}
bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
- kfree(trialmask);
+ bitmap_free(trialmask);
return 0;
err_invalid_mask:
- kfree(trialmask);
+ bitmap_free(trialmask);
return -EINVAL;
}
@@ -666,7 +665,7 @@ static void iio_free_scan_mask(struct iio_dev *indio_dev,
{
/* If the mask is dynamically allocated free it, otherwise do nothing */
if (!indio_dev->available_scan_masks)
- kfree(mask);
+ bitmap_free(mask);
}
struct iio_device_config {
@@ -736,8 +735,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
}
/* What scan mask do we actually have? */
- compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
- sizeof(long), GFP_KERNEL);
+ compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
if (compound_mask == NULL)
return -ENOMEM;
@@ -762,7 +760,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
indio_dev->masklength,
compound_mask,
strict_scanmask);
- kfree(compound_mask);
+ bitmap_free(compound_mask);
if (scan_mask == NULL)
return -EINVAL;
} else {
@@ -1303,9 +1301,8 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
channels[i].scan_index;
}
if (indio_dev->masklength && buffer->scan_mask == NULL) {
- buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
- sizeof(*buffer->scan_mask),
- GFP_KERNEL);
+ buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
+ GFP_KERNEL);
if (buffer->scan_mask == NULL) {
ret = -ENOMEM;
goto error_cleanup_dynamic;
@@ -1334,7 +1331,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
return 0;
error_free_scan_mask:
- kfree(buffer->scan_mask);
+ bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
kfree(indio_dev->buffer->buffer_group.attrs);
@@ -1347,7 +1344,7 @@ void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
if (!indio_dev->buffer)
return;
- kfree(indio_dev->buffer->scan_mask);
+ bitmap_free(indio_dev->buffer->scan_mask);
kfree(indio_dev->buffer->buffer_group.attrs);
kfree(indio_dev->buffer->scan_el_group.attrs);
iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
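The buffer changes above swap the open-coded kcalloc(BITS_TO_LONGS(n), sizeof(long)) / kfree() pairs for bitmap_zalloc()/bitmap_free(). A minimal sketch of the pattern, outside of any driver (the helper names are illustrative):

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *alloc_scan_mask(unsigned int masklength)
{
	/* zeroed bitmap wide enough for 'masklength' channel bits */
	return bitmap_zalloc(masklength, GFP_KERNEL);
}

static void free_scan_mask(unsigned long *mask)
{
	bitmap_free(mask);	/* like kfree(), NULL is a no-op */
}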
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 4700fd5d8c90..f5a4581302f4 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -19,6 +19,7 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
+#include <linux/property.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
@@ -530,8 +531,8 @@ ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
/**
- * of_iio_read_mount_matrix() - retrieve iio device mounting matrix from
- * device-tree "mount-matrix" property
+ * iio_read_mount_matrix() - retrieve iio device mounting matrix from
+ * device "mount-matrix" property
* @dev: device the mounting matrix property is assigned to
* @propname: device specific mounting matrix property name
* @matrix: where to store retrieved matrix
@@ -541,40 +542,29 @@ EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
*
* Return: 0 if success, or a negative error code on failure.
*/
-#ifdef CONFIG_OF
-int of_iio_read_mount_matrix(const struct device *dev,
- const char *propname,
- struct iio_mount_matrix *matrix)
+int iio_read_mount_matrix(struct device *dev, const char *propname,
+ struct iio_mount_matrix *matrix)
{
- if (dev->of_node) {
- int err = of_property_read_string_array(dev->of_node,
- propname, matrix->rotation,
- ARRAY_SIZE(iio_mount_idmatrix.rotation));
+ size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
+ int err;
- if (err == ARRAY_SIZE(iio_mount_idmatrix.rotation))
- return 0;
+ err = device_property_read_string_array(dev, propname,
+ matrix->rotation, len);
+ if (err == len)
+ return 0;
- if (err >= 0)
- /* Invalid number of matrix entries. */
- return -EINVAL;
+ if (err >= 0)
+ /* Invalid number of matrix entries. */
+ return -EINVAL;
- if (err != -EINVAL)
- /* Invalid matrix declaration format. */
- return err;
- }
+ if (err != -EINVAL)
+ /* Invalid matrix declaration format. */
+ return err;
/* Matrix was not declared at all: fallback to identity. */
return iio_setup_mount_idmatrix(dev, matrix);
}
-#else
-int of_iio_read_mount_matrix(const struct device *dev,
- const char *propname,
- struct iio_mount_matrix *matrix)
-{
- return iio_setup_mount_idmatrix(dev, matrix);
-}
-#endif
-EXPORT_SYMBOL(of_iio_read_mount_matrix);
+EXPORT_SYMBOL(iio_read_mount_matrix);
static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
int size, const int *vals)
@@ -1743,10 +1733,10 @@ EXPORT_SYMBOL(__iio_device_register);
**/
void iio_device_unregister(struct iio_dev *indio_dev)
{
- mutex_lock(&indio_dev->info_exist_lock);
-
cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
+ mutex_lock(&indio_dev->info_exist_lock);
+
iio_device_unregister_debugfs(indio_dev);
iio_disable_all_buffers(indio_dev);
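With of_iio_read_mount_matrix() generalized into iio_read_mount_matrix() above, a driver can pick up the "mount-matrix" property from either DT or ACPI _DSD. A minimal usage sketch; the probe helper and the example matrix values are illustrative only:

#include <linux/device.h>
#include <linux/iio/iio.h>

static int example_read_orientation(struct device *dev,
				    struct iio_mount_matrix *orientation)
{
	/*
	 * Firmware is expected to provide nine row-major strings, e.g.
	 *   mount-matrix = "0", "1", "0",
	 *                  "-1", "0", "0",
	 *                  "0", "0", "1";
	 * A missing property falls back to the identity matrix.
	 */
	return iio_read_mount_matrix(dev, "mount-matrix", orientation);
}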
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index ce66699c7fcc..e5b538379ed1 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -254,8 +254,11 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
/* Get irq number */
pf->irq = iio_trigger_get_irq(trig);
- if (pf->irq < 0)
+ if (pf->irq < 0) {
+ pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
+ trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
goto out_put_module;
+ }
/* Request irq */
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 06ca3f7fcc44..4a5eff3f18bc 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -733,11 +733,11 @@ static int iio_channel_read_avail(struct iio_channel *chan,
vals, type, length, info);
}
-int iio_read_avail_channel_raw(struct iio_channel *chan,
- const int **vals, int *length)
+int iio_read_avail_channel_attribute(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum attribute)
{
int ret;
- int type;
mutex_lock(&chan->indio_dev->info_exist_lock);
if (!chan->indio_dev->info) {
@@ -745,11 +745,23 @@ int iio_read_avail_channel_raw(struct iio_channel *chan,
goto err_unlock;
}
- ret = iio_channel_read_avail(chan,
- vals, &type, length, IIO_CHAN_INFO_RAW);
+ ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
+
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+ const int **vals, int *length)
+{
+ int ret;
+ int type;
+
+ ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
+ IIO_CHAN_INFO_RAW);
+
if (ret >= 0 && type != IIO_VAL_INT)
/* raw values are assumed to be IIO_VAL_INT */
ret = -EINVAL;
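A consumer-side sketch of the newly exported iio_read_avail_channel_attribute(): query the list of available scale values for a channel. The error handling and the assumption that the provider reports IIO_AVAIL_LIST are illustrative:

#include <linux/iio/consumer.h>
#include <linux/iio/types.h>

static int example_list_scales(struct iio_channel *chan)
{
	const int *vals;
	int type, length, ret;

	ret = iio_read_avail_channel_attribute(chan, &vals, &type, &length,
					       IIO_CHAN_INFO_SCALE);
	if (ret < 0)
		return ret;
	if (ret != IIO_AVAIL_LIST)
		return -EINVAL;

	/* 'length' integers in 'vals', formatted according to 'type' */
	return length;
}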
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 5190eacfeb0a..954c958cfc43 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -13,11 +13,11 @@ config ACPI_ALS
select IIO_TRIGGERED_BUFFER
select IIO_KFIFO_BUF
help
- Say Y here if you want to build a driver for the ACPI0008
- Ambient Light Sensor.
+ Say Y here if you want to build a driver for the ACPI0008
+ Ambient Light Sensor.
- To compile this driver as a module, choose M here: the module will
- be called acpi-als.
+ To compile this driver as a module, choose M here: the module will
+ be called acpi-als.
config ADJD_S311
tristate "ADJD-S311-CR999 digital color sensor"
@@ -25,31 +25,31 @@ config ADJD_S311
select IIO_TRIGGERED_BUFFER
depends on I2C
help
- If you say yes here you get support for the Avago ADJD-S311-CR999
- digital color light sensor.
+ If you say yes here you get support for the Avago ADJD-S311-CR999
+ digital color light sensor.
- This driver can also be built as a module. If so, the module
- will be called adjd_s311.
+ This driver can also be built as a module. If so, the module
+ will be called adjd_s311.
config AL3320A
tristate "AL3320A ambient light sensor"
depends on I2C
help
- Say Y here if you want to build a driver for the Dyna Image AL3320A
- ambient light sensor.
+ Say Y here if you want to build a driver for the Dyna Image AL3320A
+ ambient light sensor.
- To compile this driver as a module, choose M here: the
- module will be called al3320a.
+ To compile this driver as a module, choose M here: the
+ module will be called al3320a.
config APDS9300
tristate "APDS9300 ambient light sensor"
depends on I2C
help
- Say Y here if you want to build a driver for the Avago APDS9300
- ambient light sensor.
+ Say Y here if you want to build a driver for the Avago APDS9300
+ ambient light sensor.
- To compile this driver as a module, choose M here: the
- module will be called apds9300.
+ To compile this driver as a module, choose M here: the
+ module will be called apds9300.
config APDS9960
tristate "Avago APDS9960 gesture/RGB/ALS/proximity sensor"
@@ -68,74 +68,74 @@ config BH1750
tristate "ROHM BH1750 ambient light sensor"
depends on I2C
help
- Say Y here to build support for the ROHM BH1710, BH1715, BH1721,
- BH1750, BH1751 ambient light sensors.
+ Say Y here to build support for the ROHM BH1710, BH1715, BH1721,
+ BH1750, BH1751 ambient light sensors.
- To compile this driver as a module, choose M here: the module will
- be called bh1750.
+ To compile this driver as a module, choose M here: the module will
+ be called bh1750.
config BH1780
tristate "ROHM BH1780 ambient light sensor"
depends on I2C
help
- Say Y here to build support for the ROHM BH1780GLI ambient
- light sensor.
+ Say Y here to build support for the ROHM BH1780GLI ambient
+ light sensor.
- To compile this driver as a module, choose M here: the module will
- be called bh1780.
+ To compile this driver as a module, choose M here: the module will
+ be called bh1780.
config CM32181
depends on I2C
tristate "CM32181 driver"
help
- Say Y here if you use cm32181.
- This option enables ambient light sensor using
- Capella cm32181 device driver.
+ Say Y here if you use cm32181.
+ This option enables ambient light sensor using
+ Capella cm32181 device driver.
- To compile this driver as a module, choose M here:
- the module will be called cm32181.
+ To compile this driver as a module, choose M here:
+ the module will be called cm32181.
config CM3232
depends on I2C
tristate "CM3232 ambient light sensor"
help
- Say Y here if you use cm3232.
- This option enables ambient light sensor using
- Capella Microsystems cm3232 device driver.
+ Say Y here if you use cm3232.
+ This option enables ambient light sensor using
+ Capella Microsystems cm3232 device driver.
- To compile this driver as a module, choose M here:
- the module will be called cm3232.
+ To compile this driver as a module, choose M here:
+ the module will be called cm3232.
config CM3323
depends on I2C
tristate "Capella CM3323 color light sensor"
help
- Say Y here if you want to build a driver for Capella CM3323
- color sensor.
+ Say Y here if you want to build a driver for Capella CM3323
+ color sensor.
- To compile this driver as a module, choose M here: the module will
- be called cm3323.
+ To compile this driver as a module, choose M here: the module will
+ be called cm3323.
config CM3605
tristate "Capella CM3605 ambient light and proximity sensor"
depends on OF
help
- Say Y here if you want to build a driver for Capella CM3605
- ambient light and short range proximity sensor.
+ Say Y here if you want to build a driver for Capella CM3605
+ ambient light and short range proximity sensor.
- To compile this driver as a module, choose M here: the module will
- be called cm3605.
+ To compile this driver as a module, choose M here: the module will
+ be called cm3605.
config CM36651
depends on I2C
tristate "CM36651 driver"
help
- Say Y here if you use cm36651.
- This option enables proximity & RGB sensor using
- Capella cm36651 device driver.
+ Say Y here if you use cm36651.
+ This option enables proximity & RGB sensor using
+ Capella cm36651 device driver.
- To compile this driver as a module, choose M here:
- the module will be called cm36651.
+ To compile this driver as a module, choose M here:
+ the module will be called cm36651.
config IIO_CROS_EC_LIGHT_PROX
tristate "ChromeOS EC Light and Proximity Sensors"
@@ -167,21 +167,21 @@ config SENSORS_ISL29018
select REGMAP_I2C
default n
help
- If you say yes here you get support for ambient light sensing and
- proximity infrared sensing from Intersil ISL29018.
- This driver will provide the measurements of ambient light intensity
- in lux, proximity infrared sensing and normal infrared sensing.
- Data from sensor is accessible via sysfs.
+ If you say yes here you get support for ambient light sensing and
+ proximity infrared sensing from Intersil ISL29018.
+ This driver will provide the measurements of ambient light intensity
+ in lux, proximity infrared sensing and normal infrared sensing.
+ Data from sensor is accessible via sysfs.
config SENSORS_ISL29028
tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
depends on I2C
select REGMAP_I2C
help
- Provides driver for the Intersil's ISL29028 device.
- This driver supports the sysfs interface to get the ALS, IR intensity,
- Proximity value via iio. The ISL29028 provides the concurrent sensing
- of ambient light and proximity.
+ Provides driver for the Intersil's ISL29028 device.
+ This driver supports the sysfs interface to get the ALS, IR intensity,
+ Proximity value via iio. The ISL29028 provides the concurrent sensing
+ of ambient light and proximity.
config ISL29125
tristate "Intersil ISL29125 digital color light sensor"
@@ -228,22 +228,22 @@ config JSA1212
depends on I2C
select REGMAP_I2C
help
- Say Y here if you want to build a IIO driver for JSA1212
- proximity & ALS sensor device.
+ Say Y here if you want to build a IIO driver for JSA1212
+ proximity & ALS sensor device.
- To compile this driver as a module, choose M here:
- the module will be called jsa1212.
+ To compile this driver as a module, choose M here:
+ the module will be called jsa1212.
config RPR0521
tristate "ROHM RPR0521 ALS and proximity sensor driver"
depends on I2C
select REGMAP_I2C
help
- Say Y here if you want to build support for ROHM's RPR0521
- ambient light and proximity sensor device.
+ Say Y here if you want to build support for ROHM's RPR0521
+ ambient light and proximity sensor device.
- To compile this driver as a module, choose M here:
- the module will be called rpr0521.
+ To compile this driver as a module, choose M here:
+ the module will be called rpr0521.
config SENSORS_LM3533
tristate "LM3533 ambient light sensor"
@@ -269,22 +269,22 @@ config LTR501
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- If you say yes here you get support for the Lite-On LTR-501ALS-01
- ambient light and proximity sensor. This driver also supports LTR-559
- ALS/PS or LTR-301 ALS sensors.
+ If you say yes here you get support for the Lite-On LTR-501ALS-01
+ ambient light and proximity sensor. This driver also supports LTR-559
+ ALS/PS or LTR-301 ALS sensors.
- This driver can also be built as a module. If so, the module
- will be called ltr501.
+ This driver can also be built as a module. If so, the module
+ will be called ltr501.
config LV0104CS
tristate "LV0104CS Ambient Light Sensor"
depends on I2C
help
- Say Y here if you want to build support for the On Semiconductor
- LV0104CS ambient light sensor.
+ Say Y here if you want to build support for the On Semiconductor
+ LV0104CS ambient light sensor.
- To compile this driver as a module, choose M here:
- the module will be called lv0104cs.
+ To compile this driver as a module, choose M here:
+ the module will be called lv0104cs.
config MAX44000
tristate "MAX44000 Ambient and Infrared Proximity Sensor"
@@ -293,11 +293,11 @@ config MAX44000
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say Y here if you want to build support for Maxim Integrated's
- MAX44000 ambient and infrared proximity sensor device.
+ Say Y here if you want to build support for Maxim Integrated's
+ MAX44000 ambient and infrared proximity sensor device.
- To compile this driver as a module, choose M here:
- the module will be called max44000.
+ To compile this driver as a module, choose M here:
+ the module will be called max44000.
config MAX44009
tristate "MAX44009 Ambient Light Sensor"
@@ -320,15 +320,15 @@ config OPT3001
opt3001.
config PA12203001
- tristate "TXC PA12203001 light and proximity sensor"
- depends on I2C
- select REGMAP_I2C
- help
- If you say yes here you get support for the TXC PA12203001
- ambient light and proximity sensor.
+ tristate "TXC PA12203001 light and proximity sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TXC PA12203001
+ ambient light and proximity sensor.
- This driver can also be built as a module. If so, the module
- will be called pa12203001.
+ This driver can also be built as a module. If so, the module
+ will be called pa12203001.
config SI1133
tristate "SI1133 UV Index Sensor and Ambient Light Sensor"
@@ -359,12 +359,12 @@ config STK3310
depends on I2C
select REGMAP_I2C
help
- Say yes here to get support for the Sensortek STK3310 ambient light
- and proximity sensor. The STK3311 model is also supported by this
- driver.
+ Say yes here to get support for the Sensortek STK3310 ambient light
+ and proximity sensor. The STK3311 model is also supported by this
+ driver.
- Choosing M will build the driver as a module. If so, the module
- will be called stk3310.
+ Choosing M will build the driver as a module. If so, the module
+ will be called stk3310.
config ST_UVIS25
tristate "STMicroelectronics UVIS25 sensor driver"
@@ -396,11 +396,11 @@ config TCS3414
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- If you say yes here you get support for the TAOS TCS3414
- family of digital color sensors.
+ If you say yes here you get support for the TAOS TCS3414
+ family of digital color sensors.
- This driver can also be built as a module. If so, the module
- will be called tcs3414.
+ This driver can also be built as a module. If so, the module
+ will be called tcs3414.
config TCS3472
tristate "TAOS TCS3472 color light-to-digital converter"
@@ -408,67 +408,67 @@ config TCS3472
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- If you say yes here you get support for the TAOS TCS3472
- family of color light-to-digital converters with IR filter.
+ If you say yes here you get support for the TAOS TCS3472
+ family of color light-to-digital converters with IR filter.
- This driver can also be built as a module. If so, the module
- will be called tcs3472.
+ This driver can also be built as a module. If so, the module
+ will be called tcs3472.
config SENSORS_TSL2563
tristate "TAOS TSL2560, TSL2561, TSL2562 and TSL2563 ambient light sensors"
depends on I2C
help
- If you say yes here you get support for the Taos TSL2560,
- TSL2561, TSL2562 and TSL2563 ambient light sensors.
+ If you say yes here you get support for the Taos TSL2560,
+ TSL2561, TSL2562 and TSL2563 ambient light sensors.
- This driver can also be built as a module. If so, the module
- will be called tsl2563.
+ This driver can also be built as a module. If so, the module
+ will be called tsl2563.
config TSL2583
tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
depends on I2C
help
- Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
- Access ALS data via iio, sysfs.
+ Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
+ Access ALS data via iio, sysfs.
config TSL2772
tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
depends on I2C
help
- Support for: tsl2571, tsl2671, tmd2671, tsl2771, tmd2771, tsl2572, tsl2672,
- tmd2672, tsl2772, tmd2772 devices.
- Provides iio_events and direct access via sysfs.
+ Support for: tsl2571, tsl2671, tmd2671, tsl2771, tmd2771, tsl2572, tsl2672,
+ tmd2672, tsl2772, tmd2772 devices.
+ Provides iio_events and direct access via sysfs.
config TSL4531
tristate "TAOS TSL4531 ambient light sensors"
depends on I2C
help
- Say Y here if you want to build a driver for the TAOS TSL4531 family
- of ambient light sensors with direct lux output.
+ Say Y here if you want to build a driver for the TAOS TSL4531 family
+ of ambient light sensors with direct lux output.
- To compile this driver as a module, choose M here: the
- module will be called tsl4531.
+ To compile this driver as a module, choose M here: the
+ module will be called tsl4531.
config US5182D
tristate "UPISEMI light and proximity sensor"
depends on I2C
help
- If you say yes here you get support for the UPISEMI US5182D
- ambient light and proximity sensor.
+ If you say yes here you get support for the UPISEMI US5182D
+ ambient light and proximity sensor.
- This driver can also be built as a module. If so, the module
- will be called us5182d.
+ This driver can also be built as a module. If so, the module
+ will be called us5182d.
config VCNL4000
tristate "VCNL4000/4010/4020/4200 combined ALS and proximity sensor"
depends on I2C
help
- Say Y here if you want to build a driver for the Vishay VCNL4000,
- VCNL4010, VCNL4020, VCNL4200 combined ambient light and proximity
- sensor.
+ Say Y here if you want to build a driver for the Vishay VCNL4000,
+ VCNL4010, VCNL4020, VCNL4200 combined ambient light and proximity
+ sensor.
- To compile this driver as a module, choose M here: the
- module will be called vcnl4000.
+ To compile this driver as a module, choose M here: the
+ module will be called vcnl4000.
config VCNL4035
tristate "VCNL4035 combined ALS and proximity sensor"
@@ -476,41 +476,41 @@ config VCNL4035
select REGMAP_I2C
depends on I2C
help
- Say Y here if you want to build a driver for the Vishay VCNL4035,
- combined ambient light (ALS) and proximity sensor. Currently only ALS
- function is available.
+ Say Y here if you want to build a driver for the Vishay VCNL4035,
+ combined ambient light (ALS) and proximity sensor. Currently only ALS
+ function is available.
- To compile this driver as a module, choose M here: the
- module will be called vcnl4035.
+ To compile this driver as a module, choose M here: the
+ module will be called vcnl4035.
config VEML6070
tristate "VEML6070 UV A light sensor"
depends on I2C
help
- Say Y here if you want to build a driver for the Vishay VEML6070 UV A
- light sensor.
+ Say Y here if you want to build a driver for the Vishay VEML6070 UV A
+ light sensor.
- To compile this driver as a module, choose M here: the
- module will be called veml6070.
+ To compile this driver as a module, choose M here: the
+ module will be called veml6070.
config VL6180
tristate "VL6180 ALS, range and proximity sensor"
depends on I2C
help
- Say Y here if you want to build a driver for the STMicroelectronics
- VL6180 combined ambient light, range and proximity sensor.
+ Say Y here if you want to build a driver for the STMicroelectronics
+ VL6180 combined ambient light, range and proximity sensor.
- To compile this driver as a module, choose M here: the
- module will be called vl6180.
+ To compile this driver as a module, choose M here: the
+ module will be called vl6180.
config ZOPT2201
tristate "ZOPT2201 ALS and UV B sensor"
depends on I2C
help
- Say Y here if you want to build a driver for the IDT
- ZOPT2201 ambient light and UV B sensor.
+ Say Y here if you want to build a driver for the IDT
+ ZOPT2201 ambient light and UV B sensor.
- To compile this driver as a module, choose M here: the
- module will be called zopt2201.
+ To compile this driver as a module, choose M here: the
+ module will be called zopt2201.
endmenu
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index fd1609e975ab..308ee6ff2e22 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cros_ec_light_prox - Driver for light and prox sensors behind CrosEC.
*
* Copyright (C) 2017 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
#include <linux/iio/common/cros_ec_sensors_core.h>
@@ -28,7 +19,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
/*
* We only represent one entry for light or proximity. EC is merging different
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 04fd0d4b6f19..b19e6559b980 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1,8 +1,9 @@
/*
- * vcnl4000.c - Support for Vishay VCNL4000/4010/4020/4200 combined ambient
+ * vcnl4000.c - Support for Vishay VCNL4000/4010/4020/4040/4200 combined ambient
* light and proximity sensor
*
* Copyright 2012 Peter Meerwald <pmeerw@pmeerw.net>
+ * Copyright 2019 Pursim SPC
*
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
@@ -10,13 +11,14 @@
*
* IIO driver for:
* VCNL4000/10/20 (7-bit I2C slave address 0x13)
+ * VCNL4040 (7-bit I2C slave address 0x60)
* VCNL4200 (7-bit I2C slave address 0x51)
*
* TODO:
* allow to adjust IR current
* proximity threshold and event handling
* periodic ALS/proximity measurement (VCNL4010/20)
- * interrupts (VCNL4010/20, VCNL4200)
+ * interrupts (VCNL4010/20/40, VCNL4200)
*/
#include <linux/module.h>
@@ -30,6 +32,7 @@
#define VCNL4000_DRV_NAME "vcnl4000"
#define VCNL4000_PROD_ID 0x01
#define VCNL4010_PROD_ID 0x02 /* for VCNL4020, VCNL4010 */
+#define VCNL4040_PROD_ID 0x86
#define VCNL4200_PROD_ID 0x58
#define VCNL4000_COMMAND 0x80 /* Command register */
@@ -49,6 +52,8 @@
#define VCNL4200_AL_DATA 0x09 /* Ambient light data */
#define VCNL4200_DEV_ID 0x0e /* Device ID, slave address and version */
+#define VCNL4040_DEV_ID 0x0c /* Device ID and version */
+
/* Bit masks for COMMAND register */
#define VCNL4000_AL_RDY BIT(6) /* ALS data ready? */
#define VCNL4000_PS_RDY BIT(5) /* proximity data ready? */
@@ -58,6 +63,7 @@
enum vcnl4000_device_ids {
VCNL4000,
VCNL4010,
+ VCNL4040,
VCNL4200,
};
@@ -90,6 +96,7 @@ static const struct i2c_device_id vcnl4000_id[] = {
{ "vcnl4000", VCNL4000 },
{ "vcnl4010", VCNL4010 },
{ "vcnl4020", VCNL4010 },
+ { "vcnl4040", VCNL4040 },
{ "vcnl4200", VCNL4200 },
{ }
};
@@ -128,31 +135,53 @@ static int vcnl4000_init(struct vcnl4000_data *data)
static int vcnl4200_init(struct vcnl4000_data *data)
{
- int ret;
+ int ret, id;
ret = i2c_smbus_read_word_data(data->client, VCNL4200_DEV_ID);
if (ret < 0)
return ret;
- if ((ret & 0xff) != VCNL4200_PROD_ID)
- return -ENODEV;
+ id = ret & 0xff;
+
+ if (id != VCNL4200_PROD_ID) {
+ ret = i2c_smbus_read_word_data(data->client, VCNL4040_DEV_ID);
+ if (ret < 0)
+ return ret;
+
+ id = ret & 0xff;
+
+ if (id != VCNL4040_PROD_ID)
+ return -ENODEV;
+ }
+
+ dev_dbg(&data->client->dev, "device id 0x%x\n", id);
data->rev = (ret >> 8) & 0xf;
/* Set defaults and enable both channels */
- ret = i2c_smbus_write_byte_data(data->client, VCNL4200_AL_CONF, 0x00);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF, 0);
if (ret < 0)
return ret;
- ret = i2c_smbus_write_byte_data(data->client, VCNL4200_PS_CONF1, 0x00);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, 0);
if (ret < 0)
return ret;
data->al_scale = 24000;
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
- /* Integration time is 50ms, but the experiments show 54ms in total. */
- data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
- data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+ switch (id) {
+ case VCNL4200_PROD_ID:
+ /* Integration time is 50ms, but the experiments */
+ /* show 54ms in total. */
+ data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
+ data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+ break;
+ case VCNL4040_PROD_ID:
+ /* Integration time is 80ms, add 10ms. */
+ data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
+ data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+ break;
+ }
data->vcnl4200_al.last_measurement = ktime_set(0, 0);
data->vcnl4200_ps.last_measurement = ktime_set(0, 0);
mutex_init(&data->vcnl4200_al.lock);
@@ -271,6 +300,12 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
.measure_light = vcnl4000_measure_light,
.measure_proximity = vcnl4000_measure_proximity,
},
+ [VCNL4040] = {
+ .prod = "VCNL4040",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ },
[VCNL4200] = {
.prod = "VCNL4200",
.init = vcnl4200_init,
@@ -363,9 +398,31 @@ static int vcnl4000_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
+static const struct of_device_id vcnl_4000_of_match[] = {
+ {
+ .compatible = "vishay,vcnl4000",
+ .data = "VCNL4000",
+ },
+ {
+ .compatible = "vishay,vcnl4010",
+ .data = "VCNL4010",
+ },
+ {
+ .compatible = "vishay,vcnl4010",
+ .data = "VCNL4020",
+ },
+ {
+ .compatible = "vishay,vcnl4200",
+ .data = "VCNL4200",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, vcnl_4000_of_match);
+
static struct i2c_driver vcnl4000_driver = {
.driver = {
.name = VCNL4000_DRV_NAME,
+ .of_match_table = vcnl_4000_of_match,
},
.probe = vcnl4000_probe,
.id_table = vcnl4000_id,
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 93be1f4c0f27..f4d0a6c0fde7 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -733,9 +733,8 @@ static int ak8974_probe(struct i2c_client *i2c,
ak8974->i2c = i2c;
mutex_init(&ak8974->lock);
- ret = of_iio_read_mount_matrix(&i2c->dev,
- "mount-matrix",
- &ak8974->orientation);
+ ret = iio_read_mount_matrix(&i2c->dev, "mount-matrix",
+ &ak8974->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index d430b80808ef..43d08c089792 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -746,12 +746,14 @@ static const struct iio_mount_matrix *
ak8975_get_mount_matrix(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
- return &((struct ak8975_data *)iio_priv(indio_dev))->orientation;
+ struct ak8975_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
}
static const struct iio_chan_spec_ext_info ak8975_ext_info[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, ak8975_get_mount_matrix),
- { },
+ { }
};
#define AK8975_CHANNEL(axis, index) \
@@ -792,7 +794,7 @@ static const struct acpi_device_id ak_acpi_match[] = {
{"AK09911", AK09911},
{"AKM9911", AK09911},
{"AK09912", AK09912},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
#endif
@@ -911,9 +913,8 @@ static int ak8975_probe(struct i2c_client *client,
data->eoc_irq = 0;
if (!pdata) {
- err = of_iio_read_mount_matrix(&client->dev,
- "mount-matrix",
- &data->orientation);
+ err = iio_read_mount_matrix(&client->dev, "mount-matrix",
+ &data->orientation);
if (err)
return err;
} else
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d91cb845e3d6..b0d8b036d9bb 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -143,6 +143,7 @@ struct bmc150_magn_data {
*/
struct mutex mutex;
struct regmap *regmap;
+ struct iio_mount_matrix orientation;
/* 4 x 32 bits for x, y z, 4 bytes align, 64 bits timestamp */
s32 buffer[6];
struct iio_trigger *dready_trig;
@@ -612,6 +613,20 @@ static ssize_t bmc150_magn_show_samp_freq_avail(struct device *dev,
return len;
}
+static const struct iio_mount_matrix *
+bmc150_magn_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bmc150_magn_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bmc150_magn_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bmc150_magn_get_mount_matrix),
+ { }
+};
+
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(bmc150_magn_show_samp_freq_avail);
static struct attribute *bmc150_magn_attributes[] = {
@@ -638,6 +653,7 @@ static const struct attribute_group bmc150_magn_attrs_group = {
.storagebits = 32, \
.endianness = IIO_LE \
}, \
+ .ext_info = bmc150_magn_ext_info, \
}
static const struct iio_chan_spec bmc150_magn_channels[] = {
@@ -861,6 +877,11 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
data->irq = irq;
data->dev = dev;
+ ret = iio_read_mount_matrix(dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+
if (!name && ACPI_HANDLE(dev))
name = bmc150_magn_match_acpi_device(dev);
diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index a75224cf99df..e3e22d2508d3 100644
--- a/drivers/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -43,6 +43,7 @@ struct hmc5843_data {
struct mutex lock;
struct regmap *regmap;
const struct hmc5843_chip_info *variant;
+ struct iio_mount_matrix orientation;
__be16 buffer[8];
};
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index ada142fb7aa3..05629ec56d80 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -237,6 +237,15 @@ int hmc5843_set_measurement_configuration(struct iio_dev *indio_dev,
return hmc5843_set_meas_conf(data, meas_conf);
}
+static const struct iio_mount_matrix *
+hmc5843_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct hmc5843_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
static const struct iio_enum hmc5843_meas_conf_enum = {
.items = hmc5843_meas_conf_modes,
.num_items = ARRAY_SIZE(hmc5843_meas_conf_modes),
@@ -247,7 +256,8 @@ static const struct iio_enum hmc5843_meas_conf_enum = {
static const struct iio_chan_spec_ext_info hmc5843_ext_info[] = {
IIO_ENUM("meas_conf", true, &hmc5843_meas_conf_enum),
IIO_ENUM_AVAILABLE("meas_conf", &hmc5843_meas_conf_enum),
- { },
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix),
+ { }
};
static const struct iio_enum hmc5983_meas_conf_enum = {
@@ -260,7 +270,8 @@ static const struct iio_enum hmc5983_meas_conf_enum = {
static const struct iio_chan_spec_ext_info hmc5983_ext_info[] = {
IIO_ENUM("meas_conf", true, &hmc5983_meas_conf_enum),
IIO_ENUM_AVAILABLE("meas_conf", &hmc5983_meas_conf_enum),
- { },
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix),
+ { }
};
static
@@ -635,6 +646,11 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
data->variant = &hmc5843_chip_info_tbl[id];
mutex_init(&data->lock);
+ ret = iio_read_mount_matrix(dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+
indio_dev->dev.parent = dev;
indio_dev->name = name;
indio_dev->info = &hmc5843_info;
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 3de7f4426ac4..86abba5827a2 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -58,8 +58,13 @@ static const struct regmap_config hmc5843_i2c_regmap_config = {
static int hmc5843_i2c_probe(struct i2c_client *cli,
const struct i2c_device_id *id)
{
+ struct regmap *regmap = devm_regmap_init_i2c(cli,
+ &hmc5843_i2c_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
return hmc5843_common_probe(&cli->dev,
- devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config),
+ regmap,
id->driver_data, id->name);
}
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 535f03a70d63..79b2b707f90e 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -58,6 +58,7 @@ static const struct regmap_config hmc5843_spi_regmap_config = {
static int hmc5843_spi_probe(struct spi_device *spi)
{
int ret;
+ struct regmap *regmap;
const struct spi_device_id *id = spi_get_device_id(spi);
spi->mode = SPI_MODE_3;
@@ -67,8 +68,12 @@ static int hmc5843_spi_probe(struct spi_device *spi)
if (ret)
return ret;
+ regmap = devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
return hmc5843_common_probe(&spi->dev,
- devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config),
+ regmap,
id->driver_data, id->name);
}
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index 6303cbe79903..a81a3a1b4dc8 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -26,26 +26,26 @@ config DS1803
module will be called ds1803.
config MAX5481
- tristate "Maxim MAX5481-MAX5484 Digital Potentiometer driver"
- depends on SPI
- help
- Say yes here to build support for the Maxim
- MAX5481, MAX5482, MAX5483, MAX5484 digital potentiometer
- chips.
+ tristate "Maxim MAX5481-MAX5484 Digital Potentiometer driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Maxim
+ MAX5481, MAX5482, MAX5483, MAX5484 digital potentiometer
+ chips.
- To compile this driver as a module, choose M here: the
- module will be called max5481.
+ To compile this driver as a module, choose M here: the
+ module will be called max5481.
config MAX5487
- tristate "Maxim MAX5487/MAX5488/MAX5489 Digital Potentiometer driver"
- depends on SPI
- help
- Say yes here to build support for the Maxim
- MAX5487, MAX5488, MAX5489 digital potentiometer
- chips.
-
- To compile this driver as a module, choose M here: the
- module will be called max5487.
+ tristate "Maxim MAX5487/MAX5488/MAX5489 Digital Potentiometer driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Maxim
+ MAX5487, MAX5488, MAX5489 digital potentiometer
+ chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max5487.
config MCP4018
tristate "Microchip MCP4017/18/19 Digital Potentiometer driver"
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index 90e895adf997..a0e5f530faa9 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -113,7 +113,7 @@ static int lmp91000_read(struct lmp91000_data *data, int channel, int *val)
return -EINVAL;
/* delay till first temperature reading is complete */
- if ((state != channel) && (channel == LMP91000_REG_MODECN_TEMP))
+ if (state != channel && channel == LMP91000_REG_MODECN_TEMP)
usleep_range(3000, 4000);
data->chan_select = channel != LMP91000_REG_MODECN_3LEAD;
@@ -211,12 +211,11 @@ static int lmp91000_read_config(struct lmp91000_data *data)
ret = of_property_read_u32(np, "ti,tia-gain-ohm", &val);
if (ret) {
- if (of_property_read_bool(np, "ti,external-tia-resistor"))
- val = 0;
- else {
- dev_err(dev, "no ti,tia-gain-ohm defined");
+ if (!of_property_read_bool(np, "ti,external-tia-resistor")) {
+ dev_err(dev, "no ti,tia-gain-ohm defined and external resistor not specified\n");
return ret;
}
+ val = 0;
}
ret = -EINVAL;
@@ -255,8 +254,8 @@ static int lmp91000_read_config(struct lmp91000_data *data)
regmap_write(data->regmap, LMP91000_REG_LOCK, 0);
regmap_write(data->regmap, LMP91000_REG_TIACN, reg);
- regmap_write(data->regmap, LMP91000_REG_REFCN, LMP91000_REG_REFCN_EXT_REF
- | LMP91000_REG_REFCN_50_ZERO);
+ regmap_write(data->regmap, LMP91000_REG_REFCN,
+ LMP91000_REG_REFCN_EXT_REF | LMP91000_REG_REFCN_50_ZERO);
regmap_write(data->regmap, LMP91000_REG_LOCK, 1);
return 0;
@@ -276,7 +275,6 @@ static int lmp91000_buffer_cb(const void *val, void *private)
static const struct iio_trigger_ops lmp91000_trigger_ops = {
};
-
static int lmp91000_buffer_preenable(struct iio_dev *indio_dev)
{
struct lmp91000_data *data = iio_priv(indio_dev);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index fe87d27779d9..3329d740c86c 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -164,6 +164,9 @@ static int bmp280_read_calib(struct bmp280_data *data,
return ret;
}
+ /* Toss the temperature calibration data into the entropy pool */
+ add_device_randomness(t_buf, sizeof(t_buf));
+
calib->T1 = le16_to_cpu(t_buf[T1]);
calib->T2 = le16_to_cpu(t_buf[T2]);
calib->T3 = le16_to_cpu(t_buf[T3]);
@@ -177,6 +180,9 @@ static int bmp280_read_calib(struct bmp280_data *data,
return ret;
}
+ /* Toss the pressure calibration data into the entropy pool */
+ add_device_randomness(p_buf, sizeof(p_buf));
+
calib->P1 = le16_to_cpu(p_buf[P1]);
calib->P2 = le16_to_cpu(p_buf[P2]);
calib->P3 = le16_to_cpu(p_buf[P3]);
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 87c07af9181f..034ce98d6e97 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cros_ec_baro - Driver for barometer sensor behind CrosEC.
*
* Copyright (C) 2017 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
#include <linux/iio/common/cros_ec_sensors_core.h>
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index b99367a89f81..e9f254ae3892 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -45,6 +45,18 @@ config LIDAR_LITE_V2
To compile this driver as a module, choose M here: the
module will be called pulsedlight-lite-v2
+config MB1232
+ tristate "MaxSonar I2CXL family ultrasonic sensors"
+ depends on I2C
+ help
+ Say Y to build a driver for the MaxBotix I2CXL family of
+ ultrasonic sensors with an I2C interface. It can be used to
+ measure the distance of objects. Supported types are mb1202,
+ mb1212, mb1222, mb1232, mb1242, mb7040, mb7137.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mb1232.
+
config RFD77402
tristate "RFD77402 ToF sensor"
depends on I2C
@@ -56,12 +68,19 @@ config RFD77402
module will be called rfd77402.
config SRF04
- tristate "Devantech SRF04 ultrasonic ranger sensor"
+ tristate "GPIO bitbanged ultrasonic ranger sensor (SRF04, MB1000)"
depends on GPIOLIB
help
- Say Y here to build a driver for Devantech SRF04 ultrasonic
+ Say Y here to build a driver for a GPIO bitbanged ultrasonic
ranger sensor. This driver can be used to measure the distance
of objects. It uses two GPIOs.
+ Currently supported types are:
+ - Devantech SRF04
+ - Maxbotix mb1000
+ - Maxbotix mb1010
+ - Maxbotix mb1020
+ - Maxbotix mb1030
+ - Maxbotix mb1040
To compile this driver as a module, choose M here: the
module will be called srf04.
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index 6d031f903c4c..0bb5f9de13d6 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_ISL29501) += isl29501.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
+obj-$(CONFIG_MB1232) += mb1232.o
obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index f130388a16a0..b591c63bd6c4 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -345,6 +345,14 @@ static SIMPLE_DEV_PM_OPS(as3935_pm_ops, as3935_suspend, as3935_resume);
#define AS3935_PM_OPS NULL
#endif
+static void as3935_stop_work(void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct as3935_state *st = iio_priv(indio_dev);
+
+ cancel_delayed_work_sync(&st->work);
+}
+
static int as3935_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -368,7 +376,6 @@ static int as3935_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
mutex_init(&st->lock);
- INIT_DELAYED_WORK(&st->work, as3935_event_work);
ret = of_property_read_u32(np,
"ams,tuning-capacitor-pf", &st->tune_cap);
@@ -414,22 +421,28 @@ static int as3935_probe(struct spi_device *spi)
iio_trigger_set_drvdata(trig, indio_dev);
trig->ops = &iio_interrupt_trigger_ops;
- ret = iio_trigger_register(trig);
+ ret = devm_iio_trigger_register(&spi->dev, trig);
if (ret) {
dev_err(&spi->dev, "failed to register trigger\n");
return ret;
}
- ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
- &as3935_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ iio_pollfunc_store_time,
+ as3935_trigger_handler, NULL);
if (ret) {
dev_err(&spi->dev, "cannot setup iio trigger\n");
- goto unregister_trigger;
+ return ret;
}
calibrate_as3935(st);
+ INIT_DELAYED_WORK(&st->work, as3935_event_work);
+ ret = devm_add_action(&spi->dev, as3935_stop_work, indio_dev);
+ if (ret)
+ return ret;
+
ret = devm_request_irq(&spi->dev, spi->irq,
&as3935_interrupt_handler,
IRQF_TRIGGER_RISING,
@@ -438,35 +451,15 @@ static int as3935_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev, "unable to request irq\n");
- goto unregister_buffer;
+ return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret < 0) {
dev_err(&spi->dev, "unable to register device\n");
- goto unregister_buffer;
+ return ret;
}
return 0;
-
-unregister_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-
-unregister_trigger:
- iio_trigger_unregister(st->trig);
-
- return ret;
-}
-
-static int as3935_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct as3935_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- iio_trigger_unregister(st->trig);
-
- return 0;
}
static const struct of_device_id as3935_of_match[] = {
@@ -488,7 +481,6 @@ static struct spi_driver as3935_driver = {
.pm = AS3935_PM_OPS,
},
.probe = as3935_probe,
- .remove = as3935_remove,
.id_table = as3935_id,
};
module_spi_driver(as3935_driver);
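The as3935 conversion above relies on devm teardown running in reverse registration order: the work-cancelling action is added before devm_request_irq(), so on unbind the IRQ is released first and only then is the delayed work cancelled. A generic sketch of that pattern (the helper names are illustrative):

#include <linux/device.h>
#include <linux/workqueue.h>

static void example_stop_work(void *data)
{
	cancel_delayed_work_sync(data);	/* data: struct delayed_work * */
}

static int example_setup(struct device *dev, struct delayed_work *dwork)
{
	/* registered before the IRQ, so it runs after the IRQ is freed */
	return devm_add_action(dev, example_stop_work, dwork);
}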
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
new file mode 100644
index 000000000000..166b3e6d7db8
--- /dev/null
+++ b/drivers/iio/proximity/mb1232.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * mb1232.c - Support for MaxBotix I2CXL-MaxSonar-EZ series ultrasonic
+ * ranger with i2c interface
+ * actually tested with mb1232 type
+ *
+ * Copyright (c) 2019 Andreas Klinger <ak@it-klinger.de>
+ *
+ * For details about the device see:
+ * https://www.maxbotix.com/documents/I2CXL-MaxSonar-EZ_Datasheet.pdf
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* registers of MaxSonar device */
+#define MB1232_RANGE_COMMAND 0x51 /* Command for reading range */
+#define MB1232_ADDR_UNLOCK_1 0xAA /* Command 1 for changing address */
+#define MB1232_ADDR_UNLOCK_2 0xA5 /* Command 2 for changing address */
+
+struct mb1232_data {
+ struct i2c_client *client;
+
+ struct mutex lock;
+
+ /*
+ * Optionally a GPIO can be used to announce when ranging has
+ * finished.
+ * Since we only use its falling edge, we request just the
+ * interrupt that announces when data is ready to be read.
+ */
+ struct completion ranging;
+ int irqnr;
+};
+
+static irqreturn_t mb1232_handle_irq(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct mb1232_data *data = iio_priv(indio_dev);
+
+ complete(&data->ranging);
+
+ return IRQ_HANDLED;
+}
+
+static s16 mb1232_read_distance(struct mb1232_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+ s16 distance;
+ __be16 buf;
+
+ mutex_lock(&data->lock);
+
+ reinit_completion(&data->ranging);
+
+ ret = i2c_smbus_write_byte(client, MB1232_RANGE_COMMAND);
+ if (ret < 0) {
+ dev_err(&client->dev, "write command - err: %d\n", ret);
+ goto error_unlock;
+ }
+
+ if (data->irqnr >= 0) {
+ /* it cannot take more than 100 ms */
+ ret = wait_for_completion_killable_timeout(&data->ranging,
+ HZ/10);
+ if (ret < 0)
+ goto error_unlock;
+ else if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto error_unlock;
+ }
+ } else {
+ /* use simple sleep if announce irq is not connected */
+ msleep(15);
+ }
+
+ ret = i2c_master_recv(client, (char *)&buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c_master_recv: ret=%d\n", ret);
+ goto error_unlock;
+ }
+
+ distance = __be16_to_cpu(buf);
+ /* a negative value here would be mistaken for an error code */
+ if (distance < 0) {
+ dev_err(&client->dev, "distance=%d\n", distance);
+ ret = -EINVAL;
+ goto error_unlock;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return distance;
+
+error_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static irqreturn_t mb1232_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mb1232_data *data = iio_priv(indio_dev);
+ /*
+ * triggered buffer
+ * 16-bit channel + 48-bit padding + 64-bit timestamp
+ */
+ s16 buffer[8] = { 0 };
+
+ buffer[0] = mb1232_read_distance(data);
+ if (buffer[0] < 0)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int mb1232_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long mask)
+{
+ struct mb1232_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (channel->type != IIO_DISTANCE)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = mb1232_read_distance(data);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* 1 LSB is 1 cm */
+ *val = 0;
+ *val2 = 10000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec mb1232_channels[] = {
+ {
+ .type = IIO_DISTANCE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct iio_info mb1232_info = {
+ .read_raw = mb1232_read_raw,
+};
+
+static int mb1232_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct mb1232_data *data;
+ int ret;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_WRITE_BYTE))
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ indio_dev->info = &mb1232_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mb1232_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mb1232_channels);
+
+ mutex_init(&data->lock);
+
+ init_completion(&data->ranging);
+
+ data->irqnr = irq_of_parse_and_map(dev->of_node, 0);
+ if (data->irqnr <= 0) {
+ /* usage of interrupt is optional */
+ data->irqnr = -1;
+ } else {
+ ret = devm_request_irq(dev, data->irqnr, mb1232_handle_irq,
+ IRQF_TRIGGER_FALLING, id->name, indio_dev);
+ if (ret < 0) {
+ dev_err(dev, "request_irq: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time, mb1232_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(dev, "setup of iio triggered buffer failed\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id of_mb1232_match[] = {
+ { .compatible = "maxbotix,mb1202", },
+ { .compatible = "maxbotix,mb1212", },
+ { .compatible = "maxbotix,mb1222", },
+ { .compatible = "maxbotix,mb1232", },
+ { .compatible = "maxbotix,mb1242", },
+ { .compatible = "maxbotix,mb7040", },
+ { .compatible = "maxbotix,mb7137", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_mb1232_match);
+
+static const struct i2c_device_id mb1232_id[] = {
+ { "maxbotix-mb1202", },
+ { "maxbotix-mb1212", },
+ { "maxbotix-mb1222", },
+ { "maxbotix-mb1232", },
+ { "maxbotix-mb1242", },
+ { "maxbotix-mb7040", },
+ { "maxbotix-mb7137", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mb1232_id);
+
+static struct i2c_driver mb1232_driver = {
+ .driver = {
+ .name = "maxbotix-mb1232",
+ .of_match_table = of_mb1232_match,
+ },
+ .probe = mb1232_probe,
+ .id_table = mb1232_id,
+};
+module_i2c_driver(mb1232_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("Maxbotix I2CXL-MaxSonar i2c ultrasonic ranger driver");
+MODULE_LICENSE("GPL");
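For orientation, the transaction mb1232_read_distance() performs (write the range command, wait, read two big-endian bytes giving the distance in cm) can be reproduced from user space through i2c-dev. This is an editor's sketch only; the bus number and the 0x70 slave address are assumptions, not taken from the patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	int fd = open("/dev/i2c-1", O_RDWR);		/* assumed bus */
	uint8_t cmd = 0x51;				/* MB1232_RANGE_COMMAND */
	uint8_t buf[2];

	if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x70) < 0)	/* assumed address */
		return 1;
	if (write(fd, &cmd, 1) != 1)			/* start a ranging cycle */
		return 1;
	usleep(100000);					/* worst case is ~100 ms */
	if (read(fd, buf, 2) != 2)			/* distance, big endian, in cm */
		return 1;
	printf("distance: %u cm\n", (buf[0] << 8) | buf[1]);
	close(fd);
	return 0;
}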
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 09c7b9c095b0..1bad4018d1aa 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -23,7 +23,7 @@
* trig: --+ +------------------------------------------------------
* ^ ^
* |<->|
- * udelay(10)
+ * udelay(trigger_pulse_us)
*
* ultra +-+ +-+ +-+
* sonic | | | | | |
@@ -48,6 +48,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/sched.h>
@@ -56,6 +57,10 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+struct srf04_cfg {
+ unsigned long trigger_pulse_us;
+};
+
struct srf04_data {
struct device *dev;
struct gpio_desc *gpiod_trig;
@@ -66,6 +71,15 @@ struct srf04_data {
ktime_t ts_falling;
struct completion rising;
struct completion falling;
+ const struct srf04_cfg *cfg;
+};
+
+static const struct srf04_cfg srf04_cfg = {
+ .trigger_pulse_us = 10,
+};
+
+static const struct srf04_cfg mb_lv_cfg = {
+ .trigger_pulse_us = 20,
};
static irqreturn_t srf04_handle_irq(int irq, void *dev_id)
@@ -102,7 +116,7 @@ static int srf04_read(struct srf04_data *data)
reinit_completion(&data->falling);
gpiod_set_value(data->gpiod_trig, 1);
- udelay(10);
+ udelay(data->cfg->trigger_pulse_us);
gpiod_set_value(data->gpiod_trig, 0);
/* it cannot take more than 20 ms */
@@ -215,6 +229,18 @@ static const struct iio_chan_spec srf04_chan_spec[] = {
},
};
+static const struct of_device_id of_srf04_match[] = {
+ { .compatible = "devantech,srf04", .data = &srf04_cfg},
+ { .compatible = "maxbotix,mb1000", .data = &mb_lv_cfg},
+ { .compatible = "maxbotix,mb1010", .data = &mb_lv_cfg},
+ { .compatible = "maxbotix,mb1020", .data = &mb_lv_cfg},
+ { .compatible = "maxbotix,mb1030", .data = &mb_lv_cfg},
+ { .compatible = "maxbotix,mb1040", .data = &mb_lv_cfg},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_srf04_match);
+
static int srf04_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -230,6 +256,7 @@ static int srf04_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = dev;
+ data->cfg = of_match_device(of_srf04_match, dev)->data;
mutex_init(&data->lock);
init_completion(&data->rising);
@@ -280,13 +307,6 @@ static int srf04_probe(struct platform_device *pdev)
return devm_iio_device_register(dev, indio_dev);
}
-static const struct of_device_id of_srf04_match[] = {
- { .compatible = "devantech,srf04", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, of_srf04_match);
-
static struct platform_driver srf04_driver = {
.probe = srf04_probe,
.driver = {
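The srf04 change above is the usual per-compatible configuration pattern: each of_device_id entry points at a static config struct through .data, and probe() picks up whichever entry matched. A minimal sketch with illustrative foo_* names; it uses of_device_get_match_data(), which is equivalent to the open-coded of_match_device(...)->data in the patch:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_cfg {
	unsigned long trigger_pulse_us;
};

static const struct foo_cfg foo_fast_cfg = { .trigger_pulse_us = 10 };
static const struct foo_cfg foo_slow_cfg = { .trigger_pulse_us = 20 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-fast", .data = &foo_fast_cfg },
	{ .compatible = "vendor,foo-slow", .data = &foo_slow_cfg },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct platform_device *pdev)
{
	/* returns the .data pointer of the matched of_device_id entry */
	const struct foo_cfg *cfg = of_device_get_match_data(&pdev->dev);

	if (!cfg)
		return -EINVAL;
	dev_info(&pdev->dev, "trigger pulse %lu us\n", cfg->trigger_pulse_us);
	return 0;
}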
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index 82e4a62745e2..c185cbee25c7 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -66,14 +66,14 @@ config TMP006
be called tmp006.
config TMP007
- tristate "TMP007 infrared thermopile sensor with Integrated Math Engine"
- depends on I2C
- help
- If you say yes here you get support for the Texas Instruments
- TMP007 infrared thermopile sensor with Integrated Math Engine.
+ tristate "TMP007 infrared thermopile sensor with Integrated Math Engine"
+ depends on I2C
+ help
+ If you say yes here you get support for the Texas Instruments
+ TMP007 infrared thermopile sensor with Integrated Math Engine.
- This driver can also be built as a module. If so, the module will
- be called tmp007.
+ This driver can also be built as a module. If so, the module will
+ be called tmp007.
config TSYS01
tristate "Measurement Specialties TSYS01 temperature sensor using I2C bus connection"
@@ -97,4 +97,14 @@ config TSYS02D
This driver can also be built as a module. If so, the module will
be called tsys02d.
+config MAX31856
+ tristate "MAX31856 thermocouple sensor"
+ depends on SPI
+ help
+	  If you say yes here you get support for the MAX31856
+	  thermocouple sensor chip connected via SPI.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31856.
+
endmenu
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index 34a31db0bb63..baca4776ca0d 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
+obj-$(CONFIG_MAX31856) += max31856.o
obj-$(CONFIG_MLX90614) += mlx90614.o
obj-$(CONFIG_MLX90632) += mlx90632.o
obj-$(CONFIG_TMP006) += tmp006.o
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
new file mode 100644
index 000000000000..f184ba5601d9
--- /dev/null
+++ b/drivers/iio/temperature/max31856.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/* max31856.c
+ *
+ * Maxim MAX31856 thermocouple sensor driver
+ *
+ * Copyright (C) 2018-2019 Rockwell Collins
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <dt-bindings/iio/temperature/thermocouple.h>
+/*
+ * The MSB of the register value determines whether the following byte will
+ * be written or read. If it is 0, one or more byte reads will follow.
+ */
+#define MAX31856_RD_WR_BIT BIT(7)
+
+#define MAX31856_CR0_AUTOCONVERT BIT(7)
+#define MAX31856_CR0_1SHOT BIT(6)
+#define MAX31856_CR0_OCFAULT BIT(4)
+#define MAX31856_CR0_OCFAULT_MASK GENMASK(5, 4)
+#define MAX31856_TC_TYPE_MASK GENMASK(3, 0)
+#define MAX31856_FAULT_OVUV BIT(1)
+#define MAX31856_FAULT_OPEN BIT(0)
+
+/* The MAX31856 registers */
+#define MAX31856_CR0_REG 0x00
+#define MAX31856_CR1_REG 0x01
+#define MAX31856_MASK_REG 0x02
+#define MAX31856_CJHF_REG 0x03
+#define MAX31856_CJLF_REG 0x04
+#define MAX31856_LTHFTH_REG 0x05
+#define MAX31856_LTHFTL_REG 0x06
+#define MAX31856_LTLFTH_REG 0x07
+#define MAX31856_LTLFTL_REG 0x08
+#define MAX31856_CJTO_REG 0x09
+#define MAX31856_CJTH_REG 0x0A
+#define MAX31856_CJTL_REG 0x0B
+#define MAX31856_LTCBH_REG 0x0C
+#define MAX31856_LTCBM_REG 0x0D
+#define MAX31856_LTCBL_REG 0x0E
+#define MAX31856_SR_REG 0x0F
+
+static const struct iio_chan_spec max31856_channels[] = {
+ { /* Thermocouple Temperature */
+ .type = IIO_TEMP,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+ { /* Cold Junction Temperature */
+ .type = IIO_TEMP,
+ .channel2 = IIO_MOD_TEMP_AMBIENT,
+ .modified = 1,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+struct max31856_data {
+ struct spi_device *spi;
+ u32 thermocouple_type;
+};
+
+static int max31856_read(struct max31856_data *data, u8 reg,
+ u8 val[], unsigned int read_size)
+{
+ return spi_write_then_read(data->spi, &reg, 1, val, read_size);
+}
+
+static int max31856_write(struct max31856_data *data, u8 reg,
+ unsigned int val)
+{
+ u8 buf[2];
+
+ buf[0] = reg | (MAX31856_RD_WR_BIT);
+ buf[1] = val;
+
+ return spi_write(data->spi, buf, 2);
+}
+
+static int max31856_init(struct max31856_data *data)
+{
+ int ret;
+ u8 reg_cr0_val, reg_cr1_val;
+
+	/* Start by switching to Off mode before making changes, as some
+	 * settings are recommended to be modified only while the device
+	 * is off.
+	 */
+ ret = max31856_read(data, MAX31856_CR0_REG, &reg_cr0_val, 1);
+ if (ret)
+ return ret;
+
+ reg_cr0_val &= ~MAX31856_CR0_AUTOCONVERT;
+ ret = max31856_write(data, MAX31856_CR0_REG, reg_cr0_val);
+ if (ret)
+ return ret;
+
+ /* Set thermocouple type based on dts property */
+ ret = max31856_read(data, MAX31856_CR1_REG, &reg_cr1_val, 1);
+ if (ret)
+ return ret;
+
+ reg_cr1_val &= ~MAX31856_TC_TYPE_MASK;
+ reg_cr1_val |= data->thermocouple_type;
+ ret = max31856_write(data, MAX31856_CR1_REG, reg_cr1_val);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable Open circuit fault detection
+ * Read datasheet for more information: Table 4.
+ * Value 01 means : Enabled (Once every 16 conversions)
+ */
+ reg_cr0_val &= ~MAX31856_CR0_OCFAULT_MASK;
+ reg_cr0_val |= MAX31856_CR0_OCFAULT;
+
+ /* Set Auto Conversion Mode */
+ reg_cr0_val &= ~MAX31856_CR0_1SHOT;
+ reg_cr0_val |= MAX31856_CR0_AUTOCONVERT;
+
+ return max31856_write(data, MAX31856_CR0_REG, reg_cr0_val);
+}
+
+static int max31856_thermocouple_read(struct max31856_data *data,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ int ret, offset_cjto;
+ u8 reg_val[3];
+
+ switch (chan->channel2) {
+ case IIO_NO_MOD:
+ /*
+ * Multibyte Read
+ * MAX31856_LTCBH_REG, MAX31856_LTCBM_REG, MAX31856_LTCBL_REG
+ */
+ ret = max31856_read(data, MAX31856_LTCBH_REG, reg_val, 3);
+ if (ret)
+ return ret;
+ /* Skip last 5 dead bits of LTCBL */
+ *val = (reg_val[0] << 16 | reg_val[1] << 8 | reg_val[2]) >> 5;
+		/* Check 7th bit of LTCBH reg. value for sign */
+ if (reg_val[0] & 0x80)
+ *val -= 0x80000;
+ break;
+
+ case IIO_MOD_TEMP_AMBIENT:
+ /*
+ * Multibyte Read
+ * MAX31856_CJTO_REG, MAX31856_CJTH_REG, MAX31856_CJTL_REG
+ */
+ ret = max31856_read(data, MAX31856_CJTO_REG, reg_val, 3);
+ if (ret)
+ return ret;
+ /* Get Cold Junction Temp. offset register value */
+ offset_cjto = reg_val[0];
+ /* Get CJTH and CJTL value and skip last 2 dead bits of CJTL */
+ *val = (reg_val[1] << 8 | reg_val[2]) >> 2;
+ /* As per datasheet add offset into CJTH and CJTL */
+ *val += offset_cjto;
+ /* Check 7th bit of CJTH reg. value for sign */
+ if (reg_val[1] & 0x80)
+ *val -= 0x4000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = max31856_read(data, MAX31856_SR_REG, reg_val, 1);
+ if (ret)
+ return ret;
+ /* Check for over/under voltage or open circuit fault */
+ if (reg_val[0] & (MAX31856_FAULT_OVUV | MAX31856_FAULT_OPEN))
+ return -EIO;
+
+ return ret;
+}
+
+static int max31856_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max31856_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = max31856_thermocouple_read(data, chan, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel2) {
+ case IIO_MOD_TEMP_AMBIENT:
+ /* Cold junction Temp. Data resolution is 0.015625 */
+ *val = 15;
+ *val2 = 625000; /* 1000 * 0.015625 */
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ /* Thermocouple Temp. Data resolution is 0.0078125 */
+ *val = 7;
+			*val2 = 812500; /* 1000 * 0.0078125 */
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static ssize_t show_fault(struct device *dev, u8 faultbit, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max31856_data *data = iio_priv(indio_dev);
+ u8 reg_val;
+ int ret;
+ bool fault;
+
+ ret = max31856_read(data, MAX31856_SR_REG, &reg_val, 1);
+ if (ret)
+ return ret;
+
+ fault = reg_val & faultbit;
+
+ return sprintf(buf, "%d\n", fault);
+}
+
+static ssize_t show_fault_ovuv(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return show_fault(dev, MAX31856_FAULT_OVUV, buf);
+}
+
+static ssize_t show_fault_oc(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return show_fault(dev, MAX31856_FAULT_OPEN, buf);
+}
+
+static IIO_DEVICE_ATTR(fault_ovuv, 0444, show_fault_ovuv, NULL, 0);
+static IIO_DEVICE_ATTR(fault_oc, 0444, show_fault_oc, NULL, 0);
+
+static struct attribute *max31856_attributes[] = {
+ &iio_dev_attr_fault_ovuv.dev_attr.attr,
+ &iio_dev_attr_fault_oc.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group max31856_group = {
+ .attrs = max31856_attributes,
+};
+
+static const struct iio_info max31856_info = {
+ .read_raw = max31856_read_raw,
+ .attrs = &max31856_group,
+};
+
+static int max31856_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct max31856_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->spi = spi;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->info = &max31856_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max31856_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max31856_channels);
+
+ ret = of_property_read_u32(spi->dev.of_node, "thermocouple-type",
+ &data->thermocouple_type);
+
+ if (ret) {
+ dev_info(&spi->dev,
+ "Could not read thermocouple type DT property, configuring as a K-Type\n");
+ data->thermocouple_type = THERMOCOUPLE_TYPE_K;
+ }
+
+ /*
+ * no need to translate values as the supported types
+ * have the same value as the #defines
+ */
+ switch (data->thermocouple_type) {
+ case THERMOCOUPLE_TYPE_B:
+ case THERMOCOUPLE_TYPE_E:
+ case THERMOCOUPLE_TYPE_J:
+ case THERMOCOUPLE_TYPE_K:
+ case THERMOCOUPLE_TYPE_N:
+ case THERMOCOUPLE_TYPE_R:
+ case THERMOCOUPLE_TYPE_S:
+ case THERMOCOUPLE_TYPE_T:
+ break;
+ default:
+		dev_err(&spi->dev,
+			"error: thermocouple-type %u not supported by max31856\n",
+			data->thermocouple_type);
+ return -EINVAL;
+ }
+
+ ret = max31856_init(data);
+ if (ret) {
+ dev_err(&spi->dev, "error: Failed to configure max31856\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id max31856_id[] = {
+ { "max31856", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max31856_id);
+
+static const struct of_device_id max31856_of_match[] = {
+ { .compatible = "maxim,max31856" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max31856_of_match);
+
+static struct spi_driver max31856_driver = {
+ .driver = {
+ .name = "max31856",
+ .of_match_table = max31856_of_match,
+ },
+ .probe = max31856_probe,
+ .id_table = max31856_id,
+};
+module_spi_driver(max31856_driver);
+
+MODULE_AUTHOR("Paresh Chaudhary <paresh.chaudhary@rockwellcollins.com>");
+MODULE_AUTHOR("Patrick Havelange <patrick.havelange@essensium.com>");
+MODULE_DESCRIPTION("Maxim MAX31856 thermocouple sensor driver");
+MODULE_LICENSE("GPL");
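As a sanity check on the scale values returned above, the raw codes can be turned into milli-degrees Celsius with integer math: 0.0078125 °C/LSB is 125/16 m°C and 0.015625 °C/LSB is 125/8 m°C. The helpers below are an editor's sketch with illustrative example_* names, not part of the driver:

/* thermocouple channel: raw * 0.0078125 °C == raw * 125/16 m°C */
static long example_tc_raw_to_mdegc(int raw)
{
	return ((long)raw * 125) / 16;
}

/* cold-junction channel: raw * 0.015625 °C == raw * 125/8 m°C */
static long example_cj_raw_to_mdegc(int raw)
{
	return ((long)raw * 125) / 8;
}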
diff --git a/drivers/iio/trigger/iio-trig-loop.c b/drivers/iio/trigger/iio-trig-loop.c
index 94a90e0a3fdb..9258d3cf149b 100644
--- a/drivers/iio/trigger/iio-trig-loop.c
+++ b/drivers/iio/trigger/iio-trig-loop.c
@@ -60,7 +60,7 @@ static int iio_loop_trigger_set_state(struct iio_trigger *trig, bool state)
if (state) {
loop_trig->task = kthread_run(iio_loop_thread,
trig, trig->name);
- if (unlikely(IS_ERR(loop_trig->task))) {
+ if (IS_ERR(loop_trig->task)) {
dev_err(&trig->dev,
"failed to create trigger loop thread\n");
return PTR_ERR(loop_trig->task);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index a1fb840de45d..cbfbea49f126 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -25,7 +25,6 @@ config INFINIBAND_USER_MAD
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
- select ANON_INODES
depends on MMU
---help---
Userspace InfiniBand access support. This enables the
@@ -94,6 +93,7 @@ source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
+source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/i40iw/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0dce94e3c495..2f7d14159841 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -42,9 +42,10 @@
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
-#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
@@ -86,8 +87,8 @@ static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
return false;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_addr_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_addr_policy, NULL);
if (ret)
return false;
@@ -351,7 +352,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
if (family == AF_INET) {
rt = container_of(dst, struct rtable, dst);
- return rt->rt_uses_gateway;
+ return rt->rt_gw_family == AF_INET;
}
rt6 = container_of(dst, struct rt6_info, dst);
@@ -730,8 +731,8 @@ int roce_resolve_route_from_path(struct sa_path_rec *rec,
if (rec->roce.route_resolved)
return 0;
- rdma_gid2ip(&sgid._sockaddr, &rec->sgid);
- rdma_gid2ip(&dgid._sockaddr, &rec->dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid);
if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
return -EINVAL;
@@ -742,7 +743,7 @@ int roce_resolve_route_from_path(struct sa_path_rec *rec,
dev_addr.net = &init_net;
dev_addr.sgid_attr = attr;
- ret = addr_resolve(&sgid._sockaddr, &dgid._sockaddr,
+ ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid,
&dev_addr, false, true, 0);
if (ret)
return ret;
@@ -814,22 +815,22 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
struct rdma_dev_addr dev_addr;
struct resolve_cb_context ctx;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
int ret;
- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
dev_addr.net = &init_net;
dev_addr.sgid_attr = sgid_attr;
init_completion(&ctx.comp);
- ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, true, &ctx);
+ ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr,
+ (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
+ resolve_cb, true, &ctx);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 43c67e5f43c6..18e476b3ced0 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -78,11 +78,22 @@ enum gid_table_entry_state {
GID_TABLE_ENTRY_PENDING_DEL = 3,
};
+struct roce_gid_ndev_storage {
+ struct rcu_head rcu_head;
+ struct net_device *ndev;
+};
+
struct ib_gid_table_entry {
struct kref kref;
struct work_struct del_work;
struct ib_gid_attr attr;
void *context;
+	/* Store the ndev pointer so the reference can be released later
+	 * from call_rcu context, because by that time gid_table_entry
+	 * and attr might already be freed. So keep a copy of it here;
+	 * ndev_storage itself is freed by the rcu callback.
+ */
+ struct roce_gid_ndev_storage *ndev_storage;
enum gid_table_entry_state state;
};
@@ -206,6 +217,20 @@ static void schedule_free_gid(struct kref *kref)
queue_work(ib_wq, &entry->del_work);
}
+static void put_gid_ndev(struct rcu_head *head)
+{
+ struct roce_gid_ndev_storage *storage =
+ container_of(head, struct roce_gid_ndev_storage, rcu_head);
+
+ WARN_ON(!storage->ndev);
+	/* At this point it's safe to release the netdev reference,
+	 * as all callers working on gid_attr->ndev are done
+	 * using this netdev.
+	 */
+ dev_put(storage->ndev);
+ kfree(storage);
+}
+
static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
struct ib_device *device = entry->attr.device;
@@ -228,8 +253,8 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
/* Now this index is ready to be allocated */
write_unlock_irq(&table->rwlock);
- if (entry->attr.ndev)
- dev_put(entry->attr.ndev);
+ if (entry->ndev_storage)
+ call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
kfree(entry);
}
@@ -266,14 +291,25 @@ static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
struct ib_gid_table_entry *entry;
+ struct net_device *ndev;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return NULL;
+
+ ndev = rcu_dereference_protected(attr->ndev, 1);
+ if (ndev) {
+ entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
+ GFP_KERNEL);
+ if (!entry->ndev_storage) {
+ kfree(entry);
+ return NULL;
+ }
+ dev_hold(ndev);
+ entry->ndev_storage->ndev = ndev;
+ }
kref_init(&entry->kref);
memcpy(&entry->attr, attr, sizeof(*attr));
- if (entry->attr.ndev)
- dev_hold(entry->attr.ndev);
INIT_WORK(&entry->del_work, free_gid_work);
entry->state = GID_TABLE_ENTRY_INVALID;
return entry;
@@ -343,6 +379,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
static void del_gid(struct ib_device *ib_dev, u8 port,
struct ib_gid_table *table, int ix)
{
+ struct roce_gid_ndev_storage *ndev_storage;
struct ib_gid_table_entry *entry;
lockdep_assert_held(&table->lock);
@@ -360,6 +397,13 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
table->data_vec[ix] = NULL;
write_unlock_irq(&table->rwlock);
+ ndev_storage = entry->ndev_storage;
+ if (ndev_storage) {
+ entry->ndev_storage = NULL;
+ rcu_assign_pointer(entry->attr.ndev, NULL);
+ call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
+ }
+
if (rdma_cap_roce_gid_table(ib_dev, port))
ib_dev->ops.del_gid(&entry->attr, &entry->context);
@@ -543,30 +587,11 @@ out_unlock:
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
union ib_gid *gid, struct ib_gid_attr *attr)
{
- struct net_device *idev;
- unsigned long mask;
- int ret;
-
- idev = ib_device_get_netdev(ib_dev, port);
- if (idev && attr->ndev != idev) {
- union ib_gid default_gid;
-
- /* Adding default GIDs is not permitted */
- make_default_gid(idev, &default_gid);
- if (!memcmp(gid, &default_gid, sizeof(*gid))) {
- dev_put(idev);
- return -EPERM;
- }
- }
- if (idev)
- dev_put(idev);
-
- mask = GID_ATTR_FIND_MASK_GID |
- GID_ATTR_FIND_MASK_GID_TYPE |
- GID_ATTR_FIND_MASK_NETDEV;
+ unsigned long mask = GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_GID_TYPE |
+ GID_ATTR_FIND_MASK_NETDEV;
- ret = __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
- return ret;
+ return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}
static int
@@ -1263,11 +1288,72 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
read_lock_irqsave(&table->rwlock, flags);
valid = is_gid_entry_valid(table->data_vec[attr->index]);
- if (valid && attr->ndev && (READ_ONCE(attr->ndev->flags) & IFF_UP))
- ndev = attr->ndev;
+ if (valid) {
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev ||
+ (ndev && ((READ_ONCE(ndev->flags) & IFF_UP) == 0)))
+ ndev = ERR_PTR(-ENODEV);
+ }
read_unlock_irqrestore(&table->rwlock, flags);
return ndev;
}
+EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
+
+static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
+{
+ u16 *vlan_id = data;
+
+ if (is_vlan_dev(lower_dev))
+ *vlan_id = vlan_dev_vlan_id(lower_dev);
+
+	/* We are only interested in the first-level vlan device, so
+	 * always return 1 to stop iterating over deeper lower devices.
+	 */
+ return 1;
+}
+
+/**
+ * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
+ * of a GID entry.
+ *
+ * @attr: GID attribute pointer whose L2 fields are to be read
+ * @vlan_id: Pointer to the vlan id to fill in if the GID entry has a
+ * vlan id. It is optional.
+ * @smac: Pointer to the smac to fill in for a GID entry. It is optional.
+ *
+ * rdma_read_gid_l2_fields() returns 0 on success, filling in the vlan id
+ * (if the gid entry has one) and the source MAC; otherwise it returns an error.
+ */
+int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
+ u16 *vlan_id, u8 *smac)
+{
+ struct net_device *ndev;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ if (smac)
+ ether_addr_copy(smac, ndev->dev_addr);
+ if (vlan_id) {
+ *vlan_id = 0xffff;
+ if (is_vlan_dev(ndev)) {
+ *vlan_id = vlan_dev_vlan_id(ndev);
+ } else {
+			/* If the netdev is an upper device and its lower
+			 * device is a vlan device, use the vlan id of the
+			 * lower vlan device for this gid entry.
+			 */
+ netdev_walk_all_lower_dev_rcu(attr->ndev,
+ get_lower_dev_vlan, vlan_id);
+ }
+ }
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(rdma_read_gid_l2_fields);
static int config_non_roce_gid_cache(struct ib_device *device,
u8 port, int gid_tbl_len)
@@ -1392,7 +1478,6 @@ static void ib_cache_event(struct ib_event_handler *handler,
event->event == IB_EVENT_PORT_ACTIVE ||
event->event == IB_EVENT_LID_CHANGE ||
event->event == IB_EVENT_PKEY_CHANGE ||
- event->event == IB_EVENT_SM_CHANGE ||
event->event == IB_EVENT_CLIENT_REREGISTER ||
event->event == IB_EVENT_GID_CHANGE) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
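Callers that previously dereferenced gid_attr->ndev directly are expected to move to the helper added above. A hedged usage sketch follows; the example_* name is illustrative and the header providing the declaration is assumed to be rdma/ib_cache.h:

#include <linux/etherdevice.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_verbs.h>

static int example_get_l2(const struct ib_gid_attr *attr)
{
	u16 vlan_id;
	u8 smac[ETH_ALEN];
	int ret;

	/* takes rcu_read_lock() internally; no ndev pointer escapes */
	ret = rdma_read_gid_l2_fields(attr, &vlan_id, smac);
	if (ret)
		return ret;		/* GID entry no longer has a netdev */

	if (vlan_id != 0xffff)
		pr_debug("GID resides on VLAN %u\n", vlan_id);
	return 0;
}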
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index b9416a6fca36..da10e6ccb43c 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -52,6 +52,7 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
+#include "core_priv.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
@@ -124,7 +125,8 @@ static struct ib_cm {
struct rb_root remote_qp_table;
struct rb_root remote_id_table;
struct rb_root remote_sidr_table;
- struct idr local_id_table;
+ struct xarray local_id_table;
+ u32 local_id_next;
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
@@ -219,7 +221,6 @@ struct cm_port {
struct cm_device {
struct list_head list;
struct ib_device *ib_device;
- struct device *device;
u8 ack_delay;
int going_down;
struct cm_port *port[0];
@@ -598,35 +599,31 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
- unsigned long flags;
- int id;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&cm.lock, flags);
+ int err;
+ u32 id;
- id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);
-
- spin_unlock_irqrestore(&cm.lock, flags);
- idr_preload_end();
+ err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
+ xa_limit_32b, &cm.local_id_next, GFP_KERNEL);
cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return id < 0 ? id : 0;
+ return err;
+}
+
+static u32 cm_local_id(__be32 local_id)
+{
+ return (__force u32) (local_id ^ cm.random_id_operand);
}
static void cm_free_id(__be32 local_id)
{
- spin_lock_irq(&cm.lock);
- idr_remove(&cm.local_id_table,
- (__force int) (local_id ^ cm.random_id_operand));
- spin_unlock_irq(&cm.lock);
+ xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
- cm_id_priv = idr_find(&cm.local_id_table,
- (__force int) (local_id ^ cm.random_id_operand));
+ cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
@@ -1988,11 +1985,12 @@ static int cm_req_handler(struct cm_work *work)
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
gid_attr = grh->sgid_attr;
- if (gid_attr && gid_attr->ndev) {
+ if (gid_attr &&
+ rdma_protocol_roce(work->port->cm_dev->ib_device,
+ work->port->port_num)) {
work->path[0].rec_type =
sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
} else {
- /* If no GID attribute or ndev is null, it is not RoCE. */
cm_path_set_rec_type(work->port->cm_dev->ib_device,
work->port->port_num,
&work->path[0],
@@ -2824,9 +2822,8 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
spin_unlock_irq(&cm.lock);
return NULL;
}
- cm_id_priv = idr_find(&cm.local_id_table, (__force int)
- (timewait_info->work.local_id ^
- cm.random_id_operand));
+ cm_id_priv = xa_load(&cm.local_id_table,
+ cm_local_id(timewait_info->work.local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
@@ -4276,18 +4273,6 @@ static struct kobj_type cm_counter_obj_type = {
.default_attrs = cm_counter_default_attrs
};
-static void cm_release_port_obj(struct kobject *obj)
-{
- struct cm_port *cm_port;
-
- cm_port = container_of(obj, struct cm_port, port_obj);
- kfree(cm_port);
-}
-
-static struct kobj_type cm_port_obj_type = {
- .release = cm_release_port_obj
-};
-
static char *cm_devnode(struct device *dev, umode_t *mode)
{
if (mode)
@@ -4306,19 +4291,12 @@ static int cm_create_port_fs(struct cm_port *port)
{
int i, ret;
- ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
- &port->cm_dev->device->kobj,
- "%d", port->port_num);
- if (ret) {
- kfree(port);
- return ret;
- }
-
for (i = 0; i < CM_COUNTER_GROUPS; i++) {
- ret = kobject_init_and_add(&port->counter_group[i].obj,
- &cm_counter_obj_type,
- &port->port_obj,
- "%s", counter_group_names[i]);
+ ret = ib_port_register_module_stat(port->cm_dev->ib_device,
+ port->port_num,
+ &port->counter_group[i].obj,
+ &cm_counter_obj_type,
+ counter_group_names[i]);
if (ret)
goto error;
}
@@ -4327,8 +4305,7 @@ static int cm_create_port_fs(struct cm_port *port)
error:
while (i--)
- kobject_put(&port->counter_group[i].obj);
- kobject_put(&port->port_obj);
+ ib_port_unregister_module_stat(&port->counter_group[i].obj);
return ret;
}
@@ -4338,9 +4315,8 @@ static void cm_remove_port_fs(struct cm_port *port)
int i;
for (i = 0; i < CM_COUNTER_GROUPS; i++)
- kobject_put(&port->counter_group[i].obj);
+ ib_port_unregister_module_stat(&port->counter_group[i].obj);
- kobject_put(&port->port_obj);
}
static void cm_add_one(struct ib_device *ib_device)
@@ -4367,13 +4343,6 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
cm_dev->going_down = 0;
- cm_dev->device = device_create(&cm_class, &ib_device->dev,
- MKDEV(0, 0), NULL,
- "%s", dev_name(&ib_device->dev));
- if (IS_ERR(cm_dev->device)) {
- kfree(cm_dev);
- return;
- }
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
@@ -4440,7 +4409,6 @@ error1:
cm_remove_port_fs(port);
}
free:
- device_unregister(cm_dev->device);
kfree(cm_dev);
}
@@ -4494,7 +4462,6 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
cm_remove_port_fs(port);
}
- device_unregister(cm_dev->device);
kfree(cm_dev);
}
@@ -4502,7 +4469,6 @@ static int __init ib_cm_init(void)
{
int ret;
- memset(&cm, 0, sizeof cm);
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
@@ -4512,7 +4478,7 @@ static int __init ib_cm_init(void)
cm.remote_id_table = RB_ROOT;
cm.remote_qp_table = RB_ROOT;
cm.remote_sidr_table = RB_ROOT;
- idr_init(&cm.local_id_table);
+ xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
INIT_LIST_HEAD(&cm.timewait_list);
@@ -4538,7 +4504,6 @@ error3:
error2:
class_unregister(&cm_class);
error1:
- idr_destroy(&cm.local_id_table);
return ret;
}
@@ -4560,9 +4525,8 @@ static void __exit ib_cm_cleanup(void)
}
class_unregister(&cm_class);
- idr_destroy(&cm.local_id_table);
+ WARN_ON(!xa_empty(&cm.local_id_table));
}
module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
-
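The idr-to-XArray conversion above boils down to one pattern: a cyclically allocated 32-bit ID with IRQ-safe internal locking, replacing the idr_preload()/idr_alloc_cyclic()/idr_preload_end() sequence and the external spinlock. A self-contained sketch with illustrative example_* names, not taken from ib_cm:

#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(example_ids, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
static u32 example_id_next;

static int example_alloc_id(void *entry, u32 *id)
{
	/* returns 0 on success, 1 if the counter wrapped, negative on error */
	return xa_alloc_cyclic_irq(&example_ids, id, entry, xa_limit_32b,
				   &example_id_next, GFP_KERNEL);
}

static void example_free_id(u32 id)
{
	/* takes xa_lock_irq() internally, replacing the old external spinlock */
	xa_erase_irq(&example_ids, id);
}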
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 476d4309576d..3d16d614aff6 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -98,7 +98,7 @@ struct cm_req_msg {
u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
@@ -423,7 +423,7 @@ enum cm_msg_response {
u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
@@ -461,7 +461,7 @@ struct cm_rej_msg {
u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
@@ -506,7 +506,7 @@ struct cm_rep_msg {
u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
@@ -614,7 +614,7 @@ struct cm_rtu_msg {
u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct cm_dreq_msg {
struct ib_mad_hdr hdr;
@@ -626,7 +626,7 @@ struct cm_dreq_msg {
u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
@@ -647,7 +647,7 @@ struct cm_drep_msg {
u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct cm_lap_msg {
struct ib_mad_hdr hdr;
@@ -675,7 +675,7 @@ struct cm_lap_msg {
u8 offset63;
u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
@@ -784,7 +784,7 @@ struct cm_apr_msg {
u8 info[IB_CM_APR_INFO_LENGTH];
u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct cm_sidr_req_msg {
struct ib_mad_hdr hdr;
@@ -795,7 +795,7 @@ struct cm_sidr_req_msg {
__be64 service_id;
u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-} __attribute__ ((packed));
+} __packed;
struct cm_sidr_rep_msg {
struct ib_mad_hdr hdr;
@@ -811,7 +811,7 @@ struct cm_sidr_rep_msg {
u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 68c997be2429..19f1730a4f24 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -39,7 +39,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -191,10 +191,10 @@ static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;
struct cma_pernet {
- struct idr tcp_ps;
- struct idr udp_ps;
- struct idr ipoib_ps;
- struct idr ib_ps;
+ struct xarray tcp_ps;
+ struct xarray udp_ps;
+ struct xarray ipoib_ps;
+ struct xarray ib_ps;
};
static struct cma_pernet *cma_pernet(struct net *net)
@@ -202,7 +202,8 @@ static struct cma_pernet *cma_pernet(struct net *net)
return net_generic(net, cma_pernet_id);
}
-static struct idr *cma_pernet_idr(struct net *net, enum rdma_ucm_port_space ps)
+static
+struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{
struct cma_pernet *pernet = cma_pernet(net);
@@ -247,25 +248,25 @@ struct class_port_info_context {
static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list, int snum)
{
- struct idr *idr = cma_pernet_idr(net, ps);
+ struct xarray *xa = cma_pernet_xa(net, ps);
- return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
+ return xa_insert(xa, snum, bind_list, GFP_KERNEL);
}
static struct rdma_bind_list *cma_ps_find(struct net *net,
enum rdma_ucm_port_space ps, int snum)
{
- struct idr *idr = cma_pernet_idr(net, ps);
+ struct xarray *xa = cma_pernet_xa(net, ps);
- return idr_find(idr, snum);
+ return xa_load(xa, snum);
}
static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
int snum)
{
- struct idr *idr = cma_pernet_idr(net, ps);
+ struct xarray *xa = cma_pernet_xa(net, ps);
- idr_remove(idr, snum);
+ xa_erase(xa, snum);
}
enum {
@@ -615,6 +616,9 @@ cma_validate_port(struct ib_device *device, u8 port,
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
+ if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
+ return ERR_PTR(-ENODEV);
+
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
return ERR_PTR(-ENODEV);
@@ -1173,18 +1177,31 @@ static inline bool cma_any_addr(const struct sockaddr *addr)
return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
-static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
+static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
if (src->sa_family != dst->sa_family)
return -1;
switch (src->sa_family) {
case AF_INET:
- return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
- case AF_INET6:
- return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
- &((struct sockaddr_in6 *) dst)->sin6_addr);
+ return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
+ ((struct sockaddr_in *)dst)->sin_addr.s_addr;
+ case AF_INET6: {
+ struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
+ struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
+ bool link_local;
+
+ if (ipv6_addr_cmp(&src_addr6->sin6_addr,
+ &dst_addr6->sin6_addr))
+ return 1;
+ link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
+ IPV6_ADDR_LINKLOCAL;
+ /* Link local must match their scope_ids */
+ return link_local ? (src_addr6->sin6_scope_id !=
+ dst_addr6->sin6_scope_id) :
+ 0;
+ }
+
default:
return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
&((struct sockaddr_ib *) dst)->sib_addr);
@@ -1469,6 +1486,7 @@ static struct net_device *
roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
{
const struct ib_gid_attr *sgid_attr = NULL;
+ struct net_device *ndev;
if (ib_event->event == IB_CM_REQ_RECEIVED)
sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
@@ -1477,8 +1495,15 @@ roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
if (!sgid_attr)
return NULL;
- dev_hold(sgid_attr->ndev);
- return sgid_attr->ndev;
+
+ rcu_read_lock();
+ ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
+ if (IS_ERR(ndev))
+ ndev = NULL;
+ else
+ dev_hold(ndev);
+ rcu_read_unlock();
+ return ndev;
}
static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
@@ -3247,7 +3272,7 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
goto err;
bind_list->ps = ps;
- bind_list->port = (unsigned short)ret;
+ bind_list->port = snum;
cma_bind_port(bind_list, id_priv);
return 0;
err:
@@ -4655,10 +4680,10 @@ static int cma_init_net(struct net *net)
{
struct cma_pernet *pernet = cma_pernet(net);
- idr_init(&pernet->tcp_ps);
- idr_init(&pernet->udp_ps);
- idr_init(&pernet->ipoib_ps);
- idr_init(&pernet->ib_ps);
+ xa_init(&pernet->tcp_ps);
+ xa_init(&pernet->udp_ps);
+ xa_init(&pernet->ipoib_ps);
+ xa_init(&pernet->ib_ps);
return 0;
}
@@ -4667,10 +4692,10 @@ static void cma_exit_net(struct net *net)
{
struct cma_pernet *pernet = cma_pernet(net);
- idr_destroy(&pernet->tcp_ps);
- idr_destroy(&pernet->udp_ps);
- idr_destroy(&pernet->ipoib_ps);
- idr_destroy(&pernet->ib_ps);
+ WARN_ON(!xa_empty(&pernet->tcp_ps));
+ WARN_ON(!xa_empty(&pernet->udp_ps));
+ WARN_ON(!xa_empty(&pernet->ipoib_ps));
+ WARN_ON(!xa_empty(&pernet->ib_ps));
}
static struct pernet_operations cma_pernet_operations = {
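The new AF_INET6 branch in cma_addr_cmp() above encodes a subtle rule: two equal link-local addresses only describe the same endpoint when their sin6_scope_id values (the bound interface indexes) also match. A standalone illustration of that rule; the example_* name is not from the patch:

#include <linux/in6.h>
#include <net/ipv6.h>

static bool example_ipv6_same_endpoint(const struct sockaddr_in6 *a,
					const struct sockaddr_in6 *b)
{
	if (ipv6_addr_cmp(&a->sin6_addr, &b->sin6_addr))
		return false;
	/* link-local addresses are only unique per interface */
	if (ipv6_addr_type(&a->sin6_addr) & IPV6_ADDR_LINKLOCAL)
		return a->sin6_scope_id == b->sin6_scope_id;
	return true;
}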
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 08c690249594..ff40a450b5d2 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -55,6 +55,7 @@ struct pkey_index_qp_list {
};
extern const struct attribute_group ib_dev_attr_group;
+extern bool ib_devices_shared_netns;
int ib_device_register_sysfs(struct ib_device *device);
void ib_device_unregister_sysfs(struct ib_device *device);
@@ -279,7 +280,8 @@ static inline void ib_mad_agent_security_change(void)
}
#endif
-struct ib_device *ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(const struct net *net, u32 index);
+
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
@@ -302,6 +304,7 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
qp->device = dev;
qp->pd = pd;
qp->uobject = uobj;
+ qp->real_qp = qp;
/*
* We don't track XRC QPs for now, because they don't have PD
	 * and more importantly they are created internally by the driver,
@@ -336,4 +339,17 @@ int roce_resolve_route_from_path(struct sa_path_rec *rec,
const struct ib_gid_attr *attr);
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
+
+void ib_free_port_attrs(struct ib_core_device *coredev);
+int ib_setup_port_attrs(struct ib_core_device *coredev);
+
+int rdma_compatdev_set(u8 enable);
+
+int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+ struct kobject *kobj, struct kobj_type *ktype,
+ const char *name);
+void ib_port_unregister_module_stat(struct kobject *kobj);
+
+int ib_device_set_netns_put(struct sk_buff *skb,
+ struct ib_device *dev, u32 ns_fd);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index d61e5e1427c2..a4c81992267c 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -128,15 +128,17 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
* @comp_vector: HCA completion vectors for this CQ
* @poll_ctx: context to poll the CQ from.
* @caller: module owner name.
+ * @udata: Valid user data or NULL for kernel object
*
* This is the proper interface to allocate a CQ for in-kernel users. A
* CQ allocated with this interface will automatically be polled from the
* specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
* to use this CQ abstraction.
*/
-struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector,
- enum ib_poll_context poll_ctx, const char *caller)
+struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx,
+ const char *caller, struct ib_udata *udata)
{
struct ib_cq_init_attr cq_attr = {
.cqe = nr_cqe,
@@ -145,7 +147,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
struct ib_cq *cq;
int ret = -ENOMEM;
- cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL);
+ cq = dev->ops.create_cq(dev, &cq_attr, NULL);
if (IS_ERR(cq))
return cq;
@@ -193,16 +195,17 @@ out_free_wc:
kfree(cq->wc);
rdma_restrack_del(&cq->res);
out_destroy_cq:
- cq->device->ops.destroy_cq(cq);
+ cq->device->ops.destroy_cq(cq, udata);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(__ib_alloc_cq);
+EXPORT_SYMBOL(__ib_alloc_cq_user);
/**
* ib_free_cq - free a completion queue
* @cq: completion queue to free.
+ * @udata: User data or NULL for kernel object
*/
-void ib_free_cq(struct ib_cq *cq)
+void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
int ret;
@@ -225,7 +228,7 @@ void ib_free_cq(struct ib_cq *cq)
kfree(cq->wc);
rdma_restrack_del(&cq->res);
- ret = cq->device->ops.destroy_cq(cq);
+ ret = cq->device->ops.destroy_cq(cq, udata);
WARN_ON_ONCE(ret);
}
-EXPORT_SYMBOL(ib_free_cq);
+EXPORT_SYMBOL(ib_free_cq_user);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7421ec4883fb..78dc07c6ac4b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,6 +38,8 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
@@ -101,6 +103,54 @@ static DECLARE_RWSEM(clients_rwsem);
* be registered.
*/
#define CLIENT_DATA_REGISTERED XA_MARK_1
+
+/**
+ * struct rdma_dev_net - rdma net namespace metadata for a net
+ * @net: Pointer to owner net namespace
+ * @id: xarray id to identify the net namespace.
+ */
+struct rdma_dev_net {
+ possible_net_t net;
+ u32 id;
+};
+
+static unsigned int rdma_dev_net_id;
+
+/*
+ * A list of net namespaces is maintained in an xarray. This is necessary
+ * because we can't get the locking right using the existing net ns list. We
+ * would require a init_net callback after the list is updated.
+ */
+static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
+/*
+ * rwsem to protect accessing the rdma_nets xarray entries.
+ */
+static DECLARE_RWSEM(rdma_nets_rwsem);
+
+bool ib_devices_shared_netns = true;
+module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
+MODULE_PARM_DESC(netns_mode,
+ "Share device among net namespaces; default=1 (shared)");
+/**
+ * rdma_dev_access_netns() - Return whether a rdma device can be accessed
+ * from a specified net namespace or not.
+ * @device: Pointer to the rdma device to be checked
+ * @net: Pointer to the net namespace for which access is to be checked
+ *
+ * rdma_dev_access_netns() returns whether an rdma device can be accessed
+ * from a specified net namespace or not. When the rdma device is in shared
+ * mode, it ignores the net namespace. When the rdma device is exclusive
+ * to a net namespace, the device's net namespace is checked against the
+ * specified one.
+ */
+bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
+{
+ return (ib_devices_shared_netns ||
+ net_eq(read_pnet(&dev->coredev.rdma_net), net));
+}
+EXPORT_SYMBOL(rdma_dev_access_netns);
+
/*
* xarray has this behavior where it won't iterate over NULL values stored in
* allocated arrays. So we need our own iterator to see all values stored in
@@ -147,10 +197,73 @@ static int ib_security_change(struct notifier_block *nb, unsigned long event,
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
+static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
+ struct va_format *vaf)
+{
+ if (ibdev && ibdev->dev.parent)
+ dev_printk_emit(level[1] - '0',
+ ibdev->dev.parent,
+ "%s %s %s: %pV",
+ dev_driver_string(ibdev->dev.parent),
+ dev_name(ibdev->dev.parent),
+ dev_name(&ibdev->dev),
+ vaf);
+ else if (ibdev)
+ printk("%s%s: %pV",
+ level, dev_name(&ibdev->dev), vaf);
+ else
+ printk("%s(NULL ib_device): %pV", level, vaf);
+}
+
+void ibdev_printk(const char *level, const struct ib_device *ibdev,
+ const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ __ibdev_printk(level, ibdev, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ibdev_printk);
+
+#define define_ibdev_printk_level(func, level) \
+void func(const struct ib_device *ibdev, const char *fmt, ...) \
+{ \
+ struct va_format vaf; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ \
+ vaf.fmt = fmt; \
+ vaf.va = &args; \
+ \
+ __ibdev_printk(level, ibdev, &vaf); \
+ \
+ va_end(args); \
+} \
+EXPORT_SYMBOL(func);
+
+define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
+define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
+define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
+define_ibdev_printk_level(ibdev_err, KERN_ERR);
+define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
+define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
+define_ibdev_printk_level(ibdev_info, KERN_INFO);
+
static struct notifier_block ibdev_lsm_nb = {
.notifier_call = ib_security_change,
};
+static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
+ struct net *net);
+
/* Pointer to the RCU head at the start of the ib_port_data array */
struct ib_port_data_rcu {
struct rcu_head rcu_head;
@@ -200,16 +313,22 @@ static int ib_device_check_mandatory(struct ib_device *device)
* Caller must perform ib_device_put() to return the device reference count
* when ib_device_get_by_index() returns valid device pointer.
*/
-struct ib_device *ib_device_get_by_index(u32 index)
+struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
{
struct ib_device *device;
down_read(&devices_rwsem);
device = xa_load(&devices, index);
if (device) {
+ if (!rdma_dev_access_netns(device, net)) {
+ device = NULL;
+ goto out;
+ }
+
if (!ib_device_try_get(device))
device = NULL;
}
+out:
up_read(&devices_rwsem);
return device;
}
@@ -268,6 +387,26 @@ struct ib_device *ib_device_get_by_name(const char *name,
}
EXPORT_SYMBOL(ib_device_get_by_name);
+static int rename_compat_devs(struct ib_device *device)
+{
+ struct ib_core_device *cdev;
+ unsigned long index;
+ int ret = 0;
+
+ mutex_lock(&device->compat_devs_mutex);
+ xa_for_each (&device->compat_devs, index, cdev) {
+ ret = device_rename(&cdev->dev, dev_name(&device->dev));
+ if (ret) {
+ dev_warn(&cdev->dev,
+ "Fail to rename compatdev to new name %s\n",
+				 "Failed to rename compatdev to new name %s\n",
+ break;
+ }
+ }
+ mutex_unlock(&device->compat_devs_mutex);
+ return ret;
+}
+
int ib_device_rename(struct ib_device *ibdev, const char *name)
{
int ret;
@@ -287,6 +426,7 @@ int ib_device_rename(struct ib_device *ibdev, const char *name)
if (ret)
goto out;
strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+ ret = rename_compat_devs(ibdev);
out:
up_write(&devices_rwsem);
return ret;
@@ -336,6 +476,7 @@ static void ib_device_release(struct device *device)
WARN_ON(refcount_read(&dev->refcount));
ib_cache_release_one(dev);
ib_security_release_port_pkey_list(dev);
+ xa_destroy(&dev->compat_devs);
xa_destroy(&dev->client_data);
if (dev->port_data)
kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
@@ -357,12 +498,42 @@ static int ib_device_uevent(struct device *device,
return 0;
}
+static const void *net_namespace(struct device *d)
+{
+ struct ib_core_device *coredev =
+ container_of(d, struct ib_core_device, dev);
+
+ return read_pnet(&coredev->rdma_net);
+}
+
static struct class ib_class = {
.name = "infiniband",
.dev_release = ib_device_release,
.dev_uevent = ib_device_uevent,
+ .ns_type = &net_ns_type_operations,
+ .namespace = net_namespace,
};
+static void rdma_init_coredev(struct ib_core_device *coredev,
+ struct ib_device *dev, struct net *net)
+{
+	/* This BUILD_BUG_ON is intended to catch layout changes in the
+	 * union of ib_core_device and device.
+	 * dev must be the first element as ib_core and provider
+	 * drivers rely on it. Adding anything in ib_core_device before
+ * device will break this assumption.
+ */
+ BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
+ offsetof(struct ib_device, dev));
+
+ coredev->dev.class = &ib_class;
+ coredev->dev.groups = dev->groups;
+ device_initialize(&coredev->dev);
+ coredev->owner = dev;
+ INIT_LIST_HEAD(&coredev->port_list);
+ write_pnet(&coredev->rdma_net, net);
+}
+
/**
* _ib_alloc_device - allocate an IB device struct
* @size:size of structure to allocate
@@ -389,10 +560,8 @@ struct ib_device *_ib_alloc_device(size_t size)
return NULL;
}
- device->dev.class = &ib_class;
device->groups[0] = &ib_dev_attr_group;
- device->dev.groups = device->groups;
- device_initialize(&device->dev);
+ rdma_init_coredev(&device->coredev, device, &init_net);
INIT_LIST_HEAD(&device->event_handler_list);
spin_lock_init(&device->event_handler_lock);
@@ -403,7 +572,8 @@ struct ib_device *_ib_alloc_device(size_t size)
*/
xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
init_rwsem(&device->client_data_rwsem);
- INIT_LIST_HEAD(&device->port_list);
+ xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
+ mutex_init(&device->compat_devs_mutex);
init_completion(&device->unreg_completion);
INIT_WORK(&device->unregistration_work, ib_unregister_work);
@@ -436,6 +606,7 @@ void ib_dealloc_device(struct ib_device *device)
/* Expedite releasing netdev references */
free_netdevs(device);
+ WARN_ON(!xa_empty(&device->compat_devs));
WARN_ON(!xa_empty(&device->client_data));
WARN_ON(refcount_read(&device->refcount));
rdma_restrack_clean(device);
@@ -644,6 +815,283 @@ static int ib_security_change(struct notifier_block *nb, unsigned long event,
return NOTIFY_OK;
}
+static void compatdev_release(struct device *dev)
+{
+ struct ib_core_device *cdev =
+ container_of(dev, struct ib_core_device, dev);
+
+ kfree(cdev);
+}
+
+static int add_one_compat_dev(struct ib_device *device,
+ struct rdma_dev_net *rnet)
+{
+ struct ib_core_device *cdev;
+ int ret;
+
+ lockdep_assert_held(&rdma_nets_rwsem);
+ if (!ib_devices_shared_netns)
+ return 0;
+
+ /*
+	 * Create and add a compat device in every namespace other than the
+	 * one the device is currently bound to.
+ */
+ if (net_eq(read_pnet(&rnet->net),
+ read_pnet(&device->coredev.rdma_net)))
+ return 0;
+
+ /*
+ * The first of init_net() or ib_register_device() to take the
+ * compat_devs_mutex wins and gets to add the device. Others will wait
+ * for completion here.
+ */
+ mutex_lock(&device->compat_devs_mutex);
+ cdev = xa_load(&device->compat_devs, rnet->id);
+ if (cdev) {
+ ret = 0;
+ goto done;
+ }
+ ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
+ if (ret)
+ goto done;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev) {
+ ret = -ENOMEM;
+ goto cdev_err;
+ }
+
+ cdev->dev.parent = device->dev.parent;
+ rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
+ cdev->dev.release = compatdev_release;
+ dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
+
+ ret = device_add(&cdev->dev);
+ if (ret)
+ goto add_err;
+ ret = ib_setup_port_attrs(cdev);
+ if (ret)
+ goto port_err;
+
+ ret = xa_err(xa_store(&device->compat_devs, rnet->id,
+ cdev, GFP_KERNEL));
+ if (ret)
+ goto insert_err;
+
+ mutex_unlock(&device->compat_devs_mutex);
+ return 0;
+
+insert_err:
+ ib_free_port_attrs(cdev);
+port_err:
+ device_del(&cdev->dev);
+add_err:
+ put_device(&cdev->dev);
+cdev_err:
+ xa_release(&device->compat_devs, rnet->id);
+done:
+ mutex_unlock(&device->compat_devs_mutex);
+ return ret;
+}
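add_one_compat_dev() uses xa_reserve() so the compat_devs slot exists before any step that can fail, and xa_release() to undo it on error. A stripped-down sketch of that reserve/store/release sequence (hypothetical names, not ib_core code):

#include <linux/slab.h>
#include <linux/xarray.h>

struct thing { int payload; };

static DEFINE_XARRAY(things);

static int add_thing(unsigned long id)
{
        struct thing *t;
        int ret;

        /* Reserve the slot first so a later failure cannot leave a half-added entry. */
        ret = xa_reserve(&things, id, GFP_KERNEL);
        if (ret)
                return ret;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t) {
                xa_release(&things, id);        /* undo the reservation */
                return -ENOMEM;
        }

        /* xa_err() converts a failed store into a plain errno. */
        ret = xa_err(xa_store(&things, id, t, GFP_KERNEL));
        if (ret) {
                kfree(t);
                xa_release(&things, id);
        }
        return ret;
}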
+
+static void remove_one_compat_dev(struct ib_device *device, u32 id)
+{
+ struct ib_core_device *cdev;
+
+ mutex_lock(&device->compat_devs_mutex);
+ cdev = xa_erase(&device->compat_devs, id);
+ mutex_unlock(&device->compat_devs_mutex);
+ if (cdev) {
+ ib_free_port_attrs(cdev);
+ device_del(&cdev->dev);
+ put_device(&cdev->dev);
+ }
+}
+
+static void remove_compat_devs(struct ib_device *device)
+{
+ struct ib_core_device *cdev;
+ unsigned long index;
+
+ xa_for_each (&device->compat_devs, index, cdev)
+ remove_one_compat_dev(device, index);
+}
+
+static int add_compat_devs(struct ib_device *device)
+{
+ struct rdma_dev_net *rnet;
+ unsigned long index;
+ int ret = 0;
+
+ lockdep_assert_held(&devices_rwsem);
+
+ down_read(&rdma_nets_rwsem);
+ xa_for_each (&rdma_nets, index, rnet) {
+ ret = add_one_compat_dev(device, rnet);
+ if (ret)
+ break;
+ }
+ up_read(&rdma_nets_rwsem);
+ return ret;
+}
+
+static void remove_all_compat_devs(void)
+{
+ struct ib_compat_device *cdev;
+ struct ib_device *dev;
+ unsigned long index;
+
+ down_read(&devices_rwsem);
+ xa_for_each (&devices, index, dev) {
+ unsigned long c_index = 0;
+
+ /* Hold nets_rwsem so that any other thread modifying this
+ * system param can sync with this thread.
+ */
+ down_read(&rdma_nets_rwsem);
+ xa_for_each (&dev->compat_devs, c_index, cdev)
+ remove_one_compat_dev(dev, c_index);
+ up_read(&rdma_nets_rwsem);
+ }
+ up_read(&devices_rwsem);
+}
+
+static int add_all_compat_devs(void)
+{
+ struct rdma_dev_net *rnet;
+ struct ib_device *dev;
+ unsigned long index;
+ int ret = 0;
+
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
+ unsigned long net_index = 0;
+
+ /* Hold nets_rwsem so that any other thread modifying this
+ * system param can sync with this thread.
+ */
+ down_read(&rdma_nets_rwsem);
+ xa_for_each (&rdma_nets, net_index, rnet) {
+ ret = add_one_compat_dev(dev, rnet);
+ if (ret)
+ break;
+ }
+ up_read(&rdma_nets_rwsem);
+ }
+ up_read(&devices_rwsem);
+ if (ret)
+ remove_all_compat_devs();
+ return ret;
+}
+
+int rdma_compatdev_set(u8 enable)
+{
+ struct rdma_dev_net *rnet;
+ unsigned long index;
+ int ret = 0;
+
+ down_write(&rdma_nets_rwsem);
+ if (ib_devices_shared_netns == enable) {
+ up_write(&rdma_nets_rwsem);
+ return 0;
+ }
+
+ /* enable/disable of compat devices is not supported
+ * when more than the default init_net exists.
+ */
+ xa_for_each (&rdma_nets, index, rnet) {
+ ret++;
+ break;
+ }
+ if (!ret)
+ ib_devices_shared_netns = enable;
+ up_write(&rdma_nets_rwsem);
+ if (ret)
+ return -EBUSY;
+
+ if (enable)
+ ret = add_all_compat_devs();
+ else
+ remove_all_compat_devs();
+ return ret;
+}
+
+static void rdma_dev_exit_net(struct net *net)
+{
+ struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+ struct ib_device *dev;
+ unsigned long index;
+ int ret;
+
+ down_write(&rdma_nets_rwsem);
+ /*
+ * Prevent the ID from being re-used and hide the id from xa_for_each.
+ */
+ ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
+ WARN_ON(ret);
+ up_write(&rdma_nets_rwsem);
+
+ down_read(&devices_rwsem);
+ xa_for_each (&devices, index, dev) {
+ get_device(&dev->dev);
+ /*
+ * Release the devices_rwsem so that the potentially blocking
+ * device_del() doesn't hold the devices_rwsem for too long.
+ */
+ up_read(&devices_rwsem);
+
+ remove_one_compat_dev(dev, rnet->id);
+
+ /*
+ * If the real device is in the NS then move it back to init.
+ */
+ rdma_dev_change_netns(dev, net, &init_net);
+
+ put_device(&dev->dev);
+ down_read(&devices_rwsem);
+ }
+ up_read(&devices_rwsem);
+
+ xa_erase(&rdma_nets, rnet->id);
+}
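The xa_store(..., NULL, ...) above relies on allocating-XArray semantics: a NULL entry keeps the index allocated, so xa_alloc() cannot hand the same id out again, while xa_for_each() skips it. A small hypothetical sketch of that hide-then-erase sequence:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(table);

static void hide_then_drop(unsigned long id)
{
        /*
         * Hide the entry from xa_for_each() but keep the index allocated so
         * xa_alloc() cannot reuse the id until the final xa_erase().
         */
        xa_store(&table, id, NULL, GFP_KERNEL);

        /* ... tear down everything that still refers to this id ... */

        xa_erase(&table, id);   /* now the id may be reused */
}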
+
+static __net_init int rdma_dev_init_net(struct net *net)
+{
+ struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+ unsigned long index;
+ struct ib_device *dev;
+ int ret;
+
+ /* No need to create any compat devices in default init_net. */
+ if (net_eq(net, &init_net))
+ return 0;
+
+ write_pnet(&rnet->net, net);
+
+ ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
+ /* Hold nets_rwsem so that netlink command cannot change
+ * system configuration for device sharing mode.
+ */
+ down_read(&rdma_nets_rwsem);
+ ret = add_one_compat_dev(dev, rnet);
+ up_read(&rdma_nets_rwsem);
+ if (ret)
+ break;
+ }
+ up_read(&devices_rwsem);
+
+ if (ret)
+ rdma_dev_exit_net(net);
+
+ return ret;
+}
+
/*
* Assign the unique string device name and the unique device index. This is
* undone by ib_dealloc_device.
@@ -711,6 +1159,9 @@ static void setup_dma_device(struct ib_device *device)
WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
+ /* Setup default max segment size for all IB devices */
+ dma_set_max_seg_size(device->dma_device, SZ_2G);
+
}
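dma_set_max_seg_size() records the limit in dev->dma_parms, so anything building scatterlists for the device sees a 2 GB per-segment cap by default. A minimal, hypothetical sketch of the call with its return value checked:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void demo_setup_dma(struct device *dma_dev)
{
        /* Fails only if the device has no dma_parms structure attached. */
        if (dma_set_max_seg_size(dma_dev, SZ_2G))
                dev_warn(dma_dev, "failed to set DMA max segment size\n");
}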
/*
@@ -765,8 +1216,12 @@ static void disable_device(struct ib_device *device)
ib_device_put(device);
wait_for_completion(&device->unreg_completion);
- /* Expedite removing unregistered pointers from the hash table */
- free_netdevs(device);
+ /*
+ * Compat devices must be removed after the device refcount drops to
+ * zero. Otherwise rdma_dev_init_net() may add more compat devices
+ * after they are removed here and before the device is disabled.
+ */
+ remove_compat_devs(device);
}
/*
@@ -807,7 +1262,8 @@ static int enable_device_and_get(struct ib_device *device)
break;
}
up_read(&clients_rwsem);
-
+ if (!ret)
+ ret = add_compat_devs(device);
out:
up_read(&devices_rwsem);
return ret;
@@ -847,6 +1303,11 @@ int ib_register_device(struct ib_device *device, const char *name)
ib_device_register_rdmacg(device);
+ /*
+ * Ensure that the ADD uevent is not fired, because it
+ * is too early and the device is not initialized yet.
+ */
+ dev_set_uevent_suppress(&device->dev, true);
ret = device_add(&device->dev);
if (ret)
goto cg_cleanup;
@@ -859,6 +1320,9 @@ int ib_register_device(struct ib_device *device, const char *name)
}
ret = enable_device_and_get(device);
+ dev_set_uevent_suppress(&device->dev, false);
+ /* Mark for userspace that device is ready */
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
if (ret) {
void (*dealloc_fn)(struct ib_device *);
@@ -887,6 +1351,7 @@ int ib_register_device(struct ib_device *device, const char *name)
dev_cleanup:
device_del(&device->dev);
cg_cleanup:
+ dev_set_uevent_suppress(&device->dev, false);
ib_device_unregister_rdmacg(device);
ib_cache_cleanup_one(device);
return ret;
@@ -908,6 +1373,10 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
goto out;
disable_device(ib_dev);
+
+ /* Expedite removing unregistered pointers from the hash table */
+ free_netdevs(ib_dev);
+
ib_device_unregister_sysfs(ib_dev);
device_del(&ib_dev->dev);
ib_device_unregister_rdmacg(ib_dev);
@@ -1038,6 +1507,126 @@ void ib_unregister_device_queued(struct ib_device *ib_dev)
}
EXPORT_SYMBOL(ib_unregister_device_queued);
+/*
+ * The caller must pass in a device that has the kref held and the refcount
+ * released. If the device is in cur_net and still registered then it is moved
+ * into net.
+ */
+static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
+ struct net *net)
+{
+ int ret2 = -EINVAL;
+ int ret;
+
+ mutex_lock(&device->unregistration_lock);
+
+ /*
+ * If the device is not under ib_device_get() or the unregistration_lock
+ * is not held, the namespace can be changed or the device unregistered.
+ * Check again under the lock.
+ */
+ if (refcount_read(&device->refcount) == 0 ||
+ !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
+ disable_device(device);
+
+ /*
+ * At this point no one can be using the device, so it is safe to
+ * change the namespace.
+ */
+ write_pnet(&device->coredev.rdma_net, net);
+
+ down_read(&devices_rwsem);
+ /*
+ * Currently rdma devices are system wide unique. So the device name
+ * is guaranteed free in the new namespace. Publish the new namespace
+ * at the sysfs level.
+ */
+ ret = device_rename(&device->dev, dev_name(&device->dev));
+ up_read(&devices_rwsem);
+ if (ret) {
+ dev_warn(&device->dev,
+ "%s: Couldn't rename device after namespace change\n",
+ __func__);
+ /* Try and put things back and re-enable the device */
+ write_pnet(&device->coredev.rdma_net, cur_net);
+ }
+
+ ret2 = enable_device_and_get(device);
+ if (ret2) {
+ /*
+ * This shouldn't really happen, but if it does, let the user
+ * retry at a later point. So don't disable the device.
+ */
+ dev_warn(&device->dev,
+ "%s: Couldn't re-enable device after namespace change\n",
+ __func__);
+ }
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
+ ib_device_put(device);
+out:
+ mutex_unlock(&device->unregistration_lock);
+ if (ret)
+ return ret;
+ return ret2;
+}
+
+int ib_device_set_netns_put(struct sk_buff *skb,
+ struct ib_device *dev, u32 ns_fd)
+{
+ struct net *net;
+ int ret;
+
+ net = get_net_ns_by_fd(ns_fd);
+ if (IS_ERR(net)) {
+ ret = PTR_ERR(net);
+ goto net_err;
+ }
+
+ if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto ns_err;
+ }
+
+ /*
+ * Currently supported only for those providers which support
+ * disassociation and don't do port specific sysfs init. Once a
+ * port_cleanup infrastructure is implemented, this limitation will be
+ * removed.
+ */
+ if (!dev->ops.disassociate_ucontext || dev->ops.init_port ||
+ ib_devices_shared_netns) {
+ ret = -EOPNOTSUPP;
+ goto ns_err;
+ }
+
+ get_device(&dev->dev);
+ ib_device_put(dev);
+ ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
+ put_device(&dev->dev);
+
+ put_net(net);
+ return ret;
+
+ns_err:
+ put_net(net);
+net_err:
+ ib_device_put(dev);
+ return ret;
+}
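ib_device_set_netns_put() follows the usual netlink pattern for namespace file descriptors: resolve the fd to a struct net, check CAP_NET_ADMIN against that namespace's user namespace, and drop the reference when done. A self-contained sketch of that skeleton (hypothetical function, not the ib_core implementation):

#include <linux/capability.h>
#include <linux/err.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>

static int act_on_netns_fd(struct sk_buff *skb, u32 ns_fd)
{
        struct net *net;
        int ret = 0;

        net = get_net_ns_by_fd(ns_fd);  /* takes a reference on success */
        if (IS_ERR(net))
                return PTR_ERR(net);

        if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
                ret = -EPERM;
                goto out;
        }

        /* ... operate on @net ... */
out:
        put_net(net);
        return ret;
}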
+
+static struct pernet_operations rdma_dev_net_ops = {
+ .init = rdma_dev_init_net,
+ .exit = rdma_dev_exit_net,
+ .id = &rdma_dev_net_id,
+ .size = sizeof(struct rdma_dev_net),
+};
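rdma_dev_net_ops is a standard pernet_operations registration: the core allocates .size bytes of per-namespace storage and hands back an id for net_generic() lookups. A minimal sketch of the same wiring in a hypothetical module:

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct demo_net {
        int counter;
};

static unsigned int demo_net_id;

static __net_init int demo_init_net(struct net *net)
{
        struct demo_net *dn = net_generic(net, demo_net_id);

        dn->counter = 0;        /* per-namespace private state */
        return 0;
}

static __net_exit void demo_exit_net(struct net *net)
{
        /* nothing to free; the .size storage is managed by the core */
}

static struct pernet_operations demo_net_ops = {
        .init = demo_init_net,
        .exit = demo_exit_net,
        .id   = &demo_net_id,
        .size = sizeof(struct demo_net),
};

static int __init demo_init(void)
{
        return register_pernet_device(&demo_net_ops);
}

static void __exit demo_exit(void)
{
        unregister_pernet_device(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");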
+
static int assign_client_id(struct ib_client *client)
{
int ret;
@@ -1515,6 +2104,9 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
+ if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
+ continue;
+
ret = nldev_cb(dev, skb, cb, idx);
if (ret)
break;
@@ -1787,6 +2379,14 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_vf_config);
SET_DEVICE_OP(dev_ops, get_vf_stats);
SET_DEVICE_OP(dev_ops, init_port);
+ SET_DEVICE_OP(dev_ops, iw_accept);
+ SET_DEVICE_OP(dev_ops, iw_add_ref);
+ SET_DEVICE_OP(dev_ops, iw_connect);
+ SET_DEVICE_OP(dev_ops, iw_create_listen);
+ SET_DEVICE_OP(dev_ops, iw_destroy_listen);
+ SET_DEVICE_OP(dev_ops, iw_get_qp);
+ SET_DEVICE_OP(dev_ops, iw_reject);
+ SET_DEVICE_OP(dev_ops, iw_rem_ref);
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
@@ -1823,7 +2423,9 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, unmap_fmr);
+ SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_pd);
+ SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
}
EXPORT_SYMBOL(ib_set_device_ops);
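With the iw_* callbacks folded into ib_device_ops by this series, an iWarp provider publishes them through the same ib_set_device_ops() call it already uses for verbs. A heavily trimmed, hypothetical provider-side sketch:

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

static int demo_iw_connect(struct iw_cm_id *cm_id,
                           struct iw_cm_conn_param *conn_param)
{
        return -EOPNOTSUPP;     /* a real driver starts the connection here */
}

static const struct ib_device_ops demo_dev_ops = {
        .iw_connect = demo_iw_connect,
        /* .iw_accept, .iw_reject, .iw_create_listen, ... filled in likewise */
};

static void demo_set_ops(struct ib_device *ibdev)
{
        ib_set_device_ops(ibdev, &demo_dev_ops);
}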
@@ -1903,12 +2505,20 @@ static int __init ib_core_init(void)
goto err_sa;
}
+ ret = register_pernet_device(&rdma_dev_net_ops);
+ if (ret) {
+ pr_warn("Couldn't init compat dev. ret %d\n", ret);
+ goto err_compat;
+ }
+
nldev_init();
rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
roce_gid_mgmt_init();
return 0;
+err_compat:
+ unregister_lsm_notifier(&ibdev_lsm_nb);
err_sa:
ib_sa_cleanup();
err_mad:
@@ -1933,6 +2543,7 @@ static void __exit ib_core_cleanup(void)
roce_gid_mgmt_cleanup();
nldev_exit();
rdma_nl_unregister(RDMA_NL_LS);
+ unregister_pernet_device(&rdma_dev_net_ops);
unregister_lsm_notifier(&ibdev_lsm_nb);
ib_sa_cleanup();
ib_mad_cleanup();
@@ -1950,5 +2561,8 @@ static void __exit ib_core_cleanup(void)
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
-subsys_initcall(ib_core_init);
+/* ib_core relies on the netdev stack registering the net_ns_type_operations
+ * ns kobject type before ib_core initialization.
+ */
+fs_initcall(ib_core_init);
module_exit(ib_core_cleanup);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 732637c913d9..72141c5b7c95 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -394,7 +394,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* destroy the listening endpoint */
- cm_id->device->iwcm->destroy_listen(cm_id);
+ cm_id->device->ops.iw_destroy_listen(cm_id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_ESTABLISHED:
@@ -417,7 +417,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
*/
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_id->device->iwcm->reject(cm_id, NULL, 0);
+ cm_id->device->ops.iw_reject(cm_id, NULL, 0);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_CONN_SENT:
@@ -427,7 +427,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
break;
}
if (cm_id_priv->qp) {
- cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+ cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
cm_id_priv->qp = NULL;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -504,7 +504,7 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
const char *devname = dev_name(&cm_id->device->dev);
- const char *ifname = cm_id->device->iwcm->ifname;
+ const char *ifname = cm_id->device->iw_ifname;
struct iwpm_dev_data pm_reg_msg = {};
struct iwpm_sa_data pm_msg;
int status;
@@ -526,7 +526,7 @@ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
cm_id->mapped = true;
pm_msg.loc_addr = cm_id->local_addr;
pm_msg.rem_addr = cm_id->remote_addr;
- pm_msg.flags = (cm_id->device->iwcm->driver_flags & IW_F_NO_PORT_MAP) ?
+ pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
IWPM_FLAGS_NO_PORT_MAP : 0;
if (active)
status = iwpm_add_and_query_mapping(&pm_msg,
@@ -577,7 +577,8 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = iw_cm_map(cm_id, false);
if (!ret)
- ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+ ret = cm_id->device->ops.iw_create_listen(cm_id,
+ backlog);
if (ret)
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -617,7 +618,7 @@ int iw_cm_reject(struct iw_cm_id *cm_id,
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->reject(cm_id, private_data,
+ ret = cm_id->device->ops.iw_reject(cm_id, private_data,
private_data_len);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
@@ -653,25 +654,25 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
return -EINVAL;
}
/* Get the ib_qp given the QPN */
- qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
+ qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
wake_up_all(&cm_id_priv->connect_wait);
return -EINVAL;
}
- cm_id->device->iwcm->add_ref(qp);
+ cm_id->device->ops.iw_add_ref(qp);
cm_id_priv->qp = qp;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->accept(cm_id, iw_param);
+ ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
if (ret) {
/* An error on accept precludes provider events */
BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->qp) {
- cm_id->device->iwcm->rem_ref(qp);
+ cm_id->device->ops.iw_rem_ref(qp);
cm_id_priv->qp = NULL;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -712,25 +713,25 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
}
/* Get the ib_qp given the QPN */
- qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
+ qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
ret = -EINVAL;
goto err;
}
- cm_id->device->iwcm->add_ref(qp);
+ cm_id->device->ops.iw_add_ref(qp);
cm_id_priv->qp = qp;
cm_id_priv->state = IW_CM_STATE_CONN_SENT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = iw_cm_map(cm_id, true);
if (!ret)
- ret = cm_id->device->iwcm->connect(cm_id, iw_param);
+ ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
if (!ret)
return 0; /* success */
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->qp) {
- cm_id->device->iwcm->rem_ref(qp);
+ cm_id->device->ops.iw_rem_ref(qp);
cm_id_priv->qp = NULL;
}
cm_id_priv->state = IW_CM_STATE_IDLE;
@@ -895,7 +896,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
} else {
/* REJECTED or RESET */
- cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+ cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
cm_id_priv->qp = NULL;
cm_id_priv->state = IW_CM_STATE_IDLE;
}
@@ -946,7 +947,7 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->qp) {
- cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+ cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
cm_id_priv->qp = NULL;
}
switch (cm_id_priv->state) {
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a5d2a20ee697..41929bb83739 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -506,14 +506,14 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
int ret;
const char *err_str = "";
- ret = nlmsg_validate(cb->nlh, nlh_len, policy_max - 1, nlmsg_policy,
- NULL);
+ ret = nlmsg_validate_deprecated(cb->nlh, nlh_len, policy_max - 1,
+ nlmsg_policy, NULL);
if (ret) {
err_str = "Invalid attribute";
goto parse_nlmsg_error;
}
- ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max - 1,
- nlmsg_policy, NULL);
+ ret = nlmsg_parse_deprecated(cb->nlh, nlh_len, nltb, policy_max - 1,
+ nlmsg_policy, NULL);
if (ret) {
err_str = "Unable to parse the nlmsg";
goto parse_nlmsg_error;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index e742a6a2c138..cc99479b2c09 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2009 HNR Consulting. All rights reserved.
- * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014,2018 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -38,10 +38,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dma-mapping.h>
-#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
+#include <linux/xarray.h>
#include <rdma/ib_cache.h>
#include "mad_priv.h"
@@ -51,6 +51,32 @@
#include "opa_smi.h"
#include "agent.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/ib_mad.h>
+
+#ifdef CONFIG_TRACEPOINTS
+static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_qp_info *qp_info,
+ struct trace_event_raw_ib_mad_send_template *entry)
+{
+ u16 pkey;
+ struct ib_device *dev = qp_info->port_priv->device;
+ u8 pnum = qp_info->port_priv->port_num;
+ struct ib_ud_wr *wr = &mad_send_wr->send_wr;
+ struct rdma_ah_attr attr = {};
+
+ rdma_query_ah(wr->ah, &attr);
+
+ /* These are common */
+ entry->sl = attr.sl;
+ ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
+ entry->pkey = pkey;
+ entry->rqpn = wr->remote_qpn;
+ entry->rqkey = wr->remote_qkey;
+ entry->dlid = rdma_ah_get_dlid(&attr);
+}
+#endif
+
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
@@ -59,12 +85,9 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
-/*
- * The mlx4 driver uses the top byte to distinguish which virtual function
- * generated the MAD, so we must avoid using it.
- */
-#define AGENT_ID_LIMIT (1 << 24)
-static DEFINE_IDR(ib_mad_clients);
+/* Client ID 0 is used for snoop-only clients */
+static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
+static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;
/* Port list lock */
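DEFINE_XARRAY_ALLOC1 plus xa_alloc_cyclic() is the replacement for the idr_alloc_cyclic() call below: index 0 is never handed out, IDs wrap within the given limit, and a return value of 1 only signals that the counter wrapped. A hypothetical sketch of the allocation:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(clients);   /* index 0 is never allocated */
static u32 next_client_id;

static int register_client(void *client, u32 *out_id)
{
        int ret;

        /* 0 on success, 1 if the counter wrapped, negative errno on failure. */
        ret = xa_alloc_cyclic(&clients, out_id, client,
                              XA_LIMIT(1, (1 << 24) - 1),
                              &next_client_id, GFP_KERNEL);
        return ret < 0 ? ret : 0;
}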
@@ -389,18 +412,17 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
goto error4;
}
- idr_preload(GFP_KERNEL);
- idr_lock(&ib_mad_clients);
- ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
- AGENT_ID_LIMIT, GFP_ATOMIC);
- idr_unlock(&ib_mad_clients);
- idr_preload_end();
-
+ /*
+ * The mlx4 driver uses the top byte to distinguish which virtual
+ * function generated the MAD, so we must avoid using it.
+ */
+ ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
+ mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
+ &ib_mad_client_next, GFP_KERNEL);
if (ret2 < 0) {
ret = ERR_PTR(ret2);
goto error5;
}
- mad_agent_priv->agent.hi_tid = ret2;
/*
* Make sure MAD registration (if supplied)
@@ -445,12 +467,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
}
spin_unlock_irq(&port_priv->reg_lock);
+ trace_ib_mad_create_agent(mad_agent_priv);
return &mad_agent_priv->agent;
error6:
spin_unlock_irq(&port_priv->reg_lock);
- idr_lock(&ib_mad_clients);
- idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
- idr_unlock(&ib_mad_clients);
+ xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
error5:
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
@@ -602,6 +623,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
struct ib_mad_port_private *port_priv;
/* Note that we could still be handling received MADs */
+ trace_ib_mad_unregister_agent(mad_agent_priv);
/*
* Canceling all sends results in dropping received response
@@ -614,9 +636,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
spin_lock_irq(&port_priv->reg_lock);
remove_mad_reg_req(mad_agent_priv);
spin_unlock_irq(&port_priv->reg_lock);
- idr_lock(&ib_mad_clients);
- idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
- idr_unlock(&ib_mad_clients);
+ xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
flush_workqueue(port_priv->wq);
ib_cancel_rmpp_recvs(mad_agent_priv);
@@ -821,6 +841,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
u32 opa_drslid;
+ trace_ib_mad_handle_out_opa_smi(opa_smp);
+
if ((opa_get_smp_direction(opa_smp)
? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
OPA_LID_PERMISSIVE &&
@@ -846,6 +868,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
goto out;
} else {
+ trace_ib_mad_handle_out_ib_smi(smp);
+
if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
IB_LID_PERMISSIVE &&
smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
@@ -1223,6 +1247,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
+ trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
NULL);
list = &qp_info->send_queue.list;
@@ -1756,7 +1781,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
*/
hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
rcu_read_lock();
- mad_agent = idr_find(&ib_mad_clients, hi_tid);
+ mad_agent = xa_load(&ib_mad_clients, hi_tid);
if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
mad_agent = NULL;
rcu_read_unlock();
@@ -2077,6 +2102,8 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
enum smi_forward_action retsmi;
struct ib_smp *smp = (struct ib_smp *)recv->mad;
+ trace_ib_mad_handle_ib_smi(smp);
+
if (smi_handle_dr_smp_recv(smp,
rdma_cap_ib_switch(port_priv->device),
port_num,
@@ -2162,6 +2189,8 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
enum smi_forward_action retsmi;
struct opa_smp *smp = (struct opa_smp *)recv->mad;
+ trace_ib_mad_handle_opa_smi(smp);
+
if (opa_smi_handle_dr_smp_recv(smp,
rdma_cap_ib_switch(port_priv->device),
port_num,
@@ -2286,6 +2315,9 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
goto out;
+ trace_ib_mad_recv_done_handler(qp_info, wc,
+ (struct ib_mad_hdr *)recv->mad);
+
mad_size = recv->mad_size;
response = alloc_mad_private(mad_size, GFP_KERNEL);
if (!response)
@@ -2332,6 +2364,7 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
if (mad_agent) {
+ trace_ib_mad_recv_done_agent(mad_agent);
ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
/*
* recv is freed up in error cases in ib_mad_complete_recv
@@ -2496,6 +2529,9 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
send_queue = mad_list->mad_queue;
qp_info = send_queue->qp_info;
+ trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
+ trace_ib_mad_send_done_handler(mad_send_wr, wc);
+
retry:
ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
mad_send_wr->header_mapping,
@@ -2527,6 +2563,7 @@ retry:
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
if (queued_send_wr) {
+ trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
NULL);
if (ret) {
@@ -2574,6 +2611,7 @@ static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
if (mad_send_wr->retry) {
/* Repost send */
mad_send_wr->retry = 0;
+ trace_ib_mad_error_handler(mad_send_wr, qp_info);
ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
NULL);
if (!ret)
@@ -3356,9 +3394,6 @@ int ib_mad_init(void)
INIT_LIST_HEAD(&ib_mad_port_list);
- /* Client ID 0 is used for snoop-only clients */
- idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
-
if (ib_register_client(&mad_client)) {
pr_err("Couldn't register ib_mad client\n");
return -EINVAL;
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 216509036aa8..956b3a7dfed7 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -73,14 +73,14 @@ struct ib_mad_private_header {
struct ib_mad_recv_wc recv_wc;
struct ib_wc wc;
u64 mapping;
-} __attribute__ ((packed));
+} __packed;
struct ib_mad_private {
struct ib_mad_private_header header;
size_t mad_size;
struct ib_grh grh;
u8 mad[0];
-} __attribute__ ((packed));
+} __packed;
struct ib_rmpp_segment {
struct list_head list;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index d50ff70bb24b..cd338ddc4a39 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -804,7 +804,6 @@ static void mcast_event_handler(struct ib_event_handler *handler,
switch (event->event) {
case IB_EVENT_PORT_ERR:
case IB_EVENT_LID_CHANGE:
- case IB_EVENT_SM_CHANGE:
case IB_EVENT_CLIENT_REREGISTER:
mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
break;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 11ed58d3fce5..69188cbbd99b 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -116,6 +116,10 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING,
.len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
+ [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
+ [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -198,6 +202,8 @@ static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
char fw[IB_FW_VERSION_NAME_MAX];
+ int ret = 0;
+ u8 port;
if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
@@ -226,7 +232,25 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
return -EMSGSIZE;
if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
return -EMSGSIZE;
- return 0;
+
+ /*
+ * Link type is determined from the first port. An mlx4 device, which
+ * can potentially have two different link types for the same IB
+ * device, is considered something better avoided in the future.
+ */
+ port = rdma_start_port(device);
+ if (rdma_cap_opa_mad(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
+ else if (rdma_protocol_ib(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
+ else if (rdma_protocol_iwarp(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
+ else if (rdma_protocol_roce(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
+ else if (rdma_protocol_usnic(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
+ "usnic");
+ return ret;
}
static int fill_port_info(struct sk_buff *msg,
@@ -292,7 +316,8 @@ static int fill_res_info_entry(struct sk_buff *msg,
{
struct nlattr *entry_attr;
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+ entry_attr = nla_nest_start_noflag(msg,
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
if (!entry_attr)
return -EMSGSIZE;
@@ -327,7 +352,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
if (!table_attr)
return -EMSGSIZE;
@@ -607,14 +632,14 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(index);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
if (!device)
return -EINVAL;
@@ -652,13 +677,13 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
- extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(index);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
if (!device)
return -EINVAL;
@@ -668,9 +693,20 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
IB_DEVICE_NAME_MAX);
err = ib_device_rename(device, name);
+ goto done;
+ }
+
+ if (tb[RDMA_NLDEV_NET_NS_FD]) {
+ u32 ns_fd;
+
+ ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
+ err = ib_device_set_netns_put(skb, device, ns_fd);
+ goto put_done;
}
+done:
ib_device_put(device);
+put_done:
return err;
}
@@ -706,7 +742,7 @@ static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
/*
* There is no need to take lock, because
- * we are relying on ib_core's lists_rwsem
+ * we are relying on ib_core's locking.
*/
return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
@@ -721,15 +757,15 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 port;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err ||
!tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
!tb[RDMA_NLDEV_ATTR_PORT_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(index);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
if (!device)
return -EINVAL;
@@ -777,13 +813,13 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
int err;
unsigned int p;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(ifindex);
+ device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
if (!device)
return -EINVAL;
@@ -832,13 +868,13 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(index);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
if (!device)
return -EINVAL;
@@ -886,7 +922,6 @@ static int _nldev_res_get_dumpit(struct ib_device *device,
nlmsg_cancel(skb, nlh);
goto out;
}
-
nlmsg_end(skb, nlh);
idx++;
@@ -981,13 +1016,13 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(index);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
if (!device)
return -EINVAL;
@@ -1070,8 +1105,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
u32 index, port = 0;
bool filled = false;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
/*
* Right now, we are expecting the device index to get res information,
* but it is possible to extend this code to return all devices in
@@ -1084,7 +1119,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(index);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
if (!device)
return -EINVAL;
@@ -1108,7 +1143,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
goto err;
}
- table_attr = nla_nest_start(skb, fe->nldev_attr);
+ table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
if (!table_attr) {
ret = -EMSGSIZE;
goto err;
@@ -1134,7 +1169,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
filled = true;
- entry_attr = nla_nest_start(skb, fe->entry);
+ entry_attr = nla_nest_start_noflag(skb, fe->entry);
if (!entry_attr) {
ret = -EMSGSIZE;
rdma_restrack_put(res);
@@ -1249,8 +1284,8 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
char type[IFNAMSIZ];
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
!tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
return -EINVAL;
@@ -1293,13 +1328,13 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(index);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
if (!device)
return -EINVAL;
@@ -1312,6 +1347,58 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
}
+static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct sk_buff *msg;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err)
+ return err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_SYS_GET),
+ 0, 0);
+
+ err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
+ (u8)ib_devices_shared_netns);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+ nlmsg_end(msg, nlh);
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+}
+
+static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ u8 enable;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
+ return -EINVAL;
+
+ enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
+ /* Only 0 and 1 are supported */
+ if (enable > 1)
+ return -EINVAL;
+
+ err = rdma_compatdev_set(enable);
+ return err;
+}
+
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
@@ -1357,6 +1444,13 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_res_get_pd_doit,
.dump = nldev_res_get_pd_dumpit,
},
+ [RDMA_NLDEV_CMD_SYS_GET] = {
+ .doit = nldev_sys_get_doit,
+ },
+ [RDMA_NLDEV_CMD_SYS_SET] = {
+ .doit = nldev_set_sys_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
};
void __init nldev_init(void)
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 778375ff664e..ccf4d069c25c 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -125,9 +125,10 @@ static void assert_uverbs_usecnt(struct ib_uobject *uobj,
* and consumes the kref on the uobj.
*/
static int uverbs_destroy_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason reason)
+ enum rdma_remove_reason reason,
+ struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_file *ufile = uobj->ufile;
+ struct ib_uverbs_file *ufile = attrs->ufile;
unsigned long flags;
int ret;
@@ -135,7 +136,8 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
if (uobj->object) {
- ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason);
+ ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
+ attrs);
if (ret) {
if (ib_is_destroy_retryable(ret, reason, uobj))
return ret;
@@ -196,9 +198,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
* version requires the caller to have already obtained an
* LOOKUP_DESTROY uobject kref.
*/
-int uobj_destroy(struct ib_uobject *uobj)
+int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_file *ufile = uobj->ufile;
+ struct ib_uverbs_file *ufile = attrs->ufile;
int ret;
down_read(&ufile->hw_destroy_rwsem);
@@ -207,7 +209,7 @@ int uobj_destroy(struct ib_uobject *uobj)
if (ret)
goto out_unlock;
- ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY);
+ ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs);
if (ret) {
atomic_set(&uobj->usecnt, 0);
goto out_unlock;
@@ -224,18 +226,17 @@ out_unlock:
* uverbs_put_destroy.
*/
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
- u32 id,
- const struct uverbs_attr_bundle *attrs)
+ u32 id, struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj;
int ret;
uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
- UVERBS_LOOKUP_DESTROY);
+ UVERBS_LOOKUP_DESTROY, attrs);
if (IS_ERR(uobj))
return uobj;
- ret = uobj_destroy(uobj);
+ ret = uobj_destroy(uobj, attrs);
if (ret) {
rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
return ERR_PTR(ret);
@@ -249,7 +250,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
* (negative errno on failure). For use by callers that do not need the uobj.
*/
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
- const struct uverbs_attr_bundle *attrs)
+ struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj;
@@ -296,25 +297,13 @@ static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
static int idr_add_uobj(struct ib_uobject *uobj)
{
- int ret;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&uobj->ufile->idr_lock);
-
- /*
- * We start with allocating an idr pointing to NULL. This represents an
- * object which isn't initialized yet. We'll replace it later on with
- * the real object once we commit.
- */
- ret = idr_alloc(&uobj->ufile->idr, NULL, 0,
- min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
- if (ret >= 0)
- uobj->id = ret;
-
- spin_unlock(&uobj->ufile->idr_lock);
- idr_preload_end();
-
- return ret < 0 ? ret : 0;
+ /*
+ * We start with allocating an idr pointing to NULL. This represents an
+ * object which isn't initialized yet. We'll replace it later on with
+ * the real object once we commit.
+ */
+ return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
+ GFP_KERNEL);
}
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
@@ -324,29 +313,20 @@ lookup_get_idr_uobject(const struct uverbs_api_object *obj,
enum rdma_lookup_mode mode)
{
struct ib_uobject *uobj;
- unsigned long idrno = id;
if (id < 0 || id > ULONG_MAX)
return ERR_PTR(-EINVAL);
rcu_read_lock();
- /* object won't be released as we're protected in rcu */
- uobj = idr_find(&ufile->idr, idrno);
- if (!uobj) {
- uobj = ERR_PTR(-ENOENT);
- goto free;
- }
-
/*
* The idr_find is guaranteed to return a pointer to something that
* isn't freed yet, or NULL, as the free after idr_remove goes through
* kfree_rcu(). However the object may still have been released and
* kfree() could be called at any time.
*/
- if (!kref_get_unless_zero(&uobj->ref))
+ uobj = xa_load(&ufile->idr, id);
+ if (!uobj || !kref_get_unless_zero(&uobj->ref))
uobj = ERR_PTR(-ENOENT);
-
-free:
rcu_read_unlock();
return uobj;
}
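lookup_get_idr_uobject() above is the classic RCU lookup: the entry cannot be freed while the RCU read lock is held (frees go through kfree_rcu()), but its refcount may already have hit zero, so kref_get_unless_zero() decides whether the object is still usable. A compact sketch with hypothetical types:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

struct obj {
        struct kref ref;
        struct rcu_head rcu;
};

static DEFINE_XARRAY_ALLOC(objects);

/* Callers drop their reference with kref_put(&o->ref, obj_release). */
static void obj_release(struct kref *ref)
{
        struct obj *o = container_of(ref, struct obj, ref);

        /* The object was already removed from the XArray; free it via RCU. */
        kfree_rcu(o, rcu);
}

static struct obj *lookup_obj(unsigned long id)
{
        struct obj *o;

        rcu_read_lock();
        o = xa_load(&objects, id);
        /* Accept the entry only if it still has a live reference. */
        if (o && !kref_get_unless_zero(&o->ref))
                o = NULL;
        rcu_read_unlock();
        return o;
}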
@@ -393,12 +373,13 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
struct ib_uverbs_file *ufile, s64 id,
- enum rdma_lookup_mode mode)
+ enum rdma_lookup_mode mode,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj;
int ret;
- if (IS_ERR(obj) && PTR_ERR(obj) == -ENOMSG) {
+ if (obj == ERR_PTR(-ENOMSG)) {
/* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
if (IS_ERR(uobj))
@@ -431,6 +412,8 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
ret = uverbs_try_lock_object(uobj, mode);
if (ret)
goto free;
+ if (attrs)
+ attrs->context = uobj->context;
return uobj;
free:
@@ -438,38 +421,6 @@ free:
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
}
-struct ib_uobject *_uobj_get_read(enum uverbs_default_objects type,
- u32 object_id,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uobject *uobj;
-
- uobj = rdma_lookup_get_uobject(uobj_get_type(attrs, type), attrs->ufile,
- object_id, UVERBS_LOOKUP_READ);
- if (IS_ERR(uobj))
- return uobj;
-
- attrs->context = uobj->context;
-
- return uobj;
-}
-
-struct ib_uobject *_uobj_get_write(enum uverbs_default_objects type,
- u32 object_id,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uobject *uobj;
-
- uobj = rdma_lookup_get_uobject(uobj_get_type(attrs, type), attrs->ufile,
- object_id, UVERBS_LOOKUP_WRITE);
-
- if (IS_ERR(uobj))
- return uobj;
-
- attrs->context = uobj->context;
-
- return uobj;
-}
static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
@@ -489,14 +440,12 @@ alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
RDMACG_RESOURCE_HCA_OBJECT);
if (ret)
- goto idr_remove;
+ goto remove;
return uobj;
-idr_remove:
- spin_lock(&ufile->idr_lock);
- idr_remove(&ufile->idr, uobj->id);
- spin_unlock(&ufile->idr_lock);
+remove:
+ xa_erase(&ufile->idr, uobj->id);
uobj_put:
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
@@ -526,7 +475,8 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
}
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile)
+ struct ib_uverbs_file *ufile,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *ret;
@@ -546,6 +496,8 @@ struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
up_read(&ufile->hw_destroy_rwsem);
return ret;
}
+ if (attrs)
+ attrs->context = ret->context;
return ret;
}
@@ -554,18 +506,17 @@ static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
RDMACG_RESOURCE_HCA_OBJECT);
- spin_lock(&uobj->ufile->idr_lock);
- idr_remove(&uobj->ufile->idr, uobj->id);
- spin_unlock(&uobj->ufile->idr_lock);
+ xa_erase(&uobj->ufile->idr, uobj->id);
}
static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
const struct uverbs_obj_idr_type *idr_type =
container_of(uobj->uapi_object->type_attrs,
struct uverbs_obj_idr_type, type);
- int ret = idr_type->destroy_object(uobj, why);
+ int ret = idr_type->destroy_object(uobj, why, attrs);
/*
* We can only fail gracefully if the user requested to destroy the
@@ -586,9 +537,7 @@ static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
- spin_lock(&uobj->ufile->idr_lock);
- idr_remove(&uobj->ufile->idr, uobj->id);
- spin_unlock(&uobj->ufile->idr_lock);
+ xa_erase(&uobj->ufile->idr, uobj->id);
/* Matches the kref in alloc_commit_idr_uobject */
uverbs_uobject_put(uobj);
}
@@ -599,7 +548,8 @@ static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
}
static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
const struct uverbs_obj_fd_type *fd_type = container_of(
uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
@@ -618,17 +568,17 @@ static void remove_handle_fd_uobject(struct ib_uobject *uobj)
static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
struct ib_uverbs_file *ufile = uobj->ufile;
+ void *old;
- spin_lock(&ufile->idr_lock);
/*
* We already allocated this IDR with a NULL object, so
* this shouldn't fail.
*
- * NOTE: Once we set the IDR we loose ownership of our kref on uobj.
+ * NOTE: Storing the uobj transfers our kref on uobj to the XArray.
* It will be put by remove_commit_idr_uobject()
*/
- WARN_ON(idr_replace(&ufile->idr, uobj, uobj->id));
- spin_unlock(&ufile->idr_lock);
+ old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
+ WARN_ON(old != NULL);
return 0;
}
@@ -675,15 +625,16 @@ static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
* caller can no longer assume uobj is valid. If this function fails it
* destroys the uboject, including the attached HW object.
*/
-int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
+int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_file *ufile = uobj->ufile;
+ struct ib_uverbs_file *ufile = attrs->ufile;
int ret;
/* alloc_commit consumes the uobj kref */
ret = uobj->uapi_object->type_class->alloc_commit(uobj);
if (ret) {
- uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
up_read(&ufile->hw_destroy_rwsem);
return ret;
}
@@ -707,12 +658,13 @@ int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
* This consumes the kref for uobj. It is up to the caller to unwind the HW
* object and anything else connected to uobj before calling this.
*/
-void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
+void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_file *ufile = uobj->ufile;
uobj->object = NULL;
- uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
@@ -760,29 +712,28 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
- spin_lock_init(&ufile->idr_lock);
- idr_init(&ufile->idr);
+ xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
}
void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
struct ib_uobject *entry;
- int id;
+ unsigned long id;
/*
* At this point uverbs_cleanup_ufile() is guaranteed to have run, and
- * there are no HW objects left, however the IDR is still populated
+ * there are no HW objects left, however the xarray is still populated
* with anything that has not been cleaned up by userspace. Since the
* kref on ufile is 0, nothing is allowed to call lookup_get.
*
* This is an optimized equivalent to remove_handle_idr_uobject
*/
- idr_for_each_entry(&ufile->idr, entry, id) {
+ xa_for_each(&ufile->idr, id, entry) {
WARN_ON(entry->object);
uverbs_uobject_put(entry);
}
- idr_destroy(&ufile->idr);
+ xa_destroy(&ufile->idr);
}
const struct uverbs_obj_type_class uverbs_idr_class = {
@@ -814,6 +765,10 @@ void uverbs_close_fd(struct file *f)
{
struct ib_uobject *uobj = f->private_data;
struct ib_uverbs_file *ufile = uobj->ufile;
+ struct uverbs_attr_bundle attrs = {
+ .context = uobj->context,
+ .ufile = ufile,
+ };
if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
/*
@@ -823,7 +778,7 @@ void uverbs_close_fd(struct file *f)
* write lock here, or we have a kernel bug.
*/
WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
- uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE);
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs);
up_read(&ufile->hw_destroy_rwsem);
}
@@ -872,6 +827,7 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
{
struct ib_uobject *obj, *next_obj;
int ret = -EINVAL;
+ struct uverbs_attr_bundle attrs = { .ufile = ufile };
/*
* This shouldn't run while executing other commands on this
@@ -883,12 +839,13 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
* other threads (which might still use the FDs) chance to run.
*/
list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
+ attrs.context = obj->context;
/*
* if we hit this WARN_ON, that means we are
* racing with a lookup_get.
*/
WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
- if (!uverbs_destroy_uobject(obj, reason))
+ if (!uverbs_destroy_uobject(obj, reason, &attrs))
ret = 0;
else
atomic_set(&obj->usecnt, 0);
@@ -967,26 +924,25 @@ const struct uverbs_obj_type_class uverbs_fd_class = {
EXPORT_SYMBOL(uverbs_fd_class);
struct ib_uobject *
-uverbs_get_uobject_from_file(u16 object_id,
- struct ib_uverbs_file *ufile,
- enum uverbs_obj_access access, s64 id)
+uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
+ s64 id, struct uverbs_attr_bundle *attrs)
{
const struct uverbs_api_object *obj =
- uapi_get_object(ufile->device->uapi, object_id);
+ uapi_get_object(attrs->ufile->device->uapi, object_id);
switch (access) {
case UVERBS_ACCESS_READ:
- return rdma_lookup_get_uobject(obj, ufile, id,
- UVERBS_LOOKUP_READ);
+ return rdma_lookup_get_uobject(obj, attrs->ufile, id,
+ UVERBS_LOOKUP_READ, attrs);
case UVERBS_ACCESS_DESTROY:
/* Actual destruction is done inside uverbs_handle_method */
- return rdma_lookup_get_uobject(obj, ufile, id,
- UVERBS_LOOKUP_DESTROY);
+ return rdma_lookup_get_uobject(obj, attrs->ufile, id,
+ UVERBS_LOOKUP_DESTROY, attrs);
case UVERBS_ACCESS_WRITE:
- return rdma_lookup_get_uobject(obj, ufile, id,
- UVERBS_LOOKUP_WRITE);
+ return rdma_lookup_get_uobject(obj, attrs->ufile, id,
+ UVERBS_LOOKUP_WRITE, attrs);
case UVERBS_ACCESS_NEW:
- return rdma_alloc_begin_uobject(obj, ufile);
+ return rdma_alloc_begin_uobject(obj, attrs->ufile, attrs);
default:
WARN_ON(true);
return ERR_PTR(-EOPNOTSUPP);
@@ -994,8 +950,8 @@ uverbs_get_uobject_from_file(u16 object_id,
}
int uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access,
- bool commit)
+ enum uverbs_obj_access access, bool commit,
+ struct uverbs_attr_bundle *attrs)
{
int ret = 0;
@@ -1018,9 +974,9 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
break;
case UVERBS_ACCESS_NEW:
if (commit)
- ret = rdma_alloc_commit_uobject(uobj);
+ ret = rdma_alloc_commit_uobject(uobj, attrs);
else
- rdma_alloc_abort_uobject(uobj);
+ rdma_alloc_abort_uobject(uobj, attrs);
break;
default:
WARN_ON(true);
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 69f8db66925e..5445323629b5 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -48,7 +48,7 @@ struct ib_uverbs_device;
void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
enum rdma_remove_reason reason);
-int uobj_destroy(struct ib_uobject *uobj);
+int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs);
/*
* uverbs_uobject_get is called in order to increase the reference count on
@@ -83,9 +83,8 @@ void uverbs_close_fd(struct file *f);
* uverbs_finalize_objects are called.
*/
struct ib_uobject *
-uverbs_get_uobject_from_file(u16 object_id,
- struct ib_uverbs_file *ufile,
- enum uverbs_obj_access access, s64 id);
+uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
+ s64 id, struct uverbs_attr_bundle *attrs);
/*
* Note that certain finalize stages could return a status:
@@ -103,8 +102,8 @@ uverbs_get_uobject_from_file(u16 object_id,
* object.
*/
int uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access,
- bool commit);
+ enum uverbs_obj_access access, bool commit,
+ struct uverbs_attr_bundle *attrs);
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7925e45ea88a..7d8071c7e564 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
@@ -183,8 +183,7 @@ static struct ib_client sa_client = {
.remove = ib_sa_remove_one
};
-static DEFINE_SPINLOCK(idr_lock);
-static DEFINE_IDR(query_idr);
+static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
@@ -1028,8 +1027,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
!(NETLINK_CB(skb).sk))
return -EPERM;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy, NULL);
attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
if (ret || !attr)
goto settimeout_out;
@@ -1080,8 +1079,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
return 0;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy, NULL);
if (ret)
return 0;
@@ -1180,14 +1179,14 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
struct ib_mad_agent *agent;
struct ib_mad_send_buf *mad_buf;
- spin_lock_irqsave(&idr_lock, flags);
- if (idr_find(&query_idr, id) != query) {
- spin_unlock_irqrestore(&idr_lock, flags);
+ xa_lock_irqsave(&queries, flags);
+ if (xa_load(&queries, id) != query) {
+ xa_unlock_irqrestore(&queries, flags);
return;
}
agent = query->port->agent;
mad_buf = query->mad_buf;
- spin_unlock_irqrestore(&idr_lock, flags);
+ xa_unlock_irqrestore(&queries, flags);
/*
* If the query is still on the netlink request list, schedule
@@ -1363,21 +1362,14 @@ static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
gfp_t gfp_mask)
{
- bool preload = gfpflags_allow_blocking(gfp_mask);
unsigned long flags;
int ret, id;
- if (preload)
- idr_preload(gfp_mask);
- spin_lock_irqsave(&idr_lock, flags);
-
- id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
-
- spin_unlock_irqrestore(&idr_lock, flags);
- if (preload)
- idr_preload_end();
- if (id < 0)
- return id;
+ xa_lock_irqsave(&queries, flags);
+ ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
+ xa_unlock_irqrestore(&queries, flags);
+ if (ret < 0)
+ return ret;
query->mad_buf->timeout_ms = timeout_ms;
query->mad_buf->context[0] = query;
@@ -1394,9 +1386,9 @@ static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
ret = ib_post_send_mad(query->mad_buf, NULL);
if (ret) {
- spin_lock_irqsave(&idr_lock, flags);
- idr_remove(&query_idr, id);
- spin_unlock_irqrestore(&idr_lock, flags);
+ xa_lock_irqsave(&queries, flags);
+ __xa_erase(&queries, id);
+ xa_unlock_irqrestore(&queries, flags);
}
/*
@@ -2188,9 +2180,9 @@ static void send_handler(struct ib_mad_agent *agent,
break;
}
- spin_lock_irqsave(&idr_lock, flags);
- idr_remove(&query_idr, query->id);
- spin_unlock_irqrestore(&idr_lock, flags);
+ xa_lock_irqsave(&queries, flags);
+ __xa_erase(&queries, query->id);
+ xa_unlock_irqrestore(&queries, flags);
free_mad(query);
if (query->client)
@@ -2475,5 +2467,5 @@ void ib_sa_cleanup(void)
destroy_workqueue(ib_nl_wq);
mcast_cleanup();
ib_unregister_client(&sa_client);
- idr_destroy(&query_idr);
+ WARN_ON(!xa_empty(&queries));
}
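
The sa_query.c hunks above collapse the old idr + spinlock pair into a single allocating XArray with an IRQ-safe internal lock. A minimal sketch of the same pattern, using hypothetical names ('items', 'struct item') rather than the SA query structures:

#include <linux/xarray.h>

/* Allocating XArray whose internal lock is taken IRQ-safe, like 'queries'. */
static DEFINE_XARRAY_FLAGS(items, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

struct item {
	u32 id;
};

static int item_insert(struct item *it, gfp_t gfp)
{
	unsigned long flags;
	int ret;

	/* __xa_alloc() expects the caller to already hold the XArray lock. */
	xa_lock_irqsave(&items, flags);
	ret = __xa_alloc(&items, &it->id, it, xa_limit_32b, gfp);
	xa_unlock_irqrestore(&items, flags);
	return ret;
}

static void item_remove(struct item *it)
{
	unsigned long flags;

	xa_lock_irqsave(&items, flags);
	/* Erase only if the id still maps to this entry (cf. ib_sa_cancel_query). */
	if (xa_load(&items, it->id) == it)
		__xa_erase(&items, it->id);
	xa_unlock_irqrestore(&items, flags);
}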
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 9b6a065bdfa5..c78d0c9646ae 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -349,10 +349,15 @@ static struct attribute *port_default_attrs[] = {
static size_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
{
- if (!gid_attr->ndev)
- return -EINVAL;
-
- return sprintf(buf, "%s\n", gid_attr->ndev->name);
+ struct net_device *ndev;
+ size_t ret = -EINVAL;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(gid_attr->ndev);
+ if (ndev)
+ ret = sprintf(buf, "%s\n", ndev->name);
+ rcu_read_unlock();
+ return ret;
}
static size_t print_gid_type(const struct ib_gid_attr *gid_attr, char *buf)
@@ -1015,8 +1020,10 @@ err_free_stats:
return;
}
-static int add_port(struct ib_device *device, int port_num)
+static int add_port(struct ib_core_device *coredev, int port_num)
{
+ struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
+ bool is_full_dev = &device->coredev == coredev;
struct ib_port *p;
struct ib_port_attr attr;
int i;
@@ -1034,7 +1041,7 @@ static int add_port(struct ib_device *device, int port_num)
p->port_num = port_num;
ret = kobject_init_and_add(&p->kobj, &port_type,
- device->ports_kobj,
+ coredev->ports_kobj,
"%d", port_num);
if (ret) {
kfree(p);
@@ -1055,7 +1062,7 @@ static int add_port(struct ib_device *device, int port_num)
goto err_put;
}
- if (device->ops.process_mad) {
+ if (device->ops.process_mad && is_full_dev) {
p->pma_table = get_counter_table(device, port_num);
ret = sysfs_create_group(&p->kobj, p->pma_table);
if (ret)
@@ -1111,7 +1118,7 @@ static int add_port(struct ib_device *device, int port_num)
if (ret)
goto err_free_pkey;
- if (device->ops.init_port) {
+ if (device->ops.init_port && is_full_dev) {
ret = device->ops.init_port(device, port_num, &p->kobj);
if (ret)
goto err_remove_pkey;
@@ -1122,10 +1129,10 @@ static int add_port(struct ib_device *device, int port_num)
* port, so holder should be device. Therefore skip per port counter
* initialization.
*/
- if (device->ops.alloc_hw_stats && port_num)
+ if (device->ops.alloc_hw_stats && port_num && is_full_dev)
setup_hw_stats(device, p, port_num);
- list_add_tail(&p->kobj.entry, &device->port_list);
+ list_add_tail(&p->kobj.entry, &coredev->port_list);
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
@@ -1194,6 +1201,7 @@ static ssize_t node_type_show(struct device *device,
case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type);
case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
+ case RDMA_NODE_UNSPECIFIED: return sprintf(buf, "%d: unspecified\n", dev->node_type);
case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
@@ -1279,11 +1287,11 @@ const struct attribute_group ib_dev_attr_group = {
.attrs = ib_dev_attrs,
};
-static void ib_free_port_attrs(struct ib_device *device)
+void ib_free_port_attrs(struct ib_core_device *coredev)
{
struct kobject *p, *t;
- list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ list_for_each_entry_safe(p, t, &coredev->port_list, entry) {
struct ib_port *port = container_of(p, struct ib_port, kobj);
list_del(&p->entry);
@@ -1303,20 +1311,22 @@ static void ib_free_port_attrs(struct ib_device *device)
kobject_put(p);
}
- kobject_put(device->ports_kobj);
+ kobject_put(coredev->ports_kobj);
}
-static int ib_setup_port_attrs(struct ib_device *device)
+int ib_setup_port_attrs(struct ib_core_device *coredev)
{
+ struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
unsigned int port;
int ret;
- device->ports_kobj = kobject_create_and_add("ports", &device->dev.kobj);
- if (!device->ports_kobj)
+ coredev->ports_kobj = kobject_create_and_add("ports",
+ &coredev->dev.kobj);
+ if (!coredev->ports_kobj)
return -ENOMEM;
rdma_for_each_port (device, port) {
- ret = add_port(device, port);
+ ret = add_port(coredev, port);
if (ret)
goto err_put;
}
@@ -1324,7 +1334,7 @@ static int ib_setup_port_attrs(struct ib_device *device)
return 0;
err_put:
- ib_free_port_attrs(device);
+ ib_free_port_attrs(coredev);
return ret;
}
@@ -1332,7 +1342,7 @@ int ib_device_register_sysfs(struct ib_device *device)
{
int ret;
- ret = ib_setup_port_attrs(device);
+ ret = ib_setup_port_attrs(&device->coredev);
if (ret)
return ret;
@@ -1348,5 +1358,48 @@ void ib_device_unregister_sysfs(struct ib_device *device)
free_hsag(&device->dev.kobj, device->hw_stats_ag);
kfree(device->hw_stats);
- ib_free_port_attrs(device);
+ ib_free_port_attrs(&device->coredev);
+}
+
+/**
+ * ib_port_register_module_stat - add module counters under relevant port
+ * of IB device.
+ *
+ * @device: IB device to add counters
+ * @port_num: valid port number
+ * @kobj: pointer to the kobject to initialize
+ * @ktype: pointer to the ktype for this kobject.
+ * @name: the name of the kobject
+ */
+int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+ struct kobject *kobj, struct kobj_type *ktype,
+ const char *name)
+{
+ struct kobject *p, *t;
+ int ret;
+
+ list_for_each_entry_safe(p, t, &device->coredev.port_list, entry) {
+ struct ib_port *port = container_of(p, struct ib_port, kobj);
+
+ if (port->port_num != port_num)
+ continue;
+
+ ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s",
+ name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_port_register_module_stat);
+
+/**
+ * ib_port_unregister_module_stat - release module counters
+ * @kobj: pointer to the kobject to release
+ */
+void ib_port_unregister_module_stat(struct kobject *kobj)
+{
+ kobject_put(kobj);
}
+EXPORT_SYMBOL(ib_port_unregister_module_stat);
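
ib_port_register_module_stat() / ib_port_unregister_module_stat() added above let another module hang its own counters kobject under an existing ports/<N>/ directory of an IB device. A hedged usage sketch; 'my_stats', 'my_stats_ktype' and the counter field are hypothetical driver-side pieces, not part of this patch:

#include <linux/kobject.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_stats {
	struct kobject kobj;
	atomic64_t pkts;
};

static void my_stats_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct my_stats, kobj));
}

static struct kobj_type my_stats_ktype = {
	.release	= my_stats_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

static int my_add_port_stats(struct ib_device *ibdev, u8 port_num)
{
	struct my_stats *s = kzalloc(sizeof(*s), GFP_KERNEL);
	int ret;

	if (!s)
		return -ENOMEM;

	/* Creates ports/<port_num>/my_counters beneath the IB device. */
	ret = ib_port_register_module_stat(ibdev, port_num, &s->kobj,
					   &my_stats_ktype, "my_counters");
	if (ret)
		kobject_put(&s->kobj);	/* drops the init ref, calls release */
	return ret;
}

static void my_del_port_stats(struct my_stats *s)
{
	ib_port_unregister_module_stat(&s->kobj);	/* final kobject_put() */
}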
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 7541fbaf58a3..8e7da2d41fd8 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -42,7 +42,7 @@
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/cdev.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -125,23 +125,22 @@ static struct ib_client ucm_client = {
.remove = ib_ucm_remove_one
};
-static DEFINE_MUTEX(ctx_id_mutex);
-static DEFINE_IDR(ctx_id_table);
+static DEFINE_XARRAY_ALLOC(ctx_id_table);
static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
{
struct ib_ucm_context *ctx;
- mutex_lock(&ctx_id_mutex);
- ctx = idr_find(&ctx_id_table, id);
+ xa_lock(&ctx_id_table);
+ ctx = xa_load(&ctx_id_table, id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
else if (ctx->file != file)
ctx = ERR_PTR(-EINVAL);
else
atomic_inc(&ctx->ref);
- mutex_unlock(&ctx_id_mutex);
+ xa_unlock(&ctx_id_table);
return ctx;
}
@@ -194,10 +193,7 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
ctx->file = file;
INIT_LIST_HEAD(&ctx->events);
- mutex_lock(&ctx_id_mutex);
- ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
- mutex_unlock(&ctx_id_mutex);
- if (ctx->id < 0)
+ if (xa_alloc(&ctx_id_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
goto error;
list_add_tail(&ctx->file_list, &file->ctxs);
@@ -514,9 +510,7 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
err2:
ib_destroy_cm_id(ctx->cm_id);
err1:
- mutex_lock(&ctx_id_mutex);
- idr_remove(&ctx_id_table, ctx->id);
- mutex_unlock(&ctx_id_mutex);
+ xa_erase(&ctx_id_table, ctx->id);
kfree(ctx);
return result;
}
@@ -536,15 +530,15 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- mutex_lock(&ctx_id_mutex);
- ctx = idr_find(&ctx_id_table, cmd.id);
+ xa_lock(&ctx_id_table);
+ ctx = xa_load(&ctx_id_table, cmd.id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
else if (ctx->file != file)
ctx = ERR_PTR(-EINVAL);
else
- idr_remove(&ctx_id_table, ctx->id);
- mutex_unlock(&ctx_id_mutex);
+ __xa_erase(&ctx_id_table, ctx->id);
+ xa_unlock(&ctx_id_table);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1175,7 +1169,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
file->filp = filp;
file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1189,10 +1183,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
struct ib_ucm_context, file_list);
mutex_unlock(&file->file_mutex);
- mutex_lock(&ctx_id_mutex);
- idr_remove(&ctx_id_table, ctx->id);
- mutex_unlock(&ctx_id_mutex);
-
+ xa_erase(&ctx_id_table, ctx->id);
ib_destroy_cm_id(ctx->cm_id);
ib_ucm_cleanup_events(ctx);
kfree(ctx);
@@ -1352,7 +1343,7 @@ static void __exit ib_ucm_cleanup(void)
class_remove_file(&cm_class, &class_attr_abi_version.attr);
unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
- idr_destroy(&ctx_id_table);
+ WARN_ON(!xa_empty(&ctx_id_table));
}
module_init(ib_ucm_init);
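
The ucm.c conversion above also shows the lookup side of these XArray changes: ib_ucm_ctx_get() takes the XArray's own lock around xa_load() so the entry cannot be erased before its reference is taken, while plain xa_alloc() handles locking internally on insert. A minimal sketch with a hypothetical refcounted 'struct ctx':

#include <linux/xarray.h>
#include <linux/kref.h>

static DEFINE_XARRAY_ALLOC(ctxs);

struct ctx {
	struct kref ref;
	u32 id;
};

static struct ctx *ctx_get(u32 id)
{
	struct ctx *c;

	/* Holding xa_lock() pins the entry against a concurrent xa_erase(). */
	xa_lock(&ctxs);
	c = xa_load(&ctxs, id);
	if (c)
		kref_get(&c->ref);
	xa_unlock(&ctxs);
	return c;
}

static int ctx_insert(struct ctx *c)
{
	/* xa_alloc() takes the lock itself and may sleep with GFP_KERNEL. */
	return xa_alloc(&ctxs, &c->id, c, xa_limit_32b, GFP_KERNEL);
}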
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 7468b26b8a01..140a338a135f 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1744,7 +1744,7 @@ static int ucma_open(struct inode *inode, struct file *filp)
filp->private_data = file;
file->filp = filp;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index fe5551562dbc..e7ea819fcb11 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -37,27 +37,23 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
-#include <linux/hugetlb.h>
#include <linux/slab.h>
+#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
-
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
- struct scatterlist *sg;
+ struct sg_page_iter sg_iter;
struct page *page;
- int i;
if (umem->nmap > 0)
- ib_dma_unmap_sg(dev, umem->sg_head.sgl,
- umem->npages,
+ ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
DMA_BIDIRECTIONAL);
- for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
-
- page = sg_page(sg);
+ for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
+ page = sg_page_iter_page(&sg_iter);
if (!PageDirty(page) && umem->writable && dirty)
set_page_dirty_lock(page);
put_page(page);
@@ -66,6 +62,124 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
sg_free_table(&umem->sg_head);
}
+/* ib_umem_add_sg_table - Add N contiguous pages to scatter table
+ *
+ * sg: current scatterlist entry
+ * page_list: array of npages struct page pointers
+ * npages: number of pages in page_list
+ * max_seg_sz: maximum segment size in bytes
+ * nents: [out] number of entries in the scatterlist
+ *
+ * Return new end of scatterlist
+ */
+static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
+ struct page **page_list,
+ unsigned long npages,
+ unsigned int max_seg_sz,
+ int *nents)
+{
+ unsigned long first_pfn;
+ unsigned long i = 0;
+ bool update_cur_sg = false;
+ bool first = !sg_page(sg);
+
+ /* Check if new page_list is contiguous with end of previous page_list.
+ * sg->length here is a multiple of PAGE_SIZE and sg->offset is 0.
+ */
+ if (!first && (page_to_pfn(sg_page(sg)) + (sg->length >> PAGE_SHIFT) ==
+ page_to_pfn(page_list[0])))
+ update_cur_sg = true;
+
+ while (i != npages) {
+ unsigned long len;
+ struct page *first_page = page_list[i];
+
+ first_pfn = page_to_pfn(first_page);
+
+ /* Compute the number of contiguous pages we have starting
+ * at i
+ */
+ for (len = 0; i != npages &&
+ first_pfn + len == page_to_pfn(page_list[i]) &&
+ len < (max_seg_sz >> PAGE_SHIFT);
+ len++)
+ i++;
+
+ /* Squash N contiguous pages from page_list into current sge */
+ if (update_cur_sg) {
+ if ((max_seg_sz - sg->length) >= (len << PAGE_SHIFT)) {
+ sg_set_page(sg, sg_page(sg),
+ sg->length + (len << PAGE_SHIFT),
+ 0);
+ update_cur_sg = false;
+ continue;
+ }
+ update_cur_sg = false;
+ }
+
+ /* Squash N contiguous pages into next sge or first sge */
+ if (!first)
+ sg = sg_next(sg);
+
+ (*nents)++;
+ sg_set_page(sg, first_page, len << PAGE_SHIFT, 0);
+ first = false;
+ }
+
+ return sg;
+}
+
+/**
+ * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
+ *
+ * @umem: umem struct
+ * @pgsz_bitmap: bitmap of HW supported page sizes
+ * @virt: IOVA
+ *
+ * This helper is intended for HW that support multiple page
+ * sizes but can do only a single page size in an MR.
+ *
+ * Returns 0 if the umem requires page sizes not supported by
+ * the driver to be mapped. Drivers always supporting PAGE_SIZE
+ * or smaller will never see a 0 result.
+ */
+unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ unsigned long virt)
+{
+ struct scatterlist *sg;
+ unsigned int best_pg_bit;
+ unsigned long va, pgoff;
+ dma_addr_t mask;
+ int i;
+
+ /* At minimum, drivers must support PAGE_SIZE or smaller */
+ if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
+ return 0;
+
+ va = virt;
+ /* max page size not to exceed MR length */
+ mask = roundup_pow_of_two(umem->length);
+ /* offset into first SGL */
+ pgoff = umem->address & ~PAGE_MASK;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ /* Walk SGL and reduce max page size if VA/PA bits differ
+ * for any address.
+ */
+ mask |= (sg_dma_address(sg) + pgoff) ^ va;
+ if (i && i != (umem->nmap - 1))
+ /* restrict by length as well for interior SGEs */
+ mask |= sg_dma_len(sg);
+ va += sg_dma_len(sg) - pgoff;
+ pgoff = 0;
+ }
+ best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);
+
+ return BIT_ULL(best_pg_bit);
+}
+EXPORT_SYMBOL(ib_umem_find_best_pgsz);
+
/**
* ib_umem_get - Pin and DMA map userspace memory.
*
@@ -84,16 +198,14 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
struct ib_ucontext *context;
struct ib_umem *umem;
struct page **page_list;
- struct vm_area_struct **vma_list;
unsigned long lock_limit;
unsigned long new_pinned;
unsigned long cur_base;
struct mm_struct *mm;
unsigned long npages;
int ret;
- int i;
unsigned long dma_attrs = 0;
- struct scatterlist *sg, *sg_list_start;
+ struct scatterlist *sg;
unsigned int gup_flags = FOLL_WRITE;
if (!udata)
@@ -138,29 +250,23 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
mmgrab(mm);
if (access & IB_ACCESS_ON_DEMAND) {
+ if (WARN_ON_ONCE(!context->invalidate_range)) {
+ ret = -EINVAL;
+ goto umem_kfree;
+ }
+
ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
if (ret)
goto umem_kfree;
return umem;
}
- /* We assume the memory is from hugetlb until proved otherwise */
- umem->hugetlb = 1;
-
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
ret = -ENOMEM;
goto umem_kfree;
}
- /*
- * if we can't alloc the vma_list, it's not so bad;
- * just assume the memory is not hugetlb memory
- */
- vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
- if (!vma_list)
- umem->hugetlb = 0;
-
npages = ib_umem_num_pages(umem);
if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
@@ -185,41 +291,35 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
if (!umem->writable)
gup_flags |= FOLL_FORCE;
- sg_list_start = umem->sg_head.sgl;
+ sg = umem->sg_head.sgl;
while (npages) {
down_read(&mm->mmap_sem);
- ret = get_user_pages_longterm(cur_base,
+ ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- gup_flags, page_list, vma_list);
+ gup_flags | FOLL_LONGTERM,
+ page_list, NULL);
if (ret < 0) {
up_read(&mm->mmap_sem);
goto umem_release;
}
- umem->npages += ret;
cur_base += ret * PAGE_SIZE;
npages -= ret;
- /* Continue to hold the mmap_sem as vma_list access
- * needs to be protected.
- */
- for_each_sg(sg_list_start, sg, ret, i) {
- if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
- umem->hugetlb = 0;
+ sg = ib_umem_add_sg_table(sg, page_list, ret,
+ dma_get_max_seg_size(context->device->dma_device),
+ &umem->sg_nents);
- sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
- }
up_read(&mm->mmap_sem);
-
- /* preparing for next loop */
- sg_list_start = sg;
}
+ sg_mark_end(sg);
+
umem->nmap = ib_dma_map_sg_attrs(context->device,
umem->sg_head.sgl,
- umem->npages,
+ umem->sg_nents,
DMA_BIDIRECTIONAL,
dma_attrs);
@@ -236,8 +336,6 @@ umem_release:
vma:
atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
- if (vma_list)
- free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
umem_kfree:
if (ret) {
@@ -315,7 +413,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
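
ib_umem_find_best_pgsz() above folds every alignment constraint into one bitmask: each (DMA address + offset) ^ IOVA value and every interior SGE length is OR-ed into 'mask', and a page size 2^s stays usable only while the low s bits of that mask are zero. A hedged, self-contained sketch of the final selection step; pick_page_shift() is a hypothetical stand-in for illustration, not the in-tree rdma_find_pg_bit() helper:

#include <linux/bitops.h>

/*
 * mask: OR of all alignment-breaking bits gathered while walking the SGL.
 * pgsz_bitmap: bit N set means the HW supports a 2^N byte page size.
 * Returns the largest usable page shift, or 0 if nothing fits.
 */
static unsigned int pick_page_shift(unsigned long mask,
				    unsigned long pgsz_bitmap)
{
	unsigned long usable = pgsz_bitmap;

	if (mask) {
		/* Lowest set bit of mask == strictest alignment observed. */
		unsigned long align = mask & -mask;

		/* Page sizes larger than 'align' would split some SGE. */
		usable &= align | (align - 1);
	}

	return usable ? fls64(usable) - 1 : 0;
}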
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e6ec79ad9cc8..f962b5bbfa40 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -152,7 +152,7 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct ib_ucontext_per_mm *per_mm =
container_of(mn, struct ib_ucontext_per_mm, mn);
- if (range->blockable)
+ if (mmu_notifier_range_blockable(range))
down_read(&per_mm->umem_rwsem);
else if (!down_read_trylock(&per_mm->umem_rwsem))
return -EAGAIN;
@@ -170,7 +170,8 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
range->end,
invalidate_range_start_trampoline,
- range->blockable, NULL);
+ mmu_notifier_range_blockable(range),
+ NULL);
}
static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
@@ -241,7 +242,7 @@ static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
per_mm->mm = mm;
per_mm->umem_tree = RB_ROOT_CACHED;
init_rwsem(&per_mm->umem_rwsem);
- per_mm->active = ctx->invalidate_range;
+ per_mm->active = true;
rcu_read_lock();
per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
@@ -417,9 +418,6 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
h = hstate_vma(vma);
umem->page_shift = huge_page_shift(h);
up_read(&mm->mmap_sem);
- umem->hugetlb = 1;
- } else {
- umem->hugetlb = 0;
}
mutex_init(&umem_odp->umem_mutex);
@@ -503,7 +501,6 @@ static int ib_umem_odp_map_dma_single_page(
struct ib_umem *umem = &umem_odp->umem;
struct ib_device *dev = umem->context->device;
dma_addr_t dma_addr;
- int stored_page = 0;
int remove_existing_mapping = 0;
int ret = 0;
@@ -527,8 +524,7 @@ static int ib_umem_odp_map_dma_single_page(
}
umem_odp->dma_list[page_index] = dma_addr | access_mask;
umem_odp->page_list[page_index] = page;
- umem->npages++;
- stored_page = 1;
+ umem_odp->npages++;
} else if (umem_odp->page_list[page_index] == page) {
umem_odp->dma_list[page_index] |= access_mask;
} else {
@@ -540,11 +536,9 @@ static int ib_umem_odp_map_dma_single_page(
}
out:
- /* On Demand Paging - avoid pinning the page */
- if (umem->context->invalidate_range || !stored_page)
- put_page(page);
+ put_page(page);
- if (remove_existing_mapping && umem->context->invalidate_range) {
+ if (remove_existing_mapping) {
ib_umem_notifier_start_account(umem_odp);
umem->context->invalidate_range(
umem_odp,
@@ -754,12 +748,9 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
*/
set_page_dirty(head_page);
}
- /* on demand pinning support */
- if (!umem->context->invalidate_range)
- put_page(page);
umem_odp->page_list[idx] = NULL;
umem_odp->dma_list[idx] = 0;
- umem->npages--;
+ umem_odp->npages--;
}
}
mutex_unlock(&umem_odp->umem_mutex);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 02b7947ab215..671f07ba1fad 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -129,6 +129,9 @@ struct ib_umad_packet {
struct ib_user_mad mad;
};
+#define CREATE_TRACE_POINTS
+#include <trace/events/ib_umad.h>
+
static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
IB_UMAD_NUM_FIXED_MINOR;
@@ -334,6 +337,9 @@ static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
return -EFAULT;
}
}
+
+ trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr);
+
return hdr_size(file) + packet->length;
}
@@ -353,6 +359,9 @@ static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
if (copy_to_user(buf, packet->mad.data, packet->length))
return -EFAULT;
+ trace_ib_umad_read_send(file, &packet->mad.hdr,
+ (struct ib_mad_hdr *)&packet->mad.data);
+
return size;
}
@@ -508,6 +517,9 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
mutex_lock(&file->mutex);
+ trace_ib_umad_write(file, &packet->mad.hdr,
+ (struct ib_mad_hdr *)&packet->mad.data);
+
agent = __get_agent(file, packet->mad.hdr.id);
if (!agent) {
ret = -EINVAL;
@@ -968,6 +980,11 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
goto out;
}
+ if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
+ ret = -EPERM;
+ goto out;
+ }
+
file = kzalloc(sizeof(*file), GFP_KERNEL);
if (!file) {
ret = -ENOMEM;
@@ -985,7 +1002,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
list_add_tail(&file->port_list, &port->file_list);
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
out:
mutex_unlock(&port->file_mutex);
return ret;
@@ -1061,6 +1078,11 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
}
}
+ if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
+ ret = -EPERM;
+ goto err_up_sem;
+ }
+
ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
if (ret)
goto err_up_sem;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index ea0bc6885517..1e5aeb39f774 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -160,10 +160,9 @@ struct ib_uverbs_file {
struct mutex umap_lock;
struct list_head umaps;
+ struct page *disassociate_page;
- struct idr idr;
- /* spinlock protects write access to idr */
- spinlock_t idr_lock;
+ struct xarray idr;
};
struct ib_uverbs_event {
@@ -240,7 +239,8 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
- enum rdma_remove_reason why);
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs);
int uverbs_dealloc_mw(struct ib_mw *mw);
void ib_uverbs_detach_umcast(struct ib_qp *qp,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 062a86c04123..5a3a1780ceea 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -162,7 +162,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
const void __user *res = iter->cur;
if (iter->cur + len > iter->end)
- return ERR_PTR(-ENOSPC);
+ return (void __force __user *)ERR_PTR(-ENOSPC);
iter->cur += len;
return res;
}
@@ -175,7 +175,7 @@ static int uverbs_request_finish(struct uverbs_req_iter *iter)
}
static struct ib_uverbs_completion_event_file *
-_ib_uverbs_lookup_comp_file(s32 fd, const struct uverbs_attr_bundle *attrs)
+_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
fd, attrs);
@@ -230,6 +230,8 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
goto err_alloc;
}
+ attrs->context = ucontext;
+
ucontext->res.type = RDMA_RESTRACK_CTX;
ucontext->device = ib_dev;
ucontext->cg_obj = cg_obj;
@@ -423,7 +425,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
atomic_set(&pd->usecnt, 0);
pd->res.type = RDMA_RESTRACK_PD;
- ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata);
+ ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
if (ret)
goto err_alloc;
@@ -436,15 +438,15 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
if (ret)
goto err_copy;
- return uobj_alloc_commit(uobj);
+ return uobj_alloc_commit(uobj, attrs);
err_copy:
- ib_dealloc_pd(pd);
+ ib_dealloc_pd_user(pd, &attrs->driver_udata);
pd = NULL;
err_alloc:
kfree(pd);
err:
- uobj_alloc_abort(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
@@ -594,8 +596,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
}
if (!xrcd) {
- xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context,
- &attrs->driver_udata);
+ xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata);
if (IS_ERR(xrcd)) {
ret = PTR_ERR(xrcd);
goto err;
@@ -633,7 +634,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
mutex_unlock(&ibudev->xrcd_tree_mutex);
- return uobj_alloc_commit(&obj->uobject);
+ return uobj_alloc_commit(&obj->uobject, attrs);
err_copy:
if (inode) {
@@ -643,10 +644,10 @@ err_copy:
}
err_dealloc_xrcd:
- ib_dealloc_xrcd(xrcd);
+ ib_dealloc_xrcd(xrcd, &attrs->driver_udata);
err:
- uobj_alloc_abort(&obj->uobject);
+ uobj_alloc_abort(&obj->uobject, attrs);
err_tree_mutex_unlock:
if (f.file)
@@ -669,19 +670,19 @@ static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
}
-int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject,
- struct ib_xrcd *xrcd,
- enum rdma_remove_reason why)
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct inode *inode;
int ret;
- struct ib_uverbs_device *dev = uobject->context->ufile->device;
+ struct ib_uverbs_device *dev = attrs->ufile->device;
inode = xrcd->inode;
if (inode && !atomic_dec_and_test(&xrcd->usecnt))
return 0;
- ret = ib_dealloc_xrcd(xrcd);
+ ret = ib_dealloc_xrcd(xrcd, &attrs->driver_udata);
if (ib_is_destroy_retryable(ret, why, uobject)) {
atomic_inc(&xrcd->usecnt);
@@ -763,16 +764,16 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
uobj_put_obj_read(pd);
- return uobj_alloc_commit(uobj);
+ return uobj_alloc_commit(uobj, attrs);
err_copy:
- ib_dereg_mr(mr);
+ ib_dereg_mr_user(mr, &attrs->driver_udata);
err_put:
uobj_put_obj_read(pd);
err_free:
- uobj_alloc_abort(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
@@ -917,14 +918,14 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
goto err_copy;
uobj_put_obj_read(pd);
- return uobj_alloc_commit(uobj);
+ return uobj_alloc_commit(uobj, attrs);
err_copy:
uverbs_dealloc_mw(mw);
err_put:
uobj_put_obj_read(pd);
err_free:
- uobj_alloc_abort(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
@@ -965,11 +966,11 @@ static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
ret = uverbs_response(attrs, &resp, sizeof(resp));
if (ret) {
- uobj_alloc_abort(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
- return uobj_alloc_commit(uobj);
+ return uobj_alloc_commit(uobj, attrs);
}
static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
@@ -1009,8 +1010,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
attr.comp_vector = cmd->comp_vector;
attr.flags = cmd->flags;
- cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
- &attrs->driver_udata);
+ cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_file;
@@ -1036,7 +1036,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
if (ret)
goto err_cb;
- ret = uobj_alloc_commit(&obj->uobject);
+ ret = uobj_alloc_commit(&obj->uobject, attrs);
if (ret)
return ERR_PTR(ret);
return obj;
@@ -1049,7 +1049,7 @@ err_file:
ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);
err:
- uobj_alloc_abort(&obj->uobject);
+ uobj_alloc_abort(&obj->uobject, attrs);
return ERR_PTR(ret);
}
@@ -1418,7 +1418,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (ret)
goto err_cb;
- qp->real_qp = qp;
qp->pd = pd;
qp->send_cq = attr.send_cq;
qp->recv_cq = attr.recv_cq;
@@ -1477,7 +1476,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (ind_tbl)
uobj_put_obj_read(ind_tbl);
- return uobj_alloc_commit(&obj->uevent.uobject);
+ return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_cb:
ib_destroy_qp(qp);
@@ -1495,7 +1494,7 @@ err_put:
if (ind_tbl)
uobj_put_obj_read(ind_tbl);
- uobj_alloc_abort(&obj->uevent.uobject);
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
return ret;
}
@@ -1609,14 +1608,14 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
qp->uobject = &obj->uevent.uobject;
uobj_put_read(xrcd_uobj);
- return uobj_alloc_commit(&obj->uevent.uobject);
+ return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_destroy:
ib_destroy_qp(qp);
err_xrcd:
uobj_put_read(xrcd_uobj);
err_put:
- uobj_alloc_abort(&obj->uevent.uobject);
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
return ret;
}
@@ -2451,7 +2450,7 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
goto err_copy;
uobj_put_obj_read(pd);
- return uobj_alloc_commit(uobj);
+ return uobj_alloc_commit(uobj, attrs);
err_copy:
rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
@@ -2460,7 +2459,7 @@ err_put:
uobj_put_obj_read(pd);
err:
- uobj_alloc_abort(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
@@ -2962,16 +2961,16 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
uobj_put_obj_read(pd);
uobj_put_obj_read(cq);
- return uobj_alloc_commit(&obj->uevent.uobject);
+ return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_copy:
- ib_destroy_wq(wq);
+ ib_destroy_wq(wq, &attrs->driver_udata);
err_put_cq:
uobj_put_obj_read(cq);
err_put_pd:
uobj_put_obj_read(pd);
err_uobj:
- uobj_alloc_abort(&obj->uevent.uobject);
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
return err;
}
@@ -3136,12 +3135,12 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
for (j = 0; j < num_read_wqs; j++)
uobj_put_obj_read(wqs[j]);
- return uobj_alloc_commit(uobj);
+ return uobj_alloc_commit(uobj, attrs);
err_copy:
ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
- uobj_alloc_abort(uobj);
+ uobj_alloc_abort(uobj, attrs);
put_wqs:
for (j = 0; j < num_read_wqs; j++)
uobj_put_obj_read(wqs[j]);
@@ -3314,7 +3313,7 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
kfree(flow_attr);
if (cmd.flow_attr.num_of_specs)
kfree(kern_flow_attr);
- return uobj_alloc_commit(uobj);
+ return uobj_alloc_commit(uobj, attrs);
err_copy:
if (!qp->device->ops.destroy_flow(flow_id))
atomic_dec(&qp->usecnt);
@@ -3325,7 +3324,7 @@ err_free_flow_attr:
err_put:
uobj_put_obj_read(qp);
err_uobj:
- uobj_alloc_abort(uobj);
+ uobj_alloc_abort(uobj, attrs);
err_free_attr:
if (cmd.flow_attr.num_of_specs)
kfree(kern_flow_attr);
@@ -3411,9 +3410,9 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
- srq = pd->device->ops.create_srq(pd, &attr, udata);
- if (IS_ERR(srq)) {
- ret = PTR_ERR(srq);
+ srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
+ if (!srq) {
+ ret = -ENOMEM;
goto err_put;
}
@@ -3424,6 +3423,10 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
srq->event_handler = attr.event_handler;
srq->srq_context = attr.srq_context;
+ ret = pd->device->ops.create_srq(srq, &attr, udata);
+ if (ret)
+ goto err_free;
+
if (ib_srq_has_cq(cmd->srq_type)) {
srq->ext.cq = attr.ext.cq;
atomic_inc(&attr.ext.cq->usecnt);
@@ -3458,11 +3461,13 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
uobj_put_obj_read(attr.ext.cq);
uobj_put_obj_read(pd);
- return uobj_alloc_commit(&obj->uevent.uobject);
+ return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_copy:
- ib_destroy_srq(srq);
+ ib_destroy_srq_user(srq, &attrs->driver_udata);
+err_free:
+ kfree(srq);
err_put:
uobj_put_obj_read(pd);
@@ -3477,7 +3482,7 @@ err_put_xrcd:
}
err:
- uobj_alloc_abort(&obj->uevent.uobject);
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
return ret;
}
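
The __uverbs_create_xsrq() hunk above moves SRQs to the core-allocates/driver-initializes model: the uverbs layer obtains the object via rdma_zalloc_drv_obj() and ops.create_srq() now returns an int instead of a pointer. A hedged sketch of the matching driver side under this convention; 'my_srq', 'my_create_srq' and the HW handle are hypothetical:

#include <rdma/ib_verbs.h>

struct my_srq {
	struct ib_srq ibsrq;	/* core-visible part, found via container_of */
	u32 srqn;		/* hypothetical HW handle */
};

static int my_create_srq(struct ib_srq *ibsrq,
			 struct ib_srq_init_attr *init_attr,
			 struct ib_udata *udata)
{
	struct my_srq *srq = container_of(ibsrq, struct my_srq, ibsrq);

	/* The core has already zeroed the object and set device/pd/type. */
	srq->srqn = 0;		/* program the HW here; return -errno on failure */
	return 0;
}

static const struct ib_device_ops my_dev_ops = {
	.create_srq = my_create_srq,
	/* Tells rdma_zalloc_drv_obj() how much to allocate per ib_srq. */
	INIT_RDMA_OBJ_SIZE(ib_srq, my_srq, ibsrq),
};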
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index e1379949e663..829b0c6944d8 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -207,13 +207,12 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
for (i = 0; i != array_len; i++) {
attr->uobjects[i] = uverbs_get_uobject_from_file(
- spec->u2.objs_arr.obj_type, pbundle->bundle.ufile,
- spec->u2.objs_arr.access, idr_vals[i]);
+ spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access,
+ idr_vals[i], &pbundle->bundle);
if (IS_ERR(attr->uobjects[i])) {
ret = PTR_ERR(attr->uobjects[i]);
break;
}
- pbundle->bundle.context = attr->uobjects[i]->context;
}
attr->len = i;
@@ -223,7 +222,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
struct uverbs_objs_arr_attr *attr,
- bool commit)
+ bool commit, struct uverbs_attr_bundle *attrs)
{
const struct uverbs_attr_spec *spec = &attr_uapi->spec;
int current_ret;
@@ -231,8 +230,9 @@ static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
size_t i;
for (i = 0; i != attr->len; i++) {
- current_ret = uverbs_finalize_object(
- attr->uobjects[i], spec->u2.objs_arr.access, commit);
+ current_ret = uverbs_finalize_object(attr->uobjects[i],
+ spec->u2.objs_arr.access,
+ commit, attrs);
if (!ret)
ret = current_ret;
}
@@ -325,13 +325,10 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
* IDR implementation today rejects negative IDs
*/
o_attr->uobject = uverbs_get_uobject_from_file(
- spec->u.obj.obj_type,
- pbundle->bundle.ufile,
- spec->u.obj.access,
- uattr->data_s64);
+ spec->u.obj.obj_type, spec->u.obj.access,
+ uattr->data_s64, &pbundle->bundle);
if (IS_ERR(o_attr->uobject))
return PTR_ERR(o_attr->uobject);
- pbundle->bundle.context = o_attr->uobject->context;
__set_bit(attr_bkey, pbundle->uobj_finalize);
if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
@@ -456,12 +453,14 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
uverbs_fill_udata(&pbundle->bundle,
&pbundle->bundle.driver_udata,
UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT);
+ else
+ pbundle->bundle.driver_udata = (struct ib_udata){};
if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
struct uverbs_obj_attr *destroy_attr =
&pbundle->bundle.attrs[destroy_bkey].obj_attr;
- ret = uobj_destroy(destroy_attr->uobject);
+ ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle);
if (ret)
return ret;
__clear_bit(destroy_bkey, pbundle->uobj_finalize);
@@ -512,7 +511,8 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
current_ret = uverbs_finalize_object(
attr->obj_attr.uobject,
- attr->obj_attr.attr_elm->spec.u.obj.access, commit);
+ attr->obj_attr.attr_elm->spec.u.obj.access, commit,
+ &pbundle->bundle);
if (!ret)
ret = current_ret;
}
@@ -535,7 +535,8 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
current_ret = uverbs_free_idrs_array(
- attr_uapi, &attr->objs_arr_attr, commit);
+ attr_uapi, &attr->objs_arr_attr, commit,
+ &pbundle->bundle);
if (!ret)
ret = current_ret;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 70b7d80431a9..84a5e9a6d483 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
kref_put(&file->async_file->ref,
ib_uverbs_release_async_event_file);
put_device(&file->device->dev);
+
+ if (file->disassociate_page)
+ __free_pages(file->disassociate_page, 0);
kfree(file);
}
@@ -720,7 +723,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
* then the command request structure starts
* with a '__aligned u64 response' member.
*/
- ret = get_user(response, (const u64 *)buf);
+ ret = get_user(response, (const u64 __user *)buf);
if (ret)
goto out_unlock;
@@ -877,45 +880,78 @@ static void rdma_umap_close(struct vm_area_struct *vma)
kfree(priv);
}
+/*
+ * Once zap_vma_ptes() has been called, touches to the VMA will come here and
+ * we return a dummy writable zero page for all the pfns.
+ */
+static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
+{
+ struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
+ struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
+ vm_fault_t ret = 0;
+
+ if (!priv)
+ return VM_FAULT_SIGBUS;
+
+ /* Read only pages can just use the system zero page. */
+ if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
+ vmf->page = ZERO_PAGE(vmf->address);
+ get_page(vmf->page);
+ return 0;
+ }
+
+ mutex_lock(&ufile->umap_lock);
+ if (!ufile->disassociate_page)
+ ufile->disassociate_page =
+ alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
+
+ if (ufile->disassociate_page) {
+ /*
+ * This VMA is forced to always be shared so this doesn't have
+ * to worry about COW.
+ */
+ vmf->page = ufile->disassociate_page;
+ get_page(vmf->page);
+ } else {
+ ret = VM_FAULT_SIGBUS;
+ }
+ mutex_unlock(&ufile->umap_lock);
+
+ return ret;
+}
+
static const struct vm_operations_struct rdma_umap_ops = {
.open = rdma_umap_open,
.close = rdma_umap_close,
+ .fault = rdma_umap_fault,
};
-static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
- struct vm_area_struct *vma,
- unsigned long size)
+/*
+ * Map IO memory into a process. This is to be called by drivers as part of
+ * their mmap() functions if they wish to send something like PCI-E BAR memory
+ * to userspace.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
{
struct ib_uverbs_file *ufile = ucontext->ufile;
struct rdma_umap_priv *priv;
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
if (vma->vm_end - vma->vm_start != size)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Driver is using this wrong, must be called by ib_uverbs_mmap */
if (WARN_ON(!vma->vm_file ||
vma->vm_file->private_data != ufile))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
lockdep_assert_held(&ufile->device->disassociate_srcu);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
- return ERR_PTR(-ENOMEM);
- return priv;
-}
-
-/*
- * Map IO memory into a process. This is to be called by drivers as part of
- * their mmap() functions if they wish to send something like PCI-E BAR memory
- * to userspace.
- */
-int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
-
- if (IS_ERR(priv))
- return PTR_ERR(priv);
+ return -ENOMEM;
vma->vm_page_prot = prot;
if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
@@ -928,35 +964,6 @@ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
}
EXPORT_SYMBOL(rdma_user_mmap_io);
-/*
- * The page case is here for a slightly different reason, the driver expects
- * to be able to free the page it is sharing to user space when it destroys
- * its ucontext, which means we need to zap the user space references.
- *
- * We could handle this differently by providing an API to allocate a shared
- * page and then only freeing the shared page when the last ufile is
- * destroyed.
- */
-int rdma_user_mmap_page(struct ib_ucontext *ucontext,
- struct vm_area_struct *vma, struct page *page,
- unsigned long size)
-{
- struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
-
- if (IS_ERR(priv))
- return PTR_ERR(priv);
-
- if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
- vma->vm_page_prot)) {
- kfree(priv);
- return -EAGAIN;
- }
-
- rdma_umap_priv_init(priv, vma);
- return 0;
-}
-EXPORT_SYMBOL(rdma_user_mmap_page);
-
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
struct rdma_umap_priv *priv, *next_priv;
@@ -992,7 +999,9 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
* at a time to get the lock ordering right. Typically there
* will only be one mm, so no big deal.
*/
- down_write(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
+ if (!mmget_still_valid(mm))
+ goto skip_mm;
mutex_lock(&ufile->umap_lock);
list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
list) {
@@ -1004,10 +1013,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
zap_vma_ptes(vma, vma->vm_start,
vma->vm_end - vma->vm_start);
- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
}
mutex_unlock(&ufile->umap_lock);
- up_write(&mm->mmap_sem);
+ skip_mm:
+ up_read(&mm->mmap_sem);
mmput(mm);
}
}
@@ -1045,6 +1054,11 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
goto err;
}
+ if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
+ ret = -EPERM;
+ goto err;
+ }
+
/* In case IB device supports disassociate ucontext, there is no hard
* dependency between uverbs device and its low level device.
*/
@@ -1083,7 +1097,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
setup_ufile_idr_uobject(file);
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
err_module:
module_put(ib_dev->owner);
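
With rdma_user_mmap_page() removed above, rdma_user_mmap_io() is the single helper drivers call from their mmap path to expose BAR-type memory, and the new rdma_umap_fault() keeps those VMAs serviceable with a dummy page after disassociation. A hedged sketch of a driver-side caller; the doorbell pfn and the offset check are hypothetical:

#include <rdma/ib_verbs.h>

static int my_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	/* Hypothetical: pfn of a doorbell page inside the device BAR. */
	unsigned long pfn = 0x12345;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* Installs the PTEs and registers the VMA for zapping on disassociate. */
	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot));
}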
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index f224cb727224..35b2e2c640cc 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -40,14 +40,17 @@
#include "uverbs.h"
static int uverbs_free_ah(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
- return rdma_destroy_ah((struct ib_ah *)uobject->object,
- RDMA_DESTROY_AH_SLEEPABLE);
+ return rdma_destroy_ah_user((struct ib_ah *)uobject->object,
+ RDMA_DESTROY_AH_SLEEPABLE,
+ &attrs->driver_udata);
}
static int uverbs_free_flow(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_flow *flow = (struct ib_flow *)uobject->object;
struct ib_uflow_object *uflow =
@@ -66,13 +69,15 @@ static int uverbs_free_flow(struct ib_uobject *uobject,
}
static int uverbs_free_mw(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
}
static int uverbs_free_qp(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_qp *qp = uobject->object;
struct ib_uqp_object *uqp =
@@ -93,19 +98,20 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
ib_uverbs_detach_umcast(qp, uqp);
}
- ret = ib_destroy_qp(qp);
+ ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
if (uqp->uxrcd)
atomic_dec(&uqp->uxrcd->refcnt);
- ib_uverbs_release_uevent(uobject->context->ufile, &uqp->uevent);
+ ib_uverbs_release_uevent(attrs->ufile, &uqp->uevent);
return ret;
}
static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
@@ -120,23 +126,25 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
}
static int uverbs_free_wq(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_wq *wq = uobject->object;
struct ib_uwq_object *uwq =
container_of(uobject, struct ib_uwq_object, uevent.uobject);
int ret;
- ret = ib_destroy_wq(wq);
+ ret = ib_destroy_wq(wq, &attrs->driver_udata);
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
- ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
+ ib_uverbs_release_uevent(attrs->ufile, &uwq->uevent);
return ret;
}
static int uverbs_free_srq(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_srq *srq = uobject->object;
struct ib_uevent_object *uevent =
@@ -144,7 +152,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
enum ib_srq_type srq_type = srq->srq_type;
int ret;
- ret = ib_destroy_srq(srq);
+ ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
@@ -155,12 +163,13 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
atomic_dec(&us->uxrcd->refcnt);
}
- ib_uverbs_release_uevent(uobject->context->ufile, uevent);
+ ib_uverbs_release_uevent(attrs->ufile, uevent);
return ret;
}
static int uverbs_free_xrcd(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_xrcd *xrcd = uobject->object;
struct ib_uxrcd_object *uxrcd =
@@ -171,15 +180,16 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject,
if (ret)
return ret;
- mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
- ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why);
- mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);
+ mutex_lock(&attrs->ufile->device->xrcd_tree_mutex);
+ ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, attrs);
+ mutex_unlock(&attrs->ufile->device->xrcd_tree_mutex);
return ret;
}
static int uverbs_free_pd(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_pd *pd = uobject->object;
int ret;
@@ -188,7 +198,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
if (ret)
return ret;
- ib_dealloc_pd(pd);
+ ib_dealloc_pd_user(pd, &attrs->driver_udata);
return 0;
}
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 309c5e80988d..9f013304e677 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -31,11 +31,13 @@
* SOFTWARE.
*/
+#include "rdma_core.h"
#include "uverbs.h"
#include <rdma/uverbs_std_types.h>
static int uverbs_free_counters(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_counters *counters = uobject->object;
int ret;
@@ -52,7 +54,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
- struct ib_device *ib_dev = uobj->context->device;
+ struct ib_device *ib_dev = attrs->context->device;
struct ib_counters *counters;
int ret;
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index a59ea89e3f2b..db5c46a1bb2d 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -35,7 +35,8 @@
#include "uverbs.h"
static int uverbs_free_cq(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_cq *cq = uobject->object;
struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
@@ -43,12 +44,12 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
container_of(uobject, struct ib_ucq_object, uobject);
int ret;
- ret = ib_destroy_cq(cq);
+ ret = ib_destroy_cq_user(cq, &attrs->driver_udata);
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
ib_uverbs_release_ucq(
- uobject->context->ufile,
+ attrs->ufile,
ev_queue ? container_of(ev_queue,
struct ib_uverbs_completion_event_file,
ev_queue) :
@@ -63,7 +64,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
struct ib_ucq_object *obj = container_of(
uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE),
typeof(*obj), uobject);
- struct ib_device *ib_dev = obj->uobject.context->device;
+ struct ib_device *ib_dev = attrs->context->device;
int ret;
u64 user_handle;
struct ib_cq_init_attr attr = {};
@@ -110,8 +111,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
INIT_LIST_HEAD(&obj->comp_list);
INIT_LIST_HEAD(&obj->async_list);
- cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
- &attrs->driver_udata);
+ cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_event_file;
diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c
index 2ef70637bee1..d5a1de33c2c9 100644
--- a/drivers/infiniband/core/uverbs_std_types_dm.c
+++ b/drivers/infiniband/core/uverbs_std_types_dm.c
@@ -30,11 +30,13 @@
* SOFTWARE.
*/
+#include "rdma_core.h"
#include "uverbs.h"
#include <rdma/uverbs_std_types.h>
static int uverbs_free_dm(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_dm *dm = uobject->object;
int ret;
@@ -43,7 +45,7 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
if (ret)
return ret;
- return dm->device->ops.dealloc_dm(dm);
+ return dm->device->ops.dealloc_dm(dm, attrs);
}
static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
@@ -53,7 +55,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
struct ib_uobject *uobj =
uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DM_HANDLE)
->obj_attr.uobject;
- struct ib_device *ib_dev = uobj->context->device;
+ struct ib_device *ib_dev = attrs->context->device;
struct ib_dm *dm;
int ret;
@@ -70,7 +72,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
if (ret)
return ret;
- dm = ib_dev->ops.alloc_dm(ib_dev, uobj->context, &attr, attrs);
+ dm = ib_dev->ops.alloc_dm(ib_dev, attrs->context, &attr, attrs);
if (IS_ERR(dm))
return PTR_ERR(dm);
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index 4962b87fa600..459cf165b231 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -30,11 +30,13 @@
* SOFTWARE.
*/
+#include "rdma_core.h"
#include "uverbs.h"
#include <rdma/uverbs_std_types.h>
static int uverbs_free_flow_action(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_flow_action *action = uobject->object;
int ret;
@@ -308,7 +310,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE);
- struct ib_device *ib_dev = uobj->context->device;
+ struct ib_device *ib_dev = attrs->context->device;
int ret;
struct ib_flow_action *action;
struct ib_flow_action_esp_attr esp_attr = {};
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 4d4be0c2b752..610d3b9f7654 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -30,13 +30,16 @@
* SOFTWARE.
*/
+#include "rdma_core.h"
#include "uverbs.h"
#include <rdma/uverbs_std_types.h>
static int uverbs_free_mr(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
- return ib_dereg_mr((struct ib_mr *)uobject->object);
+ return ib_dereg_mr_user((struct ib_mr *)uobject->object,
+ &attrs->driver_udata);
}
static int UVERBS_HANDLER(UVERBS_METHOD_ADVISE_MR)(
@@ -145,7 +148,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
return 0;
err_dereg:
- ib_dereg_mr(mr);
+ ib_dereg_mr_user(mr, &attrs->driver_udata);
return ret;
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 5a5e83f5f0fc..e666a1f7608d 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -218,6 +218,8 @@ rdma_node_get_transport(enum rdma_node_type node_type)
return RDMA_TRANSPORT_USNIC_UDP;
if (node_type == RDMA_NODE_RNIC)
return RDMA_TRANSPORT_IWARP;
+ if (node_type == RDMA_NODE_UNSPECIFIED)
+ return RDMA_TRANSPORT_UNSPECIFIED;
return RDMA_TRANSPORT_IB;
}
@@ -269,7 +271,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
pd->res.type = RDMA_RESTRACK_PD;
rdma_restrack_set_task(&pd->res, caller);
- ret = device->ops.alloc_pd(pd, NULL, NULL);
+ ret = device->ops.alloc_pd(pd, NULL);
if (ret) {
kfree(pd);
return ERR_PTR(ret);
@@ -316,17 +318,18 @@ EXPORT_SYMBOL(__ib_alloc_pd);
/**
* ib_dealloc_pd - Deallocates a protection domain.
* @pd: The protection domain to deallocate.
+ * @udata: Valid user data or NULL for kernel object
*
* It is an error to call this function while any resources in the pd still
* exist. The caller is responsible to synchronously destroy them and
* guarantee no new allocations will happen.
*/
-void ib_dealloc_pd(struct ib_pd *pd)
+void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
{
int ret;
if (pd->__internal_mr) {
- ret = pd->device->ops.dereg_mr(pd->__internal_mr);
+ ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
WARN_ON(ret);
pd->__internal_mr = NULL;
}
@@ -336,10 +339,10 @@ void ib_dealloc_pd(struct ib_pd *pd)
WARN_ON(atomic_read(&pd->usecnt));
rdma_restrack_del(&pd->res);
- pd->device->ops.dealloc_pd(pd);
+ pd->device->ops.dealloc_pd(pd, udata);
kfree(pd);
}
-EXPORT_SYMBOL(ib_dealloc_pd);
+EXPORT_SYMBOL(ib_dealloc_pd_user);
/* Address handles */
@@ -495,25 +498,33 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
u32 flags,
struct ib_udata *udata)
{
+ struct ib_device *device = pd->device;
struct ib_ah *ah;
+ int ret;
might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
- if (!pd->device->ops.create_ah)
+ if (!device->ops.create_ah)
return ERR_PTR(-EOPNOTSUPP);
- ah = pd->device->ops.create_ah(pd, ah_attr, flags, udata);
+ ah = rdma_zalloc_drv_obj_gfp(
+ device, ib_ah,
+ (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
- if (!IS_ERR(ah)) {
- ah->device = pd->device;
- ah->pd = pd;
- ah->uobject = NULL;
- ah->type = ah_attr->type;
- ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+ ah->device = device;
+ ah->pd = pd;
+ ah->type = ah_attr->type;
+ ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
- atomic_inc(&pd->usecnt);
+ ret = device->ops.create_ah(ah, ah_attr, flags, udata);
+ if (ret) {
+ kfree(ah);
+ return ERR_PTR(ret);
}
+ atomic_inc(&pd->usecnt);
return ah;
}
@@ -930,25 +941,24 @@ int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
}
EXPORT_SYMBOL(rdma_query_ah);
-int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
+int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
{
const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
struct ib_pd *pd;
- int ret;
might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
pd = ah->pd;
- ret = ah->device->ops.destroy_ah(ah, flags);
- if (!ret) {
- atomic_dec(&pd->usecnt);
- if (sgid_attr)
- rdma_put_gid_attr(sgid_attr);
- }
- return ret;
+ ah->device->ops.destroy_ah(ah, flags);
+ atomic_dec(&pd->usecnt);
+ if (sgid_attr)
+ rdma_put_gid_attr(sgid_attr);
+
+ kfree(ah);
+ return 0;
}
-EXPORT_SYMBOL(rdma_destroy_ah);
+EXPORT_SYMBOL(rdma_destroy_ah_user);
/* Shared receive queues */
@@ -956,29 +966,40 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr)
{
struct ib_srq *srq;
+ int ret;
if (!pd->device->ops.create_srq)
return ERR_PTR(-EOPNOTSUPP);
- srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);
-
- if (!IS_ERR(srq)) {
- srq->device = pd->device;
- srq->pd = pd;
- srq->uobject = NULL;
- srq->event_handler = srq_init_attr->event_handler;
- srq->srq_context = srq_init_attr->srq_context;
- srq->srq_type = srq_init_attr->srq_type;
- if (ib_srq_has_cq(srq->srq_type)) {
- srq->ext.cq = srq_init_attr->ext.cq;
- atomic_inc(&srq->ext.cq->usecnt);
- }
- if (srq->srq_type == IB_SRQT_XRC) {
- srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
- atomic_inc(&srq->ext.xrc.xrcd->usecnt);
- }
- atomic_inc(&pd->usecnt);
- atomic_set(&srq->usecnt, 0);
+ srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ srq->device = pd->device;
+ srq->pd = pd;
+ srq->event_handler = srq_init_attr->event_handler;
+ srq->srq_context = srq_init_attr->srq_context;
+ srq->srq_type = srq_init_attr->srq_type;
+
+ if (ib_srq_has_cq(srq->srq_type)) {
+ srq->ext.cq = srq_init_attr->ext.cq;
+ atomic_inc(&srq->ext.cq->usecnt);
+ }
+ if (srq->srq_type == IB_SRQT_XRC) {
+ srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
+ atomic_inc(&srq->ext.xrc.xrcd->usecnt);
+ }
+ atomic_inc(&pd->usecnt);
+
+ ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
+ if (ret) {
+ atomic_dec(&srq->pd->usecnt);
+ if (srq->srq_type == IB_SRQT_XRC)
+ atomic_dec(&srq->ext.xrc.xrcd->usecnt);
+ if (ib_srq_has_cq(srq->srq_type))
+ atomic_dec(&srq->ext.cq->usecnt);
+ kfree(srq);
+ return ERR_PTR(ret);
}
return srq;
@@ -1003,36 +1024,23 @@ int ib_query_srq(struct ib_srq *srq,
}
EXPORT_SYMBOL(ib_query_srq);
-int ib_destroy_srq(struct ib_srq *srq)
+int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
{
- struct ib_pd *pd;
- enum ib_srq_type srq_type;
- struct ib_xrcd *uninitialized_var(xrcd);
- struct ib_cq *uninitialized_var(cq);
- int ret;
-
if (atomic_read(&srq->usecnt))
return -EBUSY;
- pd = srq->pd;
- srq_type = srq->srq_type;
- if (ib_srq_has_cq(srq_type))
- cq = srq->ext.cq;
- if (srq_type == IB_SRQT_XRC)
- xrcd = srq->ext.xrc.xrcd;
+ srq->device->ops.destroy_srq(srq, udata);
- ret = srq->device->ops.destroy_srq(srq);
- if (!ret) {
- atomic_dec(&pd->usecnt);
- if (srq_type == IB_SRQT_XRC)
- atomic_dec(&xrcd->usecnt);
- if (ib_srq_has_cq(srq_type))
- atomic_dec(&cq->usecnt);
- }
+ atomic_dec(&srq->pd->usecnt);
+ if (srq->srq_type == IB_SRQT_XRC)
+ atomic_dec(&srq->ext.xrc.xrcd->usecnt);
+ if (ib_srq_has_cq(srq->srq_type))
+ atomic_dec(&srq->ext.cq->usecnt);
+ kfree(srq);
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(ib_destroy_srq);
+EXPORT_SYMBOL(ib_destroy_srq_user);
/* Queue pairs */
@@ -1111,8 +1119,9 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
}
EXPORT_SYMBOL(ib_open_qp);
-static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
- struct ib_qp_init_attr *qp_init_attr)
+static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
{
struct ib_qp *real_qp = qp;
@@ -1134,8 +1143,9 @@ static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
return qp;
}
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr)
+struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
{
struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
struct ib_qp *qp;
@@ -1164,7 +1174,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (ret)
goto err;
- qp->real_qp = qp;
qp->qp_type = qp_init_attr->qp_type;
qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
@@ -1176,7 +1185,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->port = 0;
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
- struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr);
+ struct ib_qp *xrc_qp =
+ create_xrc_qp_user(qp, qp_init_attr, udata);
if (IS_ERR(xrc_qp)) {
ret = PTR_ERR(xrc_qp);
@@ -1230,7 +1240,7 @@ err:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_create_qp);
+EXPORT_SYMBOL(ib_create_qp_user);
static const struct {
int valid;
@@ -1837,7 +1847,7 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp)
return 0;
}
-int ib_destroy_qp(struct ib_qp *qp)
+int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
{
const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
@@ -1869,7 +1879,7 @@ int ib_destroy_qp(struct ib_qp *qp)
rdma_rw_cleanup_mrs(qp);
rdma_restrack_del(&qp->res);
- ret = qp->device->ops.destroy_qp(qp);
+ ret = qp->device->ops.destroy_qp(qp, udata);
if (!ret) {
if (alt_path_sgid_attr)
rdma_put_gid_attr(alt_path_sgid_attr);
@@ -1894,7 +1904,7 @@ int ib_destroy_qp(struct ib_qp *qp)
return ret;
}
-EXPORT_SYMBOL(ib_destroy_qp);
+EXPORT_SYMBOL(ib_destroy_qp_user);
/* Completion queues */
@@ -1907,7 +1917,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
{
struct ib_cq *cq;
- cq = device->ops.create_cq(device, cq_attr, NULL, NULL);
+ cq = device->ops.create_cq(device, cq_attr, NULL);
if (!IS_ERR(cq)) {
cq->device = device;
@@ -1933,15 +1943,15 @@ int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
}
EXPORT_SYMBOL(rdma_set_cq_moderation);
-int ib_destroy_cq(struct ib_cq *cq)
+int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
if (atomic_read(&cq->usecnt))
return -EBUSY;
rdma_restrack_del(&cq->res);
- return cq->device->ops.destroy_cq(cq);
+ return cq->device->ops.destroy_cq(cq, udata);
}
-EXPORT_SYMBOL(ib_destroy_cq);
+EXPORT_SYMBOL(ib_destroy_cq_user);
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
@@ -1952,14 +1962,14 @@ EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */
-int ib_dereg_mr(struct ib_mr *mr)
+int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{
struct ib_pd *pd = mr->pd;
struct ib_dm *dm = mr->dm;
int ret;
rdma_restrack_del(&mr->res);
- ret = mr->device->ops.dereg_mr(mr);
+ ret = mr->device->ops.dereg_mr(mr, udata);
if (!ret) {
atomic_dec(&pd->usecnt);
if (dm)
@@ -1968,13 +1978,14 @@ int ib_dereg_mr(struct ib_mr *mr)
return ret;
}
-EXPORT_SYMBOL(ib_dereg_mr);
+EXPORT_SYMBOL(ib_dereg_mr_user);
/**
* ib_alloc_mr() - Allocates a memory region
* @pd: protection domain associated with the region
* @mr_type: memory region type
* @max_num_sg: maximum sg entries available for registration.
+ * @udata: user data or NULL for kernel objects
*
* Notes:
* Memory registration page/sg lists must not exceed max_num_sg.
@@ -1982,16 +1993,15 @@ EXPORT_SYMBOL(ib_dereg_mr);
* max_num_sg * used_page_size.
*
*/
-struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct ib_mr *mr;
if (!pd->device->ops.alloc_mr)
return ERR_PTR(-EOPNOTSUPP);
- mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
+ mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata);
if (!IS_ERR(mr)) {
mr->device = pd->device;
mr->pd = pd;
@@ -2005,7 +2015,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
return mr;
}
-EXPORT_SYMBOL(ib_alloc_mr);
+EXPORT_SYMBOL(ib_alloc_mr_user);
/* "Fast" memory regions */
@@ -2138,7 +2148,7 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
if (!device->ops.alloc_xrcd)
return ERR_PTR(-EOPNOTSUPP);
- xrcd = device->ops.alloc_xrcd(device, NULL, NULL);
+ xrcd = device->ops.alloc_xrcd(device, NULL);
if (!IS_ERR(xrcd)) {
xrcd->device = device;
xrcd->inode = NULL;
@@ -2151,7 +2161,7 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
}
EXPORT_SYMBOL(__ib_alloc_xrcd);
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
struct ib_qp *qp;
int ret;
@@ -2166,7 +2176,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
return ret;
}
- return xrcd->device->ops.dealloc_xrcd(xrcd);
+ return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
@@ -2210,10 +2220,11 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
EXPORT_SYMBOL(ib_create_wq);
/**
- * ib_destroy_wq - Destroys the specified WQ.
+ * ib_destroy_wq - Destroys the specified user WQ.
* @wq: The WQ to destroy.
+ * @udata: Valid user data
*/
-int ib_destroy_wq(struct ib_wq *wq)
+int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
int err;
struct ib_cq *cq = wq->cq;
@@ -2222,7 +2233,7 @@ int ib_destroy_wq(struct ib_wq *wq)
if (atomic_read(&wq->usecnt))
return -EBUSY;
- err = wq->device->ops.destroy_wq(wq);
+ err = wq->device->ops.destroy_wq(wq, udata);
if (!err) {
atomic_dec(&pd->usecnt);
atomic_dec(&cq->usecnt);
@@ -2701,3 +2712,37 @@ int rdma_init_netdev(struct ib_device *device, u8 port_num,
netdev, params.param);
}
EXPORT_SYMBOL(rdma_init_netdev);
+
+void __rdma_block_iter_start(struct ib_block_iter *biter,
+ struct scatterlist *sglist, unsigned int nents,
+ unsigned long pgsz)
+{
+ memset(biter, 0, sizeof(struct ib_block_iter));
+ biter->__sg = sglist;
+ biter->__sg_nents = nents;
+
+ /* Driver provides best block size to use */
+ biter->__pg_bit = __fls(pgsz);
+}
+EXPORT_SYMBOL(__rdma_block_iter_start);
+
+bool __rdma_block_iter_next(struct ib_block_iter *biter)
+{
+ unsigned int block_offset;
+
+ if (!biter->__sg_nents || !biter->__sg)
+ return false;
+
+ biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
+ block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
+ biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
+
+ if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
+ biter->__sg_advance = 0;
+ biter->__sg = sg_next(biter->__sg);
+ biter->__sg_nents--;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__rdma_block_iter_next);
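__rdma_block_iter_start()/__rdma_block_iter_next() back a new iterator that walks a DMA-mapped scatterlist in aligned blocks of a driver-chosen size. Consumers are expected to reach them through the rdma_for_each_block()/rdma_block_iter_dma_address() wrappers, as the bnxt_re PBL fill later in this diff does; a minimal usage sketch (fill_pbl is an invented helper name):

/* Emit one DMA address per block of the given (power-of-two) size. */
static void fill_pbl(struct scatterlist *sgl, unsigned int nmap,
		     unsigned long page_size, u64 *pbl)
{
	struct ib_block_iter biter;

	rdma_for_each_block(sgl, &biter, nmap, page_size)
		*pbl++ = rdma_block_iter_dma_address(&biter);
}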
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index e4f31c1be8f7..77094be1b262 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
+obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index d25439c305f7..51e8234520a9 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,10 +1,10 @@
config INFINIBAND_BNXT_RE
- tristate "Broadcom Netxtreme HCA support"
- depends on 64BIT
- depends on ETHERNET && NETDEVICES && PCI && INET && DCB
- select NET_VENDOR_BROADCOM
- select BNXT
- ---help---
+ tristate "Broadcom Netxtreme HCA support"
+ depends on 64BIT
+ depends on ETHERNET && NETDEVICES && PCI && INET && DCB
+ select NET_VENDOR_BROADCOM
+ select BNXT
+ ---help---
This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
RoCE HCAs. To compile this driver as a module, choose M here:
the module will be called bnxt_re.
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 071b2fc38b0b..2c3685faa57a 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -119,21 +119,6 @@ static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
}
/* Device */
-struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct net_device *netdev = NULL;
-
- rcu_read_lock();
- if (rdev)
- netdev = rdev->netdev;
- if (netdev)
- dev_hold(netdev);
-
- rcu_read_unlock();
- return netdev;
-}
-
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata)
@@ -375,8 +360,9 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
- if ((attr->ndev) && is_vlan_dev(attr->ndev))
- vlan_id = vlan_dev_vlan_id(attr->ndev);
+ rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
+ if (rc)
+ return rc;
rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
rdev->qplib_res.netdev->dev_addr,
@@ -564,7 +550,7 @@ fail:
}
/* Protection Domains */
-void bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
+void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
@@ -576,14 +562,12 @@ void bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
&pd->qplib_pd);
}
-int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext,
- struct ib_udata *udata)
+int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_re_ucontext *ucntx = container_of(ucontext,
- struct bnxt_re_ucontext,
- ib_uctx);
+ struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
+ udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
int rc;
@@ -635,20 +619,13 @@ fail:
}
/* Address Handles */
-int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
+void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
struct bnxt_re_dev *rdev = ah->rdev;
- int rc;
- rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
- !(flags & RDMA_DESTROY_AH_SLEEPABLE));
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
- return rc;
- }
- kfree(ah);
- return 0;
+ bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
+ !(flags & RDMA_DESTROY_AH_SLEEPABLE));
}
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
@@ -669,26 +646,22 @@ static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
return nw_type;
}
-struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
- struct ib_udata *udata)
+int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata)
{
+ struct ib_pd *ib_pd = ib_ah->pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
struct bnxt_re_dev *rdev = pd->rdev;
const struct ib_gid_attr *sgid_attr;
- struct bnxt_re_ah *ah;
+ struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
u8 nw_type;
int rc;
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
ah->rdev = rdev;
ah->qplib_ah.pd = &pd->qplib_pd;
@@ -718,7 +691,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
!(flags & RDMA_CREATE_AH_SLEEPABLE));
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
- goto fail;
+ return rc;
}
/* Write AVID to shared page. */
@@ -735,11 +708,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
spin_unlock_irqrestore(&uctx->sh_lock, flag);
}
- return &ah->ib_ah;
-
-fail:
- kfree(ah);
- return ERR_PTR(rc);
+ return 0;
}
int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
@@ -789,7 +758,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
}
/* Queue Pairs */
-int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
+int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
@@ -812,13 +781,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
- rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
- &rdev->sqp_ah->qplib_ah, false);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to destroy HW AH for shadow QP");
- return rc;
- }
+ bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah,
+ false);
bnxt_qplib_clean_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
@@ -895,8 +859,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
return PTR_ERR(umem);
qp->sumem = umem;
- qplib_qp->sq.sglist = umem->sg_head.sgl;
- qplib_qp->sq.nmap = umem->nmap;
+ qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
+ qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
+ qplib_qp->sq.sg_info.nmap = umem->nmap;
qplib_qp->qp_handle = ureq.qp_handle;
if (!qp->qplib_qp.srq) {
@@ -907,8 +872,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
- qplib_qp->rq.sglist = umem->sg_head.sgl;
- qplib_qp->rq.nmap = umem->nmap;
+ qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
+ qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
+ qplib_qp->rq.sg_info.nmap = umem->nmap;
}
qplib_qp->dpi = &cntx->dpi;
@@ -916,8 +882,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
rqfail:
ib_umem_release(qp->sumem);
qp->sumem = NULL;
- qplib_qp->sq.sglist = NULL;
- qplib_qp->sq.nmap = 0;
+ memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
return PTR_ERR(umem);
}
@@ -1326,30 +1291,22 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}
/* Shared Receive Queues */
-int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
+void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
ib_srq);
struct bnxt_re_dev *rdev = srq->rdev;
struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
struct bnxt_qplib_nq *nq = NULL;
- int rc;
if (qplib_srq->cq)
nq = qplib_srq->cq->nq;
- rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
- return rc;
- }
-
+ bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
if (srq->umem)
ib_umem_release(srq->umem);
- kfree(srq);
atomic_dec(&rdev->srq_count);
if (nq)
nq->budget--;
- return 0;
}
static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
@@ -1374,22 +1331,25 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
return PTR_ERR(umem);
srq->umem = umem;
- qplib_srq->nmap = umem->nmap;
- qplib_srq->sglist = umem->sg_head.sgl;
+ qplib_srq->sg_info.sglist = umem->sg_head.sgl;
+ qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
+ qplib_srq->sg_info.nmap = umem->nmap;
qplib_srq->srq_handle = ureq.srq_handle;
qplib_srq->dpi = &cntx->dpi;
return 0;
}
-struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
+int bnxt_re_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
{
+ struct ib_pd *ib_pd = ib_srq->pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_srq *srq;
+ struct bnxt_re_srq *srq =
+ container_of(ib_srq, struct bnxt_re_srq, ib_srq);
struct bnxt_qplib_nq *nq = NULL;
int rc, entries;
@@ -1404,11 +1364,6 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
goto exit;
}
- srq = kzalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- rc = -ENOMEM;
- goto exit;
- }
srq->rdev = rdev;
srq->qplib_srq.pd = &pd->qplib_pd;
srq->qplib_srq.dpi = &rdev->dpi_privileged;
@@ -1454,14 +1409,13 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
nq->budget++;
atomic_inc(&rdev->srq_count);
- return &srq->ib_srq;
+ return 0;
fail:
if (srq->umem)
ib_umem_release(srq->umem);
- kfree(srq);
exit:
- return ERR_PTR(rc);
+ return rc;
}
int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
@@ -1684,8 +1638,11 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->ah_attr.roce.dmac);
sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
- memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
- ETH_ALEN);
+ rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
+ &qp->qplib_qp.smac[0]);
+ if (rc)
+ return rc;
+
nw_type = rdma_gid_attr_network_type(sgid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV4:
@@ -1904,8 +1861,10 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
- if (is_vlan_dev(sgid_attr->ndev))
- vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
+ rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
+ if (rc)
+ return rc;
+
/* Get network header type for this GID */
nw_type = rdma_gid_attr_network_type(sgid_attr);
switch (nw_type) {
@@ -2558,7 +2517,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
}
/* Completion Queues */
-int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
+int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
int rc;
struct bnxt_re_cq *cq;
@@ -2587,7 +2546,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
@@ -2614,12 +2572,10 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;
- if (context) {
+ if (udata) {
struct bnxt_re_cq_req req;
- struct bnxt_re_ucontext *uctx = container_of
- (context,
- struct bnxt_re_ucontext,
- ib_uctx);
+ struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct bnxt_re_ucontext, ib_uctx);
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
rc = -EFAULT;
goto fail;
@@ -2632,8 +2588,9 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
rc = PTR_ERR(cq->umem);
goto fail;
}
- cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
- cq->qplib_cq.nmap = cq->umem->nmap;
+ cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
+ cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
+ cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
cq->qplib_cq.dpi = &uctx->dpi;
} else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
@@ -2645,8 +2602,6 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
}
cq->qplib_cq.dpi = &rdev->dpi_privileged;
- cq->qplib_cq.sghead = NULL;
- cq->qplib_cq.nmap = 0;
}
/*
* Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
@@ -2671,7 +2626,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
atomic_inc(&rdev->cq_count);
spin_lock_init(&cq->cq_lock);
- if (context) {
+ if (udata) {
struct bnxt_re_cq_resp resp;
resp.cqid = cq->qplib_cq.id;
@@ -2689,7 +2644,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
return &cq->ib_cq;
c2fail:
- if (context)
+ if (udata)
ib_umem_release(cq->umem);
fail:
kfree(cq->cql);
@@ -3381,7 +3336,7 @@ fail:
return ERR_PTR(rc);
}
-int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
+int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
struct bnxt_re_dev *rdev = mr->rdev;
@@ -3427,7 +3382,7 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
}
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
- u32 max_num_sg)
+ u32 max_num_sg, struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
@@ -3552,17 +3507,12 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
int page_shift)
{
u64 *pbl_tbl = pbl_tbl_orig;
- u64 paddr;
- u64 page_mask = (1ULL << page_shift) - 1;
- struct sg_dma_page_iter sg_iter;
+ u64 page_size = BIT_ULL(page_shift);
+ struct ib_block_iter biter;
+
+ rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+ *pbl_tbl++ = rdma_block_iter_dma_address(&biter);
- for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- paddr = sg_page_iter_dma_address(&sg_iter);
- if (pbl_tbl == pbl_tbl_orig)
- *pbl_tbl++ = paddr & ~page_mask;
- else if ((paddr & page_mask) == 0)
- *pbl_tbl++ = paddr;
- }
return pbl_tbl - pbl_tbl_orig;
}
@@ -3624,7 +3574,9 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
goto free_umem;
}
- page_shift = PAGE_SHIFT;
+ page_shift = __ffs(ib_umem_find_best_pgsz(umem,
+ BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
+ virt_addr));
if (!bnxt_re_page_size_ok(page_shift)) {
dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
@@ -3632,17 +3584,13 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
goto fail;
}
- if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+ if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
+ length > BNXT_RE_MAX_MR_SIZE_LOW) {
dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
rc = -EINVAL;
goto fail;
}
- if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
- page_shift = BNXT_RE_PAGE_SHIFT_2M;
- dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
- 1 << page_shift);
- }
/* Map umem buf ptrs to the PBL */
umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
@@ -3709,7 +3657,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
resp.chip_id0 = chip_met_rev_num;
/* Future extension of chip info */
resp.chip_id1 = 0;
- /*Temp, Use idr_alloc instead */
+ /* Temp, use xa_alloc instead */
resp.dev_id = rdev->en_dev->pdev->devfn;
resp.max_qp = rdev->qplib_ctx.qpc_count;
resp.pg_size = PAGE_SIZE;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index e45465ed4eee..09a33049e42f 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -63,15 +63,15 @@ struct bnxt_re_pd {
};
struct bnxt_re_ah {
- struct bnxt_re_dev *rdev;
struct ib_ah ib_ah;
+ struct bnxt_re_dev *rdev;
struct bnxt_qplib_ah qplib_ah;
};
struct bnxt_re_srq {
+ struct ib_srq ib_srq;
struct bnxt_re_dev *rdev;
u32 srq_limit;
- struct ib_srq ib_srq;
struct bnxt_qplib_srq qplib_srq;
struct ib_umem *umem;
spinlock_t lock; /* protect srq */
@@ -142,8 +142,6 @@ struct bnxt_re_ucontext {
spinlock_t sh_lock; /* protect shpg */
};
-struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num);
-
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
@@ -163,24 +161,21 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
-int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata);
-void bnxt_re_dealloc_pd(struct ib_pd *pd);
-struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
- struct ib_udata *udata);
+int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+ struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
-struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
+void bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
+int bnxt_re_create_srq(struct ib_srq *srq,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int bnxt_re_destroy_srq(struct ib_srq *srq);
+void bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
@@ -190,16 +185,15 @@ int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-int bnxt_re_destroy_qp(struct ib_qp *qp);
+int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
const struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
-int bnxt_re_destroy_cq(struct ib_cq *cq);
+int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
@@ -207,8 +201,8 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
- u32 max_num_sg);
-int bnxt_re_dereg_mr(struct ib_mr *mr);
+ u32 max_num_sg, struct ib_udata *udata);
+int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 2bd24ac45ee4..814f959c7db9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -617,7 +617,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.get_dma_mr = bnxt_re_get_dma_mr,
.get_hw_stats = bnxt_re_ib_get_hw_stats,
.get_link_layer = bnxt_re_get_link_layer,
- .get_netdev = bnxt_re_get_netdev,
.get_port_immutable = bnxt_re_get_port_immutable,
.map_mr_sg = bnxt_re_map_mr_sg,
.mmap = bnxt_re_mmap,
@@ -637,13 +636,16 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.query_srq = bnxt_re_query_srq,
.reg_user_mr = bnxt_re_reg_user_mr,
.req_notify_cq = bnxt_re_req_notify_cq,
+ INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+ INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
struct ib_device *ibdev = &rdev->ibdev;
+ int ret;
/* ib device init */
ibdev->owner = THIS_MODULE;
@@ -691,6 +693,10 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
ibdev->driver_id = RDMA_DRIVER_BNXT_RE;
ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
+ ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
+ if (ret)
+ return ret;
+
return ib_register_device(ibdev, "bnxt_re%d");
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 71c34d5b0ac0..958c1ff9c515 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -478,7 +478,7 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
hwq_type = bnxt_qplib_get_hwq_type(nq->res);
- if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
+ if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL,
&nq->hwq.max_elements,
BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
PAGE_SIZE, hwq_type))
@@ -507,7 +507,7 @@ static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
writeq(val, db);
}
-int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
@@ -521,14 +521,12 @@ int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
/* Configure the request */
req.srq_cid = cpu_to_le32(srq->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
+ (struct creq_base *)&resp, NULL, 0);
+ kfree(srq->swq);
if (rc)
- return rc;
-
+ return;
bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
- kfree(srq->swq);
- return 0;
}
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
@@ -542,8 +540,8 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
int rc, idx;
srq->hwq.max_elements = srq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
- srq->nmap, &srq->hwq.max_elements,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, &srq->sg_info,
+ &srq->hwq.max_elements,
BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE);
if (rc)
@@ -742,7 +740,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* SQ */
sq->hwq.max_elements = sq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL,
&sq->hwq.max_elements,
BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE);
@@ -781,7 +779,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (rq->max_wqe) {
rq->hwq.max_elements = qp->rq.max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL,
&rq->hwq.max_elements,
BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE);
@@ -890,8 +888,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
sizeof(struct sq_psn_search);
}
sq->hwq.max_elements = sq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
- sq->nmap, &sq->hwq.max_elements,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info,
+ &sq->hwq.max_elements,
BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
psn_sz,
PAGE_SIZE, HWQ_TYPE_QUEUE);
@@ -959,8 +957,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (rq->max_wqe) {
rq->hwq.max_elements = rq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
- rq->nmap, &rq->hwq.max_elements,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq,
+ &rq->sg_info,
+ &rq->hwq.max_elements,
BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE);
if (rc)
@@ -1030,7 +1029,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req_size = xrrq->max_elements *
BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
req_size &= ~(PAGE_SIZE - 1);
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
&xrrq->max_elements,
BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
0, req_size, HWQ_TYPE_CTX);
@@ -1046,7 +1045,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
req_size &= ~(PAGE_SIZE - 1);
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
&xrrq->max_elements,
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
0, req_size, HWQ_TYPE_CTX);
@@ -1935,8 +1934,8 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
int rc;
cq->hwq.max_elements = cq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
- cq->nmap, &cq->hwq.max_elements,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info,
+ &cq->hwq.max_elements,
BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE);
if (rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 3f618b5f1f06..99e0a13cbefa 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -52,10 +52,9 @@ struct bnxt_qplib_srq {
struct bnxt_qplib_cq *cq;
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq;
- struct scatterlist *sglist;
int start_idx;
int last_idx;
- u32 nmap;
+ struct bnxt_qplib_sg_info sg_info;
u16 eventq_hw_ring_id;
spinlock_t lock; /* protect SRQE link list */
};
@@ -237,8 +236,7 @@ struct bnxt_qplib_swqe {
struct bnxt_qplib_q {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq;
- struct scatterlist *sglist;
- u32 nmap;
+ struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
u16 q_full_delta;
u16 max_sge;
@@ -381,8 +379,7 @@ struct bnxt_qplib_cq {
u32 cnq_hw_ring_id;
struct bnxt_qplib_nq *nq;
bool resize_in_progress;
- struct scatterlist *sghead;
- u32 nmap;
+ struct bnxt_qplib_sg_info sg_info;
u64 cq_handle;
#define CQ_RESIZE_WAIT_TIME_MS 500
@@ -521,8 +518,8 @@ int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
-int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
- struct bnxt_qplib_srq *srq);
+void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index c6461e957078..48b04d2f175f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -569,7 +569,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
rcfw->pdev = pdev;
rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
hwq_type = bnxt_qplib_get_hwq_type(rcfw->res);
- if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
+ if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL,
&rcfw->creq.max_elements,
BNXT_QPLIB_CREQE_UNITS,
0, PAGE_SIZE, hwq_type)) {
@@ -584,7 +584,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
rcfw->cmdq.max_elements = rcfw->cmdq_depth;
if (bnxt_qplib_alloc_init_hwq
- (rcfw->pdev, &rcfw->cmdq, NULL, 0,
+ (rcfw->pdev, &rcfw->cmdq, NULL,
&rcfw->cmdq.max_elements,
BNXT_QPLIB_CMDQE_UNITS, 0,
bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth),
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 0bc24f934829..37928b1111df 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -83,7 +83,8 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
}
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
- struct scatterlist *sghead, u32 pages, u32 pg_size)
+ struct scatterlist *sghead, u32 pages,
+ u32 nmaps, u32 pg_size)
{
struct sg_dma_page_iter sg_iter;
bool is_umem = false;
@@ -116,7 +117,7 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
} else {
i = 0;
is_umem = true;
- for_each_sg_dma_page (sghead, &sg_iter, pages, 0) {
+ for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) {
pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
pbl->pg_arr[i] = NULL;
pbl->pg_count++;
@@ -158,12 +159,13 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
- struct scatterlist *sghead, int nmap,
+ struct bnxt_qplib_sg_info *sg_info,
u32 *elements, u32 element_size, u32 aux,
u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
- u32 pages, slots, size, aux_pages = 0, aux_size = 0;
+ u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0;
dma_addr_t *src_phys_ptr, **dst_virt_ptr;
+ struct scatterlist *sghead = NULL;
int i, rc;
hwq->level = PBL_LVL_MAX;
@@ -177,6 +179,9 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
}
size = roundup_pow_of_two(element_size);
+ if (sg_info)
+ sghead = sg_info->sglist;
+
if (!sghead) {
hwq->is_user = false;
pages = (slots * size) / pg_size + aux_pages;
@@ -184,17 +189,20 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
pages++;
if (!pages)
return -EINVAL;
+ maps = 0;
} else {
hwq->is_user = true;
- pages = nmap;
+ pages = sg_info->npages;
+ maps = sg_info->nmap;
}
/* Alloc the 1st memory block; can be a PDL/PTL/PBL */
if (sghead && (pages == MAX_PBL_LVL_0_PGS))
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
- pages, pg_size);
+ pages, maps, pg_size);
else
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
+ rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL,
+ 1, 0, pg_size);
if (rc)
goto fail;
@@ -204,7 +212,8 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
if (pages > MAX_PBL_LVL_1_PGS) {
/* 2 levels of indirection */
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
- MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
+ MAX_PBL_LVL_1_PGS_FOR_LVL_2,
+ 0, pg_size);
if (rc)
goto fail;
/* Fill in lvl0 PBL */
@@ -217,7 +226,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
hwq->level = PBL_LVL_1;
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
- pages, pg_size);
+ pages, maps, pg_size);
if (rc)
goto fail;
@@ -246,7 +255,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
/* 1 level of indirection */
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
- pages, pg_size);
+ pages, maps, pg_size);
if (rc)
goto fail;
/* Fill in lvl0 PBL */
@@ -339,7 +348,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* QPC Tables */
ctx->qpc_tbl.max_elements = ctx->qpc_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL,
&ctx->qpc_tbl.max_elements,
BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
@@ -348,7 +357,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* MRW Tables */
ctx->mrw_tbl.max_elements = ctx->mrw_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL,
&ctx->mrw_tbl.max_elements,
BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
@@ -357,7 +366,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* SRQ Tables */
ctx->srqc_tbl.max_elements = ctx->srqc_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL,
&ctx->srqc_tbl.max_elements,
BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
@@ -366,7 +375,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* CQ Tables */
ctx->cq_tbl.max_elements = ctx->cq_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL,
&ctx->cq_tbl.max_elements,
BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
@@ -375,7 +384,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* TQM Buffer */
ctx->tqm_pde.max_elements = 512;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL,
&ctx->tqm_pde.max_elements, sizeof(u64),
0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
@@ -386,7 +395,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
continue;
ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
ctx->tqm_count[i];
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL,
&ctx->tqm_tbl[i].max_elements, 1,
0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
@@ -424,7 +433,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* TIM Buffer */
ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL,
&ctx->tim_tbl.max_elements, 1,
0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 32cebd0f1436..30c42c92fac7 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -219,6 +219,12 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}
+struct bnxt_qplib_sg_info {
+ struct scatterlist *sglist;
+ u32 nmap;
+ u32 npages;
+};
+
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
@@ -227,7 +233,7 @@ struct bnxt_qplib_dev_attr;
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
- struct scatterlist *sl, int nmap, u32 *elements,
+ struct bnxt_qplib_sg_info *sg_info, u32 *elements,
u32 elements_per_page, u32 aux, u32 pg_size,
enum bnxt_qplib_hwq_type hwq_type);
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index e9c53e406404..48793d3512ac 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -532,25 +532,21 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
return 0;
}
-int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
- bool block)
+void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_ah req;
struct creq_destroy_ah_resp resp;
u16 cmd_flags = 0;
- int rc;
/* Clean up the AH table in the device */
RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
req.ah_cid = cpu_to_le32(ah->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
- NULL, block);
- if (rc)
- return rc;
- return 0;
+ bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
+ block);
}
/* MRW */
@@ -684,7 +680,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
mr->hwq.max_elements = pages;
/* Use system PAGE_SIZE */
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL,
&mr->hwq.max_elements,
PAGE_SIZE, 0, PAGE_SIZE,
HWQ_TYPE_CTX);
@@ -754,7 +750,7 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
return -ENOMEM;
frpl->hwq.max_elements = pages;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL, 0,
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL,
&frpl->hwq.max_elements, PAGE_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
if (!rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 39454b3f738d..0ec3b12b0bcd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -243,8 +243,8 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block);
-int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
- bool block);
+void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mrw);
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 83d2e19d31ae..53aa5c36247a 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -64,7 +64,7 @@ enum t3_wr_flags {
T3_SOLICITED_EVENT_FLAG = 0x04,
T3_READ_FENCE_FLAG = 0x08,
T3_LOCAL_FENCE_FLAG = 0x10
-} __attribute__ ((packed));
+} __packed;
enum t3_wr_opcode {
T3_WR_BP = FW_WROPCODE_RI_BYPASS,
@@ -77,7 +77,7 @@ enum t3_wr_opcode {
T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
-} __attribute__ ((packed));
+} __packed;
enum t3_rdma_opcode {
T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
@@ -95,7 +95,7 @@ enum t3_rdma_opcode {
T3_QP_MOD,
T3_BYPASS,
T3_RDMA_READ_REQ_WITH_INV,
-} __attribute__ ((packed));
+} __packed;
static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
{
@@ -306,7 +306,7 @@ enum t3_mpa_attrs {
uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
uP_RI_MPA_CRC_ENABLE = 0x4,
uP_RI_MPA_IETF_ENABLE = 0x8
-} __attribute__ ((packed));
+} __packed;
enum t3_qp_caps {
uP_RI_QP_RDMA_READ_ENABLE = 0x01,
@@ -314,7 +314,7 @@ enum t3_qp_caps {
uP_RI_QP_BIND_ENABLE = 0x04,
uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
uP_RI_QP_STAG0_ENABLE = 0x10
-} __attribute__ ((packed));
+} __packed;
enum rdma_init_rtr_types {
RTR_READ = 1,
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index fb03bc492ef7..56a8ab6210cf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -62,37 +62,30 @@ struct cxgb3_client t3c_client = {
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);
-static int disable_qp_db(int id, void *p, void *data)
-{
- struct iwch_qp *qhp = p;
-
- cxio_disable_wq_db(&qhp->wq);
- return 0;
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
- struct iwch_qp *qhp = p;
-
- if (data)
- ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
- cxio_enable_wq_db(&qhp->wq);
- return 0;
-}
-
static void disable_dbs(struct iwch_dev *rnicp)
{
- spin_lock_irq(&rnicp->lock);
- idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
- spin_unlock_irq(&rnicp->lock);
+ unsigned long index;
+ struct iwch_qp *qhp;
+
+ xa_lock_irq(&rnicp->qps);
+ xa_for_each(&rnicp->qps, index, qhp)
+ cxio_disable_wq_db(&qhp->wq);
+ xa_unlock_irq(&rnicp->qps);
}
static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
{
- spin_lock_irq(&rnicp->lock);
- idr_for_each(&rnicp->qpidr, enable_qp_db,
- (void *)(unsigned long)ring_db);
- spin_unlock_irq(&rnicp->lock);
+ unsigned long index;
+ struct iwch_qp *qhp;
+
+ xa_lock_irq(&rnicp->qps);
+ xa_for_each(&rnicp->qps, index, qhp) {
+ if (ring_db)
+ ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
+ qhp->wq.qpid);
+ cxio_enable_wq_db(&qhp->wq);
+ }
+ xa_unlock_irq(&rnicp->qps);
}
static void iwch_db_drop_task(struct work_struct *work)
@@ -105,10 +98,9 @@ static void iwch_db_drop_task(struct work_struct *work)
static void rnic_init(struct iwch_dev *rnicp)
{
pr_debug("%s iwch_dev %p\n", __func__, rnicp);
- idr_init(&rnicp->cqidr);
- idr_init(&rnicp->qpidr);
- idr_init(&rnicp->mmidr);
- spin_lock_init(&rnicp->lock);
+ xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&rnicp->mrs, XA_FLAGS_LOCK_IRQ);
INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
@@ -190,9 +182,9 @@ static void close_rnic_dev(struct t3cdev *tdev)
list_del(&dev->entry);
iwch_unregister_device(dev);
cxio_rdev_close(&dev->rdev);
- idr_destroy(&dev->cqidr);
- idr_destroy(&dev->qpidr);
- idr_destroy(&dev->mmidr);
+ WARN_ON(!xa_empty(&dev->cqs));
+ WARN_ON(!xa_empty(&dev->qps));
+ WARN_ON(!xa_empty(&dev->mrs));
ib_dealloc_device(&dev->ibdev);
break;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index c69bc4f52049..310a937bffcf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -35,7 +35,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
@@ -106,10 +106,9 @@ struct iwch_dev {
struct cxio_rdev rdev;
u32 device_cap_flags;
struct iwch_rnic_attributes attr;
- struct idr cqidr;
- struct idr qpidr;
- struct idr mmidr;
- spinlock_t lock;
+ struct xarray cqs;
+ struct xarray qps;
+ struct xarray mrs;
struct list_head entry;
struct delayed_work db_drop_task;
};
@@ -136,40 +135,17 @@ static inline int t3a_device(const struct iwch_dev *rhp)
static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
{
- return idr_find(&rhp->cqidr, cqid);
+ return xa_load(&rhp->cqs, cqid);
}
static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
{
- return idr_find(&rhp->qpidr, qpid);
+ return xa_load(&rhp->qps, qpid);
}
static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
{
- return idr_find(&rhp->mmidr, mmid);
-}
-
-static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
- void *handle, u32 id)
-{
- int ret;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&rhp->lock);
-
- ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
-
- spin_unlock_irq(&rhp->lock);
- idr_preload_end();
-
- return ret < 0 ? ret : 0;
-}
-
-static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
-{
- spin_lock_irq(&rhp->lock);
- idr_remove(idr, id);
- spin_unlock_irq(&rhp->lock);
+ return xa_load(&rhp->mrs, mmid);
}
extern struct cxgb3_client t3c_client;
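With the three idr tables replaced by xarrays, the open-coded insert_handle()/remove_handle() helpers (and the separate rhp->lock) go away; callers use the xarray primitives directly, as iwch_mem.c and iwch_provider.c do further down in this diff. A condensed sketch of the replacement pattern (iwch_insert_qp and iwch_remove_qp are hypothetical wrappers shown only to contrast the two APIs):

/* was: insert_handle(rhp, &rhp->qpidr, qhp, qpid) under rhp->lock */
static int iwch_insert_qp(struct iwch_dev *rhp, struct iwch_qp *qhp, u32 qpid)
{
	return xa_insert_irq(&rhp->qps, qpid, qhp, GFP_KERNEL);
}

/* was: remove_handle(rhp, &rhp->qpidr, qpid) */
static void iwch_remove_qp(struct iwch_dev *rhp, u32 qpid)
{
	xa_erase_irq(&rhp->qps, qpid);
}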
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 4a0c82a8fb60..9d356c1301c7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -48,14 +48,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
struct iwch_qp *qhp;
unsigned long flag;
- spin_lock(&rnicp->lock);
- qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
+ xa_lock(&rnicp->qps);
+ qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
if (!qhp) {
pr_err("%s unaffiliated error 0x%x qpid 0x%x\n",
__func__, CQE_STATUS(rsp_msg->cqe),
CQE_QPID(rsp_msg->cqe));
- spin_unlock(&rnicp->lock);
+ xa_unlock(&rnicp->qps);
return;
}
@@ -65,7 +65,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
__func__,
qhp->attr.state, qhp->wq.qpid,
CQE_STATUS(rsp_msg->cqe));
- spin_unlock(&rnicp->lock);
+ xa_unlock(&rnicp->qps);
return;
}
@@ -76,7 +76,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
atomic_inc(&qhp->refcnt);
- spin_unlock(&rnicp->lock);
+ xa_unlock(&rnicp->qps);
if (qhp->attr.state == IWCH_QP_STATE_RTS) {
attrs.next_state = IWCH_QP_STATE_TERMINATE;
@@ -114,21 +114,21 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
unsigned long flag;
rnicp = (struct iwch_dev *) rdev_p->ulp;
- spin_lock(&rnicp->lock);
+ xa_lock(&rnicp->qps);
chp = get_chp(rnicp, cqid);
- qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
+ qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
if (!chp || !qhp) {
pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
cqid, CQE_QPID(rsp_msg->cqe),
CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
CQE_WRID_LOW(rsp_msg->cqe));
- spin_unlock(&rnicp->lock);
+ xa_unlock(&rnicp->qps);
goto out;
}
iwch_qp_add_ref(&qhp->ibqp);
atomic_inc(&chp->refcnt);
- spin_unlock(&rnicp->lock);
+ xa_unlock(&rnicp->qps);
/*
* 1) completion of our sending a TERMINATE.
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index 12886b1b4b10..ce0f2741821d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -49,7 +49,7 @@ static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
- return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+ return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
}
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 4accf7b3dcf2..3a481dfb1607 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -88,14 +88,14 @@ static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
return 0;
}
-static int iwch_destroy_cq(struct ib_cq *ib_cq)
+static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct iwch_cq *chp;
pr_debug("%s ib_cq %p\n", __func__, ib_cq);
chp = to_iwch_cq(ib_cq);
- remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
+ xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
atomic_dec(&chp->refcnt);
wait_event(chp->wait, !atomic_read(&chp->refcnt));
@@ -106,7 +106,6 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@@ -114,7 +113,6 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
struct iwch_cq *chp;
struct iwch_create_cq_resp uresp;
struct iwch_create_cq_req ureq;
- struct iwch_ucontext *ucontext = NULL;
static int warned;
size_t resplen;
@@ -127,8 +125,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
if (!chp)
return ERR_PTR(-ENOMEM);
- if (ib_context) {
- ucontext = to_iwch_ucontext(ib_context);
+ if (udata) {
if (!t3a_device(rhp)) {
if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
kfree(chp);
@@ -154,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
entries = roundup_pow_of_two(entries);
chp->cq.size_log2 = ilog2(entries);
- if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
+ if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata)) {
kfree(chp);
return ERR_PTR(-ENOMEM);
}
@@ -164,18 +161,20 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
spin_lock_init(&chp->comp_handler_lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
- if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
+ if (xa_store_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL)) {
cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
kfree(chp);
return ERR_PTR(-ENOMEM);
}
- if (ucontext) {
+ if (udata) {
struct iwch_mm_entry *mm;
+ struct iwch_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct iwch_ucontext, ibucontext);
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm) {
- iwch_destroy_cq(&chp->ibcq);
+ iwch_destroy_cq(&chp->ibcq, udata);
return ERR_PTR(-ENOMEM);
}
uresp.cqid = chp->cq.cqid;
@@ -201,7 +200,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
}
if (ib_copy_to_udata(udata, &uresp, resplen)) {
kfree(mm);
- iwch_destroy_cq(&chp->ibcq);
+ iwch_destroy_cq(&chp->ibcq, udata);
return ERR_PTR(-EFAULT);
}
insert_mmap(ucontext, mm);
@@ -367,7 +366,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return ret;
}
-static void iwch_deallocate_pd(struct ib_pd *pd)
+static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
@@ -378,8 +377,7 @@ static void iwch_deallocate_pd(struct ib_pd *pd)
cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
}
-static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct iwch_pd *php = to_iwch_pd(pd);
struct ib_device *ibdev = pd->device;
@@ -394,11 +392,11 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
php->pdid = pdid;
php->rhp = rhp;
- if (context) {
+ if (udata) {
struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- iwch_deallocate_pd(&php->ibpd);
+ iwch_deallocate_pd(&php->ibpd, udata);
return -EFAULT;
}
}
@@ -406,7 +404,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
return 0;
}
-static int iwch_dereg_mr(struct ib_mr *ib_mr)
+static int iwch_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct iwch_dev *rhp;
struct iwch_mr *mhp;
@@ -421,7 +419,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr);
iwch_free_pbl(mhp);
- remove_handle(rhp, &rhp->mmidr, mmid);
+ xa_erase_irq(&rhp->mrs, mmid);
if (mhp->kva)
kfree((void *) (unsigned long) mhp->kva);
if (mhp->umem)
@@ -539,7 +537,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
shift = PAGE_SHIFT;
- n = mhp->umem->nmap;
+ n = ib_umem_num_pages(mhp->umem);
err = iwch_alloc_pbl(mhp, n);
if (err)
@@ -590,7 +588,7 @@ pbl_done:
uresp.pbl_addr);
if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
- iwch_dereg_mr(&mhp->ibmr);
+ iwch_dereg_mr(&mhp->ibmr, udata);
err = -EFAULT;
goto err;
}
@@ -636,7 +634,7 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
mhp->attr.stag = stag;
mmid = (stag) >> 8;
mhp->ibmw.rkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
kfree(mhp);
return ERR_PTR(-ENOMEM);
@@ -655,15 +653,14 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
rhp = mhp->rhp;
mmid = (mw->rkey) >> 8;
cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- remove_handle(rhp, &rhp->mmidr, mmid);
+ xa_erase_irq(&rhp->mrs, mmid);
pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
kfree(mhp);
return 0;
}
-static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
@@ -701,7 +698,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
mhp->attr.state = 1;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+ ret = xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL);
if (ret)
goto err3;
@@ -742,7 +739,7 @@ static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
}
-static int iwch_destroy_qp(struct ib_qp *ib_qp)
+static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
struct iwch_dev *rhp;
struct iwch_qp *qhp;
@@ -756,13 +753,13 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);
- remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
+ xa_erase_irq(&rhp->qps, qhp->wq.qpid);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
- ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
- : NULL;
+ ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
+ ibucontext);
cxio_destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
@@ -872,7 +869,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
- if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+ if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
cxio_destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
kfree(qhp);
@@ -885,14 +882,14 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
if (!mm1) {
- iwch_destroy_qp(&qhp->ibqp);
+ iwch_destroy_qp(&qhp->ibqp, udata);
return ERR_PTR(-ENOMEM);
}
mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
if (!mm2) {
kfree(mm1);
- iwch_destroy_qp(&qhp->ibqp);
+ iwch_destroy_qp(&qhp->ibqp, udata);
return ERR_PTR(-ENOMEM);
}
@@ -909,7 +906,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
kfree(mm1);
kfree(mm2);
- iwch_destroy_qp(&qhp->ibqp);
+ iwch_destroy_qp(&qhp->ibqp, udata);
return ERR_PTR(-EFAULT);
}
mm1->key = uresp.key;
@@ -1324,6 +1321,14 @@ static const struct ib_device_ops iwch_dev_ops = {
.get_dma_mr = iwch_get_dma_mr,
.get_hw_stats = iwch_get_mib,
.get_port_immutable = iwch_port_immutable,
+ .iw_accept = iwch_accept_cr,
+ .iw_add_ref = iwch_qp_add_ref,
+ .iw_connect = iwch_connect,
+ .iw_create_listen = iwch_create_listen,
+ .iw_destroy_listen = iwch_destroy_listen,
+ .iw_get_qp = iwch_get_qp,
+ .iw_reject = iwch_reject_cr,
+ .iw_rem_ref = iwch_qp_rem_ref,
.map_mr_sg = iwch_map_mr_sg,
.mmap = iwch_mmap,
.modify_qp = iwch_ib_modify_qp,
@@ -1343,8 +1348,6 @@ static const struct ib_device_ops iwch_dev_ops = {
int iwch_register_device(struct iwch_dev *dev)
{
- int ret;
-
pr_debug("%s iwch_dev %p\n", __func__, dev);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
@@ -1382,34 +1385,18 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
- dev->ibdev.iwcm = kzalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
- if (!dev->ibdev.iwcm)
- return -ENOMEM;
-
- dev->ibdev.iwcm->connect = iwch_connect;
- dev->ibdev.iwcm->accept = iwch_accept_cr;
- dev->ibdev.iwcm->reject = iwch_reject_cr;
- dev->ibdev.iwcm->create_listen = iwch_create_listen;
- dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
- dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
- dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
- dev->ibdev.iwcm->get_qp = iwch_get_qp;
- memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
- sizeof(dev->ibdev.iwcm->ifname));
+ memcpy(dev->ibdev.iw_ifname, dev->rdev.t3cdev_p->lldev->name,
+ sizeof(dev->ibdev.iw_ifname));
dev->ibdev.driver_id = RDMA_DRIVER_CXGB3;
rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
- ret = ib_register_device(&dev->ibdev, "cxgb3_%d");
- if (ret)
- kfree(dev->ibdev.iwcm);
- return ret;
+ return ib_register_device(&dev->ibdev, "cxgb3_%d");
}
void iwch_unregister_device(struct iwch_dev *dev)
{
pr_debug("%s iwch_dev %p\n", __func__, dev);
ib_unregister_device(&dev->ibdev);
- kfree(dev->ibdev.iwcm);
return;
}
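Most of the churn in iwch_provider.c above comes from the verbs API dropping the explicit struct ib_ucontext parameter: destroy/dealloc callbacks gain a struct ib_udata, the "is this a user object?" test becomes a NULL check on udata, and the driver ucontext is recovered with rdma_udata_to_drv_context(). A hedged sketch of that idiom (the callback and struct names here are illustrative, not copied from the driver):

#include <linux/printk.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct example_ucontext {
	struct ib_ucontext ibucontext;
};

static int example_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	/* NULL for kernel-owned CQs, the user context otherwise */
	struct example_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct example_ucontext,
					  ibucontext);

	if (uctx)
		pr_debug("tearing down a user CQ\n");
	else
		pr_debug("tearing down a kernel CQ\n");

	return 0;
}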
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 4d232bdf9e97..0f3b1193d5f8 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -331,20 +331,23 @@ static void remove_ep_tid(struct c4iw_ep *ep)
{
unsigned long flags;
- spin_lock_irqsave(&ep->com.dev->lock, flags);
- _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
- if (idr_is_empty(&ep->com.dev->hwtid_idr))
+ xa_lock_irqsave(&ep->com.dev->hwtids, flags);
+ __xa_erase(&ep->com.dev->hwtids, ep->hwtid);
+ if (xa_empty(&ep->com.dev->hwtids))
wake_up(&ep->com.dev->wait);
- spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+ xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
}
-static void insert_ep_tid(struct c4iw_ep *ep)
+static int insert_ep_tid(struct c4iw_ep *ep)
{
unsigned long flags;
+ int err;
+
+ xa_lock_irqsave(&ep->com.dev->hwtids, flags);
+ err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL);
+ xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
- spin_lock_irqsave(&ep->com.dev->lock, flags);
- _insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
- spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+ return err;
}
/*
@@ -355,11 +358,11 @@ static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
struct c4iw_ep *ep;
unsigned long flags;
- spin_lock_irqsave(&dev->lock, flags);
- ep = idr_find(&dev->hwtid_idr, tid);
+ xa_lock_irqsave(&dev->hwtids, flags);
+ ep = xa_load(&dev->hwtids, tid);
if (ep)
c4iw_get_ep(&ep->com);
- spin_unlock_irqrestore(&dev->lock, flags);
+ xa_unlock_irqrestore(&dev->hwtids, flags);
return ep;
}
@@ -372,11 +375,11 @@ static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
struct c4iw_listen_ep *ep;
unsigned long flags;
- spin_lock_irqsave(&dev->lock, flags);
- ep = idr_find(&dev->stid_idr, stid);
+ xa_lock_irqsave(&dev->stids, flags);
+ ep = xa_load(&dev->stids, stid);
if (ep)
c4iw_get_ep(&ep->com);
- spin_unlock_irqrestore(&dev->lock, flags);
+ xa_unlock_irqrestore(&dev->stids, flags);
return ep;
}
@@ -457,6 +460,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
skb_reset_transport_header(skb);
} else {
skb = alloc_skb(len, gfp);
+ if (!skb)
+ return NULL;
}
t4_set_arp_err_handler(skb, NULL, NULL);
return skb;
@@ -555,7 +560,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}
@@ -1235,7 +1240,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
set_emss(ep, tcp_opt);
/* dealloc the atid */
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
set_bit(ACT_ESTAB, &ep->com.history);
@@ -2184,7 +2189,9 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
err = -ENOMEM;
goto fail2;
}
- insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+ err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL);
+ if (err)
+ goto fail2a;
/* find a route */
if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
@@ -2236,7 +2243,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
fail4:
dst_release(ep->dst);
fail3:
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
+fail2a:
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
/*
@@ -2319,8 +2327,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
(const u32 *)
&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
- atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -2357,7 +2364,7 @@ fail:
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
ep->com.local_addr.ss_family);
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -2947,7 +2954,7 @@ out:
(const u32 *)&sin6->sin6_addr.s6_addr,
1);
}
- remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
+ xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
ep->com.local_addr.ss_family);
dst_release(ep->dst);
@@ -3342,7 +3349,9 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto fail2;
}
- insert_handle(dev, &dev->atid_idr, ep, ep->atid);
+ err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL);
+ if (err)
+ goto fail5;
memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
@@ -3430,7 +3439,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
fail4:
dst_release(ep->dst);
fail3:
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
+fail5:
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
skb_queue_purge(&ep->com.ep_skb_list);
@@ -3553,7 +3563,9 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
err = -ENOMEM;
goto fail2;
}
- insert_handle(dev, &dev->stid_idr, ep, ep->stid);
+ err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL);
+ if (err)
+ goto fail3;
state_set(&ep->com, LISTEN);
if (ep->com.local_addr.ss_family == AF_INET)
@@ -3564,7 +3576,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = ep;
goto out;
}
- remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
+ xa_erase_irq(&ep->com.dev->stids, ep->stid);
+fail3:
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
@@ -3603,7 +3616,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
+ xa_erase_irq(&ep->com.dev->stids, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
done:
@@ -3763,7 +3776,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(dev, &dev->atid_idr, atid);
+ xa_erase_irq(&dev->atids, atid);
cxgb4_free_atid(dev->rdev.lldi.tids, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
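The endpoint-ID tables in cm.c move to the same XArrays, but here the insert can fail, so insert_ep_tid() and the connect paths grow explicit error labels (fail2a, fail5), and teardown erases under the array lock so the "table is now empty" wakeup cannot be missed. A compact sketch of that erase-and-wake pattern, with assumed names:

#include <linux/wait.h>
#include <linux/xarray.h>

/* Erase one id and, still under the lock, wake anybody waiting for the
 * table to drain (as the dealloc path does for the hwtid table).
 */
static void drop_id_and_maybe_wake(struct xarray *ids, unsigned long id,
				   wait_queue_head_t *drained)
{
	unsigned long flags;

	xa_lock_irqsave(ids, flags);
	__xa_erase(ids, id);
	if (xa_empty(ids))
		wake_up(drained);
	xa_unlock_irqrestore(ids, flags);
}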
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 1fd8798d91a7..52ce586621c6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -30,6 +30,8 @@
* SOFTWARE.
*/
+#include <rdma/uverbs_ioctl.h>
+
#include "iw_cxgb4.h"
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
@@ -968,7 +970,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return !err || err == -ENODATA ? npolled : err;
}
-int c4iw_destroy_cq(struct ib_cq *ib_cq)
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct c4iw_cq *chp;
struct c4iw_ucontext *ucontext;
@@ -976,12 +978,12 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
pr_debug("ib_cq %p\n", ib_cq);
chp = to_c4iw_cq(ib_cq);
- remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
+ xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
atomic_dec(&chp->refcnt);
wait_event(chp->wait, !atomic_read(&chp->refcnt));
- ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
- : NULL;
+ ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+ ibucontext);
destroy_cq(&chp->rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
chp->destroy_skb, chp->wr_waitp);
@@ -992,7 +994,6 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@@ -1001,10 +1002,11 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
struct c4iw_cq *chp;
struct c4iw_create_cq ucmd;
struct c4iw_create_cq_resp uresp;
- struct c4iw_ucontext *ucontext = NULL;
int ret, wr_len;
size_t memsize, hwentries;
struct c4iw_mm_entry *mm, *mm2;
+ struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct c4iw_ucontext, ibucontext);
pr_debug("ib_dev %p entries %d\n", ibdev, entries);
if (attr->flags)
@@ -1015,8 +1017,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (vector >= rhp->rdev.lldi.nciq)
return ERR_PTR(-EINVAL);
- if (ib_context) {
- ucontext = to_c4iw_ucontext(ib_context);
+ if (udata) {
if (udata->inlen < sizeof(ucmd))
ucontext->is_32b_cqe = 1;
}
@@ -1068,7 +1069,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
/*
* memsize must be a multiple of the page size if its a user cq.
*/
- if (ucontext)
+ if (udata)
memsize = roundup(memsize, PAGE_SIZE);
chp->cq.size = hwentries;
@@ -1088,7 +1089,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
spin_lock_init(&chp->comp_handler_lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
- ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+ ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
if (ret)
goto err_destroy_cq;
@@ -1143,7 +1144,7 @@ err_free_mm2:
err_free_mm:
kfree(mm);
err_remove_handle:
- remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
+ xa_erase_irq(&rhp->cqs, chp->cq.cqid);
err_destroy_cq:
destroy_cq(&chp->rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index c79cf63fb0bb..4c0d925c5ff5 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -81,14 +81,6 @@ struct c4iw_debugfs_data {
int pos;
};
-static int count_idrs(int id, void *p, void *data)
-{
- int *countp = data;
-
- *countp = *countp + 1;
- return 0;
-}
-
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -250,16 +242,11 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep,
}
}
-static int dump_qp(int id, void *p, void *data)
+static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd)
{
- struct c4iw_qp *qp = p;
- struct c4iw_debugfs_data *qpd = data;
int space;
int cc;
- if (id != qp->wq.sq.qid)
- return 0;
-
space = qpd->bufsize - qpd->pos - 1;
if (space == 0)
return 1;
@@ -335,7 +322,9 @@ static int qp_release(struct inode *inode, struct file *file)
static int qp_open(struct inode *inode, struct file *file)
{
+ struct c4iw_qp *qp;
struct c4iw_debugfs_data *qpd;
+ unsigned long index;
int count = 1;
qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
@@ -345,9 +334,12 @@ static int qp_open(struct inode *inode, struct file *file)
qpd->devp = inode->i_private;
qpd->pos = 0;
- spin_lock_irq(&qpd->devp->lock);
- idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
- spin_unlock_irq(&qpd->devp->lock);
+ /*
+ * No need to lock; we drop the lock to call vmalloc so it's racy
+ * anyway. Someone who cares should switch this over to seq_file
+ */
+ xa_for_each(&qpd->devp->qps, index, qp)
+ count++;
qpd->bufsize = count * 180;
qpd->buf = vmalloc(qpd->bufsize);
@@ -356,9 +348,10 @@ static int qp_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- spin_lock_irq(&qpd->devp->lock);
- idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
- spin_unlock_irq(&qpd->devp->lock);
+ xa_lock_irq(&qpd->devp->qps);
+ xa_for_each(&qpd->devp->qps, index, qp)
+ dump_qp(qp, qpd);
+ xa_unlock_irq(&qpd->devp->qps);
qpd->buf[qpd->pos++] = 0;
file->private_data = qpd;
@@ -373,9 +366,8 @@ static const struct file_operations qp_debugfs_fops = {
.llseek = default_llseek,
};
-static int dump_stag(int id, void *p, void *data)
+static int dump_stag(unsigned long id, struct c4iw_debugfs_data *stagd)
{
- struct c4iw_debugfs_data *stagd = data;
int space;
int cc;
struct fw_ri_tpte tpte;
@@ -424,6 +416,8 @@ static int stag_release(struct inode *inode, struct file *file)
static int stag_open(struct inode *inode, struct file *file)
{
struct c4iw_debugfs_data *stagd;
+ void *p;
+ unsigned long index;
int ret = 0;
int count = 1;
@@ -435,9 +429,8 @@ static int stag_open(struct inode *inode, struct file *file)
stagd->devp = inode->i_private;
stagd->pos = 0;
- spin_lock_irq(&stagd->devp->lock);
- idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
- spin_unlock_irq(&stagd->devp->lock);
+ xa_for_each(&stagd->devp->mrs, index, p)
+ count++;
stagd->bufsize = count * 256;
stagd->buf = vmalloc(stagd->bufsize);
@@ -446,9 +439,10 @@ static int stag_open(struct inode *inode, struct file *file)
goto err1;
}
- spin_lock_irq(&stagd->devp->lock);
- idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
- spin_unlock_irq(&stagd->devp->lock);
+ xa_lock_irq(&stagd->devp->mrs);
+ xa_for_each(&stagd->devp->mrs, index, p)
+ dump_stag(index, stagd);
+ xa_unlock_irq(&stagd->devp->mrs);
stagd->buf[stagd->pos++] = 0;
file->private_data = stagd;
@@ -558,10 +552,8 @@ static const struct file_operations stats_debugfs_fops = {
.write = stats_clear,
};
-static int dump_ep(int id, void *p, void *data)
+static int dump_ep(struct c4iw_ep *ep, struct c4iw_debugfs_data *epd)
{
- struct c4iw_ep *ep = p;
- struct c4iw_debugfs_data *epd = data;
int space;
int cc;
@@ -617,10 +609,9 @@ static int dump_ep(int id, void *p, void *data)
return 0;
}
-static int dump_listen_ep(int id, void *p, void *data)
+static
+int dump_listen_ep(struct c4iw_listen_ep *ep, struct c4iw_debugfs_data *epd)
{
- struct c4iw_listen_ep *ep = p;
- struct c4iw_debugfs_data *epd = data;
int space;
int cc;
@@ -674,6 +665,9 @@ static int ep_release(struct inode *inode, struct file *file)
static int ep_open(struct inode *inode, struct file *file)
{
+ struct c4iw_ep *ep;
+ struct c4iw_listen_ep *lep;
+ unsigned long index;
struct c4iw_debugfs_data *epd;
int ret = 0;
int count = 1;
@@ -686,11 +680,12 @@ static int ep_open(struct inode *inode, struct file *file)
epd->devp = inode->i_private;
epd->pos = 0;
- spin_lock_irq(&epd->devp->lock);
- idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
- idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
- idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
- spin_unlock_irq(&epd->devp->lock);
+ xa_for_each(&epd->devp->hwtids, index, ep)
+ count++;
+ xa_for_each(&epd->devp->atids, index, ep)
+ count++;
+ xa_for_each(&epd->devp->stids, index, lep)
+ count++;
epd->bufsize = count * 240;
epd->buf = vmalloc(epd->bufsize);
@@ -699,11 +694,18 @@ static int ep_open(struct inode *inode, struct file *file)
goto err1;
}
- spin_lock_irq(&epd->devp->lock);
- idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
- idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
- idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
- spin_unlock_irq(&epd->devp->lock);
+ xa_lock_irq(&epd->devp->hwtids);
+ xa_for_each(&epd->devp->hwtids, index, ep)
+ dump_ep(ep, epd);
+ xa_unlock_irq(&epd->devp->hwtids);
+ xa_lock_irq(&epd->devp->atids);
+ xa_for_each(&epd->devp->atids, index, ep)
+ dump_ep(ep, epd);
+ xa_unlock_irq(&epd->devp->atids);
+ xa_lock_irq(&epd->devp->stids);
+ xa_for_each(&epd->devp->stids, index, lep)
+ dump_listen_ep(lep, epd);
+ xa_unlock_irq(&epd->devp->stids);
file->private_data = epd;
goto out;
@@ -931,16 +933,12 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
void c4iw_dealloc(struct uld_ctx *ctx)
{
c4iw_rdev_close(&ctx->dev->rdev);
- WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
- idr_destroy(&ctx->dev->cqidr);
- WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
- idr_destroy(&ctx->dev->qpidr);
- WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
- idr_destroy(&ctx->dev->mmidr);
- wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
- idr_destroy(&ctx->dev->hwtid_idr);
- idr_destroy(&ctx->dev->stid_idr);
- idr_destroy(&ctx->dev->atid_idr);
+ WARN_ON(!xa_empty(&ctx->dev->cqs));
+ WARN_ON(!xa_empty(&ctx->dev->qps));
+ WARN_ON(!xa_empty(&ctx->dev->mrs));
+ wait_event(ctx->dev->wait, xa_empty(&ctx->dev->hwtids));
+ WARN_ON(!xa_empty(&ctx->dev->stids));
+ WARN_ON(!xa_empty(&ctx->dev->atids));
if (ctx->dev->rdev.bar2_kva)
iounmap(ctx->dev->rdev.bar2_kva);
if (ctx->dev->rdev.oc_mw_kva)
@@ -1044,13 +1042,12 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
return ERR_PTR(ret);
}
- idr_init(&devp->cqidr);
- idr_init(&devp->qpidr);
- idr_init(&devp->mmidr);
- idr_init(&devp->hwtid_idr);
- idr_init(&devp->stid_idr);
- idr_init(&devp->atid_idr);
- spin_lock_init(&devp->lock);
+ xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->mrs, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->hwtids, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->atids, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->stids, XA_FLAGS_LOCK_IRQ);
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
INIT_LIST_HEAD(&devp->db_fc_list);
@@ -1265,34 +1262,21 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
return 0;
}
-static int disable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_disable_wq_db(&qp->wq);
- return 0;
-}
-
static void stop_queues(struct uld_ctx *ctx)
{
- unsigned long flags;
+ struct c4iw_qp *qp;
+ unsigned long index, flags;
- spin_lock_irqsave(&ctx->dev->lock, flags);
+ xa_lock_irqsave(&ctx->dev->qps, flags);
ctx->dev->rdev.stats.db_state_transitions++;
ctx->dev->db_state = STOPPED;
- if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
- idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
- else
+ if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+ xa_for_each(&ctx->dev->qps, index, qp)
+ t4_disable_wq_db(&qp->wq);
+ } else {
ctx->dev->rdev.status_page->db_off = 1;
- spin_unlock_irqrestore(&ctx->dev->lock, flags);
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_enable_wq_db(&qp->wq);
- return 0;
+ }
+ xa_unlock_irqrestore(&ctx->dev->qps, flags);
}
static void resume_rc_qp(struct c4iw_qp *qp)
@@ -1322,18 +1306,21 @@ static void resume_a_chunk(struct uld_ctx *ctx)
static void resume_queues(struct uld_ctx *ctx)
{
- spin_lock_irq(&ctx->dev->lock);
+ xa_lock_irq(&ctx->dev->qps);
if (ctx->dev->db_state != STOPPED)
goto out;
ctx->dev->db_state = FLOW_CONTROL;
while (1) {
if (list_empty(&ctx->dev->db_fc_list)) {
+ struct c4iw_qp *qp;
+ unsigned long index;
+
WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
ctx->dev->db_state = NORMAL;
ctx->dev->rdev.stats.db_state_transitions++;
if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
- idr_for_each(&ctx->dev->qpidr, enable_qp_db,
- NULL);
+ xa_for_each(&ctx->dev->qps, index, qp)
+ t4_enable_wq_db(&qp->wq);
} else {
ctx->dev->rdev.status_page->db_off = 0;
}
@@ -1345,12 +1332,12 @@ static void resume_queues(struct uld_ctx *ctx)
resume_a_chunk(ctx);
}
if (!list_empty(&ctx->dev->db_fc_list)) {
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
if (DB_FC_RESUME_DELAY) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(DB_FC_RESUME_DELAY);
}
- spin_lock_irq(&ctx->dev->lock);
+ xa_lock_irq(&ctx->dev->qps);
if (ctx->dev->db_state != FLOW_CONTROL)
break;
}
@@ -1359,7 +1346,7 @@ static void resume_queues(struct uld_ctx *ctx)
out:
if (ctx->dev->db_state != NORMAL)
ctx->dev->rdev.stats.db_fc_interruptions++;
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
}
struct qp_list {
@@ -1367,23 +1354,6 @@ struct qp_list {
struct c4iw_qp **qps;
};
-static int add_and_ref_qp(int id, void *p, void *data)
-{
- struct qp_list *qp_listp = data;
- struct c4iw_qp *qp = p;
-
- c4iw_qp_add_ref(&qp->ibqp);
- qp_listp->qps[qp_listp->idx++] = qp;
- return 0;
-}
-
-static int count_qps(int id, void *p, void *data)
-{
- unsigned *countp = data;
- (*countp)++;
- return 0;
-}
-
static void deref_qps(struct qp_list *qp_list)
{
int idx;
@@ -1400,7 +1370,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
for (idx = 0; idx < qp_list->idx; idx++) {
struct c4iw_qp *qp = qp_list->qps[idx];
- spin_lock_irq(&qp->rhp->lock);
+ xa_lock_irq(&qp->rhp->qps);
spin_lock(&qp->lock);
ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
qp->wq.sq.qid,
@@ -1410,7 +1380,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
spin_unlock(&qp->lock);
- spin_unlock_irq(&qp->rhp->lock);
+ xa_unlock_irq(&qp->rhp->qps);
return;
}
qp->wq.sq.wq_pidx_inc = 0;
@@ -1424,12 +1394,12 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
spin_unlock(&qp->lock);
- spin_unlock_irq(&qp->rhp->lock);
+ xa_unlock_irq(&qp->rhp->qps);
return;
}
qp->wq.rq.wq_pidx_inc = 0;
spin_unlock(&qp->lock);
- spin_unlock_irq(&qp->rhp->lock);
+ xa_unlock_irq(&qp->rhp->qps);
/* Wait for the dbfifo to drain */
while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1441,6 +1411,8 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
static void recover_queues(struct uld_ctx *ctx)
{
+ struct c4iw_qp *qp;
+ unsigned long index;
int count = 0;
struct qp_list qp_list;
int ret;
@@ -1458,22 +1430,26 @@ static void recover_queues(struct uld_ctx *ctx)
}
/* Count active queues so we can build a list of queues to recover */
- spin_lock_irq(&ctx->dev->lock);
+ xa_lock_irq(&ctx->dev->qps);
WARN_ON(ctx->dev->db_state != STOPPED);
ctx->dev->db_state = RECOVERY;
- idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+ xa_for_each(&ctx->dev->qps, index, qp)
+ count++;
qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
if (!qp_list.qps) {
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
return;
}
qp_list.idx = 0;
/* add and ref each qp so it doesn't get freed */
- idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+ xa_for_each(&ctx->dev->qps, index, qp) {
+ c4iw_qp_add_ref(&qp->ibqp);
+ qp_list.qps[qp_list.idx++] = qp;
+ }
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
/* now traverse the list in a safe context to recover the db state*/
recover_lost_dbs(ctx, &qp_list);
@@ -1482,10 +1458,10 @@ static void recover_queues(struct uld_ctx *ctx)
deref_qps(&qp_list);
kfree(qp_list.qps);
- spin_lock_irq(&ctx->dev->lock);
+ xa_lock_irq(&ctx->dev->qps);
WARN_ON(ctx->dev->db_state != RECOVERY);
ctx->dev->db_state = STOPPED;
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
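device.c shows the other half of the conversion: the tables are created with xa_init_flags(..., XA_FLAGS_LOCK_IRQ) because they are also touched from the interrupt-driven event path, and the idr_for_each() callbacks (count_idrs, disable_qp_db, enable_qp_db, add_and_ref_qp) disappear in favour of inline xa_for_each() loops. A minimal sketch of the iteration, with assumed names:

#include <linux/xarray.h>

struct example_qp;	/* opaque for the sketch */

/* Count entries; the debugfs dumps above additionally take xa_lock_irq()
 * around the loop when they also format each entry.
 */
static unsigned int count_qps(struct xarray *qps)
{
	struct example_qp *qp;
	unsigned long index;
	unsigned int count = 0;

	xa_for_each(qps, index, qp)
		count++;

	return count;
}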
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 8741d23168f3..4cd877bd2f56 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -123,15 +123,15 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
struct c4iw_qp *qhp;
u32 cqid;
- spin_lock_irq(&dev->lock);
- qhp = get_qhp(dev, CQE_QPID(err_cqe));
+ xa_lock_irq(&dev->qps);
+ qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));
if (!qhp) {
pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
CQE_WRID_LOW(err_cqe));
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
goto out;
}
@@ -146,13 +146,13 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
CQE_WRID_LOW(err_cqe));
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
goto out;
}
c4iw_qp_add_ref(&qhp->ibqp);
atomic_inc(&chp->refcnt);
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
/* Bad incoming write */
if (RQ_TYPE(err_cqe) &&
@@ -225,11 +225,11 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
struct c4iw_cq *chp;
unsigned long flag;
- spin_lock_irqsave(&dev->lock, flag);
- chp = get_chp(dev, qid);
+ xa_lock_irqsave(&dev->cqs, flag);
+ chp = xa_load(&dev->cqs, qid);
if (chp) {
atomic_inc(&chp->refcnt);
- spin_unlock_irqrestore(&dev->lock, flag);
+ xa_unlock_irqrestore(&dev->cqs, flag);
t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
@@ -238,7 +238,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
wake_up(&chp->wait);
} else {
pr_debug("unknown cqid 0x%x\n", qid);
- spin_unlock_irqrestore(&dev->lock, flag);
+ xa_unlock_irqrestore(&dev->cqs, flag);
}
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 5a5da41faef6..916ef982172e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -34,7 +34,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched/mm.h>
@@ -315,16 +315,15 @@ struct c4iw_dev {
struct ib_device ibdev;
struct c4iw_rdev rdev;
u32 device_cap_flags;
- struct idr cqidr;
- struct idr qpidr;
- struct idr mmidr;
- spinlock_t lock;
+ struct xarray cqs;
+ struct xarray qps;
+ struct xarray mrs;
struct mutex db_mutex;
struct dentry *debugfs_root;
enum db_state db_state;
- struct idr hwtid_idr;
- struct idr atid_idr;
- struct idr stid_idr;
+ struct xarray hwtids;
+ struct xarray atids;
+ struct xarray stids;
struct list_head db_fc_list;
u32 avail_ird;
wait_queue_head_t wait;
@@ -349,70 +348,12 @@ static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
- return idr_find(&rhp->cqidr, cqid);
+ return xa_load(&rhp->cqs, cqid);
}
static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
- return idr_find(&rhp->qpidr, qpid);
-}
-
-static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
-{
- return idr_find(&rhp->mmidr, mmid);
-}
-
-static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
- void *handle, u32 id, int lock)
-{
- int ret;
-
- if (lock) {
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&rhp->lock);
- }
-
- ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
-
- if (lock) {
- spin_unlock_irq(&rhp->lock);
- idr_preload_end();
- }
-
- return ret < 0 ? ret : 0;
-}
-
-static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
- void *handle, u32 id)
-{
- return _insert_handle(rhp, idr, handle, id, 1);
-}
-
-static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
- void *handle, u32 id)
-{
- return _insert_handle(rhp, idr, handle, id, 0);
-}
-
-static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
- u32 id, int lock)
-{
- if (lock)
- spin_lock_irq(&rhp->lock);
- idr_remove(idr, id);
- if (lock)
- spin_unlock_irq(&rhp->lock);
-}
-
-static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
-{
- _remove_handle(rhp, idr, id, 1);
-}
-
-static inline void remove_handle_nolock(struct c4iw_dev *rhp,
- struct idr *idr, u32 id)
-{
- _remove_handle(rhp, idr, id, 0);
+ return xa_load(&rhp->qps, qpid);
}
extern uint c4iw_max_read_depth;
@@ -1038,9 +979,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg);
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
@@ -1051,21 +991,19 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
-int c4iw_dereg_mr(struct ib_mr *ib_mr);
-int c4iw_destroy_cq(struct ib_cq *ib_cq);
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_context,
struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
-int c4iw_destroy_srq(struct ib_srq *ib_srq);
-struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *attrs,
- struct ib_udata *udata);
-int c4iw_destroy_qp(struct ib_qp *ib_qp);
+void c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
+int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata);
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *attrs,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 5baa31ab6366..811c0c8c5b16 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -395,7 +395,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
mhp->ibmr.iova = mhp->attr.va_fbo;
mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
- return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+ return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
}
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
@@ -542,7 +542,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
shift = PAGE_SHIFT;
- n = mhp->umem->nmap;
+ n = ib_umem_num_pages(mhp->umem);
err = alloc_pbl(mhp, n);
if (err)
goto err_umem_release;
@@ -645,7 +645,7 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
mhp->attr.stag = stag;
mmid = (stag) >> 8;
mhp->ibmw.rkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
ret = -ENOMEM;
goto dealloc_win;
}
@@ -673,7 +673,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
mhp = to_c4iw_mw(mw);
rhp = mhp->rhp;
mmid = (mw->rkey) >> 8;
- remove_handle(rhp, &rhp->mmidr, mmid);
+ xa_erase_irq(&rhp->mrs, mmid);
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
mhp->wr_waitp);
kfree_skb(mhp->dereg_skb);
@@ -683,9 +683,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
return 0;
}
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -740,7 +739,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
mhp->attr.state = 0;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
ret = -ENOMEM;
goto err_dereg;
}
@@ -786,7 +785,7 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}
-int c4iw_dereg_mr(struct ib_mr *ib_mr)
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_mr *mhp;
@@ -797,7 +796,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
mhp = to_c4iw_mr(ib_mr);
rhp = mhp->rhp;
mmid = mhp->attr.stag >> 8;
- remove_handle(rhp, &rhp->mmidr, mmid);
+ xa_erase_irq(&rhp->mrs, mmid);
if (mhp->mpl)
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
@@ -821,9 +820,9 @@ void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
struct c4iw_mr *mhp;
unsigned long flags;
- spin_lock_irqsave(&rhp->lock, flags);
- mhp = get_mhp(rhp, rkey >> 8);
+ xa_lock_irqsave(&rhp->mrs, flags);
+ mhp = xa_load(&rhp->mrs, rkey >> 8);
if (mhp)
mhp->attr.state = 0;
- spin_unlock_irqrestore(&rhp->lock, flags);
+ xa_unlock_irqrestore(&rhp->mrs, flags);
}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 507c54572cc9..74b795642fca 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -190,7 +190,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return ret;
}
-static void c4iw_deallocate_pd(struct ib_pd *pd)
+static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -204,8 +204,7 @@ static void c4iw_deallocate_pd(struct ib_pd *pd)
mutex_unlock(&rhp->rdev.stats.lock);
}
-static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct c4iw_pd *php = to_c4iw_pd(pd);
struct ib_device *ibdev = pd->device;
@@ -220,11 +219,11 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
php->pdid = pdid;
php->rhp = rhp;
- if (context) {
+ if (udata) {
struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- c4iw_deallocate_pd(&php->ibpd);
+ c4iw_deallocate_pd(&php->ibpd, udata);
return -EFAULT;
}
}
@@ -483,24 +482,6 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}
-static struct net_device *get_netdev(struct ib_device *dev, u8 port)
-{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, ibdev);
- struct c4iw_rdev *rdev = &c4iw_dev->rdev;
- struct net_device *ndev;
-
- if (!port || port > rdev->lldi.nports)
- return NULL;
-
- rcu_read_lock();
- ndev = rdev->lldi.ports[port - 1];
- if (ndev)
- dev_hold(ndev);
- rcu_read_unlock();
-
- return ndev;
-}
-
static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res)
{
return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
@@ -528,8 +509,15 @@ static const struct ib_device_ops c4iw_dev_ops = {
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = c4iw_get_dma_mr,
.get_hw_stats = c4iw_get_mib,
- .get_netdev = get_netdev,
.get_port_immutable = c4iw_port_immutable,
+ .iw_accept = c4iw_accept_cr,
+ .iw_add_ref = c4iw_qp_add_ref,
+ .iw_connect = c4iw_connect,
+ .iw_create_listen = c4iw_create_listen,
+ .iw_destroy_listen = c4iw_destroy_listen,
+ .iw_get_qp = c4iw_get_qp,
+ .iw_reject = c4iw_reject_cr,
+ .iw_rem_ref = c4iw_qp_rem_ref,
.map_mr_sg = c4iw_map_mr_sg,
.mmap = c4iw_mmap,
.modify_qp = c4iw_ib_modify_qp,
@@ -546,9 +534,24 @@ static const struct ib_device_ops c4iw_dev_ops = {
.reg_user_mr = c4iw_reg_user_mr,
.req_notify_cq = c4iw_arm_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};
+static int set_netdevs(struct ib_device *ib_dev, struct c4iw_rdev *rdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < rdev->lldi.nports; i++) {
+ ret = ib_device_set_netdev(ib_dev, rdev->lldi.ports[i],
+ i + 1);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
void c4iw_register_device(struct work_struct *work)
{
int ret;
@@ -593,33 +596,20 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
- dev->ibdev.iwcm = kzalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
- if (!dev->ibdev.iwcm) {
- ret = -ENOMEM;
- goto err_dealloc_ctx;
- }
-
- dev->ibdev.iwcm->connect = c4iw_connect;
- dev->ibdev.iwcm->accept = c4iw_accept_cr;
- dev->ibdev.iwcm->reject = c4iw_reject_cr;
- dev->ibdev.iwcm->create_listen = c4iw_create_listen;
- dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
- dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
- dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
- dev->ibdev.iwcm->get_qp = c4iw_get_qp;
- memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
- sizeof(dev->ibdev.iwcm->ifname));
+ memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name,
+ sizeof(dev->ibdev.iw_ifname));
rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_CXGB4;
ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
+ ret = set_netdevs(&dev->ibdev, &dev->rdev);
+ if (ret)
+ goto err_dealloc_ctx;
ret = ib_register_device(&dev->ibdev, "cxgb4_%d");
if (ret)
- goto err_kfree_iwcm;
+ goto err_dealloc_ctx;
return;
-err_kfree_iwcm:
- kfree(dev->ibdev.iwcm);
err_dealloc_ctx:
pr_err("%s - Failed registering iwarp device: %d\n",
pci_name(ctx->lldi.pdev), ret);
@@ -631,6 +621,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
{
pr_debug("c4iw_dev %p\n", dev);
ib_unregister_device(&dev->ibdev);
- kfree(dev->ibdev.iwcm);
return;
}
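provider.c also drops the separately allocated iw_cm_verbs structure: the iWARP CM entry points are now plain .iw_* members of ib_device_ops, the interface name lives in ibdev.iw_ifname, and per-port net_devices are attached with ib_device_set_netdev() before registration, which is why the err_kfree_iwcm label goes away. A hedged sketch of the resulting registration flow (function and format-string names assumed):

#include <rdma/ib_verbs.h>

static int example_register(struct ib_device *ibdev,
			    struct net_device **ports, unsigned int nports)
{
	unsigned int i;
	int ret;

	/* One ib_device_set_netdev() call per port; port numbers are 1-based. */
	for (i = 0; i < nports; i++) {
		ret = ib_device_set_netdev(ibdev, ports[i], i + 1);
		if (ret)
			return ret;
	}

	/* Nothing extra to free on failure any more. */
	return ib_register_device(ibdev, "example_%d");
}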
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d3a82839f5ea..e92b9544357a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -57,18 +57,18 @@ MODULE_PARM_DESC(db_coalescing_threshold,
static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
-MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
int ret = 0;
- spin_lock_irq(&dev->lock);
+ xa_lock_irq(&dev->qps);
if (ird <= dev->avail_ird)
dev->avail_ird -= ird;
else
ret = -ENOMEM;
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
if (ret)
dev_warn(&dev->rdev.lldi.pdev->dev,
@@ -79,9 +79,9 @@ static int alloc_ird(struct c4iw_dev *dev, u32 ird)
static void free_ird(struct c4iw_dev *dev, int ird)
{
- spin_lock_irq(&dev->lock);
+ xa_lock_irq(&dev->qps);
dev->avail_ird += ird;
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
}
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
@@ -939,7 +939,7 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
unsigned long flags;
- spin_lock_irqsave(&qhp->rhp->lock, flags);
+ xa_lock_irqsave(&qhp->rhp->qps, flags);
spin_lock(&qhp->lock);
if (qhp->rhp->db_state == NORMAL)
t4_ring_sq_db(&qhp->wq, inc, NULL);
@@ -948,7 +948,7 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
qhp->wq.sq.wq_pidx_inc += inc;
}
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ xa_unlock_irqrestore(&qhp->rhp->qps, flags);
return 0;
}
@@ -956,7 +956,7 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
unsigned long flags;
- spin_lock_irqsave(&qhp->rhp->lock, flags);
+ xa_lock_irqsave(&qhp->rhp->qps, flags);
spin_lock(&qhp->lock);
if (qhp->rhp->db_state == NORMAL)
t4_ring_rq_db(&qhp->wq, inc, NULL);
@@ -965,7 +965,7 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
qhp->wq.rq.wq_pidx_inc += inc;
}
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ xa_unlock_irqrestore(&qhp->rhp->qps, flags);
return 0;
}
@@ -1976,10 +1976,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ disconnect = 1;
if (!internal) {
- c4iw_get_ep(&qhp->ep->com);
terminate = 1;
- disconnect = 1;
} else {
terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
@@ -2095,7 +2095,7 @@ out:
return ret;
}
-int c4iw_destroy_qp(struct ib_qp *ib_qp)
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_qp *qhp;
@@ -2111,12 +2111,11 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);
- remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-
- spin_lock_irq(&rhp->lock);
+ xa_lock_irq(&rhp->qps);
+ __xa_erase(&rhp->qps, qhp->wq.sq.qid);
if (!list_empty(&qhp->db_fc_entry))
list_del_init(&qhp->db_fc_entry);
- spin_unlock_irq(&rhp->lock);
+ xa_unlock_irq(&rhp->qps);
free_ird(rhp, qhp->attr.max_ird);
c4iw_qp_rem_ref(ib_qp);
@@ -2234,7 +2233,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
kref_init(&qhp->kref);
INIT_WORK(&qhp->free_work, free_qp_work);
- ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+ ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
if (ret)
goto err_destroy_qp;
@@ -2370,7 +2369,7 @@ err_free_rq_key:
err_free_sq_key:
kfree(sq_key_mm);
err_remove_handle:
- remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+ xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
err_destroy_qp:
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
@@ -2684,11 +2683,12 @@ void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
}
}
-struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
+int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
struct ib_udata *udata)
{
+ struct ib_pd *pd = ib_srq->pd;
struct c4iw_dev *rhp;
- struct c4iw_srq *srq;
+ struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
struct c4iw_pd *php;
struct c4iw_create_srq_resp uresp;
struct c4iw_ucontext *ucontext;
@@ -2703,11 +2703,11 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
rhp = php->rhp;
if (!rhp->rdev.lldi.vr->srq.size)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
/*
* SRQ RQT and RQ must be a power of 2 and at least 16 deep.
@@ -2718,15 +2718,9 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
ibucontext);
- srq = kzalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
-
srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
- if (!srq->wr_waitp) {
- ret = -ENOMEM;
- goto err_free_srq;
- }
+ if (!srq->wr_waitp)
+ return -ENOMEM;
srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
if (srq->idx < 0) {
@@ -2760,7 +2754,7 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
srq->flags = T4_SRQ_LIMIT_SUPPORT;
- ret = insert_handle(rhp, &rhp->qpidr, srq, srq->wq.qid);
+ ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL);
if (ret)
goto err_free_queue;
@@ -2806,13 +2800,14 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
(unsigned long)srq->wq.memsize, attrs->attr.max_wr);
spin_lock_init(&srq->lock);
- return &srq->ibsrq;
+ return 0;
+
err_free_srq_db_key_mm:
kfree(srq_db_key_mm);
err_free_srq_key_mm:
kfree(srq_key_mm);
err_remove_handle:
- remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
+ xa_erase_irq(&rhp->qps, srq->wq.qid);
err_free_queue:
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
srq->wr_waitp);
@@ -2822,12 +2817,10 @@ err_free_srq_idx:
c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
c4iw_put_wr_wait(srq->wr_waitp);
-err_free_srq:
- kfree(srq);
- return ERR_PTR(ret);
+ return ret;
}
-int c4iw_destroy_srq(struct ib_srq *ibsrq)
+void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_srq *srq;
@@ -2838,13 +2831,11 @@ int c4iw_destroy_srq(struct ib_srq *ibsrq)
pr_debug("%s id %d\n", __func__, srq->wq.qid);
- remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
- ucontext = ibsrq->uobject ?
- to_c4iw_ucontext(ibsrq->uobject->context) : NULL;
+ xa_erase_irq(&rhp->qps, srq->wq.qid);
+ ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+ ibucontext);
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
srq->wr_waitp);
c4iw_free_srq_idx(&rhp->rdev, srq->idx);
c4iw_put_wr_wait(srq->wr_waitp);
- kfree(srq);
- return 0;
}
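The SRQ hunks above reflect the new allocation contract: because provider.c adds INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq), the core allocates the driver object, so c4iw_create_srq() receives a pre-allocated ib_srq and returns an errno instead of an ERR_PTR, and c4iw_destroy_srq() returns void and must not kfree(). A sketch of that embedding under assumed names:

#include <rdma/ib_verbs.h>

struct example_srq {
	struct ib_srq ibsrq;	/* embedded; the core kzalloc()s the whole object */
	u32 idx;		/* driver state, illustrative only */
};

static int example_create_srq(struct ib_srq *ibsrq,
			      struct ib_srq_init_attr *attrs,
			      struct ib_udata *udata)
{
	struct example_srq *srq = container_of(ibsrq, struct example_srq, ibsrq);

	srq->idx = 0;		/* set up driver state; no kzalloc()/kfree() here */
	return 0;		/* 0 or -errno instead of an ERR_PTR */
}

static void example_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	/* tear down driver state only; the core frees the object */
}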
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
index 9a7520ee41e0..f82d46ed969d 100644
--- a/drivers/infiniband/hw/cxgb4/restrack.c
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -149,7 +149,7 @@ static int fill_res_qp_entry(struct sk_buff *msg,
if (qhp->ucontext)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
@@ -216,7 +216,7 @@ static int fill_res_ep_entry(struct sk_buff *msg,
if (!uep)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err_free_uep;
@@ -387,7 +387,7 @@ static int fill_res_cq_entry(struct sk_buff *msg,
if (ibcq->uobject)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
@@ -447,7 +447,7 @@ static int fill_res_mr_entry(struct sk_buff *msg,
if (!stag)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
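The restrack changes are separate from the XArray work: nla_nest_start() now sets NLA_F_NESTED on the attribute it opens, so callers that must keep the historical wire format for RDMA_NLDEV_ATTR_DRIVER switch to nla_nest_start_noflag(). A small sketch of the nesting pattern (fill_driver_table is a made-up name, and the per-entry nla_put_*() calls are elided):

#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

static int fill_driver_table(struct sk_buff *msg)
{
	struct nlattr *table_attr;

	/* _noflag keeps the attribute type exactly as before */
	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	/* driver-specific nla_put_*() entries would go here */

	nla_nest_end(msg, table_attr);
	return 0;
}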
diff --git a/drivers/infiniband/hw/efa/Kconfig b/drivers/infiniband/hw/efa/Kconfig
new file mode 100644
index 000000000000..457e18ba1d57
--- /dev/null
+++ b/drivers/infiniband/hw/efa/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+#
+# Amazon fabric device configuration
+#
+
+config INFINIBAND_EFA
+ tristate "Amazon Elastic Fabric Adapter (EFA) support"
+ depends on PCI_MSI && 64BIT && !CPU_BIG_ENDIAN
+ depends on INFINIBAND_USER_ACCESS
+ help
+ This driver supports Amazon Elastic Fabric Adapter (EFA).
+
+ To compile this driver as a module, choose M here.
+ The module will be called efa.
diff --git a/drivers/infiniband/hw/efa/Makefile b/drivers/infiniband/hw/efa/Makefile
new file mode 100644
index 000000000000..6e83083af0bc
--- /dev/null
+++ b/drivers/infiniband/hw/efa/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+#
+# Makefile for Amazon Elastic Fabric Adapter (EFA) device driver.
+#
+
+obj-$(CONFIG_INFINIBAND_EFA) += efa.o
+
+efa-y := efa_com_cmd.o efa_com.o efa_main.o efa_verbs.o
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
new file mode 100644
index 000000000000..9e3cc3239c13
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_H_
+#define _EFA_H_
+
+#include <linux/bitops.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <rdma/efa-abi.h>
+#include <rdma/ib_verbs.h>
+
+#include "efa_com_cmd.h"
+
+#define DRV_MODULE_NAME "efa"
+#define DEVICE_NAME "Elastic Fabric Adapter (EFA)"
+
+#define EFA_IRQNAME_SIZE 40
+
+/* 1 for AENQ + ADMIN */
+#define EFA_NUM_MSIX_VEC 1
+#define EFA_MGMNT_MSIX_VEC_IDX 0
+
+struct efa_irq {
+ irq_handler_t handler;
+ void *data;
+ int cpu;
+ u32 vector;
+ cpumask_t affinity_hint_mask;
+ char name[EFA_IRQNAME_SIZE];
+};
+
+struct efa_sw_stats {
+ atomic64_t alloc_pd_err;
+ atomic64_t create_qp_err;
+ atomic64_t create_cq_err;
+ atomic64_t reg_mr_err;
+ atomic64_t alloc_ucontext_err;
+ atomic64_t create_ah_err;
+};
+
+/* Don't use anything other than atomic64 */
+struct efa_stats {
+ struct efa_sw_stats sw_stats;
+ atomic64_t keep_alive_rcvd;
+};
+
+struct efa_dev {
+ struct ib_device ibdev;
+ struct efa_com_dev edev;
+ struct pci_dev *pdev;
+ struct efa_com_get_device_attr_result dev_attr;
+
+ u64 reg_bar_addr;
+ u64 reg_bar_len;
+ u64 mem_bar_addr;
+ u64 mem_bar_len;
+ u64 db_bar_addr;
+ u64 db_bar_len;
+ u8 addr[EFA_GID_SIZE];
+ u32 mtu;
+
+ int admin_msix_vector_idx;
+ struct efa_irq admin_irq;
+
+ struct efa_stats stats;
+};
+
+struct efa_ucontext {
+ struct ib_ucontext ibucontext;
+ struct xarray mmap_xa;
+ u32 mmap_xa_page;
+ u16 uarn;
+};
+
+struct efa_pd {
+ struct ib_pd ibpd;
+ u16 pdn;
+};
+
+struct efa_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+};
+
+struct efa_cq {
+ struct ib_cq ibcq;
+ struct efa_ucontext *ucontext;
+ dma_addr_t dma_addr;
+ void *cpu_addr;
+ size_t size;
+ u16 cq_idx;
+};
+
+struct efa_qp {
+ struct ib_qp ibqp;
+ dma_addr_t rq_dma_addr;
+ void *rq_cpu_addr;
+ size_t rq_size;
+ enum ib_qp_state state;
+ u32 qp_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
+struct efa_ah {
+ struct ib_ah ibah;
+ u16 ah;
+ /* dest_addr */
+ u8 id[EFA_GID_SIZE];
+};
+
+int efa_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata);
+int efa_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey);
+int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+struct ib_cq *efa_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
+struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable);
+int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
+void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
+int efa_mmap(struct ib_ucontext *ibucontext,
+ struct vm_area_struct *vma);
+int efa_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_attr *ah_attr,
+ u32 flags,
+ struct ib_udata *udata);
+void efa_destroy_ah(struct ib_ah *ibah, u32 flags);
+int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata);
+enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
+ u8 port_num);
+
+#endif /* _EFA_H_ */
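efa.h places the ib_* core object at the head of each driver structure (efa_dev, efa_pd, efa_cq, and so on), so the rest of the driver can recover the EFA structure from the core pointer with container_of(). A minimal sketch of such accessors; the *_example helper names are assumptions for illustration, not declarations from this header:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static inline struct efa_dev *to_edev_example(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_pd *to_epd_example(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_ucontext *
to_eucontext_example(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}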
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
new file mode 100644
index 000000000000..2be0469d545f
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -0,0 +1,794 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_ADMIN_CMDS_H_
+#define _EFA_ADMIN_CMDS_H_
+
+#define EFA_ADMIN_API_VERSION_MAJOR 0
+#define EFA_ADMIN_API_VERSION_MINOR 1
+
+/* EFA admin queue opcodes */
+enum efa_admin_aq_opcode {
+ EFA_ADMIN_CREATE_QP = 1,
+ EFA_ADMIN_MODIFY_QP = 2,
+ EFA_ADMIN_QUERY_QP = 3,
+ EFA_ADMIN_DESTROY_QP = 4,
+ EFA_ADMIN_CREATE_AH = 5,
+ EFA_ADMIN_DESTROY_AH = 6,
+ EFA_ADMIN_REG_MR = 7,
+ EFA_ADMIN_DEREG_MR = 8,
+ EFA_ADMIN_CREATE_CQ = 9,
+ EFA_ADMIN_DESTROY_CQ = 10,
+ EFA_ADMIN_GET_FEATURE = 11,
+ EFA_ADMIN_SET_FEATURE = 12,
+ EFA_ADMIN_GET_STATS = 13,
+ EFA_ADMIN_ALLOC_PD = 14,
+ EFA_ADMIN_DEALLOC_PD = 15,
+ EFA_ADMIN_ALLOC_UAR = 16,
+ EFA_ADMIN_DEALLOC_UAR = 17,
+ EFA_ADMIN_MAX_OPCODE = 17,
+};
+
+enum efa_admin_aq_feature_id {
+ EFA_ADMIN_DEVICE_ATTR = 1,
+ EFA_ADMIN_AENQ_CONFIG = 2,
+ EFA_ADMIN_NETWORK_ATTR = 3,
+ EFA_ADMIN_QUEUE_ATTR = 4,
+ EFA_ADMIN_HW_HINTS = 5,
+ EFA_ADMIN_FEATURES_OPCODE_NUM = 8,
+};
+
+/* QP transport type */
+enum efa_admin_qp_type {
+ /* Unreliable Datagram */
+ EFA_ADMIN_QP_TYPE_UD = 1,
+ /* Scalable Reliable Datagram */
+ EFA_ADMIN_QP_TYPE_SRD = 2,
+};
+
+/* QP state */
+enum efa_admin_qp_state {
+ EFA_ADMIN_QP_STATE_RESET = 0,
+ EFA_ADMIN_QP_STATE_INIT = 1,
+ EFA_ADMIN_QP_STATE_RTR = 2,
+ EFA_ADMIN_QP_STATE_RTS = 3,
+ EFA_ADMIN_QP_STATE_SQD = 4,
+ EFA_ADMIN_QP_STATE_SQE = 5,
+ EFA_ADMIN_QP_STATE_ERR = 6,
+};
+
+enum efa_admin_get_stats_type {
+ EFA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+};
+
+enum efa_admin_get_stats_scope {
+ EFA_ADMIN_GET_STATS_SCOPE_ALL = 0,
+ EFA_ADMIN_GET_STATS_SCOPE_QUEUE = 1,
+};
+
+enum efa_admin_modify_qp_mask_bits {
+ EFA_ADMIN_QP_STATE_BIT = 0,
+ EFA_ADMIN_CUR_QP_STATE_BIT = 1,
+ EFA_ADMIN_QKEY_BIT = 2,
+ EFA_ADMIN_SQ_PSN_BIT = 3,
+ EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT = 4,
+};
+
+/*
+ * QP allocation sizes, converted from the requested QP capabilities by
+ * the QueuePair (QP) create command.
+ */
+struct efa_admin_qp_alloc_size {
+ /* Send descriptor ring size in bytes */
+ u32 send_queue_ring_size;
+
+ /* Max number of WQEs that can be outstanding on send queue. */
+ u32 send_queue_depth;
+
+ /*
+ * Recv descriptor ring size in bytes, sufficient for user-provided
+ * number of WQEs
+ */
+ u32 recv_queue_ring_size;
+
+ /* Max number of WQEs that can be outstanding on recv queue */
+ u32 recv_queue_depth;
+};
+
+struct efa_admin_create_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Protection Domain associated with this QP */
+ u16 pd;
+
+ /* QP type */
+ u8 qp_type;
+
+ /*
+ * 0 : sq_virt - If set, SQ ring base address is
+ * virtual (IOVA returned by MR registration)
+ * 1 : rq_virt - If set, RQ ring base address is
+ * virtual (IOVA returned by MR registration)
+ * 7:2 : reserved - MBZ
+ */
+ u8 flags;
+
+ /*
+ * Send queue (SQ) ring base physical address. This field is not
+	 * used if this is a Low Latency Queue (LLQ).
+ */
+ u64 sq_base_addr;
+
+ /* Receive queue (RQ) ring base address. */
+ u64 rq_base_addr;
+
+ /* Index of CQ to be associated with Send Queue completions */
+ u32 send_cq_idx;
+
+ /* Index of CQ to be associated with Recv Queue completions */
+ u32 recv_cq_idx;
+
+ /*
+ * Memory registration key for the SQ ring, used only when not in
+ * LLQ mode and base address is virtual
+ */
+ u32 sq_l_key;
+
+ /*
+ * Memory registration key for the RQ ring, used only when base
+ * address is virtual
+ */
+ u32 rq_l_key;
+
+ /* Requested QP allocation sizes */
+ struct efa_admin_qp_alloc_size qp_alloc_size;
+
+ /* UAR number */
+ u16 uar;
+
+ /* MBZ */
+ u16 reserved;
+
+ /* MBZ */
+ u32 reserved2;
+};
+
+struct efa_admin_create_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+	/* Opaque handle to be used for subsequent operations on the QP */
+ u32 qp_handle;
+
+ /* QP number in the given EFA virtual device */
+ u16 qp_num;
+
+ /* MBZ */
+ u16 reserved;
+
+ /* Index of sub-CQ for Send Queue completions */
+ u16 send_sub_cq_idx;
+
+ /* Index of sub-CQ for Receive Queue completions */
+ u16 recv_sub_cq_idx;
+
+ /* SQ doorbell address, as offset to PCIe DB BAR */
+ u32 sq_db_offset;
+
+ /* RQ doorbell address, as offset to PCIe DB BAR */
+ u32 rq_db_offset;
+
+ /*
+ * low latency send queue ring base address as an offset to PCIe
+ * MMIO LLQ_MEM BAR
+ */
+ u32 llq_descriptors_offset;
+};
+
+struct efa_admin_modify_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /*
+ * Mask indicating which fields should be updated see enum
+ * efa_admin_modify_qp_mask_bits
+ */
+ u32 modify_mask;
+
+ /* QP handle returned by create_qp command */
+ u32 qp_handle;
+
+ /* QP state */
+ u32 qp_state;
+
+ /* Override current QP state (before applying the transition) */
+ u32 cur_qp_state;
+
+ /* QKey */
+ u32 qkey;
+
+ /* SQ PSN */
+ u32 sq_psn;
+
+ /* Enable async notification when SQ is drained */
+ u8 sq_drained_async_notify;
+
+ /* MBZ */
+ u8 reserved1;
+
+ /* MBZ */
+ u16 reserved2;
+};
+
+struct efa_admin_modify_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+struct efa_admin_query_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* QP handle returned by create_qp command */
+ u32 qp_handle;
+};
+
+struct efa_admin_query_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* QP state */
+ u32 qp_state;
+
+ /* QKey */
+ u32 qkey;
+
+ /* SQ PSN */
+ u32 sq_psn;
+
+ /* Indicates that draining is in progress */
+ u8 sq_draining;
+
+ /* MBZ */
+ u8 reserved1;
+
+ /* MBZ */
+ u16 reserved2;
+};
+
+struct efa_admin_destroy_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* QP handle returned by create_qp command */
+ u32 qp_handle;
+};
+
+struct efa_admin_destroy_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/*
+ * Create Address Handle command parameters. The command must not be issued
+ * more than once for the same destination address.
+ */
+struct efa_admin_create_ah_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Destination address in network byte order */
+ u8 dest_addr[16];
+
+ /* PD number */
+ u16 pd;
+
+ u16 reserved;
+};
+
+struct efa_admin_create_ah_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* Target interface address handle (opaque) */
+ u16 ah;
+
+ u16 reserved;
+};
+
+struct efa_admin_destroy_ah_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Target interface address handle (opaque) */
+ u16 ah;
+
+ /* PD number */
+ u16 pd;
+};
+
+struct efa_admin_destroy_ah_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/*
+ * Registration of MemoryRegion, required for QP working with Virtual
+ * Addresses. In standard verbs semantics, region length is limited to 2GB
+ * space, but EFA offers larger MR support for large memory spaces, to ease
+ * work with very large datasets (e.g. a full GPU memory mapping).
+ */
+struct efa_admin_reg_mr_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Protection Domain */
+ u16 pd;
+
+ /* MBZ */
+ u16 reserved16_w1;
+
+ /* Physical Buffer List, each element is page-aligned. */
+ union {
+ /*
+ * Inline array of guest-physical page addresses of user
+ * memory pages (optimization for short region
+ * registrations)
+ */
+ u64 inline_pbl_array[4];
+
+ /* points to PBL (direct or indirect, chained if needed) */
+ struct efa_admin_ctrl_buff_info pbl;
+ } pbl;
+
+ /* Memory region length, in bytes. */
+ u64 mr_length;
+
+ /*
+ * flags and page size
+ * 4:0 : phys_page_size_shift - page size is (1 <<
+ * phys_page_size_shift). Page size is used for
+ * building the Virtual to Physical address mapping
+ * 6:5 : reserved - MBZ
+ * 7 : mem_addr_phy_mode_en - Enable bit for physical
+ * memory registration (no translation), can be used
+ * only by privileged clients. If set, PBL must
+ * contain a single entry.
+ */
+ u8 flags;
+
+ /*
+ * permissions
+ * 0 : local_write_enable - Write permissions: value
+ * of 1 needed for RQ buffers and for RDMA write
+ * 7:1 : reserved1 - remote access flags, etc
+ */
+ u8 permissions;
+
+ u16 reserved16_w5;
+
+ /* number of pages in PBL (redundant, could be calculated) */
+ u32 page_num;
+
+ /*
+ * IO Virtual Address associated with this MR. If
+ * mem_addr_phy_mode_en is set, contains the physical address of
+ * the region.
+ */
+ u64 iova;
+};
+
+struct efa_admin_reg_mr_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /*
+ * L_Key, to be used in conjunction with local buffer references in
+ * SQ and RQ WQE, or with virtual RQ/CQ rings
+ */
+ u32 l_key;
+
+ /*
+ * R_Key, to be used in RDMA messages to refer to remotely accessed
+ * memory region
+ */
+ u32 r_key;
+};
+
+struct efa_admin_dereg_mr_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* L_Key, memory region's l_key */
+ u32 l_key;
+};
+
+struct efa_admin_dereg_mr_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+struct efa_admin_create_cq_cmd {
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /*
+ * 4:0 : reserved5
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode (i.e. CQ events and MSI-X are
+ * generated), otherwise - polling
+ * 6 : virt - If set, ring base address is virtual
+ * (IOVA returned by MR registration)
+ * 7 : reserved6
+ */
+ u8 cq_caps_1;
+
+ /*
+ * 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 7:5 : reserved7
+ */
+ u8 cq_caps_2;
+
+ /* completion queue depth in # of entries. must be power of 2 */
+ u16 cq_depth;
+
+ /* msix vector assigned to this cq */
+ u32 msix_vector_idx;
+
+ /*
+ * CQ ring base address, virtual or physical depending on 'virt'
+ * flag
+ */
+ struct efa_common_mem_addr cq_ba;
+
+ /*
+ * Memory registration key for the ring, used only when base
+ * address is virtual
+ */
+ u32 l_key;
+
+ /*
+ * number of sub cqs - must be equal to sub_cqs_per_cq of queue
+ * attributes.
+ */
+ u16 num_sub_cqs;
+
+ /* UAR number */
+ u16 uar;
+};
+
+struct efa_admin_create_cq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ u16 cq_idx;
+
+ /* actual cq depth in number of entries */
+ u16 cq_actual_depth;
+};
+
+struct efa_admin_destroy_cq_cmd {
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ u16 cq_idx;
+
+ u16 reserved1;
+};
+
+struct efa_admin_destroy_cq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/*
+ * EFA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct efa_admin_aq_get_stats_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ /* command specific inline data */
+ u32 inline_data_w1[3];
+
+ struct efa_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* stats type as defined in enum efa_admin_get_stats_type */
+ u8 type;
+
+ /* stats scope defined in enum efa_admin_get_stats_scope */
+ u8 scope;
+
+ u16 scope_modifier;
+};
+
+struct efa_admin_basic_stats {
+ u64 tx_bytes;
+
+ u64 tx_pkts;
+
+ u64 rx_bytes;
+
+ u64 rx_pkts;
+
+ u64 rx_drops;
+};
+
+struct efa_admin_acq_get_stats_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ struct efa_admin_basic_stats basic_stats;
+};
+
+struct efa_admin_get_set_feature_common_desc {
+ /*
+ * 1:0 : select - 0x1 - current value; 0x3 - default
+ * value
+ * 7:3 : reserved3
+ */
+ u8 flags;
+
+ /* as appears in efa_admin_aq_feature_id */
+ u8 feature_id;
+
+ /* MBZ */
+ u16 reserved16;
+};
+
+struct efa_admin_feature_device_attr_desc {
+ /* Bitmap of efa_admin_aq_feature_id */
+ u64 supported_features;
+
+ /* Bitmap of supported page sizes in MR registrations */
+ u64 page_size_cap;
+
+ u32 fw_version;
+
+ u32 admin_api_version;
+
+ u32 device_version;
+
+ /* Bar used for SQ and RQ doorbells */
+ u16 db_bar;
+
+	/* Indicates how many bits are used for physical address access */
+ u8 phys_addr_width;
+
+	/* Indicates how many bits are used for virtual address access */
+ u8 virt_addr_width;
+};
+
+struct efa_admin_feature_queue_attr_desc {
+ /* The maximum number of queue pairs supported */
+ u32 max_qp;
+
+ u32 max_sq_depth;
+
+	/* Maximum inline buffer size, in bytes, usable by a send WR */
+ u32 inline_buf_size;
+
+ u32 max_rq_depth;
+
+ /* The maximum number of completion queues supported per VF */
+ u32 max_cq;
+
+ u32 max_cq_depth;
+
+ /* Number of sub-CQs to be created for each CQ */
+ u16 sub_cqs_per_cq;
+
+ u16 reserved;
+
+ /*
+ * Maximum number of SGEs (buffs) allowed for a single send work
+ * queue element (WQE)
+ */
+ u16 max_wr_send_sges;
+
+ /* Maximum number of SGEs allowed for a single recv WQE */
+ u16 max_wr_recv_sges;
+
+ /* The maximum number of memory regions supported */
+ u32 max_mr;
+
+	/* The maximum number of pages that can be registered */
+ u32 max_mr_pages;
+
+ /* The maximum number of protection domains supported */
+ u32 max_pd;
+
+ /* The maximum number of address handles supported */
+ u32 max_ah;
+
+ /* The maximum size of LLQ in bytes */
+ u32 max_llq_size;
+};
+
+struct efa_admin_feature_aenq_desc {
+ /* bitmask for AENQ groups the device can report */
+ u32 supported_groups;
+
+ /* bitmask for AENQ groups to report */
+ u32 enabled_groups;
+};
+
+struct efa_admin_feature_network_attr_desc {
+ /* Raw address data in network byte order */
+ u8 addr[16];
+
+ u32 mtu;
+};
+
+/*
+ * When hint value is 0, hints capabilities are not supported or driver
+ * should use its own predefined value
+ */
+struct efa_admin_hw_hints {
+ /* value in ms */
+ u16 mmio_read_timeout;
+
+ /* value in ms */
+ u16 driver_watchdog_timeout;
+
+ /* value in ms */
+ u16 admin_completion_timeout;
+
+ /* poll interval in ms */
+ u16 poll_interval;
+};
+
+struct efa_admin_get_feature_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ struct efa_admin_ctrl_buff_info control_buffer;
+
+ struct efa_admin_get_set_feature_common_desc feature_common;
+
+ u32 raw[11];
+};
+
+struct efa_admin_get_feature_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ union {
+ u32 raw[14];
+
+ struct efa_admin_feature_device_attr_desc device_attr;
+
+ struct efa_admin_feature_aenq_desc aenq;
+
+ struct efa_admin_feature_network_attr_desc network_attr;
+
+ struct efa_admin_feature_queue_attr_desc queue_attr;
+
+ struct efa_admin_hw_hints hw_hints;
+ } u;
+};
+
+struct efa_admin_set_feature_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ struct efa_admin_ctrl_buff_info control_buffer;
+
+ struct efa_admin_get_set_feature_common_desc feature_common;
+
+ union {
+ u32 raw[11];
+
+ /* AENQ configuration */
+ struct efa_admin_feature_aenq_desc aenq;
+ } u;
+};
+
+struct efa_admin_set_feature_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ union {
+ u32 raw[14];
+ } u;
+};
+
+struct efa_admin_alloc_pd_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+};
+
+struct efa_admin_alloc_pd_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* PD number */
+ u16 pd;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_dealloc_pd_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* PD number */
+ u16 pd;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_dealloc_pd_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+struct efa_admin_alloc_uar_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+};
+
+struct efa_admin_alloc_uar_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* UAR number */
+ u16 uar;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_dealloc_uar_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* UAR number */
+ u16 uar;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_dealloc_uar_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/* asynchronous event notification groups */
+enum efa_admin_aenq_group {
+ EFA_ADMIN_FATAL_ERROR = 1,
+ EFA_ADMIN_WARNING = 2,
+ EFA_ADMIN_NOTIFICATION = 3,
+ EFA_ADMIN_KEEP_ALIVE = 4,
+ EFA_ADMIN_AENQ_GROUPS_NUM = 5,
+};
+
+enum efa_admin_aenq_notification_syndrom {
+ EFA_ADMIN_SUSPEND = 0,
+ EFA_ADMIN_RESUME = 1,
+ EFA_ADMIN_UPDATE_HINTS = 2,
+};
+
+struct efa_admin_mmio_req_read_less_resp {
+ u16 req_id;
+
+ u16 reg_off;
+
+ /* value is valid when poll is cleared */
+ u32 reg_val;
+};
+
+/* create_qp_cmd */
+#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
+#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_SHIFT 1
+#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
+
+/* reg_mr_cmd */
+#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
+#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT 7
+#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
+#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
+
+/* create_cq_cmd */
+#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_SHIFT 6
+#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
+#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+#endif /* _EFA_ADMIN_CMDS_H_ */
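The flags fields of these admin commands are documented as packed bit layouts, and the SHIFT/MASK macros gathered at the bottom of the header encode them. A brief sketch of how a caller could pack the reg_mr flags byte with those macros; the helper name is illustrative only:

static u8 efa_pack_reg_mr_flags_example(u8 page_size_shift, bool phys_mode)
{
	u8 flags = 0;

	/* bits 4:0 - log2 of the physical page size used for the mapping */
	flags |= page_size_shift &
		 EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;

	/* bit 7 - physical (no-translation) registration, privileged only */
	if (phys_mode)
		flags |= EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK;

	return flags;
}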
diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h
new file mode 100644
index 000000000000..c8e0c8b905be
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_admin_defs.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_ADMIN_H_
+#define _EFA_ADMIN_H_
+
+enum efa_admin_aq_completion_status {
+ EFA_ADMIN_SUCCESS = 0,
+ EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+ EFA_ADMIN_BAD_OPCODE = 2,
+ EFA_ADMIN_UNSUPPORTED_OPCODE = 3,
+ EFA_ADMIN_MALFORMED_REQUEST = 4,
+ /* Additional status is provided in ACQ entry extended_status */
+ EFA_ADMIN_ILLEGAL_PARAMETER = 5,
+ EFA_ADMIN_UNKNOWN_ERROR = 6,
+ EFA_ADMIN_RESOURCE_BUSY = 7,
+};
+
+struct efa_admin_aq_common_desc {
+ /*
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ u16 command_id;
+
+ /* as appears in efa_admin_aq_opcode */
+ u8 opcode;
+
+ /*
+ * 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ u8 flags;
+};
+
+/*
+ * used in efa_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct efa_admin_ctrl_buff_info {
+ u32 length;
+
+ struct efa_common_mem_addr address;
+};
+
+struct efa_admin_aq_entry {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ u32 inline_data_w1[3];
+
+ struct efa_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ u32 inline_data_w4[12];
+};
+
+struct efa_admin_acq_common_desc {
+ /*
+ * command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ u16 command;
+
+ u8 status;
+
+ /*
+ * 0 : phase
+ * 7:1 : reserved1
+ */
+ u8 flags;
+
+ u16 extended_status;
+
+ /*
+ * indicates to the driver which AQ entry has been consumed by the
+ * device and could be reused
+ */
+ u16 sq_head_indx;
+};
+
+struct efa_admin_acq_entry {
+ struct efa_admin_acq_common_desc acq_common_descriptor;
+
+ u32 response_specific_data[14];
+};
+
+struct efa_admin_aenq_common_desc {
+ u16 group;
+
+ u16 syndrom;
+
+ /*
+ * 0 : phase
+ * 7:1 : reserved - MBZ
+ */
+ u8 flags;
+
+ u8 reserved1[3];
+
+ u32 timestamp_low;
+
+ u32 timestamp_high;
+};
+
+struct efa_admin_aenq_entry {
+ struct efa_admin_aenq_common_desc aenq_common_desc;
+
+ /* command specific inline data */
+ u32 inline_data_w4[12];
+};
+
+/* aq_common_desc */
+#define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* acq_common_desc */
+#define EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_common_desc */
+#define EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+#endif /* _EFA_ADMIN_H_ */
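Each descriptor shares the same layout conventions: a 12-bit command_id in the first 16-bit word and the queue phase in bit 0 of flags, as encoded by the masks above. A small sketch of pulling both out of a completion descriptor, which is essentially what the completion path in efa_com.c below does:

static void efa_parse_acq_desc_example(const struct efa_admin_acq_common_desc *desc,
				       u16 *cmd_id, u8 *phase)
{
	*cmd_id = desc->command & EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
	*phase = desc->flags & EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
}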
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
new file mode 100644
index 000000000000..a5c788741a04
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -0,0 +1,1160 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include "efa_com.h"
+#include "efa_regs_defs.h"
+
+#define ADMIN_CMD_TIMEOUT_US 30000000 /* usecs */
+
+#define EFA_REG_READ_TIMEOUT_US 50000 /* usecs */
+#define EFA_MMIO_READ_INVALID 0xffffffff
+
+#define EFA_POLL_INTERVAL_MS 100 /* msecs */
+
+#define EFA_ASYNC_QUEUE_DEPTH 16
+#define EFA_ADMIN_QUEUE_DEPTH 32
+
+#define MIN_EFA_VER\
+ ((EFA_ADMIN_API_VERSION_MAJOR << EFA_REGS_VERSION_MAJOR_VERSION_SHIFT) | \
+ (EFA_ADMIN_API_VERSION_MINOR & EFA_REGS_VERSION_MINOR_VERSION_MASK))
+
+#define EFA_CTRL_MAJOR 0
+#define EFA_CTRL_MINOR 0
+#define EFA_CTRL_SUB_MINOR 1
+
+#define MIN_EFA_CTRL_VER \
+ (((EFA_CTRL_MAJOR) << \
+ (EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
+ ((EFA_CTRL_MINOR) << \
+ (EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
+ (EFA_CTRL_SUB_MINOR))
+
+#define EFA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
+#define EFA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
+
+#define EFA_REGS_ADMIN_INTR_MASK 1
+
+enum efa_cmd_status {
+ EFA_CMD_SUBMITTED,
+ EFA_CMD_COMPLETED,
+ /* Abort - canceled by the driver */
+ EFA_CMD_ABORTED,
+};
+
+struct efa_comp_ctx {
+ struct completion wait_event;
+ struct efa_admin_acq_entry *user_cqe;
+ u32 comp_size;
+ enum efa_cmd_status status;
+ /* status from the device */
+ u8 comp_status;
+ u8 cmd_opcode;
+ u8 occupied;
+};
+
+static const char *efa_com_cmd_str(u8 cmd)
+{
+#define EFA_CMD_STR_CASE(_cmd) case EFA_ADMIN_##_cmd: return #_cmd
+
+ switch (cmd) {
+ EFA_CMD_STR_CASE(CREATE_QP);
+ EFA_CMD_STR_CASE(MODIFY_QP);
+ EFA_CMD_STR_CASE(QUERY_QP);
+ EFA_CMD_STR_CASE(DESTROY_QP);
+ EFA_CMD_STR_CASE(CREATE_AH);
+ EFA_CMD_STR_CASE(DESTROY_AH);
+ EFA_CMD_STR_CASE(REG_MR);
+ EFA_CMD_STR_CASE(DEREG_MR);
+ EFA_CMD_STR_CASE(CREATE_CQ);
+ EFA_CMD_STR_CASE(DESTROY_CQ);
+ EFA_CMD_STR_CASE(GET_FEATURE);
+ EFA_CMD_STR_CASE(SET_FEATURE);
+ EFA_CMD_STR_CASE(GET_STATS);
+ EFA_CMD_STR_CASE(ALLOC_PD);
+ EFA_CMD_STR_CASE(DEALLOC_PD);
+ EFA_CMD_STR_CASE(ALLOC_UAR);
+ EFA_CMD_STR_CASE(DEALLOC_UAR);
+ default: return "unknown command opcode";
+ }
+#undef EFA_CMD_STR_CASE
+}
+
+static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
+{
+ struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
+ struct efa_admin_mmio_req_read_less_resp *read_resp;
+ unsigned long exp_time;
+ u32 mmio_read_reg;
+ u32 err;
+
+ read_resp = mmio_read->read_resp;
+
+ spin_lock(&mmio_read->lock);
+ mmio_read->seq_num++;
+
+ /* trash DMA req_id to identify when hardware is done */
+ read_resp->req_id = mmio_read->seq_num + 0x9aL;
+ mmio_read_reg = (offset << EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
+ EFA_REGS_MMIO_REG_READ_REG_OFF_MASK;
+ mmio_read_reg |= mmio_read->seq_num &
+ EFA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+
+ writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);
+
+ exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
+ do {
+ if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
+ break;
+ udelay(1);
+ } while (time_is_after_jiffies(exp_time));
+
+ if (read_resp->req_id != mmio_read->seq_num) {
+ ibdev_err(edev->efa_dev,
+ "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
+ mmio_read->seq_num, offset, read_resp->req_id,
+ read_resp->reg_off);
+ err = EFA_MMIO_READ_INVALID;
+ goto out;
+ }
+
+ if (read_resp->reg_off != offset) {
+ ibdev_err(edev->efa_dev,
+ "Reading register failed: wrong offset provided\n");
+ err = EFA_MMIO_READ_INVALID;
+ goto out;
+ }
+
+ err = read_resp->reg_val;
+out:
+ spin_unlock(&mmio_read->lock);
+ return err;
+}
+
+static int efa_com_admin_init_sq(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_com_admin_sq *sq = &aq->sq;
+ u16 size = aq->depth * sizeof(*sq->entries);
+ u32 addr_high;
+ u32 addr_low;
+ u32 aq_caps;
+
+ sq->entries =
+ dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
+ if (!sq->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&sq->lock);
+
+ sq->cc = 0;
+ sq->pc = 0;
+ sq->phase = 1;
+
+ sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);
+
+ addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(sq->dma_addr);
+ addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(sq->dma_addr);
+
+ writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
+ writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);
+
+ aq_caps = aq->depth & EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
+ aq_caps |= (sizeof(struct efa_admin_aq_entry) <<
+ EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
+ EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+
+ writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);
+
+ return 0;
+}
+
+static int efa_com_admin_init_cq(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_com_admin_cq *cq = &aq->cq;
+ u16 size = aq->depth * sizeof(*cq->entries);
+ u32 addr_high;
+ u32 addr_low;
+ u32 acq_caps;
+
+ cq->entries =
+ dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
+ if (!cq->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&cq->lock);
+
+ cq->cc = 0;
+ cq->phase = 1;
+
+ addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(cq->dma_addr);
+ addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(cq->dma_addr);
+
+ writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
+ writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);
+
+ acq_caps = aq->depth & EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
+ acq_caps |= (sizeof(struct efa_admin_acq_entry) <<
+ EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
+ EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
+ acq_caps |= (aq->msix_vector_idx <<
+ EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT) &
+ EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK;
+
+ writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);
+
+ return 0;
+}
+
+static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
+ struct efa_aenq_handlers *aenq_handlers)
+{
+ struct efa_com_aenq *aenq = &edev->aenq;
+ u32 addr_low, addr_high, aenq_caps;
+ u16 size;
+
+ if (!aenq_handlers) {
+ ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n");
+ return -EINVAL;
+ }
+
+ size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
+ aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
+ GFP_KERNEL);
+ if (!aenq->entries)
+ return -ENOMEM;
+
+ aenq->aenq_handlers = aenq_handlers;
+ aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
+ aenq->cc = 0;
+ aenq->phase = 1;
+
+ addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+
+ writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
+ writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);
+
+ aenq_caps = aenq->depth & EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= (sizeof(struct efa_admin_aenq_entry) <<
+ EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ aenq_caps |= (aenq->msix_vector_idx
+ << EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT) &
+ EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK;
+ writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);
+
+ /*
+ * Init cons_db to mark that all entries in the queue
+ * are initially available
+ */
+ writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
+
+ return 0;
+}
+
+/* ID to be used with efa_com_get_comp_ctx */
+static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
+{
+ u16 ctx_id;
+
+ spin_lock(&aq->comp_ctx_lock);
+ ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
+ aq->comp_ctx_pool_next++;
+ spin_unlock(&aq->comp_ctx_lock);
+
+ return ctx_id;
+}
+
+static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
+ u16 ctx_id)
+{
+ spin_lock(&aq->comp_ctx_lock);
+ aq->comp_ctx_pool_next--;
+ aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
+ spin_unlock(&aq->comp_ctx_lock);
+}
+
+static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
+ struct efa_comp_ctx *comp_ctx)
+{
+ u16 comp_id = comp_ctx->user_cqe->acq_common_descriptor.command &
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ ibdev_dbg(aq->efa_dev, "Putting completion command_id %d\n", comp_id);
+ comp_ctx->occupied = 0;
+ efa_com_dealloc_ctx_id(aq, comp_id);
+}
+
+static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
+ u16 command_id, bool capture)
+{
+ if (command_id >= aq->depth) {
+ ibdev_err(aq->efa_dev,
+ "command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, aq->depth);
+ return NULL;
+ }
+
+ if (aq->comp_ctx[command_id].occupied && capture) {
+ ibdev_err(aq->efa_dev, "Completion context is occupied\n");
+ return NULL;
+ }
+
+ if (capture) {
+ aq->comp_ctx[command_id].occupied = 1;
+ ibdev_dbg(aq->efa_dev, "Taking completion ctxt command_id %d\n",
+ command_id);
+ }
+
+ return &aq->comp_ctx[command_id];
+}
+
+static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
+ struct efa_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct efa_comp_ctx *comp_ctx;
+ u16 queue_size_mask;
+ u16 ctx_id;
+ u16 pi;
+
+ queue_size_mask = aq->depth - 1;
+ pi = aq->sq.pc & queue_size_mask;
+
+ ctx_id = efa_com_alloc_ctx_id(aq);
+
+ cmd->aq_common_descriptor.flags |= aq->sq.phase &
+ EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+
+ cmd->aq_common_descriptor.command_id |= ctx_id &
+ EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = efa_com_get_comp_ctx(aq, ctx_id, true);
+ if (!comp_ctx) {
+ efa_com_dealloc_ctx_id(aq, ctx_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ comp_ctx->status = EFA_CMD_SUBMITTED;
+ comp_ctx->comp_size = comp_size_in_bytes;
+ comp_ctx->user_cqe = comp;
+ comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+
+ reinit_completion(&comp_ctx->wait_event);
+
+ memcpy(&aq->sq.entries[pi], cmd, cmd_size_in_bytes);
+
+ aq->sq.pc++;
+ atomic64_inc(&aq->stats.submitted_cmd);
+
+ if ((aq->sq.pc & queue_size_mask) == 0)
+ aq->sq.phase = !aq->sq.phase;
+
+ /* barrier not needed in case of writel */
+ writel(aq->sq.pc, aq->sq.db_addr);
+
+ return comp_ctx;
+}
+
+static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
+{
+ size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
+ size_t size = aq->depth * sizeof(struct efa_comp_ctx);
+ struct efa_comp_ctx *comp_ctx;
+ u16 i;
+
+ aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
+ aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
+ if (!aq->comp_ctx || !aq->comp_ctx_pool) {
+ devm_kfree(aq->dmadev, aq->comp_ctx_pool);
+ devm_kfree(aq->dmadev, aq->comp_ctx);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < aq->depth; i++) {
+ comp_ctx = efa_com_get_comp_ctx(aq, i, false);
+ if (comp_ctx)
+ init_completion(&comp_ctx->wait_event);
+
+ aq->comp_ctx_pool[i] = i;
+ }
+
+ spin_lock_init(&aq->comp_ctx_lock);
+
+ aq->comp_ctx_pool_next = 0;
+
+ return 0;
+}
+
+static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
+ struct efa_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct efa_comp_ctx *comp_ctx;
+
+ spin_lock(&aq->sq.lock);
+ if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
+ ibdev_err(aq->efa_dev, "Admin queue is closed\n");
+ spin_unlock(&aq->sq.lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
+ comp_size_in_bytes);
+ spin_unlock(&aq->sq.lock);
+ if (IS_ERR(comp_ctx))
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+
+ return comp_ctx;
+}
+
+static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
+ struct efa_admin_acq_entry *cqe)
+{
+ struct efa_comp_ctx *comp_ctx;
+ u16 cmd_id;
+
+ cmd_id = cqe->acq_common_descriptor.command &
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
+ if (!comp_ctx) {
+ ibdev_err(aq->efa_dev,
+ "comp_ctx is NULL. Changing the admin queue running state\n");
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+ return;
+ }
+
+ comp_ctx->status = EFA_CMD_COMPLETED;
+ comp_ctx->comp_status = cqe->acq_common_descriptor.status;
+ if (comp_ctx->user_cqe)
+ memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
+
+ if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
+ complete(&comp_ctx->wait_event);
+}
+
+static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+{
+ struct efa_admin_acq_entry *cqe;
+ u16 queue_size_mask;
+ u16 comp_num = 0;
+ u8 phase;
+ u16 ci;
+
+ queue_size_mask = aq->depth - 1;
+
+ ci = aq->cq.cc & queue_size_mask;
+ phase = aq->cq.phase;
+
+ cqe = &aq->cq.entries[ci];
+
+ /* Go over all the completions */
+ while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
+ EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /*
+ * Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ dma_rmb();
+ efa_com_handle_single_admin_completion(aq, cqe);
+
+ ci++;
+ comp_num++;
+ if (ci == aq->depth) {
+ ci = 0;
+ phase = !phase;
+ }
+
+ cqe = &aq->cq.entries[ci];
+ }
+
+ aq->cq.cc += comp_num;
+ aq->cq.phase = phase;
+ aq->sq.cc += comp_num;
+ atomic64_add(comp_num, &aq->stats.completed_cmd);
+}
+
+static int efa_com_comp_status_to_errno(u8 comp_status)
+{
+ switch (comp_status) {
+ case EFA_ADMIN_SUCCESS:
+ return 0;
+ case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
+ return -ENOMEM;
+ case EFA_ADMIN_UNSUPPORTED_OPCODE:
+ return -EOPNOTSUPP;
+ case EFA_ADMIN_BAD_OPCODE:
+ case EFA_ADMIN_MALFORMED_REQUEST:
+ case EFA_ADMIN_ILLEGAL_PARAMETER:
+ case EFA_ADMIN_UNKNOWN_ERROR:
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx,
+ struct efa_com_admin_queue *aq)
+{
+ unsigned long timeout;
+ unsigned long flags;
+ int err;
+
+ timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);
+
+ while (1) {
+ spin_lock_irqsave(&aq->cq.lock, flags);
+ efa_com_handle_admin_completion(aq);
+ spin_unlock_irqrestore(&aq->cq.lock, flags);
+
+ if (comp_ctx->status != EFA_CMD_SUBMITTED)
+ break;
+
+ if (time_is_before_jiffies(timeout)) {
+ ibdev_err(aq->efa_dev,
+ "Wait for completion (polling) timeout\n");
+ /* EFA didn't have any completion */
+ atomic64_inc(&aq->stats.no_completion);
+
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+ err = -ETIME;
+ goto out;
+ }
+
+ msleep(aq->poll_interval);
+ }
+
+ if (comp_ctx->status == EFA_CMD_ABORTED) {
+ ibdev_err(aq->efa_dev, "Command was aborted\n");
+ atomic64_inc(&aq->stats.aborted_cmd);
+ err = -ENODEV;
+ goto out;
+ }
+
+ WARN_ONCE(comp_ctx->status != EFA_CMD_COMPLETED,
+ "Invalid completion status %d\n", comp_ctx->status);
+
+ err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
+out:
+ efa_com_put_comp_ctx(aq, comp_ctx);
+ return err;
+}
+
+static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
+ struct efa_com_admin_queue *aq)
+{
+ unsigned long flags;
+ int err;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ usecs_to_jiffies(aq->completion_timeout));
+
+ /*
+	 * In case the command wasn't completed, find out the root cause.
+	 * There might be two kinds of errors:
+	 * 1) No completion (timeout reached)
+	 * 2) There is a completion but the driver didn't receive the MSI-X
+	 *    interrupt.
+ */
+ if (comp_ctx->status == EFA_CMD_SUBMITTED) {
+ spin_lock_irqsave(&aq->cq.lock, flags);
+ efa_com_handle_admin_completion(aq);
+ spin_unlock_irqrestore(&aq->cq.lock, flags);
+
+ atomic64_inc(&aq->stats.no_completion);
+
+ if (comp_ctx->status == EFA_CMD_COMPLETED)
+ ibdev_err(aq->efa_dev,
+ "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ efa_com_cmd_str(comp_ctx->cmd_opcode),
+ comp_ctx->cmd_opcode, comp_ctx->status,
+ comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ else
+ ibdev_err(aq->efa_dev,
+ "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ efa_com_cmd_str(comp_ctx->cmd_opcode),
+ comp_ctx->cmd_opcode, comp_ctx->status,
+ comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+ err = -ETIME;
+ goto out;
+ }
+
+ err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
+out:
+ efa_com_put_comp_ctx(aq, comp_ctx);
+ return err;
+}
+
+/*
+ * There are two ways to wait for a completion.
+ * Polling mode - wait until the completion is available.
+ * Async mode - wait on a wait queue until the completion is ready
+ * (or the timeout expires).
+ * In async mode the IRQ handler is expected to call
+ * efa_com_handle_admin_completion() to mark the completions.
+ */
+static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx,
+ struct efa_com_admin_queue *aq)
+{
+ if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
+ return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);
+
+ return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
+}
+
+/**
+ * efa_com_cmd_exec - Execute admin command
+ * @aq: admin queue.
+ * @cmd: the admin command to execute.
+ * @cmd_size: the command size.
+ * @comp: command completion return entry.
+ * @comp_size: command completion size.
+ * Submit an admin command and then wait until the device returns a
+ * completion.
+ * The completion will be copied into comp.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
+ struct efa_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size)
+{
+ struct efa_comp_ctx *comp_ctx;
+ int err;
+
+ might_sleep();
+
+ /* In case of queue FULL */
+ down(&aq->avail_cmds);
+
+ ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
+ efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+ cmd->aq_common_descriptor.opcode);
+ comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
+ if (IS_ERR(comp_ctx)) {
+ ibdev_err(aq->efa_dev,
+ "Failed to submit command %s (opcode %u) err %ld\n",
+ efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+ cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
+
+ up(&aq->avail_cmds);
+ return PTR_ERR(comp_ctx);
+ }
+
+ err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
+ if (err)
+ ibdev_err(aq->efa_dev,
+ "Failed to process command %s (opcode %u) comp_status %d err %d\n",
+ efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+ cmd->aq_common_descriptor.opcode,
+ comp_ctx->comp_status, err);
+
+ up(&aq->avail_cmds);
+
+ return err;
+}
+
+/**
+ * efa_com_abort_admin_commands - Abort all the outstanding admin commands.
+ * @edev: EFA communication layer struct
+ *
+ * This method aborts all the outstanding admin commands.
+ * The caller should then call efa_com_wait_for_abort_completion to make sure
+ * all the commands were completed.
+ */
+static void efa_com_abort_admin_commands(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_comp_ctx *comp_ctx;
+ unsigned long flags;
+ u16 i;
+
+ spin_lock(&aq->sq.lock);
+ spin_lock_irqsave(&aq->cq.lock, flags);
+ for (i = 0; i < aq->depth; i++) {
+ comp_ctx = efa_com_get_comp_ctx(aq, i, false);
+ if (!comp_ctx)
+ break;
+
+ comp_ctx->status = EFA_CMD_ABORTED;
+
+ complete(&comp_ctx->wait_event);
+ }
+ spin_unlock_irqrestore(&aq->cq.lock, flags);
+ spin_unlock(&aq->sq.lock);
+}
+
+/**
+ * efa_com_wait_for_abort_completion - Wait for admin commands abort.
+ * @edev: EFA communication layer struct
+ *
+ * This method waits until all the outstanding admin commands have completed.
+ */
+static void efa_com_wait_for_abort_completion(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int i;
+
+ /* all mine */
+ for (i = 0; i < aq->depth; i++)
+ down(&aq->avail_cmds);
+
+ /* let it go */
+ for (i = 0; i < aq->depth; i++)
+ up(&aq->avail_cmds);
+}
+
+static void efa_com_admin_flush(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+
+ efa_com_abort_admin_commands(edev);
+ efa_com_wait_for_abort_completion(edev);
+}
+
+/**
+ * efa_com_admin_destroy - Destroy the admin and the async events queues.
+ * @edev: EFA communication layer struct
+ */
+void efa_com_admin_destroy(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_com_aenq *aenq = &edev->aenq;
+ struct efa_com_admin_cq *cq = &aq->cq;
+ struct efa_com_admin_sq *sq = &aq->sq;
+ u16 size;
+
+ efa_com_admin_flush(edev);
+
+ devm_kfree(edev->dmadev, aq->comp_ctx_pool);
+ devm_kfree(edev->dmadev, aq->comp_ctx);
+
+ size = aq->depth * sizeof(*sq->entries);
+ dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);
+
+ size = aq->depth * sizeof(*cq->entries);
+ dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr);
+
+ size = aenq->depth * sizeof(*aenq->entries);
+ dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
+}
+
+/**
+ * efa_com_set_admin_polling_mode - Set the admin completion queue polling mode
+ * @edev: EFA communication layer struct
+ * @polling: Enable/Disable polling mode
+ *
+ * Set the admin completion mode.
+ */
+void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
+{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = EFA_REGS_ADMIN_INTR_MASK;
+
+ writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
+ if (polling)
+ set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
+ else
+ clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
+}
+
+static void efa_com_stats_init(struct efa_com_dev *edev)
+{
+ atomic64_t *s = (atomic64_t *)&edev->aq.stats;
+ int i;
+
+ for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
+ atomic64_set(s, 0);
+}
+
+/**
+ * efa_com_admin_init - Init the admin and the async queues
+ * @edev: EFA communication layer struct
+ * @aenq_handlers: The handlers to be called upon AENQ events.
+ *
+ * Initialize the admin submission and completion queues.
+ * Initialize the asynchronous events notification queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int efa_com_admin_init(struct efa_com_dev *edev,
+ struct efa_aenq_handlers *aenq_handlers)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ u32 timeout;
+ u32 dev_sts;
+ u32 cap;
+ int err;
+
+ dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
+ if (!(dev_sts & EFA_REGS_DEV_STS_READY_MASK)) {
+ ibdev_err(edev->efa_dev,
+ "Device isn't ready, abort com init %#x\n", dev_sts);
+ return -ENODEV;
+ }
+
+ aq->depth = EFA_ADMIN_QUEUE_DEPTH;
+
+ aq->dmadev = edev->dmadev;
+ aq->efa_dev = edev->efa_dev;
+ set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);
+
+ sema_init(&aq->avail_cmds, aq->depth);
+
+ efa_com_stats_init(edev);
+
+ err = efa_com_init_comp_ctxt(aq);
+ if (err)
+ return err;
+
+ err = efa_com_admin_init_sq(edev);
+ if (err)
+ goto err_destroy_comp_ctxt;
+
+ err = efa_com_admin_init_cq(edev);
+ if (err)
+ goto err_destroy_sq;
+
+ efa_com_set_admin_polling_mode(edev, false);
+
+ err = efa_com_admin_init_aenq(edev, aenq_handlers);
+ if (err)
+ goto err_destroy_cq;
+
+ cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
+ timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ aq->completion_timeout = timeout * 100000;
+ else
+ aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
+ aq->poll_interval = EFA_POLL_INTERVAL_MS;
+
+ set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+
+ return 0;
+
+err_destroy_cq:
+ dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
+ aq->cq.entries, aq->cq.dma_addr);
+err_destroy_sq:
+ dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
+ aq->sq.entries, aq->sq.dma_addr);
+err_destroy_comp_ctxt:
+ devm_kfree(edev->dmadev, aq->comp_ctx);
+
+ return err;
+}
+
+/**
+ * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler
+ * @edev: EFA communication layer struct
+ *
+ * This method goes over the admin completion queue and wakes up
+ * all the pending threads that wait on their commands' wait events.
+ *
+ * @note: Should be called after MSI-X interrupt.
+ */
+void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&edev->aq.cq.lock, flags);
+ efa_com_handle_admin_completion(&edev->aq);
+ spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
+}
+
+/*
+ * efa_handle_specific_aenq_event:
+ * return the handler that is relevant to the specific event group
+ */
+static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev,
+ u16 group)
+{
+ struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers;
+
+ if (group < EFA_MAX_HANDLERS && aenq_handlers->handlers[group])
+ return aenq_handlers->handlers[group];
+
+ return aenq_handlers->unimplemented_handler;
+}
+
+/**
+ * efa_com_aenq_intr_handler - AENQ interrupt handler
+ * @edev: EFA communication layer struct
+ * @data: Data of interrupt handler.
+ *
+ * Go over the async event notification queue and call the proper aenq handler.
+ */
+void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data)
+{
+ struct efa_admin_aenq_common_desc *aenq_common;
+ struct efa_com_aenq *aenq = &edev->aenq;
+ struct efa_admin_aenq_entry *aenq_e;
+ efa_aenq_handler handler_cb;
+ u32 processed = 0;
+ u8 phase;
+ u32 ci;
+
+ ci = aenq->cc & (aenq->depth - 1);
+ phase = aenq->phase;
+ aenq_e = &aenq->entries[ci]; /* Get first entry */
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+ while ((READ_ONCE(aenq_common->flags) &
+ EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /*
+ * Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ dma_rmb();
+
+		/* Handle specific event */
+ handler_cb = efa_com_get_specific_aenq_cb(edev,
+ aenq_common->group);
+		handler_cb(data, aenq_e); /* call the actual event handler */
+
+ /* Get next event entry */
+ ci++;
+ processed++;
+
+ if (ci == aenq->depth) {
+ ci = 0;
+ phase = !phase;
+ }
+ aenq_e = &aenq->entries[ci];
+ aenq_common = &aenq_e->aenq_common_desc;
+ }
+
+ aenq->cc += processed;
+ aenq->phase = phase;
+
+ /* Don't update aenq doorbell if there weren't any processed events */
+ if (!processed)
+ return;
+
+ /* barrier not needed in case of writel */
+ writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
+}
+
+static void efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev *edev)
+{
+ struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
+ u32 addr_high;
+ u32 addr_low;
+
+ /* dma_addr_bits is unknown at this point */
+ addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0);
+ addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0);
+
+ writel(addr_high, edev->reg_bar + EFA_REGS_MMIO_RESP_HI_OFF);
+ writel(addr_low, edev->reg_bar + EFA_REGS_MMIO_RESP_LO_OFF);
+}
+
+int efa_com_mmio_reg_read_init(struct efa_com_dev *edev)
+{
+ struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
+
+ spin_lock_init(&mmio_read->lock);
+ mmio_read->read_resp =
+ dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ if (!mmio_read->read_resp)
+ return -ENOMEM;
+
+ efa_com_mmio_reg_read_resp_addr_init(edev);
+
+ mmio_read->read_resp->req_id = 0;
+ mmio_read->seq_num = 0;
+ mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US;
+
+ return 0;
+}
+
+void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
+{
+ struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
+
+ dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+}
+
+int efa_com_validate_version(struct efa_com_dev *edev)
+{
+ u32 ctrl_ver_masked;
+ u32 ctrl_ver;
+ u32 ver;
+
+ /*
+	 * Make sure the EFA version and the controller version are at least
+	 * the versions the driver expects
+ */
+ ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF);
+ ctrl_ver = efa_com_reg_read32(edev,
+ EFA_REGS_CONTROLLER_VERSION_OFF);
+
+ ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
+ (ver & EFA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ EFA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & EFA_REGS_VERSION_MINOR_VERSION_MASK);
+
+ if (ver < MIN_EFA_VER) {
+ ibdev_err(edev->efa_dev,
+ "EFA version is lower than the minimal version the driver supports\n");
+ return -EOPNOTSUPP;
+ }
+
+ ibdev_dbg(edev->efa_dev,
+ "efa controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+ EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+ EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+ (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+ (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+ EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+
+ ctrl_ver_masked =
+ (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
+ (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
+ (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
+
+ /* Validate the ctrl version without the implementation ID */
+ if (ctrl_ver_masked < MIN_EFA_CTRL_VER) {
+ ibdev_err(edev->efa_dev,
+ "EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * efa_com_get_dma_width - Retrieve the physical DMA address width the
+ * device supports.
+ * @edev: EFA communication layer struct
+ *
+ * Retrieve the maximum physical address bits the device can handle.
+ *
+ * @return: > 0 on Success and negative value otherwise.
+ */
+int efa_com_get_dma_width(struct efa_com_dev *edev)
+{
+ u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
+ int width;
+
+ width = (caps & EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
+ EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+
+ ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);
+
+ if (width < 32 || width > 64) {
+ ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width);
+ return -EINVAL;
+ }
+
+ edev->dma_addr_bits = width;
+
+ return width;
+}
+
+static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout,
+ u16 exp_state)
+{
+ u32 val, i;
+
+ for (i = 0; i < timeout; i++) {
+ val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
+
+ if ((val & EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
+ exp_state)
+ return 0;
+
+ ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
+ msleep(EFA_POLL_INTERVAL_MS);
+ }
+
+ return -ETIME;
+}
+
+/**
+ * efa_com_dev_reset - Perform an FLR (Function Level Reset) on the device.
+ * @edev: EFA communication layer struct
+ * @reset_reason: The reason that triggered the reset, in case of an error.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int efa_com_dev_reset(struct efa_com_dev *edev,
+ enum efa_regs_reset_reason_types reset_reason)
+{
+ u32 stat, timeout, cap, reset_val;
+ int err;
+
+ stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
+ cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
+
+ if (!(stat & EFA_REGS_DEV_STS_READY_MASK)) {
+ ibdev_err(edev->efa_dev,
+ "Device isn't ready, can't reset device\n");
+ return -EINVAL;
+ }
+
+ timeout = (cap & EFA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
+ EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+ if (!timeout) {
+ ibdev_err(edev->efa_dev, "Invalid timeout value\n");
+ return -EINVAL;
+ }
+
+ /* start reset */
+ reset_val = EFA_REGS_DEV_CTL_DEV_RESET_MASK;
+ reset_val |= (reset_reason << EFA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ EFA_REGS_DEV_CTL_RESET_REASON_MASK;
+ writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
+
+ /* reset clears the mmio readless address, restore it */
+ efa_com_mmio_reg_read_resp_addr_init(edev);
+
+ err = wait_for_reset_state(edev, timeout,
+ EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ if (err) {
+ ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
+ return err;
+ }
+
+ /* reset done */
+ writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
+ err = wait_for_reset_state(edev, timeout, 0);
+ if (err) {
+ ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n");
+ return err;
+ }
+
+ timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ edev->aq.completion_timeout = timeout * 100000;
+ else
+ edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
new file mode 100644
index 000000000000..84d96724a74b
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_COM_H_
+#define _EFA_COM_H_
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "efa_common_defs.h"
+#include "efa_admin_defs.h"
+#include "efa_admin_cmds_defs.h"
+#include "efa_regs_defs.h"
+
+#define EFA_MAX_HANDLERS 256
+
+struct efa_com_admin_cq {
+ struct efa_admin_acq_entry *entries;
+ dma_addr_t dma_addr;
+ spinlock_t lock; /* Protects ACQ */
+
+ u16 cc; /* consumer counter */
+ u8 phase;
+};
+
+struct efa_com_admin_sq {
+ struct efa_admin_aq_entry *entries;
+ dma_addr_t dma_addr;
+ spinlock_t lock; /* Protects ASQ */
+
+ u32 __iomem *db_addr;
+
+ u16 cc; /* consumer counter */
+ u16 pc; /* producer counter */
+ u8 phase;
+
+};
+
+/* Don't use anything other than atomic64 */
+struct efa_com_stats_admin {
+ atomic64_t aborted_cmd;
+ atomic64_t submitted_cmd;
+ atomic64_t completed_cmd;
+ atomic64_t no_completion;
+};
+
+enum {
+ EFA_AQ_STATE_RUNNING_BIT = 0,
+ EFA_AQ_STATE_POLLING_BIT = 1,
+};
+
+struct efa_com_admin_queue {
+ void *dmadev;
+ void *efa_dev;
+ struct efa_comp_ctx *comp_ctx;
+ u32 completion_timeout; /* usecs */
+ u16 poll_interval; /* msecs */
+ u16 depth;
+ struct efa_com_admin_cq cq;
+ struct efa_com_admin_sq sq;
+ u16 msix_vector_idx;
+
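+	/* EFA_AQ_STATE_* bits */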
+ unsigned long state;
+
+ /* Count the number of available admin commands */
+ struct semaphore avail_cmds;
+
+ struct efa_com_stats_admin stats;
+
+ spinlock_t comp_ctx_lock; /* Protects completion context pool */
+ u32 *comp_ctx_pool;
+ u16 comp_ctx_pool_next;
+};
+
+struct efa_aenq_handlers;
+
+struct efa_com_aenq {
+ struct efa_admin_aenq_entry *entries;
+ struct efa_aenq_handlers *aenq_handlers;
+ dma_addr_t dma_addr;
+ u32 cc; /* consumer counter */
+ u16 msix_vector_idx;
+ u16 depth;
+ u8 phase;
+};
+
+struct efa_com_mmio_read {
+ struct efa_admin_mmio_req_read_less_resp *read_resp;
+ dma_addr_t read_resp_dma_addr;
+ u16 seq_num;
+ u16 mmio_read_timeout; /* usecs */
+ /* serializes mmio reads */
+ spinlock_t lock;
+};
+
+struct efa_com_dev {
+ struct efa_com_admin_queue aq;
+ struct efa_com_aenq aenq;
+ u8 __iomem *reg_bar;
+ void *dmadev;
+ void *efa_dev;
+ u32 supported_features;
+ u32 dma_addr_bits;
+
+ struct efa_com_mmio_read mmio_read;
+};
+
+typedef void (*efa_aenq_handler)(void *data,
+ struct efa_admin_aenq_entry *aenq_e);
+
+/* Holds aenq handlers. Indexed by AENQ event group */
+struct efa_aenq_handlers {
+ efa_aenq_handler handlers[EFA_MAX_HANDLERS];
+ efa_aenq_handler unimplemented_handler;
+};
+
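+/*
+ * Typical bring-up order (see efa_main.c): efa_com_mmio_reg_read_init(),
+ * efa_com_dev_reset(), efa_com_validate_version(), efa_com_get_dma_width()
+ * and finally efa_com_admin_init().
+ */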
+int efa_com_admin_init(struct efa_com_dev *edev,
+ struct efa_aenq_handlers *aenq_handlers);
+void efa_com_admin_destroy(struct efa_com_dev *edev);
+int efa_com_dev_reset(struct efa_com_dev *edev,
+ enum efa_regs_reset_reason_types reset_reason);
+void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling);
+void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev);
+int efa_com_mmio_reg_read_init(struct efa_com_dev *edev);
+void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev);
+
+int efa_com_validate_version(struct efa_com_dev *edev);
+int efa_com_get_dma_width(struct efa_com_dev *edev);
+
+int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
+ struct efa_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size);
+void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data);
+
+#endif /* _EFA_COM_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
new file mode 100644
index 000000000000..14227725521c
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include "efa.h"
+#include "efa_com.h"
+#include "efa_com_cmd.h"
+
+void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
+{
+ *addr_low = lower_32_bits(addr);
+ *addr_high = upper_32_bits(addr);
+}
+
+int efa_com_create_qp(struct efa_com_dev *edev,
+ struct efa_com_create_qp_params *params,
+ struct efa_com_create_qp_result *res)
+{
+ struct efa_admin_create_qp_cmd create_qp_cmd = {};
+ struct efa_admin_create_qp_resp cmd_completion;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ create_qp_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_QP;
+
+ create_qp_cmd.pd = params->pd;
+ create_qp_cmd.qp_type = params->qp_type;
+ create_qp_cmd.rq_base_addr = params->rq_base_addr;
+ create_qp_cmd.send_cq_idx = params->send_cq_idx;
+ create_qp_cmd.recv_cq_idx = params->recv_cq_idx;
+ create_qp_cmd.qp_alloc_size.send_queue_ring_size =
+ params->sq_ring_size_in_bytes;
+ create_qp_cmd.qp_alloc_size.send_queue_depth =
+ params->sq_depth;
+ create_qp_cmd.qp_alloc_size.recv_queue_ring_size =
+ params->rq_ring_size_in_bytes;
+ create_qp_cmd.qp_alloc_size.recv_queue_depth =
+ params->rq_depth;
+ create_qp_cmd.uar = params->uarn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&create_qp_cmd,
+ sizeof(create_qp_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to create qp [%d]\n", err);
+ return err;
+ }
+
+ res->qp_handle = cmd_completion.qp_handle;
+ res->qp_num = cmd_completion.qp_num;
+ res->sq_db_offset = cmd_completion.sq_db_offset;
+ res->rq_db_offset = cmd_completion.rq_db_offset;
+ res->llq_descriptors_offset = cmd_completion.llq_descriptors_offset;
+ res->send_sub_cq_idx = cmd_completion.send_sub_cq_idx;
+ res->recv_sub_cq_idx = cmd_completion.recv_sub_cq_idx;
+
+ return err;
+}
+
+int efa_com_modify_qp(struct efa_com_dev *edev,
+ struct efa_com_modify_qp_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_modify_qp_cmd cmd = {};
+ struct efa_admin_modify_qp_resp resp;
+ int err;
+
+ cmd.aq_common_desc.opcode = EFA_ADMIN_MODIFY_QP;
+ cmd.modify_mask = params->modify_mask;
+ cmd.qp_handle = params->qp_handle;
+ cmd.qp_state = params->qp_state;
+ cmd.cur_qp_state = params->cur_qp_state;
+ cmd.qkey = params->qkey;
+ cmd.sq_psn = params->sq_psn;
+ cmd.sq_drained_async_notify = params->sq_drained_async_notify;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err(edev->efa_dev,
+ "Failed to modify qp-%u modify_mask[%#x] [%d]\n",
+ cmd.qp_handle, cmd.modify_mask, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_query_qp(struct efa_com_dev *edev,
+ struct efa_com_query_qp_params *params,
+ struct efa_com_query_qp_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_query_qp_cmd cmd = {};
+ struct efa_admin_query_qp_resp resp;
+ int err;
+
+ cmd.aq_common_desc.opcode = EFA_ADMIN_QUERY_QP;
+ cmd.qp_handle = params->qp_handle;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to query qp-%u [%d]\n",
+ cmd.qp_handle, err);
+ return err;
+ }
+
+ result->qp_state = resp.qp_state;
+ result->qkey = resp.qkey;
+ result->sq_draining = resp.sq_draining;
+ result->sq_psn = resp.sq_psn;
+
+ return 0;
+}
+
+int efa_com_destroy_qp(struct efa_com_dev *edev,
+ struct efa_com_destroy_qp_params *params)
+{
+ struct efa_admin_destroy_qp_resp cmd_completion;
+ struct efa_admin_destroy_qp_cmd qp_cmd = {};
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ qp_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_QP;
+ qp_cmd.qp_handle = params->qp_handle;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&qp_cmd,
+ sizeof(qp_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err)
+ ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n",
+ qp_cmd.qp_handle, err);
+
+ return 0;
+}
+
+int efa_com_create_cq(struct efa_com_dev *edev,
+ struct efa_com_create_cq_params *params,
+ struct efa_com_create_cq_result *result)
+{
+ struct efa_admin_create_cq_resp cmd_completion;
+ struct efa_admin_create_cq_cmd create_cmd = {};
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ create_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_CQ;
+ create_cmd.cq_caps_2 = (params->entry_size_in_bytes / 4) &
+ EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ create_cmd.cq_depth = params->cq_depth;
+ create_cmd.num_sub_cqs = params->num_sub_cqs;
+ create_cmd.uar = params->uarn;
+
+ efa_com_set_dma_addr(params->dma_addr,
+ &create_cmd.cq_ba.mem_addr_high,
+ &create_cmd.cq_ba.mem_addr_low);
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to create cq[%d]\n", err);
+ return err;
+ }
+
+ result->cq_idx = cmd_completion.cq_idx;
+ result->actual_depth = params->cq_depth;
+
+ return err;
+}
+
+int efa_com_destroy_cq(struct efa_com_dev *edev,
+ struct efa_com_destroy_cq_params *params)
+{
+ struct efa_admin_destroy_cq_cmd destroy_cmd = {};
+ struct efa_admin_destroy_cq_resp destroy_resp;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ destroy_cmd.cq_idx = params->cq_idx;
+ destroy_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_CQ;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct efa_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (err)
+ ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n",
+ params->cq_idx, err);
+
+ return 0;
+}
+
+int efa_com_register_mr(struct efa_com_dev *edev,
+ struct efa_com_reg_mr_params *params,
+ struct efa_com_reg_mr_result *result)
+{
+ struct efa_admin_reg_mr_resp cmd_completion;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_reg_mr_cmd mr_cmd = {};
+ int err;
+
+ mr_cmd.aq_common_desc.opcode = EFA_ADMIN_REG_MR;
+ mr_cmd.pd = params->pd;
+ mr_cmd.mr_length = params->mr_length_in_bytes;
+ mr_cmd.flags |= params->page_shift &
+ EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
+ mr_cmd.iova = params->iova;
+ mr_cmd.permissions |= params->permissions &
+ EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
+
+ if (params->inline_pbl) {
+ memcpy(mr_cmd.pbl.inline_pbl_array,
+ params->pbl.inline_pbl_array,
+ sizeof(mr_cmd.pbl.inline_pbl_array));
+ } else {
+ mr_cmd.pbl.pbl.length = params->pbl.pbl.length;
+ mr_cmd.pbl.pbl.address.mem_addr_low =
+ params->pbl.pbl.address.mem_addr_low;
+ mr_cmd.pbl.pbl.address.mem_addr_high =
+ params->pbl.pbl.address.mem_addr_high;
+ mr_cmd.aq_common_desc.flags |=
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+ if (params->indirect)
+ mr_cmd.aq_common_desc.flags |=
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ }
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&mr_cmd,
+ sizeof(mr_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to register mr [%d]\n", err);
+ return err;
+ }
+
+ result->l_key = cmd_completion.l_key;
+ result->r_key = cmd_completion.r_key;
+
+ return 0;
+}
+
+int efa_com_dereg_mr(struct efa_com_dev *edev,
+ struct efa_com_dereg_mr_params *params)
+{
+ struct efa_admin_dereg_mr_resp cmd_completion;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_dereg_mr_cmd mr_cmd = {};
+ int err;
+
+ mr_cmd.aq_common_desc.opcode = EFA_ADMIN_DEREG_MR;
+ mr_cmd.l_key = params->l_key;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&mr_cmd,
+ sizeof(mr_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err)
+ ibdev_err(edev->efa_dev,
+ "Failed to de-register mr(lkey-%u) [%d]\n",
+ mr_cmd.l_key, err);
+
+ return 0;
+}
+
+int efa_com_create_ah(struct efa_com_dev *edev,
+ struct efa_com_create_ah_params *params,
+ struct efa_com_create_ah_result *result)
+{
+ struct efa_admin_create_ah_resp cmd_completion;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_create_ah_cmd ah_cmd = {};
+ int err;
+
+ ah_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_AH;
+
+ memcpy(ah_cmd.dest_addr, params->dest_addr, sizeof(ah_cmd.dest_addr));
+ ah_cmd.pd = params->pdn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&ah_cmd,
+ sizeof(ah_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to create ah [%d]\n", err);
+ return err;
+ }
+
+ result->ah = cmd_completion.ah;
+
+ return 0;
+}
+
+int efa_com_destroy_ah(struct efa_com_dev *edev,
+ struct efa_com_destroy_ah_params *params)
+{
+ struct efa_admin_destroy_ah_resp cmd_completion;
+ struct efa_admin_destroy_ah_cmd ah_cmd = {};
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ ah_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_AH;
+ ah_cmd.ah = params->ah;
+ ah_cmd.pd = params->pdn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&ah_cmd,
+ sizeof(ah_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err)
+ ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n",
+ ah_cmd.ah, ah_cmd.pd, err);
+
+ return 0;
+}
+
+static bool
+efa_com_check_supported_feature_id(struct efa_com_dev *edev,
+ enum efa_admin_aq_feature_id feature_id)
+{
+ u32 feature_mask = 1 << feature_id;
+
+	/* The device attributes feature is always supported */
+ if (feature_id != EFA_ADMIN_DEVICE_ATTR &&
+ !(edev->supported_features & feature_mask))
+ return false;
+
+ return true;
+}
+
+static int efa_com_get_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_get_feature_resp *get_resp,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct efa_admin_get_feature_cmd get_cmd = {};
+ struct efa_com_admin_queue *aq;
+ int err;
+
+ if (!efa_com_check_supported_feature_id(edev, feature_id)) {
+ ibdev_err(edev->efa_dev, "Feature %d isn't supported\n",
+ feature_id);
+ return -EOPNOTSUPP;
+ }
+
+ aq = &edev->aq;
+
+ get_cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_FEATURE;
+
+ if (control_buff_size)
+ get_cmd.aq_common_descriptor.flags =
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+
+ efa_com_set_dma_addr(control_buf_dma_addr,
+ &get_cmd.control_buffer.address.mem_addr_high,
+ &get_cmd.control_buffer.address.mem_addr_low);
+
+ get_cmd.control_buffer.length = control_buff_size;
+ get_cmd.feature_common.feature_id = feature_id;
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)
+ &get_cmd,
+ sizeof(get_cmd),
+ (struct efa_admin_acq_entry *)
+ get_resp,
+ sizeof(*get_resp));
+
+ if (err)
+ ibdev_err(edev->efa_dev,
+ "Failed to submit get_feature command %d [%d]\n",
+ feature_id, err);
+
+	return err;
+}
+
+static int efa_com_get_feature(struct efa_com_dev *edev,
+ struct efa_admin_get_feature_resp *get_resp,
+ enum efa_admin_aq_feature_id feature_id)
+{
+ return efa_com_get_feature_ex(edev, get_resp, feature_id, 0, 0);
+}
+
+int efa_com_get_network_attr(struct efa_com_dev *edev,
+ struct efa_com_get_network_attr_result *result)
+{
+ struct efa_admin_get_feature_resp resp;
+ int err;
+
+ err = efa_com_get_feature(edev, &resp,
+ EFA_ADMIN_NETWORK_ATTR);
+ if (err) {
+ ibdev_err(edev->efa_dev,
+ "Failed to get network attributes %d\n", err);
+ return err;
+ }
+
+ memcpy(result->addr, resp.u.network_attr.addr,
+ sizeof(resp.u.network_attr.addr));
+ result->mtu = resp.u.network_attr.mtu;
+
+ return 0;
+}
+
+int efa_com_get_device_attr(struct efa_com_dev *edev,
+ struct efa_com_get_device_attr_result *result)
+{
+ struct efa_admin_get_feature_resp resp;
+ int err;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR);
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to get device attributes %d\n",
+ err);
+ return err;
+ }
+
+ result->page_size_cap = resp.u.device_attr.page_size_cap;
+ result->fw_version = resp.u.device_attr.fw_version;
+ result->admin_api_version = resp.u.device_attr.admin_api_version;
+ result->device_version = resp.u.device_attr.device_version;
+ result->supported_features = resp.u.device_attr.supported_features;
+ result->phys_addr_width = resp.u.device_attr.phys_addr_width;
+ result->virt_addr_width = resp.u.device_attr.virt_addr_width;
+ result->db_bar = resp.u.device_attr.db_bar;
+
+ if (result->admin_api_version < 1) {
+ ibdev_err(edev->efa_dev,
+ "Failed to get device attr api version [%u < 1]\n",
+ result->admin_api_version);
+ return -EINVAL;
+ }
+
+ edev->supported_features = resp.u.device_attr.supported_features;
+ err = efa_com_get_feature(edev, &resp,
+ EFA_ADMIN_QUEUE_ATTR);
+ if (err) {
+ ibdev_err(edev->efa_dev,
+			  "Failed to get queue attributes %d\n", err);
+ return err;
+ }
+
+ result->max_qp = resp.u.queue_attr.max_qp;
+ result->max_sq_depth = resp.u.queue_attr.max_sq_depth;
+ result->max_rq_depth = resp.u.queue_attr.max_rq_depth;
+ result->max_cq = resp.u.queue_attr.max_cq;
+ result->max_cq_depth = resp.u.queue_attr.max_cq_depth;
+ result->inline_buf_size = resp.u.queue_attr.inline_buf_size;
+ result->max_sq_sge = resp.u.queue_attr.max_wr_send_sges;
+ result->max_rq_sge = resp.u.queue_attr.max_wr_recv_sges;
+ result->max_mr = resp.u.queue_attr.max_mr;
+ result->max_mr_pages = resp.u.queue_attr.max_mr_pages;
+ result->max_pd = resp.u.queue_attr.max_pd;
+ result->max_ah = resp.u.queue_attr.max_ah;
+ result->max_llq_size = resp.u.queue_attr.max_llq_size;
+ result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
+
+ return 0;
+}
+
+int efa_com_get_hw_hints(struct efa_com_dev *edev,
+ struct efa_com_get_hw_hints_result *result)
+{
+ struct efa_admin_get_feature_resp resp;
+ int err;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS);
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to get hw hints %d\n", err);
+ return err;
+ }
+
+ result->admin_completion_timeout = resp.u.hw_hints.admin_completion_timeout;
+ result->driver_watchdog_timeout = resp.u.hw_hints.driver_watchdog_timeout;
+ result->mmio_read_timeout = resp.u.hw_hints.mmio_read_timeout;
+ result->poll_interval = resp.u.hw_hints.poll_interval;
+
+ return 0;
+}
+
+static int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct efa_com_admin_queue *aq;
+ int err;
+
+ if (!efa_com_check_supported_feature_id(edev, feature_id)) {
+ ibdev_err(edev->efa_dev, "Feature %d isn't supported\n",
+ feature_id);
+ return -EOPNOTSUPP;
+ }
+
+ aq = &edev->aq;
+
+ set_cmd->aq_common_descriptor.opcode = EFA_ADMIN_SET_FEATURE;
+ if (control_buff_size) {
+ set_cmd->aq_common_descriptor.flags =
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ efa_com_set_dma_addr(control_buf_dma_addr,
+ &set_cmd->control_buffer.address.mem_addr_high,
+ &set_cmd->control_buffer.address.mem_addr_low);
+ }
+
+ set_cmd->control_buffer.length = control_buff_size;
+ set_cmd->feature_common.feature_id = feature_id;
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)set_cmd,
+ sizeof(*set_cmd),
+ (struct efa_admin_acq_entry *)set_resp,
+ sizeof(*set_resp));
+
+ if (err)
+ ibdev_err(edev->efa_dev,
+ "Failed to submit set_feature command %d error: %d\n",
+ feature_id, err);
+
+	return err;
+}
+
+static int efa_com_set_feature(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id)
+{
+ return efa_com_set_feature_ex(edev, set_resp, set_cmd, feature_id,
+ 0, 0);
+}
+
+int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
+{
+ struct efa_admin_get_feature_resp get_resp;
+ struct efa_admin_set_feature_resp set_resp;
+ struct efa_admin_set_feature_cmd cmd = {};
+ int err;
+
+ ibdev_dbg(edev->efa_dev, "Configuring aenq with groups[%#x]\n", groups);
+
+ err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG);
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to get aenq attributes: %d\n",
+ err);
+ return err;
+ }
+
+ ibdev_dbg(edev->efa_dev,
+ "Get aenq groups: supported[%#x] enabled[%#x]\n",
+ get_resp.u.aenq.supported_groups,
+ get_resp.u.aenq.enabled_groups);
+
+ if ((get_resp.u.aenq.supported_groups & groups) != groups) {
+ ibdev_err(edev->efa_dev,
+ "Trying to set unsupported aenq groups[%#x] supported[%#x]\n",
+ groups, get_resp.u.aenq.supported_groups);
+ return -EOPNOTSUPP;
+ }
+
+ cmd.u.aenq.enabled_groups = groups;
+ err = efa_com_set_feature(edev, &set_resp, &cmd,
+ EFA_ADMIN_AENQ_CONFIG);
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to set aenq attributes: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_alloc_pd(struct efa_com_dev *edev,
+ struct efa_com_alloc_pd_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_alloc_pd_cmd cmd = {};
+ struct efa_admin_alloc_pd_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_ALLOC_PD;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to allocate pd[%d]\n", err);
+ return err;
+ }
+
+ result->pdn = resp.pd;
+
+ return 0;
+}
+
+int efa_com_dealloc_pd(struct efa_com_dev *edev,
+ struct efa_com_dealloc_pd_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_dealloc_pd_cmd cmd = {};
+ struct efa_admin_dealloc_pd_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_DEALLOC_PD;
+ cmd.pd = params->pdn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to deallocate pd-%u [%d]\n",
+ cmd.pd, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_alloc_uar(struct efa_com_dev *edev,
+ struct efa_com_alloc_uar_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_alloc_uar_cmd cmd = {};
+ struct efa_admin_alloc_uar_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_ALLOC_UAR;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to allocate uar[%d]\n", err);
+ return err;
+ }
+
+ result->uarn = resp.uar;
+
+ return 0;
+}
+
+int efa_com_dealloc_uar(struct efa_com_dev *edev,
+ struct efa_com_dealloc_uar_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_dealloc_uar_cmd cmd = {};
+ struct efa_admin_dealloc_uar_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_DEALLOC_UAR;
+ cmd.uar = params->uarn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err(edev->efa_dev, "Failed to deallocate uar-%u [%d]\n",
+ cmd.uar, err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
new file mode 100644
index 000000000000..a1174380462c
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_COM_CMD_H_
+#define _EFA_COM_CMD_H_
+
+#include "efa_com.h"
+
+#define EFA_GID_SIZE 16
+
+struct efa_com_create_qp_params {
+ u64 rq_base_addr;
+ u32 send_cq_idx;
+ u32 recv_cq_idx;
+ /*
+ * Send descriptor ring size in bytes,
+ * sufficient for user-provided number of WQEs and SGL size
+ */
+ u32 sq_ring_size_in_bytes;
+ /* Max number of WQEs that will be posted on send queue */
+ u32 sq_depth;
+ /* Recv descriptor ring size in bytes */
+ u32 rq_ring_size_in_bytes;
+ u32 rq_depth;
+ u16 pd;
+ u16 uarn;
+ u8 qp_type;
+};
+
+struct efa_com_create_qp_result {
+ u32 qp_handle;
+ u32 qp_num;
+ u32 sq_db_offset;
+ u32 rq_db_offset;
+ u32 llq_descriptors_offset;
+ u16 send_sub_cq_idx;
+ u16 recv_sub_cq_idx;
+};
+
+struct efa_com_modify_qp_params {
+ u32 modify_mask;
+ u32 qp_handle;
+ u32 qp_state;
+ u32 cur_qp_state;
+ u32 qkey;
+ u32 sq_psn;
+ u8 sq_drained_async_notify;
+};
+
+struct efa_com_query_qp_params {
+ u32 qp_handle;
+};
+
+struct efa_com_query_qp_result {
+ u32 qp_state;
+ u32 qkey;
+ u32 sq_draining;
+ u32 sq_psn;
+};
+
+struct efa_com_destroy_qp_params {
+ u32 qp_handle;
+};
+
+struct efa_com_create_cq_params {
+ /* cq physical base address in OS memory */
+ dma_addr_t dma_addr;
+ /* completion queue depth in # of entries */
+ u16 cq_depth;
+ u16 num_sub_cqs;
+ u16 uarn;
+ u8 entry_size_in_bytes;
+};
+
+struct efa_com_create_cq_result {
+ /* cq identifier */
+ u16 cq_idx;
+ /* actual cq depth in # of entries */
+ u16 actual_depth;
+};
+
+struct efa_com_destroy_cq_params {
+ u16 cq_idx;
+};
+
+struct efa_com_create_ah_params {
+ u16 pdn;
+ /* Destination address in network byte order */
+ u8 dest_addr[EFA_GID_SIZE];
+};
+
+struct efa_com_create_ah_result {
+ u16 ah;
+};
+
+struct efa_com_destroy_ah_params {
+ u16 ah;
+ u16 pdn;
+};
+
+struct efa_com_get_network_attr_result {
+ u8 addr[EFA_GID_SIZE];
+ u32 mtu;
+};
+
+struct efa_com_get_device_attr_result {
+ u64 page_size_cap;
+ u64 max_mr_pages;
+ u32 fw_version;
+ u32 admin_api_version;
+ u32 device_version;
+ u32 supported_features;
+ u32 phys_addr_width;
+ u32 virt_addr_width;
+ u32 max_qp;
+ u32 max_sq_depth; /* wqes */
+ u32 max_rq_depth; /* wqes */
+ u32 max_cq;
+ u32 max_cq_depth; /* cqes */
+ u32 inline_buf_size;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_ah;
+ u32 max_llq_size;
+ u16 sub_cqs_per_cq;
+ u16 max_sq_sge;
+ u16 max_rq_sge;
+ u8 db_bar;
+};
+
+struct efa_com_get_hw_hints_result {
+ u16 mmio_read_timeout;
+ u16 driver_watchdog_timeout;
+ u16 admin_completion_timeout;
+ u16 poll_interval;
+ u32 reserved[4];
+};
+
+struct efa_com_mem_addr {
+ u32 mem_addr_low;
+ u32 mem_addr_high;
+};
+
+/* Used at indirect mode page list chunks for chaining */
+struct efa_com_ctrl_buff_info {
+	/* indicates length of the buffer pointed to by control_buffer_address. */
+ u32 length;
+ /* points to control buffer (direct or indirect) */
+ struct efa_com_mem_addr address;
+};
+
+struct efa_com_reg_mr_params {
+ /* Memory region length, in bytes. */
+ u64 mr_length_in_bytes;
+ /* IO Virtual Address associated with this MR. */
+ u64 iova;
+ /* words 8:15: Physical Buffer List, each element is page-aligned. */
+ union {
+ /*
+ * Inline array of physical addresses of app pages
+ * (optimization for short region reservations)
+ */
+ u64 inline_pbl_array[4];
+ /*
+ * Describes the next physically contiguous chunk of indirect
+ * page list. A page list contains physical addresses of command
+ * data pages. Data pages are 4KB; page list chunks are
+ * variable-sized.
+ */
+ struct efa_com_ctrl_buff_info pbl;
+ } pbl;
+ /* number of pages in PBL (redundant, could be calculated) */
+ u32 page_num;
+ /* Protection Domain */
+ u16 pd;
+ /*
+ * phys_page_size_shift - page size is (1 << phys_page_size_shift)
+ * Page size is used for building the Virtual to Physical
+ * address mapping
+ */
+ u8 page_shift;
+	/*
+	 * permissions
+	 * bit 0: local_write_enable - write permission, a value of 1 is
+	 *        needed for RQ buffers and for RDMA write
+	 * bits 1-7: reserved1 - remote access flags, etc.
+	 */
+ u8 permissions;
+ u8 inline_pbl;
+ u8 indirect;
+};
+
+struct efa_com_reg_mr_result {
+ /*
+ * To be used in conjunction with local buffers references in SQ and
+ * RQ WQE
+ */
+ u32 l_key;
+ /*
+ * To be used in incoming RDMA semantics messages to refer to remotely
+ * accessed memory region
+ */
+ u32 r_key;
+};
+
+struct efa_com_dereg_mr_params {
+ u32 l_key;
+};
+
+struct efa_com_alloc_pd_result {
+ u16 pdn;
+};
+
+struct efa_com_dealloc_pd_params {
+ u16 pdn;
+};
+
+struct efa_com_alloc_uar_result {
+ u16 uarn;
+};
+
+struct efa_com_dealloc_uar_params {
+ u16 uarn;
+};
+
+void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
+int efa_com_create_qp(struct efa_com_dev *edev,
+ struct efa_com_create_qp_params *params,
+ struct efa_com_create_qp_result *res);
+int efa_com_modify_qp(struct efa_com_dev *edev,
+ struct efa_com_modify_qp_params *params);
+int efa_com_query_qp(struct efa_com_dev *edev,
+ struct efa_com_query_qp_params *params,
+ struct efa_com_query_qp_result *result);
+int efa_com_destroy_qp(struct efa_com_dev *edev,
+ struct efa_com_destroy_qp_params *params);
+int efa_com_create_cq(struct efa_com_dev *edev,
+ struct efa_com_create_cq_params *params,
+ struct efa_com_create_cq_result *result);
+int efa_com_destroy_cq(struct efa_com_dev *edev,
+ struct efa_com_destroy_cq_params *params);
+int efa_com_register_mr(struct efa_com_dev *edev,
+ struct efa_com_reg_mr_params *params,
+ struct efa_com_reg_mr_result *result);
+int efa_com_dereg_mr(struct efa_com_dev *edev,
+ struct efa_com_dereg_mr_params *params);
+int efa_com_create_ah(struct efa_com_dev *edev,
+ struct efa_com_create_ah_params *params,
+ struct efa_com_create_ah_result *result);
+int efa_com_destroy_ah(struct efa_com_dev *edev,
+ struct efa_com_destroy_ah_params *params);
+int efa_com_get_network_attr(struct efa_com_dev *edev,
+ struct efa_com_get_network_attr_result *result);
+int efa_com_get_device_attr(struct efa_com_dev *edev,
+ struct efa_com_get_device_attr_result *result);
+int efa_com_get_hw_hints(struct efa_com_dev *edev,
+ struct efa_com_get_hw_hints_result *result);
+int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups);
+int efa_com_alloc_pd(struct efa_com_dev *edev,
+ struct efa_com_alloc_pd_result *result);
+int efa_com_dealloc_pd(struct efa_com_dev *edev,
+ struct efa_com_dealloc_pd_params *params);
+int efa_com_alloc_uar(struct efa_com_dev *edev,
+ struct efa_com_alloc_uar_result *result);
+int efa_com_dealloc_uar(struct efa_com_dev *edev,
+ struct efa_com_dealloc_uar_params *params);
+
+#endif /* _EFA_COM_CMD_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_common_defs.h b/drivers/infiniband/hw/efa/efa_common_defs.h
new file mode 100644
index 000000000000..c559ec08898e
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_common_defs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_COMMON_H_
+#define _EFA_COMMON_H_
+
+#define EFA_COMMON_SPEC_VERSION_MAJOR 2
+#define EFA_COMMON_SPEC_VERSION_MINOR 0
+
+struct efa_common_mem_addr {
+ u32 mem_addr_low;
+
+ u32 mem_addr_high;
+};
+
+#endif /* _EFA_COMMON_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
new file mode 100644
index 000000000000..db974caf1eb1
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <rdma/ib_user_verbs.h>
+
+#include "efa.h"
+
+#define PCI_DEV_ID_EFA_VF 0xefa0
+
+static const struct pci_device_id efa_pci_tbl[] = {
+ { PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA_VF) },
+ { }
+};
+
+MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION(DEVICE_NAME);
+MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
+
+#define EFA_REG_BAR 0
+#define EFA_MEM_BAR 2
+#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))
+
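+/* Async event notification groups the driver enables on the device */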
+#define EFA_AENQ_ENABLED_GROUPS \
+ (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
+ BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
+
+static void efa_update_network_attr(struct efa_dev *dev,
+ struct efa_com_get_network_attr_result *network_attr)
+{
+ memcpy(dev->addr, network_attr->addr, sizeof(network_attr->addr));
+ dev->mtu = network_attr->mtu;
+
+ dev_dbg(&dev->pdev->dev, "Full address %pI6\n", dev->addr);
+}
+
+/*
+ * This handler will be called for an unknown event group or for
+ * unimplemented handlers.
+ */
+static void unimplemented_aenq_handler(void *data,
+ struct efa_admin_aenq_entry *aenq_e)
+{
+ struct efa_dev *dev = (struct efa_dev *)data;
+
+ ibdev_err(&dev->ibdev,
+ "Unknown event was received or event with unimplemented handler\n");
+}
+
+static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
+{
+ struct efa_dev *dev = (struct efa_dev *)data;
+
+ atomic64_inc(&dev->stats.keep_alive_rcvd);
+}
+
+static struct efa_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
+
+static void efa_release_bars(struct efa_dev *dev, int bars_mask)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int release_bars;
+
+ release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
+ pci_release_selected_regions(pdev, release_bars);
+}
+
+static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
+{
+ struct efa_dev *dev = data;
+
+ efa_com_admin_q_comp_intr_handler(&dev->edev);
+ efa_com_aenq_intr_handler(&dev->edev, data);
+
+ return IRQ_HANDLED;
+}
+
+static int efa_request_mgmnt_irq(struct efa_dev *dev)
+{
+ struct efa_irq *irq;
+ int err;
+
+ irq = &dev->admin_irq;
+ err = request_irq(irq->vector, irq->handler, 0, irq->name,
+ irq->data);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n",
+ err);
+ return err;
+ }
+
+ dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n",
+ nr_cpumask_bits, &irq->affinity_hint_mask, irq->vector);
+ irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+
+ return err;
+}
+
+static void efa_setup_mgmnt_irq(struct efa_dev *dev)
+{
+ u32 cpu;
+
+ snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
+ "efa-mgmnt@pci:%s", pci_name(dev->pdev));
+ dev->admin_irq.handler = efa_intr_msix_mgmnt;
+ dev->admin_irq.data = dev;
+ dev->admin_irq.vector =
+ pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx);
+ cpu = cpumask_first(cpu_online_mask);
+ dev->admin_irq.cpu = cpu;
+ cpumask_set_cpu(cpu,
+ &dev->admin_irq.affinity_hint_mask);
+ dev_info(&dev->pdev->dev, "Setup irq:0x%p vector:%d name:%s\n",
+ &dev->admin_irq,
+ dev->admin_irq.vector,
+ dev->admin_irq.name);
+}
+
+static void efa_free_mgmnt_irq(struct efa_dev *dev)
+{
+ struct efa_irq *irq;
+
+ irq = &dev->admin_irq;
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_irq(irq->vector, irq->data);
+}
+
+static int efa_set_mgmnt_irq(struct efa_dev *dev)
+{
+ efa_setup_mgmnt_irq(dev);
+
+ return efa_request_mgmnt_irq(dev);
+}
+
+static int efa_request_doorbell_bar(struct efa_dev *dev)
+{
+ u8 db_bar_idx = dev->dev_attr.db_bar;
+ struct pci_dev *pdev = dev->pdev;
+ int bars;
+ int err;
+
+ if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);
+
+ err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(&dev->pdev->dev,
+ "pci_request_selected_regions for bar %d failed %d\n",
+ db_bar_idx, err);
+ return err;
+ }
+ }
+
+ dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
+ dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);
+
+ return 0;
+}
+
+static void efa_release_doorbell_bar(struct efa_dev *dev)
+{
+ if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
+ efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
+}
+
+static void efa_update_hw_hints(struct efa_dev *dev,
+ struct efa_com_get_hw_hints_result *hw_hints)
+{
+ struct efa_com_dev *edev = &dev->edev;
+
+ if (hw_hints->mmio_read_timeout)
+ edev->mmio_read.mmio_read_timeout =
+ hw_hints->mmio_read_timeout * 1000;
+
+ if (hw_hints->poll_interval)
+ edev->aq.poll_interval = hw_hints->poll_interval;
+
+ if (hw_hints->admin_completion_timeout)
+ edev->aq.completion_timeout =
+ hw_hints->admin_completion_timeout;
+}
+
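+/* dev->stats is treated as a flat array of atomic64_t counters */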
+static void efa_stats_init(struct efa_dev *dev)
+{
+ atomic64_t *s = (atomic64_t *)&dev->stats;
+ int i;
+
+ for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
+ atomic64_set(s, 0);
+}
+
+static const struct ib_device_ops efa_dev_ops = {
+ .alloc_pd = efa_alloc_pd,
+ .alloc_ucontext = efa_alloc_ucontext,
+ .create_ah = efa_create_ah,
+ .create_cq = efa_create_cq,
+ .create_qp = efa_create_qp,
+ .dealloc_pd = efa_dealloc_pd,
+ .dealloc_ucontext = efa_dealloc_ucontext,
+ .dereg_mr = efa_dereg_mr,
+ .destroy_ah = efa_destroy_ah,
+ .destroy_cq = efa_destroy_cq,
+ .destroy_qp = efa_destroy_qp,
+ .get_link_layer = efa_port_link_layer,
+ .get_port_immutable = efa_get_port_immutable,
+ .mmap = efa_mmap,
+ .modify_qp = efa_modify_qp,
+ .query_device = efa_query_device,
+ .query_gid = efa_query_gid,
+ .query_pkey = efa_query_pkey,
+ .query_port = efa_query_port,
+ .query_qp = efa_query_qp,
+ .reg_user_mr = efa_reg_mr,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
+};
+
+static int efa_ib_device_add(struct efa_dev *dev)
+{
+ struct efa_com_get_network_attr_result network_attr;
+ struct efa_com_get_hw_hints_result hw_hints;
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ efa_stats_init(dev);
+
+ err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
+ if (err)
+ return err;
+
+ dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
+ err = efa_request_doorbell_bar(dev);
+ if (err)
+ return err;
+
+ err = efa_com_get_network_attr(&dev->edev, &network_attr);
+ if (err)
+ goto err_release_doorbell_bar;
+
+ efa_update_network_attr(dev, &network_attr);
+
+ err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
+ if (err)
+ goto err_release_doorbell_bar;
+
+ efa_update_hw_hints(dev, &hw_hints);
+
+ /* Try to enable all the available aenq groups */
+ err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
+ if (err)
+ goto err_release_doorbell_bar;
+
+ dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.num_comp_vectors = 1;
+ dev->ibdev.dev.parent = &pdev->dev;
+ dev->ibdev.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION;
+
+ dev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
+
+ dev->ibdev.uverbs_ex_cmd_mask =
+ (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
+
+ dev->ibdev.driver_id = RDMA_DRIVER_EFA;
+ ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
+
+ err = ib_register_device(&dev->ibdev, "efa_%d");
+ if (err)
+ goto err_release_doorbell_bar;
+
+ ibdev_info(&dev->ibdev, "IB device registered\n");
+
+ return 0;
+
+err_release_doorbell_bar:
+ efa_release_doorbell_bar(dev);
+ return err;
+}
+
+static void efa_ib_device_remove(struct efa_dev *dev)
+{
+ efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
+ ibdev_info(&dev->ibdev, "Unregister ib device\n");
+ ib_unregister_device(&dev->ibdev);
+ efa_release_doorbell_bar(dev);
+}
+
+static void efa_disable_msix(struct efa_dev *dev)
+{
+ pci_free_irq_vectors(dev->pdev);
+}
+
+static int efa_enable_msix(struct efa_dev *dev)
+{
+ int msix_vecs, irq_num;
+
+ /* Reserve the max msix vectors we might need */
+ msix_vecs = EFA_NUM_MSIX_VEC;
+ dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
+ msix_vecs);
+
+ dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
+ irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
+ msix_vecs, PCI_IRQ_MSIX);
+
+ if (irq_num < 0) {
+ dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
+ irq_num);
+ return -ENOSPC;
+ }
+
+ if (irq_num != msix_vecs) {
+ dev_err(&dev->pdev->dev,
+ "Allocated %d MSI-X (out of %d requested)\n",
+ irq_num, msix_vecs);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
+{
+ int dma_width;
+ int err;
+
+ err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
+ if (err)
+ return err;
+
+ err = efa_com_validate_version(edev);
+ if (err)
+ return err;
+
+ dma_width = efa_com_get_dma_width(edev);
+ if (dma_width < 0) {
+ err = dma_width;
+ return err;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_dma_mask failed %d\n", err);
+ return err;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ if (err) {
+ dev_err(&pdev->dev,
+			"pci_set_consistent_dma_mask failed %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
+{
+ struct efa_com_dev *edev;
+ struct efa_dev *dev;
+ int bars;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
+ return ERR_PTR(err);
+ }
+
+ pci_set_master(pdev);
+
+ dev = ib_alloc_device(efa_dev, ibdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "Device alloc failed\n");
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ edev = &dev->edev;
+ edev->efa_dev = dev;
+ edev->dmadev = &pdev->dev;
+ dev->pdev = pdev;
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
+ err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
+ err);
+ goto err_ibdev_destroy;
+ }
+
+ dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
+ dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
+ dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
+ dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);
+
+ edev->reg_bar = devm_ioremap(&pdev->dev,
+ dev->reg_bar_addr,
+ dev->reg_bar_len);
+ if (!edev->reg_bar) {
+ dev_err(&pdev->dev, "Failed to remap register bar\n");
+ err = -EFAULT;
+ goto err_release_bars;
+ }
+
+ err = efa_com_mmio_reg_read_init(edev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init readless MMIO\n");
+ goto err_iounmap;
+ }
+
+ err = efa_device_init(edev, pdev);
+ if (err) {
+ dev_err(&pdev->dev, "EFA device init failed\n");
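+		/* A reset timeout may mean the device is not ready yet */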
+ if (err == -ETIME)
+ err = -EPROBE_DEFER;
+ goto err_reg_read_destroy;
+ }
+
+ err = efa_enable_msix(dev);
+ if (err)
+ goto err_reg_read_destroy;
+
+ edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
+ edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;
+
+ err = efa_set_mgmnt_irq(dev);
+ if (err)
+ goto err_disable_msix;
+
+ err = efa_com_admin_init(edev, &aenq_handlers);
+ if (err)
+ goto err_free_mgmnt_irq;
+
+ return dev;
+
+err_free_mgmnt_irq:
+ efa_free_mgmnt_irq(dev);
+err_disable_msix:
+ efa_disable_msix(dev);
+err_reg_read_destroy:
+ efa_com_mmio_reg_read_destroy(edev);
+err_iounmap:
+ devm_iounmap(&pdev->dev, edev->reg_bar);
+err_release_bars:
+ efa_release_bars(dev, EFA_BASE_BAR_MASK);
+err_ibdev_destroy:
+ ib_dealloc_device(&dev->ibdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ERR_PTR(err);
+}
+
+static void efa_remove_device(struct pci_dev *pdev)
+{
+ struct efa_dev *dev = pci_get_drvdata(pdev);
+ struct efa_com_dev *edev;
+
+ edev = &dev->edev;
+ efa_com_admin_destroy(edev);
+ efa_free_mgmnt_irq(dev);
+ efa_disable_msix(dev);
+ efa_com_mmio_reg_read_destroy(edev);
+ devm_iounmap(&pdev->dev, edev->reg_bar);
+ efa_release_bars(dev, EFA_BASE_BAR_MASK);
+ ib_dealloc_device(&dev->ibdev);
+ pci_disable_device(pdev);
+}
+
+static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct efa_dev *dev;
+ int err;
+
+ dev = efa_probe_device(pdev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ err = efa_ib_device_add(dev);
+ if (err)
+ goto err_remove_device;
+
+ return 0;
+
+err_remove_device:
+ efa_remove_device(pdev);
+ return err;
+}
+
+static void efa_remove(struct pci_dev *pdev)
+{
+ struct efa_dev *dev = pci_get_drvdata(pdev);
+
+ efa_ib_device_remove(dev);
+ efa_remove_device(pdev);
+}
+
+static struct pci_driver efa_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = efa_pci_tbl,
+ .probe = efa_probe,
+ .remove = efa_remove,
+};
+
+module_pci_driver(efa_pci_driver);
diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h
new file mode 100644
index 000000000000..bb9cad3d6a15
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_regs_defs.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_REGS_H_
+#define _EFA_REGS_H_
+
+enum efa_regs_reset_reason_types {
+ EFA_REGS_RESET_NORMAL = 0,
+ /* Keep alive timeout */
+ EFA_REGS_RESET_KEEP_ALIVE_TO = 1,
+ EFA_REGS_RESET_ADMIN_TO = 2,
+ EFA_REGS_RESET_INIT_ERR = 3,
+ EFA_REGS_RESET_DRIVER_INVALID_STATE = 4,
+ EFA_REGS_RESET_OS_TRIGGER = 5,
+ EFA_REGS_RESET_SHUTDOWN = 6,
+ EFA_REGS_RESET_USER_TRIGGER = 7,
+ EFA_REGS_RESET_GENERIC = 8,
+};
+
+/* efa_registers offsets */
+
+/* 0 base */
+#define EFA_REGS_VERSION_OFF 0x0
+#define EFA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define EFA_REGS_CAPS_OFF 0x8
+#define EFA_REGS_AQ_BASE_LO_OFF 0x10
+#define EFA_REGS_AQ_BASE_HI_OFF 0x14
+#define EFA_REGS_AQ_CAPS_OFF 0x18
+#define EFA_REGS_ACQ_BASE_LO_OFF 0x20
+#define EFA_REGS_ACQ_BASE_HI_OFF 0x24
+#define EFA_REGS_ACQ_CAPS_OFF 0x28
+#define EFA_REGS_AQ_PROD_DB_OFF 0x2c
+#define EFA_REGS_AENQ_CAPS_OFF 0x34
+#define EFA_REGS_AENQ_BASE_LO_OFF 0x38
+#define EFA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define EFA_REGS_AENQ_CONS_DB_OFF 0x40
+#define EFA_REGS_INTR_MASK_OFF 0x4c
+#define EFA_REGS_DEV_CTL_OFF 0x54
+#define EFA_REGS_DEV_STS_OFF 0x58
+#define EFA_REGS_MMIO_REG_READ_OFF 0x5c
+#define EFA_REGS_MMIO_RESP_LO_OFF 0x60
+#define EFA_REGS_MMIO_RESP_HI_OFF 0x64
+
+/* version register */
+#define EFA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define EFA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define EFA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+
+/* controller_version register */
+#define EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+
+/* caps register */
+#define EFA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define EFA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define EFA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
+
+/* aq_caps register */
+#define EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* acq_caps register */
+#define EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xff0000
+#define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT 24
+#define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK 0xff000000
+
+/* aenq_caps register */
+#define EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xff0000
+#define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT 24
+#define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK 0xff000000
+
+/* dev_ctl register */
+#define EFA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define EFA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define EFA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define EFA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define EFA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
+
+/* dev_sts register */
+#define EFA_REGS_DEV_STS_READY_MASK 0x1
+#define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define EFA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define EFA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define EFA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define EFA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+
+/* mmio_reg_read register */
+#define EFA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define EFA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+
+#endif /* _EFA_REGS_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
new file mode 100644
index 000000000000..6d6886c9009f
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -0,0 +1,1825 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include <rdma/ib_addr.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "efa.h"
+
+#define EFA_MMAP_FLAG_SHIFT 56
+#define EFA_MMAP_PAGE_MASK GENMASK(EFA_MMAP_FLAG_SHIFT - 1, 0)
+#define EFA_MMAP_INVALID U64_MAX
+
+enum {
+ EFA_MMAP_DMA_PAGE = 0,
+ EFA_MMAP_IO_WC,
+ EFA_MMAP_IO_NC,
+};
+
+#define EFA_AENQ_ENABLED_GROUPS \
+ (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
+ BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
+
+struct efa_mmap_entry {
+ void *obj;
+ u64 address;
+ u64 length;
+ u32 mmap_page;
+ u8 mmap_flag;
+};
+
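+/*
+ * The mmap key encodes the mmap flag in the top byte and the xarray page
+ * index, shifted by PAGE_SHIFT, in the lower EFA_MMAP_FLAG_SHIFT bits.
+ */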
+static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
+{
+ return ((u64)efa->mmap_flag << EFA_MMAP_FLAG_SHIFT) |
+ ((u64)efa->mmap_page << PAGE_SHIFT);
+}
+
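+/*
+ * Each EFA_CHUNK_SIZE chunk of an indirect page list holds EFA_PTRS_PER_CHUNK
+ * page addresses followed by a single efa_com_ctrl_buff_info used for
+ * chaining to the next chunk.
+ */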
+#define EFA_CHUNK_PAYLOAD_SHIFT 12
+#define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
+#define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
+
+#define EFA_CHUNK_SHIFT 12
+#define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
+#define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)
+
+#define EFA_PTRS_PER_CHUNK \
+ ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
+
+#define EFA_CHUNK_USED_SIZE \
+ ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
+
+#define EFA_SUPPORTED_ACCESS_FLAGS IB_ACCESS_LOCAL_WRITE
+
+struct pbl_chunk {
+ dma_addr_t dma_addr;
+ u64 *buf;
+ u32 length;
+};
+
+struct pbl_chunk_list {
+ struct pbl_chunk *chunks;
+ unsigned int size;
+};
+
+struct pbl_context {
+ union {
+ struct {
+ dma_addr_t dma_addr;
+ } continuous;
+ struct {
+ u32 pbl_buf_size_in_pages;
+ struct scatterlist *sgl;
+ int sg_dma_cnt;
+ struct pbl_chunk_list chunk_list;
+ } indirect;
+ } phys;
+ u64 *pbl_buf;
+ u32 pbl_buf_size_in_bytes;
+ u8 physically_continuous;
+};
+
+static inline struct efa_dev *to_edev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct efa_dev, ibdev);
+}
+
+static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct efa_ucontext, ibucontext);
+}
+
+static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct efa_pd, ibpd);
+}
+
+static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct efa_mr, ibmr);
+}
+
+static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct efa_qp, ibqp);
+}
+
+static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct efa_cq, ibcq);
+}
+
+static inline struct efa_ah *to_eah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct efa_ah, ibah);
+}
+
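+/*
+ * True when field 'fld' of struct type 'x' lies entirely within the first
+ * 'sz' bytes.
+ */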
+#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
+ sizeof(((typeof(x) *)0)->fld) <= (sz))
+
+#define is_reserved_cleared(reserved) \
+ !memchr_inv(reserved, 0, sizeof(reserved))
+
+static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ void *addr;
+
+ addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+ if (!addr)
+ return NULL;
+
+ *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
+ if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
+ ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
+ free_pages_exact(addr, size);
+ return NULL;
+ }
+
+ return addr;
+}
+
+/*
+ * This is only called when the ucontext is destroyed and there can be no
+ * concurrent query via mmap or allocate on the xarray, thus we can be sure no
+ * other thread is using the entry pointer. We also know that all the BAR
+ * pages have either been zapped or munmapped at this point. Normal pages are
+ * refcounted and will be freed at the proper time.
+ */
+static void mmap_entries_remove_free(struct efa_dev *dev,
+ struct efa_ucontext *ucontext)
+{
+ struct efa_mmap_entry *entry;
+ unsigned long mmap_page;
+
+ xa_for_each(&ucontext->mmap_xa, mmap_page, entry) {
+ xa_erase(&ucontext->mmap_xa, mmap_page);
+
+ ibdev_dbg(
+ &dev->ibdev,
+ "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
+ entry->obj, get_mmap_key(entry), entry->address,
+ entry->length);
+ if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
+ /* DMA mapping is already gone, now free the pages */
+ free_pages_exact(phys_to_virt(entry->address),
+ entry->length);
+ kfree(entry);
+ }
+}
+
+static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev,
+ struct efa_ucontext *ucontext,
+ u64 key, u64 len)
+{
+ struct efa_mmap_entry *entry;
+ u64 mmap_page;
+
+ mmap_page = (key & EFA_MMAP_PAGE_MASK) >> PAGE_SHIFT;
+ if (mmap_page > U32_MAX)
+ return NULL;
+
+ entry = xa_load(&ucontext->mmap_xa, mmap_page);
+ if (!entry || get_mmap_key(entry) != key || entry->length != len)
+ return NULL;
+
+ ibdev_dbg(&dev->ibdev,
+		  "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] found\n",
+ entry->obj, key, entry->address, entry->length);
+
+ return entry;
+}
+
+/*
+ * Note this locking scheme cannot support removal of entries, except during
+ * ucontext destruction when the core code guarantees no concurrency.
+ */
+static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
+ void *obj, u64 address, u64 length, u8 mmap_flag)
+{
+ struct efa_mmap_entry *entry;
+ int err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return EFA_MMAP_INVALID;
+
+ entry->obj = obj;
+ entry->address = address;
+ entry->length = length;
+ entry->mmap_flag = mmap_flag;
+
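+	/* Reserve a consecutive range of xarray pages covering the mapping length */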
+ xa_lock(&ucontext->mmap_xa);
+ entry->mmap_page = ucontext->mmap_xa_page;
+ ucontext->mmap_xa_page += DIV_ROUND_UP(length, PAGE_SIZE);
+ err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
+ GFP_KERNEL);
+ xa_unlock(&ucontext->mmap_xa);
+	if (err) {
+ kfree(entry);
+ return EFA_MMAP_INVALID;
+ }
+
+ ibdev_dbg(
+ &dev->ibdev,
+ "mmap: obj[0x%p] addr[%#llx], len[%#llx], key[%#llx] inserted\n",
+ entry->obj, entry->address, entry->length, get_mmap_key(entry));
+
+ return get_mmap_key(entry);
+}
+
+int efa_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata)
+{
+ struct efa_com_get_device_attr_result *dev_attr;
+ struct efa_ibv_ex_query_device_resp resp = {};
+ struct efa_dev *dev = to_edev(ibdev);
+ int err;
+
+ if (udata && udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(ibdev,
+ "Incompatible ABI params, udata not cleared\n");
+ return -EINVAL;
+ }
+
+ dev_attr = &dev->dev_attr;
+
+ memset(props, 0, sizeof(*props));
+ props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
+ props->page_size_cap = dev_attr->page_size_cap;
+ props->vendor_id = dev->pdev->vendor;
+ props->vendor_part_id = dev->pdev->device;
+ props->hw_ver = dev->pdev->subsystem_device;
+ props->max_qp = dev_attr->max_qp;
+ props->max_cq = dev_attr->max_cq;
+ props->max_pd = dev_attr->max_pd;
+ props->max_mr = dev_attr->max_mr;
+ props->max_ah = dev_attr->max_ah;
+ props->max_cqe = dev_attr->max_cq_depth;
+ props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
+ dev_attr->max_rq_depth);
+ props->max_send_sge = dev_attr->max_sq_sge;
+ props->max_recv_sge = dev_attr->max_rq_sge;
+
+ if (udata && udata->outlen) {
+ resp.max_sq_sge = dev_attr->max_sq_sge;
+ resp.max_rq_sge = dev_attr->max_rq_sge;
+ resp.max_sq_wr = dev_attr->max_sq_depth;
+ resp.max_rq_wr = dev_attr->max_rq_depth;
+
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(ibdev,
+ "Failed to copy udata for query_device\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int efa_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct efa_dev *dev = to_edev(ibdev);
+
+ props->lmc = 1;
+
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = 5;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->active_speed = IB_SPEED_EDR;
+ props->active_width = IB_WIDTH_4X;
+ props->max_mtu = ib_mtu_int_to_enum(dev->mtu);
+ props->active_mtu = ib_mtu_int_to_enum(dev->mtu);
+ props->max_msg_sz = dev->mtu;
+ props->max_vl_num = 1;
+
+ return 0;
+}
+
+int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct efa_dev *dev = to_edev(ibqp->device);
+ struct efa_com_query_qp_params params = {};
+ struct efa_com_query_qp_result result;
+ struct efa_qp *qp = to_eqp(ibqp);
+ int err;
+
+#define EFA_QUERY_QP_SUPP_MASK \
+ (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
+ IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP)
+
+ if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
+ qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
+ return -EOPNOTSUPP;
+ }
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ params.qp_handle = qp->qp_handle;
+ err = efa_com_query_qp(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ qp_attr->qp_state = result.qp_state;
+ qp_attr->qkey = result.qkey;
+ qp_attr->sq_psn = result.sq_psn;
+ qp_attr->sq_draining = result.sq_draining;
+ qp_attr->port_num = 1;
+
+ qp_attr->cap.max_send_wr = qp->max_send_wr;
+ qp_attr->cap.max_recv_wr = qp->max_recv_wr;
+ qp_attr->cap.max_send_sge = qp->max_send_sge;
+ qp_attr->cap.max_recv_sge = qp->max_recv_sge;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->qp_context = ibqp->qp_context;
+ qp_init_attr->cap = qp_attr->cap;
+
+ return 0;
+}
+
+int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct efa_dev *dev = to_edev(ibdev);
+
+ memcpy(gid->raw, dev->addr, sizeof(dev->addr));
+
+ return 0;
+}
+
+int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ if (index > 0)
+ return -EINVAL;
+
+ *pkey = 0xffff;
+ return 0;
+}
+
+static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
+{
+ struct efa_com_dealloc_pd_params params = {
+ .pdn = pdn,
+ };
+
+ return efa_com_dealloc_pd(&dev->edev, &params);
+}
+
+int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_ibv_alloc_pd_resp resp = {};
+ struct efa_com_alloc_pd_result result;
+ struct efa_pd *pd = to_epd(ibpd);
+ int err;
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, udata not cleared\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = efa_com_alloc_pd(&dev->edev, &result);
+ if (err)
+ goto err_out;
+
+ pd->pdn = result.pdn;
+ resp.pdn = result.pdn;
+
+ if (udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Failed to copy udata for alloc_pd\n");
+ goto err_dealloc_pd;
+ }
+ }
+
+ ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
+
+ return 0;
+
+err_dealloc_pd:
+ efa_pd_dealloc(dev, result.pdn);
+err_out:
+ atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
+ return err;
+}
+
+void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_pd *pd = to_epd(ibpd);
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
+ return;
+ }
+
+ ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
+ efa_pd_dealloc(dev, pd->pdn);
+}
+
+static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
+{
+ struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
+
+ return efa_com_destroy_qp(&dev->edev, &params);
+}
+
+int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibqp->pd->device);
+ struct efa_qp *qp = to_eqp(ibqp);
+ int err;
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
+ return -EINVAL;
+ }
+
+ ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
+ err = efa_destroy_qp_handle(dev, qp->qp_handle);
+ if (err)
+ return err;
+
+ if (qp->rq_cpu_addr) {
+ ibdev_dbg(&dev->ibdev,
+ "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
+ qp->rq_cpu_addr, qp->rq_size,
+ &qp->rq_dma_addr);
+ dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
+ DMA_TO_DEVICE);
+ }
+
+ kfree(qp);
+ return 0;
+}
+
+static int qp_mmap_entries_setup(struct efa_qp *qp,
+ struct efa_dev *dev,
+ struct efa_ucontext *ucontext,
+ struct efa_com_create_qp_params *params,
+ struct efa_ibv_create_qp_resp *resp)
+{
+ /*
+ * Once an entry is inserted it might be mmapped, hence cannot be
+ * cleaned up until dealloc_ucontext.
+ */
+ resp->sq_db_mmap_key =
+ mmap_entry_insert(dev, ucontext, qp,
+ dev->db_bar_addr + resp->sq_db_offset,
+ PAGE_SIZE, EFA_MMAP_IO_NC);
+ if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
+ return -ENOMEM;
+
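+ /*
+ * Report only the in-page offset to userspace; the containing page is
+ * reached through the mmap key above.
+ */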
+ resp->sq_db_offset &= ~PAGE_MASK;
+
+ resp->llq_desc_mmap_key =
+ mmap_entry_insert(dev, ucontext, qp,
+ dev->mem_bar_addr + resp->llq_desc_offset,
+ PAGE_ALIGN(params->sq_ring_size_in_bytes +
+ (resp->llq_desc_offset & ~PAGE_MASK)),
+ EFA_MMAP_IO_WC);
+ if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
+ return -ENOMEM;
+
+ resp->llq_desc_offset &= ~PAGE_MASK;
+
+ if (qp->rq_size) {
+ resp->rq_db_mmap_key =
+ mmap_entry_insert(dev, ucontext, qp,
+ dev->db_bar_addr + resp->rq_db_offset,
+ PAGE_SIZE, EFA_MMAP_IO_NC);
+ if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
+ return -ENOMEM;
+
+ resp->rq_db_offset &= ~PAGE_MASK;
+
+ resp->rq_mmap_key =
+ mmap_entry_insert(dev, ucontext, qp,
+ virt_to_phys(qp->rq_cpu_addr),
+ qp->rq_size, EFA_MMAP_DMA_PAGE);
+ if (resp->rq_mmap_key == EFA_MMAP_INVALID)
+ return -ENOMEM;
+
+ resp->rq_mmap_size = qp->rq_size;
+ }
+
+ return 0;
+}
+
+static int efa_qp_validate_cap(struct efa_dev *dev,
+ struct ib_qp_init_attr *init_attr)
+{
+ if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested send wr[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_send_wr,
+ dev->dev_attr.max_sq_depth);
+ return -EINVAL;
+ }
+ if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested receive wr[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_recv_wr,
+ dev->dev_attr.max_rq_depth);
+ return -EINVAL;
+ }
+ if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested sge send[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
+ return -EINVAL;
+ }
+ if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested sge recv[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
+ return -EINVAL;
+ }
+ if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested inline data[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_inline_data,
+ dev->dev_attr.inline_buf_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int efa_qp_validate_attr(struct efa_dev *dev,
+ struct ib_qp_init_attr *init_attr)
+{
+ if (init_attr->qp_type != IB_QPT_DRIVER &&
+ init_attr->qp_type != IB_QPT_UD) {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported qp type %d\n", init_attr->qp_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->srq) {
+ ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->create_flags) {
+ ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct efa_com_create_qp_params create_qp_params = {};
+ struct efa_com_create_qp_result create_qp_resp;
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_ibv_create_qp_resp resp = {};
+ struct efa_ibv_create_qp cmd = {};
+ bool rq_entry_inserted = false;
+ struct efa_ucontext *ucontext;
+ struct efa_qp *qp;
+ int err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
+ ibucontext);
+
+ err = efa_qp_validate_cap(dev, init_attr);
+ if (err)
+ goto err_out;
+
+ err = efa_qp_validate_attr(dev, init_attr);
+ if (err)
+ goto err_out;
+
+ if (!field_avail(cmd, driver_qp_type, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, no input udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (udata->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(udata, sizeof(cmd),
+ udata->inlen - sizeof(cmd))) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, unknown fields in udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = ib_copy_from_udata(&cmd, udata,
+ min(sizeof(cmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Cannot copy udata for create_qp\n");
+ goto err_out;
+ }
+
+ if (cmd.comp_mask) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, unknown fields in udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ create_qp_params.uarn = ucontext->uarn;
+ create_qp_params.pd = to_epd(ibpd)->pdn;
+
+ if (init_attr->qp_type == IB_QPT_UD) {
+ create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
+ } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
+ create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
+ } else {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported qp type %d driver qp type %d\n",
+ init_attr->qp_type, cmd.driver_qp_type);
+ err = -EOPNOTSUPP;
+ goto err_free_qp;
+ }
+
+ ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
+ init_attr->qp_type, cmd.driver_qp_type);
+ create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
+ create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
+ create_qp_params.sq_depth = init_attr->cap.max_send_wr;
+ create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
+
+ create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
+ create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
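+ /* The RQ buffer is later handed to userspace via mmap, so round its size up to whole pages. */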
+ qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
+ if (qp->rq_size) {
+ qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
+ qp->rq_size, DMA_TO_DEVICE);
+ if (!qp->rq_cpu_addr) {
+ err = -ENOMEM;
+ goto err_free_qp;
+ }
+
+ ibdev_dbg(&dev->ibdev,
+ "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
+ qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
+ create_qp_params.rq_base_addr = qp->rq_dma_addr;
+ }
+
+ err = efa_com_create_qp(&dev->edev, &create_qp_params,
+ &create_qp_resp);
+ if (err)
+ goto err_free_mapped;
+
+ resp.sq_db_offset = create_qp_resp.sq_db_offset;
+ resp.rq_db_offset = create_qp_resp.rq_db_offset;
+ resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
+ resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
+ resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
+
+ err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
+ &resp);
+ if (err)
+ goto err_destroy_qp;
+
+ rq_entry_inserted = true;
+ qp->qp_handle = create_qp_resp.qp_handle;
+ qp->ibqp.qp_num = create_qp_resp.qp_num;
+ qp->ibqp.qp_type = init_attr->qp_type;
+ qp->max_send_wr = init_attr->cap.max_send_wr;
+ qp->max_recv_wr = init_attr->cap.max_recv_wr;
+ qp->max_send_sge = init_attr->cap.max_send_sge;
+ qp->max_recv_sge = init_attr->cap.max_recv_sge;
+ qp->max_inline_data = init_attr->cap.max_inline_data;
+
+ if (udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Failed to copy udata for qp[%u]\n",
+ create_qp_resp.qp_num);
+ goto err_destroy_qp;
+ }
+ }
+
+ ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
+
+ return &qp->ibqp;
+
+err_destroy_qp:
+ efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
+err_free_mapped:
+ if (qp->rq_size) {
+ dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
+ DMA_TO_DEVICE);
+ if (!rq_entry_inserted)
+ free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
+ }
+err_free_qp:
+ kfree(qp);
+err_out:
+ atomic64_inc(&dev->stats.sw_stats.create_qp_err);
+ return ERR_PTR(err);
+}
+
+static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
+ struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+#define EFA_MODIFY_QP_SUPP_MASK \
+ (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
+ IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN)
+
+ if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
+ qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
+ qp_attr_mask)) {
+ ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
+ return -EINVAL;
+ }
+
+ if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
+ ibdev_dbg(&dev->ibdev, "Can't change port num\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
+ ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibqp->device);
+ struct efa_com_modify_qp_params params = {};
+ struct efa_qp *qp = to_eqp(ibqp);
+ enum ib_qp_state cur_state;
+ enum ib_qp_state new_state;
+ int err;
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, udata not cleared\n");
+ return -EINVAL;
+ }
+
+ cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
+ qp->state;
+ new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
+
+ err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
+ new_state);
+ if (err)
+ return err;
+
+ params.qp_handle = qp->qp_handle;
+
+ if (qp_attr_mask & IB_QP_STATE) {
+ params.modify_mask |= BIT(EFA_ADMIN_QP_STATE_BIT) |
+ BIT(EFA_ADMIN_CUR_QP_STATE_BIT);
+ params.cur_qp_state = qp_attr->cur_qp_state;
+ params.qp_state = qp_attr->qp_state;
+ }
+
+ if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
+ params.modify_mask |=
+ BIT(EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT);
+ params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
+ }
+
+ if (qp_attr_mask & IB_QP_QKEY) {
+ params.modify_mask |= BIT(EFA_ADMIN_QKEY_BIT);
+ params.qkey = qp_attr->qkey;
+ }
+
+ if (qp_attr_mask & IB_QP_SQ_PSN) {
+ params.modify_mask |= BIT(EFA_ADMIN_SQ_PSN_BIT);
+ params.sq_psn = qp_attr->sq_psn;
+ }
+
+ err = efa_com_modify_qp(&dev->edev, &params);
+ if (err)
+ return err;
+
+ qp->state = new_state;
+
+ return 0;
+}
+
+static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
+{
+ struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
+
+ return efa_com_destroy_cq(&dev->edev, &params);
+}
+
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibcq->device);
+ struct efa_cq *cq = to_ecq(ibcq);
+ int err;
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
+ return -EINVAL;
+ }
+
+ ibdev_dbg(&dev->ibdev,
+ "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
+ cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
+
+ err = efa_destroy_cq_idx(dev, cq->cq_idx);
+ if (err)
+ return err;
+
+ dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
+
+ kfree(cq);
+ return 0;
+}
+
+static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
+ struct efa_ibv_create_cq_resp *resp)
+{
+ resp->q_mmap_size = cq->size;
+ resp->q_mmap_key = mmap_entry_insert(dev, cq->ucontext, cq,
+ virt_to_phys(cq->cpu_addr),
+ cq->size, EFA_MMAP_DMA_PAGE);
+ if (resp->q_mmap_key == EFA_MMAP_INVALID)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries,
+ int vector, struct ib_ucontext *ibucontext,
+ struct ib_udata *udata)
+{
+ struct efa_ibv_create_cq_resp resp = {};
+ struct efa_com_create_cq_params params;
+ struct efa_com_create_cq_result result;
+ struct efa_dev *dev = to_edev(ibdev);
+ struct efa_ibv_create_cq cmd = {};
+ bool cq_entry_inserted = false;
+ struct efa_cq *cq;
+ int err;
+
+ ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
+
+ if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
+ ibdev_dbg(ibdev,
+ "cq: requested entries[%u] non-positive or greater than max[%u]\n",
+ entries, dev->dev_attr.max_cq_depth);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!field_avail(cmd, num_sub_cqs, udata->inlen)) {
+ ibdev_dbg(ibdev,
+ "Incompatible ABI params, no input udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (udata->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(udata, sizeof(cmd),
+ udata->inlen - sizeof(cmd))) {
+ ibdev_dbg(ibdev,
+ "Incompatible ABI params, unknown fields in udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = ib_copy_from_udata(&cmd, udata,
+ min(sizeof(cmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
+ goto err_out;
+ }
+
+ if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
+ ibdev_dbg(ibdev,
+ "Incompatible ABI params, unknown fields in udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!cmd.cq_entry_size) {
+ ibdev_dbg(ibdev,
+ "Invalid entry size [%u]\n", cmd.cq_entry_size);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
+ ibdev_dbg(ibdev,
+ "Invalid number of sub cqs[%u] expected[%u]\n",
+ cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ cq->ucontext = to_eucontext(ibucontext);
+ cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
+ cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
+ if (!cq->cpu_addr) {
+ err = -ENOMEM;
+ goto err_free_cq;
+ }
+
+ params.uarn = cq->ucontext->uarn;
+ params.cq_depth = entries;
+ params.dma_addr = cq->dma_addr;
+ params.entry_size_in_bytes = cmd.cq_entry_size;
+ params.num_sub_cqs = cmd.num_sub_cqs;
+ err = efa_com_create_cq(&dev->edev, &params, &result);
+ if (err)
+ goto err_free_mapped;
+
+ resp.cq_idx = result.cq_idx;
+ cq->cq_idx = result.cq_idx;
+ cq->ibcq.cqe = result.actual_depth;
+ WARN_ON_ONCE(entries != result.actual_depth);
+
+ err = cq_mmap_entries_setup(dev, cq, &resp);
+ if (err) {
+ ibdev_dbg(ibdev,
+ "Could not setup cq[%u] mmap entries\n", cq->cq_idx);
+ goto err_destroy_cq;
+ }
+
+ cq_entry_inserted = true;
+
+ if (udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(ibdev,
+ "Failed to copy udata for create_cq\n");
+ goto err_destroy_cq;
+ }
+ }
+
+ ibdev_dbg(ibdev,
+ "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
+ cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
+
+ return &cq->ibcq;
+
+err_destroy_cq:
+ efa_destroy_cq_idx(dev, cq->cq_idx);
+err_free_mapped:
+ dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
+ if (!cq_entry_inserted)
+ free_pages_exact(cq->cpu_addr, cq->size);
+err_free_cq:
+ kfree(cq);
+err_out:
+ atomic64_inc(&dev->stats.sw_stats.create_cq_err);
+ return ERR_PTR(err);
+}
+
+struct ib_cq *efa_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct efa_ucontext,
+ ibucontext);
+
+ return do_create_cq(ibdev, attr->cqe, attr->comp_vector,
+ &ucontext->ibucontext, udata);
+}
+
+static int umem_to_page_list(struct efa_dev *dev,
+ struct ib_umem *umem,
+ u64 *page_list,
+ u32 hp_cnt,
+ u8 hp_shift)
+{
+ u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
+ struct sg_dma_page_iter sg_iter;
+ unsigned int page_idx = 0;
+ unsigned int hp_idx = 0;
+
+ ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
+ hp_cnt, pages_in_hp);
+
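+ /* Record the DMA address of the first PAGE_SIZE page in each hp_shift-sized block. */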
+ for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ if (page_idx % pages_in_hp == 0) {
+ page_list[hp_idx] = sg_page_iter_dma_address(&sg_iter);
+ hp_idx++;
+ }
+
+ page_idx++;
+ }
+
+ return 0;
+}
+
+static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
+{
+ struct scatterlist *sglist;
+ struct page *pg;
+ int i;
+
+ sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);
+ if (!sglist)
+ return NULL;
+ sg_init_table(sglist, page_cnt);
+ for (i = 0; i < page_cnt; i++) {
+ pg = vmalloc_to_page(buf);
+ if (!pg)
+ goto err;
+ sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
+ buf += PAGE_SIZE / sizeof(*buf);
+ }
+ return sglist;
+
+err:
+ kfree(sglist);
+ return NULL;
+}
+
+/*
+ * create a chunk list of physical pages dma addresses from the supplied
+ * scatter gather list
+ */
+static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ unsigned int entry, payloads_in_sg, chunk_list_size, chunk_idx, payload_idx;
+ struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
+ int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
+ struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
+ int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
+ struct efa_com_ctrl_buff_info *ctrl_buf;
+ u64 *cur_chunk_buf, *prev_chunk_buf;
+ struct scatterlist *sg;
+ dma_addr_t dma_addr;
+ int i;
+
+ /* allocate a chunk list that consists of 4KB chunks */
+ chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
+
+ chunk_list->size = chunk_list_size;
+ chunk_list->chunks = kcalloc(chunk_list_size,
+ sizeof(*chunk_list->chunks),
+ GFP_KERNEL);
+ if (!chunk_list->chunks)
+ return -ENOMEM;
+
+ ibdev_dbg(&dev->ibdev,
+ "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
+ page_cnt);
+
+ /* allocate chunk buffers: */
+ for (i = 0; i < chunk_list_size; i++) {
+ chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
+ if (!chunk_list->chunks[i].buf)
+ goto chunk_list_dealloc;
+
+ chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
+ }
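+ /*
+ * The last chunk holds only the remaining payload pointers plus the
+ * trailing next-chunk control block, so adjust its length accordingly.
+ */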
+ chunk_list->chunks[chunk_list_size - 1].length =
+ ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
+ EFA_CHUNK_PTR_SIZE;
+
+ /* fill the chunks with the DMA addresses of the sg list pages: */
+ chunk_idx = 0;
+ payload_idx = 0;
+ cur_chunk_buf = chunk_list->chunks[0].buf;
+ for_each_sg(pages_sgl, sg, sg_dma_cnt, entry) {
+ payloads_in_sg = sg_dma_len(sg) >> EFA_CHUNK_PAYLOAD_SHIFT;
+ for (i = 0; i < payloads_in_sg; i++) {
+ cur_chunk_buf[payload_idx++] =
+ (sg_dma_address(sg) & ~(EFA_CHUNK_PAYLOAD_SIZE - 1)) +
+ (EFA_CHUNK_PAYLOAD_SIZE * i);
+
+ if (payload_idx == EFA_PTRS_PER_CHUNK) {
+ chunk_idx++;
+ cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
+ payload_idx = 0;
+ }
+ }
+ }
+
+ /*
+ * Map the chunks for DMA in reverse order so that each chunk's DMA
+ * address and length can be recorded in the next-chunk control block
+ * at the tail of the preceding chunk.
+ */
+ for (i = chunk_list_size - 1; i >= 0; i--) {
+ dma_addr = dma_map_single(&dev->pdev->dev,
+ chunk_list->chunks[i].buf,
+ chunk_list->chunks[i].length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
+ ibdev_err(&dev->ibdev,
+ "chunk[%u] dma_map_failed\n", i);
+ goto chunk_list_unmap;
+ }
+
+ chunk_list->chunks[i].dma_addr = dma_addr;
+ ibdev_dbg(&dev->ibdev,
+ "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
+
+ if (!i)
+ break;
+
+ prev_chunk_buf = chunk_list->chunks[i - 1].buf;
+
+ ctrl_buf = (struct efa_com_ctrl_buff_info *)
+ &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
+ ctrl_buf->length = chunk_list->chunks[i].length;
+
+ efa_com_set_dma_addr(dma_addr,
+ &ctrl_buf->address.mem_addr_high,
+ &ctrl_buf->address.mem_addr_low);
+ }
+
+ return 0;
+
+chunk_list_unmap:
+ for (; i < chunk_list_size; i++) {
+ dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
+ chunk_list->chunks[i].length, DMA_TO_DEVICE);
+ }
+chunk_list_dealloc:
+ for (i = 0; i < chunk_list_size; i++)
+ kfree(chunk_list->chunks[i].buf);
+
+ kfree(chunk_list->chunks);
+ return -ENOMEM;
+}
+
+static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
+ int i;
+
+ for (i = 0; i < chunk_list->size; i++) {
+ dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
+ chunk_list->chunks[i].length, DMA_TO_DEVICE);
+ kfree(chunk_list->chunks[i].buf);
+ }
+
+ kfree(chunk_list->chunks);
+}
+
+/* initialize pbl continuous mode: map pbl buffer to a dma address. */
+static int pbl_continuous_initialize(struct efa_dev *dev,
+ struct pbl_context *pbl)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
+ pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
+ ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
+ return -ENOMEM;
+ }
+
+ pbl->phys.continuous.dma_addr = dma_addr;
+ ibdev_dbg(&dev->ibdev,
+ "pbl continuous - dma_addr = %pad, size[%u]\n",
+ &dma_addr, pbl->pbl_buf_size_in_bytes);
+
+ return 0;
+}
+
+/*
+ * initialize pbl indirect mode:
+ * create a chunk list out of the dma addresses of the physical pages of
+ * the pbl buffer.
+ */
+static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
+ struct scatterlist *sgl;
+ int sg_dma_cnt, err;
+
+ BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
+ sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
+ if (!sgl)
+ return -ENOMEM;
+
+ sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
+ if (!sg_dma_cnt) {
+ err = -EINVAL;
+ goto err_map;
+ }
+
+ pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
+ pbl->phys.indirect.sgl = sgl;
+ pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
+ err = pbl_chunk_list_create(dev, pbl);
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "chunk_list creation failed[%d]\n", err);
+ goto err_chunk;
+ }
+
+ ibdev_dbg(&dev->ibdev,
+ "pbl indirect - size[%u], chunks[%u]\n",
+ pbl->pbl_buf_size_in_bytes,
+ pbl->phys.indirect.chunk_list.size);
+
+ return 0;
+
+err_chunk:
+ dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
+err_map:
+ kfree(sgl);
+ return err;
+}
+
+static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ pbl_chunk_list_destroy(dev, pbl);
+ dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
+ pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
+ kfree(pbl->phys.indirect.sgl);
+}
+
+/* create a page buffer list from a mapped user memory region */
+static int pbl_create(struct efa_dev *dev,
+ struct pbl_context *pbl,
+ struct ib_umem *umem,
+ int hp_cnt,
+ u8 hp_shift)
+{
+ int err;
+
+ pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
+ pbl->pbl_buf = kzalloc(pbl->pbl_buf_size_in_bytes,
+ GFP_KERNEL | __GFP_NOWARN);
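+ /*
+ * Prefer a physically contiguous buffer that can be handed to the
+ * device with a single DMA mapping; if that allocation fails, fall
+ * back to vmalloc and describe the buffer through an indirect chunk
+ * list instead.
+ */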
+ if (pbl->pbl_buf) {
+ pbl->physically_continuous = 1;
+ err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
+ hp_shift);
+ if (err)
+ goto err_continuous;
+ err = pbl_continuous_initialize(dev, pbl);
+ if (err)
+ goto err_continuous;
+ } else {
+ pbl->physically_continuous = 0;
+ pbl->pbl_buf = vzalloc(pbl->pbl_buf_size_in_bytes);
+ if (!pbl->pbl_buf)
+ return -ENOMEM;
+
+ err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
+ hp_shift);
+ if (err)
+ goto err_indirect;
+ err = pbl_indirect_initialize(dev, pbl);
+ if (err)
+ goto err_indirect;
+ }
+
+ ibdev_dbg(&dev->ibdev,
+ "user_pbl_created: user_pages[%u], continuous[%u]\n",
+ hp_cnt, pbl->physically_continuous);
+
+ return 0;
+
+err_continuous:
+ kfree(pbl->pbl_buf);
+ return err;
+err_indirect:
+ vfree(pbl->pbl_buf);
+ return err;
+}
+
+static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ if (pbl->physically_continuous) {
+ dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
+ pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
+ kfree(pbl->pbl_buf);
+ } else {
+ pbl_indirect_terminate(dev, pbl);
+ vfree(pbl->pbl_buf);
+ }
+}
+
+static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
+ struct efa_com_reg_mr_params *params)
+{
+ int err;
+
+ params->inline_pbl = 1;
+ err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
+ params->page_num, params->page_shift);
+ if (err)
+ return err;
+
+ ibdev_dbg(&dev->ibdev,
+ "inline_pbl_array - pages[%u]\n", params->page_num);
+
+ return 0;
+}
+
+static int efa_create_pbl(struct efa_dev *dev,
+ struct pbl_context *pbl,
+ struct efa_mr *mr,
+ struct efa_com_reg_mr_params *params)
+{
+ int err;
+
+ err = pbl_create(dev, pbl, mr->umem, params->page_num,
+ params->page_shift);
+ if (err) {
+ ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
+ return err;
+ }
+
+ params->inline_pbl = 0;
+ params->indirect = !pbl->physically_continuous;
+ if (pbl->physically_continuous) {
+ params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
+
+ efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
+ &params->pbl.pbl.address.mem_addr_high,
+ &params->pbl.pbl.address.mem_addr_low);
+ } else {
+ params->pbl.pbl.length =
+ pbl->phys.indirect.chunk_list.chunks[0].length;
+
+ efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
+ &params->pbl.pbl.address.mem_addr_high,
+ &params->pbl.pbl.address.mem_addr_low);
+ }
+
+ return 0;
+}
+
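+/*
+ * Walk the umem scatterlist and work out the largest page shift at which
+ * the region stays naturally aligned, returning that shift together with
+ * the page counts at PAGE_SIZE (*count) and at the chosen shift (*ncont).
+ */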
+static void efa_cont_pages(struct ib_umem *umem, u64 addr,
+ unsigned long max_page_shift,
+ int *count, u8 *shift, u32 *ncont)
+{
+ struct scatterlist *sg;
+ u64 base = ~0, p = 0;
+ unsigned long tmp;
+ unsigned long m;
+ u64 len, pfn;
+ int i = 0;
+ int entry;
+
+ addr = addr >> PAGE_SHIFT;
+ tmp = (unsigned long)addr;
+ m = find_first_bit(&tmp, BITS_PER_LONG);
+ if (max_page_shift)
+ m = min_t(unsigned long, max_page_shift - PAGE_SHIFT, m);
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ len = DIV_ROUND_UP(sg_dma_len(sg), PAGE_SIZE);
+ pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+ if (base + p != pfn) {
+ /*
+ * If either the offset or the new
+ * base is unaligned, update m.
+ */
+ tmp = (unsigned long)(pfn | p);
+ if (!IS_ALIGNED(tmp, 1 << m))
+ m = find_first_bit(&tmp, BITS_PER_LONG);
+
+ base = pfn;
+ p = 0;
+ }
+
+ p += len;
+ i += len;
+ }
+
+ if (i) {
+ m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
+ *ncont = DIV_ROUND_UP(i, (1 << m));
+ } else {
+ m = 0;
+ *ncont = 0;
+ }
+
+ *shift = PAGE_SHIFT + m;
+ *count = i;
+}
+
+struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_com_reg_mr_params params = {};
+ struct efa_com_reg_mr_result result = {};
+ unsigned long max_page_shift;
+ struct pbl_context pbl;
+ struct efa_mr *mr;
+ int inline_size;
+ int npages;
+ int err;
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, udata not cleared\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (access_flags & ~EFA_SUPPORTED_ACCESS_FLAGS) {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported access flags[%#x], supported[%#x]\n",
+ access_flags, EFA_SUPPORTED_ACCESS_FLAGS);
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ ibdev_dbg(&dev->ibdev,
+ "Failed to pin and map user space memory[%d]\n", err);
+ goto err_free;
+ }
+
+ params.pd = to_epd(ibpd)->pdn;
+ params.iova = virt_addr;
+ params.mr_length_in_bytes = length;
+ params.permissions = access_flags & 0x1;
+ max_page_shift = fls64(dev->dev_attr.page_size_cap);
+
+ efa_cont_pages(mr->umem, start, max_page_shift, &npages,
+ &params.page_shift, &params.page_num);
+ ibdev_dbg(&dev->ibdev,
+ "start %#llx length %#llx npages %d params.page_shift %u params.page_num %u\n",
+ start, length, npages, params.page_shift, params.page_num);
+
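+ /*
+ * A short page list fits inline in the registration command; longer
+ * lists are passed to the device through a separately built pbl.
+ */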
+ inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
+ if (params.page_num <= inline_size) {
+ err = efa_create_inline_pbl(dev, mr, &params);
+ if (err)
+ goto err_unmap;
+
+ err = efa_com_register_mr(&dev->edev, &params, &result);
+ if (err)
+ goto err_unmap;
+ } else {
+ err = efa_create_pbl(dev, &pbl, mr, &params);
+ if (err)
+ goto err_unmap;
+
+ err = efa_com_register_mr(&dev->edev, &params, &result);
+ pbl_destroy(dev, &pbl);
+
+ if (err)
+ goto err_unmap;
+ }
+
+ mr->ibmr.lkey = result.l_key;
+ mr->ibmr.rkey = result.r_key;
+ mr->ibmr.length = length;
+ ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
+
+ return &mr->ibmr;
+
+err_unmap:
+ ib_umem_release(mr->umem);
+err_free:
+ kfree(mr);
+err_out:
+ atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
+ return ERR_PTR(err);
+}
+
+int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibmr->device);
+ struct efa_com_dereg_mr_params params;
+ struct efa_mr *mr = to_emr(ibmr);
+ int err;
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
+ return -EINVAL;
+ }
+
+ ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
+
+ if (mr->umem) {
+ params.l_key = mr->ibmr.lkey;
+ err = efa_com_dereg_mr(&dev->edev, &params);
+ if (err)
+ return err;
+ ib_umem_release(mr->umem);
+ }
+
+ kfree(mr);
+
+ return 0;
+}
+
+int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err) {
+ ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
+ return err;
+ }
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
+{
+ struct efa_com_dealloc_uar_params params = {
+ .uarn = uarn,
+ };
+
+ return efa_com_dealloc_uar(&dev->edev, &params);
+}
+
+int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
+{
+ struct efa_ucontext *ucontext = to_eucontext(ibucontext);
+ struct efa_dev *dev = to_edev(ibucontext->device);
+ struct efa_ibv_alloc_ucontext_resp resp = {};
+ struct efa_com_alloc_uar_result result;
+ int err;
+
+ /*
+ * It's fine if the driver does not know all request fields;
+ * we will ack the input fields in our response.
+ */
+
+ err = efa_com_alloc_uar(&dev->edev, &result);
+ if (err)
+ goto err_out;
+
+ ucontext->uarn = result.uarn;
+ xa_init(&ucontext->mmap_xa);
+
+ resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
+ resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
+ resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
+ resp.inline_buf_size = dev->dev_attr.inline_buf_size;
+ resp.max_llq_size = dev->dev_attr.max_llq_size;
+
+ if (udata && udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err)
+ goto err_dealloc_uar;
+ }
+
+ return 0;
+
+err_dealloc_uar:
+ efa_dealloc_uar(dev, result.uarn);
+err_out:
+ atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
+ return err;
+}
+
+void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
+{
+ struct efa_ucontext *ucontext = to_eucontext(ibucontext);
+ struct efa_dev *dev = to_edev(ibucontext->device);
+
+ mmap_entries_remove_free(dev, ucontext);
+ efa_dealloc_uar(dev, ucontext->uarn);
+}
+
+static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
+ struct vm_area_struct *vma, u64 key, u64 length)
+{
+ struct efa_mmap_entry *entry;
+ unsigned long va;
+ u64 pfn;
+ int err;
+
+ entry = mmap_entry_get(dev, ucontext, key, length);
+ if (!entry) {
+ ibdev_dbg(&dev->ibdev, "key[%#llx] does not have valid entry\n",
+ key);
+ return -EINVAL;
+ }
+
+ ibdev_dbg(&dev->ibdev,
+ "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n",
+ entry->address, length, entry->mmap_flag);
+
+ pfn = entry->address >> PAGE_SHIFT;
+ switch (entry->mmap_flag) {
+ case EFA_MMAP_IO_NC:
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
+ pgprot_noncached(vma->vm_page_prot));
+ break;
+ case EFA_MMAP_IO_WC:
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
+ pgprot_writecombine(vma->vm_page_prot));
+ break;
+ case EFA_MMAP_DMA_PAGE:
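+ /* Normal memory: insert each page individually so it stays refcounted by the VM. */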
+ for (va = vma->vm_start; va < vma->vm_end;
+ va += PAGE_SIZE, pfn++) {
+ err = vm_insert_page(vma, va, pfn_to_page(pfn));
+ if (err)
+ break;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err)
+ ibdev_dbg(
+ &dev->ibdev,
+ "Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n",
+ entry->address, length, entry->mmap_flag, err);
+
+ return err;
+}
+
+int efa_mmap(struct ib_ucontext *ibucontext,
+ struct vm_area_struct *vma)
+{
+ struct efa_ucontext *ucontext = to_eucontext(ibucontext);
+ struct efa_dev *dev = to_edev(ibucontext->device);
+ u64 length = vma->vm_end - vma->vm_start;
+ u64 key = vma->vm_pgoff << PAGE_SHIFT;
+
+ ibdev_dbg(&dev->ibdev,
+ "start %#lx, end %#lx, length = %#llx, key = %#llx\n",
+ vma->vm_start, vma->vm_end, length, key);
+
+ if (length % PAGE_SIZE != 0 || !(vma->vm_flags & VM_SHARED)) {
+ ibdev_dbg(&dev->ibdev,
+ "length[%#llx] is not page size aligned[%#lx] or VM_SHARED is not set [%#lx]\n",
+ length, PAGE_SIZE, vma->vm_flags);
+ return -EINVAL;
+ }
+
+ if (vma->vm_flags & VM_EXEC) {
+ ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
+ return -EPERM;
+ }
+ vma->vm_flags &= ~VM_MAYEXEC;
+
+ return __efa_mmap(dev, ucontext, vma, key, length);
+}
+
+static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
+{
+ struct efa_com_destroy_ah_params params = {
+ .ah = ah->ah,
+ .pdn = to_epd(ah->ibah.pd)->pdn,
+ };
+
+ return efa_com_destroy_ah(&dev->edev, &params);
+}
+
+int efa_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_attr *ah_attr,
+ u32 flags,
+ struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibah->device);
+ struct efa_com_create_ah_params params = {};
+ struct efa_ibv_create_ah_resp resp = {};
+ struct efa_com_create_ah_result result;
+ struct efa_ah *ah = to_eah(ibah);
+ int err;
+
+ if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {
+ ibdev_dbg(&dev->ibdev,
+ "Create address handle is not supported in atomic context\n");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
+ sizeof(params.dest_addr));
+ params.pdn = to_epd(ibah->pd)->pdn;
+ err = efa_com_create_ah(&dev->edev, &params, &result);
+ if (err)
+ goto err_out;
+
+ memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
+ ah->ah = result.ah;
+
+ resp.efa_address_handle = result.ah;
+
+ if (udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Failed to copy udata for create_ah response\n");
+ goto err_destroy_ah;
+ }
+ }
+ ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
+
+ return 0;
+
+err_destroy_ah:
+ efa_ah_destroy(dev, ah);
+err_out:
+ atomic64_inc(&dev->stats.sw_stats.create_ah_err);
+ return err;
+}
+
+void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct efa_dev *dev = to_edev(ibah->pd->device);
+ struct efa_ah *ah = to_eah(ibah);
+
+ ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
+
+ if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
+ ibdev_dbg(&dev->ibdev,
+ "Destroy address handle is not supported in atomic context\n");
+ return;
+ }
+
+ efa_ah_destroy(dev, ah);
+}
+
+enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_UNSPECIFIED;
+}
+
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 612f04190ed8..310105d4e3de 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -4104,6 +4104,9 @@ def_access_ibp_counter(seq_naks);
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
+[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
+[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
+[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
@@ -8365,7 +8368,6 @@ static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
struct hfi1_devdata *dd = rcd->dd;
u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
- mmiowb(); /* make sure everything before is written */
write_csr(dd, addr, rcd->imask);
/* force the above write on the chip and get a value back */
(void)read_csr(dd, addr);
@@ -11803,12 +11805,10 @@ void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
}
- mmiowb();
reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
<< RCV_HDR_HEAD_HEAD_SHIFT);
write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
- mmiowb();
}
u32 hdrqempty(struct hfi1_ctxtdata *rcd)
@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int total_contexts;
int ret;
unsigned ngroups;
- int qos_rmt_count;
+ int rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,23 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = rcv_contexts - total_contexts;
}
- /* each user context requires an entry in the RMT */
- qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
- if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
- user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+ /*
+ * The RMT entries are currently allocated as shown below:
+ * 1. QOS (0 to 128 entries);
+ * 2. FECN (num_kernel_context - 1 + num_user_contexts +
+ * num_vnic_contexts);
+ * 3. VNIC (num_vnic_contexts).
+ * It should be noted that FECN oversubscribes num_vnic_contexts
+ * entries of RMT because both VNIC and PSM could allocate any receive
+ * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+ * and PSM FECN must reserve an RMT entry for each possible PSM receive
+ * context.
+ */
+ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+ if (HFI1_CAP_IS_KSET(TID_RDMA))
+ rmt_count += num_kernel_contexts - 1;
+ if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+ user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
dd_dev_err(dd,
"RMT size is reducing the number of user receive contexts from %u to %d\n",
n_usr_ctxts,
@@ -14278,35 +14291,43 @@ bail:
init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
-static void init_user_fecn_handling(struct hfi1_devdata *dd,
- struct rsm_map_table *rmt)
+static void init_fecn_handling(struct hfi1_devdata *dd,
+ struct rsm_map_table *rmt)
{
struct rsm_rule_data rrd;
u64 reg;
- int i, idx, regoff, regidx;
+ int i, idx, regoff, regidx, start;
u8 offset;
+ u32 total_cnt;
+
+ if (HFI1_CAP_IS_KSET(TID_RDMA))
+ /* Exclude context 0 */
+ start = 1;
+ else
+ start = dd->first_dyn_alloc_ctxt;
+
+ total_cnt = dd->num_rcv_contexts - start;
/* there needs to be enough room in the map table */
- if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
- dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
+ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
+ dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
return;
}
/*
* RSM will extract the destination context as an index into the
* map table. The destination contexts are a sequential block
- * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
+ * in the range start...num_rcv_contexts-1 (inclusive).
* Map entries are accessed as offset + extracted value. Adjust
* the added offset so this sequence can be placed anywhere in
* the table - as long as the entries themselves do not wrap.
* There are only enough bits in offset for the table size, so
* start with that to allow for a "negative" offset.
*/
- offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
- (int)dd->first_dyn_alloc_ctxt);
+ offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
- for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
- i < dd->num_rcv_contexts; i++, idx++) {
+ for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
+ i++, idx++) {
/* replace with identity mapping */
regoff = (idx % 8) * 8;
regidx = idx / 8;
@@ -14341,7 +14362,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
/* add rule 1 */
add_rsm_rule(dd, RSM_INS_FECN, &rrd);
- rmt->used += dd->num_user_contexts;
+ rmt->used += total_cnt;
}
/* Initialize RSM for VNIC */
@@ -14428,7 +14449,7 @@ static void init_rxe(struct hfi1_devdata *dd)
rmt = alloc_rsm_map_table(dd);
/* set up QOS, including the QPN map table */
init_qos(dd, rmt);
- init_user_fecn_handling(dd, rmt);
+ init_fecn_handling(dd, rmt);
complete_rsm_map_table(dd, rmt);
/* record number of used rsm map entries for vnic */
dd->vnic.rmt_start = rmt->used;
@@ -14654,8 +14675,8 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd)
*/
static int init_asic_data(struct hfi1_devdata *dd)
{
- unsigned long flags;
- struct hfi1_devdata *tmp, *peer = NULL;
+ unsigned long index;
+ struct hfi1_devdata *peer;
struct hfi1_asic_data *asic_data;
int ret = 0;
@@ -14664,14 +14685,12 @@ static int init_asic_data(struct hfi1_devdata *dd)
if (!asic_data)
return -ENOMEM;
- spin_lock_irqsave(&hfi1_devs_lock, flags);
+ xa_lock_irq(&hfi1_dev_table);
/* Find our peer device */
- list_for_each_entry(tmp, &hfi1_dev_list, list) {
- if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
- dd->unit != tmp->unit) {
- peer = tmp;
+ xa_for_each(&hfi1_dev_table, index, peer) {
+ if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
+ dd->unit != peer->unit)
break;
- }
}
if (peer) {
@@ -14683,7 +14702,7 @@ static int init_asic_data(struct hfi1_devdata *dd)
mutex_init(&dd->asic_data->asic_resource_mutex);
}
dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ xa_unlock_irq(&hfi1_dev_table);
/* first one through - set up i2c devices */
if (!peer)
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 6c27c1c6a868..4e6c3556ec48 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -858,6 +858,9 @@ static inline int idx_from_vl(int vl)
/* Per device counter indexes */
enum {
C_RCV_OVF = 0,
+ C_RX_LEN_ERR,
+ C_RX_ICRC_ERR,
+ C_RX_EBP,
C_RX_TID_FULL,
C_RX_TID_INVALID,
C_RX_TID_FLGMS,
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index c0800ea5a3f8..ab3589d17aee 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -380,6 +380,9 @@
#define DC_LCB_PRF_TX_FLIT_CNT (DC_LCB_CSRS + 0x000000000418)
#define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
#define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
+#define RCV_LENGTH_ERR_CNT 0
+#define RCV_ICRC_ERR_CNT 6
+#define RCV_EBP_CNT 9
#define RCV_BUF_OVFL_CNT 10
#define RCV_CONTEXT_EGR_STALL 22
#define RCV_DATA_PKT_CNT 0
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index 7310a5dba420..d47da7b0438f 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -286,7 +286,7 @@ struct diag_pkt {
#define RHF_TID_ERR (0x1ull << 59)
#define RHF_LEN_ERR (0x1ull << 60)
#define RHF_ECC_ERR (0x1ull << 61)
-#define RHF_VCRC_ERR (0x1ull << 62)
+#define RHF_RESERVED (0x1ull << 62)
#define RHF_ICRC_ERR (0x1ull << 63)
#define RHF_ERROR_SMASK 0xffe0000000000000ull /* bits 63:53 */
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 427ba0ce74a5..15efb4a380b2 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -1080,6 +1080,77 @@ static int qsfp2_debugfs_release(struct inode *in, struct file *fp)
return __qsfp_debugfs_release(in, fp, 1);
}
+#define EXPROM_WRITE_ENABLE BIT_ULL(14)
+
+static bool exprom_wp_disabled;
+
+static int exprom_wp_set(struct hfi1_devdata *dd, bool disable)
+{
+ u64 gpio_val = 0;
+
+ if (disable) {
+ gpio_val = EXPROM_WRITE_ENABLE;
+ exprom_wp_disabled = true;
+ dd_dev_info(dd, "Disable Expansion ROM Write Protection\n");
+ } else {
+ exprom_wp_disabled = false;
+ dd_dev_info(dd, "Enable Expansion ROM Write Protection\n");
+ }
+
+ write_csr(dd, ASIC_GPIO_OUT, gpio_val);
+ write_csr(dd, ASIC_GPIO_OE, gpio_val);
+
+ return 0;
+}
+
+static ssize_t exprom_wp_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t exprom_wp_debugfs_write(struct file *file,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ char cdata;
+
+ if (count != 1)
+ return -EINVAL;
+ if (get_user(cdata, buf))
+ return -EFAULT;
+ if (cdata == '0')
+ exprom_wp_set(ppd->dd, false);
+ else if (cdata == '1')
+ exprom_wp_set(ppd->dd, true);
+ else
+ return -EINVAL;
+
+ return 1;
+}
+
+static unsigned long exprom_in_use;
+
+static int exprom_wp_debugfs_open(struct inode *in, struct file *fp)
+{
+ if (test_and_set_bit(0, &exprom_in_use))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int exprom_wp_debugfs_release(struct inode *in, struct file *fp)
+{
+ struct hfi1_pportdata *ppd = private2ppd(fp);
+
+ if (exprom_wp_disabled)
+ exprom_wp_set(ppd->dd, false);
+ clear_bit(0, &exprom_in_use);
+
+ return 0;
+}
+
#define DEBUGFS_OPS(nm, readroutine, writeroutine) \
{ \
.name = nm, \
@@ -1119,6 +1190,9 @@ static const struct counter_info port_cntr_ops[] = {
qsfp1_debugfs_open, qsfp1_debugfs_release),
DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
qsfp2_debugfs_open, qsfp2_debugfs_release),
+ DEBUGFS_XOPS("exprom_wp", exprom_wp_debugfs_read,
+ exprom_wp_debugfs_write, exprom_wp_debugfs_open,
+ exprom_wp_debugfs_release),
DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL),
DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write),
@@ -1302,15 +1376,15 @@ static void _driver_stats_seq_stop(struct seq_file *s, void *v)
static u64 hfi1_sps_ints(void)
{
- unsigned long flags;
+ unsigned long index, flags;
struct hfi1_devdata *dd;
u64 sps_ints = 0;
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- list_for_each_entry(dd, &hfi1_dev_list, list) {
+ xa_lock_irqsave(&hfi1_dev_table, flags);
+ xa_for_each(&hfi1_dev_table, index, dd) {
sps_ints += get_all_cpu_total(dd->int_counter);
}
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ xa_unlock_irqrestore(&hfi1_dev_table, flags);
return sps_ints;
}
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 2a9d2912f5db..01aa1f132f55 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -72,8 +72,6 @@
*/
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
-DEFINE_SPINLOCK(hfi1_devs_lock);
-LIST_HEAD(hfi1_dev_list);
DEFINE_MUTEX(hfi1_mutex); /* general driver use */
unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
@@ -175,11 +173,11 @@ int hfi1_count_active_units(void)
{
struct hfi1_devdata *dd;
struct hfi1_pportdata *ppd;
- unsigned long flags;
+ unsigned long index, flags;
int pidx, nunits_active = 0;
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- list_for_each_entry(dd, &hfi1_dev_list, list) {
+ xa_lock_irqsave(&hfi1_dev_table, flags);
+ xa_for_each(&hfi1_dev_table, index, dd) {
if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
continue;
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
@@ -190,7 +188,7 @@ int hfi1_count_active_units(void)
}
}
}
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ xa_unlock_irqrestore(&hfi1_dev_table, flags);
return nunits_active;
}
@@ -264,7 +262,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
hfi1_dbg_fault_suppress_err(verbs_dev))
return;
- if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
+ if (packet->rhf & RHF_ICRC_ERR)
return;
if (packet->etype == RHF_RCV_TYPE_BYPASS) {
@@ -516,7 +514,9 @@ bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
*/
do_cnp = prescan ||
(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
- opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE);
+ opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
+ opcode == TID_OP(READ_RESP) ||
+ opcode == TID_OP(ACK);
/* Call appropriate CNP handler */
if (!ignore_fecn && do_cnp && fecn)
@@ -1581,7 +1581,7 @@ static void show_eflags_errs(struct hfi1_packet *packet)
u32 rte = rhf_rcv_type_err(packet->rhf);
dd_dev_err(rcd->dd,
- "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
+ "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
rcd->ctxt, packet->rhf,
packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
@@ -1589,7 +1589,6 @@ static void show_eflags_errs(struct hfi1_packet *packet)
packet->rhf & RHF_TID_ERR ? "tid " : "",
packet->rhf & RHF_LEN_ERR ? "len " : "",
packet->rhf & RHF_ECC_ERR ? "ecc " : "",
- packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
rte);
}
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index 1be49a0d9c11..e9d5cc8b771a 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -112,9 +112,6 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
*/
void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
{
- WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_full_list));
- WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_used_list));
-
kfree(rcd->groups);
rcd->groups = NULL;
hfi1_exp_tid_group_init(rcd);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 048b5d73ba39..b458c218842b 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -54,7 +54,6 @@
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/idr.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
@@ -65,6 +64,7 @@
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/xarray.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
@@ -1021,8 +1021,8 @@ struct hfi1_asic_data {
struct hfi1_vnic_data {
struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
struct kmem_cache *txreq_cache;
+ struct xarray vesws;
u8 num_vports;
- struct idr vesw_idr;
u8 rmt_start;
u8 num_ctxt;
};
@@ -1041,7 +1041,6 @@ struct sdma_vl_map;
typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_devdata {
struct hfi1_ibdev verbs_dev; /* must be first */
- struct list_head list;
/* pointers to related structs for this device */
/* pci access data structure */
struct pci_dev *pcidev;
@@ -1426,8 +1425,7 @@ struct hfi1_filedata {
struct mm_struct *mm;
};
-extern struct list_head hfi1_dev_list;
-extern spinlock_t hfi1_devs_lock;
+extern struct xarray hfi1_dev_table;
struct hfi1_devdata *hfi1_lookup(int unit);
static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index faaaac8fbc55..71cb9525c074 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -49,7 +49,7 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user
static inline u64 encode_rcv_header_entry_size(u16 size);
-static struct idr hfi1_unit_table;
+DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
static int hfi1_create_kctxt(struct hfi1_devdata *dd,
struct hfi1_pportdata *ppd)
@@ -469,7 +469,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
if (rcd->egrbufs.size < hfi1_max_mtu) {
rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
hfi1_cdbg(PROC,
- "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
+ "ctxt%u: eager bufs size too small. Adjusting to %u\n",
rcd->ctxt, rcd->egrbufs.size);
}
rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
@@ -805,7 +805,8 @@ static int create_workqueues(struct hfi1_devdata *dd)
ppd->hfi1_wq =
alloc_workqueue(
"hfi%d_%d",
- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM,
HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
dd->unit, pidx);
if (!ppd->hfi1_wq)
@@ -1018,21 +1019,9 @@ done:
return ret;
}
-static inline struct hfi1_devdata *__hfi1_lookup(int unit)
-{
- return idr_find(&hfi1_unit_table, unit);
-}
-
struct hfi1_devdata *hfi1_lookup(int unit)
{
- struct hfi1_devdata *dd;
- unsigned long flags;
-
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- dd = __hfi1_lookup(unit);
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
-
- return dd;
+ return xa_load(&hfi1_dev_table, unit);
}
/*
@@ -1200,7 +1189,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
/*
* Release our hold on the shared asic data. If we are the last one,
* return the structure to be finalized outside the lock. Must be
- * holding hfi1_devs_lock.
+ * holding hfi1_dev_table lock.
*/
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
@@ -1236,13 +1225,10 @@ static void hfi1_clean_devdata(struct hfi1_devdata *dd)
struct hfi1_asic_data *ad;
unsigned long flags;
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- if (!list_empty(&dd->list)) {
- idr_remove(&hfi1_unit_table, dd->unit);
- list_del_init(&dd->list);
- }
+ xa_lock_irqsave(&hfi1_dev_table, flags);
+ __xa_erase(&hfi1_dev_table, dd->unit);
ad = release_asic_data(dd);
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ xa_unlock_irqrestore(&hfi1_dev_table, flags);
finalize_asic_data(dd, ad);
free_platform_config(dd);
@@ -1286,13 +1272,10 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
* Must be done via verbs allocator, because the verbs cleanup process
* both does cleanup and free of the data structure.
* "extra" is for chip-specific data.
- *
- * Use the idr mechanism to get a unit number for this unit.
*/
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
size_t extra)
{
- unsigned long flags;
struct hfi1_devdata *dd;
int ret, nports;
@@ -1307,21 +1290,10 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
dd->pport = (struct hfi1_pportdata *)(dd + 1);
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
-
- INIT_LIST_HEAD(&dd->list);
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&hfi1_devs_lock, flags);
-
- ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
- if (ret >= 0) {
- dd->unit = ret;
- list_add(&dd->list, &hfi1_dev_list);
- }
dd->node = NUMA_NO_NODE;
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
- idr_preload_end();
-
+ ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
+ GFP_KERNEL);
if (ret < 0) {
dev_err(&pdev->dev,
"Could not allocate unit ID: error %d\n", -ret);
@@ -1522,8 +1494,6 @@ static int __init hfi1_mod_init(void)
* These must be called before the driver is registered with
* the PCI subsystem.
*/
- idr_init(&hfi1_unit_table);
-
hfi1_dbg_init();
ret = pci_register_driver(&hfi1_pci_driver);
if (ret < 0) {
@@ -1534,7 +1504,6 @@ static int __init hfi1_mod_init(void)
bail_dev:
hfi1_dbg_exit();
- idr_destroy(&hfi1_unit_table);
dev_cleanup();
bail:
return ret;
@@ -1552,7 +1521,7 @@ static void __exit hfi1_mod_cleanup(void)
node_affinity_destroy_all();
hfi1_dbg_exit();
- idr_destroy(&hfi1_unit_table);
+ WARN_ON(!xa_empty(&hfi1_dev_table));
dispose_firmware(); /* asymmetric with obtain_firmware() */
dev_cleanup();
}
@@ -2071,7 +2040,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
rcd->egrbufs.size = alloced_bytes;
hfi1_cdbg(PROC,
- "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
+ "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
rcd->ctxt, rcd->egrbufs.alloced,
rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
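
The init.c hunks above replace the old IDR-plus-spinlock unit table with an XArray that allocates unit numbers itself. As a rough illustration of that pattern only (the structure and function names below are invented for the example, not hfi1 code), a minimal sketch might look like:

#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_dev {
	u32 unit;	/* index handed out by xa_alloc_irq() */
};

/* ID-allocating XArray whose internal lock is IRQ-safe. */
static DEFINE_XARRAY_FLAGS(demo_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static struct demo_dev *demo_alloc(void)
{
	struct demo_dev *dd = kzalloc(sizeof(*dd), GFP_KERNEL);

	if (!dd)
		return NULL;
	/* Pick a free index and store dd under it in one step. */
	if (xa_alloc_irq(&demo_table, &dd->unit, dd, xa_limit_32b,
			 GFP_KERNEL)) {
		kfree(dd);
		return NULL;
	}
	return dd;
}

static struct demo_dev *demo_lookup(u32 unit)
{
	/* Lookups are RCU-safe; no external spinlock is needed. */
	return xa_load(&demo_table, unit);
}

static void demo_free(struct demo_dev *dd)
{
	xa_erase(&demo_table, dd->unit);
	kfree(dd);
}

This mirrors why hfi1_lookup() above collapses to a bare xa_load() and why the separate hfi1_devs_lock/hfi1_dev_list pair can be dropped.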
diff --git a/drivers/infiniband/hw/hfi1/opfn.h b/drivers/infiniband/hw/hfi1/opfn.h
index 5f2011cabc25..62f93c1dc082 100644
--- a/drivers/infiniband/hw/hfi1/opfn.h
+++ b/drivers/infiniband/hw/hfi1/opfn.h
@@ -47,12 +47,14 @@
* for future transactions
*/
+#include <linux/workqueue.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_qp.h>
+
/* STL Verbs Extended */
#define IB_BTHE_E_SHIFT 24
#define HFI1_VERBS_E_ATOMIC_VADDR U64_MAX
-struct ib_atomic_eth;
-
enum hfi1_opfn_codes {
STL_VERBS_EXTD_NONE = 0,
STL_VERBS_EXTD_TID_RDMA,
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index a1de566fe95e..16ba9d52e1b9 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1578,7 +1578,6 @@ void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
sc_del_credit_return_intr(sc);
trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
if (needint) {
- mmiowb();
sc_return_credits(sc);
}
}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9b643c2409cf..4e0e9fc0a777 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -742,6 +742,8 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
iowait_wakeup,
iowait_sdma_drained,
hfi1_init_priority);
+ /* Init to a value to start the running average correctly */
+ priv->s_running_pkt_size = piothreshold / 2;
return priv;
}
@@ -898,7 +900,9 @@ void notify_error_qp(struct rvt_qp *qp)
if (!list_empty(&priv->s_iowait.list) &&
!(qp->s_flags & RVT_S_BUSY) &&
!(priv->s_flags & RVT_S_BUSY)) {
- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index e6726c1ab866..a922edcf23d6 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -140,10 +140,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
case OP(RDMA_READ_RESPONSE_LAST):
case OP(RDMA_READ_RESPONSE_ONLY):
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
/* FALLTHROUGH */
case OP(ATOMIC_ACKNOWLEDGE):
/*
@@ -343,7 +340,8 @@ write_resp:
break;
e->sent = 1;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ /* Do not free e->rdma_sge until all data are received */
+ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
break;
case TID_OP(READ_RESP):
@@ -1836,7 +1834,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
qp->s_last = s_last;
/* see post_send() */
barrier();
- rvt_put_swqe(wqe);
+ rvt_put_qp_swqe(qp, wqe);
rvt_qp_swqe_complete(qp,
wqe,
ib_hfi1_wc_opcode[wqe->wr.opcode],
@@ -1884,7 +1882,7 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
u32 s_last;
trdma_clean_swqe(qp, wqe);
- rvt_put_swqe(wqe);
+ rvt_put_qp_swqe(qp, wqe);
rvt_qp_wqe_unreserve(qp, wqe);
s_last = qp->s_last;
trace_hfi1_qp_send_completion(qp, wqe, s_last);
@@ -2643,10 +2641,7 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
len = be32_to_cpu(reth->length);
if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done;
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
if (len != 0) {
u32 rkey = be32_to_cpu(reth->rkey);
u64 vaddr = get_ib_reth_vaddr(reth);
@@ -3088,10 +3083,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
reth = &ohdr->u.rc.reth;
len = be32_to_cpu(reth->length);
if (len) {
@@ -3166,10 +3158,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
/* Process OPFN special virtual address */
if (opfn) {
opfn_conn_response(qp, e, ateth);
diff --git a/drivers/infiniband/hw/hfi1/rc.h b/drivers/infiniband/hw/hfi1/rc.h
index 8e0935b9bf2a..5ed5e85d5841 100644
--- a/drivers/infiniband/hw/hfi1/rc.h
+++ b/drivers/infiniband/hw/hfi1/rc.h
@@ -41,6 +41,14 @@ static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
return rvt_restart_sge(ss, wqe, len);
}
+static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
+{
+ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+}
+
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
u8 *prev_ack, bool *scheduled);
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 124a3ec1e15c..23ac6057b211 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -524,7 +524,7 @@ void _hfi1_do_send(struct work_struct *work)
/**
* hfi1_do_send - perform a send on a QP
- * @work: contains a pointer to the QP
+ * @qp: a pointer to the QP
* @in_thread: true if in a workqueue thread
*
* Process entries in the send work queue until credit or queue is
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index fdda33aca77f..6fb93032fbef 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -67,8 +67,6 @@ static u32 mask_generation(u32 a)
#define TID_RDMA_DESTQP_FLOW_SHIFT 11
#define TID_RDMA_DESTQP_FLOW_MASK 0x1f
-#define TID_FLOW_SW_PSN BIT(0)
-
#define TID_OPFN_QP_CTXT_MASK 0xff
#define TID_OPFN_QP_CTXT_SHIFT 56
#define TID_OPFN_QP_KDETH_MASK 0xff
@@ -128,6 +126,15 @@ static int make_tid_rdma_ack(struct rvt_qp *qp,
struct ib_other_headers *ohdr,
struct hfi1_pkt_state *ps);
static void hfi1_do_tid_send(struct rvt_qp *qp);
+static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx);
+static void tid_rdma_rcv_err(struct hfi1_packet *packet,
+ struct ib_other_headers *ohdr,
+ struct rvt_qp *qp, u32 psn, int diff, bool fecn);
+static void update_r_next_psn_fecn(struct hfi1_packet *packet,
+ struct hfi1_qp_priv *priv,
+ struct hfi1_ctxtdata *rcd,
+ struct tid_rdma_flow *flow,
+ bool fecn);
static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
@@ -776,7 +783,6 @@ int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
rcd->flows[fs->index].generation = fs->generation;
fs->generation = kern_setup_hw_flow(rcd, fs->index);
fs->psn = 0;
- fs->flags = 0;
dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
/* get head before dropping lock */
fqp = first_qp(rcd, &rcd->flow_queue);
@@ -1807,6 +1813,7 @@ sync_check:
goto done;
hfi1_kern_clear_hw_flow(req->rcd, qp);
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
req->state = TID_REQUEST_ACTIVE;
}
@@ -2036,10 +2043,7 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet,
if (psn != e->psn || len != req->total_len)
goto unlock;
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
rkey = be32_to_cpu(reth->rkey);
vaddr = get_ib_reth_vaddr(reth);
@@ -2238,7 +2242,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
struct ib_reth *reth;
struct hfi1_qp_priv *qpriv = qp->priv;
u32 bth0, psn, len, rkey;
- bool is_fecn;
+ bool fecn;
u8 next;
u64 vaddr;
int diff;
@@ -2248,7 +2252,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
if (hfi1_ruc_check_hdr(ibp, packet))
return;
- is_fecn = process_ecn(qp, packet);
+ fecn = process_ecn(qp, packet);
psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
@@ -2267,9 +2271,8 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
diff = delta_psn(psn, qp->r_psn);
if (unlikely(diff)) {
- if (tid_rdma_rcv_error(packet, ohdr, qp, psn, diff))
- return;
- goto send_ack;
+ tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
+ return;
}
/* We've verified the request, insert it into the ack queue. */
@@ -2285,10 +2288,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
rkey = be32_to_cpu(reth->rkey);
qp->r_len = len;
@@ -2324,11 +2324,11 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
/* Schedule the send tasklet. */
qp->s_flags |= RVT_S_RESP_PENDING;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (is_fecn)
- goto send_ack;
return;
nack_inv_unlock:
@@ -2345,8 +2345,6 @@ nack_acc:
rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
qp->r_ack_psn = qp->r_psn;
-send_ack:
- hfi1_send_rc_ack(packet, is_fecn);
}
u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
@@ -2463,12 +2461,12 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
struct tid_rdma_request *req;
struct tid_rdma_flow *flow;
u32 opcode, aeth;
- bool is_fecn;
+ bool fecn;
unsigned long flags;
u32 kpsn, ipsn;
trace_hfi1_sender_rcv_tid_read_resp(qp);
- is_fecn = process_ecn(qp, packet);
+ fecn = process_ecn(qp, packet);
kpsn = mask_psn(be32_to_cpu(ohdr->bth[2]));
aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth);
opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
@@ -2481,8 +2479,43 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
flow = &req->flows[req->clear_tail];
/* When header suppression is disabled */
- if (cmp_psn(ipsn, flow->flow_state.ib_lpsn))
+ if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
+ update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
+
+ if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
+ goto ack_done;
+ flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
+ /*
+ * Copy the payload to destination buffer if this packet is
+ * delivered as an eager packet due to RSM rule and FECN.
+ * The RSM rule selects FECN bit in BTH and SH bit in
+ * KDETH header and therefore will not match the last
+ * packet of each segment that has SH bit cleared.
+ */
+ if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
+ struct rvt_sge_state ss;
+ u32 len;
+ u32 tlen = packet->tlen;
+ u16 hdrsize = packet->hlen;
+ u8 pad = packet->pad;
+ u8 extra_bytes = pad + packet->extra_byte +
+ (SIZE_OF_CRC << 2);
+ u32 pmtu = qp->pmtu;
+
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
+ goto ack_op_err;
+ len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
+ if (unlikely(len < pmtu))
+ goto ack_op_err;
+ rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
+ false);
+ /* Raise the sw sequence check flag for next packet */
+ priv->s_flags |= HFI1_R_TID_SW_PSN;
+ }
+
goto ack_done;
+ }
+ flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
req->ack_pending--;
priv->pending_tid_r_segs--;
qp->s_num_rd_atomic--;
@@ -2524,6 +2557,7 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
req->comp_seg == req->cur_seg) ||
priv->tid_r_comp == priv->tid_r_reqs) {
hfi1_kern_clear_hw_flow(priv->rcd, qp);
+ priv->s_flags &= ~HFI1_R_TID_SW_PSN;
if (req->state == TID_REQUEST_SYNC)
req->state = TID_REQUEST_ACTIVE;
}
@@ -2545,8 +2579,6 @@ ack_op_err:
ack_done:
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (is_fecn)
- hfi1_send_rc_ack(packet, is_fecn);
}
void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
@@ -2773,9 +2805,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return ret;
}
- if (priv->flow_state.flags & TID_FLOW_SW_PSN) {
+ if (priv->s_flags & HFI1_R_TID_SW_PSN) {
diff = cmp_psn(psn,
- priv->flow_state.r_next_psn);
+ flow->flow_state.r_next_psn);
if (diff > 0) {
if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
restart_tid_rdma_read_req(rcd,
@@ -2811,22 +2843,15 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
qp->r_flags &=
~RVT_R_RDMAR_SEQ;
}
- priv->flow_state.r_next_psn++;
+ flow->flow_state.r_next_psn =
+ mask_psn(psn + 1);
} else {
- u64 reg;
u32 last_psn;
- /*
- * The only sane way to get the amount of
- * progress is to read the HW flow state.
- */
- reg = read_uctxt_csr(dd, rcd->ctxt,
- RCV_TID_FLOW_TABLE +
- (8 * flow->idx));
- last_psn = mask_psn(reg);
-
- priv->flow_state.r_next_psn = last_psn;
- priv->flow_state.flags |= TID_FLOW_SW_PSN;
+ last_psn = read_r_next_psn(dd, rcd->ctxt,
+ flow->idx);
+ flow->flow_state.r_next_psn = last_psn;
+ priv->s_flags |= HFI1_R_TID_SW_PSN;
/*
* If no request has been restarted yet,
* restart the current one.
@@ -2891,10 +2916,11 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
struct rvt_ack_entry *e;
struct tid_rdma_request *req;
struct tid_rdma_flow *flow;
+ int diff = 0;
trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
packet->rhf);
- if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
+ if (packet->rhf & RHF_ICRC_ERR)
return ret;
packet->ohdr = &hdr->u.oth;
@@ -2974,17 +3000,10 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
switch (rte) {
case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) {
- u64 reg;
-
qpriv->s_flags |= HFI1_R_TID_SW_PSN;
- /*
- * The only sane way to get the amount of
- * progress is to read the HW flow state.
- */
- reg = read_uctxt_csr(dd, rcd->ctxt,
- RCV_TID_FLOW_TABLE +
- (8 * flow->idx));
- flow->flow_state.r_next_psn = mask_psn(reg);
+ flow->flow_state.r_next_psn =
+ read_r_next_psn(dd, rcd->ctxt,
+ flow->idx);
qpriv->r_next_psn_kdeth =
flow->flow_state.r_next_psn;
goto nak_psn;
@@ -2997,10 +3016,12 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
* mismatch could be due to packets that were
* already in flight.
*/
- if (psn != flow->flow_state.r_next_psn) {
- psn = flow->flow_state.r_next_psn;
+ diff = cmp_psn(psn,
+ flow->flow_state.r_next_psn);
+ if (diff > 0)
goto nak_psn;
- }
+ else if (diff < 0)
+ break;
qpriv->s_nak_state = 0;
/*
@@ -3011,8 +3032,10 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
if (psn == full_flow_psn(flow,
flow->flow_state.lpsn))
ret = false;
+ flow->flow_state.r_next_psn =
+ mask_psn(psn + 1);
qpriv->r_next_psn_kdeth =
- ++flow->flow_state.r_next_psn;
+ flow->flow_state.r_next_psn;
}
break;
@@ -3517,8 +3540,10 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
/* If all data has been received, clear the flow */
if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
- !qpriv->alloc_w_segs)
+ !qpriv->alloc_w_segs) {
hfi1_kern_clear_hw_flow(rcd, qp);
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ }
break;
}
@@ -3544,8 +3569,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
hfi1_kern_clear_hw_flow(rcd, qp);
qpriv->sync_pt = false;
- if (qpriv->s_flags & HFI1_R_TID_SW_PSN)
- qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
}
/* Allocate flow if we don't have one */
@@ -3687,7 +3711,7 @@ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
struct hfi1_qp_priv *qpriv = qp->priv;
struct tid_rdma_request *req;
u32 bth0, psn, len, rkey, num_segs;
- bool is_fecn;
+ bool fecn;
u8 next;
u64 vaddr;
int diff;
@@ -3696,7 +3720,7 @@ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
if (hfi1_ruc_check_hdr(ibp, packet))
return;
- is_fecn = process_ecn(qp, packet);
+ fecn = process_ecn(qp, packet);
psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
trace_hfi1_rsp_rcv_tid_write_req(qp, psn);
@@ -3713,9 +3737,8 @@ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len);
diff = delta_psn(psn, qp->r_psn);
if (unlikely(diff)) {
- if (tid_rdma_rcv_error(packet, ohdr, qp, psn, diff))
- return;
- goto send_ack;
+ tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
+ return;
}
/*
@@ -3751,10 +3774,7 @@ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
goto update_head;
}
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
/* The length needs to be in multiples of PAGE_SIZE */
if (!len || len & ~PAGE_MASK)
@@ -3834,11 +3854,11 @@ update_head:
/* Schedule the send tasklet. */
qp->s_flags |= RVT_S_RESP_PENDING;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (is_fecn)
- goto send_ack;
return;
nack_inv_unlock:
@@ -3855,8 +3875,6 @@ nack_acc:
rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
qp->r_ack_psn = qp->r_psn;
-send_ack:
- hfi1_send_rc_ack(packet, is_fecn);
}
u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
@@ -4073,10 +4091,10 @@ void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
struct tid_rdma_flow *flow;
enum ib_wc_status status;
u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen;
- bool is_fecn;
+ bool fecn;
unsigned long flags;
- is_fecn = process_ecn(qp, packet);
+ fecn = process_ecn(qp, packet);
psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth);
opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
@@ -4216,7 +4234,6 @@ void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
qpriv->s_tid_cur = i;
}
qp->s_flags &= ~HFI1_S_WAIT_TID_RESP;
-
hfi1_schedule_tid_send(qp);
goto ack_done;
@@ -4225,9 +4242,9 @@ ack_op_err:
ack_err:
rvt_error_qp(qp, status);
ack_done:
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (is_fecn)
- hfi1_send_rc_ack(packet, is_fecn);
}
bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
@@ -4307,7 +4324,9 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
unsigned long flags;
u32 psn, next;
u8 opcode;
+ bool fecn;
+ fecn = process_ecn(qp, packet);
psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
@@ -4320,9 +4339,53 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
req = ack_to_tid_req(e);
flow = &req->flows[req->clear_tail];
if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
+ update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
+
if (cmp_psn(psn, flow->flow_state.r_next_psn))
goto send_nak;
- flow->flow_state.r_next_psn++;
+
+ flow->flow_state.r_next_psn = mask_psn(psn + 1);
+ /*
+ * Copy the payload to destination buffer if this packet is
+ * delivered as an eager packet due to RSM rule and FECN.
+ * The RSM rule selects FECN bit in BTH and SH bit in
+ * KDETH header and therefore will not match the last
+ * packet of each segment that has SH bit cleared.
+ */
+ if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
+ struct rvt_sge_state ss;
+ u32 len;
+ u32 tlen = packet->tlen;
+ u16 hdrsize = packet->hlen;
+ u8 pad = packet->pad;
+ u8 extra_bytes = pad + packet->extra_byte +
+ (SIZE_OF_CRC << 2);
+ u32 pmtu = qp->pmtu;
+
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
+ goto send_nak;
+ len = req->comp_seg * req->seg_len;
+ len += delta_psn(psn,
+ full_flow_psn(flow, flow->flow_state.spsn)) *
+ pmtu;
+ if (unlikely(req->total_len - len < pmtu))
+ goto send_nak;
+
+ /*
+ * The e->rdma_sge field is set when TID RDMA WRITE REQ
+ * is first received and is never modified thereafter.
+ */
+ ss.sge = e->rdma_sge;
+ ss.sg_list = NULL;
+ ss.num_sge = 1;
+ ss.total_len = req->total_len;
+ rvt_skip_sge(&ss, len, false);
+ rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
+ false);
+ /* Raise the sw sequence check flag for next packet */
+ priv->r_next_psn_kdeth = mask_psn(psn + 1);
+ priv->s_flags |= HFI1_R_TID_SW_PSN;
+ }
goto exit;
}
flow->flow_state.r_next_psn = mask_psn(psn + 1);
@@ -4347,6 +4410,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
priv->r_tid_ack = priv->r_tid_tail;
if (opcode == TID_OP(WRITE_DATA_LAST)) {
+ release_rdma_sge_mr(e);
for (next = priv->r_tid_tail + 1; ; next++) {
if (next > rvt_size_atomic(&dev->rdi))
next = 0;
@@ -4386,6 +4450,8 @@ done:
hfi1_schedule_tid_send(qp);
exit:
priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
spin_unlock_irqrestore(&qp->s_lock, flags);
return;
@@ -4487,12 +4553,11 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
struct tid_rdma_request *req;
struct tid_rdma_flow *flow;
u32 aeth, psn, req_psn, ack_psn, fspsn, resync_psn, ack_kpsn;
- bool is_fecn;
unsigned long flags;
u16 fidx;
trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0);
- is_fecn = process_ecn(qp, packet);
+ process_ecn(qp, packet);
psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth);
req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn));
@@ -4846,10 +4911,10 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
struct tid_rdma_flow *flow;
struct tid_flow_state *fs = &qpriv->flow_state;
u32 psn, generation, idx, gen_next;
- bool is_fecn;
+ bool fecn;
unsigned long flags;
- is_fecn = process_ecn(qp, packet);
+ fecn = process_ecn(qp, packet);
psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT;
@@ -4940,6 +5005,8 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
qpriv->s_flags |= RVT_S_ACK_PENDING;
hfi1_schedule_tid_send(qp);
bail:
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
spin_unlock_irqrestore(&qp->s_lock, flags);
}
@@ -5017,24 +5084,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
make_tid_rdma_ack(qp, ohdr, ps))
return 1;
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (iowait_sdma_pending(&priv->s_iowait)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- clear_ahg(qp);
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
- /* will get called again */
- goto done_free_tx;
- }
+ /*
+ * Bail out if we can't send data.
+ * Note that this check must be done after the call to
+ * make_tid_rdma_ack() because the responding QP could be in
+ * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+ */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+ goto bail;
if (priv->s_flags & RVT_S_WAIT_ACK)
goto bail;
@@ -5144,11 +5201,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
middle, ps);
return 1;
-done_free_tx:
- hfi1_put_txreq(ps->s_txreq);
- ps->s_txreq = NULL;
- return 1;
-
bail:
hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
@@ -5464,3 +5516,48 @@ bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
}
return false;
}
+
+static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx)
+{
+ u64 reg;
+
+ /*
+ * The only sane way to get the amount of
+ * progress is to read the HW flow state.
+ */
+ reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx));
+ return mask_psn(reg);
+}
+
+static void tid_rdma_rcv_err(struct hfi1_packet *packet,
+ struct ib_other_headers *ohdr,
+ struct rvt_qp *qp, u32 psn, int diff, bool fecn)
+{
+ unsigned long flags;
+
+ tid_rdma_rcv_error(packet, ohdr, qp, psn, diff);
+ if (fecn) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qp->s_flags |= RVT_S_ECN;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+}
+
+static void update_r_next_psn_fecn(struct hfi1_packet *packet,
+ struct hfi1_qp_priv *priv,
+ struct hfi1_ctxtdata *rcd,
+ struct tid_rdma_flow *flow,
+ bool fecn)
+{
+ /*
+ * If a start/middle packet is delivered here due to
+ * RSM rule and FECN, we need to update the r_next_psn.
+ */
+ if (fecn && packet->etype == RHF_RCV_TYPE_EAGER &&
+ !(priv->s_flags & HFI1_R_TID_SW_PSN)) {
+ struct hfi1_devdata *dd = rcd->dd;
+
+ flow->flow_state.r_next_psn =
+ read_r_next_psn(dd, rcd->ctxt, flow->idx);
+ }
+}
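
Several of the tid_rdma.c hunks above replace equality tests on r_next_psn with sign-based comparisons, so late duplicates are tolerated while packets genuinely ahead of expectation still trigger a NAK. Below is a hedged sketch of circular sequence-number comparison, assuming a 24-bit PSN space; the mask, shift and names are illustrative, not the driver's own helpers:

#include <linux/types.h>

#define DEMO_PSN_BITS	24
#define DEMO_PSN_MASK	((1u << DEMO_PSN_BITS) - 1)

static inline u32 demo_mask_psn(u32 psn)
{
	return psn & DEMO_PSN_MASK;
}

/* < 0: a is behind b, 0: equal, > 0: a is ahead of b (with wraparound). */
static inline int demo_cmp_psn(u32 a, u32 b)
{
	return ((int)((a - b) << (32 - DEMO_PSN_BITS))) >>
	       (32 - DEMO_PSN_BITS);
}

/* Sketch of the decision taken in the KDETH eflags hunk above. */
static inline bool demo_should_nak(u32 psn, u32 r_next_psn)
{
	int diff = demo_cmp_psn(psn, r_next_psn);

	/* diff < 0 means an old duplicate and is silently ignored. */
	return diff > 0;
}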
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
index 53ab24ef4f02..1c536185261e 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.h
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
@@ -76,10 +76,8 @@ struct tid_rdma_qp_params {
struct tid_flow_state {
u32 generation;
u32 psn;
- u32 r_next_psn; /* next PSN to be received (in TID space) */
u8 index;
u8 last_index;
- u8 flags;
};
enum tid_rdma_req_state {
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index e62171fb7379..de7a87392b8d 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -86,14 +86,14 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
* actual function to work and can not be in a macro.
*/
#define __hfi1_trace_def(lvl) \
-void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
+void __printf(2, 3) __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
\
DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
TP_PROTO(const char *function, struct va_format *vaf), \
TP_ARGS(function, vaf))
#define __hfi1_trace_fn(lvl) \
-void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
+void __printf(2, 3) __hfi1_trace_##lvl(const char *func, char *fmt, ...)\
{ \
struct va_format vaf = { \
.fmt = fmt, \
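
The __printf(2, 3) annotations added above let gcc/clang type-check each format string against its variadic arguments at every call site. A minimal self-contained illustration (demo_trace() is a hypothetical helper, not part of the driver):

#include <linux/kernel.h>	/* __printf(), struct va_format, %pV */

static void __printf(2, 3) demo_trace(const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands the caller's format/args inside our own format. */
	pr_debug("%s: %pV", func, &vaf);
	va_end(args);
}

/* demo_trace("foo", "%d", "bar"); would now draw a compiler warning. */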
diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h
index 548dfc45a407..4388b594ed1b 100644
--- a/drivers/infiniband/hw/hfi1/trace_tid.h
+++ b/drivers/infiniband/hw/hfi1/trace_tid.h
@@ -53,7 +53,7 @@ u16 hfi1_trace_get_tid_idx(u32 ent);
"tid_r_comp %u pending_tid_r_segs %u " \
"s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
"s_state 0x%x hw_flow_index %u generation 0x%x " \
- "fpsn 0x%x flow_flags 0x%x"
+ "fpsn 0x%x"
#define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
"cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \
@@ -71,7 +71,7 @@ u16 hfi1_trace_get_tid_idx(u32 ent);
"pending_tid_w_segs %u sync_pt %s " \
"ps_nak_psn 0x%x ps_nak_state 0x%x " \
"prnr_nak_state 0x%x hw_flow_index %u generation "\
- "0x%x fpsn 0x%x flow_flags 0x%x resync %s" \
+ "0x%x fpsn 0x%x resync %s" \
"r_next_psn_kdeth 0x%x"
#define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \
@@ -973,7 +973,6 @@ DECLARE_EVENT_CLASS(/* tid_read_sender */
__field(u32, hw_flow_index)
__field(u32, generation)
__field(u32, fpsn)
- __field(u32, flow_flags)
),
TP_fast_assign(/* assign */
struct hfi1_qp_priv *priv = qp->priv;
@@ -991,7 +990,6 @@ DECLARE_EVENT_CLASS(/* tid_read_sender */
__entry->hw_flow_index = priv->flow_state.index;
__entry->generation = priv->flow_state.generation;
__entry->fpsn = priv->flow_state.psn;
- __entry->flow_flags = priv->flow_state.flags;
),
TP_printk(/* print */
TID_READ_SENDER_PRN,
@@ -1007,8 +1005,7 @@ DECLARE_EVENT_CLASS(/* tid_read_sender */
__entry->s_state,
__entry->hw_flow_index,
__entry->generation,
- __entry->fpsn,
- __entry->flow_flags
+ __entry->fpsn
)
);
@@ -1338,7 +1335,6 @@ DECLARE_EVENT_CLASS(/* tid_write_sp */
__field(u32, hw_flow_index)
__field(u32, generation)
__field(u32, fpsn)
- __field(u32, flow_flags)
__field(bool, resync)
__field(u32, r_next_psn_kdeth)
),
@@ -1360,7 +1356,6 @@ DECLARE_EVENT_CLASS(/* tid_write_sp */
__entry->hw_flow_index = priv->flow_state.index;
__entry->generation = priv->flow_state.generation;
__entry->fpsn = priv->flow_state.psn;
- __entry->flow_flags = priv->flow_state.flags;
__entry->resync = priv->resync;
__entry->r_next_psn_kdeth = priv->r_next_psn_kdeth;
),
@@ -1381,7 +1376,6 @@ DECLARE_EVENT_CLASS(/* tid_write_sp */
__entry->hw_flow_index,
__entry->generation,
__entry->fpsn,
- __entry->flow_flags,
__entry->resync ? "yes" : "no",
__entry->r_next_psn_kdeth
)
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 24b592c6522e..02eee8eff1db 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -104,8 +104,9 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
bool writable, struct page **pages)
{
int ret;
+ unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
- ret = get_user_pages_fast(vaddr, npages, writable, pages);
+ ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
if (ret < 0)
return ret;
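
get_user_pages_fast() now takes a gup_flags word instead of a plain write flag, and pins that outlive the syscall must carry FOLL_LONGTERM. A brief sketch of the calling convention (demo_pin_pages() and its error handling are illustrative only):

#include <linux/mm.h>

static int demo_pin_pages(unsigned long vaddr, int npages, bool writable,
			  struct page **pages)
{
	unsigned int gup_flags = FOLL_LONGTERM |
				 (writable ? FOLL_WRITE : 0);

	/* Returns the number of pages pinned or a negative errno. */
	return get_user_pages_fast(vaddr, npages, gup_flags, pages);
}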
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 55a56b3d7f83..1eb4105b2d22 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1223,15 +1223,16 @@ static inline send_routine get_send_routine(struct rvt_qp *qp,
case IB_QPT_UD:
break;
case IB_QPT_UC:
- case IB_QPT_RC: {
+ case IB_QPT_RC:
+ priv->s_running_pkt_size =
+ (tx->s_cur_size + priv->s_running_pkt_size) / 2;
if (piothreshold &&
- tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
+ priv->s_running_pkt_size <= min(piothreshold, qp->pmtu) &&
(BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
iowait_sdma_pending(&priv->s_iowait) == 0 &&
!sdma_txreq_built(&tx->txreq))
return dd->process_pio_send;
break;
- }
default:
break;
}
@@ -1739,15 +1740,15 @@ static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
static u64 hfi1_sps_ints(void)
{
- unsigned long flags;
+ unsigned long index, flags;
struct hfi1_devdata *dd;
u64 sps_ints = 0;
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- list_for_each_entry(dd, &hfi1_dev_list, list) {
+ xa_lock_irqsave(&hfi1_dev_table, flags);
+ xa_for_each(&hfi1_dev_table, index, dd) {
sps_ints += get_all_cpu_total(dd->int_counter);
}
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ xa_unlock_irqrestore(&hfi1_dev_table, flags);
return sps_ints;
}
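
get_send_routine() above stops gating PIO on the size of the current packet alone and instead keeps a running average that qp_priv_alloc() seeds with piothreshold / 2, so a single outlier packet is less likely to flip the PIO/SDMA choice on its own. A small sketch of the rule with illustrative names:

#include <linux/kernel.h>	/* min() */
#include <linux/types.h>

/* Each packet pulls the estimate halfway toward its own size. */
static inline u16 demo_update_running_size(u16 running, u32 cur_size)
{
	return (cur_size + running) / 2;
}

/* PIO stays attractive only while the average remains small. */
static inline bool demo_prefer_pio(u16 running, u32 piothreshold, u32 pmtu)
{
	return piothreshold && running <= min(piothreshold, pmtu);
}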
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 62ace0b2d17a..7ecb8ed4a1d9 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -170,6 +170,7 @@ struct hfi1_qp_priv {
struct tid_flow_state flow_state;
struct tid_rdma_qp_params tid_rdma;
struct rvt_qp *owner;
+ u16 s_running_pkt_size;
u8 hdr_type; /* 9B or 16B */
struct rvt_sge_state tid_ss; /* SGE state pointer for 2nd leg */
atomic_t n_requests; /* # of TID RDMA requests in the */
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index a922db58be14..b49e60e8397d 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -162,12 +162,12 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
void hfi1_vnic_setup(struct hfi1_devdata *dd)
{
- idr_init(&dd->vnic.vesw_idr);
+ xa_init(&dd->vnic.vesws);
}
void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
{
- idr_destroy(&dd->vnic.vesw_idr);
+ WARN_ON(!xa_empty(&dd->vnic.vesws));
}
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \
@@ -423,8 +423,7 @@ tx_finish:
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
struct opa_vnic_skb_mdata *mdata;
@@ -534,7 +533,7 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
l4_type = hfi1_16B_get_l4(packet->ebuf);
if (likely(l4_type == OPA_16B_L4_ETHR)) {
vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
- vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id);
+ vinfo = xa_load(&dd->vnic.vesws, vesw_id);
/*
* In case of invalid vesw id, count the error on
@@ -542,9 +541,10 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
*/
if (unlikely(!vinfo)) {
struct hfi1_vnic_vport_info *vinfo_tmp;
- int id_tmp = 0;
+ unsigned long index = 0;
- vinfo_tmp = idr_get_next(&dd->vnic.vesw_idr, &id_tmp);
+ vinfo_tmp = xa_find(&dd->vnic.vesws, &index, ULONG_MAX,
+ XA_PRESENT);
if (vinfo_tmp) {
spin_lock(&vport_cntr_lock);
vinfo_tmp->stats[0].netstats.rx_nohandler++;
@@ -598,8 +598,7 @@ static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
if (!vinfo->vesw_id)
return -EINVAL;
- rc = idr_alloc(&dd->vnic.vesw_idr, vinfo, vinfo->vesw_id,
- vinfo->vesw_id + 1, GFP_NOWAIT);
+ rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id, vinfo, GFP_KERNEL);
if (rc < 0)
return rc;
@@ -625,7 +624,7 @@ static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
clear_bit(HFI1_VNIC_UP, &vinfo->flags);
netif_carrier_off(vinfo->netdev);
netif_tx_disable(vinfo->netdev);
- idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id);
+ xa_erase(&dd->vnic.vesws, vinfo->vesw_id);
/* ensure irqs see the change */
msix_vnic_synchronize_irq(dd);
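
The vnic conversion above differs from the unit-table one: vesw IDs are assigned externally, so the code uses xa_insert(), which fails with -EBUSY when the index is already occupied, rather than the ID-allocating xa_alloc(). A hedged sketch of that keyed-store pattern (names invented for the example):

#include <linux/xarray.h>

static DEFINE_XARRAY(demo_vesws);	/* keyed by an externally chosen id */

static int demo_register(u32 vesw_id, void *vinfo)
{
	/* 0 on success, -EBUSY if vesw_id is already present. */
	return xa_insert(&demo_vesws, vesw_id, vinfo, GFP_KERNEL);
}

static void *demo_find(u32 vesw_id)
{
	return xa_load(&demo_vesws, vesw_id);
}

static void demo_unregister(u32 vesw_id)
{
	xa_erase(&demo_vesws, vesw_id);
}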
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index e2a7f1488f76..eee5205f936f 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -7,8 +7,8 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
- hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o
+ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
hns-roce-hw-v1-objs := hns_roce_hw_v1.o
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index b3c8c45ec1e3..cdd2ac24fc2a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,38 +39,34 @@
#define HNS_ROCE_VLAN_SL_BIT_MASK 7
#define HNS_ROCE_VLAN_SL_SHIFT 13
-struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
- struct ib_udata *udata)
+int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
- struct hns_roce_ah *ah;
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
bool vlan_en = false;
+ int ret;
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+ gid_attr = ah_attr->grh.sgid_attr;
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_tag, NULL);
+ if (ret)
+ return ret;
/* Get mac address */
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
- gid_attr = ah_attr->grh.sgid_attr;
- if (is_vlan_dev(gid_attr->ndev)) {
- vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+ if (vlan_tag < VLAN_CFI_MASK) {
vlan_en = true;
- }
-
- if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
+ }
- ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
+ ah->av.port_pd = cpu_to_le32(to_hr_pd(ibah->pd)->pdn |
(rdma_ah_get_port_num(ah_attr) <<
HNS_ROCE_PORT_NUM_SHIFT));
ah->av.gid_index = grh->sgid_index;
@@ -86,7 +82,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
HNS_ROCE_SL_SHIFT);
- return &ah->ibah;
+ return 0;
}
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
@@ -111,9 +107,7 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
return 0;
}
-int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
+void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
- kfree(to_hr_ah(ah));
-
- return 0;
+ return;
}
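
In the hns_roce_ah.c hunks above the RDMA core now allocates the AH object itself, sized to hold the driver's private structure, so create_ah only fills in its embedded container and destroy_ah becomes a no-op. A generic sketch of the container_of pattern this relies on (all names below are invented for illustration):

#include <linux/kernel.h>	/* container_of() */
#include <linux/types.h>

struct demo_core_ah {
	void *device;
};

struct demo_drv_ah {
	struct demo_core_ah ibah;	/* embedded core object */
	u32 gid_index;
};

static inline struct demo_drv_ah *to_demo_ah(struct demo_core_ah *ibah)
{
	return container_of(ibah, struct demo_drv_ah, ibah);
}

/* The core allocates sizeof(struct demo_drv_ah); the driver just fills it. */
static int demo_create_ah(struct demo_core_ah *ibah, u32 gid_index)
{
	struct demo_drv_ah *ah = to_demo_ah(ibah);

	ah->gid_index = gid_index;
	return 0;	/* the core, not the driver, frees the memory later */
}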
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 059fd1da493e..2b6ac646ca9a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -53,6 +53,7 @@ enum {
HNS_ROCE_CMD_QUERY_QPC = 0x42,
HNS_ROCE_CMD_MODIFY_CQC = 0x52,
+ HNS_ROCE_CMD_QUERY_CQC = 0x53,
/* CQC BT commands */
HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index f4c92a7ac1ce..8e95a1aa1b4f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -57,32 +57,6 @@
#define roce_set_bit(origin, shift, val) \
roce_set_field((origin), (1ul << (shift)), (shift), (val))
-/*
- * roce_hw_index_cmp_lt - Compare two hardware index values in hisilicon
- * SOC, check if a is less than b.
- * @a: hardware index value
- * @b: hardware index value
- * @bits: the number of bits of a and b, range: 0~31.
- *
- * Hardware index increases continuously till max value, and then restart
- * from zero, again and again. Because the bits of reg field is often
- * limited, the reg field can only hold the low bits of the hardware index
- * in hisilicon SOC.
- * In some scenes we need to compare two values(a,b) getted from two reg
- * fields in this driver, for example:
- * If a equals 0xfffe, b equals 0x1 and bits equals 16, we think b has
- * incresed from 0xffff to 0x1 and a is less than b.
- * If a equals 0xfffe, b equals 0x0xf001 and bits equals 16, we think a
- * is bigger than b.
- *
- * Return true on a less than b, otherwise false.
- */
-#define roce_hw_index_mask(bits) ((1ul << (bits)) - 1)
-#define roce_hw_index_shift(bits) (32 - (bits))
-#define roce_hw_index_cmp_lt(a, b, bits) \
- ((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \
- roce_hw_index_shift(bits)) < 0)
-
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
@@ -271,8 +245,6 @@
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
-#define ROCEE_SDB_PTR_CMP_BITS 28
-
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
@@ -353,13 +325,8 @@
#define ROCEE_CAEP_AE_MASK_REG 0x6C8
#define ROCEE_CAEP_AE_ST_REG 0x6CC
-#define ROCEE_SDB_ISSUE_PTR_REG 0x758
-#define ROCEE_SDB_SEND_PTR_REG 0x75C
#define ROCEE_CAEP_CQE_WCMD_EMPTY 0x850
#define ROCEE_SCAEP_WR_CQE_CNT 0x8D0
-#define ROCEE_SDB_INV_CNT_REG 0x9A4
-#define ROCEE_SDB_RETRY_CNT_REG 0x9AC
-#define ROCEE_TSP_BP_ST_REG 0x9EC
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 1dfe5627006c..9caf35061721 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
@@ -127,13 +128,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
goto err_out;
}
- /* The cq insert radix tree */
- spin_lock_irq(&cq_table->lock);
- /* Radix_tree: The associated pointer and long integer key value like */
- ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
- spin_unlock_irq(&cq_table->lock);
+ ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "CQ alloc.Failed to radix_tree_insert.\n");
+ dev_err(dev, "CQ alloc failed xa_store.\n");
goto err_put;
}
@@ -141,7 +138,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
- goto err_radix;
+ goto err_xa;
}
hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
@@ -152,7 +149,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
- goto err_radix;
+ goto err_xa;
}
hr_cq->cons_index = 0;
@@ -164,10 +161,8 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
return 0;
-err_radix:
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, hr_cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+err_xa:
+ xa_erase(&cq_table->array, hr_cq->cqn);
err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
@@ -197,6 +192,8 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
+ xa_erase(&cq_table->array, hr_cq->cqn);
+
/* Waiting interrupt process procedure carried out */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
@@ -205,10 +202,6 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
complete(&hr_cq->free);
wait_for_completion(&hr_cq->free);
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, hr_cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
@@ -309,7 +302,6 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
@@ -321,6 +313,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
int vector = attr->comp_vector;
int cq_entries = attr->cqe;
int ret;
+ struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
@@ -339,7 +333,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
hr_cq->ib_cq.cqe = cq_entries - 1;
spin_lock_init(&hr_cq->lock);
- if (context) {
+ if (udata) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "Failed to copy_from_udata.\n");
ret = -EFAULT;
@@ -357,8 +351,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp))) {
- ret = hns_roce_db_map_user(to_hr_ucontext(context),
- udata, ucmd.db_addr,
+ ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
&hr_cq->db);
if (ret) {
dev_err(dev, "cq record doorbell map failed!\n");
@@ -369,7 +362,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
}
/* Get user space parameters */
- uar = &to_hr_ucontext(context)->uar;
+ uar = &context->uar;
} else {
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
@@ -408,7 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
* problems if tptr is set to zero here, so we initialze it in user
* space.
*/
- if (!context && hr_cq->tptr_addr)
+ if (!udata && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
/* Get created cq handler and carry out event */
@@ -416,7 +409,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
hr_cq->event = hns_roce_ib_cq_event;
hr_cq->cq_depth = cq_entries;
- if (context) {
+ if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret)
@@ -429,21 +422,20 @@ err_cqc:
hns_roce_free_cq(hr_dev, hr_cq);
err_dbmap:
- if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)))
- hns_roce_db_unmap_user(to_hr_ucontext(context),
- &hr_cq->db);
+ hns_roce_db_unmap_user(context, &hr_cq->db);
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- if (context)
+ if (udata)
ib_umem_release(hr_cq->umem);
else
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
hr_cq->ib_cq.cqe);
err_db:
- if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
+ if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
hns_roce_free_db(hr_dev, &hr_cq->db);
err_cq:
@@ -452,24 +444,27 @@ err_cq:
}
EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
-int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
+int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
int ret = 0;
if (hr_dev->hw->destroy_cq) {
- ret = hr_dev->hw->destroy_cq(ib_cq);
+ ret = hr_dev->hw->destroy_cq(ib_cq, udata);
} else {
hns_roce_free_cq(hr_dev, hr_cq);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- if (ib_cq->uobject) {
+ if (udata) {
ib_umem_release(hr_cq->umem);
if (hr_cq->db_en == 1)
hns_roce_db_unmap_user(
- to_hr_ucontext(ib_cq->uobject->context),
+ rdma_udata_to_drv_context(
+ udata,
+ struct hns_roce_ucontext,
+ ibucontext),
&hr_cq->db);
} else {
/* Free the buff of stored cq */
@@ -491,8 +486,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq;
- cq = radix_tree_lookup(&hr_dev->cq_table.tree,
- cqn & (hr_dev->caps.num_cqs - 1));
+ cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
if (!cq) {
dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
return;
@@ -509,8 +503,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq;
- cq = radix_tree_lookup(&cq_table->tree,
- cqn & (hr_dev->caps.num_cqs - 1));
+ cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
if (cq)
atomic_inc(&cq->refcount);
@@ -530,8 +523,7 @@ int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
- spin_lock_init(&cq_table->lock);
- INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+ xa_init(&cq_table->array);
return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
hr_dev->caps.num_cqs - 1,
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 9ee86daf1700..563cf39df6d5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -505,7 +505,6 @@ struct hns_roce_uar_table {
struct hns_roce_qp_table {
struct hns_roce_bitmap bitmap;
- spinlock_t lock;
struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table;
struct hns_roce_hem_table trrl_table;
@@ -515,8 +514,7 @@ struct hns_roce_qp_table {
struct hns_roce_cq_table {
struct hns_roce_bitmap bitmap;
- spinlock_t lock;
- struct radix_tree_root tree;
+ struct xarray array;
struct hns_roce_hem_table table;
};
@@ -869,6 +867,11 @@ struct hns_roce_work {
int sub_type;
};
+struct hns_roce_dfx_hw {
+ int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
+ int *buffer);
+};
+
struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
int (*cmq_init)(struct hns_roce_dev *hr_dev);
@@ -907,7 +910,7 @@ struct hns_roce_hw {
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
- int (*destroy_qp)(struct ib_qp *ibqp);
+ int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
@@ -916,8 +919,9 @@ struct hns_roce_hw {
const struct ib_recv_wr **bad_recv_wr);
int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
- int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
- int (*destroy_cq)(struct ib_cq *ibcq);
+ int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ struct ib_udata *udata);
+ int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
@@ -956,7 +960,7 @@ struct hns_roce_dev {
int irq[HNS_ROCE_MAX_IRQ_NUM];
u8 __iomem *reg_base;
struct hns_roce_caps caps;
- struct radix_tree_root qp_table_tree;
+ struct xarray qp_table_xa;
unsigned char dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
u64 sys_image_guid;
@@ -985,6 +989,7 @@ struct hns_roce_dev {
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
+ const struct hns_roce_dfx_hw *dfx;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1046,8 +1051,7 @@ static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
static inline struct hns_roce_qp
*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
- return radix_tree_lookup(&hr_dev->qp_table_tree,
- qpn & (hr_dev->caps.num_qps - 1));
+ return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
}
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
@@ -1107,16 +1111,13 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
unsigned long obj, int cnt,
int rr);
-struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
- struct ib_udata *udata);
+int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
+void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
-int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata);
-void hns_roce_dealloc_pd(struct ib_pd *pd);
+int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -1126,10 +1127,10 @@ int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg);
+ u32 max_num_sg, struct ib_udata *udata);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
-int hns_roce_dereg_mr(struct ib_mr *ibmr);
+int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index);
@@ -1147,13 +1148,13 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct ib_umem *umem);
-struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
+int hns_roce_create_srq(struct ib_srq *srq,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
-int hns_roce_destroy_srq(struct ib_srq *ibsrq);
+void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
@@ -1179,10 +1180,9 @@ int to_hr_qp_type(int qp_type);
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
-int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
+int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
@@ -1202,4 +1202,6 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
+int hns_roce_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index f1fec56f3ff4..8e29dbb5b5fb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
dma_offset = offset = idx_offset * table->obj_size;
} else {
+ u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
/* mtt mhop */
i = mhop.l0_idx;
@@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
hem_idx = i;
hem = table->hem[hem_idx];
- dma_offset = offset = (obj & (table->num_obj - 1)) *
- table->obj_size % mhop.bt_chunk_size;
+ dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+ mhop.bt_chunk_size;
if (mhop.hop_num == 2)
dma_offset = offset = 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 97515c340134..4c5d0f160c10 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -730,7 +730,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
/* Reserved cq for loop qp */
cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
cq_init_attr.comp_vector = 0;
- cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
+ cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL);
if (IS_ERR(cq)) {
dev_err(dev, "Create cq for reserved loop qp failed!");
return -ENOMEM;
@@ -749,7 +749,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
goto alloc_mem_failed;
pd->device = ibdev;
- ret = hns_roce_alloc_pd(pd, NULL, NULL);
+ ret = hns_roce_alloc_pd(pd, NULL);
if (ret)
goto alloc_pd_failed;
@@ -855,17 +855,17 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
create_lp_qp_failed:
for (i -= 1; i >= 0; i--) {
hr_qp = free_mr->mr_free_qp[i];
- if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
+ if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
}
- hns_roce_dealloc_pd(pd);
+ hns_roce_dealloc_pd(pd, NULL);
alloc_pd_failed:
kfree(pd);
alloc_mem_failed:
- if (hns_roce_ib_destroy_cq(cq))
+ if (hns_roce_ib_destroy_cq(cq, NULL))
dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
return ret;
@@ -888,17 +888,17 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
if (!hr_qp)
continue;
- ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
+ ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
if (ret)
dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
i, ret);
}
- ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
+ ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
if (ret)
dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
- hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
+ hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
}
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
@@ -1096,7 +1096,7 @@ free_work:
}
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr)
+ struct hns_roce_mr *mr, struct ib_udata *udata)
{
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_mr_free_work *mr_work;
@@ -1511,38 +1511,6 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
return ret;
}
-static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_des_qp *des_qp;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- des_qp = &priv->des_qp;
-
- des_qp->requeue_flag = 1;
- des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
- if (!des_qp->qp_wq) {
- dev_err(dev, "Create destroy qp workqueue failed!\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv;
- struct hns_roce_des_qp *des_qp;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- des_qp = &priv->des_qp;
-
- des_qp->requeue_flag = 0;
- flush_workqueue(des_qp->qp_wq);
- destroy_workqueue(des_qp->qp_wq);
-}
-
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
int i = 0;
@@ -1661,12 +1629,6 @@ static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
goto error_failed_tptr_init;
}
- ret = hns_roce_des_qp_init(hr_dev);
- if (ret) {
- dev_err(dev, "des qp init failed!\n");
- goto error_failed_des_qp_init;
- }
-
ret = hns_roce_free_mr_init(hr_dev);
if (ret) {
dev_err(dev, "free mr init failed!\n");
@@ -1678,9 +1640,6 @@ static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
return 0;
error_failed_free_mr_init:
- hns_roce_des_qp_free(hr_dev);
-
-error_failed_des_qp_init:
hns_roce_tptr_free(hr_dev);
error_failed_tptr_init:
@@ -1698,7 +1657,6 @@ static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
hns_roce_free_mr_free(hr_dev);
- hns_roce_des_qp_free(hr_dev);
hns_roce_tptr_free(hr_dev);
hns_roce_bt_free(hr_dev);
hns_roce_raq_free(hr_dev);
@@ -1750,8 +1708,6 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
writel(val, hcr + 5);
- mmiowb();
-
return 0;
}
@@ -3644,307 +3600,22 @@ static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
}
-static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
- u32 *old_send, u32 *old_retry,
- u32 *tsp_st, u32 *success_flags)
-{
- __le32 *old_send_tmp, *old_retry_tmp;
- u32 sdb_retry_cnt;
- u32 sdb_send_ptr;
- u32 cur_cnt, old_cnt;
- __le32 tmp, tmp1;
- u32 send_ptr;
-
- sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
- sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
- tmp = cpu_to_le32(sdb_send_ptr);
- tmp1 = cpu_to_le32(sdb_retry_cnt);
- cur_cnt = roce_get_field(tmp, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(tmp1, ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
-
- old_send_tmp = (__le32 *)old_send;
- old_retry_tmp = (__le32 *)old_retry;
- if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
- old_cnt = roce_get_field(*old_send_tmp,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(*old_retry_tmp,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
- *success_flags = 1;
- } else {
- old_cnt = roce_get_field(*old_send_tmp,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
- if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
- *success_flags = 1;
- } else {
- send_ptr = roce_get_field(*old_send_tmp,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(tmp1,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- roce_set_field(*old_send_tmp,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
- send_ptr);
- }
- }
-}
-
-static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- u32 sdb_issue_ptr,
- u32 *sdb_inv_cnt,
- u32 *wait_stage)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 sdb_send_ptr, old_send;
- __le32 sdb_issue_ptr_tmp;
- __le32 sdb_send_ptr_tmp;
- u32 success_flags = 0;
- unsigned long end;
- u32 old_retry;
- u32 inv_cnt;
- u32 tsp_st;
- __le32 tmp;
-
- if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
- *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
- dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
- hr_qp->qpn, *wait_stage);
- return -EINVAL;
- }
-
- /* Calculate the total timeout for the entire verification process */
- end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;
-
- if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
- /* Query db process status, until hw process completely */
- sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
- while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
- ROCEE_SDB_PTR_CMP_BITS)) {
- if (!time_before(jiffies, end)) {
- dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
- hr_qp->qpn, sdb_issue_ptr,
- sdb_send_ptr);
- return 0;
- }
-
- msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
- sdb_send_ptr = roce_read(hr_dev,
- ROCEE_SDB_SEND_PTR_REG);
- }
-
- sdb_send_ptr_tmp = cpu_to_le32(sdb_send_ptr);
- sdb_issue_ptr_tmp = cpu_to_le32(sdb_issue_ptr);
- if (roce_get_field(sdb_issue_ptr_tmp,
- ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
- ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
- roce_get_field(sdb_send_ptr_tmp,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
- old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
- old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
-
- do {
- tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
- tmp = cpu_to_le32(tsp_st);
- if (roce_get_bit(tmp,
- ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
- *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
- return 0;
- }
-
- if (!time_before(jiffies, end)) {
- dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
- "issue 0x%x send 0x%x.\n",
- hr_qp->qpn,
- le32_to_cpu(sdb_issue_ptr_tmp),
- le32_to_cpu(sdb_send_ptr_tmp));
- return 0;
- }
-
- msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
-
- hns_roce_check_sdb_status(hr_dev, &old_send,
- &old_retry, &tsp_st,
- &success_flags);
- } while (!success_flags);
- }
-
- *wait_stage = HNS_ROCE_V1_DB_STAGE2;
-
- /* Get list pointer */
- *sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
- dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
- hr_qp->qpn, *sdb_inv_cnt);
- }
-
- if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
- /* Query db's list status, until hw reversal */
- inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
- while (roce_hw_index_cmp_lt(inv_cnt,
- *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
- ROCEE_SDB_CNT_CMP_BITS)) {
- if (!time_before(jiffies, end)) {
- dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
- hr_qp->qpn, inv_cnt);
- return 0;
- }
-
- msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
- inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
- }
-
- *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
- }
-
- return 0;
-}
-
-static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_qp_work *qp_work_entry,
- int *is_timeout)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 sdb_issue_ptr;
- int ret;
-
- if (hr_qp->state != IB_QPS_RESET) {
- /* Set qp to ERR, waiting for hw complete processing all dbs */
- ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
- IB_QPS_ERR);
- if (ret) {
- dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
- hr_qp->qpn);
- return ret;
- }
-
- /* Record issued doorbell */
- sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
- qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
- qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;
-
- /* Query db process status, until hw process completely */
- ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
- &qp_work_entry->sdb_inv_cnt,
- &qp_work_entry->db_wait_stage);
- if (ret) {
- dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
- hr_qp->qpn);
- return ret;
- }
-
- if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
- qp_work_entry->sche_cnt = 0;
- *is_timeout = 1;
- return 0;
- }
-
- /* Modify qp to reset before destroying qp */
- ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
- IB_QPS_RESET);
- if (ret) {
- dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
- hr_qp->qpn);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
-{
- struct hns_roce_qp_work *qp_work_entry;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_dev *hr_dev;
- struct hns_roce_qp *hr_qp;
- struct device *dev;
- unsigned long qpn;
- int ret;
-
- qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
- hr_dev = to_hr_dev(qp_work_entry->ib_dev);
- dev = &hr_dev->pdev->dev;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- hr_qp = qp_work_entry->qp;
- qpn = hr_qp->qpn;
-
- dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
-
- qp_work_entry->sche_cnt++;
-
- /* Query db process status, until hw process completely */
- ret = check_qp_db_process_status(hr_dev, hr_qp,
- qp_work_entry->sdb_issue_ptr,
- &qp_work_entry->sdb_inv_cnt,
- &qp_work_entry->db_wait_stage);
- if (ret) {
- dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
- qpn);
- return;
- }
-
- if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
- priv->des_qp.requeue_flag) {
- queue_work(priv->des_qp.qp_wq, work);
- return;
- }
-
- /* Modify qp to reset before destroying qp */
- ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
- IB_QPS_RESET);
- if (ret) {
- dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
- return;
- }
-
- hns_roce_qp_remove(hr_dev, hr_qp);
- hns_roce_qp_free(hr_dev, hr_qp);
-
- if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
- /* RC QP, release QPN */
- hns_roce_release_range_qp(hr_dev, qpn, 1);
- kfree(hr_qp);
- } else
- kfree(hr_to_hr_sqp(hr_qp));
-
- kfree(qp_work_entry);
-
- dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
-}
-
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp_work qp_work_entry;
- struct hns_roce_qp_work *qp_work;
- struct hns_roce_v1_priv *priv;
struct hns_roce_cq *send_cq, *recv_cq;
- bool is_user = ibqp->uobject;
- int is_timeout = 0;
int ret;
- ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
- if (ret) {
- dev_err(dev, "QP reset state check failed(%d)!\n", ret);
+ ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
+ if (ret)
return ret;
- }
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
hns_roce_lock_cqs(send_cq, recv_cq);
- if (!is_user) {
+ if (!udata) {
__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
to_hr_srq(hr_qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
@@ -3952,18 +3623,16 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
}
hns_roce_unlock_cqs(send_cq, recv_cq);
- if (!is_timeout) {
- hns_roce_qp_remove(hr_dev, hr_qp);
- hns_roce_qp_free(hr_dev, hr_qp);
+ hns_roce_qp_remove(hr_dev, hr_qp);
+ hns_roce_qp_free(hr_dev, hr_qp);
- /* RC QP, release QPN */
- if (hr_qp->ibqp.qp_type == IB_QPT_RC)
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
- }
+ /* RC QP, release QPN */
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
- if (is_user)
+ if (udata)
ib_umem_release(hr_qp->umem);
else {
kfree(hr_qp->sq.wrid);
@@ -3972,33 +3641,14 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
- if (!is_timeout) {
- if (hr_qp->ibqp.qp_type == IB_QPT_RC)
- kfree(hr_qp);
- else
- kfree(hr_to_hr_sqp(hr_qp));
- } else {
- qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
- if (!qp_work)
- return -ENOMEM;
-
- INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
- qp_work->ib_dev = &hr_dev->ib_dev;
- qp_work->qp = hr_qp;
- qp_work->db_wait_stage = qp_work_entry.db_wait_stage;
- qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr;
- qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
- qp_work->sche_cnt = qp_work_entry.sche_cnt;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- queue_work(priv->des_qp.qp_wq, &qp_work->work);
- dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
- }
-
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+ kfree(hr_qp);
+ else
+ kfree(hr_to_hr_sqp(hr_qp));
return 0;
}
-static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
+static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 66440147d9eb..52307b2c7100 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -110,11 +110,6 @@
#define HNS_ROCE_V1_EXT_ODB_ALFUL \
(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-#define HNS_ROCE_V1_DB_WAIT_OK 0
-#define HNS_ROCE_V1_DB_STAGE1 1
-#define HNS_ROCE_V1_DB_STAGE2 2
-#define HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS 10000
-#define HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS 20
#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000
#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000
#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5
@@ -162,7 +157,6 @@
#define SQ_PSN_SHIFT 8
#define QKEY_VAL 0x80010000
#define SDB_INV_CNT_OFFSET 8
-#define SDB_ST_CMP_VAL 8
#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
@@ -1068,11 +1062,6 @@ struct hns_roce_qp_work {
u32 sche_cnt;
};
-struct hns_roce_des_qp {
- struct workqueue_struct *qp_wq;
- int requeue_flag;
-};
-
struct hns_roce_mr_free_work {
struct work_struct work;
struct ib_device *ib_dev;
@@ -1100,12 +1089,11 @@ struct hns_roce_v1_priv {
struct hns_roce_raq_table raq_table;
struct hns_roce_bt_table bt_table;
struct hns_roce_tptr_table tptr_table;
- struct hns_roce_des_qp des_qp;
struct hns_roce_free_mr free_mr;
};
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp);
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 1c54390e1c85..b5392cb5b20f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -37,7 +37,9 @@
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
#include "hnae3.h"
#include "hns_roce_common.h"
@@ -1086,7 +1088,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
return ret;
}
-int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
int retval;
@@ -1559,7 +1561,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->srqc_ba_pg_sz = 0;
caps->srqc_buf_pg_sz = 0;
- caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->cqc_ba_pg_sz = 0;
caps->cqc_buf_pg_sz = 0;
caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
@@ -2150,7 +2152,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
V2_MPT_BYTE_4_PD_S, mr->pd);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
(mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
@@ -3171,12 +3173,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
- if (attr_mask & IB_QP_QKEY) {
- context->qkey_xrcd = attr->qkey;
- qpc_mask->qkey_xrcd = 0;
- hr_qp->qkey = attr->qkey;
- }
-
if (hr_qp->rdb_en) {
roce_set_bit(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
@@ -3388,7 +3384,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
0);
hr_qp->access_flags = attr->qp_access_flags;
- hr_qp->pkey_index = attr->pkey_index;
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
@@ -3512,11 +3507,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
}
- if (attr_mask & IB_QP_QKEY) {
- context->qkey_xrcd = attr->qkey;
- qpc_mask->qkey_xrcd = 0;
- }
-
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
@@ -3636,13 +3626,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
- roce_set_field(context->byte_80_rnr_rx_cqn,
- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
- V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
- roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
- V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
-
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
>> PAGE_ADDR_SHIFT);
@@ -3670,13 +3653,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
- roce_set_field(context->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
- V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
- roce_set_field(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
- V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
-
roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
@@ -3715,15 +3691,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
}
- if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
- attr->max_dest_rd_atomic) {
- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S,
- fls(attr->max_dest_rd_atomic - 1));
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S, 0);
- }
-
if (attr_mask & IB_QP_DEST_QPN) {
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
@@ -3784,11 +3751,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
context->rq_rnr_timer = 0;
qpc_mask->rq_rnr_timer = 0;
- roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
- V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
- roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
- V2_QPC_BYTE_152_RAQ_PSN_S, 0);
-
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
@@ -3886,13 +3848,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_240_RX_ACK_MSN_M,
V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
- roce_set_field(context->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
- V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
- V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
-
roce_set_field(qpc_mask->byte_248_ack_psn,
V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
@@ -3906,27 +3861,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
- roce_set_field(context->byte_220_retry_psn_msn,
- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_220_retry_psn_msn,
- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
-
- roce_set_field(context->byte_224_retry_msg,
- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
- roce_set_field(qpc_mask->byte_224_retry_msg,
- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
-
- roce_set_field(context->byte_224_retry_msg,
- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_224_retry_msg,
- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
-
roce_set_field(qpc_mask->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
@@ -3937,66 +3871,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
V2_QPC_BYTE_212_CHECK_FLG_S, 0);
- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
- V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
- V2_QPC_BYTE_212_RETRY_CNT_S, 0);
-
- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
-
- roce_set_field(context->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
- V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
- roce_set_field(qpc_mask->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
- V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
-
- roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
- V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
- roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
- V2_QPC_BYTE_244_RNR_CNT_S, 0);
-
roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
V2_QPC_BYTE_212_LSN_S, 0x100);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
V2_QPC_BYTE_212_LSN_S, 0);
- if (attr_mask & IB_QP_TIMEOUT) {
- if (attr->timeout < 31) {
- roce_set_field(context->byte_28_at_fl,
- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
- attr->timeout);
- roce_set_field(qpc_mask->byte_28_at_fl,
- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
- 0);
- } else {
- dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
- }
- }
-
- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
- V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
- V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
-
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
- roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
- V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
- V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
- if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S,
- fls(attr->max_rd_atomic - 1));
- roce_set_field(qpc_mask->byte_208_irrl,
- V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S, 0);
- }
return 0;
}
@@ -4090,7 +3972,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_global_route *grh =
rdma_ah_read_grh(&attr->ah_attr);
const struct ib_gid_attr *gid_attr = NULL;
- u8 src_mac[ETH_ALEN];
int is_roce_protocol;
u16 vlan = 0xffff;
u8 ib_port;
@@ -4104,11 +3985,12 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
if (is_roce_protocol) {
gid_attr = attr->ah_attr.grh.sgid_attr;
- vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
- memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
+ if (ret)
+ goto out;
}
- if (is_vlan_dev(gid_attr->ndev)) {
+ if (vlan < VLAN_CFI_MASK) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
@@ -4190,9 +4072,152 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
}
+ if (attr_mask & IB_QP_TIMEOUT) {
+ if (attr->timeout < 31) {
+ roce_set_field(context->byte_28_at_fl,
+ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+ attr->timeout);
+ roce_set_field(qpc_mask->byte_28_at_fl,
+ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+ 0);
+ } else {
+ dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
+ }
+ }
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ roce_set_field(context->byte_212_lsn,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
+ attr->retry_cnt);
+ roce_set_field(qpc_mask->byte_212_lsn,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
+
+ roce_set_field(context->byte_212_lsn,
+ V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S,
+ attr->retry_cnt);
+ roce_set_field(qpc_mask->byte_212_lsn,
+ V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S, 0);
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ roce_set_field(context->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
+
+ roce_set_field(context->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_CNT_M,
+ V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_CNT_M,
+ V2_QPC_BYTE_244_RNR_CNT_S, 0);
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ roce_set_field(context->byte_172_sq_psn,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_172_sq_psn,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
+
+ roce_set_field(context->byte_196_sq_psn,
+ V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+ V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_196_sq_psn,
+ V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+ V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+
+ roce_set_field(context->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
+
+ roce_set_field(context->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
+ attr->sq_psn >> 16);
+ roce_set_field(qpc_mask->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
+
+ roce_set_field(context->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
+ attr->sq_psn);
+ roce_set_field(qpc_mask->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
+
+ roce_set_field(context->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+ }
+
+ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+ attr->max_dest_rd_atomic) {
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S,
+ fls(attr->max_dest_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S, 0);
+ }
+
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S,
+ fls(attr->max_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S, 0);
+ }
+
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ roce_set_field(context->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_S,
+ attr->min_rnr_timer);
+ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+ }
+
+ /* RC&UC required attr */
+ if (attr_mask & IB_QP_RQ_PSN) {
+ roce_set_field(context->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
+ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
+
+ roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+ V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
+ roce_set_field(qpc_mask->byte_152_raq,
+ V2_QPC_BYTE_152_RAQ_PSN_M,
+ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+ }
+
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ hr_qp->qkey = attr->qkey;
+ }
+
roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
ibqp->srq ? 1 : 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
@@ -4421,7 +4446,7 @@ out:
static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
- bool is_user)
+ struct ib_udata *udata)
{
struct hns_roce_cq *send_cq, *recv_cq;
struct device *dev = hr_dev->dev;
@@ -4443,7 +4468,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_lock_cqs(send_cq, recv_cq);
- if (!is_user) {
+ if (!udata) {
__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
to_hr_srq(hr_qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
@@ -4464,16 +4489,18 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
- if (is_user) {
+ if (udata) {
+ struct hns_roce_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct hns_roce_ucontext,
+ ibucontext);
+
if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
- hns_roce_db_unmap_user(
- to_hr_ucontext(hr_qp->ibqp.uobject->context),
- &hr_qp->sdb);
+ hns_roce_db_unmap_user(context, &hr_qp->sdb);
if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
- hns_roce_db_unmap_user(
- to_hr_ucontext(hr_qp->ibqp.uobject->context),
- &hr_qp->rdb);
+ hns_roce_db_unmap_user(context, &hr_qp->rdb);
ib_umem_release(hr_qp->umem);
} else {
kfree(hr_qp->sq.wrid);
@@ -4492,13 +4519,13 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
+static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret;
- ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, ibqp->uobject);
+ ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
if (ret) {
dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
return ret;
@@ -6044,6 +6071,10 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
return ret;
}
+static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
+ .query_cqc_info = hns_roce_v2_query_cqc_info,
+};
+
static const struct ib_device_ops hns_roce_v2_dev_ops = {
.destroy_qp = hns_roce_v2_destroy_qp,
.modify_cq = hns_roce_v2_modify_cq,
@@ -6113,16 +6144,10 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
- const struct pci_device_id *id;
int i;
- id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
- if (!id) {
- dev_err(hr_dev->dev, "device is not compatible!\n");
- return -ENXIO;
- }
-
hr_dev->hw = &hns_roce_hw_v2;
+ hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = hr_dev->sdb_offset;
@@ -6209,6 +6234,7 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct pci_device_id *id;
struct device *dev = &handle->pdev->dev;
int ret;
@@ -6219,6 +6245,10 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto reset_chk_err;
}
+ id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
+ if (!id)
+ return 0;
+
ret = __hns_roce_hw_v2_init_instance(handle);
if (ret) {
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
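A note on the udata plumbing above: hns_roce_v2_destroy_qp_common() now receives the ib_udata passed down by the uverbs layer and derives the driver ucontext from it with rdma_udata_to_drv_context(), instead of dereferencing ibqp->uobject->context. A minimal sketch of that lookup, assuming a hypothetical my_ucontext wrapper (illustration only, not the driver's actual helper):

#include <rdma/uverbs_ioctl.h>

struct my_ucontext {
	struct ib_ucontext ibucontext;	/* embeds the core context */
	/* driver-private doorbell / mmap state would follow */
};

/* Returns NULL for kernel-owned objects, where no user context exists. */
static struct my_ucontext *my_qp_ucontext(struct ib_udata *udata)
{
	if (!udata)
		return NULL;
	return rdma_udata_to_drv_context(udata, struct my_ucontext,
					 ibucontext);
}

The same NULL-vs-non-NULL udata test is what replaces the old is_user/ibqp->uobject checks throughout the hunks above.
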
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index f1f1b75812f9..edfdbe2ce0db 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -719,8 +719,8 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
#define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
-#define V2_QPC_BYTE_152_RAQ_PSN_S 8
-#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
+#define V2_QPC_BYTE_152_RAQ_PSN_S 0
+#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
@@ -1799,6 +1799,9 @@ struct hns_roce_sccc_clr_done {
__le32 rsv[5];
};
+int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
+ int *buffer);
+
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
new file mode 100644
index 000000000000..5a97b5a0b7be
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+// Copyright (c) 2019 Hisilicon Limited.
+
+#include "hnae3.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hw_v2.h"
+
+int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
+ int *buffer)
+{
+ struct hns_roce_v2_cq_context *cq_context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ cq_context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
+ HNS_ROCE_CMD_QUERY_CQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, cq_context, sizeof(*cq_context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c929125da84b..8da5f18bf820 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -234,25 +234,6 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
return 0;
}
-static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
- u8 port_num)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct net_device *ndev;
-
- if (port_num < 1 || port_num > hr_dev->caps.num_ports)
- return NULL;
-
- rcu_read_lock();
-
- ndev = hr_dev->iboe.netdevs[port_num - 1];
- if (ndev)
- dev_hold(ndev);
-
- rcu_read_unlock();
- return ndev;
-}
-
static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr *props)
{
@@ -455,9 +436,9 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.destroy_ah = hns_roce_destroy_ah,
.destroy_cq = hns_roce_ib_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
+ .fill_res_entry = hns_roce_fill_res_entry,
.get_dma_mr = hns_roce_get_dma_mr,
.get_link_layer = hns_roce_get_link_layer,
- .get_netdev = hns_roce_get_netdev,
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
.modify_device = hns_roce_modify_device,
@@ -468,6 +449,8 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.query_pkey = hns_roce_query_pkey,
.query_port = hns_roce_query_port,
.reg_user_mr = hns_roce_reg_user_mr,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};
@@ -489,6 +472,8 @@ static const struct ib_device_ops hns_roce_dev_frmr_ops = {
static const struct ib_device_ops hns_roce_dev_srq_ops = {
.create_srq = hns_roce_create_srq,
.destroy_srq = hns_roce_destroy_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
@@ -497,6 +482,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
struct hns_roce_ib_iboe *iboe = NULL;
struct ib_device *ib_dev = NULL;
struct device *dev = hr_dev->dev;
+ unsigned int i;
iboe = &hr_dev->iboe;
spin_lock_init(&iboe->lock);
@@ -562,6 +548,15 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->driver_id = RDMA_DRIVER_HNS;
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+ for (i = 0; i < hr_dev->caps.num_ports; i++) {
+ if (!hr_dev->iboe.netdevs[i])
+ continue;
+
+ ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
+ i + 1);
+ if (ret)
+ return ret;
+ }
ret = ib_register_device(ib_dev, "hns_%d");
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index b09f1cde2ff5..6110ec408626 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table;
dma_addr_t dma_handle;
__le64 *mtts;
- u32 s = start_index * sizeof(u64);
u32 bt_page_size;
u32 i;
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
return -EINVAL;
mtts = hns_roce_table_find(hr_dev, table,
- mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+ mtt->first_seg +
+ start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
&dma_handle);
if (!mtts)
return -ENOMEM;
@@ -1282,14 +1282,14 @@ free_cmd_mbox:
return ret;
}
-int hns_roce_dereg_mr(struct ib_mr *ibmr)
+int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr);
int ret = 0;
if (hr_dev->hw->dereg_mr) {
- ret = hr_dev->hw->dereg_mr(hr_dev, mr);
+ ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
} else {
hns_roce_mr_free(hr_dev, mr);
@@ -1303,7 +1303,7 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
}
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg)
+ u32 max_num_sg, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct device *dev = hr_dev->dev;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index b9b97c5e97e6..813401384d78 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -57,8 +57,7 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
}
-int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ib_dev = ibpd->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
@@ -72,7 +71,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return ret;
}
- if (context) {
+ if (udata) {
struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
@@ -86,7 +85,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
}
EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
-void hns_roce_dealloc_pd(struct ib_pd *pd)
+void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 57c76eafef2f..8db2817a249e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -45,17 +45,14 @@
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
- struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
struct device *dev = hr_dev->dev;
struct hns_roce_qp *qp;
- spin_lock(&qp_table->lock);
-
+ xa_lock(&hr_dev->qp_table_xa);
qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (qp)
atomic_inc(&qp->refcount);
-
- spin_unlock(&qp_table->lock);
+ xa_unlock(&hr_dev->qp_table_xa);
if (!qp) {
dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
@@ -147,29 +144,20 @@ EXPORT_SYMBOL_GPL(to_hns_roce_state);
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
struct hns_roce_qp *hr_qp)
{
- struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ struct xarray *xa = &hr_dev->qp_table_xa;
int ret;
if (!qpn)
return -EINVAL;
hr_qp->qpn = qpn;
-
- spin_lock_irq(&qp_table->lock);
- ret = radix_tree_insert(&hr_dev->qp_table_tree,
- hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
- spin_unlock_irq(&qp_table->lock);
- if (ret) {
- dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
- goto err_put_irrl;
- }
-
atomic_set(&hr_qp->refcount, 1);
init_completion(&hr_qp->free);
- return 0;
-
-err_put_irrl:
+ ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1),
+ hr_qp, GFP_KERNEL));
+ if (ret)
+ dev_err(hr_dev->dev, "QPC xa_store failed\n");
return ret;
}
@@ -220,17 +208,9 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
}
}
- spin_lock_irq(&qp_table->lock);
- ret = radix_tree_insert(&hr_dev->qp_table_tree,
- hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
- spin_unlock_irq(&qp_table->lock);
- if (ret) {
- dev_err(dev, "QPC radix_tree_insert failed\n");
+ ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
+ if (ret)
goto err_put_sccc;
- }
-
- atomic_set(&hr_qp->refcount, 1);
- init_completion(&hr_qp->free);
return 0;
@@ -255,13 +235,12 @@ err_out:
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
- struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ struct xarray *xa = &hr_dev->qp_table_xa;
unsigned long flags;
- spin_lock_irqsave(&qp_table->lock, flags);
- radix_tree_delete(&hr_dev->qp_table_tree,
- hr_qp->qpn & (hr_dev->caps.num_qps - 1));
- spin_unlock_irqrestore(&qp_table->lock, flags);
+ xa_lock_irqsave(xa, flags);
+ __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
+ xa_unlock_irqrestore(xa, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);
@@ -274,9 +253,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
wait_for_completion(&hr_qp->free);
if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
- if (hr_dev->caps.sccc_entry_sz)
- hns_roce_table_put(hr_dev, &qp_table->sccc_table,
- hr_qp->qpn);
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
@@ -536,7 +512,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
- if (attr->qp_type == IB_QPT_XRC_TGT)
+ if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
return 0;
return 1;
@@ -1157,8 +1133,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
int ret;
mutex_init(&qp_table->scc_mutex);
- spin_lock_init(&qp_table->lock);
- INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+ xa_init(&hr_dev->qp_table_xa);
/* In hw v1, a port include two SQP, six ports total 12 */
if (hr_dev->caps.max_sq_sg <= 2)
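The hns_roce_qp.c hunks above drop the driver's spinlock-plus-radix-tree QP table in favour of an XArray, whose internal xa_lock replaces qp_table->lock. A self-contained sketch of the same store/erase/lookup pattern, using an illustrative table name rather than the driver's hr_dev->qp_table_xa:

#include <linux/xarray.h>

static DEFINE_XARRAY(my_qp_table);	/* stands in for hr_dev->qp_table_xa */

static int my_qp_store(unsigned long qpn, void *qp)
{
	/* xa_store_irq() takes the internal xa_lock with IRQs disabled */
	return xa_err(xa_store_irq(&my_qp_table, qpn, qp, GFP_KERNEL));
}

static void my_qp_erase(unsigned long qpn)
{
	unsigned long flags;

	xa_lock_irqsave(&my_qp_table, flags);
	__xa_erase(&my_qp_table, qpn);
	xa_unlock_irqrestore(&my_qp_table, flags);
}

static void *my_qp_lookup(unsigned long qpn)
{
	void *qp;

	xa_lock(&my_qp_table);		/* mirrors the event-path lookup */
	qp = xa_load(&my_qp_table, qpn);
	xa_unlock(&my_qp_table);
	return qp;
}
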
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
new file mode 100644
index 000000000000..0a31d0a3d657
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+// Copyright (c) 2019 Hisilicon Limited.
+
+#include <rdma/rdma_cm.h>
+#include <rdma/restrack.h>
+#include <uapi/rdma/rdma_netlink.h>
+#include "hnae3.h"
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+static int hns_roce_fill_cq(struct sk_buff *msg,
+ struct hns_roce_v2_cq_context *context)
+{
+ if (rdma_nl_put_driver_u32(msg, "state",
+ roce_get_field(context->byte_4_pg_ceqn,
+ V2_CQC_BYTE_4_ARM_ST_M,
+ V2_CQC_BYTE_4_ARM_ST_S)))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(msg, "ceqn",
+ roce_get_field(context->byte_4_pg_ceqn,
+ V2_CQC_BYTE_4_CEQN_M,
+ V2_CQC_BYTE_4_CEQN_S)))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(msg, "cqn",
+ roce_get_field(context->byte_8_cqn,
+ V2_CQC_BYTE_8_CQN_M,
+ V2_CQC_BYTE_8_CQN_S)))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(msg, "hopnum",
+ roce_get_field(context->byte_16_hop_addr,
+ V2_CQC_BYTE_16_CQE_HOP_NUM_M,
+ V2_CQC_BYTE_16_CQE_HOP_NUM_S)))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(
+ msg, "pi",
+ roce_get_field(context->byte_28_cq_pi,
+ V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M,
+ V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S)))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(
+ msg, "ci",
+ roce_get_field(context->byte_32_cq_ci,
+ V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M,
+ V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S)))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(
+ msg, "coalesce",
+ roce_get_field(context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_S)))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(
+ msg, "period",
+ roce_get_field(context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M,
+ V2_CQC_BYTE_56_CQ_PERIOD_S)))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(msg, "cnt",
+ roce_get_field(context->byte_52_cqe_cnt,
+ V2_CQC_BYTE_52_CQE_CNT_M,
+ V2_CQC_BYTE_52_CQE_CNT_S)))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_cq *ib_cq = container_of(res, struct ib_cq, res);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct hns_roce_v2_cq_context *context;
+ struct nlattr *table_attr;
+ int ret;
+
+ if (!hr_dev->dfx->query_cqc_info)
+ return -EINVAL;
+
+ context = kzalloc(sizeof(struct hns_roce_v2_cq_context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);
+ if (ret)
+ goto err;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ if (hns_roce_fill_cq(msg, context))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ kfree(context);
+
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ kfree(context);
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_CQ)
+ return hns_roce_fill_res_cq_entry(msg, res);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index a8ee2f6da967..b3421b1f21e0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -206,13 +206,13 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
return 0;
}
-struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
+int hns_roce_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
struct hns_roce_ib_create_srq_resp resp = {};
- struct hns_roce_srq *srq;
+ struct hns_roce_srq *srq = to_hr_srq(ib_srq);
int srq_desc_size;
int srq_buf_size;
u32 page_shift;
@@ -223,11 +223,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
/* Check the actual SRQ wqe and SRQ sge num */
if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
- return ERR_PTR(-EINVAL);
-
- srq = kzalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
@@ -249,17 +245,13 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
if (udata) {
struct hns_roce_ib_create_srq ucmd;
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- ret = -EFAULT;
- goto err_srq;
- }
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
srq->umem =
ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
- if (IS_ERR(srq->umem)) {
- ret = PTR_ERR(srq->umem);
- goto err_srq;
- }
+ if (IS_ERR(srq->umem))
+ return PTR_ERR(srq->umem);
if (hr_dev->caps.srqwqe_buf_pg_sz) {
npages = (ib_umem_page_count(srq->umem) +
@@ -321,11 +313,9 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
} else {
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
- (1 << page_shift) * 2,
- &srq->buf, page_shift)) {
- ret = -ENOMEM;
- goto err_srq;
- }
+ (1 << page_shift) * 2, &srq->buf,
+ page_shift))
+ return -ENOMEM;
srq->head = 0;
srq->tail = srq->max - 1;
@@ -340,7 +330,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
goto err_srq_mtt;
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_create_idx_que(pd, srq, page_shift);
+ ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
ret);
@@ -372,7 +362,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
- ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0,
+ ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
&srq->mtt, 0, srq);
if (ret)
goto err_wrid;
@@ -389,7 +379,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
}
}
- return &srq->ibsrq;
+ return 0;
err_srqc_alloc:
hns_roce_srq_free(hr_dev, srq);
@@ -418,12 +408,10 @@ err_buf:
else
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
-err_srq:
- kfree(srq);
- return ERR_PTR(ret);
+ return ret;
}
-int hns_roce_destroy_srq(struct ib_srq *ibsrq)
+void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
@@ -440,10 +428,6 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq)
hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
&srq->buf);
}
-
- kfree(srq);
-
- return 0;
}
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
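The SRQ rework above tracks the core's move to caller-allocated verb objects: instead of kzalloc()ing a hns_roce_srq and returning an ERR_PTR, the driver advertises its structure size with INIT_RDMA_OBJ_SIZE(), receives a pre-allocated ib_srq embedded in that structure, and returns a plain errno; destroy becomes void because the core frees the container. A bare-bones sketch of the shape (my_srq, my_create_srq, my_destroy_srq are hypothetical names):

#include <rdma/ib_verbs.h>

struct my_srq {
	struct ib_srq ibsrq;	/* must be the first member; the core allocates the whole struct */
	/* driver state follows */
};

static int my_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *attr,
			 struct ib_udata *udata)
{
	/* reach driver state with container_of(ibsrq, struct my_srq, ibsrq);
	 * on failure just return a negative errno - no kfree(), no ERR_PTR()
	 */
	return 0;
}

static void my_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	/* tear down hardware state; the core frees the containing my_srq */
}

static const struct ib_device_ops my_srq_ops = {
	.create_srq = my_create_srq,
	.destroy_srq = my_destroy_srq,
	INIT_RDMA_OBJ_SIZE(ib_srq, my_srq, ibsrq),
};
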
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 2f2b4426ded7..8feec35f95a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -552,7 +552,7 @@ enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
void i40iw_request_reset(struct i40iw_device *iwdev);
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
-void i40iw_setup_cm_core(struct i40iw_device *iwdev);
+int i40iw_setup_cm_core(struct i40iw_device *iwdev);
void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
void i40iw_process_aeq(struct i40iw_device *);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 206cfb0016f8..8233f5a4e623 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3237,7 +3237,7 @@ void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
* core
* @iwdev: iwarp device structure
*/
-void i40iw_setup_cm_core(struct i40iw_device *iwdev)
+int i40iw_setup_cm_core(struct i40iw_device *iwdev)
{
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
@@ -3256,9 +3256,19 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
cm_core->event_wq = alloc_ordered_workqueue("iwewq",
WQ_MEM_RECLAIM);
+ if (!cm_core->event_wq)
+ goto error;
cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
WQ_MEM_RECLAIM);
+ if (!cm_core->disconn_wq)
+ goto error;
+
+ return 0;
+error:
+ i40iw_cleanup_cm_core(&iwdev->cm_core);
+
+ return -ENOMEM;
}
/**
@@ -3278,8 +3288,10 @@ void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
del_timer_sync(&cm_core->tcp_timer);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- destroy_workqueue(cm_core->event_wq);
- destroy_workqueue(cm_core->disconn_wq);
+ if (cm_core->event_wq)
+ destroy_workqueue(cm_core->event_wq);
+ if (cm_core->disconn_wq)
+ destroy_workqueue(cm_core->disconn_wq);
}
/**
@@ -3478,7 +3490,8 @@ static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
/* Need to free the Last Streaming Mode Message */
if (iwqp->ietf_mem.va) {
if (iwqp->lsmm_mr)
- iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr);
+ iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr,
+ NULL);
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
}
}
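The i40iw_setup_cm_core()/i40iw_cleanup_cm_core() hunks above add the usual allocate-then-unwind handling for the two ordered workqueues: each alloc_ordered_workqueue() result is NULL-checked, and the cleanup path only destroys queues that were actually created, so it is safe to call on a partially initialized core. A minimal sketch under hypothetical names (my_core, my_setup, my_teardown), assuming the structure starts out zeroed:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_core {
	struct workqueue_struct *event_wq;
	struct workqueue_struct *disconn_wq;
};

static void my_teardown(struct my_core *core)
{
	/* tolerate partial init: only destroy what was created */
	if (core->event_wq)
		destroy_workqueue(core->event_wq);
	if (core->disconn_wq)
		destroy_workqueue(core->disconn_wq);
}

static int my_setup(struct my_core *core)
{
	core->event_wq = alloc_ordered_workqueue("my_evt", WQ_MEM_RECLAIM);
	if (!core->event_wq)
		goto error;

	core->disconn_wq = alloc_ordered_workqueue("my_dis", WQ_MEM_RECLAIM);
	if (!core->disconn_wq)
		goto error;

	return 0;
error:
	my_teardown(core);
	return -ENOMEM;
}
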
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 68095f00d08f..10932baee279 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1641,7 +1641,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
iwdev = &hdl->device;
iwdev->hdl = hdl;
dev = &iwdev->sc_dev;
- i40iw_setup_cm_core(iwdev);
+ if (i40iw_setup_cm_core(iwdev)) {
+ kfree(iwdev->hdl);
+ return -ENOMEM;
+ }
dev->back_dev = (void *)iwdev;
iwdev->ldev = &hdl->ldev;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index f27be3e7830b..d474aad62a81 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -211,7 +211,7 @@ enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_sc_vsi;
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
-#define i40iw_mmiowb() mmiowb()
+#define i40iw_mmiowb() do { } while (0)
void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
#endif /* _I40IW_OSDEP_H_ */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index a8352e3ca23d..5689d742bafb 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -291,18 +291,15 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
/**
* i40iw_alloc_pd - allocate protection domain
* @pd: PD pointer
- * @context: user context created during alloc
* @udata: user data
*/
-static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct i40iw_pd *iwpd = to_iwpd(pd);
struct i40iw_device *iwdev = to_iwdev(pd->device);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_alloc_pd_resp uresp;
struct i40iw_sc_pd *sc_pd;
- struct i40iw_ucontext *ucontext;
u32 pd_id = 0;
int err;
@@ -318,8 +315,9 @@ static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
sc_pd = &iwpd->sc_pd;
- if (context) {
- ucontext = to_ucontext(context);
+ if (udata) {
+ struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct i40iw_ucontext, ibucontext);
dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
memset(&uresp, 0, sizeof(uresp));
uresp.pd_id = pd_id;
@@ -342,8 +340,9 @@ error:
/**
* i40iw_dealloc_pd - deallocate pd
* @ibpd: ptr of pd to be deallocated
+ * @udata: user data or null for kernel object
*/
-static void i40iw_dealloc_pd(struct ib_pd *ibpd)
+static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct i40iw_pd *iwpd = to_iwpd(ibpd);
struct i40iw_device *iwdev = to_iwdev(ibpd->device);
@@ -413,7 +412,7 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
* i40iw_destroy_qp - destroy qp
* @ibqp: qp's ib pointer also to get to device's qp address
*/
-static int i40iw_destroy_qp(struct ib_qp *ibqp)
+static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct i40iw_qp *iwqp = to_iwqp(ibqp);
@@ -744,8 +743,8 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (err_code) {
i40iw_pr_err("copy_to_udata failed\n");
- i40iw_destroy_qp(&iwqp->ibqp);
- /* let the completion of the qp destroy free the qp */
+ i40iw_destroy_qp(&iwqp->ibqp, udata);
+ /* let the completion of the qp destroy free the qp */
return ERR_PTR(err_code);
}
}
@@ -1063,8 +1062,9 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
/**
* i40iw_destroy_cq - destroy cq
* @ib_cq: cq pointer
+ * @udata: user data or NULL for kernel object
*/
-static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct i40iw_cq *iwcq;
struct i40iw_device *iwdev;
@@ -1089,12 +1089,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq)
* i40iw_create_cq - create cq
* @ibdev: device pointer from stack
* @attr: attributes for cq
- * @context: user context created during alloc
* @udata: user data
*/
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
@@ -1144,14 +1142,14 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
info.ceq_id_valid = true;
info.ceqe_mask = 1;
info.type = I40IW_CQ_TYPE_IWARP;
- if (context) {
- struct i40iw_ucontext *ucontext;
+ if (udata) {
+ struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct i40iw_ucontext, ibucontext);
struct i40iw_create_cq_req req;
struct i40iw_cq_mr *cqmr;
memset(&req, 0, sizeof(req));
iwcq->user_mode = true;
- ucontext = to_ucontext(context);
if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
err_code = -EFAULT;
goto cq_free_resources;
@@ -1221,7 +1219,7 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
goto cq_free_resources;
}
- if (context) {
+ if (udata) {
struct i40iw_create_cq_resp resp;
memset(&resp, 0, sizeof(resp));
@@ -1340,53 +1338,22 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
struct i40iw_pble_info *pinfo;
- struct sg_dma_page_iter sg_iter;
- u64 pg_addr = 0;
+ struct ib_block_iter biter;
u32 idx = 0;
- bool first_pg = true;
pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
if (iwmr->type == IW_MEMREG_TYPE_QP)
iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
- for_each_sg_dma_page (region->sg_head.sgl, &sg_iter, region->nmap, 0) {
- pg_addr = sg_page_iter_dma_address(&sg_iter);
- if (first_pg)
- *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
- else if (!(pg_addr & ~iwmr->page_msk))
- *pbl = cpu_to_le64(pg_addr);
- else
- continue;
-
- first_pg = false;
+ rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
+ iwmr->page_size) {
+ *pbl = rdma_block_iter_dma_address(&biter);
pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
}
}
/**
- * i40iw_set_hugetlb_params - set MR pg size and mask to huge pg values.
- * @addr: virtual address
- * @iwmr: mr pointer for this memory registration
- */
-static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
-{
- struct vm_area_struct *vma;
- struct hstate *h;
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, addr);
- if (vma && is_vm_hugetlb_page(vma)) {
- h = hstate_vma(vma);
- if (huge_page_size(h) == 0x200000) {
- iwmr->page_size = huge_page_size(h);
- iwmr->page_msk = huge_page_mask(h);
- }
- }
- up_read(&current->mm->mmap_sem);
-}
-
-/**
* i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
* @arr: lvl1 pbl array
* @npages: page count
@@ -1601,10 +1568,10 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
* @pd: ibpd pointer
* @mr_type: memory for stag registration
* @max_num_sg: max number of pages
+ * @udata: user data or NULL for kernel objects
*/
-static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct i40iw_pd *iwpd = to_iwpd(pd);
struct i40iw_device *iwdev = to_iwdev(pd->device);
@@ -1841,10 +1808,9 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
iwmr->ibmr.device = pd->device;
iwmr->page_size = PAGE_SIZE;
- iwmr->page_msk = PAGE_MASK;
-
- if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
- i40iw_set_hugetlb_values(start, iwmr);
+ if (req.reg_type == IW_MEMREG_TYPE_MEM)
+ iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
+ virt);
region_length = region->length + (start & (iwmr->page_size - 1));
pg_shift = ffs(iwmr->page_size) - 1;
@@ -2038,7 +2004,7 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
* i40iw_dereg_mr - deregister mr
* @ib_mr: mr ptr for dereg
*/
-static int i40iw_dereg_mr(struct ib_mr *ib_mr)
+static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct ib_pd *ibpd = ib_mr->pd;
struct i40iw_pd *iwpd = to_iwpd(ibpd);
@@ -2058,9 +2024,12 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
if (iwmr->type != IW_MEMREG_TYPE_MEM) {
/* region is released. only test for userness. */
if (iwmr->region) {
- struct i40iw_ucontext *ucontext;
+ struct i40iw_ucontext *ucontext =
+ rdma_udata_to_drv_context(
+ udata,
+ struct i40iw_ucontext,
+ ibucontext);
- ucontext = to_ucontext(ibpd->uobject->context);
i40iw_del_memlist(iwmr, ucontext);
}
if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
@@ -2703,6 +2672,14 @@ static const struct ib_device_ops i40iw_dev_ops = {
.get_dma_mr = i40iw_get_dma_mr,
.get_hw_stats = i40iw_get_hw_stats,
.get_port_immutable = i40iw_port_immutable,
+ .iw_accept = i40iw_accept,
+ .iw_add_ref = i40iw_add_ref,
+ .iw_connect = i40iw_connect,
+ .iw_create_listen = i40iw_create_listen,
+ .iw_destroy_listen = i40iw_destroy_listen,
+ .iw_get_qp = i40iw_get_qp,
+ .iw_reject = i40iw_reject,
+ .iw_rem_ref = i40iw_rem_ref,
.map_mr_sg = i40iw_map_mr_sg,
.mmap = i40iw_mmap,
.modify_qp = i40iw_modify_qp,
@@ -2766,22 +2743,8 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.phys_port_cnt = 1;
iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
iwibdev->ibdev.dev.parent = &pcidev->dev;
- iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
- if (!iwibdev->ibdev.iwcm) {
- ib_dealloc_device(&iwibdev->ibdev);
- return NULL;
- }
-
- iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
- iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
- iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
- iwibdev->ibdev.iwcm->connect = i40iw_connect;
- iwibdev->ibdev.iwcm->accept = i40iw_accept;
- iwibdev->ibdev.iwcm->reject = i40iw_reject;
- iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
- iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
- memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
- sizeof(iwibdev->ibdev.iwcm->ifname));
+ memcpy(iwibdev->ibdev.iw_ifname, netdev->name,
+ sizeof(iwibdev->ibdev.iw_ifname));
ib_set_device_ops(&iwibdev->ibdev, &i40iw_dev_ops);
return iwibdev;
@@ -2812,8 +2775,6 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
return;
ib_unregister_device(&iwibdev->ibdev);
- kfree(iwibdev->ibdev.iwcm);
- iwibdev->ibdev.iwcm = NULL;
wait_event_timeout(iwibdev->iwdev->close_wq,
!atomic64_read(&iwibdev->iwdev->use_count),
I40IW_EVENT_TIMEOUT);
@@ -2841,8 +2802,6 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
return 0;
error:
- kfree(iwdev->iwibdev->ibdev.iwcm);
- iwdev->iwibdev->ibdev.iwcm = NULL;
ib_dealloc_device(&iwdev->iwibdev->ibdev);
return ret;
}
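
The memory-registration hunks above drop the driver's private hugetlb probing and page-mask bookkeeping in favour of the core helpers ib_umem_find_best_pgsz(), which picks the largest page size the umem layout allows from a driver-supplied bitmap, and rdma_for_each_block(), which walks the SG list in blocks of that size. A minimal sketch of the walk, using a hypothetical demo_fill_pbl() helper and omitting i40iw's level-2 PBL handling:

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: fill up to npages little-endian DMA addresses. */
static void demo_fill_pbl(struct ib_umem *umem, u64 virt, __le64 *pbl,
			  u32 npages)
{
	struct ib_block_iter biter;
	unsigned long pg_sz;
	u32 i = 0;

	/* Largest page size (4K or 2M here) the MR layout supports. */
	pg_sz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt);
	if (!pg_sz)
		return;		/* no supported page size; a driver would fail the MR */

	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz) {
		if (i >= npages)
			break;
		pbl[i++] = cpu_to_le64(rdma_block_iter_dma_address(&biter));
	}
}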
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 76cf173377ab..3a413752ccc3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -94,8 +94,7 @@ struct i40iw_mr {
struct ib_umem *region;
u16 type;
u32 page_cnt;
- u32 page_size;
- u64 page_msk;
+ u64 page_size;
u32 npages;
u32 stag;
u64 length;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 1672808262ba..02a169f8027b 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -40,13 +40,12 @@
#include "mlx4_ib.h"
-static struct ib_ah *create_ib_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct mlx4_ib_ah *ah)
+static void create_ib_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
- struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+ struct mlx4_ib_ah *ah = to_mah(ib_ah);
+ struct mlx4_dev *dev = to_mdev(ib_ah->device)->dev;
- ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
+ ah->av.ib.port_pd = cpu_to_be32(to_mpd(ib_ah->pd)->pdn |
(rdma_ah_get_port_num(ah_attr) << 24));
ah->av.ib.g_slid = rdma_ah_get_path_bits(ah_attr);
ah->av.ib.sl_tclass_flowlabel =
@@ -73,15 +72,12 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd,
--static_rate;
ah->av.ib.stat_rate = static_rate;
}
-
- return &ah->ibah;
}
-static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct mlx4_ib_ah *ah)
+static int create_iboe_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
- struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
+ struct mlx4_ib_dev *ibdev = to_mdev(ib_ah->device);
+ struct mlx4_ib_ah *ah = to_mah(ib_ah);
const struct ib_gid_attr *gid_attr;
struct mlx4_dev *dev = ibdev->dev;
int is_mcast = 0;
@@ -103,12 +99,14 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
*/
gid_attr = ah_attr->grh.sgid_attr;
if (gid_attr) {
- if (is_vlan_dev(gid_attr->ndev))
- vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
- memcpy(ah->av.eth.s_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_tag,
+ &ah->av.eth.s_mac[0]);
+ if (ret)
+ return ret;
+
ret = mlx4_ib_gid_index_to_real_index(ibdev, gid_attr);
if (ret < 0)
- return ERR_PTR(ret);
+ return ret;
ah->av.eth.gid_index = ret;
} else {
/* mlx4_ib_create_ah_slave fills in the s_mac and the vlan */
@@ -117,7 +115,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
- ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
+ ah->av.eth.port_pd = cpu_to_be32(to_mpd(ib_ah->pd)->pdn |
(rdma_ah_get_port_num(ah_attr) << 24));
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
ah->av.eth.hop_limit = grh->hop_limit;
@@ -140,63 +138,45 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
memcpy(ah->av.eth.dgid, grh->dgid.raw, 16);
ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(rdma_ah_get_sl(ah_attr)
<< 29);
- return &ah->ibah;
+ return 0;
}
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata)
{
- struct mlx4_ib_ah *ah;
- struct ib_ah *ret;
-
- ah = kzalloc(sizeof *ah, GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
-
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
- if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
- ret = ERR_PTR(-EINVAL);
- } else {
- /*
- * TBD: need to handle the case when we get
- * called in an atomic context and there we
- * might sleep. We don't expect this
- * currently since we're working with link
- * local addresses which we can translate
- * without going to sleep.
- */
- ret = create_iboe_ah(pd, ah_attr, ah);
- }
-
- if (IS_ERR(ret))
- kfree(ah);
+ if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+ /*
+ * TBD: need to handle the case when we get
+ * called in an atomic context and there we
+ * might sleep. We don't expect this
+ * currently since we're working with link
+ * local addresses which we can translate
+ * without going to sleep.
+ */
+ return create_iboe_ah(ib_ah, ah_attr);
+ }
- return ret;
- } else
- return create_ib_ah(pd, ah_attr, ah); /* never fails */
+ create_ib_ah(ib_ah, ah_attr);
+ return 0;
}
-/* AH's created via this call must be free'd by mlx4_ib_destroy_ah. */
-struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- int slave_sgid_index, u8 *s_mac,
- u16 vlan_tag)
+int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+ int slave_sgid_index, u8 *s_mac, u16 vlan_tag)
{
struct rdma_ah_attr slave_attr = *ah_attr;
- struct mlx4_ib_ah *mah;
- struct ib_ah *ah;
+ struct mlx4_ib_ah *mah = to_mah(ah);
+ int ret;
slave_attr.grh.sgid_attr = NULL;
slave_attr.grh.sgid_index = slave_sgid_index;
- ah = mlx4_ib_create_ah(pd, &slave_attr, 0, NULL);
- if (IS_ERR(ah))
- return ah;
+ ret = mlx4_ib_create_ah(ah, &slave_attr, 0, NULL);
+ if (ret)
+ return ret;
- ah->device = pd->device;
- ah->pd = pd;
ah->type = ah_attr->type;
- mah = to_mah(ah);
/* get rid of force-loopback bit */
mah->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
@@ -208,7 +188,7 @@ struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
mah->av.eth.vlan = cpu_to_be16(vlan_tag);
- return ah;
+ return 0;
}
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
@@ -250,8 +230,7 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
return 0;
}
-int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
- kfree(to_mah(ah));
- return 0;
+ return;
}
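
mlx4's address handles now follow the same embedded-object model as PDs and ucontexts: the RDMA core allocates the driver structure using the size registered via INIT_RDMA_OBJ_SIZE(), the create callback only fills in the embedded ib_ah and returns an errno, and destroy frees nothing. A minimal sketch of that callback shape, with a hypothetical demo_ah type:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct demo_ah {			/* hypothetical driver AH */
	struct ib_ah ibah;		/* core object embedded first */
	u8 port;
};

static int demo_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
			  u32 flags, struct ib_udata *udata)
{
	/* The core already allocated sizeof(struct demo_ah) for us. */
	struct demo_ah *ah = container_of(ibah, struct demo_ah, ibah);

	ah->port = rdma_ah_get_port_num(ah_attr);
	return 0;			/* plain errno on failure, no ERR_PTR */
}

static void demo_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	/* Nothing to free; the core releases the embedded allocation. */
}

static const struct ib_device_ops demo_dev_ops = {
	.create_ah = demo_create_ah,
	.destroy_ah = demo_destroy_ah,
	INIT_RDMA_OBJ_SIZE(ib_ah, demo_ah, ibah),
};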
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 8c79a480f2b7..ecd6cadd529a 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -168,20 +168,17 @@ static void id_map_ent_timeout(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
- struct id_map_entry *db_ent, *found_ent;
+ struct id_map_entry *found_ent;
struct mlx4_ib_dev *dev = ent->dev;
struct mlx4_ib_sriov *sriov = &dev->sriov;
struct rb_root *sl_id_map = &sriov->sl_id_map;
- int pv_id = (int) ent->pv_cm_id;
spin_lock(&sriov->id_map_lock);
- db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
- if (!db_ent)
+ if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
goto out;
found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
if (found_ent && found_ent == ent)
rb_erase(&found_ent->node, sl_id_map);
- idr_remove(&sriov->pv_id_table, pv_id);
out:
list_del(&ent->list);
@@ -196,13 +193,12 @@ static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
struct id_map_entry *ent, *found_ent;
spin_lock(&sriov->id_map_lock);
- ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
+ ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
if (!ent)
goto out;
found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
if (found_ent && found_ent == ent)
rb_erase(&found_ent->node, sl_id_map);
- idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
spin_unlock(&sriov->id_map_lock);
}
@@ -256,25 +252,19 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
ent->dev = to_mdev(ibdev);
INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
- idr_preload(GFP_KERNEL);
- spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-
- ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
+ ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
+ xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
if (ret >= 0) {
- ent->pv_cm_id = (u32)ret;
+ spin_lock(&sriov->id_map_lock);
sl_id_map_add(ibdev, ent);
list_add_tail(&ent->list, &sriov->cm_list);
- }
-
- spin_unlock(&sriov->id_map_lock);
- idr_preload_end();
-
- if (ret >= 0)
+ spin_unlock(&sriov->id_map_lock);
return ent;
+ }
/*error flow*/
kfree(ent);
- mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
+ mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
return ERR_PTR(-ENOMEM);
}
@@ -290,7 +280,7 @@ id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
if (ent)
*pv_cm_id = (int) ent->pv_cm_id;
} else
- ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
+ ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
spin_unlock(&sriov->id_map_lock);
return ent;
@@ -407,7 +397,7 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
spin_lock_init(&dev->sriov.id_map_lock);
INIT_LIST_HEAD(&dev->sriov.cm_list);
dev->sriov.sl_id_map = RB_ROOT;
- idr_init(&dev->sriov.pv_id_table);
+ xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
}
/* slave = -1 ==> all slaves */
@@ -444,7 +434,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
struct id_map_entry, node);
rb_erase(&ent->node, sl_id_map);
- idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
+ xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
}
list_splice_init(&dev->sriov.cm_list, &lh);
} else {
@@ -460,7 +450,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
/* remove those nodes from databases */
list_for_each_entry_safe(map, tmp_map, &lh, list) {
rb_erase(&map->node, sl_id_map);
- idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
+ xa_erase(&sriov->pv_id_table, map->pv_cm_id);
}
/* add remaining nodes from cm_list */
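
The CM paravirtualization table above moves from an IDR plus idr_preload() under an external spinlock to an allocating XArray: xa_alloc_cyclic() assigns the ID and stores the entry in one call with internal locking, and xa_erase() folds the old idr_find()/idr_remove() pair into a single lookup-and-remove. A minimal sketch of the same pattern on a hypothetical demo_ids table:

#include <linux/gfp.h>
#include <linux/xarray.h>

struct demo_ent {
	u32 id;
	/* payload ... */
};

static DEFINE_XARRAY_ALLOC(demo_ids);	/* XA_FLAGS_ALLOC, like pv_id_table */
static u32 demo_next;

static int demo_insert(struct demo_ent *ent)
{
	/* Returns 0 (or 1 after the counter wraps) on success, -errno on
	 * failure; ent->id receives the allocated 32-bit ID. */
	return xa_alloc_cyclic(&demo_ids, &ent->id, ent, xa_limit_32b,
			       &demo_next, GFP_KERNEL);
}

static struct demo_ent *demo_lookup(u32 id)
{
	return xa_load(&demo_ids, id);
}

static struct demo_ent *demo_remove(u32 id)
{
	/* Erase and return the entry (NULL if it was not present). */
	return xa_erase(&demo_ids, id);
}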
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 03ac72339dd2..022a0b4ea452 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -38,6 +38,7 @@
#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
+#include <rdma/uverbs_ioctl.h>
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
@@ -173,7 +174,6 @@ err_buf:
#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@@ -183,6 +183,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
struct mlx4_uar *uar;
void *buf_addr;
int err;
+ struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
if (entries < 1 || entries > dev->dev->caps.max_cqes)
return ERR_PTR(-EINVAL);
@@ -204,7 +206,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->send_qp_list);
INIT_LIST_HEAD(&cq->recv_qp_list);
- if (context) {
+ if (udata) {
struct mlx4_ib_create_cq ucmd;
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -218,12 +220,11 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_cq;
- err = mlx4_ib_db_map_user(to_mucontext(context), udata,
- ucmd.db_addr, &cq->db);
+ err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
if (err)
goto err_mtt;
- uar = &to_mucontext(context)->uar;
+ uar = &context->uar;
cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
} else {
err = mlx4_db_alloc(dev->dev, &cq->db, 1);
@@ -248,21 +249,21 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (dev->eq_table)
vector = dev->eq_table[vector % ibdev->num_comp_vectors];
- err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
- cq->db.dma, &cq->mcq, vector, 0,
+ err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
+ &cq->mcq, vector, 0,
!!(cq->create_flags &
IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
- buf_addr, !!context);
+ buf_addr, !!udata);
if (err)
goto err_dbmap;
- if (context)
+ if (udata)
cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
else
cq->mcq.comp = mlx4_ib_cq_comp;
cq->mcq.event = mlx4_ib_cq_event;
- if (context)
+ if (udata)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
goto err_cq_free;
@@ -274,19 +275,19 @@ err_cq_free:
mlx4_cq_free(dev->dev, &cq->mcq);
err_dbmap:
- if (context)
- mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
+ if (udata)
+ mlx4_ib_db_unmap_user(context, &cq->db);
err_mtt:
mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
- if (context)
+ if (udata)
ib_umem_release(cq->umem);
else
mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
err_db:
- if (!context)
+ if (!udata)
mlx4_db_free(dev->dev, &cq->db);
err_cq:
@@ -485,7 +486,7 @@ out:
return err;
}
-int mlx4_ib_destroy_cq(struct ib_cq *cq)
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(cq->device);
struct mlx4_ib_cq *mcq = to_mcq(cq);
@@ -493,8 +494,13 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
mlx4_cq_free(dev->dev, &mcq->mcq);
mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);
- if (cq->uobject) {
- mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
+ if (udata) {
+ mlx4_ib_db_unmap_user(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx4_ib_ucontext,
+ ibucontext),
+ &mcq->db);
ib_umem_release(mcq->umem);
} else {
mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
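
A pattern that recurs through the rest of these conversions: verbs callbacks no longer reach the driver ucontext through uobject->context, they take an ib_udata pointer and resolve it with rdma_udata_to_drv_context(), which yields NULL for kernel-owned objects, so a plain udata check distinguishes the user and kernel paths. A minimal sketch of the destroy-side idiom, with a hypothetical demo_ucontext and teardown helper:

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct demo_ucontext {			/* hypothetical driver ucontext */
	struct ib_ucontext ibucontext;
	int db_page;
};

static void demo_db_unmap(struct demo_ucontext *uctx)
{
	uctx->db_page = -1;		/* stand-in for the real teardown */
}

static int demo_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct demo_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct demo_ucontext, ibucontext);

	if (udata)			/* user CQ: uctx is the creating context */
		demo_db_unmap(uctx);
	/* else: kernel CQ, uctx is NULL and there is nothing user-side to undo */

	return 0;
}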
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 3aab71b29ce8..0f390351cef0 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -31,6 +31,7 @@
*/
#include <linux/slab.h>
+#include <rdma/uverbs_ioctl.h>
#include "mlx4_ib.h"
@@ -41,12 +42,13 @@ struct mlx4_ib_user_db_page {
int refcnt;
};
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context,
- struct ib_udata *udata, unsigned long virt,
+int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
struct mlx4_db *db)
{
struct mlx4_ib_user_db_page *page;
int err = 0;
+ struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
mutex_lock(&context->db_page_mutex);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 936ee1314bcd..68c951491a08 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1371,9 +1371,9 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
struct ib_ah *ah;
struct ib_qp *send_qp = NULL;
unsigned wire_tx_ix = 0;
- int ret = 0;
u16 wire_pkey_ix;
int src_qpnum;
+ int ret;
sqp_ctx = dev->sriov.sqps[port-1];
@@ -1393,12 +1393,20 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
send_qp = sqp->qp;
- /* create ah */
- ah = mlx4_ib_create_ah_slave(sqp_ctx->pd, attr,
- rdma_ah_retrieve_grh(attr)->sgid_index,
- s_mac, vlan_id);
- if (IS_ERR(ah))
+ ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah);
+ if (!ah)
return -ENOMEM;
+
+ ah->device = sqp_ctx->pd->device;
+ ah->pd = sqp_ctx->pd;
+
+ /* create ah */
+ ret = mlx4_ib_create_ah_slave(ah, attr,
+ rdma_ah_retrieve_grh(attr)->sgid_index,
+ s_mac, vlan_id);
+ if (ret)
+ goto out;
+
spin_lock(&sqp->tx_lock);
if (sqp->tx_ix_head - sqp->tx_ix_tail >=
(MLX4_NUM_TUNNEL_BUFS - 1))
@@ -1410,8 +1418,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
goto out;
sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
- if (sqp->tx_ring[wire_tx_ix].ah)
- mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
+ kfree(sqp->tx_ring[wire_tx_ix].ah);
sqp->tx_ring[wire_tx_ix].ah = ah;
ib_dma_sync_single_for_cpu(&dev->ib_dev,
sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1450,7 +1457,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
spin_unlock(&sqp->tx_lock);
sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- mlx4_ib_destroy_ah(ah, 0);
+ kfree(ah);
return ret;
}
@@ -1902,8 +1909,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
if (wc.status == IB_WC_SUCCESS) {
switch (wc.opcode) {
case IB_WC_SEND:
- mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
+ kfree(sqp->tx_ring[wc.wr_id &
+ (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
spin_lock(&sqp->tx_lock);
@@ -1931,8 +1938,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
" status = %d, wrid = 0x%llx\n",
ctx->slave, wc.status, wc.wr_id);
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
- mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
+ kfree(sqp->tx_ring[wc.wr_id &
+ (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
spin_lock(&sqp->tx_lock);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 733f7bbd5901..25d09d53b51c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1177,8 +1177,7 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}
}
-static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mlx4_ib_pd *pd = to_mpd(ibpd);
struct ib_device *ibdev = ibpd->device;
@@ -1188,20 +1187,19 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
if (err)
return err;
- if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+ if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
return -EFAULT;
}
return 0;
}
-static void mlx4_ib_dealloc_pd(struct ib_pd *pd)
+static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
struct mlx4_ib_xrcd *xrcd;
@@ -1243,7 +1241,7 @@ err1:
return ERR_PTR(err);
}
-static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
ib_destroy_cq(to_mxrcd(xrcd)->cq);
ib_dealloc_pd(to_mxrcd(xrcd)->pd);
@@ -2560,7 +2558,10 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
.req_notify_cq = mlx4_ib_arm_cq,
.rereg_user_mr = mlx4_ib_rereg_user_mr,
.resize_cq = mlx4_ib_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 60dc1347c5ab..26897102057d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -492,10 +492,11 @@ struct mlx4_ib_sriov {
struct mlx4_sriov_alias_guid alias_guid;
/* CM paravirtualization fields */
- struct list_head cm_list;
+ struct xarray pv_id_table;
+ u32 pv_id_next;
spinlock_t id_map_lock;
struct rb_root sl_id_map;
- struct idr pv_id_table;
+ struct list_head cm_list;
};
struct gid_cache_context {
@@ -722,8 +723,7 @@ static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context,
- struct ib_udata *udata, unsigned long virt,
+int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
@@ -733,43 +733,38 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
-int mlx4_ib_dereg_mr(struct ib_mr *mr);
+int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg);
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata);
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
-int mlx4_ib_destroy_cq(struct ib_cq *cq);
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
-struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- int slave_sgid_index, u8 *s_mac,
- u16 vlan_tag);
+int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+ struct ib_udata *udata);
+int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+ int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags);
+void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags);
-struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata);
+int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int mlx4_ib_destroy_srq(struct ib_srq *srq);
+void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
@@ -777,7 +772,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
-int mlx4_ib_destroy_qp(struct ib_qp *qp);
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx4_ib_drain_sq(struct ib_qp *qp);
void mlx4_ib_drain_rq(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -912,7 +907,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
-int mlx4_ib_destroy_wq(struct ib_wq *wq);
+int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 395379a480cb..355205a28544 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -595,7 +595,7 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
}
}
-int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
+int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
int ret;
@@ -655,9 +655,8 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
return 0;
}
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
struct mlx4_ib_mr *mr;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 429a59c5801c..5221c0794d1d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1041,11 +1041,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_mtt;
if (qp_has_rq(init_attr)) {
- err = mlx4_ib_db_map_user(
- context, udata,
- (src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
+ err = mlx4_ib_db_map_user(udata,
+ (src == MLX4_IB_QP_SRC) ?
+ ucmd.qp.db_addr :
ucmd.wq.db_addr,
- &qp->db);
+ &qp->db);
if (err)
goto err_mtt;
}
@@ -1338,7 +1338,8 @@ static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
}
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
- enum mlx4_ib_source_type src, bool is_user)
+ enum mlx4_ib_source_type src,
+ struct ib_udata *udata)
{
struct mlx4_ib_cq *send_cq, *recv_cq;
unsigned long flags;
@@ -1380,7 +1381,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
list_del(&qp->qps_list);
list_del(&qp->cq_send_list);
list_del(&qp->cq_recv_list);
- if (!is_user) {
+ if (!udata) {
__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
if (send_cq != recv_cq)
@@ -1398,19 +1399,26 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
else if (src == MLX4_IB_RWQ_SRC)
- mlx4_ib_release_wqn(to_mucontext(
- qp->ibwq.uobject->context), qp, 1);
+ mlx4_ib_release_wqn(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx4_ib_ucontext,
+ ibucontext),
+ qp, 1);
else
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
}
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
- if (is_user) {
+ if (udata) {
if (qp->rq.wqe_cnt) {
- struct mlx4_ib_ucontext *mcontext = !src ?
- to_mucontext(qp->ibqp.uobject->context) :
- to_mucontext(qp->ibwq.uobject->context);
+ struct mlx4_ib_ucontext *mcontext =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx4_ib_ucontext,
+ ibucontext);
+
mlx4_ib_db_unmap_user(mcontext, &qp->db);
}
ib_umem_release(qp->umem);
@@ -1594,7 +1602,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
return ibqp;
}
-static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
+static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(qp->device);
struct mlx4_ib_qp *mqp = to_mqp(qp);
@@ -1615,7 +1623,7 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
if (qp->rwq_ind_tbl) {
destroy_qp_rss(dev, mqp);
} else {
- destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, qp->uobject);
+ destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
}
if (is_sqp(dev, mqp))
@@ -1626,7 +1634,7 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
return 0;
}
-int mlx4_ib_destroy_qp(struct ib_qp *qp)
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct mlx4_ib_qp *mqp = to_mqp(qp);
@@ -1637,7 +1645,7 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
ib_destroy_qp(sqp->roce_v2_gsi);
}
- return _mlx4_ib_destroy_qp(qp);
+ return _mlx4_ib_destroy_qp(qp, udata);
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
@@ -2240,8 +2248,10 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
if (is_eth) {
gid_attr = attr->ah_attr.grh.sgid_attr;
- vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
- memcpy(smac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ err = rdma_read_gid_l2_fields(gid_attr, &vlan,
+ &smac[0]);
+ if (err)
+ goto out;
}
if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
@@ -3744,12 +3754,6 @@ out:
writel_relaxed(qp->doorbell_qpn,
to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
- /*
- * Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- mmiowb();
-
stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
qp->sq_next_wqe = ind;
@@ -4244,7 +4248,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
return err;
}
-int mlx4_ib_destroy_wq(struct ib_wq *ibwq)
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
@@ -4252,7 +4256,7 @@ int mlx4_ib_destroy_wq(struct ib_wq *ibwq)
if (qp->counter_index)
mlx4_ib_free_qp_counter(dev, qp);
- destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, 1);
+ destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
kfree(qp);
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 381cf899bcef..4bf2946b9759 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -69,14 +69,14 @@ static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
}
}
-struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int mlx4_ib_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx4_ib_ucontext, ibucontext);
- struct mlx4_ib_srq *srq;
+ struct mlx4_ib_srq *srq = to_msrq(ib_srq);
struct mlx4_wqe_srq_next_seg *next;
struct mlx4_wqe_data_seg *scatter;
u32 cqn;
@@ -89,11 +89,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
/* Sanity check SRQ size before proceeding */
if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
- return ERR_PTR(-EINVAL);
-
- srq = kmalloc(sizeof *srq, GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
@@ -111,16 +107,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (udata) {
struct mlx4_ib_create_srq ucmd;
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
- err = -EFAULT;
- goto err_srq;
- }
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
- if (IS_ERR(srq->umem)) {
- err = PTR_ERR(srq->umem);
- goto err_srq;
- }
+ if (IS_ERR(srq->umem))
+ return PTR_ERR(srq->umem);
err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
srq->umem->page_shift, &srq->mtt);
@@ -131,14 +123,13 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
- err = mlx4_ib_db_map_user(ucontext, udata, ucmd.db_addr,
- &srq->db);
+ err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
if (err)
goto err_mtt;
} else {
err = mlx4_db_alloc(dev->dev, &srq->db, 0);
if (err)
- goto err_srq;
+ return err;
*srq->db.db = 0;
@@ -185,8 +176,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
(u16) dev->dev->caps.reserved_xrcds;
- err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
- srq->db.dma, &srq->msrq);
+ err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
+ &srq->mtt, srq->db.dma, &srq->msrq);
if (err)
goto err_wrid;
@@ -201,7 +192,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
init_attr->attr.max_wr = srq->msrq.max - 1;
- return &srq->ibsrq;
+ return 0;
err_wrid:
if (udata)
@@ -222,10 +213,7 @@ err_db:
if (!udata)
mlx4_db_free(dev->dev, &srq->db);
-err_srq:
- kfree(srq);
-
- return ERR_PTR(err);
+ return err;
}
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -272,7 +260,7 @@ int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
return 0;
}
-int mlx4_ib_destroy_srq(struct ib_srq *srq)
+void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(srq->device);
struct mlx4_ib_srq *msrq = to_msrq(srq);
@@ -280,8 +268,13 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
mlx4_srq_free(dev->dev, &msrq->msrq);
mlx4_mtt_cleanup(dev->dev, &msrq->mtt);
- if (srq->uobject) {
- mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
+ if (udata) {
+ mlx4_ib_db_unmap_user(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx4_ib_ucontext,
+ ibucontext),
+ &msrq->db);
ib_umem_release(msrq->umem);
} else {
kvfree(msrq->wrid);
@@ -289,10 +282,6 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
&msrq->buf);
mlx4_db_free(dev->dev, &msrq->db);
}
-
- kfree(msrq);
-
- return 0;
}
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 420ae0897333..80642dd359bc 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -32,9 +32,8 @@
#include "mlx5_ib.h"
-static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ah *ah,
- struct rdma_ah_attr *ah_attr)
+static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+ struct rdma_ah_attr *ah_attr)
{
enum ib_gid_type gid_type;
@@ -67,21 +66,19 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
}
-
- return &ah->ibah;
}
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata)
{
- struct mlx5_ib_ah *ah;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_ah *ah = to_mah(ibah);
+ struct mlx5_ib_dev *dev = to_mdev(ibah->device);
enum rdma_ah_attr_type ah_type = ah_attr->type;
if ((ah_type == RDMA_AH_ATTR_TYPE_ROCE) &&
!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) {
int err;
@@ -90,21 +87,18 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
sizeof(resp.dmac);
if (udata->outlen < min_resp_len)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
resp.response_length = min_resp_len;
memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
- return ERR_PTR(err);
+ return err;
}
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
-
- return create_ib_ah(dev, ah, ah_attr); /* never fails */
+ create_ib_ah(dev, ah, ah_attr);
+ return 0;
}
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
@@ -131,8 +125,7 @@ int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
return 0;
}
-int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
- kfree(to_mah(ah));
- return 0;
+ return;
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 6bcc63aaa50b..e3ec79b8f7f5 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -82,10 +82,10 @@ int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
}
-int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
- u64 length, u32 alignment)
+int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
+ u64 length, u32 alignment)
{
- struct mlx5_core_dev *dev = memic->dev;
+ struct mlx5_core_dev *dev = dm->dev;
u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
>> PAGE_SHIFT;
u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
@@ -115,17 +115,17 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
mlx5_alignment);
while (page_idx < num_memic_hw_pages) {
- spin_lock(&memic->memic_lock);
- page_idx = bitmap_find_next_zero_area(memic->memic_alloc_pages,
+ spin_lock(&dm->lock);
+ page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
num_memic_hw_pages,
page_idx,
num_pages, 0);
if (page_idx < num_memic_hw_pages)
- bitmap_set(memic->memic_alloc_pages,
+ bitmap_set(dm->memic_alloc_pages,
page_idx, num_pages);
- spin_unlock(&memic->memic_lock);
+ spin_unlock(&dm->lock);
if (page_idx >= num_memic_hw_pages)
break;
@@ -135,10 +135,10 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (ret) {
- spin_lock(&memic->memic_lock);
- bitmap_clear(memic->memic_alloc_pages,
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
page_idx, num_pages);
- spin_unlock(&memic->memic_lock);
+ spin_unlock(&dm->lock);
if (ret == -EAGAIN) {
page_idx++;
@@ -148,7 +148,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
return ret;
}
- *addr = pci_resource_start(dev->pdev, 0) +
+ *addr = dev->bar_addr +
MLX5_GET64(alloc_memic_out, out, memic_start_addr);
return 0;
@@ -157,9 +157,9 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
return -ENOMEM;
}
-int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
+int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
{
- struct mlx5_core_dev *dev = memic->dev;
+ struct mlx5_core_dev *dev = dm->dev;
u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
u32 out[MLX5_ST_SZ_DW(dealloc_memic_out)] = {0};
@@ -167,7 +167,7 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
u64 start_page_idx;
int err;
- addr -= pci_resource_start(dev->pdev, 0);
+ addr -= dev->bar_addr;
start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
@@ -177,15 +177,140 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err) {
- spin_lock(&memic->memic_lock);
- bitmap_clear(memic->memic_alloc_pages,
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
start_page_idx, num_pages);
- spin_unlock(&memic->memic_lock);
+ spin_unlock(&dm->lock);
}
return err;
}
+int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
+ u16 uid, phys_addr_t *addr, u32 *obj_id)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
+ unsigned long *block_map;
+ u64 icm_start_addr;
+ u32 log_icm_size;
+ u32 max_blocks;
+ u64 block_idx;
+ void *sw_icm;
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
+ MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ steering_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
+ block_map = dm->steering_sw_icm_alloc_blocks;
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_sw_icm_size);
+ block_map = dm->header_modify_sw_icm_alloc_blocks;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+ spin_lock(&dm->lock);
+ block_idx = bitmap_find_next_zero_area(block_map,
+ max_blocks,
+ 0,
+ num_blocks, 0);
+
+ if (block_idx < max_blocks)
+ bitmap_set(block_map,
+ block_idx, num_blocks);
+
+ spin_unlock(&dm->lock);
+
+ if (block_idx >= max_blocks)
+ return -ENOMEM;
+
+ sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
+ icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
+ icm_start_addr);
+ MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ spin_lock(&dm->lock);
+ bitmap_clear(block_map,
+ block_idx, num_blocks);
+ spin_unlock(&dm->lock);
+
+ return ret;
+ }
+
+ *addr = icm_start_addr;
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+
+int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
+ u16 uid, phys_addr_t addr, u32 obj_id)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ unsigned long *block_map;
+ u64 start_idx;
+ int err;
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ start_idx =
+ (addr - MLX5_CAP64_DEV_MEM(
+ dev, steering_sw_icm_start_address)) >>
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ block_map = dm->steering_sw_icm_alloc_blocks;
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ start_idx =
+ (addr -
+ MLX5_CAP64_DEV_MEM(
+ dev, header_modify_sw_icm_start_address)) >>
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ block_map = dm->header_modify_sw_icm_alloc_blocks;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ spin_lock(&dm->lock);
+ bitmap_clear(block_map,
+ start_idx, num_blocks);
+ spin_unlock(&dm->lock);
+
+ return 0;
+}
+
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
{
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
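
The new SW ICM alloc/dealloc commands carve contiguous block ranges out of a device region with a plain bitmap under the dm lock: find a free run, mark it, issue the firmware command, and clear the bits again on failure or at free time. A minimal sketch of that allocator, using a fixed hypothetical block count instead of the capability-derived region sizes:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define DEMO_MAX_BLOCKS 1024		/* hypothetical region size in blocks */

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_BITMAP(demo_blocks, DEMO_MAX_BLOCKS);

/* Reserve num_blocks contiguous blocks; returns the first index or -ENOMEM. */
static int demo_alloc_blocks(unsigned int num_blocks)
{
	unsigned long idx;

	spin_lock(&demo_lock);
	idx = bitmap_find_next_zero_area(demo_blocks, DEMO_MAX_BLOCKS, 0,
					 num_blocks, 0);
	if (idx < DEMO_MAX_BLOCKS)
		bitmap_set(demo_blocks, idx, num_blocks);
	spin_unlock(&demo_lock);

	return idx < DEMO_MAX_BLOCKS ? (int)idx : -ENOMEM;
}

/* Undo a reservation, e.g. after a failed firmware command. */
static void demo_free_blocks(unsigned int idx, unsigned int num_blocks)
{
	spin_lock(&demo_lock);
	bitmap_clear(demo_blocks, idx, num_blocks);
	spin_unlock(&demo_lock);
}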
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 923a7b93f507..0572dcba6eae 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -44,9 +44,9 @@ int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out);
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
void *in, int in_size);
-int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
+int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
u64 length, u32 alignment);
-int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length);
+int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
@@ -65,4 +65,8 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
+int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
+ u16 uid, phys_addr_t *addr, u32 *obj_id);
+int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
+ u16 uid, phys_addr_t addr, u32 obj_id);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 18704e503508..2e2e65f00257 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -679,8 +679,7 @@ static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
}
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
- struct ib_ucontext *context, struct mlx5_ib_cq *cq,
- int entries, u32 **cqb,
+ struct mlx5_ib_cq *cq, int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd = {};
@@ -691,6 +690,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int ncont;
void *cqc;
int err;
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
ucmdlen = udata->inlen < sizeof(ucmd) ?
(sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
@@ -715,8 +716,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
return err;
}
- err = mlx5_ib_db_map_user(to_mucontext(context), udata, ucmd.db_addr,
- &cq->db);
+ err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
if (err)
goto err_umem;
@@ -740,7 +740,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- *index = to_mucontext(context)->bfregi.sys_pages[0];
+ *index = context->bfregi.sys_pages[0];
if (ucmd.cqe_comp_en == 1) {
int mini_cqe_format;
@@ -782,23 +782,26 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
}
- MLX5_SET(create_cq_in, *cqb, uid, to_mucontext(context)->devx_uid);
+ MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
return 0;
err_cqb:
kvfree(*cqb);
err_db:
- mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+ mlx5_ib_db_unmap_user(context, &cq->db);
err_umem:
ib_umem_release(cq->buf.umem);
return err;
}
-static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
+static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
- mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ mlx5_ib_db_unmap_user(context, &cq->db);
ib_umem_release(cq->buf.umem);
}
@@ -883,7 +886,6 @@ static void notify_soft_wc_handler(struct work_struct *work)
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@@ -923,9 +925,9 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->list_send_qp);
INIT_LIST_HEAD(&cq->list_recv_qp);
- if (context) {
- err = create_cq_user(dev, udata, context, cq, entries,
- &cqb, &cqe_size, &index, &inlen);
+ if (udata) {
+ err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
+ &index, &inlen);
if (err)
goto err_create;
} else {
@@ -962,7 +964,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
cq->mcq.irqn = irqn;
- if (context)
+ if (udata)
cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
else
cq->mcq.comp = mlx5_ib_cq_comp;
@@ -970,7 +972,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->wc_list);
- if (context)
+ if (udata)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
err = -EFAULT;
goto err_cmd;
@@ -985,8 +987,8 @@ err_cmd:
err_cqb:
kvfree(cqb);
- if (context)
- destroy_cq_user(cq, context);
+ if (udata)
+ destroy_cq_user(cq, udata);
else
destroy_cq_kernel(dev, cq);
@@ -996,19 +998,14 @@ err_create:
return ERR_PTR(err);
}
-
-int mlx5_ib_destroy_cq(struct ib_cq *cq)
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
- struct ib_ucontext *context = NULL;
-
- if (cq->uobject)
- context = cq->uobject->context;
mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
- if (context)
- destroy_cq_user(mcq, context);
+ if (udata)
+ destroy_cq_user(mcq, udata);
else
destroy_cq_kernel(dev, mcq);
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 9e08df7914aa..80b42d069328 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -85,6 +85,10 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
if (is_user && capable(CAP_NET_RAW) &&
(MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
cap |= MLX5_UCTX_CAP_RAW_TX;
+ if (is_user && capable(CAP_SYS_RAWIO) &&
+ (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_INTERNAL_DEV_RES))
+ cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
MLX5_SET(uctx, uctx, cap, cap);
@@ -150,7 +154,7 @@ bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
* must be considered upon checking for a valid object id.
* For that the opcode of the creator command is encoded as part of the obj_id.
*/
-static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
+static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
return ((u64)opcode << 32) | obj_id;
}
@@ -163,7 +167,9 @@ static u64 devx_get_obj_id(const void *in)
switch (opcode) {
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
- obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT,
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_type) << 16,
MLX5_GET(general_obj_in_cmd_hdr, in,
obj_id));
break;
@@ -373,8 +379,10 @@ static u64 devx_get_obj_id(const void *in)
return obj_id;
}
-static bool devx_is_valid_obj_id(struct ib_uobject *uobj, const void *in)
+static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ struct ib_uobject *uobj, const void *in)
{
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
u64 obj_id = devx_get_obj_id(in);
if (!obj_id)
@@ -389,7 +397,6 @@ static bool devx_is_valid_obj_id(struct ib_uobject *uobj, const void *in)
case UVERBS_OBJECT_SRQ:
{
struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
- struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
u16 opcode;
switch (srq->common.res) {
@@ -681,6 +688,7 @@ static bool devx_is_whitelist_cmd(void *in)
switch (opcode) {
case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
return true;
default:
return false;
@@ -718,6 +726,7 @@ static bool devx_is_general_cmd(void *in)
switch (opcode) {
case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_VPORT_STATE:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_ISSI:
@@ -1117,7 +1126,8 @@ static void devx_cleanup_mkey(struct devx_obj *obj)
}
static int devx_obj_cleanup(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj = uobject->object;
@@ -1135,7 +1145,8 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
return ret;
if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
- struct mlx5_ib_dev *dev = to_mdev(uobject->context->device);
+ struct mlx5_ib_dev *dev =
+ mlx5_udata_to_mdev(&attrs->driver_udata);
call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
devx_free_indirect_mkey);
@@ -1162,6 +1173,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
+ u16 obj_type = 0;
int err;
int uid;
u32 obj_id;
@@ -1221,7 +1233,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (err)
goto err_copy;
- obj->obj_id = get_enc_obj_id(opcode, obj_id);
+ if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
+ obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
+
+ obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
+
return 0;
err_copy:
@@ -1260,7 +1276,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
if (!devx_is_obj_modify_cmd(cmd_in))
return -EINVAL;
- if (!devx_is_valid_obj_id(uobj, cmd_in))
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
return -EINVAL;
cmd_out = uverbs_zalloc(attrs, cmd_out_len);
@@ -1302,7 +1318,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
if (!devx_is_obj_query_cmd(cmd_in))
return -EINVAL;
- if (!devx_is_valid_obj_id(uobj, cmd_in))
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
return -EINVAL;
cmd_out = uverbs_zalloc(attrs, cmd_out_len);
@@ -1350,7 +1366,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
- struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
ev_file = container_of(uobj, struct devx_async_cmd_event_file,
uobj);
@@ -1412,7 +1428,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
if (err)
return err;
- if (!devx_is_valid_obj_id(uobj, cmd_in))
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
return -EINVAL;
fd_uobj = uverbs_attr_get_uobject(attrs,
@@ -1599,7 +1615,8 @@ err_obj_free:
}
static int devx_umem_cleanup(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct devx_umem *obj = uobject->object;
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
@@ -1704,7 +1721,7 @@ static __poll_t devx_async_cmd_event_poll(struct file *filp,
return pollflags;
}
-const struct file_operations devx_async_cmd_event_fops = {
+static const struct file_operations devx_async_cmd_event_fops = {
.owner = THIS_MODULE,
.read = devx_async_cmd_event_read,
.poll = devx_async_cmd_event_poll,
@@ -1900,7 +1917,7 @@ static bool devx_is_supported(struct ib_device *device)
{
struct mlx5_ib_dev *dev = to_mdev(device);
- return !dev->rep && MLX5_CAP_GEN(dev->mdev, log_max_uctx);
+ return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}
const struct uapi_definition mlx5_ib_devx_defs[] = {
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 798591a18484..1fc302d41a53 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -29,6 +29,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
*namespace = MLX5_FLOW_NAMESPACE_EGRESS;
break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
+ *namespace = MLX5_FLOW_NAMESPACE_FDB;
+ break;
default:
return -EINVAL;
}
@@ -75,7 +78,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct ib_qp *qp = NULL;
struct ib_uobject *uobj =
uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
- struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
int len, ret, i;
u32 counter_id = 0;
@@ -93,6 +96,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
return -EINVAL;
+ /* Allow only DEVX object as dest when inserting to FDB */
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
+ return -EINVAL;
+
if (dest_devx) {
devx_obj = uverbs_attr_get_obj(
attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
@@ -104,6 +111,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
*/
if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
return -EINVAL;
+ /* Allow only flow table as dest when inserting to FDB */
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ return -EINVAL;
} else if (dest_qp) {
struct mlx5_ib_qp *mqp;
@@ -189,7 +200,8 @@ err_out:
}
static int flow_matcher_cleanup(struct ib_uobject *uobject,
- enum rdma_remove_reason why)
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_flow_matcher *obj = uobject->object;
int ret;
@@ -202,21 +214,67 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
return 0;
}
+static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
+ struct mlx5_ib_flow_matcher *obj)
+{
+ enum mlx5_ib_uapi_flow_table_type ft_type =
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
+ u32 flags;
+ int err;
+
+ /* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE and older
+ * users should switch to it. The old attribute is kept so as not to
+ * break existing userspace.
+ */
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
+ uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
+ return -EINVAL;
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
+ err = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
+ if (err)
+ return err;
+
+ err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ IB_FLOW_ATTR_FLAGS_EGRESS);
+ if (err)
+ return err;
+
+ if (flags) {
+ mlx5_ib_ft_type_to_namespace(
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
+ &obj->ns_type);
+ return 0;
+ }
+ }
+
+ obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
+
+ return 0;
+}
+
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
- struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
struct mlx5_ib_flow_matcher *obj;
- u32 flags;
int err;
obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
if (!obj)
return -ENOMEM;
- obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
obj->mask_len = uverbs_attr_get_len(
attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
err = uverbs_copy_from(&obj->matcher_mask,
@@ -242,19 +300,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
if (err)
goto end;
- err = uverbs_get_flags32(&flags, attrs,
- MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
- IB_FLOW_ATTR_FLAGS_EGRESS);
+ err = mlx5_ib_matcher_ns(attrs, obj);
if (err)
goto end;
- if (flags) {
- err = mlx5_ib_ft_type_to_namespace(
- MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX, &obj->ns_type);
- if (err)
- goto end;
- }
-
uobj->object = obj;
obj->mdev = dev->mdev;
atomic_set(&obj->usecnt, 0);
@@ -326,7 +375,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
- struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
enum mlx5_ib_uapi_flow_table_type ft_type;
struct ib_flow_action *action;
int num_actions;
@@ -353,7 +402,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
if (IS_ERR(action))
return PTR_ERR(action);
- uverbs_flow_action_fill_action(action, uobj, uobj->context->device,
+ uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
IB_FLOW_ACTION_UNSPECIFIED);
return 0;
@@ -445,7 +494,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
- struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
enum mlx5_ib_uapi_flow_table_type ft_type;
struct mlx5_ib_flow_action *maction;
@@ -493,8 +542,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
goto free_maction;
}
- uverbs_flow_action_fill_action(&maction->ib_action, uobj,
- uobj->context->device,
+ uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
IB_FLOW_ACTION_UNSPECIFIED);
return 0;
@@ -605,6 +653,9 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
enum ib_flow_flags,
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
@@ -619,15 +670,9 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
-static bool flow_is_supported(struct ib_device *device)
-{
- return !to_mdev(device)->rep;
-}
-
const struct uapi_definition mlx5_ib_flow_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
- MLX5_IB_OBJECT_FLOW_MATCHER,
- UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
+ MLX5_IB_OBJECT_FLOW_MATCHER),
UAPI_DEF_CHAIN_OBJ_TREE(
UVERBS_OBJECT_FLOW,
&mlx5_ib_fs),
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index b8639ac71336..cbcc40d776b9 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -7,69 +7,59 @@
#include "ib_rep.h"
#include "srq.h"
-static const struct mlx5_ib_profile vf_rep_profile = {
- STAGE_CREATE(MLX5_IB_STAGE_INIT,
- mlx5_ib_stage_init_init,
- mlx5_ib_stage_init_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
- mlx5_ib_stage_rep_flow_db_init,
- NULL),
- STAGE_CREATE(MLX5_IB_STAGE_CAPS,
- mlx5_ib_stage_caps_init,
- NULL),
- STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
- mlx5_ib_stage_rep_non_default_cb,
- NULL),
- STAGE_CREATE(MLX5_IB_STAGE_ROCE,
- mlx5_ib_stage_rep_roce_init,
- mlx5_ib_stage_rep_roce_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_SRQ,
- mlx5_init_srq_table,
- mlx5_cleanup_srq_table),
- STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
- mlx5_ib_stage_dev_res_init,
- mlx5_ib_stage_dev_res_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
- mlx5_ib_stage_counters_init,
- mlx5_ib_stage_counters_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_BFREG,
- mlx5_ib_stage_bfrag_init,
- mlx5_ib_stage_bfrag_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
- NULL,
- mlx5_ib_stage_pre_ib_reg_umr_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
- mlx5_ib_stage_ib_reg_init,
- mlx5_ib_stage_ib_reg_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
- mlx5_ib_stage_post_ib_reg_umr_init,
- NULL),
-};
+static int
+mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_ib_dev *ibdev;
+ int vport_index;
+
+ ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
+ vport_index = ibdev->free_port++;
+
+ ibdev->port[vport_index].rep = rep;
+ write_lock(&ibdev->port[vport_index].roce.netdev_lock);
+ ibdev->port[vport_index].roce.netdev =
+ mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
+ write_unlock(&ibdev->port[vport_index].roce.netdev_lock);
+
+ return 0;
+}
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ int num_ports = MLX5_TOTAL_VPORTS(dev);
const struct mlx5_ib_profile *profile;
struct mlx5_ib_dev *ibdev;
+ int vport_index;
if (rep->vport == MLX5_VPORT_UPLINK)
profile = &uplink_rep_profile;
else
- profile = &vf_rep_profile;
+ return mlx5_ib_set_vport_rep(dev, rep);
ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
if (!ibdev)
return -ENOMEM;
- ibdev->rep = rep;
- ibdev->mdev = dev;
- ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
- MLX5_CAP_GEN(dev, num_vhca_ports));
- if (!__mlx5_ib_add(ibdev, profile)) {
+ ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
+ GFP_KERNEL);
+ if (!ibdev->port) {
ib_dealloc_device(&ibdev->ib_dev);
- return -EINVAL;
+ return -ENOMEM;
}
+ ibdev->is_rep = true;
+ vport_index = ibdev->free_port++;
+ ibdev->port[vport_index].rep = rep;
+ ibdev->port[vport_index].roce.netdev =
+ mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
+ ibdev->mdev = dev;
+ ibdev->num_ports = num_ports;
+
+ if (!__mlx5_ib_add(ibdev, profile))
+ return -EINVAL;
+
rep->rep_if[REP_IB].priv = ibdev;
return 0;
@@ -80,13 +70,13 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5_ib_dev *dev;
- if (!rep->rep_if[REP_IB].priv)
+ if (!rep->rep_if[REP_IB].priv ||
+ rep->vport != MLX5_VPORT_UPLINK)
return;
dev = mlx5_ib_rep_to_dev(rep);
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
rep->rep_if[REP_IB].priv = NULL;
- ib_dealloc_device(&dev->ib_dev);
}
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -140,22 +130,21 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
return mlx5_eswitch_vport_rep(esw, vport);
}
-int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_sq *sq)
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u16 port)
{
- struct mlx5_flow_handle *flow_rule;
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep;
- if (!dev->rep)
- return 0;
+ if (!dev->is_rep || !port)
+ return NULL;
- flow_rule =
- mlx5_eswitch_add_send_to_vport_rule(esw,
- dev->rep->vport,
- sq->base.mqp.qpn);
- if (IS_ERR(flow_rule))
- return PTR_ERR(flow_rule);
- sq->flow_rule = flow_rule;
+ if (!dev->port[port - 1].rep)
+ return ERR_PTR(-EINVAL);
- return 0;
+ rep = dev->port[port - 1].rep;
+
+ return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
+ sq->base.mqp.qpn);
}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 798d41e61fb4..1d9778da8a50 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -20,8 +20,9 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
int vport_index);
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
-int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_sq *sq);
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u16 port);
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
int vport_index);
#else /* CONFIG_MLX5_ESWITCH */
@@ -52,10 +53,12 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {}
static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {}
-static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_sq *sq)
+static inline
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u16 port)
{
- return 0;
+ return NULL;
}
static inline
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 531ff20b32ad..abac70ad5c7c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -156,6 +156,34 @@ static int get_port_state(struct ib_device *ibdev,
return ret;
}
+static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
+ struct net_device *ndev,
+ u8 *port_num)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ struct net_device *rep_ndev;
+ struct mlx5_ib_port *port;
+ int i;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ port = &dev->port[i];
+ if (!port->rep)
+ continue;
+
+ read_lock(&port->roce.netdev_lock);
+ rep_ndev = mlx5_ib_get_rep_netdev(esw,
+ port->rep->vport);
+ if (rep_ndev == ndev) {
+ read_unlock(&port->roce.netdev_lock);
+ *port_num = i + 1;
+ return &port->roce;
+ }
+ read_unlock(&port->roce.netdev_lock);
+ }
+
+ return NULL;
+}
+
static int mlx5_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -172,22 +200,17 @@ static int mlx5_netdev_event(struct notifier_block *this,
switch (event) {
case NETDEV_REGISTER:
+ /* Should already be registered during the load */
+ if (ibdev->is_rep)
+ break;
write_lock(&roce->netdev_lock);
- if (ibdev->rep) {
- struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
- struct net_device *rep_ndev;
-
- rep_ndev = mlx5_ib_get_rep_netdev(esw,
- ibdev->rep->vport);
- if (rep_ndev == ndev)
- roce->netdev = ndev;
- } else if (ndev->dev.parent == &mdev->pdev->dev) {
+ if (ndev->dev.parent == mdev->device)
roce->netdev = ndev;
- }
write_unlock(&roce->netdev_lock);
break;
case NETDEV_UNREGISTER:
+ /* In case of reps, ib device goes away before the netdevs */
write_lock(&roce->netdev_lock);
if (roce->netdev == ndev)
roce->netdev = NULL;
@@ -205,6 +228,10 @@ static int mlx5_netdev_event(struct notifier_block *this,
dev_put(lag_ndev);
}
+ if (ibdev->is_rep)
+ roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
+ if (!roce)
+ return NOTIFY_DONE;
if ((upper == ndev || (!upper && ndev == roce->netdev))
&& ibdev->ib_active) {
struct ib_event ibev = { };
@@ -257,11 +284,11 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
/* Ensure ndev does not disappear before we invoke dev_hold()
*/
- read_lock(&ibdev->roce[port_num - 1].netdev_lock);
- ndev = ibdev->roce[port_num - 1].netdev;
+ read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
+ ndev = ibdev->port[port_num - 1].roce.netdev;
if (ndev)
dev_hold(ndev);
- read_unlock(&ibdev->roce[port_num - 1].netdev_lock);
+ read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
out:
mlx5_ib_put_native_port_mdev(ibdev, port_num);
@@ -479,9 +506,14 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
+ * Use native port in case of reps
*/
- err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
- mdev_port_num);
+ if (dev->is_rep)
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ 1);
+ else
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ mdev_port_num);
if (err)
goto out;
ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
@@ -542,52 +574,22 @@ out:
return err;
}
-struct mlx5_ib_vlan_info {
- u16 vlan_id;
- bool vlan;
-};
-
-static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
-{
- struct mlx5_ib_vlan_info *vlan_info = data;
-
- if (is_vlan_dev(lower_dev)) {
- vlan_info->vlan = true;
- vlan_info->vlan_id = vlan_dev_vlan_id(lower_dev);
- }
- /* We are interested only in first level vlan device, so
- * always return 1 to stop iterating over next level devices.
- */
- return 1;
-}
-
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum ib_gid_type gid_type = IB_GID_TYPE_IB;
- struct mlx5_ib_vlan_info vlan_info = { };
+ u16 vlan_id = 0xffff;
u8 roce_version = 0;
u8 roce_l3_type = 0;
u8 mac[ETH_ALEN];
+ int ret;
if (gid) {
gid_type = attr->gid_type;
- ether_addr_copy(mac, attr->ndev->dev_addr);
-
- if (is_vlan_dev(attr->ndev)) {
- vlan_info.vlan = true;
- vlan_info.vlan_id = vlan_dev_vlan_id(attr->ndev);
- } else {
- /* If the netdev is upper device and if it's lower
- * lower device is vlan device, consider vlan id of
- * the lower vlan device for this gid entry.
- */
- rcu_read_lock();
- netdev_walk_all_lower_dev_rcu(attr->ndev,
- get_lower_dev_vlan, &vlan_info);
- rcu_read_unlock();
- }
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
}
switch (gid_type) {
@@ -608,7 +610,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
roce_l3_type, gid->raw, mac,
- vlan_info.vlan, vlan_info.vlan_id,
+ vlan_id < VLAN_CFI_MASK, vlan_id,
port_num);
}
@@ -1119,6 +1121,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, qp_packet_based))
resp.flags |=
MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
+
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
}
if (field_avail(typeof(resp), sw_parsing_caps,
@@ -1405,7 +1409,9 @@ static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
{
int ret;
- /* Only link layer == ethernet is valid for representors */
+ /* Only link layer == ethernet is valid for representors
+ * and we always use port 1
+ */
ret = mlx5_query_port_roce(ibdev, port, props);
if (ret || !props)
return ret;
@@ -1952,11 +1958,11 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
print_lib_caps(dev, context->lib_caps);
if (dev->lag_active) {
- u8 port = mlx5_core_native_port_num(dev->mdev);
+ u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
atomic_add_return(
- 1, &dev->roce[port].tx_port_affinity));
+ 1, &dev->port[port].roce.tx_port_affinity));
}
return 0;
@@ -2009,7 +2015,7 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
+ return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
static int get_command(unsigned long offset)
@@ -2058,20 +2064,22 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
{
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
+ !(vma->vm_flags & VM_SHARED))
return -EINVAL;
if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
return -EOPNOTSUPP;
- if (vma->vm_flags & VM_WRITE)
+ if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
- if (!dev->mdev->clock_info_page)
+ if (!dev->mdev->clock_info)
return -EOPNOTSUPP;
- return rdma_user_mmap_page(&context->ibucontext, vma,
- dev->mdev->clock_info_page, PAGE_SIZE);
+ return vm_insert_page(vma, vma->vm_start,
+ virt_to_page(dev->mdev->clock_info));
}
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
@@ -2199,7 +2207,7 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
page_idx + npages)
return -EINVAL;
- pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
+ pfn = ((dev->mdev->bar_addr +
MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
PAGE_SHIFT) +
page_idx;
@@ -2231,19 +2239,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (vma->vm_flags & VM_WRITE)
return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
/* Don't expose to user-space information it shouldn't have */
if (PAGE_SIZE > 4096)
return -EOPNOTSUPP;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pfn = (dev->mdev->iseg_base +
offsetof(struct mlx5_init_seg, internal_timer_h)) >>
PAGE_SHIFT;
- if (io_remap_pfn_range(vma, vma->vm_start, pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
- break;
+ return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -2257,89 +2264,200 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
return 0;
}
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs)
+static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
+ u32 type)
{
- u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
- struct mlx5_memic *memic = &to_mdev(ibdev)->memic;
- phys_addr_t memic_addr;
- struct mlx5_ib_dm *dm;
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
+ return -EOPNOTSUPP;
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ if (!capable(CAP_SYS_RAWIO) ||
+ !capable(CAP_NET_RAW))
+ return -EPERM;
+
+ if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
+ return -EOPNOTSUPP;
+ break;
+ }
+
+ return 0;
+}
+
+static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
+ struct mlx5_ib_dm *dm,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
u64 start_offset;
u32 page_idx;
int err;
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return ERR_PTR(-ENOMEM);
-
- mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n",
- attr->length, act_size, attr->alignment);
+ dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
- err = mlx5_cmd_alloc_memic(memic, &memic_addr,
- act_size, attr->alignment);
+ err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
+ dm->size, attr->alignment);
if (err)
- goto err_free;
+ return err;
- start_offset = memic_addr & ~PAGE_MASK;
- page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
- MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
+ page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
+ MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
PAGE_SHIFT;
err = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ goto err_dealloc;
+
+ start_offset = dm->dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs,
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
&start_offset, sizeof(start_offset));
if (err)
goto err_dealloc;
+ bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
+ DIV_ROUND_UP(dm->size, PAGE_SIZE));
+
+ return 0;
+
+err_dealloc:
+ mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
+
+ return err;
+}
+
+static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+ struct mlx5_ib_dm *dm,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs,
+ int type)
+{
+ struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+ u64 act_size;
+ int err;
+
+ /* Allocation size must be a multiple of the basic block size
+ * and a power of 2.
+ */
+ act_size = roundup(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+ act_size = roundup_pow_of_two(act_size);
+
+ dm->size = act_size;
+ err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
+ to_mucontext(ctx)->devx_uid, &dm->dev_addr,
+ &dm->icm_dm.obj_id);
+ if (err)
+ return err;
+
err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- &page_idx, sizeof(page_idx));
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &dm->dev_addr, sizeof(dm->dev_addr));
if (err)
- goto err_dealloc;
+ mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
+ to_mucontext(ctx)->devx_uid,
+ dm->dev_addr, dm->icm_dm.obj_id);
- bitmap_set(to_mucontext(context)->dm_pages, page_idx,
- DIV_ROUND_UP(act_size, PAGE_SIZE));
+ return err;
+}
- dm->dev_addr = memic_addr;
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dm *dm;
+ enum mlx5_ib_uapi_dm_type type;
+ int err;
+
+ err = uverbs_get_const_default(&type, attrs,
+ MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ MLX5_IB_UAPI_DM_TYPE_MEMIC);
+ if (err)
+ return ERR_PTR(err);
+
+ mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+ type, attr->length, attr->alignment);
+
+ err = check_dm_type_support(to_mdev(ibdev), type);
+ if (err)
+ return ERR_PTR(err);
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->type = type;
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ err = handle_alloc_dm_memic(context, dm,
+ attr,
+ attrs);
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (err)
+ goto err_free;
return &dm->ibdm;
-err_dealloc:
- mlx5_cmd_dealloc_memic(memic, memic_addr,
- act_size);
err_free:
kfree(dm);
return ERR_PTR(err);
}
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
+int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
- struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
+ struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
struct mlx5_ib_dm *dm = to_mdm(ibdm);
- u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE);
u32 page_idx;
int ret;
- ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size);
- if (ret)
- return ret;
+ switch (dm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
+ if (ret)
+ return ret;
- page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
- MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
- PAGE_SHIFT;
- bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
- page_idx,
- DIV_ROUND_UP(act_size, PAGE_SIZE));
+ page_idx = (dm->dev_addr -
+ pci_resource_start(dm_db->dev->pdev, 0) -
+ MLX5_CAP64_DEV_MEM(dm_db->dev,
+ memic_bar_start_addr)) >>
+ PAGE_SHIFT;
+ bitmap_clear(ctx->dm_pages, page_idx,
+ DIV_ROUND_UP(dm->size, PAGE_SIZE));
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
+ ctx->devx_uid, dm->dev_addr,
+ dm->icm_dm.obj_id);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
kfree(dm);
return 0;
}
-static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mlx5_ib_pd *pd = to_mpd(ibpd);
struct ib_device *ibdev = ibpd->device;
@@ -2348,8 +2466,10 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0;
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
- uid = context ? to_mucontext(context)->devx_uid : 0;
+ uid = context ? context->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
@@ -2359,7 +2479,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
pd->uid = uid;
- if (context) {
+ if (udata) {
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
@@ -2370,7 +2490,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return 0;
}
-static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
+static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
@@ -3149,10 +3269,10 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
if (ft_type == MLX5_IB_FT_RX) {
fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
prio = &dev->flow_db->prios[priority];
- if (!dev->rep &&
+ if (!dev->is_rep &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
- if (!dev->rep &&
+ if (!dev->is_rep &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
reformat_l3_tunnel_to_l2))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
@@ -3162,7 +3282,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
log_max_ft_size));
fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
prio = &dev->flow_db->egress_prios[priority];
- if (!dev->rep &&
+ if (!dev->is_rep &&
MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
}
@@ -3195,12 +3315,11 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
if (!ns)
return ERR_PTR(-ENOTSUPP);
- if (num_entries > max_table_size)
- return ERR_PTR(-ENOMEM);
+ max_table_size = min_t(int, num_entries, max_table_size);
ft = prio->flow_table;
if (!ft)
- return _get_prio(ns, prio, priority, num_entries, num_groups,
+ return _get_prio(ns, prio, priority, max_table_size, num_groups,
flags);
return prio;
@@ -3368,7 +3487,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!is_valid_attr(dev->mdev, flow_attr))
return ERR_PTR(-EINVAL);
- if (dev->rep && is_egress)
+ if (dev->is_rep && is_egress)
return ERR_PTR(-EINVAL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -3399,13 +3518,17 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!flow_is_multicast_only(flow_attr))
set_underlay_qp(dev, spec, underlay_qpn);
- if (dev->rep) {
+ if (dev->is_rep) {
void *misc;
+ if (!dev->port[flow_attr->port - 1].rep) {
+ err = -EINVAL;
+ goto free;
+ }
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port,
- dev->rep->vport);
+ dev->port[flow_attr->port - 1].rep->vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -3767,11 +3890,16 @@ _get_flow_table(struct mlx5_ib_dev *dev,
bool mcast)
{
struct mlx5_flow_namespace *ns = NULL;
- struct mlx5_ib_flow_prio *prio;
- int max_table_size;
+ struct mlx5_ib_flow_prio *prio = NULL;
+ int max_table_size = 0;
u32 flags = 0;
int priority;
+ if (mcast)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
@@ -3780,20 +3908,18 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
reformat_l3_tunnel_to_l2))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
- max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
- log_max_ft_size));
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ max_table_size = BIT(
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
+ priority = FDB_BYPASS_PATH;
}
- if (max_table_size < MLX5_FS_MAX_ENTRIES)
- return ERR_PTR(-ENOMEM);
-
- if (mcast)
- priority = MLX5_IB_FLOW_MCAST_PRIO;
- else
- priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+ max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
if (!ns)
@@ -3801,13 +3927,18 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
prio = &dev->flow_db->prios[priority];
- else
+ else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
prio = &dev->flow_db->egress_prios[priority];
+ else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ prio = &dev->flow_db->fdb;
+
+ if (!prio)
+ return ERR_PTR(-EINVAL);
if (prio->flow_table)
return prio;
- return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
+ return _get_prio(ns, prio, priority, max_table_size,
MLX5_FS_MAX_TYPES, flags);
}
@@ -4354,9 +4485,13 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
+ u8 port = (eqe->data.port.port >> 4) & 0xf;
+
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
- schedule_work(&ibdev->delay_drop.delay_drop_work);
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ schedule_work(&ibdev->delay_drop.delay_drop_work);
break;
default: /* do nothing */
return;
@@ -4503,7 +4638,7 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= dev->num_ports; port++) {
+ for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
dev->mdev->port_caps[port - 1].has_smi = false;
if (MLX5_CAP_GEN(dev->mdev, port_type) ==
MLX5_CAP_PORT_TYPE_IB) {
@@ -4534,7 +4669,7 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
mlx5_query_ext_port_caps(dev, port);
}
-static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
+static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
@@ -4549,10 +4684,6 @@ static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
if (!dprops)
goto out;
- err = set_has_smi_cap(dev);
- if (err)
- goto out;
-
err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
if (err) {
mlx5_ib_warn(dev, "query_device failed %d\n", err);
@@ -4581,6 +4712,16 @@ out:
return err;
}
+static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
+{
+ /* For representors use port 1, as this is the only native
+ * port
+ */
+ if (dev->is_rep)
+ return __get_port_caps(dev, 1);
+ return __get_port_caps(dev, port);
+}
+
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
int err;
@@ -4590,7 +4731,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
if (dev->umrc.qp)
- mlx5_ib_destroy_qp(dev->umrc.qp);
+ mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
if (dev->umrc.cq)
ib_free_cq(dev->umrc.cq);
if (dev->umrc.pd)
@@ -4695,7 +4836,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
return 0;
error_4:
- mlx5_ib_destroy_qp(qp);
+ mlx5_ib_destroy_qp(qp, NULL);
dev->umrc.qp = NULL;
error_3:
@@ -4746,11 +4887,11 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
devr->p0->uobject = NULL;
atomic_set(&devr->p0->usecnt, 0);
- ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
+ ret = mlx5_ib_alloc_pd(devr->p0, NULL);
if (ret)
goto error0;
- devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
+ devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL);
if (IS_ERR(devr->c0)) {
ret = PTR_ERR(devr->c0);
goto error1;
@@ -4762,7 +4903,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
devr->c0->cq_context = NULL;
atomic_set(&devr->c0->usecnt, 0);
- devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+ devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
if (IS_ERR(devr->x0)) {
ret = PTR_ERR(devr->x0);
goto error2;
@@ -4773,7 +4914,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
mutex_init(&devr->x0->tgt_qp_mutex);
INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
- devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+ devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
if (IS_ERR(devr->x1)) {
ret = PTR_ERR(devr->x1);
goto error3;
@@ -4791,19 +4932,21 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
attr.ext.cq = devr->c0;
attr.ext.xrc.xrcd = devr->x0;
- devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
- if (IS_ERR(devr->s0)) {
- ret = PTR_ERR(devr->s0);
+ devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
+ if (!devr->s0) {
+ ret = -ENOMEM;
goto error4;
}
+
devr->s0->device = &dev->ib_dev;
devr->s0->pd = devr->p0;
- devr->s0->uobject = NULL;
- devr->s0->event_handler = NULL;
- devr->s0->srq_context = NULL;
devr->s0->srq_type = IB_SRQT_XRC;
devr->s0->ext.xrc.xrcd = devr->x0;
devr->s0->ext.cq = devr->c0;
+ ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
+ if (ret)
+ goto err_create;
+
atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
atomic_inc(&devr->s0->ext.cq->usecnt);
atomic_inc(&devr->p0->usecnt);
@@ -4813,18 +4956,21 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
attr.attr.max_sge = 1;
attr.attr.max_wr = 1;
attr.srq_type = IB_SRQT_BASIC;
- devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
- if (IS_ERR(devr->s1)) {
- ret = PTR_ERR(devr->s1);
+ devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
+ if (!devr->s1) {
+ ret = -ENOMEM;
goto error5;
}
+
devr->s1->device = &dev->ib_dev;
devr->s1->pd = devr->p0;
- devr->s1->uobject = NULL;
- devr->s1->event_handler = NULL;
- devr->s1->srq_context = NULL;
devr->s1->srq_type = IB_SRQT_BASIC;
devr->s1->ext.cq = devr->c0;
+
+ ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
+ if (ret)
+ goto error6;
+
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s1->usecnt, 0);
@@ -4836,16 +4982,20 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
return 0;
+error6:
+ kfree(devr->s1);
error5:
- mlx5_ib_destroy_srq(devr->s0);
+ mlx5_ib_destroy_srq(devr->s0, NULL);
+err_create:
+ kfree(devr->s0);
error4:
- mlx5_ib_dealloc_xrcd(devr->x1);
+ mlx5_ib_dealloc_xrcd(devr->x1, NULL);
error3:
- mlx5_ib_dealloc_xrcd(devr->x0);
+ mlx5_ib_dealloc_xrcd(devr->x0, NULL);
error2:
- mlx5_ib_destroy_cq(devr->c0);
+ mlx5_ib_destroy_cq(devr->c0, NULL);
error1:
- mlx5_ib_dealloc_pd(devr->p0);
+ mlx5_ib_dealloc_pd(devr->p0, NULL);
error0:
kfree(devr->p0);
return ret;
@@ -4853,20 +5003,20 @@ error0:
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
- struct mlx5_ib_dev *dev =
- container_of(devr, struct mlx5_ib_dev, devr);
int port;
- mlx5_ib_destroy_srq(devr->s1);
- mlx5_ib_destroy_srq(devr->s0);
- mlx5_ib_dealloc_xrcd(devr->x0);
- mlx5_ib_dealloc_xrcd(devr->x1);
- mlx5_ib_destroy_cq(devr->c0);
- mlx5_ib_dealloc_pd(devr->p0);
+ mlx5_ib_destroy_srq(devr->s1, NULL);
+ kfree(devr->s1);
+ mlx5_ib_destroy_srq(devr->s0, NULL);
+ kfree(devr->s0);
+ mlx5_ib_dealloc_xrcd(devr->x0, NULL);
+ mlx5_ib_dealloc_xrcd(devr->x1, NULL);
+ mlx5_ib_destroy_cq(devr->c0, NULL);
+ mlx5_ib_dealloc_pd(devr->p0, NULL);
kfree(devr->p0);
/* Make sure no change P_Key work items are still executing */
- for (port = 0; port < dev->num_ports; ++port)
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
cancel_work_sync(&devr->ports[port].pkey_change_work);
}
@@ -5009,10 +5159,10 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce[port_num].nb);
+ dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
- dev->roce[port_num].nb.notifier_call = NULL;
+ dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
}
@@ -5021,9 +5171,9 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
- if (dev->roce[port_num].nb.notifier_call) {
- unregister_netdevice_notifier(&dev->roce[port_num].nb);
- dev->roce[port_num].nb.notifier_call = NULL;
+ if (dev->port[port_num].roce.nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
+ dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
@@ -5572,7 +5722,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
port_num + 1);
- ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
+ ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}
/* The mlx5_ib_multiport_mutex should be held when calling this function */
@@ -5673,7 +5823,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
}
if (bound) {
- dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
+ dev_dbg(mpi->mdev->device,
+ "removing port from unaffiliated list.\n");
mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
list_del(&mpi->list);
break;
@@ -5731,7 +5882,10 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
UA_MANDATORY),
UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
UVERBS_ATTR_TYPE(u16),
- UA_MANDATORY));
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ enum mlx5_ib_uapi_dm_type,
+ UA_OPTIONAL));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_flow_action,
@@ -5822,35 +5976,58 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
return &mcounters->ibcntrs;
}
-void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
mlx5_ib_cleanup_multiport_master(dev);
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
srcu_barrier(&dev->mr_srcu);
cleanup_srcu_struct(&dev->mr_srcu);
}
- kfree(dev->port);
+
+ WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
+
+ WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
+ !bitmap_empty(
+ dev->dm.steering_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
+
+ kfree(dev->dm.steering_sw_icm_alloc_blocks);
+
+ WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
+ !bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(
+ mdev, log_header_modify_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
+
+ kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
}
-int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
+ u64 header_modify_icm_blocks = 0;
+ u64 steering_icm_blocks = 0;
int err;
int i;
- dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
- GFP_KERNEL);
- if (!dev->port)
- return -ENOMEM;
-
for (i = 0; i < dev->num_ports; i++) {
spin_lock_init(&dev->port[i].mp.mpi_lock);
- rwlock_init(&dev->roce[i].netdev_lock);
+ rwlock_init(&dev->port[i].roce.netdev_lock);
+ dev->port[i].roce.dev = dev;
+ dev->port[i].roce.native_port_num = i + 1;
+ dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
err = mlx5_ib_init_multiport_master(dev);
if (err)
- goto err_free_port;
+ return err;
+
+ err = set_has_smi_cap(dev);
+ if (err)
+ return err;
if (!mlx5_core_mp_enabled(mdev)) {
for (i = 1; i <= dev->num_ports; i++) {
@@ -5872,28 +6049,60 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
- dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ dev->ib_dev.dev.parent = mdev->device;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
- spin_lock_init(&dev->memic.memic_lock);
- dev->memic.dev = mdev;
+ if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
+ if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
+ steering_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(mdev,
+ log_steering_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
+
+ dev->dm.steering_sw_icm_alloc_blocks =
+ kcalloc(BITS_TO_LONGS(steering_icm_blocks),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dev->dm.steering_sw_icm_alloc_blocks)
+ goto err_mp;
+ }
+
+ if (MLX5_CAP64_DEV_MEM(mdev,
+ header_modify_sw_icm_start_address)) {
+ header_modify_icm_blocks = BIT(
+ MLX5_CAP_DEV_MEM(
+ mdev, log_header_modify_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
+
+ dev->dm.header_modify_sw_icm_alloc_blocks =
+ kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dev->dm.header_modify_sw_icm_alloc_blocks)
+ goto err_dm;
+ }
+ }
+
+ spin_lock_init(&dev->dm.lock);
+ dev->dm.dev = mdev;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
err = init_srcu_struct(&dev->mr_srcu);
if (err)
- goto err_mp;
+ goto err_dm;
}
return 0;
+
+err_dm:
+ kfree(dev->dm.steering_sw_icm_alloc_blocks);
+ kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
+
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
-err_free_port:
- kfree(dev->port);
-
return -ENOMEM;
}
@@ -5909,20 +6118,6 @@ static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
return 0;
}
-int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
-{
- struct mlx5_ib_dev *nic_dev;
-
- nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
-
- if (!nic_dev)
- return -EINVAL;
-
- dev->flow_db = nic_dev->flow_db;
-
- return 0;
-}
-
static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
{
kfree(dev->flow_db);
@@ -5982,7 +6177,10 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.req_notify_cq = mlx5_ib_arm_cq,
.rereg_user_mr = mlx5_ib_rereg_user_mr,
.resize_cq = mlx5_ib_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
};
@@ -6018,7 +6216,7 @@ static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
.reg_dm_mr = mlx5_ib_reg_dm_mr,
};
-int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
int err;
@@ -6084,7 +6282,9 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
}
- if (MLX5_CAP_DEV_MEM(mdev, memic))
+ if (MLX5_CAP_DEV_MEM(mdev, memic) ||
+ MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
@@ -6124,7 +6324,7 @@ static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
.query_port = mlx5_ib_rep_query_port,
};
-int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
{
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
return 0;
@@ -6142,13 +6342,6 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
{
u8 port_num;
- int i;
-
- for (i = 0; i < dev->num_ports; i++) {
- dev->roce[i].dev = dev;
- dev->roce[i].native_port_num = i + 1;
- dev->roce[i].last_port_state = IB_PORT_DOWN;
- }
dev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
@@ -6160,6 +6353,7 @@ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ /* Register only for native ports */
return mlx5_add_netdev_notifier(dev, port_num);
}
@@ -6170,7 +6364,7 @@ static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
mlx5_remove_netdev_notifier(dev, port_num);
}
-int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
@@ -6186,7 +6380,7 @@ int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
return err;
}
-void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_stage_common_roce_cleanup(dev);
}
@@ -6233,12 +6427,12 @@ static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
}
}
-int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
{
return create_dev_resources(&dev->devr);
}
-void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
destroy_dev_resources(&dev->devr);
}
@@ -6260,7 +6454,7 @@ static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
.get_hw_stats = mlx5_ib_get_hw_stats,
};
-int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
@@ -6271,7 +6465,7 @@ int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
return 0;
}
-void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
@@ -6301,7 +6495,7 @@ static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
}
-int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
int err;
@@ -6316,13 +6510,13 @@ int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
return err;
}
-void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}
-int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
const char *name;
@@ -6334,17 +6528,17 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
return ib_register_device(&dev->ib_dev, name);
}
-void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
destroy_umrc_res(dev);
}
-void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
ib_unregister_device(&dev->ib_dev);
}
-int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
{
return create_umr_res(dev);
}
@@ -6399,6 +6593,9 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
if (profile->stage[stage].cleanup)
profile->stage[stage].cleanup(dev);
}
+
+ kfree(dev->port);
+ ib_dealloc_device(&dev->ib_dev);
}
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
@@ -6520,6 +6717,9 @@ const struct mlx5_ib_profile uplink_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
NULL,
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
+ mlx5_ib_stage_devx_init,
+ mlx5_ib_stage_devx_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6561,7 +6761,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
if (!bound) {
list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
- dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ dev_dbg(mdev->device,
+ "no suitable IB device found to bind to, added to unaffiliated list.\n");
}
mutex_unlock(&mlx5_ib_multiport_mutex);
@@ -6573,12 +6774,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
enum rdma_link_layer ll;
struct mlx5_ib_dev *dev;
int port_type_cap;
+ int num_ports;
printk_once(KERN_INFO "%s", mlx5_version);
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
- mlx5_ib_register_vport_reps(mdev);
+ if (!mlx5_core_mp_enabled(mdev))
+ mlx5_ib_register_vport_reps(mdev);
return mdev;
}
@@ -6588,13 +6791,20 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
return mlx5_ib_add_slave_port(mdev);
+ num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
if (!dev)
return NULL;
+ dev->port = kcalloc(num_ports, sizeof(*dev->port),
+ GFP_KERNEL);
+ if (!dev->port) {
+ ib_dealloc_device((struct ib_device *)dev);
+ return NULL;
+ }
dev->mdev = mdev;
- dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
- MLX5_CAP_GEN(mdev, num_vhca_ports));
+ dev->num_ports = num_ports;
return __mlx5_ib_add(dev, &pf_profile);
}
@@ -6621,8 +6831,6 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
dev = context;
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-
- ib_dealloc_device((struct ib_device *)dev);
}
static struct mlx5_interface mlx5_ib_interface = {
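Note on the main.c hunks above: ownership of the per-port array moves into the device teardown path, and the add path sizes the array once before allocating it, unwinding the device if the array allocation fails. The user-space sketch below mirrors only that ownership pattern; all names (demo_dev, demo_port, demo_add, demo_remove) are invented stand-ins, and calloc() stands in for kcalloc(), which additionally guards the n * size multiplication against overflow.

#include <stdlib.h>

struct demo_port { int counter_set; };

struct demo_dev {
	int num_ports;
	struct demo_port *port;
};

static struct demo_dev *demo_add(int num_ports)
{
	struct demo_dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return NULL;
	dev->port = calloc(num_ports, sizeof(*dev->port));
	if (!dev->port) {
		free(dev);		/* unwind the partially built device */
		return NULL;
	}
	dev->num_ports = num_ports;
	return dev;
}

static void demo_remove(struct demo_dev *dev)
{
	free(dev->port);		/* teardown frees both allocations */
	free(dev);
}

int main(void)
{
	struct demo_dev *dev = demo_add(2);

	if (!dev)
		return 1;
	demo_remove(dev);
	return 0;
}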
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 4a617d78eae1..40eb8be482e4 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -48,6 +48,7 @@
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
#include "srq.h"
@@ -117,6 +118,10 @@ enum {
MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};
+#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \
+ (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
struct list_head db_page_list;
@@ -194,6 +199,7 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
+ struct mlx5_ib_flow_prio fdb;
struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
@@ -553,15 +559,28 @@ enum mlx5_ib_mtt_access_flags {
struct mlx5_ib_dm {
struct ib_dm ibdm;
phys_addr_t dev_addr;
+ u32 type;
+ size_t size;
+ union {
+ struct {
+ u32 obj_id;
+ } icm_dm;
+ /* other dm types specific params should be added here */
+ };
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
-#define MLX5_IB_DM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
- IB_ACCESS_REMOTE_WRITE |\
- IB_ACCESS_REMOTE_READ |\
- IB_ACCESS_REMOTE_ATOMIC |\
- IB_ZERO_BASED)
+#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
+ IB_ACCESS_REMOTE_WRITE |\
+ IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC |\
+ IB_ZERO_BASED)
+
+#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
+ IB_ACCESS_REMOTE_WRITE |\
+ IB_ACCESS_REMOTE_READ |\
+ IB_ZERO_BASED)
struct mlx5_ib_mr {
struct ib_mr ibmr;
@@ -702,12 +721,6 @@ struct mlx5_ib_multiport {
spinlock_t mpi_lock;
};
-struct mlx5_ib_port {
- struct mlx5_ib_counters cnts;
- struct mlx5_ib_multiport mp;
- struct mlx5_ib_dbg_cc_params *dbg_cc_params;
-};
-
struct mlx5_roce {
/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
* netdev pointer
@@ -721,6 +734,14 @@ struct mlx5_roce {
u8 native_port_num;
};
+struct mlx5_ib_port {
+ struct mlx5_ib_counters cnts;
+ struct mlx5_ib_multiport mp;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_roce roce;
+ struct mlx5_eswitch_rep *rep;
+};
+
struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
@@ -840,10 +861,16 @@ struct mlx5_ib_flow_action {
};
};
-struct mlx5_memic {
+struct mlx5_dm {
struct mlx5_core_dev *dev;
- spinlock_t memic_lock;
+ /* This lock is used to protect the access to the shared
+ * allocation map when concurrent requests by different
+ * processes are handled.
+ */
+ spinlock_t lock;
DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
+ unsigned long *steering_sw_icm_alloc_blocks;
+ unsigned long *header_modify_sw_icm_alloc_blocks;
};
struct mlx5_read_counters_attr {
@@ -905,7 +932,6 @@ struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
struct notifier_block mdev_events;
- struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
/* serialize update of capability mask
*/
@@ -940,17 +966,18 @@ struct mlx5_ib_dev {
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
const struct mlx5_ib_profile *profile;
- struct mlx5_eswitch_rep *rep;
+ bool is_rep;
int lag_active;
struct mlx5_ib_lb_state lb;
u8 umr_fence;
struct list_head ib_dev_list;
u64 sys_image_guid;
- struct mlx5_memic memic;
+ struct mlx5_dm dm;
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
+ int free_port;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -968,6 +995,14 @@ static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}
+static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ return to_mdev(context->ibucontext.device);
+}
+
static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct mlx5_ib_cq, ibcq);
@@ -1046,17 +1081,16 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
+int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+ struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
-struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata);
+void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
+int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
-int mlx5_ib_destroy_srq(struct ib_srq *srq);
+void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
@@ -1068,7 +1102,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx5_ib_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
@@ -1083,9 +1117,8 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
void *buffer, int buflen, size_t *bc);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
-int mlx5_ib_destroy_cq(struct ib_cq *cq);
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
@@ -1112,10 +1145,9 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg);
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -1124,9 +1156,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+ struct ib_udata *udata);
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
@@ -1170,7 +1201,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
-int mlx5_ib_destroy_wq(struct ib_wq *wq);
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
@@ -1182,7 +1213,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
struct uverbs_attr_bundle *attrs);
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
+int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
@@ -1230,23 +1261,6 @@ static inline void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp,
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
/* Needed for rep profile */
-int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage);
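The new mlx5_udata_to_mdev() helper above recovers the driver device from an ib_udata by walking udata -> driver ucontext -> ib_device, which ultimately rests on the container_of pattern: the core passes around a pointer to an embedded base object and the driver subtracts the member offset to get back its wrapper. The standalone sketch below shows just that pattern; ib_base and drv_ctx are invented stand-ins, not the RDMA structures.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_base {		/* stand-in for a core-visible object */
	int id;
};

struct drv_ctx {		/* stand-in for a driver wrapper */
	unsigned long private_state;
	struct ib_base base;	/* embedded base object */
};

int main(void)
{
	struct drv_ctx ctx = { .private_state = 42, .base = { .id = 7 } };
	struct ib_base *b = &ctx.base;	/* what the core hands back */

	/* driver side: downcast from the embedded member to the wrapper */
	struct drv_ctx *d = container_of(b, struct drv_ctx, base);

	printf("id=%d private=%lu\n", b->id, d->private_state);
	return 0;
}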
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index c85f00255884..5f09699fab98 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -600,7 +600,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
- if (!mlx5_debugfs_root || dev->rep)
+ if (!mlx5_debugfs_root || dev->is_rep)
return;
debugfs_remove_recursive(dev->cache.root);
@@ -614,7 +614,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
struct dentry *dir;
int i;
- if (!mlx5_debugfs_root || dev->rep)
+ if (!mlx5_debugfs_root || dev->is_rep)
return;
cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
@@ -677,7 +677,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
MLX5_IB_UMR_OCTOWORD;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
- !dev->rep &&
+ !dev->is_rep &&
mlx5_core_is_pf(dev->mdev))
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
@@ -1159,8 +1159,8 @@ static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
mr->access_flags = access_flags;
}
-static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
- u64 length, int acc)
+static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
+ u64 length, int acc, int mode)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
@@ -1182,9 +1182,8 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
- MLX5_SET(mkc, mkc, access_mode_4_2,
- (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
@@ -1194,8 +1193,7 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
MLX5_SET64(mkc, mkc, len, length);
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr,
- memic_addr - pci_resource_start(dev->mdev->pdev, 0));
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -1237,15 +1235,31 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_dm *mdm = to_mdm(dm);
- u64 memic_addr;
+ struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
+ u64 start_addr = mdm->dev_addr + attr->offset;
+ int mode;
- if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
- return ERR_PTR(-EINVAL);
+ switch (mdm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
+ return ERR_PTR(-EINVAL);
+
+ mode = MLX5_MKC_ACCESS_MODE_MEMIC;
+ start_addr -= pci_resource_start(dev->pdev, 0);
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
+ return ERR_PTR(-EINVAL);
- memic_addr = mdm->dev_addr + attr->offset;
+ mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
- return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
- attr->access_flags);
+ return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
+ attr->access_flags, mode);
}
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -1623,15 +1637,14 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
kfree(mr);
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
return 0;
}
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
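In the mr.c hunks above, mlx5_ib_get_dm_mr() takes a 5-bit access mode and splits it across the two mkey-context fields exactly as the MLX5_SET() calls show: the low two bits go into access_mode_1_0 and the upper three into access_mode_4_2. The sketch below only demonstrates that bit split and its lossless round trip; it uses plain integers rather than the hardware command layout.

#include <assert.h>
#include <stdint.h>

static void split_access_mode(uint8_t mode, uint8_t *lo_1_0, uint8_t *hi_4_2)
{
	*lo_1_0 = mode & 0x3;		/* bits [1:0] */
	*hi_4_2 = (mode >> 2) & 0x7;	/* bits [4:2] */
}

int main(void)
{
	uint8_t lo, hi;

	split_access_mode(0x1f, &lo, &hi);		/* all five bits set */
	assert(lo == 0x3 && hi == 0x7);
	assert((uint8_t)((hi << 2) | lo) == 0x1f);	/* lossless round trip */
	return 0;
}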
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index c20bfc41ecf1..91507a2e9290 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -288,7 +288,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
- if (unlikely(!umem->npages && mr->parent &&
+ if (unlikely(!umem_odp->npages && mr->parent &&
!umem_odp->dying)) {
WRITE_ONCE(umem_odp->dying, 1);
atomic_inc(&mr->parent->num_leaf_free);
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
- u64 access_mask = ODP_READ_ALLOWED_BIT;
+ u64 access_mask;
u64 start_idx, page_mask;
struct ib_umem_odp *odp;
size_t size;
@@ -607,6 +607,7 @@ next_mr:
page_shift = mr->umem->page_shift;
page_mask = ~(BIT(page_shift) - 1);
start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+ access_mask = ODP_READ_ALLOWED_BIT;
if (prefetch && !downgrade && !mr->umem->writable) {
/* prefetch with write-access must
@@ -710,6 +711,15 @@ struct pf_frame {
int depth;
};
+static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
+{
+ if (!mmkey)
+ return false;
+ if (mmkey->type == MLX5_MKEY_MW)
+ return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
+ return mmkey->key == key;
+}
+
static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
{
struct mlx5_ib_mw *mw;
@@ -759,7 +769,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
next_mr:
mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
- if (!mmkey || mmkey->key != key) {
+ if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
goto srcu_unlock;
@@ -919,7 +929,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault,
void *wqe,
void *wqe_end, u32 *bytes_mapped,
- u32 *total_wqe_bytes, int receive_queue)
+ u32 *total_wqe_bytes, bool receive_queue)
{
int ret = 0, npages = 0;
u64 io_virt;
@@ -1199,17 +1209,15 @@ static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{
- int ret;
- void *wqe, *wqe_end;
+ bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
+ u16 wqe_index = pfault->wqe.wqe_index;
+ void *wqe = NULL, *wqe_end = NULL;
u32 bytes_mapped, total_wqe_bytes;
- char *buffer = NULL;
+ struct mlx5_core_rsc_common *res;
int resume_with_error = 1;
- u16 wqe_index = pfault->wqe.wqe_index;
- int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
- struct mlx5_core_rsc_common *res = NULL;
- struct mlx5_ib_qp *qp = NULL;
- struct mlx5_ib_srq *srq = NULL;
+ struct mlx5_ib_qp *qp;
size_t bytes_copied;
+ int ret = 0;
res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
if (!res) {
@@ -1217,87 +1225,74 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
return;
}
- switch (res->res) {
- case MLX5_RES_QP:
- qp = res_to_qp(res);
- break;
- case MLX5_RES_SRQ:
- case MLX5_RES_XSRQ:
- srq = res_to_srq(res);
- break;
- default:
- mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", pfault->type);
+ if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
+ res->res != MLX5_RES_XSRQ) {
+ mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
+ pfault->type);
goto resolve_page_fault;
}
- buffer = (char *)__get_free_page(GFP_KERNEL);
- if (!buffer) {
+ wqe = (void *)__get_free_page(GFP_KERNEL);
+ if (!wqe) {
mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
goto resolve_page_fault;
}
- if (qp) {
- if (requestor) {
- ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index,
- buffer, PAGE_SIZE,
- &bytes_copied);
- } else {
- ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index,
- buffer, PAGE_SIZE,
- &bytes_copied);
- }
- } else {
- ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index,
- buffer, PAGE_SIZE,
+ qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
+ if (qp && sq) {
+ ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_initiator_pfault_handler(
+ dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
+ } else if (qp && !sq) {
+ ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_responder_pfault_handler_rq(
+ dev, qp, wqe, &wqe_end, bytes_copied);
+ } else if (!qp) {
+ struct mlx5_ib_srq *srq = res_to_srq(res);
+
+ ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
&bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_responder_pfault_handler_srq(
+ dev, srq, &wqe, &wqe_end, bytes_copied);
}
- if (ret) {
- mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
- ret, wqe_index, pfault->token);
+ if (ret < 0 || wqe >= wqe_end)
goto resolve_page_fault;
- }
- wqe = buffer;
- if (requestor)
- ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp,
- &wqe, &wqe_end,
- bytes_copied);
- else if (qp)
- ret = mlx5_ib_mr_responder_pfault_handler_rq(dev, qp,
- wqe, &wqe_end,
- bytes_copied);
- else
- ret = mlx5_ib_mr_responder_pfault_handler_srq(dev, srq,
- &wqe, &wqe_end,
- bytes_copied);
+ ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
+ &total_wqe_bytes, !sq);
+ if (ret == -EAGAIN)
+ goto out;
- if (ret < 0)
+ if (ret < 0 || total_wqe_bytes > bytes_mapped)
goto resolve_page_fault;
- if (wqe >= wqe_end) {
- mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
- goto resolve_page_fault;
- }
+out:
+ ret = 0;
+ resume_with_error = 0;
- ret = pagefault_data_segments(dev, pfault, wqe, wqe_end,
- &bytes_mapped, &total_wqe_bytes,
- !requestor);
- if (ret == -EAGAIN) {
- resume_with_error = 0;
- goto resolve_page_fault;
- } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
- goto resolve_page_fault;
- }
+read_user:
+ if (ret)
+ mlx5_ib_err(
+ dev,
+ "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
+ ret, wqe_index, pfault->token);
- resume_with_error = 0;
resolve_page_fault:
mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
pfault->wqe.wq_num, resume_with_error,
pfault->type);
mlx5_core_res_put(res);
- free_page((unsigned long)buffer);
+ free_page((unsigned long)wqe);
}
static int pages_in_range(u64 address, u32 length)
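The mkey_is_eq() helper added in the odp.c hunks compares only the base part of an mkey when the cached entry is a memory window. The sketch below illustrates that comparison under the assumption (worth checking against mlx5_base_mkey() in the driver headers) that the low byte of an mkey is a variant/tag field which can differ between the cached key and the key carried in the page-fault WQE; the mask value here is that assumption made explicit.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define BASE_MKEY_MASK 0xffffff00u	/* assumed: drop the 8-bit variant/tag */

static bool base_mkey_eq(uint32_t a, uint32_t b)
{
	return (a & BASE_MKEY_MASK) == (b & BASE_MKEY_MASK);
}

int main(void)
{
	assert(base_mkey_eq(0x12345600, 0x123456ab));	/* same base, different tag */
	assert(!base_mkey_eq(0x12345600, 0x12345700));	/* different base key */
	return 0;
}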
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7cd006da1dae..f6623c77443a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -92,6 +92,7 @@ struct mlx5_modify_raw_qp_param {
struct mlx5_rate_limit rl;
u8 rq_q_ctr_id;
+ u16 port;
};
static void get_cqs(enum ib_qp_type qp_type,
@@ -777,14 +778,17 @@ err_umem:
}
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_rwq *rwq)
+ struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
- struct mlx5_ib_ucontext *context;
+ struct mlx5_ib_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx5_ib_ucontext,
+ ibucontext);
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
atomic_dec(&dev->delay_drop.rqs_cnt);
- context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &rwq->db);
if (rwq->umem)
ib_umem_release(rwq->umem);
@@ -983,11 +987,15 @@ err_bfreg:
}
static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
+ struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
+ struct ib_udata *udata)
{
- struct mlx5_ib_ucontext *context;
+ struct mlx5_ib_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx5_ib_ucontext,
+ ibucontext);
- context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
@@ -1206,11 +1214,11 @@ static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
}
-static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_sq *sq)
+static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
{
if (sq->flow_rule)
mlx5_del_flow_rules(sq->flow_rule);
+ sq->flow_rule = NULL;
}
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
@@ -1278,15 +1286,8 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
if (err)
goto err_umem;
- err = create_flow_rule_vport_sq(dev, sq);
- if (err)
- goto err_flow;
-
return 0;
-err_flow:
- mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
-
err_umem:
ib_umem_release(sq->ubuffer.umem);
sq->ubuffer.umem = NULL;
@@ -1297,7 +1298,7 @@ err_umem:
static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
- destroy_flow_rule_vport_sq(dev, sq);
+ destroy_flow_rule_vport_sq(sq);
mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
ib_umem_release(sq->ubuffer.umem);
}
@@ -1402,7 +1403,8 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, u32 tdn,
u32 *qp_flags_en,
- struct ib_pd *pd)
+ struct ib_pd *pd,
+ u32 *out, int outlen)
{
u8 lb_flag = 0;
u32 *in;
@@ -1429,15 +1431,16 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
- if (dev->rep) {
+ if (dev->is_rep) {
lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
}
MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
- err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
+ err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
+ rq->tirn = MLX5_GET(create_tir_out, out, tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
err = mlx5_ib_enable_lb(dev, false, true);
@@ -1463,6 +1466,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
int err;
u32 tdn = mucontext->tdn;
u16 uid = to_mpd(pd)->uid;
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
if (qp->sq.wqe_cnt) {
err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
@@ -1495,7 +1499,9 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
goto err_destroy_sq;
- err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd);
+ err = create_raw_packet_qp_tir(
+ dev, rq, tdn, &qp->flags_en, pd, out,
+ MLX5_ST_SZ_BYTES(create_tir_out));
if (err)
goto err_destroy_rq;
@@ -1504,6 +1510,20 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
resp->tirn = rq->tirn;
resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
+ resp->tir_icm_addr = MLX5_GET(
+ create_tir_out, out, icm_address_31_0);
+ resp->tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ resp->tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ resp->comp_mask |=
+ MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
+ }
}
}
@@ -1577,8 +1597,10 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_create_qp_resp resp = {};
int inlen;
+ int outlen;
int err;
u32 *in;
+ u32 *out;
void *tirc;
void *hfso;
u32 selected_fields = 0;
@@ -1641,7 +1663,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->rep) {
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
}
@@ -1658,10 +1680,12 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
+ outlen = MLX5_ST_SZ_BYTES(create_tir_out);
+ in = kvzalloc(inlen + outlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
+ out = in + MLX5_ST_SZ_DW(create_tir_in);
MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type,
@@ -1773,8 +1797,9 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
- err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+ err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
+ qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
err = mlx5_ib_enable_lb(dev, false, true);
@@ -1789,6 +1814,18 @@ create_tir:
if (mucontext->devx_uid) {
resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
resp.tirn = qp->rss_qp.tirn;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
+ resp.tir_icm_addr =
+ MLX5_GET(create_tir_out, out, icm_address_31_0);
+ resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ resp.comp_mask |=
+ MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
+ }
}
err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
@@ -1818,13 +1855,16 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
- if (rcqe_sz == 128) {
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
+ if (rcqe_sz == 128)
+ MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+
return;
}
- if (init_attr->qp_type != MLX5_IB_QPT_DCT)
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+ MLX5_SET(qpc, qpc, cs_res,
+ rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+ MLX5_RES_SCAT_DATA32_CQE);
}
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
@@ -2284,7 +2324,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err_create:
if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, pd, qp, base);
+ destroy_qp_user(dev, pd, qp, base, udata);
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
@@ -2395,7 +2435,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct mlx5_modify_raw_qp_param *raw_qp_param,
u8 lag_tx_affinity);
-static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_udata *udata)
{
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base;
@@ -2466,7 +2507,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
+ destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
@@ -2732,7 +2773,7 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
return 0;
}
-int mlx5_ib_destroy_qp(struct ib_qp *qp)
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
@@ -2743,7 +2784,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
return mlx5_ib_destroy_dct(mqp);
- destroy_qp_common(dev, mqp);
+ destroy_qp_common(dev, mqp, udata);
kfree(mqp);
@@ -2961,6 +3002,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_PRI_PORT,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_PRI_PORT,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
@@ -2994,6 +3040,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PM_STATE,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT,
},
},
[MLX5_QP_STATE_RTS] = {
@@ -3010,6 +3062,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_SRQN |
MLX5_QP_OPTPAR_CQN_RCV,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
},
},
[MLX5_QP_STATE_SQER] = {
@@ -3021,6 +3079,10 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RRE,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
},
},
};
@@ -3261,6 +3323,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
if (modify_sq) {
+ struct mlx5_flow_handle *flow_rule;
+
if (tx_affinity) {
err = modify_raw_packet_tx_affinity(dev->mdev, sq,
tx_affinity,
@@ -3269,8 +3333,25 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return err;
}
- return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
- raw_qp_param, qp->ibqp.pd);
+ flow_rule = create_flow_rule_vport_sq(dev, sq,
+ raw_qp_param->port);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+
+ err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+ raw_qp_param, qp->ibqp.pd);
+ if (err) {
+ if (flow_rule)
+ mlx5_del_flow_rules(flow_rule);
+ return err;
+ }
+
+ if (flow_rule) {
+ destroy_flow_rule_vport_sq(sq);
+ sq->flow_rule = flow_rule;
+ }
+
+ return err;
}
return 0;
@@ -3295,7 +3376,7 @@ static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
} else {
tx_port_affinity =
(unsigned int)atomic_add_return(
- 1, &dev->roce[port_num].tx_port_affinity) %
+ 1, &dev->port[port_num].roce.tx_port_affinity) %
MLX5_MAX_PORTS +
1;
mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
@@ -3400,7 +3481,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (dev->lag_active) {
- u8 p = mlx5_core_native_port_num(dev->mdev);
+ u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
tx_affinity = get_tx_affinity(dev, pd, base, p,
udata);
context->flags |= cpu_to_be32(tx_affinity << 24);
@@ -3553,6 +3634,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
}
+ if (attr_mask & IB_QP_PORT)
+ raw_qp_param.port = attr->port_num;
+
if (attr_mask & IB_QP_RATE_LIMIT) {
raw_qp_param.rl.rate = attr->rate_limit;
@@ -4726,16 +4810,15 @@ static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
{
__be32 *p = NULL;
- u32 tidx = idx;
int i, j;
pr_debug("dump WQE index %u:\n", idx);
for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
if ((i & 0xf) == 0) {
- tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
- p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, tidx);
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
pr_debug("WQBB at %p:\n", (void *)p);
j = 0;
+ idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
}
pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
@@ -5119,11 +5202,10 @@ out:
wmb();
/* currently we support only regular doorbells */
- mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
+ mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
*/
- mmiowb();
bf->offset ^= bf->buf_size;
}
@@ -5625,8 +5707,7 @@ out:
}
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+ struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_xrcd *xrcd;
@@ -5648,7 +5729,7 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
return &xrcd->ibxrcd;
}
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
@@ -5960,19 +6041,19 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
err_copy:
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
- destroy_user_rq(dev, pd, rwq);
+ destroy_user_rq(dev, pd, rwq, udata);
err:
kfree(rwq);
return ERR_PTR(err);
}
-int mlx5_ib_destroy_wq(struct ib_wq *wq)
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
- destroy_user_rq(dev, wq->pd, rwq);
+ destroy_user_rq(dev, wq->pd, rwq, udata);
kfree(rwq);
return 0;
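The create_raw_packet_qp_tir() and create_rss_raw_qp_tir() hunks above reassemble the 64-bit TIR ICM address from the three fields of the create_tir_out layout: bits [31:0], [39:32] and [63:40], with the same shifts used in the driver code. The sketch below reproduces only that bit stitching on plain integers; the field widths follow the shifts in the hunks, and the test value is arbitrary.

#include <assert.h>
#include <stdint.h>

static uint64_t assemble_icm_addr(uint32_t lo_31_0, uint8_t mid_39_32,
				  uint32_t hi_63_40)
{
	uint64_t addr = lo_31_0;

	addr |= (uint64_t)mid_39_32 << 32;	/* bits [39:32] */
	addr |= (uint64_t)hi_63_40 << 40;	/* bits [63:40] */
	return addr;
}

int main(void)
{
	/* split an arbitrary address into the three fields and rebuild it */
	uint64_t addr = assemble_icm_addr(0x89abcdefu, 0x12u, 0xabcdeu);

	assert(addr == 0xabcde1289abcdefULL);
	return 0;
}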
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 1ec1beb1296b..4e7fde86c96b 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -194,9 +194,15 @@ err_db:
return err;
}
-static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
+static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ struct ib_udata *udata)
{
- mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
+ mlx5_ib_db_unmap_user(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx5_ib_ucontext,
+ ibucontext),
+ &srq->db);
ib_umem_release(srq->umem);
}
@@ -208,16 +214,16 @@ static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
mlx5_db_free(dev->mdev, &srq->db);
}
-struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int mlx5_ib_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_ib_srq *srq;
+ struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
+ struct mlx5_ib_srq *srq = to_msrq(ib_srq);
size_t desc_size;
size_t buf_size;
int err;
- struct mlx5_srq_attr in = {0};
+ struct mlx5_srq_attr in = {};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
/* Sanity check SRQ size before proceeding */
@@ -225,13 +231,9 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
init_attr->attr.max_wr,
max_srq_wqes);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
-
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
@@ -239,35 +241,32 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
- if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
- err = -EINVAL;
- goto err_srq;
- }
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+ return -EINVAL;
+
desc_size = roundup_pow_of_two(desc_size);
desc_size = max_t(size_t, 32, desc_size);
- if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
- err = -EINVAL;
- goto err_srq;
- }
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+ return -EINVAL;
+
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- if (buf_size < desc_size) {
- err = -EINVAL;
- goto err_srq;
- }
+ if (buf_size < desc_size)
+ return -EINVAL;
+
in.type = init_attr->srq_type;
if (udata)
- err = create_srq_user(pd, srq, &in, udata, buf_size);
+ err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
else
err = create_srq_kernel(dev, srq, &in, buf_size);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
udata ? "user" : "kernel", err);
- goto err_srq;
+ return err;
}
in.log_size = ilog2(srq->msrq.max);
@@ -297,7 +296,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
else
in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
- in.pd = to_mpd(pd)->pdn;
+ in.pd = to_mpd(ib_srq->pd)->pdn;
in.db_record = srq->db.dma;
err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
kvfree(in.pas);
@@ -320,21 +319,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
init_attr->attr.max_wr = srq->msrq.max - 1;
- return &srq->ibsrq;
+ return 0;
err_core:
mlx5_cmd_destroy_srq(dev, &srq->msrq);
err_usr_kern_srq:
if (udata)
- destroy_srq_user(pd, srq);
+ destroy_srq_user(ib_srq->pd, srq, udata);
else
destroy_srq_kernel(dev, srq);
-err_srq:
- kfree(srq);
-
- return ERR_PTR(err);
+ return err;
}
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -387,7 +383,7 @@ out_box:
return ret;
}
-int mlx5_ib_destroy_srq(struct ib_srq *srq)
+void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(srq->device);
struct mlx5_ib_srq *msrq = to_msrq(srq);
@@ -395,14 +391,16 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
mlx5_cmd_destroy_srq(dev, &msrq->msrq);
if (srq->uobject) {
- mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
+ mlx5_ib_db_unmap_user(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx5_ib_ucontext,
+ ibucontext),
+ &msrq->db);
ib_umem_release(msrq->umem);
} else {
destroy_srq_kernel(dev, msrq);
}
-
- kfree(srq);
- return 0;
}
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h
index c330af35ff10..af197c36d757 100644
--- a/drivers/infiniband/hw/mlx5/srq.h
+++ b/drivers/infiniband/hw/mlx5/srq.h
@@ -51,15 +51,12 @@ struct mlx5_core_srq {
struct mlx5_srq_table {
struct notifier_block nb;
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
+ struct xarray array;
};
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in);
-int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
+void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out);
int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 63ac38bb3498..b0d0687c7a68 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -83,13 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
- spin_lock(&table->lock);
-
- srq = radix_tree_lookup(&table->tree, srqn);
+ xa_lock(&table->array);
+ srq = xa_load(&table->array, srqn);
if (srq)
atomic_inc(&srq->common.refcount);
-
- spin_unlock(&table->lock);
+ xa_unlock(&table->array);
return srq;
}
@@ -597,9 +595,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
atomic_set(&srq->common.refcount, 1);
init_completion(&srq->common.free);
- spin_lock_irq(&table->lock);
- err = radix_tree_insert(&table->tree, srq->srqn, srq);
- spin_unlock_irq(&table->lock);
+ err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
if (err)
goto err_destroy_srq_split;
@@ -611,26 +607,22 @@ err_destroy_srq_split:
return err;
}
-int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *tmp;
int err;
- spin_lock_irq(&table->lock);
- tmp = radix_tree_delete(&table->tree, srq->srqn);
- spin_unlock_irq(&table->lock);
+ tmp = xa_erase_irq(&table->array, srq->srqn);
if (!tmp || tmp != srq)
- return -EINVAL;
+ return;
err = destroy_srq_split(dev, srq);
if (err)
- return err;
+ return;
mlx5_core_res_put(&srq->common);
wait_for_completion(&srq->common.free);
-
- return 0;
}
int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
@@ -680,13 +672,11 @@ static int srq_event_notifier(struct notifier_block *nb,
eqe = data;
srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
- spin_lock(&table->lock);
-
- srq = radix_tree_lookup(&table->tree, srqn);
+ xa_lock(&table->array);
+ srq = xa_load(&table->array, srqn);
if (srq)
atomic_inc(&srq->common.refcount);
-
- spin_unlock(&table->lock);
+ xa_unlock(&table->array);
if (!srq)
return NOTIFY_OK;
@@ -703,8 +693,7 @@ int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
struct mlx5_srq_table *table = &dev->srq_table;
memset(table, 0, sizeof(*table));
- spin_lock_init(&table->lock);
- INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+ xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
table->nb.notifier_call = srq_event_notifier;
mlx5_notifier_register(dev->mdev, &table->nb);
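The srq_cmd.c hunks above replace the spinlock-protected radix tree with an XArray: insertions go through xa_store_irq(), lookups take a reference under xa_lock(), and teardown uses xa_erase_irq(). The kernel-context sketch below (it builds only inside a kernel tree, not standalone) shows that pattern with an invented demo_obj type and refcount_t in place of the driver's atomic reference count.

#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/refcount.h>

struct demo_obj {
	u32 id;
	refcount_t ref;
};

static DEFINE_XARRAY_FLAGS(demo_xa, XA_FLAGS_LOCK_IRQ);

static int demo_insert(struct demo_obj *obj)
{
	/* xa_err() folds a possible error entry into an errno */
	return xa_err(xa_store_irq(&demo_xa, obj->id, obj, GFP_KERNEL));
}

static struct demo_obj *demo_get(u32 id)
{
	struct demo_obj *obj;

	xa_lock(&demo_xa);
	obj = xa_load(&demo_xa, id);
	if (obj)
		refcount_inc(&obj->ref);	/* pin while the lock is held */
	xa_unlock(&demo_xa);
	return obj;
}

static void demo_remove(struct demo_obj *obj)
{
	xa_erase_irq(&demo_xa, obj->id);
}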
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 83aa47eb81a9..bdf5ed38de22 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -292,12 +292,6 @@ static int mthca_cmd_post(struct mthca_dev *dev,
err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier,
op_modifier, op, token, event);
- /*
- * Make sure that our HCR writes don't get mixed in with
- * writes from another CPU starting a FW command.
- */
- mmiowb();
-
mutex_unlock(&dev->cmd.hcr_mutex);
return err;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index a6531ffe29a6..c3cfea243af8 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -77,7 +77,7 @@ struct mthca_cq_context {
__be32 ci_db; /* Arbel only */
__be32 state_db; /* Arbel only */
u32 reserved;
-} __attribute__((packed));
+} __packed;
#define MTHCA_CQ_STATUS_OK ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW ( 9 << 28)
@@ -211,11 +211,6 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
dev->kar + MTHCA_CQ_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
- /*
- * Make sure doorbells don't leak out of CQ spinlock
- * and reach the HCA out of order:
- */
- mmiowb();
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 30400ea4808b..2cdf686203c1 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -63,7 +63,7 @@ struct mthca_eq_context {
__be32 consumer_index;
__be32 producer_index;
u32 reserved3[4];
-} __attribute__((packed));
+} __packed;
#define MTHCA_EQ_STATUS_OK ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28)
@@ -130,7 +130,7 @@ struct mthca_eqe {
u32 raw[6];
struct {
__be32 cqn;
- } __attribute__((packed)) comp;
+ } __packed comp;
struct {
u16 reserved1;
__be16 token;
@@ -138,27 +138,27 @@ struct mthca_eqe {
u8 reserved3[3];
u8 status;
__be64 out_param;
- } __attribute__((packed)) cmd;
+ } __packed cmd;
struct {
__be32 qpn;
- } __attribute__((packed)) qp;
+ } __packed qp;
struct {
__be32 srqn;
- } __attribute__((packed)) srq;
+ } __packed srq;
struct {
__be32 cqn;
u32 reserved1;
u8 reserved2[3];
u8 syndrome;
- } __attribute__((packed)) cq_err;
+ } __packed cq_err;
struct {
u32 reserved1[2];
__be32 port;
- } __attribute__((packed)) port_change;
+ } __packed port_change;
} event;
u8 reserved3[3];
u8 owner;
-} __attribute__((packed));
+} __packed;
#define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7)
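The conversions above swap __attribute__((packed)) for the kernel's shorter __packed macro; the layout of these hardware structures is unchanged, since the attribute still suppresses all compiler-inserted padding. The standalone sketch below shows the effect on sizeof; it defines the macro locally because, outside the kernel tree, the compiler-attribute headers that normally provide it are not available.

#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))

struct padded {
	uint8_t  tag;
	uint32_t value;		/* compiler may insert 3 bytes of padding before this */
};

struct on_wire {
	uint8_t  tag;
	uint32_t value;
} __packed;			/* exactly 5 bytes, matching a wire/HW layout */

int main(void)
{
	printf("padded=%zu packed=%zu\n",
	       sizeof(struct padded), sizeof(struct on_wire));	/* typically 8 vs 5 */
	return 0;
}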
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 112d2f38e0de..8ff0e90d7564 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);
+ ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+ FOLL_WRITE | FOLL_LONGTERM, pages);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 6686042aafb4..4250b2c18c64 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -60,7 +60,7 @@ struct mthca_mpt_entry {
__be64 mtt_seg;
__be32 mtt_sz; /* Arbel only */
u32 reserved[2];
-} __attribute__((packed));
+} __packed;
#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO (1 << 17)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index d063d7a37762..4f40dfedf920 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -363,18 +363,17 @@ static int mthca_mmap_uar(struct ib_ucontext *context,
return 0;
}
-static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct mthca_pd *pd = to_mpd(ibpd);
int err;
- err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
+ err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
if (err)
return err;
- if (context) {
+ if (udata) {
if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
mthca_pd_free(to_mdev(ibdev), pd);
return -EFAULT;
@@ -384,114 +383,86 @@ static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return 0;
}
-static void mthca_dealloc_pd(struct ib_pd *pd)
+static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
}
-static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
- struct ib_udata *udata)
+static int mthca_ah_create(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata)
{
- int err;
- struct mthca_ah *ah;
+ struct mthca_ah *ah = to_mah(ibah);
- ah = kmalloc(sizeof *ah, GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
-
- err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
- if (err) {
- kfree(ah);
- return ERR_PTR(err);
- }
-
- return &ah->ibah;
+ return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd), ah_attr,
+ ah);
}
-static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
+static void mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
- kfree(ah);
-
- return 0;
}
-static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+static int mthca_create_srq(struct ib_srq *ibsrq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct mthca_create_srq ucmd;
struct mthca_ucontext *context = rdma_udata_to_drv_context(
udata, struct mthca_ucontext, ibucontext);
- struct mthca_srq *srq;
+ struct mthca_srq *srq = to_msrq(ibsrq);
int err;
if (init_attr->srq_type != IB_SRQT_BASIC)
- return ERR_PTR(-EOPNOTSUPP);
-
- srq = kmalloc(sizeof *srq, GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
+ return -EOPNOTSUPP;
if (udata) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
- err = -EFAULT;
- goto err_free;
- }
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
- err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+ err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
context->db_tab, ucmd.db_index,
ucmd.db_page);
if (err)
- goto err_free;
+ return err;
srq->mr.ibmr.lkey = ucmd.lkey;
srq->db_index = ucmd.db_index;
}
- err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
+ err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
&init_attr->attr, srq, udata);
if (err && udata)
- mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+ mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
context->db_tab, ucmd.db_index);
if (err)
- goto err_free;
+ return err;
- if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
- mthca_free_srq(to_mdev(pd->device), srq);
- err = -EFAULT;
- goto err_free;
+ if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
+ mthca_free_srq(to_mdev(ibsrq->device), srq);
+ return -EFAULT;
}
- return &srq->ibsrq;
-
-err_free:
- kfree(srq);
-
- return ERR_PTR(err);
+ return 0;
}
-static int mthca_destroy_srq(struct ib_srq *srq)
+static void mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
- struct mthca_ucontext *context;
-
- if (srq->uobject) {
- context = to_mucontext(srq->uobject->context);
+ if (udata) {
+ struct mthca_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mthca_ucontext,
+ ibucontext);
mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
context->db_tab, to_msrq(srq)->db_index);
}
mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
- kfree(srq);
-
- return 0;
}
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
@@ -607,16 +578,22 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
return &qp->ibqp;
}
-static int mthca_destroy_qp(struct ib_qp *qp)
+static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
- if (qp->uobject) {
+ if (udata) {
+ struct mthca_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mthca_ucontext,
+ ibucontext);
+
mthca_unmap_user_db(to_mdev(qp->device),
- &to_mucontext(qp->uobject->context)->uar,
- to_mucontext(qp->uobject->context)->db_tab,
+ &context->uar,
+ context->db_tab,
to_mqp(qp)->sq.db_index);
mthca_unmap_user_db(to_mdev(qp->device),
- &to_mucontext(qp->uobject->context)->uar,
- to_mucontext(qp->uobject->context)->db_tab,
+ &context->uar,
+ context->db_tab,
to_mqp(qp)->rq.db_index);
}
mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
@@ -626,7 +603,6 @@ static int mthca_destroy_qp(struct ib_qp *qp)
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@@ -634,6 +610,8 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
struct mthca_cq *cq;
int nent;
int err;
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
if (attr->flags)
return ERR_PTR(-EINVAL);
@@ -641,19 +619,19 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
return ERR_PTR(-EINVAL);
- if (context) {
+ if (udata) {
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return ERR_PTR(-EFAULT);
- err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
- to_mucontext(context)->db_tab,
- ucmd.set_db_index, ucmd.set_db_page);
+ err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
+ context->db_tab, ucmd.set_db_index,
+ ucmd.set_db_page);
if (err)
return ERR_PTR(err);
- err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
- to_mucontext(context)->db_tab,
- ucmd.arm_db_index, ucmd.arm_db_page);
+ err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
+ context->db_tab, ucmd.arm_db_index,
+ ucmd.arm_db_page);
if (err)
goto err_unmap_set;
}
@@ -664,7 +642,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
goto err_unmap_arm;
}
- if (context) {
+ if (udata) {
cq->buf.mr.ibmr.lkey = ucmd.lkey;
cq->set_ci_db_index = ucmd.set_db_index;
cq->arm_db_index = ucmd.arm_db_index;
@@ -673,14 +651,13 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
for (nent = 1; nent <= entries; nent <<= 1)
; /* nothing */
- err = mthca_init_cq(to_mdev(ibdev), nent,
- context ? to_mucontext(context) : NULL,
- context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
+ err = mthca_init_cq(to_mdev(ibdev), nent, context,
+ udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
cq);
if (err)
goto err_free;
- if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
+ if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
mthca_free_cq(to_mdev(ibdev), cq);
err = -EFAULT;
goto err_free;
@@ -694,14 +671,14 @@ err_free:
kfree(cq);
err_unmap_arm:
- if (context)
- mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
- to_mucontext(context)->db_tab, ucmd.arm_db_index);
+ if (udata)
+ mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
+ context->db_tab, ucmd.arm_db_index);
err_unmap_set:
- if (context)
- mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
- to_mucontext(context)->db_tab, ucmd.set_db_index);
+ if (udata)
+ mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
+ context->db_tab, ucmd.set_db_index);
return ERR_PTR(err);
}
@@ -827,16 +804,22 @@ out:
return ret;
}
-static int mthca_destroy_cq(struct ib_cq *cq)
+static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
- if (cq->uobject) {
+ if (udata) {
+ struct mthca_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mthca_ucontext,
+ ibucontext);
+
mthca_unmap_user_db(to_mdev(cq->device),
- &to_mucontext(cq->uobject->context)->uar,
- to_mucontext(cq->uobject->context)->db_tab,
+ &context->uar,
+ context->db_tab,
to_mcq(cq)->arm_db_index);
mthca_unmap_user_db(to_mdev(cq->device),
- &to_mucontext(cq->uobject->context)->uar,
- to_mucontext(cq->uobject->context)->db_tab,
+ &context->uar,
+ context->db_tab,
to_mcq(cq)->set_ci_db_index);
}
mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
@@ -914,7 +897,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err;
}
- n = mr->umem->nmap;
+ n = ib_umem_num_pages(mr->umem);
mr->mtt = mthca_alloc_mtt(dev, n);
if (IS_ERR(mr->mtt)) {
@@ -974,7 +957,7 @@ err:
return ERR_PTR(err);
}
-static int mthca_dereg_mr(struct ib_mr *mr)
+static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
{
struct mthca_mr *mmr = to_mmr(mr);
@@ -1200,6 +1183,8 @@ static const struct ib_device_ops mthca_dev_ops = {
.query_qp = mthca_query_qp,
.reg_user_mr = mthca_reg_user_mr,
.resize_cq = mthca_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
};
@@ -1210,6 +1195,8 @@ static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
.modify_srq = mthca_modify_srq,
.post_srq_recv = mthca_arbel_post_srq_recv,
.query_srq = mthca_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};
static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
@@ -1218,6 +1205,8 @@ static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
.modify_srq = mthca_modify_srq,
.post_srq_recv = mthca_tavor_post_srq_recv,
.query_srq = mthca_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};
static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
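The mthca hunks above all follow the same conversion: destroy/dealloc verbs now take a struct ib_udata * and recover the driver ucontext with rdma_udata_to_drv_context() (from <rdma/uverbs_ioctl.h>) instead of dereferencing uobject->context, and objects covered by INIT_RDMA_OBJ_SIZE() are no longer kfree()'d by the driver. A minimal sketch of that pattern, using hypothetical demo_* names rather than anything from this patch:

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct demo_ucontext {
	struct ib_ucontext ibucontext;	/* embeds the core object */
	/* driver-private doorbell state, etc. */
};

static void demo_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	/*
	 * udata != NULL means the call comes from a user-space verb;
	 * the ucontext is derived from udata, not from srq->uobject.
	 */
	if (udata) {
		struct demo_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct demo_ucontext, ibucontext);

		/* release per-ucontext resources (user doorbells, ...) */
		(void)uctx;
	}
	/*
	 * Free only the HW object here; the core frees the ib_srq
	 * memory itself once INIT_RDMA_OBJ_SIZE(ib_srq, ...) is set.
	 */
}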
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 7a5b25d13faa..d04c245359eb 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -115,7 +115,7 @@ struct mthca_qp_path {
u8 hop_limit;
__be32 sl_tclass_flowlabel;
u8 rgid[16];
-} __attribute__((packed));
+} __packed;
struct mthca_qp_context {
__be32 flags;
@@ -154,14 +154,14 @@ struct mthca_qp_context {
__be16 rq_wqe_counter; /* reserved on Tavor */
__be16 sq_wqe_counter; /* reserved on Tavor */
u32 reserved3[18];
-} __attribute__((packed));
+} __packed;
struct mthca_qp_param {
__be32 opt_param_mask;
u32 reserved1;
struct mthca_qp_context context;
u32 reserved2[62];
-} __attribute__((packed));
+} __packed;
enum {
MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
@@ -1809,11 +1809,6 @@ out:
(qp->qpn << 8) | size0,
dev->kar + MTHCA_SEND_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
- /*
- * Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order:
- */
- mmiowb();
}
qp->sq.next_ind = ind;
@@ -1924,12 +1919,6 @@ out:
qp->rq.next_ind = ind;
qp->rq.head += nreq;
- /*
- * Make sure doorbells don't leak out of RQ spinlock and reach
- * the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&qp->rq.lock, flags);
return err;
}
@@ -2164,12 +2153,6 @@ out:
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
- /*
- * Make sure doorbells don't leak out of SQ spinlock and reach
- * the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&qp->sq.lock, flags);
return err;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 06b920385512..a85935ccce88 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -570,12 +570,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
- /*
- * Make sure doorbells don't leak out of SRQ spinlock and
- * reach the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&srq->lock, flags);
return err;
}
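The mmiowb() calls deleted in the mthca (and, further down, qedr) post paths relied on the old rule that drivers had to order MMIO doorbell writes against the spinlock release themselves; with mmiowb tracking folded into the spinlock implementation, spin_unlock() now provides that ordering on the architectures that need it, so the explicit barrier is redundant. A sketch of the resulting doorbell pattern, with illustrative names only:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_qp {
	struct {
		spinlock_t lock;
		void __iomem *db;
	} sq;
};

static void demo_ring_doorbell(struct demo_qp *qp, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->sq.lock, flags);
	/* ... post WQEs ... */
	writel(val, qp->sq.db);		/* MMIO doorbell write */
	/*
	 * No mmiowb() needed any more: spin_unlock() orders the MMIO
	 * write before the lock is seen as released by other CPUs.
	 */
	spin_unlock_irqrestore(&qp->sq.lock, flags);
}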
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 032883180f65..62bf986eba67 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1407,7 +1407,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
if (neigh->nud_state & NUD_VALID) {
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
" is %pM, Gateway is 0x%08X \n", dst_ip,
- neigh->ha, ntohl(rt->rt_gateway));
+ neigh->ha, ntohl(rt->rt_gw4));
if (arpindex >= 0) {
if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
@@ -3033,7 +3033,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
/* Need to free the Last Streaming Mode Message */
if (nesqp->ietf_frame) {
if (nesqp->lsmm_mr)
- nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr);
+ nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr,
+ NULL);
pci_free_consistent(nesdev->pcidev,
nesqp->private_data_len + nesqp->ietf_frame_size,
nesqp->ietf_frame, nesqp->ietf_frame_pbase);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 828e4af3f951..49024326a518 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -52,7 +52,7 @@ atomic_t qps_created;
atomic_t sw_qps_destroyed;
static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
-static int nes_dereg_mr(struct ib_mr *ib_mr);
+static int nes_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
/**
* nes_alloc_mw
@@ -306,9 +306,8 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
/*
* nes_alloc_mr
*/
-static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
@@ -386,7 +385,7 @@ static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd,
return ibmr;
err:
- nes_dereg_mr(ibmr);
+ nes_dereg_mr(ibmr, udata);
return ERR_PTR(-ENOMEM);
}
@@ -641,22 +640,24 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
/**
* nes_alloc_pd
*/
-static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int nes_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct ib_device *ibdev = pd->device;
struct nes_pd *nespd = to_nespd(pd);
struct nes_vnic *nesvnic = to_nesvnic(ibdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_ucontext *nesucontext;
struct nes_alloc_pd_resp uresp;
u32 pd_num = 0;
int err;
+ struct nes_ucontext *nesucontext = rdma_udata_to_drv_context(
+ udata, struct nes_ucontext, ibucontext);
- nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
- nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
- netdev_refcnt_read(nesvnic->netdev));
+ nes_debug(
+ NES_DBG_PD,
+ "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
+ nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev,
+ &nesucontext->ibucontext, netdev_refcnt_read(nesvnic->netdev));
err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD);
@@ -668,8 +669,7 @@ static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
- if (context) {
- nesucontext = to_nesucontext(context);
+ if (udata) {
nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells,
NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n",
@@ -700,7 +700,7 @@ static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
/**
* nes_dealloc_pd
*/
-static void nes_dealloc_pd(struct ib_pd *ibpd)
+static void nes_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct nes_ucontext *nesucontext;
struct nes_pd *nespd = to_nespd(ibpd);
@@ -708,8 +708,12 @@ static void nes_dealloc_pd(struct ib_pd *ibpd)
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- if ((ibpd->uobject) && (ibpd->uobject->context)) {
- nesucontext = to_nesucontext(ibpd->uobject->context);
+ if (udata) {
+ nesucontext =
+ rdma_udata_to_drv_context(
+ udata,
+ struct nes_ucontext,
+ ibucontext);
nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n",
nespd->mmap_db_index);
clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
@@ -1039,53 +1043,48 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}
if (req.user_qp_buffer)
nesqp->nesuqp_addr = req.user_qp_buffer;
- if (udata) {
- nesqp->user_mode = 1;
- if (virt_wqs) {
- err = 1;
- list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
- if (nespbl->user_base == (unsigned long )req.user_wqe_buffers) {
- list_del(&nespbl->list);
- err = 0;
- nes_debug(NES_DBG_QP, "Found PBL for virtual QP. nespbl=%p. user_base=0x%lx\n",
- nespbl, nespbl->user_base);
- break;
- }
- }
- if (err) {
- nes_debug(NES_DBG_QP, "Didn't Find PBL for virtual QP. address = %llx.\n",
- (long long unsigned int)req.user_wqe_buffers);
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- kfree(nesqp->allocated_buffer);
- return ERR_PTR(-EFAULT);
+
+ nesqp->user_mode = 1;
+ if (virt_wqs) {
+ err = 1;
+ list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
+ if (nespbl->user_base == (unsigned long )req.user_wqe_buffers) {
+ list_del(&nespbl->list);
+ err = 0;
+ nes_debug(NES_DBG_QP, "Found PBL for virtual QP. nespbl=%p. user_base=0x%lx\n",
+ nespbl, nespbl->user_base);
+ break;
}
}
-
- nesqp->mmap_sq_db_index =
- find_next_zero_bit(nes_ucontext->allocated_wqs,
- NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
- /* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n",
- nespd->mmap_db_index); */
- if (nesqp->mmap_sq_db_index >= NES_MAX_USER_WQ_REGIONS) {
- nes_debug(NES_DBG_QP,
- "db index > max user regions, failing create QP\n");
+ if (err) {
+ nes_debug(NES_DBG_QP, "Didn't Find PBL for virtual QP. address = %llx.\n",
+ (long long unsigned int)req.user_wqe_buffers);
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- if (virt_wqs) {
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
- nespbl->pbl_pbase);
- kfree(nespbl);
- }
kfree(nesqp->allocated_buffer);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EFAULT);
}
- set_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
- nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = nesqp;
- nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index + 1;
- } else {
+ }
+
+ nesqp->mmap_sq_db_index =
+ find_next_zero_bit(nes_ucontext->allocated_wqs,
+ NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
+ /* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n",
+ nespd->mmap_db_index); */
+ if (nesqp->mmap_sq_db_index >= NES_MAX_USER_WQ_REGIONS) {
+ nes_debug(NES_DBG_QP,
+ "db index > max user regions, failing create QP\n");
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ if (virt_wqs) {
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ }
kfree(nesqp->allocated_buffer);
- return ERR_PTR(-EFAULT);
+ return ERR_PTR(-ENOMEM);
}
+ set_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
+ nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = nesqp;
+ nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index + 1;
}
err = (!virt_wqs) ? nes_setup_mmap_qp(nesqp, nesvnic, sq_size, rq_size) :
nes_setup_virt_qp(nesqp, nespbl, nesvnic, sq_size, rq_size);
@@ -1303,7 +1302,7 @@ static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
/**
* nes_destroy_qp
*/
-static int nes_destroy_qp(struct ib_qp *ibqp)
+static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct nes_qp *nesqp = to_nesqp(ibqp);
struct nes_ucontext *nes_ucontext;
@@ -1343,8 +1342,12 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
}
if (nesqp->user_mode) {
- if ((ibqp->uobject)&&(ibqp->uobject->context)) {
- nes_ucontext = to_nesucontext(ibqp->uobject->context);
+ if (udata) {
+ nes_ucontext =
+ rdma_udata_to_drv_context(
+ udata,
+ struct nes_ucontext,
+ ibucontext);
clear_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = NULL;
if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
@@ -1373,7 +1376,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
*/
static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@@ -1418,9 +1420,10 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
nescq->hw_cq.cq_number = cq_num;
nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;
+ if (udata) {
+ struct nes_ucontext *nes_ucontext = rdma_udata_to_drv_context(
+ udata, struct nes_ucontext, ibucontext);
- if (context) {
- nes_ucontext = to_nesucontext(context);
if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) {
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
kfree(nescq);
@@ -1487,7 +1490,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
cqp_request = nes_get_cqp_request(nesdev);
if (cqp_request == NULL) {
nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
- if (!context)
+ if (!udata)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
nescq->hw_cq.cq_pbase);
else {
@@ -1516,7 +1519,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
if (nesadapter->free_4kpbl == 0) {
spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
nes_free_cqp_request(nesdev, cqp_request);
- if (!context)
+ if (!udata)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
nescq->hw_cq.cq_pbase);
else {
@@ -1538,7 +1541,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
if (nesadapter->free_256pbl == 0) {
spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
nes_free_cqp_request(nesdev, cqp_request);
- if (!context)
+ if (!udata)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
nescq->hw_cq.cq_pbase);
else {
@@ -1564,7 +1567,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
(nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16)));
- if (context) {
+ if (udata) {
if (pbl_entries != 1)
u64temp = (u64)nespbl->pbl_pbase;
else
@@ -1595,7 +1598,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
nescq->hw_cq.cq_number, ret);
if ((!ret) || (cqp_request->major_code)) {
nes_put_cqp_request(nesdev, cqp_request);
- if (!context)
+ if (!udata)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
nescq->hw_cq.cq_pbase);
else {
@@ -1609,7 +1612,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
}
nes_put_cqp_request(nesdev, cqp_request);
- if (context) {
+ if (udata) {
/* free the nespbl */
pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
nespbl->pbl_pbase);
@@ -1631,7 +1634,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
/**
* nes_destroy_cq
*/
-static int nes_destroy_cq(struct ib_cq *ib_cq)
+static int nes_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct nes_cq *nescq;
struct nes_device *nesdev;
@@ -2382,7 +2385,7 @@ reg_user_mr_err:
/**
* nes_dereg_mr
*/
-static int nes_dereg_mr(struct ib_mr *ib_mr)
+static int nes_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct nes_mr *nesmr = to_nesmr(ib_mr);
struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device);
@@ -3574,6 +3577,14 @@ static const struct ib_device_ops nes_dev_ops = {
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = nes_get_dma_mr,
.get_port_immutable = nes_port_immutable,
+ .iw_accept = nes_accept,
+ .iw_add_ref = nes_add_ref,
+ .iw_connect = nes_connect,
+ .iw_create_listen = nes_create_listen,
+ .iw_destroy_listen = nes_destroy_listen,
+ .iw_get_qp = nes_get_qp,
+ .iw_reject = nes_reject,
+ .iw_rem_ref = nes_rem_ref,
.map_mr_sg = nes_map_mr_sg,
.mmap = nes_mmap,
.modify_qp = nes_modify_qp,
@@ -3638,23 +3649,9 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.num_comp_vectors = 1;
nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev;
- nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
- if (nesibdev->ibdev.iwcm == NULL) {
- ib_dealloc_device(&nesibdev->ibdev);
- return NULL;
- }
- nesibdev->ibdev.iwcm->add_ref = nes_add_ref;
- nesibdev->ibdev.iwcm->rem_ref = nes_rem_ref;
- nesibdev->ibdev.iwcm->get_qp = nes_get_qp;
- nesibdev->ibdev.iwcm->connect = nes_connect;
- nesibdev->ibdev.iwcm->accept = nes_accept;
- nesibdev->ibdev.iwcm->reject = nes_reject;
- nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
- nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
-
ib_set_device_ops(&nesibdev->ibdev, &nes_dev_ops);
- memcpy(nesibdev->ibdev.iwcm->ifname, netdev->name,
- sizeof(nesibdev->ibdev.iwcm->ifname));
+ memcpy(nesibdev->ibdev.iw_ifname, netdev->name,
+ sizeof(nesibdev->ibdev.iw_ifname));
return nesibdev;
}
@@ -3715,7 +3712,6 @@ void nes_destroy_ofa_device(struct nes_ib_device *nesibdev)
nes_unregister_ofa_device(nesibdev);
- kfree(nesibdev->ibdev.iwcm);
ib_dealloc_device(&nesibdev->ibdev);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a7295322efbc..8d3e36d548aa 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -83,7 +83,6 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct iphdr ipv4;
const struct ib_global_route *ib_grh;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
@@ -133,9 +132,9 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
ipv4.tot_len = htons(0);
ipv4.ttl = ib_grh->hop_limit;
ipv4.protocol = nxthdr;
- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
- rdma_gid2ip(&dgid_addr._sockaddr, &ib_grh->dgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &ib_grh->dgid);

ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
} else {
@@ -156,37 +155,34 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
return status;
}
-struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
- u32 flags, struct ib_udata *udata)
+int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+ struct ib_udata *udata)
{
u32 *ahid_addr;
int status;
- struct ocrdma_ah *ah;
+ struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
bool isvlan = false;
u16 vlan_tag = 0xffff;
const struct ib_gid_attr *sgid_attr;
- struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibah->pd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+ sgid_attr = attr->grh.sgid_attr;
+ status = rdma_read_gid_l2_fields(sgid_attr, &vlan_tag, NULL);
+ if (status)
+ return status;
status = ocrdma_alloc_av(dev, ah);
if (status)
goto av_err;
- sgid_attr = attr->grh.sgid_attr;
- if (is_vlan_dev(sgid_attr->ndev))
- vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
-
/* Get network header type for this GID */
ah->hdr_type = rdma_gid_attr_network_type(sgid_attr);
@@ -210,23 +206,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
OCRDMA_AH_VLAN_VALID_SHIFT);
}
- return &ah->ibah;
+ return 0;
av_conf_err:
ocrdma_free_av(dev, ah);
av_err:
- kfree(ah);
- return ERR_PTR(status);
+ return status;
}
-int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+void ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
ocrdma_free_av(dev, ah);
- kfree(ah);
- return 0;
}
int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index eb996e14b520..64cb82c08664 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -51,9 +51,9 @@ enum {
OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
};
-struct ib_ah *ocrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
-int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+ struct ib_udata *udata);
+void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int ocrdma_process_mad(struct ib_device *,
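As with PDs and ucontexts, the AH object is now allocated by ib_core based on INIT_RDMA_OBJ_SIZE(ib_ah, ...): create_ah only initializes the embedded structure and returns an int, and destroy_ah returns void and must not kfree() the object. The shape of such a driver, sketched with placeholder demo_* names (not part of this patch):

#include <rdma/ib_verbs.h>

struct demo_ah {
	struct ib_ah ibah;	/* core-allocated, embedded member */
	u32 av_index;
};

static int demo_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
			  u32 flags, struct ib_udata *udata)
{
	struct demo_ah *ah = container_of(ibah, struct demo_ah, ibah);

	/*
	 * Program the address vector described by attr; on error just
	 * return it, the core undoes its own allocation.
	 */
	ah->av_index = 0;
	return 0;
}

static void demo_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	/* release HW state only; no kfree(), the core frees ibah */
}

static const struct ib_device_ops demo_ah_ops = {
	.create_ah = demo_create_ah,
	.destroy_ah = demo_destroy_ah,
	INIT_RDMA_OBJ_SIZE(ib_ah, demo_ah, ibah),
};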
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 097e5ab2a19f..5127e2ea4bdd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -2496,10 +2496,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
int status;
struct rdma_ah_attr *ah_attr = &attrs->ah_attr;
const struct ib_gid_attr *sgid_attr;
- u32 vlan_id = 0xFFFF;
+ u16 vlan_id = 0xFFFF;
u8 mac_addr[6], hdr_type;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
@@ -2526,8 +2525,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
sizeof(cmd->params.dgid));
sgid_attr = ah_attr->grh.sgid_attr;
- vlan_id = rdma_vlan_dev_vlan_id(sgid_attr->ndev);
- memcpy(mac_addr, sgid_attr->ndev->dev_addr, ETH_ALEN);
+ status = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, &mac_addr[0]);
+ if (status)
+ return status;
qp->sgid_idx = grh->sgid_index;
memcpy(&cmd->params.sgid[0], &sgid_attr->gid.raw[0],
@@ -2541,8 +2541,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
hdr_type = rdma_gid_attr_network_type(sgid_attr);
if (hdr_type == RDMA_NETWORK_IPV4) {
- rdma_gid2ip(&sgid_addr._sockaddr, &sgid_attr->gid);
- rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid);
memcpy(&cmd->params.dgid[0],
&dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
memcpy(&cmd->params.sgid[0],
@@ -2863,21 +2863,19 @@ int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
return status;
}
-int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
+void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
{
- int status = -ENOMEM;
struct ocrdma_destroy_srq *cmd;
struct pci_dev *pdev = dev->nic_info.pdev;
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
if (!cmd)
- return status;
+ return;
cmd->id = srq->id;
- status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (srq->rq.va)
dma_free_coherent(&pdev->dev, srq->rq.len,
srq->rq.va, srq->rq.pa);
kfree(cmd);
- return status;
}
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
@@ -3067,13 +3065,12 @@ int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
return status;
}
-int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
+void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
unsigned long flags;
spin_lock_irqsave(&dev->av_tbl.lock, flags);
ah->av->valid = 0;
spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
- return 0;
}
static int ocrdma_create_eqs(struct ocrdma_dev *dev)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index ebc1f442aec3..06ec59326a90 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -137,10 +137,10 @@ int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);
-int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
+void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq);
-int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
-int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);
+int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
+void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
enum ib_qp_state *old_ib_state);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index b9e10d55a58e..fc6c0962dea9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -62,8 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("Dual BSD/GPL");
-static DEFINE_IDR(ocrdma_dev_id);
-
void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
{
u8 mac_addr[6];
@@ -161,7 +159,6 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = ocrdma_get_dma_mr,
.get_link_layer = ocrdma_link_layer,
- .get_netdev = ocrdma_get_netdev,
.get_port_immutable = ocrdma_port_immutable,
.map_mr_sg = ocrdma_map_mr_sg,
.mmap = ocrdma_mmap,
@@ -179,6 +176,8 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.reg_user_mr = ocrdma_reg_user_mr,
.req_notify_cq = ocrdma_arm_cq,
.resize_cq = ocrdma_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
};
@@ -189,10 +188,14 @@ static const struct ib_device_ops ocrdma_dev_srq_ops = {
.modify_srq = ocrdma_modify_srq,
.post_srq_recv = ocrdma_post_srq_recv,
.query_srq = ocrdma_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, ocrdma_srq, ibsrq),
};
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
+ int ret;
+
ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
@@ -247,6 +250,10 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
}
rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_OCRDMA;
+ ret = ib_device_set_netdev(&dev->ibdev, dev->nic_info.netdev, 1);
+ if (ret)
+ return ret;
+
return ib_register_device(&dev->ibdev, "ocrdma%d");
}
@@ -304,15 +311,13 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
pr_err("Unable to allocate ib device\n");
return NULL;
}
+
dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
if (!dev->mbx_cmd)
- goto idr_err;
+ goto init_err;
memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
- dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
- if (dev->id < 0)
- goto idr_err;
-
+ dev->id = PCI_FUNC(dev->nic_info.pdev->devfn);
status = ocrdma_init_hw(dev);
if (status)
goto init_err;
@@ -349,8 +354,6 @@ alloc_err:
ocrdma_free_resources(dev);
ocrdma_cleanup_hw(dev);
init_err:
- idr_remove(&ocrdma_dev_id, dev->id);
-idr_err:
kfree(dev->mbx_cmd);
ib_dealloc_device(&dev->ibdev);
pr_err("%s() leaving. ret=%d\n", __func__, status);
@@ -360,7 +363,6 @@ idr_err:
static void ocrdma_remove_free(struct ocrdma_dev *dev)
{
- idr_remove(&ocrdma_dev_id, dev->id);
kfree(dev->mbx_cmd);
ib_dealloc_device(&dev->ibdev);
}
@@ -465,7 +467,6 @@ static void __exit ocrdma_exit_module(void)
{
be_roce_unregister_driver(&ocrdma_drv);
ocrdma_rem_debugfs();
- idr_destroy(&ocrdma_dev_id);
}
module_init(ocrdma_init_module);
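Instead of each driver implementing a .get_netdev callback with its own RCU and dev_hold() rules, the associated netdev is now registered once with the core via ib_device_set_netdev() before ib_register_device(), and the core handles lookup and reference counting. A minimal sketch for a single-port device, with demo_* standing in for the driver's own structures:

#include <rdma/ib_verbs.h>
#include <linux/netdevice.h>

struct demo_dev {
	struct ib_device ibdev;
	struct net_device *netdev;
};

static const struct ib_device_ops demo_dev_ops = {
	/* ... verb callbacks ... */
};

static int demo_register_device(struct demo_dev *dev)
{
	int ret;

	ib_set_device_ops(&dev->ibdev, &demo_dev_ops);

	/* hand the netdev to ib_core once, for port 1 */
	ret = ib_device_set_netdev(&dev->ibdev, dev->netdev, 1);
	if (ret)
		return ret;

	return ib_register_device(&dev->ibdev, "demo%d");
}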
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b4e1777c2c97..35ec87015792 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -47,6 +47,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"
@@ -112,24 +113,6 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
return 0;
}
-struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
-{
- struct ocrdma_dev *dev;
- struct net_device *ndev = NULL;
-
- rcu_read_lock();
-
- dev = get_ocrdma_dev(ibdev);
- if (dev)
- ndev = dev->nic_info.netdev;
- if (ndev)
- dev_hold(ndev);
-
- rcu_read_unlock();
-
- return ndev;
-}
-
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
u8 *ib_speed, u8 *ib_width)
{
@@ -367,6 +350,16 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
return status;
}
+/*
+ * NOTE:
+ *
+ * ocrdma_ucontext must be used here because this function is also
+ * called from ocrdma_alloc_ucontext where ib_udata does not have
+ * valid ib_ucontext pointer. ib_uverbs_get_context does not call
+ * uobj_{alloc|get_xxx} helpers which are used to store the
+ * ib_ucontext in uverbs_attr_bundle wrapping the ib_udata. so
+ * ib_udata does NOT imply valid ib_ucontext here!
+ */
static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
struct ocrdma_ucontext *uctx,
struct ib_udata *udata)
@@ -593,7 +586,6 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
- struct ib_ucontext *ib_ctx,
struct ib_udata *udata)
{
int status;
@@ -601,7 +593,8 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
u64 dpp_page_addr = 0;
u32 db_page_size;
struct ocrdma_alloc_pd_uresp rsp;
- struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
memset(&rsp, 0, sizeof(rsp));
rsp.id = pd->id;
@@ -639,18 +632,17 @@ dpp_map_err:
return status;
}
-int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_pd *pd;
- struct ocrdma_ucontext *uctx = NULL;
int status;
u8 is_uctx_pd = false;
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
- if (udata && context) {
- uctx = get_ocrdma_ucontext(context);
+ if (udata) {
pd = ocrdma_get_ucontext_pd(uctx);
if (pd) {
is_uctx_pd = true;
@@ -664,8 +656,8 @@ int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
goto exit;
pd_mapping:
- if (udata && context) {
- status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
+ if (udata) {
+ status = ocrdma_copy_pd_uresp(dev, pd, udata);
if (status)
goto err;
}
@@ -680,7 +672,7 @@ exit:
return status;
}
-void ocrdma_dealloc_pd(struct ib_pd *ibpd)
+void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
@@ -922,7 +914,7 @@ umem_err:
return ERR_PTR(status);
}
-int ocrdma_dereg_mr(struct ib_mr *ib_mr)
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
@@ -946,13 +938,17 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
}
static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
- struct ib_udata *udata,
- struct ib_ucontext *ib_ctx)
+ struct ib_udata *udata)
{
int status;
- struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
struct ocrdma_create_cq_uresp uresp;
+ /* this must be user flow! */
+ if (!udata)
+ return -EINVAL;
+
memset(&uresp, 0, sizeof(uresp));
uresp.cq_id = cq->id;
uresp.page_size = PAGE_ALIGN(cq->len);
@@ -983,13 +979,13 @@ err:
struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_ctx,
struct ib_udata *udata)
{
int entries = attr->cqe;
struct ocrdma_cq *cq;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
- struct ocrdma_ucontext *uctx = NULL;
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
u16 pd_id = 0;
int status;
struct ocrdma_create_cq_ureq ureq;
@@ -1011,18 +1007,16 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);
- if (ib_ctx) {
- uctx = get_ocrdma_ucontext(ib_ctx);
+ if (udata)
pd_id = uctx->cntxt_pd->id;
- }
status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
if (status) {
kfree(cq);
return ERR_PTR(status);
}
- if (ib_ctx) {
- status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
+ if (udata) {
+ status = ocrdma_copy_cq_uresp(dev, cq, udata);
if (status)
goto ctx_err;
}
@@ -1076,7 +1070,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
spin_unlock_irqrestore(&cq->cq_lock, flags);
}
-int ocrdma_destroy_cq(struct ib_cq *ibcq)
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
struct ocrdma_eq *eq = NULL;
@@ -1697,7 +1691,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}
-int ocrdma_destroy_qp(struct ib_qp *ibqp)
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct ocrdma_pd *pd;
struct ocrdma_qp *qp;
@@ -1793,45 +1787,43 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
return status;
}
-struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- int status = -ENOMEM;
- struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- struct ocrdma_srq *srq;
+ int status;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
+ struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (init_attr->attr.max_wr > dev->attr.max_rqe)
- return ERR_PTR(-EINVAL);
-
- srq = kzalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq)
- return ERR_PTR(status);
+ return -EINVAL;
spin_lock_init(&srq->q_lock);
srq->pd = pd;
srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
if (status)
- goto err;
+ return status;
- if (udata == NULL) {
- status = -ENOMEM;
+ if (!udata) {
srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
GFP_KERNEL);
- if (srq->rqe_wr_id_tbl == NULL)
+ if (!srq->rqe_wr_id_tbl) {
+ status = -ENOMEM;
goto arm_err;
+ }
srq->bit_fields_len = (srq->rq.max_cnt / 32) +
(srq->rq.max_cnt % 32 ? 1 : 0);
srq->idx_bit_fields =
kmalloc_array(srq->bit_fields_len, sizeof(u32),
GFP_KERNEL);
- if (srq->idx_bit_fields == NULL)
+ if (!srq->idx_bit_fields) {
+ status = -ENOMEM;
goto arm_err;
+ }
memset(srq->idx_bit_fields, 0xff,
srq->bit_fields_len * sizeof(u32));
}
@@ -1848,15 +1840,13 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
goto arm_err;
}
- return &srq->ibsrq;
+ return 0;
arm_err:
ocrdma_mbx_destroy_srq(dev, srq);
-err:
kfree(srq->rqe_wr_id_tbl);
kfree(srq->idx_bit_fields);
- kfree(srq);
- return ERR_PTR(status);
+ return status;
}
int ocrdma_modify_srq(struct ib_srq *ibsrq,
@@ -1885,15 +1875,14 @@ int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
return status;
}
-int ocrdma_destroy_srq(struct ib_srq *ibsrq)
+void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
- int status;
struct ocrdma_srq *srq;
struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
srq = get_ocrdma_srq(ibsrq);
- status = ocrdma_mbx_destroy_srq(dev, srq);
+ ocrdma_mbx_destroy_srq(dev, srq);
if (srq->pd->uctx)
ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
@@ -1901,8 +1890,6 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
kfree(srq->idx_bit_fields);
kfree(srq->rqe_wr_id_tbl);
- kfree(srq);
- return status;
}
/* unprivileged verbs and their support functions. */
@@ -2931,9 +2918,8 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
return 0;
}
-struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
int status;
struct ocrdma_mr *mr;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 4c04ab40798e..d76aae7ed0d3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -61,7 +61,6 @@ enum rdma_protocol_type
ocrdma_query_protocol(struct ib_device *device, u8 port_num);
void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
-struct net_device *ocrdma_get_netdev(struct ib_device *device, u8 port_num);
int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
@@ -69,16 +68,14 @@ void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
-int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
- struct ib_udata *udata);
-void ocrdma_dealloc_pd(struct ib_pd *pd);
+int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_ctx,
struct ib_udata *udata);
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-int ocrdma_destroy_cq(struct ib_cq *);
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
struct ib_qp *ocrdma_create_qp(struct ib_pd *,
struct ib_qp_init_attr *attrs,
@@ -90,25 +87,24 @@ int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int ocrdma_query_qp(struct ib_qp *,
struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
-int ocrdma_destroy_qp(struct ib_qp *);
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
-struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
- struct ib_udata *);
+int ocrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attr,
+ struct ib_udata *udata);
int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
enum ib_srq_attr_mask, struct ib_udata *);
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
-int ocrdma_destroy_srq(struct ib_srq *);
+void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_recv_wr);
-int ocrdma_dereg_mr(struct ib_mr *);
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *);
-struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg);
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata);
int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 996d9ecd93e0..083c2c00a8e9 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -39,7 +39,6 @@
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
-#include <linux/idr.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
@@ -82,20 +81,6 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
-static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
-{
- struct qedr_dev *qdev;
-
- qdev = get_qedr_dev(dev);
- dev_hold(qdev->ndev);
-
- /* The HW vendor's device driver must guarantee
- * that this function returns NULL before the net device has finished
- * NETDEV_UNREGISTER state.
- */
- return qdev->ndev;
-}
-
static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
{
@@ -163,6 +148,14 @@ static const struct attribute_group qedr_attr_group = {
static const struct ib_device_ops qedr_iw_dev_ops = {
.get_port_immutable = qedr_iw_port_immutable,
+ .iw_accept = qedr_iw_accept,
+ .iw_add_ref = qedr_iw_qp_add_ref,
+ .iw_connect = qedr_iw_connect,
+ .iw_create_listen = qedr_iw_create_listen,
+ .iw_destroy_listen = qedr_iw_destroy_listen,
+ .iw_get_qp = qedr_iw_get_qp,
+ .iw_reject = qedr_iw_reject,
+ .iw_rem_ref = qedr_iw_qp_rem_ref,
.query_gid = qedr_iw_query_gid,
};
@@ -172,21 +165,8 @@ static int qedr_iw_register_device(struct qedr_dev *dev)
ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);
- dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
- if (!dev->ibdev.iwcm)
- return -ENOMEM;
-
- dev->ibdev.iwcm->connect = qedr_iw_connect;
- dev->ibdev.iwcm->accept = qedr_iw_accept;
- dev->ibdev.iwcm->reject = qedr_iw_reject;
- dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
- dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
- dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
- dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
- dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;
-
- memcpy(dev->ibdev.iwcm->ifname,
- dev->ndev->name, sizeof(dev->ibdev.iwcm->ifname));
+ memcpy(dev->ibdev.iw_ifname,
+ dev->ndev->name, sizeof(dev->ibdev.iw_ifname));
return 0;
}
@@ -220,7 +200,6 @@ static const struct ib_device_ops qedr_dev_ops = {
.get_dev_fw_str = qedr_get_dev_fw_str,
.get_dma_mr = qedr_get_dma_mr,
.get_link_layer = qedr_link_layer,
- .get_netdev = qedr_get_netdev,
.map_mr_sg = qedr_map_mr_sg,
.mmap = qedr_mmap,
.modify_port = qedr_modify_port,
@@ -239,7 +218,10 @@ static const struct ib_device_ops qedr_dev_ops = {
.reg_user_mr = qedr_reg_user_mr,
.req_notify_cq = qedr_arm_cq,
.resize_cq = qedr_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};
@@ -293,6 +275,10 @@ static int qedr_register_device(struct qedr_dev *dev)
ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
+ rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
+ if (rc)
+ return rc;
+
return ib_register_device(&dev->ibdev, "qedr%d");
}
@@ -364,8 +350,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
spin_lock_init(&dev->sgid_lock);
if (IS_IWARP(dev)) {
- spin_lock_init(&dev->qpidr.idr_lock);
- idr_init(&dev->qpidr.idr);
+ xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
}
@@ -760,8 +745,8 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
break;
case EVENT_TYPE_SRQ:
srq_id = (u16)roce_handle64;
- spin_lock_irqsave(&dev->srqidr.idr_lock, flags);
- srq = idr_find(&dev->srqidr.idr, srq_id);
+ xa_lock_irqsave(&dev->srqs, flags);
+ srq = xa_load(&dev->srqs, srq_id);
if (srq) {
ibsrq = &srq->ibsrq;
if (ibsrq->event_handler) {
@@ -775,7 +760,7 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
"SRQ event with NULL pointer ibsrq. Handle=%llx\n",
roce_handle64);
}
- spin_unlock_irqrestore(&dev->srqidr.idr_lock, flags);
+ xa_unlock_irqrestore(&dev->srqs, flags);
DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
default:
break;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 53bbe6b4e6e6..6175d1e98717 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -33,7 +33,7 @@
#define __QEDR_H__
#include <linux/pci.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
@@ -123,11 +123,6 @@ struct qedr_device_attr {
#define QEDR_ENET_STATE_BIT (0)
-struct qedr_idr {
- spinlock_t idr_lock; /* Protect idr data-structure */
- struct idr idr;
-};
-
struct qedr_dev {
struct ib_device ibdev;
struct qed_dev *cdev;
@@ -171,8 +166,8 @@ struct qedr_dev {
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
enum qed_rdma_type rdma_type;
- struct qedr_idr qpidr;
- struct qedr_idr srqidr;
+ struct xarray qps;
+ struct xarray srqs;
struct workqueue_struct *iwarp_wq;
u16 iwarp_max_mtu;
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 0555e5a8c9ed..22881d4442b9 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -491,7 +491,7 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int rc = 0;
int i;
- qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
+ qp = xa_load(&dev->qps, conn_param->qpn);
if (unlikely(!qp))
return -EINVAL;
@@ -681,7 +681,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
- qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
+ qp = xa_load(&dev->qps, conn_param->qpn);
if (!qp) {
DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
return -EINVAL;
@@ -739,9 +739,7 @@ void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
struct qedr_qp *qp = get_qedr_qp(ibqp);
if (atomic_dec_and_test(&qp->refcnt)) {
- spin_lock_irq(&qp->dev->qpidr.idr_lock);
- idr_remove(&qp->dev->qpidr.idr, qp->qp_id);
- spin_unlock_irq(&qp->dev->qpidr.idr_lock);
+ xa_erase_irq(&qp->dev->qps, qp->qp_id);
kfree(qp);
}
}
@@ -750,5 +748,5 @@ struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
- return idr_find(&dev->qpidr.idr, qpn);
+ return xa_load(&dev->qps, qpn);
}
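The qedr_idr helpers are replaced with a plain XArray: initializing it with XA_FLAGS_LOCK_IRQ gives the structure its own IRQ-safe lock, so the separate spinlock and the idr_preload() dance go away, and lookups become lockless. The insert/lookup/erase pattern, sketched with an illustrative QP table (names are not from the patch):

#include <linux/xarray.h>
#include <linux/types.h>

struct demo_qp {
	u32 qp_id;
};

static DEFINE_XARRAY_FLAGS(demo_qps, XA_FLAGS_LOCK_IRQ);

static int demo_track_qp(struct demo_qp *qp, u32 qp_id)
{
	/* stores qp at index qp_id; -EBUSY if the slot is taken */
	return xa_insert_irq(&demo_qps, qp_id, qp, GFP_KERNEL);
}

static struct demo_qp *demo_find_qp(u32 qp_id)
{
	/* lockless lookup; caller must keep the object alive */
	return xa_load(&demo_qps, qp_id);
}

static void demo_untrack_qp(u32 qp_id)
{
	xa_erase_irq(&demo_qps, qp_id);
}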
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index e1ac2fd60bb1..f5542d703ef9 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -397,14 +397,17 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
bool has_udp = false;
int i;
- send_size = 0;
- for (i = 0; i < swr->num_sge; ++i)
- send_size += swr->sg_list[i].length;
+ rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
+ if (rc)
+ return rc;
- vlan_id = rdma_vlan_dev_vlan_id(sgid_attr->ndev);
if (vlan_id < VLAN_CFI_MASK)
has_vlan = true;
+ send_size = 0;
+ for (i = 0; i < swr->num_sge; ++i)
+ send_size += swr->sg_list[i].length;
+
has_udp = (sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
if (!has_udp) {
/* RoCE v1 */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 59ad4202422c..3d7bde19838e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -42,6 +42,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
@@ -436,8 +437,7 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma->vm_page_prot);
}
-int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct qedr_dev *dev = get_qedr_dev(ibdev);
@@ -446,7 +446,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
int rc;
DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
- (udata && context) ? "User Lib" : "Kernel");
+ udata ? "User Lib" : "Kernel");
if (!dev->rdma_ctx) {
DP_ERR(dev, "invalid RDMA context\n");
@@ -459,10 +459,12 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
pd->pd_id = pd_id;
- if (udata && context) {
+ if (udata) {
struct qedr_alloc_pd_uresp uresp = {
.pd_id = pd_id,
};
+ struct qedr_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct qedr_ucontext, ibucontext);
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc) {
@@ -471,14 +473,14 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return rc;
}
- pd->uctx = get_qedr_ucontext(context);
+ pd->uctx = context;
pd->uctx->pd = pd;
}
return 0;
}
-void qedr_dealloc_pd(struct ib_pd *ibpd)
+void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_pd *pd = get_qedr_pd(ibpd);
@@ -773,9 +775,6 @@ static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
cq->db.data.agg_flags = flags;
cq->db.data.value = cpu_to_le32(cons);
writeq(cq->db.raw, cq->db_addr);
-
- /* Make sure write would stick */
- mmiowb();
}
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
@@ -816,9 +815,10 @@ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+ struct ib_udata *udata)
{
- struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+ struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct qedr_ucontext, ibucontext);
struct qed_rdma_destroy_cq_out_params destroy_oparams;
struct qed_rdma_destroy_cq_in_params destroy_iparams;
struct qedr_dev *dev = get_qedr_dev(ibdev);
@@ -906,7 +906,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
cq->sig = QEDR_CQ_MAGIC_NUMBER;
spin_lock_init(&cq->cq_lock);
- if (ib_ctx) {
+ if (udata) {
rc = qedr_copy_cq_uresp(dev, cq, udata);
if (rc)
goto err3;
@@ -962,7 +962,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
#define QEDR_DESTROY_CQ_ITER_DURATION (10)
-int qedr_destroy_cq(struct ib_cq *ibcq)
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibcq->device);
struct qed_rdma_destroy_cq_out_params oparams;
@@ -986,7 +986,7 @@ int qedr_destroy_cq(struct ib_cq *ibcq)
dev->ops->common->chain_free(dev->cdev, &cq->pbl);
- if (ibcq->uobject && ibcq->uobject->context) {
+ if (udata) {
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
}
@@ -1047,10 +1047,13 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
enum rdma_network_type nw_type;
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
u32 ipv4_addr;
+ int ret;
int i;
gid_attr = grh->sgid_attr;
- qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+ ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
+ if (ret)
+ return ret;
nw_type = rdma_gid_attr_network_type(gid_attr);
switch (nw_type) {
@@ -1264,7 +1267,7 @@ static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
}
}
-static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
+static int qedr_check_srq_params(struct qedr_dev *dev,
struct ib_srq_init_attr *attrs,
struct ib_udata *udata)
{
@@ -1380,38 +1383,28 @@ err0:
return rc;
}
-static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
- void *ptr, u32 id);
-static void qedr_idr_remove(struct qedr_dev *dev,
- struct qedr_idr *qidr, u32 id);
-
-struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct qed_rdma_destroy_srq_in_params destroy_in_params;
struct qed_rdma_create_srq_in_params in_params = {};
- struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
struct qed_rdma_create_srq_out_params out_params;
- struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
struct qedr_create_srq_ureq ureq = {};
u64 pbl_base_addr, phy_prod_pair_addr;
struct qedr_srq_hwq_info *hw_srq;
u32 page_cnt, page_size;
- struct qedr_srq *srq;
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
int rc = 0;
DP_DEBUG(dev, QEDR_MSG_QP,
"create SRQ called from %s (pd %p)\n",
(udata) ? "User lib" : "kernel", pd);
- rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
+ rc = qedr_check_srq_params(dev, init_attr, udata);
if (rc)
- return ERR_PTR(-EINVAL);
-
- srq = kzalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
srq->dev = dev;
hw_srq = &srq->hw_srq;
@@ -1467,13 +1460,13 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
goto err2;
}
- rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
+ rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
if (rc)
goto err2;
DP_DEBUG(dev, QEDR_MSG_SRQ,
"create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
- return &srq->ibsrq;
+ return 0;
err2:
destroy_in_params.srq_id = srq->srq_id;
@@ -1485,18 +1478,16 @@ err1:
else
qedr_free_srq_kernel_params(srq);
err0:
- kfree(srq);
-
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
-int qedr_destroy_srq(struct ib_srq *ibsrq)
+void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct qed_rdma_destroy_srq_in_params in_params = {};
struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
struct qedr_srq *srq = get_qedr_srq(ibsrq);
- qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
+ xa_erase_irq(&dev->srqs, srq->srq_id);
in_params.srq_id = srq->srq_id;
dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
@@ -1508,9 +1499,6 @@ int qedr_destroy_srq(struct ib_srq *ibsrq)
DP_DEBUG(dev, QEDR_MSG_SRQ,
"destroy srq: destroyed srq with srq_id=0x%0x\n",
srq->srq_id);
- kfree(srq);
-
- return 0;
}
int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -1596,29 +1584,6 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}
-static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
- void *ptr, u32 id)
-{
- int rc;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&qidr->idr_lock);
-
- rc = idr_alloc(&qidr->idr, ptr, id, id + 1, GFP_ATOMIC);
-
- spin_unlock_irq(&qidr->idr_lock);
- idr_preload_end();
-
- return rc < 0 ? rc : 0;
-}
-
-static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
-{
- spin_lock_irq(&qidr->idr_lock);
- idr_remove(&qidr->idr, id);
- spin_unlock_irq(&qidr->idr_lock);
-}
-
static inline void
qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
struct qedr_qp *qp,
@@ -1988,7 +1953,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
- rc = qedr_idr_add(dev, &dev->qpidr, qp, qp->qp_id);
+ rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
if (rc)
goto err;
}
@@ -2084,8 +2049,6 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
if (rdma_protocol_roce(&dev->ibdev, 1)) {
writel(qp->rq.db_data.raw, qp->rq.db);
- /* Make sure write takes effect */
- mmiowb();
}
break;
case QED_ROCE_QP_STATE_ERR:
@@ -2498,7 +2461,8 @@ err:
return rc;
}
-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_udata *udata)
{
int rc = 0;
@@ -2508,7 +2472,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
return rc;
}
- if (qp->ibqp.uobject && qp->ibqp.uobject->context)
+ if (udata)
qedr_cleanup_user(dev, qp);
else
qedr_cleanup_kernel(dev, qp);
@@ -2516,7 +2480,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
return 0;
}
-int qedr_destroy_qp(struct ib_qp *ibqp)
+int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
struct qedr_dev *dev = qp->dev;
@@ -2560,37 +2524,31 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
if (qp->qp_type == IB_QPT_GSI)
qedr_destroy_gsi_qp(dev);
- qedr_free_qp_resources(dev, qp);
+ qedr_free_qp_resources(dev, qp, udata);
if (atomic_dec_and_test(&qp->refcnt) &&
rdma_protocol_iwarp(&dev->ibdev, 1)) {
- qedr_idr_remove(dev, &dev->qpidr, qp->qp_id);
+ xa_erase_irq(&dev->qps, qp->qp_id);
kfree(qp);
}
return rc;
}
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
- u32 flags, struct ib_udata *udata)
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+ struct ib_udata *udata)
{
- struct qedr_ah *ah;
-
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+ struct qedr_ah *ah = get_qedr_ah(ibah);
rdma_copy_ah_attr(&ah->attr, attr);
- return &ah->ibah;
+ return 0;
}
-int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
+void qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
rdma_destroy_ah_attr(&ah->attr);
- kfree(ah);
- return 0;
}
static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
@@ -2739,7 +2697,7 @@ err0:
return ERR_PTR(rc);
}
-int qedr_dereg_mr(struct ib_mr *ib_mr)
+int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct qedr_mr *mr = get_qedr_mr(ib_mr);
struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
@@ -2831,8 +2789,8 @@ err0:
return ERR_PTR(rc);
}
-struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
- enum ib_mr_type mr_type, u32 max_num_sg)
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct qedr_mr *mr;
@@ -3502,9 +3460,6 @@ int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
smp_wmb();
writel(qp->sq.db_data.raw, qp->sq.db);
- /* Make sure write sticks */
- mmiowb();
-
spin_unlock_irqrestore(&qp->q_lock, flags);
return rc;
@@ -3695,12 +3650,8 @@ int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
writel(qp->rq.db_data.raw, qp->rq.db);
- /* Make sure write sticks */
- mmiowb();
-
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
- mmiowb(); /* for second doorbell */
}
wr = wr->next;
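
The qedr hunks above drop the driver-private qedr_idr_add()/qedr_idr_remove() helpers (an idr guarded by its own spinlock) in favour of direct xa_insert_irq()/xa_erase_irq() calls on the dev->srqs and dev->qps tables, and delete the mmiowb() barriers after the doorbell writel()s, whose ordering is now provided by the spinlock release on architectures that need it. A minimal sketch of the xarray pattern the hunks switch to; struct obj_table and its helpers are illustrative names, not qedr code:

#include <linux/xarray.h>

/* Illustrative container; in qedr the xarrays live in struct qedr_dev. */
struct obj_table {
	struct xarray objs;
};

static void obj_table_init(struct obj_table *t)
{
	xa_init(&t->objs);		/* no separate spinlock to set up */
}

static int obj_table_add(struct obj_table *t, u32 id, void *obj)
{
	/*
	 * Takes the xarray's internal lock with interrupts disabled and
	 * fails with -EBUSY if the id is already present, so the old
	 * idr_preload()/spin_lock_irq()/idr_alloc() sequence goes away.
	 */
	return xa_insert_irq(&t->objs, id, obj, GFP_KERNEL);
}

static void obj_table_del(struct obj_table *t, u32 id)
{
	xa_erase_irq(&t->objs, id);
}
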
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index f0c05f4771ac..9328c80375ef 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -47,16 +47,14 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
-int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
- struct ib_udata *udata);
-void qedr_dealloc_pd(struct ib_pd *pd);
+int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_ctx,
struct ib_udata *udata);
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-int qedr_destroy_cq(struct ib_cq *);
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
struct ib_udata *);
@@ -64,22 +62,21 @@ int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
-int qedr_destroy_qp(struct ib_qp *ibqp);
+int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
-struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *attr,
- struct ib_udata *udata);
+int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *attr,
+ struct ib_udata *udata);
int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-int qedr_destroy_srq(struct ib_srq *ibsrq);
+void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_recv_wr);
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
- u32 flags, struct ib_udata *udata);
-int qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+ struct ib_udata *udata);
+void qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
-int qedr_dereg_mr(struct ib_mr *);
+int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
@@ -89,7 +86,7 @@ int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg);
+ u32 max_num_sg, struct ib_udata *udata);
int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int qedr_post_send(struct ib_qp *, const struct ib_send_wr *,
const struct ib_send_wr **bad_wr);
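
The verbs.h prototype changes above follow the ib_core move to core-allocated objects: qedr_create_srq()/qedr_create_ah() now receive an already-allocated ib_srq/ib_ah (sized through INIT_RDMA_OBJ_SIZE() entries, as the pvrdma hunks further down show) and return an errno, while the destroy callbacks take the ib_udata and return void. A hedged skeleton of a create/destroy pair under this scheme; my_srq, my_create_srq and my_destroy_srq are made-up names, not qedr code:

#include <rdma/ib_verbs.h>

struct my_srq {
	struct ib_srq ibsrq;	/* must be the first member: the core
				 * allocates sizeof(struct my_srq) and
				 * hands the embedded ib_srq to the driver */
	u32 srq_id;
};

static int my_create_srq(struct ib_srq *ibsrq,
			 struct ib_srq_init_attr *attr,
			 struct ib_udata *udata)
{
	struct my_srq *srq = container_of(ibsrq, struct my_srq, ibsrq);

	/* hardware setup would go here; on failure just return -errno,
	 * the core frees the embedded allocation */
	srq->srq_id = 0;
	return 0;
}

static void my_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	/* teardown only: no kfree() of the container, no return value */
}
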
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 83d2349188db..432d6d0fd7f4 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -52,6 +52,7 @@
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/kthread.h>
+#include <linux/xarray.h>
#include <rdma/ib_hdrs.h>
#include <rdma/rdma_vt.h>
@@ -1105,8 +1106,7 @@ struct qib_filedata {
int rec_cpu_num; /* for cpu affinity; -1 if none */
};
-extern struct list_head qib_dev_list;
-extern spinlock_t qib_devs_lock;
+extern struct xarray qib_dev_table;
extern struct qib_devdata *qib_lookup(int unit);
extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index a4a1f56ce824..f91f23e02283 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -57,7 +57,7 @@
* QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
* fastpath code
* QIB_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in faspath code
+ * traced in fastpath code
* _QIB_TRACING define as 0 if you want to remove all tracing in a
* compilation unit
*/
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 3117cc5f2a9a..92eeea5679e2 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -49,8 +49,6 @@
*/
const char ib_qib_version[] = QIB_DRIVER_VERSION "\n";
-DEFINE_SPINLOCK(qib_devs_lock);
-LIST_HEAD(qib_dev_list);
DEFINE_MUTEX(qib_mutex); /* general driver use */
unsigned qib_ibmtu;
@@ -96,11 +94,11 @@ int qib_count_active_units(void)
{
struct qib_devdata *dd;
struct qib_pportdata *ppd;
- unsigned long flags;
+ unsigned long index, flags;
int pidx, nunits_active = 0;
- spin_lock_irqsave(&qib_devs_lock, flags);
- list_for_each_entry(dd, &qib_dev_list, list) {
+ xa_lock_irqsave(&qib_dev_table, flags);
+ xa_for_each(&qib_dev_table, index, dd) {
if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
continue;
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
@@ -112,7 +110,7 @@ int qib_count_active_units(void)
}
}
}
- spin_unlock_irqrestore(&qib_devs_lock, flags);
+ xa_unlock_irqrestore(&qib_dev_table, flags);
return nunits_active;
}
@@ -125,13 +123,12 @@ int qib_count_units(int *npresentp, int *nupp)
{
int nunits = 0, npresent = 0, nup = 0;
struct qib_devdata *dd;
- unsigned long flags;
+ unsigned long index, flags;
int pidx;
struct qib_pportdata *ppd;
- spin_lock_irqsave(&qib_devs_lock, flags);
-
- list_for_each_entry(dd, &qib_dev_list, list) {
+ xa_lock_irqsave(&qib_dev_table, flags);
+ xa_for_each(&qib_dev_table, index, dd) {
nunits++;
if ((dd->flags & QIB_PRESENT) && dd->kregbase)
npresent++;
@@ -142,8 +139,7 @@ int qib_count_units(int *npresentp, int *nupp)
nup++;
}
}
-
- spin_unlock_irqrestore(&qib_devs_lock, flags);
+ xa_unlock_irqrestore(&qib_dev_table, flags);
if (npresentp)
*npresentp = npresent;
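
qib_count_active_units() and qib_count_units() above walk the new qib_dev_table with xa_for_each() under xa_lock_irqsave(), replacing the qib_devs_lock/list_for_each_entry() pair while keeping the same IRQ-safe exclusion. A minimal sketch of that traversal pattern (the counting helper is illustrative, not driver code):

#include <linux/xarray.h>

static unsigned int count_entries(struct xarray *table)
{
	unsigned long index, flags;
	void *entry;
	unsigned int n = 0;

	xa_lock_irqsave(table, flags);
	xa_for_each(table, index, entry)	/* visits only present slots */
		n++;
	xa_unlock_irqrestore(table, flags);

	return n;
}
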
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 1d940a2885c9..ceb42d948412 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -508,8 +508,8 @@ bail:
*/
static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
{
- struct qib_devdata *dd, *tmp;
- unsigned long flags;
+ struct qib_devdata *dd;
+ unsigned long index;
int ret;
static const struct tree_descr files[] = {
@@ -524,18 +524,12 @@ static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
goto bail;
}
- spin_lock_irqsave(&qib_devs_lock, flags);
-
- list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
- spin_unlock_irqrestore(&qib_devs_lock, flags);
+ xa_for_each(&qib_dev_table, index, dd) {
ret = add_cntr_files(sb, dd);
if (ret)
goto bail;
- spin_lock_irqsave(&qib_devs_lock, flags);
}
- spin_unlock_irqrestore(&qib_devs_lock, flags);
-
bail:
return ret;
}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index cdbf707fa267..531d8a1db2c3 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1884,7 +1884,6 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
writel(pa, tidp32);
qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
- mmiowb();
spin_unlock_irqrestore(tidlockp, flags);
}
@@ -1928,7 +1927,6 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
pa |= 2 << 29;
}
writel(pa, tidp32);
- mmiowb();
}
@@ -2053,9 +2051,7 @@ static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
{
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
}
static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 9fde45538f6e..ea3ddb05cbad 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2175,7 +2175,6 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
pa = chippa;
}
writeq(pa, tidptr);
- mmiowb();
}
/**
@@ -2704,9 +2703,7 @@ static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
{
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
}
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 17d6b24b3473..dd4843379f51 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3793,7 +3793,6 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
pa = chippa;
}
writeq(pa, tidptr);
- mmiowb();
}
/**
@@ -4440,10 +4439,8 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
adjust_rcv_timeout(rcd, npkts);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
}
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
@@ -6140,7 +6137,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
static int setup_txselect(const char *str, const struct kernel_param *kp)
{
struct qib_devdata *dd;
- unsigned long val;
+ unsigned long index, val;
char *n;
if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
@@ -6156,7 +6153,7 @@ static int setup_txselect(const char *str, const struct kernel_param *kp)
}
strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
- list_for_each_entry(dd, &qib_dev_list, list)
+ xa_for_each(&qib_dev_table, index, dd)
if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
set_no_qsfp_atten(dd, 1);
return 0;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 9fd69903ca57..d4fd8a6cff7b 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -36,7 +36,6 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
-#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
@@ -95,7 +94,7 @@ MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disable
static void verify_interrupt(struct timer_list *);
-static struct idr qib_unit_table;
+DEFINE_XARRAY_FLAGS(qib_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
u32 qib_cpulist_count;
unsigned long *qib_cpulist;
@@ -785,21 +784,9 @@ void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}
-static inline struct qib_devdata *__qib_lookup(int unit)
-{
- return idr_find(&qib_unit_table, unit);
-}
-
struct qib_devdata *qib_lookup(int unit)
{
- struct qib_devdata *dd;
- unsigned long flags;
-
- spin_lock_irqsave(&qib_devs_lock, flags);
- dd = __qib_lookup(unit);
- spin_unlock_irqrestore(&qib_devs_lock, flags);
-
- return dd;
+ return xa_load(&qib_dev_table, unit);
}
/*
@@ -1046,10 +1033,9 @@ void qib_free_devdata(struct qib_devdata *dd)
{
unsigned long flags;
- spin_lock_irqsave(&qib_devs_lock, flags);
- idr_remove(&qib_unit_table, dd->unit);
- list_del(&dd->list);
- spin_unlock_irqrestore(&qib_devs_lock, flags);
+ xa_lock_irqsave(&qib_dev_table, flags);
+ __xa_erase(&qib_dev_table, dd->unit);
+ xa_unlock_irqrestore(&qib_dev_table, flags);
#ifdef CONFIG_DEBUG_FS
qib_dbg_ibdev_exit(&dd->verbs_dev);
@@ -1070,15 +1056,15 @@ u64 qib_int_counter(struct qib_devdata *dd)
u64 qib_sps_ints(void)
{
- unsigned long flags;
+ unsigned long index, flags;
struct qib_devdata *dd;
u64 sps_ints = 0;
- spin_lock_irqsave(&qib_devs_lock, flags);
- list_for_each_entry(dd, &qib_dev_list, list) {
+ xa_lock_irqsave(&qib_dev_table, flags);
+ xa_for_each(&qib_dev_table, index, dd) {
sps_ints += qib_int_counter(dd);
}
- spin_unlock_irqrestore(&qib_devs_lock, flags);
+ xa_unlock_irqrestore(&qib_dev_table, flags);
return sps_ints;
}
@@ -1087,12 +1073,9 @@ u64 qib_sps_ints(void)
* allocator, because the verbs cleanup process both does cleanup and
* free of the data structure.
* "extra" is for chip-specific data.
- *
- * Use the idr mechanism to get a unit number for this unit.
*/
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
- unsigned long flags;
struct qib_devdata *dd;
int ret, nports;
@@ -1103,20 +1086,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
if (!dd)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&dd->list);
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&qib_devs_lock, flags);
-
- ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
- if (ret >= 0) {
- dd->unit = ret;
- list_add(&dd->list, &qib_dev_list);
- }
-
- spin_unlock_irqrestore(&qib_devs_lock, flags);
- idr_preload_end();
-
+ ret = xa_alloc_irq(&qib_dev_table, &dd->unit, dd, xa_limit_32b,
+ GFP_KERNEL);
if (ret < 0) {
qib_early_err(&pdev->dev,
"Could not allocate unit ID: error %d\n", -ret);
@@ -1255,8 +1226,6 @@ static int __init qib_ib_init(void)
* These must be called before the driver is registered with
* the PCI subsystem.
*/
- idr_init(&qib_unit_table);
-
#ifdef CONFIG_INFINIBAND_QIB_DCA
dca_register_notify(&dca_notifier);
#endif
@@ -1281,7 +1250,6 @@ bail_dev:
#ifdef CONFIG_DEBUG_FS
qib_dbg_exit();
#endif
- idr_destroy(&qib_unit_table);
qib_dev_cleanup();
bail:
return ret;
@@ -1313,7 +1281,7 @@ static void __exit qib_ib_cleanup(void)
qib_cpulist_count = 0;
kfree(qib_cpulist);
- idr_destroy(&qib_unit_table);
+ WARN_ON(!xa_empty(&qib_dev_table));
qib_dev_cleanup();
}
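
qib_init.c above replaces the idr-based unit numbering with a statically defined xarray: DEFINE_XARRAY_FLAGS(..., XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ) needs no idr_init()/idr_destroy(), xa_alloc_irq() hands out unit IDs, xa_load() does a lockless lookup, and __xa_erase() removes an entry with the lock already held. A small self-contained sketch of the same allocate/lookup/free cycle; unit_table and struct unit are invented for the example:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct unit {
	u32 id;				/* assigned by the xarray allocator */
};

static DEFINE_XARRAY_FLAGS(unit_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static struct unit *unit_create(void)
{
	struct unit *u = kzalloc(sizeof(*u), GFP_KERNEL);
	int ret;

	if (!u)
		return ERR_PTR(-ENOMEM);

	/* stores u at a free index in [0, U32_MAX] and writes it to u->id */
	ret = xa_alloc_irq(&unit_table, &u->id, u, xa_limit_32b, GFP_KERNEL);
	if (ret < 0) {
		kfree(u);
		return ERR_PTR(ret);
	}
	return u;
}

static struct unit *unit_lookup(u32 id)
{
	return xa_load(&unit_table, id);	/* RCU-safe, no lock needed */
}

static void unit_destroy(struct unit *u)
{
	xa_erase_irq(&unit_table, u->id);
	kfree(u);
}
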
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 50dd9811b088..2ac4c67f5ba1 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -933,7 +933,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
qp->s_last = s_last;
/* see post_send() */
barrier();
- rvt_put_swqe(wqe);
+ rvt_put_qp_swqe(qp, wqe);
rvt_qp_swqe_complete(qp,
wqe,
ib_qib_wc_opcode[wqe->wr.opcode],
@@ -975,7 +975,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
u32 s_last;
- rvt_put_swqe(wqe);
+ rvt_put_qp_swqe(qp, wqe);
s_last = qp->s_last;
if (++s_last >= qp->s_size)
s_last = 0;
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 12caf3db8c34..4f4a09c2dbcd 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1068,7 +1068,6 @@ static int qib_sd_setvals(struct qib_devdata *dd)
for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
writeq(data, iaddr + idx);
- mmiowb();
qib_read_kreg32(dd, kr_scratch);
dds_reg_map >>= 4;
for (midx = 0; midx < DDS_ROWS; ++midx) {
@@ -1076,7 +1075,6 @@ static int qib_sd_setvals(struct qib_devdata *dd)
data = dds_init_vals[midx].reg_vals[idx];
writeq(data, daddr);
- mmiowb();
qib_read_kreg32(dd, kr_scratch);
} /* End inner for (vals for this reg, each row) */
} /* end outer for (regs to be stored) */
@@ -1098,13 +1096,11 @@ static int qib_sd_setvals(struct qib_devdata *dd)
didx = idx + min_idx;
/* Store the next RXEQ register address */
writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
- mmiowb();
qib_read_kreg32(dd, kr_scratch);
/* Iterate through RXEQ values */
for (vidx = 0; vidx < 4; vidx++) {
data = rxeq_init_vals[idx].rdata[vidx];
writeq(data, taddr + (vidx << 6) + idx);
- mmiowb();
qib_read_kreg32(dd, kr_scratch);
}
} /* end outer for (Reg-writes for RXEQ) */
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 123ca8f64f75..f712fb7fa82f 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -114,10 +114,10 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
down_read(&current->mm->mmap_sem);
for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages_longterm(start_page + got * PAGE_SIZE,
- num_pages - got,
- FOLL_WRITE | FOLL_FORCE,
- p + got, NULL);
+ ret = get_user_pages(start_page + got * PAGE_SIZE,
+ num_pages - got,
+ FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
+ p + got, NULL);
if (ret < 0) {
up_read(&current->mm->mmap_sem);
goto bail_release;
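
The qib_user_pages.c hunk above (and the usnic_uiom.c hunk later in this diff) replaces get_user_pages_longterm() with plain get_user_pages() plus the FOLL_LONGTERM gup flag: the long-term-pin semantics move into the flags argument instead of a dedicated wrapper. A hedged sketch of the call shape outside any driver context; pin_user_range is a placeholder name, and as in the surrounding driver code the caller is assumed to hold mmap_sem for read:

#include <linux/mm.h>

/* pin npages of user memory starting at 'start' for long-term DMA use */
static long pin_user_range(unsigned long start, unsigned long npages,
			   struct page **pages)
{
	return get_user_pages(start, npages,
			      FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
			      pages, NULL);
}
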
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 31c523b2a9f5..0c204776263f 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -225,8 +225,6 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
if (sdma_rb_node) {
sdma_rb_node->refcount++;
} else {
- int ret;
-
sdma_rb_node = kmalloc(sizeof(
struct qib_user_sdma_rb_node), GFP_KERNEL);
if (!sdma_rb_node)
@@ -235,8 +233,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
sdma_rb_node->refcount = 1;
sdma_rb_node->pid = current->pid;
- ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root,
- sdma_rb_node);
+ qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, sdma_rb_node);
}
pq->sdma_rb_node = sdma_rb_node;
@@ -673,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
else
j = npages;
- ret = get_user_pages_fast(addr, j, 0, pages);
+ ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
if (ret != j) {
i = 0;
j = ret;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index a4426c24b0d1..17bdf8acee2f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -46,7 +46,7 @@
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_hdrs.h>
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
#include <rdma/rdmavt_cq.h>
struct qib_ctxtdata;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index bd4521b2cc5f..e9352750e029 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -447,8 +447,7 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
return 0;
}
-int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct usnic_ib_pd *pd = to_upd(ibpd);
void *umem_pd;
@@ -461,7 +460,7 @@ int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return 0;
}
-void usnic_ib_dealloc_pd(struct ib_pd *pd)
+void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
}
@@ -539,7 +538,7 @@ out_release_mutex:
return ERR_PTR(err);
}
-int usnic_ib_destroy_qp(struct ib_qp *qp)
+int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct usnic_ib_qp_grp *qp_grp;
struct usnic_ib_vf *vf;
@@ -590,7 +589,6 @@ out_unlock:
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
struct ib_cq *cq;
@@ -606,7 +604,7 @@ struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
return cq;
}
-int usnic_ib_destroy_cq(struct ib_cq *cq)
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
usnic_dbg("\n");
kfree(cq);
@@ -642,13 +640,13 @@ err_free:
return ERR_PTR(err);
}
-int usnic_ib_dereg_mr(struct ib_mr *ibmr)
+int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct usnic_ib_mr *mr = to_umr(ibmr);
usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
- usnic_uiom_reg_release(mr->umem, ibmr->uobject->context);
+ usnic_uiom_reg_release(mr->umem);
kfree(mr);
return 0;
}
@@ -731,4 +729,3 @@ int usnic_ib_mmap(struct ib_ucontext *context,
return -EINVAL;
}
-/* End of ib callbacks section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index c40e89b6246f..028f322f8e9b 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -50,24 +50,22 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey);
-int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata);
-void usnic_ib_dealloc_pd(struct ib_pd *pd);
+int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
-int usnic_ib_destroy_qp(struct ib_qp *qp);
+int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
-int usnic_ib_destroy_cq(struct ib_cq *cq);
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
-int usnic_ib_dereg_mr(struct ib_mr *ibmr);
+int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int usnic_ib_mmap(struct ib_ucontext *context,
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 06862a6af185..e312f522a66d 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -143,10 +143,11 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = 0;
while (npages) {
- ret = get_user_pages_longterm(cur_base,
- min_t(unsigned long, npages,
- PAGE_SIZE / sizeof(struct page *)),
- gup_flags, page_list, NULL);
+ ret = get_user_pages(cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE / sizeof(struct page *)),
+ gup_flags | FOLL_LONGTERM,
+ page_list, NULL);
if (ret < 0)
goto out;
@@ -432,8 +433,7 @@ static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
- struct ib_ucontext *context)
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index c88cfa087e3a..70be49b1ca05 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -90,7 +90,6 @@ void usnic_uiom_free_dev_list(struct device **devs);
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
unsigned long addr, size_t size,
int access, int dmasync);
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
- struct ib_ucontext *ucontext);
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr);
int usnic_uiom_init(char *drv_name);
#endif /* USNIC_UIOM_H_ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 104c7db4704f..d7deb19a2800 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -49,6 +49,7 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
#include "pvrdma.h"
@@ -93,7 +94,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
* pvrdma_create_cq - create completion queue
* @ibdev: the device
* @attr: completion queue attributes
- * @context: user context
* @udata: user data
*
* @return: ib_cq completion queue pointer on success,
@@ -101,7 +101,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
*/
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@@ -116,6 +115,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
struct pvrdma_create_cq_resp cq_resp = {0};
struct pvrdma_create_cq ucmd;
+ struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct pvrdma_ucontext, ibucontext);
BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -133,7 +134,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}
cq->ibcq.cqe = entries;
- cq->is_kernel = !context;
+ cq->is_kernel = !udata;
if (!cq->is_kernel) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -185,8 +186,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
cmd->nchunks = npages;
- cmd->ctx_handle = (context) ?
- (u64)to_vucontext(context)->ctx_handle : 0;
+ cmd->ctx_handle = context ? context->ctx_handle : 0;
cmd->cqe = entries;
cmd->pdir_dma = cq->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
@@ -204,13 +204,13 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (!cq->is_kernel) {
- cq->uar = &(to_vucontext(context)->uar);
+ cq->uar = &context->uar;
/* Copy udata back. */
if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back udata\n");
- pvrdma_destroy_cq(&cq->ibcq);
+ pvrdma_destroy_cq(&cq->ibcq, udata);
return ERR_PTR(-EINVAL);
}
}
@@ -245,10 +245,11 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
/**
* pvrdma_destroy_cq - destroy completion queue
* @cq: the completion queue to destroy.
+ * @udata: user data or null for kernel object
*
* @return: 0 for success.
*/
-int pvrdma_destroy_cq(struct ib_cq *cq)
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct pvrdma_cq *vcq = to_vcq(cq);
union pvrdma_cmd_req req;
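
pvrdma_create_cq() above loses its struct ib_ucontext parameter; where the user context is still needed it is derived from the udata with rdma_udata_to_drv_context(), and the kernel-vs-user decision keys off udata instead of context. A minimal sketch of that derivation; my_ucontext and ctx_handle_of are invented names, only the macro usage mirrors the hunk:

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct my_ucontext {
	struct ib_ucontext ibucontext;
	u64 ctx_handle;
};

static u64 ctx_handle_of(struct ib_udata *udata)
{
	/* evaluates to NULL when udata is NULL, i.e. a kernel-side caller */
	struct my_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct my_ucontext, ibucontext);

	return uctx ? uctx->ctx_handle : 0;
}
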
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 6d8b3e0de57a..40182297f87f 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -143,24 +143,6 @@ static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
- u8 port_num)
-{
- struct net_device *netdev;
- struct pvrdma_dev *dev = to_vdev(ibdev);
-
- if (port_num != 1)
- return NULL;
-
- rcu_read_lock();
- netdev = dev->netdev;
- if (netdev)
- dev_hold(netdev);
- rcu_read_unlock();
-
- return netdev;
-}
-
static const struct ib_device_ops pvrdma_dev_ops = {
.add_gid = pvrdma_add_gid,
.alloc_mr = pvrdma_alloc_mr,
@@ -179,7 +161,6 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.get_dev_fw_str = pvrdma_get_fw_ver_str,
.get_dma_mr = pvrdma_get_dma_mr,
.get_link_layer = pvrdma_port_link_layer,
- .get_netdev = pvrdma_get_netdev,
.get_port_immutable = pvrdma_port_immutable,
.map_mr_sg = pvrdma_map_mr_sg,
.mmap = pvrdma_mmap,
@@ -195,6 +176,8 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.query_qp = pvrdma_query_qp,
.reg_user_mr = pvrdma_reg_user_mr,
.req_notify_cq = pvrdma_req_notify_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};
@@ -204,6 +187,8 @@ static const struct ib_device_ops pvrdma_dev_srq_ops = {
.destroy_srq = pvrdma_destroy_srq,
.modify_srq = pvrdma_modify_srq,
.query_srq = pvrdma_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, pvrdma_srq, ibsrq),
};
static int pvrdma_register_device(struct pvrdma_dev *dev)
@@ -277,6 +262,9 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
goto err_qp_free;
}
dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
+ ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
+ if (ret)
+ return ret;
spin_lock_init(&dev->srq_tbl_lock);
rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
@@ -720,6 +708,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_UNREGISTER:
+ ib_device_set_netdev(&dev->ib_dev, NULL, 1);
dev_put(dev->netdev);
dev->netdev = NULL;
break;
@@ -731,6 +720,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
if ((dev->netdev == NULL) &&
(pci_get_drvdata(pdev_net) == ndev)) {
/* this is our netdev */
+ ib_device_set_netdev(&dev->ib_dev, ndev, 1);
dev->netdev = ndev;
dev_hold(ndev);
}
@@ -1131,6 +1121,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
pvrdma_free_slots(dev);
+ dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+ dev->dsrbase);
iounmap(dev->regs);
kfree(dev->sgid_tbl);
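
pvrdma_main.c above drops the driver's .get_netdev callback and instead registers the net_device with the core through ib_device_set_netdev() (clearing it again on NETDEV_UNREGISTER), and it adds INIT_RDMA_OBJ_SIZE() entries so the core can allocate the driver's AH and SRQ containers itself. A condensed sketch of how the two pieces sit together; my_ah, my_srq, my_dev_ops and my_register are placeholders and most mandatory callbacks are elided:

#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

struct my_ah  { struct ib_ah  ibah;  };	/* ib member at offset 0 */
struct my_srq { struct ib_srq ibsrq; };

static const struct ib_device_ops my_dev_ops = {
	/* ...verb callbacks elided for brevity... */
	INIT_RDMA_OBJ_SIZE(ib_ah, my_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_srq, my_srq, ibsrq),
};

static int my_register(struct ib_device *ibdev, struct net_device *ndev)
{
	ib_set_device_ops(ibdev, &my_dev_ops);

	/* the core now answers netdev queries for port 1 on its own */
	return ib_device_set_netdev(ibdev, ndev, 1);
}
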
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index a85884e90e84..65dc47ffb8f3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -119,7 +119,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
- int ret;
+ int ret, npages;
if (length == 0 || length > dev->dsr->caps.max_mr_size) {
dev_warn(&dev->pdev->dev, "invalid mem region length\n");
@@ -133,9 +133,10 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(umem);
}
- if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ npages = ib_umem_num_pages(umem);
+ if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
- umem->npages);
+ npages);
ret = -EINVAL;
goto err_umem;
}
@@ -150,7 +151,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mmr.size = length;
mr->umem = umem;
- ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
if (ret) {
dev_warn(&dev->pdev->dev,
"could not allocate page directory\n");
@@ -167,7 +168,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
cmd->length = length;
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->access_flags = access_flags;
- cmd->nchunks = umem->npages;
+ cmd->nchunks = npages;
cmd->pdir_dma = mr->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
@@ -201,7 +202,7 @@ err_umem:
* @return: ib_mr pointer on success, otherwise returns an errno.
*/
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg)
+ u32 max_num_sg, struct ib_udata *udata)
{
struct pvrdma_dev *dev = to_vdev(pd->device);
struct pvrdma_user_mr *mr;
@@ -272,7 +273,7 @@ freemr:
*
* @return: 0 on success.
*/
-int pvrdma_dereg_mr(struct ib_mr *ibmr)
+int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct pvrdma_user_mr *mr = to_vmr(ibmr);
struct pvrdma_dev *dev = to_vdev(ibmr->device);
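
pvrdma_reg_user_mr() above stops reading umem->npages directly and derives the count with ib_umem_num_pages(), keeping the existing bounds check. A tiny sketch of that check; mr_page_count and max_pages are placeholder names:

#include <rdma/ib_umem.h>

static int mr_page_count(struct ib_umem *umem, int max_pages)
{
	/* number of PAGE_SIZE pages spanned by the pinned region */
	int npages = ib_umem_num_pages(umem);

	if (npages < 0 || npages > max_pages)
		return -EINVAL;
	return npages;
}
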
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 08f4257169bd..0eaaead5baec 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -446,10 +446,11 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
/**
* pvrdma_destroy_qp - destroy a queue pair
* @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
*
* @return: 0 on success.
*/
-int pvrdma_destroy_qp(struct ib_qp *qp)
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct pvrdma_qp *vqp = to_vqp(qp);
union pvrdma_cmd_req req;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 951d9d68107a..6cac0c88cf39 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -94,19 +94,18 @@ int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
* @init_attr: shared receive queue attributes
* @udata: user data
*
- * @return: the ib_srq pointer on success, otherwise returns an errno.
+ * @return: 0 on success, otherwise returns an errno.
*/
-struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct pvrdma_srq *srq = NULL;
- struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_srq *srq = to_vsrq(ibsrq);
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
union pvrdma_cmd_req req;
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
- struct pvrdma_create_srq_resp srq_resp = {0};
+ struct pvrdma_create_srq_resp srq_resp = {};
struct pvrdma_create_srq ucmd;
unsigned long flags;
int ret;
@@ -115,31 +114,25 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
/* No support for kernel clients. */
dev_warn(&dev->pdev->dev,
"no shared receive queue support for kernel client\n");
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (init_attr->srq_type != IB_SRQT_BASIC) {
dev_warn(&dev->pdev->dev,
"shared receive queue type %d not supported\n",
init_attr->srq_type);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
dev_warn(&dev->pdev->dev,
"shared receive queue size invalid\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
- return ERR_PTR(-ENOMEM);
-
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- ret = -ENOMEM;
- goto err_srq;
- }
+ return -ENOMEM;
spin_lock_init(&srq->lock);
refcount_set(&srq->refcnt, 1);
@@ -181,7 +174,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
cmd->srq_type = init_attr->srq_type;
cmd->nchunks = srq->npages;
- cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
cmd->attrs.max_wr = init_attr->attr.max_wr;
cmd->attrs.max_sge = init_attr->attr.max_sge;
cmd->attrs.srq_limit = init_attr->attr.srq_limit;
@@ -204,21 +197,20 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
/* Copy udata back. */
if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
- pvrdma_destroy_srq(&srq->ibsrq);
- return ERR_PTR(-EINVAL);
+ pvrdma_destroy_srq(&srq->ibsrq, udata);
+ return -EINVAL;
}
- return &srq->ibsrq;
+ return 0;
err_page_dir:
pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
ib_umem_release(srq->umem);
err_srq:
- kfree(srq);
atomic_dec(&dev->num_srqs);
- return ERR_PTR(ret);
+ return ret;
}
static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
@@ -246,10 +238,11 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
/**
* pvrdma_destroy_srq - destroy shared receive queue
* @srq: the shared receive queue to destroy
+ * @udata: user data or null for kernel object
*
* @return: 0 for success.
*/
-int pvrdma_destroy_srq(struct ib_srq *srq)
+void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct pvrdma_srq *vsrq = to_vsrq(srq);
union pvrdma_cmd_req req;
@@ -268,8 +261,6 @@ int pvrdma_destroy_srq(struct ib_srq *srq)
ret);
pvrdma_free_srq(dev, vsrq);
-
- return 0;
}
/**
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 42fe821f8d58..faf7ecd7b3fa 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -50,6 +50,7 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>
+#include <rdma/uverbs_ioctl.h>
#include "pvrdma.h"
@@ -70,8 +71,6 @@ int pvrdma_query_device(struct ib_device *ibdev,
if (uhw->inlen || uhw->outlen)
return -EINVAL;
- memset(props, 0, sizeof(*props));
-
props->fw_ver = dev->dsr->caps.fw_ver;
props->sys_image_guid = dev->dsr->caps.sys_image_guid;
props->max_mr_size = dev->dsr->caps.max_mr_size;
@@ -421,13 +420,11 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
/**
* pvrdma_alloc_pd - allocate protection domain
* @ibpd: PD pointer
- * @context: user context
* @udata: user data
*
* @return: the ib_pd protection domain pointer on success, otherwise errno.
*/
-int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct pvrdma_pd *pd = to_vpd(ibpd);
@@ -438,13 +435,15 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
struct pvrdma_alloc_pd_resp pd_resp = {0};
int ret;
+ struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct pvrdma_ucontext, ibucontext);
/* Check allowed max pds */
if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
return -ENOMEM;
cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
- cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
+ cmd->ctx_handle = context ? context->ctx_handle : 0;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
if (ret < 0) {
dev_warn(&dev->pdev->dev,
@@ -453,16 +452,16 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
goto err;
}
- pd->privileged = !context;
+ pd->privileged = !udata;
pd->pd_handle = resp->pd_handle;
pd->pdn = resp->pd_handle;
pd_resp.pdn = resp->pd_handle;
- if (context) {
+ if (udata) {
if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back protection domain\n");
- pvrdma_dealloc_pd(&pd->ibpd);
+ pvrdma_dealloc_pd(&pd->ibpd, udata);
return -EFAULT;
}
}
@@ -478,10 +477,11 @@ err:
/**
* pvrdma_dealloc_pd - deallocate protection domain
* @pd: the protection domain to be released
+ * @udata: user data or null for kernel object
*
* @return: 0 on success, otherwise errno.
*/
-void pvrdma_dealloc_pd(struct ib_pd *pd)
+void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct pvrdma_dev *dev = to_vdev(pd->device);
union pvrdma_cmd_req req = {};
@@ -507,34 +507,28 @@ void pvrdma_dealloc_pd(struct ib_pd *pd)
* @udata: user data blob
* @flags: create address handle flags (see enum rdma_create_ah_flags)
*
- * @return: the ib_ah pointer on success, otherwise errno.
+ * @return: 0 on success, otherwise errno.
*/
-struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata)
{
- struct pvrdma_dev *dev = to_vdev(pd->device);
- struct pvrdma_ah *ah;
+ struct pvrdma_dev *dev = to_vdev(ibah->device);
+ struct pvrdma_ah *ah = to_vah(ibah);
const struct ib_global_route *grh;
u8 port_num = rdma_ah_get_port_num(ah_attr);
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
grh = rdma_ah_read_grh(ah_attr);
if ((ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
- return ERR_PTR(-ENOMEM);
-
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah) {
- atomic_dec(&dev->num_ahs);
- return ERR_PTR(-ENOMEM);
- }
+ return -ENOMEM;
- ah->av.port_pd = to_vpd(pd)->pd_handle | (port_num << 24);
+ ah->av.port_pd = to_vpd(ibah->pd)->pd_handle | (port_num << 24);
ah->av.src_path_bits = rdma_ah_get_path_bits(ah_attr);
ah->av.src_path_bits |= 0x80;
ah->av.gid_index = grh->sgid_index;
@@ -544,11 +538,7 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
memcpy(ah->av.dgid, grh->dgid.raw, 16);
memcpy(ah->av.dmac, ah_attr->roce.dmac, ETH_ALEN);
- ah->ibah.device = pd->device;
- ah->ibah.pd = pd;
- ah->ibah.uobject = NULL;
-
- return &ah->ibah;
+ return 0;
}
/**
@@ -556,14 +546,10 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
* @ah: the address handle to destroyed
* @flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
*
- * @return: 0 on success.
*/
-int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
+void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
struct pvrdma_dev *dev = to_vdev(ah->device);
- kfree(to_vah(ah));
atomic_dec(&dev->num_ahs);
-
- return 0;
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 607aa131d67c..9d7b021e1c59 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -398,36 +398,33 @@ int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
-int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata);
-void pvrdma_dealloc_pd(struct ib_pd *ibpd);
+int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
-int pvrdma_dereg_mr(struct ib_mr *mr);
+int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg);
+ u32 max_num_sg, struct ib_udata *udata);
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
-int pvrdma_destroy_cq(struct ib_cq *cq);
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
-struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
-int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+ struct ib_udata *udata);
+void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
-struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata);
+int pvrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int pvrdma_destroy_srq(struct ib_srq *srq);
+void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
@@ -436,7 +433,7 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-int pvrdma_destroy_qp(struct ib_qp *qp);
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index fc10e4e26ca7..0e147b32cbe9 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -89,36 +89,29 @@ EXPORT_SYMBOL(rvt_check_ah);
/**
* rvt_create_ah - create an address handle
- * @pd: the protection domain
+ * @ibah: the IB address handle
* @ah_attr: the attributes of the AH
* @create_flags: create address handle flags (see enum rdma_create_ah_flags)
* @udata: pointer to user's input output buffer information.
*
* This may be called from interrupt context.
*
- * Return: newly allocated ah
+ * Return: 0 on success
*/
-struct ib_ah *rvt_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- u32 create_flags,
- struct ib_udata *udata)
+int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
+ u32 create_flags, struct ib_udata *udata)
{
- struct rvt_ah *ah;
- struct rvt_dev_info *dev = ib_to_rvt(pd->device);
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+ struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
unsigned long flags;
- if (rvt_check_ah(pd->device, ah_attr))
- return ERR_PTR(-EINVAL);
-
- ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+ if (rvt_check_ah(ibah->device, ah_attr))
+ return -EINVAL;
spin_lock_irqsave(&dev->n_ahs_lock, flags);
if (dev->n_ahs_allocated == dev->dparms.props.max_ah) {
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- kfree(ah);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
dev->n_ahs_allocated++;
@@ -129,35 +122,32 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
atomic_set(&ah->refcount, 0);
if (dev->driver_f.notify_new_ah)
- dev->driver_f.notify_new_ah(pd->device, ah_attr, ah);
+ dev->driver_f.notify_new_ah(ibah->device, ah_attr, ah);
- return &ah->ibah;
+ return 0;
}
/**
* rvt_destory_ah - Destory an address handle
* @ibah: address handle
* @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
+ * @udata: user data or NULL for kernel object
*
* Return: 0 on success
*/
-int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
+void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
{
struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
struct rvt_ah *ah = ibah_to_rvtah(ibah);
unsigned long flags;
- if (atomic_read(&ah->refcount) != 0)
- return -EBUSY;
+ WARN_ON_ONCE(atomic_read(&ah->refcount));
spin_lock_irqsave(&dev->n_ahs_lock, flags);
dev->n_ahs_allocated--;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
rdma_destroy_ah_attr(&ah->attr);
- kfree(ah);
-
- return 0;
}
/**
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index 72431a618d5d..bbb4d3bdec4e 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -50,11 +50,9 @@
#include <rdma/rdma_vt.h>
-struct ib_ah *rvt_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- u32 create_flags,
- struct ib_udata *udata);
-int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
+int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+ u32 create_flags, struct ib_udata *udata);
+void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 4f1544ad4aff..a06e6da7a026 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -168,7 +168,6 @@ static void send_complete(struct work_struct *work)
* rvt_create_cq - create a completion queue
* @ibdev: the device this completion queue is attached to
* @attr: creation attributes
- * @context: unused by the QLogic_IB driver
* @udata: user data for libibverbs.so
*
* Called by ib_create_cq() in the generic verbs code.
@@ -178,7 +177,6 @@ static void send_complete(struct work_struct *work)
*/
struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
@@ -232,7 +230,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
if (udata && udata->outlen >= sizeof(__u64)) {
int err;
- cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
+ cq->ip = rvt_create_mmap_info(rdi, sz, udata, wc);
if (!cq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
@@ -299,12 +297,13 @@ done:
/**
* rvt_destroy_cq - destroy a completion queue
* @ibcq: the completion queue to destroy.
+ * @udata: user data or NULL for kernel object
*
* Called by ib_destroy_cq() in the generic verbs code.
*
* Return: always 0
*/
-int rvt_destroy_cq(struct ib_cq *ibcq)
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
struct rvt_dev_info *rdi = cq->rdi;
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
index 72184b1c176b..3ad6faf18ecb 100644
--- a/drivers/infiniband/sw/rdmavt/cq.h
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -53,9 +53,8 @@
struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
-int rvt_destroy_cq(struct ib_cq *ibcq);
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index 6b712eecbd37..652f4a7efc1b 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -49,6 +49,7 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
+#include <rdma/uverbs_ioctl.h>
#include "mmap.h"
/**
@@ -150,18 +151,19 @@ done:
* rvt_create_mmap_info - allocate information for hfi1_mmap
* @rdi: rvt dev struct
* @size: size in bytes to map
- * @context: user context
+ * @udata: user data (must be valid!)
* @obj: opaque pointer to a cq, wq etc
*
* Return: rvt_mmap struct on success
*/
-struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
- u32 size,
- struct ib_ucontext *context,
- void *obj)
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
+ struct ib_udata *udata, void *obj)
{
struct rvt_mmap_info *ip;
+ if (!udata)
+ return ERR_PTR(-EINVAL);
+
ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
if (!ip)
return ip;
@@ -177,7 +179,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
INIT_LIST_HEAD(&ip->pending_mmaps);
ip->size = size;
- ip->context = context;
+ ip->context =
+ container_of(udata, struct uverbs_attr_bundle, driver_udata)
+ ->context;
ip->obj = obj;
kref_init(&ip->ref);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h
index fab0e7b1daf9..02466c40bc1e 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.h
+++ b/drivers/infiniband/sw/rdmavt/mmap.h
@@ -53,10 +53,8 @@
void rvt_mmap_init(struct rvt_dev_info *rdi);
void rvt_release_mmap_info(struct kref *ref);
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
- u32 size,
- struct ib_ucontext *context,
- void *obj);
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
+ struct ib_udata *udata, void *obj);
void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
u32 size, void *obj);
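
rvt_create_mmap_info() above now takes the ib_udata rather than an ib_ucontext and recovers the owning context from the uverbs_attr_bundle that ib_uverbs wraps around the driver udata; callers such as rvt_create_cq() and rvt_create_qp() simply forward udata. A hedged sketch of that recovery, valid only for udata handed in by ib_uverbs (which is why the function above rejects a NULL udata first); ucontext_from_udata is an invented helper name:

#include <linux/err.h>
#include <rdma/uverbs_ioctl.h>

static struct ib_ucontext *ucontext_from_udata(struct ib_udata *udata)
{
	if (!udata)
		return ERR_PTR(-EINVAL);

	/*
	 * ib_uverbs embeds the driver udata inside struct
	 * uverbs_attr_bundle, so the bundle (and its ucontext) can be
	 * recovered from the udata pointer alone.
	 */
	return container_of(udata, struct uverbs_attr_bundle,
			    driver_udata)->context;
}
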
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 728795043496..54f3f9c27552 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -392,7 +392,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ERR(umem))
return (void *)umem;
- n = umem->nmap;
+ n = ib_umem_num_pages(umem);
mr = __rvt_alloc_mr(n, pd);
if (IS_ERR(mr)) {
@@ -548,7 +548,7 @@ bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
*
* Returns 0 on success.
*/
-int rvt_dereg_mr(struct ib_mr *ibmr)
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct rvt_mr *mr = to_imr(ibmr);
int ret;
@@ -575,9 +575,8 @@ out:
*
* Return: the memory region on success, otherwise return an errno.
*/
-struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct rvt_mr *mr;
@@ -608,11 +607,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(mapped_segs == mr->mr.max_segs))
return -ENOMEM;
- if (mr->mr.length == 0) {
- mr->mr.user_base = addr;
- mr->mr.iova = addr;
- }
-
m = mapped_segs / RVT_SEGSZ;
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -630,17 +624,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
* @sg_nents: number of entries in sg
* @sg_offset: offset in bytes into sg
*
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
* Return: number of sg elements mapped to the memory region
*/
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rvt_mr *mr = to_imr(ibmr);
+ int ret;
mr->mr.length = 0;
mr->mr.page_shift = PAGE_SHIFT;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
- rvt_set_page);
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+ mr->mr.user_base = ibmr->iova;
+ mr->mr.iova = ibmr->iova;
+ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+ mr->mr.length = (size_t)ibmr->length;
+ return ret;
}
/**
@@ -671,6 +672,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
ibmr->rkey = key;
mr->mr.lkey = key;
mr->mr.access_flags = access;
+ mr->mr.iova = ibmr->iova;
atomic_set(&mr->mr.lkey_invalid, 0);
return 0;
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index 132800ee0205..2c8d0752e8e3 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -78,10 +78,9 @@ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
-int rvt_dereg_mr(struct ib_mr *ibmr);
-struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg);
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata);
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
index 6033054b22fa..a403718f0b5e 100644
--- a/drivers/infiniband/sw/rdmavt/pd.c
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -51,15 +51,13 @@
/**
* rvt_alloc_pd - allocate a protection domain
* @ibpd: PD
- * @context: optional user context
* @udata: optional user data
*
* Allocate and keep track of a PD.
*
* Return: 0 on success
*/
-int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct rvt_dev_info *dev = ib_to_rvt(ibdev);
@@ -93,10 +91,11 @@ bail:
/**
* rvt_dealloc_pd - Free PD
* @ibpd: Free up PD
+ * @udata: Valid user data or NULL for kernel object
*
* Return: always 0
*/
-void rvt_dealloc_pd(struct ib_pd *ibpd)
+void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
index 7a887e4a45e7..71ba76d72b1d 100644
--- a/drivers/infiniband/sw/rdmavt/pd.h
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -50,8 +50,7 @@
#include <rdma/rdma_vt.h>
-int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata);
-void rvt_dealloc_pd(struct ib_pd *ibpd);
+int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
#endif /* DEF_RDMAVTPD_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index a34b9a2a32b6..31a2e65e4906 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -623,13 +623,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
while (qp->s_last != qp->s_head) {
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- rvt_put_swqe(wqe);
-
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(
- wqe->ud_wr.ah)->refcount);
+ rvt_put_qp_swqe(qp, wqe);
if (++qp->s_last >= qp->s_size)
qp->s_last = 0;
smp_wmb(); /* see qp_set_savail */
@@ -957,8 +951,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
size_t sg_list_sz;
struct ib_qp *ret = ERR_PTR(-ENOMEM);
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
- struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct rvt_ucontext, ibucontext);
void *priv = NULL;
size_t sqsize;
@@ -1131,8 +1123,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else {
u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
- qp->ip = rvt_create_mmap_info(rdi, s,
- &ucontext->ibucontext,
+ qp->ip = rvt_create_mmap_info(rdi, s, udata,
qp->r_rq.wq);
if (!qp->ip) {
ret = ERR_PTR(-ENOMEM);
@@ -1617,7 +1608,7 @@ inval:
*
* Return: 0 on success.
*/
-int rvt_destroy_qp(struct ib_qp *ibqp)
+int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
@@ -2018,8 +2009,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
* opportunity to adjust PSN values based on internal checks.
*/
log_pmtu = qp->log_pmtu;
- if (qp->ibqp.qp_type != IB_QPT_UC &&
- qp->ibqp.qp_type != IB_QPT_RC) {
+ if (qp->allowed_ops == IB_OPCODE_UD) {
struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
log_pmtu = ah->log_pmtu;
@@ -2067,8 +2057,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
return 0;
bail_inval_free_ref:
- if (qp->ibqp.qp_type != IB_QPT_UC &&
- qp->ibqp.qp_type != IB_QPT_RC)
+ if (qp->allowed_ops == IB_OPCODE_UD)
atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
bail_inval_free:
/* release mr holds */
@@ -2691,11 +2680,7 @@ void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
qp->s_last = last;
/* See post_send() */
barrier();
- rvt_put_swqe(wqe);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+ rvt_put_qp_swqe(qp, wqe);
rvt_qp_swqe_complete(qp,
wqe,
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
index 6d883972e0b8..6db1619389b0 100644
--- a/drivers/infiniband/sw/rdmavt/qp.h
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -48,7 +48,7 @@
*
*/
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
int rvt_driver_qp_init(struct rvt_dev_info *rdi);
void rvt_qp_exit(struct rvt_dev_info *rdi);
@@ -57,7 +57,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct ib_udata *udata);
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
-int rvt_destroy_qp(struct ib_qp *ibqp);
+int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr);
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index 8d71647820a8..09f0cf538be6 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -45,7 +45,7 @@
*
*/
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
#include <rdma/ib_hdrs.h>
/*
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 895b3fabd0bf..8d6b3e764255 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -71,31 +71,24 @@ void rvt_driver_srq_init(struct rvt_dev_info *rdi)
* @srq_init_attr: the attributes of the SRQ
* @udata: data from libibverbs when creating a user SRQ
*
- * Return: Allocated srq object
+ * Return: 0 on success
*/
-struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
+int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
{
- struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
- struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct rvt_ucontext, ibucontext);
- struct rvt_srq *srq;
+ struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
u32 sz;
- struct ib_srq *ret;
+ int ret;
if (srq_init_attr->srq_type != IB_SRQT_BASIC)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (srq_init_attr->attr.max_sge == 0 ||
srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
srq_init_attr->attr.max_wr == 0 ||
srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
- return ERR_PTR(-EINVAL);
-
- srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
- if (!srq)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
/*
* Need to use vmalloc() if we want to support large #s of entries.
@@ -109,7 +102,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
dev->dparms.node);
if (!srq->rq.wq) {
- ret = ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
goto bail_srq;
}
@@ -118,23 +111,18 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
* See rvt_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
- srq->ip =
- rvt_create_mmap_info(dev, s, &ucontext->ibucontext,
- srq->rq.wq);
+ srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
if (!srq->ip) {
- ret = ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
goto bail_wq;
}
- err = ib_copy_to_udata(udata, &srq->ip->offset,
+ ret = ib_copy_to_udata(udata, &srq->ip->offset,
sizeof(srq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
+ if (ret)
goto bail_ip;
- }
}
/*
@@ -146,7 +134,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
spin_lock(&dev->n_srqs_lock);
if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
spin_unlock(&dev->n_srqs_lock);
- ret = ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
goto bail_ip;
}
@@ -159,14 +147,13 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
spin_unlock_irq(&dev->pending_lock);
}
- return &srq->ibsrq;
+ return 0;
bail_ip:
kfree(srq->ip);
bail_wq:
vfree(srq->rq.wq);
bail_srq:
- kfree(srq);
return ret;
}
@@ -338,9 +325,8 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
* rvt_destroy_srq - destroy an srq
* @ibsrq: srq object to destroy
*
- * Return always 0
*/
-int rvt_destroy_srq(struct ib_srq *ibsrq)
+void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
@@ -352,7 +338,4 @@ int rvt_destroy_srq(struct ib_srq *ibsrq)
kref_put(&srq->ip->ref, rvt_release_mmap_info);
else
vfree(srq->rq.wq);
- kfree(srq);
-
- return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/srq.h b/drivers/infiniband/sw/rdmavt/srq.h
index bf0eaaf56465..6427d7d62a9a 100644
--- a/drivers/infiniband/sw/rdmavt/srq.h
+++ b/drivers/infiniband/sw/rdmavt/srq.h
@@ -50,13 +50,12 @@
#include <rdma/rdma_vt.h>
void rvt_driver_srq_init(struct rvt_dev_info *rdi);
-struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
+int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask,
struct ib_udata *udata);
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-int rvt_destroy_srq(struct ib_srq *ibsrq);
+void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
#endif /* DEF_RVTSRQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
index efc9d814b032..c32d21cc615e 100644
--- a/drivers/infiniband/sw/rdmavt/trace_qp.h
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -51,7 +51,7 @@
#include <linux/trace_seq.h>
#include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rvt_qp
diff --git a/drivers/infiniband/sw/rdmavt/trace_rc.h b/drivers/infiniband/sw/rdmavt/trace_rc.h
index 995276933a55..c47357af2099 100644
--- a/drivers/infiniband/sw/rdmavt/trace_rc.h
+++ b/drivers/infiniband/sw/rdmavt/trace_rc.h
@@ -51,7 +51,7 @@
#include <linux/trace_seq.h>
#include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rvt_rc
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
index d5df352eadb1..d963ca755828 100644
--- a/drivers/infiniband/sw/rdmavt/trace_tx.h
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -51,7 +51,7 @@
#include <linux/trace_seq.h>
#include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rvt_tx
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 42c9d35f832d..9546a837a8ac 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -425,7 +425,10 @@ static const struct ib_device_ops rvt_dev_ops = {
.req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq,
.unmap_fmr = rvt_unmap_fmr,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 2e2dff478833..ecf6e659c0da 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -80,7 +80,6 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
SHASH_DESC_ON_STACK(shash, rxe->tfm);
shash->tfm = rxe->tfm;
- shash->flags = 0;
*(u32 *)shash_desc_ctx(shash) = crc;
err = crypto_shash_update(shash, next, len);
if (unlikely(err)) {
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index a57276f2cb84..ad3090131126 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -82,7 +82,7 @@ static void rxe_send_complete(unsigned long data)
}
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
- int comp_vector, struct ib_ucontext *context,
+ int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp)
{
int err;
@@ -94,7 +94,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
return -ENOMEM;
}
- err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
+ err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
if (err) {
vfree(cq->queue->buf);
@@ -115,13 +115,13 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
}
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
- struct rxe_resize_cq_resp __user *uresp)
+ struct rxe_resize_cq_resp __user *uresp,
+ struct ib_udata *udata)
{
int err;
err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
- sizeof(struct rxe_cqe),
- cq->queue->ip ? cq->queue->ip->context : NULL,
+ sizeof(struct rxe_cqe), udata,
uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
if (!err)
cq->ibcq.cqe = cqe;
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index 6cb18406f5b8..ce003666b800 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -643,7 +643,7 @@ struct rxe_atmeth {
__be32 rkey;
__be64 swap_add;
__be64 comp;
-} __attribute__((__packed__));
+} __packed;
static inline u64 __atmeth_va(void *arg)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 3d8cef836f0d..775c23becaec 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -53,11 +53,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
int cqe, int comp_vector);
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
- int comp_vector, struct ib_ucontext *context,
+ int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp);
int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
- struct rxe_resize_cq_resp __user *uresp);
+ struct rxe_resize_cq_resp __user *uresp,
+ struct ib_udata *udata);
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
@@ -91,10 +92,8 @@ struct rxe_mmap_info {
void rxe_mmap_release(struct kref *ref);
-struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
- u32 size,
- struct ib_ucontext *context,
- void *obj);
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
+ struct ib_udata *udata, void *obj);
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
@@ -224,13 +223,12 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
- struct ib_srq_init_attr *init,
- struct ib_ucontext *context,
+ struct ib_srq_init_attr *init, struct ib_udata *udata,
struct rxe_create_srq_resp __user *uresp);
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
- struct rxe_modify_srq_cmd *ucmd);
+ struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
void rxe_dealloc(struct ib_device *ib_dev);
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index d22431e3a908..48f48122ddcb 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>
+#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
@@ -140,13 +141,14 @@ done:
/*
* Allocate information for rxe_mmap
*/
-struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
- u32 size,
- struct ib_ucontext *context,
- void *obj)
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
+ struct ib_udata *udata, void *obj)
{
struct rxe_mmap_info *ip;
+ if (!udata)
+ return ERR_PTR(-EINVAL);
+
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
return NULL;
@@ -165,7 +167,9 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
INIT_LIST_HEAD(&ip->pending_mmaps);
ip->info.size = size;
- ip->context = context;
+ ip->context =
+ container_of(udata, struct uverbs_attr_bundle, driver_udata)
+ ->context;
ip->obj = obj;
kref_init(&ip->ref);
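A note on the mmap conversions above: both rdmavt and rxe now recover the ib_ucontext from the udata pointer with container_of(udata, struct uverbs_attr_bundle, driver_udata). The fragment below is a minimal, self-contained sketch of that recovery idiom using hypothetical stand-in types; it is not the rdma-core definition.

#include <stddef.h>

/* Hypothetical stand-in types, only to show the recovery idiom. */
struct bundle_like {
	void *context;		/* plays the role of the ucontext */
	int driver_udata;	/* plays the role of the embedded udata */
};

/* Same arithmetic as the kernel's container_of(): step back from the
 * member's address by its offset inside the enclosing structure. */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void *context_from_udata(int *udata)
{
	return container_of_sketch(udata, struct bundle_like,
				   driver_udata)->context;
}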
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 42f0f25e396c..f501f72489d8 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -179,7 +179,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
}
mem->umem = umem;
- num_buf = umem->nmap;
+ num_buf = ib_umem_num_pages(umem);
rxe_mem_init(access, mem);
@@ -199,6 +199,12 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
buf = map[0]->buf;
for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ if (num_buf >= RXE_BUF_PER_MAP) {
+ map++;
+ buf = map[0]->buf;
+ num_buf = 0;
+ }
+
vaddr = page_address(sg_page_iter_page(&sg_iter));
if (!vaddr) {
pr_warn("null vaddr\n");
@@ -211,11 +217,6 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
num_buf++;
buf++;
- if (num_buf >= RXE_BUF_PER_MAP) {
- map++;
- buf = map[0]->buf;
- num_buf = 0;
- }
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 753cabcd441c..5a3474f9351b 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -338,13 +338,13 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
-static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb,
- struct rxe_av *av)
+static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
bool xnet = false;
__be16 df = htons(IP_DF);
+ struct rxe_av *av = rxe_get_av(pkt);
struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
@@ -364,11 +364,11 @@ static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb,
return 0;
}
-static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb,
- struct rxe_av *av)
+static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
+ struct rxe_av *av = rxe_get_av(pkt);
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
@@ -392,16 +392,15 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb,
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
{
int err = 0;
- struct rxe_av *av = rxe_get_av(pkt);
- if (av->network_type == RDMA_NETWORK_IPV4)
- err = prepare4(pkt, skb, av);
- else if (av->network_type == RDMA_NETWORK_IPV6)
- err = prepare6(pkt, skb, av);
+ if (skb->protocol == htons(ETH_P_IP))
+ err = prepare4(pkt, skb);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ err = prepare6(pkt, skb);
*crc = rxe_icrc_hdr(pkt, skb);
- if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
+ if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
pkt->mask |= RXE_LOOPBACK_MASK;
return err;
@@ -422,23 +421,20 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
- struct rxe_av *av;
int err;
- av = rxe_get_av(pkt);
-
skb->destructor = rxe_skb_tx_dtor;
skb->sk = pkt->qp->sk->sk;
rxe_add_ref(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
- if (av->network_type == RDMA_NETWORK_IPV4) {
+ if (skb->protocol == htons(ETH_P_IP)) {
err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
- } else if (av->network_type == RDMA_NETWORK_IPV6) {
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else {
- pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
+ pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
atomic_dec(&pkt->qp->skb_out);
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
@@ -462,7 +458,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt)
{
unsigned int hdr_len;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct net_device *ndev;
const struct ib_gid_attr *attr;
const int port_num = 1;
@@ -470,7 +466,6 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
if (IS_ERR(attr))
return NULL;
- ndev = attr->ndev;
if (av->network_type == RDMA_NETWORK_IPV4)
hdr_len = ETH_HLEN + sizeof(struct udphdr) +
@@ -479,15 +474,26 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
hdr_len = ETH_HLEN + sizeof(struct udphdr) +
sizeof(struct ipv6hdr);
+ rcu_read_lock();
+ ndev = rdma_read_gid_attr_ndev_rcu(attr);
+ if (IS_ERR(ndev)) {
+ rcu_read_unlock();
+ goto out;
+ }
skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
GFP_ATOMIC);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ rcu_read_unlock();
goto out;
+ }
- skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
+ skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
+ /* FIXME: hold a reference to this netdev for the lifetime of this skb. */
skb->dev = ndev;
+ rcu_read_unlock();
+
if (av->network_type == RDMA_NETWORK_IPV4)
skb->protocol = htons(ETH_P_IP);
else
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 120fa9005954..56cf18af016a 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -52,12 +52,12 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_AH] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
- .flags = RXE_POOL_ATOMIC,
+ .flags = RXE_POOL_ATOMIC | RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
.size = sizeof(struct rxe_srq),
- .flags = RXE_POOL_INDEX,
+ .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
},
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 09ede70dc1e8..e2c6d1cedf41 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -217,8 +217,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
}
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
- struct ib_qp_init_attr *init,
- struct ib_ucontext *context,
+ struct ib_qp_init_attr *init, struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
@@ -254,7 +253,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
if (!qp->sq.queue)
return -ENOMEM;
- err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
+ err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
qp->sq.queue->buf, qp->sq.queue->buf_size,
&qp->sq.queue->ip);
@@ -287,7 +286,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init,
- struct ib_ucontext *context,
+ struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
@@ -308,7 +307,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
if (!qp->rq.queue)
return -ENOMEM;
- err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
+ err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
@@ -344,8 +343,6 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct rxe_cq *rcq = to_rcq(init->recv_cq);
struct rxe_cq *scq = to_rcq(init->send_cq);
struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
- struct rxe_ucontext *ucontext =
- rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
rxe_add_ref(pd);
rxe_add_ref(rcq);
@@ -360,11 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
rxe_qp_init_misc(rxe, qp, init);
- err = rxe_qp_init_req(rxe, qp, init, &ucontext->ibuc, uresp);
+ err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
if (err)
goto err1;
- err = rxe_qp_init_resp(rxe, qp, init, &ucontext->ibuc, uresp);
+ err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
if (err)
goto err2;
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index f84ab4469261..ff92704de32f 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -36,18 +36,15 @@
#include "rxe_loc.h"
#include "rxe_queue.h"
-int do_mmap_info(struct rxe_dev *rxe,
- struct mminfo __user *outbuf,
- struct ib_ucontext *context,
- struct rxe_queue_buf *buf,
- size_t buf_size,
- struct rxe_mmap_info **ip_p)
+int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+ struct ib_udata *udata, struct rxe_queue_buf *buf,
+ size_t buf_size, struct rxe_mmap_info **ip_p)
{
int err;
struct rxe_mmap_info *ip = NULL;
if (outbuf) {
- ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
+ ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
if (!ip)
goto err1;
@@ -153,12 +150,9 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
return 0;
}
-int rxe_queue_resize(struct rxe_queue *q,
- unsigned int *num_elem_p,
- unsigned int elem_size,
- struct ib_ucontext *context,
- struct mminfo __user *outbuf,
- spinlock_t *producer_lock,
+int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
+ unsigned int elem_size, struct ib_udata *udata,
+ struct mminfo __user *outbuf, spinlock_t *producer_lock,
spinlock_t *consumer_lock)
{
struct rxe_queue *new_q;
@@ -170,7 +164,7 @@ int rxe_queue_resize(struct rxe_queue *q,
if (!new_q)
return -ENOMEM;
- err = do_mmap_info(new_q->rxe, outbuf, context, new_q->buf,
+ err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
new_q->buf_size, &new_q->ip);
if (err) {
vfree(new_q->buf);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 79ba4b320054..acd0a925481c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -76,12 +76,9 @@ struct rxe_queue {
unsigned int index_mask;
};
-int do_mmap_info(struct rxe_dev *rxe,
- struct mminfo __user *outbuf,
- struct ib_ucontext *context,
- struct rxe_queue_buf *buf,
- size_t buf_size,
- struct rxe_mmap_info **ip_p);
+int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+ struct ib_udata *udata, struct rxe_queue_buf *buf,
+ size_t buf_size, struct rxe_mmap_info **ip_p);
void rxe_queue_reset(struct rxe_queue *q);
@@ -89,10 +86,8 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
int *num_elem,
unsigned int elem_size);
-int rxe_queue_resize(struct rxe_queue *q,
- unsigned int *num_elem_p,
- unsigned int elem_size,
- struct ib_ucontext *context,
+int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
+ unsigned int elem_size, struct ib_udata *udata,
struct mminfo __user *outbuf,
/* Protect producers while resizing queue */
spinlock_t *producer_lock,
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index c41a5fee81f7..d8459431534e 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -99,8 +99,7 @@ err1:
}
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
- struct ib_srq_init_attr *init,
- struct ib_ucontext *context,
+ struct ib_srq_init_attr *init, struct ib_udata *udata,
struct rxe_create_srq_resp __user *uresp)
{
int err;
@@ -128,7 +127,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->rq.queue = q;
- err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
+ err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
q->buf_size, &q->ip);
if (err) {
vfree(q->buf);
@@ -149,7 +148,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
- struct rxe_modify_srq_cmd *ucmd)
+ struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
{
int err;
struct rxe_queue *q = srq->rq.queue;
@@ -163,11 +162,8 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
mi = u64_to_user_ptr(ucmd->mmap_info_addr);
err = rxe_queue_resize(q, &attr->max_wr,
- rcv_wqe_size(srq->rq.max_sge),
- srq->rq.queue->ip ?
- srq->rq.queue->ip->context :
- NULL,
- mi, &srq->rq.producer_lock,
+ rcv_wqe_size(srq->rq.max_sge), udata, mi,
+ &srq->rq.producer_lock,
&srq->rq.consumer_lock);
if (err)
goto err2;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 6ecf28570ff0..8c3e2a18cfe4 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -176,8 +176,7 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
return 0;
}
-static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
@@ -185,37 +184,31 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}
-static void rxe_dealloc_pd(struct ib_pd *ibpd)
+static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_pd *pd = to_rpd(ibpd);
rxe_drop_ref(pd);
}
-static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
- struct rdma_ah_attr *attr,
- u32 flags,
- struct ib_udata *udata)
+static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
+ u32 flags, struct ib_udata *udata)
{
int err;
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_ah *ah;
+ struct rxe_dev *rxe = to_rdev(ibah->device);
+ struct rxe_ah *ah = to_rah(ibah);
err = rxe_av_chk_attr(rxe, attr);
if (err)
- return ERR_PTR(err);
-
- ah = rxe_alloc(&rxe->ah_pool);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+ return err;
- rxe_add_ref(pd);
- ah->pd = pd;
+ err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
+ if (err)
+ return err;
rxe_init_av(attr, &ah->av);
- return &ah->ibah;
+ return 0;
}
static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
@@ -242,13 +235,11 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
+static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
- rxe_drop_ref(ah->pd);
rxe_drop_ref(ah);
- return 0;
}
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
@@ -298,21 +289,18 @@ err1:
return err;
}
-static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *init,
- struct ib_udata *udata)
+static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
+ struct ib_udata *udata)
{
int err;
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_ucontext *ucontext =
- rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
- struct rxe_srq *srq;
+ struct rxe_dev *rxe = to_rdev(ibsrq->device);
+ struct rxe_pd *pd = to_rpd(ibsrq->pd);
+ struct rxe_srq *srq = to_rsrq(ibsrq);
struct rxe_create_srq_resp __user *uresp = NULL;
if (udata) {
if (udata->outlen < sizeof(*uresp))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
uresp = udata->outbuf;
}
@@ -320,28 +308,24 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
if (err)
goto err1;
- srq = rxe_alloc(&rxe->srq_pool);
- if (!srq) {
- err = -ENOMEM;
+ err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
+ if (err)
goto err1;
- }
- rxe_add_index(srq);
rxe_add_ref(pd);
srq->pd = pd;
- err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
+ err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err)
goto err2;
- return &srq->ibsrq;
+ return 0;
err2:
rxe_drop_ref(pd);
- rxe_drop_index(srq);
rxe_drop_ref(srq);
err1:
- return ERR_PTR(err);
+ return err;
}
static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -366,7 +350,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (err)
goto err1;
- err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
+ err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
if (err)
goto err1;
@@ -389,7 +373,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
return 0;
}
-static int rxe_destroy_srq(struct ib_srq *ibsrq)
+static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rxe_srq *srq = to_rsrq(ibsrq);
@@ -397,10 +381,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq)
rxe_queue_cleanup(srq->rq.queue);
rxe_drop_ref(srq->pd);
- rxe_drop_index(srq);
rxe_drop_ref(srq);
-
- return 0;
}
static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
@@ -509,7 +490,7 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
}
-static int rxe_destroy_qp(struct ib_qp *ibqp)
+static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct rxe_qp *qp = to_rqp(ibqp);
@@ -799,7 +780,6 @@ err1:
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
int err;
@@ -826,8 +806,8 @@ static struct ib_cq *rxe_create_cq(struct ib_device *dev,
goto err1;
}
- err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
- context, uresp);
+ err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
+ uresp);
if (err)
goto err2;
@@ -839,7 +819,7 @@ err1:
return ERR_PTR(err);
}
-static int rxe_destroy_cq(struct ib_cq *ibcq)
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rxe_cq *cq = to_rcq(ibcq);
@@ -866,7 +846,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
if (err)
goto err1;
- err = rxe_cq_resize_queue(cq, cqe, uresp);
+ err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
if (err)
goto err1;
@@ -990,7 +970,7 @@ err2:
return ERR_PTR(err);
}
-static int rxe_dereg_mr(struct ib_mr *ibmr)
+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct rxe_mem *mr = to_rmr(ibmr);
@@ -1001,9 +981,8 @@ static int rxe_dereg_mr(struct ib_mr *ibmr)
return 0;
}
-static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
@@ -1176,7 +1155,10 @@ static const struct ib_device_ops rxe_dev_ops = {
.reg_user_mr = rxe_reg_user_mr,
.req_notify_cq = rxe_req_notify_cq,
.resize_cq = rxe_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 157e51aeb1e1..e8be7f44e3be 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -71,8 +71,8 @@ struct rxe_pd {
};
struct rxe_ah {
- struct rxe_pool_entry pelem;
struct ib_ah ibah;
+ struct rxe_pool_entry pelem;
struct rxe_pd *pd;
struct rxe_av av;
};
@@ -120,8 +120,8 @@ struct rxe_rq {
};
struct rxe_srq {
- struct rxe_pool_entry pelem;
struct ib_srq ibsrq;
+ struct rxe_pool_entry pelem;
struct rxe_pd *pd;
struct rxe_rq rq;
u32 srq_num;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 48eda16db1a7..9b5e11d3fb85 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2402,7 +2402,18 @@ static ssize_t dev_id_show(struct device *dev,
{
struct net_device *ndev = to_net_dev(dev);
- if (ndev->dev_id == ndev->dev_port)
+ /*
+ * ndev->dev_port will be equal to 0 in old kernels prior to commit
+ * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
+ * port numbers"). Zero was chosen as a special case so that user space
+ * applications can fall back and query dev_id to check whether it has
+ * a different value or not.
+ *
+ * Don't print a warning in that case.
+ *
+ * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
+ */
+ if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
netdev_info_once(ndev,
"\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
current->comm);
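For context on the dev_id/dev_port comment added above, a minimal userspace sketch of the fallback it describes, assuming the usual /sys/class/net/<ifname>/dev_port and dev_id attributes; names and error handling here are illustrative only.

#include <stdio.h>

static int read_net_attr(const char *ifname, const char *attr, long *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/%s", ifname, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%li", val) != 1)
		*val = 0;
	fclose(f);
	return 0;
}

/* Prefer dev_port; a value of 0 means "old kernel", so fall back to dev_id. */
static long ipoib_port_hint(const char *ifname)
{
	long port = 0;

	if (read_net_attr(ifname, "dev_port", &port) == 0 && port != 0)
		return port;
	read_net_attr(ifname, "dev_id", &port);
	return port;
}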
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 1e88213459f2..ba09068f6200 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -279,8 +279,7 @@ void ipoib_event(struct ib_event_handler *handler,
ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
dev_name(&record->device->dev), record->element.port_num);
- if (record->event == IB_EVENT_SM_CHANGE ||
- record->event == IB_EVENT_CLIENT_REREGISTER) {
+ if (record->event == IB_EVENT_CLIENT_REREGISTER) {
queue_work(ipoib_workqueue, &priv->flush_light);
} else if (record->event == IB_EVENT_PORT_ERR ||
record->event == IB_EVENT_PORT_ACTIVE ||
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index d00af71a2cfc..299268f261ee 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -4,8 +4,8 @@ config INFINIBAND_ISER
select SCSI_ISCSI_ATTRS
---help---
Support for the iSCSI Extensions for RDMA (iSER) Protocol
- over InfiniBand. This allows you to access storage devices
- that speak iSCSI over iSER over InfiniBand.
+ over InfiniBand. This allows you to access storage devices
+ that speak iSCSI over iSER over InfiniBand.
The iSER protocol is defined by IETF.
See <http://www.ietf.org/rfc/rfc5046.txt>
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8c707accd148..9c185a8dabd3 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -763,7 +763,6 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
struct iser_conn *iser_conn = ep->dd_data;
- int len;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -774,12 +773,10 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
&iser_conn->ib_conn.cma_id->route.addr.dst_addr,
param, buf);
- break;
default:
- return -ENOSYS;
+ break;
}
-
- return len;
+ return -ENOSYS;
}
/**
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index a7aeaa0c6fbc..36d525110fd2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -311,7 +311,7 @@ struct iser_login_desc {
u64 rsp_dma;
struct ib_sge sge;
struct ib_cqe cqe;
-} __attribute__((packed));
+} __packed;
struct iser_conn;
struct ib_conn;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index ae70cd18903e..aeff68f582d3 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -95,8 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct opa_vnic_skb_mdata *mdata;
@@ -106,8 +105,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
mdata = skb_push(skb, sizeof(*mdata));
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
- rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
- sb_dev, fallback);
+ rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
skb_pull(skb, sizeof(*mdata));
return rc;
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 560e4f2d466e..be5befd92d16 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -51,6 +51,7 @@
*/
#include <linux/module.h>
+#include <linux/xarray.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_verbs.h>
#include <rdma/opa_smi.h>
@@ -97,7 +98,7 @@ const char opa_vnic_driver_version[] = DRV_VERSION;
* @class_port_info: Class port info information.
* @tid: Transaction id
* @port_num: OPA port number
- * @vport_idr: vnic ports idr
+ * @vports: vnic ports
* @event_handler: ib event handler
* @lock: adapter interface lock
*/
@@ -107,7 +108,7 @@ struct opa_vnic_vema_port {
struct opa_class_port_info class_port_info;
u64 tid;
u8 port_num;
- struct idr vport_idr;
+ struct xarray vports;
struct ib_event_handler event_handler;
/* Lock to query/update network adapter */
@@ -148,7 +149,7 @@ vema_get_vport_adapter(struct opa_vnic_vema_mad *recvd_mad,
{
u8 vport_num = vema_get_vport_num(recvd_mad);
- return idr_find(&port->vport_idr, vport_num);
+ return xa_load(&port->vports, vport_num);
}
/**
@@ -207,8 +208,7 @@ static struct opa_vnic_adapter *vema_add_vport(struct opa_vnic_vema_port *port,
int rc;
adapter->cport = cport;
- rc = idr_alloc(&port->vport_idr, adapter, vport_num,
- vport_num + 1, GFP_NOWAIT);
+ rc = xa_insert(&port->vports, vport_num, adapter, GFP_KERNEL);
if (rc < 0) {
opa_vnic_rem_netdev(adapter);
adapter = ERR_PTR(rc);
@@ -853,36 +853,14 @@ err_exit:
v_err("Aborting trap\n");
}
-static int vema_rem_vport(int id, void *p, void *data)
-{
- struct opa_vnic_adapter *adapter = p;
-
- opa_vnic_rem_netdev(adapter);
- return 0;
-}
-
-static int vema_enable_vport(int id, void *p, void *data)
-{
- struct opa_vnic_adapter *adapter = p;
-
- netif_carrier_on(adapter->netdev);
- return 0;
-}
-
-static int vema_disable_vport(int id, void *p, void *data)
-{
- struct opa_vnic_adapter *adapter = p;
-
- netif_carrier_off(adapter->netdev);
- return 0;
-}
-
static void opa_vnic_event(struct ib_event_handler *handler,
struct ib_event *record)
{
struct opa_vnic_vema_port *port =
container_of(handler, struct opa_vnic_vema_port, event_handler);
struct opa_vnic_ctrl_port *cport = port->cport;
+ struct opa_vnic_adapter *adapter;
+ unsigned long index;
if (record->element.port_num != port->port_num)
return;
@@ -891,10 +869,16 @@ static void opa_vnic_event(struct ib_event_handler *handler,
record->event, dev_name(&record->device->dev),
record->element.port_num);
- if (record->event == IB_EVENT_PORT_ERR)
- idr_for_each(&port->vport_idr, vema_disable_vport, NULL);
- if (record->event == IB_EVENT_PORT_ACTIVE)
- idr_for_each(&port->vport_idr, vema_enable_vport, NULL);
+ if (record->event != IB_EVENT_PORT_ERR &&
+ record->event != IB_EVENT_PORT_ACTIVE)
+ return;
+
+ xa_for_each(&port->vports, index, adapter) {
+ if (record->event == IB_EVENT_PORT_ACTIVE)
+ netif_carrier_on(adapter->netdev);
+ else
+ netif_carrier_off(adapter->netdev);
+ }
}
/**
@@ -905,6 +889,8 @@ static void opa_vnic_event(struct ib_event_handler *handler,
*/
static void vema_unregister(struct opa_vnic_ctrl_port *cport)
{
+ struct opa_vnic_adapter *adapter;
+ unsigned long index;
int i;
for (i = 1; i <= cport->num_ports; i++) {
@@ -915,13 +901,14 @@ static void vema_unregister(struct opa_vnic_ctrl_port *cport)
/* Lock ensures no MAD is being processed */
mutex_lock(&port->lock);
- idr_for_each(&port->vport_idr, vema_rem_vport, NULL);
+ xa_for_each(&port->vports, index, adapter)
+ opa_vnic_rem_netdev(adapter);
mutex_unlock(&port->lock);
ib_unregister_mad_agent(port->mad_agent);
port->mad_agent = NULL;
mutex_destroy(&port->lock);
- idr_destroy(&port->vport_idr);
+ xa_destroy(&port->vports);
ib_unregister_event_handler(&port->event_handler);
}
}
@@ -958,7 +945,7 @@ static int vema_register(struct opa_vnic_ctrl_port *cport)
cport->ibdev, opa_vnic_event);
ib_register_event_handler(&port->event_handler);
- idr_init(&port->vport_idr);
+ xa_init(&port->vports);
mutex_init(&port->lock);
port->mad_agent = ib_register_mad_agent(cport->ibdev, i,
IB_QPT_GSI, &reg_req,
@@ -969,7 +956,6 @@ static int vema_register(struct opa_vnic_ctrl_port *cport)
ret = PTR_ERR(port->mad_agent);
port->mad_agent = NULL;
mutex_destroy(&port->lock);
- idr_destroy(&port->vport_idr);
vema_unregister(cport);
return ret;
}
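The opa_vnic conversion above replaces idr_for_each() callbacks with direct xa_for_each() iteration. A minimal sketch of that pattern follows, using a hypothetical vport_like entry instead of struct opa_vnic_adapter.

#include <linux/types.h>
#include <linux/xarray.h>

/* Hypothetical per-port state standing in for struct opa_vnic_adapter. */
struct vport_like {
	bool carrier_on;
};

static void set_all_carriers_sketch(struct xarray *vports, bool on)
{
	struct vport_like *vp;
	unsigned long index;

	/* Visits every stored entry directly; no callback function needed. */
	xa_for_each(vports, index, vp)
		vp->carrier_on = on;
}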
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index f48369d6f3a0..d1e25aba8212 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -503,14 +503,13 @@ static int evdev_open(struct inode *inode, struct file *file)
{
struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
- unsigned int size = sizeof(struct evdev_client) +
- bufsize * sizeof(struct input_event);
struct evdev_client *client;
int error;
- client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ client = kzalloc(struct_size(client, buffer, bufsize),
+ GFP_KERNEL | __GFP_NOWARN);
if (!client)
- client = vzalloc(size);
+ client = vzalloc(struct_size(client, buffer, bufsize));
if (!client)
return -ENOMEM;
@@ -524,7 +523,7 @@ static int evdev_open(struct inode *inode, struct file *file)
goto err_free_client;
file->private_data = client;
- nonseekable_open(inode, file);
+ stream_open(inode, file);
return 0;
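The evdev hunk above swaps an open-coded allocation size for struct_size(). Below is a minimal sketch of what that helper evaluates to for a structure with a trailing flexible array member; the real helper also saturates on overflow, which is omitted here, and client_like is a hypothetical stand-in for struct evdev_client.

#include <stddef.h>

struct client_like {
	unsigned int head, tail;	/* illustrative fixed fields */
	long buffer[];			/* flexible array member */
};

static size_t client_alloc_size(size_t bufsize)
{
	/* Same value struct_size(client, buffer, bufsize) produces in the
	 * patch: the fixed header plus bufsize trailing elements. */
	return sizeof(struct client_like) + bufsize * sizeof(long);
}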
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 4c1e427dfabb..d806f6be4788 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -279,7 +279,7 @@ static int joydev_open(struct inode *inode, struct file *file)
goto err_free_client;
file->private_data = client;
- nonseekable_open(inode, file);
+ stream_open(inode, file);
return 0;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a878351f1643..82398827b64f 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -137,6 +137,17 @@ config KEYBOARD_ATKBD_RDI_KEYCODES
right-hand column will be interpreted as the key shown in the
left-hand column.
+config KEYBOARD_QT1050
+ tristate "Microchip AT42QT1050 Touch Sensor Chip"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to use the Microchip AT42QT1050 QTouch
+ Sensor chip as an input device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called qt1050.
+
config KEYBOARD_QT1070
tristate "Atmel AT42QT1070 Touch Sensor Chip"
depends on I2C
@@ -194,7 +205,7 @@ config KEYBOARD_LKKBD
config KEYBOARD_EP93XX
tristate "EP93xx Matrix Keypad support"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on the Cirrus EP93XX.
@@ -420,7 +431,7 @@ config KEYBOARD_MPR121
config KEYBOARD_SNVS_PWRKEY
tristate "IMX SNVS Power Key Driver"
- depends on SOC_IMX6SX || SOC_IMX7D
+ depends on ARCH_MXC || COMPILE_TEST
depends on OF
help
This is the snvs powerkey driver for the Freescale i.MX application
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 182e92985dbf..f0291ca39f62 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
obj-$(CONFIG_KEYBOARD_PMIC8XXX) += pmic8xxx-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
+obj-$(CONFIG_KEYBOARD_QT1050) += qt1050.o
obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SAMSUNG) += samsung-keypad.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 850bb259c20e..3ad93e3e2f4c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -401,6 +401,8 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
if (ps2_handle_response(&atkbd->ps2dev, data))
goto out;
+ pm_wakeup_event(&serio->dev, 0);
+
if (!atkbd->enabled)
goto out;
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index f77b295e0123..575dac52f7b4 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -27,8 +27,7 @@
#include <linux/io.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
-
-#include <mach/hardware.h>
+#include <linux/soc/cirrus/ep93xx.h>
#include <linux/platform_data/keypad-ep93xx.h>
/*
@@ -137,10 +136,7 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
unsigned int val = 0;
- if (pdata->flags & EP93XX_KEYPAD_KDIV)
- clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV4);
- else
- clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV16);
+ clk_set_rate(keypad->clk, pdata->clk_rate);
if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
val |= KEY_INIT_DIS3KY;
diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c
new file mode 100644
index 000000000000..403060d05c3b
--- /dev/null
+++ b/drivers/input/keyboard/qt1050.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip AT42QT1050 QTouch Sensor Controller
+ *
+ * Copyright (C) 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ *
+ * Based on the AT42QT1070 driver by:
+ * Bo Shen <voice.shen@atmel.com>
+ * Copyright (C) 2011 Atmel
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+/* Chip ID */
+#define QT1050_CHIP_ID 0x00
+#define QT1050_CHIP_ID_VER 0x46
+
+/* Firmware version */
+#define QT1050_FW_VERSION 0x01
+
+/* Detection status */
+#define QT1050_DET_STATUS 0x02
+
+/* Key status */
+#define QT1050_KEY_STATUS 0x03
+
+/* Key Signals */
+#define QT1050_KEY_SIGNAL_0_MSB 0x06
+#define QT1050_KEY_SIGNAL_0_LSB 0x07
+#define QT1050_KEY_SIGNAL_1_MSB 0x08
+#define QT1050_KEY_SIGNAL_1_LSB 0x09
+#define QT1050_KEY_SIGNAL_2_MSB 0x0c
+#define QT1050_KEY_SIGNAL_2_LSB 0x0d
+#define QT1050_KEY_SIGNAL_3_MSB 0x0e
+#define QT1050_KEY_SIGNAL_3_LSB 0x0f
+#define QT1050_KEY_SIGNAL_4_MSB 0x10
+#define QT1050_KEY_SIGNAL_4_LSB 0x11
+
+/* Reference data */
+#define QT1050_REF_DATA_0_MSB 0x14
+#define QT1050_REF_DATA_0_LSB 0x15
+#define QT1050_REF_DATA_1_MSB 0x16
+#define QT1050_REF_DATA_1_LSB 0x17
+#define QT1050_REF_DATA_2_MSB 0x1a
+#define QT1050_REF_DATA_2_LSB 0x1b
+#define QT1050_REF_DATA_3_MSB 0x1c
+#define QT1050_REF_DATA_3_LSB 0x1d
+#define QT1050_REF_DATA_4_MSB 0x1e
+#define QT1050_REF_DATA_4_LSB 0x1f
+
+/* Negative threshold level */
+#define QT1050_NTHR_0 0x21
+#define QT1050_NTHR_1 0x22
+#define QT1050_NTHR_2 0x24
+#define QT1050_NTHR_3 0x25
+#define QT1050_NTHR_4 0x26
+
+/* Pulse / Scale */
+#define QT1050_PULSE_SCALE_0 0x28
+#define QT1050_PULSE_SCALE_1 0x29
+#define QT1050_PULSE_SCALE_2 0x2b
+#define QT1050_PULSE_SCALE_3 0x2c
+#define QT1050_PULSE_SCALE_4 0x2d
+
+/* Detection integrator counter / AKS */
+#define QT1050_DI_AKS_0 0x2f
+#define QT1050_DI_AKS_1 0x30
+#define QT1050_DI_AKS_2 0x32
+#define QT1050_DI_AKS_3 0x33
+#define QT1050_DI_AKS_4 0x34
+
+/* Charge Share Delay */
+#define QT1050_CSD_0 0x36
+#define QT1050_CSD_1 0x37
+#define QT1050_CSD_2 0x39
+#define QT1050_CSD_3 0x3a
+#define QT1050_CSD_4 0x3b
+
+/* Low Power Mode */
+#define QT1050_LPMODE 0x3d
+
+/* Calibration and Reset */
+#define QT1050_RES_CAL 0x3f
+#define QT1050_RES_CAL_RESET BIT(7)
+#define QT1050_RES_CAL_CALIBRATE BIT(1)
+
+#define QT1050_MAX_KEYS 5
+#define QT1050_RESET_TIME 255
+
+struct qt1050_key_regs {
+ unsigned int nthr;
+ unsigned int pulse_scale;
+ unsigned int di_aks;
+ unsigned int csd;
+};
+
+struct qt1050_key {
+ u32 num;
+ u32 charge_delay;
+ u32 thr_cnt;
+ u32 samples;
+ u32 scale;
+ u32 keycode;
+};
+
+struct qt1050_priv {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct regmap *regmap;
+ struct qt1050_key keys[QT1050_MAX_KEYS];
+ unsigned short keycodes[QT1050_MAX_KEYS];
+ u8 reg_keys;
+ u8 last_keys;
+};
+
+static const struct qt1050_key_regs qt1050_key_regs_data[] = {
+ {
+ .nthr = QT1050_NTHR_0,
+ .pulse_scale = QT1050_PULSE_SCALE_0,
+ .di_aks = QT1050_DI_AKS_0,
+ .csd = QT1050_CSD_0,
+ }, {
+ .nthr = QT1050_NTHR_1,
+ .pulse_scale = QT1050_PULSE_SCALE_1,
+ .di_aks = QT1050_DI_AKS_1,
+ .csd = QT1050_CSD_1,
+ }, {
+ .nthr = QT1050_NTHR_2,
+ .pulse_scale = QT1050_PULSE_SCALE_2,
+ .di_aks = QT1050_DI_AKS_2,
+ .csd = QT1050_CSD_2,
+ }, {
+ .nthr = QT1050_NTHR_3,
+ .pulse_scale = QT1050_PULSE_SCALE_3,
+ .di_aks = QT1050_DI_AKS_3,
+ .csd = QT1050_CSD_3,
+ }, {
+ .nthr = QT1050_NTHR_4,
+ .pulse_scale = QT1050_PULSE_SCALE_4,
+ .di_aks = QT1050_DI_AKS_4,
+ .csd = QT1050_CSD_4,
+ }
+};
+
+static bool qt1050_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case QT1050_DET_STATUS:
+ case QT1050_KEY_STATUS:
+ case QT1050_KEY_SIGNAL_0_MSB:
+ case QT1050_KEY_SIGNAL_0_LSB:
+ case QT1050_KEY_SIGNAL_1_MSB:
+ case QT1050_KEY_SIGNAL_1_LSB:
+ case QT1050_KEY_SIGNAL_2_MSB:
+ case QT1050_KEY_SIGNAL_2_LSB:
+ case QT1050_KEY_SIGNAL_3_MSB:
+ case QT1050_KEY_SIGNAL_3_LSB:
+ case QT1050_KEY_SIGNAL_4_MSB:
+ case QT1050_KEY_SIGNAL_4_LSB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_range qt1050_readable_ranges[] = {
+ regmap_reg_range(QT1050_CHIP_ID, QT1050_KEY_STATUS),
+ regmap_reg_range(QT1050_KEY_SIGNAL_0_MSB, QT1050_KEY_SIGNAL_1_LSB),
+ regmap_reg_range(QT1050_KEY_SIGNAL_2_MSB, QT1050_KEY_SIGNAL_4_LSB),
+ regmap_reg_range(QT1050_REF_DATA_0_MSB, QT1050_REF_DATA_1_LSB),
+ regmap_reg_range(QT1050_REF_DATA_2_MSB, QT1050_REF_DATA_4_LSB),
+ regmap_reg_range(QT1050_NTHR_0, QT1050_NTHR_1),
+ regmap_reg_range(QT1050_NTHR_2, QT1050_NTHR_4),
+ regmap_reg_range(QT1050_PULSE_SCALE_0, QT1050_PULSE_SCALE_1),
+ regmap_reg_range(QT1050_PULSE_SCALE_2, QT1050_PULSE_SCALE_4),
+ regmap_reg_range(QT1050_DI_AKS_0, QT1050_DI_AKS_1),
+ regmap_reg_range(QT1050_DI_AKS_2, QT1050_DI_AKS_4),
+ regmap_reg_range(QT1050_CSD_0, QT1050_CSD_1),
+ regmap_reg_range(QT1050_CSD_2, QT1050_RES_CAL),
+};
+
+static const struct regmap_access_table qt1050_readable_table = {
+ .yes_ranges = qt1050_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qt1050_readable_ranges),
+};
+
+static const struct regmap_range qt1050_writeable_ranges[] = {
+ regmap_reg_range(QT1050_NTHR_0, QT1050_NTHR_1),
+ regmap_reg_range(QT1050_NTHR_2, QT1050_NTHR_4),
+ regmap_reg_range(QT1050_PULSE_SCALE_0, QT1050_PULSE_SCALE_1),
+ regmap_reg_range(QT1050_PULSE_SCALE_2, QT1050_PULSE_SCALE_4),
+ regmap_reg_range(QT1050_DI_AKS_0, QT1050_DI_AKS_1),
+ regmap_reg_range(QT1050_DI_AKS_2, QT1050_DI_AKS_4),
+ regmap_reg_range(QT1050_CSD_0, QT1050_CSD_1),
+ regmap_reg_range(QT1050_CSD_2, QT1050_RES_CAL),
+};
+
+static const struct regmap_access_table qt1050_writeable_table = {
+ .yes_ranges = qt1050_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qt1050_writeable_ranges),
+};
+
+static struct regmap_config qt1050_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = QT1050_RES_CAL,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .wr_table = &qt1050_writeable_table,
+ .rd_table = &qt1050_readable_table,
+ .volatile_reg = qt1050_volatile_reg,
+};
+
+static bool qt1050_identify(struct qt1050_priv *ts)
+{
+ unsigned int val;
+ int err;
+
+ /* Read Chip ID */
+ regmap_read(ts->regmap, QT1050_CHIP_ID, &val);
+ if (val != QT1050_CHIP_ID_VER) {
+ dev_err(&ts->client->dev, "ID %d not supported\n", val);
+ return false;
+ }
+
+ /* Read firmware version */
+ err = regmap_read(ts->regmap, QT1050_FW_VERSION, &val);
+ if (err) {
+ dev_err(&ts->client->dev, "could not read the firmware version\n");
+ return false;
+ }
+
+ dev_info(&ts->client->dev, "AT42QT1050 firmware version %1d.%1d\n",
+ val >> 4, val & 0xf);
+
+ return true;
+}
+
+static irqreturn_t qt1050_irq_threaded(int irq, void *dev_id)
+{
+ struct qt1050_priv *ts = dev_id;
+ struct input_dev *input = ts->input;
+ unsigned long new_keys, changed;
+ unsigned int val;
+ int i, err;
+
+ /* Read the detection status register, which also clears the interrupt */
+ err = regmap_read(ts->regmap, QT1050_DET_STATUS, &val);
+ if (err) {
+ dev_err(&ts->client->dev, "Failed to read detection status: %d\n",
+ err);
+ return IRQ_NONE;
+ }
+
+ /* Read which keys changed; the key bits are not contiguous */
+ err = regmap_read(ts->regmap, QT1050_KEY_STATUS, &val);
+ if (err) {
+ dev_err(&ts->client->dev,
+ "Failed to determine the key status: %d\n", err);
+ return IRQ_NONE;
+ }
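+ /*
+ * KEY_STATUS reports keys 0 and 1 in bits 1-2 and keys 2-4 in bits
+ * 4-6; pack them into a contiguous bitmap (bits 0-4) so the bit index
+ * matches the key number used below.
+ */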
+ new_keys = (val & 0x70) >> 2 | (val & 0x6) >> 1;
+ changed = ts->last_keys ^ new_keys;
+ /* Report registered keys only */
+ changed &= ts->reg_keys;
+
+ for_each_set_bit(i, &changed, QT1050_MAX_KEYS)
+ input_report_key(input, ts->keys[i].keycode,
+ test_bit(i, &new_keys));
+
+ ts->last_keys = new_keys;
+ input_sync(input);
+
+ return IRQ_HANDLED;
+}
+
+static const struct qt1050_key_regs *qt1050_get_key_regs(int key_num)
+{
+ return &qt1050_key_regs_data[key_num];
+}
+
+static int qt1050_set_key(struct regmap *map, int number, int on)
+{
+ const struct qt1050_key_regs *key_regs;
+
+ key_regs = qt1050_get_key_regs(number);
+
+ return regmap_update_bits(map, key_regs->di_aks, 0xfc,
+ on ? BIT(4) : 0x00);
+}
+
+static int qt1050_apply_fw_data(struct qt1050_priv *ts)
+{
+ struct regmap *map = ts->regmap;
+ struct qt1050_key *button = &ts->keys[0];
+ const struct qt1050_key_regs *key_regs;
+ int i, err;
+
+ /* Disable all keys first; the registered ones are re-enabled below */
+ for (i = 0; i < QT1050_MAX_KEYS; i++) {
+ err = qt1050_set_key(map, i, 0);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < QT1050_MAX_KEYS; i++, button++) {
+ /* Keep KEY_RESERVED keys off */
+ if (button->keycode == KEY_RESERVED)
+ continue;
+
+ err = qt1050_set_key(map, button->num, 1);
+ if (err)
+ return err;
+
+ key_regs = qt1050_get_key_regs(button->num);
+
+ err = regmap_write(map, key_regs->pulse_scale,
+ (button->samples << 4) | (button->scale));
+ if (err)
+ return err;
+ err = regmap_write(map, key_regs->csd, button->charge_delay);
+ if (err)
+ return err;
+ err = regmap_write(map, key_regs->nthr, button->thr_cnt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int qt1050_parse_fw(struct qt1050_priv *ts)
+{
+ struct device *dev = &ts->client->dev;
+ struct fwnode_handle *child;
+ int nbuttons;
+
+ nbuttons = device_get_child_node_count(dev);
+ if (nbuttons == 0 || nbuttons > QT1050_MAX_KEYS)
+ return -ENODEV;
+
+ device_for_each_child_node(dev, child) {
+ struct qt1050_key button;
+
+ /* Required properties */
+ if (fwnode_property_read_u32(child, "linux,code",
+ &button.keycode)) {
+ dev_err(dev, "Button without keycode\n");
+ goto err;
+ }
+ if (button.keycode >= KEY_MAX) {
+ dev_err(dev, "Invalid keycode 0x%x\n",
+ button.keycode);
+ goto err;
+ }
+
+ if (fwnode_property_read_u32(child, "reg",
+ &button.num)) {
+ dev_err(dev, "Button without pad number\n");
+ goto err;
+ }
+ if (button.num > QT1050_MAX_KEYS - 1)
+ goto err;
+
+ ts->reg_keys |= BIT(button.num);
+
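+ /*
+ * The optional properties below are converted to the raw register
+ * encodings: the pre-charge time is programmed in 2.5 us (2500 ns)
+ * steps, the sample count and scaling factor are stored as the log2
+ * of a power-of-two value, and the threshold is an 8-bit count that
+ * defaults to 20. Out-of-range values fall back to the defaults.
+ */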
+ /* Optional properties */
+ if (fwnode_property_read_u32(child,
+ "microchip,pre-charge-time-ns",
+ &button.charge_delay)) {
+ button.charge_delay = 0;
+ } else {
+ if (button.charge_delay % 2500 == 0)
+ button.charge_delay =
+ button.charge_delay / 2500;
+ else
+ button.charge_delay = 0;
+ }
+
+ if (fwnode_property_read_u32(child, "microchip,average-samples",
+ &button.samples)) {
+ button.samples = 0;
+ } else {
+ if (is_power_of_2(button.samples))
+ button.samples = ilog2(button.samples);
+ else
+ button.samples = 0;
+ }
+
+ if (fwnode_property_read_u32(child, "microchip,average-scaling",
+ &button.scale)) {
+ button.scale = 0;
+ } else {
+ if (is_power_of_2(button.scale))
+ button.scale = ilog2(button.scale);
+ else
+ button.scale = 0;
+ }
+
+ if (fwnode_property_read_u32(child, "microchip,threshold",
+ &button.thr_cnt)) {
+ button.thr_cnt = 20;
+ } else {
+ if (button.thr_cnt > 255)
+ button.thr_cnt = 20;
+ }
+
+ ts->keys[button.num] = button;
+ }
+
+ return 0;
+
+err:
+ fwnode_handle_put(child);
+ return -EINVAL;
+}
+
+static int qt1050_probe(struct i2c_client *client)
+{
+ struct qt1050_priv *ts;
+ struct input_dev *input;
+ struct device *dev = &client->dev;
+ struct regmap *map;
+ unsigned int status, i;
+ int err;
+
+ /* Check basic functionality */
+ err = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE);
+ if (!err) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "assign an IRQ line to this device\n");
+ return -EINVAL;
+ }
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ map = devm_regmap_init_i2c(client, &qt1050_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ ts->client = client;
+ ts->input = input;
+ ts->regmap = map;
+
+ i2c_set_clientdata(client, ts);
+
+ /* Identify the qt1050 chip */
+ if (!qt1050_identify(ts))
+ return -ENODEV;
+
+ /* Parse the keys described in the firmware node (device tree) */
+ err = qt1050_parse_fw(ts);
+ if (err) {
+ dev_err(dev, "Failed to parse firmware: %d\n", err);
+ return err;
+ }
+
+ input->name = "AT42QT1050 QTouch Sensor";
+ input->dev.parent = &client->dev;
+ input->id.bustype = BUS_I2C;
+
+ /* Add the keycode */
+ input->keycode = ts->keycodes;
+ input->keycodesize = sizeof(ts->keycodes[0]);
+ input->keycodemax = QT1050_MAX_KEYS;
+
+ __set_bit(EV_KEY, input->evbit);
+ for (i = 0; i < QT1050_MAX_KEYS; i++) {
+ ts->keycodes[i] = ts->keys[i].keycode;
+ __set_bit(ts->keycodes[i], input->keybit);
+ }
+
+ /* Trigger re-calibration */
+ err = regmap_update_bits(ts->regmap, QT1050_RES_CAL, 0x7f,
+ QT1050_RES_CAL_CALIBRATE);
+ if (err) {
+ dev_err(dev, "Trigger calibration failed: %d\n", err);
+ return err;
+ }
+ err = regmap_read_poll_timeout(ts->regmap, QT1050_DET_STATUS, status,
+ status >> 7 == 1, 10000, 200000);
+ if (err) {
+ dev_err(dev, "Calibration failed: %d\n", err);
+ return err;
+ }
+
+ /* Soft reset to set defaults */
+ err = regmap_update_bits(ts->regmap, QT1050_RES_CAL,
+ QT1050_RES_CAL_RESET, QT1050_RES_CAL_RESET);
+ if (err) {
+ dev_err(dev, "Trigger soft reset failed: %d\n", err);
+ return err;
+ }
+ msleep(QT1050_RESET_TIME);
+
+ /* Apply the per-key configuration parsed above */
+ err = qt1050_apply_fw_data(ts);
+ if (err) {
+ dev_err(dev, "Failed to set firmware data: %d\n", err);
+ return err;
+ }
+
+ err = devm_request_threaded_irq(dev, client->irq, NULL,
+ qt1050_irq_threaded, IRQF_ONESHOT,
+ "qt1050", ts);
+ if (err) {
+ dev_err(&client->dev, "Failed to request irq: %d\n", err);
+ return err;
+ }
+
+ /* Clear #CHANGE line */
+ err = regmap_read(ts->regmap, QT1050_DET_STATUS, &status);
+ if (err) {
+ dev_err(dev, "Failed to clear #CHANGE line level: %d\n", err);
+ return err;
+ }
+
+ /* Register the input device */
+ err = input_register_device(ts->input);
+ if (err) {
+ dev_err(&client->dev, "Failed to register input device: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused qt1050_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct qt1050_priv *ts = i2c_get_clientdata(client);
+
+ disable_irq(client->irq);
+
+ /*
+ * Set the measurement interval to 1 s (125 x 8 ms) if wakeup is
+ * allowed, otherwise turn measurements off. The 1 s interval seems to
+ * be a good compromise between low power and response time.
+ */
+ return regmap_write(ts->regmap, QT1050_LPMODE,
+ device_may_wakeup(dev) ? 125 : 0);
+}
+
+static int __maybe_unused qt1050_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct qt1050_priv *ts = i2c_get_clientdata(client);
+
+ enable_irq(client->irq);
+
+ /* Set measurement interval back to 16ms (2 x 8ms) */
+ return regmap_write(ts->regmap, QT1050_LPMODE, 2);
+}
+
+static SIMPLE_DEV_PM_OPS(qt1050_pm_ops, qt1050_suspend, qt1050_resume);
+
+static const struct of_device_id __maybe_unused qt1050_of_match[] = {
+ { .compatible = "microchip,qt1050", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qt1050_of_match);
+
+static struct i2c_driver qt1050_driver = {
+ .driver = {
+ .name = "qt1050",
+ .of_match_table = of_match_ptr(qt1050_of_match),
+ .pm = &qt1050_pm_ops,
+ },
+ .probe_new = qt1050_probe,
+};
+
+module_i2c_driver(qt1050_driver);
+
+MODULE_AUTHOR("Marco Felsch <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for AT42QT1050 QTouch sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index effb63205d3d..5342d8d45f81 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -148,6 +149,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}
+ pdata->input = input;
+ platform_set_drvdata(pdev, pdata);
+
error = devm_request_irq(&pdev->dev, pdata->irq,
imx_snvs_pwrkey_interrupt,
0, pdev->name, pdev);
@@ -163,32 +167,10 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}
- pdata->input = input;
- platform_set_drvdata(pdev, pdata);
-
device_init_wakeup(&pdev->dev, pdata->wakeup);
-
- return 0;
-}
-
-static int __maybe_unused imx_snvs_pwrkey_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
-
- if (device_may_wakeup(&pdev->dev))
- enable_irq_wake(pdata->irq);
-
- return 0;
-}
-
-static int __maybe_unused imx_snvs_pwrkey_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
-
- if (device_may_wakeup(&pdev->dev))
- disable_irq_wake(pdata->irq);
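+ /*
+ * Hand the wake IRQ to the PM core so it is armed and disarmed
+ * automatically across suspend/resume; the driver no longer needs its
+ * own PM callbacks for this.
+ */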
+ error = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
+ if (error)
+ dev_err(&pdev->dev, "irq wake enable failed.\n");
return 0;
}
@@ -199,13 +181,9 @@ static const struct of_device_id imx_snvs_pwrkey_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_snvs_pwrkey_ids);
-static SIMPLE_DEV_PM_OPS(imx_snvs_pwrkey_pm_ops, imx_snvs_pwrkey_suspend,
- imx_snvs_pwrkey_resume);
-
static struct platform_driver imx_snvs_pwrkey_driver = {
.driver = {
.name = "snvs_pwrkey",
- .pm = &imx_snvs_pwrkey_pm_ops,
.of_match_table = imx_snvs_pwrkey_ids,
},
.probe = imx_snvs_pwrkey_probe,
diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c
index 57272df34cd5..df3eec72a9b2 100644
--- a/drivers/input/keyboard/sun4i-lradc-keys.c
+++ b/drivers/input/keyboard/sun4i-lradc-keys.c
@@ -46,6 +46,7 @@
#define CONTINUE_TIME_SEL(x) ((x) << 16) /* 4 bits */
#define KEY_MODE_SEL(x) ((x) << 12) /* 2 bits */
#define LEVELA_B_CNT(x) ((x) << 8) /* 4 bits */
+#define HOLD_KEY_EN(x) ((x) << 7)
#define HOLD_EN(x) ((x) << 6)
#define LEVELB_VOL(x) ((x) << 4) /* 2 bits */
#define SAMPLE_RATE(x) ((x) << 2) /* 2 bits */
@@ -63,6 +64,25 @@
#define CHAN0_KEYDOWN_IRQ BIT(1)
#define CHAN0_DATA_IRQ BIT(0)
+/* struct lradc_variant - Describes a sun4i-a10-lradc-keys hardware variant
+ * @divisor_numerator: The numerator of the LRADC Vref internal divider
+ * @divisor_denominator: The denominator of the LRADC Vref internal divider
+ */
+struct lradc_variant {
+ u8 divisor_numerator;
+ u8 divisor_denominator;
+};
+
+static const struct lradc_variant lradc_variant_a10 = {
+ .divisor_numerator = 2,
+ .divisor_denominator = 3
+};
+
+static const struct lradc_variant r_lradc_variant_a83t = {
+ .divisor_numerator = 3,
+ .divisor_denominator = 4
+};
+
struct sun4i_lradc_keymap {
u32 voltage;
u32 keycode;
@@ -74,6 +94,7 @@ struct sun4i_lradc_data {
void __iomem *base;
struct regulator *vref_supply;
struct sun4i_lradc_keymap *chan0_map;
+ const struct lradc_variant *variant;
u32 chan0_map_count;
u32 chan0_keycode;
u32 vref;
@@ -128,9 +149,9 @@ static int sun4i_lradc_open(struct input_dev *dev)
if (error)
return error;
- /* lradc Vref internally is divided by 2/3 */
- lradc->vref = regulator_get_voltage(lradc->vref_supply) * 2 / 3;
-
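+ /*
+ * Vref for the LRADC is the supply voltage scaled by the
+ * variant-specific internal divider (2/3 on the A10 LRADC, 3/4 on the
+ * A83T R_LRADC).
+ */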
+ lradc->vref = regulator_get_voltage(lradc->vref_supply) *
+ lradc->variant->divisor_numerator /
+ lradc->variant->divisor_denominator;
/*
* Set sample time to 4 ms / 250 Hz. Wait 2 * 4 ms for key to
* stabilize on press, wait (1 + 1) * 4 ms for key release
@@ -222,6 +243,12 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
if (error)
return error;
+ lradc->variant = of_device_get_match_data(&pdev->dev);
+ if (!lradc->variant) {
+ dev_err(&pdev->dev, "Missing sun4i-a10-lradc-keys variant\n");
+ return -EINVAL;
+ }
+
lradc->vref_supply = devm_regulator_get(dev, "vref");
if (IS_ERR(lradc->vref_supply))
return PTR_ERR(lradc->vref_supply);
@@ -265,7 +292,10 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
}
static const struct of_device_id sun4i_lradc_of_match[] = {
- { .compatible = "allwinner,sun4i-a10-lradc-keys", },
+ { .compatible = "allwinner,sun4i-a10-lradc-keys",
+ .data = &lradc_variant_a10 },
+ { .compatible = "allwinner,sun8i-a83t-r-lradc",
+ .data = &r_lradc_variant_a83t },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_lradc_of_match);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index e15ed1bb8558..54d36f98b426 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -190,6 +190,15 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_MAX77650_ONKEY
+ tristate "Maxim MAX77650 ONKEY support"
+ depends on MFD_MAX77650
+ help
+ Support the ONKEY of the MAX77650 PMIC as an input device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called max77650-onkey.
+
config INPUT_MAX77693_HAPTIC
tristate "MAXIM MAX77693/MAX77843 haptic controller support"
depends on (MFD_MAX77693 || MFD_MAX77843) && PWM
@@ -290,6 +299,18 @@ config INPUT_GPIO_DECODER
To compile this driver as a module, choose M here: the module
will be called gpio_decoder.
+config INPUT_GPIO_VIBRA
+ tristate "GPIO vibrator support"
+ depends on GPIOLIB || COMPILE_TEST
+ select INPUT_FF_MEMLESS
+ help
+ Say Y here to get support for GPIO based vibrator devices.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will be
+ called gpio-vibra.
+
config INPUT_IXP4XX_BEEPER
tristate "IXP4XX Beeper support"
depends on ARCH_IXP4XX
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index b936c5b1d4ac..8fd187f314bd 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o
+obj-$(CONFIG_INPUT_GPIO_VIBRA) += gpio-vibra.o
obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MAX77650_ONKEY) += max77650-onkey.o
obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
diff --git a/drivers/input/misc/gpio-vibra.c b/drivers/input/misc/gpio-vibra.c
new file mode 100644
index 000000000000..f79f75595dd7
--- /dev/null
+++ b/drivers/input/misc/gpio-vibra.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * GPIO vibrator driver
+ *
+ * Copyright (C) 2019 Luca Weiss <luca@z3ntu.xyz>
+ *
+ * Based on PWM vibrator driver:
+ * Copyright (C) 2017 Collabora Ltd.
+ *
+ * Based on previous work from:
+ * Copyright (C) 2012 Dmitry Torokhov <dmitry.torokhov@gmail.com>
+ *
+ * Based on PWM beeper driver:
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+struct gpio_vibrator {
+ struct input_dev *input;
+ struct gpio_desc *gpio;
+ struct regulator *vcc;
+
+ struct work_struct play_work;
+ bool running;
+ bool vcc_on;
+};
+
+static int gpio_vibrator_start(struct gpio_vibrator *vibrator)
+{
+ struct device *pdev = vibrator->input->dev.parent;
+ int err;
+
+ if (!vibrator->vcc_on) {
+ err = regulator_enable(vibrator->vcc);
+ if (err) {
+ dev_err(pdev, "failed to enable regulator: %d\n", err);
+ return err;
+ }
+ vibrator->vcc_on = true;
+ }
+
+ gpiod_set_value_cansleep(vibrator->gpio, 1);
+
+ return 0;
+}
+
+static void gpio_vibrator_stop(struct gpio_vibrator *vibrator)
+{
+ gpiod_set_value_cansleep(vibrator->gpio, 0);
+
+ if (vibrator->vcc_on) {
+ regulator_disable(vibrator->vcc);
+ vibrator->vcc_on = false;
+ }
+}
+
+static void gpio_vibrator_play_work(struct work_struct *work)
+{
+ struct gpio_vibrator *vibrator =
+ container_of(work, struct gpio_vibrator, play_work);
+
+ if (vibrator->running)
+ gpio_vibrator_start(vibrator);
+ else
+ gpio_vibrator_stop(vibrator);
+}
+
+static int gpio_vibrator_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct gpio_vibrator *vibrator = input_get_drvdata(dev);
+ int level;
+
+ level = effect->u.rumble.strong_magnitude;
+ if (!level)
+ level = effect->u.rumble.weak_magnitude;
+
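+ /*
+ * The memless FF core may call this handler from atomic context, so
+ * defer the regulator and GPIO updates, which may sleep, to a work
+ * item.
+ */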
+ vibrator->running = level;
+ schedule_work(&vibrator->play_work);
+
+ return 0;
+}
+
+static void gpio_vibrator_close(struct input_dev *input)
+{
+ struct gpio_vibrator *vibrator = input_get_drvdata(input);
+
+ cancel_work_sync(&vibrator->play_work);
+ gpio_vibrator_stop(vibrator);
+ vibrator->running = false;
+}
+
+static int gpio_vibrator_probe(struct platform_device *pdev)
+{
+ struct gpio_vibrator *vibrator;
+ int err;
+
+ vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL);
+ if (!vibrator)
+ return -ENOMEM;
+
+ vibrator->input = devm_input_allocate_device(&pdev->dev);
+ if (!vibrator->input)
+ return -ENOMEM;
+
+ vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ err = PTR_ERR_OR_ZERO(vibrator->vcc);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to request regulator: %d\n",
+ err);
+ return err;
+ }
+
+ vibrator->gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
+ err = PTR_ERR_OR_ZERO(vibrator->gpio);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to request main gpio: %d\n",
+ err);
+ return err;
+ }
+
+ INIT_WORK(&vibrator->play_work, gpio_vibrator_play_work);
+
+ vibrator->input->name = "gpio-vibrator";
+ vibrator->input->id.bustype = BUS_HOST;
+ vibrator->input->close = gpio_vibrator_close;
+
+ input_set_drvdata(vibrator->input, vibrator);
+ input_set_capability(vibrator->input, EV_FF, FF_RUMBLE);
+
+ err = input_ff_create_memless(vibrator->input, NULL,
+ gpio_vibrator_play_effect);
+ if (err) {
+ dev_err(&pdev->dev, "Couldn't create FF dev: %d\n", err);
+ return err;
+ }
+
+ err = input_register_device(vibrator->input);
+ if (err) {
+ dev_err(&pdev->dev, "Couldn't register input dev: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, vibrator);
+
+ return 0;
+}
+
+static int __maybe_unused gpio_vibrator_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_vibrator *vibrator = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&vibrator->play_work);
+ if (vibrator->running)
+ gpio_vibrator_stop(vibrator);
+
+ return 0;
+}
+
+static int __maybe_unused gpio_vibrator_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_vibrator *vibrator = platform_get_drvdata(pdev);
+
+ if (vibrator->running)
+ gpio_vibrator_start(vibrator);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(gpio_vibrator_pm_ops,
+ gpio_vibrator_suspend, gpio_vibrator_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_vibra_dt_match_table[] = {
+ { .compatible = "gpio-vibrator" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_vibra_dt_match_table);
+#endif
+
+static struct platform_driver gpio_vibrator_driver = {
+ .probe = gpio_vibrator_probe,
+ .driver = {
+ .name = "gpio-vibrator",
+ .pm = &gpio_vibrator_pm_ops,
+ .of_match_table = of_match_ptr(gpio_vibra_dt_match_table),
+ },
+};
+module_platform_driver(gpio_vibrator_driver);
+
+MODULE_AUTHOR("Luca Weiss <luca@z3ntu.xyz>");
+MODULE_DESCRIPTION("GPIO vibrator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-vibrator");
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 1fe149f3def2..4776273fa10b 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -30,6 +30,8 @@ MODULE_ALIAS("platform:ixp4xx-beeper");
static DEFINE_SPINLOCK(beep_lock);
+static int ixp4xx_timer2_irq;
+
static void ixp4xx_spkr_control(unsigned int pin, unsigned int count)
{
unsigned long flags;
@@ -90,6 +92,7 @@ static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
static int ixp4xx_spkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
+ int irq;
int err;
input_dev = input_allocate_device();
@@ -110,15 +113,22 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
input_dev->event = ixp4xx_spkr_event;
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto err_free_device;
+ }
+
err = gpio_request(dev->id, "ixp4-beeper");
if (err)
goto err_free_device;
- err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt,
+ err = request_irq(irq, &ixp4xx_spkr_interrupt,
IRQF_NO_SUSPEND, "ixp4xx-beeper",
(void *) dev->id);
if (err)
goto err_free_gpio;
+ ixp4xx_timer2_irq = irq;
err = input_register_device(input_dev);
if (err)
@@ -129,7 +139,7 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
return 0;
err_free_irq:
- free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ free_irq(irq, (void *)dev->id);
err_free_gpio:
gpio_free(dev->id);
err_free_device:
@@ -146,10 +156,10 @@ static int ixp4xx_spkr_remove(struct platform_device *dev)
input_unregister_device(input_dev);
/* turn the speaker off */
- disable_irq(IRQ_IXP4XX_TIMER2);
+ disable_irq(ixp4xx_timer2_irq);
ixp4xx_spkr_control(pin, 0);
- free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ free_irq(ixp4xx_timer2_irq, (void *)dev->id);
gpio_free(dev->id);
return 0;
@@ -161,7 +171,7 @@ static void ixp4xx_spkr_shutdown(struct platform_device *dev)
unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
/* turn off the speaker */
- disable_irq(IRQ_IXP4XX_TIMER2);
+ disable_irq(ixp4xx_timer2_irq);
ixp4xx_spkr_control(pin, 0);
}
diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c
new file mode 100644
index 000000000000..fbf6caab7217
--- /dev/null
+++ b/drivers/input/misc/max77650-onkey.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// ONKEY driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX77650_ONKEY_MODE_MASK BIT(3)
+#define MAX77650_ONKEY_MODE_PUSH 0x00
+#define MAX77650_ONKEY_MODE_SLIDE BIT(3)
+
+struct max77650_onkey {
+ struct input_dev *input;
+ unsigned int code;
+};
+
+static irqreturn_t max77650_onkey_falling(int irq, void *data)
+{
+ struct max77650_onkey *onkey = data;
+
+ input_report_key(onkey->input, onkey->code, 0);
+ input_sync(onkey->input);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max77650_onkey_rising(int irq, void *data)
+{
+ struct max77650_onkey *onkey = data;
+
+ input_report_key(onkey->input, onkey->code, 1);
+ input_sync(onkey->input);
+
+ return IRQ_HANDLED;
+}
+
+static int max77650_onkey_probe(struct platform_device *pdev)
+{
+ int irq_r, irq_f, error, mode;
+ struct max77650_onkey *onkey;
+ struct device *dev, *parent;
+ struct regmap *map;
+ unsigned int type;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+
+ map = dev_get_regmap(parent, NULL);
+ if (!map)
+ return -ENODEV;
+
+ onkey = devm_kzalloc(dev, sizeof(*onkey), GFP_KERNEL);
+ if (!onkey)
+ return -ENOMEM;
+
+ error = device_property_read_u32(dev, "linux,code", &onkey->code);
+ if (error)
+ onkey->code = KEY_POWER;
+
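+ /*
+ * When the optional "maxim,onkey-slide" property is set, the ONKEY
+ * input behaves as a slide switch and is reported as EV_SW; otherwise
+ * it is a push button reported as EV_KEY.
+ */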
+ if (device_property_read_bool(dev, "maxim,onkey-slide")) {
+ mode = MAX77650_ONKEY_MODE_SLIDE;
+ type = EV_SW;
+ } else {
+ mode = MAX77650_ONKEY_MODE_PUSH;
+ type = EV_KEY;
+ }
+
+ error = regmap_update_bits(map, MAX77650_REG_CNFG_GLBL,
+ MAX77650_ONKEY_MODE_MASK, mode);
+ if (error)
+ return error;
+
+ irq_f = platform_get_irq_byname(pdev, "nEN_F");
+ if (irq_f < 0)
+ return irq_f;
+
+ irq_r = platform_get_irq_byname(pdev, "nEN_R");
+ if (irq_r < 0)
+ return irq_r;
+
+ onkey->input = devm_input_allocate_device(dev);
+ if (!onkey->input)
+ return -ENOMEM;
+
+ onkey->input->name = "max77650_onkey";
+ onkey->input->phys = "max77650_onkey/input0";
+ onkey->input->id.bustype = BUS_I2C;
+ input_set_capability(onkey->input, type, onkey->code);
+
+ error = devm_request_any_context_irq(dev, irq_f, max77650_onkey_falling,
+ IRQF_ONESHOT, "onkey-down", onkey);
+ if (error < 0)
+ return error;
+
+ error = devm_request_any_context_irq(dev, irq_r, max77650_onkey_rising,
+ IRQF_ONESHOT, "onkey-up", onkey);
+ if (error < 0)
+ return error;
+
+ return input_register_device(onkey->input);
+}
+
+static struct platform_driver max77650_onkey_driver = {
+ .driver = {
+ .name = "max77650-onkey",
+ },
+ .probe = max77650_onkey_probe,
+};
+module_platform_driver(max77650_onkey_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 ONKEY driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 26ec603fe220..1a6762fc38f9 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -398,7 +398,7 @@ static int uinput_open(struct inode *inode, struct file *file)
newdev->state = UIST_NEW_DEVICE;
file->private_data = newdev;
- nonseekable_open(inode, file);
+ stream_open(inode, file);
return 0;
}
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 628ef617bb2f..f9525d6f0bfe 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0600", 0 },
{ "ELAN0601", 0 },
{ "ELAN0602", 0 },
+ { "ELAN0603", 0 },
+ { "ELAN0604", 0 },
{ "ELAN0605", 0 },
+ { "ELAN0606", 0 },
+ { "ELAN0607", 0 },
{ "ELAN0608", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
{ "ELAN060C", 0 },
+ { "ELAN060F", 0 },
+ { "ELAN0610", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
+ { "ELAN0615", 0 },
+ { "ELAN0616", 0 },
{ "ELAN0617", 0 },
{ "ELAN0618", 0 },
+ { "ELAN0619", 0 },
+ { "ELAN061A", 0 },
+ { "ELAN061B", 0 },
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN061E", 0 },
+ { "ELAN061F", 0 },
{ "ELAN0620", 0 },
{ "ELAN0621", 0 },
{ "ELAN0622", 0 },
+ { "ELAN0623", 0 },
+ { "ELAN0624", 0 },
+ { "ELAN0625", 0 },
+ { "ELAN0626", 0 },
+ { "ELAN0627", 0 },
+ { "ELAN0628", 0 },
+ { "ELAN0629", 0 },
+ { "ELAN062A", 0 },
+ { "ELAN062B", 0 },
+ { "ELAN062C", 0 },
+ { "ELAN062D", 0 },
+ { "ELAN0631", 0 },
+ { "ELAN0632", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index d3ff1fc09af7..94f7ca5ad077 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -373,6 +373,8 @@ static irqreturn_t psmouse_interrupt(struct serio *serio,
if (ps2_handle_response(&psmouse->ps2dev, data))
goto out;
+ pm_wakeup_event(&serio->dev, 0);
+
if (psmouse->state <= PSMOUSE_RESYNCING)
goto out;
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index fc3ab93b7aea..7fb358f96195 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
error = rmi_register_function(fn);
if (error)
- goto err_put_fn;
+ return error;
if (pdt->function_number == 0x01)
data->f01_container = fn;
@@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
list_add_tail(&fn->node, &data->function_list);
return RMI_SCAN_CONTINUE;
-
-err_put_fn:
- put_device(&fn->dev);
- return error;
}
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index df64d6aed4f7..93901ebd122a 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
}
rc = f11_write_control_regs(fn, &f11->sens_query,
- &f11->dev_controls, fn->fd.query_base_addr);
+ &f11->dev_controls, fn->fd.control_base_addr);
if (rc)
dev_warn(&fn->dev, "Failed to write control registers\n");
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index a6f515bcab22..516fea06ed59 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -456,25 +456,15 @@ static int rmi_f54_vidioc_fmt(struct file *file, void *priv,
static int rmi_f54_vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
{
+ struct f54_data *f54 = video_drvdata(file);
+
if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- switch (fmt->index) {
- case 0:
- fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
- break;
-
- case 1:
- fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD08;
- break;
-
- case 2:
- fmt->pixelformat = V4L2_TCH_FMT_TU16;
- break;
-
- default:
+ if (fmt->index)
return -EINVAL;
- }
+
+ fmt->pixelformat = f54->format.pixelformat;
return 0;
}
@@ -692,6 +682,7 @@ static int rmi_f54_probe(struct rmi_function *fn)
return -ENOMEM;
rmi_f54_create_input_map(f54);
+ rmi_f54_set_input(f54, 0);
/* register video device */
strlcpy(f54->v4l2.name, F54_NAME, sizeof(f54->v4l2.name));
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index c9c7224d5ae0..bfe436ccb046 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -254,6 +254,7 @@ config SERIO_APBPS2
config SERIO_OLPC_APSP
tristate "OLPC AP-SP input support"
+ depends on ARCH_MMP || COMPILE_TEST
help
Say Y here if you want support for the keyboard and touchpad included
in the OLPC XO-1.75 and XO-4 laptops.
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index a8b9be3e28db..7935e52b5435 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -440,5 +440,7 @@ static void __exit hv_kbd_exit(void)
}
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Keyboard Driver");
+
module_init(hv_kbd_init);
module_exit(hv_kbd_exit);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 95a78ccbd847..6462f1798fbb 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -573,9 +573,6 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
port = &i8042_ports[port_no];
serio = port->exists ? port->serio : NULL;
- if (irq && serio)
- pm_wakeup_event(&serio->dev, 0);
-
filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
port_no, irq,
dfl & SERIO_PARITY ? ", bad parity" : "",
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index e6a07e68d1ff..22b8e05aa36c 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -409,6 +409,7 @@ bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data)
ps2dev->nak = PS2_RET_ERR;
break;
}
+ /* Fall through */
/*
* Workaround for mice which don't ACK the Get ID command.
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 7a4884ad198b..a2029c3235af 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1312,4 +1312,14 @@ config TOUCHSCREEN_ROHM_BU21023
To compile this driver as a module, choose M here: the
module will be called bu21023_ts.
+config TOUCHSCREEN_IQS5XX
+ tristate "Azoteq IQS550/572/525 trackpad/touchscreen controller"
+ depends on I2C
+ help
+ Say Y to enable support for the Azoteq IQS550/572/525
+ family of trackpad/touchscreen controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iqs5xx.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fcc7605fba8d..084a596a0c8b 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -110,3 +110,4 @@ obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
obj-$(CONFIG_TOUCHSCREEN_RASPBERRYPI_FW) += raspberrypi-ts.o
+obj-$(CONFIG_TOUCHSCREEN_IQS5XX) += iqs5xx.o
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 702bfda7ee77..c639ebce914c 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Simon Budig, <simon.budig@kernelconcepts.de>
* Daniel Wagener <daniel.wagener@kernelconcepts.de> (M09 firmware support)
* Lothar Waßmann <LW@KARO-electronics.de> (DT support)
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
@@ -39,7 +27,6 @@
#include <linux/gpio/consumer.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/of_device.h>
#define WORK_REGISTER_THRESHOLD 0x00
#define WORK_REGISTER_REPORT_RATE 0x08
@@ -1073,7 +1060,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return -ENOMEM;
}
- chip_data = of_device_get_match_data(&client->dev);
+ chip_data = device_get_match_data(&client->dev);
if (!chip_data)
chip_data = (const struct edt_i2c_chip_data *)id->driver_data;
if (!chip_data || !chip_data->max_support_points) {
@@ -1254,7 +1241,6 @@ static const struct i2c_device_id edt_ft5x06_ts_id[] = {
};
MODULE_DEVICE_TABLE(i2c, edt_ft5x06_ts_id);
-#ifdef CONFIG_OF
static const struct of_device_id edt_ft5x06_of_match[] = {
{ .compatible = "edt,edt-ft5206", .data = &edt_ft5x06_data },
{ .compatible = "edt,edt-ft5306", .data = &edt_ft5x06_data },
@@ -1266,12 +1252,11 @@ static const struct of_device_id edt_ft5x06_of_match[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, edt_ft5x06_of_match);
-#endif
static struct i2c_driver edt_ft5x06_ts_driver = {
.driver = {
.name = "edt_ft5x06",
- .of_match_table = of_match_ptr(edt_ft5x06_of_match),
+ .of_match_table = edt_ft5x06_of_match,
.pm = &edt_ft5x06_ts_pm_ops,
},
.id_table = edt_ft5x06_ts_id,
@@ -1283,4 +1268,4 @@ module_i2c_driver(edt_ft5x06_ts_driver);
MODULE_AUTHOR("Simon Budig <simon.budig@kernelconcepts.de>");
MODULE_DESCRIPTION("EDT FT5x06 I2C Touchscreen Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index f57d82220a88..f7c1d168dd89 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/of.h>
@@ -47,6 +48,8 @@ struct goodix_ts_data {
struct touchscreen_properties prop;
unsigned int max_touch_num;
unsigned int int_trigger_type;
+ struct regulator *avdd28;
+ struct regulator *vddio;
struct gpio_desc *gpiod_int;
struct gpio_desc *gpiod_rst;
u16 id;
@@ -216,6 +219,7 @@ static const struct goodix_chip_data *goodix_get_chip_data(u16 id)
{
switch (id) {
case 1151:
+ case 5663:
case 5688:
return &gt1x_chip_data;
@@ -532,6 +536,24 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return -EINVAL;
dev = &ts->client->dev;
+ ts->avdd28 = devm_regulator_get(dev, "AVDD28");
+ if (IS_ERR(ts->avdd28)) {
+ error = PTR_ERR(ts->avdd28);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get AVDD28 regulator: %d\n", error);
+ return error;
+ }
+
+ ts->vddio = devm_regulator_get(dev, "VDDIO");
+ if (IS_ERR(ts->vddio)) {
+ error = PTR_ERR(ts->vddio);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get VDDIO regulator: %d\n", error);
+ return error;
+ }
+
/* Get the interrupt GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN);
if (IS_ERR(gpiod)) {
@@ -764,6 +786,14 @@ err_release_cfg:
complete_all(&ts->firmware_loading_complete);
}
+static void goodix_disable_regulators(void *arg)
+{
+ struct goodix_ts_data *ts = arg;
+
+ regulator_disable(ts->vddio);
+ regulator_disable(ts->avdd28);
+}
+
static int goodix_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -789,6 +819,29 @@ static int goodix_ts_probe(struct i2c_client *client,
if (error)
return error;
+ /* power up the controller */
+ error = regulator_enable(ts->avdd28);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable AVDD28 regulator: %d\n",
+ error);
+ return error;
+ }
+
+ error = regulator_enable(ts->vddio);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable VDDIO regulator: %d\n",
+ error);
+ regulator_disable(ts->avdd28);
+ return error;
+ }
+
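+ /*
+ * Register a devres action so both regulators are disabled
+ * automatically on probe failure and on driver removal.
+ */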
+ error = devm_add_action_or_reset(&client->dev,
+ goodix_disable_regulators, ts);
+ if (error)
+ return error;
+
if (ts->gpiod_int && ts->gpiod_rst) {
/* reset the controller */
error = goodix_reset(ts);
@@ -945,6 +998,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt1151" },
+ { .compatible = "goodix,gt5663" },
{ .compatible = "goodix,gt5688" },
{ .compatible = "goodix,gt911" },
{ .compatible = "goodix,gt9110" },
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
new file mode 100644
index 000000000000..b832fe062645
--- /dev/null
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS550/572/525 Trackpad/Touchscreen Controller
+ *
+ * Copyright (C) 2018
+ * Author: Jeff LaBundy <jeff@labundy.com>
+ *
+ * These devices require firmware exported from a PC-based configuration tool
+ * made available by the vendor. Firmware files may be pushed to the device's
+ * nonvolatile memory by writing the filename to the 'fw_file' sysfs control.
+ *
+ * Link to PC-based configuration tool and data sheet: http://www.azoteq.com/
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define IQS5XX_FW_FILE_LEN 64
+#define IQS5XX_NUM_RETRIES 10
+#define IQS5XX_NUM_POINTS 256
+#define IQS5XX_NUM_CONTACTS 5
+#define IQS5XX_WR_BYTES_MAX 2
+
+#define IQS5XX_PROD_NUM_IQS550 40
+#define IQS5XX_PROD_NUM_IQS572 58
+#define IQS5XX_PROD_NUM_IQS525 52
+#define IQS5XX_PROJ_NUM_A000 0
+#define IQS5XX_PROJ_NUM_B000 15
+#define IQS5XX_MAJOR_VER_MIN 2
+
+#define IQS5XX_RESUME 0x00
+#define IQS5XX_SUSPEND 0x01
+
+#define IQS5XX_SW_INPUT_EVENT 0x10
+#define IQS5XX_SETUP_COMPLETE 0x40
+#define IQS5XX_EVENT_MODE 0x01
+#define IQS5XX_TP_EVENT 0x04
+
+#define IQS5XX_FLIP_X 0x01
+#define IQS5XX_FLIP_Y 0x02
+#define IQS5XX_SWITCH_XY_AXIS 0x04
+
+#define IQS5XX_PROD_NUM 0x0000
+#define IQS5XX_ABS_X 0x0016
+#define IQS5XX_ABS_Y 0x0018
+#define IQS5XX_SYS_CTRL0 0x0431
+#define IQS5XX_SYS_CTRL1 0x0432
+#define IQS5XX_SYS_CFG0 0x058E
+#define IQS5XX_SYS_CFG1 0x058F
+#define IQS5XX_TOTAL_RX 0x063D
+#define IQS5XX_TOTAL_TX 0x063E
+#define IQS5XX_XY_CFG0 0x0669
+#define IQS5XX_X_RES 0x066E
+#define IQS5XX_Y_RES 0x0670
+#define IQS5XX_CHKSM 0x83C0
+#define IQS5XX_APP 0x8400
+#define IQS5XX_CSTM 0xBE00
+#define IQS5XX_PMAP_END 0xBFFF
+#define IQS5XX_END_COMM 0xEEEE
+
+#define IQS5XX_CHKSM_LEN (IQS5XX_APP - IQS5XX_CHKSM)
+#define IQS5XX_APP_LEN (IQS5XX_CSTM - IQS5XX_APP)
+#define IQS5XX_CSTM_LEN (IQS5XX_PMAP_END + 1 - IQS5XX_CSTM)
+#define IQS5XX_PMAP_LEN (IQS5XX_PMAP_END + 1 - IQS5XX_CHKSM)
+
+#define IQS5XX_REC_HDR_LEN 4
+#define IQS5XX_REC_LEN_MAX 255
+#define IQS5XX_REC_TYPE_DATA 0x00
+#define IQS5XX_REC_TYPE_EOF 0x01
+
+#define IQS5XX_BL_ADDR_MASK 0x40
+#define IQS5XX_BL_CMD_VER 0x00
+#define IQS5XX_BL_CMD_READ 0x01
+#define IQS5XX_BL_CMD_EXEC 0x02
+#define IQS5XX_BL_CMD_CRC 0x03
+#define IQS5XX_BL_BLK_LEN_MAX 64
+#define IQS5XX_BL_ID 0x0200
+#define IQS5XX_BL_STATUS_RESET 0x00
+#define IQS5XX_BL_STATUS_AVAIL 0xA5
+#define IQS5XX_BL_STATUS_NONE 0xEE
+#define IQS5XX_BL_CRC_PASS 0x00
+#define IQS5XX_BL_CRC_FAIL 0x01
+#define IQS5XX_BL_ATTEMPTS 3
+
+struct iqs5xx_private {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct gpio_desc *reset_gpio;
+ struct mutex lock;
+ u8 bl_status;
+};
+
+struct iqs5xx_dev_id_info {
+ __be16 prod_num;
+ __be16 proj_num;
+ u8 major_ver;
+ u8 minor_ver;
+ u8 bl_status;
+} __packed;
+
+struct iqs5xx_ihex_rec {
+ char start;
+ char len[2];
+ char addr[4];
+ char type[2];
+ char data[2];
+} __packed;
+
+struct iqs5xx_touch_data {
+ __be16 abs_x;
+ __be16 abs_y;
+ __be16 strength;
+ u8 area;
+} __packed;
+
+static int iqs5xx_read_burst(struct i2c_client *client,
+ u16 reg, void *val, u16 len)
+{
+ __be16 reg_buf = cpu_to_be16(reg);
+ int ret, i;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(reg_buf),
+ .buf = (u8 *)&reg_buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = (u8 *)val,
+ },
+ };
+
+ /*
+ * The first addressing attempt outside of a communication window fails
+ * and must be retried, after which the device clock stretches until it
+ * is available.
+ */
+ for (i = 0; i < IQS5XX_NUM_RETRIES; i++) {
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret == ARRAY_SIZE(msg))
+ return 0;
+
+ usleep_range(200, 300);
+ }
+
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "Failed to read from address 0x%04X: %d\n",
+ reg, ret);
+
+ return ret;
+}
+
+static int iqs5xx_read_word(struct i2c_client *client, u16 reg, u16 *val)
+{
+ __be16 val_buf;
+ int error;
+
+ error = iqs5xx_read_burst(client, reg, &val_buf, sizeof(val_buf));
+ if (error)
+ return error;
+
+ *val = be16_to_cpu(val_buf);
+
+ return 0;
+}
+
+static int iqs5xx_read_byte(struct i2c_client *client, u16 reg, u8 *val)
+{
+ return iqs5xx_read_burst(client, reg, val, sizeof(*val));
+}
+
+static int iqs5xx_write_burst(struct i2c_client *client,
+ u16 reg, const void *val, u16 len)
+{
+ int ret, i;
+ u16 mlen = sizeof(reg) + len;
+ u8 mbuf[sizeof(reg) + IQS5XX_WR_BYTES_MAX];
+
+ if (len > IQS5XX_WR_BYTES_MAX)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, mbuf);
+ memcpy(mbuf + sizeof(reg), val, len);
+
+ /*
+ * The first addressing attempt outside of a communication window fails
+ * and must be retried, after which the device clock stretches until it
+ * is available.
+ */
+ for (i = 0; i < IQS5XX_NUM_RETRIES; i++) {
+ ret = i2c_master_send(client, mbuf, mlen);
+ if (ret == mlen)
+ return 0;
+
+ usleep_range(200, 300);
+ }
+
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "Failed to write to address 0x%04X: %d\n",
+ reg, ret);
+
+ return ret;
+}
+
+static int iqs5xx_write_word(struct i2c_client *client, u16 reg, u16 val)
+{
+ __be16 val_buf = cpu_to_be16(val);
+
+ return iqs5xx_write_burst(client, reg, &val_buf, sizeof(val_buf));
+}
+
+static int iqs5xx_write_byte(struct i2c_client *client, u16 reg, u8 val)
+{
+ return iqs5xx_write_burst(client, reg, &val, sizeof(val));
+}
+
+static void iqs5xx_reset(struct i2c_client *client)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+
+ gpiod_set_value_cansleep(iqs5xx->reset_gpio, 1);
+ usleep_range(200, 300);
+
+ gpiod_set_value_cansleep(iqs5xx->reset_gpio, 0);
+}
+
+static int iqs5xx_bl_cmd(struct i2c_client *client, u8 bl_cmd, u16 bl_addr)
+{
+ struct i2c_msg msg;
+ int ret;
+ u8 mbuf[sizeof(bl_cmd) + sizeof(bl_addr)];
+
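+ /* The bootloader responds at the application I2C address with bit 6 toggled. */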
+ msg.addr = client->addr ^ IQS5XX_BL_ADDR_MASK;
+ msg.flags = 0;
+ msg.len = sizeof(bl_cmd);
+ msg.buf = mbuf;
+
+ *mbuf = bl_cmd;
+
+ switch (bl_cmd) {
+ case IQS5XX_BL_CMD_VER:
+ case IQS5XX_BL_CMD_CRC:
+ case IQS5XX_BL_CMD_EXEC:
+ break;
+ case IQS5XX_BL_CMD_READ:
+ msg.len += sizeof(bl_addr);
+ put_unaligned_be16(bl_addr, mbuf + sizeof(bl_cmd));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1)
+ goto msg_fail;
+
+ switch (bl_cmd) {
+ case IQS5XX_BL_CMD_VER:
+ msg.len = sizeof(u16);
+ break;
+ case IQS5XX_BL_CMD_CRC:
+ msg.len = sizeof(u8);
+ /*
+ * This delay saves the bus controller the trouble of having to
+ * tolerate a relatively long clock-stretching period while the
+ * CRC is calculated.
+ */
+ msleep(50);
+ break;
+ case IQS5XX_BL_CMD_EXEC:
+ usleep_range(10000, 10100);
+ /* fall through */
+ default:
+ return 0;
+ }
+
+ msg.flags = I2C_M_RD;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1)
+ goto msg_fail;
+
+ if (bl_cmd == IQS5XX_BL_CMD_VER &&
+ get_unaligned_be16(mbuf) != IQS5XX_BL_ID) {
+ dev_err(&client->dev, "Unrecognized bootloader ID: 0x%04X\n",
+ get_unaligned_be16(mbuf));
+ return -EINVAL;
+ }
+
+ if (bl_cmd == IQS5XX_BL_CMD_CRC && *mbuf != IQS5XX_BL_CRC_PASS) {
+ dev_err(&client->dev, "Bootloader CRC failed\n");
+ return -EIO;
+ }
+
+ return 0;
+
+msg_fail:
+ if (ret >= 0)
+ ret = -EIO;
+
+ if (bl_cmd != IQS5XX_BL_CMD_VER)
+ dev_err(&client->dev,
+ "Unsuccessful bootloader command 0x%02X: %d\n",
+ bl_cmd, ret);
+
+ return ret;
+}
+
+static int iqs5xx_bl_open(struct i2c_client *client)
+{
+ int error, i, j;
+
+ /*
+ * The device opens a bootloader polling window for 2 ms following the
+ * release of reset. If the host cannot establish communication during
+ * this time frame, it must cycle reset again.
+ */
+ for (i = 0; i < IQS5XX_BL_ATTEMPTS; i++) {
+ iqs5xx_reset(client);
+
+ for (j = 0; j < IQS5XX_NUM_RETRIES; j++) {
+ error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_VER, 0);
+ if (!error || error == -EINVAL)
+ return error;
+ }
+ }
+
+ dev_err(&client->dev, "Failed to open bootloader: %d\n", error);
+
+ return error;
+}
+
+static int iqs5xx_bl_write(struct i2c_client *client,
+ u16 bl_addr, u8 *pmap_data, u16 pmap_len)
+{
+ struct i2c_msg msg;
+ int ret, i;
+ u8 mbuf[sizeof(bl_addr) + IQS5XX_BL_BLK_LEN_MAX];
+
+ if (pmap_len % IQS5XX_BL_BLK_LEN_MAX)
+ return -EINVAL;
+
+ msg.addr = client->addr ^ IQS5XX_BL_ADDR_MASK;
+ msg.flags = 0;
+ msg.len = sizeof(mbuf);
+ msg.buf = mbuf;
+
+ for (i = 0; i < pmap_len; i += IQS5XX_BL_BLK_LEN_MAX) {
+ put_unaligned_be16(bl_addr + i, mbuf);
+ memcpy(mbuf + sizeof(bl_addr), pmap_data + i,
+ sizeof(mbuf) - sizeof(bl_addr));
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1)
+ goto msg_fail;
+
+ usleep_range(10000, 10100);
+ }
+
+ return 0;
+
+msg_fail:
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "Failed to write block at address 0x%04X: %d\n",
+ bl_addr + i, ret);
+
+ return ret;
+}
+
+static int iqs5xx_bl_verify(struct i2c_client *client,
+ u16 bl_addr, u8 *pmap_data, u16 pmap_len)
+{
+ struct i2c_msg msg;
+ int ret, i;
+ u8 bl_data[IQS5XX_BL_BLK_LEN_MAX];
+
+ if (pmap_len % IQS5XX_BL_BLK_LEN_MAX)
+ return -EINVAL;
+
+ msg.addr = client->addr ^ IQS5XX_BL_ADDR_MASK;
+ msg.flags = I2C_M_RD;
+ msg.len = sizeof(bl_data);
+ msg.buf = bl_data;
+
+ for (i = 0; i < pmap_len; i += IQS5XX_BL_BLK_LEN_MAX) {
+ ret = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_READ, bl_addr + i);
+ if (ret)
+ return ret;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1)
+ goto msg_fail;
+
+ if (memcmp(bl_data, pmap_data + i, sizeof(bl_data))) {
+ dev_err(&client->dev,
+ "Failed to verify block at address 0x%04X\n",
+ bl_addr + i);
+ return -EIO;
+ }
+ }
+
+ return 0;
+
+msg_fail:
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "Failed to read block at address 0x%04X: %d\n",
+ bl_addr + i, ret);
+
+ return ret;
+}
+
+static int iqs5xx_set_state(struct i2c_client *client, u8 state)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+ int error1, error2;
+
+ if (iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
+ return 0;
+
+ mutex_lock(&iqs5xx->lock);
+
+ /*
+ * Addressing the device outside of a communication window prompts it
+ * to assert the RDY output, so disable the interrupt line to prevent
+ * the handler from servicing a false interrupt.
+ */
+ disable_irq(client->irq);
+
+ error1 = iqs5xx_write_byte(client, IQS5XX_SYS_CTRL1, state);
+ error2 = iqs5xx_write_byte(client, IQS5XX_END_COMM, 0);
+
+ usleep_range(50, 100);
+ enable_irq(client->irq);
+
+ mutex_unlock(&iqs5xx->lock);
+
+ if (error1)
+ return error1;
+
+ return error2;
+}
+
+static int iqs5xx_open(struct input_dev *input)
+{
+ struct iqs5xx_private *iqs5xx = input_get_drvdata(input);
+
+ return iqs5xx_set_state(iqs5xx->client, IQS5XX_RESUME);
+}
+
+static void iqs5xx_close(struct input_dev *input)
+{
+ struct iqs5xx_private *iqs5xx = input_get_drvdata(input);
+
+ iqs5xx_set_state(iqs5xx->client, IQS5XX_SUSPEND);
+}
+
+static int iqs5xx_axis_init(struct i2c_client *client)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+ struct touchscreen_properties prop;
+ struct input_dev *input;
+ int error;
+ u16 max_x, max_x_hw;
+ u16 max_y, max_y_hw;
+ u8 val;
+
+ if (!iqs5xx->input) {
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = client->name;
+ input->id.bustype = BUS_I2C;
+ input->open = iqs5xx_open;
+ input->close = iqs5xx_close;
+
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+ input_set_capability(input, EV_ABS, ABS_MT_PRESSURE);
+
+ error = input_mt_init_slots(input,
+ IQS5XX_NUM_CONTACTS, INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to initialize slots: %d\n", error);
+ return error;
+ }
+
+ input_set_drvdata(input, iqs5xx);
+ iqs5xx->input = input;
+ }
+
+ touchscreen_parse_properties(iqs5xx->input, true, &prop);
+
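+ /*
+ * The maximum coordinates supported by the hardware follow from the
+ * number of RX/TX channels, with IQS5XX_NUM_POINTS points of
+ * resolution per channel.
+ */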
+ error = iqs5xx_read_byte(client, IQS5XX_TOTAL_RX, &val);
+ if (error)
+ return error;
+ max_x_hw = (val - 1) * IQS5XX_NUM_POINTS;
+
+ error = iqs5xx_read_byte(client, IQS5XX_TOTAL_TX, &val);
+ if (error)
+ return error;
+ max_y_hw = (val - 1) * IQS5XX_NUM_POINTS;
+
+ error = iqs5xx_read_byte(client, IQS5XX_XY_CFG0, &val);
+ if (error)
+ return error;
+
+ if (val & IQS5XX_SWITCH_XY_AXIS)
+ swap(max_x_hw, max_y_hw);
+
+ if (prop.swap_x_y)
+ val ^= IQS5XX_SWITCH_XY_AXIS;
+
+ if (prop.invert_x)
+ val ^= prop.swap_x_y ? IQS5XX_FLIP_Y : IQS5XX_FLIP_X;
+
+ if (prop.invert_y)
+ val ^= prop.swap_x_y ? IQS5XX_FLIP_X : IQS5XX_FLIP_Y;
+
+ error = iqs5xx_write_byte(client, IQS5XX_XY_CFG0, val);
+ if (error)
+ return error;
+
+ if (prop.max_x > max_x_hw) {
+ dev_err(&client->dev, "Invalid maximum x-coordinate: %u > %u\n",
+ prop.max_x, max_x_hw);
+ return -EINVAL;
+ } else if (prop.max_x == 0) {
+ error = iqs5xx_read_word(client, IQS5XX_X_RES, &max_x);
+ if (error)
+ return error;
+
+ input_abs_set_max(iqs5xx->input,
+ prop.swap_x_y ? ABS_MT_POSITION_Y :
+ ABS_MT_POSITION_X,
+ max_x);
+ } else {
+ max_x = (u16)prop.max_x;
+ }
+
+ if (prop.max_y > max_y_hw) {
+ dev_err(&client->dev, "Invalid maximum y-coordinate: %u > %u\n",
+ prop.max_y, max_y_hw);
+ return -EINVAL;
+ } else if (prop.max_y == 0) {
+ error = iqs5xx_read_word(client, IQS5XX_Y_RES, &max_y);
+ if (error)
+ return error;
+
+ input_abs_set_max(iqs5xx->input,
+ prop.swap_x_y ? ABS_MT_POSITION_X :
+ ABS_MT_POSITION_Y,
+ max_y);
+ } else {
+ max_y = (u16)prop.max_y;
+ }
+
+ /*
+ * Write horizontal and vertical resolution to the device in case its
+ * original defaults were overridden or swapped as per the properties
+ * specified in the device tree.
+ */
+ error = iqs5xx_write_word(client,
+ prop.swap_x_y ? IQS5XX_Y_RES : IQS5XX_X_RES,
+ max_x);
+ if (error)
+ return error;
+
+ return iqs5xx_write_word(client,
+ prop.swap_x_y ? IQS5XX_X_RES : IQS5XX_Y_RES,
+ max_y);
+}
+
+static int iqs5xx_dev_init(struct i2c_client *client)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+ struct iqs5xx_dev_id_info *dev_id_info;
+ int error;
+ u8 val;
+ u8 buf[sizeof(*dev_id_info) + 1];
+
+ error = iqs5xx_read_burst(client, IQS5XX_PROD_NUM,
+ &buf[1], sizeof(*dev_id_info));
+ if (error)
+ return iqs5xx_bl_open(client);
+
+ /*
+ * A000 and B000 devices use 8-bit and 16-bit addressing, respectively.
+ * Querying an A000 device's version information with 16-bit addressing
+ * gives the appearance that the data is shifted by one byte; a nonzero
+ * leading array element suggests this could be the case (in which case
+ * the missing zero is prepended).
+ */
+ buf[0] = 0;
+ dev_id_info = (struct iqs5xx_dev_id_info *)&buf[(buf[1] > 0) ? 0 : 1];
+
+ switch (be16_to_cpu(dev_id_info->prod_num)) {
+ case IQS5XX_PROD_NUM_IQS550:
+ case IQS5XX_PROD_NUM_IQS572:
+ case IQS5XX_PROD_NUM_IQS525:
+ break;
+ default:
+ dev_err(&client->dev, "Unrecognized product number: %u\n",
+ be16_to_cpu(dev_id_info->prod_num));
+ return -EINVAL;
+ }
+
+ switch (be16_to_cpu(dev_id_info->proj_num)) {
+ case IQS5XX_PROJ_NUM_A000:
+ dev_err(&client->dev, "Unsupported project number: %u\n",
+ be16_to_cpu(dev_id_info->proj_num));
+ return iqs5xx_bl_open(client);
+ case IQS5XX_PROJ_NUM_B000:
+ break;
+ default:
+ dev_err(&client->dev, "Unrecognized project number: %u\n",
+ be16_to_cpu(dev_id_info->proj_num));
+ return -EINVAL;
+ }
+
+ if (dev_id_info->major_ver < IQS5XX_MAJOR_VER_MIN) {
+ dev_err(&client->dev, "Unsupported major version: %u\n",
+ dev_id_info->major_ver);
+ return iqs5xx_bl_open(client);
+ }
+
+ switch (dev_id_info->bl_status) {
+ case IQS5XX_BL_STATUS_AVAIL:
+ case IQS5XX_BL_STATUS_NONE:
+ break;
+ default:
+ dev_err(&client->dev,
+ "Unrecognized bootloader status: 0x%02X\n",
+ dev_id_info->bl_status);
+ return -EINVAL;
+ }
+
+ error = iqs5xx_axis_init(client);
+ if (error)
+ return error;
+
+ error = iqs5xx_read_byte(client, IQS5XX_SYS_CFG0, &val);
+ if (error)
+ return error;
+
+ val |= IQS5XX_SETUP_COMPLETE;
+ val &= ~IQS5XX_SW_INPUT_EVENT;
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG0, val);
+ if (error)
+ return error;
+
+ val = IQS5XX_TP_EVENT | IQS5XX_EVENT_MODE;
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG1, val);
+ if (error)
+ return error;
+
+ error = iqs5xx_write_byte(client, IQS5XX_END_COMM, 0);
+ if (error)
+ return error;
+
+ iqs5xx->bl_status = dev_id_info->bl_status;
+
+ /*
+ * Closure of the first communication window that appears following the
+ * release of reset appears to kick off an initialization period during
+ * which further communication is met with clock stretching. The return
+ * from this function is delayed so that further communication attempts
+ * avoid this period.
+ */
+ msleep(100);
+
+ return 0;
+}
+
+static irqreturn_t iqs5xx_irq(int irq, void *data)
+{
+ struct iqs5xx_private *iqs5xx = data;
+ struct iqs5xx_touch_data touch_data[IQS5XX_NUM_CONTACTS];
+ struct i2c_client *client = iqs5xx->client;
+ struct input_dev *input = iqs5xx->input;
+ int error, i;
+
+ /*
+ * This check is purely a precaution, as the device does not assert the
+ * RDY output during bootloader mode. If the device operates outside of
+ * bootloader mode, the input device is guaranteed to be allocated.
+ */
+ if (iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
+ return IRQ_NONE;
+
+ error = iqs5xx_read_burst(client, IQS5XX_ABS_X,
+ touch_data, sizeof(touch_data));
+ if (error)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(touch_data); i++) {
+ u16 pressure = be16_to_cpu(touch_data[i].strength);
+
+ input_mt_slot(input, i);
+ if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
+ pressure != 0)) {
+ input_report_abs(input, ABS_MT_POSITION_X,
+ be16_to_cpu(touch_data[i].abs_x));
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ be16_to_cpu(touch_data[i].abs_y));
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+ }
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+
+ error = iqs5xx_write_byte(client, IQS5XX_END_COMM, 0);
+ if (error)
+ return IRQ_NONE;
+
+ /*
+ * Once the communication window is closed, a small delay is added to
+ * ensure the device's RDY output has been deasserted by the time the
+ * interrupt handler returns.
+ */
+ usleep_range(50, 100);
+
+ return IRQ_HANDLED;
+}
+
+static int iqs5xx_fw_file_parse(struct i2c_client *client,
+ const char *fw_file, u8 *pmap)
+{
+ const struct firmware *fw;
+ struct iqs5xx_ihex_rec *rec;
+ size_t pos = 0;
+ int error, i;
+ u16 rec_num = 1;
+ u16 rec_addr;
+ u8 rec_len, rec_type, rec_chksm, chksm;
+ u8 rec_hdr[IQS5XX_REC_HDR_LEN];
+ u8 rec_data[IQS5XX_REC_LEN_MAX];
+
+ /*
+ * Firmware exported from the vendor's configuration tool deviates from
+ * standard ihex as follows: (1) the checksum for records corresponding
+ * to user-exported settings is not recalculated, and (2) an address of
+ * 0xFFFF is used for the EOF record.
+ *
+ * Because the ihex2fw tool tolerates neither (1) nor (2), the slightly
+ * nonstandard ihex firmware is parsed directly by the driver.
+ */
+ error = request_firmware(&fw, fw_file, &client->dev);
+ if (error) {
+ dev_err(&client->dev, "Failed to request firmware %s: %d\n",
+ fw_file, error);
+ return error;
+ }
+
+ do {
+ if (pos + sizeof(*rec) > fw->size) {
+ dev_err(&client->dev, "Insufficient firmware size\n");
+ error = -EINVAL;
+ break;
+ }
+ rec = (struct iqs5xx_ihex_rec *)(fw->data + pos);
+ pos += sizeof(*rec);
+
+ if (rec->start != ':') {
+ dev_err(&client->dev, "Invalid start at record %u\n",
+ rec_num);
+ error = -EINVAL;
+ break;
+ }
+
+ error = hex2bin(rec_hdr, rec->len, sizeof(rec_hdr));
+ if (error) {
+ dev_err(&client->dev, "Invalid header at record %u\n",
+ rec_num);
+ break;
+ }
+
+ rec_len = *rec_hdr;
+ rec_addr = get_unaligned_be16(rec_hdr + sizeof(rec_len));
+ rec_type = *(rec_hdr + sizeof(rec_len) + sizeof(rec_addr));
+
+ if (pos + rec_len * 2 > fw->size) {
+ dev_err(&client->dev, "Insufficient firmware size\n");
+ error = -EINVAL;
+ break;
+ }
+ pos += (rec_len * 2);
+
+ error = hex2bin(rec_data, rec->data, rec_len);
+ if (error) {
+ dev_err(&client->dev, "Invalid data at record %u\n",
+ rec_num);
+ break;
+ }
+
+ error = hex2bin(&rec_chksm,
+ rec->data + rec_len * 2, sizeof(rec_chksm));
+ if (error) {
+ dev_err(&client->dev, "Invalid checksum at record %u\n",
+ rec_num);
+ break;
+ }
+
+ chksm = 0;
+ for (i = 0; i < sizeof(rec_hdr); i++)
+ chksm += rec_hdr[i];
+ for (i = 0; i < rec_len; i++)
+ chksm += rec_data[i];
+ chksm = ~chksm + 1;
+
+ if (chksm != rec_chksm && rec_addr < IQS5XX_CSTM) {
+ dev_err(&client->dev,
+ "Incorrect checksum at record %u\n",
+ rec_num);
+ error = -EINVAL;
+ break;
+ }
+
+ switch (rec_type) {
+ case IQS5XX_REC_TYPE_DATA:
+ if (rec_addr < IQS5XX_CHKSM ||
+ rec_addr > IQS5XX_PMAP_END) {
+ dev_err(&client->dev,
+ "Invalid address at record %u\n",
+ rec_num);
+ error = -EINVAL;
+ } else {
+ memcpy(pmap + rec_addr - IQS5XX_CHKSM,
+ rec_data, rec_len);
+ }
+ break;
+ case IQS5XX_REC_TYPE_EOF:
+ break;
+ default:
+ dev_err(&client->dev, "Invalid type at record %u\n",
+ rec_num);
+ error = -EINVAL;
+ }
+
+ if (error)
+ break;
+
+ rec_num++;
+ while (pos < fw->size) {
+ if (*(fw->data + pos) == ':')
+ break;
+ pos++;
+ }
+ } while (rec_type != IQS5XX_REC_TYPE_EOF);
+
+ release_firmware(fw);
+
+ return error;
+}
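
For reference, the running checksum above implements the standard Intel HEX rule: the checksum byte is the two's complement of the sum of all other bytes in the record (length, address, type and data), so a well-formed record sums to zero. A self-contained sketch of that rule (hypothetical helper, not part of the driver):

#include <stddef.h>
#include <stdint.h>

static uint8_t ihex_checksum(const uint8_t *rec_bytes, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += rec_bytes[i];

	return (uint8_t)(~sum + 1);	/* two's complement of the sum */
}

This is exactly the property the vendor tool breaks for user-exported settings records, which is why records at or above IQS5XX_CSTM are exempt from the comparison above.
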
+
+static int iqs5xx_fw_file_write(struct i2c_client *client, const char *fw_file)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+ int error;
+ u8 *pmap;
+
+ if (iqs5xx->bl_status == IQS5XX_BL_STATUS_NONE)
+ return -EPERM;
+
+ pmap = kzalloc(IQS5XX_PMAP_LEN, GFP_KERNEL);
+ if (!pmap)
+ return -ENOMEM;
+
+ error = iqs5xx_fw_file_parse(client, fw_file, pmap);
+ if (error)
+ goto err_kfree;
+
+ mutex_lock(&iqs5xx->lock);
+
+ /*
+ * Disable the interrupt line in case the first attempt(s) to enter the
+ * bootloader are not fast enough, in which case the device may
+ * assert the RDY output until the next attempt.
+ */
+ disable_irq(client->irq);
+
+ iqs5xx->bl_status = IQS5XX_BL_STATUS_RESET;
+
+ error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_VER, 0);
+ if (error) {
+ error = iqs5xx_bl_open(client);
+ if (error)
+ goto err_reset;
+ }
+
+ error = iqs5xx_bl_write(client, IQS5XX_CHKSM, pmap, IQS5XX_PMAP_LEN);
+ if (error)
+ goto err_reset;
+
+ error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_CRC, 0);
+ if (error)
+ goto err_reset;
+
+ error = iqs5xx_bl_verify(client, IQS5XX_CSTM,
+ pmap + IQS5XX_CHKSM_LEN + IQS5XX_APP_LEN,
+ IQS5XX_CSTM_LEN);
+ if (error)
+ goto err_reset;
+
+ error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_EXEC, 0);
+
+err_reset:
+ if (error) {
+ iqs5xx_reset(client);
+ usleep_range(10000, 10100);
+ }
+
+ error = iqs5xx_dev_init(client);
+ if (!error && iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
+ error = -EINVAL;
+
+ enable_irq(client->irq);
+
+ mutex_unlock(&iqs5xx->lock);
+
+err_kfree:
+ kfree(pmap);
+
+ return error;
+}
+
+static ssize_t fw_file_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs5xx->client;
+ size_t len = count;
+ bool input_reg = !iqs5xx->input;
+ char fw_file[IQS5XX_FW_FILE_LEN + 1];
+ int error;
+
+ if (!len)
+ return -EINVAL;
+
+ if (buf[len - 1] == '\n')
+ len--;
+
+ if (len > IQS5XX_FW_FILE_LEN)
+ return -ENAMETOOLONG;
+
+ memcpy(fw_file, buf, len);
+ fw_file[len] = '\0';
+
+ error = iqs5xx_fw_file_write(client, fw_file);
+ if (error)
+ return error;
+
+ /*
+ * If the input device had not been allocated earlier, it is guaranteed
+ * to have been allocated by iqs5xx_dev_init() by now and can be registered.
+ */
+ if (input_reg) {
+ error = input_register_device(iqs5xx->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register device: %d\n",
+ error);
+ return error;
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(fw_file);
+
+static struct attribute *iqs5xx_attrs[] = {
+ &dev_attr_fw_file.attr,
+ NULL,
+};
+
+static const struct attribute_group iqs5xx_attr_group = {
+ .attrs = iqs5xx_attrs,
+};
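
From user space, a firmware update is triggered by writing a file name understood by the firmware loader to the fw_file attribute under the I2C client's sysfs directory (typically somewhere below /sys/bus/i2c/devices/; the exact path depends on the bus and slave address). On the first successful update the input device is registered as well, per fw_file_store() above.
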
+
+static int __maybe_unused iqs5xx_suspend(struct device *dev)
+{
+ struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
+ struct input_dev *input = iqs5xx->input;
+ int error = 0;
+
+ if (!input)
+ return error;
+
+ mutex_lock(&input->mutex);
+
+ if (input->users)
+ error = iqs5xx_set_state(iqs5xx->client, IQS5XX_SUSPEND);
+
+ mutex_unlock(&input->mutex);
+
+ return error;
+}
+
+static int __maybe_unused iqs5xx_resume(struct device *dev)
+{
+ struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
+ struct input_dev *input = iqs5xx->input;
+ int error = 0;
+
+ if (!input)
+ return error;
+
+ mutex_lock(&input->mutex);
+
+ if (input->users)
+ error = iqs5xx_set_state(iqs5xx->client, IQS5XX_RESUME);
+
+ mutex_unlock(&input->mutex);
+
+ return error;
+}
+
+static SIMPLE_DEV_PM_OPS(iqs5xx_pm, iqs5xx_suspend, iqs5xx_resume);
+
+static int iqs5xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iqs5xx_private *iqs5xx;
+ int error;
+
+ iqs5xx = devm_kzalloc(&client->dev, sizeof(*iqs5xx), GFP_KERNEL);
+ if (!iqs5xx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&client->dev, iqs5xx);
+
+ i2c_set_clientdata(client, iqs5xx);
+ iqs5xx->client = client;
+
+ iqs5xx->reset_gpio = devm_gpiod_get(&client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(iqs5xx->reset_gpio)) {
+ error = PTR_ERR(iqs5xx->reset_gpio);
+ dev_err(&client->dev, "Failed to request GPIO: %d\n", error);
+ return error;
+ }
+
+ mutex_init(&iqs5xx->lock);
+
+ iqs5xx_reset(client);
+ usleep_range(10000, 10100);
+
+ error = iqs5xx_dev_init(client);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, iqs5xx_irq, IRQF_ONESHOT,
+ client->name, iqs5xx);
+ if (error) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ error = devm_device_add_group(&client->dev, &iqs5xx_attr_group);
+ if (error) {
+ dev_err(&client->dev, "Failed to add attributes: %d\n", error);
+ return error;
+ }
+
+ if (iqs5xx->input) {
+ error = input_register_device(iqs5xx->input);
+ if (error)
+ dev_err(&client->dev,
+ "Failed to register device: %d\n",
+ error);
+ }
+
+ return error;
+}
+
+static const struct i2c_device_id iqs5xx_id[] = {
+ { "iqs550", 0 },
+ { "iqs572", 1 },
+ { "iqs525", 2 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, iqs5xx_id);
+
+static const struct of_device_id iqs5xx_of_match[] = {
+ { .compatible = "azoteq,iqs550" },
+ { .compatible = "azoteq,iqs572" },
+ { .compatible = "azoteq,iqs525" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs5xx_of_match);
+
+static struct i2c_driver iqs5xx_i2c_driver = {
+ .driver = {
+ .name = "iqs5xx",
+ .of_match_table = iqs5xx_of_match,
+ .pm = &iqs5xx_pm,
+ },
+ .id_table = iqs5xx_id,
+ .probe = iqs5xx_probe,
+};
+module_i2c_driver(iqs5xx_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS550/572/525 Trackpad/Touchscreen Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 6005a1c189f6..871eb4bc4efc 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -90,18 +90,7 @@ static int icc_summary_show(struct seq_file *s, void *data)
return 0;
}
-
-static int icc_summary_open(struct inode *inode, struct file *file)
-{
- return single_open(file, icc_summary_show, inode->i_private);
-}
-
-static const struct file_operations icc_summary_fops = {
- .open = icc_summary_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(icc_summary);
static struct icc_node *node_find(const int id)
{
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6f07f3b21816..15b831113ded 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -359,6 +359,31 @@ config ARM_SMMU
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+ bool "Default to disabling bypass on ARM SMMU v1 and v2"
+ depends on ARM_SMMU
+ default y
+ help
+ Say Y here to (by default) disable bypass streams such that
+ incoming transactions from devices that are not attached to
+ an iommu domain will report an abort back to the device and
+ will not be allowed to pass through the SMMU.
+
+ Kernels that predate this option defaulted to _allowing_
+ bypass (the equivalent of NO for this config). However, the
+ default for this option is YES because the old behavior is
+ insecure.
+
+ There are few reasons to allow unmatched stream bypass, and
+ even fewer good ones. If saying YES here breaks your board,
+ you should work on fixing your board. This Kconfig option is
+ expected to be removed in the future, at which point the
+ bypass disable will simply be hardcoded.
+
+ NOTE: the kernel command line parameter
+ 'arm-smmu.disable_bypass' will continue to override this
+ config.
+
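
As a concrete example of the override mentioned in the NOTE: a kernel built with this option enabled should still restore the old permissive behaviour when booted with arm-smmu.disable_bypass=0 on the command line, since the module parameter (a bool, see the arm-smmu.c hunk further down) continues to take precedence.
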
config ARM_SMMU_V3
bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index f7cdd2ab7f11..09c9e45f7fa2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -165,7 +165,7 @@ static inline u16 get_pci_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return PCI_DEVID(pdev->bus->number, pdev->devfn);
+ return pci_dev_id(pdev);
}
static inline int get_acpihid_device_id(struct device *dev,
@@ -1723,31 +1723,6 @@ static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
*
****************************************************************************/
-/*
- * This function adds a protection domain to the global protection domain list
- */
-static void add_domain_to_list(struct protection_domain *domain)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&amd_iommu_pd_lock, flags);
- list_add(&domain->list, &amd_iommu_pd_list);
- spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
-/*
- * This function removes a protection domain to the global
- * protection domain list
- */
-static void del_domain_from_list(struct protection_domain *domain)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&amd_iommu_pd_lock, flags);
- list_del(&domain->list);
- spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
static u16 domain_id_alloc(void)
{
int id;
@@ -1838,8 +1813,6 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
if (!dom)
return;
- del_domain_from_list(&dom->domain);
-
put_iova_domain(&dom->iovad);
free_pagetable(&dom->domain);
@@ -1880,8 +1853,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
/* Initialize reserved ranges */
copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
- add_domain_to_list(&dma_dom->domain);
-
return dma_dom;
free_dma_dom:
@@ -2122,23 +2093,6 @@ out_err:
return ret;
}
-/* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF (1 << 15)
-
-static bool pci_pri_tlp_required(struct pci_dev *pdev)
-{
- u16 status;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return false;
-
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
- return (status & PCI_PRI_TLP_OFF) ? true : false;
-}
-
/*
* If a device is not yet associated with a domain, this function makes the
* device visible in the domain
@@ -2167,7 +2121,7 @@ static int attach_device(struct device *dev,
dev_data->ats.enabled = true;
dev_data->ats.qdep = pci_ats_queue_depth(pdev);
- dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
}
} else if (amd_iommu_iotlb_sup &&
pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
@@ -2897,8 +2851,6 @@ static void protection_domain_free(struct protection_domain *domain)
if (!domain)
return;
- del_domain_from_list(domain);
-
if (domain->id)
domain_id_free(domain->id);
@@ -2928,8 +2880,6 @@ static struct protection_domain *protection_domain_alloc(void)
if (protection_domain_init(domain))
goto out_err;
- add_domain_to_list(domain);
-
return domain;
out_err:
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 1b1378619fc9..f977df90d2a4 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -189,12 +189,6 @@ static bool amd_iommu_pc_present __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
/*
- * List of protection domains - used during resume
- */
-LIST_HEAD(amd_iommu_pd_list);
-spinlock_t amd_iommu_pd_lock;
-
-/*
* Pointer to the device table which is shared by all AMD IOMMUs
* it is indexed by the PCI device id or the HT unit id and contains
* information about the domain the device belongs to as well as the
@@ -359,7 +353,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
u64 start = iommu->exclusion_start & PAGE_MASK;
- u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+ u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
u64 entry;
if (!iommu->exclusion_start)
@@ -2526,8 +2520,6 @@ static int __init early_amd_iommu_init(void)
*/
__set_bit(0, amd_iommu_pd_alloc_bitmap);
- spin_lock_init(&amd_iommu_pd_lock);
-
/*
* now the data structures are allocated and basically initialized
* start the real acpi table scan
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 87965e4d9647..85c488b8daea 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -675,12 +675,6 @@ extern struct list_head amd_iommu_list;
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
/*
- * Declarations for the global list of all protection domains
- */
-extern spinlock_t amd_iommu_pd_lock;
-extern struct list_head amd_iommu_pd_list;
-
-/*
* Structure defining one entry in the device table
*/
struct dev_table_entry {
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
index a1226e4ab5f8..e9132a926761 100644
--- a/drivers/iommu/arm-smmu-regs.h
+++ b/drivers/iommu/arm-smmu-regs.h
@@ -147,6 +147,8 @@ enum arm_smmu_s2cr_privcfg {
#define CBAR_IRPTNDX_SHIFT 24
#define CBAR_IRPTNDX_MASK 0xff
+#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
+
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT (0 << 0)
#define CBA2R_RW64_64BIT (1 << 0)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index d3880010c6cf..4d5a694f02c2 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -29,6 +29,7 @@
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/pci-ats.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
@@ -86,6 +87,7 @@
#define IDR5_VAX_52_BIT 1
#define ARM_SMMU_CR0 0x20
+#define CR0_ATSCHK (1 << 4)
#define CR0_CMDQEN (1 << 3)
#define CR0_EVTQEN (1 << 2)
#define CR0_PRIQEN (1 << 1)
@@ -294,6 +296,7 @@
#define CMDQ_ERR_CERROR_NONE_IDX 0
#define CMDQ_ERR_CERROR_ILL_IDX 1
#define CMDQ_ERR_CERROR_ABT_IDX 2
+#define CMDQ_ERR_CERROR_ATC_INV_IDX 3
#define CMDQ_0_OP GENMASK_ULL(7, 0)
#define CMDQ_0_SSV (1UL << 11)
@@ -312,6 +315,12 @@
#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
+#define CMDQ_ATC_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_ATC_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_ATC_0_GLOBAL (1UL << 9)
+#define CMDQ_ATC_1_SIZE GENMASK_ULL(5, 0)
+#define CMDQ_ATC_1_ADDR_MASK GENMASK_ULL(63, 12)
+
#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
@@ -433,6 +442,16 @@ struct arm_smmu_cmdq_ent {
u64 addr;
} tlbi;
+ #define CMDQ_OP_ATC_INV 0x40
+ #define ATC_INV_SIZE_ALL 52
+ struct {
+ u32 sid;
+ u32 ssid;
+ u64 addr;
+ u8 size;
+ bool global;
+ } atc;
+
#define CMDQ_OP_PRI_RESP 0x41
struct {
u32 sid;
@@ -505,19 +524,6 @@ struct arm_smmu_s2_cfg {
u64 vtcr;
};
-struct arm_smmu_strtab_ent {
- /*
- * An STE is "assigned" if the master emitting the corresponding SID
- * is attached to a domain. The behaviour of an unassigned STE is
- * determined by the disable_bypass parameter, whereas an assigned
- * STE behaves according to s1_cfg/s2_cfg, which themselves are
- * configured according to the domain type.
- */
- bool assigned;
- struct arm_smmu_s1_cfg *s1_cfg;
- struct arm_smmu_s2_cfg *s2_cfg;
-};
-
struct arm_smmu_strtab_cfg {
__le64 *strtab;
dma_addr_t strtab_dma;
@@ -591,9 +597,14 @@ struct arm_smmu_device {
};
/* SMMU private data for each master */
-struct arm_smmu_master_data {
+struct arm_smmu_master {
struct arm_smmu_device *smmu;
- struct arm_smmu_strtab_ent ste;
+ struct device *dev;
+ struct arm_smmu_domain *domain;
+ struct list_head domain_head;
+ u32 *sids;
+ unsigned int num_sids;
+ bool ats_enabled :1;
};
/* SMMU private data for an IOMMU domain */
@@ -618,6 +629,9 @@ struct arm_smmu_domain {
};
struct iommu_domain domain;
+
+ struct list_head devices;
+ spinlock_t devices_lock;
};
struct arm_smmu_option_prop {
@@ -820,6 +834,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
case CMDQ_OP_TLBI_S12_VMALL:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
+ case CMDQ_OP_ATC_INV:
+ cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
+ cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
+ cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
+ break;
case CMDQ_OP_PRI_RESP:
cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
@@ -864,6 +886,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
[CMDQ_ERR_CERROR_NONE_IDX] = "No error",
[CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
[CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
+ [CMDQ_ERR_CERROR_ATC_INV_IDX] = "ATC invalidate timeout",
};
int i;
@@ -883,6 +906,14 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
dev_err(smmu->dev, "retrying command fetch\n");
case CMDQ_ERR_CERROR_NONE_IDX:
return;
+ case CMDQ_ERR_CERROR_ATC_INV_IDX:
+ /*
+ * ATC Invalidation Completion timeout. CONS is still pointing
+ * at the CMD_SYNC. Attempt to complete other pending commands
+ * by repeating the CMD_SYNC, though we might well end up back
+ * here since the ATC invalidation may still be pending.
+ */
+ return;
case CMDQ_ERR_CERROR_ILL_IDX:
/* Fallthrough */
default:
@@ -999,7 +1030,7 @@ static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
return ret;
}
-static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
{
int ret;
bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
@@ -1009,6 +1040,7 @@ static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
: __arm_smmu_cmdq_issue_sync(smmu);
if (ret)
dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+ return ret;
}
/* Context descriptor manipulation functions */
@@ -1025,7 +1057,6 @@ static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
val |= ARM_SMMU_TCR2CD(tcr, EPD0);
val |= ARM_SMMU_TCR2CD(tcr, EPD1);
val |= ARM_SMMU_TCR2CD(tcr, IPS);
- val |= ARM_SMMU_TCR2CD(tcr, TBI0);
return val;
}
@@ -1085,8 +1116,8 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
arm_smmu_cmdq_issue_sync(smmu);
}
-static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
- __le64 *dst, struct arm_smmu_strtab_ent *ste)
+static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
+ __le64 *dst)
{
/*
* This is hideously complicated, but we only really care about
@@ -1106,6 +1137,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
*/
u64 val = le64_to_cpu(dst[0]);
bool ste_live = false;
+ struct arm_smmu_device *smmu = NULL;
+ struct arm_smmu_s1_cfg *s1_cfg = NULL;
+ struct arm_smmu_s2_cfg *s2_cfg = NULL;
+ struct arm_smmu_domain *smmu_domain = NULL;
struct arm_smmu_cmdq_ent prefetch_cmd = {
.opcode = CMDQ_OP_PREFETCH_CFG,
.prefetch = {
@@ -1113,6 +1148,25 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
},
};
+ if (master) {
+ smmu_domain = master->domain;
+ smmu = master->smmu;
+ }
+
+ if (smmu_domain) {
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ s1_cfg = &smmu_domain->s1_cfg;
+ break;
+ case ARM_SMMU_DOMAIN_S2:
+ case ARM_SMMU_DOMAIN_NESTED:
+ s2_cfg = &smmu_domain->s2_cfg;
+ break;
+ default:
+ break;
+ }
+ }
+
if (val & STRTAB_STE_0_V) {
switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
case STRTAB_STE_0_CFG_BYPASS:
@@ -1133,8 +1187,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val = STRTAB_STE_0_V;
/* Bypass/fault */
- if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
- if (!ste->assigned && disable_bypass)
+ if (!smmu_domain || !(s1_cfg || s2_cfg)) {
+ if (!smmu_domain && disable_bypass)
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
else
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
@@ -1152,41 +1206,42 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
return;
}
- if (ste->s1_cfg) {
+ if (s1_cfg) {
BUG_ON(ste_live);
dst[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
-#ifdef CONFIG_PCI_ATS
- FIELD_PREP(STRTAB_STE_1_EATS, STRTAB_STE_1_EATS_TRANS) |
-#endif
FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
- val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
}
- if (ste->s2_cfg) {
+ if (s2_cfg) {
BUG_ON(ste_live);
dst[2] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_2_S2VMID, ste->s2_cfg->vmid) |
- FIELD_PREP(STRTAB_STE_2_VTCR, ste->s2_cfg->vtcr) |
+ FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+ FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
#ifdef __BIG_ENDIAN
STRTAB_STE_2_S2ENDI |
#endif
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
STRTAB_STE_2_S2R);
- dst[3] = cpu_to_le64(ste->s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+ dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
}
+ if (master->ats_enabled)
+ dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
+ STRTAB_STE_1_EATS_TRANS));
+
arm_smmu_sync_ste_for_sid(smmu, sid);
dst[0] = cpu_to_le64(val);
arm_smmu_sync_ste_for_sid(smmu, sid);
@@ -1199,10 +1254,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
unsigned int i;
- struct arm_smmu_strtab_ent ste = { .assigned = false };
for (i = 0; i < nent; ++i) {
- arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
+ arm_smmu_write_strtab_ent(NULL, -1, strtab);
strtab += STRTAB_STE_DWORDS;
}
}
@@ -1390,6 +1444,96 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
return IRQ_WAKE_THREAD;
}
+static void
+arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ size_t log2_span;
+ size_t span_mask;
+ /* ATC invalidates are always on 4096-bytes pages */
+ size_t inval_grain_shift = 12;
+ unsigned long page_start, page_end;
+
+ *cmd = (struct arm_smmu_cmdq_ent) {
+ .opcode = CMDQ_OP_ATC_INV,
+ .substream_valid = !!ssid,
+ .atc.ssid = ssid,
+ };
+
+ if (!size) {
+ cmd->atc.size = ATC_INV_SIZE_ALL;
+ return;
+ }
+
+ page_start = iova >> inval_grain_shift;
+ page_end = (iova + size - 1) >> inval_grain_shift;
+
+ /*
+ * In an ATS Invalidate Request, the address must be aligned on the
+ * range size, which must be a power-of-two number of pages. We
+ * thus have to choose between grossly over-invalidating the region, or
+ * splitting the invalidation into multiple commands. For simplicity
+ * we'll go with the first solution, but should refine it in the future
+ * if multiple commands are shown to be more efficient.
+ *
+ * Find the smallest power of two that covers the range. The most
+ * significant differing bit between the start and end addresses,
+ * fls(start ^ end), indicates the required span. For example:
+ *
+ * We want to invalidate pages [8; 11]. This is already the ideal range:
+ * x = 0b1000 ^ 0b1011 = 0b11
+ * span = 1 << fls(x) = 4
+ *
+ * To invalidate pages [7; 10], we need to invalidate [0; 15]:
+ * x = 0b0111 ^ 0b1010 = 0b1101
+ * span = 1 << fls(x) = 16
+ */
+ log2_span = fls_long(page_start ^ page_end);
+ span_mask = (1ULL << log2_span) - 1;
+
+ page_start &= ~span_mask;
+
+ cmd->atc.addr = page_start << inval_grain_shift;
+ cmd->atc.size = log2_span;
+}
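
The span rounding described in the comment can be checked with a small self-contained program; fls_long() is open-coded here so the sketch builds in user space, and it is illustrative only:

#include <stdio.h>

/* Smallest naturally aligned power-of-two range covering both pages. */
static unsigned long atc_span(unsigned long page_start, unsigned long page_end,
			      unsigned long *aligned_start)
{
	unsigned long diff = page_start ^ page_end;
	unsigned long span_mask;
	unsigned int log2_span = 0;

	while (diff) {			/* open-coded fls_long() */
		log2_span++;
		diff >>= 1;
	}
	span_mask = (1UL << log2_span) - 1;
	*aligned_start = page_start & ~span_mask;

	return 1UL << log2_span;
}

int main(void)
{
	unsigned long start;

	/* Pages [8; 11] are already ideal: 4 pages starting at page 8. */
	printf("%lu @ %lu\n", atc_span(8, 11, &start), start);

	/* Pages [7; 10] must be widened to [0; 15]: 16 pages at page 0. */
	printf("%lu @ %lu\n", atc_span(7, 10, &start), start);

	return 0;
}
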
+
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ int i;
+
+ if (!master->ats_enabled)
+ return 0;
+
+ for (i = 0; i < master->num_sids; i++) {
+ cmd->atc.sid = master->sids[i];
+ arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
+ }
+
+ return arm_smmu_cmdq_issue_sync(master->smmu);
+}
+
+static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
+ int ssid, unsigned long iova, size_t size)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct arm_smmu_cmdq_ent cmd;
+ struct arm_smmu_master *master;
+
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
+ return 0;
+
+ arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head)
+ ret |= arm_smmu_atc_inv_master(master, &cmd);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ return ret ? -ETIMEDOUT : 0;
+}
+
/* IO_PGTABLE API */
static void arm_smmu_tlb_sync(void *cookie)
{
@@ -1493,6 +1637,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
}
mutex_init(&smmu_domain->init_mutex);
+ INIT_LIST_HEAD(&smmu_domain->devices);
+ spin_lock_init(&smmu_domain->devices_lock);
+
return &smmu_domain->domain;
}
@@ -1688,55 +1835,97 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
return step;
}
-static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
{
int i, j;
- struct arm_smmu_master_data *master = fwspec->iommu_priv;
struct arm_smmu_device *smmu = master->smmu;
- for (i = 0; i < fwspec->num_ids; ++i) {
- u32 sid = fwspec->ids[i];
+ for (i = 0; i < master->num_sids; ++i) {
+ u32 sid = master->sids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
- if (fwspec->ids[j] == sid)
+ if (master->sids[j] == sid)
break;
if (j < i)
continue;
- arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
+ arm_smmu_write_strtab_ent(master, sid, step);
}
}
-static void arm_smmu_detach_dev(struct device *dev)
+static int arm_smmu_enable_ats(struct arm_smmu_master *master)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_data *master = fwspec->iommu_priv;
+ int ret;
+ size_t stu;
+ struct pci_dev *pdev;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
- master->ste.assigned = false;
- arm_smmu_install_ste_for_dev(fwspec);
+ if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
+ !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
+ return -ENXIO;
+
+ pdev = to_pci_dev(master->dev);
+ if (pdev->untrusted)
+ return -EPERM;
+
+ /* Smallest Translation Unit: log2 of the smallest supported granule */
+ stu = __ffs(smmu->pgsize_bitmap);
+
+ ret = pci_enable_ats(pdev, stu);
+ if (ret)
+ return ret;
+
+ master->ats_enabled = true;
+ return 0;
+}
+
+static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+{
+ if (!master->ats_enabled || !dev_is_pci(master->dev))
+ return;
+
+ pci_disable_ats(to_pci_dev(master->dev));
+ master->ats_enabled = false;
+}
+
+static void arm_smmu_detach_dev(struct arm_smmu_master *master)
+{
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = master->domain;
+
+ if (!smmu_domain)
+ return;
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_del(&master->domain_head);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ master->domain = NULL;
+ arm_smmu_install_ste_for_dev(master);
+
+ /* Disabling ATS invalidates all ATC entries */
+ arm_smmu_disable_ats(master);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
+ unsigned long flags;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_master_data *master;
- struct arm_smmu_strtab_ent *ste;
+ struct arm_smmu_master *master;
if (!fwspec)
return -ENOENT;
master = fwspec->iommu_priv;
smmu = master->smmu;
- ste = &master->ste;
- /* Already attached to a different domain? */
- if (ste->assigned)
- arm_smmu_detach_dev(dev);
+ arm_smmu_detach_dev(master);
mutex_lock(&smmu_domain->init_mutex);
@@ -1756,21 +1945,19 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
}
- ste->assigned = true;
+ master->domain = smmu_domain;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
- ste->s1_cfg = NULL;
- ste->s2_cfg = NULL;
- } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- ste->s1_cfg = &smmu_domain->s1_cfg;
- ste->s2_cfg = NULL;
- arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
- } else {
- ste->s1_cfg = NULL;
- ste->s2_cfg = &smmu_domain->s2_cfg;
- }
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_add(&master->domain_head, &smmu_domain->devices);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- arm_smmu_install_ste_for_dev(fwspec);
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
+ arm_smmu_enable_ats(master);
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+ arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
+
+ arm_smmu_install_ste_for_dev(master);
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1790,12 +1977,18 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
- struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+ int ret;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
return 0;
- return ops->unmap(ops, iova, size);
+ ret = ops->unmap(ops, iova, size);
+ if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size))
+ return 0;
+
+ return ret;
}
static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
@@ -1860,7 +2053,7 @@ static int arm_smmu_add_device(struct device *dev)
{
int i, ret;
struct arm_smmu_device *smmu;
- struct arm_smmu_master_data *master;
+ struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct iommu_group *group;
@@ -1882,13 +2075,16 @@ static int arm_smmu_add_device(struct device *dev)
if (!master)
return -ENOMEM;
+ master->dev = dev;
master->smmu = smmu;
+ master->sids = fwspec->ids;
+ master->num_sids = fwspec->num_ids;
fwspec->iommu_priv = master;
}
/* Check the SIDs are in range of the SMMU and our stream table */
- for (i = 0; i < fwspec->num_ids; i++) {
- u32 sid = fwspec->ids[i];
+ for (i = 0; i < master->num_sids; i++) {
+ u32 sid = master->sids[i];
if (!arm_smmu_sid_in_range(smmu, sid))
return -ERANGE;
@@ -1913,7 +2109,7 @@ static int arm_smmu_add_device(struct device *dev)
static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_data *master;
+ struct arm_smmu_master *master;
struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
@@ -1921,8 +2117,7 @@ static void arm_smmu_remove_device(struct device *dev)
master = fwspec->iommu_priv;
smmu = master->smmu;
- if (master && master->ste.assigned)
- arm_smmu_detach_dev(dev);
+ arm_smmu_detach_dev(master);
iommu_group_remove_device(dev);
iommu_device_unlink(&smmu->iommu, dev);
kfree(master);
@@ -2454,13 +2649,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Clear CR0 and sync (disables SMMU and queue processing) */
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
if (reg & CR0_SMMUEN) {
- if (is_kdump_kernel()) {
- arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
- arm_smmu_device_disable(smmu);
- return -EBUSY;
- }
-
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+ WARN_ON(is_kdump_kernel() && !disable_bypass);
+ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
}
ret = arm_smmu_device_disable(smmu);
@@ -2547,12 +2738,24 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
}
}
+ if (smmu->features & ARM_SMMU_FEAT_ATS) {
+ enables |= CR0_ATSCHK;
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to enable ATS check\n");
+ return ret;
+ }
+ }
+
ret = arm_smmu_setup_irqs(smmu);
if (ret) {
dev_err(smmu->dev, "failed to setup irqs\n");
return ret;
}
+ if (is_kdump_kernel())
+ enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
/* Enable the SMMU interface, or ensure bypass */
if (!bypass || disable_bypass) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 045d93884164..5e54cc0a28b3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -110,7 +110,8 @@ static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
-static bool disable_bypass;
+static bool disable_bypass =
+ IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
@@ -569,12 +570,13 @@ static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
- u32 fsr, fsynr;
+ u32 fsr, fsynr, cbfrsynra;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
void __iomem *cb_base;
cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -585,10 +587,11 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+ cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
dev_err_ratelimited(smmu->dev,
- "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
- fsr, iova, fsynr, cfg->cbndx);
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, cfg->cbndx);
writel(fsr, cb_base + ARM_SMMU_CB_FSR);
return IRQ_HANDLED;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 77aabe637a60..5e898047c390 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -206,12 +206,13 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
return 0;
}
-static void iova_reserve_pci_windows(struct pci_dev *dev,
+static int iova_reserve_pci_windows(struct pci_dev *dev,
struct iova_domain *iovad)
{
struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
struct resource_entry *window;
unsigned long lo, hi;
+ phys_addr_t start = 0, end;
resource_list_for_each_entry(window, &bridge->windows) {
if (resource_type(window->res) != IORESOURCE_MEM)
@@ -221,6 +222,31 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
hi = iova_pfn(iovad, window->res->end - window->offset);
reserve_iova(iovad, lo, hi);
}
+
+ /* Get reserved DMA windows from host bridge */
+ resource_list_for_each_entry(window, &bridge->dma_ranges) {
+ end = window->res->start - window->offset;
+resv_iova:
+ if (end > start) {
+ lo = iova_pfn(iovad, start);
+ hi = iova_pfn(iovad, end);
+ reserve_iova(iovad, lo, hi);
+ } else {
+ /* dma_ranges list should be sorted */
+ dev_err(&dev->dev, "Failed to reserve IOVA\n");
+ return -EINVAL;
+ }
+
+ start = window->res->end - window->offset + 1;
+ /* If window is last entry */
+ if (window->node.next == &bridge->dma_ranges &&
+ end != ~(dma_addr_t)0) {
+ end = ~(dma_addr_t)0;
+ goto resv_iova;
+ }
+ }
+
+ return 0;
}
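
The dma_ranges loop added above amounts to reserving everything outside a sorted list of usable windows, so the IOVA allocator never hands those ranges out. A simplified sketch of that idea, using hypothetical types and a reserve callback rather than the kernel's resource/iova API (adjacent windows simply produce no gap here, whereas the code above treats an out-of-order entry as -EINVAL):

#include <limits.h>

struct toy_window {
	unsigned long long start, end;	/* inclusive, sorted, non-overlapping */
};

typedef void (*toy_reserve_fn)(unsigned long long lo, unsigned long long hi);

/* Reserve the gaps before, between and after the usable windows. */
static int toy_reserve_outside(const struct toy_window *win, int count,
			       toy_reserve_fn reserve)
{
	unsigned long long cursor = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (win[i].start < cursor)
			return -1;	/* not sorted */
		if (win[i].start > cursor)
			reserve(cursor, win[i].start - 1);
		cursor = win[i].end + 1;	/* wraps to 0 at the very top */
	}

	if (count && cursor)	/* tail above the last window, if any */
		reserve(cursor, ULLONG_MAX);

	return 0;
}
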
static int iova_reserve_iommu_regions(struct device *dev,
@@ -232,8 +258,11 @@ static int iova_reserve_iommu_regions(struct device *dev,
LIST_HEAD(resv_regions);
int ret = 0;
- if (dev_is_pci(dev))
- iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+ if (dev_is_pci(dev)) {
+ ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+ if (ret)
+ return ret;
+ }
iommu_get_resv_regions(dev, &resv_regions);
list_for_each_entry(region, &resv_regions, list) {
@@ -619,17 +648,7 @@ out_free_pages:
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
- unsigned long uaddr = vma->vm_start;
- unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- int ret = -ENXIO;
-
- for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
- ret = vm_insert_page(vma, uaddr, pages[i]);
- if (ret)
- break;
- uaddr += PAGE_SIZE;
- }
- return ret;
+ return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 9c49300e9fb7..6d969a172fbb 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -145,7 +145,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
for (tmp = dev; tmp; tmp = tmp->bus->self)
level++;
- size = sizeof(*info) + level * sizeof(info->path[0]);
+ size = struct_size(info, path, level);
if (size <= sizeof(dmar_pci_notify_info_buf)) {
info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
} else {
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 28cb713d728c..a209199f3af6 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1391,7 +1391,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
/* pdev will be returned if device is not a vf */
pf_pdev = pci_physfn(pdev);
- info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+ info->pfsid = pci_dev_id(pf_pdev);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -2341,32 +2341,33 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
-{
- int ret;
- struct intel_iommu *iommu;
-
- /* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
- if (ret)
- return ret;
-
- /* Notify about the new mapping */
- if (domain_type_is_vm(domain)) {
- /* VM typed domains can have more than one IOMMUs */
- int iommu_id;
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
- }
- } else {
- /* General domains only have one IOMMU */
- iommu = domain_get_iommu(domain);
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
- }
+ struct scatterlist *sg, unsigned long phys_pfn,
+ unsigned long nr_pages, int prot)
+{
+ int ret;
+ struct intel_iommu *iommu;
+
+ /* Do the real mapping first */
+ ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+ if (ret)
+ return ret;
+
+ /* Notify about the new mapping */
+ if (domain_type_is_vm(domain)) {
+ /* VM typed domains can have more than one IOMMUs */
+ int iommu_id;
+
+ for_each_domain_iommu(iommu_id, domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+ } else {
+ /* General domains only have one IOMMU */
+ iommu = domain_get_iommu(domain);
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
- return 0;
+ return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
@@ -2485,6 +2486,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->domain = domain;
info->iommu = iommu;
info->pasid_table = NULL;
+ info->auxd_enabled = 0;
+ INIT_LIST_HEAD(&info->auxiliary_domains);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -3412,9 +3415,12 @@ static int __init init_dmars(void)
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
+ dmar_map_gfx = 0;
#endif
+ if (!dmar_map_gfx)
+ iommu_identity_mapping |= IDENTMAP_GFX;
+
check_tylersburg_isoch();
if (iommu_identity_mapping) {
@@ -3496,7 +3502,13 @@ domains_done:
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
+ /*
+ * Calling dmar_alloc_hwirq() with dmar_global_lock held could
+ * cause a lock race, so drop the lock around intel_svm_enable_prq().
+ */
+ up_write(&dmar_global_lock);
ret = intel_svm_enable_prq(iommu);
+ down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
@@ -3606,45 +3618,40 @@ out:
}
/* Check if the dev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct device *dev)
+static bool iommu_need_mapping(struct device *dev)
{
int found;
if (iommu_dummy(dev))
- return 1;
+ return false;
if (!iommu_identity_mapping)
- return 0;
+ return true;
found = identity_mapping(dev);
if (found) {
if (iommu_should_identity_map(dev, 0))
- return 1;
- else {
- /*
- * 32 bit DMA is removed from si_domain and fall back
- * to non-identity mapping.
- */
- dmar_remove_one_dev_info(dev);
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- return 0;
- }
+ return false;
+
+ /*
+ * 32-bit DMA is removed from si_domain and falls back to
+ * non-identity mapping.
+ */
+ dmar_remove_one_dev_info(dev);
+ dev_info(dev, "32bit DMA uses non-identity mapping\n");
} else {
/*
* In case of a detached 64 bit DMA device from vm, the device
* is put into si_domain for identity mapping.
*/
- if (iommu_should_identity_map(dev, 0)) {
- int ret;
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret) {
- dev_info(dev, "64bit DMA uses identity mapping\n");
- return 1;
- }
+ if (iommu_should_identity_map(dev, 0) &&
+ !domain_add_dev_info(si_domain, dev)) {
+ dev_info(dev, "64bit DMA uses identity mapping\n");
+ return false;
}
}
- return 0;
+ return true;
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
@@ -3660,9 +3667,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return paddr;
-
domain = get_valid_domain_for_dev(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3711,15 +3715,20 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_single(dev, page_to_phys(page) + offset, size,
- dir, *dev->dma_mask);
+ if (iommu_need_mapping(dev))
+ return __intel_map_single(dev, page_to_phys(page) + offset,
+ size, dir, *dev->dma_mask);
+ return dma_direct_map_page(dev, page, offset, size, dir, attrs);
}
static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
+ if (iommu_need_mapping(dev))
+ return __intel_map_single(dev, phys_addr, size, dir,
+ *dev->dma_mask);
+ return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3730,9 +3739,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
unsigned long iova_pfn;
struct intel_iommu *iommu;
struct page *freelist;
-
- if (iommu_no_mapping(dev))
- return;
+ struct pci_dev *pdev = NULL;
domain = find_domain(dev);
BUG_ON(!domain);
@@ -3745,11 +3752,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
start_pfn = mm_to_dma_pfn(iova_pfn);
last_pfn = start_pfn + nrpages - 1;
+ if (dev_is_pci(dev))
+ pdev = to_pci_dev(dev);
+
dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
freelist = domain_unmap(domain, start_pfn, last_pfn);
- if (intel_iommu_strict) {
+ if (intel_iommu_strict || (pdev && pdev->untrusted)) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
nrpages, !freelist, 0);
/* free iova */
@@ -3769,7 +3779,17 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- intel_unmap(dev, dev_addr, size);
+ if (iommu_need_mapping(dev))
+ intel_unmap(dev, dev_addr, size);
+ else
+ dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (iommu_need_mapping(dev))
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3779,28 +3799,17 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
int order;
+ if (!iommu_need_mapping(dev))
+ return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+
size = PAGE_ALIGN(size);
order = get_order(size);
- if (!iommu_no_mapping(dev))
- flags &= ~(GFP_DMA | GFP_DMA32);
- else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- else
- flags |= GFP_DMA32;
- }
-
if (gfpflags_allow_blocking(flags)) {
unsigned int count = size >> PAGE_SHIFT;
page = dma_alloc_from_contiguous(dev, count, order,
flags & __GFP_NOWARN);
- if (page && iommu_no_mapping(dev) &&
- page_to_phys(page) + size > dev->coherent_dma_mask) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
- }
}
if (!page)
@@ -3826,6 +3835,9 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
int order;
struct page *page = virt_to_page(vaddr);
+ if (!iommu_need_mapping(dev))
+ return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3843,6 +3855,9 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;
+ if (!iommu_need_mapping(dev))
+ return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
+
for_each_sg(sglist, sg, nelems, i) {
nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
}
@@ -3850,20 +3865,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
-static int intel_nontranslate_map_sg(struct device *hddev,
- struct scatterlist *sglist, int nelems, int dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i) {
- BUG_ON(!sg_page(sg));
- sg->dma_address = sg_phys(sg);
- sg->dma_length = sg->length;
- }
- return nelems;
-}
-
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir, unsigned long attrs)
{
@@ -3878,8 +3879,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
+ if (!iommu_need_mapping(dev))
+ return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
domain = get_valid_domain_for_dev(dev);
if (!domain)
@@ -3929,7 +3930,7 @@ static const struct dma_map_ops intel_dma_ops = {
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.map_resource = intel_map_resource,
- .unmap_resource = intel_unmap_page,
+ .unmap_resource = intel_unmap_resource,
.dma_supported = dma_direct_supported,
};
@@ -4055,9 +4056,7 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
- if (dmar_map_gfx) {
- intel_iommu_gfx_mapped = 1;
- } else {
+ if (!dmar_map_gfx) {
drhd->ignored = 1;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
@@ -4086,7 +4085,7 @@ static int init_iommu_hw(void)
iommu_disable_protect_mem_regions(iommu);
continue;
}
-
+
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
@@ -4896,6 +4895,9 @@ int __init intel_iommu_init(void)
goto out_free_reserved_range;
}
+ if (dmar_map_gfx)
+ intel_iommu_gfx_mapped = 1;
+
init_no_remapping_devices();
ret = init_dmars();
@@ -5065,35 +5067,139 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
domain_exit(to_dmar_domain(domain));
}
-static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+/*
+ * Check whether a @domain could be attached to the @dev through the
+ * aux-domain attach/detach APIs.
+ */
+static inline bool
+is_aux_domain(struct device *dev, struct iommu_domain *domain)
{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
- int addr_width;
- u8 bus, devfn;
+ struct device_domain_info *info = dev->archdata.iommu;
- if (device_is_rmrr_locked(dev)) {
- dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
- return -EPERM;
- }
+ return info && info->auxd_enabled &&
+ domain->type == IOMMU_DOMAIN_UNMANAGED;
+}
- /* normally dev is not mapped */
- if (unlikely(domain_context_mapped(dev))) {
- struct dmar_domain *old_domain;
+static void auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
- old_domain = find_domain(dev);
- if (old_domain) {
- rcu_read_lock();
- dmar_remove_one_dev_info(dev);
- rcu_read_unlock();
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
- if (!domain_type_is_vm_or_si(old_domain) &&
- list_empty(&old_domain->devices))
- domain_exit(old_domain);
+ domain->auxd_refcnt++;
+ list_add(&domain->auxd, &info->auxiliary_domains);
+}
+
+static void auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
+
+ list_del(&domain->auxd);
+ domain->auxd_refcnt--;
+
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+}
+
+static int aux_domain_add_dev(struct dmar_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ u8 bus, devfn;
+ unsigned long flags;
+ struct intel_iommu *iommu;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ if (domain->default_pasid <= 0) {
+ int pasid;
+
+ pasid = intel_pasid_alloc_id(domain, PASID_MIN,
+ pci_max_pasids(to_pci_dev(dev)),
+ GFP_KERNEL);
+ if (pasid <= 0) {
+ pr_err("Can't allocate default pasid\n");
+ return -ENODEV;
}
+ domain->default_pasid = pasid;
}
+ spin_lock_irqsave(&device_domain_lock, flags);
+ /*
+ * iommu->lock must be held to attach domain to iommu and setup the
+ * pasid entry for second level translation.
+ */
+ spin_lock(&iommu->lock);
+ ret = domain_attach_iommu(domain, iommu);
+ if (ret)
+ goto attach_failed;
+
+ /* Set up the PASID entry for mediated devices: */
+ ret = intel_pasid_setup_second_level(iommu, domain, dev,
+ domain->default_pasid);
+ if (ret)
+ goto table_failed;
+ spin_unlock(&iommu->lock);
+
+ auxiliary_link_device(domain, dev);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+
+table_failed:
+ domain_detach_iommu(domain, iommu);
+attach_failed:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+
+ return ret;
+}
+
+static void aux_domain_remove_dev(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+
+ if (!is_aux_domain(dev, &domain->domain))
+ return;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ iommu = info->iommu;
+
+ auxiliary_unlink_device(domain, dev);
+
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static int prepare_domain_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct intel_iommu *iommu;
+ int addr_width;
+ u8 bus, devfn;
+
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
@@ -5126,7 +5232,58 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
- return domain_add_dev_info(dmar_domain, dev);
+ return 0;
+}
+
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret;
+
+ if (device_is_rmrr_locked(dev)) {
+ dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
+ return -EPERM;
+ }
+
+ if (is_aux_domain(dev, domain))
+ return -EPERM;
+
+ /* normally dev is not mapped */
+ if (unlikely(domain_context_mapped(dev))) {
+ struct dmar_domain *old_domain;
+
+ old_domain = find_domain(dev);
+ if (old_domain) {
+ rcu_read_lock();
+ dmar_remove_one_dev_info(dev);
+ rcu_read_unlock();
+
+ if (!domain_type_is_vm_or_si(old_domain) &&
+ list_empty(&old_domain->devices))
+ domain_exit(old_domain);
+ }
+ }
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ return domain_add_dev_info(to_dmar_domain(domain), dev);
+}
+
+static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret;
+
+ if (!is_aux_domain(dev, domain))
+ return -EPERM;
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ return aux_domain_add_dev(to_dmar_domain(domain), dev);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
@@ -5135,6 +5292,12 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
dmar_remove_one_dev_info(dev);
}
+static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ aux_domain_remove_dev(to_dmar_domain(domain), dev);
+}
+
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot)
@@ -5223,6 +5386,42 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
+static inline bool scalable_mode_support(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool ret = true;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!sm_supported(iommu)) {
+ ret = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool iommu_pasid_support(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool ret = true;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!pasid_supported(iommu)) {
+ ret = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5307,8 +5506,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
}
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
struct device_domain_info *info;
struct context_entry *context;
@@ -5317,7 +5515,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
u64 ctx_lo;
int ret;
- domain = get_valid_domain_for_dev(sdev->dev);
+ domain = get_valid_domain_for_dev(dev);
if (!domain)
return -EINVAL;
@@ -5325,7 +5523,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
spin_lock(&iommu->lock);
ret = -EINVAL;
- info = sdev->dev->archdata.iommu;
+ info = dev->archdata.iommu;
if (!info || !info->pasid_supported)
goto out;
@@ -5335,14 +5533,13 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
ctx_lo = context[0].lo;
- sdev->did = FLPT_DEFAULT_DID;
- sdev->sid = PCI_DEVID(info->bus, info->devfn);
-
if (!(ctx_lo & CONTEXT_PASIDE)) {
ctx_lo |= CONTEXT_PASIDE;
context[0].lo = ctx_lo;
wmb();
- iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
+ iommu->flush.flush_context(iommu,
+ domain->iommu_did[iommu->seq_id],
+ PCI_DEVID(info->bus, info->devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
}
@@ -5351,12 +5548,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
if (!info->pasid_enabled)
iommu_enable_dev_iotlb(info);
- if (info->ats_enabled) {
- sdev->dev_iotlb = 1;
- sdev->qdep = info->ats_qdep;
- if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
- sdev->qdep = 0;
- }
ret = 0;
out:
@@ -5366,6 +5557,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
return ret;
}
+#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
struct intel_iommu *iommu;
@@ -5387,12 +5579,142 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
+static int intel_iommu_enable_auxd(struct device *dev)
+{
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+ u8 bus, devfn;
+ int ret;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu || dmar_disabled)
+ return -EINVAL;
+
+ if (!sm_supported(iommu) || !pasid_supported(iommu))
+ return -EINVAL;
+
+ ret = intel_iommu_enable_pasid(iommu, dev);
+ if (ret)
+ return -ENODEV;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ info->auxd_enabled = 1;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+static int intel_iommu_disable_auxd(struct device *dev)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ if (!WARN_ON(!info))
+ info->auxd_enabled = 0;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+/*
+ * A PCI Express Designated Vendor-Specific Extended Capability is defined
+ * in section 3.7 of the Intel Scalable I/O Virtualization technical spec
+ * so that system software and tools can detect endpoint devices supporting
+ * Intel Scalable I/O Virtualization without a host driver dependency.
+ *
+ * Returns the address of the matching extended capability structure within
+ * the device's PCI configuration space, or 0 if the device does not support
+ * it.
+ */
+static int siov_find_pci_dvsec(struct pci_dev *pdev)
+{
+ int pos;
+ u16 vendor, id;
+
+ pos = pci_find_next_ext_capability(pdev, 0, 0x23);
+ while (pos) {
+ pci_read_config_word(pdev, pos + 4, &vendor);
+ pci_read_config_word(pdev, pos + 8, &id);
+ if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
+ return pos;
+
+ pos = pci_find_next_ext_capability(pdev, pos, 0x23);
+ }
+
+ return 0;
+}
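
For reference, the literals above follow the PCIe DVSEC header layout; the names below are purely illustrative (this patch keeps the raw offsets and the 0x23 capability ID):

/*
 * Purely illustrative names for the literals used in siov_find_pci_dvsec()
 * (layout per the PCIe DVSEC definition); they are not pci_regs.h defines.
 */
#define PCI_EXT_CAP_ID_DVSEC_SKETCH	0x23	/* Designated Vendor-Specific Ext Cap */
#define PCI_DVSEC_HEADER1_SKETCH	0x04	/* word at +4: bits 15:0 = DVSEC vendor ID */
#define PCI_DVSEC_HEADER2_SKETCH	0x08	/* word at +8: bits 15:0 = DVSEC ID */
#define PCI_DVSEC_INTEL_SIOV_SKETCH	5	/* Scalable IOV DVSEC ID, per the spec */
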
+
+static bool
+intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX) {
+ int ret;
+
+ if (!dev_is_pci(dev) || dmar_disabled ||
+ !scalable_mode_support() || !iommu_pasid_support())
+ return false;
+
+ ret = pci_pasid_features(to_pci_dev(dev));
+ if (ret < 0)
+ return false;
+
+ return !!siov_find_pci_dvsec(to_pci_dev(dev));
+ }
+
+ return false;
+}
+
+static int
+intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return intel_iommu_enable_auxd(dev);
+
+ return -ENODEV;
+}
+
+static int
+intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return intel_iommu_disable_auxd(dev);
+
+ return -ENODEV;
+}
+
+static bool
+intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return scalable_mode_support() && info && info->auxd_enabled;
+
+ return false;
+}
+
+static int
+intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ return dmar_domain->default_pasid > 0 ?
+ dmar_domain->default_pasid : -EINVAL;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
+ .aux_attach_dev = intel_iommu_aux_attach_device,
+ .aux_detach_dev = intel_iommu_aux_detach_device,
+ .aux_get_pasid = intel_iommu_aux_get_pasid,
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
@@ -5401,6 +5723,10 @@ const struct iommu_ops intel_iommu_ops = {
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = intel_iommu_put_resv_regions,
.device_group = pci_device_group,
+ .dev_has_feat = intel_iommu_dev_has_feat,
+ .dev_feat_enabled = intel_iommu_dev_feat_enabled,
+ .dev_enable_feat = intel_iommu_dev_enable_feat,
+ .dev_disable_feat = intel_iommu_dev_disable_feat,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 03b12d2ee213..2fefeafda437 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -154,8 +154,10 @@ int intel_pasid_alloc_table(struct device *dev)
order = size ? get_order(size) : 0;
pages = alloc_pages_node(info->iommu->node,
GFP_KERNEL | __GFP_ZERO, order);
- if (!pages)
+ if (!pages) {
+ kfree(pasid_table);
return -ENOMEM;
+ }
pasid_table->table = page_address(pages);
pasid_table->order = order;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 3a4b09ae8561..8f87304f915c 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -228,6 +228,7 @@ static LIST_HEAD(global_svm_list);
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
struct mm_struct *mm = NULL;
@@ -291,13 +292,29 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
sdev->dev = dev;
- ret = intel_iommu_enable_pasid(iommu, sdev);
+ ret = intel_iommu_enable_pasid(iommu, dev);
if (ret || !pasid) {
/* If they don't actually want to assign a PASID, this is
* just an enabling check/preparation. */
kfree(sdev);
goto out;
}
+
+ info = dev->archdata.iommu;
+ if (!info || !info->pasid_supported) {
+ kfree(sdev);
+ goto out;
+ }
+
+ sdev->did = FLPT_DEFAULT_DID;
+ sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ if (info->ats_enabled) {
+ sdev->dev_iotlb = 1;
+ sdev->qdep = info->ats_qdep;
+ if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
+ sdev->qdep = 0;
+ }
+
/* Finish the setup now we know we're keeping it */
sdev->users = 1;
sdev->ops = ops;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 2d74641b7f7b..4160aa9f3f80 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -424,7 +424,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
else
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
- PCI_DEVID(dev->bus->number, dev->devfn));
+ pci_dev_id(dev));
return 0;
}
@@ -548,8 +548,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
goto out_free_table;
}
- bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
- sizeof(long), GFP_ATOMIC);
+ bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
goto out_free_pages;
@@ -616,7 +615,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
return 0;
out_free_bitmap:
- kfree(bitmap);
+ bitmap_free(bitmap);
out_free_pages:
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
@@ -640,7 +639,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
}
free_pages((unsigned long)iommu->ir_table->base,
INTR_REMAP_PAGE_ORDER);
- kfree(iommu->ir_table->bitmap);
+ bitmap_free(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
}
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index d3700ec15cbd..4e21efbc4459 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -172,6 +172,10 @@
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
+#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
+#define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4)
+
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@@ -180,11 +184,6 @@
#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
-#define iopte_leaf(pte,l) \
- (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
- (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
- (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
-
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
@@ -198,6 +197,15 @@ struct arm_lpae_io_pgtable {
typedef u64 arm_lpae_iopte;
+static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
+ enum io_pgtable_fmt fmt)
+{
+ if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
+ return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;
+
+ return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
+}
+
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
struct arm_lpae_io_pgtable *data)
{
@@ -303,12 +311,14 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NS;
- if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
pte |= ARM_LPAE_PTE_TYPE_PAGE;
else
pte |= ARM_LPAE_PTE_TYPE_BLOCK;
- pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+ if (data->iop.fmt != ARM_MALI_LPAE)
+ pte |= ARM_LPAE_PTE_AF;
+ pte |= ARM_LPAE_PTE_SH_IS;
pte |= paddr_to_iopte(paddr, data);
__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
@@ -321,7 +331,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
{
arm_lpae_iopte pte = *ptep;
- if (iopte_leaf(pte, lvl)) {
+ if (iopte_leaf(pte, lvl, data->iop.fmt)) {
/* We require an unmap first */
WARN_ON(!selftest_running);
return -EEXIST;
@@ -409,7 +419,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
__arm_lpae_sync_pte(ptep, cfg);
}
- if (pte && !iopte_leaf(pte, lvl)) {
+ if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
cptep = iopte_deref(pte, data);
} else if (pte) {
/* We require an unmap first */
@@ -429,31 +439,37 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_nG;
-
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
-
if (!(prot & IOMMU_PRIV))
pte |= ARM_LPAE_PTE_AP_UNPRIV;
-
- if (prot & IOMMU_MMIO)
- pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
- << ARM_LPAE_PTE_ATTRINDX_SHIFT);
- else if (prot & IOMMU_CACHE)
- pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
- << ARM_LPAE_PTE_ATTRINDX_SHIFT);
} else {
pte = ARM_LPAE_PTE_HAP_FAULT;
if (prot & IOMMU_READ)
pte |= ARM_LPAE_PTE_HAP_READ;
if (prot & IOMMU_WRITE)
pte |= ARM_LPAE_PTE_HAP_WRITE;
+ }
+
+ /*
+ * Note that this logic is structured to accommodate Mali LPAE
+ * having stage-1-like attributes but stage-2-like permissions.
+ */
+ if (data->iop.fmt == ARM_64_LPAE_S2 ||
+ data->iop.fmt == ARM_32_LPAE_S2) {
if (prot & IOMMU_MMIO)
pte |= ARM_LPAE_PTE_MEMATTR_DEV;
else if (prot & IOMMU_CACHE)
pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
else
pte |= ARM_LPAE_PTE_MEMATTR_NC;
+ } else {
+ if (prot & IOMMU_MMIO)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ else if (prot & IOMMU_CACHE)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
if (prot & IOMMU_NOEXEC)
@@ -511,7 +527,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
while (ptep != end) {
arm_lpae_iopte pte = *ptep++;
- if (!pte || iopte_leaf(pte, lvl))
+ if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
continue;
__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
@@ -602,7 +618,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
__arm_lpae_set_pte(ptep, 0, &iop->cfg);
- if (!iopte_leaf(pte, lvl)) {
+ if (!iopte_leaf(pte, lvl, iop->fmt)) {
/* Also flush any partial walks */
io_pgtable_tlb_add_flush(iop, iova, size,
ARM_LPAE_GRANULE(data), false);
@@ -621,7 +637,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
}
return size;
- } else if (iopte_leaf(pte, lvl)) {
+ } else if (iopte_leaf(pte, lvl, iop->fmt)) {
/*
* Insert a table at the next level to map the old region,
* minus the part we want to unmap
@@ -669,7 +685,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
return 0;
/* Leaf entry? */
- if (iopte_leaf(pte,lvl))
+ if (iopte_leaf(pte, lvl, data->iop.fmt))
goto found_translation;
/* Take it to the next level */
@@ -995,6 +1011,32 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
return iop;
}
+static struct io_pgtable *
+arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias != 48 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
+ if (iop) {
+ u64 mair, ttbr;
+
+ /* Copy values as union fields overlap */
+ mair = cfg->arm_lpae_s1_cfg.mair[0];
+ ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
+
+ cfg->arm_mali_lpae_cfg.memattr = mair;
+ cfg->arm_mali_lpae_cfg.transtab = ttbr |
+ ARM_MALI_LPAE_TTBR_READ_INNER |
+ ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ }
+
+ return iop;
+}
+
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
.alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
@@ -1015,6 +1057,11 @@ struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
.free = arm_lpae_free_pgtable,
};
+struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
+ .alloc = arm_mali_lpae_alloc_pgtable,
+ .free = arm_lpae_free_pgtable,
+};
+
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
static struct io_pgtable_cfg *cfg_cookie;
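
A GPU driver consuming the new ARM_MALI_LPAE format would allocate the table through alloc_io_pgtable_ops() and program the returned transtab/memattr values into its MMU registers. A minimal sketch under that assumption; mali_tlb_ops stands in for the driver's iommu_gather_ops and is left as an empty stub here:

#include <linux/io-pgtable.h>
#include <linux/sizes.h>

/* Stub standing in for the GPU driver's TLB maintenance callbacks. */
static const struct iommu_gather_ops mali_tlb_ops;

static struct io_pgtable_ops *example_alloc_mali_pgtable(void *gpu_cookie,
							  u64 *transtab,
							  u64 *memattr)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= 48,	/* the format requires ias == 48 */
		.oas		= 40,	/* and oas <= 40 */
		.tlb		= &mali_tlb_ops,
	};
	struct io_pgtable_ops *ops;

	ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &cfg, gpu_cookie);
	if (!ops)
		return NULL;

	/* Values to program into the GPU MMU's TRANSTAB/MEMATTR registers */
	*transtab = cfg.arm_mali_lpae_cfg.transtab;
	*memattr = cfg.arm_mali_lpae_cfg.memattr;

	return ops;
}
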
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 93f2880be6c6..5227cfdbb65b 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -30,6 +30,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
[ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
+ [ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 109de67d5d72..67ee6623f9b2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -45,10 +45,6 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
static bool iommu_dma_strict __read_mostly = true;
-struct iommu_callback_data {
- const struct iommu_ops *ops;
-};
-
struct iommu_group {
struct kobject kobj;
struct kobject *devices_kobj;
@@ -1217,9 +1213,6 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
int err;
struct notifier_block *nb;
- struct iommu_callback_data cb = {
- .ops = ops,
- };
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (!nb)
@@ -1231,7 +1224,7 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
if (err)
goto out_free;
- err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+ err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
if (err)
goto out_err;
@@ -1240,7 +1233,7 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
out_err:
/* Clean up */
- bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
+ bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
bus_unregister_notifier(bus, nb);
out_free:
@@ -2039,3 +2032,203 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+/*
+ * Per device IOMMU features.
+ */
+bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_has_feat)
+ return ops->dev_has_feat(dev, feat);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
+
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_enable_feat)
+ return ops->dev_enable_feat(dev, feat);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
+
+/*
+ * Device drivers should do the necessary cleanup before calling this. For
+ * example, before disabling the aux-domain feature, the driver should detach
+ * all aux-domains. Otherwise, this will return -EBUSY.
+ */
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_disable_feat)
+ return ops->dev_disable_feat(dev, feat);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
+
+bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_feat_enabled)
+ return ops->dev_feat_enabled(dev, feat);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
+
+/*
+ * Aux-domain specific attach/detach.
+ *
+ * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
+ * true. Also, as long as domains are attached to a device through this
+ * interface, any attempt to call iommu_attach_device() should fail
+ * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
+ * This should make us safe against a device being attached to a guest as a
+ * whole while there are still pasid users on it (aux and sva).
+ */
+int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+ int ret = -ENODEV;
+
+ if (domain->ops->aux_attach_dev)
+ ret = domain->ops->aux_attach_dev(domain, dev);
+
+ if (!ret)
+ trace_attach_device_to_domain(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
+
+void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+ if (domain->ops->aux_detach_dev) {
+ domain->ops->aux_detach_dev(domain, dev);
+ trace_detach_device_from_domain(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
+
+int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+ int ret = -ENODEV;
+
+ if (domain->ops->aux_get_pasid)
+ ret = domain->ops->aux_get_pasid(domain, dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
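
For context, a typical consumer of this aux-domain API (say, an mdev parent driver) would enable the feature, allocate an ordinary unmanaged domain, aux-attach it, and then read back the default PASID to tag the mediated device's DMA. A minimal sketch under those assumptions (names and error unwinding are illustrative only, not from this patch):

#include <linux/iommu.h>

/* Hypothetical mdev-parent usage; "parent" is the physical device. */
static int example_setup_aux_domain(struct device *parent)
{
	struct iommu_domain *domain;
	int pasid, ret;

	if (!iommu_dev_has_feature(parent, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	domain = iommu_domain_alloc(parent->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_aux_attach_device(domain, parent);
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}

	/* Tag the mediated device's DMA requests with this PASID */
	pasid = iommu_aux_get_pasid(domain, parent);
	if (pasid < 0) {
		iommu_aux_detach_device(domain, parent);
		iommu_domain_free(domain);
		return pasid;
	}

	return 0;
}
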
+
+/**
+ * iommu_sva_bind_device() - Bind a process address space to a device
+ * @dev: the device
+ * @mm: the mm to bind, caller must hold a reference to it
+ *
+ * Create a bond between device and address space, allowing the device to access
+ * the mm using the returned PASID. If a bond already exists between @dev and
+ * @mm, it is returned and an additional reference is taken. Caller must call
+ * iommu_sva_unbind_device() to release each reference.
+ *
+ * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
+ * initialize the required SVA features.
+ *
+ * On error, returns an ERR_PTR value.
+ */
+struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+ struct iommu_group *group;
+ struct iommu_sva *handle = ERR_PTR(-EINVAL);
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_bind)
+ return ERR_PTR(-ENODEV);
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return ERR_PTR(-ENODEV);
+
+ /* Ensure device count and domain don't change while we're binding */
+ mutex_lock(&group->mutex);
+
+ /*
+ * To keep things simple, SVA currently doesn't support IOMMU groups
+ * with more than one device. Existing SVA-capable systems are not
+ * affected by the problems that required IOMMU groups (lack of ACS
+ * isolation, device ID aliasing and other hardware issues).
+ */
+ if (iommu_group_device_count(group) != 1)
+ goto out_unlock;
+
+ handle = ops->sva_bind(dev, mm, drvdata);
+
+out_unlock:
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
+
+/**
+ * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device()
+ * @handle: the handle returned by iommu_sva_bind_device()
+ *
+ * Put a reference to a bond between device and address space. The device
+ * should not be issuing any more transactions for this PASID. All outstanding
+ * page requests for this PASID must have been flushed to the IOMMU.
+ */
+void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+ struct iommu_group *group;
+ struct device *dev = handle->dev;
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_unbind)
+ return;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return;
+
+ mutex_lock(&group->mutex);
+ ops->sva_unbind(handle);
+ mutex_unlock(&group->mutex);
+
+ iommu_group_put(group);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
+
+int iommu_sva_set_ops(struct iommu_sva *handle,
+ const struct iommu_sva_ops *sva_ops)
+{
+ if (handle->ops && handle->ops != sva_ops)
+ return -EEXIST;
+
+ handle->ops = sva_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
+
+int iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_get_pasid)
+ return IOMMU_PASID_INVALID;
+
+ return ops->sva_get_pasid(handle);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
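
A device driver using the SVA path exported above would bind an mm, read back the PASID to program into the device, and later unbind. A minimal sketch, assuming IOMMU_DEV_FEAT_SVA has been enabled for the device beforehand and passing a NULL drvdata:

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

/* Hypothetical driver-side SVA usage; error handling is abbreviated. */
static int example_bind_current_mm(struct device *dev, struct iommu_sva **out)
{
	struct iommu_sva *handle;
	int pasid;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* Program "pasid" into the device so its DMA is PASID-tagged */
	*out = handle;
	return 0;
}
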
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 9fb0eb7a4d02..b4b87d6ae67f 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -34,7 +34,7 @@
#include <linux/of_iommu.h>
#include <asm/cacheflush.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index de3e02277b70..b66d11b0286e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -632,16 +632,20 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (!larbnode)
return -EINVAL;
- if (!of_device_is_available(larbnode))
+ if (!of_device_is_available(larbnode)) {
+ of_node_put(larbnode);
continue;
+ }
ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
if (ret)/* The id is consecutive if this property is absent */
id = i;
plarbdev = of_find_device_by_node(larbnode);
- if (!plarbdev)
+ if (!plarbdev) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
+ }
data->smi_imu.larb_imu[id].dev = &plarbdev->dev;
component_match_add_release(dev, &match, release_of,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 5182c7d6171e..463ee08f7d3a 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
-#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
@@ -146,8 +145,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
SMMU_PDE_NONSECURE)
-#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
- SMMU_PTE_NONSECURE)
static unsigned int iova_pd_index(unsigned long iova)
{
@@ -205,8 +202,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_MATCH_ALL;
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -216,8 +217,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_SECTION(iova);
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -227,8 +232,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_GROUP(iova);
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
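
The same two-branch ASID field encoding now appears in all three flush helpers; a possible follow-up (not part of this patch) would be to factor it out into one helper keyed off the SoC data, for example:

/*
 * Possible cleanup, not in this patch: one helper for the ASID field
 * encoding, which only depends on how many ASIDs the SoC generation has.
 */
static u32 smmu_tlb_flush_asid(struct tegra_smmu *smmu, unsigned int asid)
{
	if (smmu->soc->num_asids == 4)
		return (asid & 0x3) << 29;

	return (asid & 0x7f) << 24;
}
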
@@ -316,6 +325,9 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
/* TODO: free page directory and page tables */
+ WARN_ON_ONCE(as->use_count);
+ kfree(as->count);
+ kfree(as->pts);
kfree(as);
}
@@ -645,6 +657,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
+ u32 pte_attrs;
u32 *pte;
pte = as_get_pte(as, iova, &pte_dma);
@@ -655,8 +668,16 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
if (*pte == 0)
tegra_smmu_pte_get_use(as, iova);
+ pte_attrs = SMMU_PTE_NONSECURE;
+
+ if (prot & IOMMU_READ)
+ pte_attrs |= SMMU_PTE_READABLE;
+
+ if (prot & IOMMU_WRITE)
+ pte_attrs |= SMMU_PTE_WRITABLE;
+
tegra_smmu_set_pte(as, iova, pte, pte_dma,
- __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
+ __phys_to_pfn(paddr) | pte_attrs);
return 0;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 5438abb1baba..cf7984991062 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -160,6 +160,12 @@ config IMGPDC_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config IXP4XX_IRQ
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_MULTI_HANDLER
+ select SPARSE_IRQ
+
config MADERA_IRQ
tristate
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 85972ae1bd7f..f8c66e958a64 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
obj-$(CONFIG_I8259) += irq-i8259.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
+obj-$(CONFIG_IXP4XX_IRQ) += irq-ixp4xx.o
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
index aa7290784636..0390603170b4 100644
--- a/drivers/irqchip/irq-ath79-misc.c
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -22,6 +22,15 @@
#define AR71XX_RESET_REG_MISC_INT_ENABLE 4
#define ATH79_MISC_IRQ_COUNT 32
+#define ATH79_MISC_PERF_IRQ 5
+
+static int ath79_perfcount_irq;
+
+int get_c0_perfcount_int(void)
+{
+ return ath79_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
static void ath79_misc_irq_handler(struct irq_desc *desc)
{
@@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domain_init(
{
void __iomem *base = domain->host_data;
+ ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
+
/* Disable and clear all interrupts */
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index f5fe0100f9ff..de14e06fd9ec 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -446,7 +446,7 @@ static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
}
static int __init
-acpi_parse_madt_msi(struct acpi_subtable_header *header,
+acpi_parse_madt_msi(union acpi_subtable_headers *header,
const unsigned long end)
{
int ret;
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 8d6d009d1d58..c81d5b81da56 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -159,7 +159,7 @@ static int __init its_pci_of_msi_init(void)
#ifdef CONFIG_ACPI
static int __init
-its_pci_msi_parse_madt(struct acpi_subtable_header *header,
+its_pci_msi_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_translator *its_entry;
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 7b8e87b493fe..9cdcda5bb3bd 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -117,7 +117,7 @@ static int __init its_pmsi_init_one(struct fwnode_handle *fwnode,
#ifdef CONFIG_ACPI
static int __init
-its_pmsi_parse_madt(struct acpi_subtable_header *header,
+its_pmsi_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_translator *its_entry;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 7577755bdcf4..128ac893d7e4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3830,13 +3830,13 @@ static int __init acpi_get_its_numa_node(u32 its_id)
return NUMA_NO_NODE;
}
-static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
+static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
const unsigned long end)
{
return 0;
}
-static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
+static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
const unsigned long end)
{
int node;
@@ -3903,7 +3903,7 @@ static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif
-static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
+static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_translator *its_entry;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 15e55d327505..f44cd89cfc40 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1593,7 +1593,7 @@ gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
}
static int __init
-gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
+gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_redistributor *redist =
@@ -1611,7 +1611,7 @@ gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
}
static int __init
-gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
+gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *gicc =
@@ -1653,14 +1653,14 @@ static int __init gic_acpi_collect_gicr_base(void)
return -ENODEV;
}
-static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
+static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
const unsigned long end)
{
/* Subtable presence means that redist exists, that's it */
return 0;
}
-static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
+static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *gicc =
@@ -1726,7 +1726,7 @@ static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
return true;
}
-static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
+static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *gicc =
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index fd3110c171ba..c6dbe5018972 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1495,7 +1495,7 @@ static struct
} acpi_data __initdata;
static int __init
-gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
+gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *processor;
@@ -1527,7 +1527,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
}
/* The things you have to do to just *count* something... */
-static int __init acpi_dummy_func(struct acpi_subtable_header *header,
+static int __init acpi_dummy_func(union acpi_subtable_headers *header,
const unsigned long end)
{
return 0;
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
new file mode 100644
index 000000000000..d576809429ac
--- /dev/null
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * irqchip for the IXP4xx interrupt controller
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-ixp4xx.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define IXP4XX_ICPR 0x00 /* Interrupt Status */
+#define IXP4XX_ICMR 0x04 /* Interrupt Enable */
+#define IXP4XX_ICLR 0x08 /* Interrupt IRQ/FIQ Select */
+#define IXP4XX_ICIP 0x0C /* IRQ Status */
+#define IXP4XX_ICFP 0x10 /* FIQ Status */
+#define IXP4XX_ICHR 0x14 /* Interrupt Priority */
+#define IXP4XX_ICIH 0x18 /* IRQ Highest Pri Int */
+#define IXP4XX_ICFH 0x1C /* FIQ Highest Pri Int */
+
+/* IXP43x and IXP46x-only */
+#define IXP4XX_ICPR2 0x20 /* Interrupt Status 2 */
+#define IXP4XX_ICMR2 0x24 /* Interrupt Enable 2 */
+#define IXP4XX_ICLR2 0x28 /* Interrupt IRQ/FIQ Select 2 */
+#define IXP4XX_ICIP2 0x2C /* IRQ Status */
+#define IXP4XX_ICFP2 0x30 /* FIQ Status */
+#define IXP4XX_ICEEN 0x34 /* Error High Pri Enable */
+
+/**
+ * struct ixp4xx_irq - state container for the IXP4xx IRQ controller
+ * @irqbase: IRQ controller memory base in virtual memory
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC (with 64 IRQs)
+ * @irqchip: irqchip for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct ixp4xx_irq {
+ void __iomem *irqbase;
+ bool is_356;
+ struct irq_chip irqchip;
+ struct irq_domain *domain;
+};
+
+/* Local static state container */
+static struct ixp4xx_irq ixirq;
+
+/* GPIO Clocks */
+#define IXP4XX_GPIO_CLK_0 14
+#define IXP4XX_GPIO_CLK_1 15
+
+static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ /* All are level active high (asserted) here */
+ if (type != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ return 0;
+}
+
+static void ixp4xx_irq_mask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val &= ~BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val &= ~BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+/*
+ * Level triggered interrupts on GPIO lines can only be cleared when the
+ * interrupt condition disappears.
+ */
+static void ixp4xx_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val |= BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ unsigned long status;
+ int i;
+
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP);
+ for_each_set_bit(i, &status, 32)
+ handle_domain_irq(ixi->domain, i, regs);
+
+ /*
+ * IXP465/IXP435 has an upper IRQ status register
+ */
+ if (ixi->is_356) {
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2);
+ for_each_set_bit(i, &status, 32)
+ handle_domain_irq(ixi->domain, i + 32, regs);
+ }
+}
+
+static int ixp4xx_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ixp4xx_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_irq *ixi = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ /*
+ * TODO: after converting IXP4xx to only device tree, set
+ * handle_bad_irq as default handler and assume all consumers
+ * call .set_type() as this is provided in the second cell in
+ * the device tree phandle.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixi->irqchip,
+ ixi,
+ handle_level_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * This needs to be a hierarchical irqdomain to work well with the
+ * GPIO irqchip (which is lower in the hierarchy)
+ */
+static const struct irq_domain_ops ixp4xx_irqdomain_ops = {
+ .translate = ixp4xx_irq_domain_translate,
+ .alloc = ixp4xx_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+/**
+ * ixp4xx_get_irq_domain() - retrieve the ixp4xx irq domain
+ *
+ * This function will go away when we transition to DT probing.
+ */
+struct irq_domain *ixp4xx_get_irq_domain(void)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+
+ return ixi->domain;
+}
+EXPORT_SYMBOL_GPL(ixp4xx_get_irq_domain);
+
+/*
+ * This is the Linux IRQ to hwirq mapping table. It goes away once we
+ * have DT support, as all IRQ resources are then defined in the device
+ * tree. It registers all the IRQs that are not used by the hierarchical
+ * GPIO IRQ chip; the "holes" in between these IRQs will be requested by
+ * the GPIO driver. This is a stop-gap solution (a worked example of the
+ * mapping follows the table below).
+ */
+struct ixp4xx_irq_chunk {
+ int irq;
+ int hwirq;
+ int nr_irqs;
+};
+
+static const struct ixp4xx_irq_chunk ixp4xx_irq_chunks[] = {
+ {
+ .irq = 16,
+ .hwirq = 0,
+ .nr_irqs = 6,
+ },
+ {
+ .irq = 24,
+ .hwirq = 8,
+ .nr_irqs = 11,
+ },
+ {
+ .irq = 46,
+ .hwirq = 30,
+ .nr_irqs = 2,
+ },
+ /* Only on the IXP43x/45x/46x (64-IRQ) variants */
+ {
+ .irq = 48,
+ .hwirq = 32,
+ .nr_irqs = 10,
+ },
+};
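
Working through the table: the first chunk hands Linux IRQs 16..21 to hwirqs 0..5, the second maps Linux IRQs 24..34 to hwirqs 8..18, and the third maps Linux IRQs 46..47 to hwirqs 30..31. The final chunk (Linux IRQs 48..57 for hwirqs 32..41) is only allocated on the 64-IRQ variants, since ixp4xx_irq_init() drops it when is_356 is false. The remaining holes, hwirqs 6..7 and 19..29, are the lines left for the hierarchical GPIO irqchip to claim.
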
+
+/**
+ * ixp4xx_irq_setup() - Common setup code for the IXP4xx interrupt controller
+ * @ixi: State container
+ * @irqbase: Virtual memory base for the interrupt controller
+ * @fwnode: Corresponding fwnode abstraction for this controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+static int ixp4xx_irq_setup(struct ixp4xx_irq *ixi,
+ void __iomem *irqbase,
+ struct fwnode_handle *fwnode,
+ bool is_356)
+{
+ int nr_irqs;
+
+ ixi->irqbase = irqbase;
+ ixi->is_356 = is_356;
+
+ /* Route all sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR);
+
+ /* Disable all interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR);
+
+ if (is_356) {
+ /* Route upper 32 sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR2);
+
+ /* Disable upper 32 interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR2);
+
+ nr_irqs = 64;
+ } else {
+ nr_irqs = 32;
+ }
+
+ ixi->irqchip.name = "IXP4xx";
+ ixi->irqchip.irq_mask = ixp4xx_irq_mask;
+ ixi->irqchip.irq_unmask = ixp4xx_irq_unmask;
+ ixi->irqchip.irq_set_type = ixp4xx_set_irq_type;
+
+ ixi->domain = irq_domain_create_linear(fwnode, nr_irqs,
+ &ixp4xx_irqdomain_ops,
+ ixi);
+ if (!ixi->domain) {
+ pr_crit("IXP4XX: can not add primary irqdomain\n");
+ return -ENODEV;
+ }
+
+ set_handle_irq(ixp4xx_handle_irq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_irq_init() - Function to initialize the irqchip from boardfiles
+ * @irqbase: physical base for the irq controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+void __init ixp4xx_irq_init(resource_size_t irqbase,
+ bool is_356)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ struct irq_fwspec fwspec;
+ int nr_chunks;
+ int ret;
+ int i;
+
+ base = ioremap(irqbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return;
+ }
+ fwnode = irq_domain_alloc_fwnode(base);
+ if (!fwnode) {
+ pr_crit("IXP4XX: no domain handle\n");
+ return;
+ }
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret) {
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+ irq_domain_free_fwnode(fwnode);
+ }
+
+ nr_chunks = ARRAY_SIZE(ixp4xx_irq_chunks);
+ if (!is_356)
+ nr_chunks--;
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ for (i = 0; i < nr_chunks; i++) {
+ const struct ixp4xx_irq_chunk *chunk = &ixp4xx_irq_chunks[i];
+
+ pr_info("Allocate Linux IRQs %d..%d HW IRQs %d..%d\n",
+ chunk->irq, chunk->irq + chunk->nr_irqs - 1,
+ chunk->hwirq, chunk->hwirq + chunk->nr_irqs - 1);
+ fwspec.fwnode = fwnode;
+ fwspec.param[0] = chunk->hwirq;
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(ixi->domain,
+ chunk->irq,
+ chunk->nr_irqs,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ pr_crit("IXP4XX: can not allocate irqs in hierarchy %d\n",
+ ret);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(ixp4xx_irq_init);
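
Until the board files are converted to DT, the expectation is that a machine's .init_irq hook calls this directly. A minimal sketch, not from this patch; the 0xc8003000 register base is an assumption for illustration and should be taken from the SoC/board headers:

#include <linux/init.h>
#include <linux/irqchip/irq-ixp4xx.h>

/* Hypothetical board hook; 0xc8003000 is an assumed controller base. */
static void __init example_ixp42x_init_irq(void)
{
	/* false: plain IXP42x with 32 interrupt sources */
	ixp4xx_irq_init(0xc8003000, false);
}
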
+
+#ifdef CONFIG_OF
+int __init ixp4xx_of_init_irq(struct device_node *np,
+ struct device_node *parent)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ bool is_356;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return -ENODEV;
+ }
+ fwnode = of_node_to_fwnode(np);
+
+ /* These chip variants have 64 interrupts */
+ is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp45x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp46x-interrupt");
+
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret)
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+
+ return ret;
+}
+IRQCHIP_DECLARE(ixp42x, "intel,ixp42x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp43x, "intel,ixp43x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp45x, "intel,ixp45x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp46x, "intel,ixp46x-interrupt",
+ ixp4xx_of_init_irq);
+#endif
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
index 86b72fbd3b45..353111a10413 100644
--- a/drivers/irqchip/irq-ls1x.c
+++ b/drivers/irqchip/irq-ls1x.c
@@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node,
NULL);
if (!priv->domain) {
pr_err("ls1x-irq: cannot add IRQ domain\n");
+ err = -ENOMEM;
goto out_iounmap;
}
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index e1da70a9530c..3c3ad42f22bf 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -960,7 +960,7 @@ static int capi_open(struct inode *inode, struct file *file)
list_add_tail(&cdev->list, &capidev_list);
mutex_unlock(&capidev_list_lock);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int capi_release(struct inode *inode, struct file *file)
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index ecdeb89645d0..149b1aca52a2 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -958,6 +958,7 @@ static void write_iso_callback(struct urb *urb)
*/
static int starturbs(struct bc_state *bcs)
{
+ struct usb_device *udev = bcs->cs->hw.bas->udev;
struct bas_bc_state *ubc = bcs->hw.bas;
struct urb *urb;
int j, k;
@@ -975,8 +976,8 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
- usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
- usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel),
+ usb_fill_int_urb(urb, udev,
+ usb_rcvisocpipe(udev, 3 + 2 * bcs->channel),
ubc->isoinbuf + k * BAS_INBUFSIZE,
BAS_INBUFSIZE, read_iso_callback, bcs,
BAS_FRAMETIME);
@@ -1006,8 +1007,8 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
- usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
- usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel),
+ usb_fill_int_urb(urb, udev,
+ usb_sndisocpipe(udev, 4 + 2 * bcs->channel),
ubc->isooutbuf->data,
sizeof(ubc->isooutbuf->data),
write_iso_callback, &ubc->isoouturbs[k],
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 362aa5450a5e..29c22d74afe0 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2041,9 +2041,9 @@ setup_hw(struct hfc_pci *hc)
}
printk(KERN_INFO
- "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
- (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
- (u_long) hc->hw.dmahandle, hc->irq, HZ);
+ "HFC-PCI: defined at mem %#lx fifo %p(%pad) IRQ %d HZ %d\n",
+ (u_long) hc->hw.pci_io, hc->hw.fifos,
+ &hc->hw.dmahandle, hc->irq, HZ);
/* enable memory mapped ports, disable busmaster */
pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b12e6cae26c2..de965115a183 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1294,9 +1294,9 @@ void HiSax_reportcard(int cardnr, int sel)
printk(KERN_DEBUG "HiSax: reportcard No %d\n", cardnr + 1);
printk(KERN_DEBUG "HiSax: Type %s\n", CardType[cs->typ]);
printk(KERN_DEBUG "HiSax: debuglevel %x\n", cs->debug);
- printk(KERN_DEBUG "HiSax: HiSax_reportcard address 0x%lX\n",
- (ulong) & HiSax_reportcard);
- printk(KERN_DEBUG "HiSax: cs 0x%lX\n", (ulong) cs);
+ printk(KERN_DEBUG "HiSax: HiSax_reportcard address 0x%px\n",
+ HiSax_reportcard);
+ printk(KERN_DEBUG "HiSax: cs 0x%px\n", cs);
printk(KERN_DEBUG "HiSax: HW_Flags %lx bc0 flg %lx bc1 flg %lx\n",
cs->HW_Flags, cs->bcs[0].Flag, cs->bcs[1].Flag);
printk(KERN_DEBUG "HiSax: bcs 0 mode %d ch%d\n",
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a7b275ea5de1..7e0f419c14f8 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1888,8 +1888,9 @@ static u32 isdn_ppp_mp_get_seq(int short_seq,
return seq;
}
-struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
- struct sk_buff *from, struct sk_buff *to)
+static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
+ struct sk_buff *from,
+ struct sk_buff *to)
{
if (from)
while (from != to) {
@@ -1900,8 +1901,8 @@ struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
return from;
}
-void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *from, struct sk_buff *to)
+static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
+ struct sk_buff *from, struct sk_buff *to)
{
ippp_bundle *mp = net_dev->pb;
int proto;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 4ab8b1b6608f..a14e35d40538 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct sock *sk = sock->sk;
int err = 0;
- if (!maddr || maddr->family != AF_ISDN)
+ if (addr_len < sizeof(struct sockaddr_mISDN))
return -EINVAL;
- if (addr_len < sizeof(struct sockaddr_mISDN))
+ if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
lock_sock(sk);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a72f97fca57b..71be87bdb926 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -23,8 +23,8 @@ config LEDS_CLASS_FLASH
tristate "LED Flash Class Support"
depends on LEDS_CLASS
help
- This option enables the flash led sysfs class in /sys/class/leds.
- It wrapps LED Class and adds flash LEDs specific sysfs attributes
+ This option enables the flash LED sysfs class in /sys/class/leds.
+ It wraps LED Class and adds flash LEDs specific sysfs attributes
and kernel internal API to it. You'll need this to provide support
for the flash related features of a LED device. It can be built
as a module.
@@ -56,7 +56,7 @@ config LEDS_AAT1290
depends on OF
depends on PINCTRL
help
- This option enables support for the LEDs on the AAT1290.
+ This option enables support for the LEDs on the AAT1290.
config LEDS_AN30259A
tristate "LED support for Panasonic AN30259A"
@@ -138,6 +138,16 @@ config LEDS_LM3530
controlled manually or using PWM input or using ambient
light automatically.
+config LEDS_LM3532
+ tristate "LCD Backlight driver for LM3532"
+ depends on LEDS_CLASS
+ depends on I2C
+ help
+ This option enables support for LCD backlights driven by the
+ TI LM3532 controller. The backlight can be controlled manually,
+ via a PWM input, or automatically based on ambient-light input.
+
config LEDS_LM3533
tristate "LED support for LM3533"
depends on LEDS_CLASS
@@ -413,13 +423,13 @@ config LEDS_CLEVO_MAIL
This module can drive the mail LED for the following notebooks:
- Clevo D400P
- Clevo D410J
- Clevo D410V
- Clevo D400V/D470V (not tested, but might work)
- Clevo M540N
- Clevo M5x0N (not tested, but might work)
- Positivo Mobile (Clevo M5x0V)
+ Clevo D400P
+ Clevo D410J
+ Clevo D410V
+ Clevo D400V/D470V (not tested, but might work)
+ Clevo M540N
+ Clevo M5x0N (not tested, but might work)
+ Positivo Mobile (Clevo M5x0V)
If your model is not listed here you can try the "nodetect"
module parameter.
@@ -462,7 +472,7 @@ config LEDS_WM831X_STATUS
depends on MFD_WM831X
help
This option enables support for the status LEDs of the WM831x
- series of PMICs.
+ series of PMICs.
config LEDS_WM8350
tristate "LED Support for WM8350 AudioPlus PMIC"
@@ -533,6 +543,7 @@ config LEDS_LT3593
tristate "LED driver for LT3593 controllers"
depends on LEDS_CLASS
depends on GPIOLIB || COMPILE_TEST
+ depends on OF
help
This option enables support for LEDs driven by a Linear Technology
LT3593 controller. This controller uses a special one-wire pulse
@@ -608,6 +619,12 @@ config LEDS_TLC591XX
This option enables support for Texas Instruments TLC59108
and TLC59116 LED controllers.
+config LEDS_MAX77650
+ tristate "LED support for Maxim MAX77650 PMIC"
+ depends on LEDS_CLASS && MFD_MAX77650
+ help
+ LED driver for the MAX77650 family of PMICs from Maxim Integrated.
+
config LEDS_MAX77693
tristate "LED support for MAX77693 Flash"
depends on LEDS_CLASS_FLASH
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 4c1b0054f379..1e9702ebffee 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_CPCAP) += leds-cpcap.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
+obj-$(CONFIG_LEDS_LM3532) += leds-lm3532.o
obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
obj-$(CONFIG_LEDS_LM3642) += leds-lm3642.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
@@ -61,6 +62,7 @@ obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
+obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o
obj-$(CONFIG_LEDS_MAX77693) += leds-max77693.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 3c7e3487b373..85848c5da705 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -57,6 +57,7 @@ static ssize_t brightness_store(struct device *dev,
if (state == LED_OFF)
led_trigger_remove(led_cdev);
led_set_brightness(led_cdev, state);
+ flush_work(&led_cdev->set_brightness_work);
ret = size;
unlock:
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index e3da7c03da1b..e9ae7f87ab90 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -164,6 +164,11 @@ static void led_blink_setup(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
+ /*
+ * If "set brightness to 0" is pending in workqueue, we don't
+ * want that to be reordered after blink_set()
+ */
+ flush_work(&led_cdev->set_brightness_work);
if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
led_cdev->blink_set &&
!led_cdev->blink_set(led_cdev, delay_on, delay_off))
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index 98a69b1a43f9..b0df514992e1 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -25,7 +25,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <media/v4l2-flash-led-class.h>
@@ -148,8 +148,8 @@ struct as3645a {
struct v4l2_flash *vf;
struct v4l2_flash *vfind;
- struct device_node *flash_node;
- struct device_node *indicator_node;
+ struct fwnode_handle *flash_node;
+ struct fwnode_handle *indicator_node;
struct as3645a_config cfg;
@@ -493,30 +493,31 @@ static int as3645a_detect(struct as3645a *flash)
static int as3645a_parse_node(struct as3645a *flash,
struct as3645a_names *names,
- struct device_node *node)
+ struct fwnode_handle *fwnode)
{
struct as3645a_config *cfg = &flash->cfg;
- struct device_node *child;
+ struct fwnode_handle *child;
const char *name;
int rval;
- for_each_child_of_node(node, child) {
+ fwnode_for_each_child_node(fwnode, child) {
u32 id = 0;
- of_property_read_u32(child, "reg", &id);
+ fwnode_property_read_u32(child, "reg", &id);
switch (id) {
case AS_LED_FLASH:
- flash->flash_node = of_node_get(child);
+ flash->flash_node = child;
break;
case AS_LED_INDICATOR:
- flash->indicator_node = of_node_get(child);
+ flash->indicator_node = child;
break;
default:
dev_warn(&flash->client->dev,
"unknown LED %u encountered, ignoring\n", id);
break;
}
+ fwnode_handle_get(child);
}
if (!flash->flash_node) {
@@ -524,42 +525,46 @@ static int as3645a_parse_node(struct as3645a *flash,
return -ENODEV;
}
- rval = of_property_read_string(flash->flash_node, "label", &name);
- if (!rval)
+ rval = fwnode_property_read_string(flash->flash_node, "label", &name);
+ if (!rval) {
strlcpy(names->flash, name, sizeof(names->flash));
- else
+ } else if (is_of_node(fwnode)) {
snprintf(names->flash, sizeof(names->flash),
- "%pOFn:flash", node);
+ "%pOFn:flash", to_of_node(fwnode));
+ } else {
+ dev_err(&flash->client->dev, "flash node has no label!\n");
+ return -EINVAL;
+ }
- rval = of_property_read_u32(flash->flash_node, "flash-timeout-us",
- &cfg->flash_timeout_us);
+ rval = fwnode_property_read_u32(flash->flash_node, "flash-timeout-us",
+ &cfg->flash_timeout_us);
if (rval < 0) {
dev_err(&flash->client->dev,
"can't read flash-timeout-us property for flash\n");
goto out_err;
}
- rval = of_property_read_u32(flash->flash_node, "flash-max-microamp",
- &cfg->flash_max_ua);
+ rval = fwnode_property_read_u32(flash->flash_node, "flash-max-microamp",
+ &cfg->flash_max_ua);
if (rval < 0) {
dev_err(&flash->client->dev,
"can't read flash-max-microamp property for flash\n");
goto out_err;
}
- rval = of_property_read_u32(flash->flash_node, "led-max-microamp",
- &cfg->assist_max_ua);
+ rval = fwnode_property_read_u32(flash->flash_node, "led-max-microamp",
+ &cfg->assist_max_ua);
if (rval < 0) {
dev_err(&flash->client->dev,
"can't read led-max-microamp property for flash\n");
goto out_err;
}
- of_property_read_u32(flash->flash_node, "voltage-reference",
- &cfg->voltage_reference);
+ fwnode_property_read_u32(flash->flash_node, "voltage-reference",
+ &cfg->voltage_reference);
- of_property_read_u32(flash->flash_node, "ams,input-max-microamp",
- &cfg->peak);
+ fwnode_property_read_u32(flash->flash_node, "ams,input-max-microamp",
+ &cfg->peak);
cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak);
if (!flash->indicator_node) {
@@ -568,15 +573,21 @@ static int as3645a_parse_node(struct as3645a *flash,
goto out_err;
}
- rval = of_property_read_string(flash->indicator_node, "label", &name);
- if (!rval)
+ rval = fwnode_property_read_string(flash->indicator_node, "label",
+ &name);
+ if (!rval) {
strlcpy(names->indicator, name, sizeof(names->indicator));
- else
+ } else if (is_of_node(fwnode)) {
snprintf(names->indicator, sizeof(names->indicator),
- "%pOFn:indicator", node);
+ "%pOFn:indicator", to_of_node(fwnode));
+ } else {
+ dev_err(&flash->client->dev, "indicator node has no label!\n");
+ return -EINVAL;
+ }
- rval = of_property_read_u32(flash->indicator_node, "led-max-microamp",
- &cfg->indicator_max_ua);
+ rval = fwnode_property_read_u32(flash->indicator_node,
+ "led-max-microamp",
+ &cfg->indicator_max_ua);
if (rval < 0) {
dev_err(&flash->client->dev,
"can't read led-max-microamp property for indicator\n");
@@ -586,8 +597,8 @@ static int as3645a_parse_node(struct as3645a *flash,
return 0;
out_err:
- of_node_put(flash->flash_node);
- of_node_put(flash->indicator_node);
+ fwnode_handle_put(flash->flash_node);
+ fwnode_handle_put(flash->indicator_node);
return rval;
}
@@ -668,14 +679,14 @@ static int as3645a_v4l2_setup(struct as3645a *flash)
strlcpy(cfgind.dev_name, flash->iled_cdev.name, sizeof(cfg.dev_name));
flash->vf = v4l2_flash_init(
- &flash->client->dev, of_fwnode_handle(flash->flash_node),
- &flash->fled, NULL, &cfg);
+ &flash->client->dev, flash->flash_node, &flash->fled, NULL,
+ &cfg);
if (IS_ERR(flash->vf))
return PTR_ERR(flash->vf);
flash->vfind = v4l2_flash_indicator_init(
- &flash->client->dev, of_fwnode_handle(flash->indicator_node),
- &flash->iled_cdev, &cfgind);
+ &flash->client->dev, flash->indicator_node, &flash->iled_cdev,
+ &cfgind);
if (IS_ERR(flash->vfind)) {
v4l2_flash_release(flash->vf);
return PTR_ERR(flash->vfind);
@@ -690,7 +701,7 @@ static int as3645a_probe(struct i2c_client *client)
struct as3645a *flash;
int rval;
- if (client->dev.of_node == NULL)
+ if (!dev_fwnode(&client->dev))
return -ENODEV;
flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
@@ -699,7 +710,7 @@ static int as3645a_probe(struct i2c_client *client)
flash->client = client;
- rval = as3645a_parse_node(flash, &names, client->dev.of_node);
+ rval = as3645a_parse_node(flash, &names, dev_fwnode(&client->dev));
if (rval < 0)
return rval;
@@ -731,8 +742,8 @@ out_mutex_destroy:
mutex_destroy(&flash->mutex);
out_put_nodes:
- of_node_put(flash->flash_node);
- of_node_put(flash->indicator_node);
+ fwnode_handle_put(flash->flash_node);
+ fwnode_handle_put(flash->indicator_node);
return rval;
}
@@ -751,8 +762,8 @@ static int as3645a_remove(struct i2c_client *client)
mutex_destroy(&flash->mutex);
- of_node_put(flash->flash_node);
- of_node_put(flash->indicator_node);
+ fwnode_handle_put(flash->flash_node);
+ fwnode_handle_put(flash->indicator_node);
return 0;
}
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index 851c1920b63c..11b771fb933b 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -594,7 +594,6 @@ static int blinkm_probe(struct i2c_client *client,
goto exit;
}
- data->i2c_addr = 0x09;
data->i2c_addr = 0x08;
/* i2c addr - use fake addr of 0x08 initially (real is 0x09) */
data->fw_ver = 0xfe;
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
new file mode 100644
index 000000000000..180895b83b88
--- /dev/null
+++ b/drivers/leds/leds-lm3532.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0
+// TI LM3532 LED driver
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <uapi/linux/uleds.h>
+#include <linux/gpio/consumer.h>
+
+#define LM3532_NAME "lm3532-led"
+#define LM3532_BL_MODE_MANUAL 0x00
+#define LM3532_BL_MODE_ALS 0x01
+
+#define LM3532_REG_OUTPUT_CFG 0x10
+#define LM3532_REG_STARTSHUT_RAMP 0x11
+#define LM3532_REG_RT_RAMP 0x12
+#define LM3532_REG_PWM_A_CFG 0x13
+#define LM3532_REG_PWM_B_CFG 0x14
+#define LM3532_REG_PWM_C_CFG 0x15
+#define LM3532_REG_ZONE_CFG_A 0x16
+#define LM3532_REG_CTRL_A_BRT 0x17
+#define LM3532_REG_ZONE_CFG_B 0x18
+#define LM3532_REG_CTRL_B_BRT 0x19
+#define LM3532_REG_ZONE_CFG_C 0x1a
+#define LM3532_REG_CTRL_C_BRT 0x1b
+#define LM3532_REG_ENABLE 0x1d
+#define LM3532_ALS_CONFIG 0x23
+#define LM3532_REG_ZN_0_HI 0x60
+#define LM3532_REG_ZN_0_LO 0x61
+#define LM3532_REG_ZN_1_HI 0x62
+#define LM3532_REG_ZN_1_LO 0x63
+#define LM3532_REG_ZN_2_HI 0x64
+#define LM3532_REG_ZN_2_LO 0x65
+#define LM3532_REG_ZN_3_HI 0x66
+#define LM3532_REG_ZN_3_LO 0x67
+#define LM3532_REG_MAX 0x7e
+
+/* Control Enable */
+#define LM3532_CTRL_A_ENABLE BIT(0)
+#define LM3532_CTRL_B_ENABLE BIT(1)
+#define LM3532_CTRL_C_ENABLE BIT(2)
+
+/* PWM Zone Control */
+#define LM3532_PWM_ZONE_MASK 0x7c
+#define LM3532_PWM_ZONE_0_EN BIT(2)
+#define LM3532_PWM_ZONE_1_EN BIT(3)
+#define LM3532_PWM_ZONE_2_EN BIT(4)
+#define LM3532_PWM_ZONE_3_EN BIT(5)
+#define LM3532_PWM_ZONE_4_EN BIT(6)
+
+/* Brightness Configuration */
+#define LM3532_I2C_CTRL BIT(0)
+#define LM3532_ALS_CTRL 0
+#define LM3532_LINEAR_MAP BIT(1)
+#define LM3532_ZONE_MASK (BIT(2) | BIT(3) | BIT(4))
+#define LM3532_ZONE_0 0
+#define LM3532_ZONE_1 BIT(2)
+#define LM3532_ZONE_2 BIT(3)
+#define LM3532_ZONE_3 (BIT(2) | BIT(3))
+#define LM3532_ZONE_4 BIT(4)
+
+#define LM3532_ENABLE_ALS BIT(3)
+#define LM3532_ALS_SEL_SHIFT 6
+
+/* Zone Boundary Register */
+#define LM3532_ALS_WINDOW_mV 2000
+#define LM3532_ALS_ZB_MAX 4
+#define LM3532_ALS_OFFSET_mV 2
+
+#define LM3532_CONTROL_A 0
+#define LM3532_CONTROL_B 1
+#define LM3532_CONTROL_C 2
+#define LM3532_MAX_CONTROL_BANKS 3
+#define LM3532_MAX_LED_STRINGS 3
+
+#define LM3532_OUTPUT_CFG_MASK 0x3
+#define LM3532_BRT_VAL_ADJUST 8
+#define LM3532_RAMP_DOWN_SHIFT 3
+
+#define LM3532_NUM_RAMP_VALS 8
+#define LM3532_NUM_AVG_VALS 8
+#define LM3532_NUM_IMP_VALS 32
+
+/*
+ * struct lm3532_als_data
+ * @config - value of ALS configuration register
+ * @als1_imp_sel - value of ALS1 resistor select register
+ * @als2_imp_sel - value of ALS2 resistor select register
+ * @als_avrg_time - ALS averaging time
+ * @als_input_mode - ALS input mode for brightness control
+ * @als_vmin - Minimum ALS voltage
+ * @als_vmax - Maximum ALS voltage
+ * @zones_lo - values of the ALS low ZB (Zone Boundary) registers
+ * @zones_hi - values of the ALS high ZB (Zone Boundary) registers
+ */
+struct lm3532_als_data {
+ u8 config;
+ u8 als1_imp_sel;
+ u8 als2_imp_sel;
+ u8 als_avrg_time;
+ u8 als_input_mode;
+ u32 als_vmin;
+ u32 als_vmax;
+ u8 zones_lo[LM3532_ALS_ZB_MAX];
+ u8 zones_hi[LM3532_ALS_ZB_MAX];
+};
+
+/**
+ * struct lm3532_led
+ * @led_dev: led class device
+ * @priv - Pointer to the device data structure
+ * @control_bank - Control bank the LED is associated with
+ * @mode - Mode of the LED string
+ * @num_leds - Number of LED strings supported in this array
+ * @led_strings - The LED strings supported in this array
+ * @label - LED label
+ */
+struct lm3532_led {
+ struct led_classdev led_dev;
+ struct lm3532_data *priv;
+
+ int control_bank;
+ int mode;
+ int num_leds;
+ u32 led_strings[LM3532_MAX_CONTROL_BANKS];
+ char label[LED_MAX_NAME_SIZE];
+};
+
+/**
+ * struct lm3532_data
+ * @enable_gpio - Hardware enable gpio
+ * @regulator: regulator
+ * @client: i2c client
+ * @regmap - Device's register map
+ * @dev - Pointer to the device's struct device
+ * @lock - Lock for reading/writing the device
+ * @als_data - Pointer to the als data struct
+ * @runtime_ramp_up - Runtime ramp up setting
+ * @runtime_ramp_down - Runtime ramp down setting
+ * @leds - Array of LED strings
+ */
+struct lm3532_data {
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct device *dev;
+ struct mutex lock;
+
+ struct lm3532_als_data *als_data;
+
+ u32 runtime_ramp_up;
+ u32 runtime_ramp_down;
+
+ struct lm3532_led leds[];
+};
+
+static const struct reg_default lm3532_reg_defs[] = {
+ {LM3532_REG_OUTPUT_CFG, 0xe4},
+ {LM3532_REG_STARTSHUT_RAMP, 0xc0},
+ {LM3532_REG_RT_RAMP, 0xc0},
+ {LM3532_REG_PWM_A_CFG, 0x82},
+ {LM3532_REG_PWM_B_CFG, 0x82},
+ {LM3532_REG_PWM_C_CFG, 0x82},
+ {LM3532_REG_ZONE_CFG_A, 0xf1},
+ {LM3532_REG_CTRL_A_BRT, 0xf3},
+ {LM3532_REG_ZONE_CFG_B, 0xf1},
+ {LM3532_REG_CTRL_B_BRT, 0xf3},
+ {LM3532_REG_ZONE_CFG_C, 0xf1},
+ {LM3532_REG_CTRL_C_BRT, 0xf3},
+ {LM3532_REG_ENABLE, 0xf8},
+ {LM3532_ALS_CONFIG, 0x44},
+ {LM3532_REG_ZN_0_HI, 0x35},
+ {LM3532_REG_ZN_0_LO, 0x33},
+ {LM3532_REG_ZN_1_HI, 0x6a},
+ {LM3532_REG_ZN_1_LO, 0x66},
+ {LM3532_REG_ZN_2_HI, 0xa1},
+ {LM3532_REG_ZN_2_LO, 0x99},
+ {LM3532_REG_ZN_3_HI, 0xdc},
+ {LM3532_REG_ZN_3_LO, 0xcc},
+};
+
+static const struct regmap_config lm3532_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LM3532_REG_MAX,
+ .reg_defaults = lm3532_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lm3532_reg_defs),
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const int als_imp_table[LM3532_NUM_IMP_VALS] = {37000, 18500, 12330,
+ 9250, 7400, 6170, 5290,
+ 4630, 4110, 3700, 3360,
+ 3080, 2850, 2640, 2440,
+ 2310, 2180, 2060, 1950,
+ 1850, 1760, 1680, 1610,
+ 1540, 1480, 1420, 1370,
+ 1320, 1280, 1230, 1190};
+static int lm3532_get_als_imp_index(int als_imped)
+{
+ int i;
+
+ if (als_imped > als_imp_table[1])
+ return 0;
+
+ if (als_imped < als_imp_table[LM3532_NUM_IMP_VALS - 1])
+ return LM3532_NUM_IMP_VALS - 1;
+
+ for (i = 1; i < LM3532_NUM_IMP_VALS; i++) {
+ if (als_imped == als_imp_table[i])
+ return i;
+
+ /* Find an approximate index by looking up the table */
+ if (als_imped < als_imp_table[i - 1] &&
+ als_imped > als_imp_table[i]) {
+ if (als_imp_table[i - 1] - als_imped <
+ als_imped - als_imp_table[i])
+ return i - 1;
+ else
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int lm3532_get_index(const int table[], int size, int value)
+{
+ int i;
+
+ for (i = 1; i < size; i++) {
+ if (value == table[i])
+ return i;
+
+ /* Find an approximate index by looking up the table */
+ if (value > table[i - 1] &&
+ value < table[i]) {
+ if (value - table[i - 1] < table[i] - value)
+ return i - 1;
+ else
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const int als_avrg_table[LM3532_NUM_AVG_VALS] = {17920, 35840, 71680,
+ 143360, 286720, 573440,
+ 1146880, 2293760};
+static int lm3532_get_als_avg_index(int avg_time)
+{
+ if (avg_time <= als_avrg_table[0])
+ return 0;
+
+ if (avg_time > als_avrg_table[LM3532_NUM_AVG_VALS - 1])
+ return LM3532_NUM_AVG_VALS - 1;
+
+ return lm3532_get_index(&als_avrg_table[0], LM3532_NUM_AVG_VALS,
+ avg_time);
+}
+
+static const int ramp_table[LM3532_NUM_RAMP_VALS] = { 8, 1024, 2048, 4096, 8192,
+ 16384, 32768, 65536};
+static int lm3532_get_ramp_index(int ramp_time)
+{
+ if (ramp_time <= ramp_table[0])
+ return 0;
+
+ if (ramp_time > ramp_table[LM3532_NUM_RAMP_VALS - 1])
+ return LM3532_NUM_RAMP_VALS - 1;
+
+ return lm3532_get_index(&ramp_table[0], LM3532_NUM_RAMP_VALS,
+ ramp_time);
+}
+
+static int lm3532_led_enable(struct lm3532_led *led_data)
+{
+ int ctrl_en_val = BIT(led_data->control_bank);
+ int ret;
+
+ ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE,
+ ctrl_en_val, ctrl_en_val);
+ if (ret) {
+ dev_err(led_data->priv->dev, "Failed to set ctrl:%d\n", ret);
+ return ret;
+ }
+
+ return regulator_enable(led_data->priv->regulator);
+}
+
+static int lm3532_led_disable(struct lm3532_led *led_data)
+{
+ int ctrl_en_val = BIT(led_data->control_bank);
+ int ret;
+
+ ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE,
+ ctrl_en_val, ~ctrl_en_val);
+ if (ret) {
+ dev_err(led_data->priv->dev, "Failed to set ctrl:%d\n", ret);
+ return ret;
+ }
+
+ return regulator_disable(led_data->priv->regulator);
+}
+
+static int lm3532_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lm3532_led *led =
+ container_of(led_cdev, struct lm3532_led, led_dev);
+ u8 brightness_reg;
+ int ret;
+
+ mutex_lock(&led->priv->lock);
+
+ if (led->mode == LM3532_BL_MODE_ALS) {
+ if (brt_val > LED_OFF)
+ ret = lm3532_led_enable(led);
+ else
+ ret = lm3532_led_disable(led);
+
+ goto unlock;
+ }
+
+ if (brt_val == LED_OFF) {
+ ret = lm3532_led_disable(led);
+ goto unlock;
+ }
+
+ ret = lm3532_led_enable(led);
+ if (ret)
+ goto unlock;
+
+ brightness_reg = LM3532_REG_CTRL_A_BRT + led->control_bank * 2;
+ brt_val = brt_val / LM3532_BRT_VAL_ADJUST;
+
+ ret = regmap_write(led->priv->regmap, brightness_reg, brt_val);
+
+unlock:
+ mutex_unlock(&led->priv->lock);
+ return ret;
+}
+
+static int lm3532_init_registers(struct lm3532_led *led)
+{
+ struct lm3532_data *drvdata = led->priv;
+ unsigned int runtime_ramp_val;
+ unsigned int output_cfg_val = 0;
+ unsigned int output_cfg_shift = 0;
+ unsigned int output_cfg_mask = 0;
+ int ret, i;
+
+ for (i = 0; i < led->num_leds; i++) {
+ output_cfg_shift = led->led_strings[i] * 2;
+ output_cfg_val |= (led->control_bank << output_cfg_shift);
+ output_cfg_mask |= LM3532_OUTPUT_CFG_MASK << output_cfg_shift;
+ }
+
+ ret = regmap_update_bits(drvdata->regmap, LM3532_REG_OUTPUT_CFG,
+ output_cfg_mask, output_cfg_val);
+ if (ret)
+ return ret;
+
+ runtime_ramp_val = drvdata->runtime_ramp_up |
+ (drvdata->runtime_ramp_down << LM3532_RAMP_DOWN_SHIFT);
+
+ return regmap_write(drvdata->regmap, LM3532_REG_RT_RAMP,
+ runtime_ramp_val);
+}
+
+static int lm3532_als_configure(struct lm3532_data *priv,
+ struct lm3532_led *led)
+{
+ struct lm3532_als_data *als = priv->als_data;
+ u32 als_vmin, als_vmax, als_vstep;
+ int zone_reg = LM3532_REG_ZN_0_HI;
+ int brightness_config_reg;
+ int ret;
+ int i;
+
+ als_vmin = als->als_vmin;
+ als_vmax = als->als_vmax;
+
+ als_vstep = (als_vmax - als_vmin) / ((LM3532_ALS_ZB_MAX + 1) * 2);
+
+ for (i = 0; i < LM3532_ALS_ZB_MAX; i++) {
+ als->zones_lo[i] = ((als_vmin + als_vstep + (i * als_vstep)) *
+ LED_FULL) / 1000;
+ als->zones_hi[i] = ((als_vmin + LM3532_ALS_OFFSET_mV +
+ als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
+
+ zone_reg = LM3532_REG_ZN_0_HI + i * 2;
+ ret = regmap_write(priv->regmap, zone_reg, als->zones_lo[i]);
+ if (ret)
+ return ret;
+
+ zone_reg += 1;
+ ret = regmap_write(priv->regmap, zone_reg, als->zones_hi[i]);
+ if (ret)
+ return ret;
+ }
+
+ als->config = (als->als_avrg_time | (LM3532_ENABLE_ALS) |
+ (als->als_input_mode << LM3532_ALS_SEL_SHIFT));
+
+ ret = regmap_write(priv->regmap, LM3532_ALS_CONFIG, als->config);
+ if (ret)
+ return ret;
+
+ brightness_config_reg = LM3532_REG_ZONE_CFG_A + led->control_bank * 2;
+
+ return regmap_update_bits(priv->regmap, brightness_config_reg,
+ LM3532_I2C_CTRL, LM3532_ALS_CTRL);
+}
+
+static int lm3532_parse_als(struct lm3532_data *priv)
+{
+ struct lm3532_als_data *als;
+ int als_avg_time;
+ int als_impedance;
+ int ret;
+
+ als = devm_kzalloc(priv->dev, sizeof(*als), GFP_KERNEL);
+ if (als == NULL)
+ return -ENOMEM;
+
+ ret = device_property_read_u32(&priv->client->dev, "ti,als-vmin",
+ &als->als_vmin);
+ if (ret)
+ als->als_vmin = 0;
+
+ ret = device_property_read_u32(&priv->client->dev, "ti,als-vmax",
+ &als->als_vmax);
+ if (ret)
+ als->als_vmax = LM3532_ALS_WINDOW_mV;
+
+ if (als->als_vmax > LM3532_ALS_WINDOW_mV) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = device_property_read_u32(&priv->client->dev, "ti,als1-imp-sel",
+ &als_impedance);
+ if (ret)
+ als->als1_imp_sel = 0;
+ else
+ als->als1_imp_sel = lm3532_get_als_imp_index(als_impedance);
+
+ ret = device_property_read_u32(&priv->client->dev, "ti,als2-imp-sel",
+ &als_impedance);
+ if (ret)
+ als->als2_imp_sel = 0;
+ else
+ als->als2_imp_sel = lm3532_get_als_imp_index(als_impedance);
+
+ ret = device_property_read_u32(&priv->client->dev, "ti,als-avrg-time-us",
+ &als_avg_time);
+ if (ret)
+ als->als_avrg_time = 0;
+ else
+ als->als_avrg_time = lm3532_get_als_avg_index(als_avg_time);
+
+ ret = device_property_read_u8(&priv->client->dev, "ti,als-input-mode",
+ &als->als_input_mode);
+ if (ret)
+ als->als_input_mode = 0;
+
+ if (als->als_input_mode > LM3532_BL_MODE_ALS) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ priv->als_data = als;
+
+ return ret;
+}
+
+static int lm3532_parse_node(struct lm3532_data *priv)
+{
+ struct fwnode_handle *child = NULL;
+ struct lm3532_led *led;
+ const char *name;
+ int control_bank;
+ u32 ramp_time;
+ size_t i = 0;
+ int ret;
+
+ priv->enable_gpio = devm_gpiod_get_optional(&priv->client->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->enable_gpio))
+ priv->enable_gpio = NULL;
+
+ priv->regulator = devm_regulator_get(&priv->client->dev, "vin");
+ if (IS_ERR(priv->regulator))
+ priv->regulator = NULL;
+
+ ret = device_property_read_u32(&priv->client->dev, "ramp-up-us",
+ &ramp_time);
+ if (ret)
+ dev_info(&priv->client->dev, "ramp-up-ms property missing\n");
+ else
+ priv->runtime_ramp_up = lm3532_get_ramp_index(ramp_time);
+
+ ret = device_property_read_u32(&priv->client->dev, "ramp-down-us",
+ &ramp_time);
+ if (ret)
+ dev_info(&priv->client->dev, "ramp-down-ms property missing\n");
+ else
+ priv->runtime_ramp_down = lm3532_get_ramp_index(ramp_time);
+
+ device_for_each_child_node(priv->dev, child) {
+ led = &priv->leds[i];
+
+ ret = fwnode_property_read_u32(child, "reg", &control_bank);
+ if (ret) {
+ dev_err(&priv->client->dev, "reg property missing\n");
+ fwnode_handle_put(child);
+ goto child_out;
+ }
+
+ if (control_bank > LM3532_CONTROL_C) {
+ dev_err(&priv->client->dev, "Control bank invalid\n");
+ continue;
+ }
+
+ led->control_bank = control_bank;
+
+ ret = fwnode_property_read_u32(child, "ti,led-mode",
+ &led->mode);
+ if (ret) {
+ dev_err(&priv->client->dev, "ti,led-mode property missing\n");
+ fwnode_handle_put(child);
+ goto child_out;
+ }
+
+ if (led->mode == LM3532_BL_MODE_ALS) {
+ ret = lm3532_parse_als(priv);
+ if (ret)
+ dev_err(&priv->client->dev, "Failed to parse als\n");
+ else
+ lm3532_als_configure(priv, led);
+ }
+
+ led->num_leds = fwnode_property_read_u32_array(child,
+ "led-sources",
+ NULL, 0);
+
+ if (led->num_leds > LM3532_MAX_LED_STRINGS) {
+ dev_err(&priv->client->dev, "To many LED string defined\n");
+ continue;
+ }
+
+ ret = fwnode_property_read_u32_array(child, "led-sources",
+ led->led_strings,
+ led->num_leds);
+ if (ret) {
+ dev_err(&priv->client->dev, "led-sources property missing\n");
+ fwnode_handle_put(child);
+ goto child_out;
+ }
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led->led_dev.default_trigger);
+
+ ret = fwnode_property_read_string(child, "label", &name);
+ if (ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s::", priv->client->name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", priv->client->name, name);
+
+ led->priv = priv;
+ led->led_dev.name = led->label;
+ led->led_dev.brightness_set_blocking = lm3532_brightness_set;
+
+ ret = devm_led_classdev_register(priv->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&priv->client->dev, "led register err: %d\n",
+ ret);
+ fwnode_handle_put(child);
+ goto child_out;
+ }
+
+ lm3532_init_registers(led);
+
+ i++;
+ }
+
+child_out:
+ return ret;
+}
+
+static int lm3532_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lm3532_data *drvdata;
+ int ret = 0;
+ int count;
+
+ count = device_get_child_node_count(&client->dev);
+ if (!count) {
+ dev_err(&client->dev, "LEDs are not defined in device tree!");
+ return -ENODEV;
+ }
+
+ drvdata = devm_kzalloc(&client->dev, struct_size(drvdata, leds, count),
+ GFP_KERNEL);
+ if (drvdata == NULL)
+ return -ENOMEM;
+
+ drvdata->client = client;
+ drvdata->dev = &client->dev;
+
+ drvdata->regmap = devm_regmap_init_i2c(client, &lm3532_regmap_config);
+ if (IS_ERR(drvdata->regmap)) {
+ ret = PTR_ERR(drvdata->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ mutex_init(&drvdata->lock);
+ i2c_set_clientdata(client, drvdata);
+
+ ret = lm3532_parse_node(drvdata);
+ if (ret) {
+ dev_err(&client->dev, "Failed to parse node\n");
+ return ret;
+ }
+
+ if (drvdata->enable_gpio)
+ gpiod_direction_output(drvdata->enable_gpio, 1);
+
+ return ret;
+}
+
+static int lm3532_remove(struct i2c_client *client)
+{
+ struct lm3532_data *drvdata = i2c_get_clientdata(client);
+
+ mutex_destroy(&drvdata->lock);
+
+ if (drvdata->enable_gpio)
+ gpiod_direction_output(drvdata->enable_gpio, 0);
+
+ return 0;
+}
+
+static const struct of_device_id of_lm3532_leds_match[] = {
+ { .compatible = "ti,lm3532", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lm3532_leds_match);
+
+static const struct i2c_device_id lm3532_id[] = {
+ {LM3532_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lm3532_id);
+
+static struct i2c_driver lm3532_i2c_driver = {
+ .probe = lm3532_probe,
+ .remove = lm3532_remove,
+ .id_table = lm3532_id,
+ .driver = {
+ .name = LM3532_NAME,
+ .of_match_table = of_lm3532_leds_match,
+ },
+};
+module_i2c_driver(lm3532_i2c_driver);
+
+MODULE_DESCRIPTION("Back Light driver for LM3532");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index de3623e0d094..83e8e58d81cb 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -60,67 +60,14 @@ static int lt3593_led_set(struct led_classdev *led_cdev,
return 0;
}
-static struct lt3593_led_data *lt3593_led_probe_pdata(struct device *dev)
-{
- struct gpio_led_platform_data *pdata = dev_get_platdata(dev);
- const struct gpio_led *template = &pdata->leds[0];
- struct lt3593_led_data *led_data;
- int ret, state;
-
- if (pdata->num_leds != 1)
- return ERR_PTR(-EINVAL);
-
- led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
- if (!led_data)
- return ERR_PTR(-ENOMEM);
-
- led_data->cdev.name = template->name;
- led_data->cdev.default_trigger = template->default_trigger;
- led_data->cdev.brightness_set_blocking = lt3593_led_set;
-
- state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
- led_data->cdev.brightness = state ? LED_FULL : LED_OFF;
-
- if (!template->retain_state_suspended)
- led_data->cdev.flags |= LED_CORE_SUSPENDRESUME;
-
- ret = devm_gpio_request_one(dev, template->gpio, state ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- template->name);
- if (ret < 0)
- return ERR_PTR(ret);
-
- led_data->gpiod = gpio_to_desc(template->gpio);
- if (!led_data->gpiod)
- return ERR_PTR(-EPROBE_DEFER);
-
- ret = devm_led_classdev_register(dev, &led_data->cdev);
- if (ret < 0)
- return ERR_PTR(ret);
-
- dev_info(dev, "registered LT3593 LED '%s' at GPIO %d\n",
- template->name, template->gpio);
-
- return led_data;
-}
-
static int lt3593_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct lt3593_led_data *led_data;
struct fwnode_handle *child;
int ret, state = LEDS_GPIO_DEFSTATE_OFF;
- enum gpiod_flags flags = GPIOD_OUT_LOW;
const char *tmp;
- if (dev_get_platdata(dev)) {
- led_data = lt3593_led_probe_pdata(dev);
- if (IS_ERR(led_data))
- return PTR_ERR(led_data);
-
- goto out;
- }
-
if (!dev->of_node)
return -ENODEV;
@@ -151,13 +98,8 @@ static int lt3593_led_probe(struct platform_device *pdev)
&led_data->cdev.default_trigger);
if (!fwnode_property_read_string(child, "default-state", &tmp)) {
- if (!strcmp(tmp, "keep")) {
- state = LEDS_GPIO_DEFSTATE_KEEP;
- flags = GPIOD_ASIS;
- } else if (!strcmp(tmp, "on")) {
+ if (!strcmp(tmp, "on"))
state = LEDS_GPIO_DEFSTATE_ON;
- flags = GPIOD_OUT_HIGH;
- }
}
led_data->cdev.name = led_data->name;
@@ -171,20 +113,16 @@ static int lt3593_led_probe(struct platform_device *pdev)
}
led_data->cdev.dev->of_node = dev->of_node;
-
-out:
platform_set_drvdata(pdev, led_data);
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id of_lt3593_leds_match[] = {
{ .compatible = "lltc,lt3593", },
{},
};
MODULE_DEVICE_TABLE(of, of_lt3593_leds_match);
-#endif
static struct platform_driver lt3593_led_driver = {
.probe = lt3593_led_probe,
diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c
new file mode 100644
index 000000000000..6b74ce9cac12
--- /dev/null
+++ b/drivers/leds/leds-max77650.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// LED driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX77650_LED_NUM_LEDS 3
+
+#define MAX77650_LED_A_BASE 0x40
+#define MAX77650_LED_B_BASE 0x43
+
+#define MAX77650_LED_BR_MASK GENMASK(4, 0)
+#define MAX77650_LED_EN_MASK GENMASK(7, 6)
+
+#define MAX77650_LED_MAX_BRIGHTNESS MAX77650_LED_BR_MASK
+
+/* Enable EN_LED_MSTR. */
+#define MAX77650_LED_TOP_DEFAULT BIT(0)
+
+#define MAX77650_LED_ENABLE GENMASK(7, 6)
+#define MAX77650_LED_DISABLE 0x00
+
+#define MAX77650_LED_A_DEFAULT MAX77650_LED_DISABLE
+/* 100% on duty */
+#define MAX77650_LED_B_DEFAULT GENMASK(3, 0)
+
+struct max77650_led {
+ struct led_classdev cdev;
+ struct regmap *map;
+ unsigned int regA;
+ unsigned int regB;
+};
+
+static struct max77650_led *max77650_to_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct max77650_led, cdev);
+}
+
+static int max77650_led_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct max77650_led *led = max77650_to_led(cdev);
+ int val, mask;
+
+ mask = MAX77650_LED_BR_MASK | MAX77650_LED_EN_MASK;
+
+ if (brightness == LED_OFF)
+ val = MAX77650_LED_DISABLE;
+ else
+ val = MAX77650_LED_ENABLE | brightness;
+
+ return regmap_update_bits(led->map, led->regA, mask, val);
+}
+
+static int max77650_led_probe(struct platform_device *pdev)
+{
+ struct device_node *of_node, *child;
+ struct max77650_led *leds, *led;
+ struct device *parent;
+ struct device *dev;
+ struct regmap *map;
+ const char *label;
+ int rv, num_leds;
+ u32 reg;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+ of_node = dev->of_node;
+
+ if (!of_node)
+ return -ENODEV;
+
+ leds = devm_kcalloc(dev, MAX77650_LED_NUM_LEDS,
+ sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ map = dev_get_regmap(dev->parent, NULL);
+ if (!map)
+ return -ENODEV;
+
+ num_leds = of_get_child_count(of_node);
+ if (!num_leds || num_leds > MAX77650_LED_NUM_LEDS)
+ return -ENODEV;
+
+ for_each_child_of_node(of_node, child) {
+ rv = of_property_read_u32(child, "reg", &reg);
+ if (rv || reg >= MAX77650_LED_NUM_LEDS)
+ return -EINVAL;
+
+ led = &leds[reg];
+ led->map = map;
+ led->regA = MAX77650_LED_A_BASE + reg;
+ led->regB = MAX77650_LED_B_BASE + reg;
+ led->cdev.brightness_set_blocking = max77650_led_brightness_set;
+ led->cdev.max_brightness = MAX77650_LED_MAX_BRIGHTNESS;
+
+ label = of_get_property(child, "label", NULL);
+ if (!label) {
+ led->cdev.name = "max77650::";
+ } else {
+ led->cdev.name = devm_kasprintf(dev, GFP_KERNEL,
+ "max77650:%s", label);
+ if (!led->cdev.name)
+ return -ENOMEM;
+ }
+
+ of_property_read_string(child, "linux,default-trigger",
+ &led->cdev.default_trigger);
+
+ rv = devm_of_led_classdev_register(dev, child, &led->cdev);
+ if (rv)
+ return rv;
+
+ rv = regmap_write(map, led->regA, MAX77650_LED_A_DEFAULT);
+ if (rv)
+ return rv;
+
+ rv = regmap_write(map, led->regB, MAX77650_LED_B_DEFAULT);
+ if (rv)
+ return rv;
+ }
+
+ return regmap_write(map,
+ MAX77650_REG_CNFG_LED_TOP,
+ MAX77650_LED_TOP_DEFAULT);
+}
+
+static struct platform_driver max77650_led_driver = {
+ .driver = {
+ .name = "max77650-led",
+ },
+ .probe = max77650_led_probe,
+};
+module_platform_driver(max77650_led_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 LED driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index f51b356d4426..a9f5dad55956 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -40,7 +40,6 @@
* bits the chip supports.
*/
-#include <linux/acpi.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -48,8 +47,8 @@
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -110,15 +109,6 @@ static const struct i2c_device_id pca955x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
-static const struct acpi_device_id pca955x_acpi_ids[] = {
- { "PCA9550", pca9550 },
- { "PCA9551", pca9551 },
- { "PCA9552", pca9552 },
- { "PCA9553", pca9553 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, pca955x_acpi_ids);
-
struct pca955x {
struct mutex lock;
struct pca955x_led *leds;
@@ -373,16 +363,14 @@ static int pca955x_gpio_direction_output(struct gpio_chip *gc,
}
#endif /* CONFIG_LEDS_PCA955X_GPIO */
-#if IS_ENABLED(CONFIG_OF)
static struct pca955x_platform_data *
-pca955x_pdata_of_init(struct i2c_client *client, struct pca955x_chipdef *chip)
+pca955x_get_pdata(struct i2c_client *client, struct pca955x_chipdef *chip)
{
- struct device_node *np = client->dev.of_node;
- struct device_node *child;
struct pca955x_platform_data *pdata;
+ struct fwnode_handle *child;
int count;
- count = of_get_child_count(np);
+ count = device_get_child_node_count(&client->dev);
if (!count || count > chip->bits)
return ERR_PTR(-ENODEV);
@@ -396,24 +384,25 @@ pca955x_pdata_of_init(struct i2c_client *client, struct pca955x_chipdef *chip)
if (!pdata->leds)
return ERR_PTR(-ENOMEM);
- for_each_child_of_node(np, child) {
+ device_for_each_child_node(&client->dev, child) {
const char *name;
u32 reg;
int res;
- res = of_property_read_u32(child, "reg", &reg);
+ res = fwnode_property_read_u32(child, "reg", &reg);
if ((res != 0) || (reg >= chip->bits))
continue;
- if (of_property_read_string(child, "label", &name))
- name = child->name;
+ res = fwnode_property_read_string(child, "label", &name);
+ if ((res != 0) && is_of_node(child))
+ name = to_of_node(child)->name;
snprintf(pdata->leds[reg].name, sizeof(pdata->leds[reg].name),
"%s", name);
pdata->leds[reg].type = PCA955X_TYPE_LED;
- of_property_read_u32(child, "type", &pdata->leds[reg].type);
- of_property_read_string(child, "linux,default-trigger",
+ fwnode_property_read_u32(child, "type", &pdata->leds[reg].type);
+ fwnode_property_read_string(child, "linux,default-trigger",
&pdata->leds[reg].default_trigger);
}
@@ -429,15 +418,7 @@ static const struct of_device_id of_pca955x_match[] = {
{ .compatible = "nxp,pca9553", .data = (void *)pca9553 },
{},
};
-
MODULE_DEVICE_TABLE(of, of_pca955x_match);
-#else
-static struct pca955x_platform_data *
-pca955x_pdata_of_init(struct i2c_client *client, struct pca955x_chipdef *chip)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
static int pca955x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -450,20 +431,11 @@ static int pca955x_probe(struct i2c_client *client,
struct pca955x_platform_data *pdata;
int ngpios = 0;
- if (id) {
- chip = &pca955x_chipdefs[id->driver_data];
- } else {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(pca955x_acpi_ids, &client->dev);
- if (!acpi_id)
- return -ENODEV;
- chip = &pca955x_chipdefs[acpi_id->driver_data];
- }
+ chip = &pca955x_chipdefs[id->driver_data];
adapter = to_i2c_adapter(client->dev.parent);
pdata = dev_get_platdata(&client->dev);
if (!pdata) {
- pdata = pca955x_pdata_of_init(client, chip);
+ pdata = pca955x_get_pdata(client, chip);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
@@ -602,8 +574,7 @@ static int pca955x_probe(struct i2c_client *client,
static struct i2c_driver pca955x_driver = {
.driver = {
.name = "leds-pca955x",
- .acpi_match_table = ACPI_PTR(pca955x_acpi_ids),
- .of_match_table = of_match_ptr(of_pca955x_match),
+ .of_match_table = of_pca955x_match,
},
.probe = pca955x_probe,
.id_table = pca955x_id,
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 5c0908113e38..9b4ef070d956 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -25,7 +25,6 @@
* or by adding the 'nxp,hw-blink' property to the DTS.
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -33,6 +32,7 @@
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_data/leds-pca963x.h>
@@ -97,15 +97,6 @@ static const struct i2c_device_id pca963x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca963x_id);
-static const struct acpi_device_id pca963x_acpi_ids[] = {
- { "PCA9632", pca9633 },
- { "PCA9633", pca9633 },
- { "PCA9634", pca9634 },
- { "PCA9635", pca9635 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, pca963x_acpi_ids);
-
struct pca963x_led;
struct pca963x {
@@ -287,16 +278,15 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
return 0;
}
-#if IS_ENABLED(CONFIG_OF)
static struct pca963x_platform_data *
-pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
+pca963x_get_pdata(struct i2c_client *client, struct pca963x_chipdef *chip)
{
- struct device_node *np = client->dev.of_node, *child;
struct pca963x_platform_data *pdata;
struct led_info *pca963x_leds;
+ struct fwnode_handle *child;
int count;
- count = of_get_child_count(np);
+ count = device_get_child_node_count(&client->dev);
if (!count || count > chip->n_leds)
return ERR_PTR(-ENODEV);
@@ -305,18 +295,22 @@ pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
if (!pca963x_leds)
return ERR_PTR(-ENOMEM);
- for_each_child_of_node(np, child) {
+ device_for_each_child_node(&client->dev, child) {
struct led_info led = {};
u32 reg;
int res;
- res = of_property_read_u32(child, "reg", &reg);
+ res = fwnode_property_read_u32(child, "reg", &reg);
if ((res != 0) || (reg >= chip->n_leds))
continue;
- led.name =
- of_get_property(child, "label", NULL) ? : child->name;
- led.default_trigger =
- of_get_property(child, "linux,default-trigger", NULL);
+
+ res = fwnode_property_read_string(child, "label", &led.name);
+ if ((res != 0) && is_of_node(child))
+ led.name = to_of_node(child)->name;
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led.default_trigger);
+
pca963x_leds[reg] = led;
}
pdata = devm_kzalloc(&client->dev,
@@ -328,22 +322,23 @@ pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
pdata->leds.num_leds = chip->n_leds;
/* default to open-drain unless totem pole (push-pull) is specified */
- if (of_property_read_bool(np, "nxp,totem-pole"))
+ if (device_property_read_bool(&client->dev, "nxp,totem-pole"))
pdata->outdrv = PCA963X_TOTEM_POLE;
else
pdata->outdrv = PCA963X_OPEN_DRAIN;
/* default to software blinking unless hardware blinking is specified */
- if (of_property_read_bool(np, "nxp,hw-blink"))
+ if (device_property_read_bool(&client->dev, "nxp,hw-blink"))
pdata->blink_type = PCA963X_HW_BLINK;
else
pdata->blink_type = PCA963X_SW_BLINK;
- if (of_property_read_u32(np, "nxp,period-scale", &chip->scaling))
+ if (device_property_read_u32(&client->dev, "nxp,period-scale",
+ &chip->scaling))
chip->scaling = 1000;
/* default to non-inverted output, unless inverted is specified */
- if (of_property_read_bool(np, "nxp,inverted-out"))
+ if (device_property_read_bool(&client->dev, "nxp,inverted-out"))
pdata->dir = PCA963X_INVERTED;
else
pdata->dir = PCA963X_NORMAL;
@@ -359,13 +354,6 @@ static const struct of_device_id of_pca963x_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, of_pca963x_match);
-#else
-static struct pca963x_platform_data *
-pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
static int pca963x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -376,20 +364,11 @@ static int pca963x_probe(struct i2c_client *client,
struct pca963x_chipdef *chip;
int i, err;
- if (id) {
- chip = &pca963x_chipdefs[id->driver_data];
- } else {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(pca963x_acpi_ids, &client->dev);
- if (!acpi_id)
- return -ENODEV;
- chip = &pca963x_chipdefs[acpi_id->driver_data];
- }
+ chip = &pca963x_chipdefs[id->driver_data];
pdata = dev_get_platdata(&client->dev);
if (!pdata) {
- pdata = pca963x_dt_init(client, chip);
+ pdata = pca963x_get_pdata(client, chip);
if (IS_ERR(pdata)) {
dev_warn(&client->dev, "could not parse configuration\n");
pdata = NULL;
@@ -495,8 +474,7 @@ static int pca963x_remove(struct i2c_client *client)
static struct i2c_driver pca963x_driver = {
.driver = {
.name = "leds-pca963x",
- .of_match_table = of_match_ptr(of_pca963x_match),
- .acpi_match_table = ACPI_PTR(pca963x_acpi_ids),
+ .of_match_table = of_pca963x_match,
},
.probe = pca963x_probe,
.remove = pca963x_remove,
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 0c43bfac9598..08b6a769ff8f 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -74,7 +74,7 @@ static int uleds_open(struct inode *inode, struct file *file)
udev->state = ULEDS_STATE_UNKNOWN;
file->private_data = udev;
- nonseekable_open(inode, file);
+ stream_open(inode, file);
return 0;
}
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 3789185144da..0b7d5fb4548d 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
struct pblk_sec_meta *meta;
struct bio *new_bio = rqd->bio;
struct bio *bio = pr_ctx->orig_bio;
- struct bio_vec src_bv, dst_bv;
void *meta_list = rqd->meta_list;
- int bio_init_idx = pr_ctx->bio_init_idx;
unsigned long *read_bitmap = pr_ctx->bitmap;
+ struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
+ struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
int nr_secs = pr_ctx->orig_nr_secs;
int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
void *src_p, *dst_p;
- int hole, i;
+ int bit, i;
if (unlikely(nr_holes == 1)) {
struct ppa_addr ppa;
@@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
/* Fill the holes in the original bio */
i = 0;
- hole = find_first_zero_bit(read_bitmap, nr_secs);
- do {
- struct pblk_line *line;
+ for (bit = 0; bit < nr_secs; bit++) {
+ if (!test_bit(bit, read_bitmap)) {
+ struct bio_vec dst_bv, src_bv;
+ struct pblk_line *line;
- line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
- kref_put(&line->ref, pblk_line_put);
+ line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
+ kref_put(&line->ref, pblk_line_put);
- meta = pblk_get_meta(pblk, meta_list, hole);
- meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
+ meta = pblk_get_meta(pblk, meta_list, bit);
+ meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
- src_bv = new_bio->bi_io_vec[i++];
- dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+ dst_bv = bio_iter_iovec(bio, orig_iter);
+ src_bv = bio_iter_iovec(new_bio, new_iter);
- src_p = kmap_atomic(src_bv.bv_page);
- dst_p = kmap_atomic(dst_bv.bv_page);
+ src_p = kmap_atomic(src_bv.bv_page);
+ dst_p = kmap_atomic(dst_bv.bv_page);
- memcpy(dst_p + dst_bv.bv_offset,
- src_p + src_bv.bv_offset,
- PBLK_EXPOSED_PAGE_SIZE);
+ memcpy(dst_p + dst_bv.bv_offset,
+ src_p + src_bv.bv_offset,
+ PBLK_EXPOSED_PAGE_SIZE);
- kunmap_atomic(src_p);
- kunmap_atomic(dst_p);
+ kunmap_atomic(src_p);
+ kunmap_atomic(dst_p);
- mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
+ flush_dcache_page(dst_bv.bv_page);
+ mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
- hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
- } while (hole < nr_secs);
+ bio_advance_iter(new_bio, &new_iter,
+ PBLK_EXPOSED_PAGE_SIZE);
+ i++;
+ }
+ bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
+ }
bio_put(new_bio);
kfree(pr_ctx);
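
The rework above replaces direct bi_io_vec[] indexing with the bvec_iter helpers, so the partial-read path no longer depends on how the bio's vector happens to be laid out. The snippet below is a hypothetical, self-contained illustration of that iteration pattern (walking a caller-owned bio with bio_iter_iovec()/bio_advance_iter()); it is not pblk code.

#include <linux/bio.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper: copy a caller-built bio into a linear buffer using
 * the same bvec_iter pattern as above, without indexing bi_io_vec[].
 */
static void demo_copy_bio_to_buf(struct bio *bio, void *dst)
{
	struct bvec_iter iter = BVEC_ITER_ALL_INIT;	/* start at segment 0 */
	struct bio_vec bv;
	unsigned int i;
	void *src;

	for (i = 0; i < bio->bi_vcnt; i++) {		/* bio is caller-owned */
		bv = bio_iter_iovec(bio, iter);		/* current page/offset/len */

		src = kmap_atomic(bv.bv_page);
		memcpy(dst, src + bv.bv_offset, bv.bv_len);
		kunmap_atomic(src);

		dst += bv.bv_len;
		bio_advance_iter(bio, &iter, bv.bv_len);	/* next segment */
	}
}
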
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index d86e7a4ac04d..595542bfae85 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -41,6 +41,16 @@ config PL320_MBOX
Management Engine, primarily for cpufreq. Say Y here if you want
to use the PL320 IPCM support.
+config ARMADA_37XX_RWTM_MBOX
+ tristate "Armada 37xx rWTM BIU Mailbox"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
+ help
+ Mailbox implementation for communication with the firmware
+ running on the Cortex-M3 rWTM secure processor of the Armada 37xx
+ SoC. Say Y here if you are building for such a device (for example
+ the Turris Mox router).
+
config OMAP2PLUS_MBOX
tristate "OMAP2+ Mailbox framework support"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 8be3bcbcf882..c22fad6f696b 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_ARM_MHU) += arm_mhu.o
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
+obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
+
obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
new file mode 100644
index 000000000000..97f90e97a83c
--- /dev/null
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rWTM BIU Mailbox driver for Armada 37xx
+ *
+ * Author: Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/armada-37xx-rwtm-mailbox.h>
+
+#define DRIVER_NAME "armada-37xx-rwtm-mailbox"
+
+/* relative to rWTM BIU Mailbox Registers */
+#define RWTM_MBOX_PARAM(i) (0x0 + ((i) << 2))
+#define RWTM_MBOX_COMMAND 0x40
+#define RWTM_MBOX_RETURN_STATUS 0x80
+#define RWTM_MBOX_STATUS(i) (0x84 + ((i) << 2))
+#define RWTM_MBOX_FIFO_STATUS 0xc4
+#define FIFO_STS_RDY 0x100
+#define FIFO_STS_CNTR_MASK 0x7
+#define FIFO_STS_CNTR_MAX 4
+
+#define RWTM_HOST_INT_RESET 0xc8
+#define RWTM_HOST_INT_MASK 0xcc
+#define SP_CMD_COMPLETE BIT(0)
+#define SP_CMD_QUEUE_FULL_ACCESS BIT(17)
+#define SP_CMD_QUEUE_FULL BIT(18)
+
+struct a37xx_mbox {
+ struct device *dev;
+ struct mbox_controller controller;
+ void __iomem *base;
+ int irq;
+};
+
+static void a37xx_mbox_receive(struct mbox_chan *chan)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ struct armada_37xx_rwtm_rx_msg rx_msg;
+ int i;
+
+ rx_msg.retval = readl(mbox->base + RWTM_MBOX_RETURN_STATUS);
+ for (i = 0; i < 16; ++i)
+ rx_msg.status[i] = readl(mbox->base + RWTM_MBOX_STATUS(i));
+
+ mbox_chan_received_data(chan, &rx_msg);
+}
+
+static irqreturn_t a37xx_mbox_irq_handler(int irq, void *data)
+{
+ struct mbox_chan *chan = data;
+ struct a37xx_mbox *mbox = chan->con_priv;
+ u32 reg;
+
+ reg = readl(mbox->base + RWTM_HOST_INT_RESET);
+
+ if (reg & SP_CMD_COMPLETE)
+ a37xx_mbox_receive(chan);
+
+ if (reg & (SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL))
+ dev_err(mbox->dev, "Secure processor command queue full\n");
+
+ writel(reg, mbox->base + RWTM_HOST_INT_RESET);
+ if (reg)
+ mbox_chan_txdone(chan, 0);
+
+ return reg ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int a37xx_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ struct armada_37xx_rwtm_tx_msg *msg = data;
+ int i;
+ u32 reg;
+
+ if (!data)
+ return -EINVAL;
+
+ reg = readl(mbox->base + RWTM_MBOX_FIFO_STATUS);
+ if (!(reg & FIFO_STS_RDY))
+ dev_warn(mbox->dev, "Secure processor not ready\n");
+
+ if ((reg & FIFO_STS_CNTR_MASK) >= FIFO_STS_CNTR_MAX) {
+ dev_err(mbox->dev, "Secure processor command queue full\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 16; ++i)
+ writel(msg->args[i], mbox->base + RWTM_MBOX_PARAM(i));
+ writel(msg->command, mbox->base + RWTM_MBOX_COMMAND);
+
+ return 0;
+}
+
+static int a37xx_mbox_startup(struct mbox_chan *chan)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ u32 reg;
+ int ret;
+
+ ret = devm_request_irq(mbox->dev, mbox->irq, a37xx_mbox_irq_handler, 0,
+ DRIVER_NAME, chan);
+ if (ret < 0) {
+ dev_err(mbox->dev, "Cannot request irq\n");
+ return ret;
+ }
+
+ /* enable IRQ generation */
+ reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+ reg &= ~(SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL);
+ writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+ return 0;
+}
+
+static void a37xx_mbox_shutdown(struct mbox_chan *chan)
+{
+ u32 reg;
+ struct a37xx_mbox *mbox = chan->con_priv;
+
+ /* disable interrupt generation */
+ reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+ reg |= SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL;
+ writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+ devm_free_irq(mbox->dev, mbox->irq, chan);
+}
+
+static const struct mbox_chan_ops a37xx_mbox_ops = {
+ .send_data = a37xx_mbox_send_data,
+ .startup = a37xx_mbox_startup,
+ .shutdown = a37xx_mbox_shutdown,
+};
+
+static int armada_37xx_mbox_probe(struct platform_device *pdev)
+{
+ struct a37xx_mbox *mbox;
+ struct resource *regs;
+ struct mbox_chan *chans;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Allocate one channel */
+ chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mbox->base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mbox->base)) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return PTR_ERR(mbox->base);
+ }
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0) {
+ dev_err(&pdev->dev, "Cannot get irq\n");
+ return mbox->irq;
+ }
+
+ mbox->dev = &pdev->dev;
+
+ /* Hardware supports only one channel. */
+ chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = chans;
+ mbox->controller.ops = &a37xx_mbox_ops;
+ mbox->controller.txdone_irq = true;
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register mailbox controller\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ return ret;
+}
+
+static int armada_37xx_mbox_remove(struct platform_device *pdev)
+{
+ struct a37xx_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (!mbox)
+ return -EINVAL;
+
+ mbox_controller_unregister(&mbox->controller);
+
+ return 0;
+}
+
+static const struct of_device_id armada_37xx_mbox_match[] = {
+ { .compatible = "marvell,armada-3700-rwtm-mailbox" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, armada_37xx_mbox_match);
+
+static struct platform_driver armada_37xx_mbox_driver = {
+ .probe = armada_37xx_mbox_probe,
+ .remove = armada_37xx_mbox_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = armada_37xx_mbox_match,
+ },
+};
+
+module_platform_driver(armada_37xx_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 85fc5b56f99b..25be8bb5e371 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -264,7 +264,6 @@ static int imx_mu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *iomem;
struct imx_mu_priv *priv;
unsigned int i;
int ret;
@@ -275,8 +274,7 @@ static int imx_mu_probe(struct platform_device *pdev)
priv->dev = dev;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, iomem);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 256f18b67e8a..08a0a3517138 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -382,7 +382,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
*
* This gets called for each entry in the PCC table.
*/
-static int parse_pcc_subspace(struct acpi_subtable_header *header,
+static int parse_pcc_subspace(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_pcct_subspace *ss = (struct acpi_pcct_subspace *) header;
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 210fe504f5ae..f91dfb1327c7 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -8,9 +8,9 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
@@ -240,9 +240,11 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
/* irq */
for (i = 0; i < IPCC_IRQ_NUM; i++) {
- ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]);
+ ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
if (ipcc->irqs[i] < 0) {
- dev_err(dev, "no IRQ specified %s\n", irq_name[i]);
+ if (ipcc->irqs[i] != -EPROBE_DEFER)
+ dev_err(dev, "no IRQ specified %s\n",
+ irq_name[i]);
ret = ipcc->irqs[i];
goto err_clk;
}
@@ -263,9 +265,10 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
/* wakeup */
if (of_property_read_bool(np, "wakeup-source")) {
- ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup");
+ ipcc->wkp = platform_get_irq_byname(pdev, "wakeup");
if (ipcc->wkp < 0) {
- dev_err(dev, "could not get wakeup IRQ\n");
+ if (ipcc->wkp != -EPROBE_DEFER)
+ dev_err(dev, "could not get wakeup IRQ\n");
ret = ipcc->wkp;
goto err_clk;
}
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 5002838ea476..f8986effcb50 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -327,10 +327,11 @@ static int bch_allocator_thread(void *arg)
* possibly issue discards to them, then we add the bucket to
* the free list:
*/
- while (!fifo_empty(&ca->free_inc)) {
+ while (1) {
long bucket;
- fifo_pop(&ca->free_inc, bucket);
+ if (!fifo_pop(&ca->free_inc, bucket))
+ break;
if (ca->discard) {
mutex_unlock(&ca->set->bucket_lock);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 64def336f053..773f5fdad25f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -429,14 +429,14 @@ static void do_btree_node_write(struct btree *b)
bset_sector_offset(&b->keys, i));
if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
- int j;
struct bio_vec *bv;
- void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
+ void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bv, b->bio, j, iter_all)
- memcpy(page_address(bv->bv_page),
- base + j * PAGE_SIZE, PAGE_SIZE);
+ bio_for_each_segment_all(bv, b->bio, iter_all) {
+ memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
+ addr += PAGE_SIZE;
+ }
bch_submit_bbio(b->bio, b->c, &k.key, 0);
@@ -1476,11 +1476,11 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
out_nocoalesce:
closure_sync(&cl);
- bch_keylist_free(&keylist);
while ((k = bch_keylist_pop(&keylist)))
if (!bkey_cmp(k, &ZERO_KEY))
atomic_dec(&b->c->prio_blocked);
+ bch_keylist_free(&keylist);
for (i = 0; i < nodes; i++)
if (!IS_ERR_OR_NULL(new_nodes[i])) {
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index b2fd412715b1..12dae9348147 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -147,7 +147,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b) \
({ \
- int ret = journal_read_bucket(ca, list, b); \
+ ret = journal_read_bucket(ca, list, b); \
__set_bit(b, bitmap); \
if (ret < 0) \
return ret; \
@@ -156,6 +156,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
struct cache *ca;
unsigned int iter;
+ int ret = 0;
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
@@ -267,7 +268,7 @@ bsearch:
struct journal_replay,
list)->j.seq;
- return 0;
+ return ret;
#undef read_bucket
}
@@ -317,6 +318,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
}
+static bool is_discard_enabled(struct cache_set *s)
+{
+ struct cache *ca;
+ unsigned int i;
+
+ for_each_cache(ca, s, i)
+ if (ca->discard)
+ return true;
+
+ return false;
+}
+
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
int ret = 0, keys = 0, entries = 0;
@@ -330,9 +343,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
- cache_set_err_on(n != i->j.seq, s,
-"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
- n, i->j.seq - 1, start, end);
+ if (n != i->j.seq) {
+ if (n == start && is_discard_enabled(s))
+ pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
+ else {
+ pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
+ ret = -EIO;
+ goto err;
+ }
+ }
for (k = i->j.start;
k < bset_bkey_last(&i->j);
@@ -540,11 +561,11 @@ static void journal_reclaim(struct cache_set *c)
ca->sb.nr_this_dev);
}
- bkey_init(k);
- SET_KEY_PTRS(k, n);
-
- if (n)
+ if (n) {
+ bkey_init(k);
+ SET_KEY_PTRS(k, n);
c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+ }
out:
if (!journal_full(&c->journal))
__closure_wake_up(&c->journal.wait);
@@ -671,6 +692,9 @@ static void journal_write_unlocked(struct closure *cl)
ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
}
+ /* If KEY_PTRS(k) == 0, this jset gets lost in air */
+ BUG_ON(i == 0);
+
atomic_dec_bug(&fifo_back(&c->journal.pin));
bch_journal_next(&c->journal);
journal_reclaim(c);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f101bfe8657a..41adcd1546f1 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -329,12 +329,13 @@ void bch_data_insert(struct closure *cl)
bch_data_insert_start(cl);
}
-/* Congested? */
-
-unsigned int bch_get_congested(struct cache_set *c)
+/*
+ * Congested? Return 0 (not congested) or the limit (in sectors)
+ * beyond which we should bypass the cache due to congestion.
+ */
+unsigned int bch_get_congested(const struct cache_set *c)
{
int i;
- long rand;
if (!c->congested_read_threshold_us &&
!c->congested_write_threshold_us)
@@ -353,8 +354,7 @@ unsigned int bch_get_congested(struct cache_set *c)
if (i > 0)
i = fract_exp_two(i, 6);
- rand = get_random_int();
- i -= bitmap_weight(&rand, BITS_PER_LONG);
+ i -= hweight32(get_random_u32());
return i > 0 ? i : 1;
}
@@ -376,7 +376,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
struct cache_set *c = dc->disk.c;
unsigned int mode = cache_mode(dc);
- unsigned int sectors, congested = bch_get_congested(c);
+ unsigned int sectors, congested;
struct task_struct *task = current;
struct io *i;
@@ -412,6 +412,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
goto rescale;
}
+ congested = bch_get_congested(c);
if (!congested && !dc->sequential_cutoff)
goto rescale;
@@ -706,14 +707,14 @@ static void search_free(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
- atomic_dec(&s->d->c->search_inflight);
+ atomic_dec(&s->iop.c->search_inflight);
if (s->iop.bio)
bio_put(s->iop.bio);
bio_complete(s);
closure_debug_destroy(cl);
- mempool_free(s, &s->d->c->search);
+ mempool_free(s, &s->iop.c->search);
}
static inline struct search *search_alloc(struct bio *bio,
@@ -756,13 +757,13 @@ static void cached_dev_bio_complete(struct closure *cl)
struct search *s = container_of(cl, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- search_free(cl);
cached_dev_put(dc);
+ search_free(cl);
}
/* Process reads */
-static void cached_dev_cache_miss_done(struct closure *cl)
+static void cached_dev_read_error_done(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
@@ -800,7 +801,22 @@ static void cached_dev_read_error(struct closure *cl)
closure_bio_submit(s->iop.c, bio, cl);
}
- continue_at(cl, cached_dev_cache_miss_done, NULL);
+ continue_at(cl, cached_dev_read_error_done, NULL);
+}
+
+static void cached_dev_cache_miss_done(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct bcache_device *d = s->d;
+
+ if (s->iop.replace_collision)
+ bch_mark_cache_miss_collision(s->iop.c, s->d);
+
+ if (s->iop.bio)
+ bio_free_pages(s->iop.bio);
+
+ cached_dev_bio_complete(cl);
+ closure_put(&d->cl);
}
static void cached_dev_read_done(struct closure *cl)
@@ -833,6 +849,7 @@ static void cached_dev_read_done(struct closure *cl)
if (verify(dc) && s->recoverable && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio);
+ closure_get(&dc->disk.cl);
bio_complete(s);
if (s->iop.bio &&
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 721bf336ed1a..c64dbd7a91aa 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -33,7 +33,7 @@ struct data_insert_op {
BKEY_PADDED(replace_key);
};
-unsigned int bch_get_congested(struct cache_set *c);
+unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a697a3a923cd..1b63ac876169 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -662,6 +662,11 @@ static const struct block_device_operations bcache_ops = {
void bcache_device_stop(struct bcache_device *d)
{
if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
+ /*
+ * closure_fn set to
+ * - cached device: cached_dev_flush()
+ * - flash dev: flash_dev_flush()
+ */
closure_queue(&d->cl);
}
@@ -906,21 +911,18 @@ static int cached_dev_status_update(void *arg)
void bch_cached_dev_run(struct cached_dev *dc)
{
struct bcache_device *d = &dc->disk;
- char buf[SB_LABEL_SIZE + 1];
+ char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
char *env[] = {
"DRIVER=bcache",
kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
- NULL,
+ kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
NULL,
};
- memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
- buf[SB_LABEL_SIZE] = '\0';
- env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
-
if (atomic_xchg(&dc->running, 1)) {
kfree(env[1]);
kfree(env[2]);
+ kfree(buf);
return;
}
@@ -944,6 +946,7 @@ void bch_cached_dev_run(struct cached_dev *dc)
kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
kfree(env[1]);
kfree(env[2]);
+ kfree(buf);
if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
@@ -1174,6 +1177,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return 0;
}
+/* when dc->disk.kobj released */
void bch_cached_dev_release(struct kobject *kobj)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
@@ -1280,7 +1284,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
-static void register_bdev(struct cache_sb *sb, struct page *sb_page,
+static int register_bdev(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev,
struct cached_dev *dc)
{
@@ -1318,14 +1322,16 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
bch_cached_dev_run(dc);
- return;
+ return 0;
err:
pr_notice("error %s: %s", dc->backing_dev_name, err);
bcache_device_stop(&dc->disk);
+ return -EIO;
}
/* Flash only volumes */
+/* When d->kobj released */
void bch_flash_dev_release(struct kobject *kobj)
{
struct bcache_device *d = container_of(kobj, struct bcache_device,
@@ -1496,6 +1502,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
return true;
}
+/* When c->kobj released */
void bch_cache_set_release(struct kobject *kobj)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
@@ -1516,6 +1523,7 @@ static void cache_set_free(struct closure *cl)
bch_btree_cache_free(c);
bch_journal_free(c);
+ mutex_lock(&bch_register_lock);
for_each_cache(ca, c, i)
if (ca) {
ca->set = NULL;
@@ -1534,7 +1542,6 @@ static void cache_set_free(struct closure *cl)
mempool_exit(&c->search);
kfree(c->devices);
- mutex_lock(&bch_register_lock);
list_del(&c->list);
mutex_unlock(&bch_register_lock);
@@ -1673,6 +1680,7 @@ static void __cache_set_unregister(struct closure *cl)
void bch_cache_set_stop(struct cache_set *c)
{
if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
+ /* closure_fn set to __cache_set_unregister() */
closure_queue(&c->caching);
}
@@ -1775,13 +1783,15 @@ err:
return NULL;
}
-static void run_cache_set(struct cache_set *c)
+static int run_cache_set(struct cache_set *c)
{
const char *err = "cannot allocate memory";
struct cached_dev *dc, *t;
struct cache *ca;
struct closure cl;
unsigned int i;
+ LIST_HEAD(journal);
+ struct journal_replay *l;
closure_init_stack(&cl);
@@ -1790,7 +1800,6 @@ static void run_cache_set(struct cache_set *c)
set_gc_sectors(c);
if (CACHE_SYNC(&c->sb)) {
- LIST_HEAD(journal);
struct bkey *k;
struct jset *j;
@@ -1869,7 +1878,9 @@ static void run_cache_set(struct cache_set *c)
if (j->version < BCACHE_JSET_VERSION_UUID)
__uuid_write(c);
- bch_journal_replay(c, &journal);
+ err = "bcache: replay journal failed";
+ if (bch_journal_replay(c, &journal))
+ goto err;
} else {
pr_notice("invalidating existing data");
@@ -1937,11 +1948,19 @@ static void run_cache_set(struct cache_set *c)
flash_devs_run(c);
set_bit(CACHE_SET_RUNNING, &c->flags);
- return;
+ return 0;
err:
+ while (!list_empty(&journal)) {
+ l = list_first_entry(&journal, struct journal_replay, list);
+ list_del(&l->list);
+ kfree(l);
+ }
+
closure_sync(&cl);
/* XXX: test this, it's broken */
bch_cache_set_error(c, "%s", err);
+
+ return -EIO;
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
@@ -2005,8 +2024,11 @@ found:
ca->set->cache[ca->sb.nr_this_dev] = ca;
c->cache_by_alloc[c->caches_loaded++] = ca;
- if (c->caches_loaded == c->sb.nr_in_set)
- run_cache_set(c);
+ if (c->caches_loaded == c->sb.nr_in_set) {
+ err = "failed to run cache set";
+ if (run_cache_set(c) < 0)
+ goto err;
+ }
return NULL;
err:
@@ -2016,6 +2038,7 @@ err:
/* Cache device */
+/* When ca->kobj released */
void bch_cache_release(struct kobject *kobj)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
@@ -2179,6 +2202,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ret = cache_alloc(ca);
if (ret != 0) {
+ /*
+ * If we failed here, ca->kobj is not initialized yet, so
+ * kobject_put() won't be called and bch_cache_release() will
+ * never get the chance to call blkdev_put() on bdev. Call
+ * blkdev_put() explicitly here instead.
+ */
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
@@ -2262,7 +2291,7 @@ static bool bch_is_open(struct block_device *bdev)
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
- ssize_t ret = size;
+ ssize_t ret = -EINVAL;
const char *err = "cannot allocate memory";
char *path = NULL;
struct cache_sb *sb = NULL;
@@ -2296,7 +2325,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
- goto out;
+ goto quiet_out;
}
goto err;
}
@@ -2317,17 +2346,23 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
goto err_close;
mutex_lock(&bch_register_lock);
- register_bdev(sb, sb_page, bdev, dc);
+ ret = register_bdev(sb, sb_page, bdev, dc);
mutex_unlock(&bch_register_lock);
+ /* blkdev_put() will be called in cached_dev_free() */
+ if (ret < 0)
+ goto err;
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
goto err_close;
+ /* blkdev_put() will be called in bch_cache_release() */
if (register_cache(sb, sb_page, bdev, ca) != 0)
goto err;
}
+quiet_out:
+ ret = size;
out:
if (sb_page)
put_page(sb_page);
@@ -2340,7 +2375,6 @@ err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
pr_info("error %s: %s", path, err);
- ret = -EINVAL;
goto out;
}
@@ -2370,10 +2404,19 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
bcache_device_stop(&dc->disk);
+ mutex_unlock(&bch_register_lock);
+
+ /*
+ * Give other kthreads and kworkers an early
+ * chance to stop themselves
+ */
+ schedule();
+
/* What's a condition variable? */
while (1) {
- long timeout = start + 2 * HZ - jiffies;
+ long timeout = start + 10 * HZ - jiffies;
+ mutex_lock(&bch_register_lock);
stopped = list_empty(&bch_cache_sets) &&
list_empty(&uncached_devices);
@@ -2385,7 +2428,6 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
mutex_unlock(&bch_register_lock);
schedule_timeout(timeout);
- mutex_lock(&bch_register_lock);
}
finish_wait(&unregister_wait, &wait);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 17bae9c14ca0..6cd44d3cf906 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -996,8 +996,6 @@ SHOW(__bch_cache)
!cached[n - 1])
--n;
- unused = ca->sb.nbuckets - n;
-
while (cached < p + n &&
*cached == BTREE_PRIO)
cached++, n--;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 00aab6abcfe4..1fbced94e4cc 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -560,17 +560,29 @@ static inline uint64_t bch_crc64_update(uint64_t crc,
return crc;
}
-/* Does linear interpolation between powers of two */
+/*
+ * A stepwise-linear pseudo-exponential. This returns 1 << (x >>
+ * frac_bits), with the less-significant bits filled in by linear
+ * interpolation.
+ *
+ * This can also be interpreted as a floating-point number format,
+ * where the low frac_bits are the mantissa (with implicit leading
+ * 1 bit), and the more significant bits are the exponent.
+ * The return value is 1.mantissa * 2^exponent.
+ *
+ * The way this is used, fract_bits is 6 and the largest possible
+ * input is CONGESTED_MAX-1 = 1023 (exponent 15, mantissa 0x1.fc),
+ * so the maximum output is 0xfe00.
+ */
static inline unsigned int fract_exp_two(unsigned int x,
unsigned int fract_bits)
{
- unsigned int fract = x & ~(~0 << fract_bits);
-
- x >>= fract_bits;
- x = 1 << x;
- x += (x * fract) >> fract_bits;
+ unsigned int mantissa = 1 << fract_bits; /* Implicit bit */
- return x;
+ mantissa += x & (mantissa - 1);
+ x >>= fract_bits; /* The exponent */
+ /* Largest intermediate value 0x3f8000 */
+ return mantissa << x >> fract_bits;
}
void bch_bio_map(struct bio *bio, void *base);
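
The rewritten fract_exp_two() above is meant to be numerically identical to the old form for the inputs bcache uses. A standalone check of that claim, and of the maximum quoted in the comment (a userspace sketch adapted from the hunk above, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Old form: explicit power of two plus linear interpolation. */
static unsigned int fract_exp_two_old(unsigned int x, unsigned int fract_bits)
{
	unsigned int fract = x & ~(~0U << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;
	return x;
}

/* New form: mantissa (with implicit leading bit) shifted by the exponent. */
static unsigned int fract_exp_two_new(unsigned int x, unsigned int fract_bits)
{
	unsigned int mantissa = 1 << fract_bits;	/* implicit bit */

	mantissa += x & (mantissa - 1);
	x >>= fract_bits;				/* the exponent */
	return mantissa << x >> fract_bits;
}

int main(void)
{
	unsigned int x;

	/* bcache only ever passes fract_bits == 6 and x <= 1023. */
	for (x = 0; x < 1024; x++)
		assert(fract_exp_two_old(x, 6) == fract_exp_two_new(x, 6));

	/* Maximum output: exponent 15, mantissa 0x1.fc -> 0xfe00. */
	printf("fract_exp_two(1023, 6) = 0x%x\n", fract_exp_two_new(1023, 6));
	return 0;
}
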
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 1ecef76225a1..2a48ea3f1b30 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -150,7 +150,7 @@ struct dm_buffer {
void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
- struct stack_trace stack_trace;
+ unsigned int stack_len;
unsigned long stack_entries[MAX_STACK];
#endif
};
@@ -232,11 +232,7 @@ static DEFINE_MUTEX(dm_bufio_clients_lock);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
- b->stack_trace.nr_entries = 0;
- b->stack_trace.max_entries = MAX_STACK;
- b->stack_trace.entries = b->stack_entries;
- b->stack_trace.skip = 2;
- save_stack_trace(&b->stack_trace);
+ b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif
@@ -438,7 +434,7 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
adjust_total_allocated(b->data_mode, (long)c->block_size);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
- memset(&b->stack_trace, 0, sizeof(b->stack_trace));
+ b->stack_len = 0;
#endif
return b;
}
@@ -1520,8 +1516,9 @@ static void drop_buffers(struct dm_bufio_client *c)
DMERR("leaked buffer %llx, hold count %u, list %d",
(unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
- print_stack_trace(&b->stack_trace, 1);
- b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
+ stack_trace_print(b->stack_entries, b->stack_len, 1);
+ /* mark unclaimed to avoid BUG_ON below */
+ b->hold_count = 0;
#endif
}
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 95c6d86ab5e8..c4ef1fceead6 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -115,6 +115,7 @@ struct mapped_device {
struct srcu_struct io_barrier;
};
+void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index dd6565798778..7f6462f74ac8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -332,7 +332,6 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
int err;
desc->tfm = essiv->hash_tfm;
- desc->flags = 0;
err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
shash_desc_zero(desc);
@@ -606,7 +605,6 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
int i, r;
desc->tfm = lmk->hash_tfm;
- desc->flags = 0;
r = crypto_shash_init(desc);
if (r)
@@ -768,7 +766,6 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
- desc->flags = 0;
for (i = 0; i < 4; i++) {
r = crypto_shash_init(desc);
if (r)
@@ -1445,11 +1442,10 @@ out:
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
- unsigned int i;
struct bio_vec *bv;
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bv, clone, i, iter_all) {
+ bio_for_each_segment_all(bv, clone, iter_all) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, &cc->page_pool);
}
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 12b5216c2cfe..721efc493942 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -135,9 +135,8 @@ struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
/*
* Functions to manipulate consecutive chunks
*/
-# if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64)
-# define DM_CHUNK_CONSECUTIVE_BITS 8
-# define DM_CHUNK_NUMBER_BITS 56
+#define DM_CHUNK_CONSECUTIVE_BITS 8
+#define DM_CHUNK_NUMBER_BITS 56
static inline chunk_t dm_chunk_number(chunk_t chunk)
{
@@ -163,29 +162,6 @@ static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
}
-# else
-# define DM_CHUNK_CONSECUTIVE_BITS 0
-
-static inline chunk_t dm_chunk_number(chunk_t chunk)
-{
- return chunk;
-}
-
-static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
-{
- return 0;
-}
-
-static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
-{
-}
-
-static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
-{
-}
-
-# endif
-
/*
* Return the number of sectors in the device.
*/
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index b53f30f16b4d..4b76f84424c3 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -36,7 +36,7 @@ struct dm_device {
struct list_head list;
};
-const char *dm_allowed_targets[] __initconst = {
+const char * const dm_allowed_targets[] __initconst = {
"crypt",
"delay",
"linear",
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d57d997a52c8..c27c32cf4a30 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -88,14 +88,10 @@ struct journal_entry {
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
-#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
-#elif defined(CONFIG_LBDAF)
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
-#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
-#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
+#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
@@ -532,7 +528,6 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
unsigned j, size;
desc->tfm = ic->journal_mac;
- desc->flags = 0;
r = crypto_shash_init(desc);
if (unlikely(r)) {
@@ -913,7 +908,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
- range2->logical_sector + range2->n_sectors > range2->logical_sector;
+ range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
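
The one-character fix in ranges_overlap() above matters because the old second condition compared range2 against itself and was therefore always true for any non-empty range, reducing the whole test to the first comparison alone. A toy model of the corrected half-open interval test (illustration only, not dm-integrity code):

#include <assert.h>
#include <stdbool.h>

struct range { unsigned long start, len; };

/* Two ranges [start, start + len) overlap iff each starts before the
 * other one ends. */
static bool overlaps(const struct range *a, const struct range *b)
{
	return a->start < b->start + b->len &&
	       b->start < a->start + a->len;
}

int main(void)
{
	struct range a = { .start = 0,  .len = 8 };
	struct range b = { .start = 4,  .len = 8 };
	struct range c = { .start = 16, .len = 8 };

	assert(overlaps(&a, &b));	/* [0,8) and [4,12) overlap */
	assert(!overlaps(&a, &c));	/* [0,8) and [16,24) do not */
	return 0;
}
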
@@ -959,8 +954,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
struct dm_integrity_range *last_range =
list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
struct task_struct *last_range_task;
- if (!ranges_overlap(range, last_range))
- break;
last_range_task = last_range->task;
list_del(&last_range->wait_entry);
if (!add_new_range(ic, last_range, false)) {
@@ -1278,7 +1271,6 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
unsigned digest_size;
req->tfm = ic->internal_hash;
- req->flags = 0;
r = crypto_shash_init(req);
if (unlikely(r < 0)) {
@@ -3185,7 +3177,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
journal_watermark = val;
else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
sync_msec = val;
- else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+ else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
if (ic->meta_dev) {
dm_put_device(ti, ic->meta_dev);
ic->meta_dev = NULL;
@@ -3204,17 +3196,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
- } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+ } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
if (r)
goto bad;
- } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+ } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
"Invalid journal_crypt argument");
if (r)
goto bad;
- } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+ } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
"Invalid journal_mac argument");
if (r)
@@ -3616,7 +3608,7 @@ static struct target_type integrity_target = {
.io_hints = dm_integrity_io_hints,
};
-int __init dm_integrity_init(void)
+static int __init dm_integrity_init(void)
{
int r;
@@ -3635,7 +3627,7 @@ int __init dm_integrity_init(void)
return r;
}
-void dm_integrity_exit(void)
+static void __exit dm_integrity_exit(void)
{
dm_unregister_target(&integrity_target);
kmem_cache_destroy(journal_io_cache);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 09773636602d..b66745bd08bb 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
}
if (unlikely(error == BLK_STS_TARGET)) {
- if (req_op(clone) == REQ_OP_WRITE_SAME &&
- !clone->q->limits.max_write_same_sectors)
+ if (req_op(clone) == REQ_OP_DISCARD &&
+ !clone->q->limits.max_discard_sectors)
+ disable_discard(tio->md);
+ else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+ !clone->q->limits.max_write_same_sectors)
disable_write_same(tio->md);
- if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
- !clone->q->limits.max_write_zeroes_sectors)
+ else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+ !clone->q->limits.max_write_zeroes_sectors)
disable_write_zeroes(tio->md);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ba9481f1bf3c..cde3b49b2a91 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
+static int device_requires_stable_pages(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+}
+
+/*
+ * If any underlying device requires stable pages, a table must require
+ * them as well. Only targets that support iterate_devices are considered:
+ * we don't want error, zero, etc. to require stable pages.
+ */
+static bool dm_table_requires_stable_pages(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+ return true;
+ }
+
+ return false;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_table_verify_integrity(t);
/*
+ * Some devices don't use blk_integrity but still want stable pages
+ * because they do their own checksumming.
+ */
+ if (dm_table_requires_stable_pages(t))
+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ else
+ q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
+ /*
* Determine whether or not this queue's I/O timings contribute
* to the entropy pool. Only request-based targets use this.
* Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68d24056d0b1..043f0761e4a0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
}
+void disable_discard(struct mapped_device *md)
+{
+ struct queue_limits *limits = dm_get_queue_limits(md);
+
+ /* device doesn't really support DISCARD, disable it */
+ limits->max_discard_sectors = 0;
+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
void disable_write_same(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
dm_endio_fn endio = tio->ti->type->end_io;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
- if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ if (bio_op(bio) == REQ_OP_DISCARD &&
+ !bio->bi_disk->queue->limits.max_discard_sectors)
+ disable_discard(md);
+ else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+ !bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
- if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
- /*
- * BIO based queue uses its own splitting. When multipage bvecs
- * is switched on, size of the incoming bio may be too big to
- * be handled in some targets, such as crypt.
- *
- * When these targets are ready for the big bio, we can remove
- * the limit.
- */
- ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
+ ti->max_io_len = (uint32_t) len;
return 0;
}
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 1cd4f991792c..3a62a46b75c7 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -490,10 +490,10 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
pr_debug(" version: %d\n", le32_to_cpu(sb->version));
pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
- le32_to_cpu(*(__u32 *)(sb->uuid+0)),
- le32_to_cpu(*(__u32 *)(sb->uuid+4)),
- le32_to_cpu(*(__u32 *)(sb->uuid+8)),
- le32_to_cpu(*(__u32 *)(sb->uuid+12)));
+ le32_to_cpu(*(__le32 *)(sb->uuid+0)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+4)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+8)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+12)));
pr_debug(" events: %llu\n",
(unsigned long long) le64_to_cpu(sb->events));
pr_debug("events cleared: %llu\n",
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 05ffffb8b769..45ffa23fa85d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -88,8 +88,7 @@ static struct kobj_type md_ktype;
struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
-struct module *md_cluster_mod;
-EXPORT_SYMBOL(md_cluster_mod);
+static struct module *md_cluster_mod;
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
@@ -132,24 +131,6 @@ static inline int speed_max(struct mddev *mddev)
mddev->sync_speed_max : sysctl_speed_limit_max;
}
-static void * flush_info_alloc(gfp_t gfp_flags, void *data)
-{
- return kzalloc(sizeof(struct flush_info), gfp_flags);
-}
-static void flush_info_free(void *flush_info, void *data)
-{
- kfree(flush_info);
-}
-
-static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
-{
- return kzalloc(sizeof(struct flush_bio), gfp_flags);
-}
-static void flush_bio_free(void *flush_bio, void *data)
-{
- kfree(flush_bio);
-}
-
static struct ctl_table_header *raid_table_header;
static struct ctl_table raid_table[] = {
@@ -423,54 +404,31 @@ static int md_congested(void *data, int bits)
/*
* Generic flush handling for md
*/
-static void submit_flushes(struct work_struct *ws)
-{
- struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
- struct mddev *mddev = fi->mddev;
- struct bio *bio = fi->bio;
-
- bio->bi_opf &= ~REQ_PREFLUSH;
- md_handle_request(mddev, bio);
-
- mempool_free(fi, mddev->flush_pool);
-}
-static void md_end_flush(struct bio *fbio)
+static void md_end_flush(struct bio *bio)
{
- struct flush_bio *fb = fbio->bi_private;
- struct md_rdev *rdev = fb->rdev;
- struct flush_info *fi = fb->fi;
- struct bio *bio = fi->bio;
- struct mddev *mddev = fi->mddev;
+ struct md_rdev *rdev = bio->bi_private;
+ struct mddev *mddev = rdev->mddev;
rdev_dec_pending(rdev, mddev);
- if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0) {
- /* an empty barrier - all done */
- bio_endio(bio);
- mempool_free(fi, mddev->flush_pool);
- } else {
- INIT_WORK(&fi->flush_work, submit_flushes);
- queue_work(md_wq, &fi->flush_work);
- }
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* The pre-request flush has finished */
+ queue_work(md_wq, &mddev->flush_work);
}
-
- mempool_free(fb, mddev->flush_bio_pool);
- bio_put(fbio);
+ bio_put(bio);
}
-void md_flush_request(struct mddev *mddev, struct bio *bio)
+static void md_submit_flush_data(struct work_struct *ws);
+
+static void submit_flushes(struct work_struct *ws)
{
+ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
- struct flush_info *fi;
-
- fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
-
- fi->bio = bio;
- fi->mddev = mddev;
- atomic_set(&fi->flush_pending, 1);
+ mddev->start_flush = ktime_get_boottime();
+ INIT_WORK(&mddev->flush_work, md_submit_flush_data);
+ atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
@@ -480,37 +438,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
* we reclaim rcu_read_lock
*/
struct bio *bi;
- struct flush_bio *fb;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
-
- fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
- fb->fi = fi;
- fb->rdev = rdev;
-
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
- bio_set_dev(bi, rdev->bdev);
bi->bi_end_io = md_end_flush;
- bi->bi_private = fb;
+ bi->bi_private = rdev;
+ bio_set_dev(bi, rdev->bdev);
bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- atomic_inc(&fi->flush_pending);
+ atomic_inc(&mddev->flush_pending);
submit_bio(bi);
-
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
+ if (atomic_dec_and_test(&mddev->flush_pending))
+ queue_work(md_wq, &mddev->flush_work);
+}
+
+static void md_submit_flush_data(struct work_struct *ws)
+{
+ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
+ struct bio *bio = mddev->flush_bio;
+
+ /*
+ * flush_bio must be reset before calling into md_handle_request to
+ * avoid a deadlock: bios that already passed the md_handle_request
+ * suspend check could be waiting for this flush, while the
+ * md_handle_request call below could in turn wait for those bios
+ * because of the same suspend check.
+ */
+ mddev->last_flush = mddev->start_flush;
+ mddev->flush_bio = NULL;
+ wake_up(&mddev->sb_wait);
+
+ if (bio->bi_iter.bi_size == 0) {
+ /* an empty barrier - all done */
+ bio_endio(bio);
+ } else {
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ md_handle_request(mddev, bio);
+ }
+}
- if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0) {
+void md_flush_request(struct mddev *mddev, struct bio *bio)
+{
+ ktime_t start = ktime_get_boottime();
+ spin_lock_irq(&mddev->lock);
+ wait_event_lock_irq(mddev->sb_wait,
+ !mddev->flush_bio ||
+ ktime_after(mddev->last_flush, start),
+ mddev->lock);
+ if (!ktime_after(mddev->last_flush, start)) {
+ WARN_ON(mddev->flush_bio);
+ mddev->flush_bio = bio;
+ bio = NULL;
+ }
+ spin_unlock_irq(&mddev->lock);
+
+ if (!bio) {
+ INIT_WORK(&mddev->flush_work, submit_flushes);
+ queue_work(md_wq, &mddev->flush_work);
+ } else {
+ /* flush was performed for some other bio while we waited. */
+ if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio);
- mempool_free(fi, mddev->flush_pool);
- } else {
- INIT_WORK(&fi->flush_work, submit_flushes);
- queue_work(md_wq, &fi->flush_work);
+ else {
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ mddev->pers->make_request(mddev, bio);
}
}
}
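
The rework above replaces the per-flush mempools with a single in-flight flush per mddev and coalesces concurrent callers by timestamp. The decision rule is small enough to state on its own; the sketch below extracts it under the simplifying assumption that times are plain integers (ktime_after(a, b) is just a > b), and is not md code:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * A caller may skip submitting its own flush if a flush that *started*
 * after the caller arrived has already *completed* (its start time is
 * recorded as last_flush once it finishes).
 */
static bool can_reuse_flush(int64_t last_completed_flush_start,
			    int64_t my_arrival_time)
{
	return last_completed_flush_start > my_arrival_time;
}

int main(void)
{
	/* A flush that began at t=5 and finished covers a request from t=3. */
	assert(can_reuse_flush(5, 3));
	/* It does not cover a request that arrived later, at t=7. */
	assert(!can_reuse_flush(5, 7));
	return 0;
}
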
@@ -560,6 +555,7 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
spin_lock_init(&mddev->lock);
+ atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
@@ -1109,8 +1105,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
* (not needed for Linear and RAID0 as metadata doesn't
* record this size)
*/
- if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
- sb->level >= 1)
+ if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (sector_t)(2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
@@ -1408,8 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
- if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
- rdev->mddev->level >= 1)
+ if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
do {
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -1553,7 +1547,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
*/
s32 offset;
sector_t bb_sector;
- u64 *bbp;
+ __le64 *bbp;
int i;
int sectors = le16_to_cpu(sb->bblog_size);
if (sectors > (PAGE_SIZE / 512))
@@ -1565,7 +1559,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
if (!sync_page_io(rdev, bb_sector, sectors << 9,
rdev->bb_page, REQ_OP_READ, 0, true))
return -EIO;
- bbp = (u64 *)page_address(rdev->bb_page);
+ bbp = (__le64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
u64 bb = le64_to_cpu(*bbp);
@@ -1877,7 +1871,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
md_error(mddev, rdev);
else {
struct badblocks *bb = &rdev->badblocks;
- u64 *bbp = (u64 *)page_address(rdev->bb_page);
+ __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
@@ -2855,8 +2849,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
- rdev->saved_raid_disk >= 0) {
+ if (!rdev->mddev->pers)
+ err = -EINVAL;
+ else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+ rdev->saved_raid_disk >= 0) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
@@ -3384,10 +3380,10 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- rv = mddev ? mddev_lock(mddev): -EBUSY;
+ rv = mddev ? mddev_lock(mddev) : -ENODEV;
if (!rv) {
if (rdev->mddev == NULL)
- rv = -EBUSY;
+ rv = -ENODEV;
else
rv = entry->store(rdev, page, length);
mddev_unlock(mddev);
@@ -5511,22 +5507,6 @@ int md_run(struct mddev *mddev)
if (err)
return err;
}
- if (mddev->flush_pool == NULL) {
- mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
- flush_info_free, mddev);
- if (!mddev->flush_pool) {
- err = -ENOMEM;
- goto abort;
- }
- }
- if (mddev->flush_bio_pool == NULL) {
- mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
- flush_bio_free, mddev);
- if (!mddev->flush_bio_pool) {
- err = -ENOMEM;
- goto abort;
- }
- }
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -5686,11 +5666,8 @@ int md_run(struct mddev *mddev)
return 0;
abort:
- mempool_destroy(mddev->flush_bio_pool);
- mddev->flush_bio_pool = NULL;
- mempool_destroy(mddev->flush_pool);
- mddev->flush_pool = NULL;
-
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
return err;
}
EXPORT_SYMBOL_GPL(md_run);
@@ -5894,14 +5871,6 @@ static void __md_stop(struct mddev *mddev)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (mddev->flush_bio_pool) {
- mempool_destroy(mddev->flush_bio_pool);
- mddev->flush_bio_pool = NULL;
- }
- if (mddev->flush_pool) {
- mempool_destroy(mddev->flush_pool);
- mddev->flush_pool = NULL;
- }
}
void md_stop(struct mddev *mddev)
@@ -9257,7 +9226,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
* reshape is happening in the remote node, we need to
* update reshape_position and call start_reshape.
*/
- mddev->reshape_position = sb->reshape_position;
+ mddev->reshape_position = le64_to_cpu(sb->reshape_position);
if (mddev->pers->update_reshape_pos)
mddev->pers->update_reshape_pos(mddev);
if (mddev->pers->start_reshape)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index c52afb52c776..257cb4c9e22b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -252,19 +252,6 @@ enum mddev_sb_flags {
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
-#define NR_FLUSH_INFOS 8
-#define NR_FLUSH_BIOS 64
-struct flush_info {
- struct bio *bio;
- struct mddev *mddev;
- struct work_struct flush_work;
- atomic_t flush_pending;
-};
-struct flush_bio {
- struct flush_info *fi;
- struct md_rdev *rdev;
-};
-
struct mddev {
void *private;
struct md_personality *pers;
@@ -470,8 +457,16 @@ struct mddev {
* metadata and bitmap writes
*/
- mempool_t *flush_pool;
- mempool_t *flush_bio_pool;
+ /* Generic flush handling.
+ * The last device to finish its preflush schedules a worker to submit
+ * the rest of the request (without the REQ_PREFLUSH flag).
+ */
+ struct bio *flush_bio;
+ atomic_t flush_pending;
+ ktime_t start_flush, last_flush; /* last_flush is when the last completed
+ * flush was started.
+ */
+ struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 3972232b8037..749ec268d957 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -35,7 +35,10 @@
#define MAX_HOLDERS 4
#define MAX_STACK 10
-typedef unsigned long stack_entries[MAX_STACK];
+struct stack_store {
+ unsigned int nr_entries;
+ unsigned long entries[MAX_STACK];
+};
struct block_lock {
spinlock_t lock;
@@ -44,8 +47,7 @@ struct block_lock {
struct task_struct *holders[MAX_HOLDERS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
- struct stack_trace traces[MAX_HOLDERS];
- stack_entries entries[MAX_HOLDERS];
+ struct stack_store traces[MAX_HOLDERS];
#endif
};
@@ -73,7 +75,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
- struct stack_trace *t;
+ struct stack_store *t;
#endif
get_task_struct(task);
@@ -81,11 +83,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
t = lock->traces + h;
- t->nr_entries = 0;
- t->max_entries = MAX_STACK;
- t->entries = lock->entries[h];
- t->skip = 2;
- save_stack_trace(t);
+ t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
#endif
}
@@ -106,7 +104,8 @@ static int __check_holder(struct block_lock *lock)
DMERR("recursive lock detected in metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
DMERR("previously held here:");
- print_stack_trace(lock->traces + i, 4);
+ stack_trace_print(lock->traces[i].entries,
+ lock->traces[i].nr_entries, 4);
DMERR("subsequent acquisition attempted here:");
dump_stack();
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fdf451aac369..0c8a098d220e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2110,7 +2110,7 @@ static void process_checks(struct r1bio *r1_bio)
}
r1_bio->read_disk = primary;
for (i = 0; i < conf->raid_disks * 2; i++) {
- int j;
+ int j = 0;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
blk_status_t status = sbio->bi_status;
@@ -2125,8 +2125,8 @@ static void process_checks(struct r1bio *r1_bio)
/* Now we can 'fixup' the error value */
sbio->bi_status = 0;
- bio_for_each_segment_all(bi, sbio, j, iter_all)
- page_len[j] = bi->bv_len;
+ bio_for_each_segment_all(bi, sbio, iter_all)
+ page_len[j++] = bi->bv_len;
if (!status) {
for (j = vcnt; j-- ; ) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c033bfcb209e..7fde645d2e90 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -711,6 +711,8 @@ static bool is_full_stripe_write(struct stripe_head *sh)
}
static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ __acquires(&sh1->stripe_lock)
+ __acquires(&sh2->stripe_lock)
{
if (sh1 > sh2) {
spin_lock_irq(&sh2->stripe_lock);
@@ -722,6 +724,8 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
}
static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ __releases(&sh1->stripe_lock)
+ __releases(&sh2->stripe_lock)
{
spin_unlock(&sh1->stripe_lock);
spin_unlock_irq(&sh2->stripe_lock);
@@ -4187,7 +4191,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
/* now write out any block on a failed drive,
* or P or Q if they were recomputed
*/
- BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
+ dev = NULL;
if (s->failed == 2) {
dev = &sh->dev[s->failed_num[1]];
s->locked++;
@@ -4212,6 +4216,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
}
+ if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
+ "%s: disk%td not up to date\n",
+ mdname(conf->mddev),
+ dev - (struct r5dev *) &sh->dev)) {
+ clear_bit(R5_LOCKED, &dev->flags);
+ clear_bit(R5_Wantwrite, &dev->flags);
+ s->locked--;
+ }
clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
@@ -6166,6 +6178,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
static int handle_active_stripes(struct r5conf *conf, int group,
struct r5worker *worker,
struct list_head *temp_inactive_list)
+ __releases(&conf->device_lock)
+ __acquires(&conf->device_lock)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
int i, batch_size = 0, hash;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 102eb35fcf3f..8efaf99243e0 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -31,14 +31,14 @@ comment "Multimedia core support"
#
config MEDIA_CAMERA_SUPPORT
bool "Cameras/video grabbers support"
- ---help---
+ help
Enable support for webcams and video grabbers.
Say Y when you have a webcam or a video capture grabber board.
config MEDIA_ANALOG_TV_SUPPORT
bool "Analog TV support"
- ---help---
+ help
Enable analog TV support.
Say Y when you have a TV board with analog support or with a
@@ -50,7 +50,7 @@ config MEDIA_ANALOG_TV_SUPPORT
config MEDIA_DIGITAL_TV_SUPPORT
bool "Digital TV support"
- ---help---
+ help
Enable digital TV support.
Say Y when you have a board with digital support or a board with
@@ -58,7 +58,7 @@ config MEDIA_DIGITAL_TV_SUPPORT
config MEDIA_RADIO_SUPPORT
bool "AM/FM radio receivers/transmitters support"
- ---help---
+ help
Enable AM/FM radio support.
Additional info and docs are available on the web at
@@ -72,14 +72,14 @@ config MEDIA_RADIO_SUPPORT
config MEDIA_SDR_SUPPORT
bool "Software defined radio support"
- ---help---
+ help
Enable software defined radio support.
Say Y when you have a software defined radio device.
config MEDIA_CEC_SUPPORT
bool "HDMI CEC support"
- ---help---
+ help
Enable support for HDMI CEC (Consumer Electronics Control),
which is an optional HDMI feature.
@@ -96,7 +96,7 @@ source "drivers/media/cec/Kconfig"
config MEDIA_CONTROLLER
bool "Media Controller API"
depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT
- ---help---
+ help
Enable the media controller API used to query media devices' internal
topology and configure it dynamically.
@@ -105,7 +105,7 @@ config MEDIA_CONTROLLER
config MEDIA_CONTROLLER_DVB
bool "Enable Media controller for DVB (EXPERIMENTAL)"
depends on MEDIA_CONTROLLER && DVB_CORE
- ---help---
+ help
Enable the media controller API support for DVB.
This is currently experimental.
@@ -114,7 +114,7 @@ config MEDIA_CONTROLLER_REQUEST_API
bool "Enable Media controller Request API (EXPERIMENTAL)"
depends on MEDIA_CONTROLLER && STAGING_MEDIA
default n
- ---help---
+ help
DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
This option enables the Request API for the Media controller and V4L2
@@ -137,7 +137,7 @@ config VIDEO_DEV
config VIDEO_V4L2_SUBDEV_API
bool "V4L2 sub-device userspace API"
depends on VIDEO_DEV && MEDIA_CONTROLLER
- ---help---
+ help
Enables the V4L2 sub-device pad-level userspace API used to configure
video format, size and frame rate between hardware blocks.
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 985d35ec6b29..4a330d0e5e40 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -6,6 +6,12 @@
media-objs := media-device.o media-devnode.o media-entity.o \
media-request.o
+ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
+ ifeq ($(CONFIG_USB),y)
+ media-objs += media-dev-allocator.o
+ endif
+endif
+
#
# I2C drivers should come before other drivers, otherwise they'll fail
# when compiled as builtin drivers
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index 9c2b108c613a..b5aadacf335a 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -2,11 +2,11 @@ config MEDIA_CEC_RC
bool "HDMI CEC RC integration"
depends on CEC_CORE && RC_CORE
depends on CEC_CORE=m || RC_CORE=y
- ---help---
+ help
Pass on CEC remote control messages to the RC framework.
config CEC_PIN_ERROR_INJ
bool "Enable CEC error injection support"
depends on CEC_PIN && DEBUG_FS
- ---help---
+ help
This option enables CEC error injection using debugfs.
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index cc875dabd765..f5d1578e256a 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -126,6 +126,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
/* Part 2: Initialize and register the character device */
cdev_init(&devnode->cdev, &cec_devnode_fops);
devnode->cdev.owner = owner;
+ kobject_set_name(&devnode->cdev.kobj, "cec%d", devnode->minor);
ret = cdev_device_add(&devnode->cdev, &devnode->dev);
if (ret) {
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index dd2078b27a41..9598c7778871 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/of_platform.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
@@ -127,3 +128,32 @@ void cec_notifier_unregister(struct cec_notifier *n)
cec_notifier_put(n);
}
EXPORT_SYMBOL_GPL(cec_notifier_unregister);
+
+struct device *cec_notifier_parse_hdmi_phandle(struct device *dev)
+{
+ struct platform_device *hdmi_pdev;
+ struct device *hdmi_dev = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(dev, "Failed to find HDMI node in device tree\n");
+ return ERR_PTR(-ENODEV);
+ }
+ hdmi_pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (hdmi_pdev) {
+ hdmi_dev = &hdmi_pdev->dev;
+ /*
+ * Note that the device struct is only used as a key into the
+ * cec_notifiers list, it is never actually accessed.
+ * So we decrement the reference here so we don't leak
+ * memory.
+ */
+ put_device(hdmi_dev);
+ return hdmi_dev;
+ }
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL_GPL(cec_notifier_parse_hdmi_phandle);
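
For context, a caller of the new helper might look like the sketch below; example_hdmi_cec_probe() and its pairing with cec_notifier_get() are hypothetical illustrations, not code from this series:

static int example_hdmi_cec_probe(struct platform_device *pdev)
{
	struct device *hdmi_dev;
	struct cec_notifier *notifier;

	/* Resolve the "hdmi-phandle" DT property to the HDMI device. */
	hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
	if (IS_ERR(hdmi_dev))
		return PTR_ERR(hdmi_dev);	/* may be -EPROBE_DEFER */

	/* The returned pointer is only used as a lookup key here. */
	notifier = cec_notifier_get(hdmi_dev);
	if (!notifier)
		return -ENOMEM;

	platform_set_drvdata(pdev, notifier);
	return 0;
}
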
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 1dcc39b87bb7..121cda73ff88 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -1028,7 +1028,7 @@ static int cx2341x_api(void *priv, cx2341x_mbox_func func,
return func(priv, cmd, args, 0, data);
}
-#define NEQ(field) (old->field != new->field)
+#define CMP_FIELD(__old, __new, __field) (__old->__field != __new->__field)
int cx2341x_update(void *priv, cx2341x_mbox_func func,
const struct cx2341x_mpeg_params *old,
@@ -1042,20 +1042,22 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func,
11, /* VCD */
12, /* SVCD */
};
-
- int err = 0;
- int force = (old == NULL);
- u16 temporal = new->video_temporal_filter;
+ int err;
cx2341x_api(priv, func, CX2341X_ENC_SET_OUTPUT_PORT, 2, new->port, 0);
- if (force || NEQ(is_50hz)) {
+ if (!old ||
+ CMP_FIELD(old, new, is_50hz)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_RATE, 1,
new->is_50hz);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(width) || NEQ(height) || NEQ(video_encoding)) {
+ if (!old ||
+ CMP_FIELD(old, new, width) ||
+ CMP_FIELD(old, new, height) ||
+ CMP_FIELD(old, new, video_encoding)) {
u16 w = new->width;
u16 h = new->height;
@@ -1065,94 +1067,127 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func,
}
err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_SIZE, 2,
h, w);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(stream_type)) {
+ if (!old ||
+ CMP_FIELD(old, new, stream_type)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_STREAM_TYPE, 1,
mpeg_stream_type[new->stream_type]);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(video_aspect)) {
+ if (!old ||
+ CMP_FIELD(old, new, video_aspect)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_ASPECT_RATIO, 1,
1 + new->video_aspect);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(video_b_frames) || NEQ(video_gop_size)) {
+ if (!old ||
+ CMP_FIELD(old, new, video_b_frames) ||
+ CMP_FIELD(old, new, video_gop_size)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_PROPERTIES, 2,
- new->video_gop_size, new->video_b_frames + 1);
- if (err) return err;
+ new->video_gop_size, new->video_b_frames + 1);
+ if (err)
+ return err;
}
- if (force || NEQ(video_gop_closure)) {
+ if (!old ||
+ CMP_FIELD(old, new, video_gop_closure)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_CLOSURE, 1,
new->video_gop_closure);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(audio_properties)) {
+ if (!old ||
+ CMP_FIELD(old, new, audio_properties)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_AUDIO_PROPERTIES,
1, new->audio_properties);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(audio_mute)) {
+ if (!old ||
+ CMP_FIELD(old, new, audio_mute)) {
err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_AUDIO, 1,
new->audio_mute);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(video_bitrate_mode) || NEQ(video_bitrate) ||
- NEQ(video_bitrate_peak)) {
+ if (!old ||
+ CMP_FIELD(old, new, video_bitrate_mode) ||
+ CMP_FIELD(old, new, video_bitrate) ||
+ CMP_FIELD(old, new, video_bitrate_peak)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_BIT_RATE, 5,
- new->video_bitrate_mode, new->video_bitrate,
- new->video_bitrate_peak / 400, 0, 0);
- if (err) return err;
+ new->video_bitrate_mode, new->video_bitrate,
+ new->video_bitrate_peak / 400, 0, 0);
+ if (err)
+ return err;
}
- if (force || NEQ(video_spatial_filter_mode) ||
- NEQ(video_temporal_filter_mode) ||
- NEQ(video_median_filter_type)) {
+ if (!old ||
+ CMP_FIELD(old, new, video_spatial_filter_mode) ||
+ CMP_FIELD(old, new, video_temporal_filter_mode) ||
+ CMP_FIELD(old, new, video_median_filter_type)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_MODE,
- 2, new->video_spatial_filter_mode |
+ 2,
+ new->video_spatial_filter_mode |
(new->video_temporal_filter_mode << 1),
- new->video_median_filter_type);
- if (err) return err;
+ new->video_median_filter_type);
+ if (err)
+ return err;
}
- if (force || NEQ(video_luma_median_filter_bottom) ||
- NEQ(video_luma_median_filter_top) ||
- NEQ(video_chroma_median_filter_bottom) ||
- NEQ(video_chroma_median_filter_top)) {
+ if (!old ||
+ CMP_FIELD(old, new, video_luma_median_filter_bottom) ||
+ CMP_FIELD(old, new, video_luma_median_filter_top) ||
+ CMP_FIELD(old, new, video_chroma_median_filter_bottom) ||
+ CMP_FIELD(old, new, video_chroma_median_filter_top)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_CORING_LEVELS, 4,
- new->video_luma_median_filter_bottom,
- new->video_luma_median_filter_top,
- new->video_chroma_median_filter_bottom,
- new->video_chroma_median_filter_top);
- if (err) return err;
+ new->video_luma_median_filter_bottom,
+ new->video_luma_median_filter_top,
+ new->video_chroma_median_filter_bottom,
+ new->video_chroma_median_filter_top);
+ if (err)
+ return err;
}
- if (force || NEQ(video_luma_spatial_filter_type) ||
- NEQ(video_chroma_spatial_filter_type)) {
+ if (!old ||
+ CMP_FIELD(old, new, video_luma_spatial_filter_type) ||
+ CMP_FIELD(old, new, video_chroma_spatial_filter_type)) {
err = cx2341x_api(priv, func,
CX2341X_ENC_SET_SPATIAL_FILTER_TYPE,
2, new->video_luma_spatial_filter_type,
new->video_chroma_spatial_filter_type);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(video_spatial_filter) ||
- old->video_temporal_filter != temporal) {
+ if (!old ||
+ CMP_FIELD(old, new, video_spatial_filter) ||
+ CMP_FIELD(old, new, video_temporal_filter)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_PROPS,
- 2, new->video_spatial_filter, temporal);
- if (err) return err;
+ 2, new->video_spatial_filter,
+ new->video_temporal_filter);
+ if (err)
+ return err;
}
- if (force || NEQ(video_temporal_decimation)) {
+ if (!old ||
+ CMP_FIELD(old, new, video_temporal_decimation)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_DROP_RATE,
1, new->video_temporal_decimation);
- if (err) return err;
+ if (err)
+ return err;
}
- if (force || NEQ(video_mute) ||
- (new->video_mute && NEQ(video_mute_yuv))) {
+ if (!old ||
+ CMP_FIELD(old, new, video_mute) ||
+ (new->video_mute && CMP_FIELD(old, new, video_mute_yuv))) {
err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_VIDEO, 1,
- new->video_mute | (new->video_mute_yuv << 8));
- if (err) return err;
+ new->video_mute | (new->video_mute_yuv << 8));
+ if (err)
+ return err;
}
- if (force || NEQ(stream_insert_nav_packets)) {
+ if (!old ||
+ CMP_FIELD(old, new, stream_insert_nav_packets)) {
err = cx2341x_api(priv, func, CX2341X_ENC_MISC, 2,
- 7, new->stream_insert_nav_packets);
- if (err) return err;
+ 7, new->stream_insert_nav_packets);
+ if (err)
+ return err;
}
return 0;
}
diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
index 4bfbd5f463d1..577880b133eb 100644
--- a/drivers/media/common/siano/Kconfig
+++ b/drivers/media/common/siano/Kconfig
@@ -15,7 +15,7 @@ config SMS_SIANO_RC
depends on SMS_USB_DRV || SMS_SDIO_DRV
depends on MEDIA_COMMON_OPTIONS
default y
- ---help---
+ help
Choose Y to select Remote Controller support for Siano driver.
config SMS_SIANO_DEBUGFS
@@ -24,7 +24,7 @@ config SMS_SIANO_DEBUGFS
depends on DEBUG_FS
depends on SMS_USB_DRV = SMS_SDIO_DRV
- ---help---
+ help
Choose Y to enable visualizing a dump of the frontend
statistics response packets via debugfs. Currently, works
only with Siano USB devices.
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 15b6b9c0a2e4..3cf25abf5807 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -672,6 +672,11 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
return -EBUSY;
}
+ if (q->waiting_in_dqbuf && *count) {
+ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
+ return -EBUSY;
+ }
+
if (*count == 0 || q->num_buffers != 0 ||
(q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
/*
@@ -807,6 +812,10 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
}
if (!q->num_buffers) {
+ if (q->waiting_in_dqbuf && *count) {
+ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
+ return -EBUSY;
+ }
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
q->waiting_for_buffers = !q->is_output;
@@ -915,8 +924,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
if (WARN_ON(state != VB2_BUF_STATE_DONE &&
state != VB2_BUF_STATE_ERROR &&
- state != VB2_BUF_STATE_QUEUED &&
- state != VB2_BUF_STATE_REQUEUEING))
+ state != VB2_BUF_STATE_QUEUED))
state = VB2_BUF_STATE_ERROR;
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -929,8 +937,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
dprintk(4, "done processing on buffer %d, state: %d\n",
vb->index, state);
- if (state != VB2_BUF_STATE_QUEUED &&
- state != VB2_BUF_STATE_REQUEUEING) {
+ if (state != VB2_BUF_STATE_QUEUED) {
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
@@ -938,8 +945,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
}
spin_lock_irqsave(&q->done_lock, flags);
- if (state == VB2_BUF_STATE_QUEUED ||
- state == VB2_BUF_STATE_REQUEUEING) {
+ if (state == VB2_BUF_STATE_QUEUED) {
vb->state = VB2_BUF_STATE_QUEUED;
} else {
/* Add the buffer to the done buffers list */
@@ -949,8 +955,6 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
atomic_dec(&q->owned_by_drv_count);
if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
- /* This is not supported at the moment */
- WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
media_request_object_unbind(&vb->req_obj);
media_request_object_put(&vb->req_obj);
}
@@ -962,10 +966,6 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
switch (state) {
case VB2_BUF_STATE_QUEUED:
return;
- case VB2_BUF_STATE_REQUEUEING:
- if (q->start_streaming_called)
- __enqueue_in_driver(vb);
- return;
default:
/* Inform any processes that may be waiting for buffers */
wake_up(&q->done_wq);
@@ -1516,6 +1516,12 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
vb = q->bufs[index];
+ if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
+ q->requires_requests) {
+ dprintk(1, "qbuf requires a request\n");
+ return -EBADR;
+ }
+
if ((req && q->uses_qbuf) ||
(!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
q->uses_requests)) {
@@ -1659,6 +1665,11 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
for (;;) {
int ret;
+ if (q->waiting_in_dqbuf) {
+ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
+ return -EBUSY;
+ }
+
if (!q->streaming) {
dprintk(1, "streaming off, will not wait for buffers\n");
return -EINVAL;
@@ -1686,6 +1697,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
return -EAGAIN;
}
+ q->waiting_in_dqbuf = 1;
/*
* We are streaming and blocking, wait for another buffer to
* become ready or for streamoff. Driver's lock is released to
@@ -1706,6 +1718,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* the locks or return an error if one occurred.
*/
call_void_qop(q, wait_finish, q);
+ q->waiting_in_dqbuf = 0;
if (ret) {
dprintk(1, "sleep was interrupted\n");
return ret;
@@ -2188,6 +2201,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
goto unlock;
}
+ /*
+ * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
+ * not as an in-buffer offset. We always want to mmap a whole buffer
+ * from its beginning.
+ */
+ vma->vm_pgoff = 0;
+
ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
unlock:
@@ -2247,6 +2267,9 @@ int vb2_core_queue_init(struct vb2_queue *q)
WARN_ON(!q->ops->buf_queue))
return -EINVAL;
+ if (WARN_ON(q->requires_requests && !q->supports_requests))
+ return -EINVAL;
+
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
spin_lock_init(&q->done_lock);
@@ -2585,6 +2608,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
if (!data)
return -EINVAL;
+ if (q->waiting_in_dqbuf) {
+ dprintk(3, "another dup()ped fd is %s\n",
+ read ? "reading" : "writing");
+ return -EBUSY;
+ }
+
/*
* Initialize emulator on first call.
*/
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 82389aead6ed..ecbef266130b 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -186,12 +186,6 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return -EINVAL;
}
- /*
- * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
- * map whole buffer
- */
- vma->vm_pgoff = 0;
-
ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
buf->dma_addr, buf->size, buf->attrs);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 270c3162fdcb..4a4c49d6085c 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -328,28 +328,18 @@ static unsigned int vb2_dma_sg_num_users(void *buf_priv)
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- int i = 0;
+ int err;
if (!buf) {
printk(KERN_ERR "No memory to map\n");
return -EINVAL;
}
- do {
- int ret;
-
- ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
- if (ret) {
- printk(KERN_ERR "Remapping memory, error: %d\n", ret);
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
-
+ err = vm_map_pages(vma, buf->pages, buf->num_pages);
+ if (err) {
+ printk(KERN_ERR "Remapping memory, error: %d\n", err);
+ return err;
+ }
/*
* Use common vm_area operations to track buffer refcount.
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index d09dee20e421..fb9ac7696fc6 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -122,9 +122,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/*
- * __init_v4l2_vb2_buffer() - initialize the v4l2_vb2_buffer struct
+ * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
*/
-static void __init_v4l2_vb2_buffer(struct vb2_buffer *vb)
+static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -368,6 +368,12 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
if (ret)
return ret;
+ if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
+ vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "%s: buffer is not in dequeued state\n", opname);
+ return -EINVAL;
+ }
+
if (!vb->prepared) {
/* Copy relevant information provided by the userspace */
memset(vbuf->planes, 0,
@@ -381,6 +387,10 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
return 0;
if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+ if (q->requires_requests) {
+ dprintk(1, "%s: queue requires requests\n", opname);
+ return -EBADR;
+ }
if (q->uses_requests) {
dprintk(1, "%s: queue uses requests\n", opname);
return -EBUSY;
@@ -388,7 +398,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
return 0;
} else if (!q->supports_requests) {
dprintk(1, "%s: queue does not support requests\n", opname);
- return -EACCES;
+ return -EBADR;
} else if (q->uses_qbuf) {
dprintk(1, "%s: queue does not use requests\n", opname);
return -EBUSY;
@@ -419,11 +429,6 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
!q->ops->buf_out_validate))
return -EINVAL;
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- dprintk(1, "%s: buffer is not in dequeued state\n", opname);
- return -EINVAL;
- }
-
if (b->request_fd < 0) {
dprintk(1, "%s: request_fd < 0\n", opname);
return -EINVAL;
@@ -543,7 +548,6 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
break;
case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
- case VB2_BUF_STATE_REQUEUEING:
/* nothing */
break;
}
@@ -592,7 +596,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
static const struct vb2_buf_ops v4l2_buf_ops = {
.verify_planes_array = __verify_planes_array_core,
- .init_buffer = __init_v4l2_vb2_buffer,
+ .init_buffer = __init_vb2_v4l2_buffer,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.copy_timestamp = __copy_timestamp,
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 6dfbd5b05907..1c6659f7c394 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -46,17 +46,17 @@ static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
buf->size = size;
buf->vaddr = vmalloc_user(buf->size);
- buf->dma_dir = dma_dir;
- buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_vmalloc_put;
- buf->handler.arg = buf;
-
if (!buf->vaddr) {
pr_debug("vmalloc of size %ld failed\n", buf->size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
+ buf->dma_dir = dma_dir;
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_vmalloc_put;
+ buf->handler.arg = buf;
+
refcount_set(&buf->refcount, 1);
return buf;
}
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 4a5834a1c3b7..a3393cd4e584 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -526,7 +526,6 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvb_media_device_free(dvbdev);
kfree(dvbdevfops);
kfree(dvbdev);
- up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return ret;
}
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index f59a102b0a64..9ba8f39fe310 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -467,7 +467,7 @@ struct dvb_frontend *as102_attach(const char *name,
/* init frontend callback ops */
memcpy(&fe->ops, &as102_fe_ops, sizeof(struct dvb_frontend_ops));
- strncpy(fe->ops.info.name, name, sizeof(fe->ops.info.name));
+ strscpy(fe->ops.info.name, name, sizeof(fe->ops.info.name));
return fe;
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index f8040f6def62..d869029ca87d 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2774,7 +2774,8 @@ static struct dvb_frontend *dib7000p_init(struct i2c_adapter *i2c_adap, u8 i2c_a
dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
/* init 7090 tuner adapter */
- strncpy(st->dib7090_tuner_adap.name, "DiB7090 tuner interface", sizeof(st->dib7090_tuner_adap.name));
+ strscpy(st->dib7090_tuner_adap.name, "DiB7090 tuner interface",
+ sizeof(st->dib7090_tuner_adap.name));
st->dib7090_tuner_adap.algo = &dib7090_tuner_xfer_algo;
st->dib7090_tuner_adap.algo_data = NULL;
st->dib7090_tuner_adap.dev.parent = st->i2c_adap->dev.parent;
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 85c429cce23e..564669338dc6 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -4458,8 +4458,8 @@ static struct dvb_frontend *dib8000_init(struct i2c_adapter *i2c_adap, u8 i2c_ad
dibx000_init_i2c_master(&state->i2c_master, DIB8000, state->i2c.adap, state->i2c.addr);
/* init 8096p tuner adapter */
- strncpy(state->dib8096p_tuner_adap.name, "DiB8096P tuner interface",
- sizeof(state->dib8096p_tuner_adap.name));
+ strscpy(state->dib8096p_tuner_adap.name, "DiB8096P tuner interface",
+ sizeof(state->dib8096p_tuner_adap.name));
state->dib8096p_tuner_adap.algo = &dib8096p_tuner_xfer_algo;
state->dib8096p_tuner_adap.algo_data = NULL;
state->dib8096p_tuner_adap.dev.parent = state->i2c.adap->dev.parent;
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 1875da07c150..e7838926e6bc 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -2521,7 +2521,8 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
dibx000_init_i2c_master(&st->i2c_master, DIB7000MC, st->i2c.i2c_adap, st->i2c.i2c_addr);
st->tuner_adap.dev.parent = i2c_adap->dev.parent;
- strncpy(st->tuner_adap.name, "DIB9000_FW TUNER ACCESS", sizeof(st->tuner_adap.name));
+ strscpy(st->tuner_adap.name, "DIB9000_FW TUNER ACCESS",
+ sizeof(st->tuner_adap.name));
st->tuner_adap.algo = &dib9000_tuner_algo;
st->tuner_adap.algo_data = NULL;
i2c_set_adapdata(&st->tuner_adap, st);
@@ -2529,7 +2530,8 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
goto error;
st->component_bus.dev.parent = i2c_adap->dev.parent;
- strncpy(st->component_bus.name, "DIB9000_FW COMPONENT BUS ACCESS", sizeof(st->component_bus.name));
+ strscpy(st->component_bus.name, "DIB9000_FW COMPONENT BUS ACCESS",
+ sizeof(st->component_bus.name));
st->component_bus.algo = &dib9000_component_bus_algo;
st->component_bus.algo_data = NULL;
st->component_bus_speed = 340;
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 0a5b15bee1d7..c597d6fc3046 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2263,61 +2263,41 @@ static int DRX_Start(struct drxd_state *state, s32 off)
case DRX_CHANNEL_LOW:
transmissionParams |= SC_RA_RAM_OP_PARAM_PRIO_LO;
status = Write16(state, EC_SB_REG_PRIOR__A, EC_SB_REG_PRIOR_LO, 0x0000);
- if (status < 0)
- break;
break;
case DRX_CHANNEL_HIGH:
transmissionParams |= SC_RA_RAM_OP_PARAM_PRIO_HI;
status = Write16(state, EC_SB_REG_PRIOR__A, EC_SB_REG_PRIOR_HI, 0x0000);
- if (status < 0)
- break;
break;
-
}
switch (p->code_rate_HP) {
case FEC_1_2:
transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_1_2;
- if (state->type_A) {
+ if (state->type_A)
status = Write16(state, EC_VD_REG_SET_CODERATE__A, EC_VD_REG_SET_CODERATE_C1_2, 0x0000);
- if (status < 0)
- break;
- }
break;
default:
operationMode |= SC_RA_RAM_OP_AUTO_RATE__M;
/* fall through */
case FEC_2_3:
transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_2_3;
- if (state->type_A) {
+ if (state->type_A)
status = Write16(state, EC_VD_REG_SET_CODERATE__A, EC_VD_REG_SET_CODERATE_C2_3, 0x0000);
- if (status < 0)
- break;
- }
break;
case FEC_3_4:
transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_3_4;
- if (state->type_A) {
+ if (state->type_A)
status = Write16(state, EC_VD_REG_SET_CODERATE__A, EC_VD_REG_SET_CODERATE_C3_4, 0x0000);
- if (status < 0)
- break;
- }
break;
case FEC_5_6:
transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_5_6;
- if (state->type_A) {
+ if (state->type_A)
status = Write16(state, EC_VD_REG_SET_CODERATE__A, EC_VD_REG_SET_CODERATE_C5_6, 0x0000);
- if (status < 0)
- break;
- }
break;
case FEC_7_8:
transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_7_8;
- if (state->type_A) {
+ if (state->type_A)
status = Write16(state, EC_VD_REG_SET_CODERATE__A, EC_VD_REG_SET_CODERATE_C7_8, 0x0000);
- if (status < 0)
- break;
- }
break;
}
if (status < 0)
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 29836c1a40e9..da1b48a18515 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -839,7 +839,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
sizeof(struct dvb_tuner_ops));
- strncpy(fe->ops.tuner_ops.info.name, desc->name,
+ strscpy(fe->ops.tuner_ops.info.name, desc->name,
sizeof(fe->ops.tuner_ops.info.name));
fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 123f2a33738b..b543e1c4c4f9 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -309,6 +309,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
u16 u16tmp;
u32 tuner_frequency_khz, target_mclk;
s32 s32tmp;
+ static const struct reg_sequence reset_buf[] = {
+ {0x07, 0x80}, {0x07, 0x00}
+ };
dev_dbg(&client->dev,
"delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -321,11 +324,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
}
/* reset */
- ret = regmap_write(dev->regmap, 0x07, 0x80);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x07, 0x00);
+ ret = regmap_multi_reg_write(dev->regmap, reset_buf, 2);
if (ret)
goto err;
@@ -1470,7 +1469,7 @@ static int m88ds3103_probe(struct i2c_client *client,
/* create dvb_frontend */
memcpy(&dev->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
if (dev->chip_id == M88RS6000_CHIP_ID)
- strncpy(dev->fe.ops.info.name, "Montage Technology M88RS6000",
+ strscpy(dev->fe.ops.info.name, "Montage Technology M88RS6000",
sizeof(dev->fe.ops.info.name));
if (!pdata->attach_in_use)
dev->fe.ops.release = NULL;
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index feacd8da421d..c9bcd2e95417 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -275,18 +275,20 @@ static u32 si2165_get_fe_clk(struct si2165_state *state)
static int si2165_wait_init_done(struct si2165_state *state)
{
- int ret = -EINVAL;
+ int ret;
u8 val = 0;
int i;
for (i = 0; i < 3; ++i) {
- si2165_readreg8(state, REG_INIT_DONE, &val);
+ ret = si2165_readreg8(state, REG_INIT_DONE, &val);
+ if (ret < 0)
+ return ret;
if (val == 0x01)
return 0;
usleep_range(1000, 50000);
}
dev_err(&state->client->dev, "init_done was not set\n");
- return ret;
+ return -EINVAL;
}
static int si2165_upload_firmware_block(struct si2165_state *state,
@@ -1299,7 +1301,6 @@ MODULE_DEVICE_TABLE(i2c, si2165_id_table);
static struct i2c_driver si2165_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "si2165",
},
.probe = si2165_probe,
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index e5cd2cd414f4..0af9b335be12 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -180,6 +180,9 @@ static int ts2020_set_tuner_rf(struct dvb_frontend *fe)
unsigned int utmp;
ret = regmap_read(dev->regmap, 0x3d, &utmp);
+ if (ret)
+ return ret;
+
utmp &= 0x7f;
if (utmp < 0x16)
utmp = 0xa1;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 6d32f8dcf83b..7793358ab8b3 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -8,7 +8,7 @@ config VIDEO_IR_I2C
tristate "I2C module for IR" if !MEDIA_SUBDRV_AUTOSELECT
depends on I2C && RC_CORE
default y
- ---help---
+ help
Most boards have an IR chip directly connected via GPIO. However,
some video boards have the IR connected via I2C bus.
@@ -29,7 +29,7 @@ comment "Audio decoders, processors and mixers"
config VIDEO_TVAUDIO
tristate "Simple audio decoder chips"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for several audio decoder chips found on some bt8xx boards:
Philips: tda9840, tda9873h, tda9874h/a, tda9850, tda985x, tea6300,
tea6320, tea6420, tda8425, ta8874z.
@@ -41,7 +41,7 @@ config VIDEO_TVAUDIO
config VIDEO_TDA7432
tristate "Philips TDA7432 audio processor"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for tda7432 audio decoder chip found on some bt8xx boards.
To compile this driver as a module, choose M here: the
@@ -50,7 +50,7 @@ config VIDEO_TDA7432
config VIDEO_TDA9840
tristate "Philips TDA9840 audio processor"
depends on I2C
- ---help---
+ help
Support for tda9840 audio decoder chip found on some Zoran boards.
To compile this driver as a module, choose M here: the
@@ -60,9 +60,10 @@ config VIDEO_TDA1997X
tristate "NXP TDA1997x HDMI receiver"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on SND_SOC
- select SND_PCM
select HDMI
- ---help---
+ select SND_PCM
+ select V4L2_FWNODE
+ help
V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
To compile this driver as a module, choose M here: the
@@ -71,7 +72,7 @@ config VIDEO_TDA1997X
config VIDEO_TEA6415C
tristate "Philips TEA6415C audio processor"
depends on I2C
- ---help---
+ help
Support for tea6415c audio decoder chip found on some bt8xx boards.
To compile this driver as a module, choose M here: the
@@ -80,7 +81,7 @@ config VIDEO_TEA6415C
config VIDEO_TEA6420
tristate "Philips TEA6420 audio processor"
depends on I2C
- ---help---
+ help
Support for tea6420 audio decoder chip found on some bt8xx boards.
To compile this driver as a module, choose M here: the
@@ -89,7 +90,7 @@ config VIDEO_TEA6420
config VIDEO_MSP3400
tristate "Micronas MSP34xx audio decoders"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Micronas MSP34xx series of audio decoders.
To compile this driver as a module, choose M here: the
@@ -98,7 +99,7 @@ config VIDEO_MSP3400
config VIDEO_CS3308
tristate "Cirrus Logic CS3308 audio ADC"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Cirrus Logic CS3308 High Performance 8-Channel
Analog Volume Control
@@ -108,7 +109,7 @@ config VIDEO_CS3308
config VIDEO_CS5345
tristate "Cirrus Logic CS5345 audio ADC"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Cirrus Logic CS5345 24-bit, 192 kHz
stereo A/D converter.
@@ -118,7 +119,7 @@ config VIDEO_CS5345
config VIDEO_CS53L32A
tristate "Cirrus Logic CS53L32A audio ADC"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Cirrus Logic CS53L32A low voltage
stereo A/D converter.
@@ -128,7 +129,7 @@ config VIDEO_CS53L32A
config VIDEO_TLV320AIC23B
tristate "Texas Instruments TLV320AIC23B audio codec"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Texas Instruments TLV320AIC23B audio codec.
To compile this driver as a module, choose M here: the
@@ -137,7 +138,7 @@ config VIDEO_TLV320AIC23B
config VIDEO_UDA1342
tristate "Philips UDA1342 audio codec"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Philips UDA1342 audio codec.
To compile this driver as a module, choose M here: the
@@ -146,7 +147,7 @@ config VIDEO_UDA1342
config VIDEO_WM8775
tristate "Wolfson Microelectronics WM8775 audio ADC with input mixer"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Wolfson Microelectronics WM8775 high
performance stereo A/D Converter with a 4 channel input mixer.
@@ -156,7 +157,7 @@ config VIDEO_WM8775
config VIDEO_WM8739
tristate "Wolfson Microelectronics WM8739 stereo audio ADC"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Wolfson Microelectronics WM8739
stereo A/D Converter.
@@ -166,7 +167,7 @@ config VIDEO_WM8739
config VIDEO_VP27SMPX
tristate "Panasonic VP27's internal MPX"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the internal MPX of the Panasonic VP27s tuner.
To compile this driver as a module, choose M here: the
@@ -200,7 +201,7 @@ comment "Video decoders"
config VIDEO_ADV7180
tristate "Analog Devices ADV7180 decoder"
depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- ---help---
+ help
Support for the Analog Devices ADV7180 video decoder.
To compile this driver as a module, choose M here: the
@@ -209,7 +210,7 @@ config VIDEO_ADV7180
config VIDEO_ADV7183
tristate "Analog Devices ADV7183 decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
V4l2 subdevice driver for the Analog Devices
ADV7183 video decoder.
@@ -221,7 +222,8 @@ config VIDEO_ADV748X
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on OF
select REGMAP_I2C
- ---help---
+ select V4L2_FWNODE
+ help
V4L2 subdevice driver for the Analog Devices
ADV7481 and ADV7482 HDMI/Analog video decoders.
@@ -234,7 +236,7 @@ config VIDEO_ADV7604
depends on GPIOLIB || COMPILE_TEST
select HDMI
select V4L2_FWNODE
- ---help---
+ help
Support for the Analog Devices ADV7604 video decoder.
This is a Analog Devices Component/Graphics Digitizer
@@ -247,7 +249,7 @@ config VIDEO_ADV7604_CEC
bool "Enable Analog Devices ADV7604 CEC support"
depends on VIDEO_ADV7604
select CEC_CORE
- ---help---
+ help
When selected the adv7604 will support the optional
HDMI CEC feature.
@@ -255,7 +257,7 @@ config VIDEO_ADV7842
tristate "Analog Devices ADV7842 decoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
select HDMI
- ---help---
+ help
Support for the Analog Devices ADV7842 video decoder.
This is a Analog Devices Component/Graphics/SD Digitizer
@@ -268,14 +270,14 @@ config VIDEO_ADV7842_CEC
bool "Enable Analog Devices ADV7842 CEC support"
depends on VIDEO_ADV7842
select CEC_CORE
- ---help---
+ help
When selected the adv7842 will support the optional
HDMI CEC feature.
config VIDEO_BT819
tristate "BT819A VideoStream decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for BT819A video decoder.
To compile this driver as a module, choose M here: the
@@ -284,7 +286,7 @@ config VIDEO_BT819
config VIDEO_BT856
tristate "BT856 VideoStream decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for BT856 video decoder.
To compile this driver as a module, choose M here: the
@@ -293,7 +295,7 @@ config VIDEO_BT856
config VIDEO_BT866
tristate "BT866 VideoStream decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for BT866 video decoder.
To compile this driver as a module, choose M here: the
@@ -302,7 +304,7 @@ config VIDEO_BT866
config VIDEO_KS0127
tristate "KS0127 video decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for KS0127 video decoder.
This chip is used on AverMedia AVS6EYES Zoran-based MJPEG
@@ -314,53 +316,16 @@ config VIDEO_KS0127
config VIDEO_ML86V7667
tristate "OKI ML86V7667 video decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the OKI Semiconductor ML86V7667 video decoder.
To compile this driver as a module, choose M here: the
module will be called ml86v7667.
-config VIDEO_AD5820
- tristate "AD5820 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- ---help---
- This is a driver for the AD5820 camera lens voice coil.
- It is used for example in Nokia N900 (RX-51).
-
-config VIDEO_AK7375
- tristate "AK7375 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on VIDEO_V4L2_SUBDEV_API
- help
- This is a driver for the AK7375 camera lens voice coil.
- AK7375 is a 12 bit DAC with 120mA output current sink
- capability. This is designed for linear control of
- voice coil motors, controlled via I2C serial interface.
-
-config VIDEO_DW9714
- tristate "DW9714 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on VIDEO_V4L2_SUBDEV_API
- ---help---
- This is a driver for the DW9714 camera lens voice coil.
- DW9714 is a 10 bit DAC with 120mA output current sink
- capability. This is designed for linear control of
- voice coil motors, controlled via I2C serial interface.
-
-config VIDEO_DW9807_VCM
- tristate "DW9807 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on VIDEO_V4L2_SUBDEV_API
- ---help---
- This is a driver for the DW9807 camera lens voice coil.
- DW9807 is a 10 bit DAC with 100mA output current sink
- capability. This is designed for linear control of
- voice coil motors, controlled via I2C serial interface.
-
config VIDEO_SAA7110
tristate "Philips SAA7110 video decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Philips SAA7110 video decoders.
To compile this driver as a module, choose M here: the
@@ -369,7 +334,7 @@ config VIDEO_SAA7110
config VIDEO_SAA711X
tristate "Philips SAA7111/3/4/5 video decoders"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Philips SAA7111/3/4/5 video decoders.
To compile this driver as a module, choose M here: the
@@ -380,7 +345,7 @@ config VIDEO_TC358743
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
select HDMI
select V4L2_FWNODE
- ---help---
+ help
Support for the Toshiba TC358743 HDMI to MIPI CSI-2 bridge.
To compile this driver as a module, choose M here: the
@@ -390,7 +355,7 @@ config VIDEO_TC358743_CEC
bool "Enable Toshiba TC358743 CEC support"
depends on VIDEO_TC358743
select CEC_CORE
- ---help---
+ help
When selected the tc358743 will support the optional
HDMI CEC feature.
@@ -398,7 +363,7 @@ config VIDEO_TVP514X
tristate "Texas Instruments TVP514x video decoder"
depends on VIDEO_V4L2 && I2C
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the TI TVP5146/47
decoder. It is currently working with the TI OMAP3 camera
controller.
@@ -410,7 +375,7 @@ config VIDEO_TVP5150
tristate "Texas Instruments TVP5150 video decoder"
depends on VIDEO_V4L2 && I2C
select V4L2_FWNODE
- ---help---
+ help
Support for the Texas Instruments TVP5150 video decoder.
To compile this driver as a module, choose M here: the
@@ -420,7 +385,7 @@ config VIDEO_TVP7002
tristate "Texas Instruments TVP7002 video decoder"
depends on VIDEO_V4L2 && I2C
select V4L2_FWNODE
- ---help---
+ help
Support for the Texas Instruments TVP7002 video decoder.
To compile this driver as a module, choose M here: the
@@ -429,7 +394,7 @@ config VIDEO_TVP7002
config VIDEO_TW2804
tristate "Techwell TW2804 multiple video decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Techwell tw2804 multiple video decoder.
To compile this driver as a module, choose M here: the
@@ -438,7 +403,7 @@ config VIDEO_TW2804
config VIDEO_TW9903
tristate "Techwell TW9903 video decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Techwell tw9903 multi-standard video decoder
with high quality down scaler.
@@ -448,7 +413,7 @@ config VIDEO_TW9903
config VIDEO_TW9906
tristate "Techwell TW9906 video decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Techwell tw9906 enhanced multi-standard comb filter
video decoder with YCbCr input support.
@@ -458,7 +423,7 @@ config VIDEO_TW9906
config VIDEO_TW9910
tristate "Techwell TW9910 video decoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for Techwell TW9910 NTSC/PAL/SECAM video decoder.
To compile this driver as a module, choose M here: the
@@ -467,7 +432,7 @@ config VIDEO_TW9910
config VIDEO_VPX3220
tristate "vpx3220a, vpx3216b & vpx3214c video decoders"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for VPX322x video decoders.
To compile this driver as a module, choose M here: the
@@ -478,7 +443,7 @@ comment "Video and audio decoders"
config VIDEO_SAA717X
tristate "Philips SAA7171/3/4 audio/video decoders"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Philips SAA7171/3/4 audio/video decoders.
To compile this driver as a module, choose M here: the
@@ -491,7 +456,7 @@ comment "Video encoders"
config VIDEO_SAA7127
tristate "Philips SAA7127/9 digital video encoders"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Philips SAA7127/9 digital video encoders.
To compile this driver as a module, choose M here: the
@@ -500,7 +465,7 @@ config VIDEO_SAA7127
config VIDEO_SAA7185
tristate "Philips SAA7185 video encoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Philips SAA7185 video encoder.
To compile this driver as a module, choose M here: the
@@ -509,7 +474,7 @@ config VIDEO_SAA7185
config VIDEO_ADV7170
tristate "Analog Devices ADV7170 video encoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Analog Devices ADV7170 video encoder driver
To compile this driver as a module, choose M here: the
@@ -518,7 +483,7 @@ config VIDEO_ADV7170
config VIDEO_ADV7175
tristate "Analog Devices ADV7175 video encoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Analog Devices ADV7175 video encoder driver
To compile this driver as a module, choose M here: the
@@ -546,7 +511,7 @@ config VIDEO_ADV7511
tristate "Analog Devices ADV7511 encoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
select HDMI
- ---help---
+ help
Support for the Analog Devices ADV7511 video encoder.
This is a Analog Devices HDMI transmitter.
@@ -558,14 +523,14 @@ config VIDEO_ADV7511_CEC
bool "Enable Analog Devices ADV7511 CEC support"
depends on VIDEO_ADV7511
select CEC_CORE
- ---help---
+ help
When selected the adv7511 will support the optional
HDMI CEC feature.
config VIDEO_AD9389B
tristate "Analog Devices AD9389B encoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- ---help---
+ help
Support for the Analog Devices AD9389B video encoder.
This is a Analog Devices HDMI transmitter.
@@ -582,7 +547,7 @@ config VIDEO_AK881X
config VIDEO_THS8200
tristate "Texas Instruments THS8200 video encoder"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Texas Instruments THS8200 video encoder.
To compile this driver as a module, choose M here: the
@@ -612,7 +577,7 @@ config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
@@ -624,7 +589,7 @@ config VIDEO_IMX274
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
- ---help---
+ help
This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
@@ -666,7 +631,7 @@ config VIDEO_OV2659
depends on VIDEO_V4L2 && I2C
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV2659 camera.
@@ -678,7 +643,7 @@ config VIDEO_OV2680
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV2680 camera.
@@ -690,7 +655,7 @@ config VIDEO_OV2685
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV2685 camera.
@@ -703,7 +668,7 @@ config VIDEO_OV5640
depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the Omnivision
OV5640 camera sensor with a MIPI CSI-2 interface.
@@ -713,7 +678,7 @@ config VIDEO_OV5645
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV5645 camera.
@@ -725,7 +690,7 @@ config VIDEO_OV5647
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV5647 camera.
@@ -736,7 +701,7 @@ config VIDEO_OV6650
tristate "OmniVision OV6650 sensor support"
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV6650 camera.
@@ -749,7 +714,7 @@ config VIDEO_OV5670
depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV5670 camera.
@@ -760,7 +725,7 @@ config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV5695 camera.
@@ -784,7 +749,7 @@ config VIDEO_OV772X
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
select REGMAP_SCCB
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV772x camera.
@@ -795,7 +760,7 @@ config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
@@ -807,7 +772,7 @@ config VIDEO_OV7670
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
@@ -816,7 +781,7 @@ config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
@@ -843,7 +808,7 @@ config VIDEO_OV9650
tristate "OmniVision OV9650/OV9652 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select REGMAP_SCCB
- ---help---
+ help
This is a V4L2 sensor driver for the Omnivision
OV9650 and OV9652 camera sensors.
@@ -852,7 +817,7 @@ config VIDEO_OV13858
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the OmniVision
OV13858 camera.
@@ -860,7 +825,7 @@ config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the ST VS6624
camera.
@@ -880,7 +845,7 @@ config VIDEO_MT9M032
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
- ---help---
+ help
This driver supports MT9M032 camera sensors from Aptina, monochrome
models only.
@@ -897,7 +862,7 @@ config VIDEO_MT9P031
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
- ---help---
+ help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9p031 5 Mpixel camera.
@@ -905,7 +870,7 @@ config VIDEO_MT9T001
tristate "Aptina MT9T001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt0t001 3 Mpixel camera.
@@ -913,7 +878,7 @@ config VIDEO_MT9T112
tristate "Aptina MT9T111/MT9T112 support"
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T111 and MT9T112 3 Mpixel camera.
@@ -924,7 +889,7 @@ config VIDEO_MT9V011
tristate "Micron mt9v011 sensor support"
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a Video4Linux2 sensor driver for the Micron
mt0v011 1.3 Mpixel camera. It currently only works with the
em28xx driver.
@@ -935,7 +900,7 @@ config VIDEO_MT9V032
depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
select V4L2_FWNODE
- ---help---
+ help
This is a Video4Linux2 sensor driver for the Micron
MT9V032 752x480 CMOS sensor.
@@ -954,14 +919,14 @@ config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This driver supports SR030PC30 VGA camera from Siliconfile
config VIDEO_NOON010PC30
tristate "Siliconfile NOON010PC30 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This driver supports NOON010PC30 CIF camera from Siliconfile
source "drivers/media/i2c/m5mols/Kconfig"
@@ -981,7 +946,7 @@ config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- ---help---
+ help
This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
camera sensor with an embedded SoC image signal processor.
@@ -989,7 +954,7 @@ config VIDEO_S5K6A3
tristate "Samsung S5K6A3 sensor support"
depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- ---help---
+ help
This is a V4L2 sensor driver for Samsung S5K6A3 raw
camera sensor.
@@ -997,7 +962,7 @@ config VIDEO_S5K4ECGX
tristate "Samsung S5K4ECGX sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select CRC32
- ---help---
+ help
This is a V4L2 sensor driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
@@ -1005,7 +970,7 @@ config VIDEO_S5K5BAF
tristate "Samsung S5K5BAF sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
- ---help---
+ help
This is a V4L2 sensor driver for Samsung S5K5BAF 2M
camera sensor with an embedded SoC image signal processor.
@@ -1016,17 +981,56 @@ config VIDEO_S5C73M3
tristate "Samsung S5C73M3 sensor support"
depends on I2C && SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
- ---help---
+ help
This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
+comment "Lens drivers"
+
+config VIDEO_AD5820
+ tristate "AD5820 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ help
+ This is a driver for the AD5820 camera lens voice coil.
+ It is used for example in Nokia N900 (RX-51).
+
+config VIDEO_AK7375
+ tristate "AK7375 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ help
+ This is a driver for the AK7375 camera lens voice coil.
+ AK7375 is a 12 bit DAC with 120mA output current sink
+ capability. This is designed for linear control of
+ voice coil motors, controlled via I2C serial interface.
+
+config VIDEO_DW9714
+ tristate "DW9714 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ help
+ This is a driver for the DW9714 camera lens voice coil.
+ DW9714 is a 10 bit DAC with 120mA output current sink
+ capability. This is designed for linear control of
+ voice coil motors, controlled via I2C serial interface.
+
+config VIDEO_DW9807_VCM
+ tristate "DW9807 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ help
+ This is a driver for the DW9807 camera lens voice coil.
+ DW9807 is a 10 bit DAC with 100mA output current sink
+ capability. This is designed for linear control of
+ voice coil motors, controlled via I2C serial interface.
+
comment "Flash devices"
config VIDEO_ADP1653
tristate "ADP1653 flash support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
@@ -1035,7 +1039,7 @@ config VIDEO_LM3560
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
- ---help---
+ help
This is a driver for the lm3560 dual flash controllers. It controls
flash, torch LEDs.
@@ -1044,7 +1048,7 @@ config VIDEO_LM3646
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
- ---help---
+ help
This is a driver for the lm3646 dual flash controllers. It controls
flash, torch LEDs.
@@ -1053,7 +1057,7 @@ comment "Video improvement chips"
config VIDEO_UPD64031A
tristate "NEC Electronics uPD64031A Ghost Reduction"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the NEC Electronics uPD64031A Ghost Reduction
video chip. It is most often found in NTSC TV cards made for
Japan and is used to reduce the 'ghosting' effect that can
@@ -1065,7 +1069,7 @@ config VIDEO_UPD64031A
config VIDEO_UPD64083
tristate "NEC Electronics uPD64083 3-Dimensional Y/C separation"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the NEC Electronics uPD64083 3-Dimensional Y/C
separation video chip. It is used to improve the quality of
the colors of a composite signal.
@@ -1079,7 +1083,7 @@ config VIDEO_SAA6752HS
tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
depends on VIDEO_V4L2 && I2C
select CRC32
- ---help---
+ help
Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
audio encoder with multiplexer.
@@ -1091,7 +1095,7 @@ comment "SDR tuner chips"
config SDR_MAX2175
tristate "Maxim 2175 RF to Bits tuner"
depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
- ---help---
+ help
Support for Maxim 2175 tuner. It is an advanced analog/digital
radio receiver with RF-to-Bits front-end designed for SDR solutions.
@@ -1112,7 +1116,7 @@ config VIDEO_THS7303
config VIDEO_M52790
tristate "Mitsubishi M52790 A/V switch"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Mitsubishi M52790 A/V switch.
To compile this driver as a module, choose M here: the
@@ -1123,7 +1127,7 @@ config VIDEO_I2C
depends on VIDEO_V4L2 && I2C
select VIDEOBUF2_VMALLOC
imply HWMON
- ---help---
+ help
Enable the I2C transport video support which supports the
following:
* Panasonic AMG88xx Grid-Eye Sensors
@@ -1132,6 +1136,19 @@ config VIDEO_I2C
To compile this driver as a module, choose M here: the
module will be called video-i2c
+config VIDEO_ST_MIPID02
+ tristate "STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
+ help
+ Support for STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge.
+ It is used to allow usage of a CSI-2 sensor with a PARALLEL port
+ controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called st-mipid02.
+
endmenu
endif
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index a64fca82e0c4..d8ad9dad495d 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -113,5 +113,6 @@ obj-$(CONFIG_VIDEO_IMX258) += imx258.o
obj-$(CONFIG_VIDEO_IMX274) += imx274.o
obj-$(CONFIG_VIDEO_IMX319) += imx319.o
obj-$(CONFIG_VIDEO_IMX355) += imx355.o
+obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
obj-$(CONFIG_SDR_MAX2175) += max2175.o
diff --git a/drivers/media/i2c/cx25840/Kconfig b/drivers/media/i2c/cx25840/Kconfig
index 451133ad41ff..f4b31d7cb440 100644
--- a/drivers/media/i2c/cx25840/Kconfig
+++ b/drivers/media/i2c/cx25840/Kconfig
@@ -1,7 +1,7 @@
config VIDEO_CX25840
tristate "Conexant CX2584x audio/video decoders"
depends on VIDEO_V4L2 && I2C
- ---help---
+ help
Support for the Conexant CX2584x audio/video decoders.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/i2c/et8ek8/Kconfig b/drivers/media/i2c/et8ek8/Kconfig
index 9fe409e95666..ab23b41bf353 100644
--- a/drivers/media/i2c/et8ek8/Kconfig
+++ b/drivers/media/i2c/et8ek8/Kconfig
@@ -2,6 +2,6 @@ config VIDEO_ET8EK8
tristate "ET8EK8 camera sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
- ---help---
+ help
This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
It is used for example in Nokia N900 (RX-51).
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 9857e151db46..83e9961b0505 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -588,12 +588,10 @@ static int imx214_set_format(struct v4l2_subdev *sd,
__crop = __imx214_get_pad_crop(imx214, cfg, format->pad, format->which);
- if (format)
- mode = v4l2_find_nearest_size(imx214_modes,
- ARRAY_SIZE(imx214_modes), width, height,
- format->format.width, format->format.height);
- else
- mode = &imx214_modes[0];
+ mode = v4l2_find_nearest_size(imx214_modes,
+ ARRAY_SIZE(imx214_modes), width, height,
+ format->format.width,
+ format->format.height);
__crop->width = mode->width;
__crop->height = mode->height;
diff --git a/drivers/media/i2c/m5mols/Kconfig b/drivers/media/i2c/m5mols/Kconfig
index dc8c2505907e..be0bb3f1bc22 100644
--- a/drivers/media/i2c/m5mols/Kconfig
+++ b/drivers/media/i2c/m5mols/Kconfig
@@ -2,5 +2,5 @@ config VIDEO_M5MOLS
tristate "Fujitsu M-5MOLS 8MP sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This driver supports Fujitsu M-5MOLS camera sensor with ISP
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 799acce803fe..5ed2413eac8a 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1117,8 +1117,10 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
if (ov2659_formats[index].code == mf->code)
break;
- if (index < 0)
- return -EINVAL;
+ if (index < 0) {
+ index = 0;
+ mf->code = ov2659_formats[index].code;
+ }
mf->colorspace = V4L2_COLORSPACE_SRGB;
mf->field = V4L2_FIELD_NONE;
@@ -1130,7 +1132,7 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
#else
- return -ENOTTY;
+ ret = -ENOTTY;
#endif
} else {
s64 val;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index c33fd584cb44..1b972e591b48 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -804,15 +804,25 @@ static int ov6650_prog_dflt(struct i2c_client *client)
return ret;
}
-static int ov6650_video_probe(struct i2c_client *client)
+static int ov6650_video_probe(struct v4l2_subdev *sd)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
u8 pidh, pidl, midh, midl;
int ret;
- ret = ov6650_s_power(&priv->subdev, 1);
- if (ret < 0)
+ priv->clk = v4l2_clk_get(&client->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(&client->dev, "v4l2_clk request err: %d\n", ret);
return ret;
+ }
+
+ ret = ov6650_s_power(sd, 1);
+ if (ret < 0)
+ goto eclkput;
+
+ msleep(20);
/*
* check and show product ID and manufacturer ID
@@ -846,7 +856,12 @@ static int ov6650_video_probe(struct i2c_client *client)
ret = v4l2_ctrl_handler_setup(&priv->hdl);
done:
- ov6650_s_power(&priv->subdev, 0);
+ ov6650_s_power(sd, 0);
+ if (!ret)
+ return 0;
+eclkput:
+ v4l2_clk_put(priv->clk);
+
return ret;
}
@@ -929,6 +944,10 @@ static const struct v4l2_subdev_ops ov6650_subdev_ops = {
.pad = &ov6650_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov6650_internal_ops = {
+ .registered = ov6650_video_probe,
+};
+
/*
* i2c_driver function
*/
@@ -989,18 +1008,12 @@ static int ov6650_probe(struct i2c_client *client,
priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
priv->colorspace = V4L2_COLORSPACE_JPEG;
- priv->clk = v4l2_clk_get(&client->dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto eclkget;
- }
+ priv->subdev.internal_ops = &ov6650_internal_ops;
+ priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- ret = ov6650_video_probe(client);
- if (ret) {
- v4l2_clk_put(priv->clk);
-eclkget:
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (ret)
v4l2_ctrl_handler_free(&priv->hdl);
- }
return ret;
}
@@ -1010,7 +1023,7 @@ static int ov6650_remove(struct i2c_client *client)
struct ov6650 *priv = to_ov6650(client);
v4l2_clk_put(priv->clk);
- v4l2_device_unregister_subdev(&priv->subdev);
+ v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
return 0;
}
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index a7d26b294eb5..44c3eed8a858 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -864,7 +864,15 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
/* Recalculate frame rate */
ov7675_get_framerate(sd, tpf);
- return ov7675_apply_framerate(sd);
+ /*
+ * If the device is not powered up by the host driver do
+ * not apply any changes to H/W at this time. Instead
+ * the framerate will be restored right after power-up.
+ */
+ if (info->on)
+ return ov7675_apply_framerate(sd);
+
+ return 0;
}
static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
@@ -895,7 +903,16 @@ static int ov7670_set_framerate_legacy(struct v4l2_subdev *sd,
info->clkrc = (info->clkrc & 0x80) | div;
tpf->numerator = 1;
tpf->denominator = info->clock_speed / div;
- return ov7670_write(sd, REG_CLKRC, info->clkrc);
+
+ /*
+ * If the device is not powered up by the host driver do
+ * not apply any changes to H/W at this time. Instead
+ * the framerate will be restored right after power-up.
+ */
+ if (info->on)
+ return ov7670_write(sd, REG_CLKRC, info->clkrc);
+
+ return 0;
}
/*
@@ -1105,9 +1122,13 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
if (ret)
return ret;
- ret = ov7670_apply_fmt(sd);
- if (ret)
- return ret;
+ /*
+ * If the device is not powered up by the host driver do
+ * not apply any changes to H/W at this time. Instead
+ * the frame format will be restored right after power-up.
+ */
+ if (info->on)
+ return ov7670_apply_fmt(sd);
return 0;
}
@@ -1664,6 +1685,7 @@ static int ov7670_s_power(struct v4l2_subdev *sd, int on)
if (on) {
ov7670_power_on (sd);
+ ov7670_init(sd, 0);
ov7670_apply_fmt(sd);
ov7675_apply_framerate(sd);
v4l2_ctrl_handler_setup(&info->hdl);
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index dfece91ce96b..54e80a60aa57 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -448,6 +448,27 @@ static int ov7740_get_gain(struct ov7740 *ov7740, struct v4l2_ctrl *ctrl)
return 0;
}
+static int ov7740_get_exp(struct ov7740 *ov7740, struct v4l2_ctrl *ctrl)
+{
+ struct regmap *regmap = ov7740->regmap;
+ unsigned int value0, value1;
+ int ret;
+
+ if (ctrl->val == V4L2_EXPOSURE_MANUAL)
+ return 0;
+
+ ret = regmap_read(regmap, REG_AEC, &value0);
+ if (ret)
+ return ret;
+ ret = regmap_read(regmap, REG_HAEC, &value1);
+ if (ret)
+ return ret;
+
+ ov7740->exposure->val = (value1 << 8) | (value0 & 0xff);
+
+ return 0;
+}
+
static int ov7740_set_exp(struct regmap *regmap, int value)
{
int ret;
@@ -494,6 +515,9 @@ static int ov7740_get_volatile_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_AUTOGAIN:
ret = ov7740_get_gain(ov7740, ctrl);
break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = ov7740_get_exp(ov7740, ctrl);
+ break;
default:
ret = -EINVAL;
break;
@@ -991,8 +1015,6 @@ static int ov7740_init_controls(struct ov7740 *ov7740)
ov7740->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov7740_ctrl_ops,
V4L2_CID_EXPOSURE, 0, 65535, 1, 500);
- if (ov7740->exposure)
- ov7740->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
ov7740->auto_exposure = v4l2_ctrl_new_std_menu(ctrl_hdlr,
&ov7740_ctrl_ops,
@@ -1003,7 +1025,7 @@ static int ov7740_init_controls(struct ov7740 *ov7740)
v4l2_ctrl_auto_cluster(3, &ov7740->auto_wb, 0, false);
v4l2_ctrl_auto_cluster(2, &ov7740->auto_gain, 0, true);
v4l2_ctrl_auto_cluster(2, &ov7740->auto_exposure,
- V4L2_EXPOSURE_MANUAL, false);
+ V4L2_EXPOSURE_MANUAL, true);
v4l2_ctrl_cluster(2, &ov7740->hflip);
if (ctrl_hdlr->error) {
diff --git a/drivers/media/i2c/smiapp/Kconfig b/drivers/media/i2c/smiapp/Kconfig
index f59718d8e51e..26b54f2aa95b 100644
--- a/drivers/media/i2c/smiapp/Kconfig
+++ b/drivers/media/i2c/smiapp/Kconfig
@@ -4,5 +4,5 @@ config VIDEO_SMIAPP
depends on MEDIA_CAMERA_SUPPORT
select VIDEO_SMIAPP_PLL
select V4L2_FWNODE
- ---help---
+ help
This is a generic driver for SMIA++/SMIA camera modules.
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
new file mode 100644
index 000000000000..9369f38dbf3d
--- /dev/null
+++ b/drivers/media/i2c/st-mipid02.c
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for ST MIPID02 CSI-2 to PARALLEL bridge
+ *
+ * Copyright (C) STMicroelectronics SA 2019
+ * Authors: Mickael Guene <mickael.guene@st.com>
+ * for STMicroelectronics.
+ *
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define MIPID02_CLK_LANE_WR_REG1 0x01
+#define MIPID02_CLK_LANE_REG1 0x02
+#define MIPID02_CLK_LANE_REG3 0x04
+#define MIPID02_DATA_LANE0_REG1 0x05
+#define MIPID02_DATA_LANE0_REG2 0x06
+#define MIPID02_DATA_LANE1_REG1 0x09
+#define MIPID02_DATA_LANE1_REG2 0x0a
+#define MIPID02_MODE_REG1 0x14
+#define MIPID02_MODE_REG2 0x15
+#define MIPID02_DATA_ID_RREG 0x17
+#define MIPID02_DATA_SELECTION_CTRL 0x19
+#define MIPID02_PIX_WIDTH_CTRL 0x1e
+#define MIPID02_PIX_WIDTH_CTRL_EMB 0x1f
+
+/* Bits definition for MIPID02_CLK_LANE_REG1 */
+#define CLK_ENABLE BIT(0)
+/* Bits definition for MIPID02_CLK_LANE_REG3 */
+#define CLK_MIPI_CSI BIT(1)
+/* Bits definition for MIPID02_DATA_LANE0_REG1 */
+#define DATA_ENABLE BIT(0)
+/* Bits definition for MIPID02_DATA_LANEx_REG2 */
+#define DATA_MIPI_CSI BIT(0)
+/* Bits definition for MIPID02_MODE_REG1 */
+#define MODE_DATA_SWAP BIT(2)
+#define MODE_NO_BYPASS BIT(6)
+/* Bits definition for MIPID02_MODE_REG2 */
+#define MODE_HSYNC_ACTIVE_HIGH BIT(1)
+#define MODE_VSYNC_ACTIVE_HIGH BIT(2)
+/* Bits definition for MIPID02_DATA_SELECTION_CTRL */
+#define SELECTION_MANUAL_DATA BIT(2)
+#define SELECTION_MANUAL_WIDTH BIT(3)
+
+static const u32 mipid02_supported_fmt_codes[] = {
+ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_BGR888_1X24
+};
+
+/* regulator supplies */
+static const char * const mipid02_supply_name[] = {
+ "VDDE", /* 1.8V digital I/O supply */
+ "VDDIN", /* 1V8 voltage regulator supply */
+};
+
+#define MIPID02_NUM_SUPPLIES ARRAY_SIZE(mipid02_supply_name)
+
+#define MIPID02_SINK_0 0
+#define MIPID02_SINK_1 1
+#define MIPID02_SOURCE 2
+#define MIPID02_PAD_NB 3
+
+struct mipid02_dev {
+ struct i2c_client *i2c_client;
+ struct regulator_bulk_data supplies[MIPID02_NUM_SUPPLIES];
+ struct v4l2_subdev sd;
+ struct media_pad pad[MIPID02_PAD_NB];
+ struct clk *xclk;
+ struct gpio_desc *reset_gpio;
+ /* endpoints info */
+ struct v4l2_fwnode_endpoint rx;
+ u64 link_frequency;
+ struct v4l2_fwnode_endpoint tx;
+ /* remote source */
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *s_subdev;
+ /* registers */
+ struct {
+ u8 clk_lane_reg1;
+ u8 data_lane0_reg1;
+ u8 data_lane1_reg1;
+ u8 mode_reg1;
+ u8 mode_reg2;
+ u8 data_id_rreg;
+ u8 pix_width_ctrl;
+ u8 pix_width_ctrl_emb;
+ } r;
+ /* lock to protect all members below */
+ struct mutex lock;
+ bool streaming;
+ struct v4l2_mbus_framefmt fmt;
+};
+
+static int bpp_from_code(__u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return 8;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ return 10;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ return 12;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ return 16;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ return 24;
+ default:
+ return 0;
+ }
+}
+
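+/*
+ * Return the MIPI CSI-2 data type matching the media bus format
+ * (0x2a: RAW8, 0x2b: RAW10, 0x2c: RAW12, 0x1e: YUV422 8-bit, 0x24: RGB888).
+ */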
+static u8 data_type_from_code(__u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return 0x2a;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ return 0x2b;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ return 0x2c;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ return 0x1e;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ return 0x24;
+ default:
+ return 0;
+ }
+}
+
+static void init_format(struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(V4L2_COLORSPACE_SRGB);
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(V4L2_COLORSPACE_SRGB);
+ fmt->width = 640;
+ fmt->height = 480;
+}
+
+static __u32 get_fmt_code(__u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mipid02_supported_fmt_codes); i++) {
+ if (code == mipid02_supported_fmt_codes[i])
+ return code;
+ }
+
+ return mipid02_supported_fmt_codes[0];
+}
+
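+/*
+ * On the parallel source pad, wide serial codes are exposed as their
+ * multiplexed parallel equivalents (1X16 -> 2X8, 1X24 -> 3X8).
+ */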
+static __u32 serial_to_parallel_code(__u32 serial)
+{
+ if (serial == MEDIA_BUS_FMT_UYVY8_1X16)
+ return MEDIA_BUS_FMT_UYVY8_2X8;
+ if (serial == MEDIA_BUS_FMT_BGR888_1X24)
+ return MEDIA_BUS_FMT_BGR888_3X8;
+
+ return serial;
+}
+
+static inline struct mipid02_dev *to_mipid02_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mipid02_dev, sd);
+}
+
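+/*
+ * Register access helpers: registers have 16-bit addresses, sent MSB first,
+ * and 8-bit values; reads send the address then issue a 1-byte read.
+ */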
+static int mipid02_read_reg(struct mipid02_dev *bridge, u16 reg, u8 *val)
+{
+ struct i2c_client *client = bridge->i2c_client;
+ struct i2c_msg msg[2];
+ u8 buf[2];
+ int ret;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags;
+ msg[0].buf = buf;
+ msg[0].len = sizeof(buf);
+
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags | I2C_M_RD;
+ msg[1].buf = val;
+ msg[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "%s: %x i2c_transfer, reg: %x => %d\n",
+ __func__, client->addr, reg, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mipid02_write_reg(struct mipid02_dev *bridge, u16 reg, u8 val)
+{
+ struct i2c_client *client = bridge->i2c_client;
+ struct i2c_msg msg;
+ u8 buf[3];
+ int ret;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+ buf[2] = val;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.buf = buf;
+ msg.len = sizeof(buf);
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "%s: i2c_transfer, reg: %x => %d\n",
+ __func__, reg, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mipid02_get_regulators(struct mipid02_dev *bridge)
+{
+ unsigned int i;
+
+ for (i = 0; i < MIPID02_NUM_SUPPLIES; i++)
+ bridge->supplies[i].supply = mipid02_supply_name[i];
+
+ return devm_regulator_bulk_get(&bridge->i2c_client->dev,
+ MIPID02_NUM_SUPPLIES,
+ bridge->supplies);
+}
+
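+/* Generate a reset pulse: deassert, assert for 5-10 ms, then deassert again. */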
+static void mipid02_apply_reset(struct mipid02_dev *bridge)
+{
+ gpiod_set_value_cansleep(bridge->reset_gpio, 0);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(bridge->reset_gpio, 1);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(bridge->reset_gpio, 0);
+ usleep_range(5000, 10000);
+}
+
+static int mipid02_set_power_on(struct mipid02_dev *bridge)
+{
+ struct i2c_client *client = bridge->i2c_client;
+ int ret;
+
+ ret = clk_prepare_enable(bridge->xclk);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable clock\n", __func__);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(MIPID02_NUM_SUPPLIES,
+ bridge->supplies);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable regulators\n",
+ __func__);
+ goto xclk_off;
+ }
+
+ if (bridge->reset_gpio) {
+ dev_dbg(&client->dev, "apply reset");
+ mipid02_apply_reset(bridge);
+ } else {
+ dev_dbg(&client->dev, "don't apply reset");
+ usleep_range(5000, 10000);
+ }
+
+ return 0;
+
+xclk_off:
+ clk_disable_unprepare(bridge->xclk);
+ return ret;
+}
+
+static void mipid02_set_power_off(struct mipid02_dev *bridge)
+{
+ regulator_bulk_disable(MIPID02_NUM_SUPPLIES, bridge->supplies);
+ clk_disable_unprepare(bridge->xclk);
+}
+
+static int mipid02_detect(struct mipid02_dev *bridge)
+{
+ u8 reg;
+
+ /*
+ * There are no version registers. Just try to read register
+ * MIPID02_CLK_LANE_WR_REG1.
+ */
+ return mipid02_read_reg(bridge, MIPID02_CLK_LANE_WR_REG1, &reg);
+}
+
+static u32 mipid02_get_link_freq_from_cid_pixel_rate(struct mipid02_dev *bridge,
+ struct v4l2_subdev *subdev)
+{
+ struct v4l2_fwnode_endpoint *ep = &bridge->rx;
+ struct v4l2_ctrl *ctrl;
+ u32 pixel_clock;
+ u32 bpp = bpp_from_code(bridge->fmt.code);
+
+ ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl)
+ return 0;
+ pixel_clock = v4l2_ctrl_g_ctrl_int64(ctrl);
+
+ return pixel_clock * bpp / (2 * ep->bus.mipi_csi2.num_data_lanes);
+}
+
+/*
+ * We need to know the link frequency to set up the clk_lane_reg1 timings. The
+ * link frequency is computed from the connected device's V4L2_CID_PIXEL_RATE,
+ * the bits per pixel and the number of data lanes.
+ */
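+/*
+ * For example, a 48 MHz pixel rate with a 10-bit format on 2 data lanes
+ * gives link_freq = 48000000 * 10 / (2 * 2) = 120 MHz.
+ */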
+static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge)
+{
+ struct i2c_client *client = bridge->i2c_client;
+ struct v4l2_subdev *subdev = bridge->s_subdev;
+ u32 link_freq;
+
+ link_freq = mipid02_get_link_freq_from_cid_pixel_rate(bridge, subdev);
+ if (!link_freq) {
+ dev_err(&client->dev, "Failed to detect link frequency");
+ return -EINVAL;
+ }
+
+ dev_dbg(&client->dev, "detect link_freq = %d Hz", link_freq);
+ bridge->r.clk_lane_reg1 |= (2000000000 / link_freq) << 2;
+
+ return 0;
+}
+
+static int mipid02_configure_clk_lane(struct mipid02_dev *bridge)
+{
+ struct i2c_client *client = bridge->i2c_client;
+ struct v4l2_fwnode_endpoint *ep = &bridge->rx;
+ bool *polarities = ep->bus.mipi_csi2.lane_polarities;
+
+ /* mipid02 doesn't support clock lane remapping */
+ if (ep->bus.mipi_csi2.clock_lane != 0) {
+ dev_err(&client->dev, "clk lane must be mapped to lane 0\n");
+ return -EINVAL;
+ }
+ bridge->r.clk_lane_reg1 |= (polarities[0] << 1) | CLK_ENABLE;
+
+ return 0;
+}
+
+static int mipid02_configure_data0_lane(struct mipid02_dev *bridge, int nb,
+ bool are_lanes_swap, bool *polarities)
+{
+ bool are_pin_swap = are_lanes_swap ? polarities[2] : polarities[1];
+
+ if (nb == 1 && are_lanes_swap)
+ return 0;
+
+ /*
+ * data lane 0 has its pin swap polarity reversed compared to the
+ * clock and data lane 1
+ */
+ if (!are_pin_swap)
+ bridge->r.data_lane0_reg1 = 1 << 1;
+ bridge->r.data_lane0_reg1 |= DATA_ENABLE;
+
+ return 0;
+}
+
+static int mipid02_configure_data1_lane(struct mipid02_dev *bridge, int nb,
+ bool are_lanes_swap, bool *polarities)
+{
+ bool are_pin_swap = are_lanes_swap ? polarities[1] : polarities[2];
+
+ if (nb == 1 && !are_lanes_swap)
+ return 0;
+
+ if (are_pin_swap)
+ bridge->r.data_lane1_reg1 = 1 << 1;
+ bridge->r.data_lane1_reg1 |= DATA_ENABLE;
+
+ return 0;
+}
+
+static int mipid02_configure_from_rx(struct mipid02_dev *bridge)
+{
+ struct v4l2_fwnode_endpoint *ep = &bridge->rx;
+ bool are_lanes_swap = ep->bus.mipi_csi2.data_lanes[0] == 2;
+ bool *polarities = ep->bus.mipi_csi2.lane_polarities;
+ int nb = ep->bus.mipi_csi2.num_data_lanes;
+ int ret;
+
+ ret = mipid02_configure_clk_lane(bridge);
+ if (ret)
+ return ret;
+
+ ret = mipid02_configure_data0_lane(bridge, nb, are_lanes_swap,
+ polarities);
+ if (ret)
+ return ret;
+
+ ret = mipid02_configure_data1_lane(bridge, nb, are_lanes_swap,
+ polarities);
+ if (ret)
+ return ret;
+
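+ /* report lane swap and the number of data lanes (minus one) to MODE_REG1 */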
+ bridge->r.mode_reg1 |= are_lanes_swap ? MODE_DATA_SWAP : 0;
+ bridge->r.mode_reg1 |= (nb - 1) << 1;
+
+ return mipid02_configure_from_rx_speed(bridge);
+}
+
+static int mipid02_configure_from_tx(struct mipid02_dev *bridge)
+{
+ struct v4l2_fwnode_endpoint *ep = &bridge->tx;
+
+ bridge->r.pix_width_ctrl = ep->bus.parallel.bus_width;
+ bridge->r.pix_width_ctrl_emb = ep->bus.parallel.bus_width;
+ if (ep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ bridge->r.mode_reg2 |= MODE_HSYNC_ACTIVE_HIGH;
+ if (ep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ bridge->r.mode_reg2 |= MODE_VSYNC_ACTIVE_HIGH;
+
+ return 0;
+}
+
+static int mipid02_configure_from_code(struct mipid02_dev *bridge)
+{
+ u8 data_type;
+
+ bridge->r.data_id_rreg = 0;
+ data_type = data_type_from_code(bridge->fmt.code);
+ if (!data_type)
+ return -EINVAL;
+ bridge->r.data_id_rreg = data_type;
+
+ return 0;
+}
+
+static int mipid02_stream_disable(struct mipid02_dev *bridge)
+{
+ struct i2c_client *client = bridge->i2c_client;
+ int ret;
+
+ /* Disable all lanes */
+ ret = mipid02_write_reg(bridge, MIPID02_CLK_LANE_REG1, 0);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE0_REG1, 0);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE1_REG1, 0);
+ if (ret)
+ goto error;
+error:
+ if (ret)
+ dev_err(&client->dev, "failed to stream off %d", ret);
+
+ return ret;
+}
+
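+/*
+ * Build the register contents from the CSI-2 sink endpoint, the parallel
+ * source endpoint and the current format, then program the bridge.
+ */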
+static int mipid02_stream_enable(struct mipid02_dev *bridge)
+{
+ struct i2c_client *client = bridge->i2c_client;
+ int ret = -EINVAL;
+
+ if (!bridge->s_subdev)
+ goto error;
+
+ memset(&bridge->r, 0, sizeof(bridge->r));
+ /* build register contents */
+ ret = mipid02_configure_from_rx(bridge);
+ if (ret)
+ goto error;
+ ret = mipid02_configure_from_tx(bridge);
+ if (ret)
+ goto error;
+ ret = mipid02_configure_from_code(bridge);
+ if (ret)
+ goto error;
+
+ /* write mipi registers */
+ ret = mipid02_write_reg(bridge, MIPID02_CLK_LANE_REG1,
+ bridge->r.clk_lane_reg1);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_CLK_LANE_REG3, CLK_MIPI_CSI);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE0_REG1,
+ bridge->r.data_lane0_reg1);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE0_REG2,
+ DATA_MIPI_CSI);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE1_REG1,
+ bridge->r.data_lane1_reg1);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE1_REG2,
+ DATA_MIPI_CSI);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_MODE_REG1,
+ MODE_NO_BYPASS | bridge->r.mode_reg1);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_MODE_REG2,
+ bridge->r.mode_reg2);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_DATA_ID_RREG,
+ bridge->r.data_id_rreg);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_DATA_SELECTION_CTRL,
+ SELECTION_MANUAL_DATA | SELECTION_MANUAL_WIDTH);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_PIX_WIDTH_CTRL,
+ bridge->r.pix_width_ctrl);
+ if (ret)
+ goto error;
+ ret = mipid02_write_reg(bridge, MIPID02_PIX_WIDTH_CTRL_EMB,
+ bridge->r.pix_width_ctrl_emb);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ dev_err(&client->dev, "failed to stream on %d", ret);
+ mipid02_stream_disable(bridge);
+
+ return ret;
+}
+
+static int mipid02_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
+ struct i2c_client *client = bridge->i2c_client;
+ int ret = 0;
+
+ dev_dbg(&client->dev, "%s : requested %d / current = %d", __func__,
+ enable, bridge->streaming);
+ mutex_lock(&bridge->lock);
+
+ if (bridge->streaming == enable)
+ goto out;
+
+ ret = enable ? mipid02_stream_enable(bridge) :
+ mipid02_stream_disable(bridge);
+ if (!ret)
+ bridge->streaming = enable;
+
+out:
+ dev_dbg(&client->dev, "%s current now = %d / %d", __func__,
+ bridge->streaming, ret);
+ mutex_unlock(&bridge->lock);
+
+ return ret;
+}
+
+static int mipid02_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
+ int ret = 0;
+
+ switch (code->pad) {
+ case MIPID02_SINK_0:
+ if (code->index >= ARRAY_SIZE(mipid02_supported_fmt_codes))
+ ret = -EINVAL;
+ else
+ code->code = mipid02_supported_fmt_codes[code->index];
+ break;
+ case MIPID02_SOURCE:
+ if (code->index == 0)
+ code->code = serial_to_parallel_code(bridge->fmt.code);
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mipid02_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
+ struct i2c_client *client = bridge->i2c_client;
+ struct v4l2_mbus_framefmt *fmt;
+
+ dev_dbg(&client->dev, "%s probe %d", __func__, format->pad);
+
+ if (format->pad >= MIPID02_PAD_NB)
+ return -EINVAL;
+ /* second CSI-2 pad not yet supported */
+ if (format->pad == MIPID02_SINK_1)
+ return -EINVAL;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt = v4l2_subdev_get_try_format(&bridge->sd, cfg, format->pad);
+ else
+ fmt = &bridge->fmt;
+
+ mutex_lock(&bridge->lock);
+
+ *mbus_fmt = *fmt;
+ /* code may need to be converted for source */
+ if (format->pad == MIPID02_SOURCE)
+ mbus_fmt->code = serial_to_parallel_code(mbus_fmt->code);
+
+ mutex_unlock(&bridge->lock);
+
+ return 0;
+}
+
+static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
+
+ /* the source pad mirrors the active sink pad */
+ format->format = bridge->fmt;
+ /* but code may need to be converted */
+ format->format.code = serial_to_parallel_code(format->format.code);
+
+ /* only apply format for V4L2_SUBDEV_FORMAT_TRY case */
+ if (format->which != V4L2_SUBDEV_FORMAT_TRY)
+ return;
+
+ *v4l2_subdev_get_try_format(sd, cfg, format->pad) = format->format;
+}
+
+static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ format->format.code = get_fmt_code(format->format.code);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ else
+ fmt = &bridge->fmt;
+
+ *fmt = format->format;
+}
+
+static int mipid02_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
+ struct i2c_client *client = bridge->i2c_client;
+ int ret = 0;
+
+ dev_dbg(&client->dev, "%s for %d", __func__, format->pad);
+
+ if (format->pad >= MIPID02_PAD_NB)
+ return -EINVAL;
+ /* second CSI-2 pad not yet supported */
+ if (format->pad == MIPID02_SINK_1)
+ return -EINVAL;
+
+ mutex_lock(&bridge->lock);
+
+ if (bridge->streaming) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ if (format->pad == MIPID02_SOURCE)
+ mipid02_set_fmt_source(sd, cfg, format);
+ else
+ mipid02_set_fmt_sink(sd, cfg, format);
+
+error:
+ mutex_unlock(&bridge->lock);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops mipid02_video_ops = {
+ .s_stream = mipid02_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops mipid02_pad_ops = {
+ .enum_mbus_code = mipid02_enum_mbus_code,
+ .get_fmt = mipid02_get_fmt,
+ .set_fmt = mipid02_set_fmt,
+};
+
+static const struct v4l2_subdev_ops mipid02_subdev_ops = {
+ .video = &mipid02_video_ops,
+ .pad = &mipid02_pad_ops,
+};
+
+static const struct media_entity_operations mipid02_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int mipid02_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *s_subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct mipid02_dev *bridge = to_mipid02_dev(notifier->sd);
+ struct i2c_client *client = bridge->i2c_client;
+ int source_pad;
+ int ret;
+
+ dev_dbg(&client->dev, "sensor_async_bound call %p", s_subdev);
+
+ source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
+ s_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (source_pad < 0) {
+ dev_err(&client->dev, "Couldn't find output pad for subdev %s\n",
+ s_subdev->name);
+ return source_pad;
+ }
+
+ ret = media_create_pad_link(&s_subdev->entity, source_pad,
+ &bridge->sd.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(&client->dev, "Couldn't create media link %d", ret);
+ return ret;
+ }
+
+ bridge->s_subdev = s_subdev;
+
+ return 0;
+}
+
+static void mipid02_async_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *s_subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct mipid02_dev *bridge = to_mipid02_dev(notifier->sd);
+
+ bridge->s_subdev = NULL;
+}
+
+static const struct v4l2_async_notifier_operations mipid02_notifier_ops = {
+ .bound = mipid02_async_bound,
+ .unbind = mipid02_async_unbind,
+};
+
+static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
+{
+ struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ struct i2c_client *client = bridge->i2c_client;
+ struct device_node *ep_node;
+ int ret;
+
+ /* parse rx (endpoint 0) */
+ ep_node = of_graph_get_endpoint_by_regs(bridge->i2c_client->dev.of_node,
+ 0, 0);
+ if (!ep_node) {
+ dev_err(&client->dev, "unable to find port0 ep");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), &ep);
+ if (ret) {
+ dev_err(&client->dev, "Could not parse v4l2 endpoint %d\n",
+ ret);
+ goto error_of_node_put;
+ }
+
+ /* do some sanity checks */
+ if (ep.bus.mipi_csi2.num_data_lanes > 2) {
+ dev_err(&client->dev, "max supported data lanes is 2 / got %d",
+ ep.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto error_of_node_put;
+ }
+
+ /* register it for later use */
+ bridge->rx = ep;
+
+ /* register async notifier so we get notified when the sensor is connected */
+ bridge->asd.match.fwnode =
+ fwnode_graph_get_remote_port_parent(of_fwnode_handle(ep_node));
+ bridge->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ of_node_put(ep_node);
+
+ v4l2_async_notifier_init(&bridge->notifier);
+ ret = v4l2_async_notifier_add_subdev(&bridge->notifier, &bridge->asd);
+ if (ret) {
+ dev_err(&client->dev, "fail to register asd to notifier %d",
+ ret);
+ fwnode_handle_put(bridge->asd.match.fwnode);
+ return ret;
+ }
+ bridge->notifier.ops = &mipid02_notifier_ops;
+
+ ret = v4l2_async_subdev_notifier_register(&bridge->sd,
+ &bridge->notifier);
+ if (ret)
+ v4l2_async_notifier_cleanup(&bridge->notifier);
+
+ return ret;
+
+error_of_node_put:
+ of_node_put(ep_node);
+error:
+
+ return ret;
+}
+
+static int mipid02_parse_tx_ep(struct mipid02_dev *bridge)
+{
+ struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_PARALLEL };
+ struct i2c_client *client = bridge->i2c_client;
+ struct device_node *ep_node;
+ int ret;
+
+ /* parse tx (port 2) */
+ ep_node = of_graph_get_endpoint_by_regs(bridge->i2c_client->dev.of_node,
+ 2, 0);
+ if (!ep_node) {
+ dev_err(&client->dev, "unable to find port1 ep");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), &ep);
+ if (ret) {
+ dev_err(&client->dev, "Could not parse v4l2 endpoint\n");
+ goto error_of_node_put;
+ }
+
+ of_node_put(ep_node);
+ bridge->tx = ep;
+
+ return 0;
+
+error_of_node_put:
+ of_node_put(ep_node);
+error:
+
+ return -EINVAL;
+}
+
+static int mipid02_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct mipid02_dev *bridge;
+ u32 clk_freq;
+ int ret;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ init_format(&bridge->fmt);
+
+ bridge->i2c_client = client;
+ v4l2_i2c_subdev_init(&bridge->sd, client, &mipid02_subdev_ops);
+
+ /* get and check the clock */
+ bridge->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(bridge->xclk)) {
+ dev_err(dev, "failed to get xclk\n");
+ return PTR_ERR(bridge->xclk);
+ }
+
+ clk_freq = clk_get_rate(bridge->xclk);
+ if (clk_freq < 6000000 || clk_freq > 27000000) {
+ dev_err(dev, "xclk freq must be in 6-27 Mhz range. got %d Hz\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ bridge->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+
+ ret = mipid02_get_regulators(bridge);
+ if (ret) {
+ dev_err(dev, "failed to get regulators %d", ret);
+ return ret;
+ }
+
+ mutex_init(&bridge->lock);
+ bridge->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ bridge->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ bridge->sd.entity.ops = &mipid02_subdev_entity_ops;
+ bridge->pad[MIPID02_SINK_0].flags = MEDIA_PAD_FL_SINK;
+ bridge->pad[MIPID02_SINK_1].flags = MEDIA_PAD_FL_SINK;
+ bridge->pad[MIPID02_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&bridge->sd.entity, MIPID02_PAD_NB,
+ bridge->pad);
+ if (ret) {
+ dev_err(&client->dev, "pads init failed %d", ret);
+ goto mutex_cleanup;
+ }
+
+ /* enable clock and power, and reset the device if a reset gpio is available */
+ ret = mipid02_set_power_on(bridge);
+ if (ret)
+ goto entity_cleanup;
+
+ ret = mipid02_detect(bridge);
+ if (ret) {
+ dev_err(&client->dev, "failed to detect mipid02 %d", ret);
+ goto power_off;
+ }
+
+ ret = mipid02_parse_tx_ep(bridge);
+ if (ret) {
+ dev_err(&client->dev, "failed to parse tx %d", ret);
+ goto power_off;
+ }
+
+ ret = mipid02_parse_rx_ep(bridge);
+ if (ret) {
+ dev_err(&client->dev, "failed to parse rx %d", ret);
+ goto power_off;
+ }
+
+ ret = v4l2_async_register_subdev(&bridge->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "v4l2_async_register_subdev failed %d",
+ ret);
+ goto unregister_notifier;
+ }
+
+ dev_info(&client->dev, "mipid02 device probe successfully");
+
+ return 0;
+
+unregister_notifier:
+ v4l2_async_notifier_unregister(&bridge->notifier);
+ v4l2_async_notifier_cleanup(&bridge->notifier);
+power_off:
+ mipid02_set_power_off(bridge);
+entity_cleanup:
+ media_entity_cleanup(&bridge->sd.entity);
+mutex_cleanup:
+ mutex_destroy(&bridge->lock);
+
+ return ret;
+}
+
+static int mipid02_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
+
+ v4l2_async_notifier_unregister(&bridge->notifier);
+ v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_unregister_subdev(&bridge->sd);
+ mipid02_set_power_off(bridge);
+ media_entity_cleanup(&bridge->sd.entity);
+ mutex_destroy(&bridge->lock);
+
+ return 0;
+}
+
+static const struct of_device_id mipid02_dt_ids[] = {
+ { .compatible = "st,st-mipid02" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mipid02_dt_ids);
+
+static struct i2c_driver mipid02_i2c_driver = {
+ .driver = {
+ .name = "st-mipid02",
+ .of_match_table = mipid02_dt_ids,
+ },
+ .probe_new = mipid02_probe,
+ .remove = mipid02_remove,
+};
+
+module_i2c_driver(mipid02_i2c_driver);
+
+MODULE_AUTHOR("Mickael Guene <mickael.guene@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics MIPID02 CSI-2 bridge driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/media-dev-allocator.c b/drivers/media/media-dev-allocator.c
new file mode 100644
index 000000000000..ae17887dec59
--- /dev/null
+++ b/drivers/media/media-dev-allocator.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * media-dev-allocator.c - Media Controller Device Allocator API
+ *
+ * Copyright (c) 2019 Shuah Khan <shuah@kernel.org>
+ *
+ * Credits: Suggested by Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+/*
+ * This file adds a global refcounted Media Controller Device Instance API.
+ * A system-wide global media device list is managed and each media device
+ * includes a kref count. The last put on the media device releases the media
+ * device instance.
+ *
+ */
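+/*
+ * A USB driver could, for instance, pair the calls as (sketch):
+ *
+ *   mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
+ *   ...
+ *   media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE);
+ *
+ * so that drivers bound to the same physical device share one refcounted
+ * media_device instance.
+ */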
+
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <media/media-device.h>
+#include <media/media-dev-allocator.h>
+
+static LIST_HEAD(media_device_list);
+static DEFINE_MUTEX(media_device_lock);
+
+struct media_device_instance {
+ struct media_device mdev;
+ struct module *owner;
+ struct list_head list;
+ struct kref refcount;
+};
+
+static inline struct media_device_instance *
+to_media_device_instance(struct media_device *mdev)
+{
+ return container_of(mdev, struct media_device_instance, mdev);
+}
+
+static void media_device_instance_release(struct kref *kref)
+{
+ struct media_device_instance *mdi =
+ container_of(kref, struct media_device_instance, refcount);
+
+ dev_dbg(mdi->mdev.dev, "%s: releasing Media Device\n", __func__);
+
+ mutex_lock(&media_device_lock);
+
+ media_device_unregister(&mdi->mdev);
+ media_device_cleanup(&mdi->mdev);
+
+ list_del(&mdi->list);
+ mutex_unlock(&media_device_lock);
+
+ kfree(mdi);
+}
+
+/* Callers should hold media_device_lock when calling this function */
+static struct media_device *__media_device_get(struct device *dev,
+ const char *module_name,
+ struct module *owner)
+{
+ struct media_device_instance *mdi;
+
+ list_for_each_entry(mdi, &media_device_list, list) {
+ if (mdi->mdev.dev != dev)
+ continue;
+
+ kref_get(&mdi->refcount);
+
+ /* get module reference for the media_device owner */
+ if (owner != mdi->owner && !try_module_get(mdi->owner))
+ dev_err(dev,
+ "%s: module %s get owner reference error\n",
+ __func__, module_name);
+ else
+ dev_dbg(dev, "%s: module %s got owner reference\n",
+ __func__, module_name);
+ return &mdi->mdev;
+ }
+
+ mdi = kzalloc(sizeof(*mdi), GFP_KERNEL);
+ if (!mdi)
+ return NULL;
+
+ mdi->owner = owner;
+ kref_init(&mdi->refcount);
+ list_add_tail(&mdi->list, &media_device_list);
+
+ dev_dbg(dev, "%s: Allocated media device for owner %s\n",
+ __func__, module_name);
+ return &mdi->mdev;
+}
+
+struct media_device *media_device_usb_allocate(struct usb_device *udev,
+ const char *module_name,
+ struct module *owner)
+{
+ struct media_device *mdev;
+
+ mutex_lock(&media_device_lock);
+ mdev = __media_device_get(&udev->dev, module_name, owner);
+ if (!mdev) {
+ mutex_unlock(&media_device_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* initialize the media device if it hasn't been initialized yet */
+ if (!mdev->dev)
+ __media_device_usb_init(mdev, udev, udev->product,
+ module_name);
+ mutex_unlock(&media_device_lock);
+ return mdev;
+}
+EXPORT_SYMBOL_GPL(media_device_usb_allocate);
+
+void media_device_delete(struct media_device *mdev, const char *module_name,
+ struct module *owner)
+{
+ struct media_device_instance *mdi = to_media_device_instance(mdev);
+
+ mutex_lock(&media_device_lock);
+ /* put module reference for the media_device owner */
+ if (mdi->owner != owner) {
+ module_put(mdi->owner);
+ dev_dbg(mdi->mdev.dev,
+ "%s: module %s put owner module reference\n",
+ __func__, module_name);
+ }
+ mutex_unlock(&media_device_lock);
+ kref_put(&mdi->refcount, media_device_instance_release);
+}
+EXPORT_SYMBOL_GPL(media_device_delete);
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 6b87a721dc49..d5aa30eeff4a 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -251,6 +251,7 @@ int __must_check media_devnode_register(struct media_device *mdev,
/* Part 2: Initialize the character device */
cdev_init(&devnode->cdev, &media_devnode_fops);
devnode->cdev.owner = owner;
+ kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor);
/* Part 3: Add the media and char device */
ret = cdev_device_add(&devnode->cdev, &devnode->dev);
@@ -290,8 +291,9 @@ void media_devnode_unregister(struct media_devnode *devnode)
mutex_lock(&media_devnode_lock);
/* Delete the cdev on this minor as well */
cdev_device_del(&devnode->cdev, &devnode->dev);
- mutex_unlock(&media_devnode_lock);
devnode->media_dev = NULL;
+ mutex_unlock(&media_devnode_lock);
+
put_device(&devnode->dev);
}
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 0b1cb3559140..a998a2e0ea1d 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -17,7 +17,6 @@
*/
#include <linux/bitmap.h>
-#include <linux/module.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <media/media-entity.h>
@@ -436,7 +435,10 @@ __must_check int __media_pipeline_start(struct media_entity *entity,
entity->stream_count++;
- if (WARN_ON(entity->pipe && entity->pipe != pipe)) {
+ if (entity->pipe && entity->pipe != pipe) {
+ pr_err("Pipe active for %s. Can't start for %s\n",
+ entity->name,
+ entity_err->name);
ret = -EBUSY;
goto error;
}
@@ -588,33 +590,6 @@ void media_pipeline_stop(struct media_entity *entity)
EXPORT_SYMBOL_GPL(media_pipeline_stop);
/* -----------------------------------------------------------------------------
- * Module use count
- */
-
-struct media_entity *media_entity_get(struct media_entity *entity)
-{
- if (entity == NULL)
- return NULL;
-
- if (entity->graph_obj.mdev->dev &&
- !try_module_get(entity->graph_obj.mdev->dev->driver->owner))
- return NULL;
-
- return entity;
-}
-EXPORT_SYMBOL_GPL(media_entity_get);
-
-void media_entity_put(struct media_entity *entity)
-{
- if (entity == NULL)
- return;
-
- if (entity->graph_obj.mdev->dev)
- module_put(entity->graph_obj.mdev->dev->driver->owner);
-}
-EXPORT_SYMBOL_GPL(media_entity_put);
-
-/* -----------------------------------------------------------------------------
* Links management
*/
diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c
index eec2e2b2f6ec..e3fca436c75b 100644
--- a/drivers/media/media-request.c
+++ b/drivers/media/media-request.c
@@ -246,38 +246,38 @@ static const struct file_operations request_fops = {
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
- struct file *filp;
+ struct fd f;
struct media_request *req;
if (!mdev || !mdev->ops ||
!mdev->ops->req_validate || !mdev->ops->req_queue)
- return ERR_PTR(-EACCES);
+ return ERR_PTR(-EBADR);
- filp = fget(request_fd);
- if (!filp)
+ f = fdget(request_fd);
+ if (!f.file)
goto err_no_req_fd;
- if (filp->f_op != &request_fops)
+ if (f.file->f_op != &request_fops)
goto err_fput;
- req = filp->private_data;
+ req = f.file->private_data;
if (req->mdev != mdev)
goto err_fput;
/*
* Note: as long as someone has an open filehandle of the request,
- * the request can never be released. The fget() above ensures that
+ * the request can never be released. The fdget() above ensures that
* even if userspace closes the request filehandle, the release()
* fop won't be called, so the media_request_get() always succeeds
* and there is no race condition where the request was released
* before media_request_get() is called.
*/
media_request_get(req);
- fput(filp);
+ fdput(f);
return req;
err_fput:
- fput(filp);
+ fdput(f);
err_no_req_fd:
dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
@@ -407,7 +407,7 @@ int media_request_object_bind(struct media_request *req,
int ret = -EBUSY;
if (WARN_ON(!ops->release))
- return -EACCES;
+ return -EBADR;
spin_lock_irqsave(&req->lock, flags);
diff --git a/drivers/media/mmc/siano/Kconfig b/drivers/media/mmc/siano/Kconfig
index 7693487e2f63..3941ee8352bb 100644
--- a/drivers/media/mmc/siano/Kconfig
+++ b/drivers/media/mmc/siano/Kconfig
@@ -9,5 +9,5 @@ config SMS_SDIO_DRV
depends on !RC_CORE || RC_CORE
select MEDIA_COMMON_OPTIONS
select SMS_SIANO_MDTV
- ---help---
+ help
Choose if you would like to have Siano's support for SDIO interface
diff --git a/drivers/media/pci/bt8xx/Kconfig b/drivers/media/pci/bt8xx/Kconfig
index bc89e37608cd..0f46db7d5ffc 100644
--- a/drivers/media/pci/bt8xx/Kconfig
+++ b/drivers/media/pci/bt8xx/Kconfig
@@ -13,7 +13,7 @@ config VIDEO_BT848
select VIDEO_SAA6588 if MEDIA_SUBDRV_AUTOSELECT
select RADIO_ADAPTERS
select RADIO_TEA575X
- ---help---
+ help
Support for BT848 based frame grabber/overlay boards. This includes
the Miro, Hauppauge and STB boards. Please read the material in
<file:Documentation/media/v4l-drivers/bttv.rst> for more information.
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index c94318c71a0c..e929797206f6 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1100,7 +1100,8 @@ static int dst_get_device_id(struct dst_state *state)
/* Card capabilities */
state->dst_hw_cap = p_dst_type->dst_feature;
pr_err("Recognise [%s]\n", p_dst_type->device_id);
- strncpy(&state->fw_name[0], p_dst_type->device_id, 6);
+ strscpy(state->fw_name, p_dst_type->device_id,
+ sizeof(state->fw_name));
/* Multiple tuners */
if (p_dst_type->tuner_type & TUNER_TYPE_MULTI) {
switch (use_dst_type) {
diff --git a/drivers/media/pci/bt8xx/dst_common.h b/drivers/media/pci/bt8xx/dst_common.h
index 6a2cfdd44e3e..79dec1b1722c 100644
--- a/drivers/media/pci/bt8xx/dst_common.h
+++ b/drivers/media/pci/bt8xx/dst_common.h
@@ -138,7 +138,7 @@ struct dst_state {
u32 tuner_type;
char *tuner_name;
struct mutex dst_mutex;
- u8 fw_name[8];
+ char fw_name[8];
struct dvb_device *dst_ca;
};
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index aa35cbc0a904..9a544bab3178 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_COBALT
select VIDEO_ADV7511
select VIDEO_ADV7842
select VIDEOBUF2_DMA_SG
- ---help---
+ help
This is a video4linux driver for the Cisco PCIe Cobalt card.
This board is sadly not available outside of Cisco, but it is
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index 04783e78cc12..a518927abae1 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -128,7 +128,7 @@ done:
cb->vb.sequence = s->sequence++;
vb2_buffer_done(&cb->vb.vb2_buf,
(skip || s->unstable_frame) ?
- VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/cx18/Kconfig b/drivers/media/pci/cx18/Kconfig
index c675b83c43a9..96477bba0d5c 100644
--- a/drivers/media/pci/cx18/Kconfig
+++ b/drivers/media/pci/cx18/Kconfig
@@ -13,7 +13,7 @@ config VIDEO_CX18
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA8290 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This is a video4linux driver for Conexant cx23418 based
PCI combo video recorder devices.
@@ -27,7 +27,7 @@ config VIDEO_CX18_ALSA
tristate "Conexant 23418 DMA audio support"
depends on VIDEO_CX18 && SND
select SND_PCM
- ---help---
+ help
This is a video4linux driver for direct (DMA) audio on
Conexant 23418 based TV cards using ALSA.
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index f778965a2eb8..59e78fb17575 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -484,7 +484,7 @@ static ssize_t cx18_read_pos(struct cx18_stream *s, char __user *ubuf,
CX18_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
if (rc > 0)
- pos += rc;
+ *pos += rc;
return rc;
}
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index 3435bbaa3167..1bba9e497915 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -43,7 +43,7 @@ config VIDEO_CX23885
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_M88RS6000T if MEDIA_SUBDRV_AUTOSELECT
select DVB_TUNER_DIB0070 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This is a video4linux driver for Conexant 23885 based
TV cards.
@@ -54,7 +54,7 @@ config MEDIA_ALTERA_CI
tristate "Altera FPGA based CI module"
depends on VIDEO_CX23885 && DVB_CORE
select ALTERA_STAPL
- ---help---
+ help
An Altera FPGA CI module for NetUP Dual DVB-T/C RF CI card.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 0d0929c54f93..e2e63f05645e 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1474,8 +1474,9 @@ static int dvb_register(struct cx23885_tsport *port)
if (fe0->dvb.frontend != NULL) {
struct i2c_adapter *tun_i2c;
- fe0->dvb.frontend->sec_priv = kmalloc(sizeof(dib7000p_ops), GFP_KERNEL);
- memcpy(fe0->dvb.frontend->sec_priv, &dib7000p_ops, sizeof(dib7000p_ops));
+ fe0->dvb.frontend->sec_priv = kmemdup(&dib7000p_ops, sizeof(dib7000p_ops), GFP_KERNEL);
+ if (!fe0->dvb.frontend->sec_priv)
+ return -ENOMEM;
tun_i2c = dib7000p_ops.get_i2c_master(fe0->dvb.frontend, DIBX000_I2C_INTERFACE_TUNER, 1);
if (!dvb_attach(dib0070_attach, fe0->dvb.frontend, tun_i2c, &dib7070p_dib0070_config))
return -ENODEV;
diff --git a/drivers/media/pci/cx25821/Kconfig b/drivers/media/pci/cx25821/Kconfig
index 1755d3d2feaa..a64fa9a6d5d5 100644
--- a/drivers/media/pci/cx25821/Kconfig
+++ b/drivers/media/pci/cx25821/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_CX25821
depends on VIDEO_DEV && PCI && I2C
select I2C_ALGOBIT
select VIDEOBUF2_DMA_SG
- ---help---
+ help
This is a video4linux driver for Conexant 25821 based
TV cards.
@@ -14,7 +14,7 @@ config VIDEO_CX25821_ALSA
tristate "Conexant 25821 DMA audio support"
depends on VIDEO_CX25821 && SND
select SND_PCM
- ---help---
+ help
This is a video4linux driver for direct (DMA) audio on
Conexant 25821 based capture cards using ALSA.
diff --git a/drivers/media/pci/cx88/Kconfig b/drivers/media/pci/cx88/Kconfig
index 14b813d634a8..fbb17ddb6bc3 100644
--- a/drivers/media/pci/cx88/Kconfig
+++ b/drivers/media/pci/cx88/Kconfig
@@ -6,7 +6,7 @@ config VIDEO_CX88
select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_WM8775 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This is a video4linux driver for Conexant 2388x based
TV cards.
@@ -17,7 +17,7 @@ config VIDEO_CX88_ALSA
tristate "Conexant 2388x DMA audio support"
depends on VIDEO_CX88 && SND
select SND_PCM
- ---help---
+ help
This is a video4linux driver for direct (DMA) audio on
Conexant 2388x based TV cards using ALSA.
@@ -33,7 +33,7 @@ config VIDEO_CX88_BLACKBIRD
tristate "Blackbird MPEG encoder support (cx2388x + cx23416)"
depends on VIDEO_CX88
select VIDEO_CX2341X
- ---help---
+ help
This adds support for MPEG encoder cards based on the
Blackbird reference design, using the Conexant 2388x
and 23416 chips.
@@ -64,7 +64,7 @@ config VIDEO_CX88_DVB
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This adds support for DVB/ATSC cards based on the
Conexant 2388x chip.
@@ -75,7 +75,7 @@ config VIDEO_CX88_ENABLE_VP3054
bool "VP-3054 Secondary I2C Bus Support"
default y
depends on VIDEO_CX88_DVB && DVB_MT352
- ---help---
+ help
This adds DVB-T support for cards based on the
Conexant 2388x chip and the MT352 demodulator,
which also require support for the VP-3054
diff --git a/drivers/media/pci/ddbridge/Kconfig b/drivers/media/pci/ddbridge/Kconfig
index 16faef265e97..fc98b6d575d9 100644
--- a/drivers/media/pci/ddbridge/Kconfig
+++ b/drivers/media/pci/ddbridge/Kconfig
@@ -15,7 +15,7 @@ config DVB_DDBRIDGE
select DVB_MXL5XX if MEDIA_SUBDRV_AUTOSELECT
select DVB_CXD2099 if MEDIA_SUBDRV_AUTOSELECT
select DVB_DUMMY_FE if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
Support for cards with the Digital Devices PCI express bridge:
- Octopus PCIe Bridge
- Octopus mini PCIe Bridge
@@ -36,7 +36,7 @@ config DVB_DDBRIDGE_MSIENABLE
depends on DVB_DDBRIDGE
depends on PCI_MSI
default n
- ---help---
+ help
Use PCI MSI (Message Signaled Interrupts) per default. Enabling this
might lead to I2C errors originating from the bridge in conjunction
with certain SATA controllers, requiring a reload of the ddbridge
diff --git a/drivers/media/pci/dt3155/Kconfig b/drivers/media/pci/dt3155/Kconfig
index 858b0f2f15be..d770eec541d4 100644
--- a/drivers/media/pci/dt3155/Kconfig
+++ b/drivers/media/pci/dt3155/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_DT3155
depends on PCI && VIDEO_DEV && VIDEO_V4L2
select VIDEOBUF2_DMA_CONTIG
default n
- ---help---
+ help
Enables dt3155 device driver for the DataTranslation DT3155 frame grabber.
Say Y here if you have this hardware.
In doubt, say N.
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 17d69bd5d7f1..49677ee889e3 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -46,7 +46,6 @@ static int read_i2c_reg(void __iomem *addr, u8 index, u8 *data)
u32 tmp = index;
iowrite32((tmp << 17) | IIC_READ, addr + IIC_CSR2);
- mmiowb();
udelay(45); /* wait at least 43 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
return -EIO; /* error: NEW_CYCLE not cleared */
@@ -77,7 +76,6 @@ static int write_i2c_reg(void __iomem *addr, u8 index, u8 data)
u32 tmp = index;
iowrite32((tmp << 17) | IIC_WRITE | data, addr + IIC_CSR2);
- mmiowb();
udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
return -EIO; /* error: NEW_CYCLE not cleared */
@@ -104,7 +102,6 @@ static void write_i2c_reg_nowait(void __iomem *addr, u8 index, u8 data)
u32 tmp = index;
iowrite32((tmp << 17) | IIC_WRITE | data, addr + IIC_CSR2);
- mmiowb();
}
/**
@@ -264,7 +261,6 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id)
FLD_DN_ODD | FLD_DN_EVEN |
CAP_CONT_EVEN | CAP_CONT_ODD,
ipd->regs + CSR1);
- mmiowb();
}
spin_lock(&ipd->lock);
@@ -282,7 +278,6 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id)
iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START);
iowrite32(ipd->width, ipd->regs + EVEN_DMA_STRIDE);
iowrite32(ipd->width, ipd->regs + ODD_DMA_STRIDE);
- mmiowb();
}
/* enable interrupts, clear all irq flags */
@@ -437,12 +432,10 @@ static int dt3155_init_board(struct dt3155_priv *pd)
/* resetting the adapter */
iowrite32(ADDR_ERR_ODD | ADDR_ERR_EVEN | FLD_CRPT_ODD | FLD_CRPT_EVEN |
FLD_DN_ODD | FLD_DN_EVEN, pd->regs + CSR1);
- mmiowb();
msleep(20);
/* initializing adapter registers */
iowrite32(FIFO_EN | SRST, pd->regs + CSR1);
- mmiowb();
iowrite32(0xEEEEEE01, pd->regs + EVEN_PIXEL_FMT);
iowrite32(0xEEEEEE01, pd->regs + ODD_PIXEL_FMT);
iowrite32(0x00000020, pd->regs + FIFO_TRIGER);
@@ -454,7 +447,6 @@ static int dt3155_init_board(struct dt3155_priv *pd)
iowrite32(0, pd->regs + MASK_LENGTH);
iowrite32(0x0005007C, pd->regs + FIFO_FLAG_CNT);
iowrite32(0x01010101, pd->regs + IIC_CLK_DUR);
- mmiowb();
/* verifying that we have a DT3155 board (not just a SAA7116 chip) */
read_i2c_reg(pd->regs, DT_ID, &tmp);
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
index 715f77651482..bd518bdc9f5f 100644
--- a/drivers/media/pci/intel/ipu3/Kconfig
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -7,7 +7,7 @@ config VIDEO_IPU3_CIO2
select V4L2_FWNODE
select VIDEOBUF2_DMA_SG
- ---help---
+ help
This is the Intel IPU3 CIO2 CSI-2 receiver unit, found in Intel
Skylake and Kaby Lake SoCs and used for capturing images and
video from a camera sensor.
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index f8020ebe9f05..2a52a393fe74 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1601,6 +1601,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
subdev->owner = THIS_MODULE;
snprintf(subdev->name, sizeof(subdev->name),
CIO2_ENTITY_NAME " %td", q - cio2->queue);
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
v4l2_set_subdevdata(subdev, cio2);
r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
if (r) {
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 06ca4e23f9fb..e96b3c182a2f 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -18,7 +18,7 @@ config VIDEO_IVTV
select VIDEO_VP27SMPX
select VIDEO_UPD64031A
select VIDEO_UPD64083
- ---help---
+ help
This is a video4linux driver for Conexant cx23416 or cx23415 based
PCI personal video recorder devices.
@@ -32,7 +32,7 @@ config VIDEO_IVTV_DEPRECATED_IOCTLS
bool "enable the DVB ioctls abuse on ivtv driver"
depends on VIDEO_IVTV
default n
- ---help---
+ help
Enable the usage of the a DVB set of ioctls that were abused by
IVTV driver for a while.
@@ -45,7 +45,7 @@ config VIDEO_IVTV_ALSA
tristate "Conexant cx23415/cx23416 ALSA interface for PCM audio capture"
depends on VIDEO_IVTV && SND
select SND_PCM
- ---help---
+ help
This driver provides an ALSA interface as another method for user
applications to obtain PCM audio data from Conexant cx23415/cx23416
based PCI TV cards supported by the ivtv driver.
@@ -63,7 +63,7 @@ config VIDEO_FB_IVTV
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- ---help---
+ help
This is a framebuffer driver for the Conexant cx23415 MPEG
encoder/decoder.
@@ -77,7 +77,7 @@ config VIDEO_FB_IVTV_FORCE_PAT
bool "force cx23415 framebuffer init with x86 PAT enabled"
depends on VIDEO_FB_IVTV && X86_PAT
default n
- ---help---
+ help
With PAT enabled, the cx23415 framebuffer driver does not
utilize write-combined caching on the framebuffer memory.
For this reason, the driver will by default disable itself
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index 6196daae4b3e..043ac0ae9ed0 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -420,7 +420,7 @@ static ssize_t ivtv_read_pos(struct ivtv_stream *s, char __user *ubuf, size_t co
IVTV_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
if (rc > 0)
- pos += rc;
+ *pos += rc;
return rc;
}
diff --git a/drivers/media/pci/mantis/mantis_i2c.c b/drivers/media/pci/mantis/mantis_i2c.c
index 6528a2180119..f8b503ef42bc 100644
--- a/drivers/media/pci/mantis/mantis_i2c.c
+++ b/drivers/media/pci/mantis/mantis_i2c.c
@@ -225,7 +225,7 @@ int mantis_i2c_init(struct mantis_pci *mantis)
init_waitqueue_head(&mantis->i2c_wq);
mutex_init(&mantis->i2c_lock);
- strncpy(i2c_adapter->name, "Mantis I2C", sizeof(i2c_adapter->name));
+ strscpy(i2c_adapter->name, "Mantis I2C", sizeof(i2c_adapter->name));
i2c_set_adapdata(i2c_adapter, mantis);
i2c_adapter->owner = THIS_MODULE;
diff --git a/drivers/media/pci/meye/Kconfig b/drivers/media/pci/meye/Kconfig
index 9a50f54231ad..ce0463c81886 100644
--- a/drivers/media/pci/meye/Kconfig
+++ b/drivers/media/pci/meye/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_MEYE
tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
depends on PCI && VIDEO_V4L2
depends on SONY_LAPTOP || COMPILE_TEST
- ---help---
+ help
This is the video4linux driver for the Motion Eye camera found
in the Vaio Picturebook laptops. Please read the material in
<file:Documentation/media/v4l-drivers/meye.rst> for more information.
diff --git a/drivers/media/pci/netup_unidvb/Kconfig b/drivers/media/pci/netup_unidvb/Kconfig
index b663154d0cc4..60057585f04c 100644
--- a/drivers/media/pci/netup_unidvb/Kconfig
+++ b/drivers/media/pci/netup_unidvb/Kconfig
@@ -8,7 +8,7 @@ config DVB_NETUP_UNIDVB
select DVB_HELENE if MEDIA_SUBDRV_AUTOSELECT
select DVB_LNBH25 if MEDIA_SUBDRV_AUTOSELECT
select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
Support for NetUP PCI express Universal DVB card.
Say Y when you want to support NetUP Dual Universal DVB card.
diff --git a/drivers/media/pci/ngene/Kconfig b/drivers/media/pci/ngene/Kconfig
index e06d019996f3..8a80a5bab8e9 100644
--- a/drivers/media/pci/ngene/Kconfig
+++ b/drivers/media/pci/ngene/Kconfig
@@ -15,6 +15,6 @@ config DVB_NGENE
select DVB_STV6111 if MEDIA_SUBDRV_AUTOSELECT
select DVB_LNBH25 if MEDIA_SUBDRV_AUTOSELECT
select DVB_CXD2099 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
Support for Micronas PCI express cards with nGene bridge.
diff --git a/drivers/media/pci/saa7134/Kconfig b/drivers/media/pci/saa7134/Kconfig
index b44e0d70907e..8b28783b3fcd 100644
--- a/drivers/media/pci/saa7134/Kconfig
+++ b/drivers/media/pci/saa7134/Kconfig
@@ -7,7 +7,7 @@ config VIDEO_SAA7134
select CRC32
select VIDEO_SAA6588 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_SAA6752HS if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This is a video4linux driver for Philips SAA713x based
TV cards.
@@ -18,7 +18,7 @@ config VIDEO_SAA7134_ALSA
tristate "Philips SAA7134 DMA audio support"
depends on VIDEO_SAA7134 && SND
select SND_PCM
- ---help---
+ help
This is a video4linux driver for direct (DMA) audio in
Philips SAA713x based TV cards using ALSA
@@ -31,7 +31,7 @@ config VIDEO_SAA7134_RC
depends on VIDEO_SAA7134
depends on !(RC_CORE=m && VIDEO_SAA7134=y)
default y
- ---help---
+ help
Enables Remote Controller support on saa7134 driver.
config VIDEO_SAA7134_DVB
@@ -57,7 +57,7 @@ config VIDEO_SAA7134_DVB
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA8290 if MEDIA_SUBDRV_AUTOSELECT
select DVB_ZL10039 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This adds support for DVB cards based on the
Philips saa7134 chip.
@@ -68,6 +68,6 @@ config VIDEO_SAA7134_GO7007
tristate "go7007 support for saa7134 based TV cards"
depends on VIDEO_SAA7134
depends on VIDEO_GO7007
- ---help---
+ help
Enables saa7134 driver support for boards with go7007
MPEG encoder (WIS Voyager or compatible).
diff --git a/drivers/media/pci/saa7134/saa7134-go7007.c b/drivers/media/pci/saa7134/saa7134-go7007.c
index 275c5e151818..626e130a9770 100644
--- a/drivers/media/pci/saa7134/saa7134-go7007.c
+++ b/drivers/media/pci/saa7134/saa7134-go7007.c
@@ -444,7 +444,7 @@ static int saa7134_go7007_init(struct saa7134_dev *dev)
sd = &saa->sd;
v4l2_subdev_init(sd, &saa7134_go7007_sd_ops);
v4l2_set_subdevdata(sd, saa);
- strncpy(sd->name, "saa7134-go7007", sizeof(sd->name));
+ strscpy(sd->name, "saa7134-go7007", sizeof(sd->name));
/* Allocate a couple pages for receiving the compressed stream */
saa->top = (u8 *)get_zeroed_page(GFP_KERNEL);
diff --git a/drivers/media/pci/saa7146/Kconfig b/drivers/media/pci/saa7146/Kconfig
index da88b77a916c..60d9862580ff 100644
--- a/drivers/media/pci/saa7146/Kconfig
+++ b/drivers/media/pci/saa7146/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_HEXIUM_GEMINI
tristate "Hexium Gemini frame grabber"
depends on PCI && VIDEO_V4L2 && I2C
select VIDEO_SAA7146_VV
- ---help---
+ help
This is a video4linux driver for the Hexium Gemini frame
grabber card by Hexium. Please note that the Gemini Dual
card is *not* fully supported.
@@ -14,7 +14,7 @@ config VIDEO_HEXIUM_ORION
tristate "Hexium HV-PCI6 and Orion frame grabber"
depends on PCI && VIDEO_V4L2 && I2C
select VIDEO_SAA7146_VV
- ---help---
+ help
This is a video4linux driver for the Hexium HV-PCI6 and
Orion frame grabber cards by Hexium.
@@ -30,7 +30,7 @@ config VIDEO_MXB
select VIDEO_TDA9840 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TEA6415C if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TEA6420 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This is a video4linux driver for the 'Multimedia eXtension Board'
TV card by Siemens-Nixdorf.
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index 5817d9cde4d0..6d8e4afe9673 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -270,9 +270,8 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
- hexium->i2c_adapter = (struct i2c_adapter) {
- .name = "hexium gemini",
- };
+ strscpy(hexium->i2c_adapter.name, "hexium gemini",
+ sizeof(hexium->i2c_adapter.name));
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
DEB_S("cannot register i2c-device. skipping.\n");
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index 0a05176c18ab..a794f9e5f990 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -231,9 +231,8 @@ static int hexium_probe(struct saa7146_dev *dev)
saa7146_write(dev, DD1_STREAM_B, 0x00000000);
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
- hexium->i2c_adapter = (struct i2c_adapter) {
- .name = "hexium orion",
- };
+ strscpy(hexium->i2c_adapter.name, "hexium orion",
+ sizeof(hexium->i2c_adapter.name));
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
DEB_S("cannot register i2c-device. skipping.\n");
diff --git a/drivers/media/pci/saa7164/Kconfig b/drivers/media/pci/saa7164/Kconfig
index 9098ef5feca4..265c5a4fd823 100644
--- a/drivers/media/pci/saa7164/Kconfig
+++ b/drivers/media/pci/saa7164/Kconfig
@@ -8,7 +8,7 @@ config VIDEO_SAA7164
select DVB_TDA10048 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This is a video4linux driver for NXP SAA7164 based
TV cards.
diff --git a/drivers/media/pci/solo6x10/Kconfig b/drivers/media/pci/solo6x10/Kconfig
index d9e06a6bf1eb..2061d02a82d0 100644
--- a/drivers/media/pci/solo6x10/Kconfig
+++ b/drivers/media/pci/solo6x10/Kconfig
@@ -8,7 +8,7 @@ config VIDEO_SOLO6X10
select VIDEOBUF2_DMA_CONTIG
select SND_PCM
select FONT_8x16
- ---help---
+ help
This driver supports the Bluecherry H.264 and MPEG-4 hardware
compression capture cards and other Softlogic-based ones.
diff --git a/drivers/media/pci/tw5864/Kconfig b/drivers/media/pci/tw5864/Kconfig
index 760fb11dfeae..e5d52f076232 100644
--- a/drivers/media/pci/tw5864/Kconfig
+++ b/drivers/media/pci/tw5864/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_TW5864
tristate "Techwell TW5864 video/audio grabber and encoder"
depends on VIDEO_DEV && PCI && VIDEO_V4L2
select VIDEOBUF2_DMA_CONTIG
- ---help---
+ help
Support for boards based on Techwell TW5864 chip which provides
multichannel video & audio grabbing and encoding (H.264, MJPEG,
ADPCM G.726).
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 5a1f3aa4101a..434d313a3c15 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -1395,13 +1395,13 @@ static void tw5864_handle_frame(struct tw5864_h264_frame *frame)
input->vb = NULL;
spin_unlock_irqrestore(&input->slock, flags);
- v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
-
if (!vb) { /* Gone because of disabling */
dev_dbg(&dev->pci->dev, "vb is empty, dropping frame\n");
return;
}
+ v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
+
/*
* Check for space.
* Mind the overhead of startcode emulation prevention.
diff --git a/drivers/media/pci/tw68/Kconfig b/drivers/media/pci/tw68/Kconfig
index 95d5d5202048..4bfc4fa416e5 100644
--- a/drivers/media/pci/tw68/Kconfig
+++ b/drivers/media/pci/tw68/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_TW68
tristate "Techwell tw68x Video For Linux"
depends on VIDEO_DEV && PCI && VIDEO_V4L2
select VIDEOBUF2_DMA_SG
- ---help---
+ help
Support for Techwell tw68xx based frame grabber boards.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 4acbed189644..011c1c2fcf19 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -6,7 +6,7 @@ menuconfig V4L_PLATFORM_DRIVERS
bool "V4L platform devices"
depends on MEDIA_CAMERA_SUPPORT
default n
- ---help---
+ help
Say Y here to enable support for platform-specific V4L drivers.
if V4L_PLATFORM_DRIVERS
@@ -55,7 +55,7 @@ config VIDEO_VIU
depends on VIDEO_V4L2 && (PPC_MPC512x || COMPILE_TEST) && I2C
select VIDEOBUF_DMA_CONTIG
default y
- ---help---
+ help
Support for Freescale VIU video driver. This device captures
video data, or overlays video on DIU frame buffer.
@@ -80,13 +80,13 @@ config VIDEO_OMAP3
select VIDEOBUF2_DMA_CONTIG
select MFD_SYSCON
select V4L2_FWNODE
- ---help---
+ help
Driver for an OMAP 3 camera controller.
config VIDEO_OMAP3_DEBUG
bool "OMAP 3 Camera debug messages"
depends on VIDEO_OMAP3
- ---help---
+ help
Enable debug messages on OMAP 3 camera controller driver.
config VIDEO_PXA27x
@@ -96,7 +96,7 @@ config VIDEO_PXA27x
select VIDEOBUF2_DMA_SG
select SG_SPLIT
select V4L2_FWNODE
- ---help---
+ help
This is a v4l2 driver for the PXA27x Quick Capture Interface
config VIDEO_QCOM_CAMSS
@@ -112,7 +112,7 @@ config VIDEO_S3C_CAMIF
depends on PM
depends on ARCH_S3C64XX || PLAT_S3C24XX || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
- ---help---
+ help
This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
host interface (CAMIF).
@@ -125,7 +125,7 @@ config VIDEO_STM32_DCMI
depends on ARCH_STM32 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
- ---help---
+ help
This module makes the STM32 Digital Camera Memory Interface (DCMI)
available as a v4l2 device.
@@ -138,7 +138,7 @@ config VIDEO_RENESAS_CEU
depends on ARCH_SHMOBILE || ARCH_R7S72100 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
- ---help---
+ help
This is a v4l2 driver for the Renesas CEU Interface
source "drivers/media/platform/exynos4-is/Kconfig"
@@ -155,7 +155,7 @@ config VIDEO_TI_CAL
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
default n
- ---help---
+ help
Support for the TI CAL (Camera Adaptation Layer) block
found on DRA72X SoC.
In TI Technical Reference Manual this module is referred as
@@ -168,7 +168,7 @@ menuconfig V4L_MEM2MEM_DRIVERS
depends on VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
default n
- ---help---
+ help
Say Y here to enable selecting drivers for V4L devices that
use system memory for both source and destination buffers, as opposed
to capture and output drivers, which use memory buffers for just
@@ -184,7 +184,7 @@ config VIDEO_CODA
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select GENERIC_ALLOCATOR
- ---help---
+ help
Coda is a range of video codec IPs that supports
H.264, MPEG-4, and other video formats.
@@ -207,7 +207,7 @@ config VIDEO_MEDIATEK_JPEG
depends on ARCH_MEDIATEK || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
- ---help---
+ help
Mediatek jpeg codec driver provides HW capability to decode
JPEG format
@@ -218,7 +218,7 @@ config VIDEO_MEDIATEK_VPU
tristate "Mediatek Video Processor Unit"
depends on VIDEO_DEV && VIDEO_V4L2
depends on ARCH_MEDIATEK || COMPILE_TEST
- ---help---
+ help
This driver provides downloading VPU firmware and
communicating with VPU. This driver for hw video
codec embedded in Mediatek's MT8173 SOCs. It is able
@@ -236,7 +236,7 @@ config VIDEO_MEDIATEK_MDP
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VPU
default n
- ---help---
+ help
It is a v4l2 driver and present in Mediatek MT8173 SoCs.
The driver supports for scaling and color space conversion.
@@ -252,7 +252,7 @@ config VIDEO_MEDIATEK_VCODEC
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VPU
default n
- ---help---
+ help
Mediatek video codec driver provides HW capability to
encode and decode in a range of video formats
This driver rely on VPU driver to communicate with VPU.
@@ -276,7 +276,7 @@ config VIDEO_SAMSUNG_S5P_G2D
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
default n
- ---help---
+ help
This is a v4l2 driver for Samsung S5P and EXYNOS4 G2D
2d graphics accelerator.
@@ -286,7 +286,7 @@ config VIDEO_SAMSUNG_S5P_JPEG
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
- ---help---
+ help
This is a v4l2 driver for Samsung S5P, EXYNOS3250
and EXYNOS4 JPEG codec
@@ -407,7 +407,7 @@ config VIDEO_RENESAS_FDP1
depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
- ---help---
+ help
This is a V4L2 driver for the Renesas Fine Display Processor
providing colour space conversion, and de-interlacing features.
@@ -420,7 +420,7 @@ config VIDEO_RENESAS_JPU
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
- ---help---
+ help
This is a V4L2 driver for the Renesas JPEG Processing Unit.
To compile this driver as a module, choose M here: the module
@@ -430,7 +430,7 @@ config VIDEO_RENESAS_FCP
tristate "Renesas Frame Compression Processor"
depends on ARCH_RENESAS || COMPILE_TEST
depends on OF
- ---help---
+ help
This is a driver for the Renesas Frame Compression Processor (FCP).
The FCP is a companion module of video processing modules in the
Renesas R-Car Gen3 SoCs. It handles memory access for the codec,
@@ -446,7 +446,7 @@ config VIDEO_RENESAS_VSP1
depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
- ---help---
+ help
This is a V4L2 driver for the Renesas VSP1 video processing engine.
To compile this driver as a module, choose M here: the module
@@ -459,7 +459,7 @@ config VIDEO_ROCKCHIP_RGA
select VIDEOBUF2_DMA_SG
select V4L2_MEM2MEM_DEV
default n
- ---help---
+ help
This is a v4l2 driver for Rockchip SOC RGA 2d graphics accelerator.
Rockchip RGA is a separate 2D raster graphic acceleration unit.
It accelerates 2D graphics operations, such as point/line drawing,
@@ -477,14 +477,14 @@ config VIDEO_TI_VPE
select VIDEO_TI_SC
select VIDEO_TI_CSC
default n
- ---help---
+ help
Support for the TI VPE(Video Processing Engine) block
found on DRA7XX SoC.
config VIDEO_TI_VPE_DEBUG
bool "VPE debug messages"
depends on VIDEO_TI_VPE
- ---help---
+ help
Enable debug messages on VPE driver.
config VIDEO_QCOM_VENUS
@@ -495,7 +495,7 @@ config VIDEO_QCOM_VENUS
select QCOM_SCM if ARCH_QCOM
select VIDEOBUF2_DMA_SG
select V4L2_MEM2MEM_DEV
- ---help---
+ help
This is a V4L2 driver for Qualcomm Venus video accelerator
hardware. It accelerates encoding and decoding operations
on various Qualcomm SoCs.
@@ -530,7 +530,7 @@ config VIDEO_VIM2M
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
default n
- ---help---
+ help
This is a virtual test device for the memory-to-memory driver
framework.
@@ -542,7 +542,7 @@ menuconfig DVB_PLATFORM_DRIVERS
bool "DVB platform devices"
depends on MEDIA_DIGITAL_TV_SUPPORT
default n
- ---help---
+ help
Say Y here to enable support for platform-specific Digital TV drivers.
if DVB_PLATFORM_DRIVERS
@@ -562,7 +562,7 @@ config VIDEO_CROS_EC_CEC
select CEC_NOTIFIER
select CHROME_PLATFORMS
select CROS_EC_PROTO
- ---help---
+ help
If you say yes here you will get support for the
ChromeOS Embedded Controller's CEC.
The CEC bus is present in the HDMI connector and enables communication
@@ -573,18 +573,34 @@ config VIDEO_MESON_AO_CEC
depends on ARCH_MESON || COMPILE_TEST
select CEC_CORE
select CEC_NOTIFIER
- ---help---
+ help
This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
+config VIDEO_MESON_G12A_AO_CEC
+ tristate "Amlogic Meson G12A AO CEC driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ select REGMAP
+ select REGMAP_MMIO
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for Amlogic Meson G12A SoCs AO CEC interface.
+ This driver is for the new AO-CEC module found in G12A SoCs,
+ usually named AO_CEC_B in documentation.
+ It uses the generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
config CEC_GPIO
tristate "Generic GPIO-based CEC driver"
depends on PREEMPT || COMPILE_TEST
select CEC_CORE
select CEC_PIN
select GPIOLIB
- ---help---
+ help
This is a generic GPIO-based CEC driver.
The CEC bus is present in the HDMI connector and enables communication
between compatible devices.
@@ -594,7 +610,7 @@ config VIDEO_SAMSUNG_S5P_CEC
depends on ARCH_EXYNOS || COMPILE_TEST
select CEC_CORE
select CEC_NOTIFIER
- ---help---
+ help
This is a driver for Samsung S5P HDMI CEC interface. It uses the
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
@@ -605,7 +621,7 @@ config VIDEO_STI_HDMI_CEC
depends on ARCH_STI || COMPILE_TEST
select CEC_CORE
select CEC_NOTIFIER
- ---help---
+ help
This is a driver for STIH4xx HDMI CEC interface. It uses the
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
@@ -617,7 +633,7 @@ config VIDEO_STM32_HDMI_CEC
select REGMAP
select REGMAP_MMIO
select CEC_CORE
- ---help---
+ help
This is a driver for STM32 interface. It uses the
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
@@ -628,7 +644,7 @@ config VIDEO_TEGRA_HDMI_CEC
depends on ARCH_TEGRA || COMPILE_TEST
select CEC_CORE
select CEC_NOTIFIER
- ---help---
+ help
This is a driver for the Tegra HDMI CEC interface. It uses the
generic CEC framework interface.
The CEC bus is present in the HDMI connector and enables communication
@@ -649,7 +665,7 @@ config VIDEO_SECO_CEC
config VIDEO_SECO_RC
bool "SECO Boards IR RC5 support"
depends on VIDEO_SECO_CEC
- depends on RC_CORE
+ depends on RC_CORE=y || RC_CORE = VIDEO_SECO_CEC
help
If you say yes here you will get support for the
SECO Boards Consumer-IR in seco-cec driver.
@@ -662,7 +678,7 @@ menuconfig SDR_PLATFORM_DRIVERS
bool "SDR platform devices"
depends on MEDIA_SDR_SUPPORT
default n
- ---help---
+ help
Say Y here to enable support for platform-specific SDR Drivers.
if SDR_PLATFORM_DRIVERS
@@ -672,7 +688,7 @@ config VIDEO_RCAR_DRIF
depends on VIDEO_V4L2
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_VMALLOC
- ---help---
+ help
Say Y if you want to enable R-Car Gen3 DRIF support. DRIF is Digital
Radio Interface that interfaces with an RF front end chip. It is a
receiver of digital data which uses DMA to transfer received data to
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 5c17624aaade..fe7b937eb5f2 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1540,7 +1540,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
if (!fmt)
return -EINVAL;
- strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
f->type = vpfe->fmt.type;
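Note on the strncpy() to strscpy() change above: strscpy() always NUL-terminates the destination and reports truncation, which strncpy() does not. A minimal sketch of that behaviour, using a hypothetical helper and buffer names not taken from the patch:

	#include <linux/printk.h>
	#include <linux/string.h>

	/* Hypothetical helper: copy a format name into a fixed-size buffer. */
	static void copy_description(char *dst, size_t dst_size, const char *name)
	{
		ssize_t ret;

		/*
		 * strscpy() copies at most dst_size - 1 bytes, always writes a
		 * terminating NUL and returns -E2BIG if the source was
		 * truncated; strncpy() guarantees neither.
		 */
		ret = strscpy(dst, name, dst_size);
		if (ret == -E2BIG)
			pr_debug("description \"%s\" truncated\n", name);
	}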
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index 692e08ef38c0..8144fe36ad48 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -14,7 +14,6 @@
#include <linux/of_irq.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -208,7 +207,6 @@ struct aspeed_video {
void __iomem *base;
struct clk *eclk;
struct clk *vclk;
- struct reset_control *rst;
struct device *dev;
struct v4l2_ctrl_handler ctrl_handler;
@@ -483,19 +481,10 @@ static void aspeed_video_enable_mode_detect(struct aspeed_video *video)
aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_TRIG_MODE_DET);
}
-static void aspeed_video_reset(struct aspeed_video *video)
-{
- /* Reset the engine */
- reset_control_assert(video->rst);
-
- /* Don't usleep here; function may be called in interrupt context */
- udelay(100);
- reset_control_deassert(video->rst);
-}
-
static void aspeed_video_off(struct aspeed_video *video)
{
- aspeed_video_reset(video);
+ /* Disable interrupts */
+ aspeed_video_write(video, VE_INTERRUPT_CTRL, 0);
/* Turn off the relevant clocks */
clk_disable_unprepare(video->vclk);
@@ -507,8 +496,6 @@ static void aspeed_video_on(struct aspeed_video *video)
/* Turn on the relevant clocks */
clk_prepare_enable(video->eclk);
clk_prepare_enable(video->vclk);
-
- aspeed_video_reset(video);
}
static void aspeed_video_bufs_done(struct aspeed_video *video,
@@ -1464,7 +1451,9 @@ static void aspeed_video_stop_streaming(struct vb2_queue *q)
* Need to force stop any DMA and try and get HW into a good
* state for future calls to start streaming again.
*/
- aspeed_video_reset(video);
+ aspeed_video_off(video);
+ aspeed_video_on(video);
+
aspeed_video_init_regs(video);
aspeed_video_get_resolution(video);
@@ -1619,17 +1608,7 @@ static int aspeed_video_init(struct aspeed_video *video)
return PTR_ERR(video->vclk);
}
- video->rst = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(video->rst)) {
- dev_err(dev, "Unable to get VE reset\n");
- return PTR_ERR(video->rst);
- }
-
- rc = of_reserved_mem_device_init(dev);
- if (rc) {
- dev_err(dev, "Unable to reserve memory\n");
- return rc;
- }
+ of_reserved_mem_device_init(dev);
rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (rc) {
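Note on the aspeed-video change above: with the dedicated reset line gone, the driver quiesces the engine by masking its interrupts and cycling the clocks instead. A minimal sketch of that balanced clock handling, with hypothetical structure and register names rather than the driver's own:

	#include <linux/clk.h>
	#include <linux/io.h>

	/* Hypothetical engine context mirroring the eclk/vclk pair above. */
	struct engine {
		void __iomem *base;
		struct clk *eclk;
		struct clk *vclk;
	};

	static void engine_on(struct engine *e)
	{
		/* Each clk_prepare_enable() is paired with clk_disable_unprepare(). */
		clk_prepare_enable(e->eclk);
		clk_prepare_enable(e->vclk);
	}

	static void engine_off(struct engine *e)
	{
		writel(0, e->base + 0x304);	/* hypothetical interrupt-control offset */
		clk_disable_unprepare(e->vclk);
		clk_disable_unprepare(e->eclk);
	}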
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
index a211ef20f77e..c3f6a47cdc0e 100644
--- a/drivers/media/platform/atmel/Kconfig
+++ b/drivers/media/platform/atmel/Kconfig
@@ -15,6 +15,6 @@ config VIDEO_ATMEL_ISI
depends on ARCH_AT91 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
- ---help---
+ help
This module makes the ATMEL Image Sensor Interface available
as a v4l2 device.
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index 2aadc19235ea..d730693f299c 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -24,6 +24,8 @@
#define ISC_PFE_CFG0_HPOL_LOW BIT(0)
#define ISC_PFE_CFG0_VPOL_LOW BIT(1)
#define ISC_PFE_CFG0_PPOL_LOW BIT(2)
+#define ISC_PFE_CFG0_CCIR656 BIT(9)
+#define ISC_PFE_CFG0_CCIR_CRC BIT(10)
#define ISC_PFE_CFG0_MODE_PROGRESSIVE (0x0 << 4)
#define ISC_PFE_CFG0_MODE_MASK GENMASK(6, 4)
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index 50178968b8a6..4bba9da206e4 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -89,35 +89,25 @@ struct isc_subdev_entity {
struct list_head list;
};
-/* Indicate the format is generated by the sensor */
-#define FMT_FLAG_FROM_SENSOR BIT(0)
-/* Indicate the format is produced by ISC itself */
-#define FMT_FLAG_FROM_CONTROLLER BIT(1)
-/* Indicate a Raw Bayer format */
-#define FMT_FLAG_RAW_FORMAT BIT(2)
-
-#define FMT_FLAG_RAW_FROM_SENSOR (FMT_FLAG_FROM_SENSOR | \
- FMT_FLAG_RAW_FORMAT)
-
/*
* struct isc_format - ISC media bus format information
+ This structure represents the interface between the ISC
+ and the sensor. It's the input format received by
+ the ISC.
* @fourcc: Fourcc code for this format
* @mbus_code: V4L2 media bus format code.
- * flags: Indicate format from sensor or converted by controller
- * @bpp: Bits per pixel (when stored in memory)
- * (when transferred over a bus)
- * @sd_support: Subdev supports this format
- * @isc_support: ISC can convert raw format to this format
+ * @cfa_baycfg: If this format is RAW BAYER, indicate the type of bayer.
+ this is either BGBG, RGRG, etc.
+ * @pfe_cfg0_bps: Number of hardware data lines connected to the ISC
*/
struct isc_format {
u32 fourcc;
u32 mbus_code;
- u32 flags;
- u8 bpp;
+ u32 cfa_baycfg;
bool sd_support;
- bool isc_support;
+ u32 pfe_cfg0_bps;
};
/* Pipeline bitmap */
@@ -135,16 +125,31 @@ struct isc_format {
#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
+/*
+ * struct fmt_config - ISC format configuration and internal pipeline
+ This structure represents the internal configuration
+ of the ISC.
+ It also holds the format that ISC will present to v4l2.
+ * @sd_format: Pointer to an isc_format struct that holds the sensor
+ configuration.
+ * @fourcc: Fourcc code for this format.
+ * @bpp: Bytes per pixel in the current format.
+ * @rlp_cfg_mode: Configuration of the RLP (rounding, limiting packaging)
+ * @dcfg_imode: Configuration of the input of the DMA module
+ * @dctrl_dview: Configuration of the output of the DMA module
+ * @bits_pipeline: Configuration of the pipeline, which modules are enabled
+ */
struct fmt_config {
- u32 fourcc;
+ struct isc_format *sd_format;
- u32 pfe_cfg0_bps;
- u32 cfa_baycfg;
- u32 rlp_cfg_mode;
- u32 dcfg_imode;
- u32 dctrl_dview;
+ u32 fourcc;
+ u8 bpp;
- u32 bits_pipeline;
+ u32 rlp_cfg_mode;
+ u32 dcfg_imode;
+ u32 dctrl_dview;
+
+ u32 bits_pipeline;
};
#define HIST_ENTRIES 512
@@ -196,8 +201,9 @@ struct isc_device {
struct v4l2_format fmt;
struct isc_format **user_formats;
unsigned int num_user_formats;
- const struct isc_format *current_fmt;
- const struct isc_format *raw_fmt;
+
+ struct fmt_config config;
+ struct fmt_config try_config;
struct isc_ctrls ctrls;
struct work_struct awb_work;
@@ -210,319 +216,125 @@ struct isc_device {
struct list_head subdev_entities;
};
-static struct isc_format formats_list[] = {
- {
- .fourcc = V4L2_PIX_FMT_SBGGR8,
- .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 8,
- },
+/* This is a list of the formats that the ISC can *output* */
+static struct isc_format controller_formats[] = {
{
- .fourcc = V4L2_PIX_FMT_SGBRG8,
- .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 8,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGRBG8,
- .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 8,
- },
- {
- .fourcc = V4L2_PIX_FMT_SRGGB8,
- .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 8,
- },
- {
- .fourcc = V4L2_PIX_FMT_SBGGR10,
- .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 16,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGBRG10,
- .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 16,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGRBG10,
- .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_ARGB444,
},
{
- .fourcc = V4L2_PIX_FMT_SRGGB10,
- .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_ARGB555,
},
{
- .fourcc = V4L2_PIX_FMT_SBGGR12,
- .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
},
{
- .fourcc = V4L2_PIX_FMT_SGBRG12,
- .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_ABGR32,
},
{
- .fourcc = V4L2_PIX_FMT_SGRBG12,
- .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_XBGR32,
},
{
- .fourcc = V4L2_PIX_FMT_SRGGB12,
- .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .flags = FMT_FLAG_RAW_FROM_SENSOR,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_YUV420,
},
{
- .fourcc = V4L2_PIX_FMT_YUV420,
- .mbus_code = 0x0,
- .flags = FMT_FLAG_FROM_CONTROLLER,
- .bpp = 12,
+ .fourcc = V4L2_PIX_FMT_YUYV,
},
{
.fourcc = V4L2_PIX_FMT_YUV422P,
- .mbus_code = 0x0,
- .flags = FMT_FLAG_FROM_CONTROLLER,
- .bpp = 16,
},
{
.fourcc = V4L2_PIX_FMT_GREY,
- .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
- .flags = FMT_FLAG_FROM_CONTROLLER |
- FMT_FLAG_FROM_SENSOR,
- .bpp = 8,
- },
- {
- .fourcc = V4L2_PIX_FMT_ARGB444,
- .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
- .flags = FMT_FLAG_FROM_CONTROLLER,
- .bpp = 16,
- },
- {
- .fourcc = V4L2_PIX_FMT_ARGB555,
- .mbus_code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
- .flags = FMT_FLAG_FROM_CONTROLLER,
- .bpp = 16,
- },
- {
- .fourcc = V4L2_PIX_FMT_RGB565,
- .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .flags = FMT_FLAG_FROM_CONTROLLER,
- .bpp = 16,
- },
- {
- .fourcc = V4L2_PIX_FMT_ARGB32,
- .mbus_code = MEDIA_BUS_FMT_ARGB8888_1X32,
- .flags = FMT_FLAG_FROM_CONTROLLER,
- .bpp = 32,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
- .flags = FMT_FLAG_FROM_CONTROLLER |
- FMT_FLAG_FROM_SENSOR,
- .bpp = 16,
},
};
-static struct fmt_config fmt_configs_list[] = {
+/* This is a list of formats that the ISC can receive as *input* */
+static struct isc_format formats_list[] = {
{
.fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0
},
{
.fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUV420,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
- .dcfg_imode = ISC_DCFG_IMODE_YC420P,
- .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
- .bits_pipeline = SUB420_ENABLE | SUB422_ENABLE |
- CBC_ENABLE | CSC_ENABLE |
- GAM_ENABLES |
- CFA_ENABLE | WB_ENABLE,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUV422P,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
- .dcfg_imode = ISC_DCFG_IMODE_YC422P,
- .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
- .bits_pipeline = SUB422_ENABLE |
- CBC_ENABLE | CSC_ENABLE |
- GAM_ENABLES |
- CFA_ENABLE | WB_ENABLE,
},
{
.fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = CBC_ENABLE | CSC_ENABLE |
- GAM_ENABLES |
- CFA_ENABLE | WB_ENABLE,
- },
- {
- .fourcc = V4L2_PIX_FMT_ARGB444,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
},
{
- .fourcc = V4L2_PIX_FMT_ARGB555,
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
},
{
.fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
- },
- {
- .fourcc = V4L2_PIX_FMT_ARGB32,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB32,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED32,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
- .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
- .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
- .bits_pipeline = 0x0
},
};
@@ -571,6 +383,13 @@ static const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
0x3E20007, 0x3E90007, 0x3F00008, 0x3F80007 },
};
+#define ISC_IS_FORMAT_RAW(mbus_code) \
+ (((mbus_code) & 0xf000) == 0x3000)
+
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
static unsigned int sensor_preferred = 1;
module_param(sensor_preferred, uint, 0644);
MODULE_PARM_DESC(sensor_preferred,
@@ -896,40 +715,17 @@ static int isc_buffer_prepare(struct vb2_buffer *vb)
return 0;
}
-static inline bool sensor_is_preferred(const struct isc_format *isc_fmt)
-{
- return (sensor_preferred && isc_fmt->sd_support) ||
- !isc_fmt->isc_support;
-}
-
-static struct fmt_config *get_fmt_config(u32 fourcc)
-{
- struct fmt_config *config;
- int i;
-
- config = &fmt_configs_list[0];
- for (i = 0; i < ARRAY_SIZE(fmt_configs_list); i++) {
- if (config->fourcc == fourcc)
- return config;
-
- config++;
- }
- return NULL;
-}
-
static void isc_start_dma(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
- struct v4l2_pix_format *pixfmt = &isc->fmt.fmt.pix;
- u32 sizeimage = pixfmt->sizeimage;
- struct fmt_config *config = get_fmt_config(isc->current_fmt->fourcc);
+ u32 sizeimage = isc->fmt.fmt.pix.sizeimage;
u32 dctrl_dview;
dma_addr_t addr0;
addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
regmap_write(regmap, ISC_DAD0, addr0);
- switch (pixfmt->pixelformat) {
+ switch (isc->config.fourcc) {
case V4L2_PIX_FMT_YUV420:
regmap_write(regmap, ISC_DAD1, addr0 + (sizeimage * 2) / 3);
regmap_write(regmap, ISC_DAD2, addr0 + (sizeimage * 5) / 6);
@@ -942,10 +738,7 @@ static void isc_start_dma(struct isc_device *isc)
break;
}
- if (sensor_is_preferred(isc->current_fmt))
- dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
- else
- dctrl_dview = config->dctrl_dview;
+ dctrl_dview = isc->config.dctrl_dview;
regmap_write(regmap, ISC_DCTRL, dctrl_dview | ISC_DCTRL_IE_IS);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
@@ -955,7 +748,6 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
- struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
u32 val, bay_cfg;
const u32 *gamma;
unsigned int i;
@@ -969,7 +761,7 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
if (!pipeline)
return;
- bay_cfg = config->cfa_baycfg;
+ bay_cfg = isc->config.sd_format->cfa_baycfg;
regmap_write(regmap, ISC_WB_CFG, bay_cfg);
regmap_write(regmap, ISC_WB_O_RGR, 0x0);
@@ -1011,24 +803,24 @@ static int isc_update_profile(struct isc_device *isc)
}
if (counter < 0) {
- v4l2_warn(&isc->v4l2_dev, "Time out to update profie\n");
+ v4l2_warn(&isc->v4l2_dev, "Time out to update profile\n");
return -ETIMEDOUT;
}
return 0;
}
-static void isc_set_histogram(struct isc_device *isc)
+static void isc_set_histogram(struct isc_device *isc, bool enable)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
- struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
- if (ctrls->awb && (ctrls->hist_stat != HIST_ENABLED)) {
+ if (enable) {
regmap_write(regmap, ISC_HIS_CFG,
ISC_HIS_CFG_MODE_R |
- (config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
- ISC_HIS_CFG_RAR);
+ (isc->config.sd_format->cfa_baycfg
+ << ISC_HIS_CFG_BAYSEL_SHIFT) |
+ ISC_HIS_CFG_RAR);
regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_EN);
regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
ctrls->hist_id = ISC_HIS_CFG_MODE_R;
@@ -1036,7 +828,7 @@ static void isc_set_histogram(struct isc_device *isc)
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
ctrls->hist_stat = HIST_ENABLED;
- } else if (!ctrls->awb && (ctrls->hist_stat != HIST_DISABLED)) {
+ } else {
regmap_write(regmap, ISC_INTDIS, ISC_INT_HISDONE);
regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_DIS);
@@ -1044,58 +836,24 @@ static void isc_set_histogram(struct isc_device *isc)
}
}
-static inline void isc_get_param(const struct isc_format *fmt,
- u32 *rlp_mode, u32 *dcfg)
-{
- struct fmt_config *config = get_fmt_config(fmt->fourcc);
-
- *dcfg = ISC_DCFG_YMBSIZE_BEATS8;
-
- switch (fmt->fourcc) {
- case V4L2_PIX_FMT_SBGGR10:
- case V4L2_PIX_FMT_SGBRG10:
- case V4L2_PIX_FMT_SGRBG10:
- case V4L2_PIX_FMT_SRGGB10:
- case V4L2_PIX_FMT_SBGGR12:
- case V4L2_PIX_FMT_SGBRG12:
- case V4L2_PIX_FMT_SGRBG12:
- case V4L2_PIX_FMT_SRGGB12:
- *rlp_mode = config->rlp_cfg_mode;
- *dcfg |= config->dcfg_imode;
- break;
- default:
- *rlp_mode = ISC_RLP_CFG_MODE_DAT8;
- *dcfg |= ISC_DCFG_IMODE_PACKED8;
- break;
- }
-}
-
static int isc_configure(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
- const struct isc_format *current_fmt = isc->current_fmt;
- struct fmt_config *curfmt_config = get_fmt_config(current_fmt->fourcc);
- struct fmt_config *rawfmt_config = get_fmt_config(isc->raw_fmt->fourcc);
- struct isc_subdev_entity *subdev = isc->current_subdev;
u32 pfe_cfg0, rlp_mode, dcfg, mask, pipeline;
+ struct isc_subdev_entity *subdev = isc->current_subdev;
- if (sensor_is_preferred(current_fmt)) {
- pfe_cfg0 = curfmt_config->pfe_cfg0_bps;
- pipeline = 0x0;
- isc_get_param(current_fmt, &rlp_mode, &dcfg);
- isc->ctrls.hist_stat = HIST_INIT;
- } else {
- pfe_cfg0 = rawfmt_config->pfe_cfg0_bps;
- pipeline = curfmt_config->bits_pipeline;
- rlp_mode = curfmt_config->rlp_cfg_mode;
- dcfg = curfmt_config->dcfg_imode |
+ pfe_cfg0 = isc->config.sd_format->pfe_cfg0_bps;
+ rlp_mode = isc->config.rlp_cfg_mode;
+ pipeline = isc->config.bits_pipeline;
+
+ dcfg = isc->config.dcfg_imode |
ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
- }
pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW |
ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW |
- ISC_PFE_CFG0_MODE_MASK;
+ ISC_PFE_CFG0_MODE_MASK | ISC_PFE_CFG0_CCIR_CRC |
+ ISC_PFE_CFG0_CCIR656;
regmap_update_bits(regmap, ISC_PFE_CFG0, mask, pfe_cfg0);
@@ -1107,8 +865,15 @@ static int isc_configure(struct isc_device *isc)
/* Set the pipeline */
isc_set_pipeline(isc, pipeline);
- if (pipeline)
- isc_set_histogram(isc);
+ /*
+ * The current implemented histogram is available for RAW R, B, GB
+ * channels. We need to check if sensor is outputting RAW BAYER
+ */
+ if (isc->ctrls.awb &&
+ ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
+ isc_set_histogram(isc, true);
+ else
+ isc_set_histogram(isc, false);
/* Update profile */
return isc_update_profile(isc);
@@ -1125,7 +890,8 @@ static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Enable stream on the sub device */
ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD) {
- v4l2_err(&isc->v4l2_dev, "stream on failed in subdev\n");
+ v4l2_err(&isc->v4l2_dev, "stream on failed in subdev %d\n",
+ ret);
goto err_start_stream;
}
@@ -1223,6 +989,22 @@ static void isc_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
}
+static struct isc_format *find_format_by_fourcc(struct isc_device *isc,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = isc->num_user_formats;
+ struct isc_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = isc->user_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
static const struct vb2_ops isc_vb2_ops = {
.queue_setup = isc_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
@@ -1249,15 +1031,31 @@ static int isc_querycap(struct file *file, void *priv,
static int isc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct isc_device *isc = video_drvdata(file);
u32 index = f->index;
+ u32 i, supported_index;
- if (index >= isc->num_user_formats)
- return -EINVAL;
+ if (index < ARRAY_SIZE(controller_formats)) {
+ f->pixelformat = controller_formats[index].fourcc;
+ return 0;
+ }
- f->pixelformat = isc->user_formats[index]->fourcc;
+ index -= ARRAY_SIZE(controller_formats);
- return 0;
+ i = 0;
+ supported_index = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats_list); i++) {
+ if (!ISC_IS_FORMAT_RAW(formats_list[i].mbus_code) ||
+ !formats_list[i].sd_support)
+ continue;
+ if (supported_index == index) {
+ f->pixelformat = formats_list[i].fourcc;
+ return 0;
+ }
+ supported_index++;
+ }
+
+ return -EINVAL;
}
static int isc_g_fmt_vid_cap(struct file *file, void *priv,
@@ -1270,26 +1068,238 @@ static int isc_g_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static struct isc_format *find_format_by_fourcc(struct isc_device *isc,
- unsigned int fourcc)
+/*
+ * Checks the current configured format, if ISC can output it,
+ * considering which type of format the ISC receives from the sensor
+ */
+static int isc_try_validate_formats(struct isc_device *isc)
{
- unsigned int num_formats = isc->num_user_formats;
- struct isc_format *fmt;
- unsigned int i;
+ int ret;
+ bool bayer = false, yuv = false, rgb = false, grey = false;
+
+ /* all formats supported by the RLP module are OK */
+ switch (isc->try_config.fourcc) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ ret = 0;
+ bayer = true;
+ break;
- for (i = 0; i < num_formats; i++) {
- fmt = isc->user_formats[i];
- if (fmt->fourcc == fourcc)
- return fmt;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUYV:
+ ret = 0;
+ yuv = true;
+ break;
+
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_ARGB444:
+ case V4L2_PIX_FMT_ARGB555:
+ ret = 0;
+ rgb = true;
+ break;
+ case V4L2_PIX_FMT_GREY:
+ ret = 0;
+ grey = true;
+ break;
+ default:
+ /* any other different formats are not supported */
+ ret = -EINVAL;
}
- return NULL;
+ /* we cannot output RAW/Grey if we do not receive RAW */
+ if ((bayer || grey) &&
+ !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
+ return -EINVAL;
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "Format validation, requested rgb=%u, yuv=%u, grey=%u, bayer=%u\n",
+ rgb, yuv, grey, bayer);
+
+ return ret;
+}
+
+/*
+ * Configures the RLP and DMA modules, depending on the output format
+ * configured for the ISC.
+ * If direct_dump == true, just dump raw data 8 bits.
+ */
+static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
+{
+ if (direct_dump) {
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ return 0;
+ }
+
+ switch (isc->try_config.fourcc) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 8;
+ break;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_ARGB444:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_ARGB555:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB32;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 32;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC420P;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR;
+ isc->try_config.bpp = 12;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC422P;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_GREY:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Configuring pipeline modules, depending on which format the ISC outputs
+ * and considering which format it has as input from the sensor.
+ */
+static int isc_try_configure_pipeline(struct isc_device *isc)
+{
+ switch (isc->try_config.fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_ARGB555:
+ case V4L2_PIX_FMT_ARGB444:
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ WB_ENABLE | GAM_ENABLES;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
+ SUB420_ENABLE | SUB422_ENABLE | CBC_ENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
+ SUB422_ENABLE | CBC_ENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
+ SUB422_ENABLE | CBC_ENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ case V4L2_PIX_FMT_GREY:
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ /* if sensor format is RAW, we convert inside ISC */
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
+ CBC_ENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ default:
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ return 0;
}
static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
- struct isc_format **current_fmt, u32 *code)
+ u32 *code)
{
- struct isc_format *isc_fmt;
+ int i;
+ struct isc_format *sd_fmt = NULL, *direct_fmt = NULL;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
struct v4l2_subdev_format format = {
@@ -1297,48 +1307,119 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
};
u32 mbus_code;
int ret;
+ bool rlp_dma_direct_dump = false;
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- isc_fmt = find_format_by_fourcc(isc, pixfmt->pixelformat);
- if (!isc_fmt) {
- v4l2_warn(&isc->v4l2_dev, "Format 0x%x not found\n",
- pixfmt->pixelformat);
- isc_fmt = isc->user_formats[isc->num_user_formats - 1];
- pixfmt->pixelformat = isc_fmt->fourcc;
+ /* Step 1: find a RAW format that is supported */
+ for (i = 0; i < isc->num_user_formats; i++) {
+ if (ISC_IS_FORMAT_RAW(isc->user_formats[i]->mbus_code)) {
+ sd_fmt = isc->user_formats[i];
+ break;
+ }
+ }
+ /* Step 2: We can continue with this RAW format, or we can look
+ * for better: maybe sensor supports directly what we need.
+ */
+ direct_fmt = find_format_by_fourcc(isc, pixfmt->pixelformat);
+
+ /* Step 3: We have both. We decide given the module parameter which
+ * one to use.
+ */
+ if (direct_fmt && sd_fmt && sensor_preferred)
+ sd_fmt = direct_fmt;
+
+ /* Step 4: we do not have RAW but we have a direct format. Use it. */
+ if (direct_fmt && !sd_fmt)
+ sd_fmt = direct_fmt;
+
+ /* Step 5: if we are using a direct format, we need to package
+ * everything as 8 bit data and just dump it
+ */
+ if (sd_fmt == direct_fmt)
+ rlp_dma_direct_dump = true;
+
+ /* Step 6: We have no format. This can happen if the userspace
+ * requests some weird/invalid format.
+ * In this case, default to whatever we have
+ */
+ if (!sd_fmt && !direct_fmt) {
+ sd_fmt = isc->user_formats[isc->num_user_formats - 1];
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "Sensor not supporting %.4s, using %.4s\n",
+ (char *)&pixfmt->pixelformat, (char *)&sd_fmt->fourcc);
+ }
+
+ if (!sd_fmt) {
+ ret = -EINVAL;
+ goto isc_try_fmt_err;
}
+ /* Step 7: Print out what we decided for debugging */
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "Preferring to have sensor using format %.4s\n",
+ (char *)&sd_fmt->fourcc);
+
+ /* Step 8: at this moment we decided which format the subdev will use */
+ isc->try_config.sd_format = sd_fmt;
+
/* Limit to Atmel ISC hardware capabilities */
if (pixfmt->width > ISC_MAX_SUPPORT_WIDTH)
pixfmt->width = ISC_MAX_SUPPORT_WIDTH;
if (pixfmt->height > ISC_MAX_SUPPORT_HEIGHT)
pixfmt->height = ISC_MAX_SUPPORT_HEIGHT;
- if (sensor_is_preferred(isc_fmt))
- mbus_code = isc_fmt->mbus_code;
- else
- mbus_code = isc->raw_fmt->mbus_code;
+ /*
+ * The mbus format is the one the subdev outputs.
+ * The pixels will be transferred in this format Sensor -> ISC
+ */
+ mbus_code = sd_fmt->mbus_code;
+
+ /*
+ * Validate formats. If the required format is not OK, default to raw.
+ */
+
+ isc->try_config.fourcc = pixfmt->pixelformat;
+
+ if (isc_try_validate_formats(isc)) {
+ pixfmt->pixelformat = isc->try_config.fourcc = sd_fmt->fourcc;
+ /* Re-try to validate the new format */
+ ret = isc_try_validate_formats(isc);
+ if (ret)
+ goto isc_try_fmt_err;
+ }
+
+ ret = isc_try_configure_rlp_dma(isc, rlp_dma_direct_dump);
+ if (ret)
+ goto isc_try_fmt_err;
+
+ ret = isc_try_configure_pipeline(isc);
+ if (ret)
+ goto isc_try_fmt_err;
v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
&pad_cfg, &format);
if (ret < 0)
- return ret;
+ goto isc_try_fmt_err;
v4l2_fill_pix_format(pixfmt, &format.format);
pixfmt->field = V4L2_FIELD_NONE;
- pixfmt->bytesperline = (pixfmt->width * isc_fmt->bpp) >> 3;
+ pixfmt->bytesperline = (pixfmt->width * isc->try_config.bpp) >> 3;
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
- if (current_fmt)
- *current_fmt = isc_fmt;
-
if (code)
*code = mbus_code;
return 0;
+
+isc_try_fmt_err:
+ v4l2_err(&isc->v4l2_dev, "Could not find any possible format for a working pipeline\n");
+ memset(&isc->try_config, 0, sizeof(isc->try_config));
+
+ return ret;
}
static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
@@ -1346,11 +1427,10 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- struct isc_format *current_fmt;
- u32 mbus_code;
+ u32 mbus_code = 0;
int ret;
- ret = isc_try_fmt(isc, f, &current_fmt, &mbus_code);
+ ret = isc_try_fmt(isc, f, &mbus_code);
if (ret)
return ret;
@@ -1361,7 +1441,10 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
return ret;
isc->fmt = *f;
- isc->current_fmt = current_fmt;
+ /* make the try configuration active */
+ isc->config = isc->try_config;
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev, "New ISC configuration in place\n");
return 0;
}
@@ -1382,7 +1465,7 @@ static int isc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct isc_device *isc = video_drvdata(file);
- return isc_try_fmt(isc, f, NULL, NULL);
+ return isc_try_fmt(isc, f, NULL);
}
static int isc_enum_input(struct file *file, void *priv,
@@ -1431,27 +1514,31 @@ static int isc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct isc_device *isc = video_drvdata(file);
- const struct isc_format *isc_fmt;
struct v4l2_subdev_frame_size_enum fse = {
.index = fsize->index,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- int ret;
+ int ret = -EINVAL;
+ int i;
- isc_fmt = find_format_by_fourcc(isc, fsize->pixel_format);
- if (!isc_fmt)
- return -EINVAL;
+ for (i = 0; i < isc->num_user_formats; i++)
+ if (isc->user_formats[i]->fourcc == fsize->pixel_format)
+ ret = 0;
- if (sensor_is_preferred(isc_fmt))
- fse.code = isc_fmt->mbus_code;
- else
- fse.code = isc->raw_fmt->mbus_code;
+ for (i = 0; i < ARRAY_SIZE(controller_formats); i++)
+ if (controller_formats[i].fourcc == fsize->pixel_format)
+ ret = 0;
+
+ if (ret)
+ return ret;
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
NULL, &fse);
if (ret)
return ret;
+ fse.code = isc->config.sd_format->mbus_code;
+
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = fse.max_width;
fsize->discrete.height = fse.max_height;
@@ -1463,29 +1550,32 @@ static int isc_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *fival)
{
struct isc_device *isc = video_drvdata(file);
- const struct isc_format *isc_fmt;
struct v4l2_subdev_frame_interval_enum fie = {
.index = fival->index,
.width = fival->width,
.height = fival->height,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- int ret;
+ int ret = -EINVAL;
+ int i;
- isc_fmt = find_format_by_fourcc(isc, fival->pixel_format);
- if (!isc_fmt)
- return -EINVAL;
+ for (i = 0; i < isc->num_user_formats; i++)
+ if (isc->user_formats[i]->fourcc == fival->pixel_format)
+ ret = 0;
- if (sensor_is_preferred(isc_fmt))
- fie.code = isc_fmt->mbus_code;
- else
- fie.code = isc->raw_fmt->mbus_code;
+ for (i = 0; i < ARRAY_SIZE(controller_formats); i++)
+ if (controller_formats[i].fourcc == fival->pixel_format)
+ ret = 0;
+
+ if (ret)
+ return ret;
ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
enum_frame_interval, NULL, &fie);
if (ret)
return ret;
+ fie.code = isc->config.sd_format->mbus_code;
fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fival->discrete = fie.interval;
@@ -1668,7 +1758,6 @@ static void isc_awb_work(struct work_struct *w)
struct isc_device *isc =
container_of(w, struct isc_device, awb_work);
struct regmap *regmap = isc->regmap;
- struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
struct isc_ctrls *ctrls = &isc->ctrls;
u32 hist_id = ctrls->hist_id;
u32 baysel;
@@ -1686,7 +1775,7 @@ static void isc_awb_work(struct work_struct *w)
}
ctrls->hist_id = hist_id;
- baysel = config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
+ baysel = isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
pm_runtime_get_sync(isc->dev);
@@ -1754,7 +1843,6 @@ static int isc_ctrl_init(struct isc_device *isc)
return 0;
}
-
static int isc_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1812,35 +1900,20 @@ static int isc_formats_init(struct isc_device *isc)
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ num_fmts = 0;
while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
NULL, &mbus_code)) {
mbus_code.index++;
fmt = find_format_by_code(mbus_code.code, &i);
- if ((!fmt) || (!(fmt->flags & FMT_FLAG_FROM_SENSOR)))
+ if (!fmt) {
+ v4l2_warn(&isc->v4l2_dev, "Mbus code %x not supported\n",
+ mbus_code.code);
continue;
+ }
fmt->sd_support = true;
-
- if (fmt->flags & FMT_FLAG_RAW_FORMAT)
- isc->raw_fmt = fmt;
- }
-
- fmt = &formats_list[0];
- for (i = 0; i < list_size; i++) {
- if (fmt->flags & FMT_FLAG_FROM_CONTROLLER)
- fmt->isc_support = true;
-
- fmt++;
- }
-
- fmt = &formats_list[0];
- num_fmts = 0;
- for (i = 0; i < list_size; i++) {
- if (fmt->isc_support || fmt->sd_support)
- num_fmts++;
-
- fmt++;
+ num_fmts++;
}
if (!num_fmts)
@@ -1855,9 +1928,8 @@ static int isc_formats_init(struct isc_device *isc)
fmt = &formats_list[0];
for (i = 0, j = 0; i < list_size; i++) {
- if (fmt->isc_support || fmt->sd_support)
+ if (fmt->sd_support)
isc->user_formats[j++] = fmt;
-
fmt++;
}
@@ -1877,13 +1949,11 @@ static int isc_set_default_fmt(struct isc_device *isc)
};
int ret;
- ret = isc_try_fmt(isc, &f, NULL, NULL);
+ ret = isc_try_fmt(isc, &f, NULL);
if (ret)
return ret;
- isc->current_fmt = isc->user_formats[0];
isc->fmt = f;
-
return 0;
}
@@ -2084,6 +2154,10 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
+ if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
+ ISC_PFE_CFG0_CCIR656;
+
subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
subdev_entity->asd->match.fwnode =
of_fwnode_handle(rem);
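Note on the atmel-isc rework above: a requested format is now validated into try_config (RLP, DMA and pipeline settings included), and only a successful S_FMT copies it into the active config that isc_configure() programs. A minimal sketch of that try/commit pattern, with simplified hypothetical types, not the driver's real structures:

	#include <linux/types.h>

	struct cfg {
		u32 fourcc;
		u32 rlp_cfg_mode;
		u32 bits_pipeline;
	};

	struct dev_state {
		struct cfg config;	/* active, programmed into hardware */
		struct cfg try_config;	/* scratch, filled while trying a format */
	};

	static int try_fmt(struct dev_state *s, u32 fourcc)
	{
		s->try_config.fourcc = fourcc;
		/* ...fill rlp_cfg_mode/bits_pipeline, reject unsupported formats... */
		return 0;
	}

	static int set_fmt(struct dev_state *s, u32 fourcc)
	{
		int ret = try_fmt(s, fourcc);

		if (ret)
			return ret;
		/* Only a successful try becomes the active configuration. */
		s->config = s->try_config;
		return 0;
	}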
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index b4f396c2e72c..eaa86737fa04 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -2010,6 +2010,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
/* Clear decode success flag */
coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
+ /* Clear error return value */
+ coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
+
trace_coda_dec_pic_run(ctx, meta);
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index fa0b22fb7991..3ce58dee4422 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -764,6 +764,7 @@ static int coda_s_fmt_vid_cap(struct file *file, void *priv,
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct coda_q_data *q_data_src;
+ const struct coda_codec *codec;
struct v4l2_rect r;
int ret;
@@ -784,6 +785,15 @@ static int coda_s_fmt_vid_cap(struct file *file, void *priv,
if (ctx->inst_type != CODA_INST_ENCODER)
return 0;
+ /* Setting the coded format determines the selected codec */
+ codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ f->fmt.pix.pixelformat);
+ if (!codec) {
+ v4l2_err(&ctx->dev->v4l2_dev, "failed to determine codec\n");
+ return -EINVAL;
+ }
+ ctx->codec = codec;
+
ctx->colorspace = f->fmt.pix.colorspace;
ctx->xfer_func = f->fmt.pix.xfer_func;
ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
@@ -796,6 +806,7 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
+ const struct coda_codec *codec;
struct v4l2_format f_cap;
struct vb2_queue *dst_vq;
int ret;
@@ -808,14 +819,23 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
if (ret)
return ret;
- if (ctx->inst_type != CODA_INST_DECODER)
- return 0;
-
ctx->colorspace = f->fmt.pix.colorspace;
ctx->xfer_func = f->fmt.pix.xfer_func;
ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
ctx->quantization = f->fmt.pix.quantization;
+ if (ctx->inst_type != CODA_INST_DECODER)
+ return 0;
+
+ /* Setting the coded format determines the selected codec */
+ codec = coda_find_codec(ctx->dev, f->fmt.pix.pixelformat,
+ V4L2_PIX_FMT_YUV420);
+ if (!codec) {
+ v4l2_err(&ctx->dev->v4l2_dev, "failed to determine codec\n");
+ return -EINVAL;
+ }
+ ctx->codec = codec;
+
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (!dst_vq)
return -EINVAL;
@@ -980,6 +1000,11 @@ static int coda_s_selection(struct file *file, void *fh,
static int coda_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+
+ if (ctx->inst_type != CODA_INST_ENCODER)
+ return -ENOTTY;
+
if (ec->cmd != V4L2_ENC_CMD_STOP)
return -EINVAL;
@@ -1000,10 +1025,6 @@ static int coda_encoder_cmd(struct file *file, void *fh,
if (ret < 0)
return ret;
- /* Ignore encoder stop command silently in decoder context */
- if (ctx->inst_type != CODA_INST_ENCODER)
- return 0;
-
/* Set the stream-end flag on this context */
ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
@@ -1021,6 +1042,11 @@ static int coda_encoder_cmd(struct file *file, void *fh,
static int coda_try_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+
+ if (ctx->inst_type != CODA_INST_DECODER)
+ return -ENOTTY;
+
if (dc->cmd != V4L2_DEC_CMD_STOP)
return -EINVAL;
@@ -1043,10 +1069,6 @@ static int coda_decoder_cmd(struct file *file, void *fh,
if (ret < 0)
return ret;
- /* Ignore decoder stop command silently in encoder context */
- if (ctx->inst_type != CODA_INST_DECODER)
- return 0;
-
/* Set the stream-end flag on this context */
coda_bit_stream_end_flag(ctx);
ctx->hold = false;
@@ -1055,6 +1077,42 @@ static int coda_decoder_cmd(struct file *file, void *fh,
return 0;
}
+static int coda_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_q_data *q_data_dst;
+ const struct coda_codec *codec;
+
+ if (ctx->inst_type != CODA_INST_ENCODER)
+ return -ENOTTY;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ if (coda_format_normalize_yuv(fsize->pixel_format) ==
+ V4L2_PIX_FMT_YUV420) {
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ codec = coda_find_codec(ctx->dev, fsize->pixel_format,
+ q_data_dst->fourcc);
+ } else {
+ codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
+ fsize->pixel_format);
+ }
+ if (!codec)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = MIN_W;
+ fsize->stepwise.max_width = codec->max_w;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = MIN_H;
+ fsize->stepwise.max_height = codec->max_h;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
static int coda_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *f)
{
@@ -1233,6 +1291,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_g_parm = coda_g_parm,
.vidioc_s_parm = coda_s_parm,
+ .vidioc_enum_framesizes = coda_enum_framesizes,
.vidioc_enum_frameintervals = coda_enum_frameintervals,
.vidioc_subscribe_event = coda_subscribe_event,
@@ -1442,6 +1501,9 @@ static int coda_queue_setup(struct vb2_queue *vq,
q_data = get_q_data(ctx, vq->type);
size = q_data->sizeimage;
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
*nplanes = 1;
sizes[0] = size;
@@ -1680,14 +1742,6 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
ctx->gopcounter = ctx->params.gop_size - 1;
- ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
- q_data_dst->fourcc);
- if (!ctx->codec) {
- v4l2_err(v4l2_dev, "couldn't tell instance type.\n");
- ret = -EINVAL;
- goto err;
- }
-
if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
ctx->params.gop_size = 1;
ctx->gopcounter = ctx->params.gop_size - 1;
@@ -2015,7 +2069,6 @@ static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
static void coda_decode_ctrls(struct coda_ctx *ctx)
{
- u64 mask;
u8 max;
ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
@@ -2029,27 +2082,14 @@ static void coda_decode_ctrls(struct coda_ctx *ctx)
ctx->h264_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (ctx->dev->devtype->product == CODA_HX4 ||
- ctx->dev->devtype->product == CODA_7541) {
+ ctx->dev->devtype->product == CODA_7541)
max = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
- mask = ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0));
- } else if (ctx->dev->devtype->product == CODA_960) {
+ else if (ctx->dev->devtype->product == CODA_960)
max = V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
- mask = ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1));
- } else {
+ else
return;
- }
ctx->h264_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
- &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, max, mask,
- max);
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, max, 0, max);
if (ctx->h264_level_ctrl)
ctx->h264_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
}
@@ -2063,11 +2103,17 @@ static int coda_ctrls_setup(struct coda_ctx *ctx)
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
if (ctx->inst_type == CODA_INST_ENCODER) {
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+ 1, 1, 1, 1);
if (ctx->cvd->dst_formats[0] == V4L2_PIX_FMT_JPEG)
coda_jpeg_encode_ctrls(ctx);
else
coda_encode_ctrls(ctx);
} else {
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+ 1, 1, 1, 1);
if (ctx->cvd->src_formats[0] == V4L2_PIX_FMT_H264)
coda_decode_ctrls(ctx);
}
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
index 7bc4d8a9af28..068df9888dbf 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -236,6 +236,7 @@ static int cros_ec_cec_get_notifier(struct device *dev,
return -EPROBE_DEFER;
*notify = cec_notifier_get_conn(d, m->conn);
+ put_device(d);
return 0;
}
}
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 47cecd10eb9f..2dee9af6d413 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -884,9 +884,7 @@ static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
static int isif_config_ycbcr(void)
{
struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
- struct vpss_pg_frame_size frame_size;
u32 modeset = 0, ccdcfg = 0;
- struct vpss_sync_pol sync;
dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
@@ -974,13 +972,6 @@ static int isif_config_ycbcr(void)
/* two fields are interleaved in memory */
regw(0x00000249, SDOFST);
- /* Setup test pattern if enabled */
- if (isif_cfg.bayer.config_params.test_pat_gen) {
- sync.ccdpg_hdpol = params->hd_pol;
- sync.ccdpg_vdpol = params->vd_pol;
- dm365_vpss_set_sync_pol(sync);
- dm365_vpss_set_pg_frame_size(frame_size);
- }
return 0;
}
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 26dadbba930f..1e3a13830544 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1759,7 +1759,7 @@ static int vpfe_probe(struct platform_device *pdev)
mutex_lock(&ccdc_lock);
- strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32);
+ strscpy(ccdc_cfg->name, vpfe_cfg->ccdc, sizeof(ccdc_cfg->name));
/* Get VINT0 irq resource */
res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res1) {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 6216b7ac6875..b5aacb0fb96b 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1254,7 +1254,8 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
} else {
std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
}
- strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
+ strscpy(std_info->name, "Custom timings BT656/1120",
+ sizeof(std_info->name));
std_info->width = bt->width;
std_info->height = bt->height;
std_info->frm_fmt = bt->interlaced ? 0 : 1;
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index f4b4f2a1dfc0..a69897c68a50 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -987,8 +987,8 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
} else {
std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
}
- strncpy(std_info->name, "Custom timings BT656/1120",
- VPIF_MAX_NAME);
+ strscpy(std_info->name, "Custom timings BT656/1120",
+ sizeof(std_info->name));
std_info->width = bt->width;
std_info->height = bt->height;
std_info->frm_fmt = bt->interlaced ? 0 : 1;
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 3e9fcf4f8a13..de4af0357a3c 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -742,7 +742,7 @@ static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
f->index);
if (!fmt)
return -EINVAL;
- strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
if (fmt->fourcc == MEDIA_BUS_FMT_JPEG_1X8)
f->flags |= V4L2_FMT_FLAG_COMPRESSED;
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 61c8177409cf..1bea1ce4091e 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -252,7 +252,7 @@ static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
if (!fmt)
return -EINVAL;
- strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
return 0;
}
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index 0bcfc5aa8f3d..8e7ef23b9a7e 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -1025,8 +1025,8 @@ static irqreturn_t pxp_irq_handler(int irq, void *dev_id)
static int pxp_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strlcpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
- strlcpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
+ strscpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
+ strscpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", MEM2MEM_NAME);
return 0;
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell-ccic/Kconfig
index cf12e077203a..cd88e2eed749 100644
--- a/drivers/media/platform/marvell-ccic/Kconfig
+++ b/drivers/media/platform/marvell-ccic/Kconfig
@@ -5,7 +5,7 @@ config VIDEO_CAFE_CCIC
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_DMA_SG
- ---help---
+ help
This is a video4linux2 driver for the Marvell 88ALP01 integrated
CMOS camera controller. This is the controller found on first-
generation OLPC systems.
@@ -19,7 +19,7 @@ config VIDEO_MMP_CAMERA
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_DMA_SG
- ---help---
+ help
This is a Video4Linux2 driver for the integrated camera
controller found on Marvell Armada 610 application
processors (and likely beyond). This is the controller found
diff --git a/drivers/media/platform/meson/Makefile b/drivers/media/platform/meson/Makefile
index 597beb8f34d1..f611c23c3718 100644
--- a/drivers/media/platform/meson/Makefile
+++ b/drivers/media/platform/meson/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_VIDEO_MESON_AO_CEC) += ao-cec.o
+obj-$(CONFIG_VIDEO_MESON_G12A_AO_CEC) += ao-cec-g12a.o
diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
new file mode 100644
index 000000000000..3620a1e310f5
--- /dev/null
+++ b/drivers/media/platform/meson/ao-cec-g12a.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Amlogic Meson AO CEC G12A Controller
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+#include <linux/clk-provider.h>
+
+/* CEC Registers */
+
+#define CECB_CLK_CNTL_REG0 0x00
+
+#define CECB_CLK_CNTL_N1 GENMASK(11, 0)
+#define CECB_CLK_CNTL_N2 GENMASK(23, 12)
+#define CECB_CLK_CNTL_DUAL_EN BIT(28)
+#define CECB_CLK_CNTL_OUTPUT_EN BIT(30)
+#define CECB_CLK_CNTL_INPUT_EN BIT(31)
+
+#define CECB_CLK_CNTL_REG1 0x04
+
+#define CECB_CLK_CNTL_M1 GENMASK(11, 0)
+#define CECB_CLK_CNTL_M2 GENMASK(23, 12)
+#define CECB_CLK_CNTL_BYPASS_EN BIT(24)
+
+/*
+ * [14:12] Filter_del. For glitch-filtering the CEC line, ignore signal
+ * changes with a pulse width < filter_del * T(filter_tick) * 3.
+ * [9:8] Filter_tick_sel: Select which periodic pulse is used for
+ * glitch-filtering the CEC line signal.
+ * - 0=Use T(xtal)*3 = 125ns;
+ * - 1=Use once-per-1us pulse;
+ * - 2=Use once-per-10us pulse;
+ * - 3=Use once-per-100us pulse.
+ * [3] Sysclk_en. 0=Disable system clock; 1=Enable system clock.
+ * [2:1] cntl_clk
+ * - 0 = Disable clk (Power-off mode)
+ * - 1 = Enable gated clock (Normal mode)
+ * - 2 = Enable free-run clk (Debug mode)
+ * [0] SW_RESET 1=Apply reset; 0=No reset.
+ */
+#define CECB_GEN_CNTL_REG 0x08
+
+#define CECB_GEN_CNTL_RESET BIT(0)
+#define CECB_GEN_CNTL_CLK_DISABLE 0
+#define CECB_GEN_CNTL_CLK_ENABLE 1
+#define CECB_GEN_CNTL_CLK_ENABLE_DBG 2
+#define CECB_GEN_CNTL_CLK_CTRL_MASK GENMASK(2, 1)
+#define CECB_GEN_CNTL_SYS_CLK_EN BIT(3)
+#define CECB_GEN_CNTL_FILTER_TICK_125NS 0
+#define CECB_GEN_CNTL_FILTER_TICK_1US 1
+#define CECB_GEN_CNTL_FILTER_TICK_10US 2
+#define CECB_GEN_CNTL_FILTER_TICK_100US 3
+#define CECB_GEN_CNTL_FILTER_TICK_SEL GENMASK(9, 8)
+#define CECB_GEN_CNTL_FILTER_DEL GENMASK(14, 12)
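+
+/*
+ * A minimal sketch of how these fields can be composed, mirroring the
+ * values that meson_ao_cec_g12a_adap_enable() programs further below:
+ *
+ *	u32 cfg = FIELD_PREP(CECB_GEN_CNTL_FILTER_TICK_SEL,
+ *			     CECB_GEN_CNTL_FILTER_TICK_1US) |
+ *		  FIELD_PREP(CECB_GEN_CNTL_FILTER_DEL, 7) |
+ *		  CECB_GEN_CNTL_SYS_CLK_EN |
+ *		  FIELD_PREP(CECB_GEN_CNTL_CLK_CTRL_MASK,
+ *			     CECB_GEN_CNTL_CLK_ENABLE);
+ *
+ * The driver applies the same fields step by step with
+ * regmap_update_bits().
+ */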
+
+/*
+ * [7:0] cec_reg_addr
+ * [15:8] cec_reg_wrdata
+ * [16] cec_reg_wr
+ * - 0 = Read
+ * - 1 = Write
+ * [31:24] cec_reg_rddata
+ */
+#define CECB_RW_REG 0x0c
+
+#define CECB_RW_ADDR GENMASK(7, 0)
+#define CECB_RW_WR_DATA GENMASK(15, 8)
+#define CECB_RW_WRITE_EN BIT(16)
+#define CECB_RW_BUS_BUSY BIT(23)
+#define CECB_RW_RD_DATA GENMASK(31, 24)
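+
+/*
+ * A CEC-core register read through this window is therefore, in a
+ * minimal sketch (mirroring meson_ao_cec_g12a_read() below):
+ *
+ *	regmap_write(regmap, CECB_RW_REG, FIELD_PREP(CECB_RW_ADDR, addr));
+ *	poll CECB_RW_REG until CECB_RW_BUS_BUSY clears;
+ *	regmap_read(regmap, CECB_RW_REG, &reg);
+ *	data = FIELD_GET(CECB_RW_RD_DATA, reg);
+ *
+ * Writes set CECB_RW_WR_DATA and CECB_RW_WRITE_EN in the same register.
+ */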
+
+/*
+ * [0] DONE Interrupt
+ * [1] End Of Message Interrupt
+ * [2] Not Acknowledge Interrupt
+ * [3] Arbitration Loss Interrupt
+ * [4] Initiator Error Interrupt
+ * [5] Follower Error Interrupt
+ * [6] Wake-Up Interrupt
+ */
+#define CECB_INTR_MASKN_REG 0x10
+#define CECB_INTR_CLR_REG 0x14
+#define CECB_INTR_STAT_REG 0x18
+
+#define CECB_INTR_DONE BIT(0)
+#define CECB_INTR_EOM BIT(1)
+#define CECB_INTR_NACK BIT(2)
+#define CECB_INTR_ARB_LOSS BIT(3)
+#define CECB_INTR_INITIATOR_ERR BIT(4)
+#define CECB_INTR_FOLLOWER_ERR BIT(5)
+#define CECB_INTR_WAKE_UP BIT(6)
+
+/* CEC Commands */
+
+#define CECB_CTRL 0x00
+
+#define CECB_CTRL_SEND BIT(0)
+#define CECB_CTRL_TYPE GENMASK(2, 1)
+#define CECB_CTRL_TYPE_RETRY 0
+#define CECB_CTRL_TYPE_NEW 1
+#define CECB_CTRL_TYPE_NEXT 2
+
+#define CECB_CTRL2 0x01
+#define CECB_INTR_MASK 0x02
+#define CECB_LADD_LOW 0x05
+#define CECB_LADD_HIGH 0x06
+#define CECB_TX_CNT 0x07
+#define CECB_RX_CNT 0x08
+#define CECB_STAT0 0x09
+#define CECB_TX_DATA00 0x10
+#define CECB_TX_DATA01 0x11
+#define CECB_TX_DATA02 0x12
+#define CECB_TX_DATA03 0x13
+#define CECB_TX_DATA04 0x14
+#define CECB_TX_DATA05 0x15
+#define CECB_TX_DATA06 0x16
+#define CECB_TX_DATA07 0x17
+#define CECB_TX_DATA08 0x18
+#define CECB_TX_DATA09 0x19
+#define CECB_TX_DATA10 0x1A
+#define CECB_TX_DATA11 0x1B
+#define CECB_TX_DATA12 0x1C
+#define CECB_TX_DATA13 0x1D
+#define CECB_TX_DATA14 0x1E
+#define CECB_TX_DATA15 0x1F
+#define CECB_RX_DATA00 0x20
+#define CECB_RX_DATA01 0x21
+#define CECB_RX_DATA02 0x22
+#define CECB_RX_DATA03 0x23
+#define CECB_RX_DATA04 0x24
+#define CECB_RX_DATA05 0x25
+#define CECB_RX_DATA06 0x26
+#define CECB_RX_DATA07 0x27
+#define CECB_RX_DATA08 0x28
+#define CECB_RX_DATA09 0x29
+#define CECB_RX_DATA10 0x2A
+#define CECB_RX_DATA11 0x2B
+#define CECB_RX_DATA12 0x2C
+#define CECB_RX_DATA13 0x2D
+#define CECB_RX_DATA14 0x2E
+#define CECB_RX_DATA15 0x2F
+#define CECB_LOCK_BUF 0x30
+
+#define CECB_LOCK_BUF_EN BIT(0)
+
+#define CECB_WAKEUPCTRL 0x31
+
+struct meson_ao_cec_g12a_device {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct regmap *regmap_cec;
+ spinlock_t cec_reg_lock;
+ struct cec_notifier *notify;
+ struct cec_adapter *adap;
+ struct cec_msg rx_msg;
+ struct clk *oscin;
+ struct clk *core;
+};
+
+static const struct regmap_config meson_ao_cec_g12a_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = CECB_INTR_STAT_REG,
+};
+
+/*
+ * The AO-CECB embeds a dual divider to generate a more precise
+ * 32.768 kHz clock for the CEC core clock.
+ * ______ ______
+ * | | | |
+ * ______ | Div1 |-| Cnt1 | ______
+ * | | /|______| |______|\ | |
+ * Xtal-->| Gate |---| ______ ______ X-X--| Gate |-->
+ * |______| | \| | | |/ | |______|
+ * | | Div2 |-| Cnt2 | |
+ * | |______| |______| |
+ * |_______________________|
+ *
+ * The divider can be switched between single and dual mode, with a
+ * counter for each divider selecting when the switch-over happens.
+ * The entire divider can also be bypassed.
+ */
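+
+/*
+ * As a worked sketch, assuming the usual 24 MHz oscin xtal and the
+ * divider values programmed in meson_ao_cec_g12a_dualdiv_clk_enable()
+ * below (N1 = 733, N2 = 732, M1 = 8, M2 = 11):
+ *
+ *	f1 = 24000000 / 733 ~= 32742 Hz
+ *	f2 = 24000000 / 732 ~= 32787 Hz
+ *
+ * which meson_ao_cec_g12a_dualdiv_clk_recalc_rate() combines with the
+ * M1:M2 weighting into roughly 32766 Hz, i.e. close to the 32.768 kHz
+ * CEC core clock target.
+ */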
+
+struct meson_ao_cec_g12a_dualdiv_clk {
+ struct clk_hw hw;
+ struct regmap *regmap;
+};
+
+#define hw_to_meson_ao_cec_g12a_dualdiv_clk(_hw) \
+ container_of(_hw, struct meson_ao_cec_g12a_dualdiv_clk, hw) \
+
+static unsigned long
+meson_ao_cec_g12a_dualdiv_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk =
+ hw_to_meson_ao_cec_g12a_dualdiv_clk(hw);
+ unsigned long n1;
+ u32 reg0, reg1;
+
+ regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, &reg0);
+ regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1, &reg1);
+
+ if (reg1 & CECB_CLK_CNTL_BYPASS_EN)
+ return parent_rate;
+
+ if (reg0 & CECB_CLK_CNTL_DUAL_EN) {
+ unsigned long n2, m1, m2, f1, f2, p1, p2;
+
+ n1 = FIELD_GET(CECB_CLK_CNTL_N1, reg0) + 1;
+ n2 = FIELD_GET(CECB_CLK_CNTL_N2, reg0) + 1;
+
+ m1 = FIELD_GET(CECB_CLK_CNTL_M1, reg1) + 1;
+ m2 = FIELD_GET(CECB_CLK_CNTL_M2, reg1) + 1;
+
+ f1 = DIV_ROUND_CLOSEST(parent_rate, n1);
+ f2 = DIV_ROUND_CLOSEST(parent_rate, n2);
+
+ p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2));
+ p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2));
+
+ return DIV_ROUND_UP(100000000, p1 + p2);
+ }
+
+ n1 = FIELD_GET(CECB_CLK_CNTL_N1, reg0) + 1;
+
+ return DIV_ROUND_CLOSEST(parent_rate, n1);
+}
+
+static int meson_ao_cec_g12a_dualdiv_clk_enable(struct clk_hw *hw)
+{
+ struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk =
+ hw_to_meson_ao_cec_g12a_dualdiv_clk(hw);
+
+ /* Disable Input & Output */
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
+ CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN,
+ 0);
+
+ /* Set N1 & N2 */
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
+ CECB_CLK_CNTL_N1,
+ FIELD_PREP(CECB_CLK_CNTL_N1, 733 - 1));
+
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
+ CECB_CLK_CNTL_N2,
+ FIELD_PREP(CECB_CLK_CNTL_N2, 732 - 1));
+
+ /* Set M1 & M2 */
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1,
+ CECB_CLK_CNTL_M1,
+ FIELD_PREP(CECB_CLK_CNTL_M1, 8 - 1));
+
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1,
+ CECB_CLK_CNTL_M2,
+ FIELD_PREP(CECB_CLK_CNTL_M2, 11 - 1));
+
+ /* Enable Dual divisor */
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
+ CECB_CLK_CNTL_DUAL_EN, CECB_CLK_CNTL_DUAL_EN);
+
+ /* Disable divisor bypass */
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1,
+ CECB_CLK_CNTL_BYPASS_EN, 0);
+
+ /* Enable Input & Output */
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
+ CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN,
+ CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN);
+
+ return 0;
+}
+
+static void meson_ao_cec_g12a_dualdiv_clk_disable(struct clk_hw *hw)
+{
+ struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk =
+ hw_to_meson_ao_cec_g12a_dualdiv_clk(hw);
+
+ regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
+ CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN,
+ 0);
+}
+
+static int meson_ao_cec_g12a_dualdiv_clk_is_enabled(struct clk_hw *hw)
+{
+ struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk =
+ hw_to_meson_ao_cec_g12a_dualdiv_clk(hw);
+ int val;
+
+ regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, &val);
+
+ return !!(val & (CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN));
+}
+
+static const struct clk_ops meson_ao_cec_g12a_dualdiv_clk_ops = {
+ .recalc_rate = meson_ao_cec_g12a_dualdiv_clk_recalc_rate,
+ .is_enabled = meson_ao_cec_g12a_dualdiv_clk_is_enabled,
+ .enable = meson_ao_cec_g12a_dualdiv_clk_enable,
+ .disable = meson_ao_cec_g12a_dualdiv_clk_disable,
+};
+
+static int meson_ao_cec_g12a_setup_clk(struct meson_ao_cec_g12a_device *ao_cec)
+{
+ struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk;
+ struct device *dev = &ao_cec->pdev->dev;
+ struct clk_init_data init;
+ const char *parent_name;
+ struct clk *clk;
+ char *name;
+
+ dualdiv_clk = devm_kzalloc(dev, sizeof(*dualdiv_clk), GFP_KERNEL);
+ if (!dualdiv_clk)
+ return -ENOMEM;
+
+ name = kasprintf(GFP_KERNEL, "%s#dualdiv_clk", dev_name(dev));
+ if (!name)
+ return -ENOMEM;
+
+ parent_name = __clk_get_name(ao_cec->oscin);
+
+ init.name = name;
+ init.ops = &meson_ao_cec_g12a_dualdiv_clk_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ dualdiv_clk->regmap = ao_cec->regmap;
+ dualdiv_clk->hw.init = &init;
+
+ clk = devm_clk_register(dev, &dualdiv_clk->hw);
+ kfree(name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ao_cec->core = clk;
+
+ return 0;
+}
+
+static int meson_ao_cec_g12a_read(void *context, unsigned int addr,
+ unsigned int *data)
+{
+ struct meson_ao_cec_g12a_device *ao_cec = context;
+ u32 reg = FIELD_PREP(CECB_RW_ADDR, addr);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
+
+ ret = regmap_write(ao_cec->regmap, CECB_RW_REG, reg);
+ if (ret)
+ goto read_out;
+
+ ret = regmap_read_poll_timeout(ao_cec->regmap, CECB_RW_REG, reg,
+ !(reg & CECB_RW_BUS_BUSY),
+ 5, 1000);
+ if (ret)
+ goto read_out;
+
+ ret = regmap_read(ao_cec->regmap, CECB_RW_REG, &reg);
+
+ *data = FIELD_GET(CECB_RW_RD_DATA, reg);
+
+read_out:
+ spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
+
+ return ret;
+}
+
+static int meson_ao_cec_g12a_write(void *context, unsigned int addr,
+ unsigned int data)
+{
+ struct meson_ao_cec_g12a_device *ao_cec = context;
+ unsigned long flags;
+ u32 reg = FIELD_PREP(CECB_RW_ADDR, addr) |
+ FIELD_PREP(CECB_RW_WR_DATA, data) |
+ CECB_RW_WRITE_EN;
+ int ret = 0;
+
+ spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
+
+ ret = regmap_write(ao_cec->regmap, CECB_RW_REG, reg);
+
+ spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
+
+ return ret;
+}
+
+static const struct regmap_config meson_ao_cec_g12a_cec_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_read = meson_ao_cec_g12a_read,
+ .reg_write = meson_ao_cec_g12a_write,
+ .max_register = 0xffff,
+ .fast_io = true,
+};
+
+static inline void
+meson_ao_cec_g12a_irq_setup(struct meson_ao_cec_g12a_device *ao_cec,
+ bool enable)
+{
+ u32 cfg = CECB_INTR_DONE | CECB_INTR_EOM | CECB_INTR_NACK |
+ CECB_INTR_ARB_LOSS | CECB_INTR_INITIATOR_ERR |
+ CECB_INTR_FOLLOWER_ERR;
+
+ regmap_write(ao_cec->regmap, CECB_INTR_MASKN_REG,
+ enable ? cfg : 0);
+}
+
+static void meson_ao_cec_g12a_irq_rx(struct meson_ao_cec_g12a_device *ao_cec)
+{
+ int i, ret = 0;
+ u32 val;
+
+ ret = regmap_read(ao_cec->regmap_cec, CECB_RX_CNT, &val);
+
+ ao_cec->rx_msg.len = val;
+ if (ao_cec->rx_msg.len > CEC_MAX_MSG_SIZE)
+ ao_cec->rx_msg.len = CEC_MAX_MSG_SIZE;
+
+ for (i = 0; i < ao_cec->rx_msg.len; i++) {
+ ret |= regmap_read(ao_cec->regmap_cec,
+ CECB_RX_DATA00 + i, &val);
+
+ ao_cec->rx_msg.msg[i] = val & 0xff;
+ }
+
+ ret |= regmap_write(ao_cec->regmap_cec, CECB_LOCK_BUF, 0);
+ if (ret)
+ return;
+
+ cec_received_msg(ao_cec->adap, &ao_cec->rx_msg);
+}
+
+static irqreturn_t meson_ao_cec_g12a_irq(int irq, void *data)
+{
+ struct meson_ao_cec_g12a_device *ao_cec = data;
+ u32 stat;
+
+ regmap_read(ao_cec->regmap, CECB_INTR_STAT_REG, &stat);
+ if (stat)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t meson_ao_cec_g12a_irq_thread(int irq, void *data)
+{
+ struct meson_ao_cec_g12a_device *ao_cec = data;
+ u32 stat;
+
+ regmap_read(ao_cec->regmap, CECB_INTR_STAT_REG, &stat);
+ regmap_write(ao_cec->regmap, CECB_INTR_CLR_REG, stat);
+
+ if (stat & CECB_INTR_DONE)
+ cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_OK);
+
+ if (stat & CECB_INTR_EOM)
+ meson_ao_cec_g12a_irq_rx(ao_cec);
+
+ if (stat & CECB_INTR_NACK)
+ cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_NACK);
+
+ if (stat & CECB_INTR_ARB_LOSS) {
+ regmap_write(ao_cec->regmap_cec, CECB_TX_CNT, 0);
+ regmap_update_bits(ao_cec->regmap_cec, CECB_CTRL,
+ CECB_CTRL_SEND | CECB_CTRL_TYPE, 0);
+ cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ARB_LOST);
+ }
+
+ /* Initiator reports an error on the CEC bus */
+ if (stat & CECB_INTR_INITIATOR_ERR)
+ cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ERROR);
+
+ /* Follower reports a receive error, just reset RX buffer */
+ if (stat & CECB_INTR_FOLLOWER_ERR)
+ regmap_write(ao_cec->regmap_cec, CECB_LOCK_BUF, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int
+meson_ao_cec_g12a_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct meson_ao_cec_g12a_device *ao_cec = adap->priv;
+ int ret = 0;
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID) {
+ /* Assume this will always succeed */
+ regmap_write(ao_cec->regmap_cec, CECB_LADD_LOW, 0);
+ regmap_write(ao_cec->regmap_cec, CECB_LADD_HIGH, 0);
+
+ return 0;
+ } else if (logical_addr < 8) {
+ ret = regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_LOW,
+ BIT(logical_addr),
+ BIT(logical_addr));
+ } else {
+ ret = regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_HIGH,
+ BIT(logical_addr - 8),
+ BIT(logical_addr - 8));
+ }
+
+ /* Always set Broadcast/Unregistered 15 address */
+ ret |= regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_HIGH,
+ BIT(CEC_LOG_ADDR_UNREGISTERED - 8),
+ BIT(CEC_LOG_ADDR_UNREGISTERED - 8));
+
+ return ret ? -EIO : 0;
+}
+
+static int meson_ao_cec_g12a_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct meson_ao_cec_g12a_device *ao_cec = adap->priv;
+ unsigned int type;
+ int ret = 0;
+ u32 val;
+ int i;
+
+ /* Check if RX is in progress */
+ ret = regmap_read(ao_cec->regmap_cec, CECB_LOCK_BUF, &val);
+ if (ret)
+ return ret;
+ if (val & CECB_LOCK_BUF_EN)
+ return -EBUSY;
+
+ /* Check if TX Busy */
+ ret = regmap_read(ao_cec->regmap_cec, CECB_CTRL, &val);
+ if (ret)
+ return ret;
+ if (val & CECB_CTRL_SEND)
+ return -EBUSY;
+
+ switch (signal_free_time) {
+ case CEC_SIGNAL_FREE_TIME_RETRY:
+ type = CECB_CTRL_TYPE_RETRY;
+ break;
+ case CEC_SIGNAL_FREE_TIME_NEXT_XFER:
+ type = CECB_CTRL_TYPE_NEXT;
+ break;
+ case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR:
+ default:
+ type = CECB_CTRL_TYPE_NEW;
+ break;
+ }
+
+ for (i = 0; i < msg->len; i++)
+ ret |= regmap_write(ao_cec->regmap_cec, CECB_TX_DATA00 + i,
+ msg->msg[i]);
+
+ ret |= regmap_write(ao_cec->regmap_cec, CECB_TX_CNT, msg->len);
+ if (ret)
+ return -EIO;
+
+ ret = regmap_update_bits(ao_cec->regmap_cec, CECB_CTRL,
+ CECB_CTRL_SEND |
+ CECB_CTRL_TYPE,
+ CECB_CTRL_SEND |
+ FIELD_PREP(CECB_CTRL_TYPE, type));
+
+ return ret;
+}
+
+static int meson_ao_cec_g12a_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct meson_ao_cec_g12a_device *ao_cec = adap->priv;
+
+ meson_ao_cec_g12a_irq_setup(ao_cec, false);
+
+ regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
+ CECB_GEN_CNTL_RESET, CECB_GEN_CNTL_RESET);
+
+ if (!enable)
+ return 0;
+
+ /* Setup Filter */
+ regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
+ CECB_GEN_CNTL_FILTER_TICK_SEL |
+ CECB_GEN_CNTL_FILTER_DEL,
+ FIELD_PREP(CECB_GEN_CNTL_FILTER_TICK_SEL,
+ CECB_GEN_CNTL_FILTER_TICK_1US) |
+ FIELD_PREP(CECB_GEN_CNTL_FILTER_DEL, 7));
+
+ /* Enable System Clock */
+ regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
+ CECB_GEN_CNTL_SYS_CLK_EN,
+ CECB_GEN_CNTL_SYS_CLK_EN);
+
+ /* Enable gated clock (Normal mode). */
+ regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
+ CECB_GEN_CNTL_CLK_CTRL_MASK,
+ FIELD_PREP(CECB_GEN_CNTL_CLK_CTRL_MASK,
+ CECB_GEN_CNTL_CLK_ENABLE));
+
+ /* Release Reset */
+ regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
+ CECB_GEN_CNTL_RESET, 0);
+
+ meson_ao_cec_g12a_irq_setup(ao_cec, true);
+
+ return 0;
+}
+
+static const struct cec_adap_ops meson_ao_cec_g12a_ops = {
+ .adap_enable = meson_ao_cec_g12a_adap_enable,
+ .adap_log_addr = meson_ao_cec_g12a_set_log_addr,
+ .adap_transmit = meson_ao_cec_g12a_transmit,
+};
+
+static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
+{
+ struct meson_ao_cec_g12a_device *ao_cec;
+ struct device *hdmi_dev;
+ struct resource *res;
+ void __iomem *base;
+ int ret, irq;
+
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
+
+ ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL);
+ if (!ao_cec)
+ return -ENOMEM;
+
+ spin_lock_init(&ao_cec->cec_reg_lock);
+ ao_cec->pdev = pdev;
+
+ ao_cec->notify = cec_notifier_get(hdmi_dev);
+ if (!ao_cec->notify)
+ return -ENOMEM;
+
+ ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_g12a_ops, ao_cec,
+ "meson_g12a_ao_cec",
+ CEC_CAP_DEFAULTS,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(ao_cec->adap)) {
+ ret = PTR_ERR(ao_cec->adap);
+ goto out_probe_notify;
+ }
+
+ ao_cec->adap->owner = THIS_MODULE;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_probe_adapter;
+ }
+
+ ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &meson_ao_cec_g12a_regmap_conf);
+ if (IS_ERR(ao_cec->regmap)) {
+ ret = PTR_ERR(ao_cec->regmap);
+ goto out_probe_adapter;
+ }
+
+ ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec,
+ &meson_ao_cec_g12a_cec_regmap_conf);
+ if (IS_ERR(ao_cec->regmap_cec)) {
+ ret = PTR_ERR(ao_cec->regmap_cec);
+ goto out_probe_adapter;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ meson_ao_cec_g12a_irq,
+ meson_ao_cec_g12a_irq_thread,
+ 0, NULL, ao_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request failed\n");
+ goto out_probe_adapter;
+ }
+
+ ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin");
+ if (IS_ERR(ao_cec->oscin)) {
+ dev_err(&pdev->dev, "oscin clock request failed\n");
+ ret = PTR_ERR(ao_cec->oscin);
+ goto out_probe_adapter;
+ }
+
+ ret = meson_ao_cec_g12a_setup_clk(ao_cec);
+ if (ret)
+ goto out_probe_adapter;
+
+ ret = clk_prepare_enable(ao_cec->core);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock enable failed\n");
+ goto out_probe_adapter;
+ }
+
+ device_reset_optional(&pdev->dev);
+
+ platform_set_drvdata(pdev, ao_cec);
+
+ ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
+ if (ret < 0) {
+ cec_notifier_put(ao_cec->notify);
+ goto out_probe_core_clk;
+ }
+
+ /* Setup Hardware */
+ regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET);
+
+ cec_register_cec_notifier(ao_cec->adap, ao_cec->notify);
+
+ return 0;
+
+out_probe_core_clk:
+ clk_disable_unprepare(ao_cec->core);
+
+out_probe_adapter:
+ cec_delete_adapter(ao_cec->adap);
+
+out_probe_notify:
+ cec_notifier_put(ao_cec->notify);
+
+ dev_err(&pdev->dev, "CEC controller registration failed\n");
+
+ return ret;
+}
+
+static int meson_ao_cec_g12a_remove(struct platform_device *pdev)
+{
+ struct meson_ao_cec_g12a_device *ao_cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ao_cec->core);
+
+ cec_unregister_adapter(ao_cec->adap);
+
+ cec_notifier_put(ao_cec->notify);
+
+ return 0;
+}
+
+static const struct of_device_id meson_ao_cec_g12a_of_match[] = {
+ { .compatible = "amlogic,meson-g12a-ao-cec", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_ao_cec_g12a_of_match);
+
+static struct platform_driver meson_ao_cec_g12a_driver = {
+ .probe = meson_ao_cec_g12a_probe,
+ .remove = meson_ao_cec_g12a_remove,
+ .driver = {
+ .name = "meson-ao-cec-g12a",
+ .of_match_table = of_match_ptr(meson_ao_cec_g12a_of_match),
+ },
+};
+
+module_platform_driver(meson_ao_cec_g12a_driver);
+
+MODULE_DESCRIPTION("Meson AO CEC G12A Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
index cd4be38ab5ac..facf9b029e79 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -601,20 +601,14 @@ static const struct cec_adap_ops meson_ao_cec_ops = {
static int meson_ao_cec_probe(struct platform_device *pdev)
{
struct meson_ao_cec_device *ao_cec;
- struct platform_device *hdmi_dev;
- struct device_node *np;
+ struct device *hdmi_dev;
struct resource *res;
int ret, irq;
- np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
- if (!np) {
- dev_err(&pdev->dev, "Failed to find hdmi node\n");
- return -ENODEV;
- }
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
- hdmi_dev = of_find_device_by_node(np);
- if (hdmi_dev == NULL)
- return -EPROBE_DEFER;
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL);
if (!ao_cec)
@@ -622,7 +616,7 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
spin_lock_init(&ao_cec->cec_reg_lock);
- ao_cec->notify = cec_notifier_get(&hdmi_dev->dev);
+ ao_cec->notify = cec_notifier_get(hdmi_dev);
if (!ao_cec->notify)
return -ENOMEM;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index d022c65bb34c..851903867bc9 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -129,11 +129,9 @@ static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
mutex_lock(&ctx->lock);
if (dstbuf->used) {
vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
- ctx->picinfo.y_bs_sz);
+ ctx->picinfo.fb_sz[0]);
vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
- ctx->picinfo.c_bs_sz);
-
- dstbuf->ready_to_display = true;
+ ctx->picinfo.fb_sz[1]);
mtk_v4l2_debug(2,
"[%d]status=%x queue id=%d to done_list %d",
@@ -278,6 +276,27 @@ static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
clean_free_buffer(ctx);
}
+static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
+ unsigned int pixelformat)
+{
+ struct mtk_video_fmt *fmt;
+ struct mtk_q_data *dst_q_data;
+ unsigned int k;
+
+ dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == pixelformat) {
+ mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
+ dst_q_data->fmt.fourcc, pixelformat);
+ dst_q_data->fmt = fmt;
+ return;
+ }
+ }
+
+ mtk_v4l2_err("Cannot get fourcc(%d), using init value", pixelformat);
+}
+
static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
{
unsigned int dpbsize = 0;
@@ -299,6 +318,10 @@ static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
return -EINVAL;
}
+ if (ctx->last_decoded_picinfo.cap_fourcc != ctx->picinfo.cap_fourcc &&
+ ctx->picinfo.cap_fourcc != 0)
+ mtk_vdec_update_fmt(ctx, ctx->picinfo.cap_fourcc);
+
if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
(ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
return 0;
@@ -352,11 +375,11 @@ static void mtk_vdec_worker(struct work_struct *work)
pfb = &dst_buf_info->frame_buffer;
pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;
+ pfb->base_y.size = ctx->picinfo.fb_sz[0];
pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1);
pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
- pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
+ pfb->base_c.size = ctx->picinfo.fb_sz[1];
pfb->status = 0;
mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
@@ -388,7 +411,7 @@ static void mtk_vdec_worker(struct work_struct *work)
}
buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
- buf.size = (size_t)src_buf->planes[0].bytesused;
+ buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
if (!buf.va) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
@@ -976,14 +999,13 @@ static int vidioc_vdec_g_fmt(struct file *file, void *priv,
* So we just return picinfo yet, and update picinfo in
* stop_streaming hook function
*/
- q_data->sizeimage[0] = ctx->picinfo.y_bs_sz +
- ctx->picinfo.y_len_sz;
- q_data->sizeimage[1] = ctx->picinfo.c_bs_sz +
- ctx->picinfo.c_len_sz;
+ q_data->sizeimage[0] = ctx->picinfo.fb_sz[0];
+ q_data->sizeimage[1] = ctx->picinfo.fb_sz[1];
q_data->bytesperline[0] = ctx->last_decoded_picinfo.buf_w;
q_data->bytesperline[1] = ctx->last_decoded_picinfo.buf_w;
q_data->coded_width = ctx->picinfo.buf_w;
q_data->coded_height = ctx->picinfo.buf_h;
+ ctx->last_decoded_picinfo.cap_fourcc = q_data->fmt->fourcc;
/*
* Width and height are set to the dimensions
@@ -1103,10 +1125,11 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
struct mtk_vcodec_mem src_mem;
bool res_chg = false;
int ret = 0;
- unsigned int dpbsize = 1;
+ unsigned int dpbsize = 1, i = 0;
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
struct mtk_video_dec_buf *buf = NULL;
+ struct mtk_q_data *dst_q_data;
mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
ctx->id, vb->vb2_queue->type,
@@ -1122,11 +1145,9 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
buf->queued_in_vb2 = true;
buf->queued_in_v4l2 = true;
- buf->ready_to_display = false;
} else {
buf->queued_in_vb2 = false;
buf->queued_in_v4l2 = true;
- buf->ready_to_display = false;
}
mutex_unlock(&ctx->lock);
return;
@@ -1155,10 +1176,10 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
- src_mem.size = (size_t)src_buf->planes[0].bytesused;
+ src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
mtk_v4l2_debug(2,
"[%d] buf id=%d va=%p dma=%pad size=%zx",
- ctx->id, src_buf->index,
+ ctx->id, src_buf->vb2_buf.index,
src_mem.va, &src_mem.dma_addr,
src_mem.size);
@@ -1182,7 +1203,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
}
mtk_v4l2_debug(ret ? 0 : 1,
"[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
- ctx->id, src_buf->index,
+ ctx->id, src_buf->vb2_buf.index,
src_mem.size, ret, res_chg);
return;
}
@@ -1194,21 +1215,18 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
}
ctx->last_decoded_picinfo = ctx->picinfo;
- ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
- ctx->picinfo.y_bs_sz +
- ctx->picinfo.y_len_sz;
- ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
- ctx->picinfo.buf_w;
- ctx->q_data[MTK_Q_DATA_DST].sizeimage[1] =
- ctx->picinfo.c_bs_sz +
- ctx->picinfo.c_len_sz;
- ctx->q_data[MTK_Q_DATA_DST].bytesperline[1] = ctx->picinfo.buf_w;
+ dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ for (i = 0; i < dst_q_data->fmt->num_planes; i++) {
+ dst_q_data->sizeimage[i] = ctx->picinfo.fb_sz[i];
+ dst_q_data->bytesperline[i] = ctx->picinfo.buf_w;
+ }
+
mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
ctx->id,
ctx->picinfo.buf_w, ctx->picinfo.buf_h,
ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
- ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+ dst_q_data->sizeimage[0],
+ dst_q_data->sizeimage[1]);
ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
if (dpbsize == 0)
@@ -1253,7 +1271,6 @@ static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
buf->used = false;
- buf->ready_to_display = false;
buf->queued_in_v4l2 = false;
} else {
buf->lastframe = false;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index dc4fc1df63c5..e4984edec4f8 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -45,7 +45,6 @@ struct vdec_fb {
* @list: link list
* @used: Capture buffer contain decoded frame data and keep in
* codec data structure
- * @ready_to_display: Capture buffer not display yet
* @queued_in_vb2: Capture buffer is queue in vb2
* @queued_in_v4l2: Capture buffer is in v4l2 driver, but not in vb2
* queue yet
@@ -60,7 +59,6 @@ struct mtk_video_dec_buf {
struct list_head list;
bool used;
- bool ready_to_display;
bool queued_in_vb2;
bool queued_in_v4l2;
bool lastframe;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index e7e2a108def9..662a84b822af 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -211,24 +211,20 @@ struct mtk_vcodec_pm {
* @pic_h: picture height
* @buf_w: picture buffer width (64 aligned up from pic_w)
* @buf_h: picture buffer height (64 aligned up from pic_h)
- * @y_bs_sz: Y bitstream size
- * @c_bs_sz: CbCr bitstream size
- * @y_len_sz: additional size required to store decompress information for y
- * plane
- * @c_len_sz: additional size required to store decompress information for cbcr
- * plane
+ * @fb_sz: bitstream size of each plane
* E.g. suppose picture size is 176x144,
* buffer size will be aligned to 176x160.
+ * @cap_fourcc: fourcc number (may change when the resolution changes)
+ * @reserved: aligns the struct to 64 bits so the layout stays identical on 32-bit and 64-bit OSes.
*/
struct vdec_pic_info {
unsigned int pic_w;
unsigned int pic_h;
unsigned int buf_w;
unsigned int buf_h;
- unsigned int y_bs_sz;
- unsigned int c_bs_sz;
- unsigned int y_len_sz;
- unsigned int c_len_sz;
+ unsigned int fb_sz[VIDEO_MAX_PLANES];
+ unsigned int cap_fourcc;
+ unsigned int reserved;
};
/**
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index c6b48b5925fb..50351adafc47 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -894,7 +894,7 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
- dst_buf->planes[0].bytesused = 0;
+ dst_buf->vb2_buf.planes[0].bytesused = 0;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
}
} else {
@@ -947,7 +947,7 @@ static int mtk_venc_encode_header(void *priv)
bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- bs_buf.size = (size_t)dst_buf->planes[0].length;
+ bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;
mtk_v4l2_debug(1,
"[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
@@ -976,7 +976,7 @@ static int mtk_venc_encode_header(void *priv)
}
ctx->state = MTK_STATE_HEADER;
- dst_buf->planes[0].bytesused = enc_result.bs_size;
+ dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
return 0;
@@ -1107,12 +1107,12 @@ static void mtk_venc_worker(struct work_struct *work)
if (ret) {
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- dst_buf->planes[0].bytesused = 0;
+ dst_buf->vb2_buf.planes[0].bytesused = 0;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
mtk_v4l2_err("venc_if_encode failed=%d", ret);
} else {
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- dst_buf->planes[0].bytesused = enc_result.bs_size;
+ dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
enc_result.bs_size);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 02c960c63ac0..cdbcd6909728 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -253,8 +253,8 @@ static void get_pic_info(struct vdec_h264_inst *inst,
*pic = inst->vsi->pic;
mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
- mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
- pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+ mtk_vcodec_debug(inst, "fb size: Y(%d), C(%d)",
+ pic->fb_sz[0], pic->fb_sz[1]);
}
static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr)
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index bac3723038de..ba79136421ef 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -294,8 +294,8 @@ static void get_pic_info(struct vdec_vp8_inst *inst, struct vdec_pic_info *pic)
mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
- mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
- pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+ mtk_vcodec_debug(inst, "fb size: Y(%d), C(%d)",
+ pic->fb_sz[0], pic->fb_sz[1]);
}
static void vp8_dec_finish(struct vdec_vp8_inst *inst)
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index bc8349bc2e80..939ea14bf6c5 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -481,15 +481,15 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
*/
if ((frm_to_show->fb != NULL) &&
(inst->cur_fb->base_y.size >=
- frm_to_show->fb->base_y.size)) {
+ frm_to_show->fb->base_y.size) &&
+ (inst->cur_fb->base_c.size >=
+ frm_to_show->fb->base_c.size)) {
memcpy((void *)inst->cur_fb->base_y.va,
(void *)frm_to_show->fb->base_y.va,
- vsi->buf_w *
- vsi->buf_h);
+ frm_to_show->fb->base_y.size);
memcpy((void *)inst->cur_fb->base_c.va,
(void *)frm_to_show->fb->base_c.va,
- vsi->buf_w *
- vsi->buf_h / 2);
+ frm_to_show->fb->base_c.size);
} else {
/* After resolution change case, current CAPTURE buffer
* may have less buffer size than frm_to_show buffer
@@ -702,10 +702,8 @@ static void init_all_fb_lists(struct vdec_vp9_inst *inst)
static void get_pic_info(struct vdec_vp9_inst *inst, struct vdec_pic_info *pic)
{
- pic->y_bs_sz = inst->vsi->buf_sz_y_bs;
- pic->c_bs_sz = inst->vsi->buf_sz_c_bs;
- pic->y_len_sz = inst->vsi->buf_len_sz_y;
- pic->c_len_sz = inst->vsi->buf_len_sz_c;
+ pic->fb_sz[0] = inst->vsi->buf_sz_y_bs + inst->vsi->buf_len_sz_y;
+ pic->fb_sz[1] = inst->vsi->buf_sz_c_bs + inst->vsi->buf_len_sz_c;
pic->pic_w = inst->vsi->pic_w;
pic->pic_h = inst->vsi->pic_h;
@@ -714,8 +712,9 @@ static void get_pic_info(struct vdec_vp9_inst *inst, struct vdec_pic_info *pic)
mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
- mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
- pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+ mtk_vcodec_debug(inst, "fb size: Y(%d), C(%d)",
+ pic->fb_sz[0],
+ pic->fb_sz[1]);
}
static void get_disp_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
@@ -895,7 +894,7 @@ static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
if (vsi->resolution_changed) {
if (!vp9_alloc_work_buf(inst)) {
- ret = -EINVAL;
+ ret = -EIO;
goto DECODE_ERROR;
}
}
@@ -924,14 +923,12 @@ static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
if (vsi->show_existing_frame && (vsi->frm_to_show_idx <
VP9_MAX_FRM_BUF_NUM)) {
- mtk_vcodec_err(inst,
+ mtk_vcodec_debug(inst,
"Skip Decode drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
vsi->new_fb_idx, vsi->frm_to_show_idx);
vp9_ref_cnt_fb(inst, &vsi->new_fb_idx,
vsi->frm_to_show_idx);
- ret = -EINVAL;
- goto DECODE_ERROR;
}
/* VPU assign the buffer pointer in its address space,
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index b6602490a247..46c45f93c977 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -614,7 +614,7 @@ static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
struct vpu_run *run = (struct vpu_run *)data;
vpu->run.signaled = run->signaled;
- strncpy(vpu->run.fw_ver, run->fw_ver, VPU_FW_VER_LEN);
+ strscpy(vpu->run.fw_ver, run->fw_ver, sizeof(vpu->run.fw_ver));
vpu->run.dec_capability = run->dec_capability;
vpu->run.enc_capability = run->enc_capability;
wake_up_interruptible(&vpu->run.wq);
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index f60f499c596b..e100b30bb6f5 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -385,8 +385,8 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
- strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ strscpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
+ strscpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index 4b5e55d41ad4..30ce2ba120a1 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -14,5 +14,5 @@ config VIDEO_OMAP2_VOUT
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select FRAME_VECTOR
default n
- ---help---
+ help
V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 4fe228752a43..a632f06d9fff 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2350,7 +2350,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- remote = of_graph_get_remote_port(np);
+ remote = of_graph_get_remote_port_parent(np);
if (remote)
asd->match.fwnode = of_fwnode_handle(remote);
else
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 15804ad7e65d..34ea503a9842 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -569,7 +569,7 @@ struct hfi_capability {
struct hfi_capabilities {
u32 num_capabilities;
- struct hfi_capability data[1];
+ struct hfi_capability *data;
};
#define HFI_DEBUG_MSG_LOW 0x01
@@ -726,7 +726,7 @@ struct hfi_profile_level {
struct hfi_profile_level_supported {
u32 profile_count;
- struct hfi_profile_level profile_level[1];
+ struct hfi_profile_level *profile_level;
};
struct hfi_quality_vs_speed {
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
index e3eb8fee2536..240ac3f3c941 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_RCAR_CSI2
tristate "R-Car MIPI CSI-2 Receiver"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF
depends on ARCH_RENESAS || COMPILE_TEST
+ select RESET_CONTROLLER
select V4L2_FWNODE
help
Support for Renesas R-Car MIPI CSI-2 receiver.
@@ -17,7 +18,7 @@ config VIDEO_RCAR_VIN
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
- ---help---
+ help
Support for Renesas R-Car Video Input (VIN) driver.
Supports R-Car Gen2 and Gen3 SoCs.
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 594d80434004..64f9cf790445 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -546,7 +546,9 @@ static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name);
+ mutex_lock(&vin->lock);
rvin_parallel_subdevice_detach(vin);
+ mutex_unlock(&vin->lock);
}
static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
@@ -556,7 +558,9 @@ static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
int ret;
+ mutex_lock(&vin->lock);
ret = rvin_parallel_subdevice_attach(vin, subdev);
+ mutex_unlock(&vin->lock);
if (ret)
return ret;
@@ -664,6 +668,7 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
}
/* Create all media device links between VINs and CSI-2's. */
+ mutex_lock(&vin->group->lock);
for (route = vin->info->routes; route->mask; route++) {
struct media_pad *source_pad, *sink_pad;
struct media_entity *source, *sink;
@@ -699,6 +704,7 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
break;
}
}
+ mutex_unlock(&vin->group->lock);
return ret;
}
@@ -714,6 +720,8 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
if (vin->group->vin[i])
rvin_v4l2_unregister(vin->group->vin[i]);
+ mutex_lock(&vin->group->lock);
+
for (i = 0; i < RVIN_CSI_MAX; i++) {
if (vin->group->csi[i].fwnode != asd->match.fwnode)
continue;
@@ -721,6 +729,8 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i);
break;
}
+
+ mutex_unlock(&vin->group->lock);
}
static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
@@ -730,6 +740,8 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
+ mutex_lock(&vin->group->lock);
+
for (i = 0; i < RVIN_CSI_MAX; i++) {
if (vin->group->csi[i].fwnode != asd->match.fwnode)
continue;
@@ -738,6 +750,8 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
break;
}
+ mutex_unlock(&vin->group->lock);
+
return 0;
}
@@ -752,6 +766,7 @@ static int rvin_mc_parse_of_endpoint(struct device *dev,
struct v4l2_async_subdev *asd)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
+ int ret = 0;
if (vep->base.port != 1 || vep->base.id >= RVIN_CSI_MAX)
return -EINVAL;
@@ -762,38 +777,48 @@ static int rvin_mc_parse_of_endpoint(struct device *dev,
return -ENOTCONN;
}
+ mutex_lock(&vin->group->lock);
+
if (vin->group->csi[vep->base.id].fwnode) {
vin_dbg(vin, "OF device %pOF already handled\n",
to_of_node(asd->match.fwnode));
- return -ENOTCONN;
+ ret = -ENOTCONN;
+ goto out;
}
vin->group->csi[vep->base.id].fwnode = asd->match.fwnode;
vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
to_of_node(asd->match.fwnode), vep->base.id);
+out:
+ mutex_unlock(&vin->group->lock);
- return 0;
+ return ret;
}
static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
{
- unsigned int count = 0;
+ unsigned int count = 0, vin_mask = 0;
unsigned int i;
int ret;
mutex_lock(&vin->group->lock);
/* If not all VIN's are registered don't register the notifier. */
- for (i = 0; i < RCAR_VIN_NUM; i++)
- if (vin->group->vin[i])
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i]) {
count++;
+ vin_mask |= BIT(i);
+ }
+ }
if (vin->group->count != count) {
mutex_unlock(&vin->group->lock);
return 0;
}
+ mutex_unlock(&vin->group->lock);
+
v4l2_async_notifier_init(&vin->group->notifier);
/*
@@ -802,21 +827,17 @@ static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
* will only be registered once with the group notifier.
*/
for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (!vin->group->vin[i])
+ if (!(vin_mask & BIT(i)))
continue;
ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
vin->group->vin[i]->dev, &vin->group->notifier,
sizeof(struct v4l2_async_subdev), 1,
rvin_mc_parse_of_endpoint);
- if (ret) {
- mutex_unlock(&vin->group->lock);
+ if (ret)
return ret;
- }
}
- mutex_unlock(&vin->group->lock);
-
if (list_empty(&vin->group->notifier.asd_list))
return 0;
@@ -1136,6 +1157,10 @@ static const struct rvin_info rcar_info_r8a77995 = {
static const struct of_device_id rvin_of_id_table[] = {
{
+ .compatible = "renesas,vin-r8a774a1",
+ .data = &rcar_info_r8a7796,
+ },
+ {
.compatible = "renesas,vin-r8a774c0",
.data = &rcar_info_r8a77990,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index f64528d2be3c..799e526fd3df 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -14,6 +14,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/sys_soc.h>
#include <media/v4l2-ctrls.h>
@@ -350,6 +351,7 @@ struct rcar_csi2 {
struct device *dev;
void __iomem *base;
const struct rcar_csi2_info *info;
+ struct reset_control *rstc;
struct v4l2_subdev subdev;
struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
@@ -387,11 +389,19 @@ static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data)
iowrite32(data, priv->base + reg);
}
-static void rcsi2_reset(struct rcar_csi2 *priv)
+static void rcsi2_enter_standby(struct rcar_csi2 *priv)
{
- rcsi2_write(priv, SRST_REG, SRST_SRST);
+ rcsi2_write(priv, PHYCNT_REG, 0);
+ rcsi2_write(priv, PHTC_REG, PHTC_TESTCLR);
+ reset_control_assert(priv->rstc);
usleep_range(100, 150);
- rcsi2_write(priv, SRST_REG, 0);
+ pm_runtime_put(priv->dev);
+}
+
+static void rcsi2_exit_standby(struct rcar_csi2 *priv)
+{
+ pm_runtime_get_sync(priv->dev);
+ reset_control_deassert(priv->rstc);
}
static int rcsi2_wait_phy_start(struct rcar_csi2 *priv)
@@ -462,7 +472,7 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp)
return mbps;
}
-static int rcsi2_start(struct rcar_csi2 *priv)
+static int rcsi2_start_receiver(struct rcar_csi2 *priv)
{
const struct rcar_csi2_format *format;
u32 phycnt, vcdt = 0, vcdt2 = 0;
@@ -506,12 +516,9 @@ static int rcsi2_start(struct rcar_csi2 *priv)
/* Init */
rcsi2_write(priv, TREF_REG, TREF_TREF);
- rcsi2_reset(priv);
rcsi2_write(priv, PHTC_REG, 0);
/* Configure */
- rcsi2_write(priv, FLD_REG, FLD_FLD_NUM(2) | FLD_FLD_EN4 |
- FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN);
rcsi2_write(priv, VCDT_REG, vcdt);
if (vcdt2)
rcsi2_write(priv, VCDT2_REG, vcdt2);
@@ -542,6 +549,8 @@ static int rcsi2_start(struct rcar_csi2 *priv)
rcsi2_write(priv, PHYCNT_REG, phycnt);
rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
+ rcsi2_write(priv, FLD_REG, FLD_FLD_NUM(2) | FLD_FLD_EN4 |
+ FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ);
@@ -564,19 +573,36 @@ static int rcsi2_start(struct rcar_csi2 *priv)
return 0;
}
-static void rcsi2_stop(struct rcar_csi2 *priv)
+static int rcsi2_start(struct rcar_csi2 *priv)
{
- rcsi2_write(priv, PHYCNT_REG, 0);
+ int ret;
- rcsi2_reset(priv);
+ rcsi2_exit_standby(priv);
- rcsi2_write(priv, PHTC_REG, PHTC_TESTCLR);
+ ret = rcsi2_start_receiver(priv);
+ if (ret) {
+ rcsi2_enter_standby(priv);
+ return ret;
+ }
+
+ ret = v4l2_subdev_call(priv->remote, video, s_stream, 1);
+ if (ret) {
+ rcsi2_enter_standby(priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rcsi2_stop(struct rcar_csi2 *priv)
+{
+ rcsi2_enter_standby(priv);
+ v4l2_subdev_call(priv->remote, video, s_stream, 0);
}
static int rcsi2_s_stream(struct v4l2_subdev *sd, int enable)
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
- struct v4l2_subdev *nextsd;
int ret = 0;
mutex_lock(&priv->lock);
@@ -586,27 +612,12 @@ static int rcsi2_s_stream(struct v4l2_subdev *sd, int enable)
goto out;
}
- nextsd = priv->remote;
-
if (enable && priv->stream_count == 0) {
- pm_runtime_get_sync(priv->dev);
-
ret = rcsi2_start(priv);
- if (ret) {
- pm_runtime_put(priv->dev);
- goto out;
- }
-
- ret = v4l2_subdev_call(nextsd, video, s_stream, 1);
- if (ret) {
- rcsi2_stop(priv);
- pm_runtime_put(priv->dev);
+ if (ret)
goto out;
- }
} else if (!enable && priv->stream_count == 1) {
rcsi2_stop(priv);
- v4l2_subdev_call(nextsd, video, s_stream, 0);
- pm_runtime_put(priv->dev);
}
priv->stream_count += enable ? 1 : -1;
@@ -854,7 +865,8 @@ static int rcsi2_phtw_write_mbps(struct rcar_csi2 *priv, unsigned int mbps,
return rcsi2_phtw_write(priv, value->reg, code);
}
-static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
+static int __rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv,
+ unsigned int mbps)
{
static const struct phtw_value step1[] = {
{ .data = 0xcc, .code = 0xe2 },
@@ -880,7 +892,7 @@ static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
if (ret)
return ret;
- if (mbps <= 250) {
+ if (mbps != 0 && mbps <= 250) {
ret = rcsi2_phtw_write(priv, 0x39, 0x05);
if (ret)
return ret;
@@ -894,6 +906,16 @@ static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
return rcsi2_phtw_write_array(priv, step2);
}
+static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ return __rcsi2_init_phtw_h3_v3h_m3n(priv, mbps);
+}
+
+static int rcsi2_init_phtw_h3es2(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ return __rcsi2_init_phtw_h3_v3h_m3n(priv, 0);
+}
+
static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
{
return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
@@ -902,11 +924,11 @@ static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
static int rcsi2_confirm_start_v3m_e3(struct rcar_csi2 *priv)
{
static const struct phtw_value step1[] = {
- { .data = 0xed, .code = 0x34 },
- { .data = 0xed, .code = 0x44 },
- { .data = 0xed, .code = 0x54 },
- { .data = 0xed, .code = 0x84 },
- { .data = 0xed, .code = 0x94 },
+ { .data = 0xee, .code = 0x34 },
+ { .data = 0xee, .code = 0x44 },
+ { .data = 0xee, .code = 0x54 },
+ { .data = 0xee, .code = 0x84 },
+ { .data = 0xee, .code = 0x94 },
{ /* sentinel */ },
};
@@ -936,6 +958,10 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
if (irq < 0)
return irq;
+ priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return PTR_ERR(priv->rstc);
+
return 0;
}
@@ -952,6 +978,14 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a7795es1 = {
.num_channels = 4,
};
+static const struct rcar_csi2_info rcar_csi2_info_r8a7795es2 = {
+ .init_phtw = rcsi2_init_phtw_h3es2,
+ .hsfreqrange = hsfreqrange_h3_v3h_m3n,
+ .csi0clkfreqrange = 0x20,
+ .num_channels = 4,
+ .clear_ulps = true,
+};
+
static const struct rcar_csi2_info rcar_csi2_info_r8a7796 = {
.hsfreqrange = hsfreqrange_m3w_h3es1,
.num_channels = 4,
@@ -986,6 +1020,10 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
static const struct of_device_id rcar_csi2_of_table[] = {
{
+ .compatible = "renesas,r8a774a1-csi2",
+ .data = &rcar_csi2_info_r8a7796,
+ },
+ {
.compatible = "renesas,r8a774c0-csi2",
.data = &rcar_csi2_info_r8a77990,
},
@@ -1017,11 +1055,15 @@ static const struct of_device_id rcar_csi2_of_table[] = {
};
MODULE_DEVICE_TABLE(of, rcar_csi2_of_table);
-static const struct soc_device_attribute r8a7795es1[] = {
+static const struct soc_device_attribute r8a7795[] = {
{
.soc_id = "r8a7795", .revision = "ES1.*",
.data = &rcar_csi2_info_r8a7795es1,
},
+ {
+ .soc_id = "r8a7795", .revision = "ES2.*",
+ .data = &rcar_csi2_info_r8a7795es2,
+ },
{ /* sentinel */ },
};
@@ -1039,10 +1081,10 @@ static int rcsi2_probe(struct platform_device *pdev)
priv->info = of_device_get_match_data(&pdev->dev);
/*
- * r8a7795 ES1.x behaves differently than the ES2.0+ but doesn't
- * have it's own compatible string.
+ * The different ES versions of r8a7795 (H3) behave differently but
+ * share the same compatible string.
*/
- attr = soc_device_match(r8a7795es1);
+ attr = soc_device_match(r8a7795);
if (attr)
priv->info = attr->data;
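The new rcsi2_enter_standby()/rcsi2_exit_standby() helpers pair runtime PM with an explicit reset line. A minimal sketch of that pattern under the same assumptions (a device pointer plus a reset_control obtained with devm_reset_control_get(); all names here are hypothetical):

#include <linux/pm_runtime.h>
#include <linux/reset.h>

/* Hypothetical driver context mirroring struct rcar_csi2. */
struct demo_priv {
	struct device *dev;
	struct reset_control *rstc;
};

static void demo_exit_standby(struct demo_priv *priv)
{
	/* Power the IP up first, then release it from reset. */
	pm_runtime_get_sync(priv->dev);
	reset_control_deassert(priv->rstc);
}

static void demo_enter_standby(struct demo_priv *priv)
{
	/* Assert reset before dropping the runtime PM reference. */
	reset_control_assert(priv->rstc);
	pm_runtime_put(priv->dev);
}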
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 2207a31d355e..91ab064404a1 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -486,7 +486,7 @@ static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
}
/* Use previous value if its XS value is closer */
- if (p_prev_set && p_set &&
+ if (p_prev_set &&
xs - p_prev_set->xs_value < p_set->xs_value - xs)
p_set = p_prev_set;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index c417ff8f6fe5..608e5217ccd5 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1405,11 +1405,9 @@ static int rcar_drif_probe(struct platform_device *pdev)
/* Register map */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ch->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ch->base)) {
- ret = PTR_ERR(ch->base);
- dev_err(&pdev->dev, "ioremap failed (%d)\n", ret);
- return ret;
- }
+ if (IS_ERR(ch->base))
+ return PTR_ERR(ch->base);
+
ch->start = res->start;
platform_set_drvdata(pdev, ch);
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 6bda1eee9170..6a90bc4c476e 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -945,7 +945,7 @@ static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
u32 rndctl;
pstride = q_data->format.plane_fmt[0].bytesperline
- << FD1_WPF_PSTRIDE_Y_SHIFT;
+ << FD1_WPF_PSTRIDE_Y_SHIFT;
if (q_data->format.num_planes > 1)
pstride |= q_data->format.plane_fmt[1].bytesperline
@@ -1139,8 +1139,8 @@ static int fdp1_m2m_job_ready(void *priv)
int dstbufs = 1;
dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
- v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
- v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
+ v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
/* One output buffer is required for each field */
if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
@@ -1278,7 +1278,7 @@ static void fdp1_m2m_device_run(void *priv)
fdp1_queue_field(ctx, fbuf);
dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
- i, fbuf->last_field);
+ i, fbuf->last_field);
}
/* Queue as many jobs as our data provides for */
@@ -1337,7 +1337,7 @@ static void device_frame_end(struct fdp1_dev *fdp1,
fdp1_job_free(fdp1, job);
dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
- ctx->num_processed, ctx->translen);
+ ctx->num_processed, ctx->translen);
if (ctx->num_processed == ctx->translen ||
ctx->aborting) {
@@ -1362,7 +1362,7 @@ static int fdp1_vidioc_querycap(struct file *file, void *priv,
strscpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
strscpy(cap->card, DRIVER_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", DRIVER_NAME);
+ "platform:%s", DRIVER_NAME);
return 0;
}
@@ -1993,13 +1993,13 @@ static void fdp1_stop_streaming(struct vb2_queue *q)
/* Free smsk_data */
if (ctx->smsk_cpu) {
dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
- ctx->smsk_cpu, ctx->smsk_addr[0]);
+ ctx->smsk_cpu, ctx->smsk_addr[0]);
ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
ctx->smsk_cpu = NULL;
}
WARN(!list_empty(&ctx->fields_queue),
- "Buffer queue not empty");
+ "Buffer queue not empty");
} else {
/* Empty Capture queues (Jobs) */
struct fdp1_job *job;
@@ -2021,10 +2021,10 @@ static void fdp1_stop_streaming(struct vb2_queue *q)
fdp1_field_complete(ctx, ctx->previous);
WARN(!list_empty(&ctx->fdp1->queued_job_list),
- "Queued Job List not empty");
+ "Queued Job List not empty");
WARN(!list_empty(&ctx->fdp1->hw_job_list),
- "HW Job list not empty");
+ "HW Job list not empty");
}
}
@@ -2110,7 +2110,7 @@ static int fdp1_open(struct file *file)
fdp1_ctrl_deint_menu);
ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
- V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
@@ -2347,8 +2347,8 @@ static int fdp1_probe(struct platform_device *pdev)
goto release_m2m;
}
- v4l2_info(&fdp1->v4l2_dev,
- "Device registered as /dev/video%d\n", vfd->num);
+ v4l2_info(&fdp1->v4l2_dev, "Device registered as /dev/video%d\n",
+ vfd->num);
/* Power up the cells to read HW */
pm_runtime_enable(&pdev->dev);
@@ -2367,7 +2367,7 @@ static int fdp1_probe(struct platform_device *pdev)
break;
default:
dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
- hw_version);
+ hw_version);
}
/* Allow the hw to sleep until an open call puts it to use */
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 8837e2678bde..7f62f5ef0086 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -178,22 +178,16 @@ static const struct cec_adap_ops s5p_cec_adap_ops = {
static int s5p_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np;
- struct platform_device *hdmi_dev;
+ struct device *hdmi_dev;
struct resource *res;
struct s5p_cec_dev *cec;
bool needs_hpd = of_property_read_bool(pdev->dev.of_node, "needs-hpd");
int ret;
- np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
- if (!np) {
- dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
- return -ENODEV;
- }
- hdmi_dev = of_find_device_by_node(np);
- if (hdmi_dev == NULL)
- return -EPROBE_DEFER;
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
@@ -224,7 +218,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
if (IS_ERR(cec->reg))
return PTR_ERR(cec->reg);
- cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ cec->notifier = cec_notifier_get(hdmi_dev);
if (cec->notifier == NULL)
return -ENOMEM;
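The probe conversion above is repeated for several CEC drivers in this series: the open-coded "hdmi-phandle" lookup is replaced by cec_notifier_parse_hdmi_phandle(). A minimal probe-time sketch with hypothetical names, assuming the DT node carries an hdmi-phandle property:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <media/cec-notifier.h>

static int demo_cec_probe(struct platform_device *pdev)
{
	struct device *hdmi_dev;
	struct cec_notifier *notifier;

	/* Resolve the "hdmi-phandle" property to the HDMI device. */
	hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
	if (IS_ERR(hdmi_dev))
		return PTR_ERR(hdmi_dev);

	notifier = cec_notifier_get(hdmi_dev);
	if (!notifier)
		return -ENOMEM;

	/* ...allocate and register the CEC adapter here... */
	return 0;
}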
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 971c47165010..c8f394e1aff4 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -297,8 +297,8 @@ static int g2d_release(struct file *file)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strncpy(cap->driver, G2D_NAME, sizeof(cap->driver) - 1);
- strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1);
+ strscpy(cap->driver, G2D_NAME, sizeof(cap->driver));
+ strscpy(cap->card, G2D_NAME, sizeof(cap->card));
cap->bus_info[0] = 0;
cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
@@ -312,7 +312,7 @@ static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
return -EINVAL;
fmt = &formats[f->index];
f->pixelformat = fmt->fourcc;
- strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ strscpy(f->description, fmt->name, sizeof(f->description));
return 0;
}
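strscpy(), substituted for strncpy() above, always NUL-terminates the destination and reports truncation. A small hypothetical sketch of the difference:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void demo_copy_name(const char *src)
{
	char buf[8];

	/* strncpy() does not guarantee NUL termination on truncation. */
	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	/* strscpy() always terminates and returns -E2BIG when it truncates. */
	if (strscpy(buf, src, sizeof(buf)) == -E2BIG)
		pr_warn("name '%s' truncated\n", src);
}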
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
index a425a10540c1..e5080d6f5b2d 100644
--- a/drivers/media/platform/seco-cec/seco-cec.c
+++ b/drivers/media/platform/seco-cec/seco-cec.c
@@ -536,6 +536,7 @@ static int secocec_cec_get_notifier(struct cec_notifier **notify)
return -EPROBE_DEFER;
*notify = cec_notifier_get_conn(d, m->conn);
+ put_device(d);
return 0;
}
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index d277cc674349..5a9ba05c996e 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -493,9 +493,6 @@ static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
const struct sh_veu_format *fmt;
fmt = sh_veu_find_fmt(f);
- if (!fmt)
- /* wrong buffer type */
- return -EINVAL;
return sh_veu_try_fmt(f, fmt);
}
@@ -506,9 +503,6 @@ static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
const struct sh_veu_format *fmt;
fmt = sh_veu_find_fmt(f);
- if (!fmt)
- /* wrong buffer type */
- return -EINVAL;
return sh_veu_try_fmt(f, fmt);
}
diff --git a/drivers/media/platform/sti/c8sectpfe/Kconfig b/drivers/media/platform/sti/c8sectpfe/Kconfig
index 7420a50572d3..93eaabfd5437 100644
--- a/drivers/media/platform/sti/c8sectpfe/Kconfig
+++ b/drivers/media/platform/sti/c8sectpfe/Kconfig
@@ -12,7 +12,7 @@ config DVB_C8SECTPFE
select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This adds support for DVB front-end cards connected
to TS inputs of STiH407/410 SoC.
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c
index d34099f75990..fc37efe1d554 100644
--- a/drivers/media/platform/sti/cec/stih-cec.c
+++ b/drivers/media/platform/sti/cec/stih-cec.c
@@ -301,26 +301,19 @@ static int stih_cec_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct stih_cec *cec;
- struct device_node *np;
- struct platform_device *hdmi_dev;
+ struct device *hdmi_dev;
int ret;
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
+
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
+
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
- np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
-
- if (!np) {
- dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
- return -ENODEV;
- }
-
- hdmi_dev = of_find_device_by_node(np);
- if (!hdmi_dev)
- return -EPROBE_DEFER;
-
- cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ cec->notifier = cec_notifier_get(hdmi_dev);
if (!cec->notifier)
return -ENOMEM;
diff --git a/drivers/media/platform/sti/delta/delta-ipc.c b/drivers/media/platform/sti/delta/delta-ipc.c
index a4603d573c34..186d88f02ecd 100644
--- a/drivers/media/platform/sti/delta/delta-ipc.c
+++ b/drivers/media/platform/sti/delta/delta-ipc.c
@@ -220,10 +220,8 @@ int delta_ipc_open(struct delta_ctx *pctx, const char *name,
err:
pctx->sys_errors++;
- if (ctx->ipc_buf) {
- hw_free(pctx, ctx->ipc_buf);
- ctx->ipc_buf = NULL;
- }
+ hw_free(pctx, ctx->ipc_buf);
+ ctx->ipc_buf = NULL;
return ret;
};
diff --git a/drivers/media/platform/stm32/stm32-cec.c b/drivers/media/platform/stm32/stm32-cec.c
index 7c496bc1cf38..8a86b2cc22fa 100644
--- a/drivers/media/platform/stm32/stm32-cec.c
+++ b/drivers/media/platform/stm32/stm32-cec.c
@@ -56,6 +56,13 @@
#define ALL_TX_IT (TXEND | TXBR | TXACKE | TXERR | TXUDR | ARBLST)
#define ALL_RX_IT (RXEND | RXBR | RXACKE | RXOVR)
+/*
+ * 400 ms is the time it takes for one 16 byte message to be
+ * transferred and 5 is the maximum number of retries. Add
+ * another 100 ms as a margin.
+ */
+#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
+
struct stm32_cec {
struct cec_adapter *adap;
struct device *dev;
@@ -188,7 +195,11 @@ static int stm32_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
struct stm32_cec *cec = adap->priv;
u32 oar = (1 << logical_addr) << 16;
+ u32 val;
+ /* Poll the CEC_CR register every 100µs until the transmission completes */
+ regmap_read_poll_timeout(cec->regmap, CEC_CR, val, !(val & TXSOM),
+ 100, CEC_XFER_TIMEOUT_MS * 1000);
regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0);
if (logical_addr == CEC_LOG_ADDR_INVALID)
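CEC_XFER_TIMEOUT_MS works out to 5 * 400 + 100 = 2100 ms, and regmap_read_poll_timeout() above spins on CEC_CR every 100 µs until TXSOM clears or that budget is exhausted. A standalone sketch of the polling idiom, with hypothetical register and bit names:

#include <linux/bits.h>
#include <linux/regmap.h>

#define DEMO_CR		0x0		/* hypothetical control register */
#define DEMO_TXSOM	BIT(0)		/* hypothetical "TX in progress" bit */
#define DEMO_TIMEOUT_MS	(5 * 400 + 100)	/* 5 retries * 400 ms + 100 ms margin */

static int demo_wait_tx_idle(struct regmap *regmap)
{
	u32 val;

	/* Sleep 100 us between reads; give up after DEMO_TIMEOUT_MS. */
	return regmap_read_poll_timeout(regmap, DEMO_CR, val,
					!(val & DEMO_TXSOM),
					100, DEMO_TIMEOUT_MS * 1000);
}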
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 5fe5b38fa901..b9dad0accd1b 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -97,6 +97,8 @@ enum state {
#define TIMEOUT_MS 1000
+#define OVERRUN_ERROR_THRESHOLD 3
+
struct dcmi_graph_entity {
struct device_node *node;
@@ -164,6 +166,9 @@ struct stm32_dcmi {
int errors_count;
int overrun_count;
int buffers_count;
+
+ /* Ensure DMA operations atomicity */
+ struct mutex dma_lock;
};
static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
@@ -314,6 +319,13 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
return ret;
}
+ /*
+ * Avoid calling dmaengine_terminate_all() between
+ * dmaengine_prep_slave_single() and dmaengine_submit()
+ * by locking the whole DMA submission sequence
+ */
+ mutex_lock(&dcmi->dma_lock);
+
/* Prepare a DMA transaction */
desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
buf->size,
@@ -322,6 +334,7 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
if (!desc) {
dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
__func__, &buf->paddr, buf->size);
+ mutex_unlock(&dcmi->dma_lock);
return -EINVAL;
}
@@ -333,9 +346,12 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
dcmi->dma_cookie = dmaengine_submit(desc);
if (dma_submit_error(dcmi->dma_cookie)) {
dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
+ mutex_unlock(&dcmi->dma_lock);
return -ENXIO;
}
+ mutex_unlock(&dcmi->dma_lock);
+
dma_async_issue_pending(dcmi->dma_chan);
return 0;
@@ -432,11 +448,13 @@ static irqreturn_t dcmi_irq_thread(int irq, void *arg)
spin_lock_irq(&dcmi->irqlock);
- if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
- dcmi->errors_count++;
- if (dcmi->misr & IT_OVR)
- dcmi->overrun_count++;
+ if (dcmi->misr & IT_OVR) {
+ dcmi->overrun_count++;
+ if (dcmi->overrun_count > OVERRUN_ERROR_THRESHOLD)
+ dcmi->errors_count++;
}
+ if (dcmi->misr & IT_ERR)
+ dcmi->errors_count++;
if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG &&
dcmi->misr & IT_FRAME) {
@@ -570,9 +588,9 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
int ret;
ret = pm_runtime_get_sync(dcmi->dev);
- if (ret) {
- dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync\n",
- __func__);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
+ __func__, ret);
goto err_release_buffers;
}
@@ -720,7 +738,9 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
spin_unlock_irq(&dcmi->irqlock);
/* Stop all pending DMA operations */
+ mutex_lock(&dcmi->dma_lock);
dmaengine_terminate_all(dcmi->dma_chan);
+ mutex_unlock(&dcmi->dma_lock);
pm_runtime_put(dcmi->dev);
@@ -811,6 +831,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
if (!sd_fmt) {
+ if (!dcmi->num_of_sd_formats)
+ return -ENODATA;
+
sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
pix->pixelformat = sd_fmt->fourcc;
}
@@ -989,6 +1012,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
if (!sd_fmt) {
+ if (!dcmi->num_of_sd_formats)
+ return -ENODATA;
+
sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
pix->pixelformat = sd_fmt->fourcc;
}
@@ -1595,7 +1621,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
/* Parse the graph to extract a list of subdevice DT nodes. */
ret = dcmi_graph_parse(dcmi, dcmi->dev->of_node);
if (ret < 0) {
- dev_err(dcmi->dev, "Graph parsing failed\n");
+ dev_err(dcmi->dev, "Failed to parse graph\n");
return ret;
}
@@ -1604,6 +1630,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
ret = v4l2_async_notifier_add_subdev(&dcmi->notifier,
&dcmi->entity.asd);
if (ret) {
+ dev_err(dcmi->dev, "Failed to add subdev notifier\n");
of_node_put(dcmi->entity.node);
return ret;
}
@@ -1612,7 +1639,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
if (ret < 0) {
- dev_err(dcmi->dev, "Notifier registration failed\n");
+ dev_err(dcmi->dev, "Failed to register notifier\n");
v4l2_async_notifier_cleanup(&dcmi->notifier);
return ret;
}
@@ -1645,7 +1672,7 @@ static int dcmi_probe(struct platform_device *pdev)
dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(dcmi->rstc)) {
dev_err(&pdev->dev, "Could not get reset control\n");
- return -ENODEV;
+ return PTR_ERR(dcmi->rstc);
}
/* Get bus characteristics from devicetree */
@@ -1660,7 +1687,7 @@ static int dcmi_probe(struct platform_device *pdev)
of_node_put(np);
if (ret) {
dev_err(&pdev->dev, "Could not parse the endpoint\n");
- return -ENODEV;
+ return ret;
}
if (ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
@@ -1673,8 +1700,9 @@ static int dcmi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(&pdev->dev, "Could not get irq\n");
- return -ENODEV;
+ if (irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get irq\n");
+ return irq;
}
dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1694,12 +1722,13 @@ static int dcmi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), dcmi);
if (ret) {
dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
- return -ENODEV;
+ return ret;
}
mclk = devm_clk_get(&pdev->dev, "mclk");
if (IS_ERR(mclk)) {
- dev_err(&pdev->dev, "Unable to get mclk\n");
+ if (PTR_ERR(mclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get mclk\n");
return PTR_ERR(mclk);
}
@@ -1711,6 +1740,7 @@ static int dcmi_probe(struct platform_device *pdev)
spin_lock_init(&dcmi->irqlock);
mutex_init(&dcmi->lock);
+ mutex_init(&dcmi->dma_lock);
init_completion(&dcmi->complete);
INIT_LIST_HEAD(&dcmi->buffers);
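The new dma_lock guarantees that dmaengine_terminate_all() never runs between dmaengine_prep_slave_single() and dmaengine_submit(). A minimal sketch of that locking pattern with hypothetical names (mutex_init() is assumed to run at probe time):

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/mutex.h>

/* Hypothetical context pairing a DMA channel with its submission lock. */
struct demo_dma_ctx {
	struct dma_chan *chan;
	struct mutex dma_lock;	/* serializes prep/submit vs termination */
};

static int demo_submit(struct demo_dma_ctx *ctx, dma_addr_t addr, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	mutex_lock(&ctx->dma_lock);
	desc = dmaengine_prep_slave_single(ctx->chan, addr, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		mutex_unlock(&ctx->dma_lock);
		return -EINVAL;
	}
	cookie = dmaengine_submit(desc);
	mutex_unlock(&ctx->dma_lock);

	if (dma_submit_error(cookie))
		return -ENXIO;

	dma_async_issue_pending(ctx->chan);
	return 0;
}

static void demo_stop(struct demo_dma_ctx *ctx)
{
	/* Termination waits for any in-flight prep/submit pair. */
	mutex_lock(&ctx->dma_lock);
	dmaengine_terminate_all(ctx->chan);
	mutex_unlock(&ctx->dma_lock);
}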
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 1fd16861f111..f0dfe68486d1 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -412,7 +412,7 @@ static int vidioc_enum_input(struct file *file, void *fh,
if (inp->index != 0)
return -EINVAL;
- strlcpy(inp->name, "camera", sizeof(inp->name));
+ strscpy(inp->name, "camera", sizeof(inp->name));
inp->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
@@ -644,7 +644,7 @@ int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi,
}
/* Register video device */
- strlcpy(vdev->name, name, sizeof(vdev->name));
+ strscpy(vdev->name, name, sizeof(vdev->name));
vdev->release = video_device_release_empty;
vdev->fops = &sun6i_video_fops;
vdev->ioctl_ops = &sun6i_video_ioctl_ops;
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index aba488cd0e64..7fb3a4fa07c1 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -327,21 +327,15 @@ static const struct cec_adap_ops tegra_cec_ops = {
static int tegra_cec_probe(struct platform_device *pdev)
{
- struct platform_device *hdmi_dev;
- struct device_node *np;
+ struct device *hdmi_dev;
struct tegra_cec *cec;
struct resource *res;
int ret = 0;
- np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
- if (!np) {
- dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ if (!hdmi_dev)
return -ENODEV;
- }
- hdmi_dev = of_find_device_by_node(np);
- if (hdmi_dev == NULL)
- return -EPROBE_DEFER;
cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
@@ -400,7 +394,7 @@ static int tegra_cec_probe(struct platform_device *pdev)
goto clk_error;
}
- cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ cec->notifier = cec_notifier_get(hdmi_dev);
if (!cec->notifier) {
ret = -ENOMEM;
goto clk_error;
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index fc3c212b96e1..8d075683e448 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -1643,8 +1643,7 @@ of_get_next_endpoint(const struct device_node *parent,
static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
{
struct platform_device *pdev = ctx->dev->pdev;
- struct device_node *ep_node, *port, *remote_ep,
- *sensor_node, *parent;
+ struct device_node *ep_node, *port, *sensor_node, *parent;
struct v4l2_fwnode_endpoint *endpoint;
struct v4l2_async_subdev *asd;
u32 regval = 0;
@@ -1657,7 +1656,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
ep_node = NULL;
port = NULL;
- remote_ep = NULL;
sensor_node = NULL;
ret = -EINVAL;
@@ -1703,12 +1701,7 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
asd->match.fwnode = of_fwnode_handle(sensor_node);
- remote_ep = of_graph_get_remote_endpoint(ep_node);
- if (!remote_ep) {
- ctx_dbg(3, ctx, "can't get remote-endpoint\n");
- goto cleanup_exit;
- }
- v4l2_fwnode_endpoint_parse(of_fwnode_handle(remote_ep), endpoint);
+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint);
if (endpoint->bus_type != V4L2_MBUS_CSI2_DPHY) {
ctx_err(ctx, "Port:%d sub-device %pOFn is not a CSI2 device\n",
@@ -1759,7 +1752,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
sensor_node = NULL;
cleanup_exit:
- of_node_put(remote_ep);
of_node_put(sensor_node);
of_node_put(ep_node);
of_node_put(port);
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 207e7e76c048..1e40eafec284 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -1491,8 +1491,8 @@ handled:
static int vpe_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
- strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
+ strscpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
VPE_MODULE_NAME);
cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
@@ -1519,7 +1519,7 @@ static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
if (!fmt)
return -EINVAL;
- strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
return 0;
}
diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/platform/vicodec/codec-fwht.c
index d1d6085da9f1..31faf319e508 100644
--- a/drivers/media/platform/vicodec/codec-fwht.c
+++ b/drivers/media/platform/vicodec/codec-fwht.c
@@ -46,8 +46,12 @@ static const uint8_t zigzag[64] = {
63,
};
-
-static int rlc(const s16 *in, __be16 *output, int blocktype)
+/*
+ * noinline_for_stack to work around
+ * https://bugs.llvm.org/show_bug.cgi?id=38809
+ */
+static int noinline_for_stack
+rlc(const s16 *in, __be16 *output, int blocktype)
{
s16 block[8 * 8];
s16 *wp = block;
@@ -106,8 +110,8 @@ static int rlc(const s16 *in, __be16 *output, int blocktype)
* This function will worst-case increase rlc_in by 65*2 bytes:
* one s16 value for the header and 8 * 8 coefficients of type s16.
*/
-static u16 derlc(const __be16 **rlc_in, s16 *dwht_out,
- const __be16 *end_of_input)
+static noinline_for_stack u16
+derlc(const __be16 **rlc_in, s16 *dwht_out, const __be16 *end_of_input)
{
/* header */
const __be16 *input = *rlc_in;
@@ -240,8 +244,9 @@ static void dequantize_inter(s16 *coeff)
*coeff <<= *quant;
}
-static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
- unsigned int input_step, bool intra)
+static void noinline_for_stack fwht(const u8 *block, s16 *output_block,
+ unsigned int stride,
+ unsigned int input_step, bool intra)
{
/* we'll need more than 8 bits for the transformed coefficients */
s32 workspace1[8], workspace2[8];
@@ -373,7 +378,8 @@ static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
* Furthermore values can be negative... This is just a version that
* works with 16 signed data
*/
-static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
+static void noinline_for_stack
+fwht16(const s16 *block, s16 *output_block, int stride, int intra)
{
/* we'll need more than 8 bits for the transformed coefficients */
s32 workspace1[8], workspace2[8];
@@ -456,7 +462,8 @@ static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
}
}
-static void ifwht(const s16 *block, s16 *output_block, int intra)
+static noinline_for_stack void
+ifwht(const s16 *block, s16 *output_block, int intra)
{
/*
* we'll need more than 8 bits for the transformed coefficients
@@ -604,9 +611,9 @@ static int var_inter(const s16 *old, const s16 *new)
return ret;
}
-static int decide_blocktype(const u8 *cur, const u8 *reference,
- s16 *deltablock, unsigned int stride,
- unsigned int input_step)
+static noinline_for_stack int
+decide_blocktype(const u8 *cur, const u8 *reference, s16 *deltablock,
+ unsigned int stride, unsigned int input_step)
{
s16 tmp[64];
s16 old[64];
@@ -632,12 +639,13 @@ static int decide_blocktype(const u8 *cur, const u8 *reference,
return vari <= vard ? IBLOCK : PBLOCK;
}
-static void fill_decoder_block(u8 *dst, const s16 *input, int stride)
+static void fill_decoder_block(u8 *dst, const s16 *input, int stride,
+ unsigned int dst_step)
{
int i, j;
for (i = 0; i < 8; i++) {
- for (j = 0; j < 8; j++, input++, dst++) {
+ for (j = 0; j < 8; j++, input++, dst += dst_step) {
if (*input < 0)
*dst = 0;
else if (*input > 255)
@@ -645,17 +653,19 @@ static void fill_decoder_block(u8 *dst, const s16 *input, int stride)
else
*dst = *input;
}
- dst += stride - 8;
+ dst += stride - (8 * dst_step);
}
}
-static void add_deltas(s16 *deltas, const u8 *ref, int stride)
+static void add_deltas(s16 *deltas, const u8 *ref, int stride,
+ unsigned int ref_step)
{
int k, l;
for (k = 0; k < 8; k++) {
for (l = 0; l < 8; l++) {
- *deltas += *ref++;
+ *deltas += *ref;
+ ref += ref_step;
/*
* Due to quantizing, it might possible that the
* decoded coefficients are slightly out of range
@@ -666,7 +676,7 @@ static void add_deltas(s16 *deltas, const u8 *ref, int stride)
*deltas = 255;
deltas++;
}
- ref += stride - 8;
+ ref += stride - (8 * ref_step);
}
}
@@ -711,8 +721,8 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
ifwht(cf->de_coeffs, cf->de_fwht, blocktype);
if (blocktype == PBLOCK)
- add_deltas(cf->de_fwht, refp, 8);
- fill_decoder_block(refp, cf->de_fwht, 8);
+ add_deltas(cf->de_fwht, refp, 8, 1);
+ fill_decoder_block(refp, cf->de_fwht, 8, 1);
}
input += 8 * input_step;
@@ -821,23 +831,31 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
return encoding;
}
-static bool decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
- u32 height, u32 width, u32 coded_width,
+static bool decode_plane(struct fwht_cframe *cf, const __be16 **rlco,
+ u32 height, u32 width, const u8 *ref, u32 ref_stride,
+ unsigned int ref_step, u8 *dst,
+ unsigned int dst_stride, unsigned int dst_step,
bool uncompressed, const __be16 *end_of_rlco_buf)
{
unsigned int copies = 0;
s16 copy[8 * 8];
u16 stat;
unsigned int i, j;
+ bool is_intra = !ref;
width = round_up(width, 8);
height = round_up(height, 8);
if (uncompressed) {
+ int i;
+
if (end_of_rlco_buf + 1 < *rlco + width * height / 2)
return false;
- memcpy(ref, *rlco, width * height);
- *rlco += width * height / 2;
+ for (i = 0; i < height; i++) {
+ memcpy(dst, *rlco, width);
+ dst += dst_stride;
+ *rlco += width / 2;
+ }
return true;
}
@@ -849,15 +867,17 @@ static bool decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
*/
for (j = 0; j < height / 8; j++) {
for (i = 0; i < width / 8; i++) {
- u8 *refp = ref + j * 8 * coded_width + i * 8;
+ const u8 *refp = ref + j * 8 * ref_stride +
+ i * 8 * ref_step;
+ u8 *dstp = dst + j * 8 * dst_stride + i * 8 * dst_step;
if (copies) {
memcpy(cf->de_fwht, copy, sizeof(copy));
- if (stat & PFRAME_BIT)
+ if ((stat & PFRAME_BIT) && !is_intra)
add_deltas(cf->de_fwht, refp,
- coded_width);
- fill_decoder_block(refp, cf->de_fwht,
- coded_width);
+ ref_stride, ref_step);
+ fill_decoder_block(dstp, cf->de_fwht,
+ dst_stride, dst_step);
copies--;
continue;
}
@@ -865,35 +885,41 @@ static bool decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
stat = derlc(rlco, cf->coeffs, end_of_rlco_buf);
if (stat & OVERFLOW_BIT)
return false;
- if (stat & PFRAME_BIT)
+ if ((stat & PFRAME_BIT) && !is_intra)
dequantize_inter(cf->coeffs);
else
dequantize_intra(cf->coeffs);
ifwht(cf->coeffs, cf->de_fwht,
- (stat & PFRAME_BIT) ? 0 : 1);
+ ((stat & PFRAME_BIT) && !is_intra) ? 0 : 1);
copies = (stat & DUPS_MASK) >> 1;
if (copies)
memcpy(copy, cf->de_fwht, sizeof(copy));
- if (stat & PFRAME_BIT)
- add_deltas(cf->de_fwht, refp, coded_width);
- fill_decoder_block(refp, cf->de_fwht, coded_width);
+ if ((stat & PFRAME_BIT) && !is_intra)
+ add_deltas(cf->de_fwht, refp,
+ ref_stride, ref_step);
+ fill_decoder_block(dstp, cf->de_fwht, dst_stride,
+ dst_step);
}
}
return true;
}
-bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
- u32 hdr_flags, unsigned int components_num,
- unsigned int width, unsigned int height,
- unsigned int coded_width)
+bool fwht_decode_frame(struct fwht_cframe *cf, u32 hdr_flags,
+ unsigned int components_num, unsigned int width,
+ unsigned int height, const struct fwht_raw_frame *ref,
+ unsigned int ref_stride, unsigned int ref_chroma_stride,
+ struct fwht_raw_frame *dst, unsigned int dst_stride,
+ unsigned int dst_chroma_stride)
{
const __be16 *rlco = cf->rlc_data;
const __be16 *end_of_rlco_buf = cf->rlc_data +
(cf->size / sizeof(*rlco)) - 1;
- if (!decode_plane(cf, &rlco, ref->luma, height, width, coded_width,
+ if (!decode_plane(cf, &rlco, height, width, ref->luma, ref_stride,
+ ref->luma_alpha_step, dst->luma, dst_stride,
+ dst->luma_alpha_step,
hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED,
end_of_rlco_buf))
return false;
@@ -901,27 +927,30 @@ bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
if (components_num >= 3) {
u32 h = height;
u32 w = width;
- u32 c = coded_width;
if (!(hdr_flags & FWHT_FL_CHROMA_FULL_HEIGHT))
h /= 2;
- if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH)) {
+ if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH))
w /= 2;
- c /= 2;
- }
- if (!decode_plane(cf, &rlco, ref->cb, h, w, c,
+
+ if (!decode_plane(cf, &rlco, h, w, ref->cb, ref_chroma_stride,
+ ref->chroma_step, dst->cb, dst_chroma_stride,
+ dst->chroma_step,
hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED,
end_of_rlco_buf))
return false;
- if (!decode_plane(cf, &rlco, ref->cr, h, w, c,
+ if (!decode_plane(cf, &rlco, h, w, ref->cr, ref_chroma_stride,
+ ref->chroma_step, dst->cr, dst_chroma_stride,
+ dst->chroma_step,
hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED,
end_of_rlco_buf))
return false;
}
if (components_num == 4)
- if (!decode_plane(cf, &rlco, ref->alpha, height, width,
- coded_width,
+ if (!decode_plane(cf, &rlco, height, width, ref->alpha, ref_stride,
+ ref->luma_alpha_step, dst->alpha, dst_stride,
+ dst->luma_alpha_step,
hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED,
end_of_rlco_buf))
return false;
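noinline_for_stack, applied to the helpers above, keeps each function's large on-stack scratch arrays in its own frame instead of letting the compiler inline everything into one oversized caller (the clang issue referenced in the comment). A hypothetical sketch:

#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical transform: the 128-byte scratch block stays in this frame. */
static noinline_for_stack void demo_transform(const s16 *in, s16 *out)
{
	s16 scratch[8 * 8];
	int i;

	for (i = 0; i < 8 * 8; i++)
		scratch[i] = in[i] * 2;
	for (i = 0; i < 8 * 8; i++)
		out[i] = scratch[i];
}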
diff --git a/drivers/media/platform/vicodec/codec-fwht.h b/drivers/media/platform/vicodec/codec-fwht.h
index c410512d47c5..b6fec2b1cbca 100644
--- a/drivers/media/platform/vicodec/codec-fwht.h
+++ b/drivers/media/platform/vicodec/codec-fwht.h
@@ -124,6 +124,7 @@ struct fwht_raw_frame {
unsigned int luma_alpha_step;
unsigned int chroma_step;
unsigned int components_num;
+ u8 *buf;
u8 *luma, *cb, *cr, *alpha;
};
@@ -140,9 +141,10 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
bool is_intra, bool next_is_intra,
unsigned int width, unsigned int height,
unsigned int stride, unsigned int chroma_stride);
-bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
- u32 hdr_flags, unsigned int components_num,
- unsigned int width, unsigned int height,
- unsigned int coded_width);
-
+bool fwht_decode_frame(struct fwht_cframe *cf, u32 hdr_flags,
+ unsigned int components_num, unsigned int width,
+ unsigned int height, const struct fwht_raw_frame *ref,
+ unsigned int ref_stride, unsigned int ref_chroma_stride,
+ struct fwht_raw_frame *dst, unsigned int dst_stride,
+ unsigned int dst_chroma_stride);
#endif
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
index 6573a471c5ca..01e7f09efc4e 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
@@ -37,7 +37,19 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
{ V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
};
-const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
+bool v4l2_fwht_validate_fmt(const struct v4l2_fwht_pixfmt_info *info,
+ u32 width_div, u32 height_div, u32 components_num,
+ u32 pixenc)
+{
+ if (info->width_div == width_div &&
+ info->height_div == height_div &&
+ (!pixenc || info->pixenc == pixenc) &&
+ info->components_num == components_num)
+ return true;
+ return false;
+}
+
+const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_nth_fmt(u32 width_div,
u32 height_div,
u32 components_num,
u32 pixenc,
@@ -46,10 +58,10 @@ const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
unsigned int i;
for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++) {
- if (v4l2_fwht_pixfmts[i].width_div == width_div &&
- v4l2_fwht_pixfmts[i].height_div == height_div &&
- (!pixenc || v4l2_fwht_pixfmts[i].pixenc == pixenc) &&
- v4l2_fwht_pixfmts[i].components_num == components_num) {
+ bool is_valid = v4l2_fwht_validate_fmt(&v4l2_fwht_pixfmts[i],
+ width_div, height_div,
+ components_num, pixenc);
+ if (is_valid) {
if (start_idx == 0)
return v4l2_fwht_pixfmts + i;
start_idx--;
@@ -75,117 +87,141 @@ const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx)
return v4l2_fwht_pixfmts + idx;
}
-int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
+static int prepare_raw_frame(struct fwht_raw_frame *rf,
+ const struct v4l2_fwht_pixfmt_info *info, u8 *buf,
+ unsigned int size)
{
- unsigned int size = state->stride * state->coded_height;
- unsigned int chroma_stride = state->stride;
- const struct v4l2_fwht_pixfmt_info *info = state->info;
- struct fwht_cframe_hdr *p_hdr;
- struct fwht_cframe cf;
- struct fwht_raw_frame rf;
- u32 encoding;
- u32 flags = 0;
-
- if (!info)
- return -EINVAL;
-
- rf.luma = p_in;
- rf.width_div = info->width_div;
- rf.height_div = info->height_div;
- rf.luma_alpha_step = info->luma_alpha_step;
- rf.chroma_step = info->chroma_step;
- rf.alpha = NULL;
- rf.components_num = info->components_num;
+ rf->luma = buf;
+ rf->width_div = info->width_div;
+ rf->height_div = info->height_div;
+ rf->luma_alpha_step = info->luma_alpha_step;
+ rf->chroma_step = info->chroma_step;
+ rf->alpha = NULL;
+ rf->components_num = info->components_num;
+ /*
+ * The buffer is NULL if it is the reference
+ * frame of an I-frame in the stateless decoder
+ */
+ if (!buf) {
+ rf->luma = NULL;
+ rf->cb = NULL;
+ rf->cr = NULL;
+ rf->alpha = NULL;
+ return 0;
+ }
switch (info->id) {
case V4L2_PIX_FMT_GREY:
- rf.cb = NULL;
- rf.cr = NULL;
+ rf->cb = NULL;
+ rf->cr = NULL;
break;
case V4L2_PIX_FMT_YUV420:
- rf.cb = rf.luma + size;
- rf.cr = rf.cb + size / 4;
- chroma_stride /= 2;
+ rf->cb = rf->luma + size;
+ rf->cr = rf->cb + size / 4;
break;
case V4L2_PIX_FMT_YVU420:
- rf.cr = rf.luma + size;
- rf.cb = rf.cr + size / 4;
- chroma_stride /= 2;
+ rf->cr = rf->luma + size;
+ rf->cb = rf->cr + size / 4;
break;
case V4L2_PIX_FMT_YUV422P:
- rf.cb = rf.luma + size;
- rf.cr = rf.cb + size / 2;
- chroma_stride /= 2;
+ rf->cb = rf->luma + size;
+ rf->cr = rf->cb + size / 2;
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV24:
- rf.cb = rf.luma + size;
- rf.cr = rf.cb + 1;
+ rf->cb = rf->luma + size;
+ rf->cr = rf->cb + 1;
break;
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV42:
- rf.cr = rf.luma + size;
- rf.cb = rf.cr + 1;
+ rf->cr = rf->luma + size;
+ rf->cb = rf->cr + 1;
break;
case V4L2_PIX_FMT_YUYV:
- rf.cb = rf.luma + 1;
- rf.cr = rf.cb + 2;
+ rf->cb = rf->luma + 1;
+ rf->cr = rf->cb + 2;
break;
case V4L2_PIX_FMT_YVYU:
- rf.cr = rf.luma + 1;
- rf.cb = rf.cr + 2;
+ rf->cr = rf->luma + 1;
+ rf->cb = rf->cr + 2;
break;
case V4L2_PIX_FMT_UYVY:
- rf.cb = rf.luma;
- rf.cr = rf.cb + 2;
- rf.luma++;
+ rf->cb = rf->luma;
+ rf->cr = rf->cb + 2;
+ rf->luma++;
break;
case V4L2_PIX_FMT_VYUY:
- rf.cr = rf.luma;
- rf.cb = rf.cr + 2;
- rf.luma++;
+ rf->cr = rf->luma;
+ rf->cb = rf->cr + 2;
+ rf->luma++;
break;
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_HSV24:
- rf.cr = rf.luma;
- rf.cb = rf.cr + 2;
- rf.luma++;
+ rf->cr = rf->luma;
+ rf->cb = rf->cr + 2;
+ rf->luma++;
break;
case V4L2_PIX_FMT_BGR24:
- rf.cb = rf.luma;
- rf.cr = rf.cb + 2;
- rf.luma++;
+ rf->cb = rf->luma;
+ rf->cr = rf->cb + 2;
+ rf->luma++;
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
case V4L2_PIX_FMT_HSV32:
- rf.cr = rf.luma + 1;
- rf.cb = rf.cr + 2;
- rf.luma += 2;
+ rf->cr = rf->luma + 1;
+ rf->cb = rf->cr + 2;
+ rf->luma += 2;
break;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_XBGR32:
- rf.cb = rf.luma;
- rf.cr = rf.cb + 2;
- rf.luma++;
+ rf->cb = rf->luma;
+ rf->cr = rf->cb + 2;
+ rf->luma++;
break;
case V4L2_PIX_FMT_ARGB32:
- rf.alpha = rf.luma;
- rf.cr = rf.luma + 1;
- rf.cb = rf.cr + 2;
- rf.luma += 2;
+ rf->alpha = rf->luma;
+ rf->cr = rf->luma + 1;
+ rf->cb = rf->cr + 2;
+ rf->luma += 2;
break;
case V4L2_PIX_FMT_ABGR32:
- rf.cb = rf.luma;
- rf.cr = rf.cb + 2;
- rf.luma++;
- rf.alpha = rf.cr + 1;
+ rf->cb = rf->luma;
+ rf->cr = rf->cb + 2;
+ rf->luma++;
+ rf->alpha = rf->cr + 1;
break;
default:
return -EINVAL;
}
+ return 0;
+}
+
+int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
+{
+ unsigned int size = state->stride * state->coded_height;
+ unsigned int chroma_stride = state->stride;
+ const struct v4l2_fwht_pixfmt_info *info = state->info;
+ struct fwht_cframe_hdr *p_hdr;
+ struct fwht_cframe cf;
+ struct fwht_raw_frame rf;
+ u32 encoding;
+ u32 flags = 0;
+
+ if (!info)
+ return -EINVAL;
+
+ if (prepare_raw_frame(&rf, info, p_in, size))
+ return -EINVAL;
+
+ if (info->planes_num == 3)
+ chroma_stride /= 2;
+
+ if (info->id == V4L2_PIX_FMT_NV24 ||
+ info->id == V4L2_PIX_FMT_NV42)
+ chroma_stride *= 2;
cf.i_frame_qp = state->i_frame_qp;
cf.p_frame_qp = state->p_frame_qp;
@@ -235,14 +271,17 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
{
- unsigned int i, j, k;
u32 flags;
struct fwht_cframe cf;
- u8 *p, *ref_p;
unsigned int components_num = 3;
unsigned int version;
const struct v4l2_fwht_pixfmt_info *info;
unsigned int hdr_width_div, hdr_height_div;
+ struct fwht_raw_frame dst_rf;
+ unsigned int dst_chroma_stride = state->stride;
+ unsigned int ref_chroma_stride = state->ref_stride;
+ unsigned int dst_size = state->stride * state->coded_height;
+ unsigned int ref_size;
if (!state->info)
return -EINVAL;
@@ -290,241 +329,29 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
hdr_height_div != info->height_div)
return -EINVAL;
- if (!fwht_decode_frame(&cf, &state->ref_frame, flags, components_num,
- state->visible_width, state->visible_height,
- state->coded_width))
+ if (prepare_raw_frame(&dst_rf, info, p_out, dst_size))
return -EINVAL;
+ if (info->planes_num == 3) {
+ dst_chroma_stride /= 2;
+ ref_chroma_stride /= 2;
+ }
+ if (info->id == V4L2_PIX_FMT_NV24 ||
+ info->id == V4L2_PIX_FMT_NV42) {
+ dst_chroma_stride *= 2;
+ ref_chroma_stride *= 2;
+ }
- /*
- * TODO - handle the case where the compressed stream encodes a
- * different format than the requested decoded format.
- */
- switch (state->info->id) {
- case V4L2_PIX_FMT_GREY:
- ref_p = state->ref_frame.luma;
- for (i = 0; i < state->coded_height; i++) {
- memcpy(p_out, ref_p, state->visible_width);
- p_out += state->stride;
- ref_p += state->coded_width;
- }
- break;
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YUV422P:
- ref_p = state->ref_frame.luma;
- for (i = 0; i < state->coded_height; i++) {
- memcpy(p_out, ref_p, state->visible_width);
- p_out += state->stride;
- ref_p += state->coded_width;
- }
- ref_p = state->ref_frame.cb;
- for (i = 0; i < state->coded_height / 2; i++) {
- memcpy(p_out, ref_p, state->visible_width / 2);
- p_out += state->stride / 2;
- ref_p += state->coded_width / 2;
- }
- ref_p = state->ref_frame.cr;
- for (i = 0; i < state->coded_height / 2; i++) {
- memcpy(p_out, ref_p, state->visible_width / 2);
- p_out += state->stride / 2;
- ref_p += state->coded_width / 2;
- }
- break;
- case V4L2_PIX_FMT_YVU420:
- ref_p = state->ref_frame.luma;
- for (i = 0; i < state->coded_height; i++) {
- memcpy(p_out, ref_p, state->visible_width);
- p_out += state->stride;
- ref_p += state->coded_width;
- }
-
- ref_p = state->ref_frame.cr;
- for (i = 0; i < state->coded_height / 2; i++) {
- memcpy(p_out, ref_p, state->visible_width / 2);
- p_out += state->stride / 2;
- ref_p += state->coded_width / 2;
- }
- ref_p = state->ref_frame.cb;
- for (i = 0; i < state->coded_height / 2; i++) {
- memcpy(p_out, ref_p, state->visible_width / 2);
- p_out += state->stride / 2;
- ref_p += state->coded_width / 2;
- }
- break;
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV24:
- ref_p = state->ref_frame.luma;
- for (i = 0; i < state->coded_height; i++) {
- memcpy(p_out, ref_p, state->visible_width);
- p_out += state->stride;
- ref_p += state->coded_width;
- }
+ ref_size = state->ref_stride * state->coded_height;
- k = 0;
- for (i = 0; i < state->coded_height / 2; i++) {
- for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
- *p++ = state->ref_frame.cb[k];
- *p++ = state->ref_frame.cr[k];
- k++;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV61:
- case V4L2_PIX_FMT_NV42:
- ref_p = state->ref_frame.luma;
- for (i = 0; i < state->coded_height; i++) {
- memcpy(p_out, ref_p, state->visible_width);
- p_out += state->stride;
- ref_p += state->coded_width;
- }
+ if (prepare_raw_frame(&state->ref_frame, info, state->ref_frame.buf,
+ ref_size))
+ return -EINVAL;
- k = 0;
- for (i = 0; i < state->coded_height / 2; i++) {
- for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
- *p++ = state->ref_frame.cr[k];
- *p++ = state->ref_frame.cb[k];
- k++;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_YUYV:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cb[k / 2];
- *p++ = state->ref_frame.luma[k + 1];
- *p++ = state->ref_frame.cr[k / 2];
- k += 2;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_YVYU:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cr[k / 2];
- *p++ = state->ref_frame.luma[k + 1];
- *p++ = state->ref_frame.cb[k / 2];
- k += 2;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_UYVY:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
- *p++ = state->ref_frame.cb[k / 2];
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cr[k / 2];
- *p++ = state->ref_frame.luma[k + 1];
- k += 2;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_VYUY:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
- *p++ = state->ref_frame.cr[k / 2];
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cb[k / 2];
- *p++ = state->ref_frame.luma[k + 1];
- k += 2;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_RGB24:
- case V4L2_PIX_FMT_HSV24:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width; j++) {
- *p++ = state->ref_frame.cr[k];
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cb[k];
- k++;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_BGR24:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width; j++) {
- *p++ = state->ref_frame.cb[k];
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cr[k];
- k++;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_RGB32:
- case V4L2_PIX_FMT_XRGB32:
- case V4L2_PIX_FMT_HSV32:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width; j++) {
- *p++ = 0;
- *p++ = state->ref_frame.cr[k];
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cb[k];
- k++;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_BGR32:
- case V4L2_PIX_FMT_XBGR32:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width; j++) {
- *p++ = state->ref_frame.cb[k];
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cr[k];
- *p++ = 0;
- k++;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_ARGB32:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width; j++) {
- *p++ = state->ref_frame.alpha[k];
- *p++ = state->ref_frame.cr[k];
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cb[k];
- k++;
- }
- p_out += state->stride;
- }
- break;
- case V4L2_PIX_FMT_ABGR32:
- k = 0;
- for (i = 0; i < state->coded_height; i++) {
- for (j = 0, p = p_out; j < state->coded_width; j++) {
- *p++ = state->ref_frame.cb[k];
- *p++ = state->ref_frame.luma[k];
- *p++ = state->ref_frame.cr[k];
- *p++ = state->ref_frame.alpha[k];
- k++;
- }
- p_out += state->stride;
- }
- break;
- default:
+ if (!fwht_decode_frame(&cf, flags, components_num,
+ state->visible_width, state->visible_height,
+ &state->ref_frame, state->ref_stride, ref_chroma_stride,
+ &dst_rf, state->stride, dst_chroma_stride))
return -EINVAL;
- }
return 0;
}
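For reference, prepare_raw_frame() lays out a contiguous three-plane YUV420 buffer as restated by this hypothetical helper: a luma plane of stride * coded_height bytes followed by two quarter-size chroma planes that advance by half the stride.

#include <linux/types.h>

/* Hypothetical helper restating the YUV420 plane arithmetic above. */
static void demo_yuv420_planes(u8 *buf, unsigned int stride,
			       unsigned int coded_height,
			       u8 **luma, u8 **cb, u8 **cr)
{
	unsigned int size = stride * coded_height;

	*luma = buf;		/* stride * coded_height bytes    */
	*cb = *luma + size;	/* quarter-size plane, stride / 2 */
	*cr = *cb + size / 4;	/* quarter-size plane, stride / 2 */
}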
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.h b/drivers/media/platform/vicodec/codec-v4l2-fwht.h
index aa6fa90a48be..1a0d2a9f931a 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.h
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.h
@@ -30,6 +30,7 @@ struct v4l2_fwht_state {
unsigned int coded_width;
unsigned int coded_height;
unsigned int stride;
+ unsigned int ref_stride;
unsigned int gop_size;
unsigned int gop_cnt;
u16 i_frame_qp;
@@ -43,11 +44,15 @@ struct v4l2_fwht_state {
struct fwht_raw_frame ref_frame;
struct fwht_cframe_hdr header;
u8 *compressed_frame;
+ u64 ref_frame_ts;
};
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat);
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx);
-const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
+bool v4l2_fwht_validate_fmt(const struct v4l2_fwht_pixfmt_info *info,
+ u32 width_div, u32 height_div, u32 components_num,
+ u32 pixenc);
+const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_nth_fmt(u32 width_div,
u32 height_div,
u32 components_num,
u32 pixenc,
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index d7636fe9e174..bd01a9206aa6 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -64,6 +64,10 @@ static const struct v4l2_fwht_pixfmt_info pixfmt_fwht = {
V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0, 1
};
+static const struct v4l2_fwht_pixfmt_info pixfmt_stateless_fwht = {
+ V4L2_PIX_FMT_FWHT_STATELESS, 0, 3, 1, 1, 1, 1, 1, 0, 1
+};
+
static void vicodec_dev_release(struct device *dev)
{
}
@@ -89,27 +93,29 @@ enum {
V4L2_M2M_DST = 1,
};
+struct vicodec_dev_instance {
+ struct video_device vfd;
+ struct mutex mutex;
+ spinlock_t lock;
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
struct vicodec_dev {
struct v4l2_device v4l2_dev;
- struct video_device enc_vfd;
- struct video_device dec_vfd;
+ struct vicodec_dev_instance stateful_enc;
+ struct vicodec_dev_instance stateful_dec;
+ struct vicodec_dev_instance stateless_dec;
#ifdef CONFIG_MEDIA_CONTROLLER
struct media_device mdev;
#endif
- struct mutex enc_mutex;
- struct mutex dec_mutex;
- spinlock_t enc_lock;
- spinlock_t dec_lock;
-
- struct v4l2_m2m_dev *enc_dev;
- struct v4l2_m2m_dev *dec_dev;
};
struct vicodec_ctx {
struct v4l2_fh fh;
struct vicodec_dev *dev;
bool is_enc;
+ bool is_stateless;
spinlock_t *lock;
struct v4l2_ctrl_handler hdl;
@@ -148,27 +154,149 @@ static struct vicodec_q_data *get_q_data(struct vicodec_ctx *ctx,
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return &ctx->q_data[V4L2_M2M_DST];
default:
- WARN_ON(1);
break;
}
return NULL;
}
+static void copy_cap_to_ref(const u8 *cap, const struct v4l2_fwht_pixfmt_info *info,
+ struct v4l2_fwht_state *state)
+{
+ int plane_idx;
+ u8 *p_ref = state->ref_frame.buf;
+ unsigned int cap_stride = state->stride;
+ unsigned int ref_stride = state->ref_stride;
+
+ for (plane_idx = 0; plane_idx < info->planes_num; plane_idx++) {
+ int i;
+ unsigned int h_div = (plane_idx == 1 || plane_idx == 2) ?
+ info->height_div : 1;
+ const u8 *row_cap = cap;
+ u8 *row_ref = p_ref;
+
+ if (info->planes_num == 3 && plane_idx == 1) {
+ cap_stride /= 2;
+ ref_stride /= 2;
+ }
+
+ if (plane_idx == 1 &&
+ (info->id == V4L2_PIX_FMT_NV24 ||
+ info->id == V4L2_PIX_FMT_NV42)) {
+ cap_stride *= 2;
+ ref_stride *= 2;
+ }
+
+ for (i = 0; i < state->visible_height / h_div; i++) {
+ memcpy(row_ref, row_cap, ref_stride);
+ row_ref += ref_stride;
+ row_cap += cap_stride;
+ }
+ cap += cap_stride * (state->coded_height / h_div);
+ p_ref += ref_stride * (state->coded_height / h_div);
+ }
+}
+
+static bool validate_by_version(unsigned int flags, unsigned int version)
+{
+ if (!version || version > FWHT_VERSION)
+ return false;
+
+ if (version >= 2) {
+ unsigned int components_num = 1 +
+ ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
+ unsigned int pixenc = flags & FWHT_FL_PIXENC_MSK;
+
+ if (components_num == 0 || components_num > 4 || !pixenc)
+ return false;
+ }
+ return true;
+}
+
+static bool validate_stateless_params_flags(const struct v4l2_ctrl_fwht_params *params,
+ const struct v4l2_fwht_pixfmt_info *cur_info)
+{
+ unsigned int width_div =
+ (params->flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ unsigned int height_div =
+ (params->flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+ unsigned int components_num = 3;
+ unsigned int pixenc = 0;
+
+ if (params->version < 3)
+ return false;
+
+ components_num = 1 + ((params->flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
+ pixenc = (params->flags & FWHT_FL_PIXENC_MSK);
+ if (v4l2_fwht_validate_fmt(cur_info, width_div, height_div,
+ components_num, pixenc))
+ return true;
+ return false;
+}
+
+
+static void update_state_from_header(struct vicodec_ctx *ctx)
+{
+ const struct fwht_cframe_hdr *p_hdr = &ctx->state.header;
+
+ ctx->state.visible_width = ntohl(p_hdr->width);
+ ctx->state.visible_height = ntohl(p_hdr->height);
+ ctx->state.colorspace = ntohl(p_hdr->colorspace);
+ ctx->state.xfer_func = ntohl(p_hdr->xfer_func);
+ ctx->state.ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
+ ctx->state.quantization = ntohl(p_hdr->quantization);
+}
+
static int device_process(struct vicodec_ctx *ctx,
struct vb2_v4l2_buffer *src_vb,
struct vb2_v4l2_buffer *dst_vb)
{
struct vicodec_dev *dev = ctx->dev;
- struct vicodec_q_data *q_dst;
struct v4l2_fwht_state *state = &ctx->state;
u8 *p_src, *p_dst;
- int ret;
+ int ret = 0;
- q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if (ctx->is_enc)
+ if (ctx->is_enc || ctx->is_stateless)
p_src = vb2_plane_vaddr(&src_vb->vb2_buf, 0);
else
p_src = state->compressed_frame;
+
+ if (ctx->is_stateless) {
+ struct media_request *src_req = src_vb->vb2_buf.req_obj.req;
+
+ ret = v4l2_ctrl_request_setup(src_req, &ctx->hdl);
+ if (ret)
+ return ret;
+ update_state_from_header(ctx);
+
+ ctx->state.header.size =
+ htonl(vb2_get_plane_payload(&src_vb->vb2_buf, 0));
+ /*
+ * set the reference buffer from the reference timestamp
+ * only if this is a P-frame
+ */
+ if (!(ntohl(ctx->state.header.flags) & FWHT_FL_I_FRAME)) {
+ struct vb2_buffer *ref_vb2_buf;
+ int ref_buf_idx;
+ struct vb2_queue *vq_cap =
+ v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ ref_buf_idx = vb2_find_timestamp(vq_cap,
+ ctx->state.ref_frame_ts, 0);
+ if (ref_buf_idx < 0)
+ return -EINVAL;
+
+ ref_vb2_buf = vq_cap->bufs[ref_buf_idx];
+ if (ref_vb2_buf->state == VB2_BUF_STATE_ERROR)
+ ret = -EINVAL;
+ ctx->state.ref_frame.buf =
+ vb2_plane_vaddr(ref_vb2_buf, 0);
+ } else {
+ ctx->state.ref_frame.buf = NULL;
+ }
+ }
p_dst = vb2_plane_vaddr(&dst_vb->vb2_buf, 0);
if (!p_src || !p_dst) {
v4l2_err(&dev->v4l2_dev,
@@ -178,30 +306,31 @@ static int device_process(struct vicodec_ctx *ctx,
if (ctx->is_enc) {
struct vicodec_q_data *q_src;
+ int comp_sz_or_errcode;
q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
state->info = q_src->info;
- ret = v4l2_fwht_encode(state, p_src, p_dst);
- if (ret < 0)
- return ret;
- vb2_set_plane_payload(&dst_vb->vb2_buf, 0, ret);
+ comp_sz_or_errcode = v4l2_fwht_encode(state, p_src, p_dst);
+ if (comp_sz_or_errcode < 0)
+ return comp_sz_or_errcode;
+ vb2_set_plane_payload(&dst_vb->vb2_buf, 0, comp_sz_or_errcode);
} else {
+ struct vicodec_q_data *q_dst;
unsigned int comp_frame_size = ntohl(ctx->state.header.size);
+ q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (comp_frame_size > ctx->comp_max_size)
return -EINVAL;
state->info = q_dst->info;
ret = v4l2_fwht_decode(state, p_src, p_dst);
if (ret < 0)
return ret;
+ if (!ctx->is_stateless)
+ copy_cap_to_ref(p_dst, ctx->state.info, &ctx->state);
+
vb2_set_plane_payload(&dst_vb->vb2_buf, 0, q_dst->sizeimage);
}
-
- dst_vb->sequence = q_dst->sequence++;
- dst_vb->flags &= ~V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_copy_metadata(src_vb, dst_vb, !ctx->is_enc);
-
- return 0;
+ return ret;
}
/*
@@ -274,16 +403,26 @@ static void device_run(void *priv)
struct vicodec_ctx *ctx = priv;
struct vicodec_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct vicodec_q_data *q_src;
+ struct vicodec_q_data *q_src, *q_dst;
u32 state;
+ struct media_request *src_req;
+
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ src_req = src_buf->vb2_buf.req_obj.req;
+
q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
state = VB2_BUF_STATE_DONE;
if (device_process(ctx, src_buf, dst_buf))
state = VB2_BUF_STATE_ERROR;
+ else
+ dst_buf->sequence = q_dst->sequence++;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, !ctx->is_enc);
+
ctx->last_dst_buf = dst_buf;
spin_lock(ctx->lock);
@@ -291,7 +430,7 @@ static void device_run(void *priv)
dst_buf->flags |= V4L2_BUF_FLAG_LAST;
v4l2_event_queue_fh(&ctx->fh, &eos_event);
}
- if (ctx->is_enc) {
+ if (ctx->is_enc || ctx->is_stateless) {
src_buf->sequence = q_src->sequence++;
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, state);
@@ -303,6 +442,9 @@ static void device_run(void *priv)
ctx->comp_has_next_frame = false;
}
v4l2_m2m_buf_done(dst_buf, state);
+ if (ctx->is_stateless && src_req)
+ v4l2_ctrl_request_complete(src_req, &ctx->hdl);
+
ctx->comp_size = 0;
ctx->header_size = 0;
ctx->comp_magic_cnt = 0;
@@ -310,9 +452,12 @@ static void device_run(void *priv)
spin_unlock(ctx->lock);
if (ctx->is_enc)
- v4l2_m2m_job_finish(dev->enc_dev, ctx->fh.m2m_ctx);
+ v4l2_m2m_job_finish(dev->stateful_enc.m2m_dev, ctx->fh.m2m_ctx);
+ else if (ctx->is_stateless)
+ v4l2_m2m_job_finish(dev->stateless_dec.m2m_dev,
+ ctx->fh.m2m_ctx);
else
- v4l2_m2m_job_finish(dev->dec_dev, ctx->fh.m2m_ctx);
+ v4l2_m2m_job_finish(dev->stateful_dec.m2m_dev, ctx->fh.m2m_ctx);
}
static void job_remove_src_buf(struct vicodec_ctx *ctx, u32 state)
@@ -344,7 +489,7 @@ info_from_header(const struct fwht_cframe_hdr *p_hdr)
FWHT_FL_COMPONENTS_NUM_OFFSET);
pixenc = (flags & FWHT_FL_PIXENC_MSK);
}
- return v4l2_fwht_default_fmt(width_div, height_div,
+ return v4l2_fwht_find_nth_fmt(width_div, height_div,
components_num, pixenc, 0);
}
@@ -356,21 +501,11 @@ static bool is_header_valid(const struct fwht_cframe_hdr *p_hdr)
unsigned int version = ntohl(p_hdr->version);
unsigned int flags = ntohl(p_hdr->flags);
- if (!version || version > FWHT_VERSION)
- return false;
-
if (w < MIN_WIDTH || w > MAX_WIDTH || h < MIN_HEIGHT || h > MAX_HEIGHT)
return false;
- if (version >= 2) {
- unsigned int components_num = 1 +
- ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
- FWHT_FL_COMPONENTS_NUM_OFFSET);
- unsigned int pixenc = flags & FWHT_FL_PIXENC_MSK;
-
- if (components_num == 0 || components_num > 4 || !pixenc)
- return false;
- }
+ if (!validate_by_version(flags, version))
+ return false;
info = info_from_header(p_hdr);
if (!info)
@@ -388,6 +523,12 @@ static void update_capture_data_from_header(struct vicodec_ctx *ctx)
unsigned int hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
unsigned int hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+ /*
+ * This function should not be used by a stateless codec since
+ * it changes values in q_data that are not request specific
+ */
+ WARN_ON(ctx->is_stateless);
+
q_dst->info = info;
q_dst->visible_width = ntohl(p_hdr->width);
q_dst->visible_height = ntohl(p_hdr->height);
@@ -440,7 +581,7 @@ static int job_ready(void *priv)
if (ctx->source_changed)
return 0;
- if (ctx->is_enc || ctx->comp_has_frame)
+ if (ctx->is_stateless || ctx->is_enc || ctx->comp_has_frame)
return 1;
restart:
@@ -551,8 +692,8 @@ static const struct v4l2_fwht_pixfmt_info *find_fmt(u32 fmt)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strncpy(cap->driver, VICODEC_NAME, sizeof(cap->driver) - 1);
- strncpy(cap->card, VICODEC_NAME, sizeof(cap->card) - 1);
+ strscpy(cap->driver, VICODEC_NAME, sizeof(cap->driver));
+ strscpy(cap->card, VICODEC_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", VICODEC_NAME);
return 0;
@@ -575,7 +716,7 @@ static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx,
if (!info || ctx->is_enc)
info = v4l2_fwht_get_pixfmt(f->index);
else
- info = v4l2_fwht_default_fmt(info->width_div,
+ info = v4l2_fwht_find_nth_fmt(info->width_div,
info->height_div,
info->components_num,
info->pixenc,
@@ -586,7 +727,8 @@ static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx,
} else {
if (f->index)
return -EINVAL;
- f->pixelformat = V4L2_PIX_FMT_FWHT;
+ f->pixelformat = ctx->is_stateless ?
+ V4L2_PIX_FMT_FWHT_STATELESS : V4L2_PIX_FMT_FWHT;
}
return 0;
}
@@ -688,13 +830,15 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pix_mp;
struct v4l2_pix_format *pix;
struct v4l2_plane_pix_format *plane;
- const struct v4l2_fwht_pixfmt_info *info = &pixfmt_fwht;
+ const struct v4l2_fwht_pixfmt_info *info = ctx->is_stateless ?
+ &pixfmt_stateless_fwht : &pixfmt_fwht;
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
pix = &f->fmt.pix;
- if (pix->pixelformat != V4L2_PIX_FMT_FWHT)
+ if (pix->pixelformat != V4L2_PIX_FMT_FWHT &&
+ pix->pixelformat != V4L2_PIX_FMT_FWHT_STATELESS)
info = find_fmt(pix->pixelformat);
pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
@@ -715,7 +859,8 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
pix_mp = &f->fmt.pix_mp;
plane = pix_mp->plane_fmt;
- if (pix_mp->pixelformat != V4L2_PIX_FMT_FWHT)
+ if (pix_mp->pixelformat != V4L2_PIX_FMT_FWHT &&
+ pix_mp->pixelformat != V4L2_PIX_FMT_FWHT_STATELESS)
info = find_fmt(pix_mp->pixelformat);
pix_mp->num_planes = 1;
@@ -792,8 +937,12 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
if (multiplanar)
return -EINVAL;
pix = &f->fmt.pix;
- pix->pixelformat = !ctx->is_enc ? V4L2_PIX_FMT_FWHT :
- find_fmt(pix->pixelformat)->id;
+ if (ctx->is_enc)
+ pix->pixelformat = find_fmt(pix->pixelformat)->id;
+ else if (ctx->is_stateless)
+ pix->pixelformat = V4L2_PIX_FMT_FWHT_STATELESS;
+ else
+ pix->pixelformat = V4L2_PIX_FMT_FWHT;
if (!pix->colorspace)
pix->colorspace = V4L2_COLORSPACE_REC709;
break;
@@ -801,8 +950,12 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
if (!multiplanar)
return -EINVAL;
pix_mp = &f->fmt.pix_mp;
- pix_mp->pixelformat = !ctx->is_enc ? V4L2_PIX_FMT_FWHT :
- find_fmt(pix_mp->pixelformat)->id;
+ if (ctx->is_enc)
+ pix_mp->pixelformat = find_fmt(pix_mp->pixelformat)->id;
+ else if (ctx->is_stateless)
+ pix_mp->pixelformat = V4L2_PIX_FMT_FWHT_STATELESS;
+ else
+ pix_mp->pixelformat = V4L2_PIX_FMT_FWHT;
if (!pix_mp->colorspace)
pix_mp->colorspace = V4L2_COLORSPACE_REC709;
break;
@@ -845,6 +998,8 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
if (pix->pixelformat == V4L2_PIX_FMT_FWHT)
q_data->info = &pixfmt_fwht;
+ else if (pix->pixelformat == V4L2_PIX_FMT_FWHT_STATELESS)
+ q_data->info = &pixfmt_stateless_fwht;
else
q_data->info = find_fmt(pix->pixelformat);
q_data->coded_width = pix->width;
@@ -866,6 +1021,8 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT)
q_data->info = &pixfmt_fwht;
+ else if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT_STATELESS)
+ q_data->info = &pixfmt_stateless_fwht;
else
q_data->info = find_fmt(pix_mp->pixelformat);
q_data->coded_width = pix_mp->width;
@@ -945,16 +1102,6 @@ static int vidioc_g_selection(struct file *file, void *priv,
{
struct vicodec_ctx *ctx = file2ctx(file);
struct vicodec_q_data *q_data;
- enum v4l2_buf_type valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- enum v4l2_buf_type valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-
- if (multiplanar) {
- valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- }
-
- if (s->type != valid_cap_type && s->type != valid_out_type)
- return -EINVAL;
q_data = get_q_data(ctx, s->type);
if (!q_data)
@@ -963,18 +1110,14 @@ static int vidioc_g_selection(struct file *file, void *priv,
* encoder supports only cropping on the OUTPUT buffer
* decoder supports only composing on the CAPTURE buffer
*/
- if ((ctx->is_enc && s->type == valid_out_type) ||
- (!ctx->is_enc && s->type == valid_cap_type)) {
+ if (ctx->is_enc && s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
switch (s->target) {
- case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_CROP:
s->r.left = 0;
s->r.top = 0;
s->r.width = q_data->visible_width;
s->r.height = q_data->visible_height;
return 0;
- case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
s->r.left = 0;
@@ -983,6 +1126,22 @@ static int vidioc_g_selection(struct file *file, void *priv,
s->r.height = q_data->coded_height;
return 0;
}
+ } else if (!ctx->is_enc && s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->coded_width;
+ s->r.height = q_data->coded_height;
+ return 0;
+ }
}
return -EINVAL;
}
@@ -992,12 +1151,8 @@ static int vidioc_s_selection(struct file *file, void *priv,
{
struct vicodec_ctx *ctx = file2ctx(file);
struct vicodec_q_data *q_data;
- enum v4l2_buf_type out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-
- if (multiplanar)
- out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- if (s->type != out_type)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
q_data = get_q_data(ctx, s->type);
@@ -1092,6 +1247,8 @@ static int vicodec_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
switch (fsize->pixel_format) {
+ case V4L2_PIX_FMT_FWHT_STATELESS:
+ break;
case V4L2_PIX_FMT_FWHT:
break;
default:
@@ -1200,6 +1357,14 @@ static int vicodec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
return 0;
}
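+/*
+ * Only progressive frames are supported, so force the field of OUTPUT
+ * buffers to V4L2_FIELD_NONE.
+ */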
+static int vicodec_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
static int vicodec_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -1263,10 +1428,11 @@ static void vicodec_buf_queue(struct vb2_buffer *vb)
}
/*
- * source change event is relevant only for the decoder
+ * source change event is relevant only for the stateful decoder
* in the compressed stream
*/
- if (ctx->is_enc || !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (ctx->is_stateless || ctx->is_enc ||
+ !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
return;
}
@@ -1314,12 +1480,33 @@ static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (vbuf == NULL)
return;
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->hdl);
spin_lock(ctx->lock);
v4l2_m2m_buf_done(vbuf, state);
spin_unlock(ctx->lock);
}
}
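+/*
+ * Size in bytes of one raw frame for this queue: the luma plane plus two
+ * chroma planes scaled down by the chroma divider, plus an extra
+ * alpha-sized plane for four-component formats.
+ */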
+static unsigned int total_frame_size(struct vicodec_q_data *q_data)
+{
+ unsigned int size;
+ unsigned int chroma_div;
+
+ if (!q_data->info) {
+ WARN_ON(1);
+ return 0;
+ }
+ size = q_data->coded_width * q_data->coded_height;
+ chroma_div = q_data->info->width_div * q_data->info->height_div;
+
+ if (q_data->info->components_num == 4)
+ return 2 * size + 2 * (size / chroma_div);
+ else if (q_data->info->components_num == 3)
+ return size + 2 * (size / chroma_div);
+ return size;
+}
+
static int vicodec_start_streaming(struct vb2_queue *q,
unsigned int count)
{
@@ -1330,7 +1517,7 @@ static int vicodec_start_streaming(struct vb2_queue *q,
unsigned int size = q_data->coded_width * q_data->coded_height;
unsigned int chroma_div;
unsigned int total_planes_size;
- u8 *new_comp_frame;
+ u8 *new_comp_frame = NULL;
if (!info)
return -EINVAL;
@@ -1338,24 +1525,24 @@ static int vicodec_start_streaming(struct vb2_queue *q,
chroma_div = info->width_div * info->height_div;
q_data->sequence = 0;
- ctx->last_src_buf = NULL;
- ctx->last_dst_buf = NULL;
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->last_src_buf = NULL;
+ else
+ ctx->last_dst_buf = NULL;
+
state->gop_cnt = 0;
if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
(!V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc))
return 0;
- if (info->id == V4L2_PIX_FMT_FWHT) {
+ if (info->id == V4L2_PIX_FMT_FWHT ||
+ info->id == V4L2_PIX_FMT_FWHT_STATELESS) {
vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
return -EINVAL;
}
- if (info->components_num == 4)
- total_planes_size = 2 * size + 2 * (size / chroma_div);
- else if (info->components_num == 3)
- total_planes_size = size + 2 * (size / chroma_div);
- else
- total_planes_size = size;
+ total_planes_size = total_frame_size(q_data);
+ ctx->comp_max_size = total_planes_size;
state->visible_width = q_data->visible_width;
state->visible_height = q_data->visible_height;
@@ -1364,8 +1551,14 @@ static int vicodec_start_streaming(struct vb2_queue *q,
state->stride = q_data->coded_width *
info->bytesperline_mult;
- state->ref_frame.luma = kvmalloc(total_planes_size, GFP_KERNEL);
- ctx->comp_max_size = total_planes_size;
+ if (ctx->is_stateless) {
+ state->ref_stride = state->stride;
+ return 0;
+ }
+ state->ref_stride = q_data->coded_width * info->luma_alpha_step;
+
+ state->ref_frame.buf = kvmalloc(total_planes_size, GFP_KERNEL);
+ state->ref_frame.luma = state->ref_frame.buf;
new_comp_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
if (!state->ref_frame.luma || !new_comp_frame) {
@@ -1413,7 +1606,10 @@ static void vicodec_stop_streaming(struct vb2_queue *q)
if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
(V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) {
- kvfree(ctx->state.ref_frame.luma);
+ if (!ctx->is_stateless)
+ kvfree(ctx->state.ref_frame.buf);
+ ctx->state.ref_frame.buf = NULL;
+ ctx->state.ref_frame.luma = NULL;
ctx->comp_max_size = 0;
ctx->source_changed = false;
}
@@ -1427,14 +1623,24 @@ static void vicodec_stop_streaming(struct vb2_queue *q)
}
}
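+/* Complete the controls bound to this buffer's request */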
+static void vicodec_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
+}
+
static const struct vb2_ops vicodec_qops = {
- .queue_setup = vicodec_queue_setup,
- .buf_prepare = vicodec_buf_prepare,
- .buf_queue = vicodec_buf_queue,
- .start_streaming = vicodec_start_streaming,
- .stop_streaming = vicodec_stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
+ .queue_setup = vicodec_queue_setup,
+ .buf_out_validate = vicodec_buf_out_validate,
+ .buf_prepare = vicodec_buf_prepare,
+ .buf_queue = vicodec_buf_queue,
+ .buf_request_complete = vicodec_buf_request_complete,
+ .start_streaming = vicodec_start_streaming,
+ .stop_streaming = vicodec_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
@@ -1452,9 +1658,14 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &vicodec_qops;
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->lock = ctx->is_enc ? &ctx->dev->enc_mutex :
- &ctx->dev->dec_mutex;
-
+ if (ctx->is_enc)
+ src_vq->lock = &ctx->dev->stateful_enc.mutex;
+ else if (ctx->is_stateless)
+ src_vq->lock = &ctx->dev->stateless_dec.mutex;
+ else
+ src_vq->lock = &ctx->dev->stateful_dec.mutex;
+ src_vq->supports_requests = ctx->is_stateless;
+ src_vq->requires_requests = ctx->is_stateless;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
@@ -1473,53 +1684,86 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
return vb2_queue_init(dst_vq);
}
-#define VICODEC_CID_CUSTOM_BASE (V4L2_CID_MPEG_BASE | 0xf000)
-#define VICODEC_CID_I_FRAME_QP (VICODEC_CID_CUSTOM_BASE + 0)
-#define VICODEC_CID_P_FRAME_QP (VICODEC_CID_CUSTOM_BASE + 1)
+static int vicodec_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vicodec_ctx *ctx = container_of(ctrl->handler,
+ struct vicodec_ctx, hdl);
+ const struct v4l2_ctrl_fwht_params *params;
+ struct vicodec_q_data *q_dst = get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_FWHT_PARAMS:
+ if (!q_dst->info)
+ return -EINVAL;
+ params = ctrl->p_new.p_fwht_params;
+ if (params->width > q_dst->coded_width ||
+ params->width < MIN_WIDTH ||
+ params->height > q_dst->coded_height ||
+ params->height < MIN_HEIGHT)
+ return -EINVAL;
+ if (!validate_by_version(params->flags, params->version))
+ return -EINVAL;
+ if (!validate_stateless_params_flags(params, q_dst->info))
+ return -EINVAL;
+ return 0;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
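+/* Build a compressed frame header from the stateless FWHT_PARAMS control */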
+static void update_header_from_stateless_params(struct vicodec_ctx *ctx,
+ const struct v4l2_ctrl_fwht_params *params)
+{
+ struct fwht_cframe_hdr *p_hdr = &ctx->state.header;
+
+ p_hdr->magic1 = FWHT_MAGIC1;
+ p_hdr->magic2 = FWHT_MAGIC2;
+ p_hdr->version = htonl(params->version);
+ p_hdr->width = htonl(params->width);
+ p_hdr->height = htonl(params->height);
+ p_hdr->flags = htonl(params->flags);
+ p_hdr->colorspace = htonl(params->colorspace);
+ p_hdr->xfer_func = htonl(params->xfer_func);
+ p_hdr->ycbcr_enc = htonl(params->ycbcr_enc);
+ p_hdr->quantization = htonl(params->quantization);
+}
static int vicodec_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vicodec_ctx *ctx = container_of(ctrl->handler,
struct vicodec_ctx, hdl);
+ const struct v4l2_ctrl_fwht_params *params;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
ctx->state.gop_size = ctrl->val;
return 0;
- case VICODEC_CID_I_FRAME_QP:
+ case V4L2_CID_FWHT_I_FRAME_QP:
ctx->state.i_frame_qp = ctrl->val;
return 0;
- case VICODEC_CID_P_FRAME_QP:
+ case V4L2_CID_FWHT_P_FRAME_QP:
ctx->state.p_frame_qp = ctrl->val;
return 0;
+ case V4L2_CID_MPEG_VIDEO_FWHT_PARAMS:
+ params = ctrl->p_new.p_fwht_params;
+ update_header_from_stateless_params(ctx, params);
+ ctx->state.ref_frame_ts = params->backward_ref_ts;
+ return 0;
}
return -EINVAL;
}
static const struct v4l2_ctrl_ops vicodec_ctrl_ops = {
.s_ctrl = vicodec_s_ctrl,
+ .try_ctrl = vicodec_try_ctrl,
};
-static const struct v4l2_ctrl_config vicodec_ctrl_i_frame = {
- .ops = &vicodec_ctrl_ops,
- .id = VICODEC_CID_I_FRAME_QP,
- .name = "FWHT I-Frame QP Value",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 1,
- .max = 31,
- .def = 20,
- .step = 1,
-};
-
-static const struct v4l2_ctrl_config vicodec_ctrl_p_frame = {
- .ops = &vicodec_ctrl_ops,
- .id = VICODEC_CID_P_FRAME_QP,
- .name = "FWHT P-Frame QP Value",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 1,
- .max = 31,
- .def = 20,
- .step = 1,
+static const struct v4l2_ctrl_config vicodec_ctrl_stateless_state = {
+ .ops = &vicodec_ctrl_ops,
+ .id = V4L2_CID_MPEG_VIDEO_FWHT_PARAMS,
+ .elem_size = sizeof(struct v4l2_ctrl_fwht_params),
};
/*
@@ -1542,8 +1786,10 @@ static int vicodec_open(struct file *file)
goto open_unlock;
}
- if (vfd == &dev->enc_vfd)
+ if (vfd == &dev->stateful_enc.vfd)
ctx->is_enc = true;
+ else if (vfd == &dev->stateless_dec.vfd)
+ ctx->is_stateless = true;
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
@@ -1552,8 +1798,12 @@ static int vicodec_open(struct file *file)
v4l2_ctrl_handler_init(hdl, 4);
v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE,
1, 16, 1, 10);
- v4l2_ctrl_new_custom(hdl, &vicodec_ctrl_i_frame, NULL);
- v4l2_ctrl_new_custom(hdl, &vicodec_ctrl_p_frame, NULL);
+ v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_FWHT_I_FRAME_QP,
+ 1, 31, 1, 20);
+ v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_FWHT_P_FRAME_QP,
+ 1, 31, 1, 20);
+ if (ctx->is_stateless)
+ v4l2_ctrl_new_custom(hdl, &vicodec_ctrl_stateless_state, NULL);
if (hdl->error) {
rc = hdl->error;
v4l2_ctrl_handler_free(hdl);
@@ -1563,15 +1813,19 @@ static int vicodec_open(struct file *file)
ctx->fh.ctrl_handler = hdl;
v4l2_ctrl_handler_setup(hdl);
- ctx->q_data[V4L2_M2M_SRC].info =
- ctx->is_enc ? v4l2_fwht_get_pixfmt(0) : &pixfmt_fwht;
+ if (ctx->is_enc)
+ ctx->q_data[V4L2_M2M_SRC].info = v4l2_fwht_get_pixfmt(0);
+ else if (ctx->is_stateless)
+ ctx->q_data[V4L2_M2M_SRC].info = &pixfmt_stateless_fwht;
+ else
+ ctx->q_data[V4L2_M2M_SRC].info = &pixfmt_fwht;
ctx->q_data[V4L2_M2M_SRC].coded_width = 1280;
ctx->q_data[V4L2_M2M_SRC].coded_height = 720;
ctx->q_data[V4L2_M2M_SRC].visible_width = 1280;
ctx->q_data[V4L2_M2M_SRC].visible_height = 720;
size = 1280 * 720 * ctx->q_data[V4L2_M2M_SRC].info->sizeimage_mult /
ctx->q_data[V4L2_M2M_SRC].info->sizeimage_div;
- if (ctx->is_enc)
+ if (ctx->is_enc || ctx->is_stateless)
ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
else
ctx->q_data[V4L2_M2M_SRC].sizeimage =
@@ -1590,13 +1844,17 @@ static int vicodec_open(struct file *file)
ctx->state.colorspace = V4L2_COLORSPACE_REC709;
if (ctx->is_enc) {
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->enc_dev, ctx,
- &queue_init);
- ctx->lock = &dev->enc_lock;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->stateful_enc.m2m_dev,
+ ctx, &queue_init);
+ ctx->lock = &dev->stateful_enc.lock;
+ } else if (ctx->is_stateless) {
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->stateless_dec.m2m_dev,
+ ctx, &queue_init);
+ ctx->lock = &dev->stateless_dec.lock;
} else {
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->dec_dev, ctx,
- &queue_init);
- ctx->lock = &dev->dec_lock;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->stateful_dec.m2m_dev,
+ ctx, &queue_init);
+ ctx->lock = &dev->stateful_dec.lock;
}
if (IS_ERR(ctx->fh.m2m_ctx)) {
@@ -1620,17 +1878,71 @@ static int vicodec_release(struct file *file)
struct video_device *vfd = video_devdata(file);
struct vicodec_ctx *ctx = file2ctx(file);
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(vfd->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(vfd->lock);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kvfree(ctx->state.compressed_frame);
kfree(ctx);
return 0;
}
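+/*
+ * A request is only valid if it carries exactly one OUTPUT buffer and the
+ * FWHT_PARAMS control.
+ */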
+static int vicodec_request_validate(struct media_request *req)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *parent_hdl, *hdl;
+ struct vicodec_ctx *ctx = NULL;
+ struct v4l2_ctrl *ctrl;
+ unsigned int count;
+
+ list_for_each_entry(obj, &req->objects, list) {
+ struct vb2_buffer *vb;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ break;
+ }
+ }
+
+ if (!ctx) {
+ pr_err("No buffer was provided with the request\n");
+ return -ENOENT;
+ }
+
+ count = vb2_request_buffer_cnt(req);
+ if (!count) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "No buffer was provided with the request\n");
+ return -ENOENT;
+ } else if (count > 1) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "More than one buffer was provided with the request\n");
+ return -EINVAL;
+ }
+
+ parent_hdl = &ctx->hdl;
+
+ hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
+ if (!hdl) {
+ v4l2_info(&ctx->dev->v4l2_dev, "Missing codec control\n");
+ return -ENOENT;
+ }
+ ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl,
+ vicodec_ctrl_stateless_state.id);
+ if (!ctrl) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "Missing required codec control\n");
+ return -ENOENT;
+ }
+
+ return vb2_request_validate(req);
+}
+
static const struct v4l2_file_operations vicodec_fops = {
.owner = THIS_MODULE,
.open = vicodec_open,
@@ -1649,24 +1961,67 @@ static const struct video_device vicodec_videodev = {
.release = video_device_release_empty,
};
+static const struct media_device_ops vicodec_m2m_media_ops = {
+ .req_validate = vicodec_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
static const struct v4l2_m2m_ops m2m_ops = {
.device_run = device_run,
.job_ready = job_ready,
};
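+/* Set up and register one m2m device and video node (encoder or decoder) */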
+static int register_instance(struct vicodec_dev *dev,
+ struct vicodec_dev_instance *dev_instance,
+ const char *name, bool is_enc)
+{
+ struct video_device *vfd;
+ int ret;
+
+ spin_lock_init(&dev_instance->lock);
+ mutex_init(&dev_instance->mutex);
+ dev_instance->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev_instance->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
+ return PTR_ERR(dev_instance->m2m_dev);
+ }
+
+ dev_instance->vfd = vicodec_videodev;
+ vfd = &dev_instance->vfd;
+ vfd->lock = &dev_instance->mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ strscpy(vfd->name, name, sizeof(vfd->name));
+ vfd->device_caps = V4L2_CAP_STREAMING |
+ (multiplanar ? V4L2_CAP_VIDEO_M2M_MPLANE : V4L2_CAP_VIDEO_M2M);
+ if (is_enc) {
+ v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
+ } else {
+ v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+ }
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device '%s'\n", name);
+ v4l2_m2m_release(dev_instance->m2m_dev);
+ return ret;
+ }
+ v4l2_info(&dev->v4l2_dev, "Device '%s' registered as /dev/video%d\n",
+ name, vfd->num);
+ return 0;
+}
+
static int vicodec_probe(struct platform_device *pdev)
{
struct vicodec_dev *dev;
- struct video_device *vfd;
int ret;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- spin_lock_init(&dev->enc_lock);
- spin_lock_init(&dev->dec_lock);
-
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
return ret;
@@ -1677,103 +2032,74 @@ static int vicodec_probe(struct platform_device *pdev)
strscpy(dev->mdev.bus_info, "platform:vicodec",
sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
+ dev->mdev.ops = &vicodec_m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
#endif
- mutex_init(&dev->enc_mutex);
- mutex_init(&dev->dec_mutex);
-
platform_set_drvdata(pdev, dev);
- dev->enc_dev = v4l2_m2m_init(&m2m_ops);
- if (IS_ERR(dev->enc_dev)) {
- v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
- ret = PTR_ERR(dev->enc_dev);
+ ret = register_instance(dev, &dev->stateful_enc,
+ "stateful-encoder", true);
+ if (ret)
goto unreg_dev;
- }
- dev->dec_dev = v4l2_m2m_init(&m2m_ops);
- if (IS_ERR(dev->dec_dev)) {
- v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
- ret = PTR_ERR(dev->dec_dev);
- goto err_enc_m2m;
- }
+ ret = register_instance(dev, &dev->stateful_dec,
+ "stateful-decoder", false);
+ if (ret)
+ goto unreg_sf_enc;
- dev->enc_vfd = vicodec_videodev;
- vfd = &dev->enc_vfd;
- vfd->lock = &dev->enc_mutex;
- vfd->v4l2_dev = &dev->v4l2_dev;
- strscpy(vfd->name, "vicodec-enc", sizeof(vfd->name));
- vfd->device_caps = V4L2_CAP_STREAMING |
- (multiplanar ? V4L2_CAP_VIDEO_M2M_MPLANE : V4L2_CAP_VIDEO_M2M);
- v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
- v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
- video_set_drvdata(vfd, dev);
-
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
- if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto err_dec_m2m;
- }
- v4l2_info(&dev->v4l2_dev,
- "Device registered as /dev/video%d\n", vfd->num);
+ ret = register_instance(dev, &dev->stateless_dec,
+ "stateless-decoder", false);
+ if (ret)
+ goto unreg_sf_dec;
- dev->dec_vfd = vicodec_videodev;
- vfd = &dev->dec_vfd;
- vfd->lock = &dev->dec_mutex;
- vfd->v4l2_dev = &dev->v4l2_dev;
- vfd->device_caps = V4L2_CAP_STREAMING |
- (multiplanar ? V4L2_CAP_VIDEO_M2M_MPLANE : V4L2_CAP_VIDEO_M2M);
- strscpy(vfd->name, "vicodec-dec", sizeof(vfd->name));
- v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
- v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
- video_set_drvdata(vfd, dev);
-
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ ret = v4l2_m2m_register_media_controller(dev->stateful_enc.m2m_dev,
+ &dev->stateful_enc.vfd,
+ MEDIA_ENT_F_PROC_VIDEO_ENCODER);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto unreg_enc;
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller for enc\n");
+ goto unreg_m2m;
}
- v4l2_info(&dev->v4l2_dev,
- "Device registered as /dev/video%d\n", vfd->num);
-#ifdef CONFIG_MEDIA_CONTROLLER
- ret = v4l2_m2m_register_media_controller(dev->enc_dev,
- &dev->enc_vfd, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+ ret = v4l2_m2m_register_media_controller(dev->stateful_dec.m2m_dev,
+ &dev->stateful_dec.vfd,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
- goto unreg_m2m;
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller for dec\n");
+ goto unreg_m2m_sf_enc_mc;
}
- ret = v4l2_m2m_register_media_controller(dev->dec_dev,
- &dev->dec_vfd, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ ret = v4l2_m2m_register_media_controller(dev->stateless_dec.m2m_dev,
+ &dev->stateless_dec.vfd,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
- goto unreg_m2m_enc_mc;
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller for stateless dec\n");
+ goto unreg_m2m_sf_dec_mc;
}
ret = media_device_register(&dev->mdev);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
- goto unreg_m2m_dec_mc;
+ goto unreg_m2m_sl_dec_mc;
}
#endif
return 0;
#ifdef CONFIG_MEDIA_CONTROLLER
-unreg_m2m_dec_mc:
- v4l2_m2m_unregister_media_controller(dev->dec_dev);
-unreg_m2m_enc_mc:
- v4l2_m2m_unregister_media_controller(dev->enc_dev);
+unreg_m2m_sl_dec_mc:
+ v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev);
+unreg_m2m_sf_dec_mc:
+ v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev);
+unreg_m2m_sf_enc_mc:
+ v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev);
unreg_m2m:
- video_unregister_device(&dev->dec_vfd);
+ video_unregister_device(&dev->stateless_dec.vfd);
+ v4l2_m2m_release(dev->stateless_dec.m2m_dev);
#endif
-unreg_enc:
- video_unregister_device(&dev->enc_vfd);
-err_dec_m2m:
- v4l2_m2m_release(dev->dec_dev);
-err_enc_m2m:
- v4l2_m2m_release(dev->enc_dev);
+unreg_sf_dec:
+ video_unregister_device(&dev->stateful_dec.vfd);
+ v4l2_m2m_release(dev->stateful_dec.m2m_dev);
+unreg_sf_enc:
+ video_unregister_device(&dev->stateful_enc.vfd);
+ v4l2_m2m_release(dev->stateful_enc.m2m_dev);
unreg_dev:
v4l2_device_unregister(&dev->v4l2_dev);
@@ -1788,15 +2114,17 @@ static int vicodec_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
- v4l2_m2m_unregister_media_controller(dev->enc_dev);
- v4l2_m2m_unregister_media_controller(dev->dec_dev);
+ v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev);
+ v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev);
+ v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev);
media_device_cleanup(&dev->mdev);
#endif
- v4l2_m2m_release(dev->enc_dev);
- v4l2_m2m_release(dev->dec_dev);
- video_unregister_device(&dev->enc_vfd);
- video_unregister_device(&dev->dec_vfd);
+ v4l2_m2m_release(dev->stateful_enc.m2m_dev);
+ v4l2_m2m_release(dev->stateful_dec.m2m_dev);
+ video_unregister_device(&dev->stateful_enc.vfd);
+ video_unregister_device(&dev->stateful_dec.vfd);
+ video_unregister_device(&dev->stateless_dec.vfd);
v4l2_device_unregister(&dev->v4l2_dev);
return 0;
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 0ba30756e1e4..d8cd5f5cb10d 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -419,9 +419,14 @@ static int video_mux_probe(struct platform_device *pdev)
vmux->active = -1;
vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads),
GFP_KERNEL);
+ if (!vmux->pads)
+ return -ENOMEM;
+
vmux->format_mbus = devm_kcalloc(dev, num_pads,
sizeof(*vmux->format_mbus),
GFP_KERNEL);
+ if (!vmux->format_mbus)
+ return -ENOMEM;
for (i = 0; i < num_pads; i++) {
vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 34dcaca45d8b..243c82b5d537 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -302,7 +302,7 @@ static void copy_two_pixels(struct vim2m_q_data *q_data_in,
switch (in->fourcc) {
case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */
for (i = 0; i < 2; i++) {
- u16 pix = *(u16 *)(src[i]);
+ u16 pix = le16_to_cpu(*(__le16 *)(src[i]));
*r++ = (u8)(((pix & 0xf800) >> 11) << 3) | 0x07;
*g++ = (u8)((((pix & 0x07e0) >> 5)) << 2) | 0x03;
@@ -311,12 +311,11 @@ static void copy_two_pixels(struct vim2m_q_data *q_data_in,
break;
case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */
for (i = 0; i < 2; i++) {
- u16 pix = *(u16 *)(src[i]);
+ u16 pix = be16_to_cpu(*(__be16 *)(src[i]));
- *r++ = (u8)(((0x00f8 & pix) >> 3) << 3) | 0x07;
- *g++ = (u8)(((pix & 0x7) << 2) |
- ((pix & 0xe000) >> 5)) | 0x03;
- *b++ = (u8)(((pix & 0x1f00) >> 8) << 3) | 0x07;
+ *r++ = (u8)(((pix & 0xf800) >> 11) << 3) | 0x07;
+ *g++ = (u8)((((pix & 0x07e0) >> 5)) << 2) | 0x03;
+ *b++ = (u8)((pix & 0x1f) << 3) | 0x07;
}
break;
default:
@@ -345,21 +344,26 @@ static void copy_two_pixels(struct vim2m_q_data *q_data_in,
switch (out->fourcc) {
case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */
for (i = 0; i < 2; i++) {
- u16 *pix = (u16 *)*dst;
+ u16 pix;
+ __le16 *dst_pix = (__le16 *)*dst;
+
+ pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) |
+ (*b >> 3);
- *pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) |
- (*b >> 3);
+ *dst_pix = cpu_to_le16(pix);
*dst += 2;
}
return;
case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */
for (i = 0; i < 2; i++) {
- u16 *pix = (u16 *)*dst;
- u8 green = *g++ >> 2;
+ u16 pix;
+ __be16 *dst_pix = (__be16 *)*dst;
- *pix = ((green << 8) & 0xe000) | (green & 0x07) |
- ((*b++ << 5) & 0x1f00) | ((*r++ & 0xf8));
+ pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) |
+ (*b >> 3);
+
+ *dst_pix = cpu_to_be16(pix);
*dst += 2;
}
@@ -655,8 +659,8 @@ static void device_work(struct work_struct *w)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
- strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ strscpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
+ strscpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", MEM2MEM_NAME);
return 0;
@@ -1262,6 +1266,15 @@ static int vim2m_release(struct file *file)
return 0;
}
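+/* Release callback: undo v4l2/m2m registration and free the device state */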
+static void vim2m_device_release(struct video_device *vdev)
+{
+ struct vim2m_dev *dev = container_of(vdev, struct vim2m_dev, vfd);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ v4l2_m2m_release(dev->m2m_dev);
+ kfree(dev);
+}
+
static const struct v4l2_file_operations vim2m_fops = {
.owner = THIS_MODULE,
.open = vim2m_open,
@@ -1277,7 +1290,7 @@ static const struct video_device vim2m_videodev = {
.fops = &vim2m_fops,
.ioctl_ops = &vim2m_ioctl_ops,
.minor = -1,
- .release = video_device_release_empty,
+ .release = vim2m_device_release,
.device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
@@ -1298,13 +1311,13 @@ static int vim2m_probe(struct platform_device *pdev)
struct video_device *vfd;
int ret;
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
- return ret;
+ goto error_free;
atomic_set(&dev->num_inst, 0);
mutex_init(&dev->dev_mutex);
@@ -1317,7 +1330,7 @@ static int vim2m_probe(struct platform_device *pdev)
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto unreg_v4l2;
+ goto error_v4l2;
}
video_set_drvdata(vfd, dev);
@@ -1330,7 +1343,7 @@ static int vim2m_probe(struct platform_device *pdev)
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
- goto unreg_dev;
+ goto error_dev;
}
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -1346,27 +1359,29 @@ static int vim2m_probe(struct platform_device *pdev)
MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
- goto unreg_m2m;
+ goto error_m2m;
}
ret = media_device_register(&dev->mdev);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
- goto unreg_m2m_mc;
+ goto error_m2m_mc;
}
#endif
return 0;
#ifdef CONFIG_MEDIA_CONTROLLER
-unreg_m2m_mc:
+error_m2m_mc:
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
-unreg_m2m:
+error_m2m:
v4l2_m2m_release(dev->m2m_dev);
#endif
-unreg_dev:
+error_dev:
video_unregister_device(&dev->vfd);
-unreg_v4l2:
+error_v4l2:
v4l2_device_unregister(&dev->v4l2_dev);
+error_free:
+ kfree(dev);
return ret;
}
@@ -1382,9 +1397,7 @@ static int vim2m_remove(struct platform_device *pdev)
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
media_device_cleanup(&dev->mdev);
#endif
- v4l2_m2m_release(dev->m2m_dev);
video_unregister_device(&dev->vfd);
- v4l2_device_unregister(&dev->v4l2_dev);
return 0;
}
diff --git a/drivers/media/platform/vimc/Kconfig b/drivers/media/platform/vimc/Kconfig
index 71c9fe7d3370..1de9bc9aa49b 100644
--- a/drivers/media/platform/vimc/Kconfig
+++ b/drivers/media/platform/vimc/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_VIMC
select VIDEOBUF2_VMALLOC
select VIDEO_V4L2_TPG
default n
- ---help---
+ help
Skeleton driver for Virtual Media Controller
This driver can be compared to the vivid driver for emulating
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index ea869631a3f6..e7d0fc2228a6 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -28,6 +28,32 @@
#define VIMC_CAP_DRV_NAME "vimc-capture"
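+/* Pixel formats supported by the vimc capture node */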
+static const u32 vimc_cap_supported_pixfmt[] = {
+ V4L2_PIX_FMT_BGR24,
+ V4L2_PIX_FMT_RGB24,
+ V4L2_PIX_FMT_ARGB32,
+ V4L2_PIX_FMT_SBGGR8,
+ V4L2_PIX_FMT_SGBRG8,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_PIX_FMT_SRGGB8,
+ V4L2_PIX_FMT_SBGGR10,
+ V4L2_PIX_FMT_SGBRG10,
+ V4L2_PIX_FMT_SGRBG10,
+ V4L2_PIX_FMT_SRGGB10,
+ V4L2_PIX_FMT_SBGGR10ALAW8,
+ V4L2_PIX_FMT_SGBRG10ALAW8,
+ V4L2_PIX_FMT_SGRBG10ALAW8,
+ V4L2_PIX_FMT_SRGGB10ALAW8,
+ V4L2_PIX_FMT_SBGGR10DPCM8,
+ V4L2_PIX_FMT_SGBRG10DPCM8,
+ V4L2_PIX_FMT_SGRBG10DPCM8,
+ V4L2_PIX_FMT_SRGGB10DPCM8,
+ V4L2_PIX_FMT_SBGGR12,
+ V4L2_PIX_FMT_SGBRG12,
+ V4L2_PIX_FMT_SGRBG12,
+ V4L2_PIX_FMT_SRGGB12,
+};
+
struct vimc_cap_device {
struct vimc_ent_device ved;
struct video_device vdev;
@@ -101,29 +127,25 @@ static int vimc_cap_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format *format = &f->fmt.pix;
- const struct vimc_pix_map *vpix;
format->width = clamp_t(u32, format->width, VIMC_FRAME_MIN_WIDTH,
VIMC_FRAME_MAX_WIDTH) & ~1;
format->height = clamp_t(u32, format->height, VIMC_FRAME_MIN_HEIGHT,
VIMC_FRAME_MAX_HEIGHT) & ~1;
- /* Don't accept a pixelformat that is not on the table */
- vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
- if (!vpix) {
- format->pixelformat = fmt_default.pixelformat;
- vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
- }
- /* TODO: Add support for custom bytesperline values */
- format->bytesperline = format->width * vpix->bpp;
- format->sizeimage = format->bytesperline * format->height;
+ vimc_colorimetry_clamp(format);
if (format->field == V4L2_FIELD_ANY)
format->field = fmt_default.field;
- vimc_colorimetry_clamp(format);
+ /* TODO: Add support for custom bytesperline values */
- return 0;
+ /* Don't accept a pixelformat that is not on the table */
+ if (!v4l2_format_info(format->pixelformat))
+ format->pixelformat = fmt_default.pixelformat;
+
+ return v4l2_fill_pixfmt(format, format->pixelformat,
+ format->width, format->height);
}
static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
@@ -159,27 +181,31 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
static int vimc_cap_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- const struct vimc_pix_map *vpix = vimc_pix_map_by_index(f->index);
-
- if (!vpix)
+ if (f->index >= ARRAY_SIZE(vimc_cap_supported_pixfmt))
return -EINVAL;
- f->pixelformat = vpix->pixelformat;
+ f->pixelformat = vimc_cap_supported_pixfmt[f->index];
return 0;
}
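+/* Check whether the capture node supports the given pixel format */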
+static bool vimc_cap_is_pixfmt_supported(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_cap_supported_pixfmt); i++)
+ if (vimc_cap_supported_pixfmt[i] == pixelformat)
+ return true;
+ return false;
+}
+
static int vimc_cap_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- const struct vimc_pix_map *vpix;
-
if (fsize->index)
return -EINVAL;
- /* Only accept code in the pix map table */
- vpix = vimc_pix_map_by_code(fsize->pixel_format);
- if (!vpix)
+ if (!vimc_cap_is_pixfmt_supported(fsize->pixel_format))
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
@@ -187,8 +213,8 @@ static int vimc_cap_enum_framesizes(struct file *file, void *fh,
fsize->stepwise.max_width = VIMC_FRAME_MAX_WIDTH;
fsize->stepwise.min_height = VIMC_FRAME_MIN_HEIGHT;
fsize->stepwise.max_height = VIMC_FRAME_MAX_HEIGHT;
- fsize->stepwise.step_width = 2;
- fsize->stepwise.step_height = 2;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
return 0;
}
@@ -253,6 +279,7 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
return ret;
}
+ vcap->stream.producer_pixfmt = vcap->format.pixelformat;
ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
if (ret) {
media_pipeline_stop(entity);
@@ -338,6 +365,15 @@ static const struct media_entity_operations vimc_cap_mops = {
.link_validate = vimc_link_validate,
};
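+/* Video device release callback: free the pads and the capture device */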
+static void vimc_cap_release(struct video_device *vdev)
+{
+ struct vimc_cap_device *vcap =
+ container_of(vdev, struct vimc_cap_device, vdev);
+
+ vimc_pads_cleanup(vcap->ved.pads);
+ kfree(vcap);
+}
+
static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
void *master_data)
{
@@ -348,8 +384,6 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
vb2_queue_release(&vcap->queue);
media_entity_cleanup(ved->ent);
video_unregister_device(&vcap->vdev);
- vimc_pads_cleanup(vcap->ved.pads);
- kfree(vcap);
}
static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
@@ -396,7 +430,6 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
{
struct v4l2_device *v4l2_dev = master_data;
struct vimc_platform_data *pdata = comp->platform_data;
- const struct vimc_pix_map *vpix;
struct vimc_cap_device *vcap;
struct video_device *vdev;
struct vb2_queue *q;
@@ -451,10 +484,8 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
/* Set default frame format */
vcap->format = fmt_default;
- vpix = vimc_pix_map_by_pixelformat(vcap->format.pixelformat);
- vcap->format.bytesperline = vcap->format.width * vpix->bpp;
- vcap->format.sizeimage = vcap->format.bytesperline *
- vcap->format.height;
+ v4l2_fill_pixfmt(&vcap->format, vcap->format.pixelformat,
+ vcap->format.width, vcap->format.height);
/* Fill the vimc_ent_device struct */
vcap->ved.ent = &vcap->vdev.entity;
@@ -467,7 +498,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vdev = &vcap->vdev;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
vdev->entity.ops = &vimc_cap_mops;
- vdev->release = video_device_release_empty;
+ vdev->release = vimc_cap_release;
vdev->fops = &vimc_cap_fops;
vdev->ioctl_ops = &vimc_cap_ioctl_ops;
vdev->lock = &vcap->lock;
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index c1a74bb2df58..fad7c7c6de93 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -20,192 +20,139 @@
#include "vimc-common.h"
-/*
- * NOTE: non-bayer formats need to come first (necessary for enum_mbus_code
- * in the scaler)
- */
-static const struct vimc_pix_map vimc_pix_map_list[] = {
- /* TODO: add all missing formats */
-
- /* RGB formats */
- {
- .code = MEDIA_BUS_FMT_BGR888_1X24,
- .pixelformat = V4L2_PIX_FMT_BGR24,
- .bpp = 3,
- .bayer = false,
- },
- {
- .code = MEDIA_BUS_FMT_RGB888_1X24,
- .pixelformat = V4L2_PIX_FMT_RGB24,
- .bpp = 3,
- .bayer = false,
- },
- {
- .code = MEDIA_BUS_FMT_ARGB8888_1X32,
- .pixelformat = V4L2_PIX_FMT_ARGB32,
- .bpp = 4,
- .bayer = false,
- },
-
- /* Bayer formats */
- {
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGBRG8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGRBG8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .pixelformat = V4L2_PIX_FMT_SRGGB8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .pixelformat = V4L2_PIX_FMT_SBGGR10,
- .bpp = 2,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .pixelformat = V4L2_PIX_FMT_SGBRG10,
- .bpp = 2,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .pixelformat = V4L2_PIX_FMT_SGRBG10,
- .bpp = 2,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .pixelformat = V4L2_PIX_FMT_SRGGB10,
- .bpp = 2,
- .bayer = true,
- },
-
- /* 10bit raw bayer a-law compressed to 8 bits */
- {
- .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
- .pixelformat = V4L2_PIX_FMT_SBGGR10ALAW8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGBRG10ALAW8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
- .pixelformat = V4L2_PIX_FMT_SRGGB10ALAW8,
- .bpp = 1,
- .bayer = true,
- },
-
- /* 10bit raw bayer DPCM compressed to 8 bits */
- {
- .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
- .pixelformat = V4L2_PIX_FMT_SBGGR10DPCM8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGBRG10DPCM8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
- .pixelformat = V4L2_PIX_FMT_SRGGB10DPCM8,
- .bpp = 1,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .pixelformat = V4L2_PIX_FMT_SBGGR12,
- .bpp = 2,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .pixelformat = V4L2_PIX_FMT_SGBRG12,
- .bpp = 2,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .pixelformat = V4L2_PIX_FMT_SGRBG12,
- .bpp = 2,
- .bayer = true,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .pixelformat = V4L2_PIX_FMT_SRGGB12,
- .bpp = 2,
- .bayer = true,
- },
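+/* Media bus codes known to the vimc entities */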
+static const __u32 vimc_mbus_list[] = {
+ MEDIA_BUS_FMT_FIXED,
+ MEDIA_BUS_FMT_RGB444_1X12,
+ MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_BGR565_2X8_BE,
+ MEDIA_BUS_FMT_BGR565_2X8_LE,
+ MEDIA_BUS_FMT_RGB565_2X8_BE,
+ MEDIA_BUS_FMT_RGB565_2X8_LE,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RBG888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
+ MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_GBR888_1X24,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ MEDIA_BUS_FMT_RGB101010_1X30,
+ MEDIA_BUS_FMT_RGB121212_1X36,
+ MEDIA_BUS_FMT_RGB161616_1X48,
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_UV8_1X8,
+ MEDIA_BUS_FMT_UYVY8_1_5X8,
+ MEDIA_BUS_FMT_VYUY8_1_5X8,
+ MEDIA_BUS_FMT_YUYV8_1_5X8,
+ MEDIA_BUS_FMT_YVYU8_1_5X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
+ MEDIA_BUS_FMT_UYVY10_2X10,
+ MEDIA_BUS_FMT_VYUY10_2X10,
+ MEDIA_BUS_FMT_YUYV10_2X10,
+ MEDIA_BUS_FMT_YVYU10_2X10,
+ MEDIA_BUS_FMT_Y12_1X12,
+ MEDIA_BUS_FMT_UYVY12_2X12,
+ MEDIA_BUS_FMT_VYUY12_2X12,
+ MEDIA_BUS_FMT_YUYV12_2X12,
+ MEDIA_BUS_FMT_YVYU12_2X12,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYUY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YVYU8_1X16,
+ MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ MEDIA_BUS_FMT_UYVY10_1X20,
+ MEDIA_BUS_FMT_VYUY10_1X20,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YVYU10_1X20,
+ MEDIA_BUS_FMT_VUY8_1X24,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ MEDIA_BUS_FMT_UYVY12_1X24,
+ MEDIA_BUS_FMT_VYUY12_1X24,
+ MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_YVYU12_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
+ MEDIA_BUS_FMT_UYYVYY10_0_5X30,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_UYYVYY12_0_5X36,
+ MEDIA_BUS_FMT_YUV12_1X36,
+ MEDIA_BUS_FMT_YUV16_1X48,
+ MEDIA_BUS_FMT_UYYVYY16_0_5X48,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ MEDIA_BUS_FMT_SBGGR16_1X16,
+ MEDIA_BUS_FMT_SGBRG16_1X16,
+ MEDIA_BUS_FMT_SGRBG16_1X16,
+ MEDIA_BUS_FMT_SRGGB16_1X16,
+ MEDIA_BUS_FMT_JPEG_1X8,
+ MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
};
-const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
-{
- if (i >= ARRAY_SIZE(vimc_pix_map_list))
- return NULL;
-
- return &vimc_pix_map_list[i];
-}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_index);
-
-const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
+/* Helper function to check mbus codes */
+bool vimc_mbus_code_supported(__u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
- if (vimc_pix_map_list[i].code == code)
- return &vimc_pix_map_list[i];
- }
- return NULL;
+ for (i = 0; i < ARRAY_SIZE(vimc_mbus_list); i++)
+ if (code == vimc_mbus_list[i])
+ return true;
+ return false;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_code);
+EXPORT_SYMBOL_GPL(vimc_mbus_code_supported);
-const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
+/* Helper function to enumerate mbus codes */
+int vimc_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
{
- unsigned int i;
+ if (code->index >= ARRAY_SIZE(vimc_mbus_list))
+ return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
- if (vimc_pix_map_list[i].pixelformat == pixelformat)
- return &vimc_pix_map_list[i];
- }
- return NULL;
+ code->code = vimc_mbus_list[code->index];
+ return 0;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
+EXPORT_SYMBOL_GPL(vimc_enum_mbus_code);
/* Helper function to allocate and initialize pads */
struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
@@ -277,15 +224,13 @@ static int vimc_get_mbus_format(struct media_pad *pad,
struct video_device,
entity);
struct vimc_ent_device *ved = video_get_drvdata(vdev);
- const struct vimc_pix_map *vpix;
struct v4l2_pix_format vdev_fmt;
if (!ved->vdev_get_format)
return -ENOIOCTLCMD;
ved->vdev_get_format(ved, &vdev_fmt);
- vpix = vimc_pix_map_by_pixelformat(vdev_fmt.pixelformat);
- v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, vpix->code);
+ v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, 0);
} else {
return -EINVAL;
}
@@ -325,8 +270,12 @@ int vimc_link_validate(struct media_link *link)
/* The width, height and code must match. */
if (source_fmt.format.width != sink_fmt.format.width
|| source_fmt.format.height != sink_fmt.format.height
- || source_fmt.format.code != sink_fmt.format.code)
+ || (source_fmt.format.code && sink_fmt.format.code &&
+ source_fmt.format.code != sink_fmt.format.code)) {
+ pr_err("vimc: format doesn't match in link %s->%s\n",
+ link->source->entity->name, link->sink->entity->name);
return -EPIPE;
+ }
/*
* The field order must match, or the sink field order must be NONE
@@ -380,6 +329,7 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
u32 function,
u16 num_pads,
const unsigned long *pads_flag,
+ const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops)
{
int ret;
@@ -394,6 +344,7 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
/* Initialize the subdev */
v4l2_subdev_init(sd, sd_ops);
+ sd->internal_ops = sd_int_ops;
sd->entity.function = function;
sd->entity.ops = &vimc_ent_sd_mops;
sd->owner = THIS_MODULE;
@@ -431,9 +382,9 @@ EXPORT_SYMBOL_GPL(vimc_ent_sd_register);
void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
{
- v4l2_device_unregister_subdev(sd);
media_entity_cleanup(ved->ent);
vimc_pads_cleanup(ved->pads);
+ v4l2_device_unregister_subdev(sd);
}
EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 84539430b5e7..142ddfa69b3d 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -22,6 +22,8 @@
#include <media/media-device.h>
#include <media/v4l2-device.h>
+#include "vimc-streamer.h"
+
#define VIMC_PDEV_NAME "vimc"
/* VIMC-specific controls */
@@ -78,23 +80,6 @@ struct vimc_platform_data {
};
/**
- * struct vimc_pix_map - maps media bus code with v4l2 pixel format
- *
- * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
- * @bbp: number of bytes each pixel occupies
- * @pixelformat: pixel format devined by V4L2_PIX_FMT_* macros
- *
- * Struct which matches the MEDIA_BUS_FMT_* codes with the corresponding
- * V4L2_PIX_FMT_* fourcc pixelformat and its bytes per pixel (bpp)
- */
-struct vimc_pix_map {
- unsigned int code;
- unsigned int bpp;
- u32 pixelformat;
- bool bayer;
-};
-
-/**
* struct vimc_ent_device - core struct that represents a node in the topology
*
* @ent: the pointer to struct media_entity for the node
@@ -115,6 +100,7 @@ struct vimc_pix_map {
struct vimc_ent_device {
struct media_entity *ent;
struct media_pad *pads;
+ struct vimc_stream *stream;
void * (*process_frame)(struct vimc_ent_device *ved,
const void *frame);
void (*vdev_get_format)(struct vimc_ent_device *ved,
@@ -122,6 +108,23 @@ struct vimc_ent_device {
};
/**
+ * vimc_mbus_code_supported - helper to check supported mbus codes
+ *
+ * Helper function to check if mbus code is enumerated by vimc_enum_mbus_code()
+ */
+bool vimc_mbus_code_supported(__u32 code);
+
+/**
+ * vimc_enum_mbus_code - enumerate mbus codes
+ *
+ * Helper function to be plugged into .enum_mbus_code of
+ * struct v4l2_subdev_pad_ops.
+ */
+int vimc_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code);
+
+/**
* vimc_pads_init - initialize pads
*
* @num_pads: number of pads to initialize
@@ -156,27 +159,6 @@ static inline void vimc_pads_cleanup(struct media_pad *pads)
int vimc_pipeline_s_stream(struct media_entity *ent, int enable);
/**
- * vimc_pix_map_by_index - get vimc_pix_map struct by its index
- *
- * @i: index of the vimc_pix_map struct in vimc_pix_map_list
- */
-const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i);
-
-/**
- * vimc_pix_map_by_code - get vimc_pix_map struct by media bus code
- *
- * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
- */
-const struct vimc_pix_map *vimc_pix_map_by_code(u32 code);
-
-/**
- * vimc_pix_map_by_pixelformat - get vimc_pix_map struct by v4l2 pixel format
- *
- * @pixelformat: pixel format devined by V4L2_PIX_FMT_* macros
- */
-const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
-
-/**
* vimc_ent_sd_register - initialize and register a subdev node
*
* @ved: the vimc_ent_device struct to be initialize
@@ -187,6 +169,7 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
* @function: media entity function defined by MEDIA_ENT_F_* macros
* @num_pads: number of pads to initialize
* @pads_flag: flags to use in each pad
+ * @sd_int_ops: pointer to &struct v4l2_subdev_internal_ops
* @sd_ops: pointer to &struct v4l2_subdev_ops.
*
* Helper function initialize and register the struct vimc_ent_device and struct
@@ -199,6 +182,7 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
u32 function,
u16 num_pads,
const unsigned long *pads_flag,
+ const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops);
/**
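The new sd_int_ops argument lets a node free its private data from the subdev .release hook rather than from its component unbind callback, so the memory is released only when the last reference to the subdev is dropped. A minimal sketch (struct my_node and the my_node_* names are hypothetical):

static void my_node_release(struct v4l2_subdev *sd)
{
	struct my_node *node = container_of(sd, struct my_node, sd);

	kfree(node);
}

static const struct v4l2_subdev_internal_ops my_node_int_ops = {
	.release = my_node_release,
};

The bind callback then passes &my_node_int_ops as the sd_int_ops argument of vimc_ent_sd_register(), as the debayer, scaler and sensor entities do below.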
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 0fbb7914098f..3aa62d7e3d0e 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -304,6 +304,8 @@ static int vimc_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "probe");
+ memset(&vimc->mdev, 0, sizeof(vimc->mdev));
+
/* Create platform_device for each entity in the topology*/
vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
sizeof(*vimc->subdevs), GFP_KERNEL);
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 7d77c63b99d2..281f9c1a7249 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -26,6 +26,9 @@
#include "vimc-common.h"
#define VIMC_DEB_DRV_NAME "vimc-debayer"
+/* This module only supports transforming a bayer format to V4L2_PIX_FMT_RGB24 */
+#define VIMC_DEB_SRC_PIXFMT V4L2_PIX_FMT_RGB24
+#define VIMC_DEB_SRC_MBUS_FMT_DEFAULT MEDIA_BUS_FMT_RGB888_1X24
static unsigned int deb_mean_win_size = 3;
module_param(deb_mean_win_size, uint, 0000);
@@ -44,6 +47,7 @@ enum vimc_deb_rgb_colors {
};
struct vimc_deb_pix_map {
+ u32 pixelformat;
u32 code;
enum vimc_deb_rgb_colors order[2][2];
};
@@ -66,68 +70,80 @@ struct vimc_deb_device {
static const struct v4l2_mbus_framefmt sink_fmt_default = {
.width = 640,
.height = 480,
- .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
.field = V4L2_FIELD_NONE,
.colorspace = V4L2_COLORSPACE_DEFAULT,
};
static const struct vimc_deb_pix_map vimc_deb_pix_map_list[] = {
{
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_RED } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SGBRG8,
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
{ VIMC_DEB_RED, VIMC_DEB_GREEN } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SGRBG8,
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
{ VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SRGGB8,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_RED } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
.code = MEDIA_BUS_FMT_SGBRG10_1X10,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
{ VIMC_DEB_RED, VIMC_DEB_GREEN } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
{ VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SBGGR12,
.code = MEDIA_BUS_FMT_SBGGR12_1X12,
.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_RED } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SGBRG12,
.code = MEDIA_BUS_FMT_SGBRG12_1X12,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
{ VIMC_DEB_RED, VIMC_DEB_GREEN } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SGRBG12,
.code = MEDIA_BUS_FMT_SGRBG12_1X12,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
{ VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
},
{
+ .pixelformat = V4L2_PIX_FMT_SRGGB12,
.code = MEDIA_BUS_FMT_SRGGB12_1X12,
.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
@@ -168,41 +184,32 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
- /* We only support one format for source pads */
- if (IS_SRC(code->pad)) {
- struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
-
- if (code->index)
- return -EINVAL;
-
- code->code = vdeb->src_code;
- } else {
+ /* For the sink pad we only support codes in the map_list */
+ if (IS_SINK(code->pad)) {
if (code->index >= ARRAY_SIZE(vimc_deb_pix_map_list))
return -EINVAL;
code->code = vimc_deb_pix_map_list[code->index].code;
+ return 0;
}
- return 0;
+ return vimc_enum_mbus_code(sd, cfg, code);
}
static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
-
if (fse->index)
return -EINVAL;
+ /* For the sink pad we only support codes in the map_list */
if (IS_SINK(fse->pad)) {
const struct vimc_deb_pix_map *vpix =
vimc_deb_pix_map_by_code(fse->code);
if (!vpix)
return -EINVAL;
- } else if (fse->code != vdeb->src_code) {
- return -EINVAL;
}
fse->min_width = VIMC_FRAME_MIN_WIDTH;
@@ -258,6 +265,9 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
+ if (!vimc_mbus_code_supported(fmt->format.code))
+ fmt->format.code = sink_fmt_default.code;
+
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
if (vdeb->src_frame)
@@ -270,11 +280,11 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
/*
* Do not change the format of the source pad,
- * it is propagated from the sink
+ * it is propagated from the sink (except for the code)
*/
if (IS_SRC(fmt->pad)) {
+ vdeb->src_code = fmt->format.code;
fmt->format = *sink_fmt;
- /* TODO: Add support for other formats */
fmt->format.code = vdeb->src_code;
} else {
/* Set the new format in the sink pad */
@@ -306,7 +316,7 @@ static const struct v4l2_subdev_pad_ops vimc_deb_pad_ops = {
.set_fmt = vimc_deb_set_fmt,
};
-static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
+static void vimc_deb_set_rgb_pix_rgb24(struct vimc_deb_device *vdeb,
unsigned int lin,
unsigned int col,
unsigned int rgb[3])
@@ -323,25 +333,38 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
if (enable) {
- const struct vimc_pix_map *vpix;
+ u32 src_pixelformat = vdeb->ved.stream->producer_pixfmt;
+ const struct v4l2_format_info *pix_info;
unsigned int frame_size;
if (vdeb->src_frame)
return 0;
- /* Calculate the frame size of the source pad */
- vpix = vimc_pix_map_by_code(vdeb->src_code);
- frame_size = vdeb->sink_fmt.width * vdeb->sink_fmt.height *
- vpix->bpp;
-
- /* Save the bytes per pixel of the sink */
- vpix = vimc_pix_map_by_code(vdeb->sink_fmt.code);
- vdeb->sink_bpp = vpix->bpp;
+ /* We only support translating bayer to RGB24 */
+ if (src_pixelformat != V4L2_PIX_FMT_RGB24) {
+ dev_err(vdeb->dev,
+ "translating to pixfmt (0x%08x) is not supported\n",
+ src_pixelformat);
+ return -EINVAL;
+ }
/* Get the corresponding pixel map from the table */
vdeb->sink_pix_map =
vimc_deb_pix_map_by_code(vdeb->sink_fmt.code);
+ /* Request bayer format from the pipeline for the sink pad */
+ vdeb->ved.stream->producer_pixfmt =
+ vdeb->sink_pix_map->pixelformat;
+
+ /* Calculate frame_size of the source */
+ pix_info = v4l2_format_info(src_pixelformat);
+ frame_size = vdeb->sink_fmt.width * vdeb->sink_fmt.height *
+ pix_info->bpp[0];
+
+ /* Get bpp from the sink */
+ pix_info = v4l2_format_info(vdeb->sink_pix_map->pixelformat);
+ vdeb->sink_bpp = pix_info->bpp[0];
+
/*
* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
@@ -489,6 +512,18 @@ static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
}
+static void vimc_deb_release(struct v4l2_subdev *sd)
+{
+ struct vimc_deb_device *vdeb =
+ container_of(sd, struct vimc_deb_device, sd);
+
+ kfree(vdeb);
+}
+
+static const struct v4l2_subdev_internal_ops vimc_deb_int_ops = {
+ .release = vimc_deb_release,
+};
+
static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
void *master_data)
{
@@ -497,7 +532,6 @@ static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
ved);
vimc_ent_sd_unregister(ved, &vdeb->sd);
- kfree(vdeb);
}
static int vimc_deb_comp_bind(struct device *comp, struct device *master,
@@ -519,7 +553,7 @@ static int vimc_deb_comp_bind(struct device *comp, struct device *master,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
(const unsigned long[2]) {MEDIA_PAD_FL_SINK,
MEDIA_PAD_FL_SOURCE},
- &vimc_deb_ops);
+ &vimc_deb_int_ops, &vimc_deb_ops);
if (ret) {
kfree(vdeb);
return ret;
@@ -531,14 +565,14 @@ static int vimc_deb_comp_bind(struct device *comp, struct device *master,
/* Initialize the frame format */
vdeb->sink_fmt = sink_fmt_default;
+ vdeb->src_code = VIMC_DEB_SRC_MBUS_FMT_DEFAULT;
/*
* TODO: Add support for more output formats, we only support
- * RGB888 for now
+ * RGB24 for now.
* NOTE: the src format is always the same as the sink, except
* for the code
*/
- vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
- vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
+ vdeb->set_rgb_src = vimc_deb_set_rgb_pix_rgb24;
return 0;
}
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index 39b2a73dfcc1..8aecf8e92031 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -35,6 +35,12 @@ MODULE_PARM_DESC(sca_mult, " the image size multiplier");
#define IS_SRC(pad) (pad)
#define MAX_ZOOM 8
+static const u32 vimc_sca_supported_pixfmt[] = {
+ V4L2_PIX_FMT_BGR24,
+ V4L2_PIX_FMT_RGB24,
+ V4L2_PIX_FMT_ARGB32,
+};
+
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
@@ -57,6 +63,16 @@ static const struct v4l2_mbus_framefmt sink_fmt_default = {
.colorspace = V4L2_COLORSPACE_DEFAULT,
};
+static bool vimc_sca_is_pixfmt_supported(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_sca_supported_pixfmt); i++)
+ if (vimc_sca_supported_pixfmt[i] == pixelformat)
+ return true;
+ return false;
+}
+
static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg)
{
@@ -76,35 +92,13 @@ static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
return 0;
}
-static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
-
- /* We don't support bayer format */
- if (!vpix || vpix->bayer)
- return -EINVAL;
-
- code->code = vpix->code;
-
- return 0;
-}
-
static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- const struct vimc_pix_map *vpix;
-
if (fse->index)
return -EINVAL;
- /* Only accept code in the pix map table in non bayer format */
- vpix = vimc_pix_map_by_code(fse->code);
- if (!vpix || vpix->bayer)
- return -EINVAL;
-
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
@@ -141,13 +135,6 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
{
- const struct vimc_pix_map *vpix;
-
- /* Only accept code in the pix map table in non bayer format */
- vpix = vimc_pix_map_by_code(fmt->code);
- if (!vpix || vpix->bayer)
- fmt->code = sink_fmt_default.code;
-
fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
VIMC_FRAME_MAX_WIDTH) & ~1;
fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
@@ -166,6 +153,9 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
+ if (!vimc_mbus_code_supported(fmt->format.code))
+ fmt->format.code = sink_fmt_default.code;
+
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
if (vsca->src_frame)
@@ -208,7 +198,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
.init_cfg = vimc_sca_init_cfg,
- .enum_mbus_code = vimc_sca_enum_mbus_code,
+ .enum_mbus_code = vimc_enum_mbus_code,
.enum_frame_size = vimc_sca_enum_frame_size,
.get_fmt = vimc_sca_get_fmt,
.set_fmt = vimc_sca_set_fmt,
@@ -219,15 +209,22 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
if (enable) {
- const struct vimc_pix_map *vpix;
+ u32 pixelformat = vsca->ved.stream->producer_pixfmt;
+ const struct v4l2_format_info *pix_info;
unsigned int frame_size;
if (vsca->src_frame)
return 0;
+ if (!vimc_sca_is_pixfmt_supported(pixelformat)) {
+ dev_err(vsca->dev, "pixfmt (0x%08x) is not supported\n",
+ pixelformat);
+ return -EINVAL;
+ }
+
/* Save the bytes per pixel of the sink */
- vpix = vimc_pix_map_by_code(vsca->sink_fmt.code);
- vsca->bpp = vpix->bpp;
+ pix_info = v4l2_format_info(pixelformat);
+ vsca->bpp = pix_info->bpp[0];
/* Calculate the width in bytes of the src frame */
vsca->src_line_size = vsca->sink_fmt.width *
@@ -348,6 +345,18 @@ static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
return vsca->src_frame;
};
+static void vimc_sca_release(struct v4l2_subdev *sd)
+{
+ struct vimc_sca_device *vsca =
+ container_of(sd, struct vimc_sca_device, sd);
+
+ kfree(vsca);
+}
+
+static const struct v4l2_subdev_internal_ops vimc_sca_int_ops = {
+ .release = vimc_sca_release,
+};
+
static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
void *master_data)
{
@@ -356,7 +365,6 @@ static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
ved);
vimc_ent_sd_unregister(ved, &vsca->sd);
- kfree(vsca);
}
@@ -379,7 +387,7 @@ static int vimc_sca_comp_bind(struct device *comp, struct device *master,
MEDIA_ENT_F_PROC_VIDEO_SCALER, 2,
(const unsigned long[2]) {MEDIA_PAD_FL_SINK,
MEDIA_PAD_FL_SOURCE},
- &vimc_sca_ops);
+ &vimc_sca_int_ops, &vimc_sca_ops);
if (ret) {
kfree(vsca);
return ret;
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 59195f262623..081e54204c9f 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -65,34 +65,13 @@ static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
return 0;
}
-static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
-
- if (!vpix)
- return -EINVAL;
-
- code->code = vpix->code;
-
- return 0;
-}
-
static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- const struct vimc_pix_map *vpix;
-
if (fse->index)
return -EINVAL;
- /* Only accept code in the pix map table */
- vpix = vimc_pix_map_by_code(fse->code);
- if (!vpix)
- return -EINVAL;
-
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->max_width = VIMC_FRAME_MAX_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
@@ -117,14 +96,17 @@ static int vimc_sen_get_fmt(struct v4l2_subdev *sd,
static void vimc_sen_tpg_s_format(struct vimc_sen_device *vsen)
{
- const struct vimc_pix_map *vpix =
- vimc_pix_map_by_code(vsen->mbus_format.code);
+ u32 pixelformat = vsen->ved.stream->producer_pixfmt;
+ const struct v4l2_format_info *pix_info;
+
+ pix_info = v4l2_format_info(pixelformat);
tpg_reset_source(&vsen->tpg, vsen->mbus_format.width,
vsen->mbus_format.height, vsen->mbus_format.field);
- tpg_s_bytesperline(&vsen->tpg, 0, vsen->mbus_format.width * vpix->bpp);
+ tpg_s_bytesperline(&vsen->tpg, 0,
+ vsen->mbus_format.width * pix_info->bpp[0]);
tpg_s_buf_height(&vsen->tpg, vsen->mbus_format.height);
- tpg_s_fourcc(&vsen->tpg, vpix->pixelformat);
+ tpg_s_fourcc(&vsen->tpg, pixelformat);
/* TODO: add support for V4L2_FIELD_ALTERNATE */
tpg_s_field(&vsen->tpg, vsen->mbus_format.field, false);
tpg_s_colorspace(&vsen->tpg, vsen->mbus_format.colorspace);
@@ -135,13 +117,6 @@ static void vimc_sen_tpg_s_format(struct vimc_sen_device *vsen)
static void vimc_sen_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
{
- const struct vimc_pix_map *vpix;
-
- /* Only accept code in the pix map table */
- vpix = vimc_pix_map_by_code(fmt->code);
- if (!vpix)
- fmt->code = fmt_default.code;
-
fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
VIMC_FRAME_MAX_WIDTH) & ~1;
fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
@@ -161,6 +136,9 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
struct vimc_sen_device *vsen = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
+ if (!vimc_mbus_code_supported(fmt->format.code))
+ fmt->format.code = fmt_default.code;
+
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
if (vsen->frame)
@@ -193,7 +171,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
.init_cfg = vimc_sen_init_cfg,
- .enum_mbus_code = vimc_sen_enum_mbus_code,
+ .enum_mbus_code = vimc_enum_mbus_code,
.enum_frame_size = vimc_sen_enum_frame_size,
.get_fmt = vimc_sen_get_fmt,
.set_fmt = vimc_sen_set_fmt,
@@ -215,7 +193,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
container_of(sd, struct vimc_sen_device, sd);
if (enable) {
- const struct vimc_pix_map *vpix;
+ u32 pixelformat = vsen->ved.stream->producer_pixfmt;
+ const struct v4l2_format_info *pix_info;
unsigned int frame_size;
if (vsen->kthread_sen)
@@ -223,8 +202,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
/* Calculate the frame size */
- vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
- frame_size = vsen->mbus_format.width * vpix->bpp *
+ pix_info = v4l2_format_info(pixelformat);
+ frame_size = vsen->mbus_format.width * pix_info->bpp[0] *
vsen->mbus_format.height;
/*
@@ -301,6 +280,20 @@ static const struct v4l2_ctrl_ops vimc_sen_ctrl_ops = {
.s_ctrl = vimc_sen_s_ctrl,
};
+static void vimc_sen_release(struct v4l2_subdev *sd)
+{
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+
+ v4l2_ctrl_handler_free(&vsen->hdl);
+ tpg_free(&vsen->tpg);
+ kfree(vsen);
+}
+
+static const struct v4l2_subdev_internal_ops vimc_sen_int_ops = {
+ .release = vimc_sen_release,
+};
+
static void vimc_sen_comp_unbind(struct device *comp, struct device *master,
void *master_data)
{
@@ -309,9 +302,6 @@ static void vimc_sen_comp_unbind(struct device *comp, struct device *master,
container_of(ved, struct vimc_sen_device, ved);
vimc_ent_sd_unregister(ved, &vsen->sd);
- v4l2_ctrl_handler_free(&vsen->hdl);
- tpg_free(&vsen->tpg);
- kfree(vsen);
}
/* Image Processing Controls */
@@ -371,7 +361,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
pdata->entity_name,
MEDIA_ENT_F_CAM_SENSOR, 1,
(const unsigned long[1]) {MEDIA_PAD_FL_SOURCE},
- &vimc_sen_ops);
+ &vimc_sen_int_ops, &vimc_sen_ops);
if (ret)
goto err_free_hdl;
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index fcc897fb247b..26b674259489 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -46,19 +46,19 @@ static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
*/
static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
{
- struct media_entity *entity;
+ struct vimc_ent_device *ved;
struct v4l2_subdev *sd;
while (stream->pipe_size) {
stream->pipe_size--;
- entity = stream->ved_pipeline[stream->pipe_size]->ent;
- entity = vimc_get_source_entity(entity);
+ ved = stream->ved_pipeline[stream->pipe_size];
+ ved->stream = NULL;
stream->ved_pipeline[stream->pipe_size] = NULL;
- if (!is_media_entity_v4l2_subdev(entity))
+ if (!is_media_entity_v4l2_subdev(ved->ent))
continue;
- sd = media_entity_to_v4l2_subdev(entity);
+ sd = media_entity_to_v4l2_subdev(ved->ent);
v4l2_subdev_call(sd, video, s_stream, 0);
}
}
@@ -88,19 +88,27 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
return -EINVAL;
}
stream->ved_pipeline[stream->pipe_size++] = ved;
+ ved->stream = stream;
+
+ if (is_media_entity_v4l2_subdev(ved->ent)) {
+ sd = media_entity_to_v4l2_subdev(ved->ent);
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ pr_err("subdev_call error %s\n",
+ ved->ent->name);
+ vimc_streamer_pipeline_terminate(stream);
+ return ret;
+ }
+ }
entity = vimc_get_source_entity(ved->ent);
/* Check if the end of the pipeline was reached*/
if (!entity)
return 0;
+ /* Get the next device in the pipeline */
if (is_media_entity_v4l2_subdev(entity)) {
sd = media_entity_to_v4l2_subdev(entity);
- ret = v4l2_subdev_call(sd, video, s_stream, 1);
- if (ret && ret != -ENOIOCTLCMD) {
- vimc_streamer_pipeline_terminate(stream);
- return ret;
- }
ved = v4l2_get_subdevdata(sd);
} else {
vdev = container_of(entity,
@@ -117,10 +125,10 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
static int vimc_streamer_thread(void *data)
{
struct vimc_stream *stream = data;
+ u8 *frame = NULL;
int i;
set_freezable();
- set_current_state(TASK_UNINTERRUPTIBLE);
for (;;) {
try_to_freeze();
@@ -128,15 +136,13 @@ static int vimc_streamer_thread(void *data)
break;
for (i = stream->pipe_size - 1; i >= 0; i--) {
- stream->frame = stream->ved_pipeline[i]->process_frame(
- stream->ved_pipeline[i],
- stream->frame);
- if (!stream->frame)
- break;
- if (IS_ERR(stream->frame))
+ frame = stream->ved_pipeline[i]->process_frame(
+ stream->ved_pipeline[i], frame);
+ if (!frame || IS_ERR(frame))
break;
}
//wait for 60hz
+ set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 60);
}
diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
index 752af2e2d5a2..2b3667408794 100644
--- a/drivers/media/platform/vimc/vimc-streamer.h
+++ b/drivers/media/platform/vimc/vimc-streamer.h
@@ -15,12 +15,32 @@
#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
+/**
+ * struct vimc_stream - struct that represents a stream in the pipeline
+ *
+ * @pipe: the media pipeline object associated with this stream
+ * @ved_pipeline: array containing all the entities participating in the
+ * stream. The order is from a video device (usually a capture device) where
+ * stream_on was called, to the entity generating the first base image to be
+ * processed in the pipeline.
+ * @pipe_size: size of @ved_pipeline
+ * @kthread: thread that generates the frames of the stream.
+ * @producer_pixfmt: the pixel format requested from the pipeline. This must
+ * be set just before calling vimc_streamer_s_stream(ent, 1). This value is
+ * propagated up to the source of the base image (usually a sensor node) and
+ * can be modified by entities during the s_stream callback to request a
+ * different format from the rest of the pipeline.
+ *
+ * When the user calls stream_on on a video device, struct vimc_stream is
+ * used to keep track of all entities and subdevices that generate and
+ * process frames for the stream.
+ */
struct vimc_stream {
struct media_pipeline pipe;
struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
unsigned int pipe_size;
- u8 *frame;
struct task_struct *kthread;
+ u32 producer_pixfmt;
};
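To make the producer_pixfmt handshake above concrete, here is a sketch of a processing subdev renegotiating the format with its upstream in s_stream(1); struct my_node (assumed to embed a struct vimc_ent_device named ved), my_node_s_stream and the chosen pixel formats are illustrative assumptions, but the debayer entity follows the same pattern:

static int my_node_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct my_node *node = v4l2_get_subdevdata(sd);
	u32 requested = node->ved.stream->producer_pixfmt;

	if (!enable)
		return 0;

	/* This node can only produce RGB24 frames */
	if (requested != V4L2_PIX_FMT_RGB24)
		return -EINVAL;

	/* Ask the upstream part of the pipeline for the node's sink format */
	node->ved.stream->producer_pixfmt = V4L2_PIX_FMT_SBGGR8;

	return 0;
}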
/**
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index 154de92dd809..4b51d4d6cf93 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_VIVID
select VIDEOBUF2_DMA_CONTIG
select VIDEO_V4L2_TPG
default n
- ---help---
+ help
Enables a virtual video driver. This driver emulates a webcam,
TV, S-Video and HDMI capture hardware, including VBI support for
the SDTV inputs. Also video output, VBI output, radio receivers,
@@ -28,7 +28,7 @@ config VIDEO_VIVID_CEC
bool "Enable CEC emulation support"
depends on VIDEO_VIVID
select CEC_CORE
- ---help---
+ help
When selected the vivid module will emulate the optional
HDMI CEC feature.
@@ -36,6 +36,6 @@ config VIDEO_VIVID_MAX_DEVS
int "Maximum number of devices"
depends on VIDEO_VIVID
default "64"
- ---help---
+ help
This allows you to specify the maximum number of devices supported
by the vivid driver.
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 342e0e6c103b..7047df6f0e0e 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -681,7 +681,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->v4l2_dev.mdev = &dev->mdev;
/* Initialize media device */
- strlcpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
+ strscpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
snprintf(dev->mdev.bus_info, sizeof(dev->mdev.bus_info),
"platform:%s-%03d", VIVID_MODULE_NAME, inst);
dev->mdev.dev = &pdev->dev;
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 52eeda624d7e..530ac8decb25 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -1007,7 +1007,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
if (dev->bitmap_cap && (compose->width != s->r.width ||
compose->height != s->r.height)) {
- kfree(dev->bitmap_cap);
+ vfree(dev->bitmap_cap);
dev->bitmap_cap = NULL;
}
*compose = s->r;
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index e61b91b414f9..9350ca65dd91 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -798,7 +798,7 @@ int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection
s->r.height *= factor;
if (dev->bitmap_out && (compose->width != s->r.width ||
compose->height != s->r.height)) {
- kfree(dev->bitmap_out);
+ vfree(dev->bitmap_out);
dev->bitmap_out = NULL;
}
*compose = s->r;
@@ -941,15 +941,19 @@ int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv,
return ret;
if (win->bitmap) {
- new_bitmap = memdup_user(win->bitmap, bitmap_size);
+ new_bitmap = vzalloc(bitmap_size);
- if (IS_ERR(new_bitmap))
- return PTR_ERR(new_bitmap);
+ if (!new_bitmap)
+ return -ENOMEM;
+ if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
+ vfree(new_bitmap);
+ return -EFAULT;
+ }
}
dev->overlay_out_top = win->w.top;
dev->overlay_out_left = win->w.left;
- kfree(dev->bitmap_out);
+ vfree(dev->bitmap_out);
dev->bitmap_out = new_bitmap;
dev->clipcount_out = win->clipcount;
if (dev->clipcount_out)
diff --git a/drivers/media/platform/vsp1/vsp1_brx.c b/drivers/media/platform/vsp1/vsp1_brx.c
index 58ad248cd7f7..2d86c718a5cf 100644
--- a/drivers/media/platform/vsp1/vsp1_brx.c
+++ b/drivers/media/platform/vsp1/vsp1_brx.c
@@ -283,6 +283,7 @@ static const struct v4l2_subdev_ops brx_ops = {
static void brx_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_brx *brx = to_brx(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_clu.c b/drivers/media/platform/vsp1/vsp1_clu.c
index 942fc14c19d1..a47b23bf5abf 100644
--- a/drivers/media/platform/vsp1/vsp1_clu.c
+++ b/drivers/media/platform/vsp1/vsp1_clu.c
@@ -171,6 +171,7 @@ static const struct v4l2_subdev_ops clu_ops = {
static void clu_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_clu *clu = to_clu(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 26289adaf658..104b6f514536 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -178,7 +178,7 @@ struct vsp1_dl_cmd_pool {
* @post_cmd: post command to be issued through extended dl header
* @has_chain: if true, indicates that there's a partition chain
* @chain: entry in the display list partition chain
- * @internal: whether the display list is used for internal purpose
+ * @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
*/
struct vsp1_dl_list {
struct list_head list;
@@ -197,7 +197,7 @@ struct vsp1_dl_list {
bool has_chain;
struct list_head chain;
- bool internal;
+ unsigned int flags;
};
/**
@@ -699,8 +699,8 @@ struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
* which bodies are added.
*
* Adding a body to a display list passes ownership of the body to the list. The
- * caller retains its reference to the fragment when adding it to the display
- * list, but is not allowed to add new entries to the body.
+ * caller retains its reference to the body when adding it to the display list,
+ * but is not allowed to add new entries to the body.
*
* The reference must be explicitly released by a call to vsp1_dl_body_put()
* when the body isn't needed anymore.
@@ -770,17 +770,35 @@ static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
}
dl->header->num_lists = num_lists;
+ dl->header->flags = 0;
- if (!list_empty(&dl->chain) && !is_last) {
+ /*
+ * Enable the interrupt for the end of each frame. In continuous mode
+ * chained lists are used with one list per frame, so enable the
+ * interrupt for each list. In singleshot mode chained lists are used
+ * to partition a single frame, so enable the interrupt for the last
+ * list only.
+ */
+ if (!dlm->singleshot || is_last)
+ dl->header->flags |= VSP1_DLH_INT_ENABLE;
+
+ /*
+ * In continuous mode enable auto-start for all lists, as the VSP must
+ * loop on the same list until a new one is queued. In singleshot mode
+ * enable auto-start for all lists but the last to chain processing of
+ * partitions without software intervention.
+ */
+ if (!dlm->singleshot || !is_last)
+ dl->header->flags |= VSP1_DLH_AUTO_START;
+
+ if (!is_last) {
/*
- * If this display list's chain is not empty, we are on a list,
- * and the next item is the display list that we must queue for
- * automatic processing by the hardware.
+ * If this is not the last display list in the chain, queue the
+ * next item for automatic processing by the hardware.
*/
struct vsp1_dl_list *next = list_next_entry(dl, chain);
dl->header->next_header = next->dma;
- dl->header->flags = VSP1_DLH_AUTO_START;
} else if (!dlm->singleshot) {
/*
* if the display list manager works in continuous mode, the VSP
@@ -788,13 +806,6 @@ static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
* instructed to do otherwise.
*/
dl->header->next_header = dl->dma;
- dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
- } else {
- /*
- * Otherwise, in mem-to-mem mode, we work in single-shot mode
- * and the next display list must not be started automatically.
- */
- dl->header->flags = VSP1_DLH_INT_ENABLE;
}
if (!dl->extension)
@@ -861,13 +872,15 @@ static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
*
* If a display list is already pending we simply drop it as the new
* display list is assumed to contain a more recent configuration. It is
- * an error if the already pending list has the internal flag set, as
- * there is then a process waiting for that list to complete. This
- * shouldn't happen as the waiting process should perform proper
- * locking, but warn just in case.
+ * an error if the already pending list has the
+ * VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
+ * waiting for that list to complete. This shouldn't happen as the
+ * waiting process should perform proper locking, but warn just in
+ * case.
*/
if (vsp1_dl_list_hw_update_pending(dlm)) {
- WARN_ON(dlm->pending && dlm->pending->internal);
+ WARN_ON(dlm->pending &&
+ (dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
__vsp1_dl_list_put(dlm->pending);
dlm->pending = dl;
return;
@@ -897,7 +910,7 @@ static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
dlm->active = dl;
}
-void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
struct vsp1_dl_manager *dlm = dl->dlm;
struct vsp1_dl_list *dl_next;
@@ -912,7 +925,7 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
vsp1_dl_list_fill_header(dl_next, last);
}
- dl->internal = internal;
+ dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;
spin_lock_irqsave(&dlm->lock, flags);
@@ -941,9 +954,13 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
* set in single-shot mode as display list processing is then not continuous and
* races never occur.
*
- * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display list
- * has completed and had been queued with the internal notification flag.
- * Internal notification is only supported for continuous mode.
+ * The following flags are only supported for continuous mode.
+ *
+ * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that just
+ * became active had been queued with the internal notification flag.
+ *
+ * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
+ * display list had been queued with the writeback flag.
*/
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
@@ -982,13 +999,24 @@ unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
goto done;
/*
+ * If the active display list has the writeback flag set, the frame
+ * completion marks the end of the writeback capture. Return the
+ * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
+ * writeback flag.
+ */
+ if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
+ flags |= VSP1_DL_FRAME_END_WRITEBACK;
+ dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
+ }
+
+ /*
* The device starts processing the queued display list right after the
* frame end interrupt. The display list thus becomes active.
*/
if (dlm->queued) {
- if (dlm->queued->internal)
+ if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
flags |= VSP1_DL_FRAME_END_INTERNAL;
- dlm->queued->internal = false;
+ dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;
__vsp1_dl_list_put(dlm->active);
dlm->active = dlm->queued;
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
index 125750dc8b5c..bebe16483ca5 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.h
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -17,8 +17,10 @@ struct vsp1_dl_body_pool;
struct vsp1_dl_list;
struct vsp1_dl_manager;
+/* Keep these flags in sync with VSP1_DU_STATUS_* in include/media/vsp1.h. */
#define VSP1_DL_FRAME_END_COMPLETED BIT(0)
-#define VSP1_DL_FRAME_END_INTERNAL BIT(1)
+#define VSP1_DL_FRAME_END_WRITEBACK BIT(1)
+#define VSP1_DL_FRAME_END_INTERNAL BIT(2)
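A short caller-side sketch of the flag-based interface built on these definitions (the need_internal_notification / writeback_requested conditions and finish_writeback_buffer() are assumptions for illustration):

	unsigned int dl_flags = 0;
	unsigned int completion;

	if (need_internal_notification)
		dl_flags |= VSP1_DL_FRAME_END_INTERNAL;
	if (writeback_requested)
		dl_flags |= VSP1_DL_FRAME_END_WRITEBACK;

	vsp1_dl_list_commit(dl, dl_flags);

	/* later, from the frame end interrupt handler: */
	completion = vsp1_dlm_irq_frame_end(dlm);
	if (completion & VSP1_DL_FRAME_END_WRITEBACK)
		finish_writeback_buffer();	/* previous frame's writeback is done */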
/**
* struct vsp1_dl_ext_cmd - Extended Display command
@@ -61,7 +63,7 @@ struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
void vsp1_dl_list_put(struct vsp1_dl_list *dl);
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl);
struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl);
-void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal);
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags);
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index 84895385d2e5..a4a45d68a6ef 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -34,14 +34,16 @@ static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe,
unsigned int completion)
{
struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
- bool complete = completion == VSP1_DL_FRAME_END_COMPLETED;
if (drm_pipe->du_complete) {
struct vsp1_entity *uif = drm_pipe->uif;
+ unsigned int status = completion
+ & (VSP1_DU_STATUS_COMPLETE |
+ VSP1_DU_STATUS_WRITEBACK);
u32 crc;
crc = uif ? vsp1_uif_get_crc(to_uif(&uif->subdev)) : 0;
- drm_pipe->du_complete(drm_pipe->du_private, complete, crc);
+ drm_pipe->du_complete(drm_pipe->du_private, status, crc);
}
if (completion & VSP1_DL_FRAME_END_INTERNAL) {
@@ -537,6 +539,12 @@ static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe)
struct vsp1_entity *next;
struct vsp1_dl_list *dl;
struct vsp1_dl_body *dlb;
+ unsigned int dl_flags = 0;
+
+ if (drm_pipe->force_brx_release)
+ dl_flags |= VSP1_DL_FRAME_END_INTERNAL;
+ if (pipe->output->writeback)
+ dl_flags |= VSP1_DL_FRAME_END_WRITEBACK;
dl = vsp1_dl_list_get(pipe->output->dlm);
dlb = vsp1_dl_list_get_body0(dl);
@@ -554,12 +562,42 @@ static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe)
}
vsp1_entity_route_setup(entity, pipe, dlb);
- vsp1_entity_configure_stream(entity, pipe, dlb);
+ vsp1_entity_configure_stream(entity, pipe, dl, dlb);
vsp1_entity_configure_frame(entity, pipe, dl, dlb);
vsp1_entity_configure_partition(entity, pipe, dl, dlb);
}
- vsp1_dl_list_commit(dl, drm_pipe->force_brx_release);
+ vsp1_dl_list_commit(dl, dl_flags);
+}
+
+static int vsp1_du_pipeline_set_rwpf_format(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf,
+ u32 pixelformat, unsigned int pitch)
+{
+ const struct vsp1_format_info *fmtinfo;
+ unsigned int chroma_hsub;
+
+ fmtinfo = vsp1_get_format_info(vsp1, pixelformat);
+ if (!fmtinfo) {
+ dev_dbg(vsp1->dev, "Unsupported pixel format %08x\n",
+ pixelformat);
+ return -EINVAL;
+ }
+
+ /*
+ * Only formats with three planes can affect the chroma planes pitch.
+ * All formats with two planes have a horizontal subsampling value of 2,
+ * but combine U and V in a single chroma plane, which thus results in
+ * the luma plane and chroma plane having the same pitch.
+ */
+ chroma_hsub = (fmtinfo->planes == 3) ? fmtinfo->hsub : 1;
+
+ rwpf->fmtinfo = fmtinfo;
+ rwpf->format.num_planes = fmtinfo->planes;
+ rwpf->format.plane_fmt[0].bytesperline = pitch;
+ rwpf->format.plane_fmt[1].bytesperline = pitch / chroma_hsub;
+
+ return 0;
}
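As a worked example of the pitch rule above (numbers assumed for illustration): with a 1920-byte luma pitch, a two-plane format such as NV12 uses chroma_hsub = 1, so both planes get bytesperline = 1920, whereas a three-plane format with hsub = 2 such as YUV420M gets a chroma bytesperline of 1920 / 2 = 960.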
/* -----------------------------------------------------------------------------
@@ -700,8 +738,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
drm_pipe->du_private = cfg->callback_data;
/* Disable the display interrupts. */
- vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
- vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0);
+ vsp1_write(vsp1, VI6_DISP_IRQ_STA(pipe_index), 0);
+ vsp1_write(vsp1, VI6_DISP_IRQ_ENB(pipe_index), 0);
/* Configure all entities in the pipeline. */
vsp1_du_pipeline_configure(pipe);
@@ -769,9 +807,8 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
- const struct vsp1_format_info *fmtinfo;
- unsigned int chroma_hsub;
struct vsp1_rwpf *rpf;
+ int ret;
if (rpf_index >= vsp1->info->rpf_count)
return -EINVAL;
@@ -804,25 +841,11 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
* Store the format, stride, memory buffer address, crop and compose
* rectangles and Z-order position and for the input.
*/
- fmtinfo = vsp1_get_format_info(vsp1, cfg->pixelformat);
- if (!fmtinfo) {
- dev_dbg(vsp1->dev, "Unsupported pixel format %08x for RPF\n",
- cfg->pixelformat);
- return -EINVAL;
- }
-
- /*
- * Only formats with three planes can affect the chroma planes pitch.
- * All formats with two planes have a horizontal subsampling value of 2,
- * but combine U and V in a single chroma plane, which thus results in
- * the luma plane and chroma plane having the same pitch.
- */
- chroma_hsub = (fmtinfo->planes == 3) ? fmtinfo->hsub : 1;
+ ret = vsp1_du_pipeline_set_rwpf_format(vsp1, rpf, cfg->pixelformat,
+ cfg->pitch);
+ if (ret < 0)
+ return ret;
- rpf->fmtinfo = fmtinfo;
- rpf->format.num_planes = fmtinfo->planes;
- rpf->format.plane_fmt[0].bytesperline = cfg->pitch;
- rpf->format.plane_fmt[1].bytesperline = cfg->pitch / chroma_hsub;
rpf->alpha = cfg->alpha;
rpf->mem.addr[0] = cfg->mem[0];
@@ -851,12 +874,31 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
struct vsp1_pipeline *pipe = &drm_pipe->pipe;
+ int ret;
drm_pipe->crc = cfg->crc;
mutex_lock(&vsp1->drm->lock);
+
+ if (cfg->writeback.pixelformat) {
+ const struct vsp1_du_writeback_config *wb_cfg = &cfg->writeback;
+
+ ret = vsp1_du_pipeline_set_rwpf_format(vsp1, pipe->output,
+ wb_cfg->pixelformat,
+ wb_cfg->pitch);
+ if (WARN_ON(ret < 0))
+ goto done;
+
+ pipe->output->mem.addr[0] = wb_cfg->mem[0];
+ pipe->output->mem.addr[1] = wb_cfg->mem[1];
+ pipe->output->mem.addr[2] = wb_cfg->mem[2];
+ pipe->output->writeback = true;
+ }
+
vsp1_du_pipeline_setup_inputs(vsp1, pipe);
vsp1_du_pipeline_configure(pipe);
+
+done:
mutex_unlock(&vsp1->drm->lock);
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_flush);
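For context, a sketch of how a DU (DRM) client could request a one-shot writeback through this path; the vsp1_du_atomic_pipe_config structure name, the 32-bit-per-pixel pitch and the vsp1_dev / wb_dma_addr variables are assumptions based on the field accesses above:

	struct vsp1_du_atomic_pipe_config cfg = { 0 };

	cfg.writeback.pixelformat = V4L2_PIX_FMT_ARGB32;
	cfg.writeback.pitch = width * 4;	/* assuming 32 bits per pixel */
	cfg.writeback.mem[0] = wb_dma_addr;	/* single-plane frame buffer */

	vsp1_du_atomic_flush(vsp1_dev, pipe_index, &cfg);

The request is consumed for a single frame: vsp1_du_pipeline_configure() turns pipe->output->writeback into the VSP1_DL_FRAME_END_WRITEBACK flag, and the WPF disables writeback again through a chained display list once the frame completes.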
diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/vsp1/vsp1_drm.h
index 8dfd274a59e2..e85ad4366fbb 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.h
+++ b/drivers/media/platform/vsp1/vsp1_drm.h
@@ -42,7 +42,7 @@ struct vsp1_drm_pipeline {
struct vsp1_du_crc_config crc;
/* Frame synchronisation */
- void (*du_complete)(void *data, bool completed, u32 crc);
+ void (*du_complete)(void *data, unsigned int status, u32 crc);
void *du_private;
};
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index a54ab528b060..aa9d2286056e 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -71,10 +71,11 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
void vsp1_entity_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
if (entity->ops->configure_stream)
- entity->ops->configure_stream(entity, pipe, dlb);
+ entity->ops->configure_stream(entity, pipe, dl, dlb);
}
void vsp1_entity_configure_frame(struct vsp1_entity *entity,
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index 97acb7795cf1..a1ceb37bb837 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -67,7 +67,9 @@ struct vsp1_route {
* struct vsp1_entity_operations - Entity operations
* @destroy: Destroy the entity.
* @configure_stream: Setup the hardware parameters for the stream which do
- * not vary between frames (pipeline, formats).
+ * not vary between frames (pipeline, formats). Note that
+ * the vsp1_dl_list argument is only valid for display
+ * pipelines and will be NULL for mem-to-mem pipelines.
* @configure_frame: Configure the runtime parameters for each frame.
* @configure_partition: Configure partition specific parameters.
* @max_width: Return the max supported width of data that the entity can
@@ -78,7 +80,7 @@ struct vsp1_route {
struct vsp1_entity_operations {
void (*destroy)(struct vsp1_entity *);
void (*configure_stream)(struct vsp1_entity *, struct vsp1_pipeline *,
- struct vsp1_dl_body *);
+ struct vsp1_dl_list *, struct vsp1_dl_body *);
void (*configure_frame)(struct vsp1_entity *, struct vsp1_pipeline *,
struct vsp1_dl_list *, struct vsp1_dl_body *);
void (*configure_partition)(struct vsp1_entity *,
@@ -155,6 +157,7 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
void vsp1_entity_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb);
void vsp1_entity_configure_frame(struct vsp1_entity *entity,
diff --git a/drivers/media/platform/vsp1/vsp1_hgo.c b/drivers/media/platform/vsp1/vsp1_hgo.c
index 827373c25351..bf3f981f93a1 100644
--- a/drivers/media/platform/vsp1/vsp1_hgo.c
+++ b/drivers/media/platform/vsp1/vsp1_hgo.c
@@ -131,6 +131,7 @@ static const struct v4l2_ctrl_config hgo_num_bins_control = {
static void hgo_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_hgo *hgo = to_hgo(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_hgt.c b/drivers/media/platform/vsp1/vsp1_hgt.c
index bb6ce6fdd5f4..aa1c718e0453 100644
--- a/drivers/media/platform/vsp1/vsp1_hgt.c
+++ b/drivers/media/platform/vsp1/vsp1_hgt.c
@@ -127,6 +127,7 @@ static const struct v4l2_ctrl_config hgt_hue_areas = {
static void hgt_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_hgt *hgt = to_hgt(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 39ab2e0c7c18..d5ebd9d08c8a 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -129,6 +129,7 @@ static const struct v4l2_subdev_ops hsit_ops = {
static void hsit_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 8b0a26335d70..14ed5d7bd061 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -84,6 +84,7 @@ static const struct v4l2_subdev_ops lif_ops = {
static void lif_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
const struct v4l2_mbus_framefmt *format;
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index 64c48d9459b0..9f88842d7048 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -147,6 +147,7 @@ static const struct v4l2_subdev_ops lut_ops = {
static void lut_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_lut *lut = to_lut(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 54ff539ffea0..f72ac01c21ea 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -42,6 +42,30 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBA444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_RGBX444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBX_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ABGR444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ABGR_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XBGR444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ABGR_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGRA444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGRA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_BGRX444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGRA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
@@ -50,6 +74,30 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBA555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_RGBX555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBX_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ABGR555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ABGR_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XBGR555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ABGR_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGRA555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGRA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_BGRX555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGRA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
@@ -68,6 +116,20 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
{ V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGRA32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_BGRX32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBA32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_RGBX32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index f6e4157095cc..1bb1d39c60d9 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -39,12 +39,12 @@
#define VI6_WFP_IRQ_STA_DFE (1 << 1)
#define VI6_WFP_IRQ_STA_FRE (1 << 0)
-#define VI6_DISP_IRQ_ENB 0x0078
+#define VI6_DISP_IRQ_ENB(n) (0x0078 + (n) * 60)
#define VI6_DISP_IRQ_ENB_DSTE (1 << 8)
#define VI6_DISP_IRQ_ENB_MAEE (1 << 5)
#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << (n))
-#define VI6_DISP_IRQ_STA 0x007c
+#define VI6_DISP_IRQ_STA(n) (0x007c + (n) * 60)
#define VI6_DISP_IRQ_STA_DST (1 << 8)
#define VI6_DISP_IRQ_STA_MAE (1 << 5)
#define VI6_DISP_IRQ_STA_LNE(n) (1 << (n))
@@ -307,7 +307,7 @@
#define VI6_WPF_DSTM_ADDR_C0 0x1028
#define VI6_WPF_DSTM_ADDR_C1 0x102c
-#define VI6_WPF_WRBCK_CTRL 0x1034
+#define VI6_WPF_WRBCK_CTRL(n) (0x1034 + (n) * 0x100)
#define VI6_WPF_WRBCK_CTRL_WBMD (1 << 0)
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 616afa7e165f..85587c1b6a37 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -57,6 +57,7 @@ static const struct v4l2_subdev_ops rpf_ops = {
static void rpf_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 70742ecf766f..2f3582590618 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -61,6 +61,7 @@ struct vsp1_rwpf {
} flip;
struct vsp1_rwpf_memory mem;
+ bool writeback;
struct vsp1_dl_manager *dlm;
};
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index b1617cb1f2b9..2b65457ee12f 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -269,6 +269,7 @@ static const struct v4l2_subdev_ops sru_ops = {
static void sru_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
const struct vsp1_sru_param *param;
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index 27012af973b2..5fc04c082d1a 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -257,6 +257,7 @@ static const struct v4l2_subdev_ops uds_ops = {
static void uds_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_uds *uds = to_uds(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_uif.c b/drivers/media/platform/vsp1/vsp1_uif.c
index 4b58d51df231..467d1072577b 100644
--- a/drivers/media/platform/vsp1/vsp1_uif.c
+++ b/drivers/media/platform/vsp1/vsp1_uif.c
@@ -192,6 +192,7 @@ static const struct v4l2_subdev_ops uif_ops = {
static void uif_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_uif *uif = to_uif(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 7ceaf3222145..fd98e483b2f4 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -307,11 +307,6 @@ static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
* This function completes the current buffer by filling its sequence number,
* time stamp and payload size, and hands it back to the videobuf core.
*
- * When operating in DU output mode (deep pipeline to the DU through the LIF),
- * the VSP1 needs to constantly supply frames to the display. In that case, if
- * no other buffer is queued, reuse the one that has just been processed instead
- * of handing it back to the videobuf core.
- *
* Return the next queued buffer or NULL if the queue is empty.
*/
static struct vsp1_vb2_buffer *
@@ -333,12 +328,6 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
done = list_first_entry(&video->irqqueue,
struct vsp1_vb2_buffer, queue);
- /* In DU output mode reuse the buffer if the list is singular. */
- if (pipe->lif && list_is_singular(&video->irqqueue)) {
- spin_unlock_irqrestore(&video->irqlock, flags);
- return done;
- }
-
list_del(&done->queue);
if (!list_empty(&video->irqqueue))
@@ -432,7 +421,7 @@ static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
}
/* Complete, and commit the head display list. */
- vsp1_dl_list_commit(dl, false);
+ vsp1_dl_list_commit(dl, 0);
pipe->configured = true;
vsp1_pipeline_run(pipe);
@@ -836,7 +825,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
list_for_each_entry(entity, &pipe->entities, list_pipe) {
vsp1_entity_route_setup(entity, pipe, pipe->stream_config);
- vsp1_entity_configure_stream(entity, pipe, pipe->stream_config);
+ vsp1_entity_configure_stream(entity, pipe, NULL,
+ pipe->stream_config);
}
return 0;
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 32bb207b2007..208498fa6ed7 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -232,17 +232,41 @@ static void vsp1_wpf_destroy(struct vsp1_entity *entity)
vsp1_dlm_destroy(wpf->dlm);
}
+static int wpf_configure_writeback_chain(struct vsp1_rwpf *wpf,
+ struct vsp1_dl_list *dl)
+{
+ unsigned int index = wpf->entity.index;
+ struct vsp1_dl_list *dl_next;
+ struct vsp1_dl_body *dlb;
+
+ dl_next = vsp1_dl_list_get(wpf->dlm);
+ if (!dl_next) {
+ dev_err(wpf->entity.vsp1->dev,
+ "Failed to obtain a dl list, disabling writeback\n");
+ return -ENOMEM;
+ }
+
+ dlb = vsp1_dl_list_get_body0(dl_next);
+ vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL(index), 0);
+ vsp1_dl_list_add_chain(dl, dl_next);
+
+ return 0;
+}
+
static void wpf_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
const struct v4l2_mbus_framefmt *source_format;
const struct v4l2_mbus_framefmt *sink_format;
+ unsigned int index = wpf->entity.index;
unsigned int i;
u32 outfmt = 0;
u32 srcrpf = 0;
+ int ret;
sink_format = vsp1_entity_get_pad_format(&wpf->entity,
wpf->entity.config,
@@ -250,8 +274,9 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
source_format = vsp1_entity_get_pad_format(&wpf->entity,
wpf->entity.config,
RWPF_PAD_SOURCE);
+
/* Format */
- if (!pipe->lif) {
+ if (!pipe->lif || wpf->writeback) {
const struct v4l2_pix_format_mplane *format = &wpf->format;
const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
@@ -276,8 +301,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSWAP, fmtinfo->swap);
- if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) &&
- wpf->entity.index == 0)
+ if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) && index == 0)
vsp1_wpf_write(wpf, dlb, VI6_WPF_ROT_CTRL,
VI6_WPF_ROT_CTRL_LN16 |
(256 << VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT));
@@ -288,11 +312,9 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
wpf->outfmt = outfmt;
- vsp1_dl_body_write(dlb, VI6_DPR_WPF_FPORCH(wpf->entity.index),
+ vsp1_dl_body_write(dlb, VI6_DPR_WPF_FPORCH(index),
VI6_DPR_WPF_FPORCH_FP_WPFN);
- vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL, 0);
-
/*
* Sources. If the pipeline has a single input and BRx is not used,
* configure it as the master layer. Otherwise configure all
@@ -318,9 +340,26 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
vsp1_wpf_write(wpf, dlb, VI6_WPF_SRCRPF, srcrpf);
/* Enable interrupts. */
- vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
- vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(wpf->entity.index),
+ vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(index), 0);
+ vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
VI6_WFP_IRQ_ENB_DFEE);
+
+ /*
+ * Configure writeback for display pipelines (the wpf writeback flag is
+ * never set for memory-to-memory pipelines). Start by adding a chained
+ * display list to disable writeback after a single frame, and process
+ * to enable writeback. If the display list allocation fails don't
+ * enable writeback as we wouldn't be able to safely disable it,
+ * resulting in possible memory corruption.
+ */
+ if (wpf->writeback) {
+ ret = wpf_configure_writeback_chain(wpf, dl);
+ if (ret < 0)
+ wpf->writeback = false;
+ }
+
+ vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL(index),
+ wpf->writeback ? VI6_WPF_WRBCK_CTRL_WBMD : 0);
}
static void wpf_configure_frame(struct vsp1_entity *entity,
@@ -362,6 +401,7 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
unsigned int width;
unsigned int height;
+ unsigned int left;
unsigned int offset;
unsigned int flip;
unsigned int i;
@@ -371,13 +411,16 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
RWPF_PAD_SINK);
width = sink_format->width;
height = sink_format->height;
+ left = 0;
/*
* Cropping. The partition algorithm can split the image into
* multiple slices.
*/
- if (pipe->partitions > 1)
+ if (pipe->partitions > 1) {
width = pipe->partition->wpf.width;
+ left = pipe->partition->wpf.left;
+ }
vsp1_wpf_write(wpf, dlb, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
(0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
@@ -386,7 +429,11 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
(0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
(height << VI6_WPF_SZCLIP_SIZE_SHIFT));
- if (pipe->lif)
+ /*
+ * For display pipelines without writeback enabled there's no memory
+ * address to configure; return now.
+ */
+ if (pipe->lif && !wpf->writeback)
return;
/*
@@ -408,13 +455,11 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
flip = wpf->flip.active;
if (flip & BIT(WPF_CTRL_HFLIP) && !wpf->flip.rotate)
- offset = format->width - pipe->partition->wpf.left
- - pipe->partition->wpf.width;
+ offset = format->width - left - width;
else if (flip & BIT(WPF_CTRL_VFLIP) && wpf->flip.rotate)
- offset = format->height - pipe->partition->wpf.left
- - pipe->partition->wpf.width;
+ offset = format->height - left - width;
else
- offset = pipe->partition->wpf.left;
+ offset = left;
for (i = 0; i < format->num_planes; ++i) {
unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
@@ -436,7 +481,7 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
* image height.
*/
if (wpf->flip.rotate)
- height = pipe->partition->wpf.width;
+ height = width;
else
height = format->height;
@@ -477,6 +522,12 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
+
+ /*
+ * Writeback operates in single-shot mode and lasts for a single frame;
+ * reset the writeback flag to false for the next frame.
+ */
+ wpf->writeback = false;
}
static unsigned int wpf_max_width(struct vsp1_entity *entity,
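Taken together, the vsp1_wpf.c hunks above implement one-shot writeback for display pipelines: wpf_configure_stream() chains a second display list that clears VI6_WPF_WRBCK_CTRL after a single frame, enables writeback only when that chain could be allocated, and wpf_configure_partition() clears the writeback flag once the destination addresses have been programmed. The following standalone sketch models that control flow only; the types and register writes are simplified stand-ins (plain structs and printf), not the driver's API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for the driver objects. */
struct dl_list { int id; };
struct wpf_state {
	bool writeback;	/* one-shot request set by the display side */
	bool lif;	/* display (LIF) pipeline */
};

/* Model of vsp1_dl_list_get(): may fail, returning NULL. */
static struct dl_list *dl_list_get(bool allow)
{
	if (!allow)
		return NULL;
	struct dl_list *dl = malloc(sizeof(*dl));
	if (dl)
		dl->id = 1;
	return dl;
}

/* Model of wpf_configure_writeback_chain(): chain a list that disables
 * writeback again after a single frame. */
static int configure_writeback_chain(bool alloc_ok)
{
	struct dl_list *dl_next = dl_list_get(alloc_ok);

	if (!dl_next) {
		printf("failed to get a dl list, disabling writeback\n");
		return -1;
	}
	printf("chained dl %d: WRBCK_CTRL = 0 (disable after one frame)\n",
	       dl_next->id);
	free(dl_next);
	return 0;
}

static void configure_stream(struct wpf_state *wpf, bool alloc_ok)
{
	/* Don't enable writeback if it can't be safely disabled again. */
	if (wpf->writeback && configure_writeback_chain(alloc_ok) < 0)
		wpf->writeback = false;

	printf("WRBCK_CTRL = %s\n", wpf->writeback ? "WBMD (enabled)" : "0");
}

static void configure_partition(struct wpf_state *wpf)
{
	/* Display pipelines without writeback have no memory address. */
	if (wpf->lif && !wpf->writeback) {
		printf("no destination address programmed\n");
		return;
	}
	printf("destination addresses programmed\n");
	/* Writeback is one-shot: clear the request for the next frame. */
	wpf->writeback = false;
}

int main(void)
{
	struct wpf_state wpf = { .writeback = true, .lif = true };

	configure_stream(&wpf, true);	/* chain allocated, writeback on */
	configure_partition(&wpf);	/* addresses written, flag cleared */

	wpf.writeback = true;
	configure_stream(&wpf, false);	/* allocation fails, writeback off */
	configure_partition(&wpf);	/* nothing written for a LIF pipe */
	return 0;
}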
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index 74ec8aaa5ae0..a2773ad7c185 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -5,7 +5,7 @@ config VIDEO_XILINX
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
- ---help---
+ help
Driver for Xilinx Video IP Pipelines
if VIDEO_XILINX
@@ -14,13 +14,13 @@ config VIDEO_XILINX_TPG
tristate "Xilinx Video Test Pattern Generator"
depends on VIDEO_XILINX
select VIDEO_XILINX_VTC
- ---help---
+ help
Driver for the Xilinx Video Test Pattern Generator
config VIDEO_XILINX_VTC
tristate "Xilinx Video Timing Controller"
depends on VIDEO_XILINX
- ---help---
+ help
Driver for the Xilinx Video Timing Controller
endif #VIDEO_XILINX
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 9b99dfb2d0c6..9cd00f64af32 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -7,7 +7,7 @@ menuconfig RADIO_ADAPTERS
depends on VIDEO_V4L2
depends on MEDIA_RADIO_SUPPORT
default y
- ---help---
+ help
Say Y here to enable selecting AM/FM radio adapters.
if RADIO_ADAPTERS && VIDEO_V4L2
@@ -29,7 +29,7 @@ config RADIO_SI476X
depends on MFD_SI476X_CORE
depends on SND_SOC
select SND_SOC_SI476X
- ---help---
+ help
Choose Y here if you have this FM radio chip.
In order to control your radio card, you will need to use programs
@@ -43,7 +43,7 @@ config RADIO_SI476X
config USB_MR800
tristate "AverMedia MR 800 USB FM radio support"
depends on USB && VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to connect this type of radio to your
computer's USB port. Note that the audio is not digital, and
you must connect the line out connector to a sound card or a
@@ -55,7 +55,7 @@ config USB_MR800
config USB_DSBR
tristate "D-Link/GemTek USB FM radio support"
depends on USB && VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to connect this type of radio to your
computer's USB port. Note that the audio is not digital, and
you must connect the line out connector to a sound card or a
@@ -68,7 +68,7 @@ config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
depends on VIDEO_V4L2 && PCI
select RADIO_TEA575X
- ---help---
+ help
Choose Y here if you have this radio card. This card may also be
found as Gemtek PCI FM.
@@ -84,7 +84,7 @@ config RADIO_SHARK
tristate "Griffin radioSHARK USB radio receiver"
depends on USB
select RADIO_TEA575X
- ---help---
+ help
Choose Y here if you have this radio receiver.
There are 2 versions of this device, this driver is for version 1,
@@ -101,7 +101,7 @@ config RADIO_SHARK
config RADIO_SHARK2
tristate "Griffin radioSHARK2 USB radio receiver"
depends on USB
- ---help---
+ help
Choose Y here if you have this radio receiver.
There are 2 versions of this device, this driver is for version 2,
@@ -118,7 +118,7 @@ config RADIO_SHARK2
config USB_KEENE
tristate "Keene FM Transmitter USB support"
depends on USB && VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to connect this type of FM transmitter
to your computer's USB port.
@@ -128,7 +128,7 @@ config USB_KEENE
config USB_RAREMONO
tristate "Thanko's Raremono AM/FM/SW radio support"
depends on USB && VIDEO_V4L2
- ---help---
+ help
The 'Thanko's Raremono' device contains the Si4734 chip from Silicon Labs Inc.
It is one of the very few or perhaps the only consumer USB radio device
to receive the AM/FM/SW bands.
@@ -142,7 +142,7 @@ config USB_RAREMONO
config USB_MA901
tristate "Masterkit MA901 USB FM radio support"
depends on USB && VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to connect this type of radio to your
computer's USB port. Note that the audio is not digital, and
you must connect the line out connector to a sound card or a
@@ -154,7 +154,7 @@ config USB_MA901
config RADIO_TEA5764
tristate "TEA5764 I2C FM radio support"
depends on I2C && VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to use the TEA5764 FM chip found in
EZX phones. This FM chip is present in EZX phones from Motorola,
connected to internal pxa I2C bus.
@@ -173,7 +173,7 @@ config RADIO_TEA5764_XTAL
config RADIO_SAA7706H
tristate "SAA7706H Car Radio DSP"
depends on I2C && VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to use the SAA7706H Car radio Digital
Signal Processor, found for instance on the Russellville development
board. On the russellville the device is connected to internal
@@ -185,7 +185,7 @@ config RADIO_SAA7706H
config RADIO_TEF6862
tristate "TEF6862 Car Radio Enhanced Selectivity Tuner"
depends on I2C && VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to use the TEF6862 Car Radio Enhanced
Selectivity Tuner, found for instance on the Russellville development
board. On the russellville the device is connected to internal
@@ -200,7 +200,7 @@ config RADIO_TIMBERDALE
depends on I2C # for RADIO_SAA7706H
select RADIO_TEF6862
select RADIO_SAA7706H
- ---help---
+ help
This is a kind of umbrella driver for the Radio Tuner and DSP
found behind the Timberdale FPGA on the Russellville board.
Enabling this driver will automatically select the DSP and tuner.
@@ -211,7 +211,7 @@ config RADIO_WL1273
select MFD_CORE
select MFD_WL1273_CORE
select FW_LOADER
- ---help---
+ help
Choose Y here if you have this FM radio chip.
In order to control your radio card, you will need to use programs
@@ -233,7 +233,7 @@ menuconfig V4L_RADIO_ISA_DRIVERS
bool "ISA radio devices"
depends on ISA || COMPILE_TEST
default n
- ---help---
+ help
Say Y here to enable support for these ISA drivers.
if V4L_RADIO_ISA_DRIVERS
@@ -246,7 +246,7 @@ config RADIO_CADET
tristate "ADS Cadet AM/FM Tuner"
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
- ---help---
+ help
Choose Y here if you have one of these AM/FM radio cards, and then
fill in the port address below.
@@ -258,7 +258,7 @@ config RADIO_RTRACK
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
select RADIO_ISA
- ---help---
+ help
Choose Y here if you have one of these FM radio cards, and then fill
in the port address below.
@@ -290,7 +290,7 @@ config RADIO_RTRACK2
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
select RADIO_ISA
- ---help---
+ help
Choose Y here if you have this FM radio card, and then fill in the
port address below.
@@ -314,7 +314,7 @@ config RADIO_AZTECH
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
select RADIO_ISA
- ---help---
+ help
Choose Y here if you have one of these FM radio cards, and then fill
in the port address below.
@@ -335,7 +335,7 @@ config RADIO_GEMTEK
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
select RADIO_ISA
- ---help---
+ help
Choose Y here if you have this FM radio card, and then fill in the
I/O port address and settings below. The following cards either have
GemTek Radio tuner or are rebranded GemTek Radio cards:
@@ -377,7 +377,7 @@ config RADIO_MIROPCM20
depends on ISA_DMA_API && VIDEO_V4L2 && SND
select SND_ISA
select SND_MIRO
- ---help---
+ help
Choose Y here if you have this FM radio card. You also need to enable
the ALSA sound system. This choice automatically selects the ALSA
sound card driver "Miro miroSOUND PCM1pro/PCM12/PCM20radio" as this
@@ -390,7 +390,7 @@ config RADIO_SF16FMI
tristate "SF16-FMI/SF16-FMP/SF16-FMD Radio"
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
- ---help---
+ help
Choose Y here if you have one of these FM radio cards.
To compile this driver as a module, choose M here: the
@@ -401,7 +401,7 @@ config RADIO_SF16FMR2
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
select RADIO_TEA575X
- ---help---
+ help
Choose Y here if you have one of these FM radio cards.
To compile this driver as a module, choose M here: the
@@ -412,7 +412,7 @@ config RADIO_TERRATEC
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
select RADIO_ISA
- ---help---
+ help
Choose Y here if you have this FM radio card.
Note: this driver hasn't been tested since a long time due to lack
@@ -451,7 +451,7 @@ config RADIO_TYPHOON
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
select RADIO_ISA
- ---help---
+ help
Choose Y here if you have one of these FM radio cards, and then fill
in the port address and the frequency used for muting below.
@@ -486,7 +486,7 @@ config RADIO_ZOLTRIX
depends on ISA || COMPILE_TEST
depends on VIDEO_V4L2
select RADIO_ISA
- ---help---
+ help
Choose Y here if you have one of these FM radio cards, and then fill
in the port address below.
diff --git a/drivers/media/radio/si470x/Kconfig b/drivers/media/radio/si470x/Kconfig
index 6dbb158cd2a0..21026488de90 100644
--- a/drivers/media/radio/si470x/Kconfig
+++ b/drivers/media/radio/si470x/Kconfig
@@ -1,7 +1,7 @@
config RADIO_SI470X
tristate "Silicon Labs Si470x FM Radio Receiver support"
depends on VIDEO_V4L2
- ---help---
+ help
This is a driver for devices with the Silicon Labs SI470x
chip (either via USB or I2C buses).
@@ -15,7 +15,7 @@ config RADIO_SI470X
config USB_SI470X
tristate "Silicon Labs Si470x FM Radio Receiver support with USB"
depends on USB && RADIO_SI470X
- ---help---
+ help
This is a driver for USB devices with the Silicon Labs SI470x
chip. Currently these devices are known to work:
- 10c4:818a: Silicon Labs USB FM Radio Reference Design
@@ -40,7 +40,7 @@ config USB_SI470X
config I2C_SI470X
tristate "Silicon Labs Si470x FM Radio Receiver support with I2C"
depends on I2C && RADIO_SI470X
- ---help---
+ help
This is a driver for I2C devices with the Silicon Labs SI470x
chip.
diff --git a/drivers/media/radio/si4713/Kconfig b/drivers/media/radio/si4713/Kconfig
index 9c8b887cff75..17567c917554 100644
--- a/drivers/media/radio/si4713/Kconfig
+++ b/drivers/media/radio/si4713/Kconfig
@@ -2,7 +2,7 @@ config USB_SI4713
tristate "Silicon Labs Si4713 FM Radio Transmitter support with USB"
depends on USB && I2C && RADIO_SI4713
select I2C_SI4713
- ---help---
+ help
This is a driver for USB devices with the Silicon Labs SI4713
chip. Currently these devices are known to work.
- 10c4:8244: Silicon Labs FM Transmitter USB device.
@@ -17,7 +17,7 @@ config PLATFORM_SI4713
tristate "Silicon Labs Si4713 FM Radio Transmitter support with I2C"
depends on I2C && RADIO_SI4713
select I2C_SI4713
- ---help---
+ help
This is a driver for I2C devices with the Silicon Labs SI4713
chip.
@@ -30,7 +30,7 @@ config PLATFORM_SI4713
config I2C_SI4713
tristate "Silicon Labs Si4713 FM Radio Transmitter support"
depends on I2C && RADIO_SI4713
- ---help---
+ help
Say Y here if you want support to Si4713 FM Radio Transmitter.
This device can transmit audio through FM. It can transmit
RDS and RBDS signals as well. This module is the v4l2 radio
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index f4a53f1e856e..a8584371a32d 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1272,7 +1272,7 @@ static int si4713_g_modulator(struct v4l2_subdev *sd, struct v4l2_modulator *vm)
if (vm->index > 0)
return -EINVAL;
- strncpy(vm->name, "FM Modulator", 32);
+ strscpy(vm->name, "FM Modulator", sizeof(vm->name));
vm->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW |
V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_CONTROLS;
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 3c8987af3772..053d51a5890a 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -489,7 +489,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
return -EIO;
}
/* Send response data to caller */
- if (response != NULL && response_len != NULL && evt_hdr->dlen) {
+ if (response != NULL && response_len != NULL && evt_hdr->dlen &&
+ evt_hdr->dlen <= payload_len) {
/* Skip header info and copy only response data */
skb_pull(skb, sizeof(struct fm_event_msg_hdr));
memcpy(response, skb->data, evt_hdr->dlen);
@@ -583,6 +584,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
return;
fm_evt_hdr = (void *)skb->data;
+ if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
+ return;
/* Skip header info and copy only response data */
skb_pull(skb, sizeof(struct fm_event_msg_hdr));
@@ -1268,8 +1271,9 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
switch (action->type) {
case ACTION_SEND_COMMAND: /* Send */
- if (fmc_send_cmd(fmdev, 0, 0, action->data,
- action->size, NULL, NULL))
+ ret = fmc_send_cmd(fmdev, 0, 0, action->data,
+ action->size, NULL, NULL);
+ if (ret)
goto rel_fw;
cmd_cnt++;
@@ -1308,7 +1312,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev)
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
u16 payload;
- __be16 asic_id, asic_ver;
+ __be16 asic_id = 0, asic_ver = 0;
int resp_len, ret;
u8 fw_name[50];
@@ -1520,7 +1524,7 @@ int fmc_prepare(struct fmdev *fmdev)
}
ret = 0;
- } else if (ret == -1) {
+ } else if (ret < 0) {
fmerr("st_register failed %d\n", ret);
return -EAGAIN;
}
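The fmdrv_common.c hunk guards the response copy with the caller-supplied payload length, so a length reported by the chip (evt_hdr->dlen) can never overrun the destination buffer. A minimal model of that check, with hypothetical names and no kernel types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy a device-reported response only if its claimed length fits the
 * caller's buffer; mirrors the dlen <= payload_len check added above. */
static int copy_response(void *response, uint32_t *response_len,
			 uint32_t payload_len,
			 const uint8_t *data, uint32_t dlen)
{
	if (!response || !response_len || !dlen || dlen > payload_len)
		return -1;	/* drop empty or oversized payloads */

	memcpy(response, data, dlen);
	*response_len = dlen;
	return 0;
}

int main(void)
{
	uint8_t buf[8];
	uint32_t len = 0;
	const uint8_t evt[16] = { 1, 2, 3, 4 };

	printf("ok: %d\n", copy_response(buf, &len, sizeof(buf), evt, 4));
	printf("rejected: %d\n", copy_response(buf, &len, sizeof(buf), evt, 12));
	return 0;
}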
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 96ce3e5524e0..3fc6ac15c66d 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -2,7 +2,7 @@
menuconfig RC_CORE
tristate "Remote Controller support"
depends on INPUT
- ---help---
+ help
Enable support for Remote Controllers on Linux. This is
needed in order to support several video capture adapters,
standalone IR receivers/transmitters, and RF receivers.
@@ -19,7 +19,7 @@ source "drivers/media/rc/keymaps/Kconfig"
config LIRC
bool "LIRC user interface"
depends on RC_CORE
- ---help---
+ help
Enable this option to enable the Linux Infrared Remote
Control user interface (e.g. /dev/lirc*). This interface
passes raw IR to and from userspace, which is needed for
@@ -48,7 +48,7 @@ config IR_NEC_DECODER
depends on RC_CORE
select BITREVERSE
- ---help---
+ help
Enable this option if you have IR with NEC protocol, and
if the IR is decoded in software
@@ -57,7 +57,7 @@ config IR_RC5_DECODER
depends on RC_CORE
select BITREVERSE
- ---help---
+ help
Enable this option if you have IR with RC-5 protocol, and
if the IR is decoded in software
@@ -66,7 +66,7 @@ config IR_RC6_DECODER
depends on RC_CORE
select BITREVERSE
- ---help---
+ help
Enable this option if you have an infrared remote control which
uses the RC6 protocol, and you need software decoding support.
@@ -75,7 +75,7 @@ config IR_JVC_DECODER
depends on RC_CORE
select BITREVERSE
- ---help---
+ help
Enable this option if you have an infrared remote control which
uses the JVC protocol, and you need software decoding support.
@@ -84,7 +84,7 @@ config IR_SONY_DECODER
depends on RC_CORE
select BITREVERSE
- ---help---
+ help
Enable this option if you have an infrared remote control which
uses the Sony protocol, and you need software decoding support.
@@ -92,7 +92,7 @@ config IR_SANYO_DECODER
tristate "Enable IR raw decoder for the Sanyo protocol"
depends on RC_CORE
- ---help---
+ help
Enable this option if you have an infrared remote control which
uses the Sanyo protocol (Sanyo, Aiwa, Chinon remotes),
and you need software decoding support.
@@ -101,7 +101,7 @@ config IR_SHARP_DECODER
tristate "Enable IR raw decoder for the Sharp protocol"
depends on RC_CORE
- ---help---
+ help
Enable this option if you have an infrared remote control which
uses the Sharp protocol (Sharp, Denon), and you need software
decoding support.
@@ -111,7 +111,7 @@ config IR_MCE_KBD_DECODER
depends on RC_CORE
select BITREVERSE
- ---help---
+ help
Enable this option if you have a Microsoft Remote Keyboard for
Windows Media Center Edition, which you would like to use with
a raw IR receiver in your system.
@@ -121,14 +121,14 @@ config IR_XMP_DECODER
depends on RC_CORE
select BITREVERSE
- ---help---
+ help
Enable this option if you have IR with XMP protocol, and
if the IR is decoded in software
config IR_IMON_DECODER
tristate "Enable IR raw decoder for the iMON protocol"
depends on RC_CORE
- ---help---
+ help
Enable this option if you have iMON PAD or Antec Veris infrared
remote control and you would like to use it with a raw IR
receiver, or if you wish to use an encoder to transmit this IR.
@@ -177,7 +177,7 @@ config IR_ENE
tristate "ENE eHome Receiver/Transceiver (pnp id: ENE0100/ENE02xxx)"
depends on PNP || COMPILE_TEST
depends on RC_CORE
- ---help---
+ help
Say Y here to enable support for integrated infrared receiver
/transceiver made by ENE.
@@ -203,7 +203,7 @@ config IR_IMON
depends on USB_ARCH_HAS_HCD
depends on RC_CORE
select USB
- ---help---
+ help
Say Y here if you want to use a SoundGraph iMON (aka Antec Veris)
IR Receiver and/or LCD/VFD/VGA display.
@@ -215,7 +215,7 @@ config IR_IMON_RAW
depends on USB_ARCH_HAS_HCD
depends on RC_CORE
select USB
- ---help---
+ help
Say Y here if you want to use a SoundGraph iMON IR Receiver,
early raw models.
@@ -227,7 +227,7 @@ config IR_MCEUSB
depends on USB_ARCH_HAS_HCD
depends on RC_CORE
select USB
- ---help---
+ help
Say Y here if you want to use a Windows Media Center Edition
eHome Infrared Transceiver.
@@ -238,7 +238,7 @@ config IR_ITE_CIR
tristate "ITE Tech Inc. IT8712/IT8512 Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
depends on RC_CORE
- ---help---
+ help
Say Y here to enable support for integrated infrared receivers
/transceivers made by ITE Tech Inc. These are found in
several ASUS devices, like the ASUS Digimatrix or the ASUS
@@ -251,7 +251,7 @@ config IR_FINTEK
tristate "Fintek Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
depends on RC_CORE
- ---help---
+ help
Say Y here to enable support for integrated infrared receiver
/transceiver made by Fintek. This chip is found on assorted
Jetway motherboards (and of course, possibly others).
@@ -263,7 +263,7 @@ config IR_MESON
tristate "Amlogic Meson IR remote receiver"
depends on RC_CORE
depends on ARCH_MESON || COMPILE_TEST
- ---help---
+ help
Say Y if you want to use the IR remote receiver available
on Amlogic Meson SoCs.
@@ -274,7 +274,7 @@ config IR_MTK
tristate "Mediatek IR remote receiver"
depends on RC_CORE
depends on ARCH_MEDIATEK || COMPILE_TEST
- ---help---
+ help
Say Y if you want to use the IR remote receiver available
on Mediatek SoCs.
@@ -285,7 +285,7 @@ config IR_NUVOTON
tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
depends on RC_CORE
- ---help---
+ help
Say Y here to enable support for integrated infrared receiver
/transceiver made by Nuvoton (formerly Winbond). This chip is
found in the ASRock ION 330HT, as well as assorted Intel
@@ -301,7 +301,7 @@ config IR_REDRAT3
select NEW_LEDS
select LEDS_CLASS
select USB
- ---help---
+ help
Say Y here if you want to use a RedRat3 Infrared Transceiver.
To compile this driver as a module, choose M here: the
@@ -311,7 +311,7 @@ config IR_SPI
tristate "SPI connected IR LED"
depends on SPI && LIRC
depends on OF || COMPILE_TEST
- ---help---
+ help
Say Y if you want to use an IR LED connected through SPI bus.
To compile this driver as a module, choose M here: the module will be
@@ -322,7 +322,7 @@ config IR_STREAMZAP
depends on USB_ARCH_HAS_HCD
depends on RC_CORE
select USB
- ---help---
+ help
Say Y here if you want to use a Streamzap PC Remote
Infrared Receiver.
@@ -336,7 +336,7 @@ config IR_WINBOND_CIR
select NEW_LEDS
select LEDS_CLASS
select BITREVERSE
- ---help---
+ help
Say Y here if you want to use the IR remote functionality found
in some Winbond SuperI/O chips. Currently only the WPCD376I
chip is supported (included in some Intel Media series
@@ -350,7 +350,7 @@ config IR_IGORPLUGUSB
depends on USB_ARCH_HAS_HCD
depends on RC_CORE
select USB
- ---help---
+ help
Say Y here if you want to use the IgorPlug-USB IR Receiver by
Igor Cesko. This device is included on the Fit-PC2.
@@ -365,7 +365,7 @@ config IR_IGUANA
depends on USB_ARCH_HAS_HCD
depends on RC_CORE
select USB
- ---help---
+ help
Say Y here if you want to use the IguanaWorks USB IR Transceiver.
Both infrared receive and send are supported. If you want to
change the ID or the pin config, use the user space driver from
@@ -383,7 +383,7 @@ config IR_TTUSBIR
select USB
select NEW_LEDS
select LEDS_CLASS
- ---help---
+ help
Say Y here if you want to use the TechnoTrend USB IR Receiver. The
driver can control the led.
@@ -393,7 +393,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
depends on (OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS || COMPILE_TEST) && RC_CORE
- ---help---
+ help
Say Y or M here if you want to enable support for the IR
transmitter diode built in the Nokia N900 (RX51) device.
@@ -405,7 +405,7 @@ source "drivers/media/rc/img-ir/Kconfig"
config RC_LOOPBACK
tristate "Remote Control Loopback Driver"
depends on RC_CORE
- ---help---
+ help
Say Y here if you want support for the remote control loopback
driver which allows TX data to be sent back as RX data.
This is mostly useful for debugging purposes.
@@ -419,7 +419,7 @@ config IR_GPIO_CIR
tristate "GPIO IR remote control"
depends on RC_CORE
depends on (OF && GPIOLIB) || COMPILE_TEST
- ---help---
+ help
Say Y if you want to use GPIO based IR Receiver.
To compile this driver as a module, choose M here: the module will
@@ -430,7 +430,7 @@ config IR_GPIO_TX
depends on RC_CORE
depends on LIRC
depends on (OF && GPIOLIB) || COMPILE_TEST
- ---help---
+ help
Say Y if you want to a GPIO based IR transmitter. This is a
bit banging driver.
@@ -443,7 +443,7 @@ config IR_PWM_TX
depends on LIRC
depends on PWM
depends on OF || COMPILE_TEST
- ---help---
+ help
Say Y if you want to use a PWM based IR transmitter. This is
more power efficient than the bit banging gpio driver.
@@ -454,7 +454,7 @@ config RC_ST
tristate "ST remote control receiver"
depends on RC_CORE
depends on ARCH_STI || COMPILE_TEST
- ---help---
+ help
Say Y here if you want support for ST remote control driver
which allows both IR and UHF RX.
The driver passes raw pulse and space information to the LIRC decoder.
@@ -465,7 +465,7 @@ config IR_SUNXI
tristate "SUNXI IR remote control"
depends on RC_CORE
depends on ARCH_SUNXI || COMPILE_TEST
- ---help---
+ help
Say Y if you want to use sunXi internal IR Controller
To compile this driver as a module, choose M here: the module will
@@ -474,7 +474,7 @@ config IR_SUNXI
config IR_SERIAL
tristate "Homebrew Serial Port Receiver"
depends on RC_CORE
- ---help---
+ help
Say Y if you want to use Homebrew Serial Port Receivers and
Transceivers.
@@ -484,13 +484,13 @@ config IR_SERIAL
config IR_SERIAL_TRANSMITTER
bool "Serial Port Transmitter"
depends on IR_SERIAL
- ---help---
+ help
Serial Port Transmitter support
config IR_SIR
tristate "Built-in SIR IrDA port"
depends on RC_CORE
- ---help---
+ help
Say Y if you want to use a IrDA SIR port Transceivers.
To compile this driver as a module, choose M here: the module will
@@ -500,7 +500,7 @@ config IR_TANGO
tristate "Sigma Designs SMP86xx IR decoder"
depends on RC_CORE
depends on ARCH_TANGO || COMPILE_TEST
- ---help---
+ help
Adds support for the HW IR decoder embedded on Sigma Designs
Tango-based systems (SMP86xx, SMP87xx).
The HW decoder supports NEC, RC-5, RC-6 IR protocols.
@@ -522,7 +522,7 @@ config IR_ZX
tristate "ZTE ZX IR remote control"
depends on RC_CORE
depends on ARCH_ZX || COMPILE_TEST
- ---help---
+ help
Say Y if you want to use the IR remote control available
on ZTE ZX family SoCs.
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 390a722e6211..ee657003c1a1 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -97,6 +97,12 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
+ case BPF_FUNC_map_push_elem:
+ return &bpf_map_push_elem_proto;
+ case BPF_FUNC_map_pop_elem:
+ return &bpf_map_pop_elem_proto;
+ case BPF_FUNC_map_peek_elem:
+ return &bpf_map_peek_elem_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_tail_call:
diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c
index f1096ac1e5c5..64fb65a9a19f 100644
--- a/drivers/media/rc/ir-rcmm-decoder.c
+++ b/drivers/media/rc/ir-rcmm-decoder.c
@@ -5,7 +5,6 @@
#include "rc-core-priv.h"
#include <linux/module.h>
-#include <linux/version.h>
#define RCMM_UNIT 166667 /* nanosecs */
#define RCMM_PREFIX_PULSE 416666 /* 166666.666666666*2.5 */
diff --git a/drivers/media/rc/keymaps/Kconfig b/drivers/media/rc/keymaps/Kconfig
index 767423bbbdd0..f459096d8e9c 100644
--- a/drivers/media/rc/keymaps/Kconfig
+++ b/drivers/media/rc/keymaps/Kconfig
@@ -3,7 +3,7 @@ config RC_MAP
depends on RC_CORE
default y
- ---help---
+ help
This option enables the compilation of lots of Remote
Controller tables. They are short tables, but if you
don't use a remote controller, or prefer to load the
diff --git a/drivers/media/rc/keymaps/rc-xbox-dvd.c b/drivers/media/rc/keymaps/rc-xbox-dvd.c
index af387244636b..42815ab57bff 100644
--- a/drivers/media/rc/keymaps/rc-xbox-dvd.c
+++ b/drivers/media/rc/keymaps/rc-xbox-dvd.c
@@ -42,7 +42,7 @@ static struct rc_map_list xbox_dvd_map = {
.map = {
.scan = xbox_dvd,
.size = ARRAY_SIZE(xbox_dvd),
- .rc_proto = RC_PROTO_UNKNOWN,
+ .rc_proto = RC_PROTO_XBOX_DVD,
.name = RC_MAP_XBOX_DVD,
}
};
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index f862f1b7f996..92db1e83c192 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -195,7 +195,7 @@ static int ir_lirc_open(struct inode *inode, struct file *file)
list_add(&fh->list, &dev->lirc_fh);
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
- nonseekable_open(inode, file);
+ stream_open(inode, file);
return 0;
out_kfifo:
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index e8fa28e20192..be5fd129d728 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -76,6 +76,7 @@ static const struct {
.scancode_bits = 0x00ffffff, .repeat_period = 114 },
[RC_PROTO_RCMM32] = { .name = "rc-mm-32",
.scancode_bits = 0xffffffff, .repeat_period = 114 },
+ [RC_PROTO_XBOX_DVD] = { .name = "xbox-dvd", .repeat_period = 64 },
};
/* Used to keep track of known keymaps */
@@ -1027,6 +1028,7 @@ static const struct {
{ RC_PROTO_BIT_RCMM12 |
RC_PROTO_BIT_RCMM24 |
RC_PROTO_BIT_RCMM32, "rc-mm", "ir-rcmm-decoder" },
+ { RC_PROTO_BIT_XBOX_DVD, "xbox-dvd", NULL },
};
/**
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index ffe2c672d105..3998ba29beb6 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -773,8 +773,6 @@ static void serial_ir_exit(void)
static int __init serial_ir_init_module(void)
{
- int result;
-
switch (type) {
case IR_HOMEBREW:
case IR_IRDEO:
@@ -802,12 +800,7 @@ static int __init serial_ir_init_module(void)
if (sense != -1)
sense = !!sense;
- result = serial_ir_init();
- if (!result)
- return 0;
-
- serial_ir_exit();
- return result;
+ return serial_ir_init();
}
static void __exit serial_ir_exit_module(void)
diff --git a/drivers/media/rc/xbox_remote.c b/drivers/media/rc/xbox_remote.c
index f959cbb94744..4a3f2cc4ef18 100644
--- a/drivers/media/rc/xbox_remote.c
+++ b/drivers/media/rc/xbox_remote.c
@@ -107,7 +107,7 @@ static void xbox_remote_input_report(struct urb *urb)
return;
}
- rc_keydown(xbox_remote->rdev, RC_PROTO_UNKNOWN,
+ rc_keydown(xbox_remote->rdev, RC_PROTO_XBOX_DVD,
le16_to_cpup((__le16 *)(data + 2)), 0);
}
@@ -148,7 +148,7 @@ static void xbox_remote_rc_init(struct xbox_remote *xbox_remote)
struct rc_dev *rdev = xbox_remote->rdev;
rdev->priv = xbox_remote;
- rdev->allowed_protocols = RC_PROTO_BIT_UNKNOWN;
+ rdev->allowed_protocols = RC_PROTO_BIT_XBOX_DVD;
rdev->driver_name = "xbox_remote";
rdev->open = xbox_remote_rc_open;
@@ -157,6 +157,8 @@ static void xbox_remote_rc_init(struct xbox_remote *xbox_remote)
rdev->device_name = xbox_remote->rc_name;
rdev->input_phys = xbox_remote->rc_phys;
+ rdev->timeout = MS_TO_NS(10);
+
usb_to_input_id(xbox_remote->udev, &rdev->input_id);
rdev->dev.parent = &xbox_remote->interface->dev;
}
diff --git a/drivers/media/spi/Kconfig b/drivers/media/spi/Kconfig
index b07ac86fc53c..df169ecf0c27 100644
--- a/drivers/media/spi/Kconfig
+++ b/drivers/media/spi/Kconfig
@@ -6,7 +6,7 @@ menu "SPI helper chips"
config VIDEO_GS1662
tristate "Gennum Serializers video"
depends on SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- ---help---
+ help
Enable the GS1662 driver which serializes video streams.
endmenu
diff --git a/drivers/media/usb/airspy/Kconfig b/drivers/media/usb/airspy/Kconfig
index 10b204cf4dbc..67578511bb9a 100644
--- a/drivers/media/usb/airspy/Kconfig
+++ b/drivers/media/usb/airspy/Kconfig
@@ -2,7 +2,7 @@ config USB_AIRSPY
tristate "AirSpy"
depends on VIDEO_V4L2
select VIDEOBUF2_VMALLOC
- ---help---
+ help
This is a video4linux2 driver for AirSpy SDR device.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig
index 65fc067eb864..0ad985542c60 100644
--- a/drivers/media/usb/au0828/Kconfig
+++ b/drivers/media/usb/au0828/Kconfig
@@ -2,6 +2,8 @@
config VIDEO_AU0828
tristate "Auvitek AU0828 support"
depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_DVB
select I2C_ALGOBIT
select VIDEO_TVEEPROM
select VIDEOBUF2_VMALLOC if VIDEO_V4L2
@@ -9,7 +11,7 @@ config VIDEO_AU0828
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This is a hybrid analog/digital tv capture driver for
Auvitek's AU0828 USB device.
@@ -23,7 +25,7 @@ config VIDEO_AU0828_V4L2
select DVB_AU8522_V4L if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TUNER
default y
- ---help---
+ help
This is a video4linux driver for Auvitek's USB device.
Choose Y here to include support for v4l2 analog video
@@ -34,5 +36,5 @@ config VIDEO_AU0828_RC
depends on RC_CORE
depends on !(RC_CORE=m && VIDEO_AU0828=y)
depends on VIDEO_AU0828
- ---help---
+ help
Enables Remote Controller support on au0828 driver.
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 3f8c92a70116..925a80437822 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -155,9 +155,7 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
dev->media_dev->disable_source = NULL;
mutex_unlock(&mdev->graph_mutex);
- media_device_unregister(dev->media_dev);
- media_device_cleanup(dev->media_dev);
- kfree(dev->media_dev);
+ media_device_delete(dev->media_dev, KBUILD_MODNAME, THIS_MODULE);
dev->media_dev = NULL;
#endif
}
@@ -210,14 +208,10 @@ static int au0828_media_device_init(struct au0828_dev *dev,
#ifdef CONFIG_MEDIA_CONTROLLER
struct media_device *mdev;
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
if (!mdev)
return -ENOMEM;
- /* check if media device is already initialized */
- if (!mdev->dev)
- media_device_usb_init(mdev, udev, udev->product);
-
dev->media_dev = mdev;
#endif
return 0;
@@ -278,6 +272,28 @@ create_link:
}
}
+static bool au0828_is_link_shareable(struct media_entity *owner,
+ struct media_entity *entity)
+{
+ bool shareable = false;
+
+ /* Tuner link can be shared by audio, video, and VBI */
+ switch (owner->function) {
+ case MEDIA_ENT_F_IO_V4L:
+ case MEDIA_ENT_F_AUDIO_CAPTURE:
+ case MEDIA_ENT_F_IO_VBI:
+ if (entity->function == MEDIA_ENT_F_IO_V4L ||
+ entity->function == MEDIA_ENT_F_AUDIO_CAPTURE ||
+ entity->function == MEDIA_ENT_F_IO_VBI)
+ shareable = true;
+ break;
+ case MEDIA_ENT_F_DTV_DEMOD:
+ default:
+ break;
+ }
+ return shareable;
+}
+
/* Callers should hold graph_mutex */
static int au0828_enable_source(struct media_entity *entity,
struct media_pipeline *pipe)
@@ -320,18 +336,20 @@ static int au0828_enable_source(struct media_entity *entity,
/*
* Default input is tuner and default input_type
* is AU0828_VMUX_TELEVISION.
- * FIXME:
+ *
* There is a problem when s_input is called to
* change the default input. s_input will try to
* enable_source before attempting to change the
* input on the device, and will end up enabling
* default source which is tuner.
*
- * Additional logic is necessary in au0828
- * to detect that the input has changed and
- * enable the right source.
+ * Additional logic is necessary in au0828 to detect
+ * that the input has changed and enable the right
+ * source. au0828 handles this case in its s_input.
+ * It will disable the old source and enable the new
+ * source.
+ *
*/
-
if (dev->input_type == AU0828_VMUX_TELEVISION)
find_source = dev->tuner;
else if (dev->input_type == AU0828_VMUX_SVIDEO ||
@@ -344,27 +362,33 @@ static int au0828_enable_source(struct media_entity *entity,
}
}
- /* Is an active link between sink and source */
+ /* Is there an active link between sink and source */
if (dev->active_link) {
- /*
- * If DVB is using the tuner and calling entity is
- * audio/video, the following check will be false,
- * since sink is different. Result is Busy.
- */
- if (dev->active_link->sink->entity == sink &&
- dev->active_link->source->entity == find_source) {
- /*
- * Either ALSA or Video own tuner. sink is
- * the same for both. Prevent Video stepping
- * on ALSA when ALSA owns the source.
+ if (dev->active_link_owner == entity) {
+ /* This check is necessary to handle multiple
+ * enable_source calls from v4l_ioctls during
+ * the course of video/vbi application run-time.
*/
- if (dev->active_link_owner != entity &&
- dev->active_link_owner->function ==
- MEDIA_ENT_F_AUDIO_CAPTURE) {
- pr_debug("ALSA has the tuner\n");
- ret = -EBUSY;
- goto end;
- }
+ pr_debug("%s already owns the tuner\n", entity->name);
+ ret = 0;
+ goto end;
+ } else if (au0828_is_link_shareable(dev->active_link_owner,
+ entity)) {
+ /* Either ALSA or Video owns the tuner. Sink is the same
+ * for both. Allow sharing the active link between
+ * their common source (tuner) and sink (decoder).
+ * Starting a pipeline between the sharing entity and the
+ * sink will fail with a pipe mismatch while the owner has
+ * an active pipeline. Switch pipeline ownership from the
+ * owner to the user when the owner disables the source.
+ */
+ dev->active_link_shared = true;
+ /* save the user info to use from disable */
+ dev->active_link_user = entity;
+ dev->active_link_user_pipe = pipe;
+ pr_debug("%s owns the tuner %s can share!\n",
+ dev->active_link_owner->name,
+ entity->name);
ret = 0;
goto end;
} else {
@@ -391,7 +415,7 @@ static int au0828_enable_source(struct media_entity *entity,
source = found_link->source->entity;
ret = __media_entity_setup_link(found_link, MEDIA_LNK_FL_ENABLED);
if (ret) {
- pr_err("Activate tuner link %s->%s. Error %d\n",
+ pr_err("Activate link from %s->%s. Error %d\n",
source->name, sink->name, ret);
goto end;
}
@@ -401,25 +425,26 @@ static int au0828_enable_source(struct media_entity *entity,
pr_err("Start Pipeline: %s->%s Error %d\n",
source->name, entity->name, ret);
ret = __media_entity_setup_link(found_link, 0);
- pr_err("Deactivate link Error %d\n", ret);
+ if (ret)
+ pr_err("Deactivate link Error %d\n", ret);
goto end;
}
- /*
- * save active link and active link owner to avoid audio
- * deactivating video owned link from disable_source and
- * vice versa
+
+ /* save link state to allow audio and video to share the link
+ * and not disable the link while the other is using it.
+ * active_link_owner is used to deactivate the link.
*/
dev->active_link = found_link;
dev->active_link_owner = entity;
dev->active_source = source;
dev->active_sink = sink;
- pr_debug("Enabled Source: %s->%s->%s Ret %d\n",
+ pr_info("Enabled Source: %s->%s->%s Ret %d\n",
dev->active_source->name, dev->active_sink->name,
dev->active_link_owner->name, ret);
end:
- pr_debug("au0828_enable_source() end %s %d %d\n",
- entity->name, entity->function, ret);
+ pr_debug("%s end: ent:%s fnc:%d ret %d\n",
+ __func__, entity->name, entity->function, ret);
return ret;
}
@@ -438,21 +463,95 @@ static void au0828_disable_source(struct media_entity *entity)
if (!dev->active_link)
return;
- /* link is active - stop pipeline from source (tuner) */
+ /* link is active - stop pipeline from source
+ * (tuner/s-video/Composite) to the entity.
+ * When DVB/s-video/Composite owns the tuner, it won't be in
+ * shared state.
+ */
if (dev->active_link->sink->entity == dev->active_sink &&
dev->active_link->source->entity == dev->active_source) {
/*
- * prevent video from deactivating link when audio
- * has active pipeline
+ * Prevent video from deactivating link when audio
+ * has active pipeline and vice versa. In addition
+ * handle the case when more than one video/vbi
+ * application is sharing the link.
*/
+ bool owner_is_audio = false;
+
+ if (dev->active_link_owner->function ==
+ MEDIA_ENT_F_AUDIO_CAPTURE)
+ owner_is_audio = true;
+
+ if (dev->active_link_shared) {
+ pr_debug("Shared link owner %s user %s %d\n",
+ dev->active_link_owner->name,
+ entity->name, dev->users);
+
+ /* Handle video device users > 1
+ * When audio owns the shared link with
+ * more than one video user, avoid
+ * disabling the source and/or switching
+ * the owner until the last disable_source
+ * call from video _close(). Use dev->users to
+ * determine when to switch/disable.
+ */
+ if (dev->active_link_owner != entity) {
+ /* video device has users > 1 */
+ if (owner_is_audio && dev->users > 1)
+ return;
+
+ dev->active_link_user = NULL;
+ dev->active_link_user_pipe = NULL;
+ dev->active_link_shared = false;
+ return;
+ }
+
+ /* video owns the link and has users > 1 */
+ if (!owner_is_audio && dev->users > 1)
+ return;
+
+ /* stop pipeline */
+ __media_pipeline_stop(dev->active_link_owner);
+ pr_debug("Pipeline stop for %s\n",
+ dev->active_link_owner->name);
+
+ ret = __media_pipeline_start(
+ dev->active_link_user,
+ dev->active_link_user_pipe);
+ if (ret) {
+ pr_err("Start Pipeline: %s->%s %d\n",
+ dev->active_source->name,
+ dev->active_link_user->name,
+ ret);
+ goto deactivate_link;
+ }
+ /* link user is now the owner */
+ dev->active_link_owner = dev->active_link_user;
+ dev->active_link_user = NULL;
+ dev->active_link_user_pipe = NULL;
+ dev->active_link_shared = false;
+
+ pr_debug("Pipeline started for %s\n",
+ dev->active_link_owner->name);
+ return;
+ } else if (!owner_is_audio && dev->users > 1)
+ /* video/vbi owns the link and has users > 1 */
+ return;
+
if (dev->active_link_owner != entity)
return;
- __media_pipeline_stop(entity);
+
+ /* stop pipeline */
+ __media_pipeline_stop(dev->active_link_owner);
+ pr_debug("Pipeline stop for %s\n",
+ dev->active_link_owner->name);
+
+deactivate_link:
ret = __media_entity_setup_link(dev->active_link, 0);
if (ret)
pr_err("Deactivate link Error %d\n", ret);
- pr_debug("Disabled Source: %s->%s->%s Ret %d\n",
+ pr_info("Disabled Source: %s->%s->%s Ret %d\n",
dev->active_source->name, dev->active_sink->name,
dev->active_link_owner->name, ret);
@@ -460,6 +559,8 @@ static void au0828_disable_source(struct media_entity *entity)
dev->active_link_owner = NULL;
dev->active_source = NULL;
dev->active_sink = NULL;
+ dev->active_link_shared = false;
+ dev->active_link_user = NULL;
}
}
#endif
@@ -480,6 +581,9 @@ static int au0828_media_device_register(struct au0828_dev *dev,
/* register media device */
ret = media_device_register(dev->media_dev);
if (ret) {
+ media_device_delete(dev->media_dev, KBUILD_MODNAME,
+ THIS_MODULE);
+ dev->media_dev = NULL;
dev_err(&udev->dev,
"Media Device Register Error: %d\n", ret);
return ret;
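The au0828-core.c rework lets the video, VBI and ALSA capture entities share the single tuner link: the first caller owns the pipeline, a compatible second caller is recorded as a pending user, and when the owner releases the source the pending user inherits ownership, while DVB keeps the link exclusive. The sketch below models only that ownership hand-off; the entity kinds, globals and pipeline calls are hypothetical simplifications of the driver state, not the media-controller API.

#include <stdbool.h>
#include <stdio.h>

enum kind { KIND_VIDEO, KIND_VBI, KIND_AUDIO, KIND_DVB };

struct entity { const char *name; enum kind kind; };

/* Hypothetical, simplified view of the au0828 link state. */
static struct entity *owner;
static struct entity *pending_user;
static bool shared;

/* Mirrors au0828_is_link_shareable(): only video/VBI/audio capture
 * entities may share the tuner link; DVB keeps exclusive ownership. */
static bool shareable(const struct entity *a, const struct entity *b)
{
	return a->kind != KIND_DVB && b->kind != KIND_DVB;
}

static int enable_source(struct entity *e)
{
	if (!owner) {
		owner = e;
		printf("%s starts the pipeline and owns the link\n", e->name);
		return 0;
	}
	if (owner == e) {
		printf("%s already owns the link\n", e->name);
		return 0;
	}
	if (shareable(owner, e)) {
		shared = true;
		pending_user = e;
		printf("%s owns the link, %s shares it\n", owner->name, e->name);
		return 0;
	}
	printf("%s is busy, %s gets -EBUSY\n", owner->name, e->name);
	return -1;
}

static void disable_source(struct entity *e)
{
	if (owner != e) {
		if (pending_user == e) {	/* a sharing user closes first */
			pending_user = NULL;
			shared = false;
		}
		return;
	}
	printf("%s stops its pipeline\n", e->name);
	if (shared && pending_user) {
		/* Hand the pipeline over to the waiting user. */
		owner = pending_user;
		pending_user = NULL;
		shared = false;
		printf("%s now owns the link\n", owner->name);
		return;
	}
	owner = NULL;
	printf("link deactivated\n");
}

int main(void)
{
	struct entity video = { "video", KIND_VIDEO };
	struct entity audio = { "audio", KIND_AUDIO };
	struct entity dvb = { "dvb", KIND_DVB };

	enable_source(&audio);	/* audio owns the tuner link */
	enable_source(&video);	/* video shares it */
	enable_source(&dvb);	/* DVB cannot share: busy */
	disable_source(&audio);	/* ownership passes to video */
	disable_source(&video);	/* last user, link is deactivated */
	return 0;
}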
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 7876c897cc1d..4bde3db83aa2 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -758,6 +758,9 @@ static int au0828_analog_stream_enable(struct au0828_dev *d)
dprintk(1, "au0828_analog_stream_enable called\n");
+ if (test_bit(DEV_DISCONNECTED, &d->dev_state))
+ return -ENODEV;
+
iface = usb_ifnum_to_if(d->usbdev, 0);
if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
dprintk(1, "Changing intf#0 to alt 5\n");
@@ -839,9 +842,9 @@ int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
return rc;
}
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
+
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- v4l2_device_call_all(&dev->v4l2_dev, 0, video,
- s_stream, 1);
dev->vid_timeout_running = 1;
mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
} else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
@@ -861,10 +864,11 @@ static void au0828_stop_streaming(struct vb2_queue *vq)
dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users);
- if (dev->streaming_users-- == 1)
+ if (dev->streaming_users-- == 1) {
au0828_uninit_isoc(dev);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+ }
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
dev->vid_timeout_running = 0;
del_timer_sync(&dev->vid_timeout);
@@ -893,8 +897,10 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
dprintk(1, "au0828_stop_vbi_streaming called %d\n",
dev->streaming_users);
- if (dev->streaming_users-- == 1)
+ if (dev->streaming_users-- == 1) {
au0828_uninit_isoc(dev);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+ }
spin_lock_irqsave(&dev->slock, flags);
if (dev->isoc_ctl.vbi_buf != NULL) {
@@ -1065,7 +1071,7 @@ static int au0828_v4l2_close(struct file *filp)
* streaming.
*
* On most USB devices like au0828 the tuner can
- * be safely put in sleep stare here if ALSA isn't
+ * be safely put in sleep state here if ALSA isn't
* streaming. Exceptions are some very old USB tuner
* models such as em28xx-based WinTV USB2 which have
* a separate audio output jack. The devices that have
@@ -1074,7 +1080,7 @@ static int au0828_v4l2_close(struct file *filp)
* so the s_power callback are silently ignored.
* So, the current logic here does the following:
* Disable (put tuner to sleep) when
- * - ALSA and DVB aren't not streaming;
+ * - ALSA and DVB aren't streaming.
* - the last V4L2 file handler is closed.
*
* FIXME:
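The au0828-video.c hunks issue the subdev s_stream(1) call for both the video and VBI queues and defer s_stream(0) until the last streaming queue stops, tracked by streaming_users. A small standalone model of that reference-counted start/stop (illustrative names only, not the driver API):

#include <stdio.h>

/* Number of capture queues (video, VBI) currently streaming. */
static int streaming_users;

static void subdev_s_stream(int on)
{
	printf("s_stream(%d)\n", on);
}

static void start_streaming(const char *queue)
{
	streaming_users++;
	subdev_s_stream(1);		/* issued for video and VBI alike */
	printf("%s started, users=%d\n", queue, streaming_users);
}

static void stop_streaming(const char *queue)
{
	if (streaming_users-- == 1)
		subdev_s_stream(0);	/* only the last queue stops the subdevs */
	printf("%s stopped, users=%d\n", queue, streaming_users);
}

int main(void)
{
	start_streaming("video");	/* s_stream(1) */
	start_streaming("vbi");		/* s_stream(1), still streaming */
	stop_streaming("video");	/* one queue left, no s_stream(0) */
	stop_streaming("vbi");		/* last queue, s_stream(0) */
	return 0;
}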
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 425c35d16057..b47ecc9affd8 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -31,6 +31,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/media-device.h>
+#include <media/media-dev-allocator.h>
/* DVB */
#include <media/demux.h>
@@ -283,9 +284,12 @@ struct au0828_dev {
struct media_entity_notify entity_notify;
struct media_entity *tuner;
struct media_link *active_link;
- struct media_entity *active_link_owner;
struct media_entity *active_source;
struct media_entity *active_sink;
+ struct media_entity *active_link_owner;
+ struct media_entity *active_link_user;
+ struct media_pipeline *active_link_user_pipe;
+ bool active_link_shared;
#endif
};
diff --git a/drivers/media/usb/cpia2/Kconfig b/drivers/media/usb/cpia2/Kconfig
index 66e9283f5993..7029a04f3ffd 100644
--- a/drivers/media/usb/cpia2/Kconfig
+++ b/drivers/media/usb/cpia2/Kconfig
@@ -1,7 +1,7 @@
config VIDEO_CPIA2
tristate "CPiA2 Video For Linux"
depends on VIDEO_DEV && USB && VIDEO_V4L2
- ---help---
+ help
This is the video4linux driver for cameras based on Vision's CPiA2
(Colour Processor Interface ASIC), such as the Digital Blue QX5
Microscope. If you have one of these cameras, say Y here
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 95c0bd4a19dc..45caf78119c4 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -1240,8 +1240,7 @@ static int __init cpia2_init(void)
LOG("%s v%s\n",
ABOUT, CPIA_VERSION);
check_parameters();
- cpia2_usb_init();
- return 0;
+ return cpia2_usb_init();
}
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 9e5b3e7c3ef5..9262d0d7439a 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -7,7 +7,7 @@ config VIDEO_CX231XX
select VIDEO_CX25840
select VIDEO_CX2341X
- ---help---
+ help
This is a video4linux driver for Conexant 231xx USB based TV cards.
To compile this driver as a module, choose M here: the
@@ -18,7 +18,7 @@ config VIDEO_CX231XX_RC
depends on RC_CORE=y || RC_CORE=VIDEO_CX231XX
depends on VIDEO_CX231XX
default y
- ---help---
+ help
cx231xx hardware has a builtin RX/TX support. However, a few
designs opted to not use it, but, instead, some other hardware.
This module enables the usage of those other hardware, like the
@@ -31,7 +31,7 @@ config VIDEO_CX231XX_ALSA
depends on VIDEO_CX231XX && SND
select SND_PCM
- ---help---
+ help
This is an ALSA driver for Cx231xx USB based TV cards.
To compile this driver as a module, choose M here: the
@@ -52,6 +52,6 @@ config VIDEO_CX231XX_DVB
select DVB_MN88473 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_R820T if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This adds support for DVB cards based on the
Conexant cx231xx chips.
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 80d3bd3a0f24..1b7f1af399fb 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -846,6 +846,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
state->af9033_config[1].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[0].ts_mode = AF9033_TS_MODE_USB;
state->af9033_config[1].ts_mode = AF9033_TS_MODE_SERIAL;
+ state->it930x_addresses = 0;
if (state->chip_type == 0x9135) {
/* feed clock for integrated RF tuner */
@@ -872,6 +873,10 @@ static int af9035_read_config(struct dvb_usb_device *d)
* IT930x is an USB bridge, only single demod-single tuner
* configurations seen so far.
*/
+ if ((le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_AVERMEDIA) &&
+ (le16_to_cpu(d->udev->descriptor.idProduct) == USB_PID_AVERMEDIA_TD310)) {
+ state->it930x_addresses = 1;
+ }
return 0;
}
@@ -1218,6 +1223,48 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
+ /* I2C master bus 2 clock speed 300k */
+ ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
+ if (ret < 0)
+ goto err;
+
+ /* I2C master bus 1,3 clock speed 300k */
+ ret = af9035_wr_reg(d, 0x00f103, 0x07);
+ if (ret < 0)
+ goto err;
+
+ /* set gpio11 low */
+ ret = af9035_wr_reg_mask(d, 0xd8d4, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8d5, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8d3, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* Tuner enable using gpiot2_en, gpiot2_on and gpiot2_o (reset) */
+ ret = af9035_wr_reg_mask(d, 0xd8b8, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8b9, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8b7, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ msleep(200);
+
+ ret = af9035_wr_reg_mask(d, 0xd8b7, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
memset(&si2168_config, 0, sizeof(si2168_config));
si2168_config.i2c_adapter = &adapter;
si2168_config.fe = &adap->fe[0];
@@ -1225,8 +1272,9 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
state->af9033_config[adap->id].fe = &adap->fe[0];
state->af9033_config[adap->id].ops = &state->ops;
- ret = af9035_add_i2c_dev(d, "si2168", 0x67, &si2168_config,
- &d->i2c_adap);
+ ret = af9035_add_i2c_dev(d, "si2168",
+ it930x_addresses_table[state->it930x_addresses].frontend_i2c_addr,
+ &si2168_config, &d->i2c_adap);
if (ret)
goto err;
@@ -1575,54 +1623,12 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
- /* I2C master bus 2 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
- if (ret < 0)
- goto err;
-
- /* I2C master bus 1,3 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f103, 0x07);
- if (ret < 0)
- goto err;
-
- /* set gpio11 low */
- ret = af9035_wr_reg_mask(d, 0xd8d4, 0x01, 0x01);
- if (ret < 0)
- goto err;
-
- ret = af9035_wr_reg_mask(d, 0xd8d5, 0x01, 0x01);
- if (ret < 0)
- goto err;
-
- ret = af9035_wr_reg_mask(d, 0xd8d3, 0x01, 0x01);
- if (ret < 0)
- goto err;
-
- /* Tuner enable using gpiot2_en, gpiot2_on and gpiot2_o (reset) */
- ret = af9035_wr_reg_mask(d, 0xd8b8, 0x01, 0x01);
- if (ret < 0)
- goto err;
-
- ret = af9035_wr_reg_mask(d, 0xd8b9, 0x01, 0x01);
- if (ret < 0)
- goto err;
-
- ret = af9035_wr_reg_mask(d, 0xd8b7, 0x00, 0x01);
- if (ret < 0)
- goto err;
-
- msleep(200);
-
- ret = af9035_wr_reg_mask(d, 0xd8b7, 0x01, 0x01);
- if (ret < 0)
- goto err;
-
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = adap->fe[0];
- si2157_config.if_port = 1;
- ret = af9035_add_i2c_dev(d, "si2157", 0x63,
- &si2157_config, state->i2c_adapter_demod);
-
+ si2157_config.if_port = it930x_addresses_table[state->it930x_addresses].tuner_if_port;
+ ret = af9035_add_i2c_dev(d, "si2157",
+ it930x_addresses_table[state->it930x_addresses].tuner_i2c_addr,
+ &si2157_config, state->i2c_adapter_demod);
if (ret)
goto err;
@@ -2128,6 +2134,8 @@ static const struct usb_device_id af9035_id_table[] = {
/* IT930x devices */
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9303,
&it930x_props, "ITE 9303 Generic", NULL) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD310,
+ &it930x_props, "AVerMedia TD310 DVB-T2", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index a76e6bf0ab1e..bc41c16f9727 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -69,6 +69,7 @@ struct state {
u8 dual_mode:1;
u8 no_read:1;
u8 af9033_i2c_addr[2];
+ u8 it930x_addresses;
struct af9033_config af9033_config[2];
struct af9033_ops ops;
#define AF9035_I2C_CLIENT_MAX 4
@@ -77,6 +78,17 @@ struct state {
struct platform_device *platform_device_tuner[2];
};
+struct address_table {
+ u8 frontend_i2c_addr;
+ u8 tuner_i2c_addr;
+ u8 tuner_if_port;
+};
+
+static const struct address_table it930x_addresses_table[] = {
+ { 0x67, 0x63, 1 },
+ { 0x64, 0x60, 0 },
+};
+
static const u32 clock_lut_af9035[] = {
20480000, /* FPGA */
16384000, /* 16.38 MHz */
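The af9035.h addition is a two-row address table indexed by state->it930x_addresses: row 0 keeps the generic IT9303 wiring (Si2168 at 0x67, Si2157 at 0x63, IF port 1) and row 1 carries the AVerMedia TD310 wiring (0x64, 0x60, IF port 0), selected in af9035_read_config() by USB vendor/product ID. The standalone sketch below shows the same table-driven selection; the numeric VID/PID values in it are placeholders for illustration, not the real USB IDs.

#include <stdint.h>
#include <stdio.h>

struct address_table {
	uint8_t frontend_i2c_addr;
	uint8_t tuner_i2c_addr;
	uint8_t tuner_if_port;
};

/* Same layout as it930x_addresses_table[] in af9035.h. */
static const struct address_table it930x_addresses_table[] = {
	{ 0x67, 0x63, 1 },	/* generic IT9303 reference design */
	{ 0x64, 0x60, 0 },	/* AVerMedia TD310 */
};

/* Placeholder USB IDs for illustration only. */
#define VID_AVERMEDIA	0x07ca
#define PID_TD310	0x1871

static unsigned int select_it930x_addresses(uint16_t vid, uint16_t pid)
{
	if (vid == VID_AVERMEDIA && pid == PID_TD310)
		return 1;
	return 0;
}

int main(void)
{
	unsigned int idx = select_it930x_addresses(VID_AVERMEDIA, PID_TD310);
	const struct address_table *a = &it930x_addresses_table[idx];

	printf("demod @ 0x%02x, tuner @ 0x%02x, IF port %d\n",
	       a->frontend_i2c_addr, a->tuner_i2c_addr, a->tuner_if_port);
	return 0;
}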
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index e28bd8836751..ae0814dd202a 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -615,16 +615,18 @@ static int dvbsky_init(struct dvb_usb_device *d)
return 0;
}
-static void dvbsky_exit(struct dvb_usb_device *d)
+static int dvbsky_frontend_detach(struct dvb_usb_adapter *adap)
{
+ struct dvb_usb_device *d = adap_to_d(adap);
struct dvbsky_state *state = d_to_priv(d);
- struct dvb_usb_adapter *adap = &d->adapter[0];
+
+ dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
dvb_module_release(state->i2c_client_tuner);
dvb_module_release(state->i2c_client_demod);
dvb_module_release(state->i2c_client_ci);
- adap->fe[0] = NULL;
+ return 0;
}
/* DVB USB Driver stuff */
@@ -640,11 +642,11 @@ static struct dvb_usb_device_properties dvbsky_s960_props = {
.i2c_algo = &dvbsky_i2c_algo,
.frontend_attach = dvbsky_s960_attach,
+ .frontend_detach = dvbsky_frontend_detach,
.init = dvbsky_init,
.get_rc_config = dvbsky_get_rc_config,
.streaming_ctrl = dvbsky_streaming_ctrl,
.identify_state = dvbsky_identify_state,
- .exit = dvbsky_exit,
.read_mac_address = dvbsky_read_mac_addr,
.num_adapters = 1,
@@ -667,11 +669,11 @@ static struct dvb_usb_device_properties dvbsky_s960c_props = {
.i2c_algo = &dvbsky_i2c_algo,
.frontend_attach = dvbsky_s960c_attach,
+ .frontend_detach = dvbsky_frontend_detach,
.init = dvbsky_init,
.get_rc_config = dvbsky_get_rc_config,
.streaming_ctrl = dvbsky_streaming_ctrl,
.identify_state = dvbsky_identify_state,
- .exit = dvbsky_exit,
.read_mac_address = dvbsky_read_mac_addr,
.num_adapters = 1,
@@ -694,11 +696,11 @@ static struct dvb_usb_device_properties dvbsky_t680c_props = {
.i2c_algo = &dvbsky_i2c_algo,
.frontend_attach = dvbsky_t680c_attach,
+ .frontend_detach = dvbsky_frontend_detach,
.init = dvbsky_init,
.get_rc_config = dvbsky_get_rc_config,
.streaming_ctrl = dvbsky_streaming_ctrl,
.identify_state = dvbsky_identify_state,
- .exit = dvbsky_exit,
.read_mac_address = dvbsky_read_mac_addr,
.num_adapters = 1,
@@ -721,11 +723,11 @@ static struct dvb_usb_device_properties dvbsky_t330_props = {
.i2c_algo = &dvbsky_i2c_algo,
.frontend_attach = dvbsky_t330_attach,
+ .frontend_detach = dvbsky_frontend_detach,
.init = dvbsky_init,
.get_rc_config = dvbsky_get_rc_config,
.streaming_ctrl = dvbsky_streaming_ctrl,
.identify_state = dvbsky_identify_state,
- .exit = dvbsky_exit,
.read_mac_address = dvbsky_read_mac_addr,
.num_adapters = 1,
@@ -748,11 +750,11 @@ static struct dvb_usb_device_properties mygica_t230c_props = {
.i2c_algo = &dvbsky_i2c_algo,
.frontend_attach = dvbsky_mygica_t230c_attach,
+ .frontend_detach = dvbsky_frontend_detach,
.init = dvbsky_init,
.get_rc_config = dvbsky_get_rc_config,
.streaming_ctrl = dvbsky_streaming_ctrl,
.identify_state = dvbsky_identify_state,
- .exit = dvbsky_exit,
.num_adapters = 1,
.adapter = {
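The dvbsky hunks move per-adapter teardown from the device-level .exit hook into a per-adapter .frontend_detach callback, so the demod, tuner and CI client modules are released by the frontend lifecycle rather than at device exit. A small userspace analogue of that ops-table shape, with made-up names (this is not the dvb_usb_device_properties API), just to show cleanup hanging off the per-adapter callback:

#include <stdio.h>

struct adapter {
        int id;
};

struct device_props {
        int (*frontend_attach)(struct adapter *adap);
        int (*frontend_detach)(struct adapter *adap);  /* per-adapter cleanup */
};

static int demo_attach(struct adapter *adap)
{
        printf("attach adap=%d\n", adap->id);
        return 0;
}

static int demo_detach(struct adapter *adap)
{
        /* release per-adapter resources here, as dvbsky_frontend_detach() does */
        printf("detach adap=%d\n", adap->id);
        return 0;
}

static const struct device_props demo_props = {
        .frontend_attach = demo_attach,
        .frontend_detach = demo_detach,
};

int main(void)
{
        struct adapter adap = { .id = 0 };

        demo_props.frontend_attach(&adap);
        demo_props.frontend_detach(&adap);
        return 0;
}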
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index 451e076525d3..639da7e24066 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -13,7 +13,7 @@ config VIDEO_EM28XX_V4L2
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
- ---help---
+ help
This is a video4linux driver for Empia 28xx based TV cards.
To compile this driver as a module, choose M here: the
@@ -23,7 +23,7 @@ config VIDEO_EM28XX_ALSA
depends on VIDEO_EM28XX && SND
select SND_PCM
tristate "Empia EM28xx ALSA audio module"
- ---help---
+ help
This is an ALSA driver for some Empia 28xx based TV cards.
This is not required for em2800/em2820/em2821 boards. However,
@@ -66,7 +66,7 @@ config VIDEO_EM28XX_DVB
select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This adds support for DVB cards based on the
Empiatech em28xx chips.
@@ -76,5 +76,5 @@ config VIDEO_EM28XX_RC
depends on VIDEO_EM28XX
depends on !(RC_CORE=m && VIDEO_EM28XX=y)
default VIDEO_EM28XX
- ---help---
+ help
Enables Remote Controller support on em28xx driver.
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index f84a1208d5d3..d85ea1af6aa1 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -499,7 +499,7 @@ static int em28xx_probe_i2c_ir(struct em28xx *dev)
* at address 0x18, so if that address is needed for another board in
* the future, please put it after 0x1f.
*/
- const unsigned short addr_list[] = {
+ static const unsigned short addr_list[] = {
0x1f, 0x30, 0x47, I2C_CLIENT_END
};
diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
index af1d02430931..beab257c092f 100644
--- a/drivers/media/usb/go7007/Kconfig
+++ b/drivers/media/usb/go7007/Kconfig
@@ -13,7 +13,7 @@ config VIDEO_GO7007
select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This is a video4linux driver for the WIS GO7007 MPEG
encoder chip.
@@ -23,7 +23,7 @@ config VIDEO_GO7007
config VIDEO_GO7007_USB
tristate "WIS GO7007 USB support"
depends on VIDEO_GO7007 && USB
- ---help---
+ help
This is a video4linux driver for the WIS GO7007 MPEG
encoder chip over USB.
@@ -34,7 +34,7 @@ config VIDEO_GO7007_LOADER
tristate "WIS GO7007 Loader support"
depends on VIDEO_GO7007
default y
- ---help---
+ help
This is a go7007 firmware loader driver for the WIS GO7007
MPEG encoder chip over USB.
@@ -44,7 +44,7 @@ config VIDEO_GO7007_LOADER
config VIDEO_GO7007_USB_S2250_BOARD
tristate "Sensoray 2250/2251 support"
depends on VIDEO_GO7007_USB && USB
- ---help---
+ help
This is a video4linux driver for the Sensoray 2250/2251 device.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c
index 24f5b615dc7a..dfa9f899d0c2 100644
--- a/drivers/media/usb/go7007/go7007-fw.c
+++ b/drivers/media/usb/go7007/go7007-fw.c
@@ -1499,8 +1499,8 @@ static int modet_to_package(struct go7007 *go, __le16 *code, int space)
return cnt;
}
-static int do_special(struct go7007 *go, u16 type, __le16 *code, int space,
- int *framelen)
+static noinline_for_stack int do_special(struct go7007 *go, u16 type,
+ __le16 *code, int space, int *framelen)
{
switch (type) {
case SPECIAL_FRM_HEAD:
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index 19c6a0354ce0..abe98488be23 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1132,7 +1132,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
usb->usbdev = usbdev;
usb_make_path(usbdev, go->bus_info, sizeof(go->bus_info));
go->board_id = id->driver_info;
- strncpy(go->name, name, sizeof(go->name));
+ strscpy(go->name, name, sizeof(go->name));
if (board->flags & GO7007_USB_EZUSB)
go->hpi_ops = &go7007_usb_ezusb_hpi_ops;
else
@@ -1198,7 +1198,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
go->board_id = GO7007_BOARDID_ENDURA;
usb->board = board = &board_endura;
go->board_info = &board->main_info;
- strncpy(go->name, "Pelco Endura",
+ strscpy(go->name, "Pelco Endura",
sizeof(go->name));
} else {
u16 channel;
@@ -1232,21 +1232,21 @@ static int go7007_usb_probe(struct usb_interface *intf,
case 1:
go->tuner_type = TUNER_SONY_BTF_PG472Z;
go->std = V4L2_STD_PAL;
- strncpy(go->name, "Plextor PX-TV402U-EU",
- sizeof(go->name));
+ strscpy(go->name, "Plextor PX-TV402U-EU",
+ sizeof(go->name));
break;
case 2:
go->tuner_type = TUNER_SONY_BTF_PK467Z;
go->std = V4L2_STD_NTSC_M_JP;
num_i2c_devs -= 2;
- strncpy(go->name, "Plextor PX-TV402U-JP",
- sizeof(go->name));
+ strscpy(go->name, "Plextor PX-TV402U-JP",
+ sizeof(go->name));
break;
case 3:
go->tuner_type = TUNER_SONY_BTF_PB463Z;
num_i2c_devs -= 2;
- strncpy(go->name, "Plextor PX-TV402U-NA",
- sizeof(go->name));
+ strscpy(go->name, "Plextor PX-TV402U-NA",
+ sizeof(go->name));
break;
default:
pr_debug("unable to detect tuner type!\n");
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index 7a2781fa83e7..bebdfcecf600 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -327,7 +327,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmt->flags = V4L2_FMT_FLAG_COMPRESSED;
- strncpy(fmt->description, desc, sizeof(fmt->description));
+ strscpy(fmt->description, desc, sizeof(fmt->description));
return 0;
}
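Several hunks in this section (go7007, hdpvr, pvrusb2, pulse8-cec) replace strncpy() with strscpy(). The kernel's strscpy() always NUL-terminates the destination and returns -E2BIG on truncation instead of silently leaving an unterminated buffer, which is why the manual "buf[size - 1] = '\0'" lines can be dropped. A simplified userspace model of that semantic difference, not the kernel implementation, assuming only the standard C library:

#include <stdio.h>
#include <string.h>

/* Simplified model of strscpy(): copy at most size-1 bytes, always terminate,
 * return -1 (standing in for -E2BIG) when the source did not fit. */
static long scpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size == 0)
                return -1;
        if (len >= size) {
                memcpy(dst, src, size - 1);
                dst[size - 1] = '\0';
                return -1;
        }
        memcpy(dst, src, len + 1);
        return (long)len;
}

int main(void)
{
        char a[8], b[8];

        strncpy(a, "MPEG2-TS with AVC/AAC", sizeof(a));  /* a is NOT terminated */
        scpy(b, "MPEG2-TS with AVC/AAC", sizeof(b));     /* b is "MPEG2-T" */
        printf("%.8s | %s\n", a, b);
        return 0;
}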
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index 088566e88467..0e6f36cb46e6 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -4,7 +4,7 @@ menuconfig USB_GSPCA
depends on INPUT || INPUT=n
select VIDEOBUF2_VMALLOC
default m
- ---help---
+ help
Say Y here if you want to enable selecting webcams based
on the GSPCA framework.
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index ac70b36d67b7..4d7517411cc2 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -294,7 +294,7 @@ static void fill_frame(struct gspca_dev *gspca_dev,
/* check the packet status and length */
st = urb->iso_frame_desc[i].status;
if (st) {
- pr_err("ISOC data error: [%d] len=%d, status=%d\n",
+ gspca_dbg(gspca_dev, D_PACK, "ISOC data error: [%d] len=%d, status=%d\n",
i, len, st);
gspca_dev->last_packet_type = DISCARD_PACKET;
continue;
@@ -314,6 +314,8 @@ static void fill_frame(struct gspca_dev *gspca_dev,
}
resubmit:
+ if (!gspca_dev->streaming)
+ return;
/* resubmit the URB */
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
@@ -330,7 +332,7 @@ static void isoc_irq(struct urb *urb)
struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
gspca_dbg(gspca_dev, D_PACK, "isoc irq\n");
- if (!vb2_start_streaming_called(&gspca_dev->queue))
+ if (!gspca_dev->streaming)
return;
fill_frame(gspca_dev, urb);
}
@@ -344,7 +346,7 @@ static void bulk_irq(struct urb *urb)
int st;
gspca_dbg(gspca_dev, D_PACK, "bulk irq\n");
- if (!vb2_start_streaming_called(&gspca_dev->queue))
+ if (!gspca_dev->streaming)
return;
switch (urb->status) {
case 0:
@@ -367,6 +369,8 @@ static void bulk_irq(struct urb *urb)
urb->actual_length);
resubmit:
+ if (!gspca_dev->streaming)
+ return;
/* resubmit the URB */
if (gspca_dev->cam.bulk_nurbs != 0) {
st = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1638,6 +1642,8 @@ void gspca_disconnect(struct usb_interface *intf)
mutex_lock(&gspca_dev->usb_lock);
gspca_dev->present = false;
+ destroy_urbs(gspca_dev);
+ gspca_input_destroy_urb(gspca_dev);
vb2_queue_error(&gspca_dev->queue);
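The gspca hunks stop consulting the vb2 queue state and instead gate both the IRQ handlers and URB resubmission on gspca_dev->streaming, and destroy the URBs on disconnect, apparently so a completion racing with disconnect can no longer resubmit a freed URB. A generic userspace analogue of the "check the stop flag before re-arming asynchronous work" pattern, with made-up names and no USB core involved:

#include <stdbool.h>
#include <stdio.h>

struct capture {
        bool streaming;  /* analogue of gspca_dev->streaming */
};

/* Completion handler analogue: consume the data, then only re-arm
 * (resubmit) while the device is still streaming. */
static void complete_one(struct capture *cap)
{
        printf("handle packet\n");

        if (!cap->streaming)
                return;  /* teardown in progress: do not resubmit */
        printf("resubmit\n");
}

int main(void)
{
        struct capture cap = { .streaming = true };

        complete_one(&cap);      /* handles and resubmits */
        cap.streaming = false;   /* disconnect/stop path */
        complete_one(&cap);      /* handles, but does not resubmit */
        return 0;
}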
diff --git a/drivers/media/usb/hackrf/Kconfig b/drivers/media/usb/hackrf/Kconfig
index 937e6f5c1e8e..072e186018f5 100644
--- a/drivers/media/usb/hackrf/Kconfig
+++ b/drivers/media/usb/hackrf/Kconfig
@@ -2,7 +2,7 @@ config USB_HACKRF
tristate "HackRF"
depends on VIDEO_V4L2
select VIDEOBUF2_VMALLOC
- ---help---
+ help
This is a video4linux2 driver for HackRF SDR device.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/hdpvr/Kconfig b/drivers/media/usb/hdpvr/Kconfig
index d73d9a1952b4..9e78c0c32651 100644
--- a/drivers/media/usb/hdpvr/Kconfig
+++ b/drivers/media/usb/hdpvr/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_HDPVR
tristate "Hauppauge HD PVR support"
depends on VIDEO_DEV && VIDEO_V4L2
- ---help---
+ help
This is a video4linux driver for Hauppauge's HD PVR USB device.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index e082086428a4..3804aa3fb50f 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -769,8 +769,7 @@ static int vidioc_enum_input(struct file *file, void *_fh, struct v4l2_input *i)
i->type = V4L2_INPUT_TYPE_CAMERA;
- strncpy(i->name, iname[n], sizeof(i->name) - 1);
- i->name[sizeof(i->name) - 1] = '\0';
+ strscpy(i->name, iname[n], sizeof(i->name));
i->audioset = 1<<HDPVR_RCA_FRONT | 1<<HDPVR_RCA_BACK | 1<<HDPVR_SPDIF;
@@ -841,8 +840,7 @@ static int vidioc_enumaudio(struct file *file, void *priv,
audio->capability = V4L2_AUDCAP_STEREO;
- strncpy(audio->name, audio_iname[n], sizeof(audio->name) - 1);
- audio->name[sizeof(audio->name) - 1] = '\0';
+ strscpy(audio->name, audio_iname[n], sizeof(audio->name));
return 0;
}
@@ -874,7 +872,6 @@ static int vidioc_g_audio(struct file *file, void *private_data,
audio->index = dev->options.audio_input;
audio->capability = V4L2_AUDCAP_STEREO;
strscpy(audio->name, audio_iname[audio->index], sizeof(audio->name));
- audio->name[sizeof(audio->name) - 1] = '\0';
return 0;
}
@@ -991,7 +988,8 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *private_data,
return -EINVAL;
f->flags = V4L2_FMT_FLAG_COMPRESSED;
- strncpy(f->description, "MPEG2-TS with AVC/AAC streams", 32);
+ strscpy(f->description, "MPEG2-TS with AVC/AAC streams",
+ sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
return 0;
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index 18ead44824ba..11f1b75d3efd 100644
--- a/drivers/media/usb/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -4,7 +4,7 @@ config USB_PULSE8_CEC
select CEC_CORE
select SERIO
select SERIO_SERPORT
- ---help---
+ help
This is a cec driver for the Pulse Eight HDMI CEC device.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index b085b14f3f87..ea9ee74fa336 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -435,7 +435,7 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
if (err)
return err;
- strncpy(log_addrs->osd_name, data, 13);
+ strscpy(log_addrs->osd_name, data, sizeof(log_addrs->osd_name));
dev_dbg(pulse8->dev, "OSD name: %s\n", log_addrs->osd_name);
return 0;
@@ -566,7 +566,7 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
char *osd_str = cmd + 1;
cmd[0] = MSGCODE_SET_OSD_NAME;
- strncpy(cmd + 1, adap->log_addrs.osd_name, 13);
+ strscpy(cmd + 1, adap->log_addrs.osd_name, sizeof(cmd) - 1);
if (osd_len < 4) {
memset(osd_str + osd_len, ' ', 4 - osd_len);
osd_len = 4;
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index 1ad913fc30bf..ac6612cf1bec 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -9,7 +9,7 @@ config VIDEO_PVRUSB2
select VIDEO_MSP3400
select VIDEO_WM8775
select VIDEO_CS53L32A
- ---help---
+ help
This is a video4linux driver for Conexant 23416 based
usb2 personal video recorder devices.
@@ -20,7 +20,7 @@ config VIDEO_PVRUSB2_SYSFS
bool "pvrusb2 sysfs support"
default y
depends on VIDEO_PVRUSB2 && SYSFS
- ---help---
+ help
This option enables the operation of a sysfs based
interface for query and control of the pvrusb2 driver.
@@ -43,7 +43,7 @@ config VIDEO_PVRUSB2_DVB
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA8290 if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
This option enables a DVB interface for the pvrusb2 driver.
If your device does not support digital television, this
feature will have no effect on the driver's operation.
@@ -53,7 +53,7 @@ config VIDEO_PVRUSB2_DVB
config VIDEO_PVRUSB2_DEBUGIFC
bool "pvrusb2 debug interface"
depends on VIDEO_PVRUSB2_SYSFS
- ---help---
+ help
This option enables the inclusion of a debug interface
in the pvrusb2 driver, hosted through sysfs.
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 446a999dd2ce..816c85786c2a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -666,6 +666,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp)
static int ctrl_check_input(struct pvr2_ctrl *cptr,int v)
{
+ if (v < 0 || v > PVR2_CVAL_INPUT_MAX)
+ return 0;
return ((1 << v) & cptr->hdw->input_allowed_mask) != 0;
}
@@ -2459,9 +2461,8 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
if (!(qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)) {
ciptr->set_value = ctrl_cx2341x_set;
}
- strncpy(hdw->mpeg_ctrl_info[idx].desc,qctrl.name,
- PVR2_CTLD_INFO_DESC_SIZE);
- hdw->mpeg_ctrl_info[idx].desc[PVR2_CTLD_INFO_DESC_SIZE-1] = 0;
+ strscpy(hdw->mpeg_ctrl_info[idx].desc, qctrl.name,
+ sizeof(hdw->mpeg_ctrl_info[idx].desc));
ciptr->default_value = qctrl.default_value;
switch (qctrl.type) {
default:
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
index 25648add77e5..bd2b7a67b732 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
@@ -50,6 +50,7 @@
#define PVR2_CVAL_INPUT_COMPOSITE 2
#define PVR2_CVAL_INPUT_SVIDEO 3
#define PVR2_CVAL_INPUT_RADIO 4
+#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO
enum pvr2_config {
pvr2_config_empty, /* No configuration */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 08d5b7aa3537..cb6668580d77 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -284,7 +284,7 @@ static int pvr2_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin)
if (vin->index > 0)
return -EINVAL;
- strncpy(vin->name, "PVRUSB2 Audio", 14);
+ strscpy(vin->name, "PVRUSB2 Audio", sizeof(vin->name));
vin->capability = V4L2_AUDCAP_STEREO;
return 0;
}
@@ -293,7 +293,7 @@ static int pvr2_g_audio(struct file *file, void *priv, struct v4l2_audio *vin)
{
/* pkt: FIXME: see above comment (VIDIOC_ENUMAUDIO) */
vin->index = 0;
- strncpy(vin->name, "PVRUSB2 Audio", 14);
+ strscpy(vin->name, "PVRUSB2 Audio", sizeof(vin->name));
vin->capability = V4L2_AUDCAP_STEREO;
return 0;
}
diff --git a/drivers/media/usb/pwc/Kconfig b/drivers/media/usb/pwc/Kconfig
index d63d0a850035..5f6d91edca41 100644
--- a/drivers/media/usb/pwc/Kconfig
+++ b/drivers/media/usb/pwc/Kconfig
@@ -2,7 +2,7 @@ config USB_PWC
tristate "USB Philips Cameras"
depends on VIDEO_V4L2
select VIDEOBUF2_VMALLOC
- ---help---
+ help
Say Y or M here if you want to use one of these Philips & OEM
webcams:
* Philips PCA645, PCA646
@@ -41,7 +41,7 @@ config USB_PWC_INPUT_EVDEV
bool "USB Philips Cameras input events device support"
default y
depends on USB_PWC && (USB_PWC=INPUT || INPUT=y)
- ---help---
+ help
This option makes USB Philips cameras register the snapshot button as
an input device to report button events.
diff --git a/drivers/media/usb/pwc/pwc-ctrl.c b/drivers/media/usb/pwc/pwc-ctrl.c
index 655cef39eb3d..b681a184ef87 100644
--- a/drivers/media/usb/pwc/pwc-ctrl.c
+++ b/drivers/media/usb/pwc/pwc-ctrl.c
@@ -242,14 +242,14 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
fps = (frames / 5) - 1;
/* Find a supported framerate with progressively higher compression */
- pChoose = NULL;
- while (*compression <= 3) {
+ do {
pChoose = &Timon_table[size][fps][*compression];
if (pChoose->alternate != 0)
break;
(*compression)++;
- }
- if (pChoose == NULL || pChoose->alternate == 0)
+ } while (*compression <= 3);
+
+ if (pChoose->alternate == 0)
return -ENOENT; /* Not supported. */
if (send_to_cam)
@@ -279,7 +279,7 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
int frames, int *compression, int send_to_cam)
{
- const struct Kiara_table_entry *pChoose = NULL;
+ const struct Kiara_table_entry *pChoose;
int fps, ret = 0;
if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
@@ -293,13 +293,14 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
fps = (frames / 5) - 1;
/* Find a supported framerate with progressively higher compression */
- while (*compression <= 3) {
+ do {
pChoose = &Kiara_table[size][fps][*compression];
if (pChoose->alternate != 0)
break;
(*compression)++;
- }
- if (pChoose == NULL || pChoose->alternate == 0)
+ } while (*compression <= 3);
+
+ if (pChoose->alternate == 0)
return -ENOENT; /* Not supported. */
/* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
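The pwc hunks convert the "find a framerate at progressively higher compression" search from a while loop over a NULL-initialized pointer into a do/while, so pChoose is assigned on every path and the trailing NULL check can go. A compilable sketch of the same search over a stub table; the table contents and the single size/fps slot are invented for illustration.

#include <stdio.h>

struct entry {
        int alternate;  /* 0 means "mode not supported at this compression" */
};

/* Stub table: compression levels 0..3 for one size/fps combination. */
static const struct entry table[4] = { {0}, {0}, {7}, {9} };

int main(void)
{
        int compression = 0;
        const struct entry *choose;

        /* do/while guarantees choose is set even if we fall out of the loop */
        do {
                choose = &table[compression];
                if (choose->alternate != 0)
                        break;
                compression++;
        } while (compression <= 3);

        if (choose->alternate == 0)
                return 1;  /* not supported, like the -ENOENT path */

        printf("compression %d, alternate %d\n", compression, choose->alternate);
        return 0;
}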
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig
index 030ef01b1ff0..6b00be618db8 100644
--- a/drivers/media/usb/rainshadow-cec/Kconfig
+++ b/drivers/media/usb/rainshadow-cec/Kconfig
@@ -4,7 +4,7 @@ config USB_RAINSHADOW_CEC
select CEC_CORE
select SERIO
select SERIO_SERPORT
- ---help---
+ help
This is a cec driver for the RainShadow Tech HDMI CEC device.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/siano/Kconfig b/drivers/media/usb/siano/Kconfig
index d37b742d4f7a..cc5e5aa3c93a 100644
--- a/drivers/media/usb/siano/Kconfig
+++ b/drivers/media/usb/siano/Kconfig
@@ -8,6 +8,6 @@ config SMS_USB_DRV
depends on !RC_CORE || RC_CORE
select MEDIA_COMMON_OPTIONS
select SMS_SIANO_MDTV
- ---help---
+ help
Choose if you would like to have Siano's support for USB interface
diff --git a/drivers/media/usb/stk1160/Kconfig b/drivers/media/usb/stk1160/Kconfig
index 425ed00e2599..03426e4437ea 100644
--- a/drivers/media/usb/stk1160/Kconfig
+++ b/drivers/media/usb/stk1160/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_STK1160_COMMON
tristate "STK1160 USB video capture support"
depends on VIDEO_DEV && I2C
- ---help---
+ help
This is a video4linux driver for STK1160 based video capture devices.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/stkwebcam/Kconfig b/drivers/media/usb/stkwebcam/Kconfig
index a6a00aa4fce6..ea9e04b3caaf 100644
--- a/drivers/media/usb/stkwebcam/Kconfig
+++ b/drivers/media/usb/stkwebcam/Kconfig
@@ -1,7 +1,7 @@
config USB_STKWEBCAM
tristate "USB Syntek DC1125 Camera support"
depends on VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to use this type of camera.
Supported devices are typically found in some Asus laptops,
with USB id 174f:a311 and 05e1:0501. Other Syntek cameras
diff --git a/drivers/media/usb/tm6000/Kconfig b/drivers/media/usb/tm6000/Kconfig
index a43b77abd931..321ae691f4d9 100644
--- a/drivers/media/usb/tm6000/Kconfig
+++ b/drivers/media/usb/tm6000/Kconfig
@@ -18,7 +18,7 @@ config VIDEO_TM6000_ALSA
tristate "TV Master TM5600/6000/6010 audio support"
depends on VIDEO_TM6000 && SND
select SND_PCM
- ---help---
+ help
This is a video4linux driver for direct (DMA) audio for
TM5600/TM6000/TM6010 USB Devices.
@@ -29,5 +29,5 @@ config VIDEO_TM6000_DVB
tristate "DVB Support for tm6000 based TV cards"
depends on VIDEO_TM6000 && DVB_CORE && USB
select DVB_ZL10353
- ---help---
+ help
This adds support for DVB cards based on the tm5600/tm6000 chip.
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig
index 14a0941fa0d0..2b4ac0848469 100644
--- a/drivers/media/usb/usbtv/Kconfig
+++ b/drivers/media/usb/usbtv/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_USBTV
select SND_PCM
select VIDEOBUF2_VMALLOC
- ---help---
+ help
This is a video4linux2 driver for USBTV007 based video capture devices.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/usbvision/Kconfig b/drivers/media/usb/usbvision/Kconfig
index 6b6afc5d8f7e..7aa080cb9884 100644
--- a/drivers/media/usb/usbvision/Kconfig
+++ b/drivers/media/usb/usbvision/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_USBVISION
depends on I2C && VIDEO_V4L2
select VIDEO_TUNER
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
- ---help---
+ help
There are more than 50 different USB video devices based on
NT1003/1004/1005 USB Bridges. This driver enables using those
devices.
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index 92d166bf8c12..abc4eed832a3 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -2302,6 +2302,9 @@ int usbvision_init_isoc(struct usb_usbvision *usbvision)
sb_size,
GFP_KERNEL,
&urb->transfer_dma);
+ if (!usbvision->sbuf[buf_idx].data)
+ return -ENOMEM;
+
urb->dev = dev;
urb->context = usbvision;
urb->pipe = usb_rcvisocpipe(dev, usbvision->video_endp);
diff --git a/drivers/media/usb/uvc/Kconfig b/drivers/media/usb/uvc/Kconfig
index 6ed85efabcaa..94937d0cc2e3 100644
--- a/drivers/media/usb/uvc/Kconfig
+++ b/drivers/media/usb/uvc/Kconfig
@@ -2,7 +2,7 @@ config USB_VIDEO_CLASS
tristate "USB Video Class (UVC)"
depends on VIDEO_V4L2
select VIDEOBUF2_VMALLOC
- ---help---
+ help
Support for the USB Video Class (UVC). Currently only video
input devices, such as webcams, are supported.
@@ -13,7 +13,7 @@ config USB_VIDEO_CLASS_INPUT_EVDEV
default y
depends on USB_VIDEO_CLASS
depends on USB_VIDEO_CLASS=INPUT || INPUT=y
- ---help---
+ help
This option makes USB Video Class devices register an input device
to report button events.
diff --git a/drivers/media/usb/zr364xx/Kconfig b/drivers/media/usb/zr364xx/Kconfig
index ac429bca70e8..979b1d4f3f68 100644
--- a/drivers/media/usb/zr364xx/Kconfig
+++ b/drivers/media/usb/zr364xx/Kconfig
@@ -3,7 +3,7 @@ config USB_ZR364XX
depends on VIDEO_V4L2
select VIDEOBUF_GEN
select VIDEOBUF_VMALLOC
- ---help---
+ help
Say Y here if you want to connect this type of camera to your
computer's USB port.
See <file:Documentation/media/v4l-drivers/zr364xx.rst> for more info
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index c0940f5c69b4..8402096f7796 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -13,7 +13,7 @@ config VIDEO_V4L2
config VIDEO_ADV_DEBUG
bool "Enable advanced debug functionality on V4L2 drivers"
default n
- ---help---
+ help
Say Y here to enable advanced debugging functionality on some
V4L devices.
In doubt, say N.
@@ -21,7 +21,7 @@ config VIDEO_ADV_DEBUG
config VIDEO_FIXED_MINOR_RANGES
bool "Enable old-style fixed minor ranges on drivers/video devices"
default n
- ---help---
+ help
Say Y here to enable the old-style fixed-range minor assignments.
Only useful if you rely on the old behavior and use mknod instead of udev.
@@ -33,7 +33,7 @@ config VIDEO_PCI_SKELETON
depends on SAMPLES
depends on VIDEO_V4L2 && VIDEOBUF2_CORE
depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
- ---help---
+ help
Enable build of the skeleton PCI driver, used as a reference
when developing new drivers.
@@ -51,7 +51,7 @@ config V4L2_FLASH_LED_CLASS
tristate "V4L2 flash API for LED flash class devices"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on LEDS_CLASS_FLASH
- ---help---
+ help
Say Y here to enable V4L2 flash API support for LED flash
class drivers.
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 663730f088cd..b5778b2ffa27 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -445,3 +445,189 @@ int v4l2_s_parm_cap(struct video_device *vdev,
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
+
+const struct v4l2_format_info *v4l2_format_info(u32 format)
+{
+ static const struct v4l2_format_info formats[] = {
+ /* RGB formats */
+ { .format = V4L2_PIX_FMT_BGR24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XBGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XRGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ /* YUV packed formats */
+ { .format = V4L2_PIX_FMT_YUYV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVYU, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_VYUY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+
+ /* YUV planar formats */
+ { .format = V4L2_PIX_FMT_NV12, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV24, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV42, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_YUV410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YVU410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YUV411P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+
+ /* YUV planar formats, non contiguous variant */
+ { .format = V4L2_PIX_FMT_YUV420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_NV12M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+
+ /* Bayer RGB formats */
+ { .format = V4L2_PIX_FMT_SBGGR8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i)
+ if (formats[i].format == format)
+ return &formats[i];
+ return NULL;
+}
+EXPORT_SYMBOL(v4l2_format_info);
+
+static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
+{
+ if (!info->block_w[plane])
+ return 1;
+ return info->block_w[plane];
+}
+
+static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
+{
+ if (!info->block_h[plane])
+ return 1;
+ return info->block_h[plane];
+}
+
+int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
+ int pixelformat, int width, int height)
+{
+ const struct v4l2_format_info *info;
+ struct v4l2_plane_pix_format *plane;
+ int i;
+
+ info = v4l2_format_info(pixelformat);
+ if (!info)
+ return -EINVAL;
+
+ pixfmt->width = width;
+ pixfmt->height = height;
+ pixfmt->pixelformat = pixelformat;
+ pixfmt->num_planes = info->mem_planes;
+
+ if (info->mem_planes == 1) {
+ plane = &pixfmt->plane_fmt[0];
+ plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
+ plane->sizeimage = 0;
+
+ for (i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+ unsigned int aligned_width;
+ unsigned int aligned_height;
+
+ aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
+ aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
+
+ plane->sizeimage += info->bpp[i] *
+ DIV_ROUND_UP(aligned_width, hdiv) *
+ DIV_ROUND_UP(aligned_height, vdiv);
+ }
+ } else {
+ for (i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+ unsigned int aligned_width;
+ unsigned int aligned_height;
+
+ aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
+ aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
+
+ plane = &pixfmt->plane_fmt[i];
+ plane->bytesperline =
+ info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv);
+ plane->sizeimage =
+ plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
+
+int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, int pixelformat, int width, int height)
+{
+ const struct v4l2_format_info *info;
+ int i;
+
+ info = v4l2_format_info(pixelformat);
+ if (!info)
+ return -EINVAL;
+
+ /* Single planar API cannot be used for multi plane formats. */
+ if (info->mem_planes > 1)
+ return -EINVAL;
+
+ pixfmt->width = width;
+ pixfmt->height = height;
+ pixfmt->pixelformat = pixelformat;
+ pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
+ pixfmt->sizeimage = 0;
+
+ for (i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+ unsigned int aligned_width;
+ unsigned int aligned_height;
+
+ aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
+ aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
+
+ pixfmt->sizeimage += info->bpp[i] *
+ DIV_ROUND_UP(aligned_width, hdiv) *
+ DIV_ROUND_UP(aligned_height, vdiv);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
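v4l2_fill_pixfmt() above derives bytesperline from plane 0 and accumulates sizeimage over the component planes, dividing the aligned width and height by hdiv/vdiv for the chroma planes. A standalone re-derivation of that arithmetic for NV12 at 1920x1080 (bpp = {1, 2}, hdiv = vdiv = 2, no block alignment), using local ALIGN_UP/DIV_ROUND_UP helpers rather than the kernel macros:

#include <stdio.h>

#define ALIGN_UP(x, a)     (((x) + (a) - 1) / (a) * (a))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        const unsigned int width = 1920, height = 1080;
        const unsigned int bpp[2] = { 1, 2 };  /* Y plane, interleaved CbCr */
        const unsigned int hdiv = 2, vdiv = 2;
        unsigned int bytesperline = ALIGN_UP(width, 1) * bpp[0];
        unsigned int sizeimage = 0;
        unsigned int i;

        for (i = 0; i < 2; i++) {
                unsigned int h = (i == 0) ? 1 : hdiv;
                unsigned int v = (i == 0) ? 1 : vdiv;

                sizeimage += bpp[i] * DIV_ROUND_UP(width, h) *
                             DIV_ROUND_UP(height, v);
        }

        /* Prints 1920 and 3110400 (== 1920 * 1080 * 3 / 2) */
        printf("bytesperline=%u sizeimage=%u\n", bytesperline, sizeimage);
        return 0;
}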
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index b79d3bbd8350..89a1fe564675 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -828,6 +828,10 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
return "H264 Constrained Intra Pred";
case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
@@ -849,6 +853,9 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: return "MPEG-2 Slice Parameters";
case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: return "MPEG-2 Quantization Matrices";
+ case V4L2_CID_MPEG_VIDEO_FWHT_PARAMS: return "FWHT Stateless Parameters";
+ case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value";
+ case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value";
/* VPX controls */
case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
@@ -1303,6 +1310,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION:
*type = V4L2_CTRL_TYPE_MPEG2_QUANTIZATION;
break;
+ case V4L2_CID_MPEG_VIDEO_FWHT_PARAMS:
+ *type = V4L2_CTRL_TYPE_FWHT_PARAMS;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1599,7 +1609,7 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
case V4L2_CTRL_TYPE_INTEGER_MENU:
if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
return -ERANGE;
- if (ctrl->menu_skip_mask & (1 << ptr.p_s32[idx]))
+ if (ctrl->menu_skip_mask & (1ULL << ptr.p_s32[idx]))
return -EINVAL;
if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
@@ -1669,6 +1679,9 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
return 0;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ return 0;
+
default:
return -EINVAL;
}
@@ -2249,6 +2262,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization);
break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_fwht_params);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2918,7 +2934,7 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
return -EINVAL;
/* Use mask to see if this menu item should be skipped */
- if (ctrl->menu_skip_mask & (1 << i))
+ if (ctrl->menu_skip_mask & (1ULL << i))
return -EINVAL;
/* Empty menu items should also be skipped */
if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
@@ -3899,18 +3915,19 @@ void v4l2_ctrl_request_complete(struct media_request *req,
}
EXPORT_SYMBOL(v4l2_ctrl_request_complete);
-void v4l2_ctrl_request_setup(struct media_request *req,
+int v4l2_ctrl_request_setup(struct media_request *req,
struct v4l2_ctrl_handler *main_hdl)
{
struct media_request_object *obj;
struct v4l2_ctrl_handler *hdl;
struct v4l2_ctrl_ref *ref;
+ int ret = 0;
if (!req || !main_hdl)
- return;
+ return 0;
if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
- return;
+ return -EBUSY;
/*
* Note that it is valid if nothing was found. It means
@@ -3919,10 +3936,10 @@ void v4l2_ctrl_request_setup(struct media_request *req,
*/
obj = media_request_object_find(req, &req_ops, main_hdl);
if (!obj)
- return;
+ return 0;
if (obj->completed) {
media_request_object_put(obj);
- return;
+ return -EBUSY;
}
hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
@@ -3990,12 +4007,15 @@ void v4l2_ctrl_request_setup(struct media_request *req,
update_from_auto_cluster(master);
}
- try_or_set_cluster(NULL, master, true, 0);
-
+ ret = try_or_set_cluster(NULL, master, true, 0);
v4l2_ctrl_unlock(master);
+
+ if (ret)
+ break;
}
media_request_object_put(obj);
+ return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_request_setup);
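Two hunks above widen "1 << idx" to "1ULL << idx" when testing menu_skip_mask; the mask is 64-bit, and shifting the plain int constant 1 by 32 or more is undefined, so menu items past bit 31 could never be skipped reliably. A minimal demonstration of why the suffix matters, independent of the V4L2 control code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t skip_mask = 1ULL << 40;  /* menu item 40 should be skipped */
        unsigned int idx = 40;

        /* (1 << idx) would shift a 32-bit int by 40 bits: undefined behaviour,
         * and on common compilers the test would miss the bit entirely. */
        if (skip_mask & (1ULL << idx))
                printf("item %u skipped\n", idx);
        else
                printf("item %u allowed\n", idx);
        return 0;
}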
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index e0ddb9a52bd1..7cca0de1b730 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -216,10 +216,18 @@ error_module:
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
+static void v4l2_subdev_release(struct v4l2_subdev *sd)
+{
+ struct module *owner = !sd->owner_v4l2_dev ? sd->owner : NULL;
+
+ if (sd->internal_ops && sd->internal_ops->release)
+ sd->internal_ops->release(sd);
+ module_put(owner);
+}
+
static void v4l2_device_release_subdev_node(struct video_device *vdev)
{
- struct v4l2_subdev *sd = video_get_drvdata(vdev);
- sd->devnode = NULL;
+ v4l2_subdev_release(video_get_drvdata(vdev));
kfree(vdev);
}
@@ -318,8 +326,9 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
media_device_unregister_entity(&sd->entity);
}
#endif
- video_unregister_device(sd->devnode);
- if (!sd->owner_v4l2_dev)
- module_put(sd->owner);
+ if (sd->devnode)
+ video_unregister_device(sd->devnode);
+ else
+ v4l2_subdev_release(sd);
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 20571846e636..ea1ed88f9dc8 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -163,7 +163,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
}
if (use_default_lane_mapping)
- pr_debug("using default lane mapping\n");
+ pr_debug("no lane mapping given, using defaults\n");
}
rval = fwnode_property_read_u32_array(fwnode, "data-lanes", NULL, 0);
@@ -175,6 +175,10 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
num_data_lanes);
have_data_lanes = true;
+ if (use_default_lane_mapping) {
+ pr_debug("data-lanes property exists; disabling default mapping\n");
+ use_default_lane_mapping = false;
+ }
}
for (i = 0; i < num_data_lanes; i++) {
@@ -225,6 +229,10 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
if (bus_type == V4L2_MBUS_CSI2_DPHY ||
bus_type == V4L2_MBUS_CSI2_CPHY || lanes_used ||
have_clk_lane || (flags & ~V4L2_MBUS_CSI2_CONTINUOUS_CLOCK)) {
+ /* Only D-PHY has a clock lane. */
+ unsigned int dfl_data_lane_index =
+ bus_type == V4L2_MBUS_CSI2_DPHY;
+
bus->flags = flags;
if (bus_type == V4L2_MBUS_UNKNOWN)
vep->bus_type = V4L2_MBUS_CSI2_DPHY;
@@ -233,7 +241,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
if (use_default_lane_mapping) {
bus->clock_lane = 0;
for (i = 0; i < num_data_lanes; i++)
- bus->data_lanes[i] = 1 + i;
+ bus->data_lanes[i] = dfl_data_lane_index + i;
} else {
bus->clock_lane = clock_lane;
for (i = 0; i < num_data_lanes; i++)
@@ -820,7 +828,10 @@ error:
* underneath the fwnode identified by the previous tuple, etc. until you
* reached the fwnode you need.
*
- * An example with a graph, as defined in Documentation/acpi/dsd/graph.txt:
+ * THIS EXAMPLE EXISTS MERELY TO DOCUMENT THIS FUNCTION. DO NOT USE IT AS A
+ * REFERENCE IN HOW ACPI TABLES SHOULD BE WRITTEN!! See documentation under
+ * Documentation/acpi/dsd instead and especially graph.txt,
+ * data-node-references.txt and leds.txt.
*
* Scope (\_SB.PCI0.I2C2)
* {
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index f6d663934648..ac87c3e37280 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1337,6 +1337,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
+ case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break;
case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break;
@@ -1373,7 +1374,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
}
if (descr)
- WARN_ON(strscpy(fmt->description, descr, sz) >= sz);
+ WARN_ON(strscpy(fmt->description, descr, sz) < 0);
fmt->flags = flags;
}
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index f5f0d71ec745..d75815ab0d7b 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -18,6 +18,7 @@
#include <linux/ioctl.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
@@ -54,9 +55,6 @@ static int subdev_open(struct file *file)
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
struct v4l2_subdev_fh *subdev_fh;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- struct media_entity *entity = NULL;
-#endif
int ret;
subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
@@ -73,12 +71,15 @@ static int subdev_open(struct file *file)
v4l2_fh_add(&subdev_fh->vfh);
file->private_data = &subdev_fh->vfh;
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (sd->v4l2_dev->mdev) {
- entity = media_entity_get(&sd->entity);
- if (!entity) {
+ if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
+ struct module *owner;
+
+ owner = sd->entity.graph_obj.mdev->dev->driver->owner;
+ if (!try_module_get(owner)) {
ret = -EBUSY;
goto err;
}
+ subdev_fh->owner = owner;
}
#endif
@@ -91,9 +92,7 @@ static int subdev_open(struct file *file)
return 0;
err:
-#if defined(CONFIG_MEDIA_CONTROLLER)
- media_entity_put(entity);
-#endif
+ module_put(subdev_fh->owner);
v4l2_fh_del(&subdev_fh->vfh);
v4l2_fh_exit(&subdev_fh->vfh);
subdev_fh_free(subdev_fh);
@@ -111,10 +110,7 @@ static int subdev_close(struct file *file)
if (sd->internal_ops && sd->internal_ops->close)
sd->internal_ops->close(sd, subdev_fh);
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (sd->v4l2_dev->mdev)
- media_entity_put(&sd->entity);
-#endif
+ module_put(subdev_fh->owner);
v4l2_fh_del(vfh);
v4l2_fh_exit(vfh);
subdev_fh_free(subdev_fh);
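The subdev hunks pin the owning driver module while a subdev node is open: subdev_open() takes a reference with try_module_get() and stores the owner in the file handle, and both the error path and subdev_close() drop it with module_put() (module_put(NULL) is a no-op, which keeps the error path simple). A userspace analogue of the same "acquire on open, release on close and on every failure path" shape, with a plain counter standing in for the module reference count; all names here are invented.

#include <stdbool.h>
#include <stdio.h>

static int owner_refs;  /* stands in for the module reference count */

/* A real try_module_get() can fail while the module is being unloaded. */
static bool try_get(void) { owner_refs++; return true; }
static void put(void)     { if (owner_refs > 0) owner_refs--; }

struct fh {
        bool have_ref;  /* analogue of subdev_fh->owner */
};

static int demo_open(struct fh *fh)
{
        if (!try_get())
                return -1;
        fh->have_ref = true;
        return 0;
}

static void demo_close(struct fh *fh)
{
        if (fh->have_ref)
                put();
        fh->have_ref = false;
}

int main(void)
{
        struct fh fh = { 0 };

        demo_open(&fh);
        printf("refs while open: %d\n", owner_refs);  /* 1 */
        demo_close(&fh);
        printf("refs after close: %d\n", owner_refs); /* 0 */
        return 0;
}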
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 08929c087e27..870a2a526e0b 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -186,12 +186,12 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
- flags, dma->pages, NULL);
+ err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+ flags | FOLL_LONGTERM, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+ dprintk(1, "get_user_pages: err=%d [%d]\n", err,
dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 90161dec6fa5..91ae4eb0e913 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -28,6 +28,9 @@ ti-emif-sram-objs := ti-emif-pm.o ti-emif-sram-pm.o
AFLAGS_ti-emif-sram-pm.o :=-Wa,-march=armv7-a
-include drivers/memory/Makefile.asm-offsets
-
drivers/memory/ti-emif-sram-pm.o: include/generated/ti-emif-asm-offsets.h
+
+include/generated/ti-emif-asm-offsets.h: drivers/memory/emif-asm-offsets.s FORCE
+ $(call filechk,offsets,__TI_EMIF_ASM_OFFSETS_H__)
+
+targets += emif-asm-offsets.s
diff --git a/drivers/memory/Makefile.asm-offsets b/drivers/memory/Makefile.asm-offsets
deleted file mode 100644
index 0447e174c752..000000000000
--- a/drivers/memory/Makefile.asm-offsets
+++ /dev/null
@@ -1,4 +0,0 @@
-include/generated/ti-emif-asm-offsets.h: drivers/memory/emif-asm-offsets.s FORCE
- $(call filechk,offsets,__TI_EMIF_ASM_OFFSETS_H__)
-
-targets += emif-asm-offsets.s
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index c3748b414c27..0322df9dc249 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
struct atmel_ebi_dev_config {
int cs;
@@ -36,6 +37,7 @@ struct atmel_ebi_dev {
struct atmel_ebi_caps {
unsigned int available_cs;
unsigned int ebi_csa_offs;
+ const char *regmap_name;
void (*get_config)(struct atmel_ebi_dev *ebid,
struct atmel_ebi_dev_config *conf);
int (*xlate_config)(struct atmel_ebi_dev *ebid,
@@ -47,7 +49,7 @@ struct atmel_ebi_caps {
struct atmel_ebi {
struct clk *clk;
- struct regmap *matrix;
+ struct regmap *regmap;
struct {
struct regmap *regmap;
struct clk *clk;
@@ -357,7 +359,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
* one "atmel,smc-" property is present.
*/
if (ebi->caps->ebi_csa_offs && apply)
- regmap_update_bits(ebi->matrix,
+ regmap_update_bits(ebi->regmap,
ebi->caps->ebi_csa_offs,
BIT(cs), 0);
@@ -372,6 +374,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
static const struct atmel_ebi_caps at91sam9260_ebi_caps = {
.available_cs = 0xff,
.ebi_csa_offs = AT91SAM9260_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -380,6 +383,7 @@ static const struct atmel_ebi_caps at91sam9260_ebi_caps = {
static const struct atmel_ebi_caps at91sam9261_ebi_caps = {
.available_cs = 0xff,
.ebi_csa_offs = AT91SAM9261_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -388,6 +392,7 @@ static const struct atmel_ebi_caps at91sam9261_ebi_caps = {
static const struct atmel_ebi_caps at91sam9263_ebi0_caps = {
.available_cs = 0x3f,
.ebi_csa_offs = AT91SAM9263_MATRIX_EBI0CSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -396,6 +401,7 @@ static const struct atmel_ebi_caps at91sam9263_ebi0_caps = {
static const struct atmel_ebi_caps at91sam9263_ebi1_caps = {
.available_cs = 0x7,
.ebi_csa_offs = AT91SAM9263_MATRIX_EBI1CSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -404,6 +410,7 @@ static const struct atmel_ebi_caps at91sam9263_ebi1_caps = {
static const struct atmel_ebi_caps at91sam9rl_ebi_caps = {
.available_cs = 0x3f,
.ebi_csa_offs = AT91SAM9RL_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -412,6 +419,7 @@ static const struct atmel_ebi_caps at91sam9rl_ebi_caps = {
static const struct atmel_ebi_caps at91sam9g45_ebi_caps = {
.available_cs = 0x3f,
.ebi_csa_offs = AT91SAM9G45_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -420,6 +428,7 @@ static const struct atmel_ebi_caps at91sam9g45_ebi_caps = {
static const struct atmel_ebi_caps at91sam9x5_ebi_caps = {
.available_cs = 0x3f,
.ebi_csa_offs = AT91SAM9X5_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -432,6 +441,15 @@ static const struct atmel_ebi_caps sama5d3_ebi_caps = {
.apply_config = sama5_ebi_apply_config,
};
+static const struct atmel_ebi_caps sam9x60_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa_offs = AT91_SFR_CCFG_EBICSA,
+ .regmap_name = "microchip,sfr",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
static const struct of_device_id atmel_ebi_id_table[] = {
{
.compatible = "atmel,at91sam9260-ebi",
@@ -465,6 +483,10 @@ static const struct of_device_id atmel_ebi_id_table[] = {
.compatible = "atmel,sama5d3-ebi",
.data = &sama5d3_ebi_caps,
},
+ {
+ .compatible = "microchip,sam9x60-ebi",
+ .data = &sam9x60_ebi_caps,
+ },
{ /* sentinel */ }
};
@@ -543,13 +565,14 @@ static int atmel_ebi_probe(struct platform_device *pdev)
/*
* The sama5d3 does not provide an EBICSA register and thus does not need
- * to access the matrix registers.
+ * to access it.
*/
if (ebi->caps->ebi_csa_offs) {
- ebi->matrix =
- syscon_regmap_lookup_by_phandle(np, "atmel,matrix");
- if (IS_ERR(ebi->matrix))
- return PTR_ERR(ebi->matrix);
+ ebi->regmap =
+ syscon_regmap_lookup_by_phandle(np,
+ ebi->caps->regmap_name);
+ if (IS_ERR(ebi->regmap))
+ return PTR_ERR(ebi->regmap);
}
ret = of_property_read_u32(np, "#address-cells", &val);
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index bcdca9fbef51..5733e8fe1aef 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -259,9 +259,11 @@ static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host,
case 3:
host->io_word[0] |= buf[off + 2] << 16;
host->io_pos++;
+ /* fall through */
case 2:
host->io_word[0] |= buf[off + 1] << 8;
host->io_pos++;
+ /* fall through */
case 1:
host->io_word[0] |= buf[off];
host->io_pos++;
@@ -368,7 +370,6 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
{
struct jmb38x_ms_host *host = memstick_priv(msh);
- unsigned char *data;
unsigned int data_len, cmd, t_val;
if (!(STATUS_HAS_MEDIA & readl(host->addr + STATUS))) {
@@ -400,8 +401,6 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
cmd |= TPC_WAIT_INT;
}
- data = host->req->data;
-
if (!no_dma)
host->cmd_flags |= DMA_DATA;
@@ -644,7 +643,6 @@ static int jmb38x_ms_reset(struct jmb38x_ms_host *host)
writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN
| readl(host->addr + HOST_CONTROL),
host->addr + HOST_CONTROL);
- mmiowb();
for (cnt = 0; cnt < 20; ++cnt) {
if (!(HOST_CONTROL_RESET_REQ
@@ -659,7 +657,6 @@ reset_next:
writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
| readl(host->addr + HOST_CONTROL),
host->addr + HOST_CONTROL);
- mmiowb();
for (cnt = 0; cnt < 20; ++cnt) {
if (!(HOST_CONTROL_RESET
@@ -672,7 +669,6 @@ reset_next:
return -EIO;
reset_ok:
- mmiowb();
writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE);
writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
return 0;
@@ -1009,7 +1005,6 @@ static void jmb38x_ms_remove(struct pci_dev *dev)
tasklet_kill(&host->notify);
writel(0, host->addr + INT_SIGNAL_ENABLE);
writel(0, host->addr + INT_STATUS_ENABLE);
- mmiowb();
dev_dbg(&jm->pdev->dev, "interrupts off\n");
spin_lock_irqsave(&host->lock, flags);
if (host->req) {
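The jmb38x_ms hunks above (and the tifm_ms hunks that follow) add explicit "/* fall through */" annotations to switch cases that intentionally cascade while building a 32-bit I/O word from the 1-3 trailing bytes, which is the usual way to keep -Wimplicit-fallthrough quiet without changing behaviour. A compilable example of the same byte-packing cascade, outside the driver:

#include <stdio.h>
#include <stdint.h>

/* Pack the last 'count' (1..3) bytes of buf into a partial 32-bit word. */
static uint32_t pack_tail(const uint8_t *buf, unsigned int count)
{
        uint32_t word = 0;

        switch (count) {
        case 3:
                word |= (uint32_t)buf[2] << 16;
                /* fall through */
        case 2:
                word |= (uint32_t)buf[1] << 8;
                /* fall through */
        case 1:
                word |= buf[0];
                break;
        default:
                break;
        }
        return word;
}

int main(void)
{
        const uint8_t buf[3] = { 0x11, 0x22, 0x33 };

        printf("0x%06x\n", pack_tail(buf, 3));  /* 0x332211 */
        return 0;
}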
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index bed205849d02..6b13ac56eb27 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -166,9 +166,11 @@ static unsigned int tifm_ms_write_data(struct tifm_ms *host,
case 3:
host->io_word |= buf[off + 2] << 16;
host->io_pos++;
+ /* fall through */
case 2:
host->io_word |= buf[off + 1] << 8;
host->io_pos++;
+ /* fall through */
case 1:
host->io_word |= buf[off];
host->io_pos++;
@@ -254,7 +256,6 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
static int tifm_ms_issue_cmd(struct tifm_ms *host)
{
struct tifm_dev *sock = host->dev;
- unsigned char *data;
unsigned int data_len, cmd, sys_param;
host->cmd_flags = 0;
@@ -263,8 +264,6 @@ static int tifm_ms_issue_cmd(struct tifm_ms *host)
host->io_word = 0;
host->cmd_flags = 0;
- data = host->req->data;
-
host->use_dma = !no_dma;
if (host->req->long_data) {
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index ba551d8dfba4..d8882b0a1338 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -642,7 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
freereq = 0;
if (event != MPI_EVENT_EVENT_CHANGE)
break;
- /* else: fall through */
+ /* fall through */
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 8d22d6134a89..f9ac22413000 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -565,7 +565,7 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
* TODO - this define is not in MPI spec yet,
* but they plan to set it to 0x21
*/
- if (event == 0x21 ) {
+ if (event == 0x21) {
ioc->aen_event_read_flag=1;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Raised SIGIO to application\n",
ioc->name));
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 612cb5bc1333..6a79cd0ebe2b 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2928,27 +2928,27 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
u8 *tmp;
- smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
- if (le16_to_cpu(smprep->ResponseDataLength) !=
- sizeof(struct rep_manu_reply))
+ smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
+ if (le16_to_cpu(smprep->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
goto out_free;
- manufacture_reply = data_out + sizeof(struct rep_manu_request);
- strncpy(edev->vendor_id, manufacture_reply->vendor_id,
- SAS_EXPANDER_VENDOR_ID_LEN);
- strncpy(edev->product_id, manufacture_reply->product_id,
- SAS_EXPANDER_PRODUCT_ID_LEN);
- strncpy(edev->product_rev, manufacture_reply->product_rev,
- SAS_EXPANDER_PRODUCT_REV_LEN);
- edev->level = manufacture_reply->sas_format;
- if (manufacture_reply->sas_format) {
- strncpy(edev->component_vendor_id,
- manufacture_reply->component_vendor_id,
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format;
+ if (manufacture_reply->sas_format) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
- tmp = (u8 *)&manufacture_reply->component_id;
- edev->component_id = tmp[0] << 8 | tmp[1];
- edev->component_revision_id =
- manufacture_reply->component_revision_id;
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
}
} else {
printk(MYIOC_s_ERR_FMT
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6ba07c7feb92..f0737c57ed5f 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -786,6 +786,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
/*
* Allow non-SAS & non-NEXUS_LOSS to drop into below code
*/
+ /* Fall through */
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
/* Linux handles an unsolicited DID_RESET better
@@ -882,6 +883,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
scsi_set_resid(sc, 0);
+ /* Fall through */
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
sc->result = (DID_OK << 16) | scsi_status;
@@ -1934,7 +1936,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
- retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
if (retval < 0)
status = FAILED;
else
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 7172b0b16bdd..eabc4de5816c 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -258,8 +258,6 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
IOCPage4_t *IOCPage4Ptr;
MPT_FRAME_HDR *mf;
dma_addr_t dataDma;
- u16 req_idx;
- u32 frameOffset;
u32 flagsLength;
int ii;
@@ -276,9 +274,6 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
*/
pReq = (Config_t *)mf;
- req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
- frameOffset = ioc->req_sz - sizeof(IOCPage4_t);
-
/* Complete the request frame (same for all requests).
*/
pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0ce2d8dfc5f1..294d9567cc71 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -16,7 +16,7 @@ config MFD_CS5535
depends on PCI && (X86_32 || (X86 && COMPILE_TEST))
---help---
This is the core driver for CS5535/CS5536 MFD functions. This is
- necessary for using the board's GPIO and MFGPT functionality.
+ necessary for using the board's GPIO and MFGPT functionality.
config MFD_ALTERA_A10SR
bool "Altera Arria10 DevKit System Resource chip"
@@ -29,6 +29,16 @@ config MFD_ALTERA_A10SR
accessing the external gpio extender (LEDs & buttons) and
power supply alarms (hwmon).
+config MFD_ALTERA_SYSMGR
+ bool "Altera SOCFPGA System Manager"
+ depends on (ARCH_SOCFPGA || ARCH_STRATIX10) && OF
+ select MFD_SYSCON
+ help
+ Select this to get System Manager support for all Altera branded
+ SOCFPGAs. The SOCFPGA System Manager handles all SOCFPGAs by
+ using regmap_mmio accesses for ARM32 parts and SMC calls to
+ EL3 for ARM64 parts.
+
config MFD_ACT8945A
tristate "Active-semi ACT8945A"
select MFD_CORE
@@ -213,13 +223,13 @@ config MFD_CROS_EC
protocol for talking to the EC is defined by the bus driver.
config MFD_CROS_EC_CHARDEV
- tristate "Chrome OS Embedded Controller userspace device interface"
- depends on MFD_CROS_EC
- ---help---
- This driver adds support to talk with the ChromeOS EC from userspace.
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ ---help---
+ This driver adds support to talk with the ChromeOS EC from userspace.
- If you have a supported Chromebook, choose Y or M here.
- The module will be called cros_ec_dev.
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
config MFD_MADERA
tristate "Cirrus Logic Madera codecs"
@@ -733,6 +743,20 @@ config MFD_MAX77620
provides common support for accessing the device; additional drivers
must be enabled in order to use the functionality of the device.
+config MFD_MAX77650
+ tristate "Maxim MAX77650/77651 PMIC Support"
+ depends on I2C
+ depends on OF || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say Y here to add support for Maxim Semiconductor MAX77650 and
+ MAX77651 Power Management ICs. This is the core multifunction
+ driver for interacting with the device. The module name is
+ 'max77650'. Additional drivers can be enabled in order to use
+ the following functionalities of the device: GPIO, regulator,
+ charger, LED, onkey.
+
config MFD_MAX77686
tristate "Maxim Semiconductor MAX77686/802 PMIC Support"
depends on I2C
@@ -867,7 +891,7 @@ config MFD_CPCAP
At least Motorola Droid 4 is known to use CPCAP.
config MFD_VIPERBOARD
- tristate "Nano River Technologies Viperboard"
+ tristate "Nano River Technologies Viperboard"
select MFD_CORE
depends on USB
default n
@@ -903,15 +927,15 @@ config PCF50633_ADC
tristate "NXP PCF50633 ADC"
depends on MFD_PCF50633
help
- Say yes here if you want to include support for ADC in the
- NXP PCF50633 chip.
+ Say yes here if you want to include support for ADC in the
+ NXP PCF50633 chip.
config PCF50633_GPIO
tristate "NXP PCF50633 GPIO"
depends on MFD_PCF50633
help
- Say yes here if you want to include support GPIO for pins on
- the PCF50633 chip.
+ Say yes here if you want to include support GPIO for pins on
+ the PCF50633 chip.
config UCB1400_CORE
tristate "Philips UCB1400 Core driver"
@@ -1026,7 +1050,7 @@ config MFD_RN5T618
select REGMAP_I2C
help
Say yes here to add support for the Ricoh RN5T567,
- RN5T618, RC5T619 PMIC.
+ RN5T618, RC5T619 PMIC.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
@@ -1079,9 +1103,9 @@ config MFD_SM501_GPIO
bool "Export GPIO via GPIO layer"
depends on MFD_SM501 && GPIOLIB
---help---
- This option uses the gpio library layer to export the 64 GPIO
- lines on the SM501. The platform data is used to supply the
- base number for the first GPIO line to register.
+ This option uses the gpio library layer to export the 64 GPIO
+ lines on the SM501. The platform data is used to supply the
+ base number for the first GPIO line to register.
config MFD_SKY81452
tristate "Skyworks Solutions SKY81452"
@@ -1096,16 +1120,16 @@ config MFD_SKY81452
will be called sky81452.
config MFD_SMSC
- bool "SMSC ECE1099 series chips"
- depends on I2C=y
- select MFD_CORE
- select REGMAP_I2C
- help
- If you say yes here you get support for the
- ece1099 chips from SMSC.
+ bool "SMSC ECE1099 series chips"
+ depends on I2C=y
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the
+ ece1099 chips from SMSC.
- To compile this driver as a module, choose M here: the
- module will be called smsc.
+ To compile this driver as a module, choose M here: the
+ module will be called smsc.
config MFD_SC27XX_PMIC
tristate "Spreadtrum SC27xx PMICs"
@@ -1171,12 +1195,12 @@ config AB8500_CORE
This chip embeds various other multimedia funtionalities as well.
config AB8500_DEBUG
- bool "Enable debug info via debugfs"
- depends on AB8500_GPADC && DEBUG_FS
- default y if DEBUG_FS
- help
- Select this option if you want debug information using the debug
- filesystem, debugfs.
+ bool "Enable debug info via debugfs"
+ depends on AB8500_GPADC && DEBUG_FS
+ default y if DEBUG_FS
+ help
+ Select this option if you want debug information using the debug
+ filesystem, debugfs.
config AB8500_GPADC
bool "ST-Ericsson AB8500 GPADC driver"
@@ -1246,7 +1270,7 @@ config MFD_STA2X11
config MFD_SUN6I_PRCM
bool "Allwinner A31 PRCM controller"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
select MFD_CORE
help
Support for the PRCM (Power/Reset/Clock Management) unit available
@@ -1907,6 +1931,19 @@ config MFD_STPMIC1
To compile this driver as a module, choose M here: the
module will be called stpmic1.
+config MFD_STMFX
+ tristate "Support for STMicroelectronics Multi-Function eXpander (STMFX)"
+ depends on I2C
+ depends on OF || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Support for the STMicroelectronics Multi-Function eXpander.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b4569ed7f3f3..52b1a90ff515 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -155,6 +155,7 @@ obj-$(CONFIG_MFD_DA9150) += da9150-core.o
obj-$(CONFIG_MFD_MAX14577) += max14577.o
obj-$(CONFIG_MFD_MAX77620) += max77620.o
+obj-$(CONFIG_MFD_MAX77650) += max77650.o
obj-$(CONFIG_MFD_MAX77686) += max77686.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o
obj-$(CONFIG_MFD_MAX77843) += max77843.o
@@ -237,6 +238,7 @@ obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
+obj-$(CONFIG_MFD_ALTERA_SYSMGR) += altera-sysmgr.o
obj-$(CONFIG_MFD_STPMIC1) += stpmic1.o
obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o
@@ -246,4 +248,4 @@ obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
-
+obj-$(CONFIG_MFD_STMFX) += stmfx.o
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 8d652b2f9d14..f70d3f6a959b 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -2587,7 +2587,7 @@ static ssize_t ab8500_unsubscribe_write(struct file *file,
}
/*
- * - several deubgfs nodes fops
+ * - several debugfs nodes fops
*/
static const struct file_operations ab8500_bank_fops = {
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
new file mode 100644
index 000000000000..8976f82785bb
--- /dev/null
+++ b/drivers/mfd/altera-sysmgr.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019, Intel Corporation.
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Based on syscon driver.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/altera-sysmgr.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/**
+ * struct altr_sysmgr - Altera SOCFPGA System Manager
+ * @regmap: the regmap used for System Manager accesses.
+ * @base : the base address for the System Manager
+ */
+struct altr_sysmgr {
+ struct regmap *regmap;
+ resource_size_t *base;
+};
+
+static struct platform_driver altr_sysmgr_driver;
+
+/**
+ * s10_protected_reg_write
+ * Write to a protected SMC register.
+ * @base: Base address of System Manager
+ * @reg: Address offset of register
+ * @val: Value to write
+ * Return: INTEL_SIP_SMC_STATUS_OK (0) on success
+ * INTEL_SIP_SMC_REG_ERROR on error
+ * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION if not supported
+ */
+static int s10_protected_reg_write(void *base,
+ unsigned int reg, unsigned int val)
+{
+ struct arm_smccc_res result;
+ unsigned long sysmgr_base = (unsigned long)base;
+
+ arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, sysmgr_base + reg,
+ val, 0, 0, 0, 0, 0, &result);
+
+ return (int)result.a0;
+}
+
+/**
+ * s10_protected_reg_read
+ * Read the status of a protected SMC register
+ * @base: Base address of System Manager.
+ * @reg: Address of register
+ * @val: Value read.
+ * Return: INTEL_SIP_SMC_STATUS_OK (0) on success
+ * INTEL_SIP_SMC_REG_ERROR on error
+ * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION if not supported
+ */
+static int s10_protected_reg_read(void *base,
+ unsigned int reg, unsigned int *val)
+{
+ struct arm_smccc_res result;
+ unsigned long sysmgr_base = (unsigned long)base;
+
+ arm_smccc_smc(INTEL_SIP_SMC_REG_READ, sysmgr_base + reg,
+ 0, 0, 0, 0, 0, 0, &result);
+
+ *val = (unsigned int)result.a1;
+
+ return (int)result.a0;
+}
+
+static struct regmap_config altr_sysmgr_regmap_cfg = {
+ .name = "altr_sysmgr",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+/**
+ * sysmgr_match_phandle
+ * Matching function used by driver_find_device().
+ * Return: True if match is found, otherwise false.
+ */
+static int sysmgr_match_phandle(struct device *dev, void *data)
+{
+ return dev->of_node == (struct device_node *)data;
+}
+
+/**
+ * altr_sysmgr_regmap_lookup_by_phandle
+ * Find the sysmgr previously configured in probe() and return its regmap.
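+ * @np:       device node carrying the phandle (or the sysmgr node itself)
+ * @property: name of the phandle property referencing the sysmgr; may be
+ *            NULL to use @np directly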
+ * Return: regmap if found or error if not found.
+ */
+struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device *dev;
+ struct altr_sysmgr *sysmgr;
+ struct device_node *sysmgr_np;
+
+ if (property)
+ sysmgr_np = of_parse_phandle(np, property, 0);
+ else
+ sysmgr_np = np;
+
+ if (!sysmgr_np)
+ return ERR_PTR(-ENODEV);
+
+ dev = driver_find_device(&altr_sysmgr_driver.driver, NULL,
+ (void *)sysmgr_np, sysmgr_match_phandle);
+ of_node_put(sysmgr_np);
+ if (!dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ sysmgr = dev_get_drvdata(dev);
+
+ return sysmgr->regmap;
+}
+EXPORT_SYMBOL_GPL(altr_sysmgr_regmap_lookup_by_phandle);
+
+static int sysmgr_probe(struct platform_device *pdev)
+{
+ struct altr_sysmgr *sysmgr;
+ struct regmap *regmap;
+ struct resource *res;
+ struct regmap_config sysmgr_config = altr_sysmgr_regmap_cfg;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ sysmgr = devm_kzalloc(dev, sizeof(*sysmgr), GFP_KERNEL);
+ if (!sysmgr)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ sysmgr_config.max_register = resource_size(res) -
+ sysmgr_config.reg_stride;
+ if (of_device_is_compatible(np, "altr,sys-mgr-s10")) {
+ /* Need physical address for SMCC call */
+ sysmgr->base = (resource_size_t *)res->start;
+ sysmgr_config.reg_read = s10_protected_reg_read;
+ sysmgr_config.reg_write = s10_protected_reg_write;
+
+ regmap = devm_regmap_init(dev, NULL, sysmgr->base,
+ &sysmgr_config);
+ } else {
+ sysmgr->base = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!sysmgr->base)
+ return -ENOMEM;
+
+ sysmgr_config.max_register = res->end - res->start - 3;
+ regmap = devm_regmap_init_mmio(dev, sysmgr->base,
+ &sysmgr_config);
+ }
+
+ if (IS_ERR(regmap)) {
+ pr_err("regmap init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ sysmgr->regmap = regmap;
+
+ platform_set_drvdata(pdev, sysmgr);
+
+ return 0;
+}
+
+static const struct of_device_id altr_sysmgr_of_match[] = {
+ { .compatible = "altr,sys-mgr" },
+ { .compatible = "altr,sys-mgr-s10" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_sysmgr_of_match);
+
+static struct platform_driver altr_sysmgr_driver = {
+ .probe = sysmgr_probe,
+ .driver = {
+ .name = "altr,system_manager",
+ .of_match_table = altr_sysmgr_of_match,
+ },
+};
+
+static int __init altr_sysmgr_init(void)
+{
+ return platform_driver_register(&altr_sysmgr_driver);
+}
+core_initcall(altr_sysmgr_init);
+
+static void __exit altr_sysmgr_exit(void)
+{
+ platform_driver_unregister(&altr_sysmgr_driver);
+}
+module_exit(altr_sysmgr_exit);
+
+MODULE_AUTHOR("Thor Thayer <>");
+MODULE_DESCRIPTION("SOCFPGA System Manager driver");
+MODULE_LICENSE("GPL v2");
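The new driver exposes a single lookup helper to clients: altr_sysmgr_regmap_lookup_by_phandle() returns a regmap whose accessors go through MMIO on ARM32 parts and through the SMC-based reg_read/reg_write callbacks installed in sysmgr_probe() on Stratix 10. A minimal consumer sketch, assuming a hypothetical "altr,sysmgr" phandle property and a made-up register offset:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/altera-sysmgr.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Hypothetical register offset; real clients take offsets from their binding. */
#define EXAMPLE_SYSMGR_OFFSET	0x10

static int example_client_setup(struct device *dev)
{
	struct regmap *sysmgr;
	unsigned int val;
	int ret;

	/* "altr,sysmgr" is an assumed phandle property name. */
	sysmgr = altr_sysmgr_regmap_lookup_by_phandle(dev->of_node,
						      "altr,sysmgr");
	if (IS_ERR(sysmgr))
		return PTR_ERR(sysmgr);	/* may be -EPROBE_DEFER */

	ret = regmap_read(sysmgr, EXAMPLE_SYSMGR_OFFSET, &val);
	if (ret)
		return ret;

	/* The same call works on ARM32 (MMIO) and Stratix 10 (SMC) parts. */
	return regmap_update_bits(sysmgr, EXAMPLE_SYSMGR_OFFSET,
				  BIT(0), BIT(0));
}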
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index e82543bcfdc8..35a9e16f9902 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -141,6 +141,7 @@ static const struct of_device_id atmel_hlcdc_match[] = {
{ .compatible = "atmel,sama5d2-hlcdc" },
{ .compatible = "atmel,sama5d3-hlcdc" },
{ .compatible = "atmel,sama5d4-hlcdc" },
+ { .compatible = "microchip,sam9x60-hlcdc" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_match);
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index a7b7c5423ea5..c2e8a0dee7f8 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -65,6 +65,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
{ .compatible = "x-powers,axp202", .data = (void *)AXP202_ID },
{ .compatible = "x-powers,axp209", .data = (void *)AXP209_ID },
{ .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
+ { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
{ .compatible = "x-powers,axp806", .data = (void *)AXP806_ID },
{ },
};
@@ -75,6 +76,7 @@ static const struct i2c_device_id axp20x_i2c_id[] = {
{ "axp202", 0 },
{ "axp209", 0 },
{ "axp221", 0 },
+ { "axp223", 0 },
{ "axp806", 0 },
{ },
};
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 3c97f2c0fdfe..2215660dfa05 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -198,6 +198,12 @@ static const struct resource axp22x_usb_power_supply_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
};
+/* AXP803 and AXP813/AXP818 share the same interrupts */
+static const struct resource axp803_usb_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP803_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP803_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+};
+
static const struct resource axp22x_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
@@ -741,6 +747,11 @@ static const struct mfd_cell axp803_cells[] = {
.of_compatible = "x-powers,axp813-ac-power-supply",
.num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
.resources = axp20x_ac_power_supply_resources,
+ }, {
+ .name = "axp20x-usb-power-supply",
+ .num_resources = ARRAY_SIZE(axp803_usb_power_supply_resources),
+ .resources = axp803_usb_power_supply_resources,
+ .of_compatible = "x-powers,axp813-usb-power-supply",
},
{ .name = "axp20x-regulator" },
};
@@ -793,6 +804,11 @@ static const struct mfd_cell axp813_cells[] = {
.of_compatible = "x-powers,axp813-ac-power-supply",
.num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
.resources = axp20x_ac_power_supply_resources,
+ }, {
+ .name = "axp20x-usb-power-supply",
+ .num_resources = ARRAY_SIZE(axp803_usb_power_supply_resources),
+ .resources = axp803_usb_power_supply_resources,
+ .of_compatible = "x-powers,axp813-usb-power-supply",
},
};
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 6acfe036d522..bd2bcdd4718b 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -75,20 +75,49 @@ static irqreturn_t ec_irq_thread(int irq, void *data)
static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
{
+ int ret;
struct {
struct cros_ec_command msg;
- struct ec_params_host_sleep_event req;
+ union {
+ struct ec_params_host_sleep_event req0;
+ struct ec_params_host_sleep_event_v1 req1;
+ struct ec_response_host_sleep_event_v1 resp1;
+ } u;
} __packed buf;
memset(&buf, 0, sizeof(buf));
- buf.req.sleep_event = sleep_event;
+ if (ec_dev->host_sleep_v1) {
+ buf.u.req1.sleep_event = sleep_event;
+ buf.u.req1.suspend_params.sleep_timeout_ms =
+ EC_HOST_SLEEP_TIMEOUT_DEFAULT;
+
+ buf.msg.outsize = sizeof(buf.u.req1);
+ if ((sleep_event == HOST_SLEEP_EVENT_S3_RESUME) ||
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME))
+ buf.msg.insize = sizeof(buf.u.resp1);
+
+ buf.msg.version = 1;
+
+ } else {
+ buf.u.req0.sleep_event = sleep_event;
+ buf.msg.outsize = sizeof(buf.u.req0);
+ }
buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
- buf.msg.version = 0;
- buf.msg.outsize = sizeof(buf.req);
- return cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+
+ /* For now, report failure to transition to S0ix with a warning. */
+ if (ret >= 0 && ec_dev->host_sleep_v1 &&
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME))
+ WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
+ EC_HOST_RESUME_SLEEP_TIMEOUT,
+ "EC detected sleep transition timeout. Total slp_s0 transitions: %d",
+ buf.u.resp1.resume_response.sleep_transitions &
+ EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
+
+ return ret;
}
int cros_ec_register(struct cros_ec_device *ec_dev)
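The reworked cros_ec_sleep_event() keys the message layout off ec_dev->host_sleep_v1: a v1-capable EC gets the larger request including a sleep timeout, a response is expected only for resume events, and the command version is bumped to 1, while older ECs keep the single-byte v0 request. A standalone sketch of that selection logic, with hypothetical structure names standing in for the EC protocol types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the EC protocol structures. */
struct msg_hdr { uint8_t version; uint16_t outsize, insize; };
struct req_v0 { uint8_t sleep_event; };
struct req_v1 { uint8_t sleep_event; uint16_t sleep_timeout_ms; };

/* Mirror of the size/version selection done in cros_ec_sleep_event(). */
static void fill_sleep_msg(struct msg_hdr *msg, void *payload,
			   bool host_sleep_v1, uint8_t sleep_event,
			   bool is_resume_event, size_t resp_size)
{
	if (host_sleep_v1) {
		struct req_v1 *req = payload;

		req->sleep_event = sleep_event;
		req->sleep_timeout_ms = 0;	/* 0 stands in for the default */
		msg->version = 1;
		msg->outsize = sizeof(*req);
		/* Only resume events carry a response payload. */
		msg->insize = is_resume_event ? (uint16_t)resp_size : 0;
	} else {
		struct req_v0 *req = payload;

		req->sleep_event = sleep_event;
		msg->version = 0;
		msg->outsize = sizeof(*req);
		msg->insize = 0;
	}
}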
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index d275deaecb12..54a58df571b6 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -385,7 +385,8 @@ static const struct mfd_cell cros_ec_rtc_cells[] = {
};
static const struct mfd_cell cros_usbpd_charger_cells[] = {
- { .name = "cros-usbpd-charger" }
+ { .name = "cros-usbpd-charger" },
+ { .name = "cros-usbpd-logger" },
};
static const struct mfd_cell cros_ec_platform_cells[] = {
@@ -418,6 +419,39 @@ static int ec_device_probe(struct platform_device *pdev)
device_initialize(&ec->class_dev);
cdev_init(&ec->cdev, &fops);
+ /* Check whether this is actually a Fingerprint MCU rather than an EC */
+ if (cros_ec_check_features(ec, EC_FEATURE_FINGERPRINT)) {
+ dev_info(dev, "CrOS Fingerprint MCU detected.\n");
+ /*
+ * Help userspace differentiate ECs from FP MCU,
+ * regardless of the probing order.
+ */
+ ec_platform->ec_name = CROS_EC_DEV_FP_NAME;
+ }
+
+ /*
+ * Check whether this is actually an Integrated Sensor Hub (ISH)
+ * rather than an EC.
+ */
+ if (cros_ec_check_features(ec, EC_FEATURE_ISH)) {
+ dev_info(dev, "CrOS ISH MCU detected.\n");
+ /*
+ * Help userspace differentiate ECs from ISH MCU,
+ * regardless of the probing order.
+ */
+ ec_platform->ec_name = CROS_EC_DEV_ISH_NAME;
+ }
+
+ /* Check whether this is actually a Touchpad MCU rather than an EC */
+ if (cros_ec_check_features(ec, EC_FEATURE_TOUCHPAD)) {
+ dev_info(dev, "CrOS Touchpad MCU detected.\n");
+ /*
+ * Help userspace differentiate ECs from TP MCU,
+ * regardless of the probing order.
+ */
+ ec_platform->ec_name = CROS_EC_DEV_TP_NAME;
+ }
+
/*
* Add the class device
* Link to the character device for creating the /dev entry
diff --git a/drivers/mfd/cs47l35-tables.c b/drivers/mfd/cs47l35-tables.c
index 604c9dd14df5..338b825127f1 100644
--- a/drivers/mfd/cs47l35-tables.c
+++ b/drivers/mfd/cs47l35-tables.c
@@ -178,6 +178,7 @@ static const struct reg_default cs47l35_reg_default[] = {
{ 0x00000448, 0x0a83 }, /* R1096 (0x448) - eDRE Enable */
{ 0x0000044a, 0x0000 }, /* R1098 (0x44a) - eDRE Manual */
{ 0x00000450, 0x0000 }, /* R1104 (0x450) - DAC AEC Control 1 */
+ { 0x00000451, 0x0000 }, /* R1105 (0x451) - DAC AEC Control 2 */
{ 0x00000458, 0x0000 }, /* R1112 (0x458) - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 (0x490) - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 (0x491) - PDM SPK1 CTRL 2 */
@@ -970,6 +971,7 @@ static bool cs47l35_16bit_readable_register(struct device *dev,
case MADERA_EDRE_ENABLE:
case MADERA_EDRE_MANUAL:
case MADERA_DAC_AEC_CONTROL_1:
+ case MADERA_DAC_AEC_CONTROL_2:
case MADERA_NOISE_GATE_CONTROL:
case MADERA_PDM_SPK1_CTRL_1:
case MADERA_PDM_SPK1_CTRL_2:
diff --git a/drivers/mfd/cs47l90-tables.c b/drivers/mfd/cs47l90-tables.c
index 77207d98f0cc..c040d3d7232a 100644
--- a/drivers/mfd/cs47l90-tables.c
+++ b/drivers/mfd/cs47l90-tables.c
@@ -263,6 +263,7 @@ static const struct reg_default cs47l90_reg_default[] = {
{ 0x00000440, 0x003f }, /* R1088 (0x440) - DRE Enable */
{ 0x00000448, 0x003f }, /* R1096 (0x448) - eDRE Enable */
{ 0x00000450, 0x0000 }, /* R1104 (0x450) - DAC AEC Control 1 */
+ { 0x00000451, 0x0000 }, /* R1105 (0x451) - DAC AEC Control 2 */
{ 0x00000458, 0x0000 }, /* R1112 (0x458) - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 (0x490) - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 (0x491) - PDM SPK1 CTRL 2 */
@@ -1692,6 +1693,7 @@ static bool cs47l90_16bit_readable_register(struct device *dev,
case MADERA_DRE_ENABLE:
case MADERA_EDRE_ENABLE:
case MADERA_DAC_AEC_CONTROL_1:
+ case MADERA_DAC_AEC_CONTROL_2:
case MADERA_NOISE_GATE_CONTROL:
case MADERA_PDM_SPK1_CTRL_1:
case MADERA_PDM_SPK1_CTRL_2:
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index 6e4ce49b4405..b125f90dd375 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * da9063-core.c: Device access for Dialog DA9063 modules
+ * Device access for Dialog DA9063 modules
*
* Copyright 2012 Dialog Semiconductors Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
@@ -7,11 +8,6 @@
* Author: Krystian Garbaciak, Dialog Semiconductor
* Author: Michal Hajduk, Dialog Semiconductor
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -26,7 +22,6 @@
#include <linux/regmap.h>
#include <linux/mfd/da9063/core.h>
-#include <linux/mfd/da9063/pdata.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/proc_fs.h>
@@ -165,7 +160,6 @@ static int da9063_clear_fault_log(struct da9063 *da9063)
int da9063_device_init(struct da9063 *da9063, unsigned int irq)
{
- struct da9063_pdata *pdata = da9063->dev->platform_data;
int model, variant_id, variant_code;
int ret;
@@ -173,24 +167,10 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
if (ret < 0)
dev_err(da9063->dev, "Cannot clear fault log\n");
- if (pdata) {
- da9063->flags = pdata->flags;
- da9063->irq_base = pdata->irq_base;
- } else {
- da9063->flags = 0;
- da9063->irq_base = -1;
- }
+ da9063->flags = 0;
+ da9063->irq_base = -1;
da9063->chip_irq = irq;
- if (pdata && pdata->init != NULL) {
- ret = pdata->init(da9063);
- if (ret != 0) {
- dev_err(da9063->dev,
- "Platform initialization failed.\n");
- return ret;
- }
- }
-
ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model);
if (ret < 0) {
dev_err(da9063->dev, "Cannot read chip model id.\n");
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 50a24b1921d0..455de74c0dd2 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -1,15 +1,10 @@
-/* da9063-i2c.c: Interrupt support for Dialog DA9063
+// SPDX-License-Identifier: GPL-2.0+
+/* I2C support for Dialog DA9063
*
* Copyright 2012 Dialog Semiconductor Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
*
* Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -22,7 +17,6 @@
#include <linux/mfd/core.h>
#include <linux/mfd/da9063/core.h>
-#include <linux/mfd/da9063/pdata.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/of.h>
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index ecc0c8ce6c58..e2bbedf58e68 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -1,15 +1,10 @@
-/* da9063-irq.c: Interrupts support for Dialog DA9063
+// SPDX-License-Identifier: GPL-2.0+
+/* Interrupt support for Dialog DA9063
*
* Copyright 2012 Dialog Semiconductor Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
*
* Author: Michal Hajduk, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -19,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/mfd/da9063/core.h>
-#include <linux/mfd/da9063/pdata.h>
#define DA9063_REG_EVENT_A_OFFSET 0
#define DA9063_REG_EVENT_B_OFFSET 1
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index cba2eb166650..6b111be944d9 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -129,6 +129,19 @@ static const struct intel_lpss_platform_info cnl_i2c_info = {
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
+ /* CML */
+ { PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x02ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x02c5), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02c6), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02c7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x02e8), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02e9), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02ea), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&spt_info },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 50bffc3382d7..7e425ff53491 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -28,6 +28,8 @@
#include <linux/seq_file.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dma/idma64.h>
+
#include "intel-lpss.h"
#define LPSS_DEV_OFFSET 0x000
@@ -96,8 +98,6 @@ static const struct resource intel_lpss_idma64_resources[] = {
DEFINE_RES_IRQ(0),
};
-#define LPSS_IDMA64_DRIVER_NAME "idma64"
-
/*
* Cells needs to be ordered so that the iDMA is created first. This is
* because we need to be sure the DMA is available when the host controller
@@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
{
u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
+ /* Set the device in reset state */
+ writel(0, lpss->priv + LPSS_PRIV_RESETS);
+
intel_lpss_deassert_reset(lpss);
intel_lpss_set_remap_addr(lpss);
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 5bddb84cfc1f..11adbf77960d 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -74,16 +74,6 @@ static const struct dmi_system_id dmi_platform_info[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-0YA2"),
- },
- .driver_data = (void *)400000,
- },
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-1YA2"),
},
.driver_data = (void *)400000,
},
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 64a3aece9c5e..be84bb2aa837 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -60,6 +60,7 @@ static struct mfd_cell cht_wc_dev[] = {
.resources = cht_wc_ext_charger_resources,
},
{ .name = "cht_wcove_region", },
+ { .name = "cht_wcove_leds", },
};
/*
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index d8ddd1a6f304..436361ce3737 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -37,6 +37,8 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+static struct max77620_chip *max77620_scratch;
+
static const struct resource gpio_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_TOP_GPIO),
};
@@ -111,6 +113,26 @@ static const struct mfd_cell max20024_children[] = {
},
};
+static const struct mfd_cell max77663_children[] = {
+ { .name = "max77620-pinctrl", },
+ { .name = "max77620-clock", },
+ { .name = "max77663-pmic", },
+ { .name = "max77620-watchdog", },
+ {
+ .name = "max77620-gpio",
+ .resources = gpio_resources,
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ }, {
+ .name = "max77620-rtc",
+ .resources = rtc_resources,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ }, {
+ .name = "max77663-power",
+ .resources = power_resources,
+ .num_resources = ARRAY_SIZE(power_resources),
+ },
+};
+
static const struct regmap_range max77620_readable_ranges[] = {
regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
};
@@ -171,6 +193,35 @@ static const struct regmap_config max20024_regmap_config = {
.volatile_table = &max77620_volatile_table,
};
+static const struct regmap_range max77663_readable_ranges[] = {
+ regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_CID5),
+};
+
+static const struct regmap_access_table max77663_readable_table = {
+ .yes_ranges = max77663_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max77663_readable_ranges),
+};
+
+static const struct regmap_range max77663_writable_ranges[] = {
+ regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_CID5),
+};
+
+static const struct regmap_access_table max77663_writable_table = {
+ .yes_ranges = max77663_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max77663_writable_ranges),
+};
+
+static const struct regmap_config max77663_regmap_config = {
+ .name = "power-slave",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77620_REG_CID5 + 1,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &max77663_readable_table,
+ .wr_table = &max77663_writable_table,
+ .volatile_table = &max77620_volatile_table,
+};
+
/*
* MAX77620 and MAX20024 has the following steps of the interrupt handling
* for TOP interrupts:
@@ -240,6 +291,9 @@ static int max77620_get_fps_period_reg_value(struct max77620_chip *chip,
case MAX77620:
fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
break;
+ case MAX77663:
+ fps_min_period = MAX20024_FPS_PERIOD_MIN_US;
+ break;
default:
return -EINVAL;
}
@@ -274,6 +328,9 @@ static int max77620_config_fps(struct max77620_chip *chip,
case MAX77620:
fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
break;
+ case MAX77663:
+ fps_max_period = MAX20024_FPS_PERIOD_MAX_US;
+ break;
default:
return -EINVAL;
}
@@ -375,6 +432,9 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
}
skip_fps:
+ if (chip->chip_id == MAX77663)
+ return 0;
+
/* Enable wake on EN0 pin */
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
MAX77620_ONOFFCNFG2_WK_EN0,
@@ -423,6 +483,15 @@ static int max77620_read_es_version(struct max77620_chip *chip)
return ret;
}
+static void max77620_pm_power_off(void)
+{
+ struct max77620_chip *chip = max77620_scratch;
+
+ regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG1,
+ MAX77620_ONOFFCNFG1_SFT_RST,
+ MAX77620_ONOFFCNFG1_SFT_RST);
+}
+
static int max77620_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -430,6 +499,7 @@ static int max77620_probe(struct i2c_client *client,
struct max77620_chip *chip;
const struct mfd_cell *mfd_cells;
int n_mfd_cells;
+ bool pm_off;
int ret;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
@@ -453,6 +523,11 @@ static int max77620_probe(struct i2c_client *client,
n_mfd_cells = ARRAY_SIZE(max20024_children);
rmap_config = &max20024_regmap_config;
break;
+ case MAX77663:
+ mfd_cells = max77663_children;
+ n_mfd_cells = ARRAY_SIZE(max77663_children);
+ rmap_config = &max77663_regmap_config;
+ break;
default:
dev_err(chip->dev, "ChipID is invalid %d\n", chip->chip_id);
return -EINVAL;
@@ -491,6 +566,12 @@ static int max77620_probe(struct i2c_client *client,
return ret;
}
+ pm_off = of_device_is_system_power_controller(client->dev.of_node);
+ if (pm_off && !pm_power_off) {
+ max77620_scratch = chip;
+ pm_power_off = max77620_pm_power_off;
+ }
+
return 0;
}
@@ -546,6 +627,9 @@ static int max77620_i2c_suspend(struct device *dev)
return ret;
}
+ if (chip->chip_id == MAX77663)
+ goto out;
+
/* Disable WK_EN0 */
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
MAX77620_ONOFFCNFG2_WK_EN0, 0);
@@ -581,7 +665,7 @@ static int max77620_i2c_resume(struct device *dev)
* For MAX20024: No need to configure WKEN0 on resume as
* it is configured on Init.
*/
- if (chip->chip_id == MAX20024)
+ if (chip->chip_id == MAX20024 || chip->chip_id == MAX77663)
goto out;
/* Enable WK_EN0 */
@@ -603,6 +687,7 @@ out:
static const struct i2c_device_id max77620_id[] = {
{"max77620", MAX77620},
{"max20024", MAX20024},
+ {"max77663", MAX77663},
{},
};
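The new poweroff support above follows the usual pattern: honour the device tree's system-power-controller property, install a pm_power_off hook only when no other handler has claimed it, and assert the PMIC's soft-reset bit from that hook. A minimal sketch of the pattern for a generic regmap-backed PMIC, with hypothetical register and bit names:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/regmap.h>

/* Hypothetical soft-reset register and bit of an imaginary PMIC. */
#define EXAMPLE_REG_RESET	0x41
#define EXAMPLE_RESET_BIT	BIT(7)

static struct regmap *example_poweroff_map;	/* stashed at probe time */

static void example_pm_power_off(void)
{
	regmap_update_bits(example_poweroff_map, EXAMPLE_REG_RESET,
			   EXAMPLE_RESET_BIT, EXAMPLE_RESET_BIT);
}

static void example_register_poweroff(struct device *dev, struct regmap *map)
{
	/* Claim pm_power_off only if DT marks this device as the controller. */
	if (of_device_is_system_power_controller(dev->of_node) &&
	    !pm_power_off) {
		example_poweroff_map = map;
		pm_power_off = example_pm_power_off;
	}
}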
diff --git a/drivers/mfd/max77650.c b/drivers/mfd/max77650.c
new file mode 100644
index 000000000000..60e07aca6ae5
--- /dev/null
+++ b/drivers/mfd/max77650.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// Core MFD driver for MAXIM 77650/77651 charger/power-supply.
+// Programming manual: https://pdfserv.maximintegrated.com/en/an/AN6428.pdf
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define MAX77650_INT_GPI_F_MSK BIT(0)
+#define MAX77650_INT_GPI_R_MSK BIT(1)
+#define MAX77650_INT_GPI_MSK \
+ (MAX77650_INT_GPI_F_MSK | MAX77650_INT_GPI_R_MSK)
+#define MAX77650_INT_nEN_F_MSK BIT(2)
+#define MAX77650_INT_nEN_R_MSK BIT(3)
+#define MAX77650_INT_TJAL1_R_MSK BIT(4)
+#define MAX77650_INT_TJAL2_R_MSK BIT(5)
+#define MAX77650_INT_DOD_R_MSK BIT(6)
+
+#define MAX77650_INT_THM_MSK BIT(0)
+#define MAX77650_INT_CHG_MSK BIT(1)
+#define MAX77650_INT_CHGIN_MSK BIT(2)
+#define MAX77650_INT_TJ_REG_MSK BIT(3)
+#define MAX77650_INT_CHGIN_CTRL_MSK BIT(4)
+#define MAX77650_INT_SYS_CTRL_MSK BIT(5)
+#define MAX77650_INT_SYS_CNFG_MSK BIT(6)
+
+#define MAX77650_INT_GLBL_OFFSET 0
+#define MAX77650_INT_CHG_OFFSET 1
+
+#define MAX77650_SBIA_LPM_MASK BIT(5)
+#define MAX77650_SBIA_LPM_DISABLED 0x00
+
+enum {
+ MAX77650_INT_GPI,
+ MAX77650_INT_nEN_F,
+ MAX77650_INT_nEN_R,
+ MAX77650_INT_TJAL1_R,
+ MAX77650_INT_TJAL2_R,
+ MAX77650_INT_DOD_R,
+ MAX77650_INT_THM,
+ MAX77650_INT_CHG,
+ MAX77650_INT_CHGIN,
+ MAX77650_INT_TJ_REG,
+ MAX77650_INT_CHGIN_CTRL,
+ MAX77650_INT_SYS_CTRL,
+ MAX77650_INT_SYS_CNFG,
+};
+
+static const struct resource max77650_charger_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_CHG, "CHG"),
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_CHGIN, "CHGIN"),
+};
+
+static const struct resource max77650_gpio_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_GPI, "GPI"),
+};
+
+static const struct resource max77650_onkey_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_nEN_F, "nEN_F"),
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_nEN_R, "nEN_R"),
+};
+
+static const struct mfd_cell max77650_cells[] = {
+ {
+ .name = "max77650-regulator",
+ .of_compatible = "maxim,max77650-regulator",
+ }, {
+ .name = "max77650-charger",
+ .of_compatible = "maxim,max77650-charger",
+ .resources = max77650_charger_resources,
+ .num_resources = ARRAY_SIZE(max77650_charger_resources),
+ }, {
+ .name = "max77650-gpio",
+ .of_compatible = "maxim,max77650-gpio",
+ .resources = max77650_gpio_resources,
+ .num_resources = ARRAY_SIZE(max77650_gpio_resources),
+ }, {
+ .name = "max77650-led",
+ .of_compatible = "maxim,max77650-led",
+ }, {
+ .name = "max77650-onkey",
+ .of_compatible = "maxim,max77650-onkey",
+ .resources = max77650_onkey_resources,
+ .num_resources = ARRAY_SIZE(max77650_onkey_resources),
+ },
+};
+
+static const struct regmap_irq max77650_irqs[] = {
+ [MAX77650_INT_GPI] = {
+ .reg_offset = MAX77650_INT_GLBL_OFFSET,
+ .mask = MAX77650_INT_GPI_MSK,
+ .type = {
+ .type_falling_val = MAX77650_INT_GPI_F_MSK,
+ .type_rising_val = MAX77650_INT_GPI_R_MSK,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
+ },
+ REGMAP_IRQ_REG(MAX77650_INT_nEN_F,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_nEN_F_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_nEN_R,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_nEN_R_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_TJAL1_R,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_TJAL1_R_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_TJAL2_R,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_TJAL2_R_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_DOD_R,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_DOD_R_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_THM,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_THM_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_CHG,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_CHG_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_CHGIN,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_CHGIN_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_TJ_REG,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_TJ_REG_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_CHGIN_CTRL,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_CHGIN_CTRL_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_SYS_CTRL,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_SYS_CTRL_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_SYS_CNFG,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_SYS_CNFG_MSK),
+};
+
+static const struct regmap_irq_chip max77650_irq_chip = {
+ .name = "max77650-irq",
+ .irqs = max77650_irqs,
+ .num_irqs = ARRAY_SIZE(max77650_irqs),
+ .num_regs = 2,
+ .status_base = MAX77650_REG_INT_GLBL,
+ .mask_base = MAX77650_REG_INTM_GLBL,
+ .type_in_mask = true,
+ .type_invert = true,
+ .init_ack_masked = true,
+ .clear_on_unmask = true,
+};
+
+static const struct regmap_config max77650_regmap_config = {
+ .name = "max77650",
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int max77650_i2c_probe(struct i2c_client *i2c)
+{
+ struct regmap_irq_chip_data *irq_data;
+ struct device *dev = &i2c->dev;
+ struct irq_domain *domain;
+ struct regmap *map;
+ unsigned int val;
+ int rv, id;
+
+ map = devm_regmap_init_i2c(i2c, &max77650_regmap_config);
+ if (IS_ERR(map)) {
+ dev_err(dev, "Unable to initialise I2C Regmap\n");
+ return PTR_ERR(map);
+ }
+
+ rv = regmap_read(map, MAX77650_REG_CID, &val);
+ if (rv) {
+ dev_err(dev, "Unable to read Chip ID\n");
+ return rv;
+ }
+
+ id = MAX77650_CID_BITS(val);
+ switch (id) {
+ case MAX77650_CID_77650A:
+ case MAX77650_CID_77650C:
+ case MAX77650_CID_77651A:
+ case MAX77650_CID_77651B:
+ break;
+ default:
+ dev_err(dev, "Chip not supported - ID: 0x%02x\n", id);
+ return -ENODEV;
+ }
+
+ /*
+ * This IC has a low-power mode which reduces the quiescent current
+ * consumption to ~5.6uA but is only suitable for systems consuming
+ * less than ~2mA. Since this is not likely the case even on
+ * linux-based wearables - keep the chip in normal power mode.
+ */
+ rv = regmap_update_bits(map,
+ MAX77650_REG_CNFG_GLBL,
+ MAX77650_SBIA_LPM_MASK,
+ MAX77650_SBIA_LPM_DISABLED);
+ if (rv) {
+ dev_err(dev, "Unable to change the power mode\n");
+ return rv;
+ }
+
+ rv = devm_regmap_add_irq_chip(dev, map, i2c->irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77650_irq_chip, &irq_data);
+ if (rv) {
+ dev_err(dev, "Unable to add Regmap IRQ chip\n");
+ return rv;
+ }
+
+ domain = regmap_irq_get_domain(irq_data);
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ max77650_cells, ARRAY_SIZE(max77650_cells),
+ NULL, 0, domain);
+}
+
+static const struct of_device_id max77650_of_match[] = {
+ { .compatible = "maxim,max77650" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_of_match);
+
+static struct i2c_driver max77650_i2c_driver = {
+ .driver = {
+ .name = "max77650",
+ .of_match_table = of_match_ptr(max77650_of_match),
+ },
+ .probe_new = max77650_i2c_probe,
+};
+module_i2c_driver(max77650_i2c_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 multi-function core driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
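Because devm_mfd_add_devices() above is handed the regmap-irq domain, each DEFINE_RES_IRQ_NAMED() entry in the cell resources is remapped into that domain and a child driver can fetch its virtual IRQ by name. A minimal sketch of what a hypothetical child (for instance the charger cell) might do in its probe, using only standard platform and IRQ APIs:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_chg_irq(int irq, void *data)
{
	/* Handle the charger event here. */
	return IRQ_HANDLED;
}

static int example_charger_probe(struct platform_device *pdev)
{
	int irq;

	/* "CHG" matches the DEFINE_RES_IRQ_NAMED() entry in the core driver. */
	irq = platform_get_irq_byname(pdev, "CHG");
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 example_chg_irq, IRQF_ONESHOT,
					 "example-chg", pdev);
}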
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 94e3f32ce935..1ade4c8cc91f 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -269,6 +269,19 @@ fail_alloc:
return ret;
}
+/**
+ * mfd_add_devices - register child devices
+ *
+ * @parent: Pointer to parent device.
+ * @id: Can be PLATFORM_DEVID_AUTO to let the Platform API take care
+ * of device numbering, or will be added to a device's cell_id.
+ * @cells: Array of (struct mfd_cell)s describing child devices.
+ * @n_devs: Number of child devices to register.
+ * @mem_base: Parent register range resource for child devices.
+ * @irq_base: Base of the range of virtual interrupt numbers allocated for
+ * this MFD device. Unused if @domain is specified.
+ * @domain: Interrupt domain to create mappings for hardware interrupts.
+ */
int mfd_add_devices(struct device *parent, int id,
const struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
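The kernel-doc added above maps directly onto a typical call from a parent driver; when an IRQ domain is supplied, irq_base is ignored. A minimal sketch using the devm_ variant most callers prefer, with a hypothetical two-cell array and no memory or IRQ resources:

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static const struct mfd_cell example_cells[] = {
	{ .name = "example-gpio" },
	{ .name = "example-rtc" },
};

static int example_parent_probe(struct platform_device *pdev)
{
	/*
	 * No memory resource or IRQ domain here, so mem_base and domain
	 * are NULL and irq_base is 0; with a domain, irq_base is unused.
	 */
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
				    example_cells, ARRAY_SIZE(example_cells),
				    NULL, 0, NULL);
}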
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 446713dbee27..93177d85e7a9 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -459,7 +459,6 @@ EXPORT_SYMBOL_GPL(omap_tll_disable);
MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
-MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb tll driver for TI OMAP EHCI and OHCI controllers");
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 216fbf6adec9..94377782d208 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -568,14 +568,6 @@ static int rk808_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id rk808_ids[] = {
- { "rk805" },
- { "rk808" },
- { "rk818" },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, rk808_ids);
-
static struct i2c_driver rk808_i2c_driver = {
.driver = {
.name = "rk808",
@@ -583,7 +575,6 @@ static struct i2c_driver rk808_i2c_driver = {
},
.probe = rk808_probe,
.remove = rk808_remove,
- .id_table = rk808_ids,
};
module_i2c_driver(rk808_i2c_driver);
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 521319086c81..95473ff9bb4b 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -28,45 +28,33 @@
#include <linux/regmap.h>
static const struct mfd_cell s5m8751_devs[] = {
- {
- .name = "s5m8751-pmic",
- }, {
- .name = "s5m-charger",
- }, {
- .name = "s5m8751-codec",
- },
+ { .name = "s5m8751-pmic", },
+ { .name = "s5m-charger", },
+ { .name = "s5m8751-codec", },
};
static const struct mfd_cell s5m8763_devs[] = {
- {
- .name = "s5m8763-pmic",
- }, {
- .name = "s5m-rtc",
- }, {
- .name = "s5m-charger",
- },
+ { .name = "s5m8763-pmic", },
+ { .name = "s5m-rtc", },
+ { .name = "s5m-charger", },
};
static const struct mfd_cell s5m8767_devs[] = {
+ { .name = "s5m8767-pmic", },
+ { .name = "s5m-rtc", },
{
- .name = "s5m8767-pmic",
- }, {
- .name = "s5m-rtc",
- }, {
.name = "s5m8767-clk",
.of_compatible = "samsung,s5m8767-clk",
- }
+ },
};
static const struct mfd_cell s2mps11_devs[] = {
+ { .name = "s2mps11-regulator", },
+ { .name = "s2mps14-rtc", },
{
- .name = "s2mps11-regulator",
- }, {
- .name = "s2mps14-rtc",
- }, {
.name = "s2mps11-clk",
.of_compatible = "samsung,s2mps11-clk",
- }
+ },
};
static const struct mfd_cell s2mps13_devs[] = {
@@ -79,37 +67,30 @@ static const struct mfd_cell s2mps13_devs[] = {
};
static const struct mfd_cell s2mps14_devs[] = {
+ { .name = "s2mps14-regulator", },
+ { .name = "s2mps14-rtc", },
{
- .name = "s2mps14-regulator",
- }, {
- .name = "s2mps14-rtc",
- }, {
.name = "s2mps14-clk",
.of_compatible = "samsung,s2mps14-clk",
- }
+ },
};
static const struct mfd_cell s2mps15_devs[] = {
+ { .name = "s2mps15-regulator", },
+ { .name = "s2mps15-rtc", },
{
- .name = "s2mps15-regulator",
- }, {
- .name = "s2mps15-rtc",
- }, {
.name = "s2mps13-clk",
.of_compatible = "samsung,s2mps13-clk",
},
};
static const struct mfd_cell s2mpa01_devs[] = {
- {
- .name = "s2mpa01-pmic",
- },
+ { .name = "s2mpa01-pmic", },
+ { .name = "s2mps14-rtc", },
};
static const struct mfd_cell s2mpu02_devs[] = {
- {
- .name = "s2mpu02-regulator",
- },
+ { .name = "s2mpu02-regulator", },
};
#ifdef CONFIG_OF
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index ad0099077e7e..a98c5d165039 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -455,6 +455,9 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
case S5M8767X:
sec_irq_chip = &s5m8767_irq_chip;
break;
+ case S2MPA01:
+ sec_irq_chip = &s2mps14_irq_chip;
+ break;
case S2MPS11X:
sec_irq_chip = &s2mps11_irq_chip;
break;
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..43ac71691fe4 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
static const struct mfd_cell sprd_pmic_devs[] = {
{
.name = "sc27xx-wdt",
- .of_compatible = "sprd,sc27xx-wdt",
+ .of_compatible = "sprd,sc2731-wdt",
}, {
.name = "sc27xx-rtc",
- .of_compatible = "sprd,sc27xx-rtc",
+ .of_compatible = "sprd,sc2731-rtc",
}, {
.name = "sc27xx-charger",
- .of_compatible = "sprd,sc27xx-charger",
+ .of_compatible = "sprd,sc2731-charger",
}, {
.name = "sc27xx-chg-timer",
- .of_compatible = "sprd,sc27xx-chg-timer",
+ .of_compatible = "sprd,sc2731-chg-timer",
}, {
.name = "sc27xx-fast-chg",
- .of_compatible = "sprd,sc27xx-fast-chg",
+ .of_compatible = "sprd,sc2731-fast-chg",
}, {
.name = "sc27xx-chg-wdt",
- .of_compatible = "sprd,sc27xx-chg-wdt",
+ .of_compatible = "sprd,sc2731-chg-wdt",
}, {
.name = "sc27xx-typec",
- .of_compatible = "sprd,sc27xx-typec",
+ .of_compatible = "sprd,sc2731-typec",
}, {
.name = "sc27xx-flash",
- .of_compatible = "sprd,sc27xx-flash",
+ .of_compatible = "sprd,sc2731-flash",
}, {
.name = "sc27xx-eic",
- .of_compatible = "sprd,sc27xx-eic",
+ .of_compatible = "sprd,sc2731-eic",
}, {
.name = "sc27xx-efuse",
- .of_compatible = "sprd,sc27xx-efuse",
+ .of_compatible = "sprd,sc2731-efuse",
}, {
.name = "sc27xx-thermal",
- .of_compatible = "sprd,sc27xx-thermal",
+ .of_compatible = "sprd,sc2731-thermal",
}, {
.name = "sc27xx-adc",
- .of_compatible = "sprd,sc27xx-adc",
+ .of_compatible = "sprd,sc2731-adc",
}, {
.name = "sc27xx-audio-codec",
- .of_compatible = "sprd,sc27xx-audio-codec",
+ .of_compatible = "sprd,sc2731-audio-codec",
}, {
.name = "sc27xx-regulator",
- .of_compatible = "sprd,sc27xx-regulator",
+ .of_compatible = "sprd,sc2731-regulator",
}, {
.name = "sc27xx-vibrator",
- .of_compatible = "sprd,sc27xx-vibrator",
+ .of_compatible = "sprd,sc2731-vibrator",
}, {
.name = "sc27xx-keypad-led",
- .of_compatible = "sprd,sc27xx-keypad-led",
+ .of_compatible = "sprd,sc2731-keypad-led",
}, {
.name = "sc27xx-bltc",
- .of_compatible = "sprd,sc27xx-bltc",
+ .of_compatible = "sprd,sc2731-bltc",
}, {
.name = "sc27xx-fgu",
- .of_compatible = "sprd,sc27xx-fgu",
+ .of_compatible = "sprd,sc2731-fgu",
}, {
.name = "sc27xx-7sreset",
- .of_compatible = "sprd,sc27xx-7sreset",
+ .of_compatible = "sprd,sc2731-7sreset",
}, {
.name = "sc27xx-poweroff",
- .of_compatible = "sprd,sc27xx-poweroff",
+ .of_compatible = "sprd,sc2731-poweroff",
}, {
.name = "sc27xx-syscon",
- .of_compatible = "sprd,sc27xx-syscon",
+ .of_compatible = "sprd,sc2731-syscon",
},
};
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index 36b96fee4ce6..0ae27cd30268 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -80,8 +80,6 @@ struct ssbi {
int (*write)(struct ssbi *, u16 addr, const u8 *buf, int len);
};
-#define to_ssbi(dev) platform_get_drvdata(to_platform_device(dev))
-
static inline u32 ssbi_readl(struct ssbi *ssbi, u32 reg)
{
return readl(ssbi->base + reg);
@@ -243,7 +241,7 @@ err:
int ssbi_read(struct device *dev, u16 addr, u8 *buf, int len)
{
- struct ssbi *ssbi = to_ssbi(dev);
+ struct ssbi *ssbi = dev_get_drvdata(dev);
unsigned long flags;
int ret;
@@ -257,7 +255,7 @@ EXPORT_SYMBOL_GPL(ssbi_read);
int ssbi_write(struct device *dev, u16 addr, const u8 *buf, int len)
{
- struct ssbi *ssbi = to_ssbi(dev);
+ struct ssbi *ssbi = dev_get_drvdata(dev);
unsigned long flags;
int ret;
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
new file mode 100644
index 000000000000..fe8efba2d45f
--- /dev/null
+++ b/drivers/mfd/stmfx.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STMicroelectronics Multi-Function eXpander (STMFX) core
+ *
+ * Copyright (C) 2019 STMicroelectronics
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>.
+ */
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/stmfx.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+static bool stmfx_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STMFX_REG_SYS_CTRL:
+ case STMFX_REG_IRQ_SRC_EN:
+ case STMFX_REG_IRQ_PENDING:
+ case STMFX_REG_IRQ_GPI_PENDING1:
+ case STMFX_REG_IRQ_GPI_PENDING2:
+ case STMFX_REG_IRQ_GPI_PENDING3:
+ case STMFX_REG_GPIO_STATE1:
+ case STMFX_REG_GPIO_STATE2:
+ case STMFX_REG_GPIO_STATE3:
+ case STMFX_REG_IRQ_GPI_SRC1:
+ case STMFX_REG_IRQ_GPI_SRC2:
+ case STMFX_REG_IRQ_GPI_SRC3:
+ case STMFX_REG_GPO_SET1:
+ case STMFX_REG_GPO_SET2:
+ case STMFX_REG_GPO_SET3:
+ case STMFX_REG_GPO_CLR1:
+ case STMFX_REG_GPO_CLR2:
+ case STMFX_REG_GPO_CLR3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool stmfx_reg_writeable(struct device *dev, unsigned int reg)
+{
+ return (reg >= STMFX_REG_SYS_CTRL);
+}
+
+static const struct regmap_config stmfx_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .max_register = STMFX_REG_MAX,
+ .volatile_reg = stmfx_reg_volatile,
+ .writeable_reg = stmfx_reg_writeable,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct resource stmfx_pinctrl_resources[] = {
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_GPIO),
+};
+
+static const struct resource stmfx_idd_resources[] = {
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_IDD),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_ERROR),
+};
+
+static const struct resource stmfx_ts_resources[] = {
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_DET),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_NE),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_TH),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_FULL),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_OVF),
+};
+
+static struct mfd_cell stmfx_cells[] = {
+ {
+ .of_compatible = "st,stmfx-0300-pinctrl",
+ .name = "stmfx-pinctrl",
+ .resources = stmfx_pinctrl_resources,
+ .num_resources = ARRAY_SIZE(stmfx_pinctrl_resources),
+ },
+ {
+ .of_compatible = "st,stmfx-0300-idd",
+ .name = "stmfx-idd",
+ .resources = stmfx_idd_resources,
+ .num_resources = ARRAY_SIZE(stmfx_idd_resources),
+ },
+ {
+ .of_compatible = "st,stmfx-0300-ts",
+ .name = "stmfx-ts",
+ .resources = stmfx_ts_resources,
+ .num_resources = ARRAY_SIZE(stmfx_ts_resources),
+ },
+};
+
+static u8 stmfx_func_to_mask(u32 func)
+{
+ u8 mask = 0;
+
+ if (func & STMFX_FUNC_GPIO)
+ mask |= STMFX_REG_SYS_CTRL_GPIO_EN;
+
+ if ((func & STMFX_FUNC_ALTGPIO_LOW) || (func & STMFX_FUNC_ALTGPIO_HIGH))
+ mask |= STMFX_REG_SYS_CTRL_ALTGPIO_EN;
+
+ if (func & STMFX_FUNC_TS)
+ mask |= STMFX_REG_SYS_CTRL_TS_EN;
+
+ if (func & STMFX_FUNC_IDD)
+ mask |= STMFX_REG_SYS_CTRL_IDD_EN;
+
+ return mask;
+}
+
+int stmfx_function_enable(struct stmfx *stmfx, u32 func)
+{
+ u32 sys_ctrl;
+ u8 mask;
+ int ret;
+
+ ret = regmap_read(stmfx->map, STMFX_REG_SYS_CTRL, &sys_ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * IDD and TS have priority in STMFX FW, so if IDD and TS are enabled,
+ * ALTGPIO function is disabled by STMFX FW. If IDD or TS is enabled,
+ * the number of available aGPIOs decreases. To avoid GPIO management
+ * disturbance, abort IDD or TS function enable in this case.
+ */
+ if (((func & STMFX_FUNC_IDD) || (func & STMFX_FUNC_TS)) &&
+ (sys_ctrl & STMFX_REG_SYS_CTRL_ALTGPIO_EN)) {
+ dev_err(stmfx->dev, "ALTGPIO function already enabled\n");
+ return -EBUSY;
+ }
+
+ /* If TS is enabled, aGPIO[3:0] cannot be used */
+ if ((func & STMFX_FUNC_ALTGPIO_LOW) &&
+ (sys_ctrl & STMFX_REG_SYS_CTRL_TS_EN)) {
+ dev_err(stmfx->dev, "TS in use, aGPIO[3:0] unavailable\n");
+ return -EBUSY;
+ }
+
+ /* If IDD is enabled, aGPIO[7:4] cannot be used */
+ if ((func & STMFX_FUNC_ALTGPIO_HIGH) &&
+ (sys_ctrl & STMFX_REG_SYS_CTRL_IDD_EN)) {
+ dev_err(stmfx->dev, "IDD in use, aGPIO[7:4] unavailable\n");
+ return -EBUSY;
+ }
+
+ mask = stmfx_func_to_mask(func);
+
+ return regmap_update_bits(stmfx->map, STMFX_REG_SYS_CTRL, mask, mask);
+}
+EXPORT_SYMBOL_GPL(stmfx_function_enable);
+
+int stmfx_function_disable(struct stmfx *stmfx, u32 func)
+{
+ u8 mask = stmfx_func_to_mask(func);
+
+ return regmap_update_bits(stmfx->map, STMFX_REG_SYS_CTRL, mask, 0);
+}
+EXPORT_SYMBOL_GPL(stmfx_function_disable);
+
+static void stmfx_irq_bus_lock(struct irq_data *data)
+{
+ struct stmfx *stmfx = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&stmfx->lock);
+}
+
+static void stmfx_irq_bus_sync_unlock(struct irq_data *data)
+{
+ struct stmfx *stmfx = irq_data_get_irq_chip_data(data);
+
+ regmap_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, stmfx->irq_src);
+
+ mutex_unlock(&stmfx->lock);
+}
+
+static void stmfx_irq_mask(struct irq_data *data)
+{
+ struct stmfx *stmfx = irq_data_get_irq_chip_data(data);
+
+ stmfx->irq_src &= ~BIT(data->hwirq % 8);
+}
+
+static void stmfx_irq_unmask(struct irq_data *data)
+{
+ struct stmfx *stmfx = irq_data_get_irq_chip_data(data);
+
+ stmfx->irq_src |= BIT(data->hwirq % 8);
+}
+
+static struct irq_chip stmfx_irq_chip = {
+ .name = "stmfx-core",
+ .irq_bus_lock = stmfx_irq_bus_lock,
+ .irq_bus_sync_unlock = stmfx_irq_bus_sync_unlock,
+ .irq_mask = stmfx_irq_mask,
+ .irq_unmask = stmfx_irq_unmask,
+};
+
+static irqreturn_t stmfx_irq_handler(int irq, void *data)
+{
+ struct stmfx *stmfx = data;
+ unsigned long n, pending;
+ u32 ack;
+ int ret;
+
+ ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING,
+ (u32 *)&pending);
+ if (ret)
+ return IRQ_NONE;
+
+ /*
+ * There is no ACK for GPIO, MFX_REG_IRQ_PENDING_GPIO is a logical OR
+ * of MFX_REG_IRQ_GPI_PENDING1/_PENDING2/_PENDING3
+ */
+ ack = pending & ~BIT(STMFX_REG_IRQ_SRC_EN_GPIO);
+ if (ack) {
+ ret = regmap_write(stmfx->map, STMFX_REG_IRQ_ACK, ack);
+ if (ret)
+ return IRQ_NONE;
+ }
+
+ for_each_set_bit(n, &pending, STMFX_REG_IRQ_SRC_MAX)
+ handle_nested_irq(irq_find_mapping(stmfx->irq_domain, n));
+
+ return IRQ_HANDLED;
+}
+
+static int stmfx_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_chip_and_handler(virq, &stmfx_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(virq, 1);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static void stmfx_irq_unmap(struct irq_domain *d, unsigned int virq)
+{
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+}
+
+static const struct irq_domain_ops stmfx_irq_ops = {
+ .map = stmfx_irq_map,
+ .unmap = stmfx_irq_unmap,
+};
+
+static void stmfx_irq_exit(struct i2c_client *client)
+{
+ struct stmfx *stmfx = i2c_get_clientdata(client);
+ int hwirq;
+
+ for (hwirq = 0; hwirq < STMFX_REG_IRQ_SRC_MAX; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(stmfx->irq_domain, hwirq));
+
+ irq_domain_remove(stmfx->irq_domain);
+}
+
+static int stmfx_irq_init(struct i2c_client *client)
+{
+ struct stmfx *stmfx = i2c_get_clientdata(client);
+ u32 irqoutpin = 0, irqtrigger;
+ int ret;
+
+ stmfx->irq_domain = irq_domain_add_simple(stmfx->dev->of_node,
+ STMFX_REG_IRQ_SRC_MAX, 0,
+ &stmfx_irq_ops, stmfx);
+ if (!stmfx->irq_domain) {
+ dev_err(stmfx->dev, "Failed to create IRQ domain\n");
+ return -EINVAL;
+ }
+
+ if (!of_property_read_bool(stmfx->dev->of_node, "drive-open-drain"))
+ irqoutpin |= STMFX_REG_IRQ_OUT_PIN_TYPE;
+
+ irqtrigger = irq_get_trigger_type(client->irq);
+ if ((irqtrigger & IRQ_TYPE_EDGE_RISING) ||
+ (irqtrigger & IRQ_TYPE_LEVEL_HIGH))
+ irqoutpin |= STMFX_REG_IRQ_OUT_PIN_POL;
+
+ ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(stmfx->dev, client->irq,
+ NULL, stmfx_irq_handler,
+ irqtrigger | IRQF_ONESHOT,
+ "stmfx", stmfx);
+ if (ret)
+ stmfx_irq_exit(client);
+
+ return ret;
+}
+
+static int stmfx_chip_reset(struct stmfx *stmfx)
+{
+ int ret;
+
+ ret = regmap_write(stmfx->map, STMFX_REG_SYS_CTRL,
+ STMFX_REG_SYS_CTRL_SWRST);
+ if (ret)
+ return ret;
+
+ msleep(STMFX_BOOT_TIME_MS);
+
+ return ret;
+}
+
+static int stmfx_chip_init(struct i2c_client *client)
+{
+ struct stmfx *stmfx = i2c_get_clientdata(client);
+ u32 id;
+ u8 version[2];
+ int ret;
+
+ stmfx->vdd = devm_regulator_get_optional(&client->dev, "vdd");
+ ret = PTR_ERR_OR_ZERO(stmfx->vdd);
+ if (ret == -ENODEV) {
+ stmfx->vdd = NULL;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else if (ret) {
+ dev_err(&client->dev, "Failed to get VDD regulator: %d\n", ret);
+ return ret;
+ }
+
+ if (stmfx->vdd) {
+ ret = regulator_enable(stmfx->vdd);
+ if (ret) {
+ dev_err(&client->dev, "VDD enable failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regmap_read(stmfx->map, STMFX_REG_CHIP_ID, &id);
+ if (ret) {
+ dev_err(&client->dev, "Error reading chip ID: %d\n", ret);
+ goto err;
+ }
+
+ /*
+ * Check that ID is the complement of the I2C address:
+ * The STMFX device address uses the 8-bit form (7-bit address in the
+ * upper bits), which is why client->addr is shifted left by one.
+ *
+ *  STMFX_I2C_ADDR |        STMFX         |        Linux
+ *    input pin    |  I2C device address  |  I2C device address
+ * ---------------------------------------------------------
+ *        0        | b: 1000 010x  h:0x84 |        0x42
+ *        1        | b: 1000 011x  h:0x86 |        0x43
+ */
+ if (FIELD_GET(STMFX_REG_CHIP_ID_MASK, ~id) != (client->addr << 1)) {
+ dev_err(&client->dev, "Unknown chip ID: %#x\n", id);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = regmap_bulk_read(stmfx->map, STMFX_REG_FW_VERSION_MSB,
+ version, ARRAY_SIZE(version));
+ if (ret) {
+ dev_err(&client->dev, "Error reading FW version: %d\n", ret);
+ goto err;
+ }
+
+ dev_info(&client->dev, "STMFX id: %#x, fw version: %x.%02x\n",
+ id, version[0], version[1]);
+
+ ret = stmfx_chip_reset(stmfx);
+ if (ret) {
+ dev_err(&client->dev, "Failed to reset chip: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (stmfx->vdd)
+ return regulator_disable(stmfx->vdd);
+
+ return ret;
+}
+
+static int stmfx_chip_exit(struct i2c_client *client)
+{
+ struct stmfx *stmfx = i2c_get_clientdata(client);
+
+ regmap_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, 0);
+ regmap_write(stmfx->map, STMFX_REG_SYS_CTRL, 0);
+
+ if (stmfx->vdd)
+ return regulator_disable(stmfx->vdd);
+
+ return 0;
+}
+
+static int stmfx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct stmfx *stmfx;
+ int ret;
+
+ stmfx = devm_kzalloc(dev, sizeof(*stmfx), GFP_KERNEL);
+ if (!stmfx)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, stmfx);
+
+ stmfx->dev = dev;
+
+ stmfx->map = devm_regmap_init_i2c(client, &stmfx_regmap_config);
+ if (IS_ERR(stmfx->map)) {
+ ret = PTR_ERR(stmfx->map);
+ dev_err(dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&stmfx->lock);
+
+ ret = stmfx_chip_init(client);
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ return -EPROBE_DEFER;
+ return ret;
+ }
+
+ if (client->irq < 0) {
+ dev_err(dev, "Failed to get IRQ: %d\n", client->irq);
+ ret = client->irq;
+ goto err_chip_exit;
+ }
+
+ ret = stmfx_irq_init(client);
+ if (ret)
+ goto err_chip_exit;
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ stmfx_cells, ARRAY_SIZE(stmfx_cells), NULL,
+ 0, stmfx->irq_domain);
+ if (ret)
+ goto err_irq_exit;
+
+ return 0;
+
+err_irq_exit:
+ stmfx_irq_exit(client);
+err_chip_exit:
+ stmfx_chip_exit(client);
+
+ return ret;
+}
+
+static int stmfx_remove(struct i2c_client *client)
+{
+ stmfx_irq_exit(client);
+
+ return stmfx_chip_exit(client);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stmfx_suspend(struct device *dev)
+{
+ struct stmfx *stmfx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_raw_read(stmfx->map, STMFX_REG_SYS_CTRL,
+ &stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl));
+ if (ret)
+ return ret;
+
+ ret = regmap_raw_read(stmfx->map, STMFX_REG_IRQ_OUT_PIN,
+ &stmfx->bkp_irqoutpin,
+ sizeof(stmfx->bkp_irqoutpin));
+ if (ret)
+ return ret;
+
+ if (stmfx->vdd)
+ return regulator_disable(stmfx->vdd);
+
+ return 0;
+}
+
+static int stmfx_resume(struct device *dev)
+{
+ struct stmfx *stmfx = dev_get_drvdata(dev);
+ int ret;
+
+ if (stmfx->vdd) {
+ ret = regulator_enable(stmfx->vdd);
+ if (ret) {
+ dev_err(stmfx->dev,
+ "VDD enable failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL,
+ &stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl));
+ if (ret)
+ return ret;
+
+ ret = regmap_raw_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN,
+ &stmfx->bkp_irqoutpin,
+ sizeof(stmfx->bkp_irqoutpin));
+ if (ret)
+ return ret;
+
+ ret = regmap_raw_write(stmfx->map, STMFX_REG_IRQ_SRC_EN,
+ &stmfx->irq_src, sizeof(stmfx->irq_src));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stmfx_dev_pm_ops, stmfx_suspend, stmfx_resume);
+
+static const struct of_device_id stmfx_of_match[] = {
+ { .compatible = "st,stmfx-0300", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stmfx_of_match);
+
+static struct i2c_driver stmfx_driver = {
+ .driver = {
+ .name = "stmfx-core",
+ .of_match_table = of_match_ptr(stmfx_of_match),
+ .pm = &stmfx_dev_pm_ops,
+ },
+ .probe = stmfx_probe,
+ .remove = stmfx_remove,
+};
+module_i2c_driver(stmfx_driver);
+
+MODULE_DESCRIPTION("STMFX core driver");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c
index 2b658bed47db..2f12a415b807 100644
--- a/drivers/mfd/sun6i-prcm.c
+++ b/drivers/mfd/sun6i-prcm.c
@@ -148,13 +148,12 @@ static const struct of_device_id sun6i_prcm_dt_ids[] = {
static int sun6i_prcm_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
const struct prcm_data *data;
struct resource *res;
int ret;
- match = of_match_node(sun6i_prcm_dt_ids, np);
+ match = of_match_node(sun6i_prcm_dt_ids, pdev->dev.of_node);
if (!match)
return -EINVAL;
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 0ecdffb3d967..f6922a0f8058 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -12,6 +12,7 @@
* (at your option) any later version.
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hwspinlock.h>
#include <linux/io.h>
@@ -45,6 +46,7 @@ static const struct regmap_config syscon_regmap_config = {
static struct syscon *of_syscon_register(struct device_node *np)
{
+ struct clk *clk;
struct syscon *syscon;
struct regmap *regmap;
void __iomem *base;
@@ -119,6 +121,18 @@ static struct syscon *of_syscon_register(struct device_node *np)
goto err_regmap;
}
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ /* clock is optional */
+ if (ret != -ENOENT)
+ goto err_clk;
+ } else {
+ ret = regmap_mmio_attach_clk(regmap, clk);
+ if (ret)
+ goto err_attach;
+ }
+
syscon->regmap = regmap;
syscon->np = np;
@@ -128,6 +142,11 @@ static struct syscon *of_syscon_register(struct device_node *np)
return syscon;
+err_attach:
+ if (!IS_ERR(clk))
+ clk_put(clk);
+err_clk:
+ regmap_exit(regmap);
err_regmap:
iounmap(base);
err_map:
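
The syscon hunk above treats the clock as optional (only -ENOENT from of_clk_get() is tolerated) and extends the error unwind so that a failed regmap_mmio_attach_clk() drops the clock reference before the regmap is torn down. Below is a minimal standalone sketch of that goto-unwind ordering; acquire_regmap(), get_clk() and attach_clk() are hypothetical stand-ins for the real calls, and no clock handle is threaded through, purely for brevity.

#include <errno.h>
#include <stdio.h>

/* Hypothetical helpers standing in for the regmap/clk calls in the hunk. */
static int  acquire_regmap(void) { puts("regmap init"); return 0; }
static void release_regmap(void) { puts("regmap exit"); }
static int  get_clk(void)        { puts("clk get");     return 0; }
static void put_clk(void)        { puts("clk put");     }
static int  attach_clk(void)     { puts("clk attach");  return -EIO; } /* late failure */

static int setup(void)
{
    int ret;

    ret = acquire_regmap();
    if (ret)
        goto err_map;

    ret = get_clk();                 /* the clock is optional */
    if (ret) {
        if (ret != -ENOENT)
            goto err_clk;            /* real error: tear down the regmap */
    } else {
        ret = attach_clk();
        if (ret)
            goto err_attach;         /* drop the clk, then the regmap */
    }

    return 0;

err_attach:
    put_clk();
err_clk:
    release_regmap();
err_map:
    return ret;
}

int main(void)
{
    printf("setup() = %d\n", setup()); /* exercises err_attach -> err_clk */
    return 0;
}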
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 43d8683266de..e9cfb147345e 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -82,8 +82,7 @@ struct t7l66xb {
static int t7l66xb_mmc_enable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+ struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
unsigned long flags;
u8 dev_ctl;
int ret;
@@ -108,8 +107,7 @@ static int t7l66xb_mmc_enable(struct platform_device *mmc)
static int t7l66xb_mmc_disable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+ struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
unsigned long flags;
u8 dev_ctl;
@@ -128,16 +126,14 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+ struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state);
}
static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+ struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state);
}
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 85fab3729102..f417c6fecfe2 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -80,16 +80,14 @@ static int tc6387xb_resume(struct platform_device *dev)
static void tc6387xb_mmc_pwr(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_pwr(tc6387xb->scr + 0x200, 0, state);
}
static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_clk_div(tc6387xb->scr + 0x200, 0, state);
}
@@ -97,8 +95,7 @@ static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
static int tc6387xb_mmc_enable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
clk_prepare_enable(tc6387xb->clk32k);
@@ -110,8 +107,7 @@ static int tc6387xb_mmc_enable(struct platform_device *mmc)
static int tc6387xb_mmc_disable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
clk_disable_unprepare(tc6387xb->clk32k);
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 0c9f0390e891..6943048a64c2 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -122,14 +122,13 @@ enum {
static int tc6393xb_nand_enable(struct platform_device *nand)
{
- struct platform_device *dev = to_platform_device(nand->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(nand->dev.parent);
unsigned long flags;
raw_spin_lock_irqsave(&tc6393xb->lock, flags);
/* SMD buffer on */
- dev_dbg(&dev->dev, "SMD buffer on\n");
+ dev_dbg(nand->dev.parent, "SMD buffer on\n");
tmio_iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
@@ -312,8 +311,7 @@ static int tc6393xb_fb_disable(struct platform_device *dev)
int tc6393xb_lcd_set_power(struct platform_device *fb, bool on)
{
- struct platform_device *dev = to_platform_device(fb->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(fb->dev.parent);
u8 fer;
unsigned long flags;
@@ -334,8 +332,7 @@ EXPORT_SYMBOL(tc6393xb_lcd_set_power);
int tc6393xb_lcd_mode(struct platform_device *fb,
const struct fb_videomode *mode) {
- struct platform_device *dev = to_platform_device(fb->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(fb->dev.parent);
unsigned long flags;
raw_spin_lock_irqsave(&tc6393xb->lock, flags);
@@ -351,8 +348,7 @@ EXPORT_SYMBOL(tc6393xb_lcd_mode);
static int tc6393xb_mmc_enable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_enable(tc6393xb->scr + 0x200, 0,
tc6393xb_mmc_resources[0].start & 0xfffe);
@@ -362,8 +358,7 @@ static int tc6393xb_mmc_enable(struct platform_device *mmc)
static int tc6393xb_mmc_resume(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_resume(tc6393xb->scr + 0x200, 0,
tc6393xb_mmc_resources[0].start & 0xfffe);
@@ -373,16 +368,14 @@ static int tc6393xb_mmc_resume(struct platform_device *mmc)
static void tc6393xb_mmc_pwr(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_pwr(tc6393xb->scr + 0x200, 0, state);
}
static void tc6393xb_mmc_clk_div(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_clk_div(tc6393xb->scr + 0x200, 0, state);
}
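
The t7l66xb, tc6387xb and tc6393xb conversions above all rely on the same identity: platform_get_drvdata(pdev) is a thin wrapper around dev_get_drvdata(&pdev->dev), so reading the parent's driver data directly skips the to_platform_device() detour without changing behaviour. A standalone sketch of that equivalence with stripped-down stand-ins for the kernel structures (not the real definitions):

#include <assert.h>
#include <stddef.h>

/* Stripped-down stand-ins for the kernel structures (illustrative only). */
struct device {
    struct device *parent;
    void *driver_data;
};

struct platform_device {
    struct device dev;
};

static void *dev_get_drvdata(const struct device *dev)
{
    return dev->driver_data;
}

static void dev_set_drvdata(struct device *dev, void *data)
{
    dev->driver_data = data;
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define to_platform_device(d) container_of(d, struct platform_device, dev)

static void *platform_get_drvdata(const struct platform_device *pdev)
{
    return dev_get_drvdata(&pdev->dev);
}

int main(void)
{
    struct platform_device parent = {{ NULL, NULL }}, child = {{ NULL, NULL }};
    int chip_state = 42;

    dev_set_drvdata(&parent.dev, &chip_state);
    child.dev.parent = &parent.dev;

    /* Old style: recover the platform_device, then take its drvdata. */
    void *a = platform_get_drvdata(to_platform_device(child.dev.parent));
    /* New style: read the parent's drvdata directly. */
    void *b = dev_get_drvdata(child.dev.parent);

    assert(a == b && a == &chip_state);
    return 0;
}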
diff --git a/drivers/mfd/ti-lmu.c b/drivers/mfd/ti-lmu.c
index 37d0bdb291c3..b06cb908d1aa 100644
--- a/drivers/mfd/ti-lmu.c
+++ b/drivers/mfd/ti-lmu.c
@@ -54,14 +54,6 @@ static void ti_lmu_disable_hw(void *data)
gpiod_set_value(lmu->en_gpio, 0);
}
-static const struct mfd_cell lm3532_devices[] = {
- {
- .name = "ti-lmu-backlight",
- .id = LM3532,
- .of_compatible = "ti,lm3532-backlight",
- },
-};
-
#define LM363X_REGULATOR(_id) \
{ \
.name = "lm363x-regulator", \
@@ -141,7 +133,6 @@ static const struct ti_lmu_data chip##_data = \
.max_register = max_reg, \
} \
-TI_LMU_DATA(lm3532, LM3532_MAX_REG);
TI_LMU_DATA(lm3631, LM3631_MAX_REG);
TI_LMU_DATA(lm3632, LM3632_MAX_REG);
TI_LMU_DATA(lm3633, LM3633_MAX_REG);
@@ -211,7 +202,6 @@ static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
}
static const struct of_device_id ti_lmu_of_match[] = {
- { .compatible = "ti,lm3532", .data = &lm3532_data },
{ .compatible = "ti,lm3631", .data = &lm3631_data },
{ .compatible = "ti,lm3632", .data = &lm3632_data },
{ .compatible = "ti,lm3633", .data = &lm3633_data },
@@ -222,7 +212,6 @@ static const struct of_device_id ti_lmu_of_match[] = {
MODULE_DEVICE_TABLE(of, ti_lmu_of_match);
static const struct i2c_device_id ti_lmu_ids[] = {
- { "lm3532", LM3532 },
{ "lm3631", LM3631 },
{ "lm3632", LM3632 },
{ "lm3633", LM3633 },
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index 3bd75061f777..f78be039e463 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = {
{ .compatible = "ti,tps65912", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table);
static int tps65912_spi_probe(struct spi_device *spi)
{
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016bc46d9..104477b512a2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ free:
return status;
}
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq)
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
static const struct i2c_device_id twl_ids[] = {
{ "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
{ "twl5030", 0 }, /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
/* One Client Driver , 4 Clients */
static struct i2c_driver twl_driver = {
.driver.name = DRIVER_NAME,
+ .driver.pm = &twl_dev_pm_ops,
.id_table = twl_ids,
.probe = twl_probe,
.remove = twl_remove,
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 7c3c5fd5fcd0..86052c5c6069 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on)
}
}
+ /*
+ * Register access can produce errors after power-up unless we
+ * wait at least 8ms based on measurements on duovero.
+ */
+ usleep_range(10000, 12000);
+
/* Sync with the HW */
- regcache_sync(twl6040->regmap);
+ ret = regcache_sync(twl6040->regmap);
+ if (ret) {
+ dev_err(twl6040->dev, "Failed to sync with the HW: %i\n",
+ ret);
+ goto out;
+ }
/* Default PLL configuration after power up */
twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 25fbbaf39cb9..21bae64e0451 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -34,7 +34,7 @@
/* Current settings - values are 2*2^(reg_val/4) microamps. These are
* exported since they are used by multiple drivers.
*/
-int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
+const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
2,
2,
3,
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 79756c83f5f0..cf067424a156 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -35,12 +35,6 @@ static bool wm8400_volatile(struct device *dev, unsigned int reg)
}
}
-int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
-{
- return regmap_bulk_read(wm8400->regmap, reg, data, count);
-}
-EXPORT_SYMBOL_GPL(wm8400_block_read);
-
static int wm8400_register_codec(struct wm8400 *wm8400)
{
const struct mfd_cell cell = {
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index da22bcb62b04..4e285addbf2b 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -451,17 +451,18 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN)
| rtsx_check_dev_flag(pcr, PM_L1_2_EN);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
if (lss_l1_2) {
pcr_dbg(pcr, "Set parameters for L1.2.");
rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
0xFF, PCIE_L1_2_EN);
- rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN);
- rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
0xFF, PCIE_L1_2_PD_FE_EN);
} else if (lss_l1_1) {
pcr_dbg(pcr, "Set parameters for L1.1.");
@@ -573,10 +574,10 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
* to drive low, and we forcibly request clock.
*/
if (option->force_clkreq_0)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
return 0;
@@ -704,7 +705,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
option->ocp_en = 1;
if (option->ocp_en)
hw_param->interrupt_en |= SD_OC_INT_EN;
- hw_param->ocp_glitch = SDVIO_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_100U | SDVIO_OCP_GLITCH_800U;
option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
}
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index dc7b34174f85..a4d17a5a9763 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -168,7 +168,7 @@ int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
if (dsisr & CXL_PSL_DSISR_An_S)
access |= _PAGE_WRITE;
- if (!mm && (REGION_ID(dar) != USER_REGION_ID))
+ if (!mm && (get_region_id(dar) != USER_REGION_ID))
access |= _PAGE_PRIVILEGED;
if (dsisr & DSISR_NOHPTE)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 39f832d27288..98603e235cf0 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
+#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
@@ -31,7 +32,7 @@
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_CTXID_MASK (0xFF0)
-#define INIT_FILELEN_MAX (2 * 1024 * 1024)
+#define INIT_FILELEN_MAX (64 * 1024 * 1024)
#define INIT_MEMLEN_MAX (8 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
@@ -104,6 +105,15 @@ struct fastrpc_invoke_rsp {
int retval; /* invoke return value */
};
+struct fastrpc_buf_overlap {
+ u64 start;
+ u64 end;
+ int raix;
+ u64 mstart;
+ u64 mend;
+ u64 offset;
+};
+
struct fastrpc_buf {
struct fastrpc_user *fl;
struct dma_buf *dmabuf;
@@ -149,12 +159,14 @@ struct fastrpc_invoke_ctx {
struct kref refcount;
struct list_head node; /* list of ctxs */
struct completion work;
+ struct work_struct put_work;
struct fastrpc_msg msg;
struct fastrpc_user *fl;
struct fastrpc_remote_arg *rpra;
struct fastrpc_map **maps;
struct fastrpc_buf *buf;
struct fastrpc_invoke_args *args;
+ struct fastrpc_buf_overlap *olaps;
struct fastrpc_channel_ctx *cctx;
};
@@ -282,6 +294,7 @@ static void fastrpc_context_free(struct kref *ref)
{
struct fastrpc_invoke_ctx *ctx;
struct fastrpc_channel_ctx *cctx;
+ unsigned long flags;
int i;
ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
@@ -293,11 +306,12 @@ static void fastrpc_context_free(struct kref *ref)
if (ctx->buf)
fastrpc_buf_free(ctx->buf);
- spin_lock(&cctx->lock);
+ spin_lock_irqsave(&cctx->lock, flags);
idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
kfree(ctx->maps);
+ kfree(ctx->olaps);
kfree(ctx);
}
@@ -311,12 +325,70 @@ static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
kref_put(&ctx->refcount, fastrpc_context_free);
}
+static void fastrpc_context_put_wq(struct work_struct *work)
+{
+ struct fastrpc_invoke_ctx *ctx =
+ container_of(work, struct fastrpc_invoke_ctx, put_work);
+
+ fastrpc_context_put(ctx);
+}
+
+#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
+static int olaps_cmp(const void *a, const void *b)
+{
+ struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
+ struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
+ /* sort with lowest starting buffer first */
+ int st = CMP(pa->start, pb->start);
+ /* sort with highest ending buffer first */
+ int ed = CMP(pb->end, pa->end);
+
+ return st == 0 ? ed : st;
+}
+
+static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
+{
+ u64 max_end = 0;
+ int i;
+
+ for (i = 0; i < ctx->nbufs; ++i) {
+ ctx->olaps[i].start = ctx->args[i].ptr;
+ ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
+ ctx->olaps[i].raix = i;
+ }
+
+ sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);
+
+ for (i = 0; i < ctx->nbufs; ++i) {
+ /* Falling inside previous range */
+ if (ctx->olaps[i].start < max_end) {
+ ctx->olaps[i].mstart = max_end;
+ ctx->olaps[i].mend = ctx->olaps[i].end;
+ ctx->olaps[i].offset = max_end - ctx->olaps[i].start;
+
+ if (ctx->olaps[i].end > max_end) {
+ max_end = ctx->olaps[i].end;
+ } else {
+ ctx->olaps[i].mend = 0;
+ ctx->olaps[i].mstart = 0;
+ }
+
+ } else {
+ ctx->olaps[i].mend = ctx->olaps[i].end;
+ ctx->olaps[i].mstart = ctx->olaps[i].start;
+ ctx->olaps[i].offset = 0;
+ max_end = ctx->olaps[i].end;
+ }
+ }
+}
+
static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
struct fastrpc_user *user, u32 kernel, u32 sc,
struct fastrpc_invoke_args *args)
{
struct fastrpc_channel_ctx *cctx = user->cctx;
struct fastrpc_invoke_ctx *ctx = NULL;
+ unsigned long flags;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -336,7 +408,15 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
+ ctx->olaps = kcalloc(ctx->nscalars,
+ sizeof(*ctx->olaps), GFP_KERNEL);
+ if (!ctx->olaps) {
+ kfree(ctx->maps);
+ kfree(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
ctx->args = args;
+ fastrpc_get_buff_overlaps(ctx);
}
ctx->sc = sc;
@@ -345,20 +425,21 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
ctx->tgid = user->tgid;
ctx->cctx = cctx;
init_completion(&ctx->work);
+ INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
spin_lock(&user->lock);
list_add_tail(&ctx->node, &user->pending);
spin_unlock(&user->lock);
- spin_lock(&cctx->lock);
+ spin_lock_irqsave(&cctx->lock, flags);
ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
FASTRPC_CTX_MAX, GFP_ATOMIC);
if (ret < 0) {
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
goto err_idr;
}
ctx->ctxid = ret << 4;
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
kref_init(&ctx->refcount);
@@ -368,6 +449,7 @@ err_idr:
list_del(&ctx->node);
spin_unlock(&user->lock);
kfree(ctx->maps);
+ kfree(ctx->olaps);
kfree(ctx);
return ERR_PTR(ret);
@@ -586,8 +668,11 @@ static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
size = ALIGN(metalen, FASTRPC_ALIGN);
for (i = 0; i < ctx->nscalars; i++) {
if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
- size = ALIGN(size, FASTRPC_ALIGN);
- size += ctx->args[i].length;
+
+ if (ctx->olaps[i].offset == 0)
+ size = ALIGN(size, FASTRPC_ALIGN);
+
+ size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
}
}
@@ -625,12 +710,12 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
struct fastrpc_remote_arg *rpra;
struct fastrpc_invoke_buf *list;
struct fastrpc_phy_page *pages;
- int inbufs, i, err = 0;
- u64 rlen, pkt_size;
+ int inbufs, i, oix, err = 0;
+ u64 len, rlen, pkt_size;
+ u64 pg_start, pg_end;
uintptr_t args;
int metalen;
-
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
metalen = fastrpc_get_meta_size(ctx);
pkt_size = fastrpc_get_payload_size(ctx, metalen);
@@ -653,8 +738,11 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
rlen = pkt_size - metalen;
ctx->rpra = rpra;
- for (i = 0; i < ctx->nbufs; ++i) {
- u64 len = ctx->args[i].length;
+ for (oix = 0; oix < ctx->nbufs; ++oix) {
+ int mlen;
+
+ i = ctx->olaps[oix].raix;
+ len = ctx->args[i].length;
rpra[i].pv = 0;
rpra[i].len = len;
@@ -664,22 +752,45 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
if (!len)
continue;
- pages[i].size = roundup(len, PAGE_SIZE);
-
if (ctx->maps[i]) {
+ struct vm_area_struct *vma = NULL;
+
rpra[i].pv = (u64) ctx->args[i].ptr;
pages[i].addr = ctx->maps[i]->phys;
+
+ vma = find_vma(current->mm, ctx->args[i].ptr);
+ if (vma)
+ pages[i].addr += ctx->args[i].ptr -
+ vma->vm_start;
+
+ pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
+ pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
+ PAGE_SHIFT;
+ pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
+
} else {
- rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
- args = ALIGN(args, FASTRPC_ALIGN);
- if (rlen < len)
+
+ if (ctx->olaps[oix].offset == 0) {
+ rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
+ args = ALIGN(args, FASTRPC_ALIGN);
+ }
+
+ mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;
+
+ if (rlen < mlen)
goto bail;
- rpra[i].pv = args;
- pages[i].addr = ctx->buf->phys + (pkt_size - rlen);
+ rpra[i].pv = args - ctx->olaps[oix].offset;
+ pages[i].addr = ctx->buf->phys -
+ ctx->olaps[oix].offset +
+ (pkt_size - rlen);
pages[i].addr = pages[i].addr & PAGE_MASK;
- args = args + len;
- rlen -= len;
+
+ pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
+ pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
+ args = args + mlen;
+ rlen -= mlen;
}
if (i < inbufs && !ctx->maps[i]) {
@@ -782,6 +893,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (err)
goto bail;
}
+
+ /* make sure that all CPU memory writes are seen by DSP */
+ dma_wmb();
/* Send invoke buffer to remote dsp */
err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
if (err)
@@ -798,6 +912,8 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
goto bail;
if (ctx->nscalars) {
+ /* make sure that all memory writes by DSP are seen by CPU */
+ dma_rmb();
/* populate all the output buffers with results */
err = fastrpc_put_args(ctx, kernel);
if (err)
@@ -843,12 +959,12 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
if (copy_from_user(&init, argp, sizeof(init))) {
err = -EFAULT;
- goto bail;
+ goto err;
}
if (init.filelen > INIT_FILELEN_MAX) {
err = -EINVAL;
- goto bail;
+ goto err;
}
inbuf.pgid = fl->tgid;
@@ -862,17 +978,15 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
if (init.filelen && init.filefd) {
err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
if (err)
- goto bail;
+ goto err;
}
memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
1024 * 1024);
err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
&imem);
- if (err) {
- fastrpc_map_put(map);
- goto bail;
- }
+ if (err)
+ goto err_alloc;
fl->init_mem = imem;
args[0].ptr = (u64)(uintptr_t)&inbuf;
@@ -908,13 +1022,24 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, args);
+ if (err)
+ goto err_invoke;
- if (err) {
+ kfree(args);
+
+ return 0;
+
+err_invoke:
+ fl->init_mem = NULL;
+ fastrpc_buf_free(imem);
+err_alloc:
+ if (map) {
+ spin_lock(&fl->lock);
+ list_del(&map->node);
+ spin_unlock(&fl->lock);
fastrpc_map_put(map);
- fastrpc_buf_free(imem);
}
-
-bail:
+err:
kfree(args);
return err;
@@ -924,9 +1049,10 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
struct fastrpc_channel_ctx *cctx)
{
struct fastrpc_session_ctx *session = NULL;
+ unsigned long flags;
int i;
- spin_lock(&cctx->lock);
+ spin_lock_irqsave(&cctx->lock, flags);
for (i = 0; i < cctx->sesscount; i++) {
if (!cctx->session[i].used && cctx->session[i].valid) {
cctx->session[i].used = true;
@@ -934,7 +1060,7 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
break;
}
}
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
return session;
}
@@ -942,9 +1068,11 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
struct fastrpc_session_ctx *session)
{
- spin_lock(&cctx->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cctx->lock, flags);
session->used = false;
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
}
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
@@ -970,12 +1098,13 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
+ unsigned long flags;
fastrpc_release_current_dsp_process(fl);
- spin_lock(&cctx->lock);
+ spin_lock_irqsave(&cctx->lock, flags);
list_del(&fl->user);
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
if (fl->init_mem)
fastrpc_buf_free(fl->init_mem);
@@ -1003,6 +1132,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
struct fastrpc_user *fl = NULL;
+ unsigned long flags;
fl = kzalloc(sizeof(*fl), GFP_KERNEL);
if (!fl)
@@ -1026,9 +1156,9 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
return -EBUSY;
}
- spin_lock(&cctx->lock);
+ spin_lock_irqsave(&cctx->lock, flags);
list_add_tail(&fl->user, &cctx->users);
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
return 0;
}
@@ -1184,6 +1314,8 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
struct fastrpc_session_ctx *sess;
struct device *dev = &pdev->dev;
int i, sessions = 0;
+ unsigned long flags;
+ int rc;
cctx = dev_get_drvdata(dev->parent);
if (!cctx)
@@ -1191,7 +1323,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
- spin_lock(&cctx->lock);
+ spin_lock_irqsave(&cctx->lock, flags);
sess = &cctx->session[cctx->sesscount];
sess->used = false;
sess->valid = true;
@@ -1212,8 +1344,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
}
}
cctx->sesscount++;
- spin_unlock(&cctx->lock);
- dma_set_mask(dev, DMA_BIT_MASK(32));
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ rc = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(dev, "32-bit DMA enable failed\n");
+ return rc;
+ }
return 0;
}
@@ -1222,16 +1358,17 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
+ unsigned long flags;
int i;
- spin_lock(&cctx->lock);
+ spin_lock_irqsave(&cctx->lock, flags);
for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
if (cctx->session[i].sid == sess->sid) {
cctx->session[i].valid = false;
cctx->sesscount--;
}
}
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
return 0;
}
@@ -1313,11 +1450,12 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
struct fastrpc_user *user;
+ unsigned long flags;
- spin_lock(&cctx->lock);
+ spin_lock_irqsave(&cctx->lock, flags);
list_for_each_entry(user, &cctx->users, user)
fastrpc_notify_users(user);
- spin_unlock(&cctx->lock);
+ spin_unlock_irqrestore(&cctx->lock, flags);
misc_deregister(&cctx->miscdev);
of_platform_depopulate(&rpdev->dev);
@@ -1349,7 +1487,13 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
ctx->retval = rsp->retval;
complete(&ctx->work);
- fastrpc_context_put(ctx);
+
+ /*
+ * The DMA buffer associated with the context cannot be freed in
+ * interrupt context, so schedule it through a worker thread to
+ * avoid a kernel BUG.
+ */
+ schedule_work(&ctx->put_work);
return 0;
}
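
The new olaps_cmp()/fastrpc_get_buff_overlaps() pair in the fastrpc patch above sorts the invoke buffers by ascending start address (ties broken by descending end address) and then keeps a running max_end, so a buffer that starts inside an earlier range only contributes its non-overlapping tail to the payload. The standalone sketch below reproduces that pass on three made-up buffer ranges; the driver's extra bookkeeping (raix and offset) is omitted.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct olap {
    uint64_t start, end;   /* [start, end) of the user buffer      */
    uint64_t mstart, mend; /* range this buffer actually adds       */
};

#define CMP(a, b) ((a) == (b) ? 0 : (a) < (b) ? -1 : 1)

/* Lowest start first; for equal starts, highest end first. */
static int olaps_cmp(const void *a, const void *b)
{
    const struct olap *pa = a, *pb = b;
    int st = CMP(pa->start, pb->start);
    int ed = CMP(pb->end, pa->end);

    return st == 0 ? ed : st;
}

static void compute_overlaps(struct olap *o, int n)
{
    uint64_t max_end = 0;
    int i;

    qsort(o, n, sizeof(*o), olaps_cmp);

    for (i = 0; i < n; i++) {
        if (o[i].start < max_end) {
            /* Overlaps what came before: only the tail is new. */
            o[i].mstart = max_end;
            o[i].mend = o[i].end;
            if (o[i].end > max_end)
                max_end = o[i].end;
            else
                o[i].mstart = o[i].mend = 0; /* fully contained */
        } else {
            o[i].mstart = o[i].start;
            o[i].mend = o[i].end;
            max_end = o[i].end;
        }
    }
}

int main(void)
{
    /* Hypothetical buffers: 0x1000-0x1800, 0x1400-0x2000, 0x3000-0x3100 */
    struct olap o[] = {
        { 0x1000, 0x1800 }, { 0x1400, 0x2000 }, { 0x3000, 0x3100 },
    };
    int i;

    compute_overlaps(o, 3);
    for (i = 0; i < 3; i++)
        printf("buf %d contributes [0x%llx, 0x%llx)\n", i,
               (unsigned long long)o[i].mstart,
               (unsigned long long)o[i].mend);
    /* -> [0x1000,0x1800), [0x1800,0x2000), [0x3000,0x3100) */
    return 0;
}

The second buffer overlaps the first by 0x400 bytes, so only its tail is counted; this is what lets fastrpc_get_payload_size() avoid reserving the overlapping region twice.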
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 7c713e01d198..6f7e39f07811 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -227,7 +227,7 @@ static int ddcb_info_show(struct seq_file *s, void *unused)
seq_puts(s, "DDCB QUEUE:\n");
seq_printf(s, " ddcb_max: %d\n"
" ddcb_daddr: %016llx - %016llx\n"
- " ddcb_vaddr: %016llx\n"
+ " ddcb_vaddr: %p\n"
" ddcbs_in_flight: %u\n"
" ddcbs_max_in_flight: %u\n"
" ddcbs_completed: %u\n"
@@ -237,7 +237,7 @@ static int ddcb_info_show(struct seq_file *s, void *unused)
queue->ddcb_max, (long long)queue->ddcb_daddr,
(long long)queue->ddcb_daddr +
(queue->ddcb_max * DDCB_LENGTH),
- (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight,
+ queue->ddcb_vaddr, queue->ddcbs_in_flight,
queue->ddcbs_max_in_flight, queue->ddcbs_completed,
queue->return_on_busy, queue->wait_on_busy,
cd->irqs_processed);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 25265fd0fd6e..89cff9d1012b 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -603,7 +603,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* pin user pages in memory */
rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
- m->write, /* readable/writable */
+ m->write ? FOLL_WRITE : 0, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_get_user_pages;
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index c6592db59b25..f8e85243d672 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -6,7 +6,7 @@ obj-m := habanalabs.o
habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
- command_submission.o mmu.o
+ command_submission.o mmu.o firmware_if.o pci.o
habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
index 85f75806a9a7..e495f44064fa 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -13,7 +13,7 @@
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
- hdev->asic_funcs->dma_free_coherent(hdev, cb->size,
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
(void *) (uintptr_t) cb->kernel_address,
cb->bus_address);
kfree(cb);
@@ -66,10 +66,10 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
return NULL;
if (ctx_id == HL_KERNEL_ASID_ID)
- p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address, GFP_ATOMIC);
else
- p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address,
GFP_USER | __GFP_ZERO);
if (!p) {
@@ -214,6 +214,13 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
u64 handle;
int rc;
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is %s. Can't execute CB IOCTL\n",
+ atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+ return -EBUSY;
+ }
+
switch (args->in.op) {
case HL_CB_OP_CREATE:
rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 19c84214a7ea..6fe785e26859 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -93,7 +93,6 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
parser.user_cb_size = job->user_cb_size;
parser.ext_queue = job->ext_queue;
job->patched_cb = NULL;
- parser.use_virt_addr = hdev->mmu_enable;
rc = hdev->asic_funcs->cs_parser(hdev, &parser);
if (job->ext_queue) {
@@ -261,7 +260,8 @@ static void cs_timedout(struct work_struct *work)
ctx_asid = cs->ctx->asid;
/* TODO: add information about last signaled seq and last emitted seq */
- dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);
+ dev_err(hdev->dev, "User %d command submission %llu got stuck!\n",
+ ctx_asid, cs->sequence);
cs_put(cs);
@@ -600,20 +600,20 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
void __user *chunks;
u32 num_chunks;
u64 cs_seq = ULONG_MAX;
- int rc, do_restore;
+ int rc, do_ctx_switch;
bool need_soft_reset = false;
if (hl_device_disabled_or_in_reset(hdev)) {
- dev_warn(hdev->dev,
+ dev_warn_ratelimited(hdev->dev,
"Device is %s. Can't submit new CS\n",
atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
rc = -EBUSY;
goto out;
}
- do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
+ do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
- if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+ if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
long ret;
chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
@@ -621,7 +621,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
mutex_lock(&hpriv->restore_phase_mutex);
- if (do_restore) {
+ if (do_ctx_switch) {
rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
if (rc) {
dev_err_ratelimited(hdev->dev,
@@ -677,18 +677,18 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
}
}
- ctx->thread_restore_wait_token = 1;
- } else if (!ctx->thread_restore_wait_token) {
+ ctx->thread_ctx_switch_wait_token = 1;
+ } else if (!ctx->thread_ctx_switch_wait_token) {
u32 tmp;
rc = hl_poll_timeout_memory(hdev,
- (u64) (uintptr_t) &ctx->thread_restore_wait_token,
+ (u64) (uintptr_t) &ctx->thread_ctx_switch_wait_token,
jiffies_to_usecs(hdev->timeout_jiffies),
&tmp);
if (rc || !tmp) {
dev_err(hdev->dev,
- "restore phase hasn't finished in time\n");
+ "context switch phase didn't finish in time\n");
rc = -ETIMEDOUT;
goto out;
}
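
The rename above (thread_restore_token becoming thread_ctx_switch_token) keeps the underlying scheme: the token starts at 1 and atomic_cmpxchg(&token, 1, 0) lets exactly one submitting thread claim the one-time context-switch work while later submitters skip it. A standalone sketch of that claim using C11 atomics follows; atomic_compare_exchange_strong() reports whether the swap happened, whereas the kernel's atomic_cmpxchg() returns the old value, but the 1-to-0 transition behaves the same either way.

#include <stdatomic.h>
#include <stdio.h>

/* Token starts at 1; the first caller to swap it to 0 wins and performs
 * the one-time work (here: the context switch). Later callers see 0.
 */
static atomic_int ctx_switch_token = 1;

static void submit(int thread_id)
{
    int expected = 1;
    int do_ctx_switch =
        atomic_compare_exchange_strong(&ctx_switch_token, &expected, 0);

    if (do_ctx_switch)
        printf("thread %d performs the context switch\n", thread_id);
    else
        printf("thread %d skips it\n", thread_id);
}

int main(void)
{
    submit(1); /* performs the context switch */
    submit(2); /* skips it */
    return 0;
}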
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 619ace1c4ef7..4804cdcf4c48 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -106,8 +106,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
ctx->cs_sequence = 1;
spin_lock_init(&ctx->cs_lock);
- atomic_set(&ctx->thread_restore_token, 1);
- ctx->thread_restore_wait_token = 0;
+ atomic_set(&ctx->thread_ctx_switch_token, 1);
+ ctx->thread_ctx_switch_wait_token = 0;
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 974a87789bd8..a4447699ff4e 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -505,22 +505,97 @@ err:
return -EINVAL;
}
+static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
+ u64 *phys_addr)
+{
+ struct hl_ctx *ctx = hdev->user_ctx;
+ u64 hop_addr, hop_pte_addr, hop_pte;
+ int rc = 0;
+
+ if (!ctx) {
+ dev_err(hdev->dev, "no ctx available\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->mmu_lock);
+
+ /* hop 0 */
+ hop_addr = get_hop0_addr(ctx);
+ hop_pte_addr = get_hop0_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ /* hop 1 */
+ hop_addr = get_next_hop_addr(hop_pte);
+ if (hop_addr == ULLONG_MAX)
+ goto not_mapped;
+ hop_pte_addr = get_hop1_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ /* hop 2 */
+ hop_addr = get_next_hop_addr(hop_pte);
+ if (hop_addr == ULLONG_MAX)
+ goto not_mapped;
+ hop_pte_addr = get_hop2_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ /* hop 3 */
+ hop_addr = get_next_hop_addr(hop_pte);
+ if (hop_addr == ULLONG_MAX)
+ goto not_mapped;
+ hop_pte_addr = get_hop3_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ if (!(hop_pte & LAST_MASK)) {
+ /* hop 4 */
+ hop_addr = get_next_hop_addr(hop_pte);
+ if (hop_addr == ULLONG_MAX)
+ goto not_mapped;
+ hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+ }
+
+ if (!(hop_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
+ *phys_addr = (hop_pte & PTE_PHYS_ADDR_MASK) | (virt_addr & OFFSET_MASK);
+
+ goto out;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+ rc = -EINVAL;
+out:
+ mutex_unlock(&ctx->mmu_lock);
+ return rc;
+}
+
static ssize_t hl_data_read32(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
char tmp_buf[32];
+ u64 addr = entry->addr;
u32 val;
ssize_t rc;
if (*ppos)
return 0;
- rc = hdev->asic_funcs->debugfs_read32(hdev, entry->addr, &val);
+ if (addr >= prop->va_space_dram_start_address &&
+ addr < prop->va_space_dram_end_address &&
+ hdev->mmu_enable &&
+ hdev->dram_supports_virtual_memory) {
+ rc = device_va_to_pa(hdev, entry->addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
if (rc) {
- dev_err(hdev->dev, "Failed to read from 0x%010llx\n",
- entry->addr);
+ dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
return rc;
}
@@ -536,6 +611,8 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 addr = entry->addr;
u32 value;
ssize_t rc;
@@ -543,10 +620,19 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
if (rc)
return rc;
- rc = hdev->asic_funcs->debugfs_write32(hdev, entry->addr, value);
+ if (addr >= prop->va_space_dram_start_address &&
+ addr < prop->va_space_dram_end_address &&
+ hdev->mmu_enable &&
+ hdev->dram_supports_virtual_memory) {
+ rc = device_va_to_pa(hdev, entry->addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
if (rc) {
dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
- value, entry->addr);
+ value, addr);
return rc;
}
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 77d51be66c7e..91a9e47a3482 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -5,11 +5,14 @@
* All Rights Reserved.
*/
+#define pr_fmt(fmt) "habanalabs: " fmt
+
#include "habanalabs.h"
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/hwmon.h>
+#include <uapi/misc/habanalabs.h>
#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
@@ -21,6 +24,20 @@ bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
return false;
}
+enum hl_device_status hl_device_status(struct hl_device *hdev)
+{
+ enum hl_device_status status;
+
+ if (hdev->disabled)
+ status = HL_DEVICE_STATUS_MALFUNCTION;
+ else if (atomic_read(&hdev->in_reset))
+ status = HL_DEVICE_STATUS_IN_RESET;
+ else
+ status = HL_DEVICE_STATUS_OPERATIONAL;
+
+ return status;
+};
+
static void hpriv_release(struct kref *ref)
{
struct hl_fpriv *hpriv;
@@ -498,11 +515,8 @@ disable_device:
return rc;
}
-static void hl_device_hard_reset_pending(struct work_struct *work)
+static void device_kill_open_processes(struct hl_device *hdev)
{
- struct hl_device_reset_work *device_reset_work =
- container_of(work, struct hl_device_reset_work, reset_work);
- struct hl_device *hdev = device_reset_work->hdev;
u16 pending_total, pending_cnt;
struct task_struct *task = NULL;
@@ -537,6 +551,12 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
}
}
+ /* We killed the open users, but because the driver cleans up after the
+ * user contexts are closed (e.g. mmu mappings), we need to wait again
+ * to make sure the cleaning phase is finished before continuing with
+ * the reset
+ */
+
pending_cnt = pending_total;
while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
@@ -552,6 +572,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
mutex_unlock(&hdev->fd_open_cnt_lock);
+}
+
+static void device_hard_reset_pending(struct work_struct *work)
+{
+ struct hl_device_reset_work *device_reset_work =
+ container_of(work, struct hl_device_reset_work, reset_work);
+ struct hl_device *hdev = device_reset_work->hdev;
+
+ device_kill_open_processes(hdev);
+
hl_device_reset(hdev, true, true);
kfree(device_reset_work);
@@ -613,6 +643,8 @@ again:
if ((hard_reset) && (!from_hard_reset_thread)) {
struct hl_device_reset_work *device_reset_work;
+ hdev->hard_reset_pending = true;
+
if (!hdev->pdev) {
dev_err(hdev->dev,
"Reset action is NOT supported in simulator\n");
@@ -620,8 +652,6 @@ again:
goto out_err;
}
- hdev->hard_reset_pending = true;
-
device_reset_work = kzalloc(sizeof(*device_reset_work),
GFP_ATOMIC);
if (!device_reset_work) {
@@ -635,7 +665,7 @@ again:
* from a dedicated work
*/
INIT_WORK(&device_reset_work->reset_work,
- hl_device_hard_reset_pending);
+ device_hard_reset_pending);
device_reset_work->hdev = hdev;
schedule_work(&device_reset_work->reset_work);
@@ -663,17 +693,9 @@ again:
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
- if (hard_reset) {
- /* Release kernel context */
- if (hl_ctx_put(hdev->kernel_ctx) != 1) {
- dev_err(hdev->dev,
- "kernel ctx is alive during hard reset\n");
- rc = -EBUSY;
- goto out_err;
- }
-
+ /* Release kernel context */
+ if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
hdev->kernel_ctx = NULL;
- }
/* Reset the H/W. It will be in idle state after this returns */
hdev->asic_funcs->hw_fini(hdev, hard_reset);
@@ -688,16 +710,24 @@ again:
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
- /* Make sure the setup phase for the user context will run again */
+ /* Make sure the context switch phase will run again */
if (hdev->user_ctx) {
- atomic_set(&hdev->user_ctx->thread_restore_token, 1);
- hdev->user_ctx->thread_restore_wait_token = 0;
+ atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
+ hdev->user_ctx->thread_ctx_switch_wait_token = 0;
}
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
hdev->device_cpu_disabled = false;
+ hdev->hard_reset_pending = false;
+
+ if (hdev->kernel_ctx) {
+ dev_crit(hdev->dev,
+ "kernel ctx was alive during hard reset, something is terribly wrong\n");
+ rc = -EBUSY;
+ goto out_err;
+ }
/* Allocate the kernel context */
hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
@@ -752,8 +782,6 @@ again:
}
hl_set_max_power(hdev, hdev->max_power);
-
- hdev->hard_reset_pending = false;
} else {
rc = hdev->asic_funcs->soft_reset_late_init(hdev);
if (rc) {
@@ -1030,11 +1058,22 @@ void hl_device_fini(struct hl_device *hdev)
WARN(1, "Failed to remove device because reset function did not finish\n");
return;
}
- };
+ }
/* Mark device as disabled */
hdev->disabled = true;
+ /*
+ * Flush anyone that is inside the critical section of enqueue
+ * jobs to the H/W
+ */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ hdev->hard_reset_pending = true;
+
+ device_kill_open_processes(hdev);
+
hl_hwmon_fini(hdev);
device_late_fini(hdev);
@@ -1108,7 +1147,13 @@ int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr,
* either by the direct access of the device or by another core
*/
u32 *paddr = (u32 *) (uintptr_t) addr;
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+ ktime_t timeout;
+
+ /* timeout should be longer when working with simulator */
+ if (!hdev->pdev)
+ timeout_us *= 10;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
might_sleep();
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
new file mode 100644
index 000000000000..eda5d7fcb79f
--- /dev/null
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/firmware.h>
+#include <linux/genalloc.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+/**
+ * hl_fw_push_fw_to_device() - Push FW code to device.
+ * @hdev: pointer to hl_device structure.
+ * @fw_name: name of the firmware image to load.
+ * @dst: IO-mapped device address the firmware is copied to.
+ *
+ * Copy fw code from firmware file to device memory.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+ void __iomem *dst)
+{
+ const struct firmware *fw;
+ const u64 *fw_data;
+ size_t fw_size, i;
+ int rc;
+
+ rc = request_firmware(&fw, fw_name, hdev->dev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request %s\n", fw_name);
+ goto out;
+ }
+
+ fw_size = fw->size;
+ if ((fw_size % 4) != 0) {
+ dev_err(hdev->dev, "illegal %s firmware size %zu\n",
+ fw_name, fw_size);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
+
+ fw_data = (const u64 *) fw->data;
+
+ if ((fw->size % 8) != 0)
+ fw_size -= 8;
+
+ for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
+ if (!(i & (0x80000 - 1))) {
+ dev_dbg(hdev->dev,
+ "copied so far %zu out of %zu for %s firmware",
+ i, fw_size, fw_name);
+ usleep_range(20, 100);
+ }
+
+ writeq(*fw_data, dst);
+ }
+
+ if ((fw->size % 8) != 0)
+ writel(*(const u32 *) fw_data, dst);
+
+out:
+ release_firmware(fw);
+ return rc;
+}
+
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
+{
+ struct armcp_packet pkt = {};
+
+ pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
+ sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
+}
+
+int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
+ u16 len, u32 timeout, long *result)
+{
+ struct armcp_packet *pkt;
+ dma_addr_t pkt_dma_addr;
+ u32 tmp;
+ int rc = 0;
+
+ if (len > HL_CPU_CB_SIZE) {
+ dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
+ len);
+ return -ENOMEM;
+ }
+
+ pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
+ &pkt_dma_addr);
+ if (!pkt) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for packet to CPU\n");
+ return -ENOMEM;
+ }
+
+ memcpy(pkt, msg, len);
+
+ mutex_lock(&hdev->send_cpu_message_lock);
+
+ if (hdev->disabled)
+ goto out;
+
+ if (hdev->device_cpu_disabled) {
+ rc = -EIO;
+ goto out;
+ }
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
+ goto out;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
+ timeout, &tmp);
+
+ hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
+ hdev->device_cpu_disabled = true;
+ goto out;
+ }
+
+ if (tmp == ARMCP_PACKET_FENCE_VAL) {
+ u32 ctl = le32_to_cpu(pkt->ctl);
+
+ rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
+ if (rc) {
+ dev_err(hdev->dev,
+ "F/W ERROR %d for CPU packet %d\n",
+ rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
+ >> ARMCP_PKT_CTL_OPCODE_SHIFT);
+ rc = -EINVAL;
+ } else if (result) {
+ *result = (long) le64_to_cpu(pkt->result);
+ }
+ } else {
+ dev_err(hdev->dev, "CPU packet wrong fence value\n");
+ rc = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
+
+ return rc;
+}
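/*
 * Editorial aside, not part of this patch: a minimal sketch of how the
 * 32-bit ctl word of an armcp_packet is packed and unpacked by the helpers
 * above. Only the roles of the shift/mask macros (an opcode field and a
 * firmware return-code field) are assumed here, exactly as
 * hl_fw_send_cpu_message() uses them; the numeric values live in the
 * driver's ArmCP interface header.
 */
static inline u32 example_armcp_pack_opcode(u32 opcode)
{
	/* what hl_fw_send_pci_access_msg() does before cpu_to_le32() */
	return opcode << ARMCP_PKT_CTL_OPCODE_SHIFT;
}

static inline u32 example_armcp_unpack_rc(u32 ctl)
{
	/* what hl_fw_send_cpu_message() does after le32_to_cpu(pkt->ctl) */
	return (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
}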
+
+int hl_fw_test_cpu_queue(struct hl_device *hdev)
+{
+ struct armcp_packet test_pkt = {};
+ long result;
+ int rc;
+
+ test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
+ sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (!rc) {
+ if (result == ARMCP_PACKET_FENCE_VAL)
+ dev_info(hdev->dev,
+ "queue test on CPU queue succeeded\n");
+ else
+ dev_err(hdev->dev,
+ "CPU queue test failed (0x%08lX)\n", result);
+ } else {
+ dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
+ }
+
+ return rc;
+}
+
+void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ u64 kernel_addr;
+
+ /* roundup to HL_CPU_PKT_SIZE */
+ size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
+
+ kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
+
+ *dma_handle = hdev->cpu_accessible_dma_address +
+ (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
+
+ return (void *) (uintptr_t) kernel_addr;
+}
+
+void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+ void *vaddr)
+{
+ /* roundup to HL_CPU_PKT_SIZE */
+ size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
+
+ gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
+ size);
+}
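/*
 * Editorial aside, not part of this patch: the two pool helpers above round
 * the request up with a power-of-two mask, assuming HL_CPU_PKT_SIZE is a
 * power of two and HL_CPU_PKT_MASK == ~(HL_CPU_PKT_SIZE - 1). For example,
 * if HL_CPU_PKT_SIZE were 64, a 100-byte request would become
 * (100 + 63) & ~63 == 128, so that allocation and free always operate on the
 * same rounded size.
 */
static inline size_t example_roundup_to_cpu_pkt(size_t size)
{
	/* same arithmetic as hl_fw_cpu_accessible_dma_pool_alloc()/free() */
	return (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
}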
+
+int hl_fw_send_heartbeat(struct hl_device *hdev)
+{
+ struct armcp_packet hb_pkt = {};
+ long result;
+ int rc;
+
+ hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
+ sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
+ rc = -EIO;
+
+ return rc;
+}
+
+int hl_fw_armcp_info_get(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct armcp_packet pkt = {};
+ void *armcp_info_cpu_addr;
+ dma_addr_t armcp_info_dma_addr;
+ long result;
+ int rc;
+
+ armcp_info_cpu_addr =
+ hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ sizeof(struct armcp_info),
+ &armcp_info_dma_addr);
+ if (!armcp_info_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for ArmCP info packet\n");
+ return -ENOMEM;
+ }
+
+ memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(armcp_info_dma_addr);
+ pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_ARMCP_INFO_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send armcp info pkt, error %d\n", rc);
+ goto out;
+ }
+
+ memcpy(&prop->armcp_info, armcp_info_cpu_addr,
+ sizeof(prop->armcp_info));
+
+ rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to build hwmon channel info, error %d\n", rc);
+ rc = -EFAULT;
+ goto out;
+ }
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+ sizeof(struct armcp_info), armcp_info_cpu_addr);
+
+ return rc;
+}
+
+int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
+{
+ struct armcp_packet pkt = {};
+ void *eeprom_info_cpu_addr;
+ dma_addr_t eeprom_info_dma_addr;
+ long result;
+ int rc;
+
+ eeprom_info_cpu_addr =
+ hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ max_size, &eeprom_info_dma_addr);
+ if (!eeprom_info_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for EEPROM info packet\n");
+ return -ENOMEM;
+ }
+
+ memset(eeprom_info_cpu_addr, 0, max_size);
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
+ pkt.data_max_size = cpu_to_le32(max_size);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_ARMCP_EEPROM_TIMEOUT_USEC, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send armcp EEPROM pkt, error %d\n", rc);
+ goto out;
+ }
+
+ /* result contains the actual size */
+ memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
+ eeprom_info_cpu_addr);
+
+ return rc;
+}
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile
index e458e5ba500b..131432f677e2 100644
--- a/drivers/misc/habanalabs/goya/Makefile
+++ b/drivers/misc/habanalabs/goya/Makefile
@@ -1,3 +1,4 @@
subdir-ccflags-y += -I$(src)
-HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o
+HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o \
+ goya/goya_coresight.o
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index ea979ebd62fb..a582e29c1ee4 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -12,10 +12,8 @@
#include <linux/pci.h>
#include <linux/genalloc.h>
-#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/io-64-nonatomic-hi-lo.h>
/*
* GOYA security scheme:
@@ -71,7 +69,7 @@
*
*/
-#define GOYA_MMU_REGS_NUM 61
+#define GOYA_MMU_REGS_NUM 63
#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
@@ -80,15 +78,12 @@
#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
-#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
-#define GOYA_MAX_INITIATORS 20
-
#define GOYA_MAX_STRING_LEN 20
#define GOYA_CB_POOL_CB_CNT 512
@@ -173,12 +168,12 @@ static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
mmMME_SBA_CONTROL_DATA,
mmMME_SBB_CONTROL_DATA,
mmMME_SBC_CONTROL_DATA,
- mmMME_WBC_CONTROL_DATA
+ mmMME_WBC_CONTROL_DATA,
+ mmPCIE_WRAP_PSOC_ARUSER,
+ mmPCIE_WRAP_PSOC_AWUSER
};
-#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121
-
-static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
+static u32 goya_all_events[] = {
GOYA_ASYNC_EVENT_ID_PCIE_IF,
GOYA_ASYNC_EVENT_ID_TPC0_ECC,
GOYA_ASYNC_EVENT_ID_TPC1_ECC,
@@ -302,14 +297,7 @@ static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
};
-static int goya_armcp_info_get(struct hl_device *hdev);
-static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
-static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
-static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
-static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
- u64 phys_addr);
-
-static void goya_get_fixed_properties(struct hl_device *hdev)
+void goya_get_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
@@ -357,7 +345,6 @@ static void goya_get_fixed_properties(struct hl_device *hdev)
prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
- prop->host_phys_base_address = HOST_PHYS_BASE;
prop->va_space_host_start_address = VA_HOST_SPACE_START;
prop->va_space_host_end_address = VA_HOST_SPACE_END;
prop->va_space_dram_start_address = VA_DDR_SPACE_START;
@@ -367,24 +354,13 @@ static void goya_get_fixed_properties(struct hl_device *hdev)
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
+ prop->high_pll = PLL_HIGH_DEFAULT;
prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
prop->max_power_default = MAX_POWER_DEFAULT;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
-
- prop->high_pll = PLL_HIGH_DEFAULT;
-}
-
-int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
-{
- struct armcp_packet pkt;
-
- memset(&pkt, 0, sizeof(pkt));
-
- pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
-
- return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
- sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
+ prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
+ prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
}
/*
@@ -398,199 +374,40 @@ int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
*/
static int goya_pci_bars_map(struct hl_device *hdev)
{
- struct pci_dev *pdev = hdev->pdev;
+ static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
+ bool is_wc[3] = {false, false, true};
int rc;
- rc = pci_request_regions(pdev, HL_NAME);
- if (rc) {
- dev_err(hdev->dev, "Cannot obtain PCI resources\n");
+ rc = hl_pci_bars_map(hdev, name, is_wc);
+ if (rc)
return rc;
- }
-
- hdev->pcie_bar[SRAM_CFG_BAR_ID] =
- pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
- if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
- dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
- rc = -ENODEV;
- goto err_release_regions;
- }
-
- hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
- if (!hdev->pcie_bar[MSIX_BAR_ID]) {
- dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
- rc = -ENODEV;
- goto err_unmap_sram_cfg;
- }
-
- hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
- if (!hdev->pcie_bar[DDR_BAR_ID]) {
- dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
- rc = -ENODEV;
- goto err_unmap_msix;
- }
hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (CFG_BASE - SRAM_BASE_ADDR);
-
- return 0;
-
-err_unmap_msix:
- iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
-err_unmap_sram_cfg:
- iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
-err_release_regions:
- pci_release_regions(pdev);
-
- return rc;
-}
-
-/*
- * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
- *
- * @hdev: pointer to hl_device structure
- *
- * Release all PCI BARS and unmap their virtual addresses
- *
- */
-static void goya_pci_bars_unmap(struct hl_device *hdev)
-{
- struct pci_dev *pdev = hdev->pdev;
-
- iounmap(hdev->pcie_bar[DDR_BAR_ID]);
- iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
- iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
- pci_release_regions(pdev);
-}
-
-/*
- * goya_elbi_write - Write through the ELBI interface
- *
- * @hdev: pointer to hl_device structure
- *
- * return 0 on success, -1 on failure
- *
- */
-static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
-{
- struct pci_dev *pdev = hdev->pdev;
- ktime_t timeout;
- u32 val;
-
- /* Clear previous status */
- pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
-
- pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
- pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
- pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
- PCI_CONFIG_ELBI_CTRL_WRITE);
-
- timeout = ktime_add_ms(ktime_get(), 10);
- for (;;) {
- pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
- if (val & PCI_CONFIG_ELBI_STS_MASK)
- break;
- if (ktime_compare(ktime_get(), timeout) > 0) {
- pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
- &val);
- break;
- }
- usleep_range(300, 500);
- }
-
- if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
- return 0;
-
- if (val & PCI_CONFIG_ELBI_STS_ERR) {
- dev_err(hdev->dev, "Error writing to ELBI\n");
- return -EIO;
- }
-
- if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
- dev_err(hdev->dev, "ELBI write didn't finish in time\n");
- return -EIO;
- }
-
- dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
- return -EIO;
-}
-
-/*
- * goya_iatu_write - iatu write routine
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
-{
- u32 dbi_offset;
- int rc;
-
- dbi_offset = addr & 0xFFF;
-
- rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
- rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);
-
- if (rc)
- return -EIO;
+ (CFG_BASE - SRAM_BASE_ADDR);
return 0;
}
-static void goya_reset_link_through_bridge(struct hl_device *hdev)
-{
- struct pci_dev *pdev = hdev->pdev;
- struct pci_dev *parent_port;
- u16 val;
-
- parent_port = pdev->bus->self;
- pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
- val |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
- ssleep(1);
-
- val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
- pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
- ssleep(3);
-}
-
-/*
- * goya_set_ddr_bar_base - set DDR bar to map specific device address
- *
- * @hdev: pointer to hl_device structure
- * @addr: address in DDR. Must be aligned to DDR bar size
- *
- * This function configures the iATU so that the DDR bar will start at the
- * specified addr.
- *
- */
-static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
+static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
+ u64 old_addr = addr;
int rc;
if ((goya) && (goya->ddr_bar_cur_addr == addr))
- return 0;
+ return old_addr;
/* Inbound Region 1 - Bar 4 - Point to DDR */
- rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
- rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
- rc |= goya_iatu_write(hdev, 0x300, 0);
- /* Enable + Bar match + match enable + Bar 4 */
- rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);
-
- /* Return the DBI window to the default location */
- rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
- rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
-
- if (rc) {
- dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
- return -EIO;
- }
+ rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
+ if (rc)
+ return U64_MAX;
- if (goya)
+ if (goya) {
+ old_addr = goya->ddr_bar_cur_addr;
goya->ddr_bar_cur_addr = addr;
+ }
- return 0;
+ return old_addr;
}
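/*
 * Editorial aside, not part of this patch: returning the previous BAR base
 * (or U64_MAX on failure) instead of an error code lets callers save and
 * restore the mapping around a temporary access, as goya_debugfs_read32()
 * and goya_debugfs_write32() further down in this patch do:
 *
 *	old = goya_set_ddr_bar_base(hdev, new_base);
 *	if (old != U64_MAX) {
 *		... access through hdev->pcie_bar[DDR_BAR_ID] ...
 *		goya_set_ddr_bar_base(hdev, old);
 *	}
 */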
/*
@@ -603,40 +420,8 @@ static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
*/
static int goya_init_iatu(struct hl_device *hdev)
{
- int rc;
-
- /* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
- rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
- rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
- rc |= goya_iatu_write(hdev, 0x100, 0);
- /* Enable + Bar match + match enable */
- rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);
-
- /* Inbound Region 1 - Bar 4 - Point to DDR */
- rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
-
- /* Outbound Region 0 - Point to Host */
- rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
- rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
- rc |= goya_iatu_write(hdev, 0x010,
- lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
- rc |= goya_iatu_write(hdev, 0x014, 0);
- rc |= goya_iatu_write(hdev, 0x018, 0);
- rc |= goya_iatu_write(hdev, 0x020,
- upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
- /* Increase region size */
- rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
- /* Enable */
- rc |= goya_iatu_write(hdev, 0x004, 0x80000000);
-
- /* Return the DBI window to the default location */
- rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
- rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
-
- if (rc)
- return -EIO;
-
- return 0;
+ return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
+ HOST_PHYS_BASE, HOST_PHYS_SIZE);
}
/*
@@ -682,52 +467,9 @@ static int goya_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
- /* set DMA mask for GOYA */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
- if (rc) {
- dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(hdev->dev,
- "Unable to set pci dma mask to 32 bits\n");
- return rc;
- }
- }
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
- if (rc) {
- dev_warn(hdev->dev,
- "Unable to set pci consistent dma mask to 39 bits\n");
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(hdev->dev,
- "Unable to set pci consistent dma mask to 32 bits\n");
- return rc;
- }
- }
-
- if (hdev->reset_pcilink)
- goya_reset_link_through_bridge(hdev);
-
- rc = pci_enable_device_mem(pdev);
- if (rc) {
- dev_err(hdev->dev, "can't enable PCI device\n");
+ rc = hl_pci_init(hdev, 39);
+ if (rc)
return rc;
- }
-
- pci_set_master(pdev);
-
- rc = goya_init_iatu(hdev);
- if (rc) {
- dev_err(hdev->dev, "Failed to initialize iATU\n");
- goto disable_device;
- }
-
- rc = goya_pci_bars_map(hdev);
- if (rc) {
- dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
- goto disable_device;
- }
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
@@ -737,12 +479,6 @@ static int goya_early_init(struct hl_device *hdev)
}
return 0;
-
-disable_device:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
-
- return rc;
}
/*
@@ -755,14 +491,33 @@ disable_device:
*/
static int goya_early_fini(struct hl_device *hdev)
{
- goya_pci_bars_unmap(hdev);
-
- pci_clear_master(hdev->pdev);
- pci_disable_device(hdev->pdev);
+ hl_pci_fini(hdev);
return 0;
}
+static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
+{
+ /* mask to zero the MMBP and ASID bits */
+ WREG32_AND(reg, ~0x7FF);
+ WREG32_OR(reg, asid);
+}
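/*
 * Editorial aside, not part of this patch: ~0x7FF clears an 11-bit field and
 * the ASID is then OR-ed into its low bits. Assuming, as the mask suggests,
 * that the field holds the ASID together with the MMU-bypass (MMBP) bit,
 * preparing a register that currently reads 0xABCD1234 for asid 5 yields
 * (0xABCD1234 & ~0x7FF) | 5 == 0xABCD1005, i.e. bypass cleared and ASID set.
 */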
+
+static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ if (secure)
+ WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
+ else
+ WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
+
+ RREG32(mmDMA_QM_0_GLBL_PROT);
+}
+
/*
* goya_fetch_psoc_frequency - Fetch PSOC frequency values
*
@@ -779,20 +534,12 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
}
-/*
- * goya_late_init - GOYA late initialization code
- *
- * @hdev: pointer to hl_device structure
- *
- * Get ArmCP info and send message to CPU to enable PCI access
- */
-static int goya_late_init(struct hl_device *hdev)
+int goya_late_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct goya_device *goya = hdev->asic_specific;
int rc;
- rc = goya->armcp_info_get(hdev);
+ rc = goya_armcp_info_get(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to get armcp info\n");
return rc;
@@ -804,7 +551,7 @@ static int goya_late_init(struct hl_device *hdev)
*/
WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
- rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
if (rc) {
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
return rc;
@@ -830,7 +577,7 @@ static int goya_late_init(struct hl_device *hdev)
return 0;
disable_pci_access:
- goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
return rc;
}
@@ -879,9 +626,6 @@ static int goya_sw_init(struct hl_device *hdev)
if (!goya)
return -ENOMEM;
- goya->test_cpu_queue = goya_test_cpu_queue;
- goya->armcp_info_get = goya_armcp_info_get;
-
/* according to goya_init_iatu */
goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
@@ -901,45 +645,43 @@ static int goya_sw_init(struct hl_device *hdev)
}
hdev->cpu_accessible_dma_mem =
- hdev->asic_funcs->dma_alloc_coherent(hdev,
- CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
&hdev->cpu_accessible_dma_address,
GFP_KERNEL | __GFP_ZERO);
if (!hdev->cpu_accessible_dma_mem) {
- dev_err(hdev->dev,
- "failed to allocate %d of dma memory for CPU accessible memory space\n",
- CPU_ACCESSIBLE_MEM_SIZE);
rc = -ENOMEM;
goto free_dma_pool;
}
- hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1);
+ hdev->cpu_accessible_dma_pool = gen_pool_create(HL_CPU_PKT_SHIFT, -1);
if (!hdev->cpu_accessible_dma_pool) {
dev_err(hdev->dev,
"Failed to create CPU accessible DMA pool\n");
rc = -ENOMEM;
- goto free_cpu_pq_dma_mem;
+ goto free_cpu_dma_mem;
}
rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
(uintptr_t) hdev->cpu_accessible_dma_mem,
- CPU_ACCESSIBLE_MEM_SIZE, -1);
+ HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to CPU accessible DMA pool\n");
rc = -EFAULT;
- goto free_cpu_pq_pool;
+ goto free_cpu_accessible_dma_pool;
}
spin_lock_init(&goya->hw_queues_lock);
return 0;
-free_cpu_pq_pool:
+free_cpu_accessible_dma_pool:
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
-free_cpu_pq_dma_mem:
- hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
+free_cpu_dma_mem:
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
free_dma_pool:
@@ -962,7 +704,8 @@ static int goya_sw_fini(struct hl_device *hdev)
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
- hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
@@ -1056,11 +799,10 @@ static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
* Initialize the H/W registers of the QMAN DMA channels
*
*/
-static void goya_init_dma_qmans(struct hl_device *hdev)
+void goya_init_dma_qmans(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
struct hl_hw_queue *q;
- dma_addr_t bus_address;
int i;
if (goya->hw_cap_initialized & HW_CAP_DMA)
@@ -1069,10 +811,7 @@ static void goya_init_dma_qmans(struct hl_device *hdev)
q = &hdev->kernel_queues[0];
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
- bus_address = q->bus_address +
- hdev->asic_prop.host_phys_base_address;
-
- goya_init_dma_qman(hdev, i, bus_address);
+ goya_init_dma_qman(hdev, i, q->bus_address);
goya_init_dma_ch(hdev, i);
}
@@ -1209,11 +948,10 @@ static int goya_stop_external_queues(struct hl_device *hdev)
* Returns 0 on success
*
*/
-static int goya_init_cpu_queues(struct hl_device *hdev)
+int goya_init_cpu_queues(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
struct hl_eq *eq;
- dma_addr_t bus_address;
u32 status;
struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
int err;
@@ -1226,23 +964,22 @@ static int goya_init_cpu_queues(struct hl_device *hdev)
eq = &hdev->event_queue;
- bus_address = cpu_pq->bus_address +
- hdev->asic_prop.host_phys_base_address;
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0,
+ lower_32_bits(cpu_pq->bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1,
+ upper_32_bits(cpu_pq->bus_address));
- bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(eq->bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(eq->bus_address));
- bus_address = hdev->cpu_accessible_dma_address +
- hdev->asic_prop.host_phys_base_address;
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8,
+ lower_32_bits(hdev->cpu_accessible_dma_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9,
+ upper_32_bits(hdev->cpu_accessible_dma_address));
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE);
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);
/* Used for EQ CI */
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
@@ -1688,14 +1425,15 @@ static void goya_init_golden_registers(struct hl_device *hdev)
/*
* Workaround for H2 #HW-23 bug
- * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
- * to 16 on KMD DMA
- * We need to limit only these DMAs because the user can only read
+ * Set DMA max outstanding read requests to 240 on DMA CH 1.
+ * This limitation is still large enough to not affect Gen4 bandwidth.
+ * We need to only limit that DMA channel because the user can only read
* from Host using DMA CH 1
*/
- WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
+ WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
+
goya->hw_cap_initialized |= HW_CAP_GOLDEN;
}
@@ -1789,7 +1527,7 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
}
-static void goya_init_mme_qmans(struct hl_device *hdev)
+void goya_init_mme_qmans(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u32 so_base_lo, so_base_hi;
@@ -1896,7 +1634,7 @@ static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
}
-static void goya_init_tpc_qmans(struct hl_device *hdev)
+void goya_init_tpc_qmans(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u32 so_base_lo, so_base_hi;
@@ -2223,10 +1961,10 @@ static int goya_enable_msix(struct hl_device *hdev)
}
}
- irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
+ irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
rc = request_irq(irq, hl_irq_handler_eq, 0,
- goya_irq_name[EVENT_QUEUE_MSIX_IDX],
+ goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
&hdev->event_queue);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
@@ -2257,7 +1995,7 @@ static void goya_sync_irqs(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
synchronize_irq(pci_irq_vector(hdev->pdev, i));
- synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
+ synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
}
static void goya_disable_msix(struct hl_device *hdev)
@@ -2270,7 +2008,7 @@ static void goya_disable_msix(struct hl_device *hdev)
goya_sync_irqs(hdev);
- irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
+ irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
free_irq(irq, &hdev->event_queue);
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
@@ -2330,67 +2068,45 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
}
/*
- * goya_push_fw_to_device - Push FW code to device
- *
- * @hdev: pointer to hl_device structure
+ * goya_push_uboot_to_device() - Push u-boot FW code to device.
+ * @hdev: Pointer to hl_device structure.
*
- * Copy fw code from firmware file to device memory.
- * Returns 0 on success
+ * Copy u-boot fw code from firmware file to SRAM BAR.
*
+ * Return: 0 on success, non-zero for failure.
*/
-static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
- void __iomem *dst)
+static int goya_push_uboot_to_device(struct hl_device *hdev)
{
- const struct firmware *fw;
- const u64 *fw_data;
- size_t fw_size, i;
- int rc;
-
- rc = request_firmware(&fw, fw_name, hdev->dev);
-
- if (rc) {
- dev_err(hdev->dev, "Failed to request %s\n", fw_name);
- goto out;
- }
-
- fw_size = fw->size;
- if ((fw_size % 4) != 0) {
- dev_err(hdev->dev, "illegal %s firmware size %zu\n",
- fw_name, fw_size);
- rc = -EINVAL;
- goto out;
- }
-
- dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
-
- fw_data = (const u64 *) fw->data;
+ char fw_name[200];
+ void __iomem *dst;
- if ((fw->size % 8) != 0)
- fw_size -= 8;
+ snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
+ dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
- for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
- if (!(i & (0x80000 - 1))) {
- dev_dbg(hdev->dev,
- "copied so far %zu out of %zu for %s firmware",
- i, fw_size, fw_name);
- usleep_range(20, 100);
- }
+ return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+}
- writeq(*fw_data, dst);
- }
+/*
+ * goya_push_linux_to_device() - Push LINUX FW code to device.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Copy LINUX fw code from firmware file to HBM BAR.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+static int goya_push_linux_to_device(struct hl_device *hdev)
+{
+ char fw_name[200];
+ void __iomem *dst;
- if ((fw->size % 8) != 0)
- writel(*(const u32 *) fw_data, dst);
+ snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
+ dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
-out:
- release_firmware(fw);
- return rc;
+ return hl_fw_push_fw_to_device(hdev, fw_name, dst);
}
static int goya_pldm_init_cpu(struct hl_device *hdev)
{
- char fw_name[200];
- void __iomem *dst;
u32 val, unit_rst_val;
int rc;
@@ -2408,15 +2124,11 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
- dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
- rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ rc = goya_push_uboot_to_device(hdev);
if (rc)
return rc;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
- dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
- rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ rc = goya_push_linux_to_device(hdev);
if (rc)
return rc;
@@ -2478,8 +2190,6 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
{
struct goya_device *goya = hdev->asic_specific;
- char fw_name[200];
- void __iomem *dst;
u32 status;
int rc;
@@ -2493,11 +2203,10 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
* Before pushing u-boot/linux to device, need to set the ddr bar to
* base address of dram
*/
- rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
- if (rc) {
+ if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
dev_err(hdev->dev,
"failed to map DDR bar to DRAM base address\n");
- return rc;
+ return -EIO;
}
if (hdev->pldm) {
@@ -2550,6 +2259,11 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
"ARM status %d - DDR initialization failed\n",
status);
break;
+ case CPU_BOOT_STATUS_UBOOT_NOT_READY:
+ dev_err(hdev->dev,
+ "ARM status %d - u-boot stopped by user\n",
+ status);
+ break;
default:
dev_err(hdev->dev,
"ARM status %d - Invalid status code\n",
@@ -2571,9 +2285,7 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
- dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
- rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ rc = goya_push_linux_to_device(hdev);
if (rc)
return rc;
@@ -2606,7 +2318,39 @@ out:
return 0;
}
-static int goya_mmu_init(struct hl_device *hdev)
+static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
+ u64 phys_addr)
+{
+ u32 status, timeout_usec;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
+ WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
+ WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
+
+ rc = hl_poll_timeout(
+ hdev,
+ MMU_ASID_BUSY,
+ status,
+ !(status & 0x80000000),
+ 1000,
+ timeout_usec);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout during MMU hop0 config of asid %d\n", asid);
+ return rc;
+ }
+
+ return 0;
+}
+
+int goya_mmu_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
@@ -2697,12 +2441,12 @@ static int goya_hw_init(struct hl_device *hdev)
* After CPU initialization is finished, change DDR bar mapping inside
* iATU to point to the start address of the MMU page tables
*/
- rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
- (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
- if (rc) {
+ if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
+ (MMU_PAGE_TABLES_ADDR &
+ ~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
dev_err(hdev->dev,
"failed to map DDR bar to MMU page tables\n");
- return rc;
+ return -EIO;
}
rc = goya_mmu_init(hdev);
@@ -2729,28 +2473,16 @@ static int goya_hw_init(struct hl_device *hdev)
goto disable_msix;
}
- /* CPU initialization is finished, we can now move to 48 bit DMA mask */
- rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
- if (rc) {
- dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
- rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(hdev->dev,
- "Unable to set pci dma mask to 32 bits\n");
- goto disable_pci_access;
- }
- }
-
- rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
- if (rc) {
- dev_warn(hdev->dev,
- "Unable to set pci consistent dma mask to 48 bits\n");
- rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(hdev->dev,
- "Unable to set pci consistent dma mask to 32 bits\n");
+ /*
+ * Check if we managed to set the DMA mask to more than 32 bits. If so,
+ * let's try to increase it again because in Goya we set the initial
+ * DMA mask to 39 bits so that the allocation of the memory area for the
+ * device's CPU will be under 39 bits
+ */
+ if (hdev->dma_mask > 32) {
+ rc = hl_pci_set_dma_mask(hdev, 48);
+ if (rc)
goto disable_pci_access;
- }
}
/* Perform read from the device to flush all MSI-X configuration */
@@ -2759,7 +2491,7 @@ static int goya_hw_init(struct hl_device *hdev)
return 0;
disable_pci_access:
- goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
disable_msix:
goya_disable_msix(hdev);
disable_queues:
@@ -2866,7 +2598,7 @@ int goya_suspend(struct hl_device *hdev)
{
int rc;
- rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2894,7 +2626,7 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
return rc;
}
-static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
+void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
u32 db_reg_offset, db_value;
bool invalid_queue = false;
@@ -2992,13 +2724,23 @@ void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
- return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
+ void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
+ dma_handle, flags);
+
+ /* Shift to the device's base physical address of host memory */
+ if (kernel_addr)
+ *dma_handle += HOST_PHYS_BASE;
+
+ return kernel_addr;
}
static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
- dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
+ /* Cancel the device's base physical address of host memory */
+ dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
+
+ dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
}
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
@@ -3061,12 +2803,12 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
{
- struct goya_device *goya = hdev->asic_specific;
struct packet_msg_prot *fence_pkt;
u32 *fence_ptr;
dma_addr_t fence_dma_addr;
struct hl_cb *cb;
u32 tmp, timeout;
+ char buf[16] = {};
int rc;
if (hdev->pldm)
@@ -3074,13 +2816,14 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
else
timeout = HL_DEVICE_TIMEOUT_USEC;
- if (!hdev->asic_funcs->is_device_idle(hdev)) {
+ if (!hdev->asic_funcs->is_device_idle(hdev, buf, sizeof(buf))) {
dev_err_ratelimited(hdev->dev,
- "Can't send KMD job on QMAN0 if device is not idle\n");
+ "Can't send KMD job on QMAN0 because %s is busy\n",
+ buf);
return -EBUSY;
}
- fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
&fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
@@ -3090,10 +2833,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
*fence_ptr = 0;
- if (goya->hw_cap_initialized & HW_CAP_MMU) {
- WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
- RREG32(mmDMA_QM_0_GLBL_PROT);
- }
+ goya_qman0_set_security(hdev, true);
/*
* goya cs parser saves space for 2xpacket_msg_prot at end of CB. For
@@ -3111,8 +2851,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
(1 << GOYA_PKT_CTL_MB_SHIFT);
fence_pkt->ctl = cpu_to_le32(tmp);
fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
- fence_pkt->addr = cpu_to_le64(fence_dma_addr +
- hdev->asic_prop.host_phys_base_address);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr);
rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
job->job_cb_size, cb->bus_address);
@@ -3132,13 +2871,10 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
}
free_fence_ptr:
- hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
fence_dma_addr);
- if (goya->hw_cap_initialized & HW_CAP_MMU) {
- WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
- RREG32(mmDMA_QM_0_GLBL_PROT);
- }
+ goya_qman0_set_security(hdev, false);
return rc;
}
@@ -3147,10 +2883,6 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
u32 timeout, long *result)
{
struct goya_device *goya = hdev->asic_specific;
- struct armcp_packet *pkt;
- dma_addr_t pkt_dma_addr;
- u32 tmp;
- int rc = 0;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
if (result)
@@ -3158,74 +2890,8 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
return 0;
}
- if (len > CPU_CB_SIZE) {
- dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
- len);
- return -ENOMEM;
- }
-
- pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
- &pkt_dma_addr);
- if (!pkt) {
- dev_err(hdev->dev,
- "Failed to allocate DMA memory for packet to CPU\n");
- return -ENOMEM;
- }
-
- memcpy(pkt, msg, len);
-
- mutex_lock(&hdev->send_cpu_message_lock);
-
- if (hdev->disabled)
- goto out;
-
- if (hdev->device_cpu_disabled) {
- rc = -EIO;
- goto out;
- }
-
- rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
- pkt_dma_addr);
- if (rc) {
- dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
- goto out;
- }
-
- rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
- timeout, &tmp);
-
- hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);
-
- if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
- hdev->device_cpu_disabled = true;
- goto out;
- }
-
- if (tmp == ARMCP_PACKET_FENCE_VAL) {
- u32 ctl = le32_to_cpu(pkt->ctl);
-
- rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
- if (rc) {
- dev_err(hdev->dev,
- "F/W ERROR %d for CPU packet %d\n",
- rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
- >> ARMCP_PKT_CTL_OPCODE_SHIFT);
- rc = -EINVAL;
- } else if (result) {
- *result = (long) le64_to_cpu(pkt->result);
- }
- } else {
- dev_err(hdev->dev, "CPU packet wrong fence value\n");
- rc = -EINVAL;
- }
-
-out:
- mutex_unlock(&hdev->send_cpu_message_lock);
-
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
-
- return rc;
+ return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
+ timeout, result);
}
int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
@@ -3239,7 +2905,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
fence_val = GOYA_QMAN0_FENCE_VAL;
- fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
&fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
@@ -3249,7 +2915,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
*fence_ptr = 0;
- fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
+ fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
sizeof(struct packet_msg_prot),
GFP_KERNEL, &pkt_dma_addr);
if (!fence_pkt) {
@@ -3264,8 +2930,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
(1 << GOYA_PKT_CTL_MB_SHIFT);
fence_pkt->ctl = cpu_to_le32(tmp);
fence_pkt->value = cpu_to_le32(fence_val);
- fence_pkt->addr = cpu_to_le64(fence_dma_addr +
- hdev->asic_prop.host_phys_base_address);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr);
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
sizeof(struct packet_msg_prot),
@@ -3293,48 +2958,30 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
}
free_pkt:
- hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
pkt_dma_addr);
free_fence_ptr:
- hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
fence_dma_addr);
return rc;
}
int goya_test_cpu_queue(struct hl_device *hdev)
{
- struct armcp_packet test_pkt;
- long result;
- int rc;
-
- /* cpu_queues_enable flag is always checked in send cpu message */
-
- memset(&test_pkt, 0, sizeof(test_pkt));
-
- test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
- test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
-
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
- sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+ struct goya_device *goya = hdev->asic_specific;
- if (!rc) {
- if (result == ARMCP_PACKET_FENCE_VAL)
- dev_info(hdev->dev,
- "queue test on CPU queue succeeded\n");
- else
- dev_err(hdev->dev,
- "CPU queue test failed (0x%08lX)\n", result);
- } else {
- dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
- }
+ /*
+ * check capability here because send_cpu_message() won't update the
+ * result value if the CPU queue capability is not set
+ */
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
- return rc;
+ return hl_fw_test_cpu_queue(hdev);
}
-static int goya_test_queues(struct hl_device *hdev)
+int goya_test_queues(struct hl_device *hdev)
{
- struct goya_device *goya = hdev->asic_specific;
int i, rc, ret_val = 0;
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
@@ -3344,7 +2991,7 @@ static int goya_test_queues(struct hl_device *hdev)
}
if (hdev->cpu_queues_enable) {
- rc = goya->test_cpu_queue(hdev);
+ rc = goya_test_cpu_queue(hdev);
if (rc)
ret_val = -EINVAL;
}
@@ -3355,57 +3002,68 @@ static int goya_test_queues(struct hl_device *hdev)
static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
gfp_t mem_flags, dma_addr_t *dma_handle)
{
+ void *kernel_addr;
+
if (size > GOYA_DMA_POOL_BLK_SIZE)
return NULL;
- return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
+ kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
+
+ /* Shift to the device's base physical address of host memory */
+ if (kernel_addr)
+ *dma_handle += HOST_PHYS_BASE;
+
+ return kernel_addr;
}
static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
dma_addr_t dma_addr)
{
- dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
+ /* Cancel the device's base physical address of host memory */
+ dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
+
+ dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
}
-static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
- size_t size, dma_addr_t *dma_handle)
+void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle)
{
- u64 kernel_addr;
-
- /* roundup to CPU_PKT_SIZE */
- size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
-
- kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
-
- *dma_handle = hdev->cpu_accessible_dma_address +
- (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
-
- return (void *) (uintptr_t) kernel_addr;
+ return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}
-static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
- size_t size, void *vaddr)
+void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+ void *vaddr)
{
- /* roundup to CPU_PKT_SIZE */
- size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
-
- gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
- size);
+ hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
-static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
+static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir)
{
- if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
+ struct scatterlist *sg;
+ int i;
+
+ if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
return -ENOMEM;
+ /* Shift to the device's base physical address of host memory */
+ for_each_sg(sgl, sg, nents, i)
+ sg->dma_address += HOST_PHYS_BASE;
+
return 0;
}
-static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
+static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir)
{
- dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
+ struct scatterlist *sg;
+ int i;
+
+ /* Cancel the device's base physical address of host memory */
+ for_each_sg(sgl, sg, nents, i)
+ sg->dma_address -= HOST_PHYS_BASE;
+
+ dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
}
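/*
 * Editorial aside, not part of this patch: every host-memory DMA helper in
 * this file now applies the same address convention, sketched below. The
 * kernel DMA API returns a host bus address, while the device reaches host
 * memory through the outbound iATU window that this patch programs to start
 * at HOST_PHYS_BASE (see goya_init_iatu()/hl_pci_init_iatu()), so the base
 * is added before an address is handed to the device and subtracted again
 * before it goes back to the DMA API. The helper names are illustrative,
 * not driver API.
 */
static inline dma_addr_t example_host_to_device_addr(dma_addr_t bus_addr)
{
	/* as in goya_dma_alloc_coherent(), goya_dma_pool_zalloc(), goya_dma_map_sg() */
	return bus_addr + HOST_PHYS_BASE;
}

static inline dma_addr_t example_device_to_host_addr(dma_addr_t device_addr)
{
	/* as in goya_dma_free_coherent(), goya_dma_pool_free(), goya_dma_unmap_sg() */
	return device_addr - HOST_PHYS_BASE;
}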
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
@@ -3555,31 +3213,29 @@ static int goya_validate_dma_pkt_host(struct hl_device *hdev,
return -EFAULT;
}
- if (parser->ctx_id != HL_KERNEL_ASID_ID) {
- if (sram_addr) {
- if (!hl_mem_area_inside_range(device_memory_addr,
- le32_to_cpu(user_dma_pkt->tsize),
- hdev->asic_prop.sram_user_base_address,
- hdev->asic_prop.sram_end_address)) {
+ if (sram_addr) {
+ if (!hl_mem_area_inside_range(device_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.sram_user_base_address,
+ hdev->asic_prop.sram_end_address)) {
+
+ dev_err(hdev->dev,
+ "SRAM address 0x%llx + 0x%x is invalid\n",
+ device_memory_addr,
+ user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+ } else {
+ if (!hl_mem_area_inside_range(device_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.dram_user_base_address,
+ hdev->asic_prop.dram_end_address)) {
- dev_err(hdev->dev,
- "SRAM address 0x%llx + 0x%x is invalid\n",
- device_memory_addr,
- user_dma_pkt->tsize);
- return -EFAULT;
- }
- } else {
- if (!hl_mem_area_inside_range(device_memory_addr,
- le32_to_cpu(user_dma_pkt->tsize),
- hdev->asic_prop.dram_user_base_address,
- hdev->asic_prop.dram_end_address)) {
-
- dev_err(hdev->dev,
- "DRAM address 0x%llx + 0x%x is invalid\n",
- device_memory_addr,
- user_dma_pkt->tsize);
- return -EFAULT;
- }
+ dev_err(hdev->dev,
+ "DRAM address 0x%llx + 0x%x is invalid\n",
+ device_memory_addr,
+ user_dma_pkt->tsize);
+ return -EFAULT;
}
}
@@ -3693,7 +3349,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
* WA for HW-23.
* We can't allow user to read from Host using QMANs other than 1.
*/
- if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+ if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
le32_to_cpu(user_dma_pkt->tsize),
hdev->asic_prop.va_space_host_start_address,
@@ -3957,8 +3613,6 @@ static int goya_patch_dma_packet(struct hl_device *hdev,
new_dma_pkt->ctl = cpu_to_le32(ctl);
new_dma_pkt->tsize = cpu_to_le32((u32) len);
- dma_addr += hdev->asic_prop.host_phys_base_address;
-
if (dir == DMA_TO_DEVICE) {
new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
@@ -4209,36 +3863,35 @@ free_userptr:
return rc;
}
-static int goya_parse_cb_no_ext_quque(struct hl_device *hdev,
+static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
- /* For internal queue jobs, just check if cb address is valid */
- if (hl_mem_area_inside_range(
- (u64) (uintptr_t) parser->user_cb,
- parser->user_cb_size,
- asic_prop->sram_user_base_address,
- asic_prop->sram_end_address))
- return 0;
+ if (goya->hw_cap_initialized & HW_CAP_MMU)
+ return 0;
- if (hl_mem_area_inside_range(
- (u64) (uintptr_t) parser->user_cb,
- parser->user_cb_size,
- asic_prop->dram_user_base_address,
- asic_prop->dram_end_address))
- return 0;
+ /* For internal queue jobs, just check if CB address is valid */
+ if (hl_mem_area_inside_range(
+ (u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->sram_user_base_address,
+ asic_prop->sram_end_address))
+ return 0;
- dev_err(hdev->dev,
- "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
- parser->user_cb, parser->user_cb_size);
+ if (hl_mem_area_inside_range(
+ (u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->dram_user_base_address,
+ asic_prop->dram_end_address))
+ return 0;
- return -EFAULT;
- }
+ dev_err(hdev->dev,
+ "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
+ parser->user_cb, parser->user_cb_size);
- return 0;
+ return -EFAULT;
}
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
@@ -4246,9 +3899,9 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
struct goya_device *goya = hdev->asic_specific;
if (!parser->ext_queue)
- return goya_parse_cb_no_ext_quque(hdev, parser);
+ return goya_parse_cb_no_ext_queue(hdev, parser);
- if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
+ if (goya->hw_cap_initialized & HW_CAP_MMU)
return goya_parse_cb_mmu(hdev, parser);
else
return goya_parse_cb_no_mmu(hdev, parser);
@@ -4279,12 +3932,12 @@ void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
}
-static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
+void goya_update_eq_ci(struct hl_device *hdev, u32 val)
{
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
}
-static void goya_restore_phase_topology(struct hl_device *hdev)
+void goya_restore_phase_topology(struct hl_device *hdev)
{
int i, num_of_sob_in_longs, num_of_mon_in_longs;
@@ -4321,6 +3974,7 @@ static void goya_restore_phase_topology(struct hl_device *hdev)
static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 ddr_bar_addr;
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
@@ -4338,15 +3992,16 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
- rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (!rc) {
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (ddr_bar_addr != U64_MAX) {
*val = readl(hdev->pcie_bar[DDR_BAR_ID] +
(addr - bar_base_addr));
- rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
- (MMU_PAGE_TABLES_ADDR &
- ~(prop->dram_pci_bar_size - 0x1ull)));
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev,
+ ddr_bar_addr);
}
+ if (ddr_bar_addr == U64_MAX)
+ rc = -EIO;
} else {
rc = -EFAULT;
}
@@ -4371,6 +4026,7 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 ddr_bar_addr;
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
@@ -4388,15 +4044,16 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
- rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (!rc) {
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (ddr_bar_addr != U64_MAX) {
writel(val, hdev->pcie_bar[DDR_BAR_ID] +
(addr - bar_base_addr));
- rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
- (MMU_PAGE_TABLES_ADDR &
- ~(prop->dram_pci_bar_size - 0x1ull)));
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev,
+ ddr_bar_addr);
}
+ if (ddr_bar_addr == U64_MAX)
+ rc = -EIO;
} else {
rc = -EFAULT;
}
@@ -4408,6 +4065,9 @@ static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
+ if (hdev->hard_reset_pending)
+ return U64_MAX;
+
return readq(hdev->pcie_bar[DDR_BAR_ID] +
(addr - goya->ddr_bar_cur_addr));
}
@@ -4416,6 +4076,9 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
struct goya_device *goya = hdev->asic_specific;
+ if (hdev->hard_reset_pending)
+ return;
+
writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
(addr - goya->ddr_bar_cur_addr));
}
@@ -4605,8 +4268,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
- total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
+ rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
+ HL_DEVICE_TIMEOUT_USEC, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@@ -4622,8 +4285,8 @@ static int goya_soft_reset_late_init(struct hl_device *hdev)
* Unmask all IRQs since some could have been received
* during the soft reset
*/
- return goya_unmask_irq_arr(hdev, goya_non_fatal_events,
- sizeof(goya_non_fatal_events));
+ return goya_unmask_irq_arr(hdev, goya_all_events,
+ sizeof(goya_all_events));
}
static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
@@ -4638,7 +4301,7 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, &result);
if (rc)
@@ -4759,7 +4422,6 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
u64 val, bool is_dram)
{
struct packet_lin_dma *lin_dma_pkt;
- struct hl_cs_parser parser;
struct hl_cs_job *job;
u32 cb_size, ctl;
struct hl_cb *cb;
@@ -4799,36 +4461,16 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
job->user_cb->cs_cnt++;
job->user_cb_size = cb_size;
job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size +
+ sizeof(struct packet_msg_prot) * 2;
hl_debugfs_add_job(hdev, job);
- parser.ctx_id = HL_KERNEL_ASID_ID;
- parser.cs_sequence = 0;
- parser.job_id = job->id;
- parser.hw_queue_id = job->hw_queue_id;
- parser.job_userptr_list = &job->userptr_list;
- parser.user_cb = job->user_cb;
- parser.user_cb_size = job->user_cb_size;
- parser.ext_queue = job->ext_queue;
- parser.use_virt_addr = hdev->mmu_enable;
-
- rc = hdev->asic_funcs->cs_parser(hdev, &parser);
- if (rc) {
- dev_err(hdev->dev, "Failed to parse kernel CB\n");
- goto free_job;
- }
-
- job->patched_cb = parser.patched_cb;
- job->job_cb_size = parser.patched_cb_size;
- job->patched_cb->cs_cnt++;
-
rc = goya_send_job_on_qman0(hdev, job);
- job->patched_cb->cs_cnt--;
hl_cb_put(job->patched_cb);
-free_job:
- hl_userptr_delete_list(hdev, &job->userptr_list);
hl_debugfs_remove_job(hdev, job);
kfree(job);
cb->cs_cnt--;
@@ -4840,7 +4482,7 @@ release_cb:
return rc;
}
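/*
 * In the reworked goya_memset_device_memory() above, the kernel CB is used
 * directly as the patched CB and job_cb_size reserves room for two extra
 * packet_msg_prot entries. Presumably that is the space consumed by the
 * CQ-update and MSI-X completion packets that goya_add_end_of_cb_packets()
 * appends (see its cq_addr/cq_val/msix_vec arguments declared later in this
 * patch); the exact pairing is an inference, not something shown in this hunk.
 */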
-static int goya_context_switch(struct hl_device *hdev, u32 asid)
+int goya_context_switch(struct hl_device *hdev, u32 asid)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 addr = prop->sram_base_address;
@@ -4854,12 +4496,13 @@ static int goya_context_switch(struct hl_device *hdev, u32 asid)
return rc;
}
+ WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
goya_mmu_prepare(hdev, asid);
return 0;
}
-static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
+int goya_mmu_clear_pgt_range(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
@@ -4873,7 +4516,7 @@ static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
return goya_memset_device_memory(hdev, addr, size, 0, true);
}
-static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
+int goya_mmu_set_dram_default_page(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
@@ -4886,7 +4529,7 @@ static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
return goya_memset_device_memory(hdev, addr, size, val, true);
}
-static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
+void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
{
struct goya_device *goya = hdev->asic_specific;
int i;
@@ -4900,10 +4543,8 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
}
/* zero the MMBP and ASID bits and then set the ASID */
- for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
- WREG32_AND(goya_mmu_regs[i], ~0x7FF);
- WREG32_OR(goya_mmu_regs[i], asid);
- }
+ for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
+ goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
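/*
 * goya_mmu_prepare_reg() itself is not part of this hunk. Judging by the
 * inline sequence it replaces, a minimal sketch (an assumption, not the
 * driver's actual definition) would be the same read-modify-write:
 */
static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
{
	/* zero the MMBP and ASID bits, then set the new ASID */
	WREG32_AND(reg, ~0x7FF);
	WREG32_OR(reg, asid);
}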
static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
@@ -4994,107 +4635,29 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
"Timeout when waiting for MMU cache invalidation\n");
}
-static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
- u64 phys_addr)
-{
- u32 status, timeout_usec;
- int rc;
-
- if (hdev->pldm)
- timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
- else
- timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
-
- WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
- WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
- WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
-
- rc = hl_poll_timeout(
- hdev,
- MMU_ASID_BUSY,
- status,
- !(status & 0x80000000),
- 1000,
- timeout_usec);
-
- if (rc) {
- dev_err(hdev->dev,
- "Timeout during MMU hop0 config of asid %d\n", asid);
- return rc;
- }
-
- return 0;
-}
-
int goya_send_heartbeat(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
- struct armcp_packet hb_pkt;
- long result;
- int rc;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- memset(&hb_pkt, 0, sizeof(hb_pkt));
-
- hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
- hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
-
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
- sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
-
- if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
- rc = -EIO;
-
- return rc;
+ return hl_fw_send_heartbeat(hdev);
}
-static int goya_armcp_info_get(struct hl_device *hdev)
+int goya_armcp_info_get(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct armcp_packet pkt;
- void *armcp_info_cpu_addr;
- dma_addr_t armcp_info_dma_addr;
u64 dram_size;
- long result;
int rc;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- armcp_info_cpu_addr =
- hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- sizeof(struct armcp_info), &armcp_info_dma_addr);
- if (!armcp_info_cpu_addr) {
- dev_err(hdev->dev,
- "Failed to allocate DMA memory for ArmCP info packet\n");
- return -ENOMEM;
- }
-
- memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
-
- memset(&pkt, 0, sizeof(pkt));
-
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.addr = cpu_to_le64(armcp_info_dma_addr +
- prop->host_phys_base_address);
- pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
-
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- GOYA_ARMCP_INFO_TIMEOUT, &result);
-
- if (rc) {
- dev_err(hdev->dev,
- "Failed to send armcp info pkt, error %d\n", rc);
- goto out;
- }
-
- memcpy(&prop->armcp_info, armcp_info_cpu_addr,
- sizeof(prop->armcp_info));
+ rc = hl_fw_armcp_info_get(hdev);
+ if (rc)
+ return rc;
dram_size = le64_to_cpu(prop->armcp_info.dram_size);
if (dram_size) {
@@ -5110,32 +4673,10 @@ static int goya_armcp_info_get(struct hl_device *hdev)
prop->dram_end_address = prop->dram_base_address + dram_size;
}
- rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
- if (rc) {
- dev_err(hdev->dev,
- "Failed to build hwmon channel info, error %d\n", rc);
- rc = -EFAULT;
- goto out;
- }
-
-out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- sizeof(struct armcp_info), armcp_info_cpu_addr);
-
- return rc;
-}
-
-static void goya_init_clock_gating(struct hl_device *hdev)
-{
-
-}
-
-static void goya_disable_clock_gating(struct hl_device *hdev)
-{
-
+ return 0;
}
-static bool goya_is_device_idle(struct hl_device *hdev)
+static bool goya_is_device_idle(struct hl_device *hdev, char *buf, size_t size)
{
u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
int i;
@@ -5147,7 +4688,7 @@ static bool goya_is_device_idle(struct hl_device *hdev)
if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
DMA_QM_IDLE_MASK)
- return false;
+ return HL_ENG_BUSY(buf, size, "DMA%d_QM", i);
}
offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
@@ -5159,31 +4700,31 @@ static bool goya_is_device_idle(struct hl_device *hdev)
if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
TPC_QM_IDLE_MASK)
- return false;
+ return HL_ENG_BUSY(buf, size, "TPC%d_QM", i);
if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
TPC_CMDQ_IDLE_MASK)
- return false;
+ return HL_ENG_BUSY(buf, size, "TPC%d_CMDQ", i);
if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
TPC_CFG_IDLE_MASK)
- return false;
+ return HL_ENG_BUSY(buf, size, "TPC%d_CFG", i);
}
if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
MME_QM_IDLE_MASK)
- return false;
+ return HL_ENG_BUSY(buf, size, "MME_QM");
if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
MME_CMDQ_IDLE_MASK)
- return false;
+ return HL_ENG_BUSY(buf, size, "MME_CMDQ");
if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
MME_ARCH_IDLE_MASK)
- return false;
+ return HL_ENG_BUSY(buf, size, "MME_ARCH");
if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
- return false;
+ return HL_ENG_BUSY(buf, size, "MME");
return true;
}
@@ -5211,52 +4752,11 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
size_t max_size)
{
struct goya_device *goya = hdev->asic_specific;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct armcp_packet pkt;
- void *eeprom_info_cpu_addr;
- dma_addr_t eeprom_info_dma_addr;
- long result;
- int rc;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- eeprom_info_cpu_addr =
- hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- max_size, &eeprom_info_dma_addr);
- if (!eeprom_info_cpu_addr) {
- dev_err(hdev->dev,
- "Failed to allocate DMA memory for EEPROM info packet\n");
- return -ENOMEM;
- }
-
- memset(eeprom_info_cpu_addr, 0, max_size);
-
- memset(&pkt, 0, sizeof(pkt));
-
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
- prop->host_phys_base_address);
- pkt.data_max_size = cpu_to_le32(max_size);
-
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- GOYA_ARMCP_EEPROM_TIMEOUT, &result);
-
- if (rc) {
- dev_err(hdev->dev,
- "Failed to send armcp EEPROM pkt, error %d\n", rc);
- goto out;
- }
-
- /* result contains the actual size */
- memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
-
-out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
- eeprom_info_cpu_addr);
-
- return rc;
+ return hl_fw_get_eeprom_data(hdev, data, max_size);
}
static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
@@ -5279,12 +4779,12 @@ static const struct hl_asic_funcs goya_funcs = {
.cb_mmap = goya_cb_mmap,
.ring_doorbell = goya_ring_doorbell,
.flush_pq_write = goya_flush_pq_write,
- .dma_alloc_coherent = goya_dma_alloc_coherent,
- .dma_free_coherent = goya_dma_free_coherent,
+ .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
+ .asic_dma_free_coherent = goya_dma_free_coherent,
.get_int_queue_base = goya_get_int_queue_base,
.test_queues = goya_test_queues,
- .dma_pool_zalloc = goya_dma_pool_zalloc,
- .dma_pool_free = goya_dma_pool_free,
+ .asic_dma_pool_zalloc = goya_dma_pool_zalloc,
+ .asic_dma_pool_free = goya_dma_pool_free,
.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
.hl_dma_unmap_sg = goya_dma_unmap_sg,
@@ -5306,8 +4806,7 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.send_heartbeat = goya_send_heartbeat,
- .enable_clock_gating = goya_init_clock_gating,
- .disable_clock_gating = goya_disable_clock_gating,
+ .debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
.soft_reset_late_init = goya_soft_reset_late_init,
.hw_queues_lock = goya_hw_queues_lock,
@@ -5315,7 +4814,12 @@ static const struct hl_asic_funcs goya_funcs = {
.get_pci_id = goya_get_pci_id,
.get_eeprom_data = goya_get_eeprom_data,
.send_cpu_message = goya_send_cpu_message,
- .get_hw_state = goya_get_hw_state
+ .get_hw_state = goya_get_hw_state,
+ .pci_bars_map = goya_pci_bars_map,
+ .set_dram_bar_base = goya_set_ddr_bar_base,
+ .init_iatu = goya_init_iatu,
+ .rreg = hl_rreg,
+ .wreg = hl_wreg
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 830551b6b062..14e216cb3668 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -39,9 +39,13 @@
#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES"
#endif
-#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
+#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
-#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */
+#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */
+
+#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
+
+#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
#define TPC_ENABLED_MASK 0xFF
@@ -49,19 +53,14 @@
#define MAX_POWER_DEFAULT 200000 /* 200W */
-#define GOYA_ARMCP_INFO_TIMEOUT 10000000 /* 10s */
-#define GOYA_ARMCP_EEPROM_TIMEOUT 10000000 /* 10s */
-
#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
-#define MMU_PAGE_TABLES_SIZE 0x0DE00000 /* 222MB */
+#define MMU_PAGE_TABLES_SIZE 0x0FC00000 /* 252MB */
#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */
#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */
-#define CPU_PQ_PKT_SIZE 0x00001000 /* 4KB */
-#define CPU_PQ_DATA_SIZE 0x01FFE000 /* 32MB - 8KB */
#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
@@ -69,13 +68,13 @@
MMU_PAGE_TABLES_SIZE)
#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \
MMU_DRAM_DEFAULT_PAGE_SIZE)
-#define CPU_PQ_PKT_ADDR (MMU_CACHE_MNG_ADDR + \
+#define DRAM_KMD_END_ADDR (MMU_CACHE_MNG_ADDR + \
MMU_CACHE_MNG_SIZE)
-#define CPU_PQ_DATA_ADDR (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE)
-#define DRAM_BASE_ADDR_USER (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE)
-#if (DRAM_BASE_ADDR_USER != 0x20000000)
-#error "KMD must reserve 512MB"
+#define DRAM_BASE_ADDR_USER 0x20000000
+
+#if (DRAM_KMD_END_ADDR > DRAM_BASE_ADDR_USER)
+#error "KMD must reserve no more than 512MB"
#endif
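/*
 * Worked out from the sizes above, and assuming DRAM_PHYS_BASE (defined
 * elsewhere) is 0: DRAM_KMD_END_ADDR = 0x10000000 + 0x0FC00000 + 0x00200000
 * + 0x00001000 = 0x1FE01000, which sits just below the fixed
 * DRAM_BASE_ADDR_USER of 0x20000000 (512MB), so the build-time check passes
 * with a little headroom left for the KMD region to grow.
 */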
/*
@@ -142,22 +141,12 @@
#define HW_CAP_GOLDEN 0x00000400
#define HW_CAP_TPC 0x00000800
-#define CPU_PKT_SHIFT 5
-#define CPU_PKT_SIZE (1 << CPU_PKT_SHIFT)
-#define CPU_PKT_MASK (~((1 << CPU_PKT_SHIFT) - 1))
-#define CPU_MAX_PKTS_IN_CB 32
-#define CPU_CB_SIZE (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB)
-#define CPU_ACCESSIBLE_MEM_SIZE (HL_QUEUE_LENGTH * CPU_CB_SIZE)
-
enum goya_fw_component {
FW_COMP_UBOOT,
FW_COMP_PREBOOT
};
struct goya_device {
- int (*test_cpu_queue)(struct hl_device *hdev);
- int (*armcp_info_get)(struct hl_device *hdev);
-
/* TODO: remove hw_queues_lock after moving to scheduler code */
spinlock_t hw_queues_lock;
@@ -170,13 +159,34 @@ struct goya_device {
u32 hw_cap_initialized;
};
+void goya_get_fixed_properties(struct hl_device *hdev);
+int goya_mmu_init(struct hl_device *hdev);
+void goya_init_dma_qmans(struct hl_device *hdev);
+void goya_init_mme_qmans(struct hl_device *hdev);
+void goya_init_tpc_qmans(struct hl_device *hdev);
+int goya_init_cpu_queues(struct hl_device *hdev);
+void goya_init_security(struct hl_device *hdev);
+int goya_late_init(struct hl_device *hdev);
+void goya_late_fini(struct hl_device *hdev);
+
+void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
+void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
+void goya_update_eq_ci(struct hl_device *hdev, u32 val);
+void goya_restore_phase_topology(struct hl_device *hdev);
+int goya_context_switch(struct hl_device *hdev, u32 asid);
+
int goya_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus,
u8 i2c_addr, u8 i2c_reg, u32 *val);
int goya_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus,
u8 i2c_addr, u8 i2c_reg, u32 val);
+void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state);
+
+int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
+int goya_test_queues(struct hl_device *hdev);
int goya_test_cpu_queue(struct hl_device *hdev);
int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
u32 timeout, long *result);
+
long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
long goya_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
@@ -184,28 +194,35 @@ long goya_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
long goya_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
void goya_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value);
-void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state);
+u64 goya_get_max_power(struct hl_device *hdev);
+void goya_set_max_power(struct hl_device *hdev, u64 value);
+
void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
void goya_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
-void goya_init_security(struct hl_device *hdev);
-u64 goya_get_max_power(struct hl_device *hdev);
-void goya_set_max_power(struct hl_device *hdev, u64 value);
+int goya_armcp_info_get(struct hl_device *hdev);
+int goya_debug_coresight(struct hl_device *hdev, void *data);
+
+void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
+int goya_mmu_clear_pgt_range(struct hl_device *hdev);
+int goya_mmu_set_dram_default_page(struct hl_device *hdev);
-int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
-void goya_late_fini(struct hl_device *hdev);
int goya_suspend(struct hl_device *hdev);
int goya_resume(struct hl_device *hdev);
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
+
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
+
void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
u32 cq_val, u32 msix_vec);
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
- dma_addr_t *dma_handle, u16 *queue_len);
+ dma_addr_t *dma_handle, u16 *queue_len);
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
-int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
int goya_send_heartbeat(struct hl_device *hdev);
+void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle);
+void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+ void *vaddr);
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
new file mode 100644
index 000000000000..1ac951f52d1e
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "goyaP.h"
+#include "include/goya/goya_coresight.h"
+#include "include/goya/asic_reg/goya_regs.h"
+
+#include <uapi/misc/habanalabs.h>
+
+#include <linux/coresight.h>
+
+#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100)
+
+static u64 debug_stm_regs[GOYA_STM_LAST + 1] = {
+ [GOYA_STM_CPU] = mmCPU_STM_BASE,
+ [GOYA_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
+ [GOYA_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE,
+ [GOYA_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE,
+ [GOYA_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE,
+ [GOYA_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE,
+ [GOYA_STM_DMA_MACRO_CS] = mmDMA_MACRO_CS_STM_BASE,
+ [GOYA_STM_MME1_SBA] = mmMME1_SBA_STM_BASE,
+ [GOYA_STM_MME3_SBB] = mmMME3_SBB_STM_BASE,
+ [GOYA_STM_MME4_WACS2] = mmMME4_WACS2_STM_BASE,
+ [GOYA_STM_MME4_WACS] = mmMME4_WACS_STM_BASE,
+ [GOYA_STM_MMU_CS] = mmMMU_CS_STM_BASE,
+ [GOYA_STM_PCIE] = mmPCIE_STM_BASE,
+ [GOYA_STM_PSOC] = mmPSOC_STM_BASE,
+ [GOYA_STM_TPC0_EML] = mmTPC0_EML_STM_BASE,
+ [GOYA_STM_TPC1_EML] = mmTPC1_EML_STM_BASE,
+ [GOYA_STM_TPC2_EML] = mmTPC2_EML_STM_BASE,
+ [GOYA_STM_TPC3_EML] = mmTPC3_EML_STM_BASE,
+ [GOYA_STM_TPC4_EML] = mmTPC4_EML_STM_BASE,
+ [GOYA_STM_TPC5_EML] = mmTPC5_EML_STM_BASE,
+ [GOYA_STM_TPC6_EML] = mmTPC6_EML_STM_BASE,
+ [GOYA_STM_TPC7_EML] = mmTPC7_EML_STM_BASE
+};
+
+static u64 debug_etf_regs[GOYA_ETF_LAST + 1] = {
+ [GOYA_ETF_CPU_0] = mmCPU_ETF_0_BASE,
+ [GOYA_ETF_CPU_1] = mmCPU_ETF_1_BASE,
+ [GOYA_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
+ [GOYA_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE,
+ [GOYA_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE,
+ [GOYA_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE,
+ [GOYA_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE,
+ [GOYA_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE,
+ [GOYA_ETF_DMA_MACRO_CS] = mmDMA_MACRO_CS_ETF_BASE,
+ [GOYA_ETF_MME1_SBA] = mmMME1_SBA_ETF_BASE,
+ [GOYA_ETF_MME3_SBB] = mmMME3_SBB_ETF_BASE,
+ [GOYA_ETF_MME4_WACS2] = mmMME4_WACS2_ETF_BASE,
+ [GOYA_ETF_MME4_WACS] = mmMME4_WACS_ETF_BASE,
+ [GOYA_ETF_MMU_CS] = mmMMU_CS_ETF_BASE,
+ [GOYA_ETF_PCIE] = mmPCIE_ETF_BASE,
+ [GOYA_ETF_PSOC] = mmPSOC_ETF_BASE,
+ [GOYA_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE,
+ [GOYA_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE,
+ [GOYA_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE,
+ [GOYA_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE,
+ [GOYA_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE,
+ [GOYA_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE,
+ [GOYA_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE,
+ [GOYA_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE
+};
+
+static u64 debug_funnel_regs[GOYA_FUNNEL_LAST + 1] = {
+ [GOYA_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
+ [GOYA_FUNNEL_DMA_CH_6_1] = mmDMA_CH_FUNNEL_6_1_BASE,
+ [GOYA_FUNNEL_DMA_MACRO_3_1] = mmDMA_MACRO_FUNNEL_3_1_BASE,
+ [GOYA_FUNNEL_MME0_RTR] = mmMME0_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_MME1_RTR] = mmMME1_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_MME2_RTR] = mmMME2_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_MME3_RTR] = mmMME3_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_MME4_RTR] = mmMME4_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_MME5_RTR] = mmMME5_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE,
+ [GOYA_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC1_RTR] = mmTPC1_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC2_RTR] = mmTPC2_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC3_EML] = mmTPC3_EML_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC3_RTR] = mmTPC3_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC4_RTR] = mmTPC4_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC5_RTR] = mmTPC5_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC6_RTR] = mmTPC6_RTR_FUNNEL_BASE,
+ [GOYA_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE
+};
+
+static u64 debug_bmon_regs[GOYA_BMON_LAST + 1] = {
+ [GOYA_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
+ [GOYA_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
+ [GOYA_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE,
+ [GOYA_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE,
+ [GOYA_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE,
+ [GOYA_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE,
+ [GOYA_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE,
+ [GOYA_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE,
+ [GOYA_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE,
+ [GOYA_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE,
+ [GOYA_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE,
+ [GOYA_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE,
+ [GOYA_BMON_DMA_MACRO_0] = mmDMA_MACRO_BMON_0_BASE,
+ [GOYA_BMON_DMA_MACRO_1] = mmDMA_MACRO_BMON_1_BASE,
+ [GOYA_BMON_DMA_MACRO_2] = mmDMA_MACRO_BMON_2_BASE,
+ [GOYA_BMON_DMA_MACRO_3] = mmDMA_MACRO_BMON_3_BASE,
+ [GOYA_BMON_DMA_MACRO_4] = mmDMA_MACRO_BMON_4_BASE,
+ [GOYA_BMON_DMA_MACRO_5] = mmDMA_MACRO_BMON_5_BASE,
+ [GOYA_BMON_DMA_MACRO_6] = mmDMA_MACRO_BMON_6_BASE,
+ [GOYA_BMON_DMA_MACRO_7] = mmDMA_MACRO_BMON_7_BASE,
+ [GOYA_BMON_MME1_SBA_0] = mmMME1_SBA_BMON0_BASE,
+ [GOYA_BMON_MME1_SBA_1] = mmMME1_SBA_BMON1_BASE,
+ [GOYA_BMON_MME3_SBB_0] = mmMME3_SBB_BMON0_BASE,
+ [GOYA_BMON_MME3_SBB_1] = mmMME3_SBB_BMON1_BASE,
+ [GOYA_BMON_MME4_WACS2_0] = mmMME4_WACS2_BMON0_BASE,
+ [GOYA_BMON_MME4_WACS2_1] = mmMME4_WACS2_BMON1_BASE,
+ [GOYA_BMON_MME4_WACS2_2] = mmMME4_WACS2_BMON2_BASE,
+ [GOYA_BMON_MME4_WACS_0] = mmMME4_WACS_BMON0_BASE,
+ [GOYA_BMON_MME4_WACS_1] = mmMME4_WACS_BMON1_BASE,
+ [GOYA_BMON_MME4_WACS_2] = mmMME4_WACS_BMON2_BASE,
+ [GOYA_BMON_MME4_WACS_3] = mmMME4_WACS_BMON3_BASE,
+ [GOYA_BMON_MME4_WACS_4] = mmMME4_WACS_BMON4_BASE,
+ [GOYA_BMON_MME4_WACS_5] = mmMME4_WACS_BMON5_BASE,
+ [GOYA_BMON_MME4_WACS_6] = mmMME4_WACS_BMON6_BASE,
+ [GOYA_BMON_MMU_0] = mmMMU_BMON_0_BASE,
+ [GOYA_BMON_MMU_1] = mmMMU_BMON_1_BASE,
+ [GOYA_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
+ [GOYA_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
+ [GOYA_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
+ [GOYA_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
+ [GOYA_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE,
+ [GOYA_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE,
+ [GOYA_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE,
+ [GOYA_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE,
+ [GOYA_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE,
+ [GOYA_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE,
+ [GOYA_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE,
+ [GOYA_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE,
+ [GOYA_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE,
+ [GOYA_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE,
+ [GOYA_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE,
+ [GOYA_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE,
+ [GOYA_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE,
+ [GOYA_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE,
+ [GOYA_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE,
+ [GOYA_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE,
+ [GOYA_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE,
+ [GOYA_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE,
+ [GOYA_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE,
+ [GOYA_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE,
+ [GOYA_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE,
+ [GOYA_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE,
+ [GOYA_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE,
+ [GOYA_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE,
+ [GOYA_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE,
+ [GOYA_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE,
+ [GOYA_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE,
+ [GOYA_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE,
+ [GOYA_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE,
+ [GOYA_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE,
+ [GOYA_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE,
+ [GOYA_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE
+};
+
+static u64 debug_spmu_regs[GOYA_SPMU_LAST + 1] = {
+ [GOYA_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE,
+ [GOYA_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE,
+ [GOYA_SPMU_DMA_CH_2_CS] = mmDMA_CH_2_CS_SPMU_BASE,
+ [GOYA_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE,
+ [GOYA_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE,
+ [GOYA_SPMU_DMA_MACRO_CS] = mmDMA_MACRO_CS_SPMU_BASE,
+ [GOYA_SPMU_MME1_SBA] = mmMME1_SBA_SPMU_BASE,
+ [GOYA_SPMU_MME3_SBB] = mmMME3_SBB_SPMU_BASE,
+ [GOYA_SPMU_MME4_WACS2] = mmMME4_WACS2_SPMU_BASE,
+ [GOYA_SPMU_MME4_WACS] = mmMME4_WACS_SPMU_BASE,
+ [GOYA_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE,
+ [GOYA_SPMU_PCIE] = mmPCIE_SPMU_BASE,
+ [GOYA_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE,
+ [GOYA_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE,
+ [GOYA_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE,
+ [GOYA_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE,
+ [GOYA_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE,
+ [GOYA_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE,
+ [GOYA_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE,
+ [GOYA_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE
+};
+
+static int goya_coresight_timeout(struct hl_device *hdev, u64 addr,
+ int position, bool up)
+{
+ int rc;
+ u32 val, timeout_usec;
+
+ if (hdev->pldm)
+ timeout_usec = GOYA_PLDM_CORESIGHT_TIMEOUT_USEC;
+ else
+ timeout_usec = CORESIGHT_TIMEOUT_USEC;
+
+ rc = hl_poll_timeout(
+ hdev,
+ addr,
+ val,
+ up ? val & BIT(position) : !(val & BIT(position)),
+ 1000,
+ timeout_usec);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
+ addr, position, up);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int goya_config_stm(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_stm *input;
+ u64 base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+ int rc;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0xE80, 0x80004);
+ WREG32(base_reg + 0xD64, 7);
+ WREG32(base_reg + 0xD60, 0);
+ WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask));
+ WREG32(base_reg + 0xD20, lower_32_bits(input->sp_mask));
+ WREG32(base_reg + 0xD60, 1);
+ WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask));
+ WREG32(base_reg + 0xD20, upper_32_bits(input->sp_mask));
+ WREG32(base_reg + 0xE70, 0x10);
+ WREG32(base_reg + 0xE60, 0);
+ WREG32(base_reg + 0xE64, 0x420000);
+ WREG32(base_reg + 0xE00, 0xFFFFFFFF);
+ WREG32(base_reg + 0xE20, 0xFFFFFFFF);
+ WREG32(base_reg + 0xEF4, input->id);
+ WREG32(base_reg + 0xDF4, 0x80);
+ WREG32(base_reg + 0xE8C, input->frequency);
+ WREG32(base_reg + 0xE90, 0x7FF);
+ WREG32(base_reg + 0xE80, 0x7 | (input->id << 16));
+ } else {
+ WREG32(base_reg + 0xE80, 4);
+ WREG32(base_reg + 0xD64, 0);
+ WREG32(base_reg + 0xD60, 1);
+ WREG32(base_reg + 0xD00, 0);
+ WREG32(base_reg + 0xD20, 0);
+ WREG32(base_reg + 0xD60, 0);
+ WREG32(base_reg + 0xE20, 0);
+ WREG32(base_reg + 0xE00, 0);
+ WREG32(base_reg + 0xDF4, 0x80);
+ WREG32(base_reg + 0xE70, 0);
+ WREG32(base_reg + 0xE60, 0);
+ WREG32(base_reg + 0xE64, 0);
+ WREG32(base_reg + 0xE8C, 0);
+
+ rc = goya_coresight_timeout(hdev, base_reg + 0xE80, 23, false);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to disable STM on timeout, error %d\n",
+ rc);
+ return rc;
+ }
+
+ WREG32(base_reg + 0xE80, 4);
+ }
+
+ return 0;
+}
+
+static int goya_config_etf(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_etf *input;
+ u64 base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+ u32 val;
+ int rc;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+ val = RREG32(base_reg + 0x304);
+ val |= 0x1000;
+ WREG32(base_reg + 0x304, val);
+ val |= 0x40;
+ WREG32(base_reg + 0x304, val);
+
+ rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to %s ETF on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to %s ETF on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ WREG32(base_reg + 0x20, 0);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0x34, 0x3FFC);
+ WREG32(base_reg + 0x28, input->sink_mode);
+ WREG32(base_reg + 0x304, 0x4001);
+ WREG32(base_reg + 0x308, 0xA);
+ WREG32(base_reg + 0x20, 1);
+ } else {
+ WREG32(base_reg + 0x34, 0);
+ WREG32(base_reg + 0x28, 0);
+ WREG32(base_reg + 0x304, 0);
+ }
+
+ return 0;
+}
+
+static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
+ u32 size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 range_start, range_end;
+
+ if (hdev->mmu_enable) {
+ range_start = prop->va_space_dram_start_address;
+ range_end = prop->va_space_dram_end_address;
+ } else {
+ range_start = prop->dram_user_base_address;
+ range_end = prop->dram_end_address;
+ }
+
+ return hl_mem_area_inside_range(addr, size, range_start, range_end);
+}
+
+static int goya_config_etr(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_etr *input;
+ u64 base_reg = mmPSOC_ETR_BASE - CFG_BASE;
+ u32 val;
+ int rc;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+ val = RREG32(base_reg + 0x304);
+ val |= 0x1000;
+ WREG32(base_reg + 0x304, val);
+ val |= 0x40;
+ WREG32(base_reg + 0x304, val);
+
+ rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ WREG32(base_reg + 0x20, 0);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ if (input->buffer_size == 0) {
+ dev_err(hdev->dev,
+ "ETR buffer size should be bigger than 0\n");
+ return -EINVAL;
+ }
+
+ if (!goya_etr_validate_address(hdev,
+ input->buffer_address, input->buffer_size)) {
+ dev_err(hdev->dev, "buffer address is not valid\n");
+ return -EINVAL;
+ }
+
+ WREG32(base_reg + 0x34, 0x3FFC);
+ WREG32(base_reg + 0x4, input->buffer_size);
+ WREG32(base_reg + 0x28, input->sink_mode);
+ WREG32(base_reg + 0x110, 0x700);
+ WREG32(base_reg + 0x118,
+ lower_32_bits(input->buffer_address));
+ WREG32(base_reg + 0x11C,
+ upper_32_bits(input->buffer_address));
+ WREG32(base_reg + 0x304, 3);
+ WREG32(base_reg + 0x308, 0xA);
+ WREG32(base_reg + 0x20, 1);
+ } else {
+ WREG32(base_reg + 0x34, 0);
+ WREG32(base_reg + 0x4, 0x400);
+ WREG32(base_reg + 0x118, 0);
+ WREG32(base_reg + 0x11C, 0);
+ WREG32(base_reg + 0x308, 0);
+ WREG32(base_reg + 0x28, 0);
+ WREG32(base_reg + 0x304, 0);
+
+ if (params->output_size >= sizeof(u32))
+ *(u32 *) params->output = RREG32(base_reg + 0x18);
+ }
+
+ return 0;
+}
+
+static int goya_config_funnel(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE + 0xFB0,
+ CORESIGHT_UNLOCK);
+
+ WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE,
+ params->enable ? 0x33F : 0);
+
+ return 0;
+}
+
+static int goya_config_bmon(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_bmon *input;
+ u64 base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+ u32 pcie_base = 0;
+
+ WREG32(base_reg + 0x104, 1);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0x200, lower_32_bits(input->start_addr0));
+ WREG32(base_reg + 0x204, upper_32_bits(input->start_addr0));
+ WREG32(base_reg + 0x208, lower_32_bits(input->addr_mask0));
+ WREG32(base_reg + 0x20C, upper_32_bits(input->addr_mask0));
+ WREG32(base_reg + 0x240, lower_32_bits(input->start_addr1));
+ WREG32(base_reg + 0x244, upper_32_bits(input->start_addr1));
+ WREG32(base_reg + 0x248, lower_32_bits(input->addr_mask1));
+ WREG32(base_reg + 0x24C, upper_32_bits(input->addr_mask1));
+ WREG32(base_reg + 0x224, 0);
+ WREG32(base_reg + 0x234, 0);
+ WREG32(base_reg + 0x30C, input->bw_win);
+ WREG32(base_reg + 0x308, input->win_capture);
+
+ /* PCIE IF BMON bug WA */
+ if (params->reg_idx != GOYA_BMON_PCIE_MSTR_RD &&
+ params->reg_idx != GOYA_BMON_PCIE_MSTR_WR &&
+ params->reg_idx != GOYA_BMON_PCIE_SLV_RD &&
+ params->reg_idx != GOYA_BMON_PCIE_SLV_WR)
+ pcie_base = 0xA000000;
+
+ WREG32(base_reg + 0x700, pcie_base | 0xB00 | (input->id << 12));
+ WREG32(base_reg + 0x708, pcie_base | 0xA00 | (input->id << 12));
+ WREG32(base_reg + 0x70C, pcie_base | 0xC00 | (input->id << 12));
+
+ WREG32(base_reg + 0x100, 0x11);
+ WREG32(base_reg + 0x304, 0x1);
+ } else {
+ WREG32(base_reg + 0x200, 0);
+ WREG32(base_reg + 0x204, 0);
+ WREG32(base_reg + 0x208, 0xFFFFFFFF);
+ WREG32(base_reg + 0x20C, 0xFFFFFFFF);
+ WREG32(base_reg + 0x240, 0);
+ WREG32(base_reg + 0x244, 0);
+ WREG32(base_reg + 0x248, 0xFFFFFFFF);
+ WREG32(base_reg + 0x24C, 0xFFFFFFFF);
+ WREG32(base_reg + 0x224, 0xFFFFFFFF);
+ WREG32(base_reg + 0x234, 0x1070F);
+ WREG32(base_reg + 0x30C, 0);
+ WREG32(base_reg + 0x308, 0xFFFF);
+ WREG32(base_reg + 0x700, 0xA000B00);
+ WREG32(base_reg + 0x708, 0xA000A00);
+ WREG32(base_reg + 0x70C, 0xA000C00);
+ WREG32(base_reg + 0x100, 1);
+ WREG32(base_reg + 0x304, 0);
+ WREG32(base_reg + 0x104, 0);
+ }
+
+ return 0;
+}
+
+static int goya_config_spmu(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ u64 base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+ struct hl_debug_params_spmu *input = params->input;
+ u64 *output;
+ u32 output_arr_len;
+ u32 events_num;
+ u32 overflow_idx;
+ u32 cycle_cnt_idx;
+ int i;
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ if (input->event_types_num < 3) {
+ dev_err(hdev->dev,
+ "not enough values for SPMU enable\n");
+ return -EINVAL;
+ }
+
+ WREG32(base_reg + 0xE04, 0x41013046);
+ WREG32(base_reg + 0xE04, 0x41013040);
+
+ for (i = 0 ; i < input->event_types_num ; i++)
+ WREG32(base_reg + 0x400 + i * 4, input->event_types[i]);
+
+ WREG32(base_reg + 0xE04, 0x41013041);
+ WREG32(base_reg + 0xC00, 0x8000003F);
+ } else {
+ output = params->output;
+ output_arr_len = params->output_size / 8;
+ events_num = output_arr_len - 2;
+ overflow_idx = output_arr_len - 2;
+ cycle_cnt_idx = output_arr_len - 1;
+
+ if (!output)
+ return -EINVAL;
+
+ if (output_arr_len < 3) {
+ dev_err(hdev->dev,
+ "not enough values for SPMU disable\n");
+ return -EINVAL;
+ }
+
+ WREG32(base_reg + 0xE04, 0x41013040);
+
+ for (i = 0 ; i < events_num ; i++)
+ output[i] = RREG32(base_reg + i * 8);
+
+ output[overflow_idx] = RREG32(base_reg + 0xCC0);
+
+ output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
+ output[cycle_cnt_idx] <<= 32;
+ output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
+
+ WREG32(base_reg + 0xCC0, 0);
+ }
+
+ return 0;
+}
+
+static int goya_config_timestamp(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+ if (params->enable) {
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
+ }
+
+ return 0;
+}
+
+int goya_debug_coresight(struct hl_device *hdev, void *data)
+{
+ struct hl_debug_params *params = data;
+ u32 val;
+ int rc;
+
+ switch (params->op) {
+ case HL_DEBUG_OP_STM:
+ rc = goya_config_stm(hdev, params);
+ break;
+ case HL_DEBUG_OP_ETF:
+ rc = goya_config_etf(hdev, params);
+ break;
+ case HL_DEBUG_OP_ETR:
+ rc = goya_config_etr(hdev, params);
+ break;
+ case HL_DEBUG_OP_FUNNEL:
+ rc = goya_config_funnel(hdev, params);
+ break;
+ case HL_DEBUG_OP_BMON:
+ rc = goya_config_bmon(hdev, params);
+ break;
+ case HL_DEBUG_OP_SPMU:
+ rc = goya_config_spmu(hdev, params);
+ break;
+ case HL_DEBUG_OP_TIMESTAMP:
+ rc = goya_config_timestamp(hdev, params);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
+ return -EINVAL;
+ }
+
+ /* Perform read from the device to flush all configuration */
+ val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ return rc;
+}
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index 575003238401..d95d1b2f860d 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -6,6 +6,7 @@
*/
#include "goyaP.h"
+#include "include/goya/asic_reg/goya_regs.h"
/*
* goya_set_block_as_protected - set the given block as protected
@@ -2159,6 +2160,8 @@ static void goya_init_protection_bits(struct hl_device *hdev)
* Bits 7-11 represent the word offset inside the 128 bytes.
* Bits 2-6 represent the bit location inside the word.
*/
+ u32 pb_addr, mask;
+ u8 word_offset;
goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
@@ -2237,6 +2240,14 @@ static void goya_init_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
+ goya_pb_set_block(hdev, mmTPC0_NRTR_BASE);
+ goya_pb_set_block(hdev, mmTPC_PLL_BASE);
+
+ pb_addr = (mmTPC_PLL_CLK_RLX_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC_PLL_CLK_RLX_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC_PLL_CLK_RLX_0 & 0x7C) >> 2);
+
+ WREG32(pb_addr + word_offset, mask);
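	/*
	 * Worked example of the bit mapping described above, assuming
	 * PROT_BITS_OFFS is 0xF80 (i.e. it doubles as the bits 7-11 mask, as
	 * the word_offset computation implies): for a hypothetical register
	 * whose low 12 address bits are 0x25C, word_offset =
	 * ((0x25C & 0xF80) >> 7) << 2 = 16 and mask =
	 * 1 << ((0x25C & 0x7C) >> 2) = 1 << 23, so its protection bit is
	 * bit 23 of the word at byte offset 16 in the block's
	 * protection-bits area.
	 */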
goya_init_mme_protection_bits(hdev);
@@ -2294,8 +2305,8 @@ void goya_init_security(struct hl_device *hdev)
u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
- u32 lbw_rng11_base = 0xFCE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
- u32 lbw_rng11_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng11_base = 0xFCE02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng11_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a8ee52c880cd..71243b319920 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -11,8 +11,6 @@
#include "include/armcp_if.h"
#include "include/qman_if.h"
-#define pr_fmt(fmt) "habanalabs: " fmt
-
#include <linux/cdev.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
@@ -33,6 +31,9 @@
#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
+#define HL_ARMCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_ARMCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
+
#define HL_MAX_QUEUES 128
#define HL_MAX_JOBS_PER_CS 64
@@ -48,8 +49,9 @@
/**
* struct pgt_info - MMU hop page info.
- * @node: hash linked-list node for the pgts hash of pgts.
- * @addr: physical address of the pgt.
+ * @node: hash linked-list node for the shadow hash of pgts.
+ * @phys_addr: physical address of the pgt.
+ * @shadow_addr: shadow hop in the host.
* @ctx: pointer to the owner ctx.
* @num_of_ptes: indicates how many ptes are used in the pgt.
*
@@ -59,10 +61,11 @@
* page, it is freed with its pgt_info structure.
*/
struct pgt_info {
- struct hlist_node node;
- u64 addr;
- struct hl_ctx *ctx;
- int num_of_ptes;
+ struct hlist_node node;
+ u64 phys_addr;
+ u64 shadow_addr;
+ struct hl_ctx *ctx;
+ int num_of_ptes;
};
struct hl_device;
@@ -132,8 +135,6 @@ enum hl_device_hw_state {
* @dram_user_base_address: DRAM physical start address for user access.
* @dram_size: DRAM total size.
* @dram_pci_bar_size: size of PCI bar towards DRAM.
- * @host_phys_base_address: base physical address of host memory for
- * transactions that the device generates.
* @max_power_default: max power of the device after reset
* @va_space_host_start_address: base address of virtual memory range for
* mapping host memory.
@@ -145,6 +146,8 @@ enum hl_device_hw_state {
* mapping DRAM memory.
* @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
* fault.
+ * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
+ * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
* @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
* @mmu_dram_default_page_addr: DRAM default page physical address.
* @mmu_pgt_size: MMU page tables total size.
@@ -179,13 +182,14 @@ struct asic_fixed_properties {
u64 dram_user_base_address;
u64 dram_size;
u64 dram_pci_bar_size;
- u64 host_phys_base_address;
u64 max_power_default;
u64 va_space_host_start_address;
u64 va_space_host_end_address;
u64 va_space_dram_start_address;
u64 va_space_dram_end_address;
u64 dram_size_for_default_page_mapping;
+ u64 pcie_dbi_base_address;
+ u64 pcie_aux_dbi_reg_addr;
u64 mmu_pgt_addr;
u64 mmu_dram_default_page_addr;
u32 mmu_pgt_size;
@@ -314,6 +318,18 @@ struct hl_cs_job;
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
+#define HL_CPU_PKT_SHIFT 5
+#define HL_CPU_PKT_SIZE (1 << HL_CPU_PKT_SHIFT)
+#define HL_CPU_PKT_MASK (~((1 << HL_CPU_PKT_SHIFT) - 1))
+#define HL_CPU_MAX_PKTS_IN_CB 32
+#define HL_CPU_CB_SIZE (HL_CPU_PKT_SIZE * \
+ HL_CPU_MAX_PKTS_IN_CB)
+#define HL_CPU_CB_QUEUE_SIZE (HL_QUEUE_LENGTH * HL_CPU_CB_SIZE)
+
+/* KMD <-> ArmCP shared memory size (EQ + PQ + CPU CB queue) */
+#define HL_CPU_ACCESSIBLE_MEM_SIZE (HL_EQ_SIZE_IN_BYTES + \
+ HL_QUEUE_SIZE_IN_BYTES + \
+ HL_CPU_CB_QUEUE_SIZE)
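/*
 * For scale: HL_CPU_PKT_SIZE is 1 << 5 = 32 bytes, so one CPU CB holds
 * 32 packets * 32 bytes = 1KB (HL_CPU_CB_SIZE) and the CB queue region is
 * HL_QUEUE_LENGTH KB. The shared KMD <-> ArmCP region is that plus one event
 * queue of HL_EQ_LENGTH entries and one PQ, per the sum above.
 */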
/**
* struct hl_hw_queue - describes a H/W transport queue.
@@ -381,14 +397,12 @@ struct hl_eq {
/**
* enum hl_asic_type - supported ASIC types.
- * @ASIC_AUTO_DETECT: ASIC type will be automatically set.
- * @ASIC_GOYA: Goya device.
* @ASIC_INVALID: Invalid ASIC type.
+ * @ASIC_GOYA: Goya device.
*/
enum hl_asic_type {
- ASIC_AUTO_DETECT,
- ASIC_GOYA,
- ASIC_INVALID
+ ASIC_INVALID,
+ ASIC_GOYA
};
struct hl_cs_parser;
@@ -436,19 +450,19 @@ enum hl_pll_frequency {
* @cb_mmap: maps a CB.
* @ring_doorbell: increment PI on a given QMAN.
* @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
- * @dma_alloc_coherent: Allocate coherent DMA memory by calling
- * dma_alloc_coherent(). This is ASIC function because its
- * implementation is not trivial when the driver is loaded
- * in simulation mode (not upstreamed).
- * @dma_free_coherent: Free coherent DMA memory by calling dma_free_coherent().
- * This is ASIC function because its implementation is not
- * trivial when the driver is loaded in simulation mode
- * (not upstreamed).
+ * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
+ * dma_alloc_coherent(). This is ASIC function because
+ * its implementation is not trivial when the driver
+ * is loaded in simulation mode (not upstreamed).
+ * @asic_dma_free_coherent: Free coherent DMA memory by calling
+ * dma_free_coherent(). This is ASIC function because
+ * its implementation is not trivial when the driver
+ * is loaded in simulation mode (not upstreamed).
* @get_int_queue_base: get the internal queue base address.
* @test_queues: run simple test on all queues for sanity check.
- * @dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
- * size of allocation is HL_DMA_POOL_BLK_SIZE.
- * @dma_pool_free: free small DMA allocation from pool.
+ * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
+ * size of allocation is HL_DMA_POOL_BLK_SIZE.
+ * @asic_dma_pool_free: free small DMA allocation from pool.
* @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
* @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
* @hl_dma_unmap_sg: DMA unmap scatter-gather list.
@@ -472,8 +486,7 @@ enum hl_pll_frequency {
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
- * @enable_clock_gating: enable clock gating for reducing power consumption.
- * @disable_clock_gating: disable clock for accessing registers on HBW.
+ * @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
* @soft_reset_late_init: perform certain actions needed after soft reset.
* @hw_queues_lock: acquire H/W queues lock.
@@ -482,6 +495,12 @@ enum hl_pll_frequency {
* @get_eeprom_data: retrieve EEPROM data from F/W.
* @send_cpu_message: send buffer to ArmCP.
* @get_hw_state: retrieve the H/W state
+ * @pci_bars_map: Map PCI BARs.
+ * @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
+ * the old address the BAR pointed to, or U64_MAX on failure.
+ * @init_iatu: Initialize the iATU unit inside the PCI controller.
+ * @rreg: Read a register. Needed for simulator support.
+ * @wreg: Write a register. Needed for simulator support.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -499,27 +518,27 @@ struct hl_asic_funcs {
u64 kaddress, phys_addr_t paddress, u32 size);
void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
- void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size,
+ void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
- void (*dma_free_coherent)(struct hl_device *hdev, size_t size,
+ void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle);
void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
int (*test_queues)(struct hl_device *hdev);
- void* (*dma_pool_zalloc)(struct hl_device *hdev, size_t size,
+ void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
gfp_t mem_flags, dma_addr_t *dma_handle);
- void (*dma_pool_free)(struct hl_device *hdev, void *vaddr,
+ void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
dma_addr_t dma_addr);
void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
size_t size, dma_addr_t *dma_handle);
void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
size_t size, void *vaddr);
void (*hl_dma_unmap_sg)(struct hl_device *hdev,
- struct scatterlist *sg, int nents,
+ struct scatterlist *sgl, int nents,
enum dma_data_direction dir);
int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
int (*asic_dma_map_sg)(struct hl_device *hdev,
- struct scatterlist *sg, int nents,
+ struct scatterlist *sgl, int nents,
enum dma_data_direction dir);
u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
struct sg_table *sgt);
@@ -543,9 +562,8 @@ struct hl_asic_funcs {
void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
- void (*enable_clock_gating)(struct hl_device *hdev);
- void (*disable_clock_gating)(struct hl_device *hdev);
- bool (*is_device_idle)(struct hl_device *hdev);
+ int (*debug_coresight)(struct hl_device *hdev, void *data);
+ bool (*is_device_idle)(struct hl_device *hdev, char *buf, size_t size);
int (*soft_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
@@ -555,6 +573,11 @@ struct hl_asic_funcs {
int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
u16 len, u32 timeout, long *result);
enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
+ int (*pci_bars_map)(struct hl_device *hdev);
+ u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
+ int (*init_iatu)(struct hl_device *hdev);
+ u32 (*rreg)(struct hl_device *hdev, u32 reg);
+ void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
};
@@ -582,7 +605,8 @@ struct hl_va_range {
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
- * @mmu_hash: holds a mapping from virtual address to pgt_info structure.
+ * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
+ * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
* @hpriv: pointer to the private (KMD) data of the process (fd).
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
@@ -601,17 +625,19 @@ struct hl_va_range {
* DRAM mapping.
* @cs_lock: spinlock to protect cs_sequence.
* @dram_phys_mem: amount of used physical DRAM memory by this context.
- * @thread_restore_token: token to prevent multiple threads of the same context
- * from running the restore phase. Only one thread
- * should run it.
- * @thread_restore_wait_token: token to prevent the threads that didn't run
- * the restore phase from moving to their execution
- * phase before the restore phase has finished.
+ * @thread_ctx_switch_token: token to prevent multiple threads of the same
+ * context from running the context switch phase.
+ * Only a single thread should run it.
+ * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
+ * the context switch phase from moving to their
+ * execution phase before the context switch phase
+ * has finished.
* @asid: context's unique address space ID in the device's MMU.
*/
struct hl_ctx {
DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
- DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
struct hl_fpriv *hpriv;
struct hl_device *hdev;
struct kref refcount;
@@ -625,8 +651,8 @@ struct hl_ctx {
u64 *dram_default_hops;
spinlock_t cs_lock;
atomic64_t dram_phys_mem;
- atomic_t thread_restore_token;
- u32 thread_restore_wait_token;
+ atomic_t thread_ctx_switch_token;
+ u32 thread_ctx_switch_wait_token;
u32 asid;
};
@@ -753,8 +779,6 @@ struct hl_cs_job {
* @patched_cb_size: the size of the CB after parsing.
* @ext_queue: whether the job is for external queue or internal queue.
* @job_id: the id of the related job inside the related CS.
- * @use_virt_addr: whether to treat the addresses in the CB as virtual during
- * parsing.
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
@@ -767,7 +791,6 @@ struct hl_cs_parser {
u32 patched_cb_size;
u8 ext_queue;
u8 job_id;
- u8 use_virt_addr;
};
@@ -850,6 +873,29 @@ struct hl_vm {
u8 init_done;
};
+
+/*
+ * DEBUG, PROFILING STRUCTURE
+ */
+
+/**
+ * struct hl_debug_params - Coresight debug parameters.
+ * @input: pointer to component specific input parameters.
+ * @output: pointer to component specific output parameters.
+ * @output_size: size of output buffer.
+ * @reg_idx: relevant register ID.
+ * @op: component operation to execute.
+ * @enable: true to enable component debugging, false otherwise.
+ */
+struct hl_debug_params {
+ void *input;
+ void *output;
+ u32 output_size;
+ u32 reg_idx;
+ u32 op;
+ bool enable;
+};
+
/*
* FILE PRIVATE STRUCTURE
*/
@@ -973,13 +1019,10 @@ struct hl_dbg_device_entry {
u32 hl_rreg(struct hl_device *hdev, u32 reg);
void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
-#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
- readl_poll_timeout(hdev->rmmio + addr, val, cond, sleep_us, timeout_us)
-
-#define RREG32(reg) hl_rreg(hdev, (reg))
-#define WREG32(reg, v) hl_wreg(hdev, (reg), (v))
+#define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
+#define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
- hl_rreg(hdev, (reg)))
+ hdev->asic_funcs->rreg(hdev, (reg)))
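/*
 * RREG32()/WREG32() now dispatch through the per-ASIC rreg/wreg hooks; for
 * Goya these are wired to hl_rreg()/hl_wreg() in goya_funcs earlier in this
 * patch, so behaviour on a real PCI device is unchanged while a simulator
 * build can plug in its own register accessors.
 */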
#define WREG32_P(reg, val, mask) \
do { \
@@ -997,6 +1040,36 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
(val) << REG_FIELD_SHIFT(reg, field))
+#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t __timeout; \
+ /* timeout should be longer when working with simulator */ \
+ if (hdev->pdev) \
+ __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ else \
+ __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ might_sleep_if(sleep_us); \
+ for (;;) { \
+ (val) = RREG32(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
+ (val) = RREG32(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+
+#define HL_ENG_BUSY(buf, size, fmt, ...) ({ \
+ if (buf) \
+ snprintf(buf, size, fmt, ##__VA_ARGS__); \
+ false; \
+ })
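/*
 * These two macros cooperate in the paths added by this patch:
 * hl_poll_timeout() re-reads a register through RREG32() until the condition
 * holds or the deadline (stretched 10x when there is no PCI device, i.e. on
 * the simulator) expires, evaluating to 0 or -ETIMEDOUT, while HL_ENG_BUSY()
 * formats the busy engine's name into the caller-supplied buffer (when one
 * is given) and evaluates to false, which is why goya_is_device_idle() can
 * simply "return HL_ENG_BUSY(...)" to report a non-idle engine.
 */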
+
struct hwmon_chip_info;
/**
@@ -1047,7 +1120,8 @@ struct hl_device_reset_work {
* @asic_specific: ASIC specific information to use only from ASIC files.
* @mmu_pgt_pool: pool of available MMU hops.
* @vm: virtual memory manager for MMU.
- * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context
+ * @mmu_cache_lock: protects MMU cache invalidation as it can serve only one context at a time.
+ * @mmu_shadow_hop0: shadow mapping of the MMU hop 0 zone.
* @hwmon_dev: H/W monitor device.
* @pm_mng_profile: current power management profile.
* @hl_chip_info: ASIC's sensors information.
@@ -1082,6 +1156,7 @@ struct hl_device_reset_work {
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
+ * @dma_mask: the dma mask that was set for this device
*/
struct hl_device {
struct pci_dev *pdev;
@@ -1117,6 +1192,7 @@ struct hl_device {
struct gen_pool *mmu_pgt_pool;
struct hl_vm vm;
struct mutex mmu_cache_lock;
+ void *mmu_shadow_hop0;
struct device *hwmon_dev;
enum hl_pm_mng_profile pm_mng_profile;
struct hwmon_chip_info *hl_chip_info;
@@ -1151,6 +1227,7 @@ struct hl_device {
u8 dram_default_page_mapping;
u8 init_done;
u8 device_cpu_disabled;
+ u8 dma_mask;
/* Parameters for bring-up */
u8 mmu_enable;
@@ -1245,6 +1322,7 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
int hl_device_open(struct inode *inode, struct file *filp);
bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
+enum hl_device_status hl_device_status(struct hl_device *hdev);
int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
enum hl_asic_type asic_type, int minor);
void destroy_hdev(struct hl_device *hdev);
@@ -1351,6 +1429,32 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
+int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+ void __iomem *dst);
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
+int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
+ u16 len, u32 timeout, long *result);
+int hl_fw_test_cpu_queue(struct hl_device *hdev);
+void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle);
+void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+ void *vaddr);
+int hl_fw_send_heartbeat(struct hl_device *hdev);
+int hl_fw_armcp_info_get(struct hl_device *hdev);
+int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+
+int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
+ bool is_wc[3]);
+int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
+int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
+ u64 addr);
+int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
+ u64 dram_base_address, u64 host_phys_base_address,
+ u64 host_phys_size);
+int hl_pci_init(struct hl_device *hdev, u8 dma_mask);
+void hl_pci_fini(struct hl_device *hdev);
+int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
+
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 748601463f11..5f4d155be767 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -6,6 +6,8 @@
*
*/
+#define pr_fmt(fmt) "habanalabs: " fmt
+
#include "habanalabs.h"
#include <linux/pci.h>
@@ -218,7 +220,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->disabled = true;
hdev->pdev = pdev; /* can be NULL in case of simulator device */
- if (asic_type == ASIC_AUTO_DETECT) {
+ if (pdev) {
hdev->asic_type = get_asic_type(pdev->device);
if (hdev->asic_type == ASIC_INVALID) {
dev_err(&pdev->dev, "Unsupported ASIC\n");
@@ -229,6 +231,9 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->asic_type = asic_type;
}
+ /* Set default DMA mask to 32 bits */
+ hdev->dma_mask = 32;
+
mutex_lock(&hl_devs_idr_lock);
if (minor == -1) {
@@ -334,7 +339,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
" device found [%04x:%04x] (rev %x)\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
- rc = create_hdev(&hdev, pdev, ASIC_AUTO_DETECT, -1);
+ rc = create_hdev(&hdev, pdev, ASIC_INVALID, -1);
if (rc)
return rc;
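create_hdev() now defaults hdev->dma_mask to 32 bits; an ASIC that supports wider addressing would presumably raise it via the new hl_pci_set_dma_mask() helper once the PCI layer is up. A hedged sketch (the 48-bit value and the call site are assumptions, not from this patch):

/*
 * Hypothetical sketch, not part of the patch: raising the default
 * 32-bit DMA mask after PCI initialization. The 48-bit value is an
 * assumption for illustration.
 */
static int example_set_wide_dma_mask(struct hl_device *hdev)
{
	int rc;

	rc = hl_pci_set_dma_mask(hdev, 48);
	if (rc)
		dev_err(hdev->dev, "Failed to set 48-bit DMA mask\n");

	return rc;
}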
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 2c2739a3c5ec..eeefb22023e9 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -12,6 +12,32 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
+static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
+ [HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
+ [HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
+ [HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
+ [HL_DEBUG_OP_FUNNEL] = 0,
+ [HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
+ [HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
+ [HL_DEBUG_OP_TIMESTAMP] = 0
+
+};
+
+static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_device_status dev_stat = {0};
+ u32 size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!size) || (!out))
+ return -EINVAL;
+
+ dev_stat.status = hl_device_status(hdev);
+
+ return copy_to_user(out, &dev_stat,
+ min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
+}
+
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
struct hl_info_hw_ip_info hw_ip = {0};
@@ -93,21 +119,91 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev);
+ hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, NULL, 0);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}
+static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
+{
+ struct hl_debug_params *params;
+ void *input = NULL, *output = NULL;
+ int rc;
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ params->reg_idx = args->reg_idx;
+ params->enable = args->enable;
+ params->op = args->op;
+
+ if (args->input_ptr && args->input_size) {
+ input = memdup_user((const void __user *) args->input_ptr,
+ args->input_size);
+ if (IS_ERR(input)) {
+ rc = PTR_ERR(input);
+ input = NULL;
+ dev_err(hdev->dev,
+ "error %d when copying input debug data\n", rc);
+ goto out;
+ }
+
+ params->input = input;
+ }
+
+ if (args->output_ptr && args->output_size) {
+ output = kzalloc(args->output_size, GFP_KERNEL);
+ if (!output) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ params->output = output;
+ params->output_size = args->output_size;
+ }
+
+ rc = hdev->asic_funcs->debug_coresight(hdev, params);
+ if (rc) {
+ dev_err(hdev->dev,
+ "debug coresight operation failed %d\n", rc);
+ goto out;
+ }
+
+ if (output) {
+ if (copy_to_user((void __user *) (uintptr_t) args->output_ptr,
+ output,
+ args->output_size)) {
+ dev_err(hdev->dev,
+ "copy to user failed in debug ioctl\n");
+ rc = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ kfree(params);
+ kfree(output);
+ kfree(input);
+
+ return rc;
+}
+
static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_info_args *args = data;
struct hl_device *hdev = hpriv->hdev;
int rc;
+ /* We want to return device status even if it is disabled or in reset */
+ if (args->op == HL_INFO_DEVICE_STATUS)
+ return device_status_info(hdev, args);
+
if (hl_device_disabled_or_in_reset(hdev)) {
- dev_err(hdev->dev,
- "Device is disabled or in reset. Can't execute INFO IOCTL\n");
+ dev_warn_ratelimited(hdev->dev,
+ "Device is %s. Can't execute INFO IOCTL\n",
+ atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
return -EBUSY;
}
@@ -137,6 +233,40 @@ static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
return rc;
}
+static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_debug_args *args = data;
+ struct hl_device *hdev = hpriv->hdev;
+ int rc = 0;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is %s. Can't execute DEBUG IOCTL\n",
+ atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+ return -EBUSY;
+ }
+
+ switch (args->op) {
+ case HL_DEBUG_OP_ETR:
+ case HL_DEBUG_OP_ETF:
+ case HL_DEBUG_OP_STM:
+ case HL_DEBUG_OP_FUNNEL:
+ case HL_DEBUG_OP_BMON:
+ case HL_DEBUG_OP_SPMU:
+ case HL_DEBUG_OP_TIMESTAMP:
+ args->input_size =
+ min(args->input_size, hl_debug_struct_size[args->op]);
+ rc = debug_coresight(hdev, args);
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid request %d\n", args->op);
+ rc = -ENOTTY;
+ break;
+ }
+
+ return rc;
+}
+
#define HL_IOCTL_DEF(ioctl, _func) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
@@ -145,7 +275,8 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
- HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl)
+ HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};
#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
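Because HL_INFO_DEVICE_STATUS is handled before the disabled/in-reset check, user space can poll device health even while a reset is in flight. A hypothetical user-space sketch (field names follow the kernel code above; the header location and exact uapi layout are assumptions):

/*
 * Hypothetical user-space sketch, not part of the patch: query the new
 * HL_INFO_DEVICE_STATUS op through the INFO ioctl.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* assumed uapi header location */

static int query_device_status(int fd)
{
	struct hl_info_device_status status;
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	memset(&status, 0, sizeof(status));

	args.op = HL_INFO_DEVICE_STATUS;
	args.return_pointer = (uint64_t) (uintptr_t) &status;
	args.return_size = sizeof(status);

	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return -1;	/* errno holds the failure reason */

	return status.status;
}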
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index ef3bb6951360..2894d8975933 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -82,7 +82,7 @@ static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
bd += hl_pi_2_offset(q->pi);
bd->ctl = __cpu_to_le32(ctl);
bd->len = __cpu_to_le32(len);
- bd->ptr = __cpu_to_le64(ptr + hdev->asic_prop.host_phys_base_address);
+ bd->ptr = __cpu_to_le64(ptr);
q->pi = hl_queue_inc_ptr(q->pi);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
@@ -263,9 +263,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
* checked in hl_queue_sanity_checks
*/
cq = &hdev->completion_queue[q->hw_queue_id];
- cq_addr = cq->bus_address +
- hdev->asic_prop.host_phys_base_address;
- cq_addr += cq->pi * sizeof(struct hl_cq_entry);
+ cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len,
cq_addr,
@@ -415,14 +413,20 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
}
static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
- struct hl_hw_queue *q)
+ struct hl_hw_queue *q, bool is_cpu_queue)
{
void *p;
int rc;
- p = hdev->asic_funcs->dma_alloc_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ if (is_cpu_queue)
+ p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ &q->bus_address);
+ else
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
@@ -446,8 +450,15 @@ static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
return 0;
free_queue:
- hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address, q->bus_address);
+ if (is_cpu_queue)
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address);
+ else
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address,
+ q->bus_address);
return rc;
}
@@ -474,12 +485,12 @@ static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q);
+ return ext_and_cpu_hw_queue_init(hdev, q, true);
}
static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q);
+ return ext_and_cpu_hw_queue_init(hdev, q, false);
}
/*
@@ -569,8 +580,15 @@ static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
kfree(q->shadow_queue);
- hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address, q->bus_address);
+ if (q->queue_type == QUEUE_TYPE_CPU)
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address);
+ else
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address,
+ q->bus_address);
}
int hl_hw_queues_create(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index 9dddb917e72c..1f1e35e86d84 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -32,8 +32,6 @@ struct hl_eq_entry {
#define EQ_CTL_EVENT_TYPE_SHIFT 16
#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
-#define EVENT_QUEUE_MSIX_IDX 5
-
enum pq_init_status {
PQ_INIT_STATUS_NA = 0,
PQ_INIT_STATUS_READY_FOR_CP,
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
index 2cf5c46b6e8e..4e0dbbbbde20 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
@@ -188,4 +188,3 @@
#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF
#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
index 840ccffa1081..f3faf1aad91a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
@@ -58,4 +58,3 @@
#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214
#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
index f23cb3e41c30..cf657918962a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
@@ -46,4 +46,3 @@
#define mmCPU_IF_AXI_SPLIT_INTR 0x442130
#endif /* ASIC_REG_CPU_IF_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
index 8fc97f838ada..8c8f9726d4b9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
@@ -102,4 +102,3 @@
#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440
#endif /* ASIC_REG_CPU_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
index 61c8cd9ce58b..0b246fe6ad04 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
@@ -206,4 +206,3 @@
#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC
#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
index 92960ef5e308..5449031722f2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
@@ -206,4 +206,3 @@
#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC
#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
index 4e37871a51bb..a4768521d18a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
@@ -206,4 +206,3 @@
#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC
#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
index a2d6aeb32a18..619d01897ff8 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
@@ -206,4 +206,3 @@
#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC
#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
index 400d6fd3acf5..038617e163f1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
@@ -206,4 +206,3 @@
#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC
#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
index 8d965443c51e..f43b564af1be 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
@@ -102,4 +102,3 @@
#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF
#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
index 8bfcb001189d..c3bfc1b8e3fd 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
@@ -178,4 +178,3 @@
#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158
#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
index 9f33f351a3c1..bc977488c072 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
@@ -206,4 +206,3 @@
#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
index d8293745a02b..c4abc7ff1fc6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
@@ -224,4 +224,3 @@
#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604
#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
index 10619dbb9b17..b17f72c31ab6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
@@ -462,4 +462,3 @@
#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
index c693bc5dcb22..bf360b301154 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
@@ -176,4 +176,3 @@
#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C
#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
index da928390f89c..51d432d05ac4 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
@@ -176,4 +176,3 @@
#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C
#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
index b4f06e9b71d6..18fc0c2b6cc2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
@@ -176,4 +176,3 @@
#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C
#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
index 53e3cd78a06b..6cf7204bf5cc 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
@@ -176,4 +176,3 @@
#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C
#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
index e0eb5f260201..36fef2682875 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
@@ -176,4 +176,3 @@
#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C
#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index a161ecfe74de..8618891d5afa 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -189,18 +189,6 @@
1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
-/* PCI CONFIGURATION SPACE */
-#define mmPCI_CONFIG_ELBI_ADDR 0xFF0
-#define mmPCI_CONFIG_ELBI_DATA 0xFF4
-#define mmPCI_CONFIG_ELBI_CTRL 0xFF8
-#define PCI_CONFIG_ELBI_CTRL_WRITE (1 << 31)
-
-#define mmPCI_CONFIG_ELBI_STS 0xFFC
-#define PCI_CONFIG_ELBI_STS_ERR (1 << 30)
-#define PCI_CONFIG_ELBI_STS_DONE (1 << 31)
-#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \
- PCI_CONFIG_ELBI_STS_DONE)
-
#define GOYA_IRQ_HBW_ID_MASK 0x1FFF
#define GOYA_IRQ_HBW_ID_SHIFT 0
#define GOYA_IRQ_HBW_INTERNAL_ID_MASK 0xE000
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index 6cb0b6e54d41..506e71e201e1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -12,6 +12,7 @@
#include "stlb_regs.h"
#include "mmu_regs.h"
#include "pcie_aux_regs.h"
+#include "pcie_wrap_regs.h"
#include "psoc_global_conf_regs.h"
#include "psoc_spi_regs.h"
#include "psoc_mme_pll_regs.h"
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
index 0a743817aad7..4ae7fed8b18c 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
@@ -102,4 +102,3 @@
#define mmIC_PLL_FREQ_CALC_EN 0x4A3440
#endif /* ASIC_REG_IC_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
index 4408188aa067..6d35d852798b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
@@ -102,4 +102,3 @@
#define mmMC_PLL_FREQ_CALC_EN 0x4A1440
#endif /* ASIC_REG_MC_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
index 687bca5c5fe3..6c23f8b96e7e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
@@ -650,4 +650,3 @@
#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1
#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
index c248339a1cbe..122e9d529939 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
@@ -328,4 +328,3 @@
#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604
#endif /* ASIC_REG_MME1_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
index 7a2b777bdc4f..00ce2252bbfb 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
@@ -328,4 +328,3 @@
#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604
#endif /* ASIC_REG_MME2_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
index b78f8bc387fc..8e3eb7fd2070 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
@@ -328,4 +328,3 @@
#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604
#endif /* ASIC_REG_MME3_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
index d9a4a02cefa3..79b67bbc8567 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
@@ -328,4 +328,3 @@
#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604
#endif /* ASIC_REG_MME4_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
index 205adc988407..0ac3c37ce47f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
@@ -328,4 +328,3 @@
#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604
#endif /* ASIC_REG_MME5_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
index fcec68388278..50c49cce72a6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
@@ -328,4 +328,3 @@
#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604
#endif /* ASIC_REG_MME6_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
index a0d4382fbbd0..fe7d95bdcef9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
@@ -370,4 +370,3 @@
#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
index 5c2f6b870a58..5f8b85d2b4b1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C
#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
index c7b1b0bb3384..1882c413cbe0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
@@ -1534,4 +1534,3 @@
#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
#endif /* ASIC_REG_MME_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
index d4bfa58dce19..e464e381555c 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
@@ -462,4 +462,3 @@
#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
#endif /* ASIC_REG_MME_QM_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
index b5b1c776f6c3..538708beffc9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
@@ -176,4 +176,3 @@
#define mmMME_QM_CQ_BUF_RDATA 0xD830C
#endif /* ASIC_REG_MME_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
index 9436b1e2705a..0396cbfd5c89 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
@@ -1150,4 +1150,3 @@
#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC
#endif /* ASIC_REG_MME_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
index 3a78078d3c4c..c3e69062b135 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
@@ -140,4 +140,3 @@
#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
#endif /* ASIC_REG_MMU_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
index bec6c014135c..7ec81f12031e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
@@ -50,4 +50,3 @@
#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040
#endif /* ASIC_REG_MMU_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
index 209e41402a11..ceb59f2e28b3 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
@@ -206,4 +206,3 @@
#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
index 447e5d4e7dc8..dd067f301ac2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
@@ -224,4 +224,3 @@
#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604
#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
index daaf5d9079dc..35b1d8ac6f63 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
@@ -240,4 +240,3 @@
#define mmPCIE_AUX_PERST 0xC079B8
#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
new file mode 100644
index 000000000000..d1e55aace4a0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_WRAP_REGS_H_
+#define ASIC_REG_PCIE_WRAP_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_WRAP (Prototype: PCIE_WRAP)
+ *****************************************
+ */
+
+#define mmPCIE_WRAP_PHY_RST_N 0xC01300
+
+#define mmPCIE_WRAP_OUTSTAND_TRANS 0xC01400
+
+#define mmPCIE_WRAP_MASK_REQ 0xC01404
+
+#define mmPCIE_WRAP_IND_AWADDR_L 0xC01500
+
+#define mmPCIE_WRAP_IND_AWADDR_H 0xC01504
+
+#define mmPCIE_WRAP_IND_AWLEN 0xC01508
+
+#define mmPCIE_WRAP_IND_AWSIZE 0xC0150C
+
+#define mmPCIE_WRAP_IND_AWBURST 0xC01510
+
+#define mmPCIE_WRAP_IND_AWLOCK 0xC01514
+
+#define mmPCIE_WRAP_IND_AWCACHE 0xC01518
+
+#define mmPCIE_WRAP_IND_AWPROT 0xC0151C
+
+#define mmPCIE_WRAP_IND_AWVALID 0xC01520
+
+#define mmPCIE_WRAP_IND_WDATA_0 0xC01524
+
+#define mmPCIE_WRAP_IND_WDATA_1 0xC01528
+
+#define mmPCIE_WRAP_IND_WDATA_2 0xC0152C
+
+#define mmPCIE_WRAP_IND_WDATA_3 0xC01530
+
+#define mmPCIE_WRAP_IND_WSTRB 0xC01544
+
+#define mmPCIE_WRAP_IND_WLAST 0xC01548
+
+#define mmPCIE_WRAP_IND_WVALID 0xC0154C
+
+#define mmPCIE_WRAP_IND_BRESP 0xC01550
+
+#define mmPCIE_WRAP_IND_BVALID 0xC01554
+
+#define mmPCIE_WRAP_IND_ARADDR_0 0xC01558
+
+#define mmPCIE_WRAP_IND_ARADDR_1 0xC0155C
+
+#define mmPCIE_WRAP_IND_ARLEN 0xC01560
+
+#define mmPCIE_WRAP_IND_ARSIZE 0xC01564
+
+#define mmPCIE_WRAP_IND_ARBURST 0xC01568
+
+#define mmPCIE_WRAP_IND_ARLOCK 0xC0156C
+
+#define mmPCIE_WRAP_IND_ARCACHE 0xC01570
+
+#define mmPCIE_WRAP_IND_ARPROT 0xC01574
+
+#define mmPCIE_WRAP_IND_ARVALID 0xC01578
+
+#define mmPCIE_WRAP_IND_RDATA_0 0xC0157C
+
+#define mmPCIE_WRAP_IND_RDATA_1 0xC01580
+
+#define mmPCIE_WRAP_IND_RDATA_2 0xC01584
+
+#define mmPCIE_WRAP_IND_RDATA_3 0xC01588
+
+#define mmPCIE_WRAP_IND_RLAST 0xC0159C
+
+#define mmPCIE_WRAP_IND_RRESP 0xC015A0
+
+#define mmPCIE_WRAP_IND_RVALID 0xC015A4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO 0xC015A8
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_0 0xC015AC
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_1 0xC015B0
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_P_TAG 0xC015B4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_ATU_BYPAS 0xC015B8
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_FUNC_NUM 0xC015BC
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_ACT 0xC015C0
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_NUM 0xC015C4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_TLPPRFX 0xC015C8
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO 0xC015CC
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_TLPPRFX 0xC015D0
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_ATU_BYP 0xC015D4
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_FUNC_NUM 0xC015D8
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_ACT 0xC015DC
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_NUM 0xC015E0
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO 0xC01800
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_0 0xC01804
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_1 0xC01808
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_P_TAG 0xC0180C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_ATU_BYPAS 0xC01810
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_FUNC_NUM 0xC01814
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_ACT 0xC01818
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_NUM 0xC0181C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_TLPPRFX 0xC01820
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO 0xC01824
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_TLPPRFX 0xC01828
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_ATU_BYP 0xC0182C
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_FUNC_NUM 0xC01830
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_ACT 0xC01834
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_NUM 0xC01838
+
+#define mmPCIE_WRAP_MAX_QID 0xC01900
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_0 0xC01910
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_1 0xC01914
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_2 0xC01918
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_3 0xC0191C
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_0 0xC01920
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_1 0xC01924
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_2 0xC01928
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_3 0xC0192C
+
+#define mmPCIE_WRAP_DB_MASK 0xC01940
+
+#define mmPCIE_WRAP_SQ_BASE_ADDR_H 0xC01A00
+
+#define mmPCIE_WRAP_SQ_BASE_ADDR_L 0xC01A04
+
+#define mmPCIE_WRAP_SQ_STRIDE_ACCRESS 0xC01A08
+
+#define mmPCIE_WRAP_SQ_POP_CMD 0xC01A10
+
+#define mmPCIE_WRAP_SQ_POP_DATA 0xC01A14
+
+#define mmPCIE_WRAP_DB_INTR_0 0xC01A20
+
+#define mmPCIE_WRAP_DB_INTR_1 0xC01A24
+
+#define mmPCIE_WRAP_DB_INTR_2 0xC01A28
+
+#define mmPCIE_WRAP_DB_INTR_3 0xC01A2C
+
+#define mmPCIE_WRAP_DB_INTR_4 0xC01A30
+
+#define mmPCIE_WRAP_DB_INTR_5 0xC01A34
+
+#define mmPCIE_WRAP_DB_INTR_6 0xC01A38
+
+#define mmPCIE_WRAP_DB_INTR_7 0xC01A3C
+
+#define mmPCIE_WRAP_MMU_BYPASS_DMA 0xC01A80
+
+#define mmPCIE_WRAP_MMU_BYPASS_NON_DMA 0xC01A84
+
+#define mmPCIE_WRAP_ASID_NON_DMA 0xC01A90
+
+#define mmPCIE_WRAP_ASID_DMA_0 0xC01AA0
+
+#define mmPCIE_WRAP_ASID_DMA_1 0xC01AA4
+
+#define mmPCIE_WRAP_ASID_DMA_2 0xC01AA8
+
+#define mmPCIE_WRAP_ASID_DMA_3 0xC01AAC
+
+#define mmPCIE_WRAP_ASID_DMA_4 0xC01AB0
+
+#define mmPCIE_WRAP_ASID_DMA_5 0xC01AB4
+
+#define mmPCIE_WRAP_ASID_DMA_6 0xC01AB8
+
+#define mmPCIE_WRAP_ASID_DMA_7 0xC01ABC
+
+#define mmPCIE_WRAP_CPU_HOT_RST 0xC01AE0
+
+#define mmPCIE_WRAP_AXI_PROT_OVR 0xC01AE4
+
+#define mmPCIE_WRAP_CACHE_OVR 0xC01B00
+
+#define mmPCIE_WRAP_LOCK_OVR 0xC01B04
+
+#define mmPCIE_WRAP_PROT_OVR 0xC01B08
+
+#define mmPCIE_WRAP_ARUSER_OVR 0xC01B0C
+
+#define mmPCIE_WRAP_AWUSER_OVR 0xC01B10
+
+#define mmPCIE_WRAP_ARUSER_OVR_EN 0xC01B14
+
+#define mmPCIE_WRAP_AWUSER_OVR_EN 0xC01B18
+
+#define mmPCIE_WRAP_MAX_OUTSTAND 0xC01B20
+
+#define mmPCIE_WRAP_MST_IN 0xC01B24
+
+#define mmPCIE_WRAP_RSP_OK 0xC01B28
+
+#define mmPCIE_WRAP_LBW_CACHE_OVR 0xC01B40
+
+#define mmPCIE_WRAP_LBW_LOCK_OVR 0xC01B44
+
+#define mmPCIE_WRAP_LBW_PROT_OVR 0xC01B48
+
+#define mmPCIE_WRAP_LBW_ARUSER_OVR 0xC01B4C
+
+#define mmPCIE_WRAP_LBW_AWUSER_OVR 0xC01B50
+
+#define mmPCIE_WRAP_LBW_ARUSER_OVR_EN 0xC01B58
+
+#define mmPCIE_WRAP_LBW_AWUSER_OVR_EN 0xC01B5C
+
+#define mmPCIE_WRAP_LBW_MAX_OUTSTAND 0xC01B60
+
+#define mmPCIE_WRAP_LBW_MST_IN 0xC01B64
+
+#define mmPCIE_WRAP_LBW_RSP_OK 0xC01B68
+
+#define mmPCIE_WRAP_QUEUE_INIT 0xC01C00
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_0 0xC01C10
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_1 0xC01C14
+
+#define mmPCIE_WRAP_DB_AWUSER 0xC01D00
+
+#define mmPCIE_WRAP_DB_ARUSER 0xC01D04
+
+#define mmPCIE_WRAP_PCIE_AWUSER 0xC01D08
+
+#define mmPCIE_WRAP_PCIE_ARUSER 0xC01D0C
+
+#define mmPCIE_WRAP_PSOC_AWUSER 0xC01D10
+
+#define mmPCIE_WRAP_PSOC_ARUSER 0xC01D14
+
+#define mmPCIE_WRAP_SCH_Q_AWUSER 0xC01D18
+
+#define mmPCIE_WRAP_SCH_Q_ARUSER 0xC01D1C
+
+#define mmPCIE_WRAP_PSOC2PCI_AWUSER 0xC01D40
+
+#define mmPCIE_WRAP_PSOC2PCI_ARUSER 0xC01D44
+
+#define mmPCIE_WRAP_DRAIN_TIMEOUT 0xC01D50
+
+#define mmPCIE_WRAP_DRAIN_CFG 0xC01D54
+
+#define mmPCIE_WRAP_DB_AXI_ERR 0xC01DE0
+
+#define mmPCIE_WRAP_SPMU_INTR 0xC01DE4
+
+#define mmPCIE_WRAP_AXI_INTR 0xC01DE8
+
+#define mmPCIE_WRAP_E2E_CTRL 0xC01DF0
+
+#endif /* ASIC_REG_PCIE_WRAP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
index 8eda4de58788..9271ea95ebe9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
@@ -102,4 +102,3 @@
#define mmPSOC_EMMC_PLL_FREQ_CALC_EN 0xC70440
#endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
index d4bf0e1db4df..324266653c9a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
@@ -444,4 +444,3 @@
#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
index cfbdd2c9c5c7..8141f422e712 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
@@ -742,4 +742,3 @@
#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BA44
#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
index 6723d8f76f30..4789ebb9c337 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
@@ -102,4 +102,3 @@
#define mmPSOC_MME_PLL_FREQ_CALC_EN 0xC71440
#endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
index abcded0531c9..27a296ea6c3d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
@@ -102,4 +102,3 @@
#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440
#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
index 5925c7477c25..66aee7fa6b1e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
@@ -140,4 +140,3 @@
#define mmPSOC_SPI_RSVD_2 0xC430FC
#endif /* ASIC_REG_PSOC_SPI_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
index d56c9fa0e7ba..2ea1770b078f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
@@ -80,4 +80,3 @@
#define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX 0x201330
#endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
index 5624544303ca..37e0713efa73 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
@@ -80,4 +80,3 @@
#define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX 0x205330
#endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
index 3322bc0bd1df..d2572279a2b9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
@@ -80,4 +80,3 @@
#define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX 0x209330
#endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
index 81e393db2027..68c5b402c506 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
@@ -80,4 +80,3 @@
#define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX 0x20D330
#endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
index b2e11b1de385..a42f1ba06d28 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
@@ -80,4 +80,3 @@
#define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX 0x211330
#endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
index b4ea8cae2757..94f2ed4a36bd 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
@@ -114,4 +114,3 @@
#define STLB_SRAM_INIT_BUSY_DATA_MASK 0x10
#endif /* ASIC_REG_STLB_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
index 0f5281d3e65b..35013f65acd2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
@@ -52,4 +52,3 @@
#define mmSTLB_SRAM_INIT 0x49004C
#endif /* ASIC_REG_STLB_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
index e5587b49eecd..89c9507a512f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
@@ -1604,4 +1604,3 @@
#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000
#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
index 2be28a63c50a..7d71c4b73a5e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
@@ -884,4 +884,3 @@
#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE06E2C
#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
index 9aa2d8b53207..9395f2458771 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
@@ -370,4 +370,3 @@
#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
#endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
index 3572752ba66e..bc51df573bf0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmTPC0_CMDQ_CQ_BUF_RDATA 0xE0930C
#endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
index ed866d93c440..553c6b6bd5ec 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
@@ -344,4 +344,3 @@
#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK 0x1
#endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
index f1a1b4fa4841..8495479c3659 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
@@ -310,4 +310,3 @@
#define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL 0x3040334
#endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
index 7f86621179a5..43fafcf01041 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
@@ -206,4 +206,3 @@
#define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
#endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
index dc280f4e6608..ce3346dd2042 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
@@ -224,4 +224,3 @@
#define mmTPC0_NRTR_NON_LIN_SCRAMB 0xE00604
#endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
index 80d97ee3d8d6..2e4b45947944 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
@@ -462,4 +462,3 @@
#define TPC0_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
index 7552d4ba61fe..4fa09eb88878 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
@@ -176,4 +176,3 @@
#define mmTPC0_QM_CQ_BUF_RDATA 0xE0830C
#endif /* ASIC_REG_TPC0_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
index 19894413474a..928eef1808ae 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
@@ -884,4 +884,3 @@
#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE46E2C
#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
index 9099ebd7ab23..30ae0f307328 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmTPC1_CMDQ_CQ_BUF_RDATA 0xE4930C
#endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
index bc8b9a10391f..b95de4f95ba9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
@@ -176,4 +176,3 @@
#define mmTPC1_QM_CQ_BUF_RDATA 0xE4830C
#endif /* ASIC_REG_TPC1_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
index ae267f8f457e..0f91e307879e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
@@ -320,4 +320,3 @@
#define mmTPC1_RTR_NON_LIN_SCRAMB 0xE40604
#endif /* ASIC_REG_TPC1_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
index 9c33fc039036..73421227f35b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
@@ -884,4 +884,3 @@
#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE86E2C
#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
index 7a643887d6e1..27b66bf2da9f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmTPC2_CMDQ_CQ_BUF_RDATA 0xE8930C
#endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
index f3e32c018064..31e5b2f53905 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
@@ -176,4 +176,3 @@
#define mmTPC2_QM_CQ_BUF_RDATA 0xE8830C
#endif /* ASIC_REG_TPC2_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
index 0eb0cd1fbd19..4eddeaa15d94 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
@@ -320,4 +320,3 @@
#define mmTPC2_RTR_NON_LIN_SCRAMB 0xE80604
#endif /* ASIC_REG_TPC2_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
index 0baf63c69b25..ce573a1a8361 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
@@ -884,4 +884,3 @@
#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC6E2C
#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
index 82a5261e852f..11d81fca0a0f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmTPC3_CMDQ_CQ_BUF_RDATA 0xEC930C
#endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
index b05b1e18e664..e41595a19e69 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
@@ -176,4 +176,3 @@
#define mmTPC3_QM_CQ_BUF_RDATA 0xEC830C
#endif /* ASIC_REG_TPC3_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
index 5a2fd7652650..34a438b1efe5 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
@@ -320,4 +320,3 @@
#define mmTPC3_RTR_NON_LIN_SCRAMB 0xEC0604
#endif /* ASIC_REG_TPC3_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
index d64a100075f2..d44caf0fc1bb 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
@@ -884,4 +884,3 @@
#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF06E2C
#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
index 565b42885b0d..f13a6532961f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmTPC4_CMDQ_CQ_BUF_RDATA 0xF0930C
#endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
index 196da3f12710..db081fc17cfc 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
@@ -176,4 +176,3 @@
#define mmTPC4_QM_CQ_BUF_RDATA 0xF0830C
#endif /* ASIC_REG_TPC4_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
index 8b54041d144a..8c5372303b28 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
@@ -320,4 +320,3 @@
#define mmTPC4_RTR_NON_LIN_SCRAMB 0xF00604
#endif /* ASIC_REG_TPC4_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
index 3f00954fcdba..5139fde71011 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
@@ -884,4 +884,3 @@
#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF46E2C
#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
index d8e72a8e18d7..1e7cd6e1e888 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmTPC5_CMDQ_CQ_BUF_RDATA 0xF4930C
#endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
index be2e68624709..ac0d3820cd6b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
@@ -176,4 +176,3 @@
#define mmTPC5_QM_CQ_BUF_RDATA 0xF4830C
#endif /* ASIC_REG_TPC5_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
index 6f301c7bbc2f..57f83bc3b17d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
@@ -320,4 +320,3 @@
#define mmTPC5_RTR_NON_LIN_SCRAMB 0xF40604
#endif /* ASIC_REG_TPC5_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
index 1e1168601c41..94e0191c06c1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
@@ -884,4 +884,3 @@
#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF86E2C
#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
index fbca6b47284e..7a1a0e87b225 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmTPC6_CMDQ_CQ_BUF_RDATA 0xF8930C
#endif /* ASIC_REG_TPC6_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
index bf32465dabcb..80fa0fe0f60f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
@@ -176,4 +176,3 @@
#define mmTPC6_QM_CQ_BUF_RDATA 0xF8830C
#endif /* ASIC_REG_TPC6_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
index 609bb90e1046..d6cae8b8af66 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
@@ -320,4 +320,3 @@
#define mmTPC6_RTR_NON_LIN_SCRAMB 0xF80604
#endif /* ASIC_REG_TPC6_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
index bf2fd0f73906..234147adb779 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
@@ -884,4 +884,3 @@
#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC6E2C
#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
index 65d83043bf63..4c160632fe7d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
@@ -136,4 +136,3 @@
#define mmTPC7_CMDQ_CQ_BUF_RDATA 0xFC930C
#endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
index 3d5848d87304..0c13d4d167aa 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
@@ -224,4 +224,3 @@
#define mmTPC7_NRTR_NON_LIN_SCRAMB 0xFC0604
#endif /* ASIC_REG_TPC7_NRTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
index 25f5095f68fb..cbe11425bfb0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
@@ -176,4 +176,3 @@
#define mmTPC7_QM_CQ_BUF_RDATA 0xFC830C
#endif /* ASIC_REG_TPC7_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
index 920231d0afa5..e25e19660a9d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
@@ -102,4 +102,3 @@
#define mmTPC_PLL_FREQ_CALC_EN 0xE01440
#endif /* ASIC_REG_TPC_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
index 614149efa412..3f02a52ba4ce 100644
--- a/drivers/misc/habanalabs/include/goya/goya.h
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -8,10 +8,6 @@
#ifndef GOYA_H
#define GOYA_H
-#include "asic_reg/goya_regs.h"
-
-#include <linux/types.h>
-
#define SRAM_CFG_BAR_ID 0
#define MSIX_BAR_ID 2
#define DDR_BAR_ID 4
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h
index 497937a17ee9..bb7a1aa3279e 100644
--- a/drivers/misc/habanalabs/include/goya/goya_async_events.h
+++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h
@@ -9,7 +9,9 @@
#define __GOYA_ASYNC_EVENTS_H_
enum goya_async_event_id {
+ GOYA_ASYNC_EVENT_ID_PCIE_CORE = 32,
GOYA_ASYNC_EVENT_ID_PCIE_IF = 33,
+ GOYA_ASYNC_EVENT_ID_PCIE_PHY = 34,
GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36,
GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39,
GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42,
@@ -23,6 +25,8 @@ enum goya_async_event_id {
GOYA_ASYNC_EVENT_ID_MMU_ECC = 63,
GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64,
GOYA_ASYNC_EVENT_ID_DMA_ECC = 66,
+ GOYA_ASYNC_EVENT_ID_DDR0_PARITY = 69,
+ GOYA_ASYNC_EVENT_ID_DDR1_PARITY = 72,
GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75,
GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78,
GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79,
@@ -72,6 +76,7 @@ enum goya_async_event_id {
GOYA_ASYNC_EVENT_ID_MME_WACSD = 142,
GOYA_ASYNC_EVENT_ID_PLL0 = 143,
GOYA_ASYNC_EVENT_ID_PLL1 = 144,
+ GOYA_ASYNC_EVENT_ID_PLL2 = 145,
GOYA_ASYNC_EVENT_ID_PLL3 = 146,
GOYA_ASYNC_EVENT_ID_PLL4 = 147,
GOYA_ASYNC_EVENT_ID_PLL5 = 148,
@@ -81,6 +86,7 @@ enum goya_async_event_id {
GOYA_ASYNC_EVENT_ID_PSOC = 160,
GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171,
GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172,
+ GOYA_ASYNC_EVENT_ID_PCIE_PERST = 173,
GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174,
GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175,
GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176,
@@ -144,8 +150,11 @@ enum goya_async_event_id {
GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330,
GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331,
GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_3 = 333,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_4 = 334,
GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356,
GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361,
+ GOYA_ASYNC_EVENT_ID_FAN = 425,
GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430,
GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431,
GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 432,
diff --git a/drivers/misc/habanalabs/include/goya/goya_coresight.h b/drivers/misc/habanalabs/include/goya/goya_coresight.h
new file mode 100644
index 000000000000..6e933c0ca5cd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_coresight.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_CORESIGHT_H
+#define GOYA_CORESIGHT_H
+
+enum goya_debug_stm_regs_index {
+ GOYA_STM_FIRST = 0,
+ GOYA_STM_CPU = GOYA_STM_FIRST,
+ GOYA_STM_DMA_CH_0_CS,
+ GOYA_STM_DMA_CH_1_CS,
+ GOYA_STM_DMA_CH_2_CS,
+ GOYA_STM_DMA_CH_3_CS,
+ GOYA_STM_DMA_CH_4_CS,
+ GOYA_STM_DMA_MACRO_CS,
+ GOYA_STM_MME1_SBA,
+ GOYA_STM_MME3_SBB,
+ GOYA_STM_MME4_WACS2,
+ GOYA_STM_MME4_WACS,
+ GOYA_STM_MMU_CS,
+ GOYA_STM_PCIE,
+ GOYA_STM_PSOC,
+ GOYA_STM_TPC0_EML,
+ GOYA_STM_TPC1_EML,
+ GOYA_STM_TPC2_EML,
+ GOYA_STM_TPC3_EML,
+ GOYA_STM_TPC4_EML,
+ GOYA_STM_TPC5_EML,
+ GOYA_STM_TPC6_EML,
+ GOYA_STM_TPC7_EML,
+ GOYA_STM_LAST = GOYA_STM_TPC7_EML
+};
+
+enum goya_debug_etf_regs_index {
+ GOYA_ETF_FIRST = 0,
+ GOYA_ETF_CPU_0 = GOYA_ETF_FIRST,
+ GOYA_ETF_CPU_1,
+ GOYA_ETF_CPU_TRACE,
+ GOYA_ETF_DMA_CH_0_CS,
+ GOYA_ETF_DMA_CH_1_CS,
+ GOYA_ETF_DMA_CH_2_CS,
+ GOYA_ETF_DMA_CH_3_CS,
+ GOYA_ETF_DMA_CH_4_CS,
+ GOYA_ETF_DMA_MACRO_CS,
+ GOYA_ETF_MME1_SBA,
+ GOYA_ETF_MME3_SBB,
+ GOYA_ETF_MME4_WACS2,
+ GOYA_ETF_MME4_WACS,
+ GOYA_ETF_MMU_CS,
+ GOYA_ETF_PCIE,
+ GOYA_ETF_PSOC,
+ GOYA_ETF_TPC0_EML,
+ GOYA_ETF_TPC1_EML,
+ GOYA_ETF_TPC2_EML,
+ GOYA_ETF_TPC3_EML,
+ GOYA_ETF_TPC4_EML,
+ GOYA_ETF_TPC5_EML,
+ GOYA_ETF_TPC6_EML,
+ GOYA_ETF_TPC7_EML,
+ GOYA_ETF_LAST = GOYA_ETF_TPC7_EML
+};
+
+enum goya_debug_funnel_regs_index {
+ GOYA_FUNNEL_FIRST = 0,
+ GOYA_FUNNEL_CPU = GOYA_FUNNEL_FIRST,
+ GOYA_FUNNEL_DMA_CH_6_1,
+ GOYA_FUNNEL_DMA_MACRO_3_1,
+ GOYA_FUNNEL_MME0_RTR,
+ GOYA_FUNNEL_MME1_RTR,
+ GOYA_FUNNEL_MME2_RTR,
+ GOYA_FUNNEL_MME3_RTR,
+ GOYA_FUNNEL_MME4_RTR,
+ GOYA_FUNNEL_MME5_RTR,
+ GOYA_FUNNEL_PCIE,
+ GOYA_FUNNEL_PSOC,
+ GOYA_FUNNEL_TPC0_EML,
+ GOYA_FUNNEL_TPC1_EML,
+ GOYA_FUNNEL_TPC1_RTR,
+ GOYA_FUNNEL_TPC2_EML,
+ GOYA_FUNNEL_TPC2_RTR,
+ GOYA_FUNNEL_TPC3_EML,
+ GOYA_FUNNEL_TPC3_RTR,
+ GOYA_FUNNEL_TPC4_EML,
+ GOYA_FUNNEL_TPC4_RTR,
+ GOYA_FUNNEL_TPC5_EML,
+ GOYA_FUNNEL_TPC5_RTR,
+ GOYA_FUNNEL_TPC6_EML,
+ GOYA_FUNNEL_TPC6_RTR,
+ GOYA_FUNNEL_TPC7_EML,
+ GOYA_FUNNEL_LAST = GOYA_FUNNEL_TPC7_EML
+};
+
+enum goya_debug_bmon_regs_index {
+ GOYA_BMON_FIRST = 0,
+ GOYA_BMON_CPU_RD = GOYA_BMON_FIRST,
+ GOYA_BMON_CPU_WR,
+ GOYA_BMON_DMA_CH_0_0,
+ GOYA_BMON_DMA_CH_0_1,
+ GOYA_BMON_DMA_CH_1_0,
+ GOYA_BMON_DMA_CH_1_1,
+ GOYA_BMON_DMA_CH_2_0,
+ GOYA_BMON_DMA_CH_2_1,
+ GOYA_BMON_DMA_CH_3_0,
+ GOYA_BMON_DMA_CH_3_1,
+ GOYA_BMON_DMA_CH_4_0,
+ GOYA_BMON_DMA_CH_4_1,
+ GOYA_BMON_DMA_MACRO_0,
+ GOYA_BMON_DMA_MACRO_1,
+ GOYA_BMON_DMA_MACRO_2,
+ GOYA_BMON_DMA_MACRO_3,
+ GOYA_BMON_DMA_MACRO_4,
+ GOYA_BMON_DMA_MACRO_5,
+ GOYA_BMON_DMA_MACRO_6,
+ GOYA_BMON_DMA_MACRO_7,
+ GOYA_BMON_MME1_SBA_0,
+ GOYA_BMON_MME1_SBA_1,
+ GOYA_BMON_MME3_SBB_0,
+ GOYA_BMON_MME3_SBB_1,
+ GOYA_BMON_MME4_WACS2_0,
+ GOYA_BMON_MME4_WACS2_1,
+ GOYA_BMON_MME4_WACS2_2,
+ GOYA_BMON_MME4_WACS_0,
+ GOYA_BMON_MME4_WACS_1,
+ GOYA_BMON_MME4_WACS_2,
+ GOYA_BMON_MME4_WACS_3,
+ GOYA_BMON_MME4_WACS_4,
+ GOYA_BMON_MME4_WACS_5,
+ GOYA_BMON_MME4_WACS_6,
+ GOYA_BMON_MMU_0,
+ GOYA_BMON_MMU_1,
+ GOYA_BMON_PCIE_MSTR_RD,
+ GOYA_BMON_PCIE_MSTR_WR,
+ GOYA_BMON_PCIE_SLV_RD,
+ GOYA_BMON_PCIE_SLV_WR,
+ GOYA_BMON_TPC0_EML_0,
+ GOYA_BMON_TPC0_EML_1,
+ GOYA_BMON_TPC0_EML_2,
+ GOYA_BMON_TPC0_EML_3,
+ GOYA_BMON_TPC1_EML_0,
+ GOYA_BMON_TPC1_EML_1,
+ GOYA_BMON_TPC1_EML_2,
+ GOYA_BMON_TPC1_EML_3,
+ GOYA_BMON_TPC2_EML_0,
+ GOYA_BMON_TPC2_EML_1,
+ GOYA_BMON_TPC2_EML_2,
+ GOYA_BMON_TPC2_EML_3,
+ GOYA_BMON_TPC3_EML_0,
+ GOYA_BMON_TPC3_EML_1,
+ GOYA_BMON_TPC3_EML_2,
+ GOYA_BMON_TPC3_EML_3,
+ GOYA_BMON_TPC4_EML_0,
+ GOYA_BMON_TPC4_EML_1,
+ GOYA_BMON_TPC4_EML_2,
+ GOYA_BMON_TPC4_EML_3,
+ GOYA_BMON_TPC5_EML_0,
+ GOYA_BMON_TPC5_EML_1,
+ GOYA_BMON_TPC5_EML_2,
+ GOYA_BMON_TPC5_EML_3,
+ GOYA_BMON_TPC6_EML_0,
+ GOYA_BMON_TPC6_EML_1,
+ GOYA_BMON_TPC6_EML_2,
+ GOYA_BMON_TPC6_EML_3,
+ GOYA_BMON_TPC7_EML_0,
+ GOYA_BMON_TPC7_EML_1,
+ GOYA_BMON_TPC7_EML_2,
+ GOYA_BMON_TPC7_EML_3,
+ GOYA_BMON_LAST = GOYA_BMON_TPC7_EML_3
+};
+
+enum goya_debug_spmu_regs_index {
+ GOYA_SPMU_FIRST = 0,
+ GOYA_SPMU_DMA_CH_0_CS = GOYA_SPMU_FIRST,
+ GOYA_SPMU_DMA_CH_1_CS,
+ GOYA_SPMU_DMA_CH_2_CS,
+ GOYA_SPMU_DMA_CH_3_CS,
+ GOYA_SPMU_DMA_CH_4_CS,
+ GOYA_SPMU_DMA_MACRO_CS,
+ GOYA_SPMU_MME1_SBA,
+ GOYA_SPMU_MME3_SBB,
+ GOYA_SPMU_MME4_WACS2,
+ GOYA_SPMU_MME4_WACS,
+ GOYA_SPMU_MMU_CS,
+ GOYA_SPMU_PCIE,
+ GOYA_SPMU_TPC0_EML,
+ GOYA_SPMU_TPC1_EML,
+ GOYA_SPMU_TPC2_EML,
+ GOYA_SPMU_TPC3_EML,
+ GOYA_SPMU_TPC4_EML,
+ GOYA_SPMU_TPC5_EML,
+ GOYA_SPMU_TPC6_EML,
+ GOYA_SPMU_TPC7_EML,
+ GOYA_SPMU_LAST = GOYA_SPMU_TPC7_EML
+};
+
+#endif /* GOYA_CORESIGHT_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
index a9920cb4a07b..0fa80fe9f6cc 100644
--- a/drivers/misc/habanalabs/include/goya/goya_fw_if.h
+++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
@@ -8,6 +8,8 @@
#ifndef GOYA_FW_IF_H
#define GOYA_FW_IF_H
+#define GOYA_EVENT_QUEUE_MSIX_IDX 5
+
#define CPU_BOOT_ADDR 0x7FF8040000ull
#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index 7475732b9996..4cd04c090285 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -18,7 +18,8 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_IN_SPL,
CPU_BOOT_STATUS_IN_UBOOT,
CPU_BOOT_STATUS_DRAM_INIT_FAIL,
- CPU_BOOT_STATUS_FIT_CORRUPTED
+ CPU_BOOT_STATUS_FIT_CORRUPTED,
+ CPU_BOOT_STATUS_UBOOT_NOT_READY,
};
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index b680052ee3f0..71ea3c3e8ba3 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -14,16 +14,16 @@
#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1))
-#define PAGE_PRESENT_MASK 0x0000000000001
-#define SWAP_OUT_MASK 0x0000000000004
-#define LAST_MASK 0x0000000000800
-#define PHYS_ADDR_MASK 0x3FFFFFFFFF000ull
+#define PAGE_PRESENT_MASK 0x0000000000001ull
+#define SWAP_OUT_MASK 0x0000000000004ull
+#define LAST_MASK 0x0000000000800ull
+#define PHYS_ADDR_MASK 0xFFFFFFFFFFFFF000ull
#define HOP0_MASK 0x3000000000000ull
#define HOP1_MASK 0x0FF8000000000ull
#define HOP2_MASK 0x0007FC0000000ull
-#define HOP3_MASK 0x000003FE00000
-#define HOP4_MASK 0x00000001FF000
-#define OFFSET_MASK 0x0000000000FFF
+#define HOP3_MASK 0x000003FE00000ull
+#define HOP4_MASK 0x00000001FF000ull
+#define OFFSET_MASK 0x0000000000FFFull
#define HOP0_SHIFT 48
#define HOP1_SHIFT 39
@@ -32,7 +32,7 @@
#define HOP4_SHIFT 12
#define PTE_PHYS_ADDR_SHIFT 12
-#define PTE_PHYS_ADDR_MASK ~0xFFF
+#define PTE_PHYS_ADDR_MASK ~OFFSET_MASK
#define HL_PTE_SIZE sizeof(u64)
#define HOP_TABLE_SIZE PAGE_SIZE_4KB
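The hunk above widens PHYS_ADDR_MASK and gives every page-table mask an explicit ull suffix, so each one carries a 64-bit type when combined with 64-bit addresses; PTE_PHYS_ADDR_MASK is also rederived from OFFSET_MASK so the two definitions stay consistent. A minimal user-space sketch (not driver code) of the kind of truncation an unsuffixed, 32-bit-typed mask can cause:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000123456789ABCull;
	unsigned int mask32 = 0xFFF;	/* 32-bit type, like an unsuffixed
					 * constant stored in a 32-bit variable */

	/* ~mask32 is computed in 32 bits (0xFFFFF000) and zero-extended, so
	 * the upper half of the address is silently dropped.
	 */
	printf("0x%llx\n", (unsigned long long)(addr & ~mask32));	/* 0x56789000 */

	/* An explicitly 64-bit mask keeps the whole address. */
	printf("0x%llx\n", (unsigned long long)(addr & ~0xFFFull));	/* 0x123456789000 */
	return 0;
}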
diff --git a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
new file mode 100644
index 000000000000..d232081d4e0f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_PCI_GENERAL_H_
+#define INCLUDE_PCI_GENERAL_H_
+
+/* PCI CONFIGURATION SPACE */
+#define mmPCI_CONFIG_ELBI_ADDR 0xFF0
+#define mmPCI_CONFIG_ELBI_DATA 0xFF4
+#define mmPCI_CONFIG_ELBI_CTRL 0xFF8
+#define PCI_CONFIG_ELBI_CTRL_WRITE (1 << 31)
+
+#define mmPCI_CONFIG_ELBI_STS 0xFFC
+#define PCI_CONFIG_ELBI_STS_ERR (1 << 30)
+#define PCI_CONFIG_ELBI_STS_DONE (1 << 31)
+#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \
+ PCI_CONFIG_ELBI_STS_DONE)
+
+#endif /* INCLUDE_PCI_GENERAL_H_ */
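The CTRL/STS pair above implements a simple write-then-poll protocol; the actual polling loop appears in hl_pci_elbi_write() further down. As a quick illustration of how the status bits are meant to combine, a hypothetical helper (not part of the driver):

/* Hypothetical helper: a transaction completed successfully only when DONE is
 * set and ERR is clear, i.e. exactly the DONE bit inside the status mask.
 */
static inline bool elbi_transaction_ok(u32 sts)
{
	return (sts & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE;
}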
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index e69a09c10e3f..ea9f72ff456c 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -222,7 +222,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
- p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
&q->bus_address, GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
@@ -248,7 +248,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
*/
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
- hdev->asic_funcs->dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
(void *) (uintptr_t) q->kernel_address, q->bus_address);
}
@@ -284,8 +284,9 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
- p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
- &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ HL_EQ_SIZE_IN_BYTES,
+ &q->bus_address);
if (!p)
return -ENOMEM;
@@ -308,8 +309,9 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
flush_workqueue(hdev->eq_wq);
- hdev->asic_funcs->dma_free_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address, q->bus_address);
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+ HL_EQ_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address);
}
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
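These hunks assume a per-ASIC function table: common code such as hl_cq_init()/hl_eq_init() never calls the DMA API directly, it goes through hdev->asic_funcs. A rough sketch of the callbacks involved, with the signatures inferred from the call sites above (the real table is defined in habanalabs.h and holds many more entries):

/* Sketch only; names and signatures inferred from the calls above. */
struct hl_asic_dma_funcs_sketch {
	/* regular coherent allocations, e.g. completion queues */
	void *(*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
					 dma_addr_t *dma_handle, gfp_t flag);
	void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
				       void *cpu_addr, dma_addr_t dma_handle);

	/* allocations that must also be reachable by the device CPU,
	 * e.g. the event queue moved above
	 */
	void *(*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
					       size_t size,
					       dma_addr_t *dma_handle);
	void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
					     size_t size, void *vaddr);
};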
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index ce1fda40a8b8..d67d24c13efd 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -109,7 +109,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
page_size);
if (!phys_pg_pack->pages[i]) {
dev_err(hdev->dev,
- "ioctl failed to allocate page\n");
+ "Failed to allocate device memory (out of memory)\n");
rc = -ENOMEM;
goto page_err;
}
@@ -759,10 +759,6 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
- /* For accessing the host we need to turn on bit 39 */
- if (phys_pg_pack->created_from_userptr)
- paddr += hdev->asic_prop.host_phys_base_address;
-
rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
if (rc) {
dev_err(hdev->dev,
@@ -1046,10 +1042,17 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
mutex_lock(&ctx->mmu_lock);
- for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
+ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
if (hl_mmu_unmap(ctx, next_vaddr, page_size))
dev_warn_ratelimited(hdev->dev,
- "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+		/* Unmapping on Palladium can take a very long time, so avoid a
+		 * CPU soft lockup by sleeping a little between unmapping pages.
+		 */
+ if (hdev->pldm)
+ usleep_range(500, 1000);
+ }
hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
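The usleep_range() added above is one instance of a general rule: a loop that may spin for seconds in a sleepable context should yield periodically so the soft-lockup watchdog stays quiet. A generic sketch of the pattern (unmap_one() is a stand-in, not a driver function):

	for (i = 0 ; i < npages ; i++) {
		unmap_one(ctx, next_vaddr);		/* hypothetical helper */
		next_vaddr += page_size;

		/* Sleeping is legal here only because the caller holds a
		 * mutex, not a spinlock; on very slow platforms (simulation,
		 * emulation) this keeps the watchdog happy.
		 */
		if (hdev->pldm)
			usleep_range(500, 1000);
	}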
@@ -1083,6 +1086,64 @@ vm_type_err:
return rc;
}
+static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_ctx *ctx = hpriv->ctx;
+ u64 device_addr = 0;
+ u32 handle = 0;
+ int rc;
+
+ switch (args->in.op) {
+ case HL_MEM_OP_ALLOC:
+ if (args->in.alloc.mem_size == 0) {
+ dev_err(hdev->dev,
+ "alloc size must be larger than 0\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Force contiguous as there are no real MMU
+ * translations to overcome physical memory gaps
+ */
+ args->in.flags |= HL_MEM_CONTIGUOUS;
+ rc = alloc_device_memory(ctx, &args->in, &handle);
+
+ memset(args, 0, sizeof(*args));
+ args->out.handle = (__u64) handle;
+ break;
+
+ case HL_MEM_OP_FREE:
+ rc = free_device_memory(ctx, args->in.free.handle);
+ break;
+
+ case HL_MEM_OP_MAP:
+ if (args->in.flags & HL_MEM_USERPTR) {
+ device_addr = args->in.map_host.host_virt_addr;
+ rc = 0;
+ } else {
+ rc = get_paddr_from_handle(ctx, &args->in,
+ &device_addr);
+ }
+
+ memset(args, 0, sizeof(*args));
+ args->out.device_virt_addr = device_addr;
+ break;
+
+ case HL_MEM_OP_UNMAP:
+ rc = 0;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+ rc = -ENOTTY;
+ break;
+ }
+
+out:
+ return rc;
+}
+
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_mem_args *args = data;
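For orientation, both the MMU and the no-MMU paths are driven from user space through the same union hl_mem_args. A hedged sketch of an allocation call (the uapi header path and the HL_IOCTL_MEMORY request name are assumptions here, not taken from this patch):

#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>			/* assumed uapi header location */

static long long sketch_alloc_dram(int fd, unsigned long long size)
{
	union hl_mem_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_ALLOC;		/* no-MMU path forces HL_MEM_CONTIGUOUS */
	args.in.alloc.mem_size = size;

	if (ioctl(fd, HL_IOCTL_MEMORY, &args))	/* request name assumed */
		return -1;

	/* the handle is later passed back via args.in.free.handle */
	return (long long) args.out.handle;
}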
@@ -1094,104 +1155,54 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
if (hl_device_disabled_or_in_reset(hdev)) {
dev_warn_ratelimited(hdev->dev,
- "Device is disabled or in reset. Can't execute memory IOCTL\n");
+ "Device is %s. Can't execute MEMORY IOCTL\n",
+ atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
return -EBUSY;
}
- if (hdev->mmu_enable) {
- switch (args->in.op) {
- case HL_MEM_OP_ALLOC:
- if (!hdev->dram_supports_virtual_memory) {
- dev_err(hdev->dev,
- "DRAM alloc is not supported\n");
- rc = -EINVAL;
- goto out;
- }
- if (args->in.alloc.mem_size == 0) {
- dev_err(hdev->dev,
- "alloc size must be larger than 0\n");
- rc = -EINVAL;
- goto out;
- }
- rc = alloc_device_memory(ctx, &args->in, &handle);
-
- memset(args, 0, sizeof(*args));
- args->out.handle = (__u64) handle;
- break;
-
- case HL_MEM_OP_FREE:
- if (!hdev->dram_supports_virtual_memory) {
- dev_err(hdev->dev,
- "DRAM free is not supported\n");
- rc = -EINVAL;
- goto out;
- }
- rc = free_device_memory(ctx, args->in.free.handle);
- break;
-
- case HL_MEM_OP_MAP:
- rc = map_device_va(ctx, &args->in, &device_addr);
-
- memset(args, 0, sizeof(*args));
- args->out.device_virt_addr = device_addr;
- break;
-
- case HL_MEM_OP_UNMAP:
- rc = unmap_device_va(ctx,
- args->in.unmap.device_virt_addr);
- break;
+ if (!hdev->mmu_enable)
+ return mem_ioctl_no_mmu(hpriv, args);
- default:
- dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
- rc = -ENOTTY;
- break;
+ switch (args->in.op) {
+ case HL_MEM_OP_ALLOC:
+ if (!hdev->dram_supports_virtual_memory) {
+ dev_err(hdev->dev, "DRAM alloc is not supported\n");
+ rc = -EINVAL;
+ goto out;
}
- } else {
- switch (args->in.op) {
- case HL_MEM_OP_ALLOC:
- if (args->in.alloc.mem_size == 0) {
- dev_err(hdev->dev,
- "alloc size must be larger than 0\n");
- rc = -EINVAL;
- goto out;
- }
- /* Force contiguous as there are no real MMU
- * translations to overcome physical memory gaps
- */
- args->in.flags |= HL_MEM_CONTIGUOUS;
- rc = alloc_device_memory(ctx, &args->in, &handle);
+ if (args->in.alloc.mem_size == 0) {
+ dev_err(hdev->dev,
+ "alloc size must be larger than 0\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = alloc_device_memory(ctx, &args->in, &handle);
- memset(args, 0, sizeof(*args));
- args->out.handle = (__u64) handle;
- break;
+ memset(args, 0, sizeof(*args));
+ args->out.handle = (__u64) handle;
+ break;
- case HL_MEM_OP_FREE:
- rc = free_device_memory(ctx, args->in.free.handle);
- break;
+ case HL_MEM_OP_FREE:
+ rc = free_device_memory(ctx, args->in.free.handle);
+ break;
- case HL_MEM_OP_MAP:
- if (args->in.flags & HL_MEM_USERPTR) {
- device_addr = args->in.map_host.host_virt_addr;
- rc = 0;
- } else {
- rc = get_paddr_from_handle(ctx, &args->in,
- &device_addr);
- }
+ case HL_MEM_OP_MAP:
+ rc = map_device_va(ctx, &args->in, &device_addr);
- memset(args, 0, sizeof(*args));
- args->out.device_virt_addr = device_addr;
- break;
+ memset(args, 0, sizeof(*args));
+ args->out.device_virt_addr = device_addr;
+ break;
- case HL_MEM_OP_UNMAP:
- rc = 0;
- break;
+ case HL_MEM_OP_UNMAP:
+ rc = unmap_device_va(ctx,
+ args->in.unmap.device_virt_addr);
+ break;
- default:
- dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
- rc = -ENOTTY;
- break;
- }
+ default:
+ dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+ rc = -ENOTTY;
+ break;
}
out:
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 3a5a2cec8305..533d9315b6fb 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -11,13 +11,15 @@
#include <linux/genalloc.h>
#include <linux/slab.h>
-static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
+static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
+
+static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
struct pgt_info *pgt_info = NULL;
- hash_for_each_possible(ctx->mmu_hash, pgt_info, node,
- (unsigned long) addr)
- if (addr == pgt_info->addr)
+ hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
+ (unsigned long) hop_addr)
+ if (hop_addr == pgt_info->shadow_addr)
break;
return pgt_info;
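get_pgt_info() is now keyed by the host shadow address of a hop table rather than its device address. A sketch of the bookkeeping this relies on, with the field names taken from their uses in this file (the real struct is declared in habanalabs.h):

/* Sketch only; see habanalabs.h for the real definition. */
struct pgt_info_sketch {
	struct hlist_node	node;		/* linked into ctx->mmu_shadow_hash */
	u64			phys_addr;	/* hop table in the device pgt pool */
	u64			shadow_addr;	/* host (kzalloc'ed) copy of that hop */
	struct hl_ctx		*ctx;
	int			num_of_ptes;	/* valid PTEs in this hop table */
};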
@@ -25,45 +27,109 @@ static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
+ struct hl_device *hdev = ctx->hdev;
struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
- gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr,
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
+ hdev->asic_prop.mmu_hop_table_size);
hash_del(&pgt_info->node);
-
+ kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
kfree(pgt_info);
}
static u64 alloc_hop(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pgt_info *pgt_info;
- u64 addr;
+ u64 phys_addr, shadow_addr;
pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
if (!pgt_info)
return ULLONG_MAX;
- addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
- hdev->asic_prop.mmu_hop_table_size);
- if (!addr) {
+ phys_addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
+ prop->mmu_hop_table_size);
+ if (!phys_addr) {
dev_err(hdev->dev, "failed to allocate page\n");
- kfree(pgt_info);
- return ULLONG_MAX;
+ goto pool_add_err;
}
- pgt_info->addr = addr;
+ shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
+ GFP_KERNEL);
+ if (!shadow_addr)
+ goto shadow_err;
+
+ pgt_info->phys_addr = phys_addr;
+ pgt_info->shadow_addr = shadow_addr;
pgt_info->ctx = ctx;
pgt_info->num_of_ptes = 0;
- hash_add(ctx->mmu_hash, &pgt_info->node, addr);
+ hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
+
+ return shadow_addr;
+
+shadow_err:
+ gen_pool_free(hdev->mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size);
+pool_add_err:
+ kfree(pgt_info);
+
+ return ULLONG_MAX;
+}
+
+static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline u64 get_hop0_addr(struct hl_ctx *ctx)
+{
+ return (u64) (uintptr_t) ctx->hdev->mmu_shadow_hop0 +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline void flush(struct hl_ctx *ctx)
+{
+ /* flush all writes from all cores to reach PCI */
+ mb();
+ ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
+}
+
+/* transform the value to physical address when writing to H/W */
+static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+	/*
+	 * The value to write is the address of the next shadow hop, with the
+	 * flags in the 12 LSBs.
+	 * Hence, to get the value for the physical PTE, we clear the 12 LSBs,
+	 * translate the shadow hop to its associated physical hop, and add
+	 * back the original 12 LSBs.
+	 */
+ u64 phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK) |
+ (val & OFFSET_MASK);
+
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+ get_phys_addr(ctx, shadow_pte_addr),
+ phys_val);
+
+ *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
- return addr;
+/* do not transform the value to physical address when writing to H/W */
+static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
+ u64 val)
+{
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+ get_phys_addr(ctx, shadow_pte_addr),
+ val);
+ *(u64 *) (uintptr_t) shadow_pte_addr = val;
}
-static inline void clear_pte(struct hl_device *hdev, u64 pte_addr)
+/* clear the last and present bits */
+static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
- /* clear the last and present bits */
- hdev->asic_funcs->write_pte(hdev, pte_addr, 0);
+ /* no need to transform the value to physical address */
+ write_final_pte(ctx, pte_addr, 0);
}
static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
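The functions above introduce the core idea of this rewrite: every hop table now has two copies, a physical one in the device page-table pool (pgt_info->phys_addr, updated through write_pte()/write_final_pte() and pushed out by flush()) and a host shadow (pgt_info->shadow_addr) that later page walks read without touching PCI. A worked example of the translation write_pte() performs, with purely illustrative numbers:

/* Illustrative numbers only (assuming mmu_hop_table_size == 0x1000):
 *
 *   shadow hop1 base (host kzalloc)  : 0xffff888012345000
 *   its pgt_info->phys_addr (device) : 0x0000000020001000
 *   shadow_pte_addr                  : 0xffff888012345208   (offset 0x208)
 *   val                              : <shadow hop2 base> | PAGE_PRESENT_MASK
 *
 * write_pte() then
 *   1. builds the device-side value, swapping the shadow hop2 base for its
 *      physical hop2 base while keeping the 12 flag bits:
 *        phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK)
 *                   | (val & OFFSET_MASK);
 *   2. writes phys_val to the device-side PTE address
 *        get_phys_addr(ctx, shadow_pte_addr) == 0x20001000 + 0x208;
 *   3. stores the untranslated val into the shadow PTE at 0xffff888012345208,
 *      so later walks can read it with a plain load instead of an MMIO read.
 */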
@@ -98,12 +164,6 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
return num_of_ptes_left;
}
-static inline u64 get_hop0_addr(struct hl_ctx *ctx)
-{
- return ctx->hdev->asic_prop.mmu_pgt_addr +
- (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
u64 virt_addr, u64 mask, u64 shift)
{
@@ -136,7 +196,7 @@ static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
}
-static inline u64 get_next_hop_addr(u64 curr_pte)
+static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
return curr_pte & PHYS_ADDR_MASK;
@@ -147,7 +207,7 @@ static inline u64 get_next_hop_addr(u64 curr_pte)
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
bool *is_new_hop)
{
- u64 hop_addr = get_next_hop_addr(curr_pte);
+ u64 hop_addr = get_next_hop_addr(ctx, curr_pte);
if (hop_addr == ULLONG_MAX) {
hop_addr = alloc_hop(ctx);
@@ -157,106 +217,30 @@ static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
return hop_addr;
}
-/*
- * hl_mmu_init - init the mmu module
- *
- * @hdev: pointer to the habanalabs device structure
- *
- * This function does the following:
- * - Allocate max_asid zeroed hop0 pgts so no mapping is available
- * - Enable mmu in hw
- * - Invalidate the mmu cache
- * - Create a pool of pages for pgts
- * - Returns 0 on success
- *
- * This function depends on DMA QMAN to be working!
- */
-int hl_mmu_init(struct hl_device *hdev)
+/* translates shadow address inside hop to a physical address */
+static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- int rc;
+ u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
+ u64 shadow_hop_addr = shadow_addr & ~page_mask;
+ u64 pte_offset = shadow_addr & page_mask;
+ u64 phys_hop_addr;
- if (!hdev->mmu_enable)
- return 0;
-
- /* MMU HW init was already done in device hw_init() */
-
- mutex_init(&hdev->mmu_cache_lock);
-
- hdev->mmu_pgt_pool =
- gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
-
- if (!hdev->mmu_pgt_pool) {
- dev_err(hdev->dev, "Failed to create page gen pool\n");
- rc = -ENOMEM;
- goto err_pool_create;
- }
-
- rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
- prop->mmu_hop0_tables_total_size,
- prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
- -1);
- if (rc) {
- dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
- goto err_pool_add;
- }
-
- return 0;
-
-err_pool_add:
- gen_pool_destroy(hdev->mmu_pgt_pool);
-err_pool_create:
- mutex_destroy(&hdev->mmu_cache_lock);
+ if (shadow_hop_addr != get_hop0_addr(ctx))
+ phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
+ else
+ phys_hop_addr = get_phys_hop0_addr(ctx);
- return rc;
+ return phys_hop_addr + pte_offset;
}
-/*
- * hl_mmu_fini - release the mmu module.
- *
- * @hdev: pointer to the habanalabs device structure
- *
- * This function does the following:
- * - Disable mmu in hw
- * - free the pgts pool
- *
- * All ctxs should be freed before calling this func
- */
-void hl_mmu_fini(struct hl_device *hdev)
-{
- if (!hdev->mmu_enable)
- return;
-
- gen_pool_destroy(hdev->mmu_pgt_pool);
-
- mutex_destroy(&hdev->mmu_cache_lock);
-
- /* MMU HW fini will be done in device hw_fini() */
-}
-
-/**
- * hl_mmu_ctx_init() - initialize a context for using the MMU module.
- * @ctx: pointer to the context structure to initialize.
- *
- * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
- * page tables hops related to this context and an optional DRAM default page
- * mapping.
- * Return: 0 on success, non-zero otherwise.
- */
-int hl_mmu_ctx_init(struct hl_ctx *ctx)
+static int dram_default_mapping_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
- hop3_pte_addr, pte_val;
+ u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
+ hop2_pte_addr, hop3_pte_addr, pte_val;
int rc, i, j, hop3_allocated = 0;
- if (!hdev->mmu_enable)
- return 0;
-
- mutex_init(&ctx->mmu_lock);
- hash_init(ctx->mmu_hash);
-
if (!hdev->dram_supports_virtual_memory ||
!hdev->dram_default_page_mapping)
return 0;
@@ -269,10 +253,10 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
total_hops = num_of_hop3 + 2;
ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
- if (!ctx->dram_default_hops) {
- rc = -ENOMEM;
- goto alloc_err;
- }
+ if (!ctx->dram_default_hops)
+ return -ENOMEM;
+
+ hop0_addr = get_hop0_addr(ctx);
hop1_addr = alloc_hop(ctx);
if (hop1_addr == ULLONG_MAX) {
@@ -304,17 +288,17 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
/* need only pte 0 in hops 0 and 1 */
pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- hdev->asic_funcs->write_pte(hdev, get_hop0_addr(ctx), pte_val);
+ write_pte(ctx, hop0_addr, pte_val);
pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- hdev->asic_funcs->write_pte(hdev, hop1_addr, pte_val);
+ write_pte(ctx, hop1_addr, pte_val);
get_pte(ctx, hop1_addr);
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
- hdev->asic_funcs->write_pte(hdev, hop2_pte_addr, pte_val);
+ write_pte(ctx, hop2_pte_addr, pte_val);
get_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
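The default-mapping chain built above is easy to size with concrete numbers. The figures below are purely illustrative; the 512 entries per hop follow from the 9-bit hop index masks in mmu_general.h:

/* Illustrative sizing only:
 *   dram_size_for_default_page_mapping = 16 GB
 *   dram_page_size                     = 2 MB  (huge pages, LAST_MASK set)
 *   PTE_ENTRIES_IN_HOP                 = 512
 *
 *   pages to cover  = 16 GB / 2 MB     = 8192
 *   num_of_hop3     = 8192 / 512       = 16 hop3 tables
 *   total_hops      = num_of_hop3 + 2  = 18
 * so ctx->dram_default_hops holds 16 hop3 addresses plus one hop2 and one
 * hop1 address, exactly the layout used by dram_default_mapping_fini() below.
 */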
@@ -325,33 +309,183 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
- hdev->asic_funcs->write_pte(hdev, hop3_pte_addr,
- pte_val);
+ write_final_pte(ctx, hop3_pte_addr, pte_val);
get_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
}
}
- /* flush all writes to reach PCI */
- mb();
- hdev->asic_funcs->read_pte(hdev, hop2_addr);
+ flush(ctx);
return 0;
hop3_err:
for (i = 0 ; i < hop3_allocated ; i++)
free_hop(ctx, ctx->dram_default_hops[i]);
+
free_hop(ctx, hop2_addr);
hop2_err:
free_hop(ctx, hop1_addr);
hop1_err:
kfree(ctx->dram_default_hops);
-alloc_err:
- mutex_destroy(&ctx->mmu_lock);
return rc;
}
+static void dram_default_mapping_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
+ hop2_pte_addr, hop3_pte_addr;
+ int i, j;
+
+ if (!hdev->dram_supports_virtual_memory ||
+ !hdev->dram_default_page_mapping)
+ return;
+
+ num_of_hop3 = prop->dram_size_for_default_page_mapping;
+ do_div(num_of_hop3, prop->dram_page_size);
+ do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+
+ hop0_addr = get_hop0_addr(ctx);
+ /* add hop1 and hop2 */
+ total_hops = num_of_hop3 + 2;
+ hop1_addr = ctx->dram_default_hops[total_hops - 1];
+ hop2_addr = ctx->dram_default_hops[total_hops - 2];
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ hop3_pte_addr = ctx->dram_default_hops[i];
+ for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ clear_pte(ctx, hop3_pte_addr);
+ put_pte(ctx, ctx->dram_default_hops[i]);
+ hop3_pte_addr += HL_PTE_SIZE;
+ }
+ }
+
+	hop2_pte_addr = hop2_addr;
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ clear_pte(ctx, hop2_pte_addr);
+ put_pte(ctx, hop2_addr);
+ hop2_pte_addr += HL_PTE_SIZE;
+ }
+
+ clear_pte(ctx, hop1_addr);
+ put_pte(ctx, hop1_addr);
+ clear_pte(ctx, hop0_addr);
+
+ kfree(ctx->dram_default_hops);
+
+ flush(ctx);
+}
+
+/**
+ * hl_mmu_init() - initialize the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Allocate max_asid zeroed hop0 pgts so no mapping is available.
+ * - Enable MMU in H/W.
+ * - Invalidate the MMU cache.
+ * - Create a pool of pages for pgt_infos.
+ *
+ * This function depends on DMA QMAN to be working!
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int hl_mmu_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ /* MMU H/W init was already done in device hw_init() */
+
+ mutex_init(&hdev->mmu_cache_lock);
+
+ hdev->mmu_pgt_pool =
+ gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
+
+ if (!hdev->mmu_pgt_pool) {
+ dev_err(hdev->dev, "Failed to create page gen pool\n");
+ rc = -ENOMEM;
+ goto err_pool_create;
+ }
+
+ rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
+ prop->mmu_hop0_tables_total_size,
+ prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
+ -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+ goto err_pool_add;
+ }
+
+ hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
+ prop->mmu_hop_table_size,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!hdev->mmu_shadow_hop0) {
+ rc = -ENOMEM;
+ goto err_pool_add;
+ }
+
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(hdev->mmu_pgt_pool);
+err_pool_create:
+ mutex_destroy(&hdev->mmu_cache_lock);
+
+ return rc;
+}
+
+/**
+ * hl_mmu_fini() - release the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Disable MMU in H/W.
+ * - Free the pgt_infos pool.
+ *
+ * All contexts should be freed before calling this function.
+ */
+void hl_mmu_fini(struct hl_device *hdev)
+{
+ if (!hdev->mmu_enable)
+ return;
+
+ kvfree(hdev->mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_pgt_pool);
+ mutex_destroy(&hdev->mmu_cache_lock);
+
+ /* MMU H/W fini will be done in device hw_fini() */
+}
+
+/**
+ * hl_mmu_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
+ * page table hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
+ */
+int hl_mmu_ctx_init(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ mutex_init(&ctx->mmu_lock);
+ hash_init(ctx->mmu_phys_hash);
+ hash_init(ctx->mmu_shadow_hash);
+
+ return dram_default_mapping_init(ctx);
+}
+
/*
* hl_mmu_ctx_fini - disable a ctx from using the mmu module
*
@@ -365,63 +499,23 @@ alloc_err:
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pgt_info *pgt_info;
struct hlist_node *tmp;
- u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
- hop3_pte_addr;
- int i, j;
+ int i;
- if (!ctx->hdev->mmu_enable)
+ if (!hdev->mmu_enable)
return;
- if (hdev->dram_supports_virtual_memory &&
- hdev->dram_default_page_mapping) {
-
- num_of_hop3 = prop->dram_size_for_default_page_mapping;
- do_div(num_of_hop3, prop->dram_page_size);
- do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
-
- /* add hop1 and hop2 */
- total_hops = num_of_hop3 + 2;
- hop1_addr = ctx->dram_default_hops[total_hops - 1];
- hop2_addr = ctx->dram_default_hops[total_hops - 2];
-
- for (i = 0 ; i < num_of_hop3 ; i++) {
- hop3_pte_addr = ctx->dram_default_hops[i];
- for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
- clear_pte(hdev, hop3_pte_addr);
- put_pte(ctx, ctx->dram_default_hops[i]);
- hop3_pte_addr += HL_PTE_SIZE;
- }
- }
+ dram_default_mapping_fini(ctx);
- hop2_pte_addr = hop2_addr;
- for (i = 0 ; i < num_of_hop3 ; i++) {
- clear_pte(hdev, hop2_pte_addr);
- put_pte(ctx, hop2_addr);
- hop2_pte_addr += HL_PTE_SIZE;
- }
-
- clear_pte(hdev, hop1_addr);
- put_pte(ctx, hop1_addr);
- clear_pte(hdev, get_hop0_addr(ctx));
-
- kfree(ctx->dram_default_hops);
-
- /* flush all writes to reach PCI */
- mb();
- hdev->asic_funcs->read_pte(hdev, hop2_addr);
- }
-
- if (!hash_empty(ctx->mmu_hash))
+ if (!hash_empty(ctx->mmu_shadow_hash))
dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
- hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) {
+ hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
dev_err(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
- pgt_info->addr, ctx->asid, pgt_info->num_of_ptes);
- free_hop(ctx, pgt_info->addr);
+ pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
+ free_hop(ctx, pgt_info->shadow_addr);
}
mutex_destroy(&ctx->mmu_lock);
@@ -437,45 +531,43 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
hop3_addr = 0, hop3_pte_addr = 0,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte;
- int clear_hop3 = 1;
- bool is_dram_addr, is_huge, is_dram_default_page_mapping;
+ bool is_dram_addr, is_huge, clear_hop3 = true;
is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
prop->va_space_dram_start_address,
prop->va_space_dram_end_address);
hop0_addr = get_hop0_addr(ctx);
-
hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
- hop1_addr = get_next_hop_addr(curr_pte);
+ hop1_addr = get_next_hop_addr(ctx, curr_pte);
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
- hop2_addr = get_next_hop_addr(curr_pte);
+ hop2_addr = get_next_hop_addr(ctx, curr_pte);
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
- hop3_addr = get_next_hop_addr(curr_pte);
+ hop3_addr = get_next_hop_addr(ctx, curr_pte);
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
is_huge = curr_pte & LAST_MASK;
@@ -485,27 +577,24 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
return -EFAULT;
}
- is_dram_default_page_mapping =
- hdev->dram_default_page_mapping && is_dram_addr;
-
if (!is_huge) {
- hop4_addr = get_next_hop_addr(curr_pte);
+ hop4_addr = get_next_hop_addr(ctx, curr_pte);
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
- clear_hop3 = 0;
+ clear_hop3 = false;
}
- if (is_dram_default_page_mapping) {
- u64 zero_pte = (prop->mmu_dram_default_page_addr &
+ if (hdev->dram_default_page_mapping && is_dram_addr) {
+ u64 default_pte = (prop->mmu_dram_default_page_addr &
PTE_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
- if (curr_pte == zero_pte) {
+ if (curr_pte == default_pte) {
dev_err(hdev->dev,
"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
virt_addr);
@@ -519,40 +608,43 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
goto not_mapped;
}
- hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, zero_pte);
+ write_final_pte(ctx, hop3_pte_addr, default_pte);
put_pte(ctx, hop3_addr);
} else {
if (!(curr_pte & PAGE_PRESENT_MASK))
goto not_mapped;
- clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr);
+ if (hop4_addr)
+ clear_pte(ctx, hop4_pte_addr);
+ else
+ clear_pte(ctx, hop3_pte_addr);
if (hop4_addr && !put_pte(ctx, hop4_addr))
- clear_hop3 = 1;
+ clear_hop3 = true;
if (!clear_hop3)
goto flush;
- clear_pte(hdev, hop3_pte_addr);
+
+ clear_pte(ctx, hop3_pte_addr);
if (put_pte(ctx, hop3_addr))
goto flush;
- clear_pte(hdev, hop2_pte_addr);
+
+ clear_pte(ctx, hop2_pte_addr);
if (put_pte(ctx, hop2_addr))
goto flush;
- clear_pte(hdev, hop1_pte_addr);
+
+ clear_pte(ctx, hop1_pte_addr);
if (put_pte(ctx, hop1_addr))
goto flush;
- clear_pte(hdev, hop0_pte_addr);
+
+ clear_pte(ctx, hop0_pte_addr);
}
flush:
- /* flush all writes from all cores to reach PCI */
- mb();
-
- hdev->asic_funcs->read_pte(hdev,
- hop4_addr ? hop4_pte_addr : hop3_pte_addr);
+ flush(ctx);
return 0;
@@ -632,8 +724,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte = 0;
bool hop1_new = false, hop2_new = false, hop3_new = false,
- hop4_new = false, is_huge, is_dram_addr,
- is_dram_default_page_mapping;
+ hop4_new = false, is_huge, is_dram_addr;
int rc = -ENOMEM;
/*
@@ -654,59 +745,46 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
return -EFAULT;
}
- is_dram_default_page_mapping =
- hdev->dram_default_page_mapping && is_dram_addr;
-
hop0_addr = get_hop0_addr(ctx);
-
hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
-
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
-
if (hop1_addr == ULLONG_MAX)
goto err;
hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
-
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
-
if (hop2_addr == ULLONG_MAX)
goto err;
hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
-
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
-
if (hop3_addr == ULLONG_MAX)
goto err;
hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
-
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
if (!is_huge) {
hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
-
if (hop4_addr == ULLONG_MAX)
goto err;
hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
-
- curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
}
- if (is_dram_default_page_mapping) {
- u64 zero_pte = (prop->mmu_dram_default_page_addr &
+ if (hdev->dram_default_page_mapping && is_dram_addr) {
+ u64 default_pte = (prop->mmu_dram_default_page_addr &
PTE_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
- if (curr_pte != zero_pte) {
+ if (curr_pte != default_pte) {
dev_err(hdev->dev,
"DRAM: mapping already exists for virt_addr 0x%llx\n",
virt_addr);
@@ -722,27 +800,22 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
}
} else if (curr_pte & PAGE_PRESENT_MASK) {
dev_err(hdev->dev,
- "mapping already exists for virt_addr 0x%llx\n",
- virt_addr);
+ "mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
- hdev->asic_funcs->read_pte(hdev, hop0_pte_addr),
- hop0_pte_addr);
+ *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
- hdev->asic_funcs->read_pte(hdev, hop1_pte_addr),
- hop1_pte_addr);
+ *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
- hdev->asic_funcs->read_pte(hdev, hop2_pte_addr),
- hop2_pte_addr);
+ *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
- hdev->asic_funcs->read_pte(hdev, hop3_pte_addr),
- hop3_pte_addr);
+ *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);
if (!is_huge)
dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
- hdev->asic_funcs->read_pte(hdev,
- hop4_pte_addr),
- hop4_pte_addr);
+ *(u64 *) (uintptr_t) hop4_pte_addr,
+ hop4_pte_addr);
rc = -EINVAL;
goto err;
@@ -751,28 +824,26 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
| PAGE_PRESENT_MASK;
- hdev->asic_funcs->write_pte(hdev,
- is_huge ? hop3_pte_addr : hop4_pte_addr,
- curr_pte);
+ if (is_huge)
+ write_final_pte(ctx, hop3_pte_addr, curr_pte);
+ else
+ write_final_pte(ctx, hop4_pte_addr, curr_pte);
if (hop1_new) {
- curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) |
- PAGE_PRESENT_MASK;
- ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr,
- curr_pte);
+ curr_pte =
+ (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop0_pte_addr, curr_pte);
}
if (hop2_new) {
- curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) |
- PAGE_PRESENT_MASK;
- ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr,
- curr_pte);
+ curr_pte =
+ (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop1_pte_addr, curr_pte);
get_pte(ctx, hop1_addr);
}
if (hop3_new) {
- curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) |
- PAGE_PRESENT_MASK;
- ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr,
- curr_pte);
+ curr_pte =
+ (hop3_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop2_pte_addr, curr_pte);
get_pte(ctx, hop2_addr);
}
@@ -780,8 +851,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop4_new) {
curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
- ctx->hdev->asic_funcs->write_pte(ctx->hdev,
- hop3_pte_addr, curr_pte);
+ write_pte(ctx, hop3_pte_addr, curr_pte);
get_pte(ctx, hop3_addr);
}
@@ -790,11 +860,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
get_pte(ctx, hop3_addr);
}
- /* flush all writes from all cores to reach PCI */
- mb();
-
- hdev->asic_funcs->read_pte(hdev,
- is_huge ? hop3_pte_addr : hop4_pte_addr);
+ flush(ctx);
return 0;
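The net effect of the map/unmap rewrites above is that the page-table walk itself no longer generates PCI traffic. In outline (both forms appear verbatim in the hunks above; hopN stands for any level):

/* Before: one MMIO read per hop while walking the tables:
 *	curr_pte = hdev->asic_funcs->read_pte(hdev, hopN_pte_addr);
 *
 * After: the walk dereferences the host-resident shadow copy, and only the
 * PTE updates (write_pte()/write_final_pte()) plus the final flush(ctx)
 * read-back go out over the bus:
 *	curr_pte = *(u64 *) (uintptr_t) hopN_pte_addr;
 */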
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/pci.c
new file mode 100644
index 000000000000..0e78a04d63f4
--- /dev/null
+++ b/drivers/misc/habanalabs/pci.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "include/hw_ip/pci/pci_general.h"
+
+#include <linux/pci.h>
+
+/**
+ * hl_pci_bars_map() - Map PCI BARs.
+ * @hdev: Pointer to hl_device structure.
+ * @name: Array of BAR names.
+ * @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
+ *
+ * Request PCI regions and map them to kernel virtual addresses.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
+ bool is_wc[3])
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int rc, i, bar;
+
+ rc = pci_request_regions(pdev, HL_NAME);
+ if (rc) {
+ dev_err(hdev->dev, "Cannot obtain PCI resources\n");
+ return rc;
+ }
+
+ for (i = 0 ; i < 3 ; i++) {
+ bar = i * 2; /* 64-bit BARs */
+ hdev->pcie_bar[bar] = is_wc[i] ?
+ pci_ioremap_wc_bar(pdev, bar) :
+ pci_ioremap_bar(pdev, bar);
+ if (!hdev->pcie_bar[bar]) {
+ dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
+ is_wc[i] ? "_wc" : "", name[i]);
+ rc = -ENODEV;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ for (i = 2 ; i >= 0 ; i--) {
+ bar = i * 2; /* 64-bit BARs */
+ if (hdev->pcie_bar[bar])
+ iounmap(hdev->pcie_bar[bar]);
+ }
+
+ pci_release_regions(pdev);
+
+ return rc;
+}
+
+/*
+ * hl_pci_bars_unmap() - Unmap PCI BARs.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Release all PCI BARs and unmap their virtual addresses.
+ */
+static void hl_pci_bars_unmap(struct hl_device *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int i, bar;
+
+ for (i = 2 ; i >= 0 ; i--) {
+ bar = i * 2; /* 64-bit BARs */
+ iounmap(hdev->pcie_bar[bar]);
+ }
+
+ pci_release_regions(pdev);
+}
+
+/*
+ * hl_pci_elbi_write() - Write through the ELBI interface.
+ * @hdev: Pointer to hl_device structure.
+ * @addr: Address to access through the ELBI interface.
+ * @data: Data to write.
+ *
+ * Return: 0 on success, negative value for failure.
+ */
+static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ ktime_t timeout;
+ u32 val;
+
+ /* Clear previous status */
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
+
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
+ PCI_CONFIG_ELBI_CTRL_WRITE);
+
+ timeout = ktime_add_ms(ktime_get(), 10);
+ for (;;) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
+ if (val & PCI_CONFIG_ELBI_STS_MASK)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
+ &val);
+ break;
+ }
+
+ usleep_range(300, 500);
+ }
+
+ if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
+ return 0;
+
+ if (val & PCI_CONFIG_ELBI_STS_ERR) {
+ dev_err(hdev->dev, "Error writing to ELBI\n");
+ return -EIO;
+ }
+
+ if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
+ dev_err(hdev->dev, "ELBI write didn't finish in time\n");
+ return -EIO;
+ }
+
+ dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
+ return -EIO;
+}
+
+/**
+ * hl_pci_iatu_write() - iATU write routine.
+ * @hdev: Pointer to hl_device structure.
+ * @addr: Register offset inside the iATU region (accessed through the DBI).
+ * @data: Data to write.
+ *
+ * Return: 0 on success, negative value for failure.
+ */
+int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 dbi_offset;
+ int rc;
+
+ dbi_offset = addr & 0xFFF;
+
+ rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+ data);
+
+ if (rc)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * hl_pci_reset_link_through_bridge() - Reset PCI link.
+ * @hdev: Pointer to hl_device structure.
+ */
+static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct pci_dev *parent_port;
+ u16 val;
+
+ parent_port = pdev->bus->self;
+ pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
+ val |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
+ ssleep(1);
+
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
+ pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
+ ssleep(3);
+}
+
+/**
+ * hl_pci_set_dram_bar_base() - Set DDR BAR to map specific device address.
+ * @hdev: Pointer to hl_device structure.
+ * @inbound_region: Inbound region number.
+ * @bar: PCI BAR number.
+ * @addr: Address in DRAM. Must be aligned to the DRAM BAR size.
+ *
+ * Configure the iATU so that the DRAM bar will start at the specified address.
+ *
+ * Return: 0 on success, negative value for failure.
+ */
+int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
+ u64 addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 offset;
+ int rc;
+
+ switch (inbound_region) {
+ case 0:
+ offset = 0x100;
+ break;
+ case 1:
+ offset = 0x300;
+ break;
+ case 2:
+ offset = 0x500;
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid inbound region %d\n",
+ inbound_region);
+ return -EINVAL;
+ }
+
+ if (bar != 0 && bar != 2 && bar != 4) {
+ dev_err(hdev->dev, "Invalid PCI BAR %d\n", bar);
+ return -EINVAL;
+ }
+
+ /* Point to the specified address */
+ rc = hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
+ /* Enable + BAR match + match enable + BAR number */
+ rc |= hl_pci_iatu_write(hdev, offset + 0x4, 0xC0080000 | (bar << 8));
+
+ /* Return the DBI window to the default location */
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to map DRAM bar to 0x%08llx\n",
+ addr);
+
+ return rc;
+}
+
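Callers normally see device DRAM through a sliding window: they align the requested address down to the BAR size and use the remainder as an offset into the mapped BAR. A sketch under assumed values (the 512 MB window and inbound region 2 are illustrative; goya.h above only fixes DDR_BAR_ID = 4):

/* Sketch only, not driver code. */
static u64 sketch_read_dram_qword(struct hl_device *hdev, u64 dram_addr)
{
	u64 bar_size = 0x20000000ull;			/* assumed 512 MB window */
	u64 bar_base = dram_addr & ~(bar_size - 1);	/* aligned as required */
	u64 bar_off  = dram_addr - bar_base;

	if (hl_pci_set_dram_bar_base(hdev, 2, DDR_BAR_ID, bar_base))
		return U64_MAX;

	return readq(hdev->pcie_bar[DDR_BAR_ID] + bar_off);
}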
+/**
+ * hl_pci_init_iatu() - Initialize the iATU unit inside the PCI controller.
+ * @hdev: Pointer to hl_device structure.
+ * @sram_base_address: SRAM base address.
+ * @dram_base_address: DRAM base address.
+ * @host_phys_base_address: Base physical address of host memory for device
+ * transactions.
+ * @host_phys_size: Size of host memory for device transactions.
+ *
+ * This is needed in case the firmware doesn't initialize the iATU.
+ *
+ * Return: 0 on success, negative value for failure.
+ */
+int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
+ u64 dram_base_address, u64 host_phys_base_address,
+ u64 host_phys_size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 host_phys_end_addr;
+ int rc = 0;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM base address */
+ rc = hl_pci_iatu_write(hdev, 0x114, lower_32_bits(sram_base_address));
+ rc |= hl_pci_iatu_write(hdev, 0x118, upper_32_bits(sram_base_address));
+ rc |= hl_pci_iatu_write(hdev, 0x100, 0);
+ /* Enable + Bar match + match enable */
+ rc |= hl_pci_iatu_write(hdev, 0x104, 0xC0080000);
+
+ /* Point to DRAM */
+ if (!hdev->asic_funcs->set_dram_bar_base)
+ return -EINVAL;
+ if (hdev->asic_funcs->set_dram_bar_base(hdev, dram_base_address) ==
+ U64_MAX)
+ return -EIO;
+
+ /* Outbound Region 0 - Point to Host */
+ host_phys_end_addr = host_phys_base_address + host_phys_size - 1;
+ rc |= hl_pci_iatu_write(hdev, 0x008,
+ lower_32_bits(host_phys_base_address));
+ rc |= hl_pci_iatu_write(hdev, 0x00C,
+ upper_32_bits(host_phys_base_address));
+ rc |= hl_pci_iatu_write(hdev, 0x010, lower_32_bits(host_phys_end_addr));
+ rc |= hl_pci_iatu_write(hdev, 0x014, 0);
+ rc |= hl_pci_iatu_write(hdev, 0x018, 0);
+ rc |= hl_pci_iatu_write(hdev, 0x020, upper_32_bits(host_phys_end_addr));
+ /* Increase region size */
+ rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
+ /* Enable */
+ rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
+
+ /* Return the DBI window to the default location */
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+
+ if (rc)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * hl_pci_set_dma_mask() - Set DMA masks for the device.
+ * @hdev: Pointer to hl_device structure.
+ * @dma_mask: number of bits for the requested dma mask.
+ *
+ * This function sets the DMA masks (regular and consistent) to the specified
+ * value. If that fails, it falls back to the value stored in the device
+ * structure.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int rc;
+
+ /* set DMA mask */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ if (rc) {
+ dev_warn(hdev->dev,
+ "Failed to set pci dma mask to %d bits, error %d\n",
+ dma_mask, rc);
+
+ dma_mask = hdev->dma_mask;
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to set pci dma mask to %d bits, error %d\n",
+ dma_mask, rc);
+ return rc;
+ }
+ }
+
+	/*
+	 * We managed to set the dma mask, so update the dma mask field. If
+	 * setting the coherent mask fails with this value, the entire
+	 * function fails.
+	 */
+ hdev->dma_mask = dma_mask;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to set pci consistent dma mask to %d bits, error %d\n",
+ dma_mask, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * hl_pci_init() - PCI initialization code.
+ * @hdev: Pointer to hl_device structure.
+ * @dma_mask: number of bits for the requested dma mask.
+ *
+ * Set DMA masks, initialize the PCI controller and map the PCI BARs.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_pci_init(struct hl_device *hdev, u8 dma_mask)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int rc;
+
+ rc = hl_pci_set_dma_mask(hdev, dma_mask);
+ if (rc)
+ return rc;
+
+ if (hdev->reset_pcilink)
+ hl_pci_reset_link_through_bridge(hdev);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(hdev->dev, "can't enable PCI device\n");
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ rc = hdev->asic_funcs->init_iatu(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize iATU\n");
+ goto disable_device;
+ }
+
+ rc = hdev->asic_funcs->pci_bars_map(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
+ goto disable_device;
+ }
+
+ return 0;
+
+disable_device:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+/**
+ * hl_pci_fini() - PCI finalization code.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Unmap PCI BARs and disable the PCI device.
+ */
+void hl_pci_fini(struct hl_device *hdev)
+{
+ hl_pci_bars_unmap(hdev);
+
+ pci_clear_master(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+}
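Taken together, the helpers in this new file are meant to be called once from the ASIC-specific early-init path and undone in reverse on teardown. A hedged sketch of that pairing (the 48-bit DMA mask is only an example value; each ASIC passes its own):

/* Sketch of a caller, not taken from the driver. */
static int sketch_early_init(struct hl_device *hdev)
{
	int rc;

	rc = hl_pci_init(hdev, 48);	/* sets DMA masks, iATU, maps BARs */
	if (rc)
		return rc;

	/* ... ASIC-specific hw_init(), queues, IRQs ... */

	return 0;
}

static void sketch_early_fini(struct hl_device *hdev)
{
	hl_pci_fini(hdev);		/* unmaps BARs, disables the device */
}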
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index ec0832278170..9d0445a567db 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -156,7 +156,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
/* Reset to power-on state */
writel(0, &idd->idd_misc_regs->int_out.raw);
- mmiowb();
/* Set up square wave */
int_out.raw = 0;
@@ -164,7 +163,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
int_out.fields.diag = 0;
writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
- mmiowb();
/* Check square wave period averaged over some number of cycles */
start = ktime_get_ns();
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index de20bdaa148d..8b01257783dd 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -1135,7 +1135,7 @@ static void kgdbts_put_char(u8 chr)
static int param_set_kgdbts_var(const char *kmessage,
const struct kernel_param *kp)
{
- int len = strlen(kmessage);
+ size_t len = strlen(kmessage);
if (len >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdbts: config string too long\n");
@@ -1155,7 +1155,7 @@ static int param_set_kgdbts_var(const char *kmessage,
strcpy(config, kmessage);
/* Chop out \n char as a result of echo */
- if (config[len - 1] == '\n')
+ if (len && config[len - 1] == '\n')
config[len - 1] = '\0';
/* Go and configure with the new params. */
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 74e2c667dce0..9d7b3719bfa0 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
config INTEL_MEI
tristate "Intel Management Engine Interface"
depends on X86 && PCI
@@ -44,12 +46,4 @@ config INTEL_MEI_TXE
Supported SoCs:
Intel Bay Trail
-config INTEL_MEI_HDCP
- tristate "Intel HDCP2.2 services of ME Interface"
- select INTEL_MEI_ME
- depends on DRM_I915
- help
- MEI Support for HDCP2.2 Services on Intel platforms.
-
- Enables the ME FW services required for HDCP2.2 support through
- I915 display driver of Intel.
+source "drivers/misc/mei/hdcp/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 8c2d9565a4cb..f1c76f7ee804 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
+# Copyright (c) 2010-2019, Intel Corporation. All rights reserved.
# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver
-# Copyright (c) 2010-2014, Intel Corporation.
#
obj-$(CONFIG_INTEL_MEI) += mei.o
mei-objs := init.o
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 5fcac02233af..32e9b1aed2ca 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2018, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 65bec998eb6e..985bd4fd3328 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2012-2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index ca4c9cc218a2..1e3edbbacb1e 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/sched/signal.h>
@@ -679,7 +669,7 @@ int mei_cl_unlink(struct mei_cl *cl)
void mei_host_client_init(struct mei_device *dev)
{
- dev->dev_state = MEI_DEV_ENABLED;
+ mei_set_devstate(dev, MEI_DEV_ENABLED);
dev->reset_count = 0;
schedule_work(&dev->bus_rescan_work);
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 64e318f589b4..c1f9e810cf81 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef _MEI_CLIENT_H_
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 7b5df8fd6c5a..0970142bcace 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2012-2016, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2012-2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
+
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c
index 795641b82181..ef56f849b251 100644
--- a/drivers/misc/mei/dma-ring.c
+++ b/drivers/misc/mei/dma-ring.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include <linux/mei.h>
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index e6207f614816..a44094cdbc36 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
-
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 0171a7e79bab..5aa58cffdd2e 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef _MEI_HBM_H_
diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig
new file mode 100644
index 000000000000..95b2d6d37f10
--- /dev/null
+++ b/drivers/misc/mei/hdcp/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2019, Intel Corporation. All rights reserved.
+#
+config INTEL_MEI_HDCP
+ tristate "Intel HDCP2.2 services of ME Interface"
+ select INTEL_MEI_ME
+ depends on DRM_I915
+ help
+ MEI Support for HDCP2.2 Services on Intel platforms.
+
+ Enables the ME FW services required for HDCP2.2 support through
+ I915 display driver of Intel.
diff --git a/drivers/misc/mei/hdcp/Makefile b/drivers/misc/mei/hdcp/Makefile
index adbe7506282d..3fbb56485ce8 100644
--- a/drivers/misc/mei/hdcp/Makefile
+++ b/drivers/misc/mei/hdcp/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Intel Corporation. All rights reserved.
#
# Makefile - HDCP client driver for Intel MEI Bus Driver.
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 90b6ae8e9dae..b07000202d4a 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0)
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2019 Intel Corporation
*
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index 5f74b908e486..e4b1cd54c853 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+) */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright © 2019 Intel Corporation
*
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index bb1ee9834a02..d74b182e19f3 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -1,68 +1,8 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation.
- * linux-mei@linux.intel.com
- * http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
+ */
#ifndef _MEI_HW_MEI_REGS_H_
#define _MEI_HW_MEI_REGS_H_
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 3fbbadfa2ae1..abe1b1f4362f 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/pci.h>
@@ -350,9 +340,6 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
hcsr |= H_IG;
hcsr &= ~H_RST;
mei_hcsr_set(dev, hcsr);
-
- /* complete this write before we set host ready on another CPU */
- mmiowb();
}
/**
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index bbcc5fc106cd..08c84a0de4a8 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- *
+ * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
-
-
#ifndef _MEI_INTERFACE_H_
#define _MEI_INTERFACE_H_
diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h
index f19229c4e655..a92b306dac8b 100644
--- a/drivers/misc/mei/hw-txe-regs.h
+++ b/drivers/misc/mei/hw-txe-regs.h
@@ -1,63 +1,8 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING
- *
- * Contact Information:
- * Intel Corporation.
- * linux-mei@linux.intel.com
- * http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
+ */
#ifndef _MEI_HW_TXE_REGS_H_
#define _MEI_HW_TXE_REGS_H_
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 8449fe0367ff..5e58656b8e19 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/pci.h>
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
index e1e8b66d7648..96511b04bf88 100644
--- a/drivers/misc/mei/hw-txe.h
+++ b/drivers/misc/mei/hw-txe.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- *
+ * Copyright (c) 2013-2016, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef _MEI_HW_TXE_H_
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index b7d2487b8409..d025a5f8317e 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef _MEI_HW_TYPES_H_
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index eb026e2a0537..b9fef773e71b 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/export.h>
@@ -133,12 +123,12 @@ int mei_reset(struct mei_device *dev)
/* enter reset flow */
interrupts_enabled = state != MEI_DEV_POWER_DOWN;
- dev->dev_state = MEI_DEV_RESETTING;
+ mei_set_devstate(dev, MEI_DEV_RESETTING);
dev->reset_count++;
if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
- dev->dev_state = MEI_DEV_DISABLED;
+ mei_set_devstate(dev, MEI_DEV_DISABLED);
return -ENODEV;
}
@@ -160,7 +150,7 @@ int mei_reset(struct mei_device *dev)
if (state == MEI_DEV_POWER_DOWN) {
dev_dbg(dev->dev, "powering down: end of reset\n");
- dev->dev_state = MEI_DEV_DISABLED;
+ mei_set_devstate(dev, MEI_DEV_DISABLED);
return 0;
}
@@ -172,11 +162,11 @@ int mei_reset(struct mei_device *dev)
dev_dbg(dev->dev, "link is established start sending messages.\n");
- dev->dev_state = MEI_DEV_INIT_CLIENTS;
+ mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS);
ret = mei_hbm_start_req(dev);
if (ret) {
dev_err(dev->dev, "hbm_start failed ret = %d\n", ret);
- dev->dev_state = MEI_DEV_RESETTING;
+ mei_set_devstate(dev, MEI_DEV_RESETTING);
return ret;
}
@@ -206,7 +196,7 @@ int mei_start(struct mei_device *dev)
dev->reset_count = 0;
do {
- dev->dev_state = MEI_DEV_INITIALIZING;
+ mei_set_devstate(dev, MEI_DEV_INITIALIZING);
ret = mei_reset(dev);
if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
@@ -241,7 +231,7 @@ int mei_start(struct mei_device *dev)
return 0;
err:
dev_err(dev->dev, "link layer initialization failed.\n");
- dev->dev_state = MEI_DEV_DISABLED;
+ mei_set_devstate(dev, MEI_DEV_DISABLED);
mutex_unlock(&dev->device_lock);
return -ENODEV;
}
@@ -260,7 +250,7 @@ int mei_restart(struct mei_device *dev)
mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_POWER_UP;
+ mei_set_devstate(dev, MEI_DEV_POWER_UP);
dev->reset_count = 0;
err = mei_reset(dev);
@@ -311,7 +301,7 @@ void mei_stop(struct mei_device *dev)
dev_dbg(dev->dev, "stopping the device.\n");
mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_POWER_DOWN;
+ mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
mutex_unlock(&dev->device_lock);
mei_cl_bus_remove_devices(dev);
@@ -324,7 +314,7 @@ void mei_stop(struct mei_device *dev)
mei_reset(dev);
/* move device to disabled state unconditionally */
- dev->dev_state = MEI_DEV_DISABLED;
+ mei_set_devstate(dev, MEI_DEV_DISABLED);
mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 055c2d89b310..c70a8c74cc57 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
-
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 87281b3695e6..ad02097d7fee 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2018, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -37,6 +28,12 @@
#include "mei_dev.h"
#include "client.h"
+static struct class *mei_class;
+static dev_t mei_devt;
+#define MEI_MAX_DEVS MINORMASK
+static DEFINE_MUTEX(mei_minor_lock);
+static DEFINE_IDR(mei_idr);
+
/**
* mei_open - the open function
*
@@ -838,12 +835,65 @@ static ssize_t fw_ver_show(struct device *device,
}
static DEVICE_ATTR_RO(fw_ver);
+/**
+ * dev_state_show - display device state
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf or error
+ */
+static ssize_t dev_state_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ enum mei_dev_state dev_state;
+
+ mutex_lock(&dev->device_lock);
+ dev_state = dev->dev_state;
+ mutex_unlock(&dev->device_lock);
+
+ return sprintf(buf, "%s", mei_dev_state_str(dev_state));
+}
+static DEVICE_ATTR_RO(dev_state);
+
+static int match_devt(struct device *dev, const void *data)
+{
+ const dev_t *devt = data;
+
+ return dev->devt == *devt;
+}
+
+/**
+ * mei_set_devstate - set new device state and notify the sysfs file.
+ *
+ * @dev: mei_device
+ * @state: new device state
+ */
+void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
+{
+ struct device *clsdev;
+
+ if (dev->dev_state == state)
+ return;
+
+ dev->dev_state = state;
+
+ clsdev = class_find_device(mei_class, NULL, &dev->cdev.dev, match_devt);
+ if (clsdev) {
+ sysfs_notify(&clsdev->kobj, NULL, "dev_state");
+ put_device(clsdev);
+ }
+}
+
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
&dev_attr_hbm_ver.attr,
&dev_attr_hbm_ver_drv.attr,
&dev_attr_tx_queue_limit.attr,
&dev_attr_fw_ver.attr,
+ &dev_attr_dev_state.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
@@ -867,12 +917,6 @@ static const struct file_operations mei_fops = {
.llseek = no_llseek
};
-static struct class *mei_class;
-static dev_t mei_devt;
-#define MEI_MAX_DEVS MINORMASK
-static DEFINE_MUTEX(mei_minor_lock);
-static DEFINE_IDR(mei_idr);
-
/**
* mei_minor_get - obtain next free device minor number
*
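
Because mei_set_devstate() calls sysfs_notify() on the new attribute, user space can sleep in poll() on dev_state instead of re-reading it. A hedged user-space sketch of that pattern; the sysfs path assumes the default mei0 device name:

/* User-space sketch: block until the MEI device state changes. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/sys/class/mei/mei0/dev_state", O_RDONLY);

	if (fd < 0)
		return 1;

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		n = pread(fd, buf, sizeof(buf) - 1, 0);	/* read arms the notification */
		if (n < 0)
			break;
		buf[n] = '\0';
		printf("dev_state: %s\n", buf);

		poll(&pfd, 1, -1);	/* returns when sysfs_notify() fires */
	}
	close(fd);
	return 0;
}
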
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
index 374edde72a14..48d4c4fcefd2 100644
--- a/drivers/misc/mei/mei-trace.c
+++ b/drivers/misc/mei/mei-trace.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2015-2016, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index b52e9b97a7c0..df758033dc93 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- *
+ * Copyright (c) 2015-2016, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#if !defined(_MEI_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 685b78ce30a5..fca832fcac57 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2018, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef _MEI_DEV_H_
@@ -535,7 +525,6 @@ struct mei_device {
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
-
const struct mei_hw_ops *ops;
char hw[0] __aligned(sizeof(void *));
};
@@ -594,6 +583,8 @@ int mei_restart(struct mei_device *dev);
void mei_stop(struct mei_device *dev);
void mei_cancel_work(struct mei_device *dev);
+void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state);
+
int mei_dmam_ring_alloc(struct mei_device *dev);
void mei_dmam_ring_free(struct mei_device *dev);
bool mei_dma_ring_is_allocated(struct mei_device *dev);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3ab946ad3257..7a2b3545a7f9 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index e1b909123fb0..2e37fc2e0fa8 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
+ * Copyright (c) 2013-2017, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 242dcee14689..6736f72cc14a 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -4,7 +4,7 @@ comment "Intel MIC Bus Driver"
config INTEL_MIC_BUS
tristate "Intel MIC Bus Driver"
- depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+ depends on 64BIT && PCI && X86
help
This option is selected by any driver which registers a
device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
@@ -21,7 +21,7 @@ comment "SCIF Bus Driver"
config SCIF_BUS
tristate "SCIF Bus Driver"
- depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+ depends on 64BIT && PCI && X86
help
This option is selected by any driver which registers a
device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
diff --git a/drivers/misc/ocxl/Makefile b/drivers/misc/ocxl/Makefile
index 5229dcda8297..d07d1bb8e8d4 100644
--- a/drivers/misc/ocxl/Makefile
+++ b/drivers/misc/ocxl/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0+
ccflags-$(CONFIG_PPC_WERROR) += -Werror
-ocxl-y += main.o pci.o config.o file.o pasid.o
+ocxl-y += main.o pci.o config.o file.o pasid.o mmio.o
ocxl-y += link.o context.o afu_irq.o sysfs.o trace.o
+ocxl-y += core.o
obj-$(CONFIG_OCXL) += ocxl.o
# For tracepoints to include our trace.h from tracepoint infrastructure:
diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c
index 11ab996657a2..70f8f1c3929d 100644
--- a/drivers/misc/ocxl/afu_irq.c
+++ b/drivers/misc/ocxl/afu_irq.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
-#include <linux/eventfd.h>
+#include <asm/pnv-ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"
@@ -11,27 +11,59 @@ struct afu_irq {
unsigned int virq;
char *name;
u64 trigger_page;
- struct eventfd_ctx *ev_ctx;
+ irqreturn_t (*handler)(void *private);
+ void (*free_private)(void *private);
+ void *private;
};
-static int irq_offset_to_id(struct ocxl_context *ctx, u64 offset)
+int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset)
{
return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT;
}
-static u64 irq_id_to_offset(struct ocxl_context *ctx, int id)
+u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id)
{
- return ctx->afu->irq_base_offset + (id << PAGE_SHIFT);
+ return ctx->afu->irq_base_offset + (irq_id << PAGE_SHIFT);
}
+int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
+ irqreturn_t (*handler)(void *private),
+ void (*free_private)(void *private),
+ void *private)
+{
+ struct afu_irq *irq;
+ int rc;
+
+ mutex_lock(&ctx->irq_lock);
+ irq = idr_find(&ctx->irq_idr, irq_id);
+ if (!irq) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+
+ irq->handler = handler;
+ irq->private = private;
+ irq->free_private = free_private;
+
+ rc = 0;
+ // Fall through to unlock
+
+unlock:
+ mutex_unlock(&ctx->irq_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ocxl_irq_set_handler);
+
static irqreturn_t afu_irq_handler(int virq, void *data)
{
struct afu_irq *irq = (struct afu_irq *) data;
trace_ocxl_afu_irq_receive(virq);
- if (irq->ev_ctx)
- eventfd_signal(irq->ev_ctx, 1);
- return IRQ_HANDLED;
+
+ if (irq->handler)
+ return irq->handler(irq->private);
+
+ return IRQ_HANDLED; // Just drop it on the ground
}
static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq)
@@ -69,7 +101,7 @@ static void release_afu_irq(struct afu_irq *irq)
kfree(irq->name);
}
-int ocxl_afu_irq_alloc(struct ocxl_context *ctx, u64 *irq_offset)
+int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id)
{
struct afu_irq *irq;
int rc;
@@ -101,11 +133,11 @@ int ocxl_afu_irq_alloc(struct ocxl_context *ctx, u64 *irq_offset)
if (rc)
goto err_alloc;
- *irq_offset = irq_id_to_offset(ctx, irq->id);
-
- trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq,
- *irq_offset);
+ trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq);
mutex_unlock(&ctx->irq_lock);
+
+ *irq_id = irq->id;
+
return 0;
err_alloc:
@@ -117,29 +149,29 @@ err_unlock:
kfree(irq);
return rc;
}
+EXPORT_SYMBOL_GPL(ocxl_afu_irq_alloc);
static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx)
{
trace_ocxl_afu_irq_free(ctx->pasid, irq->id);
if (ctx->mapping)
unmap_mapping_range(ctx->mapping,
- irq_id_to_offset(ctx, irq->id),
+ ocxl_irq_id_to_offset(ctx, irq->id),
1 << PAGE_SHIFT, 1);
release_afu_irq(irq);
- if (irq->ev_ctx)
- eventfd_ctx_put(irq->ev_ctx);
+ if (irq->free_private)
+ irq->free_private(irq->private);
ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
kfree(irq);
}
-int ocxl_afu_irq_free(struct ocxl_context *ctx, u64 irq_offset)
+int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id)
{
struct afu_irq *irq;
- int id = irq_offset_to_id(ctx, irq_offset);
mutex_lock(&ctx->irq_lock);
- irq = idr_find(&ctx->irq_idr, id);
+ irq = idr_find(&ctx->irq_idr, irq_id);
if (!irq) {
mutex_unlock(&ctx->irq_lock);
return -EINVAL;
@@ -149,6 +181,7 @@ int ocxl_afu_irq_free(struct ocxl_context *ctx, u64 irq_offset)
mutex_unlock(&ctx->irq_lock);
return 0;
}
+EXPORT_SYMBOL_GPL(ocxl_afu_irq_free);
void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
{
@@ -161,41 +194,16 @@ void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
mutex_unlock(&ctx->irq_lock);
}
-int ocxl_afu_irq_set_fd(struct ocxl_context *ctx, u64 irq_offset, int eventfd)
-{
- struct afu_irq *irq;
- struct eventfd_ctx *ev_ctx;
- int rc = 0, id = irq_offset_to_id(ctx, irq_offset);
-
- mutex_lock(&ctx->irq_lock);
- irq = idr_find(&ctx->irq_idr, id);
- if (!irq) {
- rc = -EINVAL;
- goto unlock;
- }
-
- ev_ctx = eventfd_ctx_fdget(eventfd);
- if (IS_ERR(ev_ctx)) {
- rc = -EINVAL;
- goto unlock;
- }
-
- irq->ev_ctx = ev_ctx;
-unlock:
- mutex_unlock(&ctx->irq_lock);
- return rc;
-}
-
-u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, u64 irq_offset)
+u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id)
{
struct afu_irq *irq;
- int id = irq_offset_to_id(ctx, irq_offset);
u64 addr = 0;
mutex_lock(&ctx->irq_lock);
- irq = idr_find(&ctx->irq_idr, id);
+ irq = idr_find(&ctx->irq_idr, irq_id);
if (irq)
addr = irq->trigger_page;
mutex_unlock(&ctx->irq_lock);
return addr;
}
+EXPORT_SYMBOL_GPL(ocxl_afu_irq_get_addr);
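
With the eventfd plumbing removed, an in-kernel user of the AFU IRQ API now allocates an interrupt and attaches its own handler through the newly exported symbols. A rough sketch of the flow; the my_* names on the client side are hypothetical:

/* Hypothetical in-kernel client of the reworked AFU IRQ API. */
static irqreturn_t my_afu_irq_handler(void *private)
{
	/* 'private' is the pointer passed to ocxl_irq_set_handler() below */
	return IRQ_HANDLED;
}

static int my_setup_afu_irq(struct ocxl_context *ctx, void *priv)
{
	int irq_id;
	int rc;

	rc = ocxl_afu_irq_alloc(ctx, &irq_id);
	if (rc)
		return rc;

	rc = ocxl_irq_set_handler(ctx, irq_id, my_afu_irq_handler, NULL, priv);
	if (rc) {
		ocxl_afu_irq_free(ctx, irq_id);
		return rc;
	}

	/* The AFU raises the interrupt by writing to the trigger page whose
	 * address ocxl_afu_irq_get_addr(ctx, irq_id) returns.
	 */
	return 0;
}
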
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 8f2c5d8bd2ee..5e65acb8e134 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -2,8 +2,8 @@
// Copyright 2017 IBM Corp.
#include <linux/pci.h>
#include <asm/pnv-ocxl.h>
-#include <misc/ocxl.h>
#include <misc/ocxl-config.h>
+#include "ocxl_internal.h"
#define EXTRACT_BIT(val, bit) (!!(val & BIT(bit)))
#define EXTRACT_BITS(val, s, e) ((val & GENMASK(e, s)) >> s)
@@ -68,7 +68,7 @@ static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
return 0;
}
-static int read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn)
+static void read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
u16 val;
int pos;
@@ -89,7 +89,6 @@ static int read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn)
out:
dev_dbg(&dev->dev, "PASID capability:\n");
dev_dbg(&dev->dev, " Max PASID log = %d\n", fn->max_pasid_log);
- return 0;
}
static int read_dvsec_tl(struct pci_dev *dev, struct ocxl_fn_config *fn)
@@ -205,11 +204,7 @@ int ocxl_config_read_function(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
int rc;
- rc = read_pasid(dev, fn);
- if (rc) {
- dev_err(&dev->dev, "Invalid PASID configuration: %d\n", rc);
- return -ENODEV;
- }
+ read_pasid(dev, fn);
rc = read_dvsec_tl(dev, fn);
if (rc) {
@@ -304,7 +299,6 @@ int ocxl_config_check_afu_index(struct pci_dev *dev,
}
return 1;
}
-EXPORT_SYMBOL_GPL(ocxl_config_check_afu_index);
static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu)
@@ -540,7 +534,6 @@ int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count)
{
return pnv_ocxl_get_pasid_count(dev, count);
}
-EXPORT_SYMBOL_GPL(ocxl_config_get_pasid_info);
void ocxl_config_set_afu_pasid(struct pci_dev *dev, int pos, int pasid_base,
u32 pasid_count_log)
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index c10a940e3b38..bab9c9364184 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -4,15 +4,17 @@
#include "trace.h"
#include "ocxl_internal.h"
-struct ocxl_context *ocxl_context_alloc(void)
-{
- return kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
-}
-
-int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
+int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
struct address_space *mapping)
{
int pasid;
+ struct ocxl_context *ctx;
+
+ *context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
+ if (!*context)
+ return -ENOMEM;
+
+ ctx = *context;
ctx->afu = afu;
mutex_lock(&afu->contexts_lock);
@@ -43,6 +45,7 @@ int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
ocxl_afu_get(afu);
return 0;
}
+EXPORT_SYMBOL_GPL(ocxl_context_alloc);
/*
* Callback for when a translation fault triggers an error
@@ -63,7 +66,7 @@ static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
wake_up_all(&ctx->events_wq);
}
-int ocxl_context_attach(struct ocxl_context *ctx, u64 amr)
+int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
int rc;
@@ -75,7 +78,7 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr)
}
rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
- current->mm->context.id, ctx->tidr, amr, current->mm,
+ mm->context.id, ctx->tidr, amr, mm,
xsl_fault_error, ctx);
if (rc)
goto out;
@@ -85,13 +88,15 @@ out:
mutex_unlock(&ctx->status_mutex);
return rc;
}
+EXPORT_SYMBOL_GPL(ocxl_context_attach);
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
u64 offset, struct ocxl_context *ctx)
{
u64 trigger_addr;
+ int irq_id = ocxl_irq_offset_to_id(ctx, offset);
- trigger_addr = ocxl_afu_irq_get_addr(ctx, offset);
+ trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
if (!trigger_addr)
return VM_FAULT_SIGBUS;
@@ -151,12 +156,14 @@ static const struct vm_operations_struct ocxl_vmops = {
static int check_mmap_afu_irq(struct ocxl_context *ctx,
struct vm_area_struct *vma)
{
+ int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);
+
/* only one page */
if (vma_pages(vma) != 1)
return -EINVAL;
/* check offset validity */
- if (!ocxl_afu_irq_get_addr(ctx, vma->vm_pgoff << PAGE_SHIFT))
+ if (!ocxl_afu_irq_get_addr(ctx, irq_id))
return -EINVAL;
/*
@@ -238,11 +245,12 @@ int ocxl_context_detach(struct ocxl_context *ctx)
}
rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
if (rc) {
- dev_warn(&ctx->afu->dev,
+ dev_warn(&dev->dev,
"Couldn't remove PE entry cleanly: %d\n", rc);
}
return 0;
}
+EXPORT_SYMBOL_GPL(ocxl_context_detach);
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
@@ -280,3 +288,4 @@ void ocxl_context_free(struct ocxl_context *ctx)
ocxl_afu_put(ctx->afu);
kfree(ctx);
}
+EXPORT_SYMBOL_GPL(ocxl_context_free);
diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c
new file mode 100644
index 000000000000..b7a09b21ab36
--- /dev/null
+++ b/drivers/misc/ocxl/core.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2019 IBM Corp.
+#include <linux/idr.h>
+#include "ocxl_internal.h"
+
+static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn)
+{
+ return (get_device(&fn->dev) == NULL) ? NULL : fn;
+}
+
+static void ocxl_fn_put(struct ocxl_fn *fn)
+{
+ put_device(&fn->dev);
+}
+
+static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn)
+{
+ struct ocxl_afu *afu;
+
+ afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL);
+ if (!afu)
+ return NULL;
+
+ kref_init(&afu->kref);
+ mutex_init(&afu->contexts_lock);
+ mutex_init(&afu->afu_control_lock);
+ idr_init(&afu->contexts_idr);
+ afu->fn = fn;
+ ocxl_fn_get(fn);
+ return afu;
+}
+
+static void free_afu(struct kref *kref)
+{
+ struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref);
+
+ idr_destroy(&afu->contexts_idr);
+ ocxl_fn_put(afu->fn);
+ kfree(afu);
+}
+
+void ocxl_afu_get(struct ocxl_afu *afu)
+{
+ kref_get(&afu->kref);
+}
+EXPORT_SYMBOL_GPL(ocxl_afu_get);
+
+void ocxl_afu_put(struct ocxl_afu *afu)
+{
+ kref_put(&afu->kref, free_afu);
+}
+EXPORT_SYMBOL_GPL(ocxl_afu_put);
+
+static int assign_afu_actag(struct ocxl_afu *afu)
+{
+ struct ocxl_fn *fn = afu->fn;
+ int actag_count, actag_offset;
+ struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
+
+ /*
+ * if there were not enough actags for the function, each afu
+ * reduces its count as well
+ */
+ actag_count = afu->config.actag_supported *
+ fn->actag_enabled / fn->actag_supported;
+ actag_offset = ocxl_actag_afu_alloc(fn, actag_count);
+ if (actag_offset < 0) {
+ dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n",
+ actag_count, actag_offset);
+ return actag_offset;
+ }
+ afu->actag_base = fn->actag_base + actag_offset;
+ afu->actag_enabled = actag_count;
+
+ ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos,
+ afu->actag_base, afu->actag_enabled);
+ dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n",
+ afu->actag_base, afu->actag_enabled);
+ return 0;
+}
+
+static void reclaim_afu_actag(struct ocxl_afu *afu)
+{
+ struct ocxl_fn *fn = afu->fn;
+ int start_offset, size;
+
+ start_offset = afu->actag_base - fn->actag_base;
+ size = afu->actag_enabled;
+ ocxl_actag_afu_free(afu->fn, start_offset, size);
+}
+
+static int assign_afu_pasid(struct ocxl_afu *afu)
+{
+ struct ocxl_fn *fn = afu->fn;
+ int pasid_count, pasid_offset;
+ struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
+
+ /*
+ * We only support the case where the function configuration
+ * requested enough PASIDs to cover all AFUs.
+ */
+ pasid_count = 1 << afu->config.pasid_supported_log;
+ pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count);
+ if (pasid_offset < 0) {
+ dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n",
+ pasid_count, pasid_offset);
+ return pasid_offset;
+ }
+ afu->pasid_base = fn->pasid_base + pasid_offset;
+ afu->pasid_count = 0;
+ afu->pasid_max = pasid_count;
+
+ ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos,
+ afu->pasid_base,
+ afu->config.pasid_supported_log);
+ dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n",
+ afu->pasid_base, pasid_count);
+ return 0;
+}
+
+static void reclaim_afu_pasid(struct ocxl_afu *afu)
+{
+ struct ocxl_fn *fn = afu->fn;
+ int start_offset, size;
+
+ start_offset = afu->pasid_base - fn->pasid_base;
+ size = 1 << afu->config.pasid_supported_log;
+ ocxl_pasid_afu_free(afu->fn, start_offset, size);
+}
+
+static int reserve_fn_bar(struct ocxl_fn *fn, int bar)
+{
+ struct pci_dev *dev = to_pci_dev(fn->dev.parent);
+ int rc, idx;
+
+ if (bar != 0 && bar != 2 && bar != 4)
+ return -EINVAL;
+
+ idx = bar >> 1;
+ if (fn->bar_used[idx]++ == 0) {
+ rc = pci_request_region(dev, bar, "ocxl");
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void release_fn_bar(struct ocxl_fn *fn, int bar)
+{
+ struct pci_dev *dev = to_pci_dev(fn->dev.parent);
+ int idx;
+
+ if (bar != 0 && bar != 2 && bar != 4)
+ return;
+
+ idx = bar >> 1;
+ if (--fn->bar_used[idx] == 0)
+ pci_release_region(dev, bar);
+ WARN_ON(fn->bar_used[idx] < 0);
+}
+
+static int map_mmio_areas(struct ocxl_afu *afu)
+{
+ int rc;
+ struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent);
+
+ rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar);
+ if (rc)
+ return rc;
+
+ rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar);
+ if (rc) {
+ release_fn_bar(afu->fn, afu->config.global_mmio_bar);
+ return rc;
+ }
+
+ afu->global_mmio_start =
+ pci_resource_start(pci_dev, afu->config.global_mmio_bar) +
+ afu->config.global_mmio_offset;
+ afu->pp_mmio_start =
+ pci_resource_start(pci_dev, afu->config.pp_mmio_bar) +
+ afu->config.pp_mmio_offset;
+
+ afu->global_mmio_ptr = ioremap(afu->global_mmio_start,
+ afu->config.global_mmio_size);
+ if (!afu->global_mmio_ptr) {
+ release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
+ release_fn_bar(afu->fn, afu->config.global_mmio_bar);
+ dev_err(&pci_dev->dev, "Error mapping global mmio area\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Leave an empty page between the per-process mmio area and
+ * the AFU interrupt mappings
+ */
+ afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
+ return 0;
+}
+
+static void unmap_mmio_areas(struct ocxl_afu *afu)
+{
+ if (afu->global_mmio_ptr) {
+ iounmap(afu->global_mmio_ptr);
+ afu->global_mmio_ptr = NULL;
+ }
+ afu->global_mmio_start = 0;
+ afu->pp_mmio_start = 0;
+ release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
+ release_fn_bar(afu->fn, afu->config.global_mmio_bar);
+}
+
+static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
+{
+ int rc;
+
+ rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx);
+ if (rc)
+ return rc;
+
+ rc = assign_afu_actag(afu);
+ if (rc)
+ return rc;
+
+ rc = assign_afu_pasid(afu);
+ if (rc)
+ goto err_free_actag;
+
+ rc = map_mmio_areas(afu);
+ if (rc)
+ goto err_free_pasid;
+
+ return 0;
+
+err_free_pasid:
+ reclaim_afu_pasid(afu);
+err_free_actag:
+ reclaim_afu_actag(afu);
+ return rc;
+}
+
+static void deconfigure_afu(struct ocxl_afu *afu)
+{
+ unmap_mmio_areas(afu);
+ reclaim_afu_pasid(afu);
+ reclaim_afu_actag(afu);
+}
+
+static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu)
+{
+ ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1);
+
+ return 0;
+}
+
+static void deactivate_afu(struct ocxl_afu *afu)
+{
+ struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent);
+
+ ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0);
+}
+
+static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx)
+{
+ int rc;
+ struct ocxl_afu *afu;
+
+ afu = alloc_afu(fn);
+ if (!afu)
+ return -ENOMEM;
+
+ rc = configure_afu(afu, afu_idx, dev);
+ if (rc) {
+ ocxl_afu_put(afu);
+ return rc;
+ }
+
+ rc = activate_afu(dev, afu);
+ if (rc) {
+ deconfigure_afu(afu);
+ ocxl_afu_put(afu);
+ return rc;
+ }
+
+ list_add_tail(&afu->list, &fn->afu_list);
+
+ return 0;
+}
+
+static void remove_afu(struct ocxl_afu *afu)
+{
+ list_del(&afu->list);
+ ocxl_context_detach_all(afu);
+ deactivate_afu(afu);
+ deconfigure_afu(afu);
+ ocxl_afu_put(afu); // matches the implicit get in alloc_afu
+}
+
+static struct ocxl_fn *alloc_function(void)
+{
+ struct ocxl_fn *fn;
+
+ fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL);
+ if (!fn)
+ return NULL;
+
+ INIT_LIST_HEAD(&fn->afu_list);
+ INIT_LIST_HEAD(&fn->pasid_list);
+ INIT_LIST_HEAD(&fn->actag_list);
+
+ return fn;
+}
+
+static void free_function(struct ocxl_fn *fn)
+{
+ WARN_ON(!list_empty(&fn->afu_list));
+ WARN_ON(!list_empty(&fn->pasid_list));
+ kfree(fn);
+}
+
+static void free_function_dev(struct device *dev)
+{
+ struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev);
+
+ free_function(fn);
+}
+
+static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
+{
+ int rc;
+
+ fn->dev.parent = &dev->dev;
+ fn->dev.release = free_function_dev;
+ rc = dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
+ if (rc)
+ return rc;
+ return 0;
+}
+
+static int assign_function_actag(struct ocxl_fn *fn)
+{
+ struct pci_dev *dev = to_pci_dev(fn->dev.parent);
+ u16 base, enabled, supported;
+ int rc;
+
+ rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported);
+ if (rc)
+ return rc;
+
+ fn->actag_base = base;
+ fn->actag_enabled = enabled;
+ fn->actag_supported = supported;
+
+ ocxl_config_set_actag(dev, fn->config.dvsec_function_pos,
+ fn->actag_base, fn->actag_enabled);
+ dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n",
+ fn->actag_base, fn->actag_enabled);
+ return 0;
+}
+
+static int set_function_pasid(struct ocxl_fn *fn)
+{
+ struct pci_dev *dev = to_pci_dev(fn->dev.parent);
+ int rc, desired_count, max_count;
+
+ /* A function may not require any PASID */
+ if (fn->config.max_pasid_log < 0)
+ return 0;
+
+ rc = ocxl_config_get_pasid_info(dev, &max_count);
+ if (rc)
+ return rc;
+
+ desired_count = 1 << fn->config.max_pasid_log;
+
+ if (desired_count > max_count) {
+ dev_err(&fn->dev,
+			"Function requires more PASIDs than are available (%d vs. %d)\n",
+ desired_count, max_count);
+ return -ENOSPC;
+ }
+
+ fn->pasid_base = 0;
+ return 0;
+}
+
+static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_enable_device(dev);
+ if (rc) {
+ dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Once it has been confirmed to work on our hardware, we
+ * should reset the function, to force the adapter to restart
+ * from scratch.
+ * A function reset would also reset all its AFUs.
+ *
+ * Some hints for implementation:
+ *
+	 * - there's no status bit to know when the reset is done. We
+ * should try reading the config space to know when it's
+ * done.
+ * - probably something like:
+ * Reset
+ * wait 100ms
+ * issue config read
+ * allow device up to 1 sec to return success on config
+ * read before declaring it broken
+ *
+ * Some shared logic on the card (CFG, TLX) won't be reset, so
+ * there's no guarantee that it will be enough.
+ */
+ rc = ocxl_config_read_function(dev, &fn->config);
+ if (rc)
+ return rc;
+
+ rc = set_function_device(fn, dev);
+ if (rc)
+ return rc;
+
+ rc = assign_function_actag(fn);
+ if (rc)
+ return rc;
+
+ rc = set_function_pasid(fn);
+ if (rc)
+ return rc;
+
+ rc = ocxl_link_setup(dev, 0, &fn->link);
+ if (rc)
+ return rc;
+
+ rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos);
+ if (rc) {
+ ocxl_link_release(dev, fn->link);
+ return rc;
+ }
+ return 0;
+}
+
+static void deconfigure_function(struct ocxl_fn *fn)
+{
+ struct pci_dev *dev = to_pci_dev(fn->dev.parent);
+
+ ocxl_link_release(dev, fn->link);
+ pci_disable_device(dev);
+}
+
+static struct ocxl_fn *init_function(struct pci_dev *dev)
+{
+ struct ocxl_fn *fn;
+ int rc;
+
+ fn = alloc_function();
+ if (!fn)
+ return ERR_PTR(-ENOMEM);
+
+ rc = configure_function(fn, dev);
+ if (rc) {
+ free_function(fn);
+ return ERR_PTR(rc);
+ }
+
+ rc = device_register(&fn->dev);
+ if (rc) {
+ deconfigure_function(fn);
+ put_device(&fn->dev);
+ return ERR_PTR(rc);
+ }
+ return fn;
+}
+
+// Device detection & initialisation
+
+struct ocxl_fn *ocxl_function_open(struct pci_dev *dev)
+{
+ int rc, afu_count = 0;
+ u8 afu;
+ struct ocxl_fn *fn;
+
+ if (!radix_enabled()) {
+ dev_err(&dev->dev, "Unsupported memory model (hash)\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ fn = init_function(dev);
+ if (IS_ERR(fn)) {
+ dev_err(&dev->dev, "function init failed: %li\n",
+ PTR_ERR(fn));
+ return fn;
+ }
+
+ for (afu = 0; afu <= fn->config.max_afu_index; afu++) {
+ rc = ocxl_config_check_afu_index(dev, &fn->config, afu);
+ if (rc > 0) {
+ rc = init_afu(dev, fn, afu);
+ if (rc) {
+ dev_err(&dev->dev,
+ "Can't initialize AFU index %d\n", afu);
+ continue;
+ }
+ afu_count++;
+ }
+ }
+ dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count);
+ return fn;
+}
+EXPORT_SYMBOL_GPL(ocxl_function_open);
+
+struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn)
+{
+ return &fn->afu_list;
+}
+EXPORT_SYMBOL_GPL(ocxl_function_afu_list);
+
+struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx)
+{
+ struct ocxl_afu *afu;
+
+ list_for_each_entry(afu, &fn->afu_list, list) {
+ if (afu->config.idx == afu_idx)
+ return afu;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu);
+
+const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn)
+{
+ return &fn->config;
+}
+EXPORT_SYMBOL_GPL(ocxl_function_config);
+
+void ocxl_function_close(struct ocxl_fn *fn)
+{
+ struct ocxl_afu *afu, *tmp;
+
+ list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) {
+ remove_afu(afu);
+ }
+
+ deconfigure_function(fn);
+ device_unregister(&fn->dev);
+}
+EXPORT_SYMBOL_GPL(ocxl_function_close);
+
+// AFU Metadata
+
+struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu)
+{
+ return &afu->config;
+}
+EXPORT_SYMBOL_GPL(ocxl_afu_config);
+
+void ocxl_afu_set_private(struct ocxl_afu *afu, void *private)
+{
+ afu->private = private;
+}
+EXPORT_SYMBOL_GPL(ocxl_afu_set_private);
+
+void *ocxl_afu_get_private(struct ocxl_afu *afu)
+{
+ if (afu)
+ return afu->private;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ocxl_afu_get_private);
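For context, a hedged sketch of how a dependent driver could consume the interface exported above from its own probe routine. struct my_afu_data is a made-up per-AFU structure and the error handling is abbreviated; this is illustrative, not part of the patch.

/*
 * Illustrative only: open the openCAPI function, pick the first AFU and
 * hang driver-private data off it.
 */
static int my_driver_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ocxl_fn *fn;
	struct ocxl_afu *afu;
	struct my_afu_data *data;

	fn = ocxl_function_open(dev);
	if (IS_ERR(fn))
		return PTR_ERR(fn);

	/* Drive the AFU at index 0 */
	afu = ocxl_function_fetch_afu(fn, 0);
	if (!afu) {
		ocxl_function_close(fn);
		return -ENODEV;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ocxl_function_close(fn);
		return -ENOMEM;
	}
	ocxl_afu_set_private(afu, data);

	pci_set_drvdata(dev, fn);
	return 0;
}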
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index e6a607488f8a..2870c25da166 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -3,6 +3,7 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
+#include <linux/eventfd.h>
#include <linux/uaccess.h>
#include <uapi/misc/ocxl.h>
#include <asm/reg.h>
@@ -17,70 +18,56 @@ static struct class *ocxl_class;
static struct mutex minors_idr_lock;
static struct idr minors_idr;
-static struct ocxl_afu *find_and_get_afu(dev_t devno)
+static struct ocxl_file_info *find_file_info(dev_t devno)
{
- struct ocxl_afu *afu;
- int afu_minor;
+ struct ocxl_file_info *info;
- afu_minor = MINOR(devno);
/*
* We don't declare an RCU critical section here, as our AFU
* is protected by a reference counter on the device. By the time the
- * minor number of a device is removed from the idr, the ref count of
+ * info reference is removed from the idr, the ref count of
* the device is already at 0, so no user API will access that AFU and
* this function can't return it.
*/
- afu = idr_find(&minors_idr, afu_minor);
- if (afu)
- ocxl_afu_get(afu);
- return afu;
+ info = idr_find(&minors_idr, MINOR(devno));
+ return info;
}
-static int allocate_afu_minor(struct ocxl_afu *afu)
+static int allocate_minor(struct ocxl_file_info *info)
{
int minor;
mutex_lock(&minors_idr_lock);
- minor = idr_alloc(&minors_idr, afu, 0, OCXL_NUM_MINORS, GFP_KERNEL);
+ minor = idr_alloc(&minors_idr, info, 0, OCXL_NUM_MINORS, GFP_KERNEL);
mutex_unlock(&minors_idr_lock);
return minor;
}
-static void free_afu_minor(struct ocxl_afu *afu)
+static void free_minor(struct ocxl_file_info *info)
{
mutex_lock(&minors_idr_lock);
- idr_remove(&minors_idr, MINOR(afu->dev.devt));
+ idr_remove(&minors_idr, MINOR(info->dev.devt));
mutex_unlock(&minors_idr_lock);
}
static int afu_open(struct inode *inode, struct file *file)
{
- struct ocxl_afu *afu;
+ struct ocxl_file_info *info;
struct ocxl_context *ctx;
int rc;
pr_debug("%s for device %x\n", __func__, inode->i_rdev);
- afu = find_and_get_afu(inode->i_rdev);
- if (!afu)
+ info = find_file_info(inode->i_rdev);
+ if (!info)
return -ENODEV;
- ctx = ocxl_context_alloc();
- if (!ctx) {
- rc = -ENOMEM;
- goto put_afu;
- }
-
- rc = ocxl_context_init(ctx, afu, inode->i_mapping);
+ rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping);
if (rc)
- goto put_afu;
+ return rc;
+
file->private_data = ctx;
- ocxl_afu_put(afu);
return 0;
-
-put_afu:
- ocxl_afu_put(afu);
- return rc;
}
static long afu_ioctl_attach(struct ocxl_context *ctx,
@@ -100,7 +87,7 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
return -EINVAL;
amr = arg.amr & mfspr(SPRN_UAMOR);
- rc = ocxl_context_attach(ctx, amr);
+ rc = ocxl_context_attach(ctx, amr, current->mm);
return rc;
}
@@ -151,10 +138,9 @@ static long afu_ioctl_enable_p9_wait(struct ocxl_context *ctx,
mutex_unlock(&ctx->status_mutex);
if (status == ATTACHED) {
- int rc;
- struct link *link = ctx->afu->fn->link;
+ int rc = ocxl_link_update_pe(ctx->afu->fn->link,
+ ctx->pasid, ctx->tidr);
- rc = ocxl_link_update_pe(link, ctx->pasid, ctx->tidr);
if (rc)
return rc;
}
@@ -198,18 +184,40 @@ static long afu_ioctl_get_features(struct ocxl_context *ctx,
x == OCXL_IOCTL_GET_FEATURES ? "GET_FEATURES" : \
"UNKNOWN")
+static irqreturn_t irq_handler(void *private)
+{
+ struct eventfd_ctx *ev_ctx = private;
+
+ eventfd_signal(ev_ctx, 1);
+ return IRQ_HANDLED;
+}
+
+static void irq_free(void *private)
+{
+ struct eventfd_ctx *ev_ctx = private;
+
+ eventfd_ctx_put(ev_ctx);
+}
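On the userspace side, the two callbacks above are what eventually service an interrupt wired up through OCXL_IOCTL_IRQ_SET_FD. A hedged sketch of the user half follows, with minimal error handling; a real program would attach the context first (OCXL_IOCTL_ATTACH), and the device path is whatever node ocxl created for the AFU.

/* Userspace sketch: wire an AFU interrupt to an eventfd and wait on it. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/ocxl.h>

int wait_for_afu_irq(const char *dev_path)
{
	struct ocxl_ioctl_irq_fd irq_fd = { 0 };
	uint64_t irq_offset, count;
	int fd, efd;

	fd = open(dev_path, O_RDWR);
	efd = eventfd(0, EFD_CLOEXEC);

	ioctl(fd, OCXL_IOCTL_IRQ_ALLOC, &irq_offset);

	irq_fd.irq_offset = irq_offset;
	irq_fd.eventfd = efd;
	ioctl(fd, OCXL_IOCTL_IRQ_SET_FD, &irq_fd);

	/* blocks until irq_handler() above signals the eventfd */
	read(efd, &count, sizeof(count));

	close(efd);
	close(fd);
	return 0;
}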
+
static long afu_ioctl(struct file *file, unsigned int cmd,
unsigned long args)
{
struct ocxl_context *ctx = file->private_data;
struct ocxl_ioctl_irq_fd irq_fd;
+ struct eventfd_ctx *ev_ctx;
+ int irq_id;
u64 irq_offset;
long rc;
+ bool closed;
pr_debug("%s for context %d, command %s\n", __func__, ctx->pasid,
CMD_STR(cmd));
- if (ctx->status == CLOSED)
+ mutex_lock(&ctx->status_mutex);
+ closed = (ctx->status == CLOSED);
+ mutex_unlock(&ctx->status_mutex);
+
+ if (closed)
return -EIO;
switch (cmd) {
@@ -219,12 +227,13 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
break;
case OCXL_IOCTL_IRQ_ALLOC:
- rc = ocxl_afu_irq_alloc(ctx, &irq_offset);
+ rc = ocxl_afu_irq_alloc(ctx, &irq_id);
if (!rc) {
+ irq_offset = ocxl_irq_id_to_offset(ctx, irq_id);
rc = copy_to_user((u64 __user *) args, &irq_offset,
sizeof(irq_offset));
if (rc) {
- ocxl_afu_irq_free(ctx, irq_offset);
+ ocxl_afu_irq_free(ctx, irq_id);
return -EFAULT;
}
}
@@ -235,7 +244,8 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
sizeof(irq_offset));
if (rc)
return -EFAULT;
- rc = ocxl_afu_irq_free(ctx, irq_offset);
+ irq_id = ocxl_irq_offset_to_id(ctx, irq_offset);
+ rc = ocxl_afu_irq_free(ctx, irq_id);
break;
case OCXL_IOCTL_IRQ_SET_FD:
@@ -245,8 +255,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (irq_fd.reserved)
return -EINVAL;
- rc = ocxl_afu_irq_set_fd(ctx, irq_fd.irq_offset,
- irq_fd.eventfd);
+ irq_id = ocxl_irq_offset_to_id(ctx, irq_fd.irq_offset);
+ ev_ctx = eventfd_ctx_fdget(irq_fd.eventfd);
+ if (IS_ERR(ev_ctx))
+ return PTR_ERR(ev_ctx);
+ rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx);
break;
case OCXL_IOCTL_GET_METADATA:
@@ -469,39 +482,102 @@ static const struct file_operations ocxl_afu_fops = {
.release = afu_release,
};
-int ocxl_create_cdev(struct ocxl_afu *afu)
+// Free the info struct
+static void info_release(struct device *dev)
+{
+ struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev);
+
+ free_minor(info);
+ ocxl_afu_put(info->afu);
+ kfree(info);
+}
+
+static int ocxl_file_make_visible(struct ocxl_file_info *info)
{
int rc;
- cdev_init(&afu->cdev, &ocxl_afu_fops);
- rc = cdev_add(&afu->cdev, afu->dev.devt, 1);
+ cdev_init(&info->cdev, &ocxl_afu_fops);
+ rc = cdev_add(&info->cdev, info->dev.devt, 1);
if (rc) {
- dev_err(&afu->dev, "Unable to add afu char device: %d\n", rc);
+ dev_err(&info->dev, "Unable to add afu char device: %d\n", rc);
return rc;
}
+
return 0;
}
-void ocxl_destroy_cdev(struct ocxl_afu *afu)
+static void ocxl_file_make_invisible(struct ocxl_file_info *info)
{
- cdev_del(&afu->cdev);
+ cdev_del(&info->cdev);
}
-int ocxl_register_afu(struct ocxl_afu *afu)
+int ocxl_file_register_afu(struct ocxl_afu *afu)
{
int minor;
+ int rc;
+ struct ocxl_file_info *info;
+ struct ocxl_fn *fn = afu->fn;
+ struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
- minor = allocate_afu_minor(afu);
- if (minor < 0)
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
+ minor = allocate_minor(info);
+ if (minor < 0) {
+ kfree(info);
return minor;
- afu->dev.devt = MKDEV(MAJOR(ocxl_dev), minor);
- afu->dev.class = ocxl_class;
- return device_register(&afu->dev);
+ }
+
+ info->dev.parent = &fn->dev;
+ info->dev.devt = MKDEV(MAJOR(ocxl_dev), minor);
+ info->dev.class = ocxl_class;
+ info->dev.release = info_release;
+
+ info->afu = afu;
+ ocxl_afu_get(afu);
+
+ rc = dev_set_name(&info->dev, "%s.%s.%hhu",
+ afu->config.name, dev_name(&pci_dev->dev), afu->config.idx);
+ if (rc)
+ goto err_put;
+
+ rc = device_register(&info->dev);
+ if (rc)
+ goto err_put;
+
+ rc = ocxl_sysfs_register_afu(info);
+ if (rc)
+ goto err_unregister;
+
+ rc = ocxl_file_make_visible(info);
+ if (rc)
+ goto err_unregister;
+
+ ocxl_afu_set_private(afu, info);
+
+ return 0;
+
+err_unregister:
+ ocxl_sysfs_unregister_afu(info); // safe to call even if register failed
+ device_unregister(&info->dev);
+err_put:
+ ocxl_afu_put(afu);
+ free_minor(info);
+ kfree(info);
+ return rc;
}
-void ocxl_unregister_afu(struct ocxl_afu *afu)
+void ocxl_file_unregister_afu(struct ocxl_afu *afu)
{
- free_afu_minor(afu);
+ struct ocxl_file_info *info = ocxl_afu_get_private(afu);
+
+ if (!info)
+ return;
+
+ ocxl_file_make_invisible(info);
+ ocxl_sysfs_unregister_afu(info);
+ device_unregister(&info->dev);
}
static char *ocxl_devnode(struct device *dev, umode_t *mode)
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index d50b861d7e57..cce5b0d64505 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -76,7 +76,7 @@ struct spa {
* limited number of opencapi slots on a system and lookup is only
* done when the device is probed
*/
-struct link {
+struct ocxl_link {
struct list_head list;
struct kref ref;
int domain;
@@ -163,7 +163,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work)
if (fault->dsisr & SPA_XSL_S)
access |= _PAGE_WRITE;
- if (REGION_ID(fault->dar) != USER_REGION_ID)
+ if (get_region_id(fault->dar) != USER_REGION_ID)
access |= _PAGE_PRIVILEGED;
local_irq_save(flags);
@@ -179,12 +179,12 @@ ack:
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
- struct link *link = (struct link *) data;
+ struct ocxl_link *link = (struct ocxl_link *) data;
struct spa *spa = link->spa;
u64 dsisr, dar, pe_handle;
struct pe_data *pe_data;
struct ocxl_process_element *pe;
- int lpid, pid, tid;
+ int pid;
bool schedule = false;
read_irq(spa, &dsisr, &dar, &pe_handle);
@@ -192,9 +192,7 @@ static irqreturn_t xsl_fault_handler(int irq, void *data)
WARN_ON(pe_handle > SPA_PE_MASK);
pe = spa->spa_mem + pe_handle;
- lpid = be32_to_cpu(pe->lpid);
pid = be32_to_cpu(pe->pid);
- tid = be32_to_cpu(pe->tid);
/* We could be reading all null values here if the PE is being
* removed while an interrupt kicks in. It's not supposed to
* happen if the driver notified the AFU to terminate the
@@ -256,7 +254,7 @@ static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
&spa->reg_tfc, &spa->reg_pe_handle);
}
-static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
+static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
{
struct spa *spa = link->spa;
int rc;
@@ -311,7 +309,7 @@ err_xsl:
return rc;
}
-static void release_xsl_irq(struct link *link)
+static void release_xsl_irq(struct ocxl_link *link)
{
struct spa *spa = link->spa;
@@ -323,7 +321,7 @@ static void release_xsl_irq(struct link *link)
unmap_irq_registers(spa);
}
-static int alloc_spa(struct pci_dev *dev, struct link *link)
+static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link)
{
struct spa *spa;
@@ -350,7 +348,7 @@ static int alloc_spa(struct pci_dev *dev, struct link *link)
return 0;
}
-static void free_spa(struct link *link)
+static void free_spa(struct ocxl_link *link)
{
struct spa *spa = link->spa;
@@ -364,12 +362,12 @@ static void free_spa(struct link *link)
}
}
-static int alloc_link(struct pci_dev *dev, int PE_mask, struct link **out_link)
+static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link)
{
- struct link *link;
+ struct ocxl_link *link;
int rc;
- link = kzalloc(sizeof(struct link), GFP_KERNEL);
+ link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL);
if (!link)
return -ENOMEM;
@@ -405,7 +403,7 @@ err_free:
return rc;
}
-static void free_link(struct link *link)
+static void free_link(struct ocxl_link *link)
{
release_xsl_irq(link);
free_spa(link);
@@ -415,7 +413,7 @@ static void free_link(struct link *link)
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
int rc = 0;
- struct link *link;
+ struct ocxl_link *link;
mutex_lock(&links_list_lock);
list_for_each_entry(link, &links_list, list) {
@@ -442,7 +440,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_setup);
static void release_xsl(struct kref *ref)
{
- struct link *link = container_of(ref, struct link, ref);
+ struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);
list_del(&link->list);
/* call platform code before releasing data */
@@ -452,7 +450,7 @@ static void release_xsl(struct kref *ref)
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
- struct link *link = (struct link *) link_handle;
+ struct ocxl_link *link = (struct ocxl_link *) link_handle;
mutex_lock(&links_list_lock);
kref_put(&link->ref, release_xsl);
@@ -488,7 +486,7 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data)
{
- struct link *link = (struct link *) link_handle;
+ struct ocxl_link *link = (struct ocxl_link *) link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc = 0;
@@ -558,7 +556,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
- struct link *link = (struct link *) link_handle;
+ struct ocxl_link *link = (struct ocxl_link *) link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc;
@@ -594,7 +592,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
- struct link *link = (struct link *) link_handle;
+ struct ocxl_link *link = (struct ocxl_link *) link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
struct pe_data *pe_data;
@@ -666,7 +664,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, u64 *trigger_addr)
{
- struct link *link = (struct link *) link_handle;
+ struct ocxl_link *link = (struct ocxl_link *) link_handle;
int rc, irq;
u64 addr;
@@ -687,7 +685,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
- struct link *link = (struct link *) link_handle;
+ struct ocxl_link *link = (struct ocxl_link *) link_handle;
pnv_ocxl_free_xive_irq(hw_irq);
atomic_inc(&link->irq_available);
diff --git a/drivers/misc/ocxl/mmio.c b/drivers/misc/ocxl/mmio.c
new file mode 100644
index 000000000000..aae713db4ebe
--- /dev/null
+++ b/drivers/misc/ocxl/mmio.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2019 IBM Corp.
+#include <linux/sched/mm.h>
+#include "trace.h"
+#include "ocxl_internal.h"
+
+int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u32 *val)
+{
+ if (offset > afu->config.global_mmio_size - 4)
+ return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+ if (endian == OCXL_HOST_ENDIAN)
+ endian = OCXL_BIG_ENDIAN;
+#endif
+
+ switch (endian) {
+ case OCXL_BIG_ENDIAN:
+ *val = readl_be((char *)afu->global_mmio_ptr + offset);
+ break;
+
+ default:
+ *val = readl((char *)afu->global_mmio_ptr + offset);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_read32);
+
+int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u64 *val)
+{
+ if (offset > afu->config.global_mmio_size - 8)
+ return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+ if (endian == OCXL_HOST_ENDIAN)
+ endian = OCXL_BIG_ENDIAN;
+#endif
+
+ switch (endian) {
+ case OCXL_BIG_ENDIAN:
+ *val = readq_be((char *)afu->global_mmio_ptr + offset);
+ break;
+
+ default:
+ *val = readq((char *)afu->global_mmio_ptr + offset);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_read64);
+
+int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u32 val)
+{
+ if (offset > afu->config.global_mmio_size - 4)
+ return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+ if (endian == OCXL_HOST_ENDIAN)
+ endian = OCXL_BIG_ENDIAN;
+#endif
+
+ switch (endian) {
+ case OCXL_BIG_ENDIAN:
+ writel_be(val, (char *)afu->global_mmio_ptr + offset);
+ break;
+
+ default:
+ writel(val, (char *)afu->global_mmio_ptr + offset);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_write32);
+
+int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u64 val)
+{
+ if (offset > afu->config.global_mmio_size - 8)
+ return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+ if (endian == OCXL_HOST_ENDIAN)
+ endian = OCXL_BIG_ENDIAN;
+#endif
+
+ switch (endian) {
+ case OCXL_BIG_ENDIAN:
+ writeq_be(val, (char *)afu->global_mmio_ptr + offset);
+ break;
+
+ default:
+ writeq(val, (char *)afu->global_mmio_ptr + offset);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_write64);
+
+int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u32 mask)
+{
+ u32 tmp;
+
+ if (offset > afu->config.global_mmio_size - 4)
+ return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+ if (endian == OCXL_HOST_ENDIAN)
+ endian = OCXL_BIG_ENDIAN;
+#endif
+
+ switch (endian) {
+ case OCXL_BIG_ENDIAN:
+ tmp = readl_be((char *)afu->global_mmio_ptr + offset);
+ tmp |= mask;
+ writel_be(tmp, (char *)afu->global_mmio_ptr + offset);
+ break;
+
+ default:
+ tmp = readl((char *)afu->global_mmio_ptr + offset);
+ tmp |= mask;
+ writel(tmp, (char *)afu->global_mmio_ptr + offset);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_set32);
+
+int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u64 mask)
+{
+ u64 tmp;
+
+ if (offset > afu->config.global_mmio_size - 8)
+ return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+ if (endian == OCXL_HOST_ENDIAN)
+ endian = OCXL_BIG_ENDIAN;
+#endif
+
+ switch (endian) {
+ case OCXL_BIG_ENDIAN:
+ tmp = readq_be((char *)afu->global_mmio_ptr + offset);
+ tmp |= mask;
+ writeq_be(tmp, (char *)afu->global_mmio_ptr + offset);
+ break;
+
+ default:
+ tmp = readq((char *)afu->global_mmio_ptr + offset);
+ tmp |= mask;
+ writeq(tmp, (char *)afu->global_mmio_ptr + offset);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_set64);
+
+int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u32 mask)
+{
+ u32 tmp;
+
+ if (offset > afu->config.global_mmio_size - 4)
+ return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+ if (endian == OCXL_HOST_ENDIAN)
+ endian = OCXL_BIG_ENDIAN;
+#endif
+
+ switch (endian) {
+ case OCXL_BIG_ENDIAN:
+ tmp = readl_be((char *)afu->global_mmio_ptr + offset);
+ tmp &= ~mask;
+ writel_be(tmp, (char *)afu->global_mmio_ptr + offset);
+ break;
+
+ default:
+ tmp = readl((char *)afu->global_mmio_ptr + offset);
+ tmp &= ~mask;
+ writel(tmp, (char *)afu->global_mmio_ptr + offset);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_clear32);
+
+int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u64 mask)
+{
+ u64 tmp;
+
+ if (offset > afu->config.global_mmio_size - 8)
+ return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+ if (endian == OCXL_HOST_ENDIAN)
+ endian = OCXL_BIG_ENDIAN;
+#endif
+
+ switch (endian) {
+ case OCXL_BIG_ENDIAN:
+ tmp = readq_be((char *)afu->global_mmio_ptr + offset);
+ tmp &= ~mask;
+ writeq_be(tmp, (char *)afu->global_mmio_ptr + offset);
+ break;
+
+ default:
+ tmp = readq((char *)afu->global_mmio_ptr + offset);
+ tmp &= ~mask;
+ writeq(tmp, (char *)afu->global_mmio_ptr + offset);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_clear64);
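A hedged usage sketch of the accessors defined above, in kernel context. The register offset, bit meaning and helper name are invented for illustration; only the ocxl_global_mmio_* signatures come from this file.

/* Sketch: set a control bit in the AFU's global MMIO and read it back. */
#define MY_AFU_STATUS	0x08	/* hypothetical 64-bit status register */
#define MY_AFU_ENABLE	BIT(0)

static int my_afu_enable(struct ocxl_afu *afu)
{
	u64 status;
	int rc;

	rc = ocxl_global_mmio_set64(afu, MY_AFU_STATUS, OCXL_HOST_ENDIAN,
				    MY_AFU_ENABLE);
	if (rc)
		return rc;

	rc = ocxl_global_mmio_read64(afu, MY_AFU_STATUS, OCXL_HOST_ENDIAN,
				     &status);
	if (rc)
		return rc;

	return (status & MY_AFU_ENABLE) ? 0 : -EIO;
}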
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index a32f2151029f..97415afd79f3 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -11,12 +11,8 @@
#define MAX_IRQ_PER_LINK 2000
#define MAX_IRQ_PER_CONTEXT MAX_IRQ_PER_LINK
-#define to_ocxl_function(d) container_of(d, struct ocxl_fn, dev)
-#define to_ocxl_afu(d) container_of(d, struct ocxl_afu, dev)
-
extern struct pci_driver ocxl_pci_driver;
-
struct ocxl_fn {
struct device dev;
int bar_used[3];
@@ -31,11 +27,17 @@ struct ocxl_fn {
void *link;
};
+struct ocxl_file_info {
+ struct ocxl_afu *afu;
+ struct device dev;
+ struct cdev cdev;
+ struct bin_attribute attr_global_mmio;
+};
+
struct ocxl_afu {
+ struct kref kref;
struct ocxl_fn *fn;
struct list_head list;
- struct device dev;
- struct cdev cdev;
struct ocxl_afu_config config;
int pasid_base;
int pasid_count; /* opened contexts */
@@ -49,7 +51,7 @@ struct ocxl_afu {
u64 irq_base_offset;
void __iomem *global_mmio_ptr;
u64 pp_mmio_start;
- struct bin_attribute attr_global_mmio;
+ void *private;
};
enum ocxl_context_status {
@@ -92,41 +94,51 @@ struct ocxl_process_element {
__be32 software_state;
};
+int ocxl_create_cdev(struct ocxl_afu *afu);
+void ocxl_destroy_cdev(struct ocxl_afu *afu);
+int ocxl_file_register_afu(struct ocxl_afu *afu);
+void ocxl_file_unregister_afu(struct ocxl_afu *afu);
+
+int ocxl_file_init(void);
+void ocxl_file_exit(void);
+
+int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size);
+void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size);
+int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size);
+void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size);
+
+/*
+ * Get the max PASID value that can be used by the function
+ */
+int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count);
+
+/*
+ * Check if an AFU index is valid for the given function.
+ *
+ * AFU indexes can be sparse, so a driver should check all indexes up
+ * to the maximum found in the function description
+ */
+int ocxl_config_check_afu_index(struct pci_dev *dev,
+ struct ocxl_fn_config *fn, int afu_idx);
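A short sketch of the intended use of this helper, mirroring what ocxl_function_open() does earlier in this patch; the function name is illustrative.

/* Walk every possible AFU index, since populated indexes may be sparse. */
static int count_afus(struct pci_dev *dev, struct ocxl_fn_config *fn_config)
{
	int afu, rc, count = 0;

	for (afu = 0; afu <= fn_config->max_afu_index; afu++) {
		rc = ocxl_config_check_afu_index(dev, fn_config, afu);
		if (rc < 0)
			return rc;	/* config space error */
		if (rc > 0)
			count++;	/* AFU present at this index */
	}
	return count;
}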
+
+/**
+ * ocxl_link_update_pe() - Update values within a Process Element
+ * @link_handle: the link handle associated with the process element
+ * @pasid: the PASID for the AFU context
+ * @tid: the new thread id for the process element
+ */
+int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid);
+
+int ocxl_context_mmap(struct ocxl_context *ctx,
+ struct vm_area_struct *vma);
+void ocxl_context_detach_all(struct ocxl_afu *afu);
-extern struct ocxl_afu *ocxl_afu_get(struct ocxl_afu *afu);
-extern void ocxl_afu_put(struct ocxl_afu *afu);
-
-extern int ocxl_create_cdev(struct ocxl_afu *afu);
-extern void ocxl_destroy_cdev(struct ocxl_afu *afu);
-extern int ocxl_register_afu(struct ocxl_afu *afu);
-extern void ocxl_unregister_afu(struct ocxl_afu *afu);
-
-extern int ocxl_file_init(void);
-extern void ocxl_file_exit(void);
-
-extern int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size);
-extern void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size);
-extern int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size);
-extern void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size);
+int ocxl_sysfs_register_afu(struct ocxl_file_info *info);
+void ocxl_sysfs_unregister_afu(struct ocxl_file_info *info);
-extern struct ocxl_context *ocxl_context_alloc(void);
-extern int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
- struct address_space *mapping);
-extern int ocxl_context_attach(struct ocxl_context *ctx, u64 amr);
-extern int ocxl_context_mmap(struct ocxl_context *ctx,
- struct vm_area_struct *vma);
-extern int ocxl_context_detach(struct ocxl_context *ctx);
-extern void ocxl_context_detach_all(struct ocxl_afu *afu);
-extern void ocxl_context_free(struct ocxl_context *ctx);
-
-extern int ocxl_sysfs_add_afu(struct ocxl_afu *afu);
-extern void ocxl_sysfs_remove_afu(struct ocxl_afu *afu);
-
-extern int ocxl_afu_irq_alloc(struct ocxl_context *ctx, u64 *irq_offset);
-extern int ocxl_afu_irq_free(struct ocxl_context *ctx, u64 irq_offset);
-extern void ocxl_afu_irq_free_all(struct ocxl_context *ctx);
-extern int ocxl_afu_irq_set_fd(struct ocxl_context *ctx, u64 irq_offset,
- int eventfd);
-extern u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, u64 irq_offset);
+int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset);
+u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id);
+void ocxl_afu_irq_free_all(struct ocxl_context *ctx);
#endif /* _OCXL_INTERNAL_H_ */
diff --git a/drivers/misc/ocxl/pci.c b/drivers/misc/ocxl/pci.c
index 21f425472a82..f2a3ef4b9bdd 100644
--- a/drivers/misc/ocxl/pci.c
+++ b/drivers/misc/ocxl/pci.c
@@ -1,9 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
-// Copyright 2017 IBM Corp.
+// Copyright 2019 IBM Corp.
#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/idr.h>
-#include <asm/pnv-ocxl.h>
#include "ocxl_internal.h"
/*
@@ -17,563 +14,47 @@ static const struct pci_device_id ocxl_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ocxl_pci_tbl);
-
-static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn)
-{
- return (get_device(&fn->dev) == NULL) ? NULL : fn;
-}
-
-static void ocxl_fn_put(struct ocxl_fn *fn)
-{
- put_device(&fn->dev);
-}
-
-struct ocxl_afu *ocxl_afu_get(struct ocxl_afu *afu)
-{
- return (get_device(&afu->dev) == NULL) ? NULL : afu;
-}
-
-void ocxl_afu_put(struct ocxl_afu *afu)
-{
- put_device(&afu->dev);
-}
-
-static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn)
-{
- struct ocxl_afu *afu;
-
- afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL);
- if (!afu)
- return NULL;
-
- mutex_init(&afu->contexts_lock);
- mutex_init(&afu->afu_control_lock);
- idr_init(&afu->contexts_idr);
- afu->fn = fn;
- ocxl_fn_get(fn);
- return afu;
-}
-
-static void free_afu(struct ocxl_afu *afu)
-{
- idr_destroy(&afu->contexts_idr);
- ocxl_fn_put(afu->fn);
- kfree(afu);
-}
-
-static void free_afu_dev(struct device *dev)
-{
- struct ocxl_afu *afu = to_ocxl_afu(dev);
-
- ocxl_unregister_afu(afu);
- free_afu(afu);
-}
-
-static int set_afu_device(struct ocxl_afu *afu, const char *location)
-{
- struct ocxl_fn *fn = afu->fn;
- int rc;
-
- afu->dev.parent = &fn->dev;
- afu->dev.release = free_afu_dev;
- rc = dev_set_name(&afu->dev, "%s.%s.%hhu", afu->config.name, location,
- afu->config.idx);
- return rc;
-}
-
-static int assign_afu_actag(struct ocxl_afu *afu, struct pci_dev *dev)
-{
- struct ocxl_fn *fn = afu->fn;
- int actag_count, actag_offset;
-
- /*
- * if there were not enough actags for the function, each afu
- * reduces its count as well
- */
- actag_count = afu->config.actag_supported *
- fn->actag_enabled / fn->actag_supported;
- actag_offset = ocxl_actag_afu_alloc(fn, actag_count);
- if (actag_offset < 0) {
- dev_err(&afu->dev, "Can't allocate %d actags for AFU: %d\n",
- actag_count, actag_offset);
- return actag_offset;
- }
- afu->actag_base = fn->actag_base + actag_offset;
- afu->actag_enabled = actag_count;
-
- ocxl_config_set_afu_actag(dev, afu->config.dvsec_afu_control_pos,
- afu->actag_base, afu->actag_enabled);
- dev_dbg(&afu->dev, "actag base=%d enabled=%d\n",
- afu->actag_base, afu->actag_enabled);
- return 0;
-}
-
-static void reclaim_afu_actag(struct ocxl_afu *afu)
-{
- struct ocxl_fn *fn = afu->fn;
- int start_offset, size;
-
- start_offset = afu->actag_base - fn->actag_base;
- size = afu->actag_enabled;
- ocxl_actag_afu_free(afu->fn, start_offset, size);
-}
-
-static int assign_afu_pasid(struct ocxl_afu *afu, struct pci_dev *dev)
-{
- struct ocxl_fn *fn = afu->fn;
- int pasid_count, pasid_offset;
-
- /*
- * We only support the case where the function configuration
- * requested enough PASIDs to cover all AFUs.
- */
- pasid_count = 1 << afu->config.pasid_supported_log;
- pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count);
- if (pasid_offset < 0) {
- dev_err(&afu->dev, "Can't allocate %d PASIDs for AFU: %d\n",
- pasid_count, pasid_offset);
- return pasid_offset;
- }
- afu->pasid_base = fn->pasid_base + pasid_offset;
- afu->pasid_count = 0;
- afu->pasid_max = pasid_count;
-
- ocxl_config_set_afu_pasid(dev, afu->config.dvsec_afu_control_pos,
- afu->pasid_base,
- afu->config.pasid_supported_log);
- dev_dbg(&afu->dev, "PASID base=%d, enabled=%d\n",
- afu->pasid_base, pasid_count);
- return 0;
-}
-
-static void reclaim_afu_pasid(struct ocxl_afu *afu)
-{
- struct ocxl_fn *fn = afu->fn;
- int start_offset, size;
-
- start_offset = afu->pasid_base - fn->pasid_base;
- size = 1 << afu->config.pasid_supported_log;
- ocxl_pasid_afu_free(afu->fn, start_offset, size);
-}
-
-static int reserve_fn_bar(struct ocxl_fn *fn, int bar)
-{
- struct pci_dev *dev = to_pci_dev(fn->dev.parent);
- int rc, idx;
-
- if (bar != 0 && bar != 2 && bar != 4)
- return -EINVAL;
-
- idx = bar >> 1;
- if (fn->bar_used[idx]++ == 0) {
- rc = pci_request_region(dev, bar, "ocxl");
- if (rc)
- return rc;
- }
- return 0;
-}
-
-static void release_fn_bar(struct ocxl_fn *fn, int bar)
-{
- struct pci_dev *dev = to_pci_dev(fn->dev.parent);
- int idx;
-
- if (bar != 0 && bar != 2 && bar != 4)
- return;
-
- idx = bar >> 1;
- if (--fn->bar_used[idx] == 0)
- pci_release_region(dev, bar);
- WARN_ON(fn->bar_used[idx] < 0);
-}
-
-static int map_mmio_areas(struct ocxl_afu *afu, struct pci_dev *dev)
-{
- int rc;
-
- rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar);
- if (rc)
- return rc;
-
- rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar);
- if (rc) {
- release_fn_bar(afu->fn, afu->config.global_mmio_bar);
- return rc;
- }
-
- afu->global_mmio_start =
- pci_resource_start(dev, afu->config.global_mmio_bar) +
- afu->config.global_mmio_offset;
- afu->pp_mmio_start =
- pci_resource_start(dev, afu->config.pp_mmio_bar) +
- afu->config.pp_mmio_offset;
-
- afu->global_mmio_ptr = ioremap(afu->global_mmio_start,
- afu->config.global_mmio_size);
- if (!afu->global_mmio_ptr) {
- release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
- release_fn_bar(afu->fn, afu->config.global_mmio_bar);
- dev_err(&dev->dev, "Error mapping global mmio area\n");
- return -ENOMEM;
- }
-
- /*
- * Leave an empty page between the per-process mmio area and
- * the AFU interrupt mappings
- */
- afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
- return 0;
-}
-
-static void unmap_mmio_areas(struct ocxl_afu *afu)
-{
- if (afu->global_mmio_ptr) {
- iounmap(afu->global_mmio_ptr);
- afu->global_mmio_ptr = NULL;
- }
- afu->global_mmio_start = 0;
- afu->pp_mmio_start = 0;
- release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
- release_fn_bar(afu->fn, afu->config.global_mmio_bar);
-}
-
-static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
-{
- int rc;
-
- rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx);
- if (rc)
- return rc;
-
- rc = set_afu_device(afu, dev_name(&dev->dev));
- if (rc)
- return rc;
-
- rc = assign_afu_actag(afu, dev);
- if (rc)
- return rc;
-
- rc = assign_afu_pasid(afu, dev);
- if (rc) {
- reclaim_afu_actag(afu);
- return rc;
- }
-
- rc = map_mmio_areas(afu, dev);
- if (rc) {
- reclaim_afu_pasid(afu);
- reclaim_afu_actag(afu);
- return rc;
- }
- return 0;
-}
-
-static void deconfigure_afu(struct ocxl_afu *afu)
-{
- unmap_mmio_areas(afu);
- reclaim_afu_pasid(afu);
- reclaim_afu_actag(afu);
-}
-
-static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu)
-{
- int rc;
-
- ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1);
- /*
- * Char device creation is the last step, as processes can
- * call our driver immediately, so all our inits must be finished.
- */
- rc = ocxl_create_cdev(afu);
- if (rc)
- return rc;
- return 0;
-}
-
-static void deactivate_afu(struct ocxl_afu *afu)
-{
- struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent);
-
- ocxl_destroy_cdev(afu);
- ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0);
-}
-
-static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx)
+static int ocxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int rc;
- struct ocxl_afu *afu;
-
- afu = alloc_afu(fn);
- if (!afu)
- return -ENOMEM;
-
- rc = configure_afu(afu, afu_idx, dev);
- if (rc) {
- free_afu(afu);
- return rc;
- }
-
- rc = ocxl_register_afu(afu);
- if (rc)
- goto err;
-
- rc = ocxl_sysfs_add_afu(afu);
- if (rc)
- goto err;
-
- rc = activate_afu(dev, afu);
- if (rc)
- goto err_sys;
-
- list_add_tail(&afu->list, &fn->afu_list);
- return 0;
-
-err_sys:
- ocxl_sysfs_remove_afu(afu);
-err:
- deconfigure_afu(afu);
- device_unregister(&afu->dev);
- return rc;
-}
-
-static void remove_afu(struct ocxl_afu *afu)
-{
- list_del(&afu->list);
- ocxl_context_detach_all(afu);
- deactivate_afu(afu);
- ocxl_sysfs_remove_afu(afu);
- deconfigure_afu(afu);
- device_unregister(&afu->dev);
-}
-
-static struct ocxl_fn *alloc_function(struct pci_dev *dev)
-{
+ struct ocxl_afu *afu, *tmp;
struct ocxl_fn *fn;
+ struct list_head *afu_list;
- fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL);
- if (!fn)
- return NULL;
-
- INIT_LIST_HEAD(&fn->afu_list);
- INIT_LIST_HEAD(&fn->pasid_list);
- INIT_LIST_HEAD(&fn->actag_list);
- return fn;
-}
-
-static void free_function(struct ocxl_fn *fn)
-{
- WARN_ON(!list_empty(&fn->afu_list));
- WARN_ON(!list_empty(&fn->pasid_list));
- kfree(fn);
-}
-
-static void free_function_dev(struct device *dev)
-{
- struct ocxl_fn *fn = to_ocxl_function(dev);
-
- free_function(fn);
-}
-
-static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
-{
- int rc;
+ fn = ocxl_function_open(dev);
+ if (IS_ERR(fn))
+ return PTR_ERR(fn);
- fn->dev.parent = &dev->dev;
- fn->dev.release = free_function_dev;
- rc = dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
- if (rc)
- return rc;
pci_set_drvdata(dev, fn);
- return 0;
-}
-
-static int assign_function_actag(struct ocxl_fn *fn)
-{
- struct pci_dev *dev = to_pci_dev(fn->dev.parent);
- u16 base, enabled, supported;
- int rc;
-
- rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported);
- if (rc)
- return rc;
-
- fn->actag_base = base;
- fn->actag_enabled = enabled;
- fn->actag_supported = supported;
-
- ocxl_config_set_actag(dev, fn->config.dvsec_function_pos,
- fn->actag_base, fn->actag_enabled);
- dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n",
- fn->actag_base, fn->actag_enabled);
- return 0;
-}
-static int set_function_pasid(struct ocxl_fn *fn)
-{
- struct pci_dev *dev = to_pci_dev(fn->dev.parent);
- int rc, desired_count, max_count;
-
- /* A function may not require any PASID */
- if (fn->config.max_pasid_log < 0)
- return 0;
-
- rc = ocxl_config_get_pasid_info(dev, &max_count);
- if (rc)
- return rc;
-
- desired_count = 1 << fn->config.max_pasid_log;
+ afu_list = ocxl_function_afu_list(fn);
- if (desired_count > max_count) {
- dev_err(&fn->dev,
- "Function requires more PASIDs than is available (%d vs. %d)\n",
- desired_count, max_count);
- return -ENOSPC;
+ list_for_each_entry_safe(afu, tmp, afu_list, list) {
+ // Cleanup handled within ocxl_file_register_afu()
+ rc = ocxl_file_register_afu(afu);
+ if (rc) {
+ dev_err(&dev->dev, "Failed to register AFU '%s' index %d",
+ afu->config.name, afu->config.idx);
+ }
}
- fn->pasid_base = 0;
return 0;
}
-static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev)
-{
- int rc;
-
- rc = pci_enable_device(dev);
- if (rc) {
- dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc);
- return rc;
- }
-
- /*
- * Once it has been confirmed to work on our hardware, we
- * should reset the function, to force the adapter to restart
- * from scratch.
- * A function reset would also reset all its AFUs.
- *
- * Some hints for implementation:
- *
- * - there's not status bit to know when the reset is done. We
- * should try reading the config space to know when it's
- * done.
- * - probably something like:
- * Reset
- * wait 100ms
- * issue config read
- * allow device up to 1 sec to return success on config
- * read before declaring it broken
- *
- * Some shared logic on the card (CFG, TLX) won't be reset, so
- * there's no guarantee that it will be enough.
- */
- rc = ocxl_config_read_function(dev, &fn->config);
- if (rc)
- return rc;
-
- rc = set_function_device(fn, dev);
- if (rc)
- return rc;
-
- rc = assign_function_actag(fn);
- if (rc)
- return rc;
-
- rc = set_function_pasid(fn);
- if (rc)
- return rc;
-
- rc = ocxl_link_setup(dev, 0, &fn->link);
- if (rc)
- return rc;
-
- rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos);
- if (rc) {
- ocxl_link_release(dev, fn->link);
- return rc;
- }
- return 0;
-}
-
-static void deconfigure_function(struct ocxl_fn *fn)
-{
- struct pci_dev *dev = to_pci_dev(fn->dev.parent);
-
- ocxl_link_release(dev, fn->link);
- pci_disable_device(dev);
-}
-
-static struct ocxl_fn *init_function(struct pci_dev *dev)
+void ocxl_remove(struct pci_dev *dev)
{
struct ocxl_fn *fn;
- int rc;
-
- fn = alloc_function(dev);
- if (!fn)
- return ERR_PTR(-ENOMEM);
-
- rc = configure_function(fn, dev);
- if (rc) {
- free_function(fn);
- return ERR_PTR(rc);
- }
-
- rc = device_register(&fn->dev);
- if (rc) {
- deconfigure_function(fn);
- put_device(&fn->dev);
- return ERR_PTR(rc);
- }
- return fn;
-}
-
-static void remove_function(struct ocxl_fn *fn)
-{
- deconfigure_function(fn);
- device_unregister(&fn->dev);
-}
-
-static int ocxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- int rc, afu_count = 0;
- u8 afu;
- struct ocxl_fn *fn;
-
- if (!radix_enabled()) {
- dev_err(&dev->dev, "Unsupported memory model (hash)\n");
- return -ENODEV;
- }
+ struct ocxl_afu *afu;
+ struct list_head *afu_list;
- fn = init_function(dev);
- if (IS_ERR(fn)) {
- dev_err(&dev->dev, "function init failed: %li\n",
- PTR_ERR(fn));
- return PTR_ERR(fn);
- }
+ fn = pci_get_drvdata(dev);
+ afu_list = ocxl_function_afu_list(fn);
- for (afu = 0; afu <= fn->config.max_afu_index; afu++) {
- rc = ocxl_config_check_afu_index(dev, &fn->config, afu);
- if (rc > 0) {
- rc = init_afu(dev, fn, afu);
- if (rc) {
- dev_err(&dev->dev,
- "Can't initialize AFU index %d\n", afu);
- continue;
- }
- afu_count++;
- }
+ list_for_each_entry(afu, afu_list, list) {
+ ocxl_file_unregister_afu(afu);
}
- dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count);
- return 0;
-}
-
-static void ocxl_remove(struct pci_dev *dev)
-{
- struct ocxl_afu *afu, *tmp;
- struct ocxl_fn *fn = pci_get_drvdata(dev);
- list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) {
- remove_afu(afu);
- }
- remove_function(fn);
+ ocxl_function_close(fn);
}
struct pci_driver ocxl_pci_driver = {
diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c
index 0ab1fd1b2682..58f1ba264206 100644
--- a/drivers/misc/ocxl/sysfs.c
+++ b/drivers/misc/ocxl/sysfs.c
@@ -3,11 +3,18 @@
#include <linux/sysfs.h>
#include "ocxl_internal.h"
+static inline struct ocxl_afu *to_afu(struct device *device)
+{
+ struct ocxl_file_info *info = container_of(device, struct ocxl_file_info, dev);
+
+ return info->afu;
+}
+
static ssize_t global_mmio_size_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
- struct ocxl_afu *afu = to_ocxl_afu(device);
+ struct ocxl_afu *afu = to_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d\n",
afu->config.global_mmio_size);
@@ -17,7 +24,7 @@ static ssize_t pp_mmio_size_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
- struct ocxl_afu *afu = to_ocxl_afu(device);
+ struct ocxl_afu *afu = to_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d\n",
afu->config.pp_mmio_stride);
@@ -27,7 +34,7 @@ static ssize_t afu_version_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
- struct ocxl_afu *afu = to_ocxl_afu(device);
+ struct ocxl_afu *afu = to_afu(device);
return scnprintf(buf, PAGE_SIZE, "%hhu:%hhu\n",
afu->config.version_major,
@@ -38,7 +45,7 @@ static ssize_t contexts_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
- struct ocxl_afu *afu = to_ocxl_afu(device);
+ struct ocxl_afu *afu = to_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d/%d\n",
afu->pasid_count, afu->pasid_max);
@@ -55,7 +62,7 @@ static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct ocxl_afu *afu = to_ocxl_afu(kobj_to_dev(kobj));
+ struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj));
if (count == 0 || off < 0 ||
off >= afu->config.global_mmio_size)
@@ -86,7 +93,7 @@ static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
struct vm_area_struct *vma)
{
- struct ocxl_afu *afu = to_ocxl_afu(kobj_to_dev(kobj));
+ struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj));
if ((vma_pages(vma) + vma->vm_pgoff) >
(afu->config.global_mmio_size >> PAGE_SHIFT))
@@ -99,27 +106,25 @@ static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
return 0;
}
-int ocxl_sysfs_add_afu(struct ocxl_afu *afu)
+int ocxl_sysfs_register_afu(struct ocxl_file_info *info)
{
int i, rc;
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
- rc = device_create_file(&afu->dev, &afu_attrs[i]);
+ rc = device_create_file(&info->dev, &afu_attrs[i]);
if (rc)
goto err;
}
- sysfs_attr_init(&afu->attr_global_mmio.attr);
- afu->attr_global_mmio.attr.name = "global_mmio_area";
- afu->attr_global_mmio.attr.mode = 0600;
- afu->attr_global_mmio.size = afu->config.global_mmio_size;
- afu->attr_global_mmio.read = global_mmio_read;
- afu->attr_global_mmio.mmap = global_mmio_mmap;
- rc = device_create_bin_file(&afu->dev, &afu->attr_global_mmio);
+ sysfs_attr_init(&info->attr_global_mmio.attr);
+ info->attr_global_mmio.attr.name = "global_mmio_area";
+ info->attr_global_mmio.attr.mode = 0600;
+ info->attr_global_mmio.size = info->afu->config.global_mmio_size;
+ info->attr_global_mmio.read = global_mmio_read;
+ info->attr_global_mmio.mmap = global_mmio_mmap;
+ rc = device_create_bin_file(&info->dev, &info->attr_global_mmio);
if (rc) {
- dev_err(&afu->dev,
- "Unable to create global mmio attr for afu: %d\n",
- rc);
+ dev_err(&info->dev, "Unable to create global mmio attr for afu: %d\n", rc);
goto err;
}
@@ -127,15 +132,20 @@ int ocxl_sysfs_add_afu(struct ocxl_afu *afu)
err:
for (i--; i >= 0; i--)
- device_remove_file(&afu->dev, &afu_attrs[i]);
+ device_remove_file(&info->dev, &afu_attrs[i]);
+
return rc;
}
-void ocxl_sysfs_remove_afu(struct ocxl_afu *afu)
+void ocxl_sysfs_unregister_afu(struct ocxl_file_info *info)
{
int i;
+ /*
+ * device_remove_bin_file() is safe to call even if the file was never
+ * added: files are removed by name, and removal exits early if the
+ * name is not found.
+ */
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
- device_remove_file(&afu->dev, &afu_attrs[i]);
- device_remove_bin_file(&afu->dev, &afu->attr_global_mmio);
+ device_remove_file(&info->dev, &afu_attrs[i]);
+ device_remove_bin_file(&info->dev, &info->attr_global_mmio);
}
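The bin_attribute registered above exposes the AFU's global MMIO area as a sysfs file that supports both read() and mmap(). A hedged userspace sketch follows; the sysfs path layout is an assumption based on the ocxl class and attribute name, and the size would normally come from the global_mmio_size attribute.

/* Userspace sketch: map the AFU's global MMIO area exposed via sysfs. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int dump_first_reg(const char *sysfs_path, size_t size)
{
	int fd = open(sysfs_path, O_RDONLY);
	volatile uint64_t *mmio;

	if (fd < 0)
		return -1;

	mmio = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
	if (mmio == MAP_FAILED) {
		close(fd);
		return -1;
	}

	printf("reg[0] = 0x%016llx\n", (unsigned long long)mmio[0]);

	munmap((void *)mmio, size);
	close(fd);
	return 0;
}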
diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h
index bcb7ff330c1e..024f417e7e01 100644
--- a/drivers/misc/ocxl/trace.h
+++ b/drivers/misc/ocxl/trace.h
@@ -107,16 +107,14 @@ DEFINE_EVENT(ocxl_fault_handler, ocxl_fault_ack,
);
TRACE_EVENT(ocxl_afu_irq_alloc,
- TP_PROTO(int pasid, int irq_id, unsigned int virq, int hw_irq,
- u64 irq_offset),
- TP_ARGS(pasid, irq_id, virq, hw_irq, irq_offset),
+ TP_PROTO(int pasid, int irq_id, unsigned int virq, int hw_irq),
+ TP_ARGS(pasid, irq_id, virq, hw_irq),
TP_STRUCT__entry(
__field(int, pasid)
__field(int, irq_id)
__field(unsigned int, virq)
__field(int, hw_irq)
- __field(u64, irq_offset)
),
TP_fast_assign(
@@ -124,15 +122,13 @@ TRACE_EVENT(ocxl_afu_irq_alloc,
__entry->irq_id = irq_id;
__entry->virq = virq;
__entry->hw_irq = hw_irq;
- __entry->irq_offset = irq_offset;
),
- TP_printk("pasid=0x%x irq_id=%d virq=%u hw_irq=%d irq_offset=0x%llx",
+ TP_printk("pasid=0x%x irq_id=%d virq=%u hw_irq=%d",
__entry->pasid,
__entry->irq_id,
__entry->virq,
- __entry->hw_irq,
- __entry->irq_offset
+ __entry->hw_irq
)
);
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 29582fe57151..7b015f2a1c6f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -75,6 +75,11 @@
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
+#define PCI_DEVICE_ID_TI_AM654 0xb00c
+
+#define is_am654_pci_dev(pdev) \
+ ((pdev)->device == PCI_DEVICE_ID_TI_AM654)
+
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
@@ -588,6 +593,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
int ret = -EINVAL;
enum pci_barno bar;
struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
+ struct pci_dev *pdev = test->pdev;
mutex_lock(&test->mutex);
switch (cmd) {
@@ -595,6 +601,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
bar = arg;
if (bar < 0 || bar > 5)
goto ret;
+ if (is_am654_pci_dev(pdev) && bar == BAR_0)
+ goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
case PCITEST_LEGACY_IRQ:
@@ -662,6 +670,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
test_reg_bar = data->test_reg_bar;
+ test->test_reg_bar = test_reg_bar;
test->alignment = data->alignment;
irq_type = data->irq_type;
}
@@ -785,11 +794,20 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static const struct pci_endpoint_test_data am654_data = {
+ .test_reg_bar = BAR_2,
+ .alignment = SZ_64K,
+ .irq_type = IRQ_TYPE_MSI,
+};
+
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
+ .driver_data = (kernel_ulong_t)&am654_data
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 9e443df44b3b..0c6de97dd347 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -572,6 +572,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
xpc_wakeup_channel_mgr(part);
}
+ /* fall through */
case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_ENGAGED_UV;
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 9ac95b48ef92..cc729f7ab32e 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -403,7 +403,6 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
fm->eject = tifm_7xx1_dummy_eject;
fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif;
writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
- mmiowb();
free_irq(dev->irq, fm);
tifm_remove_adapter(fm);
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 997f92543dd4..422d08da3244 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -242,7 +242,7 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
/*
* Lock physical page backing a given user VA.
*/
- retval = get_user_pages_fast(uva, 1, 1, &context->notify_page);
+ retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
if (retval != 1) {
context->notify_page = NULL;
return VMCI_ERROR_GENERIC;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f5f1aac9d163..1174735f003d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -659,7 +659,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
int err = VMCI_SUCCESS;
retval = get_user_pages_fast((uintptr_t) produce_uva,
- produce_q->kernel_if->num_pages, 1,
+ produce_q->kernel_if->num_pages,
+ FOLL_WRITE,
produce_q->kernel_if->u.h.header_page);
if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
@@ -671,7 +672,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
}
retval = get_user_pages_fast((uintptr_t) consume_uva,
- consume_q->kernel_if->num_pages, 1,
+ consume_q->kernel_if->num_pages,
+ FOLL_WRITE,
consume_q->kernel_if->u.h.header_page);
if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 3a4402a79904..6a51f7a06ce7 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -363,11 +363,11 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
int num_ranges, i;
voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
- num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
if (!voltage_ranges) {
pr_debug("%pOF: voltage-ranges unspecified\n", np);
return 0;
}
+ num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
if (!num_ranges) {
pr_err("%pOF: voltage-ranges empty\n", np);
return -EINVAL;
@@ -429,8 +429,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
if (mmc_gpio_alloc(host)) {
put_device(&host->class_dev);
- ida_simple_remove(&mmc_host_ida, host->index);
- kfree(host);
return NULL;
}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index c5208fb312ae..a533cab8fccc 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -184,11 +184,7 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
if (err)
break;
- /* if we're just probing, do a single pass */
- if (ocr == 0)
- break;
-
- /* otherwise wait until reset completes */
+ /* wait until reset completes */
if (mmc_host_is_spi(host)) {
if (!(cmd.resp[0] & R1_SPI_IDLE))
break;
@@ -200,6 +196,16 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
err = -ETIMEDOUT;
mmc_delay(10);
+
+ /*
+ * According to eMMC specification v5.1 section 6.4.3, we
+ * should issue CMD1 repeatedly in the idle state until
+ * the eMMC is ready. Otherwise some eMMC devices seem to enter
+ * the inactive mode after mmc_init_card() issued CMD0 when
+ * the eMMC device is busy.
+ */
+ if (!ocr && !mmc_host_is_spi(host))
+ cmd.arg = cmd.resp[0] | BIT(30);
}
if (rocr && !mmc_host_is_spi(host))
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index efb8a7965dd4..154f4204d58c 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc {
#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
-static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
-{
- gpiod_set_value(pwrseq->reset_gpio, 1);
- udelay(1);
- gpiod_set_value(pwrseq->reset_gpio, 0);
- udelay(200);
-}
-
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
{
struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
- __mmc_pwrseq_emmc_reset(pwrseq);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+ udelay(200);
}
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
@@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
{
struct mmc_pwrseq_emmc *pwrseq = container_of(this,
struct mmc_pwrseq_emmc, reset_nb);
+ gpiod_set_value(pwrseq->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value(pwrseq->reset_gpio, 0);
+ udelay(200);
- __mmc_pwrseq_emmc_reset(pwrseq);
return NOTIFY_DONE;
}
@@ -72,14 +70,18 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
if (IS_ERR(pwrseq->reset_gpio))
return PTR_ERR(pwrseq->reset_gpio);
- /*
- * register reset handler to ensure emmc reset also from
- * emergency_reboot(), priority 255 is the highest priority
- * so it will be executed before any system reboot handler.
- */
- pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
- pwrseq->reset_nb.priority = 255;
- register_restart_handler(&pwrseq->reset_nb);
+ if (!gpiod_cansleep(pwrseq->reset_gpio)) {
+ /*
+ * register reset handler to ensure emmc reset also from
+ * emergency_reboot(), priority 255 is the highest priority
+ * so it will be executed before any system reboot handler.
+ */
+ pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
+ pwrseq->reset_nb.priority = 255;
+ register_restart_handler(&pwrseq->reset_nb);
+ } else {
+ dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
+ }
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
pwrseq->pwrseq.dev = dev;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 7c364a9c4eeb..b5b9c6142f08 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -472,6 +472,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_mq_unquiesce_queue(q);
blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&mq->tag_set);
/*
* A request can be completed before the next request, potentially
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index dd2f73af8f2c..2d2d9ea8be4f 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -159,7 +159,7 @@ static inline void mmc_fixup_device(struct mmc_card *card,
(f->ext_csd_rev == EXT_CSD_REV_ANY ||
f->ext_csd_rev == card->ext_csd.rev) &&
rev >= f->rev_start && rev <= f->rev_end) {
- dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup);
+ dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
f->vendor_fixup(card, f->data);
}
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 265e1aeeb9d8..d3d32f9a2cb1 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -221,6 +221,14 @@ static int mmc_decode_scr(struct mmc_card *card)
if (scr->sda_spec3)
scr->cmds = UNSTUFF_BITS(resp, 32, 2);
+
+ /* SD Spec says: any SD Card shall set at least bits 0 and 2 */
+ if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
+ !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 28fcd8f580a1..0e86340536b6 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -92,6 +92,7 @@ config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
select MMC_CQHCI
+ select IOSF_MBI if X86
help
This selects the PCI Secure Digital Host Controller Interface.
Most controllers found today are PCI devices.
@@ -437,7 +438,7 @@ config MMC_WBSD
depends on ISA_DMA_API
help
This selects the Winbond(R) W83L51xD Secure digital and
- Multimedia card Interface.
+ Multimedia card Interface.
If you have a machine with a integrated W83L518D or W83L519D
SD/MMC card reader, say Y or M here.
@@ -515,7 +516,7 @@ config MMC_TIFM_SD
'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
(TIFM_7XX1)'.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called tifm_sd.
config MMC_MVSDIO
@@ -531,12 +532,12 @@ config MMC_MVSDIO
module will be called mvsdio.
config MMC_DAVINCI
- tristate "TI DAVINCI Multimedia Card Interface support"
- depends on ARCH_DAVINCI
- help
- This selects the TI DAVINCI Multimedia card Interface.
- If you have an DAVINCI board with a Multimedia Card slot,
- say Y or M here. If unsure, say N.
+ tristate "TI DAVINCI Multimedia Card Interface support"
+ depends on ARCH_DAVINCI
+ help
+ This selects the TI DAVINCI Multimedia card Interface.
+ If you have an DAVINCI board with a Multimedia Card slot,
+ say Y or M here. If unsure, say N.
config MMC_GOLDFISH
tristate "goldfish qemu Multimedia Card Interface support"
@@ -565,18 +566,18 @@ config MMC_S3C
depends on S3C24XX_DMAC
help
This selects a driver for the MCI interface found in
- Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
+ Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
If you have a board based on one of those and a MMC/SD
slot, say Y or M here.
If unsure, say N.
config MMC_S3C_HW_SDIO_IRQ
- bool "Hardware support for SDIO IRQ"
- depends on MMC_S3C
- help
- Enable the hardware support for SDIO interrupts instead of using
- the generic polling code.
+ bool "Hardware support for SDIO IRQ"
+ depends on MMC_S3C
+ help
+ Enable the hardware support for SDIO interrupts instead of using
+ the generic polling code.
choice
prompt "Samsung S3C SD/MMC transfer code"
@@ -941,6 +942,7 @@ config MMC_BCM2835
config MMC_MTK
tristate "MediaTek SD/MMC Card Interface support"
depends on HAS_DMA
+ select REGULATOR
help
This selects the MediaTek(R) Secure digital and Multimedia card Interface.
If you have a machine with a integrated SD/MMC card reader, say Y or M here.
@@ -948,15 +950,16 @@ config MMC_MTK
If unsure, say N.
config MMC_SDHCI_MICROCHIP_PIC32
- tristate "Microchip PIC32MZDA SDHCI support"
- depends on MMC_SDHCI && PIC32MZDA && MMC_SDHCI_PLTFM
- help
- This selects the Secure Digital Host Controller Interface (SDHCI)
- for PIC32MZDA platform.
+ tristate "Microchip PIC32MZDA SDHCI support"
+ depends on MMC_SDHCI && PIC32MZDA && MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ for PIC32MZDA platform.
- If you have a controller with this interface, say Y or M here.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
- If unsure, say N.
config MMC_SDHCI_BRCMSTB
tristate "Broadcom SDIO/SD/MMC support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC
@@ -993,6 +996,7 @@ config MMC_SDHCI_OMAP
config MMC_SDHCI_AM654
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
depends on MMC_SDHCI_PLTFM && OF
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's AM654 SOCs. The controller supports
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index 82a97866e0cf..e481535cba2b 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -43,12 +43,10 @@ struct alcor_sdmmc_host {
struct device *dev;
struct alcor_pci_priv *alcor_pci;
- struct mmc_host *mmc;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
unsigned int dma_on:1;
- unsigned int early_data:1;
struct mutex cmd_mutex;
@@ -118,6 +116,9 @@ static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
dev_err(host->dev, "%s: timeout\n", __func__);
}
+/*
+ * Perform DMA I/O of a single page.
+ */
static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
{
struct alcor_pci_priv *priv = host->alcor_pci;
@@ -144,8 +145,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
host->sg_count--;
}
-static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
- bool early)
+static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
{
struct alcor_pci_priv *priv = host->alcor_pci;
struct mmc_data *data = host->data;
@@ -155,19 +155,26 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
ctrl |= AU6601_DATA_WRITE;
if (data->host_cookie == COOKIE_MAPPED) {
- if (host->early_data) {
- host->early_data = false;
- return;
- }
-
- host->early_data = early;
-
+ /*
+ * For DMA transfers, this function is called just once,
+ * at the start of the operation. The hardware can only
+ * perform DMA I/O on a single page at a time, so here
+ * we kick off the transfer with the first page, and expect
+ * subsequent pages to be transferred upon IRQ events
+ * indicating that the single-page DMA was completed.
+ */
alcor_data_set_dma(host);
ctrl |= AU6601_DATA_DMA_MODE;
host->dma_on = 1;
alcor_write32(priv, data->sg_count * 0x1000,
AU6601_REG_BLOCK_SIZE);
} else {
+ /*
+ * For PIO transfers, we break down each operation
+ * into several sector-sized transfers. When one sector has
+	 * completed, the IRQ handler will call this function again
+ * to kick off the transfer of the next sector.
+ */
alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
}
@@ -231,6 +238,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
static void alcor_prepare_data(struct alcor_sdmmc_host *host,
struct mmc_command *cmd)
{
+ struct alcor_pci_priv *priv = host->alcor_pci;
struct mmc_data *data = cmd->data;
if (!data)
@@ -248,7 +256,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host,
if (data->host_cookie != COOKIE_MAPPED)
alcor_prepare_sg_miter(host);
- alcor_trigger_data_transfer(host, true);
+ alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
}
static void alcor_send_cmd(struct alcor_sdmmc_host *host,
@@ -284,7 +292,7 @@ static void alcor_send_cmd(struct alcor_sdmmc_host *host,
break;
default:
dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
- mmc_hostname(host->mmc), mmc_resp_type(cmd));
+ mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
break;
}
@@ -325,7 +333,7 @@ static void alcor_request_complete(struct alcor_sdmmc_host *host,
host->data = NULL;
host->dma_on = 0;
- mmc_request_done(host->mmc, mrq);
+ mmc_request_done(mmc_from_priv(host), mrq);
}
static void alcor_finish_data(struct alcor_sdmmc_host *host)
@@ -435,7 +443,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
if (!host->data)
return false;
- alcor_trigger_data_transfer(host, false);
+ alcor_trigger_data_transfer(host);
host->cmd = NULL;
return true;
}
@@ -456,7 +464,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
if (!host->data)
alcor_request_complete(host, 1);
else
- alcor_trigger_data_transfer(host, false);
+ alcor_trigger_data_transfer(host);
host->cmd = NULL;
}
@@ -487,15 +495,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
break;
case AU6601_INT_READ_BUF_RDY:
alcor_trf_block_pio(host, true);
- if (!host->blocks)
- break;
- alcor_trigger_data_transfer(host, false);
return 1;
case AU6601_INT_WRITE_BUF_RDY:
alcor_trf_block_pio(host, false);
- if (!host->blocks)
- break;
- alcor_trigger_data_transfer(host, false);
return 1;
case AU6601_INT_DMA_END:
if (!host->sg_count)
@@ -508,8 +510,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
break;
}
- if (intmask & AU6601_INT_DATA_END)
- return 0;
+ if (intmask & AU6601_INT_DATA_END) {
+ if (!host->dma_on && host->blocks) {
+ alcor_trigger_data_transfer(host);
+ return 1;
+ } else {
+ return 0;
+ }
+ }
return 1;
}
@@ -555,7 +563,7 @@ static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
alcor_request_complete(host, 1);
}
- mmc_detect_change(host->mmc, msecs_to_jiffies(1));
+ mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
}
static irqreturn_t alcor_irq_thread(int irq, void *d)
@@ -779,12 +787,17 @@ static void alcor_pre_req(struct mmc_host *mmc,
data->host_cookie = COOKIE_UNMAPPED;
/* FIXME: looks like the DMA engine works only with CMD18 */
- if (cmd->opcode != 18)
+ if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
+ && cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
return;
/*
* We don't do DMA on "complex" transfers, i.e. with
- * non-word-aligned buffers or lengths. Also, we don't bother
- * with all the DMA setup overhead for short transfers.
+ * non-word-aligned buffers or lengths. A future improvement
+ * could be made to use temporary DMA bounce-buffers when these
+ * requirements are not met.
+ *
+ * Also, we don't bother with all the DMA setup overhead for
+ * short transfers.
*/
if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
return;
@@ -795,6 +808,8 @@ static void alcor_pre_req(struct mmc_host *mmc,
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
return;
+ if (sg->offset != 0)
+ return;
}
/* This data might be unmapped at this time */
@@ -967,7 +982,6 @@ static void alcor_timeout_timer(struct work_struct *work)
alcor_request_complete(host, 0);
}
- mmiowb();
mutex_unlock(&host->cmd_mutex);
}
@@ -1033,7 +1047,7 @@ static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
static void alcor_init_mmc(struct alcor_sdmmc_host *host)
{
- struct mmc_host *mmc = host->mmc;
+ struct mmc_host *mmc = mmc_from_priv(host);
mmc->f_min = AU6601_MIN_CLOCK;
mmc->f_max = AU6601_MAX_CLOCK;
@@ -1045,26 +1059,21 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
mmc->ops = &alcor_sdc_ops;
/* The hardware does DMA data transfer of 4096 bytes to/from a single
- * buffer address. Scatterlists are not supported, but upon DMA
- * completion (signalled via IRQ), the original vendor driver does
- * then immediately set up another DMA transfer of the next 4096
- * bytes.
- *
- * This means that we need to handle the I/O in 4096 byte chunks.
- * Lacking a way to limit the sglist entries to 4096 bytes, we instead
- * impose that only one segment is provided, with maximum size 4096,
- * which also happens to be the minimum size. This means that the
- * single-entry sglist handled by this driver can be handed directly
- * to the hardware, nice and simple.
+ * buffer address. Scatterlists are not supported at the hardware
+	 * level; however, we can work with them at the driver level,
+ * provided that each segment is exactly 4096 bytes in size.
+ * Upon DMA completion of a single segment (signalled via IRQ), we
+ * immediately proceed to transfer the next segment from the
+ * scatterlist.
*
- * Unfortunately though, that means we only do 4096 bytes I/O per
- * MMC command. A future improvement would be to make the driver
- * accept sg lists and entries of any size, and simply iterate
- * through them 4096 bytes at a time.
+ * The overall request is limited to 240 sectors, matching the
+ * original vendor driver.
*/
mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
- mmc->max_req_size = mmc->max_seg_size;
+ mmc->max_blk_count = 240;
+ mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
+ dma_set_max_seg_size(host->dev, mmc->max_seg_size);
}
static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
@@ -1081,7 +1090,6 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
}
host = mmc_priv(mmc);
- host->mmc = mmc;
host->dev = &pdev->dev;
host->cur_power_mode = MMC_POWER_UNDEFINED;
host->alcor_pci = priv;
@@ -1113,13 +1121,14 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
+ struct mmc_host *mmc = mmc_from_priv(host);
if (cancel_delayed_work_sync(&host->timeout_work))
alcor_request_complete(host, 0);
alcor_hw_uninit(host);
- mmc_remove_host(host->mmc);
- mmc_free_host(host->mmc);
+ mmc_remove_host(mmc);
+ mmc_free_host(mmc);
return 0;
}
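For reference, a minimal compile-time check of the new limits set in alcor_init_mmc() above. It assumes the MMC core's default 512-byte max_blk_size (which the driver does not override); the EXAMPLE_* names are invented for illustration only:

	#define EXAMPLE_MAX_BLK_SIZE	512	/* assumed MMC core default */
	#define EXAMPLE_MAX_BLK_COUNT	240	/* mmc->max_blk_count above */
	#define EXAMPLE_DMA_SEG_SIZE	4096	/* AU6601_MAX_DMA_BLOCK_SIZE */

	/* 240 * 512 == 122880 == 30 * 4096: a maximal request is served by
	 * at most 30 single-page DMA chunks chained from the IRQ handler. */
	_Static_assert(EXAMPLE_MAX_BLK_COUNT * EXAMPLE_MAX_BLK_SIZE ==
		       30 * EXAMPLE_DMA_SEG_SIZE, "request/segment math");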
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index a8af682a9182..d59cb0a51964 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -537,6 +537,8 @@ static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
CQHCI_ACT(0x5) |
CQHCI_CMD_INDEX(mrq->cmd->opcode) |
CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
+ if (cq_host->ops->update_dcmd_desc)
+ cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
*task_desc |= data;
desc = (u8 *)task_desc;
pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index 9e68286a07b4..1e8e01d81015 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -88,6 +88,7 @@
/* send status config 1 */
#define CQHCI_SSC1 0x40
+#define CQHCI_SSC1_CBC_MASK GENMASK(19, 16)
/* send status config 2 */
#define CQHCI_SSC2 0x44
@@ -147,6 +148,7 @@
struct cqhci_host_ops;
struct mmc_host;
+struct mmc_request;
struct cqhci_slot;
struct cqhci_host {
@@ -210,6 +212,8 @@ struct cqhci_host_ops {
u32 (*read_l)(struct cqhci_host *host, int reg);
void (*enable)(struct mmc_host *mmc);
void (*disable)(struct mmc_host *mmc, bool recovery);
+ void (*update_dcmd_desc)(struct mmc_host *mmc, struct mmc_request *mrq,
+ u64 *data);
};
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
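A hypothetical user of the new update_dcmd_desc() hook might look like the sketch below; the example_* names and the timing tweak are invented for illustration and are not taken from any in-tree driver:

	#include "cqhci.h"

	static void example_update_dcmd_desc(struct mmc_host *mmc,
					     struct mmc_request *mrq, u64 *data)
	{
		/* Imaginary controller quirk: clear the command-timing bit
		 * of every DCMD task descriptor before it is written. */
		*data &= ~CQHCI_CMD_TIMING(1);
	}

	static const struct cqhci_host_ops example_cqhci_ops = {
		.update_dcmd_desc = example_update_dcmd_desc,
		/* .enable, .disable, ... as for any other cqhci host */
	};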
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 2eba507790e4..c5a8af4ca76b 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/iopoll.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
@@ -48,6 +49,8 @@
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
+#define CLK_PHASE_0 0
+#define CLK_PHASE_180 2
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define CLK_V2_ALWAYS_ON BIT(24)
@@ -56,10 +59,6 @@
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define CLK_V3_ALWAYS_ON BIT(28)
-#define CLK_DELAY_STEP_PS 200
-#define CLK_PHASE_STEP 30
-#define CLK_PHASE_POINT_NUM (360 / CLK_PHASE_STEP)
-
#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
#define CLK_ALWAYS_ON(h) (h->data->always_on)
@@ -164,10 +163,10 @@ struct meson_host {
void __iomem *regs;
struct clk *core_clk;
+ struct clk *mux_clk;
struct clk *mmc_clk;
- struct clk *rx_clk;
- struct clk *tx_clk;
unsigned long req_rate;
+ bool ddr;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
@@ -207,90 +206,6 @@ struct meson_host {
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)
-struct meson_mmc_phase {
- struct clk_hw hw;
- void __iomem *reg;
- unsigned long phase_mask;
- unsigned long delay_mask;
- unsigned int delay_step_ps;
-};
-
-#define to_meson_mmc_phase(_hw) container_of(_hw, struct meson_mmc_phase, hw)
-
-static int meson_mmc_clk_get_phase(struct clk_hw *hw)
-{
- struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
- unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
- unsigned long period_ps, p, d;
- int degrees;
- u32 val;
-
- val = readl(mmc->reg);
- p = (val & mmc->phase_mask) >> __ffs(mmc->phase_mask);
- degrees = p * 360 / phase_num;
-
- if (mmc->delay_mask) {
- period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
- clk_get_rate(hw->clk));
- d = (val & mmc->delay_mask) >> __ffs(mmc->delay_mask);
- degrees += d * mmc->delay_step_ps * 360 / period_ps;
- degrees %= 360;
- }
-
- return degrees;
-}
-
-static void meson_mmc_apply_phase_delay(struct meson_mmc_phase *mmc,
- unsigned int phase,
- unsigned int delay)
-{
- u32 val;
-
- val = readl(mmc->reg);
- val &= ~mmc->phase_mask;
- val |= phase << __ffs(mmc->phase_mask);
-
- if (mmc->delay_mask) {
- val &= ~mmc->delay_mask;
- val |= delay << __ffs(mmc->delay_mask);
- }
-
- writel(val, mmc->reg);
-}
-
-static int meson_mmc_clk_set_phase(struct clk_hw *hw, int degrees)
-{
- struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
- unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
- unsigned long period_ps, d = 0, r;
- uint64_t p;
-
- p = degrees % 360;
-
- if (!mmc->delay_mask) {
- p = DIV_ROUND_CLOSEST_ULL(p, 360 / phase_num);
- } else {
- period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
- clk_get_rate(hw->clk));
-
- /* First compute the phase index (p), the remainder (r) is the
- * part we'll try to acheive using the delays (d).
- */
- r = do_div(p, 360 / phase_num);
- d = DIV_ROUND_CLOSEST(r * period_ps,
- 360 * mmc->delay_step_ps);
- d = min(d, mmc->delay_mask >> __ffs(mmc->delay_mask));
- }
-
- meson_mmc_apply_phase_delay(mmc, p, d);
- return 0;
-}
-
-static const struct clk_ops meson_mmc_clk_phase_ops = {
- .get_phase = meson_mmc_clk_get_phase,
- .set_phase = meson_mmc_clk_set_phase,
-};
-
static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;
@@ -383,16 +298,6 @@ static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
mmc_get_dma_dir(data));
}
-static bool meson_mmc_timing_is_ddr(struct mmc_ios *ios)
-{
- if (ios->timing == MMC_TIMING_MMC_DDR52 ||
- ios->timing == MMC_TIMING_UHS_DDR50 ||
- ios->timing == MMC_TIMING_MMC_HS400)
- return true;
-
- return false;
-}
-
/*
* Gating the clock on this controller is tricky. It seems the mmc clock
* is also used by the controller. It may crash during some operation if the
@@ -429,36 +334,41 @@ static void meson_mmc_clk_ungate(struct meson_host *host)
writel(cfg, host->regs + SD_EMMC_CFG);
}
-static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios)
+static int meson_mmc_clk_set(struct meson_host *host, unsigned long rate,
+ bool ddr)
{
struct mmc_host *mmc = host->mmc;
- unsigned long rate = ios->clock;
int ret;
u32 cfg;
- /* DDR modes require higher module clock */
- if (meson_mmc_timing_is_ddr(ios))
- rate <<= 1;
-
/* Same request - bail-out */
- if (host->req_rate == rate)
+ if (host->ddr == ddr && host->req_rate == rate)
return 0;
/* stop clock */
meson_mmc_clk_gate(host);
host->req_rate = 0;
+ mmc->actual_clock = 0;
- if (!rate) {
- mmc->actual_clock = 0;
- /* return with clock being stopped */
+ /* return with clock being stopped */
+ if (!rate)
return 0;
- }
/* Stop the clock during rate change to avoid glitches */
cfg = readl(host->regs + SD_EMMC_CFG);
cfg |= CFG_STOP_CLOCK;
writel(cfg, host->regs + SD_EMMC_CFG);
+ if (ddr) {
+ /* DDR modes require higher module clock */
+ rate <<= 1;
+ cfg |= CFG_DDR;
+ } else {
+ cfg &= ~CFG_DDR;
+ }
+ writel(cfg, host->regs + SD_EMMC_CFG);
+ host->ddr = ddr;
+
ret = clk_set_rate(host->mmc_clk, rate);
if (ret) {
dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
@@ -470,12 +380,14 @@ static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios)
mmc->actual_clock = clk_get_rate(host->mmc_clk);
/* We should report the real output frequency of the controller */
- if (meson_mmc_timing_is_ddr(ios))
+ if (ddr) {
+ host->req_rate >>= 1;
mmc->actual_clock >>= 1;
+ }
dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
- if (ios->clock != mmc->actual_clock)
- dev_dbg(host->dev, "requested rate was %u\n", ios->clock);
+ if (rate != mmc->actual_clock)
+ dev_dbg(host->dev, "requested rate was %lu\n", rate);
/* (re)start clock */
meson_mmc_clk_ungate(host);
@@ -493,8 +405,6 @@ static int meson_mmc_clk_init(struct meson_host *host)
struct clk_init_data init;
struct clk_mux *mux;
struct clk_divider *div;
- struct meson_mmc_phase *core, *tx, *rx;
- struct clk *clk;
char clk_name[32];
int i, ret = 0;
const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
@@ -502,9 +412,11 @@ static int meson_mmc_clk_init(struct meson_host *host)
u32 clk_reg;
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
- clk_reg = 0;
- clk_reg |= CLK_ALWAYS_ON(host);
+ clk_reg = CLK_ALWAYS_ON(host);
clk_reg |= CLK_DIV_MASK;
+ clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
+ clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
+ clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
writel(clk_reg, host->regs + SD_EMMC_CLOCK);
/* get the mux parents */
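For clarity, the FIELD_PREP() defaults above drop the fixed value CLK_PHASE_180 (2) into the two-bit core-phase field at bits 9:8; the field encodes the phase in 90-degree steps, so 2 selects 180 degrees. A minimal stand-alone illustration (function name invented):

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* FIELD_PREP(GENMASK(9, 8), 2) == 2 << 8 == 0x200 */
	static inline u32 example_core_phase_180(void)
	{
		return FIELD_PREP(GENMASK(9, 8), 2);
	}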
@@ -540,9 +452,9 @@ static int meson_mmc_clk_init(struct meson_host *host)
mux->mask = CLK_SRC_MASK >> mux->shift;
mux->hw.init = &init;
- clk = devm_clk_register(host->dev, &mux->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
+ host->mux_clk = devm_clk_register(host->dev, &mux->hw);
+ if (WARN_ON(IS_ERR(host->mux_clk)))
+ return PTR_ERR(host->mux_clk);
/* create the divider */
div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
@@ -553,7 +465,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
init.name = clk_name;
init.ops = &clk_divider_ops;
init.flags = CLK_SET_RATE_PARENT;
- clk_parent[0] = __clk_get_name(clk);
+ clk_parent[0] = __clk_get_name(host->mux_clk);
init.parent_names = clk_parent;
init.num_parents = 1;
@@ -563,190 +475,104 @@ static int meson_mmc_clk_init(struct meson_host *host)
div->hw.init = &init;
div->flags = CLK_DIVIDER_ONE_BASED;
- clk = devm_clk_register(host->dev, &div->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
-
- /* create the mmc core clock */
- core = devm_kzalloc(host->dev, sizeof(*core), GFP_KERNEL);
- if (!core)
- return -ENOMEM;
-
- snprintf(clk_name, sizeof(clk_name), "%s#core", dev_name(host->dev));
- init.name = clk_name;
- init.ops = &meson_mmc_clk_phase_ops;
- init.flags = CLK_SET_RATE_PARENT;
- clk_parent[0] = __clk_get_name(clk);
- init.parent_names = clk_parent;
- init.num_parents = 1;
-
- core->reg = host->regs + SD_EMMC_CLOCK;
- core->phase_mask = CLK_CORE_PHASE_MASK;
- core->hw.init = &init;
-
- host->mmc_clk = devm_clk_register(host->dev, &core->hw);
- if (WARN_ON(PTR_ERR_OR_ZERO(host->mmc_clk)))
+ host->mmc_clk = devm_clk_register(host->dev, &div->hw);
+ if (WARN_ON(IS_ERR(host->mmc_clk)))
return PTR_ERR(host->mmc_clk);
- /* create the mmc tx clock */
- tx = devm_kzalloc(host->dev, sizeof(*tx), GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
-
- snprintf(clk_name, sizeof(clk_name), "%s#tx", dev_name(host->dev));
- init.name = clk_name;
- init.ops = &meson_mmc_clk_phase_ops;
- init.flags = 0;
- clk_parent[0] = __clk_get_name(host->mmc_clk);
- init.parent_names = clk_parent;
- init.num_parents = 1;
-
- tx->reg = host->regs + SD_EMMC_CLOCK;
- tx->phase_mask = CLK_TX_PHASE_MASK;
- tx->delay_mask = CLK_TX_DELAY_MASK(host);
- tx->delay_step_ps = CLK_DELAY_STEP_PS;
- tx->hw.init = &init;
-
- host->tx_clk = devm_clk_register(host->dev, &tx->hw);
- if (WARN_ON(PTR_ERR_OR_ZERO(host->tx_clk)))
- return PTR_ERR(host->tx_clk);
-
- /* create the mmc rx clock */
- rx = devm_kzalloc(host->dev, sizeof(*rx), GFP_KERNEL);
- if (!rx)
- return -ENOMEM;
-
- snprintf(clk_name, sizeof(clk_name), "%s#rx", dev_name(host->dev));
- init.name = clk_name;
- init.ops = &meson_mmc_clk_phase_ops;
- init.flags = 0;
- clk_parent[0] = __clk_get_name(host->mmc_clk);
- init.parent_names = clk_parent;
- init.num_parents = 1;
-
- rx->reg = host->regs + SD_EMMC_CLOCK;
- rx->phase_mask = CLK_RX_PHASE_MASK;
- rx->delay_mask = CLK_RX_DELAY_MASK(host);
- rx->delay_step_ps = CLK_DELAY_STEP_PS;
- rx->hw.init = &init;
-
- host->rx_clk = devm_clk_register(host->dev, &rx->hw);
- if (WARN_ON(PTR_ERR_OR_ZERO(host->rx_clk)))
- return PTR_ERR(host->rx_clk);
-
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
if (ret)
return ret;
- clk_set_phase(host->mmc_clk, 180);
- clk_set_phase(host->tx_clk, 0);
- clk_set_phase(host->rx_clk, 0);
-
return clk_prepare_enable(host->mmc_clk);
}
-static void meson_mmc_shift_map(unsigned long *map, unsigned long shift)
+static void meson_mmc_disable_resampling(struct meson_host *host)
{
- DECLARE_BITMAP(left, CLK_PHASE_POINT_NUM);
- DECLARE_BITMAP(right, CLK_PHASE_POINT_NUM);
+ unsigned int val = readl(host->regs + host->data->adjust);
- /*
- * shift the bitmap right and reintroduce the dropped bits on the left
- * of the bitmap
- */
- bitmap_shift_right(right, map, shift, CLK_PHASE_POINT_NUM);
- bitmap_shift_left(left, map, CLK_PHASE_POINT_NUM - shift,
- CLK_PHASE_POINT_NUM);
- bitmap_or(map, left, right, CLK_PHASE_POINT_NUM);
+ val &= ~ADJUST_ADJ_EN;
+ writel(val, host->regs + host->data->adjust);
}
-static void meson_mmc_find_next_region(unsigned long *map,
- unsigned long *start,
- unsigned long *stop)
+static void meson_mmc_reset_resampling(struct meson_host *host)
{
- *start = find_next_bit(map, CLK_PHASE_POINT_NUM, *start);
- *stop = find_next_zero_bit(map, CLK_PHASE_POINT_NUM, *start);
+ unsigned int val;
+
+ meson_mmc_disable_resampling(host);
+
+ val = readl(host->regs + host->data->adjust);
+ val &= ~ADJUST_ADJ_DELAY_MASK;
+ writel(val, host->regs + host->data->adjust);
}
-static int meson_mmc_find_tuning_point(unsigned long *test)
+static int meson_mmc_resampling_tuning(struct mmc_host *mmc, u32 opcode)
{
- unsigned long shift, stop, offset = 0, start = 0, size = 0;
+ struct meson_host *host = mmc_priv(mmc);
+ unsigned int val, dly, max_dly, i;
+ int ret;
- /* Get the all good/all bad situation out the way */
- if (bitmap_full(test, CLK_PHASE_POINT_NUM))
- return 0; /* All points are good so point 0 will do */
- else if (bitmap_empty(test, CLK_PHASE_POINT_NUM))
- return -EIO; /* No successful tuning point */
+ /* Resampling is done using the source clock */
+ max_dly = DIV_ROUND_UP(clk_get_rate(host->mux_clk),
+ clk_get_rate(host->mmc_clk));
- /*
- * Now we know there is a least one region find. Make sure it does
- * not wrap by the shifting the bitmap if necessary
- */
- shift = find_first_zero_bit(test, CLK_PHASE_POINT_NUM);
- if (shift != 0)
- meson_mmc_shift_map(test, shift);
+ val = readl(host->regs + host->data->adjust);
+ val |= ADJUST_ADJ_EN;
+ writel(val, host->regs + host->data->adjust);
- while (start < CLK_PHASE_POINT_NUM) {
- meson_mmc_find_next_region(test, &start, &stop);
+ if (mmc->doing_retune)
+ dly = FIELD_GET(ADJUST_ADJ_DELAY_MASK, val) + 1;
+ else
+ dly = 0;
- if ((stop - start) > size) {
- offset = start;
- size = stop - start;
- }
+ for (i = 0; i < max_dly; i++) {
+ val &= ~ADJUST_ADJ_DELAY_MASK;
+ val |= FIELD_PREP(ADJUST_ADJ_DELAY_MASK, (dly + i) % max_dly);
+ writel(val, host->regs + host->data->adjust);
- start = stop;
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (!ret) {
+ dev_dbg(mmc_dev(mmc), "resampling delay: %u\n",
+ (dly + i) % max_dly);
+ return 0;
+ }
}
- /* Get the center point of the region */
- offset += (size / 2);
-
- /* Shift the result back */
- offset = (offset + shift) % CLK_PHASE_POINT_NUM;
-
- return offset;
+ meson_mmc_reset_resampling(host);
+ return -EIO;
}
-static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
- struct clk *clk)
+static int meson_mmc_prepare_ios_clock(struct meson_host *host,
+ struct mmc_ios *ios)
{
- int point, ret;
- DECLARE_BITMAP(test, CLK_PHASE_POINT_NUM);
+ bool ddr;
- dev_dbg(mmc_dev(mmc), "%s phase/delay tunning...\n",
- __clk_get_name(clk));
- bitmap_zero(test, CLK_PHASE_POINT_NUM);
+ switch (ios->timing) {
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ ddr = true;
+ break;
- /* Explore tuning points */
- for (point = 0; point < CLK_PHASE_POINT_NUM; point++) {
- clk_set_phase(clk, point * CLK_PHASE_STEP);
- ret = mmc_send_tuning(mmc, opcode, NULL);
- if (!ret)
- set_bit(point, test);
+ default:
+ ddr = false;
+ break;
}
- /* Find the optimal tuning point and apply it */
- point = meson_mmc_find_tuning_point(test);
- if (point < 0)
- return point; /* tuning failed */
-
- clk_set_phase(clk, point * CLK_PHASE_STEP);
- dev_dbg(mmc_dev(mmc), "success with phase: %d\n",
- clk_get_phase(clk));
- return 0;
+ return meson_mmc_clk_set(host, ios->clock, ddr);
}
-static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+static void meson_mmc_check_resampling(struct meson_host *host,
+ struct mmc_ios *ios)
{
- struct meson_host *host = mmc_priv(mmc);
- int adj = 0;
-
- /* enable signal resampling w/o delay */
- adj = ADJUST_ADJ_EN;
- writel(adj, host->regs + host->data->adjust);
-
- return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_MMC_DDR52:
+ meson_mmc_disable_resampling(host);
+ break;
+ }
}
static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -775,12 +601,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
- /* disable signal resampling */
- writel(0, host->regs + host->data->adjust);
-
- /* Reset rx phase */
- clk_set_phase(host->rx_clk, 0);
-
break;
case MMC_POWER_ON:
@@ -817,20 +637,13 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
val = readl(host->regs + SD_EMMC_CFG);
val &= ~CFG_BUS_WIDTH_MASK;
val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);
+ writel(val, host->regs + SD_EMMC_CFG);
- val &= ~CFG_DDR;
- if (meson_mmc_timing_is_ddr(ios))
- val |= CFG_DDR;
-
- val &= ~CFG_CHK_DS;
- if (ios->timing == MMC_TIMING_MMC_HS400)
- val |= CFG_CHK_DS;
-
- err = meson_mmc_clk_set(host, ios);
+ meson_mmc_check_resampling(host, ios);
+ err = meson_mmc_prepare_ios_clock(host, ios);
if (err)
dev_err(host->dev, "Failed to set clock: %d\n,", err);
- writel(val, host->regs + SD_EMMC_CFG);
dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val);
}
@@ -1081,9 +894,6 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
}
out:
- /* ack all enabled interrupts */
- writel(irq_en, host->regs + SD_EMMC_STATUS);
-
if (cmd->error) {
/* Stop desc in case of errors */
u32 start = readl(host->regs + SD_EMMC_START);
@@ -1095,12 +905,14 @@ out:
if (ret == IRQ_HANDLED)
meson_mmc_request_done(host->mmc, cmd->mrq);
+ /* ack all raised interrupts */
+ writel(status, host->regs + SD_EMMC_STATUS);
+
return ret;
}
static int meson_mmc_wait_desc_stop(struct meson_host *host)
{
- int loop;
u32 status;
/*
@@ -1110,20 +922,10 @@ static int meson_mmc_wait_desc_stop(struct meson_host *host)
* If we don't confirm the descriptor is stopped, it might raise new
* IRQs after we have called mmc_request_done() which is bad.
*/
- for (loop = 50; loop; loop--) {
- status = readl(host->regs + SD_EMMC_STATUS);
- if (status & (STATUS_BUSY | STATUS_DESC_BUSY))
- udelay(100);
- else
- break;
- }
-
- if (status & (STATUS_BUSY | STATUS_DESC_BUSY)) {
- dev_err(host->dev, "Timed out waiting for host to stop\n");
- return -ETIMEDOUT;
- }
- return 0;
+ return readl_poll_timeout(host->regs + SD_EMMC_STATUS, status,
+ !(status & (STATUS_BUSY | STATUS_DESC_BUSY)),
+ 100, 5000);
}
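The open-coded 50 x 100us loop becomes a readl_poll_timeout() call that re-reads the status register every 100us and gives up after 5000us, returning 0 or -ETIMEDOUT. The same pattern in isolation (names invented) looks like:

	#include <linux/iopoll.h>

	static int example_wait_not_busy(void __iomem *status_reg, u32 busy_mask)
	{
		u32 status;

		/* Poll every 100us, time out after 5ms. */
		return readl_poll_timeout(status_reg, status,
					  !(status & busy_mask), 100, 5000);
	}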
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
@@ -1227,7 +1029,7 @@ static const struct mmc_host_ops meson_mmc_ops = {
.get_cd = meson_mmc_get_cd,
.pre_req = meson_mmc_pre_req,
.post_req = meson_mmc_post_req,
- .execute_tuning = meson_mmc_execute_tuning,
+ .execute_tuning = meson_mmc_resampling_tuning,
.card_busy = meson_mmc_card_busy,
.start_signal_voltage_switch = meson_mmc_voltage_switch,
};
@@ -1338,7 +1140,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
host->regs + SD_EMMC_IRQ_EN);
ret = request_threaded_irq(host->irq, meson_mmc_irq,
- meson_mmc_irq_thread, IRQF_SHARED,
+ meson_mmc_irq_thread, IRQF_ONESHOT,
dev_name(&pdev->dev), host);
if (ret)
goto err_init_clk;
@@ -1349,6 +1151,13 @@ static int meson_mmc_probe(struct platform_device *pdev)
mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
mmc->max_seg_size = mmc->max_req_size;
+ /*
+ * At the moment, we don't know how to reliably enable HS400.
+ * From the different datasheets, it is not even clear if this mode
+ * is officially supported by any of the SoCs
+ */
+ mmc->caps2 &= ~MMC_CAP2_HS400;
+
/* data bounce buffer */
host->bounce_buf_size = mmc->max_req_size;
host->bounce_buf =
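In meson_mmc_resampling_tuning() above, the number of delay values tried is the ratio of the mux (source) clock rate to the divided mmc clock rate. As a purely illustrative example, a 1 GHz source divided down to a 50 MHz card clock gives DIV_ROUND_UP(1000000000, 50000000) = 20 candidate resampling delays.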
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1b1498805972..19544b121276 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * mmc_spi.c - Access SD/MMC cards through SPI master controllers
+ * Access SD/MMC cards through SPI master controllers
*
* (C) Copyright 2005, Intec Automation,
* Mike Lavender (mike@steroidmicros)
@@ -8,21 +9,6 @@
* Hans-Peter Nilsson (hp@axis.com)
* (C) Copyright 2007, ATRON electronic GmbH,
* Jan Nikitenko <jan.nikitenko@gmail.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/sched.h>
#include <linux/delay.h>
@@ -197,7 +183,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
unsigned n, u8 byte)
{
- u8 *cp = host->data->status;
+ u8 *cp = host->data->status;
unsigned long start = jiffies;
while (1) {
@@ -220,7 +206,7 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
* We use jiffies here because we want to have a relation
* between elapsed time and the blocking of the scheduler.
*/
- if (time_is_before_jiffies(start+1))
+ if (time_is_before_jiffies(start + 1))
schedule();
}
return -ETIMEDOUT;
@@ -415,7 +401,7 @@ checkstatus:
default:
dev_dbg(&host->spi->dev, "bad response type %04x\n",
- mmc_spi_resp_type(cmd));
+ mmc_spi_resp_type(cmd));
if (value >= 0)
value = -EINVAL;
goto done;
@@ -467,8 +453,8 @@ mmc_spi_command_send(struct mmc_spi_host *host,
memset(cp, 0xff, sizeof(data->status));
cp[1] = 0x40 | cmd->opcode;
- put_unaligned_be32(cmd->arg, cp+2);
- cp[6] = crc7_be(0, cp+1, 5) | 0x01;
+ put_unaligned_be32(cmd->arg, cp + 2);
+ cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
cp += 7;
/* Then, read up to 13 bytes (while writing all-ones):
@@ -642,9 +628,7 @@ mmc_spi_setup_data_message(
if (multiple || direction == DMA_TO_DEVICE) {
t = &host->early_status;
memset(t, 0, sizeof(*t));
- t->len = (direction == DMA_TO_DEVICE)
- ? sizeof(scratch->status)
- : 1;
+ t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
t->tx_buf = host->ones;
t->tx_dma = host->ones_dma;
t->rx_buf = scratch->status;
@@ -677,8 +661,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
u32 pattern;
if (host->mmc->use_spi_crc)
- scratch->crc_val = cpu_to_be16(
- crc_itu_t(0, t->tx_buf, t->len));
+ scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
if (host->dma_dev)
dma_sync_single_for_device(host->dma_dev,
host->data_dma, sizeof(*scratch),
@@ -819,6 +802,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
}
status = spi_sync_locked(spi, &host->m);
+ if (status < 0) {
+ dev_dbg(&spi->dev, "read error %d\n", status);
+ return status;
+ }
if (host->dma_dev) {
dma_sync_single_for_cpu(host->dma_dev,
@@ -855,9 +842,9 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
be16_to_cpus(&scratch->crc_val);
if (scratch->crc_val != crc) {
- dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, "
- "computed=0x%04x len=%d\n",
- scratch->crc_val, crc, t->len);
+ dev_dbg(&spi->dev,
+ "read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
+ scratch->crc_val, crc, t->len);
return -EILSEQ;
}
}
@@ -945,9 +932,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
dev_dbg(&host->spi->dev,
" mmc_spi: %s block, %d bytes\n",
- (direction == DMA_TO_DEVICE)
- ? "write"
- : "read",
+ (direction == DMA_TO_DEVICE) ? "write" : "read",
t->len);
if (direction == DMA_TO_DEVICE)
@@ -974,8 +959,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
if (status < 0) {
data->error = status;
dev_dbg(&spi->dev, "%s status %d\n",
- (direction == DMA_TO_DEVICE)
- ? "write" : "read",
+ (direction == DMA_TO_DEVICE) ? "write" : "read",
status);
break;
}
@@ -1249,8 +1233,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
mres = spi_setup(host->spi);
if (mres < 0)
dev_dbg(&host->spi->dev,
- "switch back to SPI mode 3"
- " failed\n");
+ "switch back to SPI mode 3 failed\n");
}
}
@@ -1470,7 +1453,7 @@ static int mmc_spi_probe(struct spi_device *spi)
return 0;
fail_add_host:
- mmc_remove_host (mmc);
+ mmc_remove_host(mmc);
fail_glue_init:
if (host->dma_dev)
dma_unmap_single(host->dma_dev, host->data_dma,
@@ -1485,7 +1468,6 @@ fail_ones_dma:
fail_nobuf1:
mmc_free_host(mmc);
mmc_spi_put_pdata(spi);
- dev_set_drvdata(&spi->dev, NULL);
nomem:
kfree(ones);
@@ -1496,32 +1478,27 @@ nomem:
static int mmc_spi_remove(struct spi_device *spi)
{
struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
- struct mmc_spi_host *host;
-
- if (mmc) {
- host = mmc_priv(mmc);
+ struct mmc_spi_host *host = mmc_priv(mmc);
- /* prevent new mmc_detect_change() calls */
- if (host->pdata && host->pdata->exit)
- host->pdata->exit(&spi->dev, mmc);
+ /* prevent new mmc_detect_change() calls */
+ if (host->pdata && host->pdata->exit)
+ host->pdata->exit(&spi->dev, mmc);
- mmc_remove_host(mmc);
+ mmc_remove_host(mmc);
- if (host->dma_dev) {
- dma_unmap_single(host->dma_dev, host->ones_dma,
- MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
- dma_unmap_single(host->dma_dev, host->data_dma,
- sizeof(*host->data), DMA_BIDIRECTIONAL);
- }
+ if (host->dma_dev) {
+ dma_unmap_single(host->dma_dev, host->ones_dma,
+ MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+ dma_unmap_single(host->dma_dev, host->data_dma,
+ sizeof(*host->data), DMA_BIDIRECTIONAL);
+ }
- kfree(host->data);
- kfree(host->ones);
+ kfree(host->data);
+ kfree(host->ones);
- spi->max_speed_hz = mmc->f_max;
- mmc_free_host(mmc);
- mmc_spi_put_pdata(spi);
- dev_set_drvdata(&spi->dev, NULL);
- }
+ spi->max_speed_hz = mmc->f_max;
+ mmc_free_host(mmc);
+ mmc_spi_put_pdata(spi);
return 0;
}
@@ -1542,8 +1519,7 @@ static struct spi_driver mmc_spi_driver = {
module_spi_driver(mmc_spi_driver);
-MODULE_AUTHOR("Mike Lavender, David Brownell, "
- "Hans-Peter Nilsson, Jan Nikitenko");
+MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");
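As a worked example of the command framing cleaned up in mmc_spi_command_send() above: for CMD0 with a zero argument, bytes cp[1]..cp[6] become 0x40 0x00 0x00 0x00 0x00 0x95, since crc7_be() returns the 7-bit CRC (0x4A) left-justified in the byte and the trailing | 0x01 supplies the end bit.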
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 387ff14587b8..356833a606d5 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -43,21 +43,11 @@
#include <asm/io.h>
#include "mmci.h"
-#include "mmci_qcom_dml.h"
#define DRIVER_NAME "mmci-pl18x"
-#ifdef CONFIG_DMA_ENGINE
-void mmci_variant_init(struct mmci_host *host);
-#else
-static inline void mmci_variant_init(struct mmci_host *host) {}
-#endif
-
-#ifdef CONFIG_MMC_STM32_SDMMC
-void sdmmc_variant_init(struct mmci_host *host);
-#else
-static inline void sdmmc_variant_init(struct mmci_host *host) {}
-#endif
+static void mmci_variant_init(struct mmci_host *host);
+static void ux500v2_variant_init(struct mmci_host *host);
static unsigned int fmax = 515633;
@@ -70,7 +60,6 @@ static struct variant_data variant_arm = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
@@ -90,7 +79,6 @@ static struct variant_data variant_arm_extended_fifo = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.mmcimask1 = true,
@@ -110,7 +98,6 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.mmcimask1 = true,
@@ -131,7 +118,6 @@ static struct variant_data variant_u300 = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -157,7 +143,6 @@ static struct variant_data variant_nomadik = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -186,7 +171,6 @@ static struct variant_data variant_ux500 = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -220,11 +204,9 @@ static struct variant_data variant_ux500v2 = {
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
- .blksz_datactrl16 = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
.signal_direction = true,
@@ -238,7 +220,7 @@ static struct variant_data variant_ux500v2 = {
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
- .init = mmci_variant_init,
+ .init = ux500v2_variant_init,
};
static struct variant_data variant_stm32 = {
@@ -255,7 +237,6 @@ static struct variant_data variant_stm32 = {
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.datalength_bits = 24,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -299,10 +280,8 @@ static struct variant_data variant_qcom = {
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
- .blksz_datactrl4 = true,
.datalength_bits = 24,
.datactrl_blocksz = 11,
- .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
@@ -624,6 +603,16 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
+static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
+}
+
+static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | (host->data->blksz << 16);
+}
+
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
@@ -886,15 +875,11 @@ int mmci_dmae_prep_data(struct mmci_host *host,
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
- struct mmc_data *data = host->data;
host->dma_in_progress = true;
dmaengine_submit(dmae->desc_current);
dma_async_issue_pending(dmae->cur);
- if (host->variant->qcom_dml)
- dml_start_xfer(host, data);
-
*datactrl |= MCI_DPSM_DMAENABLE;
return 0;
@@ -952,6 +937,7 @@ void mmci_dmae_unprep_data(struct mmci_host *host,
static struct mmci_host_ops mmci_variant_ops = {
.prep_data = mmci_dmae_prep_data,
.unprep_data = mmci_dmae_unprep_data,
+ .get_datactrl_cfg = mmci_get_dctrl_cfg,
.get_next_data = mmci_dmae_get_next_data,
.dma_setup = mmci_dmae_setup,
.dma_release = mmci_dmae_release,
@@ -959,12 +945,22 @@ static struct mmci_host_ops mmci_variant_ops = {
.dma_finalize = mmci_dmae_finalize,
.dma_error = mmci_dmae_error,
};
+#else
+static struct mmci_host_ops mmci_variant_ops = {
+ .get_datactrl_cfg = mmci_get_dctrl_cfg,
+};
+#endif
void mmci_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
}
-#endif
+
+void ux500v2_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+ host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
+}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
@@ -1000,7 +996,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
unsigned int datactrl, timeout, irqmask;
unsigned long long clks;
void __iomem *base;
- int blksz_bits;
dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
data->blksz, data->blocks, data->flags);
@@ -1018,18 +1013,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
writel(timeout, base + MMCIDATATIMER);
writel(host->size, base + MMCIDATALENGTH);
- blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
-
- if (variant->blksz_datactrl16)
- datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
- else if (variant->blksz_datactrl4)
- datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
- else
- datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;
-
- if (data->flags & MMC_DATA_READ)
- datactrl |= MCI_DPSM_DIRECTION;
+ datactrl = host->ops->get_datactrl_cfg(host);
+ datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;
if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
u32 clk;
@@ -1220,12 +1205,13 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
void __iomem *base = host->base;
- bool sbc;
+ bool sbc, busy_resp;
if (!cmd)
return;
sbc = (cmd == host->mrq->sbc);
+ busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
/*
* We need to be one of these interrupts to be considered worth
@@ -1239,8 +1225,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
/*
* ST Micro variant: handle busy detection.
*/
- if (host->variant->busy_detect) {
- bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
+ if (busy_resp && host->variant->busy_detect) {
/* We are busy with a command, return */
if (host->busy_status &&
@@ -1253,7 +1238,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
* that the special busy status bit is still set before
* proceeding.
*/
- if (!host->busy_status && busy_resp &&
+ if (!host->busy_status &&
!(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
(readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
@@ -1550,9 +1535,10 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
}
/*
- * Don't poll for busy completion in irq context.
+ * Busy detection has been handled by mmci_cmd_irq() above.
+ * Clear the status bit to prevent polling in IRQ context.
*/
- if (host->variant->busy_detect && host->busy_status)
+ if (host->variant->busy_detect_flag)
status &= ~host->variant->busy_detect_flag;
ret = 1;
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 14df81054438..4f071bd34e59 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -131,6 +131,11 @@
/* Control register extensions in the Qualcomm versions */
#define MCI_DPSM_QCOM_DATA_PEND BIT(17)
#define MCI_DPSM_QCOM_RX_DATA_PEND BIT(20)
+/* Control register extensions in STM32 versions */
+#define MCI_DPSM_STM32_MODE_BLOCK (0 << 2)
+#define MCI_DPSM_STM32_MODE_SDIO (1 << 2)
+#define MCI_DPSM_STM32_MODE_STREAM (2 << 2)
+#define MCI_DPSM_STM32_MODE_BLOCK_STOP (3 << 2)
#define MMCIDATACNT 0x030
#define MMCISTATUS 0x034
@@ -275,12 +280,8 @@ struct mmci_host;
* @st_clkdiv: true if using a ST-specific clock divider algorithm
* @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
- * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
- * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
- * register
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
* @datactrl_blksz: block size in power of two
- * @datactrl_dpsm_enable: enable value for DPSM
* @datactrl_first: true if data must be setup before send command
* @datacnt_useless: true if you could not use datacnt register to read
* remaining data
@@ -325,14 +326,11 @@ struct variant_data {
unsigned int datactrl_mask_ddrmode;
unsigned int datactrl_mask_sdio;
unsigned int datactrl_blocksz;
- unsigned int datactrl_dpsm_enable;
u8 datactrl_first:1;
u8 datacnt_useless:1;
u8 st_sdio:1;
u8 st_clkdiv:1;
u8 stm32_clkdiv:1;
- u8 blksz_datactrl16:1;
- u8 blksz_datactrl4:1;
u32 pwrreg_powerup;
u32 f_max;
u8 signal_direction:1;
@@ -362,6 +360,7 @@ struct mmci_host_ops {
bool next);
void (*unprep_data)(struct mmci_host *host, struct mmc_data *data,
int err);
+ u32 (*get_datactrl_cfg)(struct mmci_host *host);
void (*get_next_data)(struct mmci_host *host, struct mmc_data *data);
int (*dma_setup)(struct mmci_host *host);
void (*dma_release)(struct mmci_host *host);
@@ -429,6 +428,12 @@ struct mmci_host {
void mmci_write_clkreg(struct mmci_host *host, u32 clk);
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr);
+static inline u32 mmci_dctrl_blksz(struct mmci_host *host)
+{
+ return (ffs(host->data->blksz) - 1) << 4;
+}
+
+#ifdef CONFIG_DMA_ENGINE
int mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
bool next);
void mmci_dmae_unprep_data(struct mmci_host *host, struct mmc_data *data,
@@ -439,3 +444,16 @@ void mmci_dmae_release(struct mmci_host *host);
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl);
void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data);
void mmci_dmae_error(struct mmci_host *host);
+#endif
+
+#ifdef CONFIG_MMC_QCOM_DML
+void qcom_variant_init(struct mmci_host *host);
+#else
+static inline void qcom_variant_init(struct mmci_host *host) {}
+#endif
+
+#ifdef CONFIG_MMC_STM32_SDMMC
+void sdmmc_variant_init(struct mmci_host *host);
+#else
+static inline void sdmmc_variant_init(struct mmci_host *host) {}
+#endif
diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c
index 25d0a75533ea..3af396b3e0a0 100644
--- a/drivers/mmc/host/mmci_qcom_dml.c
+++ b/drivers/mmc/host/mmci_qcom_dml.c
@@ -54,10 +54,15 @@
#define DML_OFFSET 0x800
-void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
+static int qcom_dma_start(struct mmci_host *host, unsigned int *datactrl)
{
u32 config;
void __iomem *base = host->base + DML_OFFSET;
+ struct mmc_data *data = host->data;
+ int ret = mmci_dmae_start(host, datactrl);
+
+ if (ret)
+ return ret;
if (data->flags & MMC_DATA_READ) {
/* Read operation: configure DML for producer operation */
@@ -96,6 +101,7 @@ void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
/* make sure the dml is configured before dma is triggered */
wmb();
+ return 0;
}
static int of_get_dml_pipe_index(struct device_node *np, const char *name)
@@ -133,7 +139,6 @@ static int qcom_dma_setup(struct mmci_host *host)
producer_id = of_get_dml_pipe_index(np, "rx");
if (producer_id < 0 || consumer_id < 0) {
- host->variant->qcom_dml = false;
mmci_dmae_release(host);
return -EINVAL;
}
@@ -183,13 +188,19 @@ static int qcom_dma_setup(struct mmci_host *host)
return 0;
}
+static u32 qcom_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | (host->data->blksz << 4);
+}
+
static struct mmci_host_ops qcom_variant_ops = {
.prep_data = mmci_dmae_prep_data,
.unprep_data = mmci_dmae_unprep_data,
+ .get_datactrl_cfg = qcom_get_dctrl_cfg,
.get_next_data = mmci_dmae_get_next_data,
.dma_setup = qcom_dma_setup,
.dma_release = mmci_dmae_release,
- .dma_start = mmci_dmae_start,
+ .dma_start = qcom_dma_start,
.dma_finalize = mmci_dmae_finalize,
.dma_error = mmci_dmae_error,
};
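Summarizing the encodings now hidden behind get_datactrl_cfg(), for a 512-byte block: the default mmci_dctrl_blksz() helper yields (ffs(512) - 1) << 4 = 0x90 (the power-of-two exponent at bit 4), the ux500v2 callback puts the raw size at bit 16 (512 << 16 = 0x2000000), and the Qcom callback puts the raw size at bit 4 (512 << 4 = 0x2000); each callback then ORs in MCI_DPSM_ENABLE.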
diff --git a/drivers/mmc/host/mmci_qcom_dml.h b/drivers/mmc/host/mmci_qcom_dml.h
deleted file mode 100644
index fa16f6f4d4ad..000000000000
--- a/drivers/mmc/host/mmci_qcom_dml.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- *
- * Copyright (c) 2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef __MMC_QCOM_DML_H__
-#define __MMC_QCOM_DML_H__
-
-#ifdef CONFIG_MMC_QCOM_DML
-void qcom_variant_init(struct mmci_host *host);
-void dml_start_xfer(struct mmci_host *host, struct mmc_data *data);
-#else
-static inline void qcom_variant_init(struct mmci_host *host)
-{
-}
-static inline void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
-{
-}
-#endif /* CONFIG_MMC_QCOM_DML */
-
-#endif /* __MMC_QCOM_DML_H__ */
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index cfbfc6f1048f..8e83ae6920ae 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -265,10 +265,28 @@ static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
}
}
+static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
+{
+ u32 datactrl;
+
+ datactrl = mmci_dctrl_blksz(host);
+
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
+ host->data->blocks == 1)
+ datactrl |= MCI_DPSM_STM32_MODE_SDIO;
+ else if (host->data->stop && !host->mrq->sbc)
+ datactrl |= MCI_DPSM_STM32_MODE_BLOCK_STOP;
+ else
+ datactrl |= MCI_DPSM_STM32_MODE_BLOCK;
+
+ return datactrl;
+}
+
static struct mmci_host_ops sdmmc_variant_ops = {
.validate_data = sdmmc_idma_validate_data,
.prep_data = sdmmc_idma_prep_data,
.unprep_data = sdmmc_idma_unprep_data,
+ .get_datactrl_cfg = sdmmc_get_dctrl_cfg,
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 833ef0590af8..c518cc208a1f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -300,6 +300,8 @@
#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */
+#define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */
+
#define PAD_DELAY_MAX 32 /* PAD delay cells */
/*--------------------------------------------------------------------------*/
/* Descriptor Structure */
@@ -372,6 +374,7 @@ struct mtk_mmc_compatible {
bool stop_clk_fix;
bool enhance_rx;
bool support_64g;
+ bool use_internal_cd;
};
struct msdc_tune_para {
@@ -430,6 +433,7 @@ struct msdc_host {
bool hs400_cmd_resp_sel_rising;
/* cmd response sample selection for HS400 */
bool hs400_mode; /* current eMMC will run at hs400 mode */
+ bool internal_cd; /* Use internal card-detect logic */
struct msdc_save_para save_para; /* used when gate HCLK */
struct msdc_tune_para def_tune_para; /* default tune setting */
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
@@ -507,6 +511,28 @@ static const struct mtk_mmc_compatible mt7622_compat = {
.support_64g = false,
};
+static const struct mtk_mmc_compatible mt8516_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+};
+
+static const struct mtk_mmc_compatible mt7620_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+ .use_internal_cd = true,
+};
+
static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
@@ -514,6 +540,8 @@ static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
+ { .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
+ { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
{}
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);
@@ -1407,6 +1435,12 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
sdio_signal_irq(host->mmc);
}
+ if ((events & event_mask) & MSDC_INT_CDSC) {
+ if (host->internal_cd)
+ mmc_detect_change(host->mmc, msecs_to_jiffies(20));
+ events &= ~MSDC_INT_CDSC;
+ }
+
if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
break;
@@ -1440,14 +1474,24 @@ static void msdc_init_hw(struct msdc_host *host)
/* Reset */
msdc_reset_hw(host);
- /* Disable card detection */
- sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
-
/* Disable and clear all interrupts */
writel(0, host->base + MSDC_INTEN);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
+ /* Configure card detection */
+ if (host->internal_cd) {
+ sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
+ DEFAULT_DEBOUNCE);
+ sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+ sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
+ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
+ } else {
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
+ sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+ sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
+ }
+
if (host->top_base) {
writel(0, host->top_base + EMMC_TOP_CONTROL);
writel(0, host->top_base + EMMC_TOP_CMD);
@@ -1557,6 +1601,13 @@ static void msdc_init_hw(struct msdc_host *host)
static void msdc_deinit_hw(struct msdc_host *host)
{
u32 val;
+
+ if (host->internal_cd) {
+		/* Disable card-detect */
+ sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
+ }
+
/* Disable and clear all interrupts */
writel(0, host->base + MSDC_INTEN);
@@ -2055,13 +2106,31 @@ static void msdc_ack_sdio_irq(struct mmc_host *mmc)
__msdc_enable_sdio_irq(mmc, 1);
}
+static int msdc_get_cd(struct mmc_host *mmc)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ int val;
+
+ if (mmc->caps & MMC_CAP_NONREMOVABLE)
+ return 1;
+
+ if (!host->internal_cd)
+ return mmc_gpio_get_cd(mmc);
+
+ val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
+ if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ return !!val;
+ else
+ return !val;
+}
+
static const struct mmc_host_ops mt_msdc_ops = {
.post_req = msdc_post_req,
.pre_req = msdc_pre_req,
.request = msdc_ops_request,
.set_ios = msdc_ops_set_ios,
.get_ro = mmc_gpio_get_ro,
- .get_cd = mmc_gpio_get_cd,
+ .get_cd = msdc_get_cd,
.enable_sdio_irq = msdc_enable_sdio_irq,
.ack_sdio_irq = msdc_ack_sdio_irq,
.start_signal_voltage_switch = msdc_ops_switch_volt,
@@ -2123,9 +2192,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- host->top_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(host->top_base))
- host->top_base = NULL;
+ if (res) {
+ host->top_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->top_base))
+ host->top_base = NULL;
+ }
ret = mmc_regulator_get_supply(mmc);
if (ret)
@@ -2191,6 +2262,16 @@ static int msdc_drv_probe(struct platform_device *pdev)
else
mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
+ if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ !mmc_can_gpio_cd(mmc) &&
+ host->dev_comp->use_internal_cd) {
+ /*
+		 * The slot is removable but no CD GPIO is declared, so
+		 * use the controller's internal card-detect logic.
+ */
+ host->internal_cd = true;
+ }
+
if (mmc->caps & MMC_CAP_SDIO_IRQ)
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
@@ -2227,7 +2308,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_init_hw(host);
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT, pdev->name, host);
+ IRQF_TRIGGER_NONE, pdev->name, host);
if (ret)
goto release;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index e22bbff89c8d..9cb93e15b197 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -24,7 +24,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/unaligned.h>
#include "mvsdio.h"
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 4f06fb03c0a2..c021d433b04f 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -648,7 +648,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
/* set mmc core parameters */
mmc->ops = &mxs_mmc_ops;
mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;
+ MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23 |
+ MMC_CAP_ERASE;
host->broken_cd = of_property_read_bool(np, "broken-cd");
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 8a274b91804e..3c4d950a4755 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenFirmware bindings for the MMC-over-SPI driver
*
* Copyright (c) MontaVista Software, Inc. 2008.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 29a1ddaa7466..952fa4063ff8 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2077,7 +2077,7 @@ static int omap_hsmmc_runtime_suspend(struct device *dev)
unsigned long flags;
int ret = 0;
- host = platform_get_drvdata(to_platform_device(dev));
+ host = dev_get_drvdata(dev);
omap_hsmmc_context_save(host);
dev_dbg(dev, "disabled\n");
@@ -2118,7 +2118,7 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
struct omap_hsmmc_host *host;
unsigned long flags;
- host = platform_get_drvdata(to_platform_device(dev));
+ host = dev_get_drvdata(dev);
omap_hsmmc_context_restore(host);
dev_dbg(dev, "enabled\n");
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c1d3f0e38921..e7d80c83da2c 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -35,7 +35,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/hardware.h>
#include <linux/platform_data/mmc-pxamci.h>
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 8394a7bb1fc1..c0504aa90857 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -3,7 +3,7 @@
* Renesas Mobile SDHI
*
* Copyright (C) 2017 Horms Solutions Ltd., Simon Horman
- * Copyright (C) 2017 Renesas Electronics Corporation
+ * Copyright (C) 2017-19 Renesas Electronics Corporation
*/
#ifndef RENESAS_SDHI_H
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 8742e27e4e8b..5e9e36ed2107 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -2,8 +2,8 @@
/*
* Renesas SDHI
*
- * Copyright (C) 2015-17 Renesas Electronics Corporation
- * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-19 Renesas Electronics Corporation
+ * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
* Copyright (C) 2009 Magnus Damm
*
@@ -779,14 +779,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
mmc_data->max_blk_count = U16_MAX;
- ret = tmio_mmc_host_probe(host);
- if (ret < 0)
- goto edisclk;
-
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
if (ver == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
+ ret = tmio_mmc_host_probe(host);
+ if (ret < 0)
+ goto edisclk;
+
/* Enable tuning iff we have an SCC and a supported mode */
if (of_data && of_data->scc_offset &&
(host->mmc->caps & MMC_CAP_UHS_SDR104 ||
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 9dfafa2a90a3..751fe91c7571 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -2,8 +2,9 @@
/*
* DMA support for Internal DMAC with SDHI SD/SDIO controller
*
- * Copyright (C) 2016-17 Renesas Electronics Corporation
+ * Copyright (C) 2016-19 Renesas Electronics Corporation
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
+ * Copyright (C) 2018-19 Sang Engineering, Wolfram Sang
*/
#include <linux/bitops.h>
@@ -95,8 +96,8 @@ static const struct renesas_sdhi_of_data of_rza2_compatible = {
.scc_offset = 0 - 0x1000,
.taps = rcar_gen3_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
- /* DMAC can handle 0xffffffff blk count but only 1 segment */
- .max_blk_count = 0xffffffff,
+ /* DMAC can handle 32bit blk count but only 1 segment */
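+ /* also keeps max_req_size (max_blk_size * max_blk_count) within UINT_MAX */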
+ .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
.max_segs = 1,
};
@@ -110,8 +111,8 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
.scc_offset = 0x1000,
.taps = rcar_gen3_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
- /* DMAC can handle 0xffffffff blk count but only 1 segment */
- .max_blk_count = 0xffffffff,
+ /* DMAC can handle 32bit blk count but only 1 segment */
+ .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
.max_segs = 1,
};
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 02cd878e209f..1d29b822efb8 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -2,8 +2,8 @@
/*
* DMA support use of SYS DMAC with SDHI SD/SDIO controller
*
- * Copyright (C) 2016-17 Renesas Electronics Corporation
- * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2016-19 Renesas Electronics Corporation
+ * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
* Copyright (C) 2017 Horms Solutions, Simon Horman
* Copyright (C) 2010-2011 Guennadi Liakhovetski
*/
@@ -65,7 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.scc_offset = 0x0300,
.taps = rcar_gen2_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
- .max_blk_count = 0xffffffff,
+ .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
};
/* Definitions for sampling clocks */
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 8dbbc1f62b70..c391510e9ef4 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/pm_qos.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
@@ -73,6 +74,7 @@
#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0)
#define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1)
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3
+#define ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT (4 << 20)
#define ESDHC_STROBE_DLL_STATUS 0x74
#define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1)
@@ -156,6 +158,8 @@
#define ESDHC_FLAG_HS400_ES BIT(11)
/* The IP has Host Controller Interface for Command Queuing */
#define ESDHC_FLAG_CQHCI BIT(12)
+/* need to request PM QoS during low power */
+#define ESDHC_FLAG_PMQOS BIT(13)
struct esdhc_soc_data {
u32 flags;
@@ -204,6 +208,12 @@ static const struct esdhc_soc_data usdhc_imx7d_data = {
| ESDHC_FLAG_HS400,
};
+static struct esdhc_soc_data usdhc_imx7ulp_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400,
+};
+
static struct esdhc_soc_data usdhc_imx8qxp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
@@ -229,6 +239,7 @@ struct pltfm_imx_data {
WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
} multiblock_status;
u32 is_ddr;
+ struct pm_qos_request pm_qos_req;
};
static const struct platform_device_id imx_esdhc_devtype[] = {
@@ -257,6 +268,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
{ .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, },
{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
+ { .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
{ /* sentinel */ }
};
@@ -983,15 +995,19 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host)
/* force a reset on strobe dll */
writel(ESDHC_STROBE_DLL_CTRL_RESET,
host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /* clear the reset bit on strobe dll before any setting */
+ writel(0, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+
/*
* enable strobe dll ctrl and adjust the delay target
* for the uSDHC loopback read clock
*/
v = ESDHC_STROBE_DLL_CTRL_ENABLE |
+ ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT |
(7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
- /* wait 1us to make sure strobe dll status register stable */
- udelay(1);
+ /* wait 5us to make sure the strobe dll status register is stable */
+ udelay(5);
v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
dev_warn(mmc_dev(host->mmc),
@@ -1436,6 +1452,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
pdev->id_entry->driver_data;
+ if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
+ pm_qos_add_request(&imx_data->pm_qos_req,
+ PM_QOS_CPU_DMA_LATENCY, 0);
+
imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx_data->clk_ipg)) {
err = PTR_ERR(imx_data->clk_ipg);
@@ -1557,6 +1577,8 @@ disable_ipg_clk:
disable_per_clk:
clk_disable_unprepare(imx_data->clk_per);
free_sdhci:
+ if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
+ pm_qos_remove_request(&imx_data->pm_qos_req);
sdhci_pltfm_free(pdev);
return err;
}
@@ -1578,6 +1600,9 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
clk_disable_unprepare(imx_data->clk_ipg);
clk_disable_unprepare(imx_data->clk_ahb);
+ if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
+ pm_qos_remove_request(&imx_data->pm_qos_req);
+
sdhci_pltfm_free(pdev);
return 0;
@@ -1649,6 +1674,9 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
}
clk_disable_unprepare(imx_data->clk_ahb);
+ if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
+ pm_qos_remove_request(&imx_data->pm_qos_req);
+
return ret;
}
@@ -1659,9 +1687,13 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int err;
+ if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
+ pm_qos_add_request(&imx_data->pm_qos_req,
+ PM_QOS_CPU_DMA_LATENCY, 0);
+
err = clk_prepare_enable(imx_data->clk_ahb);
if (err)
- return err;
+ goto remove_pm_qos_request;
if (!sdhci_sdio_irq_enabled(host)) {
err = clk_prepare_enable(imx_data->clk_per);
@@ -1690,6 +1722,9 @@ disable_per_clk:
clk_disable_unprepare(imx_data->clk_per);
disable_ahb_clk:
clk_disable_unprepare(imx_data->clk_ahb);
+remove_pm_qos_request:
+ if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
+ pm_qos_remove_request(&imx_data->pm_qos_req);
return err;
}
#endif
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index c9e3e050ccc8..88dc3f00a5be 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -832,7 +832,10 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_arasan_voltage_switch;
sdhci_arasan->has_cqe = true;
- host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ host->mmc->caps2 |= MMC_CAP2_CQE;
+
+ if (!of_property_read_bool(np, "disable-cqe-dcmd"))
+ host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
}
ret = sdhci_arasan_add_host(sdhci_arasan);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 4e669b4edfc1..e20c00f14109 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -24,6 +24,7 @@
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -81,6 +82,7 @@ struct sdhci_esdhc {
bool quirk_limited_clk_division;
bool quirk_unreliable_pulse_detection;
bool quirk_fixup_tuning;
+ bool quirk_ignore_data_inhibit;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
u32 div_ratio;
@@ -147,6 +149,19 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
+ /*
+ * Some controllers have an unreliable Data Line Active
+ * bit for commands with a busy signal. This affects the
+ * Command Inhibit (data) bit. Just ignore it, since the
+ * MMC core driver has already polled the card status
+ * with CMD13 after any command with a busy signal.
+ */
+ if ((spec_reg == SDHCI_PRESENT_STATE) &&
+ (esdhc->quirk_ignore_data_inhibit == true)) {
+ ret = value & ~SDHCI_DATA_INHIBIT;
+ return ret;
+ }
+
ret = value;
return ret;
}
@@ -694,6 +709,9 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
+ mdelay(5);
+
if (mask & SDHCI_RESET_ALL) {
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_EN;
@@ -864,6 +882,25 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host,
sdhci_set_uhs_signaling(host, timing);
}
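+/*
+ * P2020 only: drop (and ack) a DATA_END interrupt that arrives for a
+ * multiple-block write while the block counter is still non-zero, so the
+ * transfer is not completed prematurely.
+ */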
+static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
+{
+ u32 command;
+
+ if (of_find_compatible_node(NULL, NULL,
+ "fsl,p2020-esdhc")) {
+ command = SDHCI_GET_CMD(sdhci_readw(host,
+ SDHCI_COMMAND));
+ if (command == MMC_WRITE_MULTIPLE_BLOCK &&
+ sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
+ intmask & SDHCI_INT_DATA_END) {
+ intmask &= ~SDHCI_INT_DATA_END;
+ sdhci_writel(host, SDHCI_INT_DATA_END,
+ SDHCI_INT_STATUS);
+ }
+ }
+ return intmask;
+}
+
#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
@@ -911,6 +948,7 @@ static const struct sdhci_ops sdhci_esdhc_be_ops = {
.set_bus_width = esdhc_pltfm_set_bus_width,
.reset = esdhc_reset,
.set_uhs_signaling = esdhc_set_uhs_signaling,
+ .irq = esdhc_irq,
};
static const struct sdhci_ops sdhci_esdhc_le_ops = {
@@ -928,6 +966,7 @@ static const struct sdhci_ops sdhci_esdhc_le_ops = {
.set_bus_width = esdhc_pltfm_set_bus_width,
.reset = esdhc_reset,
.set_uhs_signaling = esdhc_set_uhs_signaling,
+ .irq = esdhc_irq,
};
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
@@ -955,6 +994,7 @@ static struct soc_device_attribute soc_incorrect_hostver[] = {
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
+ { .family = "QorIQ LX2160A", .revision = "2.0", },
{ },
};
@@ -1074,6 +1114,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+ host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+ host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ }
+
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
@@ -1084,12 +1129,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ esdhc->quirk_ignore_data_inhibit = false;
if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
/*
* Freescale messed up with P2020 as it has a non-standard
* host control register
*/
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
+ esdhc->quirk_ignore_data_inhibit = true;
}
/* call to generic mmc_of_parse to support additional capabilities */
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 5bbed477c9b1..bdb80c503fde 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -785,7 +785,7 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
sdhci_omap_start_clock(omap_host);
}
-void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
+static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
@@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
sdhci_reset(host, mask);
}
+#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
+ SDHCI_INT_TIMEOUT)
+#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
+
+static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (omap_host->is_tuning && host->cmd && !host->data_early &&
+ (intmask & CMD_ERR_MASK)) {
+
+ /*
+ * Since we are not resetting the data lines during the tuning
+ * operation, data error or data complete interrupts
+ * might still arrive. Mark this request as a failure,
+ * but still wait for the data interrupt.
+ */
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ host->cmd->error = -EILSEQ;
+
+ host->cmd = NULL;
+
+ /*
+ * Sometimes command error interrupts and the command complete
+ * interrupt arrive together. Clear all command-related
+ * interrupts here.
+ */
+ sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
+ intmask &= ~CMD_MASK;
+ }
+
+ return intmask;
+}
+
static struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
@@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = {
.platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
.reset = sdhci_omap_reset,
.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
+ .irq = sdhci_omap_irq,
};
static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 99b0fec2836b..ab9e2b901094 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -31,6 +31,10 @@
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
+#ifdef CONFIG_X86
+#include <asm/iosf_mbi.h>
+#endif
+
#include "cqhci.h"
#include "sdhci.h"
@@ -451,6 +455,50 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
.probe_slot = pch_hc_probe_slot,
};
+#ifdef CONFIG_X86
+
+#define BYT_IOSF_SCCEP 0x63
+#define BYT_IOSF_OCP_NETCTRL0 0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
+
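+/*
+ * Clear the OCP timeout base field over the IOSF sideband for the Bay Trail
+ * SDHCI functions; applied at slot probe and re-applied from
+ * byt_resume()/byt_runtime_resume() below.
+ */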
+static void byt_ocp_setting(struct pci_dev *pdev)
+{
+ u32 val = 0;
+
+ if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
+ pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
+ pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
+ pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
+ return;
+
+ if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
+ &val)) {
+ dev_err(&pdev->dev, "%s read error\n", __func__);
+ return;
+ }
+
+ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+ return;
+
+ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+ if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
+ val)) {
+ dev_err(&pdev->dev, "%s write error\n", __func__);
+ return;
+ }
+
+ dev_dbg(&pdev->dev, "%s completed\n", __func__);
+}
+
+#else
+
+static inline void byt_ocp_setting(struct pci_dev *pdev)
+{
+}
+
+#endif
+
enum {
INTEL_DSM_FNS = 0,
INTEL_DSM_V18_SWITCH = 3,
@@ -715,6 +763,8 @@ static void byt_probe_slot(struct sdhci_pci_slot *slot)
byt_read_dsm(slot);
+ byt_ocp_setting(slot->chip->pdev);
+
ops->execute_tuning = intel_execute_tuning;
ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
@@ -938,7 +988,35 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+
+static int byt_resume(struct sdhci_pci_chip *chip)
+{
+ byt_ocp_setting(chip->pdev);
+
+ return sdhci_pci_resume_host(chip);
+}
+
+#endif
+
+#ifdef CONFIG_PM
+
+static int byt_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ byt_ocp_setting(chip->pdev);
+
+ return sdhci_pci_runtime_resume_host(chip);
+}
+
+#endif
+
static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+#ifdef CONFIG_PM_SLEEP
+ .resume = byt_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_resume = byt_runtime_resume,
+#endif
.allow_runtime_pm = true,
.probe_slot = byt_emmc_probe_slot,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
@@ -972,6 +1050,12 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
};
static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
+#ifdef CONFIG_PM_SLEEP
+ .resume = byt_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_resume = byt_runtime_resume,
+#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
@@ -983,6 +1067,12 @@ static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+#ifdef CONFIG_PM_SLEEP
+ .resume = byt_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_resume = byt_runtime_resume,
+#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
@@ -994,6 +1084,12 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+#ifdef CONFIG_PM_SLEEP
+ .resume = byt_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_resume = byt_runtime_resume,
+#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
@@ -1576,6 +1672,8 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
+ SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 4ddb69a15cd7..e5dc6e44c7a4 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -50,6 +50,8 @@
#define PCI_DEVICE_ID_INTEL_CNPH_SD 0xa375
#define PCI_DEVICE_ID_INTEL_ICP_EMMC 0x34c4
#define PCI_DEVICE_ID_INTEL_ICP_SD 0x34f8
+#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
+#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 32e62904c0d3..f608417ae967 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -66,6 +66,22 @@
#define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
+#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK 0x03fc0000
+#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT 18
+#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK 0x00001fc0
+#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT 6
+#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK 0x000e000
+#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT 13
+#define TRIES_128 2
+#define TRIES_256 4
+#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK 0x7
+
+#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0 0x1c4
+#define SDHCI_TEGRA_VNDR_TUN_STATUS0 0x1C8
+#define SDHCI_TEGRA_VNDR_TUN_STATUS1 0x1CC
+#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK 0xFF
+#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT 0x8
+#define TUNING_WORD_BIT_SIZE 32
#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
#define SDHCI_AUTO_CAL_START BIT(31)
@@ -90,6 +106,7 @@
#define NVQUIRK_HAS_PADCALIB BIT(6)
#define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
+#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
@@ -97,6 +114,8 @@
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
u32 nvquirks;
+ u8 min_tap_delay;
+ u8 max_tap_delay;
};
/* Magic pull up and pull down pad calibration offsets */
@@ -136,6 +155,8 @@ struct sdhci_tegra {
u32 default_trim;
u32 dqs_trim;
bool enable_hwcq;
+ unsigned long curr_clk_rate;
+ u8 tuned_tap_delay;
};
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
@@ -241,6 +262,7 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
if (is_tuning_cmd) {
udelay(1);
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
tegra_sdhci_configure_card_clk(host, clk_enabled);
}
}
@@ -722,6 +744,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
*/
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
clk_set_rate(pltfm_host->clk, host_clk);
+ tegra_host->curr_clk_rate = host_clk;
if (tegra_host->ddr_signaling)
host->max_clk = host_clk;
else
@@ -770,6 +793,159 @@ static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
"HS400 delay line calibration timed out\n");
}
+static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
+ u8 thd_low, u8 fixed_tap)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ u32 val, tun_status;
+ u8 word, bit, edge1, tap, window;
+ bool tap_result;
+ bool start_fail = false;
+ bool start_pass = false;
+ bool end_pass = false;
+ bool first_fail = false;
+ bool first_pass = false;
+ u8 start_pass_tap = 0;
+ u8 end_pass_tap = 0;
+ u8 first_fail_tap = 0;
+ u8 first_pass_tap = 0;
+ u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
+
+ /*
+ * Read the auto-tuned results and extract a good, valid passing window by
+ * filtering out unwanted bubble/partial/merged windows.
+ */
+ for (word = 0; word < total_tuning_words; word++) {
+ val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
+ val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
+ val |= word;
+ sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
+ tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
+ bit = 0;
+ while (bit < TUNING_WORD_BIT_SIZE) {
+ tap = word * TUNING_WORD_BIT_SIZE + bit;
+ tap_result = tun_status & (1 << bit);
+ if (!tap_result && !start_fail) {
+ start_fail = true;
+ if (!first_fail) {
+ first_fail_tap = tap;
+ first_fail = true;
+ }
+
+ } else if (tap_result && start_fail && !start_pass) {
+ start_pass_tap = tap;
+ start_pass = true;
+ if (!first_pass) {
+ first_pass_tap = tap;
+ first_pass = true;
+ }
+
+ } else if (!tap_result && start_fail && start_pass &&
+ !end_pass) {
+ end_pass_tap = tap - 1;
+ end_pass = true;
+ } else if (tap_result && start_pass && start_fail &&
+ end_pass) {
+ window = end_pass_tap - start_pass_tap;
+ /* discard merged window and bubble window */
+ if (window >= thd_up || window < thd_low) {
+ start_pass_tap = tap;
+ end_pass = false;
+ } else {
+ /* set tap at middle of valid window */
+ tap = start_pass_tap + window / 2;
+ tegra_host->tuned_tap_delay = tap;
+ return;
+ }
+ }
+
+ bit++;
+ }
+ }
+
+ if (!first_fail) {
+ WARN_ON("no edge detected, continue with hw tuned delay.\n");
+ } else if (first_pass) {
+ /* set tap location at fixed tap relative to the first edge */
+ edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
+ if (edge1 - 1 > fixed_tap)
+ tegra_host->tuned_tap_delay = edge1 - fixed_tap;
+ else
+ tegra_host->tuned_tap_delay = edge1 + fixed_tap;
+ }
+}
+
+static void tegra_sdhci_post_tuning(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+ u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
+ u8 fixed_tap, start_tap, end_tap, window_width;
+ u8 thdupper, thdlower;
+ u8 num_iter;
+ u32 clk_rate_mhz, period_ps, bestcase, worstcase;
+
+ /* retain the HW-tuned tap to use in case no correction is needed */
+ val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+ tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
+ SDHCI_CLOCK_CTRL_TAP_SHIFT;
+ if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
+ min_tap_dly = soc_data->min_tap_delay;
+ max_tap_dly = soc_data->max_tap_delay;
+ clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
+ period_ps = USEC_PER_SEC / clk_rate_mhz;
+ bestcase = period_ps / min_tap_dly;
+ worstcase = period_ps / max_tap_dly;
+ /*
+ * Upper and Lower bound thresholds used to detect merged and
+ * bubble windows
+ */
+ thdupper = (2 * worstcase + bestcase) / 2;
+ thdlower = worstcase / 4;
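+ /*
+ * Worked example, assuming a 200 MHz tuning clock and the Tegra194
+ * tap-delay bounds (96/139 ps) defined below: period_ps = 5000,
+ * bestcase = 52, worstcase = 35, so thdupper = 61 and thdlower = 8.
+ */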
+ /*
+ * A fixed tap is used when the HW tuning result contains a single edge;
+ * the tap is then set at a fixed delay relative to the first edge
+ */
+ avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
+ fixed_tap = avg_tap_dly / 2;
+
+ val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
+ start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
+ end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
+ SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
+ window_width = end_tap - start_tap;
+ num_iter = host->tuning_loop_count;
+ /*
+ * A partial window includes the edges of the tuning range.
+ * A merged window includes more taps, so its width is higher
+ * than the upper threshold.
+ */
+ if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
+ (end_tap == num_iter - 2) || window_width >= thdupper) {
+ pr_debug("%s: Apply tuning correction\n",
+ mmc_hostname(host->mmc));
+ tegra_sdhci_tap_correction(host, thdupper, thdlower,
+ fixed_tap);
+ }
+ }
+
+ tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
+}
+
+static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err;
+
+ err = sdhci_execute_tuning(mmc, opcode);
+ if (!err && !host->tuning_err)
+ tegra_sdhci_post_tuning(host);
+
+ return err;
+}
+
static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
unsigned timing)
{
@@ -778,16 +954,22 @@ static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
bool set_default_tap = false;
bool set_dqs_trim = false;
bool do_hs400_dll_cal = false;
+ u8 iter = TRIES_256;
+ u32 val;
+ tegra_host->ddr_signaling = false;
switch (timing) {
case MMC_TIMING_UHS_SDR50:
+ break;
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_MMC_HS200:
/* Don't set default tap on tunable modes. */
+ iter = TRIES_128;
break;
case MMC_TIMING_MMC_HS400:
set_dqs_trim = true;
do_hs400_dll_cal = true;
+ iter = TRIES_128;
break;
case MMC_TIMING_MMC_DDR52:
case MMC_TIMING_UHS_DDR50:
@@ -799,11 +981,25 @@ static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
break;
}
+ val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
+ val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
+ SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
+ SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
+ val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
+ 0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
+ 1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
+ sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
+ sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
+
+ host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
+
sdhci_set_uhs_signaling(host, timing);
tegra_sdhci_pad_autocalib(host);
- if (set_default_tap)
+ if (tegra_host->tuned_tap_delay && !set_default_tap)
+ tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
+ else
tegra_sdhci_set_tap(host, tegra_host->default_tap);
if (set_dqs_trim)
@@ -928,23 +1124,86 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
tegra_host->pad_calib_required = true;
}
+static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ u8 ctrl;
+ ktime_t timeout;
+ bool timed_out;
+
+ /*
+ * During CQE resume/unhalt, the CQHCI driver unhalts CQE before the
+ * cqhci_host_ops enable callback, where the SDHCI DMA and BLOCK_SIZE
+ * registers need to be re-configured.
+ * Tegra CQHCI/SDHCI prevents write access to the block size register
+ * while CQE is unhalted, so handle the CQE resume sequence here and
+ * configure the SDHCI block registers before exiting the CQE halt state.
+ */
+ if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
+ cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+ sdhci_cqe_enable(mmc);
+ writel(val, cq_host->mmio + reg);
+ timeout = ktime_add_us(ktime_get(), 50);
+ while (1) {
+ timed_out = ktime_compare(ktime_get(), timeout) > 0;
+ ctrl = cqhci_readl(cq_host, CQHCI_CTL);
+ if (!(ctrl & CQHCI_HALT) || timed_out)
+ break;
+ }
+ /*
+ * CQE usually resumes very quickly, but in case the Tegra CQE
+ * does not resume, retry the unhalt.
+ */
+ if (timed_out)
+ writel(val, cq_host->mmio + reg);
+ } else {
+ writel(val, cq_host->mmio + reg);
+ }
+}
+
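+/*
+ * SoCs flagged with NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING need the CMD_TIMING
+ * field set in the task descriptor of R1b direct commands (DCMD).
+ */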
+static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
+ struct mmc_request *mrq, u64 *data)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+ if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
+ mrq->cmd->flags & MMC_RSP_R1B)
+ *data |= CQHCI_CMD_TIMING(1);
+}
+
static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
- u32 cqcfg = 0;
+ u32 val;
/*
- * Tegra SDMMC Controller design prevents write access to BLOCK_COUNT
- * registers when CQE is enabled.
+ * The Tegra CQHCI/SDMMC design prevents write access to the SDHCI block
+ * size register while CQE is enabled and unhalted.
+ * The CQHCI driver enables CQE prior to activation, so disable CQE before
+ * programming the block size in the SDHCI controller and then re-enable it.
*/
- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
- if (cqcfg & CQHCI_ENABLE)
- cqhci_writel(cq_host, (cqcfg & ~CQHCI_ENABLE), CQHCI_CFG);
-
- sdhci_cqe_enable(mmc);
+ if (!cq_host->activated) {
+ val = cqhci_readl(cq_host, CQHCI_CFG);
+ if (val & CQHCI_ENABLE)
+ cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
+ CQHCI_CFG);
+ sdhci_cqe_enable(mmc);
+ if (val & CQHCI_ENABLE)
+ cqhci_writel(cq_host, val, CQHCI_CFG);
+ }
- if (cqcfg & CQHCI_ENABLE)
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ /*
+ * CMD CRC errors are sometimes seen with some eMMC devices when the status
+ * command is sent during transfer of the last data block, which is the
+ * default case since the send status command block counter (CBC) is 1.
+ * The recommended fix is to set CBC to 0, allowing the status command
+ * only when the data lines are idle.
+ */
+ val = cqhci_readl(cq_host, CQHCI_SSC1);
+ val &= ~CQHCI_SSC1_CBC_MASK;
+ cqhci_writel(cq_host, val, CQHCI_SSC1);
}
static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
@@ -966,9 +1225,11 @@ static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
}
static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
+ .write_l = tegra_cqhci_writel,
.enable = sdhci_tegra_cqe_enable,
.disable = sdhci_cqe_disable,
.dumpregs = sdhci_tegra_dumpregs,
+ .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
};
static const struct sdhci_ops tegra_sdhci_ops = {
@@ -1109,6 +1370,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104,
+ .min_tap_delay = 106,
+ .max_tap_delay = 185,
};
static const struct sdhci_ops tegra186_sdhci_ops = {
@@ -1148,10 +1411,25 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
NVQUIRK_HAS_PADCALIB |
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
+ .min_tap_delay = 84,
+ .max_tap_delay = 136,
+};
+
+static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
+ .pdata = &sdhci_tegra186_pdata,
+ .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
+ NVQUIRK_HAS_PADCALIB |
+ NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
+ NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104,
+ .min_tap_delay = 96,
+ .max_tap_delay = 139,
};
static const struct of_device_id sdhci_tegra_dt_match[] = {
+ { .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
@@ -1250,6 +1528,10 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
host->mmc_host_ops.hs400_enhanced_strobe =
tegra_sdhci_hs400_enhanced_strobe;
+ if (!host->ops->platform_execute_tuning)
+ host->mmc_host_ops.execute_tuning =
+ tegra_sdhci_execute_hw_tuning;
+
rc = mmc_of_parse(host->mmc);
if (rc)
goto err_parse_dt;
@@ -1329,11 +1611,67 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int ret;
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
+ ret = sdhci_suspend_host(host);
+ if (ret) {
+ cqhci_resume(host->mmc);
+ return ret;
+ }
+
+ clk_disable_unprepare(pltfm_host->clk);
+ return 0;
+}
+
+static int __maybe_unused sdhci_tegra_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int ret;
+
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret)
+ return ret;
+
+ ret = sdhci_resume_host(host);
+ if (ret)
+ goto disable_clk;
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_resume(host->mmc);
+ if (ret)
+ goto suspend_host;
+ }
+
+ return 0;
+
+suspend_host:
+ sdhci_suspend_host(host);
+disable_clk:
+ clk_disable_unprepare(pltfm_host->clk);
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
+ sdhci_tegra_resume);
+
static struct platform_driver sdhci_tegra_driver = {
.driver = {
.name = "sdhci-tegra",
.of_match_table = sdhci_tegra_dt_match,
- .pm = &sdhci_pltfm_pmops,
+ .pm = &sdhci_tegra_dev_pm_ops,
},
.probe = sdhci_tegra_probe,
.remove = sdhci_tegra_remove,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a8141ff9be03..97158344b862 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -446,6 +446,28 @@ static inline void sdhci_led_deactivate(struct sdhci_host *host)
#endif
+static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
+ unsigned long timeout)
+{
+ if (sdhci_data_line_cmd(mrq->cmd))
+ mod_timer(&host->data_timer, timeout);
+ else
+ mod_timer(&host->timer, timeout);
+}
+
+static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ if (sdhci_data_line_cmd(mrq->cmd))
+ del_timer(&host->data_timer);
+ else
+ del_timer(&host->timer);
+}
+
+static inline bool sdhci_has_requests(struct sdhci_host *host)
+{
+ return host->cmd || host->data_cmd;
+}
+
/*****************************************************************************\
* *
* Core functions *
@@ -1221,6 +1243,18 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
int i;
+ if (host->cmd && host->cmd->mrq == mrq)
+ host->cmd = NULL;
+
+ if (host->data_cmd && host->data_cmd->mrq == mrq)
+ host->data_cmd = NULL;
+
+ if (host->data && host->data->mrq == mrq)
+ host->data = NULL;
+
+ if (sdhci_needs_reset(host, mrq))
+ host->pending_reset = true;
+
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
if (host->mrqs_done[i] == mrq) {
WARN_ON(1);
@@ -1237,24 +1271,17 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
WARN_ON(i >= SDHCI_MAX_MRQS);
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_del_timer(host, mrq);
+
+ if (!sdhci_has_requests(host))
+ sdhci_led_deactivate(host);
}
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
- if (host->cmd && host->cmd->mrq == mrq)
- host->cmd = NULL;
-
- if (host->data_cmd && host->data_cmd->mrq == mrq)
- host->data_cmd = NULL;
-
- if (host->data && host->data->mrq == mrq)
- host->data = NULL;
-
- if (sdhci_needs_reset(host, mrq))
- host->pending_reset = true;
-
__sdhci_finish_mrq(host, mrq);
+
+ queue_work(host->complete_wq, &host->complete_work);
}
static void sdhci_finish_data(struct sdhci_host *host)
@@ -1305,34 +1332,17 @@ static void sdhci_finish_data(struct sdhci_host *host)
* responsibility to send the stop command if required.
*/
if (data->mrq->cap_cmd_during_tfr) {
- sdhci_finish_mrq(host, data->mrq);
+ __sdhci_finish_mrq(host, data->mrq);
} else {
/* Avoid triggering warning in sdhci_send_command() */
host->cmd = NULL;
sdhci_send_command(host, data->stop);
}
} else {
- sdhci_finish_mrq(host, data->mrq);
+ __sdhci_finish_mrq(host, data->mrq);
}
}
-static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
- unsigned long timeout)
-{
- if (sdhci_data_line_cmd(mrq->cmd))
- mod_timer(&host->data_timer, timeout);
- else
- mod_timer(&host->timer, timeout);
-}
-
-static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
-{
- if (sdhci_data_line_cmd(mrq->cmd))
- del_timer(&host->data_timer);
- else
- del_timer(&host->timer);
-}
-
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
int flags;
@@ -1492,7 +1502,7 @@ static void sdhci_finish_command(struct sdhci_host *host)
sdhci_finish_data(host);
if (!cmd->data)
- sdhci_finish_mrq(host, cmd->mrq);
+ __sdhci_finish_mrq(host, cmd->mrq);
}
}
@@ -1807,7 +1817,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_send_command(host, mrq->cmd);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);
@@ -2010,8 +2019,6 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
*/
if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
-
- mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
@@ -2105,7 +2112,6 @@ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
- mmiowb();
}
}
@@ -2353,7 +2359,6 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
host->tuning_done = 0;
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
@@ -2369,9 +2374,9 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
/*
* Issue opcode repeatedly till Execute Tuning is set to 0 or the number
- * of loops reaches 40 times.
+ * of loops reaches tuning loop count.
*/
- for (i = 0; i < MAX_TUNING_LOOP; i++) {
+ for (i = 0; i < host->tuning_loop_count; i++) {
u16 ctrl;
sdhci_send_tuning(host, opcode);
@@ -2528,11 +2533,6 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
-static inline bool sdhci_has_requests(struct sdhci_host *host)
-{
- return host->cmd || host->data_cmd;
-}
-
static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
if (host->data_cmd) {
@@ -2594,7 +2594,7 @@ static const struct mmc_host_ops sdhci_ops = {
/*****************************************************************************\
* *
- * Tasklets *
+ * Request done *
* *
\*****************************************************************************/
@@ -2617,8 +2617,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
return true;
}
- sdhci_del_timer(host, mrq);
-
/*
* Always unmap the data buffers if they were mapped by
* sdhci_prepare_data() whenever we finish with a request.
@@ -2700,12 +2698,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
host->pending_reset = false;
}
- if (!sdhci_has_requests(host))
- sdhci_led_deactivate(host);
-
host->mrqs_done[i] = NULL;
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
@@ -2713,9 +2707,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
return false;
}
-static void sdhci_tasklet_finish(unsigned long param)
+static void sdhci_complete_work(struct work_struct *work)
{
- struct sdhci_host *host = (struct sdhci_host *)param;
+ struct sdhci_host *host = container_of(work, struct sdhci_host,
+ complete_work);
while (!sdhci_request_done(host))
;
@@ -2739,7 +2734,6 @@ static void sdhci_timeout_timer(struct timer_list *t)
sdhci_finish_mrq(host, host->cmd->mrq);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -2761,6 +2755,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
if (host->data) {
host->data->error = -ETIMEDOUT;
sdhci_finish_data(host);
+ queue_work(host->complete_wq, &host->complete_work);
} else if (host->data_cmd) {
host->data_cmd->error = -ETIMEDOUT;
sdhci_finish_mrq(host, host->data_cmd->mrq);
@@ -2770,7 +2765,6 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
}
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -2827,7 +2821,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
return;
}
- sdhci_finish_mrq(host, host->cmd->mrq);
+ __sdhci_finish_mrq(host, host->cmd->mrq);
return;
}
@@ -2841,7 +2835,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
mrq->sbc->error = err;
- sdhci_finish_mrq(host, mrq);
+ __sdhci_finish_mrq(host, mrq);
return;
}
}
@@ -2905,7 +2899,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data_cmd = NULL;
data_cmd->error = -ETIMEDOUT;
- sdhci_finish_mrq(host, data_cmd->mrq);
+ __sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
if (intmask & SDHCI_INT_DATA_END) {
@@ -2918,7 +2912,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (host->cmd == data_cmd)
return;
- sdhci_finish_mrq(host, data_cmd->mrq);
+ __sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
}
@@ -3001,12 +2995,24 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
}
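+/*
+ * Completion is deferred to the threaded handler (sdhci_thread_irq()) when a
+ * request still needs a controller reset or has DMA-mapped data to unmap;
+ * anything else can be completed directly from the hard interrupt handler.
+ */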
+static inline bool sdhci_defer_done(struct sdhci_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ return host->pending_reset ||
+ ((host->flags & SDHCI_REQ_USE_DMA) && data &&
+ data->host_cookie == COOKIE_MAPPED);
+}
+
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
+ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
irqreturn_t result = IRQ_NONE;
struct sdhci_host *host = dev_id;
u32 intmask, mask, unexpected = 0;
int max_loops = 16;
+ int i;
spin_lock(&host->lock);
@@ -3100,9 +3106,30 @@ cont:
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
} while (intmask && --max_loops);
+
+ /* Determine if mrqs can be completed immediately */
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ struct mmc_request *mrq = host->mrqs_done[i];
+
+ if (!mrq)
+ continue;
+
+ if (sdhci_defer_done(host, mrq)) {
+ result = IRQ_WAKE_THREAD;
+ } else {
+ mrqs_done[i] = mrq;
+ host->mrqs_done[i] = NULL;
+ }
+ }
out:
spin_unlock(&host->lock);
+ /* Process mrqs ready for immediate completion */
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ if (mrqs_done[i])
+ mmc_request_done(host->mmc, mrqs_done[i]);
+ }
+
if (unexpected) {
pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), unexpected);
@@ -3118,6 +3145,9 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
unsigned long flags;
u32 isr;
+ while (!sdhci_request_done(host))
+ ;
+
spin_lock_irqsave(&host->lock, flags);
isr = host->thread_isr;
host->thread_isr = 0;
@@ -3139,7 +3169,7 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
spin_unlock_irqrestore(&host->lock, flags);
}
- return isr ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_HANDLED;
}
/*****************************************************************************\
@@ -3251,7 +3281,6 @@ int sdhci_resume_host(struct sdhci_host *host)
mmc->ops->set_ios(mmc, &mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
- mmiowb();
}
if (host->irq_wake_enabled) {
@@ -3391,7 +3420,6 @@ void sdhci_cqe_enable(struct mmc_host *mmc)
mmc_hostname(mmc), host->ier,
sdhci_readl(host, SDHCI_INT_STATUS));
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
@@ -3416,7 +3444,6 @@ void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
mmc_hostname(mmc), host->ier,
sdhci_readl(host, SDHCI_INT_STATUS));
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
@@ -3494,6 +3521,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
host->tuning_delay = -1;
+ host->tuning_loop_count = MAX_TUNING_LOOP;
host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
@@ -4224,14 +4252,15 @@ EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
int __sdhci_add_host(struct sdhci_host *host)
{
+ unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
struct mmc_host *mmc = host->mmc;
int ret;
- /*
- * Init tasklets.
- */
- tasklet_init(&host->finish_tasklet,
- sdhci_tasklet_finish, (unsigned long)host);
+ host->complete_wq = alloc_workqueue("sdhci", flags, 0);
+ if (!host->complete_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&host->complete_work, sdhci_complete_work);
timer_setup(&host->timer, sdhci_timeout_timer, 0);
timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
@@ -4245,7 +4274,7 @@ int __sdhci_add_host(struct sdhci_host *host)
if (ret) {
pr_err("%s: Failed to request IRQ %d: %d\n",
mmc_hostname(mmc), host->irq, ret);
- goto untasklet;
+ goto unwq;
}
ret = sdhci_led_register(host);
@@ -4255,8 +4284,6 @@ int __sdhci_add_host(struct sdhci_host *host)
goto unirq;
}
- mmiowb();
-
ret = mmc_add_host(mmc);
if (ret)
goto unled;
@@ -4278,8 +4305,8 @@ unirq:
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
-untasklet:
- tasklet_kill(&host->finish_tasklet);
+unwq:
+ destroy_workqueue(host->complete_wq);
return ret;
}
@@ -4341,7 +4368,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
del_timer_sync(&host->timer);
del_timer_sync(&host->data_timer);
- tasklet_kill(&host->finish_tasklet);
+ destroy_workqueue(host->complete_wq);
if (!IS_ERR(mmc->supply.vqmmc))
regulator_disable(mmc->supply.vqmmc);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 01002cba1359..d6bcc584c92b 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -560,7 +560,8 @@ struct sdhci_host {
unsigned int desc_sz; /* ADMA descriptor size */
- struct tasklet_struct finish_tasklet; /* Tasklet structures */
+ struct workqueue_struct *complete_wq; /* Request completion wq */
+ struct work_struct complete_work; /* Request completion work */
struct timer_list timer; /* Timer for timeouts */
struct timer_list data_timer; /* Timer for data timeouts */
@@ -596,6 +597,7 @@ struct sdhci_host {
#define SDHCI_TUNING_MODE_3 2
/* Delay (ms) between tuning commands */
int tuning_delay;
+ int tuning_loop_count;
/* Host SDMA buffer boundary. */
u32 sdma_boundary;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index eea183e90f1b..a91c0b45c48d 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -158,6 +158,27 @@ static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
+static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
+{
+ unsigned char timing = host->mmc->ios.timing;
+
+ if (reg == SDHCI_HOST_CONTROL) {
+ switch (timing) {
+ /*
+ * According to the data manual, HISPD bit
+ * should not be set in these speed modes.
+ */
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ val &= ~SDHCI_CTRL_HISPD;
+ }
+ }
+
+ writeb(val, host->ioaddr + reg);
+}
+
static struct sdhci_ops sdhci_am654_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -165,6 +186,7 @@ static struct sdhci_ops sdhci_am654_ops = {
.set_bus_width = sdhci_set_bus_width,
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
+ .write_b = sdhci_am654_write_b,
.reset = sdhci_reset,
};
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index b6644ce296b2..35dd34b82a4d 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -889,7 +889,6 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
struct tifm_dev *sock = host->dev;
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
- mmiowb();
host->clk_div = 61;
host->clk_freq = 20000000;
writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
@@ -940,7 +939,6 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
| TIFM_MMCSD_ERRMASK,
sock->addr + SOCK_MMCSD_INT_ENABLE);
- mmiowb();
return 0;
}
@@ -1005,7 +1003,6 @@ static void tifm_sd_remove(struct tifm_dev *sock)
spin_lock_irqsave(&sock->lock, flags);
host->eject = 1;
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
- mmiowb();
spin_unlock_irqrestore(&sock->lock, flags);
tasklet_kill(&host->finish_tasklet);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 2adb0d24360f..c5ba13fae399 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -4,8 +4,8 @@
*
* TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
*
- * Copyright (C) 2015-17 Renesas Electronics Corporation
- * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-19 Renesas Electronics Corporation
+ * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
@@ -105,6 +105,8 @@
TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+#define TMIO_MAX_BLK_SIZE 512
+
struct tmio_mmc_data;
struct tmio_mmc_host;
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 595949f1f001..130b91cb0f8a 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -4,8 +4,8 @@
*
* TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
*
- * Copyright (C) 2015-17 Renesas Electronics Corporation
- * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-19 Renesas Electronics Corporation
+ * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
* Copyright (C) 2017 Horms Solutions, Simon Horman
* Copyright (C) 2011 Guennadi Liakhovetski
* Copyright (C) 2007 Ian Molton
@@ -1186,7 +1186,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = pdata->max_segs ? : 32;
- mmc->max_blk_size = 512;
+ mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
mmc->max_blk_count = pdata->max_blk_count ? :
(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index cd8b1b9d4d8a..b11ac2314328 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1339,7 +1339,7 @@ static int usdhi6_stop_cmd(struct usdhi6_host *host)
host->wait = USDHI6_WAIT_FOR_STOP;
return 0;
}
- /* Unsupported STOP command */
+ /* fall through - Unsupported STOP command. */
default:
dev_err(mmc_dev(host->mmc),
"unsupported stop CMD%d for CMD%d\n",
@@ -1687,7 +1687,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
switch (host->wait) {
default:
dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
- /* mrq can be NULL in this actually impossible case */
+ /* fall through - mrq can be NULL, but that is impossible here. */
case USDHI6_WAIT_FOR_CMD:
usdhi6_error_code(host);
if (mrq)
@@ -1709,10 +1709,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
host->offset, data->blocks, data->blksz, data->sg_len,
sg_dma_len(sg), sg->offset);
usdhi6_sg_unmap(host, true);
- /*
- * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped
- * the page
- */
+ /* fall through - page unmapped in USDHI6_WAIT_FOR_DATA_END. */
case USDHI6_WAIT_FOR_DATA_END:
usdhi6_error_code(host);
data->error = -ETIMEDOUT;
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 32c4211506fc..412395ac2935 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -686,7 +686,6 @@ static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
via_sdc_send_command(host, mrq->cmd);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -711,7 +710,6 @@ static void via_sdc_set_power(struct via_crdr_mmc_host *host,
gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_pwron_sleep(host);
@@ -770,7 +768,6 @@ static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
if (ios->power_mode != MMC_POWER_OFF)
@@ -830,7 +827,6 @@ static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
via_restore_pcictrlreg(host);
via_restore_sdcreg(host);
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -925,7 +921,6 @@ static irqreturn_t via_sdc_isr(int irq, void *dev_id)
result = IRQ_HANDLED;
- mmiowb();
out:
spin_unlock(&sdhost->lock);
@@ -960,7 +955,6 @@ static void via_sdc_timeout(struct timer_list *t)
}
}
- mmiowb();
spin_unlock_irqrestore(&sdhost->lock, flags);
}
@@ -1012,7 +1006,6 @@ static void via_sdc_card_detect(struct work_struct *work)
tasklet_schedule(&host->finish_tasklet);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_reset_pcictrl(host);
@@ -1020,7 +1013,6 @@ static void via_sdc_card_detect(struct work_struct *work)
spin_lock_irqsave(&host->lock, flags);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_print_pcictrl(host);
@@ -1188,7 +1180,6 @@ static void via_sd_remove(struct pci_dev *pcidev)
/* Disable generating further interrupts */
writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
- mmiowb();
if (sdhost->mrq) {
pr_err("%s: Controller removed during "
@@ -1197,7 +1188,6 @@ static void via_sd_remove(struct pci_dev *pcidev)
/* make sure all DMA is stopped */
writel(VIA_CRDR_DMACTRL_SFTRST,
sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
- mmiowb();
sdhost->mrq->cmd->error = -ENOMEDIUM;
if (sdhost->mrq->stop)
sdhost->mrq->stop->error = -ENOMEDIUM;
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 79a8ff542883..fb31a7f649a3 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -60,22 +60,6 @@ config MTD_CMDLINE_PARTS
If unsure, say 'N'.
-config MTD_AFS_PARTS
- tristate "ARM Firmware Suite partition parsing"
- depends on (ARM || ARM64)
- help
- The ARM Firmware Suite allows the user to divide flash devices into
- multiple 'images'. Each such image has a header containing its name
- and offset/size etc.
-
- If you need code which can detect and parse these tables, and
- register MTD 'partitions' corresponding to each image detected,
- enable this option.
-
- You will still need the parsing functions to be called by the driver
- for your particular device. It won't happen automatically. The
- 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
-
config MTD_OF_PARTS
tristate "OpenFirmware partitioning information support"
default y
@@ -94,6 +78,7 @@ config MTD_BCM63XX_PARTS
tristate "BCM63XX CFE partitioning support"
depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
select CRC32
+ select MTD_PARSER_IMAGETAG
help
This provides partition parsing for BCM63xx devices with CFE
bootloaders.
@@ -230,12 +215,11 @@ config SSFDC
This enables read only access to SmartMedia formatted NAND
flash. You can mount it with FAT file system.
-
config SM_FTL
tristate "SmartMedia/xD new translation layer"
depends on BLOCK
select MTD_BLKDEVS
- select MTD_NAND_ECC
+ select MTD_NAND_ECC_SW_HAMMING
help
This enables EXPERIMENTAL R/W support for SmartMedia/xD
FTL (Flash translation layer).
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 58fc327a5276..806287e80e84 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -9,7 +9,6 @@ mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
-obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
deleted file mode 100644
index d61b7edfc938..000000000000
--- a/drivers/mtd/afs.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*======================================================================
-
- drivers/mtd/afs.c: ARM Flash Layout/Partitioning
-
- Copyright © 2000 ARM Limited
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- This is access code for flashes using ARM's flash partitioning
- standards.
-
-======================================================================*/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#define AFSV1_FOOTER_MAGIC 0xA0FFFF9F
-
-struct footer_v1 {
- u32 image_info_base; /* Address of first word of ImageFooter */
- u32 image_start; /* Start of area reserved by this footer */
- u32 signature; /* 'Magic' number proves it's a footer */
- u32 type; /* Area type: ARM Image, SIB, customer */
- u32 checksum; /* Just this structure */
-};
-
-struct image_info_v1 {
- u32 bootFlags; /* Boot flags, compression etc. */
- u32 imageNumber; /* Unique number, selects for boot etc. */
- u32 loadAddress; /* Address program should be loaded to */
- u32 length; /* Actual size of image */
- u32 address; /* Image is executed from here */
- char name[16]; /* Null terminated */
- u32 headerBase; /* Flash Address of any stripped header */
- u32 header_length; /* Length of header in memory */
- u32 headerType; /* AIF, RLF, s-record etc. */
- u32 checksum; /* Image checksum (inc. this struct) */
-};
-
-static u32 word_sum(void *words, int num)
-{
- u32 *p = words;
- u32 sum = 0;
-
- while (num--)
- sum += *p++;
-
- return sum;
-}
-
-static int
-afs_read_footer_v1(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
- u_int off, u_int mask)
-{
- struct footer_v1 fs;
- u_int ptr = off + mtd->erasesize - sizeof(fs);
- size_t sz;
- int ret;
-
- ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
- if (ret >= 0 && sz != sizeof(fs))
- ret = -EINVAL;
-
- if (ret < 0) {
- printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
- ptr, ret);
- return ret;
- }
-
- /*
- * Does it contain the magic number?
- */
- if (fs.signature != AFSV1_FOOTER_MAGIC)
- return 0;
-
- /*
- * Check the checksum.
- */
- if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
- return 0;
-
- /*
- * Don't touch the SIB.
- */
- if (fs.type == 2)
- return 0;
-
- *iis_start = fs.image_info_base & mask;
- *img_start = fs.image_start & mask;
-
- /*
- * Check the image info base. This can not
- * be located after the footer structure.
- */
- if (*iis_start >= ptr)
- return 0;
-
- /*
- * Check the start of this image. The image
- * data can not be located after this block.
- */
- if (*img_start > off)
- return 0;
-
- return 1;
-}
-
-static int
-afs_read_iis_v1(struct mtd_info *mtd, struct image_info_v1 *iis, u_int ptr)
-{
- size_t sz;
- int ret, i;
-
- memset(iis, 0, sizeof(*iis));
- ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
- if (ret < 0)
- goto failed;
-
- if (sz != sizeof(*iis)) {
- ret = -EINVAL;
- goto failed;
- }
-
- ret = 0;
-
- /*
- * Validate the name - it must be NUL terminated.
- */
- for (i = 0; i < sizeof(iis->name); i++)
- if (iis->name[i] == '\0')
- break;
-
- if (i < sizeof(iis->name))
- ret = 1;
-
- return ret;
-
- failed:
- printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
- ptr, ret);
- return ret;
-}
-
-static int parse_afs_partitions(struct mtd_info *mtd,
- const struct mtd_partition **pparts,
- struct mtd_part_parser_data *data)
-{
- struct mtd_partition *parts;
- u_int mask, off, idx, sz;
- int ret = 0;
- char *str;
-
- /*
- * This is the address mask; we use this to mask off out of
- * range address bits.
- */
- mask = mtd->size - 1;
-
- /*
- * First, calculate the size of the array we need for the
- * partition information. We include in this the size of
- * the strings.
- */
- for (idx = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
- struct image_info_v1 iis;
- u_int iis_ptr, img_ptr;
-
- ret = afs_read_footer_v1(mtd, &img_ptr, &iis_ptr, off, mask);
- if (ret < 0)
- break;
- if (ret) {
- ret = afs_read_iis_v1(mtd, &iis, iis_ptr);
- if (ret < 0)
- break;
- if (ret == 0)
- continue;
-
- sz += sizeof(struct mtd_partition);
- sz += strlen(iis.name) + 1;
- idx += 1;
- }
- }
-
- if (!sz)
- return ret;
-
- parts = kzalloc(sz, GFP_KERNEL);
- if (!parts)
- return -ENOMEM;
-
- str = (char *)(parts + idx);
-
- /*
- * Identify the partitions
- */
- for (idx = off = 0; off < mtd->size; off += mtd->erasesize) {
- struct image_info_v1 iis;
- u_int iis_ptr, img_ptr;
-
- /* Read the footer. */
- ret = afs_read_footer_v1(mtd, &img_ptr, &iis_ptr, off, mask);
- if (ret < 0)
- break;
- if (ret == 0)
- continue;
-
- /* Read the image info block */
- ret = afs_read_iis_v1(mtd, &iis, iis_ptr);
- if (ret < 0)
- break;
- if (ret == 0)
- continue;
-
- strcpy(str, iis.name);
-
- parts[idx].name = str;
- parts[idx].size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1);
- parts[idx].offset = img_ptr;
- parts[idx].mask_flags = 0;
-
- printk(" mtd%d: at 0x%08x, %5lluKiB, %8u, %s\n",
- idx, img_ptr, parts[idx].size / 1024,
- iis.imageNumber, str);
-
- idx += 1;
- str = str + strlen(iis.name) + 1;
- }
-
- if (!idx) {
- kfree(parts);
- parts = NULL;
- }
-
- *pparts = parts;
- return idx ? idx : ret;
-}
-
-static struct mtd_part_parser afs_parser = {
- .parse_fn = parse_afs_partitions,
- .name = "afs",
-};
-module_mtd_part_parser(afs_parser);
-
-MODULE_AUTHOR("ARM Ltd");
-MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 41d1d3149c61..b2bd04764e95 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -34,6 +34,7 @@
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
@@ -93,51 +94,19 @@ static int bcm63xx_read_nvram(struct mtd_info *master,
return 0;
}
-static int bcm63xx_read_image_tag(struct mtd_info *master, const char *name,
- loff_t tag_offset, struct bcm_tag *buf)
-{
- int ret;
- size_t retlen;
- u32 computed_crc;
-
- ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
- if (ret)
- return ret;
-
- if (retlen != sizeof(*buf))
- return -EIO;
-
- computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
- offsetof(struct bcm_tag, header_crc));
- if (computed_crc == buf->header_crc) {
- STR_NULL_TERMINATE(buf->board_id);
- STR_NULL_TERMINATE(buf->tag_version);
-
- pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
- name, tag_offset, buf->tag_version, buf->board_id);
-
- return 0;
- }
-
- pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
- name, tag_offset, buf->header_crc, computed_crc);
- return 1;
-}
+static const char * const bcm63xx_cfe_part_types[] = {
+ "bcm963xx-imagetag",
+ NULL,
+};
static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
const struct mtd_partition **pparts, struct bcm963xx_nvram *nvram)
{
- /* CFE, NVRAM and global Linux are always present */
- int nrparts = 3, curpart = 0;
- struct bcm_tag *buf = NULL;
struct mtd_partition *parts;
- int ret;
- unsigned int rootfsaddr, kerneladdr, spareaddr;
- unsigned int rootfslen, kernellen, sparelen, totallen;
+ int nrparts = 3, curpart = 0;
unsigned int cfelen, nvramlen;
unsigned int cfe_erasesize;
int i;
- bool rootfs_first = false;
cfe_erasesize = max_t(uint32_t, master->erasesize,
BCM963XX_CFE_BLOCK_SIZE);
@@ -146,83 +115,9 @@ static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
nvramlen = nvram->psi_size * SZ_1K;
nvramlen = roundup(nvramlen, cfe_erasesize);
- buf = vmalloc(sizeof(struct bcm_tag));
- if (!buf)
- return -ENOMEM;
-
- /* Get the tag */
- ret = bcm63xx_read_image_tag(master, "rootfs", cfelen, buf);
- if (!ret) {
- STR_NULL_TERMINATE(buf->flash_image_start);
- if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
- rootfsaddr < BCM963XX_EXTENDED_SIZE) {
- pr_err("invalid rootfs address: %*ph\n",
- (int)sizeof(buf->flash_image_start),
- buf->flash_image_start);
- goto invalid_tag;
- }
-
- STR_NULL_TERMINATE(buf->kernel_address);
- if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
- kerneladdr < BCM963XX_EXTENDED_SIZE) {
- pr_err("invalid kernel address: %*ph\n",
- (int)sizeof(buf->kernel_address),
- buf->kernel_address);
- goto invalid_tag;
- }
-
- STR_NULL_TERMINATE(buf->kernel_length);
- if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
- pr_err("invalid kernel length: %*ph\n",
- (int)sizeof(buf->kernel_length),
- buf->kernel_length);
- goto invalid_tag;
- }
-
- STR_NULL_TERMINATE(buf->total_length);
- if (kstrtouint(buf->total_length, 10, &totallen)) {
- pr_err("invalid total length: %*ph\n",
- (int)sizeof(buf->total_length),
- buf->total_length);
- goto invalid_tag;
- }
-
- kerneladdr = kerneladdr - BCM963XX_EXTENDED_SIZE;
- rootfsaddr = rootfsaddr - BCM963XX_EXTENDED_SIZE;
- spareaddr = roundup(totallen, master->erasesize) + cfelen;
-
- if (rootfsaddr < kerneladdr) {
- /* default Broadcom layout */
- rootfslen = kerneladdr - rootfsaddr;
- rootfs_first = true;
- } else {
- /* OpenWrt layout */
- rootfsaddr = kerneladdr + kernellen;
- rootfslen = spareaddr - rootfsaddr;
- }
- } else if (ret > 0) {
-invalid_tag:
- kernellen = 0;
- rootfslen = 0;
- rootfsaddr = 0;
- spareaddr = cfelen;
- } else {
- goto out;
- }
- sparelen = master->size - spareaddr - nvramlen;
-
- /* Determine number of partitions */
- if (rootfslen > 0)
- nrparts++;
-
- if (kernellen > 0)
- nrparts++;
-
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
- if (!parts) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!parts)
+ return -ENOMEM;
/* Start building partition list */
parts[curpart].name = "CFE";
@@ -230,30 +125,6 @@ invalid_tag:
parts[curpart].size = cfelen;
curpart++;
- if (kernellen > 0) {
- int kernelpart = curpart;
-
- if (rootfslen > 0 && rootfs_first)
- kernelpart++;
- parts[kernelpart].name = "kernel";
- parts[kernelpart].offset = kerneladdr;
- parts[kernelpart].size = kernellen;
- curpart++;
- }
-
- if (rootfslen > 0) {
- int rootfspart = curpart;
-
- if (kernellen > 0 && rootfs_first)
- rootfspart--;
- parts[rootfspart].name = "rootfs";
- parts[rootfspart].offset = rootfsaddr;
- parts[rootfspart].size = rootfslen;
- if (sparelen > 0 && !rootfs_first)
- parts[rootfspart].size += sparelen;
- curpart++;
- }
-
parts[curpart].name = "nvram";
parts[curpart].offset = master->size - nvramlen;
parts[curpart].size = nvramlen;
@@ -263,22 +134,13 @@ invalid_tag:
parts[curpart].name = "linux";
parts[curpart].offset = cfelen;
parts[curpart].size = master->size - cfelen - nvramlen;
+ parts[curpart].types = bcm63xx_cfe_part_types;
for (i = 0; i < nrparts; i++)
pr_info("Partition %d is %s offset %llx and length %llx\n", i,
parts[i].name, parts[i].offset, parts[i].size);
- pr_info("Spare partition is offset %x and length %x\n", spareaddr,
- sparelen);
-
*pparts = parts;
- ret = 0;
-
-out:
- vfree(buf);
-
- if (ret)
- return ret;
return nrparts;
}
@@ -311,9 +173,16 @@ out:
return ret;
};
+static const struct of_device_id parse_bcm63xx_cfe_match_table[] = {
+ { .compatible = "brcm,bcm963xx-cfe-nor-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, parse_bcm63xx_cfe_match_table);
+
static struct mtd_part_parser bcm63xx_cfe_parser = {
.parse_fn = bcm63xx_parse_cfe_partitions,
.name = "bcm63xxpart",
+ .of_match_table = parse_bcm63xx_cfe_match_table,
};
module_mtd_part_parser(bcm63xx_cfe_parser);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 72428b6bfc47..c8fa5906bdf9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -869,6 +869,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
+ /* fall through */
default:
sleep:
@@ -1876,7 +1877,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
continue;
}
- if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+ /*
+ * Check "time_after" together with "!chip_good" before the bare
+ * "chip_good" test below, so that a scheduling delay between the
+ * two reads is not misreported as a timeout.
+ */
+ if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
break;
if (chip_good(map, adr, datum)) {
@@ -2747,6 +2752,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
+ /* fall through */
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
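
The reworked timeout test above samples the chip status inside the same condition as the deadline, so being scheduled out past the deadline is not reported as a failure when the operation actually completed. A self-contained sketch of that polling pattern in plain C (chip_good() is simulated with a tick counter here; it is not the kernel's chip_good(map, adr, datum)):

#include <stdbool.h>
#include <stdio.h>

static unsigned long ticks;		/* simulated time source */
static unsigned long op_done_at;	/* simulated "flash" finishes at this tick */

static unsigned long now(void)
{
	return ticks++;			/* every read advances time */
}

static bool chip_good(void)		/* stands in for chip_good(map, adr, datum) */
{
	return ticks >= op_done_at;
}

static bool wait_ready(unsigned long timeout_ticks)
{
	unsigned long deadline = now() + timeout_ticks;

	for (;;) {
		/*
		 * Timeout and status are tested together: if we are pushed
		 * past the deadline while the operation has in fact
		 * completed, that is still a success, not a timeout.
		 */
		if (now() > deadline && !chip_good())
			return false;	/* genuine timeout */
		if (chip_good())
			return true;	/* completed, possibly late */
	}
}

int main(void)
{
	op_done_at = 50;
	printf("short wait: %s\n", wait_ready(200) ? "ok" : "timed out");

	ticks = 0;
	op_done_at = 1000;
	printf("long wait:  %s\n", wait_ready(10) ? "ok" : "timed out");
	return 0;
}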
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 6f16552cd59f..e3b266ee06af 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -109,10 +109,13 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi
case 8:
onecmd |= (onecmd << (chip_mode * 32));
#endif
+ /* fall through */
case 4:
onecmd |= (onecmd << (chip_mode * 16));
+ /* fall through */
case 2:
onecmd |= (onecmd << (chip_mode * 8));
+ /* fall through */
case 1:
;
}
@@ -162,10 +165,13 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map,
case 8:
res |= (onestat >> (chip_mode * 32));
#endif
+ /* fall through */
case 4:
res |= (onestat >> (chip_mode * 16));
+ /* fall through */
case 2:
res |= (onestat >> (chip_mode * 8));
+ /* fall through */
case 1:
;
}
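
The cascading fall-throughs annotated above implement command replication: each case widens the populated part of the bus word and deliberately drops into the next. A simplified stand-alone version of that replication (it mirrors the shape of cfi_build_cmd(), not its exact chip_mode-based shifts):

#include <stdint.h>
#include <stdio.h>

/*
 * Replicate an 8-bit command across every byte lane of a bus word: each
 * case doubles the populated width and deliberately falls through.
 */
static uint64_t replicate_cmd(uint8_t cmd, unsigned int word_bytes)
{
	uint64_t word = cmd;

	switch (word_bytes) {
	case 8:
		word |= word << 32;
		/* fall through */
	case 4:
		word |= word << 16;
		/* fall through */
	case 2:
		word |= word << 8;
		/* fall through */
	case 1:
		break;
	}

	return word;
}

int main(void)
{
	/* Prints 0xf0f0f0f0f0f0f0f0 for an 8-byte-wide bus word. */
	printf("0x%016llx\n", (unsigned long long)replicate_cmd(0xf0, 8));
	return 0;
}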
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index aa983422aa97..f9258d666846 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
- select BCH_CONST_PARAMS if !MTD_NAND_BCH
+ select BCH_CONST_PARAMS if !MTD_NAND_ECC_SW_BCH
select BITREVERSE
help
This provides an MTD device driver for the M-Systems DiskOnChip
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 9ee04b5f9311..8a8627c30aed 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -147,8 +147,10 @@ static int parse_num64(uint64_t *num64, char *token)
switch (token[len - 2]) {
case 'G':
shift += 10;
+ /* fall through */
case 'M':
shift += 10;
+ /* fall through */
case 'k':
shift += 10;
token[len - 2] = 0;
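
parse_num64() above relies on the same deliberate fall-through so that a 'G' suffix accumulates three shifts of 10, 'M' two and 'k' one. A stand-alone sketch of that suffix-parsing idiom (the single-character suffixes and the parse_size() name are illustrative; the driver's parse_num64() operates on two-character 'ki'/'Mi'/'Gi' endings):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "<number>[k|M|G]" into bytes; returns 0 on success. */
static int parse_size(const char *token, uint64_t *out)
{
	size_t len = strlen(token);
	int shift = 0;
	char *end;

	if (len == 0)
		return -1;

	switch (token[len - 1]) {
	case 'G':
		shift += 10;
		/* fall through */
	case 'M':
		shift += 10;
		/* fall through */
	case 'k':
		shift += 10;
		len--;		/* drop the suffix */
		break;
	default:
		break;		/* plain number of bytes */
	}

	*out = strtoull(token, &end, 0) << shift;
	return (end == token || (size_t)(end - token) != len) ? -1 : 0;
}

int main(void)
{
	uint64_t bytes;

	if (parse_size("4M", &bytes) == 0)
		printf("4M = %llu bytes\n", (unsigned long long)bytes);
	return 0;
}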
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index b13557fe52bd..76a4c73e100e 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -318,6 +318,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
+ /* fall through */
default:
sleep:
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index e0cf869c8544..544ed1931843 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -10,7 +10,7 @@ config MTD_COMPLEX_MAPPINGS
config MTD_PHYSMAP
tristate "Flash device in physical memory map"
- depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_LPDDR
+ depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_RAM || MTD_LPDDR
help
This provides a 'mapping' driver which allows the NOR Flash and
ROM driver code to communicate with chips which are mapped
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index d9a3e4bebe5d..21b556afc305 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -132,6 +132,8 @@ static void physmap_set_addr_gpios(struct physmap_flash_info *info,
gpiod_set_value(info->gpios->desc[i], !!(BIT(i) & ofs));
}
+
+ info->gpio_values = ofs;
}
#define win_mask(order) (BIT(order) - 1)
diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c
index 60775b208fc9..a289c8b5cabf 100644
--- a/drivers/mtd/maps/physmap-gemini.c
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -86,7 +86,7 @@ static void gemini_flash_disable_pins(void)
static map_word __xipram gemini_flash_map_read(struct map_info *map,
unsigned long ofs)
{
- map_word __xipram ret;
+ map_word ret;
gemini_flash_enable_pins();
ret = inline_map_read(map, ofs);
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index fd5fe12d7461..893239629d6b 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -20,7 +20,7 @@
#include <linux/mtd/concat.h>
#include <mach/hardware.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/flash.h>
struct sa_subdev_info {
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index aef030ca8601..de4c46318abb 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -31,13 +31,7 @@
#define MAP_NAME "ram"
#endif
-/*
- * Blackfin uses uclinux_ram_map during startup, so it must not be static.
- * Provide a dummy declaration to make sparse happy.
- */
-extern struct map_info uclinux_ram_map;
-
-struct map_info uclinux_ram_map = {
+static struct map_info uclinux_ram_map = {
.name = MAP_NAME,
.size = 0,
};
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 37f174ccbcec..dfa241ad018b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -572,7 +572,7 @@ static ssize_t mtd_partition_offset_show(struct device *dev,
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_part *part = mtd_to_part(mtd);
- return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset);
}
static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 9033215e62ea..495751ed3fd7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,6 +2,5 @@ config MTD_NAND_CORE
tristate
source "drivers/mtd/nand/onenand/Kconfig"
-
source "drivers/mtd/nand/raw/Kconfig"
source "drivers/mtd/nand/spi/Kconfig"
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index 9c9f8936b63b..b6de955ac8bf 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -174,6 +174,40 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
/**
+ * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks on
+ * a specific region of the NAND device
+ * @mtd: MTD device
+ * @offs: offset of the NAND region
+ * @len: length of the NAND region
+ *
+ * Default implementation for mtd->_max_bad_blocks(). Only works if
+ * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
+ *
+ * Return: a positive number encoding the maximum number of bad eraseblocks
+ * that can exist on the specified portion of memory, a negative error code otherwise.
+ */
+int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos, end;
+ unsigned int max_bb = 0;
+
+ if (!nand->memorg.max_bad_eraseblocks_per_lun)
+ return -ENOTSUPP;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ nanddev_offs_to_pos(nand, offs + len, &end);
+
+ for (nanddev_offs_to_pos(nand, offs, &pos);
+ nanddev_pos_cmp(&pos, &end) < 0;
+ nanddev_pos_next_lun(nand, &pos))
+ max_bb += nand->memorg.max_bad_eraseblocks_per_lun;
+
+ return max_bb;
+}
+EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
+
+/**
* nanddev_init() - Initialize a NAND device
* @nand: NAND device
* @ops: NAND device operations
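
nanddev_mtd_max_bad_blocks() above sums the per-LUN worst-case budget for every LUN the requested range touches. A stand-alone sketch of that accounting with a simplified geometry structure (struct nand_geom and the example sizes are illustrative, not the kernel's struct nand_device/nand_pos API):

#include <stdint.h>
#include <stdio.h>

/* Simplified NAND geometry; stands in for struct nand_memory_organization. */
struct nand_geom {
	uint64_t lun_size;			/* bytes addressable per LUN */
	unsigned int max_bad_eraseblocks_per_lun;
};

/* Worst-case number of bad eraseblocks in [offs, offs + len). */
static int max_bad_blocks(const struct nand_geom *g, uint64_t offs, uint64_t len)
{
	uint64_t first_lun, last_lun, lun;
	unsigned int max_bb = 0;

	if (!g->max_bad_eraseblocks_per_lun)
		return -1;	/* no per-LUN budget advertised */
	if (!len)
		return 0;

	first_lun = offs / g->lun_size;
	last_lun = (offs + len - 1) / g->lun_size;

	/* Every LUN the range touches contributes its full budget. */
	for (lun = first_lun; lun <= last_lun; lun++)
		max_bb += g->max_bad_eraseblocks_per_lun;

	return max_bb;
}

int main(void)
{
	struct nand_geom g = {
		.lun_size = 512ULL << 20,	/* 512 MiB per LUN */
		.max_bad_eraseblocks_per_lun = 20,
	};

	/* A 768 MiB region starting at 0 spans two LUNs: 2 * 20 = 40. */
	printf("max bad blocks: %d\n", max_bad_blocks(&g, 0, 768ULL << 20));
	return 0;
}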
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 4ca4b194e7d7..f41d76248550 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -2458,7 +2458,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* We write two bytes, so we don't have to mess with 16-bit access */
- ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
+ ofs += mtd->oobsize + (this->badblockpos & ~0x01);
/* FIXME : What to do when marking SLC block in partition
* with MLC erasesize? For now, it is not advisable to
* create partitions containing both SLC and MLC regions.
@@ -3967,6 +3967,9 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING))
this->unlock_all(mtd);
+ /* Set the bad block marker position */
+ this->badblockpos = ONENAND_BADBLOCK_POS;
+
ret = this->scan_bbt(mtd);
if ((!FLEXONENAND(this)) || ret)
return ret;
diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c
index dde20487937d..57c31c81be18 100644
--- a/drivers/mtd/nand/onenand/onenand_bbt.c
+++ b/drivers/mtd/nand/onenand/onenand_bbt.c
@@ -190,9 +190,6 @@ static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
if (!bbm->bbt)
return -ENOMEM;
- /* Set the bad block position */
- bbm->badblockpos = ONENAND_BADBLOCK_POS;
-
/* Set erase shift */
bbm->bbt_erase_shift = this->erase_shift;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index e604625e2dfa..0500c42f31cb 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -1,34 +1,29 @@
-config MTD_NAND_ECC
+config MTD_NAND_ECC_SW_HAMMING
tristate
-config MTD_NAND_ECC_SMC
+config MTD_NAND_ECC_SW_HAMMING_SMC
bool "NAND ECC Smart Media byte order"
- depends on MTD_NAND_ECC
+ depends on MTD_NAND_ECC_SW_HAMMING
default n
help
Software ECC according to the Smart Media Specification.
The original Linux implementation had byte 0 and 1 swapped.
-
-menuconfig MTD_NAND
+menuconfig MTD_RAW_NAND
tristate "Raw/Parallel NAND Device Support"
depends on MTD
- select MTD_NAND_ECC
+ select MTD_NAND_CORE
+ select MTD_NAND_ECC_SW_HAMMING
help
This enables support for accessing all types of raw/parallel
NAND flash devices. For further information see
<http://www.linux-mtd.infradead.org/doc/nand.html>.
-if MTD_NAND
+if MTD_RAW_NAND
-config MTD_NAND_BCH
- tristate
- select BCH
- depends on MTD_NAND_ECC_BCH
- default MTD_NAND
-
-config MTD_NAND_ECC_BCH
+config MTD_NAND_ECC_SW_BCH
bool "Support software BCH ECC"
+ select BCH
default n
help
This enables support for software BCH error correction. Binary BCH
@@ -36,15 +31,13 @@ config MTD_NAND_ECC_BCH
ECC codes. They are used with NAND devices requiring more than 1 bit
of error correction.
-config MTD_SM_COMMON
- tristate
- default n
+comment "Raw/parallel NAND flash controllers"
config MTD_NAND_DENALI
tristate
config MTD_NAND_DENALI_PCI
- tristate "Support Denali NAND controller on Intel Moorestown"
+ tristate "Denali NAND controller on Intel Moorestown"
select MTD_NAND_DENALI
depends on PCI
help
@@ -52,31 +45,22 @@ config MTD_NAND_DENALI_PCI
Denali NAND controller core.
config MTD_NAND_DENALI_DT
- tristate "Support Denali NAND controller as a DT device"
+ tristate "Denali NAND controller as a DT device"
select MTD_NAND_DENALI
depends on HAS_DMA && HAVE_CLK && OF
help
Enable the driver for NAND flash on platforms using a Denali NAND
controller as a DT device.
-config MTD_NAND_GPIO
- tristate "GPIO assisted NAND Flash driver"
- depends on GPIOLIB || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables a NAND flash driver where control signals are
- connected to GPIO pins, and commands and data are communicated
- via a memory mapped interface.
-
config MTD_NAND_AMS_DELTA
- tristate "NAND Flash device on Amstrad E3"
+ tristate "Amstrad E3 NAND controller"
depends on MACH_AMS_DELTA || COMPILE_TEST
default y
help
Support for NAND flash on Amstrad E3 (Delta).
config MTD_NAND_OMAP2
- tristate "NAND Flash device on OMAP2, OMAP3, OMAP4 and Keystone"
+ tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -98,18 +82,6 @@ config MTD_NAND_OMAP_BCH
config MTD_NAND_OMAP_BCH_BUILD
def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
-config MTD_NAND_RICOH
- tristate "Ricoh xD card reader"
- default n
- depends on PCI
- select MTD_SM_COMMON
- help
- Enable support for Ricoh R5C852 xD card reader
- You also need to enable ether
- NAND SSFDC (SmartMedia) read only translation layer' or new
- expermental, readwrite
- 'SmartMedia/xD new translation layer'
-
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
depends on MIPS_ALCHEMY
@@ -117,8 +89,15 @@ config MTD_NAND_AU1550
This enables the driver for the NAND flash controller on the
AMD/Alchemy 1550 SOC.
+config MTD_NAND_NDFC
+ tristate "IBM/MCC 4xx NAND controller"
+ depends on 4xx
+ select MTD_NAND_ECC_SW_HAMMING_SMC
+ help
+ NDFC NAND Flash Controllers are integrated in IBM/AMCC's 4xx SoCs.
+
config MTD_NAND_S3C2410
- tristate "NAND Flash support for Samsung S3C SoCs"
+ tristate "Samsung S3C NAND controller"
depends on ARCH_S3C24XX || ARCH_S3C64XX
help
This enables the NAND flash controller on the S3C24xx and S3C64xx
@@ -128,18 +107,11 @@ config MTD_NAND_S3C2410
must advertise a platform_device for the driver to attach.
config MTD_NAND_S3C2410_DEBUG
- bool "Samsung S3C NAND driver debug"
+ bool "Samsung S3C NAND controller debug"
depends on MTD_NAND_S3C2410
help
Enable debugging of the S3C NAND driver
-config MTD_NAND_NDFC
- tristate "NDFC NanD Flash Controller"
- depends on 4xx
- select MTD_NAND_ECC_SMC
- help
- NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
-
config MTD_NAND_S3C2410_CLKSTOP
bool "Samsung S3C NAND IDLE clock stop"
depends on MTD_NAND_S3C2410
@@ -151,89 +123,19 @@ config MTD_NAND_S3C2410_CLKSTOP
approximately 5mA of power when there is nothing happening.
config MTD_NAND_TANGO
- tristate "NAND Flash support for Tango chips"
+ tristate "Tango NAND controller"
depends on ARCH_TANGO || COMPILE_TEST
depends on HAS_IOMEM
help
Enables the NAND Flash controller on Tango chips.
-config MTD_NAND_DISKONCHIP
- tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
- depends on HAS_IOMEM
- select REED_SOLOMON
- select REED_SOLOMON_DEC16
- help
- This is a reimplementation of M-Systems DiskOnChip 2000,
- Millennium and Millennium Plus as a standard NAND device driver,
- as opposed to the earlier self-contained MTD device drivers.
- This should enable, among other things, proper JFFS2 operation on
- these devices.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- bool "Advanced detection options for DiskOnChip"
- depends on MTD_NAND_DISKONCHIP
- help
- This option allows you to specify nonstandard address at which to
- probe for a DiskOnChip, or to change the detection options. You
- are unlikely to need any of this unless you are using LinuxBIOS.
- Say 'N'.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
- hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- depends on MTD_NAND_DISKONCHIP
- default "0"
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option allows you to specify a single address at which to probe
- for the device, which is useful if you have other devices in that
- range which get upset when they are probed.
-
- (Note that on PowerPC, the normal probe will only check at
- 0xE4000000.)
-
- Normally, you should leave this set to zero, to allow the probe at
- the normal addresses.
-
-config MTD_NAND_DISKONCHIP_PROBE_HIGH
- bool "Probe high addresses"
- depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option changes to make it probe between 0xFFFC8000 and
- 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
- useful to you. Say 'N'.
-
-config MTD_NAND_DISKONCHIP_BBTWRITE
- bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
- depends on MTD_NAND_DISKONCHIP
- help
- On DiskOnChip devices shipped with the INFTL filesystem (Millennium
- and 2000 TSOP/Alon), Linux reserves some space at the end of the
- device for the Bad Block Table (BBT). If you have existing INFTL
- data on your device (created by non-Linux tools such as M-Systems'
- DOS drivers), your data might overlap the area Linux wants to use for
- the BBT. If this is a concern for you, leave this option disabled and
- Linux will not write BBT data into this area.
- The downside of leaving this option disabled is that if bad blocks
- are detected by Linux, they will not be recorded in the BBT, which
- could cause future problems.
- Once you enable this option, new filesystems (INFTL or others, created
- in Linux or other operating systems) will not use the reserved area.
- The only reason not to enable this option is to prevent damage to
- preexisting filesystems.
- Even if you leave this disabled, you can enable BBT writes at module
- load time (assuming you build diskonchip as a module) with the module
- parameter "inftl_bbt_write=1".
-
config MTD_NAND_SHARPSL
- tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
+ tristate "Sharp SL Series (C7xx + others) NAND controller"
depends on ARCH_PXA || COMPILE_TEST
depends on HAS_IOMEM
config MTD_NAND_CAFE
- tristate "NAND support for OLPC CAFÉ chip"
+ tristate "OLPC CAFÉ NAND controller"
depends on PCI
select REED_SOLOMON
select REED_SOLOMON_DEC16
@@ -242,7 +144,7 @@ config MTD_NAND_CAFE
laptop.
config MTD_NAND_CS553X
- tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
+ tristate "CS5535/CS5536 (AMD Geode companion) NAND controller"
depends on X86_32
depends on !UML && HAS_IOMEM
help
@@ -256,7 +158,7 @@ config MTD_NAND_CS553X
If you say "m", the module will be called cs553x_nand.
config MTD_NAND_ATMEL
- tristate "Support for NAND Flash / SmartMedia on AT91"
+ tristate "Atmel AT91 NAND Flash/SmartMedia NAND controller"
depends on ARCH_AT91 || COMPILE_TEST
depends on HAS_IOMEM
select GENERIC_ALLOCATOR
@@ -265,8 +167,17 @@ config MTD_NAND_ATMEL
Enables support for NAND Flash / Smart Media Card interface
on Atmel AT91 processors.
+config MTD_NAND_ORION
+ tristate "Marvell Orion NAND controller"
+ depends on PLAT_ORION
+ help
+ This enables the NAND flash controller on Orion machines.
+
+ No board-specific support is done by this driver; each board
+ must advertise a platform_device for the driver to attach.
+
config MTD_NAND_MARVELL
- tristate "NAND controller support on Marvell boards"
+ tristate "Marvell EBU NAND controller"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
COMPILE_TEST
depends on HAS_IOMEM
@@ -278,7 +189,7 @@ config MTD_NAND_MARVELL
- 64-bit Armada platforms (7k, 8k) (NFCv2)
config MTD_NAND_SLC_LPC32XX
- tristate "NXP LPC32xx SLC Controller"
+ tristate "NXP LPC32xx SLC NAND controller"
depends on ARCH_LPC32XX || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -290,7 +201,7 @@ config MTD_NAND_SLC_LPC32XX
by the SLC NAND controller.
config MTD_NAND_MLC_LPC32XX
- tristate "NXP LPC32xx MLC Controller"
+ tristate "NXP LPC32xx MLC NAND controller"
depends on ARCH_LPC32XX || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -302,38 +213,23 @@ config MTD_NAND_MLC_LPC32XX
by the MLC NAND controller.
config MTD_NAND_CM_X270
- tristate "Support for NAND Flash on CM-X270 modules"
+ tristate "CM-X270 modules NAND controller"
depends on MACH_ARMCORE
config MTD_NAND_PASEMI
- tristate "NAND support for PA Semi PWRficient"
+ tristate "PA Semi PWRficient NAND controller"
depends on PPC_PASEMI
help
Enables support for NAND Flash interface on PA Semi PWRficient
based boards
config MTD_NAND_TMIO
- tristate "NAND Flash device on Toshiba Mobile IO Controller"
+ tristate "Toshiba Mobile IO NAND controller"
depends on MFD_TMIO
help
Support for NAND flash connected to a Toshiba Mobile IO
Controller in some PDAs, including the Sharp SL6000x.
-config MTD_NAND_NANDSIM
- tristate "Support for NAND Flash Simulator"
- help
- The simulator may simulate various NAND flash chips for the
- MTD nand layer.
-
-config MTD_NAND_GPMI_NAND
- tristate "GPMI NAND Flash Controller driver"
- depends on MXS_DMA
- help
- Enables NAND Flash support for IMX23, IMX28 or IMX6.
- The GPMI controller is very powerful, with the help of BCH
- module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time.
-
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
depends on ARM || ARM64 || MIPS || COMPILE_TEST
@@ -344,7 +240,7 @@ config MTD_NAND_BRCMNAND
BCM3xxx, BCM63xxx, iProc/Cygnus and more.
config MTD_NAND_BCM47XXNFLASH
- tristate "Support for NAND flash on BCM4706 BCMA bus"
+ tristate "BCM4706 BCMA NAND controller"
depends on BCMA_NFLASH
depends on BCMA
help
@@ -352,32 +248,31 @@ config MTD_NAND_BCM47XXNFLASH
registered by bcma as platform devices. This enables driver for
NAND flash memories. For now only BCM4706 is supported.
-config MTD_NAND_PLATFORM
- tristate "Support for generic platform NAND driver"
+config MTD_NAND_OXNAS
+ tristate "Oxford Semiconductor NAND controller"
+ depends on ARCH_OXNAS || COMPILE_TEST
depends on HAS_IOMEM
help
- This implements a generic NAND driver for on-SOC platform
- devices. You will need to provide platform-specific functions
- via platform_data.
+ This enables the NAND flash controller on Oxford Semiconductor SoCs.
-config MTD_NAND_ORION
- tristate "NAND Flash support for Marvell Orion SoC"
- depends on PLAT_ORION
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 NAND controller"
+ depends on PPC_MPC512x
help
- This enables the NAND flash controller on Orion machines.
-
- No board specific support is done by this driver, each board
- must advertise a platform_device for the driver to attach.
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
-config MTD_NAND_OXNAS
- tristate "NAND Flash support for Oxford Semiconductor SoC"
- depends on ARCH_OXNAS || COMPILE_TEST
- depends on HAS_IOMEM
+config MTD_NAND_GPMI_NAND
+ tristate "Freescale GPMI NAND controller"
+ depends on MXS_DMA
help
- This enables the NAND flash controller on Oxford Semiconductor SoCs.
+ Enables NAND Flash support for IMX23, IMX28 or IMX6.
+ The GPMI controller is very powerful; with the help of the BCH
+ module, it can do hardware ECC. The GPMI supports several
+ NAND flashes at the same time.
config MTD_NAND_FSL_ELBC
- tristate "NAND support for Freescale eLBC controllers"
+ tristate "Freescale eLBC NAND controller"
depends on FSL_SOC
select FSL_LBC
help
@@ -387,7 +282,7 @@ config MTD_NAND_FSL_ELBC
external NAND devices.
config MTD_NAND_FSL_IFC
- tristate "NAND support for Freescale IFC controller"
+ tristate "Freescale IFC NAND controller"
depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
depends on HAS_IOMEM
select FSL_IFC
@@ -399,22 +294,15 @@ config MTD_NAND_FSL_IFC
external NAND devices.
config MTD_NAND_FSL_UPM
- tristate "Support for NAND on Freescale UPM"
+ tristate "Freescale UPM NAND controller"
depends on PPC_83xx || PPC_85xx
select FSL_LBC
help
Enables support for NAND Flash chips wired onto Freescale PowerPC
processor localbus with User-Programmable Machine support.
-config MTD_NAND_MPC5121_NFC
- tristate "MPC5121 built-in NAND Flash Controller support"
- depends on PPC_MPC512x
- help
- This enables the driver for the NAND flash controller on the
- MPC5121 SoC.
-
config MTD_NAND_VF610_NFC
- tristate "Support for Freescale NFC for VF610/MPC5125"
+ tristate "Freescale VF610/MPC5125 NAND controller"
depends on (SOC_VF610 || COMPILE_TEST)
depends on HAS_IOMEM
help
@@ -426,7 +314,7 @@ config MTD_NAND_VF610_NFC
device tree.
config MTD_NAND_MXC
- tristate "MXC NAND support"
+ tristate "Freescale MXC NAND controller"
depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -434,7 +322,7 @@ config MTD_NAND_MXC
MXC processors.
config MTD_NAND_SH_FLCTL
- tristate "Support for NAND on Renesas SuperH FLCTL"
+ tristate "Renesas SuperH FLCTL NAND controller"
depends on SUPERH || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -442,7 +330,7 @@ config MTD_NAND_SH_FLCTL
for NAND Flash using FLCTL.
config MTD_NAND_DAVINCI
- tristate "Support NAND on DaVinci/Keystone SoC"
+ tristate "DaVinci/Keystone NAND controller"
depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -450,42 +338,30 @@ config MTD_NAND_DAVINCI
DaVinci/Keystone processors.
config MTD_NAND_TXX9NDFMC
- tristate "NAND Flash support for TXx9 SoC"
+ tristate "TXx9 NAND controller"
depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST
depends on HAS_IOMEM
help
This enables the NAND flash controller on the TXx9 SoCs.
config MTD_NAND_SOCRATES
- tristate "Support for NAND on Socrates board"
+ tristate "Socrates NAND controller"
depends on SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
config MTD_NAND_NUC900
- tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
+ tristate "Nuvoton NUC9xx/w90p910 NAND controller"
depends on ARCH_W90X900 || COMPILE_TEST
depends on HAS_IOMEM
help
This enables the driver for the NAND Flash on evaluation board based
on w90p910 / NUC9xx.
-config MTD_NAND_JZ4740
- tristate "Support for JZ4740 SoC NAND controller"
- depends on MACH_JZ4740 || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Enables support for NAND Flash on JZ4740 SoC based boards.
-
-config MTD_NAND_JZ4780
- tristate "Support for NAND on JZ4780 SoC"
- depends on JZ4780_NEMC
- help
- Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
- based boards, using the BCH controller for hardware error correction.
+source "drivers/mtd/nand/raw/ingenic/Kconfig"
config MTD_NAND_FSMC
- tristate "Support for NAND on ST Micros FSMC"
+ tristate "ST Micros FSMC NAND controller"
depends on OF && HAS_IOMEM
depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300 || \
COMPILE_TEST
@@ -494,28 +370,28 @@ config MTD_NAND_FSMC
Flexible Static Memory Controller (FSMC)
config MTD_NAND_XWAY
- bool "Support for NAND on Lantiq XWAY SoC"
+ bool "Lantiq XWAY NAND controller"
depends on LANTIQ && SOC_TYPE_XWAY
help
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
to the External Bus Unit (EBU).
config MTD_NAND_SUNXI
- tristate "Support for NAND on Allwinner SoCs"
+ tristate "Allwinner NAND controller"
depends on ARCH_SUNXI || COMPILE_TEST
depends on HAS_IOMEM
help
Enables support for NAND Flash chips on Allwinner SoCs.
config MTD_NAND_HISI504
- tristate "Support for NAND controller on Hisilicon SoC Hip04"
+ tristate "Hisilicon Hip04 NAND controller"
depends on ARCH_HISI || COMPILE_TEST
depends on HAS_IOMEM
help
Enables support for NAND controller on Hisilicon SoC Hip04.
config MTD_NAND_QCOM
- tristate "Support for NAND on QCOM SoCs"
+ tristate "QCOM NAND controller"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -523,7 +399,7 @@ config MTD_NAND_QCOM
controller. This controller is found on IPQ806x SoC.
config MTD_NAND_MTK
- tristate "Support for NAND controller on MTK SoCs"
+ tristate "MTK NAND controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -531,7 +407,7 @@ config MTD_NAND_MTK
This controller is found on mt27xx, mt81xx, mt65xx SoCs.
config MTD_NAND_TEGRA
- tristate "Support for NAND controller on NVIDIA Tegra"
+ tristate "NVIDIA Tegra NAND controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -558,4 +434,115 @@ config MTD_NAND_MESON
Enables support for NAND controller on Amlogic's Meson SoCs.
This controller is found on Meson SoCs.
-endif # MTD_NAND
+config MTD_NAND_GPIO
+ tristate "GPIO assisted NAND controller"
+ depends on GPIOLIB || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables a NAND flash driver where control signals are
+ connected to GPIO pins, and commands and data are communicated
+ via a memory mapped interface.
+
+config MTD_NAND_PLATFORM
+ tristate "Generic NAND controller"
+ depends on HAS_IOMEM
+ help
+ This implements a generic NAND driver for on-SOC platform
+ devices. You will need to provide platform-specific functions
+ via platform_data.
+
+comment "Misc"
+
+config MTD_SM_COMMON
+ tristate
+ default n
+
+config MTD_NAND_NANDSIM
+ tristate "Support for NAND Flash Simulator"
+ help
+ The simulator may simulate various NAND flash chips for the
+ MTD NAND layer.
+
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+ Enable support for the Ricoh R5C852 xD card reader.
+ You also need to enable either the
+ 'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+ experimental, read-write
+ 'SmartMedia/xD new translation layer'.
+
+config MTD_NAND_DISKONCHIP
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
+ depends on HAS_IOMEM
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ This is a reimplementation of M-Systems DiskOnChip 2000,
+ Millennium and Millennium Plus as a standard NAND device driver,
+ as opposed to the earlier self-contained MTD device drivers.
+ This should enable, among other things, proper JFFS2 operation on
+ these devices.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ bool "Advanced detection options for DiskOnChip"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ This option allows you to specify nonstandard address at which to
+ probe for a DiskOnChip, or to change the detection options. You
+ are unlikely to need any of this unless you are using LinuxBIOS.
+ Say 'N'.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+ hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ depends on MTD_NAND_DISKONCHIP
+ default "0"
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option allows you to specify a single address at which to probe
+ for the device, which is useful if you have other devices in that
+ range which get upset when they are probed.
+
+ (Note that on PowerPC, the normal probe will only check at
+ 0xE4000000.)
+
+ Normally, you should leave this set to zero, to allow the probe at
+ the normal addresses.
+
+config MTD_NAND_DISKONCHIP_PROBE_HIGH
+ bool "Probe high addresses"
+ depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option changes to make it probe between 0xFFFC8000 and
+ 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
+ useful to you. Say 'N'.
+
+config MTD_NAND_DISKONCHIP_BBTWRITE
+ bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ On DiskOnChip devices shipped with the INFTL filesystem (Millennium
+ and 2000 TSOP/Alon), Linux reserves some space at the end of the
+ device for the Bad Block Table (BBT). If you have existing INFTL
+ data on your device (created by non-Linux tools such as M-Systems'
+ DOS drivers), your data might overlap the area Linux wants to use for
+ the BBT. If this is a concern for you, leave this option disabled and
+ Linux will not write BBT data into this area.
+ The downside of leaving this option disabled is that if bad blocks
+ are detected by Linux, they will not be recorded in the BBT, which
+ could cause future problems.
+ Once you enable this option, new filesystems (INFTL or others, created
+ in Linux or other operating systems) will not use the reserved area.
+ The only reason not to enable this option is to prevent damage to
+ preexisting filesystems.
+ Even if you leave this disabled, you can enable BBT writes at module
+ load time (assuming you build diskonchip as a module) with the module
+ parameter "inftl_bbt_write=1".
+
+endif # MTD_RAW_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 5a5a72f0793e..efaf5cd25edc 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTD_NAND) += nand.o
-obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
-obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o
+obj-$(CONFIG_MTD_RAW_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += nand_ecc.o
+nand-$(CONFIG_MTD_NAND_ECC_SW_BCH) += nand_bch.o
obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
@@ -45,8 +45,7 @@ obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
-obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
-obj-$(CONFIG_MTD_NAND_JZ4780) += jz4780_nand.o jz4780_bch.o
+obj-y += ingenic/
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 5781fcf6b76c..8d6be90a6fe8 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 ATMEL
* Copyright 2017 Free Electrons
@@ -29,10 +30,6 @@
* Add Nand Flash Controller support for SAMA5 SoC
* Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* A few words about the naming convention in this file. This convention
* applies to structure and function names.
*
@@ -65,6 +62,7 @@
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
#include "pmecc.h"
@@ -211,6 +209,7 @@ struct atmel_nand_controller_caps {
bool legacy_of_bindings;
u32 ale_offs;
u32 cle_offs;
+ const char *ebi_csa_regmap_name;
const struct atmel_nand_controller_ops *ops;
};
@@ -231,10 +230,15 @@ to_nand_controller(struct nand_controller *ctl)
return container_of(ctl, struct atmel_nand_controller, base);
}
+struct atmel_smc_nand_ebi_csa_cfg {
+ u32 offs;
+ u32 nfd0_on_d16;
+};
+
struct atmel_smc_nand_controller {
struct atmel_nand_controller base;
- struct regmap *matrix;
- unsigned int ebi_csa_offs;
+ struct regmap *ebi_csa_regmap;
+ struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
};
static inline struct atmel_smc_nand_controller *
@@ -1068,15 +1072,15 @@ static int atmel_nand_pmecc_init(struct nand_chip *chip)
req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
else if (chip->ecc.strength)
req.ecc.strength = chip->ecc.strength;
- else if (chip->ecc_strength_ds)
- req.ecc.strength = chip->ecc_strength_ds;
+ else if (chip->base.eccreq.strength)
+ req.ecc.strength = chip->base.eccreq.strength;
else
req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
if (chip->ecc.size)
req.ecc.sectorsize = chip->ecc.size;
- else if (chip->ecc_step_ds)
- req.ecc.sectorsize = chip->ecc_step_ds;
+ else if (chip->base.eccreq.step_size)
+ req.ecc.sectorsize = chip->base.eccreq.step_size;
else
req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
@@ -1507,13 +1511,20 @@ static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
atmel_nand_init(nc, nand);
smc_nc = to_smc_nand_controller(chip->controller);
- if (!smc_nc->matrix)
+ if (!smc_nc->ebi_csa_regmap)
return;
/* Attach the CS to the NAND Flash logic. */
for (i = 0; i < nand->numcs; i++)
- regmap_update_bits(smc_nc->matrix, smc_nc->ebi_csa_offs,
+ regmap_update_bits(smc_nc->ebi_csa_regmap,
+ smc_nc->ebi_csa->offs,
BIT(nand->cs[i].id), BIT(nand->cs[i].id));
+
+ if (smc_nc->ebi_csa->nfd0_on_d16)
+ regmap_update_bits(smc_nc->ebi_csa_regmap,
+ smc_nc->ebi_csa->offs,
+ smc_nc->ebi_csa->nfd0_on_d16,
+ smc_nc->ebi_csa->nfd0_on_d16);
}
static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
@@ -1797,7 +1808,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
ret = of_property_read_u32(np, "#size-cells", &val);
if (ret) {
- dev_err(dev, "missing #address-cells property\n");
+ dev_err(dev, "missing #size-cells property\n");
return ret;
}
@@ -1833,34 +1844,71 @@ static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
clk_put(nc->mck);
}
-static const struct of_device_id atmel_matrix_of_ids[] = {
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
+ .offs = AT91SAM9260_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
+ .offs = AT91SAM9261_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
+ .offs = AT91SAM9263_MATRIX_EBI0CSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
+ .offs = AT91SAM9RL_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
+ .offs = AT91SAM9G45_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
+ .offs = AT91SAM9N12_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
+ .offs = AT91SAM9X5_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
+ .offs = AT91_SFR_CCFG_EBICSA,
+ .nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
+};
+
+static const struct of_device_id atmel_ebi_csa_regmap_of_ids[] = {
{
.compatible = "atmel,at91sam9260-matrix",
- .data = (void *)AT91SAM9260_MATRIX_EBICSA,
+ .data = &at91sam9260_ebi_csa,
},
{
.compatible = "atmel,at91sam9261-matrix",
- .data = (void *)AT91SAM9261_MATRIX_EBICSA,
+ .data = &at91sam9261_ebi_csa,
},
{
.compatible = "atmel,at91sam9263-matrix",
- .data = (void *)AT91SAM9263_MATRIX_EBI0CSA,
+ .data = &at91sam9263_ebi_csa,
},
{
.compatible = "atmel,at91sam9rl-matrix",
- .data = (void *)AT91SAM9RL_MATRIX_EBICSA,
+ .data = &at91sam9rl_ebi_csa,
},
{
.compatible = "atmel,at91sam9g45-matrix",
- .data = (void *)AT91SAM9G45_MATRIX_EBICSA,
+ .data = &at91sam9g45_ebi_csa,
},
{
.compatible = "atmel,at91sam9n12-matrix",
- .data = (void *)AT91SAM9N12_MATRIX_EBICSA,
+ .data = &at91sam9n12_ebi_csa,
},
{
.compatible = "atmel,at91sam9x5-matrix",
- .data = (void *)AT91SAM9X5_MATRIX_EBICSA,
+ .data = &at91sam9x5_ebi_csa,
+ },
+ {
+ .compatible = "microchip,sam9x60-sfr",
+ .data = &sam9x60_ebi_csa,
},
{ /* sentinel */ },
};
@@ -1982,37 +2030,38 @@ atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
struct device_node *np;
int ret;
- /* We do not retrieve the matrix syscon when parsing old DTs. */
+ /* We do not retrieve the EBICSA regmap when parsing old DTs. */
if (nc->base.caps->legacy_of_bindings)
return 0;
- np = of_parse_phandle(dev->parent->of_node, "atmel,matrix", 0);
+ np = of_parse_phandle(dev->parent->of_node,
+ nc->base.caps->ebi_csa_regmap_name, 0);
if (!np)
return 0;
- match = of_match_node(atmel_matrix_of_ids, np);
+ match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
if (!match) {
of_node_put(np);
return 0;
}
- nc->matrix = syscon_node_to_regmap(np);
+ nc->ebi_csa_regmap = syscon_node_to_regmap(np);
of_node_put(np);
- if (IS_ERR(nc->matrix)) {
- ret = PTR_ERR(nc->matrix);
- dev_err(dev, "Could not get Matrix regmap (err = %d)\n", ret);
+ if (IS_ERR(nc->ebi_csa_regmap)) {
+ ret = PTR_ERR(nc->ebi_csa_regmap);
+ dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
return ret;
}
- nc->ebi_csa_offs = (uintptr_t)match->data;
+ nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;
/*
* The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
- * add 4 to ->ebi_csa_offs.
+ * add 4 to ->ebi_csa->offs.
*/
if (of_device_is_compatible(dev->parent->of_node,
"atmel,at91sam9263-ebi1"))
- nc->ebi_csa_offs += 4;
+ nc->ebi_csa->offs += 4;
return 0;
}
@@ -2341,6 +2390,7 @@ static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
.ale_offs = BIT(21),
.cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
.ops = &at91rm9200_nc_ops,
};
@@ -2355,12 +2405,14 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
.ale_offs = BIT(21),
.cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
.ops = &atmel_smc_nc_ops,
};
static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
.ale_offs = BIT(22),
.cle_offs = BIT(21),
+ .ebi_csa_regmap_name = "atmel,matrix",
.ops = &atmel_smc_nc_ops,
};
@@ -2368,6 +2420,15 @@ static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
.has_dma = true,
.ale_offs = BIT(21),
.cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &atmel_smc_nc_ops,
+};
+
+static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "microchip,sfr",
.ops = &atmel_smc_nc_ops,
};
@@ -2415,6 +2476,10 @@ static const struct of_device_id atmel_nand_controller_of_ids[] = {
.compatible = "atmel,sama5d3-nand-controller",
.data = &atmel_sama5_nc_caps,
},
+ {
+ .compatible = "microchip,sam9x60-nand-controller",
+ .data = &microchip_sam9x60_nc_caps,
+ },
/* Support for old/deprecated bindings: */
{
.compatible = "atmel,at91rm9200-nand",
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index 9d3997840889..cbb023bf00f7 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 ATMEL
* Copyright 2017 Free Electrons
@@ -28,10 +29,6 @@
* Add Nand Flash Controller support for SAMA5 SoC
* Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* The PMECC is a hardware-assisted BCH engine, which means part of the
* ECC algorithm is left to the software. The hardware/software split
* is explained in the "PMECC Controller Functional Description" chapter in
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.h b/drivers/mtd/nand/raw/atmel/pmecc.h
index 808f1be0d6ad..7851c05126cf 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.h
+++ b/drivers/mtd/nand/raw/atmel/pmecc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* © Copyright 2016 ATMEL
* © Copyright 2016 Free Electrons
@@ -28,11 +29,6 @@
*
* Add Nand Flash Controller support for SAMA5 SoC
* © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef ATMEL_PMECC_H
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index a37cbfe56567..a53ffb3d64b0 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -428,7 +428,7 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
}
/* Configure FLASH */
- chipsize = b47n->nand_chip.chipsize >> 20;
+ chipsize = nanddev_target_size(&b47n->nand_chip.base) >> 20;
tbits = ffs(chipsize); /* find first bit set */
if (!tbits || tbits != fls(chipsize)) {
pr_err("Invalid flash size: 0x%lX\n", chipsize);
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 482c6f093f99..ce0b8ffc7812 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -1676,11 +1676,8 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
int page = addr >> chip->page_shift;
int ret;
- if (!buf) {
- buf = chip->data_buf;
- /* Invalidate page cache */
- chip->pagebuf = -1;
- }
+ if (!buf)
+ buf = nand_get_data_buf(chip);
sas = mtd->oobsize / chip->ecc.steps;
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 24aeafc67cd4..3102ddbd8abd 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -3,7 +3,7 @@
* NAND Flash Controller Device Driver
* Copyright © 2009-2010, Intel Corporation and its suppliers.
*
- * Copyright (c) 2017 Socionext Inc.
+ * Copyright (c) 2017-2019 Socionext Inc.
* Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
*/
@@ -40,11 +40,16 @@
#define DENALI_BANK(denali) ((denali)->active_bank << 24)
#define DENALI_INVALID_BANK -1
-#define DENALI_NR_BANKS 4
-static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
+static struct denali_chip *to_denali_chip(struct nand_chip *chip)
{
- return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
+ return container_of(chip, struct denali_chip, chip);
+}
+
+static struct denali_controller *to_denali_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct denali_controller,
+ controller);
}
/*
@@ -52,12 +57,12 @@ static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
* type, bank, block, and page address). The slave data is the actual data to
* be transferred. This mode requires 28 bits of address region allocated.
*/
-static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+static u32 denali_direct_read(struct denali_controller *denali, u32 addr)
{
return ioread32(denali->host + addr);
}
-static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+static void denali_direct_write(struct denali_controller *denali, u32 addr,
u32 data)
{
iowrite32(data, denali->host + addr);
@@ -69,77 +74,62 @@ static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
* control information and transferred data are latched by the registers in
* the translation module.
*/
-static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+static u32 denali_indexed_read(struct denali_controller *denali, u32 addr)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
return ioread32(denali->host + DENALI_INDEXED_DATA);
}
-static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+static void denali_indexed_write(struct denali_controller *denali, u32 addr,
u32 data)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
-/*
- * Use the configuration feature register to determine the maximum number of
- * banks that the hardware supports.
- */
-static void denali_detect_max_banks(struct denali_nand_info *denali)
-{
- uint32_t features = ioread32(denali->reg + FEATURES);
-
- denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
-
- /* the encoding changed from rev 5.0 to 5.1 */
- if (denali->revision < 0x0501)
- denali->max_banks <<= 1;
-}
-
-static void denali_enable_irq(struct denali_nand_info *denali)
+static void denali_enable_irq(struct denali_controller *denali)
{
int i;
- for (i = 0; i < DENALI_NR_BANKS; i++)
+ for (i = 0; i < denali->nbanks; i++)
iowrite32(U32_MAX, denali->reg + INTR_EN(i));
iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}
-static void denali_disable_irq(struct denali_nand_info *denali)
+static void denali_disable_irq(struct denali_controller *denali)
{
int i;
- for (i = 0; i < DENALI_NR_BANKS; i++)
+ for (i = 0; i < denali->nbanks; i++)
iowrite32(0, denali->reg + INTR_EN(i));
iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}
-static void denali_clear_irq(struct denali_nand_info *denali,
- int bank, uint32_t irq_status)
+static void denali_clear_irq(struct denali_controller *denali,
+ int bank, u32 irq_status)
{
/* write one to clear bits */
iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
-static void denali_clear_irq_all(struct denali_nand_info *denali)
+static void denali_clear_irq_all(struct denali_controller *denali)
{
int i;
- for (i = 0; i < DENALI_NR_BANKS; i++)
+ for (i = 0; i < denali->nbanks; i++)
denali_clear_irq(denali, i, U32_MAX);
}
static irqreturn_t denali_isr(int irq, void *dev_id)
{
- struct denali_nand_info *denali = dev_id;
+ struct denali_controller *denali = dev_id;
irqreturn_t ret = IRQ_NONE;
- uint32_t irq_status;
+ u32 irq_status;
int i;
spin_lock(&denali->irq_lock);
- for (i = 0; i < DENALI_NR_BANKS; i++) {
+ for (i = 0; i < denali->nbanks; i++) {
irq_status = ioread32(denali->reg + INTR_STATUS(i));
if (irq_status)
ret = IRQ_HANDLED;
@@ -160,7 +150,7 @@ static irqreturn_t denali_isr(int irq, void *dev_id)
return ret;
}
-static void denali_reset_irq(struct denali_nand_info *denali)
+static void denali_reset_irq(struct denali_controller *denali)
{
unsigned long flags;
@@ -170,11 +160,10 @@ static void denali_reset_irq(struct denali_nand_info *denali)
spin_unlock_irqrestore(&denali->irq_lock, flags);
}
-static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
- uint32_t irq_mask)
+static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask)
{
unsigned long time_left, flags;
- uint32_t irq_status;
+ u32 irq_status;
spin_lock_irqsave(&denali->irq_lock, flags);
@@ -201,128 +190,259 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
return denali->irq_status;
}
-static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+static void denali_select_target(struct nand_chip *chip, int cs)
{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
- int i;
- for (i = 0; i < len; i++)
- buf[i] = denali->host_read(denali, addr);
+ denali->active_bank = sel->bank;
+
+ iowrite32(1 << (chip->phys_erase_shift - chip->page_shift),
+ denali->reg + PAGES_PER_BLOCK);
+ iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
+ denali->reg + DEVICE_WIDTH);
+ iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ?
+ 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
+ denali->reg + ECC_CORRECTION);
+ iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS);
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return;
+
+ /* update timing registers unless NAND_KEEP_TIMINGS is set */
+ iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE);
+ iowrite32(sel->tcwaw_and_addr_2_data,
+ denali->reg + TCWAW_AND_ADDR_2_DATA);
+ iowrite32(sel->re_2_we, denali->reg + RE_2_WE);
+ iowrite32(sel->acc_clks, denali->reg + ACC_CLKS);
+ iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT);
+ iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT);
+ iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT);
+ iowrite32(sel->re_2_re, denali->reg + RE_2_RE);
}
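denali_select_target() now programs the per-target geometry on every selection, including PAGES_PER_BLOCK derived from the chip's shifts rather than from mtd->erasesize / mtd->writesize as the old attach path did. A quick standalone check of that arithmetic for an assumed 128 KiB block / 2 KiB page device:

#include <stdio.h>

int main(void)
{
	/* assumed geometry: 128 KiB erase block, 2 KiB page */
	const unsigned int phys_erase_shift = 17;	/* log2(128 KiB) */
	const unsigned int page_shift = 11;		/* log2(2 KiB)   */

	/* same expression as the PAGES_PER_BLOCK write above: prints 64 */
	printf("PAGES_PER_BLOCK = %u\n",
	       1U << (phys_erase_shift - page_shift));
	return 0;
}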
-static void denali_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
+static int denali_change_column(struct nand_chip *chip, unsigned int offset,
+ void *buf, unsigned int len, bool write)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
- int i;
-
- for (i = 0; i < len; i++)
- denali->host_write(denali, addr, buf[i]);
+ if (write)
+ return nand_change_write_column_op(chip, offset, buf, len,
+ false);
+ else
+ return nand_change_read_column_op(chip, offset, buf, len,
+ false);
}
-static void denali_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
+static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
- uint16_t *buf16 = (uint16_t *)buf;
- int i;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int writesize = mtd->writesize;
+ int oob_skip = denali->oob_skip_bytes;
+ int ret, i, pos, len;
+
+ for (i = 0; i < ecc->steps; i++) {
+ pos = i * (ecc->size + ecc->bytes);
+ len = ecc->size;
+
+ if (pos >= writesize) {
+ pos += oob_skip;
+ } else if (pos + len > writesize) {
+ /* This chunk overlaps the BBM area. It must be split. */
+ ret = denali_change_column(chip, pos, buf,
+ writesize - pos, write);
+ if (ret)
+ return ret;
+
+ buf += writesize - pos;
+ len -= writesize - pos;
+ pos = writesize + oob_skip;
+ }
+
+ ret = denali_change_column(chip, pos, buf, len, write);
+ if (ret)
+ return ret;
- for (i = 0; i < len / 2; i++)
- buf16[i] = denali->host_read(denali, addr);
+ buf += len;
+ }
+
+ return 0;
}
-static void denali_write_buf16(struct nand_chip *chip, const uint8_t *buf,
- int len)
+static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
- const uint16_t *buf16 = (const uint16_t *)buf;
- int i;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int oob_skip = denali->oob_skip_bytes;
+ int ret, i, pos, len;
- for (i = 0; i < len / 2; i++)
- denali->host_write(denali, addr, buf16[i]);
+ /* BBM at the beginning of the OOB area */
+ ret = denali_change_column(chip, writesize, buf, oob_skip, write);
+ if (ret)
+ return ret;
+
+ buf += oob_skip;
+
+ for (i = 0; i < ecc->steps; i++) {
+ pos = ecc->size + i * (ecc->size + ecc->bytes);
+
+ if (i == ecc->steps - 1)
+ /* The last chunk includes OOB free */
+ len = writesize + oobsize - pos - oob_skip;
+ else
+ len = ecc->bytes;
+
+ if (pos >= writesize) {
+ pos += oob_skip;
+ } else if (pos + len > writesize) {
+ /* This chunk overlaps the BBM area. It must be split. */
+ ret = denali_change_column(chip, pos, buf,
+ writesize - pos, write);
+ if (ret)
+ return ret;
+
+ buf += writesize - pos;
+ len -= writesize - pos;
+ pos = writesize + oob_skip;
+ }
+
+ ret = denali_change_column(chip, pos, buf, len, write);
+ if (ret)
+ return ret;
+
+ buf += len;
+ }
+
+ return 0;
}
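The two transfer helpers above implement the controller's on-flash syndrome layout: ecc->size data chunks interleaved with ecc->bytes of ECC, with the first oob_skip bytes after the main area (the BBM) excluded from the walk, so any chunk that crosses the main/OOB boundary has to be issued as two column changes. The following standalone model prints that chunk map for an assumed geometry (2 KiB page, 128 B OOB, 512 B/8-bit BCH giving 14 ECC bytes, 8 skipped BBM bytes); the numbers are for illustration only:

#include <stdio.h>

/* print one chunk, splitting it around the skipped BBM bytes if needed */
static void print_chunk(const char *tag, int i, int pos, int len,
			int writesize, int oob_skip)
{
	printf("%s %d:", tag, i);
	if (pos >= writesize) {
		pos += oob_skip;			/* fully past the BBM */
	} else if (pos + len > writesize) {
		/* chunk overlaps the BBM area: emit it as two ranges */
		printf(" [%4d..%4d) +", pos, writesize);
		len -= writesize - pos;
		pos = writesize + oob_skip;
	}
	printf(" [%4d..%4d)\n", pos, pos + len);
}

int main(void)
{
	const int writesize = 2048, oobsize = 128;
	const int ecc_size = 512, ecc_bytes = 14, oob_skip = 8;
	const int steps = writesize / ecc_size;
	int i, pos, len;

	for (i = 0; i < steps; i++)
		print_chunk("payload", i, i * (ecc_size + ecc_bytes),
			    ecc_size, writesize, oob_skip);

	printf("bbm      : [%4d..%4d)\n", writesize, writesize + oob_skip);

	for (i = 0; i < steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = (i == steps - 1) ?
			writesize + oobsize - pos - oob_skip : ecc_bytes;
		print_chunk("oob ecc", i, pos, len, writesize, oob_skip);
	}
	return 0;
}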
-static uint8_t denali_read_byte(struct nand_chip *chip)
+static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
+ int page)
{
- uint8_t byte;
+ int ret;
+
+ if (!buf && !oob_buf)
+ return -EINVAL;
- denali_read_buf(chip, &byte, 1);
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
- return byte;
+ if (buf) {
+ ret = denali_payload_xfer(chip, buf, false);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_buf) {
+ ret = denali_oob_xfer(chip, oob_buf, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
-static void denali_write_byte(struct nand_chip *chip, uint8_t byte)
+static int denali_write_raw(struct nand_chip *chip, const void *buf,
+ const void *oob_buf, int page)
{
- denali_write_buf(chip, &byte, 1);
+ int ret;
+
+ if (!buf && !oob_buf)
+ return -EINVAL;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (buf) {
+ ret = denali_payload_xfer(chip, (void *)buf, true);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_buf) {
+ ret = denali_oob_xfer(chip, (void *)oob_buf, true);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
}
-static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
+static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- uint32_t type;
+ return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
+ page);
+}
- if (ctrl & NAND_CLE)
- type = DENALI_MAP11_CMD;
- else if (ctrl & NAND_ALE)
- type = DENALI_MAP11_ADDR;
- else
- return;
+static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
+ page);
+}
- /*
- * Some commands are followed by chip->legacy.waitfunc.
- * irq_status must be cleared here to catch the R/B# interrupt later.
- */
- if (ctrl & NAND_CTRL_CHANGE)
- denali_reset_irq(denali);
+static int denali_read_oob(struct nand_chip *chip, int page)
+{
+ return denali_read_raw(chip, NULL, chip->oob_poi, page);
+}
- denali->host_write(denali, DENALI_BANK(denali) | type, dat);
+static int denali_write_oob(struct nand_chip *chip, int page)
+{
+ return denali_write_raw(chip, NULL, chip->oob_poi, page);
}
-static int denali_check_erased_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf,
+static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
unsigned long uncor_ecc_flags,
unsigned int max_bitflips)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
- int ecc_steps = chip->ecc.steps;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
int i, stat;
- for (i = 0; i < ecc_steps; i++) {
+ for (i = 0; i < ecc->steps; i++) {
if (!(uncor_ecc_flags & BIT(i)))
continue;
- stat = nand_check_erased_ecc_chunk(buf, ecc_size,
- ecc_code, ecc_bytes,
- NULL, 0,
- chip->ecc.strength);
+ stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code,
+ ecc->bytes, NULL, 0,
+ ecc->strength);
if (stat < 0) {
- mtd->ecc_stats.failed++;
+ ecc_stats->failed++;
} else {
- mtd->ecc_stats.corrected += stat;
+ ecc_stats->corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
- buf += ecc_size;
- ecc_code += ecc_bytes;
+ buf += ecc->size;
+ ecc_code += ecc->bytes;
}
return max_bitflips;
}
-static int denali_hw_ecc_fixup(struct mtd_info *mtd,
- struct denali_nand_info *denali,
+static int denali_hw_ecc_fixup(struct nand_chip *chip,
unsigned long *uncor_ecc_flags)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
int bank = denali->active_bank;
- uint32_t ecc_cor;
+ u32 ecc_cor;
unsigned int max_bitflips;
ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
@@ -346,23 +466,24 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
* Unfortunately, we cannot know the total number of corrected bits in
* the page. Increase the stats by max_bitflips as a compromise.
*/
- mtd->ecc_stats.corrected += max_bitflips;
+ ecc_stats->corrected += max_bitflips;
return max_bitflips;
}
-static int denali_sw_ecc_fixup(struct mtd_info *mtd,
- struct denali_nand_info *denali,
- unsigned long *uncor_ecc_flags, uint8_t *buf)
+static int denali_sw_ecc_fixup(struct nand_chip *chip,
+ unsigned long *uncor_ecc_flags, u8 *buf)
{
- unsigned int ecc_size = denali->nand.ecc.size;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ unsigned int ecc_size = chip->ecc.size;
unsigned int bitflips = 0;
unsigned int max_bitflips = 0;
- uint32_t err_addr, err_cor_info;
+ u32 err_addr, err_cor_info;
unsigned int err_byte, err_sector, err_device;
- uint8_t err_cor_value;
+ u8 err_cor_value;
unsigned int prev_sector = 0;
- uint32_t irq_status;
+ u32 irq_status;
denali_reset_irq(denali);
@@ -404,7 +525,7 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
/* correct the ECC error */
flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
buf[offset] ^= err_cor_value;
- mtd->ecc_stats.corrected += flips_in_byte;
+ ecc_stats->corrected += flips_in_byte;
bitflips += flips_in_byte;
max_bitflips = max(max_bitflips, bitflips);
@@ -424,10 +545,10 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-static void denali_setup_dma64(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
+static void denali_setup_dma64(struct denali_controller *denali,
+ dma_addr_t dma_addr, int page, bool write)
{
- uint32_t mode;
+ u32 mode;
const int page_count = 1;
mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
@@ -439,7 +560,8 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* burst len = 64 bytes, the number of pages
*/
denali->host_write(denali, mode,
- 0x01002000 | (64 << 16) | (write << 8) | page_count);
+ 0x01002000 | (64 << 16) |
+ (write ? BIT(8) : 0) | page_count);
/* 2. set memory low address */
denali->host_write(denali, mode, lower_32_bits(dma_addr));
@@ -448,10 +570,10 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
-static void denali_setup_dma32(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
+static void denali_setup_dma32(struct denali_controller *denali,
+ dma_addr_t dma_addr, int page, bool write)
{
- uint32_t mode;
+ u32 mode;
const int page_count = 1;
mode = DENALI_MAP10 | DENALI_BANK(denali);
@@ -460,7 +582,7 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* 1. setup transfer type and # of pages */
denali->host_write(denali, mode | page,
- 0x2000 | (write << 8) | page_count);
+ 0x2000 | (write ? BIT(8) : 0) | page_count);
/* 2. set memory high address bits 23:8 */
denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
@@ -472,12 +594,11 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
denali->host_write(denali, mode | 0x14000, 0x2400);
}
-static int denali_pio_read(struct denali_nand_info *denali, void *buf,
+static int denali_pio_read(struct denali_controller *denali, u32 *buf,
size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
- uint32_t *buf32 = (uint32_t *)buf;
- uint32_t irq_status, ecc_err_mask;
+ u32 irq_status, ecc_err_mask;
int i;
if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
@@ -488,7 +609,7 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali);
for (i = 0; i < size / 4; i++)
- *buf32++ = denali->host_read(denali, addr);
+ buf[i] = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
@@ -500,29 +621,29 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
-static int denali_pio_write(struct denali_nand_info *denali,
- const void *buf, size_t size, int page)
+static int denali_pio_write(struct denali_controller *denali, const u32 *buf,
+ size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
- const uint32_t *buf32 = (uint32_t *)buf;
- uint32_t irq_status;
+ u32 irq_status;
int i;
denali_reset_irq(denali);
for (i = 0; i < size / 4; i++)
- denali->host_write(denali, addr, *buf32++);
+ denali->host_write(denali, addr, buf[i]);
irq_status = denali_wait_for_irq(denali,
- INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
+ INTR__PROGRAM_COMP |
+ INTR__PROGRAM_FAIL);
if (!(irq_status & INTR__PROGRAM_COMP))
return -EIO;
return 0;
}
-static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int write)
+static int denali_pio_xfer(struct denali_controller *denali, void *buf,
+ size_t size, int page, bool write)
{
if (write)
return denali_pio_write(denali, buf, size, page);
@@ -530,11 +651,11 @@ static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
return denali_pio_read(denali, buf, size, page);
}
-static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int write)
+static int denali_dma_xfer(struct denali_controller *denali, void *buf,
+ size_t size, int page, bool write)
{
dma_addr_t dma_addr;
- uint32_t irq_mask, irq_status, ecc_err_mask;
+ u32 irq_mask, irq_status, ecc_err_mask;
enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
int ret = 0;
@@ -587,12 +708,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
return ret;
}
-static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int raw, int write)
+static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
+ int page, bool write)
{
- iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
- iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
- denali->reg + TRANSFER_SPARE_REG);
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ denali_select_target(chip, chip->cur_cs);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, write);
@@ -600,180 +721,23 @@ static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
return denali_pio_xfer(denali, buf, size, page, write);
}
-static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int write)
-{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- int writesize = mtd->writesize;
- int oobsize = mtd->oobsize;
- uint8_t *bufpoi = chip->oob_poi;
- int ecc_steps = chip->ecc.steps;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
- int oob_skip = denali->oob_skip_bytes;
- size_t size = writesize + oobsize;
- int i, pos, len;
-
- /* BBM at the beginning of the OOB area */
- if (write)
- nand_prog_page_begin_op(chip, page, writesize, bufpoi,
- oob_skip);
- else
- nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
- bufpoi += oob_skip;
-
- /* OOB ECC */
- for (i = 0; i < ecc_steps; i++) {
- pos = ecc_size + i * (ecc_size + ecc_bytes);
- len = ecc_bytes;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- if (write)
- nand_change_write_column_op(chip, pos, bufpoi, len,
- false);
- else
- nand_change_read_column_op(chip, pos, bufpoi, len,
- false);
- bufpoi += len;
- if (len < ecc_bytes) {
- len = ecc_bytes - len;
- if (write)
- nand_change_write_column_op(chip, writesize +
- oob_skip, bufpoi,
- len, false);
- else
- nand_change_read_column_op(chip, writesize +
- oob_skip, bufpoi,
- len, false);
- bufpoi += len;
- }
- }
-
- /* OOB free */
- len = oobsize - (bufpoi - chip->oob_poi);
- if (write)
- nand_change_write_column_op(chip, size - len, bufpoi, len,
- false);
- else
- nand_change_read_column_op(chip, size - len, bufpoi, len,
- false);
-}
-
-static int denali_read_page_raw(struct nand_chip *chip, uint8_t *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- int writesize = mtd->writesize;
- int oobsize = mtd->oobsize;
- int ecc_steps = chip->ecc.steps;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
- void *tmp_buf = denali->buf;
- int oob_skip = denali->oob_skip_bytes;
- size_t size = writesize + oobsize;
- int ret, i, pos, len;
-
- ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
- if (ret)
- return ret;
-
- /* Arrange the buffer for syndrome payload/ecc layout */
- if (buf) {
- for (i = 0; i < ecc_steps; i++) {
- pos = i * (ecc_size + ecc_bytes);
- len = ecc_size;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- memcpy(buf, tmp_buf + pos, len);
- buf += len;
- if (len < ecc_size) {
- len = ecc_size - len;
- memcpy(buf, tmp_buf + writesize + oob_skip,
- len);
- buf += len;
- }
- }
- }
-
- if (oob_required) {
- uint8_t *oob = chip->oob_poi;
-
- /* BBM at the beginning of the OOB area */
- memcpy(oob, tmp_buf + writesize, oob_skip);
- oob += oob_skip;
-
- /* OOB ECC */
- for (i = 0; i < ecc_steps; i++) {
- pos = ecc_size + i * (ecc_size + ecc_bytes);
- len = ecc_bytes;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- memcpy(oob, tmp_buf + pos, len);
- oob += len;
- if (len < ecc_bytes) {
- len = ecc_bytes - len;
- memcpy(oob, tmp_buf + writesize + oob_skip,
- len);
- oob += len;
- }
- }
-
- /* OOB free */
- len = oobsize - (oob - chip->oob_poi);
- memcpy(oob, tmp_buf + size - len, len);
- }
-
- return 0;
-}
-
-static int denali_read_oob(struct nand_chip *chip, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
- denali_oob_xfer(mtd, chip, page, 0);
-
- return 0;
-}
-
-static int denali_write_oob(struct nand_chip *chip, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
- denali_oob_xfer(mtd, chip, page, 1);
-
- return nand_prog_page_end_op(chip);
-}
-
-static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
+static int denali_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
unsigned long uncor_ecc_flags = 0;
int stat = 0;
int ret;
- ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
+ ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
if (ret && ret != -EBADMSG)
return ret;
if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
- stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
+ stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
else if (ret == -EBADMSG)
- stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
+ stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);
if (stat < 0)
return stat;
@@ -783,130 +747,32 @@ static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
if (ret)
return ret;
- stat = denali_check_erased_page(mtd, chip, buf,
+ stat = denali_check_erased_page(chip, buf,
uncor_ecc_flags, stat);
}
return stat;
}
-static int denali_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- int writesize = mtd->writesize;
- int oobsize = mtd->oobsize;
- int ecc_steps = chip->ecc.steps;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
- void *tmp_buf = denali->buf;
- int oob_skip = denali->oob_skip_bytes;
- size_t size = writesize + oobsize;
- int i, pos, len;
-
- /*
- * Fill the buffer with 0xff first except the full page transfer.
- * This simplifies the logic.
- */
- if (!buf || !oob_required)
- memset(tmp_buf, 0xff, size);
-
- /* Arrange the buffer for syndrome payload/ecc layout */
- if (buf) {
- for (i = 0; i < ecc_steps; i++) {
- pos = i * (ecc_size + ecc_bytes);
- len = ecc_size;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- memcpy(tmp_buf + pos, buf, len);
- buf += len;
- if (len < ecc_size) {
- len = ecc_size - len;
- memcpy(tmp_buf + writesize + oob_skip, buf,
- len);
- buf += len;
- }
- }
- }
-
- if (oob_required) {
- const uint8_t *oob = chip->oob_poi;
-
- /* BBM at the beginning of the OOB area */
- memcpy(tmp_buf + writesize, oob, oob_skip);
- oob += oob_skip;
-
- /* OOB ECC */
- for (i = 0; i < ecc_steps; i++) {
- pos = ecc_size + i * (ecc_size + ecc_bytes);
- len = ecc_bytes;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- memcpy(tmp_buf + pos, oob, len);
- oob += len;
- if (len < ecc_bytes) {
- len = ecc_bytes - len;
- memcpy(tmp_buf + writesize + oob_skip, oob,
- len);
- oob += len;
- }
- }
-
- /* OOB free */
- len = oobsize - (oob - chip->oob_poi);
- memcpy(tmp_buf + size - len, oob, len);
- }
-
- return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
-}
-
-static int denali_write_page(struct nand_chip *chip, const uint8_t *buf,
+static int denali_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- return denali_data_xfer(denali, (void *)buf, mtd->writesize,
- page, 0, 1);
-}
-
-static void denali_select_chip(struct nand_chip *chip, int cs)
-{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
-
- denali->active_bank = cs;
-}
-
-static int denali_waitfunc(struct nand_chip *chip)
-{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- uint32_t irq_status;
-
- /* R/B# pin transitioned from low to high? */
- irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
-
- return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
+ return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
}
static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct denali_chip_sel *sel;
const struct nand_sdr_timings *timings;
unsigned long t_x, mult_x;
int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
int addr_2_data_mask;
- uint32_t tmp;
+ u32 tmp;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
@@ -929,6 +795,8 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
+ sel = &to_denali_chip(chip)->sels[chipnr];
+
/* tREA -> ACC_CLKS */
acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
@@ -936,7 +804,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
- iowrite32(tmp, denali->reg + ACC_CLKS);
+ sel->acc_clks = tmp;
/* tRWH -> RE_2_WE */
re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
@@ -945,7 +813,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
- iowrite32(tmp, denali->reg + RE_2_WE);
+ sel->re_2_we = tmp;
/* tRHZ -> RE_2_RE */
re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
@@ -954,7 +822,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
- iowrite32(tmp, denali->reg + RE_2_RE);
+ sel->re_2_re = tmp;
/*
* tCCS, tWHR -> WE_2_RE
@@ -968,7 +836,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
- iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
+ sel->hwhr2_and_we_2_re = tmp;
/* tADL -> ADDR_2_DATA */
@@ -983,7 +851,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
- iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
+ sel->tcwaw_and_addr_2_data = tmp;
/* tREH, tWH -> RDWR_EN_HI_CNT */
rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
@@ -993,7 +861,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
- iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
+ sel->rdwr_en_hi_cnt = tmp;
/* tRP, tWP -> RDWR_EN_LO_CNT */
rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
@@ -1006,7 +874,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
- iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
+ sel->rdwr_en_lo_cnt = tmp;
/* tCS, tCEA -> CS_SETUP_CNT */
cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
@@ -1017,39 +885,11 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
- iowrite32(tmp, denali->reg + CS_SETUP_CNT);
+ sel->cs_setup_cnt = tmp;
return 0;
}
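Each timing in the hunk above is converted from picoseconds to bus-clock cycles with DIV_ROUND_UP, clamped to the register field's width, and then cached in the per-CS denali_chip_sel; denali_select_target() writes the cached values when the target is selected. A small worked example of the tREA path under assumed numbers (200 MHz bus clock, 16 ns tREA, 4-bit field); these figures are illustrative, the driver takes them from clk_x_rate and nand_get_sdr_timings():

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned long clk_x_rate = 200000000UL;	/* bus clock, Hz */
	/* bus clock period in picoseconds: 5000 ps at 200 MHz */
	const unsigned long long t_x = 1000000000000ULL / clk_x_rate;
	const unsigned long long tREA_max = 16000;	/* assumed tREA, ps */
	const int acc_clks_max = 15;	/* assumed width of ACC_CLKS field */
	int acc_clks;

	acc_clks = (int)DIV_ROUND_UP(tREA_max, t_x);	/* ceil(16 ns / 5 ns) = 4 */
	if (acc_clks > acc_clks_max)
		acc_clks = acc_clks_max;

	printf("t_x = %llu ps -> ACC_CLKS = %d\n", t_x, acc_clks);
	return 0;
}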
-static void denali_hw_init(struct denali_nand_info *denali)
-{
- /*
- * The REVISION register may not be reliable. Platforms are allowed to
- * override it.
- */
- if (!denali->revision)
- denali->revision = swab16(ioread32(denali->reg + REVISION));
-
- /*
- * Set how many bytes should be skipped before writing data in OOB.
- * If a non-zero value has already been set (by firmware or something),
- * just use it. Otherwise, set the driver default.
- */
- denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- if (!denali->oob_skip_bytes) {
- denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
- iowrite32(denali->oob_skip_bytes,
- denali->reg + SPARE_AREA_SKIP_BYTES);
- }
-
- denali_detect_max_banks(denali);
- iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
- iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
-
- iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
-}
-
int denali_calc_ecc_bytes(int step_size, int strength)
{
/* BCH code. Denali requires ecc.bytes to be multiple of 2 */
@@ -1060,10 +900,10 @@ EXPORT_SYMBOL(denali_calc_ecc_bytes);
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
- if (section)
+ if (section > 0)
return -ERANGE;
oobregion->offset = denali->oob_skip_bytes;
@@ -1075,10 +915,10 @@ static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
static int denali_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
- if (section)
+ if (section > 0)
return -ERANGE;
oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
@@ -1092,10 +932,13 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
-static int denali_multidev_fixup(struct denali_nand_info *denali)
+static int denali_multidev_fixup(struct nand_chip *chip)
{
- struct nand_chip *chip = &denali->nand;
+ struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
/*
* Support for multi device:
@@ -1125,11 +968,12 @@ static int denali_multidev_fixup(struct denali_nand_info *denali)
}
/* 2 chips in parallel */
+ memorg->pagesize <<= 1;
+ memorg->oobsize <<= 1;
mtd->size <<= 1;
mtd->erasesize <<= 1;
mtd->writesize <<= 1;
mtd->oobsize <<= 1;
- chip->chipsize <<= 1;
chip->page_shift += 1;
chip->phys_erase_shift += 1;
chip->bbt_erase_shift += 1;
@@ -1145,38 +989,10 @@ static int denali_multidev_fixup(struct denali_nand_info *denali)
static int denali_attach_chip(struct nand_chip *chip)
{
+ struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
int ret;
- if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
- denali->dma_avail = 1;
-
- if (denali->dma_avail) {
- int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
-
- ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
- if (ret) {
- dev_info(denali->dev,
- "Failed to set DMA mask. Disabling DMA.\n");
- denali->dma_avail = 0;
- }
- }
-
- if (denali->dma_avail) {
- chip->options |= NAND_USE_BOUNCE_BUFFER;
- chip->buf_align = 16;
- if (denali->caps & DENALI_CAP_DMA_64BIT)
- denali->setup_dma = denali_setup_dma64;
- else
- denali->setup_dma = denali_setup_dma32;
- }
-
- chip->bbt_options |= NAND_BBT_USE_FLASH;
- chip->bbt_options |= NAND_BBT_NO_OOB;
- chip->ecc.mode = NAND_ECC_HW_SYNDROME;
- chip->options |= NAND_NO_SUBPAGE_WRITE;
-
ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
mtd->oobsize - denali->oob_skip_bytes);
if (ret) {
@@ -1188,123 +1004,230 @@ static int denali_attach_chip(struct nand_chip *chip)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
- iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
- FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
- denali->reg + ECC_CORRECTION);
- iowrite32(mtd->erasesize / mtd->writesize,
- denali->reg + PAGES_PER_BLOCK);
- iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
- denali->reg + DEVICE_WIDTH);
- iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
- denali->reg + TWO_ROW_ADDR_CYCLES);
- iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
- iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
+ ret = denali_multidev_fixup(chip);
+ if (ret)
+ return ret;
- iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
- iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
- /* chip->ecc.steps is set by nand_scan_tail(); not available here */
- iowrite32(mtd->writesize / chip->ecc.size,
- denali->reg + CFG_NUM_DATA_BLOCKS);
+ return 0;
+}
- mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
+static void denali_exec_in8(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len)
+{
+ int i;
- if (chip->options & NAND_BUSWIDTH_16) {
- chip->legacy.read_buf = denali_read_buf16;
- chip->legacy.write_buf = denali_write_buf16;
- } else {
- chip->legacy.read_buf = denali_read_buf;
- chip->legacy.write_buf = denali_write_buf;
+ for (i = 0; i < len; i++)
+ buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
+}
+
+static void denali_exec_in16(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < len; i += 2) {
+ data = denali->host_read(denali, type | DENALI_BANK(denali));
+ /* bit 31:24 and 15:8 are used for DDR */
+ buf[i] = data;
+ buf[i + 1] = data >> 16;
}
- chip->ecc.read_page = denali_read_page;
- chip->ecc.read_page_raw = denali_read_page_raw;
- chip->ecc.write_page = denali_write_page;
- chip->ecc.write_page_raw = denali_write_page_raw;
- chip->ecc.read_oob = denali_read_oob;
- chip->ecc.write_oob = denali_write_oob;
+}
- ret = denali_multidev_fixup(denali);
- if (ret)
- return ret;
+static void denali_exec_in(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len, bool width16)
+{
+ if (width16)
+ denali_exec_in16(denali, type, buf, len);
+ else
+ denali_exec_in8(denali, type, buf, len);
+}
- /*
- * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
- * use devm_kmalloc() because the memory allocated by devm_ does not
- * guarantee DMA-safe alignment.
- */
- denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
- if (!denali->buf)
- return -ENOMEM;
+static void denali_exec_out8(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len)
+{
+ int i;
- return 0;
+ for (i = 0; i < len; i++)
+ denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
}
-static void denali_detach_chip(struct nand_chip *chip)
+static void denali_exec_out16(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ denali->host_write(denali, type | DENALI_BANK(denali),
+ buf[i + 1] << 16 | buf[i]);
+}
- kfree(denali->buf);
+static void denali_exec_out(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len, bool width16)
+{
+ if (width16)
+ denali_exec_out16(denali, type, buf, len);
+ else
+ denali_exec_out8(denali, type, buf, len);
+}
+
+static int denali_exec_waitrdy(struct denali_controller *denali)
+{
+ u32 irq_stat;
+
+ /* R/B# pin transitioned from low to high? */
+ irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);
+
+ /* Clear again in case the operation contains another NAND_OP_WAITRDY_INSTR. */
+ denali_reset_irq(denali);
+
+ return irq_stat & INTR__INT_ACT ? 0 : -EIO;
+}
+
+static int denali_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ denali_exec_out8(denali, DENALI_MAP11_CMD,
+ &instr->ctx.cmd.opcode, 1);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ denali_exec_out8(denali, DENALI_MAP11_ADDR,
+ instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ denali_exec_in(denali, DENALI_MAP11_DATA,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ !instr->ctx.data.force_8bit &&
+ chip->options & NAND_BUSWIDTH_16);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ denali_exec_out(denali, DENALI_MAP11_DATA,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ !instr->ctx.data.force_8bit &&
+ chip->options & NAND_BUSWIDTH_16);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return denali_exec_waitrdy(denali);
+ default:
+ WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
+ instr->type);
+
+ return -EINVAL;
+ }
+}
+
+static int denali_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ int i, ret;
+
+ if (check_only)
+ return 0;
+
+ denali_select_target(chip, op->cs);
+
+ /*
+ * Some operations contain a NAND_OP_WAITRDY_INSTR instruction. The IRQ
+ * status must be cleared here to catch the R/B# interrupt there.
+ */
+ denali_reset_irq(to_denali_controller(chip));
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = denali_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
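With ->exec_op() in place, the NAND core hands the driver a flat array of instructions (command, address, data in/out, wait-ready) instead of driving the legacy cmd_ctrl/read_byte hooks that this patch removes; denali_exec_instr() above maps each instruction type onto a MAP11 access or an R/B# wait. A minimal user-space model of that dispatch loop follows; the reduced instruction set only mirrors the shape of the kernel's nand_op_instr, the names and fields are stand-ins rather than the rawnand API:

#include <stdio.h>

enum op_type { OP_CMD, OP_ADDR, OP_DATA_IN, OP_WAITRDY };

struct op_instr {
	enum op_type type;
	unsigned char cmd;		/* OP_CMD */
	unsigned int naddrs;		/* OP_ADDR */
	unsigned int len;		/* OP_DATA_IN */
};

static int exec_instr(const struct op_instr *in)
{
	switch (in->type) {
	case OP_CMD:
		printf("MAP11 CMD  0x%02x\n", in->cmd);
		return 0;
	case OP_ADDR:
		printf("MAP11 ADDR, %u cycles\n", in->naddrs);
		return 0;
	case OP_DATA_IN:
		printf("MAP11 DATA in, %u bytes\n", in->len);
		return 0;
	case OP_WAITRDY:
		printf("wait for R/B# (INTR__INT_ACT)\n");
		return 0;
	}
	return -1;
}

int main(void)
{
	/* roughly what a READ PAGE operation decomposes into */
	const struct op_instr read_page[] = {
		{ .type = OP_CMD, .cmd = 0x00 },
		{ .type = OP_ADDR, .naddrs = 5 },
		{ .type = OP_CMD, .cmd = 0x30 },
		{ .type = OP_WAITRDY },
		{ .type = OP_DATA_IN, .len = 2048 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(read_page) / sizeof(read_page[0]); i++)
		if (exec_instr(&read_page[i]))
			return 1;
	return 0;
}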
static const struct nand_controller_ops denali_controller_ops = {
.attach_chip = denali_attach_chip,
- .detach_chip = denali_detach_chip,
+ .exec_op = denali_exec_op,
.setup_data_interface = denali_setup_data_interface,
};
-int denali_init(struct denali_nand_info *denali)
+int denali_chip_init(struct denali_controller *denali,
+ struct denali_chip *dchip)
{
- struct nand_chip *chip = &denali->nand;
+ struct nand_chip *chip = &dchip->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
- u32 features = ioread32(denali->reg + FEATURES);
- int ret;
+ struct denali_chip *dchip2;
+ int i, j, ret;
- mtd->dev.parent = denali->dev;
- denali_hw_init(denali);
+ chip->controller = &denali->controller;
- init_completion(&denali->complete);
- spin_lock_init(&denali->irq_lock);
+ /* sanity checks for bank numbers */
+ for (i = 0; i < dchip->nsels; i++) {
+ unsigned int bank = dchip->sels[i].bank;
- denali_clear_irq_all(denali);
+ if (bank >= denali->nbanks) {
+ dev_err(denali->dev, "unsupported bank %d\n", bank);
+ return -EINVAL;
+ }
- ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
- IRQF_SHARED, DENALI_NAND_NAME, denali);
- if (ret) {
- dev_err(denali->dev, "Unable to request IRQ\n");
- return ret;
- }
+ for (j = 0; j < i; j++) {
+ if (bank == dchip->sels[j].bank) {
+ dev_err(denali->dev,
+ "bank %d is assigned twice in the same chip\n",
+ bank);
+ return -EINVAL;
+ }
+ }
- denali_enable_irq(denali);
+ list_for_each_entry(dchip2, &denali->chips, node) {
+ for (j = 0; j < dchip2->nsels; j++) {
+ if (bank == dchip2->sels[j].bank) {
+ dev_err(denali->dev,
+ "bank %d is already used\n",
+ bank);
+ return -EINVAL;
+ }
+ }
+ }
+ }
- denali->active_bank = DENALI_INVALID_BANK;
+ mtd->dev.parent = denali->dev;
- nand_set_flash_node(chip, denali->dev->of_node);
- /* Fallback to the default name if DT did not give "label" property */
- if (!mtd->name)
+ /*
+ * Fall back to the default name if DT did not provide a "label" property.
+ * When multiple chips are connected, each chip node must carry its own
+ * "label".
+ */
+ if (!mtd->name && list_empty(&denali->chips))
mtd->name = "denali-nand";
- chip->legacy.select_chip = denali_select_chip;
- chip->legacy.read_byte = denali_read_byte;
- chip->legacy.write_byte = denali_write_byte;
- chip->legacy.cmd_ctrl = denali_cmd_ctrl;
- chip->legacy.waitfunc = denali_waitfunc;
-
- if (features & FEATURES__INDEX_ADDR) {
- denali->host_read = denali_indexed_read;
- denali->host_write = denali_indexed_write;
- } else {
- denali->host_read = denali_direct_read;
- denali->host_write = denali_direct_write;
+ if (denali->dma_avail) {
+ chip->options |= NAND_USE_BOUNCE_BUFFER;
+ chip->buf_align = 16;
}
/* clk rate info is needed for setup_data_interface */
if (!denali->clk_rate || !denali->clk_x_rate)
chip->options |= NAND_KEEP_TIMINGS;
- chip->legacy.dummy_controller.ops = &denali_controller_ops;
- ret = nand_scan(chip, denali->max_banks);
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.read_page = denali_read_page;
+ chip->ecc.write_page = denali_write_page;
+ chip->ecc.read_page_raw = denali_read_page_raw;
+ chip->ecc.write_page_raw = denali_write_page_raw;
+ chip->ecc.read_oob = denali_read_oob;
+ chip->ecc.write_oob = denali_write_oob;
+
+ mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
+
+ ret = nand_scan(chip, dchip->nsels);
if (ret)
- goto disable_irq;
+ return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
@@ -1312,20 +1235,111 @@ int denali_init(struct denali_nand_info *denali)
goto cleanup_nand;
}
+ list_add_tail(&dchip->node, &denali->chips);
+
return 0;
cleanup_nand:
nand_cleanup(chip);
-disable_irq:
- denali_disable_irq(denali);
return ret;
}
+EXPORT_SYMBOL_GPL(denali_chip_init);
+
+int denali_init(struct denali_controller *denali)
+{
+ u32 features = ioread32(denali->reg + FEATURES);
+ int ret;
+
+ nand_controller_init(&denali->controller);
+ denali->controller.ops = &denali_controller_ops;
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
+ INIT_LIST_HEAD(&denali->chips);
+ denali->active_bank = DENALI_INVALID_BANK;
+
+ /*
+ * The REVISION register may not be reliable. Platforms are allowed to
+ * override it.
+ */
+ if (!denali->revision)
+ denali->revision = swab16(ioread32(denali->reg + REVISION));
+
+ denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
+
+ /* the encoding changed from rev 5.0 to 5.1 */
+ if (denali->revision < 0x0501)
+ denali->nbanks <<= 1;
+
+ if (features & FEATURES__DMA)
+ denali->dma_avail = true;
+
+ if (denali->dma_avail) {
+ int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
+
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
+ if (ret) {
+ dev_info(denali->dev,
+ "Failed to set DMA mask. Disabling DMA.\n");
+ denali->dma_avail = false;
+ }
+ }
+
+ if (denali->dma_avail) {
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
+ }
+
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
+ /*
+ * Set how many bytes should be skipped before writing data in OOB.
+ * If a non-zero value has already been set (by firmware, for example),
+ * just use it. Otherwise, set the driver's default.
+ */
+ denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
+ if (!denali->oob_skip_bytes) {
+ denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
+ iowrite32(denali->oob_skip_bytes,
+ denali->reg + SPARE_AREA_SKIP_BYTES);
+ }
+
+ iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
+ iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
+ iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+
+ denali_clear_irq_all(denali);
+
+ ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+ IRQF_SHARED, DENALI_NAND_NAME, denali);
+ if (ret) {
+ dev_err(denali->dev, "Unable to request IRQ\n");
+ return ret;
+ }
+
+ denali_enable_irq(denali);
+
+ return 0;
+}
EXPORT_SYMBOL(denali_init);
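denali_init() above now derives the bank count from the FEATURES register, doubling it on pre-5.1 revisions where the encoding differed, and builds the RB_PIN_ENABLED mask from that count instead of the old hard-coded 0x0F. A quick illustration of the decode under assumed register values, with a user-space stand-in for the kernel's GENMASK():

#include <stdio.h>

/* 32-bit user-space stand-in for the kernel's GENMASK() */
#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & ~((1U << (l)) - 1))

int main(void)
{
	/* assumed example: FEATURES__N_BANKS field reads back as 2 */
	unsigned int n_banks_field = 2;
	unsigned int revision = 0x0500;		/* pre-5.1 encoding */
	unsigned int nbanks = 1 << n_banks_field;

	if (revision < 0x0501)
		nbanks <<= 1;

	/* prints: nbanks = 8, RB_PIN_ENABLED mask = 0xff */
	printf("nbanks = %u, RB_PIN_ENABLED mask = 0x%x\n",
	       nbanks, GENMASK(nbanks - 1, 0));
	return 0;
}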
-void denali_remove(struct denali_nand_info *denali)
+void denali_remove(struct denali_controller *denali)
{
- nand_release(&denali->nand);
+ struct denali_chip *dchip;
+
+ list_for_each_entry(dchip, &denali->chips, node)
+ nand_release(&dchip->chip);
+
denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index c8c2620fc736..e5cdcda56d14 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -9,6 +9,7 @@
#include <linux/bits.h>
#include <linux/completion.h>
+#include <linux/list.h>
#include <linux/mtd/rawnand.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
@@ -290,38 +291,108 @@
#define CHNL_ACTIVE__CHANNEL2 BIT(2)
#define CHNL_ACTIVE__CHANNEL3 BIT(3)
-struct denali_nand_info {
- struct nand_chip nand;
- unsigned long clk_rate; /* core clock rate */
- unsigned long clk_x_rate; /* bus interface clock rate */
- int active_bank; /* currently selected bank */
+/**
+ * struct denali_chip_sel - per-CS data of Denali NAND
+ *
+ * @bank: bank id of the controller this CS is connected to
+ * @hwhr2_and_we_2_re: value of timing register HWHR2_AND_WE_2_RE
+ * @tcwaw_and_addr_2_data: value of timing register TCWAW_AND_ADDR_2_DATA
+ * @re_2_we: value of timing register RE_2_WE
+ * @acc_clks: value of timing register ACC_CLKS
+ * @rdwr_en_lo_cnt: value of timing register RDWR_EN_LO_CNT
+ * @rdwr_en_hi_cnt: value of timing register RDWR_EN_HI_CNT
+ * @cs_setup_cnt: value of timing register CS_SETUP_CNT
+ * @re_2_re: value of timing register RE_2_RE
+ */
+struct denali_chip_sel {
+ int bank;
+ u32 hwhr2_and_we_2_re;
+ u32 tcwaw_and_addr_2_data;
+ u32 re_2_we;
+ u32 acc_clks;
+ u32 rdwr_en_lo_cnt;
+ u32 rdwr_en_hi_cnt;
+ u32 cs_setup_cnt;
+ u32 re_2_re;
+};
+
+/**
+ * struct denali_chip - per-chip data of Denali NAND
+ *
+ * @chip: base NAND chip structure
+ * @node: node to be used to associate this chip with the controller
+ * @nsels: the number of CS lines of this chip
+ * @sels: the array of per-CS data
+ */
+struct denali_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ unsigned int nsels;
+ struct denali_chip_sel sels[0];
+};
+
+/**
+ * struct denali_controller - Denali NAND controller data
+ *
+ * @controller: base NAND controller structure
+ * @dev: device
+ * @chips: the list of chips attached to this controller
+ * @clk_rate: frequency of core clock
+ * @clk_x_rate: frequency of bus interface clock
+ * @reg: base of Register Interface
+ * @host: base of Host Data/Command interface
+ * @complete: completion used to wait for interrupts
+ * @irq: interrupt number
+ * @irq_mask: interrupt bits the controller is waiting for
+ * @irq_status: interrupt bits of events that have happened
+ * @irq_lock: lock to protect @irq_mask and @irq_status
+ * @dma_avail: set if DMA engine is available
+ * @devs_per_cs: number of devices connected in parallel
+ * @oob_skip_bytes: number of bytes in OOB skipped by the ECC engine
+ * @active_bank: active bank id
+ * @nbanks: the number of banks supported by this controller
+ * @revision: IP revision
+ * @caps: controller capabilities that cannot be detected at run time
+ * @ecc_caps: ECC engine capabilities
+ * @host_read: callback for read access of Host Data/Command Interface
+ * @host_write: callback for write access of Host Data/Command Interface
+ * @setup_dma: callback for setup of the Data DMA
+ */
+struct denali_controller {
+ struct nand_controller controller;
struct device *dev;
- void __iomem *reg; /* Register Interface */
- void __iomem *host; /* Host Data/Command Interface */
+ struct list_head chips;
+ unsigned long clk_rate;
+ unsigned long clk_x_rate;
+ void __iomem *reg;
+ void __iomem *host;
struct completion complete;
- spinlock_t irq_lock; /* protect irq_mask and irq_status */
- u32 irq_mask; /* interrupts we are waiting for */
- u32 irq_status; /* interrupts that have happened */
int irq;
- void *buf; /* for syndrome layout conversion */
- int dma_avail; /* can support DMA? */
- int devs_per_cs; /* devices connected in parallel */
- int oob_skip_bytes; /* number of bytes reserved for BBM */
- int max_banks;
- unsigned int revision; /* IP revision */
- unsigned int caps; /* IP capability (or quirk) */
+ u32 irq_mask;
+ u32 irq_status;
+ spinlock_t irq_lock;
+ bool dma_avail;
+ int devs_per_cs;
+ int oob_skip_bytes;
+ int active_bank;
+ int nbanks;
+ unsigned int revision;
+ unsigned int caps;
const struct nand_ecc_caps *ecc_caps;
- u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
- void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
- void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
- int page, int write);
+ u32 (*host_read)(struct denali_controller *denali, u32 addr);
+ void (*host_write)(struct denali_controller *denali, u32 addr,
+ u32 data);
+ void (*setup_dma)(struct denali_controller *denali, dma_addr_t dma_addr,
+ int page, bool write);
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
#define DENALI_CAP_DMA_64BIT BIT(1)
int denali_calc_ecc_bytes(int step_size, int strength);
-int denali_init(struct denali_nand_info *denali);
-void denali_remove(struct denali_nand_info *denali);
+int denali_chip_init(struct denali_controller *denali,
+ struct denali_chip *dchip);
+int denali_init(struct denali_controller *denali);
+void denali_remove(struct denali_controller *denali);
#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 0b5ae2418815..5e14836f6bd5 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -18,7 +18,7 @@
#include "denali.h"
struct denali_dt {
- struct denali_nand_info denali;
+ struct denali_controller controller;
struct clk *clk; /* core clock */
struct clk *clk_x; /* bus interface clock */
struct clk *clk_ecc; /* ECC circuit clock */
@@ -71,19 +71,92 @@ static const struct of_device_id denali_nand_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+static int denali_dt_chip_init(struct denali_controller *denali,
+ struct device_node *chip_np)
+{
+ struct denali_chip *dchip;
+ u32 bank;
+ int nsels, i, ret;
+
+ nsels = of_property_count_u32_elems(chip_np, "reg");
+ if (nsels < 0)
+ return nsels;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip)
+ return -ENOMEM;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(chip_np, "reg", i, &bank);
+ if (ret)
+ return ret;
+
+ dchip->sels[i].bank = bank;
+
+ nand_set_flash_node(&dchip->chip, chip_np);
+ }
+
+ return denali_chip_init(denali, dchip);
+}
+
+/* Backward compatibility for old platforms */
+static int denali_dt_legacy_chip_init(struct denali_controller *denali)
+{
+ struct denali_chip *dchip;
+ int nsels, i;
+
+ nsels = denali->nbanks;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip)
+ return -ENOMEM;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++)
+ dchip->sels[i].bank = i;
+
+ nand_set_flash_node(&dchip->chip, denali->dev->of_node);
+
+ return denali_chip_init(denali, dchip);
+}
+
+/*
+ * Check which DT binding is in use.
+ * The new binding expects chip subnodes in the controller node, which means
+ * #address-cells = <1> and #size-cells = <0> are required there.
+ * Use the #size-cells value to tell the two bindings apart.
+ */
+static bool denali_dt_is_legacy_binding(struct device_node *np)
+{
+ u32 cells;
+ int ret;
+
+ ret = of_property_read_u32(np, "#size-cells", &cells);
+ if (ret)
+ return true;
+
+ return cells != 0;
+}
+
static int denali_dt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct denali_dt *dt;
const struct denali_dt_data *data;
- struct denali_nand_info *denali;
+ struct denali_controller *denali;
+ struct device_node *np;
int ret;
dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
if (!dt)
return -ENOMEM;
- denali = &dt->denali;
+ denali = &dt->controller;
data = of_device_get_match_data(dev);
if (data) {
@@ -140,9 +213,26 @@ static int denali_dt_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk_ecc;
+ if (denali_dt_is_legacy_binding(dev->of_node)) {
+ ret = denali_dt_legacy_chip_init(denali);
+ if (ret)
+ goto out_remove_denali;
+ } else {
+ for_each_child_of_node(dev->of_node, np) {
+ ret = denali_dt_chip_init(denali, np);
+ if (ret) {
+ of_node_put(np);
+ goto out_remove_denali;
+ }
+ }
+ }
+
platform_set_drvdata(pdev, dt);
+
return 0;
+out_remove_denali:
+ denali_remove(denali);
out_disable_clk_ecc:
clk_disable_unprepare(dt->clk_ecc);
out_disable_clk_x:
@@ -157,7 +247,7 @@ static int denali_dt_remove(struct platform_device *pdev)
{
struct denali_dt *dt = platform_get_drvdata(pdev);
- denali_remove(&dt->denali);
+ denali_remove(&dt->controller);
clk_disable_unprepare(dt->clk_ecc);
clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index 48e9ac54ad53..d62aa5271753 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -29,10 +29,11 @@ NAND_ECC_CAPS_SINGLE(denali_pci_ecc_caps, denali_calc_ecc_bytes, 512, 8, 15);
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int ret;
resource_size_t csr_base, mem_base;
unsigned long csr_len, mem_len;
- struct denali_nand_info *denali;
+ struct denali_controller *denali;
+ struct denali_chip *dchip;
+ int nsels, ret, i;
denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
if (!denali)
@@ -64,7 +65,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->dev = &dev->dev;
denali->irq = dev->irq;
denali->ecc_caps = &denali_pci_ecc_caps;
- denali->nand.ecc.options |= NAND_ECC_MAXIMIZE;
denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
@@ -84,27 +84,49 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
ret = -ENOMEM;
- goto failed_remap_reg;
+ goto out_unmap_reg;
}
ret = denali_init(denali);
if (ret)
- goto failed_remap_mem;
+ goto out_unmap_host;
+
+ nsels = denali->nbanks;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip) {
+ ret = -ENOMEM;
+ goto out_remove_denali;
+ }
+
+ dchip->chip.ecc.options |= NAND_ECC_MAXIMIZE;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++)
+ dchip->sels[i].bank = i;
+
+ ret = denali_chip_init(denali, dchip);
+ if (ret)
+ goto out_remove_denali;
pci_set_drvdata(dev, denali);
return 0;
-failed_remap_mem:
+out_remove_denali:
+ denali_remove(denali);
+out_unmap_host:
iounmap(denali->host);
-failed_remap_reg:
+out_unmap_reg:
iounmap(denali->reg);
return ret;
}
static void denali_pci_remove(struct pci_dev *dev)
{
- struct denali_nand_info *denali = pci_get_drvdata(dev);
+ struct denali_controller *denali = pci_get_drvdata(dev);
denali_remove(denali);
iounmap(denali->reg);
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 53f57e0f007e..f430c4bf0323 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1028,6 +1028,7 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
+ struct nand_memory_organization *memorg;
int ret = 0;
u_char *buf;
struct NFTLMediaHeader *mh;
@@ -1036,6 +1037,8 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
unsigned blocks, maxblocks;
int offs, numheaders;
+ memorg = nanddev_get_memorg(&this->base);
+
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
return 0;
@@ -1082,6 +1085,7 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
implementation of the NAND layer. */
if (mh->UnitSizeFactor != 0xff) {
this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+ memorg->pages_per_eraseblock <<= (0xff - mh->UnitSizeFactor);
mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
pr_info("Setting virtual erase size to %d\n", mtd->erasesize);
blocks = mtd->size >> this->bbt_erase_shift;
@@ -1287,7 +1291,7 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
struct doc_priv *doc = nand_get_controller_data(this);
struct mtd_partition parts[5];
- if (this->numchips > doc->chips_per_floor) {
+ if (nanddev_ntargets(&this->base) > doc->chips_per_floor) {
pr_err("Multi-floor INFTL devices not yet supported.\n");
return -EIO;
}
@@ -1477,6 +1481,7 @@ static int __init doc_probe(unsigned long physadr)
break;
case DOC_ChipID_DocMilPlus32:
pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+ /* fall through */
default:
ret = -ENODEV;
goto notfound;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 70f0d2b450ea..423828ff68e6 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -355,6 +355,15 @@ static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
fsl_elbc_run_command(mtd);
return;
+ /* RNDOUT moves the pointer inside the page */
+ case NAND_CMD_RNDOUT:
+ dev_dbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_RNDOUT, column: 0x%x.\n",
+ column);
+
+ elbc_fcm_ctrl->index = column;
+ return;
+
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
dev_vdbg(priv->dev,
@@ -635,79 +644,6 @@ static int fsl_elbc_wait(struct nand_chip *chip)
return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
}
-static int fsl_elbc_attach_chip(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
- struct fsl_lbc_ctrl *ctrl = priv->ctrl;
- struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
- unsigned int al;
-
- /* calculate FMR Address Length field */
- al = 0;
- if (chip->pagemask & 0xffff0000)
- al++;
- if (chip->pagemask & 0xff000000)
- al++;
-
- priv->fmr |= al << FMR_AL_SHIFT;
-
- dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
- chip->numchips);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
- chip->chipsize);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
- chip->pagemask);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->legacy.chip_delay = %d\n",
- chip->legacy.chip_delay);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
- chip->badblockpos);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
- chip->chip_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
- chip->page_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
- chip->phys_erase_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
- chip->ecc.mode);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
- chip->ecc.steps);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
- chip->ecc.bytes);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
- chip->ecc.total);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
- mtd->ooblayout);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
- mtd->erasesize);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
- mtd->writesize);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
- mtd->oobsize);
-
- /* adjust Option Register and ECC to match Flash page size */
- if (mtd->writesize == 512) {
- priv->page_size = 0;
- clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
- } else if (mtd->writesize == 2048) {
- priv->page_size = 1;
- setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
- } else {
- dev_err(priv->dev,
- "fsl_elbc_init: page size %d is not supported\n",
- mtd->writesize);
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static const struct nand_controller_ops fsl_elbc_controller_ops = {
- .attach_chip = fsl_elbc_attach_chip,
-};
-
static int fsl_elbc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
@@ -794,27 +730,116 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->controller = &elbc_fcm_ctrl->controller;
nand_set_controller_data(chip, priv);
- chip->ecc.read_page = fsl_elbc_read_page;
- chip->ecc.write_page = fsl_elbc_write_page;
- chip->ecc.write_subpage = fsl_elbc_write_subpage;
-
- /* If CS Base Register selects full hardware ECC then use it */
- if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
- BR_DECC_CHK_GEN) {
- chip->ecc.mode = NAND_ECC_HW;
- mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.strength = 1;
+ return 0;
+}
+
+static int fsl_elbc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ unsigned int al;
+
+ switch (chip->ecc.mode) {
+ /*
+ * If no ECC mode was chosen in the DT, decide between HW and SW ECC
+ * based on the CS Base Register.
+ */
+ case NAND_ECC_NONE:
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+
+ chip->ecc.mode = NAND_ECC_HW;
+ mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ } else {
+ /* otherwise fall back to default software ECC */
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ }
+ break;
+
+ /* if SW ECC was chosen in DT, we do not need to set anything here */
+ case NAND_ECC_SOFT:
+ break;
+
+ /* should an explicitly requested NAND_ECC_HW also be handled like the code above? */
+ default:
+ return -EINVAL;
+ }
+
+ /* calculate FMR Address Length field */
+ al = 0;
+ if (chip->pagemask & 0xffff0000)
+ al++;
+ if (chip->pagemask & 0xff000000)
+ al++;
+
+ priv->fmr |= al << FMR_AL_SHIFT;
+
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
+ nanddev_ntargets(&chip->base));
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
+ nanddev_target_size(&chip->base));
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+ chip->pagemask);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->legacy.chip_delay = %d\n",
+ chip->legacy.chip_delay);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+ chip->badblockpos);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+ chip->chip_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+ chip->page_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
+ chip->ecc.mode);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+ chip->ecc.total);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
+ mtd->ooblayout);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+ mtd->erasesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+ mtd->writesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+ mtd->oobsize);
+
+ /* adjust Option Register and ECC to match Flash page size */
+ if (mtd->writesize == 512) {
+ priv->page_size = 0;
+ clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else if (mtd->writesize == 2048) {
+ priv->page_size = 1;
+ setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
} else {
- /* otherwise fall back to default software ECC */
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ dev_err(priv->dev,
+ "fsl_elbc_init: page size %d is not supported\n",
+ mtd->writesize);
+ return -ENOTSUPP;
}
return 0;
}
+static const struct nand_controller_ops fsl_elbc_controller_ops = {
+ .attach_chip = fsl_elbc_attach_chip,
+};
+
static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
{
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index e65d274399f9..04a3dcd675bf 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -722,9 +722,9 @@ static int fsl_ifc_attach_chip(struct nand_chip *chip)
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
- chip->numchips);
+ nanddev_ntargets(&chip->base));
dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
- chip->chipsize);
+ nanddev_target_size(&chip->base));
dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
chip->pagemask);
dev_dbg(priv->dev, "%s: nand->legacy.chip_delay = %d\n", __func__,
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index a4768df5083f..a8b26d2e793c 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -157,8 +157,7 @@ int gpmi_init(struct gpmi_nand_data *this)
* Reset BCH here, too. We got failures otherwise :(
* See later BCH reset for explanation of MX23 and MX28 handling
*/
- ret = gpmi_reset_block(r->bch_regs,
- GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
if (ret)
goto err_out;
@@ -266,8 +265,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23
* and MX28.
*/
- ret = gpmi_reset_block(r->bch_regs,
- GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
if (ret)
goto err_out;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index ed405c9434fe..40df20d1adf5 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -171,7 +171,7 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
struct bch_geometry *geo = &this->bch_geometry;
/* Do the sanity check. */
- if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+ if (GPMI_IS_MXS(this)) {
/* The mx23/mx28 only support the GF13. */
if (geo->gf_len == 14)
return false;
@@ -204,7 +204,8 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
default:
dev_err(this->dev,
"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
- chip->ecc_strength_ds, chip->ecc_step_ds);
+ chip->base.eccreq.strength,
+ chip->base.eccreq.step_size);
return -EINVAL;
}
geo->ecc_chunk_size = ecc_step;
@@ -417,11 +418,13 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
|| legacy_set_geometry(this)) {
- if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+ if (!(chip->base.eccreq.strength > 0 &&
+ chip->base.eccreq.step_size > 0))
return -EINVAL;
- return set_geometry_by_ecc_info(this, chip->ecc_strength_ds,
- chip->ecc_step_ds);
+ return set_geometry_by_ecc_info(this,
+ chip->base.eccreq.strength,
+ chip->base.eccreq.step_size);
}
return 0;
@@ -1602,7 +1605,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->data_buf;
+ u8 *buffer = nand_get_data_buf(chip);
int saved_chip_number;
int found_an_ncb_fingerprint = false;
@@ -1664,7 +1667,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
unsigned int block;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->data_buf;
+ u8 *buffer = nand_get_data_buf(chip);
int saved_chip_number;
int status;
@@ -1753,7 +1756,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
dev_dbg(dev, "Transcribing bad block marks...\n");
/* Compute the number of blocks in the entire medium. */
- block_count = chip->chipsize >> chip->phys_erase_shift;
+ block_count = nanddev_eraseblocks_per_target(&chip->base);
/*
* Loop over all the blocks in the medium, transcribing block marks as
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index d0b79bac2728..a804a4a5bd46 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -207,4 +207,5 @@ void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x) || \
GPMI_IS_MX7D(x))
+#define GPMI_IS_MXS(x) (GPMI_IS_MX23(x) || GPMI_IS_MX28(x))
#endif
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index f3f9aa160cff..e4526fff9da4 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -849,7 +849,7 @@ static int hisi_nfc_resume(struct device *dev)
struct hinfc_host *host = dev_get_drvdata(dev);
struct nand_chip *chip = &host->chip;
- for (cs = 0; cs < chip->numchips; cs++)
+ for (cs = 0; cs < nanddev_ntargets(&chip->base); cs++)
hisi_nfc_send_cmd_reset(host, cs);
hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
new file mode 100644
index 000000000000..7cfc77021154
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -0,0 +1,50 @@
+config MTD_NAND_JZ4740
+ tristate "JZ4740 NAND controller"
+ depends on MACH_JZ4740 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND Flash on JZ4740 SoC based boards.
+
+config MTD_NAND_JZ4780
+ tristate "JZ4780 NAND controller"
+ depends on JZ4780_NEMC
+ help
+ Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
+ based boards, using the BCH controller for hardware error correction.
+
+if MTD_NAND_JZ4780
+
+config MTD_NAND_INGENIC_ECC
+ tristate
+
+config MTD_NAND_JZ4740_ECC
+ tristate "Hardware BCH support for JZ4740 SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the Reed-Solomon error-correction
+ hardware present on the JZ4740 SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4740-ecc.
+
+config MTD_NAND_JZ4725B_BCH
+ tristate "Hardware BCH support for JZ4725B SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the BCH error-correction hardware
+ present on the JZ4725B SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4725b-bch.
+
+config MTD_NAND_JZ4780_BCH
+ tristate "Hardware BCH support for JZ4780 SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the BCH error-correction hardware
+ present on the JZ4780 SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4780-bch.
+
+endif # MTD_NAND_JZ4780
diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile
new file mode 100644
index 000000000000..ab2c5f47e5b7
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o
+
+obj-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o
+obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o
+obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
new file mode 100644
index 000000000000..d3e085c5685a
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ47xx ECC common code
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+/**
+ * ingenic_ecc_calculate() - calculate ECC for a data buffer
+ * @ecc: ECC device.
+ * @params: ECC parameters.
+ * @buf: input buffer with raw data.
+ * @ecc_code: output buffer with ECC.
+ *
+ * Return: 0 on success, -ETIMEDOUT if timed out while waiting for ECC
+ * controller.
+ */
+int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ return ecc->ops->calculate(ecc, params, buf, ecc_code);
+}
+EXPORT_SYMBOL(ingenic_ecc_calculate);
+
+/**
+ * ingenic_ecc_correct() - detect and correct bit errors
+ * @ecc: ECC device.
+ * @params: ECC parameters.
+ * @buf: raw data read from the chip.
+ * @ecc_code: ECC read from the chip.
+ *
+ * Given the raw data and the ECC read from the NAND device, detects and
+ * corrects errors in the data.
+ *
+ * Return: the number of bit errors corrected, -EBADMSG if there are too many
+ * errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
+ */
+int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ return ecc->ops->correct(ecc, params, buf, ecc_code);
+}
+EXPORT_SYMBOL(ingenic_ecc_correct);
+
+/**
+ * ingenic_ecc_get() - get the ECC controller device
+ * @np: ECC device tree node.
+ *
+ * Gets the ECC controller device from the specified device tree node. The
+ * device must be released with ingenic_ecc_release() when it is no longer being
+ * used.
+ *
+ * Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
+ * PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
+ */
+static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct ingenic_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev || !platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&pdev->dev);
+
+ ecc = platform_get_drvdata(pdev);
+ clk_prepare_enable(ecc->clk);
+
+ return ecc;
+}
+
+/**
+ * of_ingenic_ecc_get() - get the ECC controller from a DT node
+ * @of_node: the node that contains an ecc-engine property.
+ *
+ * Get the ecc-engine property from the given device tree
+ * node and pass it to ingenic_ecc_get to do the work.
+ *
+ * Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
+ * PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
+ */
+struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
+{
+ struct ingenic_ecc *ecc = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(of_node, "ecc-engine", 0);
+
+ /*
+ * If the ecc-engine property is not found, check for the deprecated
+ * ingenic,bch-controller property
+ */
+ if (!np)
+ np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
+
+ if (np) {
+ ecc = ingenic_ecc_get(np);
+ of_node_put(np);
+ }
+ return ecc;
+}
+EXPORT_SYMBOL(of_ingenic_ecc_get);
+
+/**
+ * ingenic_ecc_release() - release the ECC controller device
+ * @ecc: ECC device.
+ */
+void ingenic_ecc_release(struct ingenic_ecc *ecc)
+{
+ clk_disable_unprepare(ecc->clk);
+ put_device(ecc->dev);
+}
+EXPORT_SYMBOL(ingenic_ecc_release);
+
+int ingenic_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_ecc *ecc;
+ struct resource *res;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ ecc->ops = device_get_match_data(dev);
+ if (!ecc->ops)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ecc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ecc->base))
+ return PTR_ERR(ecc->base);
+
+ ecc->ops->disable(ecc);
+
+ ecc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ecc->clk)) {
+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+ return PTR_ERR(ecc->clk);
+ }
+
+ mutex_init(&ecc->lock);
+
+ ecc->dev = dev;
+ platform_set_drvdata(pdev, ecc);
+
+ return 0;
+}
+EXPORT_SYMBOL(ingenic_ecc_probe);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic ECC common driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
new file mode 100644
index 000000000000..2cda439b5e11
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__
+#define __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__
+
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <uapi/asm-generic/errno-base.h>
+
+struct clk;
+struct device;
+struct ingenic_ecc;
+struct platform_device;
+
+/**
+ * struct ingenic_ecc_params - ECC parameters
+ * @size: data bytes per ECC step.
+ * @bytes: ECC bytes per step.
+ * @strength: number of correctable bits per ECC step.
+ */
+struct ingenic_ecc_params {
+ int size;
+ int bytes;
+ int strength;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_INGENIC_ECC)
+int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code);
+int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params, u8 *buf,
+ u8 *ecc_code);
+
+void ingenic_ecc_release(struct ingenic_ecc *ecc);
+struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np);
+#else /* CONFIG_MTD_NAND_INGENIC_ECC */
+static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ return -ENODEV;
+}
+
+static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params, u8 *buf,
+ u8 *ecc_code)
+{
+ return -ENODEV;
+}
+
+static inline void ingenic_ecc_release(struct ingenic_ecc *ecc)
+{
+}
+
+static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_MTD_NAND_INGENIC_ECC */
+
+struct ingenic_ecc_ops {
+ void (*disable)(struct ingenic_ecc *ecc);
+ int (*calculate)(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code);
+ int (*correct)(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code);
+};
+
+struct ingenic_ecc {
+ struct device *dev;
+ const struct ingenic_ecc_ops *ops;
+ void __iomem *base;
+ struct clk *clk;
+ struct mutex lock;
+};
+
+int ingenic_ecc_probe(struct platform_device *pdev);
+
+#endif /* __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__ */
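As a quick usage reference, a rough sketch of a client of this helper API. The step parameters here (512 data bytes, 9 ECC bytes, strength 4) are assumed values for illustration; the real users below derive them from chip->ecc, and the sketch presumes "ingenic_ecc.h" and <linux/of.h> are included.

	static int example_compute_ecc(struct device_node *np,
				       const u8 *data, u8 *oob_ecc)
	{
		struct ingenic_ecc *ecc;
		struct ingenic_ecc_params params = {
			.size = 512,	/* data bytes per ECC step (assumed) */
			.bytes = 9,	/* ECC bytes per step (assumed) */
			.strength = 4,	/* correctable bits per step (assumed) */
		};
		int ret;

		/* may return ERR_PTR(-EPROBE_DEFER) if the engine is not up yet */
		ecc = of_ingenic_ecc_get(np);
		if (IS_ERR(ecc))
			return PTR_ERR(ecc);
		if (!ecc)
			return -ENODEV;	/* no ecc-engine property in the node */

		ret = ingenic_ecc_calculate(ecc, &params, data, oob_ecc);

		ingenic_ecc_release(ecc);
		return ret;
	}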
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand.c
new file mode 100644
index 000000000000..d7b7c0f13909
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic JZ47xx NAND driver
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/jz4780-nemc.h>
+
+#include "ingenic_ecc.h"
+
+#define DRV_NAME "ingenic-nand"
+
+/* Command delay when there is no R/B pin. */
+#define RB_DELAY_US 100
+
+struct jz_soc_info {
+ unsigned long data_offset;
+ unsigned long addr_offset;
+ unsigned long cmd_offset;
+ const struct mtd_ooblayout_ops *oob_layout;
+};
+
+struct ingenic_nand_cs {
+ unsigned int bank;
+ void __iomem *base;
+};
+
+struct ingenic_nfc {
+ struct device *dev;
+ struct ingenic_ecc *ecc;
+ const struct jz_soc_info *soc_info;
+ struct nand_controller controller;
+ unsigned int num_banks;
+ struct list_head chips;
+ int selected;
+ struct ingenic_nand_cs cs[];
+};
+
+struct ingenic_nand {
+ struct nand_chip chip;
+ struct list_head chip_list;
+
+ struct gpio_desc *busy_gpio;
+ struct gpio_desc *wp_gpio;
+ unsigned int reading: 1;
+};
+
+static inline struct ingenic_nand *to_ingenic_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct ingenic_nand, chip);
+}
+
+static inline struct ingenic_nfc *to_ingenic_nfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct ingenic_nfc, controller);
+}
+
+static int qi_lb60_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = 12;
+
+ return 0;
+}
+
+static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 12;
+ oobregion->offset = 12 + ecc->total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+ .ecc = qi_lb60_ooblayout_ecc,
+ .free = qi_lb60_ooblayout_free,
+};
+
+static int jz4725b_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = 3;
+
+ return 0;
+}
+
+static int jz4725b_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 3;
+ oobregion->offset = 3 + ecc->total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = {
+ .ecc = jz4725b_ooblayout_ecc,
+ .free = jz4725b_ooblayout_free,
+};
+
+static void ingenic_nand_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_nand_cs *cs;
+
+ /* Ensure the currently selected chip is deasserted. */
+ if (chipnr == -1 && nfc->selected >= 0) {
+ cs = &nfc->cs[nfc->selected];
+ jz4780_nemc_assert(nfc->dev, cs->bank, false);
+ }
+
+ nfc->selected = chipnr;
+}
+
+static void ingenic_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_nand_cs *cs;
+
+ if (WARN_ON(nfc->selected < 0))
+ return;
+
+ cs = &nfc->cs[nfc->selected];
+
+ jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_ALE)
+ writeb(cmd, cs->base + nfc->soc_info->addr_offset);
+ else if (ctrl & NAND_CLE)
+ writeb(cmd, cs->base + nfc->soc_info->cmd_offset);
+}
+
+static int ingenic_nand_dev_ready(struct nand_chip *chip)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+
+ return !gpiod_get_value_cansleep(nand->busy_gpio);
+}
+
+static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+
+ nand->reading = (mode == NAND_ECC_READ);
+}
+
+static int ingenic_nand_ecc_calculate(struct nand_chip *chip, const u8 *dat,
+ u8 *ecc_code)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_ecc_params params;
+
+ /*
+ * Don't need to generate the ECC when reading, the ECC engine does it
+ * for us as part of decoding/correction.
+ */
+ if (nand->reading)
+ return 0;
+
+ params.size = nand->chip.ecc.size;
+ params.bytes = nand->chip.ecc.bytes;
+ params.strength = nand->chip.ecc.strength;
+
+ return ingenic_ecc_calculate(nfc->ecc, &params, dat, ecc_code);
+}
+
+static int ingenic_nand_ecc_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_ecc_params params;
+
+ params.size = nand->chip.ecc.size;
+ params.bytes = nand->chip.ecc.bytes;
+ params.strength = nand->chip.ecc.strength;
+
+ return ingenic_ecc_correct(nfc->ecc, &params, dat, read_ecc);
+}
+
+static int ingenic_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+ int eccbytes;
+
+ if (chip->ecc.strength == 4) {
+ /* The JZ4740 uses 9 bytes of ECC to correct a maximum of 4 errors */
+ chip->ecc.bytes = 9;
+ } else {
+ chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
+ (chip->ecc.strength / 8);
+ }
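+ /*
+ * Illustrative example of the formula above: for a hypothetical
+ * 1024-byte ECC step with strength 24, fls(9 * 1024) = 14, so
+ * 14 * (24 / 8) = 42 ECC bytes per step.
+ */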
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_HW:
+ if (!nfc->ecc) {
+ dev_err(nfc->dev, "HW ECC selected, but ECC controller not found\n");
+ return -ENODEV;
+ }
+
+ chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
+ chip->ecc.calculate = ingenic_nand_ecc_calculate;
+ chip->ecc.correct = ingenic_nand_ecc_correct;
+ /* fall through */
+ case NAND_ECC_SOFT:
+ dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
+ (nfc->ecc) ? "hardware ECC" : "software ECC",
+ chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
+ break;
+ case NAND_ECC_NONE:
+ dev_info(nfc->dev, "not using ECC\n");
+ break;
+ default:
+ dev_err(nfc->dev, "ECC mode %d not supported\n",
+ chip->ecc.mode);
+ return -EINVAL;
+ }
+
+ /* The NAND core will generate the ECC layout for SW ECC */
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return 0;
+
+ /* Generate ECC layout. ECC codes are right aligned in the OOB area. */
+ eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
+
+ if (eccbytes > mtd->oobsize - 2) {
+ dev_err(nfc->dev,
+ "invalid ECC config: required %d ECC bytes, but only %d are available",
+ eccbytes, mtd->oobsize - 2);
+ return -EINVAL;
+ }
+
+ /*
+ * The generic layout for BBT markers will most likely overlap with our
+ * ECC bytes in the OOB, so move the BBT markers outside the OOB area.
+ */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* For legacy reasons we use a different layout on the qi,lb60 board. */
+ if (of_machine_is_compatible("qi,lb60"))
+ mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, nfc->soc_info->oob_layout);
+
+ return 0;
+}
+
+static const struct nand_controller_ops ingenic_nand_controller_ops = {
+ .attach_chip = ingenic_nand_attach_chip,
+};
+
+static int ingenic_nand_init_chip(struct platform_device *pdev,
+ struct ingenic_nfc *nfc,
+ struct device_node *np,
+ unsigned int chipnr)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_nand *nand;
+ struct ingenic_nand_cs *cs;
+ struct resource *res;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ const __be32 *reg;
+ int ret = 0;
+
+ cs = &nfc->cs[chipnr];
+
+ reg = of_get_property(np, "reg", NULL);
+ if (!reg)
+ return -EINVAL;
+
+ cs->bank = be32_to_cpu(*reg);
+
+ jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, chipnr);
+ cs->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cs->base))
+ return PTR_ERR(cs->base);
+
+ nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
+
+ if (IS_ERR(nand->busy_gpio)) {
+ ret = PTR_ERR(nand->busy_gpio);
+ dev_err(dev, "failed to request busy GPIO: %d\n", ret);
+ return ret;
+ } else if (nand->busy_gpio) {
+ nand->chip.legacy.dev_ready = ingenic_nand_dev_ready;
+ }
+
+ nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ dev_err(dev, "failed to request WP GPIO: %d\n", ret);
+ return ret;
+ }
+
+ chip = &nand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
+ cs->bank);
+ if (!mtd->name)
+ return -ENOMEM;
+ mtd->dev.parent = dev;
+
+ chip->legacy.IO_ADDR_R = cs->base + nfc->soc_info->data_offset;
+ chip->legacy.IO_ADDR_W = cs->base + nfc->soc_info->data_offset;
+ chip->legacy.chip_delay = RB_DELAY_US;
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+ chip->legacy.select_chip = ingenic_nand_select_chip;
+ chip->legacy.cmd_ctrl = ingenic_nand_cmd_ctrl;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->controller->ops = &ingenic_nand_controller_ops;
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_release(chip);
+ return ret;
+ }
+
+ list_add_tail(&nand->chip_list, &nfc->chips);
+
+ return 0;
+}
+
+static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc)
+{
+ struct ingenic_nand *chip;
+
+ while (!list_empty(&nfc->chips)) {
+ chip = list_first_entry(&nfc->chips,
+ struct ingenic_nand, chip_list);
+ nand_release(&chip->chip);
+ list_del(&chip->chip_list);
+ }
+}
+
+static int ingenic_nand_init_chips(struct ingenic_nfc *nfc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ int i = 0;
+ int ret;
+ int num_chips = of_get_child_count(dev->of_node);
+
+ if (num_chips > nfc->num_banks) {
+ dev_err(dev, "found %d chips but only %d banks\n",
+ num_chips, nfc->num_banks);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(dev->of_node, np) {
+ ret = ingenic_nand_init_chip(pdev, nfc, np, i);
+ if (ret) {
+ ingenic_nand_cleanup_chips(nfc);
+ return ret;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int ingenic_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int num_banks;
+ struct ingenic_nfc *nfc;
+ int ret;
+
+ num_banks = jz4780_nemc_num_banks(dev);
+ if (num_banks == 0) {
+ dev_err(dev, "no banks found\n");
+ return -ENODEV;
+ }
+
+ nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->soc_info = device_get_match_data(dev);
+ if (!nfc->soc_info)
+ return -EINVAL;
+
+ /*
+ * Check for ECC HW before we call nand_scan_ident, to prevent us from
+ * having to call it again if the ECC driver returns -EPROBE_DEFER.
+ */
+ nfc->ecc = of_ingenic_ecc_get(dev->of_node);
+ if (IS_ERR(nfc->ecc))
+ return PTR_ERR(nfc->ecc);
+
+ nfc->dev = dev;
+ nfc->num_banks = num_banks;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ ret = ingenic_nand_init_chips(nfc, pdev);
+ if (ret) {
+ if (nfc->ecc)
+ ingenic_ecc_release(nfc->ecc);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+ return 0;
+}
+
+static int ingenic_nand_remove(struct platform_device *pdev)
+{
+ struct ingenic_nfc *nfc = platform_get_drvdata(pdev);
+
+ if (nfc->ecc)
+ ingenic_ecc_release(nfc->ecc);
+
+ ingenic_nand_cleanup_chips(nfc);
+
+ return 0;
+}
+
+static const struct jz_soc_info jz4740_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00008000,
+ .addr_offset = 0x00010000,
+ .oob_layout = &nand_ooblayout_lp_ops,
+};
+
+static const struct jz_soc_info jz4725b_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00008000,
+ .addr_offset = 0x00010000,
+ .oob_layout = &jz4725b_ooblayout_ops,
+};
+
+static const struct jz_soc_info jz4780_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00400000,
+ .addr_offset = 0x00800000,
+ .oob_layout = &nand_ooblayout_lp_ops,
+};
+
+static const struct of_device_id ingenic_nand_dt_match[] = {
+ { .compatible = "ingenic,jz4740-nand", .data = &jz4740_soc_info },
+ { .compatible = "ingenic,jz4725b-nand", .data = &jz4725b_soc_info },
+ { .compatible = "ingenic,jz4780-nand", .data = &jz4780_soc_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ingenic_nand_dt_match);
+
+static struct platform_driver ingenic_nand_driver = {
+ .probe = ingenic_nand_probe,
+ .remove = ingenic_nand_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(ingenic_nand_dt_match),
+ },
+};
+module_platform_driver(ingenic_nand_driver);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic JZ47xx NAND driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
new file mode 100644
index 000000000000..6c852eae09cf
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4725B BCH controller driver
+ *
+ * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
+ *
+ * Based on jz4780_bch.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define BCH_BHCR 0x0
+#define BCH_BHCSR 0x4
+#define BCH_BHCCR 0x8
+#define BCH_BHCNT 0xc
+#define BCH_BHDR 0x10
+#define BCH_BHPAR0 0x14
+#define BCH_BHERR0 0x28
+#define BCH_BHINT 0x24
+#define BCH_BHINTES 0x3c
+#define BCH_BHINTEC 0x40
+#define BCH_BHINTE 0x38
+
+#define BCH_BHCR_ENCE BIT(3)
+#define BCH_BHCR_BSEL BIT(2)
+#define BCH_BHCR_INIT BIT(1)
+#define BCH_BHCR_BCHE BIT(0)
+
+#define BCH_BHCNT_DEC_COUNT_SHIFT 16
+#define BCH_BHCNT_DEC_COUNT_MASK (0x3ff << BCH_BHCNT_DEC_COUNT_SHIFT)
+#define BCH_BHCNT_ENC_COUNT_SHIFT 0
+#define BCH_BHCNT_ENC_COUNT_MASK (0x3ff << BCH_BHCNT_ENC_COUNT_SHIFT)
+
+#define BCH_BHERR_INDEX0_SHIFT 0
+#define BCH_BHERR_INDEX0_MASK (0x1fff << BCH_BHERR_INDEX0_SHIFT)
+#define BCH_BHERR_INDEX1_SHIFT 16
+#define BCH_BHERR_INDEX1_MASK (0x1fff << BCH_BHERR_INDEX1_SHIFT)
+
+#define BCH_BHINT_ERRC_SHIFT 28
+#define BCH_BHINT_ERRC_MASK (0xf << BCH_BHINT_ERRC_SHIFT)
+#define BCH_BHINT_TERRC_SHIFT 16
+#define BCH_BHINT_TERRC_MASK (0x7f << BCH_BHINT_TERRC_SHIFT)
+#define BCH_BHINT_ALL_0 BIT(5)
+#define BCH_BHINT_ALL_F BIT(4)
+#define BCH_BHINT_DECF BIT(3)
+#define BCH_BHINT_ENCF BIT(2)
+#define BCH_BHINT_UNCOR BIT(1)
+#define BCH_BHINT_ERR BIT(0)
+
+/* Timeout for BCH calculation/correction. */
+#define BCH_TIMEOUT_US 100000
+
+static inline void jz4725b_bch_config_set(struct ingenic_ecc *bch, u32 cfg)
+{
+ writel(cfg, bch->base + BCH_BHCSR);
+}
+
+static inline void jz4725b_bch_config_clear(struct ingenic_ecc *bch, u32 cfg)
+{
+ writel(cfg, bch->base + BCH_BHCCR);
+}
+
+static int jz4725b_bch_reset(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params, bool calc_ecc)
+{
+ u32 reg, max_value;
+
+ /* Clear interrupt status. */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Initialise and enable BCH. */
+ jz4725b_bch_config_clear(bch, 0x1f);
+ jz4725b_bch_config_set(bch, BCH_BHCR_BCHE);
+
+ if (params->strength == 8)
+ jz4725b_bch_config_set(bch, BCH_BHCR_BSEL);
+ else
+ jz4725b_bch_config_clear(bch, BCH_BHCR_BSEL);
+
+ if (calc_ecc) /* calculate ECC from data */
+ jz4725b_bch_config_set(bch, BCH_BHCR_ENCE);
+ else /* correct data from ECC */
+ jz4725b_bch_config_clear(bch, BCH_BHCR_ENCE);
+
+ jz4725b_bch_config_set(bch, BCH_BHCR_INIT);
+
+ max_value = BCH_BHCNT_ENC_COUNT_MASK >> BCH_BHCNT_ENC_COUNT_SHIFT;
+ if (params->size > max_value)
+ return -EINVAL;
+
+ max_value = BCH_BHCNT_DEC_COUNT_MASK >> BCH_BHCNT_DEC_COUNT_SHIFT;
+ if (params->size + params->bytes > max_value)
+ return -EINVAL;
+
+ /* Set up BCH count register. */
+ reg = params->size << BCH_BHCNT_ENC_COUNT_SHIFT;
+ reg |= (params->size + params->bytes) << BCH_BHCNT_DEC_COUNT_SHIFT;
+ writel(reg, bch->base + BCH_BHCNT);
+
+ return 0;
+}
+
+static void jz4725b_bch_disable(struct ingenic_ecc *bch)
+{
+ /* Clear interrupts */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Disable the hardware */
+ jz4725b_bch_config_clear(bch, BCH_BHCR_BCHE);
+}
+
+static void jz4725b_bch_write_data(struct ingenic_ecc *bch, const u8 *buf,
+ size_t size)
+{
+ while (size--)
+ writeb(*buf++, bch->base + BCH_BHDR);
+}
+
+static void jz4725b_bch_read_parity(struct ingenic_ecc *bch, u8 *buf,
+ size_t size)
+{
+ size_t size32 = size / sizeof(u32);
+ size_t size8 = size % sizeof(u32);
+ u32 *dest32;
+ u8 *dest8;
+ u32 val, offset = 0;
+
+ dest32 = (u32 *)buf;
+ while (size32--) {
+ *dest32++ = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
+ offset += sizeof(u32);
+ }
+
+ dest8 = (u8 *)dest32;
+ val = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
+ switch (size8) {
+ case 3:
+ dest8[2] = (val >> 16) & 0xff;
+ /* fall-through */
+ case 2:
+ dest8[1] = (val >> 8) & 0xff;
+ /* fall-through */
+ case 1:
+ dest8[0] = val & 0xff;
+ break;
+ }
+}
+
+static int jz4725b_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
+ u32 *status)
+{
+ u32 reg;
+ int ret;
+
+ /*
+ * While we could use interrupts here and sleep until the operation
+ * completes, the controller works fairly quickly (usually a few
+ * microseconds) and so the overhead of sleeping until we get an
+ * interrupt quite noticeably decreases performance.
+ */
+ ret = readl_relaxed_poll_timeout(bch->base + BCH_BHINT, reg,
+ reg & irq, 0, BCH_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ if (status)
+ *status = reg;
+
+ writel(reg, bch->base + BCH_BHINT);
+
+ return 0;
+}
+
+static int jz4725b_calculate(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ int ret;
+
+ mutex_lock(&bch->lock);
+
+ ret = jz4725b_bch_reset(bch, params, true);
+ if (ret) {
+ dev_err(bch->dev, "Unable to init BCH with given parameters\n");
+ goto out_disable;
+ }
+
+ jz4725b_bch_write_data(bch, buf, params->size);
+
+ ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL);
+ if (ret) {
+ dev_err(bch->dev, "timed out while calculating ECC\n");
+ goto out_disable;
+ }
+
+ jz4725b_bch_read_parity(bch, ecc_code, params->bytes);
+
+out_disable:
+ jz4725b_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+
+ return ret;
+}
+
+static int jz4725b_correct(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ u32 reg, errors, bit;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&bch->lock);
+
+ ret = jz4725b_bch_reset(bch, params, false);
+ if (ret) {
+ dev_err(bch->dev, "Unable to init BCH with given parameters\n");
+ goto out;
+ }
+
+ jz4725b_bch_write_data(bch, buf, params->size);
+ jz4725b_bch_write_data(bch, ecc_code, params->bytes);
+
+ ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_DECF, &reg);
+ if (ret) {
+ dev_err(bch->dev, "timed out while correcting data\n");
+ goto out;
+ }
+
+ if (reg & (BCH_BHINT_ALL_F | BCH_BHINT_ALL_0)) {
+ /* Data and ECC is all 0xff or 0x00 - nothing to correct */
+ ret = 0;
+ goto out;
+ }
+
+ if (reg & BCH_BHINT_UNCOR) {
+ /* Uncorrectable ECC error */
+ ret = -EBADMSG;
+ goto out;
+ }
+
+ errors = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
+
+ /* Correct any detected errors. */
+ for (i = 0; i < errors; i++) {
+ if (i & 1) {
+ bit = (reg & BCH_BHERR_INDEX1_MASK) >> BCH_BHERR_INDEX1_SHIFT;
+ } else {
+ reg = readl(bch->base + BCH_BHERR0 + (i * 4));
+ bit = (reg & BCH_BHERR_INDEX0_MASK) >> BCH_BHERR_INDEX0_SHIFT;
+ }
+
+ buf[(bit >> 3)] ^= BIT(bit & 0x7);
+ }
+
+out:
+ jz4725b_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+
+ return ret;
+}
+
+static const struct ingenic_ecc_ops jz4725b_bch_ops = {
+ .disable = jz4725b_bch_disable,
+ .calculate = jz4725b_calculate,
+ .correct = jz4725b_correct,
+};
+
+static const struct of_device_id jz4725b_bch_dt_match[] = {
+ { .compatible = "ingenic,jz4725b-bch", .data = &jz4725b_bch_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4725b_bch_dt_match);
+
+static struct platform_driver jz4725b_bch_driver = {
+ .probe = ingenic_ecc_probe,
+ .driver = {
+ .name = "jz4725b-bch",
+ .of_match_table = jz4725b_bch_dt_match,
+ },
+};
+module_platform_driver(jz4725b_bch_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4725B BCH controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
new file mode 100644
index 000000000000..13fea645c7f0
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4740 ECC controller driver
+ *
+ * Copyright (c) 2019 Paul Cercueil <paul@crapouillou.net>
+ *
+ * based on jz4740-nand.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define JZ_REG_NAND_ECC_CTRL 0x00
+#define JZ_REG_NAND_DATA 0x04
+#define JZ_REG_NAND_PAR0 0x08
+#define JZ_REG_NAND_PAR1 0x0C
+#define JZ_REG_NAND_PAR2 0x10
+#define JZ_REG_NAND_IRQ_STAT 0x14
+#define JZ_REG_NAND_IRQ_CTRL 0x18
+#define JZ_REG_NAND_ERR(x) (0x1C + ((x) << 2))
+
+#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4)
+#define JZ_NAND_ECC_CTRL_ENCODING BIT(3)
+#define JZ_NAND_ECC_CTRL_RS BIT(2)
+#define JZ_NAND_ECC_CTRL_RESET BIT(1)
+#define JZ_NAND_ECC_CTRL_ENABLE BIT(0)
+
+#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29))
+#define JZ_NAND_STATUS_PAD_FINISH BIT(4)
+#define JZ_NAND_STATUS_DEC_FINISH BIT(3)
+#define JZ_NAND_STATUS_ENC_FINISH BIT(2)
+#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1)
+#define JZ_NAND_STATUS_ERROR BIT(0)
+
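+/*
+ * ECC bytes that the RS unit generates for a data block of all 0xff
+ * (i.e. an erased page). Used in jz4740_ecc_calculate() to detect this
+ * case so that 0xff can be written as the ECC instead.
+ */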
+static const uint8_t empty_block_ecc[] = {
+ 0xcd, 0x9d, 0x90, 0x58, 0xf4, 0x8b, 0xff, 0xb7, 0x6f
+};
+
+static void jz4740_ecc_reset(struct ingenic_ecc *ecc, bool calc_ecc)
+{
+ uint32_t reg;
+
+ /* Clear interrupt status */
+ writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
+
+ /* Initialize and enable ECC hardware */
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_RESET;
+ reg |= JZ_NAND_ECC_CTRL_ENABLE;
+ reg |= JZ_NAND_ECC_CTRL_RS;
+ if (calc_ecc) /* calculate ECC from data */
+ reg |= JZ_NAND_ECC_CTRL_ENCODING;
+ else /* correct data from ECC */
+ reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
+
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static int jz4740_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ uint32_t reg, status;
+ unsigned int timeout = 1000;
+ int i;
+
+ jz4740_ecc_reset(ecc, true);
+
+ do {
+ status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ for (i = 0; i < params->bytes; ++i)
+ ecc_code[i] = readb(ecc->base + JZ_REG_NAND_PAR0 + i);
+
+ /*
+ * If the written data is all 0xff, we also want to write 0xff as the
+ * ECC; otherwise we would get into trouble when doing subpage writes.
+ */
+ if (memcmp(ecc_code, empty_block_ecc, ARRAY_SIZE(empty_block_ecc)) == 0)
+ memset(ecc_code, 0xff, ARRAY_SIZE(empty_block_ecc));
+
+ return 0;
+}
+
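+/*
+ * The hardware reports error positions and correction masks per 9-bit
+ * symbol. Symbol 'index' starts at bit 9 * index of the buffer, i.e. at
+ * byte index + (index >> 3) with bit offset index & 0x7, and may straddle
+ * two bytes: load 16 bits, XOR the 9-bit symbol with the mask, store back.
+ */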
+static void jz_nand_correct_data(uint8_t *buf, int index, int mask)
+{
+ int offset = index & 0x7;
+ uint16_t data;
+
+ index += (index >> 3);
+
+ data = buf[index];
+ data |= buf[index + 1] << 8;
+
+ mask ^= (data >> offset) & 0x1ff;
+ data &= ~(0x1ff << offset);
+ data |= (mask << offset);
+
+ buf[index] = data & 0xff;
+ buf[index + 1] = (data >> 8) & 0xff;
+}
+
+static int jz4740_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ int i, error_count, index;
+ uint32_t reg, status, error;
+ unsigned int timeout = 1000;
+
+ jz4740_ecc_reset(ecc, false);
+
+ for (i = 0; i < params->bytes; ++i)
+ writeb(ecc_code[i], ecc->base + JZ_REG_NAND_PAR0 + i);
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_PAR_READY;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ do {
+ status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ if (status & JZ_NAND_STATUS_ERROR) {
+ if (status & JZ_NAND_STATUS_UNCOR_ERROR)
+ return -EBADMSG;
+
+ error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
+
+ for (i = 0; i < error_count; ++i) {
+ error = readl(ecc->base + JZ_REG_NAND_ERR(i));
+ index = ((error >> 16) & 0x1ff) - 1;
+ if (index >= 0 && index < params->size)
+ jz_nand_correct_data(buf, index, error & 0x1ff);
+ }
+
+ return error_count;
+ }
+
+ return 0;
+}
+
+static void jz4740_ecc_disable(struct ingenic_ecc *ecc)
+{
+ u32 reg;
+
+ writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static const struct ingenic_ecc_ops jz4740_ecc_ops = {
+ .disable = jz4740_ecc_disable,
+ .calculate = jz4740_ecc_calculate,
+ .correct = jz4740_ecc_correct,
+};
+
+static const struct of_device_id jz4740_ecc_dt_match[] = {
+ { .compatible = "ingenic,jz4740-ecc", .data = &jz4740_ecc_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_ecc_dt_match);
+
+static struct platform_driver jz4740_ecc_driver = {
+ .probe = ingenic_ecc_probe,
+ .driver = {
+ .name = "jz4740-ecc",
+ .of_match_table = jz4740_ecc_dt_match,
+ },
+};
+module_platform_driver(jz4740_ecc_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4740 ECC controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/ingenic/jz4740_nand.c
index 9526d5b23c80..f759f1672855 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4740_nand.c
@@ -313,8 +313,11 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
uint32_t ctrl;
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
u8 id[2];
+ memorg = nanddev_get_memorg(&chip->base);
+
/* Request I/O resource. */
sprintf(res_name, "bank%d", bank);
ret = jz_nand_ioremap_resource(pdev, res_name,
@@ -351,8 +354,8 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
}
/* Update size of the MTD. */
- chip->numchips++;
- mtd->size += chip->chipsize;
+ memorg->ntargets++;
+ mtd->size += nanddev_target_size(&chip->base);
}
dev_info(&pdev->dev, "Found chip %zu on bank %i\n", chipnr, bank);
diff --git a/drivers/mtd/nand/raw/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
index c5f74ed85862..079266a0d6cf 100644
--- a/drivers/mtd/nand/raw/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -1,28 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * JZ4780 BCH controller
+ * JZ4780 BCH controller driver
*
* Copyright (c) 2015 Imagination Technologies
* Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include "jz4780_bch.h"
+#include "ingenic_ecc.h"
#define BCH_BHCR 0x0
#define BCH_BHCCR 0x8
@@ -65,15 +59,8 @@
/* Timeout for BCH calculation/correction. */
#define BCH_TIMEOUT_US 100000
-struct jz4780_bch {
- struct device *dev;
- void __iomem *base;
- struct clk *clk;
- struct mutex lock;
-};
-
-static void jz4780_bch_init(struct jz4780_bch *bch,
- struct jz4780_bch_params *params, bool encode)
+static void jz4780_bch_reset(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params, bool encode)
{
u32 reg;
@@ -93,13 +80,13 @@ static void jz4780_bch_init(struct jz4780_bch *bch,
writel(reg, bch->base + BCH_BHCR);
}
-static void jz4780_bch_disable(struct jz4780_bch *bch)
+static void jz4780_bch_disable(struct ingenic_ecc *bch)
{
writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
writel(BCH_BHCR_BCHE, bch->base + BCH_BHCCR);
}
-static void jz4780_bch_write_data(struct jz4780_bch *bch, const void *buf,
+static void jz4780_bch_write_data(struct ingenic_ecc *bch, const void *buf,
size_t size)
{
size_t size32 = size / sizeof(u32);
@@ -116,7 +103,7 @@ static void jz4780_bch_write_data(struct jz4780_bch *bch, const void *buf,
writeb(*src8++, bch->base + BCH_BHDR);
}
-static void jz4780_bch_read_parity(struct jz4780_bch *bch, void *buf,
+static void jz4780_bch_read_parity(struct ingenic_ecc *bch, void *buf,
size_t size)
{
size_t size32 = size / sizeof(u32);
@@ -146,7 +133,7 @@ static void jz4780_bch_read_parity(struct jz4780_bch *bch, void *buf,
}
}
-static bool jz4780_bch_wait_complete(struct jz4780_bch *bch, unsigned int irq,
+static bool jz4780_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
u32 *status)
{
u32 reg;
@@ -170,23 +157,15 @@ static bool jz4780_bch_wait_complete(struct jz4780_bch *bch, unsigned int irq,
return true;
}
-/**
- * jz4780_bch_calculate() - calculate ECC for a data buffer
- * @bch: BCH device.
- * @params: BCH parameters.
- * @buf: input buffer with raw data.
- * @ecc_code: output buffer with ECC.
- *
- * Return: 0 on success, -ETIMEDOUT if timed out while waiting for BCH
- * controller.
- */
-int jz4780_bch_calculate(struct jz4780_bch *bch, struct jz4780_bch_params *params,
- const u8 *buf, u8 *ecc_code)
+static int jz4780_calculate(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
{
int ret = 0;
mutex_lock(&bch->lock);
- jz4780_bch_init(bch, params, true);
+
+ jz4780_bch_reset(bch, params, true);
jz4780_bch_write_data(bch, buf, params->size);
if (jz4780_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL)) {
@@ -200,30 +179,17 @@ int jz4780_bch_calculate(struct jz4780_bch *bch, struct jz4780_bch_params *param
mutex_unlock(&bch->lock);
return ret;
}
-EXPORT_SYMBOL(jz4780_bch_calculate);
-
-/**
- * jz4780_bch_correct() - detect and correct bit errors
- * @bch: BCH device.
- * @params: BCH parameters.
- * @buf: raw data read from the chip.
- * @ecc_code: ECC read from the chip.
- *
- * Given the raw data and the ECC read from the NAND device, detects and
- * corrects errors in the data.
- *
- * Return: the number of bit errors corrected, -EBADMSG if there are too many
- * errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
- */
-int jz4780_bch_correct(struct jz4780_bch *bch, struct jz4780_bch_params *params,
- u8 *buf, u8 *ecc_code)
+
+static int jz4780_correct(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
{
u32 reg, mask, index;
int i, ret, count;
mutex_lock(&bch->lock);
- jz4780_bch_init(bch, params, false);
+ jz4780_bch_reset(bch, params, false);
jz4780_bch_write_data(bch, buf, params->size);
jz4780_bch_write_data(bch, ecc_code, params->bytes);
@@ -262,110 +228,30 @@ out:
mutex_unlock(&bch->lock);
return ret;
}
-EXPORT_SYMBOL(jz4780_bch_correct);
-
-/**
- * jz4780_bch_get() - get the BCH controller device
- * @np: BCH device tree node.
- *
- * Gets the BCH controller device from the specified device tree node. The
- * device must be released with jz4780_bch_release() when it is no longer being
- * used.
- *
- * Return: a pointer to jz4780_bch, errors are encoded into the pointer.
- * PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
- */
-static struct jz4780_bch *jz4780_bch_get(struct device_node *np)
-{
- struct platform_device *pdev;
- struct jz4780_bch *bch;
-
- pdev = of_find_device_by_node(np);
- if (!pdev)
- return ERR_PTR(-EPROBE_DEFER);
-
- bch = platform_get_drvdata(pdev);
- if (!bch) {
- put_device(&pdev->dev);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- clk_prepare_enable(bch->clk);
-
- return bch;
-}
-
-/**
- * of_jz4780_bch_get() - get the BCH controller from a DT node
- * @of_node: the node that contains a bch-controller property.
- *
- * Get the bch-controller property from the given device tree
- * node and pass it to jz4780_bch_get to do the work.
- *
- * Return: a pointer to jz4780_bch, errors are encoded into the pointer.
- * PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
- */
-struct jz4780_bch *of_jz4780_bch_get(struct device_node *of_node)
-{
- struct jz4780_bch *bch = NULL;
- struct device_node *np;
-
- np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
-
- if (np) {
- bch = jz4780_bch_get(np);
- of_node_put(np);
- }
- return bch;
-}
-EXPORT_SYMBOL(of_jz4780_bch_get);
-
-/**
- * jz4780_bch_release() - release the BCH controller device
- * @bch: BCH device.
- */
-void jz4780_bch_release(struct jz4780_bch *bch)
-{
- clk_disable_unprepare(bch->clk);
- put_device(bch->dev);
-}
-EXPORT_SYMBOL(jz4780_bch_release);
static int jz4780_bch_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct jz4780_bch *bch;
- struct resource *res;
-
- bch = devm_kzalloc(dev, sizeof(*bch), GFP_KERNEL);
- if (!bch)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bch->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(bch->base))
- return PTR_ERR(bch->base);
-
- jz4780_bch_disable(bch);
+ struct ingenic_ecc *bch;
+ int ret;
- bch->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(bch->clk)) {
- dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(bch->clk));
- return PTR_ERR(bch->clk);
- }
+ ret = ingenic_ecc_probe(pdev);
+ if (ret)
+ return ret;
+ bch = platform_get_drvdata(pdev);
clk_set_rate(bch->clk, BCH_CLK_RATE);
- mutex_init(&bch->lock);
-
- bch->dev = dev;
- platform_set_drvdata(pdev, bch);
-
return 0;
}
+static const struct ingenic_ecc_ops jz4780_bch_ops = {
+ .disable = jz4780_bch_disable,
+ .calculate = jz4780_calculate,
+ .correct = jz4780_correct,
+};
+
static const struct of_device_id jz4780_bch_dt_match[] = {
- { .compatible = "ingenic,jz4780-bch" },
+ { .compatible = "ingenic,jz4780-bch", .data = &jz4780_bch_ops },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_bch_dt_match);
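
Both converted drivers now reduce to an ops table (disable/calculate/correct) attached to the OF match data, with the shared ingenic_ecc code calling through it. The toy program below sketches the shape of that pattern with made-up names; it is not the actual ingenic_ecc.c plumbing, only an illustration of a compatible string selecting a vtable.

#include <stdio.h>
#include <string.h>

struct ecc_ops {
	int (*calculate)(const char *buf);
	int (*correct)(char *buf);
};

static int jz4740_calc(const char *buf) { (void)buf; puts("jz4740 calculate"); return 0; }
static int jz4740_corr(char *buf)       { (void)buf; puts("jz4740 correct");   return 0; }
static int jz4780_calc(const char *buf) { (void)buf; puts("jz4780 calculate"); return 0; }
static int jz4780_corr(char *buf)       { (void)buf; puts("jz4780 correct");   return 0; }

static const struct ecc_ops jz4740_ops = { jz4740_calc, jz4740_corr };
static const struct ecc_ops jz4780_ops = { jz4780_calc, jz4780_corr };

/* Stand-in for the of_device_id tables above: compatible string -> ops. */
static const struct {
	const char *compatible;
	const struct ecc_ops *ops;
} match[] = {
	{ "ingenic,jz4740-ecc", &jz4740_ops },
	{ "ingenic,jz4780-bch", &jz4780_ops },
};

static const struct ecc_ops *probe(const char *compatible)
{
	size_t i;

	for (i = 0; i < sizeof(match) / sizeof(match[0]); i++)
		if (!strcmp(match[i].compatible, compatible))
			return match[i].ops;
	return NULL;
}

int main(void)
{
	const struct ecc_ops *ops = probe("ingenic,jz4780-bch");
	char page[1] = { 0 };

	if (ops) {
		ops->calculate(page);
		ops->correct(page);
	}
	return 0;
}
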
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index fbf6ca015cd7..cba6fe7dd8c4 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -76,6 +76,7 @@ extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
/* Core functions */
const struct nand_manufacturer *nand_get_manufacturer(u8 id);
+int nand_bbm_get_next_page(struct nand_chip *chip, int page);
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs);
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
int allowbbt);
@@ -110,7 +111,7 @@ static inline int nand_exec_op(struct nand_chip *chip,
if (!nand_has_exec_op(chip))
return -ENOTSUPP;
- if (WARN_ON(op->cs >= chip->numchips))
+ if (WARN_ON(op->cs >= nanddev_ntargets(&chip->base)))
return -EINVAL;
return chip->controller->ops->exec_op(chip, op, false);
diff --git a/drivers/mtd/nand/raw/jz4780_bch.h b/drivers/mtd/nand/raw/jz4780_bch.h
deleted file mode 100644
index bf4718088a3a..000000000000
--- a/drivers/mtd/nand/raw/jz4780_bch.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * JZ4780 BCH controller
- *
- * Copyright (c) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef __DRIVERS_MTD_NAND_JZ4780_BCH_H__
-#define __DRIVERS_MTD_NAND_JZ4780_BCH_H__
-
-#include <linux/types.h>
-
-struct device;
-struct device_node;
-struct jz4780_bch;
-
-/**
- * struct jz4780_bch_params - BCH parameters
- * @size: data bytes per ECC step.
- * @bytes: ECC bytes per step.
- * @strength: number of correctable bits per ECC step.
- */
-struct jz4780_bch_params {
- int size;
- int bytes;
- int strength;
-};
-
-int jz4780_bch_calculate(struct jz4780_bch *bch,
- struct jz4780_bch_params *params,
- const u8 *buf, u8 *ecc_code);
-int jz4780_bch_correct(struct jz4780_bch *bch,
- struct jz4780_bch_params *params, u8 *buf,
- u8 *ecc_code);
-
-void jz4780_bch_release(struct jz4780_bch *bch);
-struct jz4780_bch *of_jz4780_bch_get(struct device_node *np);
-
-#endif /* __DRIVERS_MTD_NAND_JZ4780_BCH_H__ */
diff --git a/drivers/mtd/nand/raw/jz4780_nand.c b/drivers/mtd/nand/raw/jz4780_nand.c
deleted file mode 100644
index 22e58975f0d5..000000000000
--- a/drivers/mtd/nand/raw/jz4780_nand.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * JZ4780 NAND driver
- *
- * Copyright (c) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#include <linux/jz4780-nemc.h>
-
-#include "jz4780_bch.h"
-
-#define DRV_NAME "jz4780-nand"
-
-#define OFFSET_DATA 0x00000000
-#define OFFSET_CMD 0x00400000
-#define OFFSET_ADDR 0x00800000
-
-/* Command delay when there is no R/B pin. */
-#define RB_DELAY_US 100
-
-struct jz4780_nand_cs {
- unsigned int bank;
- void __iomem *base;
-};
-
-struct jz4780_nand_controller {
- struct device *dev;
- struct jz4780_bch *bch;
- struct nand_controller controller;
- unsigned int num_banks;
- struct list_head chips;
- int selected;
- struct jz4780_nand_cs cs[];
-};
-
-struct jz4780_nand_chip {
- struct nand_chip chip;
- struct list_head chip_list;
-
- struct gpio_desc *busy_gpio;
- struct gpio_desc *wp_gpio;
- unsigned int reading: 1;
-};
-
-static inline struct jz4780_nand_chip *to_jz4780_nand_chip(struct mtd_info *mtd)
-{
- return container_of(mtd_to_nand(mtd), struct jz4780_nand_chip, chip);
-}
-
-static inline struct jz4780_nand_controller
-*to_jz4780_nand_controller(struct nand_controller *ctrl)
-{
- return container_of(ctrl, struct jz4780_nand_controller, controller);
-}
-
-static void jz4780_nand_select_chip(struct nand_chip *chip, int chipnr)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
- struct jz4780_nand_cs *cs;
-
- /* Ensure the currently selected chip is deasserted. */
- if (chipnr == -1 && nfc->selected >= 0) {
- cs = &nfc->cs[nfc->selected];
- jz4780_nemc_assert(nfc->dev, cs->bank, false);
- }
-
- nfc->selected = chipnr;
-}
-
-static void jz4780_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
- struct jz4780_nand_cs *cs;
-
- if (WARN_ON(nfc->selected < 0))
- return;
-
- cs = &nfc->cs[nfc->selected];
-
- jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_ALE)
- writeb(cmd, cs->base + OFFSET_ADDR);
- else if (ctrl & NAND_CLE)
- writeb(cmd, cs->base + OFFSET_CMD);
-}
-
-static int jz4780_nand_dev_ready(struct nand_chip *chip)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
-
- return !gpiod_get_value_cansleep(nand->busy_gpio);
-}
-
-static void jz4780_nand_ecc_hwctl(struct nand_chip *chip, int mode)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
-
- nand->reading = (mode == NAND_ECC_READ);
-}
-
-static int jz4780_nand_ecc_calculate(struct nand_chip *chip, const u8 *dat,
- u8 *ecc_code)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
- struct jz4780_bch_params params;
-
- /*
- * Don't need to generate the ECC when reading, BCH does it for us as
- * part of decoding/correction.
- */
- if (nand->reading)
- return 0;
-
- params.size = nand->chip.ecc.size;
- params.bytes = nand->chip.ecc.bytes;
- params.strength = nand->chip.ecc.strength;
-
- return jz4780_bch_calculate(nfc->bch, &params, dat, ecc_code);
-}
-
-static int jz4780_nand_ecc_correct(struct nand_chip *chip, u8 *dat,
- u8 *read_ecc, u8 *calc_ecc)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
- struct jz4780_bch_params params;
-
- params.size = nand->chip.ecc.size;
- params.bytes = nand->chip.ecc.bytes;
- params.strength = nand->chip.ecc.strength;
-
- return jz4780_bch_correct(nfc->bch, &params, dat, read_ecc);
-}
-
-static int jz4780_nand_attach_chip(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller);
- int eccbytes;
-
- chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
- (chip->ecc.strength / 8);
-
- switch (chip->ecc.mode) {
- case NAND_ECC_HW:
- if (!nfc->bch) {
- dev_err(nfc->dev,
- "HW BCH selected, but BCH controller not found\n");
- return -ENODEV;
- }
-
- chip->ecc.hwctl = jz4780_nand_ecc_hwctl;
- chip->ecc.calculate = jz4780_nand_ecc_calculate;
- chip->ecc.correct = jz4780_nand_ecc_correct;
- /* fall through */
- case NAND_ECC_SOFT:
- dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
- (nfc->bch) ? "hardware BCH" : "software ECC",
- chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
- break;
- case NAND_ECC_NONE:
- dev_info(nfc->dev, "not using ECC\n");
- break;
- default:
- dev_err(nfc->dev, "ECC mode %d not supported\n",
- chip->ecc.mode);
- return -EINVAL;
- }
-
- /* The NAND core will generate the ECC layout for SW ECC */
- if (chip->ecc.mode != NAND_ECC_HW)
- return 0;
-
- /* Generate ECC layout. ECC codes are right aligned in the OOB area. */
- eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
-
- if (eccbytes > mtd->oobsize - 2) {
- dev_err(nfc->dev,
- "invalid ECC config: required %d ECC bytes, but only %d are available",
- eccbytes, mtd->oobsize - 2);
- return -EINVAL;
- }
-
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-
- return 0;
-}
-
-static const struct nand_controller_ops jz4780_nand_controller_ops = {
- .attach_chip = jz4780_nand_attach_chip,
-};
-
-static int jz4780_nand_init_chip(struct platform_device *pdev,
- struct jz4780_nand_controller *nfc,
- struct device_node *np,
- unsigned int chipnr)
-{
- struct device *dev = &pdev->dev;
- struct jz4780_nand_chip *nand;
- struct jz4780_nand_cs *cs;
- struct resource *res;
- struct nand_chip *chip;
- struct mtd_info *mtd;
- const __be32 *reg;
- int ret = 0;
-
- cs = &nfc->cs[chipnr];
-
- reg = of_get_property(np, "reg", NULL);
- if (!reg)
- return -EINVAL;
-
- cs->bank = be32_to_cpu(*reg);
-
- jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, chipnr);
- cs->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(cs->base))
- return PTR_ERR(cs->base);
-
- nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
- if (!nand)
- return -ENOMEM;
-
- nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
-
- if (IS_ERR(nand->busy_gpio)) {
- ret = PTR_ERR(nand->busy_gpio);
- dev_err(dev, "failed to request busy GPIO: %d\n", ret);
- return ret;
- } else if (nand->busy_gpio) {
- nand->chip.legacy.dev_ready = jz4780_nand_dev_ready;
- }
-
- nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
-
- if (IS_ERR(nand->wp_gpio)) {
- ret = PTR_ERR(nand->wp_gpio);
- dev_err(dev, "failed to request WP GPIO: %d\n", ret);
- return ret;
- }
-
- chip = &nand->chip;
- mtd = nand_to_mtd(chip);
- mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
- cs->bank);
- if (!mtd->name)
- return -ENOMEM;
- mtd->dev.parent = dev;
-
- chip->legacy.IO_ADDR_R = cs->base + OFFSET_DATA;
- chip->legacy.IO_ADDR_W = cs->base + OFFSET_DATA;
- chip->legacy.chip_delay = RB_DELAY_US;
- chip->options = NAND_NO_SUBPAGE_WRITE;
- chip->legacy.select_chip = jz4780_nand_select_chip;
- chip->legacy.cmd_ctrl = jz4780_nand_cmd_ctrl;
- chip->ecc.mode = NAND_ECC_HW;
- chip->controller = &nfc->controller;
- nand_set_flash_node(chip, np);
-
- chip->controller->ops = &jz4780_nand_controller_ops;
- ret = nand_scan(chip, 1);
- if (ret)
- return ret;
-
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret) {
- nand_release(chip);
- return ret;
- }
-
- list_add_tail(&nand->chip_list, &nfc->chips);
-
- return 0;
-}
-
-static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc)
-{
- struct jz4780_nand_chip *chip;
-
- while (!list_empty(&nfc->chips)) {
- chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list);
- nand_release(&chip->chip);
- list_del(&chip->chip_list);
- }
-}
-
-static int jz4780_nand_init_chips(struct jz4780_nand_controller *nfc,
- struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np;
- int i = 0;
- int ret;
- int num_chips = of_get_child_count(dev->of_node);
-
- if (num_chips > nfc->num_banks) {
- dev_err(dev, "found %d chips but only %d banks\n", num_chips, nfc->num_banks);
- return -EINVAL;
- }
-
- for_each_child_of_node(dev->of_node, np) {
- ret = jz4780_nand_init_chip(pdev, nfc, np, i);
- if (ret) {
- jz4780_nand_cleanup_chips(nfc);
- return ret;
- }
-
- i++;
- }
-
- return 0;
-}
-
-static int jz4780_nand_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- unsigned int num_banks;
- struct jz4780_nand_controller *nfc;
- int ret;
-
- num_banks = jz4780_nemc_num_banks(dev);
- if (num_banks == 0) {
- dev_err(dev, "no banks found\n");
- return -ENODEV;
- }
-
- nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
- if (!nfc)
- return -ENOMEM;
-
- /*
- * Check for BCH HW before we call nand_scan_ident, to prevent us from
- * having to call it again if the BCH driver returns -EPROBE_DEFER.
- */
- nfc->bch = of_jz4780_bch_get(dev->of_node);
- if (IS_ERR(nfc->bch))
- return PTR_ERR(nfc->bch);
-
- nfc->dev = dev;
- nfc->num_banks = num_banks;
-
- nand_controller_init(&nfc->controller);
- INIT_LIST_HEAD(&nfc->chips);
-
- ret = jz4780_nand_init_chips(nfc, pdev);
- if (ret) {
- if (nfc->bch)
- jz4780_bch_release(nfc->bch);
- return ret;
- }
-
- platform_set_drvdata(pdev, nfc);
- return 0;
-}
-
-static int jz4780_nand_remove(struct platform_device *pdev)
-{
- struct jz4780_nand_controller *nfc = platform_get_drvdata(pdev);
-
- if (nfc->bch)
- jz4780_bch_release(nfc->bch);
-
- jz4780_nand_cleanup_chips(nfc);
-
- return 0;
-}
-
-static const struct of_device_id jz4780_nand_dt_match[] = {
- { .compatible = "ingenic,jz4780-nand" },
- {},
-};
-MODULE_DEVICE_TABLE(of, jz4780_nand_dt_match);
-
-static struct platform_driver jz4780_nand_driver = {
- .probe = jz4780_nand_probe,
- .remove = jz4780_nand_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(jz4780_nand_dt_match),
- },
-};
-module_platform_driver(jz4780_nand_driver);
-
-MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
-MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index f38e5c1b87e4..fc49e13d81ec 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -722,12 +722,6 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr_generic;
- if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
- return;
-
- writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
- writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
-
/*
* Reset the NDCR register to a clean state for this particular chip,
* also clear ND_RUN bit.
@@ -739,6 +733,12 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
/* Also reset the interrupt status register */
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
nfc->selected_chip = chip;
marvell_nand->selected_die = die_nr;
}
@@ -1083,12 +1083,11 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
*/
static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
{
- /* Invalidate page cache */
- chip->pagebuf = -1;
+ u8 *buf = nand_get_data_buf(chip);
marvell_nfc_select_target(chip, chip->cur_cs);
- return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
- chip->oob_poi, true, page);
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
}
/* Hamming write helpers */
@@ -1179,15 +1178,13 @@ static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
- /* Invalidate page cache */
- chip->pagebuf = -1;
-
- memset(chip->data_buf, 0xFF, mtd->writesize);
+ memset(buf, 0xFF, mtd->writesize);
marvell_nfc_select_target(chip, chip->cur_cs);
- return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
- chip->oob_poi, true, page);
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
}
/* BCH read helpers */
@@ -1434,18 +1431,16 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
{
- /* Invalidate page cache */
- chip->pagebuf = -1;
+ u8 *buf = nand_get_data_buf(chip);
- return chip->ecc.read_page_raw(chip, chip->data_buf, true, page);
+ return chip->ecc.read_page_raw(chip, buf, true, page);
}
static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
{
- /* Invalidate page cache */
- chip->pagebuf = -1;
+ u8 *buf = nand_get_data_buf(chip);
- return chip->ecc.read_page(chip, chip->data_buf, true, page);
+ return chip->ecc.read_page(chip, buf, true, page);
}
/* BCH write helpers */
@@ -1619,25 +1614,21 @@ static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
- /* Invalidate page cache */
- chip->pagebuf = -1;
+ memset(buf, 0xFF, mtd->writesize);
- memset(chip->data_buf, 0xFF, mtd->writesize);
-
- return chip->ecc.write_page_raw(chip, chip->data_buf, true, page);
+ return chip->ecc.write_page_raw(chip, buf, true, page);
}
static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
- /* Invalidate page cache */
- chip->pagebuf = -1;
-
- memset(chip->data_buf, 0xFF, mtd->writesize);
+ memset(buf, 0xFF, mtd->writesize);
- return chip->ecc.write_page(chip, chip->data_buf, true, page);
+ return chip->ecc.write_page(chip, buf, true, page);
}
/* NAND framework ->exec_op() hooks and related helpers */
@@ -2257,9 +2248,9 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd,
int ret;
if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
- if (chip->ecc_step_ds && chip->ecc_strength_ds) {
- ecc->size = chip->ecc_step_ds;
- ecc->strength = chip->ecc_strength_ds;
+ if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
+ ecc->size = chip->base.eccreq.step_size;
+ ecc->strength = chip->base.eccreq.strength;
} else {
dev_info(nfc->dev,
"No minimum ECC strength, using 1b/512B\n");
@@ -2989,7 +2980,7 @@ static int __maybe_unused marvell_nfc_resume(struct device *dev)
/*
* Reset nfc->selected_chip so the next command will cause the timing
- * registers to be restored in marvell_nfc_select_chip().
+ * registers to be restored in marvell_nfc_select_target().
*/
nfc->selected_chip = NULL;
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 3e8aa71407b5..ea57ddcec41e 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -400,7 +400,7 @@ static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms)
cfg |= NFC_RB_IRQ_EN;
writel(cfg, nfc->reg_base + NFC_REG_CFG);
- init_completion(&nfc->completion);
+ reinit_completion(&nfc->completion);
/* use the max erase time as the maximum clock for waiting R/B */
cmd = NFC_CMD_RB | NFC_CMD_RB_INT
@@ -470,15 +470,15 @@ static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
return ret;
}
-static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, u8 *databuf,
- int datalen, u8 *infobuf, int infolen,
+static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
+ int datalen, void *infobuf, int infolen,
enum dma_data_direction dir)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
u32 cmd;
int ret = 0;
- nfc->daddr = dma_map_single(nfc->dev, (void *)databuf, datalen, dir);
+ nfc->daddr = dma_map_single(nfc->dev, databuf, datalen, dir);
ret = dma_mapping_error(nfc->dev, nfc->daddr);
if (ret) {
dev_err(nfc->dev, "DMA mapping error\n");
@@ -528,10 +528,13 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
u8 *info;
info = kzalloc(PER_INFO_BYTE, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
ret = meson_nfc_dma_buffer_setup(nand, buf, len, info,
PER_INFO_BYTE, DMA_FROM_DEVICE);
if (ret)
- return ret;
+ goto out;
cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
@@ -539,6 +542,8 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
meson_nfc_drain_cmd(nfc);
meson_nfc_wait_cmd_finish(nfc, 1000);
meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE);
+
+out:
kfree(info);
return ret;
@@ -640,7 +645,7 @@ static int meson_nfc_write_page_sub(struct nand_chip *nand,
return ret;
ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
- data_len, (u8 *)meson_chip->info_buf,
+ data_len, meson_chip->info_buf,
info_len, DMA_TO_DEVICE);
if (ret)
return ret;
@@ -724,7 +729,7 @@ static int meson_nfc_read_page_sub(struct nand_chip *nand,
return ret;
ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
- data_len, (u8 *)meson_chip->info_buf,
+ data_len, meson_chip->info_buf,
info_len, DMA_FROM_DEVICE);
if (ret)
return ret;
@@ -1183,6 +1188,8 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
return -EINVAL;
}
+ mtd_set_ooblayout(mtd, &meson_ooblayout_ops);
+
ret = meson_nand_bch_mode(nand);
if (ret)
return -EINVAL;
@@ -1226,17 +1233,13 @@ meson_nfc_nand_chip_init(struct device *dev,
int ret, i;
u32 tmp, nsels;
- if (!of_get_property(np, "reg", &nsels))
- return -EINVAL;
-
- nsels /= sizeof(u32);
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
if (!nsels || nsels > MAX_CE_NUM) {
dev_err(dev, "invalid register property size\n");
return -EINVAL;
}
- meson_chip = devm_kzalloc(dev,
- sizeof(*meson_chip) + (nsels * sizeof(u8)),
+ meson_chip = devm_kzalloc(dev, struct_size(meson_chip, sels, nsels),
GFP_KERNEL);
if (!meson_chip)
return -ENOMEM;
@@ -1377,6 +1380,7 @@ static int meson_nfc_probe(struct platform_device *pdev)
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
+ init_completion(&nfc->completion);
nfc->dev = dev;
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 2c0e09187773..b17619f30b1b 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1197,8 +1197,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
/* if optional dt settings not present */
if (!nand->ecc.size || !nand->ecc.strength) {
/* use datasheet requirements */
- nand->ecc.strength = nand->ecc_strength_ds;
- nand->ecc.size = nand->ecc_step_ds;
+ nand->ecc.strength = nand->base.eccreq.strength;
+ nand->ecc.size = nand->base.eccreq.step_size;
/*
* align eccstrength and eccsize
diff --git a/drivers/mtd/nand/raw/nand_amd.c b/drivers/mtd/nand/raw/nand_amd.c
index 890c5b43e03c..6217555c19a6 100644
--- a/drivers/mtd/nand/raw/nand_amd.c
+++ b/drivers/mtd/nand/raw/nand_amd.c
@@ -20,6 +20,9 @@
static void amd_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
nand_decode_ext_id(chip);
@@ -31,16 +34,24 @@ static void amd_nand_decode_id(struct nand_chip *chip)
*/
if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 &&
chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 &&
- mtd->writesize == 512) {
- mtd->erasesize = 128 * 1024;
- mtd->erasesize <<= ((chip->id.data[3] & 0x03) << 1);
+ memorg->pagesize == 512) {
+ memorg->pages_per_eraseblock = 256;
+ memorg->pages_per_eraseblock <<= ((chip->id.data[3] & 0x03) << 1);
+ mtd->erasesize = memorg->pages_per_eraseblock *
+ memorg->pagesize;
}
}
static int amd_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ /*
+ * According to the datasheet of some Cypress SLC NANDs,
+ * the bad block markers can be in the first, second or last
+ * page of a block. So let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index ddd396e93e32..2cf71060d6f8 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -240,10 +240,10 @@ static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
/*
- * cs should always lie between 0 and chip->numchips, when that's not
- * the case it's a bug and the caller should be fixed.
+ * cs should always lie between 0 and nanddev_ntargets(), when that's
+ * not the case it's a bug and the caller should be fixed.
*/
- if (WARN_ON(cs > chip->numchips))
+ if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
return;
chip->cur_cs = cs;
@@ -283,6 +283,31 @@ static void nand_release_device(struct nand_chip *chip)
}
/**
+ * nand_bbm_get_next_page - Get the next page for bad block markers
+ * @chip: NAND chip object
+ * @page: First page to start checking for bad block marker usage
+ *
+ * Returns an integer that corresponds to the page offset within a block, for
+ * a page that is used to store bad block markers. If no more pages are
+ * available, -EINVAL is returned.
+ */
+int nand_bbm_get_next_page(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int last_page = ((mtd->erasesize - mtd->writesize) >>
+ chip->page_shift) & chip->pagemask;
+
+ if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
+ return 0;
+ else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
+ return 1;
+ else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
+ return last_page;
+
+ return -EINVAL;
+}
+
+/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
* @chip: NAND chip object
* @ofs: offset from device start
@@ -291,18 +316,15 @@ static void nand_release_device(struct nand_chip *chip)
*/
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int page, page_end, res;
+ int first_page, page_offset;
+ int res;
u8 bad;
- if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
- ofs += mtd->erasesize - mtd->writesize;
-
- page = (int)(ofs >> chip->page_shift) & chip->pagemask;
- page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
+ first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ page_offset = nand_bbm_get_next_page(chip, 0);
- for (; page < page_end; page++) {
- res = chip->ecc.read_oob(chip, page);
+ while (page_offset >= 0) {
+ res = chip->ecc.read_oob(chip, first_page + page_offset);
if (res < 0)
return res;
@@ -314,6 +336,8 @@ static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
res = hweight8(bad) < chip->badblockbits;
if (res)
return res;
+
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
}
return 0;
@@ -459,8 +483,8 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
}
/* Invalidate the page cache, if we write to the cached page */
- if (page == chip->pagebuf)
- chip->pagebuf = -1;
+ if (page == chip->pagecache.page)
+ chip->pagecache.page = -1;
nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
@@ -493,7 +517,7 @@ static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_ops ops;
uint8_t buf[2] = { 0, 0 };
- int ret = 0, res, i = 0;
+ int ret = 0, res, page_offset;
memset(&ops, 0, sizeof(ops));
ops.oobbuf = buf;
@@ -506,17 +530,18 @@ static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
}
ops.mode = MTD_OPS_PLACE_OOB;
- /* Write to first/last page(s) if necessary */
- if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
- ofs += mtd->erasesize - mtd->writesize;
- do {
- res = nand_do_write_oob(chip, ofs, &ops);
+ page_offset = nand_bbm_get_next_page(chip, 0);
+
+ while (page_offset >= 0) {
+ res = nand_do_write_oob(chip,
+ ofs + (page_offset * mtd->writesize),
+ &ops);
+
if (!ret)
ret = res;
- i++;
- ofs += mtd->writesize;
- } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
+ }
return ret;
}
@@ -3173,7 +3198,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
use_bufpoi = 0;
/* Is the current page in the buffer? */
- if (realpage != chip->pagebuf || oob) {
+ if (realpage != chip->pagecache.page || oob) {
bufpoi = use_bufpoi ? chip->data_buf : buf;
if (use_bufpoi && aligned)
@@ -3199,7 +3224,7 @@ read_retry:
if (ret < 0) {
if (use_bufpoi)
/* Invalidate page cache */
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
break;
}
@@ -3208,11 +3233,11 @@ read_retry:
if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
!(mtd->ecc_stats.failed - ecc_failures) &&
(ops->mode != MTD_OPS_RAW)) {
- chip->pagebuf = realpage;
- chip->pagebuf_bitflips = ret;
+ chip->pagecache.page = realpage;
+ chip->pagecache.bitflips = ret;
} else {
/* Invalidate page cache */
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
}
memcpy(buf, chip->data_buf + col, bytes);
}
@@ -3252,7 +3277,7 @@ read_retry:
memcpy(buf, chip->data_buf + col, bytes);
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
- chip->pagebuf_bitflips);
+ chip->pagecache.bitflips);
}
readlen -= bytes;
@@ -3973,9 +3998,9 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
page = realpage & chip->pagemask;
/* Invalidate the page cache, when we write to the cached page */
- if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
- ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
- chip->pagebuf = -1;
+ if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
+ ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
+ chip->pagecache.page = -1;
/* Don't allow multipage oob writes with offset */
if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
@@ -4004,10 +4029,9 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
__func__, buf);
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
- chip->pagebuf = -1;
- memset(chip->data_buf, 0xff, mtd->writesize);
- memcpy(&chip->data_buf[column], buf, bytes);
- wbuf = chip->data_buf;
+ wbuf = nand_get_data_buf(chip);
+ memset(wbuf, 0xff, mtd->writesize);
+ memcpy(&wbuf[column], buf, bytes);
}
if (unlikely(oob)) {
@@ -4197,9 +4221,9 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
* Invalidate the page cache, if we erase the block which
* contains the current cached page.
*/
- if (page <= chip->pagebuf && chip->pagebuf <
+ if (page <= chip->pagecache.page && chip->pagecache.page <
(page + pages_per_block))
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
ret = nand_erase_op(chip, (page & chip->pagemask) >>
(chip->phys_erase_shift - chip->page_shift));
@@ -4299,42 +4323,6 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/**
- * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
- * @len: length of mtd
- */
-static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- u32 part_start_block;
- u32 part_end_block;
- u32 part_start_die;
- u32 part_end_die;
-
- /*
- * max_bb_per_die and blocks_per_die used to determine
- * the maximum bad block count.
- */
- if (!chip->max_bb_per_die || !chip->blocks_per_die)
- return -ENOTSUPP;
-
- /* Get the start and end of the partition in erase blocks. */
- part_start_block = mtd_div_by_eb(ofs, mtd);
- part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
-
- /* Get the start and end LUNs of the partition. */
- part_start_die = part_start_block / chip->blocks_per_die;
- part_end_die = part_end_block / chip->blocks_per_die;
-
- /*
- * Look up the bad blocks per unit and multiply by the number of units
- * that the partition spans.
- */
- return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
-}
-
-/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
*/
@@ -4485,21 +4473,29 @@ static int nand_get_bits_per_cell(u8 cellinfo)
*/
void nand_decode_ext_id(struct nand_chip *chip)
{
+ struct nand_memory_organization *memorg;
struct mtd_info *mtd = nand_to_mtd(chip);
int extid;
u8 *id_data = chip->id.data;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
/* The 3rd id byte holds MLC / multichip data */
- chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
/* The 4th id byte is the important one */
extid = id_data[3];
/* Calc pagesize */
- mtd->writesize = 1024 << (extid & 0x03);
+ memorg->pagesize = 1024 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
extid >>= 2;
/* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
+ memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
+ mtd->oobsize = memorg->oobsize;
extid >>= 2;
/* Calc blocksize. Blocksize is multiples of 64KiB */
+ memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
+ memorg->pagesize;
mtd->erasesize = (64 * 1024) << (extid & 0x03);
extid >>= 2;
/* Get buswidth information */
@@ -4516,13 +4512,19 @@ EXPORT_SYMBOL_GPL(nand_decode_ext_id);
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+ memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
mtd->erasesize = type->erasesize;
- mtd->writesize = type->pagesize;
- mtd->oobsize = mtd->writesize / 32;
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->oobsize = memorg->pagesize / 32;
+ mtd->oobsize = memorg->oobsize;
/* All legacy ID NAND are small-page, SLC */
- chip->bits_per_cell = 1;
+ memorg->bits_per_cell = 1;
}
/*
@@ -4536,9 +4538,9 @@ static void nand_decode_bbm_options(struct nand_chip *chip)
/* Set the bad block position */
if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
- chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ chip->badblockpos = NAND_BBM_POS_LARGE;
else
- chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+ chip->badblockpos = NAND_BBM_POS_SMALL;
}
static inline bool is_full_id_nand(struct nand_flash_dev *type)
@@ -4550,18 +4552,28 @@ static bool find_full_id_nand(struct nand_chip *chip,
struct nand_flash_dev *type)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
u8 *id_data = chip->id.data;
+ memorg = nanddev_get_memorg(&chip->base);
+
if (!strncmp(type->id, id_data, type->id_len)) {
- mtd->writesize = type->pagesize;
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->pages_per_eraseblock = type->erasesize /
+ type->pagesize;
mtd->erasesize = type->erasesize;
- mtd->oobsize = type->oobsize;
-
- chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
- chip->chipsize = (uint64_t)type->chipsize << 20;
+ memorg->oobsize = type->oobsize;
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
chip->options |= type->options;
- chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
- chip->ecc_step_ds = NAND_ECC_STEP(type);
+ chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
+ chip->base.eccreq.step_size = NAND_ECC_STEP(type);
chip->onfi_timing_mode_default =
type->onfi_timing_mode_default;
@@ -4587,8 +4599,12 @@ static void nand_manufacturer_detect(struct nand_chip *chip)
*/
if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
chip->manufacturer.desc->ops->detect) {
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
/* The 3rd id byte holds MLC / multichip data */
- chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
+ memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
chip->manufacturer.desc->ops->detect(chip);
} else {
nand_decode_ext_id(chip);
@@ -4637,9 +4653,20 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
const struct nand_manufacturer *manufacturer;
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
int busw, ret;
u8 *id_data = chip->id.data;
u8 maf_id, dev_id;
+ u64 targetsize;
+
+ /*
+ * Let's start by initializing memorg fields that might be left
+ * unassigned by the ID-based detection logic.
+ */
+ memorg = nanddev_get_memorg(&chip->base);
+ memorg->planes_per_lun = 1;
+ memorg->luns_per_target = 1;
+ memorg->ntargets = 1;
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
@@ -4735,8 +4762,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
if (!chip->parameters.model)
return -ENOMEM;
- chip->chipsize = (uint64_t)type->chipsize << 20;
-
if (!type->pagesize)
nand_manufacturer_detect(chip);
else
@@ -4745,6 +4770,11 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
/* Get chip options */
chip->options |= type->options;
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
+
ident_done:
if (!mtd->name)
mtd->name = chip->parameters.model;
@@ -4773,14 +4803,15 @@ ident_done:
/* Calculate the address shift from the page size */
chip->page_shift = ffs(mtd->writesize) - 1;
/* Convert chipsize to number of pages per chip -1 */
- chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ targetsize = nanddev_target_size(&chip->base);
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
chip->bbt_erase_shift = chip->phys_erase_shift =
ffs(mtd->erasesize) - 1;
- if (chip->chipsize & 0xffffffff)
- chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+ if (targetsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)targetsize) - 1;
else {
- chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
+ chip->chip_shift = ffs((unsigned)(targetsize >> 32));
chip->chip_shift += 32 - 1;
}
@@ -4796,7 +4827,7 @@ ident_done:
pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
chip->parameters.model);
pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
- (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
return 0;
@@ -4971,10 +5002,13 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
struct nand_flash_dev *table)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
int nand_maf_id, nand_dev_id;
unsigned int i;
int ret;
+ memorg = nanddev_get_memorg(&chip->base);
+
/* Assume all dies are deselected when we enter nand_scan_ident(). */
chip->cur_cs = -1;
@@ -4990,12 +5024,6 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- /*
- * Start with chips->numchips = maxchips to let nand_select_target() do
- * its job. chip->numchips will be adjusted after.
- */
- chip->numchips = maxchips;
-
/* Set the default functions */
nand_set_defaults(chip);
@@ -5042,8 +5070,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
pr_info("%d chips detected\n", i);
/* Store the number of chips and calc total size for mtd */
- chip->numchips = i;
- mtd->size = i * chip->chipsize;
+ memorg->ntargets = i;
+ mtd->size = i * nanddev_target_size(&chip->base);
return 0;
}
@@ -5078,13 +5106,13 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
ecc->bytes = 3;
ecc->strength = 1;
- if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
+ if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
return 0;
case NAND_ECC_BCH:
if (!mtd_nand_has_bch()) {
- WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
return -EINVAL;
}
ecc->calculate = nand_bch_calculate_ecc;
@@ -5224,8 +5252,8 @@ nand_match_ecc_req(struct nand_chip *chip,
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
- int req_step = chip->ecc_step_ds;
- int req_strength = chip->ecc_strength_ds;
+ int req_step = chip->base.eccreq.step_size;
+ int req_strength = chip->base.eccreq.strength;
int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
int best_step, best_strength, best_ecc_bytes;
int best_ecc_bytes_total = INT_MAX;
@@ -5418,7 +5446,7 @@ static bool nand_ecc_strength_good(struct nand_chip *chip)
struct nand_ecc_ctrl *ecc = &chip->ecc;
int corr, ds_corr;
- if (ecc->size == 0 || chip->ecc_step_ds == 0)
+ if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
/* Not enough information */
return true;
@@ -5427,11 +5455,56 @@ static bool nand_ecc_strength_good(struct nand_chip *chip)
* the correction density.
*/
corr = (mtd->writesize * ecc->strength) / ecc->size;
- ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
+ ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
+ chip->base.eccreq.step_size;
- return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
+ return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
}
+static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ unsigned int eb = nanddev_pos_to_row(nand, pos);
+ int ret;
+
+ eb >>= nand->rowconv.eraseblock_addr_shift;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_erase_op(chip, eb);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static int rawnand_markbad(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+
+ return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+}
+
+static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ int ret;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static const struct nand_ops rawnand_ops = {
+ .erase = rawnand_erase,
+ .markbad = rawnand_markbad,
+ .isbad = rawnand_isbad,
+};
+
/**
* nand_scan_tail - Scan for the NAND device
* @chip: NAND chip object
@@ -5687,7 +5760,7 @@ static int nand_scan_tail(struct nand_chip *chip)
chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
/* Invalidate the pagebuffer reference */
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
/* Large page NAND with SOFT_ECC should support subpage reads */
switch (ecc->mode) {
@@ -5700,10 +5773,15 @@ static int nand_scan_tail(struct nand_chip *chip)
break;
}
+ ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+
+ /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
+ if (chip->options & NAND_ROM)
+ mtd->flags = MTD_CAP_ROM;
+
/* Fill in remaining MTD driver data */
- mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
- mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
- MTD_CAP_NANDFLASH;
mtd->_erase = nand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
@@ -5719,8 +5797,7 @@ static int nand_scan_tail(struct nand_chip *chip)
mtd->_block_isreserved = nand_block_isreserved;
mtd->_block_isbad = nand_block_isbad;
mtd->_block_markbad = nand_block_markbad;
- mtd->_max_bad_blocks = nand_max_bad_blocks;
- mtd->writebufsize = mtd->writesize;
+ mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
/*
* Initialize bitflip_threshold to its default prior scan_bbt() call.
@@ -5733,13 +5810,13 @@ static int nand_scan_tail(struct nand_chip *chip)
/* Initialize the ->data_interface field. */
ret = nand_init_data_interface(chip);
if (ret)
- goto err_nand_manuf_cleanup;
+ goto err_nanddev_cleanup;
/* Enter fastest possible mode on all dies. */
- for (i = 0; i < chip->numchips; i++) {
+ for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
ret = nand_setup_data_interface(chip, i);
if (ret)
- goto err_nand_manuf_cleanup;
+ goto err_nanddev_cleanup;
}
/* Check, if we should skip the bad block table scan */
@@ -5749,11 +5826,14 @@ static int nand_scan_tail(struct nand_chip *chip)
/* Build bad block table */
ret = nand_create_bbt(chip);
if (ret)
- goto err_nand_manuf_cleanup;
+ goto err_nanddev_cleanup;
return 0;
+err_nanddev_cleanup:
+ nanddev_cleanup(&chip->base);
+
err_nand_manuf_cleanup:
nand_manufacturer_cleanup(chip);
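
The new nand_bbm_get_next_page() helper above turns the old SCAN2NDPAGE/SCANLASTPAGE special cases into a simple iterator over every page offset that may carry a bad-block marker. The stand-alone sketch below mirrors its logic outside the kernel (the flag values and the 128 KiB block / 2 KiB page geometry are illustrative) and prints the offsets a FIRSTPAGE|LASTPAGE chip would be checked at.

#include <stdio.h>

#define NAND_BBM_FIRSTPAGE	0x01000000	/* flag values are placeholders */
#define NAND_BBM_SECONDPAGE	0x02000000
#define NAND_BBM_LASTPAGE	0x04000000

/* Mirror of nand_bbm_get_next_page(), with the chip fields passed in. */
static int bbm_get_next_page(unsigned int options, int last_page, int page)
{
	if (page == 0 && options & NAND_BBM_FIRSTPAGE)
		return 0;
	else if (page <= 1 && options & NAND_BBM_SECONDPAGE)
		return 1;
	else if (page <= last_page && options & NAND_BBM_LASTPAGE)
		return last_page;

	return -1;	/* -EINVAL in the kernel */
}

int main(void)
{
	/* e.g. 128 KiB blocks of 2 KiB pages -> last page offset 63 */
	int last_page = (128 * 1024 - 2048) / 2048;
	unsigned int opts = NAND_BBM_FIRSTPAGE | NAND_BBM_LASTPAGE;
	int page = bbm_get_next_page(opts, last_page, 0);

	while (page >= 0) {
		printf("check BBM in page offset %d\n", page);
		page = bbm_get_next_page(opts, last_page, page + 1);
	}
	return 0;
}
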
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 19a2b563acdf..fd3c10216eda 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -264,18 +264,19 @@ static int read_abs_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, int chip)
{
struct mtd_info *mtd = nand_to_mtd(this);
+ u64 targetsize = nanddev_target_size(&this->base);
int res = 0, i;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
- for (i = 0; i < this->numchips; i++) {
+ for (i = 0; i < nanddev_ntargets(&this->base); i++) {
if (chip == -1 || chip == i)
res = read_bbt(this, buf, td->pages[i],
- this->chipsize >> this->bbt_erase_shift,
+ targetsize >> this->bbt_erase_shift,
td, offs);
if (res)
return res;
- offs += this->chipsize >> this->bbt_erase_shift;
+ offs += targetsize >> this->bbt_erase_shift;
}
} else {
res = read_bbt(this, buf, td->pages[0],
@@ -415,11 +416,12 @@ static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
/* Scan a given block partially */
static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, int numpages)
+ loff_t offs, uint8_t *buf)
{
struct mtd_info *mtd = nand_to_mtd(this);
+
struct mtd_oob_ops ops;
- int j, ret;
+ int ret, page_offset;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
@@ -427,12 +429,15 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
- for (j = 0; j < numpages; j++) {
+ page_offset = nand_bbm_get_next_page(this, 0);
+
+ while (page_offset >= 0) {
/*
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
- ret = mtd_read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs + (page_offset * mtd->writesize),
+ &ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
@@ -440,8 +445,9 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
if (check_short_pattern(buf, bd))
return 1;
- offs += mtd->writesize;
+ page_offset = nand_bbm_get_next_page(this, page_offset + 1);
}
+
return 0;
}
@@ -459,43 +465,35 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
static int create_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
- int i, numblocks, numpages;
- int startblock;
+ int i, numblocks, startblock;
loff_t from;
pr_info("Scanning device for bad blocks\n");
- if (bd->options & NAND_BBT_SCAN2NDPAGE)
- numpages = 2;
- else
- numpages = 1;
-
if (chip == -1) {
numblocks = mtd->size >> this->bbt_erase_shift;
startblock = 0;
from = 0;
} else {
- if (chip >= this->numchips) {
+ if (chip >= nanddev_ntargets(&this->base)) {
pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
- chip + 1, this->numchips);
+ chip + 1, nanddev_ntargets(&this->base));
return -EINVAL;
}
- numblocks = this->chipsize >> this->bbt_erase_shift;
+ numblocks = targetsize >> this->bbt_erase_shift;
startblock = chip * numblocks;
numblocks += startblock;
from = (loff_t)startblock << this->bbt_erase_shift;
}
- if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
- from += mtd->erasesize - (mtd->writesize * numpages);
-
for (i = startblock; i < numblocks; i++) {
int ret;
BUG_ON(bd->options & NAND_BBT_NO_OOB);
- ret = scan_block_fast(this, bd, from, buf, numpages);
+ ret = scan_block_fast(this, bd, from, buf);
if (ret < 0)
return ret;
@@ -529,6 +527,7 @@ static int create_bbt(struct nand_chip *this, uint8_t *buf,
static int search_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
int i, chips;
int startblock, block, dir;
@@ -547,8 +546,8 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf,
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
- chips = this->numchips;
- bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ chips = nanddev_ntargets(&this->base);
+ bbtblocks = targetsize >> this->bbt_erase_shift;
startblock &= bbtblocks - 1;
} else {
chips = 1;
@@ -576,7 +575,7 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf,
break;
}
}
- startblock += this->chipsize >> this->bbt_erase_shift;
+ startblock += targetsize >> this->bbt_erase_shift;
}
/* Check, if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
@@ -626,6 +625,7 @@ static void search_read_bbts(struct nand_chip *this, uint8_t *buf,
static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
struct nand_bbt_descr *md, int chip)
{
+ u64 targetsize = nanddev_target_size(&this->base);
int startblock, dir, page, numblocks, i;
/*
@@ -637,9 +637,9 @@ static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
return td->pages[chip] >>
(this->bbt_erase_shift - this->page_shift);
- numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
if (!(td->options & NAND_BBT_PERCHIP))
- numblocks *= this->numchips;
+ numblocks *= nanddev_ntargets(&this->base);
/*
* Automatic placement of the bad block table. Search direction
@@ -717,6 +717,7 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
struct erase_info einfo;
int i, res, chip = 0;
@@ -737,10 +738,10 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
rcode = 0xff;
/* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
- numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
/* Full device write or specific chip? */
if (chipsel == -1) {
- nrchips = this->numchips;
+ nrchips = nanddev_ntargets(&this->base);
} else {
nrchips = chipsel + 1;
chip = chipsel;
@@ -901,7 +902,9 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
static inline int nand_memory_bbt(struct nand_chip *this,
struct nand_bbt_descr *bd)
{
- return create_bbt(this, this->data_buf, bd, -1);
+ u8 *pagebuf = nand_get_data_buf(this);
+
+ return create_bbt(this, pagebuf, bd, -1);
}
/**
@@ -925,7 +928,7 @@ static int check_create(struct nand_chip *this, uint8_t *buf,
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP)
- chips = this->numchips;
+ chips = nanddev_ntargets(&this->base);
else
chips = 1;
@@ -1097,14 +1100,15 @@ static int nand_update_bbt(struct nand_chip *this, loff_t offs)
*/
static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
int i, j, chips, block, nrblocks, update;
uint8_t oldval;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
- chips = this->numchips;
- nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ chips = nanddev_ntargets(&this->base);
+ nrblocks = (int)(targetsize >> this->bbt_erase_shift);
} else {
chips = 1;
nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
@@ -1157,6 +1161,7 @@ static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
*/
static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
u32 pattern_len;
u32 bits;
@@ -1185,7 +1190,7 @@ static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
}
if (bd->options & NAND_BBT_PERCHIP)
- table_size = this->chipsize >> this->bbt_erase_shift;
+ table_size = targetsize >> this->bbt_erase_shift;
else
table_size = mtd->size >> this->bbt_erase_shift;
table_size >>= 3;
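The nand_bbt.c hunks above repeatedly swap this->chipsize / this->numchips for the nanddev helpers. A minimal sketch of the resulting arithmetic, assuming only nanddev_target_size(), nanddev_ntargets() and the existing bbt_erase_shift field (helper names are hypothetical, illustrative only, not part of the patch):

#include <linux/mtd/rawnand.h>

/* Hypothetical helpers; same math as create_bbt()/write_bbt() above. */
static inline int bbt_blocks_per_target(struct nand_chip *this)
{
	u64 targetsize = nanddev_target_size(&this->base);

	return (int)(targetsize >> this->bbt_erase_shift);
}

static inline int bbt_blocks_per_device(struct nand_chip *this)
{
	/* Whole-device count, as used when NAND_BBT_PERCHIP is not set. */
	return bbt_blocks_per_target(this) * nanddev_ntargets(&this->base);
}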
diff --git a/drivers/mtd/nand/raw/nand_esmt.c b/drivers/mtd/nand/raw/nand_esmt.c
index 96f039a83bc8..3338c68aaaf1 100644
--- a/drivers/mtd/nand/raw/nand_esmt.c
+++ b/drivers/mtd/nand/raw/nand_esmt.c
@@ -14,20 +14,20 @@ static void esmt_nand_decode_id(struct nand_chip *chip)
/* Extract ECC requirements from 5th id byte. */
if (chip->id.len >= 5 && nand_is_slc(chip)) {
- chip->ecc_step_ds = 512;
+ chip->base.eccreq.step_size = 512;
switch (chip->id.data[4] & 0x3) {
case 0x0:
- chip->ecc_strength_ds = 4;
+ chip->base.eccreq.strength = 4;
break;
case 0x1:
- chip->ecc_strength_ds = 2;
+ chip->base.eccreq.strength = 2;
break;
case 0x2:
- chip->ecc_strength_ds = 1;
+ chip->base.eccreq.strength = 1;
break;
default:
WARN(1, "Could not get ECC info");
- chip->ecc_step_ds = 0;
+ chip->base.eccreq.step_size = 0;
break;
}
}
@@ -36,7 +36,14 @@ static void esmt_nand_decode_id(struct nand_chip *chip)
static int esmt_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ /*
+ * It is known that some ESMT SLC NANDs have been shipped
+ * with the factory bad block markers in the first or last page
+ * of the block, instead of the first or second page. To be on
+ * the safe side, let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
return 0;
}
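The NAND_BBM_* flags set here (and in the other manufacturer drivers below) drive the nand_bbm_get_next_page() iteration used by scan_block_fast() earlier in this diff. A hedged sketch of what such a helper has to do, built only from flags and fields visible in this series; the real implementation is added to nand_base.c elsewhere and may differ:

#include <linux/mtd/rawnand.h>

/*
 * Sketch: return the next in-block page that may carry a factory bad block
 * marker, or a negative value once all candidate pages have been checked.
 */
static int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int last_page = mtd->erasesize / mtd->writesize - 1;

	if (page == 0 && (chip->options & NAND_BBM_FIRSTPAGE))
		return 0;
	if (page <= 1 && (chip->options & NAND_BBM_SECONDPAGE))
		return 1;
	if (page <= last_page && (chip->options & NAND_BBM_LASTPAGE))
		return last_page;

	return -EINVAL;
}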
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 343f477362d1..7c600c4d5ec8 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -418,24 +418,27 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
bool valid_jedecid)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
u8 oobsize;
+ memorg = nanddev_get_memorg(&chip->base);
+
oobsize = ((chip->id.data[3] >> 2) & 0x3) |
((chip->id.data[3] >> 4) & 0x4);
if (valid_jedecid) {
switch (oobsize) {
case 0:
- mtd->oobsize = 2048;
+ memorg->oobsize = 2048;
break;
case 1:
- mtd->oobsize = 1664;
+ memorg->oobsize = 1664;
break;
case 2:
- mtd->oobsize = 1024;
+ memorg->oobsize = 1024;
break;
case 3:
- mtd->oobsize = 640;
+ memorg->oobsize = 640;
break;
default:
/*
@@ -450,25 +453,25 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
} else {
switch (oobsize) {
case 0:
- mtd->oobsize = 128;
+ memorg->oobsize = 128;
break;
case 1:
- mtd->oobsize = 224;
+ memorg->oobsize = 224;
break;
case 2:
- mtd->oobsize = 448;
+ memorg->oobsize = 448;
break;
case 3:
- mtd->oobsize = 64;
+ memorg->oobsize = 64;
break;
case 4:
- mtd->oobsize = 32;
+ memorg->oobsize = 32;
break;
case 5:
- mtd->oobsize = 16;
+ memorg->oobsize = 16;
break;
case 6:
- mtd->oobsize = 640;
+ memorg->oobsize = 640;
break;
default:
/*
@@ -492,8 +495,10 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
* the actual OOB size for this chip is: 640 * 16k / 8k).
*/
if (chip->id.data[1] == 0xde)
- mtd->oobsize *= mtd->writesize / SZ_8K;
+ memorg->oobsize *= memorg->pagesize / SZ_8K;
}
+
+ mtd->oobsize = memorg->oobsize;
}
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
@@ -503,30 +508,30 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
if (valid_jedecid) {
/* Reference: H27UCG8T2E datasheet */
- chip->ecc_step_ds = 1024;
+ chip->base.eccreq.step_size = 1024;
switch (ecc_level) {
case 0:
- chip->ecc_step_ds = 0;
- chip->ecc_strength_ds = 0;
+ chip->base.eccreq.step_size = 0;
+ chip->base.eccreq.strength = 0;
break;
case 1:
- chip->ecc_strength_ds = 4;
+ chip->base.eccreq.strength = 4;
break;
case 2:
- chip->ecc_strength_ds = 24;
+ chip->base.eccreq.strength = 24;
break;
case 3:
- chip->ecc_strength_ds = 32;
+ chip->base.eccreq.strength = 32;
break;
case 4:
- chip->ecc_strength_ds = 40;
+ chip->base.eccreq.strength = 40;
break;
case 5:
- chip->ecc_strength_ds = 50;
+ chip->base.eccreq.strength = 50;
break;
case 6:
- chip->ecc_strength_ds = 60;
+ chip->base.eccreq.strength = 60;
break;
default:
/*
@@ -547,14 +552,14 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
if (nand_tech < 3) {
/* > 26nm, reference: H27UBG8T2A datasheet */
if (ecc_level < 5) {
- chip->ecc_step_ds = 512;
- chip->ecc_strength_ds = 1 << ecc_level;
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << ecc_level;
} else if (ecc_level < 7) {
if (ecc_level == 5)
- chip->ecc_step_ds = 2048;
+ chip->base.eccreq.step_size = 2048;
else
- chip->ecc_step_ds = 1024;
- chip->ecc_strength_ds = 24;
+ chip->base.eccreq.step_size = 1024;
+ chip->base.eccreq.strength = 24;
} else {
/*
* We should never reach this case, but if that
@@ -567,14 +572,14 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
} else {
/* <= 26nm, reference: H27UBG8T2B datasheet */
if (!ecc_level) {
- chip->ecc_step_ds = 0;
- chip->ecc_strength_ds = 0;
+ chip->base.eccreq.step_size = 0;
+ chip->base.eccreq.strength = 0;
} else if (ecc_level < 5) {
- chip->ecc_step_ds = 512;
- chip->ecc_strength_ds = 1 << (ecc_level - 1);
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << (ecc_level - 1);
} else {
- chip->ecc_step_ds = 1024;
- chip->ecc_strength_ds = 24 +
+ chip->base.eccreq.step_size = 1024;
+ chip->base.eccreq.strength = 24 +
(8 * (ecc_level - 5));
}
}
@@ -587,7 +592,7 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
u8 nand_tech;
/* We need scrambling on all TLC NANDs*/
- if (chip->bits_per_cell > 2)
+ if (nanddev_bits_per_cell(&chip->base) > 2)
chip->options |= NAND_NEED_SCRAMBLING;
/* And on MLC NANDs with sub-3xnm process */
@@ -609,9 +614,12 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
static void hynix_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
bool valid_jedecid;
u8 tmp;
+ memorg = nanddev_get_memorg(&chip->base);
+
/*
* Exclude all SLC NANDs from this advanced detection scheme.
* According to the ranges defined in several datasheets, it might
@@ -625,7 +633,8 @@ static void hynix_nand_decode_id(struct nand_chip *chip)
}
/* Extract pagesize */
- mtd->writesize = 2048 << (chip->id.data[3] & 0x03);
+ memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
+ mtd->writesize = memorg->pagesize;
tmp = (chip->id.data[3] >> 4) & 0x3;
/*
@@ -635,12 +644,19 @@ static void hynix_nand_decode_id(struct nand_chip *chip)
* The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
* this case the erasesize is set to 768KiB.
*/
- if (chip->id.data[3] & 0x80)
+ if (chip->id.data[3] & 0x80) {
+ memorg->pages_per_eraseblock = (SZ_1M << tmp) /
+ memorg->pagesize;
mtd->erasesize = SZ_1M << tmp;
- else if (tmp == 3)
+ } else if (tmp == 3) {
+ memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
+ memorg->pagesize;
mtd->erasesize = SZ_512K + SZ_256K;
- else
+ } else {
+ memorg->pages_per_eraseblock = (SZ_128K << tmp) /
+ memorg->pagesize;
mtd->erasesize = SZ_128K << tmp;
+ }
/*
* Modern Toggle DDR NANDs have a valid JEDECID even though they are
@@ -672,9 +688,9 @@ static int hynix_nand_init(struct nand_chip *chip)
int ret;
if (!nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ chip->options |= NAND_BBM_LASTPAGE;
else
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
if (!hynix)
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 38b5dc22cb30..9b540e76f84f 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -22,12 +22,15 @@
int nand_jedec_detect(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
struct nand_jedec_params *p;
struct jedec_ecc_info *ecc;
int jedec_version = 0;
char id[5];
int i, val, ret;
+ memorg = nanddev_get_memorg(&chip->base);
+
/* Try JEDEC for unknown chip or LP */
ret = nand_readid_op(chip, 0x40, id, sizeof(id));
if (ret || strncmp(id, "JEDEC", sizeof(id)))
@@ -81,18 +84,24 @@ int nand_jedec_detect(struct nand_chip *chip)
goto free_jedec_param_page;
}
- mtd->writesize = le32_to_cpu(p->byte_per_page);
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
/* Please reference to the comment for nand_flash_detect_onfi. */
- mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
- mtd->erasesize *= mtd->writesize;
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
+
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
- mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->multi_plane_addr;
/* Please reference to the comment for nand_flash_detect_onfi. */
- chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
- chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
- chip->bits_per_cell = p->bits_per_cell;
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->bits_per_cell = p->bits_per_cell;
if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
chip->options |= NAND_BUSWIDTH_16;
@@ -101,8 +110,8 @@ int nand_jedec_detect(struct nand_chip *chip)
ecc = &p->ecc_info[0];
if (ecc->codeword_size >= 9) {
- chip->ecc_strength_ds = ecc->ecc_bits;
- chip->ecc_step_ds = 1 << ecc->codeword_size;
+ chip->base.eccreq.strength = ecc->ecc_bits;
+ chip->base.eccreq.step_size = 1 << ecc->codeword_size;
} else {
pr_warn("Invalid codeword size\n");
}
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 47d8cda547cf..e287e71347c5 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -62,7 +62,7 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
static int macronix_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
macronix_nand_fix_broken_get_timings(chip);
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index b85e1c13b79e..cbd4f09ac178 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -385,13 +385,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
if (!chip->parameters.onfi)
return MICRON_ON_DIE_UNSUPPORTED;
- if (chip->bits_per_cell != 1)
+ if (nanddev_bits_per_cell(&chip->base) != 1)
return MICRON_ON_DIE_UNSUPPORTED;
/*
* We only support on-die ECC of 4/512 or 8/512
*/
- if (chip->ecc_strength_ds != 4 && chip->ecc_strength_ds != 8)
+ if (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
return MICRON_ON_DIE_UNSUPPORTED;
/* 0x2 means on-die ECC is available. */
@@ -424,7 +424,7 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
/*
* We only support on-die ECC of 4/512 or 8/512
*/
- if (chip->ecc_strength_ds != 4 && chip->ecc_strength_ds != 8)
+ if (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
return MICRON_ON_DIE_UNSUPPORTED;
return MICRON_ON_DIE_SUPPORTED;
@@ -448,7 +448,7 @@ static int micron_nand_init(struct nand_chip *chip)
goto err_free_manuf_data;
if (mtd->writesize == 2048)
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
ondie = micron_supports_on_die_ecc(chip);
@@ -479,7 +479,7 @@ static int micron_nand_init(struct nand_chip *chip)
* That's not needed for 8-bit ECC, because the status expose
* a better approximation of the number of bitflips in a page.
*/
- if (chip->ecc_strength_ds == 4) {
+ if (chip->base.eccreq.strength == 4) {
micron->ecc.rawbuf = kmalloc(mtd->writesize +
mtd->oobsize,
GFP_KERNEL);
@@ -489,16 +489,16 @@ static int micron_nand_init(struct nand_chip *chip)
}
}
- if (chip->ecc_strength_ds == 4)
+ if (chip->base.eccreq.strength == 4)
mtd_set_ooblayout(mtd,
&micron_nand_on_die_4_ooblayout_ops);
else
mtd_set_ooblayout(mtd,
&micron_nand_on_die_8_ooblayout_ops);
- chip->ecc.bytes = chip->ecc_strength_ds * 2;
+ chip->ecc.bytes = chip->base.eccreq.strength * 2;
chip->ecc.size = 512;
- chip->ecc.strength = chip->ecc_strength_ds;
+ chip->ecc.strength = chip->base.eccreq.strength;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index d8184cf591ad..0b879bd0a68c 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -94,8 +94,8 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
goto ext_out;
}
- chip->ecc_strength_ds = ecc->ecc_bits;
- chip->ecc_step_ds = 1 << ecc->codeword_size;
+ chip->base.eccreq.strength = ecc->ecc_bits;
+ chip->base.eccreq.step_size = 1 << ecc->codeword_size;
ret = 0;
ext_out:
@@ -140,12 +140,15 @@ static void nand_bit_wise_majority(const void **srcbufs,
int nand_onfi_detect(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
struct nand_onfi_params *p;
struct onfi_params *onfi;
int onfi_version = 0;
char id[4];
int i, ret, val;
+ memorg = nanddev_get_memorg(&chip->base);
+
/* Try ONFI for unknown chip or LP */
ret = nand_readid_op(chip, 0x20, id, sizeof(id));
if (ret || strncmp(id, "ONFI", 4))
@@ -221,32 +224,36 @@ int nand_onfi_detect(struct nand_chip *chip)
goto free_onfi_param_page;
}
- mtd->writesize = le32_to_cpu(p->byte_per_page);
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
/*
* pages_per_block and blocks_per_lun may not be a power-of-2 size
* (don't ask me who thought of this...). MTD assumes that these
* dimensions will be power-of-2, so just truncate the remaining area.
*/
- mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
- mtd->erasesize *= mtd->writesize;
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
- mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
- /* See erasesize comment */
- chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
- chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
- chip->bits_per_cell = p->bits_per_cell;
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->interleaved_bits;
- chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
- chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
+ /* See erasesize comment */
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->max_bad_eraseblocks_per_lun = le16_to_cpu(p->bb_per_lun);
+ memorg->bits_per_cell = p->bits_per_cell;
if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
chip->options |= NAND_BUSWIDTH_16;
if (p->ecc_bits != 0xff) {
- chip->ecc_strength_ds = p->ecc_bits;
- chip->ecc_step_ds = 512;
+ chip->base.eccreq.strength = p->ecc_bits;
+ chip->base.eccreq.step_size = 512;
} else if (onfi_version >= 21 &&
(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
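The ONFI (and JEDEC) hunks above stop filling chip->chipsize directly; the equivalent value is now derived from the memory organization. A minimal sketch of that derivation, assuming nanddev_target_size() simply multiplies the memorg fields filled in above (the function name here is hypothetical and the helper itself is not part of this hunk):

#include <linux/mtd/nand.h>

/* Sketch: what replaces the removed chip->chipsize computation. */
static u64 onfi_target_size(const struct nand_memory_organization *memorg)
{
	return (u64)memorg->pagesize * memorg->pages_per_eraseblock *
	       memorg->eraseblocks_per_lun * memorg->luns_per_target;
}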
diff --git a/drivers/mtd/nand/raw/nand_samsung.c b/drivers/mtd/nand/raw/nand_samsung.c
index e46d4c492ad8..5552ce20ede0 100644
--- a/drivers/mtd/nand/raw/nand_samsung.c
+++ b/drivers/mtd/nand/raw/nand_samsung.c
@@ -20,6 +20,9 @@
static void samsung_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
/* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */
if (chip->id.len == 6 && !nand_is_slc(chip) &&
@@ -27,29 +30,30 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
u8 extid = chip->id.data[3];
/* Get pagesize */
- mtd->writesize = 2048 << (extid & 0x03);
+ memorg->pagesize = 2048 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
extid >>= 2;
/* Get oobsize */
switch (((extid >> 2) & 0x4) | (extid & 0x3)) {
case 1:
- mtd->oobsize = 128;
+ memorg->oobsize = 128;
break;
case 2:
- mtd->oobsize = 218;
+ memorg->oobsize = 218;
break;
case 3:
- mtd->oobsize = 400;
+ memorg->oobsize = 400;
break;
case 4:
- mtd->oobsize = 436;
+ memorg->oobsize = 436;
break;
case 5:
- mtd->oobsize = 512;
+ memorg->oobsize = 512;
break;
case 6:
- mtd->oobsize = 640;
+ memorg->oobsize = 640;
break;
default:
/*
@@ -62,31 +66,37 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
break;
}
+ mtd->oobsize = memorg->oobsize;
+
/* Get blocksize */
extid >>= 2;
+ memorg->pages_per_eraseblock = ((128 * 1024) <<
+ (((extid >> 1) & 0x04) |
+ (extid & 0x03))) /
+ memorg->pagesize;
mtd->erasesize = (128 * 1024) <<
(((extid >> 1) & 0x04) | (extid & 0x03));
/* Extract ECC requirements from 5th id byte*/
extid = (chip->id.data[4] >> 4) & 0x07;
if (extid < 5) {
- chip->ecc_step_ds = 512;
- chip->ecc_strength_ds = 1 << extid;
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << extid;
} else {
- chip->ecc_step_ds = 1024;
+ chip->base.eccreq.step_size = 1024;
switch (extid) {
case 5:
- chip->ecc_strength_ds = 24;
+ chip->base.eccreq.strength = 24;
break;
case 6:
- chip->ecc_strength_ds = 40;
+ chip->base.eccreq.strength = 40;
break;
case 7:
- chip->ecc_strength_ds = 60;
+ chip->base.eccreq.strength = 60;
break;
default:
WARN(1, "Could not decode ECC info");
- chip->ecc_step_ds = 0;
+ chip->base.eccreq.step_size = 0;
}
}
} else {
@@ -96,8 +106,8 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
switch (chip->id.data[1]) {
/* K9F4G08U0D-S[I|C]B0(T00) */
case 0xDC:
- chip->ecc_step_ds = 512;
- chip->ecc_strength_ds = 1;
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1;
break;
/* K9F1G08U0E 21nm chips do not support subpage write */
@@ -121,9 +131,9 @@ static int samsung_nand_init(struct nand_chip *chip)
chip->options |= NAND_SAMSUNG_LP_OPTIONS;
if (!nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ chip->options |= NAND_BBM_LASTPAGE;
else
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index d068163b64b3..74ffcae48726 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -101,6 +101,9 @@ static void toshiba_nand_benand_init(struct nand_chip *chip)
static void toshiba_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
nand_decode_ext_id(chip);
@@ -114,8 +117,10 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
*/
if (chip->id.len >= 6 && nand_is_slc(chip) &&
(chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
- !(chip->id.data[4] & 0x80) /* !BENAND */)
- mtd->oobsize = 32 * mtd->writesize >> 9;
+ !(chip->id.data[4] & 0x80) /* !BENAND */) {
+ memorg->oobsize = 32 * memorg->pagesize >> 9;
+ mtd->oobsize = memorg->oobsize;
+ }
/*
* Extract ECC requirements from 6th id byte.
@@ -125,20 +130,20 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
* - 24nm: 8 bit ECC for each 512Byte is required.
*/
if (chip->id.len >= 6 && nand_is_slc(chip)) {
- chip->ecc_step_ds = 512;
+ chip->base.eccreq.step_size = 512;
switch (chip->id.data[5] & 0x7) {
case 0x4:
- chip->ecc_strength_ds = 1;
+ chip->base.eccreq.strength = 1;
break;
case 0x5:
- chip->ecc_strength_ds = 4;
+ chip->base.eccreq.strength = 4;
break;
case 0x6:
- chip->ecc_strength_ds = 8;
+ chip->base.eccreq.strength = 8;
break;
default:
WARN(1, "Could not get ECC info");
- chip->ecc_step_ds = 0;
+ chip->base.eccreq.step_size = 0;
break;
}
}
@@ -147,7 +152,7 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
static int toshiba_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
/* Check that chip is BENAND and ECC mode is on-die */
if (nand_is_slc(chip) && chip->ecc.mode == NAND_ECC_ON_DIE &&
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 933d1a629c51..df63fa564082 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -298,6 +298,8 @@ union ns_mem {
* The structure which describes all the internal simulator data.
*/
struct nandsim {
+ struct nand_chip chip;
+ struct nand_controller base;
struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
@@ -644,9 +646,6 @@ static int __init init_nandsim(struct mtd_info *mtd)
return -EIO;
}
- /* Force mtd to not do delays */
- chip->legacy.chip_delay = 0;
-
/* Initialize the NAND flash parameters */
ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
ns->geom.totsz = mtd->size;
@@ -2076,24 +2075,6 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
return;
}
-static void ns_hwcontrol(struct nand_chip *chip, int cmd, unsigned int bitmask)
-{
- struct nandsim *ns = nand_get_controller_data(chip);
-
- ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
- ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
- ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
-
- if (cmd != NAND_CMD_NONE)
- ns_nand_write_byte(chip, cmd);
-}
-
-static int ns_device_ready(struct nand_chip *chip)
-{
- NS_DBG("device_ready\n");
- return 1;
-}
-
static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
int len)
{
@@ -2145,7 +2126,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
int i;
for (i = 0; i < len; i++)
- buf[i] = chip->legacy.read_byte(chip);
+ buf[i] = ns_nand_read_byte(chip);
return;
}
@@ -2168,6 +2149,46 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
return;
}
+static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ int i;
+ unsigned int op_id;
+ const struct nand_op_instr *instr = NULL;
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ ns->lines.ce = 1;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ ns->lines.cle = 0;
+ ns->lines.ale = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ns->lines.cle = 1;
+ ns_nand_write_byte(chip, instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ ns->lines.ale = 1;
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ ns_nand_write_byte(chip, instr->ctx.addr.addrs[i]);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ ns_nand_read_buf(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ ns_nand_write_buf(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ /* we are always ready */
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int ns_attach_chip(struct nand_chip *chip)
{
unsigned int eccsteps, eccbytes;
@@ -2208,6 +2229,7 @@ static int ns_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops ns_controller_ops = {
.attach_chip = ns_attach_chip,
+ .exec_op = ns_exec_op,
};
/*
@@ -2216,7 +2238,7 @@ static const struct nand_controller_ops ns_controller_ops = {
static int __init ns_init_module(void)
{
struct nand_chip *chip;
- struct nandsim *nand;
+ struct nandsim *ns;
int retval = -ENOMEM, i;
if (bus_width != 8 && bus_width != 16) {
@@ -2224,25 +2246,15 @@ static int __init ns_init_module(void)
return -EINVAL;
}
- /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
- chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
- GFP_KERNEL);
- if (!chip) {
+ ns = kzalloc(sizeof(struct nandsim), GFP_KERNEL);
+ if (!ns) {
NS_ERR("unable to allocate core structures.\n");
return -ENOMEM;
}
+ chip = &ns->chip;
nsmtd = nand_to_mtd(chip);
- nand = (struct nandsim *)(chip + 1);
- nand_set_controller_data(chip, (void *)nand);
+ nand_set_controller_data(chip, (void *)ns);
- /*
- * Register simulator's callbacks.
- */
- chip->legacy.cmd_ctrl = ns_hwcontrol;
- chip->legacy.read_byte = ns_nand_read_byte;
- chip->legacy.dev_ready = ns_device_ready;
- chip->legacy.write_buf = ns_nand_write_buf;
- chip->legacy.read_buf = ns_nand_read_buf;
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
@@ -2251,9 +2263,11 @@ static int __init ns_init_module(void)
switch (bbt) {
case 2:
- chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ /* fall through */
case 1:
- chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ /* fall through */
case 0:
break;
default:
@@ -2266,19 +2280,19 @@ static int __init ns_init_module(void)
* the initial ID read command correctly
*/
if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
- nand->geom.idbytes = 8;
+ ns->geom.idbytes = 8;
else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
- nand->geom.idbytes = 6;
+ ns->geom.idbytes = 6;
else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
- nand->geom.idbytes = 4;
+ ns->geom.idbytes = 4;
else
- nand->geom.idbytes = 2;
- nand->regs.status = NS_STATUS_OK(nand);
- nand->nxstate = STATE_UNKNOWN;
- nand->options |= OPT_PAGE512; /* temporary value */
- memcpy(nand->ids, id_bytes, sizeof(nand->ids));
+ ns->geom.idbytes = 2;
+ ns->regs.status = NS_STATUS_OK(ns);
+ ns->nxstate = STATE_UNKNOWN;
+ ns->options |= OPT_PAGE512; /* temporary value */
+ memcpy(ns->ids, id_bytes, sizeof(ns->ids));
if (bus_width == 16) {
- nand->busw = 16;
+ ns->busw = 16;
chip->options |= NAND_BUSWIDTH_16;
}
@@ -2293,7 +2307,10 @@ static int __init ns_init_module(void)
if ((retval = parse_gravepages()) != 0)
goto error;
- chip->legacy.dummy_controller.ops = &ns_controller_ops;
+ nand_controller_init(&ns->base);
+ ns->base.ops = &ns_controller_ops;
+ chip->controller = &ns->base;
+
retval = nand_scan(chip, 1);
if (retval) {
NS_ERR("Could not scan NAND Simulator device\n");
@@ -2302,16 +2319,23 @@ static int __init ns_init_module(void)
if (overridesize) {
uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ struct nand_memory_organization *memorg;
+ u64 targetsize;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
retval = -EINVAL;
goto err_exit;
}
+
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
nsmtd->size = new_size;
- chip->chipsize = new_size;
+ memorg->eraseblocks_per_lun = 1 << overridesize;
+ targetsize = nanddev_target_size(&chip->base);
chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
- chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
}
if ((retval = setup_wear_reporting(nsmtd)) != 0)
@@ -2323,27 +2347,27 @@ static int __init ns_init_module(void)
if ((retval = nand_create_bbt(chip)) != 0)
goto err_exit;
- if ((retval = parse_badblocks(nand, nsmtd)) != 0)
+ if ((retval = parse_badblocks(ns, nsmtd)) != 0)
goto err_exit;
/* Register NAND partitions */
- retval = mtd_device_register(nsmtd, &nand->partitions[0],
- nand->nbparts);
+ retval = mtd_device_register(nsmtd, &ns->partitions[0],
+ ns->nbparts);
if (retval != 0)
goto err_exit;
- if ((retval = nandsim_debugfs_create(nand)) != 0)
+ if ((retval = nandsim_debugfs_create(ns)) != 0)
goto err_exit;
return 0;
err_exit:
- free_nandsim(nand);
+ free_nandsim(ns);
nand_release(chip);
- for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
- kfree(nand->partitions[i].name);
+ for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
error:
- kfree(chip);
+ kfree(ns);
free_lists();
return retval;
@@ -2364,7 +2388,7 @@ static void __exit ns_cleanup_module(void)
nand_release(chip); /* Unregister driver */
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
kfree(ns->partitions[i].name);
- kfree(mtd_to_nand(nsmtd)); /* Free other structures */
+ kfree(ns); /* Free other structures */
free_lists();
}
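nandsim now implements ->exec_op() instead of the removed legacy cmd_ctrl/read_byte/dev_ready hooks. For illustration, here is how a READID sequence would reach ns_exec_op(), built only from the instruction types and fields the function dispatches on (the wrapper name is hypothetical and the structs are filled by hand rather than with the usual NAND_OP_*() helpers):

/* Hypothetical caller-side view of the ->exec_op() path wired up above. */
static int ns_example_readid(struct nand_chip *chip, u8 *id, unsigned int len)
{
	u8 addr = 0;
	struct nand_op_instr instrs[] = {
		{ .type = NAND_OP_CMD_INSTR,
		  .ctx.cmd.opcode = NAND_CMD_READID },
		{ .type = NAND_OP_ADDR_INSTR,
		  .ctx.addr = { .naddrs = 1, .addrs = &addr } },
		{ .type = NAND_OP_DATA_IN_INSTR,
		  .ctx.data = { .len = len, .buf.in = id } },
	};
	struct nand_operation op = {
		.instrs = instrs,
		.ninstrs = ARRAY_SIZE(instrs),
	};

	/* One CMD, one ADDR and one DATA_IN instruction, executed in order. */
	return ns_exec_op(chip, &op, false);
}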
diff --git a/drivers/mtd/nand/raw/nuc900_nand.c b/drivers/mtd/nand/raw/nuc900_nand.c
index 38b1994e7ed3..56fa84029482 100644
--- a/drivers/mtd/nand/raw/nuc900_nand.c
+++ b/drivers/mtd/nand/raw/nuc900_nand.c
@@ -192,8 +192,9 @@ static void nuc900_nand_command_lp(struct nand_chip *chip,
return;
case NAND_CMD_READ0:
-
write_cmd_reg(nand, NAND_CMD_READSTART);
+ /* fall through */
+
default:
if (!chip->legacy.dev_ready) {
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 8f280a2962c8..a9a275342a41 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1725,9 +1725,9 @@ static bool omap2_nand_ecc_check(struct omap_nand_info *info)
break;
}
- if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_BCH)) {
+ if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
dev_err(&info->pdev->dev,
- "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
return false;
}
if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index a3f32f939cc1..94c6401ef32f 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -465,11 +465,13 @@ static int elm_context_save(struct elm_info *info)
ELM_SYNDROME_FRAGMENT_5 + offset);
regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_4 + offset);
+ /* fall through */
case BCH8_ECC:
regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_3 + offset);
regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_2 + offset);
+ /* fall through */
case BCH4_ECC:
regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_1 + offset);
@@ -511,11 +513,13 @@ static int elm_context_restore(struct elm_info *info)
regs->elm_syndrome_fragment_5[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
regs->elm_syndrome_fragment_4[i]);
+ /* fall through */
case BCH8_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
regs->elm_syndrome_fragment_3[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
regs->elm_syndrome_fragment_2[i]);
+ /* fall through */
case BCH4_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
regs->elm_syndrome_fragment_1[i]);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 920e7375084f..6ead55e05b80 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1680,14 +1680,12 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
u8 *cw_data_buf, *cw_oob_buf;
int cw, data_size, oob_size, ret = 0;
- if (!data_buf) {
- data_buf = chip->data_buf;
- chip->pagebuf = -1;
- }
+ if (!data_buf)
+ data_buf = nand_get_data_buf(chip);
if (!oob_buf) {
+ nand_get_data_buf(chip);
oob_buf = chip->oob_poi;
- chip->pagebuf = -1;
}
for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index 86456216fb93..7b99831aa046 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -45,7 +45,6 @@ static inline void r852_write_reg(struct r852_device *dev,
int address, uint8_t value)
{
writeb(value, dev->mmio + address);
- mmiowb();
}
@@ -61,7 +60,6 @@ static inline void r852_write_reg_dword(struct r852_device *dev,
int address, uint32_t value)
{
writel(cpu_to_le32(value), dev->mmio + address);
- mmiowb();
}
/* returns pointer to our private structure */
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index cf6b1be1cf9c..e509c93737c4 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -101,14 +101,12 @@ static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
static struct nand_bbt_descr flctl_4secc_smallpage = {
- .options = NAND_BBT_SCAN2NDPAGE,
.offs = 11,
.len = 1,
.pattern = scan_ff_pattern,
};
static struct nand_bbt_descr flctl_4secc_largepage = {
- .options = NAND_BBT_SCAN2NDPAGE,
.offs = 0,
.len = 2,
.pattern = scan_ff_pattern,
@@ -986,6 +984,7 @@ static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
+ u64 targetsize = nanddev_target_size(&chip->base);
struct mtd_info *mtd = nand_to_mtd(chip);
struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -998,11 +997,11 @@ static int flctl_chip_attach_chip(struct nand_chip *chip)
if (mtd->writesize == 512) {
flctl->page_size = 0;
- if (chip->chipsize > (32 << 20)) {
+ if (targetsize > (32 << 20)) {
/* big than 32MB */
flctl->rw_ADRCNT = ADRCNT_4;
flctl->erase_ADRCNT = ADRCNT_3;
- } else if (chip->chipsize > (2 << 16)) {
+ } else if (targetsize > (2 << 16)) {
/* big than 128KB */
flctl->rw_ADRCNT = ADRCNT_3;
flctl->erase_ADRCNT = ADRCNT_2;
@@ -1012,11 +1011,11 @@ static int flctl_chip_attach_chip(struct nand_chip *chip)
}
} else {
flctl->page_size = 1;
- if (chip->chipsize > (128 << 20)) {
+ if (targetsize > (128 << 20)) {
/* big than 128MB */
flctl->rw_ADRCNT = ADRCNT2_E;
flctl->erase_ADRCNT = ADRCNT_3;
- } else if (chip->chipsize > (8 << 16)) {
+ } else if (targetsize > (8 << 16)) {
/* big than 512KB */
flctl->rw_ADRCNT = ADRCNT_4;
flctl->erase_ADRCNT = ADRCNT_2;
@@ -1178,6 +1177,8 @@ static int flctl_probe(struct platform_device *pdev)
if (pdata->flcmncr_val & SEL_16BIT)
nand->options |= NAND_BUSWIDTH_16;
+ nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 4282bc477761..b021a5720b42 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -42,7 +42,8 @@
#define NFC_REG_CMD 0x0024
#define NFC_REG_RCMD_SET 0x0028
#define NFC_REG_WCMD_SET 0x002C
-#define NFC_REG_IO_DATA 0x0030
+#define NFC_REG_A10_IO_DATA 0x0030
+#define NFC_REG_A23_IO_DATA 0x0300
#define NFC_REG_ECC_CTL 0x0034
#define NFC_REG_ECC_ST 0x0038
#define NFC_REG_DEBUG 0x003C
@@ -200,6 +201,22 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
return container_of(nand, struct sunxi_nand_chip, nand);
}
+/*
+ * NAND Controller capabilities structure: stores NAND controller capabilities
+ * for distinction between compatible strings.
+ *
+ * @sram_through_ahb: On A23, we choose to access the internal RAM through AHB
+ * instead of MBUS (less configuration). A10, A10s, A13 and
+ * A20 use the MBUS but no extra configuration is needed.
+ * @reg_io_data: I/O data register
+ * @dma_maxburst: DMA maxburst
+ */
+struct sunxi_nfc_caps {
+ bool sram_through_ahb;
+ unsigned int reg_io_data;
+ unsigned int dma_maxburst;
+};
+
/**
* struct sunxi_nfc - stores sunxi NAND controller information
*
@@ -228,6 +245,7 @@ struct sunxi_nfc {
struct list_head chips;
struct completion complete;
struct dma_chan *dmac;
+ const struct sunxi_nfc_caps *caps;
};
static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
@@ -350,10 +368,29 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
goto err_unmap_buf;
}
- writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
- nfc->regs + NFC_REG_CTL);
+ /*
+ * On A23, we suppose the "internal RAM" (p.12 of the NFC user manual)
+ * refers to the NAND controller's internal SRAM. This memory is mapped
+ * and so is accessible from the AHB. It seems that it can also be
+ * accessed by the MBUS. MBUS accesses are mandatory when using the
+ * internal DMA instead of the external DMA engine.
+ *
+ * During DMA I/O operation, either we access this memory from the AHB
+ * by clearing the NFC_RAM_METHOD bit, or we set the bit and use the
+ * MBUS. In this case, we should also configure the MBUS DMA length
+ * NFC_REG_MDMA_CNT(0xC4) to be chunksize * nchunks. NAND I/O over MBUS
+ * is also limited to 32kiB pages.
+ */
+ if (nfc->caps->sram_through_ahb)
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+ else
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+
writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
writel(chunksize, nfc->regs + NFC_REG_CNT);
+
dmat = dmaengine_submit(dmad);
ret = dma_submit_error(dmat);
@@ -1313,20 +1350,19 @@ pio_fallback:
static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
{
- nand->pagebuf = -1;
+ u8 *buf = nand_get_data_buf(nand);
- return nand->ecc.read_page(nand, nand->data_buf, 1, page);
+ return nand->ecc.read_page(nand, buf, 1, page);
}
static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
+ u8 *buf = nand_get_data_buf(nand);
int ret;
- nand->pagebuf = -1;
-
- memset(nand->data_buf, 0xff, mtd->writesize);
- ret = nand->ecc.write_page(nand, nand->data_buf, 1, page);
+ memset(buf, 0xff, mtd->writesize);
+ ret = nand->ecc.write_page(nand, buf, 1, page);
if (ret)
return ret;
@@ -1724,8 +1760,8 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
nand->options |= NAND_SUBPAGE_READ;
if (!ecc->size) {
- ecc->size = nand->ecc_step_ds;
- ecc->strength = nand->ecc_strength_ds;
+ ecc->size = nand->base.eccreq.step_size;
+ ecc->strength = nand->base.eccreq.strength;
}
if (!ecc->size || !ecc->strength)
@@ -2088,6 +2124,12 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
goto out_mod_clk_unprepare;
}
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+ if (!nfc->caps) {
+ ret = -EINVAL;
+ goto out_ahb_reset_reassert;
+ }
+
ret = sunxi_nfc_rst(nfc);
if (ret)
goto out_ahb_reset_reassert;
@@ -2102,12 +2144,12 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (nfc->dmac) {
struct dma_slave_config dmac_cfg = { };
- dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
+ dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
dmac_cfg.dst_addr = dmac_cfg.src_addr;
dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
- dmac_cfg.src_maxburst = 4;
- dmac_cfg.dst_maxburst = 4;
+ dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
+ dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
dmaengine_slave_config(nfc->dmac, &dmac_cfg);
} else {
dev_warn(dev, "failed to request rxtx DMA channel\n");
@@ -2152,8 +2194,26 @@ static int sunxi_nfc_remove(struct platform_device *pdev)
return 0;
}
+static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
+ .reg_io_data = NFC_REG_A10_IO_DATA,
+ .dma_maxburst = 4,
+};
+
+static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
+ .sram_through_ahb = true,
+ .reg_io_data = NFC_REG_A23_IO_DATA,
+ .dma_maxburst = 8,
+};
+
static const struct of_device_id sunxi_nfc_ids[] = {
- { .compatible = "allwinner,sun4i-a10-nand" },
+ {
+ .compatible = "allwinner,sun4i-a10-nand",
+ .data = &sunxi_nfc_a10_caps,
+ },
+ {
+ .compatible = "allwinner,sun8i-a23-nand-controller",
+ .data = &sunxi_nfc_a23_caps,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 13be32c38194..3cc9a4c41443 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -853,7 +853,7 @@ static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
} else {
strength_sel = strength[i];
- if (strength_sel < chip->ecc_strength_ds)
+ if (strength_sel < chip->base.eccreq.strength)
continue;
}
@@ -917,9 +917,9 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 512;
chip->ecc.steps = mtd->writesize / chip->ecc.size;
- if (chip->ecc_step_ds != 512) {
+ if (chip->base.eccreq.step_size != 512) {
dev_err(ctrl->dev, "Unsupported step size %d\n",
- chip->ecc_step_ds);
+ chip->base.eccreq.step_size);
return -EINVAL;
}
@@ -950,7 +950,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
if (ret < 0) {
dev_err(ctrl->dev,
"No valid strength found, minimum %d\n",
- chip->ecc_strength_ds);
+ chip->base.eccreq.strength);
return ret;
}
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index ddf0420c0997..97978227aa55 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -159,7 +159,6 @@ static void txx9ndfmc_cmd_ctrl(struct nand_chip *chip, int cmd,
if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
}
- mmiowb();
}
static int txx9ndfmc_dev_ready(struct nand_chip *chip)
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index a662ca1970e5..e4fe8c4bc711 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -364,7 +364,7 @@ static int vf610_nfc_cmd(struct nand_chip *chip,
{
const struct nand_op_instr *instr;
struct vf610_nfc *nfc = chip_to_nfc(chip);
- int op_id = -1, trfr_sz = 0, offset;
+ int op_id = -1, trfr_sz = 0, offset = 0;
u32 col = 0, row = 0, cmd1 = 0, cmd2 = 0, code = 0;
bool force8bit = false;
@@ -850,6 +850,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
}
of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
+ if (!of_id)
+ return -ENODEV;
+
nfc->variant = (enum vf610_nfc_variant)of_id->data;
for_each_available_child_of_node(nfc->dev->of_node, child) {
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index fa87ae28cdfe..4c15bb58c623 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -19,21 +19,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- u16 *column)
-{
- struct nand_device *nand = spinand_to_nand(spinand);
- unsigned int shift;
-
- if (nand->memorg.planes_per_lun < 2)
- return;
-
- /* The plane number is passed in MSB just above the column address */
- shift = fls(nand->memorg.pagesize);
- *column |= req->pos.plane << shift;
-}
-
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
@@ -227,27 +212,21 @@ static int spinand_load_page_op(struct spinand_device *spinand,
static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.read_cache;
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
u16 column = 0;
- int ret;
+ ssize_t ret;
if (req->datalen) {
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.dataoffs = 0;
- adjreq.databuf.in = spinand->databuf;
buf = spinand->databuf;
- nbytes = adjreq.datalen;
+ nbytes = nanddev_page_size(nand);
+ column = 0;
}
if (req->ooblen) {
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.in = spinand->oobbuf;
nbytes += nanddev_per_page_oobsize(nand);
if (!buf) {
buf = spinand->oobbuf;
@@ -255,28 +234,19 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
}
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
- op.addr.val = column;
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
- /*
- * Some controllers are limited in term of max RX data size. In this
- * case, just repeat the READ_CACHE operation after updating the
- * column.
- */
while (nbytes) {
- op.data.buf.in = buf;
- op.data.nbytes = nbytes;
- ret = spi_mem_adjust_op_size(spinand->spimem, &op);
- if (ret)
+ ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- ret = spi_mem_exec_op(spinand->spimem, &op);
- if (ret)
- return ret;
+ if (!ret || ret > nbytes)
+ return -EIO;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
}
if (req->datalen)
@@ -300,14 +270,12 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.write_cache;
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct spi_mem_dirmap_desc *wdesc;
+ unsigned int nbytes, column = 0;
void *buf = spinand->databuf;
- unsigned int nbytes;
- u16 column = 0;
- int ret;
+ ssize_t ret;
/*
* Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -318,12 +286,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
*/
nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
memset(spinand->databuf, 0xff, nbytes);
- adjreq.dataoffs = 0;
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.databuf.out = spinand->databuf;
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.out = spinand->oobbuf;
if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
@@ -340,42 +302,19 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
req->ooblen);
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
- op = *spinand->op_templates.write_cache;
- op.addr.val = column;
-
- /*
- * Some controllers are limited in term of max TX data size. In this
- * case, split the operation into one LOAD CACHE and one or more
- * LOAD RANDOM CACHE.
- */
while (nbytes) {
- op.data.buf.out = buf;
- op.data.nbytes = nbytes;
-
- ret = spi_mem_adjust_op_size(spinand->spimem, &op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(spinand->spimem, &op);
- if (ret)
+ ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ if (!ret || ret > nbytes)
+ return -EIO;
- /*
- * We need to use the RANDOM LOAD CACHE operation if there's
- * more than one iteration, because the LOAD operation might
- * reset the cache to 0xff.
- */
- if (nbytes) {
- column = op.addr.val;
- op = *spinand->op_templates.update_cache;
- op.addr.val = column;
- }
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
}
return 0;
@@ -755,6 +694,59 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
return ret;
}
+static int spinand_create_dirmap(struct spinand_device *spinand,
+ unsigned int plane)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_info info = {
+ .length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ };
+ struct spi_mem_dirmap_desc *desc;
+
+ /* The plane number is passed in MSB just above the column address */
+ info.offset = plane << fls(nand->memorg.pagesize);
+
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].rdesc = desc;
+
+ return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int i, ret;
+
+ spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
+ sizeof(*spinand->dirmaps) *
+ nand->memorg.planes_per_lun,
+ GFP_KERNEL);
+ if (!spinand->dirmaps)
+ return -ENOMEM;
+
+ for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+ ret = spinand_create_dirmap(spinand, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct nand_ops spinand_ops = {
.erase = spinand_erase,
.markbad = spinand_markbad,
@@ -1012,6 +1004,14 @@ static int spinand_init(struct spinand_device *spinand)
goto err_free_bufs;
}
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_manuf_cleanup;
+ }
+
/* After power up, all blocks are locked, so unlock them here. */
for (i = 0; i < nand->memorg.ntargets; i++) {
ret = spinand_select_target(spinand, i);
@@ -1037,6 +1037,7 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_block_markbad = spinand_mtd_block_markbad;
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
+ mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
if (spinand->eccinfo.ooblayout)
mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
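spinand_init() now also wires up mtd->_max_bad_blocks to nanddev_mtd_max_bad_blocks(). A rough sketch of the worst-case estimate this enables, assuming each LUN covered by the queried range can hold up to memorg->max_bad_eraseblocks_per_lun bad eraseblocks (the helper name and exact rounding are illustrative; the real helper lives in the generic NAND core):

#include <linux/math64.h>
#include <linux/mtd/nand.h>

/* Sketch only: worst-case bad-block count for an [offs, offs + len) range. */
static unsigned int spinand_max_bad_blocks_estimate(struct nand_device *nand,
						    loff_t offs, size_t len)
{
	u64 lun_size = (u64)nand->memorg.pagesize *
		       nand->memorg.pages_per_eraseblock *
		       nand->memorg.eraseblocks_per_lun;
	u64 first_lun = div64_u64(offs, lun_size);
	u64 last_lun = div64_u64(offs + len - 1, lun_size);

	return (last_lun - first_lun + 1) *
	       nand->memorg.max_bad_eraseblocks_per_lun;
}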
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 0b49d8264bef..e5586390026a 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -162,7 +162,7 @@ static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = {
static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO("GD5F1GQ4xA", 0xF1,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -171,7 +171,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F2GQ4xA", 0xF2,
- NAND_MEMORG(1, 2048, 64, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -180,7 +180,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4xA", 0xF4,
- NAND_MEMORG(1, 2048, 64, 64, 4096, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 4096, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -189,7 +189,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
- NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index d16b57081c95..6502727049a8 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -100,7 +100,7 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB", 0x12,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -109,7 +109,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB", 0x22,
- NAND_MEMORG(1, 2048, 64, 64, 2048, 2, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 20, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 9c4381d6847b..7d7b1f7fcf71 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -92,7 +92,7 @@ static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info micron_spinand_table[] = {
SPINAND_INFO("MT29F2G01ABAGD", 0x24,
- NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index db8021da45b5..1cb3760ff779 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -96,7 +96,7 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info toshiba_spinand_table[] = {
/* 3.3V 1Gb */
SPINAND_INFO("TC58CVG0S3", 0xC2,
- NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -106,7 +106,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 3.3V 2Gb */
SPINAND_INFO("TC58CVG1S3", 0xCB,
- NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -116,7 +116,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 3.3V 4Gb */
SPINAND_INFO("TC58CVG2S0", 0xCD,
- NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -126,7 +126,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 1.8V 1Gb */
SPINAND_INFO("TC58CYG0S3", 0xB2,
- NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -136,7 +136,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 1.8V 2Gb */
SPINAND_INFO("TC58CYG1S3", 0xBB,
- NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -146,7 +146,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 1.8V 4Gb */
SPINAND_INFO("TC58CYG2S0", 0xBD,
- NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 5d944580b898..a6c17e0cace8 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -76,7 +76,7 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
static const struct spinand_info winbond_spinand_table[] = {
SPINAND_INFO("W25M02GV", 0xAB,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 2),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -85,7 +85,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
SPINAND_INFO("W25N01GV", 0xAA,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index fccf1950e92d..bc201327dda0 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -1,3 +1,30 @@
+config MTD_PARSER_IMAGETAG
+ tristate "Parser for BCM963XX Image Tag format partitions"
+ depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ select CRC32
+ help
+	  Image Tag is the firmware header used by Broadcom on their xDSL line
+ of devices. It is used to describe the offsets and lengths of kernel
+ and rootfs partitions.
+ This driver adds support for parsing a partition with an Image Tag
+ header and creates up to two partitions, kernel and rootfs.
+
+config MTD_AFS_PARTS
+ tristate "ARM Firmware Suite partition parsing"
+ depends on (ARM || ARM64)
+ help
+ The ARM Firmware Suite allows the user to divide flash devices into
+ multiple 'images'. Each such image has a header containing its name
+ and offset/size etc.
+
+ If you need code which can detect and parse these tables, and
+ register MTD 'partitions' corresponding to each image detected,
+ enable this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
+
config MTD_PARSER_TRX
tristate "Parser for TRX format partitions"
depends on MTD && (BCM47XX || ARCH_BCM_5301X || COMPILE_TEST)
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index d8418bf6804a..cddc8f35a856 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1,3 +1,5 @@
+obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
+obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
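As the MTD_AFS_PARTS help text above notes, these parsers are not run automatically: a map driver has to request them by name. A minimal sketch of that hookup follows, assuming a hypothetical my_register_flash() helper in some map driver; only mtd_device_parse_register() and the parser names "afs" and "bcm963xx-imagetag" come from the code added here.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Parser names as registered by the two new parsers; tried in order. */
static const char * const my_probe_types[] = {
	"afs",			/* ARM Firmware Suite footers */
	"bcm963xx-imagetag",	/* BCM963XX CFE image tag */
	NULL
};

/* Hypothetical helper a map driver could call from its probe path. */
static int my_register_flash(struct mtd_info *mtd)
{
	/*
	 * The first parser whose parse_fn reports partitions wins;
	 * passing NULL/0 means there is no static fallback table.
	 */
	return mtd_device_parse_register(mtd, my_probe_types, NULL, NULL, 0);
}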
diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
new file mode 100644
index 000000000000..0c730024f806
--- /dev/null
+++ b/drivers/mtd/parsers/afs.c
@@ -0,0 +1,410 @@
+/*======================================================================
+
+   drivers/mtd/parsers/afs.c: ARM Flash Layout/Partitioning
+
+ Copyright © 2000 ARM Limited
+ Copyright (C) 2019 Linus Walleij
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ This is access code for flashes using ARM's flash partitioning
+ standards.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#define AFSV1_FOOTER_MAGIC 0xA0FFFF9F
+#define AFSV2_FOOTER_MAGIC1 0x464C5348 /* "FLSH" */
+#define AFSV2_FOOTER_MAGIC2 0x464F4F54 /* "FOOT" */
+
+struct footer_v1 {
+ u32 image_info_base; /* Address of first word of ImageFooter */
+ u32 image_start; /* Start of area reserved by this footer */
+ u32 signature; /* 'Magic' number proves it's a footer */
+ u32 type; /* Area type: ARM Image, SIB, customer */
+ u32 checksum; /* Just this structure */
+};
+
+struct image_info_v1 {
+ u32 bootFlags; /* Boot flags, compression etc. */
+ u32 imageNumber; /* Unique number, selects for boot etc. */
+ u32 loadAddress; /* Address program should be loaded to */
+ u32 length; /* Actual size of image */
+ u32 address; /* Image is executed from here */
+ char name[16]; /* Null terminated */
+ u32 headerBase; /* Flash Address of any stripped header */
+ u32 header_length; /* Length of header in memory */
+ u32 headerType; /* AIF, RLF, s-record etc. */
+ u32 checksum; /* Image checksum (inc. this struct) */
+};
+
+static u32 word_sum(void *words, int num)
+{
+ u32 *p = words;
+ u32 sum = 0;
+
+ while (num--)
+ sum += *p++;
+
+ return sum;
+}
+
+static u32 word_sum_v2(u32 *p, u32 num)
+{
+ u32 sum = 0;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ u32 val;
+
+ val = p[i];
+ if (val > ~sum)
+ sum++;
+ sum += val;
+ }
+ return ~sum;
+}
+
+static bool afs_is_v1(struct mtd_info *mtd, u_int off)
+{
+ /* The magic is 12 bytes from the end of the erase block */
+ u_int ptr = off + mtd->erasesize - 12;
+ u32 magic;
+ size_t sz;
+ int ret;
+
+ ret = mtd_read(mtd, ptr, 4, &sz, (u_char *)&magic);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return false;
+ }
+ if (ret >= 0 && sz != 4)
+ return false;
+
+ return (magic == AFSV1_FOOTER_MAGIC);
+}
+
+static bool afs_is_v2(struct mtd_info *mtd, u_int off)
+{
+	/* The magic is the last 8 bytes of the erase block */
+ u_int ptr = off + mtd->erasesize - 8;
+ u32 foot[2];
+ size_t sz;
+ int ret;
+
+ ret = mtd_read(mtd, ptr, 8, &sz, (u_char *)foot);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return false;
+ }
+ if (ret >= 0 && sz != 8)
+ return false;
+
+ return (foot[0] == AFSV2_FOOTER_MAGIC1 &&
+ foot[1] == AFSV2_FOOTER_MAGIC2);
+}
+
+static int afs_parse_v1_partition(struct mtd_info *mtd,
+ u_int off, struct mtd_partition *part)
+{
+ struct footer_v1 fs;
+ struct image_info_v1 iis;
+ u_int mask;
+ /*
+ * Static checks cannot see that we bail out if we have an error
+ * reading the footer.
+ */
+ u_int uninitialized_var(iis_ptr);
+ u_int uninitialized_var(img_ptr);
+ u_int ptr;
+ size_t sz;
+ int ret;
+ int i;
+
+ /*
+ * This is the address mask; we use this to mask off out of
+ * range address bits.
+ */
+ mask = mtd->size - 1;
+
+ ptr = off + mtd->erasesize - sizeof(fs);
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
+ if (ret >= 0 && sz != sizeof(fs))
+ ret = -EINVAL;
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return ret;
+ }
+ /*
+ * Check the checksum.
+ */
+ if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
+ return -EINVAL;
+
+ /*
+ * Hide the SIB (System Information Block)
+ */
+ if (fs.type == 2)
+ return 0;
+
+ iis_ptr = fs.image_info_base & mask;
+ img_ptr = fs.image_start & mask;
+
+ /*
+	 * Check the image info base. This cannot
+ * be located after the footer structure.
+ */
+ if (iis_ptr >= ptr)
+ return 0;
+
+ /*
+ * Check the start of this image. The image
+	 * data cannot be located after this block.
+ */
+ if (img_ptr > off)
+ return 0;
+
+ /* Read the image info block */
+ memset(&iis, 0, sizeof(iis));
+ ret = mtd_read(mtd, iis_ptr, sizeof(iis), &sz, (u_char *)&iis);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ iis_ptr, ret);
+ return -EINVAL;
+ }
+
+ if (sz != sizeof(iis))
+ return -EINVAL;
+
+ /*
+ * Validate the name - it must be NUL terminated.
+ */
+ for (i = 0; i < sizeof(iis.name); i++)
+ if (iis.name[i] == '\0')
+ break;
+	if (i == sizeof(iis.name))
+ return -EINVAL;
+
+ part->name = kstrdup(iis.name, GFP_KERNEL);
+ if (!part->name)
+ return -ENOMEM;
+
+ part->size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1);
+ part->offset = img_ptr;
+ part->mask_flags = 0;
+
+ printk(" mtd: at 0x%08x, %5lluKiB, %8u, %s\n",
+ img_ptr, part->size / 1024,
+ iis.imageNumber, part->name);
+
+ return 0;
+}
+
+static int afs_parse_v2_partition(struct mtd_info *mtd,
+ u_int off, struct mtd_partition *part)
+{
+ u_int ptr;
+ u32 footer[12];
+ u32 imginfo[36];
+ char *name;
+ u32 version;
+ u32 entrypoint;
+ u32 attributes;
+ u32 region_count;
+ u32 block_start;
+ u32 block_end;
+ u32 crc;
+ size_t sz;
+ int ret;
+ int i;
+ int pad = 0;
+
+ pr_debug("Parsing v2 partition @%08x-%08x\n",
+ off, off + mtd->erasesize);
+
+ /* First read the footer */
+ ptr = off + mtd->erasesize - sizeof(footer);
+ ret = mtd_read(mtd, ptr, sizeof(footer), &sz, (u_char *)footer);
+ if ((ret < 0) || (ret >= 0 && sz != sizeof(footer))) {
+ pr_err("AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return -EIO;
+ }
+ name = (char *) &footer[0];
+ version = footer[9];
+ ptr = off + mtd->erasesize - sizeof(footer) - footer[8];
+
+ pr_debug("found image \"%s\", version %08x, info @%08x\n",
+ name, version, ptr);
+
+ /* Then read the image information */
+ ret = mtd_read(mtd, ptr, sizeof(imginfo), &sz, (u_char *)imginfo);
+ if ((ret < 0) || (ret >= 0 && sz != sizeof(imginfo))) {
+ pr_err("AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return -EIO;
+ }
+
+ /* 32bit platforms have 4 bytes padding */
+ crc = word_sum_v2(&imginfo[1], 34);
+ if (!crc) {
+ pr_debug("Padding 1 word (4 bytes)\n");
+ pad = 1;
+ } else {
+ /* 64bit platforms have 8 bytes padding */
+ crc = word_sum_v2(&imginfo[2], 34);
+ if (!crc) {
+ pr_debug("Padding 2 words (8 bytes)\n");
+ pad = 2;
+ }
+ }
+ if (crc) {
+ pr_err("AFS: bad checksum on v2 image info: %08x\n", crc);
+ return -EINVAL;
+ }
+ entrypoint = imginfo[pad];
+ attributes = imginfo[pad+1];
+ region_count = imginfo[pad+2];
+ block_start = imginfo[20];
+ block_end = imginfo[21];
+
+ pr_debug("image entry=%08x, attr=%08x, regions=%08x, "
+ "bs=%08x, be=%08x\n",
+ entrypoint, attributes, region_count,
+ block_start, block_end);
+
+ for (i = 0; i < region_count; i++) {
+ u32 region_load_addr = imginfo[pad + 3 + i*4];
+ u32 region_size = imginfo[pad + 4 + i*4];
+ u32 region_offset = imginfo[pad + 5 + i*4];
+ u32 region_start;
+ u32 region_end;
+
+ pr_debug(" region %d: address: %08x, size: %08x, "
+ "offset: %08x\n",
+ i,
+ region_load_addr,
+ region_size,
+ region_offset);
+
+ region_start = off + region_offset;
+ region_end = region_start + region_size;
+ /* Align partition to end of erase block */
+ region_end += (mtd->erasesize - 1);
+		region_end &= ~(mtd->erasesize - 1);
+ pr_debug(" partition start = %08x, partition end = %08x\n",
+ region_start, region_end);
+
+ /* Create one partition per region */
+ part->name = kstrdup(name, GFP_KERNEL);
+ if (!part->name)
+ return -ENOMEM;
+ part->offset = region_start;
+ part->size = region_end - region_start;
+ part->mask_flags = 0;
+ }
+
+ return 0;
+}
+
+static int parse_afs_partitions(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ u_int off, sz;
+ int ret = 0;
+ int i;
+
+ /* Count the partitions by looping over all erase blocks */
+ for (i = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
+ if (afs_is_v1(mtd, off)) {
+ sz += sizeof(struct mtd_partition);
+ i += 1;
+ }
+ if (afs_is_v2(mtd, off)) {
+ sz += sizeof(struct mtd_partition);
+ i += 1;
+ }
+ }
+
+ if (!i)
+ return 0;
+
+ parts = kzalloc(sz, GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ /*
+ * Identify the partitions
+ */
+ for (i = off = 0; off < mtd->size; off += mtd->erasesize) {
+ if (afs_is_v1(mtd, off)) {
+ ret = afs_parse_v1_partition(mtd, off, &parts[i]);
+ if (ret)
+ goto out_free_parts;
+ i++;
+ }
+ if (afs_is_v2(mtd, off)) {
+ ret = afs_parse_v2_partition(mtd, off, &parts[i]);
+ if (ret)
+ goto out_free_parts;
+ i++;
+ }
+ }
+
+ *pparts = parts;
+ return i;
+
+out_free_parts:
+ while (i >= 0) {
+ if (parts[i].name)
+ kfree(parts[i].name);
+ i--;
+ }
+ kfree(parts);
+ *pparts = NULL;
+ return ret;
+}
+
+static const struct of_device_id mtd_parser_afs_of_match_table[] = {
+ { .compatible = "arm,arm-firmware-suite" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_afs_of_match_table);
+
+static struct mtd_part_parser afs_parser = {
+ .parse_fn = parse_afs_partitions,
+ .name = "afs",
+ .of_match_table = mtd_parser_afs_of_match_table,
+};
+module_mtd_part_parser(afs_parser);
+
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
+MODULE_LICENSE("GPL");
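The v1 path above treats a footer as valid only when word_sum() over its five 32-bit words equals 0xffffffff. A small standalone sketch of that relation; the struct mirrors footer_v1 above and the helper name is made up for illustration.

#include <stdint.h>

/* Mirrors struct footer_v1 from the parser above. */
struct afs_footer_v1 {
	uint32_t image_info_base;
	uint32_t image_start;
	uint32_t signature;	/* AFSV1_FOOTER_MAGIC, 0xA0FFFF9F */
	uint32_t type;
	uint32_t checksum;
};

/*
 * Choose the checksum so that the plain (wrapping) 32-bit sum of all
 * five words equals 0xffffffff, which is exactly what the parser checks.
 */
static uint32_t afs_v1_make_checksum(const struct afs_footer_v1 *f)
{
	uint32_t partial = f->image_info_base + f->image_start +
			   f->signature + f->type;

	return 0xffffffffu - partial;
}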
diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c
new file mode 100644
index 000000000000..9537c183a3be
--- /dev/null
+++ b/drivers/mtd/parsers/parser_imagetag.c
@@ -0,0 +1,222 @@
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011-2013 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcm963xx_tag.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+/* Ensure strings read from flash structs are null terminated */
+#define STR_NULL_TERMINATE(x) \
+ do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
+
+static int bcm963xx_read_imagetag(struct mtd_info *master, const char *name,
+ loff_t tag_offset, struct bcm_tag *buf)
+{
+ int ret;
+ size_t retlen;
+ u32 computed_crc;
+
+ ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
+ if (ret)
+ return ret;
+
+ if (retlen != sizeof(*buf))
+ return -EIO;
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ STR_NULL_TERMINATE(buf->board_id);
+ STR_NULL_TERMINATE(buf->tag_version);
+
+ pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
+ name, tag_offset, buf->tag_version, buf->board_id);
+
+ return 0;
+ }
+
+ pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
+ name, tag_offset, buf->header_crc, computed_crc);
+ return -EINVAL;
+}
+
+static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 0, curpart = 0;
+ struct bcm_tag *buf = NULL;
+ struct mtd_partition *parts;
+ int ret;
+ unsigned int rootfsaddr, kerneladdr, spareaddr, offset;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ int i;
+ bool rootfs_first = false;
+
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = bcm963xx_read_imagetag(master, "rootfs", 0, buf);
+ if (!ret) {
+ STR_NULL_TERMINATE(buf->flash_image_start);
+ if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
+ rootfsaddr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid rootfs address: %*ph\n",
+ (int)sizeof(buf->flash_image_start),
+ buf->flash_image_start);
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->kernel_address);
+ if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
+ kerneladdr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid kernel address: %*ph\n",
+ (int)sizeof(buf->kernel_address),
+ buf->kernel_address);
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->kernel_length);
+ if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
+ pr_err("invalid kernel length: %*ph\n",
+ (int)sizeof(buf->kernel_length),
+ buf->kernel_length);
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->total_length);
+ if (kstrtouint(buf->total_length, 10, &totallen)) {
+ pr_err("invalid total length: %*ph\n",
+ (int)sizeof(buf->total_length),
+ buf->total_length);
+ goto out;
+ }
+
+ /*
+ * Addresses are flash absolute, so convert to partition
+ * relative addresses. Assume either kernel or rootfs will
+ * directly follow the image tag.
+ */
+ if (rootfsaddr < kerneladdr)
+ offset = rootfsaddr - sizeof(struct bcm_tag);
+ else
+ offset = kerneladdr - sizeof(struct bcm_tag);
+
+ kerneladdr = kerneladdr - offset;
+ rootfsaddr = rootfsaddr - offset;
+ spareaddr = roundup(totallen, master->erasesize);
+
+ if (rootfsaddr < kerneladdr) {
+ /* default Broadcom layout */
+ rootfslen = kerneladdr - rootfsaddr;
+ rootfs_first = true;
+ } else {
+ /* OpenWrt layout */
+ rootfsaddr = kerneladdr + kernellen;
+ rootfslen = spareaddr - rootfsaddr;
+ }
+ } else {
+ goto out;
+ }
+ sparelen = master->size - spareaddr;
+
+ /* Determine number of partitions */
+ if (rootfslen > 0)
+ nrparts++;
+
+ if (kernellen > 0)
+ nrparts++;
+
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Start building partition list */
+ if (kernellen > 0) {
+ int kernelpart = curpart;
+
+ if (rootfslen > 0 && rootfs_first)
+ kernelpart++;
+ parts[kernelpart].name = "kernel";
+ parts[kernelpart].offset = kerneladdr;
+ parts[kernelpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ int rootfspart = curpart;
+
+ if (kernellen > 0 && rootfs_first)
+ rootfspart--;
+ parts[rootfspart].name = "rootfs";
+ parts[rootfspart].offset = rootfsaddr;
+ parts[rootfspart].size = rootfslen;
+ if (sparelen > 0 && !rootfs_first)
+ parts[rootfspart].size += sparelen;
+ curpart++;
+ }
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ ret = 0;
+
+out:
+ vfree(buf);
+
+ if (ret)
+ return ret;
+
+ return nrparts;
+}
+
+static const struct of_device_id parse_bcm963xx_imagetag_match_table[] = {
+ { .compatible = "brcm,bcm963xx-imagetag" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, parse_bcm963xx_imagetag_match_table);
+
+static struct mtd_part_parser bcm963xx_imagetag_parser = {
+ .parse_fn = bcm963xx_parse_imagetag_partitions,
+ .name = "bcm963xx-imagetag",
+ .of_match_table = parse_bcm963xx_imagetag_match_table,
+};
+module_mtd_part_parser(bcm963xx_imagetag_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com>");
+MODULE_DESCRIPTION("MTD parser for BCM963XX CFE Image Tag partitions");
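The parser above converts the tag's flash-absolute kernel and rootfs addresses into partition-relative offsets by assuming that whichever image comes first sits immediately behind the image tag. A standalone sketch of that arithmetic; the 256-byte tag size and the sample addresses are example values only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t tag_size = 256;        /* example sizeof(struct bcm_tag) */
	uint32_t kerneladdr     = 0x10000100; /* flash-absolute, from the tag */
	uint32_t rootfsaddr     = 0x10080100; /* flash-absolute, from the tag */
	uint32_t base;

	/* whichever image comes first is assumed to follow the tag directly */
	base = (kerneladdr < rootfsaddr ? kerneladdr : rootfsaddr) - tag_size;

	printf("kernel offset: 0x%x\n", kerneladdr - base); /* 0x100 */
	printf("rootfs offset: 0x%x\n", rootfsaddr - base); /* 0x80100 */
	return 0;
}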
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 89227b1d036a..e0955a98a0f4 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -222,17 +222,17 @@ static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
uint8_t ecc[3];
__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC)) < 0)
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
return -EIO;
buffer += SM_SMALL_PAGE;
__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC)) < 0)
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
return -EIO;
return 0;
}
@@ -399,11 +399,11 @@ restart:
if (ftl->smallpagenand) {
__nand_calculate_ecc(buf + boffset, SM_SMALL_PAGE,
oob.ecc1,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
SM_SMALL_PAGE, oob.ecc2,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
}
if (!sm_write_sector(ftl, zone, block, boffset,
buf + boffset, &oob))
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 872b40922608..bfbfc17ed6aa 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -63,6 +63,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index af0a22019516..d60cbf23d9aa 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -632,6 +632,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ /* Read cannot cross 4K boundary */
+ block_size = min_t(loff_t, from + block_size,
+ round_up(from + 1, SZ_4K)) - from;
+
writel(from, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
@@ -685,6 +689,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ /* Write cannot cross 4K boundary */
+ block_size = min_t(loff_t, to + block_size,
+ round_up(to + 1, SZ_4K)) - to;
+
writel(to, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
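Both hunks above clamp a FIFO transfer so it never crosses a 4 KiB boundary: round_up(from + 1, SZ_4K) is the first boundary strictly above from, and the length is cut back to what still fits before it. A standalone sketch of the same computation, with a hypothetical helper name.

#include <stddef.h>
#include <stdint.h>

#define SZ_4K 0x1000u

static size_t clamp_to_4k_boundary(uint64_t from, size_t block_size)
{
	/* first 4 KiB boundary strictly above 'from' */
	uint64_t boundary = ((from + 1) + SZ_4K - 1) & ~(uint64_t)(SZ_4K - 1);
	uint64_t end = from + block_size;

	return (end > boundary ? boundary : end) - from;
}

/* e.g. clamp_to_4k_boundary(0x0ff0, 64) == 16: only 16 bytes remain before 0x1000 */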
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index fae147452aff..73172d7f512b 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -744,7 +744,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
/*
- * Erase types are ordered by size, with the biggest erase type at
+ * Erase types are ordered by size, with the smallest erase type at
* index 0.
*/
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
@@ -1905,7 +1905,9 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | USE_CLSR) },
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
@@ -2071,8 +2073,8 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
return &spi_nor_ids[tmp];
}
}
- dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
- id[0], id[1], id[2]);
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
+ SPI_NOR_MAX_ID_LEN, id);
return ERR_PTR(-ENODEV);
}
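The corrected comment matches the loop it sits above: erase types are sorted with the smallest at index 0, so walking the array from the top down means the first type that fits is also the largest usable one. A simplified model of that selection follows; the real spi_nor_find_best_erase_type() also applies per-region masks and alignment rules not shown here.

#include <stdint.h>

struct erase_type {
	uint32_t size;	/* 0 marks an unused slot */
};

/*
 * Simplified: pick the biggest erase type whose size divides the current
 * address and still fits within the remaining length.
 */
static int find_best_erase_type(const struct erase_type *types, int ntypes,
				uint64_t addr, uint64_t len)
{
	int i;

	for (i = ntypes - 1; i >= 0; i--) {
		if (!types[i].size)
			continue;
		if (!(addr % types[i].size) && len >= types[i].size)
			return i;
	}

	return -1;
}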
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index c71523e94580..73b06304c975 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -21,7 +21,7 @@
* or detected.
*/
-#if IS_ENABLED(CONFIG_MTD_NAND)
+#if IS_ENABLED(CONFIG_MTD_RAW_NAND)
struct nand_ecc_test {
const char *name;
@@ -122,9 +122,9 @@ static int no_bit_error_verify(void *error_data, void *error_ecc,
int ret;
__nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
if (ret == 0 && !memcmp(correct_data, error_data, size))
return 0;
@@ -152,9 +152,9 @@ static int single_bit_error_correct(void *error_data, void *error_ecc,
int ret;
__nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
if (ret == 1 && !memcmp(correct_data, error_data, size))
return 0;
@@ -189,9 +189,9 @@ static int double_bit_error_detect(void *error_data, void *error_ecc,
int ret;
__nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
return (ret == -EBADMSG) ? 0 : -EINVAL;
}
@@ -266,7 +266,7 @@ static int nand_ecc_test_run(const size_t size)
prandom_bytes(correct_data, size);
__nand_calculate_ecc(correct_data, size, correct_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
nand_ecc_test[i].prepare(error_data, error_ecc,
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2709dc02fc24..1f56c655832b 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1475,7 +1475,7 @@ static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
*/
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
{
- int err;
+ int err = 0;
struct ubi_wl_entry *e;
if (pnum < 0 || pnum >= ubi->peb_count) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7a96d168efc4..bc42f131f47c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -505,6 +505,7 @@ source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
tristate "Simulated networking device"
depends on DEBUG_FS
+ select NET_DEVLINK
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 3d27616d9c85..51cf5eca9c7f 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -116,11 +116,15 @@ static struct net_device * __init ipddp_init(void)
*/
static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
{
- __be32 paddr = skb_rtable(skb)->rt_gateway;
+ struct rtable *rtable = skb_rtable(skb);
+ __be32 paddr = 0;
struct ddpehdr *ddp;
struct ipddp_route *rt;
struct atalk_addr *our_addr;
+ if (rtable->rt_gw_family == AF_INET)
+ paddr = rtable->rt_gw4;
+
spin_lock(&ipddp_route_lock);
/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b59708c35faf..062fa7e3af4c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
if (event_dev->flags & IFF_MASTER) {
+ int ret;
+
netdev_dbg(event_dev, "IFF_MASTER\n");
- return bond_master_netdev_event(event, event_dev);
+ ret = bond_master_netdev_event(event, event_dev);
+ if (ret != NOTIFY_DONE)
+ return ret;
}
if (event_dev->flags & IFF_SLAVE) {
@@ -4114,8 +4118,7 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips a call to
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index b286f591242e..022044b59d6a 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -546,7 +546,7 @@ static int bond_fill_info(struct sk_buff *skb,
if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
goto nla_put_failure;
- targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
+ targets = nla_nest_start_noflag(skb, IFLA_BOND_ARP_IP_TARGET);
if (!targets)
goto nla_put_failure;
@@ -644,7 +644,7 @@ static int bond_fill_info(struct sk_buff *skb,
if (!bond_3ad_get_active_agg_info(bond, &info)) {
struct nlattr *nest;
- nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
+ nest = nla_nest_start_noflag(skb, IFLA_BOND_AD_INFO);
if (!nest)
goto nla_put_failure;
@@ -711,7 +711,7 @@ static int bond_fill_linkxstats(struct sk_buff *skb,
return -EINVAL;
}
- nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BOND);
+ nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BOND);
if (!nest)
return -EMSGSIZE;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -722,7 +722,7 @@ static int bond_fill_linkxstats(struct sk_buff *skb,
else
stats = &BOND_AD_INFO(bond).stats;
- nest2 = nla_nest_start(skb, BOND_XSTATS_3AD);
+ nest2 = nla_nest_start_noflag(skb, BOND_XSTATS_3AD);
if (!nest2) {
nla_nest_end(skb, nest);
return -EMSGSIZE;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index da1fc17295d9..b996967af8d9 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1098,13 +1098,6 @@ static int bond_option_arp_validate_set(struct bonding *bond,
{
netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
-
- if (bond->dev->flags & IFF_UP) {
- if (!newval->value)
- bond->recv_probe = NULL;
- else if (bond->params.arp_interval)
- bond->recv_probe = bond_arp_rcv;
- }
bond->params.arp_validate = newval->value;
return 0;
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+ return sprintf(buf, "%*phC\n",
+ slave->dev->addr_len,
+ slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 71bb3aebded4..c6c5ecdbcaef 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -41,7 +41,7 @@ config NET_DSA_MT7530
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
- depends on NET_DSA && NET_DSA_LEGACY
+ depends on NET_DSA
select NET_DSA_TAG_TRAILER
---help---
This enables support for the Marvell 88E6060 ethernet switch
@@ -51,6 +51,8 @@ source "drivers/net/dsa/microchip/Kconfig"
source "drivers/net/dsa/mv88e6xxx/Kconfig"
+source "drivers/net/dsa/sja1105/Kconfig"
+
config NET_DSA_QCA8K
tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
depends on NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 82e5d794c41f..fefb6aaa82ba 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o
obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
+obj-y += sja1105/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0852e5e08177..c8040ecf4425 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -428,7 +428,6 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable,
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
dev->vlan_enabled = enable;
- dev->vlan_filtering_enabled = enable_filtering;
}
static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -665,7 +664,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
+ b53_enable_vlan(dev, false, ds->vlan_filtering);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,
@@ -966,6 +965,13 @@ static int b53_setup(struct dsa_switch *ds)
b53_disable_port(ds, port);
}
+	/* Let DSA handle the case where multiple bridges span the same switch
+	 * device and different VLAN awareness settings are requested, which
+	 * would break filtering semantics for any of the other bridge
+	 * devices (not supported by the hardware).
+ */
+ ds->vlan_filtering_is_global = true;
+
return ret;
}
@@ -1275,35 +1281,17 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct b53_device *dev = ds->priv;
- struct net_device *bridge_dev;
- unsigned int i;
u16 pvid, new_pvid;
- /* Handle the case were multiple bridges span the same switch device
- * and one of them has a different setting than what is being requested
- * which would be breaking filtering semantics for any of the other
- * bridge devices.
- */
- b53_for_each_port(dev, i) {
- bridge_dev = dsa_to_port(ds, i)->bridge_dev;
- if (bridge_dev &&
- bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
- br_vlan_enabled(bridge_dev) != vlan_filtering) {
- netdev_err(bridge_dev,
- "VLAN filtering is global to the switch!\n");
- return -EINVAL;
- }
- }
-
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
new_pvid = pvid;
- if (dev->vlan_filtering_enabled && !vlan_filtering) {
+ if (!vlan_filtering) {
/* Filtering is currently enabled, use the default PVID since
* the bridge does not expect tagging anymore
*/
dev->ports[port].pvid = pvid;
new_pvid = b53_default_pvid(dev);
- } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
+ } else {
/* Filtering is currently disabled, restore the previous PVID */
new_pvid = dev->ports[port].pvid;
}
@@ -1329,7 +1317,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid_end > dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
+ b53_enable_vlan(dev, true, ds->vlan_filtering);
return 0;
}
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index e3441dcf2d21..f25bc80c4ffc 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -139,7 +139,6 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
bool vlan_enabled;
- bool vlan_filtering_enabled;
unsigned int num_ports;
struct b53_port *ports;
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index c8e3f05e1d72..4ccb3239f5f7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1188,10 +1188,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (ret)
goto out_mdio;
- pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
- priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
- priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
- priv->core, priv->irq0, priv->irq1);
+ dev_info(&pdev->dev,
+ "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
+ priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
+ priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
+ priv->irq0, priv->irq1);
return 0;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index e6234d209787..4212bc4a5f31 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
fs->m_ext.data[1]))
return -EINVAL;
+ if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+ return -EINVAL;
+
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
@@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
struct cfp_rule *rule;
int ret;
+ if (loc >= CFP_NUM_RULES)
+ return -EINVAL;
+
/* Refuse deleting unused rules, and those that are not unique since
* that could leave IPv6 rules with one of the chained rule in the
* table.
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index d8328866908c..4e64835deac2 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -4,7 +4,25 @@
*
* Copyright (C) 2010 Lantiq Deutschland
* Copyright (C) 2012 John Crispin <john@phrozen.org>
- * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * The VLAN and bridge model the GSWIP hardware uses does not directly
+ * match the model DSA uses.
+ *
+ * The hardware has 64 possible table entries for bridges with one VLAN
+ * ID, one flow ID and a list of ports for each bridge. All entries that
+ * match the same flow ID are combined in the MAC learning table; they
+ * act as one global bridge.
+ * The hardware does not support VLAN filtering per port, only per
+ * bridge; this driver maps the DSA model onto the hardware.
+ *
+ * The CPU gets all the exception frames which do not match any forwarding
+ * rule and the CPU port is also added to all bridges. This makes it possible
+ * to handle all the special cases easily in software.
+ * At initialization the driver allocates one bridge table entry for
+ * each switch port, which is used when the port is not part of an
+ * explicit bridge. This prevents frames from being forwarded
+ * between all LAN ports by default.
*/
#include <linux/clk.h>
@@ -148,19 +166,29 @@
#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
#define GSWIP_PCE_GCTRL_1 0x457
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
-#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11)
+#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
+#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
+#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
+#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
+#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
+#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
+#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
+#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
#define GSWIP_MAC_FLEN 0x8C5
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
@@ -183,6 +211,11 @@
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
+#define GSWIP_TABLE_ACTIVE_VLAN 0x01
+#define GSWIP_TABLE_VLAN_MAPPING 0x02
+#define GSWIP_TABLE_MAC_BRIDGE 0x0b
+#define GSWIP_TABLE_MAC_BRIDGE_STATIC	0x01	/* Static, non-aging entry */
+
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
struct gswip_hw_info {
@@ -202,6 +235,12 @@ struct gswip_gphy_fw {
char *fw_name;
};
+struct gswip_vlan {
+ struct net_device *bridge;
+ u16 vid;
+ u8 fid;
+};
+
struct gswip_priv {
__iomem void *gswip;
__iomem void *mdio;
@@ -211,8 +250,22 @@ struct gswip_priv {
struct dsa_switch *ds;
struct device *dev;
struct regmap *rcu_regmap;
+ struct gswip_vlan vlans[64];
int num_gphy_fw;
struct gswip_gphy_fw *gphy_fw;
+ u32 port_vlan_filter;
+};
+
+struct gswip_pce_table_entry {
+ u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
+ u16 table; // PCE_TBL_CTRL.ADDR = pData->table
+ u16 key[8];
+ u16 val[5];
+ u16 mask;
+ u8 gmap;
+ bool type;
+ bool valid;
+ bool key_mode;
};
struct gswip_rmon_cnt_desc {
@@ -447,10 +500,153 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}
+static int gswip_pce_table_entry_read(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u16 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+
+ gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
+ GSWIP_PCE_TBL_CTRL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
+
+ tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
+
+ crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
+
+ tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
+ tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
+ tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+
+ return 0;
+}
+
+static int gswip_pce_table_entry_write(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u16 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+
+ gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode,
+ GSWIP_PCE_TBL_CTRL);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
+
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode,
+ GSWIP_PCE_TBL_CTRL);
+
+ gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
+
+ crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
+ crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
+ GSWIP_PCE_TBL_CTRL_GMAP_MASK);
+ if (tbl->type)
+ crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
+ if (tbl->valid)
+ crtl |= GSWIP_PCE_TBL_CTRL_VLD;
+ crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
+ crtl |= GSWIP_PCE_TBL_CTRL_BAS;
+ gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
+
+ return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+}
+
+/* Add the LAN port into a bridge with the CPU port by
+ * default. This prevents automatic forwarding of
+ * packets between the LAN ports when no explicit
+ * bridge is configured.
+ */
+static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int err;
+
+ if (port >= max_ports) {
+ dev_err(priv->dev, "single port for %i supported\n", port);
+ return -EIO;
+ }
+
+ vlan_active.index = port + 1;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = 0; /* vid */
+ vlan_active.val[0] = port + 1 /* fid */;
+ vlan_active.valid = add;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ if (!add)
+ return 0;
+
+ vlan_mapping.index = port + 1;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ vlan_mapping.val[0] = 0 /* vid */;
+ vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
+ vlan_mapping.val[2] = 0;
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
static int gswip_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct gswip_priv *priv = ds->priv;
+ int err;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ err = gswip_add_single_port_br(priv, port, true);
+ if (err)
+ return err;
+ }
/* RMON Counter Enable for port */
gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
@@ -461,8 +657,6 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
- GSWIP_PCE_PCTRL_0p(port));
if (!dsa_is_cpu_port(ds, port)) {
u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
@@ -535,6 +729,39 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
return 0;
}
+static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+
+ /* Do not allow changing the VLAN filtering options while in bridge */
+ if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge)
+ return -EIO;
+
+ if (vlan_filtering) {
+ /* Use port based VLAN tag */
+ gswip_switch_mask(priv,
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR,
+ GSWIP_PCE_VCTRL(port));
+ gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
+ GSWIP_PCE_PCTRL_0p(port));
+ } else {
+ /* Use port based VLAN tag */
+ gswip_switch_mask(priv,
+ GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR,
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL(port));
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
+ GSWIP_PCE_PCTRL_0p(port));
+ }
+
+ return 0;
+}
+
static int gswip_setup(struct dsa_switch *ds)
{
struct gswip_priv *priv = ds->priv;
@@ -547,8 +774,10 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_w(priv, 0, GSWIP_SWRES);
/* disable port fetch/store dma on all ports */
- for (i = 0; i < priv->hw_info->max_ports; i++)
+ for (i = 0; i < priv->hw_info->max_ports; i++) {
gswip_port_disable(ds, i);
+ gswip_port_vlan_filtering(ds, i, false);
+ }
/* enable Switch */
gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
@@ -578,6 +807,10 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
GSWIP_FDMA_PCTRLp(cpu_port));
+ /* accept special tag in ingress direction */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
+ GSWIP_PCE_PCTRL_0p(cpu_port));
+
gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
GSWIP_MAC_CTRL_2p(cpu_port));
gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
@@ -587,10 +820,15 @@ static int gswip_setup(struct dsa_switch *ds)
/* VLAN aware Switching */
gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
- /* Mac Address Table Lock */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK |
- GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD,
- GSWIP_PCE_GCTRL_1);
+ /* Flush MAC Table */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+ if (err) {
+ dev_err(priv->dev, "MAC flushing didn't finish\n");
+ return err;
+ }
gswip_port_enable(ds, cpu_port, NULL);
return 0;
@@ -602,6 +840,551 @@ static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_GSWIP;
}
+static int gswip_vlan_active_create(struct gswip_priv *priv,
+ struct net_device *bridge,
+ int fid, u16 vid)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int err;
+ int i;
+
+ /* Look for a free slot */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (!priv->vlans[i].bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1)
+ return -ENOSPC;
+
+ if (fid == -1)
+ fid = idx;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = vid;
+ vlan_active.val[0] = fid;
+ vlan_active.valid = true;
+
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ priv->vlans[idx].bridge = bridge;
+ priv->vlans[idx].vid = vid;
+ priv->vlans[idx].fid = fid;
+
+ return idx;
+}
+
+static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ int err;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.valid = false;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err)
+ dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
+ priv->vlans[idx].bridge = NULL;
+
+ return err;
+}
+
+static int gswip_vlan_add_unaware(struct gswip_priv *priv,
+ struct net_device *bridge, int port)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ bool active_vlan_created = false;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+	/* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, -1, 0);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ /* VLAN ID byte, maps to the VLAN ID of vlan active table */
+ vlan_mapping.val[0] = 0;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+		/* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
+ return 0;
+}
+
+static int gswip_vlan_add_aware(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid, bool untagged,
+ bool pvid)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ bool active_vlan_created = false;
+ int idx = -1;
+ int fid = -1;
+ int i;
+ int err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ if (fid != -1 && fid != priv->vlans[i].fid)
+ dev_err(priv->dev, "one bridge with multiple flow ids\n");
+ fid = priv->vlans[i].fid;
+ if (priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+ }
+
+	/* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, fid, vid);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ /* VLAN ID byte, maps to the VLAN ID of vlan active table */
+ vlan_mapping.val[0] = vid;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ vlan_mapping.val[0] = vid;
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= BIT(cpu_port);
+ vlan_mapping.val[2] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= BIT(port);
+ if (untagged)
+ vlan_mapping.val[2] &= ~BIT(port);
+ else
+ vlan_mapping.val[2] |= BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+		/* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ if (pvid)
+ gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
+
+ return 0;
+}
+
+static int gswip_vlan_remove(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid, bool pvid, bool vlan_aware)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ (!vlan_aware || priv->vlans[i].vid == vid)) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ dev_err(priv->dev, "bridge to leave does not exists\n");
+ return -ENOENT;
+ }
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ vlan_mapping.val[1] &= ~BIT(port);
+ vlan_mapping.val[2] &= ~BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ /* In case all ports are removed from the bridge, remove the VLAN */
+ if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
+ err = gswip_vlan_active_remove(priv, idx);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n",
+ err);
+ return err;
+ }
+ }
+
+	/* GSWIP 2.2 (GRX300) and later program the VID directly here. */
+ if (pvid)
+ gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
+
+ return 0;
+}
+
+static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ /* When the bridge uses VLAN filtering we have to configure VLAN
+ * specific bridges. No bridge is configured here.
+ */
+ if (!br_vlan_enabled(bridge)) {
+ err = gswip_vlan_add_unaware(priv, bridge, port);
+ if (err)
+ return err;
+ priv->port_vlan_filter &= ~BIT(port);
+ } else {
+ priv->port_vlan_filter |= BIT(port);
+ }
+ return gswip_add_single_port_br(priv, port, false);
+}
+
+static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ gswip_add_single_port_br(priv, port, true);
+
+ /* When the bridge uses VLAN filtering we have to configure VLAN
+ * specific bridges. No bridge is configured here.
+ */
+ if (!br_vlan_enabled(bridge))
+ gswip_vlan_remove(priv, bridge, port, 0, true, false);
+}
+
+static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ u16 vid;
+ int i;
+ int pos = max_ports;
+
+ /* We only support VLAN filtering on bridges */
+ if (!dsa_is_cpu_port(ds, port) && !bridge)
+ return -EOPNOTSUPP;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ int idx = -1;
+
+ /* Check if there is already a page for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+
+ /* If this VLAN is not programmed yet, we have to reserve
+ * one entry in the VLAN table. Make sure the search for the
+ * next VID continues from the following position.
+ */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
+ break;
+ }
+ }
+
+ if (idx == -1)
+ return -ENOSPC;
+ }
+ }
+
+ return 0;
+}
+
+static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u16 vid;
+
+ /* We have to receive all packets on the CPU port and should not
+ * do any VLAN filtering here. This callback is also invoked with
+ * a NULL bridge, in which case we do not know which bridge to
+ * configure this for.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
+}
+
+static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u16 vid;
+ int err;
+
+ /* We have to receive all packets on the CPU port and should not
+ * do any VLAN filtering here. This callback is also invoked with
+ * a NULL bridge, in which case we do not know which bridge to
+ * configure this for.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void gswip_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to read mac bridge: %d\n",
+ err);
+ return;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
+ continue;
+
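+ /* Learned (non-static) entries keep the source port in val[0], bits 7:4 */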
+ if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
+ continue;
+
+ mac_bridge.valid = false;
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to write mac bridge: %d\n",
+ err);
+ return;
+ }
+ }
+}
+
+static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct gswip_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
+ GSWIP_SDMA_PCTRLp(port));
+ return;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
+ break;
+ default:
+ dev_err(priv->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
+ GSWIP_SDMA_PCTRLp(port));
+ gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
+ GSWIP_PCE_PCTRL_0p(port));
+}
+
+static int gswip_port_fdb(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, bool add)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ int fid = -1;
+ int i;
+ int err;
+
+ if (!bridge)
+ return -EINVAL;
+
+ for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ fid = priv->vlans[i].fid;
+ break;
+ }
+ }
+
+ if (fid == -1) {
+ dev_err(priv->dev, "Port not part of a bridge\n");
+ return -EINVAL;
+ }
+
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.key_mode = true;
+ mac_bridge.key[0] = addr[5] | (addr[4] << 8);
+ mac_bridge.key[1] = addr[3] | (addr[2] << 8);
+ mac_bridge.key[2] = addr[1] | (addr[0] << 8);
+ mac_bridge.key[3] = fid;
+ mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
+ mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
+ mac_bridge.valid = add;
+
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err)
+ dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
+
+ return err;
+}
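+
+/* Worked example of the key packing above (illustrative): the MAC address
+ * 00:11:22:33:44:55 in FID 1 is stored as
+ *   key[0] = 0x4455, key[1] = 0x2233, key[2] = 0x0011, key[3] = 0x0001
+ * and gswip_port_fdb_dump() below reverses exactly this packing.
+ */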
+
+static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ return gswip_port_fdb(ds, port, addr, vid, true);
+}
+
+static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ return gswip_port_fdb(ds, port, addr, vid, false);
+}
+
+static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned char addr[6];
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to write mac bridge: %d\n",
+ err);
+ return err;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ addr[5] = mac_bridge.key[0] & 0xff;
+ addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
+ addr[3] = mac_bridge.key[1] & 0xff;
+ addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
+ addr[1] = mac_bridge.key[2] & 0xff;
+ addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
+ if (mac_bridge.val[0] & BIT(port))
+ cb(addr, 0, true, data);
+ } else {
+ if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
+ cb(addr, 0, false, data);
+ }
+ }
+ return 0;
+}
+
static void gswip_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -809,6 +1592,17 @@ static const struct dsa_switch_ops gswip_switch_ops = {
.setup = gswip_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
+ .port_bridge_join = gswip_port_bridge_join,
+ .port_bridge_leave = gswip_port_bridge_leave,
+ .port_fast_age = gswip_port_fast_age,
+ .port_vlan_filtering = gswip_port_vlan_filtering,
+ .port_vlan_prepare = gswip_port_vlan_prepare,
+ .port_vlan_add = gswip_port_vlan_add,
+ .port_vlan_del = gswip_port_vlan_del,
+ .port_stp_state_set = gswip_port_stp_state_set,
+ .port_fdb_add = gswip_port_fdb_add,
+ .port_fdb_del = gswip_port_fdb_del,
+ .port_fdb_dump = gswip_port_fdb_dump,
.phylink_validate = gswip_phylink_validate,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index f16e1d7d8615..c026d15721f6 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1144,6 +1144,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
interface = PHY_INTERFACE_MODE_GMII;
if (gbit)
break;
+ /* fall through */
case 0:
interface = PHY_INTERFACE_MODE_MII;
break;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 7357b4fc0185..8d531c5f21f3 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -828,11 +828,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
- priv->ports[port].vlan_filtering = false;
-
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
- priv->ports[i].vlan_filtering) {
+ dsa_port_is_vlan_filtering(&ds->ports[i])) {
all_user_ports_removed = false;
break;
}
@@ -891,8 +889,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
* And the other port's port matrix cannot be broken when the
* other port is still a VLAN-aware port.
*/
- if (!priv->ports[i].vlan_filtering &&
- dsa_is_user_port(ds, i) && i != port) {
+ if (dsa_is_user_port(ds, i) && i != port &&
+ !dsa_port_is_vlan_filtering(&ds->ports[i])) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
@@ -910,8 +908,6 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
PCR_MATRIX(BIT(MT7530_CPU_PORT)));
priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
- mt7530_port_set_vlan_unaware(ds, port);
-
mutex_unlock(&priv->reg_mutex);
}
@@ -1013,10 +1009,6 @@ static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering)
{
- struct mt7530_priv *priv = ds->priv;
-
- priv->ports[port].vlan_filtering = vlan_filtering;
-
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set, Otherwise, the
@@ -1025,6 +1017,8 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
*/
mt7530_port_set_vlan_aware(ds, port);
mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+ } else {
+ mt7530_port_set_vlan_unaware(ds, port);
}
return 0;
@@ -1139,7 +1133,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
/* The port is kept as VLAN-unaware if bridge with vlan_filtering not
* being set.
*/
- if (!priv->ports[port].vlan_filtering)
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
return;
mutex_lock(&priv->reg_mutex);
@@ -1170,7 +1164,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
/* The port is kept as VLAN-unaware if bridge with vlan_filtering not
* being set.
*/
- if (!priv->ports[port].vlan_filtering)
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
return 0;
mutex_lock(&priv->reg_mutex);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index a95ed958df5b..1eec7bdc283a 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -410,7 +410,6 @@ struct mt7530_port {
bool enable;
u32 pm;
u16 pvid;
- bool vlan_filtering;
};
/* struct mt7530_priv - This is the main data structure for holding the state
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 0b3e51f248c2..2a2489b5196d 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips
* Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
@@ -18,40 +14,16 @@
#include <net/dsa.h>
#include "mv88e6060.h"
-static int reg_read(struct dsa_switch *ds, int addr, int reg)
+static int reg_read(struct mv88e6060_priv *priv, int addr, int reg)
{
- struct mv88e6060_priv *priv = ds->priv;
-
return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
}
-#define REG_READ(addr, reg) \
- ({ \
- int __ret; \
- \
- __ret = reg_read(ds, addr, reg); \
- if (__ret < 0) \
- return __ret; \
- __ret; \
- })
-
-
-static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+static int reg_write(struct mv88e6060_priv *priv, int addr, int reg, u16 val)
{
- struct mv88e6060_priv *priv = ds->priv;
-
return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
}
-#define REG_WRITE(addr, reg, val) \
- ({ \
- int __ret; \
- \
- __ret = reg_write(ds, addr, reg, val); \
- if (__ret < 0) \
- return __ret; \
- })
-
static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
{
int ret;
@@ -76,28 +48,7 @@ static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_TRAILER;
}
-static const char *mv88e6060_drv_probe(struct device *dsa_dev,
- struct device *host_dev, int sw_addr,
- void **_priv)
-{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
- struct mv88e6060_priv *priv;
- const char *name;
-
- name = mv88e6060_get_name(bus, sw_addr);
- if (name) {
- priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return NULL;
- *_priv = priv;
- priv->bus = bus;
- priv->sw_addr = sw_addr;
- }
-
- return name;
-}
-
-static int mv88e6060_switch_reset(struct dsa_switch *ds)
+static int mv88e6060_switch_reset(struct mv88e6060_priv *priv)
{
int i;
int ret;
@@ -105,23 +56,32 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
/* Set all ports to the disabled state. */
for (i = 0; i < MV88E6060_PORTS; i++) {
- ret = REG_READ(REG_PORT(i), PORT_CONTROL);
- REG_WRITE(REG_PORT(i), PORT_CONTROL,
- ret & ~PORT_CONTROL_STATE_MASK);
+ ret = reg_read(priv, REG_PORT(i), PORT_CONTROL);
+ if (ret < 0)
+ return ret;
+ ret = reg_write(priv, REG_PORT(i), PORT_CONTROL,
+ ret & ~PORT_CONTROL_STATE_MASK);
+ if (ret)
+ return ret;
}
/* Wait for transmit queues to drain. */
usleep_range(2000, 4000);
/* Reset the switch. */
- REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_SWRESET |
- GLOBAL_ATU_CONTROL_LEARNDIS);
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_SWRESET |
+ GLOBAL_ATU_CONTROL_LEARNDIS);
+ if (ret)
+ return ret;
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+ ret = reg_read(priv, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
if (ret & GLOBAL_STATUS_INIT_READY)
break;
@@ -133,61 +93,69 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
return 0;
}
-static int mv88e6060_setup_global(struct dsa_switch *ds)
+static int mv88e6060_setup_global(struct mv88e6060_priv *priv)
{
+ int ret;
+
/* Disable discarding of frames with excessive collisions,
* set the maximum frame size to 1536 bytes, and mask all
* interrupt sources.
*/
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_MAX_FRAME_1536);
+ if (ret)
+ return ret;
/* Disable automatic address learning.
*/
- REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_LEARNDIS);
-
- return 0;
+ return reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_LEARNDIS);
}
-static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
+static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
{
int addr = REG_PORT(p);
+ int ret;
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
* port, enable Ingress and Egress Trailer tagging mode.
*/
- REG_WRITE(addr, PORT_CONTROL,
- dsa_is_cpu_port(ds, p) ?
+ ret = reg_write(priv, addr, PORT_CONTROL,
+ dsa_is_cpu_port(priv->ds, p) ?
PORT_CONTROL_TRAILER |
PORT_CONTROL_INGRESS_MODE |
PORT_CONTROL_STATE_FORWARDING :
PORT_CONTROL_STATE_FORWARDING);
+ if (ret)
+ return ret;
/* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the CPU port.
*/
- REG_WRITE(addr, PORT_VLAN_MAP,
- ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
- (dsa_is_cpu_port(ds, p) ? dsa_user_ports(ds) :
- BIT(dsa_to_port(ds, p)->cpu_dp->index)));
+ ret = reg_write(priv, addr, PORT_VLAN_MAP,
+ ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
+ (dsa_is_cpu_port(priv->ds, p) ?
+ dsa_user_ports(priv->ds) :
+ BIT(dsa_to_port(priv->ds, p)->cpu_dp->index)));
+ if (ret)
+ return ret;
/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
- REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));
-
- return 0;
+ return reg_write(priv, addr, PORT_ASSOC_VECTOR, BIT(p));
}
-static int mv88e6060_setup_addr(struct dsa_switch *ds)
+static int mv88e6060_setup_addr(struct mv88e6060_priv *priv)
{
u8 addr[ETH_ALEN];
+ int ret;
u16 val;
eth_random_addr(addr);
@@ -199,34 +167,43 @@ static int mv88e6060_setup_addr(struct dsa_switch *ds)
*/
val &= 0xfeff;
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_01, val);
+ if (ret)
+ return ret;
+
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_23,
+ (addr[2] << 8) | addr[3]);
+ if (ret)
+ return ret;
- return 0;
+ return reg_write(priv, REG_GLOBAL, GLOBAL_MAC_45,
+ (addr[4] << 8) | addr[5]);
}
static int mv88e6060_setup(struct dsa_switch *ds)
{
+ struct mv88e6060_priv *priv = ds->priv;
int ret;
int i;
- ret = mv88e6060_switch_reset(ds);
+ priv->ds = ds;
+
+ ret = mv88e6060_switch_reset(priv);
if (ret < 0)
return ret;
/* @@@ initialise atu */
- ret = mv88e6060_setup_global(ds);
+ ret = mv88e6060_setup_global(priv);
if (ret < 0)
return ret;
- ret = mv88e6060_setup_addr(ds);
+ ret = mv88e6060_setup_addr(priv);
if (ret < 0)
return ret;
for (i = 0; i < MV88E6060_PORTS; i++) {
- ret = mv88e6060_setup_port(ds, i);
+ ret = mv88e6060_setup_port(priv, i);
if (ret < 0)
return ret;
}
@@ -243,51 +220,93 @@ static int mv88e6060_port_to_phy_addr(int port)
static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum)
{
+ struct mv88e6060_priv *priv = ds->priv;
int addr;
addr = mv88e6060_port_to_phy_addr(port);
if (addr == -1)
return 0xffff;
- return reg_read(ds, addr, regnum);
+ return reg_read(priv, addr, regnum);
}
static int
mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
+ struct mv88e6060_priv *priv = ds->priv;
int addr;
addr = mv88e6060_port_to_phy_addr(port);
if (addr == -1)
return 0xffff;
- return reg_write(ds, addr, regnum, val);
+ return reg_write(priv, addr, regnum, val);
}
static const struct dsa_switch_ops mv88e6060_switch_ops = {
.get_tag_protocol = mv88e6060_get_tag_protocol,
- .probe = mv88e6060_drv_probe,
.setup = mv88e6060_setup,
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
};
-static struct dsa_switch_driver mv88e6060_switch_drv = {
- .ops = &mv88e6060_switch_ops,
-};
-
-static int __init mv88e6060_init(void)
+static int mv88e6060_probe(struct mdio_device *mdiodev)
{
- register_switch_driver(&mv88e6060_switch_drv);
- return 0;
+ struct device *dev = &mdiodev->dev;
+ struct mv88e6060_priv *priv;
+ struct dsa_switch *ds;
+ const char *name;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+ priv->sw_addr = mdiodev->addr;
+
+ name = mv88e6060_get_name(priv->bus, priv->sw_addr);
+ if (!name)
+ return -ENODEV;
+
+ dev_info(dev, "switch %s detected\n", name);
+
+ ds = dsa_switch_alloc(dev, MV88E6060_PORTS);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->priv = priv;
+ ds->dev = dev;
+ ds->ops = &mv88e6060_switch_ops;
+
+ dev_set_drvdata(dev, ds);
+
+ return dsa_register_switch(ds);
}
-module_init(mv88e6060_init);
-static void __exit mv88e6060_cleanup(void)
+static void mv88e6060_remove(struct mdio_device *mdiodev)
{
- unregister_switch_driver(&mv88e6060_switch_drv);
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ dsa_unregister_switch(ds);
}
-module_exit(mv88e6060_cleanup);
+
+static const struct of_device_id mv88e6060_of_match[] = {
+ {
+ .compatible = "marvell,mv88e6060",
+ },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver mv88e6060_driver = {
+ .probe = mv88e6060_probe,
+ .remove = mv88e6060_remove,
+ .mdiodrv.driver = {
+ .name = "mv88e6060",
+ .of_match_table = mv88e6060_of_match,
+ },
+};
+
+mdio_module_driver(mv88e6060_driver);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
index 10249bd16292..c0e7a0f2fb6a 100644
--- a/drivers/net/dsa/mv88e6060.h
+++ b/drivers/net/dsa/mv88e6060.h
@@ -117,6 +117,7 @@ struct mv88e6060_priv {
*/
struct mii_bus *bus;
int sw_addr;
+ struct dsa_switch *ds;
};
#endif
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 50de304abe2f..e85755dde90b 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -12,3 +12,4 @@ mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
+mv88e6xxx-objs += smi.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f4e2db44ad91..28414db979b0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -43,6 +43,7 @@
#include "port.h"
#include "ptp.h"
#include "serdes.h"
+#include "smi.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
{
@@ -52,149 +53,6 @@ static void assert_reg_lock(struct mv88e6xxx_chip *chip)
}
}
-/* The switch ADDR[4:1] configuration pins define the chip SMI device address
- * (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
- *
- * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
- * is the only device connected to the SMI master. In this mode it responds to
- * all 32 possible SMI addresses, and thus maps directly the internal devices.
- *
- * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
- * multiple devices to share the SMI interface. In this mode it responds to only
- * 2 registers, used to indirectly access the internal SMI devices.
- */
-
-static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val)
-{
- if (!chip->smi_ops)
- return -EOPNOTSUPP;
-
- return chip->smi_ops->read(chip, addr, reg, val);
-}
-
-static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val)
-{
- if (!chip->smi_ops)
- return -EOPNOTSUPP;
-
- return chip->smi_ops->write(chip, addr, reg, val);
-}
-
-static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val)
-{
- int ret;
-
- ret = mdiobus_read_nested(chip->bus, addr, reg);
- if (ret < 0)
- return ret;
-
- *val = ret & 0xffff;
-
- return 0;
-}
-
-static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val)
-{
- int ret;
-
- ret = mdiobus_write_nested(chip->bus, addr, reg, val);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_single_chip_ops = {
- .read = mv88e6xxx_smi_single_chip_read,
- .write = mv88e6xxx_smi_single_chip_write,
-};
-
-static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip)
-{
- int ret;
- int i;
-
- for (i = 0; i < 16; i++) {
- ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD);
- if (ret < 0)
- return ret;
-
- if ((ret & SMI_CMD_BUSY) == 0)
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
-static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val)
-{
- int ret;
-
- /* Wait for the bus to become free. */
- ret = mv88e6xxx_smi_multi_chip_wait(chip);
- if (ret < 0)
- return ret;
-
- /* Transmit the read command. */
- ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
- SMI_CMD_OP_22_READ | (addr << 5) | reg);
- if (ret < 0)
- return ret;
-
- /* Wait for the read command to complete. */
- ret = mv88e6xxx_smi_multi_chip_wait(chip);
- if (ret < 0)
- return ret;
-
- /* Read the data. */
- ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA);
- if (ret < 0)
- return ret;
-
- *val = ret & 0xffff;
-
- return 0;
-}
-
-static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val)
-{
- int ret;
-
- /* Wait for the bus to become free. */
- ret = mv88e6xxx_smi_multi_chip_wait(chip);
- if (ret < 0)
- return ret;
-
- /* Transmit the data to write. */
- ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val);
- if (ret < 0)
- return ret;
-
- /* Transmit the write command. */
- ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
- SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
- if (ret < 0)
- return ret;
-
- /* Wait for the write command to complete. */
- ret = mv88e6xxx_smi_multi_chip_wait(chip);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_multi_chip_ops = {
- .read = mv88e6xxx_smi_multi_chip_read,
- .write = mv88e6xxx_smi_multi_chip_write,
-};
-
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val)
{
int err;
@@ -553,11 +411,28 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
int speed, int duplex, int pause,
phy_interface_t mode)
{
+ struct phylink_link_state state;
int err;
if (!chip->info->ops->port_set_link)
return 0;
+ if (!chip->info->ops->port_link_state)
+ return 0;
+
+ err = chip->info->ops->port_link_state(chip, port, &state);
+ if (err)
+ return err;
+
+ /* Has anything actually changed? We don't expect the
+ * interface mode to change without one of the other
+ * parameters also changing.
+ */
+ if (state.link == link &&
+ state.speed == speed &&
+ state.duplex == duplex)
+ return 0;
+
/* Port's MAC control must not be changed unless the link is down */
err = chip->info->ops->port_set_link(chip, port, 0);
if (err)
@@ -2411,6 +2286,9 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED))
+ dev_err(chip->dev, "failed to disable port\n");
+
if (chip->info->ops->serdes_irq_free)
chip->info->ops->serdes_irq_free(chip, port);
@@ -2579,8 +2457,18 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
- if (dsa_is_unused_port(ds, i))
+ if (dsa_is_unused_port(ds, i)) {
+ err = mv88e6xxx_port_set_state(chip, i,
+ BR_STATE_DISABLED);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_serdes_power(chip, i, false);
+ if (err)
+ goto unlock;
+
continue;
+ }
err = mv88e6xxx_setup_port(chip, i);
if (err)
@@ -4615,30 +4503,6 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
return chip;
}
-static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus, int sw_addr)
-{
- if (sw_addr == 0)
- chip->smi_ops = &mv88e6xxx_smi_single_chip_ops;
- else if (chip->info->multi_chip)
- chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops;
- else
- return -EINVAL;
-
- chip->bus = bus;
- chip->sw_addr = sw_addr;
-
- return 0;
-}
-
-static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
-{
- int i;
-
- for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
- chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
-}
-
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
int port)
{
@@ -4647,58 +4511,6 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->info->tag_protocol;
}
-#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
-static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
- struct device *host_dev, int sw_addr,
- void **priv)
-{
- struct mv88e6xxx_chip *chip;
- struct mii_bus *bus;
- int err;
-
- bus = dsa_host_dev_to_mii_bus(host_dev);
- if (!bus)
- return NULL;
-
- chip = mv88e6xxx_alloc_chip(dsa_dev);
- if (!chip)
- return NULL;
-
- /* Legacy SMI probing will only support chips similar to 88E6085 */
- chip->info = &mv88e6xxx_table[MV88E6085];
-
- err = mv88e6xxx_smi_init(chip, bus, sw_addr);
- if (err)
- goto free;
-
- err = mv88e6xxx_detect(chip);
- if (err)
- goto free;
-
- mv88e6xxx_ports_cmode_init(chip);
-
- mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_switch_reset(chip);
- mutex_unlock(&chip->reg_lock);
- if (err)
- goto free;
-
- mv88e6xxx_phy_init(chip);
-
- err = mv88e6xxx_mdios_register(chip, NULL);
- if (err)
- goto free;
-
- *priv = chip;
-
- return chip->info->name;
-free:
- devm_kfree(dsa_dev, chip);
-
- return NULL;
-}
-#endif
-
static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -4753,9 +4565,6 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
}
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
-#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
- .probe = mv88e6xxx_drv_probe,
-#endif
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
.adjust_link = mv88e6xxx_adjust_link,
@@ -4801,10 +4610,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_ts_info = mv88e6xxx_get_ts_info,
};
-static struct dsa_switch_driver mv88e6xxx_switch_drv = {
- .ops = &mv88e6xxx_switch_ops,
-};
-
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
{
struct device *dev = chip->dev;
@@ -4915,7 +4720,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out;
- mv88e6xxx_ports_cmode_init(chip);
mv88e6xxx_phy_init(chip);
if (chip->info->ops->get_eeprom) {
@@ -4932,12 +4736,17 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out;
- chip->irq = of_irq_get(np, 0);
- if (chip->irq == -EPROBE_DEFER) {
- err = chip->irq;
- goto out;
+ if (np) {
+ chip->irq = of_irq_get(np, 0);
+ if (chip->irq == -EPROBE_DEFER) {
+ err = chip->irq;
+ goto out;
+ }
}
+ if (pdata)
+ chip->irq = pdata->irq;
+
/* Has to be performed before the MDIO bus is created, because
* the PHYs will link their interrupts to these interrupt
* controllers
@@ -5047,19 +4856,7 @@ static struct mdio_driver mv88e6xxx_driver = {
},
};
-static int __init mv88e6xxx_init(void)
-{
- register_switch_driver(&mv88e6xxx_switch_drv);
- return mdio_driver_register(&mv88e6xxx_driver);
-}
-module_init(mv88e6xxx_init);
-
-static void __exit mv88e6xxx_cleanup(void)
-{
- mdio_driver_unregister(&mv88e6xxx_driver);
- unregister_switch_driver(&mv88e6xxx_switch_drv);
-}
-module_exit(mv88e6xxx_cleanup);
+mdio_module_driver(mv88e6xxx_driver);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 19c07dff0440..faa3fa889f19 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -21,17 +21,6 @@
#include <linux/timecounter.h>
#include <net/dsa.h>
-#define SMI_CMD 0x00
-#define SMI_CMD_BUSY BIT(15)
-#define SMI_CMD_CLAUSE_22 BIT(12)
-#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
-#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
-#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
-#define SMI_DATA 0x01
-
#define MV88E6XXX_N_FID 4096
/* PVT limits for 4-bit port and 5-bit switch */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dce84a2a65c7..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
lane = mv88e6390x_serdes_get_lane(chip, port);
- if (lane < 0)
+ if (lane < 0 && lane != -ENODEV)
return lane;
- if (chip->ports[port].serdes_irq) {
- err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (lane >= 0) {
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
}
- err = mv88e6390x_serdes_power(chip, port, false);
- if (err)
- return err;
+ chip->ports[port].cmode = 0;
if (cmode) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
+ chip->ports[port].cmode = cmode;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return lane;
+
err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
}
}
- chip->ports[port].cmode = cmode;
-
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index c7bed263a0f4..39c85e98fb92 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,7 +52,6 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
-#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
new file mode 100644
index 000000000000..96f7d2685bdc
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -0,0 +1,158 @@
+/*
+ * Marvell 88E6xxx System Management Interface (SMI) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2019 Vivien Didelot <vivien.didelot@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chip.h"
+#include "smi.h"
+
+/* The switch ADDR[4:1] configuration pins define the chip SMI device address
+ * (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
+ *
+ * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
+ * is the only device connected to the SMI master. In this mode it responds to
+ * all 32 possible SMI addresses, and thus maps directly the internal devices.
+ *
+ * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
+ * multiple devices to share the SMI interface. In this mode it responds to only
+ * 2 registers, used to indirectly access the internal SMI devices.
+ */
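+
+/* For example (illustrative): a chip strapped to ADDR 0x10 is reached only
+ * through the two registers at MDIO address 0x10, MV88E6XXX_SMI_CMD (0x00)
+ * and MV88E6XXX_SMI_DATA (0x01), which the indirect ops below use to reach
+ * any internal device/register pair.
+ */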
+
+static int mv88e6xxx_smi_direct_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ int ret;
+
+ ret = mdiobus_read_nested(chip->bus, dev, reg);
+ if (ret < 0)
+ return ret;
+
+ *data = ret & 0xffff;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ int ret;
+
+ ret = mdiobus_write_nested(chip->bus, dev, reg, data);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
+ int dev, int reg, int bit, int val)
+{
+ u16 data;
+ int err;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
+ if (err)
+ return err;
+
+ if (!!(data & BIT(bit)) == !!val)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_direct_ops = {
+ .read = mv88e6xxx_smi_direct_read,
+ .write = mv88e6xxx_smi_direct_write,
+};
+
+/* Offset 0x00: SMI Command Register
+ * Offset 0x01: SMI Data Register
+ */
+
+static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ int err;
+
+ err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD,
+ MV88E6XXX_SMI_CMD_BUSY |
+ MV88E6XXX_SMI_CMD_MODE_22 |
+ MV88E6XXX_SMI_CMD_OP_22_READ |
+ (dev << 5) | reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_smi_direct_read(chip, chip->sw_addr,
+ MV88E6XXX_SMI_DATA, data);
+}
+
+static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ int err;
+
+ err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_DATA, data);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD,
+ MV88E6XXX_SMI_CMD_BUSY |
+ MV88E6XXX_SMI_CMD_MODE_22 |
+ MV88E6XXX_SMI_CMD_OP_22_WRITE |
+ (dev << 5) | reg);
+ if (err)
+ return err;
+
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
+ .read = mv88e6xxx_smi_indirect_read,
+ .write = mv88e6xxx_smi_indirect_write,
+};
+
+int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int sw_addr)
+{
+ if (sw_addr == 0)
+ chip->smi_ops = &mv88e6xxx_smi_direct_ops;
+ else if (chip->info->multi_chip)
+ chip->smi_ops = &mv88e6xxx_smi_indirect_ops;
+ else
+ return -EINVAL;
+
+ chip->bus = bus;
+ chip->sw_addr = sw_addr;
+
+ return 0;
+}
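+
+/* Usage sketch (illustrative only; chip, dev and reg are placeholders): once
+ * mv88e6xxx_smi_init() has selected the direct or indirect ops, callers go
+ * through the helpers declared in smi.h, roughly:
+ *
+ *   u16 val;
+ *   int err;
+ *
+ *   err = mv88e6xxx_smi_read(chip, dev, reg, &val);
+ *   if (!err)
+ *           err = mv88e6xxx_smi_write(chip, dev, reg, val);
+ */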
diff --git a/drivers/net/dsa/mv88e6xxx/smi.h b/drivers/net/dsa/mv88e6xxx/smi.h
new file mode 100644
index 000000000000..35e6403b65dc
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/smi.h
@@ -0,0 +1,59 @@
+/*
+ * Marvell 88E6xxx System Management Interface (SMI) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2019 Vivien Didelot <vivien.didelot@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_SMI_H
+#define _MV88E6XXX_SMI_H
+
+#include "chip.h"
+
+/* Offset 0x00: SMI Command Register */
+#define MV88E6XXX_SMI_CMD 0x00
+#define MV88E6XXX_SMI_CMD_BUSY 0x8000
+#define MV88E6XXX_SMI_CMD_MODE_MASK 0x1000
+#define MV88E6XXX_SMI_CMD_MODE_45 0x0000
+#define MV88E6XXX_SMI_CMD_MODE_22 0x1000
+#define MV88E6XXX_SMI_CMD_OP_MASK 0x0c00
+#define MV88E6XXX_SMI_CMD_OP_22_WRITE 0x0400
+#define MV88E6XXX_SMI_CMD_OP_22_READ 0x0800
+#define MV88E6XXX_SMI_CMD_OP_45_WRITE_ADDR 0x0000
+#define MV88E6XXX_SMI_CMD_OP_45_WRITE_DATA 0x0400
+#define MV88E6XXX_SMI_CMD_OP_45_READ_DATA 0x0800
+#define MV88E6XXX_SMI_CMD_OP_45_READ_DATA_INC 0x0c00
+#define MV88E6XXX_SMI_CMD_DEV_ADDR_MASK 0x003e
+#define MV88E6XXX_SMI_CMD_REG_ADDR_MASK 0x001f
+
+/* Offset 0x01: SMI Data Register */
+#define MV88E6XXX_SMI_DATA 0x01
+
+int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int sw_addr);
+
+static inline int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ if (chip->smi_ops && chip->smi_ops->read)
+ return chip->smi_ops->read(chip, dev, reg, data);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ if (chip->smi_ops && chip->smi_ops->write)
+ return chip->smi_ops->write(chip, dev, reg, data);
+
+ return -EOPNOTSUPP;
+}
+
+#endif /* _MV88E6XXX_SMI_H */
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
new file mode 100644
index 000000000000..757751a89819
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -0,0 +1,17 @@
+config NET_DSA_SJA1105
+ tristate "NXP SJA1105 Ethernet switch family support"
+ depends on NET_DSA && SPI
+ select NET_DSA_TAG_SJA1105
+ select PACKING
+ select CRC32
+ help
+ This is the driver for the NXP SJA1105 automotive Ethernet switch
+ family. These are 5-port devices and are managed over an SPI
+ interface. Probing is handled based on OF bindings and so is the
+ linkage to phylib. The driver supports the following revisions:
+ - SJA1105E (Gen. 1, No TT-Ethernet)
+ - SJA1105T (Gen. 1, TT-Ethernet)
+ - SJA1105P (Gen. 2, No SGMII, No TT-Ethernet)
+ - SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
+ - SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
+ - SJA1105S (Gen. 2, SGMII, TT-Ethernet)
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
new file mode 100644
index 000000000000..1c2b55fec959
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
+
+sja1105-objs := \
+ sja1105_spi.o \
+ sja1105_main.o \
+ sja1105_ethtool.o \
+ sja1105_clocking.o \
+ sja1105_static_config.o \
+ sja1105_dynamic_config.o \
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
new file mode 100644
index 000000000000..b043bfc408f2
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_H
+#define _SJA1105_H
+
+#include <linux/dsa/sja1105.h>
+#include <net/dsa.h>
+#include <linux/mutex.h>
+#include "sja1105_static_config.h"
+
+#define SJA1105_NUM_PORTS 5
+#define SJA1105_NUM_TC 8
+#define SJA1105ET_FDB_BIN_SIZE 4
+/* The hardware value is in multiples of 10 ms.
+ * The passed parameter is in multiples of 1 ms.
+ */
+#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
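+/* For example, a requested ageing time of 300000 ms becomes
+ * 300000 / 10 = 30000 hardware units of 10 ms each.
+ */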
+
+/* Keeps the register addresses that differ between E/T and P/Q/R/S */
+struct sja1105_regs {
+ u64 device_id;
+ u64 prod_id;
+ u64 status;
+ u64 port_control;
+ u64 rgu;
+ u64 config;
+ u64 rmii_pll1;
+ u64 pad_mii_tx[SJA1105_NUM_PORTS];
+ u64 cgu_idiv[SJA1105_NUM_PORTS];
+ u64 rgmii_pad_mii_tx[SJA1105_NUM_PORTS];
+ u64 mii_tx_clk[SJA1105_NUM_PORTS];
+ u64 mii_rx_clk[SJA1105_NUM_PORTS];
+ u64 mii_ext_tx_clk[SJA1105_NUM_PORTS];
+ u64 mii_ext_rx_clk[SJA1105_NUM_PORTS];
+ u64 rgmii_tx_clk[SJA1105_NUM_PORTS];
+ u64 rmii_ref_clk[SJA1105_NUM_PORTS];
+ u64 rmii_ext_tx_clk[SJA1105_NUM_PORTS];
+ u64 mac[SJA1105_NUM_PORTS];
+ u64 mac_hl1[SJA1105_NUM_PORTS];
+ u64 mac_hl2[SJA1105_NUM_PORTS];
+ u64 qlevel[SJA1105_NUM_PORTS];
+};
+
+struct sja1105_info {
+ u64 device_id;
+ /* Needed for distinction between P and R, and between Q and S
+ * (since the parts with/without SGMII share the same
+ * switch core and device_id)
+ */
+ u64 part_no;
+ const struct sja1105_dynamic_table_ops *dyn_ops;
+ const struct sja1105_table_ops *static_ops;
+ const struct sja1105_regs *regs;
+ int (*reset_cmd)(const void *ctx, const void *data);
+ int (*setup_rgmii_delay)(const void *ctx, int port);
+ const char *name;
+};
+
+struct sja1105_private {
+ struct sja1105_static_config static_config;
+ bool rgmii_rx_delay[SJA1105_NUM_PORTS];
+ bool rgmii_tx_delay[SJA1105_NUM_PORTS];
+ const struct sja1105_info *info;
+ struct gpio_desc *reset_gpio;
+ struct spi_device *spidev;
+ struct dsa_switch *ds;
+ struct sja1105_port ports[SJA1105_NUM_PORTS];
+ /* Serializes transmission of management frames so that
+ * the switch doesn't confuse them with one another.
+ */
+ struct mutex mgmt_lock;
+};
+
+#include "sja1105_dynamic_config.h"
+
+struct sja1105_spi_message {
+ u64 access;
+ u64 read_count;
+ u64 address;
+};
+
+typedef enum {
+ SPI_READ = 0,
+ SPI_WRITE = 1,
+} sja1105_spi_rw_mode_t;
+
+/* From sja1105_spi.c */
+int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ void *packed_buf, size_t size_bytes);
+int sja1105_spi_send_int(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u64 *value, u64 size_bytes);
+int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 base_addr,
+ void *packed_buf, u64 buf_len);
+int sja1105_static_config_upload(struct sja1105_private *priv);
+
+extern struct sja1105_info sja1105e_info;
+extern struct sja1105_info sja1105t_info;
+extern struct sja1105_info sja1105p_info;
+extern struct sja1105_info sja1105q_info;
+extern struct sja1105_info sja1105r_info;
+extern struct sja1105_info sja1105s_info;
+
+/* From sja1105_clocking.c */
+
+typedef enum {
+ XMII_MAC = 0,
+ XMII_PHY = 1,
+} sja1105_mii_role_t;
+
+typedef enum {
+ XMII_MODE_MII = 0,
+ XMII_MODE_RMII = 1,
+ XMII_MODE_RGMII = 2,
+} sja1105_phy_interface_t;
+
+typedef enum {
+ SJA1105_SPEED_10MBPS = 3,
+ SJA1105_SPEED_100MBPS = 2,
+ SJA1105_SPEED_1000MBPS = 1,
+ SJA1105_SPEED_AUTO = 0,
+} sja1105_speed_t;
+
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
+int sja1105_clocking_setup(struct sja1105_private *priv);
+
+/* From sja1105_ethtool.c */
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data);
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data);
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* From sja1105_dynamic_config.c */
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry);
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep);
+
+u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+
+/* Common implementations for the static and dynamic configs */
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
new file mode 100644
index 000000000000..94bfe0ee50a8
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/packing.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_CGU_CMD 4
+
+struct sja1105_cfg_pad_mii_tx {
+ u64 d32_os;
+ u64 d32_ipud;
+ u64 d10_os;
+ u64 d10_ipud;
+ u64 ctrl_os;
+ u64 ctrl_ipud;
+ u64 clk_os;
+ u64 clk_ih;
+ u64 clk_ipud;
+};
+
+/* UM10944 Table 82.
+ * IDIV_0_C to IDIV_4_C control registers
+ * (addr. 10000Bh to 10000Fh)
+ */
+struct sja1105_cgu_idiv {
+ u64 clksrc;
+ u64 autoblock;
+ u64 idiv;
+ u64 pd;
+};
+
+/* PLL_1_C control register
+ *
+ * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
+ * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
+ */
+struct sja1105_cgu_pll_ctrl {
+ u64 pllclksrc;
+ u64 msel;
+ u64 autoblock;
+ u64 psel;
+ u64 direct;
+ u64 fbsel;
+ u64 bypass;
+ u64 pd;
+};
+
+enum {
+ CLKSRC_MII0_TX_CLK = 0x00,
+ CLKSRC_MII0_RX_CLK = 0x01,
+ CLKSRC_MII1_TX_CLK = 0x02,
+ CLKSRC_MII1_RX_CLK = 0x03,
+ CLKSRC_MII2_TX_CLK = 0x04,
+ CLKSRC_MII2_RX_CLK = 0x05,
+ CLKSRC_MII3_TX_CLK = 0x06,
+ CLKSRC_MII3_RX_CLK = 0x07,
+ CLKSRC_MII4_TX_CLK = 0x08,
+ CLKSRC_MII4_RX_CLK = 0x09,
+ CLKSRC_PLL0 = 0x0B,
+ CLKSRC_PLL1 = 0x0E,
+ CLKSRC_IDIV0 = 0x11,
+ CLKSRC_IDIV1 = 0x12,
+ CLKSRC_IDIV2 = 0x13,
+ CLKSRC_IDIV3 = 0x14,
+ CLKSRC_IDIV4 = 0x15,
+};
+
+/* UM10944 Table 83.
+ * MIIx clock control registers 1 to 30
+ * (addresses 100013h to 100035h)
+ */
+struct sja1105_cgu_mii_ctrl {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
+static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &idiv->idiv, 5, 2, size, op);
+ sja1105_packing(buf, &idiv->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
+ bool enabled, int factor)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ struct sja1105_cgu_idiv idiv;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (enabled && factor != 1 && factor != 10) {
+ dev_err(dev, "idiv factor must be 1 or 10\n");
+ return -ERANGE;
+ }
+
+ /* Payload for packed_buf */
+ idiv.clksrc = 0x0A; /* 25MHz */
+ idiv.autoblock = 1; /* Block clk automatically */
+ idiv.idiv = factor - 1; /* Divide by 1 or 10 */
+ idiv.pd = enabled ? 0 : 1; /* Power down? */
+ sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->cgu_idiv[port], packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+}
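+
+/* Worked example (illustrative): for enabled == true and factor == 10 the
+ * fields above pack to clksrc = 0x0A, autoblock = 1, idiv = 9, pd = 0,
+ * i.e. a CGU IDIV register value of 0x0A000824.
+ */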
+
+static void
+sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
+ int port, sja1105_mii_role_t role)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_tx_clk;
+ const int mac_clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+ const int phy_clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (role == XMII_MAC)
+ clksrc = mac_clk_sources[port];
+ else
+ clksrc = phy_clk_sources[port];
+
+ /* Payload for packed_buf */
+ mii_tx_clk.clksrc = clksrc;
+ mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->mii_tx_clk[port], packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_RX_CLK,
+ CLKSRC_MII1_RX_CLK,
+ CLKSRC_MII2_RX_CLK,
+ CLKSRC_MII3_RX_CLK,
+ CLKSRC_MII4_RX_CLK,
+ };
+
+ /* Payload for packed_buf */
+ mii_rx_clk.clksrc = clk_sources[port];
+ mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->mii_rx_clk[port], packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ /* Payload for packed_buf */
+ mii_ext_tx_clk.clksrc = clk_sources[port];
+ mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->mii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ /* Payload for packed_buf */
+ mii_ext_rx_clk.clksrc = clk_sources[port];
+ mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->mii_ext_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring MII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* If role is MAC, disable IDIV
+ * If role is PHY, enable IDIV and configure for 1/1 divider
+ */
+ rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_TX_CLK_n
+ * * If role is MAC, select TX_CLK_n
+ * * If role is PHY, select IDIV_n
+ */
+ rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_RX_CLK_n
+ * Select RX_CLK_n
+ */
+ rc = sja1105_cgu_mii_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ if (role == XMII_PHY) {
+ /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */
+
+ /* Configure CLKSRC of EXT_TX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of EXT_RX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static void
+sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->msel, 23, 16, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->psel, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->direct, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op);
+ sja1105_packing(buf, &cmd->bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
+ int port, sja1105_speed_t speed)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl txc;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (speed == SJA1105_SPEED_1000MBPS) {
+ clksrc = CLKSRC_PLL0;
+ } else {
+ int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
+ CLKSRC_IDIV3, CLKSRC_IDIV4};
+ clksrc = clk_sources[port];
+ }
+
+ /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
+ txc.clksrc = clksrc;
+ /* Autoblock clk while changing clksrc */
+ txc.autoblock = 1;
+ /* Power Down off => enabled */
+ txc.pd = 0;
+ sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->rgmii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+/* AGU */
+static void
+sja1105_cfg_pad_mii_tx_packing(void *buf, struct sja1105_cfg_pad_mii_tx *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
+ sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
+ sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
+ sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
+ sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
+ sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op);
+}
+
+static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_tx pad_mii_tx;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload */
+ pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */
+ pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
+ pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
+ pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
+ pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
+ sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->rgmii_pad_mii_tx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port)
+{
+ struct device *dev = priv->ds->dev;
+ struct sja1105_mac_config_entry *mac;
+ sja1105_speed_t speed;
+ int rc;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+ speed = mac[port].speed;
+
+ dev_dbg(dev, "Configuring port %d RGMII at speed %dMbps\n",
+ port, speed);
+
+ switch (speed) {
+ case SJA1105_SPEED_1000MBPS:
+ /* 1000Mbps, IDIV disabled (125 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ break;
+ case SJA1105_SPEED_100MBPS:
+ /* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 1);
+ break;
+ case SJA1105_SPEED_10MBPS:
+ /* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 10);
+ break;
+ case SJA1105_SPEED_AUTO:
+ /* Skip CGU configuration if there is no speed available
+ * (e.g. link is not established yet)
+ */
+ dev_dbg(dev, "Speed not available, skipping CGU config\n");
+ return 0;
+ default:
+ rc = -EINVAL;
+ }
+
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure idiv\n");
+ return rc;
+ }
+ rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure RGMII Tx clock\n");
+ return rc;
+ }
+ rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure Tx pad registers\n");
+ return rc;
+ }
+ if (!priv->info->setup_rgmii_delay)
+ return 0;
+
+ return priv->info->setup_rgmii_delay(priv, port);
+}
+
+static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ref_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+
+ /* Payload for packed_buf */
+ ref_clk.clksrc = clk_sources[port];
+ ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ref_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->rmii_ref_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload for packed_buf */
+ ext_tx_clk.clksrc = CLKSRC_PLL1;
+ ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->rmii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1105_cgu_pll_ctrl pll = {0};
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ /* PLL1 must be enabled and output 50 MHz.
+ * This is done by writing first 0x0A010941 to
+ * the PLL_1_C register and then deasserting
+ * power down (PD) 0x0A010940.
+ */
+
+ /* Step 1: PLL1 setup for 50 MHz */
+ pll.pllclksrc = 0xA;
+ pll.msel = 0x1;
+ pll.autoblock = 0x1;
+ pll.psel = 0x1;
+ pll.direct = 0x0;
+ pll.fbsel = 0x1;
+ pll.bypass = 0x0;
+ pll.pd = 0x1;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to configure PLL1 for 50MHz\n");
+ return rc;
+ }
+
+ /* Step 2: Enable PLL1 */
+ pll.pd = 0x0;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable PLL1\n");
+ return rc;
+ }
+ return rc;
+}
+
+static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring RMII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* AH1601.pdf chapter 2.5.1. Sources */
+ if (role == XMII_MAC) {
+ /* Configure and enable PLL1 for 50 MHz output */
+ rc = sja1105_cgu_rmii_pll_config(priv);
+ if (rc < 0)
+ return rc;
+ }
+ /* Disable IDIV for this port */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ if (rc < 0)
+ return rc;
+ /* Source to sink mappings */
+ rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ if (role == XMII_MAC) {
+ rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
+{
+ struct sja1105_xmii_params_entry *mii;
+ struct device *dev = priv->ds->dev;
+ sja1105_phy_interface_t phy_mode;
+ sja1105_mii_role_t role;
+ int rc;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ /* RGMII etc */
+ phy_mode = mii->xmii_mode[port];
+ /* MAC or PHY, for applicable types (not RGMII) */
+ role = mii->phy_mac[port];
+
+ switch (phy_mode) {
+ case XMII_MODE_MII:
+ rc = sja1105_mii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RMII:
+ rc = sja1105_rmii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RGMII:
+ rc = sja1105_rgmii_clocking_setup(priv, port);
+ break;
+ default:
+ dev_err(dev, "Invalid interface mode specified: %d\n",
+ phy_mode);
+ return -EINVAL;
+ }
+ if (rc)
+ dev_err(dev, "Clocking setup for port %d failed: %d\n",
+ port, rc);
+ return rc;
+}
+
+int sja1105_clocking_setup(struct sja1105_private *priv)
+{
+ int port, rc;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ rc = sja1105_clocking_setup_port(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
new file mode 100644
index 000000000000..e73ab28bf632
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+#define SJA1105_SIZE_DYN_CMD 4
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY)
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY)
+
+#define SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY)
+
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105_MAX_DYN_CMD_SIZE \
+ SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
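+ /* The packed buffer holds the entry area first; p points at the
+ * 4-byte command word that follows it.
+ */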
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - The hardware takes the 'index' field within
+ * struct sja1105_l2_lookup_entry as the index on which this command
+ * will operate. However it will ignore everything else, so 'index'
+ * is logically part of command but physically part of entry.
+ * Populate the 'index' entry field from within the command callback,
+ * such that our API doesn't need to ask for a full-blown entry
+ * structure when e.g. a delete is requested.
+ */
+ sja1105_packing(buf, &cmd->index, 29, 20,
+ SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
+ /* TODO hostcmd */
+}
+
+static void
+sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above. */
+ sja1105_packing(buf, &cmd->index, 29, 20,
+ SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
+}
+
+static void
+sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105et_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+
+ /* UM10944: To specify if a PTP egress timestamp shall be captured on
+ * each port upon transmission of the frame, the LSB of VLANID in the
+ * ENTRY field provided by the host must be set.
+ * Bit 1 of VLANID then specifies the register where the timestamp for
+ * this port is stored in.
+ */
+ sja1105_packing(buf, &entry->tsreg, 85, 85, size, op);
+ sja1105_packing(buf, &entry->takets, 84, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ return size;
+}
+
+/* In E/T, entry is at addresses 0x27-0x28. There is a 4 byte gap at 0x29,
+ * and command is at 0x2a. Similarly in P/Q/R/S there is a 1 register gap
+ * between entry (0x2d, 0x2e) and command (0x30).
+ */
+static void
+sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above, applied for 'vlanid' field of
+ * struct sja1105_vlan_lookup_entry.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op);
+}
+
+static void
+sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
+sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+
+ sja1105_packing(reg1, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(reg1, &cmd->index, 26, 24, size, op);
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+ u8 *reg2 = buf;
+
+ sja1105_packing(reg1, &entry->speed, 30, 29, size, op);
+ sja1105_packing(reg1, &entry->drpdtag, 23, 23, size, op);
+ sja1105_packing(reg1, &entry->drpuntag, 22, 22, size, op);
+ sja1105_packing(reg1, &entry->retag, 21, 21, size, op);
+ sja1105_packing(reg1, &entry->dyn_learn, 20, 20, size, op);
+ sja1105_packing(reg1, &entry->egress, 19, 19, size, op);
+ sja1105_packing(reg1, &entry->ingress, 18, 18, size, op);
+ sja1105_packing(reg1, &entry->ing_mirr, 17, 17, size, op);
+ sja1105_packing(reg1, &entry->egr_mirr, 16, 16, size, op);
+ sja1105_packing(reg1, &entry->vlanprio, 14, 12, size, op);
+ sja1105_packing(reg1, &entry->vlanid, 11, 0, size, op);
+ sja1105_packing(reg2, &entry->tp_delin, 31, 16, size, op);
+ sja1105_packing(reg2, &entry->tp_delout, 15, 0, size, op);
+ /* MAC configuration table entries which can't be reconfigured:
+ * top, base, enabled, ifg, maxage, drpnona664
+ */
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 2, 0, size, op);
+}
+
+static void
+sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ sja1105_packing(buf, &cmd->valid, 31, 31,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->poly, 7, 0,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+}
+
+static size_t
+sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &entry->mirr_port, 2, 0, size, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+#define OP_READ BIT(0)
+#define OP_WRITE BIT(1)
+#define OP_DEL BIT(2)
+
+/* SJA1105E/T: First generation */
+struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105et_l2_lookup_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105et_mgmt_route_entry_packing,
+ .cmd_packing = sja1105et_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_L2_POLICING] = {0},
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x27,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105et_mac_config_entry_packing,
+ .cmd_packing = sja1105et_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x36,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105et_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {0},
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105et_general_params_entry_packing,
+ .cmd_packing = sja1105et_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x34,
+ },
+ [BLK_IDX_XMII_PARAMS] = {0},
+};
+
+/* SJA1105P/Q/R/S: Second generation: TODO */
+struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105pqrs_l2_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_L2_POLICING] = {0},
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x2D,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x2A,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105pqrs_mac_config_entry_packing,
+ .cmd_packing = sja1105pqrs_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x4B,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105et_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {0},
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105et_general_params_entry_packing,
+ .cmd_packing = sja1105et_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x34,
+ },
+ [BLK_IDX_XMII_PARAMS] = {0},
+};
+
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int retries = 3;
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= ops->max_entry_count)
+ return -ERANGE;
+ if (!(ops->access & OP_READ))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+
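+ /* Request the read: send a command word with VALID set and the
+ * target index, then poll below until the hardware clears VALID
+ * and returns the entry in the same buffer.
+ */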
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_READ; /* Action is read */
+ cmd.index = index;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ /* Send SPI write operation: read config table entry */
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
+ packed_buf, ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ /* Loop until we have confirmation that hardware has finished
+ * processing the command and has cleared the VALID field
+ */
+ do {
+ memset(packed_buf, 0, ops->packed_size);
+
+ /* Retrieve the read operation's result */
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, ops->addr,
+ packed_buf, ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ cmd = (struct sja1105_dyn_cmd) {0};
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
+ /* UM10944: [valident] will always be found cleared
+ * during a read access with MGMTROUTE set.
+ * So don't error out in that case.
+ */
+ if (!cmd.valident && blk_idx != BLK_IDX_MGMT_ROUTE)
+ return -EINVAL;
+ cpu_relax();
+ } while (cmd.valid && --retries);
+
+ if (cmd.valid)
+ return -ETIMEDOUT;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+ return 0;
+}
+
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= ops->max_entry_count)
+ return -ERANGE;
+ if (!(ops->access & OP_WRITE))
+ return -EOPNOTSUPP;
+ if (!keep && !(ops->access & OP_DEL))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+
+ cmd.valident = keep; /* If false, deletes entry */
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_WRITE; /* Action is write */
+ cmd.index = index;
+
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+ /* Don't dereference potentially NULL pointer if just
+ * deleting a table entry is what was requested. For cases
+ * where 'index' field is physically part of entry structure,
+ * and needed here, we deal with that in the cmd_packing callback.
+ */
+ if (keep)
+ ops->entry_packing(packed_buf, entry, PACK);
+
+ /* Send SPI write operation: write config table entry */
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
+ packed_buf, ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ cmd = (struct sja1105_dyn_cmd) {0};
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
+ if (cmd.errors)
+ return -EINVAL;
+
+ return 0;
+}
+
+static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
+{
+ int i;
+
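+ /* MSB-first CRC-8 update: shift in one message byte, XOR-ing in
+ * the polynomial whenever the top bit of (crc ^ byte) is set.
+ */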
+ for (i = 0; i < 8; i++) {
+ if ((crc ^ byte) & (1 << 7)) {
+ crc <<= 1;
+ crc ^= poly;
+ } else {
+ crc <<= 1;
+ }
+ byte <<= 1;
+ }
+ return crc;
+}
+
+/* CRC8 algorithm with non-reversed input, non-reversed output,
+ * no input XOR and no output XOR. The code is specialized for hashing
+ * the SJA1105 E/T FDB key (vlanid, macaddr). The CRC polynomial is
+ * passed as an argument in the Koopman notation in which the switch
+ * hardware stores it.
+ */
+u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params =
+ priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
+ u64 poly_koopman = l2_lookup_params->poly;
+ /* Convert polynomial from Koopman to 'normal' notation */
+ u8 poly = (u8)(1 + (poly_koopman << 1));
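+ /* e.g. the 0x97 default programmed by the static config (x^8 + x^5 +
+ * x^3 + x^2 + x + 1 in Koopman form) becomes 0x2F here; the leading
+ * x^8 term is implicit in the 8-bit shift loop of sja1105_crc8_add().
+ */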
+ u64 vlanid = l2_lookup_params->shared_learn ? 0 : vid;
+ u64 input = (vlanid << 48) | ether_addr_to_u64(addr);
+ u8 crc = 0; /* seed */
+ int i;
+
+ /* Mask the eight bytes starting from MSB one at a time */
+ for (i = 56; i >= 0; i -= 8) {
+ u8 byte = (input & (0xffull << i)) >> i;
+
+ crc = sja1105_crc8_add(crc, byte, poly);
+ }
+ return crc;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
new file mode 100644
index 000000000000..77be59546a55
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_DYNAMIC_CONFIG_H
+#define _SJA1105_DYNAMIC_CONFIG_H
+
+#include "sja1105.h"
+#include <linux/packing.h>
+
+struct sja1105_dyn_cmd {
+ u64 valid;
+ u64 rdwrset;
+ u64 errors;
+ u64 valident;
+ u64 index;
+};
+
+struct sja1105_dynamic_table_ops {
+ /* This returns size_t just to keep same prototype as the
+ * static config ops, of which we are reusing some functions.
+ */
+ size_t (*entry_packing)(void *buf, void *entry_ptr, enum packing_op op);
+ void (*cmd_packing)(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op);
+ size_t max_entry_count;
+ size_t packed_size;
+ u64 addr;
+ u8 access;
+};
+
+struct sja1105_mgmt_entry {
+ u64 tsreg;
+ u64 takets;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+};
+
+extern struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
+extern struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
new file mode 100644
index 000000000000..ab581a28cd41
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+#define SJA1105_SIZE_MAC_AREA (0x02 * 4)
+#define SJA1105_SIZE_HL1_AREA (0x10 * 4)
+#define SJA1105_SIZE_HL2_AREA (0x4 * 4)
+#define SJA1105_SIZE_QLEVEL_AREA (0x8 * 4) /* 0x4 to 0xB */
+
+struct sja1105_port_status_mac {
+ u64 n_runt;
+ u64 n_soferr;
+ u64 n_alignerr;
+ u64 n_miierr;
+ u64 typeerr;
+ u64 sizeerr;
+ u64 tctimeout;
+ u64 priorerr;
+ u64 nomaster;
+ u64 memov;
+ u64 memerr;
+ u64 invtyp;
+ u64 intcyov;
+ u64 domerr;
+ u64 pcfbagdrop;
+ u64 spcprior;
+ u64 ageprior;
+ u64 portdrop;
+ u64 lendrop;
+ u64 bagdrop;
+ u64 policeerr;
+ u64 drpnona664err;
+ u64 spcerr;
+ u64 agedrp;
+};
+
+struct sja1105_port_status_hl1 {
+ u64 n_n664err;
+ u64 n_vlanerr;
+ u64 n_unreleased;
+ u64 n_sizeerr;
+ u64 n_crcerr;
+ u64 n_vlnotfound;
+ u64 n_ctpolerr;
+ u64 n_polerr;
+ u64 n_rxfrmsh;
+ u64 n_rxfrm;
+ u64 n_rxbytesh;
+ u64 n_rxbyte;
+ u64 n_txfrmsh;
+ u64 n_txfrm;
+ u64 n_txbytesh;
+ u64 n_txbyte;
+};
+
+struct sja1105_port_status_hl2 {
+ u64 n_qfull;
+ u64 n_part_drop;
+ u64 n_egr_disabled;
+ u64 n_not_reach;
+ u64 qlevel_hwm[8]; /* Only for P/Q/R/S */
+ u64 qlevel[8]; /* Only for P/Q/R/S */
+};
+
+struct sja1105_port_status {
+ struct sja1105_port_status_mac mac;
+ struct sja1105_port_status_hl1 hl1;
+ struct sja1105_port_status_hl2 hl2;
+};
+
+static void
+sja1105_port_status_mac_unpack(void *buf,
+ struct sja1105_port_status_mac *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x0, &status->n_runt, 31, 24, 4);
+ sja1105_unpack(p + 0x0, &status->n_soferr, 23, 16, 4);
+ sja1105_unpack(p + 0x0, &status->n_alignerr, 15, 8, 4);
+ sja1105_unpack(p + 0x0, &status->n_miierr, 7, 0, 4);
+ sja1105_unpack(p + 0x1, &status->typeerr, 27, 27, 4);
+ sja1105_unpack(p + 0x1, &status->sizeerr, 26, 26, 4);
+ sja1105_unpack(p + 0x1, &status->tctimeout, 25, 25, 4);
+ sja1105_unpack(p + 0x1, &status->priorerr, 24, 24, 4);
+ sja1105_unpack(p + 0x1, &status->nomaster, 23, 23, 4);
+ sja1105_unpack(p + 0x1, &status->memov, 22, 22, 4);
+ sja1105_unpack(p + 0x1, &status->memerr, 21, 21, 4);
+ sja1105_unpack(p + 0x1, &status->invtyp, 19, 19, 4);
+ sja1105_unpack(p + 0x1, &status->intcyov, 18, 18, 4);
+ sja1105_unpack(p + 0x1, &status->domerr, 17, 17, 4);
+ sja1105_unpack(p + 0x1, &status->pcfbagdrop, 16, 16, 4);
+ sja1105_unpack(p + 0x1, &status->spcprior, 15, 12, 4);
+ sja1105_unpack(p + 0x1, &status->ageprior, 11, 8, 4);
+ sja1105_unpack(p + 0x1, &status->portdrop, 6, 6, 4);
+ sja1105_unpack(p + 0x1, &status->lendrop, 5, 5, 4);
+ sja1105_unpack(p + 0x1, &status->bagdrop, 4, 4, 4);
+ sja1105_unpack(p + 0x1, &status->policeerr, 3, 3, 4);
+ sja1105_unpack(p + 0x1, &status->drpnona664err, 2, 2, 4);
+ sja1105_unpack(p + 0x1, &status->spcerr, 1, 1, 4);
+ sja1105_unpack(p + 0x1, &status->agedrp, 0, 0, 4);
+}
+
+static void
+sja1105_port_status_hl1_unpack(void *buf,
+ struct sja1105_port_status_hl1 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0xF, &status->n_n664err, 31, 0, 4);
+ sja1105_unpack(p + 0xE, &status->n_vlanerr, 31, 0, 4);
+ sja1105_unpack(p + 0xD, &status->n_unreleased, 31, 0, 4);
+ sja1105_unpack(p + 0xC, &status->n_sizeerr, 31, 0, 4);
+ sja1105_unpack(p + 0xB, &status->n_crcerr, 31, 0, 4);
+ sja1105_unpack(p + 0xA, &status->n_vlnotfound, 31, 0, 4);
+ sja1105_unpack(p + 0x9, &status->n_ctpolerr, 31, 0, 4);
+ sja1105_unpack(p + 0x8, &status->n_polerr, 31, 0, 4);
+ sja1105_unpack(p + 0x7, &status->n_rxfrmsh, 31, 0, 4);
+ sja1105_unpack(p + 0x6, &status->n_rxfrm, 31, 0, 4);
+ sja1105_unpack(p + 0x5, &status->n_rxbytesh, 31, 0, 4);
+ sja1105_unpack(p + 0x4, &status->n_rxbyte, 31, 0, 4);
+ sja1105_unpack(p + 0x3, &status->n_txfrmsh, 31, 0, 4);
+ sja1105_unpack(p + 0x2, &status->n_txfrm, 31, 0, 4);
+ sja1105_unpack(p + 0x1, &status->n_txbytesh, 31, 0, 4);
+ sja1105_unpack(p + 0x0, &status->n_txbyte, 31, 0, 4);
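+ /* Fold the 32-bit high counter halves into the 64-bit totals */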
+ status->n_rxfrm += status->n_rxfrmsh << 32;
+ status->n_rxbyte += status->n_rxbytesh << 32;
+ status->n_txfrm += status->n_txfrmsh << 32;
+ status->n_txbyte += status->n_txbytesh << 32;
+}
+
+static void
+sja1105_port_status_hl2_unpack(void *buf,
+ struct sja1105_port_status_hl2 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x3, &status->n_qfull, 31, 0, 4);
+ sja1105_unpack(p + 0x2, &status->n_part_drop, 31, 0, 4);
+ sja1105_unpack(p + 0x1, &status->n_egr_disabled, 31, 0, 4);
+ sja1105_unpack(p + 0x0, &status->n_not_reach, 31, 0, 4);
+}
+
+static void
+sja1105pqrs_port_status_qlevel_unpack(void *buf,
+ struct sja1105_port_status_hl2 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+ int i;
+
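+ /* Each per-queue register reports the high watermark in bits 24:16
+ * and the current fill level in bits 8:0.
+ */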
+ for (i = 0; i < 8; i++) {
+ sja1105_unpack(p + i, &status->qlevel_hwm[i], 24, 16, 4);
+ sja1105_unpack(p + i, &status->qlevel[i], 8, 0, 4);
+ }
+}
+
+static int sja1105_port_status_get_mac(struct sja1105_private *priv,
+ struct sja1105_port_status_mac *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_MAC_AREA] = {0};
+ int rc;
+
+ /* MAC area */
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac[port],
+ packed_buf, SJA1105_SIZE_MAC_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_mac_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get_hl1(struct sja1105_private *priv,
+ struct sja1105_port_status_hl1 *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0};
+ int rc;
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl1[port],
+ packed_buf, SJA1105_SIZE_HL1_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_hl1_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
+ struct sja1105_port_status_hl2 *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0};
+ int rc;
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl2[port],
+ packed_buf, SJA1105_SIZE_HL2_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_hl2_unpack(packed_buf, status);
+
+ /* Code below is strictly P/Q/R/S specific. */
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return 0;
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->qlevel[port],
+ packed_buf, SJA1105_SIZE_QLEVEL_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105pqrs_port_status_qlevel_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get(struct sja1105_private *priv,
+ struct sja1105_port_status *status,
+ int port)
+{
+ int rc;
+
+ rc = sja1105_port_status_get_mac(priv, &status->mac, port);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_port_status_get_hl1(priv, &status->hl1, port);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_port_status_get_hl2(priv, &status->hl2, port);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static char sja1105_port_stats[][ETH_GSTRING_LEN] = {
+ /* MAC-Level Diagnostic Counters */
+ "n_runt",
+ "n_soferr",
+ "n_alignerr",
+ "n_miierr",
+ /* MAC-Level Diagnostic Flags */
+ "typeerr",
+ "sizeerr",
+ "tctimeout",
+ "priorerr",
+ "nomaster",
+ "memov",
+ "memerr",
+ "invtyp",
+ "intcyov",
+ "domerr",
+ "pcfbagdrop",
+ "spcprior",
+ "ageprior",
+ "portdrop",
+ "lendrop",
+ "bagdrop",
+ "policeerr",
+ "drpnona664err",
+ "spcerr",
+ "agedrp",
+ /* High-Level Diagnostic Counters */
+ "n_n664err",
+ "n_vlanerr",
+ "n_unreleased",
+ "n_sizeerr",
+ "n_crcerr",
+ "n_vlnotfound",
+ "n_ctpolerr",
+ "n_polerr",
+ "n_rxfrm",
+ "n_rxbyte",
+ "n_txfrm",
+ "n_txbyte",
+ "n_qfull",
+ "n_part_drop",
+ "n_egr_disabled",
+ "n_not_reach",
+};
+
+static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = {
+ /* Queue Levels */
+ "qlevel_hwm_0",
+ "qlevel_hwm_1",
+ "qlevel_hwm_2",
+ "qlevel_hwm_3",
+ "qlevel_hwm_4",
+ "qlevel_hwm_5",
+ "qlevel_hwm_6",
+ "qlevel_hwm_7",
+ "qlevel_0",
+ "qlevel_1",
+ "qlevel_2",
+ "qlevel_3",
+ "qlevel_4",
+ "qlevel_5",
+ "qlevel_6",
+ "qlevel_7",
+};
+
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port_status status;
+ int rc, i, k = 0;
+
+ memset(&status, 0, sizeof(status));
+
+ rc = sja1105_port_status_get(priv, &status, port);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read port %d counters: %d\n",
+ port, rc);
+ return;
+ }
+ memset(data, 0, ARRAY_SIZE(sja1105_port_stats) * sizeof(u64));
+ data[k++] = status.mac.n_runt;
+ data[k++] = status.mac.n_soferr;
+ data[k++] = status.mac.n_alignerr;
+ data[k++] = status.mac.n_miierr;
+ data[k++] = status.mac.typeerr;
+ data[k++] = status.mac.sizeerr;
+ data[k++] = status.mac.tctimeout;
+ data[k++] = status.mac.priorerr;
+ data[k++] = status.mac.nomaster;
+ data[k++] = status.mac.memov;
+ data[k++] = status.mac.memerr;
+ data[k++] = status.mac.invtyp;
+ data[k++] = status.mac.intcyov;
+ data[k++] = status.mac.domerr;
+ data[k++] = status.mac.pcfbagdrop;
+ data[k++] = status.mac.spcprior;
+ data[k++] = status.mac.ageprior;
+ data[k++] = status.mac.portdrop;
+ data[k++] = status.mac.lendrop;
+ data[k++] = status.mac.bagdrop;
+ data[k++] = status.mac.policeerr;
+ data[k++] = status.mac.drpnona664err;
+ data[k++] = status.mac.spcerr;
+ data[k++] = status.mac.agedrp;
+ data[k++] = status.hl1.n_n664err;
+ data[k++] = status.hl1.n_vlanerr;
+ data[k++] = status.hl1.n_unreleased;
+ data[k++] = status.hl1.n_sizeerr;
+ data[k++] = status.hl1.n_crcerr;
+ data[k++] = status.hl1.n_vlnotfound;
+ data[k++] = status.hl1.n_ctpolerr;
+ data[k++] = status.hl1.n_polerr;
+ data[k++] = status.hl1.n_rxfrm;
+ data[k++] = status.hl1.n_rxbyte;
+ data[k++] = status.hl1.n_txfrm;
+ data[k++] = status.hl1.n_txbyte;
+ data[k++] = status.hl2.n_qfull;
+ data[k++] = status.hl2.n_part_drop;
+ data[k++] = status.hl2.n_egr_disabled;
+ data[k++] = status.hl2.n_not_reach;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return;
+
+ memset(data + k, 0, ARRAY_SIZE(sja1105pqrs_extra_port_stats) *
+ sizeof(u64));
+ for (i = 0; i < 8; i++) {
+ data[k++] = status.hl2.qlevel_hwm[i];
+ data[k++] = status.hl2.qlevel[i];
+ }
+}
+
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(sja1105_port_stats); i++) {
+ strlcpy(p, sja1105_port_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return;
+ for (i = 0; i < ARRAY_SIZE(sja1105pqrs_extra_port_stats); i++) {
+ strlcpy(p, sja1105pqrs_extra_port_stats[i],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ int count = ARRAY_SIZE(sja1105_port_stats);
+ struct sja1105_private *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ if (priv->info->device_id == SJA1105PR_DEVICE_ID ||
+ priv->info->device_id == SJA1105QS_DEVICE_ID)
+ count += ARRAY_SIZE(sja1105pqrs_extra_port_stats);
+
+ return count;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
new file mode 100644
index 000000000000..0663b78a2f6c
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -0,0 +1,1679 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/spi/spi.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/phylink.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105.h"
+
+static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
+ unsigned int startup_delay)
+{
+ gpiod_set_value_cansleep(gpio, 1);
+ /* Wait for minimum reset pulse length */
+ msleep(pulse_len);
+ gpiod_set_value_cansleep(gpio, 0);
+ /* Wait until chip is ready after reset */
+ msleep(startup_delay);
+}
+
+static void
+sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
+ int from, int to, bool allow)
+{
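+ /* Grant or revoke 'from' -> 'to' reachability in the broadcast
+ * domain, the reachable-ports mask and the flood domain alike.
+ */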
+ if (allow) {
+ l2_fwd[from].bc_domain |= BIT(to);
+ l2_fwd[from].reach_port |= BIT(to);
+ l2_fwd[from].fl_domain |= BIT(to);
+ } else {
+ l2_fwd[from].bc_domain &= ~BIT(to);
+ l2_fwd[from].reach_port &= ~BIT(to);
+ l2_fwd[from].fl_domain &= ~BIT(to);
+ }
+}
+
+/* Structure used to temporarily transport device tree
+ * settings into sja1105_setup
+ */
+struct sja1105_dt_port {
+ phy_interface_t phy_mode;
+ sja1105_mii_role_t role;
+};
+
+static int sja1105_init_mac_settings(struct sja1105_private *priv)
+{
+ struct sja1105_mac_config_entry default_mac = {
+ /* Enable all 8 priority queues on egress.
+ * Each queue i covers frame memory addresses base[i] through top[i];
+ * together the 8 queues span the full 0..0x1FF (511) range, the
+ * hardware limit.
+ */
+ .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
+ .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
+ .enabled = {true, true, true, true, true, true, true, true},
+ /* Keep standard IFG of 12 bytes on egress. */
+ .ifg = 0,
+ /* Always put the MAC speed in automatic mode, where it can be
+ * retrieved from the PHY object through phylib and
+ * sja1105_adjust_port_config.
+ */
+ .speed = SJA1105_SPEED_AUTO,
+ /* No static correction for 1-step 1588 events */
+ .tp_delin = 0,
+ .tp_delout = 0,
+ /* Disable aging for critical TTEthernet traffic */
+ .maxage = 0xFF,
+ /* Internal VLAN (pvid) to apply to untagged ingress */
+ .vlanprio = 0,
+ .vlanid = 0,
+ .ing_mirr = false,
+ .egr_mirr = false,
+ /* Don't drop traffic with other EtherType than ETH_P_IP */
+ .drpnona664 = false,
+ /* Don't drop double-tagged traffic */
+ .drpdtag = false,
+ /* Don't drop untagged traffic */
+ .drpuntag = false,
+ /* Don't retag 802.1p (VID 0) traffic with the pvid */
+ .retag = false,
+ /* Disable learning and I/O on user ports by default -
+ * STP will enable it.
+ */
+ .dyn_learn = false,
+ .egress = false,
+ .ingress = false,
+ };
+ struct sja1105_mac_config_entry *mac;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
+
+ /* Discard previous MAC Configuration Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_NUM_PORTS,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ /* Override table based on phylib DT bindings */
+ table->entry_count = SJA1105_NUM_PORTS;
+
+ mac = table->entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ mac[i] = default_mac;
+ if (i == dsa_upstream_port(priv->ds, i)) {
+ /* STP doesn't get called for CPU port, so we need to
+ * set the I/O parameters statically.
+ */
+ mac[i].dyn_learn = true;
+ mac[i].ingress = true;
+ mac[i].egress = true;
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_init_mii_settings(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_xmii_params_entry *mii;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
+
+ /* Discard previous xMII Mode Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ /* Override table based on phylib DT bindings */
+ table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
+
+ mii = table->entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ switch (ports[i].phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ mii->xmii_mode[i] = XMII_MODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mii->xmii_mode[i] = XMII_MODE_RGMII;
+ break;
+ default:
+ dev_err(dev, "Unsupported PHY mode %s!\n",
+ phy_modes(ports[i].phy_mode));
+ }
+
+ mii->phy_mac[i] = ports[i].role;
+ }
+ return 0;
+}
+
+static int sja1105_init_static_fdb(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ /* We only populate the FDB table through dynamic
+ * L2 Address Lookup entries
+ */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+ return 0;
+}
+
+static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+ struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
+ /* Learned FDB entries are forgotten after 300 seconds */
+ .maxage = SJA1105_AGEING_TIME_MS(300000),
+ /* All entries within a FDB bin are available for learning */
+ .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
+ /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
+ .poly = 0x97,
+ /* This selects between Independent VLAN Learning (IVL) and
+ * Shared VLAN Learning (SVL)
+ */
+ .shared_learn = false,
+ /* Don't discard management traffic based on ENFPORT -
+ * we don't perform SMAC port enforcement anyway, so
+ * what we are setting here doesn't matter.
+ */
+ .no_enf_hostprt = false,
+ /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
+ * Maybe correlate with no_linklocal_learn from bridge driver?
+ */
+ .no_mgmt_learn = true,
+ };
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
+ default_l2_lookup_params;
+
+ return 0;
+}
+
+static int sja1105_init_static_vlan(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+ struct sja1105_vlan_lookup_entry pvid = {
+ .ving_mirr = 0,
+ .vegr_mirr = 0,
+ .vmemb_port = 0,
+ .vlan_bc = 0,
+ .tag_port = 0,
+ .vlanid = 0,
+ };
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ /* The static VLAN table will only contain the initial pvid of 0.
+ * All other VLANs are to be configured through dynamic entries,
+ * and kept in the static configuration table as backing memory.
+ * The pvid of 0 is sufficient to pass traffic while the ports are
+ * standalone and when vlan_filtering is disabled. When filtering
+ * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
+ * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
+ * vlan' even when vlan_filtering is off, but it has no effect.
+ */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+
+ /* VLAN ID 0: all DT-defined ports are members; no restrictions on
+ * forwarding; always transmit priority-tagged frames as untagged.
+ */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ pvid.vmemb_port |= BIT(i);
+ pvid.vlan_bc |= BIT(i);
+ pvid.tag_port &= ~BIT(i);
+ }
+
+ ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_entry *l2fwd;
+ struct sja1105_table *table;
+ int i, j;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
+
+ l2fwd = table->entries;
+
+ /* First 5 entries define the forwarding rules */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ unsigned int upstream = dsa_upstream_port(priv->ds, i);
+
+ for (j = 0; j < SJA1105_NUM_TC; j++)
+ l2fwd[i].vlan_pmap[j] = j;
+
+ if (i == upstream)
+ continue;
+
+ sja1105_port_allow_traffic(l2fwd, i, upstream, true);
+ sja1105_port_allow_traffic(l2fwd, upstream, i, true);
+ }
+ /* Next 8 entries define VLAN PCP mapping from ingress to egress.
+ * Create a one-to-one mapping.
+ */
+ for (i = 0; i < SJA1105_NUM_TC; i++)
+ for (j = 0; j < SJA1105_NUM_PORTS; j++)
+ l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
+
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
+ /* Disallow dynamic reconfiguration of vlan_pmap */
+ .max_dynp = 0,
+ /* Use a single memory partition for all ingress queues */
+ .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
+ };
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
+ default_l2fwd_params;
+
+ return 0;
+}
+
+static int sja1105_init_general_params(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry default_general_params = {
+ /* Disallow dynamic changing of the mirror port */
+ .mirr_ptacu = 0,
+ .switchid = priv->ds->index,
+ /* Priority queue for link-local frames trapped to CPU */
+ .hostprio = 0,
+ .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
+ .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
+ .incl_srcpt1 = true,
+ .send_meta1 = false,
+ .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
+ .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
+ .incl_srcpt0 = true,
+ .send_meta0 = false,
+ /* The destination for traffic matching mac_fltres1 and
+ * mac_fltres0 on all ports except host_port. Such traffic
+ * received on host_port itself would be dropped, except
+ * by installing a temporary 'management route'
+ */
+ .host_port = dsa_upstream_port(priv->ds, 0),
+ /* Same as host port */
+ .mirr_port = dsa_upstream_port(priv->ds, 0),
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address (presumably because it
+ * is a cascaded port and a downstream SJA switch already did
+ * that). Default to an invalid port (to disable the feature)
+ * and overwrite this if we find any DSA (cascaded) ports.
+ */
+ .casc_port = SJA1105_NUM_PORTS,
+ /* No TTEthernet */
+ .vllupformat = 0,
+ .vlmarker = 0,
+ .vlmask = 0,
+ /* Only update correctionField for 1-step PTP (L2 transport) */
+ .ignore2stf = 0,
+ /* Forcefully disable VLAN filtering by telling
+ * the switch that VLAN has a different EtherType.
+ */
+ .tpid = ETH_P_SJA1105,
+ .tpid2 = ETH_P_SJA1105,
+ };
+ struct sja1105_table *table;
+ int i, k = 0;
+
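+ /* Hand out a management route slot to each user port; any cascaded
+ * (DSA) port becomes the casc_port instead.
+ */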
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (dsa_is_dsa_port(priv->ds, i))
+ default_general_params.casc_port = i;
+ else if (dsa_is_user_port(priv->ds, i))
+ priv->ports[i].mgmt_slot = k++;
+ }
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_general_params_entry *)table->entries)[0] =
+ default_general_params;
+
+ return 0;
+}
+
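+/* Scale a link speed in Mbps into the policer RATE units used below
+ * (64 units per Mbps, i.e. 15.625 kbps per unit).
+ */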
+#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
+
+static inline void
+sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
+ int index)
+{
+ policing[index].sharindx = index;
+ policing[index].smax = 65535; /* Burst size in bytes */
+ policing[index].rate = SJA1105_RATE_MBPS(1000);
+ policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ policing[index].partition = 0;
+}
+
+static int sja1105_init_l2_policing(struct sja1105_private *priv)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_table *table;
+ int i, j, k;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
+
+ /* Discard previous L2 Policing Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
+
+ policing = table->entries;
+
+ /* k sweeps through all unicast policers (0-39).
+ * bcast sweeps through policers 40-44.
+ */
+ for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
+
+ for (j = 0; j < SJA1105_NUM_TC; j++, k++)
+ sja1105_setup_policer(policing, k);
+
+ /* Set up this port's policer for broadcast traffic */
+ sja1105_setup_policer(policing, bcast);
+ }
+ return 0;
+}
+
+static int sja1105_static_config_load(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ int rc;
+
+ sja1105_static_config_free(&priv->static_config);
+ rc = sja1105_static_config_init(&priv->static_config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return rc;
+
+ /* Build static configuration */
+ rc = sja1105_init_mac_settings(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_mii_settings(priv, ports);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_fdb(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_vlan(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_lookup_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_policing(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_general_params(priv);
+ if (rc < 0)
+ return rc;
+
+ /* Send initial configuration to hardware via SPI */
+ return sja1105_static_config_upload(priv);
+}
+
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
+ const struct sja1105_dt_port *ports)
+{
+ int i;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (ports[i].role == XMII_MAC)
+ continue;
+
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_rx_delay[i] = true;
+
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_tx_delay[i] = true;
+
+ if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
+ !priv->info->setup_rgmii_delay)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sja1105_parse_ports_node(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports,
+ struct device_node *ports_node)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *child;
+
+ for_each_child_of_node(ports_node, child) {
+ struct device_node *phy_node;
+ int phy_mode;
+ u32 index;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &index) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ phy_mode = of_get_phy_mode(child);
+ if (phy_mode < 0) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ index);
+ return -ENODEV;
+ }
+ ports[index].phy_mode = phy_mode;
+
+ phy_node = of_parse_phandle(child, "phy-handle", 0);
+ if (!phy_node) {
+ if (!of_phy_is_fixed_link(child)) {
+ dev_err(dev, "phy-handle or fixed-link "
+ "properties missing!\n");
+ return -ENODEV;
+ }
+ /* phy-handle is missing, but fixed-link isn't.
+ * So it's a fixed link. Default to PHY role.
+ */
+ ports[index].role = XMII_PHY;
+ } else {
+ /* phy-handle present => put port in MAC role */
+ ports[index].role = XMII_MAC;
+ of_node_put(phy_node);
+ }
+
+ /* The MAC/PHY role can be overridden with explicit bindings */
+ if (of_property_read_bool(child, "sja1105,role-mac"))
+ ports[index].role = XMII_MAC;
+ else if (of_property_read_bool(child, "sja1105,role-phy"))
+ ports[index].role = XMII_PHY;
+ }
+
+ return 0;
+}
+
+static int sja1105_parse_dt(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *switch_node = dev->of_node;
+ struct device_node *ports_node;
+ int rc;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ return -ENODEV;
+ }
+
+ rc = sja1105_parse_ports_node(priv, ports, ports_node);
+ of_node_put(ports_node);
+
+ return rc;
+}
+
+/* Convert between the SJA1105 MAC speed encoding and the link speed in Mbps */
+static int sja1105_speed[] = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 10,
+ [SJA1105_SPEED_100MBPS] = 100,
+ [SJA1105_SPEED_1000MBPS] = 1000,
+};
+
+static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps)
+{
+ int i;
+
+ for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++)
+ if (sja1105_speed[i] == speed_mbps)
+ return i;
+ return -EINVAL;
+}
+
+/* Set link speed and enable/disable traffic I/O in the MAC configuration
+ * for a specific port.
+ *
+ * @speed_mbps: If 0, fall back to SJA1105_SPEED_AUTO, else adapt the MAC to the PHY speed.
+ * @enabled: Manage Rx and Tx settings for this port. If false, overrides the
+ * settings from the STP state, but not persistently (does not
+ * overwrite the static MAC info for this port).
+ */
+static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
+ int speed_mbps, bool enabled)
+{
+ struct sja1105_mac_config_entry dyn_mac;
+ struct sja1105_xmii_params_entry *mii;
+ struct sja1105_mac_config_entry *mac;
+ struct device *dev = priv->ds->dev;
+ sja1105_phy_interface_t phy_mode;
+ sja1105_speed_t speed;
+ int rc;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ speed = sja1105_get_speed_cfg(speed_mbps);
+ if (speed_mbps && speed < 0) {
+ dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
+ return -EINVAL;
+ }
+
+ /* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC
+ * configuration table, since this will be used for the clocking setup,
+ * and we no longer need to store it in the static config (already told
+ * hardware we want auto during upload phase).
+ */
+ if (speed_mbps)
+ mac[port].speed = speed;
+ else
+ mac[port].speed = SJA1105_SPEED_AUTO;
+
+ /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
+ * tables. On E/T, MAC reconfig tables are not readable, only writable.
+ * We have to *know* what the MAC looks like. For the sake of keeping
+ * the code common, we'll use the static configuration tables as a
+ * reasonable approximation for both E/T and P/Q/R/S.
+ */
+ dyn_mac = mac[port];
+ dyn_mac.ingress = enabled && mac[port].ingress;
+ dyn_mac.egress = enabled && mac[port].egress;
+
+ /* Write to the dynamic reconfiguration tables */
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG,
+ port, &dyn_mac, true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to write MAC config: %d\n", rc);
+ return rc;
+ }
+
+ /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
+ * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
+ * RMII no change of the clock setup is required. Actually, changing
+ * the clock setup does interrupt the clock signal for a certain time
+ * which causes trouble for all PHYs relying on this signal.
+ */
+ if (!enabled)
+ return 0;
+
+ phy_mode = mii->xmii_mode[port];
+ if (phy_mode != XMII_MODE_RGMII)
+ return 0;
+
+ return sja1105_clocking_setup_port(priv, port);
+}
+
+static void sja1105_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (!phydev->link)
+ sja1105_adjust_port_config(priv, port, 0, false);
+ else
+ sja1105_adjust_port_config(priv, port, phydev->speed, true);
+}
+
+static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ /* Construct a new mask which exhaustively contains all link features
+ * supported by the MAC, and then apply that (logical AND) to what will
+ * be advertised towards the PHY.
+ */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_xmii_params_entry *mii;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ /* The MAC does not support pause frames, and also doesn't
+ * support half-duplex traffic modes.
+ */
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, MII);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ if (mii->xmii_mode[port] == XMII_MODE_RGMII)
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
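+
+/* Net effect of the mask above (illustrative): an RGMII port advertises at
+ * most Autoneg, MII, 10baseT_Full, 100baseT_Full and 1000baseT_Full; on
+ * the other xMII modes 1000baseT_Full is masked out as well, and
+ * half-duplex and pause modes are never advertised.
+ */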
+
+/* First-generation switches have a 4-way set associative TCAM that
+ * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
+ * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
+ * For the placement of a newly learnt FDB entry, the switch selects the bin
+ * based on a hash function, and the way within that bin incrementally.
+ */
+static inline int sja1105et_fdb_index(int bin, int way)
+{
+ return bin * SJA1105ET_FDB_BIN_SIZE + way;
+}
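+
+/* For illustration (assuming SJA1105ET_FDB_BIN_SIZE == 4, per the 4-way
+ * associativity described above): bin 5, way 2 maps to FDB index
+ * 5 * 4 + 2 = 22, and the last index, 1023, corresponds to bin 255, way 3.
+ */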
+
+static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
+ const u8 *addr, u16 vid,
+ struct sja1105_l2_lookup_entry *match,
+ int *last_unused)
+{
+ int way;
+
+ for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ int index = sja1105et_fdb_index(bin, way);
+
+ /* Skip unused entries, optionally recording the last
+ * one seen into *last_unused
+ */
+ if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup)) {
+ if (last_unused)
+ *last_unused = way;
+ continue;
+ }
+
+ if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
+ l2_lookup.vlanid == vid) {
+ if (match)
+ *match = l2_lookup;
+ return way;
+ }
+ }
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int last_unused = -1;
+ int bin, way;
+
+ bin = sja1105_fdb_hash(priv, addr, vid);
+
+ way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, &last_unused);
+ if (way >= 0) {
+ /* We have an FDB entry. Is our port in the destination
+ * mask? If yes, we need to do nothing. If not, we need
+ * to rewrite the entry by adding this port to it.
+ */
+ if (l2_lookup.destports & BIT(port))
+ return 0;
+ l2_lookup.destports |= BIT(port);
+ } else {
+ /* We don't have an FDB entry. We construct a new one and
+ * try to find a place for it within the FDB table.
+ */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.destports = BIT(port);
+ l2_lookup.vlanid = vid;
+
+ if (last_unused >= 0) {
+ way = last_unused;
+ } else {
+ u8 victim;
+
+ /* Bin is full, need to evict somebody.
+ * Choose the victim at random. If you get these messages
+ * often, you may need to consider changing the
+ * distribution function:
+ * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
+ */
+ get_random_bytes(&victim, sizeof(victim));
+ way = victim % SJA1105ET_FDB_BIN_SIZE;
+ dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
+ bin, addr, way);
+ /* Evict the randomly chosen victim. Its index must be
+ * computed from the final value of way, not from the
+ * invalid way == -1 that this branch was entered with.
+ */
+ sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ sja1105et_fdb_index(bin, way),
+ NULL, false);
+ }
+ }
+ l2_lookup.index = sja1105et_fdb_index(bin, way);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+}
+
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ int index, bin, way;
+ bool keep;
+
+ bin = sja1105_fdb_hash(priv, addr, vid);
+ way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, NULL);
+ if (way < 0)
+ return 0;
+ index = sja1105et_fdb_index(bin, way);
+
+ /* We have an FDB entry. Is our port in the destination mask? If yes,
+ * we need to remove it. If the resulting port mask becomes empty, we
+ * need to completely evict the FDB entry.
+ * Otherwise we just write it back.
+ */
+ if (l2_lookup.destports & BIT(port))
+ l2_lookup.destports &= ~BIT(port);
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup, keep);
+}
+
+static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int i;
+
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ u8 macaddr[ETH_ALEN];
+ int rc;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &l2_lookup);
+ /* No fdb entry at i, not an issue */
+ if (rc == -EINVAL)
+ continue;
+ if (rc) {
+ dev_err(dev, "Failed to dump FDB: %d\n", rc);
+ return rc;
+ }
+
+ /* FDB dump callback is per port. This means we have to
+ * disregard a valid entry if it's not for this port, even if
+ * only to revisit it later. This is inefficient because the
+ * 1024-sized FDB table needs to be traversed 4 times through
+ * SPI during a 'bridge fdb show' command.
+ */
+ if (!(l2_lookup.destports & BIT(port)))
+ continue;
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ cb(macaddr, l2_lookup.vlanid, false, data);
+ }
+ return 0;
+}
+
+/* This callback needs to be present */
+static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ return 0;
+}
+
+static void sja1105_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+}
+
+static int sja1105_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
+}
+
+static int sja1105_bridge_member(struct dsa_switch *ds, int port,
+ struct net_device *br, bool member)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ struct sja1105_private *priv = ds->priv;
+ int i, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ /* Add this port to the forwarding matrix of the
+ * other ports in the same bridge, and vice versa.
+ */
+ if (!dsa_is_user_port(ds, i))
+ continue;
+ /* For the ports already under the bridge, only one thing needs
+ * to be done, and that is to add this port to their
+ * reachability domain. So we can perform the SPI write for
+ * them immediately. However, for this port itself (the one
+ * that is new to the bridge), we need to add all other ports
+ * to its reachability domain. So we do that incrementally in
+ * this loop, and perform the SPI write only at the end, once
+ * the domain contains all other bridge ports.
+ */
+ if (i == port)
+ continue;
+ if (dsa_to_port(ds, i)->bridge_dev != br)
+ continue;
+ sja1105_port_allow_traffic(l2_fwd, i, port, member);
+ sja1105_port_allow_traffic(l2_fwd, port, i, member);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ i, &l2_fwd[i], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ port, &l2_fwd[port], true);
+}
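+
+/* Example of the above (illustrative): if ports 1 and 2 are already members
+ * of the bridge and port 3 joins, the loop immediately writes the updated
+ * L2 forwarding entries of ports 1 and 2 (each now reaching port 3), while
+ * port 3's own entry accumulates ports 1 and 2 and is written once, by the
+ * final call outside the loop.
+ */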
+
+static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ /* From UM10944 description of DRPDTAG (why put this there?):
+ * "Management traffic flows to the port regardless of the state
+ * of the INGRESS flag". So BPDUs are still allowed to pass.
+ * At the moment there is no difference between DISABLED and BLOCKING.
+ */
+ mac[port].ingress = false;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LISTENING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LEARNING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = true;
+ break;
+ case BR_STATE_FORWARDING:
+ mac[port].ingress = true;
+ mac[port].egress = true;
+ mac[port].dyn_learn = true;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ return sja1105_bridge_member(ds, port, br, true);
+}
+
+static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ sja1105_bridge_member(ds, port, br, false);
+}
+
+static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
+ return BR_STATE_BLOCKING;
+ if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
+ return BR_STATE_LISTENING;
+ if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn)
+ return BR_STATE_LEARNING;
+ if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn)
+ return BR_STATE_FORWARDING;
+ /* This is really an error condition if the MAC was in none of the STP
+ * states above. But treating the port as disabled does nothing, which
+ * is adequate, and it also resets the MAC to a known state later on.
+ */
+ return BR_STATE_DISABLED;
+}
+
+/* For situations where we need to change a setting at runtime that is only
+ * available through the static configuration, resetting the switch in order
+ * to upload the new static config is unavoidable. Back up the settings we
+ * modify at runtime (currently only MAC) and restore them after uploading,
+ * such that this operation is relatively seamless.
+ */
+static int sja1105_static_config_reload(struct sja1105_private *priv)
+{
+ struct sja1105_mac_config_entry *mac;
+ int speed_mbps[SJA1105_NUM_PORTS];
+ u8 stp_state[SJA1105_NUM_PORTS];
+ int rc, i;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ /* Back up settings changed by sja1105_adjust_port_config and
+ * sja1105_bridge_stp_state_set and restore their defaults.
+ */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ speed_mbps[i] = sja1105_speed[mac[i].speed];
+ mac[i].speed = SJA1105_SPEED_AUTO;
+ if (i == dsa_upstream_port(priv->ds, i)) {
+ mac[i].ingress = true;
+ mac[i].egress = true;
+ mac[i].dyn_learn = true;
+ } else {
+ stp_state[i] = sja1105_stp_state_get(priv, i);
+ mac[i].ingress = false;
+ mac[i].egress = false;
+ mac[i].dyn_learn = false;
+ }
+ }
+
+ /* Reset switch and send updated static configuration */
+ rc = sja1105_static_config_upload(priv);
+ if (rc < 0)
+ goto out;
+
+ /* Configure the CGU (PLLs) for MII and RMII PHYs.
+ * For these interfaces there is no dynamic configuration
+ * needed, since PLLs have same settings at all speeds.
+ */
+ rc = sja1105_clocking_setup(priv);
+ if (rc < 0)
+ goto out;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ bool enabled = (speed_mbps[i] != 0);
+
+ if (i != dsa_upstream_port(priv->ds, i))
+ sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]);
+
+ rc = sja1105_adjust_port_config(priv, i, speed_mbps[i],
+ enabled);
+ if (rc < 0)
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/* The TPID setting belongs to the General Parameters table,
+ * which can only be partially reconfigured at runtime, and the TPID
+ * fields are not among the reconfigurable ones.
+ * So a switch reset is required.
+ */
+static int sja1105_change_tpid(struct sja1105_private *priv,
+ u16 tpid, u16 tpid2)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ general_params->tpid = tpid;
+ general_params->tpid2 = tpid2;
+ return sja1105_static_config_reload(priv);
+}
+
+static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ mac[port].vlanid = pvid;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ int count, i;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+ count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
+
+ for (i = 0; i < count; i++)
+ if (vlan[i].vlanid == vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
+ bool enabled, bool untagged)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ bool keep = true;
+ int match, rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ if (match < 0) {
+ /* Can't delete a missing entry. */
+ if (!enabled)
+ return 0;
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+ match = table->entry_count - 1;
+ }
+ /* Assign pointer after the resize (it's new memory) */
+ vlan = table->entries;
+ vlan[match].vlanid = vid;
+ if (enabled) {
+ vlan[match].vlan_bc |= BIT(port);
+ vlan[match].vmemb_port |= BIT(port);
+ } else {
+ vlan[match].vlan_bc &= ~BIT(port);
+ vlan[match].vmemb_port &= ~BIT(port);
+ }
+ /* Also unset tag_port if removing this VLAN was requested,
+ * just so we don't have a confusing bitmap (no practical purpose).
+ */
+ if (untagged || !enabled)
+ vlan[match].tag_port &= ~BIT(port);
+ else
+ vlan[match].tag_port |= BIT(port);
+ /* If there's no port left as member of this VLAN,
+ * it's time for it to go.
+ */
+ if (!vlan[match].vmemb_port)
+ keep = false;
+
+ dev_dbg(priv->ds->dev,
+ "%s: port %d, vid %llu, broadcast domain 0x%llx, "
+ "port members 0x%llx, tagged ports 0x%llx, keep %d\n",
+ __func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
+ vlan[match].vmemb_port, vlan[match].tag_port, keep);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], keep);
+ if (rc < 0)
+ return rc;
+
+ if (!keep)
+ return sja1105_table_delete_entry(table, match);
+
+ return 0;
+}
+
+static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
+{
+ int rc, i;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
+ i, rc);
+ return rc;
+ }
+ }
+ dev_info(ds->dev, "%s switch tagging\n",
+ enabled ? "Enabled" : "Disabled");
+ return 0;
+}
+
+static enum dsa_tag_protocol
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
+{
+ return DSA_TAG_PROTO_SJA1105;
+}
+
+/* This callback needs to be present */
+static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return 0;
+}
+
+static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (enabled)
+ /* Enable VLAN filtering. */
+ rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD);
+ else
+ /* Disable VLAN filtering. */
+ rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105);
+ if (rc)
+ dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
+
+ /* Switch port identification based on 802.1Q is only possible
+ * if we are not under a vlan_filtering bridge. So make sure
+ * the two configurations are mutually exclusive.
+ */
+ return sja1105_setup_8021q_tagging(ds, !enabled);
+}
+
+static void sja1105_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 vid;
+ int rc;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
+ vid, port, rc);
+ return;
+ }
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ rc = sja1105_pvid_apply(ds->priv, port, vid);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
+ vid, port, rc);
+ return;
+ }
+ }
+ }
+}
+
+static int sja1105_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 vid;
+ int rc;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
+ vid, port, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+/* The programming model for the SJA1105 switch is "all-at-once" via static
+ * configuration tables. Some of these can be dynamically modified at runtime,
+ * but not the xMII mode parameters table.
+ * Furthermore, some PHYs may not have crystals for generating their clocks
+ * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
+ * ref_clk pin. So port clocking needs to be initialized early, before
+ * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
+ * Setting the correct PHY link speed does not matter at this point.
+ * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
+ * bindings are not yet parsed by DSA core. We need to parse early so that we
+ * can populate the xMII mode parameters table.
+ */
+static int sja1105_setup(struct dsa_switch *ds)
+{
+ struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = sja1105_parse_dt(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
+ return rc;
+ }
+
+ /* Error out early if internal delays are required through DT
+ * and we can't apply them.
+ */
+ rc = sja1105_parse_rgmii_delays(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "RGMII delay not supported\n");
+ return rc;
+ }
+
+ /* Create and send configuration down to device */
+ rc = sja1105_static_config_load(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to load static config: %d\n", rc);
+ return rc;
+ }
+ /* Configure the CGU (PHY link modes and speeds) */
+ rc = sja1105_clocking_setup(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
+ return rc;
+ }
+ /* On SJA1105, VLAN filtering per se is always enabled in hardware.
+ * The only thing we can do to disable it is lie about what the 802.1Q
+ * EtherType is.
+ * So it will still try to apply VLAN filtering, but all ingress
+ * traffic (except frames received with EtherType of ETH_P_SJA1105)
+ * will be internally tagged with a distorted VLAN header where the
+ * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* The DSA/switchdev model brings up switch ports in standalone mode by
+ * default, and that means vlan_filtering is 0 since they're not under
+ * a bridge, so it's safe to set up switch tagging at this time.
+ */
+ return sja1105_setup_8021q_tagging(ds, true);
+}
+
+static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
+ struct sk_buff *skb)
+{
+ struct sja1105_mgmt_entry mgmt_route = {0};
+ struct sja1105_private *priv = ds->priv;
+ struct ethhdr *hdr;
+ int timeout = 10;
+ int rc;
+
+ hdr = eth_hdr(skb);
+
+ mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
+ mgmt_route.destports = BIT(port);
+ mgmt_route.enfport = 1;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, true);
+ if (rc < 0) {
+ kfree_skb(skb);
+ return rc;
+ }
+
+ /* Transfer skb to the host port. */
+ dsa_enqueue_skb(skb, ds->ports[port].slave);
+
+ /* Wait until the switch has processed the frame */
+ do {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route);
+ if (rc < 0) {
+ dev_err_ratelimited(priv->ds->dev,
+ "failed to poll for mgmt route\n");
+ continue;
+ }
+
+ /* UM10944: The ENFPORT flag of the respective entry is
+ * cleared when a match is found. The host can use this
+ * flag as an acknowledgment.
+ */
+ cpu_relax();
+ } while (mgmt_route.enfport && --timeout);
+
+ if (!timeout) {
+ /* Clean up the management route so that a follow-up
+ * frame may not match on it by mistake.
+ */
+ sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, false);
+ dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/* Deferred work is unfortunately necessary because setting up the management
+ * route cannot be done from atomic context (SPI transfer takes a sleepable
+ * lock on the bus)
+ */
+static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+ int slot = sp->mgmt_slot;
+
+ /* The tragic fact about the switch having 4x2 slots for installing
+ * management routes is that all of them except one are actually
+ * useless.
+ * If 2 slots are simultaneously configured for two BPDUs sent to the
+ * same (multicast) DMAC but on different egress ports, the switch
+ * would confuse them and redirect the first frame it receives on the
+ * CPU port towards the port configured in the numerically first slot
+ * (therefore the wrong port), and the second received frame towards
+ * the second slot (also the wrong port).
+ * So for all practical purposes, there needs to be a lock that
+ * prevents that from happening. The slot used here is otherwise
+ * arbitrary (it could just as well have been 0), but we keep the
+ * per-port assignment nonetheless, in case a smarter idea ever comes
+ * up in the future.
+ */
+ mutex_lock(&priv->mgmt_lock);
+
+ sja1105_mgmt_xmit(ds, port, slot, skb);
+
+ mutex_unlock(&priv->mgmt_lock);
+ return NETDEV_TX_OK;
+}
+
+/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
+ * which cannot be reconfigured at runtime. So a switch reset is required.
+ */
+static int sja1105_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_table *table;
+ unsigned int maxage;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+ l2_lookup_params = table->entries;
+
+ maxage = SJA1105_AGEING_TIME_MS(ageing_time);
+
+ if (l2_lookup_params->maxage == maxage)
+ return 0;
+
+ l2_lookup_params->maxage = maxage;
+
+ return sja1105_static_config_reload(priv);
+}
+
+static const struct dsa_switch_ops sja1105_switch_ops = {
+ .get_tag_protocol = sja1105_get_tag_protocol,
+ .setup = sja1105_setup,
+ .adjust_link = sja1105_adjust_link,
+ .set_ageing_time = sja1105_set_ageing_time,
+ .phylink_validate = sja1105_phylink_validate,
+ .get_strings = sja1105_get_strings,
+ .get_ethtool_stats = sja1105_get_ethtool_stats,
+ .get_sset_count = sja1105_get_sset_count,
+ .port_fdb_dump = sja1105_fdb_dump,
+ .port_fdb_add = sja1105_fdb_add,
+ .port_fdb_del = sja1105_fdb_del,
+ .port_bridge_join = sja1105_bridge_join,
+ .port_bridge_leave = sja1105_bridge_leave,
+ .port_stp_state_set = sja1105_bridge_stp_state_set,
+ .port_vlan_prepare = sja1105_vlan_prepare,
+ .port_vlan_filtering = sja1105_vlan_filtering,
+ .port_vlan_add = sja1105_vlan_add,
+ .port_vlan_del = sja1105_vlan_del,
+ .port_mdb_prepare = sja1105_mdb_prepare,
+ .port_mdb_add = sja1105_mdb_add,
+ .port_mdb_del = sja1105_mdb_del,
+ .port_deferred_xmit = sja1105_port_deferred_xmit,
+};
+
+static int sja1105_check_device_id(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
+ struct device *dev = &priv->spidev->dev;
+ u64 device_id;
+ u64 part_no;
+ int rc;
+
+ rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
+ &device_id, SJA1105_SIZE_DEVICE_ID);
+ if (rc < 0)
+ return rc;
+
+ if (device_id != priv->info->device_id) {
+ dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
+ priv->info->device_id, device_id);
+ return -ENODEV;
+ }
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
+ prod_id, SJA1105_SIZE_DEVICE_ID);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
+
+ if (part_no != priv->info->part_no) {
+ dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
+ priv->info->part_no, part_no);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int sja1105_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct sja1105_private *priv;
+ struct dsa_switch *ds;
+ int rc, i;
+
+ if (!dev->of_node) {
+ dev_err(dev, "No DTS bindings for SJA1105 driver\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Configure the optional reset pin and bring up switch */
+ priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ dev_dbg(dev, "reset-gpios not defined, ignoring\n");
+ else
+ sja1105_hw_reset(priv->reset_gpio, 1, 1);
+
+ /* Populate our driver private structure (priv) based on
+ * the device tree node that was probed (spi)
+ */
+ priv->spidev = spi;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ rc = spi_setup(spi);
+ if (rc < 0) {
+ dev_err(dev, "Could not init SPI\n");
+ return rc;
+ }
+
+ priv->info = of_device_get_match_data(dev);
+
+ /* Detect hardware device */
+ rc = sja1105_check_device_id(priv);
+ if (rc < 0) {
+ dev_err(dev, "Device ID check failed: %d\n", rc);
+ return rc;
+ }
+
+ dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
+
+ ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->ops = &sja1105_switch_ops;
+ ds->priv = priv;
+ priv->ds = ds;
+
+ /* Connections between dsa_port and sja1105_port */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ struct sja1105_port *sp = &priv->ports[i];
+
+ ds->ports[i].priv = sp;
+ sp->dp = &ds->ports[i];
+ }
+ mutex_init(&priv->mgmt_lock);
+
+ return dsa_register_switch(priv->ds);
+}
+
+static int sja1105_remove(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ dsa_unregister_switch(priv->ds);
+ sja1105_static_config_free(&priv->static_config);
+ return 0;
+}
+
+static const struct of_device_id sja1105_dt_ids[] = {
+ { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
+ { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
+ { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
+ { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
+ { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
+ { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+
+static struct spi_driver sja1105_driver = {
+ .driver = {
+ .name = "sja1105",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sja1105_dt_ids),
+ },
+ .probe = sja1105_probe,
+ .remove = sja1105_remove,
+};
+
+module_spi_driver(sja1105_driver);
+
+MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
+MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
+MODULE_DESCRIPTION("SJA1105 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
new file mode 100644
index 000000000000..2eb70b8acfc3
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/spi/spi.h>
+#include <linux/packing.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_PORT_CTRL 4
+#define SJA1105_SIZE_RESET_CMD 4
+#define SJA1105_SIZE_SPI_MSG_HEADER 4
+#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
+#define SJA1105_SIZE_SPI_TRANSFER_MAX \
+ (SJA1105_SIZE_SPI_MSG_HEADER + SJA1105_SIZE_SPI_MSG_MAXLEN)
+
+static int sja1105_spi_transfer(const struct sja1105_private *priv,
+ const void *tx, void *rx, int size)
+{
+ struct spi_device *spi = priv->spidev;
+ struct spi_transfer transfer = {
+ .tx_buf = tx,
+ .rx_buf = rx,
+ .len = size,
+ };
+ struct spi_message msg;
+ int rc;
+
+ if (size > SJA1105_SIZE_SPI_TRANSFER_MAX) {
+ dev_err(&spi->dev, "SPI message (%d) longer than max of %d\n",
+ size, SJA1105_SIZE_SPI_TRANSFER_MAX);
+ return -EMSGSIZE;
+ }
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&transfer, &msg);
+
+ rc = spi_sync(spi, &msg);
+ if (rc < 0) {
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void
+sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
+{
+ const int size = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &msg->access, 31, 31, size);
+ sja1105_pack(buf, &msg->read_count, 30, 25, size);
+ sja1105_pack(buf, &msg->address, 24, 4, size);
+}
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr, taking size_bytes from *packed_buf
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr, writing size_bytes into *packed_buf
+ *
+ * This function should only be called if it is known beforehand that
+ * @size_bytes does not exceed SJA1105_SIZE_SPI_MSG_MAXLEN. Larger packed
+ * buffers are chunked into smaller pieces by
+ * sja1105_spi_send_long_packed_buf below.
+ */
+int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ void *packed_buf, size_t size_bytes)
+{
+ u8 tx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
+ u8 rx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
+ const int msg_len = size_bytes + SJA1105_SIZE_SPI_MSG_HEADER;
+ struct sja1105_spi_message msg = {0};
+ int rc;
+
+ if (msg_len > SJA1105_SIZE_SPI_TRANSFER_MAX)
+ return -ERANGE;
+
+ msg.access = rw;
+ msg.address = reg_addr;
+ if (rw == SPI_READ)
+ msg.read_count = size_bytes / 4;
+
+ sja1105_spi_message_pack(tx_buf, &msg);
+
+ if (rw == SPI_WRITE)
+ memcpy(tx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
+ packed_buf, size_bytes);
+
+ rc = sja1105_spi_transfer(priv, tx_buf, rx_buf, msg_len);
+ if (rc < 0)
+ return rc;
+
+ if (rw == SPI_READ)
+ memcpy(packed_buf, rx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
+ size_bytes);
+
+ return 0;
+}
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr, taking size_bytes from *packed_buf
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr, writing size_bytes into *packed_buf
+ *
+ * The u64 *value is unpacked, meaning that it's stored in the native
+ * CPU endianness and directly usable by software running on the core.
+ *
+ * This is a wrapper around sja1105_spi_send_packed_buf().
+ */
+int sja1105_spi_send_int(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u64 *value, u64 size_bytes)
+{
+ u8 packed_buf[SJA1105_SIZE_SPI_MSG_MAXLEN];
+ int rc;
+
+ if (size_bytes > SJA1105_SIZE_SPI_MSG_MAXLEN)
+ return -ERANGE;
+
+ if (rw == SPI_WRITE)
+ sja1105_pack(packed_buf, value, 8 * size_bytes - 1, 0,
+ size_bytes);
+
+ rc = sja1105_spi_send_packed_buf(priv, rw, reg_addr, packed_buf,
+ size_bytes);
+
+ if (rw == SPI_READ)
+ sja1105_unpack(packed_buf, value, 8 * size_bytes - 1, 0,
+ size_bytes);
+
+ return rc;
+}
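+
+/* Minimal usage sketch, mirroring the call in sja1105_check_device_id():
+ *
+ *	u64 device_id;
+ *	int rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
+ *				      &device_id, SJA1105_SIZE_DEVICE_ID);
+ *
+ * On success, device_id holds the register contents in native CPU
+ * endianness, ready for direct comparison against priv->info->device_id.
+ */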
+
+/* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN
+ * must be sent/received. Splitting the buffer into chunks and assembling
+ * those into SPI messages is done automatically by this function.
+ */
+int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 base_addr,
+ void *packed_buf, u64 buf_len)
+{
+ struct chunk {
+ void *buf_ptr;
+ int len;
+ u64 spi_address;
+ } chunk;
+ int distance_to_end;
+ int rc;
+
+ /* Initialize chunk */
+ chunk.buf_ptr = packed_buf;
+ chunk.spi_address = base_addr;
+ chunk.len = min_t(int, buf_len, SJA1105_SIZE_SPI_MSG_MAXLEN);
+
+ while (chunk.len) {
+ rc = sja1105_spi_send_packed_buf(priv, rw, chunk.spi_address,
+ chunk.buf_ptr, chunk.len);
+ if (rc < 0)
+ return rc;
+
+ chunk.buf_ptr += chunk.len;
+ chunk.spi_address += chunk.len / 4;
+ distance_to_end = (uintptr_t)(packed_buf + buf_len -
+ chunk.buf_ptr);
+ chunk.len = min(distance_to_end, SJA1105_SIZE_SPI_MSG_MAXLEN);
+ }
+
+ return 0;
+}
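+
+/* Worked example (illustrative): with SJA1105_SIZE_SPI_MSG_MAXLEN == 256,
+ * a 600-byte buffer at SPI address A is sent as three messages: 256 bytes
+ * at A, 256 bytes at A + 64 and 88 bytes at A + 128, the address advancing
+ * in 4-byte words (chunk.len / 4) between chunks.
+ */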
+
+/* Back-ported structure from UM11040 Table 112.
+ * Reset control register (addr. 100440h)
+ * In the SJA1105 E/T, only warm_rst and cold_rst are
+ * supported (exposed in UM10944 as rst_ctrl), but the bit
+ * offsets of warm_rst and cold_rst are actually reversed.
+ */
+struct sja1105_reset_cmd {
+ u64 switch_rst;
+ u64 cfg_rst;
+ u64 car_rst;
+ u64 otp_rst;
+ u64 warm_rst;
+ u64 cold_rst;
+ u64 por_rst;
+};
+
+static void
+sja1105et_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
+{
+ const int size = SJA1105_SIZE_RESET_CMD;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &reset->cold_rst, 3, 3, size);
+ sja1105_pack(buf, &reset->warm_rst, 2, 2, size);
+}
+
+static void
+sja1105pqrs_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
+{
+ const int size = SJA1105_SIZE_RESET_CMD;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &reset->switch_rst, 8, 8, size);
+ sja1105_pack(buf, &reset->cfg_rst, 7, 7, size);
+ sja1105_pack(buf, &reset->car_rst, 5, 5, size);
+ sja1105_pack(buf, &reset->otp_rst, 4, 4, size);
+ sja1105_pack(buf, &reset->warm_rst, 3, 3, size);
+ sja1105_pack(buf, &reset->cold_rst, 2, 2, size);
+ sja1105_pack(buf, &reset->por_rst, 1, 1, size);
+}
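+
+/* For example, a cold reset request such as the one issued by
+ * sja1105_cold_reset() below packs to a single set bit in the 32-bit RGU
+ * word: bit 3 on E/T, bit 2 on P/Q/R/S, per the pack calls above.
+ */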
+
+static int sja1105et_reset_cmd(const void *ctx, const void *data)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_reset_cmd *reset = data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD];
+
+ if (reset->switch_rst ||
+ reset->cfg_rst ||
+ reset->car_rst ||
+ reset->otp_rst ||
+ reset->por_rst) {
+ dev_err(dev, "Only warm and cold reset is supported "
+ "for SJA1105 E/T!\n");
+ return -EINVAL;
+ }
+
+ if (reset->warm_rst)
+ dev_dbg(dev, "Warm reset requested\n");
+ if (reset->cold_rst)
+ dev_dbg(dev, "Cold reset requested\n");
+
+ sja1105et_reset_cmd_pack(packed_buf, reset);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
+ packed_buf, SJA1105_SIZE_RESET_CMD);
+}
+
+static int sja1105pqrs_reset_cmd(const void *ctx, const void *data)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_reset_cmd *reset = data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD];
+
+ if (reset->switch_rst)
+ dev_dbg(dev, "Main reset for all functional modules requested\n");
+ if (reset->cfg_rst)
+ dev_dbg(dev, "Chip configuration reset requested\n");
+ if (reset->car_rst)
+ dev_dbg(dev, "Clock and reset control logic reset requested\n");
+ if (reset->otp_rst)
+ dev_dbg(dev, "OTP read cycle for reading product "
+ "config settings requested\n");
+ if (reset->warm_rst)
+ dev_dbg(dev, "Warm reset requested\n");
+ if (reset->cold_rst)
+ dev_dbg(dev, "Cold reset requested\n");
+ if (reset->por_rst)
+ dev_dbg(dev, "Power-on reset requested\n");
+
+ sja1105pqrs_reset_cmd_pack(packed_buf, reset);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
+ packed_buf, SJA1105_SIZE_RESET_CMD);
+}
+
+static int sja1105_cold_reset(const struct sja1105_private *priv)
+{
+ struct sja1105_reset_cmd reset = {0};
+
+ reset.cold_rst = 1;
+ return priv->info->reset_cmd(priv, &reset);
+}
+
+static int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ const unsigned long *port_bitmap)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 inhibit_cmd;
+ int port, rc;
+
+ rc = sja1105_spi_send_int(priv, SPI_READ, regs->port_control,
+ &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+ if (rc < 0)
+ return rc;
+
+ for_each_set_bit(port, port_bitmap, SJA1105_NUM_PORTS)
+ inhibit_cmd |= BIT(port);
+
+ return sja1105_spi_send_int(priv, SPI_WRITE, regs->port_control,
+ &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+}
+
+struct sja1105_status {
+ u64 configs;
+ u64 crcchkl;
+ u64 ids;
+ u64 crcchkg;
+};
+
+/* This is not reading the entire General Status area, which is also
+ * divergent between E/T and P/Q/R/S, but only the relevant bits for
+ * ensuring that the static config upload procedure was successful.
+ */
+static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
+{
+ /* So that pointer arithmetic advances in steps of 4 bytes */
+ u32 *p = buf;
+
+ /* device_id is missing from the buffer, but we don't
+ * want to diverge from the manual definition of the
+ * register addresses, so we'll back off one step with
+ * the register pointer, and never access p[0].
+ */
+ p--;
+ sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4);
+ sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4);
+}
+
+static int sja1105_status_get(struct sja1105_private *priv,
+ struct sja1105_status *status)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[4];
+ int rc;
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
+ regs->status,
+ packed_buf, 4);
+ if (rc < 0)
+ return rc;
+
+ sja1105_status_unpack(packed_buf, status);
+
+ return 0;
+}
+
+/* Not const because unpacking priv->static_config into buffers and preparing
+ * for upload requires the recalculation of table CRCs and updating the
+ * structures with these.
+ */
+static int
+static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len)
+{
+ struct sja1105_static_config *config = &priv->static_config;
+ struct sja1105_table_header final_header;
+ sja1105_config_valid_t valid;
+ char *final_header_ptr;
+ int crc_len;
+
+ valid = sja1105_static_config_check_valid(config);
+ if (valid != SJA1105_CONFIG_OK) {
+ dev_err(&priv->spidev->dev,
+ "%s\n", sja1105_static_config_error_msg[valid]);
+ return -EINVAL;
+ }
+
+ /* Write Device ID and config tables to config_buf */
+ sja1105_static_config_pack(config_buf, config);
+ /* Recalculate CRC of the last header (right now 0xDEADBEEF).
+ * Don't include the CRC field itself.
+ */
+ crc_len = buf_len - 4;
+ /* Read the whole table header */
+ final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
+ sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
+ /* Modify */
+ final_header.crc = sja1105_crc32(config_buf, crc_len);
+ /* Rewrite */
+ sja1105_table_header_packing(final_header_ptr, &final_header, PACK);
+
+ return 0;
+}
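+
+/* Layout note (following the arithmetic above): the global CRC lives in the
+ * last table header, i.e. in the final SJA1105_SIZE_TABLE_HEADER bytes of
+ * config_buf, and is computed over every byte of the buffer except the
+ * trailing 4-byte CRC field itself.
+ */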
+
+#define RETRIES 10
+
+int sja1105_static_config_upload(struct sja1105_private *priv)
+{
+ unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
+ struct sja1105_static_config *config = &priv->static_config;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_status status;
+ int rc, retries = RETRIES;
+ u8 *config_buf;
+ int buf_len;
+
+ buf_len = sja1105_static_config_get_length(config);
+ config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
+ if (!config_buf)
+ return -ENOMEM;
+
+ rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Invalid config, cannot upload\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* Prevent PHY jabbering during switch reset by inhibiting
+ * Tx on all ports and waiting for current packet to drain.
+ * Otherwise, the PHY will see an unterminated Ethernet packet.
+ */
+ rc = sja1105_inhibit_tx(priv, &port_bitmap);
+ if (rc < 0) {
+ dev_err(dev, "Failed to inhibit Tx on ports\n");
+ rc = -ENXIO;
+ goto out;
+ }
+ /* Wait for an eventual egress packet to finish transmission
+ * (reach IFG). It is guaranteed that a second one will not
+ * follow, and that switch cold reset is thus safe
+ */
+ usleep_range(500, 1000);
+ do {
+ /* Put the SJA1105 in programming mode */
+ rc = sja1105_cold_reset(priv);
+ if (rc < 0) {
+ dev_err(dev, "Failed to reset switch, retrying...\n");
+ continue;
+ }
+ /* Wait for the switch to come out of reset */
+ usleep_range(1000, 5000);
+ /* Upload the static config to the device */
+ rc = sja1105_spi_send_long_packed_buf(priv, SPI_WRITE,
+ regs->config,
+ config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Failed to upload config, retrying...\n");
+ continue;
+ }
+ /* Check that SJA1105 responded well to the config upload */
+ rc = sja1105_status_get(priv, &status);
+ if (rc < 0)
+ continue;
+
+ if (status.ids == 1) {
+ dev_err(dev, "Mismatch between hardware and static config "
+ "device id. Wrote 0x%llx, wants 0x%llx\n",
+ config->device_id, priv->info->device_id);
+ continue;
+ }
+ if (status.crcchkl == 1) {
+ dev_err(dev, "Switch reported invalid local CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.crcchkg == 1) {
+ dev_err(dev, "Switch reported invalid global CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.configs == 0) {
+ dev_err(dev, "Switch reported that configuration is "
+ "invalid, retrying...\n");
+ continue;
+ }
+ /* Success! */
+ break;
+ } while (--retries);
+
+ if (!retries) {
+ rc = -EIO;
+ dev_err(dev, "Failed to upload config to device, giving up\n");
+ goto out;
+ } else if (retries != RETRIES) {
+ dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
+ }
+
+ dev_info(dev, "Reset switch and programmed static config\n");
+out:
+ kfree(config_buf);
+ return rc;
+}
+
+static struct sja1105_regs sja1105et_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x11,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .rgmii_pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ /* UM10944.pdf, Table 78, CGU Register overview */
+ .mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
+ .mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
+ .mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
+ .rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
+ .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
+ .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+};
+
+static struct sja1105_regs sja1105pqrs_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x12,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .rgmii_pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ /* UM11040.pdf, Table 114 */
+ .mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
+ .mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
+ .mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
+ .rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
+ .rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
+ .rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
+};
+
+struct sja1105_info sja1105e_info = {
+ .device_id = SJA1105E_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105e_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .reset_cmd = sja1105et_reset_cmd,
+ .regs = &sja1105et_regs,
+ .name = "SJA1105E",
+};
+struct sja1105_info sja1105t_info = {
+ .device_id = SJA1105T_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105t_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .reset_cmd = sja1105et_reset_cmd,
+ .regs = &sja1105et_regs,
+ .name = "SJA1105T",
+};
+struct sja1105_info sja1105p_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105P_PART_NO,
+ .static_ops = sja1105p_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105P",
+};
+struct sja1105_info sja1105q_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105Q_PART_NO,
+ .static_ops = sja1105q_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105Q",
+};
+struct sja1105_info sja1105r_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105R_PART_NO,
+ .static_ops = sja1105r_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105R",
+};
+struct sja1105_info sja1105s_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105S_PART_NO,
+ .static_ops = sja1105s_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .regs = &sja1105pqrs_regs,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .name = "SJA1105S",
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
new file mode 100644
index 000000000000..b3c992b0abb0
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -0,0 +1,987 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105_static_config.h"
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+/* Convenience wrappers over the generic packing functions. These take into
+ * account the SJA1105 memory layout quirks and provide some level of
+ * programmer protection against incorrect API use. The errors are not expected
+ * to occur during runtime, therefore printing and swallowing them here is
+ * appropriate instead of cluttering up higher-level code.
+ */
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
+{
+ int rc = packing(buf, (u64 *)val, start, end, len,
+ PACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len)
+{
+ int rc = packing((void *)buf, val, start, end, len,
+ UNPACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL)
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ else if (rc == -ERANGE)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ dump_stack();
+}
+
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op)
+{
+ int rc = packing(buf, val, start, end, len, op, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+/* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */
+u32 sja1105_crc32(const void *buf, size_t len)
+{
+ unsigned int i;
+ u64 word;
+ u32 crc;
+
+ /* seed */
+ crc = ~0;
+ for (i = 0; i < len; i += 4) {
+ sja1105_unpack((void *)buf + i, &word, 31, 0, 4);
+ crc = crc32_le(crc, (u8 *)&word, 4);
+ }
+ return ~crc;
+}
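+
+/* Example (illustrative): a 12-byte input is consumed as three 4-byte
+ * words; each word is read with sja1105_unpack() (bits 31..0) and fed to
+ * crc32_le(), with the usual Ethernet CRC bit inversion at the start (the
+ * ~0 seed) and at the end (~crc).
+ */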
+
+static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 319, 319, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 318, 318, size, op);
+ sja1105_packing(buf, &entry->switchid, 317, 315, size, op);
+ sja1105_packing(buf, &entry->hostprio, 314, 312, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 311, 264, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 263, 216, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 215, 168, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 167, 120, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 119, 119, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 118, 118, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 117, 117, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 116, 116, size, op);
+ sja1105_packing(buf, &entry->casc_port, 115, 113, size, op);
+ sja1105_packing(buf, &entry->host_port, 112, 110, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 109, 107, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 106, 75, size, op);
+ sja1105_packing(buf, &entry->vlmask, 74, 43, size, op);
+ sja1105_packing(buf, &entry->tpid, 42, 27, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 26, 26, size, op);
+ sja1105_packing(buf, &entry->tpid2, 25, 10, size, op);
+ return size;
+}
+
+static size_t
+sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 351, 351, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 350, 350, size, op);
+ sja1105_packing(buf, &entry->switchid, 349, 347, size, op);
+ sja1105_packing(buf, &entry->hostprio, 346, 344, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 343, 296, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 295, 248, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 247, 200, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 199, 152, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 151, 151, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 150, 150, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 149, 149, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 148, 148, size, op);
+ sja1105_packing(buf, &entry->casc_port, 147, 145, size, op);
+ sja1105_packing(buf, &entry->host_port, 144, 142, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
+ sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
+ sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
+ sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 25, 25, size, op);
+ sja1105_packing(buf, &entry->replay_port, 24, 22, size, op);
+ return size;
+}
+
+static size_t
+sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 13; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 9, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->bc_domain, 63, 59, size, op);
+ sja1105_packing(buf, &entry->reach_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 53, 49, size, op);
+ for (i = 0, offset = 25; i < 8; i++, offset += 3)
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ return size;
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->maxage, 31, 17, size, op);
+ sja1105_packing(buf, &entry->dyn_tbsz, 16, 14, size, op);
+ sja1105_packing(buf, &entry->poly, 13, 6, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 5, 5, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 4, 4, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 3, 3, size, op);
+ return size;
+}
+
+static size_t
+sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->maxage, 57, 43, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 27, 27, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 26, 26, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 25, 25, size, op);
+ return size;
+}
+
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vlanid, 95, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ sja1105_packing(buf, &entry->index, 29, 20, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ /* These are static L2 lookup entries, so the structure
+ * should match UM11040 Table 16/17 definitions when
+ * LOCKEDS is 1.
+ */
+ sja1105_packing(buf, &entry->vlanid, 81, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ sja1105_packing(buf, &entry->index, 15, 6, size, op);
+ return size;
+}
+
+static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 58, size, op);
+ sja1105_packing(buf, &entry->smax, 57, 42, size, op);
+ sja1105_packing(buf, &entry->rate, 41, 26, size, op);
+ sja1105_packing(buf, &entry->maxlen, 25, 15, size, op);
+ sja1105_packing(buf, &entry->partition, 14, 12, size, op);
+ return size;
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 72; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 71, 67, size, op);
+ sja1105_packing(buf, &entry->speed, 66, 65, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 48, 33, size, op);
+ sja1105_packing(buf, &entry->maxage, 32, 25, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 24, 22, size, op);
+ sja1105_packing(buf, &entry->vlanid, 21, 10, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 9, 9, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 8, 8, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 7, 7, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 6, 6, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 5, 5, size, op);
+ sja1105_packing(buf, &entry->retag, 4, 4, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 3, 3, size, op);
+ sja1105_packing(buf, &entry->egress, 2, 2, size, op);
+ sja1105_packing(buf, &entry->ingress, 1, 1, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 103, 99, size, op);
+ sja1105_packing(buf, &entry->speed, 98, 97, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 96, 81, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 80, 65, size, op);
+ sja1105_packing(buf, &entry->maxage, 64, 57, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 56, 54, size, op);
+ sja1105_packing(buf, &entry->vlanid, 53, 42, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 35, 35, size, op);
+ sja1105_packing(buf, &entry->retag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->egress, 32, 32, size, op);
+ sja1105_packing(buf, &entry->ingress, 31, 31, size, op);
+ return size;
+}
+
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY;
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->ving_mirr, 63, 59, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 53, 49, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 48, 44, size, op);
+ sja1105_packing(buf, &entry->tag_port, 43, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
+static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 17; i < 5; i++, offset += 3) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ }
+ return size;
+}
+
+size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_TABLE_HEADER;
+ struct sja1105_table_header *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->block_id, 31, 24, size, op);
+ sja1105_packing(buf, &entry->len, 55, 32, size, op);
+ sja1105_packing(buf, &entry->crc, 95, 64, size, op);
+ return size;
+}
+
+/* WARNING: the *hdr pointer is really non-const, because this function
+ * updates the header's CRC as part of the 2-stage packing operation
+ */
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr)
+{
+ /* First pack the header as-is, then calculate the CRC over it, and
+ * finally put the proper CRC into the packed buffer
+ */
+ memset(buf, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(buf, hdr, PACK);
+ hdr->crc = sja1105_crc32(buf, SJA1105_SIZE_TABLE_HEADER - 4);
+ sja1105_pack(buf + SJA1105_SIZE_TABLE_HEADER - 4, &hdr->crc, 31, 0, 4);
+}
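+
+/* In other words: the helper computes the CRC over the first
+ * SJA1105_SIZE_TABLE_HEADER - 4 bytes (which hold block_id and len) and
+ * stores it both in hdr->crc and in the last 4 bytes of the packed
+ * buffer, which is why hdr cannot be const.
+ */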
+
+static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
+{
+ u64 computed_crc;
+ int len_bytes;
+
+ len_bytes = (uintptr_t)(crc_ptr - table_start);
+ computed_crc = sja1105_crc32(table_start, len_bytes);
+ sja1105_pack(crc_ptr, &computed_crc, 31, 0, 4);
+}
+
+/* The block IDs that the switches support are unfortunately sparse, so keep a
+ * mapping table between them and the dense "block indices", and translate
+ * back and forth so that struct sja1105_static_config does not waste memory.
+ * Also, since the block ID comes from essentially untrusted input (unpacking
+ * the static config from userspace), it has to be sanitized (range-checked)
+ * before blindly indexing kernel memory with the blk_idx.
+ */
+static u64 blk_id_map[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
+ [BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
+ [BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
+ [BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING,
+ [BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
+ [BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
+ [BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
+ [BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
+ [BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
+};
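+
+/* For illustration only: the reverse (block ID -> block index) translation
+ * that the comment above calls for could look like the sketch below.
+ * sja1105_blk_idx_from_id() is a hypothetical name, not a helper defined
+ * in this file:
+ *
+ *     static enum sja1105_blk_idx sja1105_blk_idx_from_id(u64 block_id)
+ *     {
+ *             enum sja1105_blk_idx i;
+ *
+ *             for (i = 0; i < BLK_IDX_MAX; i++)
+ *                     if (blk_id_map[i] == block_id)
+ *                             return i;
+ *             return BLK_IDX_INVAL;
+ *     }
+ *
+ * Anything that maps to BLK_IDX_INVAL must not be used to index
+ * config->tables[].
+ */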
+
+const char *sja1105_static_config_error_msg[] = {
+ [SJA1105_CONFIG_OK] = "",
+ [SJA1105_MISSING_L2_POLICING_TABLE] =
+ "l2-policing-table needs to have at least one entry",
+ [SJA1105_MISSING_L2_FORWARDING_TABLE] =
+ "l2-forwarding-table is either missing or incomplete",
+ [SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE] =
+ "l2-forwarding-parameters-table is missing",
+ [SJA1105_MISSING_GENERAL_PARAMS_TABLE] =
+ "general-parameters-table is missing",
+ [SJA1105_MISSING_VLAN_TABLE] =
+ "vlan-lookup-table needs to have at least the default untagged VLAN",
+ [SJA1105_MISSING_XMII_TABLE] =
+ "xmii-table is missing",
+ [SJA1105_MISSING_MAC_TABLE] =
+ "mac-configuration-table needs to contain an entry for each port",
+ [SJA1105_OVERCOMMITTED_FRAME_MEMORY] =
+ "Not allowed to overcommit frame memory. L2 memory partitions "
+ "and VL memory partitions share the same space. The sum of all "
+ "16 memory partitions is not allowed to be larger than 929 "
+ "128-byte blocks (or 910 with retagging). Please adjust "
+ "l2-forwarding-parameters-table.part_spc and/or "
+ "vl-forwarding-parameters-table.partspc.",
+};
+
+sja1105_config_valid_t
+static_config_check_memory_size(const struct sja1105_table *tables)
+{
+ const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ int i, mem = 0;
+
+ l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
+
+ for (i = 0; i < 8; i++)
+ mem += l2_fwd_params->part_spc[i];
+
+ if (mem > SJA1105_MAX_FRAME_MEMORY)
+ return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
+
+ return SJA1105_CONFIG_OK;
+}
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config)
+{
+ const struct sja1105_table *tables = config->tables;
+#define IS_FULL(blk_idx) \
+ (tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)
+
+ if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
+ return SJA1105_MISSING_L2_POLICING_TABLE;
+
+ if (tables[BLK_IDX_VLAN_LOOKUP].entry_count == 0)
+ return SJA1105_MISSING_VLAN_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING))
+ return SJA1105_MISSING_L2_FORWARDING_TABLE;
+
+ if (!IS_FULL(BLK_IDX_MAC_CONFIG))
+ return SJA1105_MISSING_MAC_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING_PARAMS))
+ return SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_GENERAL_PARAMS))
+ return SJA1105_MISSING_GENERAL_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_XMII_PARAMS))
+ return SJA1105_MISSING_XMII_TABLE;
+
+ return static_config_check_memory_size(tables);
+#undef IS_FULL
+}
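+
+/* Example caller (illustrative; "priv" and "dev" stand in for the caller's
+ * driver context) pairing the validity code with the message table above:
+ *
+ *     sja1105_config_valid_t valid;
+ *
+ *     valid = sja1105_static_config_check_valid(&priv->static_config);
+ *     if (valid != SJA1105_CONFIG_OK) {
+ *             dev_err(dev, "Invalid static config: %s\n",
+ *                     sja1105_static_config_error_msg[valid]);
+ *             return -EINVAL;
+ *     }
+ */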
+
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config)
+{
+ struct sja1105_table_header header = {0};
+ enum sja1105_blk_idx i;
+ char *p = buf;
+ int j;
+
+ sja1105_pack(p, &config->device_id, 31, 0, 4);
+ p += SJA1105_SIZE_DEVICE_ID;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+ char *table_start;
+
+ table = &config->tables[i];
+ if (!table->entry_count)
+ continue;
+
+ header.block_id = blk_id_map[i];
+ header.len = table->entry_count *
+ table->ops->packed_entry_size / 4;
+ sja1105_table_header_pack_with_crc(p, &header);
+ p += SJA1105_SIZE_TABLE_HEADER;
+ table_start = p;
+ for (j = 0; j < table->entry_count; j++) {
+ u8 *entry_ptr = table->entries;
+
+ entry_ptr += j * table->ops->unpacked_entry_size;
+ memset(p, 0, table->ops->packed_entry_size);
+ table->ops->packing(p, entry_ptr, PACK);
+ p += table->ops->packed_entry_size;
+ }
+ sja1105_table_write_crc(table_start, p);
+ p += 4;
+ }
+ /* Final header:
+ * Block ID does not matter
+ * Length of 0 marks that header is final
+ * CRC will be replaced on-the-fly on "config upload"
+ */
+ header.block_id = 0;
+ header.len = 0;
+ header.crc = 0xDEADBEEF;
+ memset(p, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(p, &header, PACK);
+}
+
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config)
+{
+ unsigned int sum;
+ unsigned int header_count;
+ enum sja1105_blk_idx i;
+
+ /* Ending header */
+ header_count = 1;
+ sum = SJA1105_SIZE_DEVICE_ID;
+
+ /* Tables (headers and entries) */
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+
+ table = &config->tables[i];
+ if (table->entry_count)
+ header_count++;
+
+ sum += table->ops->packed_entry_size * table->entry_count;
+ }
+ /* Headers have an additional CRC at the end */
+ sum += header_count * (SJA1105_SIZE_TABLE_HEADER + 4);
+ /* Last header does not have an extra CRC because there is no data */
+ sum -= 4;
+
+ return sum;
+}
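+
+/* Illustrative packing sequence (buf, len and the GFP_KERNEL allocation are
+ * the caller's choice and shown only as an example):
+ *
+ *     len = sja1105_static_config_get_length(config);
+ *     buf = kzalloc(len, GFP_KERNEL);
+ *     if (!buf)
+ *             return -ENOMEM;
+ *     sja1105_static_config_pack(buf, config);
+ *
+ * After this, buf holds the device ID, then a header + entries + CRC for
+ * each non-empty table, terminated by the zero-length end header.
+ */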
+
+/* Compatibility matrices */
+
+/* SJA1105E: First generation, no TTEthernet */
+struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105T: First generation, TTEthernet */
+struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105P: Second generation, no TTEthernet, no SGMII */
+struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105Q: Second generation, TTEthernet, no SGMII */
+struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105R: Second generation, no TTEthernet, SGMII */
+struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105S: Second generation, TTEthernet, SGMII */
+struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id)
+{
+ enum sja1105_blk_idx i;
+
+ *config = (struct sja1105_static_config) {0};
+
+ /* Transfer static_ops array from priv into per-table ops
+ * for handier access
+ */
+ for (i = 0; i < BLK_IDX_MAX; i++)
+ config->tables[i].ops = &static_ops[i];
+
+ config->device_id = device_id;
+ return 0;
+}
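+
+/* For illustration, a caller that has identified the switch revision might
+ * do (the E-series choice and "priv" are just examples):
+ *
+ *     rc = sja1105_static_config_init(&priv->static_config,
+ *                                     sja1105e_table_ops,
+ *                                     SJA1105E_DEVICE_ID);
+ *     if (rc)
+ *             return rc;
+ *
+ * All tables start out with zero entries; use sja1105_table_resize() to
+ * populate them.
+ */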
+
+void sja1105_static_config_free(struct sja1105_static_config *config)
+{
+ enum sja1105_blk_idx i;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ if (config->tables[i].entry_count) {
+ kfree(config->tables[i].entries);
+ config->tables[i].entry_count = 0;
+ }
+ }
+}
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ u8 *entries = table->entries;
+
+ if (i >= table->entry_count)
+ return -ERANGE;
+
+ memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+ (table->entry_count - i - 1) * entry_size);
+
+ table->entry_count--;
+
+ return 0;
+}
+
+/* No pointers to table->entries should be kept when this is called. */
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ void *new_entries, *old_entries = table->entries;
+
+ if (new_count > table->ops->max_entry_count)
+ return -ERANGE;
+
+ new_entries = kcalloc(new_count, entry_size, GFP_KERNEL);
+ if (!new_entries)
+ return -ENOMEM;
+
+ memcpy(new_entries, old_entries, min(new_count, table->entry_count) *
+ entry_size);
+
+ table->entries = new_entries;
+ table->entry_count = new_count;
+ kfree(old_entries);
+ return 0;
+}
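+
+/* Example of growing a table by one entry (the VLAN lookup table is an
+ * arbitrary choice here). Per the comment above sja1105_table_resize(),
+ * re-read table->entries after the call instead of keeping old pointers:
+ *
+ *     struct sja1105_table *table = &config->tables[BLK_IDX_VLAN_LOOKUP];
+ *     struct sja1105_vlan_lookup_entry *vlan;
+ *     int rc;
+ *
+ *     rc = sja1105_table_resize(table, table->entry_count + 1);
+ *     if (rc)
+ *             return rc;
+ *     vlan = table->entries;
+ *     vlan[table->entry_count - 1].vlanid = 1;
+ */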
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
new file mode 100644
index 000000000000..069ca8fd059c
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_STATIC_CONFIG_H
+#define _SJA1105_STATIC_CONFIG_H
+
+#include <linux/packing.h>
+#include <linux/types.h>
+#include <asm/types.h>
+
+#define SJA1105_SIZE_DEVICE_ID 4
+#define SJA1105_SIZE_TABLE_HEADER 12
+#define SJA1105_SIZE_L2_POLICING_ENTRY 8
+#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
+#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
+#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
+#define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12
+#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
+#define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40
+#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
+#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
+
+/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
+enum {
+ BLKID_L2_LOOKUP = 0x05,
+ BLKID_L2_POLICING = 0x06,
+ BLKID_VLAN_LOOKUP = 0x07,
+ BLKID_L2_FORWARDING = 0x08,
+ BLKID_MAC_CONFIG = 0x09,
+ BLKID_L2_LOOKUP_PARAMS = 0x0D,
+ BLKID_L2_FORWARDING_PARAMS = 0x0E,
+ BLKID_GENERAL_PARAMS = 0x11,
+ BLKID_XMII_PARAMS = 0x4E,
+};
+
+enum sja1105_blk_idx {
+ BLK_IDX_L2_LOOKUP = 0,
+ BLK_IDX_L2_POLICING,
+ BLK_IDX_VLAN_LOOKUP,
+ BLK_IDX_L2_FORWARDING,
+ BLK_IDX_MAC_CONFIG,
+ BLK_IDX_L2_LOOKUP_PARAMS,
+ BLK_IDX_L2_FORWARDING_PARAMS,
+ BLK_IDX_GENERAL_PARAMS,
+ BLK_IDX_XMII_PARAMS,
+ BLK_IDX_MAX,
+ /* Fake block indices that are only valid for dynamic access */
+ BLK_IDX_MGMT_ROUTE,
+ BLK_IDX_MAX_DYN,
+ BLK_IDX_INVAL = -1,
+};
+
+#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
+#define SJA1105_MAX_L2_POLICING_COUNT 45
+#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
+#define SJA1105_MAX_L2_FORWARDING_COUNT 13
+#define SJA1105_MAX_MAC_CONFIG_COUNT 5
+#define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1
+#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
+#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
+#define SJA1105_MAX_XMII_PARAMS_COUNT 1
+
+#define SJA1105_MAX_FRAME_MEMORY 929
+
+#define SJA1105E_DEVICE_ID 0x9C00000Cull
+#define SJA1105T_DEVICE_ID 0x9E00030Eull
+#define SJA1105PR_DEVICE_ID 0xAF00030Eull
+#define SJA1105QS_DEVICE_ID 0xAE00030Eull
+
+#define SJA1105ET_PART_NO 0x9A83
+#define SJA1105P_PART_NO 0x9A84
+#define SJA1105Q_PART_NO 0x9A85
+#define SJA1105R_PART_NO 0x9A86
+#define SJA1105S_PART_NO 0x9A87
+
+struct sja1105_general_params_entry {
+ u64 vllupformat;
+ u64 mirr_ptacu;
+ u64 switchid;
+ u64 hostprio;
+ u64 mac_fltres1;
+ u64 mac_fltres0;
+ u64 mac_flt1;
+ u64 mac_flt0;
+ u64 incl_srcpt1;
+ u64 incl_srcpt0;
+ u64 send_meta1;
+ u64 send_meta0;
+ u64 casc_port;
+ u64 host_port;
+ u64 mirr_port;
+ u64 vlmarker;
+ u64 vlmask;
+ u64 tpid;
+ u64 ignore2stf;
+ u64 tpid2;
+ /* P/Q/R/S only */
+ u64 queue_ts;
+ u64 egrmirrvid;
+ u64 egrmirrpcp;
+ u64 egrmirrdei;
+ u64 replay_port;
+};
+
+struct sja1105_vlan_lookup_entry {
+ u64 ving_mirr;
+ u64 vegr_mirr;
+ u64 vmemb_port;
+ u64 vlan_bc;
+ u64 tag_port;
+ u64 vlanid;
+};
+
+struct sja1105_l2_lookup_entry {
+ u64 vlanid;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+};
+
+struct sja1105_l2_lookup_params_entry {
+ u64 maxage; /* Shared */
+ u64 dyn_tbsz; /* E/T only */
+ u64 poly; /* E/T only */
+ u64 shared_learn; /* Shared */
+ u64 no_enf_hostprt; /* Shared */
+ u64 no_mgmt_learn; /* Shared */
+};
+
+struct sja1105_l2_forwarding_entry {
+ u64 bc_domain;
+ u64 reach_port;
+ u64 fl_domain;
+ u64 vlan_pmap[8];
+};
+
+struct sja1105_l2_forwarding_params_entry {
+ u64 max_dynp;
+ u64 part_spc[8];
+};
+
+struct sja1105_l2_policing_entry {
+ u64 sharindx;
+ u64 smax;
+ u64 rate;
+ u64 maxlen;
+ u64 partition;
+};
+
+struct sja1105_mac_config_entry {
+ u64 top[8];
+ u64 base[8];
+ u64 enabled[8];
+ u64 ifg;
+ u64 speed;
+ u64 tp_delin;
+ u64 tp_delout;
+ u64 maxage;
+ u64 vlanprio;
+ u64 vlanid;
+ u64 ing_mirr;
+ u64 egr_mirr;
+ u64 drpnona664;
+ u64 drpdtag;
+ u64 drpuntag;
+ u64 retag;
+ u64 dyn_learn;
+ u64 egress;
+ u64 ingress;
+};
+
+struct sja1105_xmii_params_entry {
+ u64 phy_mac[5];
+ u64 xmii_mode[5];
+};
+
+struct sja1105_table_header {
+ u64 block_id;
+ u64 len;
+ u64 crc;
+};
+
+struct sja1105_table_ops {
+ size_t (*packing)(void *buf, void *entry_ptr, enum packing_op op);
+ size_t unpacked_entry_size;
+ size_t packed_entry_size;
+ size_t max_entry_count;
+};
+
+struct sja1105_table {
+ const struct sja1105_table_ops *ops;
+ size_t entry_count;
+ void *entries;
+};
+
+struct sja1105_static_config {
+ u64 device_id;
+ struct sja1105_table tables[BLK_IDX_MAX];
+};
+
+extern struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
+
+size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op);
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr);
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config);
+
+typedef enum {
+ SJA1105_CONFIG_OK = 0,
+ SJA1105_MISSING_L2_POLICING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE,
+ SJA1105_MISSING_GENERAL_PARAMS_TABLE,
+ SJA1105_MISSING_VLAN_TABLE,
+ SJA1105_MISSING_XMII_TABLE,
+ SJA1105_MISSING_MAC_TABLE,
+ SJA1105_OVERCOMMITTED_FRAME_MEMORY,
+} sja1105_config_valid_t;
+
+extern const char *sja1105_static_config_error_msg[];
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config);
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config);
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id);
+void sja1105_static_config_free(struct sja1105_static_config *config);
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i);
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count);
+
+u32 sja1105_crc32(const void *buf, size_t len);
+
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len);
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len);
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op);
+
+#endif
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 0d15a12a4560..3568129fb7da 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
@@ -131,21 +132,9 @@ static void dummy_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int dummy_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *ts_info)
-{
- ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-
- ts_info->phc_index = -1;
-
- return 0;
-};
-
static const struct ethtool_ops dummy_ethtool_ops = {
.get_drvinfo = dummy_get_drvinfo,
- .get_ts_info = dummy_get_ts_info,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static void dummy_setup(struct net_device *dev)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 47e5984f16fb..90080a886cd9 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -613,7 +613,6 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
napi_schedule(&greth->napi);
}
- mmiowb();
spin_unlock(&greth->devlock);
return retval;
@@ -1459,7 +1458,7 @@ static int greth_of_probe(struct platform_device *ofdev)
const u8 *addr;
addr = of_get_mac_address(ofdev->dev.of_node);
- if (addr) {
+ if (!IS_ERR(addr)) {
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) addr[i];
} else {
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 16477aa6d61f..4f7e792e50e9 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -345,8 +345,6 @@ static void slic_set_rx_mode(struct net_device *dev)
if (sdev->promisc != set_promisc) {
sdev->promisc = set_promisc;
slic_configure_rcv(sdev);
- /* make sure writes to receiver cant leak out of the lock */
- mmiowb();
}
spin_unlock_bh(&sdev->link_lock);
}
@@ -1461,8 +1459,6 @@ static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
netif_stop_queue(dev);
- /* make sure writes to io-memory cant leak out of tx queue lock */
- mmiowb();
return NETDEV_TX_OK;
drop_skb:
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index e1acafa82214..9e06dff619c3 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -870,8 +870,8 @@ static int emac_probe(struct platform_device *pdev)
/* Read MAC-address from DT */
mac_addr = of_get_mac_address(np);
- if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
/* Check if the MAC address is valid, if not get a random one */
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index aa1d1f5339d2..877e67f4344b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1537,7 +1537,7 @@ static int altera_tse_probe(struct platform_device *pdev)
/* get default MAC address from device tree */
macaddr = of_get_mac_address(pdev->dev.of_node);
- if (macaddr)
+ if (!IS_ERR(macaddr))
ether_addr_copy(ndev->dev_addr, macaddr);
else
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index b17d435de09f..7f8266b191ae 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -731,7 +731,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
if (rc)
pr_err("Cannot set LLQ configuration: %d\n", rc);
- return 0;
+ return rc;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
@@ -2016,7 +2016,6 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
mb();
writel_relaxed((u32)aenq->head,
dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
- mmiowb();
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2195,7 +2194,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (unlikely(ret))
return ret;
- if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
+ if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
pr_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return -EOPNOTSUPP;
@@ -2280,6 +2279,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return -EINVAL;
}
+ rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
/* Restore the old function */
@@ -2802,7 +2802,11 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
/* if moderation is supported by device we set adaptive moderation */
delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
- ena_com_enable_adaptive_moderation(ena_dev);
+
+ /* Disable adaptive moderation by default - can be enabled from
+ * ethtool
+ */
+ ena_com_disable_adaptive_moderation(ena_dev);
return 0;
err:
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index f3a5a384e6e8..fe596bc30a96 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -697,8 +697,8 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
if (indir) {
for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
rc = ena_com_indirect_table_fill_entry(ena_dev,
- ENA_IO_RXQ_IDX(indir[i]),
- i);
+ i,
+ ENA_IO_RXQ_IDX(indir[i]));
if (unlikely(rc)) {
netif_err(adapter, drv, netdev,
"Cannot fill indirect table (index is too large)\n");
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a6eacf2099c3..9c83642922c7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -224,28 +224,23 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
if (!tx_ring->tx_buffer_info) {
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
- return -ENOMEM;
+ goto err_tx_buffer_info;
}
size = sizeof(u16) * tx_ring->ring_size;
tx_ring->free_tx_ids = vzalloc_node(size, node);
if (!tx_ring->free_tx_ids) {
tx_ring->free_tx_ids = vzalloc(size);
- if (!tx_ring->free_tx_ids) {
- vfree(tx_ring->tx_buffer_info);
- return -ENOMEM;
- }
+ if (!tx_ring->free_tx_ids)
+ goto err_free_tx_ids;
}
size = tx_ring->tx_max_header_size;
tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
if (!tx_ring->push_buf_intermediate_buf) {
tx_ring->push_buf_intermediate_buf = vzalloc(size);
- if (!tx_ring->push_buf_intermediate_buf) {
- vfree(tx_ring->tx_buffer_info);
- vfree(tx_ring->free_tx_ids);
- return -ENOMEM;
- }
+ if (!tx_ring->push_buf_intermediate_buf)
+ goto err_push_buf_intermediate_buf;
}
/* Req id ring for TX out of order completions */
@@ -259,6 +254,15 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->next_to_clean = 0;
tx_ring->cpu = ena_irq->cpu;
return 0;
+
+err_push_buf_intermediate_buf:
+ vfree(tx_ring->free_tx_ids);
+ tx_ring->free_tx_ids = NULL;
+err_free_tx_ids:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+err_tx_buffer_info:
+ return -ENOMEM;
}
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
@@ -378,6 +382,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
rx_ring->free_rx_ids = vzalloc(size);
if (!rx_ring->free_rx_ids) {
vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
return -ENOMEM;
}
}
@@ -1820,6 +1825,7 @@ err_setup_rx:
err_setup_tx:
ena_free_io_irq(adapter);
err_req_irq:
+ ena_del_napi(adapter);
return rc;
}
@@ -2236,7 +2242,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (netif_xmit_stopped(txq) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
/* trigger the dma engine. ena_com_write_sq_doorbell()
* has a mb
*/
@@ -2258,8 +2264,7 @@ error_drop_packet:
}
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
u16 qid;
/* we suspect that this is good for in-kernel network services that
@@ -2269,7 +2274,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb, NULL);
+ qid = netdev_pick_tx(dev, skb, NULL);
return qid;
}
@@ -2292,7 +2297,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strncpy(host_info->kernel_ver_str, utsname()->version,
+ strlcpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
strncpy(host_info->os_dist_str, utsname()->release,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4666084eda16..d5fd49dd25f3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1887,7 +1887,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
smp_wmb();
ring->cur = cur_index + 1;
- if (!packet->skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
channel->queue_index)))
xgbe_tx_start_xmit(channel, ring);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 0cc911f928b1..3dd0cecddba8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1612,7 +1612,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, any kind of event packet */
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, any kind of event packet */
+ /* Fall through - to PTP v1, UDP, any kind of event packet */
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
@@ -1623,7 +1623,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, Sync packet */
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, Sync packet */
+ /* Fall through - to PTP v1, UDP, Sync packet */
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
@@ -1634,7 +1634,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, Delay_req packet */
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, Delay_req packet */
+ /* Fall through - to PTP v1, UDP, Delay_req packet */
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index 7d623e90dc19..12472c5bb34d 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -17,7 +17,8 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
- depends on PCI && X86_64
+ depends on PCI
+ depends on X86_64 || ARM64 || COMPILE_TEST
---help---
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 4556630ee286..1f99cf832476 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
+ aq_drvinfo.o \
aq_filters.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 3944ce7f0870..8f35c3f883f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -16,7 +16,7 @@
#define AQ_CFG_TCS_DEF 1U
#define AQ_CFG_TXDS_DEF 4096U
-#define AQ_CFG_RXDS_DEF 1024U
+#define AQ_CFG_RXDS_DEF 2048U
#define AQ_CFG_IS_POLLING_DEF 0U
@@ -34,10 +34,16 @@
#define AQ_CFG_TCS_MAX 8U
#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
-#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX (2U * 1024U)
#define AQ_CFG_TX_CLEAN_BUDGET 256U
+#define AQ_CFG_RX_REFILL_THRES 32U
+
+#define AQ_CFG_RX_HDR_SIZE 256U
+
+#define AQ_CFG_RX_PAGEORDER 0U
+
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 6b6d1724676e..235bb3a72d66 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -41,9 +41,6 @@
#define AQ_DEVICE_ID_AQC111S 0x91B1
#define AQ_DEVICE_ID_AQC112S 0x92B1
-#define AQ_DEVICE_ID_AQC111E 0x51B1
-#define AQ_DEVICE_ID_AQC112E 0x52B1
-
#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
#define AQ_HWREV_ANY 0
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
new file mode 100644
index 000000000000..adad6a7acabe
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2014-2019 aQuantia Corporation. */
+
+/* File aq_drvinfo.c: Definition of common code for firmware info in sysfs. */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/hwmon.h>
+#include <linux/uaccess.h>
+
+#include "aq_drvinfo.h"
+
+#if IS_REACHABLE(CONFIG_HWMON)
+static int aq_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct aq_nic_s *aq_nic = dev_get_drvdata(dev);
+ int temp;
+ int err;
+
+ if (!aq_nic)
+ return -EIO;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ if (!aq_nic->aq_fw_ops->get_phy_temp)
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = aq_nic->aq_fw_ops->get_phy_temp(aq_nic->aq_hw, &temp);
+ *value = temp;
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aq_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct aq_nic_s *aq_nic = dev_get_drvdata(dev);
+
+ if (!aq_nic)
+ return -EIO;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ if (!aq_nic->aq_fw_ops->get_phy_temp)
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = "PHY Temperature";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t aq_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops aq_hwmon_ops = {
+ .is_visible = aq_hwmon_is_visible,
+ .read = aq_hwmon_read,
+ .read_string = aq_hwmon_read_string,
+};
+
+static u32 aq_hwmon_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ 0,
+};
+
+static const struct hwmon_channel_info aq_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = aq_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *aq_hwmon_info[] = {
+ &aq_hwmon_temp,
+ NULL,
+};
+
+static const struct hwmon_chip_info aq_hwmon_chip_info = {
+ .ops = &aq_hwmon_ops,
+ .info = aq_hwmon_info,
+};
+
+int aq_drvinfo_init(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct device *dev = &aq_nic->pdev->dev;
+ struct device *hwmon_dev;
+ int err = 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ ndev->name,
+ aq_nic,
+ &aq_hwmon_chip_info,
+ NULL);
+
+ if (IS_ERR(hwmon_dev))
+ err = PTR_ERR(hwmon_dev);
+
+ return err;
+}
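+
+/* When registration succeeds, the sensor is exposed through the standard
+ * hwmon sysfs attributes, e.g. /sys/class/hwmon/hwmonN/temp1_input and
+ * temp1_label (the latter reporting "PHY Temperature").
+ */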
+
+#else
+int aq_drvinfo_init(struct net_device *ndev) { return 0; }
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
new file mode 100644
index 000000000000..41fbb1358068
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_drvinfo.h: Declaration of common code for firmware info in sysfs. */
+
+#ifndef AQ_DRVINFO_H
+#define AQ_DRVINFO_H
+
+#include "aq_nic.h"
+#include "aq_hw.h"
+#include "hw_atl/hw_atl_utils.h"
+
+int aq_drvinfo_init(struct net_device *ndev);
+
+#endif /* AQ_DRVINFO_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a718d7a1f76c..79da48094770 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -405,8 +405,10 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
if (!aq_nic->aq_fw_ops->get_eee_rate)
return -EOPNOTSUPP;
+ mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
&supported_rates);
+ mutex_unlock(&aq_nic->fwreq_mutex);
if (err < 0)
return err;
@@ -439,8 +441,10 @@ static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
!aq_nic->aq_fw_ops->set_eee_rate))
return -EOPNOTSUPP;
+ mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
&supported_rates);
+ mutex_unlock(&aq_nic->fwreq_mutex);
if (err < 0)
return err;
@@ -452,20 +456,28 @@ static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
cfg->eee_speeds = 0;
}
- return aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate);
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return err;
}
static int aq_ethtool_nway_reset(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
return -EOPNOTSUPP;
- if (netif_running(ndev))
- return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+ if (netif_running(ndev)) {
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+ }
- return 0;
+ return err;
}
static void aq_ethtool_get_pauseparam(struct net_device *ndev,
@@ -503,7 +515,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
else
aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+ mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
+ mutex_unlock(&aq_nic->fwreq_mutex);
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 81aab73dc22f..95fd6c852a9d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -88,6 +88,8 @@ struct aq_stats_s {
#define AQ_HW_IRQ_MSI 2U
#define AQ_HW_IRQ_MSIX 3U
+#define AQ_HW_SERVICE_IRQS 1U
+
#define AQ_HW_POWER_STATE_D0 0U
#define AQ_HW_POWER_STATE_D3 3U
@@ -259,6 +261,8 @@ struct aq_fw_ops {
int (*update_stats)(struct aq_hw_s *self);
+ int (*get_phy_temp)(struct aq_hw_s *self, int *temp);
+
u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
int (*set_flow_control)(struct aq_hw_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index d526c4f19d34..22a1c784dc9c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -53,6 +53,18 @@ void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
writel(value, hw->mmio + reg);
}
+/* Most 64-bit registers are exposed as an LSW, MSW pair. Counters are
+ * normally implemented by HW as latched pairs: reading the LSW first
+ * latches the MSW, so the value stays consistent even if the LSW
+ * overflows between the two reads.
+ */
+u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
+{
+ u64 value = aq_hw_read_reg(hw, reg);
+
+ value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32;
+ return value;
+}
+
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index bc711238ca0c..bf73428ed689 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,7 @@ void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
+u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
#endif /* AQ_HW_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 2a11c1eefd8f..1ea8b77fc1a7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -23,8 +23,17 @@ MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
+
static const struct net_device_ops aq_ndev_ops;
+static struct workqueue_struct *aq_ndev_wq;
+
+void aq_ndev_schedule_work(struct work_struct *work)
+{
+ queue_work(aq_ndev_wq, work);
+}
+
struct net_device *aq_ndev_alloc(void)
{
struct net_device *ndev = NULL;
@@ -209,3 +218,35 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
+
+static int __init aq_ndev_init_module(void)
+{
+ int ret;
+
+ aq_ndev_wq = create_singlethread_workqueue(aq_ndev_driver_name);
+ if (!aq_ndev_wq) {
+ pr_err("Failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = aq_pci_func_register_driver();
+ if (ret) {
+ destroy_workqueue(aq_ndev_wq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit aq_ndev_exit_module(void)
+{
+ aq_pci_func_unregister_driver();
+
+ if (aq_ndev_wq) {
+ destroy_workqueue(aq_ndev_wq);
+ aq_ndev_wq = NULL;
+ }
+}
+
+module_init(aq_ndev_init_module);
+module_exit(aq_ndev_exit_module);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index ce92152eb43e..5448b82fb7ea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -13,7 +13,9 @@
#define AQ_MAIN_H
#include "aq_common.h"
+#include "aq_nic.h"
+void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ff83667410bd..e82d25a91bc1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -14,6 +14,7 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
+#include "aq_main.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -73,6 +74,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->tx_itr = aq_itr_tx;
cfg->rx_itr = aq_itr_rx;
+ cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
@@ -91,7 +93,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
/*rss rings */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
- cfg->vecs = min(cfg->vecs, self->irqvecs);
+ if (self->irqvecs > AQ_HW_SERVICE_IRQS)
+ cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
/* cfg->vecs should be power of 2 for RSS */
if (cfg->vecs >= 8U)
cfg->vecs = 8U;
@@ -115,6 +118,15 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->vecs = 1U;
}
+ /* Check if we have enough vectors allocated for
+ * the link status IRQ. If not, we'll learn the link state
+ * from the slower service task instead.
+ */
+ if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
+ cfg->link_irq_vec = cfg->vecs;
+ else
+ cfg->link_irq_vec = 0;
+
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
cfg->features = cfg->aq_hw_caps->hw_features;
}
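For a rough feel of the vector budget above: with AQ_HW_SERVICE_IRQS == 1, the rx/tx vectors are capped at one less than the grant and the spare vector becomes the link-state interrupt; if nothing is left over, link_irq_vec stays 0 and the link is polled from the service task. A standalone sketch of that decision, with made-up vector counts and the power-of-two rounding deliberately left out:

#include <stdio.h>

#define AQ_HW_SERVICE_IRQS 1U

static void pick_link_vec(unsigned int irqvecs, unsigned int vecs_wanted)
{
	unsigned int vecs = vecs_wanted;
	unsigned int link_irq_vec;

	/* Cap rx/tx vectors so one vector can be left over for link status. */
	if (irqvecs > AQ_HW_SERVICE_IRQS && vecs > irqvecs - AQ_HW_SERVICE_IRQS)
		vecs = irqvecs - AQ_HW_SERVICE_IRQS;

	/* The link IRQ uses the vector right after the rx/tx ones, if granted. */
	if (AQ_HW_SERVICE_IRQS > 0 && vecs + 1 <= irqvecs)
		link_irq_vec = vecs;
	else
		link_irq_vec = 0;	/* fall back to polling in the service task */

	printf("granted %u vectors -> vecs=%u link_irq_vec=%u\n",
	       irqvecs, vecs, link_irq_vec);
}

int main(void)
{
	pick_link_vec(9, 8);	/* 8 rx/tx vectors plus the link vector */
	pick_link_vec(4, 8);	/* 3 rx/tx vectors plus the link vector */
	pick_link_vec(1, 1);	/* single vector: no room for a link IRQ */
	return 0;
}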
@@ -160,30 +172,48 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
return 0;
}
-static void aq_nic_service_timer_cb(struct timer_list *t)
+static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
- struct aq_nic_s *self = from_timer(self, t, service_timer);
- int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
- int err = 0;
+ struct aq_nic_s *self = private;
+
+ if (!self)
+ return IRQ_NONE;
+
+ aq_nic_update_link_status(self);
+
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+ BIT(self->aq_nic_cfg.link_irq_vec));
+ return IRQ_HANDLED;
+}
+
+static void aq_nic_service_task(struct work_struct *work)
+{
+ struct aq_nic_s *self = container_of(work, struct aq_nic_s,
+ service_task);
+ int err;
if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
- goto err_exit;
+ return;
err = aq_nic_update_link_status(self);
if (err)
- goto err_exit;
+ return;
+ mutex_lock(&self->fwreq_mutex);
if (self->aq_fw_ops->update_stats)
self->aq_fw_ops->update_stats(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
aq_nic_update_ndev_stats(self);
+}
+
+static void aq_nic_service_timer_cb(struct timer_list *t)
+{
+ struct aq_nic_s *self = from_timer(self, t, service_timer);
- /* If no link - use faster timer rate to detect link up asap */
- if (!netif_carrier_ok(self->ndev))
- ctimer = max(ctimer / 2, 1);
+ mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
-err_exit:
- mod_timer(&self->service_timer, jiffies + ctimer);
+ aq_ndev_schedule_work(&self->service_task);
}
static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -213,8 +243,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
+ mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
if (err)
goto err_exit;
@@ -283,7 +315,9 @@ int aq_nic_init(struct aq_nic_s *self)
unsigned int i = 0U;
self->power_state = AQ_HW_POWER_STATE_D0;
+ mutex_lock(&self->fwreq_mutex);
err = self->aq_hw_ops->hw_reset(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
if (err < 0)
goto err_exit;
@@ -333,9 +367,11 @@ int aq_nic_start(struct aq_nic_s *self)
err = aq_nic_update_interrupt_moderation_settings(self);
if (err)
goto err_exit;
+
+ INIT_WORK(&self->service_task, aq_nic_service_task);
+
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
- mod_timer(&self->service_timer, jiffies +
- AQ_CFG_SERVICE_TIMER_INTERVAL);
+ aq_nic_service_timer_cb(&self->service_timer);
if (self->aq_nic_cfg.is_polling) {
timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
@@ -344,13 +380,25 @@ int aq_nic_start(struct aq_nic_s *self)
} else {
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- err = aq_pci_func_alloc_irq(self, i,
- self->ndev->name, aq_vec,
+ err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
+ aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
if (err < 0)
goto err_exit;
}
+ if (self->aq_nic_cfg.link_irq_vec) {
+ int irqvec = pci_irq_vector(self->pdev,
+ self->aq_nic_cfg.link_irq_vec);
+ err = request_threaded_irq(irqvec, NULL,
+ aq_linkstate_threaded_isr,
+ IRQF_SHARED,
+ self->ndev->name, self);
+ if (err < 0)
+ goto err_exit;
+ self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
+ }
+
err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
AQ_CFG_IRQ_MASK);
if (err < 0)
@@ -652,7 +700,14 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
unsigned int i = 0U;
unsigned int count = 0U;
struct aq_vec_s *aq_vec = NULL;
- struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+ struct aq_stats_s *stats;
+
+ if (self->aq_fw_ops->update_stats) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->update_stats(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+ stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
if (!stats)
goto err_exit;
@@ -698,11 +753,12 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
struct net_device *ndev = self->ndev;
struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
- ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
- ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
+ ndev->stats.rx_packets = stats->dma_pkt_rc;
+ ndev->stats.rx_bytes = stats->dma_oct_rc;
ndev->stats.rx_errors = stats->erpr;
- ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
- ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
+ ndev->stats.rx_dropped = stats->dpc;
+ ndev->stats.tx_packets = stats->dma_pkt_tc;
+ ndev->stats.tx_bytes = stats->dma_oct_tc;
ndev->stats.tx_errors = stats->erpt;
ndev->stats.multicast = stats->mprc;
}
@@ -839,7 +895,9 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
self->aq_nic_cfg.is_autoneg = false;
}
+ mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
+ mutex_unlock(&self->fwreq_mutex);
if (err < 0)
goto err_exit;
@@ -872,6 +930,7 @@ int aq_nic_stop(struct aq_nic_s *self)
netif_carrier_off(self->ndev);
del_timer_sync(&self->service_timer);
+ cancel_work_sync(&self->service_task);
self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
@@ -899,14 +958,22 @@ void aq_nic_deinit(struct aq_nic_s *self)
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_deinit(aq_vec);
- self->aq_fw_ops->deinit(self->aq_hw);
+ if (likely(self->aq_fw_ops->deinit)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->deinit(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+ }
if (self->power_state != AQ_HW_POWER_STATE_D0 ||
- self->aq_hw->aq_nic_cfg->wol) {
- self->aq_fw_ops->set_power(self->aq_hw,
- self->power_state,
- self->ndev->dev_addr);
- }
+ self->aq_hw->aq_nic_cfg->wol)
+ if (likely(self->aq_fw_ops->set_power)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->set_power(self->aq_hw,
+ self->power_state,
+ self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+
err_exit:;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 8e34c1e49bf2..c03d38ed105d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -26,11 +26,13 @@ struct aq_nic_cfg_s {
u64 features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
- u32 vecs; /* vecs==allocated irqs */
+ u32 vecs; /* allocated rx/tx vectors */
+ u32 link_irq_vec;
u32 irq_type;
u32 itr;
u16 rx_itr;
u16 tx_itr;
+ u32 rxpageorder;
u32 num_rss_queues;
u32 mtu;
u32 flow_control;
@@ -91,6 +93,7 @@ struct aq_nic_s {
const struct aq_fw_ops *aq_fw_ops;
struct aq_nic_cfg_s aq_nic_cfg;
struct timer_list service_timer;
+ struct work_struct service_task;
struct timer_list polling_timer;
struct aq_hw_link_status_s link_status;
struct {
@@ -103,6 +106,8 @@ struct aq_nic_s {
struct pci_dev *pdev;
unsigned int msix_entry_mask;
u32 irqvecs;
+ /* mutex to serialize FW interface access operations */
+ struct mutex fwreq_mutex;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 0217ff4669a4..9cb0864d6d8d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -20,6 +20,7 @@
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
+#include "aq_drvinfo.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -42,9 +43,6 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
- { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
- { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },
-
{}
};
@@ -74,9 +72,6 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
-
- { AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
- { AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
@@ -139,26 +134,27 @@ err_exit:
}
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
- char *name, void *aq_vec, cpumask_t *affinity_mask)
+ char *name, irq_handler_t irq_handler,
+ void *irq_arg, cpumask_t *affinity_mask)
{
struct pci_dev *pdev = self->pdev;
int err;
if (pdev->msix_enabled || pdev->msi_enabled)
- err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
- name, aq_vec);
+ err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
+ name, irq_arg);
else
err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
- IRQF_SHARED, name, aq_vec);
+ IRQF_SHARED, name, irq_arg);
if (err >= 0) {
self->msix_entry_mask |= (1 << i);
- self->aq_vec[i] = aq_vec;
- if (pdev->msix_enabled)
+ if (pdev->msix_enabled && affinity_mask)
irq_set_affinity_hint(pci_irq_vector(pdev, i),
affinity_mask);
}
+
return err;
}
@@ -166,16 +162,22 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
struct pci_dev *pdev = self->pdev;
unsigned int i;
+ void *irq_data;
for (i = 32U; i--;) {
if (!((1U << i) & self->msix_entry_mask))
continue;
- if (i >= AQ_CFG_VECS_MAX)
+ if (self->aq_nic_cfg.link_irq_vec &&
+ i == self->aq_nic_cfg.link_irq_vec)
+ irq_data = self;
+ else if (i < AQ_CFG_VECS_MAX)
+ irq_data = self->aq_vec[i];
+ else
continue;
if (pdev->msix_enabled)
irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
- free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
+ free_irq(pci_irq_vector(pdev, i), irq_data);
self->msix_entry_mask &= ~(1U << i);
}
}
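The free loop above walks msix_entry_mask and has to pick the right cookie for each set bit: the per-queue aq_vec for indices below AQ_CFG_VECS_MAX, or the nic itself for the link-state vector. A throwaway userspace walk of a hypothetical mask (the mask and vector values are invented for illustration):

#include <stdio.h>

#define AQ_CFG_VECS_MAX 8U

int main(void)
{
	unsigned int msix_entry_mask = 0x1fU;	/* queue vectors 0..3 + link vector 4 */
	unsigned int link_irq_vec = 4U;
	unsigned int i;

	for (i = 32U; i--;) {
		if (!((1U << i) & msix_entry_mask))
			continue;

		if (link_irq_vec && i == link_irq_vec)
			printf("vector %2u: freed with the nic pointer\n", i);
		else if (i < AQ_CFG_VECS_MAX)
			printf("vector %2u: freed with aq_vec[%u]\n", i, i);
		else
			printf("vector %2u: not ours, skipped\n", i);
	}
	return 0;
}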
@@ -185,7 +187,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
if (self->pdev->msix_enabled)
return AQ_HW_IRQ_MSIX;
if (self->pdev->msi_enabled)
- return AQ_HW_IRQ_MSIX;
+ return AQ_HW_IRQ_MSI;
return AQ_HW_IRQ_LEGACY;
}
@@ -223,6 +225,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(ndev, &pdev->dev);
pci_set_drvdata(pdev, self);
+ mutex_init(&self->fwreq_mutex);
+
err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
&aq_nic_get_cfg(self)->aq_hw_caps);
if (err)
@@ -268,6 +272,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs = min((u8)AQ_CFG_VECS_DEF,
aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
numvecs = min(numvecs, num_online_cpus());
+ numvecs += AQ_HW_SERVICE_IRQS;
/*enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
@@ -289,6 +294,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
if (err < 0)
goto err_register;
+ aq_drvinfo_init(ndev);
+
return 0;
err_register:
@@ -365,4 +372,13 @@ static struct pci_driver aq_pci_ops = {
.shutdown = aq_pci_shutdown,
};
-module_pci_driver(aq_pci_ops);
+int aq_pci_func_register_driver(void)
+{
+ return pci_register_driver(&aq_pci_ops);
+}
+
+void aq_pci_func_unregister_driver(void)
+{
+ pci_unregister_driver(&aq_pci_ops);
+}
+
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index aeee67bf69fa..670f9a940d65 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -24,9 +24,12 @@ struct aq_board_revision_s {
int aq_pci_func_init(struct pci_dev *pdev);
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
- char *name, void *aq_vec,
- cpumask_t *affinity_mask);
+ char *name, irq_handler_t irq_handler,
+ void *irq_arg, cpumask_t *affinity_mask);
void aq_pci_func_free_irqs(struct aq_nic_s *self);
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self);
+int aq_pci_func_register_driver(void);
+void aq_pci_func_unregister_driver(void);
+
#endif /* AQ_PCI_FUNC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index e2ffb159cbe2..350e385528fd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -12,10 +12,89 @@
#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
+#include "aq_hw_utils.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
+{
+ unsigned int len = PAGE_SIZE << rxpage->order;
+
+ dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
+
+ /* Drop the ref for being in the ring. */
+ __free_pages(rxpage->page, rxpage->order);
+ rxpage->page = NULL;
+}
+
+static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
+ struct device *dev)
+{
+ struct page *page;
+ dma_addr_t daddr;
+ int ret = -ENOMEM;
+
+ page = dev_alloc_pages(order);
+ if (unlikely(!page))
+ goto err_exit;
+
+ daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, daddr)))
+ goto free_page;
+
+ rxpage->page = page;
+ rxpage->daddr = daddr;
+ rxpage->order = order;
+ rxpage->pg_off = 0;
+
+ return 0;
+
+free_page:
+ __free_pages(page, order);
+
+err_exit:
+ return ret;
+}
+
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
+ int order)
+{
+ int ret;
+
+ if (rxbuf->rxdata.page) {
+ /* A refcount of one means the ring is the only user and can reuse the page */
+ if (page_ref_count(rxbuf->rxdata.page) > 1) {
+ /* Try reuse buffer */
+ rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
+ if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
+ (PAGE_SIZE << order)) {
+ self->stats.rx.pg_flips++;
+ } else {
+ /* Buffer exhausted. We have other users and
+ * should release this page and realloc
+ */
+ aq_free_rxpage(&rxbuf->rxdata,
+ aq_nic_get_dev(self->aq_nic));
+ self->stats.rx.pg_losts++;
+ }
+ } else {
+ rxbuf->rxdata.pg_off = 0;
+ self->stats.rx.pg_reuses++;
+ }
+ }
+
+ if (!rxbuf->rxdata.page) {
+ ret = aq_get_rxpage(&rxbuf->rxdata, order,
+ aq_nic_get_dev(self->aq_nic));
+ return ret;
+ }
+
+ return 0;
+}
+
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic)
{
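The three counters above map onto the three outcomes of the recycling check in aq_get_rxpages(): pg_flips when the next AQ_CFG_RX_FRAME_MAX slice of a still-referenced page is used, pg_losts when the page is exhausted while the stack still holds it, and pg_reuses when the ring is the sole owner again and the offset rewinds to zero. A compact sketch of that decision with made-up sizes:

#include <stdio.h>

#define RX_FRAME_MAX	2048U
#define PAGE_SPAN	4096U	/* stands in for PAGE_SIZE << order */

static void recycle(unsigned int refcount, unsigned int pg_off)
{
	if (refcount > 1) {
		pg_off += RX_FRAME_MAX;
		if (pg_off + RX_FRAME_MAX <= PAGE_SPAN)
			printf("flip:  reuse the page at offset %u\n", pg_off);
		else
			printf("lost:  page exhausted, free it and allocate a fresh one\n");
	} else {
		printf("reuse: sole owner again, rewind the offset to 0\n");
	}
}

int main(void)
{
	recycle(2, 0);		/* stack still holds a ref, next slice is free */
	recycle(2, 2048);	/* stack still holds a ref, page exhausted */
	recycle(1, 2048);	/* ring is the only user again */
	return 0;
}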
@@ -81,6 +160,11 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+ self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
+ (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
self = aq_ring_alloc(self, aq_nic);
if (!self) {
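The fls() expression above derives the default RX page order from AQ_CFG_RX_FRAME_MAX and PAGE_SIZE; the configured rxpageorder can only raise it. A userspace equivalent, with fls() reimplemented and a few hypothetical size combinations:

#include <stdio.h>

static int fls_u32(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static unsigned int rx_page_order(unsigned int frame_max, unsigned int page_size)
{
	return fls_u32(frame_max / page_size +
		       (frame_max % page_size ? 1 : 0)) - 1;
}

int main(void)
{
	printf("2K frame,  4K pages -> order %u\n", rx_page_order(2048, 4096));
	printf("16K frame, 4K pages -> order %u\n", rx_page_order(16384, 4096));
	printf("2K frame, 64K pages -> order %u\n", rx_page_order(2048, 65536));
	return 0;
}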
@@ -201,22 +285,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- int err = 0;
bool is_rsc_completed = true;
+ int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
self->sw_head = aq_ring_next_dx(self, self->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ struct aq_ring_buff_s *buff_ = NULL;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
unsigned int i = 0U;
- struct aq_ring_buff_s *buff_ = NULL;
+ u16 hdr_len;
- if (buff->is_error) {
- __free_pages(buff->page, 0);
+ if (buff->is_error)
continue;
- }
if (buff->is_cleaned)
continue;
@@ -246,45 +329,67 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
}
+ dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+
/* for single fragment packets use build_skb() */
if (buff->is_eop &&
buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
- skb = build_skb(page_address(buff->page),
+ skb = build_skb(aq_buf_vaddr(&buff->rxdata),
AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
}
-
skb_put(skb, buff->len);
+ page_ref_inc(buff->rxdata.page);
} else {
- skb = netdev_alloc_skb(ndev, ETH_HLEN);
+ skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
}
- skb_put(skb, ETH_HLEN);
- memcpy(skb->data, page_address(buff->page), ETH_HLEN);
- skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
- buff->len - ETH_HLEN,
- SKB_TRUESIZE(buff->len - ETH_HLEN));
+ hdr_len = buff->len;
+ if (hdr_len > AQ_CFG_RX_HDR_SIZE)
+ hdr_len = eth_get_headlen(skb->dev,
+ aq_buf_vaddr(&buff->rxdata),
+ AQ_CFG_RX_HDR_SIZE);
+
+ memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
+ ALIGN(hdr_len, sizeof(long)));
+
+ if (buff->len - hdr_len > 0) {
+ skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ buff->rxdata.pg_off + hdr_len,
+ buff->len - hdr_len,
+ AQ_CFG_RX_FRAME_MAX);
+ page_ref_inc(buff->rxdata.page);
+ }
if (!buff->is_eop) {
- for (i = 1U, next_ = buff->next,
- buff_ = &self->buff_ring[next_];
- true; next_ = buff_->next,
- buff_ = &self->buff_ring[next_], ++i) {
- skb_add_rx_frag(skb, i,
- buff_->page, 0,
+ buff_ = buff;
+ i = 1U;
+ do {
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_];
+
+ dma_sync_single_range_for_cpu(
+ aq_nic_get_dev(self->aq_nic),
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
buff_->len,
- SKB_TRUESIZE(buff->len -
- ETH_HLEN));
+ DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, i++,
+ buff_->rxdata.page,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ AQ_CFG_RX_FRAME_MAX);
+ page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
-
- if (buff_->is_eop)
- break;
- }
+ } while (!buff_->is_eop);
}
}
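The new receive path above splits larger frames: up to AQ_CFG_RX_HDR_SIZE bytes of protocol headers (sized by eth_get_headlen()) are copied into the skb head, and the payload stays in the ring page, attached as a fragment with an extra page reference. A toy illustration of that split, with AQ_CFG_RX_HDR_SIZE assumed to be 256 bytes purely for the example:

#include <stdio.h>

#define AQ_CFG_RX_HDR_SIZE 256U	/* assumed value, for illustration only */

static void split(unsigned int pkt_len, unsigned int parsed_hdr_len)
{
	unsigned int hdr_len = pkt_len;

	if (hdr_len > AQ_CFG_RX_HDR_SIZE)
		hdr_len = parsed_hdr_len;	/* what eth_get_headlen() would report */

	printf("pkt %4u bytes: copy %3u into the skb head, %4u as a page frag\n",
	       pkt_len, hdr_len, pkt_len - hdr_len);
}

int main(void)
{
	split(60, 54);		/* small frame: copied whole */
	split(1514, 54);	/* MTU-sized frame: headers only, payload stays in the page */
	return 0;
}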
@@ -310,12 +415,15 @@ err_exit:
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+ unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
+ if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
+ self->size / 2))
+ return err;
+
for (i = aq_ring_avail_dx(self); i--;
self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
buff = &self->buff_ring[self->sw_tail];
@@ -323,30 +431,15 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff->flags = 0U;
buff->len = AQ_CFG_RX_FRAME_MAX;
- buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
- if (!buff->page) {
- err = -ENOMEM;
+ err = aq_get_rxpages(self, buff, page_order);
+ if (err)
goto err_exit;
- }
-
- buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
- buff->page, 0,
- AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
- if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
- err = -ENOMEM;
- goto err_exit;
- }
+ buff->pa = aq_buf_daddr(&buff->rxdata);
buff = NULL;
}
err_exit:
- if (err < 0) {
- if (buff && buff->page)
- __free_pages(buff->page, 0);
- }
-
return err;
}
@@ -359,10 +452,7 @@ void aq_ring_rx_deinit(struct aq_ring_s *self)
self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
- dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
- AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
- __free_pages(buff->page, 0);
+ aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
}
err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index ac1329f4051d..cfffc301e746 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -17,6 +17,13 @@
struct page;
struct aq_nic_cfg_s;
+struct aq_rxpage {
+ struct page *page;
+ dma_addr_t daddr;
+ unsigned int order;
+ unsigned int pg_off;
+};
+
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
* 8bytes|len l3,l4 | pa | pa | pa
@@ -31,28 +38,21 @@ struct aq_nic_cfg_s;
*/
struct __packed aq_ring_buff_s {
union {
+ /* RX/TX */
+ dma_addr_t pa;
/* RX */
struct {
u32 rss_hash;
u16 next;
u8 is_hash_l4;
u8 rsvd1;
- struct page *page;
+ struct aq_rxpage rxdata;
};
/* EOP */
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
};
- /* DX */
- struct {
- dma_addr_t pa;
- };
- /* SOP */
- struct {
- dma_addr_t pa_sop;
- u32 len_pkt_sop;
- };
/* TxC */
struct {
u32 mss;
@@ -91,6 +91,9 @@ struct aq_ring_stats_rx_s {
u64 bytes;
u64 lro_packets;
u64 jumbo_packets;
+ u64 pg_losts;
+ u64 pg_flips;
+ u64 pg_reuses;
};
struct aq_ring_stats_tx_s {
@@ -116,6 +119,7 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for faster math */
+ unsigned int page_order;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
};
@@ -126,6 +130,16 @@ struct aq_ring_param_s {
cpumask_t affinity_mask;
};
+static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
+{
+ return page_to_virt(rxpage->page) + rxpage->pg_off;
+}
+
+static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
+{
+ return rxpage->daddr + rxpage->pg_off;
+}
+
static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
unsigned int dx)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d335c334fa56..a2e4ca1782ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -353,6 +353,9 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_rx->errors += rx->errors;
stats_rx->jumbo_packets += rx->jumbo_packets;
stats_rx->lro_packets += rx->lro_packets;
+ stats_rx->pg_losts += rx->pg_losts;
+ stats_rx->pg_flips += rx->pg_flips;
+ stats_rx->pg_reuses += rx->pg_reuses;
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f6f8338153a2..9fe507fe2d7f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -350,10 +350,10 @@ err_exit:
static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
- { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
- { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
- { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
- { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
int err = 0;
@@ -619,8 +619,6 @@ err_exit:
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- struct device *ndev = aq_nic_get_dev(ring->aq_nic);
-
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL;
@@ -687,8 +685,6 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
is_err &= ~0x18U;
is_err &= ~0x04U;
- dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
-
if (is_err || rxd_wb->type & 0x1000U) {
/* status error or DMA error */
buff->is_error = 1U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index b31dba1b1a55..bfcda12d73de 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -259,7 +259,13 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
hw_atl_rpo_lro_inactive_interval_set(self, 0);
- hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
+ /* The LRO time base divider is 5 us (0x61a), which is multiplied
+ * by 50 (0x32) to give the default maximum coalescing interval
+ * of 250 us.
+ */
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);
+
hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
@@ -273,6 +279,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_rpo_lro_en_set(self,
aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ hw_atl_itr_rsc_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+
+ hw_atl_itr_rsc_delay_set(self, 1U);
}
return aq_hw_err_from_flags(self);
}
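As a sanity check on the arithmetic in the comment above: a 5 us time base multiplied by a coalescing value of 50 gives 250 us. The per-tick figure is taken from the comment, not from the datasheet:

#include <stdio.h>

int main(void)
{
	const unsigned int lro_timebase_us = 5;		/* per the comment: 0x61A divider ~ 5 us */
	const unsigned int lro_coalesce_ticks = 50;	/* value passed to the coalescing-interval setter */

	printf("max LRO coalescing interval = %u us\n",
	       lro_timebase_us * lro_coalesce_ticks);
	return 0;
}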
@@ -378,10 +388,10 @@ err_exit:
static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
- { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
- { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
- { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
- { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
int err = 0;
@@ -433,6 +443,11 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
+ /* Enable link interrupt */
+ if (aq_nic_cfg->link_irq_vec)
+ hw_atl_reg_gen_irq_map_set(self, BIT(7) |
+ aq_nic_cfg->link_irq_vec, 3U);
+
hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
err_exit:
@@ -654,8 +669,6 @@ err_exit:
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- struct device *ndev = aq_nic_get_dev(ring->aq_nic);
-
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL;
@@ -697,8 +710,6 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_cso_err = 0U;
}
- dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
-
if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
/* MAC error or DMA error */
buff->is_error = 1U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index 2cc8dacfdc27..b1c0b6850e60 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -32,9 +32,6 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
-#define hw_atl_b0_caps_aqc111e hw_atl_b0_caps_aqc108
-#define hw_atl_b0_caps_aqc112e hw_atl_b0_caps_aqc109
-
extern const struct aq_hw_ops hw_atl_ops_b0;
#define hw_atl_ops_b1 hw_atl_ops_b0
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index b318eefd36ae..ea98a08d7820 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -78,7 +78,7 @@
#define HW_ATL_B0_TC_MAX 1U
#define HW_ATL_B0_RSS_MAX 8U
-#define HW_ATL_B0_LRO_RXD_MAX 2U
+#define HW_ATL_B0_LRO_RXD_MAX 16U
#define HW_ATL_B0_RS_SLIP_ENABLED 0U
/* (256k -1(max pay_len) - 54(header)) */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 0722b8e01964..eaab25cd08b3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -49,11 +49,6 @@ u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
HW_ATL_GLB_SOFT_RES_SHIFT);
}
-u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
-}
-
u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
@@ -65,44 +60,24 @@ u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
}
-u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
-}
-
-u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
-}
-
-u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
-}
-
-u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
-}
-
-u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
}
/* interrupt */
@@ -315,6 +290,21 @@ void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
HW_ATL_ITR_RES_SHIFT, res_irq);
}
+/* set RSC interrupt */
+void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_RSC_EN_ADR, enable);
+}
+
+/* set RSC delay */
+void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RSC_DELAY_ADR,
+ HW_ATL_ITR_RSC_DELAY_MSK,
+ HW_ATL_ITR_RSC_DELAY_SHIFT,
+ delay);
+}
+
/* rdm */
void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index d46351890b16..2eb44e1cff70 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -40,29 +40,17 @@ u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
-/* get rx dma good octet counter lsw */
-u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+/* get rx dma good octet counter */
+u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);
-/* get rx dma good packet counter lsw */
-u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+/* get rx dma good packet counter */
+u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);
-/* get tx dma good octet counter lsw */
-u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+/* get tx dma good octet counter */
+u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);
-/* get tx dma good packet counter lsw */
-u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
-
-/* get rx dma good octet counter msw */
-u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
-
-/* get rx dma good packet counter msw */
-u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
-
-/* get tx dma good octet counter msw */
-u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
-
-/* get tx dma good packet counter msw */
-u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+/* get tx dma good packet counter */
+u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);
/* get msm rx errors counter register */
u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
@@ -82,9 +70,6 @@ u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm rx unicast octets counter register 0 */
u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
-/* get rx dma statistics counter 7 */
-u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
-
/* get msm tx errors counter register */
u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
@@ -152,6 +137,12 @@ u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+/* set RSC interrupt */
+void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable);
+
+/* set RSC delay */
+void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay);
+
/* rdm */
/* set cpu id */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index fb45bc2d99cf..b64140924a02 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -58,9 +58,6 @@
/* preprocessor definitions for msm rx unicast octets counter register 0 */
#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
-/* preprocessor definitions for rx dma statistics counter 7 */
-#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u
-
/* preprocessor definitions for msm tx unicast frames counter register */
#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
@@ -95,6 +92,19 @@
#define HW_ATL_ITR_RES_MSK 0x80000000
/* lower bit position of bitfield itr_reset */
#define HW_ATL_ITR_RES_SHIFT 31
+
+/* register address for bitfield rsc_en */
+#define HW_ATL_ITR_RSC_EN_ADR 0x00002200
+
+/* register address for bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_ADR 0x00002204
+/* bitmask for bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_MSK 0x0000000f
+/* width of bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_WIDTH 4
+/* lower bit position of bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_SHIFT 0
+
/* register address for bitfield dca{d}_cpuid[7:0] */
#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index eb4b99d56081..1208f7ecdd76 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -545,7 +545,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
pmbox->stats.dpc = atomic_read(&self->dpc);
} else {
- pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
+ pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
}
err_exit:;
@@ -763,6 +763,7 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct hw_atl_utils_mbox mbox;
+ struct aq_stats_s *cs = &self->curr_stats;
hw_atl_utils_mpi_read_stats(self, &mbox);
@@ -789,10 +790,11 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(dpc);
}
#undef AQ_SDELTA
- self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
- self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
- self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
- self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
+
+ cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
@@ -960,6 +962,7 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_state = hw_atl_utils_mpi_set_state,
.update_link_status = hw_atl_utils_mpi_get_link_status,
.update_stats = hw_atl_utils_update_stats,
+ .get_phy_temp = NULL,
.set_power = aq_fw1x_set_power,
.set_eee_rate = NULL,
.get_eee_rate = NULL,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index fe6c5658e016..fbc9d6ac841f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -38,6 +38,7 @@
#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL)
#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP)
#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE)
+#define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE)
#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT)
@@ -310,6 +311,40 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
return hw_atl_utils_update_stats(self);
}
+static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 temp_val = mpi_opts & HW_ATL_FW2X_CTRL_TEMPERATURE;
+ u32 phy_temp_offset;
+ u32 temp_res;
+ int err = 0;
+ u32 val;
+
+ phy_temp_offset = self->mbox_addr +
+ offsetof(struct hw_atl_utils_mbox, info) +
+ offsetof(struct hw_aq_info, phy_temperature);
+ /* Toggle the CTRL_TEMPERATURE bit (0x36C.18) to ask FW for a fresh PHY temperature */
+ mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ /* Wait for FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ temp_val !=
+ (val & HW_ATL_FW2X_CTRL_TEMPERATURE),
+ 1U, 10000U);
+ err = hw_atl_utils_fw_downld_dwords(self, phy_temp_offset,
+ &temp_res, 1);
+
+ if (err)
+ return err;
+
+ /* Convert PHY temperature from 1/256 degree Celsius
+ * to 1/1000 degree Celsius.
+ */
+ *temp = temp_res * 1000 / 256;
+
+ return 0;
+}
+
static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
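The conversion at the end of aq_fw2x_get_phy_temp() maps the firmware's 1/256 degree Celsius units onto the millidegree convention used elsewhere in the kernel. A worked example with an invented raw reading:

#include <stdio.h>

int main(void)
{
	/* Hypothetical raw reading: 46.5 degC expressed in 1/256-degree units. */
	unsigned int temp_res = 46 * 256 + 128;
	int temp_millideg = temp_res * 1000 / 256;

	printf("raw=%u -> %d.%03d degC\n",
	       temp_res, temp_millideg / 1000, temp_millideg % 1000);
	return 0;
}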
@@ -509,6 +544,7 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
.update_stats = aq_fw2x_update_stats,
+ .get_phy_temp = aq_fw2x_get_phy_temp,
.set_power = aq_fw2x_set_power,
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index ff3d68532f5f..13a1d99b29c6 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -960,8 +960,8 @@ int arc_emac_probe(struct net_device *ndev, int interface)
/* Get MAC address from device tree */
mac_addr = of_get_mac_address(dev->of_node);
- if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
else
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 9e07b469066a..f35c9a75be50 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1721,7 +1721,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.scc += smb->tx_1_col;
adapter->soft_stats.mcc += smb->tx_2_col;
adapter->soft_stats.latecol += smb->tx_late_col;
- adapter->soft_stats.tx_underun += smb->tx_underrun;
+ adapter->soft_stats.tx_underrun += smb->tx_underrun;
adapter->soft_stats.tx_trunc += smb->tx_trunc;
adapter->soft_stats.tx_pause += smb->tx_pause;
@@ -2439,7 +2439,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
atl1_tx_map(adapter, skb, ptpd);
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
- mmiowb();
return NETDEV_TX_OK;
}
@@ -3179,7 +3178,7 @@ static struct atl1_stats atl1_gstrings_stats[] = {
{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
- {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
+ {"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)},
{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 34a58cd846a0..eacff19ea05b 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -681,7 +681,7 @@ struct atl1_sft_stats {
u64 scc; /* packets TX after a single collision */
u64 mcc; /* packets TX after multiple collisions */
u64 latecol; /* TX packets w/ late collisions */
- u64 tx_underun; /* TX packets aborted due to TX FIFO underrun
+ u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun
* or TRD FIFO underrun */
u64 tx_trunc; /* TX packets truncated due to size > MTU */
u64 rx_pause; /* num Pause packets received. */
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index d99317b3d891..dd81c5863111 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -553,7 +553,7 @@ static void atl2_intr_tx(struct atl2_adapter *adapter)
netdev->stats.tx_aborted_errors++;
if (txs->late_col)
netdev->stats.tx_window_errors++;
- if (txs->underun)
+ if (txs->underrun)
netdev->stats.tx_fifo_errors++;
} while (1);
@@ -908,7 +908,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
(adapter->txd_write_ptr >> 2));
- mmiowb();
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index c64a6bdfa7ae..25ec84cb4853 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -260,7 +260,7 @@ struct tx_pkt_status {
unsigned multi_col:1;
unsigned late_col:1;
unsigned abort_col:1;
- unsigned underun:1; /* current packet is aborted
+ unsigned underrun:1; /* current packet is aborted
* due to txram underrun */
unsigned:3; /* reserved */
unsigned update:1; /* always 1'b1 in tx_status_buf */
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 6f56276015a4..3c4967eecef1 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -404,6 +404,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int dma_len;
unsigned int align;
unsigned int next;
+ bool xmit_more;
if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
netif_stop_queue(dev);
@@ -423,9 +424,10 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ xmit_more = netdev_xmit_more();
if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
netif_stop_queue(dev);
- skb->xmit_more = 0;
+ xmit_more = false;
}
next = priv->tx_next;
@@ -450,7 +452,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
desc->n_addr = priv->tx_bufs[next].dma_desc;
desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
- if (!skb->xmit_more)
+ if (!xmit_more)
desc->config |= DESC_EOC;
txb->skb = skb;
@@ -468,7 +470,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_next = next;
- if (!skb->xmit_more) {
+ if (!xmit_more) {
smp_wmb();
priv->tx_chain->ready = true;
priv->tx_chain = NULL;
@@ -1461,7 +1463,7 @@ static int nb8800_probe(struct platform_device *pdev)
dev->irq = irq;
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(dev->dev_addr, mac);
if (!is_valid_ether_addr(dev->dev_addr))
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 716bfbba59cf..461b2c0b2ed6 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -196,6 +196,7 @@ config BNXT
depends on PCI
select FW_LOADER
select LIBCRC32C
+ select NET_DEVLINK
---help---
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index bc3ac369cbe3..c623896e3ccb 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -116,15 +116,6 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
-static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
- struct dma_desc *desc,
- unsigned int port)
-{
- /* Ports are latched, so write upper address first */
- tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
- tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
-}
-
/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
netdev_features_t wanted)
@@ -1291,11 +1282,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
struct bcm_sysport_tx_ring *ring;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
- struct dma_desc *desc;
+ u32 len_status, addr_lo;
unsigned int skb_len;
unsigned long flags;
dma_addr_t mapping;
- u32 len_status;
u16 queue;
int ret;
@@ -1338,10 +1328,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
dma_unmap_addr_set(cb, dma_addr, mapping);
dma_unmap_len_set(cb, dma_len, skb_len);
- /* Fetch a descriptor entry from our pool */
- desc = ring->desc_cpu;
-
- desc->addr_lo = lower_32_bits(mapping);
+ addr_lo = lower_32_bits(mapping);
len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
len_status |= (skb_len << DESC_LEN_SHIFT);
len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
@@ -1354,16 +1341,9 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
ring->curr_desc = 0;
ring->desc_count--;
- /* Ensure write completion of the descriptor status/length
- * in DRAM before the System Port WRITE_PORT register latches
- * the value
- */
- wmb();
- desc->addr_status_len = len_status;
- wmb();
-
- /* Write this descriptor address to the RING write port */
- tdma_port_write_desc_addr(priv, desc, ring->index);
+ /* Ports are latched, so write upper address first */
+ tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
+ tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
/* Check ring space and update SW control flow */
if (ring->desc_count == 0)
@@ -1489,28 +1469,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
unsigned int index)
{
struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
- struct device *kdev = &priv->pdev->dev;
size_t size;
- void *p;
u32 reg;
/* Simple descriptors partitioning for now */
size = 256;
- /* We just need one DMA descriptor which is DMA-able, since writing to
- * the port will allocate a new descriptor in its internal linked-list
- */
- p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
- GFP_KERNEL);
- if (!p) {
- netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
- return -ENOMEM;
- }
-
ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
if (!ring->cbs) {
- dma_free_coherent(kdev, sizeof(struct dma_desc),
- ring->desc_cpu, ring->desc_dma);
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
return -ENOMEM;
}
@@ -1523,7 +1489,6 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
ring->size = size;
ring->clean_index = 0;
ring->alloc_size = ring->size;
- ring->desc_cpu = p;
ring->desc_count = ring->size;
ring->curr_desc = 0;
@@ -1578,8 +1543,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
napi_enable(&ring->napi);
netif_dbg(priv, hw, priv->netdev,
- "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
- ring->size, ring->desc_cpu, ring->switch_queue,
+ "TDMA cfg, size=%d, switch q=%d,port=%d\n",
+ ring->size, ring->switch_queue,
ring->switch_port);
return 0;
@@ -1589,7 +1554,6 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
unsigned int index)
{
struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
- struct device *kdev = &priv->pdev->dev;
u32 reg;
/* Caller should stop the TDMA engine */
@@ -1611,12 +1575,6 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
kfree(ring->cbs);
ring->cbs = NULL;
-
- if (ring->desc_dma) {
- dma_free_coherent(kdev, sizeof(struct dma_desc),
- ring->desc_cpu, ring->desc_dma);
- ring->desc_dma = 0;
- }
ring->size = 0;
ring->alloc_size = 0;
@@ -2274,8 +2232,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
u16 queue = skb_get_queue_mapping(skb);
@@ -2283,7 +2240,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2291,7 +2248,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
return tx_ring->index;
}
@@ -2548,7 +2505,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* Initialize netdevice members */
macaddr = of_get_mac_address(dn);
- if (!macaddr || !is_valid_ether_addr(macaddr)) {
+ if (IS_ERR(macaddr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
eth_hw_addr_random(dev);
} else {
@@ -2599,11 +2556,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
- "Broadcom SYSTEMPORT%s" REV_FMT
- " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ "Broadcom SYSTEMPORT%s " REV_FMT
+ " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
priv->is_lite ? " Lite" : "",
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
- priv->base, priv->irq0, priv->irq1, txq, rxq);
+ priv->irq0, priv->irq1, txq, rxq);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 0b192fea9c5d..6f3141c86436 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -516,12 +516,6 @@ struct bcm_rsb {
#define TDMA_DEBUG 0x64c
-/* Transmit/Receive descriptor */
-struct dma_desc {
- u32 addr_status_len;
- u32 addr_lo;
-};
-
/* Number of Receive hardware descriptor words */
#define SP_NUM_HW_RX_DESC_WORDS 1024
#define SP_LT_NUM_HW_RX_DESC_WORDS 256
@@ -530,7 +524,7 @@ struct dma_desc {
#define SP_NUM_TX_DESC 1536
#define SP_LT_NUM_TX_DESC 256
-#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
+#define WORDS_PER_DESC 2
/* Rx/Tx common counter group.*/
struct bcm_sysport_pkt_counters {
@@ -718,7 +712,6 @@ struct bcm_sysport_net_dim {
struct bcm_sysport_tx_ring {
spinlock_t lock; /* Ring lock for tx reclaim/xmit */
struct napi_struct napi; /* NAPI per tx queue */
- dma_addr_t desc_dma; /* DMA cookie */
unsigned int index; /* Ring index */
unsigned int size; /* Ring current size */
unsigned int alloc_size; /* Ring one-time allocated size */
@@ -727,7 +720,6 @@ struct bcm_sysport_tx_ring {
unsigned int c_index; /* Last consumer index */
unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
- struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
unsigned long packets; /* packets statistics */
unsigned long bytes; /* bytes statistics */
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6fe074c1588b..34d18302b1a3 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -132,7 +132,7 @@ static int bgmac_probe(struct bcma_device *core)
mac = of_get_mac_address(bgmac->dev->of_node);
/* If no MAC address assigned via device tree, check SPROM */
- if (!mac) {
+ if (IS_ERR_OR_NULL(mac)) {
switch (core->core_unit) {
case 0:
mac = sprom->et0mac;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 894eda5b13cf..6dc0dd91ad11 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -193,7 +193,7 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->dma_dev = &pdev->dev;
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr);
else
dev_warn(&pdev->dev, "MAC address not present in device tree\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d63371d70bce..dfdd14eadd57 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3305,8 +3305,6 @@ next_rx:
BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
- mmiowb();
-
return rx_pkt;
}
@@ -6723,8 +6721,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
BNX2_WR16(bp, txr->tx_bidx_addr, prod);
BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
- mmiowb();
-
txr->tx_prod = prod;
if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecb1bd7eb508..008ad0ca89ba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1909,8 +1909,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1932,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb, NULL) %
+ return netdev_pick_tx(dev, skb, NULL) %
(BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
@@ -4166,8 +4165,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
- mmiowb();
-
txdata->tx_bd_prod += nbd;
if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 2462e7aa0c5d..c2f6e44e9a3f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -498,8 +498,7 @@ int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
@@ -527,8 +526,6 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
((u32 *)&rx_prods)[i]);
- mmiowb(); /* keep prod updates ordered */
-
DP(NETIF_MSG_RX_STATUS,
"queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
@@ -653,7 +650,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
/* Make sure that ACK is written */
- mmiowb();
barrier();
}
@@ -674,7 +670,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
/* Make sure that ACK is written */
- mmiowb();
barrier();
}
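These bnx2/bnx2x hunks (and the matching ones further down in bnx2x_main.c, bnx2x_sp.c, bnx2x_sriov.c, bnx2x_vfpf.c and bnxt.c) all drop explicit mmiowb() calls after doorbell and register writes: the kernel now supplies that MMIO-versus-spinlock ordering implicitly in the unlock path on the architectures that need it, so the per-driver barrier is redundant. A minimal sketch of the pattern, assuming a generic driver; ring_doorbell, db and lock are illustrative:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static void ring_doorbell(void __iomem *db, u32 val, spinlock_t *lock)
{
	spin_lock(lock);
	writel(val, db);	/* posted MMIO doorbell write */
	/*
	 * mmiowb();	-- previously needed so the write could not be
	 *		   reordered past the unlock on some architectures;
	 *		   the unlock itself now provides that ordering.
	 */
	spin_unlock(lock);
}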
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 749d0ef44371..0745cccd416d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2623,7 +2623,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
wmb();
DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
- mmiowb();
barrier();
num_pkts++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index d9057c8bbeef..78326a6c0aba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 1
+#define BCM_5710_FW_REVISION_VERSION 11
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3639,8 +3639,10 @@ struct client_init_rx_data {
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1<<3)
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF<<4)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4
u8 vmqueue_mode_en_flg;
u8 extra_data_over_sgl_en_flg;
u8 cache_line_alignment_log_size;
@@ -3831,7 +3833,7 @@ struct eth_classify_cmd_header {
*/
struct eth_classify_header {
u8 rule_cnt;
- u8 reserved0;
+ u8 warning_on_error;
__le16 reserved1;
__le32 echo;
};
@@ -4752,6 +4754,8 @@ struct tpa_update_ramrod_data {
__le32 sge_page_base_hi;
__le16 sge_pause_thr_low;
__le16 sge_pause_thr_high;
+ u8 tpa_over_vlan_disable;
+ u8 reserved[7];
};
@@ -4946,7 +4950,7 @@ struct fairness_vars_per_port {
u32 upper_bound;
u32 fair_threshold;
u32 fairness_timeout;
- u32 reserved0;
+ u32 size_thr;
};
/*
@@ -5415,7 +5419,9 @@ struct function_start_data {
u8 sd_vlan_force_pri_val;
u8 c2s_pri_tt_valid;
u8 c2s_pri_default;
- u8 reserved2[6];
+ u8 tx_vlan_filtering_enable;
+ u8 tx_vlan_filtering_use_pvid;
+ u8 reserved2[4];
struct c2s_pri_trans_table_entry c2s_pri_trans_table;
};
@@ -5448,7 +5454,8 @@ struct function_update_data {
u8 reserved1;
__le16 sd_vlan_tag;
__le16 sd_vlan_eth_type;
- __le16 reserved0;
+ u8 tx_vlan_filtering_pvid_change_flg;
+ u8 reserved0;
__le32 reserved2;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 626b491f7674..03ac10b1cd1e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -869,9 +869,6 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
"write %x to HC %d (addr 0x%x)\n",
val, port, addr);
- /* flush all outstanding writes */
- mmiowb();
-
REG_WR(bp, addr, val);
if (REG_RD(bp, addr) != val)
BNX2X_ERR("BUG! Proper val not read from IGU!\n");
@@ -887,9 +884,6 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
- /* flush all outstanding writes */
- mmiowb();
-
REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
BNX2X_ERR("BUG! Proper val not read from IGU!\n");
@@ -1595,7 +1589,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
/*
* Ensure that HC_CONFIG is written before leading/trailing edge config
*/
- mmiowb();
barrier();
if (!CHIP_IS_E1(bp)) {
@@ -1611,9 +1604,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
}
-
- /* Make sure that interrupts are indeed enabled from here on */
- mmiowb();
}
static void bnx2x_igu_int_enable(struct bnx2x *bp)
@@ -1674,9 +1664,6 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
-
- /* Make sure that interrupts are indeed enabled from here on */
- mmiowb();
}
void bnx2x_int_enable(struct bnx2x *bp)
@@ -3833,7 +3820,6 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp)
REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
bp->spq_prod_idx);
- mmiowb();
}
/**
@@ -5244,7 +5230,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
/* No memory barriers */
storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
- mmiowb(); /* keep prod updates ordered */
}
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
@@ -6513,7 +6498,6 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
/* flush all */
mb();
- mmiowb();
}
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
@@ -6553,7 +6537,6 @@ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
/* flush all before enabling interrupts */
mb();
- mmiowb();
bnx2x_int_enable(bp);
@@ -7775,12 +7758,10 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
data, igu_addr_data);
REG_WR(bp, igu_addr_data, data);
- mmiowb();
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
- mmiowb();
barrier();
/* wait for clean up to finish */
@@ -9550,7 +9531,6 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
close ? "closing" : "opening");
- mmiowb();
}
#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
@@ -9674,7 +9654,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
- mmiowb();
}
}
@@ -9774,16 +9753,13 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
reset_mask1 & (~not_reset_mask1));
barrier();
- mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
reset_mask2 & (~stay_reset2));
barrier();
- mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
- mmiowb();
}
/**
@@ -9867,9 +9843,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
REG_WR(bp, MISC_REG_UNPREPARED, 0);
barrier();
- /* Make sure all is written to the chip before the reset */
- mmiowb();
-
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
@@ -14828,7 +14801,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
if (rc)
break;
- mmiowb();
barrier();
/* Start accepting on iSCSI L2 ring */
@@ -14863,7 +14835,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
if (!bnx2x_wait_sp_comp(bp, sp_bits))
BNX2X_ERR("rx_mode completion timed out!\n");
- mmiowb();
barrier();
/* Unset iSCSI L2 MAC */
@@ -15376,27 +15347,47 @@ static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
return 0;
}
+#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
+#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
+#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
+#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
+#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
+#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
+#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
+#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
+#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+
int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
int port = BP_PORT(bp);
+ u32 param, rule;
int rc;
if (!bp->hwtstamp_ioctl_called)
return 0;
+ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK;
switch (bp->tx_type) {
case HWTSTAMP_TX_ON:
bp->flags |= TX_TIMESTAMPING_EN;
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
- NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
- NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
BNX2X_ERR("One-step timestamping is not supported\n");
return -ERANGE;
}
+ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK;
switch (bp->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
@@ -15410,30 +15401,24 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+ REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
/* Initialize PTP detection L2 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+ REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -15441,10 +15426,8 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
break;
}
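Note that the new mask macros are not a pure refactor: each per-mode value is ANDed with the common detect mask, so the numbers actually written to the NIG PTP registers change, presumably in step with the 7.13.1 to 7.13.11 firmware bump earlier in this diff. Worked expansions of the definitions above, arithmetic only:

/*
 * BNX2X_PTP_TX_ON_PARAM_MASK = 0x5F5  & 0x6AA  = 0x4A0   (old literal: 0x6AA)
 * BNX2X_PTP_TX_ON_RULE_MASK  = 0x3DBB & 0x3EEE = 0x3CAA  (old literal: 0x3EEE)
 * BNX2X_PTP_V1_L4_PARAM_MASK = 0x5F5  & 0x7EE  = 0x5E4   (old literal: 0x7EE)
 * BNX2X_PTP_V1_L4_RULE_MASK  = 0x3DBB & 0x3FFE = 0x3DBA  (old literal: 0x3FFE)
 * BNX2X_PTP_V2_L4_PARAM_MASK = 0x5F5  & 0x7EA  = 0x5E0   (old literal: 0x7EA)
 * BNX2X_PTP_V2_L4_RULE_MASK  = 0x3DBB & 0x3FEE = 0x3DAA  (old literal: 0x3FEE)
 * BNX2X_PTP_V2_L2_PARAM_MASK = 0x5F5  & 0x6BF  = 0x4B5   (old literal: 0x6BF)
 * BNX2X_PTP_V2_L2_RULE_MASK  = 0x3DBB & 0x3EFF = 0x3CBB  (old literal: 0x3EFF)
 * BNX2X_PTP_V2_PARAM_MASK    = 0x5F5  & 0x6AA  = 0x4A0   (old literal: 0x6AA)
 * BNX2X_PTP_V2_RULE_MASK     = 0x3DBB & 0x3EEE = 0x3CAA  (old literal: 0x3EEE)
 */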
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7b22a6d8514c..80d250a6d048 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5039,7 +5039,6 @@ static inline int bnx2x_q_init(struct bnx2x *bp,
/* As no ramrod is sent, complete the command immediately */
o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
- mmiowb();
smp_mb();
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index c97b642e6537..0edbb0a76847 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -100,13 +100,11 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
cmd_data.sb_id_and_flags, igu_addr_data);
REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
- mmiowb();
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
- mmiowb();
barrier();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index a9bdc21873d3..0752b7fa4d9c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -172,8 +172,6 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
/* Trigger the PF FW */
writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
- mmiowb();
-
/* Wait for PF to complete */
while ((tout >= 0) && (!*done)) {
msleep(interval);
@@ -957,7 +955,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
bnx2x_sample_bulletin(bp);
if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
- BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+ BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
rc = -EINVAL;
goto out;
}
@@ -1179,7 +1177,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
/* ack the FW */
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
- mmiowb();
/* copy the response header including status-done field,
* must be last dmae, must be after FW is acked
@@ -2174,7 +2171,6 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
*/
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
/* Firmware ack should be written before unlocking channel */
- mmiowb();
bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0bb9d7b3a2b6..8314c00d7537 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -114,6 +114,7 @@ enum board_idx {
BCM5745x_NPAR,
BCM57508,
BCM57504,
+ BCM57502,
BCM58802,
BCM58804,
BCM58808,
@@ -158,6 +159,7 @@ static const struct {
[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
+ [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -205,6 +207,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
+ { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -216,6 +219,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
@@ -551,15 +555,13 @@ normal_tx:
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
bnxt_db_write(bp, &txr->tx_db, prod);
tx_done:
- mmiowb();
-
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
- if (skb->xmit_more && !tx_buf->is_push)
+ if (netdev_xmit_more() && !tx_buf->is_push)
bnxt_db_write(bp, &txr->tx_db, prod);
netif_tx_stop_queue(txq);
@@ -899,7 +901,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
DMA_ATTR_WEAK_ORDERING);
if (unlikely(!payload))
- payload = eth_get_headlen(data_ptr, len);
+ payload = eth_get_headlen(bp->dev, data_ptr, len);
skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
if (!skb) {
@@ -1133,6 +1135,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons)) {
+ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return;
}
@@ -1585,15 +1589,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
cons = rxcmp->rx_cmp_opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- data = rx_buf->data;
- data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
prefetch(data_ptr);
misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
@@ -1610,12 +1616,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
rc = -EIO;
- goto next_rx;
+ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
+ goto next_rx_no_len;
}
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
@@ -1696,12 +1708,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
next_rx:
- rxr->rx_prod = NEXT_RX(prod);
- rxr->rx_next_cons = NEXT_RX(cons);
-
cpr->rx_packets += 1;
cpr->rx_bytes += len;
+next_rx_no_len:
+ rxr->rx_prod = NEXT_RX(prod);
+ rxr->rx_next_cons = NEXT_RX(cons);
+
next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
@@ -2123,7 +2136,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
&dim_sample);
net_dim(&cpr->dim, dim_sample);
}
- mmiowb();
return work_done;
}
@@ -3385,6 +3397,12 @@ static void bnxt_free_port_stats(struct bnxt *bp)
bp->hw_rx_port_stats_ext_map);
bp->hw_rx_port_stats_ext = NULL;
}
+
+ if (bp->hw_pcie_stats) {
+ dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
+ bp->hw_pcie_stats, bp->hw_pcie_stats_map);
+ bp->hw_pcie_stats = NULL;
+ }
}
static void bnxt_free_ring_stats(struct bnxt *bp)
@@ -3429,56 +3447,68 @@ static int bnxt_alloc_stats(struct bnxt *bp)
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
- if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
- if (bp->hw_rx_port_stats)
- goto alloc_ext_stats;
+ if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
+ return 0;
- bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
- sizeof(struct tx_port_stats) + 1024;
+ if (bp->hw_rx_port_stats)
+ goto alloc_ext_stats;
- bp->hw_rx_port_stats =
- dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
- &bp->hw_rx_port_stats_map,
- GFP_KERNEL);
- if (!bp->hw_rx_port_stats)
- return -ENOMEM;
+ bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
+ sizeof(struct tx_port_stats) + 1024;
+
+ bp->hw_rx_port_stats =
+ dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
+ &bp->hw_rx_port_stats_map,
+ GFP_KERNEL);
+ if (!bp->hw_rx_port_stats)
+ return -ENOMEM;
- bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
- 512;
- bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
- sizeof(struct rx_port_stats) + 512;
- bp->flags |= BNXT_FLAG_PORT_STATS;
+ bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
+ bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
+ sizeof(struct rx_port_stats) + 512;
+ bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats:
- /* Display extended statistics only if FW supports it */
- if (bp->hwrm_spec_code < 0x10804 ||
- bp->hwrm_spec_code == 0x10900)
+ /* Display extended statistics only if FW supports it */
+ if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
+ if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
return 0;
- if (bp->hw_rx_port_stats_ext)
- goto alloc_tx_ext_stats;
+ if (bp->hw_rx_port_stats_ext)
+ goto alloc_tx_ext_stats;
- bp->hw_rx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map,
- GFP_KERNEL);
- if (!bp->hw_rx_port_stats_ext)
- return 0;
+ bp->hw_rx_port_stats_ext =
+ dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
+ &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
+ if (!bp->hw_rx_port_stats_ext)
+ return 0;
alloc_tx_ext_stats:
- if (bp->hw_tx_port_stats_ext)
- return 0;
+ if (bp->hw_tx_port_stats_ext)
+ goto alloc_pcie_stats;
- if (bp->hwrm_spec_code >= 0x10902) {
- bp->hw_tx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
- }
- bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
+ if (bp->hwrm_spec_code >= 0x10902 ||
+ (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
+ bp->hw_tx_port_stats_ext =
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_port_stats_ext),
+ &bp->hw_tx_port_stats_ext_map,
+ GFP_KERNEL);
}
+ bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
+
+alloc_pcie_stats:
+ if (bp->hw_pcie_stats ||
+ !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return 0;
+
+ bp->hw_pcie_stats =
+ dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
+ &bp->hw_pcie_stats_map, GFP_KERNEL);
+ if (!bp->hw_pcie_stats)
+ return 0;
+
+ bp->flags |= BNXT_FLAG_PCIE_STATS;
return 0;
}
@@ -4197,16 +4227,25 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_vnic_info *vnic;
+ u32 dst_ena = 0;
int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
- req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
+ dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+ req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
+ vnic = &bp->vnic_info[0];
+ } else {
+ vnic = &bp->vnic_info[fltr->rxq + 1];
+ }
+ req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
req.ethertype = htons(ETH_P_IP);
memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
@@ -4244,7 +4283,6 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.dst_port = keys->ports.dst;
req.dst_port_mask = cpu_to_be16(0xffff);
- req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
@@ -5125,10 +5163,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_TX,
close_path ? cmpl_ring_id :
@@ -5141,10 +5179,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
close_path ? cmpl_ring_id :
@@ -5163,10 +5201,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
@@ -5305,17 +5343,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_tx_rings = cpu_to_le16(tx_rings);
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
enables |= tx_rings + ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= rx_rings ?
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
enables |= cp_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -5355,14 +5392,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
enables |= tx_rings + ring_grps ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
@@ -5494,11 +5530,13 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
stat = bnxt_get_func_stat_ctxs(bp);
if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
- hw_resc->resv_stat_ctxs != stat ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5))))
return true;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
+ hw_resc->resv_irqs != nq)
+ return true;
return false;
}
@@ -6047,6 +6085,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_entries_multiple = 1;
ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctx->mrav_num_entries_units =
+ le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
} else {
@@ -6093,6 +6133,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct bnxt_ctx_pg_info *ctx_pg;
__le32 *num_entries;
__le64 *pg_dir;
+ u32 flags = 0;
u8 *pg_attr;
int i, rc;
u32 ena;
@@ -6152,6 +6193,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
ctx_pg = &ctx->mrav_mem;
req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
+ if (ctx->mrav_num_entries_units)
+ flags |=
+ FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.mrav_pg_size_mrav_lvl,
@@ -6178,6 +6222,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
*num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
+ req.flags = cpu_to_le32(flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
rc = -EIO;
@@ -6316,6 +6361,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
+ u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
u8 pg_lvl = 1;
@@ -6379,12 +6425,21 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctx_pg = &ctx->mrav_mem;
- ctx_pg->entries = extra_qps * 4;
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = 1024 * 256;
+ num_ah = 1024 * 128;
+ ctx_pg->entries = num_mr + num_ah;
mem_size = ctx->mrav_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
if (rc)
return rc;
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+ if (ctx->mrav_num_entries_units)
+ ctx_pg->entries =
+ ((num_mr / ctx->mrav_num_entries_units) << 16) |
+ (num_ah / ctx->mrav_num_entries_units);
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
@@ -6499,6 +6554,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
bp->tx_push_thresh = 0;
if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
@@ -6571,6 +6630,34 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
return 0;
}
+static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
+{
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
+ int rc = 0;
+ u32 flags;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
+ return 0;
+
+ resp = bp->hwrm_cmd_resp_addr;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_cfa_adv_qcaps_exit;
+
+ flags = le32_to_cpu(resp->flags);
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
+
+hwrm_cfa_adv_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -6662,6 +6749,15 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+ if (strlen(resp->active_pkg_name)) {
+ int fw_ver_len = strlen(bp->fw_ver_str);
+
+ snprintf(bp->fw_ver_str + fw_ver_len,
+ FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
+ resp->active_pkg_name);
+ bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
+ }
+
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
@@ -6694,6 +6790,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6743,6 +6843,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ u32 tx_stat_size;
int rc;
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
@@ -6752,13 +6853,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+ tx_stat_size = bp->hw_tx_port_stats_ext ?
+ sizeof(*bp->hw_tx_port_stats_ext) : 0;
+ req.tx_stat_size = cpu_to_le16(tx_stat_size);
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
- bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+ bp->fw_tx_stats_ext_size = tx_stat_size ?
+ le16_to_cpu(resp->tx_stat_size) / 8 : 0;
} else {
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
@@ -6795,6 +6899,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
return rc;
}
+static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
+{
+ struct hwrm_pcie_qstats_input req = {0};
+
+ if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
+ req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
+ req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
if (bp->vxlan_port_cnt) {
@@ -8642,7 +8759,7 @@ static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
req.port_id = cpu_to_le16(bp->pf.port_id);
req.phy_addr = phy_addr;
req.reg_addr = cpu_to_le16(reg & 0x1f);
- if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
+ if (mdio_phy_id_is_c45(phy_addr)) {
req.cl45_mdio = 1;
req.phy_addr = mdio_phy_id_prtad(phy_addr);
req.dev_addr = mdio_phy_id_devad(phy_addr);
@@ -8669,7 +8786,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
req.port_id = cpu_to_le16(bp->pf.port_id);
req.phy_addr = phy_addr;
req.reg_addr = cpu_to_le16(reg & 0x1f);
- if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
+ if (mdio_phy_id_is_c45(phy_addr)) {
req.cl45_mdio = 1;
req.phy_addr = mdio_phy_id_prtad(phy_addr);
req.dev_addr = mdio_phy_id_devad(phy_addr);
@@ -8951,8 +9068,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
skip_uc:
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc && vnic->mc_list_count) {
+ netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ }
if (rc)
- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
rc);
return rc;
@@ -8980,8 +9104,11 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
+ return true;
return false;
+ }
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -8996,7 +9123,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
int vnics, max_vnics, max_rss_ctxs;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- return false;
+ return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false;
@@ -9378,6 +9505,7 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_port_qstats_ext(bp);
+ bnxt_hwrm_pcie_qstats(bp);
}
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
@@ -10048,23 +10176,6 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
return rc;
}
-static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
- size_t len)
-{
- struct bnxt *bp = netdev_priv(dev);
- int rc;
-
- /* The PF and it's VF-reps only support the switchdev framework */
- if (!BNXT_PF(bp))
- return -EOPNOTSUPP;
-
- rc = snprintf(buf, len, "p%d", bp->pf.port_id);
-
- if (rc >= len)
- return -EOPNOTSUPP;
- return 0;
-}
-
int bnxt_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
@@ -10083,6 +10194,13 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return 0;
}
+static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return &bp->dl_port;
+}
+
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -10114,8 +10232,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bpf = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
- .ndo_get_port_parent_id = bnxt_get_port_parent_id,
- .ndo_get_phys_port_name = bnxt_get_phys_port_name
+ .ndo_get_devlink_port = bnxt_get_devlink_port,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -10439,6 +10556,26 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+ struct pci_dev *pdev = bp->pdev;
+ int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ u32 dw;
+
+ if (!pos) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* DSN (two dw) is at an offset of 4 from the cap pos */
+ pos += 4;
+ pci_read_config_dword(pdev, pos, &dw);
+ put_unaligned_le32(dw, &dsn[0]);
+ pci_read_config_dword(pdev, pos + 4, &dw);
+ put_unaligned_le32(dw, &dsn[4]);
+ return 0;
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -10572,6 +10709,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1;
goto init_err_pci_clean;
}
+
+ rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
+ rc);
+
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -10579,6 +10722,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
}
+ /* Read the adapter's DSN to use as the eswitch switch_id */
+ rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+ if (rc)
+ goto init_err_pci_clean;
+
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -10675,6 +10823,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
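Two hunks in this file work together: bnxt_get_phys_port_name()/ndo_get_port_parent_id are dropped from bnxt_netdev_ops in favour of ndo_get_devlink_port, and the new bnxt_pcie_dsn_get() fills bp->switch_id, which the bnxt_devlink.c hunk below hands to devlink_port_attrs_set(). With the port attributes populated, the devlink core can answer both the phys_port_name and the port_parent_id queries itself. A minimal sketch of the callback pattern, assuming a generic driver with an already-registered struct devlink_port; my_priv and my_get_devlink_port are illustrative names:

#include <linux/netdevice.h>
#include <net/devlink.h>

struct my_priv {
	struct devlink_port dl_port;	/* registered elsewhere via devlink_port_register() */
};

static struct devlink_port *my_get_devlink_port(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* devlink derives the port name ("pN") and the parent id (switch_id)
	 * from the attributes set on this port. */
	return &priv->dl_port;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_get_devlink_port	= my_get_devlink_port,
};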
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index cf81ace7a6e6..eca36dd6b751 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1227,6 +1227,7 @@ struct bnxt_ctx_mem_info {
u16 mrav_entry_size;
u16 tim_entry_size;
u32 tim_max_entries;
+ u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u32 flags;
@@ -1354,6 +1355,7 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1480,6 +1482,11 @@ struct bnxt {
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800
+ #define BNXT_FW_CAP_PKG_VER 0x00004000
+ #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
+ #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
+ #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1498,10 +1505,12 @@ struct bnxt {
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
struct tx_port_stats_ext *hw_tx_port_stats_ext;
+ struct pcie_ctx_hw_stats *hw_pcie_stats;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map;
dma_addr_t hw_tx_port_stats_ext_map;
+ dma_addr_t hw_pcie_stats_map;
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
@@ -1634,6 +1643,9 @@ struct bnxt {
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
+#define BNXT_PCIE_STATS_OFFSET(counter) \
+ (offsetof(struct pcie_ctx_hw_stats, counter) / 8)
+
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFF_DIAG_SUPPORT_OFFSET 0x5c
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index e1feb97bcd81..549c90d3e465 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <net/devlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
@@ -228,6 +229,9 @@ int bnxt_dl_register(struct bnxt *bp)
goto err_dl_unreg;
}
+ devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ bp->pf.port_id, false, 0,
+ bp->switch_id, sizeof(bp->switch_id));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index adabbe94a259..b1263821a6e9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -235,6 +235,9 @@ reset_coalesce:
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
+#define BNXT_PCIE_STATS_ENTRY(counter) \
+ { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
+
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -345,6 +348,10 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
BNXT_RX_STATS_EXT_COS_ENTRIES,
BNXT_RX_STATS_EXT_PFC_ENTRIES,
+ BNXT_RX_STATS_EXT_ENTRY(rx_bits),
+ BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
+ BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
+ BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
};
static const struct {
@@ -383,6 +390,24 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
+static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+} bnxt_pcie_stats_arr[] = {
+ BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
+ BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
+ BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
+ BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
+ BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
+ BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
+ BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
+ BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
+};
+
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
@@ -390,6 +415,7 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
+#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
@@ -407,6 +433,9 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI;
}
+ if (bp->flags & BNXT_FLAG_PCIE_STATS)
+ num_stats += BNXT_NUM_PCIE_STATS;
+
return num_stats;
}
@@ -509,6 +538,14 @@ skip_ring_stats:
}
}
}
+ if (bp->flags & BNXT_FLAG_PCIE_STATS) {
+ __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
+
+ for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
+ buf[j] = le64_to_cpu(*(pcie_stats +
+ bnxt_pcie_stats_arr[i].offset));
+ }
+ }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -609,6 +646,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
}
+ if (bp->flags & BNXT_FLAG_PCIE_STATS) {
+ for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
+ strcpy(buf, bnxt_pcie_stats_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
break;
case ETH_SS_TEST:
if (bp->num_tests)
@@ -3262,7 +3305,8 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev;
int i, rc;
- bnxt_get_pkgver(dev);
+ if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
+ bnxt_get_pkgver(dev);
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index b6c610339501..12bbb2a207d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -89,7 +89,10 @@ struct hwrm_short_input {
__le16 signature;
#define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
#define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
- __le16 unused_0;
+ __le16 target_id;
+ #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+ #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
+ #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
__le16 size;
__le64 req_addr;
};
@@ -211,6 +214,7 @@ struct cmd_nums {
#define HWRM_FWD_RESP 0xd2UL
#define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
#define HWRM_OEM_CMD 0xd4UL
+ #define HWRM_PORT_PRBS_TEST 0xd5UL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
@@ -262,6 +266,7 @@ struct cmd_nums {
#define HWRM_CFA_EEM_QCFG 0x122UL
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
+ #define HWRM_CFA_TFLIB 0x125UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
@@ -272,6 +277,7 @@ struct cmd_nums {
#define HWRM_ENGINE_CKV_RNG_GET 0x134UL
#define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
#define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
#define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
#define HWRM_ENGINE_QG_QUERY 0x13dUL
#define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
@@ -312,6 +318,11 @@ struct cmd_nums {
#define HWRM_SELFTEST_IRQ 0x202UL
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
#define HWRM_PCIE_QSTATS 0x204UL
+ #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
+ #define HWRM_MFG_TIMERS_QUERY 0x206UL
+ #define HWRM_MFG_OTP_CFG 0x207UL
+ #define HWRM_MFG_OTP_QCFG 0x208UL
+ #define HWRM_MFG_HDMA_TEST 0x209UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -325,6 +336,8 @@ struct cmd_nums {
#define HWRM_DBG_FW_CLI 0xff1aUL
#define HWRM_DBG_I2C_CMD 0xff1bUL
#define HWRM_DBG_RING_INFO_GET 0xff1cUL
+ #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
+ #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -350,23 +363,26 @@ struct cmd_nums {
/* ret_codes (size:64b/8B) */
struct ret_codes {
__le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
- #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
+ #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
+ #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
+ #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
__le16 unused_0[3];
};
@@ -387,11 +403,15 @@ struct hwrm_err_output {
#define HW_HASH_INDEX_SIZE 0x80
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1
+#define HWRM_TARGET_ID_BONO 0xFFF8
+#define HWRM_TARGET_ID_KONG 0xFFF9
+#define HWRM_TARGET_ID_APE 0xFFFA
+#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.0.47"
+#define HWRM_VERSION_RSVD 69
+#define HWRM_VERSION_STR "1.10.0.69"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -442,6 +462,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -449,7 +470,7 @@ struct hwrm_ver_get_output {
char hwrm_fw_name[16];
char mgmt_fw_name[16];
char netctrl_fw_name[16];
- u8 reserved2[16];
+ char active_pkg_name[16];
char roce_fw_name[16];
__le16 chip_num;
u8 chip_rev;
@@ -1047,6 +1068,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
#define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1715,7 +1737,7 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_entry_size;
__le16 tim_entry_size;
__le32 tim_max_entries;
- u8 unused_0[2];
+ __le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 valid;
};
@@ -1728,7 +1750,8 @@ struct hwrm_func_backing_store_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
@@ -2580,7 +2603,7 @@ struct hwrm_port_phy_qcfg_output {
u8 valid;
};
-/* hwrm_port_mac_cfg_input (size:320b/40B) */
+/* hwrm_port_mac_cfg_input (size:384b/48B) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2601,6 +2624,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -2610,6 +2634,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -2642,6 +2667,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
+ __s32 ptp_freq_adj_ppb;
+ u8 unused_1[4];
};
/* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -2680,8 +2707,9 @@ struct hwrm_port_mac_ptp_qcfg_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -2888,7 +2916,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:2368b/296B) */
+/* rx_port_stats_ext (size:2624b/328B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -2927,6 +2955,10 @@ struct rx_port_stats_ext {
__le64 pfc_pri6_rx_transitions;
__le64 pfc_pri7_rx_duration_us;
__le64 pfc_pri7_rx_transitions;
+ __le64 rx_bits;
+ __le64 rx_buffer_passed_threshold;
+ __le64 rx_pcs_symbol_err;
+ __le64 rx_corrected_bits;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -3029,6 +3061,35 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid;
};
+/* hwrm_port_ts_query_input (size:192b/24B) */
+struct hwrm_port_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+ #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ptp_msg_ts;
+ __le16 ptp_msg_seqid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
/* hwrm_port_phy_qcaps_input (size:192b/24B) */
struct hwrm_port_phy_qcaps_input {
__le16 req_type;
@@ -4703,7 +4764,8 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- u8 unused_1[7];
+ __le16 max_aggs_supported;
+ u8 unused_1[5];
u8 valid;
};
@@ -4723,6 +4785,7 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
__le32 enables;
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
@@ -5254,6 +5317,8 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
__le32 enables;
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
@@ -5272,8 +5337,11 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
u8 l2_addr[6];
- u8 unused_0[2];
+ u8 num_vlans;
+ u8 t_num_vlans;
u8 l2_addr_mask[6];
__le16 l2_ovlan;
__le16 l2_ovlan_mask;
@@ -5338,6 +5406,16 @@ struct hwrm_cfa_l2_filter_alloc_output {
__le16 resp_len;
__le64 l2_filter_id;
__le32 flow_id;
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3];
u8 valid;
};
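The flow_id word in this response now carries more than an opaque handle: bits 0-29 hold the value, bit 30 distinguishes internal from external flows, and bit 31 gives the direction. The same layout is repeated below for the tunnel, ntuple and flow-alloc responses. A minimal decode sketch using only the masks defined above (the function name is illustrative):

    #include <linux/kernel.h>
    #include <linux/types.h>
    /* CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_* come from the header shown above */

    static void example_decode_flow_id(__le32 raw)
    {
            u32 flow_id = le32_to_cpu(raw);
            u32 value = flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK;
            bool ext = flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE;
            bool tx = flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR;

            pr_debug("flow %u: %s, %s\n", value,
                     ext ? "external" : "internal", tx ? "TX" : "RX");
    }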
@@ -5504,6 +5582,16 @@ struct hwrm_cfa_tunnel_filter_alloc_output {
__le16 resp_len;
__le64 tunnel_filter_id;
__le32 flow_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3];
u8 valid;
};
@@ -5646,7 +5734,7 @@ struct hwrm_cfa_encap_record_free_output {
u8 valid;
};
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5678,6 +5766,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
__le64 l2_filter_id;
u8 src_macaddr[6];
__be16 ethertype;
@@ -5725,6 +5814,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__be16 dst_port;
__be16 dst_port_mask;
__le64 ntuple_filter_id_hint;
+ __le16 rfs_ring_tbl_idx;
+ u8 unused_0[6];
};
/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
@@ -5735,6 +5826,16 @@ struct hwrm_cfa_ntuple_filter_alloc_output {
__le16 resp_len;
__le64 ntuple_filter_id;
__le32 flow_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3];
u8 valid;
};
@@ -5934,19 +6035,20 @@ struct hwrm_cfa_flow_alloc_input {
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
@@ -5997,6 +6099,16 @@ struct hwrm_cfa_flow_alloc_output {
__le16 flow_handle;
u8 unused_0[2];
__le32 flow_id;
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
__le64 ext_flow_handle;
__le32 flow_counter_id;
u8 unused_1[3];
@@ -6011,7 +6123,8 @@ struct hwrm_cfa_flow_free_input {
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
- u8 unused_0[6];
+ __le16 unused_0;
+ __le32 flow_counter_id;
__le64 ext_flow_handle;
};
@@ -6199,8 +6312,10 @@ struct hwrm_cfa_eem_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
__le32 unused_0;
__le32 supported;
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
@@ -6226,7 +6341,9 @@ struct hwrm_cfa_eem_cfg_input {
#define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 unused_0;
+ #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
+ __le16 group_id;
+ __le16 unused_0;
__le32 num_entries;
__le32 unused_1;
__le16 key0_ctx_id;
@@ -6258,7 +6375,7 @@ struct hwrm_cfa_eem_qcfg_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcfg_output (size:128b/16B) */
+/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */
struct hwrm_cfa_eem_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -6269,6 +6386,8 @@ struct hwrm_cfa_eem_qcfg_output {
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries;
+ u8 unused_0[7];
+ u8 valid;
};
/* hwrm_cfa_eem_op_input (size:192b/24B) */
@@ -6300,6 +6419,39 @@ struct hwrm_cfa_eem_op_output {
u8 valid;
};
+/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[4];
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
@@ -6636,7 +6788,8 @@ struct hwrm_fw_qstatus_output {
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
u8 unused_0[6];
u8 valid;
};
@@ -6659,8 +6812,8 @@ struct hwrm_fw_set_time_input {
u8 unused_0;
__le16 millisecond;
__le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ #define FW_SET_TIME_REQ_ZONE_UTC 0
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
#define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
u8 unused_1[4];
};
@@ -7064,7 +7217,9 @@ struct hwrm_dbg_coredump_list_input {
__le64 host_dest_addr;
__le32 host_buf_len;
__le16 seq_no;
- u8 unused_0[2];
+ u8 flags;
+ #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+ u8 unused_0[1];
};
/* hwrm_dbg_coredump_list_output (size:128b/16B) */
@@ -7392,7 +7547,9 @@ struct hwrm_nvm_get_dev_info_output {
__le32 nvram_size;
__le32 reserved_size;
__le32 available_size;
- u8 unused_0[3];
+ u8 nvm_cfg_ver_maj;
+ u8 nvm_cfg_ver_min;
+ u8 nvm_cfg_ver_upd;
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 2bdd2da9aac7..f760921389a3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -406,26 +406,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->min_mtu = ETH_ZLEN;
}
-static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
-{
- struct pci_dev *pdev = bp->pdev;
- int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- u32 dw;
-
- if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN");
- return -EOPNOTSUPP;
- }
-
- /* DSN (two dw) is at an offset of 4 from the cap pos */
- pos += 4;
- pci_read_config_dword(pdev, pos, &dw);
- put_unaligned_le32(dw, &dsn[0]);
- pci_read_config_dword(pdev, pos + 4, &dw);
- put_unaligned_le32(dw, &dsn[4]);
- return 0;
-}
-
static int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -490,11 +470,6 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
}
}
- /* Read the adapter's DSN to use as the eswitch switch_id */
- rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
- if (rc)
- goto err;
-
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
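The deleted bnxt_pcie_dsn_get() read the 64-bit PCIe Device Serial Number from the DSN extended capability. For reference, a minimal standalone sketch of that pattern looks like the following (later kernels also grew a generic pci_get_dsn() helper for the same job); the function name here is illustrative:

    #include <linux/pci.h>
    #include <asm/unaligned.h>

    /* Read the 64-bit PCIe Device Serial Number into dsn[8]. */
    static int example_pcie_dsn_get(struct pci_dev *pdev, u8 dsn[8])
    {
            int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
            u32 dw;

            if (!pos)
                    return -EOPNOTSUPP;

            /* The two DSN dwords start 4 bytes past the capability header. */
            pci_read_config_dword(pdev, pos + 4, &dw);
            put_unaligned_le32(dw, &dsn[0]);
            pci_read_config_dword(pdev, pos + 8, &dw);
            put_unaligned_le32(dw, &dsn[4]);
            return 0;
    }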
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 510dfc1c236b..57dc3cbff36e 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4038,15 +4038,14 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
case L5CM_RAMROD_CMD_ID_CLOSE: {
struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
- if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
- netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
- l4kcqe->status, l5kcqe->completion_status);
- opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
- /* Fall through */
- } else {
+ if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
break;
- }
+
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+ l4kcqe->status, l5kcqe->completion_status);
+ opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
}
+ /* Fall through */
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
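The cnic.c hunk above inverts the CLOSE handling so the success path breaks out early and the error path shares the CLOSE_COMP code through an explicit fall-through. The general shape of that restructuring, sketched with placeholder names (the comment annotation is the form this tree used to satisfy -Wimplicit-fallthrough at the time):

    #include <linux/printk.h>

    #define EXAMPLE_CMD_CLOSE       1
    #define EXAMPLE_CMD_CLOSE_COMP  2

    static void example_handle_event(int opcode, int status)
    {
            switch (opcode) {
            case EXAMPLE_CMD_CLOSE:
                    if (status == 0)
                            break;          /* clean close, nothing more to do */
                    pr_warn("close failed, status %d\n", status);
                    /* Fall through */
            case EXAMPLE_CMD_CLOSE_COMP:
                    /* shared completion handling for both cases */
                    pr_info("close completed\n");
                    break;
            }
    }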
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 983245c0867c..374b9ff05c88 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1665,7 +1665,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
/* Packets are ready, update producer index */
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
@@ -3476,7 +3476,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (dn) {
macaddr = of_get_mac_address(dn);
- if (!macaddr) {
+ if (IS_ERR(macaddr)) {
dev_err(&pdev->dev, "can't find MAC address\n");
err = -EINVAL;
goto err;
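Two API transitions show up in the bcmgenet hunks: skb->xmit_more is replaced by netdev_xmit_more(), which reads the xmit-more hint from per-CPU state instead of the skb, and of_get_mac_address() now returns an ERR_PTR() on failure rather than NULL. A minimal sketch of the updated MAC lookup pattern, combining the deferral and random-address fallbacks seen in the macb and octeon_mgmt hunks further down (function name illustrative):

    #include <linux/err.h>
    #include <linux/etherdevice.h>
    #include <linux/of.h>
    #include <linux/of_net.h>

    static int example_set_mac(struct net_device *ndev, struct device_node *np)
    {
            const void *mac = of_get_mac_address(np);

            if (PTR_ERR(mac) == -EPROBE_DEFER)
                    return -EPROBE_DEFER;   /* nvmem provider not ready yet */
            if (!IS_ERR(mac))
                    ether_addr_copy(ndev->dev_addr, mac);
            else
                    eth_hw_addr_random(ndev);       /* no usable address in DT */
            return 0;
    }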
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 328373e0578f..6d1f9c822548 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1073,7 +1073,6 @@ static void tg3_int_reenable(struct tg3_napi *tnapi)
struct tg3 *tp = tnapi->tp;
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
- mmiowb();
/* When doing tagged status, this work check is unnecessary.
* The last_tag we write above tells the chip which piece of
@@ -4283,7 +4282,7 @@ static void tg3_power_down(struct tg3 *tp)
pci_set_power_state(tp->pdev, PCI_D3hot);
}
-static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
{
switch (val & MII_TG3_AUX_STAT_SPDMASK) {
case MII_TG3_AUX_STAT_10HALF:
@@ -4787,7 +4786,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
bool current_link_up;
u32 bmsr, val;
u32 lcl_adv, rmt_adv;
- u16 current_speed;
+ u32 current_speed;
u8 current_duplex;
int i, err;
@@ -5719,7 +5718,7 @@ out:
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
u32 orig_pause_cfg;
- u16 orig_active_speed;
+ u32 orig_active_speed;
u8 orig_active_duplex;
u32 mac_status;
bool current_link_up;
@@ -5823,7 +5822,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
int err = 0;
u32 bmsr, bmcr;
- u16 current_speed = SPEED_UNKNOWN;
+ u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
u32 local_adv, remote_adv, sgsr;
@@ -6999,7 +6998,6 @@ next_pkt_nopost:
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
tpr->rx_jmb_prod_idx);
}
- mmiowb();
} else if (work_mask) {
/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
* updated before the producer indices can be updated.
@@ -7210,8 +7208,6 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
dpr->rx_jmb_prod_idx);
- mmiowb();
-
if (err)
tw32_f(HOSTCC_MODE, tp->coal_now);
}
@@ -7278,7 +7274,6 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
HOSTCC_MODE_ENABLE |
tnapi->coal_now);
}
- mmiowb();
break;
}
}
@@ -8156,10 +8151,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_wake_queue(txq);
}
- if (!skb->xmit_more || netif_xmit_stopped(txq)) {
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
/* Packets are ready, update Tx producer idx on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
- mmiowb();
}
return NETDEV_TX_OK;
@@ -12763,9 +12757,6 @@ static int tg3_set_phys_id(struct net_device *dev,
{
struct tg3 *tp = netdev_priv(dev);
- if (!netif_running(tp->dev))
- return -EAGAIN;
-
switch (state) {
case ETHTOOL_ID_ACTIVE:
return 1; /* cycle on/off once per second */
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index a772a33b685c..6953d0546acb 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info {
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
- u16 speed;
+ u32 speed;
u8 duplex;
u8 autoneg;
u8 flowctrl;
@@ -2882,7 +2882,7 @@ struct tg3_link_config {
u8 active_flowctrl;
u8 active_duplex;
- u16 active_speed;
+ u32 active_speed;
u32 rmt_adv;
};
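Two themes run through the tg3 hunks: the mmiowb() calls go away because that barrier is now issued implicitly by spin_unlock() on the architectures that need it, and the link-speed fields widen from u16 to u32 so they can hold every value of the ethtool link-settings speed field, which is a u32 with SPEED_UNKNOWN defined as -1, a sentinel a u16 cannot store or compare against correctly. A standalone demonstration of that second point (plain userspace C, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define SPEED_UNKNOWN -1   /* as in include/uapi/linux/ethtool.h */

    int main(void)
    {
            uint16_t speed16 = SPEED_UNKNOWN;  /* truncated to 0xffff */
            uint32_t speed32 = SPEED_UNKNOWN;  /* 0xffffffff */

            /* speed16 promotes to int 65535, which never equals -1 */
            printf("u16 matches SPEED_UNKNOWN: %d\n", speed16 == SPEED_UNKNOWN);
            /* -1 converts to 0xffffffff here, so the u32 comparison matches */
            printf("u32 matches SPEED_UNKNOWN: %d\n", speed32 == SPEED_UNKNOWN);
            return 0;
    }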
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1522aee81884..c049410bc888 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -285,34 +285,22 @@ static void macb_set_hwaddr(struct macb *bp)
static void macb_get_hwaddr(struct macb *bp)
{
- struct macb_platform_data *pdata;
u32 bottom;
u16 top;
u8 addr[6];
int i;
- pdata = dev_get_platdata(&bp->pdev->dev);
-
/* Check all 4 address register for valid address */
for (i = 0; i < 4; i++) {
bottom = macb_or_gem_readl(bp, SA1B + i * 8);
top = macb_or_gem_readl(bp, SA1T + i * 8);
- if (pdata && pdata->rev_eth_addr) {
- addr[5] = bottom & 0xff;
- addr[4] = (bottom >> 8) & 0xff;
- addr[3] = (bottom >> 16) & 0xff;
- addr[2] = (bottom >> 24) & 0xff;
- addr[1] = top & 0xff;
- addr[0] = (top & 0xff00) >> 8;
- } else {
- addr[0] = bottom & 0xff;
- addr[1] = (bottom >> 8) & 0xff;
- addr[2] = (bottom >> 16) & 0xff;
- addr[3] = (bottom >> 24) & 0xff;
- addr[4] = top & 0xff;
- addr[5] = (top >> 8) & 0xff;
- }
+ addr[0] = bottom & 0xff;
+ addr[1] = (bottom >> 8) & 0xff;
+ addr[2] = (bottom >> 16) & 0xff;
+ addr[3] = (bottom >> 24) & 0xff;
+ addr[4] = top & 0xff;
+ addr[5] = (top >> 8) & 0xff;
if (is_valid_ether_addr(addr)) {
memcpy(bp->dev->dev_addr, addr, sizeof(addr));
@@ -510,12 +498,10 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct macb_platform_data *pdata;
struct phy_device *phydev;
struct device_node *np;
- int phy_irq, ret, i;
+ int ret, i;
- pdata = dev_get_platdata(&bp->pdev->dev);
np = bp->pdev->dev.of_node;
ret = 0;
@@ -530,8 +516,6 @@ static int macb_mii_probe(struct net_device *dev)
*/
if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
for (i = 0; i < PHY_MAX_ADDR; i++) {
- struct phy_device *phydev;
-
phydev = mdiobus_scan(bp->mii_bus, i);
if (IS_ERR(phydev) &&
PTR_ERR(phydev) != -ENODEV) {
@@ -559,19 +543,6 @@ static int macb_mii_probe(struct net_device *dev)
return -ENXIO;
}
- if (pdata) {
- if (gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev,
- pdata->phy_irq_pin, "phy int");
- if (!ret) {
- phy_irq = gpio_to_irq(pdata->phy_irq_pin);
- phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
- }
- } else {
- phydev->irq = PHY_POLL;
- }
- }
-
/* attach the mac to the phy */
ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
bp->phy_interface);
@@ -600,7 +571,6 @@ static int macb_mii_probe(struct net_device *dev)
static int macb_mii_init(struct macb *bp)
{
- struct macb_platform_data *pdata;
struct device_node *np;
int err = -ENXIO;
@@ -620,7 +590,6 @@ static int macb_mii_init(struct macb *bp)
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->pdev->dev;
- pdata = dev_get_platdata(&bp->pdev->dev);
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -634,9 +603,6 @@ static int macb_mii_init(struct macb *bp)
err = mdiobus_register(bp->mii_bus);
} else {
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
-
err = of_mdiobus_register(bp->mii_bus, np);
}
@@ -898,7 +864,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP) &&
+ gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
*/
@@ -2459,12 +2427,12 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
- bp->macbgem_ops.mog_init_rings(bp);
- macb_init_hw(bp);
-
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_enable(&queue->napi);
+ bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_hw(bp);
+
/* schedule a link state check */
phy_start(dev->phydev);
@@ -4050,7 +4018,6 @@ static int macb_probe(struct platform_device *pdev)
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
struct clk *tsu_clk = NULL;
unsigned int queue_mask, num_queues;
- struct macb_platform_data *pdata;
bool native_io;
struct phy_device *phydev;
struct net_device *dev;
@@ -4170,27 +4137,21 @@ static int macb_probe(struct platform_device *pdev)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
mac = of_get_mac_address(np);
- if (mac) {
+ if (PTR_ERR(mac) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_out_free_netdev;
+ } else if (!IS_ERR(mac)) {
ether_addr_copy(bp->dev->dev_addr, mac);
} else {
- err = nvmem_get_mac_address(&pdev->dev, bp->dev->dev_addr);
- if (err) {
- if (err == -EPROBE_DEFER)
- goto err_out_free_netdev;
- macb_get_hwaddr(bp);
- }
+ macb_get_hwaddr(bp);
}
err = of_get_phy_mode(np);
- if (err < 0) {
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata && pdata->is_rmii)
- bp->phy_interface = PHY_INTERFACE_MODE_RMII;
- else
- bp->phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
+ if (err < 0)
+ /* not found in DT, MII by default */
+ bp->phy_interface = PHY_INTERFACE_MODE_MII;
+ else
bp->phy_interface = err;
- }
/* IP specific init */
err = init(pdev);
@@ -4360,8 +4321,7 @@ static int __maybe_unused macb_resume(struct device *dev)
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
if (!(device_may_wakeup(&bp->dev->dev))) {
@@ -4377,8 +4337,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
static int __maybe_unused macb_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
if (!(device_may_wakeup(&bp->dev->dev))) {
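The runtime-PM hunks above drop the detour through to_platform_device(): platform_set_drvdata() stores its pointer in dev->driver_data, so dev_get_drvdata() on the struct device returns the same net_device directly. A minimal sketch of the resulting callback shape, with placeholder names:

    #include <linux/device.h>
    #include <linux/netdevice.h>

    static int example_runtime_suspend(struct device *dev)
    {
            struct net_device *netdev = dev_get_drvdata(dev);

            /* gate clocks, etc., using netdev_priv(netdev) as before */
            (void)netdev;
            return 0;
    }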
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 6650e2a5f171..7612ab6b286d 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -68,6 +68,7 @@ config LIQUIDIO
imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
+ select NET_DEVLINK
---help---
This driver supports Cavium LiquidIO Intelligent Server Adapters
based on CN66XX, CN68XX and CN23XX chips.
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index 2df7440f58df..39643be8c30a 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -38,9 +38,6 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
- /* make sure that the reset is written before starting timer */
- mmiowb();
-
/* Wait for 10ms as Octeon resets. */
mdelay(100);
@@ -487,9 +484,6 @@ void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
/* Disable Interrupts */
writeq(0, cn6xxx->intr_enb_reg64);
-
- /* make sure interrupts are really disabled */
- mmiowb();
}
static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
@@ -555,10 +549,6 @@ static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
value &= ~(1 << oq_no);
octeon_write_csr(oct, reg, value);
- /* Ensure that the enable register is written.
- */
- mmiowb();
-
spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index fb6f813cff65..eab805579f96 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2522,7 +2522,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
- xmit_more = skb->xmit_more;
+ xmit_more = netdev_xmit_more();
if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 54b245797d2e..db0b90555acb 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1585,7 +1585,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
}
- xmit_more = skb->xmit_more;
+ xmit_more = netdev_xmit_more();
if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index ce8c3f818666..934115d18488 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1449,7 +1449,6 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
iq->pkt_in_done -= iq->pkts_processed;
iq->pkts_processed = 0;
/* this write needs to be flushed before we release the lock */
- mmiowb();
spin_unlock_bh(&iq->lock);
oct = iq->oct_dev;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index a0c099f71524..017169023cca 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -513,8 +513,6 @@ int octeon_retry_droq_refill(struct octeon_droq *droq)
*/
wmb();
writel(desc_refilled, droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
reschedule = 0;
@@ -712,8 +710,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
*/
wmb();
writel(desc_refilled, droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
}
}
} /* for (each packet)... */
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index c6f4cbda040f..fcf20a8f92d9 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -278,7 +278,6 @@ ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
writel(iq->fill_cnt, iq->doorbell_reg);
/* make sure doorbell write goes through */
- mmiowb();
iq->fill_cnt = 0;
iq->last_db_time = jiffies;
return;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 5359c1021f42..0e5de88fd6e8 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1503,8 +1503,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
- memcpy(netdev->dev_addr, mac, ETH_ALEN);
+ if (!IS_ERR(mac))
+ ether_addr_copy(netdev->dev_addr, mac);
else
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aa2be4807191..c032bef1b776 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -32,6 +32,13 @@
#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
- cancel_delayed_work_sync(&nic->link_change_work);
-
/* wait till all queued set_rx_mode tasks completes */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq) {
+ cancel_delayed_work_sync(&nic->link_change_work);
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+ }
mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
/* wait till all queued set_rx_mode tasks completes if any */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq)
+ drain_workqueue(nic->nicvf_rx_mode_wq);
netif_carrier_off(netdev);
@@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev)
/* Send VF config done msg to PF */
nicvf_send_cfg_done(nic);
- INIT_DELAYED_WORK(&nic->link_change_work,
- nicvf_link_status_check_task);
- queue_delayed_work(nic->nicvf_rx_mode_wq,
- &nic->link_change_work, 0);
+ if (nic->nicvf_rx_mode_wq) {
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
+ }
return 0;
cleanup:
@@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
struct nicvf *nic = netdev_priv(netdev);
int orig_mtu = netdev->mtu;
+ /* For now just support only the usual MTU sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
+
netdev->mtu = new_mtu;
if (!netif_running(netdev))
@@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
bool bpf_attached = false;
int ret = 0;
- /* For now just support only the usual MTU sized frames */
- if (prog && (dev->mtu > 1500)) {
+ /* For now just support only the usual MTU sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (prog && dev->mtu > MAX_XDP_MTU) {
netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
dev->mtu);
return -EOPNOTSUPP;
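Working out the new limit: with the standard 14-byte Ethernet header (ETH_HLEN) and a 4-byte VLAN tag (VLAN_HLEN), MAX_XDP_MTU = 1530 - 14 - 2 * 4 = 1508, so both nicvf_change_mtu() and nicvf_xdp_setup() now reject the combination of XDP and any MTU above 1508, instead of the previous hard-coded 1500 check applied only at attach time.

    /* Values from if_ether.h / if_vlan.h, repeated here for the arithmetic */
    #define ETH_HLEN    14
    #define VLAN_HLEN   4
    #define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)   /* = 1508 */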
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
/* Check if page can be recycled */
if (page) {
ref_count = page_ref_count(page);
- /* Check if this page has been used once i.e 'put_page'
- * called after packet transmission i.e internal ref_count
- * and page's ref_count are equal i.e page can be recycled.
+ /* This page can be recycled if internal ref_count and page's
+ * ref_count are equal, indicating that the page has been used
+ * once for packet transmission. For non-XDP mode, internal
+ * ref_count is always '1'.
*/
- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
- pgcache->ref_count--;
- else
- page = NULL;
-
- /* In non-XDP mode, page's ref_count needs to be '1' for it
- * to be recycled.
- */
- if (!rbdr->is_xdp && (ref_count != 1))
+ if (rbdr->is_xdp) {
+ if (ref_count == pgcache->ref_count)
+ pgcache->ref_count--;
+ else
+ page = NULL;
+ } else if (ref_count != 1) {
page = NULL;
+ }
}
if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
while (head < rbdr->pgcnt) {
pgcache = &rbdr->pgcache[head];
if (pgcache->page && page_ref_count(pgcache->page) != 0) {
- if (!rbdr->is_xdp) {
- put_page(pgcache->page);
- continue;
+ if (rbdr->is_xdp) {
+ page_ref_sub(pgcache->page,
+ pgcache->ref_count - 1);
}
- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
put_page(pgcache->page);
}
head++;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 673c57b8023f..a65be851124f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -962,13 +962,13 @@ static void bgx_poll_for_sgmii_link(struct lmac *lmac)
lmac->last_duplex = (an_result >> 1) & 0x1;
switch (speed) {
case 0:
- lmac->last_speed = 10;
+ lmac->last_speed = SPEED_10;
break;
case 1:
- lmac->last_speed = 100;
+ lmac->last_speed = SPEED_100;
break;
case 2:
- lmac->last_speed = 1000;
+ lmac->last_speed = SPEED_1000;
break;
default:
lmac->link_up = false;
@@ -1012,10 +1012,10 @@ static void bgx_poll_for_link(struct work_struct *work)
!(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->lmac_type == BGX_MODE_XLAUI)
- lmac->last_speed = 40000;
+ lmac->last_speed = SPEED_40000;
else
- lmac->last_speed = 10000;
- lmac->last_duplex = 1;
+ lmac->last_speed = SPEED_10000;
+ lmac->last_duplex = DUPLEX_FULL;
} else {
lmac->link_up = 0;
lmac->last_speed = SPEED_UNKNOWN;
@@ -1105,8 +1105,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
} else {
/* Default to below link speed and duplex */
lmac->link_up = true;
- lmac->last_speed = 1000;
- lmac->last_duplex = 1;
+ lmac->last_speed = SPEED_1000;
+ lmac->last_duplex = DUPLEX_FULL;
bgx_sgmii_change_link_state(lmac);
return 0;
}
@@ -1484,7 +1484,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
break;
mac = of_get_mac_address(node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(bgx->lmac[lmac].mac, mac);
SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 0e9182d3f02c..b3e4118a15e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -443,9 +443,9 @@ found:
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
struct l2t_data *d;
- int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
+ int i;
- d = kvzalloc(size, GFP_KERNEL);
+ d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
if (!d)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c2fd323c4078..ea75f275023f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -75,8 +75,8 @@ struct l2t_data {
struct l2t_entry *rover; /* starting point for next allocation */
atomic_t nfree; /* number of free entries */
rwlock_t lock;
- struct l2t_entry l2tab[0];
struct rcu_head rcu_head; /* to handle rcu cleanup */
+ struct l2t_entry l2tab[];
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
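The l2t changes switch the trailing entry array to a C99 flexible array member (moving rcu_head ahead of it, since a flexible array member must be the last field) and size the allocation with struct_size(), which does the multiply-and-add with overflow checking instead of open-coded sizeof arithmetic. A self-contained sketch of the same pattern with generic names:

    #include <linux/mm.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_entry {
            u32 state;
            u32 idx;
    };

    struct example_table {
            unsigned int nentries;
            struct example_entry entries[];    /* flexible array member */
    };

    static struct example_table *example_table_alloc(unsigned int nentries)
    {
            struct example_table *t;

            /* struct_size() == sizeof(*t) + nentries * sizeof(t->entries[0]),
             * saturating on overflow so the allocation fails cleanly.
             */
            t = kvzalloc(struct_size(t, entries, nentries), GFP_KERNEL);
            if (!t)
                    return NULL;
            t->nentries = nentries;
            return t;
    }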
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 956219c178e1..a8fe0808823d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1575,9 +1575,11 @@ int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
+fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
+ struct link_config *lc);
int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
unsigned int port, struct link_config *lc,
- bool sleep_ok, int timeout);
+ u8 sleep_ok, int timeout);
static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
unsigned int port, struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index bec4711005cc..9e589302af90 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -442,7 +442,7 @@ static unsigned int speed_to_fw_caps(int speed)
* Link Mode Mask.
*/
static void fw_caps_to_lmm(enum fw_port_type port_type,
- unsigned int fw_caps,
+ fw_port_cap32_t fw_caps,
unsigned long *link_mode_mask)
{
#define SET_LMM(__lmm_name) \
@@ -632,7 +632,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
link_ksettings->link_modes.supported);
- fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
+ fw_caps_to_lmm(pi->port_type,
+ t4_link_acaps(pi->adapter,
+ pi->lport,
+ &pi->link_cfg),
link_ksettings->link_modes.advertising);
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
@@ -642,22 +645,6 @@ static int get_link_ksettings(struct net_device *dev,
: SPEED_UNKNOWN);
base->duplex = DUPLEX_FULL;
- if (pi->link_cfg.fc & PAUSE_RX) {
- if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Pause);
- } else {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
- } else if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
-
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 5afb43000049..4107007b6ec4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -524,8 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
return -ENOMEM;
fwr = __skb_put(skb, len);
- t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1
- : adapter->sge.fw_evtq.abs_id);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
@@ -744,16 +743,40 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
void clear_all_filters(struct adapter *adapter)
{
+ struct net_device *dev = adapter->port[0];
unsigned int i;
if (adapter->tids.ftid_tab) {
struct filter_entry *f = &adapter->tids.ftid_tab[0];
unsigned int max_ftid = adapter->tids.nftids +
adapter->tids.nsftids;
-
+ /* Clear all TCAM filters */
for (i = 0; i < max_ftid; i++, f++)
if (f->valid || f->pending)
- clear_filter(adapter, f);
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
+ /* Clear all hash filters */
+ if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
+ struct filter_entry *f;
+ unsigned int sb;
+
+ for (i = adapter->tids.hash_base;
+ i <= adapter->tids.ntids; i++) {
+ f = (struct filter_entry *)
+ adapter->tids.tid_tab[i];
+
+ if (f && (f->valid || f->pending))
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
+ sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+ for (i = 0; i < sb; i++) {
+ f = (struct filter_entry *)adapter->tids.tid_tab[i];
+
+ if (f && (f->valid || f->pending))
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
}
}
@@ -1568,9 +1591,8 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx ctx;
int ret;
- /* If we are shutting down the adapter do not wait for completion */
if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
- return __cxgb4_del_filter(dev, filter_id, fs, NULL);
+ return 0;
init_completion(&ctx.completion);
@@ -1722,12 +1744,13 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
break;
default:
- dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
- __func__, status);
+ if (status != CPL_ERR_TCAM_FULL)
+ dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
+ __func__, status);
if (ctx) {
if (status == CPL_ERR_TCAM_FULL)
- ctx->result = -EAGAIN;
+ ctx->result = -ENOSPC;
else
ctx->result = -EINVAL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 89179e316687..715e4edcf4a2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -979,8 +979,7 @@ freeout:
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
int txq;
@@ -1022,7 +1021,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
@@ -6025,6 +6024,11 @@ static void remove_one(struct pci_dev *pdev)
return;
}
+ /* If we allocated filters, free up state associated with any
+ * valid filters ...
+ */
+ clear_all_filters(adapter);
+
adapter->flags |= CXGB4_SHUTTING_DOWN;
if (adapter->pf == 4) {
@@ -6055,11 +6059,6 @@ static void remove_one(struct pci_dev *pdev)
if (IS_REACHABLE(CONFIG_THERMAL))
cxgb4_thermal_remove(adapter);
- /* If we allocated filters, free up state associated with any
- * valid filters ...
- */
- clear_all_filters(adapter);
-
if (adapter->flags & CXGB4_FULL_INIT_DONE)
cxgb_down(adapter);
@@ -6161,15 +6160,24 @@ static int __init cxgb4_init_module(void)
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
- debugfs_remove(cxgb4_debugfs_root);
+ goto err_pci;
#if IS_ENABLED(CONFIG_IPV6)
if (!inet6addr_registered) {
- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
- inet6addr_registered = true;
+ ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+ if (ret)
+ pci_unregister_driver(&cxgb4_driver);
+ else
+ inet6addr_registered = true;
}
#endif
+ if (ret == 0)
+ return ret;
+
+err_pci:
+ debugfs_remove(cxgb4_debugfs_root);
+
return ret;
}
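The cxgb4_init_module() change makes the error handling symmetric: a pci_register_driver() failure now unwinds the debugfs directory, and a failing register_inet6addr_notifier() unregisters the PCI driver instead of leaving the module half-initialised. The skeleton of that pattern, with placeholder driver and notifier objects (the inet6addr_registered bookkeeping of the real code is omitted):

    #include <linux/debugfs.h>
    #include <linux/module.h>
    #include <linux/pci.h>
    #include <net/addrconf.h>

    static struct dentry *example_debugfs_root;
    static struct pci_driver example_pci_driver;          /* placeholder */
    static struct notifier_block example_inet6_notifier;  /* placeholder */

    static int __init example_init(void)
    {
            int ret;

            example_debugfs_root = debugfs_create_dir("example", NULL);

            ret = pci_register_driver(&example_pci_driver);
            if (ret < 0)
                    goto err_debugfs;

            ret = register_inet6addr_notifier(&example_inet6_notifier);
            if (ret) {
                    pci_unregister_driver(&example_pci_driver);
                    goto err_debugfs;
            }
            return 0;

    err_debugfs:
            debugfs_remove(example_debugfs_root);
            return ret;
    }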
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 82a8d1970060..6e2d80008a79 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -687,11 +687,8 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
ret = ctx.result;
/* Check if hw returned error for filter creation */
- if (ret) {
- netdev_err(dev, "%s: filter creation err %d\n",
- __func__, ret);
+ if (ret)
goto free_entry;
- }
ch_flower->tc_flower_cookie = cls->cookie;
ch_flower->filter_id = ctx.tid;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a3544041ad32..f9b70be59792 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3964,6 +3964,14 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
}
+/* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
+ * Capabilities which we control with separate controls -- see, for instance,
+ * Pause Frames and Forward Error Correction. In order to determine what the
+ * full set of Advertised Port Capabilities are, the base Advertised Port
+ * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
+ * Port Capabilities associated with those other controls. See
+ * t4_link_acaps() for how this is done.
+ */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
FW_PORT_CAP32_ANEG)
@@ -4061,6 +4069,9 @@ static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
/* Translate Common Code Pause specification into Firmware Port Capabilities */
static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
+ /* Translate orthogonal RX/TX Pause Controls for L1 Configure
+ * commands, etc.
+ */
fw_port_cap32_t fw_pause = 0;
if (cc_pause & PAUSE_RX)
@@ -4070,6 +4081,19 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
if (!(cc_pause & PAUSE_AUTONEG))
fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
+ /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
+ * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
+ * Note that these bits are ignored in L1 Configure commands.
+ */
+ if (cc_pause & PAUSE_RX) {
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
+ else
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ } else if (cc_pause & PAUSE_TX) {
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ }
+
return fw_pause;
}
@@ -4100,31 +4124,22 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
}
/**
- * t4_link_l1cfg - apply link configuration to MAC/PHY
+ * t4_link_acaps - compute Link Advertised Port Capabilities
* @adapter: the adapter
- * @mbox: the Firmware Mailbox to use
* @port: the Port ID
* @lc: the Port's Link Configuration
- * @sleep_ok: if true we may sleep while awaiting command completion
- * @timeout: time to wait for command to finish before timing out
- * (negative implies @sleep_ok=false)
*
- * Set up a port's MAC and PHY according to a desired link configuration.
- * - If the PHY can auto-negotiate first decide what to advertise, then
- * enable/disable auto-negotiation as desired, and reset.
- * - If the PHY does not auto-negotiate just reset it.
- * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
- * otherwise do it later based on the outcome of auto-negotiation.
+ * Synthesize the Advertised Port Capabilities we'll be using based on
+ * the base Advertised Port Capabilities (which have been filtered by
+ * ADVERT_MASK) plus the individual controls for things like Pause
+ * Frames, Forward Error Correction, MDI, etc.
*/
-int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
- unsigned int port, struct link_config *lc,
- bool sleep_ok, int timeout)
+fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
+ struct link_config *lc)
{
- unsigned int fw_caps = adapter->params.fw_caps_support;
- fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
- struct fw_port_cmd cmd;
+ fw_port_cap32_t fw_fc, fw_fec, acaps;
unsigned int fw_mdi;
- int ret;
+ char cc_fec;
fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
@@ -4151,18 +4166,15 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
* init_link_config().
*/
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
- if (lc->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- rcap = lc->acaps | fw_fc | fw_fec;
+ acaps = lc->acaps | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
+ acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else {
- rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
+ acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
/* Some Requested Port Capabilities are trivially wrong if they exceed
@@ -4173,15 +4185,50 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
* we need to exclude this from this check in order to maintain
* compatibility ...
*/
- if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
- dev_err(adapter->pdev_dev,
- "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
- rcap, lc->pcaps);
+ if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
+ dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
+ acaps, lc->pcaps);
+ return -EINVAL;
+ }
+
+ return acaps;
+}
+
+/**
+ * t4_link_l1cfg_core - apply link configuration to MAC/PHY
+ * @adapter: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+ unsigned int port, struct link_config *lc,
+ u8 sleep_ok, int timeout)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd cmd;
+ fw_port_cap32_t rcap;
+ int ret;
+
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
+ lc->autoneg == AUTONEG_ENABLE) {
return -EINVAL;
}
- /* And send that on to the Firmware ...
+ /* Compute our Requested Port Capabilities and send that on to the
+ * Firmware.
*/
+ rcap = t4_link_acaps(adapter, port, lc);
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
@@ -4211,7 +4258,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
rcap, -ret);
return ret;
}
- return ret;
+ return 0;
}
/**
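The new translation in cc_to_fwcap_pause() advertises IEEE 802.3 pause bits derived from the driver's orthogonal RX/TX controls, and the expanded VF ADVERT_MASK below is taught about the same pause (and FEC) bits. Laid out as a table, the mapping implemented by the nested tests above is:

    PAUSE_RX  PAUSE_TX   advertised capability bits
    yes       yes        FW_PORT_CAP32_802_3_PAUSE
    yes       no         FW_PORT_CAP32_802_3_ASM_DIR
    no        yes        FW_PORT_CAP32_802_3_ASM_DIR
    no        no         (none)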
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 9125ddd89dd1..a02b1dff403e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x16
-#define T4FW_VERSION_MICRO 0x09
+#define T4FW_VERSION_MINOR 0x17
+#define T4FW_VERSION_MICRO 0x03
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x16
-#define T5FW_VERSION_MICRO 0x09
+#define T5FW_VERSION_MINOR 0x17
+#define T5FW_VERSION_MICRO 0x03
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x16
-#define T6FW_VERSION_MICRO 0x09
+#define T6FW_VERSION_MINOR 0x17
+#define T6FW_VERSION_MICRO 0x03
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index adc4d481815b..6d4cf3d0b2f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -518,8 +518,8 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
break;
}
cpl = (void *)p;
- /*FALLTHROUGH*/
}
+ /* Fall through */
case CPL_SGE_EGR_UPDATE: {
/*
@@ -1479,22 +1479,6 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev,
base->duplex = DUPLEX_UNKNOWN;
}
- if (pi->link_cfg.fc & PAUSE_RX) {
- if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Pause);
- } else {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
- } else if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
-
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 84dff74ca9cd..8a389d617a23 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -313,7 +313,17 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
return ret;
}
+/* In the Physical Function Driver Common Code, the ADVERT_MASK is used to
+ * mask out bits in the Advertised Port Capabilities which are managed via
+ * separate controls, like Pause Frames and Forward Error Correction. In the
+ * Virtual Function Common Code, since we never perform L1 Configuration on
+ * the Link, the only things we really need to filter out are things which
+ * we decode and report separately like Speed.
+ */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
+ FW_PORT_CAP32_802_3_PAUSE | \
+ FW_PORT_CAP32_802_3_ASM_DIR | \
+ FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) | \
FW_PORT_CAP32_ANEG)
/**
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
ppmax = max;
/* pool size must be multiple of unsigned long */
- bmap = BITS_TO_LONGS(ppmax);
+ bmap = ppmax / BITS_PER_TYPE(unsigned long);
+ if (!bmap)
+ return NULL;
+
ppmax = (bmap * sizeof(unsigned long)) << 3;
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
if (reserve_factor) {
ppmax_pool = ppmax / reserve_factor;
pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+ if (!pool) {
+ ppmax_pool = 0;
+ reserve_factor = 0;
+ }
pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
ndev->name, ppmax, ppmax_pool, pool_index_max);
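The bitmap sizing in ppm_alloc_cpu_pool() changes behaviour for small pools: BITS_TO_LONGS() rounds up, whereas the plain division truncates, so a per-CPU share smaller than one unsigned long's worth of bits now yields bmap == 0 and the function returns NULL, which the new code in cxgbi_ppm_init() handles by disabling the per-CPU reserve pool. Worked numbers on a 64-bit build, where BITS_PER_TYPE(unsigned long) is 64:

    BITS_TO_LONGS(48)                  /* DIV_ROUND_UP(48, 64) == 1, old behaviour */
    48 / BITS_PER_TYPE(unsigned long)  /* 0, so ppm_alloc_cpu_pool() now bails out  */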
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index e9a0213b08c4..6238e6951336 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -41,7 +41,7 @@ config CS89x0_PLATFORM
config EP93XX_ETH
tristate "EP93xx Ethernet support"
- depends on ARM && ARCH_EP93XX
+ depends on (ARM && ARCH_EP93XX) || COMPILE_TEST
select MII
help
This is a driver for the ethernet hardware included in EP93xx CPUs.
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 13dfdfca49fc..a6da9873570b 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -25,7 +25,7 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <mach/hardware.h>
+#include <linux/platform_data/eth-ep93xx.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 733d9172425b..acb2856936d2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -897,7 +897,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
netif_tx_stop_queue(txq);
skb_tx_timestamp(skb);
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
spin_unlock(&enic->wq_lock[txq_map]);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 949103db8a8a..9003eb6716cd 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1235,8 +1235,6 @@ static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
int txq_num, nfrags;
union dma_rwptr rw;
- SKB_FRAG_ASSERT(skb);
-
if (skb->len >= 0x10000)
goto out_drop_free;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index c2586f44c29d..5e1aff9a5fd6 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1412,8 +1412,8 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
pdata->flags |= DM9000_PLATF_NO_EEPROM;
mac_addr = of_get_mac_address(np);
- if (mac_addr)
- memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(pdata->dev_addr, mac_addr);
return pdata;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3c7c04406a2b..e2f9fbced174 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1376,7 +1376,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
u16 q_idx = skb_get_queue_mapping(skb);
struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
struct be_wrb_params wrb_params = { 0 };
- bool flush = !skb->xmit_more;
+ bool flush = !netdev_xmit_more();
u16 wrb_cnt;
skb = be_xmit_workarounds(adapter, skb, &wrb_params);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0f3e7f21c6fa..71da0490521b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1153,7 +1153,7 @@ static int ethoc_probe(struct platform_device *pdev)
const void *mac;
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(netdev->dev_addr, mac);
priv->phy_id = -1;
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 659f1ad37e96..b4ce26155087 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -616,7 +616,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* set kernel MAC address to dev */
mac_addr = of_get_mac_address(dev->of_node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
else
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index dfebc30c4841..d3f2408dc9e8 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1648,7 +1648,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
qm_sg_entry_get_len(&sgt[0]), dma_dir);
/* remaining pages were mapped with skb_frag_dma_map() */
- for (i = 1; i < nr_frags; i++) {
+ for (i = 1; i <= nr_frags; i++) {
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index dc339dc1adb2..63b1ecc18c26 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -435,7 +435,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- napi_gro_receive(&ch->napi, skb);
+ list_add_tail(&skb->list, ch->rx_list);
return;
@@ -1113,12 +1113,16 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_fq *fq, *txc_fq = NULL;
struct netdev_queue *nq;
int store_cleaned, work_done;
+ struct list_head rx_list;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
ch->xdp.res = 0;
priv = ch->priv;
+ INIT_LIST_HEAD(&rx_list);
+ ch->rx_list = &rx_list;
+
do {
err = pull_channel(ch);
if (unlikely(err))
@@ -1162,6 +1166,8 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
work_done = max(rx_cleaned, 1);
out:
+ netif_receive_skb_list(ch->rx_list);
+
if (txc_fq && txc_fq->dq_frames) {
nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
netdev_tx_completed_queue(nq, txc_fq->dq_frames,
@@ -2565,10 +2571,12 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.rxnfc_field = RXH_L2DA,
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_DA,
+ .id = DPAA2_ETH_DIST_ETHDST,
.size = 6,
}, {
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_SA,
+ .id = DPAA2_ETH_DIST_ETHSRC,
.size = 6,
}, {
/* This is the last ethertype field parsed:
@@ -2577,28 +2585,33 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
*/
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_TYPE,
+ .id = DPAA2_ETH_DIST_ETHTYPE,
.size = 2,
}, {
/* VLAN header */
.rxnfc_field = RXH_VLAN,
.cls_prot = NET_PROT_VLAN,
.cls_field = NH_FLD_VLAN_TCI,
+ .id = DPAA2_ETH_DIST_VLAN,
.size = 2,
}, {
/* IP header */
.rxnfc_field = RXH_IP_SRC,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_SRC,
+ .id = DPAA2_ETH_DIST_IPSRC,
.size = 4,
}, {
.rxnfc_field = RXH_IP_DST,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_DST,
+ .id = DPAA2_ETH_DIST_IPDST,
.size = 4,
}, {
.rxnfc_field = RXH_L3_PROTO,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_PROTO,
+ .id = DPAA2_ETH_DIST_IPPROTO,
.size = 1,
}, {
/* Using UDP ports, this is functionally equivalent to raw
@@ -2607,11 +2620,13 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.rxnfc_field = RXH_L4_B_0_1,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_SRC,
+ .id = DPAA2_ETH_DIST_L4SRC,
.size = 2,
}, {
.rxnfc_field = RXH_L4_B_2_3,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_DST,
+ .id = DPAA2_ETH_DIST_L4DST,
.size = 2,
},
};
@@ -2677,12 +2692,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
}
/* Size of the Rx flow classification key */
-int dpaa2_eth_cls_key_size(void)
+int dpaa2_eth_cls_key_size(u64 fields)
{
int i, size = 0;
- for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (!(fields & dist_fields[i].id))
+ continue;
size += dist_fields[i].size;
+ }
return size;
}
@@ -2703,6 +2721,24 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
return 0;
}
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+ int off = 0, new_off = 0;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ size = dist_fields[i].size;
+ if (dist_fields[i].id & fields) {
+ memcpy(key_mem + new_off, key_mem + off, size);
+ new_off += size;
+ }
+ off += size;
+ }
+}
+
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
@@ -2724,14 +2760,13 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
struct dpkg_extract *key =
&cls_cfg.extracts[cls_cfg.num_extracts];
- /* For Rx hashing key we set only the selected fields.
- * For Rx flow classification key we set all supported fields
+ /* For both Rx hashing and classification keys
+ * we set only the selected fields.
*/
- if (type == DPAA2_ETH_RX_DIST_HASH) {
- if (!(flags & dist_fields[i].rxnfc_field))
- continue;
+ if (!(flags & dist_fields[i].id))
+ continue;
+ if (type == DPAA2_ETH_RX_DIST_HASH)
rx_hash_fields |= dist_fields[i].rxnfc_field;
- }
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2786,16 +2821,28 @@ free_key:
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u64 key = 0;
+ int i;
if (!dpaa2_eth_hash_enabled(priv))
return -EOPNOTSUPP;
- return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ if (dist_fields[i].rxnfc_field & flags)
+ key |= dist_fields[i].id;
+
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
+}
+
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}
-static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
+ int err;
/* Check if we actually support Rx flow classification */
if (dpaa2_eth_has_legacy_dist(priv)) {
@@ -2803,8 +2850,7 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
return -EOPNOTSUPP;
}
- if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
- !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+ if (!dpaa2_eth_fs_enabled(priv)) {
dev_dbg(dev, "Rx cls disabled in DPNI options\n");
return -EOPNOTSUPP;
}
@@ -2814,9 +2860,21 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
return -EOPNOTSUPP;
}
+ /* If there is no support for masking in the classification table,
+ * we don't set a default key, as it will depend on the rules
+ * added by the user at runtime.
+ */
+ if (!dpaa2_eth_fs_mask_enabled(priv))
+ goto out;
+
+ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+ if (err)
+ return err;
+
+out:
priv->rx_cls_enabled = 1;
- return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+ return 0;
}
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
@@ -2851,7 +2909,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
/* Configure the flow classification key; it includes all
* supported header fields and cannot be modified at runtime
*/
- err = dpaa2_eth_set_cls(priv);
+ err = dpaa2_eth_set_default_cls(priv);
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure Rx classification key\n");
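The dpaa2-eth change above stops delivering frames one at a time through napi_gro_receive() and instead collects them on a per-channel list, handing the whole batch to netif_receive_skb_list() once per poll iteration so the per-packet cost of entering the stack is amortized. A minimal sketch of that pattern in a generic NAPI poll handler; my_rx_one() is a hypothetical helper returning one built skb, or NULL when the ring is empty:

/* Sketch: list-batched RX delivery from a NAPI poll handler. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct list_head rx_list;
	struct sk_buff *skb;
	int work_done = 0;

	INIT_LIST_HEAD(&rx_list);

	while (work_done < budget && (skb = my_rx_one(napi)) != NULL) {
		list_add_tail(&skb->list, &rx_list);
		work_done++;
	}

	/* One call hands the entire batch to the network stack. */
	netif_receive_skb_list(&rx_list);

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

The trade-off is that list delivery bypasses the GRO aggregation that napi_gro_receive() would have attempted, in exchange for fewer per-packet stack entries.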
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7879622aa3e6..5fb8f5c0dc9f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -334,6 +334,7 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_ch_stats stats;
struct dpaa2_eth_ch_xdp xdp;
struct xdp_rxq_info xdp_rxq;
+ struct list_head *rx_list;
};
struct dpaa2_eth_dist_fields {
@@ -341,6 +342,7 @@ struct dpaa2_eth_dist_fields {
enum net_prot cls_prot;
int cls_field;
int size;
+ u64 id;
};
struct dpaa2_eth_cls_rule {
@@ -393,6 +395,7 @@ struct dpaa2_eth_priv {
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
+ u64 rx_cls_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
struct bpf_prog *xdp_prog;
@@ -436,6 +439,12 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+#define dpaa2_eth_fs_enabled(priv) \
+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv) \
+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
#define dpaa2_eth_fs_count(priv) \
((priv)->dpni_attrs.fs_entries)
@@ -448,6 +457,18 @@ enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_CLS
};
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
+#define DPAA2_ETH_DIST_VLAN BIT(3)
+#define DPAA2_ETH_DIST_IPSRC BIT(4)
+#define DPAA2_ETH_DIST_IPDST BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO BIT(6)
+#define DPAA2_ETH_DIST_L4SRC BIT(7)
+#define DPAA2_ETH_DIST_L4DST BIT(8)
+#define DPAA2_ETH_DIST_ALL (~0U)
+
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
@@ -482,7 +503,9 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
-int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
#endif /* __DPAA2_H */
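For reference, the DPAA2_ETH_DIST_* bits defined above act as a selection mask over dist_fields[]: callers OR together the bits for the headers a rule actually uses, and dpaa2_eth_cls_key_size() sums only the sizes of the selected fields. A small illustrative sketch (not driver code), using the field sizes from the table in dpaa2-eth.c:

/* Sketch: an IPv4 src/dst/proto selection yields a 4 + 4 + 1 = 9 byte key,
 * versus the full 29-byte key built for DPAA2_ETH_DIST_ALL when the
 * hardware supports masking.
 */
static int my_example_key_size(void)
{
	u64 fields = DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST |
		     DPAA2_ETH_DIST_IPPROTO;

	return dpaa2_eth_cls_key_size(fields);	/* 9 */
}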
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 591dfcf76adb..76bd8d2872cc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -264,7 +264,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -272,18 +272,21 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = eth_value->h_proto;
*(__be16 *)(mask + off) = eth_mask->h_proto;
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
}
if (!is_zero_ether_addr(eth_mask->h_source)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
ether_addr_copy(key + off, eth_value->h_source);
ether_addr_copy(mask + off, eth_mask->h_source);
+ *fields |= DPAA2_ETH_DIST_ETHSRC;
}
if (!is_zero_ether_addr(eth_mask->h_dest)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, eth_value->h_dest);
ether_addr_copy(mask + off, eth_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
}
return 0;
@@ -291,7 +294,7 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
struct ethtool_usrip4_spec *uip_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
u32 tmp_value, tmp_mask;
@@ -303,18 +306,21 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = uip_value->ip4src;
*(__be32 *)(mask + off) = uip_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
}
if (uip_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = uip_value->ip4dst;
*(__be32 *)(mask + off) = uip_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
}
if (uip_mask->proto) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = uip_value->proto;
*(u8 *)(mask + off) = uip_mask->proto;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
}
if (uip_mask->l4_4_bytes) {
@@ -324,23 +330,26 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = htons(tmp_value >> 16);
*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+ *fields |= DPAA2_ETH_DIST_L4SRC;
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+ *fields |= DPAA2_ETH_DIST_L4DST;
}
/* Only apply the rule for IPv4 frames */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
return 0;
}
static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
struct ethtool_tcpip4_spec *l4_mask,
- void *key, void *mask, u8 l4_proto)
+ void *key, void *mask, u8 l4_proto, u64 *fields)
{
int off;
@@ -351,41 +360,47 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = l4_value->ip4src;
*(__be32 *)(mask + off) = l4_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
}
if (l4_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = l4_value->ip4dst;
*(__be32 *)(mask + off) = l4_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
}
if (l4_mask->psrc) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = l4_value->psrc;
*(__be16 *)(mask + off) = l4_mask->psrc;
+ *fields |= DPAA2_ETH_DIST_L4SRC;
}
if (l4_mask->pdst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = l4_value->pdst;
*(__be16 *)(mask + off) = l4_mask->pdst;
+ *fields |= DPAA2_ETH_DIST_L4DST;
}
/* Only apply the rule for IPv4 frames with the specified L4 proto */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = l4_proto;
*(u8 *)(mask + off) = 0xFF;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
return 0;
}
static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -396,6 +411,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
*(__be16 *)(key + off) = ext_value->vlan_tci;
*(__be16 *)(mask + off) = ext_mask->vlan_tci;
+ *fields |= DPAA2_ETH_DIST_VLAN;
}
return 0;
@@ -403,7 +419,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -411,36 +427,38 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, ext_value->h_dest);
ether_addr_copy(mask + off, ext_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
}
return 0;
}
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
+ u64 *fields)
{
int err;
switch (fs->flow_type & 0xFF) {
case ETHER_FLOW:
err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
- key, mask);
+ key, mask, fields);
break;
case IP_USER_FLOW:
err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
- &fs->m_u.usr_ip4_spec, key, mask);
+ &fs->m_u.usr_ip4_spec, key, mask, fields);
break;
case TCP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
- key, mask, IPPROTO_TCP);
+ key, mask, IPPROTO_TCP, fields);
break;
case UDP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
- key, mask, IPPROTO_UDP);
+ key, mask, IPPROTO_UDP, fields);
break;
case SCTP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
&fs->m_u.sctp_ip4_spec, key, mask,
- IPPROTO_SCTP);
+ IPPROTO_SCTP, fields);
break;
default:
return -EOPNOTSUPP;
@@ -450,13 +468,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
return err;
if (fs->flow_type & FLOW_EXT) {
- err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
if (err)
return err;
}
if (fs->flow_type & FLOW_MAC_EXT) {
- err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
+ fields);
if (err)
return err;
}
@@ -473,6 +492,7 @@ static int do_cls_rule(struct net_device *net_dev,
struct dpni_rule_cfg rule_cfg = { 0 };
struct dpni_fs_action_cfg fs_act = { 0 };
dma_addr_t key_iova;
+ u64 fields = 0;
void *key_buf;
int err;
@@ -480,7 +500,7 @@ static int do_cls_rule(struct net_device *net_dev,
fs->ring_cookie >= dpaa2_eth_queue_count(priv))
return -EINVAL;
- rule_cfg.key_size = dpaa2_eth_cls_key_size();
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
/* allocate twice the key size, for the actual key and for mask */
key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
@@ -488,10 +508,36 @@ static int do_cls_rule(struct net_device *net_dev,
return -ENOMEM;
/* Fill the key and mask memory areas */
- err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+ err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
if (err)
goto free_mem;
+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
+ /* Masking allows us to configure a maximal key during init and
+ * use it for all flow steering rules. Without it, we include
+ * in the key only the fields actually used, so we need to
+ * extract the others from the final key buffer.
+ *
+ * Program the FS key if needed, or return error if previously
+ * set key can't be used for the current rule. User needs to
+ * delete existing rules in this case to allow for the new one.
+ */
+ if (!priv->rx_cls_fields) {
+ err = dpaa2_eth_set_cls(net_dev, fields);
+ if (err)
+ goto free_mem;
+
+ priv->rx_cls_fields = fields;
+ } else if (priv->rx_cls_fields != fields) {
+ netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+ err = -EOPNOTSUPP;
+ goto free_mem;
+ }
+
+ dpaa2_eth_cls_trim_rule(key_buf, fields);
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+ }
+
key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_iova)) {
@@ -500,7 +546,8 @@ static int do_cls_rule(struct net_device *net_dev,
}
rule_cfg.key_iova = key_iova;
- rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+ if (dpaa2_eth_fs_mask_enabled(priv))
+ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
if (add) {
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
@@ -522,6 +569,17 @@ free_mem:
return err;
}
+static int num_rules(struct dpaa2_eth_priv *priv)
+{
+ int i, rules = 0;
+
+ for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+ if (priv->cls_rules[i].in_use)
+ rules++;
+
+ return rules;
+}
+
static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs,
int location)
@@ -545,6 +603,9 @@ static int update_cls_rule(struct net_device *net_dev,
return err;
rule->in_use = 0;
+
+ if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+ priv->rx_cls_fields = 0;
}
/* If no new entry to add, return here */
@@ -581,9 +642,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
- for (i = 0; i < max_rules; i++)
- if (priv->cls_rules[i].in_use)
- rxnfc->rule_cnt++;
+ rxnfc->rule_cnt = num_rules(priv);
rxnfc->data = max_rules;
break;
case ETHTOOL_GRXCLSRULE:
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 697c2427f2b7..aa7d4e27c5d1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1655,7 +1655,7 @@ static void fec_get_mac(struct net_device *ndev)
struct device_node *np = fep->pdev->dev.of_node;
if (np) {
const char *mac = of_get_mac_address(np);
- if (mac)
+ if (!IS_ERR(mac))
iap = (unsigned char *) mac;
}
}
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
int ret;
if (enable) {
- ret = clk_prepare_enable(fep->clk_ahb);
- if (ret)
- return ret;
-
ret = clk_prepare_enable(fep->clk_enet_out);
if (ret)
- goto failed_clk_enet_out;
+ return ret;
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
phy_reset_after_clk_enable(ndev->phydev);
} else {
- clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ failed_clk_ref:
failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ahb);
return ret;
}
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
ret = clk_prepare_enable(fep->clk_ipg);
if (ret)
goto failed_clk_ipg;
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk_ahb;
fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ failed_reset:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
failed_regulator:
+ clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+ clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
- return clk_prepare_enable(fep->clk_ipg);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ return 0;
+
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+ return ret;
}
static const struct dev_pm_ops fec_pm_ops = {
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index c1968b3ecec8..30cdb246d020 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -902,8 +902,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
* First try to read MAC address from DT
*/
mac_addr = of_get_mac_address(np);
- if (mac_addr) {
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(ndev->dev_addr, mac_addr);
} else {
struct mpc52xx_fec __iomem *fec = priv->fec;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 3c21486c6c84..7ab8095db192 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -724,12 +724,12 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
- if (!mac_addr) {
+ if (IS_ERR(mac_addr)) {
dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
err = -EINVAL;
goto _return_of_get_parent;
}
- memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+ ether_addr_copy(mac_dev->addr, mac_addr);
/* Get the port handles */
nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7c548ed535da..5fad73b2e123 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1014,8 +1014,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
spin_lock_init(&fep->tx_lock);
mac_addr = of_get_mac_address(ofdev->dev.of_node);
- if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 45fcc96be90e..e670cd293dba 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -872,8 +872,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
- if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(dev->dev_addr, mac_addr);
if (model && !strcasecmp(model, "TSEC"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index eb3e65e8868f..4d6892d2f0a4 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3910,8 +3910,8 @@ static int ucc_geth_probe(struct platform_device* ofdev)
}
mac_addr = of_get_mac_address(np);
- if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(dev->dev_addr, mac_addr);
ugeth->ug_info = ug_info;
ugeth->dev = device;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 0beee2cc2ddd..722b6de24816 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -252,14 +252,12 @@ uec_set_ringparam(struct net_device *netdev,
return -EINVAL;
}
+ if (netif_running(netdev))
+ return -EBUSY;
+
ug_info->bdRingLenRx[queue] = ring->rx_pending;
ug_info->bdRingLenTx[queue] = ring->tx_pending;
- if (netif_running(netdev)) {
- /* FIXME: restart automatically */
- netdev_info(netdev, "Please re-open the interface\n");
- }
-
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 2c2808830e95..96c32ae320b0 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -870,7 +870,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
phy_modes(phy->interface));
mac_addr = of_get_mac_address(node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e5d853b7b454..b1cb58f0aaf6 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1229,7 +1229,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
}
mac_addr = of_get_mac_address(node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
- hnae_free_buffers(ring);
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring->desc_num * sizeof(ring->desc[0]),
ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
+ if (is_rx_ring(ring))
+ hnae_free_buffers(ring);
+
hnae_free_desc(ring);
kfree(ring->desc_cb);
ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
};
struct hnae_queue {
- void __iomem *io_base;
+ u8 __iomem *io_base;
phys_addr_t phy_base;
struct hnae_ae_dev *dev; /* the device who use this queue */
struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
static void hns_mac_param_get(struct mac_params *param,
struct hns_mac_cb *mac_cb)
{
- param->vaddr = (void *)mac_cb->vaddr;
+ param->vaddr = mac_cb->vaddr;
param->mac_mode = hns_get_enet_interface(mac_cb);
ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
/*mac para struct ,mac get param from nic or dsaf when initialize*/
struct mac_params {
char addr[ETH_ALEN];
- void *vaddr; /*virtual address*/
+ u8 __iomem *vaddr; /*virtual address*/
struct device *dev;
u8 mac_id;
/**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
enum mac_mode mac_mode;
u8 mac_id;
struct hns_mac_cb *mac_cb;
- void __iomem *io_base;
+ u8 __iomem *io_base;
unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
unsigned int virt_dev_num;
struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..e05d2095d09b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, port);
-
- mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
}
/**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
/* default config dvc to 0 */
mac_data.tbl_ucast_dvc = 0;
mac_data.tbl_ucast_out_port = mac_entry->port_num;
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
0xff,
mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
/* config mc entry with mask */
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* config key mask */
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
soft_mac_entry += entry_index;
soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
} else { /* not zero, just del port, update */
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
&tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
+static int hns_dsaf_get_port_id(u8 port)
+{
+ if (port < DSAF_SERVICE_NW_NUM)
+ return port;
+
+ if (port >= DSAF_BASE_INNER_PORT_NUM)
+ return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+ return -EINVAL;
+}
+
static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2766,7 +2769,7 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
struct hns_mac_cb *mac_cb;
u8 addr[ETH_ALEN] = {0};
u8 port_num;
- u16 mskid;
+ int mskid;
/* promisc use vague table match with vlanid = 0 & macaddr = 0 */
hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
memset(&temp_key, 0x0, sizeof(temp_key));
mask_entry.addr[0] = 0x01;
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
- port, mask_entry.addr);
+ 0xf, mask_entry.addr);
tbl_tcam_mcast.tbl_mcast_item_vld = 1;
tbl_tcam_mcast.tbl_mcast_old_en = 0;
- if (port < DSAF_SERVICE_NW_NUM) {
- mskid = port;
- } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
- mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
- } else {
+ /* set MAC port to handle multicast */
+ mskid = hns_dsaf_get_port_id(port);
+ if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port,
mask_key.high.val, mask_key.low.val);
return;
}
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+ /* set pool bit map to handle multicast */
+ mskid = hns_dsaf_get_port_id(port_num);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev,
+ "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port_num,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
+
memcpy(&temp_key, &mask_key, sizeof(mask_key));
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
(struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
} else {
- u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ u8 __iomem *base_addr = mac_cb->serdes_vaddr +
(mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
}
}
-static void __iomem *
+static u8 __iomem *
hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
{
return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
dsaf_dev->ppe_common[comm_index] = NULL;
}
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
- int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
{
return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
struct hns_ppe_hw_stats hw_stats;
u8 index; /* index in a ppe common device */
- void __iomem *io_base;
+ u8 __iomem *io_base;
int virq;
u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
struct ppe_common_cb {
struct device *dev;
struct dsaf_device *dsaf_dev;
- void __iomem *io_base;
+ u8 __iomem *io_base;
enum ppe_common_mode ppe_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
} else {
ring = &q->tx_ring;
- ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+ ring->io_base = ring_pair_cb->q.io_base +
HNS_RCB_TX_REG_OFFSET;
irq_idx = HNS_RCB_IRQ_IDX_TX;
mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
#define dsaf_set_bit(origin, shift, val) \
dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift, u32 val)
{
u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
#define dsaf_get_bit(origin, shift) \
dsaf_get_field((origin), (1ull << (shift)), (shift))
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift)
{
u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
#define dsaf_write_b(addr, data)\
- writeb((data), (__iomem unsigned char *)(addr))
+ writeb((data), (__iomem u8 *)(addr))
#define dsaf_read_b(addr)\
- readb((__iomem unsigned char *)(addr))
+ readb((__iomem u8 *)(addr))
#define hns_mac_reg_read64(drv, offset) \
- readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+ readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
#endif /* _DSAF_REG_H */
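The void __iomem * to u8 __iomem * conversions running through the hns headers and accessors above keep expressions like base + reg as plain byte arithmetic; arithmetic on void * is only a GCC extension and is flagged by sparse and similar checkers. A minimal sketch of the accessor shape this gives, mirroring dsaf_write_reg(); my_write_reg()/my_read_reg() are illustrative names:

/* Sketch: MMIO accessors over a byte-typed base. */
static inline void my_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
	writel(value, base + reg);
}

static inline u32 my_read_reg(u8 __iomem *base, u32 reg)
{
	return readl(base + reg);
}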
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
- dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+ dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 60e7d7ae3787..65b985acae38 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
- ring->stats.tx_pkts++;
- ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@@ -603,7 +598,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
} else {
ring->stats.seg_pkt_cnt++;
- pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
+ pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
+ /* update tx ring statistics. */
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
@@ -1964,8 +1962,7 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1975,7 +1972,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb, NULL);
+ return netdev_pick_tx(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
@@ -2152,7 +2149,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2162,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 299b277bc7ae..83e19c6b974e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -43,6 +43,8 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
+ HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
+ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
};
@@ -62,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+ HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
@@ -80,12 +84,15 @@ struct hclgevf_mbx_resp_status {
struct hclge_mbx_vf_to_pf_cmd {
u8 rsv;
u8 mbx_src_vfid; /* Auto filled by IMP */
- u8 rsv1[2];
+ u8 mbx_need_resp;
+ u8 rsv1[1];
u8 msg_len;
u8 rsv2[3];
u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
};
+#define HCLGE_MBX_NEED_RESP_BIT BIT(0)
+
struct hclge_mbx_pf_to_vf_cmd {
u8 dest_vfid;
u8 rsv[3];
@@ -107,7 +114,7 @@ struct hclgevf_mbx_arq_ring {
struct hclgevf_dev *hdev;
u32 head;
u32 tail;
- u32 count;
+ atomic_t count;
u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 17ab4f4af6ad..fa8b8506b120 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
return inited;
}
-static int hnae3_match_n_instantiate(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev, bool is_reg)
+static int hnae3_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
int ret;
@@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
return 0;
}
- /* now, (un-)instantiate client by calling lower layer */
- if (is_reg) {
- ret = ae_dev->ops->init_client_instance(client, ae_dev);
- if (ret)
- dev_err(&ae_dev->pdev->dev,
- "fail to instantiate client, ret = %d\n", ret);
+ ret = ae_dev->ops->init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "fail to instantiate client, ret = %d\n", ret);
- return ret;
- }
+ return ret;
+}
+
+static void hnae3_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ /* check if this client matches the type of ae_dev */
+ if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
+ return;
if (hnae3_get_client_init_flag(client, ae_dev)) {
ae_dev->ops->uninit_client_instance(client, ae_dev);
hnae3_set_client_init_flag(client, ae_dev, 0);
}
-
- return 0;
}
int hnae3_register_client(struct hnae3_client *client)
@@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on current port, for
* any error reasons, move on to next available port
*/
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
@@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
mutex_lock(&hnae3_common_lock);
/* un-initialize the client on every matched port */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
}
list_del(&client->node);
@@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
* initialize the figure out client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed, ret = %d\n",
@@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
* un-initialize the figure out client instance
*/
list_for_each_entry(client, &hnae3_client_list, node)
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
@@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
* initialize the figure out client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed, ret = %d\n",
@@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
continue;
list_for_each_entry(client, &hnae3_client_list, node)
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 38b430f11fc1..ad21b0ef1946 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -120,6 +120,25 @@ enum hnae3_media_type {
HNAE3_MEDIA_TYPE_NONE,
};
+/* must be consistent with definition in firmware */
+enum hnae3_module_type {
+ HNAE3_MODULE_TYPE_UNKNOWN = 0x00,
+ HNAE3_MODULE_TYPE_FIBRE_LR = 0x01,
+ HNAE3_MODULE_TYPE_FIBRE_SR = 0x02,
+ HNAE3_MODULE_TYPE_AOC = 0x03,
+ HNAE3_MODULE_TYPE_CR = 0x04,
+ HNAE3_MODULE_TYPE_KR = 0x05,
+ HNAE3_MODULE_TYPE_TP = 0x06,
+
+};
+
+enum hnae3_fec_mode {
+ HNAE3_FEC_AUTO = 0,
+ HNAE3_FEC_BASER,
+ HNAE3_FEC_RS,
+ HNAE3_FEC_USER_DEF,
+};
+
enum hnae3_reset_notify_type {
HNAE3_UP_CLIENT,
HNAE3_DOWN_CLIENT,
@@ -147,6 +166,13 @@ enum hnae3_flr_state {
HNAE3_FLR_DONE,
};
+enum hnae3_port_base_vlan_state {
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ HNAE3_PORT_BASE_VLAN_ENABLE,
+ HNAE3_PORT_BASE_VLAN_MODIFY,
+ HNAE3_PORT_BASE_VLAN_NOCHANGE,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -223,10 +249,10 @@ struct hnae3_ae_dev {
* non-ok
* get_ksettings_an_result()
* Get negotiation status,speed and duplex
- * update_speed_duplex_h()
- * Update hardware speed and duplex
* get_media_type()
* Get media type of MAC
+ * check_port_speed()
+ * Check target speed whether is supported
* adjust_link()
* Adjust link status
* set_loopback()
@@ -243,6 +269,8 @@ struct hnae3_ae_dev {
* set auto autonegotiation of pause frame use
* get_autoneg()
* get auto autonegotiation of pause frame use
+ * restart_autoneg()
+ * restart autonegotiation
* get_coalesce_usecs()
* get usecs to delay a TX interrupt after a packet is sent
* get_rx_max_coalesced_frames()
@@ -333,11 +361,15 @@ struct hnae3_ae_ops {
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex);
- int (*update_speed_duplex_h)(struct hnae3_handle *handle);
int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
u8 duplex);
- void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type);
+ void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type);
+ int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
+ u8 *fec_mode);
+ int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex);
int (*set_loopback)(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en);
@@ -353,6 +385,7 @@ struct hnae3_ae_ops {
int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
int (*get_autoneg)(struct hnae3_handle *handle);
+ int (*restart_autoneg)(struct hnae3_handle *handle);
void (*get_coalesce_usecs)(struct hnae3_handle *handle,
u32 *tx_usecs, u32 *rx_usecs);
@@ -385,7 +418,8 @@ struct hnae3_ae_ops {
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae3_handle *handle, u64 *data);
-
+ void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
+ u64 *rx_cnt);
void (*get_strings)(struct hnae3_handle *handle,
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
@@ -578,8 +612,13 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
+ enum hnae3_port_base_vlan_state port_base_vlan_state;
+
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
+
+ /* Network interface message level enabled bits */
+ u32 msg_enable;
};
#define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 0de543faa5b1..fc4917ac44be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -239,6 +239,10 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "queue info [number]\n");
dev_info(&h->pdev->dev, "queue map\n");
dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
+
+ if (!hns3_is_phys_func(h->pdev))
+ return;
+
dev_info(&h->pdev->dev, "dump fd tcam\n");
dev_info(&h->pdev->dev, "dump tc\n");
dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
@@ -247,6 +251,9 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qos pri map\n");
dev_info(&h->pdev->dev, "dump qos buf cfg\n");
dev_info(&h->pdev->dev, "dump mng tbl\n");
+ dev_info(&h->pdev->dev, "dump reset info\n");
+ dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
+ dev_info(&h->pdev->dev, "dump mac tnl status\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
@@ -341,6 +348,8 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
ret = hns3_dbg_bd_info(handle, cmd_buf);
else if (handle->ae_algo->ops->dbg_run_cmd)
ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+ else
+ ret = -EOPNOTSUPP;
if (ret)
hns3_dbg_help(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 162cb9afa0e7..196a3d780dcf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -35,6 +35,13 @@ static const char hns3_driver_string[] =
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -67,7 +74,7 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
- napi_schedule(&tqp_vector->napi);
+ napi_schedule_irqoff(&tqp_vector->napi);
return IRQ_HANDLED;
}
@@ -730,95 +737,6 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
return 0;
}
-static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
-{
- union l3_hdr_info l3;
- union l4_hdr_info l4;
- unsigned char *l2_hdr;
- u8 l4_proto = ol4_proto;
- u32 ol2_len;
- u32 ol3_len;
- u32 ol4_len;
- u32 l2_len;
- u32 l3_len;
-
- l3.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
-
- /* compute L2 header size for normal packet, defined in 2 Bytes */
- l2_len = l3.hdr - skb->data;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
-
- /* tunnel packet*/
- if (skb->encapsulation) {
- /* compute OL2 header size, defined in 2 Bytes */
- ol2_len = l2_len;
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
-
- /* compute OL3 header size, defined in 4 Bytes */
- ol3_len = l4.hdr - l3.hdr;
- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
- ol3_len >> 2);
-
- /* MAC in UDP, MAC in GRE (0x6558)*/
- if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
- /* switch MAC header ptr from outer to inner header.*/
- l2_hdr = skb_inner_mac_header(skb);
-
- /* compute OL4 header size, defined in 4 Bytes. */
- ol4_len = l2_hdr - l4.hdr;
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L4LEN_S, ol4_len >> 2);
-
- /* switch IP header ptr from outer to inner header */
- l3.hdr = skb_inner_network_header(skb);
-
- /* compute inner l2 header size, defined in 2 Bytes. */
- l2_len = l3.hdr - l2_hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S,
- l2_len >> 1);
- } else {
- /* skb packet types not supported by hardware,
- * txbd len fild doesn't be filled.
- */
- return;
- }
-
- /* switch L4 header pointer from outer to inner */
- l4.hdr = skb_inner_transport_header(skb);
-
- l4_proto = il4_proto;
- }
-
- /* compute inner(/normal) L3 header size, defined in 4 Bytes */
- l3_len = l4.hdr - l3.hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
-
- /* compute inner(/normal) L4 header size, defined in 4 Bytes */
- switch (l4_proto) {
- case IPPROTO_TCP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- l4.tcp->doff);
- break;
- case IPPROTO_SCTP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- (sizeof(struct sctphdr) >> 2));
- break;
- case IPPROTO_UDP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- (sizeof(struct udphdr) >> 2));
- break;
- default:
- /* skb packet types not supported by hardware,
- * txbd len fild doesn't be filled.
- */
- return;
- }
-}
-
/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
* and it is udp packet, which has a dest port as the IANA assigned.
* the hardware is expected to do the checksum offload, but the
@@ -827,12 +745,12 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
-#define IANA_VXLAN_PORT 4789
union l4_hdr_info l4;
l4.hdr = skb_transport_header(skb);
- if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+ if (!(!skb->encapsulation &&
+ l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
return false;
skb_checksum_help(skb);
@@ -840,46 +758,71 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
return true;
}
-static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
+static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u32 *ol_type_vlan_len_msec)
{
+ u32 l2_len, l3_len, l4_len;
+ unsigned char *il2_hdr;
union l3_hdr_info l3;
- u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
l3.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
- /* define OL3 type and tunnel type(OL4).*/
- if (skb->encapsulation) {
- /* define outer network header type.*/
- if (skb->protocol == htons(ETH_P_IP)) {
- if (skb_is_gso(skb))
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
- else
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
-
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV6);
- }
+ /* compute OL2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - skb->data;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
- /* define tunnel type(OL4).*/
- switch (l4_proto) {
- case IPPROTO_UDP:
+ /* compute OL3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ il2_hdr = skb_inner_mac_header(skb);
+ /* compute OL4 header size, defined in 4 Bytes. */
+ l4_len = il2_hdr - l4.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
+
+ /* define outer network header type */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb_is_gso(skb))
hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
- break;
- case IPPROTO_GRE:
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
+ else
hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
- break;
- default:
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV6);
+ }
+
+ if (ol4_proto == IPPROTO_UDP)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
+ else if (ol4_proto == IPPROTO_GRE)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
+}
+
+static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+{
+ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
+ u32 l2_len, l3_len;
+
+ l4.hdr = skb_transport_header(skb);
+ l3.hdr = skb_network_header(skb);
+
+ /* handle encapsulation skb */
+ if (skb->encapsulation) {
+ /* If this is not a UDP/GRE encapsulation skb */
+ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
/* drop the skb tunnel packet if hardware don't support,
* because hardware can't calculate csum when TSO.
*/
@@ -893,7 +836,12 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
+ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+
+ /* switch to inner header */
+ l2_hdr = skb_inner_mac_header(skb);
l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
l4_proto = il4_proto;
}
@@ -911,11 +859,22 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
HNS3_L3T_IPV6);
}
+ /* compute inner(/normal) L2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - l2_hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute inner(/normal) L3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ /* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_TCP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ l4.tcp->doff);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
@@ -924,11 +883,15 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_UDP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
case IPPROTO_SCTP:
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_SCTP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
default:
/* drop the skb tunnel packet if hardware don't support,
@@ -963,6 +926,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13
+ struct hnae3_handle *handle = tx_ring->tqp->handle;
+
+ /* Due to a HW limitation, if port based VLAN insertion is enabled, only one VLAN
+ * header is allowed in the skb, otherwise it will cause a RAS error.
+ */
+ if (unlikely(skb_vlan_tagged_multi(skb) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_ENABLE))
+ return -EINVAL;
+
if (skb->protocol == htons(ETH_P_8021Q) &&
!(tx_ring->tqp->handle->kinfo.netdev->features &
NETIF_F_HW_VLAN_CTAG_TX)) {
@@ -984,8 +957,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
- *out_vtag = vlan_tag;
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE) {
+ hns3_set_field(*out_vlan_flag,
+ HNS3_TXD_OVLAN_B, 1);
+ *out_vtag = vlan_tag;
+ } else {
+ hns3_set_field(*inner_vlan_flag,
+ HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+ }
} else {
hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
@@ -1012,7 +993,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- u16 bdtp_fe_sc_vld_ra_ri = 0;
struct skb_frag_struct *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
@@ -1042,12 +1022,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (unlikely(ret))
return ret;
- hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec);
if (unlikely(ret))
return ret;
@@ -1073,19 +1051,37 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
- if (unlikely(dma_mapping_error(ring->dev, dma))) {
+ if (unlikely(dma_mapping_error(dev, dma))) {
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
desc_cb->length = size;
+ if (likely(size <= HNS3_MAX_BD_SIZE)) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16(size);
+ hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+ ring_ptr_move_fw(ring, next_to_use);
+ return 0;
+ }
+
frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1112,64 +1108,92 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return 0;
}
-static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+static int hns3_nic_bd_num(struct sk_buff *skb)
{
- struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
- int bdnum_for_frag;
- int frag_num;
- int buf_num;
- int size;
- int i;
+ int size = skb_headlen(skb);
+ int i, bd_num;
- size = skb_headlen(skb);
- buf_num = hns3_tx_bd_count(size);
+ /* if the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE))
+ return skb_shinfo(skb)->nr_frags + 1;
+
+ bd_num = hns3_tx_bd_count(size);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int frag_bd_num;
- frag_num = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < frag_num; i++) {
- frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
- bdnum_for_frag = hns3_tx_bd_count(size);
- if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
+ frag_bd_num = hns3_tx_bd_count(size);
+
+ if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
- buf_num += bdnum_for_frag;
+ bd_num += frag_bd_num;
}
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
- return -EBUSY;
- /* manual split the send packet */
- new_skb = skb_copy(skb, GFP_ATOMIC);
- if (!new_skb)
- return -ENOMEM;
- dev_kfree_skb_any(skb);
- *out_skb = new_skb;
- }
+ return bd_num;
+}
- if (unlikely(ring_space(ring) < buf_num))
- return -EBUSY;
+static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
+{
+ if (!skb->encapsulation)
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
- *bnum = buf_num;
- return 0;
+ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
-static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+/* HW requires every continuous run of 8 buffers to hold more data than the MSS.
+ * We simplify this by ensuring that skb_headlen plus the first continuous
+ * 7 frags are larger than the gso header len plus the mss, and that each
+ * remaining continuous run of 7 frags is larger than the MSS, except the last 7 frags.
+ */
+static bool hns3_skb_need_linearized(struct sk_buff *skb)
+{
+ int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+ unsigned int tot_len = 0;
+ int i;
+
+ for (i = 0; i < bd_limit; i++)
+ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ /* ensure that headlen + the first 7 frags is greater than mss + header
+ * and that the first 7 frags alone are greater than mss.
+ */
+ if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
+ hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+ return true;
+
+ /* ensure each remaining continuous run of 7 buffers is greater than mss */
+ for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
+ tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+
+ if (tot_len < skb_shinfo(skb)->gso_size)
+ return true;
+ }
+
+ return false;
+}
+
+static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- int buf_num;
+ int bd_num;
- /* No. of segments (plus a header) */
- buf_num = skb_shinfo(skb)->nr_frags + 1;
+ bd_num = hns3_nic_bd_num(skb);
+ if (bd_num < 0)
+ return bd_num;
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
+ if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ struct sk_buff *new_skb;
+
+ if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+ goto out;
+
+ bd_num = hns3_tx_bd_count(skb->len);
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
/* manual split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
@@ -1177,14 +1201,17 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
return -ENOMEM;
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_copy++;
+ u64_stats_update_end(&ring->syncp);
}
- if (unlikely(ring_space(ring) < buf_num))
+out:
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
- *bnum = buf_num;
-
- return 0;
+ return bd_num;
}
static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
@@ -1197,6 +1224,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
if (ring->next_to_use == next_to_use_orig)
break;
+ /* rollback one */
+ ring_ptr_move_bw(ring, next_to_use);
+
/* unmap the descriptor dma address */
if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
dma_unmap_single(dev,
@@ -1210,9 +1240,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
DMA_TO_DEVICE);
ring->desc_cb[ring->next_to_use].length = 0;
-
- /* rollback one */
- ring_ptr_move_bw(ring, next_to_use);
+ ring->desc_cb[ring->next_to_use].dma = 0;
}
}
@@ -1225,7 +1253,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int next_to_use_head;
- int next_to_use_frag;
int buf_num;
int seg_num;
int size;
@@ -1235,22 +1262,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Prefetch the data used later */
prefetch(skb->data);
- switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
- case -EBUSY:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
+ if (unlikely(buf_num <= 0)) {
+ if (buf_num == -EBUSY) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_busy++;
+ u64_stats_update_end(&ring->syncp);
+ goto out_net_tx_busy;
+ } else if (buf_num == -ENOMEM) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ }
- goto out_net_tx_busy;
- case -ENOMEM:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- netdev_err(netdev, "no memory to xmit!\n");
+ if (net_ratelimit())
+ netdev_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
- default:
- break;
}
/* No. of segments (plus a header) */
@@ -1263,9 +1291,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
DESC_TYPE_SKB);
if (unlikely(ret))
- goto head_fill_err;
+ goto fill_err;
- next_to_use_frag = ring->next_to_use;
/* Fill the fragments */
for (i = 1; i < seg_num; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1276,7 +1303,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
DESC_TYPE_PAGE);
if (unlikely(ret))
- goto frag_fill_err;
+ goto fill_err;
}
/* Complete translate all packets */
@@ -1289,10 +1316,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-frag_fill_err:
- hns3_clear_desc(ring, next_to_use_frag);
-
-head_fill_err:
+fill_err:
hns3_clear_desc(ring, next_to_use_head);
out_err_tx_ok:
@@ -1355,13 +1379,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
bool enable;
int ret;
- if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
- if (features & (NETIF_F_TSO | NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
-
if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
enable = !!(features & NETIF_F_GRO_HW);
ret = h->ae_algo->ops->set_gro_en(h, enable);
@@ -1574,6 +1591,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
@@ -1590,13 +1610,19 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring = NULL;
+ struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
+ int fbd_num, fbd_oft;
+ int ebd_num, ebd_oft;
+ int bd_num, bd_err;
+ int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
- for (i = 0; i < ndev->real_num_tx_queues; i++) {
+ for (i = 0; i < ndev->num_tx_queues; i++) {
struct netdev_queue *q;
unsigned long trans_start;
@@ -1617,21 +1643,66 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
return false;
}
+ priv->tx_timeout_count++;
+
tx_ring = priv->ring_data[timeout_queue].ring;
+ napi = &tx_ring->tqp_vector->napi;
+
+ netdev_info(ndev,
+ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
+ priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
+ tx_ring->next_to_clean, napi->state);
+
+ netdev_info(ndev,
+ "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
+ tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
+ tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
+
+ netdev_info(ndev,
+ "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
+ tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+ /* When the MAC receives many pause frames continuously, it is unable to send
+ * packets, which may cause a tx timeout
+ */
+ if (h->ae_algo->ops->update_stats &&
+ h->ae_algo->ops->get_mac_pause_stats) {
+ u64 tx_pause_cnt, rx_pause_cnt;
+
+ h->ae_algo->ops->update_stats(h, &ndev->stats);
+ h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
+ &rx_pause_cnt);
+ netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
+ tx_pause_cnt, rx_pause_cnt);
+ }
hw_head = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
hw_tail = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
+ fbd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ ebd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBDNUM_REG);
+ ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBD_OFFSET_REG);
+ bd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ bd_err = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_ERR_REG);
+ ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
+ tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
+
netdev_info(ndev,
- "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
- priv->tx_timeout_count,
- timeout_queue,
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- hw_head,
- hw_tail,
+ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ bd_num, hw_head, hw_tail, bd_err,
readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
return true;
}
@@ -1644,8 +1715,6 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
if (!hns3_get_tx_timeo_queue_info(ndev))
return;
- priv->tx_timeout_count++;
-
/* request the reset, and let the hclge to determine
* which reset level should be done
*/
@@ -1670,7 +1739,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
};
-static bool hns3_is_phys_func(struct pci_dev *pdev)
+bool hns3_is_phys_func(struct pci_dev *pdev)
{
u32 dev_id = pdev->device;
@@ -2117,17 +2186,30 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
ring->desc[i].rx.bd_base_info = 0;
}
-static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
- int *pkts)
+static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
+ int *bytes, int *pkts)
{
- struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ int ntc = ring->next_to_clean;
+ struct hns3_desc_cb *desc_cb;
- (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
- (*bytes) += desc_cb->length;
- /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
- hns3_free_buffer_detach(ring, ring->next_to_clean);
+ while (head != ntc) {
+ desc_cb = &ring->desc_cb[ntc];
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+ /* desc_cb will be cleaned, after hnae3_free_buffer_detach */
+ hns3_free_buffer_detach(ring, ntc);
- ring_ptr_move_fw(ring, next_to_clean);
+ if (++ntc == ring->desc_num)
+ ntc = 0;
+
+ /* Issue prefetch for next Tx descriptor */
+ prefetch(&ring->desc_cb[ntc]);
+ }
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * ring_space called by hns3_nic_net_xmit.
+ */
+ smp_store_release(&ring->next_to_clean, ntc);
}
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
@@ -2167,11 +2249,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
bytes = 0;
pkts = 0;
- while (head != ring->next_to_clean) {
- hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
- /* Issue prefetch for next Tx descriptor */
- prefetch(&ring->desc_cb[ring->next_to_clean]);
- }
+ hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
ring->tqp_vector->tx_group.total_bytes += bytes;
ring->tqp_vector->tx_group.total_packets += pkts;
@@ -2233,6 +2311,10 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
break;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_reuse_pg++;
+ u64_stats_update_end(&ring->syncp);
}
ring_ptr_move_fw(ring, next_to_use);
@@ -2246,64 +2328,78 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
{
- struct hns3_desc *desc;
- u32 truesize;
- int size;
- int last_offset;
- bool twobufs;
-
- twobufs = ((PAGE_SIZE < 8192) &&
- hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
-
- desc = &ring->desc[ring->next_to_clean];
- size = le16_to_cpu(desc->rx.size);
-
- truesize = hnae3_buf_size(ring);
-
- if (!twobufs)
- last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ int size = le16_to_cpu(desc->rx.size);
+ u32 truesize = hnae3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
- /* Avoid re-using remote pages,flag default unreuse */
- if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
- return;
-
- if (twobufs) {
- /* If we are only owner of page we can reuse it */
- if (likely(page_count(desc_cb->priv) == 1)) {
- /* Flip page offset to other buffer */
- desc_cb->page_offset ^= truesize;
-
- desc_cb->reuse_flag = 1;
- /* bump ref count on page before it is given*/
- get_page(desc_cb->priv);
- }
+ /* Avoid re-using remote pages, or pages the stack is still using
+ * when page_offset rolls back to zero; by default the page is not reused
+ */
+ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
+ (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
- }
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
- if (desc_cb->page_offset <= last_offset) {
+ if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given*/
get_page(desc_cb->priv);
+ } else if (page_count(desc_cb->priv) == 1) {
+ desc_cb->reuse_flag = 1;
+ desc_cb->page_offset = 0;
+ get_page(desc_cb->priv);
}
}
+static int hns3_gro_complete(struct sk_buff *skb)
+{
+ __be16 type = skb->protocol;
+ struct tcphdr *th;
+ int depth = 0;
+
+ while (type == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vh;
+
+ if ((depth + VLAN_HLEN) > skb_headlen(skb))
+ return -EFAULT;
+
+ vh = (struct vlan_hdr *)(skb->data + depth);
+ type = vh->h_vlan_encapsulated_proto;
+ depth += VLAN_HLEN;
+ }
+
+ if (type == htons(ETH_P_IP)) {
+ depth += sizeof(struct iphdr);
+ } else if (type == htons(ETH_P_IPV6)) {
+ depth += sizeof(struct ipv6hdr);
+ } else {
+ netdev_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
+ return -EFAULT;
+ }
+
+ th = (struct tcphdr *)(skb->data + depth);
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ if (th->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return 0;
+}
+
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
- struct hns3_desc *desc)
+ u32 l234info, u32 bd_base_info, u32 ol_info)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int l3_type, l4_type;
- u32 bd_base_info;
int ol4_type;
- u32 l234info;
-
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- l234info = le32_to_cpu(desc->rx.l234_info);
skb->ip_summed = CHECKSUM_NONE;
@@ -2312,12 +2408,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
- /* We MUST enable hardware checksum before enabling hardware GRO */
- if (skb_shinfo(skb)->gso_size) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- }
-
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
@@ -2332,7 +2422,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
@@ -2370,6 +2460,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
struct hns3_desc *desc, u32 l234info,
u16 *vlan_tag)
{
+ struct hnae3_handle *handle = ring->tqp->handle;
struct pci_dev *pdev = ring->tqp->handle->pdev;
if (pdev->revision == 0x20) {
@@ -2382,15 +2473,36 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
+#define HNS3_STRP_BOTH 0x3
+ /* Hardware always inserts the VLAN tag into the RX descriptor when it
+ * strips the tag from the packet; the driver needs to determine which
+ * tag to report to the stack.
+ */
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
return true;
case HNS3_STRP_INNER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
return true;
+ case HNS3_STRP_BOTH:
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ else
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return true;
default:
return false;
}
@@ -2437,7 +2549,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
- ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
desc_cb);
@@ -2512,8 +2624,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
return 0;
}
-static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
- u32 bd_base_info)
+static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info, u32 ol_info)
{
u16 gro_count;
u32 l3_type;
@@ -2521,12 +2634,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
/* if there is no HW GRO, do not set gro params */
- if (!gro_count)
- return;
+ if (!gro_count) {
+ hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
+ return 0;
+ }
- /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
- * to skb_shinfo(skb)->gso_segs
- */
NAPI_GRO_CB(skb)->count = gro_count;
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
@@ -2536,47 +2648,121 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
else if (l3_type == HNS3_L3_TYPE_IPV6)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
else
- return;
+ return -EFAULT;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
- if (skb_shinfo(skb)->gso_size)
- tcp_gro_complete(skb);
+
+ return hns3_gro_complete(skb);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 rss_hash)
{
struct hnae3_handle *handle = ring->tqp->handle;
enum pkt_hash_types rss_type;
- struct hns3_desc *desc;
- int last_bd;
-
- /* When driver handle the rss type, ring->next_to_clean indicates the
- * first descriptor of next packet, need -1 here.
- */
- last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
- desc = &ring->desc[last_bd];
- if (le32_to_cpu(desc->rx.rss_hash))
+ if (rss_hash)
rss_type = handle->kinfo.rss_type;
else
rss_type = PKT_HASH_TYPE_NONE;
- skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
+ skb_set_hash(skb, rss_hash, rss_type);
}
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb)
+static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
enum hns3_pkt_l2t_type l2_frame_type;
+ u32 bd_base_info, l234info, ol_info;
+ struct hns3_desc *desc;
+ unsigned int len;
+ int pre_ntc, ret;
+
+ /* bdinfo handled below is only valid on the last BD of the
+ * current packet, and ring->next_to_clean indicates the first
+ * descriptor of the next packet, so subtract 1 below.
+ */
+ pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
+ (ring->desc_num - 1);
+ desc = &ring->desc[pre_ntc];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ l234info = le32_to_cpu(desc->rx.l234_info);
+ ol_info = le32_to_cpu(desc->rx.ol_info);
+
+ /* Based on the hw strategy, the offloaded tag is stored in
+ * ot_vlan_tag in the two-layer tag case, and in vlan_tag
+ * in the one-layer tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_vld_descs++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EINVAL;
+ }
+
+ if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+ BIT(HNS3_RXD_L2E_B))))) {
+ u64_stats_update_begin(&ring->syncp);
+ if (l234info & BIT(HNS3_RXD_L2E_B))
+ ring->stats.l2_err++;
+ else
+ ring->stats.err_pkt_len++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EFAULT;
+ }
+
+ len = skb->len;
+
+ /* Prepare the skb for processing by the IP stack */
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* This is needed in order to enable forwarding support */
+ ret = hns3_set_gro_and_checksum(ring, skb, l234info,
+ bd_base_info, ol_info);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+ HNS3_RXD_DMAC_S);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += len;
+
+ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+ ring->stats.rx_multicast++;
+
+ u64_stats_update_end(&ring->syncp);
+
+ ring->tqp_vector->rx_group.total_bytes += len;
+
+ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+ return 0;
+}
+
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
+{
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
u32 bd_base_info;
- u32 l234info;
int length;
int ret;
@@ -2636,64 +2822,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ALIGN(ring->pull_len, sizeof(long)));
}
- l234info = le32_to_cpu(desc->rx.l234_info);
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
-
- if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
- }
-
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_vld_descs++;
- u64_stats_update_end(&ring->syncp);
-
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- if (unlikely((!desc->rx.pkt_len) ||
- (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
- BIT(HNS3_RXD_L2E_B))))) {
- u64_stats_update_begin(&ring->syncp);
- if (l234info & BIT(HNS3_RXD_L2E_B))
- ring->stats.l2_err++;
- else
- ring->stats.err_pkt_len++;
- u64_stats_update_end(&ring->syncp);
-
+ ret = hns3_handle_bdinfo(ring, skb);
+ if (unlikely(ret)) {
dev_kfree_skb_any(skb);
- return -EFAULT;
+ return ret;
}
-
- l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
- HNS3_RXD_DMAC_S);
- u64_stats_update_begin(&ring->syncp);
- if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
- ring->stats.rx_multicast++;
-
- ring->stats.rx_pkts++;
- ring->stats.rx_bytes += skb->len;
- u64_stats_update_end(&ring->syncp);
-
- ring->tqp_vector->rx_group.total_bytes += skb->len;
-
- /* This is needed in order to enable forwarding support */
- hns3_set_gro_param(skb, l234info, bd_base_info);
-
- hns3_rx_checksum(ring, skb, desc);
*out_skb = skb;
- hns3_set_rx_skb_rss_type(ring, skb);
return 0;
}
@@ -2703,9 +2838,8 @@ int hns3_clean_rx_ring(
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
int num;
@@ -2714,6 +2848,7 @@ int hns3_clean_rx_ring(
recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
+ unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
@@ -2740,8 +2875,6 @@ int hns3_clean_rx_ring(
continue;
}
- /* Do update ip stack process */
- skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
clean_count += ring->pending_buf;
@@ -2891,7 +3024,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
struct hns3_enet_tqp_vector *tqp_vector =
container_of(napi, struct hns3_enet_tqp_vector, napi);
bool clean_complete = true;
- int rx_budget;
+ int rx_budget = budget;
if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
napi_complete(napi);
@@ -2905,7 +3038,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
hns3_clean_tx_ring(ring);
/* make sure rx ring budget not smaller than 1 */
- rx_budget = max(budget / tqp_vector->num_tqps, 1);
+ if (tqp_vector->num_tqps > 1)
+ rx_budget = max(budget / tqp_vector->num_tqps, 1);
hns3_for_each_ring(ring, tqp_vector->rx_group) {
int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
@@ -3316,6 +3450,7 @@ err:
}
devm_kfree(&pdev->dev, priv->ring_data);
+ priv->ring_data = NULL;
return ret;
}
@@ -3324,12 +3459,16 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
struct hnae3_handle *h = priv->ae_handle;
int i;
+ if (!priv->ring_data)
+ return;
+
for (i = 0; i < h->kinfo.num_tqps; i++) {
devm_kfree(priv->dev, priv->ring_data[i].ring);
devm_kfree(priv->dev,
priv->ring_data[i + h->kinfo.num_tqps].ring);
}
devm_kfree(priv->dev, priv->ring_data);
+ priv->ring_data = NULL;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
@@ -3339,8 +3478,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
if (ring->desc_num <= 0 || ring->buf_size <= 0)
return -EINVAL;
- ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
- GFP_KERNEL);
+ ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
+ sizeof(ring->desc_cb[0]), GFP_KERNEL);
if (!ring->desc_cb) {
ret = -ENOMEM;
goto out;
@@ -3361,7 +3500,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
out_with_desc:
hns3_free_desc(ring);
out_with_desc_cb:
- kfree(ring->desc_cb);
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
ring->desc_cb = NULL;
out:
return ret;
@@ -3370,7 +3509,7 @@ out:
static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
hns3_free_desc(ring);
- kfree(ring->desc_cb);
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
ring->desc_cb = NULL;
ring->next_to_clean = 0;
ring->next_to_use = 0;
@@ -3557,17 +3696,6 @@ static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
-static void hns3_nic_set_priv_ops(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
-
- if ((netdev->features & NETIF_F_TSO) ||
- (netdev->features & NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-}
-
static int hns3_client_start(struct hnae3_handle *handle)
{
if (!handle->ae_algo->ops->client_start)
@@ -3584,6 +3712,21 @@ static void hns3_client_stop(struct hnae3_handle *handle)
handle->ae_algo->ops->client_stop(handle);
}
+static void hns3_info_show(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+
+ dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -3605,6 +3748,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->tx_timeout_count = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
@@ -3617,7 +3762,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
netdev->netdev_ops = &hns3_nic_netdev_ops;
SET_NETDEV_DEV(netdev, &pdev->dev);
hns3_ethtool_set_ops(netdev);
- hns3_nic_set_priv_ops(netdev);
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -3671,6 +3815,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (netif_msg_drv(handle))
+ hns3_info_show(priv);
+
return ret;
out_client_start:
@@ -3697,13 +3844,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_client_stop(handle);
-
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ hns3_client_stop(handle);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
goto out_netdev_free;
@@ -3729,8 +3876,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_dbg_uninit(handle);
- priv->ring_data = NULL;
-
out_netdev_free:
free_netdev(netdev);
}
@@ -3745,11 +3890,13 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
if (linkup) {
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
- netdev_info(netdev, "link up\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link up\n");
} else {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
- netdev_info(netdev, "link down\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link down\n");
}
}
@@ -3773,12 +3920,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
struct netdev_hw_addr *ha, *tmp;
int ret = 0;
+ netif_addr_lock_bh(ndev);
/* go through and sync uc_addr entries to the device */
list = &ndev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_uc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
/* go through and sync mc_addr entries to the device */
@@ -3786,9 +3934,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_mc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
+out:
+ netif_addr_unlock_bh(ndev);
return ret;
}
@@ -3799,6 +3949,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+ netif_addr_lock_bh(netdev);
/* go through and unsync uc_addr entries to the device */
list = &netdev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list)
@@ -3809,6 +3960,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
list_for_each_entry_safe(ha, tmp, &list->list, list)
if (ha->refcount > 1)
hns3_nic_mc_unsync(netdev, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
@@ -3850,6 +4003,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ring_ptr_move_fw(ring, next_to_use);
}
+ /* Free the pending skb in rx ring */
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ ring->pending_buf = 0;
+ }
+
return 0;
}
@@ -4048,18 +4208,24 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_uninit_vector;
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto err_uninit_ring;
+ }
+
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
+err_uninit_ring:
+ hns3_uninit_all_ring(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
err_put_ring:
hns3_put_ring_config(priv);
- priv->ring_data = NULL;
return ret;
}
@@ -4101,7 +4267,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
}
@@ -4121,9 +4287,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
netdev_err(netdev, "uninit ring error\n");
hns3_put_ring_config(priv);
- priv->ring_data = NULL;
-
- clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 75669cd0c311..c14480f9b625 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -42,8 +42,10 @@ enum hns3_nic_state {
#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
#define HNS3_RING_TX_RING_OFFSET_REG 0x00064
+#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
-
+#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
+#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
@@ -374,6 +376,7 @@ struct ring_stats {
u64 tx_err_cnt;
u64 restart_queue;
u64 tx_busy;
+ u64 tx_copy;
};
struct {
u64 rx_pkts;
@@ -386,6 +389,7 @@ struct ring_stats {
u64 l2_err;
u64 l3l4_csum_err;
u64 rx_multicast;
+ u64 non_reuse_pg;
};
};
};
@@ -397,7 +401,6 @@ struct hns3_enet_ring {
struct hns3_enet_ring *next;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_queue *tqp;
- char ring_name[HNS3_RING_NAME_LEN];
struct device *dev; /* will be used for DMA mapping of descriptors */
/* statistic */
@@ -407,9 +410,6 @@ struct hns3_enet_ring {
dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */
- u16 max_desc_num_per_pkt;
- u16 max_raw_data_sz_per_desc;
- u16 max_pkt_size;
int next_to_use; /* idx of next spare desc */
/* idx of lastest sent desc, the ring is empty when equal to
@@ -423,9 +423,6 @@ struct hns3_enet_ring {
u32 flag; /* ring attribute */
- int numa_node;
- cpumask_t affinity_mask;
-
int pending_buf;
struct sk_buff *skb;
struct sk_buff *tail_skb;
@@ -442,11 +439,6 @@ struct hns3_nic_ring_data {
void (*fini_process)(struct hns3_nic_ring_data *);
};
-struct hns3_nic_ops {
- int (*maybe_stop_tx)(struct sk_buff **out_skb,
- int *bnum, struct hns3_enet_ring *ring);
-};
-
enum hns3_flow_level_range {
HNS3_FLOW_LOW = 0,
HNS3_FLOW_MID = 1,
@@ -536,7 +528,6 @@ struct hns3_nic_priv {
u32 port_id;
struct net_device *netdev;
struct device *dev;
- struct hns3_nic_ops ops;
/**
* the cb for nic to manage the ring buffer, the first half of the
@@ -577,18 +568,16 @@ union l4_hdr_info {
unsigned char *hdr;
};
-/* the distance between [begin, end) in a ring buffer
- * note: there is a unuse slot between the begin and the end
- */
-static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end)
-{
- return (end - begin + ring->desc_num) % ring->desc_num;
-}
-
static inline int ring_space(struct hns3_enet_ring *ring)
{
- return ring->desc_num -
- ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hns3_nic_reclaim_desc called by hns3_clean_tx_ring.
+ */
+ int begin = smp_load_acquire(&ring->next_to_clean);
+ int end = READ_ONCE(ring->next_to_use);
+
+ return ((end >= begin) ? (ring->desc_num - end + begin) :
+ (begin - end)) - 1;
}
static inline int is_ring_empty(struct hns3_enet_ring *ring)
@@ -633,7 +622,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
-#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
+#define ring_to_dev(ring) ((ring)->dev)
#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
DMA_TO_DEVICE : DMA_FROM_DEVICE)
@@ -666,6 +655,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 359d4731fb2d..d1588ea6132c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -29,6 +29,7 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("errors", tx_err_cnt),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
+ HNS3_TQP_STAT("copy", tx_copy),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -48,6 +49,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
HNS3_TQP_STAT("multicast", rx_multicast),
+ HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -483,6 +485,11 @@ static void hns3_get_stats(struct net_device *netdev,
struct hnae3_handle *h = hns3_get_handle(netdev);
u64 *p = data;
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting, could not get stats\n");
+ return;
+ }
+
if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
netdev_err(netdev, "could not get any statistics\n");
return;
@@ -599,6 +606,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
+ u8 module_type;
u8 media_type;
u8 link_stat;
@@ -607,7 +615,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
ops = h->ae_algo->ops;
if (ops->get_media_type)
- ops->get_media_type(h, &media_type);
+ ops->get_media_type(h, &media_type, &module_type);
else
return -EOPNOTSUPP;
@@ -617,7 +625,15 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_FIBER:
- cmd->base.port = PORT_FIBRE;
+ if (module_type == HNAE3_MODULE_TYPE_CR)
+ cmd->base.port = PORT_DA;
+ else
+ cmd->base.port = PORT_FIBRE;
+
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_BACKPLANE:
+ cmd->base.port = PORT_NONE;
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_COPPER:
@@ -645,14 +661,79 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
return 0;
}
+static int hns3_check_ksettings_param(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
+ u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u8 autoneg;
+ u32 speed;
+ u8 duplex;
+ int ret;
+
+ if (ops->get_ksettings_an_result) {
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
+ cmd->base.duplex == duplex)
+ return 0;
+ }
+
+ if (ops->get_media_type)
+ ops->get_media_type(handle, &media_type, &module_type);
+
+ if (cmd->base.duplex != DUPLEX_FULL &&
+ media_type != HNAE3_MEDIA_TYPE_COPPER) {
+ netdev_err(netdev,
+ "only copper port supports half duplex!");
+ return -EINVAL;
+ }
+
+ if (ops->check_port_speed) {
+ ret = ops->check_port_speed(handle, cmd->base.speed);
+ if (ret) {
+ netdev_err(netdev, "unsupported speed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ int ret = 0;
+
+ /* The chip doesn't support this mode. */
+ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev)
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
- return -EOPNOTSUPP;
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ ret = hns3_check_ksettings_param(netdev, cmd);
+ if (ret)
+ return ret;
+
+ if (ops->set_autoneg) {
+ ret = ops->set_autoneg(handle, cmd->base.autoneg);
+ if (ret)
+ return ret;
+ }
+
+ if (ops->cfg_mac_speed_dup_h)
+ ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
+ cmd->base.duplex);
+
+ return ret;
}
static u32 hns3_get_rss_key_size(struct net_device *netdev)
@@ -857,19 +938,36 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
static int hns3_nway_reset(struct net_device *netdev)
{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
struct phy_device *phy = netdev->phydev;
+ int autoneg;
if (!netif_running(netdev))
return 0;
- /* Only support nway_reset for netdev with phy attached for now */
- if (!phy)
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return -EBUSY;
+ }
+
+ if (!ops->get_autoneg || !ops->restart_autoneg)
return -EOPNOTSUPP;
- if (phy->autoneg != AUTONEG_ENABLE)
+ autoneg = ops->get_autoneg(handle);
+ if (autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev,
+ "Autoneg is off, don't support to restart it\n");
return -EINVAL;
+ }
+
+ if (phy)
+ return genphy_restart_aneg(phy);
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
- return genphy_restart_aneg(phy);
+ return ops->restart_autoneg(handle);
}
static void hns3_get_channels(struct net_device *netdev,
@@ -1101,6 +1199,95 @@ static int hns3_set_phys_id(struct net_device *netdev,
return h->ae_algo->ops->set_led_id(h, state);
}
+static u32 hns3_get_msglevel(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ return h->msg_enable;
+}
+
+static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ h->msg_enable = msg_level;
+}
+
+/* Translate local fec value into ethtool value. */
+static unsigned int loc_to_eth_fec(u8 loc_fec)
+{
+ u32 eth_fec = 0;
+
+ if (loc_fec & BIT(HNAE3_FEC_AUTO))
+ eth_fec |= ETHTOOL_FEC_AUTO;
+ if (loc_fec & BIT(HNAE3_FEC_RS))
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_BASER))
+ eth_fec |= ETHTOOL_FEC_BASER;
+
+ /* if nothing is set, then FEC is off */
+ if (!eth_fec)
+ eth_fec = ETHTOOL_FEC_OFF;
+
+ return eth_fec;
+}
+
+/* Translate ethtool fec value into local value. */
+static unsigned int eth_to_loc_fec(unsigned int eth_fec)
+{
+ u32 loc_fec = 0;
+
+ if (eth_fec & ETHTOOL_FEC_OFF)
+ return loc_fec;
+
+ if (eth_fec & ETHTOOL_FEC_AUTO)
+ loc_fec |= BIT(HNAE3_FEC_AUTO);
+ if (eth_fec & ETHTOOL_FEC_RS)
+ loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_BASER)
+ loc_fec |= BIT(HNAE3_FEC_BASER);
+
+ return loc_fec;
+}
+
+static int hns3_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 fec_ability;
+ u8 fec_mode;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ if (!ops->get_fec)
+ return -EOPNOTSUPP;
+
+ ops->get_fec(handle, &fec_ability, &fec_mode);
+
+ fec->fec = loc_to_eth_fec(fec_ability);
+ fec->active_fec = loc_to_eth_fec(fec_mode);
+
+ return 0;
+}
+
+static int hns3_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u32 fec_mode;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ if (!ops->set_fec)
+ return -EOPNOTSUPP;
+ fec_mode = eth_to_loc_fec(fec->fec);
+ return ops->set_fec(handle, fec_mode);
+}
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
@@ -1121,6 +1308,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.get_link = hns3_get_link,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -1150,6 +1339,10 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.set_phys_id = hns3_set_phys_id,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
+ .get_fecparam = hns3_get_fecparam,
+ .set_fecparam = hns3_set_fecparam,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
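The FEC helpers added above (loc_to_eth_fec()/eth_to_loc_fec()) reduce the ethtool FEC interface to a bitmask translation: ETHTOOL_FEC_OFF overrides everything else, and each remaining ethtool flag maps to one driver capability bit. The userspace sketch below walks through that mapping; the ETHTOOL_FEC_* flags are the real UAPI definitions from <linux/ethtool.h>, but the HNAE3_FEC_* bit positions are illustrative stand-ins, not the driver's actual enum values.

#include <stdio.h>
#include <linux/ethtool.h>

#define HNAE3_FEC_AUTO  0   /* stand-in bit positions, not the driver's */
#define HNAE3_FEC_BASER 1
#define HNAE3_FEC_RS    2

/* Mirror of eth_to_loc_fec(): "off" overrides everything, otherwise each
 * requested ethtool flag selects one local capability bit.
 */
static unsigned int eth_to_loc_fec(unsigned int eth_fec)
{
    unsigned int loc_fec = 0;

    if (eth_fec & ETHTOOL_FEC_OFF)
        return 0;
    if (eth_fec & ETHTOOL_FEC_AUTO)
        loc_fec |= 1u << HNAE3_FEC_AUTO;
    if (eth_fec & ETHTOOL_FEC_RS)
        loc_fec |= 1u << HNAE3_FEC_RS;
    if (eth_fec & ETHTOOL_FEC_BASER)
        loc_fec |= 1u << HNAE3_FEC_BASER;
    return loc_fec;
}

int main(void)
{
    /* e.g. a request for RS plus auto selects two local bits */
    printf("0x%x\n", eth_to_loc_fec(ETHTOOL_FEC_RS | ETHTOOL_FEC_AUTO));
    return 0;
}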
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 3a093a92eac5..fbd904e3077c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -355,7 +355,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock_bh(&hdev->hw.cmq.crq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
@@ -364,7 +364,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hclge_cmd_init_regs(&hdev->hw);
- spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
@@ -373,21 +373,26 @@ int hclge_cmd_init(struct hclge_dev *hdev)
* reset may happen when lower level reset is being processed.
*/
if ((hclge_is_reset_pending(hdev))) {
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err_cmd_init;
}
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
"firmware version query failed %d\n", ret);
- return ret;
+ goto err_cmd_init;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
+
+err_cmd_init:
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ return ret;
}
static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
@@ -411,7 +416,7 @@ static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
spin_unlock(&ring->lock);
}
-void hclge_destroy_cmd_queue(struct hclge_hw *hw)
+static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
hclge_destroy_queue(&hw->cmq.csq);
hclge_destroy_queue(&hw->cmq.crq);
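The hclge_cmd_init() hunk above replaces two early returns with a single err_cmd_init label, so every failure after the queues have been re-initialised leaves HCLGE_STATE_CMD_DISABLE set. A compilable sketch of that single-exit pattern, using illustrative stand-in names rather than the driver's real ones:

#include <errno.h>
#include <stdbool.h>

struct fake_dev {
    bool cmd_disabled;  /* stand-in for HCLGE_STATE_CMD_DISABLE */
};

static bool reset_pending(struct fake_dev *dev) { (void)dev; return false; }

static int query_fw_version(struct fake_dev *dev, unsigned int *ver)
{
    (void)dev;
    *ver = 0x10203;
    return 0;
}

static int cmd_init(struct fake_dev *dev)
{
    unsigned int version;
    int ret;

    if (reset_pending(dev)) {
        ret = -EBUSY;
        goto err_cmd_init;
    }

    ret = query_fw_version(dev, &version);
    if (ret)
        goto err_cmd_init;

    return 0;

err_cmd_init:
    /* every error path funnels through here, so the disabled state
     * cannot be missed when a new failure branch is added
     */
    dev->cmd_disabled = true;
    return ret;
}

int main(void)
{
    struct fake_dev dev = { .cmd_disabled = false };

    return cmd_init(&dev);
}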
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 3714733c96d9..d79a209b80f6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -109,7 +109,11 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
+ HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
+ HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
+ HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
@@ -237,8 +241,11 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+ /* NCL config command */
+ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
/* SFP command */
- HCLGE_OPC_SFP_GET_SPEED = 0x7104,
+ HCLGE_OPC_GET_SFP_INFO = 0x7104,
/* Error INT commands */
HCLGE_MAC_COMMON_INT_EN = 0x030E,
@@ -593,9 +600,30 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
-struct hclge_sfp_speed_cmd {
- __le32 sfp_speed;
- u32 rsv[5];
+struct hclge_sfp_info_cmd {
+ __le32 speed;
+ u8 query_type; /* 0: sfp speed, 1: active speed */
+ u8 active_fec;
+ u8 autoneg; /* autoneg state */
+ u8 autoneg_ability; /* whether autoneg is supported */
+ __le32 speed_ability; /* speed ability for current media */
+ __le32 module_type;
+ u8 rsv[8];
+};
+
+#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
+#define HCLGE_MAC_CFG_FEC_MODE_S 1
+#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1)
+#define HCLGE_MAC_CFG_FEC_SET_DEF_B 0
+#define HCLGE_MAC_CFG_FEC_CLR_DEF_B 1
+
+#define HCLGE_MAC_FEC_OFF 0
+#define HCLGE_MAC_FEC_BASER 1
+#define HCLGE_MAC_FEC_RS 2
+struct hclge_config_fec_cmd {
+ u8 fec_mode;
+ u8 default_config;
+ u8 rsv[22];
};
#define HCLGE_MAC_UPLINK_PORT 0x100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 1192cf6f2321..a9ffb57c4607 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -901,6 +901,109 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
}
}
+static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "PF reset count: %d\n",
+ hdev->rst_stats.pf_rst_cnt);
+ dev_info(&hdev->pdev->dev, "FLR reset count: %d\n",
+ hdev->rst_stats.flr_rst_cnt);
+ dev_info(&hdev->pdev->dev, "CORE reset count: %d\n",
+ hdev->rst_stats.core_rst_cnt);
+ dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n",
+ hdev->rst_stats.global_rst_cnt);
+ dev_info(&hdev->pdev->dev, "IMP reset count: %d\n",
+ hdev->rst_stats.imp_rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset done count: %d\n",
+ hdev->rst_stats.reset_done_cnt);
+ dev_info(&hdev->pdev->dev, "HW reset done count: %d\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ dev_info(&hdev->pdev->dev, "reset count: %d\n",
+ hdev->rst_stats.reset_cnt);
+}
+
+/* hclge_dbg_dump_ncl_config: print a specified range of the NCL_CONFIG file
+ * @hdev: pointer to struct hclge_dev
+ * @cmd_buf: string containing the offset and length to dump
+ */
+static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf)
+{
+#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
+#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
+#define HCLGE_CMD_DATA_NUM 6
+
+ struct hclge_desc desc[5];
+ u32 byte_offset;
+ int bd_num = 5;
+ int offset;
+ int length;
+ int data0;
+ int ret;
+ int i;
+ int j;
+
+ ret = sscanf(cmd_buf, "%x %x", &offset, &length);
+ if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
+ length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
+ dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
+ return;
+ }
+ if (offset < 0 || length <= 0) {
+ dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
+ return;
+ }
+
+ dev_info(&hdev->pdev->dev, "offset | data\n");
+
+ while (length > 0) {
+ data0 = offset;
+ if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
+ data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
+ else
+ data0 |= length << 16;
+ ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
+ HCLGE_OPC_QUERY_NCL_CONFIG);
+ if (ret)
+ return;
+
+ byte_offset = offset;
+ for (i = 0; i < bd_num; i++) {
+ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
+ if (i == 0 && j == 0)
+ continue;
+
+ dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
+ byte_offset,
+ le32_to_cpu(desc[i].data[j]));
+ byte_offset += sizeof(u32);
+ length -= sizeof(u32);
+ if (length <= 0)
+ return;
+ }
+ }
+ offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
+ }
+}
+
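hclge_dbg_dump_ncl_config() above parses "offset length" (both hex) from the debugfs command buffer, rejects out-of-range requests, and then fetches the file in chunks of HCLGE_MAX_NCL_CONFIG_LENGTH (116) bytes per query. A userspace sketch of just the parsing and bounds checks follows; it keeps the same limits as the hunk but uses unsigned values where the driver checks for negatives explicitly, and all other names are illustrative.

#include <stdio.h>

#define MAX_NCL_CONFIG_OFFSET 4096
#define MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)  /* bytes returned per query */

/* Mirror of the offset/length validation in the hunk above. */
static int parse_ncl_range(const char *buf, unsigned int *offset,
                           unsigned int *length)
{
    if (sscanf(buf, "%x %x", offset, length) != 2)
        return -1;
    if (*length == 0)
        return -1;
    if (*offset >= MAX_NCL_CONFIG_OFFSET ||
        *length > MAX_NCL_CONFIG_OFFSET - *offset)
        return -1;
    return 0;
}

int main(void)
{
    unsigned int offset, length;

    if (!parse_ncl_range("0x80 0x40", &offset, &length))
        printf("dump %u bytes from 0x%x, %d bytes per query\n",
               length, offset, MAX_NCL_CONFIG_LENGTH);
    return 0;
}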
+/* hclge_dbg_dump_mac_tnl_status: print messages about recent mac tnl interrupts
+ * @hdev: pointer to struct hclge_dev
+ */
+static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
+{
+#define HCLGE_BILLION_NANO_SECONDS 1000000000
+
+ struct hclge_mac_tnl_stats stats;
+ unsigned long rem_nsec;
+
+ dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");
+
+ while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
+ rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
+ dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
+ }
+}
+
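The dump loop above drains the driver's mac_tnl_log kfifo and prefixes each entry with a printk-style timestamp, using do_div() to split the nanosecond counter into seconds and a sub-second remainder. A userspace sketch of that formatting, with plain 64-bit division standing in for do_div() and a millisecond remainder for the three-digit field:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Print one logged entry as "[seconds.milliseconds]status = ...". */
static void print_tnl_entry(uint64_t time_ns, uint32_t status)
{
    uint64_t secs = time_ns / NSEC_PER_SEC;
    uint64_t rem_nsec = time_ns % NSEC_PER_SEC;

    printf("[%07llu.%03llu]status = 0x%x\n",
           (unsigned long long)secs,
           (unsigned long long)(rem_nsec / 1000000),
           (unsigned int)status);
}

int main(void)
{
    print_tnl_entry(1234567890123ULL, 0x4);
    return 0;
}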
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -924,6 +1027,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
hclge_dbg_dump_mng_table(hdev);
} else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
+ hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
+ hclge_dbg_dump_ncl_config(hdev,
+ &cmd_buf[sizeof("dump ncl_config")]);
+ } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
+ hclge_dbg_dump_mac_tnl_status(hdev);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 1f52d11f77b5..4ac80634c984 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -4,287 +4,468 @@
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
- { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
- { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
- { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
- { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
- { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
- { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
- { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
- { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
- { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
- { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_int[] = {
- { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
+ { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
- { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
- { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
- { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
- { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
- { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
- { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
+ { .int_msk = BIT(0), .msg = "rx_buf_overflow",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(3), .msg = "tx_buf_overflow",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(4), .msg = "tx_buf_underrun",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
+ .reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
- { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
- { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
- { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
- { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
- { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
- { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
- { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
- { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
- { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
- { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
- { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
- { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
- { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
- { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
- { .int_msk = BIT(27),
- .msg = "flow_director_ad_mem0_ecc_mbit_err" },
- { .int_msk = BIT(28),
- .msg = "flow_director_ad_mem1_ecc_mbit_err" },
- { .int_msk = BIT(29),
- .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
- { .int_msk = BIT(30),
- .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
- { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
- { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
+ { .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
- { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
- { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tm_sch_rint[] = {
- { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
- { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
- { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
- { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
- { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
- { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
- { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
- { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
- { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
- { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
- { .int_msk = BIT(12),
- .msg = "tm_sch_port_shap_offset_fifo_wr_err" },
- { .int_msk = BIT(13),
- .msg = "tm_sch_port_shap_offset_fifo_rd_err" },
- { .int_msk = BIT(14),
- .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
- { .int_msk = BIT(15),
- .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
- { .int_msk = BIT(16),
- .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
- { .int_msk = BIT(17),
- .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
- { .int_msk = BIT(18),
- .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
- { .int_msk = BIT(19),
- .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
- { .int_msk = BIT(20),
- .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
- { .int_msk = BIT(21),
- .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
- { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
- { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
- { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
- { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
- { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
- { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
- { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
- { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
- { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
- { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
+ { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
- { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
- { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
- { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
- { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
- { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
- { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
- { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
- { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
- { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
- { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
- { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
- { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
- { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
- { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
- { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
- { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
- { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
- { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
+ { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
- { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
- { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
- { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
- { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
- { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
- { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
- { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
- { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
- { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
+ { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
- { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
- { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
- { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
- { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
- { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
- { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
- { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
- { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
- { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
- { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
- { .int_msk = BIT(26), .msg = "rd_bus_err" },
- { .int_msk = BIT(27), .msg = "wr_bus_err" },
- { .int_msk = BIT(28), .msg = "reg_search_miss" },
- { .int_msk = BIT(29), .msg = "rx_q_search_miss" },
- { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
- { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
+ { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "rd_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "wr_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "reg_search_miss",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "rx_q_search_miss",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
- { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
- { .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
- { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
- { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
- { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
- { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
- { .int_msk = BIT(5), .msg = "buf_wait_timeout" },
+ { .int_msk = BIT(0), .msg = "over_8bd_no_fe",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(5), .msg = "buf_wait_timeout",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
- { .int_msk = BIT(0), .msg = "buf_sum_err" },
- { .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
- { .int_msk = BIT(2), .msg = "ppp_mbid_err" },
- { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
- { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
- { .int_msk = BIT(5), .msg = "cks_edit_position_err" },
- { .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
- { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
- { .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
- { .int_msk = BIT(9), .msg = "vlan_num_in_err" },
+ { .int_msk = BIT(0), .msg = "buf_sum_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "ppp_mb_num_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(2), .msg = "ppp_mbid_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "cks_edit_position_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "cks_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "vlan_num_ot_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "vlan_num_in_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
#define HCLGE_SSU_MEM_ECC_ERR(x) \
- { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }
+ { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
+ .reset_level = HNAE3_GLOBAL_RESET }
static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
HCLGE_SSU_MEM_ECC_ERR(0),
@@ -323,62 +504,106 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
};
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
- { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
- { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
- { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
- { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
- { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
- { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
- { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
- { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
- { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
- { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
- { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
- { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
- { .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
- { .int_msk = BIT(1), .msg = "ig_host_inf_int" },
- { .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
- { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
- { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
- { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
- { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
- { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
- { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
- { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
- { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
- { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
- { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
- { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
- { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
- { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
- { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
- { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
- { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
- { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
- { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
- { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
- { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
- { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
+ { .int_msk = BIT(0), .msg = "ig_mac_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "ig_host_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "ig_roc_buf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
- { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
- { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
- { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
- { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
+ { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
- { .int_msk = BIT(9), .msg = "low_water_line_err_port" },
- { .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "low_water_line_err_port",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(10), .msg = "hi_water_line_err_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -406,16 +631,29 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
-static void hclge_log_error(struct device *dev, char *reg,
- const struct hclge_hw_error *err,
- u32 err_sts)
+static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
+ const struct hclge_hw_error *err,
+ u32 err_sts)
{
+ enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
+ bool need_reset = false;
+
while (err->msg) {
- if (err->int_msk & err_sts)
+ if (err->int_msk & err_sts) {
dev_warn(dev, "%s %s found [error status=0x%x]\n",
reg, err->msg, err_sts);
+ if (err->reset_level != HNAE3_NONE_RESET &&
+ err->reset_level >= reset_level) {
+ reset_level = err->reset_level;
+ need_reset = true;
+ }
+ }
err++;
}
+ if (need_reset)
+ return reset_level;
+ else
+ return HNAE3_NONE_RESET;
}
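With the reset_level field added to every hclge_hw_error entry, hclge_log_error() now both logs each asserted bit and reports the strongest reset that the matching entries request, which the callers then feed into HCLGE_SET_DEFAULT_RESET_REQUEST(). The userspace sketch below shows the same table-scan-and-maximise pattern; the enum values and the two-entry table are illustrative stand-ins, and the driver's extra need_reset/HNAE3_FUNC_RESET bookkeeping is folded into a simple maximum.

#include <stdint.h>
#include <stdio.h>

enum reset_level { NONE_RESET, FUNC_RESET, GLOBAL_RESET };  /* stand-ins */

struct hw_error {
    uint32_t int_msk;
    const char *msg;
    enum reset_level reset_level;
};

static const struct hw_error example_tbl[] = {
    { 1u << 0, "buf_sum_err",  NONE_RESET },
    { 1u << 2, "ppp_mbid_err", GLOBAL_RESET },
    { 0, NULL, NONE_RESET }, /* sentinel */
};

/* Log every error bit that is set and return the strongest reset level
 * requested by any matching table entry.
 */
static enum reset_level log_error(const struct hw_error *err, uint32_t sts)
{
    enum reset_level level = NONE_RESET;

    for (; err->msg; err++) {
        if (!(err->int_msk & sts))
            continue;
        printf("%s found [error status=0x%x]\n", err->msg, (unsigned int)sts);
        if (err->reset_level > level)
            level = err->reset_level;
    }
    return level;
}

int main(void)
{
    return log_error(example_tbl, 0x5) == GLOBAL_RESET ? 0 : 1;
}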
/* hclge_cmd_query_error: read the error information
@@ -454,6 +692,16 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
return ret;
}
+static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false);
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
@@ -673,6 +921,21 @@ static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
return ret;
}
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
bool en)
{
@@ -826,6 +1089,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
int num)
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ enum hnae3_reset_type reset_level;
struct device *dev = &hdev->pdev->dev;
__le32 *desc_data;
u32 status;
@@ -845,78 +1109,94 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* log HNS common errors */
status = le32_to_cpu(desc[0].data[0]);
if (status) {
- hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
- &hclge_imp_tcm_ecc_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ &hclge_imp_tcm_ecc_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[1]);
if (status) {
- hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
- &hclge_cmdq_nic_mem_ecc_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+ &hclge_cmdq_nic_mem_ecc_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
dev_warn(dev, "imp_rd_data_poison_err found\n");
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
}
status = le32_to_cpu(desc[0].data[3]);
if (status) {
- hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
- &hclge_tqp_int_ecc_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+ &hclge_tqp_int_ecc_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[4]);
if (status) {
- hclge_log_error(dev, "MSIX_ECC_INT_STS",
- &hclge_msix_sram_ecc_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
+ &hclge_msix_sram_ecc_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log SSU(Storage Switch Unit) errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*(desc_data + 2));
if (status) {
- hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
- &hclge_ssu_mem_ecc_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+ &hclge_ssu_mem_ecc_err_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
if (status) {
- hclge_log_error(dev, "SSU_COMMON_ERR_INT",
- &hclge_ssu_com_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+ &hclge_ssu_com_err_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log IGU(Ingress Unit) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
- if (status)
- hclge_log_error(dev, "IGU_INT_STS",
- &hclge_igu_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "IGU_INT_STS",
+ &hclge_igu_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
/* log PPP(Programmable Packet Process) errors */
desc_data = (__le32 *)&desc[4];
status = le32_to_cpu(*(desc_data + 1));
- if (status)
- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
- &hclge_ppp_mpf_abnormal_int_st1[0], status);
+ if (status) {
+ reset_level =
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+ &hclge_ppp_mpf_abnormal_int_st1[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
- if (status)
- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
- &hclge_ppp_mpf_abnormal_int_st3[0], status);
+ if (status) {
+ reset_level =
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppp_mpf_abnormal_int_st3[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[5];
@@ -924,55 +1204,60 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
if (status) {
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
"rpu_rx_pkt_ecc_mbit_err");
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
status = le32_to_cpu(*(desc_data + 2));
if (status) {
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
- &hclge_ppu_mpf_abnormal_int_st2[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level =
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
if (status) {
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
- &hclge_ppu_mpf_abnormal_int_st3[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level =
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppu_mpf_abnormal_int_st3[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log TM(Traffic Manager) errors */
desc_data = (__le32 *)&desc[6];
status = le32_to_cpu(*desc_data);
if (status) {
- hclge_log_error(dev, "TM_SCH_RINT",
- &hclge_tm_sch_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "TM_SCH_RINT",
+ &hclge_tm_sch_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log QCN(Quantized Congestion Control) errors */
desc_data = (__le32 *)&desc[7];
status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
if (status) {
- hclge_log_error(dev, "QCN_FIFO_RINT",
- &hclge_qcn_fifo_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
+ &hclge_qcn_fifo_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
if (status) {
- hclge_log_error(dev, "QCN_ECC_RINT",
- &hclge_qcn_ecc_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
+ &hclge_qcn_ecc_rint[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log NCSI errors */
desc_data = (__le32 *)&desc[9];
status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
if (status) {
- hclge_log_error(dev, "NCSI_ECC_INT_RPT",
- &hclge_ncsi_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+ &hclge_ncsi_err_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* clear all main PF RAS errors */
@@ -1000,6 +1285,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
+ enum hnae3_reset_type reset_level;
__le32 *desc_data;
u32 status;
int ret;
@@ -1018,38 +1304,47 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log SSU(Storage Switch Unit) errors */
status = le32_to_cpu(desc[0].data[0]);
if (status) {
- hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
- &hclge_ssu_port_based_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_err_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[1]);
if (status) {
- hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
- &hclge_ssu_fifo_overflow_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+ &hclge_ssu_fifo_overflow_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[2]);
if (status) {
- hclge_log_error(dev, "SSU_ETS_TCG_INT",
- &hclge_ssu_ets_tcg_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
+ &hclge_ssu_ets_tcg_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
- if (status)
- hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
- &hclge_igu_egu_tnl_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+ &hclge_igu_egu_tnl_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
- if (status)
- hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
- &hclge_ppu_pf_abnormal_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+ &hclge_ppu_pf_abnormal_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
@@ -1341,16 +1636,15 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
+ enum hnae3_reset_type reset_level;
struct hclge_desc desc_bd;
struct hclge_desc *desc;
__le32 *desc_data;
- int ret = 0;
u32 status;
-
- /* set default handling */
- set_bit(HNAE3_FUNC_RESET, reset_requests);
+ int ret;
/* query the number of bds for the MSIx int status */
hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
@@ -1359,8 +1653,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "fail(%d) to query msix int status bd num\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
return ret;
}
@@ -1381,8 +1673,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
@@ -1390,9 +1680,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data);
if (status) {
- hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
- &hclge_mac_afifo_tnl_int[0], status);
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+ &hclge_mac_afifo_tnl_int[0],
+ status);
+ set_bit(reset_level, reset_requests);
}
/* log PPU(RCB) MPF errors */
@@ -1400,9 +1691,11 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status) {
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
- &hclge_ppu_mpf_abnormal_int_st2[0], status);
- set_bit(HNAE3_CORE_RESET, reset_requests);
+ reset_level =
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0],
+ status);
+ set_bit(reset_level, reset_requests);
}
/* clear all main PF MSIx errors */
@@ -1413,8 +1706,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
@@ -1428,32 +1719,37 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "query all pf msix int cmd failed (%d)\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
/* log SSU PF errors */
status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
if (status) {
- hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
- &hclge_ssu_port_based_pf_int[0], status);
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_pf_int[0],
+ status);
+ set_bit(reset_level, reset_requests);
}
/* read and log PPP PF errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*desc_data);
- if (status)
- hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
- &hclge_ppp_pf_abnormal_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+ &hclge_ppp_pf_abnormal_int[0],
+ status);
+ set_bit(reset_level, reset_requests);
+ }
/* log PPU(RCB) PF errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
- if (status)
- hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
- &hclge_ppu_pf_abnormal_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+ &hclge_ppu_pf_abnormal_int[0],
+ status);
+ set_bit(reset_level, reset_requests);
+ }
/* clear all PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false);
@@ -1463,8 +1759,31 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ }
+
+ /* query and clear mac tnl interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+ if (ret) {
+ dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
+ goto msi_error;
+ }
+
+ status = le32_to_cpu(desc->data[0]);
+ if (status) {
+ /* When a MAC tunnel interrupt occurs, record the current time and
+ * register status in a fifo, then clear the status, so that a sudden
+ * link status change at some later time can still be queried through
+ * debugfs.
+ */
+ mac_tnl_stats.time = local_clock();
+ mac_tnl_stats.status = status;
+ kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
+ ret = hclge_clear_mac_tnl_int(hdev);
+ if (ret)
+ dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
+ set_bit(HNAE3_NONE_RESET, reset_requests);
}
msi_error:
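/* Editorial sketch, not part of the patch: one plausible way the entries
 * queued above with kfifo_put() could be drained later, for example from a
 * debugfs dump handler. The function name is hypothetical; only
 * hdev->mac_tnl_log and struct hclge_mac_tnl_stats come from this series.
 */
static void example_dump_mac_tnl_log(struct hclge_dev *hdev)
{
	struct hclge_mac_tnl_stats stats;

	/* kfifo_get() pops one entry at a time until the fifo is empty */
	while (kfifo_get(&hdev->mac_tnl_log, &stats))
		dev_info(&hdev->pdev->dev,
			 "mac tnl int: time %llu, status 0x%x\n",
			 stats.time, stats.status);
}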
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index fc068280d391..9645590c9294 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -47,6 +47,9 @@
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
+#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
@@ -112,8 +115,10 @@ struct hclge_hw_blk {
struct hclge_hw_error {
u32 int_msk;
const char *msg;
+ enum hnae3_reset_type reset_level;
};
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
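/* Editorial sketch, not part of the patch: with the new .reset_level member,
 * an error table entry and the walk that hclge_log_error() now performs might
 * look roughly like this. The table contents and helper name are
 * illustrative; only struct hclge_hw_error and enum hnae3_reset_type are
 * taken from the series, and the comparison assumes the enum is ordered by
 * severity.
 */
static const struct hclge_hw_error example_err_tbl[] = {
	{ .int_msk = BIT(0), .msg = "example_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};

static enum hnae3_reset_type
example_log_error(struct device *dev, const struct hclge_hw_error *err,
		  u32 status)
{
	enum hnae3_reset_type level = HNAE3_NONE_RESET;

	for (; err->msg; err++) {
		if (!(err->int_msk & status))
			continue;

		dev_warn(dev, "%s found [error status=0x%x]\n",
			 err->msg, status);

		/* keep the most severe reset level among the bits set */
		if (err->reset_level > level)
			level = err->reset_level;
	}

	return level;
}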
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index deda606c51e7..d3b1f8cb1155 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -31,6 +32,7 @@
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
u16 *allocated_size, bool is_alloc);
@@ -697,6 +699,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_tqps_get_stats(handle, p);
}
+static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
+ u64 *rx_cnt)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
+ *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+}
+
static int hclge_parse_func_status(struct hclge_dev *hdev,
struct hclge_func_status_cmd *status)
{
@@ -833,33 +845,189 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
return 0;
}
-static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
- unsigned long *supported = hdev->hw.mac.supported;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 speed_ability = hdev->hw.mac.speed_ability;
+ u32 speed_bit = 0;
- if (speed_ability & HCLGE_SUPPORT_1G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- supported);
+ switch (speed) {
+ case HCLGE_MAC_SPEED_10M:
+ speed_bit = HCLGE_SUPPORT_10M_BIT;
+ break;
+ case HCLGE_MAC_SPEED_100M:
+ speed_bit = HCLGE_SUPPORT_100M_BIT;
+ break;
+ case HCLGE_MAC_SPEED_1G:
+ speed_bit = HCLGE_SUPPORT_1G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_10G:
+ speed_bit = HCLGE_SUPPORT_10G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ speed_bit = HCLGE_SUPPORT_25G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_40G:
+ speed_bit = HCLGE_SUPPORT_40G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_50G:
+ speed_bit = HCLGE_SUPPORT_50G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ speed_bit = HCLGE_SUPPORT_100G_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (speed_bit & speed_ability)
+ return 0;
+
+ return -EINVAL;
+}
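/* Editorial sketch, not part of the patch: a hypothetical ethtool-path caller
 * rejecting a forced speed that the current media does not advertise. Only
 * hclge_check_port_speed() is introduced by the series.
 */
static int example_validate_forced_speed(struct hnae3_handle *handle,
					 u32 speed)
{
	int ret;

	/* -EINVAL when the matching HCLGE_SUPPORT_*_BIT is not set in
	 * hdev->hw.mac.speed_ability
	 */
	ret = hclge_check_port_speed(handle, speed);
	if (ret)
		return ret;

	/* safe to go on and program the MAC with this speed */
	return 0;
}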
+static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
+{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- supported);
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ mac->supported);
+}
+static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ mac->supported);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- supported);
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ mac->supported);
+}
+static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ mac->supported);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ mac->supported);
+}
+static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ mac->supported);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ mac->supported);
+}
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+static void hclge_convert_setting_fec(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+
+ switch (mac->speed) {
+ case HCLGE_MAC_SPEED_10G:
+ case HCLGE_MAC_SPEED_40G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ mac->fec_ability =
+ BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ case HCLGE_MAC_SPEED_50G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ mac->fec_ability =
+ BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO);
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ break;
+ default:
+ mac->fec_ability = 0;
+ break;
+ }
+}
+
+static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
+ u8 speed_ability)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ mac->supported);
+
+ hclge_convert_setting_sr(mac, speed_ability);
+ hclge_convert_setting_lr(mac, speed_ability);
+ hclge_convert_setting_cr(mac, speed_ability);
+ if (hdev->pdev->revision >= 0x21)
+ hclge_convert_setting_fec(mac);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+}
+
+static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
+ u8 speed_ability)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ hclge_convert_setting_kr(mac, speed_ability);
+ if (hdev->pdev->revision >= 0x21)
+ hclge_convert_setting_fec(mac);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
@@ -900,8 +1068,9 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_fiber_link_mode(hdev, speed_ability);
else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
hclge_parse_copper_link_mode(hdev, speed_ability);
+ else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
+ hclge_parse_backplane_link_mode(hdev, speed_ability);
}
-
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1015,6 +1184,23 @@ static int hclge_get_cap(struct hclge_dev *hdev)
return ret;
}
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC 64
+#define HCLGE_MIN_RX_DESC 64
+
+ if (!is_kdump_kernel())
+ return;
+
+ dev_info(&hdev->pdev->dev,
+ "Running kdump kernel. Using minimal resources\n");
+
+ /* minimum number of queue pairs equals the number of vports */
+ hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+ hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
struct hclge_cfg cfg;
@@ -1074,6 +1260,8 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+ hclge_init_kdump_kernel_config(hdev);
+
return ret;
}
@@ -1337,6 +1525,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->back = hdev;
vport->vport_id = i;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ vport->rxvlan_cfg.rx_vlan_offload_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
@@ -1399,7 +1589,7 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
return ret;
}
-static int hclge_get_tc_num(struct hclge_dev *hdev)
+static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
int i, cnt = 0;
@@ -1409,17 +1599,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev)
return cnt;
}
-static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
-{
- int i, cnt = 0;
-
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- if (hdev->hw_tc_map & BIT(i) &&
- hdev->tm_info.hw_pfc_map & BIT(i))
- cnt++;
- return cnt;
-}
-
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
@@ -1483,14 +1662,12 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc,
u32 rx_all)
{
- u32 shared_buf_min, shared_buf_tc, shared_std;
- int tc_num, pfc_enable_num;
+ u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
+ u32 tc_num = hclge_get_tc_num(hdev);
u32 shared_buf, aligned_mps;
u32 rx_priv;
int i;
- tc_num = hclge_get_tc_num(hdev);
- pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
if (hnae3_dev_dcb_supported(hdev))
@@ -1499,9 +1676,7 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ hdev->dv_buf_size;
- shared_buf_tc = pfc_enable_num * aligned_mps +
- (tc_num - pfc_enable_num) * aligned_mps / 2 +
- aligned_mps;
+ shared_buf_tc = tc_num * aligned_mps + aligned_mps;
shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
HCLGE_BUF_SIZE_UNIT);
@@ -1518,19 +1693,26 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
} else {
buf_alloc->s_buf.self.high = aligned_mps +
HCLGE_NON_DCB_ADDITIONAL_BUF;
- buf_alloc->s_buf.self.low =
- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+ buf_alloc->s_buf.self.low = aligned_mps;
+ }
+
+ if (hnae3_dev_dcb_supported(hdev)) {
+ if (tc_num)
+ hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
+ else
+ hi_thrd = shared_buf - hdev->dv_buf_size;
+
+ hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
+ hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
+ lo_thrd = hi_thrd - aligned_mps / 2;
+ } else {
+ hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
+ lo_thrd = aligned_mps;
}
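	/* Editorial worked example, not part of the patch (all numbers
	 * hypothetical): on a DCB-capable device with shared_buf = 40960,
	 * dv_buf_size = 8192, tc_num = 4 and aligned_mps = 2048,
	 * hi_thrd = (40960 - 8192) / 4 = 8192, which already exceeds
	 * 2 * aligned_mps and (assuming HCLGE_BUF_SIZE_UNIT divides it) stays
	 * 8192 after rounddown, so lo_thrd = 8192 - 1024 = 7168. Every TC then
	 * gets the same {low = 7168, high = 8192} threshold pair below.
	 */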
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- if ((hdev->hw_tc_map & BIT(i)) &&
- (hdev->tm_info.hw_pfc_map & BIT(i))) {
- buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
- buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
- } else {
- buf_alloc->s_buf.tc_thrd[i].low = 0;
- buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
- }
+ buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
+ buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
}
return true;
@@ -2095,6 +2277,16 @@ static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ if (!hdev->hw.mac.support_autoneg) {
+ if (enable) {
+ dev_err(&hdev->pdev->dev,
+ "autoneg is not supported by current port\n");
+ return -EOPNOTSUPP;
+ } else {
+ return 0;
+ }
+ }
+
return hclge_set_autoneg_en(hdev, enable);
}
@@ -2110,6 +2302,78 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
return hdev->hw.mac.autoneg;
}
+static int hclge_restart_autoneg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
+{
+ struct hclge_config_fec_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
+
+ req = (struct hclge_config_fec_cmd *)desc.data;
+ if (fec_mode & BIT(HNAE3_FEC_AUTO))
+ hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
+ if (fec_mode & BIT(HNAE3_FEC_RS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_BASER))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
+
+ return ret;
+}
+
+static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int ret;
+
+ if (fec_mode && !(mac->fec_ability & fec_mode)) {
+ dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_set_fec_hw(hdev, fec_mode);
+ if (ret)
+ return ret;
+
+ mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
+ return 0;
+}
+
+static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
+ u8 *fec_mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ if (fec_ability)
+ *fec_ability = mac->fec_ability;
+ if (fec_mode)
+ *fec_mode = mac->fec_mode;
+}
+
static int hclge_mac_init(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
@@ -2127,6 +2391,15 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
+ if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
+ ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Fec mode init fail, ret = %d\n", ret);
+ return ret;
+ }
+ }
+
ret = hclge_set_mac_mtu(hdev, hdev->mps);
if (ret) {
dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
@@ -2143,7 +2416,8 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
- if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
schedule_work(&hdev->mbx_service_task);
}
@@ -2222,6 +2496,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
handle = &hdev->vport[i].nic;
client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
rhandle = &hdev->vport[i].roce;
if (rclient && rclient->ops->link_status_change)
rclient->ops->link_status_change(rhandle,
@@ -2231,14 +2506,35 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
}
+static void hclge_update_port_capability(struct hclge_mac *mac)
+{
+ /* firmware cannot identify the backplane type; the media type
+ * read from the configuration helps to handle it
+ */
+ if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
+ mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ mac->module_type = HNAE3_MODULE_TYPE_KR;
+ else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
+ mac->module_type = HNAE3_MODULE_TYPE_TP;
+
+ if (mac->support_autoneg) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
+ linkmode_copy(mac->advertising, mac->supported);
+ } else {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ mac->supported);
+ linkmode_zero(mac->advertising);
+ }
+}
+
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
- struct hclge_sfp_speed_cmd *resp = NULL;
+ struct hclge_sfp_info_cmd *resp = NULL;
struct hclge_desc desc;
int ret;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
- resp = (struct hclge_sfp_speed_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret == -EOPNOTSUPP) {
dev_warn(&hdev->pdev->dev,
@@ -2249,28 +2545,67 @@ static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
return ret;
}
- *speed = resp->sfp_speed;
+ *speed = le32_to_cpu(resp->speed);
return 0;
}
-static int hclge_update_speed_duplex(struct hclge_dev *hdev)
+static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
- struct hclge_mac mac = hdev->hw.mac;
- int speed;
+ struct hclge_sfp_info_cmd *resp;
+ struct hclge_desc desc;
int ret;
- /* get the speed from SFP cmd when phy
- * doesn't exit.
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
+
+ resp->query_type = QUERY_ACTIVE_SPEED;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "IMP does not support get SFP info %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
+ return ret;
+ }
+
+ mac->speed = le32_to_cpu(resp->speed);
+ /* if resp->speed_ability is 0, it means the firmware is an old
+ * version, so do not update these parameters
+ */
- if (mac.phydev)
+ if (resp->speed_ability) {
+ mac->module_type = le32_to_cpu(resp->module_type);
+ mac->speed_ability = le32_to_cpu(resp->speed_ability);
+ mac->autoneg = resp->autoneg;
+ mac->support_autoneg = resp->autoneg_ability;
+ } else {
+ mac->speed_type = QUERY_SFP_SPEED;
+ }
+
+ return 0;
+}
+
+static int hclge_update_port_info(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ int ret;
+
+ /* get the port info from SFP cmd if not copper port */
+ if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
return 0;
- /* if IMP does not support get SFP/qSFP speed, return directly */
+ /* if IMP does not support get SFP/qSFP info, return directly */
if (!hdev->support_sfp_query)
return 0;
- ret = hclge_get_sfp_speed(hdev, &speed);
+ if (hdev->pdev->revision >= 0x21)
+ ret = hclge_get_sfp_info(hdev, mac);
+ else
+ ret = hclge_get_sfp_speed(hdev, &speed);
+
if (ret == -EOPNOTSUPP) {
hdev->support_sfp_query = false;
return ret;
@@ -2278,19 +2613,20 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev)
return ret;
}
- if (speed == HCLGE_MAC_SPEED_UNKNOWN)
- return 0; /* do nothing if no SFP */
-
- /* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
-}
-
-static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
+ if (hdev->pdev->revision >= 0x21) {
+ if (mac->speed_type == QUERY_ACTIVE_SPEED) {
+ hclge_update_port_capability(mac);
+ return 0;
+ }
+ return hclge_cfg_mac_speed_dup(hdev, mac->speed,
+ HCLGE_MAC_FULL);
+ } else {
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
+ return 0; /* do nothing if no SFP */
- return hclge_update_speed_duplex(hdev);
+ /* must config full duplex for SFP */
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ }
}
static int hclge_get_status(struct hnae3_handle *handle)
@@ -2344,6 +2680,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ hdev->rst_stats.imp_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
@@ -2352,6 +2689,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
@@ -2360,12 +2698,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ hdev->rst_stats.core_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
/* check for vector0 msix event source */
- if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
+ dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
+ msix_src_reg);
return HCLGE_VECTOR0_EVENT_ERR;
+ }
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
@@ -2374,6 +2716,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_MBX;
}
+ /* print other vector0 event source */
+ dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
+ cmdq_src_reg, msix_src_reg);
return HCLGE_VECTOR0_EVENT_OTHER;
}
@@ -2657,7 +3002,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return ret;
}
- if (!reset)
+ if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
continue;
/* Inform VF to process the reset.
@@ -2694,9 +3039,18 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
static void hclge_do_reset(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct pci_dev *pdev = hdev->pdev;
u32 val;
+ if (hclge_get_hw_reset_stat(handle)) {
+ dev_info(&pdev->dev, "Hardware reset not finish\n");
+ dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
+ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
+ return;
+ }
+
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
@@ -2775,6 +3129,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ if (hdev->reset_type != HNAE3_NONE_RESET &&
+ rst_level < hdev->reset_type)
+ return HNAE3_NONE_RESET;
+
return rst_level;
}
@@ -2844,6 +3202,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
* after hclge_cmd_init is called.
*/
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
/* There is no mechanism for PF to know if VF has stopped IO
@@ -2852,6 +3211,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
msleep(100);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
@@ -2932,7 +3292,7 @@ static void hclge_reset(struct hclge_dev *hdev)
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
- hdev->reset_count++;
+ hdev->rst_stats.reset_cnt++;
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
@@ -2958,6 +3318,8 @@ static void hclge_reset(struct hclge_dev *hdev)
goto err_reset;
}
+ hdev->rst_stats.hw_reset_done_cnt++;
+
ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
goto err_reset;
@@ -3001,7 +3363,9 @@ static void hclge_reset(struct hclge_dev *hdev)
hdev->last_reset_time = jiffies;
hdev->reset_fail_cnt = 0;
+ hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
+ del_timer(&hdev->reset_timer);
return;
@@ -3154,7 +3518,7 @@ static void hclge_service_task(struct work_struct *work)
hdev->hw_stats.stats_timer = 0;
}
- hclge_update_speed_duplex(hdev);
+ hclge_update_port_info(hdev);
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
hclge_service_complete(hdev);
@@ -5194,7 +5558,7 @@ static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hdev->reset_count;
+ return hdev->rst_stats.hw_reset_done_cnt;
}
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
@@ -5282,8 +5646,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
-#define HCLGE_MAC_LINK_STATUS_MS 20
-#define HCLGE_MAC_LINK_STATUS_NUM 10
+#define HCLGE_MAC_LINK_STATUS_MS 10
+#define HCLGE_MAC_LINK_STATUS_NUM 100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP 1
@@ -5942,8 +6306,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
/* check if we just hit the duplicate */
- if (!ret)
- ret = -EINVAL;
+ if (!ret) {
+ dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+ vport->vport_id, addr);
+ return 0;
+ }
dev_err(&hdev->pdev->dev,
"PF failed to add unicast entry(%pM) in the MAC table\n",
@@ -6293,7 +6660,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
return -EINVAL;
}
- if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
+ if ((!is_first || is_kdump_kernel()) &&
+ hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
dev_warn(&hdev->pdev->dev,
"remove old uc mac address fail.\n");
@@ -6543,30 +6911,6 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
-int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
- u16 vlan_id, bool is_kill)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
-
- return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
- 0, is_kill);
-}
-
-static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
- u16 vlan, u8 qos, __be16 proto)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
-
- if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
- return -EINVAL;
- if (proto != htons(ETH_P_8021Q))
- return -EPROTONOSUPPORT;
-
- return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
-}
-
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
@@ -6640,6 +6984,52 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
return status;
}
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
+ u16 port_base_vlan_state,
+ u16 vlan_tag)
+{
+ int ret;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->txvlan_cfg.accept_tag1 = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ } else {
+ vport->txvlan_cfg.accept_tag1 = false;
+ vport->txvlan_cfg.insert_tag1_en = true;
+ vport->txvlan_cfg.default_tag1 = vlan_tag;
+ }
+
+ vport->txvlan_cfg.accept_untag1 = true;
+
+ /* accept_tag2 and accept_untag2 are not supported on
+ * pdev revision(0x20); newer revisions support them, and
+ * these two fields cannot be configured by the user.
+ */
+ vport->txvlan_cfg.accept_tag2 = true;
+ vport->txvlan_cfg.accept_untag2 = true;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag2 = 0;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en =
+ vport->rxvlan_cfg.rx_vlan_offload_en;
+ } else {
+ vport->rxvlan_cfg.strip_tag1_en =
+ vport->rxvlan_cfg.rx_vlan_offload_en;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ }
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
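/* Editorial summary, not part of the patch: the rx stripping configuration
 * chosen above, with "offload" standing for
 * vport->rxvlan_cfg.rx_vlan_offload_en:
 *   port based VLAN disabled: strip_tag1 = false,   strip_tag2 = offload
 *   port based VLAN enabled:  strip_tag1 = offload, strip_tag2 = true
 * so the user-visible rx offload toggle maps onto a different tag depending
 * on the port based VLAN state.
 */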
+
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
struct hclge_rx_vlan_type_cfg_cmd *rx_req;
@@ -6730,34 +7120,14 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vport->txvlan_cfg.accept_tag1 = true;
- vport->txvlan_cfg.accept_untag1 = true;
-
- /* accept_tag2 and accept_untag2 are not supported on
- * pdev revision(0x20), new revision support them. The
- * value of this two fields will not return error when driver
- * send command to fireware in revision(0x20).
- * This two fields can not configured by user.
- */
- vport->txvlan_cfg.accept_tag2 = true;
- vport->txvlan_cfg.accept_untag2 = true;
+ u16 vlan_tag;
- vport->txvlan_cfg.insert_tag1_en = false;
- vport->txvlan_cfg.insert_tag2_en = false;
- vport->txvlan_cfg.default_tag1 = 0;
- vport->txvlan_cfg.default_tag2 = 0;
-
- ret = hclge_set_vlan_tx_offload_cfg(vport);
- if (ret)
- return ret;
-
- vport->rxvlan_cfg.strip_tag1_en = false;
- vport->rxvlan_cfg.strip_tag2_en = true;
- vport->rxvlan_cfg.vlan1_vlan_prionly = false;
- vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+ vport = &hdev->vport[i];
+ vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- ret = hclge_set_vlan_rx_offload_cfg(vport);
+ ret = hclge_vlan_offload_cfg(vport,
+ vport->port_base_vlan_cfg.state,
+ vlan_tag);
if (ret)
return ret;
}
@@ -6765,7 +7135,8 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
+static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool writen_to_tbl)
{
struct hclge_vport_vlan_cfg *vlan;
@@ -6777,14 +7148,38 @@ void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
if (!vlan)
return;
- vlan->hd_tbl_status = true;
+ vlan->hd_tbl_status = writen_to_tbl;
vlan->vlan_id = vlan_id;
list_add_tail(&vlan->node, &vport->vlan_list);
}
-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
- bool is_write_tbl)
+static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (!vlan->hd_tbl_status) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, 0, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore vport vlan list failed, ret=%d\n",
+ ret);
+ return ret;
+ }
+ }
+ vlan->hd_tbl_status = true;
+ }
+
+ return 0;
+}
+
+static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool is_write_tbl)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
@@ -6847,14 +7242,203 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- vport->rxvlan_cfg.strip_tag1_en = false;
- vport->rxvlan_cfg.strip_tag2_en = enable;
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ } else {
+ vport->rxvlan_cfg.strip_tag1_en = enable;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ }
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+ vport->rxvlan_cfg.rx_vlan_offload_en = enable;
return hclge_set_vlan_rx_offload_cfg(vport);
}
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
+ u16 port_base_vlan_state,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
+ hclge_rm_vport_all_vlan_table(vport, false);
+ return hclge_set_vlan_filter_hw(hdev,
+ htons(new_info->vlan_proto),
+ vport->vport_id,
+ new_info->vlan_tag,
+ new_info->qos, false);
+ }
+
+ ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
+ vport->vport_id, old_info->vlan_tag,
+ old_info->qos, true);
+ if (ret)
+ return ret;
+
+ return hclge_add_vport_all_vlan_table(vport);
+}
+
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hclge_vlan_info *old_vlan_info;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+ if (ret)
+ return ret;
+
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev,
+ htons(vlan_info->vlan_proto),
+ vport->vport_id,
+ vlan_info->vlan_tag,
+ vlan_info->qos, false);
+ if (ret)
+ return ret;
+
+ /* remove old VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev,
+ htons(old_vlan_info->vlan_proto),
+ vport->vport_id,
+ old_vlan_info->vlan_tag,
+ old_vlan_info->qos, true);
+ if (ret)
+ return ret;
+
+ goto update;
+ }
+
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
+ if (ret)
+ return ret;
+
+ /* update state only when disable/enable port based VLAN */
+ vport->port_base_vlan_cfg.state = state;
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+update:
+ vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
+ vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
+ vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+
+ return 0;
+}
+
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
+ enum hnae3_port_base_vlan_state state,
+ u16 vlan)
+{
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ if (!vlan)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+ else
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
+ } else {
+ if (!vlan)
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
+ else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+ else
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
+ }
+}
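/* Editorial summary, not part of the patch: the helper above maps
 * (current port based VLAN state, requested vlan) to the action taken by
 * hclge_set_vf_vlan_filter():
 *   DISABLE + vlan == 0      -> NOCHANGE (nothing to do)
 *   DISABLE + vlan != 0      -> ENABLE
 *   ENABLE  + vlan == 0      -> DISABLE
 *   ENABLE  + same vlan tag  -> NOCHANGE
 *   ENABLE  + different vlan -> MODIFY
 */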
+
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+ u16 vlan, u8 qos, __be16 proto)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ u16 state;
+ int ret;
+
+ if (hdev->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ /* qos is a 3-bit value, so it cannot be bigger than 7 */
+ if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
+ return -EINVAL;
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ vport = &hdev->vport[vfid];
+ state = hclge_get_port_base_vlan_state(vport,
+ vport->port_base_vlan_cfg.state,
+ vlan);
+ if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
+ return 0;
+
+ vlan_info.vlan_tag = vlan;
+ vlan_info.qos = qos;
+ vlan_info.vlan_proto = ntohs(proto);
+
+ /* update port based VLAN for PF */
+ if (!vfid) {
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+
+ return ret;
+ }
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ return hclge_update_port_base_vlan_cfg(vport, state,
+ &vlan_info);
+ } else {
+ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ (u8)vfid, state,
+ vlan, qos,
+ ntohs(proto));
+ return ret;
+ }
+}
+
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ bool writen_to_tbl = false;
+ int ret = 0;
+
+ /* When port based VLAN is enabled, the port based VLAN is used as the
+ * VLAN filter entry. In that case the VLAN filter table is not updated
+ * when the user adds or removes a VLAN; only the vport VLAN list is
+ * updated. The VLAN ids in the list are not written to the VLAN filter
+ * table until port based VLAN is disabled.
+ */
+ if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
+ vlan_id, 0, is_kill);
+ writen_to_tbl = true;
+ }
+
+ if (ret)
+ return ret;
+
+ if (is_kill)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ else
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+
+ return 0;
+}
+
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
struct hclge_config_max_frm_size_cmd *req;
@@ -7199,13 +7783,13 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
if (!fc_autoneg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
- /* Only support flow control negotiation for netdev with
- * phy attached for now.
- */
- if (!phydev)
+ if (phydev)
+ return phy_start_aneg(phydev);
+
+ if (hdev->pdev->revision == 0x20)
return -EOPNOTSUPP;
- return phy_start_aneg(phydev);
+ return hclge_restart_autoneg(handle);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
@@ -7222,13 +7806,17 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*auto_neg = hdev->hw.mac.autoneg;
}
-static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
+static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
if (media_type)
*media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
@@ -7280,6 +7868,32 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
*tp_mdix = ETH_TP_MDI;
}
+static void hclge_info_show(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "PF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
+ dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+ dev_info(dev, "This is %s PF\n",
+ hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
+ dev_info(dev, "DCB %s\n",
+ hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ dev_info(dev, "MQPRIO %s\n",
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+
+ dev_info(dev, "PF info end.\n");
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -7301,6 +7915,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->vport->nic))
+ hclge_info_show(hdev);
+
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
@@ -7660,6 +8277,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ INIT_KFIFO(hdev->mac_tnl_log);
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -7708,7 +8327,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
- hclge_vport_start(vport);
+ hclge_vport_stop(vport);
vport++;
}
}
@@ -7813,6 +8432,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
+ hclge_config_mac_tnl_int(hdev, false);
hclge_hw_error_set_state(hdev, false);
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
@@ -8234,9 +8854,11 @@ static const struct hnae3_ae_ops hclge_ops = {
.client_stop = hclge_client_stop,
.get_status = hclge_get_status,
.get_ksettings_an_result = hclge_get_ksettings_an_result,
- .update_speed_duplex_h = hclge_update_speed_duplex_h,
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
+ .check_port_speed = hclge_check_port_speed,
+ .get_fec = hclge_get_fec,
+ .set_fec = hclge_set_fec,
.get_rss_key_size = hclge_get_rss_key_size,
.get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
@@ -8253,11 +8875,13 @@ static const struct hnae3_ae_ops hclge_ops = {
.rm_mc_addr = hclge_rm_mc_addr,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
+ .restart_autoneg = hclge_restart_autoneg,
.get_pauseparam = hclge_get_pauseparam,
.set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
+ .get_mac_pause_stats = hclge_get_mac_pause_stat,
.update_stats = hclge_update_stats,
.get_strings = hclge_get_strings,
.get_sset_count = hclge_get_sset_count,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b57ac4beb313..dd06b11187b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
+#include <linux/kfifo.h>
#include "hclge_cmd.h"
#include "hnae3.h"
@@ -188,6 +189,8 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
+/* to be compatible with existing boards */
+#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
#define HCLGE_SUPPORT_GE \
@@ -235,15 +238,25 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
+#define QUERY_SFP_SPEED 0
+#define QUERY_ACTIVE_SPEED 1
+
struct hclge_mac {
u8 phy_addr;
u8 flag;
- u8 media_type;
+ u8 media_type; /* port media type, e.g. fibre/copper/backplane */
u8 mac_addr[ETH_ALEN];
u8 autoneg;
u8 duplex;
+ u8 support_autoneg;
+ u8 speed_type; /* 0: sfp speed, 1: active speed */
u32 speed;
- int link; /* store the link status of mac & phy (if phy exit)*/
+ u32 speed_ability; /* speed ability supported by current media */
+ u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
+ u32 fec_mode; /* active fec mode */
+ u32 user_fec_mode;
+ u32 fec_ability;
+ int link; /* store the link status of mac & phy (if phy exists) */
struct phy_device *phydev;
struct mii_bus *mdio_bus;
phy_interface_t phy_if;
@@ -649,6 +662,23 @@ struct hclge_vport_vlan_cfg {
u16 vlan_id;
};
+struct hclge_rst_stats {
+ u32 reset_done_cnt; /* the number of resets completed */
+ u32 hw_reset_done_cnt; /* the number of HW resets completed */
+ u32 pf_rst_cnt; /* the number of PF resets */
+ u32 flr_rst_cnt; /* the number of FLRs */
+ u32 core_rst_cnt; /* the number of CORE resets */
+ u32 global_rst_cnt; /* the number of GLOBAL resets */
+ u32 imp_rst_cnt; /* the number of IMP resets */
+ u32 reset_cnt; /* the number of resets triggered */
+};
+
+/* time and register status when a MAC tunnel interrupt occurs */
+struct hclge_mac_tnl_stats {
+ u64 time;
+ u32 status;
+};
+
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
@@ -675,6 +705,7 @@ struct hclge_vport_vlan_cfg {
(y) = (_k_ ^ ~_v_) & (_k_); \
} while (0)
+#define HCLGE_MAC_TNL_LOG_SIZE 8
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
@@ -691,7 +722,7 @@ struct hclge_dev {
unsigned long default_reset_request;
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
- unsigned long reset_count; /* the number of reset has been done */
+ struct hclge_rst_stats rst_stats;
u32 reset_fail_cnt;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
@@ -791,6 +822,9 @@ struct hclge_dev {
struct mutex umv_mutex; /* protect share_umv_size */
struct mutex vport_cfg_mutex; /* Protect stored vf table */
+
+ DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
+ HCLGE_MAC_TNL_LOG_SIZE);
};
/* VPort level vlan tag configuration for TX direction */
@@ -807,10 +841,11 @@ struct hclge_tx_vtag_cfg {
/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
- bool strip_tag1_en; /* Whether strip inner vlan tag */
- bool strip_tag2_en; /* Whether strip outer vlan tag */
- bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
- bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
+ u8 rx_vlan_offload_en; /* Whether to enable rx vlan offload */
+ u8 strip_tag1_en; /* Whether to strip the inner vlan tag */
+ u8 strip_tag2_en; /* Whether to strip the outer vlan tag */
+ u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
+ u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
};
struct hclge_rss_tuple_cfg {
@@ -829,6 +864,17 @@ enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_MAX
};
+struct hclge_vlan_info {
+ u16 vlan_proto; /* so far only 802.1Q is supported */
+ u16 qos;
+ u16 vlan_tag;
+};
+
+struct hclge_port_base_vlan_config {
+ u16 state;
+ struct hclge_vlan_info vlan_info;
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -842,9 +888,10 @@ struct hclge_vport {
u16 alloc_rss_size;
u16 qs_offset;
- u16 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
@@ -924,9 +971,11 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
- bool is_write_tbl);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info);
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ u16 state, u16 vlan_tag, u16 qos,
+ u16 vlan_proto);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 306a23e486de..0e04e63f2a94 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -212,8 +212,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
}
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
struct hclge_dev *hdev = vport->back;
@@ -249,7 +248,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return -EIO;
}
- if (gen_resp)
+ if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
return 0;
@@ -289,9 +288,25 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
return 0;
}
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ u16 state, u16 vlan_tag, u16 qos,
+ u16 vlan_proto)
+{
+#define MSG_DATA_SIZE 8
+
+ u8 msg_data[MSG_DATA_SIZE];
+
+ memcpy(&msg_data[0], &state, sizeof(u16));
+ memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
+ memcpy(&msg_data[4], &qos, sizeof(u16));
+ memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
+
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HLCGE_MBX_PUSH_VLAN_INFO, vfid);
+}
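The eight-byte payload built above packs state, vlan_proto, qos and vlan_tag as consecutive host-order u16 values; a hedged sketch of a decoder mirroring that layout (the function name is hypothetical, the real VF-side handling appears in hclgevf_mbx.c further down):

/* bytes 0-1: state, 2-3: vlan_proto, 4-5: qos, 6-7: vlan_tag */
static void example_parse_port_vlan_msg(const u8 *msg_data, u16 *state,
					struct hclge_vlan_info *info)
{
	memcpy(state, &msg_data[0], sizeof(u16));
	memcpy(&info->vlan_proto, &msg_data[2], sizeof(u16));
	memcpy(&info->qos, &msg_data[4], sizeof(u16));
	memcpy(&info->vlan_tag, &msg_data[6], sizeof(u16));
}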
+
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
int status = 0;
@@ -305,19 +320,27 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
- if (!status)
- is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false)
- : hclge_add_vport_vlan_table(vport, vlan);
} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = mbx_req->msg[2] ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
+ struct hclge_vlan_info *vlan_info;
+ u16 *state;
+
+ state = (u16 *)&mbx_req->msg[2];
+ vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
+ status = hclge_update_port_base_vlan_cfg(vport, *state,
+ vlan_info);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
+ u8 state;
+
+ state = vport->port_base_vlan_cfg.state;
+ status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
+ sizeof(u8));
}
- if (gen_resp)
- status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
-
return status;
}
@@ -385,24 +408,33 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
HCLGE_TQPS_DEPTH_INFO_LEN);
}
+static int hclge_get_vf_media_type(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ u8 resp_data[2];
+
+ resp_data[0] = hdev->hw.mac.media_type;
+ resp_data[1] = hdev->hw.mac.module_type;
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ sizeof(resp_data));
+}
+
static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
struct hclge_dev *hdev = vport->back;
u16 link_status;
- u8 msg_data[10];
- u16 media_type;
+ u8 msg_data[8];
u8 dest_vfid;
u16 duplex;
/* mac.link can only be 0 or 1 */
link_status = (u16)hdev->hw.mac.link;
duplex = hdev->hw.mac.duplex;
- media_type = hdev->hw.mac.media_type;
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16));
- memcpy(&msg_data[8], &media_type, sizeof(u16));
dest_vfid = mbx_req->mbx_src_vfid;
/* send this requested info to VF */
@@ -565,7 +597,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req, true);
+ ret = hclge_set_vf_uc_mac_addr(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF fail(%d) to set VF UC MAC Addr\n",
@@ -579,7 +611,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req, false);
+ ret = hclge_set_vf_vlan_cfg(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF failed(%d) to config VF's VLAN\n",
@@ -662,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_rm_vport_all_vlan_table(vport, true);
mutex_unlock(&hdev->vport_cfg_mutex);
break;
+ case HCLGE_MBX_GET_MEDIA_TYPE:
+ ret = hclge_get_vf_media_type(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to media type for VF\n",
+ ret);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 48eda2c6fdae..1e8134892d77 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
+#include <linux/marvell_phy.h>
#include "hclge_cmd.h"
#include "hclge_main.h"
@@ -121,12 +122,18 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
int hclge_mac_mdio_config(struct hclge_dev *hdev)
{
+#define PHY_INEXISTENT 255
+
struct hclge_mac *mac = &hdev->hw.mac;
struct phy_device *phydev;
struct mii_bus *mdio_bus;
int ret;
- if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+ if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) {
+ dev_info(&hdev->pdev->dev,
+ "no phy device is connected to mdio bus\n");
+ return 0;
+ } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
hdev->hw.mac.phy_addr);
return -EINVAL;
@@ -203,6 +210,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE;
+
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index aafc69f4bfdd..a7bbb6d3091a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
ret = hclge_pfc_setup_hw(hdev);
if (init && ret == -EOPNOTSUPP)
dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
- else
+ else if (ret) {
+ dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
+ ret);
return ret;
+ }
return hclge_tm_bp_setup(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 9441b453d38d..71f356fc2446 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
return ring->desc_num - used - 1;
}
+static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
+ int head)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
+ struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclgevf_desc *desc;
int clean = 0;
u32 head;
- desc = &csq->desc[ntc];
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EIO;
}
- csq->next_to_clean = ntc;
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
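The two helpers above implement standard circular-buffer bookkeeping; a self-contained sketch with concrete numbers may make the wrap handling easier to follow (the values are chosen only for illustration):

/* With desc_num = 1024, next_to_clean = 1020 and next_to_use = 4 the ring
 * has wrapped; a hardware head of 2 is then valid and corresponds to
 * (2 - 1020 + 1024) % 1024 = 6 cleaned descriptors.
 */
static int example_csq_clean_count(int head, int ntc, int ntu, int desc_num)
{
	bool valid;

	if (ntu > ntc)			/* ring has not wrapped */
		valid = head >= ntc && head <= ntu;
	else				/* ring has wrapped past the end */
		valid = head >= ntc || head <= ntu;

	if (!valid)
		return -1;		/* caller disables the queue */

	return (head - ntc + desc_num) % desc_num;
}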
@@ -321,13 +334,13 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock_bh(&hdev->hw.cmq.crq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
/* initialize the pointers of async rx queue of mailbox */
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
- hdev->arq.count = 0;
+ atomic_set(&hdev->arq.count, 0);
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
hdev->hw.cmq.crq.next_to_clean = 0;
@@ -335,7 +348,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
hclgevf_cmd_init_regs(&hdev->hw);
- spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
@@ -344,8 +357,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
* reset may happen when lower level reset is being processed.
*/
if (hclgevf_is_reset_pending(hdev)) {
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err_cmd_init;
}
/* get firmware version */
@@ -353,13 +366,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret);
- return ret;
+ goto err_cmd_init;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
+
+err_cmd_init:
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ return ret;
}
static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8bc28e6f465f..5d53467ee2d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ u8 resp_msg;
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
+ NULL, 0, true, &resp_msg, sizeof(u8));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get port based vlan state failed %d",
+ ret);
+ return ret;
+ }
+
+ nic->port_base_vlan_state = resp_msg;
+
+ return 0;
+}
+
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
@@ -307,6 +328,26 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
return qid_in_pf;
}
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+ u8 resp_msg[2];
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
+ true, resp_msg, sizeof(resp_msg));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get the pf port media type failed %d",
+ ret);
+ return ret;
+ }
+
+ hdev->hw.mac.media_type = resp_msg[0];
+ hdev->hw.mac.module_type = resp_msg[1];
+
+ return 0;
+}
+
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
@@ -404,7 +445,7 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
}
}
-void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING 0
#define HCLGEVF_SUPPORTED 1
@@ -1375,9 +1416,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
case HNAE3_VF_FUNC_RESET:
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8));
+ hdev->rst_stats.vf_func_rst_cnt++;
break;
case HNAE3_FLR_RESET:
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ hdev->rst_stats.flr_rst_cnt++;
break;
default:
break;
@@ -1400,7 +1443,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
- hdev->reset_count++;
+ hdev->rst_stats.rst_cnt++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
@@ -1426,6 +1469,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
goto err_reset;
}
+ hdev->rst_stats.hw_rst_done_cnt++;
+
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
@@ -1444,6 +1489,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
+ hdev->rst_stats.rst_done_cnt++;
return ret;
err_reset_lock:
@@ -1455,6 +1501,8 @@ err_reset:
*/
hclgevf_cmd_init(hdev);
dev_err(&hdev->pdev->dev, "failed to reset VF\n");
+ if (hclgevf_is_reset_pending(hdev))
+ hclgevf_reset_task_schedule(hdev);
return ret;
}
@@ -1564,8 +1612,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
+ if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
schedule_work(&hdev->rst_service_task);
}
@@ -1603,6 +1650,7 @@ static void hclgevf_service_timer(struct timer_list *t)
mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+ hdev->stats_timer++;
hclgevf_task_schedule(hdev);
}
@@ -1711,7 +1759,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
@@ -1723,9 +1771,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
static void hclgevf_service_task(struct work_struct *work)
{
+ struct hnae3_handle *handle;
struct hclgevf_dev *hdev;
hdev = container_of(work, struct hclgevf_dev, service_task);
+ handle = &hdev->nic;
+
+ if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
+ hclgevf_tqps_update_stats(handle);
+ hdev->stats_timer = 0;
+ }
/* request the link status from the PF. PF would be able to tell VF
* about such updates in future so we might remove this later
@@ -1762,6 +1817,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
*clearval = cmdq_src_reg;
+ hdev->rst_stats.vf_rst_cnt++;
return HCLGEVF_VECTOR0_EVENT_RST;
}
@@ -1814,6 +1870,11 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
+ /* get current port based vlan state from PF */
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+ if (ret)
+ return ret;
+
/* get queue configuration from PF */
ret = hclgevf_get_queue_info(hdev);
if (ret)
@@ -1824,6 +1885,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
if (ret)
return ret;
+ ret = hclgevf_get_pf_media_type(hdev);
+ if (ret)
+ return ret;
+
/* get tc configuration from PF */
return hclgevf_get_tc_info(hdev);
}
@@ -1986,8 +2051,10 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- for (i = 0; i < handle->kinfo.num_tqps; i++)
- hclgevf_reset_tqp(handle, i);
+ if (hdev->reset_type != HNAE3_VF_RESET)
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ if (hclgevf_reset_tqp(handle, i))
+ break;
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
@@ -2007,9 +2074,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_set_alive(handle, true);
+ if (ret)
+ return ret;
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
- return hclgevf_set_alive(handle, true);
+
+ return 0;
}
static void hclgevf_client_stop(struct hnae3_handle *handle)
@@ -2051,6 +2124,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ if (hdev->keep_alive_timer.function)
+ del_timer_sync(&hdev->keep_alive_timer);
+ if (hdev->keep_alive_task.func)
+ cancel_work_sync(&hdev->keep_alive_task);
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
@@ -2155,6 +2232,23 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
+static void hclgevf_info_show(struct hclgevf_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "VF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %d\n",
+ hdev->hw.mac.media_type);
+
+ dev_info(dev, "VF info end.\n");
+}
+
static int hclgevf_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -2172,6 +2266,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
+
if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
@@ -2651,12 +2748,16 @@ static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
return hclgevf_config_gro(hdev, enable);
}
-static void hclgevf_get_media_type(struct hnae3_handle *handle,
- u8 *media_type)
+static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
if (media_type)
*media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
}
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
@@ -2677,7 +2778,7 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- return hdev->reset_count;
+ return hdev->rst_stats.hw_rst_done_cnt;
}
static void hclgevf_get_link_mode(struct hnae3_handle *handle,
@@ -2756,6 +2857,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
}
}
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ u8 *port_base_vlan_info, u8 data_size)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+
+ rtnl_lock();
+ hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+
+ /* send msg to PF and wait for the port based vlan info to be updated */
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_PORT_BASE_VLAN_CFG,
+ port_base_vlan_info, data_size,
+ false, NULL, 0);
+
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+ rtnl_lock();
+ hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+}
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index c128863ee7d0..cc52f54f8c08 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -116,6 +116,8 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_STATS_TIMER_INTERVAL (36)
+
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
@@ -141,6 +143,7 @@ enum hclgevf_states {
struct hclgevf_mac {
u8 media_type;
+ u8 module_type;
u8 mac_addr[ETH_ALEN];
int link;
u8 duplex;
@@ -210,6 +213,15 @@ struct hclgevf_misc_vector {
int vector_irq;
};
+struct hclgevf_rst_stats {
+ u32 rst_cnt; /* the number of resets */
+ u32 vf_func_rst_cnt; /* the number of VF function resets */
+ u32 flr_rst_cnt; /* the number of FLRs */
+ u32 vf_rst_cnt; /* the number of VF resets */
+ u32 rst_done_cnt; /* the number of resets completed */
+ u32 hw_rst_done_cnt; /* the number of HW resets completed */
+};
+
struct hclgevf_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -227,7 +239,7 @@ struct hclgevf_dev {
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
- unsigned long reset_count; /* the number of reset has been done */
+ struct hclgevf_rst_stats rst_stats;
u32 reset_attempts;
u32 fw_version;
@@ -272,6 +284,7 @@ struct hclgevf_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
u32 flag;
+ u32 stats_timer;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
@@ -290,4 +303,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ u8 *port_base_vlan_info, u8 data_size);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 7dc3c9f79169..30f2e9352cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -49,8 +49,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
- "VF could not get mbx resp(=%d) from PF in %d tries\n",
- hdev->mbx_resp.received_resp, i);
+ "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
+ code0, code1, hdev->mbx_resp.received_resp, i);
return -EIO;
}
@@ -68,8 +68,11 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
dev_err(&hdev->pdev->dev,
- "VF could not match resp code(code0=%d,code1=%d), %d",
+ "VF could not match resp code(code0=%d,code1=%d), %d\n",
code0, code1, mbx_resp->resp_status);
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
+ r_code0, r_code1);
return -EIO;
}
@@ -95,6 +98,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
+ ~HCLGE_MBX_NEED_RESP_BIT;
req->msg[0] = code;
req->msg[1] = subcode;
memcpy(&req->msg[2], msg_data, msg_len);
@@ -198,6 +203,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
+ case HLCGE_MBX_PUSH_VLAN_INFO:
/* set this mbx event as pending. This is required as we
* might lose an interrupt event when the mbx task is busy
* handling. This shall be cleared when mbx task just
@@ -208,7 +214,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* we will drop the async msg if we find ARQ as full
* and continue with next message
*/
- if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+ if (atomic_read(&hdev->arq.count) >=
+ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%d)\n",
req->msg[1]);
@@ -220,7 +227,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
memcpy(&msg_q[0], req->msg,
HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
- hdev->arq.count++;
+ atomic_inc(&hdev->arq.count);
hclgevf_mbx_task_schedule(hdev);
@@ -243,8 +250,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
- u16 link_status;
- u16 *msg_q;
+ u16 link_status, state;
+ u16 *msg_q, *vlan_info;
u8 duplex;
u32 speed;
u32 tail;
@@ -272,7 +279,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
link_status = le16_to_cpu(msg_q[1]);
memcpy(&speed, &msg_q[2], sizeof(speed));
duplex = (u8)le16_to_cpu(msg_q[4]);
- hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
/* update upper layer with new link link status */
hclgevf_update_link_status(hdev, link_status);
@@ -300,6 +306,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_reset_task_schedule(hdev);
break;
+ case HLCGE_MBX_PUSH_VLAN_INFO:
+ state = le16_to_cpu(msg_q[1]);
+ vlan_info = &msg_q[1];
+ hclgevf_update_port_base_vlan_info(hdev, state,
+ (u8 *)vlan_info, 8);
+ break;
default:
dev_err(&hdev->pdev->dev,
"fetched unsupported(%d) message from arq\n",
@@ -308,7 +320,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
}
hclge_mbx_head_ptr_move_arq(hdev->arq);
- hdev->arq.count--;
+ atomic_dec(&hdev->arq.count);
msg_q = hdev->arq.msg_q[hdev->arq.head];
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
};
struct hns_mdio_device {
- void *vbase; /* mdio reg base address */
+ u8 __iomem *vbase; /* mdio reg base address */
struct regmap *subctrl_vbase;
struct hns_mdio_sc_reg sc_reg;
};
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
#define MDIO_SC_CLK_ST 0x531C
#define MDIO_SC_RESET_ST 0x5A1C
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- writel_relaxed(value, reg_addr + reg);
+ writel_relaxed(value, base + reg);
}
#define MDIO_WRITE_REG(a, reg, value) \
mdio_write_reg((a)->vbase, (reg), (value))
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- return readl_relaxed(reg_addr + reg);
+ return readl_relaxed(base + reg);
}
#define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
u32 val)
{
u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
{
u32 origin;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index e17bf33eba0c..0fbe8046824b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -518,7 +518,7 @@ process_sq_wqe:
flush_skbs:
netdev_txq = netdev_get_tx_queue(netdev, q_id);
- if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
+ if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
return err;
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index b69c622ba8b2..211c5f74b4c8 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -105,7 +105,7 @@
#define DMA_WBACK_INV(ndev, addr, len) \
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
-#define SYSBUS 0x0000006c;
+#define SYSBUS 0x0000006c
/* big endian CPU, 82596 "big" endian mode */
#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
@@ -141,7 +141,8 @@ static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
}
gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
- udelay(1);
+ if (!running_on_qemu)
+ udelay(1);
gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 90b62c1412c8..707c8ba120c2 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1463,7 +1463,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
memset(pr, 0, sizeof(struct ehea_port_res));
- pr->tx_bytes = rx_bytes;
+ pr->tx_bytes = tx_bytes;
pr->tx_packets = tx_packets;
pr->rx_bytes = rx_bytes;
pr->rx_packets = rx_packets;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 5e4e37132bf2..77ce17383aba 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -123,8 +123,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
struct ehea_cq *cq;
- struct h_epa epa;
- u64 *cq_handle_ref, hret, rpage;
+ u64 hret, rpage;
u32 counter;
int ret;
void *vpage;
@@ -139,8 +138,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
cq->adapter = adapter;
- cq_handle_ref = &cq->fw_handle;
-
hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
&cq->fw_handle, &cq->epas);
if (hret != H_SUCCESS) {
@@ -188,7 +185,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
}
hw_qeit_reset(&cq->hw_queue);
- epa = cq->epas.kernel;
ehea_reset_cq_ep(cq);
ehea_reset_cq_n1(cq);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index dd71d5db7274..d86b0e5895a6 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -93,7 +93,7 @@ struct ibmveth_stat {
#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
-struct ibmveth_stat ibmveth_stats[] = {
+static struct ibmveth_stat ibmveth_stats[] = {
{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
{ "replenish_add_buff_failure",
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ecbb1adcf3b..3dcd9c3d8781 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -118,8 +118,9 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
+static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -848,11 +849,7 @@ static int ibmvnic_login(struct net_device *netdev)
}
} while (retry);
- /* handle pending MAC address changes after successful login */
- if (adapter->mac_change_pending) {
- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
- adapter->mac_change_pending = false;
- }
+ __ibmvnic_set_mac(netdev, adapter->mac_addr);
return 0;
}
@@ -1114,7 +1111,6 @@ static int ibmvnic_open(struct net_device *netdev)
}
rc = __ibmvnic_open(netdev);
- netif_carrier_on(netdev);
return rc;
}
@@ -1685,28 +1681,40 @@ static void ibmvnic_set_multi(struct net_device *netdev)
}
}
-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
+static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
union ibmvnic_crq crq;
int rc;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
+ if (!is_valid_ether_addr(dev_addr)) {
+ rc = -EADDRNOTAVAIL;
+ goto err;
+ }
memset(&crq, 0, sizeof(crq));
crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
- ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+ ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
init_completion(&adapter->fw_done);
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto err;
+ }
+
wait_for_completion(&adapter->fw_done);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- return adapter->fw_done_rc ? -EIO : 0;
+ if (adapter->fw_done_rc) {
+ rc = -EIO;
+ goto err;
+ }
+
+ return 0;
+err:
+ ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
+ return rc;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
@@ -1715,13 +1723,10 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
int rc;
- if (adapter->state == VNIC_PROBED) {
- memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
- adapter->mac_change_pending = true;
- return 0;
- }
-
- rc = __ibmvnic_set_mac(netdev, addr);
+ rc = 0;
+ ether_addr_copy(adapter->mac_addr, addr->sa_data);
+ if (adapter->state != VNIC_PROBED)
+ rc = __ibmvnic_set_mac(netdev, addr->sa_data);
return rc;
}
@@ -1858,8 +1863,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
- netif_carrier_on(netdev);
-
return 0;
}
@@ -1885,6 +1888,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_completion(&adapter->init_done);
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -1928,8 +1932,6 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
return 0;
}
- netif_carrier_on(netdev);
-
return 0;
}
@@ -1967,13 +1969,11 @@ static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
- struct net_device *netdev;
bool we_lock_rtnl = false;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
- netdev = adapter->netdev;
/* netif_set_real_num_xx_queues needs to take rtnl lock here
* unless wait_for_reset is set, in which case the rtnl lock
@@ -2278,23 +2278,20 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
static int ibmvnic_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
- supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL;
+ rc = send_query_phys_parms(adapter);
+ if (rc) {
+ adapter->speed = SPEED_UNKNOWN;
+ adapter->duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.speed = adapter->speed;
+ cmd->base.duplex = adapter->duplex;
cmd->base.port = PORT_FIBRE;
cmd->base.phy_address = 0;
cmd->base.autoneg = AUTONEG_ENABLE;
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
@@ -2922,8 +2919,10 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
goto req_tx_irq_failed;
}
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
+ adapter->vdev->unit_address, i);
rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", scrq);
+ 0, scrq->name, scrq);
if (rc) {
dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
@@ -2943,8 +2942,10 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
dev_err(dev, "Error mapping irq\n");
goto req_rx_irq_failed;
}
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
+ adapter->vdev->unit_address, i);
rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", scrq);
+ 0, scrq->name, scrq);
if (rc) {
dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
scrq->irq, rc);
@@ -3761,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+ netdev_features_t old_hw_features = 0;
union ibmvnic_crq crq;
int i;
@@ -3836,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
- adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
+ if (adapter->state != VNIC_PROBING) {
+ old_hw_features = adapter->netdev->hw_features;
+ adapter->netdev->hw_features = 0;
+ }
+
+ adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
- adapter->netdev->features |= NETIF_F_IP_CSUM;
+ adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
- adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+ adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
if ((adapter->netdev->features &
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
- adapter->netdev->features |= NETIF_F_RXCSUM;
+ adapter->netdev->hw_features |= NETIF_F_RXCSUM;
if (buf->large_tx_ipv4)
- adapter->netdev->features |= NETIF_F_TSO;
+ adapter->netdev->hw_features |= NETIF_F_TSO;
if (buf->large_tx_ipv6)
- adapter->netdev->features |= NETIF_F_TSO6;
+ adapter->netdev->hw_features |= NETIF_F_TSO6;
- adapter->netdev->hw_features |= adapter->netdev->features;
+ if (adapter->state == VNIC_PROBING) {
+ adapter->netdev->features |= adapter->netdev->hw_features;
+ } else if (old_hw_features != adapter->netdev->hw_features) {
+ netdev_features_t tmp = 0;
+
+ /* disable features no longer supported */
+ adapter->netdev->features &= adapter->netdev->hw_features;
+ /* turn on features now supported if previously enabled */
+ tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+ adapter->netdev->hw_features;
+ adapter->netdev->features |=
+ tmp & adapter->netdev->wanted_features;
+ }
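Because the feature-mask bookkeeping above is easy to misread in diff form, here is a worked sketch of the reconciliation with arbitrarily chosen bits (illustration only, not driver code):

static netdev_features_t example_reconcile_features(void)
{
	/* pretend bit0 = CSUM, bit1 = TSO, bit2 = GRO */
	netdev_features_t old_hw = BIT(0) | BIT(1);	/* offered before reset */
	netdev_features_t new_hw = BIT(0) | BIT(2);	/* offered after reset  */
	netdev_features_t wanted = BIT(0) | BIT(1) | BIT(2);
	netdev_features_t features = old_hw;		/* currently enabled    */
	netdev_features_t tmp;

	features &= new_hw;			/* drop TSO: no longer offered */
	tmp = (old_hw ^ new_hw) & new_hw;	/* newly offered bits: GRO     */
	features |= tmp & wanted;		/* enable GRO, it was wanted   */

	return features;			/* CSUM | GRO                  */
}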
memset(&crq, 0, sizeof(crq));
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
@@ -3918,8 +3937,8 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
goto out;
}
- memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
- ETH_ALEN);
+ ether_addr_copy(netdev->dev_addr,
+ &crq->change_mac_addr_rsp.mac_addr[0]);
out:
complete(&adapter->fw_done);
return rc;
@@ -4278,6 +4297,73 @@ out:
}
}
+static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+ int rc;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
+ crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
+ init_completion(&adapter->fw_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc)
+ return rc;
+ wait_for_completion(&adapter->fw_done);
+ return adapter->fw_done_rc ? -EIO : 0;
+}
+
+static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int rc;
+
+ rc = crq->query_phys_parms_rsp.rc.code;
+ if (rc) {
+ netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
+ return rc;
+ }
+ switch (cpu_to_be32(crq->query_phys_parms_rsp.speed)) {
+ case IBMVNIC_10MBPS:
+ adapter->speed = SPEED_10;
+ break;
+ case IBMVNIC_100MBPS:
+ adapter->speed = SPEED_100;
+ break;
+ case IBMVNIC_1GBPS:
+ adapter->speed = SPEED_1000;
+ break;
+ case IBMVNIC_10GBP:
+ adapter->speed = SPEED_10000;
+ break;
+ case IBMVNIC_25GBPS:
+ adapter->speed = SPEED_25000;
+ break;
+ case IBMVNIC_40GBPS:
+ adapter->speed = SPEED_40000;
+ break;
+ case IBMVNIC_50GBPS:
+ adapter->speed = SPEED_50000;
+ break;
+ case IBMVNIC_100GBPS:
+ adapter->speed = SPEED_100000;
+ break;
+ default:
+ netdev_warn(netdev, "Unknown speed 0x%08x\n",
+ cpu_to_be32(crq->query_phys_parms_rsp.speed));
+ adapter->speed = SPEED_UNKNOWN;
+ }
+ if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
+ adapter->duplex = DUPLEX_FULL;
+ else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
+ adapter->duplex = DUPLEX_HALF;
+ else
+ adapter->duplex = DUPLEX_UNKNOWN;
+
+ return rc;
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -4389,6 +4475,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
crq->link_state_indication.phys_link_state;
adapter->logical_link_state =
crq->link_state_indication.logical_link_state;
+ if (adapter->phys_link_state && adapter->logical_link_state)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
break;
case CHANGE_MAC_ADDR_RSP:
netdev_dbg(netdev, "Got MAC address change Response\n");
@@ -4426,6 +4516,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case GET_VPD_RSP:
handle_vpd_rsp(crq, adapter);
break;
+ case QUERY_PHYS_PARMS_RSP:
+ adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
+ complete(&adapter->fw_done);
+ break;
default:
netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
gen_crq->cmd);
@@ -4581,8 +4675,9 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
(unsigned long)adapter);
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
- rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
- adapter);
+ snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
+ adapter->vdev->unit_address);
+ rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
if (rc) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
vdev->irq, rc);
@@ -4625,7 +4720,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
- init_completion(&adapter->init_done);
+ reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4775,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
- init_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,10 +4853,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ init_completion(&adapter->init_done);
adapter->resetting = false;
- adapter->mac_change_pending = false;
-
do {
rc = init_crq_queue(adapter);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f2018dbebfa5..dcf2eb6d9290 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -377,11 +377,16 @@ struct ibmvnic_phys_parms {
u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
__be32 speed;
-#define IBMVNIC_AUTONEG 0x80
-#define IBMVNIC_10MBPS 0x40
-#define IBMVNIC_100MBPS 0x20
-#define IBMVNIC_1GBPS 0x10
-#define IBMVNIC_10GBPS 0x08
+#define IBMVNIC_AUTONEG 0x80000000
+#define IBMVNIC_10MBPS 0x40000000
+#define IBMVNIC_100MBPS 0x20000000
+#define IBMVNIC_1GBPS 0x10000000
+#define IBMVNIC_10GBP 0x08000000
+#define IBMVNIC_40GBPS 0x04000000
+#define IBMVNIC_100GBPS 0x02000000
+#define IBMVNIC_25GBPS 0x01000000
+#define IBMVNIC_50GBPS 0x00800000
+#define IBMVNIC_200GBPS 0x00400000
__be32 mtu;
struct ibmvnic_rc rc;
} __packed __aligned(8);
@@ -850,6 +855,7 @@ struct ibmvnic_crq_queue {
dma_addr_t msg_token;
spinlock_t lock;
bool active;
+ char name[32];
};
union sub_crq {
@@ -876,6 +882,7 @@ struct ibmvnic_sub_crq_queue {
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
atomic_t used;
+ char name[32];
};
struct ibmvnic_long_term_buff {
@@ -962,7 +969,6 @@ struct ibmvnic_tunables {
u64 rx_entries;
u64 tx_entries;
u64 mtu;
- struct sockaddr mac;
};
struct ibmvnic_adapter {
@@ -999,6 +1005,9 @@ struct ibmvnic_adapter {
int phys_link_state;
int logical_link_state;
+ u32 speed;
+ u8 duplex;
+
/* login data */
struct ibmvnic_login_buffer *login_buf;
dma_addr_t login_buf_token;
@@ -1081,7 +1090,6 @@ struct ibmvnic_adapter {
bool resetting;
bool napi_enabled, from_passive_init;
- bool mac_change_pending;
bool failover_pending;
bool force_reset_recovery;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 0fd268070fb4..a65d5a9ba7db 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2797,7 +2797,7 @@ static int e100_set_features(struct net_device *netdev,
netdev->features = features;
e100_exec_cb(nic, NULL, e100_configure);
- return 0;
+ return 1;
}
static const struct net_device_ops e100_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8fe9af0e2ab7..551de8c2fef2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -820,7 +820,7 @@ static int e1000_set_features(struct net_device *netdev,
else
e1000_reset(adapter);
- return 0;
+ return 1;
}
static const struct net_device_ops e1000_netdev_ops = {
@@ -3267,14 +3267,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
- if (!skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
- /* we need this if more than one processor can write to
- * our tail at a time, it synchronizes IO on IA64/Altix
- * systems
- */
- mmiowb();
}
} else {
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7acc61e4f645..0e09bede42a2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3816,7 +3816,6 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
if (tx_ring->next_to_use == tx_ring->count)
tx_ring->next_to_use = 0;
ew32(TDT(0), tx_ring->next_to_use);
- mmiowb();
usleep_range(200, 250);
}
@@ -5897,19 +5896,13 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
DIV_ROUND_UP(PAGE_SIZE,
adapter->tx_fifo_limit) + 2));
- if (!skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring,
tx_ring->next_to_use);
else
writel(tx_ring->next_to_use, tx_ring->tail);
-
- /* we need this if more than one processor can write
- * to our tail at a time, it synchronizes IO on
- *IA64/Altix systems
- */
- mmiowb();
}
} else {
dev_kfree_skb_any(skb);
@@ -7003,7 +6996,7 @@ static int e1000_set_features(struct net_device *netdev,
else
e1000e_reset(adapter);
- return 0;
+ return 1;
}
static const struct net_device_ops e1000e_netdev_ops = {
@@ -7350,7 +7343,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
- if (pci_dev_run_wake(pdev))
+ if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
pm_runtime_put_noidle(&pdev->dev);
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5d4f1761dc0c..8de77155f2e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -321,8 +321,6 @@ static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
err_mask |= PCI_ERR_UNC_COMP_ABORT;
pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
-
- mmiowb();
}
int fm10k_iov_resume(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5a0419421511..90270b4a1682 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
/* create driver workqueue */
fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
fm10k_driver_name);
+ if (!fm10k_workqueue)
+ return -ENOMEM;
fm10k_dbg_init();
@@ -278,7 +280,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
/* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
- pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+ pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
@@ -1035,13 +1037,8 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return;
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 50590e8d1fd1..2f21b3e89fd0 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -21,6 +21,7 @@ i40e-objs := i40e_main.o \
i40e_diag.o \
i40e_txrx.o \
i40e_ptp.o \
+ i40e_ddp.o \
i40e_client.o \
i40e_virtchnl_pf.o \
i40e_xsk.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..7ce42040b851 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -149,6 +149,7 @@ enum i40e_state_t {
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
__I40E_VIRTCHNL_OP_PENDING,
+ __I40E_RECOVERY_MODE,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
@@ -321,6 +322,29 @@ struct i40e_udp_port_config {
u8 filter_index;
};
+#define I40_DDP_FLASH_REGION 100
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+#define I40E_PROFILE_LIST_SIZE \
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
+#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
+#define I40E_DDP_PROFILE_NAME_MAX 64
+
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add);
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+
+struct i40e_ddp_profile_list {
+ u32 p_count;
+ struct i40e_profile_info p_info[0];
+};
+
+struct i40e_ddp_old_profile_list {
+ struct list_head list;
+ size_t old_ddp_size;
+ u8 old_ddp_buf[0];
+};
+
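Since i40e_ddp_profile_list ends in a zero-length array, a hedged sketch of how such a list would typically be sized and allocated; struct_size() is the standard <linux/overflow.h> helper and the function name is an assumption, not part of this change:

static struct i40e_ddp_profile_list *example_alloc_profile_list(u32 count)
{
	struct i40e_ddp_profile_list *lst;

	/* struct_size() = sizeof(*lst) + count * sizeof(lst->p_info[0]),
	 * with overflow checking on the multiplication
	 */
	lst = kzalloc(struct_size(lst, p_info, count), GFP_KERNEL);
	if (!lst)
		return NULL;

	lst->p_count = count;
	return lst;
}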
/* macros related to FLX_PIT */
#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
@@ -589,6 +613,8 @@ struct i40e_pf {
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
+ struct timespec64 ptp_prev_hw_time;
+ ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
u32 tx_hwtstamp_timeouts;
@@ -610,6 +636,8 @@ struct i40e_pf {
u16 override_q_count;
u16 last_sw_conf_flags;
u16 last_sw_conf_valid_flags;
+ /* List to keep previous DDP profiles to be rolled back in the future */
+ struct list_head ddp_old_prof;
};
/**
@@ -790,6 +818,8 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
+
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1081,6 +1111,8 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_save_hw_time(struct i40e_pf *pf);
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
@@ -1096,20 +1128,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
- bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
- int qid = ring->queue_index;
-
- if (ring_is_xdp(ring))
- qid -= ring->vsi->alloc_queue_pairs;
-
- if (!xdp_on)
- return NULL;
-
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 7ab61f6ebb5f..243dcd4bec19 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -608,6 +608,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->aq.api_min_ver >= 7))
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 8))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
@@ -749,7 +754,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 11506102471c..6536023fa074 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0006
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X722 0x0008
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -261,6 +261,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+ i40e_aqc_opc_lldp_restore = 0x0A0A,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@@ -1887,6 +1888,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_2_5GBASE_T = 0x30,
+ I40E_PHY_TYPE_5GBASE_T = 0x31,
I40E_PHY_TYPE_MAX,
I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
@@ -1928,19 +1931,25 @@ enum i40e_aq_phy_type {
BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
- BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \
+ BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_5GBASE_T))
+#define I40E_LINK_SPEED_2_5GB_SHIFT 0x0
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+#define I40E_LINK_SPEED_5GB_SHIFT 0x7
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_2_5GB = BIT(I40E_LINK_SPEED_2_5GB_SHIFT),
+ I40E_LINK_SPEED_5GB = BIT(I40E_LINK_SPEED_5GB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
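The link speeds are one-hot values built from the shift macros above. A minimal sketch of mapping an AQ-reported speed back to Mb/s (hypothetical helper, not taken from the driver; 25G omitted for brevity):

static u32 example_link_speed_to_mbps(enum i40e_aq_link_speed speed)
{
	/* Each enum value is a single bit: BIT(<speed>_SHIFT). */
	switch (speed) {
	case I40E_LINK_SPEED_100MB:	return 100;
	case I40E_LINK_SPEED_1GB:	return 1000;
	case I40E_LINK_SPEED_2_5GB:	return 2500;
	case I40E_LINK_SPEED_5GB:	return 5000;
	case I40E_LINK_SPEED_10GB:	return 10000;
	case I40E_LINK_SPEED_20GB:	return 20000;
	case I40E_LINK_SPEED_40GB:	return 40000;
	default:			return 0; /* unknown or not covered here */
	}
}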
@@ -1986,6 +1995,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40
+#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -2498,18 +2509,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
-
struct i40e_aqc_lldp_start {
u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2
u8 reserved[15];
};
@@ -2633,6 +2645,16 @@ struct i40e_aqc_lldp_stop_start_specific_agent {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+/* Restore LLDP Agent factory settings (direct 0x0A0A) */
+struct i40e_aqc_lldp_restore {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0
+#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
+
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 5f3b8b9ff511..e81530ca08d0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -578,11 +578,9 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_hw *hw = &pf->hw;
struct i40e_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
- u32 size;
- size = sizeof(struct i40e_qvlist_info) +
- (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
- ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+ ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info,
+ qvlist_info->num_vectors - 1), GFP_KERNEL);
if (!ldev->qvlist_info)
return -ENOMEM;
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 97a9b1fb4763..ecb1adaa54ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -28,10 +28,14 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_B:
+ case I40E_DEV_ID_10G_SFP:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
+ case I40E_DEV_ID_X710_N3000:
+ case I40E_DEV_ID_XXV710_N3000:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_KX_X722:
@@ -1149,6 +1153,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
break;
case I40E_PHY_TYPE_100BASE_TX:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T:
case I40E_PHY_TYPE_10GBASE_T:
media = I40E_MEDIA_TYPE_BASET;
break;
@@ -1466,7 +1472,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 current_mode = 0;
u32 mode = 0;
int i;
@@ -1479,21 +1484,6 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
- /* ignore gpio LED src mode entries related to the activity
- * LEDs
- */
- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
- switch (current_mode) {
- case I40E_COMBINED_ACTIVITY:
- case I40E_FILTER_ACTIVITY:
- case I40E_MAC_ACTIVITY:
- case I40E_LINK_ACTIVITY:
- continue;
- default:
- break;
- }
-
mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
@@ -1513,7 +1503,6 @@ u32 i40e_led_get(struct i40e_hw *hw)
**/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 current_mode = 0;
int i;
if (mode & 0xfffffff0)
@@ -1527,22 +1516,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
-
- /* ignore gpio LED src mode entries related to the activity
- * LEDs
- */
- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
- switch (current_mode) {
- case I40E_COMBINED_ACTIVITY:
- case I40E_FILTER_ACTIVITY:
- case I40E_MAC_ACTIVITY:
- case I40E_LINK_ACTIVITY:
- continue;
- default:
- break;
- }
-
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -3657,14 +3630,54 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
+ * i40e_aq_restore_lldp
+ * @hw: pointer to the hw struct
+ * @setting: pointer to factory setting variable or NULL
+ * @restore: True if factory settings should be restored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Restore LLDP Agent factory settings if @restore is set to true. Otherwise,
+ * only return the factory setting in the AQ response.
+ **/
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_restore *cmd =
+ (struct i40e_aqc_lldp_restore *)&desc.params.raw;
+ i40e_status status;
+
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Restore LLDP not supported by current FW version.\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
+
+ if (restore)
+ cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (setting)
+ *setting = cmd->command & 1;
+
+ return status;
+}
+
+/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @persist: True if stop of LLDP should be persistent across power cycles
* @cmd_details: pointer to command details structure or NULL
*
* Stop or Shutdown the embedded LLDP Agent
**/
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -3677,6 +3690,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Stop LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -3686,13 +3707,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
* @buff: buffer for result
+ * @persist: True if start of LLDP should be persistent across power cycles
* @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
**/
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_start *cmd =
@@ -3702,6 +3724,15 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Start LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
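Together with i40e_aq_restore_lldp above, the new persist parameters let a caller stop or start the FW LLDP agent persistently when the firmware reports I40E_HW_FLAG_FW_LLDP_PERSISTENT. A minimal caller sketch (hypothetical function, simplified error mapping):

static int example_disable_fw_lldp_persistent(struct i40e_hw *hw)
{
	u8 factory_setting = 0;
	i40e_status status;

	/* Stop (not shutdown) the agent; request persistence across
	 * power cycles if the firmware supports it.
	 */
	status = i40e_aq_stop_lldp(hw, false, true, NULL);
	if (status)
		return -EIO;

	/* Query the factory default setting without restoring it. */
	status = i40e_aq_restore_lldp(hw, &factory_setting, false, NULL);
	if (status)
		return -EIO;

	return factory_setting;
}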
@@ -4873,6 +4904,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -5448,6 +5480,163 @@ i40e_find_segment_in_package(u32 segment_type,
return NULL;
}
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct i40e_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset) \
+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_I40E)
+ return NULL;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
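A short usage sketch for the lookup helper above (hypothetical caller, not from the patch):

static bool example_profile_has_note(struct i40e_profile_segment *profile)
{
	/* Returns true if the segment carries a note section. */
	return i40e_find_section_in_profile(SECTION_TYPE_NOTE, profile) != NULL;
}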
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+static enum i40e_status_code
+i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= cpu_to_le16(aq->flags);
+ memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
+
+ return 0;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+static enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
/**
* i40e_write_profile
* @hw: pointer to the hardware structure
@@ -5463,47 +5652,99 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
i40e_status status = 0;
struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
- u32 dev_cnt;
- u32 vendor_dev_id;
- u32 *nvm;
+ struct i40e_profile_aq_section *ddp_aq;
u32 section_size = 0;
u32 offset = 0, info = 0;
+ u32 sec_off;
u32 i;
- dev_cnt = profile->device_table_count;
+ status = i40e_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
- for (i = 0; i < dev_cnt; i++) {
- vendor_dev_id = profile->device_table[i].vendor_dev_id;
- if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
- if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
}
- if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
+ return status;
+}
+
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
- nvm = (u32 *)&profile->device_table[dev_cnt];
- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+ status = i40e_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec = (struct i40e_profile_section_header *)((u8 *)profile +
- sec_tbl->section_offset[i]);
+ I40E_SECTION_TABLE(profile, sec_tbl);
- /* Skip 'AQ', 'note' and 'name' sections */
- if (sec->section.type != SECTION_TYPE_MMIO)
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
continue;
section_size = sec->section.size +
sizeof(struct i40e_profile_section_header);
- /* Write profile */
+ /* Write roll-back MMIO section */
status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
- "Failed to write profile: offset %d, info %d",
- offset, info);
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
break;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 56bff8faf371..292eeb3def10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -863,22 +863,23 @@ out:
/**
* i40e_init_dcb
* @hw: pointer to the hw struct
+ * @enable_mib_change: enable mib change event
*
* Update DCB configuration from the Firmware
**/
-i40e_status i40e_init_dcb(struct i40e_hw *hw)
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
i40e_status ret = 0;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
if (!hw->func_caps.dcb)
- return ret;
+ return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
if (ret)
- return ret;
+ return I40E_ERR_NOT_READY;
/* Get the LLDP AdminStatus for the current port */
adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -887,7 +888,7 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
/* LLDP agent disabled */
if (!adminstatus) {
hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
- return ret;
+ return I40E_ERR_NOT_READY;
}
/* Get DCBX status */
@@ -896,26 +897,19 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
return ret;
/* Check the DCBX Status */
- switch (hw->dcbx_status) {
- case I40E_DCBX_STATUS_DONE:
- case I40E_DCBX_STATUS_IN_PROGRESS:
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+ hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
/* Get current DCBX configuration */
ret = i40e_get_dcb_config(hw);
if (ret)
return ret;
- break;
- case I40E_DCBX_STATUS_DISABLED:
- return ret;
- case I40E_DCBX_STATUS_NOT_STARTED:
- case I40E_DCBX_STATUS_MULTIPLE_PEERS:
- default:
- break;
+ } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ return I40E_ERR_NOT_READY;
}
/* Configure the LLDP MIB change event */
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
- if (ret)
- return ret;
+ if (enable_mib_change)
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2b748a60a843..ddb48ae7cce4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -124,5 +124,5 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
u8 bridgetype,
struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change);
#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
new file mode 100644
index 000000000000..5e08f100c413
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+
+#include <linux/firmware.h>
+
+/**
+ * i40e_ddp_profiles_eq - checks if DDP profiles are equivalent
+ * @a: new profile info
+ * @b: old profile info
+ *
+ * checks if DDP profiles are equivalent.
+ * Returns true if profiles are the same.
+ **/
+static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a,
+ struct i40e_profile_info *b)
+{
+ return a->track_id == b->track_id &&
+ !memcmp(&a->version, &b->version, sizeof(a->version)) &&
+ !memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE);
+}
+
+/**
+ * i40e_ddp_does_profile_exist - checks if DDP profile is already loaded
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if the DDP profile is already loaded.
+ * Returns >0 if the profile exists.
+ * Returns 0 if the profile is absent.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
+ struct i40e_profile_info *pinfo)
+{
+ struct i40e_ddp_profile_list *profile_list;
+ u8 buff[I40E_PROFILE_LIST_SIZE];
+ i40e_status status;
+ int i;
+
+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+ NULL);
+ if (status)
+ return -1;
+
+ profile_list = (struct i40e_ddp_profile_list *)buff;
+ for (i = 0; i < profile_list->p_count; i++) {
+ if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i]))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * i40e_ddp_profiles_overlap - checks if DDP profiles overlap.
+ * @new: new profile info
+ * @old: old profile info
+ *
+ * checks if DDP profiles overlap.
+ * Returns true if the profiles overlap.
+ **/
+static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
+ struct i40e_profile_info *old)
+{
+ unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
+ unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+
+ /* 0x00 group must be only the first */
+ if (group_id_new == 0)
+ return true;
+ /* 0xFF group is compatible with anything else */
+ if (group_id_new == 0xFF || group_id_old == 0xFF)
+ return false;
+ /* otherwise only profiles from the same group are compatible */
+ return group_id_old != group_id_new;
+}
+
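The rules above mean group 0x00 conflicts with everything, group 0xFF is compatible with everything, and other profiles conflict only across different groups. An illustrative sketch with made-up track_id values (hypothetical helper):

static u8 example_ddp_group_id(u32 track_id)
{
	/* The group id lives in bits 23:16 of the track id. */
	return (u8)((track_id & 0x00FF0000) >> 16);
}

/*
 * example_ddp_group_id(0x80001234) == 0x00 -> overlaps with anything loaded
 * example_ddp_group_id(0x80FF1234) == 0xFF -> compatible with anything
 * groups 0x01 vs 0x02                      -> overlap (different groups)
 * groups 0x01 vs 0x01                      -> compatible (same group)
 */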
+/**
+ * i40e_ddp_does_profile_overlap - checks if DDP profile overlaps with existing one.
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile overlaps with existing one.
+ * Returns >0 if the profile overlaps.
+ * Returns 0 if the profile is ok.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+ struct i40e_profile_info *pinfo)
+{
+ struct i40e_ddp_profile_list *profile_list;
+ u8 buff[I40E_PROFILE_LIST_SIZE];
+ i40e_status status;
+ int i;
+
+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+ NULL);
+ if (status)
+ return -EIO;
+
+ profile_list = (struct i40e_ddp_profile_list *)buff;
+ for (i = 0; i < profile_list->p_count; i++) {
+ if (i40e_ddp_profiles_overlap(pinfo,
+ &profile_list->p_info[i]))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * i40e_add_pinfo
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+static enum i40e_status_code
+i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_profile_info *pinfo;
+ i40e_status status;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+
+ /* Clear reserved field */
+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
+
+/**
+ * i40e_del_pinfo - delete DDP profile info from NIC
+ * @hw: HW data structure
+ * @profile: DDP profile segment to be deleted
+ * @profile_info_sec: DDP profile section header
+ * @track_id: track ID of the profile for deletion
+ *
+ * Removes DDP profile from the NIC.
+ **/
+static enum i40e_status_code
+i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_profile_info *pinfo;
+ i40e_status status;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
+
+ /* Clear reserved field */
+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
+
+/**
+ * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks
+ * @netdev: net device structure (for logging purposes)
+ * @pkg_hdr: pointer to package header
+ * @size_huge: size of the whole DDP profile package as a size_t
+ *
+ * Checks correctness of the pkg header: version, size too big/small, and
+ * alignment and boundaries of all segment offsets. This lets the driver
+ * reject a non-DDP profile file loaded by administrator mistake.
+ **/
+static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
+ struct i40e_package_header *pkg_hdr,
+ size_t size_huge)
+{
+ u32 size = 0xFFFFFFFFU & size_huge;
+ u32 pkg_hdr_size;
+ u32 segment;
+
+ if (!pkg_hdr)
+ return false;
+
+ if (pkg_hdr->version.major > 0) {
+ struct i40e_ddp_version ver = pkg_hdr->version;
+
+ netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u",
+ ver.major, ver.minor, ver.update, ver.draft);
+ return false;
+ }
+ if (size_huge > size) {
+ netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
+ return false;
+ }
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+ netdev_err(netdev, "Invalid DDP profile - size is too small.");
+ return false;
+ }
+
+ pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U);
+ if (size < pkg_hdr_size) {
+ netdev_err(netdev, "Invalid DDP profile - too many segments");
+ return false;
+ }
+ for (segment = 0; segment < pkg_hdr->segment_count; ++segment) {
+ u32 offset = pkg_hdr->segment_offset[segment];
+
+ if (0xFU & offset) {
+ netdev_err(netdev,
+ "Invalid DDP profile %u segment alignment",
+ segment);
+ return false;
+ }
+ if (pkg_hdr_size > offset || offset >= size) {
+ netdev_err(netdev,
+ "Invalid DDP profile %u segment offset",
+ segment);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * i40e_ddp_load - performs DDP loading
+ * @netdev: net device structure
+ * @data: buffer containing recipe file
+ * @size: size of the buffer
+ * @is_add: true when loading profile, false when rolling back the previous one
+ *
+ * Checks correctness and loads DDP profile to the NIC. The function is
+ * also used for rolling back a previously loaded profile.
+ **/
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add)
+{
+ u8 profile_info_sec[sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info)];
+ struct i40e_metadata_segment *metadata_hdr;
+ struct i40e_profile_segment *profile_hdr;
+ struct i40e_profile_info pinfo;
+ struct i40e_package_header *pkg_hdr;
+ i40e_status status;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 track_id;
+ int istatus;
+
+ pkg_hdr = (struct i40e_package_header *)data;
+ if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
+ return -EINVAL;
+
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+ netdev_err(netdev, "Invalid DDP recipe size.");
+ return -EINVAL;
+ }
+
+ /* Find beginning of segment data in buffer */
+ metadata_hdr = (struct i40e_metadata_segment *)
+ i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr);
+ if (!metadata_hdr) {
+ netdev_err(netdev, "Failed to find metadata segment in DDP recipe.");
+ return -EINVAL;
+ }
+
+ track_id = metadata_hdr->track_id;
+ profile_hdr = (struct i40e_profile_segment *)
+ i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+ if (!profile_hdr) {
+ netdev_err(netdev, "Failed to find profile segment in DDP recipe.");
+ return -EINVAL;
+ }
+
+ pinfo.track_id = track_id;
+ pinfo.version = profile_hdr->version;
+ if (is_add)
+ pinfo.op = I40E_DDP_ADD_TRACKID;
+ else
+ pinfo.op = I40E_DDP_REMOVE_TRACKID;
+
+ memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE);
+
+ /* Check if profile data already exists */
+ istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo);
+ if (istatus < 0) {
+ netdev_err(netdev, "Failed to fetch loaded profiles.");
+ return istatus;
+ }
+ if (is_add) {
+ if (istatus > 0) {
+ netdev_err(netdev, "DDP profile already loaded.");
+ return -EINVAL;
+ }
+ istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo);
+ if (istatus < 0) {
+ netdev_err(netdev, "Failed to fetch loaded profiles.");
+ return istatus;
+ }
+ if (istatus > 0) {
+ netdev_err(netdev, "DDP profile overlaps with existing one.");
+ return -EINVAL;
+ }
+ } else {
+ if (istatus == 0) {
+ netdev_err(netdev,
+ "DDP profile for deletion does not exist.");
+ return -EINVAL;
+ }
+ }
+
+ /* Load profile data */
+ if (is_add) {
+ status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
+ if (status) {
+ if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
+ netdev_err(netdev,
+ "Profile is not supported by the device.");
+ return -EPERM;
+ }
+ netdev_err(netdev, "Failed to write DDP profile.");
+ return -EIO;
+ }
+ } else {
+ status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to remove DDP profile.");
+ return -EIO;
+ }
+ }
+
+ /* Add/remove profile to/from profile list in FW */
+ if (is_add) {
+ status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+ track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to add DDP profile info.");
+ return -EIO;
+ }
+ } else {
+ status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+ track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to restore DDP profile info.");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_ddp_restore - restore previously loaded profile and remove from list
+ * @pf: PF data struct
+ *
+ * Restores the previously loaded profile stored on the list in driver memory.
+ * After rolling back, removes the entry from the list.
+ **/
+static int i40e_ddp_restore(struct i40e_pf *pf)
+{
+ struct i40e_ddp_old_profile_list *entry;
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ int status = 0;
+
+ if (!list_empty(&pf->ddp_old_prof)) {
+ entry = list_first_entry(&pf->ddp_old_prof,
+ struct i40e_ddp_old_profile_list,
+ list);
+ status = i40e_ddp_load(netdev, entry->old_ddp_buf,
+ entry->old_ddp_size, false);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return status;
+}
+
+/**
+ * i40e_ddp_flash - callback function for ethtool flash feature
+ * @netdev: net device structure
+ * @flash: kernel flash structure
+ *
+ * Ethtool callback function used for loading and unloading DDP profiles.
+ **/
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash)
+{
+ const struct firmware *ddp_config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int status = 0;
+
+ /* Check for valid region first */
+ if (flash->region != I40_DDP_FLASH_REGION) {
+ netdev_err(netdev, "Requested firmware region is not recognized by this driver.");
+ return -EINVAL;
+ }
+ if (pf->hw.bus.func != 0) {
+ netdev_err(netdev, "Any DDP operation is allowed only on Phy0 NIC interface");
+ return -EINVAL;
+ }
+
+ /* If the user supplied "-" instead of a file name, roll back the
+ * previously stored profile.
+ */
+ if (strncmp(flash->data, "-", 2) != 0) {
+ struct i40e_ddp_old_profile_list *list_entry;
+ char profile_name[sizeof(I40E_DDP_PROFILE_PATH)
+ + I40E_DDP_PROFILE_NAME_MAX];
+
+ profile_name[sizeof(profile_name) - 1] = 0;
+ strncpy(profile_name, I40E_DDP_PROFILE_PATH,
+ sizeof(profile_name) - 1);
+ strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX);
+ /* Load DDP recipe. */
+ status = request_firmware(&ddp_config, profile_name,
+ &netdev->dev);
+ if (status) {
+ netdev_err(netdev, "DDP recipe file request failed.");
+ return status;
+ }
+
+ status = i40e_ddp_load(netdev, ddp_config->data,
+ ddp_config->size, true);
+
+ if (!status) {
+ list_entry =
+ kzalloc(sizeof(struct i40e_ddp_old_profile_list) +
+ ddp_config->size, GFP_KERNEL);
+ if (!list_entry) {
+ netdev_info(netdev, "Failed to allocate memory for previous DDP profile data.");
+ netdev_info(netdev, "New profile loaded but roll-back will be impossible.");
+ } else {
+ memcpy(list_entry->old_ddp_buf,
+ ddp_config->data, ddp_config->size);
+ list_entry->old_ddp_size = ddp_config->size;
+ list_add(&list_entry->list, &pf->ddp_old_prof);
+ }
+ }
+
+ release_firmware(ddp_config);
+ } else {
+ if (!list_empty(&pf->ddp_old_prof)) {
+ status = i40e_ddp_restore(pf);
+ } else {
+ netdev_warn(netdev, "There is no DDP profile to restore.");
+ status = -ENOENT;
+ }
+ }
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index c67d485d6f99..7ea4f09229e4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1321,7 +1321,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
- ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+ ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Stop LLDP AQ command failed =0x%x\n",
@@ -1358,7 +1358,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
/* Continue and start FW LLDP anyways */
}
- ret = i40e_aq_start_lldp(&pf->hw, NULL);
+ ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Start LLDP AQ command failed =0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 334b05ff685a..bac4da031f9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -5,6 +5,8 @@
#define _I40E_DEVIDS_H_
/* Device IDs */
+#define I40E_DEV_ID_X710_N3000 0x0CF8
+#define I40E_DEV_ID_XXV710_N3000 0x0D58
#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_B 0x1580
@@ -18,6 +20,9 @@
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_25G_B 0x158A
#define I40E_DEV_ID_25G_SFP28 0x158B
+#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
+#define I40E_DEV_ID_10G_B 0x104F
+#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7545b21bee3c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -508,6 +508,20 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
}
+ if (phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ }
if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
@@ -535,17 +549,23 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
}
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
+ }
if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseLR4_Full);
+ 40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseLR4_Full);
+ 40000baseKR4_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -668,13 +688,15 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_5GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
@@ -720,14 +742,20 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_40GBASE_AOC:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
break;
case I40E_PHY_TYPE_40GBASE_SR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
break;
case I40E_PHY_TYPE_40GBASE_LR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
break;
case I40E_PHY_TYPE_25GBASE_SR:
case I40E_PHY_TYPE_25GBASE_LR:
@@ -778,12 +806,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
10000baseT_Full);
break;
case I40E_PHY_TYPE_10GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
case I40E_PHY_TYPE_100BASE_TX:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
@@ -791,6 +825,12 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
@@ -946,6 +986,12 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_LINK_SPEED_10GB:
ks->base.speed = SPEED_10000;
break;
+ case I40E_LINK_SPEED_5GB:
+ ks->base.speed = SPEED_5000;
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ ks->base.speed = SPEED_2500;
+ break;
case I40E_LINK_SPEED_1GB:
ks->base.speed = SPEED_1000;
break;
@@ -1033,6 +1079,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
break;
case I40E_MEDIA_TYPE_FIBER:
ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
ks->base.port = PORT_FIBRE;
break;
case I40E_MEDIA_TYPE_UNKNOWN:
@@ -1231,6 +1278,12 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
10000baseLR_Full))
config.link_speed |= I40E_LINK_SPEED_10GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_2_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 5000baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
20000baseKR2_Full))
config.link_speed |= I40E_LINK_SPEED_20GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -2573,8 +2626,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
- | (wol->wolopts != WAKE_FILTER))
+ if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
/* is this a new value? */
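The rewritten check accepts only an empty request or WAKE_MAGIC in a single mask test. A tiny illustration (hypothetical helper; WAKE_* flags come from <linux/ethtool.h>):

static bool example_wol_request_supported(u32 wolopts)
{
	/* 0 (disable WoL) and WAKE_MAGIC pass; combinations such as
	 * WAKE_MAGIC | WAKE_FILTER or WAKE_UCAST are rejected.
	 */
	return !(wolopts & ~WAKE_MAGIC);
}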
@@ -4946,7 +4998,7 @@ flags_complete:
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
- i40e_aq_stop_lldp(&pf->hw, true, NULL);
+ i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
/* reset local_dcbx_config to default */
dcbcfg = &pf->hw.local_dcbx_config;
@@ -4961,7 +5013,7 @@ flags_complete:
dcbcfg->pfc.willing = 1;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
} else {
- i40e_aq_start_lldp(&pf->hw, NULL);
+ i40e_aq_start_lldp(&pf->hw, false, NULL);
}
}
@@ -5129,6 +5181,12 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+ .set_eeprom = i40e_set_eeprom,
+ .get_eeprom_len = i40e_get_eeprom_len,
+ .get_eeprom = i40e_get_eeprom,
+};
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
@@ -5172,9 +5230,16 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_link_ksettings = i40e_set_link_ksettings,
.get_fecparam = i40e_get_fec_param,
.set_fecparam = i40e_set_fec_param,
+ .flash_device = i40e_ddp_flash,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &i40e_ethtool_ops;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ netdev->ethtool_ops = &i40e_ethtool_ops;
+ else
+ netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..320562b39686 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -27,7 +27,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_BUILD 20
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -46,6 +46,10 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
+static bool i40e_check_recovery_mode(struct i40e_pf *pf);
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
@@ -69,6 +73,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
@@ -77,6 +83,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
/* required last entry */
@@ -278,8 +286,9 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
**/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
- if (!test_bit(__I40E_DOWN, pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ if ((!test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
+ test_bit(__I40E_RECOVERY_MODE, pf->state))
queue_work(i40e_wq, &pf->service_task);
}
@@ -2107,11 +2116,22 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
- set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
- dev_warn(&vsi->back->pdev->dev,
- "Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err),
- vsi_name);
+ if (vsi->type == I40E_VSI_MAIN) {
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err), vsi_name);
+ } else if (vsi->type == I40E_VSI_SRIOV ||
+ vsi->type == I40E_VSI_VMDQ1 ||
+ vsi->type == I40E_VSI_VMDQ2) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
+ i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+ } else {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
+ i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+ }
}
}
@@ -2654,6 +2674,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
struct i40e_vsi_context ctxt;
i40e_status ret;
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
if ((vsi->info.valid_sections &
cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
@@ -2684,6 +2708,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
struct i40e_vsi_context ctxt;
i40e_status ret;
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
if ((vsi->info.valid_sections &
cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
@@ -2949,9 +2977,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
- i40e_vlan_stripping_disable(vsi);
-
vsi->info.pvid = 0;
+
+ i40e_vlan_stripping_disable(vsi);
}
/**
@@ -3064,6 +3092,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+ bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+ int qid = ring->queue_index;
+
+ if (ring_is_xdp(ring))
+ qid -= ring->vsi->alloc_queue_pairs;
+
+ if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
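The UMEM lookup is now gated by the per-VSI af_xdp_zc_qps bitmap allocated in i40e_vsi_mem_alloc() further below. A hedged sketch of how a zero-copy setup path would mark a queue pair (hypothetical helper; the real driver flips these bits in its XSK code, which is not part of this hunk):

static void example_set_zc_qp(struct i40e_vsi *vsi, u16 qid, bool enable)
{
	/* i40e_xsk_umem() only returns a UMEM for queue pairs whose bit
	 * is set here.
	 */
	if (enable)
		set_bit(qid, vsi->af_xdp_zc_qps);
	else
		clear_bit(qid, vsi->af_xdp_zc_qps);
}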
+/**
* i40e_configure_tx_ring - Configure a transmit ring context and rest
* @ring: The Tx ring to configure
*
@@ -3980,7 +4028,8 @@ static irqreturn_t i40e_intr(int irq, void *data)
enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- if (!test_bit(__I40E_DOWN, pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_RECOVERY_MODE, pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf);
}
@@ -6383,7 +6432,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
goto out;
/* Get the initial DCB configuration */
- err = i40e_init_dcb(hw);
+ err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
if ((!hw->func_caps.dcb) ||
@@ -6473,6 +6522,12 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_10GB:
speed = "10 G";
break;
+ case I40E_LINK_SPEED_5GB:
+ speed = "5 G";
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ speed = "2.5 G";
+ break;
case I40E_LINK_SPEED_1GB:
speed = "1000 M";
break;
@@ -6826,10 +6881,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
struct i40e_pf *pf = vsi->back;
u8 enabled_tc = 0, num_tc, hw;
bool need_reset = false;
+ int old_queue_pairs;
int ret = -EINVAL;
u16 mode;
int i;
+ old_queue_pairs = vsi->num_queue_pairs;
num_tc = mqprio_qopt->qopt.num_tc;
hw = mqprio_qopt->qopt.hw;
mode = mqprio_qopt->mode;
@@ -6930,6 +6987,7 @@ config_tc:
}
ret = i40e_configure_queue_channels(vsi);
if (ret) {
+ vsi->num_queue_pairs = old_queue_pairs;
netdev_info(netdev,
"Failed configuring queue channels\n");
need_reset = true;
@@ -9270,6 +9328,11 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
dev_warn(&pf->pdev->dev,
"shutdown_lan_hmc failed: %d\n", ret);
}
+
+ /* Save the current PTP time so that we can restore the time after the
+ * reset completes.
+ */
+ i40e_ptp_save_hw_time(pf);
}
/**
@@ -9362,6 +9425,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
+ int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0;
@@ -9369,7 +9433,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
u32 val;
int v;
- if (test_bit(__I40E_DOWN, pf->state))
+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+ i40e_check_recovery_mode(pf)) {
+ i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+ }
+
+ if (test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !old_recovery_mode_bit)
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -9398,6 +9469,44 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
i40e_verify_eeprom(pf);
+ /* If we are going out of or into recovery mode we have to act
+ * accordingly with regard to resource initialization
+ * and deinitialization
+ */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
+ old_recovery_mode_bit) {
+ if (i40e_get_capabilities(pf,
+ i40e_aqc_opc_list_func_capabilities))
+ goto end_unlock;
+
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ /* we're staying in recovery mode so we'll reinitialize
+ * misc vector here
+ */
+ if (i40e_setup_misc_vector_for_recovery_mode(pf))
+ goto end_unlock;
+ } else {
+ if (!lock_acquired)
+ rtnl_lock();
+ /* we're going out of recovery mode so we'll free
+ * the IRQ allocated specifically for recovery mode
+ * and restore the interrupt scheme
+ */
+ free_irq(pf->pdev->irq, pf);
+ i40e_clear_interrupt_scheme(pf);
+ if (i40e_restore_interrupt_scheme(pf))
+ goto end_unlock;
+ }
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* bail out in case recovery mode was detected, as there is
+ * no need for further configuration.
+ */
+ goto end_unlock;
+ }
+
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (ret)
@@ -9649,7 +9758,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
bool mdd_detected = false;
- bool pf_mdd_detected = false;
struct i40e_vf *vf;
u32 reg;
int i;
@@ -9695,19 +9803,12 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_PF_MDET_TX);
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
+ dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
- dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
- /* Queue belongs to the PF, initiate a reset */
- if (pf_mdd_detected) {
- set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
- i40e_service_event_schedule(pf);
+ dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
}
}
@@ -9720,6 +9821,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
@@ -9728,11 +9832,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
- }
-
- if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
- dev_info(&pf->pdev->dev,
- "Too many MDD events on VF %d, disabled\n", i);
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
@@ -9859,31 +9958,38 @@ static void i40e_service_task(struct work_struct *work)
unsigned long start_time = jiffies;
/* don't bother with service tasks if a reset is in progress */
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_SUSPENDED, pf->state))
return;
if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
- i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
- i40e_sync_filters_subtask(pf);
- i40e_reset_subtask(pf);
- i40e_handle_mdd_event(pf);
- i40e_vc_process_vflr_event(pf);
- i40e_watchdog_subtask(pf);
- i40e_fdir_reinit_subtask(pf);
- if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
- /* Client subtask will reopen next time through. */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+ i40e_sync_filters_subtask(pf);
+ i40e_reset_subtask(pf);
+ i40e_handle_mdd_event(pf);
+ i40e_vc_process_vflr_event(pf);
+ i40e_watchdog_subtask(pf);
+ i40e_fdir_reinit_subtask(pf);
+ if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
+ /* Client subtask will reopen next time through. */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
+ true);
+ } else {
+ i40e_client_subtask(pf);
+ if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
+ pf->state))
+ i40e_notify_client_of_l2_param_changes(
+ pf->vsi[pf->lan_vsi]);
+ }
+ i40e_sync_filters_subtask(pf);
+ i40e_sync_udp_filters_subtask(pf);
} else {
- i40e_client_subtask(pf);
- if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
- pf->state))
- i40e_notify_client_of_l2_param_changes(
- pf->vsi[pf->lan_vsi]);
- }
- i40e_sync_filters_subtask(pf);
- i40e_sync_udp_filters_subtask(pf);
+ i40e_reset_subtask(pf);
+ }
+
i40e_clean_adminq_subtask(pf);
/* flush memory to make sure state is correct before next watchdog */
@@ -10064,6 +10170,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
+ if (type == I40E_VSI_MAIN) {
+ vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_rings;
+ }
+
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
goto err_rings;
@@ -10082,6 +10194,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
goto unlock_pf;
err_rings:
+ bitmap_free(vsi->af_xdp_zc_qps);
pf->next_vsi = i - 1;
kfree(vsi);
unlock_pf:
@@ -10162,6 +10275,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ bitmap_free(vsi->af_xdp_zc_qps);
i40e_vsi_free_arrays(vsi, true);
i40e_clear_rss_config_user(vsi);
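The af_xdp_zc_qps bitmap introduced above gives the VSI one bit per LAN queue pair to record which queues run with AF_XDP zero-copy enabled; the i40e_xsk.c hunks further down set and clear that bit when a UMEM is enabled or disabled, and i40e_vsi_clear() frees the bitmap. A minimal sketch of that lifecycle, with hypothetical names and assuming the standard kernel bitmap helpers:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical container; i40e keeps the bitmap in struct i40e_vsi. */
struct zc_tracker {
	unsigned long *zc_qps;	/* one bit per queue pair */
	u16 num_qps;
};

static int zc_tracker_init(struct zc_tracker *t, u16 num_qps)
{
	t->zc_qps = bitmap_zalloc(num_qps, GFP_KERNEL);
	if (!t->zc_qps)
		return -ENOMEM;
	t->num_qps = num_qps;
	return 0;
}

static void zc_tracker_enable(struct zc_tracker *t, u16 qid)
{
	set_bit(qid, t->zc_qps);	/* queue now runs in zero-copy mode */
}

static void zc_tracker_disable(struct zc_tracker *t, u16 qid)
{
	clear_bit(qid, t->zc_qps);
}

static void zc_tracker_destroy(struct zc_tracker *t)
{
	bitmap_free(t->zc_qps);		/* kfree-based, safe on NULL */
	t->zc_qps = NULL;
}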
@@ -10698,6 +10812,48 @@ err_unwind:
}
/**
+ * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
+ * non-queue events in recovery mode
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
+ * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
+ * This is handled differently from the normal-mode setup since no Tx/Rx
+ * resources are being allocated.
+ **/
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
+{
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_setup_misc_vector(pf);
+
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI-X misc vector request failed, error %d\n",
+ err);
+ return err;
+ }
+ } else {
+ u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
+
+ err = request_irq(pf->pdev->irq, i40e_intr, flags,
+ pf->int_name, pf);
+
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI/legacy misc vector request failed, error %d\n",
+ err);
+ return err;
+ }
+ i40e_enable_misc_int_causes(pf);
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return 0;
+}
+
+/**
* i40e_setup_misc_vector - Setup the misc vector to handle non queue events
* @pf: board private structure
*
@@ -13860,6 +14016,125 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
}
/**
+ * i40e_check_recovery_mode - check if we are running transition firmware
+ * @pf: board private structure
+ *
+ * Check registers indicating the firmware runs in recovery mode. Sets the
+ * appropriate driver state.
+ *
+ * Returns true if the recovery mode was detected, false otherwise
+ **/
+static bool i40e_check_recovery_mode(struct i40e_pf *pf)
+{
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
+
+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+ dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ set_bit(__I40E_RECOVERY_MODE, pf->state);
+
+ return true;
+ }
+ if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
+ dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
+
+ return false;
+}
+
+/**
+ * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
+ * @pf: board private structure
+ * @hw: ptr to the hardware info
+ *
+ * This function does a minimal setup of all subsystems needed for running
+ * recovery mode.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+{
+ struct i40e_vsi *vsi;
+ int err;
+ int v_idx;
+
+ pci_save_state(pf->pdev);
+
+ /* set up periodic task facility */
+ timer_setup(&pf->service_timer, i40e_service_timer, 0);
+ pf->service_timer_period = HZ;
+
+ INIT_WORK(&pf->service_task, i40e_service_task);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
+ */
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+ /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
+ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
+ GFP_KERNEL);
+ if (!pf->vsi) {
+ err = -ENOMEM;
+ goto err_switch_setup;
+ }
+
+ /* We allocate one VSI, which is the absolute minimum needed
+ * in order to register the netdev
+ */
+ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
+ if (v_idx < 0)
+ goto err_switch_setup;
+ pf->lan_vsi = v_idx;
+ vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_switch_setup;
+ vsi->alloc_queue_pairs = 1;
+ err = i40e_config_netdev(vsi);
+ if (err)
+ goto err_switch_setup;
+ err = register_netdev(vsi->netdev);
+ if (err)
+ goto err_switch_setup;
+ vsi->netdev_registered = true;
+ i40e_dbg_pf_init(pf);
+
+ err = i40e_setup_misc_vector_for_recovery_mode(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* since everything's happy, start the service_task timer */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
+ del_timer_sync(&pf->service_timer);
+ i40e_shutdown_adminq(hw);
+ iounmap(hw->hw_addr);
+ pci_disable_pcie_error_reporting(pf->pdev);
+ pci_release_mem_regions(pf->pdev);
+ pci_disable_device(pf->pdev);
+ kfree(pf);
+
+ return err;
+}
+
+/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
@@ -13956,6 +14231,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&pf->l3_flex_pit_list);
INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+ INIT_LIST_HEAD(&pf->ddp_old_prof);
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
@@ -13983,13 +14259,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Reset here to make sure all is clean and to define PF 'n' */
i40e_clear_hw(hw);
- err = i40e_pf_reset(hw);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
- goto err_pf_reset;
+ if (!i40e_check_recovery_mode(pf)) {
+ err = i40e_pf_reset(hw);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
+ }
+ pf->pfr_count++;
}
- pf->pfr_count++;
-
hw->aq.num_arq_entries = I40E_AQ_LEN;
hw->aq.num_asq_entries = I40E_AQ_LEN;
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
@@ -14014,7 +14291,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
if (err == I40E_ERR_FIRMWARE_API_VERSION)
dev_info(&pdev->dev,
- "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+ "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
else
dev_info(&pdev->dev,
"The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
@@ -14023,19 +14304,28 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_oem_version(hw);
- /* provide nvm, fw, api versions */
- dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+ /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
+ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
- i40e_nvm_version_str(hw));
+ i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
+ hw->subsystem_vendor_id, hw->subsystem_device_id);
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
dev_info(&pdev->dev,
- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
dev_info(&pdev->dev,
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
i40e_verify_eeprom(pf);
@@ -14044,6 +14334,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
i40e_clear_pxe_mode(hw);
+
err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (err)
goto err_adminq_setup;
@@ -14054,6 +14345,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init;
}
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+ return i40e_init_recovery_mode(pf, hw);
+
err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (err) {
@@ -14074,7 +14368,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
- i40e_aq_stop_lldp(hw, true, NULL);
+ i40e_aq_stop_lldp(hw, true, false, NULL);
}
/* allow a platform config to override the HW addr */
@@ -14439,6 +14733,19 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->service_task.func)
cancel_work_sync(&pf->service_task);
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ struct i40e_vsi *vsi = pf->vsi[0];
+
+ /* We know that we have allocated only one VSI for this PF;
+ * it was used only to register the netdevice so that the
+ * interface is visible in the 'ifconfig' output.
+ */
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+
+ goto unmap;
+ }
+
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
@@ -14488,6 +14795,12 @@ static void i40e_remove(struct pci_dev *pdev)
ret_code);
}
+unmap:
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ free_irq(pf->pdev->irq, pf);
+
/* shutdown the adminq */
i40e_shutdown_adminq(hw);
@@ -14500,7 +14813,8 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_clear_interrupt_scheme(pf);
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
- i40e_vsi_clear_rings(pf->vsi[i]);
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(pf->vsi[i]);
i40e_vsi_clear(pf->vsi[i]);
pf->vsi[i] = NULL;
}
@@ -14708,6 +15022,11 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_WUFC,
(pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ free_irq(pf->pdev->irq, pf);
+
/* Since we're going to destroy queues during the
* i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
* whole section
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 0299e5bbb902..c508b75c3c09 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -578,11 +578,10 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
__le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- if (!ret_code) {
- le_sum = cpu_to_le16(checksum);
+ le_sum = cpu_to_le16(checksum);
+ if (!ret_code)
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &le_sum, true);
- }
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index e08d754824b1..882627073dce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -203,14 +203,18 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
bool dcb_enable,
struct i40e_asq_cmd_details
*cmd_details);
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
@@ -429,10 +433,16 @@ i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile);
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..439c35f0c581 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now;
+ struct timespec64 now, then;
+ then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now, NULL);
- timespec64_add_ns(&now, delta);
+ now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
mutex_unlock(&pf->tmreg_lock);
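The change from timespec64_add_ns() to ns_to_timespec64() plus timespec64_add() matters when the requested adjustment is negative: timespec64_add_ns() takes an unsigned u64 nanosecond count, so a negative delta would be applied as a huge forward jump. A minimal sketch of the corrected pattern (hypothetical helper name):

#include <linux/time64.h>

/*
 * Sketch only: apply a signed nanosecond adjustment to a timespec64.
 * Converting the signed delta with ns_to_timespec64() and summing with
 * timespec64_add() keeps the sign intact for both directions.
 */
static struct timespec64 adjust_time(struct timespec64 now, s64 delta_ns)
{
	struct timespec64 then = ns_to_timespec64(delta_ns);

	return timespec64_add(now, then);	/* handles delta_ns < 0 */
}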
@@ -724,16 +725,68 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ /* Set the previous "reset" time to the current kernel clock time */
+ pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real());
+ pf->ptp_reset_start = ktime_get();
+
return 0;
}
/**
+ * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time
+ * @pf: Board private structure
+ *
+ * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should
+ * be called at the end of preparing to reset, just before hardware reset
+ * occurs, in order to preserve the PTP time as close as possible across
+ * resets.
+ */
+void i40e_ptp_save_hw_time(struct i40e_pf *pf)
+{
+ /* don't try to access the PTP clock if it's not enabled */
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return;
+
+ i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL);
+ /* Get a monotonic starting time for this reset */
+ pf->ptp_reset_start = ktime_get();
+}
+
+/**
+ * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs
+ * @pf: Board private structure
+ *
+ * Restore the PTP hardware clock registers. We previously cached the PTP
+ * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible,
+ * update this value based on the time delta since the time was saved, using
+ * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference.
+ *
+ * This ensures that the hardware clock is restored to nearly what it should
+ * have been if a reset had not occurred.
+ */
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
+{
+ ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start);
+
+ /* Update the previous HW time with the ktime delta */
+ timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta));
+
+ /* Restore the hardware clock registers */
+ i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
+}
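Taken together, i40e_ptp_save_hw_time() and i40e_ptp_restore_hw_time() implement a checkpoint/restore of the PHC: the last known device time is anchored to a CLOCK_MONOTONIC timestamp, and on restore the elapsed monotonic delta is added back. A minimal sketch of the same pattern with hypothetical names:

#include <linux/ktime.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>

/* Sketch: preserve a device clock across a reset via a monotonic anchor. */
struct clk_checkpoint {
	struct timespec64 saved;	/* last known device time */
	ktime_t anchor;			/* CLOCK_MONOTONIC at save time */
};

static void clk_checkpoint_save(struct clk_checkpoint *c,
				const struct timespec64 *dev_time)
{
	c->saved = *dev_time;
	c->anchor = ktime_get();
}

static struct timespec64 clk_checkpoint_restore(struct clk_checkpoint *c)
{
	ktime_t delta = ktime_sub(ktime_get(), c->anchor);

	/* advance the saved time by however long the reset took */
	timespec64_add_ns(&c->saved, ktime_to_ns(delta));
	return c->saved;
}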
+
+/**
* i40e_ptp_init - Initialize the 1588 support after device probe or reset
* @pf: Board private structure
*
* This function sets device up for 1588 support. The first time it is run, it
* will create a PHC clock device. It does not create a clock device if one
* already exists. It also reconfigures the device after a reset.
+ *
+ * The first time a clock is created, i40e_ptp_create_clock will set
+ * pf->ptp_prev_hw_time to the current system time. During resets, it is
+ * expected that this timespec will be set to the last known PTP clock time,
+ * in order to preserve the clock time as close as possible across a reset.
**/
void i40e_ptp_init(struct i40e_pf *pf)
{
@@ -765,7 +818,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
} else if (pf->ptp_clock) {
- struct timespec64 ts;
u32 regval;
if (pf->hw.debug_mask & I40E_DEBUG_LAN)
@@ -786,9 +838,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
/* reset timestamping mode */
i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
- /* Set the clock value. */
- ts = ktime_to_timespec64(ktime_get_real());
- i40e_ptp_settime(&pf->ptp_caps, &ts);
+ /* Restore the clock time based on last known value */
+ i40e_ptp_restore_hw_time(pf);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6c97667d20ef..20a283702c9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2035,7 +2035,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > I40E_RX_HDR_SIZE)
- headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data,
+ I40E_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), xdp->data,
@@ -3469,13 +3470,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 2781ab91ca82..8f43aa47c263 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -252,6 +252,12 @@ struct i40e_phy_info {
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
I40E_PHY_TYPE_OFFSET)
+/* Offset for 2.5G/5G PHY Types value to bit number conversion */
+#define I40E_PHY_TYPE_OFFSET2 (-10)
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
@@ -616,6 +622,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
u64 flags;
/* Used in set switch config AQ command */
@@ -1527,6 +1534,8 @@ struct i40e_generic_seg_header {
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY 0
+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
u32 track_id;
char name[I40E_DDP_NAME_SIZE];
};
@@ -1555,15 +1564,36 @@ struct i40e_profile_section_header {
struct {
#define SECTION_TYPE_INFO 0x00000010
#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
#define SECTION_TYPE_NOTE 0x80000000
#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
u32 type;
u32 offset;
u32 size;
} section;
};
+struct i40e_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
struct i40e_profile_info {
u32 track_id;
struct i40e_ddp_version version;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 831d52bc3c9a..479bc60c8f71 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -181,7 +181,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
* check for the valid queue id
**/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
- u8 qid)
+ u16 qid)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
@@ -196,7 +196,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
*
* check for the valid vector id
**/
-static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
struct i40e_pf *pf = vf->pf;
@@ -441,14 +441,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf, size;
+ int ret = 0;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+ if (qvlist_info->num_vectors > msix_vf) {
+ dev_warn(&pf->pdev->dev,
+ "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
+ qvlist_info->num_vectors,
+ msix_vf);
+ ret = -EINVAL;
+ goto err_out;
+ }
size = sizeof(struct virtchnl_iwarp_qvlist_info) +
(sizeof(struct virtchnl_iwarp_qv_info) *
(qvlist_info->num_vectors - 1));
+ kfree(vf->qvlist_info);
vf->qvlist_info = kzalloc(size, GFP_KERNEL);
- if (!vf->qvlist_info)
- return -ENOMEM;
-
+ if (!vf->qvlist_info) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
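The added bound check caps num_vectors at the per-VF MSI-X budget before it feeds the buffer size computation, and freeing any previously stored qvlist_info avoids leaking an earlier allocation if the VF sends the message again. A sketch of the same validate-then-allocate pattern for a trailing-array message, here using the kernel's struct_size() helper instead of the open-coded size math (hypothetical types):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical message with a trailing element array, like qvlist_info. */
struct qv_msg {
	u32 num_vectors;
	u32 qv_info[];
};

static struct qv_msg *qv_msg_dup(const struct qv_msg *req, u32 max_vectors)
{
	struct qv_msg *copy;

	if (req->num_vectors > max_vectors)	/* reject before sizing */
		return NULL;

	/* struct_size() does the header + n * element math overflow-safely */
	copy = kzalloc(struct_size(copy, qv_info, req->num_vectors),
		       GFP_KERNEL);
	if (!copy)
		return NULL;

	copy->num_vectors = req->num_vectors;
	return copy;
}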
@@ -459,8 +473,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this vf */
- if (!i40e_vc_isvalid_vector_id(vf, v_idx))
- goto err;
+ if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
+ ret = -EINVAL;
+ goto err_free;
+ }
vf->qvlist_info->qv_info[i] = *qv_info;
@@ -502,10 +518,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
}
return 0;
-err:
+err_free:
kfree(vf->qvlist_info);
vf->qvlist_info = NULL;
- return -EINVAL;
+err_out:
+ return ret;
}
/**
@@ -1112,15 +1129,6 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
return I40E_ERR_PARAM;
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (allmulti || alluni)) {
- dev_err(&pf->pdev->dev,
- "Unprivileged VF %d is attempting to configure promiscuous mode\n",
- vf->vf_id);
- /* Lie to the VF on purpose. */
- return 0;
- }
-
if (vf->port_vlan_id) {
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
allmulti,
@@ -1997,8 +2005,31 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
bool allmulti = false;
bool alluni = false;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
- return I40E_ERR_PARAM;
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+
+ /* Lie to the VF on purpose, because this is an error we can
+ * ignore. Unprivileged VF is not a virtual channel error.
+ */
+ aq_ret = 0;
+ goto err_out;
+ }
+
+ if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
/* Multicast promiscuous handling*/
if (info->flags & FLAG_VF_MULTICAST_PROMISC)
@@ -2032,7 +2063,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
}
-
+err_out:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
@@ -2054,17 +2085,16 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_pair_info *qpi;
struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id = 0;
+ u16 num_qps_all = 0;
i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
- vsi_id = qci->vsi_id;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2074,10 +2104,27 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
+ if (vf->adq_enabled) {
+ for (i = 0; i < I40E_MAX_VF_VSI; i++)
+ num_qps_all += vf->ch[i].num_qps;
+ if (num_qps_all != qci->num_queue_pairs) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ vsi_id = qci->vsi_id;
+
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (!vf->adq_enabled) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ qpi->txq.queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
vsi_queue_id = qpi->txq.queue_id;
if (qpi->txq.vsi_id != qci->vsi_id ||
@@ -2088,10 +2135,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
+ if (vf->adq_enabled)
+ vsi_id = vf->ch[idx].vsi_id;
if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
&qpi->rxq) ||
@@ -2115,7 +2160,6 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
j++;
vsi_queue_id++;
}
- vsi_id = vf->ch[idx].vsi_id;
}
}
/* set vsi num_queue_pairs in use to num configured by VF */
@@ -2174,7 +2218,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
- u16 vsi_id, vector_id;
+ u16 vsi_id;
i40e_status aq_ret = 0;
int i;
@@ -2183,16 +2227,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
+ if (irqmap_info->num_vectors >
+ vf->pf->hw.func_caps.num_msix_vectors_vf) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < irqmap_info->num_vectors; i++) {
map = &irqmap_info->vecmap[i];
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
/* validate msg params */
- if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ vsi_id = map->vsi_id;
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
aq_ret = I40E_ERR_PARAM;
@@ -2340,7 +2389,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
+ vqs->rx_queues > I40E_MAX_VF_QUEUES ||
+ vqs->tx_queues > I40E_MAX_VF_QUEUES) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2454,8 +2505,10 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
+ * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
+ */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 8
/**
@@ -2764,7 +2817,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
- aq_ret = I40E_ERR_PARAM;
+ if (vfl->num_elements > 1 || vfl->vlan_id[0])
+ aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -3126,7 +3180,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
}
if (mask.dst_port & data.dst_port) {
- if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) {
+ if (!data.dst_port) {
dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
vf->vf_id);
goto err;
@@ -3134,7 +3188,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
}
if (mask.src_port & data.src_port) {
- if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) {
+ if (!data.src_port) {
dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
vf->vf_id);
goto err;
@@ -3374,7 +3428,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
if (!vf->adq_enabled) {
@@ -3382,7 +3436,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: ADq is not enabled, can't apply cloud filter\n",
vf->vf_id);
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
if (i40e_validate_cloud_filter(vf, vcf)) {
@@ -3390,7 +3444,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: Invalid input/s, can't apply cloud filter\n",
vf->vf_id);
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
@@ -3451,13 +3505,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
vf->vf_id, i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- goto err;
+ goto err_free;
}
INIT_HLIST_NODE(&cfilter->cloud_node);
hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
+ /* the filter is now owned by the list; drop the local reference */
+ cfilter = NULL;
vf->num_cloud_filters++;
-err:
+err_free:
+ kfree(cfilter);
+err_out:
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
aq_ret);
}
@@ -4009,6 +4067,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ bool allmulti = false, alluni = false;
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
@@ -4093,6 +4152,15 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* disable promisc modes in case they were enabled */
+ ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
+ allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
+ goto error_pvid;
+ }
+
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
@@ -4119,6 +4187,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+ alluni = true;
+
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+ allmulti = true;
+
/* Schedule the worker thread to take care of applying changes */
i40e_service_event_schedule(vsi->back);
@@ -4131,6 +4205,13 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
* default LAN MAC address.
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+ ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
+ goto error_pvid;
+ }
+
ret = 0;
error_pvid:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index f9621026beef..f65cc0c16550 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -17,6 +17,8 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0xE000
+#define I40E_MAX_VF_PROMISC_FLAGS 3
+
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
+ clear_bit(qid, vsi->af_xdp_zc_qps);
i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
index af4f94a6541e..e5ae4a1c0cff 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
@@ -14,7 +14,7 @@
#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 9b4d7cec2e18..06d1509d57f7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1315,7 +1315,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IAVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@@ -2358,13 +2358,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index e5d6f684437e..2d140ba83781 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,3 +17,4 @@ ice-y := ice_main.o \
ice_txrx.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
+ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 89440775aea1..792e6e42030e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,6 +34,7 @@
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
+#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
@@ -42,10 +43,21 @@
extern const char ice_drv_ver[];
#define ICE_BAR0 0
-#define ICE_DFLT_NUM_DESC 128
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
#define ICE_MAX_NUM_DESC 8160
+/* set default number of Rx/Tx descriptors to the minimum between
+ * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
+ */
+#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(union ice_32byte_rx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(struct ice_tx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+
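Worked through under the common assumptions of 4 KiB pages, a 32-byte Rx descriptor and a 16-byte Tx descriptor (neither size is spelled out in this hunk): 4096 / 32 = 128 and 4096 / 16 = 256, both already multiples of ICE_REQ_DESC_MULTIPLE (32) and well below ICE_MAX_NUM_DESC (8160), so the defaults come out to 128 Rx and 256 Tx descriptors. A small stand-alone check of the same arithmetic:

#include <stdio.h>

#define REQ_DESC_MULTIPLE 32u
#define MAX_NUM_DESC      8160u

/* round up to the next multiple, like the kernel's ALIGN() */
static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) / a * a;
}

static unsigned int dflt_num_desc(unsigned int page_size, unsigned int desc_size)
{
	unsigned int n = align_up(page_size / desc_size, REQ_DESC_MULTIPLE);

	return n < MAX_NUM_DESC ? n : MAX_NUM_DESC;
}

int main(void)
{
	/* assumes 4 KiB pages, 32-byte Rx and 16-byte Tx descriptors */
	printf("rx default: %u\n", dflt_num_desc(4096, 32));	/* 128 */
	printf("tx default: %u\n", dflt_num_desc(4096, 16));	/* 256 */
	return 0;
}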
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
@@ -71,6 +83,8 @@ extern const char ice_drv_ver[];
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
+#define ICE_NONQ_VECS_VF 1
+#define ICE_MAX_SCATTER_QS_PER_VF 16
#define ICE_MAX_BASE_QS_PER_VF 16
#define ICE_MAX_INTR_PER_VF 65
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
@@ -114,6 +128,23 @@ extern const char ice_drv_ver[];
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
+ ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
+#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -123,7 +154,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
- u8 ena_tc; /* TX map */
+ u8 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@@ -134,7 +165,7 @@ struct ice_res_tracker {
};
struct ice_qs_cfg {
- struct mutex *qs_mutex; /* will be assgined to &pf->avail_q_mutex */
+ struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
unsigned long *pf_map;
unsigned long pf_map_size;
unsigned int q_count;
@@ -224,6 +255,8 @@ struct ice_vsi {
s16 vf_id; /* VF ID for SR-IOV VSIs */
+ u16 ethtype; /* Ethernet protocol for pause frame */
+
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -247,6 +280,7 @@ struct ice_vsi {
u8 irqs_ready;
u8 current_isup; /* Sync 'link up' logging */
u8 stat_offsets_loaded;
+ u8 vlan_ena;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -257,26 +291,34 @@ struct ice_vsi {
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
- u16 num_desc;
+ u16 num_rx_desc;
+ u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
struct ice_q_vector {
struct ice_vsi *vsi;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- struct ice_ring_container rx;
- struct ice_ring_container tx;
- struct irq_affinity_notify affinity_notify;
+
u16 v_idx; /* index in the vsi->q_vector array. */
- u8 num_ring_tx; /* total number of Tx rings in vector */
+ u16 reg_idx;
u8 num_ring_rx; /* total number of Rx rings in vector */
- char name[ICE_INT_NAME_STR_LEN];
+ u8 num_ring_tx; /* total number of Tx rings in vector */
+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
*/
u8 intrl;
+
+ struct napi_struct napi;
+
+ struct ice_ring_container rx;
+ struct ice_ring_container tx;
+
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+
+ char name[ICE_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
@@ -285,7 +327,11 @@ enum ice_pf_flags {
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
+ ICE_FLAG_DCB_CAPABLE,
+ ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ ICE_FLAG_DISABLE_FW_LLDP,
+ ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -324,8 +370,8 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u16 num_lan_tx; /* num lan Tx queues setup */
- u16 num_lan_rx; /* num lan Rx queues setup */
+ u16 num_lan_tx; /* num LAN Tx queues setup */
+ u16 num_lan_rx; /* num LAN Rx queues setup */
u16 q_left_tx; /* remaining num Tx queues left unclaimed */
u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -339,6 +385,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
+#ifdef CONFIG_DCB
+ u16 dcbx_cap;
+#endif /* CONFIG_DCB */
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
@@ -351,14 +400,15 @@ struct ice_netdev_priv {
/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
- * @hw: pointer to hw struct
- * @vsi: pointer to vsi struct, can be NULL
+ * @hw: pointer to HW struct
+ * @vsi: pointer to VSI struct, can be NULL
* @q_vector: pointer to q_vector, can be NULL
*/
-static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
- struct ice_q_vector *q_vector)
+static inline void
+ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
+ struct ice_q_vector *q_vector)
{
- u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+ u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
((struct ice_pf *)hw->back)->hw_oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
@@ -374,10 +424,24 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
wr32(hw, GLINT_DYN_CTL(vector), val);
}
-static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+/**
+ * ice_find_vsi_by_type - Find and return VSI of a given type
+ * @pf: PF to search for VSI
+ * @type: Value indicating type of VSI we are looking for
+ */
+static inline struct ice_vsi *
+ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
- vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
- vsi->tc_cfg.numtc = 1;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
}
void ice_set_ethtool_ops(struct net_device *netdev);
@@ -388,5 +452,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
void ice_napi_del(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
+void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
+#endif /* CONFIG_DCB */
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 242c78469181..6ef083002f5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -62,7 +62,7 @@ struct ice_aqc_req_res {
#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin id of the SDP */
+ /* For SDP: pin ID of the SDP */
__le32 res_number;
/* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
__le16 status;
@@ -747,6 +747,32 @@ struct ice_aqc_delete_elem {
__le32 teid[1];
};
+/* Query Port ETS (indirect 0x040E)
+ *
+ * This indirect command is used to query port TC node configuration.
+ */
+struct ice_aqc_query_port_ets {
+ __le32 port_teid;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_port_ets_elem {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ /* 3 bits for UP per TC 0-7, 4th byte reserved */
+ __le32 up2tc;
+ u8 tc_bw_share[8];
+ __le32 port_eir_prof_id;
+ __le32 port_cir_prof_id;
+ /* 3 bits per Node priority to TC 0-7, 4th byte reserved */
+ __le32 tc_node_prio;
+#define ICE_TC_NODE_PRIO_S 0x4
+ u8 reserved1[4];
+ __le32 tc_node_teid[8]; /* Used for response, reserved in command */
+};
+
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -953,8 +979,9 @@ struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
-#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
-#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0)
+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
#define ICE_AQ_PHY_ENA_LINK BIT(3)
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
@@ -1023,7 +1050,7 @@ struct ice_aqc_get_link_status_data {
u8 ext_info;
#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
- /* Port TX Suspended */
+ /* Port Tx Suspended */
#define ICE_AQ_LINK_TX_S 2
#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
#define ICE_AQ_LINK_TX_ACTIVE 0
@@ -1119,9 +1146,9 @@ struct ice_aqc_nvm {
};
/**
- * Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to PF command (indirect 0x0801) ID is only used by PF
*
- * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to VF command (indirect 0x0802) ID is only used by PF
*
*/
struct ice_aqc_pf_vf_msg {
@@ -1131,6 +1158,126 @@ struct ice_aqc_pf_vf_msg {
__le32 addr_low;
};
+/* Get LLDP MIB (indirect 0x0A00)
+ * Note: This is also used by the LLDP MIB Change Event (0x0A01)
+ * as the format is the same.
+ */
+struct ice_aqc_lldp_get_mib {
+ u8 type;
+#define ICE_AQ_LLDP_MIB_TYPE_S 0
+#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S)
+#define ICE_AQ_LLDP_MIB_LOCAL 0
+#define ICE_AQ_LLDP_MIB_REMOTE 1
+#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2
+#define ICE_AQ_LLDP_BRID_TYPE_S 2
+#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S)
+#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0
+#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1
+/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */
+#define ICE_AQ_LLDP_TX_S 0x4
+#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S)
+#define ICE_AQ_LLDP_TX_ACTIVE 0
+#define ICE_AQ_LLDP_TX_SUSPENDED 1
+#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
+ * and the LLDP MIB Change Event (0x0A01). They are valid only in the
+ * Get LLDP MIB (0x0A00) response.
+ */
+ u8 reserved1;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Configure LLDP MIB Change Event (direct 0x0A01) */
+/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */
+struct ice_aqc_lldp_set_mib_change {
+ u8 command;
+#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+ u8 reserved[15];
+};
+
+/* Stop LLDP (direct 0x0A05) */
+struct ice_aqc_lldp_stop {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0)
+#define ICE_AQ_LLDP_AGENT_STOP 0x0
+#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK
+#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1)
+ u8 reserved[15];
+};
+
+/* Start LLDP (direct 0x0A06) */
+struct ice_aqc_lldp_start {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_START BIT(0)
+#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1)
+ u8 reserved[15];
+};
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * The command uses the generic descriptor struct and
+ * returns the struct below as an indirect response.
+ */
+struct ice_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+#define ICE_AQC_CEE_APP_FCOE_S 0
+#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S)
+#define ICE_AQC_CEE_APP_ISCSI_S 3
+#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S)
+#define ICE_AQC_CEE_APP_FIP_S 8
+#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S)
+ __le32 tlv_status;
+#define ICE_AQC_CEE_PG_STATUS_S 0
+#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S)
+#define ICE_AQC_CEE_PFC_STATUS_S 3
+#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S)
+#define ICE_AQC_CEE_FCOE_STATUS_S 8
+#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S)
+#define ICE_AQC_CEE_ISCSI_STATUS_S 11
+#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S)
+#define ICE_AQC_CEE_FIP_STATUS_S 16
+#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S)
+ u8 reserved[12];
+};
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBx
+ */
+struct ice_aqc_lldp_set_local_mib {
+ u8 type;
+#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
+#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
+#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
+#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
+#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting a specific LLDP agent, e.g. DCBx.
+ * The same structure is used for the response, with the command field
+ * being used as the status field.
+ */
+struct ice_aqc_lldp_stop_start_specific_agent {
+ u8 command;
+#define ICE_AQC_START_STOP_AGENT_M BIT(0)
+#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0
+#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M
+ u8 reserved[15];
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1144,6 +1291,9 @@ struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28
#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC
+#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \
+ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \
+ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
struct ice_aqc_get_set_rss_keys {
u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
@@ -1185,7 +1335,7 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
-/* Add TX LAN Queues (indirect 0x0C30) */
+/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
u8 reserved[3];
@@ -1194,7 +1344,7 @@ struct ice_aqc_add_txqs {
__le32 addr_low;
};
-/* This is the descriptor of each queue entry for the Add TX LAN Queues
+/* This is the descriptor of each queue entry for the Add Tx LAN Queues
* command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
*/
struct ice_aqc_add_txqs_perq {
@@ -1206,7 +1356,7 @@ struct ice_aqc_add_txqs_perq {
struct ice_aqc_txsched_elem info;
};
-/* The format of the command buffer for Add TX LAN Queues (0x0C30)
+/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
* each struct ice_aqc_add_tx_qgrp is variable due
* to the variable number of queues in each group!
@@ -1218,7 +1368,7 @@ struct ice_aqc_add_tx_qgrp {
struct ice_aqc_add_txqs_perq txqs[1];
};
-/* Disable TX LAN Queues (indirect 0x0C31) */
+/* Disable Tx LAN Queues (indirect 0x0C31) */
struct ice_aqc_dis_txqs {
u8 cmd_type;
#define ICE_AQC_Q_DIS_CMD_S 0
@@ -1240,7 +1390,7 @@ struct ice_aqc_dis_txqs {
__le32 addr_low;
};
-/* The buffer for Disable TX LAN Queues (indirect 0x0C31)
+/* The buffer for Disable Tx LAN Queues (indirect 0x0C31)
* contains the following structures, arrayed one after the
* other.
* Note: Since the q_id is 16 bits wide, if the
@@ -1387,8 +1537,15 @@ struct ice_aq_desc {
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
+ struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_lldp_get_mib lldp_get_mib;
+ struct ice_aqc_lldp_set_mib_change lldp_set_event;
+ struct ice_aqc_lldp_stop lldp_stop;
+ struct ice_aqc_lldp_start lldp_start;
+ struct ice_aqc_lldp_set_local_mib lldp_set_mib;
+ struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1421,6 +1578,8 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
ICE_AQ_RC_OK = 0, /* Success */
+ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ ICE_AQ_RC_ENOENT = 2, /* No such element */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
@@ -1473,6 +1632,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
+ ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
ice_aqc_opc_query_sched_res = 0x0412,
@@ -1490,6 +1650,14 @@ enum ice_adminq_opc {
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
ice_mbx_opc_send_msg_to_vf = 0x0802,
+ /* LLDP commands */
+ ice_aqc_opc_lldp_get_mib = 0x0A00,
+ ice_aqc_opc_lldp_set_mib_change = 0x0A01,
+ ice_aqc_opc_lldp_stop = 0x0A05,
+ ice_aqc_opc_lldp_start = 0x0A06,
+ ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ ice_aqc_opc_lldp_set_local_mib = 0x0A08,
+ ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
@@ -1497,7 +1665,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
- /* TX queue handling commands/events */
+ /* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 63f003441300..da7878529929 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -31,7 +31,7 @@
* @hw: pointer to the HW structure
*
* This function sets the MAC type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
+ * vendor ID and device ID stored in the HW structure.
*/
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
@@ -77,7 +77,7 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
/**
* ice_aq_manage_mac_read - manage MAC address read command
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the manage MAC read response
* @buf_size: Size of the virtual buffer
* @cd: pointer to command details structure or NULL
@@ -262,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-static enum ice_status
+enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -331,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
- return status;
+ return 0;
}
/**
@@ -358,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
*/
case ICE_RXDID_FLEX_NIC:
case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_FIN, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
+ ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
+ ICE_FLG_FIN, idx++);
/* flex flag 1 is not used for flexi-flag programming, skipping
* these four FLG64 bits.
*/
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
- ICE_RXFLG_EVLAN_x9100, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
- ICE_RXFLG_TNL0, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
+ ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
+ ICE_FLG_EVLAN_x9100, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
+ ICE_FLG_TNL0, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
break;
default:
@@ -418,7 +418,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
@@ -438,7 +438,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
@@ -477,7 +477,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @enable: enable certain FW logging events if true, disable all if false
*
* This function enables/disables the FW logging via Rx CQ events and a UART
@@ -626,7 +626,7 @@ out:
/**
* ice_output_fw_log
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @desc: pointer to the AQ message descriptor
* @buf: pointer to the buffer accompanying the AQ message
*
@@ -642,12 +642,12 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
/**
* ice_get_itr_intrl_gran - determine int/intrl granularity
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Determines the itr/intrl granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
-static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
@@ -664,13 +664,7 @@ static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
hw->itr_gran = ICE_ITR_GRAN_MAX_25;
hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
break;
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to determine itr/intrl granularity\n");
- return ICE_ERR_CFG;
}
-
- return 0;
}
/**
@@ -697,9 +691,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- status = ice_get_itr_intrl_gran(hw);
- if (status)
- return status;
+ ice_get_itr_intrl_gran(hw);
status = ice_init_all_ctrlq(hw);
if (status)
@@ -731,7 +723,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
- /* set the back pointer to hw */
+ /* set the back pointer to HW */
hw->port_info->hw = hw;
/* Initialize port_info struct with switch configuration data */
@@ -988,7 +980,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* @ice_rxq_ctx: pointer to the rxq context
* @rxq_index: the index of the Rx queue
*
- * Copies rxq context from dense structure to hw register space
+ * Copies rxq context from dense structure to HW register space
*/
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
@@ -1001,7 +993,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
if (rxq_index > QRX_CTRL_MAX_INDEX)
return ICE_ERR_PARAM;
- /* Copy each dword separately to hw */
+ /* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
wr32(hw, QRX_CONTEXT(i, rxq_index),
*((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
@@ -1045,7 +1037,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
- * it to hw register space
+ * it to HW register space
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1100,8 +1092,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*
* Dumps debug log about control command with descriptor contents.
*/
-void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
- void *buf, u16 buf_len)
+void
+ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
+ u16 buf_len)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 len;
@@ -1143,7 +1136,7 @@ void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @desc: descriptor describing the command
* @buf: buffer to use for indirect commands (NULL for direct commands)
* @buf_size: size of buffer for indirect commands (0 for direct commands)
@@ -1160,7 +1153,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
/**
* ice_aq_get_fw_ver
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cd: pointer to command details structure or NULL
*
* Get the firmware version (0x0001) from the admin queue commands
@@ -1194,7 +1187,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
/**
* ice_aq_q_shutdown
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
@@ -1217,8 +1210,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
/**
* ice_aq_req_res
- * @hw: pointer to the hw struct
- * @res: resource id
+ * @hw: pointer to the HW struct
+ * @res: resource ID
* @access: access type
* @sdp_number: resource number
* @timeout: the maximum time in ms that the driver may hold the resource
@@ -1303,8 +1296,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/**
* ice_aq_release_res
- * @hw: pointer to the hw struct
- * @res: resource id
+ * @hw: pointer to the HW struct
+ * @res: resource ID
* @sdp_number: resource number
* @cd: pointer to command details structure or NULL
*
@@ -1330,7 +1323,7 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
/**
* ice_acquire_res
* @hw: pointer to the HW structure
- * @res: resource id
+ * @res: resource ID
* @access: access type (read or write)
* @timeout: timeout in milliseconds
*
@@ -1392,7 +1385,7 @@ ice_acquire_res_exit:
/**
* ice_release_res
* @hw: pointer to the HW structure
- * @res: resource id
+ * @res: resource ID
*
* This function will release a resource using the proper Admin Command.
*/
@@ -1404,7 +1397,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
- * results in an admin Q timeout, so handle them correctly
+ * results in an admin queue timeout, so handle them correctly
*/
while ((status == ICE_ERR_AQ_TIMEOUT) &&
(total_delay < hw->adminq.sq_cmd_timeout)) {
@@ -1415,13 +1408,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
- * ice_get_guar_num_vsi - determine number of guar VSI for a PF
- * @hw: pointer to the hw structure
+ * ice_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
*
* Determine the number of valid functions by going through the bitmap returned
- * from parsing capabilities and use this to calculate the number of VSI per PF.
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
*/
-static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
u8 funcs;
@@ -1432,12 +1427,12 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
if (!funcs)
return 0;
- return ICE_MAX_VSI / funcs;
+ return max / funcs;
}
/**
* ice_parse_caps - parse function/device capabilities
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: pointer to a buffer containing function/device capability records
* @cap_count: number of capability records in the list
* @opc: type of capabilities list to parse
@@ -1512,7 +1507,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
+ func_p->guar_num_vsi =
+ ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
number);
@@ -1578,7 +1574,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
/**
* ice_aq_discover_caps - query function/device capabilities
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the capabilities
* @buf_size: Size of the virtual buffer
* @cap_count: cap count needed if AQ err==ENOMEM
@@ -1617,8 +1613,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* @hw: pointer to the hardware structure
* @opc: capabilities type to discover - pass in the command opcode
*/
-static enum ice_status ice_discover_caps(struct ice_hw *hw,
- enum ice_adminq_opc opc)
+static enum ice_status
+ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
enum ice_status status;
u32 cap_count;
@@ -1677,7 +1673,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
/**
* ice_aq_manage_mac_write - manage MAC address write command
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
* @flags: flags to control write behavior
* @cd: pointer to command details structure or NULL
@@ -1705,7 +1701,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
/**
* ice_aq_clear_pxe_mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
@@ -1721,7 +1717,7 @@ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_clear_pxe_mode - clear pxe operations mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Make sure all PXE mode settings are cleared, including things
* like descriptor fetch/write-back mode.
@@ -1737,10 +1733,10 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
*
- * This helper function will convert an entry in phy type structure
+ * This helper function will convert an entry in PHY type structure
* [phy_type_low, phy_type_high] to its corresponding link speed.
* Note: In the structure of [phy_type_low, phy_type_high], there should
- * be one bit set, as this function will convert one phy type to its
+ * be one bit set, as this function will convert one PHY type to its
* speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
@@ -1884,10 +1880,10 @@ void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap)
{
- u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
u64 pt_high;
u64 pt_low;
int index;
+ u16 speed;
/* We first check with low part of phy_type */
for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
@@ -1910,7 +1906,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
/**
* ice_aq_set_phy_cfg
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @lport: logical port number
* @cfg: structure with PHY configuration data to be set
* @cd: pointer to command details structure or NULL
@@ -1929,6 +1925,15 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
if (!cfg)
return ICE_ERR_PARAM;
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
+ ice_debug(hw, ICE_DBG_PHY,
+ "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
+ cfg->caps);
+
+ cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
+ }
+
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
desc.params.set_phy.lport_num = lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -2016,7 +2021,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- /* Get the current phy config */
+ /* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status) {
@@ -2027,8 +2032,10 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
/* clear the old pause settings */
cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
/* set the new capabilities */
cfg.caps |= pause_mask;
+
/* If the capabilities have changed, then set the new config */
if (cfg.caps != pcaps->caps) {
int retry_count, retry_max = 10;
@@ -2136,6 +2143,32 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
+ * ice_aq_set_event_mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set event mask (0x0613)
+ */
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_event_mask *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
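
An illustrative caller sketch (not part of this patch): arming the new Set
Event Mask command (0x0613) for a port. The mask value below is a placeholder;
real callers OR together the ICE_AQ_LINK_EVENT_* bits defined alongside the
command structure.

static int ice_example_arm_link_events(struct ice_hw *hw, u8 lport)
{
	/* placeholder: enable whichever link events the caller cares about */
	u16 mask = 0x1;

	if (ice_aq_set_event_mask(hw, lport, mask, NULL))
		return -EIO;

	return 0;
}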
+
+/**
* ice_aq_set_port_id_led
* @pi: pointer to the port information
* @is_orig_mode: is this LED set to original mode (by the net-list)
@@ -2297,7 +2330,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
/**
* __ice_aq_get_set_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_id: VSI FW index
* @key: pointer to key info struct
* @set: set true to set the key, false to get the key
@@ -2332,7 +2365,7 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
/**
* ice_aq_get_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
* @key: pointer to key info struct
*
@@ -2351,7 +2384,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_aq_set_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
* @keys: pointer to key info struct
*
@@ -2436,7 +2469,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
- * @rst_src: if called due to reset, specifies the RST source
+ * @rst_src: if called due to reset, specifies the reset source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
@@ -2476,7 +2509,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
break;
case ICE_VF_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
- /* In this case, FW expects vmvf_num to be absolute VF id */
+ /* In this case, FW expects vmvf_num to be absolute VF ID */
cmd->vmvf_and_timeout |=
cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
ICE_AQC_Q_DIS_VMVF_NUM_M);
@@ -2534,8 +2567,8 @@ do_aq:
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -2573,8 +2606,8 @@ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -2616,8 +2649,8 @@ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -2667,8 +2700,8 @@ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -2750,24 +2783,50 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
+ * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @q_handle: software queue handle
+ */
+static struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ struct ice_q_ctx *q_ctx;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return NULL;
+ if (q_handle >= vsi->num_lan_q_entries[tc])
+ return NULL;
+ if (!vsi->lan_q_ctx[tc])
+ return NULL;
+ q_ctx = vsi->lan_q_ctx[tc];
+ return &q_ctx[q_handle];
+}
+
+/**
* ice_ena_vsi_txq
* @pi: port information structure
* @vsi_handle: software VSI handle
- * @tc: tc number
+ * @tc: TC number
+ * @q_handle: software queue handle
* @num_qgrps: Number of added queue groups
* @buf: list of queue groups to be added
* @buf_size: size of buffer for indirect command
* @cd: pointer to command details structure or NULL
*
- * This function adds one lan q
+ * This function adds one LAN queue
*/
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
- struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
+ struct ice_q_ctx *q_ctx;
enum ice_status status;
struct ice_hw *hw;
@@ -2784,6 +2843,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
mutex_lock(&pi->sched_lock);
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
+ if (!q_ctx) {
+ ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
+ q_handle);
+ status = ICE_ERR_PARAM;
+ goto ena_txq_exit;
+ }
+
/* find a parent node */
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
@@ -2803,14 +2870,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
* Bit 5-6.
* - Bit 7 is reserved.
* Without setting the generic section as valid in valid_sections, the
- * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
+ * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
- /* add the lan q */
+ /* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
le16_to_cpu(buf->txqs[0].txq_id),
hw->adminq.sq_last_status);
goto ena_txq_exit;
@@ -2818,8 +2885,9 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+ q_ctx->q_handle = q_handle;
- /* add a leaf node into schduler tree q layer */
+ /* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
ena_txq_exit:
@@ -2830,35 +2898,43 @@ ena_txq_exit:
/**
* ice_dis_vsi_txq
* @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
* @num_queues: number of queues
+ * @q_handles: pointer to software queue handle array
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
- * @rst_src: if called due to reset, specifies the RST source
+ * @rst_src: if called due to reset, specifies the reset source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* This function removes queues and their corresponding nodes in SW DB
*/
enum ice_status
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handles, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item qg_list;
+ struct ice_q_ctx *q_ctx;
u16 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
- /* if queue is disabled already yet the disable queue command has to be
- * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
- * any queue information
- */
- if (!num_queues && rst_src)
- return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
- NULL);
+ if (!num_queues) {
+ /* if the queues are already disabled but the disable queue command
+ * still has to be sent to complete the VF reset, then call
+ * ice_aq_dis_lan_txq without any queue information
+ */
+ if (rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ vmvf_num, NULL);
+ return ICE_ERR_CFG;
+ }
mutex_lock(&pi->sched_lock);
@@ -2868,6 +2944,17 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
if (!node)
continue;
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ if (!q_ctx) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
+ q_handles[i]);
+ continue;
+ }
+ if (q_ctx->q_handle != q_handles[i]) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
+ q_ctx->q_handle, q_handles[i]);
+ continue;
+ }
qg_list.parent_teid = node->info.parent_teid;
qg_list.num_qs = 1;
qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
@@ -2878,18 +2965,19 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
if (status)
break;
ice_free_sched_node(pi, node);
+ q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
}
mutex_unlock(&pi->sched_lock);
return status;
}
/**
- * ice_cfg_vsi_qs - configure the new/exisiting VSI queues
+ * ice_cfg_vsi_qs - configure the new/existing VSI queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @maxqs: max queues array per TC
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
*
* This function adds/updates the VSI queues per TC.
*/
@@ -2908,7 +2996,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
mutex_lock(&pi->sched_lock);
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
/* configuration is possible only if TC node is present */
if (!ice_sched_get_tc_node(pi, i))
continue;
@@ -2924,13 +3012,13 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
}
/**
- * ice_cfg_vsi_lan - configure VSI lan queues
+ * ice_cfg_vsi_lan - configure VSI LAN queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
- * @max_lanqs: max lan queues array per TC
+ * @max_lanqs: max LAN queues array per TC
*
- * This function adds/updates the VSI lan queues per TC.
+ * This function adds/updates the VSI LAN queues per TC.
*/
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
@@ -2942,7 +3030,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
/**
* ice_replay_pre_init - replay pre initialization
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
@@ -2966,7 +3054,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
/**
* ice_replay_vsi - replay VSI configuration
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: driver VSI handle
*
* Restore all VSI configuration after reset. It is required to call this
@@ -2993,7 +3081,7 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_replay_post - post replay configuration cleanup
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Post replay cleanup.
*/
@@ -3012,8 +3100,9 @@ void ice_replay_post(struct ice_hw *hw)
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
-void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+void
+ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
u64 new_data;
@@ -3043,8 +3132,9 @@ void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
-void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat)
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
{
u32 new_data;
@@ -3063,3 +3153,28 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
/* to manage the potential roll-over */
*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}
+
+/**
+ * ice_sched_query_elem - query element information from HW
+ * @hw: pointer to the HW struct
+ * @node_teid: node TEID to be queried
+ * @buf: buffer to element information
+ *
+ * This function queries HW element information
+ */
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf)
+{
+ u16 buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf);
+ memset(buf, 0, buf_size);
+ buf->generic[0].node_teid = cpu_to_le32(node_teid);
+ status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+ NULL);
+ if (status || num_elem_ret != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+ return status;
+}
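
A hedged usage sketch (not from the patch) of the new ice_sched_query_elem()
helper; the node TEID is assumed to have been saved earlier, for example from
a scheduler node added via ice_sched_add_node().

static void ice_example_dump_sched_node(struct ice_hw *hw, u32 node_teid)
{
	struct ice_aqc_get_elem buf;

	/* the helper zeroes buf and queries exactly one element */
	if (ice_sched_query_elem(hw, node_teid, &buf))
		return;

	ice_debug(hw, ICE_DBG_SCHED, "queried node TEID %u\n",
		  le32_to_cpu(buf.generic[0].node_teid));
}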
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index d7c7c2ed8823..f1ddebf45231 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -9,8 +9,8 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
-void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
- u16 buf_len);
+void
+ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -28,8 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
- u16 *data);
+enum ice_status
+ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -89,25 +89,37 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd);
+enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
enum ice_status
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
- struct ice_sq_cd *cmd_details);
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handle, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd);
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
- struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
-void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
-void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf);
#endif /* _ICE_COMMON_H_ */
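
An illustrative sketch (not part of this patch) of how a caller adapts to the
widened ice_dis_vsi_txq() prototype, which now takes the VSI handle, TC and a
per-queue software handle in addition to the queue ID and TEID.

static enum ice_status
ice_example_stop_one_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			 u16 q_handle, u16 q_id, u32 q_teid)
{
	/* not a reset path, so pass ICE_NO_RESET and a zero VM/VF number */
	return ice_dis_vsi_txq(pi, vsi_handle, tc, 1, &q_handle, &q_id,
			       &q_teid, ICE_NO_RESET, 0, NULL);
}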
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 2bf5e11f559a..cc8cb5fdcdc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -51,7 +51,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
/**
* ice_check_sq_alive
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if Queue is enabled else false.
@@ -287,7 +287,7 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
- * Configure base address and length registers for the receive (event q)
+ * Configure base address and length registers for the receive (event queue)
*/
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
@@ -751,7 +751,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if the firmware has processed all descriptors on the
@@ -767,7 +767,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/**
* ice_sq_send_cmd - send command to Control Queue (ATQ)
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buf: buffer to use for indirect commands (or NULL for direct commands)
@@ -962,7 +962,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
/**
* ice_clean_rq_elem
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 0038a4109c99..e0585394d984 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -79,6 +79,7 @@ struct ice_rq_event_info {
/* Control Queue information */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
+ enum ice_aq_err rq_last_status; /* last status on receive queue */
struct ice_ctl_q_ring rq; /* receive queue */
struct ice_ctl_q_ring sq; /* send queue */
u32 sq_cmd_timeout; /* send queue cmd write back timeout */
@@ -86,10 +87,9 @@ struct ice_ctl_q_info {
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
+ enum ice_aq_err sq_last_status; /* last status on send queue */
struct mutex sq_lock; /* Send queue lock */
struct mutex rq_lock; /* Receive queue lock */
- enum ice_aq_err sq_last_status; /* last status on send queue */
- enum ice_aq_err rq_last_status; /* last status on receive queue */
};
#endif /* _ICE_CONTROLQ_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
new file mode 100644
index 000000000000..8bbf48e04a1c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -0,0 +1,1392 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_sched.h"
+#include "ice_dcb.h"
+
+/**
+ * ice_aq_get_lldp_mib
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet). (0x0A00)
+ */
+static enum ice_status
+ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
+ u16 buf_size, u16 *local_len, u16 *remote_len,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_get_mib *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.lldp_get_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
+
+ cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M;
+ cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M;
+
+ desc.datalen = cpu_to_le16(buf_size);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status) {
+ if (local_len)
+ *local_len = le16_to_cpu(cmd->local_len);
+ if (remote_len)
+ *remote_len = le16_to_cpu(cmd->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_cfg_lldp_mib_change
+ * @hw: pointer to the HW struct
+ * @ena_update: Enable or Disable event posting
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes (0x0A01)
+ */
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_mib_change *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_event;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
+
+ if (!ena_update)
+ cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_stop_lldp
+ * @hw: pointer to the HW struct
+ * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown
+ * False if LLDP Agent needs to be Stopped
+ * @cd: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent (0x0A05)
+ */
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_stop;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
+
+ if (shutdown_lldp_agent)
+ cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_start_lldp
+ * @hw: pointer to the HW struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports. (0x0A06)
+ */
+enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_start *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_start;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
+
+ cmd->command = ICE_AQ_LLDP_AGENT_START;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
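
A hedged sketch (not part of this patch) showing the intended pairing of the
two agent-control commands above: shut the FW LLDP agent down to take over
LLDP in software, and start it again to hand control back.

static void ice_example_toggle_fw_lldp(struct ice_hw *hw, bool enable)
{
	if (enable)
		ice_aq_start_lldp(hw, NULL);
	else
		ice_aq_stop_lldp(hw, true, NULL);
}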
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+static enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = cpu_to_le16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_get_dcbx_status
+ * @hw: pointer to the HW struct
+ *
+ * Get the DCBX status from the Firmware
+ */
+u8 ice_get_dcbx_status(struct ice_hw *hw)
+{
+ u32 reg;
+
+ reg = rd32(hw, PRTDCB_GENS);
+ return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >>
+ PRTDCB_GENS_DCBX_STATUS_S);
+}
+
+/**
+ * ice_parse_ieee_ets_common_tlv
+ * @buf: Data buffer to be parsed for ETS CFG/REC data
+ * @ets_cfg: Container to store parsed data
+ *
+ * Parses the common data of IEEE 802.1Qaz ETS CFG/REC TLV
+ */
+static void
+ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ ets_cfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >>
+ ICE_IEEE_ETS_PRIO_1_S);
+ ets_cfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >>
+ ICE_IEEE_ETS_PRIO_0_S);
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 9 | 10| 11| 12| 13| 14| 15| 16|
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ ets_cfg->tcbwtable[i] = buf[offset];
+ ets_cfg->tsatable[i] = buf[ICE_MAX_TRAFFIC_CLASS + offset++];
+ }
+}
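
Worked example (illustrative, assuming ICE_IEEE_ETS_PRIO_1_M selects bits 7:4
as the table above shows): if buf[0] of the Priority Assignment Table is 0x21,
the upper nibble gives prio_table[0] = 2 (priority 0 maps to TC 2) and the
lower nibble gives prio_table[1] = 1 (priority 1 maps to TC 1).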
+
+/**
+ * ice_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >>
+ ICE_IEEE_ETS_WILLING_S);
+ etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S);
+ etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >>
+ ICE_IEEE_ETS_MAXTC_S);
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_parse_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], &dcbcfg->etsrec);
+}
+
+/**
+ * ice_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >>
+ ICE_IEEE_PFC_WILLING_S);
+ dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S);
+ dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >>
+ ICE_IEEE_PFC_CAP_S);
+ dcbcfg->pfc.pfcena = buf[1];
+}
+
+/**
+ * ice_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ */
+static void
+ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 offset = 0;
+ u16 typelen;
+ int i = 0;
+ u16 len;
+ u8 *buf;
+
+ typelen = ntohs(tlv->typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ buf = tlv->tlvinfo;
+
+ /* Removing sizeof(ouisubtype) and reserved byte from len.
+ * Remaining len div 3 is number of APP TLVs.
+ */
+ len -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < len) {
+ dcbcfg->app[i].priority = ((buf[offset] &
+ ICE_IEEE_APP_PRIO_M) >>
+ ICE_IEEE_APP_PRIO_S);
+ dcbcfg->app[i].selector = ((buf[offset] &
+ ICE_IEEE_APP_SEL_M) >>
+ ICE_IEEE_APP_SEL_S);
+ dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
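
Worked example (illustrative, following the bit layout in the table above): an
FCoE entry of 0x61 0x89 0x06 decodes to priority 3 (bits 23:21), selector 1
(bits 18:16) and protocol ID 0x8906 from the last two octets.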
+
+/**
+ * ice_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ switch (subtype) {
+ case ICE_IEEE_SUBTYPE_ETS_CFG:
+ ice_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_ETS_REC:
+ ice_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_PFC_CFG:
+ ice_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_APP_PRI:
+ ice_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ */
+static void
+ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ etscfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ etscfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * ice_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ */
+static void
+ice_parse_cee_pfccfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcena = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * ice_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ */
+static void
+ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, typelen, offset = 0;
+ struct ice_cee_app_prio *app;
+ u8 i;
+
+ typelen = ntohs(tlv->hdr.typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+
+ dcbcfg->numapps = len / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > ICE_DCBX_MAX_APPS)
+ dcbcfg->numapps = ICE_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct ice_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < ICE_MAX_USER_PRIORITY; up++)
+ if (app->prio_map & BIT(up))
+ break;
+
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & ICE_CEE_APP_SELECTOR_M);
+ switch (selector) {
+ case ICE_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = ICE_APP_SEL_ETHTYPE;
+ break;
+ case ICE_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = ICE_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].prot_id = ntohs(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * ice_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u16 len, tlvlen, typelen;
+ u32 ouisubtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ /* Return if not CEE DCBX */
+ if (subtype != ICE_CEE_DCBX_TYPE)
+ return;
+
+ typelen = ntohs(tlv->typelen);
+ tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = sizeof(tlv->typelen) + sizeof(ouisubtype) +
+ sizeof(struct ice_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) {
+ u16 sublen;
+
+ typelen = ntohs(sub_tlv->hdr.typelen);
+ sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >>
+ ICE_LLDP_TLV_TYPE_S);
+ switch (subtype) {
+ case ICE_CEE_SUBTYPE_PG_CFG:
+ ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_PFC_CFG:
+ ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_APP_PRI:
+ ice_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct ice_cee_feat_tlv *)
+ ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * ice_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Currently the IEEE 802.1Qaz and CEE DCBX TLVs are supported;
+ * all other organization-specific TLVs are ignored
+ */
+static void
+ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S);
+ switch (oui) {
+ case ICE_IEEE_8021QAZ_OUI:
+ ice_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case ICE_CEE_DCBX_OUI:
+ ice_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_lldp_to_dcb_cfg
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ */
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_lldp_org_tlv *tlv;
+ enum ice_status ret = 0;
+ u16 offset = 0;
+ u16 typelen;
+ u16 type;
+ u16 len;
+
+ if (!lldpmib || !dcbcfg)
+ return ICE_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HLEN;
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelen = ntohs(tlv->typelen);
+ type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ offset += sizeof(typelen) + len;
+
+ /* END TLV or beyond LLDPDU size */
+ if (type == ICE_TLV_TYPE_END || offset > ICE_LLDPDU_SIZE)
+ break;
+
+ switch (type) {
+ case ICE_TLV_TYPE_ORG:
+ ice_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+
+ return ret;
+}
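
Worked example (illustrative, assuming the standard LLDP split of a 7-bit type
and 9-bit length that the ICE_LLDP_TLV_TYPE_M/ICE_LLDP_TLV_LEN_M masks
encode): a typelen of 0xFE19 decodes to type 127 (organizationally specific)
and length 25, so the walker advances by sizeof(typelen) + 25 bytes to the
next TLV.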
+
+/**
+ * ice_aq_get_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the firmware
+ */
+static enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ enum ice_status ret;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
+ ICE_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (!ret)
+ /* Parse LLDP MIB to get DCB configuration */
+ ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg);
+
+ devm_kfree(ice_hw_to_dev(hw), lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the HW struct
+ * @start_dcbx_agent: True if DCBx Agent needs to be started
+ * False if DCBx Agent needs to be stopped
+ * @dcbx_agent_status: FW indicates back the DCBx agent status
+ * True if DCBx Agent is active
+ * False if DCBx Agent is stopped
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded DCBX Agent. Even when this wrapper returns
+ * success, the caller must check whether the FW reported the requested
+ * agent state back in dcbx_agent_status and react accordingly. (0x0A09)
+ */
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop_start_specific_agent *cmd;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+ u16 opcode;
+
+ cmd = &desc.params.lldp_agent_ctrl;
+
+ opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+
+ if (start_dcbx_agent)
+ cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ *dcbx_agent_status = false;
+
+ if (!status &&
+ cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX)
+ *dcbx_agent_status = true;
+
+ return status;
+}
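
A hedged caller sketch (not part of this patch): ask FW to start the DCBX
agent and confirm it actually came up, as the kernel-doc above requires.

static bool ice_example_start_dcbx(struct ice_hw *hw)
{
	bool dcbx_agent_status = false;

	if (ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL))
		return false;

	/* FW echoes the resulting agent state back to the caller */
	return dcbx_agent_status;
}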
+
+/**
+ * ice_aq_get_cee_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware (0x0A07)
+ */
+static enum ice_status
+ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
+ struct ice_aqc_get_cee_dcb_cfg_resp *buff,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
+
+ return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd);
+}
+
+/**
+ * ice_cee_to_dcb_cfg
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ */
+static void
+ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, err, sync, oper, app_index, ice_app_sel_type;
+ u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+ u16 ice_app_prot_id_type;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ dcbcfg->etscfg.prio_table[i * 2] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ dcbcfg->etscfg.prio_table[i * 2 + 1] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ }
+
+ ice_for_each_traffic_class(i) {
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ if (dcbcfg->etscfg.prio_table[i] == ICE_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prio_table[i] = cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+
+ app_index = 0;
+ for (i = 0; i < 3; i++) {
+ if (i == 0) {
+ /* FCoE APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FCOE_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FCOE_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE;
+ } else if (i == 1) {
+ /* iSCSI APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_ISCSI_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
+ ice_app_sel_type = ICE_APP_SEL_TCPIP;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+ } else {
+ /* FIP APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FIP_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FIP;
+ }
+
+ status = (tlv_status & ice_aqc_cee_status_mask) >>
+ ice_aqc_cee_status_shift;
+ err = (status & ICE_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & ICE_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & ICE_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE/iSCSI/FIP APP if Error is False and
+ * Oper/Sync is True
+ */
+ if (!err && sync && oper) {
+ dcbcfg->app[app_index].priority =
+ (app_prio & ice_aqc_cee_app_mask) >>
+ ice_aqc_cee_app_shift;
+ dcbcfg->app[app_index].selector = ice_app_sel_type;
+ dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
+ app_index++;
+ }
+ }
+
+ dcbcfg->numapps = app_index;
+}
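
Worked example (illustrative, assuming ICE_CEE_PGID_PRIO_0_M selects the low
nibble): if the CEE Priority Group sub-TLV carried 0x32 for priorities 0 and
1, firmware reports oper_prio_tc[0] as 0x23, and the reversed extraction above
still recovers priority 0 -> TC 3 and priority 1 -> TC 2.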
+
+/**
+ * ice_get_ieee_or_cee_dcb_cfg
+ * @pi: port information structure
+ * @dcbx_mode: mode of DCBX (IEEE or CEE)
+ *
+ * Get IEEE or CEE mode DCB configuration from the Firmware
+ */
+static enum ice_status
+ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = NULL;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ if (dcbx_mode == ICE_DCBX_MODE_IEEE)
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ else if (dcbx_mode == ICE_DCBX_MODE_CEE)
+ dcbx_cfg = &pi->desired_dcbx_cfg;
+
+ /* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
+ * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
+ */
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_LOCAL,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ dcbx_cfg = &pi->remote_dcbx_cfg;
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * ice_get_dcb_cfg
+ * @pi: port information structure
+ *
+ * Get DCB configuration from the Firmware
+ */
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct ice_dcbx_cfg *dcbx_cfg;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
+ if (!ret) {
+ /* CEE mode */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+ dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
+ ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+ } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+ /* CEE mode not enabled; try querying IEEE data */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_init_dcb
+ * @hw: pointer to the HW struct
+ *
+ * Update DCB configuration from the Firmware
+ */
+enum ice_status ice_init_dcb(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret = 0;
+
+ if (!hw->func_caps.common_cap.dcb)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ pi->is_sw_lldp = true;
+
+ /* Get DCBX status */
+ pi->dcbx_status = ice_get_dcbx_status(hw);
+
+ if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
+ pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) {
+ /* Get current DCBX configuration */
+ ret = ice_get_dcb_cfg(pi);
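+ /* an EPERM AQ status means the FW LLDP agent is stopped, so fall back to SW LLDP */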
+ pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
+ if (ret)
+ return ret;
+ } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ return ICE_ERR_NOT_READY;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
+ if (!ret)
+ pi->is_sw_lldp = false;
+
+ return ret;
+}
+
+/**
+ * ice_add_ieee_ets_common_tlv
+ * @buf: Data buffer to be populated with ice_dcb_ets_cfg data
+ * @ets_cfg: Container for ice_dcb_ets_cfg data
+ *
+ * Populate the TLV buffer with ice_dcb_ets_cfg data
+ */
+static void
+ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 priority0, priority1;
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ priority0 = ets_cfg->prio_table[i * 2] & 0xF;
+ priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ buf[offset] = ets_cfg->tcbwtable[i];
+ buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i];
+ offset++;
+ }
+}
+
+/**
+ * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u8 maxtcwilling = 0;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S);
+ maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+ buf[0] = maxtcwilling;
+
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etsrec;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+
+ /* First Octet is reserved */
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etsrec);
+}
+
+/**
+ * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(ICE_IEEE_PFC_WILLING_S);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(ICE_IEEE_PFC_MBC_S);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
+}
+
+/**
+ * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store which holds the APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ */
+static void
+ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 typelen, len, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs, so just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+ /* len includes size of ouisubtype + 1 reserved + 3*numapps */
+ len = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF));
+ tlv->typelen = htons(typelen);
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: Fill TLV data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: Type of IEEE TLV
+ *
+ * Add tlv information
+ */
+static void
+ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the LLDP MIB buffer to be populated
+ * @miblen: length of the populated LLDP MIB, returned to the caller
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Convert the DCB configuration to MIB format
+ */
+static void
+ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, offset = 0, tlvid = ICE_TLV_ID_START;
+ struct ice_lldp_org_tlv *tlv;
+ u16 typelen;
+
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelen = ntohs(tlv->typelen);
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
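+ /* the TLV length covers the value (OUI/subtype + info); add 2 for the typelen header */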
+ if (len)
+ offset += len + 2;
+ /* END TLV or beyond LLDPDU size */
+ if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU ||
+ offset > ICE_LLDPDU_SIZE)
+ break;
+ /* Move to next TLV */
+ if (len)
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+ *miblen = offset;
+}
+
+/**
+ * ice_set_dcb_cfg - Set the local LLDP MIB to FW
+ * @pi: port information structure
+ *
+ * Set DCB configuration to the Firmware
+ */
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+{
+ u8 mib_type, *lldpmib = NULL;
+ struct ice_dcbx_cfg *dcbcfg;
+ enum ice_status ret;
+ struct ice_hw *hw;
+ u16 miblen;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ /* update the HW local config */
+ dcbcfg = &pi->local_dcbx_cfg;
+ /* Allocate the LLDPDU */
+ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+ mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING;
+
+ ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen,
+ NULL);
+
+ devm_kfree(ice_hw_to_dev(hw), lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_query_port_ets - query port ETS configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query the current port ETS configuration
+ */
+static enum ice_status
+ice_aq_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_query_port_ets *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ cmd = &desc.params.port_ets;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
+ cmd->port_teid = pi->root->info.node_teid;
+
+ status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd);
+ return status;
+}
+
+/**
+ * ice_update_port_tc_tree_cfg - update TC tree configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ *
+ * update the SW DB with the new TC changes
+ */
+static enum ice_status
+ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf)
+{
+ struct ice_sched_node *node, *tc_node;
+ struct ice_aqc_get_elem elem;
+ enum ice_status status = 0;
+ u32 teid1, teid2;
+ u8 i, j;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ /* suspend the missing TC nodes */
+ for (i = 0; i < pi->root->num_children; i++) {
+ teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
+ ice_for_each_traffic_class(j) {
+ teid2 = le32_to_cpu(buf->tc_node_teid[j]);
+ if (teid1 == teid2)
+ break;
+ }
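+ /* j < ICE_MAX_TRAFFIC_CLASS means a matching TEID was found, so keep this node */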
+ if (j < ICE_MAX_TRAFFIC_CLASS)
+ continue;
+ /* TC is missing */
+ pi->root->children[i]->in_use = false;
+ }
+ /* add the new TC nodes */
+ ice_for_each_traffic_class(j) {
+ teid2 = le32_to_cpu(buf->tc_node_teid[j]);
+ if (teid2 == ICE_INVAL_TEID)
+ continue;
+ /* Is it already present in the tree? */
+ for (i = 0; i < pi->root->num_children; i++) {
+ tc_node = pi->root->children[i];
+ if (!tc_node)
+ continue;
+ teid1 = le32_to_cpu(tc_node->info.node_teid);
+ if (teid1 == teid2) {
+ tc_node->tc_num = j;
+ tc_node->in_use = true;
+ break;
+ }
+ }
+ if (i < pi->root->num_children)
+ continue;
+ /* new TC */
+ status = ice_sched_query_elem(pi->hw, teid2, &elem);
+ if (!status)
+ status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+ if (status)
+ break;
+ /* update the TC number */
+ node = ice_sched_find_node_by_teid(pi->root, teid2);
+ if (node)
+ node->tc_num = j;
+ }
+ return status;
+}
+
+/**
+ * ice_query_port_ets - query port ETS configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query the current port ETS configuration and update the
+ * SW DB with the TC changes
+ */
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
+ if (!status)
+ status = ice_update_port_tc_tree_cfg(pi, buf);
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
new file mode 100644
index 000000000000..e7d4416e3a66
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_H_
+#define _ICE_DCB_H_
+
+#include "ice_type.h"
+
+#define ICE_DCBX_STATUS_NOT_STARTED 0
+#define ICE_DCBX_STATUS_IN_PROGRESS 1
+#define ICE_DCBX_STATUS_DONE 2
+#define ICE_DCBX_STATUS_DIS 7
+
+#define ICE_TLV_TYPE_END 0
+#define ICE_TLV_TYPE_ORG 127
+
+#define ICE_IEEE_8021QAZ_OUI 0x0080C2
+#define ICE_IEEE_SUBTYPE_ETS_CFG 9
+#define ICE_IEEE_SUBTYPE_ETS_REC 10
+#define ICE_IEEE_SUBTYPE_PFC_CFG 11
+#define ICE_IEEE_SUBTYPE_APP_PRI 12
+
+#define ICE_CEE_DCBX_OUI 0x001B21
+#define ICE_CEE_DCBX_TYPE 2
+#define ICE_CEE_SUBTYPE_PG_CFG 2
+#define ICE_CEE_SUBTYPE_PFC_CFG 3
+#define ICE_CEE_SUBTYPE_APP_PRI 4
+#define ICE_CEE_MAX_FEAT_TYPE 3
+/* Defines for LLDP TLV header */
+#define ICE_LLDP_TLV_LEN_S 0
+#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
+#define ICE_LLDP_TLV_TYPE_S 9
+#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
+#define ICE_LLDP_TLV_SUBTYPE_S 0
+#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
+#define ICE_LLDP_TLV_OUI_S 8
+#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
+
+/* Defines for IEEE ETS TLV */
+#define ICE_IEEE_ETS_MAXTC_S 0
+#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
+#define ICE_IEEE_ETS_CBS_S 6
+#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
+#define ICE_IEEE_ETS_WILLING_S 7
+#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
+#define ICE_IEEE_ETS_PRIO_0_S 0
+#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
+#define ICE_IEEE_ETS_PRIO_1_S 4
+#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
+#define ICE_CEE_PGID_PRIO_0_S 0
+#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
+#define ICE_CEE_PGID_PRIO_1_S 4
+#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
+#define ICE_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define ICE_IEEE_TSA_STRICT 0
+#define ICE_IEEE_TSA_ETS 2
+
+/* Defines for IEEE PFC TLV */
+#define ICE_IEEE_PFC_CAP_S 0
+#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
+#define ICE_IEEE_PFC_MBC_S 6
+#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
+#define ICE_IEEE_PFC_WILLING_S 7
+#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
+
+/* Defines for IEEE APP TLV */
+#define ICE_IEEE_APP_SEL_S 0
+#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
+#define ICE_IEEE_APP_PRIO_S 5
+#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
+
+/* TLV definitions for preparing MIB */
+#define ICE_IEEE_TLV_ID_ETS_CFG 3
+#define ICE_IEEE_TLV_ID_ETS_REC 4
+#define ICE_IEEE_TLV_ID_PFC_CFG 5
+#define ICE_IEEE_TLV_ID_APP_PRI 6
+#define ICE_TLV_ID_END_OF_LLDPPDU 7
+#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+
+#define ICE_IEEE_ETS_TLV_LEN 25
+#define ICE_IEEE_PFC_TLV_LEN 6
+#define ICE_IEEE_APP_TLV_LEN 11
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct ice_lldp_org_tlv {
+ __be16 typelen;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+} __packed;
+
+struct ice_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct ice_cee_ctrl_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct ice_cee_feat_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define ICE_CEE_FEAT_TLV_ENA_M 0x80
+#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
+#define ICE_CEE_FEAT_TLV_ERR_M 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct ice_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define ICE_CEE_APP_SELECTOR_M 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+} __packed;
+
+u8 ice_get_dcbx_status(struct ice_hw *hw);
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_init_dcb(struct ice_hw *hw);
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cmd_details);
+#ifdef CONFIG_DCB
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ struct ice_sq_cd *cd);
+enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd);
+#else /* CONFIG_DCB */
+static inline enum ice_status
+ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
+ bool __always_unused shutdown_lldp_agent,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_start_lldp(struct ice_hw __always_unused *hw,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
+ bool __always_unused start_dcbx_agent,
+ bool *dcbx_agent_status,
+ struct ice_sq_cd __always_unused *cd)
+{
+ *dcbx_agent_status = false;
+
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
+ bool __always_unused ena_update,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+#endif /* CONFIG_DCB */
+#endif /* _ICE_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
new file mode 100644
index 000000000000..3e81af1884fc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_dcb_lib.h"
+
+/**
+ * ice_dcb_get_ena_tc - return bitmap of enabled TCs
+ * @dcbcfg: DCB config to evaluate for enabled TCs
+ */
+u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 i, num_tc, ena_tc = 1;
+
+ num_tc = ice_dcb_get_num_tc(dcbcfg);
+
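+ /* TC0 is always enabled via the ena_tc initializer above */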
+ for (i = 0; i < num_tc; i++)
+ ena_tc |= BIT(i);
+
+ return ena_tc;
+}
+
+/**
+ * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
+ * @dcbcfg: config to retrieve number of TCs from
+ */
+u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
+{
+ bool tc_unused = false;
+ u8 num_tc = 0;
+ u8 ret = 0;
+ int i;
+
+ /* Scan the ETS Config Priority Table to find traffic classes
+ * enabled and create a bitmask of enabled TCs
+ */
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);
+
+ /* Scan bitmask for contiguous TCs starting with TC0 */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TCs - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = true;
+ }
+ }
+
+ /* There is always at least 1 TC */
+ if (!ret)
+ ret = 1;
+
+ return ret;
+}
+
+/**
+ * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: VSI owner of rings being updated
+ */
+void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
+{
+ struct ice_ring *tx_ring, *rx_ring;
+ u16 qoffset, qcount;
+ int i, n;
+
+ if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_txq; i++) {
+ tx_ring = vsi->tx_rings[i];
+ tx_ring->dcb_tc = 0;
+ }
+ for (i = 0; i < vsi->num_rxq; i++) {
+ rx_ring = vsi->rx_rings[i];
+ rx_ring->dcb_tc = 0;
+ }
+ return;
+ }
+
+ ice_for_each_traffic_class(n) {
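+ /* enabled TCs are contiguous starting at TC0, so stop at the first disabled TC */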
+ if (!(vsi->tc_cfg.ena_tc & BIT(n)))
+ break;
+
+ qoffset = vsi->tc_cfg.tc_info[n].qoffset;
+ qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ tx_ring = vsi->tx_rings[i];
+ rx_ring = vsi->rx_rings[i];
+ tx_ring->dcb_tc = n;
+ rx_ring->dcb_tc = n;
+ }
+ }
+}
+
+/**
+ * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
+ * @pf: pointer to the PF struct
+ *
+ * The caller is assumed to have already disabled all VSIs before
+ * calling this function. DCB is reconfigured based on
+ * local_dcbx_cfg.
+ */
+static void ice_pf_dcb_recfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ u8 tc_map = 0;
+ int v, ret;
+
+ /* Update each VSI */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+
+ if (pf->vsi[v]->type == ICE_VSI_PF)
+ tc_map = ice_dcb_get_ena_tc(dcbcfg);
+ else
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+
+ ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to config TC for VSI index: %d\n",
+ pf->vsi[v]->idx);
+ else
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ }
+}
+
+/**
+ * ice_pf_dcb_cfg - Apply new DCB configuration
+ * @pf: pointer to the PF struct
+ * @new_cfg: DCBX config to apply
+ */
+static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
+{
+ struct ice_dcbx_cfg *old_cfg, *curr_cfg;
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ int ret = 0;
+
+ curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ /* Enable DCB tagging only when more than one TC */
+ if (ice_dcb_get_num_tc(new_cfg) > 1) {
+ dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ }
+
+ if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
+ dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
+ return ret;
+ }
+
+ /* Store old config in case FW config fails */
+ old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
+ if (!old_cfg)
+ return -ENOMEM;
+
+ memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
+
+ /* avoid race conditions by holding the lock while disabling and
+ * re-enabling the VSI
+ */
+ rtnl_lock();
+ ice_pf_dis_all_vsi(pf, true);
+
+ memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
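+ /* mirror the new operational ETS config into the ETS recommendation */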
+ memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
+
+ /* Only send new config to HW if we are in SW LLDP mode. Otherwise,
+ * the new config came from the HW in the first place.
+ */
+ if (pf->hw.port_info->is_sw_lldp) {
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
+ /* Restore previous settings to local config */
+ memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
+ goto out;
+ }
+ }
+
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto out;
+ }
+
+ ice_pf_dcb_recfg(pf);
+
+out:
+ ice_pf_ena_all_vsi(pf, true);
+ rtnl_unlock();
+ devm_kfree(&pf->pdev->dev, old_cfg);
+ return ret;
+}
+
+/**
+ * ice_dcb_rebuild - rebuild DCB post reset
+ * @pf: physical function instance
+ */
+void ice_dcb_rebuild(struct ice_pf *pf)
+{
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ struct ice_dcbx_cfg *prev_cfg;
+ enum ice_status ret;
+ u8 willing;
+
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto dcb_error;
+ }
+
+ /* If DCB was not enabled previously, we are done */
+ if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return;
+
+ /* Save current willing state and force FW to unwilling */
+ willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
+ pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+ goto dcb_error;
+ }
+
+ /* Retrieve DCB config and ensure same as current in SW */
+ prev_cfg = devm_kmemdup(&pf->pdev->dev,
+ &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg) {
+ dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
+ goto dcb_error;
+ }
+
+ ice_init_dcb(&pf->hw);
+ if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(*prev_cfg))) {
+ /* difference in cfg detected - disable DCB till next MIB */
+ dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+ goto dcb_error;
+ }
+
+ /* fetched config congruent to previous configuration */
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+
+ /* Configuration replayed - reset willing state to previous */
+ pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
+ goto dcb_error;
+ }
+ dev_info(&pf->pdev->dev, "DCB restored after reset\n");
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto dcb_error;
+ }
+
+ return;
+
+dcb_error:
+ dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
+ prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg)
+ return;
+
+ prev_cfg->etscfg.willing = true;
+ prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+ prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+ ice_pf_dcb_cfg(pf, prev_cfg);
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+}
+
+/**
+ * ice_dcb_init_cfg - set the initial DCB config in SW
+ * @pf: PF to apply config to
+ */
+static int ice_dcb_init_cfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *newcfg;
+ struct ice_port_info *pi;
+ int ret = 0;
+
+ pi = pf->hw.port_info;
+ newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
+ if (!newcfg)
+ return -ENOMEM;
+
+ memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
+ memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
+
+ dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
+ if (ice_pf_dcb_cfg(pf, newcfg))
+ ret = -EINVAL;
+
+ devm_kfree(&pf->pdev->dev, newcfg);
+
+ return ret;
+}
+
+/**
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
+ * @pf: PF to apply config to
+ */
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
+{
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ struct ice_dcbx_cfg *dcbcfg;
+ struct ice_port_info *pi;
+ struct ice_hw *hw;
+ int ret;
+
+ hw = &pf->hw;
+ pi = hw->port_info;
+ dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
+ if (!dcbcfg)
+ return -ENOMEM;
+
+ /* dcbcfg is already zeroed by kzalloc; only clear the port's local DCBX config */
+ memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
+
+ dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.maxtcs = 8;
+ dcbcfg->etscfg.tcbwtable[0] = 100;
+ dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+
+ memcpy(&dcbcfg->etsrec, &dcbcfg->etscfg,
+ sizeof(dcbcfg->etsrec));
+ dcbcfg->etsrec.willing = 0;
+
+ dcbcfg->pfc.willing = 1;
+ dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS;
+
+ dcbcfg->numapps = 1;
+ dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].priority = 3;
+ dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
+
+ ret = ice_pf_dcb_cfg(pf, dcbcfg);
+ devm_kfree(&pf->pdev->dev, dcbcfg);
+ if (ret)
+ return ret;
+
+ return ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
+}
+
+/**
+ * ice_init_pf_dcb - initialize DCB for a PF
+ * @pf: PF to initialize DCB for
+ */
+int ice_init_pf_dcb(struct ice_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ struct ice_port_info *port_info;
+ struct ice_hw *hw = &pf->hw;
+ int sw_default = 0;
+ int err;
+
+ port_info = hw->port_info;
+
+ /* check if device is DCB capable */
+ if (!hw->func_caps.common_cap.dcb) {
+ dev_dbg(dev, "DCB not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Best effort to put DCBx and LLDP into a good state */
+ port_info->dcbx_status = ice_get_dcbx_status(hw);
+ if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
+ port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
+ bool dcbx_status;
+
+ /* Attempt to start LLDP engine. Ignore errors
+ * as this will error if it is already started
+ */
+ ice_aq_start_lldp(hw, NULL);
+
+ /* Attempt to start DCBX. Ignore errors as this
+ * will error if it is already started
+ */
+ ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
+ }
+
+ err = ice_init_dcb(hw);
+ if (err) {
+ /* FW LLDP not in usable state, default to SW DCBx/LLDP */
+ dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n");
+ hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
+ hw->port_info->is_sw_lldp = true;
+ }
+
+ if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
+ dev_info(&pf->pdev->dev, "DCBX disabled\n");
+
+ /* LLDP disabled in FW */
+ if (port_info->is_sw_lldp) {
+ sw_default = 1;
+ dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
+ }
+
+ if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ sw_default = 1;
+ dev_info(&pf->pdev->dev, "DCBX not started\n");
+ }
+
+ if (sw_default) {
+ err = ice_dcb_sw_dflt_cfg(pf);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set local DCB config %d\n", err);
+ err = -EIO;
+ goto dcb_init_err;
+ }
+
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ return 0;
+ }
+
+ /* DCBX in FW and LLDP enabled in FW */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
+
+ set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+
+ err = ice_dcb_init_cfg(pf);
+ if (err)
+ goto dcb_init_err;
+
+ dev_info(&pf->pdev->dev, "DCBX offload supported\n");
+ return err;
+
+dcb_init_err:
+ dev_err(dev, "DCB init failed\n");
+ return err;
+}
+
+/**
+ * ice_update_dcb_stats - Update DCB stats counters
+ * @pf: PF whose stats need to be updated
+ */
+void ice_update_dcb_stats(struct ice_pf *pf)
+{
+ struct ice_hw_port_stats *prev_ps, *cur_ps;
+ struct ice_hw *hw = &pf->hw;
+ u8 pf_id = hw->pf_id;
+ int i;
+
+ prev_ps = &pf->stats_prev;
+ cur_ps = &pf->stats;
+
+ for (i = 0; i < 8; i++) {
+ ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xoff_rx[i],
+ &cur_ps->priority_xoff_rx[i]);
+ ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_rx[i],
+ &cur_ps->priority_xon_rx[i]);
+ ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_tx[i],
+ &cur_ps->priority_xon_tx[i]);
+ ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xoff_tx[i],
+ &cur_ps->priority_xoff_tx[i]);
+ ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_2_xoff[i],
+ &cur_ps->priority_xon_2_xoff[i]);
+ }
+}
+
+/**
+ * ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
+ * @tx_ring: ring to send buffer on
+ * @first: pointer to struct ice_tx_buf
+ */
+int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ struct ice_tx_buf *first)
+{
+ struct sk_buff *skb = first->skb;
+
+ if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
+ return 0;
+
+ /* Insert 802.1p priority into VLAN header */
+ if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
+ skb->priority != TC_PRIO_CONTROL) {
+ first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
+ /* Mask the lower 3 bits to set the 802.1p priority */
+ first->tx_flags |= (skb->priority & 0x7) <<
+ ICE_TX_FLAGS_VLAN_PR_S;
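+ /* a SW VLAN tag lives in the skb data, so write the priority there; otherwise let HW insert the tag from tx_flags */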
+ if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(first->tx_flags >>
+ ICE_TX_FLAGS_VLAN_S);
+ } else {
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_dcb_process_lldp_set_mib_change - Process MIB change
+ * @pf: ptr to ice_pf
+ * @event: pointer to the admin queue receive event
+ */
+void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ struct ice_rq_event_info *event)
+{
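+ /* MIB change events are only acted upon when DCBX is FW (LLD) managed */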
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
+ int err;
+
+ prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
+ sizeof(*dcbcfg), GFP_KERNEL);
+ if (!dcbcfg)
+ return;
+
+ err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
+ if (!err)
+ ice_pf_dcb_cfg(pf, dcbcfg);
+
+ devm_kfree(&pf->pdev->dev, dcbcfg);
+
+ /* Get updated DCBx data from firmware */
+ err = ice_get_dcb_cfg(pf->hw.port_info);
+ if (err)
+ dev_err(&pf->pdev->dev,
+ "Failed to get DCB config\n");
+ } else {
+ dev_dbg(&pf->pdev->dev,
+ "MIB Change Event in HOST mode\n");
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
new file mode 100644
index 000000000000..ca7b76faa03c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_LIB_H_
+#define _ICE_DCB_LIB_H_
+
+#include "ice.h"
+#include "ice_lib.h"
+
+#ifdef CONFIG_DCB
+#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+
+void ice_dcb_rebuild(struct ice_pf *pf);
+u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
+u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
+int ice_init_pf_dcb(struct ice_pf *pf);
+void ice_update_dcb_stats(struct ice_pf *pf);
+int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ struct ice_tx_buf *first);
+void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+static inline void
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+{
+ tlan_ctx->cgd_num = ring->dcb_tc;
+}
+#else
+#define ice_dcb_rebuild(pf) do {} while (0)
+
+static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
+{
+ return ICE_DFLT_TRAFFIC_CLASS;
+}
+
+static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
+{
+ return 1;
+}
+
+static inline int ice_init_pf_dcb(struct ice_pf *pf)
+{
+ dev_dbg(&pf->pdev->dev, "DCB not supported\n");
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ struct ice_tx_buf __always_unused *first)
+{
+ return 0;
+}
+
+#define ice_update_dcb_stats(pf) do {} while (0)
+#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
+#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
+#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
+#endif /* CONFIG_DCB */
+#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index eb8d149e317c..1341fde8d53f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4,6 +4,8 @@
/* ethtool support for ice */
#include "ice.h"
+#include "ice_lib.h"
+#include "ice_dcb_lib.h"
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -33,8 +35,14 @@ static int ice_q_stats_len(struct net_device *netdev)
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)
-#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
- ice_q_stats_len(n))
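+/* four per-priority PFC counter arrays (xon/xoff, Rx/Tx), each reported as u64 stats */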
+#define ICE_PFC_STATS_LEN ( \
+ (FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \
+ / sizeof(u64))
+#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
+ ICE_VSI_STATS_LEN + ice_q_stats_len(n))
static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
@@ -126,6 +134,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
+ ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -309,6 +318,22 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx-priority-%u-xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx-priority-%u-xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx-priority-%u-xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx-priority-%u-xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
@@ -382,13 +407,19 @@ static u32 ice_get_priv_flags(struct net_device *netdev)
static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ int ret = 0;
u32 i;
if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
return -EINVAL;
+ set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
+
+ bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
const struct ice_priv_flag *priv_flag;
@@ -400,7 +431,79 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
clear_bit(priv_flag->bitno, pf->flags);
}
- return 0;
+ bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
+
+ if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) {
+ if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) {
+ enum ice_status status;
+
+ status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
+ NULL);
+ /* If unregistering for LLDP events fails, this is
+ * not an error state, as there shouldn't be any
+ * events to respond to.
+ */
+ if (status)
+ dev_info(&pf->pdev->dev,
+ "Failed to unreg for LLDP events\n");
+
+ /* The AQ call to stop the FW LLDP agent will generate
+ * an error if the agent is already stopped.
+ */
+ status = ice_aq_stop_lldp(&pf->hw, true, NULL);
+ if (status)
+ dev_warn(&pf->pdev->dev,
+ "Fail to stop LLDP agent\n");
+ /* A use case that has the FW LLDP agent stopped
+ * will likely not need DCB, so failure to init is
+ * not a concern of ethtool
+ */
+ status = ice_init_pf_dcb(pf);
+ if (status)
+ dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
+ } else {
+ enum ice_status status;
+ bool dcbx_agent_status;
+
+ /* AQ command to start FW LLDP agent will return an
+ * error if the agent is already started
+ */
+ status = ice_aq_start_lldp(&pf->hw, NULL);
+ if (status)
+ dev_warn(&pf->pdev->dev,
+ "Fail to start LLDP Agent\n");
+
+ /* AQ command to start FW DCBx agent will fail if
+ * the agent is already started
+ */
+ status = ice_aq_start_stop_dcbx(&pf->hw, true,
+ &dcbx_agent_status,
+ NULL);
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to start FW DCBX\n");
+
+ dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
+ dcbx_agent_status ? "ACTIVE" : "DISABLED");
+
+ /* Failure to configure MIB change or init DCB is not
+ * relevant to ethtool. Print notification that
+ * registration/init failed but do not return error
+ * state to ethtool
+ */
+ status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
+ NULL);
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Fail to reg for MIB change\n");
+
+ status = ice_init_pf_dcb(pf);
+ if (status)
+ dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
+ }
+ }
+ clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
+ return ret;
}
static int ice_get_sset_count(struct net_device *netdev, int sset)
@@ -486,6 +589,16 @@ ice_get_ethtool_stats(struct net_device *netdev,
data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+
+ for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+
+ for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
}
/**
@@ -811,7 +924,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
link_info = &vsi->port_info->phy.link_info;
- /* Initialize supported and advertised settings based on phy settings */
+ /* Initialize supported and advertised settings based on PHY settings */
switch (link_info->phy_type_low) {
case ICE_PHY_TYPE_LOW_100BASE_TX:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -921,6 +1034,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
25000baseCR_Full);
break;
case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
break;
@@ -1137,10 +1251,10 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
*/
static void
ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
- struct net_device __always_unused *netdev)
+ struct net_device *netdev)
{
/* link is down and the driver needs to fall back on
- * supported phy types to figure out what info to display
+ * supported PHY types to figure out what info to display
*/
ice_phy_type_to_ethtool(netdev, ks);
@@ -1156,8 +1270,9 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
*
* Reports speed/duplex settings based on media_type
*/
-static int ice_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+static int
+ice_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
@@ -1349,7 +1464,7 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
} else {
/* If autoneg is currently enabled */
if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
- /* If autoneg is supported 10GBASE_T is the only phy
+ /* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
if (ethtool_link_ksettings_test_link_mode(ks,
@@ -1399,14 +1514,13 @@ ice_set_link_ksettings(struct net_device *netdev,
if (!p)
return -EOPNOTSUPP;
- /* Check if this is lan vsi */
- for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) {
+ /* Check if this is LAN VSI */
+ ice_for_each_vsi(pf, idx)
if (pf->vsi[idx]->type == ICE_VSI_PF) {
if (np->vsi != pf->vsi[idx])
return -EOPNOTSUPP;
break;
}
- }
if (p->phy.media_type != ICE_MEDIA_BASET &&
p->phy.media_type != ICE_MEDIA_FIBER &&
@@ -1464,7 +1578,7 @@ ice_set_link_ksettings(struct net_device *netdev,
if (!abilities)
return -ENOMEM;
- /* Get the current phy config */
+ /* Get the current PHY config */
status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
NULL);
if (status) {
@@ -1559,15 +1673,16 @@ done:
}
/**
- * ice_get_rxnfc - command to get RX flow classification rules
+ * ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
* @rule_locs: buffer to rturn Rx flow classification rules
*
* Returns Success if the command is supported.
*/
-static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+static int
+ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 __always_unused *rule_locs)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -1821,18 +1936,21 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_vsi *vsi = np->vsi;
+ struct ice_dcbx_cfg *dcbx_cfg;
enum ice_status status;
/* Initialize pause params */
pause->rx_pause = 0;
pause->tx_pause = 0;
+ dcbx_cfg = &pi->local_dcbx_cfg;
+
pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
return;
- /* Get current phy config */
+ /* Get current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status)
@@ -1841,6 +1959,10 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
+ if (dcbx_cfg->pfc.pfcena)
+ /* PFC enabled so report LFC as off */
+ goto out;
+
if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
pause->tx_pause = 1;
if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
@@ -1861,6 +1983,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
+ struct ice_dcbx_cfg *dcbx_cfg;
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
@@ -1871,6 +1994,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
+ dcbx_cfg = &pi->local_dcbx_cfg;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* Changing the port's flow control is not supported if this isn't the
@@ -1893,6 +2017,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
+ if (dcbx_cfg->pfc.pfcena) {
+ netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
if (pause->rx_pause && pause->tx_pause)
pi->fc.req_mode = ICE_FC_FULL;
else if (pause->rx_pause && !pause->tx_pause)
@@ -2021,11 +2149,12 @@ out:
* @key: hash key
* @hfunc: hash function
*
- * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
-static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int
+ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
+ const u8 hfunc)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2087,7 +2216,7 @@ enum ice_container_type {
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, RX or TX
+ * @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@@ -2100,12 +2229,18 @@ static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
struct ice_ring_container *rc)
{
- struct ice_pf *pf = rc->ring->vsi->back;
+ struct ice_pf *pf;
+
+ if (!rc->ring)
+ return -EINVAL;
+
+ pf = rc->ring->vsi->back;
switch (c_type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
break;
case ICE_TX_CONTAINER:
ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
@@ -2120,49 +2255,60 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
}
/**
+ * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
+ * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
+ * @ec: coalesce settings to program the device with
+ * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+ *
+ * Return 0 on success, and negative under the following conditions:
+ * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
+ * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
+ */
+static int
+ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
+{
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx))
+ return -EINVAL;
+ if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx))
+ return -EINVAL;
+ } else if (q_num < vsi->num_rxq) {
+ if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx))
+ return -EINVAL;
+ } else if (q_num < vsi->num_txq) {
+ if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* __ice_get_coalesce - get ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure to fill with driver's coalesce settings
* @q_num: queue number to get the coalesce settings for
+ *
+ * If the caller passes in a negative q_num then we return coalesce settings
+ * based on queue number 0, else use the actual q_num passed in.
*/
static int
__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- int tx = -EINVAL, rx = -EINVAL;
struct ice_vsi *vsi = np->vsi;
- if (q_num < 0) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[0]->q_vector->rx);
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[0]->q_vector->tx);
-
- goto update_coalesced_frames;
- }
-
- if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[q_num]->q_vector->rx);
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[q_num]->q_vector->tx);
- } else if (q_num < vsi->num_rxq) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[q_num]->q_vector->rx);
- } else if (q_num < vsi->num_txq) {
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[q_num]->q_vector->tx);
- } else {
- /* q_num is invalid for both Rx and Tx queues */
- return -EINVAL;
- }
+ if (q_num < 0)
+ q_num = 0;
-update_coalesced_frames:
- /* either q_num is invalid for both Rx and Tx queues or setting coalesce
- * failed completely
- */
- if (tx && rx)
+ if (ice_get_q_coalesce(vsi, ec, q_num))
return -EINVAL;
if (q_num < vsi->num_txq)
@@ -2180,15 +2326,16 @@ ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_get_coalesce(netdev, ec, -1);
}
-static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
- struct ethtool_coalesce *ec)
+static int
+ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
{
return __ice_get_coalesce(netdev, ec, q_num);
}
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, RX or TX
+ * @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
@@ -2213,6 +2360,23 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
switch (c_type) {
case ICE_RX_CONTAINER:
+ if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
+ (ec->rx_coalesce_usecs_high &&
+ ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
+ netdev_info(vsi->netdev,
+ "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n",
+ pf->hw.intrl_gran, ICE_MAX_INTRL);
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
+ rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
+ wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector +
+ rc->ring->q_vector->v_idx),
+ ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high,
+ pf->hw.intrl_gran));
+ }
+
if (ec->rx_coalesce_usecs != itr_setting &&
ec->use_adaptive_rx_coalesce) {
netdev_info(vsi->netdev,
@@ -2235,6 +2399,12 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
}
break;
case ICE_TX_CONTAINER:
+ if (ec->tx_coalesce_usecs_high) {
+ netdev_info(vsi->netdev,
+ "setting tx-usecs-high is not supported\n");
+ return -EINVAL;
+ }
+
if (ec->tx_coalesce_usecs != itr_setting &&
ec->use_adaptive_tx_coalesce) {
netdev_info(vsi->netdev,
@@ -2264,54 +2434,77 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
return 0;
}
+/**
+ * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
+ * @vsi: VSI associated to the queue that need updating
+ * @ec: coalesce settings to program the device with
+ * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+ *
+ * Return 0 on success, and negative under the following conditions:
+ * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
+ * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
+ */
+static int
+ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
+{
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi))
+ return -EINVAL;
+
+ if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi))
+ return -EINVAL;
+ } else if (q_num < vsi->num_rxq) {
+ if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi))
+ return -EINVAL;
+ } else if (q_num < vsi->num_txq) {
+ if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * __ice_set_coalesce - set ITR/INTRL values for the device
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool structure to fill with driver's coalesce settings
+ * @q_num: queue number to get the coalesce settings for
+ *
+ * If the caller passes in a negative q_num then we set the coalesce settings
+ * for all Tx/Rx queues, else use the actual q_num passed in.
+ */
static int
__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- int rx = -EINVAL, tx = -EINVAL;
struct ice_vsi *vsi = np->vsi;
if (q_num < 0) {
int i;
ice_for_each_q_vector(vsi, i) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &q_vector->rx, vsi) ||
- ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &q_vector->tx, vsi))
+ if (ice_set_q_coalesce(vsi, ec, i))
return -EINVAL;
}
-
goto set_work_lmt;
}
- if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &vsi->rx_rings[q_num]->q_vector->rx,
- vsi);
- tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &vsi->tx_rings[q_num]->q_vector->tx,
- vsi);
- } else if (q_num < vsi->num_rxq) {
- rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &vsi->rx_rings[q_num]->q_vector->rx,
- vsi);
- } else if (q_num < vsi->num_txq) {
- tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &vsi->tx_rings[q_num]->q_vector->tx,
- vsi);
- }
-
- /* either q_num is invalid for both Rx and Tx queues or setting coalesce
- * failed completely
- */
- if (rx && tx)
+ if (ice_set_q_coalesce(vsi, ec, q_num))
return -EINVAL;
set_work_lmt:
+
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq,
ec->rx_max_coalesced_frames_irq);
@@ -2325,8 +2518,9 @@ ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_set_coalesce(netdev, ec, -1);
}
-static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
- struct ethtool_coalesce *ec)
+static int
+ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
{
return __ice_set_coalesce(netdev, ec, q_num);
}
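
A minimal standalone sketch of the dispatch convention used by the new coalesce helpers, with hypothetical queue counts standing in for the real vsi fields: a negative index means "program every queue", and a non-negative index must address at least one of the Tx/Rx rings.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical queue counts standing in for vsi->num_txq / vsi->num_rxq. */
#define NUM_TXQ 8
#define NUM_RXQ 4

/* Mirrors the ice_set_q_coalesce() checks: a non-negative index is valid
 * if it addresses at least one of the Tx or Rx rings, and each half is
 * programmed only where the index is in range.
 */
static bool set_q_coalesce(int q_num)
{
	bool has_rx = q_num < NUM_RXQ;
	bool has_tx = q_num < NUM_TXQ;

	if (!has_rx && !has_tx)
		return false;	/* -EINVAL in the driver */

	printf("queue %d: rx=%d tx=%d\n", q_num, has_rx, has_tx);
	return true;
}

int main(void)
{
	int q;

	/* A negative q_num in __ice_set_coalesce() means "all queues". */
	for (q = 0; q < NUM_TXQ; q++)
		set_q_coalesce(q);

	/* An out-of-range index is rejected, as in the driver. */
	printf("queue 9 accepted: %d\n", set_q_coalesce(9));
	return 0;
}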
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6bf5cc064270..ec25f26069b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -49,6 +49,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
+#define PRTDCB_GENS 0x00083020
+#define PRTDCB_GENS_DCBX_STATUS_S 0
+#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
@@ -106,6 +109,16 @@
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_CTL 0x0016CC54
+#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
+#define GLINT_CTL_ITR_GRAN_200_S 16
+#define GLINT_CTL_ITR_GRAN_200_M ICE_M(0xF, 16)
+#define GLINT_CTL_ITR_GRAN_100_S 20
+#define GLINT_CTL_ITR_GRAN_100_M ICE_M(0xF, 20)
+#define GLINT_CTL_ITR_GRAN_50_S 24
+#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
+#define GLINT_CTL_ITR_GRAN_25_S 28
+#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
@@ -150,11 +163,15 @@
#define PFINT_OICR_ENA 0x0016C900
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
+#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define QINT_RQCTL_ITR_INDX_S 11
+#define QINT_RQCTL_ITR_INDX_M ICE_M(0x3, 11)
#define QINT_RQCTL_CAUSE_ENA_M BIT(30)
#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
#define QINT_TQCTL_MSIX_INDX_S 0
+#define QINT_TQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define QINT_TQCTL_ITR_INDX_S 11
+#define QINT_TQCTL_ITR_INDX_M ICE_M(0x3, 11)
#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
#define VPINT_ALLOC_FIRST_S 0
@@ -168,6 +185,8 @@
#define VPINT_ALLOC_PCI_LAST_S 12
#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
+#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
+#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -306,11 +325,16 @@
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
+#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64))
#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
+#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
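
The *_S/*_M pairs added above follow the driver's shift/mask convention. The sketch below shows how a QINT_RQCTL value might be assembled from them; ICE_M(m, s) is assumed to expand to ((m) << (s)) as it does elsewhere in the driver, and the index values are made up.

#include <stdio.h>

/* Stand-ins copied from the hunk above; ICE_M(m, s) is assumed to be
 * ((m) << (s)) as elsewhere in the ice driver.
 */
#define ICE_M(m, s)              ((m) << (s))
#define QINT_RQCTL_MSIX_INDX_S   0
#define QINT_RQCTL_MSIX_INDX_M   ICE_M(0x7FF, 0)
#define QINT_RQCTL_ITR_INDX_S    11
#define QINT_RQCTL_ITR_INDX_M    ICE_M(0x3, 11)
#define QINT_RQCTL_CAUSE_ENA_M   (1u << 30)	/* BIT(30) in the driver */

int main(void)
{
	unsigned int reg_idx = 5;	/* hypothetical MSI-X vector index */
	unsigned int itr_idx = 2;	/* hypothetical ITR index */
	unsigned int val;

	/* Shift each field into place and mask it, then OR in the cause
	 * enable bit, as ice_vsi_cfg_msix() does for QINT_RQCTL/QINT_TQCTL.
	 */
	val = QINT_RQCTL_CAUSE_ENA_M |
	      ((itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M) |
	      ((reg_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	printf("QINT_RQCTL value: 0x%08x\n", val);	/* 0x40001005 */
	return 0;
}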
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index ef4c79b5aa32..510a8c900e61 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -20,7 +20,7 @@ union ice_32byte_rx_desc {
} lo_dword;
union {
__le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow Director filter id */
+ __le32 fd_id; /* Flow Director filter ID */
} hi_dword;
} qword0;
struct {
@@ -99,7 +99,7 @@ enum ice_rx_ptype_payload_layer {
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
-/* RX Flex Descriptor
+/* Rx Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
*/
@@ -113,7 +113,7 @@ union ice_32b_rx_flex_desc {
} read;
struct {
/* Qword 0 */
- u8 rxdid; /* descriptor builder profile id */
+ u8 rxdid; /* descriptor builder profile ID */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
@@ -149,7 +149,7 @@ union ice_32b_rx_flex_desc {
/* Rx Flex Descriptor NIC Profile
* This descriptor corresponds to RxDID 2 which contains
- * metadata fields for RSS, flow id and timestamp info
+ * metadata fields for RSS, flow ID and timestamp info
*/
struct ice_32b_rx_flex_desc_nic {
/* Qword 0 */
@@ -208,23 +208,23 @@ enum ice_flex_rx_mdid {
ICE_RX_MDID_HASH_HIGH,
};
-/* Rx Flag64 packet flag bits */
-enum ice_rx_flg64_bits {
- ICE_RXFLG_PKT_DSI = 0,
- ICE_RXFLG_EVLAN_x8100 = 15,
- ICE_RXFLG_EVLAN_x9100,
- ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_MAC = 22,
- ICE_RXFLG_TNL_VLAN,
- ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_FIN = 32,
- ICE_RXFLG_SYN,
- ICE_RXFLG_RST,
- ICE_RXFLG_TNL0 = 38,
- ICE_RXFLG_TNL1,
- ICE_RXFLG_TNL2,
- ICE_RXFLG_UDP_GRE,
- ICE_RXFLG_RSVD = 63
+/* Rx/Tx Flag64 packet flag bits */
+enum ice_flg64_bits {
+ ICE_FLG_PKT_DSI = 0,
+ ICE_FLG_EVLAN_x8100 = 15,
+ ICE_FLG_EVLAN_x9100,
+ ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_MAC = 22,
+ ICE_FLG_TNL_VLAN,
+ ICE_FLG_PKT_FRG,
+ ICE_FLG_FIN = 32,
+ ICE_FLG_SYN,
+ ICE_FLG_RST,
+ ICE_FLG_TNL0 = 38,
+ ICE_FLG_TNL1,
+ ICE_FLG_TNL2,
+ ICE_FLG_UDP_GRE,
+ ICE_FLG_RSVD = 63
};
/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
@@ -322,7 +322,7 @@ enum ice_rlan_ctx_rx_hsplit_1 {
ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
};
-/* TX Descriptor */
+/* Tx Descriptor */
struct ice_tx_desc {
__le64 buf_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz;
@@ -342,12 +342,12 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
- ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
- ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
- ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
+ ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
+ ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
+ ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
+ ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
+ ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
};
#define ICE_TXD_QW1_OFFSET_S 16
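
The "/* 2 BITS */" notes removed above were flagging that the IIPT and L4T_EOFT command values are 2-bit field encodings rather than independent flags. The standalone sketch below, using only the values visible in the hunk, shows why ORing two values from the same field selects a different encoding.

#include <stdio.h>

/* Values copied from the enum above; each group is a 2-bit field within
 * the descriptor command word, so the constants are encodings, not flags.
 */
#define CMD_IIPT_IPV6       0x0020	/* 01b in the IIPT field */
#define CMD_IIPT_IPV4       0x0040	/* 10b in the IIPT field */
#define CMD_IIPT_IPV4_CSUM  0x0060	/* 11b in the IIPT field */
#define CMD_L4T_EOFT_TCP    0x0100	/* 01b in the L4T_EOFT field */
#define CMD_L4T_EOFT_SCTP   0x0200	/* 10b in the L4T_EOFT field */
#define CMD_L4T_EOFT_UDP    0x0300	/* 11b in the L4T_EOFT field */

int main(void)
{
	/* ORing two "flags" from the same 2-bit field selects a third,
	 * different encoding -- which is why only one value per field may
	 * be chosen when building a Tx descriptor command.
	 */
	printf("IPV4 | IPV6 == IPV4_CSUM: %d\n",
	       (CMD_IIPT_IPV4 | CMD_IIPT_IPV6) == CMD_IIPT_IPV4_CSUM);
	printf("TCP | SCTP == UDP: %d\n",
	       (CMD_L4T_EOFT_TCP | CMD_L4T_EOFT_SCTP) == CMD_L4T_EOFT_UDP);
	return 0;
}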
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index fa61203bee26..fbf1eba0cc2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_dcb_lib.h"
/**
* ice_setup_rx_ctx - Configure a receive ring context
@@ -73,7 +74,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
- /* increasing context priority to pick up profile id;
+ /* increasing context priority to pick up profile ID;
* default is 0x01; setting to 0x03 to ensure profile
* is programmed if prev context is of same priority
*/
@@ -124,6 +125,8 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
+ ice_set_cgd_num(tlan_ctx, ring);
+
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@@ -138,7 +141,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
- /* Firmware expects vmvf_num to be absolute VF id */
+ /* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
@@ -175,17 +178,14 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
int i;
for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
- u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
-
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- break;
+ if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
+ QRX_CTRL_QENA_STAT_M))
+ return 0;
usleep_range(20, 40);
}
- if (i >= ICE_Q_WAIT_MAX_RETRY)
- return -ETIMEDOUT;
- return 0;
+ return -ETIMEDOUT;
}
/**
@@ -197,19 +197,13 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int i, j, ret = 0;
+ int i, ret = 0;
for (i = 0; i < vsi->num_rxq; i++) {
int pf_q = vsi->rxq_map[i];
u32 rx_reg;
- for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
- if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
- ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
- break;
- usleep_range(1000, 2000);
- }
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
/* Skip if the queue is already in the requested state */
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
@@ -238,12 +232,11 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
/**
* ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
* @vsi: VSI pointer
- * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
*
* On error: returns error code (negative)
* On success: returns 0
*/
-static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
+static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
@@ -258,15 +251,11 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
if (!vsi->rx_rings)
goto err_rxrings;
- if (alloc_qvectors) {
- /* allocate memory for q_vector pointers */
- vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
- vsi->num_q_vectors,
- sizeof(*vsi->q_vectors),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- goto err_vectors;
- }
+ /* allocate memory for q_vector pointers */
+ vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
+ sizeof(*vsi->q_vectors), GFP_KERNEL);
+ if (!vsi->q_vectors)
+ goto err_vectors;
return 0;
@@ -279,25 +268,49 @@ err_txrings:
}
/**
- * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
+ * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
* @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
+{
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ break;
+ default:
+ dev_dbg(&vsi->back->pdev->dev,
+ "Not setting number of Tx/Rx descriptors for VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
+ * @vsi: the VSI being configured
+ * @vf_id: ID of the VF being configured
*
* Return 0 on success and a negative value on error
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = NULL;
+
+ if (vsi->type == ICE_VSI_VF)
+ vsi->vf_id = vf_id;
switch (vsi->type) {
case ICE_VSI_PF:
vsi->alloc_txq = pf->num_lan_tx;
vsi->alloc_rxq = pf->num_lan_rx;
- vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
case ICE_VSI_VF:
- vsi->alloc_txq = pf->num_vf_qps;
- vsi->alloc_rxq = pf->num_vf_qps;
+ vf = &pf->vf[vsi->vf_id];
+ vsi->alloc_txq = vf->num_vf_qs;
+ vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queue vectors, subtract 1 from the original vector
@@ -306,10 +319,11 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->num_q_vectors = pf->num_vf_msix - 1;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
break;
}
+
+ ice_vsi_set_num_desc(vsi);
}
/**
@@ -370,16 +384,15 @@ void ice_vsi_delete(struct ice_vsi *vsi)
}
/**
- * ice_vsi_free_arrays - clean up VSI resources
+ * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
* @vsi: pointer to VSI being cleared
- * @free_qvectors: bool to specify if q_vectors should be deallocated
*/
-static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
/* free the ring and vector containers */
- if (free_qvectors && vsi->q_vectors) {
+ if (vsi->q_vectors) {
devm_kfree(&pf->pdev->dev, vsi->q_vectors);
vsi->q_vectors = NULL;
}
@@ -427,7 +440,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
- ice_vsi_free_arrays(vsi, true);
+ ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
devm_kfree(&pf->pdev->dev, vsi);
@@ -455,10 +468,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @type: type of VSI
+ * @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+static struct ice_vsi *
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
struct ice_vsi *vsi = NULL;
@@ -484,18 +499,21 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
vsi->idx = pf->next_vsi;
vsi->work_lmt = ICE_DFLT_IRQ_WORK;
- ice_vsi_set_num_qs(vsi);
+ if (type == ICE_VSI_VF)
+ ice_vsi_set_num_qs(vsi, vf_id);
+ else
+ ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi, true))
+ if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
case ICE_VSI_VF:
- if (ice_vsi_alloc_arrays(vsi, true))
+ if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
default:
@@ -547,7 +565,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
/**
* __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
*
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
*/
@@ -579,11 +597,10 @@ err_scatter:
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
*
- * This is an internal function for assigning queues from the PF to VSI and
- * initially tries to find contiguous space. If it is not successful to find
- * contiguous space, then it tries with the scatter approach.
+ * This function first tries to find contiguous space. If it is not successful,
+ * it tries with the scatter approach.
*
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
*/
@@ -827,7 +844,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/* find the (rounded up) power-of-2 of qcount */
pow = order_base_2(qcount_rx);
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
@@ -852,7 +869,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
- vsi->num_rxq = offset;
+
+ /* if offset is non-zero, it was calculated correctly from the enabled
+ * TCs for this VSI; otherwise fall back to qcount_rx, which is always
+ * correct and non-zero because it is derived from the VSI's allocated
+ * Rx queues (at least 1, hence qcount_tx is also at least 1)
+ */
+ if (offset)
+ vsi->num_rxq = offset;
+ else
+ vsi->num_rxq = qcount_rx;
+
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
@@ -881,6 +909,9 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
u8 lut_type, hash_type;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
switch (vsi->type) {
case ICE_VSI_PF:
@@ -894,8 +925,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
return;
}
@@ -923,6 +953,7 @@ static int ice_vsi_init(struct ice_vsi *vsi)
if (!ctxt)
return -ENOMEM;
+ ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
@@ -948,6 +979,14 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, ctxt);
+ /* Enable MAC Antispoof with new VSI being initialized or updated */
+ if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ }
+
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -973,10 +1012,11 @@ static int ice_vsi_init(struct ice_vsi *vsi)
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
+ struct ice_pf *pf = vsi->back;
struct ice_ring *ring;
if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+ dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
v_idx);
return;
}
@@ -991,7 +1031,7 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
if (vsi->netdev)
netif_napi_del(&q_vector->napi);
- devm_kfree(&vsi->back->pdev->dev, q_vector);
+ devm_kfree(&pf->pdev->dev, q_vector);
vsi->q_vectors[v_idx] = NULL;
}
@@ -1003,7 +1043,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
int v_idx;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
ice_free_q_vector(vsi, v_idx);
}
@@ -1143,8 +1183,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors, vsi->idx);
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
break;
}
@@ -1153,7 +1192,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
"Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
if (vsi->type != ICE_VSI_VF) {
- ice_free_res(vsi->back->sw_irq_tracker,
+ ice_free_res(pf->sw_irq_tracker,
vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += num_q_vectors;
}
@@ -1215,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->ring_active = false;
ring->vsi = vsi;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_tx_desc;
vsi->tx_rings[i] = ring;
}
@@ -1234,7 +1273,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_rx_desc;
vsi->rx_rings[i] = ring;
}
@@ -1253,7 +1292,11 @@ err_out:
* through the MSI-X enabling code. On a constrained vector budget, we map Tx
* and Rx rings to the vector as "efficiently" as possible.
*/
+#ifdef CONFIG_DCB
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+#endif /* CONFIG_DCB */
{
int q_vectors = vsi->num_q_vectors;
int tx_rings_rem, rx_rings_rem;
@@ -1339,7 +1382,6 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
*/
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
- u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
struct ice_aqc_get_set_rss_keys *key;
struct ice_pf *pf = vsi->back;
enum ice_status status;
@@ -1361,31 +1403,30 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
- dev_err(&vsi->back->pdev->dev,
+ dev_err(&pf->pdev->dev,
"set_rss_lut failed, error %d\n", status);
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
- key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
+ key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto ice_vsi_cfg_rss_exit;
}
if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ memcpy(key,
+ (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
+ ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
else
- netdev_rss_key_fill((void *)seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- memcpy(&key->standard_rss_key, seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ netdev_rss_key_fill((void *)key,
+ ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
- dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
+ dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
status);
err = -EIO;
}
@@ -1397,12 +1438,12 @@ ice_vsi_cfg_rss_exit:
}
/**
- * ice_add_mac_to_list - Add a mac address filter entry to the list
+ * ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
* @macaddr: the MAC address to be added.
*
- * Adds mac address filter entry to the temp list
+ * Adds MAC address filter entry to the temp list
*
* Returns 0 on success or ENOMEM on failure.
*/
@@ -1504,7 +1545,7 @@ void ice_free_fltr_list(struct device *dev, struct list_head *h)
/**
* ice_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be added
+ * @vid: VLAN ID to be added
*/
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
@@ -1542,7 +1583,7 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
/**
* ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be removed
+ * @vid: VLAN ID to be removed
*
* Returns 0 on success and negative on failure
*/
@@ -1551,7 +1592,8 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
- int status = 0;
+ enum ice_status status;
+ int err = 0;
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
if (!list)
@@ -1567,14 +1609,20 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
- if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
- dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
- vid, vsi->vsi_num);
- status = -EIO;
+ status = ice_remove_vlan(&pf->hw, &tmp_add_list);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
+ vid, vsi->vsi_num, status);
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "Error removing VLAN %d on vsi %i error: %d\n",
+ vid, vsi->vsi_num, status);
+ err = -EIO;
}
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
- return status;
+ return err;
}
/**
@@ -1586,7 +1634,6 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
*/
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
- int err = 0;
u16 i;
if (vsi->type == ICE_VSI_VF)
@@ -1601,14 +1648,19 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
vsi->rx_buf_len = ICE_RXBUF_2048;
setup_rings:
/* set up individual rings */
- for (i = 0; i < vsi->num_rxq && !err; i++)
- err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+ for (i = 0; i < vsi->num_rxq; i++) {
+ int err;
- if (err) {
- dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
- return -EIO;
+ err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ i, err);
+ return err;
+ }
}
- return err;
+
+ return 0;
}
/**
@@ -1640,7 +1692,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
num_q_grps = 1;
/* set up and configure the Tx queues for each enabled TC */
- for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+ ice_for_each_traffic_class(tc) {
if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
break;
@@ -1660,10 +1712,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
rings[q_idx]->tail =
pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- num_q_grps, qg_buf, buf_len,
- NULL);
+ i, num_q_grps, qg_buf,
+ buf_len, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev,
+ dev_err(&pf->pdev->dev,
"Failed to set LAN Tx queue context, error: %d\n",
status);
err = -ENODEV;
@@ -1707,7 +1759,7 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
* This function converts a decimal interrupt rate limit in usecs to the format
* expected by firmware.
*/
-static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
u32 val = intrl / gran;
@@ -1717,17 +1769,49 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
}
/**
+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
+ * @hw: board specific structure
+ */
+static void ice_cfg_itr_gran(struct ice_hw *hw)
+{
+ u32 regval = rd32(hw, GLINT_CTL);
+
+ /* no need to update global register if ITR gran is already set */
+ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
+ (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
+ GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
+ GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
+ GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
+ GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ return;
+
+ regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
+ GLINT_CTL_ITR_GRAN_200_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
+ GLINT_CTL_ITR_GRAN_100_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
+ GLINT_CTL_ITR_GRAN_50_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
+ GLINT_CTL_ITR_GRAN_25_M);
+ wr32(hw, GLINT_CTL, regval);
+}
+
+/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
* @q_vector: interrupt vector that's being configured
- * @vector: HW vector index to apply the interrupt throttling to
*
* Configure interrupt throttling values for the ring containers that are
* associated with the interrupt vector passed in.
*/
static void
-ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
+ ice_cfg_itr_gran(hw);
+
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
@@ -1738,8 +1822,7 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
- rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
@@ -1753,8 +1836,7 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
- rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
}
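
As a rough worked example of the conversion done by ice_intrl_usec_to_reg() above: only the division step visible in the hunk is reproduced here, and the 2 usec granularity is a hypothetical value for hw->intrl_gran; the real function performs additional checks that are not shown in this hunk.

#include <stdio.h>

/* Illustrative reimplementation of just the "val = intrl / gran" step. */
static unsigned int intrl_usec_to_reg(unsigned int intrl, unsigned int gran)
{
	return intrl / gran;
}

int main(void)
{
	/* With a hypothetical 2 usec rate-limit granularity, a 100 usec
	 * interrupt rate limit programs a register value of 50.
	 */
	printf("100 usec -> %u\n", intrl_usec_to_reg(100, 2));
	printf(" 50 usec -> %u\n", intrl_usec_to_reg(50, 2));
	return 0;
}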
@@ -1766,17 +1848,17 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0, rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
+ u16 reg_idx = q_vector->reg_idx;
- ice_cfg_itr(hw, q_vector, vector);
+ ice_cfg_itr(hw, q_vector);
- wr32(hw, GLINT_RATE(vector),
+ wr32(hw, GLINT_RATE(reg_idx),
ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
/* Both Transmit Queue Interrupt Cause Control register
@@ -1791,33 +1873,37 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF.
*/
for (q = 0; q < q_vector->num_ring_tx; q++) {
- int itr_idx = q_vector->tx.itr_idx;
+ int itr_idx = (q_vector->tx.itr_idx <<
+ QINT_TQCTL_ITR_INDX_S) &
+ QINT_TQCTL_ITR_INDX_M;
u32 val;
if (vsi->type == ICE_VSI_VF)
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_TQCTL_ITR_INDX_S) |
- ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ (((i + 1) << QINT_TQCTL_MSIX_INDX_S) &
+ QINT_TQCTL_MSIX_INDX_M);
else
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_TQCTL_ITR_INDX_S) |
- (vector << QINT_TQCTL_MSIX_INDX_S);
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((reg_idx << QINT_TQCTL_MSIX_INDX_S) &
+ QINT_TQCTL_MSIX_INDX_M);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
- int itr_idx = q_vector->rx.itr_idx;
+ int itr_idx = (q_vector->rx.itr_idx <<
+ QINT_RQCTL_ITR_INDX_S) &
+ QINT_RQCTL_ITR_INDX_M;
u32 val;
if (vsi->type == ICE_VSI_VF)
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_RQCTL_ITR_INDX_S) |
- ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ (((i + 1) << QINT_RQCTL_MSIX_INDX_S) &
+ QINT_RQCTL_MSIX_INDX_M);
else
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_RQCTL_ITR_INDX_S) |
- (vector << QINT_RQCTL_MSIX_INDX_S);
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((reg_idx << QINT_RQCTL_MSIX_INDX_S) &
+ QINT_RQCTL_MSIX_INDX_M);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq++;
}
@@ -1848,6 +1934,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
*/
ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+ /* Preserve existing VLAN strip setting */
+ ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
+ ICE_AQ_VSI_VLAN_EMOD_M);
+
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
@@ -1937,7 +2027,7 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
- * @rel_vmvf_num: Relative id of VF/VM
+ * @rel_vmvf_num: Relative ID of VF/VM
* @rings: Tx ring array to be stopped
* @offset: offset within vsi->txq_map
*/
@@ -1947,10 +2037,10 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ int tc, q_idx = 0, err = 0;
+ u16 *q_ids, *q_handles, i;
enum ice_status status;
u32 *q_teids, val;
- u16 *q_ids, i;
- int err = 0;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
@@ -1967,50 +2057,69 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
goto err_alloc_q_ids;
}
- /* set up the Tx queue list to be disabled */
- ice_for_each_txq(vsi, i) {
- u16 v_idx;
+ q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
+ sizeof(*q_handles), GFP_KERNEL);
+ if (!q_handles) {
+ err = -ENOMEM;
+ goto err_alloc_q_handles;
+ }
- if (!rings || !rings[i] || !rings[i]->q_vector) {
- err = -EINVAL;
- goto err_out;
- }
+ /* set up the Tx queue list to be disabled for each enabled TC */
+ ice_for_each_traffic_class(tc) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+ break;
- q_ids[i] = vsi->txq_map[i + offset];
- q_teids[i] = rings[i]->txq_teid;
+ for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+ if (!rings || !rings[q_idx] ||
+ !rings[q_idx]->q_vector) {
+ err = -EINVAL;
+ goto err_out;
+ }
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
+ q_ids[i] = vsi->txq_map[q_idx + offset];
+ q_teids[i] = rings[q_idx]->txq_teid;
+ q_handles[i] = i;
- /* software is expected to wait for 100 ns */
- ndelay(100);
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
- /* trigger a software interrupt for the vector associated to
- * the queue to schedule NAPI handler
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector
+ * associated to the queue to schedule NAPI handler
+ */
+ wr32(hw, GLINT_DYN_CTL(rings[i]->q_vector->reg_idx),
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_MSK_M);
+ q_idx++;
+ }
+ status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
+ vsi->num_txq, q_handles, q_ids,
+ q_teids, rst_src, rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an active
+ * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not
+ * an error as the reset operation disables queues at the
+ * hardware level anyway.
*/
- v_idx = rings[i]->q_vector->v_idx;
- wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
- GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
- }
- status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
- rst_src, rel_vmvf_num, NULL);
- /* if the disable queue command was exercised during an active reset
- * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
- * the reset operation disables queues at the hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_info(&pf->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n",
- status);
- err = -ENODEV;
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&pf->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ err = -ENODEV;
+ }
}
err_out:
+ devm_kfree(&pf->pdev->dev, q_handles);
+
+err_alloc_q_handles:
devm_kfree(&pf->pdev->dev, q_ids);
err_alloc_q_ids:
@@ -2023,10 +2132,11 @@ err_alloc_q_ids:
* ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
- * @rel_vmvf_num: Relative id of VF/VM
+ * @rel_vmvf_num: Relative ID of VF/VM
*/
-int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
- enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
+int
+ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
{
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
0);
@@ -2036,19 +2146,22 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
+ * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
*
* returns 0 if VSI is updated, negative otherwise
*/
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
struct ice_vsi_ctx *ctxt;
struct device *dev;
+ struct ice_pf *pf;
int status;
if (!vsi)
return -EINVAL;
- dev = &vsi->back->pdev->dev;
+ pf = vsi->back;
+ dev = &pf->pdev->dev;
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2067,14 +2180,16 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
+ if (!vlan_promisc)
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- vsi->back->hw.adminq.sq_last_status);
+ pf->hw.adminq.sq_last_status);
goto err_out;
}
@@ -2089,12 +2204,98 @@ err_out:
return -EIO;
}
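+/**
+ * ice_vsi_set_tc_cfg - set VSI TC configuration from the port's local DCBX config
+ * @vsi: the VSI being configured
+ */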
+static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
+
+ vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
+ vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+}
+
+/**
+ * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
+ * @vsi: VSI to set the q_vectors register index on
+ */
+static int
+ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (!vsi || !vsi->q_vectors)
+ return -EINVAL;
+
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (!q_vector) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set reg_idx on q_vector %d VSI %d\n",
+ i, vsi->vsi_num);
+ goto clear_reg_idx;
+ }
+
+ q_vector->reg_idx = q_vector->v_idx + vsi->hw_base_vector;
+ }
+
+ return 0;
+
+clear_reg_idx:
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (q_vector)
+ q_vector->reg_idx = 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
+ * @vsi: the VSI being configured
+ * @add_rule: boolean value to add or remove ethertype filter rule
+ */
+static void
+ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
+{
+ struct ice_fltr_list_entry *list;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+
+ list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return;
+
+ list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ list->fltr_info.fltr_act = ICE_DROP_PACKET;
+ list->fltr_info.flag = ICE_FLTR_TX;
+ list->fltr_info.src_id = ICE_SRC_ID_VSI;
+ list->fltr_info.vsi_handle = vsi->idx;
+ list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
+
+ INIT_LIST_HEAD(&list->list_entry);
+ list_add(&list->list_entry, &tmp_add_list);
+
+ if (add_rule)
+ status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
+ else
+ status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
+
+ if (status)
+ dev_err(&pf->pdev->dev,
+ "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
+ vsi->vsi_num, status);
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+}
+
/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
* @type: VSI type
- * @vf_id: defines VF id to which this VSI connects. This field is meant to be
+ * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
*
@@ -2112,7 +2313,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- vsi = ice_vsi_alloc(pf, type);
+ if (type == ICE_VSI_VF)
+ vsi = ice_vsi_alloc(pf, type, vf_id);
+ else
+ vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
return NULL;
@@ -2120,6 +2325,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
vsi->port_info = pi;
vsi->vsw = pf->first_sw;
+ if (vsi->type == ICE_VSI_PF)
+ vsi->ethtype = ETH_P_PAUSE;
+
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
@@ -2132,7 +2340,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
- /* set tc configuration */
+ /* set TC configuration */
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
@@ -2150,6 +2358,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_alloc_q_vector;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto unroll_vector_base;
@@ -2188,6 +2400,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
} else {
vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
}
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
pf->q_left_tx -= vsi->alloc_txq;
pf->q_left_rx -= vsi->alloc_rxq;
break;
@@ -2203,18 +2419,29 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto unroll_vector_base;
}
+ /* Add a switch rule of lookup type ETHERTYPE to drop all Tx flow
+ * control frames coming from VSIs, restricting a malicious VF from
+ * sending out PAUSE or PFC frames. If enabled, FW can still send FC
+ * frames. The rule is added once for the PF VSI in order to create
+ * the appropriate recipe, since VSI/VSI list is ignored with drop
+ * action...
+ */
+ if (vsi->type == ICE_VSI_PF)
+ ice_vsi_add_rem_eth_mac(vsi, true);
+
return vsi;
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
/* reclaim HW interrupt back to the common pool */
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
pf->num_avail_hw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
@@ -2281,7 +2508,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
u16 vector = i + base;
int irq_num;
@@ -2500,12 +2727,12 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
/* disable each interrupt */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- for (i = vsi->hw_base_vector;
- i < (vsi->num_q_vectors + vsi->hw_base_vector); i++)
- wr32(hw, GLINT_DYN_CTL(i), 0);
+ ice_for_each_q_vector(vsi, i)
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
ice_flush(hw);
- for (i = 0; i < vsi->num_q_vectors; i++)
+
+ ice_for_each_q_vector(vsi, i)
synchronize_irq(pf->msix_entries[i + base].vector);
}
}
@@ -2551,22 +2778,23 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* reclaim interrupt vectors back to PF */
if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
- vsi->idx);
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
/* reclaim HW interrupts back to the common pool */
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
- vsi->idx);
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
pf->num_avail_hw_msix += vsi->num_q_vectors;
} else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
/* Reclaim VF resources back only while freeing all VFs or
* vector reassignment is requested
*/
- ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+ ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
vsi->idx);
pf->num_avail_hw_msix += pf->num_vf_msix;
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_vsi_add_rem_eth_mac(vsi, false);
+
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
@@ -2596,6 +2824,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vf *vf = NULL;
struct ice_pf *pf;
int ret, i;
@@ -2603,16 +2832,38 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
return -EINVAL;
pf = vsi->back;
+ if (vsi->type == ICE_VSI_VF)
+ vf = &pf->vf[vsi->vf_id];
+
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- vsi->sw_base_vector = 0;
+
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ vsi->sw_base_vector = 0;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else {
+ /* Reclaim VF resources back to the common pool for reset and
+ * rebuild, with vector reassignment
+ */
+ ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
vsi->hw_base_vector = 0;
+
ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi, false);
- ice_dev_onetime_setup(&vsi->back->hw);
- ice_vsi_set_num_qs(vsi);
+ ice_vsi_free_arrays(vsi);
+ ice_dev_onetime_setup(&pf->hw);
+ if (vsi->type == ICE_VSI_VF)
+ ice_vsi_set_num_qs(vsi, vf->vf_id);
+ else
+ ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2620,7 +2871,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret < 0)
goto err_vsi;
- ret = ice_vsi_alloc_arrays(vsi, false);
+ ret = ice_vsi_alloc_arrays(vsi);
if (ret < 0)
goto err_vsi;
@@ -2634,6 +2885,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto err_vectors;
@@ -2643,7 +2898,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
break;
case ICE_VSI_VF:
@@ -2655,12 +2910,16 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto err_vectors;
- vsi->back->q_left_tx -= vsi->alloc_txq;
- vsi->back->q_left_rx -= vsi->alloc_rxq;
+ pf->q_left_tx -= vsi->alloc_txq;
+ pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
break;
@@ -2673,8 +2932,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Failed VSI lan queue config\n");
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto err_vectors;
}
return 0;
@@ -2690,7 +2950,7 @@ err_rings:
}
err_vsi:
ice_vsi_clear(vsi);
- set_bit(__ICE_RESET_FAILED, vsi->back->state);
+ set_bit(__ICE_RESET_FAILED, pf->state);
return ret;
}
@@ -2705,3 +2965,125 @@ bool ice_is_reset_in_progress(unsigned long *state)
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
}
+
+#ifdef CONFIG_DCB
+/**
+ * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
+ * @vsi: VSI being configured
+ * @ctx: the context buffer returned from AQ VSI update command
+ */
+static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
+{
+ vsi->info.mapping_flags = ctx->info.mapping_flags;
+ memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
+ sizeof(vsi->info.q_mapping));
+ memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf = vsi->back;
+ struct ice_dcbx_cfg *dcbcfg;
+ u8 netdev_tc;
+ int i;
+
+ if (!netdev)
+ return;
+
+ if (!ena_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
+ return;
+
+ dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
+ * @vsi: VSI to be configured
+ * @ena_tc: TC bitmap
+ *
+ * VSI queues are expected to be quiesced before calling this function
+ */
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vsi_ctx *ctx;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ int i, ret = 0;
+ u8 num_tc = 0;
+
+ ice_for_each_traffic_class(i) {
+ /* build bitmap of enabled TCs */
+ if (ena_tc & BIT(i))
+ num_tc++;
+ /* populate max_txqs per TC */
+ max_txqs[i] = pf->num_lan_tx;
+ }
+
+ vsi->tc_cfg.ena_tc = ena_tc;
+ vsi->tc_cfg.numtc = num_tc;
+
+ ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->vf_num = 0;
+ ctx->info = vsi->info;
+
+ ice_vsi_setup_q_map(vsi, ctx);
+
+ /* must indicate which sections of the VSI context are being modified */
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
+ if (status) {
+ dev_info(&pf->pdev->dev, "Failed VSI Update\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed TC config, error %d\n",
+ vsi->vsi_num, status);
+ ret = -EIO;
+ goto out;
+ }
+ ice_vsi_update_q_map(vsi, ctx);
+ vsi->info.valid_sections = 0;
+
+ ice_vsi_cfg_netdev_tc(vsi, ena_tc);
+out:
+ devm_kfree(&pf->pdev->dev, ctx);
+ return ret;
+}
+#endif /* CONFIG_DCB */
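
A small sketch of the TC bookkeeping at the top of ice_vsi_cfg_tc(), using a made-up enable bitmap and queue count; the only behaviour reproduced is counting enabled TCs and giving each TC the same Tx queue budget before the scheduler call.

#include <stdio.h>

#define MAX_TC	8	/* mirrors ICE_MAX_TRAFFIC_CLASS */

int main(void)
{
	unsigned int ena_tc = 0x0B;	/* hypothetical: TCs 0, 1 and 3 enabled */
	unsigned int max_txqs[MAX_TC] = { 0 };
	unsigned int num_lan_tx = 16;	/* hypothetical pf->num_lan_tx */
	unsigned int num_tc = 0;
	int i;

	/* Count enabled TCs and give every TC the same Tx queue budget,
	 * as ice_vsi_cfg_tc() does before calling ice_cfg_vsi_lan().
	 */
	for (i = 0; i < MAX_TC; i++) {
		if (ena_tc & (1u << i))
			num_tc++;
		max_txqs[i] = num_lan_tx;
	}

	printf("ena_tc=0x%02x -> numtc=%u, max_txqs[0]=%u\n",
	       ena_tc, num_tc, max_txqs[0]);	/* numtc=3 */
	return 0;
}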
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 7988a53729a9..a91d3553cc89 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -35,12 +35,16 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
+#endif /* CONFIG_DCB */
+
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
@@ -62,6 +66,10 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
void ice_vsi_put_qs(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+#endif /* CONFIG_DCB */
+
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -70,8 +78,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
-int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
#endif /* !_ICE_LIB_H_ */
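
The CONFIG_DCB-gated prototypes above pair with the matching #ifdef in ice_lib.c/ice_main.c so the same function is exported when DCB support is built in and stays file-local otherwise. A compilable illustration of that linkage pattern (not driver code, symbol names are made up) might look like:

#include <stdio.h>

/* Toggle this to see both variants; it stands in for the kernel's
 * CONFIG_DCB Kconfig symbol.
 */
#define CONFIG_DCB 1

#ifdef CONFIG_DCB
void map_rings_to_vectors(void);		/* exported for the DCB code */
#else
static void map_rings_to_vectors(void);		/* otherwise file-local */
#endif

#ifdef CONFIG_DCB
void map_rings_to_vectors(void)
#else
static void map_rings_to_vectors(void)
#endif
{
	printf("mapping rings to vectors\n");
}

int main(void)
{
	map_rings_to_vectors();
	return 0;
}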
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 47cc3f905b7f..7843abf4d44d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7,8 +7,9 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_dcb_lib.h"
-#define DRV_VERSION "0.7.2-k"
+#define DRV_VERSION "0.7.4-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -30,7 +31,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;
-static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static void ice_vsi_release_all(struct ice_pf *pf);
@@ -113,14 +113,14 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
}
/**
- * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
+ * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
* @netdev: the net device on which the sync is happening
- * @addr: mac address to sync
+ * @addr: MAC address to sync
*
* This is a callback function which is called by the in kernel device sync
* functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
* populates the tmp_sync_list, which is later used by ice_add_mac to add the
- * mac filters from the hardware.
+ * MAC filters from the hardware.
*/
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
@@ -134,14 +134,14 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
}
/**
- * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
+ * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
* @netdev: the net device on which the unsync is happening
- * @addr: mac address to unsync
+ * @addr: MAC address to unsync
*
* This is a callback function which is called by the in kernel device unsync
* functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
* populates the tmp_unsync_list, which is later used by ice_remove_mac to
- * delete the mac filters from the hardware.
+ * delete the MAC filters from the hardware.
*/
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
@@ -168,6 +168,39 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @set_promisc: enable or disable promisc flag request
+ */
+static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ enum ice_status status = 0;
+
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ if (vsi->vlan_ena) {
+ status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+ set_promisc);
+ } else {
+ if (set_promisc)
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ else
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ }
+
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -182,6 +215,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
struct ice_hw *hw = &pf->hw;
enum ice_status status = 0;
u32 changed_flags = 0;
+ u8 promisc_m;
int err = 0;
if (!vsi->netdev)
@@ -211,7 +245,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
netif_addr_unlock_bh(netdev);
}
- /* Remove mac addresses in the unsync list */
+ /* Remove MAC addresses in the unsync list */
status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
if (status) {
@@ -223,12 +257,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
- /* Add mac addresses in the sync list */
+ /* Add MAC addresses in the sync list */
status = ice_add_mac(hw, &vsi->tmp_sync_list);
ice_free_fltr_list(dev, &vsi->tmp_sync_list);
- if (status) {
+ /* If the filter is added successfully or already exists, do not
+ * report it as an error. Instead continue processing the rest of
+ * the function.
+ */
+ if (status && status != ICE_ERR_ALREADY_EXISTS) {
netdev_err(netdev, "Failed to add MAC filters\n");
- /* If there is no more space for new umac filters, vsi
+ /* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
@@ -245,49 +283,56 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
/* check for changes in promiscuous modes */
- if (changed_flags & IFF_ALLMULTI)
- netdev_warn(netdev, "Unsupported configuration\n");
+ if (changed_flags & IFF_ALLMULTI) {
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ if (vsi->vlan_ena)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+ err = ice_cfg_promisc(vsi, promisc_m, true);
+ if (err) {
+ netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags &= ~IFF_ALLMULTI;
+ goto out_promisc;
+ }
+ } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ if (vsi->vlan_ena)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+ err = ice_cfg_promisc(vsi, promisc_m, false);
+ if (err) {
+ netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ goto out_promisc;
+ }
+ }
+ }
if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
- /* Apply TX filter rule to get traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
- ICE_FLTR_TX);
- if (status) {
- netdev_err(netdev, "Error setting default VSI %i tx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags &= ~IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
- }
- /* Apply RX filter rule to get traffic from wire */
+ /* Apply Rx filter rule to get traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_RX);
if (status) {
- netdev_err(netdev, "Error setting default VSI %i rx rule\n",
+ netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_PROMISC;
err = -EIO;
goto out_promisc;
}
} else {
- /* Clear TX filter rule to stop traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
- ICE_FLTR_TX);
- if (status) {
- netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags |= IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
- }
- /* Clear RX filter to remove traffic from wire */
+ /* Clear Rx filter to remove traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
+ netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags |= IFF_PROMISC;
err = -EIO;
@@ -322,7 +367,7 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
- for (v = 0; v < pf->num_alloc_vsi; v++)
+ ice_for_each_vsi(pf, v)
if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
ice_vsi_sync_fltr(pf->vsi[v])) {
/* come back and try again later */
@@ -332,6 +377,51 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
/**
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
+ */
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+{
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ }
+ } else {
+ ice_vsi_close(vsi);
+ }
+ }
+}
+
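The locked argument above implements a common pattern: the helper takes and releases the global lock only when the caller does not already hold it, so the same routine can be used from both locked and unlocked contexts. A standalone sketch of the pattern, with a pthread mutex standing in for rtnl_lock and purely illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_stop(void) { puts("stopping queues"); }

/* take the lock only when the caller does not already hold it */
static void dis_one(bool locked)
{
        if (!locked) {
                pthread_mutex_lock(&big_lock);
                do_stop();
                pthread_mutex_unlock(&big_lock);
        } else {
                do_stop();
        }
}

int main(void)
{
        dis_one(false);                 /* standalone caller */

        pthread_mutex_lock(&big_lock);
        dis_one(true);                  /* caller already holds the lock */
        pthread_mutex_unlock(&big_lock);
        return 0;
}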
+/**
+ * ice_pf_dis_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ * @locked: is the rtnl_lock already held
+ */
+#ifdef CONFIG_DCB
+void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+#else
+static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+#endif /* CONFIG_DCB */
+{
+ int v;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ ice_dis_vsi(pf->vsi[v], locked);
+}
+
+/**
* ice_prepare_for_reset - prep for the core to reset
* @pf: board private structure
*
@@ -342,12 +432,16 @@ ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ /* already prepared for reset */
+ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+ return;
+
/* Notify VFs of impending reset */
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
/* disable the VSIs and their queues that are not already DOWN */
- ice_pf_dis_all_vsi(pf);
+ ice_pf_dis_all_vsi(pf, false);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -394,6 +488,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild(pf);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
+ ice_reset_all_vfs(pf, true);
}
}
@@ -416,10 +511,15 @@ static void ice_reset_subtask(struct ice_pf *pf)
* for the reset now), poll for reset done, rebuild and return.
*/
if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
- clear_bit(__ICE_GLOBR_RECV, pf->state);
- clear_bit(__ICE_CORER_RECV, pf->state);
- if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
- ice_prepare_for_reset(pf);
+ /* Perform the largest reset requested */
+ if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
+ reset_type = ICE_RESET_CORER;
+ if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
+ reset_type = ICE_RESET_GLOBR;
+ /* return if no valid reset type requested */
+ if (reset_type == ICE_RESET_INVAL)
+ return;
+ ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
@@ -429,13 +529,14 @@ static void ice_reset_subtask(struct ice_pf *pf)
pf->hw.reset_ongoing = false;
ice_rebuild(pf);
/* clear bit to resume normal operations, but
- * ICE_NEEDS_RESTART bit is set incase rebuild failed
+ * ICE_NEEDS_RESTART bit is set in case rebuild failed
*/
clear_bit(__ICE_RESET_OICR_RECV, pf->state);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
clear_bit(__ICE_CORER_REQ, pf->state);
clear_bit(__ICE_GLOBR_REQ, pf->state);
+ ice_reset_all_vfs(pf, true);
}
return;
@@ -469,6 +570,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
const char *speed;
const char *fc;
+ if (!vsi)
+ return;
+
if (vsi->current_isup == isup)
return;
@@ -519,6 +623,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
case ICE_FC_RX_PAUSE:
fc = "RX";
break;
+ case ICE_FC_NONE:
+ fc = "None";
+ break;
default:
fc = "Unknown";
break;
@@ -529,21 +636,22 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
/**
- * ice_vsi_link_event - update the vsi's netdev
- * @vsi: the vsi on which the link event occurred
- * @link_up: whether or not the vsi needs to be set up or down
+ * ice_vsi_link_event - update the VSI's netdev
+ * @vsi: the VSI on which the link event occurred
+ * @link_up: whether or not the VSI needs to be set up or down
*/
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
- if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+ if (!vsi)
+ return;
+
+ if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
return;
if (vsi->type == ICE_VSI_PF) {
- if (!vsi->netdev) {
- dev_dbg(&vsi->back->pdev->dev,
- "vsi->netdev is not initialized!\n");
+ if (link_up == netif_carrier_ok(vsi->netdev))
return;
- }
+
if (link_up) {
netif_carrier_on(vsi->netdev);
netif_tx_wake_all_queues(vsi->netdev);
@@ -558,61 +666,51 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
*
- * Returns -EIO if ice_get_link_status() fails
- * Returns 0 on success
+ * Returns 0 on success and negative on failure
*/
static int
-ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
+ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
+ u16 link_speed)
{
- u8 new_link_speed, old_link_speed;
struct ice_phy_info *phy_info;
- bool new_link_same_as_old;
- bool new_link, old_link;
- u8 lport;
- u16 v;
+ struct ice_vsi *vsi;
+ u16 old_link_speed;
+ bool old_link;
+ int result;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
- /* Force ice_get_link_status() to update link info */
- phy_info->get_link_info = true;
- old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
+ old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
old_link_speed = phy_info->link_info_old.link_speed;
- lport = pi->lport;
- if (ice_get_link_status(pi, &new_link)) {
+ /* update the link info structures and re-enable link events;
+ * don't bail out on failure since other bookkeeping still needs to be done
+ */
+ result = ice_update_link_info(pi);
+ if (result)
dev_dbg(&pf->pdev->dev,
- "Could not get link status for port %d\n", lport);
- return -EIO;
- }
-
- new_link_speed = phy_info->link_info.link_speed;
+ "Failed to update link status and re-enable link events for port %d\n",
+ pi->lport);
- new_link_same_as_old = (new_link == old_link &&
- new_link_speed == old_link_speed);
-
- ice_for_each_vsi(pf, v) {
- struct ice_vsi *vsi = pf->vsi[v];
-
- if (!vsi || !vsi->port_info)
- continue;
+ /* nothing more to do if both the link state and the speed are unchanged */
+ if (link_up == old_link && link_speed == old_link_speed)
+ return result;
- if (new_link_same_as_old &&
- (test_bit(__ICE_DOWN, vsi->state) ||
- new_link == netif_carrier_ok(vsi->netdev)))
- continue;
+ vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+ if (!vsi || !vsi->port_info)
+ return -EINVAL;
- if (vsi->port_info->lport == lport) {
- ice_print_link_msg(vsi, new_link);
- ice_vsi_link_event(vsi, new_link);
- }
- }
+ ice_vsi_link_event(vsi, link_up);
+ ice_print_link_msg(vsi, link_up);
- if (!new_link_same_as_old && pf->num_alloc_vfs)
+ if (pf->num_alloc_vfs)
ice_vc_notify_link_state(pf);
- return 0;
+ return result;
}
/**
@@ -635,19 +733,73 @@ static void ice_watchdog_subtask(struct ice_pf *pf)
pf->serv_tmr_prev = jiffies;
- if (ice_link_event(pf, pf->hw.port_info))
- dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
-
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++)
+ ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->netdev)
ice_update_vsi_stats(pf->vsi[i]);
}
/**
+ * ice_init_link_events - enable/initialize link events
+ * @pi: pointer to the port_info instance
+ *
+ * Returns -EIO on failure, 0 on success
+ */
+static int ice_init_link_events(struct ice_port_info *pi)
+{
+ u16 mask;
+
+ mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
+ ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+
+ if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to set link event mask for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to enable link events for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ return 0;
+}
+
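The event mask above is built by inverting the set of events the PF wants delivered; a bit left set in the mask means that event stays masked off. A small sketch of the same computation, using made-up bit values in place of the real ICE_AQ_LINK_EVENT_* definitions:

#include <stdint.h>
#include <stdio.h>

/* hypothetical bit positions standing in for the ICE_AQ_LINK_EVENT_* flags */
#define EV_UPDOWN            0x0001
#define EV_MEDIA_NA          0x0002
#define EV_MODULE_QUAL_FAIL  0x0004

int main(void)
{
        /* the mask is "everything except the events we care about":
         * a bit set in the mask means that event is masked off
         */
        uint16_t mask = (uint16_t)~(EV_UPDOWN | EV_MEDIA_NA |
                                    EV_MODULE_QUAL_FAIL);

        printf("event mask = 0x%04x\n", mask);  /* 0xfff8 with these values */
        return 0;
}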
+/**
+ * ice_handle_link_event - handle link event via ARQ
+ * @pf: PF that the link event is associated with
+ * @event: event structure containing link status info
+ */
+static int
+ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ struct ice_aqc_get_link_status_data *link_data;
+ struct ice_port_info *port_info;
+ int status;
+
+ link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
+ port_info = pf->hw.port_info;
+ if (!port_info)
+ return -EINVAL;
+
+ status = ice_link_event(pf, port_info,
+ !!(link_data->link_info & ICE_AQ_LINK_UP),
+ le16_to_cpu(link_data->link_speed));
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Could not process link event, error %d\n", status);
+
+ return status;
+}
+
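The handler reinterprets the raw receive-queue buffer as the get-link-status payload, then pulls out the link-up bit and the little-endian speed field. A standalone sketch of that decode step; the struct layout and constants here are illustrative, not the real descriptor format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* toy payload: a flags byte, a reserved byte, then a 16-bit LE speed field */
struct link_status_data {
        uint8_t  link_info;
        uint8_t  rsvd;
        uint16_t link_speed_le;
};

#define LINK_UP 0x01

/* convert a little-endian 16-bit field to host order, byte by byte */
static uint16_t le16_to_host(uint16_t v)
{
        const uint8_t *b = (const uint8_t *)&v;

        return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
        uint8_t raw[4] = { 0x01, 0x00, 0xe8, 0x03 };  /* link up, speed 1000 */
        struct link_status_data data;

        memcpy(&data, raw, sizeof(data));
        printf("link %s, speed %u\n",
               (data.link_info & LINK_UP) ? "up" : "down",
               le16_to_host(data.link_speed_le));
        return 0;
}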
+/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
* @q_type: specific Control queue type
@@ -750,12 +902,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
+ case ice_aqc_opc_get_link_status:
+ if (ice_handle_link_event(pf, &event))
+ dev_err(&pf->pdev->dev,
+ "Could not handle link event\n");
+ break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
break;
+ case ice_aqc_opc_lldp_set_mib_change:
+ ice_dcb_process_lldp_set_mib_change(pf, &event);
+ break;
default:
dev_dbg(&pf->pdev->dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
@@ -877,6 +1037,18 @@ static void ice_service_task_stop(struct ice_pf *pf)
}
/**
+ * ice_service_task_restart - restart service task and schedule works
+ * @pf: board private structure
+ *
+ * This function is needed on the suspend and resume paths (e.g. WoL scenarios)
+ */
+static void ice_service_task_restart(struct ice_pf *pf)
+{
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
+ ice_service_task_schedule(pf);
+}
+
+/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
@@ -901,7 +1073,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
u32 reg;
int i;
- if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
return;
/* find what triggered the MDD event */
@@ -993,10 +1165,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
struct ice_vf *vf = &pf->vf[i];
+ mdd_detected = false;
+
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1004,7 +1178,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1012,7 +1186,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1020,26 +1194,19 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
}
- if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
- dev_info(&pf->pdev->dev,
- "Too many MDD events on VF %d, disabled\n", i);
+ if (mdd_detected) {
+ vf->num_mdd_events++;
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
}
}
- /* re-enable MDD interrupt cause */
- clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
- reg = rd32(hw, PFINT_OICR_ENA);
- reg |= PFINT_OICR_MAL_DETECT_M;
- wr32(hw, PFINT_OICR_ENA, reg);
- ice_flush(hw);
}
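With this rework the per-VF flag is cleared at the top of each loop iteration and set only when one of that VF's VP_MDET_* registers reports a valid event, so only the offending VF gets counted and disabled. A reduced sketch of the per-iteration flag pattern, with a stubbed register read and illustrative names:

#include <stdbool.h>
#include <stdio.h>

#define NUM_VFS 4

/* pretend register read: only VF 2 has a pending event in this example */
static bool vf_has_event(int vf) { return vf == 2; }

int main(void)
{
        int events[NUM_VFS] = { 0 };
        int i;

        for (i = 0; i < NUM_VFS; i++) {
                bool detected = false;   /* reset for every VF */

                if (vf_has_event(i))
                        detected = true;

                if (detected) {
                        events[i]++;
                        printf("VF %d disabled after MDD event\n", i);
                }
        }
        return 0;
}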
/**
@@ -1089,7 +1256,7 @@ static void ice_service_task(struct work_struct *work)
/**
* ice_set_ctrlq_len - helper function to set controlq length
- * @hw: pointer to the hw instance
+ * @hw: pointer to the HW instance
*/
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
@@ -1111,8 +1278,9 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
*/
-static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
+static void
+ice_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
{
struct ice_q_vector *q_vector =
container_of(notify, struct ice_q_vector, affinity_notify);
@@ -1142,7 +1310,7 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i;
- for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_for_each_q_vector(vsi, i)
ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
}
@@ -1184,10 +1352,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[base + vector].vector,
- vsi->irq_handler, 0, q_vector->name,
- q_vector);
+ err = devm_request_irq(&pf->pdev->dev, irq_num,
+ vsi->irq_handler, 0,
+ q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev,
"MSIX request_irq failed, error: %d\n", err);
@@ -1328,7 +1495,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
rd32(hw, PFHMC_ERRORDATA));
}
- /* Report and mask off any remaining unexpected interrupts */
+ /* Report any remaining unexpected interrupts */
oicr &= ena_mask;
if (oicr) {
dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
@@ -1342,12 +1509,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(__ICE_PFR_REQ, pf->state);
ice_service_task_schedule(pf);
}
- ena_mask &= ~oicr;
}
ret = IRQ_HANDLED;
- /* re-enable interrupt causes that are not handled during this pass */
- wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) {
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1406,23 +1570,23 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
/**
* ice_ena_ctrlq_interrupts - enable control queue interrupts
* @hw: pointer to HW structure
- * @v_idx: HW vector index to associate the control queue interrupts with
+ * @reg_idx: HW vector index to associate the control queue interrupts with
*/
-static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx)
+static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
{
u32 val;
- val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* enable Admin queue Interrupt causes */
- val = ((v_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
/* enable Mailbox queue Interrupt causes */
- val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
@@ -1510,7 +1674,7 @@ void ice_napi_del(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}
@@ -1529,7 +1693,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll, NAPI_POLL_WEIGHT);
}
@@ -1649,18 +1813,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
- * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
- * @vid: vlan id to be added
+ * @vid: VLAN ID to be added
*
- * net_device_ops implementation for adding vlan ids
+ * net_device_ops implementation for adding VLAN IDs
*/
-static int ice_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int
+ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
+ u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1673,33 +1839,39 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
- int ret = ice_cfg_vlan_pruning(vsi, true);
-
+ ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
- /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
+ /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
- return ice_vsi_add_vlan(vsi, vid);
+ ret = ice_vsi_add_vlan(vsi, vid);
+ if (!ret) {
+ vsi->vlan_ena = true;
+ set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ }
+
+ return ret;
}
/**
- * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
- * @vid: vlan id to be removed
+ * @vid: VLAN ID to be removed
*
- * net_device_ops implementation for removing vlan ids
+ * net_device_ops implementation for removing VLAN IDs
*/
-static int ice_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int
+ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
+ u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int status;
+ int ret;
if (vsi->info.pvid)
return -EINVAL;
@@ -1707,15 +1879,17 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
- status = ice_vsi_kill_vlan(vsi, vid);
- if (status)
- return status;
+ ret = ice_vsi_kill_vlan(vsi, vid);
+ if (ret)
+ return ret;
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
- status = ice_cfg_vlan_pruning(vsi, false);
+ ret = ice_cfg_vlan_pruning(vsi, false, false);
- return status;
+ vsi->vlan_ena = false;
+ set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ return ret;
}
/**
@@ -2033,23 +2207,6 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
- * ice_verify_itr_gran - verify driver's assumption of ITR granularity
- * @pf: pointer to the PF structure
- *
- * There is no error returned here because the driver will be able to handle a
- * different ITR granularity, but interrupt moderation will not be accurate if
- * the driver's assumptions are not verified. This assumption is made so we can
- * use constants in the hot path instead of accessing structure members.
- */
-static void ice_verify_itr_gran(struct ice_pf *pf)
-{
- if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
- dev_warn(&pf->pdev->dev,
- "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
- (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
-}
-
-/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
*
@@ -2072,9 +2229,10 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
*
* Returns 0 on success, negative on failure
*/
-static int ice_probe(struct pci_dev *pdev,
- const struct pci_device_id __always_unused *ent)
+static int
+ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
+ struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -2086,20 +2244,20 @@ static int ice_probe(struct pci_dev *pdev,
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
- dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
+ dev_err(dev, "BAR0 I/O map error %d\n", err);
return err;
}
- pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
+ pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
if (!pf)
return -ENOMEM;
/* set up for high or low dma */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
}
@@ -2133,17 +2291,26 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_hw(hw);
if (err) {
- dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
err = -EIO;
goto err_exit_unroll;
}
- dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
+ dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
ice_init_pf(pf);
+ err = ice_init_pf_dcb(pf);
+ if (err) {
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+
+ /* do not fail overall init if DCB init fails */
+ err = 0;
+ }
+
ice_determine_q_usage(pf);
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
@@ -2152,8 +2319,8 @@ static int ice_probe(struct pci_dev *pdev,
goto err_init_pf_unroll;
}
- pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
- sizeof(*pf->vsi), GFP_KERNEL);
+ pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+ GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_init_pf_unroll;
@@ -2161,8 +2328,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev,
- "ice_init_interrupt_scheme failed: %d\n", err);
+ dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
goto err_init_interrupt_unroll;
}
@@ -2178,15 +2344,13 @@ static int ice_probe(struct pci_dev *pdev,
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf);
if (err) {
- dev_err(&pdev->dev,
- "setup of misc vector failed: %d\n", err);
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
goto err_init_interrupt_unroll;
}
}
/* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw),
- GFP_KERNEL);
+ pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
if (!pf->first_sw) {
err = -ENOMEM;
goto err_msix_misc_unroll;
@@ -2204,8 +2368,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_setup_pf_sw(pf);
if (err) {
- dev_err(&pdev->dev,
- "probe failed due to setup pf switch:%d\n", err);
+ dev_err(dev, "probe failed due to setup pf switch:%d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2214,8 +2377,13 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ err = ice_init_link_events(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_link_events failed: %d\n", err);
+ goto err_alloc_sw_unroll;
+ }
+
ice_verify_cacheline_size(pf);
- ice_verify_itr_gran(pf);
return 0;
@@ -2227,7 +2395,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
- devm_kfree(&pdev->dev, pf->vsi);
+ devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
ice_deinit_hw(hw);
@@ -2272,6 +2440,136 @@ static void ice_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
}
+/**
+ * ice_pci_err_detected - warning that PCI error has been detected
+ * @pdev: PCI device information struct
+ * @err: the type of PCI error
+ *
+ * Called to warn that something happened on the PCI bus and the error handling
+ * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
+ */
+static pci_ers_result_t
+ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!pf) {
+ dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
+ __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_stop(pf);
+
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ice_pci_err_slot_reset - a PCI slot reset has just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to determine if the driver can recover from the PCI slot reset by
+ * using a register read to determine if the device is recoverable.
+ */
+static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+ u32 reg;
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset, error %d\n",
+ err);
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ /* Check for life */
+ reg = rd32(&pf->hw, GLGEN_RTRIG);
+ if (!reg)
+ result = PCI_ERS_RESULT_RECOVERED;
+ else
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err)
+ dev_dbg(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ err);
+ /* non-fatal, continue */
+
+ return result;
+}
+
+/**
+ * ice_pci_err_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error and/or
+ * reset recovery have finished
+ */
+static void ice_pci_err_resume(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!pf) {
+ dev_err(&pdev->dev,
+ "%s failed, device is unrecoverable\n", __func__);
+ return;
+ }
+
+ if (test_bit(__ICE_SUSPENDED, pf->state)) {
+ dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
+ __func__);
+ return;
+ }
+
+ ice_do_reset(pf, ICE_RESET_PFR);
+ ice_service_task_restart(pf);
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+}
+
+/**
+ * ice_pci_err_reset_prepare - prepare device driver for PCI reset
+ * @pdev: PCI device information struct
+ */
+static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_stop(pf);
+
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+ }
+}
+
+/**
+ * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void ice_pci_err_reset_done(struct pci_dev *pdev)
+{
+ ice_pci_err_resume(pdev);
+}
+
/* ice_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
@@ -2289,12 +2587,21 @@ static const struct pci_device_id ice_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
+static const struct pci_error_handlers ice_pci_err_handler = {
+ .error_detected = ice_pci_err_detected,
+ .slot_reset = ice_pci_err_slot_reset,
+ .reset_prepare = ice_pci_err_reset_prepare,
+ .reset_done = ice_pci_err_reset_done,
+ .resume = ice_pci_err_resume
+};
+
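Hooking up err_handler is the usual struct-of-callbacks pattern: the PCI core keeps a pointer to a table of functions and invokes whichever recovery stage applies. A standalone sketch of the same wiring, with hypothetical names and a main() playing the role of the core:

#include <stdio.h>

/* a table of recovery callbacks, analogous to struct pci_error_handlers */
struct err_handlers {
        void (*error_detected)(void);
        void (*slot_reset)(void);
        void (*resume)(void);
};

static void on_detected(void)   { puts("prepare for reset"); }
static void on_slot_reset(void) { puts("re-enable device"); }
static void on_resume(void)     { puts("restart service task"); }

static const struct err_handlers handlers = {
        .error_detected = on_detected,
        .slot_reset     = on_slot_reset,
        .resume         = on_resume,
};

int main(void)
{
        /* the "core" walks the recovery stages in order */
        handlers.error_detected();
        handlers.slot_reset();
        handlers.resume();
        return 0;
}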
static struct pci_driver ice_driver = {
.name = KBUILD_MODNAME,
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
.sriov_configure = ice_sriov_configure,
+ .err_handler = &ice_pci_err_handler
};
/**
@@ -2341,7 +2648,7 @@ static void __exit ice_module_exit(void)
module_exit(ice_module_exit);
/**
- * ice_set_mac_address - NDO callback to set mac address
+ * ice_set_mac_address - NDO callback to set MAC address
* @netdev: network interface device structure
* @pi: pointer to an address structure
*
@@ -2378,14 +2685,14 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
- /* When we change the mac address we also have to change the mac address
- * based filter rules that were created previously for the old mac
+ /* When we change the MAC address we also have to change the MAC address
+ * based filter rules that were created previously for the old MAC
* address. So first, we remove the old filter rule using ice_remove_mac
* and then create a new filter rule using ice_add_mac. Note that for
- * both these operations, we first need to form a "list" of mac
- * addresses (even though in this case, we have only 1 mac address to be
+ * both these operations, we first need to form a "list" of MAC
+ * addresses (even though in this case, we have only 1 MAC address to be
* added/removed) and this is done using ice_add_mac_to_list. Depending on
- * the ensuing operation this "list" of mac addresses is either to be
+ * the ensuing operation this "list" of MAC addresses is either to be
* added or removed from the filter.
*/
err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
@@ -2423,12 +2730,12 @@ free_lists:
return err;
}
- /* change the netdev's mac address */
+ /* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
netdev->dev_addr);
- /* write new mac address to the firmware */
+ /* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
@@ -2470,7 +2777,7 @@ static void ice_set_rx_mode(struct net_device *netdev)
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
- * @vid: VLAN id
+ * @vid: VLAN ID
* @flags: instructions from stack about fdb operation
* @extack: netlink extended ack
*/
@@ -2510,11 +2817,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
- * @vid: VLAN id
+ * @vid: VLAN ID
*/
-static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid)
+static int
+ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ __always_unused u16 vid)
{
int err;
@@ -2538,13 +2846,16 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
*/
-static int ice_set_features(struct net_device *netdev,
- netdev_features_t features)
+static int
+ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = 0;
+ /* Multiple features can be changed in one call, so keep the checks in
+ * separate if/else chains to guarantee each feature is evaluated
+ */
if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
ret = ice_vsi_manage_rss_lut(vsi, true);
else if (!(features & NETIF_F_RXHASH) &&
@@ -2557,8 +2868,9 @@ static int ice_set_features(struct net_device *netdev,
else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
ret = ice_vsi_manage_vlan_stripping(vsi, false);
- else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
ret = ice_vsi_manage_vlan_insertion(vsi);
else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
@@ -2568,8 +2880,8 @@ static int ice_set_features(struct net_device *netdev,
}
/**
- * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
- * @vsi: VSI to setup vlan properties for
+ * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
+ * @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
@@ -2601,6 +2913,7 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
+ ice_vsi_cfg_dcb_rings(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
if (!err)
@@ -2620,7 +2933,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -2666,7 +2979,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_service_task_schedule(pf);
- return err;
+ return 0;
}
/**
@@ -2693,8 +3006,8 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that need to be performed to read u64 values on a 32-bit machine.
*/
-static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
- u64 *bytes)
+static void
+ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
{
unsigned int start;
*pkts = 0;
@@ -2911,6 +3224,8 @@ static void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
&prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
+ ice_update_dcb_stats(pf);
+
ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
&prev_ps->crc_errors, &cur_ps->crc_errors);
@@ -2992,7 +3307,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -3276,7 +3591,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi)
return;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3289,47 +3604,31 @@ static void ice_vsi_release_all(struct ice_pf *pf)
}
/**
- * ice_dis_vsi - pause a VSI
- * @vsi: the VSI being paused
+ * ice_ena_vsi - resume a VSI
+ * @vsi: the VSI being resumed
* @locked: is the rtnl_lock already held
*/
-static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
+ int err = 0;
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
+ if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+ return err;
+
+ clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ struct net_device *netd = vsi->netdev;
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- if (!locked) {
+ if (locked) {
+ err = netd->netdev_ops->ndo_open(netd);
+ } else {
rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ err = netd->netdev_ops->ndo_open(netd);
rtnl_unlock();
- } else {
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
}
} else {
- ice_vsi_close(vsi);
- }
- }
-}
-
-/**
- * ice_ena_vsi - resume a VSI
- * @vsi: the VSI being resume
- */
-static int ice_ena_vsi(struct ice_vsi *vsi)
-{
- int err = 0;
-
- if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
- vsi->netdev) {
- if (netif_running(vsi->netdev)) {
- rtnl_lock();
- err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
- rtnl_unlock();
- } else {
err = ice_vsi_open(vsi);
}
}
@@ -3338,29 +3637,21 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
}
/**
- * ice_pf_dis_all_vsi - Pause all VSIs on a PF
- * @pf: the PF
- */
-static void ice_pf_dis_all_vsi(struct ice_pf *pf)
-{
- int v;
-
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- ice_dis_vsi(pf->vsi[v], false);
-}
-
-/**
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
+ * @locked: is the rtnl_lock already held
*/
-static int ice_pf_ena_all_vsi(struct ice_pf *pf)
+#ifdef CONFIG_DCB
+int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
+#else
+static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
+#endif /* CONFIG_DCB */
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- if (ice_ena_vsi(pf->vsi[v]))
+ if (ice_ena_vsi(pf->vsi[v], locked))
return -EIO;
return 0;
@@ -3375,16 +3666,12 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and reinit the VSI if found */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
int err;
if (!pf->vsi[i])
continue;
- /* VF VSI rebuild isn't supported yet */
- if (pf->vsi[i]->type == ICE_VSI_VF)
- continue;
-
err = ice_vsi_rebuild(pf->vsi[i]);
if (err) {
dev_err(&pf->pdev->dev,
@@ -3412,7 +3699,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and replay the VSI if found */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3479,6 +3766,8 @@ static void ice_rebuild(struct ice_pf *pf)
if (err)
goto err_sched_init_port;
+ ice_dcb_rebuild(pf);
+
/* reset search_hint of irq_trackers to 0 since interrupts are
* reclaimed and could be allocated from beginning during VSI rebuild
*/
@@ -3512,7 +3801,7 @@ static void ice_rebuild(struct ice_pf *pf)
}
/* restart the VSIs that were rebuilt and running before the reset */
- err = ice_pf_ena_all_vsi(pf);
+ err = ice_pf_ena_all_vsi(pf, false);
if (err) {
dev_err(&pf->pdev->dev, "error enabling VSIs\n");
/* no need to disable VSIs in tear down path in ice_rebuild()
@@ -3521,9 +3810,7 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_vsi_rebuild;
}
- ice_reset_all_vfs(pf, true);
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
bool link_up;
if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
@@ -3710,7 +3997,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
- * @pid: process id
+ * @pid: process ID
* @seq: RTNL message seq
* @dev: the netdev being configured
* @filter_mask: filter mask passed in
@@ -3909,8 +4196,7 @@ static void ice_tx_timeout(struct net_device *netdev)
/* Read interrupt register */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
val = rd32(hw,
- GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
- tx_ring->vsi->hw_base_vector));
+ GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 413fdbbcc4d0..62571d33d0d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -5,7 +5,7 @@
/**
* ice_aq_read_nvm
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @module_typeid: module pointer location in words from the NVM beginning
* @offset: byte offset from the module beginning
* @length: length of the section to be read (in bytes from the offset)
@@ -235,7 +235,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
/**
* ice_init_nvm - initializes NVM setting
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
@@ -248,7 +248,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
u32 fla, gens_stat;
u8 sr_size;
- /* The SR size is stored regardless of the nvm programming mode
+ /* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 56049739a250..8d49f83be7a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -43,9 +43,9 @@ ice_sched_add_root_node(struct ice_port_info *pi,
/**
* ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
* @start_node: pointer to the starting ice_sched_node struct in a sub-tree
- * @teid: node teid to search
+ * @teid: node TEID to search
*
- * This function searches for a node matching the teid in the scheduling tree
+ * This function searches for a node matching the TEID in the scheduling tree
* from the SW DB. The search is recursive and is restricted by the number of
* layers it has searched through; stopping at the max supported layer.
*
@@ -66,7 +66,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
return NULL;
- /* Check if teid matches to any of the children nodes */
+ /* Check if TEID matches to any of the children nodes */
for (i = 0; i < start_node->num_children; i++)
if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
return start_node->children[i];
@@ -86,7 +86,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
* @elems_req: number of elements to request
* @buf: pointer to buffer
@@ -118,7 +118,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
/**
* ice_aq_query_sched_elems - query scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to query
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-static enum ice_status
+enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -138,31 +138,6 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
- * ice_sched_query_elem - query element information from hw
- * @hw: pointer to the hw struct
- * @node_teid: node teid to be queried
- * @buf: buffer to element information
- *
- * This function queries HW element information
- */
-static enum ice_status
-ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf)
-{
- u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
-
- buf_size = sizeof(*buf);
- memset(buf, 0, buf_size);
- buf->generic[0].node_teid = cpu_to_le32(node_teid);
- status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
- NULL);
- if (status || num_elem_ret != 1)
- ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
- return status;
-}
-
-/**
* ice_sched_add_node - Insert the Tx scheduler node in SW DB
* @pi: port information structure
* @layer: Scheduler layer of the node
@@ -226,7 +201,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
/**
* ice_aq_delete_sched_elems - delete scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @grps_req: number of groups to delete
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -246,13 +221,13 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
}
/**
- * ice_sched_remove_elems - remove nodes from hw
- * @hw: pointer to the hw struct
+ * ice_sched_remove_elems - remove nodes from HW
+ * @hw: pointer to the HW struct
* @parent: pointer to the parent node
* @num_nodes: number of nodes
* @node_teids: array of node teids to be deleted
*
- * This function remove nodes from hw
+ * This function removes nodes from HW
*/
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
@@ -276,7 +251,8 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
- ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
+ hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
@@ -284,7 +260,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
/**
* ice_sched_get_first_node - get the first node of the given layer
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @parent: pointer the base node of the subtree
* @layer: layer number
*
@@ -360,12 +336,8 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- enum ice_status status;
- status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
- if (status)
- ice_debug(hw, ICE_DBG_SCHED,
- "remove element failed %d\n", status);
+ ice_sched_remove_elems(hw, node->parent, 1, &teid);
}
parent = node->parent;
/* root has no parent */
@@ -409,7 +381,7 @@ err_exit:
/**
* ice_aq_get_dflt_topo - gets default scheduler topology
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @lport: logical port number
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -439,7 +411,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
/**
* ice_aq_add_sched_elems - adds scheduling element
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @grps_req: the number of groups that are requested to be added
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -460,7 +432,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -481,7 +453,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
/**
* ice_aq_resume_sched_elems - resume scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to resume
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -502,7 +474,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
/**
* ice_aq_query_sched_res - query scheduler resource
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf_size: buffer size in bytes
* @buf: pointer to buffer
* @cd: pointer to command details structure or NULL
@@ -521,13 +493,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
}
/**
- * ice_sched_suspend_resume_elems - suspend or resume hw nodes
- * @hw: pointer to the hw struct
+ * ice_sched_suspend_resume_elems - suspend or resume HW nodes
+ * @hw: pointer to the HW struct
* @num_nodes: number of nodes
* @node_teids: array of node teids to be suspended or resumed
* @suspend: true means suspend / false means resume
*
- * This function suspends or resumes hw nodes
+ * This function suspends or resumes HW nodes
*/
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
@@ -561,10 +533,54 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
}
/**
- * ice_sched_clear_agg - clears the agg related information
+ * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+ struct ice_q_ctx *q_ctx;
+
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ /* allocate LAN queue contexts */
+ if (!vsi_ctx->lan_q_ctx[tc]) {
+ vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
+ new_numqs,
+ sizeof(*q_ctx),
+ GFP_KERNEL);
+ if (!vsi_ctx->lan_q_ctx[tc])
+ return ICE_ERR_NO_MEMORY;
+ vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+ return 0;
+ }
+ /* the number of queues has increased, so update the queue contexts */
+ if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
+ u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
+
+ q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+ sizeof(*q_ctx), GFP_KERNEL);
+ if (!q_ctx)
+ return ICE_ERR_NO_MEMORY;
+ memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
+ prev_num * sizeof(*q_ctx));
+ devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
+ vsi_ctx->lan_q_ctx[tc] = q_ctx;
+ vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+ }
+ return 0;
+}
+
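ice_alloc_lan_q_ctx grows the per-TC queue-context array in the usual way: allocate a larger zeroed array, copy the existing entries across, free the old array, and update the count. A userspace analogue of the pattern, with calloc/memcpy/free standing in for the devm_* helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct q_ctx { int q_handle; };

/* grow the context array from old_n to new_n entries, preserving contents */
static struct q_ctx *grow_q_ctx(struct q_ctx *old, size_t old_n, size_t new_n)
{
        struct q_ctx *new_ctx;

        if (new_n <= old_n)
                return old;              /* nothing to do */

        new_ctx = calloc(new_n, sizeof(*new_ctx));
        if (!new_ctx)
                return NULL;             /* caller keeps the old array */

        memcpy(new_ctx, old, old_n * sizeof(*old));
        free(old);
        return new_ctx;
}

int main(void)
{
        size_t n = 2;
        struct q_ctx *ctx = calloc(n, sizeof(*ctx));

        if (!ctx)
                return 1;
        ctx[0].q_handle = 10;
        ctx[1].q_handle = 11;

        ctx = grow_q_ctx(ctx, n, 8);     /* grow from 2 to 8 contexts */
        if (!ctx)
                return 1;
        printf("q0=%d q1=%d\n", ctx[0].q_handle, ctx[1].q_handle);
        free(ctx);
        return 0;
}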
+/**
+ * ice_sched_clear_agg - clears the aggregator related information
* @hw: pointer to the hardware structure
*
- * This function removes agg list and free up agg related memory
+ * This function removes the aggregator list and frees up aggregator-related memory
* previously allocated.
*/
void ice_sched_clear_agg(struct ice_hw *hw)
@@ -622,7 +638,7 @@ void ice_sched_clear_port(struct ice_port_info *pi)
/**
* ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Cleanup scheduling elements from SW DB for all the ports
*/
@@ -646,16 +662,16 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}
/**
- * ice_sched_add_elems - add nodes to hw and SW DB
+ * ice_sched_add_elems - add nodes to HW and SW DB
* @pi: port information structure
* @tc_node: pointer to the branch node
* @parent: pointer to the parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes
* @num_nodes_added: pointer to num nodes added
- * @first_node_teid: if new nodes are added then return the teid of first node
+ * @first_node_teid: if new nodes are added then return the TEID of first node
*
- * This function add nodes to hw as well as to SW DB for a given layer
+ * This function adds nodes to HW as well as to the SW DB for a given layer
*/
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
@@ -697,7 +713,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
&num_groups_added, NULL);
if (status || num_groups_added != 1) {
- ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
+ hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return ICE_ERR_CFG;
}
@@ -748,7 +765,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* @parent: pointer to parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes to be added
- * @first_node_teid: pointer to the first node teid
+ * @first_node_teid: pointer to the first node TEID
* @num_nodes_added: pointer to number of nodes added
*
* This function adds nodes to a given layer.
@@ -800,7 +817,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
*num_nodes_added += num_added;
}
- /* Don't modify the first node teid memory if the first node was
+ /* Don't modify the first node TEID memory if the first node was
* added already in the above call. Instead send some temp
* memory for all other recursive calls.
*/
@@ -832,7 +849,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
/**
* ice_sched_get_qgrp_layer - get the current queue group layer number
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function returns the current queue group layer number
*/
@@ -844,7 +861,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
/**
* ice_sched_get_vsi_layer - get the current VSI layer number
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function returns the current VSI layer number
*/
@@ -855,7 +872,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 7 4
* 5 or less sw_entry_point_layer
*/
- /* calculate the vsi layer based on number of layers. */
+ /* calculate the VSI layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
@@ -973,7 +990,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
goto err_init_port;
}
- /* If the last node is a leaf node then the index of the Q group
+ /* If the last node is a leaf node then the index of the queue group
* layer is two less than the number of elements.
*/
if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
@@ -1082,7 +1099,7 @@ sched_query_out:
/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @base: pointer to the base node
* @node: pointer to the node to search
*
@@ -1114,13 +1131,13 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}
/**
- * ice_sched_get_free_qparent - Get a free lan or rdma q group node
+ * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: branch number
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
*
- * This function retrieves a free lan or rdma q group node
+ * This function retrieves a free LAN or RDMA queue group node
*/
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
@@ -1138,11 +1155,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_ctx)
return NULL;
vsi_node = vsi_ctx->sched.vsi_node[tc];
- /* validate invalid VSI id */
+ /* bail out if there is no VSI node for this TC (invalid VSI ID) */
if (!vsi_node)
goto lan_q_exit;
- /* get the first q group node from VSI sub-tree */
+ /* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
@@ -1158,12 +1175,12 @@ lan_q_exit:
}
/**
- * ice_sched_get_vsi_node - Get a VSI node based on VSI id
- * @hw: pointer to the hw struct
+ * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
+ * @hw: pointer to the HW struct
* @tc_node: pointer to the TC node
* @vsi_handle: software VSI handle
*
- * This function retrieves a VSI node for a given VSI id from a given
+ * This function retrieves a VSI node for a given VSI ID from a given
* TC branch
*/
static struct ice_sched_node *
@@ -1188,7 +1205,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @num_qs: number of queues
* @num_nodes: num nodes array
*
@@ -1204,7 +1221,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
- /* calculate num nodes from q group to VSI layer */
+ /* calculate num nodes from queue group to VSI layer */
for (i = qgl; i > vsil; i--) {
/* round to the next integer if there is a remainder */
num = DIV_ROUND_UP(num, hw->max_children[i]);
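To make the per-layer rollup concrete, here is a small standalone C sketch (not driver code) of the same DIV_ROUND_UP walk from the queue-group layer down to the VSI layer. The layer indices, the fan-out of 8 children per node, and the 130-queue request are made-up example values, not anything read from real hardware.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_children[] = { 0, 0, 0, 0, 8, 8, 8, 0, 0 };
	unsigned int num_nodes[9] = { 0 };
	unsigned int qgl = 6, vsil = 3;	/* example layer numbers */
	unsigned int num = 130;		/* example queue count   */
	unsigned int i;

	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIV_ROUND_UP(num, max_children[i]);
		num_nodes[i] = num;
		printf("layer %u needs %u nodes\n", i, num_nodes[i]);
	}
	return 0;
}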
@@ -1220,10 +1237,10 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* @vsi_handle: software VSI handle
* @tc_node: pointer to the TC node
* @num_nodes: pointer to the num nodes that needs to be added per layer
- * @owner: node owner (lan or rdma)
+ * @owner: node owner (LAN or RDMA)
*
* This function adds the VSI child nodes to tree. It gets called for
- * lan and rdma separately.
+ * LAN and RDMA separately.
*/
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
@@ -1271,44 +1288,8 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
- * @pi: port information structure
- * @vsi_node: pointer to the VSI node
- * @num_nodes: pointer to the num nodes that needs to be removed per layer
- * @owner: node owner (lan or rdma)
- *
- * This function removes the VSI child nodes from the tree. It gets called for
- * lan and rdma separately.
- */
-static void
-ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
- struct ice_sched_node *vsi_node, u16 *num_nodes,
- u8 owner)
-{
- struct ice_sched_node *node, *next;
- u8 i, qgl, vsil;
- u16 num;
-
- qgl = ice_sched_get_qgrp_layer(pi->hw);
- vsil = ice_sched_get_vsi_layer(pi->hw);
-
- for (i = qgl; i > vsil; i--) {
- num = num_nodes[i];
- node = ice_sched_get_first_node(pi->hw, vsi_node, i);
- while (node && num) {
- next = node->sibling;
- if (node->owner == owner && !node->num_children) {
- ice_free_sched_node(pi, node);
- num--;
- }
- node = next;
- }
- }
-}
-
-/**
* ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@@ -1427,7 +1408,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
- /* add vsi supported nodes to tc subtree */
+ /* add VSI supported nodes to TC subtree */
return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
num_nodes);
}
@@ -1446,7 +1427,6 @@ static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
- u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
@@ -1454,7 +1434,6 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
- u8 i;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -1468,41 +1447,30 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!vsi_ctx)
return ICE_ERR_PARAM;
- if (owner == ICE_SCHED_NODE_OWNER_LAN)
- prev_numqs = vsi_ctx->sched.max_lanq[tc];
- else
- return ICE_ERR_PARAM;
-
- /* num queues are not changed */
- if (prev_numqs == new_numqs)
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
+ /* number of queues is unchanged or less than the previous number */
+ if (new_numqs <= prev_numqs)
+ return status;
+ status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+ if (status)
return status;
-
- /* calculate number of nodes based on prev/new number of qs */
- if (prev_numqs)
- ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);
if (new_numqs)
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
-
- if (prev_numqs > new_numqs) {
- for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];
-
- ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
- owner);
- } else {
- for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- new_num_nodes[i] -= prev_num_nodes[i];
-
- status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
- new_num_nodes, owner);
- if (status)
- return status;
- }
-
+ /* Keep the maximum queue configuration all the time. Update the tree
+ * only if the number of queues is greater than the previous number.
+ * This may leave some extra nodes in the tree if the new number of
+ * queues is smaller, but that does no harm. Removing those extra nodes
+ * would complicate the code if they are part of an SRL profile or are
+ * individually rate limited.
+ */
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes, owner);
+ if (status)
+ return status;
vsi_ctx->sched.max_lanq[tc] = new_numqs;
- return status;
+ return 0;
}
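The grow-only policy in the rewritten ice_sched_update_vsi_child_nodes() can be illustrated with a minimal standalone sketch; max_lanq stands in for vsi_ctx->sched.max_lanq[tc] and the actual node additions are elided.

#include <stdio.h>

static unsigned int max_lanq;	/* stands in for vsi_ctx->sched.max_lanq[tc] */

static void update_vsi_child_nodes(unsigned int new_numqs)
{
	if (new_numqs <= max_lanq) {
		printf("request %3u queues: keep tree sized for %u\n",
		       new_numqs, max_lanq);
		return;
	}
	printf("request %3u queues: grow tree from %u to %u\n",
	       new_numqs, max_lanq, new_numqs);
	max_lanq = new_numqs;	/* child nodes would be added here */
}

int main(void)
{
	update_vsi_child_nodes(16);
	update_vsi_child_nodes(64);
	update_vsi_child_nodes(32);	/* smaller request: nothing removed */
	return 0;
}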
/**
@@ -1511,7 +1479,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* @vsi_handle: software VSI handle
* @tc: TC number
* @maxqs: max number of queues
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
* @enable: TC enabled or disabled
*
* This function adds/updates VSI nodes based on the number of queues. If TC is
@@ -1527,6 +1495,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
@@ -1535,7 +1504,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return ICE_ERR_PARAM;
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
- /* suspend the VSI if tc is not enabled */
+ /* suspend the VSI if TC is not enabled */
if (!enable) {
if (vsi_node && vsi_node->in_use) {
u32 teid = le32_to_cpu(vsi_node->info.node_teid);
@@ -1586,7 +1555,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
}
/**
- * ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry
+ * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
* @pi: port information structure
* @vsi_handle: software VSI handle
*
@@ -1646,8 +1615,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
- u8 i, j = 0;
+ u8 i;
+ ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return status;
mutex_lock(&pi->sched_lock);
@@ -1655,8 +1625,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_ctx)
goto exit_sched_rm_vsi_cfg;
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
+ u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -1689,7 +1660,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_free_sched_node(pi, vsi_node);
vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up agg related vsi info if any */
+ /* clean up aggregator related VSI info if any */
ice_sched_rm_agg_vsi_info(pi, vsi_handle);
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index bee8221ad146..3902a8ad3025 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -24,6 +24,10 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
+enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 683f48824a29..17afe6acb18a 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -12,6 +12,7 @@ enum ice_status {
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
+ ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 09d1c314b68f..9f1f595ae7e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -19,7 +19,7 @@
* byte 6 = 0x2: to identify it as locally administered SA MAC
* byte 12 = 0x81 & byte 13 = 0x00:
* In case of VLAN filter first two bytes defines ether type (0x8100)
- * and remaining two bytes are placeholder for programming a given VLAN id
+ * and the remaining two bytes are a placeholder for programming a given VLAN ID
* In case of Ether type filter it is treated as header without VLAN tag
* and byte 12 and 13 is used to program a given Ether type instead
*/
@@ -51,7 +51,7 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/**
* ice_aq_alloc_free_res - command to allocate/free resources
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
@@ -87,7 +87,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
@@ -163,7 +163,7 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
/**
* ice_aq_add_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
*
@@ -206,7 +206,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_free_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
* @cd: pointer to command details structure or NULL
@@ -242,7 +242,7 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_update_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
*
@@ -279,7 +279,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_is_vsi_valid - check whether the VSI is valid or not
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* check whether the VSI is valid or not
@@ -290,11 +290,11 @@ bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
}
/**
- * ice_get_hw_vsi_num - return the hw VSI number
- * @hw: pointer to the hw struct
+ * ice_get_hw_vsi_num - return the HW VSI number
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
- * return the hw VSI number
+ * return the HW VSI number
* Caution: call this function only if VSI is valid (ice_is_vsi_valid)
*/
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
@@ -304,7 +304,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* return the VSI context entry for a given VSI handle
@@ -316,21 +316,42 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_save_vsi_ctx - save the VSI context for a given VSI handle
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
* @vsi: VSI context pointer
*
* save the VSI context entry for a given VSI handle
*/
-static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
- struct ice_vsi_ctx *vsi)
+static void
+ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
hw->vsi_ctx[vsi_handle] = vsi;
}
/**
+ * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ */
+static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ u8 i;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return;
+ ice_for_each_traffic_class(i) {
+ if (vsi->lan_q_ctx[i]) {
+ devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
+ vsi->lan_q_ctx[i] = NULL;
+ }
+ }
+}
+
+/**
* ice_clear_vsi_ctx - clear the VSI context entry
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* clear the VSI context entry
@@ -341,6 +362,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
vsi = ice_get_vsi_ctx(hw, vsi_handle);
if (vsi) {
+ ice_clear_vsi_q_ctx(hw, vsi_handle);
devm_kfree(ice_hw_to_dev(hw), vsi);
hw->vsi_ctx[vsi_handle] = NULL;
}
@@ -348,7 +370,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_clear_all_vsi_ctx - clear all the VSI context entries
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
@@ -360,7 +382,7 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/**
* ice_add_vsi - add VSI context to the hardware and VSI handle list
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle provided by drivers
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
@@ -383,7 +405,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
return status;
tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!tmp_vsi_ctx) {
- /* Create a new vsi context */
+ /* Create a new VSI context */
tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
@@ -398,12 +420,12 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
- return status;
+ return 0;
}
/**
* ice_free_vsi- free VSI context from hardware and VSI handle list
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle
* @vsi_ctx: pointer to a VSI context struct
* @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
@@ -428,7 +450,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_update_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
@@ -447,8 +469,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_alloc_free_vsi_list
- * @hw: pointer to the hw struct
- * @vsi_list_id: VSI list id returned or used for lookup
+ * @hw: pointer to the HW struct
+ * @vsi_list_id: VSI list ID returned or used for lookup
* @lkup_type: switch rule filter lookup type
* @opc: switch rules population command type - pass in the command opcode
*
@@ -504,7 +526,7 @@ ice_aq_alloc_free_vsi_list_exit:
/**
* ice_aq_sw_rules - add/update/remove switch rules
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @rule_list: pointer to switch rule population list
* @rule_list_sz: total size of the rule list in bytes
* @num_rules: number of switch rules in the rule_list
@@ -643,21 +665,43 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
fi->fltr_act == ICE_FWD_TO_Q ||
fi->fltr_act == ICE_FWD_TO_QGRP)) {
- fi->lb_en = true;
- /* Do not set lan_en to TRUE if
+ /* Setting LB for prune actions will result in replicated
+ * packets to the internal switch that will be dropped.
+ */
+ if (fi->lkup_type != ICE_SW_LKUP_VLAN)
+ fi->lb_en = true;
+
+ /* Set lan_en to TRUE if
* 1. The switch is a VEB AND
* 2
- * 2.1 The lookup is MAC with unicast addr for MAC, OR
- * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+ * 2.1 The lookup is a directional lookup like ethertype,
+ * promiscuous, ethertype-MAC, promiscuous-VLAN
+ * and default-port OR
+ * 2.2 The lookup is VLAN, OR
+ * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
+ * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
+ *
+ * OR
+ *
+ * The switch is a VEPA.
*
- * In all other cases, the LAN enable has to be set to true.
+ * In all other cases, the LAN enable has to be set to false.
*/
- if (!(hw->evb_veb &&
- ((fi->lkup_type == ICE_SW_LKUP_MAC &&
- is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
- (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
- is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+ if (hw->evb_veb) {
+ if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC ||
+ fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ fi->lkup_type == ICE_SW_LKUP_DFLT ||
+ fi->lkup_type == ICE_SW_LKUP_VLAN ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC &&
+ !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+ !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
+ fi->lan_en = true;
+ } else {
fi->lan_en = true;
+ }
}
}
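A compact way to read the rewritten lan_en logic is as a predicate over the switch mode and the lookup type. The standalone sketch below mirrors the comment above using a reduced enum; it is illustration only and does not use the real ICE_SW_LKUP_* values or struct ice_fltr_info.

#include <stdbool.h>
#include <stdio.h>

enum lkup {
	LKUP_ETHERTYPE, LKUP_ETHERTYPE_MAC, LKUP_PROMISC, LKUP_PROMISC_VLAN,
	LKUP_DFLT, LKUP_VLAN, LKUP_MAC, LKUP_MAC_VLAN,
};

/* returns whether LAN enable would be set for this filter */
static bool lan_en(bool evb_veb, enum lkup type, bool unicast_mac)
{
	if (!evb_veb)			/* VEPA: always enable LAN */
		return true;
	switch (type) {
	case LKUP_ETHERTYPE:		/* directional lookups */
	case LKUP_ETHERTYPE_MAC:
	case LKUP_PROMISC:
	case LKUP_PROMISC_VLAN:
	case LKUP_DFLT:
	case LKUP_VLAN:
		return true;
	case LKUP_MAC:			/* only mcast/bcast MAC filters */
	case LKUP_MAC_VLAN:
		return !unicast_mac;
	}
	return false;
}

int main(void)
{
	printf("VEB,  unicast MAC   -> lan_en=%d\n", lan_en(true, LKUP_MAC, true));
	printf("VEB,  broadcast MAC -> lan_en=%d\n", lan_en(true, LKUP_MAC, false));
	printf("VEB,  VLAN filter   -> lan_en=%d\n", lan_en(true, LKUP_VLAN, false));
	printf("VEPA, unicast MAC   -> lan_en=%d\n", lan_en(false, LKUP_MAC, true));
	return 0;
}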
@@ -799,7 +843,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* @hw: pointer to the hardware structure
* @m_ent: the management entry for which sw marker needs to be added
* @sw_marker: sw marker to tag the Rx descriptor with
- * @l_id: large action resource id
+ * @l_id: large action resource ID
*
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
@@ -811,8 +855,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
/* For software marker we need 3 large actions
* 1. FWD action: FWD TO VSI or VSI LIST
- * 2. GENERIC VALUE action to hold the profile id
- * 3. GENERIC VALUE action to hold the software marker id
+ * 2. GENERIC VALUE action to hold the profile ID
+ * 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
enum ice_status status;
@@ -875,13 +919,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
- /* Update the action to point to the large action id */
+ /* Update the action to point to the large action ID */
rx_tx->pdata.lkup_tx_rx.act =
cpu_to_le32(ICE_SINGLE_ACT_PTR |
((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
ICE_SINGLE_ACT_PTR_VAL_M));
- /* Use the filter rule id of the previously created rule with single
+ /* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
@@ -904,10 +948,10 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* @hw: pointer to the hardware structure
* @vsi_handle_arr: array of VSI handles to set in the VSI mapping
* @num_vsi: number of VSI handles in the array
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
*
- * Helper function to create a new entry of VSI list id to VSI mapping
- * using the given VSI list id
+ * Helper function to create a new entry of VSI list ID to VSI mapping
+ * using the given VSI list ID
*/
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@@ -935,13 +979,13 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* @hw: pointer to the hardware structure
* @vsi_handle_arr: array of VSI handles to form a VSI list
* @num_vsi: number of VSI handles in the array
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
* @remove: Boolean value to indicate if this is a remove action
* @opc: switch rules population command type - pass in the command opcode
* @lkup_type: lookup type of the filter
*
* Call AQ command to add a new switch rule or update existing switch rule
- * using the given VSI list id
+ * using the given VSI list ID
*/
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@@ -998,7 +1042,7 @@ exit:
/**
* ice_create_vsi_list_rule - Creates and populates a VSI list rule
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle_arr: array of VSI handles to form a VSI list
* @num_vsi: number of VSI handles in the array
* @vsi_list_id: stores the ID of the VSI list to be created
@@ -1092,7 +1136,7 @@ ice_create_pkt_fwd_rule_exit:
* @f_info: filter information for switch rule
*
* Call AQ command to update a previously created switch rule with a
- * VSI list id
+ * VSI list ID
*/
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
@@ -1119,7 +1163,7 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
/**
* ice_update_sw_rule_bridge_mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
@@ -1174,7 +1218,7 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
* Update the previously created switch rule with the
- * newly created VSI list id
+ * newly created VSI list ID
* if a VSI list was previously created
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
@@ -1255,7 +1299,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
return 0;
/* Update the previously created VSI list set with
- * the new VSI id passed in
+ * the new VSI ID passed in
*/
vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
opcode = ice_aqc_opc_update_sw_rules;
@@ -1263,7 +1307,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
vsi_list_id, false, opcode,
new_fltr->lkup_type);
- /* update VSI list mapping info with new VSI id */
+ /* update VSI list mapping info with new VSI ID */
if (!status)
set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
}
@@ -1305,7 +1349,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
* @hw: pointer to the hardware structure
* @recp_id: lookup type for which VSI lists needs to be searched
* @vsi_handle: VSI handle to be found in VSI list
- * @vsi_list_id: VSI list id found containing vsi_handle
+ * @vsi_list_id: VSI list ID found containing vsi_handle
*
* Helper function to search a VSI list with single entry containing given VSI
* handle element. This can be extended further to search VSI list with more
@@ -1336,7 +1380,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
/**
* ice_add_rule_internal - add rule for a given lookup type
* @hw: pointer to the hardware structure
- * @recp_id: lookup type (recipe id) for which rule has to be added
+ * @recp_id: lookup type (recipe ID) for which rule has to be added
* @f_entry: structure containing MAC forwarding information
*
* Adds or updates the rule lists for a given recipe
@@ -1381,7 +1425,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
/**
* ice_remove_vsi_list_rule
* @hw: pointer to the hardware structure
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
* @lkup_type: switch rule filter lookup type
*
* The VSI list should be emptied before this function is called to remove the
@@ -1506,7 +1550,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_remove_rule_internal - Remove a filter rule of a given type
* @hw: pointer to the hardware structure
- * @recp_id: recipe id for which the rule needs to removed
+ * @recp_id: recipe ID for which the rule needs to be removed
* @f_entry: rule entry containing filter information
*/
static enum ice_status
@@ -1556,7 +1600,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
if (status)
goto exit;
- /* if vsi count goes to zero after updating the vsi list */
+ /* if VSI count goes to zero after updating the VSI list */
if (list_elem->vsi_count == 0)
remove_rule = true;
}
@@ -1634,7 +1678,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
m_list_itr->fltr_info.src = hw_vsi_id;
@@ -1710,7 +1754,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
((u8 *)r_iter + (elem_sent * s_rule_size));
}
- /* Fill up rule id based on the value returned from FW */
+ /* Fill up rule ID based on the value returned from FW */
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
@@ -1770,7 +1814,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
new_fltr = &f_entry->fltr_info;
- /* VLAN id should only be 12 bits */
+ /* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;
@@ -1828,7 +1872,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
}
}
} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
- /* Update existing VSI list to add new VSI id only if it used
+ /* Update existing VSI list to add new VSI ID only if it used
* by one VLAN rule.
*/
cur_fltr = &v_list_itr->fltr_info;
@@ -1838,7 +1882,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* If VLAN rule exists and VSI list being used by this rule is
* referenced by more than 1 VLAN rule. Then create a new VSI
* list appending previous VSI with new VSI and update existing
- * VLAN rule to point to new VSI list id
+ * VLAN rule to point to new VSI list ID
*/
struct ice_fltr_info tmp_fltr;
u16 vsi_handle_arr[2];
@@ -1926,6 +1970,65 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
}
/**
+ * ice_add_eth_mac - Add ethertype and MAC based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype and MAC filters; MAC is optional
+ */
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+{
+ struct ice_fltr_list_entry *em_list_itr;
+
+ if (!em_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry(em_list_itr, em_list, list_entry) {
+ enum ice_sw_lkup_type l_type =
+ em_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+ l_type != ICE_SW_LKUP_ETHERTYPE)
+ return ICE_ERR_PARAM;
+
+ em_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ em_list_itr->status = ice_add_rule_internal(hw, l_type,
+ em_list_itr);
+ if (em_list_itr->status)
+ return em_list_itr->status;
+ }
+ return 0;
+}
+
+/**
+ * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype or ethertype MAC entries
+ */
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+{
+ struct ice_fltr_list_entry *em_list_itr, *tmp;
+
+ if (!em_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
+ enum ice_sw_lkup_type l_type =
+ em_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+ l_type != ICE_SW_LKUP_ETHERTYPE)
+ return ICE_ERR_PARAM;
+
+ em_list_itr->status = ice_remove_rule_internal(hw, l_type,
+ em_list_itr);
+ if (em_list_itr->status)
+ return em_list_itr->status;
+ }
+ return 0;
+}
+
+/**
* ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
* @rule_head: pointer to the switch list structure that we want to delete
@@ -2170,7 +2273,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_entry;
enum ice_status status = 0;
- /* check to make sure VSI id is valid and within boundary */
+ /* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
@@ -2190,6 +2293,291 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
}
/**
+ * ice_determine_promisc_mask
+ * @fi: filter info to parse
+ *
+ * Helper function to determine which ICE_PROMISC_ mask corresponds
+ * to the given filter info.
+ */
+static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
+{
+ u16 vid = fi->l_data.mac_vlan.vlan_id;
+ u8 *macaddr = fi->l_data.mac.mac_addr;
+ bool is_tx_fltr = false;
+ u8 promisc_mask = 0;
+
+ if (fi->flag == ICE_FLTR_TX)
+ is_tx_fltr = true;
+
+ if (is_broadcast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
+ else if (is_multicast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
+ else if (is_unicast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
+ if (vid)
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
+
+ return promisc_mask;
+}
+
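For reference, this is how the mask composition plays out for a couple of inputs. The sketch below repeats the ICE_PROMISC_* bit values that this patch adds to ice_switch.h and reduces the filter to three plain parameters; it is not the driver function itself.

#include <stdbool.h>
#include <stdio.h>

#define ICE_PROMISC_UCAST_RX 0x1
#define ICE_PROMISC_UCAST_TX 0x2
#define ICE_PROMISC_MCAST_RX 0x4
#define ICE_PROMISC_MCAST_TX 0x8
#define ICE_PROMISC_BCAST_RX 0x10
#define ICE_PROMISC_BCAST_TX 0x20
#define ICE_PROMISC_VLAN_RX  0x40
#define ICE_PROMISC_VLAN_TX  0x80

enum addr_type { UCAST, MCAST, BCAST };

/* simplified stand-in for ice_determine_promisc_mask() */
static unsigned int promisc_mask(enum addr_type addr, bool is_tx_fltr,
				 unsigned int vid)
{
	unsigned int mask = 0;

	if (addr == BCAST)
		mask |= is_tx_fltr ? ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (addr == MCAST)
		mask |= is_tx_fltr ? ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else
		mask |= is_tx_fltr ? ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	if (vid)
		mask |= is_tx_fltr ? ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
	return mask;
}

int main(void)
{
	/* broadcast Tx filter carrying VLAN 10 -> 0x20 | 0x80 = 0xa0 */
	printf("bcast Tx, VLAN 10 : 0x%x\n", promisc_mask(BCAST, true, 10));
	/* unicast Rx filter, no VLAN -> 0x1 */
	printf("ucast Rx, no VLAN : 0x%x\n", promisc_mask(UCAST, false, 0));
	return 0;
}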
+/**
+ * ice_remove_promisc - Remove promisc based filter rules
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe ID for which the rule needs to be removed
+ * @v_list: list of promisc entries
+ */
+static enum ice_status
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
+ struct list_head *v_list)
+{
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
+
+ list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
+ v_list_itr->status =
+ ice_remove_rule_internal(hw, recp_id, v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
+ }
+ return 0;
+}
+
+/**
+ * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *fm_entry, *tmp;
+ struct list_head remove_list_head;
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (vid)
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ else
+ recipe_id = ICE_SW_LKUP_PROMISC;
+
+ rule_head = &sw->recp_list[recipe_id].filt_rules;
+ rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
+
+ INIT_LIST_HEAD(&remove_list_head);
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(itr, rule_head, list_entry) {
+ u8 fltr_promisc_mask = 0;
+
+ if (!ice_vsi_uses_fltr(itr, vsi_handle))
+ continue;
+
+ fltr_promisc_mask |=
+ ice_determine_promisc_mask(&itr->fltr_info);
+
+ /* Skip if filter is not completely specified by given mask */
+ if (fltr_promisc_mask & ~promisc_mask)
+ continue;
+
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+ &remove_list_head,
+ &itr->fltr_info);
+ if (status) {
+ mutex_unlock(rule_lock);
+ goto free_fltr_list;
+ }
+ }
+ mutex_unlock(rule_lock);
+
+ status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
+
+free_fltr_list:
+ list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
+ list_del(&fm_entry->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fm_entry);
+ }
+
+ return status;
+}
+
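The "completely specified" check above keeps a filter for removal only when every bit that filter maps to is present in the caller's mask. A tiny standalone example of that bit test, with hypothetical mask values:

#include <stdbool.h>
#include <stdio.h>

/* a filter may be removed only if the caller's mask covers every bit
 * the filter maps to, i.e. (fltr_mask & ~promisc_mask) == 0
 */
static bool covered(unsigned int fltr_mask, unsigned int promisc_mask)
{
	return !(fltr_mask & ~promisc_mask);
}

int main(void)
{
	/* filter needs UCAST_RX|VLAN_RX (0x41), caller clears only UCAST_RX */
	printf("0x41 vs 0x01 -> %s\n", covered(0x41, 0x01) ? "remove" : "skip");
	/* filter needs UCAST_RX (0x01), caller clears UCAST_RX|MCAST_RX */
	printf("0x01 vs 0x05 -> %s\n", covered(0x01, 0x05) ? "remove" : "skip");
	return 0;
}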
+/**
+ * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
+{
+ enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
+ struct ice_fltr_list_entry f_list_entry;
+ struct ice_fltr_info new_fltr;
+ enum ice_status status = 0;
+ bool is_tx_fltr;
+ u16 hw_vsi_id;
+ int pkt_type;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ memset(&new_fltr, 0, sizeof(new_fltr));
+
+ if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
+ new_fltr.l_data.mac_vlan.vlan_id = vid;
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ } else {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
+ recipe_id = ICE_SW_LKUP_PROMISC;
+ }
+
+ /* Separate filters must be set for each direction/packet type
+ * combination, so we will loop over the mask value, store the
+ * individual type, and clear it out in the input mask as it
+ * is found.
+ */
+ while (promisc_mask) {
+ u8 *mac_addr;
+
+ pkt_type = 0;
+ is_tx_fltr = false;
+
+ if (promisc_mask & ICE_PROMISC_UCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_RX;
+ pkt_type = UCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_TX;
+ pkt_type = UCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_RX;
+ pkt_type = MCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_TX;
+ pkt_type = MCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_RX;
+ pkt_type = BCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_TX;
+ pkt_type = BCAST_FLTR;
+ is_tx_fltr = true;
+ }
+
+ /* Check for VLAN promiscuous flag */
+ if (promisc_mask & ICE_PROMISC_VLAN_RX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_RX;
+ } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_TX;
+ is_tx_fltr = true;
+ }
+
+ /* Set filter DA based on packet type */
+ mac_addr = new_fltr.l_data.mac.mac_addr;
+ if (pkt_type == BCAST_FLTR) {
+ eth_broadcast_addr(mac_addr);
+ } else if (pkt_type == MCAST_FLTR ||
+ pkt_type == UCAST_FLTR) {
+ /* Use the dummy ether header DA */
+ ether_addr_copy(mac_addr, dummy_eth_header);
+ if (pkt_type == MCAST_FLTR)
+ mac_addr[0] |= 0x1; /* Set multicast bit */
+ }
+
+ /* Need to reset this to zero for all iterations */
+ new_fltr.flag = 0;
+ if (is_tx_fltr) {
+ new_fltr.flag |= ICE_FLTR_TX;
+ new_fltr.src = hw_vsi_id;
+ } else {
+ new_fltr.flag |= ICE_FLTR_RX;
+ new_fltr.src = hw->port_info->lport;
+ }
+
+ new_fltr.fltr_act = ICE_FWD_TO_VSI;
+ new_fltr.vsi_handle = vsi_handle;
+ new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_list_entry.fltr_info = new_fltr;
+
+ status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
+ if (status)
+ goto set_promisc_exit;
+ }
+
+set_promisc_exit:
+ return status;
+}
+
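The comment inside ice_set_vsi_promisc() describes peeling one direction/packet-type bit off the requested mask per loop iteration. The standalone sketch below shows the same walk over a combined mask; the real code uses an explicit if/else chain per ICE_PROMISC_* bit and programs a filter where this sketch only prints.

#include <stdio.h>

#define ICE_PROMISC_UCAST_RX 0x1
#define ICE_PROMISC_MCAST_RX 0x4
#define ICE_PROMISC_BCAST_TX 0x20

int main(void)
{
	unsigned int mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX |
			    ICE_PROMISC_BCAST_TX;

	while (mask) {
		unsigned int bit = mask & -mask;	/* lowest set bit */

		mask &= ~bit;				/* clear it out */
		printf("program one filter for bit 0x%x, 0x%x still pending\n",
		       bit, mask);
	}
	return 0;
}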
+/**
+ * ice_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @rm_vlan_promisc: Clear (true) or set (false) VLAN promiscuous mode for the VSI's VLANs
+ *
+ * Configure VSI with all associated VLANs to given promiscuous mode(s)
+ */
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *list_itr, *tmp;
+ struct list_head vsi_list_head;
+ struct list_head *vlan_head;
+ struct mutex *vlan_lock; /* Lock to protect filter rule list */
+ enum ice_status status;
+ u16 vlan_id;
+
+ INIT_LIST_HEAD(&vsi_list_head);
+ vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ mutex_lock(vlan_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
+ &vsi_list_head);
+ mutex_unlock(vlan_lock);
+ if (status)
+ goto free_fltr_list;
+
+ list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
+ if (rm_vlan_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ if (status)
+ break;
+ }
+
+free_fltr_list:
+ list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
+ list_del(&list_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_itr);
+ }
+ return status;
+}
+
+/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
@@ -2224,12 +2612,14 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
case ICE_SW_LKUP_VLAN:
ice_remove_vlan(hw, &remove_list_head);
break;
+ case ICE_SW_LKUP_PROMISC:
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ ice_remove_promisc(hw, lkup, &remove_list_head);
+ break;
case ICE_SW_LKUP_MAC_VLAN:
case ICE_SW_LKUP_ETHERTYPE:
case ICE_SW_LKUP_ETHERTYPE_MAC:
- case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_DFLT:
- case ICE_SW_LKUP_PROMISC_VLAN:
case ICE_SW_LKUP_LAST:
default:
ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
@@ -2263,7 +2653,7 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
- * @recp_id: Recipe id for which rules need to be replayed
+ * @recp_id: Recipe ID for which rules need to be replayed
* @list_head: list for which filters need to be replayed
*
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
@@ -2287,7 +2677,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
f_entry.fltr_info = itr->fltr_info;
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
itr->fltr_info.vsi_handle == vsi_handle) {
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
status = ice_add_rule_internal(hw, recp_id, &f_entry);
@@ -2302,7 +2692,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
f_entry.fltr_info.vsi_handle = vsi_handle;
f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
if (recp_id == ICE_SW_LKUP_VLAN)
@@ -2342,7 +2732,7 @@ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_rm_all_sw_replay_rule_info - deletes filter replay rules
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Deletes the filter replay rules.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d5ef0bd58bf9..732b0b9b2e15 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -9,6 +9,13 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_VSI_INVAL_ID 0xffff
+#define ICE_INVAL_Q_HANDLE 0xFFFF
+
+/* VSI queue context structure */
+struct ice_q_ctx {
+ u16 q_handle;
+};
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
@@ -20,6 +27,8 @@ struct ice_vsi_ctx {
struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
u8 vf_num;
+ u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
enum ice_sw_fwd_act_type {
@@ -44,7 +53,7 @@ enum ice_sw_lkup_type {
ICE_SW_LKUP_LAST
};
-/* type of filter src id */
+/* type of filter src ID */
enum ice_src_id {
ICE_SRC_ID_UNKNOWN = 0,
ICE_SRC_ID_VSI,
@@ -95,8 +104,8 @@ struct ice_fltr_info {
/* Depending on filter action */
union {
- /* queue id in case of ICE_FWD_TO_Q and starting
- * queue id in case of ICE_FWD_TO_QGRP.
+ /* queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
u16 hw_vsi_id:10;
@@ -143,7 +152,7 @@ struct ice_sw_recipe {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
};
-/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
+/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
struct ice_vsi_list_map_info {
struct list_head list_entry;
DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
@@ -165,7 +174,7 @@ struct ice_fltr_list_entry {
* used for VLAN membership.
*/
struct ice_fltr_mgmt_list_entry {
- /* back pointer to VSI list id to VSI list mapping */
+ /* back pointer to VSI list ID to VSI list mapping */
struct ice_vsi_list_map_info *vsi_list_info;
u16 vsi_count;
#define ICE_INVAL_LG_ACT_INDEX 0xffff
@@ -178,6 +187,17 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
+enum ice_promisc_flags {
+ ICE_PROMISC_UCAST_RX = 0x1,
+ ICE_PROMISC_UCAST_TX = 0x2,
+ ICE_PROMISC_MCAST_RX = 0x4,
+ ICE_PROMISC_MCAST_TX = 0x8,
+ ICE_PROMISC_BCAST_RX = 0x10,
+ ICE_PROMISC_BCAST_TX = 0x20,
+ ICE_PROMISC_VLAN_RX = 0x40,
+ ICE_PROMISC_VLAN_TX = 0x80,
+};
+
/* VSI related commands */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
@@ -198,11 +218,27 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+enum ice_status
+ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+
+/* Promisc/defport setup for VSIs */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc);
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c289d97f477d..2364eaf33d23 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
+#include "ice_dcb_lib.h"
#define ICE_RX_HDR_SIZE 256
@@ -100,8 +101,8 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
- int napi_budget)
+static bool
+ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = vsi->work_lmt;
@@ -236,9 +237,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
if (!tx_ring->tx_buf)
return -ENOMEM;
- /* round up to nearest 4K */
+ /* round up to nearest page */
tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- 4096);
+ PAGE_SIZE);
tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
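The switch from a hard-coded 4096 to PAGE_SIZE keeps the descriptor ring allocation page-aligned on architectures with larger pages. A quick standalone check of the rounding, assuming 4 KB pages and a 32-byte descriptor purely for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int counts[] = { 64, 512, 1024 };
	unsigned int desc_sz = 32;	/* illustrative descriptor size */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int bytes = counts[i] * desc_sz;

		printf("%4u descriptors: %6u bytes -> %6u bytes allocated\n",
		       counts[i], bytes, ALIGN(bytes, PAGE_SIZE));
	}
	return 0;
}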
@@ -282,8 +283,17 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_buf->page)
continue;
- dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_buf->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(dev, rx_buf->dma,
+ rx_buf->page_offset,
+ ICE_RXBUF_2048, DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
rx_buf->page = NULL;
rx_buf->page_offset = 0;
@@ -339,9 +349,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return -ENOMEM;
- /* round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
- rx_ring->size = ALIGN(rx_ring->size, 4096);
+ /* round up to nearest page */
+ rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
@@ -389,8 +399,8 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
* Returns true if the page was successfully allocated or
* reused.
*/
-static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
- struct ice_rx_buf *bi)
+static bool
+ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -409,7 +419,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -423,6 +434,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
@@ -444,7 +457,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (!rx_ring->netdev || !cleaned_count)
return false;
- /* get the RX descriptor and buffer based on next_to_use */
+ /* get the Rx descriptor and buffer based on next_to_use */
rx_desc = ICE_RX_DESC(rx_ring, ntu);
bi = &rx_ring->rx_buf[ntu];
@@ -452,6 +465,12 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (!ice_alloc_mapped_page(rx_ring, bi))
goto no_bufs;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ ICE_RXBUF_2048,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -497,61 +516,43 @@ static bool ice_page_is_reserved(struct page *page)
}
/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_buf: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
- * @skb: sk_buf to place the data into
- *
- * This function will add the data contained in rx_buf->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
+ * @rx_buf: Rx buffer to adjust
+ * @size: Size of adjustment
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * Update the offset within page so that Rx buf will be ready to be reused.
+ * For systems with PAGE_SIZE < 8192 this function will flip the page offset
+ * so the second half of the page assigned to the Rx buffer will be used;
+ * otherwise the offset is moved by @size bytes
*/
-static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+static void
+ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ICE_RXBUF_2048;
+ /* flip page offset to other buffer */
+ rx_buf->page_offset ^= size;
#else
- unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
- unsigned int truesize;
-#endif /* PAGE_SIZE < 8192) */
-
- struct page *page;
- unsigned int size;
-
- size = le16_to_cpu(rx_desc->wb.pkt_len) &
- ICE_RX_FLX_DESC_PKT_LEN_M;
-
- page = rx_buf->page;
+ /* move offset up to the next cache line */
+ rx_buf->page_offset += size;
+#endif
+}
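A standalone demonstration of the offset handling described above: on 4 KB pages the 2048-byte offset toggles between the two halves of the page, which is exactly what the XOR achieves (the large-page path simply advances the offset instead).

#include <stdio.h>

#define ICE_RXBUF_2048 2048

int main(void)
{
	unsigned int page_offset = 0;
	int use;

	for (use = 1; use <= 4; use++) {
		/* PAGE_SIZE < 8192 path: flip to the other half page */
		page_offset ^= ICE_RXBUF_2048;
		printf("after use %d: page_offset = %u\n", use, page_offset);
	}
	return 0;
}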
+/**
+ * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
+ * @rx_buf: buffer containing the page
+ *
+ * If page is reusable, we have a green light for calling ice_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and
+ * page freed
+ */
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+{
#if (PAGE_SIZE >= 8192)
- truesize = ALIGN(size, L1_CACHE_BYTES);
-#endif /* PAGE_SIZE >= 8192) */
-
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buf->page_offset;
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ice_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buf->page_offset, size, truesize);
+ unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+#endif
+ unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
+ struct page *page = rx_buf->page;
/* avoid re-using remote pages */
if (unlikely(ice_page_is_reserved(page)))
@@ -559,36 +560,61 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += truesize;
-
if (rx_buf->page_offset > last_offset)
return false;
#endif /* PAGE_SIZE < 8192) */
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
*/
- get_page(rx_buf->page);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
+ rx_buf->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
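The reuse check works together with the page_ref_add()/pagecnt_bias bookkeeping introduced in ice_alloc_mapped_page(). The toy model below tracks the same two counters for one page; the numbers are illustrative and the kernel page APIs are replaced with plain integers.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int refcount = 1;		/* page_count() after alloc */
	unsigned int pagecnt_bias;

	refcount += USHRT_MAX - 1;		/* page_ref_add() at map time */
	pagecnt_bias = USHRT_MAX;

	pagecnt_bias--;				/* first half handed to an skb */
	printf("one frag out : in use = %u -> %s\n", refcount - pagecnt_bias,
	       refcount - pagecnt_bias > 1 ? "hold page" : "reuse page");

	pagecnt_bias--;				/* second half handed out too */
	printf("two frags out: in use = %u -> %s\n", refcount - pagecnt_bias,
	       refcount - pagecnt_bias > 1 ? "hold page" : "reuse page");

	refcount--;				/* stack frees the first skb */
	printf("one skb freed: in use = %u -> %s\n", refcount - pagecnt_bias,
	       refcount - pagecnt_bias > 1 ? "hold page" : "reuse page");
	return 0;
}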
/**
+ * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_buf: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
+ *
+ * This function will add the data contained in rx_buf->page to the skb.
+ * It will just attach the page as a frag to the skb.
+ * The function will then update the page offset.
+ */
+static void
+ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size)
+{
+#if (PAGE_SIZE >= 8192)
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#else
+ unsigned int truesize = ICE_RXBUF_2048;
+#endif
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->page_offset, size, truesize);
+
+ /* page is being used so we must update the page offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+}
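The truesize charged per fragment differs by page size, as the #if above shows. The standalone sketch below compares the two charges for a few packet lengths, assuming a 64-byte SMP_CACHE_BYTES for the SKB_DATA_ALIGN path.

#include <stdio.h>

#define SMP_CACHE_BYTES 64	/* assumed cache line size */
#define SKB_DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))
#define ICE_RXBUF_2048 2048

int main(void)
{
	unsigned int sizes[] = { 60, 300, 1514 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("frag of %4u bytes: 4K pages charge %u, larger pages charge %u\n",
		       sizes[i], ICE_RXBUF_2048, SKB_DATA_ALIGN(sizes[i]));
	return 0;
}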
+
+/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
*/
-static void ice_reuse_rx_page(struct ice_ring *rx_ring,
- struct ice_rx_buf *old_buf)
+static void
+ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -599,121 +625,132 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buf = *old_buf;
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls and unnecessary copy of skb.
+ */
+ new_buf->dma = old_buf->dma;
+ new_buf->page = old_buf->page;
+ new_buf->page_offset = old_buf->page_offset;
+ new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
/**
- * ice_fetch_rx_buf - Allocate skb and populate it
+ * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @skb: skb to be used
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
*/
-static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
+static struct ice_rx_buf *
+ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
+ const unsigned int size)
{
struct ice_rx_buf *rx_buf;
- struct sk_buff *skb;
- struct page *page;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- page = rx_buf->page;
- prefetchw(page);
+ prefetchw(rx_buf->page);
+ *skb = rx_buf->skb;
- skb = rx_buf->skb;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
+ rx_buf->page_offset, size,
+ DMA_FROM_DEVICE);
- if (likely(!skb)) {
- u8 *page_addr = page_address(page) + rx_buf->page_offset;
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buf->pagecnt_bias--;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ return rx_buf;
+}
+
+/**
+ * ice_construct_skb - Allocate skb and populate it
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @size: the length of the packet
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+static struct sk_buff *
+ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ unsigned int size)
+{
+ void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch((void *)(page_addr + L1_CACHE_BYTES));
+ prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- ICE_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buf_failed++;
- return NULL;
- }
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
+ skb_record_rx_queue(skb, rx_ring->q_index);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > ICE_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, ICE_RX_HDR_SIZE);
- skb_record_rx_queue(skb, rx_ring->q_index);
- } else {
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
- rx_buf->page_offset,
- ICE_RXBUF_2048,
- DMA_FROM_DEVICE);
-
- rx_buf->skb = NULL;
- }
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
- /* pull page into skb */
- if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
+ /* if we exhaust the linear part then add what is left as a frag */
+ size -= headlen;
+ if (size) {
+#if (PAGE_SIZE >= 8192)
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#else
+ unsigned int truesize = ICE_RXBUF_2048;
+#endif
+ skb_add_rx_frag(skb, 0, rx_buf->page,
+ rx_buf->page_offset + headlen, size, truesize);
+ /* buffer is used by skb, update page_offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ /* buffer is unused, reset bias back to rx_buf; data was copied
+ * onto skb's linear part so there's no need for adjusting
+ * page offset and we can reuse this buffer as-is
+ */
+ rx_buf->pagecnt_bias++;
}
- /* clear contents of buffer_info */
- rx_buf->page = NULL;
-
return skb;
}
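For a concrete sense of the split performed above: the driver copies at most ICE_RX_HDR_SIZE bytes (the parsed header) into the skb's linear area and attaches the remainder of the buffer as a page fragment. The standalone sketch below reproduces only that arithmetic; the 1000-byte packet and the 256-byte header budget are assumptions for illustration, not values taken from this hunk.

/* Standalone sketch of the linear/frag split done by ice_construct_skb().
 * The packet size and header budget are illustrative assumptions.
 */
#include <stdio.h>

#define RX_HDR_SIZE 256	/* assumed value of ICE_RX_HDR_SIZE */

int main(void)
{
	unsigned int size = 1000;	/* packet length from the Rx descriptor */
	unsigned int headlen = size;

	/* In the driver, eth_get_headlen() picks the true header length; the
	 * cap at RX_HDR_SIZE is the part that matters for the split.
	 */
	if (headlen > RX_HDR_SIZE)
		headlen = RX_HDR_SIZE;

	printf("linear part: %u bytes, page frag: %u bytes\n",
	       headlen, size - headlen);
	return 0;
}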
/**
- * ice_pull_tail - ice specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
+ * ice_put_rx_buf - Clean up used buffer and either recycle or free
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
*
- * This function is an ice specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
+ * This function will clean up the contents of the rx_buf. It will
+ * either recycle the buffer or unmap it and free the associated resources.
*/
-static void ice_pull_tail(struct sk_buff *skb)
+static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int pull_len;
- unsigned char *va;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+ /* hand second half of page back to the ring */
+ if (ice_can_reuse_rx_page(rx_buf)) {
+ ice_reuse_rx_page(rx_ring, rx_buf);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
+ }
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
+ /* clear contents of buffer_info */
+ rx_buf->page = NULL;
+ rx_buf->skb = NULL;
}
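ice_put_rx_buf() leans on ice_can_reuse_rx_page(), which is not part of this hunk. As a hedged illustration of the pagecnt_bias scheme introduced here, a reuse check in the style used by related Intel drivers (i40e/ixgbe) looks roughly like the sketch below; the real ice helper may differ in detail.

/* Hypothetical sketch only: the real ice_can_reuse_rx_page() is not shown in
 * this hunk. The idea is that the page can be recycled as long as the driver
 * is effectively the sole owner, tracked cheaply via pagecnt_bias.
 */
static bool ice_can_reuse_rx_page_sketch(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* a page from a remote NUMA node is not worth recycling */
	if (page_to_nid(page) != numa_mem_id())
		return false;

	/* If someone else still holds a reference (refcount has drifted away
	 * from our bias), hand the page back to the page allocator instead.
	 */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;

	/* If the bias is exhausted, restock the page references in bulk so
	 * the cheap bias accounting can continue.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}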
/**
@@ -730,10 +767,6 @@ static void ice_pull_tail(struct sk_buff *skb)
*/
static bool ice_cleanup_headers(struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- ice_pull_tail(skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -751,8 +784,8 @@ static bool ice_cleanup_headers(struct sk_buff *skb)
* The status_error_len doesn't need to be shifted because it begins
* at offset zero.
*/
-static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
- const u16 stat_err_bits)
+static bool
+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
return !!(rx_desc->wb.status_error0 &
cpu_to_le16(stat_err_bits));
@@ -769,9 +802,9 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
*/
-static bool ice_is_non_eop(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+static bool
+ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -838,8 +871,9 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* skb->protocol must be set before this function is called
*/
-static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+static void
+ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
@@ -909,9 +943,10 @@ checksum_fail:
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
-static void ice_process_skb_fields(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
+static void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
@@ -925,18 +960,17 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: Rx ring in play
* @skb: packet to send up
- * @vlan_tag: vlan tag for packet
+ * @vlan_tag: VLAN tag for packet
*
* This function sends the completed packet (via skb) up the stack using

- * gro receive functions (with/without vlan tag)
+ * gro receive functions (with/without VLAN tag)
*/
-static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
- u16 vlan_tag)
+static void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK)) {
+ (vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- }
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -958,10 +992,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
bool failure = false;
- /* start the loop to process RX packets bounded by 'budget' */
+ /* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
+ struct ice_rx_buf *rx_buf;
struct sk_buff *skb;
+ unsigned int size;
u16 stat_err_bits;
u16 vlan_tag = 0;
u8 rx_ptype;
@@ -973,7 +1009,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
cleaned_count = 0;
}
- /* get the RX desc from RX ring based on 'next_to_clean' */
+ /* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
/* status_error_len will always be zero for unused descriptors
@@ -991,11 +1027,24 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
/* allocate (if needed) and populate skb */
- skb = ice_fetch_rx_buf(rx_ring, rx_desc);
- if (!skb)
+ if (skb)
+ ice_add_rx_frag(rx_buf, skb, size);
+ else
+ skb = ice_construct_skb(rx_ring, rx_buf, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ rx_buf->pagecnt_bias++;
break;
+ }
+ ice_put_rx_buf(rx_ring, rx_buf);
cleaned_count++;
/* skip if it is NOP desc */
@@ -1049,17 +1098,247 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
}
/**
+ * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
+ * @port_info: port_info structure containing the current link speed
+ * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
+ * @itr: itr value to update
+ *
+ * Calculate how big of an increment should be applied to the ITR value passed
+ * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * link speed.
+ *
+ * The following is a calculation derived from:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to:
+ *
+ *        wmem_default * bits_per_byte * usecs_per_sec     pkt_size + 24
+ * ITR = ----------------------------------------------- * --------------
+ *                           rate                          pkt_size + 640
+ */
+static unsigned int
+ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
+ unsigned int avg_pkt_size,
+ unsigned int itr)
+{
+ switch (port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ /* fall through */
+ default:
+ itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ }
+
+ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= ICE_ITR_ADAPTIVE_LATENCY;
+ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ }
+
+ return itr;
+}
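A worked example of the increment computed above, using the 25 Gb/s constant from the switch statement: for an assumed average packet size of 1500 bytes the increment is DIV_ROUND_UP(68 * 1524, 2140) = 49 ITR units. The standalone snippet below just reproduces that arithmetic; the link speed and packet size are illustrative assumptions.

/* Worked example for ice_adjust_itr_by_size_and_speed(): 25 Gb/s link and an
 * assumed 1500-byte average packet. Both inputs are illustrative.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int avg_pkt_size = 1500;
	unsigned int inc = DIV_ROUND_UP(68 * (avg_pkt_size + 24),
					avg_pkt_size + 640);

	printf("ITR increment: %u\n", inc);	/* prints 49 */
	return 0;
}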
+
+/**
+ * ice_update_itr - update the adaptive ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ */
+static void
+ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
+{
+ unsigned long next_update = jiffies;
+ unsigned int packets, bytes, itr;
+ bool container_is_rx;
+
+ if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
+ return;
+
+ /* If itr_countdown is set it means we programmed an ITR within
+ * the last 4 interrupt cycles. This has a side effect of us
+ * potentially firing an early interrupt. In order to work around
+ * this we need to throw out any data received for a few
+ * interrupts following the update.
+ */
+ if (q_vector->itr_countdown) {
+ itr = rc->target_itr;
+ goto clear_counts;
+ }
+
+ container_is_rx = (&q_vector->rx == rc);
+ /* For Rx we want to push the delay up and default to low latency.
+ * for Tx we want to pull the delay down and default to high latency.
+ */
+ itr = container_is_rx ?
+ ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
+ ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
+
+ /* If we haven't updated within the last 1-2 jiffies we can assume
+ * that either packets are coming in so slowly that there hasn't been
+ * any work, or that there is so much work that NAPI is dealing
+ * with interrupt moderation and we don't need to do anything.
+ */
+ if (time_after(next_update, rc->next_update))
+ goto clear_counts;
+
+ packets = rc->total_pkts;
+ bytes = rc->total_bytes;
+
+ if (container_is_rx) {
+ /* For Rx, if there are 1 to 4 packets and fewer than 9000 bytes,
+ * assume there is insufficient data to use the bulk rate limiting
+ * approach unless Tx is already in bulk rate limiting. We are
+ * likely latency driven.
+ */
+ if (packets && packets < 4 && bytes < 9000 &&
+ (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
+ itr = ICE_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size_and_speed;
+ }
+ } else if (packets < 4) {
+ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
+ * bulk mode and we are receiving 4 or fewer packets just
+ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+ * that the Rx can relax.
+ */
+ if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & ICE_ITR_MASK) ==
+ ICE_ITR_ADAPTIVE_MAX_USECS)
+ goto clear_counts;
+ } else if (packets > 32) {
+ /* If we have processed over 32 packets in a single interrupt
+ * for Tx assume we need to switch over to "bulk" mode.
+ */
+ rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
+ }
+
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * Between 4 and 56 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
+ */
+ if (packets < 56) {
+ itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= ICE_ITR_ADAPTIVE_LATENCY;
+ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ }
+ goto clear_counts;
+ }
+
+ if (packets <= 256) {
+ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+ itr &= ICE_ITR_MASK;
+
+ /* Between 56 and 112 is our "goldilocks" zone where we are
+ * working out "just right". Just report that our current
+ * ITR is good for us.
+ */
+ if (packets <= 112)
+ goto clear_counts;
+
+ /* If packet count is 128 or greater we are likely looking
+ * at a slight overrun of the delay we want. Try halving
+ * our delay to see if that will cut the number of packets
+ * in half per interrupt.
+ */
+ itr >>= 1;
+ itr &= ICE_ITR_MASK;
+ if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
+ itr = ICE_ITR_ADAPTIVE_MIN_USECS;
+
+ goto clear_counts;
+ }
+
+ /* The paths below assume we are dealing with a bulk ITR since the
+ * number of packets is greater than 256. We are just going to have
+ * to compute a value and try to bring the count under control,
+ * though for smaller packet sizes there isn't much we can do as
+ * NAPI polling will likely be kicking in sooner rather than later.
+ */
+ itr = ICE_ITR_ADAPTIVE_BULK;
+
+adjust_by_size_and_speed:
+
+ /* based on checks above packets cannot be 0 so division is safe */
+ itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
+ bytes / packets, itr);
+
+clear_counts:
+ /* write back value */
+ rc->target_itr = itr;
+
+ /* next update should occur within next jiffy */
+ rc->next_update = next_update + 1;
+
+ rc->total_bytes = 0;
+ rc->total_pkts = 0;
+}
+
+/**
* ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
* @itr_idx: interrupt throttling index
- * @reg_itr: interrupt throttling value adjusted based on ITR granularity
+ * @itr: interrupt throttling value in usecs
*/
-static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
+static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
+ /* The itr value is reported in microseconds, and the register value is
+ * recorded in 2 microsecond units. For this reason we only need to
+ * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
+ * granularity as a shift instead of division. The mask makes sure the
+ * ITR value is never odd so we don't accidentally write into the field
+ * prior to the ITR field.
+ */
+ itr &= ICE_ITR_MASK;
+
return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
- (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
+ (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
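To make the granularity handling concrete: with ICE_ITR_GRAN_S = 1 (2-microsecond register units, per the ice_txrx.h hunk below), a requested ITR of 51 usecs is first forced even by ICE_ITR_MASK (50) and ends up in the INTERVAL field as 25 register units. The snippet mirrors only that arithmetic; the actual field positions come from the hardware register definitions and are not asserted here.

/* Sketch of the masking and 2-usec granularity conversion done by
 * ice_buildreg_itr(); the register field offsets themselves are omitted.
 */
#include <stdio.h>

#define ITR_MASK   0x1FFE	/* ICE_ITR_MASK: keeps the value even */
#define ITR_GRAN_S 1		/* ICE_ITR_GRAN_S: hardware counts 2-usec units */

int main(void)
{
	unsigned int itr_usecs = 51;			/* assumed requested ITR */
	unsigned int masked = itr_usecs & ITR_MASK;	/* 50 */
	unsigned int interval = masked >> ITR_GRAN_S;	/* 25 register units */

	printf("ITR %u usecs -> INTERVAL field %u\n", masked, interval);
	return 0;
}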
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
+
/**
* ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
* @vsi: the VSI associated with the q_vector
@@ -1068,10 +1347,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_ring_container *rc;
+ struct ice_ring_container *tx = &q_vector->tx;
+ struct ice_ring_container *rx = &q_vector->rx;
u32 itr_val;
+ /* This will do nothing if dynamic updates are not enabled */
+ ice_update_itr(q_vector, tx);
+ ice_update_itr(q_vector, rx);
+
/* This block of logic allows us to get away with only updating
* one ITR value with each interrupt. The idea is to perform a
* pseudo-lazy update with the following criteria.
@@ -1080,35 +1363,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
* 2. If we must reduce an ITR that is given highest priority.
* 3. We then give priority to increasing ITR based on amount.
*/
- if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
- rc = &q_vector->rx;
+ if (rx->target_itr < rx->current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
- } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
- ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
- (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
- rc = &q_vector->tx;
+ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+ rx->current_itr = rx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if ((tx->target_itr < tx->current_itr) ||
+ ((rx->target_itr - rx->current_itr) <
+ (tx->target_itr - tx->current_itr))) {
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
- } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
- rc = &q_vector->rx;
+ itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
+ tx->current_itr = tx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if (rx->current_itr != rx->target_itr) {
/* Rx ITR needs to be increased, third priority */
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
+ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+ rx->current_itr = rx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* Still have to re-enable the interrupts */
itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
}
- if (!test_bit(__ICE_DOWN, vsi->state)) {
- int vector = vsi->hw_base_vector + q_vector->v_idx;
-
- wr32(hw, GLINT_DYN_CTL(vector), itr_val);
- }
+ if (!test_bit(__ICE_DOWN, vsi->state))
+ wr32(&vsi->back->hw,
+ GLINT_DYN_CTL(q_vector->reg_idx),
+ itr_val);
}
/**
@@ -1354,13 +1638,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return;
@@ -1480,7 +1759,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/**
- * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
* @tx_ring: ring to send buffer on
* @first: pointer to struct ice_tx_buf
*
@@ -1506,7 +1785,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
* to the encapsulated ethertype.
*/
skb->protocol = vlan_get_protocol(skb);
- goto out;
+ return 0;
}
/* if we have a HW VLAN tag being added, default to the HW one */
@@ -1528,8 +1807,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
}
-out:
- return 0;
+ return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
/**
@@ -1566,6 +1844,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (err < 0)
return err;
+ /* cppcheck-suppress unreadVariable */
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fc358ea81816..66e05032ee56 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -45,8 +45,13 @@
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
+#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
+#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
+#define ICE_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
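ICE_RX_DMA_ATTR is used for the unmap in ice_put_rx_buf() above; for the attributes to stay paired, the Rx page allocation path (not included in this hunk) has to map with the same flags. The sketch below illustrates that mapping side; the helper name and field initialization are assumptions, not the driver's actual code.

/* Illustrative sketch only: map an Rx page with the same attribute set that
 * the unmap path passes, so DMA_ATTR_SKIP_CPU_SYNC/WEAK_ORDERING stay paired.
 */
static bool ice_map_rx_page_sketch(struct ice_ring *rx_ring,
				   struct ice_rx_buf *bi, struct page *page)
{
	dma_addr_t dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
					    DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
	bi->pagecnt_bias = 1;	/* driver starts as the page's only owner */

	return true;
}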
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
struct sk_buff *skb;
@@ -73,6 +78,7 @@ struct ice_rx_buf {
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
+ u16 pagecnt_bias;
};
struct ice_q_stats {
@@ -124,11 +130,19 @@ enum ice_rx_dtype {
#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
-#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
+#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
+#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
+#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
+#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
+#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
+#define ICE_ITR_ADAPTIVE_BULK 0x0000
+
#define ICE_DFLT_INTRL 0
+#define ICE_MAX_INTRL 236
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
@@ -149,6 +163,9 @@ struct ice_ring {
};
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
+#ifdef CONFIG_DCB
+ u8 dcb_tc; /* Traffic class of ring */
+#endif /* CONFIG_DCB */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -173,21 +190,13 @@ struct ice_ring {
u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
-enum ice_latency_range {
- ICE_LOWEST_LATENCY = 0,
- ICE_LOW_LATENCY = 1,
- ICE_BULK_LATENCY = 2,
- ICE_ULTRA_LATENCY = 3,
-};
-
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
unsigned long next_update; /* jiffies value of next queue update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
- enum ice_latency_range latency_range;
- int itr_idx; /* index in the interrupt vector */
+ u16 itr_idx; /* index in the interrupt vector */
u16 target_itr; /* value in usecs divided by the hw->itr_gran */
u16 current_itr; /* value in usecs divided by the hw->itr_gran */
/* high bit set means dynamic ITR, rest is used to store user
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 17086d5b5c33..a862af4cbf78 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -24,6 +24,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
+#define ICE_DBG_PHY BIT_ULL(5)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
@@ -106,7 +107,7 @@ struct ice_link_status {
};
/* Different reset sources for which a disable queue AQ call has to be made in
- * order to clean the TX scheduler as a part of the reset
+ * order to clean the Tx scheduler as a part of the reset
*/
enum ice_disq_rst_src {
ICE_NO_RESET = 0,
@@ -128,11 +129,11 @@ struct ice_phy_info {
struct ice_hw_common_caps {
u32 valid_functions;
- /* TX/RX queues */
- u16 num_rxq; /* Number/Total RX queues */
- u16 rxq_first_id; /* First queue ID for RX queues */
- u16 num_txq; /* Number/Total TX queues */
- u16 txq_first_id; /* First queue ID for TX queues */
+ /* Tx/Rx queues */
+ u16 num_rxq; /* Number/Total Rx queues */
+ u16 rxq_first_id; /* First queue ID for Rx queues */
+ u16 num_txq; /* Number/Total Tx queues */
+ u16 txq_first_id; /* First queue ID for Tx queues */
/* MSI-X vectors */
u16 num_msix_vectors;
@@ -147,6 +148,8 @@ struct ice_hw_common_caps {
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
+
+ u8 dcb;
};
/* Function specific capabilities */
@@ -209,12 +212,17 @@ struct ice_nvm_info {
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
+#define ice_for_each_traffic_class(_i) \
+ for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
+
+#define ICE_INVAL_TEID 0xFFFFFFFF
+
struct ice_sched_node {
struct ice_sched_node *parent;
struct ice_sched_node *sibling; /* next sibling in the same layer */
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
- u32 agg_id; /* aggregator group id */
+ u32 agg_id; /* aggregator group ID */
u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
@@ -241,13 +249,12 @@ enum ice_agg_type {
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_DFLT_BW_WT 1
-/* vsi type list entry to locate corresponding vsi/ag nodes */
+/* VSI type list entry to locate corresponding VSI/ag nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
- u16 vsi_id;
};
/* driver defines the policy */
@@ -257,15 +264,70 @@ struct ice_sched_tx_policy {
u8 rdma_ena;
};
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct ice_dcb_ets_cfg {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prio_table[ICE_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS];
+ u8 tsatable[ICE_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct ice_dcb_pfc_cfg {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcena;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct ice_dcb_app_priority_table {
+ u16 prot_id;
+ u8 priority;
+ u8 selector;
+};
+
+#define ICE_MAX_USER_PRIORITY 8
+#define ICE_DCBX_MAX_APPS 32
+#define ICE_LLDPDU_SIZE 1500
+#define ICE_TLV_STATUS_OPER 0x1
+#define ICE_TLV_STATUS_SYNC 0x2
+#define ICE_TLV_STATUS_ERR 0x4
+#define ICE_APP_PROT_ID_FCOE 0x8906
+#define ICE_APP_PROT_ID_ISCSI 0x0cbc
+#define ICE_APP_PROT_ID_FIP 0x8914
+#define ICE_APP_SEL_ETHTYPE 0x1
+#define ICE_APP_SEL_TCPIP 0x2
+#define ICE_CEE_APP_SEL_ETHTYPE 0x0
+#define ICE_CEE_APP_SEL_TCPIP 0x1
+
+struct ice_dcbx_cfg {
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct ice_dcb_ets_cfg etscfg;
+ struct ice_dcb_ets_cfg etsrec;
+ struct ice_dcb_pfc_cfg pfc;
+ struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+ u8 dcbx_mode;
+#define ICE_DCBX_MODE_CEE 0x1
+#define ICE_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define ICE_DCBX_APPS_NON_WILLING 0x1
+};
+
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
- struct ice_hw *hw; /* back pointer to hw instance */
+ struct ice_hw *hw; /* back pointer to HW instance */
u32 last_node_teid; /* scheduler last node info */
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
+ u8 lport;
+#define ICE_LPORT_MASK 0xff
u16 dflt_tx_vsi_rule_id;
u16 dflt_tx_vsi_num;
u16 dflt_rx_vsi_rule_id;
@@ -274,9 +336,14 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
- u8 lport;
-#define ICE_LPORT_MASK 0xff
- u8 is_vf;
+ struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
+ /* DCBX info */
+ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
+ struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
+ /* LLDP/DCBX Status */
+ u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */
+ u8 is_sw_lldp:1;
+ u8 is_vf:1;
};
struct ice_switch_info {
@@ -320,7 +387,7 @@ struct ice_hw {
u8 pf_id; /* device profile info */
- /* TX Scheduler values */
+ /* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
u8 flattened_layers;
@@ -331,7 +398,7 @@ struct ice_hw {
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
- u8 reset_ongoing; /* true if hw is in reset, false otherwise */
+ u8 reset_ongoing; /* true if HW is in reset, false otherwise */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -410,6 +477,11 @@ struct ice_hw_port_stats {
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
u64 rx_size_64; /* prc64 */
u64 rx_size_127; /* prc127 */
u64 rx_size_255; /* prc255 */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 57155b4a59dc..a805cbdd69be 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,6 +5,37 @@
#include "ice_lib.h"
/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @ice_err: error return code
+ */
+static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
+{
+ switch (ice_err) {
+ case ICE_SUCCESS:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case ICE_ERR_BAD_PTR:
+ case ICE_ERR_INVAL_SIZE:
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ case ICE_ERR_PARAM:
+ case ICE_ERR_CFG:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case ICE_ERR_NO_MEMORY:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case ICE_ERR_NOT_READY:
+ case ICE_ERR_RESET_FAILED:
+ case ICE_ERR_FW_API_VER:
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_FULL:
+ case ICE_ERR_AQ_NO_WORK:
+ case ICE_ERR_AQ_EMPTY:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
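A typical use of this helper, consistent with the reply paths later in this file, is to translate a hardware status into a virtchnl status before answering the VF. The wrapper below is purely illustrative; its name is an assumption.

/* Illustrative sketch only: reply to a VF with a translated status code. */
static int ice_vc_reply_status_sketch(struct ice_vf *vf, u32 v_opcode,
				      enum ice_status err)
{
	return ice_vc_send_msg_to_vf(vf, v_opcode,
				     ice_err_to_virt_err(err), NULL, 0);
}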
+
+/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -14,7 +45,7 @@
*/
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
- enum ice_status v_retval, u8 *msg, u16 msglen)
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
struct ice_vf *vf = pf->vf;
@@ -104,7 +135,8 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
ICE_AQ_LINK_UP);
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
}
@@ -343,11 +375,41 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
}
/**
- * ice_vsi_set_pvid - Set port VLAN id for the VSI
- * @vsi: the VSI being changed
- * @vid: the VLAN id to set as a PVID
+ * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for adding a PVID
+ * @ctxt: the VSI ctxt to fill
+ * @vid: the VLAN ID to set as a PVID
+ */
+static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
+{
+ ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR);
+ ctxt->info.pvid = cpu_to_le16(vid);
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+}
+
+/**
+ * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctxt for removing the PVID
+ * @ctxt: the VSI ctxt to fill
+ */
+static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
+{
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+}
+
+/**
+ * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
+ * @vsi: the VSI to update
+ * @vid: the VLAN ID to set as a PVID
+ * @enable: true to enable the PVID, false to disable it
*/
-static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
@@ -359,50 +421,31 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
if (!ctxt)
return -ENOMEM;
- ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR);
- ctxt->info.pvid = cpu_to_le16(vid);
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt->info = vsi->info;
+ if (enable)
+ ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
+ else
+ ice_vsi_kill_pvid_fill_ctxt(ctxt);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
}
- vsi->info.pvid = ctxt->info.pvid;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info = ctxt->info;
out:
devm_kfree(dev, ctxt);
return ret;
}
/**
- * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
- * @vsi: the VSI being changed
- */
-static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
-
- if (ice_vsi_manage_vlan_stripping(vsi, false)) {
- dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
- vsi->vsi_num);
- return -ENODEV;
- }
-
- vsi->info.pvid = 0;
- return 0;
-}
-
-/**
* ice_vf_vsi_setup - Set up a VF VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
- * @vf_id: defines VF id to which this VSI connects.
+ * @vf_id: defines VF ID to which this VSI connects.
*
* Returns pointer to the successfully allocated VSI struct on success,
* otherwise returns NULL on failure.
@@ -446,8 +489,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vsi->hw_base_vector += 1;
/* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_id)
- ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+ if (vf->port_vlan_id) {
+ ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
+ ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
+ }
eth_broadcast_addr(broadcast);
@@ -468,7 +513,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
- * We dont want to reconfigure interrupts since AVF driver doesn't
+ * We don't want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
@@ -484,6 +529,8 @@ ice_alloc_vsi_res_exit:
*/
static int ice_alloc_vf_res(struct ice_vf *vf)
{
+ struct ice_pf *pf = vf->pf;
+ int tx_rx_queue_left;
int status;
/* setup VF VSI and necessary resources */
@@ -491,6 +538,15 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
if (status)
goto ice_alloc_vf_res_exit;
+ /* Update the number of VF queues, in case the VF requested a
+ * different number of queues
+ */
+ tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
+ if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
+ vf->num_req_qs != vf->num_vf_qs)
+ vf->num_vf_qs = vf->num_req_qs;
+
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -548,6 +604,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
+ /* Map mailbox interrupt. We put an explicit 0 here to remind us that
+ * VF admin queue interrupts will go to VF MSI-X vector 0.
+ */
+ wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -750,6 +810,47 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
}
/**
+ * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @rm_promisc: true to clear the requested promiscuous filters, false to set them
+ *
+ * This function configures VF VSI promiscuous mode, based on the VF requests,
+ * for Unicast, Multicast and VLAN
+ */
+static enum ice_status
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
+ bool rm_promisc)
+{
+ struct ice_pf *pf = vf->pf;
+ enum ice_status status = 0;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ if (vf->num_vlan) {
+ status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+ rm_promisc);
+ } else if (vf->port_vlan_id) {
+ if (rm_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_id);
+ } else {
+ if (rm_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ else
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ }
+
+ return status;
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -764,6 +865,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
int v, i;
/* If we don't have any VFs, then there is nothing to reset */
@@ -778,12 +880,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_trigger_vf_reset(&pf->vf[v], is_vflr);
- /* Call Disable LAN Tx queue AQ call with VFR bit set and 0
- * queues to inform Firmware about VF reset.
- */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
- ICE_VF_RESET, v, NULL);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ struct ice_vsi *vsi;
+
+ vf = &pf->vf[v];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
+ ice_vsi_stop_rx_rings(vsi);
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ }
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -796,9 +903,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
- struct ice_vf *vf = &pf->vf[v];
u32 reg;
+ vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & VPGEN_VFRSTAT_VFRD_M))
break;
@@ -818,8 +925,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_free_vf_res(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+
+ ice_free_vf_res(vf);
+
+ /* Free VF queues as well, and reallocate them later.
+ * If a given VF has a different number of queues
+ * configured, the update request will come via
+ * mailbox communication.
+ */
+ vf->num_vf_qs = 0;
+ }
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
@@ -828,8 +945,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_cleanup_and_realloc_vf(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+
+ vf->num_vf_qs = pf->num_vf_qps;
+ dev_dbg(&pf->pdev->dev,
+ "VF-id %d has %d queues configured\n",
+ vf->vf_id, vf->num_vf_qs);
+ ice_cleanup_and_realloc_vf(vf);
+ }
ice_flush(hw);
clear_bit(__ICE_VF_DIS, pf->state);
@@ -847,9 +971,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
- struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
+ struct ice_hw *hw;
bool rsd = false;
+ u8 promisc_m;
u32 reg;
int i;
@@ -871,10 +996,11 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
/* Call Disable LAN Tx queue AQ call even when queues are not
* enabled. This is needed for successful completion of VFR
*/
- ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
- vf->vf_id, NULL);
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, ICE_VF_RESET, vf->vf_id, NULL);
}
+ hw = &pf->hw;
/* poll VPGEN_VFRSTAT reg to make sure
* that reset is complete
*/
@@ -900,6 +1026,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
usleep_range(10000, 20000);
+ /* Disable promiscuous modes in case they were enabled;
+ * ignore any error if the disabling process fails
+ */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (vf->port_vlan_id || vf->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
+ dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
+ }
+
/* free VF resources to begin resetting the VSI state */
ice_free_vf_res(vf);
@@ -938,7 +1079,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
@@ -961,8 +1102,9 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
- (u8 *)&pfe, sizeof(pfe), NULL);
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
}
/**
@@ -1012,7 +1154,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, false))
+ if (!ice_reset_all_vfs(pf, true))
goto err_unroll_sriov;
goto err_unroll_intr;
@@ -1131,21 +1273,10 @@ void ice_process_vflr_event(struct ice_pf *pf)
int vf_id;
u32 reg;
- if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
!pf->num_alloc_vfs)
return;
- /* Re-enable the VFLR interrupt cause here, before looking for which
- * VF got reset. Otherwise, if another VF gets a reset while the
- * first one is being processed, that interrupt will be lost, and
- * that VF will be stuck in reset forever.
- */
- reg = rd32(hw, PFINT_OICR_ENA);
- reg |= PFINT_OICR_VFLR_M;
- wr32(hw, PFINT_OICR_ENA, reg);
- ice_flush(hw);
-
- clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
struct ice_vf *vf = &pf->vf[vf_id];
u32 reg_idx, bit_idx;
@@ -1182,8 +1313,9 @@ static void ice_vc_dis_vf(struct ice_vf *vf)
*
* send msg to VF
*/
-static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum ice_status v_retval, u8 *msg, u16 msglen)
+static int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
enum ice_status aq_ret;
struct ice_pf *pf;
@@ -1243,8 +1375,8 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
if (VF_IS_V10(&vf->vf_ver))
info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
- (u8 *)&info,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
sizeof(struct virtchnl_version_info));
}
@@ -1257,15 +1389,15 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_resource *vfres = NULL;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int len = 0;
int ret;
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
}
@@ -1273,7 +1405,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
if (!vfres) {
- aq_ret = ICE_ERR_NO_MEMORY;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
len = 0;
goto err;
}
@@ -1286,6 +1418,11 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
if (!vsi->info.pvid)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
@@ -1336,7 +1473,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
err:
/* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
(u8 *)vfres, len);
devm_kfree(&pf->pdev->dev, vfres);
@@ -1360,15 +1497,15 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
/**
* ice_find_vsi_from_id
* @pf: the pf structure to search for the VSI
- * @id: id of the VSI it is searching for
+ * @id: ID of the VSI it is searching for
*
- * searches for the VSI with the given id
+ * searches for the VSI with the given ID
*/
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
+ ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
return pf->vsi[i];
@@ -1378,9 +1515,9 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
/**
* ice_vc_isvalid_vsi_id
* @vf: pointer to the VF info
- * @vsi_id: VF relative VSI id
+ * @vsi_id: VF relative VSI ID
*
- * check for the valid VSI id
+ * check for the valid VSI ID
*/
static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
@@ -1395,10 +1532,10 @@ static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
/**
* ice_vc_isvalid_q_id
* @vf: pointer to the VF info
- * @vsi_id: VSI id
- * @qid: VSI relative queue id
+ * @vsi_id: VSI ID
+ * @qid: VSI relative queue ID
*
- * check for the valid queue id
+ * check for the valid queue ID
*/
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
@@ -1416,42 +1553,42 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
*/
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi = NULL;
- enum ice_status aq_ret;
- int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- ret = ice_set_rss(vsi, vrk->key, NULL, 0);
- aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+ if (ice_set_rss(vsi, vrk->key, NULL, 0))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
NULL, 0);
}
@@ -1465,40 +1602,40 @@ error_param:
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi = NULL;
- enum ice_status aq_ret;
- int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
- aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+ if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
NULL, 0);
}
@@ -1511,25 +1648,26 @@ error_param:
*/
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1540,7 +1678,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
(u8 *)&stats, sizeof(stats));
}
@@ -1553,29 +1691,30 @@ error_param:
*/
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!vqs->rx_queues && !vqs->tx_queues) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1584,15 +1723,15 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* programmed using ice_vsi_cfg_txqs
*/
if (ice_vsi_start_rx_rings(vsi))
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Set flag to indicate that queues are enabled */
- if (!aq_ret)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
set_bit(ICE_VF_STATE_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
NULL, 0);
}
@@ -1606,30 +1745,31 @@ error_param:
*/
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!vqs->rx_queues && !vqs->tx_queues) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1637,23 +1777,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
dev_err(&vsi->back->pdev->dev,
"Failed to stop tx rings on VSI %d\n",
vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
if (ice_vsi_stop_rx_rings(vsi)) {
dev_err(&vsi->back->pdev->dev,
"Failed to stop rx rings on VSI %d\n",
vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
/* Clear enabled queues flag */
- if (!aq_ret)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
NULL, 0);
}
@@ -1666,22 +1806,30 @@ error_param:
*/
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
struct ice_vsi *vsi = NULL;
struct ice_pf *pf = vf->pf;
- enum ice_status aq_ret = 0;
unsigned long qmap;
+ u16 num_q_vectors;
int i;
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !vsi || vsi->num_q_vectors < num_q_vectors ||
+ irqmap_info->num_vectors == 0) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- for (i = 0; i < irqmap_info->num_vectors; i++) {
+ for (i = 0; i < num_q_vectors; i++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
map = &irqmap_info->vecmap[i];
vector_id = map->vector_id;
@@ -1689,40 +1837,30 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* validate msg params */
if (!(vector_id < pf->hw.func_caps.common_cap
.num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
- if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* lookout for the invalid queue index */
qmap = map->rxq_map;
+ q_vector->num_ring_rx = 0;
for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- struct ice_q_vector *q_vector;
-
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- q_vector = vsi->q_vectors[i];
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
}
qmap = map->txq_map;
+ q_vector->num_ring_tx = 0;
for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- struct ice_q_vector *q_vector;
-
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- q_vector = vsi->q_vectors[i];
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
@@ -1733,7 +1871,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
ice_vsi_cfg_msix(vsi);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
NULL, 0);
}
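In the reworked IRQ-map handler above, the vector count supplied by the VF is trimmed by ICE_NONQ_VECS_VF before it is compared against vsi->num_q_vectors and used to index vsi->q_vectors[], and each vector's num_ring_rx/num_ring_tx is reset to zero before recounting so a repeated VIRTCHNL_OP_CONFIG_IRQ_MAP does not inflate the totals. A condensed sketch of the up-front validation, assuming ICE_NONQ_VECS_VF is 1 (the VF's mailbox/other-cause vector; that value is an assumption, not stated in this diff):

/* reject the map before any q_vectors[i] dereference */
u16 num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF;

if (!vsi || irqmap_info->num_vectors == 0 ||
    vsi->num_q_vectors < num_q_vectors) {
	v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	goto error_param;
}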
@@ -1746,26 +1884,34 @@ error_param:
*/
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, qci->num_queue_pairs);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1775,7 +1921,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != qpi->txq.queue_id ||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* copy Tx queue info from VF into VSI */
@@ -1785,13 +1931,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi->rx_buf_len = qpi->rxq.databuffer_size;
if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
qpi->rxq.max_pkt_size < 64) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi->max_frame = qpi->rxq.max_pkt_size;
@@ -1802,15 +1948,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
*/
vsi->num_txq = qci->num_queue_pairs;
vsi->num_rxq = qci->num_queue_pairs;
+ /* All queues of VF VSI are in TC 0 */
+ vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
+ vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
- if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
- aq_ret = 0;
- else
- aq_ret = ICE_ERR_PARAM;
+ if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
NULL, 0);
}
@@ -1845,18 +1992,18 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* ice_vc_handle_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @set: true if mac filters are being set, false otherwise
+ * @set: true if MAC filters are being set, false otherwise
*
* add guest MAC address filter
*/
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- enum ice_status ret;
LIST_HEAD(mac_list);
struct ice_vsi *vsi;
int mac_count = 0;
@@ -1869,19 +2016,27 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
dev_err(&pf->pdev->dev,
- "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n");
- ret = ICE_ERR_PARAM;
+ "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ vf->vf_id);
+ /* There is no need to let VF know about not being trusted
+ * to add more MAC addr, so we can just return success message.
+ */
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
for (i = 0; i < al->num_elements; i++) {
u8 *maddr = al->list[i].addr;
@@ -1893,40 +2048,39 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
* already added. Just continue.
*/
dev_info(&pf->pdev->dev,
- "mac %pM already set for VF %d\n",
+ "MAC %pM already set for VF %d\n",
maddr, vf->vf_id);
continue;
} else {
- /* VF can't remove dflt_lan_addr/bcast mac */
+ /* VF can't remove dflt_lan_addr/bcast MAC */
dev_err(&pf->pdev->dev,
- "can't remove mac %pM for VF %d\n",
+ "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
maddr, vf->vf_id);
- ret = ICE_ERR_PARAM;
- goto handle_mac_exit;
+ continue;
}
}
/* check for the invalid cases and bail if necessary */
if (is_zero_ether_addr(maddr)) {
dev_err(&pf->pdev->dev,
- "invalid mac %pM provided for VF %d\n",
+ "invalid MAC %pM provided for VF %d\n",
maddr, vf->vf_id);
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (is_unicast_ether_addr(maddr) &&
!ice_can_vf_change_mac(vf)) {
dev_err(&pf->pdev->dev,
- "can't change unicast mac for untrusted VF %d\n",
+ "can't change unicast MAC for untrusted VF %d\n",
vf->vf_id);
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
- /* get here if maddr is multicast or if VF can change mac */
+ /* get here if maddr is multicast or if VF can change MAC */
if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
- ret = ICE_ERR_NO_MEMORY;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
goto handle_mac_exit;
}
mac_count++;
@@ -1934,14 +2088,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* program the updated filter list */
if (set)
- ret = ice_add_mac(&pf->hw, &mac_list);
+ v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
else
- ret = ice_remove_mac(&pf->hw, &mac_list);
+ v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));
- if (ret) {
+ if (v_ret) {
dev_err(&pf->pdev->dev,
- "can't update mac filters for VF %d, error %d\n",
- vf->vf_id, ret);
+ "can't update MAC filters for VF %d, error %d\n",
+ vf->vf_id, v_ret);
} else {
if (set)
vf->num_mac += mac_count;
@@ -1952,7 +2106,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
handle_mac_exit:
ice_free_fltr_list(&pf->pdev->dev, &mac_list);
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
/**
@@ -1987,39 +2141,42 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* VFs get a default number of queues but can use this message to request a
* different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to vf.
+ * available queue pairs via virtchnl message response to VF.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
int req_queues = vfres->num_queue_pairs;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
+ int max_allowed_vf_queues;
int tx_rx_queue_left;
int cur_queues;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- cur_queues = pf->num_vf_qps;
+ cur_queues = vf->num_vf_qs;
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
- } else if (req_queues > ICE_MAX_QS_PER_VF) {
+ } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+ vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but only %d left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+ vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
+ ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
@@ -2033,18 +2190,18 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- aq_ret, (u8 *)vfres, sizeof(*vfres));
+ v_ret, (u8 *)vfres, sizeof(*vfres));
}
/**
* ice_set_vf_port_vlan
* @netdev: network interface device structure
* @vf_id: VF identifier
- * @vlan_id: VLAN id being set
+ * @vlan_id: VLAN ID being set
* @qos: priority setting
* @vlan_proto: VLAN protocol
*
- * program VF Port VLAN id and/or qos
+ * program VF Port VLAN ID and/or QoS
*/
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -2087,17 +2244,18 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
return ret;
}
- /* If pvid, then remove all filters on the old VLAN */
+ /* If PVID, then remove all filters on the old VLAN */
if (vsi->info.pvid)
ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
VLAN_VID_MASK));
if (vlan_id || qos) {
- ret = ice_vsi_set_pvid(vsi, vlanprio);
+ ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
goto error_set_pvid;
} else {
- ice_vsi_kill_pvid(vsi);
+ ice_vsi_manage_pvid(vsi, 0, false);
+ vsi->info.pvid = 0;
}
if (vlan_id) {
@@ -2125,52 +2283,60 @@ error_set_pvid:
* @msg: pointer to the msg buffer
* @add_v: Add VLAN if true, otherwise delete VLAN
*
- * Process virtchnl op to add or remove programmed guest VLAN id
+ * Process virtchnl op to add or remove programmed guest VLAN ID
*/
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
+ bool vlan_promisc = false;
struct ice_vsi *vsi;
+ struct ice_hw *hw;
+ int status = 0;
+ u8 promisc_m;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add_v && !ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(&pf->pdev->dev,
- "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n");
- aq_ret = ICE_ERR_PARAM;
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let VF know about being not trusted,
+ * so we can just return success message here
+ */
goto error_param;
}
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(&pf->pdev->dev,
"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
goto error_param;
}
}
- vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vsi->info.pvid) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2178,38 +2344,94 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
dev_err(&pf->pdev->dev,
"%sable VLAN stripping failed for VSI %i\n",
add_v ? "en" : "dis", vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ vlan_promisc = true;
+
if (add_v) {
for (i = 0; i < vfl->num_elements; i++) {
u16 vid = vfl->vlan_id[i];
- if (!ice_vsi_add_vlan(vsi, vid)) {
- vf->num_vlan++;
+ if (!ice_is_vf_trusted(vf) &&
+ vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(&pf->pdev->dev,
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let VF know about being
+ * not trusted, so we can just return success
+ * message here as well.
+ */
+ goto error_param;
+ }
+
+ if (ice_vsi_add_vlan(vsi, vid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
- /* Enable VLAN pruning when VLAN 0 is added */
- if (unlikely(!vid))
- if (ice_cfg_vlan_pruning(vsi, true))
- aq_ret = ICE_ERR_PARAM;
+ vf->num_vlan++;
+ /* Enable VLAN pruning when VLAN is added */
+ if (!vlan_promisc) {
+ status = ice_cfg_vlan_pruning(vsi, true, false);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
} else {
- aq_ret = ICE_ERR_PARAM;
+ /* Enable Ucast/Mcast VLAN promiscuous mode */
+ promisc_m = ICE_PROMISC_VLAN_TX |
+ ICE_PROMISC_VLAN_RX;
+
+ status = ice_set_vsi_promisc(hw, vsi->idx,
+ promisc_m, vid);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ vid, status);
+ }
}
}
} else {
- for (i = 0; i < vfl->num_elements; i++) {
+ /* In case of non_trusted VF, number of VLAN elements passed
+ * to PF for removal might be greater than number of VLANs
+ * filter programmed for that VF - So, use actual number of
+ * VLANS added earlier with add VLAN opcode. In order to avoid
+ * removing VLAN that doesn't exist, which result to sending
+ * erroneous failed message back to the VF
+ */
+ int num_vf_vlan;
+
+ num_vf_vlan = vf->num_vlan;
+ for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
- if (!ice_vsi_kill_vlan(vsi, vid)) {
- vf->num_vlan--;
+ if (ice_vsi_kill_vlan(vsi, vid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vf->num_vlan--;
+ /* Disable VLAN pruning when removing VLAN */
+ ice_cfg_vlan_pruning(vsi, false, false);
- /* Disable VLAN pruning when removing VLAN 0 */
- if (unlikely(!vid))
- ice_cfg_vlan_pruning(vsi, false);
+ /* Disable Unicast/Multicast VLAN promiscuous mode */
+ if (vlan_promisc) {
+ promisc_m = ICE_PROMISC_VLAN_TX |
+ ICE_PROMISC_VLAN_RX;
+
+ ice_clear_vsi_promisc(hw, vsi->idx,
+ promisc_m, vid);
}
}
}
@@ -2217,10 +2439,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
error_param:
/* send the response to the VF */
if (add_v)
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
NULL, 0);
else
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
NULL, 0);
}
@@ -2229,7 +2451,7 @@ error_param:
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Add and program guest VLAN id
+ * Add and program guest VLAN ID
*/
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
@@ -2241,7 +2463,7 @@ static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * remove programmed guest VLAN id
+ * remove programmed guest VLAN ID
*/
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
@@ -2256,22 +2478,22 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
- enum ice_status aq_ret = 0;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vsi_manage_vlan_stripping(vsi, true))
- aq_ret = ICE_ERR_AQ_ERROR;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- aq_ret, NULL, 0);
+ v_ret, NULL, 0);
}
/**
@@ -2282,22 +2504,27 @@ error_param:
*/
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
- enum ice_status aq_ret = 0;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (ice_vsi_manage_vlan_stripping(vsi, false))
- aq_ret = ICE_ERR_AQ_ERROR;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- aq_ret, NULL, 0);
+ v_ret, NULL, 0);
}
/**
@@ -2333,7 +2560,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (err) {
- if (err == VIRTCHNL_ERR_PARAM)
+ if (err == VIRTCHNL_STATUS_ERR_PARAM)
err = -EPERM;
else
err = -EINVAL;
@@ -2355,7 +2582,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
error_handler:
if (err) {
- ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
+ ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
+ NULL, 0);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
return;
@@ -2418,7 +2646,8 @@ error_handler:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, vf_id);
- err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ err = ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
NULL, 0);
break;
}
@@ -2427,7 +2656,7 @@ error_handler:
* as it is busy with pending work.
*/
dev_info(&pf->pdev->dev,
- "PF failed to honor VF %d, opcode %d\n, error %d\n",
+ "PF failed to honor VF %d, opcode %d, error %d\n",
vf_id, v_opcode, err);
}
}
@@ -2440,8 +2669,8 @@ error_handler:
*
* return VF configuration
*/
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
- struct ifla_vf_info *ivi)
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2550,9 +2779,9 @@ out:
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
- * @mac: mac address
+ * @mac: MAC address
*
- * program VF mac address
+ * program VF MAC address
*/
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
@@ -2579,7 +2808,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return -EINVAL;
}
- /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
+ /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
* flow will use the updated dflt_lan_addr and add a MAC filter
* using ice_add_mac. Also set pf_set_mac to indicate that the PF has
* set the MAC address for this VF.
@@ -2587,7 +2816,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
vf->pf_set_mac = true;
netdev_info(netdev,
- "mac on VF %d set to %pM\n. VF driver will be reinitialized\n",
+ "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
ice_vc_dis_vf(vf);
@@ -2690,7 +2919,8 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
/* Notify the VF of its new link state */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
return 0;
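The MAC-filter hunk above routes the ice_add_mac()/ice_remove_mac() return values through ice_err_to_virt_err(), which is not shown in this excerpt. The sketch below is a hypothetical version of such a translation helper, using only status codes that appear elsewhere in this diff; the in-tree mapping may cover more cases:

static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status err)
{
	switch (err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_PARAM:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_IMPL:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	default:
		/* fall back to a generic AQ error for anything unmapped */
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	}
}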
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 01470a8ee03a..3725aea16840 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -48,10 +48,10 @@ enum ice_virtchnl_cap {
struct ice_vf {
struct ice_pf *pf;
- s16 vf_id; /* VF id in the PF space */
+ s16 vf_id; /* VF ID in the PF space */
u32 driver_caps; /* reported by VF driver */
int first_vector_idx; /* first vector index of this VF */
- struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
+ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
struct virtchnl_ether_addr dflt_lan_addr;
u16 port_vlan_id;
@@ -59,10 +59,10 @@ struct ice_vf {
u8 trusted;
u16 lan_vsi_idx; /* index into PF struct */
u16 lan_vsi_num; /* ID as used by firmware */
- u64 num_mdd_events; /* number of mdd events detected */
+ u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
- unsigned long vf_caps; /* vf's adv. capabilities */
+ unsigned long vf_caps; /* VF's adv. capabilities */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
u8 link_forced;
@@ -70,6 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
+ u16 num_vf_qs; /* num of queue configured per VF */
u8 num_req_qs; /* num of queue pairs requested by VF */
};
@@ -77,8 +78,8 @@ struct ice_vf {
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
- struct ifla_vf_info *ivi);
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
@@ -86,11 +87,9 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
- u16 vlan_id, u8 qos, __be16 vlan_proto);
-
-int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate);
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto);
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
@@ -162,12 +161,5 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused min_tx_rate,
- int __always_unused max_tx_rate)
-{
- return -EOPNOTSUPP;
-}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
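Along with its prototype, ice_set_vf_bw loses its !CONFIG_PCI_IOV stub above. The remaining ndo callbacks keep stubs of the same shape; a representative sketch, inferred from the stub being removed rather than copied from the header:

#ifndef CONFIG_PCI_IOV
static inline int
ice_set_vf_trust(struct net_device __always_unused *netdev,
		 int __always_unused vf_id, bool __always_unused trusted)
{
	return -EOPNOTSUPP;
}
#endif /* !CONFIG_PCI_IOV */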
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c57671068245..c645d9e648e0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3158,8 +3158,8 @@ static int igb_set_eee(struct net_device *netdev,
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
"Setting EEE options are not supported with EEE disabled\n");
- return -EINVAL;
- }
+ return -EINVAL;
+ }
adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 69b230c53fed..39f33afc479c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2480,7 +2480,7 @@ static int igb_set_features(struct net_device *netdev,
else
igb_reset(adapter);
- return 0;
+ return 1;
}
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -3452,6 +3452,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
}
+
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -6026,13 +6029,8 @@ static int igb_tx_map(struct igb_ring *tx_ring,
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return 0;
@@ -8048,7 +8046,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGB_RX_HDR_LEN)
- headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
+ headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@@ -8740,9 +8738,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
+ bool wake;
rtnl_lock();
netif_device_detach(netdev);
@@ -8755,14 +8751,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- if (!runtime) {
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
- }
-#endif
-
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8767,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
ctrl = rd32(E1000_CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
wr32(E1000_CTRL, ctrl);
@@ -8796,12 +8780,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
wr32(E1000_WUFC, 0);
}
- *enable_wake = wufc || adapter->en_mng_pt;
- if (!*enable_wake)
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
+ if (enable_wake)
+ *enable_wake = wake;
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -8844,22 +8831,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
static int __maybe_unused igb_suspend(struct device *dev)
{
- int retval;
- bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- retval = __igb_shutdown(pdev, &wake, 0);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8902,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __igb_shutdown(pdev, &wake, 1);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
static int __maybe_unused igb_runtime_resume(struct device *dev)
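In the igb hunks above, the wake decision moves entirely into __igb_shutdown(): it computes a local wake flag, reports it back only when the caller passes a non-NULL enable_wake, and no longer saves PCI state or touches the device power state itself, presumably leaving the final D3 transition to the PCI PM core for the suspend and runtime-suspend paths, which now call it with NULL. The probe hunk also sets DPM_FLAG_NEVER_SKIP so the PM core does not bypass the driver's suspend callbacks via the direct-complete optimization. The legacy .shutdown callback is the one remaining caller that still needs the flag; its likely shape, not shown in this diff and sketched here as an assumption:

static void igb_shutdown_sketch(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		/* the shutdown path still finishes the transition itself */
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}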
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 4eab83faec62..34cd30d7162f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2279,10 +2279,6 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
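Both the igb and igbvf transmit paths drop their explicit mmiowb() above; with the I/O-ordering rework that landed in this same cycle the barrier is handled by the locking primitives on the architectures that needed it, and igb also switches from skb->xmit_more to the netdev_xmit_more() helper. The resulting doorbell write, condensed for reference:

/* ring the Tx tail doorbell only when the queue has been stopped or the
 * stack has no further frames batched; no trailing mmiowb() is required
 */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
	writel(i, tx_ring->tail);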
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 80faccc34cda..0f5534ce27b0 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -29,9 +29,15 @@ unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
const u32 max_rss_queues);
int igc_reinit_queues(struct igc_adapter *adapter);
+void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
+int igc_add_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+int igc_del_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+void igc_update_stats(struct igc_adapter *adapter);
extern char igc_driver_name[];
extern char igc_driver_version[];
@@ -51,6 +57,13 @@ extern char igc_driver_version[];
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
+#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
+#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
+
+#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
+#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
@@ -284,15 +297,50 @@ struct igc_q_vector {
struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
};
+#define MAX_ETYPE_FILTER (4 - 1)
+
+enum igc_filter_match_flags {
+ IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
+ IGC_FILTER_FLAG_VLAN_TCI = 0x2,
+ IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
+ IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
+};
+
+/* RX network flow classification data structure */
+struct igc_nfc_input {
+ /* Byte layout in order, all values with MSB first:
+ * match_flags - 1 byte
+ * etype - 2 bytes
+ * vlan_tci - 2 bytes
+ */
+ u8 match_flags;
+ __be16 etype;
+ __be16 vlan_tci;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
+};
+
+struct igc_nfc_filter {
+ struct hlist_node nfc_node;
+ struct igc_nfc_input filter;
+ unsigned long cookie;
+ u16 etype_reg_index;
+ u16 sw_idx;
+ u16 action;
+};
+
struct igc_mac_addr {
u8 addr[ETH_ALEN];
u8 queue;
u8 state; /* bitmask */
};
-#define IGC_MAC_STATE_DEFAULT 0x1
-#define IGC_MAC_STATE_MODIFIED 0x2
-#define IGC_MAC_STATE_IN_USE 0x4
+#define IGC_MAC_STATE_DEFAULT 0x1
+#define IGC_MAC_STATE_IN_USE 0x2
+#define IGC_MAC_STATE_SRC_ADDR 0x4
+#define IGC_MAC_STATE_QUEUE_STEERING 0x8
+
+#define IGC_MAX_RXNFC_FILTERS 16
/* Board specific private data structure */
struct igc_adapter {
@@ -356,12 +404,22 @@ struct igc_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 rx_hwtstamp_cleared;
u32 *shadow_vfta;
u32 rss_queues;
+ u32 rss_indir_tbl_init;
+
+ /* RX network flow classification support */
+ struct hlist_head nfc_filter_list;
+ struct hlist_head cls_flower_list;
+ unsigned int nfc_filter_count;
/* lock for RX network flow classification filter */
spinlock_t nfc_lock;
+ bool etype_bitmap[MAX_ETYPE_FILTER];
struct igc_mac_addr *mac_table;
@@ -447,6 +505,10 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
/* forward declaration */
void igc_reinit_locked(struct igc_adapter *);
+int igc_add_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input);
+int igc_erase_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
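Each of the structures added above describes one ethtool ntuple rule: match_flags selects which of EtherType, VLAN priority, source MAC and destination MAC are compared, and action is the target Rx queue. A made-up example, steering EtherType 0x88cc frames tagged with VLAN priority 5 to queue 2 (values are hypothetical; ETH_P_LLDP and VLAN_PRIO_SHIFT come from the core headers, not this patch):

struct igc_nfc_filter example_rule = {
	.filter = {
		.match_flags = IGC_FILTER_FLAG_ETHER_TYPE |
			       IGC_FILTER_FLAG_VLAN_TCI,
		.etype	     = htons(ETH_P_LLDP),
		/* only the priority bits are matched, per the FLOW_EXT handling */
		.vlan_tci    = htons(5 << VLAN_PRIO_SHIFT),
	},
	.action = 2,	/* Rx queue 2 */
	.sw_idx = 0,	/* ethtool rule location */
};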
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index 76d4991d7284..58d1109d7f3f 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
-#ifndef _IGC_BASE_H
-#define _IGC_BASE_H
+#ifndef _IGC_BASE_H_
+#define _IGC_BASE_H_
/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 7d1bdcd1225a..a9a30268de59 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -310,6 +310,12 @@
IGC_RXDEXT_STATERR_CXE | \
IGC_RXDEXT_STATERR_RXE)
+#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
#define IGC_RFCTL_LEF 0x00040000
@@ -325,6 +331,10 @@
#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+/* Receive Checksum Control */
+#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
/* GPY211 - I225 defines */
#define GPY_MMD_MASK 0xFFFF0000
#define GPY_MMD_SHIFT 16
@@ -390,4 +400,11 @@
#define IGC_N0_QUEUE -1
+#define IGC_MAX_MAC_HDR_LEN 127
+#define IGC_MAX_NETWORK_HDR_LEN 511
+
+#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4))
+#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
+#define IGC_VLAPQF_QUEUE_MASK 0x03
+
#endif /* _IGC_DEFINES_H_ */
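The VLAPQF macros added above encode one 4-bit nibble per VLAN priority: bits [1:0] of the nibble select the Rx queue (hence the 0x03 mask) and bit 3 marks the entry valid, mirroring the igb register of the same name (an inference; this header does not describe the layout). A short worked example:

/* steer VLAN priority 5 to Rx queue 2 */
u32 vlapqf = 0;

vlapqf |= IGC_VLAPQF_QUEUE_SEL(5, 2 & IGC_VLAPQF_QUEUE_MASK);	/* 0x00200000 */
vlapqf |= IGC_VLAPQF_P_VALID(5);				/* 0x00800000 */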
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index eff37a6c0afa..ac98f1d96892 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -2,10 +2,120 @@
/* Copyright (c) 2018 Intel Corporation */
/* ethtool support for igc */
+#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include "igc.h"
+/* forward declaration */
+struct igc_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IGC_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \
+ .stat_offset = offsetof(struct igc_adapter, _stat) \
+}
+
+static const struct igc_stats igc_gstrings_stats[] = {
+ IGC_STAT("rx_packets", stats.gprc),
+ IGC_STAT("tx_packets", stats.gptc),
+ IGC_STAT("rx_bytes", stats.gorc),
+ IGC_STAT("tx_bytes", stats.gotc),
+ IGC_STAT("rx_broadcast", stats.bprc),
+ IGC_STAT("tx_broadcast", stats.bptc),
+ IGC_STAT("rx_multicast", stats.mprc),
+ IGC_STAT("tx_multicast", stats.mptc),
+ IGC_STAT("multicast", stats.mprc),
+ IGC_STAT("collisions", stats.colc),
+ IGC_STAT("rx_crc_errors", stats.crcerrs),
+ IGC_STAT("rx_no_buffer_count", stats.rnbc),
+ IGC_STAT("rx_missed_errors", stats.mpc),
+ IGC_STAT("tx_aborted_errors", stats.ecol),
+ IGC_STAT("tx_carrier_errors", stats.tncrs),
+ IGC_STAT("tx_window_errors", stats.latecol),
+ IGC_STAT("tx_abort_late_coll", stats.latecol),
+ IGC_STAT("tx_deferred_ok", stats.dc),
+ IGC_STAT("tx_single_coll_ok", stats.scc),
+ IGC_STAT("tx_multi_coll_ok", stats.mcc),
+ IGC_STAT("tx_timeout_count", tx_timeout_count),
+ IGC_STAT("rx_long_length_errors", stats.roc),
+ IGC_STAT("rx_short_length_errors", stats.ruc),
+ IGC_STAT("rx_align_errors", stats.algnerrc),
+ IGC_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGC_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGC_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGC_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGC_STAT("tx_flow_control_xon", stats.xontxc),
+ IGC_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGC_STAT("rx_long_byte_count", stats.gorc),
+ IGC_STAT("tx_dma_out_of_sync", stats.doosync),
+ IGC_STAT("tx_smbus", stats.mgptc),
+ IGC_STAT("rx_smbus", stats.mgprc),
+ IGC_STAT("dropped_smbus", stats.mgpdc),
+ IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGC_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+};
+
+#define IGC_NETDEV_STAT(_net_stat) { \
+ .stat_string = __stringify(_net_stat), \
+ .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
+}
+
+static const struct igc_stats igc_gstrings_net_stats[] = {
+ IGC_NETDEV_STAT(rx_errors),
+ IGC_NETDEV_STAT(tx_errors),
+ IGC_NETDEV_STAT(tx_dropped),
+ IGC_NETDEV_STAT(rx_length_errors),
+ IGC_NETDEV_STAT(rx_over_errors),
+ IGC_NETDEV_STAT(rx_frame_errors),
+ IGC_NETDEV_STAT(rx_fifo_errors),
+ IGC_NETDEV_STAT(tx_fifo_errors),
+ IGC_NETDEV_STAT(tx_heartbeat_errors)
+};
+
+enum igc_diagnostics_results {
+ TEST_REG = 0,
+ TEST_EEP,
+ TEST_IRQ,
+ TEST_LOOP,
+ TEST_LINK
+};
+
+static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
+ [TEST_REG] = "Register test (offline)",
+ [TEST_EEP] = "Eeprom test (offline)",
+ [TEST_IRQ] = "Interrupt test (offline)",
+ [TEST_LOOP] = "Loopback test (offline)",
+ [TEST_LINK] = "Link test (on/offline)"
+};
+
+#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN)
+
+#define IGC_GLOBAL_STATS_LEN \
+ (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats))
+#define IGC_NETDEV_STATS_LEN \
+ (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats))
+#define IGC_RX_QUEUE_STATS_LEN \
+ (sizeof(struct igc_rx_queue_stats) / sizeof(u64))
+#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+#define IGC_QUEUE_STATS_LEN \
+ ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \
+ IGC_RX_QUEUE_STATS_LEN) + \
+ (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \
+ IGC_TX_QUEUE_STATS_LEN))
+#define IGC_STATS_LEN \
+ (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)
+
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
@@ -545,6 +655,127 @@ static int igc_set_pauseparam(struct net_device *netdev,
return retval;
}
+static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *igc_gstrings_test,
+ IGC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, igc_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) {
+ memcpy(p, igc_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_restart", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_csum_err", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
+ }
+ /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, igc_priv_flags_strings,
+ IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static int igc_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IGC_STATS_LEN;
+ case ETH_SS_TEST:
+ return IGC_TEST_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IGC_PRIV_FLAGS_STR_LEN;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static void igc_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+ unsigned int start;
+ struct igc_ring *ring;
+ int i, j;
+ char *p;
+
+ spin_lock(&adapter->stats64_lock);
+ igc_update_stats(adapter);
+
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + igc_gstrings_stats[i].stat_offset;
+ data[i] = (igc_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) {
+ p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset;
+ data[i] = (igc_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ u64 restart2;
+
+ ring = adapter->tx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ data[i] = ring->tx_stats.packets;
+ data[i + 1] = ring->tx_stats.bytes;
+ data[i + 2] = ring->tx_stats.restart_queue;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+ restart2 = ring->tx_stats.restart_queue2;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+ data[i + 2] += restart2;
+
+ i += IGC_TX_QUEUE_STATS_LEN;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ data[i] = ring->rx_stats.packets;
+ data[i + 1] = ring->rx_stats.bytes;
+ data[i + 2] = ring->rx_stats.drops;
+ data[i + 3] = ring->rx_stats.csum_err;
+ data[i + 4] = ring->rx_stats.alloc_failed;
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ i += IGC_RX_QUEUE_STATS_LEN;
+ }
+ spin_unlock(&adapter->stats64_lock);
+}
+
static int igc_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -643,6 +874,605 @@ static int igc_set_coalesce(struct net_device *netdev,
return 0;
}
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct igc_nfc_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = IGC_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ if (rule->filter.match_flags) {
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->action;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = rule->filter.etype;
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
+ fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ rule->filter.dst_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_source,
+ rule->filter.src_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
+ }
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igc_nfc_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = IGC_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int igc_get_rss_hash_opts(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igc */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V4_FLOW:
+ /* Fall through */
+ case AH_ESP_V4_FLOW:
+ /* Fall through */
+ case AH_V4_FLOW:
+ /* Fall through */
+ case ESP_V4_FLOW:
+ /* Fall through */
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V6_FLOW:
+ /* Fall through */
+ case AH_ESP_V6_FLOW:
+ /* Fall through */
+ case AH_V6_FLOW:
+ /* Fall through */
+ case ESP_V6_FLOW:
+ /* Fall through */
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->nfc_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = igc_get_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs);
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igc_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGC_FLAG_RSS_FIELD_IPV6_UDP)
+static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct igc_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(IGC_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ dev_err(&adapter->pdev->dev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4 |
+ IGC_MRQC_RSS_FIELD_IPV4_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6 |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP |
+ IGC_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+ wr32(IGC_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 i;
+ u32 etqf;
+ u16 etype;
+
+ /* find an empty etype filter register */
+ for (i = 0; i < MAX_ETYPE_FILTER; ++i) {
+ if (!adapter->etype_bitmap[i])
+ break;
+ }
+ if (i == MAX_ETYPE_FILTER) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n");
+ return -EINVAL;
+ }
+
+ adapter->etype_bitmap[i] = true;
+
+ etqf = rd32(IGC_ETQF(i));
+ etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK);
+
+ etqf |= IGC_ETQF_FILTER_ENABLE;
+ etqf &= ~IGC_ETQF_ETYPE_MASK;
+ etqf |= (etype & IGC_ETQF_ETYPE_MASK);
+
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT)
+ & IGC_ETQF_QUEUE_MASK);
+ etqf |= IGC_ETQF_QUEUE_ENABLE;
+
+ wr32(IGC_ETQF(i), etqf);
+
+ input->etype_reg_index = i;
+
+ return 0;
+}
+
+static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u16 queue_index;
+ u32 vlapqf;
+
+ vlapqf = rd32(IGC_VLAPQF);
+ vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK;
+
+ /* check whether this vlan prio is already set */
+ if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) &&
+ queue_index != input->action) {
+ dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n");
+ return -EEXIST;
+ }
+
+ vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority);
+ vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action);
+
+ wr32(IGC_VLAPQF, vlapqf);
+
+ return 0;
+}
+
+int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err = -EINVAL;
+
+ if (hw->mac.type == igc_i225 &&
+ !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) {
+ dev_err(&adapter->pdev->dev,
+ "i225 doesn't support flow classification rules specifying only source addresses.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ err = igc_rxnfc_write_etype_filter(adapter, input);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ err = igc_add_mac_steering_filter(adapter,
+ input->filter.dst_addr,
+ input->action, 0);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ err = igc_add_mac_steering_filter(adapter,
+ input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ err = igc_rxnfc_write_vlan_prio_filter(adapter, input);
+
+ return err;
+}
+
+static void igc_clear_etype_filter_regs(struct igc_adapter *adapter,
+ u16 reg_index)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 etqf = rd32(IGC_ETQF(reg_index));
+
+ etqf &= ~IGC_ETQF_QUEUE_ENABLE;
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf &= ~IGC_ETQF_FILTER_ENABLE;
+
+ wr32(IGC_ETQF(reg_index), etqf);
+
+ adapter->etype_bitmap[reg_index] = false;
+}
+
+static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter,
+ u16 vlan_tci)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u32 vlapqf;
+
+ vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ vlapqf = rd32(IGC_VLAPQF);
+ vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority);
+ vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority,
+ IGC_VLAPQF_QUEUE_MASK);
+
+ wr32(IGC_VLAPQF, vlapqf);
+}
+
+int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+{
+ if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
+ igc_clear_etype_filter_regs(adapter,
+ input->etype_reg_index);
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ igc_clear_vlan_prio_filter(adapter,
+ ntohs(input->filter.vlan_tci));
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_del_mac_steering_filter(adapter, input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_del_mac_steering_filter(adapter, input->filter.dst_addr,
+ input->action, 0);
+
+ return 0;
+}
+
+static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input,
+ u16 sw_idx)
+{
+ struct igc_nfc_filter *rule, *parent;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = rule;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && rule->sw_idx == sw_idx) {
+ if (!input)
+ err = igc_erase_filter(adapter, rule);
+
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ adapter->nfc_filter_count--;
+ }
+
+ /* If no input this was a delete, err should be 0 if a rule was
+ * successfully found and removed from the list else -EINVAL
+ */
+ if (!input)
+ return err;
+
+ /* initialize node */
+ INIT_HLIST_NODE(&input->nfc_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&input->nfc_node, &parent->nfc_node);
+ else
+ hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
+
+ /* update counts */
+ adapter->nfc_filter_count++;
+
+ return 0;
+}
+
+static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct igc_nfc_filter *input, *rule;
+ int err = 0;
+
+ if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+ /* Don't allow programming if the action is a queue greater than
+ * the number of online Rx queues.
+ */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+ fsp->ring_cookie >= adapter->num_rx_queues) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n");
+ return -EINVAL;
+ }
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= IGC_MAX_RXNFC_FILTERS) {
+ dev_err(&adapter->pdev->dev, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ return -EINVAL;
+
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
+ input->filter.etype = fsp->h_u.ether_spec.h_proto;
+ input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+ input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(input->filter.src_addr,
+ fsp->h_u.ether_spec.h_source);
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(input->filter.dst_addr,
+ fsp->h_u.ether_spec.h_dest);
+ }
+
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ input->filter.vlan_tci = fsp->h_ext.vlan_tci;
+ input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
+ }
+
+ input->action = fsp->ring_cookie;
+ input->sw_idx = fsp->location;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (!memcmp(&input->filter, &rule->filter,
+ sizeof(input->filter))) {
+ err = -EEXIST;
+ dev_err(&adapter->pdev->dev,
+ "ethtool: this filter is already set\n");
+ goto err_out_w_lock;
+ }
+ }
+
+ err = igc_add_filter(adapter, input);
+ if (err)
+ goto err_out_w_lock;
+
+ igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->nfc_lock);
+ return 0;
+
+err_out_w_lock:
+ spin_unlock(&adapter->nfc_lock);
+err_out:
+ kfree(input);
+ return err;
+}
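
A minimal userspace sketch (not part of the patch) of the request that lands in this handler: inserting an EtherType steering rule through ETHTOOL_SRXCLSRLINS. The interface name, EtherType, target queue and rule location below are placeholders.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = ETHER_FLOW;
	nfc.fs.h_u.ether_spec.h_proto = htons(0x88F7);	/* example EtherType */
	nfc.fs.m_u.ether_spec.h_proto = 0xFFFF;		/* full EtherType mask */
	nfc.fs.ring_cookie = 1;				/* steer to Rx queue 1 */
	nfc.fs.location = 0;				/* sw_idx of the rule */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr))
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}

The same rule can be removed again with cmd set to ETHTOOL_SRXCLSRLDEL and only fs.location filled in, which is the path served by igc_del_ethtool_nfc_entry() below.
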
+
+static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->nfc_lock);
+ err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location);
+ spin_unlock(&adapter->nfc_lock);
+
+ return err;
+}
+
+static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igc_set_rss_hash_opt(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = igc_add_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = igc_del_ethtool_nfc_entry(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -885,17 +1715,13 @@ static int igc_get_link_ksettings(struct net_device *netdev,
if (hw->mac.type == igc_i225 &&
(status & IGC_STATUS_SPEED_2500)) {
speed = SPEED_2500;
- hw_dbg("2500 Mbs, ");
} else {
speed = SPEED_1000;
- hw_dbg("1000 Mbs, ");
}
} else if (status & IGC_STATUS_SPEED_100) {
speed = SPEED_100;
- hw_dbg("100 Mbs, ");
} else {
speed = SPEED_10;
- hw_dbg("10 Mbs, ");
}
if ((status & IGC_STATUS_FD) ||
hw->phy.media_type != igc_media_type_copper)
@@ -1011,8 +1837,13 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_ringparam = igc_set_ringparam,
.get_pauseparam = igc_get_pauseparam,
.set_pauseparam = igc_set_pauseparam,
+ .get_strings = igc_get_strings,
+ .get_sset_count = igc_get_sset_count,
+ .get_ethtool_stats = igc_get_ethtool_stats,
.get_coalesce = igc_get_coalesce,
.set_coalesce = igc_set_coalesce,
+ .get_rxnfc = igc_get_rxnfc,
+ .set_rxnfc = igc_set_rxnfc,
.get_rxfh_indir_size = igc_get_rxfh_indir_size,
.get_rxfh = igc_get_rxfh,
.set_rxfh = igc_set_rxfh,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 87a11879bf2d..34fa0e60a780 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -620,6 +620,55 @@ static void igc_configure_tx(struct igc_adapter *adapter)
*/
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
+ struct igc_hw *hw = &adapter->hw;
+ u32 j, num_rx_queues;
+ u32 mrqc, rxcsum;
+ u32 rss_key[10];
+
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ for (j = 0; j < 10; j++)
+ wr32(IGC_RSSRK(j), rss_key[j]);
+
+ num_rx_queues = adapter->rss_queues;
+
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGC_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGC_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
+ }
+ igc_write_rss_indir_tbl(adapter);
+
+ /* Disable raw packet checksumming so that RSS hash is placed in
+ * descriptor on writeback. No need to enable TCP/UDP/IP checksum
+ * offloads as they are enabled by default
+ */
+ rxcsum = rd32(IGC_RXCSUM);
+ rxcsum |= IGC_RXCSUM_PCSD;
+
+ /* Enable Receive Checksum Offload for SCTP */
+ rxcsum |= IGC_RXCSUM_CRCOFL;
+
+ /* Don't need to set TUOFL or IPOFL, they default to 1 */
+ wr32(IGC_RXCSUM, rxcsum);
+
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
+ mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
+ IGC_MRQC_RSS_FIELD_IPV4_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6 |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+ mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
+
+ wr32(IGC_MRQC, mrqc);
}
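
The indirection table default above spreads the RETA entries evenly across the enabled RSS queues. A standalone sketch of that arithmetic; the table size of 128 is an assumption here, chosen to mirror the driver's IGC_RETA_SIZE:

#include <stdio.h>

#define RETA_SIZE	128	/* assumed to match IGC_RETA_SIZE */

int main(void)
{
	unsigned int nqueues = 4;	/* e.g. adapter->rss_queues */
	unsigned int hist[4] = { 0 };

	for (unsigned int j = 0; j < RETA_SIZE; j++)
		hist[(j * nqueues) / RETA_SIZE]++;

	for (unsigned int q = 0; q < nqueues; q++)
		printf("queue %u: %u RETA entries\n", q, hist[q]);	/* 32 each */

	return 0;
}
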
/**
@@ -890,13 +939,8 @@ static int igc_tx_map(struct igc_ring *tx_ring,
/* Make sure there is space in the ring for the next send. */
igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return 0;
@@ -1150,7 +1194,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGC_RX_HDR_LEN)
- headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
+ headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@@ -1738,12 +1782,200 @@ void igc_up(struct igc_adapter *adapter)
* igc_update_stats - Update the board statistics counters
* @adapter: board private structure
*/
-static void igc_update_stats(struct igc_adapter *adapter)
+void igc_update_stats(struct igc_adapter *adapter)
{
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+ u64 _bytes, _packets;
+ u64 bytes, packets;
+ unsigned int start;
+ u32 mpc;
+ int i;
+
+ /* Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ packets = 0;
+ bytes = 0;
+
+ rcu_read_lock();
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+ u32 rqdpc = rd32(IGC_RQDPC(i));
+
+ if (hw->mac.type >= igc_i225)
+ wr32(IGC_RQDPC(i), 0);
+
+ if (rqdpc) {
+ ring->rx_stats.drops += rqdpc;
+ net_stats->rx_fifo_errors += rqdpc;
+ }
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ _bytes = ring->rx_stats.bytes;
+ _packets = ring->rx_stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ bytes += _bytes;
+ packets += _packets;
+ }
+
+ net_stats->rx_bytes = bytes;
+ net_stats->rx_packets = packets;
+
+ packets = 0;
+ bytes = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ _bytes = ring->tx_stats.bytes;
+ _packets = ring->tx_stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ bytes += _bytes;
+ packets += _packets;
+ }
+ net_stats->tx_bytes = bytes;
+ net_stats->tx_packets = packets;
+ rcu_read_unlock();
+
+ /* read stats registers */
+ adapter->stats.crcerrs += rd32(IGC_CRCERRS);
+ adapter->stats.gprc += rd32(IGC_GPRC);
+ adapter->stats.gorc += rd32(IGC_GORCL);
+ rd32(IGC_GORCH); /* clear GORCL */
+ adapter->stats.bprc += rd32(IGC_BPRC);
+ adapter->stats.mprc += rd32(IGC_MPRC);
+ adapter->stats.roc += rd32(IGC_ROC);
+
+ adapter->stats.prc64 += rd32(IGC_PRC64);
+ adapter->stats.prc127 += rd32(IGC_PRC127);
+ adapter->stats.prc255 += rd32(IGC_PRC255);
+ adapter->stats.prc511 += rd32(IGC_PRC511);
+ adapter->stats.prc1023 += rd32(IGC_PRC1023);
+ adapter->stats.prc1522 += rd32(IGC_PRC1522);
+ adapter->stats.symerrs += rd32(IGC_SYMERRS);
+ adapter->stats.sec += rd32(IGC_SEC);
+
+ mpc = rd32(IGC_MPC);
+ adapter->stats.mpc += mpc;
+ net_stats->rx_fifo_errors += mpc;
+ adapter->stats.scc += rd32(IGC_SCC);
+ adapter->stats.ecol += rd32(IGC_ECOL);
+ adapter->stats.mcc += rd32(IGC_MCC);
+ adapter->stats.latecol += rd32(IGC_LATECOL);
+ adapter->stats.dc += rd32(IGC_DC);
+ adapter->stats.rlec += rd32(IGC_RLEC);
+ adapter->stats.xonrxc += rd32(IGC_XONRXC);
+ adapter->stats.xontxc += rd32(IGC_XONTXC);
+ adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
+ adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
+ adapter->stats.fcruc += rd32(IGC_FCRUC);
+ adapter->stats.gptc += rd32(IGC_GPTC);
+ adapter->stats.gotc += rd32(IGC_GOTCL);
+ rd32(IGC_GOTCH); /* clear GOTCL */
+ adapter->stats.rnbc += rd32(IGC_RNBC);
+ adapter->stats.ruc += rd32(IGC_RUC);
+ adapter->stats.rfc += rd32(IGC_RFC);
+ adapter->stats.rjc += rd32(IGC_RJC);
+ adapter->stats.tor += rd32(IGC_TORH);
+ adapter->stats.tot += rd32(IGC_TOTH);
+ adapter->stats.tpr += rd32(IGC_TPR);
+
+ adapter->stats.ptc64 += rd32(IGC_PTC64);
+ adapter->stats.ptc127 += rd32(IGC_PTC127);
+ adapter->stats.ptc255 += rd32(IGC_PTC255);
+ adapter->stats.ptc511 += rd32(IGC_PTC511);
+ adapter->stats.ptc1023 += rd32(IGC_PTC1023);
+ adapter->stats.ptc1522 += rd32(IGC_PTC1522);
+
+ adapter->stats.mptc += rd32(IGC_MPTC);
+ adapter->stats.bptc += rd32(IGC_BPTC);
+
+ adapter->stats.tpt += rd32(IGC_TPT);
+ adapter->stats.colc += rd32(IGC_COLC);
+
+ adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
+
+ adapter->stats.tsctc += rd32(IGC_TSCTC);
+ adapter->stats.tsctfc += rd32(IGC_TSCTFC);
+
+ adapter->stats.iac += rd32(IGC_IAC);
+ adapter->stats.icrxoc += rd32(IGC_ICRXOC);
+ adapter->stats.icrxptc += rd32(IGC_ICRXPTC);
+ adapter->stats.icrxatc += rd32(IGC_ICRXATC);
+ adapter->stats.ictxptc += rd32(IGC_ICTXPTC);
+ adapter->stats.ictxatc += rd32(IGC_ICTXATC);
+ adapter->stats.ictxqec += rd32(IGC_ICTXQEC);
+ adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC);
+ adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC);
+
+ /* Fill out the OS statistics structure */
+ net_stats->multicast = adapter->stats.mprc;
+ net_stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ net_stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ net_stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ net_stats->rx_crc_errors = adapter->stats.crcerrs;
+ net_stats->rx_frame_errors = adapter->stats.algnerrc;
+ net_stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ net_stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ net_stats->tx_aborted_errors = adapter->stats.ecol;
+ net_stats->tx_window_errors = adapter->stats.latecol;
+ net_stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Management Stats */
+ adapter->stats.mgptc += rd32(IGC_MGTPTC);
+ adapter->stats.mgprc += rd32(IGC_MGTPRC);
+ adapter->stats.mgpdc += rd32(IGC_MGTPDC);
}
static void igc_nfc_filter_exit(struct igc_adapter *adapter)
{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_erase_filter(adapter, rule);
+
+ hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
+ igc_erase_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
+
+static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_add_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
}
/**
@@ -1890,6 +2122,86 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev)
return &netdev->stats;
}
+static netdev_features_t igc_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+ return features;
+}
+
+static int igc_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ /* Add VLAN support */
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
+ return 0;
+
+ if (!(features & NETIF_F_NTUPLE)) {
+ struct hlist_node *node2;
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+ hlist_for_each_entry_safe(rule, node2,
+ &adapter->nfc_filter_list, nfc_node) {
+ igc_erase_filter(adapter, rule);
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ }
+ spin_unlock(&adapter->nfc_lock);
+ adapter->nfc_filter_count = 0;
+ }
+
+ netdev->features = features;
+
+ if (netif_running(netdev))
+ igc_reinit_locked(adapter);
+ else
+ igc_reset(adapter);
+
+ return 1;
+}
+
+static netdev_features_t
+igc_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPv4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
/**
* igc_configure - configure the hardware for RX and TX
* @adapter: private board structure
@@ -1906,6 +2218,7 @@ static void igc_configure(struct igc_adapter *adapter)
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
+ igc_nfc_filter_restore(adapter);
igc_configure_tx(adapter);
igc_configure_rx(adapter);
@@ -1967,6 +2280,127 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter)
igc_rar_set_index(adapter, 0);
}
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, it should be possible to only
+ * override the other configurations, for example the queue to steer
+ * traffic.
+ */
+static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
+ const u8 *addr, const u8 flags)
+{
+ if (!(entry->state & IGC_MAC_STATE_IN_USE))
+ return true;
+
+ if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
+ (flags & IGC_MAC_STATE_SRC_ADDR))
+ return false;
+
+ if (!ether_addr_equal(addr, entry->addr))
+ return false;
+
+ return true;
+}
+
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
+ * 'flags' indicates what kind of match is made; the match is on the
+ * destination address by default. To match on the source address
+ * instead, pass the IGC_MAC_STATE_SRC_ADDR flag.
+ */
+static int igc_add_mac_filter_flags(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, flags))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+int igc_add_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ return igc_add_mac_filter_flags(adapter, addr, queue,
+ IGC_MAC_STATE_QUEUE_STEERING | flags);
+}
+
+/* Remove a MAC filter for 'addr' directing matching traffic to
+ * 'queue'. 'flags' indicates what kind of match is to be removed; the
+ * match is on the destination address by default. To remove a
+ * source-address match, pass the IGC_MAC_STATE_SRC_ADDR flag.
+ */
+static int igc_del_mac_filter_flags(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if ((adapter->mac_table[i].state & flags) != flags)
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int igc_del_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ return igc_del_mac_filter_flags(adapter, addr, queue,
+ IGC_MAC_STATE_QUEUE_STEERING | flags);
+}
+
/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -3434,6 +3868,9 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
+ .ndo_fix_features = igc_fix_features,
+ .ndo_set_features = igc_set_features,
+ .ndo_features_check = igc_features_check,
};
/* PCIe configuration access */
@@ -3663,6 +4100,9 @@ static int igc_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 5afe7a8d3faf..50d7c04dccf5 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -80,8 +80,23 @@
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
+/* RSS registers */
+#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
+
+/* Filtering Registers */
+#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+/* ETQF register bit definitions */
+#define IGC_ETQF_FILTER_ENABLE BIT(26)
+#define IGC_ETQF_QUEUE_ENABLE BIT(31)
+#define IGC_ETQF_QUEUE_SHIFT 16
+#define IGC_ETQF_QUEUE_MASK 0x00070000
+#define IGC_ETQF_ETYPE_MASK 0x0000FFFF
+
/* Redirection Table - RW Array */
#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
+/* RSS Random Key - RW Array */
+#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4))
/* Receive Register Descriptions */
#define IGC_RCTL 0x00100 /* Rx Control - RW */
@@ -101,6 +116,7 @@
#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
+#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */
/* Transmit Register Descriptions */
#define IGC_TCTL 0x00400 /* Tx Control - RW */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e100054a3765..57fd9ee6de66 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1800,7 +1800,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
- pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
+ pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -8297,13 +8297,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return 0;
@@ -8483,8 +8478,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
@@ -8514,7 +8508,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
break;
/* fall through */
default:
- return fallback(dev, skb, sb_dev);
+ return netdev_pick_tx(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -9796,7 +9790,7 @@ static int ixgbe_set_features(struct net_device *netdev,
NETIF_F_HW_VLAN_CTAG_FILTER))
ixgbe_set_rx_mode(netdev);
- return 0;
+ return 1;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
struct mii_bus *bus;
+ int err = -ENODEV;
- adapter->mii_bus = devm_mdiobus_alloc(dev);
- if (!adapter->mii_bus)
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
return -ENOMEM;
- bus = adapter->mii_bus;
-
switch (hw->device_id) {
/* C3000 SoCs */
case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*/
hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
- return mdiobus_register(bus);
+ err = mdiobus_register(bus);
+ if (!err) {
+ adapter->mii_bus = bus;
+ return 0;
+ }
ixgbe_no_mii_bus:
devm_mdiobus_free(dev, bus);
- adapter->mii_bus = NULL;
- return -ENODEV;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 49e23afa05a2..d189ed247665 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -895,7 +895,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IXGBEVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data,
+ IXGBEVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), xdp->data,
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index d29104de0d53..cda641ef89af 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -478,7 +478,7 @@ static int xrx200_probe(struct platform_device *pdev)
}
mac = of_get_mac_address(np);
- if (mac && is_valid_ether_addr(mac))
+ if (!IS_ERR(mac))
ether_addr_copy(net_dev->dev_addr, mac);
else
eth_hw_addr_random(net_dev);
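
This and the following of_get_mac_address() hunks switch callers from a NULL-and-validity check to IS_ERR(), reflecting that the helper now reports failure through an ERR_PTR() encoded pointer. A simplified userspace sketch of that convention; the real definitions live in include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* errors occupy the last page of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	const void *mac = ERR_PTR(-2);	/* e.g. -ENOENT: no MAC in the DT */

	if (IS_ERR(mac))
		printf("falling back to a random MAC, error %ld\n", PTR_ERR(mac));

	return 0;
}
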
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 292a668ce88e..409b69fd4374 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2749,8 +2749,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
mac_addr = of_get_mac_address(pnp);
- if (mac_addr)
- memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(ppd.mac_addr, mac_addr);
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c0a3718b2e2a..e758650b2c26 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2467,7 +2467,7 @@ out:
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
- if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+ if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
mvneta_txq_pend_desc_add(pp, txq, frags);
else
@@ -3385,6 +3385,7 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
phylink_set(mask, 1000baseX_Full);
}
if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
}
@@ -4475,15 +4476,14 @@ static int mvneta_probe(struct platform_device *pdev)
int err;
int cpu;
- dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
+ dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
+ txq_number, rxq_number);
if (!dev)
return -ENOMEM;
dev->irq = irq_of_parse_and_map(dn, 0);
- if (dev->irq == 0) {
- err = -EINVAL;
- goto err_free_netdev;
- }
+ if (dev->irq == 0)
+ return -EINVAL;
phy_mode = of_get_phy_mode(dn);
if (phy_mode < 0) {
@@ -4563,9 +4563,9 @@ static int mvneta_probe(struct platform_device *pdev)
}
dt_mac_addr = of_get_mac_address(dn);
- if (dt_mac_addr) {
+ if (!IS_ERR(dt_mac_addr)) {
mac_from = "device tree";
- memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, dt_mac_addr);
} else {
mvneta_get_mac_addr(pp, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
@@ -4704,8 +4704,6 @@ err_free_phylink:
phylink_destroy(pp->phylink);
err_free_irq:
irq_dispose_mapping(dev->irq);
-err_free_netdev:
- free_netdev(dev);
return err;
}
@@ -4722,7 +4720,6 @@ static int mvneta_remove(struct platform_device *pdev)
free_percpu(pp->stats);
irq_dispose_mapping(dev->irq);
phylink_destroy(pp->phylink);
- free_netdev(dev);
if (pp->bm_priv) {
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index ff0f4c503f53..6171270a016c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <net/flow_offload.h>
/* Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
@@ -101,6 +102,7 @@
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
+#define MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu) (((lu) & 0x3f) << 3)
#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
@@ -123,13 +125,18 @@
#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
+#define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f)
#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
+#define MVPP22_CLS_C2_PORT_MASK (0xff << 8)
+#define MVPP22_CLS_C2_TCAM_INV 0x1b24
+#define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31)
#define MVPP22_CLS_C2_HIT_CTR 0x1b50
#define MVPP22_CLS_C2_ACT 0x1b60
#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
+#define MVPP22_CLS_C2_ACT_COLOR(act) ((act) & 0x7)
#define MVPP22_CLS_C2_ATTR0 0x1b64
#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
@@ -610,6 +617,12 @@
#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
+#define MVPP2_N_PRS_FLOWS 52
+#define MVPP2_N_RFS_ENTRIES_PER_FLOW 4
+
+/* There are 7 supported high-level flows */
+#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7)
+
/* RSS constants */
#define MVPP22_RSS_TABLE_ENTRIES 32
@@ -710,6 +723,7 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
/* Definitions */
+struct mvpp2_dbgfs_entries;
/* Shared Packet Processor resources */
struct mvpp2 {
@@ -771,6 +785,9 @@ struct mvpp2 {
/* Debugfs root entry */
struct dentry *dbgfs_dir;
+
+ /* Debugfs entries private data */
+ struct mvpp2_dbgfs_entries *dbgfs_entries;
};
struct mvpp2_pcpu_stats {
@@ -802,6 +819,37 @@ struct mvpp2_queue_vector {
struct cpumask *mask;
};
+/* Internal representation of a Flow Steering rule */
+struct mvpp2_rfs_rule {
+ /* Rule location inside the flow */
+ int loc;
+
+ /* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */
+ int flow_type;
+
+ /* Index of the C2 TCAM entry handling this rule */
+ int c2_index;
+
+ /* Header fields that need to be extracted to match this flow */
+ u16 hek_fields;
+
+ /* CLS engine: only C2 is supported for now. */
+ u8 engine;
+
+ /* TCAM key and mask for C2-based steering. These fields should be
+ * encapsulated in a union should we add more engines.
+ */
+ u64 c2_tcam;
+ u64 c2_tcam_mask;
+
+ struct flow_rule *flow;
+};
+
+struct mvpp2_ethtool_fs {
+ struct mvpp2_rfs_rule rule;
+ struct ethtool_rxnfc rxnfc;
+};
+
struct mvpp2_port {
u8 id;
@@ -873,6 +921,10 @@ struct mvpp2_port {
/* RSS indirection table */
u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+
+ /* List of steering rules active on that port */
+ struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_RULES];
+ int n_rfs_rules;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index efdb7a656835..d046f7a1dcf5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -22,302 +22,302 @@
} \
}
-static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* TCP over IPv4 flows, Not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, Not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv4 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, Not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv6 flows, not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv6 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv6 flows, not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv6 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* IPv4 flows, no vlan tag */
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv4 flows, with vlan tag */
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv6 flows, no vlan tag */
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv6 flows, with vlan tag */
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
/* Non IP flow, no vlan tag */
- MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
0,
MVPP2_PRS_RI_VLAN_NONE,
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
- MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
MVPP22_CLS_HEK_OPT_VLAN,
0, 0),
};
@@ -344,9 +344,9 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
struct mvpp2_cls_flow_entry *fe)
{
mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
@@ -429,12 +429,6 @@ static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
}
-static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
-{
- fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
- fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
-}
-
static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
bool is_last)
{
@@ -454,9 +448,22 @@ static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}
+static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
+ u32 port)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
+ u8 lu_type)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
+}
+
/* Initialize the parser entry for the given flow */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
- struct mvpp2_cls_flow *flow)
+ const struct mvpp2_cls_flow *flow)
{
mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
flow->prs_ri.ri_mask);
@@ -464,7 +471,7 @@ static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
/* Initialize the Lookup Id table entry for the given flow */
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
- struct mvpp2_cls_flow *flow)
+ const struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_lookup_entry le;
@@ -477,7 +484,7 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
/* We point on the first lookup in the sequence for the flow, that is
* the C2 lookup.
*/
- le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
@@ -485,21 +492,111 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
mvpp2_cls_lookup_write(priv, &le);
}
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ u32 val;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+ if (c2->valid)
+ val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
+ else
+ val |= MVPP22_CLS_C2_TCAM_INV_BIT;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ u32 val;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+ c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
+}
+
+static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
+{
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ return MVPP22_FLOW_TCP4;
+ case TCP_V6_FLOW:
+ return MVPP22_FLOW_TCP6;
+ case UDP_V4_FLOW:
+ return MVPP22_FLOW_UDP4;
+ case UDP_V6_FLOW:
+ return MVPP22_FLOW_UDP6;
+ case IPV4_FLOW:
+ return MVPP22_FLOW_IP4;
+ case IPV6_FLOW:
+ return MVPP22_FLOW_IP6;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
+{
+ return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
+}
+
/* Initialize the flow table entries for the given flow */
-static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+static void mvpp2_cls_flow_init(struct mvpp2 *priv,
+ const struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_flow_entry fe;
- int i;
+ int i, pri = 0;
+
+ /* Assign default values to all entries in the flow */
+ for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
+ i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
+ memset(&fe, 0, sizeof(fe));
+ fe.index = i;
+ mvpp2_cls_flow_pri_set(&fe, pri++);
+
+ if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
+ mvpp2_cls_flow_last_set(&fe, 1);
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
- /* C2 lookup */
- memset(&fe, 0, sizeof(fe));
- fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+ /* RSS config C2 lookup */
+ mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
+ &fe);
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_last_set(&fe, 0);
- mvpp2_cls_flow_pri_set(&fe, 0);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+ mvpp2_cls_flow_lu_type_set(&fe, MVPP22_FLOW_ETHERNET);
/* Add all ports */
for (i = 0; i < MVPP2_MAX_PORTS; i++)
@@ -509,22 +606,19 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
/* C3Hx lookups */
for (i = 0; i < MVPP2_MAX_PORTS; i++) {
- memset(&fe, 0, sizeof(fe));
- fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+ mvpp2_cls_flow_read(priv,
+ MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
+ &fe);
+ /* Set a default engine. Will be overwritten when setting the
+ * real HEK parameters
+ */
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_pri_set(&fe, i + 1);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
mvpp2_cls_flow_port_add(&fe, BIT(i));
mvpp2_cls_flow_write(priv, &fe);
}
-
- /* Update the last entry */
- mvpp2_cls_flow_last_set(&fe, 1);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
-
- mvpp2_cls_flow_write(priv, &fe);
}
/* Adds a field to the Header Extracted Key generation parameters*/
@@ -555,6 +649,9 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ field_id = MVPP22_CLS_FIELD_MAC_DA;
+ break;
case MVPP22_CLS_HEK_OPT_VLAN:
field_id = MVPP22_CLS_FIELD_VLAN;
break;
@@ -586,9 +683,29 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
return 0;
}
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+/* Returns the size, in bits, of the corresponding HEK field */
+static int mvpp2_cls_hek_field_size(u32 field)
{
- if (flow >= MVPP2_N_FLOWS)
+ switch (field) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ return 48;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ return 32;
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ return 128;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ return 16;
+ default:
+ return -1;
+ }
+}
+
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+ if (flow >= MVPP2_N_PRS_FLOWS)
return NULL;
return &cls_flows[flow];
@@ -608,21 +725,17 @@ struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
u16 requested_opts)
{
+ const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *flow;
int i, engine, flow_index;
u16 hash_opts;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for_each_cls_flow_id_with_type(i, flow_type) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return -EINVAL;
- if (flow->flow_type != flow_type)
- continue;
-
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
- flow->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -697,21 +810,17 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
*/
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
+ const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *flow;
int i, flow_index;
u16 hash_opts = 0;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for_each_cls_flow_id_with_type(i, flow_type) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return 0;
- if (flow->flow_type != flow_type)
- continue;
-
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
- flow->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -723,10 +832,10 @@ static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
{
- struct mvpp2_cls_flow *flow;
+ const struct mvpp2_cls_flow *flow;
int i;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
break;
@@ -737,47 +846,6 @@ static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
}
}
-static void mvpp2_cls_c2_write(struct mvpp2 *priv,
- struct mvpp2_cls_c2_entry *c2)
-{
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
-
- /* Write TCAM */
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
-}
-
-void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
- struct mvpp2_cls_c2_entry *c2)
-{
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
-
- c2->index = index;
-
- c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
- c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
- c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
- c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
- c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
-
- c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
-
- c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
- c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
- c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
- c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
-}
-
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
@@ -791,6 +859,10 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+ /* Match on Lookup Type */
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_FLOW_ETHERNET);
+
/* Update RSS status after matching this entry */
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
@@ -809,6 +881,8 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
MVPP22_CLS_C2_ATTR0_QLOW(ql);
+ c2.valid = true;
+
mvpp2_cls_c2_write(port->priv, &c2);
}
@@ -817,6 +891,7 @@ void mvpp2_cls_init(struct mvpp2 *priv)
{
struct mvpp2_cls_lookup_entry le;
struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_c2_entry c2;
int index;
/* Enable classifier */
@@ -840,6 +915,14 @@ void mvpp2_cls_init(struct mvpp2 *priv)
mvpp2_cls_lookup_write(priv, &le);
}
+ /* Clear C2 TCAM engine table */
+ memset(&c2, 0, sizeof(c2));
+ c2.valid = false;
+ for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
+ c2.index = index;
+ mvpp2_cls_c2_write(priv, &c2);
+ }
+
mvpp2_cls_port_init_flows(priv);
}
@@ -902,16 +985,28 @@ static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
mvpp2_cls_c2_write(port->priv, &c2);
}
-void mvpp22_rss_enable(struct mvpp2_port *port)
+void mvpp22_port_rss_enable(struct mvpp2_port *port)
{
mvpp2_rss_port_c2_enable(port);
}
-void mvpp22_rss_disable(struct mvpp2_port *port)
+void mvpp22_port_rss_disable(struct mvpp2_port *port)
{
mvpp2_rss_port_c2_disable(port);
}
+static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, entry, &c2);
+
+ /* Clear the port map so that the entry doesn't match anymore */
+ c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
@@ -928,6 +1023,289 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
+static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ struct flow_action_entry *act;
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+ int index;
+
+ memset(&c2, 0, sizeof(c2));
+
+ index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
+ if (index < 0)
+ return -EINVAL;
+ c2.index = index;
+
+ act = &rule->flow->action.entries[0];
+
+ rule->c2_index = c2.index;
+
+ c2.tcam[0] = (rule->c2_tcam & 0xffff) |
+ ((rule->c2_tcam_mask & 0xffff) << 16);
+ c2.tcam[1] = ((rule->c2_tcam >> 16) & 0xffff) |
+ (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
+ c2.tcam[2] = ((rule->c2_tcam >> 32) & 0xffff) |
+ (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
+ c2.tcam[3] = ((rule->c2_tcam >> 48) & 0xffff) |
+ (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Match on Lookup Type */
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
+
+ if (act->id == FLOW_ACTION_DROP) {
+ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
+ } else {
+ /* We want to keep the default color derived from the Header
+ * Parser drop entries, for VLAN and MAC filtering. This will
+ * assign a default color of Green or Red, and we want matches
+ * with a non-drop action to keep that color.
+ */
+ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
+
+ qh = ((act->queue.index + port->first_rxq) >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = (act->queue.index + port->first_rxq) & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+ }
+
+ c2.valid = true;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+
+ return 0;
+}
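
The key/mask packing above is easy to misread, so here is a standalone sketch (plain C, not driver code) reproducing the same layout: each 32-bit C2 TCAM data word carries 16 key bits in its low half and the corresponding 16 mask bits in its high half.

#include <stdint.h>
#include <stdio.h>

/* Pack a 64-bit key and mask into four 32-bit words, 16 key bits low,
 * 16 mask bits high, mirroring the c2.tcam[0..3] assignments above.
 */
static void pack_c2_tcam(uint64_t key, uint64_t mask, uint32_t tcam[4])
{
	for (int i = 0; i < 4; i++)
		tcam[i] = (uint32_t)((key >> (16 * i)) & 0xffff) |
			  (uint32_t)(((mask >> (16 * i)) & 0xffff) << 16);
}

int main(void)
{
	uint32_t tcam[4];

	/* Example: a 16-bit field placed in the top 16 bits of the key */
	pack_c2_tcam((uint64_t)0x1234 << 48, (uint64_t)0xffff << 48, tcam);

	for (int i = 0; i < 4; i++)
		printf("tcam[%d] = 0x%08x\n", i, tcam[i]);
	return 0;
}
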
+
+static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ return mvpp2_port_c2_tcam_rule_add(port, rule);
+}
+
+static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow_entry fe;
+ int index, i;
+
+ for_each_cls_flow_id_containing_type(i, rule->flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
+
+ mvpp2_cls_flow_read(port->priv, index, &fe);
+ mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ if (rule->c2_index >= 0)
+ mvpp22_port_c2_lookup_disable(port, rule->c2_index);
+
+ return 0;
+}
+
+static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_cls_flow_entry fe;
+ int index, ret, i;
+
+ if (rule->engine != MVPP22_CLS_ENGINE_C2)
+ return -EOPNOTSUPP;
+
+ ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
+ if (ret)
+ return ret;
+
+ for_each_cls_flow_id_containing_type(i, rule->flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
+
+ mvpp2_cls_flow_read(priv, index, &fe);
+ mvpp2_cls_flow_eng_set(&fe, rule->engine);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
+ mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
+ mvpp2_cls_flow_port_add(&fe, 0xf);
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ return 0;
+}
+
+static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
+{
+ struct flow_rule *flow = rule->flow;
+ int offs = 64;
+
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(flow, &match);
+ if (match.mask->src) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
+ offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
+
+ rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
+ rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
+ }
+
+ if (match.mask->dst) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;
+ offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
+
+ rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
+ rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
+ }
+ }
+
+ if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
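
A quick illustration of the offset arithmetic above (standalone C, with 16 assumed as the value mvpp2_cls_hek_field_size() returns for the L4 port fields): fields are appended from the most-significant end of the 64-bit HEK, so the source port lands in bits 63:48 and the destination port in bits 47:32.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t c2_tcam = 0;
	int offs = 64;

	offs -= 16;				/* L4 source port field */
	c2_tcam |= (uint64_t)80 << offs;	/* e.g. sport 80 */

	offs -= 16;				/* L4 destination port field */
	c2_tcam |= (uint64_t)8080 << offs;	/* e.g. dport 8080 */

	printf("c2_tcam = 0x%016llx\n", (unsigned long long)c2_tcam);
	return 0;
}
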
+
+static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
+{
+ struct flow_rule *flow = rule->flow;
+ struct flow_action_entry *act;
+
+ act = &flow->action.entries[0];
+ if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
+ return -EOPNOTSUPP;
+
+ /* For now, only use the C2 engine which has a HEK size limited to 64
+ * bits for TCAM matching.
+ */
+ rule->engine = MVPP22_CLS_ENGINE_C2;
+
+ if (mvpp2_cls_c2_build_match(rule))
+ return -EINVAL;
+
+ return 0;
+}
+
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
+ struct ethtool_rxnfc *rxnfc)
+{
+ struct mvpp2_ethtool_fs *efs;
+
+ if (rxnfc->fs.location >= MVPP2_N_RFS_RULES)
+ return -EINVAL;
+
+ efs = port->rfs_rules[rxnfc->fs.location];
+ if (!efs)
+ return -ENOENT;
+
+ memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
+
+ return 0;
+}
+
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info)
+{
+ struct ethtool_rx_flow_spec_input input = {};
+ struct ethtool_rx_flow_rule *ethtool_rule;
+ struct mvpp2_ethtool_fs *efs, *old_efs;
+ int ret = 0;
+
+ if (info->fs.location >= 4 ||
+ info->fs.location < 0)
+ return -EINVAL;
+
+ efs = kzalloc(sizeof(*efs), GFP_KERNEL);
+ if (!efs)
+ return -ENOMEM;
+
+ input.fs = &info->fs;
+
+ ethtool_rule = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(ethtool_rule)) {
+ ret = PTR_ERR(ethtool_rule);
+ goto clean_rule;
+ }
+
+ efs->rule.flow = ethtool_rule->rule;
+ efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
+
+ ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+
+ efs->rule.loc = info->fs.location;
+
+ /* Replace an already existing rule */
+ if (port->rfs_rules[efs->rule.loc]) {
+ old_efs = port->rfs_rules[efs->rule.loc];
+ ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+ kfree(old_efs);
+ port->n_rfs_rules--;
+ }
+
+ ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+
+ memcpy(&efs->rxnfc, info, sizeof(*info));
+ port->rfs_rules[efs->rule.loc] = efs;
+ port->n_rfs_rules++;
+
+ return ret;
+
+clean_eth_rule:
+ ethtool_rx_flow_rule_destroy(ethtool_rule);
+clean_rule:
+ kfree(efs);
+ return ret;
+}
+
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info)
+{
+ struct mvpp2_ethtool_fs *efs;
+ int ret;
+
+ efs = port->rfs_rules[info->fs.location];
+ if (!efs)
+ return -EINVAL;
+
+ /* Remove the rule from the engines. */
+ ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
+ if (ret)
+ return ret;
+
+ port->n_rfs_rules--;
+ port->rfs_rules[info->fs.location] = NULL;
+ kfree(efs);
+
+ return 0;
+}
+
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
int nrxqs, cpu, cpus = num_possible_cpus();
@@ -965,19 +1343,22 @@ void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
u16 hash_opts = 0;
+ u32 flow_type;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
+ flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
+
+ switch (flow_type) {
+ case MVPP22_FLOW_TCP4:
+ case MVPP22_FLOW_UDP4:
+ case MVPP22_FLOW_TCP6:
+ case MVPP22_FLOW_UDP6:
if (info->data & RXH_L4_B_0_1)
hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
if (info->data & RXH_L4_B_2_3)
hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
/* Fallthrough */
- case IPV4_FLOW:
- case IPV6_FLOW:
+ case MVPP22_FLOW_IP4:
+ case MVPP22_FLOW_IP6:
if (info->data & RXH_L2DA)
hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
if (info->data & RXH_VLAN)
@@ -994,15 +1375,18 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
default: return -EOPNOTSUPP;
}
- return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
+ return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
unsigned long hash_opts;
+ u32 flow_type;
int i;
- hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
+ flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
+
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
info->data = 0;
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
@@ -1037,7 +1421,7 @@ int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return 0;
}
-void mvpp22_rss_port_init(struct mvpp2_port *port)
+void mvpp22_port_rss_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
int i;
@@ -1065,10 +1449,10 @@ void mvpp22_rss_port_init(struct mvpp2_port *port)
mvpp22_rss_fill_table(port, port->id);
/* Configure default flows */
- mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
- mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
- mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
- mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
- mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
- mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 089f05f29891..56b617375a65 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -71,14 +71,6 @@ enum mvpp2_cls_field_id {
MVPP22_CLS_FIELD_L4DIP = 0x1e,
};
-enum mvpp2_cls_flow_seq {
- MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
- MVPP2_CLS_FLOW_SEQ_FIRST1,
- MVPP2_CLS_FLOW_SEQ_FIRST2,
- MVPP2_CLS_FLOW_SEQ_LAST,
- MVPP2_CLS_FLOW_SEQ_MIDDLE
-};
-
/* Classifier C2 engine constants */
#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
@@ -100,39 +92,62 @@ enum mvpp22_cls_c2_fwd_action {
MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
};
+enum mvpp22_cls_c2_color_action {
+ MVPP22_C2_COL_NO_UPD = 0,
+ MVPP22_C2_COL_NO_UPD_LOCK,
+ MVPP22_C2_COL_GREEN,
+ MVPP22_C2_COL_GREEN_LOCK,
+ MVPP22_C2_COL_YELLOW,
+ MVPP22_C2_COL_YELLOW_LOCK,
+ MVPP22_C2_COL_RED, /* Drop */
+ MVPP22_C2_COL_RED_LOCK, /* Drop */
+};
+
#define MVPP2_CLS_C2_TCAM_WORDS 5
#define MVPP2_CLS_C2_ATTR_WORDS 5
struct mvpp2_cls_c2_entry {
u32 index;
+ /* TCAM lookup key */
u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+ /* Actions to perform upon TCAM match */
u32 act;
+ /* Attributes relative to the actions to perform */
u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+ /* Entry validity */
+ u8 valid;
};
+#define MVPP22_FLOW_ETHER_BIT BIT(0)
+#define MVPP22_FLOW_IP4_BIT BIT(1)
+#define MVPP22_FLOW_IP6_BIT BIT(2)
+#define MVPP22_FLOW_TCP_BIT BIT(3)
+#define MVPP22_FLOW_UDP_BIT BIT(4)
+
+#define MVPP22_FLOW_TCP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT)
+#define MVPP22_FLOW_TCP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT)
+#define MVPP22_FLOW_UDP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT)
+#define MVPP22_FLOW_UDP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT)
+#define MVPP22_FLOW_IP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT)
+#define MVPP22_FLOW_IP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT)
+#define MVPP22_FLOW_ETHERNET (MVPP22_FLOW_ETHER_BIT)
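
The composite flow types are built so that, for example, MVPP22_FLOW_TCP4 contains all the bits of MVPP22_FLOW_IP4; that containment is what the for_each_cls_flow_id_containing_type() iterator defined further down tests with (flow_type & type) == type. A minimal standalone check, using local copies of the bit values above:

#include <stdio.h>

#define FLOW_ETHER_BIT	(1 << 0)
#define FLOW_IP4_BIT	(1 << 1)
#define FLOW_TCP_BIT	(1 << 3)

#define FLOW_TCP4	(FLOW_ETHER_BIT | FLOW_IP4_BIT | FLOW_TCP_BIT)
#define FLOW_IP4	(FLOW_ETHER_BIT | FLOW_IP4_BIT)

int main(void)
{
	/* TCP4 contains IP4, but not the other way around */
	printf("%d\n", (FLOW_TCP4 & FLOW_IP4) == FLOW_IP4);	/* 1 */
	printf("%d\n", (FLOW_IP4 & FLOW_TCP4) == FLOW_TCP4);	/* 0 */
	return 0;
}
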
+
/* Classifier C2 engine entries */
-#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
-#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
+#define MVPP22_CLS_C2_N_ENTRIES 256
-/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
- *
- * The first performs a lookup using the C2 TCAM engine, to tag the
- * packet for software forwarding (needed for RSS), enable or disable RSS, and
- * assign the default rx queue.
- *
- * The second configures the hash generation, by specifying which fields of the
- * packet header are used to generate the hash, and specifies the relevant hash
- * engine to use.
+/* Number of per-port dedicated entries in the C2 TCAM */
+#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
+
+/* Each port has one range per flow type + one entry controlling the global RSS
+ * setting and the default rx queue
*/
-#define MVPP22_RSS_FLOW_C2_OFFS 0
-#define MVPP22_RSS_FLOW_HASH_OFFS 1
-#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
+#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
+#define MVPP22_CLS_C2_PORT_FIRST(p) ((p) * MVPP22_CLS_C2_PORT_RANGE)
+#define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1)
-#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
- MVPP22_RSS_FLOW_C2_OFFS)
-#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
- MVPP22_RSS_FLOW_HASH_OFFS)
-#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
+#define MVPP22_CLS_C2_PORT_FLOW_FIRST(p) (MVPP22_CLS_C2_PORT_FIRST(p))
+
+#define MVPP22_CLS_C2_RFS_LOC(p, loc) (MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc))
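
To make the per-port C2 layout concrete, here is a standalone restatement of the macros above, with MVPP2_N_RFS_ENTRIES_PER_FLOW assumed to be 8 purely for illustration (the real constant lives in mvpp2.h): each port owns a contiguous window whose last entry is its RSS/default-queue entry.

#include <stdio.h>

#define N_RFS_PER_FLOW	8	/* placeholder value, see mvpp2.h */
#define PORT_RANGE	(N_RFS_PER_FLOW + 1)
#define PORT_FIRST(p)	((p) * PORT_RANGE)
#define RSS_ENTRY(p)	(PORT_FIRST((p) + 1) - 1)

int main(void)
{
	for (int p = 0; p < 4; p++)
		printf("port %d: RFS entries %d..%d, RSS entry %d\n",
		       p, PORT_FIRST(p), PORT_FIRST(p) + N_RFS_PER_FLOW - 1,
		       RSS_ENTRY(p));
	return 0;
}
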
/* Packet flow ID */
enum mvpp2_prs_flow {
@@ -162,6 +177,11 @@ enum mvpp2_prs_flow {
MVPP2_FL_LAST,
};
+/* LU Type defined for all engines, and specified in the flow table */
+#define MVPP2_CLS_LU_TYPE_MASK 0x3f
+
+#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START)
+
struct mvpp2_cls_flow {
/* The L2-L4 traffic flow type */
int flow_type;
@@ -176,12 +196,48 @@ struct mvpp2_cls_flow {
struct mvpp2_prs_result_info prs_ri;
};
-#define MVPP2_N_FLOWS 52
+#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1 + 16)
+#define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \
+ MVPP2_CLS_FLT_ENTRIES_PER_FLOW)
+
+#define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n) (MVPP2_CLS_FLT_FIRST(id) + \
+ ((port) * MVPP2_MAX_PORTS) + \
+ (rfs_n))
+
+#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0))
+#define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port))
+#define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \
+ MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1)
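
Similarly, a standalone sketch of the flow-table layout above, assuming MVPP2_MAX_PORTS is 4 (an assumption; the real constant lives in mvpp2.h) and using a zero-based id in place of (id - MVPP2_FL_START): under that assumption each flow id owns a 21-entry block of 16 per-port RFS entries, one shared C2/RSS entry, and one hash entry per port.

#include <stdio.h>

#define MAX_PORTS		4	/* assumed, see mvpp2.h */
#define ENTRIES_PER_FLOW	(MAX_PORTS + 1 + 16)
#define FLT_FIRST(id)		((id) * ENTRIES_PER_FLOW)
#define FLT_C2_RFS(port, id, n)	(FLT_FIRST(id) + (port) * MAX_PORTS + (n))
#define FLT_C2_RSS(id)		(FLT_C2_RFS(MAX_PORTS, id, 0))
#define FLT_HASH(port, id)	(FLT_C2_RSS(id) + 1 + (port))

int main(void)
{
	int id = 2;

	printf("flow %d: first=%d rfs(port 1, n 0)=%d rss=%d hash(port 1)=%d last=%d\n",
	       id, FLT_FIRST(id), FLT_C2_RFS(1, id, 0), FLT_C2_RSS(id),
	       FLT_HASH(1, id), FLT_FIRST(id) + ENTRIES_PER_FLOW - 1);
	return 0;
}
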
+
+/* Iterate over each classifier flow id. Sets 'i' to be the index of the first
+ * entry in the cls_flows table for each different flow_id.
+ * This relies on entries with the same flow_id being contiguous in the
+ * cls_flows table.
+ */
+#define for_each_cls_flow_id(i) \
+ for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++) \
+ if ((i) > 0 && \
+ cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id) \
+ continue; \
+ else
+
+/* Iterate over each classifier flow that has a given flow_type. Sets 'i' to be
+ * the index of the first entry in the cls_flows table for each different
+ * flow_id that has the given flow_type. This allows operating on all flows
+ * that match a given ethtool flow type.
+ */
+#define for_each_cls_flow_id_with_type(i, type) \
+ for_each_cls_flow_id((i)) \
+ if (cls_flows[(i)].flow_type != (type)) \
+ continue; \
+ else
+
+#define for_each_cls_flow_id_containing_type(i, type) \
+ for_each_cls_flow_id((i)) \
+ if ((cls_flows[(i)].flow_type & (type)) != (type)) \
+ continue; \
+ else
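
The iterators above are plain for loops with a filtering if/else in their body, so they can be used like any other loop statement. A standalone sketch with a toy table (not the driver's cls_flows) shows that only the first entry of each contiguous flow_id group is visited:

#include <stdio.h>

struct toy_flow { int flow_type; int flow_id; };

#define TOY_N_FLOWS 5
static const struct toy_flow toy_flows[TOY_N_FLOWS] = {
	{ 0x3, 10 }, { 0x3, 10 }, { 0xb, 11 }, { 0xb, 11 }, { 0x1, 12 },
};

#define for_each_toy_flow_id(i)						\
	for ((i) = 0; (i) < TOY_N_FLOWS; (i)++)				\
		if ((i) > 0 &&						\
		    toy_flows[(i)].flow_id == toy_flows[(i) - 1].flow_id) \
			continue;					\
		else

int main(void)
{
	int i;

	for_each_toy_flow_id(i)
		printf("flow_id %d starts at index %d\n",
		       toy_flows[i].flow_id, i);
	return 0;
}
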
-#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
-#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW)
-#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \
- (port) + 1)
struct mvpp2_cls_flow_entry {
u32 index;
u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -194,11 +250,10 @@ struct mvpp2_cls_lookup_entry {
};
void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+void mvpp22_port_rss_init(struct mvpp2_port *port);
-void mvpp22_rss_port_init(struct mvpp2_port *port);
-
-void mvpp22_rss_enable(struct mvpp2_port *port);
-void mvpp22_rss_disable(struct mvpp2_port *port);
+void mvpp22_port_rss_enable(struct mvpp2_port *port);
+void mvpp22_port_rss_disable(struct mvpp2_port *port);
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
@@ -213,7 +268,7 @@ int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
@@ -230,4 +285,13 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
struct mvpp2_cls_c2_entry *c2);
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
+ struct ethtool_rxnfc *rxnfc);
+
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info);
+
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info);
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index f9744a61e5dd..0ee39ea47b6b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -18,22 +18,48 @@ struct mvpp2_dbgfs_prs_entry {
struct mvpp2 *priv;
};
+struct mvpp2_dbgfs_c2_entry {
+ int id;
+ struct mvpp2 *priv;
+};
+
struct mvpp2_dbgfs_flow_entry {
int flow;
struct mvpp2 *priv;
};
+struct mvpp2_dbgfs_flow_tbl_entry {
+ int id;
+ struct mvpp2 *priv;
+};
+
struct mvpp2_dbgfs_port_flow_entry {
struct mvpp2_port *port;
struct mvpp2_dbgfs_flow_entry *dbg_fe;
};
+struct mvpp2_dbgfs_entries {
+ /* Entries for Header Parser debug info */
+ struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE];
+
+ /* Entries for Classifier C2 engine debug info */
+ struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES];
+
+ /* Entries for Classifier Flow Table debug info */
+ struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE];
+
+ /* Entries for Classifier flows debug info */
+ struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS];
+
+ /* Entries for per-port flows debug info */
+ struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS];
+};
+
static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_flow_entry *entry = s->private;
- int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+ struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private;
- u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id);
seq_printf(s, "%u\n", hits);
@@ -58,7 +84,7 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_flow_entry *entry = s->private;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
const char *flow_name;
f = mvpp2_cls_flow_get(entry->flow);
@@ -93,30 +119,12 @@ static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
-}
-
-static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
-
- kfree(flow_entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
- .open = mvpp2_dbgfs_flow_type_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_flow_type_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type);
static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_flow_entry *entry = s->private;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ const struct mvpp2_cls_flow *f;
f = mvpp2_cls_flow_get(entry->flow);
if (!f)
@@ -134,7 +142,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
int flow_index;
u16 hash_opts;
@@ -142,7 +150,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
if (!f)
return -EINVAL;
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -153,42 +161,21 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
- inode->i_private);
-}
-
-static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
- struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
-
- kfree(flow_entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
- .open = mvpp2_dbgfs_port_flow_hash_opt_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_port_flow_hash_opt_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt);
static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
int flow_index, engine;
f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
if (!f)
return -EINVAL;
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -203,11 +190,10 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
u32 hits;
- hits = mvpp2_cls_c2_hit_count(port->priv,
- MVPP22_CLS_C2_RSS_ENTRY(port->id));
+ hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id);
seq_printf(s, "%u\n", hits);
@@ -218,11 +204,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
struct mvpp2_cls_c2_entry c2;
u8 qh, ql;
- mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
@@ -239,11 +225,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
struct mvpp2_cls_c2_entry c2;
int enabled;
- mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
@@ -456,25 +442,7 @@ static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
-}
-
-static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_prs_entry *entry = seq->private;
-
- kfree(entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
- .open = mvpp2_dbgfs_prs_valid_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_prs_valid_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid);
static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct mvpp2_port *port,
@@ -487,10 +455,7 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
if (IS_ERR(port_dir))
return PTR_ERR(port_dir);
- /* This will be freed by 'hash_opts' release op */
- port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
- if (!port_entry)
- return -ENOMEM;
+ port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
port_entry->port = port;
port_entry->dbg_fe = entry;
@@ -518,17 +483,11 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
if (!flow_entry_dir)
return -ENOMEM;
- /* This will be freed by 'type' release op */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
+ entry = &priv->dbgfs_entries->flow_entries[flow];
entry->flow = flow;
entry->priv = priv;
- debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
- &mvpp2_dbgfs_flow_flt_hits_fops);
-
debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
&mvpp2_dbgfs_flow_dec_hits_fops);
@@ -545,6 +504,7 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
if (ret)
return ret;
}
+
return 0;
}
@@ -557,7 +517,7 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
if (!flow_dir)
return -ENOMEM;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
if (ret)
return ret;
@@ -582,10 +542,7 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
if (!prs_entry_dir)
return -ENOMEM;
- /* The 'valid' entry's ops will free that */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
+ entry = &priv->dbgfs_entries->prs_entries[tid];
entry->tid = tid;
entry->priv = priv;
@@ -630,6 +587,98 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
return 0;
}
+static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int id)
+{
+ struct mvpp2_dbgfs_c2_entry *entry;
+ struct dentry *c2_entry_dir;
+ char c2_entry_name[10];
+
+ if (id >= MVPP22_CLS_C2_N_ENTRIES)
+ return -EINVAL;
+
+ sprintf(c2_entry_name, "%03d", id);
+
+ c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
+ if (!c2_entry_dir)
+ return -ENOMEM;
+
+ entry = &priv->dbgfs_entries->c2_entries[id];
+
+ entry->id = id;
+ entry->priv = priv;
+
+ debugfs_create_file("hits", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int id)
+{
+ struct mvpp2_dbgfs_flow_tbl_entry *entry;
+ struct dentry *flow_tbl_entry_dir;
+ char flow_tbl_entry_name[10];
+
+ if (id >= MVPP2_CLS_FLOWS_TBL_SIZE)
+ return -EINVAL;
+
+ sprintf(flow_tbl_entry_name, "%03d", id);
+
+ flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
+ if (!flow_tbl_entry_dir)
+ return -ENOMEM;
+
+ entry = &priv->dbgfs_entries->flt_entries[id];
+
+ entry->id = id;
+ entry->priv = priv;
+
+ debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *cls_dir, *c2_dir, *flow_tbl_dir;
+ int i, ret;
+
+ cls_dir = debugfs_create_dir("classifier", parent);
+ if (!cls_dir)
+ return -ENOMEM;
+
+ c2_dir = debugfs_create_dir("c2", cls_dir);
+ if (!c2_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
+ ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
+ if (!flow_tbl_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
+ ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int mvpp2_dbgfs_port_init(struct dentry *parent,
struct mvpp2_port *port)
{
@@ -648,21 +697,14 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
debugfs_create_file("vid_filter", 0444, port_dir, port,
&mvpp2_dbgfs_port_vid_fops);
- debugfs_create_file("c2_hits", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_hits_fops);
-
- debugfs_create_file("default_rxq", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_rxq_fops);
-
- debugfs_create_file("rss_enable", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_enable_fops);
-
return 0;
}
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
+
+ kfree(priv->dbgfs_entries);
}
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
@@ -682,11 +724,18 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
return;
priv->dbgfs_dir = mvpp2_dir;
+ priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
+ if (!priv->dbgfs_entries)
+ goto err;
ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
if (ret)
goto err;
+ ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
for (i = 0; i < priv->port_count; i++) {
ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 25fbed2b8d94..d38952eb7aa9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3741,9 +3741,9 @@ static int mvpp2_set_features(struct net_device *dev,
if (changed & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- mvpp22_rss_enable(port);
+ mvpp22_port_rss_enable(port);
else
- mvpp22_rss_disable(port);
+ mvpp22_port_rss_disable(port);
}
return 0;
@@ -3937,7 +3937,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rules)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
+ int ret = 0, i, loc = 0;
if (!mvpp22_rss_is_supported())
return -EOPNOTSUPP;
@@ -3949,6 +3949,18 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
case ETHTOOL_GRXRINGS:
info->data = port->nrxqs;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = port->n_rfs_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = mvpp2_ethtool_cls_rule_get(port, info);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ for (i = 0; i < MVPP2_N_RFS_RULES; i++) {
+ if (port->rfs_rules[i])
+ rules[loc++] = i;
+ }
+ break;
default:
return -ENOTSUPP;
}
@@ -3969,6 +3981,12 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
case ETHTOOL_SRXFH:
ret = mvpp2_ethtool_rxfh_set(port, info);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = mvpp2_ethtool_cls_rule_ins(port, info);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = mvpp2_ethtool_cls_rule_del(port, info);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -4301,7 +4319,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_port_config(port);
if (mvpp22_rss_is_supported())
- mvpp22_rss_port_init(port);
+ mvpp22_port_rss_init(port);
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
@@ -4848,6 +4866,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
struct device_node *port_node = to_of_node(port_fwnode);
+ netdev_features_t features;
struct net_device *dev;
struct resource *res;
struct phylink *phylink;
@@ -4856,7 +4875,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
unsigned long flags = 0;
bool has_tx_irqs;
u32 id;
- int features;
int phy_mode;
int err, i;
@@ -5040,8 +5058,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
- if (mvpp22_rss_is_supported())
+ if (mvpp22_rss_is_supported()) {
dev->hw_features |= NETIF_F_RXHASH;
+ dev->features |= NETIF_F_NTUPLE;
+ }
if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 35f2142aac5e..ce037e8530fa 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1461,7 +1461,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
if (pdev->dev.of_node)
mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (mac_addr && is_valid_ether_addr(mac_addr)) {
+ if (!IS_ERR_OR_NULL(mac_addr)) {
ether_addr_copy(dev->dev_addr, mac_addr);
} else {
/* try reading the mac address, if set by the bootloader */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 8b3495ee2b6e..5adf307fbbfd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1139,9 +1139,6 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
/* Make sure write' to descriptors are complete before we tell hardware */
wmb();
sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-
- /* Synchronize I/O on since next processor may write to tail */
- mmiowb();
}
@@ -1354,7 +1351,6 @@ stopped:
/* reset the Rx prefetch unit */
sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
- mmiowb();
}
/* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -4808,8 +4804,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
* 2) from internal registers set by bootloader
*/
iap = of_get_mac_address(hw->pdev->dev.of_node);
- if (iap)
- memcpy(dev->dev_addr, iap, ETH_ALEN);
+ if (!IS_ERR(iap))
+ ether_addr_copy(dev->dev_addr, iap);
else
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
ETH_ALEN);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 549d36497b8c..f9fbb3ffa3a6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -767,7 +767,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+ !netdev_xmit_more())
mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
return 0;
@@ -2027,7 +2028,7 @@ static int __init mtk_init(struct net_device *dev)
const char *mac_addr;
mac_addr = of_get_mac_address(mac->of_node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(dev->dev_addr, mac_addr);
/* If the mac address is invalid, use random mac address */
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index ff8057ed97ee..8491db57b0b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -26,6 +26,7 @@ config MLX4_EN_DCB
config MLX4_CORE
tristate
depends on PCI
+ select NET_DEVLINK
default n
config MLX4_DEBUG
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c81d15bf259c..87e90b5d4d7d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -129,10 +129,6 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
__raw_writel((__force u32)cpu_to_be32(comm_flags),
(__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
- /* Make sure that our comm channel write doesn't
- * get mixed in with writes from another CPU.
- */
- mmiowb();
end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
while (time_before(jiffies, end)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index a5d5d6fc1da0..c678344d22a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -281,7 +281,6 @@ static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
__raw_writel((__force u32) cpu_to_be32(val),
&priv->mfunc.comm->slave_write);
- mmiowb();
mutex_unlock(&dev->persist->device_state_mutex);
return 0;
}
@@ -496,12 +495,6 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
(op_modifier << HCR_OPMOD_SHIFT) |
op), hcr + 6);
- /*
- * Make sure that our HCR writes don't get mixed in with
- * writes from another CPU starting a FW command.
- */
- mmiowb();
-
cmd->toggle = cmd->toggle ^ 1;
ret = 0;
@@ -2206,7 +2199,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
}
__raw_writel((__force u32) cpu_to_be32(reply),
&priv->mfunc.comm[slave].slave_read);
- mmiowb();
return;
@@ -2410,7 +2402,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
&priv->mfunc.comm[i].slave_write);
__raw_writel((__force u32) 0,
&priv->mfunc.comm[i].slave_read);
- mmiowb();
for (port = 1; port <= MLX4_MAX_PORTS; port++) {
struct mlx4_vport_state *admin_vport;
struct mlx4_vport_state *oper_vport;
@@ -2576,10 +2567,6 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
__raw_writel((__force u32)cpu_to_be32(slave_read),
&priv->mfunc.comm[slave].slave_read);
- /* Make sure that our comm channel write doesn't
- * get mixed in with writes from another CPU.
- */
- mmiowb();
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 2cbd2bd7c67c..36a92b19e613 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -685,16 +685,15 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
- return fallback(dev, skb, NULL) % rings_p_up;
+ return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
@@ -1043,7 +1042,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
tx_info->nr_bytes,
- skb->xmit_more);
+ netdev_xmit_more());
real_size = (real_size / 16) & 0x3f;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8137454e2534..630f15977f09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -698,8 +698,7 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6debffb8336b..9aca8086ee01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -5,6 +5,7 @@
config MLX5_CORE
tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on PCI
+ select NET_DEVLINK
imply PTP_1588_CLOCK
imply VXLAN
default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 1a16f6d73cbc..243368dc23db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -22,7 +22,8 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
- en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o
+ en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \
+ en/params.o
#
# Netdev extra
@@ -35,7 +36,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu
#
# Core extra
#
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
@@ -57,5 +58,3 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
-
-CFLAGS_tracepoint.o := -I$(src)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 9008e17126db..549f962cd86e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
int node)
{
struct mlx5_priv *priv = &dev->priv;
+ struct device *device = dev->device;
int original_node;
void *cpu_handle;
mutex_lock(&priv->alloc_mutex);
- original_node = dev_to_node(&dev->pdev->dev);
- set_dev_node(&dev->pdev->dev, node);
- cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
+ original_node = dev_to_node(device);
+ set_dev_node(device, node);
+ cpu_handle = dma_alloc_coherent(device, size, dma_handle,
GFP_KERNEL);
- set_dev_node(&dev->pdev->dev, original_node);
+ set_dev_node(device, original_node);
mutex_unlock(&priv->alloc_mutex);
return cpu_handle;
}
@@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
+ dma_free_coherent(dev->device, buf->size, buf->frags->buf,
buf->frags->map);
kfree(buf->frags);
@@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
if (!frag->buf)
goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) {
- dma_free_coherent(&dev->pdev->dev, frag_sz,
+ dma_free_coherent(dev->device, frag_sz,
buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift);
@@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
err_free_buf:
while (i--)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
+ dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map);
kfree(buf->frags);
err_out:
@@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE);
- dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
+ dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
buf->frags[i].map);
size -= frag_sz;
}
@@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ dma_free_coherent(dev->device, PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index be48c6440251..937ba4bcb056 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -917,7 +917,6 @@ static void cmd_work_handler(struct work_struct *work)
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
- mmiowb();
/* if not in polling don't use ent after this point */
if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
poll_timeout(ent);
@@ -1347,7 +1346,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
- dev_name(&dev->pdev->dev));
+ dev_name(dev->device));
}
static void clean_debug_files(struct mlx5_core_dev *dev)
@@ -1852,7 +1851,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev = dev->device;
cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
&cmd->alloc_dma, GFP_KERNEL);
@@ -1883,7 +1882,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev = dev->device;
dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
cmd->alloc_dma);
@@ -1902,14 +1901,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
memset(cmd, 0, sizeof(*cmd));
cmd_if_rev = cmdif_rev(dev);
if (cmd_if_rev != CMD_IF_REV) {
- dev_err(&dev->pdev->dev,
- "Driver cmdif rev(%d) differs from firmware's(%d)\n",
- CMD_IF_REV, cmd_if_rev);
+ mlx5_core_err(dev,
+ "Driver cmdif rev(%d) differs from firmware's(%d)\n",
+ CMD_IF_REV, cmd_if_rev);
return -EINVAL;
}
- cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align,
- 0);
+ cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
if (!cmd->pool)
return -ENOMEM;
@@ -1921,14 +1919,14 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->log_sz = cmd_l >> 4 & 0xf;
cmd->log_stride = cmd_l & 0xf;
if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
- dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
- 1 << cmd->log_sz);
+ mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
+ 1 << cmd->log_sz);
err = -EINVAL;
goto err_free_page;
}
if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
- dev_err(&dev->pdev->dev, "command queue size overflow\n");
+ mlx5_core_err(dev, "command queue size overflow\n");
err = -EINVAL;
goto err_free_page;
}
@@ -1939,8 +1937,8 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
if (cmd->cmdif_rev > CMD_IF_REV) {
- dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
- CMD_IF_REV, cmd->cmdif_rev);
+ mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
+ CMD_IF_REV, cmd->cmdif_rev);
err = -EOPNOTSUPP;
goto err_free_page;
}
@@ -1956,7 +1954,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
if (cmd_l & 0xfff) {
- dev_err(&dev->pdev->dev, "invalid command queue address\n");
+ mlx5_core_err(dev, "invalid command queue address\n");
err = -ENOMEM;
goto err_free_page;
}
@@ -1976,7 +1974,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
set_wqname(dev);
cmd->wq = create_singlethread_workqueue(cmd->wq_name);
if (!cmd->wq) {
- dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
+ mlx5_core_err(dev, "failed to create command workqueue\n");
err = -ENOMEM;
goto err_cache;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
index 83f90e9aff45..3038be575923 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
TP_STRUCT__entry(
- __string(dev_name, dev_name(&tracer->dev->pdev->dev))
+ __string(dev_name, dev_name(tracer->dev->device))
__field(u64, trace_timestamp)
__field(bool, lost)
__field(u8, event_id)
@@ -55,7 +55,8 @@ TRACE_EVENT(mlx5_fw,
),
TP_fast_assign(
- __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev));
+ __assign_str(dev_name,
+ dev_name(tracer->dev->device));
__entry->trace_timestamp = trace_timestamp;
__entry->lost = lost;
__entry->event_id = event_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 71c65cc17904..3a183d690e23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -240,8 +240,8 @@ struct mlx5e_params {
bool rx_cqe_compress_def;
struct net_dim_cq_moder rx_cq_moderation;
struct net_dim_cq_moder tx_cq_moderation;
+ bool tunneled_offload_en;
bool lro_en;
- u32 lro_wqe_sz;
u8 tx_min_inline_mode;
bool vlan_strip_disable;
bool scatter_fcs_en;
@@ -410,14 +410,17 @@ struct mlx5e_xdp_info_fifo {
struct mlx5e_xdp_wqe_info {
u8 num_wqebbs;
- u8 num_ds;
+ u8 num_pkts;
};
struct mlx5e_xdp_mpwqe {
/* Current MPWQE session */
struct mlx5e_tx_wqe *wqe;
u8 ds_count;
+ u8 pkt_count;
u8 max_ds_count;
+ u8 complete;
+ u8 inline_on;
};
struct mlx5e_xdpsq;
@@ -429,7 +432,6 @@ struct mlx5e_xdpsq {
/* dirtied @completion */
u32 xdpi_fifo_cc;
u16 cc;
- bool redirect_flush;
/* dirtied @xmit */
u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
@@ -462,10 +464,10 @@ struct mlx5e_xdpsq {
struct mlx5e_icosq {
/* data path */
+ u16 cc;
+ u16 pc;
- /* dirtied @xmit */
- u16 pc ____cacheline_aligned_in_smp;
-
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
struct mlx5e_cq cq;
/* write@xmit, read@completion */
@@ -532,7 +534,8 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
enum mlx5e_rq_flag {
- MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
+ MLX5E_RQ_FLAG_XDP_XMIT,
+ MLX5E_RQ_FLAG_XDP_REDIRECT,
};
struct mlx5e_rq_frag_info {
@@ -563,8 +566,10 @@ struct mlx5e_rq {
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
u16 num_strides;
+ u16 actual_wq_head;
u8 log_stride_sz;
- bool umr_in_progress;
+ u8 umr_in_progress;
+ u8 umr_last_bulk;
} mpwqe;
};
struct {
@@ -769,12 +774,12 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi);
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
@@ -858,6 +863,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
* switching channels
*/
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_hw_modify hw_modify);
@@ -885,6 +891,53 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
+static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_ETH(mdev, swp) &&
+ MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
+}
+
+struct mlx5e_swp_spec {
+ __be16 l3_proto;
+ u8 l4_proto;
+ u8 is_tun;
+ __be16 tun_l3_proto;
+ u8 tun_l4_proto;
+};
+
+static inline void
+mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
+ struct mlx5e_swp_spec *swp_spec)
+{
+ /* SWP offsets are in 2-bytes words */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+ if (swp_spec->l4_proto) {
+ eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
+ if (swp_spec->l4_proto == IPPROTO_UDP)
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+ }
+
+ if (swp_spec->is_tun) {
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
+ eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+ if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ }
+ switch (swp_spec->tun_l4_proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ /* fall through */
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ }
+}
+
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
@@ -929,7 +982,7 @@ void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
*/
wmb();
- mlx5_write64((__be32 *)ctrl, uar_map, NULL);
+ mlx5_write64((__be32 *)ctrl, uar_map);
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
@@ -1041,6 +1094,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
new file mode 100644
index 000000000000..d3744bffbae3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "en/params.h"
+
+u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
+{
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ u32 frag_sz;
+
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
+
+ if (params->xdp_prog && frag_sz < PAGE_SIZE)
+ frag_sz = PAGE_SIZE;
+
+ return frag_sz;
+}
+
+u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
+{
+ u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+
+ return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+}
+
+bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params)
+{
+ u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+
+ return !params->lro_en && frag_sz <= PAGE_SIZE;
+}
+
+#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
+ MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
+bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+ s8 signed_log_num_strides_param;
+ u8 log_num_strides;
+
+ if (!mlx5e_rx_is_linear_skb(params))
+ return false;
+
+ if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+ return false;
+
+ if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
+ return true;
+
+ log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
+ signed_log_num_strides_param =
+ (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+
+ return signed_log_num_strides_param >= 0;
+}
+
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
+{
+ u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params);
+
+ /* Numbers are unsigned, don't subtract to avoid underflow. */
+ if (params->log_rq_mtu_frames <
+ log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+
+ return params->log_rq_mtu_frames - log_pkts_per_wqe;
+}
+
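The comparison in mlx5e_mpwqe_get_log_rq_size() exists because the operands are unsigned: subtracting first and clamping afterwards could wrap around. A tiny stand-alone demonstration with invented values:

/* Demonstration of the unsigned-underflow pitfall the comparison above
 * avoids; all values are invented.
 */
#include <stdio.h>

int main(void)
{
    unsigned char log_rq_mtu_frames = 7;  /* requested ring size (log2)     */
    unsigned char log_pkts_per_wqe  = 6;  /* packets per MPWQE (log2)       */
    unsigned char min_log_rq_mpw    = 4;  /* minimum MPWQE ring size (log2) */
    unsigned char safe;

    /* driver-style check: compare before subtracting */
    if (log_rq_mtu_frames < log_pkts_per_wqe + min_log_rq_mpw)
        safe = min_log_rq_mpw;
    else
        safe = log_rq_mtu_frames - log_pkts_per_wqe;

    /* the naive order would wrap: e.g. (unsigned char)(3 - 6) == 253 */
    printf("clamped log_rq_size = %u, naive 3-6 wraps to %u\n",
           (unsigned)safe, (unsigned)(unsigned char)(3 - 6));
    return 0;
}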
+u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
+
+ return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+}
+
+u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return MLX5_MPWRQ_LOG_WQE_SZ -
+ mlx5e_mpwqe_get_log_stride_size(mdev, params);
+}
+
+u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ bool is_linear_skb;
+
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
+ mlx5e_rx_is_linear_skb(params) :
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
+
+ return is_linear_skb ? linear_rq_headroom : 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
new file mode 100644
index 000000000000..b106a0236f36
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_PARAMS_H__
+#define __MLX5_EN_PARAMS_H__
+
+#include "en.h"
+
+u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params);
+u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params);
+bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params);
+bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params);
+u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+
+#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
if (!eproto)
return -EINVAL;
- if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
- return -EOPNOTSUPP;
-
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index eac245a93f91..633b117eb13e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
return err;
}
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301 + 2.16 * len [m]) * speed [Gbps] + 2.72 * MTU [B])
+ * minimum speed value is 40Gbps
+ */
static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
{
u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
- if (err) {
- mlx5_core_warn(priv->mdev, "cannot get port speed\n");
- return 0;
- }
+ if (err)
+ speed = SPEED_40000;
+ speed = max_t(u32, speed, SPEED_40000);
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
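The line above is the integer form of the xoff formula in the comment: 2.16 * len becomes 216 * len / 100, the link speed (reported in Mbps) is divided by 1000 to get Gbps, 2.72 * MTU becomes 272 * mtu / 100, and the speed is clamped to the 40 Gbps floor. A stand-alone sketch with example inputs (not driver code):

/* Units follow the comment above: cable length in meters, speed in Mbps as
 * reported by mlx5e_port_linkspeed(), MTU in bytes; 40000 Mbps is the
 * assumed floor. Input values are examples only.
 */
#include <stdio.h>

static unsigned int calc_xoff(unsigned int speed_mbps,
                              unsigned int cable_len_m,
                              unsigned int mtu)
{
    if (speed_mbps < 40000)
        speed_mbps = 40000;   /* clamp to the 40 Gbps minimum */

    /* (301 + 2.16 * len) * speed[Gbps] + 2.72 * mtu, in integer arithmetic */
    return (301 + 216 * cable_len_m / 100) * speed_mbps / 1000 +
           272 * mtu / 100;
}

int main(void)
{
    printf("xoff(100G, 7 m, mtu 9216) = %u bytes\n", calc_xoff(100000, 7, 9216));
    printf("xoff(10G clamped, 3 m)    = %u bytes\n", calc_xoff(10000, 3, 1500));
    return 0;
}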
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
}
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
- u32 xoff, unsigned int mtu)
+ u32 xoff, unsigned int max_mtu)
{
int i;
@@ -154,36 +155,37 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
if (port_buffer->buffer[i].size <
- (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+ (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
return -ENOMEM;
port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
- port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
+ port_buffer->buffer[i].xon =
+ port_buffer->buffer[i].xoff - max_mtu;
}
return 0;
}
/**
- * update_buffer_lossy()
- * mtu: device's MTU
- * pfc_en: <input> current pfc configuration
- * buffer: <input> current prio to buffer mapping
- * xoff: <input> xoff value
- * port_buffer: <output> port receive buffer configuration
- * change: <output>
+ * update_buffer_lossy - Update buffer configuration based on pfc
+ * @max_mtu: netdev's max_mtu
+ * @pfc_en: <input> current pfc configuration
+ * @buffer: <input> current prio to buffer mapping
+ * @xoff: <input> xoff value
+ * @port_buffer: <output> port receive buffer configuration
+ * @change: <output>
*
- * Update buffer configuration based on pfc configuraiton and priority
- * to buffer mapping.
- * Buffer's lossy bit is changed to:
- * lossless if there is at least one PFC enabled priority mapped to this buffer
- * lossy if all priorities mapped to this buffer are PFC disabled
+ * Update buffer configuration based on pfc configuration and
+ * priority-to-buffer mapping.
+ * Buffer's lossy bit is changed to:
+ *   lossless - if there is at least one PFC-enabled priority
+ *   mapped to this buffer;
+ *   lossy - if all priorities mapped to this buffer are PFC-disabled.
*
- * Return:
- * Return 0 if no error.
- * Set change to true if buffer configuration is modified.
+ * @return: 0 if no error,
+ * sets change to true if buffer configuration was modified.
*/
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
struct mlx5e_port_buffer *port_buffer,
bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
}
if (changed) {
- err = update_xoff_threshold(port_buffer, xoff, mtu);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu);
if (err)
return err;
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
return 0;
}
+#define MINIMUM_MAX_MTU 9216
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
bool update_prio2buffer = false;
u8 buffer[MLX5E_MAX_PRIORITY];
bool update_buffer = false;
+ unsigned int max_mtu;
u32 total_used = 0;
u8 curr_pfc_en;
int err;
int i;
mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+ max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
err = mlx5e_port_query_buffer(priv, &port_buffer);
if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
&port_buffer, &update_buffer);
if (err)
return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
- &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+ xoff, &port_buffer, &update_buffer);
if (err)
return err;
}
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return -EINVAL;
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
/* Need to update buffer configuration if xoff value is changed */
if (!update_buffer && xoff != priv->dcbx.xoff) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
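For the lossy/lossless rule documented in update_buffer_lossy() above, here is a minimal stand-alone illustration: a buffer becomes lossless if at least one PFC-enabled priority maps to it, lossy otherwise. Eight priorities and buffers are assumed and the mapping is invented:

/* Not driver code: a toy evaluation of the priority-to-buffer mapping. */
#include <stdio.h>

#define MAX_PRIORITY 8
#define MAX_BUFFER   8

int main(void)
{
    unsigned char pfc_en = 0x0c;   /* PFC enabled on priorities 2 and 3 */
    unsigned char prio2buffer[MAX_PRIORITY] = { 0, 0, 1, 1, 2, 2, 3, 3 };
    int buf, prio;

    for (buf = 0; buf < MAX_BUFFER; buf++) {
        int lossless = 0;

        for (prio = 0; prio < MAX_PRIORITY; prio++)
            if (prio2buffer[prio] == buf && (pfc_en & (1 << prio)))
                lossless = 1;

        printf("buffer %d: %s\n", buf, lossless ? "lossless" : "lossy");
    }
    return 0;
}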
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9d38e62cdf24..476dd97f7f2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
{
- int err;
+ int err = 0;
rtnl_lock();
mutex_lock(&priv->state_lock);
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ err = mlx5e_safe_reopen_channels(priv);
+
+out:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index fa2a3c444cdc..fe5d4d7f15ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!(mlx5e_eswitch_rep(*out_dev) &&
+ mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+ return -EOPNOTSUPP;
+
return 0;
}
@@ -70,7 +74,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
if (ret)
return ret;
- if (mlx5_lag_is_multipath(mdev) && !rt->rt_gateway)
+ if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
return -ENETUNREACH;
#else
return -EOPNOTSUPP;
@@ -96,7 +100,7 @@ static const char *mlx5e_netdev_kind(struct net_device *dev)
if (dev->rtnl_link_ops)
return dev->rtnl_link_ops->kind;
else
- return "";
+ return "unknown";
}
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
@@ -636,8 +640,10 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
headers_c, headers_v);
} else {
netdev_warn(priv->netdev,
- "decapsulation offload is not supported for %s net device (%d)\n",
- mlx5e_netdev_kind(filter_dev), tunnel_type);
+ "decapsulation offload is not supported for %s (kind: \"%s\")\n",
+ netdev_name(filter_dev),
+ mlx5e_netdev_kind(filter_dev));
+
return -EOPNOTSUPP;
}
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 03b2a9f9c589..eb8ef78e5626 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -33,6 +33,26 @@
#include <linux/bpf_trace.h>
#include "en/xdp.h"
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+{
+ int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+
+ /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
+ * The condition checked in mlx5e_rx_is_linear_skb is:
+ * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1)
+ * (Note that hw_mtu == sw_mtu + hard_mtu.)
+ * What is returned from this function is:
+ * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2)
+ * After assigning sw_mtu := max_mtu, the left side of (1) turns to
+ * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
+ * because both PAGE_SIZE and S are already aligned. Any number greater
+ * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
+ * so max_mtu is the maximum MTU allowed.
+ */
+
+ return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
+}
+
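A quick numeric check of the derivation in the comment above, under assumed example constants (4 KiB page, 64-byte cache-line alignment, 320-byte skb_shared_info, 256-byte XDP headroom, NET_IP_ALIGN of 2 and an 18-byte hard_mtu); the real bound comes from MLX5E_HW2SW_MTU()/SKB_MAX_HEAD() in the kernel:

/* Stand-alone check that condition (1) holds with equality at max_mtu. */
#include <stdio.h>

#define PAGE_SZ   4096u
#define ALIGN_TO  64u
#define SHINFO    320u
#define HR        (2u + 256u)   /* NET_IP_ALIGN + XDP_PACKET_HEADROOM */
#define HARD_MTU  18u

static unsigned int align_up(unsigned int x)
{
    return (x + ALIGN_TO - 1) & ~(ALIGN_TO - 1);
}

int main(void)
{
    unsigned int S = align_up(SHINFO);
    unsigned int max_mtu = PAGE_SZ - S - HR - HARD_MTU;         /* formula (2)      */
    unsigned int lhs = align_up(max_mtu + HARD_MTU + HR) + S;   /* left side of (1) */

    printf("max_mtu = %u, condition (1): %u <= %u -> %s\n",
           max_mtu, lhs, PAGE_SZ, lhs <= PAGE_SZ ? "holds" : "violated");
    return 0;
}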
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
struct xdp_buff *xdp)
@@ -85,7 +105,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
if (unlikely(err))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
- rq->xdpsq.redirect_flush = true;
+ __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
mlx5e_page_dma_unmap(rq, di);
rq->stats->xdp_redirect++;
return true;
@@ -105,6 +125,7 @@ xdp_abort:
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
u8 wqebbs;
u16 pi;
@@ -112,7 +133,9 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);
prefetchw(session->wqe->data);
- session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ session->pkt_count = 0;
+ session->complete = 0;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -131,6 +154,10 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
MLX5E_XDP_MPW_MAX_WQEBBS);
session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+
+ mlx5e_xdp_update_inline_state(sq);
+
+ stats->mpwqe++;
}
static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
@@ -147,7 +174,7 @@ static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
- wi->num_ds = ds_count - MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ wi->num_pkts = session->pkt_count;
sq->pc += wi->num_wqebbs;
@@ -162,11 +189,9 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
- dma_addr_t dma_addr = xdpi->dma_addr;
struct xdp_frame *xdpf = xdpi->xdpf;
- unsigned int dma_len = xdpf->len;
- if (unlikely(sq->hw_mtu < dma_len)) {
+ if (unlikely(sq->hw_mtu < xdpf->len)) {
stats->err++;
return false;
}
@@ -183,9 +208,10 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
mlx5e_xdp_mpwqe_session_start(sq);
}
- mlx5e_xdp_mpwqe_add_dseg(sq, dma_addr, dma_len);
+ mlx5e_xdp_mpwqe_add_dseg(sq, xdpi, stats);
- if (unlikely(session->ds_count == session->max_ds_count))
+ if (unlikely(session->complete ||
+ session->ds_count == session->max_ds_count))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
@@ -249,12 +275,33 @@ static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *
return true;
}
+static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
+ struct mlx5e_xdp_wqe_info *wi,
+ struct mlx5e_rq *rq,
+ bool recycle)
+{
+ struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+ u16 i;
+
+ for (i = 0; i < wi->num_pkts; i++) {
+ struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+ if (rq) {
+ /* XDP_TX */
+ mlx5e_page_release(rq, &xdpi.di, recycle);
+ } else {
+ /* XDP_REDIRECT */
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame(xdpi.xdpf);
+ }
+ }
+}
+
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
- struct mlx5e_xdp_info_fifo *xdpi_fifo;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
- bool is_redirect;
u16 sqcc;
int i;
@@ -267,9 +314,6 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
if (!cqe)
return false;
- is_redirect = !rq;
- xdpi_fifo = &sq->db.xdpi_fifo;
-
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
*/
@@ -291,7 +335,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
do {
struct mlx5e_xdp_wqe_info *wi;
- u16 ci, j;
+ u16 ci;
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
@@ -299,19 +343,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
sqcc += wi->num_wqebbs;
- for (j = 0; j < wi->num_ds; j++) {
- struct mlx5e_xdp_info xdpi =
- mlx5e_xdpi_fifo_pop(xdpi_fifo);
-
- if (is_redirect) {
- xdp_return_frame(xdpi.xdpf);
- dma_unmap_single(sq->pdev, xdpi.dma_addr,
- xdpi.xdpf->len, DMA_TO_DEVICE);
- } else {
- /* Recycle RX page */
- mlx5e_page_release(rq, &xdpi.di, true);
- }
- }
+ mlx5e_free_xdpsq_desc(sq, wi, rq, true);
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
@@ -328,31 +360,16 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
- struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
- bool is_redirect = !rq;
-
while (sq->cc != sq->pc) {
struct mlx5e_xdp_wqe_info *wi;
- u16 ci, i;
+ u16 ci;
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
wi = &sq->db.wqe_info[ci];
sq->cc += wi->num_wqebbs;
- for (i = 0; i < wi->num_ds; i++) {
- struct mlx5e_xdp_info xdpi =
- mlx5e_xdpi_fifo_pop(xdpi_fifo);
-
- if (is_redirect) {
- xdp_return_frame(xdpi.xdpf);
- dma_unmap_single(sq->pdev, xdpi.dma_addr,
- xdpi.xdpf->len, DMA_TO_DEVICE);
- } else {
- /* Recycle RX page */
- mlx5e_page_release(rq, &xdpi.di, false);
- }
- }
+ mlx5e_free_xdpsq_desc(sq, wi, rq, false);
}
}
@@ -419,9 +436,9 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
mlx5e_xmit_xdp_doorbell(xdpsq);
- if (xdpsq->redirect_flush) {
+ if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
xdp_do_flush_map();
- xdpsq->redirect_flush = false;
+ __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index ee27a7c8cd87..8b537a4b0840 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -34,13 +34,12 @@
#include "en.h"
-#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
- MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
void *va, u16 *rx_headroom, u32 *len);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
@@ -75,16 +74,68 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
}
}
+/* Enable inline WQEs to shift some load from a congested HCA (HW) to
+ * a less congested cpu (SW).
+ */
+static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
+{
+ u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+
+#define MLX5E_XDP_INLINE_WATERMARK_LOW 10
+#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128
+
+ if (session->inline_on) {
+ if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
+ session->inline_on = 0;
+ return;
+ }
+
+ /* inline is false */
+ if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
+ session->inline_on = 1;
+}
+
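The helper above implements a simple hysteresis: inlining switches on once the number of outstanding XDP descriptors reaches the high watermark and switches off only after the queue drains back to the low watermark, so the state does not flap around a single threshold. A toy stand-alone simulation (traffic pattern invented, watermarks matching the #defines above):

/* Not driver code: replay of the hysteresis over a made-up sample series. */
#include <stdio.h>

#define WM_LOW  10
#define WM_HIGH 128

int main(void)
{
    unsigned int samples[] = { 5, 60, 130, 90, 20, 9, 200 };
    unsigned int i;
    int inline_on = 0;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        unsigned int outstanding = samples[i];

        if (inline_on) {
            if (outstanding <= WM_LOW)
                inline_on = 0;
        } else if (outstanding >= WM_HIGH) {
            inline_on = 1;
        }
        printf("outstanding=%3u -> inline_on=%d\n", outstanding, inline_on);
    }
    return 0;
}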
static inline void
-mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, dma_addr_t dma_addr, u16 dma_len)
+mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi,
+ struct mlx5e_xdpsq_stats *stats)
{
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ dma_addr_t dma_addr = xdpi->dma_addr;
+ struct xdp_frame *xdpf = xdpi->xdpf;
struct mlx5_wqe_data_seg *dseg =
- (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count++;
+ (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
+ u16 dma_len = xdpf->len;
+ session->pkt_count++;
+
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
+
+ if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
+ struct mlx5_wqe_inline_seg *inline_dseg =
+ (struct mlx5_wqe_inline_seg *)dseg;
+ u16 ds_len = sizeof(*inline_dseg) + dma_len;
+ u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);
+
+ if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) {
+ /* Not enough space for inline wqe, send with memory pointer */
+ session->complete = true;
+ goto no_inline;
+ }
+
+ inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
+ memcpy(inline_dseg->data, xdpf->data, dma_len);
+
+ session->ds_count += ds_cnt;
+ stats->inlnw++;
+ return;
+ }
+
+no_inline:
dseg->addr = cpu_to_be64(dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
dseg->lkey = sq->mkey_be;
+ session->ds_count++;
}
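For the inline branch above, the descriptor cost is ds_cnt = DIV_ROUND_UP(inline header + frame length, DS unit size), versus a single data segment on the pointer path. The sketch below walks a few example frame lengths through that sizing; the 16-byte DS unit and 4-byte inline segment header (hence the 252-byte threshold) are assumed values:

/* Stand-alone walk-through of the inline descriptor sizing; not driver code. */
#include <stdio.h>

#define DS_UNIT     16u
#define INLINE_HDR  4u
#define INLINE_MAX  (256u - INLINE_HDR)

int main(void)
{
    unsigned int lens[] = { 60, 128, 252, 253 };
    unsigned int i;

    for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
        unsigned int len = lens[i];

        if (len <= INLINE_MAX) {
            unsigned int ds_len = INLINE_HDR + len;
            unsigned int ds_cnt = (ds_len + DS_UNIT - 1) / DS_UNIT;

            printf("len %3u: inlined, %u DS units\n", len, ds_cnt);
        } else {
            printf("len %3u: sent by pointer, 1 DS unit\n", len);
        }
    }
    return 0;
}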
static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
@@ -111,5 +162,4 @@ mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 1dd225380a66..6da7c88742dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -40,6 +40,57 @@
#include "en_accel/tls_rxtx.h"
#include "en.h"
+#if IS_ENABLED(CONFIG_GENEVE)
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+ return mlx5_tx_swp_supported(mdev);
+}
+
+static inline void
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_swp_spec swp_spec = {};
+ unsigned int offset = 0;
+ __be16 l3_proto;
+ u8 l4_proto;
+
+ l3_proto = vlan_get_protocol(skb);
+ switch (l3_proto) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
+ break;
+ default:
+ return;
+ }
+
+ if (l4_proto != IPPROTO_UDP ||
+ udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
+ return;
+ swp_spec.l3_proto = l3_proto;
+ swp_spec.l4_proto = l4_proto;
+ swp_spec.is_tun = true;
+ if (inner_ip_hdr(skb)->version == 6) {
+ swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+ swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
+ } else {
+ swp_spec.tun_l3_proto = htons(ETH_P_IP);
+ swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
+ }
+
+ mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+}
+
+#else
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+#endif /* CONFIG_GENEVE */
+
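The GENEVE TX path above only engages the software parser when the outer header is UDP with destination port 6081, the default GENEVE port. A stand-alone sketch of that check on an assumed, already-parsed outer header (the struct is invented; 6081 and 4789 are the well-known GENEVE and VXLAN ports):

/* Not driver code: the port/protocol test applied to two example headers. */
#include <stdio.h>

#define PROTO_UDP        17
#define GENEVE_UDP_DPORT 6081

struct outer_hdr {
    int proto;             /* outer L4 protocol number          */
    unsigned short dport;  /* UDP destination port (host order) */
};

static int is_geneve_candidate(const struct outer_hdr *h)
{
    return h->proto == PROTO_UDP && h->dport == GENEVE_UDP_DPORT;
}

int main(void)
{
    struct outer_hdr geneve = { PROTO_UDP, GENEVE_UDP_DPORT };
    struct outer_hdr vxlan  = { PROTO_UDP, 4789 };

    printf("udp/6081 -> %s\n", is_geneve_candidate(&geneve) ? "set SWP" : "skip");
    printf("udp/4789 -> %s\n", is_geneve_candidate(&vxlan)  ? "set SWP" : "skip");
    return 0;
}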
static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 53608afd39b6..0dd17514caae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -136,7 +136,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg, u8 mode,
struct xfrm_offload *xo)
{
- u8 proto;
+ struct mlx5e_swp_spec swp_spec = {};
/* Tunnel Mode:
* SWP: OutL3 InL3 InL4
@@ -146,35 +146,23 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* SWP: OutL3 InL4
* InL3
* Pkt: MAC IP ESP L4
- *
- * Offsets are in 2-byte words, counting from start of frame
*/
- eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
-
- if (mode == XFRM_MODE_TUNNEL) {
- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ swp_spec.l3_proto = skb->protocol;
+ swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
+ if (swp_spec.is_tun) {
if (xo->proto == IPPROTO_IPV6) {
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- proto = inner_ipv6_hdr(skb)->nexthdr;
+ swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+ swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
} else {
- proto = inner_ip_hdr(skb)->protocol;
+ swp_spec.tun_l3_proto = htons(ETH_P_IP);
+ swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
}
} else {
- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- proto = xo->proto;
- }
- switch (proto) {
- case IPPROTO_UDP:
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- /* Fall through */
- case IPPROTO_TCP:
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
- break;
+ swp_spec.tun_l3_proto = skb->protocol;
+ swp_spec.tun_l4_proto = xo->proto;
}
+
+ mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index be137d4a9169..439bf5953885 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -181,7 +181,6 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
*/
nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->xmit_more = 1;
nskb->queue_mapping = skb->queue_mapping;
}
@@ -248,7 +247,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+ mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
mlx5e_sq_fetch_wqe(sq, wqe, pi);
return skb;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
if (err)
return err;
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return 0;
}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
}
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+ mutex_init(&mdev->mlx5e_res.td.list_lock);
return 0;
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
- int err = -ENOMEM;
+ int err = 0;
u32 tirn = 0;
int inlen;
void *in;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ if (!in) {
+ err = -ENOMEM;
goto out;
+ }
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a0987cc5fe4a..7efaa58ae034 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
- unsigned long *advertising_modes,
- u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+ u32 eth_proto_cap, bool ext)
{
unsigned long proto_cap = eth_proto_cap;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+ table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+ max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+ ARRAY_SIZE(ptys2legacy_ethtool_table);
+
for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(advertising_modes, advertising_modes,
table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
- u8 tx_pause, u8 rx_pause,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+ struct ethtool_link_ksettings *link_ksettings,
+ bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+ ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+ bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
- ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
u8 an_disable_admin;
u8 an_status;
u8 connector_type;
+ bool admin_ext;
bool ext;
int err;
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
eth_proto_capability);
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_admin);
+ /* Fields: eth_proto_admin and ext_eth_proto_admin are
+ * mutually exclusive. Hence try reading legacy advertising
+ * when extended advertising is zero.
+ * admin_ext indicates how eth_proto_admin should be
+ * interpreted
+ */
+ admin_ext = ext;
+ if (ext && !eth_proto_admin) {
+ eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+ eth_proto_admin);
+ admin_ext = false;
+ }
+
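The fallback above can be read as: when extended PTYS is supported but ext_eth_proto_admin is empty, use the legacy eth_proto_admin mask and remember to interpret it as legacy. A small stand-alone sketch with made-up masks:

/* Not driver code: the mutually exclusive admin fields and the fallback. */
#include <stdio.h>

int main(void)
{
    int ext_supported = 1;              /* PCAM: extended PTYS supported */
    unsigned int ext_admin = 0x0;       /* ext_eth_proto_admin (empty)   */
    unsigned int legacy_admin = 0x4000; /* eth_proto_admin (example bit) */
    unsigned int admin = ext_supported ? ext_admin : legacy_admin;
    int admin_ext = ext_supported;

    if (ext_supported && !ext_admin) {  /* fall back to the legacy mask */
        admin = legacy_admin;
        admin_ext = 0;
    }

    printf("admin mask = 0x%x, interpreted as %s\n",
           admin, admin_ext ? "extended" : "legacy");
    return 0;
}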
eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_oper);
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
get_supported(mdev, eth_proto_cap, link_ksettings);
- get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+ get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+ admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
- ext_requested = (link_ksettings->link_modes.advertising[0] >
- MLX5E_PTYS_EXT);
+ ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+ MLX5E_PTYS_EXT ||
+ link_ksettings->link_modes.advertising[1]);
ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
- /*when ptys_extended_ethernet is set legacy link modes are deprecated */
- if (ext_requested != ext_supported)
- return -EPROTONOSUPPORT;
+ ext_requested &= ext_supported;
speed = link_ksettings->base.speed;
ethtool2ptys_adver_func = ext_requested ?
mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
- err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
mlx5_toggle_port_link(mdev);
out:
@@ -1545,7 +1561,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *dev = priv->mdev;
int size_read = 0;
- u8 data[4];
+ u8 data[4] = {0};
size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
if (size_read < 2)
@@ -1555,22 +1571,22 @@ static int mlx5e_get_module_info(struct net_device *netdev,
switch (data[0]) {
case MLX5_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
/* data[1] = revision id */
if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLX5_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
break;
default:
netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
@@ -1752,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
struct mlx5e_channel *c;
int i;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+ priv->channels.params.xdp_prog)
return 0;
for (i = 0; i < channels->num; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b5fdbd3190d9..457cc39423f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
+#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
@@ -43,6 +44,7 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
@@ -53,6 +55,7 @@
#include "lib/eq.h"
#include "en/monitor_stats.h"
#include "en/reporter.h"
+#include "en/params.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -101,108 +104,9 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
return true;
}
-static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
-{
- u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 linear_rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- u32 frag_sz;
-
- linear_rq_headroom += NET_IP_ALIGN;
-
- frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
-
- if (params->xdp_prog && frag_sz < PAGE_SIZE)
- frag_sz = PAGE_SIZE;
-
- return frag_sz;
-}
-
-static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
-{
- u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
-
- return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
-}
-
-static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
-
- return !params->lro_en && frag_sz <= PAGE_SIZE;
-}
-
-#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
- MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
-static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
- s8 signed_log_num_strides_param;
- u8 log_num_strides;
-
- if (!mlx5e_rx_is_linear_skb(mdev, params))
- return false;
-
- if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
- return false;
-
- if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
- return true;
-
- log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
- signed_log_num_strides_param =
- (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
-
- return signed_log_num_strides_param >= 0;
-}
-
-static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
-{
- if (params->log_rq_mtu_frames <
- mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
- return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
-
- return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
-}
-
-static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
- return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
-
- return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
-}
-
-static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- return MLX5_MPWRQ_LOG_WQE_SZ -
- mlx5e_mpwqe_get_log_stride_size(mdev, params);
-}
-
-static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u16 linear_rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- bool is_linear_skb;
-
- linear_rq_headroom += NET_IP_ALIGN;
-
- is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
- mlx5e_rx_is_linear_skb(mdev, params) :
- mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
-
- return is_linear_skb ? linear_rq_headroom : 0;
-}
-
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
- params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
params->log_rq_mtu_frames = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
@@ -469,7 +373,6 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
}
static int mlx5e_init_di_list(struct mlx5e_rq *rq,
- struct mlx5e_params *params,
int wq_sz, int cpu)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
@@ -597,7 +500,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_free;
}
- err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
+ err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
if (err)
goto err_free;
rq->post_wqes = mlx5e_post_rx_wqes;
@@ -615,7 +518,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_free;
}
- rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->mkey_be = c->mkey_be;
@@ -902,10 +805,14 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+ u16 head = wq->head;
+ int i;
- /* UMR WQE (if in progress) is always at wq->head */
- if (rq->mpwqe.umr_in_progress)
- rq->dealloc_wqe(rq, wq->head);
+ /* Outstanding UMR WQEs (in progress) start at wq->head */
+ for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+ rq->dealloc_wqe(rq, head);
+ head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+ }
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
@@ -951,7 +858,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (params->rx_dim_enabled)
__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+ /* We disable csum_complete when XDP is enabled since
+ * XDP programs might manipulate packets which will render
+ * skb->checksum incorrect.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
return 0;
@@ -966,16 +877,8 @@ err_free_rq:
static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
- struct mlx5e_icosq *sq = &rq->channel->icosq;
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *nopwqe;
-
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+ mlx5e_trigger_irq(&rq->channel->icosq);
}
static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -1087,7 +990,7 @@ static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
- u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
sizeof(*sq->db.ico_wqe)),
@@ -1523,7 +1426,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
dseg->lkey = sq->mkey_be;
wi->num_wqebbs = 1;
- wi->num_ds = 1;
+ wi->num_pkts = 1;
}
}
@@ -1891,7 +1794,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
- c->pdev = &priv->mdev->pdev->dev;
+ c->pdev = priv->mdev->device;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
@@ -2049,7 +1952,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
- if (mlx5e_rx_is_linear_skb(mdev, params)) {
+ if (mlx5e_rx_is_linear_skb(params)) {
int frag_stride;
frag_stride = mlx5e_rx_get_linear_frag_sz(params);
@@ -2103,6 +2006,13 @@ static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
return order_base_2(sz);
}
+static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
+{
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ return MLX5_GET(wq, wq, log_wq_sz);
+}
+
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_rq_param *param)
@@ -2137,7 +2047,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(mdev->device);
}
static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
@@ -2152,7 +2062,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
- param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(mdev->device);
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2164,7 +2074,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
- param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
@@ -2173,10 +2083,13 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ bool allow_swp;
+ allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
+ !!MLX5_IPSEC_DEV(priv->mdev);
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
+ MLX5_SET(sqc, sqc, allow_swp, allow_swp);
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -2266,13 +2179,28 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
}
+static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp)
+{
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return order_base_2(MLX5E_UMR_WQEBBS) +
+ mlx5e_get_rq_log_wq_sz(rqp->rqc);
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ }
+}
+
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
- u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ u8 icosq_log_wq_sz;
mlx5e_build_rq_param(priv, params, &cparam->rq);
+
+ icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
+
mlx5e_build_sq_param(priv, params, &cparam->sq);
mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
@@ -2328,14 +2256,18 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
mlx5e_activate_channel(chs->c[i]);
}
+#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
+
static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
int err = 0;
int i;
- for (i = 0; i < chs->num; i++)
- err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
- err ? 0 : 20000);
+ for (i = 0; i < chs->num; i++) {
+ int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
+
+ err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
+ }
return err ? -ETIMEDOUT : 0;
}
@@ -2632,7 +2564,7 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
@@ -2742,22 +2674,6 @@ free_in:
return err;
}
-static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types tt,
- u32 *tirc)
-{
- MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-
- mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
-
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
-
- mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
- &tirc_default_config[tt], tirc, true);
-}
-
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
{
@@ -2807,6 +2723,21 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
return 0;
}
+void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
+{
+ struct mlx5e_params *params = &priv->channels.params;
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_mtu;
+
+ /* MTU range: 68 - hw-specific max */
+ netdev->min_mtu = ETH_MIN_MTU;
+
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
+ ETH_MAX_MTU);
+}
+
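mlx5e_set_netdev_mtu_boundaries() pins min_mtu to the Ethernet minimum and derives max_mtu from the port's reported maximum, converted to a software MTU and capped at ETH_MAX_MTU. A rough stand-alone sketch with an assumed port maximum and an assumed 18-byte hardware overhead standing in for MLX5E_HW2SW_MTU():

/* Not driver code: example of the clamping with invented values. */
#include <stdio.h>

#define ETH_MIN_MTU_VAL 68u
#define ETH_MAX_MTU_VAL 65535u
#define HW_OVERHEAD     18u     /* assumed hard_mtu (L2 header + FCS) */

int main(void)
{
    unsigned int hw_max_mtu = 10240;   /* example value reported by the port */
    unsigned int sw_max = hw_max_mtu - HW_OVERHEAD;

    if (sw_max > ETH_MAX_MTU_VAL)
        sw_max = ETH_MAX_MTU_VAL;

    printf("netdev->min_mtu = %u\n", ETH_MIN_MTU_VAL);
    printf("netdev->max_mtu = %u\n", sw_max);
    return 0;
}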
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -2937,6 +2868,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
return 0;
}
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channels new_channels = {};
+
+ new_channels.params = priv->channels.params;
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
@@ -3046,8 +2985,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
- param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
- param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(mdev->device);
+ param->wq.db_numa_node = dev_to_node(mdev->device);
return mlx5e_alloc_cq_common(mdev, param, cq);
}
@@ -3155,32 +3094,42 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
-static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types tt,
- u32 *tirc)
+static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
+ u32 rqtn, u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ MLX5_SET(tirc, tirc, tunneled_offload_en,
+ priv->channels.params.tunneled_offload_en);
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
+}
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types tt,
+ u32 *tirc)
+{
+ mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
&tirc_default_config[tt], tirc, false);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
- MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-
- mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
-
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
+static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types tt,
+ u32 *tirc)
+{
+ mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
+ mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
+ &tirc_default_config[tt], tirc, true);
+}
+
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
struct mlx5e_tir *tir;
@@ -3763,9 +3712,9 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params.sw_mtu = new_mtu;
if (params->xdp_prog &&
- !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ !mlx5e_rx_is_linear_skb(&new_channels.params)) {
netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
- new_mtu, MLX5E_XDP_MAX_MTU);
+ new_mtu, mlx5e_xdp_max_mtu(params));
err = -EINVAL;
goto out;
}
@@ -4103,6 +4052,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
+
+#if IS_ENABLED(CONFIG_GENEVE)
+ /* Support Geneve offload for default UDP port */
+ if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
+ return features;
+#endif
}
out:
@@ -4161,11 +4116,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
if (!report_failed)
goto unlock;
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
+ err = mlx5e_safe_reopen_channels(priv);
if (err)
netdev_err(priv->netdev,
- "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
err);
unlock:
@@ -4199,9 +4153,10 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_channels.params = priv->channels.params;
new_channels.params.xdp_prog = prog;
- if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ if (!mlx5e_rx_is_linear_skb(&new_channels.params)) {
netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
- new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+ new_channels.params.sw_mtu,
+ mlx5e_xdp_max_mtu(&new_channels.params));
return -EINVAL;
}
@@ -4252,7 +4207,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
if (was_opened && reset)
- mlx5e_open_locked(netdev);
+ err = mlx5e_open_locked(netdev);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
@@ -4542,7 +4497,7 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
if (!slow_pci_heuristic(mdev) &&
mlx5e_striding_rq_possible(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
- !mlx5e_rx_is_linear_skb(mdev, params)))
+ !mlx5e_rx_is_linear_skb(params)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
@@ -4553,7 +4508,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
{
enum mlx5e_traffic_types tt;
- rss_params->hfunc = ETH_RSS_HASH_XOR;
+ rss_params->hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss_params->toeplitz_hash_key,
sizeof(rss_params->toeplitz_hash_key));
mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
@@ -4618,6 +4573,8 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
/* RSS */
mlx5e_build_rss_params(rss_params, params->num_channels);
+ params->tunneled_offload_en =
+ mlx5e_tunnel_inner_ft_supported(mdev);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4639,7 +4596,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
bool fcs_supported;
bool fcs_enabled;
- SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+ SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
@@ -4674,7 +4631,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4682,7 +4640,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}
- if (mlx5_vxlan_allowed(mdev->vxlan)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -4901,7 +4859,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_mtu;
mlx5e_init_l2_addr(priv);
@@ -4909,10 +4866,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (!netif_running(netdev))
mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
- /* MTU range: 68 - hw-specific max */
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+ mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index a66b6ed80b30..91e24f1cead8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -795,7 +795,8 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
+ if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
+ !is_vlan_dev(netdev))
return NOTIFY_OK;
switch (event) {
@@ -1374,6 +1375,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->num_tc = 1;
+ params->tunneled_offload_en = false;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
@@ -1389,7 +1391,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
if (rep->vport == MLX5_VPORT_UPLINK) {
- SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
+ SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
@@ -1623,13 +1625,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
{
- struct net_device *netdev = priv->netdev;
- struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_mtu;
-
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+ mlx5e_set_netdev_mtu_boundaries(priv);
}
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3dde5c7e0739..13133e7f088e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -409,14 +409,15 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle
mlx5e_page_release(rq, &dma_info[i], recycle);
}
-static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
+static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
{
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
- struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
- rq->mpwqe.umr_in_progress = false;
+ do {
+ u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
- mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ mlx5_wq_ll_push(wq, next_wqe_index);
+ } while (--n);
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
@@ -426,7 +427,7 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
{
- return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ return mlx5_wq_cyc_get_ctr_wrap_cnt(&sq->wq, sq->pc);
}
static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
@@ -478,8 +479,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
wi->consumed_strides = 0;
- rq->mpwqe.umr_in_progress = true;
-
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
@@ -487,7 +486,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->pc += MLX5E_UMR_WQEBBS;
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
+
+ sq->doorbell_cseg = &umr_wqe->ctrl;
return 0;
@@ -542,37 +542,13 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !!err;
}
-static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
- struct mlx5e_icosq *sq,
- struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
- struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
-
- mlx5_cqwq_pop(&cq->wq);
-
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
- netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
- return;
- }
-
- if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
- mlx5e_post_rx_mpwqe(rq);
- return;
- }
-
- if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
- netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
-}
-
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
+ u8 completed_umr = 0;
+ u16 sqcc;
+ int i;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
@@ -581,28 +557,96 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
if (likely(!cqe))
return;
- /* by design, there's only a single cqe */
- mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe);
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ i = 0;
+ do {
+ u16 wqe_counter;
+ bool last_wqe;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
+ break;
+ }
+ do {
+ struct mlx5e_sq_wqe_info *wi;
+ u16 ci;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.ico_wqe[ci];
+
+ if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
+ sqcc += MLX5E_UMR_WQEBBS;
+ completed_umr++;
+ } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
+ sqcc++;
+ } else {
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+ wi->opcode);
+ }
+
+ } while (!last_wqe);
+
+ } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+
+ sq->cc = sqcc;
mlx5_cqwq_update_db_record(&cq->wq);
+
+ if (likely(completed_umr)) {
+ mlx5e_post_rx_mpwqe(rq, completed_umr);
+ rq->mpwqe.umr_in_progress -= completed_umr;
+ }
}
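
The reworked mlx5e_poll_ico_cq() above drains up to a CQ poll budget of CQEs and, for each one, consumes every WQE up to the reported wqe_counter, counting completed UMRs so the RQ can repost that many MPWQEs in one call. A minimal userspace sketch of that two-level consumer loop, assuming invented ring contents and a stand-in wqe_info type (UMR_WQEBBS and the budget are example values, not the driver's):

/* Minimal userspace model of the two-level completion loop: one pass per
 * CQE, inner pass consumes WQEs up to the reported counter. Stand-in types
 * and example values only. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define OP_NOP      0
#define OP_UMR      1
#define UMR_WQEBBS  4   /* assumed size of a UMR WQE in basic blocks */
#define POLL_BUDGET 8

struct wqe_info { int opcode; };

int main(void)
{
    /* posted WQEs: a UMR at slot 0 (occupies UMR_WQEBBS slots), NOP at 4 */
    struct wqe_info db[16] = { [0] = { OP_UMR }, [4] = { OP_NOP } };
    uint16_t cqe_wqe_counter[] = { 0, 4 };   /* completions reported by "HW" */
    int ncqe = 2, completed_umr = 0;
    uint16_t sqcc = 0;                       /* consumer counter (sq->cc) */

    for (int i = 0; i < ncqe && i < POLL_BUDGET; i++) {
        bool last_wqe;

        do {
            struct wqe_info *wi = &db[sqcc % 16];

            last_wqe = (sqcc == cqe_wqe_counter[i]);
            if (wi->opcode == OP_UMR) {
                sqcc += UMR_WQEBBS;          /* a UMR spans several slots */
                completed_umr++;
            } else {
                sqcc++;                      /* a NOP consumes one slot */
            }
        } while (!last_wqe);
    }
    printf("consumer counter %u, completed UMRs %d\n",
           (unsigned)sqcc, completed_umr);
    return 0;
}
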
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
+ struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+ u8 missing, i;
+ u16 head;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
- mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);
+ mlx5e_poll_ico_cq(&sq->cq, rq);
+
+ missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
- if (mlx5_wq_ll_is_full(wq))
+ if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
+ rq->stats->congst_umr++;
+
+#define UMR_WQE_BULK (2)
+ if (likely(missing < UMR_WQE_BULK))
return false;
- if (!rq->mpwqe.umr_in_progress)
- mlx5e_alloc_rx_mpwqe(rq, wq->head);
- else
- rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;
+ head = rq->mpwqe.actual_wq_head;
+ i = missing;
+ do {
+ if (unlikely(mlx5e_alloc_rx_mpwqe(rq, head)))
+ break;
+ head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+ } while (--i);
+
+ rq->mpwqe.umr_last_bulk = missing - i;
+ if (sq->doorbell_cseg) {
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
+ sq->doorbell_cseg = NULL;
+ }
+
+ rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
+ rq->mpwqe.actual_wq_head = head;
return false;
}
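
mlx5e_post_rx_mpwqes() now posts UMR WQEs in bulks: it computes how many MPWQEs are missing beyond the UMRs already in flight, bails out below UMR_WQE_BULK, posts as many as allocation allows, and rings the ICOSQ doorbell once for the whole batch via the saved doorbell_cseg. A rough sketch of the bookkeeping with made-up numbers and a stand-in allocator (no hardware or ring helpers involved):

/* Bookkeeping sketch only: bulk threshold, partial post, single doorbell.
 * All numbers are invented; alloc_mpwqe() stands in for mlx5e_alloc_rx_mpwqe(). */
#include <stdio.h>

#define UMR_WQE_BULK 2
#define RING_SIZE    8

static int alloc_mpwqe(int head) { return head == 6 ? -1 : 0; }  /* slot 6 "fails" */

int main(void)
{
    int wq_missing = 5;       /* empty MPWQE slots in the RQ ring (example) */
    int umr_in_progress = 1;  /* UMRs already posted, not yet completed */
    int missing = wq_missing - umr_in_progress;
    int head = 4, i;

    if (missing < UMR_WQE_BULK)
        return 0;             /* below the bulk threshold: post nothing */

    i = missing;
    do {
        if (alloc_mpwqe(head))
            break;                         /* stop on allocation failure */
        head = (head + 1) % RING_SIZE;     /* next ring index, simplified */
    } while (--i);

    int last_bulk = missing - i;           /* how many UMRs were actually posted */
    umr_in_progress += last_bulk;
    /* the driver rings one ICOSQ doorbell here for the whole bulk */
    printf("posted %d, in progress %d, new head %d\n",
           last_bulk, umr_in_progress, head);
    return 0;
}
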
@@ -692,7 +736,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
{
*proto = ((struct ethhdr *)skb->data)->h_proto;
*proto = __vlan_get_protocol(skb, *proto, network_depth);
- return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+ if (*proto == htons(ETH_P_IP))
+ return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+ if (*proto == htons(ETH_P_IPV6))
+ return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+ return false;
}
static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
@@ -712,17 +763,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
rq->stats->ecn_mark += !!rc;
}
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
- const void *fcs_bytes;
- u32 _fcs_bytes;
-
- fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
- ETH_FCS_LEN, &_fcs_bytes);
-
- return __get_unaligned_cpu32(fcs_bytes);
-}
-
static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
void *ip_p = skb->data + network_depth;
@@ -733,6 +773,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+ struct mlx5e_rq_stats *stats)
+{
+ stats->csum_complete_tail_slow++;
+ skb->csum = csum_block_add(skb->csum,
+ skb_checksum(skb, offset, len, 0),
+ offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+ struct mlx5e_rq_stats *stats)
+{
+ u8 tail_padding[MAX_PADDING];
+ int len = skb->len - offset;
+ void *tail;
+
+ if (unlikely(len > MAX_PADDING)) {
+ tail_padding_csum_slow(skb, offset, len, stats);
+ return;
+ }
+
+ tail = skb_header_pointer(skb, offset, len, tail_padding);
+ if (unlikely(!tail)) {
+ tail_padding_csum_slow(skb, offset, len, stats);
+ return;
+ }
+
+ stats->csum_complete_tail++;
+ skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+ struct mlx5e_rq_stats *stats)
+{
+ struct ipv6hdr *ip6;
+ struct iphdr *ip4;
+ int pkt_len;
+
+ switch (proto) {
+ case htons(ETH_P_IP):
+ ip4 = (struct iphdr *)(skb->data + network_depth);
+ pkt_len = network_depth + ntohs(ip4->tot_len);
+ break;
+ case htons(ETH_P_IPV6):
+ ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+ pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+ break;
+ default:
+ return;
+ }
+
+ if (likely(pkt_len >= skb->len))
+ return;
+
+ tail_padding_csum(skb, pkt_len, stats);
+}
+
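
The helpers above restore a correct CHECKSUM_COMPLETE when the frame carries tail padding: the packet length is derived from the IP header, and the checksum of any bytes beyond it is folded into skb->csum (fast path for up to MAX_PADDING bytes, skb_checksum() otherwise). A standalone sketch of the arithmetic, using a plain ones'-complement sum and invented lengths rather than the kernel's csum helpers:

/* Standalone sketch: derive the IP packet length and checksum the tail
 * padding with a plain ones'-complement sum. Lengths are invented. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t ones_sum(uint32_t sum, const uint8_t *p, size_t len)
{
    while (len > 1) {
        sum += (uint32_t)p[0] << 8 | p[1];
        p += 2;
        len -= 2;
    }
    if (len)
        sum += (uint32_t)p[0] << 8;
    while (sum >> 16)                      /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    uint8_t frame[64] = { 0 };   /* short Ethernet frame, zero tail padding */
    int network_depth = 14;      /* Ethernet header length */
    int ip_tot_len = 28;         /* as read from iphdr->tot_len (example) */
    int frame_len = 60;          /* what the NIC delivered (ETH_ZLEN) */

    int pkt_len = network_depth + ip_tot_len;
    if (pkt_len >= frame_len) {
        printf("no tail padding, nothing to fold in\n");
        return 0;
    }

    uint32_t pad_csum = ones_sum(0, frame + pkt_len, frame_len - pkt_len);
    printf("padding bytes %d, their checksum 0x%04x\n",
           frame_len - pkt_len, pad_csum);
    return 0;
}
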
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -752,7 +854,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+ /* True when explicitly set via priv flag, or XDP prog is loaded */
+ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
goto csum_unnecessary;
/* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +883,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->csum = csum_partial(skb->data + ETH_HLEN,
network_depth - ETH_HLEN,
skb->csum);
- if (unlikely(netdev->features & NETIF_F_RXFCS))
- skb->csum = csum_block_add(skb->csum,
- (__force __wsum)mlx5e_get_fcs(skb),
- skb->len - ETH_FCS_LEN);
+
+ mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
stats->csum_complete++;
return;
}
csum_unnecessary:
if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
- ((cqe->hds_ip_ext & CQE_L4_OK) ||
- (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
+ (cqe->hds_ip_ext & CQE_L4_OK))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1a78e05cbba8..483d321d2151 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -59,10 +59,14 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
@@ -77,6 +81,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
@@ -87,7 +93,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
@@ -151,11 +156,15 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+ s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_xdp_drop += rq_stats->xdp_drop;
s->rx_xdp_redirect += rq_stats->xdp_redirect;
s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+ s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
+ s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
s->rx_xdp_tx_full += xdpsq_stats->full;
s->rx_xdp_tx_err += xdpsq_stats->err;
s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
@@ -166,7 +175,6 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
- s->rx_page_reuse += rq_stats->page_reuse;
s->rx_cache_reuse += rq_stats->cache_reuse;
s->rx_cache_full += rq_stats->cache_full;
s->rx_cache_empty += rq_stats->cache_empty;
@@ -181,6 +189,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->ch_eq_rearm += ch_stats->eq_rearm;
/* xdp redirect */
s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+ s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
+ s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
s->tx_xdp_full += xdpsq_red_stats->full;
s->tx_xdp_err += xdpsq_red_stats->err;
s->tx_xdp_cqes += xdpsq_red_stats->cqes;
@@ -1190,6 +1200,8 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
@@ -1206,7 +1218,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
@@ -1239,6 +1250,8 @@ static const struct counter_desc sq_stats_desc[] = {
static const struct counter_desc rq_xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -1246,6 +1259,8 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = {
static const struct counter_desc xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4640d4f986f8..cdddcc46971b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -71,10 +71,14 @@ struct mlx5e_sw_stats {
u64 rx_csum_unnecessary;
u64 rx_csum_none;
u64 rx_csum_complete;
+ u64 rx_csum_complete_tail;
+ u64 rx_csum_complete_tail_slow;
u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
u64 rx_xdp_redirect;
u64 rx_xdp_tx_xmit;
+ u64 rx_xdp_tx_mpwqe;
+ u64 rx_xdp_tx_inlnw;
u64 rx_xdp_tx_full;
u64 rx_xdp_tx_err;
u64 rx_xdp_tx_cqe;
@@ -89,6 +93,8 @@ struct mlx5e_sw_stats {
u64 tx_queue_wake;
u64 tx_cqe_err;
u64 tx_xdp_xmit;
+ u64 tx_xdp_mpwqe;
+ u64 tx_xdp_inlnw;
u64 tx_xdp_full;
u64 tx_xdp_err;
u64 tx_xdp_cqes;
@@ -99,7 +105,6 @@ struct mlx5e_sw_stats {
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
- u64 rx_page_reuse;
u64 rx_cache_reuse;
u64 rx_cache_full;
u64 rx_cache_empty;
@@ -181,6 +186,8 @@ struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
u64 csum_complete;
+ u64 csum_complete_tail;
+ u64 csum_complete_tail_slow;
u64 csum_unnecessary;
u64 csum_unnecessary_inner;
u64 csum_none;
@@ -197,7 +204,6 @@ struct mlx5e_rq_stats {
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
- u64 page_reuse;
u64 cache_reuse;
u64 cache_full;
u64 cache_empty;
@@ -237,6 +243,8 @@ struct mlx5e_sq_stats {
struct mlx5e_xdpsq_stats {
u64 xmit;
+ u64 mpwqe;
+ u64 inlnw;
u64 full;
u64 err;
/* dirtied @completion */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b4967a0ff8c7..122f457091a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -44,6 +44,7 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
+#include <net/ipv6_stubs.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -663,7 +664,8 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
- hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
+ hp->tirn, hp->pair->rqn[0],
+ dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
hpe->hp = hp;
@@ -700,7 +702,7 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
- hpe->hp->pair->peer_mdev->priv.name);
+ dev_name(hpe->hp->pair->peer_mdev->device));
mlx5e_hairpin_destroy(hpe->hp);
hash_del(&hpe->hairpin_hlist);
@@ -1437,6 +1439,26 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return 0;
}
+static void *get_match_headers_criteria(u32 flags,
+ struct mlx5_flow_spec *spec)
+{
+ return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+ MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ inner_headers) :
+ MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+}
+
+static void *get_match_headers_value(u32 flags,
+ struct mlx5_flow_spec *spec)
+{
+ return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+ MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ inner_headers) :
+ MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+}
+
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
@@ -1502,10 +1524,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
/* In decap flow, header pointers should point to the inner
* headers, outer header were already set by parse_tunnel_attr
*/
- headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- inner_headers);
+ headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
+ spec);
+ headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
+ spec);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -1520,11 +1542,23 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (match.mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
-
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
+ is_vlan_dev(filter_dev)) {
+ struct flow_dissector_key_vlan filter_dev_mask;
+ struct flow_dissector_key_vlan filter_dev_key;
struct flow_match_vlan match;
- flow_rule_match_vlan(rule, &match);
+ if (is_vlan_dev(filter_dev)) {
+ match.key = &filter_dev_key;
+ match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
+ match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
+ match.key->vlan_priority = 0;
+ match.mask = &filter_dev_mask;
+ memset(match.mask, 0xff, sizeof(*match.mask));
+ match.mask->vlan_priority = 0;
+ } else {
+ flow_rule_match_vlan(rule, &match);
+ }
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
@@ -1827,6 +1861,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct pedit_headers {
struct ethhdr eth;
+ struct vlan_hdr vlan;
struct iphdr ip4;
struct ipv6hdr ip6;
struct tcphdr tcp;
@@ -1873,38 +1908,73 @@ struct mlx5_fields {
u8 field;
u8 size;
u32 offset;
+ u32 match_offset;
};
-#define OFFLOAD(fw_field, size, field, off) \
- {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
+#define OFFLOAD(fw_field, size, field, off, match_field) \
+ {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
+ offsetof(struct pedit_headers, field) + (off), \
+ MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
+
+static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
+ void *matchmaskp, int size)
+{
+ bool same = false;
+
+ switch (size) {
+ case sizeof(u8):
+ same = ((*(u8 *)valp) & (*(u8 *)maskp)) ==
+ ((*(u8 *)matchvalp) & (*(u8 *)matchmaskp));
+ break;
+ case sizeof(u16):
+ same = ((*(u16 *)valp) & (*(u16 *)maskp)) ==
+ ((*(u16 *)matchvalp) & (*(u16 *)matchmaskp));
+ break;
+ case sizeof(u32):
+ same = ((*(u32 *)valp) & (*(u32 *)maskp)) ==
+ ((*(u32 *)matchvalp) & (*(u32 *)matchmaskp));
+ break;
+ }
+
+ return same;
+}
static struct mlx5_fields fields[] = {
- OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
- OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
- OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
- OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
- OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
-
- OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
- OFFLOAD(SIPV4, 4, ip4.saddr, 0),
- OFFLOAD(DIPV4, 4, ip4.daddr, 0),
-
- OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
- OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
- OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
- OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
- OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
- OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
- OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
- OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
- OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
-
- OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
- OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
- OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
-
- OFFLOAD(UDP_SPORT, 2, udp.source, 0),
- OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
+ OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
+ OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0, dmac_15_0),
+ OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
+ OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0, smac_15_0),
+ OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0, ethertype),
+ OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0, first_vid),
+
+ OFFLOAD(IP_TTL, 1, ip4.ttl, 0, ttl_hoplimit),
+ OFFLOAD(SIPV4, 4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ OFFLOAD(DIPV4, 4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+
+ OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
+ OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
+ OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
+ OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
+ OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
+ OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
+ OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
+ OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
+ OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),
+
+ OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport),
+ OFFLOAD(TCP_DPORT, 2, tcp.dest, 0, tcp_dport),
+ OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),
+
+ OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
+ OFFLOAD(UDP_DPORT, 2, udp.dest, 0, udp_dport),
};
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
@@ -1913,9 +1983,14 @@ static struct mlx5_fields fields[] = {
*/
static int offload_pedit_fields(struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
+ u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ void *headers_c = get_match_headers_criteria(*action_flags,
+ &parse_attr->spec);
+ void *headers_v = get_match_headers_value(*action_flags,
+ &parse_attr->spec);
int i, action_size, nactions, max_actions, first, last, next_z;
void *s_masks_p, *a_masks_p, *vals_p;
struct mlx5_fields *f;
@@ -1939,6 +2014,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
nactions = parse_attr->num_mod_hdr_actions;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ bool skip;
+
f = &fields[i];
/* avoid seeing bits set from previous iterations */
s_mask = 0;
@@ -1967,19 +2044,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return -EOPNOTSUPP;
}
+ skip = false;
if (s_mask) {
+ void *match_mask = headers_c + f->match_offset;
+ void *match_val = headers_v + f->match_offset;
+
cmd = MLX5_ACTION_TYPE_SET;
mask = s_mask;
vals_p = (void *)set_vals + f->offset;
+ /* don't rewrite if we have a match on the same value */
+ if (cmp_val_mask(vals_p, s_masks_p, match_val,
+ match_mask, f->size))
+ skip = true;
/* clear to denote we consumed this field */
memset(s_masks_p, 0, f->size);
} else {
+ u32 zero = 0;
+
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
+ /* add 0 is no change */
+ if (!memcmp(vals_p, &zero, f->size))
+ skip = true;
/* clear to denote we consumed this field */
memset(a_masks_p, 0, f->size);
}
+ if (skip)
+ continue;
field_bsize = f->size * BITS_PER_BYTE;
@@ -2026,6 +2118,15 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return 0;
}
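
The skip logic added to offload_pedit_fields() drops rewrites that cannot change anything: a SET whose masked value already equals the masked match value, and an ADD of zero, consume no modify-header slots. A small sketch of the masked comparison on plain 16-bit fields (the driver compares raw bytes at f->match_offset inside the FTE match buffers):

/* Sketch of the "skip redundant pedit" checks on plain 16-bit fields. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool cmp_val_mask16(uint16_t val, uint16_t mask,
                           uint16_t match_val, uint16_t match_mask)
{
    return (val & mask) == (match_val & match_mask);
}

int main(void)
{
    /* SET tcp_dport to 443 on a flow that already matches tcp_dport == 443 */
    uint16_t set_val = 443, set_mask = 0xffff;
    uint16_t match_val = 443, match_mask = 0xffff;
    bool skip_set = cmp_val_mask16(set_val, set_mask, match_val, match_mask);

    /* ADD of zero changes nothing either */
    uint16_t add_val = 0;
    bool skip_add = (add_val == 0);

    printf("skip set: %d, skip add: %d\n", skip_set, skip_add);
    return 0;
}
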
+static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
+ int namespace)
+{
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
+ else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+ return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
+}
+
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
struct pedit_headers_action *hdrs,
int namespace,
@@ -2037,11 +2138,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
- max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
- else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
- max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
-
+ max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
/* can get up to crazily 16 HW actions in 32 bits pedit SW key */
max_actions = min(max_actions, nkeys * 16);
@@ -2074,6 +2171,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
goto out_err;
}
+ if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The pedit offload action is not supported");
+ goto out_err;
+ }
+
mask = act->mangle.mask;
val = act->mangle.val;
offset = act->mangle.offset;
@@ -2092,6 +2195,7 @@ out_err:
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
+ u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *cmd_masks;
@@ -2104,7 +2208,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
goto out_err;
}
- err = offload_pedit_fields(hdrs, parse_attr, extack);
+ err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
@@ -2158,6 +2262,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
return true;
}
+struct ip_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+ __be16 payload_len;
+ __u8 nexthdr;
+ __u8 hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+ u32 mask, offset;
+ u8 htype;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+ mask = ~act->mangle.mask;
+ /* For IPv4 and IPv6 headers, check the 4-byte word to determine
+ * whether fields other than ttl and hop_limit are being modified.
+ */
+ if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+ struct ip_ttl_word *ttl_word =
+ (struct ip_ttl_word *)&mask;
+
+ if (offset != offsetof(struct iphdr, ttl) ||
+ ttl_word->protocol ||
+ ttl_word->check) {
+ return true;
+ }
+ } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ struct ipv6_hoplimit_word *hoplimit_word =
+ (struct ipv6_hoplimit_word *)&mask;
+
+ if (offset != offsetof(struct ipv6hdr, payload_len) ||
+ hoplimit_word->payload_len ||
+ hoplimit_word->nexthdr) {
+ return true;
+ }
+ }
+ return false;
+}
+
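
is_action_keys_supported() overlays a small struct on the inverted pedit mask to check whether a 4-byte mangle at the IPv4 ttl (or IPv6 hop_limit) offset touches any sibling field in the same word; if it does, the flow needs the extra modify-header checks. A userspace sketch of the overlay trick, with example mask bytes (the driver obtains the enabled bits as ~act->mangle.mask):

/* Userspace sketch of the struct-overlay check on the enabled mask bytes. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

struct ttl_word {        /* the 4-byte iphdr word holding ttl/protocol/check */
    uint8_t  ttl;
    uint8_t  protocol;
    uint16_t check;
};

/* true when the mangle touches more than ttl and so needs further checks */
static bool touches_more_than_ttl(uint32_t offset, uint32_t ttl_offset,
                                  uint32_t enabled_mask)
{
    struct ttl_word w;

    memcpy(&w, &enabled_mask, sizeof(w));  /* reinterpret the mask bytes */
    return offset != ttl_offset || w.protocol || w.check;
}

int main(void)
{
    uint8_t only_ttl[4]  = { 0xff, 0x00, 0x00, 0x00 };  /* first byte only */
    uint8_t ttl_proto[4] = { 0xff, 0xff, 0x00, 0x00 };  /* spills into protocol */
    uint32_t m1, m2;

    memcpy(&m1, only_ttl, 4);
    memcpy(&m2, ttl_proto, 4);

    printf("ttl only     -> more than ttl? %d\n", touches_more_than_ttl(8, 8, m1));
    printf("ttl+protocol -> more than ttl? %d\n", touches_more_than_ttl(8, 8, m2));
    return 0;
}
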
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
u32 actions,
@@ -2165,16 +2315,12 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct flow_action_entry *act;
bool modify_ip_header;
- u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
+ u8 ip_proto;
int i;
- if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
- else
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
-
+ headers_v = get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
@@ -2187,9 +2333,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
act->id != FLOW_ACTION_ADD)
continue;
- htype = act->mangle.htype;
- if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
- htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ if (is_action_keys_supported(act)) {
modify_ip_header = true;
break;
}
@@ -2222,7 +2366,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
actions = flow->nic_attr->action;
if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
- !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+ !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
+ (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)))
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -2247,6 +2392,74 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
+static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ u16 mask16 = VLAN_VID_MASK;
+ u16 val16 = act->vlan.vid & VLAN_VID_MASK;
+ const struct flow_action_entry pedit_act = {
+ .id = FLOW_ACTION_MANGLE,
+ .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
+ .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
+ .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
+ };
+ u8 match_prio_mask, match_prio_val;
+ void *headers_c, *headers_v;
+ int err;
+
+ headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
+ headers_v = get_match_headers_value(*action, &parse_attr->spec);
+
+ if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN rewrite action must have VLAN protocol match");
+ return -EOPNOTSUPP;
+ }
+
+ match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing VLAN prio is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
+ hdrs, NULL);
+ *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return err;
+}
+
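
add_vlan_rewrite_action() converts a VLAN_MANGLE into an Ethernet-header pedit on h_vlan_TCI, masking in only the 12 VID bits and requiring that the flow already match cvlan_tag with an unchanged priority. A tiny sketch of why masking by VLAN_VID_MASK preserves the PCP/DEI bits (the byte-order handling done by the driver is omitted here):

/* A VID-only rewrite of the 16-bit VLAN TCI keeps the PCP/DEI bits intact. */
#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK 0x0fff

int main(void)
{
    uint16_t tci = (5 << 13) | 100;   /* PCP 5, VID 100 */
    uint16_t new_vid = 200;

    tci = (tci & ~VLAN_VID_MASK) | (new_vid & VLAN_VID_MASK);
    printf("rewritten TCI: PCP %u, VID %u\n",
           (unsigned)(tci >> 13), (unsigned)(tci & VLAN_VID_MASK));
    return 0;
}
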
+static int
+add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry prio_tag_act = {
+ .vlan.vid = 0,
+ .vlan.prio =
+ MLX5_GET(fte_match_set_lyr_2_4,
+ get_match_headers_value(*action,
+ &parse_attr->spec),
+ first_prio) &
+ MLX5_GET(fte_match_set_lyr_2_4,
+ get_match_headers_criteria(*action,
+ &parse_attr->spec),
+ first_prio),
+ };
+
+ return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
+ &prio_tag_act, parse_attr, hdrs, action,
+ extack);
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -2282,6 +2495,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_KERNEL,
+ act, parse_attr, hdrs,
+ &action, extack);
+ if (err)
+ return err;
+
+ break;
case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
act->csum_flags,
@@ -2321,16 +2543,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
}
break;
default:
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ return -EOPNOTSUPP;
}
}
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, extack);
+ parse_attr, hdrs, &action, extack);
if (err)
return err;
+ /* in case all pedit actions are skipped, remove the MOD_HDR
+ * flag.
+ */
+ if (parse_attr->num_mod_hdr_actions == 0)
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
attr->action = action;
@@ -2340,15 +2568,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return 0;
}
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
- struct ip_tunnel_key *b)
+struct encap_key {
+ struct ip_tunnel_key *ip_tun_key;
+ int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+ struct encap_key *b)
{
- return memcmp(a, b, sizeof(*a));
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+ a->tunnel_type != b->tunnel_type;
}
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
{
- return jhash(key, sizeof(*key), 0);
+ return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+ key->tunnel_type);
}
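
Encap entries are now keyed by both the IP tunnel key and the tunnel type, so two tunnels with identical IP keys but different types no longer collide into one entry; the type is folded in as the hash seed. A sketch of the idea with simplified key structs and a placeholder mixing function standing in for jhash():

/* Sketch: fold the tunnel type into the hash and the comparison of encap keys.
 * Simplified key layout; mix() is a placeholder for the kernel's jhash(). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct tun_key   { uint32_t saddr, daddr, tun_id; };
struct encap_key { struct tun_key k; int tunnel_type; };

static uint32_t mix(const void *p, size_t len, uint32_t seed)
{
    const uint8_t *b = p;
    uint32_t h = 2166136261u ^ seed;       /* FNV-1a style, illustrative only */

    while (len--)
        h = (h ^ *b++) * 16777619u;
    return h;
}

static int cmp_encap(const struct encap_key *a, const struct encap_key *b)
{
    return memcmp(&a->k, &b->k, sizeof(a->k)) || a->tunnel_type != b->tunnel_type;
}

int main(void)
{
    struct encap_key vxlan = { { 0x0a000001, 0x0a000002, 100 }, 1 };
    struct encap_key gre   = { { 0x0a000001, 0x0a000002, 100 }, 2 };

    printf("hashes 0x%08x vs 0x%08x, keys differ? %d\n",
           mix(&vxlan.k, sizeof(vxlan.k), (uint32_t)vxlan.tunnel_type),
           mix(&gre.k, sizeof(gre.k), (uint32_t)gre.tunnel_type),
           cmp_encap(&vxlan, &gre) != 0);
    return 0;
}
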
@@ -2379,7 +2614,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct ip_tunnel_info *tun_info;
- struct ip_tunnel_key *key;
+ struct encap_key key, e_key;
struct mlx5e_encap_entry *e;
unsigned short family;
uintptr_t hash_key;
@@ -2389,13 +2624,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = &parse_attr->tun_info[out_index];
family = ip_tunnel_info_af(tun_info);
- key = &tun_info->key;
+ key.ip_tun_key = &tun_info->key;
+ key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
- hash_key = hash_encap_info(key);
+ hash_key = hash_encap_info(&key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- if (!cmp_encap_info(&e->tun_info.key, key)) {
+ e_key.ip_tun_key = &e->tun_info.key;
+ e_key.tunnel_type = e->tunnel_type;
+ if (!cmp_encap_info(&e_key, &key)) {
found = true;
break;
}
@@ -2490,8 +2728,7 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
}
break;
default:
- /* action is FLOW_ACT_VLAN_MANGLE */
- return -EOPNOTSUPP;
+ return -EINVAL;
}
attr->total_vlan = vlan_idx + 1;
@@ -2499,15 +2736,60 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
return 0;
}
+static int add_vlan_push_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ struct net_device **out_dev,
+ u32 *action)
+{
+ struct net_device *vlan_dev = *out_dev;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_PUSH,
+ .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+ .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+ .vlan.prio = 0,
+ };
+ int err;
+
+ err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ if (err)
+ return err;
+
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
+ dev_get_iflink(vlan_dev));
+ if (is_vlan_dev(*out_dev))
+ err = add_vlan_push_action(priv, attr, out_dev, action);
+
+ return err;
+}
+
+static int add_vlan_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action)
+{
+ int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_POP,
+ };
+ int err = 0;
+
+ while (nest_level--) {
+ err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
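
For a VLAN upper device on the egress side the driver recurses down to the real lower device, emitting one VLAN push per stacking level, and mirrors that with one pop per encapsulation level of the ingress filter device. A toy sketch of the counting, with device nesting simulated by plain integers:

/* Toy sketch: one push per VLAN upper level, one pop per ingress nest level. */
#include <stdio.h>

/* each recursion step stands for following dev_get_iflink() one level down */
static int push_vlans(int vlan_nest_level, int *pushes)
{
    if (vlan_nest_level == 0)
        return 0;                /* reached the real, non-VLAN lower device */
    (*pushes)++;
    return push_vlans(vlan_nest_level - 1, pushes);
}

int main(void)
{
    int out_dev_nest = 2;        /* e.g. eth0.10.20: two stacked VLANs */
    int filter_dev_nest = 1;     /* ingress filter device is one VLAN deep */
    int pushes = 0, pops = 0;

    push_vlans(out_dev_nest, &pushes);
    for (int level = filter_dev_nest; level; level--)
        pops++;                  /* add_vlan_pop_action() loops the same way */

    printf("vlan actions: %d push, %d pop\n", pushes, pops);
    return 0;
}
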
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
const struct flow_action_entry *act;
@@ -2579,6 +2861,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
uplink_upper == out_dev)
out_dev = uplink_dev;
+ if (is_vlan_dev(out_dev)) {
+ err = add_vlan_push_action(priv, attr,
+ &out_dev,
+ &action);
+ if (err)
+ return err;
+ }
+ if (is_vlan_dev(parse_attr->filter_dev)) {
+ err = add_vlan_pop_action(priv, attr,
+ &action);
+ if (err)
+ return err;
+ }
+
if (!mlx5e_eswitch_rep(out_dev))
return -EOPNOTSUPP;
@@ -2592,7 +2888,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
out_dev->ifindex;
parse_attr->tun_info[attr->out_count] = *info;
encap = false;
- attr->parse_attr = parse_attr;
attr->dests[attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
attr->out_count++;
@@ -2625,7 +2920,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
- err = parse_tc_vlan_action(priv, act, attr, &action);
+ if (act->id == FLOW_ACTION_VLAN_PUSH &&
+ (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
+ /* Replace vlan pop+push with vlan modify */
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ act, parse_attr, hdrs,
+ &action, extack);
+ } else {
+ err = parse_tc_vlan_action(priv, act, attr, &action);
+ }
+ if (err)
+ return err;
+
+ attr->split_count = attr->out_count;
+ break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ act, parse_attr, hdrs,
+ &action, extack);
if (err)
return err;
@@ -2651,16 +2966,39 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
}
default:
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ return -EOPNOTSUPP;
}
}
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+ action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ /* For prio tag mode, replace the vlan pop with a vlan prio tag
+ * rewrite.
+ */
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
+ &action, extack);
+ if (err)
+ return err;
+ }
+
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, extack);
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr, hdrs, &action, extack);
if (err)
return err;
+ /* In case all pedit actions are skipped, remove the MOD_HDR
+ * flag. We might have set split_count either by pedit or by
+ * vlan pop/push; if there is no pop/push either, reset it too.
+ */
+ if (parse_attr->num_mod_hdr_actions == 0) {
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
+ attr->split_count = 0;
+ }
}
attr->action = action;
@@ -2829,7 +3167,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
+ err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
@@ -3026,6 +3364,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
netdev_warn_once(priv->netdev,
"flow cookie %lx already exists, ignoring\n",
f->cookie);
+ err = -EEXIST;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 25a8f8260c14..7b61126fcec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
@@ -110,11 +111,10 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
+ int channel_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
@@ -163,7 +163,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
case MLX5_INLINE_MODE_NONE:
return 0;
case MLX5_INLINE_MODE_TCP_UDP:
- hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+ hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
hlen += VLAN_HLEN;
break;
@@ -297,7 +297,8 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
- struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+ struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+ bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -320,14 +321,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->stopped++;
}
- if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+ if (!xmit_more || netif_xmit_stopped(sq->txq))
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi)
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
@@ -360,7 +361,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
stats->bytes += num_bytes;
- stats->xmit_more += skb->xmit_more;
+ stats->xmit_more += netdev_xmit_more();
headlen = skb->len - ihs - skb->data_len;
ds_cnt += !!headlen;
@@ -392,6 +393,10 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg = &wqe->eth;
dseg = wqe->data;
+#if IS_ENABLED(CONFIG_GENEVE)
+ if (skb->encapsulation)
+ mlx5e_tx_tunnel_accel(skb, eseg);
+#endif
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
eseg->mss = mss;
@@ -419,7 +424,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_drop;
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg);
+ num_dma, wi, cseg, xmit_more);
return NETDEV_TX_OK;
@@ -445,7 +450,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!skb))
return NETDEV_TX_OK;
- return mlx5e_sq_xmit(sq, skb, wqe, pi);
+ return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
}
static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
@@ -655,7 +660,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
stats->bytes += num_bytes;
- stats->xmit_more += skb->xmit_more;
+ stats->xmit_more += netdev_xmit_more();
headlen = skb->len - ihs - skb->data_len;
ds_cnt += !!headlen;
@@ -700,7 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_drop;
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg);
+ num_dma, wi, cseg, false);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index b4af5e19f6ac..f9862bf75491 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -71,6 +71,17 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
net_dim(&rq->dim, dim_sample);
}
+void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *nopwqe;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index bb6e5b5d9681..23883d1fa22f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -291,6 +291,9 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
mlx5_fill_page_array(&eq->buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
+ if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx))
+ MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);
+
MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
@@ -504,8 +507,7 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_VPORT_MANAGER(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
- if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
- MLX5_CAP_GEN(dev, general_notification_event))
+ if (MLX5_CAP_GEN(dev, general_notification_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
if (MLX5_CAP_GEN(dev, port_module_event))
@@ -707,7 +709,7 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
__raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
- mb();
+ wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
@@ -900,14 +902,12 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
-#ifdef CONFIG_RFS_ACCEL
return dev->priv.eq_table->rmap;
-#else
- return NULL;
-#endif
}
+#endif
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ecd2c747f726..9ea0ccfe5ef5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -72,25 +72,22 @@ static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
-/* The vport getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
- */
-#define mlx5_esw_for_all_vports(esw, i, vport) \
- for ((i) = MLX5_VPORT_PF; \
- (vport) = &(esw)->vports[i], \
- (i) < (esw)->total_vports; (i)++)
+struct mlx5_vport *__must_check
+mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ u16 idx;
-#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
- for ((i) = MLX5_VPORT_FIRST_VF; \
- (vport) = &(esw)->vports[i], \
- (i) <= (nvfs); (i)++)
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ return ERR_PTR(-EPERM);
-static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw,
- u16 vport_num)
-{
- u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+ idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+
+ if (idx > esw->total_vports - 1) {
+ esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
+ vport_num, idx);
+ return ERR_PTR(-EINVAL);
+ }
- WARN_ON(idx > esw->total_vports - 1);
return &esw->vports[idx];
}
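
mlx5_eswitch_get_vport() now returns ERR_PTR() codes for a missing capability or an out-of-range index instead of warning, so callers must test the result with IS_ERR(). A minimal userspace sketch of that return convention, using simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR macros:

/* Sketch of the ERR_PTR/IS_ERR return convention the new getter relies on.
 * The helpers below are simplified userspace stand-ins, not the kernel's. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct vport { int num; };
static struct vport vports[4];

static struct vport *get_vport(int idx)
{
    if (idx < 0 || idx > 3)
        return ERR_PTR(-EINVAL);    /* out of range, as in the new getter */
    return &vports[idx];
}

int main(void)
{
    struct vport *v = get_vport(7);

    if (IS_ERR(v))
        printf("getter failed: %ld\n", PTR_ERR(v));
    else
        printf("got vport %d\n", v->num);
    return 0;
}
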
@@ -105,8 +102,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -134,8 +130,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
@@ -431,6 +426,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
int err;
+ memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
err = esw_create_legacy_vepa_table(esw);
if (err)
return err;
@@ -644,9 +641,8 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
- u16 vport_num, int list_type)
+ struct mlx5_vport *vport, int list_type)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
vport_addr_action vport_addr_add;
vport_addr_action vport_addr_del;
@@ -679,9 +675,8 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
- u16 vport_num, int list_type)
+ struct mlx5_vport *vport, int list_type)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
u8 (*mac_list)[ETH_ALEN];
struct l2addr_node *node;
@@ -710,12 +705,12 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
if (!vport->enabled)
goto out;
- err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
+ err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
mac_list, &size);
if (err)
goto out;
esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
- vport_num, is_uc ? "UC" : "MC", size);
+ vport->vport, is_uc ? "UC" : "MC", size);
for (i = 0; i < size; i++) {
if (is_uc && !is_valid_ether_addr(mac_list[i]))
@@ -753,10 +748,10 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
if (!addr) {
esw_warn(esw->dev,
"Failed to add MAC(%pM) to vport[%d] DB\n",
- mac_list[i], vport_num);
+ mac_list[i], vport->vport);
continue;
}
- addr->vport = vport_num;
+ addr->vport = vport->vport;
addr->action = MLX5_ACTION_ADD;
}
out:
@@ -766,9 +761,9 @@ out:
/* Sync vport UC/MC list from vport context
* Must be called after esw_update_vport_addr_list
*/
-static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
struct l2addr_node *node;
struct vport_addr *addr;
struct hlist_head *hash;
@@ -791,20 +786,20 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
if (!addr) {
esw_warn(esw->dev,
"Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
- mac, vport_num);
+ mac, vport->vport);
continue;
}
- addr->vport = vport_num;
+ addr->vport = vport->vport;
addr->action = MLX5_ACTION_ADD;
addr->mc_promisc = true;
}
}
/* Apply vport rx mode to HW FDB table */
-static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
bool promisc, bool mc_promisc)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
@@ -812,7 +807,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
if (mc_promisc) {
vport->allmulti_rule =
- esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+ esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
if (!allmulti_addr->uplink_rule)
allmulti_addr->uplink_rule =
esw_fdb_set_vport_allmulti_rule(esw,
@@ -835,8 +830,8 @@ promisc:
return;
if (promisc) {
- vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
- vport_num);
+ vport->promisc_rule =
+ esw_fdb_set_vport_promisc_rule(esw, vport->vport);
} else if (vport->promisc_rule) {
mlx5_del_flow_rules(vport->promisc_rule);
vport->promisc_rule = NULL;
@@ -844,23 +839,23 @@ promisc:
}
/* Sync vport rx mode from vport context */
-static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int promisc_all = 0;
int promisc_uc = 0;
int promisc_mc = 0;
int err;
err = mlx5_query_nic_vport_promisc(esw->dev,
- vport_num,
+ vport->vport,
&promisc_uc,
&promisc_mc,
&promisc_all);
if (err)
return;
esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
- vport_num, promisc_all, promisc_mc);
+ vport->vport, promisc_all, promisc_mc);
if (!vport->info.trusted || !vport->enabled) {
promisc_uc = 0;
@@ -868,7 +863,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
promisc_all = 0;
}
- esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+ esw_apply_vport_rx_mode(esw, vport, promisc_all,
(promisc_all || promisc_mc));
}
@@ -883,27 +878,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
vport->vport, mac);
if (vport->enabled_events & UC_ADDR_CHANGE) {
- esw_update_vport_addr_list(esw, vport->vport,
- MLX5_NVPRT_LIST_TYPE_UC);
- esw_apply_vport_addr_list(esw, vport->vport,
- MLX5_NVPRT_LIST_TYPE_UC);
+ esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
+ esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
}
- if (vport->enabled_events & MC_ADDR_CHANGE) {
- esw_update_vport_addr_list(esw, vport->vport,
- MLX5_NVPRT_LIST_TYPE_MC);
- }
+ if (vport->enabled_events & MC_ADDR_CHANGE)
+ esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
if (vport->enabled_events & PROMISC_CHANGE) {
- esw_update_vport_rx_mode(esw, vport->vport);
+ esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
- esw_update_vport_mc_promisc(esw, vport->vport);
+ esw_update_vport_mc_promisc(esw, vport);
}
- if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
- esw_apply_vport_addr_list(esw, vport->vport,
- MLX5_NVPRT_LIST_TYPE_MC);
- }
+ if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+ esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
if (vport->enabled)
@@ -922,8 +911,8 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
@@ -1006,8 +995,8 @@ out:
return err;
}
-static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
mlx5_del_flow_rules(vport->egress.allowed_vlan);
@@ -1019,8 +1008,8 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
vport->egress.drop_rule = NULL;
}
-static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
if (IS_ERR_OR_NULL(vport->egress.acl))
return;
@@ -1036,8 +1025,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -1168,8 +1157,8 @@ out:
return err;
}
-static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
mlx5_del_flow_rules(vport->ingress.drop_rule);
@@ -1181,8 +1170,8 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
vport->ingress.allow_rule = NULL;
}
-static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
if (IS_ERR_OR_NULL(vport->ingress.acl))
return;
@@ -1420,10 +1409,10 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
esw->qos.enabled = false;
}
-static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
+static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
u32 initial_max_rate, u32 initial_bw_share)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
void *vport_elem;
@@ -1440,7 +1429,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
- MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+ MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
@@ -1453,7 +1442,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
&vport->qos.esw_tsar_ix);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
- vport_num, err);
+ vport->vport, err);
return err;
}
@@ -1461,10 +1450,10 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
return 0;
}
-static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
+static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
- int err = 0;
+ int err;
if (!vport->qos.enabled)
return;
@@ -1474,15 +1463,15 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
vport->qos.esw_tsar_ix);
if (err)
esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
- vport_num, err);
+ vport->vport, err);
vport->qos.enabled = false;
}
-static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
+static int esw_vport_qos_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
u32 max_rate, u32 bw_share)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
void *vport_elem;
@@ -1499,7 +1488,7 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
- MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+ MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
@@ -1515,7 +1504,7 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
bitmask);
if (err) {
esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
- vport_num, err);
+ vport->vport, err);
return err;
}
@@ -1618,7 +1607,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_apply_vport_conf(esw, vport);
/* Attach vport to the eswitch rate limiter */
- if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
+ if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
vport->qos.bw_share))
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
@@ -1663,7 +1652,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
- esw_vport_disable_qos(esw, vport_num);
+ esw_vport_disable_qos(esw, vport);
if (esw->manager_vport != vport_num &&
esw->mode == SRIOV_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
@@ -1688,6 +1677,9 @@ static int eswitch_vport_event(struct notifier_block *nb,
vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return NOTIFY_OK;
+
if (vport->enabled)
queue_work(esw->work_queue, &vport->vport_change_handler);
@@ -1922,22 +1914,19 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
}
/* Vport Administration */
-#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
-
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u64 node_guid;
int err = 0;
- if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
- return -EPERM;
- if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+ if (is_multicast_ether_addr(mac))
return -EINVAL;
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
if (evport->info.spoofchk && !is_valid_ether_addr(mac))
mlx5_core_warn(esw->dev,
@@ -1972,16 +1961,15 @@ unlock:
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
err = mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
@@ -2003,14 +1991,10 @@ unlock:
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
- if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
- return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
-
- evport = &esw->vports[vport];
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
@@ -2032,16 +2016,17 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+ if (vlan > 4095 || qos > 7)
return -EINVAL;
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
@@ -2075,17 +2060,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
bool pschk;
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
pschk = evport->info.spoofchk;
evport->info.spoofchk = spoofchk;
if (pschk && !is_valid_ether_addr(evport->info.mac))
@@ -2157,6 +2141,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
/* Star rule to forward all traffic to uplink vport */
memset(spec, 0, sizeof(*spec));
+ memset(&dest, 0, sizeof(dest));
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = MLX5_VPORT_UPLINK;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -2225,15 +2210,14 @@ out:
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport, bool setting)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
evport->info.trusted = setting;
if (evport->enabled)
esw_vport_change_handle_locked(evport);
@@ -2283,7 +2267,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
if (bw_share == evport->qos.bw_share)
continue;
- err = esw_vport_qos_config(esw, evport->vport, vport_max_rate,
+ err = esw_vport_qos_config(esw, evport, vport_max_rate,
bw_share);
if (!err)
evport->qos.bw_share = bw_share;
@@ -2297,7 +2281,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
u32 max_rate, u32 min_rate)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u32 fw_max_bw_share;
u32 previous_min_rate;
u32 divider;
@@ -2307,8 +2291,8 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
@@ -2319,7 +2303,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
return -EOPNOTSUPP;
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
if (min_rate == evport->info.min_rate)
goto set_max_rate;
@@ -2337,7 +2320,7 @@ set_max_rate:
if (max_rate == evport->info.max_rate)
goto unlock;
- err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
+ err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
if (!err)
evport->info.max_rate = max_rate;
@@ -2347,11 +2330,10 @@ unlock:
}
static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
- int vport_idx,
+ struct mlx5_vport *vport,
struct mlx5_vport_drop_stats *stats)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_vport *vport = &esw->vports[vport_idx];
u64 rx_discard_vport_down, tx_discard_vport_down;
u64 bytes = 0;
int err = 0;
@@ -2371,7 +2353,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
return 0;
- err = mlx5_query_vport_down_stats(dev, vport_idx, 1,
+ err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
&rx_discard_vport_down,
&tx_discard_vport_down);
if (err)
@@ -2386,19 +2368,18 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
- int vport,
+ int vport_num,
struct ifla_vf_stats *vf_stats)
{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
- if (!ESW_ALLOWED(esw))
- return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
@@ -2407,7 +2388,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, vport_number, vport);
+ MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
memset(out, 0, outlen);
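
[Annotation] The eswitch.c hunks above replace the old LEGAL_VPORT() range check with mlx5_eswitch_get_vport(), which hands back an ERR_PTR() on a bad vport number so callers simply propagate PTR_ERR(). The following is a minimal userspace sketch of that lookup convention; the names (demo_vport, demo_get_vport, vport_table) are invented for illustration and are not part of the driver.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)  ((int)(intptr_t)(ptr))
#define IS_ERR(ptr)   ((uintptr_t)(void *)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct demo_vport { int vport; };

static struct demo_vport vport_table[4] = {
	{ .vport = 0 }, { .vport = 1 }, { .vport = 2 }, { .vport = 3 },
};

/* Return the vport entry, or an encoded errno on an out-of-range index. */
static struct demo_vport *demo_get_vport(int idx)
{
	if (idx < 0 || idx >= 4)
		return ERR_PTR(-EINVAL);
	return &vport_table[idx];
}

int main(void)
{
	struct demo_vport *v = demo_get_vport(7);

	if (IS_ERR(v))	/* the caller propagates PTR_ERR() instead of a hand-rolled -EINVAL */
		printf("lookup failed: %d\n", PTR_ERR(v));
	return 0;
}
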
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3f3cd32ae60a..ed3fad689ec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -227,6 +227,18 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
int total_nvports);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -376,11 +388,11 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
-#define esw_info(dev, format, ...) \
- pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+#define esw_info(__dev, format, ...) \
+ dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)
-#define esw_warn(dev, format, ...) \
- pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+#define esw_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)
#define esw_debug(dev, format, ...) \
mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
@@ -431,6 +443,54 @@ static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
return index;
}
+/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
+void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+
+/* The vport getter/iterator are only valid after esw->total_vports
+ * and vport->vport are initialized in mlx5_eswitch_init.
+ */
+#define mlx5_esw_for_all_vports(esw, i, vport) \
+ for ((i) = MLX5_VPORT_PF; \
+ (vport) = &(esw)->vports[i], \
+ (i) < (esw)->total_vports; (i)++)
+
+#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
+ for ((i) = MLX5_VPORT_FIRST_VF; \
+ (vport) = &(esw)->vports[(i)], \
+ (i) <= (nvfs); (i)++)
+
+#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
+ for ((i) = (nvfs); \
+ (vport) = &(esw)->vports[(i)], \
+ (i) >= MLX5_VPORT_FIRST_VF; (i)--)
+
+/* The rep getter/iterator are only valid after esw->total_vports
+ * and vport->vport are initialized in mlx5_eswitch_init.
+ */
+#define mlx5_esw_for_all_reps(esw, i, rep) \
+ for ((i) = MLX5_VPORT_PF; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) < (esw)->total_vports; (i)++)
+
+#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
+ for ((i) = MLX5_VPORT_FIRST_VF; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) <= (nvfs); (i)++)
+
+#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
+ for ((i) = (nvfs); \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) >= MLX5_VPORT_FIRST_VF; (i)--)
+
+#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
+ for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
+
+#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
+ for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
+
+struct mlx5_vport *__must_check
+mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
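
[Annotation] The eswitch.h hunk above centralizes the mlx5_esw_for_each_vf_vport()-style iterator macros. The sketch below is a compilable userspace analogue of that comma-expression macro shape (element loaded before the bound test, so the body always sees a valid pointer); demo_for_each_vport and its arguments are placeholders, not driver symbols.

#include <stdio.h>

struct demo_vport { int vport; };

#define demo_for_each_vport(arr, i, vp, first, last)		\
	for ((i) = (first);					\
	     (vp) = &(arr)[(i)], (i) <= (last); (i)++)

int main(void)
{
	struct demo_vport vports[8];
	struct demo_vport *vp;
	int i;

	for (i = 0; i < 8; i++)
		vports[i].vport = i;

	demo_for_each_vport(vports, i, vp, 1, 4)	/* VF-like range 1..4 */
		printf("vport %d\n", vp->vport);
	return 0;
}
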
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f2260391be5b..e09ae27485ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,17 +37,13 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "ecpf.h"
#include "lib/eq.h"
-enum {
- FDB_FAST_PATH = 0,
- FDB_SLOW_PATH
-};
-
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@@ -58,32 +54,6 @@ enum {
#define UPLINK_REP_INDEX 0
-/* The rep getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
- */
-#define mlx5_esw_for_all_reps(esw, i, rep) \
- for ((i) = MLX5_VPORT_PF; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) < (esw)->total_vports; (i)++)
-
-#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
- for ((i) = MLX5_VPORT_FIRST_VF; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
- for ((i) = (nvfs); \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-
-#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \
- for ((vport) = MLX5_VPORT_FIRST_VF; \
- (vport) <= (nvfs); (vport)++)
-
-#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \
- for ((vport) = (nvfs); \
- (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
-
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
@@ -363,7 +333,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
- if (rep->rep_if[REP_ETH].state != REP_LOADED)
+ if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -663,7 +633,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
}
- mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) {
+ mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
MLX5_SET(fte_match_set_misc, misc, source_port, i);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
@@ -681,7 +651,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
add_vf_flow_err:
nvports = --i;
- mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports)
+ mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
mlx5_del_flow_rules(flows[i]);
if (mlx5_ecpf_vport_exists(esw->dev))
@@ -704,7 +674,8 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
flows = esw->fdb_table.offloads.peer_miss_rules;
- mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev))
+ mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
+ mlx5_core_max_vfs(esw->dev))
mlx5_del_flow_rules(flows[i]);
if (mlx5_ecpf_vport_exists(esw->dev))
@@ -1287,13 +1258,13 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
- int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+ int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_eswitch_rep *rep;
u8 hw_id[ETH_ALEN], rep_type;
int vport;
- esw->offloads.vport_reps = kcalloc(total_vfs,
+ esw->offloads.vport_reps = kcalloc(total_vports,
sizeof(struct mlx5_eswitch_rep),
GFP_KERNEL);
if (!esw->offloads.vport_reps)
@@ -1306,7 +1277,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- rep->rep_if[rep_type].state = REP_UNREGISTERED;
+ atomic_set(&rep->rep_if[rep_type].state,
+ REP_UNREGISTERED);
}
return 0;
@@ -1315,11 +1287,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
- if (rep->rep_if[rep_type].state != REP_LOADED)
- return;
-
- rep->rep_if[rep_type].unload(rep);
- rep->rep_if[rep_type].state = REP_REGISTERED;
+ if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+ REP_LOADED, REP_REGISTERED) == REP_LOADED)
+ rep->rep_if[rep_type].unload(rep);
}
static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1380,16 +1350,15 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
{
int err = 0;
- if (rep->rep_if[rep_type].state != REP_REGISTERED)
- return 0;
-
- err = rep->rep_if[rep_type].load(esw->dev, rep);
- if (err)
- return err;
-
- rep->rep_if[rep_type].state = REP_LOADED;
+ if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
+ err = rep->rep_if[rep_type].load(esw->dev, rep);
+ if (err)
+ atomic_set(&rep->rep_if[rep_type].state,
+ REP_REGISTERED);
+ }
- return 0;
+ return err;
}
static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1523,8 +1492,6 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
return 0;
}
-void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
-
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
mlx5e_tc_clean_fdb_peer_flows(esw);
@@ -1607,12 +1574,182 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
+static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ /* For prio tag mode, there is only one FTE:
+ * 1) Untagged packets - push prio tag VLAN, allow
+ * Unmatched traffic is allowed by default
+ */
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules\n", vport->vport);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto out_no_mem;
+ }
+
+ /* Untagged packets - push prio tag VLAN, allow */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ flow_act.vlan[0].ethtype = ETH_P_8021Q;
+ flow_act.vlan[0].vid = 0;
+ flow_act.vlan[0].prio = 0;
+ vport->ingress.allow_rule =
+ mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress untagged allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ goto out;
+ }
+
+out:
+ kvfree(spec);
+out_no_mem:
+ if (err)
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ return err;
+}
+
+static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ /* For prio tag mode, there is only one FTE:
+ * 1) prio tag packets - pop the prio tag VLAN, allow
+ * Unmatched traffic is allowed by default
+ */
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable egress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
+
+ esw_debug(esw->dev,
+ "vport[%d] configure prio tag egress rules\n", vport->vport);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto out_no_mem;
+ }
+
+ /* prio tag vlan rule - pop it so VF receives untagged packets */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ goto out;
+ }
+
+out:
+ kvfree(spec);
+out_no_mem:
+ if (err)
+ esw_vport_cleanup_egress_rules(esw, vport);
+ return err;
+}
+
+static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_vport *vport = NULL;
+ int i, j;
+ int err;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, nvports) {
+ err = esw_vport_ingress_prio_tag_config(esw, vport);
+ if (err)
+ goto err_ingress;
+ err = esw_vport_egress_prio_tag_config(esw, vport);
+ if (err)
+ goto err_egress;
+ }
+
+ return 0;
+
+err_egress:
+ esw_vport_disable_ingress_acl(esw, vport);
+err_ingress:
+ mlx5_esw_for_each_vf_vport_reverse(esw, j, vport, i - 1) {
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_disable_ingress_acl(esw, vport);
+ }
+
+ return err;
+}
+
+static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->nvports) {
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_disable_ingress_acl(esw, vport);
+ }
+}
+
static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
+ memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
+ err = esw_prio_tag_acls_config(esw, nvports);
+ if (err)
+ return err;
+ }
+
err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
return err;
@@ -1641,6 +1778,8 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ esw_prio_tag_acls_cleanup(esw);
}
static void esw_host_params_event_handler(struct work_struct *work)
@@ -1699,8 +1838,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
{
int err;
- mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
-
err = esw_offloads_steering_init(esw, total_nvports);
if (err)
return err;
@@ -1718,6 +1855,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
esw->host_info.num_vfs = vf_nvports;
}
+ mlx5_rdma_enable_roce(esw->dev);
+
return 0;
err_reps:
@@ -1756,6 +1895,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
num_vfs = esw->dev->priv.sriov.num_vfs;
}
+ mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw, num_vfs);
esw_offloads_steering_cleanup(esw);
@@ -2075,7 +2215,7 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
rep_if->get_proto_dev = __rep_if->get_proto_dev;
rep_if->priv = __rep_if->priv;
- rep_if->state = REP_REGISTERED;
+ atomic_set(&rep_if->state, REP_REGISTERED);
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@@ -2090,7 +2230,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
__unload_reps_all_vport(esw, max_vf, rep_type);
mlx5_esw_for_all_reps(esw, i, rep)
- rep->rep_if[rep_type].state = REP_UNREGISTERED;
+ atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -2110,7 +2250,7 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
rep = mlx5_eswitch_get_rep(esw, vport);
- if (rep->rep_if[rep_type].state == REP_LOADED &&
+ if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
rep->rep_if[rep_type].get_proto_dev)
return rep->rep_if[rep_type].get_proto_dev(rep);
return NULL;
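
[Annotation] The eswitch_offloads.c hunks above turn rep->rep_if[].state into an atomic and use atomic_cmpxchg() so that only the caller that wins the REGISTERED -> LOADED transition performs the load (and likewise for unload). Below is a minimal C11-atomics analogue of that compare-and-swap state machine; REP_* values, demo_load() and demo_load_rep() are stand-ins, not the driver's symbols.

#include <stdatomic.h>
#include <stdio.h>

enum { REP_UNREGISTERED, REP_REGISTERED, REP_LOADED };

static atomic_int rep_state = REP_REGISTERED;

static int demo_load(void) { return 0; }	/* pretend the load succeeded */

static int demo_load_rep(void)
{
	int expected = REP_REGISTERED;
	int err = 0;

	/* Only the caller that wins the REGISTERED -> LOADED transition
	 * actually performs the load; everyone else sees a no-op success.
	 */
	if (atomic_compare_exchange_strong(&rep_state, &expected, REP_LOADED)) {
		err = demo_load();
		if (err)
			atomic_store(&rep_state, REP_REGISTERED);	/* roll back */
	}
	return err;
}

int main(void)
{
	printf("first load:  %d (state %d)\n", demo_load_rep(), atomic_load(&rep_state));
	printf("second load: %d (state %d)\n", demo_load_rep(), atomic_load(&rep_state));
	return 0;
}
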
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 5d5864e8df3c..a81e8d2168d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -21,6 +21,7 @@ struct mlx5_event_nb {
static int any_notifier(struct notifier_block *, unsigned long, void *);
static int temp_warn(struct notifier_block *, unsigned long, void *);
static int port_module(struct notifier_block *, unsigned long, void *);
+static int pcie_core(struct notifier_block *, unsigned long, void *);
/* handler which forwards the event to events->nh, driver notifiers */
static int forward_event(struct notifier_block *, unsigned long, void *);
@@ -30,6 +31,7 @@ static struct mlx5_nb events_nbs_ref[] = {
{.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
{.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
{.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },
+ {.nb.notifier_call = pcie_core, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
/* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
@@ -51,11 +53,14 @@ static struct mlx5_nb events_nbs_ref[] = {
struct mlx5_events {
struct mlx5_core_dev *dev;
+ struct workqueue_struct *wq;
struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
/* driver notifier chain */
struct atomic_notifier_head nh;
/* port module events stats */
struct mlx5_pme_stats pme_stats;
+ /* pcie_core */
+ struct work_struct pcie_core_work;
};
static const char *eqe_type_str(u8 type)
@@ -249,6 +254,69 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
return NOTIFY_OK;
}
+enum {
+ MLX5_PCI_POWER_COULD_NOT_BE_READ = 0x0,
+ MLX5_PCI_POWER_SUFFICIENT_REPORTED = 0x1,
+ MLX5_PCI_POWER_INSUFFICIENT_REPORTED = 0x2,
+};
+
+static void mlx5_pcie_event(struct work_struct *work)
+{
+ u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {0};
+ struct mlx5_events *events;
+ struct mlx5_core_dev *dev;
+ u8 power_status;
+ u16 pci_power;
+
+ events = container_of(work, struct mlx5_events, pcie_core_work);
+ dev = events->dev;
+
+ if (!MLX5_CAP_MCAM_FEATURE(dev, pci_status_and_power))
+ return;
+
+ mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MPEIN, 0, 0);
+ power_status = MLX5_GET(mpein_reg, out, pwr_status);
+ pci_power = MLX5_GET(mpein_reg, out, pci_power);
+
+ switch (power_status) {
+ case MLX5_PCI_POWER_COULD_NOT_BE_READ:
+ mlx5_core_info_rl(dev,
+ "PCIe slot power capability was not advertised.\n");
+ break;
+ case MLX5_PCI_POWER_INSUFFICIENT_REPORTED:
+ mlx5_core_warn_rl(dev,
+ "Detected insufficient power on the PCIe slot (%uW).\n",
+ pci_power);
+ break;
+ case MLX5_PCI_POWER_SUFFICIENT_REPORTED:
+ mlx5_core_info_rl(dev,
+ "PCIe slot advertised sufficient power (%uW).\n",
+ pci_power);
+ break;
+ }
+}
+
+static int pcie_core(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb,
+ struct mlx5_event_nb,
+ nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT:
+ queue_work(events->wq, &events->pcie_core_work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
{
*stats = dev->priv.events->pme_stats;
@@ -277,11 +345,17 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
events->dev = dev;
dev->priv.events = events;
+ events->wq = create_singlethread_workqueue("mlx5_events");
+ if (!events->wq)
+ return -ENOMEM;
+ INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
+
return 0;
}
void mlx5_events_cleanup(struct mlx5_core_dev *dev)
{
+ destroy_workqueue(dev->priv.events->wq);
kvfree(dev->priv.events);
}
@@ -304,6 +378,7 @@ void mlx5_events_stop(struct mlx5_core_dev *dev)
for (i = ARRAY_SIZE(events_nbs_ref) - 1; i >= 0 ; i--)
mlx5_eq_notifier_unregister(dev, &events->notifiers[i].nb);
+ flush_workqueue(events->wq);
}
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
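
[Annotation] The events.c hunks above handle the PCIe power-change event by deferring the slow MPEIN register access from the notifier to a dedicated workqueue. The toy, single-threaded model below only illustrates that "notifier marks work pending, worker does the heavy part later" ordering; every name in it is invented and no real workqueue or notifier API is used.

#include <stdbool.h>
#include <stdio.h>

static bool pcie_work_pending;

static void pcie_core_work(void)
{
	/* In the driver this is where MLX5_REG_MPEIN is read and the
	 * power status is logged; here we only print a placeholder.
	 */
	printf("worker: query PCIe slot power, report status\n");
}

/* Notifier context must stay fast, so it only marks the work as pending. */
static int pcie_core_notifier(int subtype)
{
	if (subtype != 1 /* power-change subtype */)
		return 0;
	pcie_work_pending = true;
	return 1;
}

/* Stand-in for the workqueue thread draining queued work items. */
static void flush_demo_workqueue(void)
{
	if (pcie_work_pending) {
		pcie_work_pending = false;
		pcie_core_work();
	}
}

int main(void)
{
	pcie_core_notifier(1);
	flush_demo_workqueue();
	return 0;
}
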
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 873541ef4c1b..ca2296a2f9ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -135,7 +135,7 @@ static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
*conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
/* Make sure that doorbell record is visible before ringing */
wmb();
- mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
+ mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET);
}
static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 7e2e871dbf83..52c9dee91ea4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -37,6 +37,7 @@
#include <linux/mlx5/eq.h>
+#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/cmd.h"
@@ -62,26 +63,26 @@ struct mlx5_fpga_device {
};
#define mlx5_fpga_dbg(__adev, format, ...) \
- dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, ##__VA_ARGS__)
+ mlx5_core_dbg((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
#define mlx5_fpga_err(__adev, format, ...) \
- dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, ##__VA_ARGS__)
+ mlx5_core_err((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
#define mlx5_fpga_warn(__adev, format, ...) \
- dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, ##__VA_ARGS__)
+ mlx5_core_warn((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
#define mlx5_fpga_warn_ratelimited(__adev, format, ...) \
- dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \
- format, __func__, __LINE__, ##__VA_ARGS__)
+ mlx5_core_err_rl((__adev)->mdev, "FPGA: %s:%d: " \
+ format, __func__, __LINE__, ##__VA_ARGS__)
#define mlx5_fpga_notice(__adev, format, ...) \
- dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+ mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)
#define mlx5_fpga_info(__adev, format, ...) \
- dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+ mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)
int mlx5_fpga_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 5a22c5874f3b..52c47d3dd5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -989,32 +989,33 @@ static enum fs_flow_table_type egress_to_fs_ft(bool egress)
return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}
-static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id,
+ struct mlx5_flow_group *fg,
bool is_egress)
{
- int (*create_flow_group)(struct mlx5_core_dev *dev,
+ int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 *in,
- unsigned int *group_id) =
+ struct mlx5_flow_group *fg) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria.misc_parameters);
+ struct mlx5_core_dev *dev = ns->dev;
u32 saved_outer_esp_spi_mask;
u8 match_criteria_enable;
int ret;
if (MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- return create_flow_group(dev, ft, in, group_id);
+ return create_flow_group(ns, ft, in, fg);
match_criteria_enable =
MLX5_GET(create_flow_group_in, in, match_criteria_enable);
saved_outer_esp_spi_mask =
MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
if (!match_criteria_enable || !saved_outer_esp_spi_mask)
- return create_flow_group(dev, ft, in, group_id);
+ return create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
@@ -1023,7 +1024,7 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
- ret = create_flow_group(dev, ft, in, group_id);
+ ret = create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
@@ -1031,17 +1032,18 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
return ret;
}
-static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte,
bool is_egress)
{
- int (*create_fte)(struct mlx5_core_dev *dev,
+ int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
+ struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
@@ -1053,7 +1055,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return create_fte(dev, ft, fg, fte);
+ return create_fte(ns, ft, fg, fte);
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
@@ -1070,7 +1072,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
WARN_ON(rule_insert(fipsec, rule));
modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = create_fte(dev, ft, fg, fte);
+ ret = create_fte(ns, ft, fg, fte);
restore_spec_mailbox(fte, &mbox_mod);
if (ret) {
_rule_delete(fipsec, rule);
@@ -1081,19 +1083,20 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
return ret;
}
-static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte,
bool is_egress)
{
- int (*update_fte)(struct mlx5_core_dev *dev,
+ int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
+ struct mlx5_core_dev *dev = ns->dev;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
@@ -1102,24 +1105,25 @@ static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return update_fte(dev, ft, group_id, modify_mask, fte);
+ return update_fte(ns, ft, fg, modify_mask, fte);
modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = update_fte(dev, ft, group_id, modify_mask, fte);
+ ret = update_fte(ns, ft, fg, modify_mask, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
-static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte,
bool is_egress)
{
- int (*delete_fte)(struct mlx5_core_dev *dev,
+ int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
+ struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
@@ -1131,7 +1135,7 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return delete_fte(dev, ft, fte);
+ return delete_fte(ns, ft, fte);
rule = rule_search(fipsec, fte);
if (!rule)
@@ -1141,84 +1145,84 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
rule_delete(fipsec, rule);
modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = delete_fte(dev, ft, fte);
+ ret = delete_fte(ns, ft, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
static int
-mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id)
+ struct mlx5_flow_group *fg)
{
- return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
+ return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
}
static int
-mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
+ return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
}
static int
-mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
+ return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
true);
}
static int
-mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
+ return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
}
static int
-mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id)
+ struct mlx5_flow_group *fg)
{
- return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
+ return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
}
static int
-mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
+ return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
}
static int
-mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
+ return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
false);
}
static int
-mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
+ return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
}
static struct mlx5_flow_cmds fpga_ipsec_ingress;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..22a2ef111514 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
return ret;
}
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
- spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+ spinlock_t *idr_spinlock, u32 swid)
{
unsigned long flags;
+ void *ptr;
spin_lock_irqsave(idr_spinlock, flags);
- idr_remove(idr, swid);
+ ptr = idr_remove(idr, swid);
spin_unlock_irqrestore(idr_spinlock, flags);
+ return ptr;
}
static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
kfree(buf);
}
-struct mlx5_teardown_stream_context {
- struct mlx5_fpga_tls_command_context cmd;
- u32 swid;
-};
-
static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_tls_command_context *cmd,
struct mlx5_fpga_dma_buf *resp)
{
- struct mlx5_teardown_stream_context *ctx =
- container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
if (resp) {
u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d",
syndrome);
- else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
- mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
- &fdev->tls->tx_idr_spinlock,
- ctx->swid);
- else
- mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
- &fdev->tls->rx_idr_spinlock,
- ctx->swid);
}
mlx5_fpga_tls_put_command_ctx(cmd);
}
@@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
rcu_read_lock();
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- rcu_read_unlock();
+ if (unlikely(!flow)) {
+ rcu_read_unlock();
+ WARN_ONCE(1, "Received NULL pointer for handle\n");
+ kfree(buf);
+ return -EINVAL;
+ }
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+ rcu_read_unlock();
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
buf->complete = mlx_tls_kfree_complete;
ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+ if (ret < 0)
+ kfree(buf);
return ret;
}
@@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags)
{
- struct mlx5_teardown_stream_context *ctx;
+ struct mlx5_fpga_tls_command_context *ctx;
struct mlx5_fpga_dma_buf *buf;
void *cmd;
@@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
if (!ctx)
return;
- buf = &ctx->cmd.buf;
+ buf = &ctx->buf;
cmd = (ctx + 1);
MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
buf->sg[0].data = cmd;
buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- ctx->swid = swid;
- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+ mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
mlx5_fpga_tls_teardown_completion);
}
@@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow;
- rcu_read_lock();
if (direction_sx)
- flow = idr_find(&tls->tx_idr, swid);
+ flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock,
+ swid);
else
- flow = idr_find(&tls->rx_idr, swid);
-
- rcu_read_unlock();
+ flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock,
+ swid);
if (!flow) {
mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
return;
}
+ synchronize_rcu(); /* before kfree(flow) */
mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}
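
[Annotation] The fpga/tls.c hunks above change teardown ordering: the swid is removed from the IDR (under its spinlock) in mlx5_fpga_tls_del_flow(), and synchronize_rcu() runs before the flow can be freed, so a concurrent resync reader that already looked the flow up can finish. The rough userspace sketch below shows only that "unpublish first, wait, then free" ordering, with a mutex standing in for the lockless reader side; all names are invented.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 16

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *flow_table[TABLE_SIZE];

/* Remove and return the published pointer, like idr_remove() under the
 * spinlock; after this no new lookup can see the flow.
 */
static void *demo_release_swid(unsigned int swid)
{
	void *flow;

	pthread_mutex_lock(&table_lock);
	flow = flow_table[swid];
	flow_table[swid] = NULL;
	pthread_mutex_unlock(&table_lock);
	return flow;
}

static void demo_del_flow(unsigned int swid)
{
	void *flow = demo_release_swid(swid);

	if (!flow) {
		fprintf(stderr, "no flow for swid %u\n", swid);
		return;
	}
	/* The driver calls synchronize_rcu() at this point so that lockless
	 * readers which found the flow before the removal have finished.
	 */
	free(flow);
	printf("flow for swid %u torn down\n", swid);
}

int main(void)
{
	flow_table[3] = malloc(32);
	demo_del_flow(3);
	return 0;
}
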
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c44ccb67c4a3..013b1ca4a791 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -39,7 +39,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
-static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect)
@@ -47,47 +47,43 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
return 0;
}
-static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_flow_table_op_mod op_mod,
- enum fs_flow_table_type type,
- unsigned int level,
+static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
unsigned int log_size,
- struct mlx5_flow_table *next_ft,
- unsigned int *table_id, u32 flags)
+ struct mlx5_flow_table *next_ft)
{
return 0;
}
-static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
return 0;
}
-static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
return 0;
}
-static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id)
+ struct mlx5_flow_group *fg)
{
return 0;
}
-static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id)
+ struct mlx5_flow_group *fg)
{
return 0;
}
-static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
@@ -95,28 +91,29 @@ static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
return 0;
}
-static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *group,
int modify_mask,
struct fs_fte *fte)
{
return -EOPNOTSUPP;
}
-static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return 0;
}
-static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
+static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
underlay_qpn == 0)
@@ -143,29 +140,26 @@ static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_flow_table_op_mod op_mod,
- enum fs_flow_table_type type,
- unsigned int level,
+static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
unsigned int log_size,
- struct mlx5_flow_table *next_ft,
- unsigned int *table_id, u32 flags)
+ struct mlx5_flow_table *next_ft)
{
- int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
- int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+ int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
int err;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
- MLX5_SET(create_flow_table_in, in, table_type, type);
- MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
- if (vport) {
- MLX5_SET(create_flow_table_in, in, vport_number, vport);
+ if (ft->vport) {
+ MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
@@ -174,13 +168,18 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
en_encap);
- switch (op_mod) {
+ switch (ft->op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
MLX5_SET(create_flow_table_in, in,
- flow_table_context.table_miss_action, 1);
+ flow_table_context.table_miss_action,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD);
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_id, next_ft->id);
+ } else {
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_action,
+ ns->def_miss_action);
}
break;
@@ -195,16 +194,17 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
- *table_id = MLX5_GET(create_flow_table_out, out,
- table_id);
+ ft->id = MLX5_GET(create_flow_table_out, out,
+ table_id);
return err;
}
-static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@@ -218,12 +218,13 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
+static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(modify_flow_table_in, in, opcode,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
@@ -250,26 +251,29 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in,
- flow_table_context.table_miss_action, 1);
+ flow_table_context.table_miss_action,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD);
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_id,
next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in,
- flow_table_context.table_miss_action, 0);
+ flow_table_context.table_miss_action,
+ ns->def_miss_action);
}
}
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id)
+ struct mlx5_flow_group *fg)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = ns->dev;
int err;
MLX5_SET(create_flow_group_in, in, opcode,
@@ -283,23 +287,24 @@ static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
- *group_id = MLX5_GET(create_flow_group_out, out,
- group_id);
+ fg->id = MLX5_GET(create_flow_group_out, out,
+ group_id);
return err;
}
-static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id)
+ struct mlx5_flow_group *fg)
{
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
- MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
if (ft->vport) {
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
@@ -505,23 +510,25 @@ err_out:
return err;
}
-static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
{
+ struct mlx5_core_dev *dev = ns->dev;
unsigned int group_id = group->id;
return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
-static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
int opmod;
+ struct mlx5_core_dev *dev = ns->dev;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
@@ -529,15 +536,16 @@ static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
return -EOPNOTSUPP;
opmod = 1;
- return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
+ return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}
-static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, ft->type);
@@ -853,6 +861,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_SNIFFER_RX:
case FS_FT_SNIFFER_TX:
case FS_FT_NIC_TX:
+ case FS_FT_RDMA_RX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 6228ba7bfa1a..e340f9af2f5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -36,45 +36,42 @@
#include "fs_core.h"
struct mlx5_flow_cmds {
- int (*create_flow_table)(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_flow_table_op_mod op_mod,
- enum fs_flow_table_type type,
- unsigned int level, unsigned int log_size,
- struct mlx5_flow_table *next_ft,
- unsigned int *table_id, u32 flags);
- int (*destroy_flow_table)(struct mlx5_core_dev *dev,
+ int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ unsigned int log_size,
+ struct mlx5_flow_table *next_ft);
+ int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
- int (*modify_flow_table)(struct mlx5_core_dev *dev,
+ int (*modify_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft);
- int (*create_flow_group)(struct mlx5_core_dev *dev,
+ int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id);
+ struct mlx5_flow_group *fg);
- int (*destroy_flow_group)(struct mlx5_core_dev *dev,
+ int (*destroy_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id);
+ struct mlx5_flow_group *fg);
- int (*create_fte)(struct mlx5_core_dev *dev,
+ int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte);
- int (*update_fte)(struct mlx5_core_dev *dev,
+ int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte);
- int (*delete_fte)(struct mlx5_core_dev *dev,
+ int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte);
- int (*update_root_ft)(struct mlx5_core_dev *dev,
+ int (*update_root_ft)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect);
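In short, every op in this table now takes the flow root namespace and resolves the device through ns->dev, as the fs_cmd.c hunks above do. A minimal sketch of a stub callback following the new destroy_flow_table signature (identifiers come from this patch; the body itself is hypothetical):

static int example_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = ns->dev;	/* device now comes from the namespace */

	mlx5_core_dbg(dev, "dropping table id %u of type %d\n", ft->id, ft->type);
	return 0;
}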
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0be3eb86dd84..fb5b61727ee7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -403,7 +403,7 @@ static void del_hw_flow_table(struct fs_node *node)
trace_mlx5_fs_del_ft(ft);
if (node->active) {
- err = root->cmds->destroy_flow_table(dev, ft);
+ err = root->cmds->destroy_flow_table(root, ft);
if (err)
mlx5_core_warn(dev, "flow steering can't destroy ft\n");
}
@@ -435,7 +435,7 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node);
root = find_root(&ft->node);
- err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte);
+ err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
@@ -492,7 +492,7 @@ static void del_hw_fte(struct fs_node *node)
dev = get_dev(&ft->node);
root = find_root(&ft->node);
if (node->active) {
- err = root->cmds->delete_fte(dev, ft, fte);
+ err = root->cmds->delete_fte(root, ft, fte);
if (err)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
@@ -532,7 +532,7 @@ static void del_hw_flow_group(struct fs_node *node)
trace_mlx5_fs_del_fg(fg);
root = find_root(&ft->node);
- if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
+ if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
}
@@ -783,7 +783,7 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
fs_for_each_ft(iter, prio) {
i++;
- err = root->cmds->modify_flow_table(dev, iter, ft);
+ err = root->cmds->modify_flow_table(root, iter, ft);
if (err) {
mlx5_core_warn(dev, "Failed to modify flow table %d\n",
iter->id);
@@ -819,7 +819,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
struct mlx5_ft_underlay_qp *uqp;
int min_level = INT_MAX;
- int err;
+ int err = 0;
u32 qpn;
if (root->root_ft)
@@ -831,11 +831,11 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0;
- err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
+ err = root->cmds->update_root_ft(root, ft, qpn, false);
} else {
list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn;
- err = root->cmds->update_root_ft(root->dev, ft,
+ err = root->cmds->update_root_ft(root, ft,
qpn, false);
if (err)
break;
@@ -871,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node);
- err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
+ err = root->cmds->update_fte(root, ft, fg,
modify_mask, fte);
up_write_ref_node(&fte->node, false);
@@ -1013,9 +1013,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
- err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
- ft->type, ft->level, log_table_sz,
- next_ft, &ft->id, ft->flags);
+ err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
@@ -1032,7 +1030,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
trace_mlx5_fs_add_ft(ft);
return ft;
destroy_ft:
- root->cmds->destroy_flow_table(root->dev, ft);
+ root->cmds->destroy_flow_table(root, ft);
free_ft:
kfree(ft);
unlock_root:
@@ -1114,7 +1112,6 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
start_flow_index);
int end_index = MLX5_GET(create_flow_group_in, fg_in,
end_flow_index);
- struct mlx5_core_dev *dev = get_dev(&ft->node);
struct mlx5_flow_group *fg;
int err;
@@ -1129,7 +1126,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (IS_ERR(fg))
return fg;
- err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
+ err = root->cmds->create_flow_group(root, ft, fg_in, fg);
if (err) {
tree_put_node(&fg->node, false);
return ERR_PTR(err);
@@ -1269,11 +1266,9 @@ add_rule_fte(struct fs_fte *fte,
fs_get_obj(ft, fg->node.parent);
root = find_root(&fg->node);
if (!(fte->status & FS_FTE_STATUS_EXISTING))
- err = root->cmds->create_fte(get_dev(&ft->node),
- ft, fg, fte);
+ err = root->cmds->create_fte(root, ft, fg, fte);
else
- err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
- modify_mask, fte);
+ err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
if (err)
goto free_handle;
@@ -1339,7 +1334,6 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
- struct mlx5_core_dev *dev = get_dev(&ft->node);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *match_criteria_addr;
u8 src_esw_owner_mask_on;
@@ -1369,7 +1363,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
memcpy(match_criteria_addr, fg->mask.match_criteria,
sizeof(fg->mask.match_criteria));
- err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
+ err = root->cmds->create_flow_group(root, ft, in, fg);
if (!err) {
fg->node.active = true;
trace_mlx5_fs_add_fg(fg);
@@ -1941,12 +1935,12 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0;
- err = root->cmds->update_root_ft(root->dev, new_root_ft,
+ err = root->cmds->update_root_ft(root, new_root_ft,
qpn, false);
} else {
list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn;
- err = root->cmds->update_root_ft(root->dev,
+ err = root->cmds->update_root_ft(root,
new_root_ft, qpn,
false);
if (err)
@@ -2060,6 +2054,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ if (steering->rdma_rx_root_ns)
+ return &steering->rdma_rx_root_ns->ns;
+ return NULL;
default:
break;
}
@@ -2456,6 +2454,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
+ cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
@@ -2497,6 +2496,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return 0;
}
+static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
+ if (!steering->rdma_rx_root_ns)
+ return -ENOMEM;
+
+ steering->rdma_rx_root_ns->def_miss_action =
+ MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
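A table created in this namespace without an explicit next_ft inherits ns->def_miss_action (here MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN) via mlx5_cmd_create_flow_table() above. A usage sketch, mirroring the rdma.c code added later in this patch (table sizing is arbitrary):

	struct mlx5_flow_table_attr ft_attr = { .max_fte = 1 };
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
	if (!ns)
		return -EOPNOTSUPP;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);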
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct mlx5_flow_namespace *ns;
@@ -2516,8 +2534,16 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (!steering->fdb_sub_ns)
return -ENOMEM;
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
+ 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
- maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 0,
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
+ FDB_FAST_PATH,
levels);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
@@ -2542,7 +2568,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
steering->fdb_sub_ns[chain] = ns;
}
- maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
@@ -2725,6 +2751,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ err = init_rdma_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
@@ -2754,7 +2787,7 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto update_ft_fail;
}
- err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
false);
if (err) {
mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
@@ -2798,7 +2831,7 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto out;
}
- err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
true);
if (err)
mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 87de0e4d9124..a08c3d09a50f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -67,6 +67,7 @@ enum fs_flow_table_type {
FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_RDMA_RX = 0X7,
FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
};
@@ -90,6 +91,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
+ struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
};
@@ -150,7 +152,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
-#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_800
+#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_a00
/* Calculate the fte_match_param length, excluding the reserved length.
* Make sure the reserved field is the last one.
*/
@@ -216,6 +218,7 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
struct list_head underlay_qpns;
const struct mlx5_flow_cmds *cmds;
+ enum mlx5_flow_table_miss_action def_miss_action;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index cb9fa3430c53..a2656f4008d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -152,11 +152,11 @@ static void health_recover(struct work_struct *work)
nic_state = mlx5_get_nic_state(dev);
if (nic_state == MLX5_NIC_IFC_INVALID) {
- dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+ mlx5_core_err(dev, "health recovery flow aborted since the nic state is invalid\n");
return;
}
- dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+ mlx5_core_err(dev, "starting health recovery flow\n");
mlx5_recover_device(dev);
}
@@ -180,8 +180,8 @@ static void health_care(struct work_struct *work)
if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
schedule_delayed_work(&health->recover_work, recover_delay);
else
- dev_err(&dev->pdev->dev,
- "new health works are not permitted at this stage\n");
+ mlx5_core_err(dev,
+ "new health works are not permitted at this stage\n");
spin_unlock_irqrestore(&health->wq_lock, flags);
}
@@ -228,18 +228,22 @@ static void print_health_info(struct mlx5_core_dev *dev)
return;
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
- dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+ mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
+ ioread32be(h->assert_var + i));
- dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
- dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+ mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
+ ioread32be(&h->assert_exit_ptr));
+ mlx5_core_err(dev, "assert_callra 0x%08x\n",
+ ioread32be(&h->assert_callra));
sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
- dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
- dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
- dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
- dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
- dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+ mlx5_core_err(dev, "fw_ver %s\n", fw_str);
+ mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+ mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+ mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
+ hsynd_str(ioread8(&h->synd)));
+ mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
fw = ioread32be(&h->fw_ver);
- dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw);
+ mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
}
static unsigned long get_next_poll_jiffies(void)
@@ -262,8 +266,7 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
queue_work(health->wq, &health->work);
else
- dev_err(&dev->pdev->dev,
- "new health works are not permitted at this stage\n");
+ mlx5_core_err(dev, "new health works are not permitted at this stage\n");
spin_unlock_irqrestore(&health->wq_lock, flags);
}
@@ -284,7 +287,7 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
- dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
+ mlx5_core_err(dev, "device's health compromised - reached miss count\n");
print_health_info(dev);
}
@@ -352,6 +355,13 @@ void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
cancel_delayed_work_sync(&dev->priv.health.recover_work);
}
+void mlx5_health_flush(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ flush_workqueue(health->wq);
+}
+
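The new helper only drains the health workqueue; destroying it remains in mlx5_health_cleanup(). Roughly, as the main.c hunks further below use it:

/* remove_one(), when mlx5_unload_one() fails, now does:
 *
 *	mlx5_core_err(dev, "mlx5_unload_one failed\n");
 *	mlx5_health_flush(dev);		// drain pending health work, keep the wq
 *	return;
 *
 * while the workqueue itself is destroyed by mlx5_health_cleanup(), reached
 * from mlx5_mdev_uninit() on the normal teardown path.
 */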
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -370,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
return -ENOMEM;
strcpy(name, "mlx5_health");
- strcat(name, dev_name(&dev->pdev->dev));
+ strcat(name, dev_name(dev->device));
health->wq = create_singlethread_workqueue(name);
kfree(name);
if (!health->wq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 4eac42555c7d..ada1b7c0e0b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -68,6 +68,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
params->lro_en = false;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+ params->tunneled_offload_en = false;
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
@@ -77,15 +78,14 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- u16 max_mtu;
int err;
err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
if (err)
return err;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- netdev->mtu = max_mtu;
+ mlx5e_set_netdev_mtu_boundaries(priv);
+ netdev->mtu = netdev->max_mtu;
mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params,
mlx5e_get_netdev_max_channels(netdev),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 5633f8572800..8212bfd05733 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -122,7 +122,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
/* Handle add/replace event */
if (fi->fib_nhs == 1) {
if (__mlx5_lag_is_active(ldev)) {
- struct net_device *nh_dev = fi->fib_nh[0].nh_dev;
+ struct net_device *nh_dev = fi->fib_nh[0].fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
mlx5_lag_set_port_affinity(ldev, ++i);
@@ -134,10 +134,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
return;
/* Verify next hops are ports of the same hca */
- if (!(fi->fib_nh[0].nh_dev == ldev->pf[0].netdev &&
- fi->fib_nh[1].nh_dev == ldev->pf[1].netdev) &&
- !(fi->fib_nh[0].nh_dev == ldev->pf[1].netdev &&
- fi->fib_nh[1].nh_dev == ldev->pf[0].netdev)) {
+ if (!(fi->fib_nh[0].fib_nh_dev == ldev->pf[0].netdev &&
+ fi->fib_nh[1].fib_nh_dev == ldev->pf[1].netdev) &&
+ !(fi->fib_nh[0].fib_nh_dev == ldev->pf[1].netdev &&
+ fi->fib_nh[1].fib_nh_dev == ldev->pf[0].netdev)) {
mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n");
return;
}
@@ -167,7 +167,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->nh_dev);
+ int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
if (i >= 0) {
i = (i + 1) % 2 + 1; /* peer port */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index ca0ee9916e9e..0059b290e095 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -535,23 +535,16 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
do_div(ns, NSEC_PER_SEC / HZ);
clock->overflow_period = ns;
- mdev->clock_info_page = alloc_page(GFP_KERNEL);
- if (mdev->clock_info_page) {
- mdev->clock_info = kmap(mdev->clock_info_page);
- if (!mdev->clock_info) {
- __free_page(mdev->clock_info_page);
- mlx5_core_warn(mdev, "failed to map clock page\n");
- } else {
- mdev->clock_info->sign = 0;
- mdev->clock_info->nsec = clock->tc.nsec;
- mdev->clock_info->cycles = clock->tc.cycle_last;
- mdev->clock_info->mask = clock->cycles.mask;
- mdev->clock_info->mult = clock->nominal_c_mult;
- mdev->clock_info->shift = clock->cycles.shift;
- mdev->clock_info->frac = clock->tc.frac;
- mdev->clock_info->overflow_period =
- clock->overflow_period;
- }
+ mdev->clock_info =
+ (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
+ if (mdev->clock_info) {
+ mdev->clock_info->nsec = clock->tc.nsec;
+ mdev->clock_info->cycles = clock->tc.cycle_last;
+ mdev->clock_info->mask = clock->cycles.mask;
+ mdev->clock_info->mult = clock->nominal_c_mult;
+ mdev->clock_info->shift = clock->cycles.shift;
+ mdev->clock_info->frac = clock->tc.frac;
+ mdev->clock_info->overflow_period = clock->overflow_period;
}
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
@@ -599,8 +592,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
cancel_delayed_work_sync(&clock->overflow_work);
if (mdev->clock_info) {
- kunmap(mdev->clock_info_page);
- __free_page(mdev->clock_info_page);
+ free_page((unsigned long)mdev->clock_info);
mdev->clock_info = NULL;
}
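The hunk above swaps the alloc_page()/kmap() pair for a single get_zeroed_page(), which returns a zeroed, directly addressable kernel pointer, so the separate clock_info_page pointer and the explicit clock_info->sign = 0 initialization can go away. A generic sketch of the pattern (not mlx5-specific):

	void *buf = (void *)get_zeroed_page(GFP_KERNEL);	/* already mapped and zeroed */

	if (buf) {
		/* ... use buf as an ordinary kernel pointer, no kmap()/kunmap() ... */
		free_page((unsigned long)buf);			/* single call on teardown */
	}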
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index 40f4a19b1ce1..be69c1d7941a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -80,10 +80,8 @@ void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy,
mlx5_query_port_tun_entropy(mdev, &entropy_flags);
tun_entropy->num_enabling_entries = 0;
tun_entropy->num_disabling_entries = 0;
- tun_entropy->enabled = entropy_flags.calc_enabled;
- tun_entropy->enabled =
- (entropy_flags.calc_supported) ?
- entropy_flags.calc_enabled : true;
+ tun_entropy->enabled = entropy_flags.calc_supported ?
+ entropy_flags.calc_enabled : true;
}
static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 9a8fd762167b..b9d4f4e19ff9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
+#include <net/vxlan.h>
#include "mlx5_core.h"
#include "vxlan.h"
@@ -204,8 +205,8 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
spin_lock_init(&vxlan->lock);
hash_init(vxlan->htable);
- /* Hardware adds 4789 by default */
- mlx5_vxlan_add_port(vxlan, 4789);
+ /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */
+ mlx5_vxlan_add_port(vxlan, IANA_VXLAN_UDP_PORT);
return vxlan;
}
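For reference, the constant pulled in via the new <net/vxlan.h> include is (assuming the mainline definition):

#define IANA_VXLAN_UDP_PORT	4789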
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 70cc906a102b..61fa1d162d28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
.size = 8,
.limit = 4
},
- .mr_cache[16] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[17] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[18] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[19] = {
- .size = 4,
- .limit = 2
- },
- .mr_cache[20] = {
- .size = 4,
- .limit = 2
- },
},
};
@@ -587,24 +567,23 @@ query_ex:
static int set_hca_cap(struct mlx5_core_dev *dev)
{
- struct pci_dev *pdev = dev->pdev;
int err;
err = handle_hca_cap(dev);
if (err) {
- dev_err(&pdev->dev, "handle_hca_cap failed\n");
+ mlx5_core_err(dev, "handle_hca_cap failed\n");
goto out;
}
err = handle_hca_cap_atomic(dev);
if (err) {
- dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+ mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
goto out;
}
err = handle_hca_cap_odp(dev);
if (err) {
- dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+ mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
goto out;
}
@@ -736,36 +715,28 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return -EOPNOTSUPP;
}
-static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
- struct pci_dev *pdev = dev->pdev;
+ struct mlx5_priv *priv = &dev->priv;
int err = 0;
- pci_set_drvdata(dev->pdev, dev);
- strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
- priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
-
- mutex_init(&priv->pgdir_mutex);
- INIT_LIST_HEAD(&priv->pgdir_list);
- spin_lock_init(&priv->mkey_lock);
+ priv->pci_dev_data = id->driver_data;
- mutex_init(&priv->alloc_mutex);
+ pci_set_drvdata(dev->pdev, dev);
+ dev->bar_addr = pci_resource_start(pdev, 0);
priv->numa_node = dev_to_node(&dev->pdev->dev);
- if (mlx5_debugfs_root)
- priv->dbg_root =
- debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root);
-
err = mlx5_pci_enable_device(dev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
- goto err_dbg;
+ mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
+ return err;
}
err = request_bar(pdev);
if (err) {
- dev_err(&pdev->dev, "error requesting BARs, aborting\n");
+ mlx5_core_err(dev, "error requesting BARs, aborting\n");
goto err_disable;
}
@@ -773,7 +744,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
err = set_dma_caps(pdev);
if (err) {
- dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
+ mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
goto err_clr_master;
}
@@ -782,11 +753,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
mlx5_core_dbg(dev, "Enabling pci atomics failed\n");
- dev->iseg_base = pci_resource_start(dev->pdev, 0);
+ dev->iseg_base = dev->bar_addr;
dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
if (!dev->iseg) {
err = -ENOMEM;
- dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
+ mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
goto err_clr_master;
}
@@ -797,52 +768,47 @@ err_clr_master:
release_bar(dev->pdev);
err_disable:
mlx5_pci_disable_device(dev);
-
-err_dbg:
- debugfs_remove(priv->dbg_root);
return err;
}
-static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
iounmap(dev->iseg);
pci_clear_master(dev->pdev);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
- debugfs_remove_recursive(priv->dbg_root);
}
-static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_init_once(struct mlx5_core_dev *dev)
{
- struct pci_dev *pdev = dev->pdev;
int err;
- priv->devcom = mlx5_devcom_register_device(dev);
- if (IS_ERR(priv->devcom))
- dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n",
- priv->devcom);
+ dev->priv.devcom = mlx5_devcom_register_device(dev);
+ if (IS_ERR(dev->priv.devcom))
+ mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
+ dev->priv.devcom);
err = mlx5_query_board_id(dev);
if (err) {
- dev_err(&pdev->dev, "query board id failed\n");
+ mlx5_core_err(dev, "query board id failed\n");
goto err_devcom;
}
err = mlx5_eq_table_init(dev);
if (err) {
- dev_err(&pdev->dev, "failed to initialize eq\n");
+ mlx5_core_err(dev, "failed to initialize eq\n");
goto err_devcom;
}
err = mlx5_events_init(dev);
if (err) {
- dev_err(&pdev->dev, "failed to initialize events\n");
+ mlx5_core_err(dev, "failed to initialize events\n");
goto err_eq_cleanup;
}
err = mlx5_cq_debugfs_init(dev);
if (err) {
- dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
+ mlx5_core_err(dev, "failed to initialize cq debugfs\n");
goto err_events_cleanup;
}
@@ -858,31 +824,31 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
err = mlx5_init_rl_table(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init rate limiting\n");
+ mlx5_core_err(dev, "Failed to init rate limiting\n");
goto err_tables_cleanup;
}
err = mlx5_mpfs_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init l2 table %d\n", err);
+ mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
goto err_rl_cleanup;
}
err = mlx5_eswitch_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
+ mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
goto err_mpfs_cleanup;
}
err = mlx5_sriov_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
+ mlx5_core_err(dev, "Failed to init sriov %d\n", err);
goto err_eswitch_cleanup;
}
err = mlx5_fpga_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
+ mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
goto err_sriov_cleanup;
}
@@ -932,93 +898,78 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_devcom_unregister_device(dev->priv.devcom);
}
-static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
- bool boot)
+static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
- struct pci_dev *pdev = dev->pdev;
int err;
- dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
- mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
- dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
- __func__);
- goto out;
- }
-
- dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
- fw_rev_min(dev), fw_rev_sub(dev));
+ mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
+ fw_rev_min(dev), fw_rev_sub(dev));
/* Only PFs hold the relevant PCIe information for this query */
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- /* on load removing any previous indication of internal error, device is
- * up
- */
- dev->state = MLX5_DEVICE_STATE_UP;
-
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
if (err) {
- dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
- FW_PRE_INIT_TIMEOUT_MILI);
- goto out_err;
+ mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+ FW_PRE_INIT_TIMEOUT_MILI);
+ return err;
}
err = mlx5_cmd_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
- goto out_err;
+ mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
+ return err;
}
err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
if (err) {
- dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
- FW_INIT_TIMEOUT_MILI);
+ mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
+ FW_INIT_TIMEOUT_MILI);
goto err_cmd_cleanup;
}
err = mlx5_core_enable_hca(dev, 0);
if (err) {
- dev_err(&pdev->dev, "enable hca failed\n");
+ mlx5_core_err(dev, "enable hca failed\n");
goto err_cmd_cleanup;
}
err = mlx5_core_set_issi(dev);
if (err) {
- dev_err(&pdev->dev, "failed to set issi\n");
+ mlx5_core_err(dev, "failed to set issi\n");
goto err_disable_hca;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
- dev_err(&pdev->dev, "failed to allocate boot pages\n");
+ mlx5_core_err(dev, "failed to allocate boot pages\n");
goto err_disable_hca;
}
err = set_hca_ctrl(dev);
if (err) {
- dev_err(&pdev->dev, "set_hca_ctrl failed\n");
+ mlx5_core_err(dev, "set_hca_ctrl failed\n");
goto reclaim_boot_pages;
}
err = set_hca_cap(dev);
if (err) {
- dev_err(&pdev->dev, "set_hca_cap failed\n");
+ mlx5_core_err(dev, "set_hca_cap failed\n");
goto reclaim_boot_pages;
}
err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
- dev_err(&pdev->dev, "failed to allocate init pages\n");
+ mlx5_core_err(dev, "failed to allocate init pages\n");
goto reclaim_boot_pages;
}
err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
- dev_err(&pdev->dev, "init hca failed\n");
+ mlx5_core_err(dev, "init hca failed\n");
goto reclaim_boot_pages;
}
@@ -1028,23 +979,50 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
err = mlx5_query_hca_caps(dev);
if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto err_stop_poll;
+ mlx5_core_err(dev, "query hca failed\n");
+ goto stop_health;
}
- if (boot) {
- err = mlx5_init_once(dev, priv);
- if (err) {
- dev_err(&pdev->dev, "sw objs init failed\n");
- goto err_stop_poll;
- }
+ return 0;
+
+stop_health:
+ mlx5_stop_health_poll(dev, boot);
+reclaim_boot_pages:
+ mlx5_reclaim_startup_pages(dev);
+err_disable_hca:
+ mlx5_core_disable_hca(dev, 0);
+err_cmd_cleanup:
+ mlx5_cmd_cleanup(dev);
+
+ return err;
+}
+
+static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+{
+ int err;
+
+ mlx5_stop_health_poll(dev, boot);
+ err = mlx5_cmd_teardown_hca(dev);
+ if (err) {
+ mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
+ return err;
}
+ mlx5_reclaim_startup_pages(dev);
+ mlx5_core_disable_hca(dev, 0);
+ mlx5_cmd_cleanup(dev);
+
+ return 0;
+}
+
+static int mlx5_load(struct mlx5_core_dev *dev)
+{
+ int err;
dev->priv.uar = mlx5_get_uars_page(dev);
if (IS_ERR(dev->priv.uar)) {
- dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+ mlx5_core_err(dev, "Failed allocating uar, aborting\n");
err = PTR_ERR(dev->priv.uar);
- goto err_get_uars;
+ return err;
}
mlx5_events_start(dev);
@@ -1052,132 +1030,155 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
err = mlx5_eq_table_create(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to create EQs\n");
+ mlx5_core_err(dev, "Failed to create EQs\n");
goto err_eq_table;
}
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
- dev_err(&pdev->dev, "Failed to init FW tracer\n");
+ mlx5_core_err(dev, "Failed to init FW tracer\n");
goto err_fw_tracer;
}
err = mlx5_fpga_device_start(dev);
if (err) {
- dev_err(&pdev->dev, "fpga device start failed %d\n", err);
+ mlx5_core_err(dev, "fpga device start failed %d\n", err);
goto err_fpga_start;
}
err = mlx5_accel_ipsec_init(dev);
if (err) {
- dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
+ mlx5_core_err(dev, "IPSec device start failed %d\n", err);
goto err_ipsec_start;
}
err = mlx5_accel_tls_init(dev);
if (err) {
- dev_err(&pdev->dev, "TLS device start failed %d\n", err);
+ mlx5_core_err(dev, "TLS device start failed %d\n", err);
goto err_tls_start;
}
err = mlx5_init_fs(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init flow steering\n");
+ mlx5_core_err(dev, "Failed to init flow steering\n");
goto err_fs;
}
err = mlx5_core_set_hca_defaults(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to set hca defaults\n");
+ mlx5_core_err(dev, "Failed to set hca defaults\n");
goto err_fs;
}
err = mlx5_sriov_attach(dev);
if (err) {
- dev_err(&pdev->dev, "sriov init failed %d\n", err);
+ mlx5_core_err(dev, "sriov init failed %d\n", err);
goto err_sriov;
}
err = mlx5_ec_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init embedded CPU\n");
+ mlx5_core_err(dev, "Failed to init embedded CPU\n");
goto err_ec;
}
- if (mlx5_device_registered(dev)) {
- mlx5_attach_device(dev);
- } else {
- err = mlx5_register_device(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
- goto err_reg_dev;
- }
- }
-
- set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-out:
- mutex_unlock(&dev->intf_state_mutex);
-
return 0;
-err_reg_dev:
- mlx5_ec_cleanup(dev);
-
err_ec:
mlx5_sriov_detach(dev);
-
err_sriov:
mlx5_cleanup_fs(dev);
-
err_fs:
mlx5_accel_tls_cleanup(dev);
-
err_tls_start:
mlx5_accel_ipsec_cleanup(dev);
-
err_ipsec_start:
mlx5_fpga_device_stop(dev);
-
err_fpga_start:
mlx5_fw_tracer_cleanup(dev->tracer);
-
err_fw_tracer:
mlx5_eq_table_destroy(dev);
-
err_eq_table:
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, priv->uar);
+ mlx5_put_uars_page(dev, dev->priv.uar);
+ return err;
+}
-err_get_uars:
- if (boot)
- mlx5_cleanup_once(dev);
+static void mlx5_unload(struct mlx5_core_dev *dev)
+{
+ mlx5_ec_cleanup(dev);
+ mlx5_sriov_detach(dev);
+ mlx5_cleanup_fs(dev);
+ mlx5_accel_ipsec_cleanup(dev);
+ mlx5_accel_tls_cleanup(dev);
+ mlx5_fpga_device_stop(dev);
+ mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_eq_table_destroy(dev);
+ mlx5_pagealloc_stop(dev);
+ mlx5_events_stop(dev);
+ mlx5_put_uars_page(dev, dev->priv.uar);
+}
-err_stop_poll:
- mlx5_stop_health_poll(dev, boot);
- if (mlx5_cmd_teardown_hca(dev)) {
- dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- goto out_err;
+static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+{
+ int err = 0;
+
+ dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
+ mutex_lock(&dev->intf_state_mutex);
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ mlx5_core_warn(dev, "interface is up, NOP\n");
+ goto out;
}
+ /* remove any previous indication of internal error */
+ dev->state = MLX5_DEVICE_STATE_UP;
-reclaim_boot_pages:
- mlx5_reclaim_startup_pages(dev);
+ err = mlx5_function_setup(dev, boot);
+ if (err)
+ goto out;
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
+ if (boot) {
+ err = mlx5_init_once(dev);
+ if (err) {
+ mlx5_core_err(dev, "sw objs init failed\n");
+ goto function_teardown;
+ }
+ }
-err_cmd_cleanup:
- mlx5_cmd_cleanup(dev);
+ err = mlx5_load(dev);
+ if (err)
+ goto err_load;
+
+ if (mlx5_device_registered(dev)) {
+ mlx5_attach_device(dev);
+ } else {
+ err = mlx5_register_device(dev);
+ if (err) {
+ mlx5_core_err(dev, "register device failed %d\n", err);
+ goto err_reg_dev;
+ }
+ }
-out_err:
+ set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+
+ return err;
+
+err_reg_dev:
+ mlx5_unload(dev);
+err_load:
+ if (boot)
+ mlx5_cleanup_once(dev);
+function_teardown:
+ mlx5_function_teardown(dev, boot);
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
return err;
}
-static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
- bool cleanup)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
int err = 0;
@@ -1186,8 +1187,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
- dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
- __func__);
+ mlx5_core_warn(dev, "%s: interface is down, NOP\n",
+ __func__);
if (cleanup)
mlx5_cleanup_once(dev);
goto out;
@@ -1198,30 +1199,12 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
- mlx5_ec_cleanup(dev);
- mlx5_sriov_detach(dev);
- mlx5_cleanup_fs(dev);
- mlx5_accel_ipsec_cleanup(dev);
- mlx5_accel_tls_cleanup(dev);
- mlx5_fpga_device_stop(dev);
- mlx5_fw_tracer_cleanup(dev->tracer);
- mlx5_eq_table_destroy(dev);
- mlx5_pagealloc_stop(dev);
- mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, priv->uar);
+ mlx5_unload(dev);
+
if (cleanup)
mlx5_cleanup_once(dev);
- mlx5_stop_health_poll(dev, cleanup);
-
- err = mlx5_cmd_teardown_hca(dev);
- if (err) {
- dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- goto out;
- }
- mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
- mlx5_cmd_cleanup(dev);
+ mlx5_function_teardown(dev, cleanup);
out:
mutex_unlock(&dev->intf_state_mutex);
return err;
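For orientation, a rough call-order sketch of the refactored probe path, using only function names introduced in the hunks above (error unwinding omitted):

/*
 *	init_one()
 *	  mlx5_mdev_init()		- SW-only state: debugfs dir, health, pagealloc
 *	  mlx5_pci_init()		- enable PCI, request BARs, DMA caps, map iseg
 *	  mlx5_load_one(dev, true)
 *	    mlx5_function_setup()	- FW handshake: cmd interface, ISSI, pages, HCA caps
 *	    mlx5_init_once()		- SW objects (boot only)
 *	    mlx5_load()			- uars, EQs, FW tracer, FPGA, IPsec/TLS, steering, SR-IOV, EC
 *	    mlx5_register_device()
 *
 *	remove_one() unwinds in reverse via mlx5_unload_one(), mlx5_pci_close()
 *	and mlx5_mdev_uninit().
 */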
@@ -1238,29 +1221,12 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
};
-#define MLX5_IB_MOD "mlx5_ib"
-static int init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
- struct mlx5_core_dev *dev;
- struct devlink *devlink;
- struct mlx5_priv *priv;
+ struct mlx5_priv *priv = &dev->priv;
int err;
- devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
- if (!devlink) {
- dev_err(&pdev->dev, "kzalloc failed\n");
- return -ENOMEM;
- }
-
- dev = devlink_priv(devlink);
- priv = &dev->priv;
- priv->pci_dev_data = id->driver_data;
-
- pci_set_drvdata(pdev, dev);
-
- dev->pdev = pdev;
- dev->profile = &profile[prof_sel];
+ dev->profile = &profile[profile_idx];
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
@@ -1272,25 +1238,75 @@ static int init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
- err = mlx5_pci_init(dev, priv);
- if (err) {
- dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
- goto clean_dev;
+ mutex_init(&priv->alloc_mutex);
+ mutex_init(&priv->pgdir_mutex);
+ INIT_LIST_HEAD(&priv->pgdir_list);
+ spin_lock_init(&priv->mkey_lock);
+
+ priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
+ mlx5_debugfs_root);
+ if (!priv->dbg_root) {
+ dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n");
+ return -ENOMEM;
}
err = mlx5_health_init(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
- goto close_pci;
- }
+ if (err)
+ goto err_health_init;
err = mlx5_pagealloc_init(dev);
if (err)
goto err_pagealloc_init;
- err = mlx5_load_one(dev, priv, true);
+ return 0;
+
+err_pagealloc_init:
+ mlx5_health_cleanup(dev);
+err_health_init:
+ debugfs_remove(dev->priv.dbg_root);
+
+ return err;
+}
+
+static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+{
+ mlx5_pagealloc_cleanup(dev);
+ mlx5_health_cleanup(dev);
+ debugfs_remove_recursive(dev->priv.dbg_root);
+}
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlx5_core_dev *dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+ if (!devlink) {
+ dev_err(&pdev->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ dev = devlink_priv(devlink);
+ dev->device = &pdev->dev;
+ dev->pdev = pdev;
+
+ err = mlx5_mdev_init(dev, prof_sel);
+ if (err)
+ goto mdev_init_err;
+
+ err = mlx5_pci_init(dev, pdev, id);
+ if (err) {
+ mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
+ err);
+ goto pci_init_err;
+ }
+
+ err = mlx5_load_one(dev, true);
if (err) {
- dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+ mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
+ err);
goto err_load_one;
}
@@ -1304,14 +1320,13 @@ static int init_one(struct pci_dev *pdev,
return 0;
clean_load:
- mlx5_unload_one(dev, priv, true);
+ mlx5_unload_one(dev, true);
+
err_load_one:
- mlx5_pagealloc_cleanup(dev);
-err_pagealloc_init:
- mlx5_health_cleanup(dev);
-close_pci:
- mlx5_pci_close(dev, priv);
-clean_dev:
+ mlx5_pci_close(dev);
+pci_init_err:
+ mlx5_mdev_uninit(dev);
+mdev_init_err:
devlink_free(devlink);
return err;
@@ -1321,20 +1336,18 @@ static void remove_one(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_priv *priv = &dev->priv;
devlink_unregister(devlink);
mlx5_unregister_device(dev);
- if (mlx5_unload_one(dev, priv, true)) {
- dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
- mlx5_health_cleanup(dev);
+ if (mlx5_unload_one(dev, true)) {
+ mlx5_core_err(dev, "mlx5_unload_one failed\n");
+ mlx5_health_flush(dev);
return;
}
- mlx5_pagealloc_cleanup(dev);
- mlx5_health_cleanup(dev);
- mlx5_pci_close(dev, priv);
+ mlx5_pci_close(dev);
+ mlx5_mdev_uninit(dev);
devlink_free(devlink);
}
@@ -1342,12 +1355,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_priv *priv = &dev->priv;
- dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_core_info(dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev, false);
- mlx5_unload_one(dev, priv, false);
+ mlx5_unload_one(dev, false);
/* In case of kernel call drain the health wq */
if (state) {
mlx5_drain_health_wq(dev);
@@ -1374,7 +1386,9 @@ static int wait_vital(struct pci_dev *pdev)
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
if (last_count && last_count != count) {
- dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ mlx5_core_info(dev,
+ "wait vital counter value 0x%x after %d iterations\n",
+ count, i);
return 0;
}
last_count = count;
@@ -1390,12 +1404,12 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_core_info(dev, "%s was called\n", __func__);
err = mlx5_pci_enable_device(dev);
if (err) {
- dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
- , __func__, err);
+ mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
+ __func__, err);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -1404,7 +1418,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
if (wait_vital(pdev)) {
- dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+ mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -1414,17 +1428,16 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_priv *priv = &dev->priv;
int err;
- dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_core_info(dev, "%s was called\n", __func__);
- err = mlx5_load_one(dev, priv, false);
+ err = mlx5_load_one(dev, false);
if (err)
- dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
- , __func__, err);
+ mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
+ __func__, err);
else
- dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+ mlx5_core_info(dev, "%s: device recovered\n", __func__);
}
static const struct pci_error_handlers mlx5_err_handler = {
@@ -1486,13 +1499,12 @@ succeed:
static void shutdown(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_priv *priv = &dev->priv;
int err;
- dev_info(&pdev->dev, "Shutdown was called\n");
+ mlx5_core_info(dev, "Shutdown was called\n");
err = mlx5_try_fast_unload(dev);
if (err)
- mlx5_unload_one(dev, priv, false);
+ mlx5_unload_one(dev, false);
mlx5_pci_disable_device(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 7b331674622c..22e69d4813e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -41,6 +41,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/driver.h>
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"
@@ -48,44 +49,57 @@
extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
- dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_dbg_once(__dev, format, ...) \
- dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, \
+#define mlx5_core_dbg_once(__dev, format, ...) \
+ dev_dbg_once((__dev)->device, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
-do { \
- if ((mask) & mlx5_core_debug_mask) \
- mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
+#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
+do { \
+ if ((mask) & mlx5_core_debug_mask) \
+ mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(__dev, format, ...) \
- dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, \
+#define mlx5_core_err(__dev, format, ...) \
+ dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_err_rl(__dev, format, ...) \
- dev_err_ratelimited(&(__dev)->pdev->dev, \
- "%s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, \
- ##__VA_ARGS__)
+#define mlx5_core_err_rl(__dev, format, ...) \
+ dev_err_ratelimited((__dev)->device, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_warn(__dev, format, ...) \
- dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, \
- ##__VA_ARGS__)
+#define mlx5_core_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
#define mlx5_core_warn_once(__dev, format, ...) \
- dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_info(__dev, format, ...) \
- dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx5_core_warn_rl(__dev, format, ...) \
+ dev_warn_ratelimited((__dev)->device, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
+#define mlx5_core_info(__dev, format, ...) \
+ dev_info((__dev)->device, format, ##__VA_ARGS__)
+
+#define mlx5_core_info_rl(__dev, format, ...) \
+ dev_info_ratelimited((__dev)->device, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
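The two new ratelimited variants take the same arguments as their plain counterparts; the call sites below are hypothetical examples, not taken from this patch:

	mlx5_core_warn_rl(mdev, "dropping malformed CQE on cqn 0x%x\n", cqn);
	mlx5_core_info_rl(mdev, "module plugged on port %d\n", port);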
enum {
MLX5_CMD_DATA, /* print command payload only */
@@ -111,7 +125,6 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
@@ -176,6 +189,11 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
void mlx5e_init(void);
void mlx5e_cleanup(void);
+static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+ return pci_num_vf(dev->pdev) ? true : false;
+}
+
static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
{
/* LACP owner conditions:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 41025387ff2c..91bd258ecf1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -200,7 +200,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
- dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
+ dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
@@ -211,11 +211,12 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
+ struct device *device = dev->device;
+ int nid = dev_to_node(device);
struct page *page;
u64 zero_addr = 1;
u64 addr;
int err;
- int nid = dev_to_node(&dev->pdev->dev);
page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
if (!page) {
@@ -223,9 +224,8 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
return -ENOMEM;
}
map:
- addr = dma_map_page(&dev->pdev->dev, page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
goto err_mapping;
@@ -240,8 +240,7 @@ map:
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
err_mapping:
@@ -249,7 +248,7 @@ err_mapping:
__free_page(page);
if (zero_addr == 0)
- dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+ dma_unmap_page(device, zero_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
return err;
@@ -600,8 +599,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
return 0;
}
- mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages,
- dev->priv.name);
+ mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
while (*pages) {
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
@@ -614,6 +612,6 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
msleep(50);
}
- mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+ mlx5_core_dbg(dev, "All pages received\n");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 21b7f05b16a5..cc262b30aed5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -293,15 +293,36 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
return 0;
}
+static int mlx5_eeprom_page(int offset)
+{
+ if (offset < MLX5_EEPROM_PAGE_LENGTH)
+ /* Addresses between 0-255 - page 00 */
+ return 0;
+
+ /* Addresses between 256 - 639 belong to pages 01, 02 and 03
+ * For example, offset = 400 belongs to page 02:
+ * 1 + ((400 - 256)/128) = 2
+ */
+ return 1 + ((offset - MLX5_EEPROM_PAGE_LENGTH) /
+ MLX5_EEPROM_HIGH_PAGE_LENGTH);
+}
+
+static int mlx5_eeprom_high_page_offset(int page_num)
+{
+ if (!page_num) /* Page 0 always starts from the low page */
+ return 0;
+
+ /* High page */
+ return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
+}
+
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data)
{
+ int module_num, page_num, status, err;
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
u32 in[MLX5_ST_SZ_DW(mcia_reg)];
- int module_num;
u16 i2c_addr;
- int status;
- int err;
void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
err = mlx5_query_module_num(dev, &module_num);
@@ -311,21 +332,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
memset(in, 0, sizeof(in));
size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
- if (offset < MLX5_EEPROM_PAGE_LENGTH &&
- offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ /* Get the page number related to the given offset */
+ page_num = mlx5_eeprom_page(offset);
+
+ /* Set the right offset according to the page number.
+ * For page_num > 0, the relative offset is always >= 128 (high page).
+ */
+ offset -= mlx5_eeprom_high_page_offset(page_num);
+
+ if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
i2c_addr = MLX5_I2C_ADDR_LOW;
- if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
- i2c_addr = MLX5_I2C_ADDR_HIGH;
- offset -= MLX5_EEPROM_PAGE_LENGTH;
- }
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
- MLX5_SET(mcia_reg, in, page_number, 0);
+ MLX5_SET(mcia_reg, in, page_number, page_num);
MLX5_SET(mcia_reg, in, device_address, offset);
MLX5_SET(mcia_reg, in, size, size);
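As an aside (not part of the patch): the page/offset arithmetic above can be checked in isolation. The sketch below mirrors mlx5_eeprom_page() and mlx5_eeprom_high_page_offset() as a standalone userspace program, using the 256-byte low page and 128-byte high-page lengths implied by the comments; the constant names are local stand-ins, not the driver's.

#include <stdio.h>

#define EEPROM_PAGE_LENGTH	256	/* low page: offsets 0-255 */
#define EEPROM_HIGH_PAGE_LENGTH	128	/* upper half of each high page */

static int eeprom_page(int offset)
{
	if (offset < EEPROM_PAGE_LENGTH)
		return 0;
	return 1 + (offset - EEPROM_PAGE_LENGTH) / EEPROM_HIGH_PAGE_LENGTH;
}

static int eeprom_high_page_offset(int page_num)
{
	return page_num ? page_num * EEPROM_HIGH_PAGE_LENGTH : 0;
}

int main(void)
{
	int offsets[] = { 0, 255, 256, 400, 639 };
	int i;

	for (i = 0; i < 5; i++) {
		int page = eeprom_page(offsets[i]);
		int dev_addr = offsets[i] - eeprom_high_page_offset(page);

		/* e.g. offset 400 -> page 2, device_address 144 */
		printf("offset %3d -> page %d, device_address %d\n",
		       offsets[i], page, dev_addr);
	}
	return 0;
}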
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
new file mode 100644
index 000000000000..86f77456f873
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#include <linux/mlx5/vport.h>
+#include <rdma/ib_verbs.h>
+#include <net/addrconf.h>
+
+#include "lib/mlx5.h"
+#include "eswitch.h"
+#include "fs_core.h"
+#include "rdma.h"
+
+static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_roce *roce = &dev->priv.roce;
+
+ if (!roce->ft)
+ return;
+
+ mlx5_del_flow_rules(roce->allow_rule);
+ mlx5_destroy_flow_group(roce->fg);
+ mlx5_destroy_flow_table(roce->ft);
+}
+
+static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_roce *roce = &dev->priv.roce;
+ struct mlx5_flow_handle *flow_rule = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ void *match_criteria;
+ u32 *flow_group_in;
+ void *misc;
+ int err;
+
+ if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)))
+ return -EOPNOTSUPP;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ kvfree(flow_group_in);
+ return -ENOMEM;
+ }
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
+ if (!ns) {
+ mlx5_core_err(dev, "Failed to get RDMA RX namespace");
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ mlx5_core_err(dev, "Failed to create RDMA RX flow table");
+ err = PTR_ERR(ft);
+ goto free;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_port);
+
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(dev, "Failed to create RDMA RX flow group err(%d)\n", err);
+ goto destroy_flow_table;
+ }
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port,
+ dev->priv.eswitch->manager_vport);
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ mlx5_core_err(dev, "Failed to add RoCE allow rule, err=%d\n",
+ err);
+ goto destroy_flow_group;
+ }
+
+ kvfree(spec);
+ kvfree(flow_group_in);
+ roce->ft = ft;
+ roce->fg = fg;
+ roce->allow_rule = flow_rule;
+
+ return 0;
+
+destroy_flow_table:
+ mlx5_destroy_flow_table(ft);
+destroy_flow_group:
+ mlx5_destroy_flow_group(fg);
+free:
+ kvfree(spec);
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
+{
+ mlx5_core_roce_gid_set(dev, 0, 0, 0,
+ NULL, NULL, false, 0, 0);
+}
+
+static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
+{
+ u8 hw_id[ETH_ALEN];
+
+ mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ addrconf_addr_eui48(&gid->raw[8], hw_id);
+}
+
+static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
+{
+ union ib_gid gid;
+ u8 mac[ETH_ALEN];
+
+ mlx5_rdma_make_default_gid(dev, &gid);
+ return mlx5_core_roce_gid_set(dev, 0,
+ MLX5_ROCE_VERSION_1,
+ 0, gid.raw, mac,
+ false, 0, 1);
+}
+
+void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
+{
+ mlx5_rdma_disable_roce_steering(dev);
+ mlx5_rdma_del_roce_addr(dev);
+ mlx5_nic_vport_disable_roce(dev);
+}
+
+void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_nic_vport_enable_roce(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
+ return;
+ }
+
+ err = mlx5_rdma_add_roce_addr(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to add RoCE address: %d\n", err);
+ goto disable_roce;
+ }
+
+ err = mlx5_rdma_enable_roce_steering(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to enable RoCE steering: %d\n", err);
+ goto del_roce_addr;
+ }
+
+ return;
+
+del_roce_addr:
+ mlx5_rdma_del_roce_addr(dev);
+disable_roce:
+ mlx5_nic_vport_disable_roce(dev);
+ return;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
new file mode 100644
index 000000000000..750cff2a71a4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_RDMA_H__
+#define __MLX5_RDMA_H__
+
+#include "mlx5_core.h"
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
+
+#else /* CONFIG_MLX5_ESWITCH */
+
+static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
+
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_RDMA_H__ */
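For context (illustrative, not part of the patch): the two new entry points are meant to be called once each from the core device bring-up and tear-down paths. A minimal sketch of that pairing, with the caller names assumed for illustration:

#include "rdma.h"

/* Hypothetical callers -- the real hook point is wherever the core
 * device load/unload sequence lives.
 */
static void example_dev_load(struct mlx5_core_dev *dev)
{
	/* Enables RoCE on the NIC vport, programs a default GID and
	 * installs the RDMA RX steering rule; on failure it logs and
	 * unwinds internally, so the caller does not fail the load.
	 */
	mlx5_rdma_enable_roce(dev);
}

static void example_dev_unload(struct mlx5_core_dev *dev)
{
	/* Reverse of enable: steering rule, GID, then NIC vport RoCE. */
	mlx5_rdma_disable_roce(dev);
}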
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 7b23fa8d2d60..a249b3c3843d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -36,13 +36,6 @@
#include "mlx5_core.h"
#include "eswitch.h"
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
- return !!sriov->num_vfs;
-}
-
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
@@ -151,33 +144,10 @@ out:
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
-static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
-{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err = 0;
-
- if (pci_num_vf(pdev)) {
- mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
- return -EBUSY;
- }
-
- err = pci_enable_sriov(pdev, num_vfs);
- if (err)
- mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-
- return err;
-}
-
-static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
-{
- pci_disable_sriov(pdev);
-}
-
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err = 0;
+ int err;
err = mlx5_device_enable_sriov(dev, num_vfs);
if (err) {
@@ -185,42 +155,37 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
- err = mlx5_pci_enable_sriov(pdev, num_vfs);
+ err = pci_enable_sriov(pdev, num_vfs);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+ mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
mlx5_device_disable_sriov(dev);
- return err;
}
-
- sriov->num_vfs = num_vfs;
-
- return 0;
+ return err;
}
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- mlx5_pci_disable_sriov(pdev);
+ pci_disable_sriov(pdev);
mlx5_device_disable_sriov(dev);
- sriov->num_vfs = 0;
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err = 0;
mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
- if (!mlx5_core_is_pf(dev))
- return -EPERM;
if (num_vfs)
err = mlx5_sriov_enable(pdev, num_vfs);
else
mlx5_sriov_disable(pdev);
+ if (!err)
+ sriov->num_vfs = num_vfs;
return err ? err : num_vfs;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index c4d4b76096dc..b1068500f1df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -182,16 +182,24 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
+int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
+ u32 *in, int inlen,
+ u32 *out, int outlen)
+{
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+ return mlx5_cmd_exec(dev, in, inlen, out, outlen);
+}
+EXPORT_SYMBOL(mlx5_core_create_tir_out);
+
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
- u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
- MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_core_create_tir_out(dev, in, inlen,
+ out, sizeof(out));
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 94464723ff77..0d006224d7b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -79,7 +79,7 @@ static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
else
system_page_index = index;
- return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+ return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
}
static void up_rel_func(struct kref *kref)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index ef95feca9961..95cdc8cbcba4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -371,67 +371,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
- u16 vport,
- u16 vlans[],
- int *size)
-{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
- void *nic_vport_ctx;
- int req_list_size;
- int max_list_size;
- int out_sz;
- void *out;
- int err;
- int i;
-
- req_list_size = *size;
- max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
- if (req_list_size > max_list_size) {
- mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
- req_list_size, max_list_size);
- req_list_size = max_list_size;
- }
-
- out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
- req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
-
- memset(in, 0, sizeof(in));
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
- MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
- MLX5_NVPRT_LIST_TYPE_VLAN);
- MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-
- if (vport)
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
-
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
- if (err)
- goto out;
-
- nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
- nic_vport_context);
- req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
- allowed_list_size);
-
- *size = req_list_size;
- for (i = 0; i < req_list_size; i++) {
- void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
- nic_vport_ctx,
- current_uc_mac_address[i]);
- vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
- }
-out:
- kfree(out);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
-
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index ea934a48c90a..1f87cce421e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -134,6 +134,11 @@ static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq)
*wq->db = cpu_to_be32(wq->wqe_ctr);
}
+static inline u16 mlx5_wq_cyc_get_ctr_wrap_cnt(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr >> wq->fbc.log_sz;
+}
+
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
return ctr & wq->fbc.sz_m1;
@@ -243,6 +248,13 @@ static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
+static inline u16 mlx5_wq_ll_get_wqe_next_ix(struct mlx5_wq_ll *wq, u16 ix)
+{
+ struct mlx5_wqe_srq_next_seg *wqe = mlx5_wq_ll_get_wqe(wq, ix);
+
+ return be16_to_cpu(wqe->next_wqe_index);
+}
+
static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
{
wq->head = head_next;
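A quick aside on the counter helpers added above (not part of the patch): the cyclic WQ keeps a free-running 16-bit counter, where the low log_sz bits select the ring entry and the remaining bits count how many times the ring has wrapped. The standalone sketch below reproduces mlx5_wq_cyc_ctr2ix() and mlx5_wq_cyc_get_ctr_wrap_cnt() for a hypothetical 8-entry ring:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int log_sz = 3;			/* 2^3 = 8 entries */
	const uint16_t sz_m1 = (1u << log_sz) - 1;	/* index mask, 7 */
	uint16_t ctr;

	for (ctr = 5; ctr <= 21; ctr += 8)
		printf("ctr %2u -> ix %u, wrap_cnt %u\n",
		       ctr, ctr & sz_m1, ctr >> log_sz);
	/* prints: ctr  5 -> ix 5, wrap_cnt 0
	 *         ctr 13 -> ix 5, wrap_cnt 1
	 *         ctr 21 -> ix 5, wrap_cnt 2
	 */
	return 0;
}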
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 9c195dfed031..b6b3ff0fe17f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -4,6 +4,7 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
+ select NET_DEVLINK
---help---
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index a01d15546e37..c4dc72e1ce63 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -28,8 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o \
- spectrum_nve.o spectrum_nve_vxlan.o
+ spectrum_nve.o spectrum_nve_vxlan.o \
+ spectrum_dpipe.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
-mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index d23d53c0e284..bcbe07ec22be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+ emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
if (!emad_wq)
return -ENOMEM;
mlxsw_core->emad_wq = emad_wq;
@@ -781,7 +781,8 @@ mlxsw_devlink_sb_pool_get(struct devlink *devlink,
static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
unsigned int sb_index, u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type)
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
@@ -789,7 +790,8 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
if (!mlxsw_driver->sb_pool_set)
return -EOPNOTSUPP;
return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
- pool_index, size, threshold_type);
+ pool_index, size, threshold_type,
+ extack);
}
static void *__dl_port(struct devlink_port *devlink_port)
@@ -829,7 +831,8 @@ static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold)
+ u32 threshold,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
@@ -839,7 +842,7 @@ static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
!mlxsw_core_port_check(mlxsw_core_port))
return -EOPNOTSUPP;
return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
- pool_index, threshold);
+ pool_index, threshold, extack);
}
static int
@@ -864,7 +867,8 @@ static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold)
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
@@ -875,7 +879,7 @@ mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
return -EOPNOTSUPP;
return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
tc_index, pool_type,
- pool_index, threshold);
+ pool_index, threshold, extack);
}
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
@@ -934,6 +938,46 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
+static int
+mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+ u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
+ char mgir_pl[MLXSW_REG_MGIR_LEN];
+ char buf[32];
+ int err;
+
+ err = devlink_info_driver_name_put(req,
+ mlxsw_core->bus_info->device_kind);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
+ &fw_minor, &fw_sub_minor);
+
+ sprintf(buf, "%X", hw_rev);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ return err;
+
+ err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
+ if (err)
+ return err;
+
+ sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
+ err = devlink_info_version_running_put(req, "fw.version", buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
@@ -968,6 +1012,7 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
+ .info_get = mlxsw_devlink_info_get,
};
static int
@@ -1718,7 +1763,11 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_res_get);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+ u32 port_number, bool split,
+ u32 split_port_subnumber,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct mlxsw_core_port *mlxsw_core_port =
@@ -1727,6 +1776,9 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
int err;
mlxsw_core_port->local_port = local_port;
+ devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_number, split, split_port_subnumber,
+ switch_id, switch_id_len);
err = devlink_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
@@ -1746,17 +1798,13 @@ void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
EXPORT_SYMBOL(mlxsw_core_port_fini);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
- void *port_driver_priv, struct net_device *dev,
- u32 port_number, bool split,
- u32 split_port_subnumber)
+ void *port_driver_priv, struct net_device *dev)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
mlxsw_core_port->port_driver_priv = port_driver_priv;
- devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- port_number, split, split_port_subnumber);
devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
@@ -1796,16 +1844,18 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
-int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
- u8 local_port, char *name, size_t len)
+
+struct devlink_port *
+mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
+ u8 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
- return devlink_port_get_phys_port_name(devlink_port, name, len);
+ return devlink_port;
}
-EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
+EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
@@ -1958,10 +2008,10 @@ static int __init mlxsw_core_module_init(void)
{
int err;
- mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
if (!mlxsw_wq)
return -ENOMEM;
- mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+ mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
mlxsw_core_driver_name);
if (!mlxsw_owq) {
err = -ENOMEM;
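One more aside on the new mlxsw_devlink_info_get() above (illustrative, not part of the patch): the MGIR fields are exposed to devlink as plain strings. The sketch below only reproduces the formatting, with made-up register values, so it is clear what "hw.revision" and "fw.version" end up looking like:

#include <stdio.h>

int main(void)
{
	/* Values here are invented for illustration. */
	unsigned int hw_rev = 0xA1;
	unsigned int fw_major = 13, fw_minor = 2000, fw_sub_minor = 1122;
	char buf[32];

	sprintf(buf, "%X", hw_rev);
	printf("hw.revision (fixed):  %s\n", buf);	/* "A1" */

	sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
	printf("fw.version (running): %s\n", buf);	/* "13.2000.1122" */
	return 0;
}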
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 8ec53f027575..917be621c904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -164,20 +164,23 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port);
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port);
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+ u32 port_number, bool split,
+ u32 split_port_subnumber,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len);
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
- void *port_driver_priv, struct net_device *dev,
- u32 port_number, bool split,
- u32 split_port_subnumber);
+ void *port_driver_priv, struct net_device *dev);
void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
-int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
- u8 local_port, char *name, size_t len);
+struct devlink_port *
+mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
+ u8 local_port);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
@@ -251,13 +254,14 @@ struct mlxsw_driver {
struct devlink_sb_pool_info *pool_info);
int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type);
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold);
+ u32 threshold, struct netlink_ext_ack *extack);
int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
@@ -265,7 +269,8 @@ struct mlxsw_driver {
int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold);
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index);
int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 00c390024350..cf2114273b72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -51,33 +51,20 @@ static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
return 0;
}
-static int
-mlxsw_m_port_get_phys_port_name(struct net_device *dev, char *name, size_t len)
-{
- struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
- struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- u8 local_port = mlxsw_m_port->local_port;
-
- return mlxsw_core_port_get_phys_port_name(core, local_port, name, len);
-}
-
-static int mlxsw_m_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_m_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- ppid->id_len = sizeof(mlxsw_m->base_mac);
- memcpy(&ppid->id, &mlxsw_m->base_mac, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_m->core,
+ mlxsw_m_port->local_port);
}
static const struct net_device_ops mlxsw_m_port_netdev_ops = {
.ndo_open = mlxsw_m_port_dummy_open_stop,
.ndo_stop = mlxsw_m_port_dummy_open_stop,
- .ndo_get_phys_port_name = mlxsw_m_port_get_phys_port_name,
- .ndo_get_port_parent_id = mlxsw_m_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
static int mlxsw_m_get_module_info(struct net_device *netdev,
@@ -150,7 +137,10 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port,
+ module + 1, false, 0,
+ mlxsw_m->base_mac,
+ sizeof(mlxsw_m->base_mac));
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -190,7 +180,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
}
mlxsw_core_port_eth_set(mlxsw_m->core, mlxsw_m_port->local_port,
- mlxsw_m_port, dev, module + 1, false, 0);
+ mlxsw_m_port, dev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index ffee38e36ce8..8648ca171254 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index eb4c5e8964cd..e8002bfc1e8f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5210,6 +5210,42 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
mlxsw_reg_pspa_sub_port_set(payload, 0);
}
+/* PPLR - Port Physical Loopback Register
+ * --------------------------------------
+ * This register allows configuration of the port's loopback mode.
+ */
+#define MLXSW_REG_PPLR_ID 0x5018
+#define MLXSW_REG_PPLR_LEN 0x8
+
+MLXSW_REG_DEFINE(pplr, MLXSW_REG_PPLR_ID, MLXSW_REG_PPLR_LEN);
+
+/* reg_pplr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8);
+
+/* Phy local loopback. When set the port's egress traffic is looped back
+ * to the receiver and the port transmitter is disabled.
+ */
+#define MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL BIT(1)
+
+/* reg_pplr_lb_en
+ * Loopback enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pplr, lb_en, 0x04, 0, 8);
+
+static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
+ bool phy_local)
+{
+ MLXSW_REG_ZERO(pplr, payload);
+ mlxsw_reg_pplr_local_port_set(payload, local_port);
+ mlxsw_reg_pplr_lb_en_set(payload,
+ phy_local ?
+ MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
+}
+
/* HTGT - Host Trap Group Table
* ----------------------------
* Configures the properties for forwarding to CPU.
@@ -8534,6 +8570,60 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
mlxsw_reg_mpar_pa_id_set(payload, pa_id);
}
+/* MGIR - Management General Information Register
+ * ----------------------------------------------
+ * MGIR register allows software to query the hardware and firmware general
+ * information.
+ */
+#define MLXSW_REG_MGIR_ID 0x9020
+#define MLXSW_REG_MGIR_LEN 0x9C
+
+MLXSW_REG_DEFINE(mgir, MLXSW_REG_MGIR_ID, MLXSW_REG_MGIR_LEN);
+
+/* reg_mgir_hw_info_device_hw_revision
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, hw_info_device_hw_revision, 0x0, 16, 16);
+
+#define MLXSW_REG_MGIR_FW_INFO_PSID_SIZE 16
+
+/* reg_mgir_fw_info_psid
+ * PSID (ASCII string).
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mgir, fw_info_psid, 0x30, MLXSW_REG_MGIR_FW_INFO_PSID_SIZE);
+
+/* reg_mgir_fw_info_extended_major
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_major, 0x44, 0, 32);
+
+/* reg_mgir_fw_info_extended_minor
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_minor, 0x48, 0, 32);
+
+/* reg_mgir_fw_info_extended_sub_minor
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_sub_minor, 0x4C, 0, 32);
+
+static inline void mlxsw_reg_mgir_pack(char *payload)
+{
+ MLXSW_REG_ZERO(mgir, payload);
+}
+
+static inline void
+mlxsw_reg_mgir_unpack(char *payload, u32 *hw_rev, char *fw_info_psid,
+ u32 *fw_major, u32 *fw_minor, u32 *fw_sub_minor)
+{
+ *hw_rev = mlxsw_reg_mgir_hw_info_device_hw_revision_get(payload);
+ mlxsw_reg_mgir_fw_info_psid_memcpy_from(payload, fw_info_psid);
+ *fw_major = mlxsw_reg_mgir_fw_info_extended_major_get(payload);
+ *fw_minor = mlxsw_reg_mgir_fw_info_extended_minor_get(payload);
+ *fw_sub_minor = mlxsw_reg_mgir_fw_info_extended_sub_minor_get(payload);
+}
+
/* MRSR - Management Reset and Shutdown Register
* ---------------------------------------------
* MRSR register is used to reset or shutdown the switch or
@@ -9927,6 +10017,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pptb),
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
+ MLXSW_REG(pplr),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
@@ -9958,6 +10049,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcia),
MLXSW_REG(mpat),
MLXSW_REG(mpar),
+ MLXSW_REG(mgir),
MLXSW_REG(mrsr),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 773ef7fdb285..33a9fc9ef6a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -24,6 +24,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
+ MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
+ MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
MLXSW_RES_ID_MAX_BUFFER_SIZE,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -78,6 +80,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
+ [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
+ [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
[MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9eb63300c1d3..dbb425717f5e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -21,7 +21,7 @@
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
-#include <linux/random.h>
+#include <linux/jhash.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -46,8 +46,8 @@
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1910
-#define MLXSW_SP1_FWREV_SUBMINOR 622
+#define MLXSW_SP1_FWREV_MINOR 2000
+#define MLXSW_SP1_FWREV_SUBMINOR 1122
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -1254,16 +1254,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
- size_t len)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
- return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
- mlxsw_sp_port->local_port,
- name, len);
-}
-
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
unsigned long cookie) {
@@ -1279,21 +1269,19 @@ mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
- const struct tc_action *a,
+ const struct flow_action_entry *act,
bool ingress)
{
enum mlxsw_sp_span_type span_type;
- struct net_device *to_dev;
- to_dev = tcf_mirred_dev(a);
- if (!to_dev) {
+ if (!act->dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
}
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
true, &mirror->span_id);
}
@@ -1312,7 +1300,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *cls,
- const struct tc_action *a,
+ const struct flow_action_entry *act,
bool ingress)
{
int err;
@@ -1323,18 +1311,18 @@ mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
netdev_err(mlxsw_sp_port->dev, "sample already active\n");
return -EEXIST;
}
- if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+ if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
return -EOPNOTSUPP;
}
rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
- tcf_sample_psample_group(a));
- mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
- mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
- mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+ act->sample.psample_group);
+ mlxsw_sp_port->sample->truncate = act->sample.truncate;
+ mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
+ mlxsw_sp_port->sample->rate = act->sample.rate;
- err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
+ err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
if (err)
goto err_port_sample_set;
return 0;
@@ -1360,10 +1348,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
__be16 protocol = f->common.protocol;
- const struct tc_action *a;
+ struct flow_action_entry *act;
int err;
- if (!tcf_exts_has_one_action(f->exts)) {
+ if (!flow_offload_has_one_action(&f->rule->action)) {
netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
return -EOPNOTSUPP;
}
@@ -1373,19 +1361,21 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
mall_tc_entry->cookie = f->cookie;
- a = tcf_exts_first_action(f->exts);
+ act = &f->rule->action.entries[0];
- if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+ if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
mirror = &mall_tc_entry->mirror;
err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
- mirror, a, ingress);
- } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+ mirror, act,
+ ingress);
+ } else if (act->id == FLOW_ACTION_SAMPLE &&
+ protocol == htons(ETH_P_ALL)) {
mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
- a, ingress);
+ act, ingress);
} else {
err = -EOPNOTSUPP;
}
@@ -1679,6 +1669,25 @@ static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
return 0;
}
+static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ char pplr_pl[MLXSW_REG_PPLR_LEN];
+ int err;
+
+ if (netif_running(dev))
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+
+ mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
+ pplr_pl);
+
+ if (netif_running(dev))
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+
+ return err;
+}
+
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
static int mlxsw_sp_handle_feature(struct net_device *dev,
@@ -1710,20 +1719,30 @@ static int mlxsw_sp_handle_feature(struct net_device *dev,
static int mlxsw_sp_set_features(struct net_device *dev,
netdev_features_t features)
{
- return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+ netdev_features_t oper_features = dev->features;
+ int err = 0;
+
+ err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
mlxsw_sp_feature_hw_tc);
+ err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
+ mlxsw_sp_feature_loopback);
+
+ if (err) {
+ dev->features = oper_features;
+ return -EINVAL;
+ }
+
+ return 0;
}
-static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- ppid->id_len = sizeof(mlxsw_sp->base_mac);
- memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ mlxsw_sp_port->local_port);
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1739,9 +1758,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
- .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3126,11 +3144,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
if (err)
return err;
+ mlxsw_sp_port->link.autoneg = autoneg;
+
if (!netif_running(dev))
return 0;
- mlxsw_sp_port->link.autoneg = autoneg;
-
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
@@ -3316,7 +3334,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HIERARCY_TC,
i + 8, i,
- false, 0);
+ true, 100);
if (err)
return err;
}
@@ -3391,7 +3409,10 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
+ module + 1, split, lane / width,
+ mlxsw_sp->base_mac,
+ sizeof(mlxsw_sp->base_mac));
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -3462,7 +3483,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- dev->hw_features |= NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
dev->min_mtu = 0;
dev->max_mtu = ETH_MAX_MTU;
@@ -3573,8 +3594,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
- mlxsw_sp_port, dev, module + 1,
- mlxsw_sp_port->split, lane / width);
+ mlxsw_sp_port, dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
return 0;
@@ -3710,14 +3730,14 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
}
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
- u8 module, unsigned int count)
+ u8 module, unsigned int count, u8 offset)
{
u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
int err, i;
for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
+ true, module, width, i * width);
if (err)
goto err_port_create;
}
@@ -3726,8 +3746,8 @@ static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
err_port_create:
for (i--; i >= 0; i--)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
return err;
}
@@ -3758,11 +3778,19 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 local_ports_in_1x, local_ports_in_2x, offset;
struct mlxsw_sp_port *mlxsw_sp_port;
u8 module, cur_width, base_port;
int i;
int err;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
+ return -EIO;
+
+ local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
+ local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
+
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -3788,13 +3816,15 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
/* Make sure we have enough slave (even) ports for the split. */
if (count == 2) {
+ offset = local_ports_in_2x;
base_port = local_port;
- if (mlxsw_sp->ports[base_port + 1]) {
+ if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
return -EINVAL;
}
} else {
+ offset = local_ports_in_1x;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
if (mlxsw_sp->ports[base_port + 1] ||
mlxsw_sp->ports[base_port + 3]) {
@@ -3805,10 +3835,11 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
}
for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
- err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
+ offset);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
@@ -3825,11 +3856,19 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 local_ports_in_1x, local_ports_in_2x, offset;
struct mlxsw_sp_port *mlxsw_sp_port;
u8 cur_width, base_port;
unsigned int count;
int i;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
+ return -EIO;
+
+ local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
+ local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
+
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -3847,6 +3886,11 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
+ if (count == 2)
+ offset = local_ports_in_2x;
+ else
+ offset = local_ports_in_1x;
+
base_port = mlxsw_sp_cluster_base_port_get(local_port);
/* Determine which ports to remove. */
@@ -3854,8 +3898,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
base_port = base_port + 2;
for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
@@ -4238,7 +4282,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
u32 seed;
int err;
- get_random_bytes(&seed, sizeof(seed));
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
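Before moving on, a short aside on the split-port offsets introduced above (not part of the patch): instead of assuming consecutive local ports, the split now strides by the LOCAL_PORTS_IN_1X/LOCAL_PORTS_IN_2X resources. The sketch below shows the resulting port numbers for hypothetical resource values; the real values are read from the device:

#include <stdio.h>

int main(void)
{
	/* Assumed values, for illustration only. */
	unsigned int local_ports_in_2x = 2;
	unsigned int base_port = 1, count = 2;
	unsigned int offset = local_ports_in_2x;	/* count == 2 case */
	unsigned int i;

	for (i = 0; i < count; i++)
		printf("split instance %u -> local port %u\n",
		       i, base_port + i * offset);
	/* With offset 2 this yields local ports 1 and 3, rather than the
	 * previously hard-coded 1 and 2.
	 */
	return 0;
}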
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index da6278b0caa4..8601b3041acd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -371,13 +371,14 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type);
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold);
+ u32 threshold, struct netlink_ext_ack *extack);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
@@ -385,7 +386,8 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold);
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 8811f6513e36..e993159e8e4c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -216,7 +216,6 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
- bool failed_rollback; /* Indicates failed rollback during migration */
unsigned int ref_count;
};
@@ -1256,11 +1255,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
- if (IS_ERR(new_chunk)) {
- if (ctx->this_is_rollback)
- vchunk->vregion->failed_rollback = true;
+ if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
- }
vchunk->chunk2 = vchunk->chunk;
vchunk->chunk = new_chunk;
ctx->current_vchunk = vchunk;
@@ -1318,8 +1314,13 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
vchunk->chunk, credits);
if (err) {
- if (ctx->this_is_rollback)
+ if (ctx->this_is_rollback) {
+ /* Save the ventry which we ended with and try
+ * to continue later on.
+ */
+ ctx->start_ventry = ventry;
return err;
+ }
/* Swap the chunk and chunk2 pointers so the follow-up
* rollback call will see the original chunk pointer
* in vchunk->chunk.
@@ -1397,8 +1398,12 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
- if (err2)
- vregion->failed_rollback = true;
+ if (err2) {
+ trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
+ vregion);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
+ /* Let the rollback be continued later on. */
+ }
}
mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
@@ -1423,8 +1428,6 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
int err;
trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
- if (vregion->failed_rollback)
- return -EBUSY;
hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
if (IS_ERR(hints_priv))
@@ -1471,11 +1474,9 @@ mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- if (!vregion->failed_rollback) {
- vregion->region2 = NULL;
- mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
- mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
- }
+ vregion->region2 = NULL;
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
ops->region_rehash_hints_put(ctx->hints_priv);
ctx->hints_priv = NULL;
}
@@ -1506,11 +1507,6 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
ctx, credits);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
- if (vregion->failed_rollback) {
- trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp,
- vregion);
- dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
- }
}
if (*credits >= 0)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9a79b5e11597..8512dd49e420 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -6,6 +6,7 @@
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>
+#include <linux/netlink.h>
#include "spectrum.h"
#include "core.h"
@@ -15,6 +16,8 @@
struct mlxsw_sp_sb_pr {
enum mlxsw_reg_sbpr_mode mode;
u32 size;
+ u8 freeze_mode:1,
+ freeze_size:1;
};
struct mlxsw_cp_sb_occ {
@@ -27,6 +30,8 @@ struct mlxsw_sp_sb_cm {
u32 max_buff;
u16 pool_index;
struct mlxsw_cp_sb_occ occ;
+ u8 freeze_pool:1,
+ freeze_thresh:1;
};
#define MLXSW_SP_SB_INFI -1U
@@ -48,7 +53,12 @@ struct mlxsw_sp_sb_pool_des {
u8 pool;
};
-/* Order ingress pools before egress pools. */
+#define MLXSW_SP_SB_POOL_ING 0
+#define MLXSW_SP_SB_POOL_EGR 4
+#define MLXSW_SP_SB_POOL_EGR_MC 8
+#define MLXSW_SP_SB_POOL_ING_CPU 9
+#define MLXSW_SP_SB_POOL_EGR_CPU 10
+
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_INGRESS, 0},
{MLXSW_REG_SBXX_DIR_INGRESS, 1},
@@ -59,6 +69,8 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_EGRESS, 2},
{MLXSW_REG_SBXX_DIR_EGRESS, 3},
{MLXSW_REG_SBXX_DIR_EGRESS, 15},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 4},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
@@ -70,6 +82,9 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_EGRESS, 1},
{MLXSW_REG_SBXX_DIR_EGRESS, 2},
{MLXSW_REG_SBXX_DIR_EGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 15},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 4},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
#define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -93,6 +108,7 @@ struct mlxsw_sp_sb_vals {
unsigned int pool_count;
const struct mlxsw_sp_sb_pool_des *pool_dess;
const struct mlxsw_sp_sb_pm *pms;
+ const struct mlxsw_sp_sb_pm *pms_cpu;
const struct mlxsw_sp_sb_pr *prs;
const struct mlxsw_sp_sb_mm *mms;
const struct mlxsw_sp_sb_cm *cms_ingress;
@@ -274,7 +290,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
const u32 pbs[] = {
[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
- [9] = 2 * MLXSW_PORT_MAX_MTU,
+ [9] = MLXSW_PORT_MAX_MTU,
};
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
@@ -389,45 +405,60 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
.size = _size, \
}
+#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \
+ { \
+ .mode = _mode, \
+ .size = _size, \
+ .freeze_mode = _freeze_mode, \
+ .freeze_size = _freeze_size, \
+ }
+
#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
-#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
+#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
+/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
- /* Ingress pools. */
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP1_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE),
- /* Egress pools. */
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_EGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
+ true, true),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
#define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000
-#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000
+#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
+/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
- /* Ingress pools. */
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP2_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE),
- /* Egress pools. */
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_EGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
+ true, true),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -462,83 +493,106 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
.pool_index = _pool, \
}
+#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_ING, \
+ }
+
+#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR, \
+ }
+
+#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR_MC, \
+ .freeze_pool = true, \
+ .freeze_thresh = true, \
+ }
+
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
- MLXSW_SP_SB_CM(10000, 8, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
- MLXSW_SP_SB_CM(20000, 1, 3),
+ MLXSW_SP_SB_CM_ING(10000, 8),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
- MLXSW_SP_SB_CM(0, 7, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
- MLXSW_SP_SB_CM(20000, 1, 3),
+ MLXSW_SP_SB_CM_ING(0, 7),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(1, 0xff, 4),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR(1, 0xff),
};
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(1, 0xff, 4),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR(1, 0xff),
};
-#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
@@ -646,79 +700,116 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
.max_buff = _max_buff, \
}
+/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
- /* Ingress pools. */
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
- MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
- /* Egress pools. */
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(10000, 90000),
+ MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
+/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
- /* Ingress pools. */
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
- MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
- /* Egress pools. */
+ MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(10000, 90000),
+ MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+/* Order according to mlxsw_sp*_sb_pool_dess */
+static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 90000),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+};
+
+static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const struct mlxsw_sp_sb_pm *pms,
+ bool skip_ingress)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int i;
- int err;
+ int i, err;
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
- const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i];
+ const struct mlxsw_sp_sb_pm *pm = &pms[i];
+ const struct mlxsw_sp_sb_pool_des *des;
u32 max_buff;
u32 min_buff;
+ des = &mlxsw_sp->sb_vals->pool_dess[i];
+ if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ continue;
+
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
max_buff = pm->max_buff;
if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
- err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
- i, min_buff, max_buff);
+ err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
+ max_buff);
if (err)
return err;
}
return 0;
}
-#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
+ mlxsw_sp->sb_vals->pms, false);
+}
+
+static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
+ true);
+}
+
+#define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
{ \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
- .pool_index = _pool, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR, \
}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
};
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
@@ -752,21 +843,22 @@ static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
{
int i;
- for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i)
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
- MLXSW_REG_SBXX_DIR_EGRESS)
- goto out;
- WARN(1, "No egress pools\n");
+ MLXSW_REG_SBXX_DIR_INGRESS)
+ (*p_ingress_len)++;
+ else
+ (*p_egress_len)++;
+ }
-out:
- *p_ingress_len = i;
- *p_egress_len = mlxsw_sp->sb_vals->pool_count - i;
+ WARN(*p_egress_len == 0, "No egress pools\n");
}
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
.pool_dess = mlxsw_sp1_sb_pool_dess,
.pms = mlxsw_sp1_sb_pms,
+ .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
.prs = mlxsw_sp1_sb_prs,
.mms = mlxsw_sp_sb_mms,
.cms_ingress = mlxsw_sp1_sb_cms_ingress,
@@ -782,6 +874,7 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
.pool_dess = mlxsw_sp2_sb_pool_dess,
.pms = mlxsw_sp2_sb_pms,
+ .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
.prs = mlxsw_sp2_sb_prs,
.mms = mlxsw_sp_sb_mms,
.cms_ingress = mlxsw_sp2_sb_cms_ingress,
@@ -796,8 +889,8 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
u32 max_headroom_size;
- u16 ing_pool_count;
- u16 eg_pool_count;
+ u16 ing_pool_count = 0;
+ u16 eg_pool_count = 0;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
@@ -831,6 +924,9 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
if (err)
goto err_sb_cpu_port_sb_cms_init;
+ err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
+ if (err)
+ goto err_sb_cpu_port_pms_init;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
if (err)
goto err_sb_mms_init;
@@ -848,6 +944,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
err_devlink_sb_register:
err_sb_mms_init:
+err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
mlxsw_sp_sb_ports_fini(mlxsw_sp);
@@ -897,16 +994,32 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type)
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ const struct mlxsw_sp_sb_pr *pr;
enum mlxsw_reg_sbpr_mode mode;
- if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
+ pr = &mlxsw_sp->sb_vals->prs[pool_index];
+
+ if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
return -EINVAL;
+ }
+
+	if (pr->freeze_mode && pr->mode != mode) {
+		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
+		return -EINVAL;
+	}
+
+	if (pr->freeze_size && pr->size != size) {
+		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
+		return -EINVAL;
+	}
- mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
pool_size, false);
}
@@ -924,7 +1037,8 @@ static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
}
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
- u32 threshold, u32 *p_max_buff)
+ u32 threshold, u32 *p_max_buff,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
@@ -933,8 +1047,10 @@ static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
- val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
+ val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
return -EINVAL;
+ }
*p_max_buff = val;
} else {
*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
@@ -960,7 +1076,7 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold)
+ u32 threshold, struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
@@ -970,7 +1086,7 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
int err;
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
- threshold, &max_buff);
+ threshold, &max_buff, extack);
if (err)
return err;
@@ -1001,22 +1117,41 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold)
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
+ const struct mlxsw_sp_sb_cm *cm;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
u32 max_buff;
int err;
- if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir)
+ if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
+ NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
return -EINVAL;
+ }
+
+ if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
+ else
+ cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];
+
+ if (cm->freeze_pool && cm->pool_index != pool_index) {
+ NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
+ return -EINVAL;
+ }
+
+ if (cm->freeze_thresh && cm->max_buff != threshold) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
+ return -EINVAL;
+ }
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
- threshold, &max_buff);
+ threshold, &max_buff, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
index e689576231ab..246dbb3c0e1b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -4,24 +4,9 @@
#ifndef _MLXSW_PIPELINE_H_
#define _MLXSW_PIPELINE_H_
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
-
int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp);
-#else
-
-static inline int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
-{
- return 0;
-}
-
-static inline void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
-{
-}
-
-#endif
-
#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif"
#define MLXSW_SP_DPIPE_TABLE_NAME_HOST4 "mlxsw_host4"
#define MLXSW_SP_DPIPE_TABLE_NAME_HOST6 "mlxsw_host6"
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 52fed8c7bf1e..1cda8a248b12 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -13,9 +13,9 @@
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
-#include <linux/random.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
+#include <linux/jhash.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -2371,7 +2371,7 @@ static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}
-static void
+static int
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
enum mlxsw_reg_rauht_op op)
@@ -2385,10 +2385,10 @@ mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
if (neigh_entry->counter_valid)
mlxsw_reg_rauht_pack_counter(rauht_pl,
neigh_entry->counter_index);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
-static void
+static int
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
enum mlxsw_reg_rauht_op op)
@@ -2402,7 +2402,7 @@ mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
if (neigh_entry->counter_valid)
mlxsw_reg_rauht_pack_counter(rauht_pl,
neigh_entry->counter_index);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
@@ -2424,20 +2424,33 @@ mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
bool adding)
{
+ enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
+ int err;
+
if (!adding && !neigh_entry->connected)
return;
neigh_entry->connected = adding;
if (neigh_entry->key.n->tbl->family == AF_INET) {
- mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
- mlxsw_sp_rauht_op(adding));
+ err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
+ op);
+ if (err)
+ return;
} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
return;
- mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
- mlxsw_sp_rauht_op(adding));
+ err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
+ op);
+ if (err)
+ return;
} else {
WARN_ON_ONCE(1);
+ return;
}
+
+ if (adding)
+ neigh_entry->key.n->flags |= NTF_OFFLOADED;
+ else
+ neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
}
void
@@ -2873,12 +2886,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
struct in6_addr *gw;
int ifindex, weight;
- ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
- weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
- gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
+ ifindex = fib6_nh->fib_nh_dev->ifindex;
+ weight = fib6_nh->fib_nh_weight;
+ gw = &fib6_nh->fib_nh_gw6;
if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
weight))
return false;
@@ -2944,7 +2958,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
+ dev = mlxsw_sp_rt6->rt->fib6_nh.fib_nh_dev;
val ^= dev->ifindex;
}
@@ -3610,7 +3624,7 @@ static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
const struct fib_nh *fib_nh,
enum mlxsw_sp_ipip_type *p_ipipt)
{
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
return dev &&
fib_nh->nh_parent->fib_type == RTN_UNICAST &&
@@ -3637,7 +3651,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
struct fib_nh *fib_nh)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
@@ -3681,18 +3695,18 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
struct fib_nh *fib_nh)
{
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
struct in_device *in_dev;
int err;
nh->nh_grp = nh_grp;
nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- nh->nh_weight = fib_nh->nh_weight;
+ nh->nh_weight = fib_nh->fib_nh_weight;
#else
nh->nh_weight = 1;
#endif
- memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
+ memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
if (err)
return err;
@@ -3705,7 +3719,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
in_dev = __in_dev_get_rtnl(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- fib_nh->nh_flags & RTNH_F_LINKDOWN)
+ fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
return 0;
err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
@@ -3804,7 +3818,7 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib_info *fi)
{
- return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
+ return fi->fib_nh->fib_nh_scope == RT_SCOPE_LINK ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
}
@@ -3946,9 +3960,9 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
+ if (nh->rif && nh->rif->dev == rt->fib6_nh.fib_nh_dev &&
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
- &rt->fib6_nh.nh_gw))
+ &rt->fib6_nh.fib_nh_gw6))
return nh;
continue;
}
@@ -3966,7 +3980,7 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
- nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
@@ -3974,9 +3988,9 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
if (nh->offloaded)
- nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
else
- nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -3992,7 +4006,7 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
for (i = 0; i < nh_grp->count; i++) {
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
- nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4008,19 +4022,20 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
+ list)->rt->fib6_nh.fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+ struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
struct mlxsw_sp_nexthop *nh;
nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
if (nh && nh->offloaded)
- mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
+ fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
else
- mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4035,7 +4050,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
+ rt->fib6_nh.fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4913,7 +4928,7 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
/* RTF_CACHE routes are ignored */
- return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+ return !(rt->fib6_flags & RTF_ADDRCONF) && rt->fib6_nh.fib_nh_gw_family;
}
static struct fib6_info *
@@ -4972,8 +4987,8 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt,
enum mlxsw_sp_ipip_type *ret)
{
- return rt->fib6_nh.nh_dev &&
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
+ return rt->fib6_nh.fib_nh_dev &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
@@ -4983,7 +4998,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
struct mlxsw_sp_rif *rif;
int err;
@@ -5026,11 +5041,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
const struct fib6_info *rt)
{
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = rt->fib6_nh.nh_weight;
- memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
+ nh->nh_weight = rt->fib6_nh.fib_nh_weight;
+ memcpy(&nh->gw_addr, &rt->fib6_nh.fib_nh_gw6, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -5053,7 +5068,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
- return rt->fib6_flags & RTF_GATEWAY ||
+ return rt->fib6_nh.fib_nh_gw_family ||
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
@@ -6035,6 +6050,10 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
fr_info = container_of(info, struct fib_rule_notifier_info, info);
rule = fr_info->rule;
+ /* Rule only affects locally generated traffic */
+ if (rule->iifindex == info->net->loopback_dev->ifindex)
+ return 0;
+
switch (info->family) {
case AF_INET:
if (!fib4_rule_default(rule) && !rule->l3mdev)
@@ -6086,10 +6105,20 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return notifier_from_errno(err);
break;
case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
if (router->aborted) {
NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
return notifier_from_errno(-EINVAL);
}
+ if (info->family == AF_INET) {
+ struct fib_entry_notifier_info *fen_info = ptr;
+
+ if (fen_info->fi->fib_nh_is_v6) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ }
break;
}
@@ -6781,7 +6810,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
/* A RIF is not created for macvlan netdevs. Their MAC is used to
* populate the FDB
*/
- if (netif_is_macvlan(dev))
+ if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
return 0;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
@@ -7808,7 +7837,7 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
char recr2_pl[MLXSW_REG_RECR2_LEN];
u32 seed;
- get_random_bytes(&seed, sizeof(seed));
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
mlxsw_reg_recr2_pack(recr2_pl, seed);
mlxsw_sp_mp4_hash_init(recr2_pl);
mlxsw_sp_mp6_hash_init(recr2_pl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 536c23c578c3..560a60e522f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -316,7 +316,11 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
dev = rt->dst.dev;
*saddrp = fl4.saddr;
- *daddrp = rt->rt_gateway;
+ if (rt->rt_gw_family == AF_INET)
+ *daddrp = rt->rt_gw4;
+ /* can not offload if route has an IPv6 gateway */
+ else if (rt->rt_gw_family == AF_INET6)
+ dev = NULL;
out:
ip_rt_put(rt);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f6ce386c3036..50111f228d77 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_index;
int err = 0;
- if (switchdev_trans_ph_prepare(trans))
+ if (switchdev_trans_ph_commit(trans))
return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index bcf2e79a21c8..0d9356b3f65d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -30,6 +30,7 @@ struct mlxsw_sib {
struct mlxsw_sib_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
+ u8 hw_id[ETH_ALEN];
};
struct mlxsw_sib_port {
@@ -102,6 +103,18 @@ mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb,
mlxsw_tx_v1_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+static int mlxsw_sib_hw_id_get(struct mlxsw_sib *mlxsw_sib)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sib->hw_id);
+ return 0;
+}
+
static int
mlxsw_sib_port_admin_status_set(struct mlxsw_sib_port *mlxsw_sib_port,
bool is_up)
@@ -267,7 +280,9 @@ static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
{
int err;
- err = mlxsw_core_port_init(mlxsw_sib->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sib->core, local_port,
+ module + 1, false, 0,
+ mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id));
if (err) {
dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -439,6 +454,12 @@ static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
mlxsw_sib->core = mlxsw_core;
mlxsw_sib->bus_info = mlxsw_bus_info;
+ err = mlxsw_sib_hw_id_get(mlxsw_sib);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Failed to get switch HW ID\n");
+ return err;
+ }
+
err = mlxsw_sib_ports_create(mlxsw_sib);
if (err) {
dev_err(mlxsw_sib->bus_info->dev, "Failed to create ports\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 533fe6235b7c..fc4f19167262 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -379,26 +379,14 @@ mlxsw_sx_port_get_stats64(struct net_device *dev,
stats->tx_dropped = tx_dropped;
}
-static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
- size_t len)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
-
- return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core,
- mlxsw_sx_port->local_port,
- name, len);
-}
-
-static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_sx_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- ppid->id_len = sizeof(mlxsw_sx->hw_id);
- memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_sx->core,
+ mlxsw_sx_port->local_port);
}
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
@@ -407,8 +395,7 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
.ndo_start_xmit = mlxsw_sx_port_xmit,
.ndo_change_mtu = mlxsw_sx_port_change_mtu,
.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
- .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
- .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_sx_port_get_devlink_port,
};
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
@@ -1102,7 +1089,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
- mlxsw_sx_port, dev, module + 1, false, 0);
+ mlxsw_sx_port, dev);
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
@@ -1127,7 +1114,9 @@ static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
{
int err;
- err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
+ module + 1, false, 0,
+ mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 7849119d407a..ba4fdf1b0dea 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -425,8 +425,8 @@ static void ks8851_init_mac(struct ks8851_net *ks)
const u8 *mac_addr;
mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
- if (mac_addr) {
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(dev->dev_addr, mac_addr);
ks8851_write_mac_addr(dev);
return;
}
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index c946841c0a06..e5c8412c08c1 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1327,8 +1327,8 @@ static int ks8851_probe(struct platform_device *pdev)
/* overwriting the default MAC address */
if (pdev->dev.of_node) {
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
- memcpy(ks->mac_addr, mac, ETH_ALEN);
+ if (!IS_ERR(mac))
+ ether_addr_copy(ks->mac_addr, mac);
} else {
struct ks8851_mll_platform_data *pdata;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 8f72587b5a2c..0567e4f387a5 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Microchip ENC28J60 ethernet driver (MAC + PHY)
*
@@ -5,11 +6,6 @@
* Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
* based on enc28j60.c written by David Anders for 2.4 kernel version
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $
*/
@@ -18,9 +14,9 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
+#include <linux/property.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -28,7 +24,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/of_net.h>
#include "enc28j60_hw.h"
@@ -41,10 +36,11 @@
(NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK)
/* Buffer size required for the largest SPI transfer (i.e., reading a
- * frame). */
+ * frame).
+ */
#define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN)
-#define TX_TIMEOUT (4 * HZ)
+#define TX_TIMEOUT (4 * HZ)
/* Max TX retries in case of collision as suggested by errata datasheet */
#define MAX_TX_RETRYCOUNT 16
@@ -83,11 +79,12 @@ static struct {
/*
* SPI read buffer
- * wait for the SPI transfer and copy received data to destination
+ * Wait for the SPI transfer and copy received data to destination.
*/
static int
spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
{
+ struct device *dev = &priv->spi->dev;
u8 *rx_buf = priv->spi_transfer_buf + 4;
u8 *tx_buf = priv->spi_transfer_buf;
struct spi_transfer tx = {
@@ -113,8 +110,8 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
ret = msg.status;
}
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
return ret;
}
@@ -122,9 +119,9 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
/*
* SPI write buffer
*/
-static int spi_write_buf(struct enc28j60_net *priv, int len,
- const u8 *data)
+static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data)
{
+ struct device *dev = &priv->spi->dev;
int ret;
if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0)
@@ -134,8 +131,8 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
memcpy(&priv->spi_transfer_buf[1], data, len);
ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
}
return ret;
}
@@ -143,9 +140,9 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
/*
* basic SPI read operation
*/
-static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
- u8 addr)
+static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr)
{
+ struct device *dev = &priv->spi->dev;
u8 tx_buf[2];
u8 rx_buf[4];
u8 val = 0;
@@ -159,8 +156,8 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
tx_buf[0] = op | (addr & ADDR_MASK);
ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
if (ret)
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
else
val = rx_buf[slen - 1];
@@ -170,28 +167,25 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
/*
* basic SPI write operation
*/
-static int spi_write_op(struct enc28j60_net *priv, u8 op,
- u8 addr, u8 val)
+static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val)
{
+ struct device *dev = &priv->spi->dev;
int ret;
priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK);
priv->spi_transfer_buf[1] = val;
ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
return ret;
}
static void enc28j60_soft_reset(struct enc28j60_net *priv)
{
- if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
/* Errata workaround #1, CLKRDY check is unreliable,
- * delay at least 1 mS instead */
+ * delay at least 1 ms instead */
udelay(2000);
}
@@ -203,7 +197,7 @@ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
u8 b = (addr & BANK_MASK) >> 5;
/* These registers (EIE, EIR, ESTAT, ECON2, ECON1)
- * are present in all banks, no need to switch bank
+ * are present in all banks, no need to switch bank.
*/
if (addr >= EIE && addr <= ECON1)
return;
@@ -242,15 +236,13 @@ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
/*
* Register bit field Set
*/
-static void nolock_reg_bfset(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
enc28j60_set_bank(priv, addr);
spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask);
}
-static void locked_reg_bfset(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
mutex_lock(&priv->lock);
nolock_reg_bfset(priv, addr, mask);
@@ -260,15 +252,13 @@ static void locked_reg_bfset(struct enc28j60_net *priv,
/*
* Register bit field Clear
*/
-static void nolock_reg_bfclr(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
enc28j60_set_bank(priv, addr);
spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask);
}
-static void locked_reg_bfclr(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
mutex_lock(&priv->lock);
nolock_reg_bfclr(priv, addr, mask);
@@ -278,15 +268,13 @@ static void locked_reg_bfclr(struct enc28j60_net *priv,
/*
* Register byte read
*/
-static int nolock_regb_read(struct enc28j60_net *priv,
- u8 address)
+static int nolock_regb_read(struct enc28j60_net *priv, u8 address)
{
enc28j60_set_bank(priv, address);
return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
}
-static int locked_regb_read(struct enc28j60_net *priv,
- u8 address)
+static int locked_regb_read(struct enc28j60_net *priv, u8 address)
{
int ret;
@@ -300,8 +288,7 @@ static int locked_regb_read(struct enc28j60_net *priv,
/*
* Register word read
*/
-static int nolock_regw_read(struct enc28j60_net *priv,
- u8 address)
+static int nolock_regw_read(struct enc28j60_net *priv, u8 address)
{
int rl, rh;
@@ -312,8 +299,7 @@ static int nolock_regw_read(struct enc28j60_net *priv,
return (rh << 8) | rl;
}
-static int locked_regw_read(struct enc28j60_net *priv,
- u8 address)
+static int locked_regw_read(struct enc28j60_net *priv, u8 address)
{
int ret;
@@ -327,15 +313,13 @@ static int locked_regw_read(struct enc28j60_net *priv,
/*
* Register byte write
*/
-static void nolock_regb_write(struct enc28j60_net *priv,
- u8 address, u8 data)
+static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
enc28j60_set_bank(priv, address);
spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data);
}
-static void locked_regb_write(struct enc28j60_net *priv,
- u8 address, u8 data)
+static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
mutex_lock(&priv->lock);
nolock_regb_write(priv, address, data);
@@ -345,8 +329,7 @@ static void locked_regb_write(struct enc28j60_net *priv,
/*
* Register word write
*/
-static void nolock_regw_write(struct enc28j60_net *priv,
- u8 address, u16 data)
+static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
enc28j60_set_bank(priv, address);
spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data);
@@ -354,8 +337,7 @@ static void nolock_regw_write(struct enc28j60_net *priv,
(u8) (data >> 8));
}
-static void locked_regw_write(struct enc28j60_net *priv,
- u8 address, u16 data)
+static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
mutex_lock(&priv->lock);
nolock_regw_write(priv, address, data);
@@ -364,20 +346,23 @@ static void locked_regw_write(struct enc28j60_net *priv,
/*
* Buffer memory read
- * Select the starting address and execute a SPI buffer read
+ * Select the starting address and execute a SPI buffer read.
*/
-static void enc28j60_mem_read(struct enc28j60_net *priv,
- u16 addr, int len, u8 *data)
+static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len,
+ u8 *data)
{
mutex_lock(&priv->lock);
nolock_regw_write(priv, ERDPTL, addr);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
if (netif_msg_drv(priv)) {
+ struct device *dev = &priv->spi->dev;
u16 reg;
+
reg = nolock_regw_read(priv, ERDPTL);
if (reg != addr)
- printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT "
- "(0x%04x - 0x%04x)\n", __func__, reg, addr);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() error writing ERDPT (0x%04x - 0x%04x)\n",
+ __func__, reg, addr);
}
#endif
spi_read_buf(priv, len, data);
@@ -390,6 +375,8 @@ static void enc28j60_mem_read(struct enc28j60_net *priv,
static void
enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
{
+ struct device *dev = &priv->spi->dev;
+
mutex_lock(&priv->lock);
/* Set the write pointer to start of transmit buffer area */
nolock_regw_write(priv, EWRPTL, TXSTART_INIT);
@@ -398,9 +385,9 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
u16 reg;
reg = nolock_regw_read(priv, EWRPTL);
if (reg != TXSTART_INIT)
- printk(KERN_DEBUG DRV_NAME
- ": %s() ERWPT:0x%04x != 0x%04x\n",
- __func__, reg, TXSTART_INIT);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() ERWPT:0x%04x != 0x%04x\n",
+ __func__, reg, TXSTART_INIT);
}
#endif
/* Set the TXND pointer to correspond to the packet size given */
@@ -408,30 +395,28 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
/* write per-packet control byte */
spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() after control byte ERWPT:0x%04x\n",
- __func__, nolock_regw_read(priv, EWRPTL));
+ dev_printk(KERN_DEBUG, dev,
+ "%s() after control byte ERWPT:0x%04x\n",
+ __func__, nolock_regw_read(priv, EWRPTL));
/* copy the packet into the transmit buffer */
spi_write_buf(priv, len, data);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() after write packet ERWPT:0x%04x, len=%d\n",
- __func__, nolock_regw_read(priv, EWRPTL), len);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() after write packet ERWPT:0x%04x, len=%d\n",
+ __func__, nolock_regw_read(priv, EWRPTL), len);
mutex_unlock(&priv->lock);
}
-static unsigned long msec20_to_jiffies;
-
static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val)
{
- unsigned long timeout = jiffies + msec20_to_jiffies;
+ struct device *dev = &priv->spi->dev;
+ unsigned long timeout = jiffies + msecs_to_jiffies(20);
/* 20 msec timeout read */
while ((nolock_regb_read(priv, reg) & mask) != val) {
if (time_after(jiffies, timeout)) {
if (netif_msg_drv(priv))
- dev_dbg(&priv->spi->dev,
- "reg %02x ready timeout!\n", reg);
+ dev_dbg(dev, "reg %02x ready timeout!\n", reg);
return -ETIMEDOUT;
}
cpu_relax();
@@ -449,7 +434,7 @@ static int wait_phy_ready(struct enc28j60_net *priv)
/*
* PHY register read
- * PHY registers are not accessed directly, but through the MII
+ * PHY registers are not accessed directly, but through the MII.
*/
static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
{
@@ -465,7 +450,7 @@ static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
/* quit reading */
nolock_regb_write(priv, MICMD, 0x00);
/* return the data */
- ret = nolock_regw_read(priv, MIRDL);
+ ret = nolock_regw_read(priv, MIRDL);
mutex_unlock(&priv->lock);
return ret;
@@ -494,13 +479,13 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
{
int ret;
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
mutex_lock(&priv->lock);
if (!priv->hw_enable) {
if (netif_msg_drv(priv))
- printk(KERN_INFO DRV_NAME
- ": %s: Setting MAC address to %pM\n",
- ndev->name, ndev->dev_addr);
+ dev_info(dev, "%s: Setting MAC address to %pM\n",
+ ndev->name, ndev->dev_addr);
/* NOTE: MAC address in ENC28J60 is byte-backward */
nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]);
nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]);
@@ -511,9 +496,9 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
ret = 0;
} else {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() Hardware must be disabled to set "
- "Mac address\n", __func__);
+ dev_printk(KERN_DEBUG, dev,
+				   "%s() Hardware must be disabled to set MAC address\n",
+ __func__);
ret = -EBUSY;
}
mutex_unlock(&priv->lock);
@@ -532,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ ether_addr_copy(dev->dev_addr, address->sa_data);
return enc28j60_set_hw_macaddr(dev);
}
@@ -541,33 +526,36 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
*/
static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg)
{
+ struct device *dev = &priv->spi->dev;
+
mutex_lock(&priv->lock);
- printk(KERN_DEBUG DRV_NAME " %s\n"
- "HwRevID: 0x%02x\n"
- "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
- " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
- "MAC : MACON1 MACON3 MACON4\n"
- " 0x%02x 0x%02x 0x%02x\n"
- "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
- " 0x%04x 0x%04x 0x%04x 0x%04x "
- "0x%02x 0x%02x 0x%04x\n"
- "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
- " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
- msg, nolock_regb_read(priv, EREVID),
- nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
- nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
- nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
- nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
- nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
- nolock_regw_read(priv, ERXWRPTL),
- nolock_regw_read(priv, ERXRDPTL),
- nolock_regb_read(priv, ERXFCON),
- nolock_regb_read(priv, EPKTCNT),
- nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
- nolock_regw_read(priv, ETXNDL),
- nolock_regb_read(priv, MACLCON1),
- nolock_regb_read(priv, MACLCON2),
- nolock_regb_read(priv, MAPHSUP));
+ dev_printk(KERN_DEBUG, dev,
+ " %s\n"
+ "HwRevID: 0x%02x\n"
+ "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
+ " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
+ "MAC : MACON1 MACON3 MACON4\n"
+ " 0x%02x 0x%02x 0x%02x\n"
+ "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
+ " 0x%04x 0x%04x 0x%04x 0x%04x "
+ "0x%02x 0x%02x 0x%04x\n"
+ "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
+ " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
+ msg, nolock_regb_read(priv, EREVID),
+ nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
+ nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
+ nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
+ nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
+ nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
+ nolock_regw_read(priv, ERXWRPTL),
+ nolock_regw_read(priv, ERXRDPTL),
+ nolock_regb_read(priv, ERXFCON),
+ nolock_regb_read(priv, EPKTCNT),
+ nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
+ nolock_regw_read(priv, ETXNDL),
+ nolock_regb_read(priv, MACLCON1),
+ nolock_regb_read(priv, MACLCON2),
+ nolock_regb_read(priv, MAPHSUP));
mutex_unlock(&priv->lock);
}
@@ -599,12 +587,13 @@ static u16 rx_packet_start(u16 ptr)
static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
+ struct device *dev = &priv->spi->dev;
u16 erxrdpt;
if (start > 0x1FFF || end > 0x1FFF || start > end) {
if (netif_msg_drv(priv))
- printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO "
- "bad parameters!\n", __func__, start, end);
+ dev_err(dev, "%s(%d, %d) RXFIFO bad parameters!\n",
+ __func__, start, end);
return;
}
/* set receive buffer start + end */
@@ -617,10 +606,12 @@ static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
+ struct device *dev = &priv->spi->dev;
+
if (start > 0x1FFF || end > 0x1FFF || start > end) {
if (netif_msg_drv(priv))
- printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO "
- "bad parameters!\n", __func__, start, end);
+ dev_err(dev, "%s(%d, %d) TXFIFO bad parameters!\n",
+ __func__, start, end);
return;
}
/* set transmit buffer start + end */
@@ -630,14 +621,15 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
/*
* Low power mode shrinks power consumption about 100x, so we'd like
- * the chip to be in that mode whenever it's inactive. (However, we
- * can't stay in lowpower mode during suspend with WOL active.)
+ * the chip to be in that mode whenever it's inactive. (However, we
+ * can't stay in low power mode during suspend with WOL active.)
*/
static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
{
+ struct device *dev = &priv->spi->dev;
+
if (netif_msg_drv(priv))
- dev_dbg(&priv->spi->dev, "%s power...\n",
- is_low ? "low" : "high");
+ dev_dbg(dev, "%s power...\n", is_low ? "low" : "high");
mutex_lock(&priv->lock);
if (is_low) {
@@ -656,11 +648,12 @@ static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
static int enc28j60_hw_init(struct enc28j60_net *priv)
{
+ struct device *dev = &priv->spi->dev;
u8 reg;
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__,
- priv->full_duplex ? "FullDuplex" : "HalfDuplex");
+ dev_printk(KERN_DEBUG, dev, "%s() - %s\n", __func__,
+ priv->full_duplex ? "FullDuplex" : "HalfDuplex");
mutex_lock(&priv->lock);
/* first reset the chip */
@@ -682,15 +675,15 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
/*
* Check the RevID.
* If it's 0x00 or 0xFF probably the enc28j60 is not mounted or
- * damaged
+ * damaged.
*/
reg = locked_regb_read(priv, EREVID);
if (netif_msg_drv(priv))
- printk(KERN_INFO DRV_NAME ": chip RevID: 0x%02x\n", reg);
+ dev_info(dev, "chip RevID: 0x%02x\n", reg);
if (reg == 0x00 || reg == 0xff) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n",
- __func__, reg);
+ dev_printk(KERN_DEBUG, dev, "%s() Invalid RevId %d\n",
+ __func__, reg);
return 0;
}
@@ -723,7 +716,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
/*
* MACLCON1 (default)
* MACLCON2 (default)
- * Set the maximum packet size which the controller will accept
+ * Set the maximum packet size which the controller will accept.
*/
locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN);
@@ -750,10 +743,12 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
static void enc28j60_hw_enable(struct enc28j60_net *priv)
{
+ struct device *dev = &priv->spi->dev;
+
/* enable interrupts */
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n",
- __func__);
+ dev_printk(KERN_DEBUG, dev, "%s() enabling interrupts.\n",
+ __func__);
enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
@@ -772,7 +767,7 @@ static void enc28j60_hw_enable(struct enc28j60_net *priv)
static void enc28j60_hw_disable(struct enc28j60_net *priv)
{
mutex_lock(&priv->lock);
- /* disable interrutps and packet reception */
+ /* disable interrupts and packet reception */
nolock_regb_write(priv, EIE, 0x00);
nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
priv->hw_enable = false;
@@ -793,14 +788,12 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
priv->full_duplex = (duplex == DUPLEX_FULL);
else {
if (netif_msg_link(priv))
- dev_warn(&ndev->dev,
- "unsupported link setting\n");
+ netdev_warn(ndev, "unsupported link setting\n");
ret = -EOPNOTSUPP;
}
} else {
if (netif_msg_link(priv))
- dev_warn(&ndev->dev, "Warning: hw must be disabled "
- "to set link mode\n");
+ netdev_warn(ndev, "Warning: hw must be disabled to set link mode\n");
ret = -EBUSY;
}
return ret;
@@ -811,21 +804,23 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
*/
static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
{
+ struct device *dev = &priv->spi->dev;
int endptr;
endptr = locked_regw_read(priv, ETXNDL);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
- endptr + 1);
+ dev_printk(KERN_DEBUG, dev, "reading TSV at addr:0x%04x\n",
+ endptr + 1);
enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
}
static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
- u8 tsv[TSV_SIZE])
+ u8 tsv[TSV_SIZE])
{
+ struct device *dev = &priv->spi->dev;
u16 tmp1, tmp2;
- printk(KERN_DEBUG DRV_NAME ": %s - TSV:\n", msg);
+ dev_printk(KERN_DEBUG, dev, "%s - TSV:\n", msg);
tmp1 = tsv[1];
tmp1 <<= 8;
tmp1 |= tsv[0];
@@ -834,30 +829,32 @@ static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
tmp2 <<= 8;
tmp2 |= tsv[4];
- printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, CollisionCount: %d,"
- " TotByteOnWire: %d\n", tmp1, tsv[2] & 0x0f, tmp2);
- printk(KERN_DEBUG DRV_NAME ": TxDone: %d, CRCErr:%d, LenChkErr: %d,"
- " LenOutOfRange: %d\n", TSV_GETBIT(tsv, TSV_TXDONE),
- TSV_GETBIT(tsv, TSV_TXCRCERROR),
- TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
- TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
- printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, "
- "PacketDefer: %d, ExDefer: %d\n",
- TSV_GETBIT(tsv, TSV_TXMULTICAST),
- TSV_GETBIT(tsv, TSV_TXBROADCAST),
- TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
- TSV_GETBIT(tsv, TSV_TXEXDEFER));
- printk(KERN_DEBUG DRV_NAME ": ExCollision: %d, LateCollision: %d, "
- "Giant: %d, Underrun: %d\n",
- TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
- TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
- TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
- printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d, "
- "BackPressApp: %d, VLanTagFrame: %d\n",
- TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
- TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
- TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
- TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
+ dev_printk(KERN_DEBUG, dev,
+ "ByteCount: %d, CollisionCount: %d, TotByteOnWire: %d\n",
+ tmp1, tsv[2] & 0x0f, tmp2);
+ dev_printk(KERN_DEBUG, dev,
+ "TxDone: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ TSV_GETBIT(tsv, TSV_TXDONE),
+ TSV_GETBIT(tsv, TSV_TXCRCERROR),
+ TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
+ TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
+ dev_printk(KERN_DEBUG, dev,
+ "Multicast: %d, Broadcast: %d, PacketDefer: %d, ExDefer: %d\n",
+ TSV_GETBIT(tsv, TSV_TXMULTICAST),
+ TSV_GETBIT(tsv, TSV_TXBROADCAST),
+ TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
+ TSV_GETBIT(tsv, TSV_TXEXDEFER));
+ dev_printk(KERN_DEBUG, dev,
+ "ExCollision: %d, LateCollision: %d, Giant: %d, Underrun: %d\n",
+ TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
+ TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
+ TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
+ dev_printk(KERN_DEBUG, dev,
+ "ControlFrame: %d, PauseFrame: %d, BackPressApp: %d, VLanTagFrame: %d\n",
+ TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
+ TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
+ TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
+ TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
}
/*
@@ -866,27 +863,29 @@ static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg,
u16 pk_ptr, int len, u16 sts)
{
- printk(KERN_DEBUG DRV_NAME ": %s - NextPk: 0x%04x - RSV:\n",
- msg, pk_ptr);
- printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, DribbleNibble: %d\n", len,
- RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
- printk(KERN_DEBUG DRV_NAME ": RxOK: %d, CRCErr:%d, LenChkErr: %d,"
- " LenOutOfRange: %d\n", RSV_GETBIT(sts, RSV_RXOK),
- RSV_GETBIT(sts, RSV_CRCERROR),
- RSV_GETBIT(sts, RSV_LENCHECKERR),
- RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
- printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, "
- "LongDropEvent: %d, CarrierEvent: %d\n",
- RSV_GETBIT(sts, RSV_RXMULTICAST),
- RSV_GETBIT(sts, RSV_RXBROADCAST),
- RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
- RSV_GETBIT(sts, RSV_CARRIEREV));
- printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d,"
- " UnknownOp: %d, VLanTagFrame: %d\n",
- RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
- RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
- RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
- RSV_GETBIT(sts, RSV_RXTYPEVLAN));
+ struct device *dev = &priv->spi->dev;
+
+ dev_printk(KERN_DEBUG, dev, "%s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr);
+ dev_printk(KERN_DEBUG, dev, "ByteCount: %d, DribbleNibble: %d\n",
+ len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
+ dev_printk(KERN_DEBUG, dev,
+ "RxOK: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ RSV_GETBIT(sts, RSV_RXOK),
+ RSV_GETBIT(sts, RSV_CRCERROR),
+ RSV_GETBIT(sts, RSV_LENCHECKERR),
+ RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
+ dev_printk(KERN_DEBUG, dev,
+ "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+ RSV_GETBIT(sts, RSV_RXMULTICAST),
+ RSV_GETBIT(sts, RSV_RXBROADCAST),
+ RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
+ RSV_GETBIT(sts, RSV_CARRIEREV));
+ dev_printk(KERN_DEBUG, dev,
+ "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+ RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
+ RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
+ RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
+ RSV_GETBIT(sts, RSV_RXTYPEVLAN));
}
static void dump_packet(const char *msg, int len, const char *data)
@@ -904,20 +903,20 @@ static void dump_packet(const char *msg, int len, const char *data)
static void enc28j60_hw_rx(struct net_device *ndev)
{
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
struct sk_buff *skb = NULL;
u16 erxrdpt, next_packet, rxstat;
u8 rsv[RSV_SIZE];
int len;
if (netif_msg_rx_status(priv))
- printk(KERN_DEBUG DRV_NAME ": RX pk_addr:0x%04x\n",
- priv->next_pk_ptr);
+ netdev_printk(KERN_DEBUG, ndev, "RX pk_addr:0x%04x\n",
+ priv->next_pk_ptr);
if (unlikely(priv->next_pk_ptr > RXEND_INIT)) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev,
- "%s() Invalid packet address!! 0x%04x\n",
- __func__, priv->next_pk_ptr);
+ netdev_err(ndev, "%s() Invalid packet address!! 0x%04x\n",
+ __func__, priv->next_pk_ptr);
/* packet address corrupted: reset RX logic */
mutex_lock(&priv->lock);
nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
@@ -950,7 +949,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev, "Rx Error (%04x)\n", rxstat);
+ netdev_err(ndev, "Rx Error (%04x)\n", rxstat);
ndev->stats.rx_errors++;
if (RSV_GETBIT(rxstat, RSV_CRCERROR))
ndev->stats.rx_crc_errors++;
@@ -962,8 +961,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
if (!skb) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev,
- "out of memory for Rx'd frame\n");
+ netdev_err(ndev, "out of memory for Rx'd frame\n");
ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
@@ -983,12 +981,12 @@ static void enc28j60_hw_rx(struct net_device *ndev)
/*
* Move the RX read pointer to the start of the next
* received packet.
- * This frees the memory we just read out
+ * This frees the memory we just read out.
*/
erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n",
- __func__, erxrdpt);
+ dev_printk(KERN_DEBUG, dev, "%s() ERXRDPT:0x%04x\n",
+ __func__, erxrdpt);
mutex_lock(&priv->lock);
nolock_regw_write(priv, ERXRDPTL, erxrdpt);
@@ -997,9 +995,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
u16 reg;
reg = nolock_regw_read(priv, ERXRDPTL);
if (reg != erxrdpt)
- printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify "
- "error (0x%04x - 0x%04x)\n", __func__,
- reg, erxrdpt);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() ERXRDPT verify error (0x%04x - 0x%04x)\n",
+ __func__, reg, erxrdpt);
}
#endif
priv->next_pk_ptr = next_packet;
@@ -1013,6 +1011,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
*/
static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
{
+ struct net_device *ndev = priv->netdev;
int epkcnt, erxst, erxnd, erxwr, erxrd;
int free_space;
@@ -1035,8 +1034,8 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
}
mutex_unlock(&priv->lock);
if (netif_msg_rx_status(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n",
- __func__, free_space);
+ netdev_printk(KERN_DEBUG, ndev, "%s() free_space = %d\n",
+ __func__, free_space);
return free_space;
}
@@ -1046,24 +1045,25 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
static void enc28j60_check_link_status(struct net_device *ndev)
{
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
u16 reg;
int duplex;
reg = enc28j60_phy_read(priv, PHSTAT2);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, "
- "PHSTAT2: %04x\n", __func__,
- enc28j60_phy_read(priv, PHSTAT1), reg);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() PHSTAT1: %04x, PHSTAT2: %04x\n", __func__,
+ enc28j60_phy_read(priv, PHSTAT1), reg);
duplex = reg & PHSTAT2_DPXSTAT;
if (reg & PHSTAT2_LSTAT) {
netif_carrier_on(ndev);
if (netif_msg_ifup(priv))
- dev_info(&ndev->dev, "link up - %s\n",
- duplex ? "Full duplex" : "Half duplex");
+ netdev_info(ndev, "link up - %s\n",
+ duplex ? "Full duplex" : "Half duplex");
} else {
if (netif_msg_ifdown(priv))
- dev_info(&ndev->dev, "link down\n");
+ netdev_info(ndev, "link down\n");
netif_carrier_off(ndev);
}
}
@@ -1089,8 +1089,8 @@ static void enc28j60_tx_clear(struct net_device *ndev, bool err)
/*
* RX handler
- * ignore PKTIF because is unreliable! (look at the errata datasheet)
- * check EPKTCNT is the suggested workaround.
+ * Ignore PKTIF because it is unreliable! (Look at the errata datasheet.)
+ * Checking EPKTCNT is the suggested workaround.
* We don't need to clear interrupt flag, automatically done when
* enc28j60_hw_rx() decrements the packet counter.
* Returns how many packet processed.
@@ -1102,13 +1102,14 @@ static int enc28j60_rx_interrupt(struct net_device *ndev)
pk_counter = locked_regb_read(priv, EPKTCNT);
if (pk_counter && netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": intRX, pk_cnt: %d\n", pk_counter);
+ netdev_printk(KERN_DEBUG, ndev, "intRX, pk_cnt: %d\n",
+ pk_counter);
if (pk_counter > priv->max_pk_counter) {
/* update statistics */
priv->max_pk_counter = pk_counter;
if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1)
- printk(KERN_DEBUG DRV_NAME ": RX max_pk_cnt: %d\n",
- priv->max_pk_counter);
+ netdev_printk(KERN_DEBUG, ndev, "RX max_pk_cnt: %d\n",
+ priv->max_pk_counter);
}
ret = pk_counter;
while (pk_counter-- > 0)
@@ -1124,8 +1125,6 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
struct net_device *ndev = priv->netdev;
int intflags, loop;
- if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
/* disable further interrupts */
locked_reg_bfclr(priv, EIE, EIE_INTIE);
@@ -1136,16 +1135,16 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
if ((intflags & EIR_DMAIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intDMA(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intDMA(%d)\n",
+ loop);
locked_reg_bfclr(priv, EIR, EIR_DMAIF);
}
/* LINK changed handler */
if ((intflags & EIR_LINKIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intLINK(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intLINK(%d)\n",
+ loop);
enc28j60_check_link_status(ndev);
/* read PHIR to clear the flag */
enc28j60_phy_read(priv, PHIR);
@@ -1156,13 +1155,12 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
bool err = false;
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intTX(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intTX(%d)\n",
+ loop);
priv->tx_retry_count = 0;
if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) {
if (netif_msg_tx_err(priv))
- dev_err(&ndev->dev,
- "Tx Error (aborted)\n");
+ netdev_err(ndev, "Tx Error (aborted)\n");
err = true;
}
if (netif_msg_tx_done(priv)) {
@@ -1179,8 +1177,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intTXErr(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intTXErr(%d)\n",
+ loop);
locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
enc28j60_read_tsv(priv, tsv);
if (netif_msg_tx_err(priv))
@@ -1194,9 +1192,9 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
/* Transmit Late collision check for retransmit */
if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) {
if (netif_msg_tx_err(priv))
- printk(KERN_DEBUG DRV_NAME
- ": LateCollision TXErr (%d)\n",
- priv->tx_retry_count);
+ netdev_printk(KERN_DEBUG, ndev,
+ "LateCollision TXErr (%d)\n",
+ priv->tx_retry_count);
if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT)
locked_reg_bfset(priv, ECON1,
ECON1_TXRTS);
@@ -1210,13 +1208,12 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
if ((intflags & EIR_RXERIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intRXErr(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intRXErr(%d)\n",
+ loop);
/* Check free FIFO space to flag RX overrun */
if (enc28j60_get_free_rxfifo(priv) <= 0) {
if (netif_msg_rx_err(priv))
- printk(KERN_DEBUG DRV_NAME
- ": RX Overrun\n");
+ netdev_printk(KERN_DEBUG, ndev, "RX Overrun\n");
ndev->stats.rx_dropped++;
}
locked_reg_bfclr(priv, EIR, EIR_RXERIF);
@@ -1228,8 +1225,6 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
/* re-enable interrupts */
locked_reg_bfset(priv, EIE, EIE_INTIE);
- if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__);
}
/*
@@ -1239,11 +1234,13 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ struct net_device *ndev = priv->netdev;
+
BUG_ON(!priv->tx_skb);
if (netif_msg_tx_queued(priv))
- printk(KERN_DEBUG DRV_NAME
- ": Tx Packet Len:%d\n", priv->tx_skb->len);
+ netdev_printk(KERN_DEBUG, ndev, "Tx Packet Len:%d\n",
+ priv->tx_skb->len);
if (netif_msg_pktdata(priv))
dump_packet(__func__,
@@ -1253,6 +1250,7 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
#ifdef CONFIG_ENC28J60_WRITEVERIFY
/* readback and verify written data */
if (netif_msg_drv(priv)) {
+ struct device *dev = &priv->spi->dev;
int test_len, k;
u8 test_buf[64]; /* limit the test to the first 64 bytes */
int okflag;
@@ -1266,16 +1264,14 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
okflag = 1;
for (k = 0; k < test_len; k++) {
if (priv->tx_skb->data[k] != test_buf[k]) {
- printk(KERN_DEBUG DRV_NAME
- ": Error, %d location differ: "
- "0x%02x-0x%02x\n", k,
- priv->tx_skb->data[k], test_buf[k]);
+ dev_printk(KERN_DEBUG, dev,
+ "Error, %d location differ: 0x%02x-0x%02x\n",
+ k, priv->tx_skb->data[k], test_buf[k]);
okflag = 0;
}
}
if (!okflag)
- printk(KERN_DEBUG DRV_NAME ": Tx write buffer, "
- "verify ERROR!\n");
+ dev_printk(KERN_DEBUG, dev, "Tx write buffer, verify ERROR!\n");
}
#endif
/* set TX request flag */
@@ -1287,14 +1283,11 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_tx_queued(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
/* If some error occurs while trying to transmit this
* packet, you should return '1' from this function.
* In such a case you _may not_ do anything to the
* SKB, it is still owned by the network queueing
- * layer when an error is returned. This means you
+ * layer when an error is returned. This means you
* may not modify any SKB fields, you may not free
* the SKB, etc.
*/
@@ -1337,7 +1330,7 @@ static void enc28j60_tx_timeout(struct net_device *ndev)
struct enc28j60_net *priv = netdev_priv(ndev);
if (netif_msg_timer(priv))
- dev_err(&ndev->dev, DRV_NAME " tx timeout\n");
+ netdev_err(ndev, "tx timeout\n");
ndev->stats.tx_errors++;
/* can't restart safely under softirq */
@@ -1356,13 +1349,9 @@ static int enc28j60_net_open(struct net_device *dev)
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
if (!is_valid_ether_addr(dev->dev_addr)) {
if (netif_msg_ifup(priv))
- dev_err(&dev->dev, "invalid MAC address %pM\n",
- dev->dev_addr);
+ netdev_err(dev, "invalid MAC address %pM\n", dev->dev_addr);
return -EADDRNOTAVAIL;
}
/* Reset the hardware here (and take it out of low power mode) */
@@ -1370,7 +1359,7 @@ static int enc28j60_net_open(struct net_device *dev)
enc28j60_hw_disable(priv);
if (!enc28j60_hw_init(priv)) {
if (netif_msg_ifup(priv))
- dev_err(&dev->dev, "hw_reset() failed\n");
+ netdev_err(dev, "hw_reset() failed\n");
return -EINVAL;
}
/* Update the MAC address (in case user has changed it) */
@@ -1392,9 +1381,6 @@ static int enc28j60_net_close(struct net_device *dev)
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
enc28j60_hw_disable(priv);
enc28j60_lowpower(priv, true);
netif_stop_queue(dev);
@@ -1415,16 +1401,16 @@ static void enc28j60_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "promiscuous mode\n");
+ netdev_info(dev, "promiscuous mode\n");
priv->rxfilter = RXFILTER_PROMISC;
} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "%smulticast mode\n",
- (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+ netdev_info(dev, "%smulticast mode\n",
+ (dev->flags & IFF_ALLMULTI) ? "all-" : "");
priv->rxfilter = RXFILTER_MULTI;
} else {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "normal mode\n");
+ netdev_info(dev, "normal mode\n");
priv->rxfilter = RXFILTER_NORMAL;
}
@@ -1436,20 +1422,21 @@ static void enc28j60_setrx_work_handler(struct work_struct *work)
{
struct enc28j60_net *priv =
container_of(work, struct enc28j60_net, setrx_work);
+ struct device *dev = &priv->spi->dev;
if (priv->rxfilter == RXFILTER_PROMISC) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": promiscuous mode\n");
+ dev_printk(KERN_DEBUG, dev, "promiscuous mode\n");
locked_regb_write(priv, ERXFCON, 0x00);
} else if (priv->rxfilter == RXFILTER_MULTI) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": multicast mode\n");
+ dev_printk(KERN_DEBUG, dev, "multicast mode\n");
locked_regb_write(priv, ERXFCON,
ERXFCON_UCEN | ERXFCON_CRCEN |
ERXFCON_BCEN | ERXFCON_MCEN);
} else {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": normal mode\n");
+ dev_printk(KERN_DEBUG, dev, "normal mode\n");
locked_regb_write(priv, ERXFCON,
ERXFCON_UCEN | ERXFCON_CRCEN |
ERXFCON_BCEN);
@@ -1468,7 +1455,7 @@ static void enc28j60_restart_work_handler(struct work_struct *work)
enc28j60_net_close(ndev);
ret = enc28j60_net_open(ndev);
if (unlikely(ret)) {
- dev_info(&ndev->dev, " could not restart %d\n", ret);
+ netdev_info(ndev, "could not restart %d\n", ret);
dev_close(ndev);
}
}
@@ -1552,14 +1539,13 @@ static const struct net_device_ops enc28j60_netdev_ops = {
static int enc28j60_probe(struct spi_device *spi)
{
+ unsigned char macaddr[ETH_ALEN];
struct net_device *dev;
struct enc28j60_net *priv;
- const void *macaddr;
int ret = 0;
if (netif_msg_drv(&debug))
- dev_info(&spi->dev, DRV_NAME " Ethernet driver %s loaded\n",
- DRV_VERSION);
+ dev_info(&spi->dev, "Ethernet driver %s loaded\n", DRV_VERSION);
dev = alloc_etherdev(sizeof(struct enc28j60_net));
if (!dev) {
@@ -1570,8 +1556,7 @@ static int enc28j60_probe(struct spi_device *spi)
priv->netdev = dev; /* priv to netdev reference */
priv->spi = spi; /* priv to spi reference */
- priv->msg_enable = netif_msg_init(debug.msg_enable,
- ENC28J60_MSG_DEFAULT);
+ priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT);
mutex_init(&priv->lock);
INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler);
INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
@@ -1582,13 +1567,12 @@ static int enc28j60_probe(struct spi_device *spi)
if (!enc28j60_chipset_init(dev)) {
if (netif_msg_probe(priv))
- dev_info(&spi->dev, DRV_NAME " chip not found\n");
+ dev_info(&spi->dev, "chip not found\n");
ret = -EIO;
goto error_irq;
}
- macaddr = of_get_mac_address(spi->dev.of_node);
- if (macaddr)
+ if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
ether_addr_copy(dev->dev_addr, macaddr);
else
eth_hw_addr_random(dev);
@@ -1600,8 +1584,8 @@ static int enc28j60_probe(struct spi_device *spi)
ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
if (ret < 0) {
if (netif_msg_probe(priv))
- dev_err(&spi->dev, DRV_NAME ": request irq %d failed "
- "(ret = %d)\n", spi->irq, ret);
+ dev_err(&spi->dev, "request irq %d failed (ret = %d)\n",
+ spi->irq, ret);
goto error_irq;
}
@@ -1616,11 +1600,10 @@ static int enc28j60_probe(struct spi_device *spi)
ret = register_netdev(dev);
if (ret) {
if (netif_msg_probe(priv))
- dev_err(&spi->dev, "register netdev " DRV_NAME
- " failed (ret = %d)\n", ret);
+ dev_err(&spi->dev, "register netdev failed (ret = %d)\n",
+ ret);
goto error_register;
}
- dev_info(&dev->dev, DRV_NAME " driver registered\n");
return 0;
@@ -1636,9 +1619,6 @@ static int enc28j60_remove(struct spi_device *spi)
{
struct enc28j60_net *priv = spi_get_drvdata(spi);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": remove\n");
-
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
free_netdev(priv->netdev);
@@ -1660,22 +1640,7 @@ static struct spi_driver enc28j60_driver = {
.probe = enc28j60_probe,
.remove = enc28j60_remove,
};
-
-static int __init enc28j60_init(void)
-{
- msec20_to_jiffies = msecs_to_jiffies(20);
-
- return spi_register_driver(&enc28j60_driver);
-}
-
-module_init(enc28j60_init);
-
-static void __exit enc28j60_exit(void)
-{
- spi_unregister_driver(&enc28j60_driver);
-}
-
-module_exit(enc28j60_exit);
+module_spi_driver(enc28j60_driver);
MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a1d0d6e42533..d715ef4fc92f 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
struct netdev_hw_addr *hw_addr)
{
struct ocelot *ocelot = port->ocelot;
- struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+ struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
if (!ha)
return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
ETH_GSTRING_LEN);
}
-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
{
- struct delayed_work *del_work = to_delayed_work(work);
- struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
int i, j;
mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
}
}
- cancel_delayed_work(&ocelot->stats_work);
+ mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot,
+ stats_work);
+
+ ocelot_update_stats(ocelot);
+
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
-
- mutex_unlock(&ocelot->stats_lock);
}
static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
int i;
/* check and update now */
- ocelot_check_stats(&ocelot->stats_work.work);
+ ocelot_update_stats(ocelot);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
- INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
return 0;
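The refactoring above splits the counter refresh out of the delayed-work callback so that the ethtool path can update statistics synchronously via ocelot_update_stats() instead of invoking the work handler directly and cancelling/requeueing the delayed work. Condensed from the hunks above, the resulting self-rearming pattern looks like this (body of the update helper abridged):

static void ocelot_update_stats(struct ocelot *ocelot)
{
	mutex_lock(&ocelot->stats_lock);
	/* ... read the hardware counters into ocelot->stats ... */
	mutex_unlock(&ocelot->stats_lock);
}

static void ocelot_check_stats_work(struct work_struct *work)
{
	struct ocelot *ocelot = container_of(to_delayed_work(work),
					     struct ocelot, stats_work);

	ocelot_update_stats(ocelot);
	/* re-arm the periodic refresh; ethtool no longer touches the work */
	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
			   OCELOT_STATS_CHECK_DELAY);
}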
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index e0340f778d8f..d8b7fba96d58 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1439,7 +1439,6 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
tx->queue_active = 0;
put_be32(htonl(1), tx->send_stop);
mb();
- mmiowb();
}
__netif_tx_unlock(dev_queue);
}
@@ -2861,7 +2860,6 @@ again:
tx->queue_active = 1;
put_be32(htonl(1), tx->send_go);
mb();
- mmiowb();
}
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index feda9644289d..3b2ae1a21678 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4153,8 +4153,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
writeq(val64, &tx_fifo->List_Control);
- mmiowb();
-
put_off++;
if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
put_off = 0;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 7cde387e5ec6..51cd57ab3d95 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
dma_object->addr))) {
vxge_os_dma_free(devh->pdev, memblock,
&dma_object->acc_handle);
+ memblock = NULL;
goto exit;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index b877acec5cde..1d334f2e0a56 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1826,7 +1826,6 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget)
vxge_hw_channel_msix_unmask(
(struct __vxge_hw_channel *)ring->handle,
ring->rx_vector_no);
- mmiowb();
}
/* We are copying and returning the local variable, in case if after
@@ -2234,8 +2233,6 @@ static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
fifo->tx_vector_no);
- mmiowb();
-
return IRQ_HANDLED;
}
@@ -2272,14 +2269,12 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
*/
vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
- mmiowb();
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
msix_id);
- mmiowb();
continue;
}
vxge_debug_intr(VXGE_ERR,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 59e77e3086bb..709d20d9938f 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1399,11 +1399,7 @@ static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
&fifo->nofl_db->control_0);
- mmiowb();
-
writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
-
- mmiowb();
}
/**
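The myri10ge, s2io and vxge hunks above only delete mmiowb() calls without replacement. The assumption behind this reading (it is not stated in the hunks themselves) is the kernel-wide mmiowb() removal: the ordering that mmiowb() used to provide for MMIO writes issued under a spinlock is now supplied by the spin_unlock() path on the architectures that need it, so the usual driver pattern reduces to the sketch below (the names are illustrative only):

	spin_lock_irqsave(&chan->lock, flags);
	writeq(doorbell_val, chan->ioaddr);	/* posted MMIO write */
	/* no mmiowb() needed: the unlock orders the write for the next owner */
	spin_unlock_irqrestore(&chan->lock, flags);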
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 549898d5d450..f0d0e09f60e2 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -19,6 +19,7 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ select NET_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
	  cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 47c708f08ade..87bf784f8e8f 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -15,6 +15,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ ccm.o \
nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
@@ -42,7 +43,8 @@ nfp-objs += \
flower/match.o \
flower/metadata.o \
flower/offload.o \
- flower/tunnel_conf.o
+ flower/tunnel_conf.o \
+ flower/qos_conf.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 9852080cf454..ff3913085665 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -39,7 +39,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
}
if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
knode->sel->offoff || knode->fshift) {
- NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+ NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
return false;
}
if (knode->sel->hoff || knode->sel->hmask) {
@@ -78,7 +78,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
k = &knode->sel->keys[0];
if (k->offmask) {
- NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+ NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
return false;
}
if (k->off) {
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 9584f03f3efa..69e84ff7f2e5 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -261,10 +261,15 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET;
struct nfp_net *nn = alink->vnic;
unsigned int i;
int err;
+ err = nfp_net_mbox_lock(nn, alink->abm->prio_map_len);
+ if (err)
+ return err;
+
/* Write data_len and wipe reserved */
nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
alink->abm->prio_map_len);
@@ -273,8 +278,7 @@ int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
packed[i / sizeof(u32)]);
- err = nfp_net_reconfig_mbox(nn,
- NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
if (err)
nfp_err(alink->abm->app->cpp,
"setting DSCP -> VQ map failed with error %d\n", err);
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 4d4ff5844c47..9183b3e85d21 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -53,7 +53,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
}
}
-static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
+static struct net_device *
+nfp_abm_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
enum nfp_repr_type rtype;
struct nfp_reprs *reprs;
@@ -549,5 +550,5 @@ const struct nfp_app_type app_abm = {
.eswitch_mode_get = nfp_abm_eswitch_mode_get,
.eswitch_mode_set = nfp_abm_eswitch_mode_set,
- .repr_get = nfp_abm_repr_get,
+ .dev_get = nfp_abm_repr_get,
};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 9b6cfa697879..bc9850e4ec5e 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -6,48 +6,13 @@
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
-#include <linux/wait.h>
+#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"
-#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
-
-static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
-{
- u16 used_tags;
-
- used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
-
- return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
-}
-
-static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
-{
- /* All FW communication for BPF is request-reply. To make sure we
- * don't reuse the message ID too early after timeout - limit the
- * number of requests in flight.
- */
- if (nfp_bpf_all_tags_busy(bpf)) {
- cmsg_warn(bpf, "all FW request contexts busy!\n");
- return -EAGAIN;
- }
-
- WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
- return bpf->tag_alloc_next++;
-}
-
-static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
- WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
-
- while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
- bpf->tag_alloc_last != bpf->tag_alloc_next)
- bpf->tag_alloc_last++;
-}
-
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
@@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
return size;
}
-static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
-{
- struct cmsg_hdr *hdr;
-
- hdr = (struct cmsg_hdr *)skb->data;
-
- return hdr->type;
-}
-
-static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
-{
- struct cmsg_hdr *hdr;
-
- hdr = (struct cmsg_hdr *)skb->data;
-
- return be16_to_cpu(hdr->tag);
-}
-
-static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
- unsigned int msg_tag;
- struct sk_buff *skb;
-
- skb_queue_walk(&bpf->cmsg_replies, skb) {
- msg_tag = nfp_bpf_cmsg_get_tag(skb);
- if (msg_tag == tag) {
- nfp_bpf_free_tag(bpf, tag);
- __skb_unlink(skb, &bpf->cmsg_replies);
- return skb;
- }
- }
-
- return NULL;
-}
-
-static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
- struct sk_buff *skb;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- skb = __nfp_bpf_reply(bpf, tag);
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return skb;
-}
-
-static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
- struct sk_buff *skb;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- skb = __nfp_bpf_reply(bpf, tag);
- if (!skb)
- nfp_bpf_free_tag(bpf, tag);
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
- int tag)
-{
- struct sk_buff *skb;
- int i, err;
-
- for (i = 0; i < 50; i++) {
- udelay(4);
- skb = nfp_bpf_reply(bpf, tag);
- if (skb)
- return skb;
- }
-
- err = wait_event_interruptible_timeout(bpf->cmsg_wq,
- skb = nfp_bpf_reply(bpf, tag),
- msecs_to_jiffies(5000));
- /* We didn't get a response - try last time and atomically drop
- * the tag even if no response is matched.
- */
- if (!skb)
- skb = nfp_bpf_reply_drop_tag(bpf, tag);
- if (err < 0) {
- cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
- err == ERESTARTSYS ? "interrupted" : "error",
- type, err);
- return ERR_PTR(err);
- }
- if (!skb) {
- cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
- type);
- return ERR_PTR(-ETIMEDOUT);
- }
-
- return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
- enum nfp_bpf_cmsg_type type, unsigned int reply_size)
-{
- struct cmsg_hdr *hdr;
- int tag;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- tag = nfp_bpf_alloc_tag(bpf);
- if (tag < 0) {
- nfp_ctrl_unlock(bpf->app->ctrl);
- dev_kfree_skb_any(skb);
- return ERR_PTR(tag);
- }
-
- hdr = (void *)skb->data;
- hdr->ver = CMSG_MAP_ABI_VERSION;
- hdr->type = type;
- hdr->tag = cpu_to_be16(tag);
-
- __nfp_app_ctrl_tx(bpf->app, skb);
-
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
- if (IS_ERR(skb))
- return skb;
-
- hdr = (struct cmsg_hdr *)skb->data;
- if (hdr->type != __CMSG_REPLY(type)) {
- cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
- hdr->type, __CMSG_REPLY(type));
- goto err_free;
- }
- /* 0 reply_size means caller will do the validation */
- if (reply_size && skb->len != reply_size) {
- cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
- type, skb->len, reply_size);
- goto err_free;
- }
-
- return skb;
-err_free:
- dev_kfree_skb_any(skb);
- return ERR_PTR(-EIO);
-}
-
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
struct cmsg_reply_map_simple *reply)
@@ -275,8 +97,8 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
req->map_type = cpu_to_be32(map->map_type);
req->map_flags = 0;
- skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
- sizeof(*reply));
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
+ sizeof(*reply));
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -310,8 +132,8 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid);
- skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
- sizeof(*reply));
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
+ sizeof(*reply));
if (IS_ERR(skb)) {
cmsg_warn(bpf, "leaking map - I/O error\n");
return;
@@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
}
static int
-nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
- enum nfp_bpf_cmsg_type op,
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
@@ -386,8 +207,8 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
map->value_size);
- skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
- nfp_bpf_cmsg_map_reply_size(bpf, 1));
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, op,
+ nfp_bpf_cmsg_map_reply_size(bpf, 1));
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -415,34 +236,34 @@ err_free:
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
key, value, flags, NULL, NULL);
}
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
key, NULL, 0, NULL, NULL);
}
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
key, NULL, 0, NULL, value);
}
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
void *next_key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
NULL, NULL, 0, next_key, NULL);
}
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
key, NULL, 0, next_key, NULL);
}
@@ -456,54 +277,35 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_app_bpf *bpf = app->priv;
- unsigned int tag;
if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
- goto err_free;
+ dev_kfree_skb_any(skb);
+ return;
}
- if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
+ if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
dev_consume_skb_any(skb);
else
dev_kfree_skb_any(skb);
 		return;
}
- nfp_ctrl_lock(bpf->app->ctrl);
-
- tag = nfp_bpf_cmsg_get_tag(skb);
- if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
- cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
- tag);
- goto err_unlock;
- }
-
- __skb_queue_tail(&bpf->cmsg_replies, skb);
- wake_up_interruptible_all(&bpf->cmsg_wq);
-
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return;
-err_unlock:
- nfp_ctrl_unlock(bpf->app->ctrl);
-err_free:
- dev_kfree_skb_any(skb);
+ nfp_ccm_rx(&bpf->ccm, skb);
}
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
+ const struct nfp_ccm_hdr *hdr = data;
struct nfp_app_bpf *bpf = app->priv;
- const struct cmsg_hdr *hdr = data;
if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
return;
}
- if (hdr->type == CMSG_TYPE_BPF_EVENT)
+ if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
nfp_bpf_event_output(bpf, data, len);
else
cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 721921bcf120..06c4286bd79e 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/types.h>
+#include "../ccm.h"
/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking
* our FW ABI. In that case we will do translation in the driver.
@@ -52,22 +53,6 @@ struct nfp_bpf_cap_tlv_maps {
/*
* Types defined for map related control messages
*/
-#define CMSG_MAP_ABI_VERSION 1
-
-enum nfp_bpf_cmsg_type {
- CMSG_TYPE_MAP_ALLOC = 1,
- CMSG_TYPE_MAP_FREE = 2,
- CMSG_TYPE_MAP_LOOKUP = 3,
- CMSG_TYPE_MAP_UPDATE = 4,
- CMSG_TYPE_MAP_DELETE = 5,
- CMSG_TYPE_MAP_GETNEXT = 6,
- CMSG_TYPE_MAP_GETFIRST = 7,
- CMSG_TYPE_BPF_EVENT = 8,
- __CMSG_TYPE_MAP_MAX,
-};
-
-#define CMSG_TYPE_MAP_REPLY_BIT 7
-#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
/* BPF ABIv2 fixed-length control message fields */
#define CMSG_MAP_KEY_LW 16
@@ -84,19 +69,13 @@ enum nfp_bpf_cmsg_status {
CMSG_RC_ERR_MAP_E2BIG = 7,
};
-struct cmsg_hdr {
- u8 type;
- u8 ver;
- __be16 tag;
-};
-
struct cmsg_reply_map_simple {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 rc;
};
struct cmsg_req_map_alloc_tbl {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 key_size; /* in bytes */
__be32 value_size; /* in bytes */
__be32 max_entries;
@@ -110,7 +89,7 @@ struct cmsg_reply_map_alloc_tbl {
};
struct cmsg_req_map_free_tbl {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 tid;
};
@@ -120,7 +99,7 @@ struct cmsg_reply_map_free_tbl {
};
struct cmsg_req_map_op {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 tid;
__be32 count;
__be32 flags;
@@ -135,7 +114,7 @@ struct cmsg_reply_map_op {
};
struct cmsg_bpf_event {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 cpu_id;
__be64 map_ptr;
__be32 data_size;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index f272247d1708..d4bf0e694541 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -328,7 +328,18 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
return;
}
- if (sc == SHF_SC_L_SHF)
+ /* The NFP shift instruction has a quirk: if the shift direction is
+ * left, then a shift amount of 1 to 31 is specified as 32 minus the
+ * amount to shift.
+ *
+ * There is no need to do this for an indirect shift, whose shift
+ * amount is 0. Even after the subtraction, a shift amount of 0 would
+ * become 32, which is eventually encoded the same as 0 because only
+ * the low 5 bits are encoded; but a shift amount of 32 would fail the
+ * FIELD_PREP check done later on the shift mask (0x1f), since 32 is
+ * outside the mask range.
+ */
+ if (sc == SHF_SC_L_SHF && shift)
shift = 32 - shift;
insn = OP_SHF_BASE |
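To make the new comment concrete, a couple of worked values for the guard above (illustrative only): a left shift by 8 is encoded as 32 - 8 = 24 and a left shift by 31 as 32 - 31 = 1, while an indirect shift keeps its amount of 0, since 32 - 0 = 32 does not fit the 5-bit shift field and would trip the later FIELD_PREP() check against the 0x1f mask.

	if (sc == SHF_SC_L_SHF && shift)	/* e.g. shift 8 is encoded as 24 */
		shift = 32 - shift;		/* shift 0 (indirect) stays 0 */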
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 275de9f4c61c..9c136da25221 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -442,14 +442,16 @@ static int nfp_bpf_init(struct nfp_app *app)
bpf->app = app;
app->priv = bpf;
- skb_queue_head_init(&bpf->cmsg_replies);
- init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list);
- err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
+ err = nfp_ccm_init(&bpf->ccm, app);
if (err)
goto err_free_bpf;
+ err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
+ if (err)
+ goto err_clean_ccm;
+
nfp_bpf_init_capabilities(bpf);
err = nfp_bpf_parse_capabilities(app);
@@ -474,6 +476,8 @@ static int nfp_bpf_init(struct nfp_app *app)
err_free_neutral_maps:
rhashtable_destroy(&bpf->maps_neutral);
+err_clean_ccm:
+ nfp_ccm_clean(&bpf->ccm);
err_free_bpf:
kfree(bpf);
return err;
@@ -484,7 +488,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
struct nfp_app_bpf *bpf = app->priv;
bpf_offload_dev_destroy(bpf->bpf_dev);
- WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+ nfp_ccm_clean(&bpf->ccm);
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
rhashtable_free_and_destroy(&bpf->maps_neutral,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index b25a48218bcf..e54d1ac84df2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/wait.h>
+#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"
@@ -84,16 +85,10 @@ enum pkt_vec {
/**
* struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app
+ * @ccm: common control message handler data
*
* @bpf_dev: BPF offload device handle
*
- * @tag_allocator: bitmap of control message tags in use
- * @tag_alloc_next: next tag bit to allocate
- * @tag_alloc_last: next tag bit to be freed
- *
- * @cmsg_replies: received cmsg replies waiting to be consumed
- * @cmsg_wq: work queue for waiting for cmsg replies
- *
* @cmsg_key_sz: size of key in cmsg element array
* @cmsg_val_sz: size of value in cmsg element array
*
@@ -132,16 +127,10 @@ enum pkt_vec {
*/
struct nfp_app_bpf {
struct nfp_app *app;
+ struct nfp_ccm ccm;
struct bpf_offload_dev *bpf_dev;
- DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
- u16 tag_alloc_next;
- u16 tag_alloc_last;
-
- struct sk_buff_head cmsg_replies;
- struct wait_queue_head cmsg_wq;
-
unsigned int cmsg_key_sz;
unsigned int cmsg_val_sz;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 15dce97650a5..39c9fec222b4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -22,6 +22,7 @@
#include <net/tc_act/tc_mirred.h>
#include "main.h"
+#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
@@ -452,7 +453,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
- if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
+ if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
rcu_read_lock();
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.c b/drivers/net/ethernet/netronome/nfp/ccm.c
new file mode 100644
index 000000000000..94476e41e261
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
+
+#include <linux/bitops.h>
+
+#include "ccm.h"
+#include "nfp_app.h"
+#include "nfp_net.h"
+
+#define NFP_CCM_TYPE_REPLY_BIT 7
+#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
+
+#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
+
+#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
+
+static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
+{
+ u16 used_tags;
+
+ used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;
+
+ return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
+}
+
+static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
+{
+ /* CCM is for FW communication which is request-reply. To make sure
+ * we don't reuse the message ID too early after timeout - limit the
+ * number of requests in flight.
+ */
+ if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
+ ccm_warn(ccm->app, "all FW request contexts busy!\n");
+ return -EAGAIN;
+ }
+
+ WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
+ return ccm->tag_alloc_next++;
+}
+
+static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
+{
+ WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));
+
+ while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
+ ccm->tag_alloc_last != ccm->tag_alloc_next)
+ ccm->tag_alloc_last++;
+}
+
+static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
+{
+ unsigned int msg_tag;
+ struct sk_buff *skb;
+
+ skb_queue_walk(&ccm->replies, skb) {
+ msg_tag = nfp_ccm_get_tag(skb);
+ if (msg_tag == tag) {
+ nfp_ccm_free_tag(ccm, tag);
+ __skb_unlink(skb, &ccm->replies);
+ return skb;
+ }
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *
+nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(app->ctrl);
+ skb = __nfp_ccm_reply(ccm, tag);
+ nfp_ctrl_unlock(app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(app->ctrl);
+ skb = __nfp_ccm_reply(ccm, tag);
+ if (!skb)
+ nfp_ccm_free_tag(ccm, tag);
+ nfp_ctrl_unlock(app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
+ enum nfp_ccm_type type, int tag)
+{
+ struct sk_buff *skb;
+ int i, err;
+
+ for (i = 0; i < 50; i++) {
+ udelay(4);
+ skb = nfp_ccm_reply(ccm, app, tag);
+ if (skb)
+ return skb;
+ }
+
+ err = wait_event_interruptible_timeout(ccm->wq,
+ skb = nfp_ccm_reply(ccm, app,
+ tag),
+ msecs_to_jiffies(5000));
+ /* We didn't get a response - try last time and atomically drop
+ * the tag even if no response is matched.
+ */
+ if (!skb)
+ skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
+ if (err < 0) {
+ ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
+ err == ERESTARTSYS ? "interrupted" : "error",
+ type, err);
+ return ERR_PTR(err);
+ }
+ if (!skb) {
+ ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
+ return ERR_PTR(-ETIMEDOUT);
+ }
+
+ return skb;
+}
+
+struct sk_buff *
+nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int reply_size)
+{
+ struct nfp_app *app = ccm->app;
+ struct nfp_ccm_hdr *hdr;
+ int reply_type, tag;
+
+ nfp_ctrl_lock(app->ctrl);
+ tag = nfp_ccm_alloc_tag(ccm);
+ if (tag < 0) {
+ nfp_ctrl_unlock(app->ctrl);
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(tag);
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = NFP_CCM_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(tag);
+
+ __nfp_app_ctrl_tx(app, skb);
+
+ nfp_ctrl_unlock(app->ctrl);
+
+ skb = nfp_ccm_wait_reply(ccm, app, type, tag);
+ if (IS_ERR(skb))
+ return skb;
+
+ reply_type = nfp_ccm_get_type(skb);
+ if (reply_type != __NFP_CCM_REPLY(type)) {
+ ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+ reply_type, __NFP_CCM_REPLY(type));
+ goto err_free;
+ }
+ /* 0 reply_size means caller will do the validation */
+ if (reply_size && skb->len != reply_size) {
+ ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
+ type, skb->len, reply_size);
+ goto err_free;
+ }
+
+ return skb;
+err_free:
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(-EIO);
+}
+
+void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
+{
+ struct nfp_app *app = ccm->app;
+ unsigned int tag;
+
+ if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
+ ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
+ goto err_free;
+ }
+
+ nfp_ctrl_lock(app->ctrl);
+
+ tag = nfp_ccm_get_tag(skb);
+ if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
+ ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
+ tag);
+ goto err_unlock;
+ }
+
+ __skb_queue_tail(&ccm->replies, skb);
+ wake_up_interruptible_all(&ccm->wq);
+
+ nfp_ctrl_unlock(app->ctrl);
+ return;
+
+err_unlock:
+ nfp_ctrl_unlock(app->ctrl);
+err_free:
+ dev_kfree_skb_any(skb);
+}
+
+int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
+{
+ ccm->app = app;
+ skb_queue_head_init(&ccm->replies);
+ init_waitqueue_head(&ccm->wq);
+ return 0;
+}
+
+void nfp_ccm_clean(struct nfp_ccm *ccm)
+{
+ WARN_ON(!skb_queue_empty(&ccm->replies));
+}
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
new file mode 100644
index 000000000000..ac963b128203
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CCM_H
+#define NFP_CCM_H 1
+
+#include <linux/bitmap.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+struct nfp_app;
+
+/* Firmware ABI */
+
+enum nfp_ccm_type {
+ NFP_CCM_TYPE_BPF_MAP_ALLOC = 1,
+ NFP_CCM_TYPE_BPF_MAP_FREE = 2,
+ NFP_CCM_TYPE_BPF_MAP_LOOKUP = 3,
+ NFP_CCM_TYPE_BPF_MAP_UPDATE = 4,
+ NFP_CCM_TYPE_BPF_MAP_DELETE = 5,
+ NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
+ NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
+ NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
+ __NFP_CCM_TYPE_MAX,
+};
+
+#define NFP_CCM_ABI_VERSION 1
+
+struct nfp_ccm_hdr {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+};
+
+static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ hdr = (struct nfp_ccm_hdr *)skb->data;
+
+ return hdr->type;
+}
+
+static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ hdr = (struct nfp_ccm_hdr *)skb->data;
+
+ return be16_to_cpu(hdr->tag);
+}
+
+/* Implementation */
+
+/**
+ * struct nfp_ccm - common control message handling
+ * @app: APP handle
+ *
+ * @tag_allocator: bitmap of control message tags in use
+ * @tag_alloc_next: next tag bit to allocate
+ * @tag_alloc_last: next tag bit to be freed
+ *
+ * @replies: received cmsg replies waiting to be consumed
+ * @wq: work queue for waiting for cmsg replies
+ */
+struct nfp_ccm {
+ struct nfp_app *app;
+
+ DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+ u16 tag_alloc_next;
+ u16 tag_alloc_last;
+
+ struct sk_buff_head replies;
+ struct wait_queue_head wq;
+};
+
+int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
+void nfp_ccm_clean(struct nfp_ccm *ccm);
+void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
+struct sk_buff *
+nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int reply_size);
+#endif
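Combined with the bpf/cmsg.c conversion earlier in this patch, the intended use of the new helper is roughly the following (condensed from the hunks above, error handling abridged): a client embeds struct nfp_ccm, initializes it once, forwards control-message replies to nfp_ccm_rx() from its RX handler, and performs request/reply exchanges through nfp_ccm_communicate(), which fills in the nfp_ccm_hdr and blocks until the matching reply arrives.

	/* setup, e.g. in the app init callback */
	err = nfp_ccm_init(&bpf->ccm, app);
	if (err)
		return err;

	/* request/reply: the header (ver/type/tag) is filled in by the helper */
	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
				  sizeof(struct cmsg_reply_map_alloc_tbl));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* control-message RX path hands replies back to the waiter */
	nfp_ccm_rx(&bpf->ccm, skb);

	/* teardown */
	nfp_ccm_clean(&bpf->ccm);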
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index eeda4ed98333..c56e31d9f8a4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
tmp_push_vlan_tci =
FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
- NFP_FL_PUSH_VLAN_CFI;
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
@@ -161,9 +160,9 @@ nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
struct nfp_flower_priv *priv = app->priv;
switch (tun->key.tp_dst) {
- case htons(NFP_FL_VXLAN_PORT):
+ case htons(IANA_VXLAN_UDP_PORT):
return NFP_FL_TUNNEL_VXLAN;
- case htons(NFP_FL_GENEVE_PORT):
+ case htons(GENEVE_UDP_PORT):
if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
return NFP_FL_TUNNEL_GENEVE;
/* FALLTHROUGH */
@@ -583,60 +582,23 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
}
}
-static int
-nfp_fl_pedit(const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow,
- char *nfp_action, int *a_len, u32 *csum_updated)
-{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+struct nfp_flower_pedit_acts {
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
- enum flow_action_mangle_base htype;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
+};
+
+static int
+nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
+ int *a_len, struct nfp_flower_pedit_acts *set_act,
+ u32 *csum_updated)
+{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
- u32 offset;
- int err;
-
- memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
- memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
- memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
- memset(&set_ip6_src, 0, sizeof(set_ip6_src));
- memset(&set_ip_addr, 0, sizeof(set_ip_addr));
- memset(&set_tport, 0, sizeof(set_tport));
- memset(&set_eth, 0, sizeof(set_eth));
-
- htype = act->mangle.htype;
- offset = act->mangle.offset;
-
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- err = nfp_fl_set_eth(act, offset, &set_eth);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
- &set_ip_ttl_tos);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
- &set_ip6_src, &set_ip6_tc_hl_fl);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- err = nfp_fl_set_tport(act, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_TCP);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- err = nfp_fl_set_tport(act, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_UDP);
- break;
- default:
- return -EOPNOTSUPP;
- }
- if (err)
- return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -645,77 +607,82 @@ nfp_fl_pedit(const struct flow_action_entry *act,
ip_proto = match.key->ip_proto;
}
- if (set_eth.head.len_lw) {
- act_size = sizeof(set_eth);
- memcpy(nfp_action, &set_eth, act_size);
+ if (set_act->set_eth.head.len_lw) {
+ act_size = sizeof(set_act->set_eth);
+ memcpy(nfp_action, &set_act->set_eth, act_size);
*a_len += act_size;
}
- if (set_ip_ttl_tos.head.len_lw) {
+
+ if (set_act->set_ip_ttl_tos.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip_ttl_tos);
- memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ act_size = sizeof(set_act->set_ip_ttl_tos);
+ memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
*a_len += act_size;
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip_addr.head.len_lw) {
+
+ if (set_act->set_ip_addr.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip_addr);
- memcpy(nfp_action, &set_ip_addr, act_size);
+ act_size = sizeof(set_act->set_ip_addr);
+ memcpy(nfp_action, &set_act->set_ip_addr, act_size);
*a_len += act_size;
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip6_tc_hl_fl.head.len_lw) {
+
+ if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_tc_hl_fl);
- memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ act_size = sizeof(set_act->set_ip6_tc_hl_fl);
+ memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+
+ if (set_act->set_ip6_dst.head.len_lw &&
+ set_act->set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
*/
nfp_action += act_size;
- act_size = sizeof(set_ip6_src);
- memcpy(nfp_action, &set_ip6_src, act_size);
+ act_size = sizeof(set_act->set_ip6_src);
+ memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
- act_size = sizeof(set_ip6_dst);
- memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
- act_size);
+ act_size = sizeof(set_act->set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
+ &set_act->set_ip6_dst, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_dst.head.len_lw) {
+ } else if (set_act->set_ip6_dst.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_dst);
- memcpy(nfp_action, &set_ip6_dst, act_size);
+ act_size = sizeof(set_act->set_ip6_dst);
+ memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_src.head.len_lw) {
+ } else if (set_act->set_ip6_src.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_src);
- memcpy(nfp_action, &set_ip6_src, act_size);
+ act_size = sizeof(set_act->set_ip6_src);
+ memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_tport.head.len_lw) {
+ if (set_act->set_tport.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_tport);
- memcpy(nfp_action, &set_tport, act_size);
+ act_size = sizeof(set_act->set_tport);
+ memcpy(nfp_action, &set_act->set_tport, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
@@ -726,7 +693,40 @@ nfp_fl_pedit(const struct flow_action_entry *act,
}
static int
-nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
+nfp_fl_pedit(const struct flow_action_entry *act,
+ struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
+ u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
+{
+ enum flow_action_mangle_base htype;
+ u32 offset;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ return nfp_fl_set_eth(act, offset, &set_act->set_eth);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
+ &set_act->set_ip_ttl_tos);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
+ &set_act->set_ip6_src,
+ &set_act->set_ip6_tc_hl_fl);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_output_action(struct nfp_app *app,
+ const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -776,7 +776,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated)
+ int *out_cnt, u32 *csum_updated,
+ struct nfp_flower_pedit_acts *set_act)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
@@ -861,7 +862,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
case FLOW_ACTION_MANGLE:
if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
- a_len, csum_updated))
+ a_len, csum_updated, set_act))
return -EOPNOTSUPP;
break;
case FLOW_ACTION_CSUM:
@@ -881,12 +882,49 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
}
+static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
+ int current_act_idx)
+{
+ struct flow_action_entry current_act;
+ struct flow_action_entry prev_act;
+
+ current_act = flow_act->entries[current_act_idx];
+ if (current_act.id != FLOW_ACTION_MANGLE)
+ return false;
+
+ if (current_act_idx == 0)
+ return true;
+
+ prev_act = flow_act->entries[current_act_idx - 1];
+
+ return prev_act.id != FLOW_ACTION_MANGLE;
+}
+
+static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
+ int current_act_idx)
+{
+ struct flow_action_entry current_act;
+ struct flow_action_entry next_act;
+
+ current_act = flow_act->entries[current_act_idx];
+ if (current_act.id != FLOW_ACTION_MANGLE)
+ return false;
+
+ if (current_act_idx == flow_act->num_entries - 1)
+ return true;
+
+ next_act = flow_act->entries[current_act_idx + 1];
+
+ return next_act.id != FLOW_ACTION_MANGLE;
+}
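
nfp_fl_check_mangle_start()/_end() above detect the first and last entries of a run of consecutive mangle actions, so the accumulated nfp_flower_pedit_acts state is zeroed once at the start of a run and committed once at its end. A standalone userspace model of that coalescing loop (illustrative names and types only, not the driver's code) is sketched below:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum act_id { ACT_OUTPUT, ACT_MANGLE, ACT_CSUM };

struct acc { int fields_set; };	/* stand-in for nfp_flower_pedit_acts */

static bool mangle_start(const enum act_id *acts, int i)
{
	if (acts[i] != ACT_MANGLE)
		return false;
	return i == 0 || acts[i - 1] != ACT_MANGLE;
}

static bool mangle_end(const enum act_id *acts, int n, int i)
{
	if (acts[i] != ACT_MANGLE)
		return false;
	return i == n - 1 || acts[i + 1] != ACT_MANGLE;
}

int main(void)
{
	enum act_id acts[] = { ACT_MANGLE, ACT_MANGLE, ACT_CSUM, ACT_OUTPUT };
	int n = sizeof(acts) / sizeof(acts[0]);
	struct acc acc = { 0 };
	int i;

	for (i = 0; i < n; i++) {
		if (mangle_start(acts, i))
			memset(&acc, 0, sizeof(acc));	/* new run begins */
		if (acts[i] == ACT_MANGLE)
			acc.fields_set++;		/* accumulate fields */
		if (mangle_end(acts, n, i))
			printf("commit %d coalesced mangles\n", acc.fields_set);
	}
	return 0;
}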
+
int nfp_flower_compile_action(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
+ struct nfp_flower_pedit_acts set_act;
enum nfp_flower_tun_type tun_type;
struct flow_action_entry *act;
u32 csum_updated = 0;
@@ -900,12 +938,18 @@ int nfp_flower_compile_action(struct nfp_app *app,
out_cnt = 0;
flow_action_for_each(i, act, &flow->rule->action) {
+ if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+ memset(&set_act, 0, sizeof(set_act));
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
- &out_cnt, &csum_updated);
+ &out_cnt, &csum_updated, &set_act);
if (err)
return err;
act_cnt++;
+ if (nfp_fl_check_mangle_end(&flow->rule->action, i))
+ nfp_fl_commit_mangle(flow,
+ &nfp_flow->action_data[act_len],
+ &act_len, &set_act, &csum_updated);
}
/* We optimise when the action list is small, this can unfortunately
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index cf9e1118ee8f..d5bbe3d6048b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -159,7 +159,7 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
rtnl_lock();
rcu_read_lock();
- netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
rcu_read_unlock();
if (!netdev) {
nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -192,7 +192,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
msg = nfp_flower_cmsg_get_data(skb);
rcu_read_lock();
- exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
rcu_read_unlock();
if (!exists) {
nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -205,6 +205,50 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
}
static void
+nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
+ struct nfp_flower_cmsg_merge_hint *msg;
+ struct nfp_fl_payload *sub_flows[2];
+ int err, i, flow_cnt;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ /* msg->count is zero-based; a count of 0 means one flow entry. */
+ flow_cnt = msg->count + 1;
+
+ if (msg_len < struct_size(msg, flow, flow_cnt)) {
+ nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %zd\n",
+ msg_len, struct_size(msg, flow, flow_cnt));
+ return;
+ }
+
+ if (flow_cnt != 2) {
+ nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
+ flow_cnt);
+ return;
+ }
+
+ rtnl_lock();
+ for (i = 0; i < flow_cnt; i++) {
+ u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
+
+ sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
+ if (!sub_flows[i]) {
+ nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
+ goto err_rtnl_unlock;
+ }
+ }
+
+ err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
+ /* Only warn on memory failure; a vetoed hint does not break functionality. */
+ if (err == -ENOMEM)
+ nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
+
+err_rtnl_unlock:
+ rtnl_unlock();
+}
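
The merge-hint handler above validates a flexible-array control message: the zero-based count is converted to a flow count and the payload length is checked with struct_size(), which also guards the size computation against overflow. A hedged userspace model of the same length check (local stand-in types, not the driver's structures) might look like:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the structure below mirrors the merge hint layout
 * in cmsg.h but is a local stand-in for the sake of a compilable example.
 */
struct merge_hint {
	uint8_t reserved[3];
	uint8_t count;			/* zero-based: 0 means one flow */
	struct {
		uint32_t host_ctx;
		uint64_t host_cookie;
	} __attribute__((packed)) flow[];
};

static int check_len(size_t msg_len, const struct merge_hint *msg)
{
	size_t flow_cnt = msg->count + 1;
	size_t need = sizeof(*msg) + flow_cnt * sizeof(msg->flow[0]);

	if (msg_len < need) {
		fprintf(stderr, "too short: %zu < %zu\n", msg_len, need);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct merge_hint hdr = { .count = 1 };	/* claims two flows */

	/* A 16-byte payload cannot hold two 12-byte flow entries. */
	printf("check: %d\n", check_len(16, &hdr));
	return 0;
}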
+
+static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *app_priv = app->priv;
@@ -222,12 +266,21 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
+ nfp_flower_cmsg_merge_hint_rx(app, skb);
+ break;
+ }
+ goto err_default;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
nfp_tunnel_request_route(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
+ nfp_flower_stats_rlim_reply(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
@@ -235,6 +288,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
}
/* fall through */
default:
+err_default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
goto out;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..537f7fc19584 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
@@ -82,7 +82,6 @@
#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
@@ -403,11 +402,13 @@ struct nfp_flower_cmsg_hdr {
/* Types defined for port related control messages */
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD = 1,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_MERGE_HINT = 9,
NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
@@ -415,6 +416,9 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
+ NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
@@ -452,6 +456,16 @@ struct nfp_flower_cmsg_portreify {
#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
+/* NFP_FLOWER_CMSG_TYPE_MERGE_HINT */
+struct nfp_flower_cmsg_merge_hint {
+ u8 reserved[3];
+ u8 count;
+ struct {
+ __be32 host_ctx;
+ __be64 host_cookie;
+ } __packed flow[0];
+};
+
enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -474,6 +488,13 @@ enum nfp_flower_cmsg_port_vnic_type {
#define NFP_FLOWER_CMSG_PORT_PCIE_Q GENMASK(5, 0)
#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM GENMASK(7, 0)
+static inline u32 nfp_flower_internal_port_get_port_id(u8 internal_port)
+{
+ return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, internal_port) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT);
+}
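
nfp_flower_internal_port_get_port_id() packs the internal port number and the OTHER_PORT type into one 32-bit port id with FIELD_PREP(); the decode side later pulls the fields back out with FIELD_GET(). The standalone sketch below shows that round trip with placeholder masks; the real field layout is the one defined by NFP_FLOWER_CMSG_PORT_TYPE and NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM in cmsg.h:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field definitions; not the driver's actual bit layout. */
#define EX_PORT_TYPE_MASK	0xf0000000u	/* assumed 4-bit type field */
#define EX_PORT_NUM_MASK	0x000000ffu	/* assumed 8-bit port number */

#define EX_FIELD_SHIFT(mask)	(__builtin_ctz(mask))
#define EX_FIELD_PREP(mask, v)	(((uint32_t)(v) << EX_FIELD_SHIFT(mask)) & (mask))
#define EX_FIELD_GET(mask, reg)	(((reg) & (mask)) >> EX_FIELD_SHIFT(mask))

int main(void)
{
	uint32_t type_other = 0x3;	/* placeholder for the OTHER_PORT type */
	uint32_t internal_port = 42;
	uint32_t port_id;

	/* Encode, as the inline helper above does. */
	port_id = EX_FIELD_PREP(EX_PORT_NUM_MASK, internal_port) |
		  EX_FIELD_PREP(EX_PORT_TYPE_MASK, type_other);

	/* Decode, as the receive path does when classifying the port. */
	printf("type=%u num=%u\n",
	       (unsigned int)EX_FIELD_GET(EX_PORT_TYPE_MASK, port_id),
	       (unsigned int)EX_FIELD_GET(EX_PORT_NUM_MASK, port_id));
	return 0;
}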
+
static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
{
return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 408089133599..eb846133943b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -22,6 +22,9 @@
#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
+#define NFP_MIN_INT_PORT_ID 1
+#define NFP_MAX_INT_PORT_ID 256
+
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
return "FLOWER";
@@ -32,6 +35,113 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
+static int
+nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
+ struct net_device *netdev)
+{
+ struct net_device *entry;
+ int i, id = 0;
+
+ rcu_read_lock();
+ idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
+ if (entry == netdev) {
+ id = i;
+ break;
+ }
+ rcu_read_unlock();
+
+ return id;
+}
+
+static int
+nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int id;
+
+ id = nfp_flower_lookup_internal_port_id(priv, netdev);
+ if (id > 0)
+ return id;
+
+ idr_preload(GFP_ATOMIC);
+ spin_lock_bh(&priv->internal_ports.lock);
+ id = idr_alloc(&priv->internal_ports.port_ids, netdev,
+ NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
+ spin_unlock_bh(&priv->internal_ports.lock);
+ idr_preload_end();
+
+ return id;
+}
+
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ int ext_port;
+
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ return nfp_repr_get_port_id(netdev);
+ } else if (nfp_flower_internal_port_can_offload(app, netdev)) {
+ ext_port = nfp_flower_get_internal_port_id(app, netdev);
+ if (ext_port < 0)
+ return 0;
+
+ return nfp_flower_internal_port_get_port_id(ext_port);
+ }
+
+ return 0;
+}
+
+static struct net_device *
+nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct net_device *netdev;
+
+ rcu_read_lock();
+ netdev = idr_find(&priv->internal_ports.port_ids, port_id);
+ rcu_read_unlock();
+
+ return netdev;
+}
+
+static void
+nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int id;
+
+ id = nfp_flower_lookup_internal_port_id(priv, netdev);
+ if (!id)
+ return;
+
+ spin_lock_bh(&priv->internal_ports.lock);
+ idr_remove(&priv->internal_ports.port_ids, id);
+ spin_unlock_bh(&priv->internal_ports.lock);
+}
+
+static int
+nfp_flower_internal_port_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ if (event == NETDEV_UNREGISTER &&
+ nfp_flower_internal_port_can_offload(app, netdev))
+ nfp_flower_free_internal_port_id(app, netdev);
+
+ return NOTIFY_OK;
+}
+
+static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
+{
+ spin_lock_init(&priv->internal_ports.lock);
+ idr_init(&priv->internal_ports.port_ids);
+}
+
+static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
+{
+ idr_destroy(&priv->internal_ports.port_ids);
+}
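
The helpers above implement a lookup-or-allocate scheme for internal port ids in the range NFP_MIN_INT_PORT_ID..NFP_MAX_INT_PORT_ID, with the id released again on NETDEV_UNREGISTER. A minimal userspace model of those semantics (a flat table standing in for the driver's IDR and spinlock; all names here are made up) is sketched here:

#include <stdio.h>

#define MIN_ID 1
#define MAX_ID 256

static const void *table[MAX_ID + 1];

static int port_id_lookup(const void *netdev)
{
	int i;

	for (i = MIN_ID; i <= MAX_ID; i++)
		if (table[i] == netdev)
			return i;
	return 0;	/* 0 means "no id assigned yet" */
}

static int port_id_get(const void *netdev)
{
	int id = port_id_lookup(netdev);
	int i;

	if (id)
		return id;	/* reuse the existing assignment */

	for (i = MIN_ID; i <= MAX_ID; i++)
		if (!table[i]) {
			table[i] = netdev;
			return i;
		}
	return -1;	/* id space exhausted */
}

static void port_id_free(const void *netdev)
{
	int id = port_id_lookup(netdev);

	if (id)
		table[id] = NULL;	/* mirrors the NETDEV_UNREGISTER path */
}

int main(void)
{
	int dummy_a, dummy_b;
	int a = port_id_get(&dummy_a);
	int b = port_id_get(&dummy_b);

	printf("a=%d b=%d a-again=%d\n", a, b, port_id_get(&dummy_a));
	port_id_free(&dummy_a);
	return 0;
}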
+
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
@@ -119,12 +229,21 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
}
static struct net_device *
-nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
+nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
enum nfp_repr_type repr_type;
struct nfp_reprs *reprs;
u8 port = 0;
+ /* Check if the port is internal. */
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
+ if (redir_egress)
+ *redir_egress = true;
+ port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
+ return nfp_flower_get_netdev_from_internal_port_id(app, port);
+ }
+
repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
if (repr_type > NFP_REPR_TYPE_MAX)
return NULL;
@@ -641,11 +760,33 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
+ /* Tell the firmware that the driver supports flow merging. */
+ err = nfp_rtsym_write_le(app->pf->rtbl,
+ "_abi_flower_merge_hint_enable", 1);
+ if (!err) {
+ app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
+ nfp_flower_internal_port_init(app_priv);
+ } else if (err == -ENOENT) {
+ nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
+ } else {
+ goto err_lag_clean;
+ }
+ } else {
+ nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
+ }
+
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+ nfp_flower_qos_init(app);
+
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
INIT_LIST_HEAD(&app_priv->non_repr_priv);
return 0;
+err_lag_clean:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
nfp_flower_metadata_cleanup(app);
err_free_app_priv:
@@ -661,9 +802,15 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+ nfp_flower_qos_cleanup(app);
+
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+ nfp_flower_internal_port_cleanup(app_priv);
+
nfp_flower_metadata_cleanup(app);
vfree(app->priv);
app->priv = NULL;
@@ -762,6 +909,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
if (ret & NOTIFY_STOP_MASK)
return ret;
+ ret = nfp_flower_internal_port_event_handler(app, netdev, event);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+
return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
@@ -800,7 +951,7 @@ const struct nfp_app_type app_flower = {
.sriov_disable = nfp_flower_sriov_disable,
.eswitch_mode_get = eswitch_mode_get,
- .repr_get = nfp_flower_repr_get,
+ .dev_get = nfp_flower_dev_get,
.setup_tc = nfp_flower_setup_tc,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c0945a5fd1a4..40957a8dbfe6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -5,6 +5,7 @@
#define __NFP_FLOWER_H__ 1
#include "cmsg.h"
+#include "../nfp_net.h"
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
@@ -34,14 +35,14 @@ struct nfp_app;
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
-#define NFP_FL_VXLAN_PORT 4789
-#define NFP_FL_GENEVE_PORT 6081
-
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
+#define NFP_FL_FEATS_VF_RLIM BIT(4)
+#define NFP_FL_FEATS_FLOW_MOD BIT(5)
+#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
@@ -118,6 +119,16 @@ struct nfp_fl_lag {
};
/**
+ * struct nfp_fl_internal_ports - Flower APP priv data for additional ports
+ * @port_ids: Assignment of ids to any additional ports
+ * @lock: Lock for extra ports list
+ */
+struct nfp_fl_internal_ports {
+ struct idr port_ids;
+ spinlock_t lock;
+};
+
+/**
* struct nfp_flower_priv - Flower APP per-vNIC priv data
* @app: Back pointer to app
* @nn: Pointer to vNIC
@@ -131,6 +142,7 @@ struct nfp_fl_lag {
* @flow_table: Hash table used to store flower rules
* @stats: Stored stats updates for flower rules
* @stats_lock: Lock for flower rule stats updates
+ * @stats_ctx_table: Hash table to map stats contexts to their flow rules
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs_high: List of higher priority skbs for control message
* processing
@@ -146,6 +158,10 @@ struct nfp_fl_lag {
* @non_repr_priv: List of offloaded non-repr ports and their priv data
* @active_mem_unit: Current active memory unit for flower rules
* @total_mem_units: Total number of available memory units for flower rules
+ * @internal_ports: Internal port ids used in offloaded rules
+ * @qos_stats_work: Workqueue for qos stats processing
+ * @qos_rate_limiters: Current active qos rate limiters
+ * @qos_stats_lock: Lock on qos stats updates
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -160,6 +176,7 @@ struct nfp_flower_priv {
struct rhashtable flow_table;
struct nfp_fl_stats *stats;
spinlock_t stats_lock; /* lock stats */
+ struct rhashtable stats_ctx_table;
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
@@ -172,6 +189,24 @@ struct nfp_flower_priv {
struct list_head non_repr_priv;
unsigned int active_mem_unit;
unsigned int total_mem_units;
+ struct nfp_fl_internal_ports internal_ports;
+ struct delayed_work qos_stats_work;
+ unsigned int qos_rate_limiters;
+ spinlock_t qos_stats_lock; /* Protect the qos stats */
+};
+
+/**
+ * struct nfp_fl_qos - Flower APP priv data for quality of service
+ * @netdev_port_id: NFP port number of repr with qos info
+ * @curr_stats: Currently stored stats updates for qos info
+ * @prev_stats: Previously stored updates for qos info
+ * @last_update: Stored time when last stats were updated
+ */
+struct nfp_fl_qos {
+ u32 netdev_port_id;
+ struct nfp_stat_pair curr_stats;
+ struct nfp_stat_pair prev_stats;
+ u64 last_update;
};
/**
@@ -180,14 +215,18 @@ struct nfp_flower_priv {
* @lag_port_flags: Extended port flags to record lag state of repr
* @mac_offloaded: Flag indicating a MAC address is offloaded for repr
* @offloaded_mac_addr: MAC address that has been offloaded for repr
+ * @block_shared: Flag indicating if offload applies to shared blocks
* @mac_list: List entry of reprs that share the same offloaded MAC
+ * @qos_table: Stored info on filters implementing qos
*/
struct nfp_flower_repr_priv {
struct nfp_repr *nfp_repr;
unsigned long lag_port_flags;
bool mac_offloaded;
u8 offloaded_mac_addr[ETH_ALEN];
+ bool block_shared;
struct list_head mac_list;
+ struct nfp_fl_qos qos_table;
};
/**
@@ -239,6 +278,25 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
+ struct list_head linked_flows;
+ bool in_hw;
+};
+
+struct nfp_fl_payload_link {
+ /* A link contains a pointer to a merge flow and an associated sub_flow.
+ * Each merge flow will feature in 2 links to its underlying sub_flows.
+ * A sub_flow will have at least 1 link to a merge flow or more if it
+ * has been used to create multiple merge flows.
+ *
+ * For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists
+ * all links to sub_flows (sub_flow.flow) via merge.list.
+ * For a sub_flow, 'linked_flows' gives all links to merge flows it has
+ * formed (merge_flow.flow) via sub_flow.list.
+ */
+ struct {
+ struct list_head list;
+ struct nfp_fl_payload *flow;
+ } merge_flow, sub_flow;
};
extern const struct rhashtable_params nfp_flower_table_params;
@@ -250,12 +308,40 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
+static inline bool
+nfp_flower_internal_port_can_offload(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+
+ if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
+ return false;
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+
+ return false;
+}
+
+/* The address of the merged flow acts as its cookie.
+ * Cookies supplied to us by TC flower are also addresses to allocated
+ * memory and thus this scheme should not generate any collisions.
+ */
+static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
+{
+ return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
+}
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2);
int nfp_flower_compile_flow_match(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
@@ -270,6 +356,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev);
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+ struct nfp_fl_payload *nfp_flow);
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow);
@@ -277,6 +365,8 @@ struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev);
struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
+struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
@@ -302,6 +392,11 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+void nfp_flower_qos_init(struct nfp_app *app);
+void nfp_flower_qos_cleanup(struct nfp_app *app);
+int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow);
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event);
@@ -314,4 +409,6 @@ void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+ struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..bfa4bf34911d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- if (match.key->vlan_id || match.key->vlan_priority) {
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.key->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.key->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- ext->tci = cpu_to_be16(tmp_tci);
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.mask->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.mask->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- msk->tci = cpu_to_be16(tmp_tci);
- }
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.key->vlan_id);
+ ext->tci = cpu_to_be16(tmp_tci);
+
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.mask->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.mask->vlan_id);
+ msk->tci = cpu_to_be16(tmp_tci);
}
}
@@ -327,13 +326,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
- u32 cmsg_port = 0;
+ u32 port_id;
int err;
u8 *ext;
u8 *msk;
- if (nfp_netdev_is_nfp_repr(netdev))
- cmsg_port = nfp_repr_get_port_id(netdev);
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -359,13 +357,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- cmsg_port, false, tun_type);
+ port_id, false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- cmsg_port, true, tun_type);
+ port_id, true, tun_type);
if (err)
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 492837b852b6..3d326efdc814 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -24,6 +24,18 @@ struct nfp_fl_flow_table_cmp_arg {
unsigned long cookie;
};
+struct nfp_fl_stats_ctx_to_flow {
+ struct rhash_head ht_node;
+ u32 stats_cxt;
+ struct nfp_fl_payload *flow;
+};
+
+static const struct rhashtable_params stats_ctx_table_params = {
+ .key_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
+ .head_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
+ .key_len = sizeof(u32),
+};
+
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
struct nfp_flower_priv *priv = app->priv;
@@ -264,9 +276,6 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
if (!mask_entry)
return false;
- if (meta_flags)
- *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
-
*mask_id = mask_entry->mask_id;
mask_entry->ref_cnt--;
if (!mask_entry->ref_cnt) {
@@ -285,25 +294,42 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev)
{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
u8 new_mask_id;
u32 stats_cxt;
+ int err;
- if (nfp_get_stats_entry(app, &stats_cxt))
- return -ENOENT;
+ err = nfp_get_stats_entry(app, &stats_cxt);
+ if (err)
+ return err;
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
nfp_flow->ingress_dev = netdev;
+ ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
+ if (!ctx_entry) {
+ err = -ENOMEM;
+ goto err_release_stats;
+ }
+
+ ctx_entry->stats_cxt = stats_cxt;
+ ctx_entry->flow = nfp_flow;
+
+ if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
+ stats_ctx_table_params)) {
+ err = -ENOMEM;
+ goto err_free_ctx_entry;
+ }
+
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
- if (nfp_release_stats_entry(app, stats_cxt))
- return -EINVAL;
- return -ENOENT;
+ err = -ENOENT;
+ goto err_remove_rhash;
}
nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
@@ -317,43 +343,82 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
- if (nfp_release_stats_entry(app, stats_cxt))
- return -EINVAL;
-
- if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
- nfp_flow->meta.mask_len,
- NULL, &new_mask_id))
- return -EINVAL;
-
- return -EEXIST;
+ err = -EEXIST;
+ goto err_remove_mask;
}
return 0;
+
+err_remove_mask:
+ nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
+ NULL, &new_mask_id);
+err_remove_rhash:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+ &ctx_entry->ht_node,
+ stats_ctx_table_params));
+err_free_ctx_entry:
+ kfree(ctx_entry);
+err_release_stats:
+ nfp_release_stats_entry(app, stats_cxt);
+
+ return err;
+}
+
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+ struct nfp_fl_payload *nfp_flow)
+{
+ nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+ nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+ priv->flower_version++;
}
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow)
{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
u8 new_mask_id = 0;
u32 temp_ctx_id;
+ __nfp_modify_flow_metadata(priv, nfp_flow);
+
nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
&new_mask_id);
- nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
- priv->flower_version++;
-
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
- /* Release the stats ctx id. */
+ /* Release the stats ctx id and ctx to flow table entry. */
temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+ ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
+ stats_ctx_table_params);
+ if (!ctx_entry)
+ return -ENOENT;
+
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+ &ctx_entry->ht_node,
+ stats_ctx_table_params));
+ kfree(ctx_entry);
+
return nfp_release_stats_entry(app, temp_ctx_id);
}
+struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
+{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
+ struct nfp_flower_priv *priv = app->priv;
+
+ ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
+ stats_ctx_table_params);
+ if (!ctx_entry)
+ return NULL;
+
+ return ctx_entry->flow;
+}
+
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -403,6 +468,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
return err;
+ err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
+ if (err)
+ goto err_free_flow_table;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -410,7 +479,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_flow_table;
+ goto err_free_stats_ctx_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -447,6 +516,8 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_stats_ctx_table:
+ rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
rhashtable_destroy(&priv->flow_table);
return -ENOMEM;
@@ -461,6 +532,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
rhashtable_free_and_destroy(&priv->flow_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->stats_ctx_table,
+ nfp_check_rhashtable_empty, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 450d7296fd57..1fbfeb43c538 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -55,6 +55,28 @@
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+#define NFP_FLOWER_MERGE_FIELDS \
+ (NFP_FLOWER_LAYER_PORT | \
+ NFP_FLOWER_LAYER_MAC | \
+ NFP_FLOWER_LAYER_TP | \
+ NFP_FLOWER_LAYER_IPV4 | \
+ NFP_FLOWER_LAYER_IPV6)
+
+struct nfp_flower_merge_check {
+ union {
+ struct {
+ __be16 tci;
+ struct nfp_flower_mac_mpls l2;
+ struct nfp_flower_tp_ports l4;
+ union {
+ struct nfp_flower_ipv4 ipv4;
+ struct nfp_flower_ipv6 ipv6;
+ };
+ };
+ unsigned long vals[8];
+ };
+};
+
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
u8 mtype)
@@ -195,7 +217,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_opts(rule, &enc_op);
switch (enc_ports.key->dst) {
- case htons(NFP_FL_VXLAN_PORT):
+ case htons(IANA_VXLAN_UDP_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
@@ -203,7 +225,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (enc_op.key)
return -EOPNOTSUPP;
break;
- case htons(NFP_FL_GENEVE_PORT):
+ case htons(GENEVE_UDP_PORT):
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
return -EOPNOTSUPP;
*tun_type = NFP_FL_TUNNEL_GENEVE;
@@ -326,7 +348,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
break;
case cpu_to_be16(ETH_P_IPV6):
- key_layer |= NFP_FLOWER_LAYER_IPV6;
+ key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -376,6 +398,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
+ INIT_LIST_HEAD(&flow_pay->linked_flows);
+ flow_pay->in_hw = false;
return flow_pay;
@@ -388,6 +412,447 @@ err_free_flow:
return NULL;
}
+static int
+nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
+ struct nfp_flower_merge_check *merge,
+ u8 *last_act_id, int *act_out)
+{
+ struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
+ struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
+ struct nfp_fl_set_ip4_addrs *ipv4_add;
+ struct nfp_fl_set_ipv6_addr *ipv6_add;
+ struct nfp_fl_push_vlan *push_vlan;
+ struct nfp_fl_set_tport *tport;
+ struct nfp_fl_set_eth *eth;
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+ u8 act_id = 0;
+ u8 *ports;
+ int i;
+
+ while (act_off < flow->meta.act_len) {
+ a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
+ act_id = a->jump_id;
+
+ switch (act_id) {
+ case NFP_FL_ACTION_OPCODE_OUTPUT:
+ if (act_out)
+ (*act_out)++;
+ break;
+ case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
+ push_vlan = (struct nfp_fl_push_vlan *)a;
+ if (push_vlan->vlan_tci)
+ merge->tci = cpu_to_be16(0xffff);
+ break;
+ case NFP_FL_ACTION_OPCODE_POP_VLAN:
+ merge->tci = cpu_to_be16(0);
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+ /* New tunnel header means l2 to l4 can be matched. */
+ eth_broadcast_addr(&merge->l2.mac_dst[0]);
+ eth_broadcast_addr(&merge->l2.mac_src[0]);
+ memset(&merge->l4, 0xff,
+ sizeof(struct nfp_flower_tp_ports));
+ memset(&merge->ipv4, 0xff,
+ sizeof(struct nfp_flower_ipv4));
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
+ eth = (struct nfp_fl_set_eth *)a;
+ for (i = 0; i < ETH_ALEN; i++)
+ merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
+ for (i = 0; i < ETH_ALEN; i++)
+ merge->l2.mac_src[i] |=
+ eth->eth_addr_mask[ETH_ALEN + i];
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
+ ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
+ merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
+ merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
+ ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
+ merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
+ merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
+ ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+ for (i = 0; i < 4; i++)
+ merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
+ ipv6_add->ipv6[i].mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
+ ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+ for (i = 0; i < 4; i++)
+ merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
+ ipv6_add->ipv6[i].mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
+ ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
+ merge->ipv6.ip_ext.ttl |=
+ ipv6_tc_hl_fl->ipv6_hop_limit_mask;
+ merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
+ merge->ipv6.ipv6_flow_label_exthdr |=
+ ipv6_tc_hl_fl->ipv6_label_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_UDP:
+ case NFP_FL_ACTION_OPCODE_SET_TCP:
+ tport = (struct nfp_fl_set_tport *)a;
+ ports = (u8 *)&merge->l4.port_src;
+ for (i = 0; i < 4; i++)
+ ports[i] |= tport->tp_port_mask[i];
+ break;
+ case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ case NFP_FL_ACTION_OPCODE_PRE_LAG:
+ case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ if (last_act_id)
+ *last_act_id = act_id;
+
+ return 0;
+}
+
+static int
+nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
+ struct nfp_flower_merge_check *merge,
+ bool extra_fields)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+ u8 *mask = flow->mask_data;
+ u8 key_layer, match_size;
+
+ memset(merge, 0, sizeof(struct nfp_flower_merge_check));
+
+ meta_tci = (struct nfp_flower_meta_tci *)mask;
+ key_layer = meta_tci->nfp_flow_key_layer;
+
+ if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
+ return -EOPNOTSUPP;
+
+ merge->tci = meta_tci->tci;
+ mask += sizeof(struct nfp_flower_meta_tci);
+
+ if (key_layer & NFP_FLOWER_LAYER_EXT_META)
+ mask += sizeof(struct nfp_flower_ext_meta);
+
+ mask += sizeof(struct nfp_flower_in_port);
+
+ if (key_layer & NFP_FLOWER_LAYER_MAC) {
+ match_size = sizeof(struct nfp_flower_mac_mpls);
+ memcpy(&merge->l2, mask, match_size);
+ mask += match_size;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_TP) {
+ match_size = sizeof(struct nfp_flower_tp_ports);
+ memcpy(&merge->l4, mask, match_size);
+ mask += match_size;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ match_size = sizeof(struct nfp_flower_ipv4);
+ memcpy(&merge->ipv4, mask, match_size);
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV6) {
+ match_size = sizeof(struct nfp_flower_ipv6);
+ memcpy(&merge->ipv6, mask, match_size);
+ }
+
+ return 0;
+}
+
+static int
+nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2)
+{
+ /* Two flows can be merged if sub_flow2 only matches on bits that are
+ * either matched by sub_flow1 or set by a sub_flow1 action. This
+ * ensures that every packet that hits sub_flow1 and recirculates is
+ * guaranteed to hit sub_flow2.
+ */
+ struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
+ int err, act_out = 0;
+ u8 last_act_id = 0;
+
+ err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
+ true);
+ if (err)
+ return err;
+
+ err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
+ false);
+ if (err)
+ return err;
+
+ err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
+ &last_act_id, &act_out);
+ if (err)
+ return err;
+
+ /* Must only be 1 output action and it must be the last in sequence. */
+ if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+ return -EOPNOTSUPP;
+
+ /* Reject merge if sub_flow2 matches on something that is not matched
+ * on or set in an action by sub_flow1.
+ */
+ err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
+ sub_flow1_merge.vals,
+ sizeof(struct nfp_flower_merge_check) * 8);
+ if (err)
+ return -EINVAL;
+
+ return 0;
+}
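
nfp_flower_can_merge() above is a subset test: sub_flow2 may only match on bits that sub_flow1 either matched on or overwrote with a set action, which the driver evaluates with bitmap_andnot() over the flattened match structures. A single-word userspace sketch of the same test (illustrative masks only) is:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sub1_match_mask = 0x00ff;	/* fields matched by flow 1 */
	uint64_t sub1_action_set = 0x0f00;	/* fields rewritten by flow 1 */
	uint64_t sub2_match_mask = 0x0103;	/* fields matched by flow 2 */
	uint64_t leftover;

	/* Reject if flow 2 matches anything flow 1 neither matched nor set. */
	leftover = sub2_match_mask & ~(sub1_match_mask | sub1_action_set);
	if (leftover)
		printf("reject merge: unmatched bits %#llx\n",
		       (unsigned long long)leftover);
	else
		printf("flows can be merged\n");
	return 0;
}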
+
+static unsigned int
+nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
+ bool *tunnel_act)
+{
+ unsigned int act_off = 0, act_len;
+ struct nfp_fl_act_head *a;
+ u8 act_id = 0;
+
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&act_src[act_off];
+ act_len = a->len_lw << NFP_FL_LW_SIZ;
+ act_id = a->jump_id;
+
+ switch (act_id) {
+ case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ if (tunnel_act)
+ *tunnel_act = true;
+ /* fall through */
+ case NFP_FL_ACTION_OPCODE_PRE_LAG:
+ memcpy(act_dst + act_off, act_src + act_off, act_len);
+ break;
+ default:
+ return act_off;
+ }
+
+ act_off += act_len;
+ }
+
+ return act_off;
+}
+
+static int nfp_fl_verify_post_tun_acts(char *acts, int len)
+{
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&acts[act_off];
+ if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+ return -EOPNOTSUPP;
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ return 0;
+}
+
+static int
+nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2,
+ struct nfp_fl_payload *merge_flow)
+{
+ unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+ bool tunnel_act = false;
+ char *merge_act;
+ int err;
+
+ /* The last action of sub_flow1 must be output - do not merge this. */
+ sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
+ sub2_act_len = sub_flow2->meta.act_len;
+
+ if (!sub2_act_len)
+ return -EINVAL;
+
+ if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
+ return -EINVAL;
+
+ /* A shortcut can only be applied if there is a single action. */
+ if (sub1_act_len)
+ merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+ else
+ merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
+
+ merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
+ merge_act = merge_flow->action_data;
+
+ /* Copy any pre-actions to the start of merge flow action list. */
+ pre_off1 = nfp_flower_copy_pre_actions(merge_act,
+ sub_flow1->action_data,
+ sub1_act_len, &tunnel_act);
+ merge_act += pre_off1;
+ sub1_act_len -= pre_off1;
+ pre_off2 = nfp_flower_copy_pre_actions(merge_act,
+ sub_flow2->action_data,
+ sub2_act_len, NULL);
+ merge_act += pre_off2;
+ sub2_act_len -= pre_off2;
+
+ /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
+ * a tunnel, sub_flow 2 can only have output actions for a valid merge.
+ */
+ if (tunnel_act) {
+ char *post_tun_acts = &sub_flow2->action_data[pre_off2];
+
+ err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
+ if (err)
+ return err;
+ }
+
+ /* Copy remaining actions from sub_flows 1 and 2. */
+ memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+ merge_act += sub1_act_len;
+ memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
+
+ return 0;
+}
+
+/* Flow link code should only be accessed under RTNL. */
+static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
+{
+ list_del(&link->merge_flow.list);
+ list_del(&link->sub_flow.list);
+ kfree(link);
+}
+
+static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
+ if (link->sub_flow.flow == sub_flow) {
+ nfp_flower_unlink_flow(link);
+ return;
+ }
+}
+
+static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ link->merge_flow.flow = merge_flow;
+ list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
+ link->sub_flow.flow = sub_flow;
+ list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
+
+ return 0;
+}
+
+/**
+ * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
+ * @app: Pointer to the APP handle
+ * @sub_flow1: Initial flow matched to produce merge hint
+ * @sub_flow2: Post recirculation flow matched in merge hint
+ *
+ * Combines 2 flows (if valid) into a single flow, removing the initial flow
+ * from hw and offloading the new, merged flow.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2)
+{
+ struct tc_cls_flower_offload merge_tc_off;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *merge_flow;
+ struct nfp_fl_key_ls merge_key_ls;
+ int err;
+
+ ASSERT_RTNL();
+
+ if (sub_flow1 == sub_flow2 ||
+ nfp_flower_is_merge_flow(sub_flow1) ||
+ nfp_flower_is_merge_flow(sub_flow2))
+ return -EINVAL;
+
+ err = nfp_flower_can_merge(sub_flow1, sub_flow2);
+ if (err)
+ return err;
+
+ merge_key_ls.key_size = sub_flow1->meta.key_len;
+
+ merge_flow = nfp_flower_allocate_new(&merge_key_ls);
+ if (!merge_flow)
+ return -ENOMEM;
+
+ merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
+ merge_flow->ingress_dev = sub_flow1->ingress_dev;
+
+ memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
+ sub_flow1->meta.key_len);
+ memcpy(merge_flow->mask_data, sub_flow1->mask_data,
+ sub_flow1->meta.mask_len);
+
+ err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
+ if (err)
+ goto err_destroy_merge_flow;
+
+ err = nfp_flower_link_flows(merge_flow, sub_flow1);
+ if (err)
+ goto err_destroy_merge_flow;
+
+ err = nfp_flower_link_flows(merge_flow, sub_flow2);
+ if (err)
+ goto err_unlink_sub_flow1;
+
+ merge_tc_off.cookie = merge_flow->tc_flower_cookie;
+ err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
+ merge_flow->ingress_dev);
+ if (err)
+ goto err_unlink_sub_flow2;
+
+ err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
+ nfp_flower_table_params);
+ if (err)
+ goto err_release_metadata;
+
+ err = nfp_flower_xmit_flow(app, merge_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+ if (err)
+ goto err_remove_rhash;
+
+ merge_flow->in_hw = true;
+ sub_flow1->in_hw = false;
+
+ return 0;
+
+err_remove_rhash:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &merge_flow->fl_node,
+ nfp_flower_table_params));
+err_release_metadata:
+ nfp_modify_flow_metadata(app, merge_flow);
+err_unlink_sub_flow2:
+ nfp_flower_unlink_flows(merge_flow, sub_flow2);
+err_unlink_sub_flow1:
+ nfp_flower_unlink_flows(merge_flow, sub_flow1);
+err_destroy_merge_flow:
+ kfree(merge_flow->action_data);
+ kfree(merge_flow->mask_data);
+ kfree(merge_flow->unmasked_data);
+ kfree(merge_flow);
+ return err;
+}
+
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
@@ -454,6 +919,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (port)
port->tc_offload_cnt++;
+ flow_pay->in_hw = true;
+
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
@@ -475,6 +942,75 @@ err_free_key_ls:
return err;
}
+static void
+nfp_flower_remove_merge_flow(struct nfp_app *app,
+ struct nfp_fl_payload *del_sub_flow,
+ struct nfp_fl_payload *merge_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload_link *link, *temp;
+ struct nfp_fl_payload *origin;
+ bool mod = false;
+ int err;
+
+ link = list_first_entry(&merge_flow->linked_flows,
+ struct nfp_fl_payload_link, merge_flow.list);
+ origin = link->sub_flow.flow;
+
+ /* Re-add the rule the merge had overwritten if it has not been deleted. */
+ if (origin != del_sub_flow)
+ mod = true;
+
+ err = nfp_modify_flow_metadata(app, merge_flow);
+ if (err) {
+ nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
+ goto err_free_links;
+ }
+
+ if (!mod) {
+ err = nfp_flower_xmit_flow(app, merge_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ if (err) {
+ nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
+ goto err_free_links;
+ }
+ } else {
+ __nfp_modify_flow_metadata(priv, origin);
+ err = nfp_flower_xmit_flow(app, origin,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
+ origin->in_hw = true;
+ }
+
+err_free_links:
+ /* Clean any links connected with the merged flow. */
+ list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
+ merge_flow.list)
+ nfp_flower_unlink_flow(link);
+
+ kfree(merge_flow->action_data);
+ kfree(merge_flow->mask_data);
+ kfree(merge_flow->unmasked_data);
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &merge_flow->fl_node,
+ nfp_flower_table_params));
+ kfree_rcu(merge_flow, rcu);
+}
+
+static void
+nfp_flower_del_linked_merge_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link, *temp;
+
+ /* Remove any merge flow formed from the deleted sub_flow. */
+ list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
+ sub_flow.list)
+ nfp_flower_remove_merge_flow(app, sub_flow,
+ link->merge_flow.flow);
+}
+
/**
* nfp_flower_del_offload() - Removes a flow from hardware.
* @app: Pointer to the APP handle
@@ -482,7 +1018,7 @@ err_free_key_ls:
* @flow: TC flower classifier offload structure
*
* Removes a flow from the repeated hash structure and clears the
- * action payload.
+ * action payload. Any flows merged from this are also deleted.
*
* Return: negative value on error, 0 if removed successfully.
*/
@@ -504,17 +1040,22 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
- goto err_free_flow;
+ goto err_free_merge_flow;
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+ if (!nfp_flow->in_hw) {
+ err = 0;
+ goto err_free_merge_flow;
+ }
+
err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
- if (err)
- goto err_free_flow;
+ /* Fall through on error. */
-err_free_flow:
+err_free_merge_flow:
+ nfp_flower_del_linked_merge_flows(app, nfp_flow);
if (port)
port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
@@ -527,6 +1068,52 @@ err_free_flow:
return err;
}
+static void
+__nfp_flower_update_merge_stats(struct nfp_app *app,
+ struct nfp_fl_payload *merge_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload_link *link;
+ struct nfp_fl_payload *sub_flow;
+ u64 pkts, bytes, used;
+ u32 ctx_id;
+
+ ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
+ pkts = priv->stats[ctx_id].pkts;
+ /* Do not cycle subflows if no stats to distribute. */
+ if (!pkts)
+ return;
+ bytes = priv->stats[ctx_id].bytes;
+ used = priv->stats[ctx_id].used;
+
+ /* Reset stats for the merge flow. */
+ priv->stats[ctx_id].pkts = 0;
+ priv->stats[ctx_id].bytes = 0;
+
+ /* The merge flow has received stats updates from firmware.
+ * Distribute these stats to all subflows that form the merge.
+ * The stats will then be collected by TC via the subflows.
+ */
+ list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
+ sub_flow = link->sub_flow.flow;
+ ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
+ priv->stats[ctx_id].pkts += pkts;
+ priv->stats[ctx_id].bytes += bytes;
+ priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used, used);
+ }
+}
+
+static void
+nfp_flower_update_merge_stats(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ /* Get merge flows that the subflow forms to distribute their stats. */
+ list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
+ __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
+}
+
/**
* nfp_flower_get_stats() - Populates flow stats obtained from hardware.
* @app: Pointer to the APP handle
@@ -553,6 +1140,10 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
+ /* If request is for a sub_flow, update stats from merged flows. */
+ if (!list_empty(&nfp_flow->linked_flows))
+ nfp_flower_update_merge_stats(app, nfp_flow);
+
flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
@@ -594,6 +1185,9 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
+ type_data);
default:
return -EOPNOTSUPP;
}
@@ -603,10 +1197,14 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
struct tc_block_offload *f)
{
struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_flower_repr_priv *repr_priv;
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ repr_priv = repr->app_priv;
+ repr_priv->block_shared = tcf_block_shared(f->block);
+
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
@@ -682,7 +1280,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
struct nfp_flower_priv *priv = app->priv;
int err;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
switch (f->command) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
new file mode 100644
index 000000000000..86e968cd5ffd
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/math64.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_port.h"
+
+#define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
+
+struct nfp_police_cfg_head {
+ __be32 flags_opts;
+ __be32 port;
+};
+
+/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
+ * See RFC 2698 for more details.
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Flag options |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Port Ingress |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Token Bucket Peak |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Token Bucket Committed |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Peak Burst Size |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Committed Burst Size |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Peak Information Rate |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Committed Information Rate |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_police_config {
+ struct nfp_police_cfg_head head;
+ __be32 bkt_tkn_p;
+ __be32 bkt_tkn_c;
+ __be32 pbs;
+ __be32 cbs;
+ __be32 pir;
+ __be32 cir;
+};
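/* Illustrative sketch (hypothetical helper, not part of the patch): a
 * single TC rate limit maps onto this trTCM message by programming
 * identical peak and committed parameters, so the two-rate policer
 * effectively behaves as a single token bucket. This mirrors the fill
 * done by nfp_flower_install_rate_limiter() below; rate is assumed to be
 * in bytes per second and burst in bytes.
 */
static void nfp_police_config_single_rate(struct nfp_police_config *config,
                                          u32 port_id, u32 rate, u32 burst)
{
        memset(config, 0, sizeof(*config));
        config->head.port = cpu_to_be32(port_id);
        /* Peak and committed token buckets carry the same values. */
        config->bkt_tkn_p = cpu_to_be32(burst);
        config->bkt_tkn_c = cpu_to_be32(burst);
        config->pbs = cpu_to_be32(burst);
        config->cbs = cpu_to_be32(burst);
        /* Peak and committed information rates are likewise identical. */
        config->pir = cpu_to_be32(rate);
        config->cir = cpu_to_be32(rate);
}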
+
+struct nfp_police_stats_reply {
+ struct nfp_police_cfg_head head;
+ __be64 pass_bytes;
+ __be64 pass_pkts;
+ __be64 drop_bytes;
+ __be64 drop_pkts;
+};
+
+static int
+nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *action = &flow->rule->action.entries[0];
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_config *config;
+ struct nfp_repr *repr;
+ struct sk_buff *skb;
+ u32 netdev_port_id;
+ u64 burst, rate;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+
+ if (repr_priv->block_shared) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
+ return -EOPNOTSUPP;
+ }
+
+ if (repr->port->type != NFP_PORT_VF_PORT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_offload_has_one_action(&flow->rule->action)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow->common.prio != (1 << 16)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
+ return -EOPNOTSUPP;
+ }
+
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ rate = action->police.rate_bytes_ps;
+ burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
+ PSCHED_TICKS_PER_SEC);
+ netdev_port_id = nfp_repr_get_port_id(netdev);
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.port = cpu_to_be32(netdev_port_id);
+ config->bkt_tkn_p = cpu_to_be32(burst);
+ config->bkt_tkn_c = cpu_to_be32(burst);
+ config->pbs = cpu_to_be32(burst);
+ config->cbs = cpu_to_be32(burst);
+ config->pir = cpu_to_be32(rate);
+ config->cir = cpu_to_be32(rate);
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ repr_priv->qos_table.netdev_port_id = netdev_port_id;
+ fl_priv->qos_rate_limiters++;
+ if (fl_priv->qos_rate_limiters == 1)
+ schedule_delayed_work(&fl_priv->qos_stats_work,
+ NFP_FL_QOS_UPDATE);
+
+ return 0;
+}
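/* Worked example (illustrative, not part of the patch): the burst
 * conversion above amounts to rate * burst_duration. Assuming
 * action->police.burst carries a duration in nanoseconds, as the
 * PSCHED_NS2TICKS() conversion suggests, then with
 * rate_bytes_ps = 125000000 (1 Gbit/s) and a 10us burst the result is
 * roughly 125000000 * 0.00001 = 1250 bytes, i.e. the number of bytes the
 * configured rate allows to accumulate over the burst interval.
 */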
+
+static int
+nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_config *config;
+ struct nfp_repr *repr;
+ struct sk_buff *skb;
+ u32 netdev_port_id;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+
+ netdev_port_id = nfp_repr_get_port_id(netdev);
+ repr_priv = repr->app_priv;
+
+ if (!repr_priv->qos_table.netdev_port_id) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
+ return -EOPNOTSUPP;
+ }
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Clear all qos associated data for this interface */
+ memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
+ fl_priv->qos_rate_limiters--;
+ if (!fl_priv->qos_rate_limiters)
+ cancel_delayed_work_sync(&fl_priv->qos_stats_work);
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.port = cpu_to_be32(netdev_port_id);
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_stats_reply *msg;
+ struct nfp_stat_pair *curr_stats;
+ struct nfp_stat_pair *prev_stats;
+ struct net_device *netdev;
+ struct nfp_repr *repr;
+ u32 netdev_port_id;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ netdev_port_id = be32_to_cpu(msg->head.port);
+ rcu_read_lock();
+ netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
+ if (!netdev)
+ goto exit_unlock_rcu;
+
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+ curr_stats = &repr_priv->qos_table.curr_stats;
+ prev_stats = &repr_priv->qos_table.prev_stats;
+
+ spin_lock_bh(&fl_priv->qos_stats_lock);
+ curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
+ be64_to_cpu(msg->drop_pkts);
+ curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
+ be64_to_cpu(msg->drop_bytes);
+
+ if (!repr_priv->qos_table.last_update) {
+ prev_stats->pkts = curr_stats->pkts;
+ prev_stats->bytes = curr_stats->bytes;
+ }
+
+ repr_priv->qos_table.last_update = jiffies;
+ spin_unlock_bh(&fl_priv->qos_stats_lock);
+
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+static void
+nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
+ u32 netdev_port_id)
+{
+ struct nfp_police_cfg_head *head;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(fl_priv->app,
+ sizeof(struct nfp_police_cfg_head),
+ NFP_FLOWER_CMSG_TYPE_QOS_STATS,
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ head = nfp_flower_cmsg_get_data(skb);
+ memset(head, 0, sizeof(struct nfp_police_cfg_head));
+ head->port = cpu_to_be32(netdev_port_id);
+
+ nfp_ctrl_tx(fl_priv->app->ctrl, skb);
+}
+
+static void
+nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
+{
+ struct nfp_reprs *repr_set;
+ int i;
+
+ rcu_read_lock();
+ repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
+ if (!repr_set)
+ goto exit_unlock_rcu;
+
+ for (i = 0; i < repr_set->num_reprs; i++) {
+ struct net_device *netdev;
+
+ netdev = rcu_dereference(repr_set->reprs[i]);
+ if (netdev) {
+ struct nfp_repr *priv = netdev_priv(netdev);
+ struct nfp_flower_repr_priv *repr_priv;
+ u32 netdev_port_id;
+
+ repr_priv = priv->app_priv;
+ netdev_port_id = repr_priv->qos_table.netdev_port_id;
+ if (!netdev_port_id)
+ continue;
+
+ nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
+ }
+ }
+
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct nfp_flower_priv *fl_priv;
+
+ delayed_work = to_delayed_work(work);
+ fl_priv = container_of(delayed_work, struct nfp_flower_priv,
+ qos_stats_work);
+
+ nfp_flower_stats_rlim_request_all(fl_priv);
+ schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
+}
+
+static int
+nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_stat_pair *curr_stats;
+ struct nfp_stat_pair *prev_stats;
+ u64 diff_bytes, diff_pkts;
+ struct nfp_repr *repr;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+
+ repr_priv = repr->app_priv;
+ if (!repr_priv->qos_table.netdev_port_id) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_bh(&fl_priv->qos_stats_lock);
+ curr_stats = &repr_priv->qos_table.curr_stats;
+ prev_stats = &repr_priv->qos_table.prev_stats;
+ diff_pkts = curr_stats->pkts - prev_stats->pkts;
+ diff_bytes = curr_stats->bytes - prev_stats->bytes;
+ prev_stats->pkts = curr_stats->pkts;
+ prev_stats->bytes = curr_stats->bytes;
+ spin_unlock_bh(&fl_priv->qos_stats_lock);
+
+ flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
+ repr_priv->qos_table.last_update);
+ return 0;
+}
+
+void nfp_flower_qos_init(struct nfp_app *app)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ spin_lock_init(&fl_priv->qos_stats_lock);
+ INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
+}
+
+void nfp_flower_qos_cleanup(struct nfp_app *app)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ cancel_delayed_work_sync(&fl_priv->qos_stats_work);
+}
+
+int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow)
+{
+ struct netlink_ext_ack *extack = flow->common.extack;
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
+ return -EOPNOTSUPP;
+ }
+
+ switch (flow->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return nfp_flower_install_rate_limiter(app, netdev, flow,
+ extack);
+ case TC_CLSMATCHALL_DESTROY:
+ return nfp_flower_remove_rate_limiter(app, netdev, flow,
+ extack);
+ case TC_CLSMATCHALL_STATS:
+ return nfp_flower_stats_rate_limiter(app, netdev, flow,
+ extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 4d78be4ec4e9..faa06edf95ac 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -171,7 +171,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
for (i = 0; i < count; i++) {
ipv4_addr = payload->tun_info[i].ipv4;
port = be32_to_cpu(payload->tun_info[i].egress_port);
- netdev = nfp_app_repr_get(app, port);
+ netdev = nfp_app_dev_get(app, port, NULL);
if (!netdev)
continue;
@@ -270,9 +270,10 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
+ u32 port_id;
- /* Only offload representor IPv4s for now. */
- if (!nfp_netdev_is_nfp_repr(netdev))
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+ if (!port_id)
return;
memset(&payload, 0, sizeof(struct nfp_tun_neigh));
@@ -290,7 +291,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload.src_ipv4 = flow->saddr;
ether_addr_copy(payload.src_addr, netdev->dev_addr);
neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
- payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+ payload.port_id = cpu_to_be32(port_id);
/* Add destination of new route to NFP cache. */
nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
@@ -366,7 +367,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
payload = nfp_flower_cmsg_get_data(skb);
- netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
if (!netdev)
goto route_fail_warning;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index f8d422713705..76d13af46a7a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -79,7 +79,7 @@ extern const struct nfp_app_type app_abm;
* @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
- * @repr_get: get representor netdev
+ * @dev_get: get representor or internal port representing netdev
*/
struct nfp_app_type {
enum nfp_app_id id;
@@ -143,7 +143,8 @@ struct nfp_app_type {
enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
int (*eswitch_mode_set)(struct nfp_app *app, u16 mode);
- struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
+ struct net_device *(*dev_get)(struct nfp_app *app, u32 id,
+ bool *redir_egress);
};
/**
@@ -397,12 +398,14 @@ static inline void nfp_app_sriov_disable(struct nfp_app *app)
app->type->sriov_disable(app);
}
-static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
+static inline
+struct net_device *nfp_app_dev_get(struct nfp_app *app, u32 id,
+ bool *redir_egress)
{
- if (unlikely(!app || !app->type->repr_get))
+ if (unlikely(!app || !app->type->dev_get))
return NULL;
- return app->type->repr_get(app, id);
+ return app->type->dev_get(app, id, redir_egress);
}
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
@@ -433,6 +436,6 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id);
-struct devlink *nfp_devlink_get_devlink(struct net_device *netdev);
+struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index e9eca99cf493..c50fce42f473 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -144,7 +144,8 @@ nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
static int
nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
- u32 size, enum devlink_sb_threshold_type threshold_type)
+ u32 size, enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -354,6 +355,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
{
struct nfp_eth_table_port eth_port;
struct devlink *devlink;
+ const u8 *serial;
+ int serial_len;
int ret;
rtnl_lock();
@@ -362,10 +365,10 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
if (ret)
return ret;
- devlink_port_type_eth_set(&port->dl_port, port->netdev);
+ serial_len = nfp_cpp_serial(port->app->cpp, &serial);
devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
eth_port.label_port, eth_port.is_split,
- eth_port.label_subport);
+ eth_port.label_subport, serial, serial_len);
devlink = priv_to_devlink(app->pf);
@@ -377,13 +380,23 @@ void nfp_devlink_port_unregister(struct nfp_port *port)
devlink_port_unregister(&port->dl_port);
}
-struct devlink *nfp_devlink_get_devlink(struct net_device *netdev)
+void nfp_devlink_port_type_eth_set(struct nfp_port *port)
+{
+ devlink_port_type_eth_set(&port->dl_port, port->netdev);
+}
+
+void nfp_devlink_port_type_clear(struct nfp_port *port)
{
- struct nfp_app *app;
+ devlink_port_type_clear(&port->dl_port);
+}
+
+struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev)
+{
+ struct nfp_port *port;
- app = nfp_app_from_netdev(netdev);
- if (!app)
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
return NULL;
- return priv_to_devlink(app->pf);
+ return &port->dl_port;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index f4c8776e42b6..948d1a4b4643 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -294,6 +294,9 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
+ if (!pci_get_drvdata(pdev))
+ return -ENOENT;
+
if (num_vfs == 0)
return nfp_pcie_sriov_disable(pdev);
else
@@ -720,9 +723,13 @@ err_pci_disable:
return err;
}
-static void nfp_pci_remove(struct pci_dev *pdev)
+static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
{
- struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct nfp_pf *pf;
+
+ pf = pci_get_drvdata(pdev);
+ if (!pf)
+ return;
nfp_hwmon_unregister(pf);
@@ -733,7 +740,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
vfree(pf->dumpspec);
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
- if (pf->fw_loaded)
+ if (unload_fw && pf->fw_loaded)
nfp_fw_unload(pf);
destroy_workqueue(pf->wq);
@@ -749,11 +756,22 @@ static void nfp_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void nfp_pci_remove(struct pci_dev *pdev)
+{
+ __nfp_pci_shutdown(pdev, true);
+}
+
+static void nfp_pci_shutdown(struct pci_dev *pdev)
+{
+ __nfp_pci_shutdown(pdev, false);
+}
+
static struct pci_driver nfp_pci_driver = {
.name = nfp_driver_name,
.id_table = nfp_pci_device_ids,
.probe = nfp_pci_probe,
.remove = nfp_pci_remove,
+ .shutdown = nfp_pci_shutdown,
.sriov_configure = nfp_pcie_sriov_configure,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index be37c2d6151c..df9aff2684ed 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -539,12 +539,17 @@ struct nfp_net_dp {
* @shared_handler: Handler for shared interrupts
* @shared_name: Name for shared interrupt
* @me_freq_mhz: ME clock_freq (MHz)
- * @reconfig_lock: Protects HW reconfiguration request regs/machinery
+ * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
+ * @reconfig_sync_present and HW reconfiguration request
+ * regs/machinery from async requests (sync must take
+ * @bar_lock)
* @reconfig_posted: Pending reconfig bits coming from async sources
* @reconfig_timer_active: Timer for reading reconfiguration results is pending
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
* @reconfig_in_progress_update: Update FW is processing now (debug only)
+ * @bar_lock: vNIC config BAR access lock, protects: update,
+ * mailbox area
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -615,6 +620,8 @@ struct nfp_net {
struct timer_list reconfig_timer;
u32 reconfig_in_progress_update;
+ struct mutex bar_lock;
+
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
u32 tx_coalesce_usecs;
@@ -839,6 +846,16 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
spin_unlock_bh(&nn->r_vecs[0].lock);
}
+static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
+{
+ mutex_lock(&nn->bar_lock);
+}
+
+static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
+{
+ mutex_unlock(&nn->bar_lock);
+}
+
/* Globals */
extern const char nfp_driver_version[];
@@ -871,7 +888,9 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
-int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6d1b8816552e..b82b684f52ce 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
@@ -137,20 +138,37 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
return false;
}
-static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
bool timed_out = false;
+ int i;
+
+ /* Poll the update field, waiting for the NFP to ack the config.
+ * Do a short opportunistic busy-wait loop first, then fall back to sleeping.
+ */
+ for (i = 0; i < 50; i++) {
+ if (nfp_net_reconfig_check_done(nn, false))
+ return false;
+ udelay(4);
+ }
- /* Poll update field, waiting for NFP to ack the config */
while (!nfp_net_reconfig_check_done(nn, timed_out)) {
- msleep(1);
+ usleep_range(250, 500);
timed_out = time_is_before_eq_jiffies(deadline);
}
+ return timed_out;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+ if (__nfp_net_reconfig_wait(nn, deadline))
+ return -EIO;
+
if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
return -EIO;
- return timed_out ? -EIO : 0;
+ return 0;
}
static void nfp_net_reconfig_timer(struct timer_list *t)
@@ -243,7 +261,7 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
}
/**
- * nfp_net_reconfig() - Reconfigure the firmware
+ * __nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
* @update: The value for the update field in the BAR config
*
@@ -253,10 +271,12 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
*
* Return: Negative errno on error, 0 on success
*/
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
+ lockdep_assert_held(&nn->bar_lock);
+
nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
@@ -274,8 +294,31 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
return ret;
}
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+ int ret;
+
+ nn_ctrl_bar_lock(nn);
+ ret = __nfp_net_reconfig(nn, update);
+ nn_ctrl_bar_unlock(nn);
+
+ return ret;
+}
+
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
+{
+ if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
+ nn_err(nn, "mailbox too small for %u bytes of data (%u)\n",
+ data_size, nn->tlv_caps.mbox_len);
+ return -EIO;
+ }
+
+ nn_ctrl_bar_lock(nn);
+ return 0;
+}
+
/**
- * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
+ * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
* @nn: NFP Net device to reconfigure
* @mbox_cmd: The value for the mailbox command
*
@@ -283,19 +326,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*
* Return: Negative errno on error, 0 on success
*/
-int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
{
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- if (!nfp_net_has_mbox(&nn->tlv_caps)) {
- nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
- return -EIO;
- }
-
+ lockdep_assert_held(&nn->bar_lock);
nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
- ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
if (ret) {
nn_err(nn, "Mailbox update error\n");
return ret;
@@ -304,6 +343,15 @@ int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
+{
+ int ret;
+
+ ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
+ nn_ctrl_bar_unlock(nn);
+ return ret;
+}
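/* Illustrative calling pattern (hypothetical helper, not part of the
 * patch): callers take the config BAR lock while checking the mailbox is
 * large enough, write their arguments into the mailbox value area, then
 * issue the command, which also drops the lock. The VLAN filter handlers
 * changed later in this patch follow exactly this shape.
 */
static int nfp_net_mbox_cmd_one_u16(struct nfp_net *nn, u32 mbox_cmd, u16 arg)
{
        int err;

        err = nfp_net_mbox_lock(nn, sizeof(arg));
        if (err)
                return err;

        /* Argument area of the simple mailbox starts at _SIMPLE_VAL. */
        nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
                  arg);

        return nfp_net_mbox_reconfig_and_unlock(nn, mbox_cmd);
}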
+
/* Interrupt configuration and handling
*/
@@ -909,7 +957,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
nfp_net_tx_xmit_more_flush(tx_ring);
return NETDEV_TX_OK;
@@ -1635,6 +1683,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta;
+ bool redir_egress = false;
struct net_device *netdev;
dma_addr_t new_dma_addr;
u32 meta_len_xdp = 0;
@@ -1770,13 +1819,16 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net *nn;
nn = netdev_priv(dp->netdev);
- netdev = nfp_app_repr_get(nn->app, meta.portid);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
if (unlikely(!netdev)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
continue;
}
- nfp_repr_inc_rx_stats(netdev, pkt_len);
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
}
skb = build_skb(rxbuf->frag, true_bufsz);
@@ -1811,7 +1863,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
- napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
}
if (xdp_prog) {
@@ -3111,7 +3169,9 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
static int
nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
struct nfp_net *nn = netdev_priv(netdev);
+ int err;
/* Priority tagged packets with vlan id 0 are processed by the
* NFP as untagged packets
@@ -3119,17 +3179,23 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+ if (err)
+ return err;
+
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
ETH_P_8021Q);
- return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
static int
nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
struct nfp_net *nn = netdev_priv(netdev);
+ int err;
/* Priority tagged packets with vlan id 0 are processed by the
* NFP as untagged packets
@@ -3137,11 +3203,15 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+ if (err)
+ return err;
+
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
ETH_P_8021Q);
- return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
static void nfp_net_stat64(struct net_device *netdev,
@@ -3324,8 +3394,11 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
struct nfp_net *nn = netdev_priv(netdev);
int n;
+ /* If port is defined, devlink_port is registered and the devlink
+ * core takes care of name formatting.
+ */
if (nn->port)
- return nfp_port_get_phys_port_name(netdev, name, len);
+ return -EOPNOTSUPP;
if (nn->dp.is_vf || nn->vnic_no_name)
return -EOPNOTSUPP;
@@ -3517,6 +3590,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_setup_tc = nfp_port_setup_tc,
@@ -3530,8 +3604,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_bpf = nfp_net_xdp,
- .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink = nfp_devlink_get_devlink,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
/**
@@ -3548,7 +3621,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3564,7 +3637,6 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
- nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
@@ -3632,6 +3704,8 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
+ mutex_init(&nn->bar_lock);
+
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
@@ -3659,6 +3733,9 @@ err_free_nn:
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
+
+ mutex_destroy(&nn->bar_lock);
+
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3920,9 +3997,6 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
- if (nn->dp.netdev)
- nfp_net_netdev_init(nn);
-
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
@@ -3935,6 +4009,9 @@ int nfp_net_init(struct nfp_net *nn)
if (err)
return err;
+ if (nn->dp.netdev)
+ nfp_net_netdev_init(nn);
+
nfp_net_vecs_init(nn);
if (!nn->dp.netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 372adea10e14..25919e338071 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -104,8 +104,6 @@
#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
-#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */
-#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
@@ -130,7 +128,6 @@
#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */
#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */
#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */
-#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
@@ -392,7 +389,6 @@
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
-#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
@@ -498,10 +494,4 @@ struct nfp_net_tlv_caps {
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
struct nfp_net_tlv_caps *caps);
-
-static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
-{
- return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
-}
-
#endif /* _NFP_NET_CTRL_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 690b62718dbb..851e31e0ba8e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
+#include <linux/sfp.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
@@ -152,6 +153,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_RVEC_GATHER_STATS 9
#define NN_RVEC_PER_Q_STATS 3
+#define SFP_SFF_REV_COMPLIANCE 1
+
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
struct nfp_nsp *nsp;
@@ -1096,6 +1099,130 @@ nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
buffer);
}
+static int
+nfp_port_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ unsigned int read_len;
+ struct nfp_nsp *nsp;
+ int err = 0;
+ u8 data;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+ netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ switch (eth_port->interface) {
+ case NFP_INTERFACE_SFP:
+ case NFP_INTERFACE_SFP28:
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ SFP_SFF8472_COMPLIANCE, &data,
+ 1, &read_len);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ if (!data) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case NFP_INTERFACE_QSFP:
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ SFP_SFF_REV_COMPLIANCE, &data,
+ 1, &read_len);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ if (data < 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ }
+ break;
+ case NFP_INTERFACE_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Unsupported module 0x%x detected\n",
+ eth_port->interface);
+ err = -EINVAL;
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
+static int
+nfp_port_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_nsp *nsp;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+ netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ eeprom->offset, data, eeprom->len,
+ &eeprom->len);
+ if (err < 0) {
+ if (eeprom->len) {
+ netdev_warn(netdev,
+ "Incomplete read from module EEPROM: %d\n",
+ err);
+ err = 0;
+ } else {
+ netdev_err(netdev,
+ "Reading from module EEPROM failed: %d\n",
+ err);
+ }
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
static int nfp_net_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -1253,6 +1380,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_module_info = nfp_port_get_module_info,
+ .get_module_eeprom = nfp_port_get_module_eeprom,
.get_coalesce = nfp_net_get_coalesce,
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
@@ -1272,6 +1401,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_module_info = nfp_port_get_module_info,
+ .get_module_eeprom = nfp_port_get_module_eeprom,
.get_link_ksettings = nfp_net_get_link_ksettings,
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 08f5fdbd8e41..986464d4a206 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -150,34 +150,39 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
nn->id = id;
+ if (nn->port) {
+ err = nfp_devlink_port_register(pf->app, nn->port);
+ if (err)
+ return err;
+ }
+
err = nfp_net_init(nn);
if (err)
- return err;
+ goto err_devlink_port_clean;
nfp_net_debugfs_vnic_add(nn, pf->ddir);
- if (nn->port) {
- err = nfp_devlink_port_register(pf->app, nn->port);
- if (err)
- goto err_dfs_clean;
- }
+ if (nn->port)
+ nfp_devlink_port_type_eth_set(nn->port);
nfp_net_info(nn);
if (nfp_net_is_data_vnic(nn)) {
err = nfp_app_vnic_init(pf->app, nn);
if (err)
- goto err_devlink_port_clean;
+ goto err_devlink_port_type_clean;
}
return 0;
-err_devlink_port_clean:
+err_devlink_port_type_clean:
if (nn->port)
- nfp_devlink_port_unregister(nn->port);
-err_dfs_clean:
+ nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
+err_devlink_port_clean:
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
return err;
}
@@ -221,9 +226,11 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
if (nfp_net_is_data_vnic(nn))
nfp_app_vnic_clean(pf->app, nn);
if (nn->port)
- nfp_devlink_port_unregister(nn->port);
+ nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
}
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d2c803bb4e56..1eef446036d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = dev_queue_xmit(skb);
nfp_repr_inc_tx_stats(netdev, len, ret);
- return ret;
+ return NETDEV_TX_OK;
}
static int nfp_repr_stop(struct net_device *netdev)
@@ -267,13 +267,14 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_fix_features = nfp_repr_fix_features,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink = nfp_devlink_get_devlink,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
void
@@ -383,7 +384,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
- netdev->priv_flags |= IFF_NO_QUEUE;
+ netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
if (nfp_app_has_tc(app)) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index b6ec46ed0540..3fdaaf8ed2ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2017 Netronome Systems, Inc. */
+/* Copyright (C) 2017-2019 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/errno.h>
@@ -146,6 +146,30 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
"spoofchk");
}
+int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ unsigned int vf_offset;
+ u8 vf_ctrl;
+ int err;
+
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST,
+ "trust");
+ if (err)
+ return err;
+
+ /* Write trust control bit to VF entry in VF config symbol */
+ vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ +
+ NFP_NET_VF_CFG_CTRL;
+ vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset);
+ vf_ctrl &= ~NFP_NET_VF_CFG_CTRL_TRUST;
+ vf_ctrl |= FIELD_PREP(NFP_NET_VF_CFG_CTRL_TRUST, enable);
+ writeb(vf_ctrl, app->pf->vfcfg_tbl2 + vf_offset);
+
+ return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_TRUST,
+ "trust");
+}
+
int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int link_state)
{
@@ -213,6 +237,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci);
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
+ ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index c9f09c5bb5ee..a3db0cbf6425 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-/* Copyright (C) 2017 Netronome Systems, Inc. */
+/* Copyright (C) 2017-2019 Netronome Systems, Inc. */
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
@@ -19,12 +19,14 @@
#define NFP_NET_VF_CFG_MB_CAP_VLAN (0x1 << 1)
#define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3)
+#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4)
#define NFP_NET_VF_CFG_MB_RET 0x2
#define NFP_NET_VF_CFG_MB_UPD 0x4
#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0)
#define NFP_NET_VF_CFG_MB_UPD_VLAN (0x1 << 1)
#define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3)
+#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4)
#define NFP_NET_VF_CFG_MB_VF_NUM 0x7
/* VF config entry
@@ -35,6 +37,7 @@
#define NFP_NET_VF_CFG_MAC_HI 0x0
#define NFP_NET_VF_CFG_MAC_LO 0x6
#define NFP_NET_VF_CFG_CTRL 0x4
+#define NFP_NET_VF_CFG_CTRL_TRUST 0x8
#define NFP_NET_VF_CFG_CTRL_SPOOF 0x4
#define NFP_NET_VF_CFG_CTRL_LINK_STATE 0x3
#define NFP_NET_VF_CFG_LS_MODE_AUTO 0
@@ -48,6 +51,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto);
int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int link_state);
int nfp_app_get_vf_config(struct net_device *netdev, int vf,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 1145849ca7ba..e4977cdf7678 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -282,8 +282,14 @@ err_free_vf:
static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
- struct nfp_net_vf *vf = pci_get_drvdata(pdev);
- struct nfp_net *nn = vf->nn;
+ struct nfp_net_vf *vf;
+ struct nfp_net *nn;
+
+ vf = pci_get_drvdata(pdev);
+ if (!vf)
+ return;
+
+ nn = vf->nn;
/* Note, the order is slightly different from above as we need
* to keep the nn pointer around till we have freed everything.
@@ -317,4 +323,5 @@ struct pci_driver nfp_netvf_pci_driver = {
.id_table = nfp_netvf_pci_device_ids,
.probe = nfp_netvf_pci_probe,
.remove = nfp_netvf_pci_remove,
+ .shutdown = nfp_netvf_pci_remove,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 90ae053f5c07..d7fd203bb180 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -131,6 +131,8 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
void nfp_devlink_port_unregister(struct nfp_port *port);
+void nfp_devlink_port_type_eth_set(struct nfp_port *port);
+void nfp_devlink_port_type_clear(struct nfp_port *port);
/**
* Mac stats (0x0000 - 0x0200)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 3a4e224a64b7..42cf4fd875ea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -79,6 +79,8 @@
#define NFP_VERSIONS_NCSI_OFF 22
#define NFP_VERSIONS_CFGR_OFF 26
+#define NSP_SFF_EEPROM_BLOCK_LEN 8
+
enum nfp_nsp_cmd {
SPCODE_NOOP = 0, /* No operation */
SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
@@ -95,6 +97,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */
SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */
SPCODE_VERSIONS = 21, /* Report FW versions */
+ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */
};
struct nfp_nsp_dma_buf {
@@ -965,3 +968,62 @@ const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
return (const char *)&buf[buf_off];
}
+
+static int
+__nfp_nsp_module_eeprom(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg module_eeprom = {
+ {
+ .code = SPCODE_READ_SFF_EEPROM,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &module_eeprom);
+}
+
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ unsigned int offset, void *data,
+ unsigned int len, unsigned int *read_len)
+{
+ struct eeprom_buf {
+ u8 metalen;
+ __le16 length;
+ __le16 offset;
+ __le16 readlen;
+ u8 eth_index;
+ u8 data[0];
+ } __packed *buf;
+ int bufsz, ret;
+
+ BUILD_BUG_ON(offsetof(struct eeprom_buf, data) % 8);
+
+ /* Buffer must be large enough and rounded to the next block size. */
+ bufsz = struct_size(buf, data, round_up(len, NSP_SFF_EEPROM_BLOCK_LEN));
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->metalen =
+ offsetof(struct eeprom_buf, data) / NSP_SFF_EEPROM_BLOCK_LEN;
+ buf->length = cpu_to_le16(len);
+ buf->offset = cpu_to_le16(offset);
+ buf->eth_index = eth_index;
+
+ ret = __nfp_nsp_module_eeprom(state, buf, bufsz);
+
+ *read_len = min_t(unsigned int, len, le16_to_cpu(buf->readlen));
+ if (*read_len)
+ memcpy(data, buf->data, *read_len);
+
+ if (!ret && *read_len < len)
+ ret = -EIO;
+
+ kfree(buf);
+
+ return ret;
+}
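/* Illustrative usage (hypothetical caller, not part of the patch),
 * mirroring the ethtool path added earlier in this series: read the
 * single SFF identifier byte at offset 0 of the module EEPROM behind
 * the given port.
 */
static int nfp_nsp_read_module_ident(struct nfp_nsp *state, int eth_index)
{
        unsigned int read_len;
        u8 ident;
        int err;

        err = nfp_nsp_read_module_eeprom(state, eth_index, 0, &ident,
                                         sizeof(ident), &read_len);
        if (err < 0)
                return err;

        return ident;
}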
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index bd9c358c646f..22ee6985ee1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -22,6 +22,9 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
int nfp_nsp_load_stored_fw(struct nfp_nsp *state);
int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ unsigned int offset, void *data,
+ unsigned int len, unsigned int *read_len);
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
{
@@ -43,6 +46,11 @@ static inline bool nfp_nsp_has_versions(struct nfp_nsp *state)
return nfp_nsp_get_abi_ver_minor(state) > 27;
}
+static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 28;
+}
+
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
NFP_INTERFACE_SFP = 1,
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 89d17399fb5a..fec604c4c0d3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1368,8 +1368,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr)) {
const char *macaddr = of_get_mac_address(np);
- if (macaddr)
- memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
+ if (!IS_ERR(macaddr))
+ ether_addr_copy(ndev->dev_addr, macaddr);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index a5bf46310f60..5ffaee9f53b1 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1355,7 +1355,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
const int nh_off = skb_network_offset(skb);
const int nh_len = skb_network_header_len(skb);
const int nfrags = skb_shinfo(skb)->nr_frags;
- int cs_size, i, fill, hdr, cpyhdr, evt;
+ int cs_size, i, fill, hdr, evt;
dma_addr_t csdma;
fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
@@ -1396,7 +1396,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
fill++;
/* Copy the result into the TCP packet */
- cpyhdr = fill;
CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
XCT_FUN_LLEN(2) | XCT_FUN_SE;
CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
@@ -1839,7 +1838,7 @@ static void __exit pasemi_mac_cleanup_module(void)
pci_unregister_driver(&pasemi_mac_driver);
}
-int pasemi_mac_init_module(void)
+static int pasemi_mac_init_module(void)
{
int err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 43a57ec296fd..c5e96ce20f59 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -431,12 +431,16 @@ struct qed_qm_info {
u8 num_pf_rls;
};
+#define QED_OVERFLOW_BIT 1
+
struct qed_db_recovery_info {
struct list_head list;
/* Lock to protect the doorbell recovery mechanism list */
spinlock_t lock;
+ bool dorq_attn;
u32 db_recovery_counter;
+ unsigned long overflow;
};
struct storm_stats {
@@ -492,6 +496,9 @@ enum qed_mf_mode_bit {
/* Allow DSCP to TC mapping */
QED_MF_DSCP_TO_TC_MAP,
+
+ /* Do not insert a vlan tag with id 0 */
+ QED_MF_DONT_ADD_VLAN0_TAG,
};
enum qed_ufp_mode {
@@ -920,8 +927,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
- enum qed_db_rec_exec db_exec);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
/* Other Linux specific common definitions */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 69966dfc6e3d..5c6a276f69ac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -204,9 +204,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
else
p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
- /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
- if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
- test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+ if (test_bit(QED_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->cdev->mf_bits))
p_data->arr[type].dont_add_vlan0 = true;
/* QM reconf data */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 9df8c4b3b54e..fccdb06fc5c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
/* Doorbell address sanity (address within doorbell bar range) */
static bool qed_db_rec_sanity(struct qed_dev *cdev,
- void __iomem *db_addr, void *db_data)
+ void __iomem *db_addr,
+ enum qed_db_rec_width db_width,
+ void *db_data)
{
+ u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
/* Make sure doorbell address is within the doorbell bar */
if (db_addr < cdev->doorbells ||
- (u8 __iomem *)db_addr >
+ (u8 __iomem *)db_addr + width >
(u8 __iomem *)cdev->doorbells + cdev->db_size) {
WARN(true,
"Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
}
/* Sanitize doorbell address */
- if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+ if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
return -EINVAL;
/* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
return 0;
}
- /* Sanitize doorbell address */
- if (!qed_db_rec_sanity(cdev, db_addr, db_data))
- return -EINVAL;
-
/* Obtain hwfn from doorbell address */
p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
- struct qed_db_recovery_entry *db_entry,
- enum qed_db_rec_exec db_exec)
-{
- if (db_exec != DB_REC_ONCE) {
- /* Print according to width */
- if (db_entry->db_width == DB_REC_WIDTH_32B) {
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "%s doorbell address %p data %x\n",
- db_exec == DB_REC_DRY_RUN ?
- "would have rung" : "ringing",
- db_entry->db_addr,
- *(u32 *)db_entry->db_data);
- } else {
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "%s doorbell address %p data %llx\n",
- db_exec == DB_REC_DRY_RUN ?
- "would have rung" : "ringing",
- db_entry->db_addr,
- *(u64 *)(db_entry->db_data));
- }
+ struct qed_db_recovery_entry *db_entry)
+{
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "ringing doorbell address %p data %x\n",
+ db_entry->db_addr,
+ *(u32 *)db_entry->db_data);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "ringing doorbell address %p data %llx\n",
+ db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
}
/* Sanity */
if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
- db_entry->db_data))
+ db_entry->db_width, db_entry->db_data))
return;
/* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
wmb();
/* Ring the doorbell */
- if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
- if (db_entry->db_width == DB_REC_WIDTH_32B)
- DIRECT_REG_WR(db_entry->db_addr,
- *(u32 *)(db_entry->db_data));
- else
- DIRECT_REG_WR64(db_entry->db_addr,
- *(u64 *)(db_entry->db_data));
- }
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
/* Flush the write combined buffer. Next doorbell may come from a
* different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
}
/* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
- enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
{
struct qed_db_recovery_entry *db_entry = NULL;
- if (db_exec != DB_REC_ONCE) {
- DP_NOTICE(p_hwfn,
- "Executing doorbell recovery. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
+ DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
- /* Track amount of times recovery was executed */
- p_hwfn->db_recovery_info.db_recovery_counter++;
- }
+ /* Track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
/* Protect the list */
spin_lock_bh(&p_hwfn->db_recovery_info.lock);
list_for_each_entry(db_entry,
- &p_hwfn->db_recovery_info.list, list_entry) {
- qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
- if (db_exec == DB_REC_ONCE)
- break;
- }
-
+ &p_hwfn->db_recovery_info.list, list_entry)
+ qed_db_recovery_ring(p_hwfn, db_entry);
spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}
@@ -3157,12 +3140,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_UFP_SPECIFIC) |
- BIT(QED_MF_8021Q_TAGGING);
+ BIT(QED_MF_8021Q_TAGGING) |
+ BIT(QED_MF_DONT_ADD_VLAN0_TAG);
break;
case NVM_CFG1_GLOB_MF_MODE_BD:
cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
- BIT(QED_MF_8021AD_TAGGING);
+ BIT(QED_MF_8021AD_TAGGING) |
+ BIT(QED_MF_DONT_ADD_VLAN0_TAG);
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
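
Editorial sketch: qed_db_rec_sanity() above now receives the doorbell width and rejects an address whose access would run past the end of the doorbell BAR, not just one whose start lies outside it. A standalone version of that closed-range check, with made-up bar/size/width values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reject any access whose last byte would fall outside [bar, bar + size). */
static bool db_addr_in_range(uintptr_t bar, uint32_t size,
			     uintptr_t addr, uint32_t width)
{
	return addr >= bar && addr + width <= bar + size;
}

int main(void)
{
	uintptr_t bar = 0x10000;
	uint32_t size = 0x1000;

	printf("%d\n", db_addr_in_range(bar, size, bar + 0xff8, 8)); /* 1: fits */
	printf("%d\n", db_addr_in_range(bar, size, bar + 0xffc, 8)); /* 0: spills over */
	return 0;
}
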
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index e23980e301b6..fdfedbc8e431 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
u32 count = QED_DB_REC_COUNT;
u32 usage = 1;
+ /* Flush any pending (e)dpms as they may never arrive */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
/* wait for usage to zero or count to run out. This is necessary since
* EDPM doorbell transactions can take multiple 64b cycles, and as such
* can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 overflow;
+ u32 attn_ovfl, cur_ovfl;
int rc;
- overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
- DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
- if (!overflow) {
- qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+ attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+ &p_hwfn->db_recovery_info.overflow);
+ cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (!cur_ovfl && !attn_ovfl)
return 0;
- }
- if (qed_edpm_enabled(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+ attn_ovfl, cur_ovfl);
+
+ if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
if (rc)
return rc;
}
- /* Flush any pending (e)dpm as they may never arrive */
- qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
/* Release overflow sticky indication (stop silently dropping everything) */
qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
/* Repeat all last doorbells (doorbell drop recovery) */
- qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+ qed_db_recovery_execute(p_hwfn);
return 0;
}
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
- u32 int_sts, first_drop_reason, details, address, all_drops_reason;
struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ u32 overflow;
int rc;
- int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
- DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+ overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (!overflow)
+ goto out;
+
+ /* Run PF doorbell recovery in next periodic handler */
+ set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+ if (!p_hwfn->db_bar_no_edpm) {
+ rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc)
+ goto out;
+ }
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+ /* Schedule the handler even if overflow was not detected */
+ qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+ struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
/* int_sts may be zero since all PFs were interrupted for doorbell
* overflow but another one already handled it. Can abort here. If
* This PF also requires overflow recovery we will be interrupted again.
* The masked almost full indication may also be set. Ignoring.
*/
+ int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
return 0;
+ DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
/* check if db_drop or overflow happened */
if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
first_drop_reason, all_drops_reason);
- rc = qed_db_rec_handler(p_hwfn, p_ptt);
- qed_periodic_db_rec_start(p_hwfn);
- if (rc)
- return rc;
-
/* Clear the doorbell drop details and prepare for next drop */
qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->db_recovery_info.dorq_attn = true;
+ qed_dorq_attn_overflow(p_hwfn);
+
+ return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->db_recovery_info.dorq_attn)
+ goto out;
+
+ /* Call DORQ callback if the attention was missed */
+ qed_dorq_attn_cb(p_hwfn);
+out:
+ p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
/* Instead of major changes to the data-structure, we have a some 'special'
* identifiers for sources that changed meaning between adapters.
*/
@@ -774,18 +814,12 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
{
u16 rc = 0, index;
- /* Make certain HW write took affect */
- mmiowb();
-
index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
if (p_sb_desc->index != index) {
p_sb_desc->index = index;
rc = QED_SB_ATT_IDX;
}
- /* Make certain we got a consistent view with HW */
- mmiowb();
-
return rc;
}
@@ -1080,6 +1114,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
}
}
+ /* Handle missed DORQ attention */
+ qed_dorq_attn_handler(p_hwfn);
+
/* Clear IGU indication for the deasserted bits */
DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
@@ -1170,7 +1207,6 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
- mmiowb();
barrier();
}
@@ -1805,9 +1841,6 @@ static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
- /* Flush the writes to IGU */
- mmiowb();
-
/* Unmask AEU signals toward IGU */
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
@@ -1871,9 +1904,6 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
- /* Flush the write to IGU */
- mmiowb();
-
/* calculate where to read the status bit from */
sb_bit = 1 << (igu_sb_id % 32);
sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
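
Editorial sketch: the qed_int.c hunks split the DORQ attention path. The attention context only latches the overflow into a software bit and releases the hardware sticky, while the doorbell replay is deferred to the periodic handler, which acts when either the latched bit or the current sticky is set. A plain-C model of that "latch in interrupt, act in deferred work" shape, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct recovery {
	bool overflow_latched;   /* set by the attention path         */
	bool hw_sticky;          /* stands in for the DORQ sticky bit */
	int  run_counter;        /* how many recoveries were executed */
};

/* Attention context: record the event and clear the hardware condition. */
static void attn_handler(struct recovery *r)
{
	if (!r->hw_sticky)
		return;
	r->overflow_latched = true;
	r->hw_sticky = false;          /* release the sticky indication */
}

/* Deferred (periodic) context: replay doorbells if anything was latched. */
static void periodic_handler(struct recovery *r)
{
	bool latched = r->overflow_latched;

	r->overflow_latched = false;
	if (!latched && !r->hw_sticky)
		return;                /* nothing to do this round */
	r->run_counter++;
	printf("recovery run %d\n", r->run_counter);
}

int main(void)
{
	struct recovery r = { .hw_sticky = true };

	attn_handler(&r);      /* latch + clear sticky           */
	periodic_handler(&r);  /* replay happens outside the IRQ */
	periodic_handler(&r);  /* no further work                */
	return 0;
}
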
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 1f356ed4f761..d473b522afc5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
* @brief - Doorbell Recovery handler.
- * Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- * (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ * Run doorbell recovery in case of PF overflow (and flush DORQ if
+ * needed).
*
* @param p_hwfn
* @param p_ptt
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f164d4acebcb..6de23b56b294 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
-#define QED_PERIODIC_DB_REC_COUNT 100
+#define QED_PERIODIC_DB_REC_COUNT 10
#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
#define QED_PERIODIC_DB_REC_INTERVAL \
msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 79b311b86f66..f5f3c03b9dd2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -341,9 +341,6 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
REG_WR16(p_hwfn, addr, prod);
-
- /* keep prod updates ordered */
- mmiowb();
}
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9faaa6df78ed..2f318aaf2b05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
} else {
DP_INFO(p_hwfn,
- "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
vf->abs_vf_id,
req->vfdev_info.eth_fp_hsi_major,
req->vfdev_info.eth_fp_hsi_minor,
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 63a78162cfaf..92fe226980fd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -498,8 +498,7 @@ struct qede_reload_args {
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b4c8949933f1..8911a97ab0ca 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -652,9 +652,9 @@ static void qede_get_drvinfo(struct net_device *ndev,
{
char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
struct qede_dev *edev = netdev_priv(ndev);
+ char mbi[ETHTOOL_FWVERS_LEN];
strlcpy(info->driver, "qede", sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
edev->dev_info.common.fw_major,
@@ -668,13 +668,27 @@ static void qede_get_drvinfo(struct net_device *ndev,
(edev->dev_info.common.mfw_rev >> 8) & 0xFF,
edev->dev_info.common.mfw_rev & 0xFF);
- if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
- sizeof(info->fw_version)) {
+ if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) <
+ sizeof(info->version))
+ snprintf(info->version, sizeof(info->version),
+ "%s [storm %s]", DRV_MODULE_VERSION, storm);
+ else
+ snprintf(info->version, sizeof(info->version),
+ "%s %s", DRV_MODULE_VERSION, storm);
+
+ if (edev->dev_info.common.mbi_version) {
+ snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET,
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET,
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET);
snprintf(info->fw_version, sizeof(info->fw_version),
- "mfw %s storm %s", mfw, storm);
+ "mbi %s [mfw %s]", mbi, mfw);
} else {
snprintf(info->fw_version, sizeof(info->fw_version),
- "%s %s", mfw, storm);
+ "mfw %s", mfw);
}
strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
@@ -1526,14 +1540,6 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
barrier();
writel(txq->tx_db.raw, txq->doorbell_addr);
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
-
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
if (qede_txq_has_work(txq))
break;
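
Editorial sketch: qede_get_drvinfo() above picks between two format strings depending on whether the combined pieces fit the destination; snprintf() still truncates safely either way, the length check only selects the richer layout when there is room. The same fit-check idiom in isolation, with illustrative sizes and labels:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char out[32];
	const char *drv = "1.2.3", *storm = "8.37.7.0";

	/* Prefer the annotated form only if it is known to fit. */
	if (strlen(drv) + strlen(storm) + strlen(" [storm ]") < sizeof(out))
		snprintf(out, sizeof(out), "%s [storm %s]", drv, storm);
	else
		snprintf(out, sizeof(out), "%s %s", drv, storm);

	printf("%s\n", out);
	return 0;
}
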
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 31b046e24565..0ae28f0d2523 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -580,14 +580,6 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
(u32 *)&rx_prods);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the napi lock is released and another qede_poll is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
}
static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
@@ -1665,12 +1657,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
- if (skb->xmit_more)
+ if (netdev_xmit_more())
qede_update_tx_producer(txq);
netif_tx_stop_queue(netdev_txq);
@@ -1696,8 +1688,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct qede_dev *edev = netdev_priv(dev);
int total_txq;
@@ -1705,7 +1696,7 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
return QEDE_TSS_COUNT(edev) ?
- fallback(dev, skb, NULL) % total_txq : 0;
+ netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
}
/* 8B udp header + 8B base tunnel header + 32B option length */
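
Editorial sketch: the qede_fp.c hunks swap skb->xmit_more for netdev_xmit_more() and the fallback callback for netdev_pick_tx(), but the batching logic is unchanged: the producer index is only pushed to the device when no further packets are queued behind this one, or when the queue is being stopped. A plain-C model of that doorbell batching; the queue structure and kick function are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct txq {
	int producer;     /* software producer index         */
	int hw_producer;  /* last value written to the device */
};

/* Stand-in for the MMIO doorbell write. */
static void kick_hw(struct txq *q)
{
	q->hw_producer = q->producer;
	printf("doorbell: prod=%d\n", q->hw_producer);
}

static void xmit_one(struct txq *q, bool more_queued, bool queue_stopped)
{
	q->producer++;
	/* Defer the doorbell while the stack promises more packets. */
	if (!more_queued || queue_stopped)
		kick_hw(q);
}

int main(void)
{
	struct txq q = { 0, 0 };

	xmit_one(&q, true, false);   /* batched, no doorbell yet */
	xmit_one(&q, true, false);   /* still batched            */
	xmit_one(&q, false, false);  /* flushes all three        */
	return 0;
}
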
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 5f3f42a25361..bddb2b5982dc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
if (IS_ERR(ptp->clock)) {
- rc = -EINVAL;
DP_ERR(edev, "PTP clock registration failed\n");
+ qede_ptp_disable(edev);
+ rc = -EINVAL;
goto err2;
}
return 0;
-err2:
- qede_ptp_disable(edev);
- ptp->clock = NULL;
err1:
kfree(ptp);
+err2:
edev->ptp = NULL;
return rc;
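
Editorial sketch: the qede_ptp.c hunk reorders the error labels so each label undoes only what was set up before the failure point, in reverse order, with the clock-registration failure now disabling PTP explicitly before falling into the common cleanup. A generic version of that goto-unwind convention, using malloc'd placeholders as the resources and releasing them again on success so the demo does not leak:

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_at)
{
	char *a = NULL, *b = NULL;
	int rc = 0;

	a = malloc(16);
	if (!a || fail_at == 1) {
		rc = -1;
		goto err_a;
	}

	b = malloc(16);
	if (!b || fail_at == 2) {
		rc = -1;
		goto err_b;
	}

	free(b);
	free(a);
	return 0;

err_b:
	free(b);   /* free(NULL) is a no-op, so fall-through stays safe */
err_a:
	free(a);
	return rc;
}

int main(void)
{
	printf("%d %d %d\n", setup(0), setup(1), setup(2));
	return 0;
}
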
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b61b88cbc0c7..457444894d80 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1858,7 +1858,6 @@ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
wmb();
writel_relaxed(qdev->small_buf_q_producer_index,
&port_regs->CommonRegs.rxSmallQProducerIndex);
- mmiowb();
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 0c443ea98479..374a4d4371f9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
u16 board_type;
u16 supported_type;
- u16 link_speed;
+ u32 link_speed;
u16 link_duplex;
u16 link_autoneg;
u16 module_type;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 3e71b65a9546..ad7c5eb8a3b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2181,7 +2181,6 @@ static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
static inline void ql_write_db_reg(u32 val, void __iomem *addr)
{
writel(val, addr);
- mmiowb();
}
/*
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 07e1c623048e..6cae33072496 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2695,7 +2695,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
wmb();
ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- mmiowb();
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 97f92953bdb9..b28360bc2255 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -966,7 +966,7 @@ qca_spi_probe(struct spi_device *spi)
mac = of_get_mac_address(spi->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(qca->net_dev->dev_addr, mac);
if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index db6068cd7a1f..590616846cd1 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -351,7 +351,7 @@ static int qca_uart_probe(struct serdev_device *serdev)
mac = of_get_mac_address(serdev->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(qca->net_dev->dev_addr, mac);
if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
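
Editorial sketch: both qca_spi.c and qca_uart.c adapt to of_get_mac_address() returning an error pointer rather than NULL, so the result is tested with IS_ERR() instead of a plain NULL check. Below is a small userspace model of the error-pointer encoding (a small negative errno folded into the pointer value); the helper names mirror the kernel ones but this is not the kernel implementation:

#include <stdio.h>

/* Minimal ERR_PTR/IS_ERR model: errno values -1..-4095 are encoded
 * as pointers in the top page of the address space.
 */
#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)err; }
static long  ptr_err(const void *p) { return (long)p; }
static int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *lookup_mac(int found)
{
	static unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	return found ? (void *)mac : err_ptr(-2 /* -ENOENT */);
}

int main(void)
{
	void *p = lookup_mac(0);

	if (is_err(p))
		printf("no MAC in DT: err=%ld\n", ptr_err(p));
	else
		printf("got MAC\n");
	return 0;
}
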
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 04aa592f35c3..ad335bca3273 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -840,7 +840,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* Trigger the MAC to check the TX descriptor */
- if (!skb->xmit_more || netif_queue_stopped(dev))
+ if (!netdev_xmit_more() || netif_queue_stopped(dev))
iowrite16(TM2TX, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7562ccbbb39a..2e20334b76a1 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
+#include <linux/pci-aspm.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@@ -490,10 +491,6 @@ enum rtl_register_content {
PCIDAC = (1 << 4),
PCIMulRW = (1 << 3),
#define INTT_MASK GENMASK(1, 0)
- INTT_0 = 0x0000, // 8168
- INTT_1 = 0x0001, // 8168
- INTT_2 = 0x0002, // 8168
- INTT_3 = 0x0003, // 8168
/* rtl8169_PHYstatus */
TBI_Enable = 0x80,
@@ -702,6 +699,8 @@ struct rtl8169_private {
u32 ocp_base;
};
+typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
+
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param_named(debug, debug.msg_enable, int, 0);
@@ -776,9 +775,9 @@ static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
int i;
for (i = 0; i < n; i++) {
- delay(d);
if (c->check(tp) == high)
return true;
+ delay(d);
}
netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
c->msg, !high, n, d);
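
Editorial sketch: the rtl_loop_wait() change above moves the condition check ahead of the delay, so a condition that is already true is reported without paying the first delay, while the total wait is still bounded by n delays. A generic polling helper in the same shape; the condition callback and delay are placeholders:

#include <stdbool.h>
#include <stdio.h>

/* Poll cond() up to n times, sleeping only between unsuccessful checks. */
static bool poll_until(bool (*cond)(void *), void *arg, int n,
		       void (*delay)(void))
{
	for (int i = 0; i < n; i++) {
		if (cond(arg))
			return true;   /* no delay paid if already true */
		delay();
	}
	return false;
}

static bool ready(void *arg) { return *(int *)arg > 0; }
static void no_delay(void)   { /* stand-in for udelay()/msleep() */ }

int main(void)
{
	int flag = 1;

	printf("%d\n", poll_until(ready, &flag, 100, no_delay));
	return 0;
}
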
@@ -1066,8 +1065,8 @@ DECLARE_RTL_COND(rtl_eriar_cond)
return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
}
-static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val, int type)
+static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
{
BUG_ON((addr & 3) || (mask == 0));
RTL_W32(tp, ERIDR, val);
@@ -1076,7 +1075,13 @@ static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
-static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val)
+{
+ _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
+}
+
+static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
@@ -1084,13 +1089,30 @@ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
RTL_R32(tp, ERIDR) : ~0;
}
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
+{
+ return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
+}
+
static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
- u32 m, int type)
+ u32 m)
{
u32 val;
- val = rtl_eri_read(tp, addr, type);
- rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
+ val = rtl_eri_read(tp, addr);
+ rtl_eri_write(tp, addr, mask, (val & ~m) | p);
+}
+
+static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 p)
+{
+ rtl_w0w1_eri(tp, addr, mask, p, 0);
+}
+
+static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 m)
+{
+ rtl_w0w1_eri(tp, addr, mask, 0, m);
}
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
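
Editorial sketch: the new rtl_eri_set_bits()/rtl_eri_clear_bits() helpers above are thin wrappers around the existing read-modify-write primitive with one of the two masks fixed to zero, which shortens call sites and makes the intent (set vs clear) explicit. The same layering on a plain register model:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg;   /* stands in for an ERI register */

static uint32_t reg_read(void)        { return reg; }
static void     reg_write(uint32_t v) { reg = v; }

/* Generic write-0/write-1 primitive: clear 'm', then set 'p'. */
static void reg_w0w1(uint32_t p, uint32_t m)
{
	reg_write((reg_read() & ~m) | p);
}

static void reg_set_bits(uint32_t p)   { reg_w0w1(p, 0); }
static void reg_clear_bits(uint32_t m) { reg_w0w1(0, m); }

int main(void)
{
	reg_set_bits(0x0003);
	reg_clear_bits(0x0001);
	printf("reg = 0x%04x\n", reg);   /* 0x0002 */
	return 0;
}
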
@@ -1102,7 +1124,7 @@ static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
- return rtl_eri_read(tp, reg, ERIAR_OOB);
+ return _rtl_eri_read(tp, reg, ERIAR_OOB);
}
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
@@ -1116,13 +1138,13 @@ static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
u32 data)
{
- rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
- data, ERIAR_OOB);
+ _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
+ data, ERIAR_OOB);
}
static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);
r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
}
@@ -1258,19 +1280,10 @@ static bool r8168_check_dash(struct rtl8169_private *tp)
}
}
-struct exgmac_reg {
- u16 addr;
- u16 mask;
- u32 val;
-};
-
-static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
- const struct exgmac_reg *r, int len)
+static void rtl_reset_packet_filter(struct rtl8169_private *tp)
{
- while (len-- > 0) {
- rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
- r++;
- }
+ rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
+ rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
}
DECLARE_RTL_COND(rtl_efusear_cond)
@@ -1326,48 +1339,31 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (phydev->speed == SPEED_1000) {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
} else if (phydev->speed == SPEED_100) {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
} else {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
}
- /* Reset packet filter */
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
- ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
- ERIAR_EXGMAC);
+ rtl_reset_packet_filter(tp);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
if (phydev->speed == SPEED_1000) {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
} else {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
if (phydev->speed == SPEED_10) {
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a);
} else {
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
}
}
}
@@ -1408,19 +1404,11 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
tmp = ARRAY_SIZE(cfg) - 1;
if (wolopts & WAKE_MAGIC)
- rtl_w0w1_eri(tp,
- 0x0dc,
- ERIAR_MASK_0100,
- MagicPacket_v2,
- 0x0000,
- ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
+ MagicPacket_v2);
else
- rtl_w0w1_eri(tp,
- 0x0dc,
- ERIAR_MASK_0100,
- 0x0000,
- MagicPacket_v2,
- ERIAR_EXGMAC);
+ rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
+ MagicPacket_v2);
break;
default:
tmp = ARRAY_SIZE(cfg);
@@ -2292,8 +2280,8 @@ struct phy_reg {
u16 val;
};
-static void rtl_writephy_batch(struct rtl8169_private *tp,
- const struct phy_reg *regs, int len)
+static void __rtl_writephy_batch(struct rtl8169_private *tp,
+ const struct phy_reg *regs, int len)
{
while (len-- > 0) {
rtl_writephy(tp, regs->reg, regs->val);
@@ -2301,6 +2289,8 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
}
}
+#define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
+
#define PHY_READ 0x00000000
#define PHY_DATA_OR 0x10000000
#define PHY_DATA_AND 0x20000000
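
Editorial sketch: rtl_writephy_batch() becomes a macro that supplies ARRAY_SIZE(a) itself, so a caller can no longer pass a length that disagrees with the table it hands over. A standalone version of the idiom (ARRAY_SIZE defined locally, since this is not kernel code; it only works when the argument is a real array in scope, not a pointer):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct phy_reg { unsigned short reg, val; };

static void write_batch(const struct phy_reg *regs, int len)
{
	while (len-- > 0) {
		printf("reg 0x%02x <- 0x%04x\n", regs->reg, regs->val);
		regs++;
	}
}

/* The macro keeps the array and its length in one place. */
#define write_batch_auto(a) write_batch(a, ARRAY_SIZE(a))

int main(void)
{
	static const struct phy_reg init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
	};

	write_batch_auto(init);
	return 0;
}
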
@@ -2563,7 +2553,11 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
{
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
+ /* Adjust EEE LED frequency */
+ if (tp->mac_version != RTL_GIGA_MAC_VER_38)
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+
+ rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
}
static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
@@ -2652,7 +2646,7 @@ static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{ 0x00, 0x9200 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
@@ -2663,7 +2657,7 @@ static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
@@ -2721,7 +2715,7 @@ static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl8169scd_hw_phy_config_quirk(tp);
}
@@ -2776,7 +2770,7 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
@@ -2789,7 +2783,7 @@ static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0001);
rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
@@ -2800,7 +2794,7 @@ static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
@@ -2813,7 +2807,7 @@ static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
@@ -2828,7 +2822,7 @@ static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
rtl_patchphy(tp, 0x14, 1 << 5);
rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
@@ -2853,7 +2847,7 @@ static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x09, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x14, 1 << 5);
rtl_patchphy(tp, 0x0d, 1 << 5);
@@ -2880,7 +2874,7 @@ static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x16, 1 << 0);
rtl_patchphy(tp, 0x14, 1 << 5);
@@ -2902,7 +2896,7 @@ static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x16, 1 << 0);
rtl_patchphy(tp, 0x14, 1 << 5);
@@ -2958,7 +2952,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x0d, 0xf880 }
};
- rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+ rtl_writephy_batch(tp, phy_reg_init_0);
/*
* Rx Error Issue
@@ -2979,7 +2973,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
};
int val;
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
val = rtl_readphy(tp, 0x0d);
@@ -3005,7 +2999,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x06, 0x6662 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
/* RSET couple improve */
@@ -3069,7 +3063,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x0d, 0xf880 }
};
- rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+ rtl_writephy_batch(tp, phy_reg_init_0);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
@@ -3083,7 +3077,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
};
int val;
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
val = rtl_readphy(tp, 0x0d);
if ((val & 0x00ff) != 0x006c) {
@@ -3108,7 +3102,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x06, 0x2642 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
/* Fine tune PLL performance */
@@ -3186,7 +3180,7 @@ static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
@@ -3201,7 +3195,7 @@ static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x0d, 1 << 5);
}
@@ -3237,7 +3231,7 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
/* DCO enable for 10M IDLE Power */
rtl_writephy(tp, 0x1f, 0x0007);
@@ -3285,14 +3279,11 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
addr[2] | (addr[3] << 8),
addr[4] | (addr[5] << 8)
};
- const struct exgmac_reg e[] = {
- { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
- { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
- { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
- { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
- };
- rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
+ rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
+ rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
+ rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
+ rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
}
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3326,7 +3317,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
/* For 4-corner performance improve */
rtl_writephy(tp, 0x1f, 0x0005);
@@ -3435,7 +3426,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl8168f_hw_phy_config(tp);
@@ -3501,7 +3492,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
/* Modify green table for giga */
rtl_writephy(tp, 0x1f, 0x0005);
@@ -3921,7 +3912,7 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
rtl_patchphy(tp, 0x19, 1 << 13);
rtl_patchphy(tp, 0x10, 1 << 15);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
@@ -3947,7 +3938,7 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
@@ -3960,7 +3951,7 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
/* EEE setting */
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
@@ -3983,139 +3974,73 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+ rtl_writephy_batch(tp, phy_reg_init);
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
}
static void rtl_hw_phy_config(struct net_device *dev)
{
+ static const rtl_generic_fct phy_configs[] = {
+ /* PCI devices. */
+ [RTL_GIGA_MAC_VER_01] = NULL,
+ [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
+ [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
+ /* PCI-E devices. */
+ [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_10] = NULL,
+ [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_13] = NULL,
+ [RTL_GIGA_MAC_VER_14] = NULL,
+ [RTL_GIGA_MAC_VER_15] = NULL,
+ [RTL_GIGA_MAC_VER_16] = NULL,
+ [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_31] = NULL,
+ [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
+ [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
+ [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_41] = NULL,
+ [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ };
struct rtl8169_private *tp = netdev_priv(dev);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01:
- break;
- case RTL_GIGA_MAC_VER_02:
- case RTL_GIGA_MAC_VER_03:
- rtl8169s_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_04:
- rtl8169sb_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_05:
- rtl8169scd_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_06:
- rtl8169sce_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_07:
- case RTL_GIGA_MAC_VER_08:
- case RTL_GIGA_MAC_VER_09:
- rtl8102e_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_11:
- rtl8168bb_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_12:
- rtl8168bef_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_17:
- rtl8168bef_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_18:
- rtl8168cp_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_19:
- rtl8168c_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_20:
- rtl8168c_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_21:
- rtl8168c_3_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_22:
- rtl8168c_4_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- rtl8168cp_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_25:
- rtl8168d_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_26:
- rtl8168d_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_27:
- rtl8168d_3_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_28:
- rtl8168d_4_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_29:
- case RTL_GIGA_MAC_VER_30:
- rtl8105e_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_31:
- /* None. */
- break;
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- rtl8168e_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_34:
- rtl8168e_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_35:
- rtl8168f_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_36:
- rtl8168f_2_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_37:
- rtl8402_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_38:
- rtl8411_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_39:
- rtl8106e_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_40:
- rtl8168g_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- rtl8168g_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_47:
- rtl8168h_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_48:
- rtl8168h_2_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_49:
- rtl8168ep_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- rtl8168ep_2_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_41:
- default:
- break;
- }
+ if (phy_configs[tp->mac_version])
+ phy_configs[tp->mac_version](tp);
}
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
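
Editorial sketch: rtl_hw_phy_config() above replaces a long switch with a designated-initializer table indexed by the MAC version enum; entries left NULL mean "no PHY config for this chip", and the single call site checks for NULL before dispatching. The same shape in a self-contained example with invented enum names and handlers:

#include <stdio.h>

enum mac_ver { VER_01, VER_02, VER_03, NR_VERS };

static void cfg_a(void) { printf("config A\n"); }
static void cfg_b(void) { printf("config B\n"); }

typedef void (*cfg_fn)(void);

/* Designated initializers: anything not listed stays NULL. */
static const cfg_fn phy_configs[NR_VERS] = {
	[VER_02] = cfg_a,
	[VER_03] = cfg_b,
};

static void hw_phy_config(enum mac_ver v)
{
	if (phy_configs[v])       /* NULL entry: nothing to do */
		phy_configs[v]();
}

int main(void)
{
	hw_phy_config(VER_01);    /* no-op    */
	hw_phy_config(VER_03);    /* config B */
	return 0;
}
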
@@ -4146,14 +4071,6 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
phy_speed_up(tp->phydev);
genphy_soft_reset(tp->phydev);
-
- /* It was reported that several chips end up with 10MBit/Half on a
- * 1GBit link after resuming from S3. For whatever reason the PHY on
- * these chips doesn't properly start a renegotiation when soft-reset.
- * Explicitly requesting a renegotiation fixes this.
- */
- if (tp->phydev->autoneg == AUTONEG_ENABLE)
- phy_restart_aneg(tp->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4282,8 +4199,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
- rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
- 0xfc000000, ERIAR_EXGMAC);
+ rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
}
@@ -4311,8 +4227,7 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
- 0x00000000, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
break;
}
@@ -4702,6 +4617,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
+ /* disable interrupt coalescing */
+ RTL_W16(tp, IntrMitigate, 0x0000);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -4734,12 +4651,6 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
rtl8169_set_magic_reg(tp, tp->mac_version);
- /*
- * Undocumented corner. Supposedly:
- * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
- */
- RTL_W16(tp, IntrMitigate, 0x0000);
-
RTL_W32(tp, RxMissed, 0);
}
@@ -4800,8 +4711,8 @@ struct ephy_info {
u16 bits;
};
-static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
- int len)
+static void __rtl_ephy_init(struct rtl8169_private *tp,
+ const struct ephy_info *e, int len)
{
u16 w;
@@ -4812,6 +4723,8 @@ static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
}
}
+#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
+
static void rtl_disable_clock_request(struct rtl8169_private *tp)
{
pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
@@ -4843,6 +4756,24 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
udelay(10);
}
+static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
+ u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
+{
+ /* Usage of dynamic vs. static FIFO is controlled by bit
+ * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
+ */
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
+}
+
+static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
+ u8 low, u8 high)
+{
+ /* FIFO thresholds for pause flow control */
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
+}
+
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -4892,7 +4823,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+ rtl_ephy_init(tp, e_info_8168cp);
__rtl_hw_start_8168cp(tp);
}
@@ -4940,7 +4871,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
- rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+ rtl_ephy_init(tp, e_info_8168c_1);
__rtl_hw_start_8168cp(tp);
}
@@ -4954,7 +4885,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+ rtl_ephy_init(tp, e_info_8168c_2);
__rtl_hw_start_8168cp(tp);
}
@@ -5012,7 +4943,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4));
+ rtl_ephy_init(tp, e_info_8168d_4);
rtl_enable_clock_request(tp);
}
@@ -5037,7 +4968,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
+ rtl_ephy_init(tp, e_info_8168e_1);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5062,19 +4993,18 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+ rtl_ephy_init(tp, e_info_8168e_2);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
+ rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
+ rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
@@ -5082,9 +5012,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
-
rtl8168_config_eee_mac(tp);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
@@ -5100,16 +5027,14 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
+ rtl_reset_packet_filter(tp);
+ rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
+ rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4));
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
@@ -5134,12 +5059,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
-
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
}
static void rtl_hw_start_8411(struct rtl8169_private *tp)
@@ -5154,39 +5076,33 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
rtl_pcie_state_l2l3_disable(tp);
- rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00);
}
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
+ rtl_reset_packet_filter(tp);
+ rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+ rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
+ rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5204,7 +5120,7 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+ rtl_ephy_init(tp, e_info_8168g_1);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5222,7 +5138,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
- rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
+ rtl_ephy_init(tp, e_info_8168g_2);
}
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
@@ -5239,7 +5155,7 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+ rtl_ephy_init(tp, e_info_8411_2);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5258,34 +5174,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
+ rtl_ephy_init(tp, e_info_8168h_1);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_reset_packet_filter(tp);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
- rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00);
- rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
@@ -5294,7 +5204,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+ rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
@@ -5344,34 +5254,28 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_reset_packet_filter(tp);
- rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
- rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
+ rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@ -5390,7 +5294,7 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
+ rtl_ephy_init(tp, e_info_8168ep_1);
rtl_hw_start_8168ep(tp);
@@ -5407,7 +5311,7 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
+ rtl_ephy_init(tp, e_info_8168ep_2);
rtl_hw_start_8168ep(tp);
@@ -5429,7 +5333,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
+ rtl_ephy_init(tp, e_info_8168ep_3);
rtl_hw_start_8168ep(tp);
@@ -5452,128 +5356,6 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
-static void rtl_hw_start_8168(struct rtl8169_private *tp)
-{
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
- tp->cp_cmd &= ~INTT_MASK;
- tp->cp_cmd |= PktCntrDisable | INTT_1;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
-
- RTL_W16(tp, IntrMitigate, 0x5151);
-
- /* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
- tp->irq_mask |= RxFIFOOver;
- tp->irq_mask &= ~RxOverflow;
- }
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- rtl_hw_start_8168bb(tp);
- break;
-
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- rtl_hw_start_8168bef(tp);
- break;
-
- case RTL_GIGA_MAC_VER_18:
- rtl_hw_start_8168cp_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_19:
- rtl_hw_start_8168c_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_20:
- rtl_hw_start_8168c_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_21:
- rtl_hw_start_8168c_3(tp);
- break;
-
- case RTL_GIGA_MAC_VER_22:
- rtl_hw_start_8168c_4(tp);
- break;
-
- case RTL_GIGA_MAC_VER_23:
- rtl_hw_start_8168cp_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_24:
- rtl_hw_start_8168cp_3(tp);
- break;
-
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- rtl_hw_start_8168d(tp);
- break;
-
- case RTL_GIGA_MAC_VER_28:
- rtl_hw_start_8168d_4(tp);
- break;
-
- case RTL_GIGA_MAC_VER_31:
- rtl_hw_start_8168dp(tp);
- break;
-
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- rtl_hw_start_8168e_1(tp);
- break;
- case RTL_GIGA_MAC_VER_34:
- rtl_hw_start_8168e_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- rtl_hw_start_8168f_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_38:
- rtl_hw_start_8411(tp);
- break;
-
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- rtl_hw_start_8168g_1(tp);
- break;
- case RTL_GIGA_MAC_VER_42:
- rtl_hw_start_8168g_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_44:
- rtl_hw_start_8411_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- rtl_hw_start_8168h_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_49:
- rtl_hw_start_8168ep_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_50:
- rtl_hw_start_8168ep_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_51:
- rtl_hw_start_8168ep_3(tp);
- break;
-
- default:
- netif_err(tp, drv, tp->dev,
- "unknown chipset (mac_version = %d)\n",
- tp->mac_version);
- break;
- }
-}
-
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8102e_1[] = {
@@ -5602,7 +5384,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(tp, Config1, cfg1 & ~LEDS0);
- rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+ rtl_ephy_init(tp, e_info_8102e_1);
}
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
@@ -5644,7 +5426,7 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
- rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+ rtl_ephy_init(tp, e_info_8105e_1);
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5669,17 +5451,15 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
+ rtl_ephy_init(tp, e_info_8402);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+ rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
+ rtl_reset_packet_filter(tp);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5699,6 +5479,73 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
+static void rtl_hw_config(struct rtl8169_private *tp)
+{
+ static const rtl_generic_fct hw_configs[] = {
+ [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
+ [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
+ [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
+ [RTL_GIGA_MAC_VER_10] = NULL,
+ [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb,
+ [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_13] = NULL,
+ [RTL_GIGA_MAC_VER_14] = NULL,
+ [RTL_GIGA_MAC_VER_15] = NULL,
+ [RTL_GIGA_MAC_VER_16] = NULL,
+ [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
+ [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
+ [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
+ [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
+ [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
+ [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
+ [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
+ [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
+ [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
+ [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
+ [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
+ [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
+ [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
+ [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp,
+ [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
+ [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
+ [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
+ [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
+ [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
+ [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
+ [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
+ [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
+ [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
+ [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
+ [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
+ [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
+ [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
+ [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
+ [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
+ [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
+ [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
+ [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
+ [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
+ [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
+ };
+
+ if (hw_configs[tp->mac_version])
+ hw_configs[tp->mac_version](tp);
+}
+
+static void rtl_hw_start_8168(struct rtl8169_private *tp)
+{
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
+
+ /* Workaround for RxFIFO overflow. */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+ tp->irq_mask |= RxFIFOOver;
+ tp->irq_mask &= ~RxOverflow;
+ }
+
+ rtl_hw_config(tp);
+}
+
static void rtl_hw_start_8101(struct rtl8169_private *tp)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
@@ -5714,43 +5561,7 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp)
tp->cp_cmd &= CPCMD_QUIRK_MASK;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- rtl_hw_start_8102e_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_08:
- rtl_hw_start_8102e_3(tp);
- break;
-
- case RTL_GIGA_MAC_VER_09:
- rtl_hw_start_8102e_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_29:
- rtl_hw_start_8105e_1(tp);
- break;
- case RTL_GIGA_MAC_VER_30:
- rtl_hw_start_8105e_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_37:
- rtl_hw_start_8402(tp);
- break;
-
- case RTL_GIGA_MAC_VER_39:
- rtl_hw_start_8106(tp);
- break;
- case RTL_GIGA_MAC_VER_43:
- rtl_hw_start_8168g_2(tp);
- break;
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- rtl_hw_start_8168h_1(tp);
- break;
- }
-
- RTL_W16(tp, IntrMitigate, 0x0000);
+ rtl_hw_config(tp);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -6267,7 +6078,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
*/
smp_mb();
if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
- netif_wake_queue(dev);
+ netif_start_queue(dev);
}
return NETDEV_TX_OK;
@@ -6542,10 +6353,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
}
- if (status & (RTL_EVENT_NAPI | LinkChg)) {
- rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
- }
+ rtl_irq_disable(tp);
+ napi_schedule_irqoff(&tp->napi);
out:
rtl_ack_events(tp, status);
@@ -6644,8 +6453,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (!tp->supports_gmii)
phy_set_max_speed(phydev, SPEED_100);
- /* Ensure to advertise everything, incl. pause */
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
@@ -7122,13 +6930,13 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- value = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
+ value = rtl_eri_read(tp, 0xe0);
mac_addr[0] = (value >> 0) & 0xff;
mac_addr[1] = (value >> 8) & 0xff;
mac_addr[2] = (value >> 16) & 0xff;
mac_addr[3] = (value >> 24) & 0xff;
- value = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
+ value = rtl_eri_read(tp, 0xe4);
mac_addr[4] = (value >> 0) & 0xff;
mac_addr[5] = (value >> 8) & 0xff;
break;
@@ -7184,8 +6992,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
- PCI_DEVID(pdev->bus->number, pdev->devfn));
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
@@ -7352,6 +7159,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+ /* Disable ASPM completely, as it causes random devices to stop working
+ * and even full system hangs for some users of PCIe devices.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
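
The rtl_hw_config() hunk above replaces two long per-chip switch statements with a NULL-tolerant table of function pointers indexed by mac_version. As a minimal sketch of that table-driven dispatch pattern (the chip names and hooks below are made up for illustration, not the driver's):

    #include <stdio.h>

    enum chip_ver { VER_A, VER_B, VER_C, VER_MAX };

    static void config_a(void) { puts("config for chip A"); }
    static void config_c(void) { puts("config for chip C"); }

    /* Designated initializers: versions that are not listed (VER_B here)
     * default to NULL, just like the gaps in hw_configs[] above.
     */
    static void (*const hw_configs[VER_MAX])(void) = {
            [VER_A] = config_a,
            [VER_C] = config_c,
    };

    static void hw_config(enum chip_ver v)
    {
            if (v < VER_MAX && hw_configs[v])
                    hw_configs[v]();        /* versions without a hook are skipped */
    }

    int main(void)
    {
            hw_config(VER_B);       /* NULL entry: nothing happens */
            hw_config(VER_C);       /* runs config_c() */
            return 0;
    }

One behavioural difference visible in the hunk: the old default: branch that logged "unknown chipset" is gone, so versions without an entry are now skipped silently.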
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8154b38c08f7..ef8f08931fe8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -111,7 +111,7 @@ static void ravb_set_buffer_align(struct sk_buff *skb)
*/
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
- if (mac) {
+ if (!IS_ERR(mac)) {
ether_addr_copy(ndev->dev_addr, mac);
} else {
u32 mahr = ravb_read(ndev, MAHR);
@@ -728,7 +728,6 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
- mmiowb();
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
@@ -848,7 +847,6 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
}
- mmiowb();
spin_unlock(&priv->lock);
return result;
}
@@ -881,7 +879,6 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
}
- mmiowb();
spin_unlock(&priv->lock);
return result;
}
@@ -898,7 +895,6 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
- mmiowb();
spin_unlock(&priv->lock);
return result;
}
@@ -943,7 +939,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
}
}
@@ -959,7 +954,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
ravb_write(ndev, mask, RIE0);
ravb_write(ndev, mask, TIE);
}
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
/* Receive error message handling */
@@ -1008,7 +1002,6 @@ static void ravb_adjust_link(struct net_device *ndev)
if (priv->no_avb_link && phydev->link)
ravb_rcv_snd_enable(ndev);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
if (new_state && netif_msg_link(priv))
@@ -1601,7 +1594,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
netif_stop_subqueue(ndev, q);
exit:
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
@@ -1615,8 +1607,7 @@ drop:
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/* If skb needs TX timestamp, it is handled in network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
@@ -1673,7 +1664,6 @@ static void ravb_set_rx_mode(struct net_device *ndev)
spin_lock_irqsave(&priv->lock, flags);
ravb_modify(ndev, ECMR, ECMR_PRM,
ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1970,6 +1960,13 @@ static void ravb_set_config_mode(struct net_device *ndev)
}
}
+static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
+ { .soc_id = "r8a774c0" },
+ { .soc_id = "r8a77990" },
+ { .soc_id = "r8a77995" },
+ { /* sentinel */ }
+};
+
/* Set tx and rx clock internal delay modes */
static void ravb_set_delay_mode(struct net_device *ndev)
{
@@ -1981,8 +1978,12 @@ static void ravb_set_delay_mode(struct net_device *ndev)
set |= APSR_DM_RDM;
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
- set |= APSR_DM_TDM;
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
+ "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
+ phy_modes(priv->phy_interface)))
+ set |= APSR_DM_TDM;
+ }
ravb_modify(ndev, APSR, APSR_DM, set);
}
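
The ravb hunk above only sets APSR_DM_TDM when soc_device_match() finds no entry in a sentinel-terminated quirk table. A standalone sketch of that match-table pattern; match_soc() below is a simplified stand-in for illustration, not the kernel's soc_device_match():

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct soc_attr {
            const char *soc_id;
    };

    /* Sentinel-terminated quirk list, shaped like ravb_delay_mode_quirk_match[]. */
    static const struct soc_attr quirk_match[] = {
            { .soc_id = "r8a774c0" },
            { .soc_id = "r8a77990" },
            { .soc_id = "r8a77995" },
            { .soc_id = NULL },     /* sentinel terminates the walk */
    };

    /* Simplified stand-in for soc_device_match(): walk until the sentinel
     * and return the first entry that matches the running SoC.
     */
    static const struct soc_attr *match_soc(const struct soc_attr *tbl,
                                            const char *running_soc)
    {
            for (; tbl->soc_id; tbl++)
                    if (!strcmp(tbl->soc_id, running_soc))
                            return tbl;
            return NULL;
    }

    int main(void)
    {
            printf("r8a77990: TX delay quirk %sapplied\n",
                   match_soc(quirk_match, "r8a77990") ? "" : "not ");
            printf("r8a7795:  TX delay quirk %sapplied\n",
                   match_soc(quirk_match, "r8a7795") ? "" : "not ");
            return 0;
    }

The empty last entry is what stops the walk, which is why the kernel table ends with { /* sentinel */ }.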
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index dce2a40a31e3..9a42580693cb 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -196,7 +196,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
ravb_write(ndev, GIE_PTCS, GIE);
else
ravb_write(ndev, GID_PTCD, GID);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -259,7 +258,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
else
ravb_write(ndev, GID_PTMD0, GID);
}
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
return error;
@@ -331,7 +329,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
spin_lock_irqsave(&priv->lock, flags);
ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e33af371b169..6354f19a31eb 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2010,7 +2010,6 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
sh_eth_rcv_snd_enable(ndev);
- mmiowb();
spin_unlock_irqrestore(&mdp->lock, flags);
if (new_state && netif_msg_link(mdp))
@@ -3193,8 +3192,8 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
pdata->phy_interface = ret;
mac_addr = of_get_mac_address(np);
- if (mac_addr)
- memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(pdata->mac_addr, mac_addr);
pdata->no_ether_link =
of_property_read_bool(np, "renesas,no-ether-link");
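
The ravb_read_mac_address() and sh_eth_parse_dt() hunks above (and the sxgbe, sni_ave and stmmac probe hunks further down) switch from NULL checks to IS_ERR()/IS_ERR_OR_NULL() because of_get_mac_address() now reports failure through an error-encoded pointer. A userspace sketch of that error-pointer convention; the helpers below are simplified assumptions for illustration, not the kernel's ERR_PTR/IS_ERR implementation:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Error codes live in the top MAX_ERRNO values of the address space,
     * so one return value can carry either a real pointer or a negative errno.
     */
    static inline void *ERR_PTR(long error)
    {
            return (void *)(uintptr_t)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long)(intptr_t)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    static const unsigned char dt_mac[6] = { 0x02, 0x12, 0x34, 0x56, 0x78, 0x9a };

    /* Stand-in for of_get_mac_address(): address on success, error pointer
     * (which is not NULL) when the device tree has no usable property.
     */
    static const void *get_mac_address(int have_property)
    {
            return have_property ? (const void *)dt_mac : ERR_PTR(-ENODEV);
    }

    int main(void)
    {
            const void *mac = get_mac_address(0);

            if (IS_ERR(mac))
                    printf("no MAC address: errno %ld\n", -PTR_ERR(mac));
            else
                    printf("using MAC from DT\n");
            return 0;
    }

The point of the conversion is visible in main(): an error pointer is non-NULL, so the old "if (mac)" style test would have treated the error value as a valid address.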
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index a71c900ca04f..7ae6c124bfe9 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2207,6 +2207,15 @@ static int rocker_router_fib_event(struct notifier_block *nb,
switch (event) {
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
+ if (info->family == AF_INET) {
+ struct fib_entry_notifier_info *fen_info = ptr;
+
+ if (fen_info->fi->fib_nh_is_v6) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ }
+
memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
/* Take reference on fib_info to prevent it from being
* freed while work is queued. Release it afterwards.
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index fa296a7c255d..30a49802fb51 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2288,11 +2288,11 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
nh = fi->fib_nh;
nh_on_port = (fi->fib_dev == ofdpa_port->dev);
- has_gw = !!nh->nh_gw;
+ has_gw = !!nh->fib_nh_gw4;
if (has_gw && nh_on_port) {
err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
- nh->nh_gw, &index);
+ nh->fib_nh_gw4, &index);
if (err)
return err;
@@ -2749,7 +2749,7 @@ static int ofdpa_fib4_add(struct rocker *rocker,
fen_info->tb_id, 0);
if (err)
return err;
- fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ fen_info->fi->fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return 0;
}
@@ -2764,7 +2764,7 @@ static int ofdpa_fib4_del(struct rocker *rocker,
ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
if (!ofdpa_port)
return 0;
- fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ fen_info->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
@@ -2791,7 +2791,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
rocker);
if (!ofdpa_port)
continue;
- flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ flow_entry->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
flow_entry);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index fbd00cb0cb7d..d2bc9412ba03 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -124,7 +124,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
/* Get MAC address if available (DT) */
- if (mac)
+ if (!IS_ERR_OR_NULL(mac))
ether_addr_copy(priv->dev->dev_addr, mac);
/* Get the TX/RX IRQ numbers */
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 70cce63a6081..696037d5ac3d 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -735,6 +735,7 @@ static int sgiseeq_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
sp = netdev_priv(dev);
/* Make private data page aligned */
diff --git a/drivers/net/ethernet/sfc/falcon/io.h b/drivers/net/ethernet/sfc/falcon/io.h
index 7085ee1d5e2b..c3577643fbda 100644
--- a/drivers/net/ethernet/sfc/falcon/io.h
+++ b/drivers/net/ethernet/sfc/falcon/io.h
@@ -108,7 +108,6 @@ static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value,
_ef4_writed(efx, value->u32[2], reg + 8);
_ef4_writed(efx, value->u32[3], reg + 12);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -130,7 +129,6 @@ static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 3409bbf5b19f..c5059f456f37 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -321,7 +321,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
@@ -333,7 +333,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
ef4_nic_push_buffers(tx_queue);
} else {
- tx_queue->xmit_more_available = skb->xmit_more;
+ tx_queue->xmit_more_available = netdev_xmit_more();
}
tx_queue->tx_packets++;
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 89563170af52..2774a10f44e9 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -120,7 +120,6 @@ static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -142,7 +141,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 9382bb0b4d5a..a4bbfebe3d64 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -342,6 +342,7 @@ static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
break;
default:
WARN_ON(1);
+ /* Fall through */
case MC_CMD_FCNTL_OFF:
link_state->fc = 0;
break;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 06c8f282263f..e182055ec2eb 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -478,8 +478,6 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
next = skb->next;
skb->next = NULL;
- if (next)
- skb->xmit_more = true;
efx_enqueue_skb(tx_queue, skb);
skb = next;
}
@@ -506,7 +504,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
- bool xmit_more = skb->xmit_more;
+ bool xmit_more = netdev_xmit_more();
bool data_mapped = false;
unsigned int segments;
unsigned int skb_len;
@@ -533,7 +531,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (rc)
goto err;
#ifdef EFX_USE_PIO
- } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
+ } else if (skb_len <= efx_piobuf_size && !xmit_more &&
efx_nic_may_tx_pio(tx_queue)) {
/* Use PIO for short packets with an empty queue. */
if (efx_enqueue_skb_pio(tx_queue, skb))
@@ -559,8 +557,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
- /* There could be packets left on the partner queue if those
- * SKBs had skb->xmit_more set. If we do not push those they
+ /* There could be packets left on the partner queue if
+ * xmit_more was set. If we do not push those they
* could be left for a long time and cause a netdev watchdog.
*/
if (txq2->xmit_more_available)
@@ -568,7 +566,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
efx_nic_push_buffers(tx_queue);
} else {
- tx_queue->xmit_more_available = skb->xmit_more;
+ tx_queue->xmit_more_available = xmit_more;
}
if (segments) {
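
The falcon/tx.c and sfc/tx.c hunks above (and the xlgmac hunk further down) replace the removed skb->xmit_more field with the netdev_xmit_more() helper; the batching contract itself is unchanged: descriptors are staged and the hardware is only kicked when the batch ends or the queue is stopped. A small sketch of that contract with a made-up ring structure:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical TX ring: descriptors are staged and the doorbell is only
     * written at the end of a batch, mirroring the xmit_more contract.
     */
    struct tx_ring {
            unsigned int staged;    /* descriptors written but not yet pushed */
            bool more_available;    /* deferred-kick flag, like xmit_more_available */
    };

    static void push_to_hw(struct tx_ring *r)
    {
            printf("doorbell: flushing %u descriptor(s)\n", r->staged);
            r->staged = 0;
            r->more_available = false;
    }

    static void enqueue(struct tx_ring *r, bool xmit_more, bool queue_stopped)
    {
            r->staged++;

            /* Kick the NIC only at the end of a batch, or if the stack stopped
             * the queue so no further packet will arrive to do it for us.
             */
            if (!xmit_more || queue_stopped)
                    push_to_hw(r);
            else
                    r->more_available = true;
    }

    int main(void)
    {
            struct tx_ring r = { 0 };

            enqueue(&r, true, false);       /* batched, no doorbell yet */
            enqueue(&r, true, false);       /* still batching */
            enqueue(&r, false, false);      /* end of batch: one doorbell for 3 packets */
            return 0;
    }

Keeping a deferred-kick flag is why the hunks preserve the "packets left on the partner queue" check: a batch that was deferred must eventually be pushed, or the netdev watchdog fires.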
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index c07fd594fe71..02b3962b0e63 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -251,7 +251,6 @@ enum PMConfigBits {
* use of mdelay() at _sc92031_reset.
* Functions prefixed with _sc92031_ must be called with the lock held;
* functions prefixed with sc92031_ must be called without the lock held.
- * Use mmiowb() before unlocking if the hardware was written to.
*/
/* Locking rules for the interrupt:
@@ -361,7 +360,6 @@ static void sc92031_disable_interrupts(struct net_device *dev)
/* stop interrupts */
iowrite32(0, port_base + IntrMask);
_sc92031_dummy_read(port_base);
- mmiowb();
/* wait for any concurrent interrupt/tasklet to finish */
synchronize_irq(priv->pdev->irq);
@@ -379,7 +377,6 @@ static void sc92031_enable_interrupts(struct net_device *dev)
wmb();
iowrite32(IntrBits, port_base + IntrMask);
- mmiowb();
}
static void _sc92031_disable_tx_rx(struct net_device *dev)
@@ -867,7 +864,6 @@ out:
rmb();
iowrite32(intr_mask, port_base + IntrMask);
- mmiowb();
spin_unlock(&priv->lock);
}
@@ -901,7 +897,6 @@ out_none:
rmb();
iowrite32(intr_mask, port_base + IntrMask);
- mmiowb();
return IRQ_NONE;
}
@@ -978,7 +973,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
port_base + TxAddr0 + entry * 4);
iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
- mmiowb();
if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
netif_stop_queue(dev);
@@ -1024,7 +1018,6 @@ static int sc92031_open(struct net_device *dev)
spin_lock_bh(&priv->lock);
_sc92031_reset(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
sc92031_enable_interrupts(dev);
@@ -1060,7 +1053,6 @@ static int sc92031_stop(struct net_device *dev)
_sc92031_disable_tx_rx(dev);
_sc92031_tx_clear(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1081,7 +1073,6 @@ static void sc92031_set_multicast_list(struct net_device *dev)
_sc92031_set_mar(dev);
_sc92031_set_rx_config(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
}
@@ -1098,7 +1089,6 @@ static void sc92031_tx_timeout(struct net_device *dev)
priv->tx_timeouts++;
_sc92031_reset(dev);
- mmiowb();
spin_unlock(&priv->lock);
@@ -1140,7 +1130,6 @@ sc92031_ethtool_get_link_ksettings(struct net_device *dev,
output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
_sc92031_mii_scan(port_base);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1311,7 +1300,6 @@ static int sc92031_ethtool_set_wol(struct net_device *dev,
priv->pm_config = pm_config;
iowrite32(pm_config, port_base + PMConfig);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1337,7 +1325,6 @@ static int sc92031_ethtool_nway_reset(struct net_device *dev)
out:
_sc92031_mii_scan(port_base);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1530,7 +1517,6 @@ static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
_sc92031_disable_tx_rx(dev);
_sc92031_tx_clear(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1555,7 +1541,6 @@ static int sc92031_resume(struct pci_dev *pdev)
spin_lock_bh(&priv->lock);
_sc92031_reset(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
sc92031_enable_interrupts(dev);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a18149720aa2..cba5881b2746 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv)
}
static void *netsec_alloc_rx_data(struct netsec_priv *priv,
- dma_addr_t *dma_handle, u16 *desc_len)
+ dma_addr_t *dma_handle, u16 *desc_len,
+ bool napi)
{
size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
size_t payload_len = NETSEC_RX_BUF_SZ;
@@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
- buf = napi_alloc_frag(total_len);
+ buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
if (!buf)
return NULL;
@@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
/* allocate a fresh buffer and map it to the hardware.
* This will eventually replace the old buffer in the hardware
*/
- buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
+ true);
if (unlikely(!buf_addr))
break;
@@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
void *buf;
u16 len;
- buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+ buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
+ false);
if (!buf) {
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
goto err_out;
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index bb6d5fb73035..51a7b48db4bc 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1599,7 +1599,7 @@ static int ave_probe(struct platform_device *pdev)
ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
/* if the mac address is invalid, use random mac address */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
/* Specific functions used for Ring mode */
/* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+ int bfsize)
{
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
- << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK);
+ if (bfsize == BUF_SIZE_16KiB)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
if (end)
p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
}
/* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
- << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK);
+ if (bfsize >= BUF_SIZE_2KiB) {
+ int bfsize2;
+
+ bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK);
+ }
if (end)
p->des1 |= cpu_to_le32(RDES1_END_RING);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 5b3b06a0a3bf..d466e33635b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -15,7 +15,7 @@
* Adopted from dwmac-sti.c
*/
-#include <linux/mfd/syscon.h>
+#include <linux/mfd/altera-sysmgr.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
@@ -114,7 +114,8 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
dwmac->interface = of_get_phy_mode(np);
- sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
+ sys_mgr_base_addr =
+ altr_sysmgr_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
if (IS_ERR(sys_mgr_base_addr)) {
dev_info(dev, "No sysmgr-syscon node found\n");
return PTR_ERR(sys_mgr_base_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 062a600fa5a7..21428537e231 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -333,6 +333,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
*/
dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
"stm32_pwr_wakeup");
+ if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
err = device_init_wakeup(&pdev->dev, true);
if (err) {
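
The dwmac-stm32 hunk above returns as soon as platform_get_irq_byname() reports -EPROBE_DEFER, instead of carrying on as if the optional wakeup IRQ were simply missing. A minimal sketch of that propagate-the-defer pattern; get_wakeup_irq() is a made-up stand-in, and 517 is the kernel's value for EPROBE_DEFER:

    #include <stdio.h>

    #define EPROBE_DEFER 517        /* kernel errno extension for deferred probe */

    /* Hypothetical resource lookup: negative is an error, and -EPROBE_DEFER
     * means the provider has not probed yet.
     */
    static int get_wakeup_irq(int provider_ready)
    {
            return provider_ready ? 42 : -EPROBE_DEFER;
    }

    static int probe(int provider_ready)
    {
            int irq = get_wakeup_irq(provider_ready);

            /* Propagate the defer so the core retries probe() later, instead
             * of silently continuing without the optional wakeup IRQ.
             */
            if (irq == -EPROBE_DEFER)
                    return -EPROBE_DEFER;

            if (irq >= 0)
                    printf("wakeup irq %d\n", irq);
            return 0;
    }

    int main(void)
    {
            printf("first pass:  %d\n", probe(0));   /* -517: try again later */
            printf("second pass: %d\n", probe(1));   /* 0: probed */
            return 0;
    }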
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 195669f550f0..ba124a4da793 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1015,6 +1015,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
/* The loopback bit seems to be re-set when the link changes
* Simply mask it each time
* Speed 10/100/1000 are set in BIT(2)/BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 7e5d5db0d516..b4bb5629de38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -192,6 +192,8 @@ static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
default:
break;
}
+
+ writel(value, ioaddr + MTL_OPERATION_MODE);
}
static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwmac4_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 37d5e6fe7473..085b700a4994 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -143,6 +143,11 @@
#define XGMAC_RSF BIT(5)
#define XGMAC_RTC GENMASK(1, 0)
#define XGMAC_RTC_SHIFT 0
+#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x)))
+#define XGMAC_RFD GENMASK(31, 17)
+#define XGMAC_RFD_SHIFT 17
+#define XGMAC_RFA GENMASK(15, 1)
+#define XGMAC_RFA_SHIFT 1
#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
#define XGMAC_RXOIE BIT(16)
#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwxgmac2_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 2ba712b48a89..e79037f511e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -147,6 +147,52 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
value &= ~XGMAC_RQS;
value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+ if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
+ u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+ unsigned int rfd, rfa;
+
+ value |= XGMAC_EHFC;
+
+ /* Set Threshold for Activating Flow Control to min 2 frames,
+ * i.e. 1500 * 2 = 3000 bytes.
+ *
+ * Set Threshold for Deactivating Flow Control to min 1 frame,
+ * i.e. 1500 bytes.
+ */
+ switch (fifosz) {
+ case 4096:
+ /* This violates the above formula because of the FIFO size
+ * limit, so overflow may still occur in spite of it.
+ */
+ rfd = 0x03; /* Full-2.5K */
+ rfa = 0x01; /* Full-1.5K */
+ break;
+
+ case 8192:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x0a; /* Full-6K */
+ break;
+
+ case 16384:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x12; /* Full-10K */
+ break;
+
+ default:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x1e; /* Full-16K */
+ break;
+ }
+
+ flow &= ~XGMAC_RFD;
+ flow |= rfd << XGMAC_RFD_SHIFT;
+
+ flow &= ~XGMAC_RFA;
+ flow |= rfa << XGMAC_RFA_SHIFT;
+
+ writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+ }
+
writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
/* Enable MTL RX overflow */
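
The Full-1.5K, Full-2.5K, ... annotations in the switch above suggest that a threshold field value n means 1 KB + n * 0.5 KB below "FIFO full". That formula is inferred from the comments in the hunk only, not from the XGMAC databook; the sketch below merely reproduces the commented values:

    #include <stdio.h>

    /* Bytes below "FIFO full" for a threshold field value n, assuming the
     * progression implied by the Full-1.5K, Full-2.5K, ... comments:
     * n = 0 means 1 KB below full, each step adds another 0.5 KB.
     */
    static unsigned int below_full(unsigned int n)
    {
            return 1024 + n * 512;
    }

    int main(void)
    {
            static const struct { unsigned int fifo, rfd, rfa; } cfg[] = {
                    { 4096,  0x03, 0x01 },
                    { 8192,  0x06, 0x0a },
                    { 16384, 0x06, 0x12 },
            };

            for (unsigned int i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++)
                    printf("fifo %5u: RFD = full - %.1fK, RFA = full - %.1fK\n",
                           cfg[i].fifo,
                           below_full(cfg[i].rfd) / 1024.0,
                           below_full(cfg[i].rfa) / 1024.0);
            return 0;
    }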
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
- !!(rdes0 & RDES0_FRAME_TYPE),
- !!(rdes0 & ERDES0_RX_MAC_ADDR));
+ if (likely(ret == good_frame))
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+ p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
else
- ehn_desc_rx_set_on_ring(p, end);
+ ehn_desc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
- int end);
+ int end, int bfsize);
/* DMA TX descriptor ring initialization */
void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
/* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..6d690678c20e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- pr_warn("%s: Oversized frame spanned multiple buffers\n",
- __func__);
stats->rx_length_errors++;
return discard_frame;
}
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
- int end)
+ int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
else
- ndesc_rx_set_on_ring(p, end);
+ ndesc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
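
The ndesc hunks above (and the earlier descs_com.h/enh_desc ones) stop hard-coding maximum buffer sizes and instead split the configured bfsize across the descriptor's two buffer-size fields: buffer 1 gets min(bfsize, field limit) and buffer 2 only whatever is left. A worked example using the normal-descriptor limit from the hunks:

    #include <stdio.h>

    #define BUF_SIZE_2KiB 2048

    static int min_int(int a, int b)
    {
            return a < b ? a : b;
    }

    static void split(int bfsize)
    {
            /* Buffer 1: capped at the 2 KiB - 1 field limit, as in
             * ndesc_init_rx_desc().
             */
            int bfsize1 = min_int(bfsize, BUF_SIZE_2KiB - 1);
            int bfsize2 = 0;

            /* Buffer 2: only used once the buffer exceeds buffer 1, as in
             * ndesc_rx_set_on_ring().
             */
            if (bfsize >= BUF_SIZE_2KiB)
                    bfsize2 = min_int(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);

            printf("bfsize %4d -> buffer1 %4d, buffer2 %4d\n",
                   bfsize, bfsize1, bfsize2);
    }

    int main(void)
    {
            split(1536);    /* default MTU-sized buffer: buffer2 stays 0 */
            split(4096);    /* jumbo buffer: 2047 + 2047, capped by the field width */
            return 0;
    }

The enhanced-descriptor hunks follow the same pattern with the 8 KiB/16 KiB limits.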
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6a2e1031a62a..5678b869cbff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
-static int flow_ctrl = FLOW_OFF;
+static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
}
/**
@@ -2614,8 +2616,6 @@ static int stmmac_open(struct net_device *dev)
u32 chan;
int ret;
- stmmac_check_ether_addr(priv);
-
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -3352,9 +3352,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
- unsigned int entry = rx_q->cur_rx;
+ unsigned int next_entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
- unsigned int next_entry;
unsigned int count = 0;
bool xmac;
@@ -3372,10 +3371,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- int status;
+ int entry, status;
struct dma_desc *p;
struct dma_desc *np;
+ entry = next_entry;
+
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
@@ -3431,11 +3432,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "len %d larger than size (%d)\n",
+ frame_len, priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
- break;
+ continue;
}
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3472,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
dev_warn(priv->device,
"packet dropped\n");
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3492,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
} else {
skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
- netdev_err(priv->dev,
- "%s: Inconsistent Rx chain\n",
- priv->dev->name);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "%s: Inconsistent Rx chain\n",
+ priv->dev->name);
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
prefetch(skb->data - NET_IP_ALIGN);
rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3532,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
}
- entry = next_entry;
}
stmmac_rx_refill(priv, queue);
@@ -4260,7 +4262,7 @@ int stmmac_dvr_probe(struct device *device,
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
- if (res->mac)
+ if (!IS_ERR_OR_NULL(res->mac))
memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
dev_set_drvdata(device, priv->dev);
@@ -4299,6 +4301,8 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
+ stmmac_check_ether_addr(priv);
+
/* Configure real RX and TX queues */
netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
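
The stmmac_rx() hunks above move the ring-index update to the top of the loop (entry = next_entry) so the error paths can use continue instead of break. A tiny sketch of why the index has to advance before any continue:

    #include <stdio.h>

    #define RING_SIZE 4

    int main(void)
    {
            int next_entry = 0;

            for (int count = 0; count < 6; count++) {
                    /* Advance the ring index first, so an error path below can
                     * simply "continue" and the next iteration looks at a fresh
                     * descriptor instead of spinning on the bad one.
                     */
                    int entry = next_entry;

                    next_entry = (entry + 1) % RING_SIZE;

                    if (count == 2) {
                            printf("entry %d: bad frame, dropped\n", entry);
                            continue;
                    }
                    printf("entry %d: delivered\n", entry);
            }
            return 0;
    }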
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index d819e8eaba12..7cbc01f316fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
},
.driver_data = (void *)&galileo_stmmac_dmi_data,
},
+ /*
+ * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+ * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
+ * has only one pci network device while other asset tags are
+ * for IOT2040 which has two.
+ */
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-1YA2"),
},
.driver_data = (void *)&iot2040_stmmac_dmi_data,
},
@@ -204,7 +208,7 @@ static int quark_default_data(struct pci_dev *pdev,
ret = 1;
}
- plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ plat->bus_id = pci_dev_id(pdev);
plat->phy_addr = ret;
plat->interface = PHY_INTERFACE_MODE_RMII;
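
Both the earlier r8169 MDIO hunk and the quark_default_data() hunk above replace the open-coded PCI_DEVID(pdev->bus->number, pdev->devfn) with pci_dev_id(pdev); the 16-bit value itself is unchanged, with the bus number in the upper byte and devfn in the lower byte. A one-line sketch of that composition:

    #include <stdio.h>

    /* Canonical PCI device ID used for bus_id strings: bus number in the
     * upper 8 bits, devfn (device/function) in the lower 8 bits.
     */
    static unsigned int pci_devid(unsigned int bus, unsigned int devfn)
    {
            return (bus << 8) | devfn;
    }

    int main(void)
    {
            /* e.g. 0000:03:00.0 -> bus 0x03, device 0, function 0 -> devfn 0x00 */
            printf("r8169-%x\n", pci_devid(0x03, 0x00));
            return 0;
    }

For a NIC at 0000:03:00.0 this yields the same "r8169-300" bus id string as before the conversion.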
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 644e42c181ee..01ea0d6f8819 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,8 +101,7 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct vnet_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 590172818b92..96b883f965f6 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,8 +234,7 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index 99d86e39ff54..bf6c1c6779ff 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -995,7 +995,7 @@ static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
smp_wmb();
ring->cur = cur_index + 1;
- if (!pkt_info->skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
channel->queue_index)))
xlgmac_tx_start_xmit(channel, ring);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 8b21b40a9fe5..afbdc9744230 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -20,7 +20,6 @@ config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
select TI_DAVINCI_MDIO
- select TI_DAVINCI_CPDMA
select PHYLIB
---help---
This driver supports TI's DaVinci Ethernet.
@@ -38,16 +37,6 @@ config TI_DAVINCI_MDIO
To compile this driver as a module, choose M here: the module
will be called davinci_mdio. This is recommended.
-config TI_DAVINCI_CPDMA
- tristate "TI DaVinci CPDMA Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
- select GENERIC_ALLOCATOR
- ---help---
- This driver supports TI's DaVinci CPDMA dma engine.
-
- To compile this driver as a module, choose M here: the module
- will be called davinci_cpdma. This is recommended.
-
config TI_CPSW_PHY_SEL
bool "TI CPSW Phy mode Selection (DEPRECATED)"
default n
@@ -55,17 +44,10 @@ config TI_CPSW_PHY_SEL
This driver supports configuring of the phy mode connected to
the CPSW. DEPRECATED: use PHY_TI_GMII_SEL.
-config TI_CPSW_ALE
- tristate "TI CPSW ALE Support"
- ---help---
- This driver supports TI's CPSW ALE module.
-
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
- select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
- select TI_CPSW_ALE
select MFD_SYSCON
select REGMAP
---help---
@@ -94,7 +76,6 @@ config TI_CPTS_MOD
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
- select TI_CPSW_ALE
select TI_DAVINCI_MDIO
depends on OF
depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 0be551de821c..ed12e1e5df2f 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -8,16 +8,15 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
obj-$(CONFIG_TLAN) += tlan.o
obj-$(CONFIG_CPMAC) += cpmac.o
-obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
+ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
-obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
-obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw.o
+ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
-keystone_netcp-y := netcp_core.o
+keystone_netcp-y := netcp_core.o cpsw_ale.o
obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
-keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
+keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index e2d47b24a869..3a655a4dc10e 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2006, 2007 Eugene Konev
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 38d1cc557c11..bfa81bbfce3f 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -1,14 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index fec275e2208d..48e0924259f5 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -1,17 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments Ethernet Switch Driver
*
* Copyright (C) 2013 Texas Instruments
*
* Module Author: Mugunthan V N <mugunthanvnm@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a591583d120e..634fc484a0b3 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments Ethernet Switch Driver
*
* Copyright (C) 2012 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -44,138 +37,13 @@
#include "cpsw.h"
#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
#include "cpts.h"
#include "davinci_cpdma.h"
#include <net/pkt_sched.h>
-#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
- NETIF_MSG_DRV | NETIF_MSG_LINK | \
- NETIF_MSG_IFUP | NETIF_MSG_INTR | \
- NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
- NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_RX_STATUS)
-
-#define cpsw_info(priv, type, format, ...) \
-do { \
- if (netif_msg_##type(priv) && net_ratelimit()) \
- dev_info(priv->dev, format, ## __VA_ARGS__); \
-} while (0)
-
-#define cpsw_err(priv, type, format, ...) \
-do { \
- if (netif_msg_##type(priv) && net_ratelimit()) \
- dev_err(priv->dev, format, ## __VA_ARGS__); \
-} while (0)
-
-#define cpsw_dbg(priv, type, format, ...) \
-do { \
- if (netif_msg_##type(priv) && net_ratelimit()) \
- dev_dbg(priv->dev, format, ## __VA_ARGS__); \
-} while (0)
-
-#define cpsw_notice(priv, type, format, ...) \
-do { \
- if (netif_msg_##type(priv) && net_ratelimit()) \
- dev_notice(priv->dev, format, ## __VA_ARGS__); \
-} while (0)
-
-#define ALE_ALL_PORTS 0x7
-
-#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
-#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
-#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
-
-#define CPSW_VERSION_1 0x19010a
-#define CPSW_VERSION_2 0x19010c
-#define CPSW_VERSION_3 0x19010f
-#define CPSW_VERSION_4 0x190112
-
-#define HOST_PORT_NUM 0
-#define CPSW_ALE_PORTS_NUM 3
-#define SLIVER_SIZE 0x40
-
-#define CPSW1_HOST_PORT_OFFSET 0x028
-#define CPSW1_SLAVE_OFFSET 0x050
-#define CPSW1_SLAVE_SIZE 0x040
-#define CPSW1_CPDMA_OFFSET 0x100
-#define CPSW1_STATERAM_OFFSET 0x200
-#define CPSW1_HW_STATS 0x400
-#define CPSW1_CPTS_OFFSET 0x500
-#define CPSW1_ALE_OFFSET 0x600
-#define CPSW1_SLIVER_OFFSET 0x700
-
-#define CPSW2_HOST_PORT_OFFSET 0x108
-#define CPSW2_SLAVE_OFFSET 0x200
-#define CPSW2_SLAVE_SIZE 0x100
-#define CPSW2_CPDMA_OFFSET 0x800
-#define CPSW2_HW_STATS 0x900
-#define CPSW2_STATERAM_OFFSET 0xa00
-#define CPSW2_CPTS_OFFSET 0xc00
-#define CPSW2_ALE_OFFSET 0xd00
-#define CPSW2_SLIVER_OFFSET 0xd80
-#define CPSW2_BD_OFFSET 0x2000
-
-#define CPDMA_RXTHRESH 0x0c0
-#define CPDMA_RXFREE 0x0e0
-#define CPDMA_TXHDP 0x00
-#define CPDMA_RXHDP 0x20
-#define CPDMA_TXCP 0x40
-#define CPDMA_RXCP 0x60
-
-#define CPSW_POLL_WEIGHT 64
-#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
-#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
-#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
- ETH_FCS_LEN +\
- CPSW_RX_VLAN_ENCAP_HDR_SIZE)
-
-#define RX_PRIORITY_MAPPING 0x76543210
-#define TX_PRIORITY_MAPPING 0x33221100
-#define CPDMA_TX_PRIORITY_MAP 0x76543210
-
-#define CPSW_VLAN_AWARE BIT(1)
-#define CPSW_RX_VLAN_ENCAP BIT(2)
-#define CPSW_ALE_VLAN_AWARE 1
-
-#define CPSW_FIFO_NORMAL_MODE (0 << 16)
-#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16)
-#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16)
-
-#define CPSW_INTPACEEN (0x3f << 16)
-#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
-#define CPSW_CMINTMAX_CNT 63
-#define CPSW_CMINTMIN_CNT 2
-#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
-#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
-
-#define cpsw_slave_index(cpsw, priv) \
- ((cpsw->data.dual_emac) ? priv->emac_port : \
- cpsw->data.active_slave)
-#define IRQ_NUM 2
-#define CPSW_MAX_QUEUES 8
-#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
-#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
-#define CPSW_FIFO_SHAPE_EN_SHIFT 16
-#define CPSW_FIFO_RATE_EN_SHIFT 20
-#define CPSW_TC_NUM 4
-#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
-#define CPSW_PCT_MASK 0x7f
-
-#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
-#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
-#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16
-#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8
-#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0)
-enum {
- CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
- CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
- CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
- CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
-};
-
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
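Note: the NETIF_MSG mask, the cpsw_* message macros, the register offset tables and the version/queue constants deleted in the surrounding hunks are not lost; the new #include lines above suggest they move into the shared cpsw_priv.h / cpsw_sl.h headers so the other files created by this split can reuse them. A minimal sketch of how a sibling file would pick up the shared state after the split (header name and struct layout assumed from this diff, not shown here):

    /* Illustrative only: cpsw_priv.h is assumed to carry the definitions
     * removed from cpsw.c in this patch (struct cpsw_common, CPSW_MAX_QUEUES,
     * the cpsw_info/cpsw_err helpers, ...).
     */
    #include "cpsw_priv.h"

    static int sketch_count_channels(struct cpsw_common *cpsw)
    {
            /* rx_ch_num/tx_ch_num come from the relocated struct cpsw_common */
            return cpsw->rx_ch_num + cpsw->tx_ch_num;
    }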
@@ -192,369 +60,6 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
-struct cpsw_wr_regs {
- u32 id_ver;
- u32 soft_reset;
- u32 control;
- u32 int_control;
- u32 rx_thresh_en;
- u32 rx_en;
- u32 tx_en;
- u32 misc_en;
- u32 mem_allign1[8];
- u32 rx_thresh_stat;
- u32 rx_stat;
- u32 tx_stat;
- u32 misc_stat;
- u32 mem_allign2[8];
- u32 rx_imax;
- u32 tx_imax;
-
-};
-
-struct cpsw_ss_regs {
- u32 id_ver;
- u32 control;
- u32 soft_reset;
- u32 stat_port_en;
- u32 ptype;
- u32 soft_idle;
- u32 thru_rate;
- u32 gap_thresh;
- u32 tx_start_wds;
- u32 flow_control;
- u32 vlan_ltype;
- u32 ts_ltype;
- u32 dlr_ltype;
-};
-
-/* CPSW_PORT_V1 */
-#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
-#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
-#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
-#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
-#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
-#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
-#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
-#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
-
-/* CPSW_PORT_V2 */
-#define CPSW2_CONTROL 0x00 /* Control Register */
-#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
-#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
-#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
-#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
-#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
-#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
-
-/* CPSW_PORT_V1 and V2 */
-#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
-#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
-#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
-
-/* CPSW_PORT_V2 only */
-#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
-
-/* Bit definitions for the CPSW2_CONTROL register */
-#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
-#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
-#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
-#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
-#define TS_107 BIT(15) /* Tyme Sync Dest IP Address 107 */
-#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
-#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
-#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
-#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
-#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
-#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
-#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
-#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
-#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
-#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
-#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
-#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
-
-#define CTRL_V2_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
-
-#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
-#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
-
-
-#define CTRL_V3_TS_BITS \
- (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
- TS_LTYPE1_EN | VLAN_LTYPE1_EN)
-
-#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
-#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
-
-/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
-#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
-#define TS_SEQ_ID_OFFSET_MASK (0x3f)
-#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
-#define TS_MSG_TYPE_EN_MASK (0xffff)
-
-/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
-#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
-
-/* Bit definitions for the CPSW1_TS_CTL register */
-#define CPSW_V1_TS_RX_EN BIT(0)
-#define CPSW_V1_TS_TX_EN BIT(4)
-#define CPSW_V1_MSG_TYPE_OFS 16
-
-/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
-#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
-
-#define CPSW_MAX_BLKS_TX 15
-#define CPSW_MAX_BLKS_TX_SHIFT 4
-#define CPSW_MAX_BLKS_RX 5
-
-struct cpsw_host_regs {
- u32 max_blks;
- u32 blk_cnt;
- u32 tx_in_ctl;
- u32 port_vlan;
- u32 tx_pri_map;
- u32 cpdma_tx_pri_map;
- u32 cpdma_rx_chan_map;
-};
-
-struct cpsw_sliver_regs {
- u32 id_ver;
- u32 mac_control;
- u32 mac_status;
- u32 soft_reset;
- u32 rx_maxlen;
- u32 __reserved_0;
- u32 rx_pause;
- u32 tx_pause;
- u32 __reserved_1;
- u32 rx_pri_map;
-};
-
-struct cpsw_hw_stats {
- u32 rxgoodframes;
- u32 rxbroadcastframes;
- u32 rxmulticastframes;
- u32 rxpauseframes;
- u32 rxcrcerrors;
- u32 rxaligncodeerrors;
- u32 rxoversizedframes;
- u32 rxjabberframes;
- u32 rxundersizedframes;
- u32 rxfragments;
- u32 __pad_0[2];
- u32 rxoctets;
- u32 txgoodframes;
- u32 txbroadcastframes;
- u32 txmulticastframes;
- u32 txpauseframes;
- u32 txdeferredframes;
- u32 txcollisionframes;
- u32 txsinglecollframes;
- u32 txmultcollframes;
- u32 txexcessivecollisions;
- u32 txlatecollisions;
- u32 txunderrun;
- u32 txcarriersenseerrors;
- u32 txoctets;
- u32 octetframes64;
- u32 octetframes65t127;
- u32 octetframes128t255;
- u32 octetframes256t511;
- u32 octetframes512t1023;
- u32 octetframes1024tup;
- u32 netoctets;
- u32 rxsofoverruns;
- u32 rxmofoverruns;
- u32 rxdmaoverruns;
-};
-
-struct cpsw_slave_data {
- struct device_node *phy_node;
- char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
- u8 mac_addr[ETH_ALEN];
- u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
- struct phy *ifphy;
-};
-
-struct cpsw_platform_data {
- struct cpsw_slave_data *slave_data;
- u32 ss_reg_ofs; /* Subsystem control register offset */
- u32 channels; /* number of cpdma channels (symmetric) */
- u32 slaves; /* number of slave cpgmac ports */
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 ale_entries; /* ale table size */
- u32 bd_ram_size; /*buffer descriptor ram size */
- u32 mac_control; /* Mac control register */
- u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
- bool dual_emac; /* Enable Dual EMAC mode */
-};
-
-struct cpsw_slave {
- void __iomem *regs;
- struct cpsw_sliver_regs __iomem *sliver;
- int slave_num;
- u32 mac_control;
- struct cpsw_slave_data *data;
- struct phy_device *phy;
- struct net_device *ndev;
- u32 port_vlan;
-};
-
-static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
-{
- return readl_relaxed(slave->regs + offset);
-}
-
-static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
-{
- writel_relaxed(val, slave->regs + offset);
-}
-
-struct cpsw_vector {
- struct cpdma_chan *ch;
- int budget;
-};
-
-struct cpsw_common {
- struct device *dev;
- struct cpsw_platform_data data;
- struct napi_struct napi_rx;
- struct napi_struct napi_tx;
- struct cpsw_ss_regs __iomem *regs;
- struct cpsw_wr_regs __iomem *wr_regs;
- u8 __iomem *hw_stats;
- struct cpsw_host_regs __iomem *host_port_regs;
- u32 version;
- u32 coal_intvl;
- u32 bus_freq_mhz;
- int rx_packet_max;
- struct cpsw_slave *slaves;
- struct cpdma_ctlr *dma;
- struct cpsw_vector txv[CPSW_MAX_QUEUES];
- struct cpsw_vector rxv[CPSW_MAX_QUEUES];
- struct cpsw_ale *ale;
- bool quirk_irq;
- bool rx_irq_disabled;
- bool tx_irq_disabled;
- u32 irqs_table[IRQ_NUM];
- struct cpts *cpts;
- int rx_ch_num, tx_ch_num;
- int speed;
- int usage_count;
-};
-
-struct cpsw_priv {
- struct net_device *ndev;
- struct device *dev;
- u32 msg_enable;
- u8 mac_addr[ETH_ALEN];
- bool rx_pause;
- bool tx_pause;
- bool mqprio_hw;
- int fifo_bw[CPSW_TC_NUM];
- int shp_cfg_speed;
- int tx_ts_enabled;
- int rx_ts_enabled;
- u32 emac_port;
- struct cpsw_common *cpsw;
-};
-
-struct cpsw_stats {
- char stat_string[ETH_GSTRING_LEN];
- int type;
- int sizeof_stat;
- int stat_offset;
-};
-
-enum {
- CPSW_STATS,
- CPDMA_RX_STATS,
- CPDMA_TX_STATS,
-};
-
-#define CPSW_STAT(m) CPSW_STATS, \
- FIELD_SIZEOF(struct cpsw_hw_stats, m), \
- offsetof(struct cpsw_hw_stats, m)
-#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
- FIELD_SIZEOF(struct cpdma_chan_stats, m), \
- offsetof(struct cpdma_chan_stats, m)
-#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
- FIELD_SIZEOF(struct cpdma_chan_stats, m), \
- offsetof(struct cpdma_chan_stats, m)
-
-static const struct cpsw_stats cpsw_gstrings_stats[] = {
- { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
- { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
- { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
- { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
- { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
- { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
- { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
- { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
- { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
- { "Rx Fragments", CPSW_STAT(rxfragments) },
- { "Rx Octets", CPSW_STAT(rxoctets) },
- { "Good Tx Frames", CPSW_STAT(txgoodframes) },
- { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
- { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
- { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
- { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
- { "Collisions", CPSW_STAT(txcollisionframes) },
- { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
- { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
- { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
- { "Late Collisions", CPSW_STAT(txlatecollisions) },
- { "Tx Underrun", CPSW_STAT(txunderrun) },
- { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
- { "Tx Octets", CPSW_STAT(txoctets) },
- { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
- { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
- { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
- { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
- { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
- { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
- { "Net Octets", CPSW_STAT(netoctets) },
- { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
- { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
- { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
-};
-
-static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
- { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
- { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
- { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
- { "misqueued", CPDMA_RX_STAT(misqueued) },
- { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
- { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
- { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
- { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
- { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
- { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
- { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
- { "requeue", CPDMA_RX_STAT(requeue) },
- { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
-};
-
-#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats)
-#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats)
-
-#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
-#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
#define for_each_slave(priv, func, arg...) \
do { \
struct cpsw_slave *slave; \
@@ -572,11 +77,6 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid);
-static inline int cpsw_get_slave_port(u32 slave_num)
-{
- return slave_num + 1;
-}
-
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -653,13 +153,6 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-struct addr_sync_ctx {
- struct net_device *ndev;
- const u8 *addr; /* address to be synched */
- int consumed; /* number of address instances */
- int flush; /* flush flag */
-};
-
/**
* cpsw_set_mc - adds multicast entry to the table if it's not added or deletes
* if it's not deleted
@@ -800,12 +293,17 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_port = -1;
+
+ if (cpsw->data.dual_emac)
+ slave_port = priv->emac_port + 1;
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
cpsw_set_promiscious(ndev, true);
- cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
return;
} else {
/* Disable promiscuous mode */
@@ -813,14 +311,15 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
}
/* Restore allmulti on vlans if necessary */
- cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
+ cpsw_ale_set_allmulti(cpsw->ale,
+ ndev->flags & IFF_ALLMULTI, slave_port);
/* add/remove mcast address either for real netdev or for vlan */
__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
cpsw_del_mc_addr);
}
-static void cpsw_intr_enable(struct cpsw_common *cpsw)
+void cpsw_intr_enable(struct cpsw_common *cpsw)
{
writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
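Note on the cpsw_ndo_set_rx_mode() hunk above: cpsw_ale_set_allmulti() now takes a port argument, so in dual-EMAC mode the ALE update is scoped to the requesting slave's port (priv->emac_port + 1), while -1 keeps the old switch-wide behaviour. A minimal caller sketch mirroring the code above (signature taken from this patch):

    /* Sketch: scope the allmulti update to one external port in dual-EMAC
     * mode, or apply it switch-wide otherwise (port == -1).
     */
    static void sketch_update_allmulti(struct net_device *ndev)
    {
            struct cpsw_priv *priv = netdev_priv(ndev);
            struct cpsw_common *cpsw = priv->cpsw;
            int slave_port = cpsw->data.dual_emac ? priv->emac_port + 1 : -1;

            cpsw_ale_set_allmulti(cpsw->ale,
                                  ndev->flags & IFF_ALLMULTI, slave_port);
    }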
@@ -829,7 +328,7 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw)
return;
}
-static void cpsw_intr_disable(struct cpsw_common *cpsw)
+void cpsw_intr_disable(struct cpsw_common *cpsw)
{
writel_relaxed(0, &cpsw->wr_regs->tx_en);
writel_relaxed(0, &cpsw->wr_regs->rx_en);
@@ -838,7 +337,7 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw)
return;
}
-static void cpsw_tx_handler(void *token, int len, int status)
+void cpsw_tx_handler(void *token, int len, int status)
{
struct netdev_queue *txq;
struct sk_buff *skb = token;
@@ -970,11 +469,9 @@ requeue:
dev_kfree_skb_any(new_skb);
}
-static void cpsw_split_res(struct net_device *ndev)
+void cpsw_split_res(struct cpsw_common *cpsw)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
u32 consumed_rate = 0, bigest_rate = 0;
- struct cpsw_common *cpsw = priv->cpsw;
struct cpsw_vector *txv = cpsw->txv;
int i, ch_weight, rlim_ch_num = 0;
int budget, bigest_rate_ch = 0;
@@ -1254,29 +751,32 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
slave_port = cpsw_get_slave_port(slave->slave_num);
if (phy->link) {
- mac_control = cpsw->data.mac_control;
-
- /* enable forwarding */
- cpsw_ale_control_set(cpsw->ale, slave_port,
- ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ mac_control = CPSW_SL_CTL_GMII_EN;
if (phy->speed == 1000)
- mac_control |= BIT(7); /* GIGABITEN */
+ mac_control |= CPSW_SL_CTL_GIG;
if (phy->duplex)
- mac_control |= BIT(0); /* FULLDUPLEXEN */
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
- mac_control |= BIT(15);
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
/* in band mode only works in 10Mbps RGMII mode */
else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
- mac_control |= BIT(18); /* In Band mode */
+ mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
if (priv->rx_pause)
- mac_control |= BIT(3);
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
if (priv->tx_pause)
- mac_control |= BIT(4);
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ if (mac_control != slave->mac_control)
+ cpsw_sl_ctl_set(slave->mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
*link = true;
@@ -1290,12 +790,14 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* disable forwarding */
cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_wait_for_idle(slave->mac_sl, 100);
+
+ cpsw_sl_ctl_reset(slave->mac_sl);
}
- if (mac_control != slave->mac_control) {
+ if (mac_control != slave->mac_control)
phy_print_status(phy);
- writel_relaxed(mac_control, &slave->sliver->mac_control);
- }
slave->mac_control = mac_control;
}
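Note on the _cpsw_adjust_link() hunk above: instead of poking raw BIT() values into the sliver mac_control register, the value is now built from the named CPSW_SL_CTL_* flags and applied through cpsw_sl_ctl_set(); on link loss the MAC is drained with cpsw_sl_wait_for_idle() and reset, rather than merely having ALE forwarding disabled. A condensed sketch of the resulting link-up path (helper names taken from this series; a sketch, not a drop-in replacement, and the ALE forwarding enable that follows in the real code is omitted):

    static void sketch_link_up(struct cpsw_slave *slave, struct cpsw_priv *priv,
                               struct phy_device *phy)
    {
            u32 mac_control = CPSW_SL_CTL_GMII_EN;

            if (phy->speed == 1000)
                    mac_control |= CPSW_SL_CTL_GIG;
            if (phy->duplex)
                    mac_control |= CPSW_SL_CTL_FULLDUPLEX;
            if (phy->speed == 100)
                    mac_control |= CPSW_SL_CTL_IFCTL_A;   /* RMII speed_in */
            else if (phy->speed == 10 && phy_interface_is_rgmii(phy))
                    mac_control |= CPSW_SL_CTL_EXT_EN;    /* in-band mode */
            if (priv->rx_pause)
                    mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
            if (priv->tx_pause)
                    mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

            if (mac_control != slave->mac_control)
                    cpsw_sl_ctl_set(slave->mac_sl, mac_control);
            slave->mac_control = mac_control;
    }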
@@ -1348,7 +850,7 @@ static void cpsw_adjust_link(struct net_device *ndev)
if (link) {
if (cpsw_need_resplit(cpsw))
- cpsw_split_res(ndev);
+ cpsw_split_res(cpsw);
netif_carrier_on(ndev);
if (netif_running(ndev))
@@ -1359,167 +861,6 @@ static void cpsw_adjust_link(struct net_device *ndev)
}
}
-static int cpsw_get_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *coal)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- coal->rx_coalesce_usecs = cpsw->coal_intvl;
- return 0;
-}
-
-static int cpsw_set_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *coal)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- u32 int_ctrl;
- u32 num_interrupts = 0;
- u32 prescale = 0;
- u32 addnl_dvdr = 1;
- u32 coal_intvl = 0;
- struct cpsw_common *cpsw = priv->cpsw;
-
- coal_intvl = coal->rx_coalesce_usecs;
-
- int_ctrl = readl(&cpsw->wr_regs->int_control);
- prescale = cpsw->bus_freq_mhz * 4;
-
- if (!coal->rx_coalesce_usecs) {
- int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
- goto update_return;
- }
-
- if (coal_intvl < CPSW_CMINTMIN_INTVL)
- coal_intvl = CPSW_CMINTMIN_INTVL;
-
- if (coal_intvl > CPSW_CMINTMAX_INTVL) {
- /* Interrupt pacer works with 4us Pulse, we can
- * throttle further by dilating the 4us pulse.
- */
- addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
-
- if (addnl_dvdr > 1) {
- prescale *= addnl_dvdr;
- if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
- coal_intvl = (CPSW_CMINTMAX_INTVL
- * addnl_dvdr);
- } else {
- addnl_dvdr = 1;
- coal_intvl = CPSW_CMINTMAX_INTVL;
- }
- }
-
- num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
- writel(num_interrupts, &cpsw->wr_regs->rx_imax);
- writel(num_interrupts, &cpsw->wr_regs->tx_imax);
-
- int_ctrl |= CPSW_INTPACEEN;
- int_ctrl &= (~CPSW_INTPRESCALE_MASK);
- int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
-
-update_return:
- writel(int_ctrl, &cpsw->wr_regs->int_control);
-
- cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
- cpsw->coal_intvl = coal_intvl;
-
- return 0;
-}
-
-static int cpsw_get_sset_count(struct net_device *ndev, int sset)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- switch (sset) {
- case ETH_SS_STATS:
- return (CPSW_STATS_COMMON_LEN +
- (cpsw->rx_ch_num + cpsw->tx_ch_num) *
- CPSW_STATS_CH_LEN);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
-{
- int ch_stats_len;
- int line;
- int i;
-
- ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
- for (i = 0; i < ch_stats_len; i++) {
- line = i % CPSW_STATS_CH_LEN;
- snprintf(*p, ETH_GSTRING_LEN,
- "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
- (long)(i / CPSW_STATS_CH_LEN),
- cpsw_gstrings_ch_stats[line].stat_string);
- *p += ETH_GSTRING_LEN;
- }
-}
-
-static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- u8 *p = data;
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
- memcpy(p, cpsw_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
-
- cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
- cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
- break;
- }
-}
-
-static void cpsw_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
-{
- u8 *p;
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- struct cpdma_chan_stats ch_stats;
- int i, l, ch;
-
- /* Collect Davinci CPDMA stats for Rx and Tx Channel */
- for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
- data[l] = readl(cpsw->hw_stats +
- cpsw_gstrings_stats[l].stat_offset);
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
- for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
- p = (u8 *)&ch_stats +
- cpsw_gstrings_ch_stats[i].stat_offset;
- data[l] = *(u32 *)p;
- }
- }
-
- for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
- cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
- for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
- p = (u8 *)&ch_stats +
- cpsw_gstrings_ch_stats[i].stat_offset;
- data[l] = *(u32 *)p;
- }
- }
-}
-
-static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
- struct sk_buff *skb,
- struct cpdma_chan *txch)
-{
- struct cpsw_common *cpsw = priv->cpsw;
-
- skb_tx_timestamp(skb);
- return cpdma_chan_submit(txch, skb, skb->data, skb->len,
- priv->emac_port + cpsw->data.dual_emac);
-}
-
static inline void cpsw_add_dual_emac_def_ale_entries(
struct cpsw_priv *priv, struct cpsw_slave *slave,
u32 slave_port)
@@ -1542,24 +883,18 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
-static void soft_reset_slave(struct cpsw_slave *slave)
-{
- char name[32];
-
- snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
- soft_reset(name, &slave->sliver->soft_reset);
-}
-
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
u32 slave_port;
struct phy_device *phy;
struct cpsw_common *cpsw = priv->cpsw;
- soft_reset_slave(slave);
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
/* setup priority mapping */
- writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
+ RX_PRIORITY_MAPPING);
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -1585,7 +920,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
/* setup max packet size, and mac address */
- writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
+ cpsw->rx_packet_max);
cpsw_set_slave_mac(slave, priv);
slave->mac_control = 0; /* no link yet */
@@ -1696,7 +1032,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
}
}
-static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
struct cpsw_common *cpsw = priv->cpsw;
struct sk_buff *skb;
@@ -1748,7 +1084,8 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
slave->phy = NULL;
cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
- soft_reset_slave(slave);
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
}
static int cpsw_tc_to_fifo(int tc, int num_tc)
@@ -2114,7 +1451,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
for_each_slave(priv, cpsw_slave_stop, cpsw);
if (cpsw_need_resplit(cpsw))
- cpsw_split_res(ndev);
+ cpsw_split_res(cpsw);
cpsw->usage_count--;
pm_runtime_put_sync(cpsw->dev);
@@ -2147,7 +1484,9 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
txch = cpsw->txv[q_idx].ch;
txq = netdev_get_tx_queue(ndev, q_idx);
- ret = cpsw_tx_packet_submit(priv, skb, txch);
+ skb_tx_timestamp(skb);
+ ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port + cpsw->data.dual_emac);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
@@ -2418,18 +1757,6 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void cpsw_ndo_poll_controller(struct net_device *ndev)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- cpsw_intr_disable(cpsw);
- cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
- cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
- cpsw_intr_enable(cpsw);
-}
-#endif
-
static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unsigned short vid)
{
@@ -2601,7 +1928,7 @@ static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
}
- cpsw_split_res(ndev);
+ cpsw_split_res(cpsw);
return ret;
}
@@ -2677,6 +2004,18 @@ static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
+}
+#endif
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -2695,25 +2034,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_setup_tc = cpsw_ndo_setup_tc,
};
-static int cpsw_get_regs_len(struct net_device *ndev)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
-}
-
-static void cpsw_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *p)
-{
- u32 *reg = p;
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- /* update CPSW IP version */
- regs->version = cpsw->version;
-
- cpsw_ale_dump(cpsw->ale, reg);
-}
-
static void cpsw_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -2725,119 +2045,6 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
-static u32 cpsw_get_msglevel(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- return priv->msg_enable;
-}
-
-static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- priv->msg_enable = value;
-}
-
-#if IS_ENABLED(CONFIG_TI_CPTS)
-static int cpsw_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = cpsw->cpts->phc_index;
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
- return 0;
-}
-#else
-static int cpsw_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
-{
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
- info->tx_types = 0;
- info->rx_filters = 0;
- return 0;
-}
-#endif
-
-static int cpsw_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *ecmd)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (!cpsw->slaves[slave_no].phy)
- return -EOPNOTSUPP;
-
- phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
- return 0;
-}
-
-static int cpsw_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *ecmd)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
- ecmd);
- else
- return -EOPNOTSUPP;
-}
-
-static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (cpsw->slaves[slave_no].phy)
- phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
-}
-
-static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
- else
- return -EOPNOTSUPP;
-}
-
-static void cpsw_get_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pause)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
-
- pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = priv->rx_pause ? true : false;
- pause->tx_pause = priv->tx_pause ? true : false;
-}
-
static int cpsw_set_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
@@ -2851,316 +2058,10 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
return 0;
}
-static int cpsw_ethtool_op_begin(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ret;
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(cpsw->dev);
- }
-
- return ret;
-}
-
-static void cpsw_ethtool_op_complete(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- int ret;
-
- ret = pm_runtime_put(priv->cpsw->dev);
- if (ret < 0)
- cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
-}
-
-static void cpsw_get_channels(struct net_device *ndev,
- struct ethtool_channels *ch)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
- ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
- ch->max_combined = 0;
- ch->max_other = 0;
- ch->other_count = 0;
- ch->rx_count = cpsw->rx_ch_num;
- ch->tx_count = cpsw->tx_ch_num;
- ch->combined_count = 0;
-}
-
-static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
- struct ethtool_channels *ch)
-{
- if (cpsw->quirk_irq) {
- dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
- return -EOPNOTSUPP;
- }
-
- if (ch->combined_count)
- return -EINVAL;
-
- /* verify we have at least one channel in each direction */
- if (!ch->rx_count || !ch->tx_count)
- return -EINVAL;
-
- if (ch->rx_count > cpsw->data.channels ||
- ch->tx_count > cpsw->data.channels)
- return -EINVAL;
-
- return 0;
-}
-
-static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- void (*handler)(void *, int, int);
- struct netdev_queue *queue;
- struct cpsw_vector *vec;
- int ret, *ch, vch;
-
- if (rx) {
- ch = &cpsw->rx_ch_num;
- vec = cpsw->rxv;
- handler = cpsw_rx_handler;
- } else {
- ch = &cpsw->tx_ch_num;
- vec = cpsw->txv;
- handler = cpsw_tx_handler;
- }
-
- while (*ch < ch_num) {
- vch = rx ? *ch : 7 - *ch;
- vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
- queue = netdev_get_tx_queue(priv->ndev, *ch);
- queue->tx_maxrate = 0;
-
- if (IS_ERR(vec[*ch].ch))
- return PTR_ERR(vec[*ch].ch);
-
- if (!vec[*ch].ch)
- return -EINVAL;
-
- cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
- (rx ? "rx" : "tx"));
- (*ch)++;
- }
-
- while (*ch > ch_num) {
- (*ch)--;
-
- ret = cpdma_chan_destroy(vec[*ch].ch);
- if (ret)
- return ret;
-
- cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
- (rx ? "rx" : "tx"));
- }
-
- return 0;
-}
-
-static int cpsw_update_channels(struct cpsw_priv *priv,
- struct ethtool_channels *ch)
-{
- int ret;
-
- ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
- if (ret)
- return ret;
-
- ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void cpsw_suspend_data_pass(struct net_device *ndev)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- struct cpsw_slave *slave;
- int i;
-
- /* Disable NAPI scheduling */
- cpsw_intr_disable(cpsw);
-
- /* Stop all transmit queues for every network device.
- * Disable re-using rx descriptors with dormant_on.
- */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
-
- netif_tx_stop_all_queues(slave->ndev);
- netif_dormant_on(slave->ndev);
- }
-
- /* Handle rest of tx packets and stop cpdma channels */
- cpdma_ctlr_stop(cpsw->dma);
-}
-
-static int cpsw_resume_data_pass(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int i, ret;
-
- /* Allow rx packets handling */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
- if (slave->ndev && netif_running(slave->ndev))
- netif_dormant_off(slave->ndev);
-
- /* After this receive is started */
- if (cpsw->usage_count) {
- ret = cpsw_fill_rx_channels(priv);
- if (ret)
- return ret;
-
- cpdma_ctlr_start(cpsw->dma);
- cpsw_intr_enable(cpsw);
- }
-
- /* Resume transmit for every affected interface */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
- if (slave->ndev && netif_running(slave->ndev))
- netif_tx_start_all_queues(slave->ndev);
-
- return 0;
-}
-
static int cpsw_set_channels(struct net_device *ndev,
struct ethtool_channels *chs)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int i, ret;
-
- ret = cpsw_check_ch_settings(cpsw, chs);
- if (ret < 0)
- return ret;
-
- cpsw_suspend_data_pass(ndev);
- ret = cpsw_update_channels(priv, chs);
- if (ret)
- goto err;
-
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
-
- /* Inform stack about new count of queues */
- ret = netif_set_real_num_tx_queues(slave->ndev,
- cpsw->tx_ch_num);
- if (ret) {
- dev_err(priv->dev, "cannot set real number of tx queues\n");
- goto err;
- }
-
- ret = netif_set_real_num_rx_queues(slave->ndev,
- cpsw->rx_ch_num);
- if (ret) {
- dev_err(priv->dev, "cannot set real number of rx queues\n");
- goto err;
- }
- }
-
- if (cpsw->usage_count)
- cpsw_split_res(ndev);
-
- ret = cpsw_resume_data_pass(ndev);
- if (!ret)
- return 0;
-err:
- dev_err(priv->dev, "cannot update channels number, closing device\n");
- dev_close(ndev);
- return ret;
-}
-
-static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
-static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
-static int cpsw_nway_reset(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
- else
- return -EOPNOTSUPP;
-}
-
-static void cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
-
- /* not supported */
- ering->tx_max_pending = 0;
- ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
- ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
- ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
-}
-
-static int cpsw_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ret;
-
- /* ignore ering->tx_pending - only rx_pending adjustment is supported */
-
- if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
- ering->rx_pending < CPSW_MAX_QUEUES ||
- ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
- return -EINVAL;
-
- if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
- return 0;
-
- cpsw_suspend_data_pass(ndev);
-
- cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
-
- if (cpsw->usage_count)
- cpdma_chan_split_pool(cpsw->dma);
-
- ret = cpsw_resume_data_pass(ndev);
- if (!ret)
- return 0;
-
- dev_err(&ndev->dev, "cannot set ring params, closing device\n");
- dev_close(ndev);
- return ret;
+ return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
}
static const struct ethtool_ops cpsw_ethtool_ops = {
@@ -3193,19 +2094,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.set_ringparam = cpsw_set_ringparam,
};
-static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
- u32 slave_reg_ofs, u32 sliver_reg_ofs)
-{
- void __iomem *regs = cpsw->regs;
- int slave_num = slave->slave_num;
- struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num;
-
- slave->data = data;
- slave->regs = regs + slave_reg_ofs;
- slave->sliver = regs + sliver_reg_ofs;
- slave->port_vlan = data->dual_emac_res_vlan;
-}
-
static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct platform_device *pdev)
{
@@ -3344,8 +2232,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
no_phy_slave:
mac_addr = of_get_mac_address(slave_node);
- if (mac_addr) {
- memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(slave_data->mac_addr, mac_addr);
} else {
ret = ti_cm_get_macid(&pdev->dev, i,
slave_data->mac_addr);
@@ -3408,7 +2296,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
struct cpsw_priv *priv_sl2;
int ret = 0;
- ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
+ ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
if (!ndev) {
dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
return -ENOMEM;
@@ -3442,11 +2331,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
/* register the network device */
SET_NETDEV_DEV(ndev, cpsw->dev);
ret = register_netdev(ndev);
- if (ret) {
+ if (ret)
dev_err(cpsw->dev, "cpsw: error registering net device\n");
- free_netdev(ndev);
- ret = -ENODEV;
- }
return ret;
}
@@ -3467,63 +2353,74 @@ static const struct soc_device_attribute cpsw_soc_devices[] = {
static int cpsw_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct clk *clk;
struct cpsw_platform_data *data;
struct net_device *ndev;
struct cpsw_priv *priv;
- struct cpdma_params dma_params;
- struct cpsw_ale_params ale_params;
void __iomem *ss_regs;
- void __iomem *cpts_regs;
struct resource *res, *ss_res;
struct gpio_descs *mode;
- u32 slave_offset, sliver_offset, slave_size;
const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
- int ret = 0, i, ch;
+ int ret = 0, ch;
int irq;
- cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
if (!cpsw)
return -ENOMEM;
- cpsw->dev = &pdev->dev;
+ cpsw->dev = dev;
- ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
- if (!ndev) {
- dev_err(&pdev->dev, "error allocating net_device\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, ndev);
- priv = netdev_priv(ndev);
- priv->cpsw = cpsw;
- priv->ndev = ndev;
- priv->dev = &ndev->dev;
- priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
- cpsw->rx_packet_max = max(rx_packet_max, 128);
-
- mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
+ mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
if (IS_ERR(mode)) {
ret = PTR_ERR(mode);
- dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
- goto clean_ndev_ret;
+ dev_err(dev, "gpio request failed, ret %d\n", ret);
+ return ret;
}
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "fck is not found %d\n", ret);
+ return ret;
+ }
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
+
+ ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss_regs = devm_ioremap_resource(dev, ss_res);
+ if (IS_ERR(ss_regs))
+ return PTR_ERR(ss_regs);
+ cpsw->regs = ss_regs;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cpsw->wr_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cpsw->wr_regs))
+ return PTR_ERR(cpsw->wr_regs);
+
+ /* RX IRQ */
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[0] = irq;
+
+ /* TX IRQ */
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[1] = irq;
+
/*
* This may be required here for child devices.
*/
- pm_runtime_enable(&pdev->dev);
-
- /* Select default pin state */
- pinctrl_pm_select_default_state(&pdev->dev);
+ pm_runtime_enable(dev);
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_put_noidle(dev);
goto clean_runtime_disable_ret;
}
@@ -3531,170 +2428,72 @@ static int cpsw_probe(struct platform_device *pdev)
if (ret)
goto clean_dt_ret;
- data = &cpsw->data;
- cpsw->rx_ch_num = 1;
- cpsw->tx_ch_num = 1;
-
- if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
- memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
- dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
- } else {
- eth_random_addr(priv->mac_addr);
- dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
- }
-
- memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = 1;
- cpsw->slaves = devm_kcalloc(&pdev->dev,
+ data = &cpsw->data;
+ cpsw->slaves = devm_kcalloc(dev,
data->slaves, sizeof(struct cpsw_slave),
GFP_KERNEL);
if (!cpsw->slaves) {
ret = -ENOMEM;
goto clean_dt_ret;
}
- for (i = 0; i < data->slaves; i++)
- cpsw->slaves[i].slave_num = i;
-
- cpsw->slaves[0].ndev = ndev;
- priv->emac_port = 0;
-
- clk = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(clk)) {
- dev_err(priv->dev, "fck is not found\n");
- ret = -ENODEV;
- goto clean_dt_ret;
- }
- cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
-
- ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
- if (IS_ERR(ss_regs)) {
- ret = PTR_ERR(ss_regs);
- goto clean_dt_ret;
- }
- cpsw->regs = ss_regs;
-
- cpsw->version = readl(&cpsw->regs->id_ver);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cpsw->wr_regs)) {
- ret = PTR_ERR(cpsw->wr_regs);
- goto clean_dt_ret;
- }
- memset(&dma_params, 0, sizeof(dma_params));
- memset(&ale_params, 0, sizeof(ale_params));
+ cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
+ cpsw->descs_pool_size = descs_pool_size;
- switch (cpsw->version) {
- case CPSW_VERSION_1:
- cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
- cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
- dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
- dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
- ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
- slave_offset = CPSW1_SLAVE_OFFSET;
- slave_size = CPSW1_SLAVE_SIZE;
- sliver_offset = CPSW1_SLIVER_OFFSET;
- dma_params.desc_mem_phys = 0;
- break;
- case CPSW_VERSION_2:
- case CPSW_VERSION_3:
- case CPSW_VERSION_4:
- cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
- cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
- dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
- dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
- ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
- slave_offset = CPSW2_SLAVE_OFFSET;
- slave_size = CPSW2_SLAVE_SIZE;
- sliver_offset = CPSW2_SLIVER_OFFSET;
- dma_params.desc_mem_phys =
- (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
- break;
- default:
- dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
- ret = -ENODEV;
- goto clean_dt_ret;
- }
- for (i = 0; i < cpsw->data.slaves; i++) {
- struct cpsw_slave *slave = &cpsw->slaves[i];
-
- cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
- slave_offset += slave_size;
- sliver_offset += SLIVER_SIZE;
- }
-
- dma_params.dev = &pdev->dev;
- dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
- dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
- dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
- dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
- dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
-
- dma_params.num_chan = data->channels;
- dma_params.has_soft_reset = true;
- dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
- dma_params.desc_mem_size = data->bd_ram_size;
- dma_params.desc_align = 16;
- dma_params.has_ext_regs = true;
- dma_params.desc_hw_addr = dma_params.desc_mem_phys;
- dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
- dma_params.descs_pool_size = descs_pool_size;
-
- cpsw->dma = cpdma_ctlr_create(&dma_params);
- if (!cpsw->dma) {
- dev_err(priv->dev, "error initializing dma\n");
- ret = -ENOMEM;
+ ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
+ ss_res->start + CPSW2_BD_OFFSET,
+ descs_pool_size);
+ if (ret)
goto clean_dt_ret;
- }
-
- soc = soc_device_match(cpsw_soc_devices);
- if (soc)
- cpsw->quirk_irq = 1;
ch = cpsw->quirk_irq ? 0 : 7;
cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
if (IS_ERR(cpsw->txv[0].ch)) {
- dev_err(priv->dev, "error initializing tx dma channel\n");
+ dev_err(dev, "error initializing tx dma channel\n");
ret = PTR_ERR(cpsw->txv[0].ch);
- goto clean_dma_ret;
+ goto clean_cpts;
}
cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
if (IS_ERR(cpsw->rxv[0].ch)) {
- dev_err(priv->dev, "error initializing rx dma channel\n");
+ dev_err(dev, "error initializing rx dma channel\n");
ret = PTR_ERR(cpsw->rxv[0].ch);
- goto clean_dma_ret;
+ goto clean_cpts;
}
+ cpsw_split_res(cpsw);
- ale_params.dev = &pdev->dev;
- ale_params.ale_ageout = ale_ageout;
- ale_params.ale_entries = data->ale_entries;
- ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
-
- cpsw->ale = cpsw_ale_create(&ale_params);
- if (!cpsw->ale) {
- dev_err(priv->dev, "error initializing ale engine\n");
- ret = -ENODEV;
- goto clean_dma_ret;
+ /* setup netdev */
+ ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
+ if (!ndev) {
+ dev_err(dev, "error allocating net_device\n");
+ goto clean_cpts;
}
- cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
- if (IS_ERR(cpsw->cpts)) {
- ret = PTR_ERR(cpsw->cpts);
- goto clean_dma_ret;
- }
+ platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+ priv->cpsw = cpsw;
+ priv->ndev = ndev;
+ priv->dev = dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->emac_port = 0;
- ndev->irq = platform_get_irq(pdev, 1);
- if (ndev->irq < 0) {
- dev_err(priv->dev, "error getting irq resource\n");
- ret = ndev->irq;
- goto clean_dma_ret;
+ if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
+ memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
+ dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
+ } else {
+ eth_random_addr(priv->mac_addr);
+ dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
}
+ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+
+ cpsw->slaves[0].ndev = ndev;
+
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->netdev_ops = &cpsw_netdev_ops;
@@ -3705,15 +2504,14 @@ static int cpsw_probe(struct platform_device *pdev)
netif_tx_napi_add(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
CPSW_POLL_WEIGHT);
- cpsw_split_res(ndev);
/* register the network device */
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
ret = register_netdev(ndev);
if (ret) {
- dev_err(priv->dev, "error registering net device\n");
+ dev_err(dev, "error registering net device\n");
ret = -ENODEV;
- goto clean_dma_ret;
+ goto clean_cpts;
}
if (cpsw->data.dual_emac) {
@@ -3731,40 +2529,24 @@ static int cpsw_probe(struct platform_device *pdev)
* If anyone wants to implement support for those, make sure to
* first request and append them to irqs_table array.
*/
-
- /* RX IRQ */
- irq = platform_get_irq(pdev, 1);
- if (irq < 0) {
- ret = irq;
- goto clean_dma_ret;
- }
-
- cpsw->irqs_table[0] = irq;
- ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
- 0, dev_name(&pdev->dev), cpsw);
+ ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
+ 0, dev_name(dev), cpsw);
if (ret < 0) {
- dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_dma_ret;
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
}
- /* TX IRQ */
- irq = platform_get_irq(pdev, 2);
- if (irq < 0) {
- ret = irq;
- goto clean_dma_ret;
- }
- cpsw->irqs_table[1] = irq;
- ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
+ ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
- dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_dma_ret;
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
}
cpsw_notice(priv, probe,
"initialized device (regs %pa, irq %d, pool size %d)\n",
- &ss_res->start, ndev->irq, dma_params.descs_pool_size);
+ &ss_res->start, cpsw->irqs_table[0], descs_pool_size);
pm_runtime_put(&pdev->dev);
@@ -3772,15 +2554,14 @@ static int cpsw_probe(struct platform_device *pdev)
clean_unregister_netdev_ret:
unregister_netdev(ndev);
-clean_dma_ret:
+clean_cpts:
+ cpts_release(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
-clean_ndev_ret:
- free_netdev(priv->ndev);
return ret;
}
@@ -3805,9 +2586,6 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (cpsw->data.dual_emac)
- free_netdev(cpsw->slaves[1].ndev);
- free_netdev(ndev);
return 0;
}
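Note on the probe/remove hunks above: with both network devices now allocated via devm_alloc_etherdev_mqs() and the DMA/ALE/CPTS setup centralised in cpsw_init_common(), the explicit free_netdev() calls disappear from the probe error path, cpsw_probe_dual_emac() and cpsw_remove(). The failure unwind in cpsw_probe() then reduces to the fragment below (labels and calls as in the hunks above, reproduced only to make the new ordering, with cpts_release() ahead of the CPDMA teardown, easier to follow):

    /* Fragment, not a standalone function: probe error unwind after this patch. */
    clean_unregister_netdev_ret:
            unregister_netdev(ndev);
    clean_cpts:
            cpts_release(cpsw->cpts);        /* CPTS torn down before CPDMA */
            cpdma_ctlr_destroy(cpsw->dma);
    clean_dt_ret:
            cpsw_remove_dt(pdev);
            pm_runtime_put_sync(&pdev->dev);
    clean_runtime_disable_ret:
            pm_runtime_disable(&pdev->dev);
            return ret;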
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 907e05fc22e4..35d602f03281 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -1,15 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Texas Instruments Ethernet Switch Driver
*
* Copyright (C) 2013 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __CPSW_H__
#define __CPSW_H__
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 798c989d5d93..84025dcc78d5 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments N-Port Ethernet Switch Address Lookup Engine
*
* Copyright (C) 2012 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -287,6 +280,9 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
if (cpsw_ale_get_mcast(ale_entry)) {
u8 addr[6];
+ if (cpsw_ale_get_super(ale_entry))
+ continue;
+
cpsw_ale_get_addr(ale_entry, addr);
if (!is_broadcast_ether_addr(addr))
cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
@@ -296,7 +292,6 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
}
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
int flags, u16 vid)
@@ -334,7 +329,6 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid)
@@ -350,7 +344,6 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state)
@@ -365,7 +358,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
cpsw_ale_set_addr(ale_entry, addr);
- cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+ cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0);
cpsw_ale_set_mcast_state(ale_entry, mcast_state);
mask = cpsw_ale_get_port_mask(ale_entry,
@@ -384,7 +377,6 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
@@ -407,7 +399,6 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
/* ALE NetCP NU switch specific vlan functions */
static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
@@ -458,7 +449,6 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
{
@@ -480,40 +470,39 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
-void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
{
u32 ale_entry[ALE_ENTRY_WORDS];
- int type, idx;
int unreg_mcast = 0;
-
- /* Only bother doing the work if the setting is actually changing */
- if (ale->allmulti == allmulti)
- return;
-
- /* Remember the new setting to check against next time */
- ale->allmulti = allmulti;
+ int type, idx;
for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ int vlan_members;
+
cpsw_ale_read(ale, idx, ale_entry);
type = cpsw_ale_get_entry_type(ale_entry);
if (type != ALE_TYPE_VLAN)
continue;
+ vlan_members =
+ cpsw_ale_get_vlan_member_list(ale_entry,
+ ale->vlan_field_bits);
+
+ if (port != -1 && !(vlan_members & BIT(port)))
+ continue;
unreg_mcast =
cpsw_ale_get_vlan_unreg_mcast(ale_entry,
ale->vlan_field_bits);
if (allmulti)
- unreg_mcast |= 1;
+ unreg_mcast |= ALE_PORT_HOST;
else
- unreg_mcast &= ~1;
+ unreg_mcast &= ~ALE_PORT_HOST;
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
ale->vlan_field_bits);
cpsw_ale_write(ale, idx, ale_entry);
}
}
-EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti);
struct ale_control_info {
const char *name;
@@ -739,7 +728,6 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_control_set);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
{
@@ -763,7 +751,6 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
return tmp & BITMASK(info->bits);
}
-EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
static void cpsw_ale_timer(struct timer_list *t)
{
@@ -788,14 +775,12 @@ void cpsw_ale_start(struct cpsw_ale *ale)
add_timer(&ale->timer);
}
}
-EXPORT_SYMBOL_GPL(cpsw_ale_start);
void cpsw_ale_stop(struct cpsw_ale *ale)
{
del_timer_sync(&ale->timer);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
-EXPORT_SYMBOL_GPL(cpsw_ale_stop);
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
@@ -879,7 +864,6 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
return ale;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_create);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
{
@@ -890,8 +874,3 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
data += ALE_ENTRY_WORDS;
}
}
-EXPORT_SYMBOL_GPL(cpsw_ale_dump);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("TI CPSW ALE driver");
-MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index cd07a3e96d57..370df254eb12 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -1,16 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs
*
* Copyright (C) 2012 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __TI_CPSW_ALE_H__
#define __TI_CPSW_ALE_H__
@@ -37,7 +30,6 @@ struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
unsigned long ageout;
- int allmulti;
u32 version;
/* These bits are different on NetCP NU Switch ALE */
u32 port_mask_bits;
@@ -116,7 +108,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
-void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
new file mode 100644
index 000000000000..a4a7ec0d2531
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver ethtool interface
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/skbuff.h>
+
+#include "cpsw.h"
+#include "cpts.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "davinci_cpdma.h"
+
+struct cpsw_hw_stats {
+ u32 rxgoodframes;
+ u32 rxbroadcastframes;
+ u32 rxmulticastframes;
+ u32 rxpauseframes;
+ u32 rxcrcerrors;
+ u32 rxaligncodeerrors;
+ u32 rxoversizedframes;
+ u32 rxjabberframes;
+ u32 rxundersizedframes;
+ u32 rxfragments;
+ u32 __pad_0[2];
+ u32 rxoctets;
+ u32 txgoodframes;
+ u32 txbroadcastframes;
+ u32 txmulticastframes;
+ u32 txpauseframes;
+ u32 txdeferredframes;
+ u32 txcollisionframes;
+ u32 txsinglecollframes;
+ u32 txmultcollframes;
+ u32 txexcessivecollisions;
+ u32 txlatecollisions;
+ u32 txunderrun;
+ u32 txcarriersenseerrors;
+ u32 txoctets;
+ u32 octetframes64;
+ u32 octetframes65t127;
+ u32 octetframes128t255;
+ u32 octetframes256t511;
+ u32 octetframes512t1023;
+ u32 octetframes1024tup;
+ u32 netoctets;
+ u32 rxsofoverruns;
+ u32 rxmofoverruns;
+ u32 rxdmaoverruns;
+};
+
+struct cpsw_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+enum {
+ CPSW_STATS,
+ CPDMA_RX_STATS,
+ CPDMA_TX_STATS,
+};
+
+#define CPSW_STAT(m) CPSW_STATS, \
+ FIELD_SIZEOF(struct cpsw_hw_stats, m), \
+ offsetof(struct cpsw_hw_stats, m)
+#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
+ FIELD_SIZEOF(struct cpdma_chan_stats, m), \
+ offsetof(struct cpdma_chan_stats, m)
+#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
+ FIELD_SIZEOF(struct cpdma_chan_stats, m), \
+ offsetof(struct cpdma_chan_stats, m)
+
+static const struct cpsw_stats cpsw_gstrings_stats[] = {
+ { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
+ { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
+ { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
+ { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
+ { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
+ { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
+ { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
+ { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
+ { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
+ { "Rx Fragments", CPSW_STAT(rxfragments) },
+ { "Rx Octets", CPSW_STAT(rxoctets) },
+ { "Good Tx Frames", CPSW_STAT(txgoodframes) },
+ { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
+ { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
+ { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
+ { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
+ { "Collisions", CPSW_STAT(txcollisionframes) },
+ { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
+ { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
+ { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
+ { "Late Collisions", CPSW_STAT(txlatecollisions) },
+ { "Tx Underrun", CPSW_STAT(txunderrun) },
+ { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
+ { "Tx Octets", CPSW_STAT(txoctets) },
+ { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
+ { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
+ { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
+ { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
+ { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
+ { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
+ { "Net Octets", CPSW_STAT(netoctets) },
+ { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
+ { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
+ { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
+};
+
+static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
+ { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+ { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+ { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+ { "misqueued", CPDMA_RX_STAT(misqueued) },
+ { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+ { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+ { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+ { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+ { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+ { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+ { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+ { "requeue", CPDMA_RX_STAT(requeue) },
+ { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats)
+
+u32 cpsw_get_msglevel(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+void cpsw_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ coal->rx_coalesce_usecs = cpsw->coal_intvl;
+ return 0;
+}
+
+int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl;
+ u32 num_interrupts = 0;
+ u32 prescale = 0;
+ u32 addnl_dvdr = 1;
+ u32 coal_intvl = 0;
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ int_ctrl = readl(&cpsw->wr_regs->int_control);
+ prescale = cpsw->bus_freq_mhz * 4;
+
+ if (!coal->rx_coalesce_usecs) {
+ int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
+ goto update_return;
+ }
+
+ if (coal_intvl < CPSW_CMINTMIN_INTVL)
+ coal_intvl = CPSW_CMINTMIN_INTVL;
+
+ if (coal_intvl > CPSW_CMINTMAX_INTVL) {
+ /* The interrupt pacer works with a 4us pulse; we can throttle
+ * further by dilating that pulse.
+ */
+ addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
+ coal_intvl = (CPSW_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = CPSW_CMINTMAX_INTVL;
+ }
+ }
+
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+ writel(num_interrupts, &cpsw->wr_regs->rx_imax);
+ writel(num_interrupts, &cpsw->wr_regs->tx_imax);
+
+ int_ctrl |= CPSW_INTPACEEN;
+ int_ctrl &= (~CPSW_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+
+update_return:
+ writel(int_ctrl, &cpsw->wr_regs->int_control);
+
+ cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
+ cpsw->coal_intvl = coal_intvl;
+
+ return 0;
+}
+
+int cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (CPSW_STATS_COMMON_LEN +
+ (cpsw->rx_ch_num + cpsw->tx_ch_num) *
+ CPSW_STATS_CH_LEN);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
+{
+ int ch_stats_len;
+ int line;
+ int i;
+
+ ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
+ for (i = 0; i < ch_stats_len; i++) {
+ line = i % CPSW_STATS_CH_LEN;
+ snprintf(*p, ETH_GSTRING_LEN,
+ "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
+ (long)(i / CPSW_STATS_CH_LEN),
+ cpsw_gstrings_ch_stats[line].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
+ memcpy(p, cpsw_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
+ cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
+ break;
+ }
+}
+
+void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u8 *p;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpdma_chan_stats ch_stats;
+ int i, l, ch;
+
+ /* Collect Davinci CPDMA stats for Rx and Tx Channel */
+ for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
+ data[l] = readl(cpsw->hw_stats +
+ cpsw_gstrings_stats[l].stat_offset);
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
+ }
+ }
+
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
+ }
+ }
+}
+
+void cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = priv->rx_pause ? true : false;
+ pause->tx_pause = priv->tx_pause ? true : false;
+}
+
+void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (cpsw->slaves[slave_no].phy)
+ phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
+}
+
+int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
+ else
+ return -EOPNOTSUPP;
+}
+
+int cpsw_get_regs_len(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+}
+
+void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
+{
+ u32 *reg = p;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ /* update CPSW IP version */
+ regs->version = cpsw->version;
+
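+ /* the dump blob is the raw ALE table: ale_entries * ALE_ENTRY_WORDS
+ * words, matching cpsw_get_regs_len()
+ */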
+ cpsw_ale_dump(cpsw->ale, reg);
+}
+
+int cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
+ pm_runtime_put_noidle(cpsw->dev);
+ }
+
+ return ret;
+}
+
+void cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_put(priv->cpsw->dev);
+ if (ret < 0)
+ cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+}
+
+void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+ ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+ ch->max_combined = 0;
+ ch->max_other = 0;
+ ch->other_count = 0;
+ ch->rx_count = cpsw->rx_ch_num;
+ ch->tx_count = cpsw->tx_ch_num;
+ ch->combined_count = 0;
+}
+
+int cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
+ return 0;
+}
+
+int cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
+}
+
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
+ else
+ return -EOPNOTSUPP;
+}
+
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
+ else
+ return -EOPNOTSUPP;
+}
+
+int cpsw_nway_reset(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void cpsw_suspend_data_pass(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_slave *slave;
+ int i;
+
+ /* Disable NAPI scheduling */
+ cpsw_intr_disable(cpsw);
+
+ /* Stop all transmit queues for every network device.
+ * Disable re-using rx descriptors with dormant_on.
+ */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+
+ netif_tx_stop_all_queues(slave->ndev);
+ netif_dormant_on(slave->ndev);
+ }
+
+ /* Handle rest of tx packets and stop cpdma channels */
+ cpdma_ctlr_stop(cpsw->dma);
+}
+
+static int cpsw_resume_data_pass(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ /* Allow rx packets handling */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+ if (slave->ndev && netif_running(slave->ndev))
+ netif_dormant_off(slave->ndev);
+
+ /* After this, receive is started */
+ if (cpsw->usage_count) {
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret)
+ return ret;
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ }
+
+ /* Resume transmit for every affected interface */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+ if (slave->ndev && netif_running(slave->ndev))
+ netif_tx_start_all_queues(slave->ndev);
+
+ return 0;
+}
+
+static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
+ struct ethtool_channels *ch)
+{
+ if (cpsw->quirk_irq) {
+ dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
+ return -EOPNOTSUPP;
+ }
+
+ if (ch->combined_count)
+ return -EINVAL;
+
+ /* verify we have at least one channel in each direction */
+ if (!ch->rx_count || !ch->tx_count)
+ return -EINVAL;
+
+ if (ch->rx_count > cpsw->data.channels ||
+ ch->tx_count > cpsw->data.channels)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
+ cpdma_handler_fn rx_handler)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ void (*handler)(void *, int, int);
+ struct netdev_queue *queue;
+ struct cpsw_vector *vec;
+ int ret, *ch, vch;
+
+ if (rx) {
+ ch = &cpsw->rx_ch_num;
+ vec = cpsw->rxv;
+ handler = rx_handler;
+ } else {
+ ch = &cpsw->tx_ch_num;
+ vec = cpsw->txv;
+ handler = cpsw_tx_handler;
+ }
+
+ while (*ch < ch_num) {
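+ /* rx vectors take hw channels counting up from 0; tx vectors
+ * are handed hw channels from the top (7 == CPSW_MAX_QUEUES - 1)
+ * downwards
+ */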
+ vch = rx ? *ch : 7 - *ch;
+ vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
+ queue = netdev_get_tx_queue(priv->ndev, *ch);
+ queue->tx_maxrate = 0;
+
+ if (IS_ERR(vec[*ch].ch))
+ return PTR_ERR(vec[*ch].ch);
+
+ if (!vec[*ch].ch)
+ return -EINVAL;
+
+ cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ (*ch)++;
+ }
+
+ while (*ch > ch_num) {
+ (*ch)--;
+
+ ret = cpdma_chan_destroy(vec[*ch].ch);
+ if (ret)
+ return ret;
+
+ cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ }
+
+ return 0;
+}
+
+int cpsw_set_channels_common(struct net_device *ndev,
+ struct ethtool_channels *chs,
+ cpdma_handler_fn rx_handler)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ ret = cpsw_check_ch_settings(cpsw, chs);
+ if (ret < 0)
+ return ret;
+
+ cpsw_suspend_data_pass(ndev);
+
+ ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
+ if (ret)
+ goto err;
+
+ ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
+ if (ret)
+ goto err;
+
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+
+ /* Inform stack about new count of queues */
+ ret = netif_set_real_num_tx_queues(slave->ndev,
+ cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto err;
+ }
+
+ ret = netif_set_real_num_rx_queues(slave->ndev,
+ cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto err;
+ }
+ }
+
+ if (cpsw->usage_count)
+ cpsw_split_res(cpsw);
+
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
+err:
+ dev_err(priv->dev, "cannot update channels number, closing device\n");
+ dev_close(ndev);
+ return ret;
+}
+
+void cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ /* not supported */
+ ering->tx_max_pending = 0;
+ ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
+ ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
+ ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
+}
+
+int cpsw_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ /* ignore ering->tx_pending - only rx_pending adjustment is supported */
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
+ ering->rx_pending < CPSW_MAX_QUEUES ||
+ ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
+ return -EINVAL;
+
+ if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+ return 0;
+
+ cpsw_suspend_data_pass(ndev);
+
+ cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+
+ if (cpsw->usage_count)
+ cpdma_chan_split_pool(cpsw->dma);
+
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
+
+ dev_err(cpsw->dev, "cannot set ring params, closing device\n");
+ dev_close(ndev);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = cpsw->cpts->phc_index;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ return 0;
+}
+#else
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ info->tx_types = 0;
+ info->rx_filters = 0;
+ return 0;
+}
+#endif
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
new file mode 100644
index 000000000000..476d050a022c
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "cpts.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
+#include "davinci_cpdma.h"
+
+int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
+ int ale_ageout, phys_addr_t desc_mem_phys,
+ int descs_pool_size)
+{
+ u32 slave_offset, sliver_offset, slave_size;
+ struct cpsw_ale_params ale_params;
+ struct cpsw_platform_data *data;
+ struct cpdma_params dma_params;
+ struct device *dev = cpsw->dev;
+ void __iomem *cpts_regs;
+ int ret = 0, i;
+
+ data = &cpsw->data;
+ cpsw->rx_ch_num = 1;
+ cpsw->tx_ch_num = 1;
+
+ cpsw->version = readl(&cpsw->regs->id_ver);
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ memset(&ale_params, 0, sizeof(ale_params));
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+ cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
+ dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
+ slave_offset = CPSW1_SLAVE_OFFSET;
+ slave_size = CPSW1_SLAVE_SIZE;
+ sliver_offset = CPSW1_SLIVER_OFFSET;
+ dma_params.desc_mem_phys = 0;
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
+ cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+ cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
+ dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
+ slave_offset = CPSW2_SLAVE_OFFSET;
+ slave_size = CPSW2_SLAVE_SIZE;
+ sliver_offset = CPSW2_SLIVER_OFFSET;
+ dma_params.desc_mem_phys = desc_mem_phys;
+ break;
+ default:
+ dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ void __iomem *regs = cpsw->regs;
+
+ slave->slave_num = i;
+ slave->data = &cpsw->data.slave_data[i];
+ slave->regs = regs + slave_offset;
+ slave->port_vlan = slave->data->dual_emac_res_vlan;
+ slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
+ if (IS_ERR(slave->mac_sl))
+ return PTR_ERR(slave->mac_sl);
+
+ slave_offset += slave_size;
+ sliver_offset += SLIVER_SIZE;
+ }
+
+ ale_params.dev = dev;
+ ale_params.ale_ageout = ale_ageout;
+ ale_params.ale_entries = data->ale_entries;
+ ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
+
+ cpsw->ale = cpsw_ale_create(&ale_params);
+ if (!cpsw->ale) {
+ dev_err(dev, "error initializing ale engine\n");
+ return -ENODEV;
+ }
+
+ dma_params.dev = dev;
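+ /* the remaining CPDMA register pointers are fixed offsets from the
+ * dmaregs and txhdp bases selected for this IP version above
+ */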
+ dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
+ dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
+ dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
+ dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
+ dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
+
+ dma_params.num_chan = data->channels;
+ dma_params.has_soft_reset = true;
+ dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
+ dma_params.desc_mem_size = data->bd_ram_size;
+ dma_params.desc_align = 16;
+ dma_params.has_ext_regs = true;
+ dma_params.desc_hw_addr = dma_params.desc_mem_phys;
+ dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
+ dma_params.descs_pool_size = descs_pool_size;
+
+ cpsw->dma = cpdma_ctlr_create(&dma_params);
+ if (!cpsw->dma) {
+ dev_err(dev, "error initializing dma\n");
+ return -ENOMEM;
+ }
+
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
+ if (IS_ERR(cpsw->cpts)) {
+ ret = PTR_ERR(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
new file mode 100644
index 000000000000..04795b97ee71
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch Driver
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+#define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+
+#include "davinci_cpdma.h"
+
+#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
+ NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_INTR | \
+ NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
+ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_RX_STATUS)
+
+#define cpsw_info(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_info(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_err(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_err(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_dbg(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_dbg(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_notice(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_notice(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define ALE_ALL_PORTS 0x7
+
+#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
+#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
+#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
+
+#define CPSW_VERSION_1 0x19010a
+#define CPSW_VERSION_2 0x19010c
+#define CPSW_VERSION_3 0x19010f
+#define CPSW_VERSION_4 0x190112
+
+#define HOST_PORT_NUM 0
+#define CPSW_ALE_PORTS_NUM 3
+#define SLIVER_SIZE 0x40
+
+#define CPSW1_HOST_PORT_OFFSET 0x028
+#define CPSW1_SLAVE_OFFSET 0x050
+#define CPSW1_SLAVE_SIZE 0x040
+#define CPSW1_CPDMA_OFFSET 0x100
+#define CPSW1_STATERAM_OFFSET 0x200
+#define CPSW1_HW_STATS 0x400
+#define CPSW1_CPTS_OFFSET 0x500
+#define CPSW1_ALE_OFFSET 0x600
+#define CPSW1_SLIVER_OFFSET 0x700
+
+#define CPSW2_HOST_PORT_OFFSET 0x108
+#define CPSW2_SLAVE_OFFSET 0x200
+#define CPSW2_SLAVE_SIZE 0x100
+#define CPSW2_CPDMA_OFFSET 0x800
+#define CPSW2_HW_STATS 0x900
+#define CPSW2_STATERAM_OFFSET 0xa00
+#define CPSW2_CPTS_OFFSET 0xc00
+#define CPSW2_ALE_OFFSET 0xd00
+#define CPSW2_SLIVER_OFFSET 0xd80
+#define CPSW2_BD_OFFSET 0x2000
+
+#define CPDMA_RXTHRESH 0x0c0
+#define CPDMA_RXFREE 0x0e0
+#define CPDMA_TXHDP 0x00
+#define CPDMA_RXHDP 0x20
+#define CPDMA_TXCP 0x40
+#define CPDMA_RXCP 0x60
+
+#define CPSW_POLL_WEIGHT 64
+#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
+#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
+#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
+ ETH_FCS_LEN +\
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE)
+
+#define RX_PRIORITY_MAPPING 0x76543210
+#define TX_PRIORITY_MAPPING 0x33221100
+#define CPDMA_TX_PRIORITY_MAP 0x76543210
+
+#define CPSW_VLAN_AWARE BIT(1)
+#define CPSW_RX_VLAN_ENCAP BIT(2)
+#define CPSW_ALE_VLAN_AWARE 1
+
+#define CPSW_FIFO_NORMAL_MODE (0 << 16)
+#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16)
+#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16)
+
+#define CPSW_INTPACEEN (0x3f << 16)
+#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
+#define CPSW_CMINTMAX_CNT 63
+#define CPSW_CMINTMIN_CNT 2
+#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
+#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
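+/* with the counts above the accepted coalescing interval works out to
+ * roughly 16..500 usecs, before cpsw_set_coalesce() dilates the prescaler
+ */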
+
+#define IRQ_NUM 2
+#define CPSW_MAX_QUEUES 8
+#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
+#define CPSW_FIFO_SHAPE_EN_SHIFT 16
+#define CPSW_FIFO_RATE_EN_SHIFT 20
+#define CPSW_TC_NUM 4
+#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
+#define CPSW_PCT_MASK 0x7f
+
+#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
+#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
+#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16
+#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8
+#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0)
+enum {
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
+};
+
+struct cpsw_wr_regs {
+ u32 id_ver;
+ u32 soft_reset;
+ u32 control;
+ u32 int_control;
+ u32 rx_thresh_en;
+ u32 rx_en;
+ u32 tx_en;
+ u32 misc_en;
+ u32 mem_allign1[8];
+ u32 rx_thresh_stat;
+ u32 rx_stat;
+ u32 tx_stat;
+ u32 misc_stat;
+ u32 mem_allign2[8];
+ u32 rx_imax;
+ u32 tx_imax;
+};
+
+struct cpsw_ss_regs {
+ u32 id_ver;
+ u32 control;
+ u32 soft_reset;
+ u32 stat_port_en;
+ u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+ u32 vlan_ltype;
+ u32 ts_ltype;
+ u32 dlr_ltype;
+};
+
+/* CPSW_PORT_V1 */
+#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
+#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
+#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
+#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
+#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
+#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
+#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
+
+/* CPSW_PORT_V2 */
+#define CPSW2_CONTROL 0x00 /* Control Register */
+#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
+#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
+#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
+#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
+#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
+
+/* CPSW_PORT_V1 and V2 */
+#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
+#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
+#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
+
+/* CPSW_PORT_V2 only */
+#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
+
+/* Bit definitions for the CPSW2_CONTROL register */
+#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
+#define TS_107 BIT(15) /* Time Sync Dest IP Address 107 */
+#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
+#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
+
+#define CTRL_V2_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
+
+#define CTRL_V3_TS_BITS \
+ (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+ TS_LTYPE1_EN | VLAN_LTYPE1_EN)
+
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
+
+/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
+#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
+#define TS_SEQ_ID_OFFSET_MASK (0x3f)
+#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
+#define TS_MSG_TYPE_EN_MASK (0xffff)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
+
+/* Bit definitions for the CPSW1_TS_CTL register */
+#define CPSW_V1_TS_RX_EN BIT(0)
+#define CPSW_V1_TS_TX_EN BIT(4)
+#define CPSW_V1_MSG_TYPE_OFS 16
+
+/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
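+/* one table serves both directions: rx and tx channels share
+ * struct cpdma_chan_stats, only the type tag of the macros differs
+ */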
+
+#define CPSW_MAX_BLKS_TX 15
+#define CPSW_MAX_BLKS_TX_SHIFT 4
+#define CPSW_MAX_BLKS_RX 5
+
+struct cpsw_host_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 tx_in_ctl;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 cpdma_tx_pri_map;
+ u32 cpdma_rx_chan_map;
+};
+
+struct cpsw_slave_data {
+ struct device_node *phy_node;
+ char phy_id[MII_BUS_ID_SIZE];
+ int phy_if;
+ u8 mac_addr[ETH_ALEN];
+ u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
+ struct phy *ifphy;
+};
+
+struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
+ u32 ss_reg_ofs; /* Subsystem control register offset */
+ u32 channels; /* number of cpdma channels (symmetric) */
+ u32 slaves; /* number of slave cpgmac ports */
+ u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 ale_entries; /* ale table size */
+ u32 bd_ram_size; /* buffer descriptor ram size */
+ u32 mac_control; /* Mac control register */
+ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode */
+ bool dual_emac; /* Enable Dual EMAC mode */
+};
+
+struct cpsw_slave {
+ void __iomem *regs;
+ int slave_num;
+ u32 mac_control;
+ struct cpsw_slave_data *data;
+ struct phy_device *phy;
+ struct net_device *ndev;
+ u32 port_vlan;
+ struct cpsw_sl *mac_sl;
+};
+
+static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+{
+ return readl_relaxed(slave->regs + offset);
+}
+
+static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
+{
+ writel_relaxed(val, slave->regs + offset);
+}
+
+struct cpsw_vector {
+ struct cpdma_chan *ch;
+ int budget;
+};
+
+struct cpsw_common {
+ struct device *dev;
+ struct cpsw_platform_data data;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+ struct cpsw_ss_regs __iomem *regs;
+ struct cpsw_wr_regs __iomem *wr_regs;
+ u8 __iomem *hw_stats;
+ struct cpsw_host_regs __iomem *host_port_regs;
+ u32 version;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
+ int rx_packet_max;
+ int descs_pool_size;
+ struct cpsw_slave *slaves;
+ struct cpdma_ctlr *dma;
+ struct cpsw_vector txv[CPSW_MAX_QUEUES];
+ struct cpsw_vector rxv[CPSW_MAX_QUEUES];
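+ /* coal_intvl is in usecs; program roughly the allowed number of
+ * interrupts per millisecond, scaled by the extra divider chosen above
+ */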
+ struct cpsw_ale *ale;
+ bool quirk_irq;
+ bool rx_irq_disabled;
+ bool tx_irq_disabled;
+ u32 irqs_table[IRQ_NUM];
+ struct cpts *cpts;
+ int rx_ch_num, tx_ch_num;
+ int speed;
+ int usage_count;
+};
+
+struct cpsw_priv {
+ struct net_device *ndev;
+ struct device *dev;
+ u32 msg_enable;
+ u8 mac_addr[ETH_ALEN];
+ bool rx_pause;
+ bool tx_pause;
+ bool mqprio_hw;
+ int fifo_bw[CPSW_TC_NUM];
+ int shp_cfg_speed;
+ int tx_ts_enabled;
+ int rx_ts_enabled;
+ u32 emac_port;
+ struct cpsw_common *cpsw;
+};
+
+#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
+#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
+
+#define cpsw_slave_index(cpsw, priv) \
+ ((cpsw->data.dual_emac) ? priv->emac_port : \
+ cpsw->data.active_slave)
+
+static inline int cpsw_get_slave_port(u32 slave_num)
+{
+ return slave_num + 1;
+}
+
+struct addr_sync_ctx {
+ struct net_device *ndev;
+ const u8 *addr; /* address to be synched */
+ int consumed; /* number of address instances */
+ int flush; /* flush flag */
+};
+
+int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
+ int ale_ageout, phys_addr_t desc_mem_phys,
+ int descs_pool_size);
+void cpsw_split_res(struct cpsw_common *cpsw);
+int cpsw_fill_rx_channels(struct cpsw_priv *priv);
+void cpsw_intr_enable(struct cpsw_common *cpsw);
+void cpsw_intr_disable(struct cpsw_common *cpsw);
+void cpsw_tx_handler(void *token, int len, int status);
+
+/* ethtool */
+u32 cpsw_get_msglevel(struct net_device *ndev);
+void cpsw_set_msglevel(struct net_device *ndev, u32 value);
+int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal);
+int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal);
+int cpsw_get_sset_count(struct net_device *ndev, int sset);
+void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data);
+void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data);
+void cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause);
+void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol);
+int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol);
+int cpsw_get_regs_len(struct net_device *ndev);
+void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p);
+int cpsw_ethtool_op_begin(struct net_device *ndev);
+void cpsw_ethtool_op_complete(struct net_device *ndev);
+void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch);
+int cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd);
+int cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd);
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_nway_reset(struct net_device *ndev);
+void cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering);
+int cpsw_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering);
+int cpsw_set_channels_common(struct net_device *ndev,
+ struct ethtool_channels *chs,
+ cpdma_handler_fn rx_handler);
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info);
+
+#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */
diff --git a/drivers/net/ethernet/ti/cpsw_sl.c b/drivers/net/ethernet/ti/cpsw_sl.c
new file mode 100644
index 000000000000..0c7531cb0f39
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_sl.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/
+ * Ethernet MAC Sliver (CPGMAC_SL)
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "cpsw_sl.h"
+
+#define CPSW_SL_REG_NOTUSED U16_MAX
+
+static const u16 cpsw_sl_reg_map_cpsw[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = 0x14,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = 0x24,
+ [CPSW_SL_TX_GAP] = 0x28,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2hk[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = 0x24,
+ [CPSW_SL_TX_GAP] = CPSW_SL_REG_NOTUSED,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2x_xgbe[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_TX_GAP] = 0x28,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2elg_am65[] = {
+ [CPSW_SL_IDVER] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_MACCONTROL] = 0x00,
+ [CPSW_SL_MACSTATUS] = 0x04,
+ [CPSW_SL_SOFT_RESET] = 0x08,
+ [CPSW_SL_RX_MAXLEN] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_BOFFTEST] = 0x0c,
+ [CPSW_SL_RX_PAUSE] = 0x10,
+ [CPSW_SL_TX_PAUSE] = 0x40,
+ [CPSW_SL_EMCONTROL] = 0x70,
+ [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_TX_GAP] = 0x74,
+};
+
+#define CPSW_SL_SOFT_RESET_BIT BIT(0)
+
+#define CPSW_SL_STATUS_PN_IDLE BIT(31)
+#define CPSW_SL_AM65_STATUS_PN_E_IDLE BIT(30)
+#define CPSW_SL_AM65_STATUS_PN_P_IDLE BIT(29)
+#define CPSW_SL_AM65_STATUS_PN_TX_IDLE BIT(28)
+
+#define CPSW_SL_STATUS_IDLE_MASK_BASE (CPSW_SL_STATUS_PN_IDLE)
+
+#define CPSW_SL_STATUS_IDLE_MASK_K3 \
+ (CPSW_SL_STATUS_IDLE_MASK_BASE | CPSW_SL_AM65_STATUS_PN_E_IDLE | \
+ CPSW_SL_AM65_STATUS_PN_P_IDLE | CPSW_SL_AM65_STATUS_PN_TX_IDLE)
+
+#define CPSW_SL_CTL_FUNC_BASE \
+ (CPSW_SL_CTL_FULLDUPLEX |\
+ CPSW_SL_CTL_LOOPBACK |\
+ CPSW_SL_CTL_RX_FLOW_EN |\
+ CPSW_SL_CTL_TX_FLOW_EN |\
+ CPSW_SL_CTL_GMII_EN |\
+ CPSW_SL_CTL_TX_PACE |\
+ CPSW_SL_CTL_GIG |\
+ CPSW_SL_CTL_CMD_IDLE |\
+ CPSW_SL_CTL_IFCTL_A |\
+ CPSW_SL_CTL_IFCTL_B |\
+ CPSW_SL_CTL_GIG_FORCE |\
+ CPSW_SL_CTL_EXT_EN |\
+ CPSW_SL_CTL_RX_CEF_EN |\
+ CPSW_SL_CTL_RX_CSF_EN |\
+ CPSW_SL_CTL_RX_CMF_EN)
+
+struct cpsw_sl {
+ struct device *dev;
+ void __iomem *sl_base;
+ const u16 *regs;
+ u32 control_features;
+ u32 idle_mask;
+};
+
+struct cpsw_sl_dev_id {
+ const char *device_id;
+ const u16 *regs;
+ const u32 control_features;
+ const u32 regs_offset;
+ const u32 idle_mask;
+};
+
+static const struct cpsw_sl_dev_id cpsw_sl_id_match[] = {
+ {
+ .device_id = "cpsw",
+ .regs = cpsw_sl_reg_map_cpsw,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_TX_SG_LIM_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2hk",
+ .regs = cpsw_sl_reg_map_66ak2hk,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2x_xgbe",
+ .regs = cpsw_sl_reg_map_66ak2x_xgbe,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_XGIG |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_XGMII_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2el",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO |
+ CPSW_SL_CTL_TX_SG_LIM_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2g",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO,
+ },
+ {
+ .device_id = "am65",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_XGIG |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_XGMII_EN |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO |
+ CPSW_SL_CTL_TX_SG_LIM_EN |
+ CPSW_SL_CTL_EXT_EN_XGIG,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_K3,
+ },
+ { },
+};
+
+u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg)
+{
+ int val;
+
+ if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) {
+ dev_err(sl->dev, "cpsw_sl: not sup r reg: %04X\n",
+ sl->regs[reg]);
+ return 0;
+ }
+
+ val = readl(sl->sl_base + sl->regs[reg]);
+ dev_dbg(sl->dev, "cpsw_sl: reg: %04X r 0x%08X\n", sl->regs[reg], val);
+ return val;
+}
+
+void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val)
+{
+ if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) {
+ dev_err(sl->dev, "cpsw_sl: not sup w reg: %04X\n",
+ sl->regs[reg]);
+ return;
+ }
+
+ dev_dbg(sl->dev, "cpsw_sl: reg: %04X w 0x%08X\n", sl->regs[reg], val);
+ writel(val, sl->sl_base + sl->regs[reg]);
+}
+
+static const struct cpsw_sl_dev_id *cpsw_sl_match_id(
+ const struct cpsw_sl_dev_id *id,
+ const char *device_id)
+{
+ if (!id || !device_id)
+ return NULL;
+
+ while (id->device_id) {
+ if (strcmp(device_id, id->device_id) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev,
+ void __iomem *sl_base)
+{
+ const struct cpsw_sl_dev_id *sl_dev_id;
+ struct cpsw_sl *sl;
+
+ sl = devm_kzalloc(dev, sizeof(struct cpsw_sl), GFP_KERNEL);
+ if (!sl)
+ return ERR_PTR(-ENOMEM);
+ sl->dev = dev;
+ sl->sl_base = sl_base;
+
+ sl_dev_id = cpsw_sl_match_id(cpsw_sl_id_match, device_id);
+ if (!sl_dev_id) {
+ dev_err(sl->dev, "cpsw_sl: dev_id %s not found.\n", device_id);
+ return ERR_PTR(-EINVAL);
+ }
+ sl->regs = sl_dev_id->regs;
+ sl->control_features = sl_dev_id->control_features;
+ sl->idle_mask = sl_dev_id->idle_mask;
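+ /* the 66ak2el/66ak2g/am65 maps carry an extra base offset
+ * (regs_offset, 0x330) which is applied to the sliver base here
+ */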
+ sl->sl_base += sl_dev_id->regs_offset;
+
+ return sl;
+}
+
+void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(tmo);
+
+ /* Set the soft reset bit */
+ cpsw_sl_reg_write(sl, CPSW_SL_SOFT_RESET, CPSW_SL_SOFT_RESET_BIT);
+
+ /* Wait for the bit to clear */
+ do {
+ usleep_range(100, 200);
+ } while ((cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) &
+ CPSW_SL_SOFT_RESET_BIT) &&
+ time_after(timeout, jiffies));
+
+ if (cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) & CPSW_SL_SOFT_RESET_BIT)
+ dev_err(sl->dev, "cpsw_sl failed to soft-reset.\n");
+}
+
+u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs)
+{
+ u32 val;
+
+ if (ctl_funcs & ~sl->control_features) {
+ dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n",
+ ctl_funcs & (~sl->control_features));
+ return -EINVAL;
+ }
+
+ val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL);
+ val |= ctl_funcs;
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val);
+
+ return 0;
+}
+
+u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs)
+{
+ u32 val;
+
+ if (ctl_funcs & ~sl->control_features) {
+ dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n",
+ ctl_funcs & (~sl->control_features));
+ return -EINVAL;
+ }
+
+ val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL);
+ val &= ~ctl_funcs;
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val);
+
+ return 0;
+}
+
+void cpsw_sl_ctl_reset(struct cpsw_sl *sl)
+{
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, 0);
+}
+
+int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(tmo);
+
+ do {
+ usleep_range(100, 200);
+ } while (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) &
+ sl->idle_mask) && time_after(timeout, jiffies));
+
+ if (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) & sl->idle_mask)) {
+ dev_err(sl->dev, "cpsw_sl failed to reach idle\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/cpsw_sl.h b/drivers/net/ethernet/ti/cpsw_sl.h
new file mode 100644
index 000000000000..a6d06a5a420f
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_sl.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/
+ * Ethernet MAC Sliver (CPGMAC_SL) APIs
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#ifndef __TI_CPSW_SL_H__
+#define __TI_CPSW_SL_H__
+
+#include <linux/device.h>
+
+enum cpsw_sl_regs {
+ CPSW_SL_IDVER,
+ CPSW_SL_MACCONTROL,
+ CPSW_SL_MACSTATUS,
+ CPSW_SL_SOFT_RESET,
+ CPSW_SL_RX_MAXLEN,
+ CPSW_SL_BOFFTEST,
+ CPSW_SL_RX_PAUSE,
+ CPSW_SL_TX_PAUSE,
+ CPSW_SL_EMCONTROL,
+ CPSW_SL_RX_PRI_MAP,
+ CPSW_SL_TX_GAP,
+};
+
+enum {
+ CPSW_SL_CTL_FULLDUPLEX = BIT(0), /* Full Duplex mode */
+ CPSW_SL_CTL_LOOPBACK = BIT(1), /* Loop Back Mode */
+ CPSW_SL_CTL_MTEST = BIT(2), /* Manufacturing Test mode */
+ CPSW_SL_CTL_RX_FLOW_EN = BIT(3), /* Receive Flow Control Enable */
+ CPSW_SL_CTL_TX_FLOW_EN = BIT(4), /* Transmit Flow Control Enable */
+ CPSW_SL_CTL_GMII_EN = BIT(5), /* GMII Enable */
+ CPSW_SL_CTL_TX_PACE = BIT(6), /* Transmit Pacing Enable */
+ CPSW_SL_CTL_GIG = BIT(7), /* Gigabit Mode */
+ CPSW_SL_CTL_XGIG = BIT(8), /* 10 Gigabit Mode */
+ CPSW_SL_CTL_TX_SHORT_GAP_EN = BIT(10), /* Transmit Short Gap Enable */
+ CPSW_SL_CTL_CMD_IDLE = BIT(11), /* Command Idle */
+ CPSW_SL_CTL_CRC_TYPE = BIT(12), /* Port CRC Type */
+ CPSW_SL_CTL_XGMII_EN = BIT(13), /* XGMII Enable */
+ CPSW_SL_CTL_IFCTL_A = BIT(15), /* Interface Control A */
+ CPSW_SL_CTL_IFCTL_B = BIT(16), /* Interface Control B */
+ CPSW_SL_CTL_GIG_FORCE = BIT(17), /* Gigabit Mode Force */
+ CPSW_SL_CTL_EXT_EN = BIT(18), /* External Control Enable */
+ CPSW_SL_CTL_EXT_EN_RX_FLO = BIT(19), /* Ext RX Flow Control Enable */
+ CPSW_SL_CTL_EXT_EN_TX_FLO = BIT(20), /* Ext TX Flow Control Enable */
+ CPSW_SL_CTL_TX_SG_LIM_EN = BIT(21), /* TX Short Gap Limit Enable */
+ CPSW_SL_CTL_RX_CEF_EN = BIT(22), /* RX Copy Error Frames Enable */
+ CPSW_SL_CTL_RX_CSF_EN = BIT(23), /* RX Copy Short Frames Enable */
+ CPSW_SL_CTL_RX_CMF_EN = BIT(24), /* RX Copy MAC Control Frames Enable */
+ CPSW_SL_CTL_EXT_EN_XGIG = BIT(25), /* Ext XGIG Control En, k3 only */
+
+ CPSW_SL_CTL_FUNCS_COUNT
+};
+
+struct cpsw_sl;
+
+struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev,
+ void __iomem *sl_base);
+
+void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo);
+
+u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs);
+u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs);
+void cpsw_sl_ctl_reset(struct cpsw_sl *sl);
+int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo);
+
+u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg);
+void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val);
+
+#endif /* __TI_CPSW_SL_H__ */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 2a9ba4acd7fa..e257018ada71 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* TI Common Platform Time Sync
*
* Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/err.h>
#include <linux/if.h>
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index d2c7decd59b6..024aab6af12f 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* TI Common Platform Time Sync
*
* Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _TI_CPTS_H_
#define _TI_CPTS_H_
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 4236dcdd5634..35bf14d8e7af 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments CPDMA Driver
*
* Copyright (C) 2010 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -527,7 +520,6 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->num_chan = CPDMA_MAX_CHANNELS;
return ctlr;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
@@ -588,7 +580,6 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
@@ -621,7 +612,6 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
@@ -639,7 +629,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
cpdma_desc_pool_destroy(ctlr);
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
@@ -660,25 +649,21 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
-EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);
u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
-EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
int rx, int desc_num,
@@ -774,7 +759,6 @@ int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);
/* cpdma_chan_set_weight - set weight of a channel in percentage.
@@ -807,7 +791,6 @@ int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);
/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
* Should be called before cpdma_chan_set_rate.
@@ -822,7 +805,6 @@ u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
return DIV_ROUND_UP(divident, divisor);
}
-EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
* The bandwidth * limited channels have to be in order beginning from lowest.
@@ -867,7 +849,6 @@ err:
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);
u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
@@ -880,7 +861,6 @@ u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
return rate;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler, int rx_type)
@@ -940,7 +920,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
spin_unlock_irqrestore(&ctlr->lock, flags);
return chan;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_create);
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
@@ -953,7 +932,6 @@ int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
return desc_num;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
@@ -975,7 +953,6 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats)
@@ -988,7 +965,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
static void __cpdma_chan_submit(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc)
@@ -1095,7 +1071,6 @@ unlock_ret:
spin_unlock_irqrestore(&chan->lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_submit);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
@@ -1110,7 +1085,6 @@ bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
spin_unlock_irqrestore(&chan->lock, flags);
return free_tx_desc;
}
-EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
static void __cpdma_chan_free(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc,
@@ -1204,7 +1178,6 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota)
}
return used;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_process);
int cpdma_chan_start(struct cpdma_chan *chan)
{
@@ -1224,7 +1197,6 @@ int cpdma_chan_start(struct cpdma_chan *chan)
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_start);
int cpdma_chan_stop(struct cpdma_chan *chan)
{
@@ -1287,7 +1259,6 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_stop);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
@@ -1329,25 +1300,19 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_control_set);
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
return ctlr->num_rx_desc;
}
-EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);
int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
return ctlr->num_tx_desc;
}
-EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);
void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
ctlr->num_rx_desc = num_rx_desc;
ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
}
-EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index d399af5389b8..10376062dafa 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -1,16 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Texas Instruments CPDMA Driver
*
* Copyright (C) 2010 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DAVINCI_CPDMA_H__
#define __DAVINCI_CPDMA_H__
@@ -34,8 +27,8 @@ struct cpdma_params {
int num_chan;
bool has_soft_reset;
int min_packet_size;
- u32 desc_mem_phys;
- u32 desc_hw_addr;
+ dma_addr_t desc_mem_phys;
+ dma_addr_t desc_hw_addr;
int desc_mem_size;
int desc_align;
u32 bus_freq_mhz;
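Note: the desc_mem_phys and desc_hw_addr fields above widen from u32 to dma_addr_t, presumably so the descriptor base addresses are stored at the width the DMA mapping API uses; with a 64-bit dma_addr_t a u32 field would truncate the handle. A minimal sketch of the hazard, using hypothetical struct and function names that are not part of this patch:

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Hypothetical configuration structs, shown only to contrast field widths. */
struct cfg_narrow { u32        desc_mem_phys; }; /* may truncate on 64-bit DMA */
struct cfg_wide   { dma_addr_t desc_mem_phys; }; /* matches the DMA API width */

static void store_desc_base(struct cfg_wide *cfg, struct device *dev,
                            void *cpu_addr, size_t size)
{
        /* dma_map_single() returns a dma_addr_t; keeping the field the same
         * type avoids losing the upper bits of the handle.
         */
        cfg->desc_mem_phys = dma_map_single(dev, cpu_addr, size, DMA_TO_DEVICE);
}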
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 57450b174fc4..4bf65cab79e6 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* DaVinci Ethernet Medium Access Controller
*
@@ -6,21 +7,6 @@
* Copyright (C) 2009 Texas Instruments.
*
* ---------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * ---------------------------------------------------------------------------
* History:
* 0-5 A number of folks worked on this driver in bits and pieces but the major
* contribution came from Suraj Iyer and Anant Gole
@@ -1714,7 +1700,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
if (!is_valid_ether_addr(pdata->mac_addr)) {
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(pdata->mac_addr, mac_addr);
}
@@ -1912,15 +1898,11 @@ static int davinci_emac_probe(struct platform_device *pdev)
ether_addr_copy(ndev->dev_addr, priv->mac_addr);
if (!is_valid_ether_addr(priv->mac_addr)) {
- /* Try nvmem if MAC wasn't passed over pdata or DT. */
- rc = nvmem_get_mac_address(&pdev->dev, priv->mac_addr);
- if (rc) {
- /* Use random MAC if still none obtained. */
- eth_hw_addr_random(ndev);
- memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
- dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
- priv->mac_addr);
- }
+ /* Use random MAC if still none obtained. */
+ eth_hw_addr_random(ndev);
+ memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
}
ndev->netdev_ops = &emac_netdev_ops;
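Note: the of_get_mac_address() call sites touched in this series now test IS_ERR() rather than NULL, since the helper reports failure through an ERR_PTR()-encoded value. A hedged sketch of the resulting pattern follows; the function name is hypothetical and the body only illustrates the same fallback logic used above:

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>

/* Copy the DT-provided MAC address if one is present,
 * otherwise fall back to a random locally administered address.
 */
static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
        const void *addr = of_get_mac_address(np);

        if (!IS_ERR(addr))
                ether_addr_copy(ndev->dev_addr, addr);
        else
                eth_hw_addr_random(ndev);
}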
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index a98aedae1b41..38b7f6d35759 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* DaVinci MDIO Module driver
*
@@ -7,22 +8,6 @@
*
* Copyright (C) 2009 Texas Instruments.
*
- * ---------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * ---------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -140,7 +125,7 @@ static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
static void davinci_mdio_enable(struct davinci_mdio_data *data)
{
/* set enable and clock divider */
- __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+ writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}
static int davinci_mdio_reset(struct mii_bus *bus)
@@ -159,7 +144,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
msleep(PHY_MAX_ADDR * data->access_time);
/* dump hardware version info */
- ver = __raw_readl(&data->regs->version);
+ ver = readl(&data->regs->version);
dev_info(data->dev,
"davinci mdio revision %d.%d, bus freq %ld\n",
(ver >> 8) & 0xff, ver & 0xff,
@@ -169,7 +154,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
goto done;
/* get phy mask from the alive register */
- phy_mask = __raw_readl(&data->regs->alive);
+ phy_mask = readl(&data->regs->alive);
if (phy_mask) {
/* restrict mdio bus to live phys only */
dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
@@ -196,11 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
u32 reg;
while (time_after(timeout, jiffies)) {
- reg = __raw_readl(&regs->user[0].access);
+ reg = readl(&regs->user[0].access);
if ((reg & USERACCESS_GO) == 0)
return 0;
- reg = __raw_readl(&regs->control);
+ reg = readl(&regs->control);
if ((reg & CONTROL_IDLE) == 0) {
usleep_range(100, 200);
continue;
@@ -216,7 +201,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
return -EAGAIN;
}
- reg = __raw_readl(&regs->user[0].access);
+ reg = readl(&regs->user[0].access);
if ((reg & USERACCESS_GO) == 0)
return 0;
@@ -263,7 +248,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (ret < 0)
break;
- __raw_writel(reg, &data->regs->user[0].access);
+ writel(reg, &data->regs->user[0].access);
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
@@ -271,7 +256,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (ret < 0)
break;
- reg = __raw_readl(&data->regs->user[0].access);
+ reg = readl(&data->regs->user[0].access);
ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
break;
}
@@ -307,7 +292,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (ret < 0)
break;
- __raw_writel(reg, &data->regs->user[0].access);
+ writel(reg, &data->regs->user[0].access);
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
@@ -412,9 +397,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs))
- return PTR_ERR(data->regs);
+ data->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!data->regs)
+ return -ENOMEM;
davinci_mdio_init_clk(data);
@@ -472,9 +457,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev)
u32 ctrl;
/* shutdown the scan state machine */
- ctrl = __raw_readl(&data->regs->control);
+ ctrl = readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
- __raw_writel(ctrl, &data->regs->control);
+ writel(ctrl, &data->regs->control);
wait_for_idle(data);
return 0;
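Note: the MDIO accessor changes above replace __raw_readl()/__raw_writel() with readl()/writel(), which add the byte-order conversion and ordering guarantees of the regular MMIO accessors that the __raw_* variants deliberately skip. A small sketch of the idiom with a hypothetical register layout (offsets and names below are illustrative, not the driver's):

#include <linux/io.h>

/* writel() orders the store against earlier normal memory writes and
 * performs the CPU-to-little-endian conversion the register bus expects;
 * __raw_writel() provides neither.
 */
static void example_enable(void __iomem *regs, u32 clk_div)
{
        writel(clk_div | (1u << 30), regs + 0x04);   /* hypothetical CONTROL */
}

static u32 example_version(void __iomem *regs)
{
        return readl(regs + 0x00);                   /* hypothetical VERSION */
}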
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index c4ffdf47bad5..43d5cd59b56b 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NetCP driver local header
*
@@ -8,15 +9,6 @@
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Wingman Kwok <w-kwok2@ti.com>
* Murali Karicheri <m-karicheri2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __NETCP_H__
#define __NETCP_H__
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index d847f672a705..642843945031 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Keystone NetCP Core driver
*
@@ -8,15 +9,6 @@
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Murali Karicheri <m-karicheri2@ti.com>
* Wingman Kwok <w-kwok2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
@@ -2045,7 +2037,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
devm_release_mem_region(dev, res.start, size);
} else {
mac_addr = of_get_mac_address(node_interface);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
else
eth_random_addr(ndev->dev_addr);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0a920c5936b2..ec179700c184 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Keystone GBE and XGBE subsystem code
*
@@ -7,15 +8,6 @@
* Cyril Chemparathy <cyril@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Wingman Kwok <w-kwok2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index 5d8419f658d0..f7cf56d6351d 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SGMI module initialisation
*
@@ -6,14 +7,6 @@
* Sandeep Paulraj <s-paulraj@ti.com>
* Wingman Kwok <w-kwok2@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "netcp.h"
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
index 33571acc52b6..112778aedd8a 100644
--- a/drivers/net/ethernet/ti/netcp_xgbepcsr.c
+++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* XGE PCSR module initialisation
*
@@ -5,14 +6,6 @@
* Authors: Sandeep Nair <sandeep_n@ti.com>
* WingMan Kwok <w-kwok2@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "netcp.h"
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 33949248c829..ab55416a10fa 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -571,7 +571,6 @@ static void rhine_ack_events(struct rhine_private *rp, u32 mask)
if (rp->quirks & rqStatusWBRace)
iowrite8(mask >> 16, ioaddr + IntrStatus2);
iowrite16(mask, ioaddr + IntrStatus);
- mmiowb();
}
/*
@@ -863,7 +862,6 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
iowrite16(enable_mask, ioaddr + IntrEnable);
- mmiowb();
}
return work_done;
}
@@ -1893,7 +1891,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
static void rhine_irq_disable(struct rhine_private *rp)
{
iowrite16(0x0000, rp->base + IntrEnable);
- mmiowb();
}
/* The interrupt handler does all of the Rx thread work and cleans up
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index d8ba512f166a..8788953eaafd 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -219,7 +219,6 @@ static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
{
__w5100_write_direct(ndev, addr, data);
- mmiowb();
return 0;
}
@@ -236,7 +235,6 @@ static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
{
__w5100_write_direct(ndev, addr, data >> 8);
__w5100_write_direct(ndev, addr + 1, data);
- mmiowb();
return 0;
}
@@ -260,8 +258,6 @@ static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
for (i = 0; i < len; i++, addr++)
__w5100_write_direct(ndev, addr, *buf++);
- mmiowb();
-
return 0;
}
@@ -375,7 +371,6 @@ static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
for (i = 0; i < len; i++)
*buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
- mmiowb();
spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return 0;
@@ -394,7 +389,6 @@ static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
for (i = 0; i < len; i++)
__w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
- mmiowb();
spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return 0;
@@ -1164,7 +1158,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
INIT_WORK(&priv->setrx_work, w5100_setrx_work);
INIT_WORK(&priv->restart_work, w5100_restart_work);
- if (mac_addr)
+ if (!IS_ERR_OR_NULL(mac_addr))
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
else
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index f9da5d6172e3..3f03eecc0479 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -141,7 +141,6 @@ static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
spin_lock_irqsave(&priv->reg_lock, flags);
w5300_write_direct(priv, W5300_IDM_AR, addr);
- mmiowb();
data = w5300_read_direct(priv, W5300_IDM_DR);
spin_unlock_irqrestore(&priv->reg_lock, flags);
@@ -154,9 +153,7 @@ static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
spin_lock_irqsave(&priv->reg_lock, flags);
w5300_write_direct(priv, W5300_IDM_AR, addr);
- mmiowb();
w5300_write_direct(priv, W5300_IDM_DR, data);
- mmiowb();
spin_unlock_irqrestore(&priv->reg_lock, flags);
}
@@ -192,7 +189,6 @@ static int w5300_command(struct w5300_priv *priv, u16 cmd)
unsigned long timeout = jiffies + msecs_to_jiffies(100);
w5300_write(priv, W5300_S0_CR, cmd);
- mmiowb();
while (w5300_read(priv, W5300_S0_CR) != 0) {
if (time_after(jiffies, timeout))
@@ -241,18 +237,15 @@ static void w5300_write_macaddr(struct w5300_priv *priv)
w5300_write(priv, W5300_SHARH,
ndev->dev_addr[4] << 8 |
ndev->dev_addr[5]);
- mmiowb();
}
static void w5300_hw_reset(struct w5300_priv *priv)
{
w5300_write_direct(priv, W5300_MR, MR_RST);
- mmiowb();
mdelay(5);
w5300_write_direct(priv, W5300_MR, priv->indirect ?
MR_WDF(7) | MR_PB | MR_IND :
MR_WDF(7) | MR_PB);
- mmiowb();
w5300_write(priv, W5300_IMR, 0);
w5300_write_macaddr(priv);
@@ -264,24 +257,20 @@ static void w5300_hw_reset(struct w5300_priv *priv)
w5300_write32(priv, W5300_TMSRL, 64 << 24);
w5300_write32(priv, W5300_TMSRH, 0);
w5300_write(priv, W5300_MTYPE, 0x00ff);
- mmiowb();
}
static void w5300_hw_start(struct w5300_priv *priv)
{
w5300_write(priv, W5300_S0_MR, priv->promisc ?
S0_MR_MACRAW : S0_MR_MACRAW_MF);
- mmiowb();
w5300_command(priv, S0_CR_OPEN);
w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
w5300_write(priv, W5300_IMR, IR_S0);
- mmiowb();
}
static void w5300_hw_close(struct w5300_priv *priv)
{
w5300_write(priv, W5300_IMR, 0);
- mmiowb();
w5300_command(priv, S0_CR_CLOSE);
}
@@ -372,7 +361,6 @@ static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
netif_stop_queue(ndev);
w5300_write_frame(priv, skb->data, skb->len);
- mmiowb();
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
@@ -419,7 +407,6 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
if (rx_count < budget) {
napi_complete_done(napi, rx_count);
w5300_write(priv, W5300_IMR, IR_S0);
- mmiowb();
}
return rx_count;
@@ -434,7 +421,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
if (!ir)
return IRQ_NONE;
w5300_write(priv, W5300_S0_IR, ir);
- mmiowb();
if (ir & S0_IR_SENDOK) {
netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -444,7 +430,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
if (ir & S0_IR_RECV) {
if (napi_schedule_prep(&priv->napi)) {
w5300_write(priv, W5300_IMR, 0);
- mmiowb();
__napi_schedule(&priv->napi);
}
}
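Note: the mmiowb() calls removed from the WIZnet and VIA Rhine drivers above rely on spin_unlock() now providing the MMIO write ordering that mmiowb() used to supply on the architectures that needed it. A sketch of the pattern the removal depends on, with a hypothetical device structure and register offsets:

#include <linux/io.h>
#include <linux/spinlock.h>

struct example_priv {
        spinlock_t    reg_lock;
        void __iomem *regs;
};

/* Write an indirect register under the lock.  The spin_unlock_irqrestore()
 * now guarantees the MMIO writes reach the device before another CPU can
 * take the lock, so no explicit mmiowb() is required.
 */
static void example_write_indirect(struct example_priv *priv, u32 addr, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->reg_lock, flags);
        writel(addr, priv->regs + 0x00);      /* hypothetical address register */
        writel(val, priv->regs + 0x04);       /* hypothetical data register */
        spin_unlock_irqrestore(&priv->reg_lock, flags);
}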
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index da4ec575ccf9..db448fad621b 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -33,8 +33,7 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
- depends on (PPC || MICROBLAZE)
- depends on !64BIT || BROKEN
+ depends on PPC || MICROBLAZE || X86 || COMPILE_TEST
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 107575225383..1aeda084b8f1 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -334,6 +334,9 @@ struct temac_local {
/* Connection to PHY device */
struct device_node *phy_node;
+ /* For non-device-tree devices */
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ phy_interface_t phy_interface;
/* MDIO bus data */
struct mii_bus *mii_bus; /* MII bus reference */
@@ -344,8 +347,10 @@ struct temac_local {
#ifdef CONFIG_PPC_DCR
dcr_host_t sdma_dcrs;
#endif
- u32 (*dma_in)(struct temac_local *, int);
- void (*dma_out)(struct temac_local *, int, u32);
+ u32 (*temac_ior)(struct temac_local *lp, int offset);
+ void (*temac_iow)(struct temac_local *lp, int offset, u32 value);
+ u32 (*dma_in)(struct temac_local *lp, int reg);
+ void (*dma_out)(struct temac_local *lp, int reg, u32 value);
int tx_irq;
int rx_irq;
@@ -353,7 +358,10 @@ struct temac_local {
struct sk_buff **rx_skb;
spinlock_t rx_lock;
- struct mutex indirect_mutex;
+	/* For synchronization of indirect register access. Must be a
+	 * mutex shared between interfaces in the same TEMAC block.
+ */
+ struct mutex *indirect_mutex;
u32 options; /* Current options word */
int last_link;
unsigned int temac_features;
@@ -367,18 +375,24 @@ struct temac_local {
int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
+
+ /* DMA channel control setup */
+ u32 tx_chnl_ctrl;
+ u32 rx_chnl_ctrl;
};
+/* Wrappers for temac_ior()/temac_iow() function pointers above */
+#define temac_ior(lp, o) ((lp)->temac_ior(lp, o))
+#define temac_iow(lp, o, v) ((lp)->temac_iow(lp, o, v))
+
/* xilinx_temac.c */
-u32 temac_ior(struct temac_local *lp, int offset);
-void temac_iow(struct temac_local *lp, int offset, u32 value);
int temac_indirect_busywait(struct temac_local *lp);
u32 temac_indirect_in32(struct temac_local *lp, int reg);
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
/* xilinx_temac_mdio.c */
-int temac_mdio_setup(struct temac_local *lp, struct device_node *np);
+int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev);
void temac_mdio_teardown(struct temac_local *lp);
#endif /* XILINX_LL_TEMAC_H */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 44efffbe7970..47c45152132e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -51,6 +52,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/xilinx-ll-temac.h>
#include "ll_temac.h"
@@ -61,14 +63,24 @@
* Low level register access functions
*/
-u32 temac_ior(struct temac_local *lp, int offset)
+static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
- return in_be32(lp->regs + offset);
+ return ioread32be(lp->regs + offset);
}
-void temac_iow(struct temac_local *lp, int offset, u32 value)
+static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
- out_be32(lp->regs + offset, value);
+ return iowrite32be(value, lp->regs + offset);
+}
+
+static u32 _temac_ior_le(struct temac_local *lp, int offset)
+{
+ return ioread32(lp->regs + offset);
+}
+
+static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
+{
+ return iowrite32(value, lp->regs + offset);
}
int temac_indirect_busywait(struct temac_local *lp)
@@ -80,7 +92,7 @@ int temac_indirect_busywait(struct temac_local *lp)
WARN_ON(1);
return -ETIMEDOUT;
}
- msleep(1);
+ usleep_range(500, 1000);
}
return 0;
}
@@ -119,23 +131,35 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
}
/**
- * temac_dma_in32 - Memory mapped DMA read, this function expects a
- * register input that is based on DCR word addresses which
- * are then converted to memory mapped byte addresses
+ * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
+ * register input that is based on DCR word addresses which are then
+ * converted to memory mapped byte addresses. To be assigned to
+ * lp->dma_in.
*/
-static u32 temac_dma_in32(struct temac_local *lp, int reg)
+static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
- return in_be32(lp->sdma_regs + (reg << 2));
+ return ioread32be(lp->sdma_regs + (reg << 2));
+}
+
+static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
+{
+ return ioread32(lp->sdma_regs + (reg << 2));
}
/**
- * temac_dma_out32 - Memory mapped DMA read, this function expects a
- * register input that is based on DCR word addresses which
- * are then converted to memory mapped byte addresses
+ * temac_dma_out32_* - Memory mapped DMA write, these functions expect
+ * a register input that is based on DCR word addresses which are then
+ * converted to memory mapped byte addresses. To be assigned to
+ * lp->dma_out.
*/
-static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
+static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
+{
+ iowrite32be(value, lp->sdma_regs + (reg << 2));
+}
+
+static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
- out_be32(lp->sdma_regs + (reg << 2), value);
+ iowrite32(value, lp->sdma_regs + (reg << 2));
}
/* DMA register access functions can be DCR based or memory mapped.
@@ -187,7 +211,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
/*
* temac_dcr_setup - This is a stub for when DCR is not supported,
- * such as with MicroBlaze
+ * such as with MicroBlaze and x86
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
struct device_node *np)
@@ -225,7 +249,6 @@ static void temac_dma_bd_release(struct net_device *ndev)
dma_free_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
lp->tx_bd_v, lp->tx_bd_p);
- kfree(lp->rx_skb);
}
/**
@@ -235,9 +258,11 @@ static int temac_dma_bd_init(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct sk_buff *skb;
+ dma_addr_t skb_dma_addr;
int i;
- lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
+ lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
+ GFP_KERNEL);
if (!lp->rx_skb)
goto out;
@@ -256,13 +281,13 @@ static int temac_dma_bd_init(struct net_device *ndev)
goto out;
for (i = 0; i < TX_BD_NUM; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
+ lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
+ + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
}
for (i = 0; i < RX_BD_NUM; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
+ lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+ + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
@@ -271,31 +296,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_skb[i] = skb;
/* returns physical address of skb->data */
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- XTE_MAX_JUMBO_FRAME_SIZE,
- DMA_FROM_DEVICE);
- lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
- lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
+ lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
+ lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
}
- lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
- CHNL_CTRL_IRQ_EN |
- CHNL_CTRL_IRQ_DLY_EN |
- CHNL_CTRL_IRQ_COAL_EN);
- /* 0x10220483 */
- /* 0x00100483 */
- lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
- CHNL_CTRL_IRQ_EN |
- CHNL_CTRL_IRQ_DLY_EN |
- CHNL_CTRL_IRQ_COAL_EN |
- CHNL_CTRL_IRQ_IOE);
- /* 0xff010283 */
-
- lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
- lp->dma_out(lp, RX_TAILDESC_PTR,
- lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
- lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+ /* Configure DMA channel (irq setup) */
+ lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+ 0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
+ CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
+ CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
+ lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+ CHNL_CTRL_IRQ_IOE |
+ CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
+ CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
/* Init descriptor indexes */
lp->tx_bd_ci = 0;
@@ -303,6 +320,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
+ /* Enable RX DMA transfers */
+ wmb();
+ lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR,
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Prepare for TX DMA transfer */
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+
return 0;
out:
@@ -319,7 +345,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
struct temac_local *lp = netdev_priv(ndev);
/* set up unicast MAC address filter set its mac address */
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_UAW0_OFFSET,
(ndev->dev_addr[0]) |
(ndev->dev_addr[1] << 8) |
@@ -330,12 +356,12 @@ static void temac_do_set_mac_address(struct net_device *ndev)
temac_indirect_out32(lp, XTE_UAW1_OFFSET,
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
}
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
temac_do_set_mac_address(ndev);
@@ -359,7 +385,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
u32 multi_addr_msw, multi_addr_lsw, val;
int i;
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
/*
@@ -398,7 +424,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
}
static struct temac_option {
@@ -490,7 +516,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
struct temac_option *tp = &temac_options[0];
int reg;
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
while (tp->opt) {
reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
if (options & tp->opt)
@@ -499,7 +525,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
tp++;
}
lp->options |= options;
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
return 0;
}
@@ -518,7 +544,7 @@ static void temac_device_reset(struct net_device *ndev)
dev_dbg(&ndev->dev, "%s()\n", __func__);
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
/* Reset the receiver and wait for it to finish reset */
temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
timeout = 1000;
@@ -570,7 +596,7 @@ static void temac_device_reset(struct net_device *ndev)
temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
/* Sync default options with HW
* but leave receiver and transmitter disabled. */
@@ -598,7 +624,7 @@ static void temac_adjust_link(struct net_device *ndev)
/* hash together the state values to decide if something has changed */
link_state = phy->speed | (phy->duplex << 1) | phy->link;
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
if (lp->last_link != link_state) {
mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
@@ -614,23 +640,52 @@ static void temac_adjust_link(struct net_device *ndev)
lp->last_link = link_state;
phy_print_status(phy);
}
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
+}
+
+#ifdef CONFIG_64BIT
+
+static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
+{
+ bd->app3 = (u32)(((u64)p) >> 32);
+ bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
+}
+
+static void *ptr_from_txbd(struct cdmac_bd *bd)
+{
+ return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}
+#else
+
+static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
+{
+ bd->app4 = (u32)p;
+}
+
+static void *ptr_from_txbd(struct cdmac_bd *bd)
+{
+ return (void *)(bd->app4);
+}
+
+#endif
+
static void temac_start_xmit_done(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
unsigned int stat = 0;
+ struct sk_buff *skb;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- stat = cur_p->app0;
+ stat = be32_to_cpu(cur_p->app0);
while (stat & STS_CTRL_APP0_CMPLT) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
- DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
+ dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+ be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
+ skb = (struct sk_buff *)ptr_from_txbd(cur_p);
+ if (skb)
+ dev_consume_skb_irq(skb);
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
@@ -638,14 +693,14 @@ static void temac_start_xmit_done(struct net_device *ndev)
cur_p->app4 = 0;
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += cur_p->len;
+ ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
lp->tx_bd_ci++;
if (lp->tx_bd_ci >= TX_BD_NUM)
lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- stat = cur_p->app0;
+ stat = be32_to_cpu(cur_p->app0);
}
netif_wake_queue(ndev);
@@ -679,7 +734,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
- dma_addr_t start_p, tail_p;
+ dma_addr_t start_p, tail_p, skb_dma_addr;
int ii;
unsigned long num_frag;
skb_frag_t *frag;
@@ -689,7 +744,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (temac_check_tx_bd_space(lp, num_frag)) {
+ if (temac_check_tx_bd_space(lp, num_frag + 1)) {
if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
@@ -700,16 +755,18 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned int csum_start_off = skb_checksum_start_offset(skb);
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
- cur_p->app0 |= 1; /* TX Checksum Enabled */
- cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+ cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
+ cur_p->app1 = cpu_to_be32((csum_start_off << 16)
+ | csum_index_off);
cur_p->app2 = 0; /* initial checksum seed */
}
- cur_p->app0 |= STS_CTRL_APP0_SOP;
- cur_p->len = skb_headlen(skb);
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- cur_p->app4 = (unsigned long)skb;
+ cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ cur_p->len = cpu_to_be32(skb_headlen(skb));
+ cur_p->phys = cpu_to_be32(skb_dma_addr);
+ ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
lp->tx_bd_tail++;
@@ -717,14 +774,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag), DMA_TO_DEVICE);
- cur_p->len = skb_frag_size(frag);
+ skb_dma_addr = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ cur_p->phys = cpu_to_be32(skb_dma_addr);
+ cur_p->len = cpu_to_be32(skb_frag_size(frag));
cur_p->app0 = 0;
frag++;
}
- cur_p->app0 |= STS_CTRL_APP0_EOP;
+ cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
@@ -734,6 +793,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
/* Kick off the transfer */
+ wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
return NETDEV_TX_OK;
@@ -746,7 +806,7 @@ static void ll_temac_recv(struct net_device *ndev)
struct sk_buff *skb, *new_skb;
unsigned int bdstat;
struct cdmac_bd *cur_p;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, skb_dma_addr;
int length;
unsigned long flags;
@@ -755,14 +815,14 @@ static void ll_temac_recv(struct net_device *ndev)
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- bdstat = cur_p->app0;
+ bdstat = be32_to_cpu(cur_p->app0);
while ((bdstat & STS_CTRL_APP0_CMPLT)) {
skb = lp->rx_skb[lp->rx_bd_ci];
- length = cur_p->app4 & 0x3FFF;
+ length = be32_to_cpu(cur_p->app4) & 0x3FFF;
- dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
- DMA_FROM_DEVICE);
+ dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+ XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
@@ -773,7 +833,12 @@ static void ll_temac_recv(struct net_device *ndev)
(skb->protocol == htons(ETH_P_IP)) &&
(skb->len > 64)) {
- skb->csum = cur_p->app3 & 0xFFFF;
+ /* Convert from device endianness (be32) to cpu
+ * endianness, and if necessary swap the bytes
+ * (back) for proper IP checksum byte order
+ * (be16).
+ */
+ skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
@@ -790,11 +855,12 @@ static void ll_temac_recv(struct net_device *ndev)
return;
}
- cur_p->app0 = STS_CTRL_APP0_IRQONEND;
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- XTE_MAX_JUMBO_FRAME_SIZE,
- DMA_FROM_DEVICE);
- cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
+ cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
+ skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ cur_p->phys = cpu_to_be32(skb_dma_addr);
+ cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
lp->rx_skb[lp->rx_bd_ci] = new_skb;
lp->rx_bd_ci++;
@@ -802,7 +868,7 @@ static void ll_temac_recv(struct net_device *ndev)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- bdstat = cur_p->app0;
+ bdstat = be32_to_cpu(cur_p->app0);
}
lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
@@ -820,8 +886,10 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
if (status & (IRQ_COAL | IRQ_DLY))
temac_start_xmit_done(lp->ndev);
- if (status & 0x080)
- dev_err(&ndev->dev, "DMA error 0x%x\n", status);
+ if (status & (IRQ_ERR | IRQ_DMAERR))
+ dev_err_ratelimited(&ndev->dev,
+ "TX error 0x%x TX_CHNL_STS=0x%08x\n",
+ status, lp->dma_in(lp, TX_CHNL_STS));
return IRQ_HANDLED;
}
@@ -838,6 +906,10 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
if (status & (IRQ_COAL | IRQ_DLY))
ll_temac_recv(lp->ndev);
+ if (status & (IRQ_ERR | IRQ_DMAERR))
+ dev_err_ratelimited(&ndev->dev,
+ "RX error 0x%x RX_CHNL_STS=0x%08x\n",
+ status, lp->dma_in(lp, RX_CHNL_STS));
return IRQ_HANDLED;
}
@@ -857,7 +929,14 @@ static int temac_open(struct net_device *ndev)
dev_err(lp->dev, "of_phy_connect() failed\n");
return -ENODEV;
}
-
+ phy_start(phydev);
+ } else if (strlen(lp->phy_name) > 0) {
+ phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
+ lp->phy_interface);
+ if (IS_ERR(phydev)) {
+ dev_err(lp->dev, "phy_connect() failed\n");
+ return PTR_ERR(phydev);
+ }
phy_start(phydev);
}
@@ -977,22 +1056,25 @@ static const struct ethtool_ops temac_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int temac_of_probe(struct platform_device *op)
+static int temac_probe(struct platform_device *pdev)
{
- struct device_node *np;
+ struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
struct temac_local *lp;
struct net_device *ndev;
+ struct resource *res;
const void *addr;
__be32 *p;
+ bool little_endian;
int rc = 0;
/* Init network device structure */
- ndev = alloc_etherdev(sizeof(*lp));
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
if (!ndev)
return -ENOMEM;
- platform_set_drvdata(op, ndev);
- SET_NETDEV_DEV(ndev, &op->dev);
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &temac_netdev_ops;
@@ -1014,89 +1096,196 @@ static int temac_of_probe(struct platform_device *op)
/* setup temac private info structure */
lp = netdev_priv(ndev);
lp->ndev = ndev;
- lp->dev = &op->dev;
+ lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
spin_lock_init(&lp->rx_lock);
- mutex_init(&lp->indirect_mutex);
+
+ /* Setup mutex for synchronization of indirect register access */
+ if (pdata) {
+ if (!pdata->indirect_mutex) {
+ dev_err(&pdev->dev,
+ "indirect_mutex missing in platform_data\n");
+ return -EINVAL;
+ }
+ lp->indirect_mutex = pdata->indirect_mutex;
+ } else {
+ lp->indirect_mutex = devm_kmalloc(&pdev->dev,
+ sizeof(*lp->indirect_mutex),
+ GFP_KERNEL);
+ mutex_init(lp->indirect_mutex);
+ }
/* map device registers */
- lp->regs = of_iomap(op->dev.of_node, 0);
- if (!lp->regs) {
- dev_err(&op->dev, "could not map temac regs.\n");
- rc = -ENOMEM;
- goto nodev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (IS_ERR(lp->regs)) {
+ dev_err(&pdev->dev, "could not map TEMAC registers\n");
+ return PTR_ERR(lp->regs);
+ }
+
+ /* Select register access functions with the specified
+ * endianness mode. Default for OF devices is big-endian.
+ */
+ little_endian = false;
+ if (temac_np) {
+ if (of_get_property(temac_np, "little-endian", NULL))
+ little_endian = true;
+ } else if (pdata) {
+ little_endian = pdata->reg_little_endian;
+ }
+ if (little_endian) {
+ lp->temac_ior = _temac_ior_le;
+ lp->temac_iow = _temac_iow_le;
+ } else {
+ lp->temac_ior = _temac_ior_be;
+ lp->temac_iow = _temac_iow_be;
}
/* Setup checksum offload, but default to off if not specified */
lp->temac_features = 0;
- p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
- if (p && be32_to_cpu(*p)) {
- lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ if (temac_np) {
+ p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
+ if (p && be32_to_cpu(*p))
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
+ if (p && be32_to_cpu(*p))
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+ } else if (pdata) {
+ if (pdata->txcsum)
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ if (pdata->rxcsum)
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+ }
+ if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
- }
- p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
- if (p && be32_to_cpu(*p))
- lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
-
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
- if (!np) {
- dev_err(&op->dev, "could not find DMA node\n");
- rc = -ENODEV;
- goto err_iounmap;
- }
- /* Setup the DMA register accesses, could be DCR or memory mapped */
- if (temac_dcr_setup(lp, op, np)) {
+ /* Setup LocalLink DMA */
+ if (temac_np) {
+ /* Find the DMA node, map the DMA registers, and
+ * decode the DMA IRQs.
+ */
+ dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
+ if (!dma_np) {
+ dev_err(&pdev->dev, "could not find DMA node\n");
+ return -ENODEV;
+ }
- /* no DCR in the device tree, try non-DCR */
- lp->sdma_regs = of_iomap(np, 0);
- if (lp->sdma_regs) {
- lp->dma_in = temac_dma_in32;
- lp->dma_out = temac_dma_out32;
- dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
- } else {
- dev_err(&op->dev, "unable to map DMA registers\n");
- of_node_put(np);
- goto err_iounmap;
+ /* Setup the DMA register accesses, could be DCR or
+ * memory mapped.
+ */
+ if (temac_dcr_setup(lp, pdev, dma_np)) {
+ /* no DCR in the device tree, try non-DCR */
+ lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
+ NULL);
+ if (IS_ERR(lp->sdma_regs)) {
+ dev_err(&pdev->dev,
+ "unable to map DMA registers\n");
+ of_node_put(dma_np);
+ return PTR_ERR(lp->sdma_regs);
+ }
+ if (of_get_property(dma_np, "little-endian", NULL)) {
+ lp->dma_in = temac_dma_in32_le;
+ lp->dma_out = temac_dma_out32_le;
+ } else {
+ lp->dma_in = temac_dma_in32_be;
+ lp->dma_out = temac_dma_out32_be;
+ }
+ dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
}
- }
- lp->rx_irq = irq_of_parse_and_map(np, 0);
- lp->tx_irq = irq_of_parse_and_map(np, 1);
+ /* Get DMA RX and TX interrupts */
+ lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
+ lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
- of_node_put(np); /* Finished with the DMA node; drop the reference */
+ /* Use defaults for IRQ delay/coalescing setup. These
+ * are configuration values, so they do not belong in
+ * the device tree.
+ */
+ lp->tx_chnl_ctrl = 0x10220000;
+ lp->rx_chnl_ctrl = 0xff070000;
+
+ /* Finished with the DMA node; drop the reference */
+ of_node_put(dma_np);
+ } else if (pdata) {
+ /* 2nd memory resource specifies DMA registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (IS_ERR(lp->sdma_regs)) {
+ dev_err(&pdev->dev,
+ "could not map DMA registers\n");
+ return PTR_ERR(lp->sdma_regs);
+ }
+ if (pdata->dma_little_endian) {
+ lp->dma_in = temac_dma_in32_le;
+ lp->dma_out = temac_dma_out32_le;
+ } else {
+ lp->dma_in = temac_dma_in32_be;
+ lp->dma_out = temac_dma_out32_be;
+ }
- if (!lp->rx_irq || !lp->tx_irq) {
- dev_err(&op->dev, "could not determine irqs\n");
- rc = -ENOMEM;
- goto err_iounmap_2;
+ /* Get DMA RX and TX interrupts */
+ lp->rx_irq = platform_get_irq(pdev, 0);
+ lp->tx_irq = platform_get_irq(pdev, 1);
+
+ /* IRQ delay/coalescing setup */
+ if (pdata->tx_irq_timeout || pdata->tx_irq_count)
+ lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
+ (pdata->tx_irq_count << 16);
+ else
+ lp->tx_chnl_ctrl = 0x10220000;
+ if (pdata->rx_irq_timeout || pdata->rx_irq_count)
+ lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
+ (pdata->rx_irq_count << 16);
+ else
+ lp->rx_chnl_ctrl = 0xff070000;
}
+ /* Error handle returned DMA RX and TX interrupts */
+ if (lp->rx_irq < 0) {
+ if (lp->rx_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "could not get DMA RX irq\n");
+ return lp->rx_irq;
+ }
+ if (lp->tx_irq < 0) {
+ if (lp->tx_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "could not get DMA TX irq\n");
+ return lp->tx_irq;
+ }
- /* Retrieve the MAC address */
- addr = of_get_mac_address(op->dev.of_node);
- if (!addr) {
- dev_err(&op->dev, "could not find MAC address\n");
- rc = -ENODEV;
- goto err_iounmap_2;
+ if (temac_np) {
+ /* Retrieve the MAC address */
+ addr = of_get_mac_address(temac_np);
+ if (IS_ERR(addr)) {
+ dev_err(&pdev->dev, "could not find MAC address\n");
+ return -ENODEV;
+ }
+ temac_init_mac_address(ndev, addr);
+ } else if (pdata) {
+ temac_init_mac_address(ndev, pdata->mac_addr);
}
- temac_init_mac_address(ndev, addr);
- rc = temac_mdio_setup(lp, op->dev.of_node);
+ rc = temac_mdio_setup(lp, pdev);
if (rc)
- dev_warn(&op->dev, "error registering MDIO bus\n");
-
- lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
- if (lp->phy_node)
- dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np);
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
+
+ if (temac_np) {
+ lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
+ if (lp->phy_node)
+ dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
+ } else if (pdata) {
+ snprintf(lp->phy_name, sizeof(lp->phy_name),
+ PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
+ lp->phy_interface = pdata->phy_interface;
+ }
/* Add the device attributes */
rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
if (rc) {
dev_err(lp->dev, "Error creating sysfs files\n");
- goto err_iounmap_2;
+ goto err_sysfs_create;
}
rc = register_netdev(lp->ndev);
@@ -1107,33 +1296,25 @@ static int temac_of_probe(struct platform_device *op)
return 0;
- err_register_ndev:
+err_register_ndev:
sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
- err_iounmap_2:
- if (lp->sdma_regs)
- iounmap(lp->sdma_regs);
- err_iounmap:
- iounmap(lp->regs);
- nodev:
- free_netdev(ndev);
- ndev = NULL;
+err_sysfs_create:
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ temac_mdio_teardown(lp);
return rc;
}
-static int temac_of_remove(struct platform_device *op)
+static int temac_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(op);
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct temac_local *lp = netdev_priv(ndev);
- temac_mdio_teardown(lp);
unregister_netdev(ndev);
sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
- of_node_put(lp->phy_node);
- lp->phy_node = NULL;
- iounmap(lp->regs);
- if (lp->sdma_regs)
- iounmap(lp->sdma_regs);
- free_netdev(ndev);
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ temac_mdio_teardown(lp);
return 0;
}
@@ -1146,16 +1327,16 @@ static const struct of_device_id temac_of_match[] = {
};
MODULE_DEVICE_TABLE(of, temac_of_match);
-static struct platform_driver temac_of_driver = {
- .probe = temac_of_probe,
- .remove = temac_of_remove,
+static struct platform_driver temac_driver = {
+ .probe = temac_probe,
+ .remove = temac_remove,
.driver = {
.name = "xilinx_temac",
.of_match_table = temac_of_match,
},
};
-module_platform_driver(temac_of_driver);
+module_platform_driver(temac_driver);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
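Note: the ptr_to_txbd()/ptr_from_txbd() helpers introduced above keep the skb pointer in the descriptor's spare app3/app4 words, splitting it across both on 64-bit builds. A standalone sketch of the same round trip, using a hypothetical descriptor type rather than the driver's cdmac_bd:

#include <linux/types.h>

/* Hypothetical descriptor with two spare 32-bit software words. */
struct example_bd {
        u32 app3;
        u32 app4;
};

static void example_ptr_to_bd(void *p, struct example_bd *bd)
{
#ifdef CONFIG_64BIT
        bd->app3 = (u32)((u64)p >> 32);          /* high half of the pointer */
        bd->app4 = (u32)((u64)p & 0xFFFFFFFF);   /* low half of the pointer */
#else
        bd->app4 = (u32)(uintptr_t)p;            /* pointer fits in one word */
#endif
}

static void *example_ptr_from_bd(struct example_bd *bd)
{
#ifdef CONFIG_64BIT
        return (void *)(((u64)bd->app3 << 32) | bd->app4);
#else
        return (void *)(uintptr_t)bd->app4;
#endif
}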
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index f5e83ac6f7e2..a4667326f745 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -14,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of_mdio.h>
+#include <linux/platform_data/xilinx-ll-temac.h>
#include "ll_temac.h"
@@ -28,10 +29,10 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
* in the LSW0 register */
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
phy_id, reg, rc);
@@ -49,25 +50,34 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
/* First write the desired value into the write data register
* and then write the address into the access initiator register
*/
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
return 0;
}
-int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
+int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
{
+ struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *np = dev_of_node(&pdev->dev);
struct mii_bus *bus;
u32 bus_hz;
int clk_div;
int rc;
struct resource res;
+ /* Get MDIO bus frequency (if specified) */
+ bus_hz = 0;
+ if (np)
+ of_property_read_u32(np, "clock-frequency", &bus_hz);
+ else if (pdata)
+ bus_hz = pdata->mdio_clk_freq;
+
/* Calculate a reasonable divisor for the clock rate */
clk_div = 0x3f; /* worst-case default setting */
- if (of_property_read_u32(np, "clock-frequency", &bus_hz) == 0) {
+ if (bus_hz != 0) {
clk_div = bus_hz / (2500 * 1000 * 2) - 1;
if (clk_div < 1)
clk_div = 1;
@@ -77,17 +87,23 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
/* Enable the MDIO bus by asserting the enable bit and writing
* in the clock config */
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
- bus = mdiobus_alloc();
+ bus = devm_mdiobus_alloc(&pdev->dev);
if (!bus)
return -ENOMEM;
- of_address_to_resource(np, 0, &res);
- snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
- (unsigned long long)res.start);
+ if (np) {
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ } else if (pdata) {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ pdata->mdio_bus_id);
+ }
+
bus->priv = lp;
bus->name = "Xilinx TEMAC MDIO";
bus->read = temac_mdio_read;
@@ -98,23 +114,16 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
rc = of_mdiobus_register(bus, np);
if (rc)
- goto err_register;
+ return rc;
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
temac_indirect_in32(lp, XTE_MC_OFFSET));
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
return 0;
-
- err_register:
- mdiobus_free(bus);
- return rc;
}
void temac_mdio_teardown(struct temac_local *lp)
{
mdiobus_unregister(lp->mii_bus);
- mdiobus_free(lp->mii_bus);
- lp->mii_bus = NULL;
}
-
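
Switching temac_mdio_setup() from mdiobus_alloc() to devm_mdiobus_alloc() is what lets both the err_register unwind path and the mdiobus_free() in temac_mdio_teardown() disappear: the bus structure is now owned by the device and freed by the driver core on unbind. A condensed sketch of that lifetime, with hypothetical "foo" names and the register accessors elided:

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

struct foo_local {
	struct mii_bus *mii_bus;
};

static int foo_mdio_setup(struct foo_local *lp, struct platform_device *pdev)
{
	/* Lifetime is tied to the device; the driver core frees it on unbind. */
	lp->mii_bus = devm_mdiobus_alloc(&pdev->dev);
	if (!lp->mii_bus)
		return -ENOMEM;
	/* bus->name, bus->id and the read/write accessors would be set here */
	return of_mdiobus_register(lp->mii_bus, dev_of_node(&pdev->dev));
}

static void foo_mdio_teardown(struct foo_local *lp)
{
	/* Unregister only; no mdiobus_free() and no error-path free needed. */
	mdiobus_unregister(lp->mii_bus);
}
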
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4041c75997ba..108fbc7f125a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1596,7 +1596,7 @@ static int axienet_probe(struct platform_device *pdev)
/* Retrieve the MAC address */
mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (!mac_addr) {
+ if (IS_ERR(mac_addr)) {
dev_err(&pdev->dev, "could not find MAC address\n");
goto free_netdev;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b03a417d0073..6886270da695 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -17,6 +17,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
@@ -1078,6 +1079,27 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
return (bool)*p;
}
+/**
+ * xemaclite_ethtools_get_drvinfo - Get various Axi Emac Lite driver info
+ * @ndev: Pointer to net_device structure
+ * @ed: Pointer to ethtool_drvinfo structure
+ *
+ * This implements the ethtool command for getting driver information.
+ * Issue "ethtool -i ethX" at the Linux prompt to execute this function.
+ */
+static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *ed)
+{
+ strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+}
+
+static const struct ethtool_ops xemaclite_ethtool_ops = {
+ .get_drvinfo = xemaclite_ethtools_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
static const struct net_device_ops xemaclite_netdev_ops;
/**
@@ -1143,9 +1165,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
mac_address = of_get_mac_address(ofdev->dev.of_node);
- if (mac_address) {
+ if (!IS_ERR(mac_address)) {
/* Set the MAC address. */
- memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, mac_address);
} else {
dev_warn(dev, "No MAC address found, using random\n");
eth_hw_addr_random(ndev);
@@ -1164,6 +1186,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
ndev->netdev_ops = &xemaclite_netdev_ops;
+ ndev->ethtool_ops = &xemaclite_ethtool_ops;
ndev->flags &= ~IFF_MULTICAST;
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1229,12 +1252,29 @@ xemaclite_poll_controller(struct net_device *ndev)
}
#endif
+/* Ioctl MII Interface */
+static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ if (!dev->phydev || !netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
+ .ndo_do_ioctl = xemaclite_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xemaclite_poll_controller,
#endif
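
The new xemaclite_ioctl() simply forwards the three standard MII ioctls to phylib, so the usual user-space PHY register access now works on this driver as well. A small, self-contained user-space example reading the basic status register (the interface name "eth0" is illustrative):

/* Read a PHY register through SIOCGMIIPHY/SIOCGMIIREG. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return 1;
	mii->reg_num = MII_BMSR;		/* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return 1;
	printf("phy %d BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
	close(fd);
	return 0;
}

Note that the handler above rejects the call with -EINVAL unless the interface is up and has an attached PHY, and SIOCSMIIREG additionally requires CAP_NET_ADMIN.
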
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index ed6623a9801e..319db3ece263 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -31,14 +31,15 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/ixp46x_ts.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
@@ -1497,6 +1498,15 @@ static struct platform_driver ixp4xx_eth_driver = {
static int __init eth_init_module(void)
{
int err;
+
+ /*
+ * FIXME: we bail out on device tree boot but this really needs
+ * to be fixed in a nicer way: this registers the MDIO bus before
+ * even matching the driver infrastructure; we should only probe

+ * detected hardware.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
if ((err = ixp4xx_mdio_register()))
return err;
return platform_driver_register(&ixp4xx_eth_driver);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5583d993480d..98d1a45c0606 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
+#include <net/ipv6_stubs.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
@@ -22,8 +23,6 @@
#define GENEVE_NETDEV_VER "0.6"
-#define GENEVE_UDP_PORT 6081
-
#define GENEVE_N_VID (1u << 24)
#define GENEVE_VID_MASK (GENEVE_N_VID - 1)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7a145172d503..eaf4311b4004 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1270,21 +1270,21 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
static const struct genl_ops gtp_genl_ops[] = {
{
.cmd = GTP_CMD_NEWPDP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_new_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_DELPDP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_del_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_GETPDP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_get_pdp,
.dumpit = gtp_genl_dump_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -1294,6 +1294,7 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.version = 0,
.hdrsize = 0,
.maxattr = GTPA_MAX,
+ .policy = gtp_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = gtp_genl_ops,
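
These gtp hunks (and the identical ones in mac802154_hwsim and macsec below) all follow the same generic-netlink conversion: the attribute policy moves from each genl_ops entry into the genl_family so the core can validate attributes in one place, while pre-existing commands opt out of the new strict validation through per-op .validate flags. A stripped-down sketch of the resulting layout, using hypothetical "foo" names:

#include <linux/module.h>
#include <net/genetlink.h>

enum { FOO_ATTR_UNSPEC, FOO_ATTR_ID, __FOO_ATTR_MAX };
#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)
enum { FOO_CMD_UNSPEC, FOO_CMD_GET };

static const struct nla_policy foo_genl_policy[FOO_ATTR_MAX + 1] = {
	[FOO_ATTR_ID] = { .type = NLA_U32 },
};

static int foo_get(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_ops foo_genl_ops[] = {
	{
		.cmd = FOO_CMD_GET,
		/* existing command: keep the old, lenient validation */
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = foo_get,
	},
};

static struct genl_family foo_genl_family = {
	.name = "foo",
	.version = 1,
	.maxattr = FOO_ATTR_MAX,
	.policy = foo_genl_policy,	/* previously one .policy per op */
	.module = THIS_MODULE,
	.ops = foo_genl_ops,
	.n_ops = ARRAY_SIZE(foo_genl_ops),
};

The nla_parse_nested() to nla_parse_nested_deprecated() and nla_nest_start() to nla_nest_start_noflag() renames in the surrounding hunks belong to the same series; the _deprecated/_noflag variants preserve the old behaviour (liberal validation, no NLA_F_NESTED flag) for existing interfaces.
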
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 029206e4da3b..0f7025f3a384 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1298,11 +1298,11 @@ static void rr_dump(struct net_device *dev)
if (rrpriv->tx_skbuff[cons]){
len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
- printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n",
+ printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n",
rrpriv->tx_ring[cons].mode,
rrpriv->tx_ring[cons].size,
(unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
- (unsigned long)rrpriv->tx_skbuff[cons]->data,
+ rrpriv->tx_skbuff[cons]->data,
(unsigned int)rrpriv->tx_skbuff[cons]->truesize);
for (i = 0; i < len; i++){
if (!(i & 7))
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool destroy;
+ bool tx_disable; /* if true, do not wake up queue again */
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 813d195bbd57..ee198606854d 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
+ net_device->tx_disable = false;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
} else {
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
- if (netif_tx_queue_stopped(txq) &&
+ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
(hv_get_avail_to_write_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
@@ -874,11 +875,6 @@ static inline int netvsc_send_pkt(
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
- if (atomic_read(&nvchan->queue_sends) < 1) {
- netif_tx_wake_queue(txq);
- ndev_ctx->eth_stats.wake_queue++;
- ret = -ENOSPC;
- }
} else {
netdev_err(ndev,
"Unable to send packet pages %u len %u, ret %d\n",
@@ -886,6 +882,15 @@ static inline int netvsc_send_pkt(
ret);
}
+ if (netif_tx_queue_stopped(txq) &&
+ atomic_read(&nvchan->queue_sends) < 1 &&
+ !net_device->tx_disable) {
+ netif_tx_wake_queue(txq);
+ ndev_ctx->eth_stats.wake_queue++;
+ if (ret == -EAGAIN)
+ ret = -ENOSPC;
+ }
+
return ret;
}
@@ -964,7 +969,7 @@ int netvsc_send(struct net_device *ndev,
/* Keep aggregating only if stack says more data is coming
* and not doing mixed modes send and not flow blocked
*/
- xmit_more = skb->xmit_more &&
+ xmit_more = netdev_xmit_more() &&
!packet->cp_partial &&
!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
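
The skb->xmit_more bit was removed in this series; the "more packets are about to follow" hint now lives in per-CPU softnet state and is read with netdev_xmit_more(), which is only meaningful from inside ndo_start_xmit(). A minimal sketch of the usual pattern of batching doorbell writes on that hint, with hypothetical "foo" names and stubbed hardware helpers:

#include <linux/netdevice.h>

struct foo_priv { void __iomem *doorbell; };

static void foo_post_descriptor(struct foo_priv *priv, struct sk_buff *skb) { /* stub */ }
static void foo_ring_doorbell(struct foo_priv *priv) { /* stub */ }

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	foo_post_descriptor(priv, skb);

	/* Kick the hardware only on the last skb of a burst, or when the
	 * queue just stopped and nothing further will arrive to flush it.
	 */
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		foo_ring_doorbell(priv);

	return NETDEV_TX_OK;
}
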
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf4897043e83..06393b215102 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
rcu_read_unlock();
}
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ nvscdev->tx_disable = false;
+ virt_wmb(); /* ensure queue wake up mechanism is on */
+
+ netif_tx_wake_all_queues(ndev);
+}
+
static int netvsc_open(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
rdev = nvdev->extension;
if (!rdev->link_state) {
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(nvdev, net);
}
if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
}
}
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ if (nvscdev) {
+ nvscdev->tx_disable = true;
+ virt_wmb(); /* ensure txq will not wake up after stop */
+ }
+
+ netif_tx_disable(ndev);
+}
+
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
- netif_tx_disable(net);
+ netvsc_tx_disable(nvdev, net);
/* No need to close rndis filter if it is removed already */
if (!nvdev)
@@ -308,7 +328,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev,
* If a valid queue has already been assigned, then use that.
* Otherwise compute tx queue based on hash and the send table.
*
- * This is basically similar to default (__netdev_pick_tx) with the added step
+ * This is basically similar to default (netdev_pick_tx) with the added step
* of using the host send_table when no other queue has been assigned.
*
* TODO support XPS - but get_xps_queue not exported
@@ -331,8 +351,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev;
@@ -344,10 +363,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
if (vf_ops->ndo_select_queue)
- txq = vf_ops->ndo_select_queue(vf_netdev, skb,
- sb_dev, fallback);
+ txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
else
- txq = fallback(vf_netdev, skb, NULL);
+ txq = netdev_pick_tx(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
@@ -920,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
- netif_tx_disable(ndev);
+ netvsc_tx_disable(nvdev, ndev);
ret = rndis_filter_close(nvdev);
if (ret) {
@@ -1908,7 +1926,7 @@ static void netvsc_link_change(struct work_struct *w)
if (rdev->link_state) {
rdev->link_state = false;
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(net_device, net);
} else {
notify = true;
}
@@ -1918,7 +1936,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
}
kfree(event);
break;
@@ -1927,7 +1945,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add(&event->list, &ndev_ctx->reconfig_events);
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 3b88846de31b..b187ae1a6bd6 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -227,14 +227,16 @@ static int append_radio_msg(struct sk_buff *skb, struct hwsim_phy *phy)
return 0;
}
- nl_edges = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGES);
+ nl_edges = nla_nest_start_noflag(skb,
+ MAC802154_HWSIM_ATTR_RADIO_EDGES);
if (!nl_edges) {
rcu_read_unlock();
return -ENOBUFS;
}
list_for_each_entry_rcu(e, &phy->edges, list) {
- nl_edge = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGE);
+ nl_edge = nla_nest_start_noflag(skb,
+ MAC802154_HWSIM_ATTR_RADIO_EDGE);
if (!nl_edge) {
rcu_read_unlock();
nla_nest_cancel(skb, nl_edges);
@@ -428,9 +430,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
- info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
- hwsim_edge_policy, NULL))
+ if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
return -EINVAL;
if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID])
@@ -492,9 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
- info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
- hwsim_edge_policy, NULL))
+ if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
return -EINVAL;
if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID])
@@ -542,9 +540,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
- info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
- hwsim_edge_policy, NULL))
+ if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
return -EINVAL;
if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
@@ -598,37 +594,37 @@ static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] =
static const struct genl_ops hwsim_nl_ops[] = {
{
.cmd = MAC802154_HWSIM_CMD_NEW_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_new_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_DEL_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_del_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_GET_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_get_radio_nl,
.dumpit = hwsim_dump_radio_nl,
},
{
.cmd = MAC802154_HWSIM_CMD_NEW_EDGE,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_new_edge_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_DEL_EDGE,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_del_edge_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_SET_EDGE,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_set_edge_lqi,
.flags = GENL_UNS_ADMIN_PERM,
},
@@ -638,6 +634,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.name = "MAC802154_HWSIM",
.version = 1,
.maxattr = MAC802154_HWSIM_ATTR_MAX,
+ .policy = hwsim_genl_policy,
.module = THIS_MODULE,
.ops = hwsim_nl_ops,
.n_ops = ARRAY_SIZE(hwsim_nl_ops),
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index c589f5ae75bb..8bb53ec8d9cf 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw)
dev_dbg(printdev(lp), "no slotted operation\n");
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
DAR_PHY_CTRL1_SLOTTED, 0x0);
+ if (ret < 0)
+ return ret;
/* enable irq */
enable_irq(lp->spi->irq);
@@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw)
/* Unmask SEQ interrupt */
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
DAR_PHY_CTRL2_SEQMSK, 0x0);
+ if (ret < 0)
+ return ret;
/* Start the RX sequence */
dev_dbg(printdev(lp), "start the RX sequence\n");
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
+ if (ret < 0)
+ return ret;
return 0;
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2df7f60fe052..857e4bf99883 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -128,21 +128,9 @@ static u32 always_on(struct net_device *dev)
return 1;
}
-static int loopback_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
-{
- ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-
- ts_info->phc_index = -1;
-
- return 0;
-};
-
static const struct ethtool_ops loopback_ethtool_ops = {
.get_link = always_on,
- .get_ts_info = loopback_get_ts_info,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static int loopback_dev_init(struct net_device *dev)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 64a982563d59..009b2902c9d3 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1611,9 +1611,7 @@ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
if (!attrs[MACSEC_ATTR_SA_CONFIG])
return -EINVAL;
- if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX,
- attrs[MACSEC_ATTR_SA_CONFIG],
- macsec_genl_sa_policy, NULL))
+ if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
return -EINVAL;
return 0;
@@ -1624,9 +1622,7 @@ static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
return -EINVAL;
- if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
- attrs[MACSEC_ATTR_RXSC_CONFIG],
- macsec_genl_rxsc_policy, NULL))
+ if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
return -EINVAL;
return 0;
@@ -2175,8 +2171,9 @@ static int copy_tx_sa_stats(struct sk_buff *skb,
return 0;
}
-static int copy_rx_sa_stats(struct sk_buff *skb,
- struct macsec_rx_sa_stats __percpu *pstats)
+static noinline_for_stack int
+copy_rx_sa_stats(struct sk_buff *skb,
+ struct macsec_rx_sa_stats __percpu *pstats)
{
struct macsec_rx_sa_stats sum = {0, };
int cpu;
@@ -2201,8 +2198,8 @@ static int copy_rx_sa_stats(struct sk_buff *skb,
return 0;
}
-static int copy_rx_sc_stats(struct sk_buff *skb,
- struct pcpu_rx_sc_stats __percpu *pstats)
+static noinline_for_stack int
+copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
{
struct macsec_rx_sc_stats sum = {0, };
int cpu;
@@ -2265,8 +2262,8 @@ static int copy_rx_sc_stats(struct sk_buff *skb,
return 0;
}
-static int copy_tx_sc_stats(struct sk_buff *skb,
- struct pcpu_tx_sc_stats __percpu *pstats)
+static noinline_for_stack int
+copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
{
struct macsec_tx_sc_stats sum = {0, };
int cpu;
@@ -2305,8 +2302,8 @@ static int copy_tx_sc_stats(struct sk_buff *skb,
return 0;
}
-static int copy_secy_stats(struct sk_buff *skb,
- struct pcpu_secy_stats __percpu *pstats)
+static noinline_for_stack int
+copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{
struct macsec_dev_stats sum = {0, };
int cpu;
@@ -2364,7 +2361,8 @@ static int copy_secy_stats(struct sk_buff *skb,
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
+ struct nlattr *secy_nest = nla_nest_start_noflag(skb,
+ MACSEC_ATTR_SECY);
u64 csid;
if (!secy_nest)
@@ -2410,8 +2408,9 @@ cancel:
return 1;
}
-static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
- struct sk_buff *skb, struct netlink_callback *cb)
+static noinline_for_stack int
+dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ struct sk_buff *skb, struct netlink_callback *cb)
{
struct macsec_rx_sc *rx_sc;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
@@ -2433,7 +2432,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (nla_put_secy(secy, skb))
goto nla_put_failure;
- attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
+ attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
if (!attr)
goto nla_put_failure;
if (copy_tx_sc_stats(skb, tx_sc->stats)) {
@@ -2442,7 +2441,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
- attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
+ attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
if (!attr)
goto nla_put_failure;
if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
@@ -2451,7 +2450,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
- txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
+ txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
if (!txsa_list)
goto nla_put_failure;
for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
@@ -2461,7 +2460,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!tx_sa)
continue;
- txsa_nest = nla_nest_start(skb, j++);
+ txsa_nest = nla_nest_start_noflag(skb, j++);
if (!txsa_nest) {
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
@@ -2476,7 +2475,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
- attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
+ attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
@@ -2494,7 +2493,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, txsa_list);
- rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
+ rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
if (!rxsc_list)
goto nla_put_failure;
@@ -2502,7 +2501,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
for_each_rxsc_rtnl(secy, rx_sc) {
int k;
struct nlattr *rxsa_list;
- struct nlattr *rxsc_nest = nla_nest_start(skb, j++);
+ struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
if (!rxsc_nest) {
nla_nest_cancel(skb, rxsc_list);
@@ -2517,7 +2516,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
- attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
+ attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
@@ -2531,7 +2530,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
- rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
+ rxsa_list = nla_nest_start_noflag(skb,
+ MACSEC_RXSC_ATTR_SA_LIST);
if (!rxsa_list) {
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
@@ -2545,7 +2545,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!rx_sa)
continue;
- rxsa_nest = nla_nest_start(skb, k++);
+ rxsa_nest = nla_nest_start_noflag(skb, k++);
if (!rxsa_nest) {
nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest);
@@ -2553,7 +2553,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
- attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
+ attr = nla_nest_start_noflag(skb,
+ MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest);
@@ -2636,61 +2637,61 @@ done:
static const struct genl_ops macsec_genl_ops[] = {
{
.cmd = MACSEC_CMD_GET_TXSC,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.dumpit = macsec_dump_txsc,
- .policy = macsec_genl_policy,
},
{
.cmd = MACSEC_CMD_ADD_RXSC,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_add_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_RXSC,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_del_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_RXSC,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_upd_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_ADD_TXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_add_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_TXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_del_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_TXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_upd_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_ADD_RXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_add_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_RXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_del_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_RXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_upd_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -2700,6 +2701,7 @@ static struct genl_family macsec_fam __ro_after_init = {
.hdrsize = 0,
.version = MACSEC_GENL_VERSION,
.maxattr = MACSEC_ATTR_MAX,
+ .policy = macsec_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = macsec_genl_ops,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0c0f105657d3..92efa93649f0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -24,6 +24,7 @@
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
@@ -34,6 +35,7 @@
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
+#include <linux/phy.h>
#define MACVLAN_HASH_BITS 8
#define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS)
@@ -822,6 +824,32 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct net_device *real_dev = macvlan_dev_real_dev(dev);
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ struct ifreq ifrr;
+ int err = -EOPNOTSUPP;
+
+ strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ ifrr.ifr_ifru = ifr->ifr_ifru;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (!net_eq(dev_net(dev), &init_net))
+ break;
+ case SIOCGHWTSTAMP:
+ if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
+ err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+ break;
+ }
+
+ if (!err)
+ ifr->ifr_ifru = ifrr.ifr_ifru;
+
+ return err;
+}
+
/*
* macvlan network devices have devices nesting below it and are a special
* "super class" of normal network devices; split their locks off into a
@@ -1020,6 +1048,26 @@ static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
return __ethtool_get_link_ksettings(vlan->lowerdev, cmd);
}
+static int macvlan_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct net_device *real_dev = macvlan_dev_real_dev(dev);
+ const struct ethtool_ops *ops = real_dev->ethtool_ops;
+ struct phy_device *phydev = real_dev->phydev;
+
+ if (phydev && phydev->drv && phydev->drv->ts_info) {
+ return phydev->drv->ts_info(phydev, info);
+ } else if (ops->get_ts_info) {
+ return ops->get_ts_info(real_dev, info);
+ } else {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ }
+
+ return 0;
+}
+
static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1094,6 +1142,7 @@ static const struct ethtool_ops macvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = macvlan_ethtool_get_link_ksettings,
.get_drvinfo = macvlan_ethtool_get_drvinfo,
+ .get_ts_info = macvlan_ethtool_get_ts_info,
};
static const struct net_device_ops macvlan_netdev_ops = {
@@ -1103,6 +1152,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_stop = macvlan_stop,
.ndo_start_xmit = macvlan_start_xmit,
.ndo_change_mtu = macvlan_change_mtu,
+ .ndo_do_ioctl = macvlan_do_ioctl,
.ndo_fix_features = macvlan_fix_features,
.ndo_change_rx_flags = macvlan_change_rx_flags,
.ndo_set_mac_address = macvlan_set_mac_address,
@@ -1576,7 +1626,7 @@ static int macvlan_fill_info(struct sk_buff *skb,
if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count))
goto nla_put_failure;
if (vlan->macaddr_count > 0) {
- nest = nla_nest_start(skb, IFLA_MACVLAN_MACADDR_DATA);
+ nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA);
if (nest == NULL)
goto nla_put_failure;
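
With ndo_do_ioctl and get_ts_info forwarded to the lower device, hardware timestamping can now be configured and queried directly on a macvlan; the request is relayed to the real NIC (or its PHY, for ethtool -T). A small user-space example enabling hardware TX/RX timestamps (the interface name "macvlan0" is illustrative):

/* Enable hardware timestamping on a macvlan; the kernel forwards the
 * request to the underlying real device.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "macvlan0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}
	/* the driver writes back the configuration it actually applied */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}

SIOCSHWTSTAMP needs CAP_NET_ADMIN, the hunk above only allows it from the initial network namespace, and it only succeeds if the real device's driver implements hardware timestamping.
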
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index ed1166adaa2f..b16a1221d19b 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -115,8 +115,7 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
static u16 net_failover_select_queue(struct net_device *dev,
struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct net_failover_info *nfo_info = netdev_priv(dev);
struct net_device *primary_dev;
@@ -127,10 +126,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
const struct net_device_ops *ops = primary_dev->netdev_ops;
if (ops->ndo_select_queue)
- txq = ops->ndo_select_queue(primary_dev, skb,
- sb_dev, fallback);
+ txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
else
- txq = fallback(primary_dev, skb, NULL);
+ txq = netdev_pick_tx(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 0fee1d06c084..09f1315d2f2a 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,17 +3,13 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o \
+ netdev.o dev.o fib.o bus.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
bpf.o
endif
-ifneq ($(CONFIG_NET_DEVLINK),)
-netdevsim-objs += devlink.o fib.o
-endif
-
ifneq ($(CONFIG_XFRM_OFFLOAD),)
netdevsim-objs += ipsec.o
endif
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index f92c43453ec6..2b74425822ab 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -27,7 +27,7 @@
bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
struct nsim_bpf_bound_prog {
- struct netdevsim *ns;
+ struct nsim_dev *nsim_dev;
struct bpf_prog *prog;
struct dentry *ddir;
const char *state;
@@ -65,8 +65,8 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
struct nsim_bpf_bound_prog *state;
state = env->prog->aux->offload->dev_priv;
- if (state->ns->bpf_bind_verifier_delay && !insn_idx)
- msleep(state->ns->bpf_bind_verifier_delay);
+ if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
+ msleep(state->nsim_dev->bpf_bind_verifier_delay);
if (insn_idx == env->prog->len - 1)
pr_vlog(env, "Hello from netdevsim!\n");
@@ -213,7 +213,8 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
return 0;
}
-static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
+static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
+ struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
char name[16];
@@ -222,13 +223,13 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
if (!state)
return -ENOMEM;
- state->ns = ns;
+ state->nsim_dev = nsim_dev;
state->prog = prog;
state->state = "verify";
/* Program id is not populated yet when we create the state. */
- sprintf(name, "%u", ns->sdev->prog_id_gen++);
- state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs);
+ sprintf(name, "%u", nsim_dev->prog_id_gen++);
+ state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
if (IS_ERR_OR_NULL(state->ddir)) {
kfree(state);
return -ENOMEM;
@@ -239,7 +240,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
&state->state, &nsim_bpf_string_fops);
debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
- list_add_tail(&state->l, &ns->sdev->bpf_bound_progs);
+ list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);
prog->aux->offload->dev_priv = state;
@@ -248,12 +249,13 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
- struct netdevsim *ns = bpf_offload_dev_priv(prog->aux->offload->offdev);
+ struct nsim_dev *nsim_dev =
+ bpf_offload_dev_priv(prog->aux->offload->offdev);
- if (!ns->bpf_bind_accept)
+ if (!nsim_dev->bpf_bind_accept)
return -EOPNOTSUPP;
- return nsim_bpf_create_prog(ns, prog);
+ return nsim_bpf_create_prog(nsim_dev, prog);
}
static int nsim_bpf_translate(struct bpf_prog *prog)
@@ -512,7 +514,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
}
offmap->dev_ops = &nsim_bpf_map_ops;
- list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps);
+ list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps);
return 0;
@@ -576,61 +578,68 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
}
}
-int nsim_bpf_init(struct netdevsim *ns)
+int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
{
int err;
- if (ns->sdev->refcnt == 1) {
- INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs);
- INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps);
+ INIT_LIST_HEAD(&nsim_dev->bpf_bound_progs);
+ INIT_LIST_HEAD(&nsim_dev->bpf_bound_maps);
- ns->sdev->ddir_bpf_bound_progs =
- debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
- if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
- return -ENOMEM;
+ nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
+ nsim_dev->ddir);
+ if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs))
+ return -ENOMEM;
- ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops,
- ns);
- err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
- if (err)
- return err;
- }
+ nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
+ err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
+ if (err)
+ return err;
+
+ nsim_dev->bpf_bind_accept = true;
+ debugfs_create_bool("bpf_bind_accept", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_accept);
+ debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_verifier_delay);
+ return 0;
+}
+
+void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
+{
+ WARN_ON(!list_empty(&nsim_dev->bpf_bound_progs));
+ WARN_ON(!list_empty(&nsim_dev->bpf_bound_maps));
+ bpf_offload_dev_destroy(nsim_dev->bpf_dev);
+}
+
+int nsim_bpf_init(struct netdevsim *ns)
+{
+ struct dentry *ddir = ns->nsim_dev_port->ddir;
+ int err;
- err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
+ err = bpf_offload_dev_netdev_register(ns->nsim_dev->bpf_dev,
+ ns->netdev);
if (err)
- goto err_destroy_bdev;
+ return err;
- debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
+ debugfs_create_u32("bpf_offloaded_id", 0400, ddir,
&ns->bpf_offloaded_id);
- ns->bpf_bind_accept = true;
- debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
- &ns->bpf_bind_accept);
- debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
- &ns->bpf_bind_verifier_delay);
-
ns->bpf_tc_accept = true;
- debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_tc_accept", 0600, ddir,
&ns->bpf_tc_accept);
- debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ddir,
&ns->bpf_tc_non_bound_accept);
ns->bpf_xdpdrv_accept = true;
- debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_xdpdrv_accept", 0600, ddir,
&ns->bpf_xdpdrv_accept);
ns->bpf_xdpoffload_accept = true;
- debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_xdpoffload_accept", 0600, ddir,
&ns->bpf_xdpoffload_accept);
ns->bpf_map_accept = true;
- debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_map_accept", 0600, ddir,
&ns->bpf_map_accept);
return 0;
-
-err_destroy_bdev:
- if (ns->sdev->refcnt == 1)
- bpf_offload_dev_destroy(ns->sdev->bpf_dev);
- return err;
}
void nsim_bpf_uninit(struct netdevsim *ns)
@@ -638,11 +647,5 @@ void nsim_bpf_uninit(struct netdevsim *ns)
WARN_ON(ns->xdp.prog);
WARN_ON(ns->xdp_hw.prog);
WARN_ON(ns->bpf_offloaded);
- bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);
-
- if (ns->sdev->refcnt == 1) {
- WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
- WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
- bpf_offload_dev_destroy(ns->sdev->bpf_dev);
- }
+ bpf_offload_dev_netdev_unregister(ns->nsim_dev->bpf_dev, ns->netdev);
}
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
new file mode 100644
index 000000000000..1a0ff3d7747b
--- /dev/null
+++ b/drivers/net/netdevsim/bus.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2019 Mellanox Technologies. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "netdevsim.h"
+
+static DEFINE_IDA(nsim_bus_dev_ids);
+static LIST_HEAD(nsim_bus_dev_list);
+static DEFINE_MUTEX(nsim_bus_dev_list_lock);
+
+static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
+{
+ return container_of(dev, struct nsim_bus_dev, dev);
+}
+
+static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int num_vfs)
+{
+ nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
+ sizeof(struct nsim_vf_config),
+ GFP_KERNEL);
+ if (!nsim_bus_dev->vfconfigs)
+ return -ENOMEM;
+ nsim_bus_dev->num_vfs = num_vfs;
+
+ return 0;
+}
+
+static void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev)
+{
+ kfree(nsim_bus_dev->vfconfigs);
+ nsim_bus_dev->vfconfigs = NULL;
+ nsim_bus_dev->num_vfs = 0;
+}
+
+static ssize_t
+nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ unsigned int num_vfs;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &num_vfs);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ if (nsim_bus_dev->num_vfs == num_vfs)
+ goto exit_good;
+ if (nsim_bus_dev->num_vfs && num_vfs) {
+ ret = -EBUSY;
+ goto exit_unlock;
+ }
+
+ if (num_vfs) {
+ ret = nsim_bus_dev_vfs_enable(nsim_bus_dev, num_vfs);
+ if (ret)
+ goto exit_unlock;
+ } else {
+ nsim_bus_dev_vfs_disable(nsim_bus_dev);
+ }
+exit_good:
+ ret = count;
+exit_unlock:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static ssize_t
+nsim_bus_dev_numvfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return sprintf(buf, "%u\n", nsim_bus_dev->num_vfs);
+}
+
+static struct device_attribute nsim_bus_dev_numvfs_attr =
+ __ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show,
+ nsim_bus_dev_numvfs_store);
+
+static ssize_t
+new_port_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ unsigned int port_index;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &port_index);
+ if (ret)
+ return ret;
+ ret = nsim_dev_port_add(nsim_bus_dev, port_index);
+ return ret ? ret : count;
+}
+
+static struct device_attribute nsim_bus_dev_new_port_attr = __ATTR_WO(new_port);
+
+static ssize_t
+del_port_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ unsigned int port_index;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &port_index);
+ if (ret)
+ return ret;
+ ret = nsim_dev_port_del(nsim_bus_dev, port_index);
+ return ret ? ret : count;
+}
+
+static struct device_attribute nsim_bus_dev_del_port_attr = __ATTR_WO(del_port);
+
+static struct attribute *nsim_bus_dev_attrs[] = {
+ &nsim_bus_dev_numvfs_attr.attr,
+ &nsim_bus_dev_new_port_attr.attr,
+ &nsim_bus_dev_del_port_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group nsim_bus_dev_attr_group = {
+ .attrs = nsim_bus_dev_attrs,
+};
+
+static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
+ &nsim_bus_dev_attr_group,
+ NULL,
+};
+
+static void nsim_bus_dev_release(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ nsim_bus_dev_vfs_disable(nsim_bus_dev);
+}
+
+static struct device_type nsim_bus_dev_type = {
+ .groups = nsim_bus_dev_attr_groups,
+ .release = nsim_bus_dev_release,
+};
+
+static struct nsim_bus_dev *
+nsim_bus_dev_new(unsigned int id, unsigned int port_count);
+
+static ssize_t
+new_device_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev;
+ unsigned int port_count;
+ unsigned int id;
+ int err;
+
+ err = sscanf(buf, "%u %u", &id, &port_count);
+ switch (err) {
+ case 1:
+ port_count = 1;
+ /* fall through */
+ case 2:
+ if (id > INT_MAX) {
+ pr_err("Value of \"id\" is too big.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
+ return -EINVAL;
+ }
+ nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+ if (IS_ERR(nsim_bus_dev))
+ return PTR_ERR(nsim_bus_dev);
+
+ mutex_lock(&nsim_bus_dev_list_lock);
+ list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
+ mutex_unlock(&nsim_bus_dev_list_lock);
+
+ return count;
+}
+static BUS_ATTR_WO(new_device);
+
+static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev);
+
+static ssize_t
+del_device_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev, *tmp;
+ unsigned int id;
+ int err;
+
+ err = sscanf(buf, "%u", &id);
+ switch (err) {
+ case 1:
+ if (id > INT_MAX) {
+ pr_err("Value of \"id\" is too big.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("Format for deleting device is \"id\" (uint).\n");
+ return -EINVAL;
+ }
+
+ err = -ENOENT;
+ mutex_lock(&nsim_bus_dev_list_lock);
+ list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
+ if (nsim_bus_dev->dev.id != id)
+ continue;
+ list_del(&nsim_bus_dev->list);
+ nsim_bus_dev_del(nsim_bus_dev);
+ err = 0;
+ break;
+ }
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(del_device);
+
+static struct attribute *nsim_bus_attrs[] = {
+ &bus_attr_new_device.attr,
+ &bus_attr_del_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(nsim_bus);
+
+static int nsim_bus_probe(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return nsim_dev_probe(nsim_bus_dev);
+}
+
+static int nsim_bus_remove(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ nsim_dev_remove(nsim_bus_dev);
+ return 0;
+}
+
+static int nsim_num_vf(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return nsim_bus_dev->num_vfs;
+}
+
+static struct bus_type nsim_bus = {
+ .name = DRV_NAME,
+ .dev_name = DRV_NAME,
+ .bus_groups = nsim_bus_groups,
+ .probe = nsim_bus_probe,
+ .remove = nsim_bus_remove,
+ .num_vf = nsim_num_vf,
+};
+
+static struct nsim_bus_dev *
+nsim_bus_dev_new(unsigned int id, unsigned int port_count)
+{
+ struct nsim_bus_dev *nsim_bus_dev;
+ int err;
+
+ nsim_bus_dev = kzalloc(sizeof(*nsim_bus_dev), GFP_KERNEL);
+ if (!nsim_bus_dev)
+ return ERR_PTR(-ENOMEM);
+
+ err = ida_alloc_range(&nsim_bus_dev_ids, id, id, GFP_KERNEL);
+ if (err < 0)
+ goto err_nsim_bus_dev_free;
+ nsim_bus_dev->dev.id = err;
+ nsim_bus_dev->dev.bus = &nsim_bus;
+ nsim_bus_dev->dev.type = &nsim_bus_dev_type;
+ nsim_bus_dev->port_count = port_count;
+
+ err = device_register(&nsim_bus_dev->dev);
+ if (err)
+ goto err_nsim_bus_dev_id_free;
+ return nsim_bus_dev;
+
+err_nsim_bus_dev_id_free:
+ ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+err_nsim_bus_dev_free:
+ kfree(nsim_bus_dev);
+ return ERR_PTR(err);
+}
+
+static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
+{
+ device_unregister(&nsim_bus_dev->dev);
+ ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+ kfree(nsim_bus_dev);
+}
+
+static struct device_driver nsim_driver = {
+ .name = DRV_NAME,
+ .bus = &nsim_bus,
+ .owner = THIS_MODULE,
+};
+
+int nsim_bus_init(void)
+{
+ int err;
+
+ err = bus_register(&nsim_bus);
+ if (err)
+ return err;
+ err = driver_register(&nsim_driver);
+ if (err)
+ goto err_bus_unregister;
+ return 0;
+
+err_bus_unregister:
+ bus_unregister(&nsim_bus);
+ return err;
+}
+
+void nsim_bus_exit(void)
+{
+ struct nsim_bus_dev *nsim_bus_dev, *tmp;
+
+ mutex_lock(&nsim_bus_dev_list_lock);
+ list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
+ list_del(&nsim_bus_dev->list);
+ nsim_bus_dev_del(nsim_bus_dev);
+ }
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ driver_unregister(&nsim_driver);
+ bus_unregister(&nsim_bus);
+}
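
The new bus replaces the old way of instantiating simulated devices: netdevsim instances are now created and destroyed through bus-level sysfs attributes taking "<id> [port_count]", and individual ports can be hot-added or removed through the per-device new_port/del_port attributes. A tiny user-space sketch of driving it (the /sys/bus/netdevsim path assumes DRV_NAME is "netdevsim"):

/* Create netdevsim device 10 with 2 ports, then delete it again.
 * Shell equivalent: echo "10 2" > /sys/bus/netdevsim/new_device
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	if (sysfs_write("/sys/bus/netdevsim/new_device", "10 2"))
		return 1;
	/* ... the simulated netdevs and their debugfs entries exist here ... */
	return sysfs_write("/sys/bus/netdevsim/del_device", "10") ? 1 : 0;
}

This needs root and the netdevsim module loaded; the sriov_numvfs attribute defined above mimics the PCI attribute of the same name so VF-related paths can be exercised on the simulated device.
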
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
new file mode 100644
index 000000000000..b509b941d5ca
--- /dev/null
+++ b/drivers/net/netdevsim/dev.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2018 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com>
+ * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <net/devlink.h>
+
+#include "netdevsim.h"
+
+static struct dentry *nsim_dev_ddir;
+
+static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
+{
+ char dev_ddir_name[16];
+
+ sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
+ nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
+ if (IS_ERR_OR_NULL(nsim_dev->ddir))
+ return PTR_ERR_OR_ZERO(nsim_dev->ddir) ?: -EINVAL;
+ nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
+ if (IS_ERR_OR_NULL(nsim_dev->ports_ddir))
+ return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
+ return 0;
+}
+
+static void nsim_dev_debugfs_exit(struct nsim_dev *nsim_dev)
+{
+ debugfs_remove_recursive(nsim_dev->ports_ddir);
+ debugfs_remove_recursive(nsim_dev->ddir);
+}
+
+static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
+ struct nsim_dev_port *nsim_dev_port)
+{
+ char port_ddir_name[16];
+ char dev_link_name[32];
+
+ sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
+ nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
+ nsim_dev->ports_ddir);
+ if (IS_ERR_OR_NULL(nsim_dev_port->ddir))
+ return -ENOMEM;
+
+ sprintf(dev_link_name, "../../../" DRV_NAME "%u",
+ nsim_dev->nsim_bus_dev->dev.id);
+ debugfs_create_symlink("dev", nsim_dev_port->ddir, dev_link_name);
+
+ return 0;
+}
+
+static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
+{
+ debugfs_remove_recursive(nsim_dev_port->ddir);
+}
+
+static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
+{
+ struct nsim_dev *nsim_dev = priv;
+
+ return nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV4_FIB, false);
+}
+
+static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
+{
+ struct nsim_dev *nsim_dev = priv;
+
+ return nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV4_FIB_RULES, false);
+}
+
+static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
+{
+ struct nsim_dev *nsim_dev = priv;
+
+ return nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV6_FIB, false);
+}
+
+static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
+{
+ struct nsim_dev *nsim_dev = priv;
+
+ return nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV6_FIB_RULES, false);
+}
+
+static int nsim_dev_resources_register(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct devlink_resource_size_params params = {
+ .size_max = (u64)-1,
+ .size_granularity = 1,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY
+ };
+ int err;
+ u64 n;
+
+ /* Resources for IPv4 */
+ err = devlink_resource_register(devlink, "IPv4", (u64)-1,
+ NSIM_RESOURCE_IPV4,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+ if (err) {
+ pr_err("Failed to register IPv4 top resource\n");
+ goto out;
+ }
+
+ n = nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV4_FIB, true);
+ err = devlink_resource_register(devlink, "fib", n,
+ NSIM_RESOURCE_IPV4_FIB,
+ NSIM_RESOURCE_IPV4, &params);
+ if (err) {
+ pr_err("Failed to register IPv4 FIB resource\n");
+ return err;
+ }
+
+ n = nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV4_FIB_RULES, true);
+ err = devlink_resource_register(devlink, "fib-rules", n,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV4, &params);
+ if (err) {
+ pr_err("Failed to register IPv4 FIB rules resource\n");
+ return err;
+ }
+
+ /* Resources for IPv6 */
+ err = devlink_resource_register(devlink, "IPv6", (u64)-1,
+ NSIM_RESOURCE_IPV6,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+ if (err) {
+ pr_err("Failed to register IPv6 top resource\n");
+ goto out;
+ }
+
+ n = nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV6_FIB, true);
+ err = devlink_resource_register(devlink, "fib", n,
+ NSIM_RESOURCE_IPV6_FIB,
+ NSIM_RESOURCE_IPV6, &params);
+ if (err) {
+ pr_err("Failed to register IPv6 FIB resource\n");
+ return err;
+ }
+
+ n = nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV6_FIB_RULES, true);
+ err = devlink_resource_register(devlink, "fib-rules", n,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_IPV6, &params);
+ if (err) {
+ pr_err("Failed to register IPv6 FIB rules resource\n");
+ return err;
+ }
+
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_dev_ipv4_fib_resource_occ_get,
+ nsim_dev);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_dev_ipv4_fib_rules_res_occ_get,
+ nsim_dev);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_dev_ipv6_fib_resource_occ_get,
+ nsim_dev);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_dev_ipv6_fib_rules_res_occ_get,
+ nsim_dev);
+out:
+ return err;
+}
+
+static int nsim_dev_reload(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
+ int err;
+ u64 val;
+
+ err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ if (!err) {
+ err = nsim_fib_set_max(nsim_dev->fib_data,
+ res_ids[i], val, extack);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops nsim_dev_devlink_ops = {
+ .reload = nsim_dev_reload,
+};
+
+static struct nsim_dev *
+nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
+{
+ struct nsim_dev *nsim_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+ nsim_dev = devlink_priv(devlink);
+ nsim_dev->nsim_bus_dev = nsim_bus_dev;
+ nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
+ get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+
+ nsim_dev->fib_data = nsim_fib_create();
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_devlink_free;
+ }
+
+ err = nsim_dev_resources_register(devlink);
+ if (err)
+ goto err_fib_destroy;
+
+ err = devlink_register(devlink, &nsim_bus_dev->dev);
+ if (err)
+ goto err_resources_unregister;
+
+ err = nsim_dev_debugfs_init(nsim_dev);
+ if (err)
+ goto err_dl_unregister;
+
+ err = nsim_bpf_dev_init(nsim_dev);
+ if (err)
+ goto err_debugfs_exit;
+
+ return nsim_dev;
+
+err_debugfs_exit:
+ nsim_dev_debugfs_exit(nsim_dev);
+err_dl_unregister:
+ devlink_unregister(devlink);
+err_resources_unregister:
+ devlink_resources_unregister(devlink, NULL);
+err_fib_destroy:
+ nsim_fib_destroy(nsim_dev->fib_data);
+err_devlink_free:
+ devlink_free(devlink);
+ return ERR_PTR(err);
+}
+
+static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
+{
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+
+ nsim_bpf_dev_exit(nsim_dev);
+ nsim_dev_debugfs_exit(nsim_dev);
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
+ nsim_fib_destroy(nsim_dev->fib_data);
+ mutex_destroy(&nsim_dev->port_list_lock);
+ devlink_free(devlink);
+}
+
+static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev_port *nsim_dev_port;
+ struct devlink_port *devlink_port;
+ int err;
+
+ nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
+ if (!nsim_dev_port)
+ return -ENOMEM;
+ nsim_dev_port->port_index = port_index;
+
+ devlink_port = &nsim_dev_port->devlink_port;
+ devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_index + 1, 0, 0,
+ nsim_dev->switch_id.id,
+ nsim_dev->switch_id.id_len);
+ err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
+ port_index);
+ if (err)
+ goto err_port_free;
+
+ err = nsim_dev_port_debugfs_init(nsim_dev, nsim_dev_port);
+ if (err)
+ goto err_dl_port_unregister;
+
+ nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port);
+ if (IS_ERR(nsim_dev_port->ns)) {
+ err = PTR_ERR(nsim_dev_port->ns);
+ goto err_port_debugfs_exit;
+ }
+
+ devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev);
+ list_add(&nsim_dev_port->list, &nsim_dev->port_list);
+
+ return 0;
+
+err_port_debugfs_exit:
+ nsim_dev_port_debugfs_exit(nsim_dev_port);
+err_dl_port_unregister:
+ devlink_port_unregister(devlink_port);
+err_port_free:
+ kfree(nsim_dev_port);
+ return err;
+}
+
+static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
+{
+ struct devlink_port *devlink_port = &nsim_dev_port->devlink_port;
+
+ list_del(&nsim_dev_port->list);
+ devlink_port_type_clear(devlink_port);
+ nsim_destroy(nsim_dev_port->ns);
+ nsim_dev_port_debugfs_exit(nsim_dev_port);
+ devlink_port_unregister(devlink_port);
+ kfree(nsim_dev_port);
+}
+
+static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_port *nsim_dev_port, *tmp;
+
+ list_for_each_entry_safe(nsim_dev_port, tmp,
+ &nsim_dev->port_list, list)
+ __nsim_dev_port_del(nsim_dev_port);
+}
+
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+{
+ struct nsim_dev *nsim_dev;
+ int i;
+ int err;
+
+ nsim_dev = nsim_dev_create(nsim_bus_dev, nsim_bus_dev->port_count);
+ if (IS_ERR(nsim_dev))
+ return PTR_ERR(nsim_dev);
+ dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+
+ for (i = 0; i < nsim_bus_dev->port_count; i++) {
+ err = __nsim_dev_port_add(nsim_dev, i);
+ if (err)
+ goto err_port_del_all;
+ }
+ return 0;
+
+err_port_del_all:
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_destroy(nsim_dev);
+ return err;
+}
+
+void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_destroy(nsim_dev);
+}
+
+static struct nsim_dev_port *
+__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, unsigned int port_index)
+{
+ struct nsim_dev_port *nsim_dev_port;
+
+ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list)
+ if (nsim_dev_port->port_index == port_index)
+ return nsim_dev_port;
+ return NULL;
+}
+
+int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ int err;
+
+ mutex_lock(&nsim_dev->port_list_lock);
+ if (__nsim_dev_port_lookup(nsim_dev, port_index))
+ err = -EEXIST;
+ else
+ err = __nsim_dev_port_add(nsim_dev, port_index);
+ mutex_unlock(&nsim_dev->port_list_lock);
+ return err;
+}
+
+int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct nsim_dev_port *nsim_dev_port;
+ int err = 0;
+
+ mutex_lock(&nsim_dev->port_list_lock);
+ nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, port_index);
+ if (!nsim_dev_port)
+ err = -ENOENT;
+ else
+ __nsim_dev_port_del(nsim_dev_port);
+ mutex_unlock(&nsim_dev->port_list_lock);
+ return err;
+}
+
+int nsim_dev_init(void)
+{
+ nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
+ if (IS_ERR_OR_NULL(nsim_dev_ddir))
+ return -ENOMEM;
+ return 0;
+}
+
+void nsim_dev_exit(void)
+{
+ debugfs_remove_recursive(nsim_dev_ddir);
+}
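
The reworked dev.c above is built around devlink's inline private data: devlink_alloc() reserves sizeof(*nsim_dev) directly behind the devlink instance, and devlink_priv()/priv_to_devlink() convert between the two pointers (see nsim_dev_create() and nsim_dev_destroy()). A minimal sketch of that pairing, outside the patch and with illustrative names, assuming only the devlink core API of this kernel version:

#include <net/devlink.h>

struct example_priv {
	int placeholder;
};

/* Allocate a devlink instance with the private struct embedded behind it. */
static struct example_priv *example_create(const struct devlink_ops *ops)
{
	struct devlink *devlink;
	struct example_priv *priv;

	devlink = devlink_alloc(ops, sizeof(*priv));
	if (!devlink)
		return NULL;
	priv = devlink_priv(devlink);		/* devlink -> private data */
	return priv;
}

static void example_destroy(struct example_priv *priv)
{
	/* private data -> devlink again, which is why nsim_dev_destroy()
	 * only needs the nsim_dev pointer.
	 */
	devlink_free(priv_to_devlink(priv));
}
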
diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c
deleted file mode 100644
index 5135fc371f01..000000000000
--- a/drivers/net/netdevsim/devlink.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (c) 2018 Cumulus Networks. All rights reserved.
- * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com>
- *
- * This software is licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree.
- *
- * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
- * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
- * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
- * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
- * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
- */
-
-#include <linux/device.h>
-#include <net/devlink.h>
-#include <net/netns/generic.h>
-
-#include "netdevsim.h"
-
-static unsigned int nsim_devlink_id;
-
-/* place holder until devlink and namespaces is sorted out */
-static struct net *nsim_devlink_net(struct devlink *devlink)
-{
- return &init_net;
-}
-
-/* IPv4
- */
-static u64 nsim_ipv4_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
-}
-
-static u64 nsim_ipv4_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
-}
-
-/* IPv6
- */
-static u64 nsim_ipv6_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
-}
-
-static u64 nsim_ipv6_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
-}
-
-static int devlink_resources_register(struct devlink *devlink)
-{
- struct devlink_resource_size_params params = {
- .size_max = (u64)-1,
- .size_granularity = 1,
- .unit = DEVLINK_RESOURCE_UNIT_ENTRY
- };
- struct net *net = nsim_devlink_net(devlink);
- int err;
- u64 n;
-
- /* Resources for IPv4 */
- err = devlink_resource_register(devlink, "IPv4", (u64)-1,
- NSIM_RESOURCE_IPV4,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
- if (err) {
- pr_err("Failed to register IPv4 top resource\n");
- goto out;
- }
-
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
- NSIM_RESOURCE_IPV4_FIB,
- NSIM_RESOURCE_IPV4, &params);
- if (err) {
- pr_err("Failed to register IPv4 FIB resource\n");
- return err;
- }
-
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV4, &params);
- if (err) {
- pr_err("Failed to register IPv4 FIB rules resource\n");
- return err;
- }
-
- /* Resources for IPv6 */
- err = devlink_resource_register(devlink, "IPv6", (u64)-1,
- NSIM_RESOURCE_IPV6,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
- if (err) {
- pr_err("Failed to register IPv6 top resource\n");
- goto out;
- }
-
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
- NSIM_RESOURCE_IPV6_FIB,
- NSIM_RESOURCE_IPV6, &params);
- if (err) {
- pr_err("Failed to register IPv6 FIB resource\n");
- return err;
- }
-
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- NSIM_RESOURCE_IPV6, &params);
- if (err) {
- pr_err("Failed to register IPv6 FIB rules resource\n");
- return err;
- }
-
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB,
- nsim_ipv4_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- nsim_ipv4_fib_rules_res_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB,
- nsim_ipv6_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- nsim_ipv6_fib_rules_res_occ_get,
- net);
-out:
- return err;
-}
-
-static int nsim_devlink_reload(struct devlink *devlink,
- struct netlink_ext_ack *extack)
-{
- enum nsim_resource_id res_ids[] = {
- NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
- };
- struct net *net = nsim_devlink_net(devlink);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
- int err;
- u64 val;
-
- err = devlink_resource_size_get(devlink, res_ids[i], &val);
- if (!err) {
- err = nsim_fib_set_max(net, res_ids[i], val, extack);
- if (err)
- return err;
- }
- }
-
- return 0;
-}
-
-static void nsim_devlink_net_reset(struct net *net)
-{
- enum nsim_resource_id res_ids[] = {
- NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
- if (nsim_fib_set_max(net, res_ids[i], (u64)-1, NULL)) {
- pr_err("Failed to reset limit for resource %u\n",
- res_ids[i]);
- }
- }
-}
-
-static const struct devlink_ops nsim_devlink_ops = {
- .reload = nsim_devlink_reload,
-};
-
-/* once devlink / namespace issues are sorted out
- * this needs to be net in which a devlink instance
- * is to be created. e.g., dev_net(ns->netdev)
- */
-static struct net *nsim_to_net(struct netdevsim *ns)
-{
- return &init_net;
-}
-
-void nsim_devlink_teardown(struct netdevsim *ns)
-{
- if (ns->devlink) {
- struct net *net = nsim_to_net(ns);
- bool *reg_devlink = net_generic(net, nsim_devlink_id);
-
- devlink_resources_unregister(ns->devlink, NULL);
- devlink_unregister(ns->devlink);
- devlink_free(ns->devlink);
- ns->devlink = NULL;
-
- nsim_devlink_net_reset(net);
- *reg_devlink = true;
- }
-}
-
-int nsim_devlink_setup(struct netdevsim *ns)
-{
- struct net *net = nsim_to_net(ns);
- bool *reg_devlink = net_generic(net, nsim_devlink_id);
- struct devlink *devlink;
- int err;
-
- /* only one device per namespace controls devlink */
- if (!*reg_devlink) {
- ns->devlink = NULL;
- return 0;
- }
-
- devlink = devlink_alloc(&nsim_devlink_ops, 0);
- if (!devlink)
- return -ENOMEM;
-
- err = devlink_register(devlink, &ns->dev);
- if (err)
- goto err_devlink_free;
-
- err = devlink_resources_register(devlink);
- if (err)
- goto err_dl_unregister;
-
- ns->devlink = devlink;
-
- *reg_devlink = false;
-
- return 0;
-
-err_dl_unregister:
- devlink_unregister(devlink);
-err_devlink_free:
- devlink_free(devlink);
-
- return err;
-}
-
-/* Initialize per network namespace state */
-static int __net_init nsim_devlink_netns_init(struct net *net)
-{
- bool *reg_devlink = net_generic(net, nsim_devlink_id);
-
- *reg_devlink = true;
-
- return 0;
-}
-
-static struct pernet_operations nsim_devlink_net_ops = {
- .init = nsim_devlink_netns_init,
- .id = &nsim_devlink_id,
- .size = sizeof(bool),
-};
-
-void nsim_devlink_exit(void)
-{
- unregister_pernet_subsys(&nsim_devlink_net_ops);
- nsim_fib_exit();
-}
-
-int nsim_devlink_init(void)
-{
- int err;
-
- err = nsim_fib_init();
- if (err)
- goto err_out;
-
- err = register_pernet_subsys(&nsim_devlink_net_ops);
- if (err)
- nsim_fib_exit();
-
-err_out:
- return err;
-}
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index f61d094746c0..8c57ba747772 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,7 +18,6 @@
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
-#include <net/netns/generic.h>
#include "netdevsim.h"
@@ -33,15 +32,14 @@ struct nsim_per_fib_data {
};
struct nsim_fib_data {
+ struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
};
-static unsigned int nsim_fib_net_id;
-
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
switch (res_id) {
@@ -64,10 +62,10 @@ u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
return max ? entry->max : entry->num;
}
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
+int nsim_fib_set_max(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, u64 val,
struct netlink_ext_ack *extack)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
int err = 0;
@@ -120,9 +118,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -157,9 +155,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -178,18 +176,22 @@ static int nsim_fib_event(struct fib_notifier_info *info, bool add)
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
struct fib_notifier_info *info = ptr;
int err = 0;
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
+ err = nsim_fib_rule_event(data, info,
+ event == FIB_EVENT_RULE_ADD);
break;
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(data, info,
+ event == FIB_EVENT_ENTRY_ADD);
break;
}
@@ -199,30 +201,23 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
- struct nsim_fib_data *data;
- struct net *net;
-
- rcu_read_lock();
- for_each_net_rcu(net) {
- data = net_generic(net, nsim_fib_net_id);
-
- data->ipv4.fib.num = 0ULL;
- data->ipv4.rules.num = 0ULL;
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
- data->ipv6.fib.num = 0ULL;
- data->ipv6.rules.num = 0ULL;
- }
- rcu_read_unlock();
+ data->ipv4.fib.num = 0ULL;
+ data->ipv4.rules.num = 0ULL;
+ data->ipv6.fib.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
}
-static struct notifier_block nsim_fib_nb = {
- .notifier_call = nsim_fib_event_nb,
-};
-
-/* Initialize per network namespace state */
-static int __net_init nsim_fib_netns_init(struct net *net)
+struct nsim_fib_data *nsim_fib_create(void)
{
- struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_data *data;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
data->ipv4.fib.max = (u64)-1;
data->ipv4.rules.max = (u64)-1;
@@ -230,37 +225,22 @@ static int __net_init nsim_fib_netns_init(struct net *net)
data->ipv6.fib.max = (u64)-1;
data->ipv6.rules.max = (u64)-1;
- return 0;
-}
-
-static struct pernet_operations nsim_fib_net_ops = {
- .init = nsim_fib_netns_init,
- .id = &nsim_fib_net_id,
- .size = sizeof(struct nsim_fib_data),
-};
-
-void nsim_fib_exit(void)
-{
- unregister_pernet_subsys(&nsim_fib_net_ops);
- unregister_fib_notifier(&nsim_fib_nb);
-}
-
-int nsim_fib_init(void)
-{
- int err;
-
- err = register_pernet_subsys(&nsim_fib_net_ops);
- if (err < 0) {
- pr_err("Failed to register pernet subsystem\n");
- goto err_out;
- }
-
- err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
- if (err < 0) {
+ data->fib_nb.notifier_call = nsim_fib_event_nb;
+ err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
+ if (err) {
pr_err("Failed to register fib notifier\n");
goto err_out;
}
+ return data;
+
err_out:
- return err;
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+void nsim_fib_destroy(struct nsim_fib_data *data)
+{
+ unregister_fib_notifier(&data->fib_nb);
+ kfree(data);
}
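
The fib.c rework above drops the per-netns net_generic() lookup in favour of per-instance state: the notifier_block is embedded in struct nsim_fib_data and the callback recovers its owner with container_of(). A small illustration of that idiom (the names below are made up for the sketch, not part of the patch):

#include <linux/kernel.h>
#include <linux/notifier.h>

struct example_fib_data {
	struct notifier_block nb;	/* embedded, not a global */
	unsigned long events;
};

static int example_fib_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	/* Recover the owning structure from the embedded member. */
	struct example_fib_data *data =
		container_of(nb, struct example_fib_data, nb);

	data->events++;			/* per-instance accounting */
	return NOTIFY_OK;
}
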
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 76e11d889bb6..e27fc1a4516d 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -283,7 +283,8 @@ void nsim_ipsec_init(struct netdevsim *ns)
ns->netdev->features |= NSIM_ESP_FEATURES;
ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES;
- ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns,
+ ns->ipsec.pfile = debugfs_create_file("ipsec", 0400,
+ ns->nsim_dev_port->ddir, ns,
&ipsec_dbg_fops);
}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 75a50b59cb8f..e5c8aa08e1cd 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -25,230 +25,6 @@
#include "netdevsim.h"
-struct nsim_vf_config {
- int link_state;
- u16 min_tx_rate;
- u16 max_tx_rate;
- u16 vlan;
- __be16 vlan_proto;
- u16 qos;
- u8 vf_mac[ETH_ALEN];
- bool spoofchk_enabled;
- bool trusted;
- bool rss_query_enabled;
-};
-
-static u32 nsim_dev_id;
-
-static struct dentry *nsim_ddir;
-static struct dentry *nsim_sdev_ddir;
-
-static int nsim_num_vf(struct device *dev)
-{
- struct netdevsim *ns = to_nsim(dev);
-
- return ns->num_vfs;
-}
-
-static struct bus_type nsim_bus = {
- .name = DRV_NAME,
- .dev_name = DRV_NAME,
- .num_vf = nsim_num_vf,
-};
-
-static int nsim_vfs_enable(struct netdevsim *ns, unsigned int num_vfs)
-{
- ns->vfconfigs = kcalloc(num_vfs, sizeof(struct nsim_vf_config),
- GFP_KERNEL);
- if (!ns->vfconfigs)
- return -ENOMEM;
- ns->num_vfs = num_vfs;
-
- return 0;
-}
-
-static void nsim_vfs_disable(struct netdevsim *ns)
-{
- kfree(ns->vfconfigs);
- ns->vfconfigs = NULL;
- ns->num_vfs = 0;
-}
-
-static ssize_t
-nsim_numvfs_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct netdevsim *ns = to_nsim(dev);
- unsigned int num_vfs;
- int ret;
-
- ret = kstrtouint(buf, 0, &num_vfs);
- if (ret)
- return ret;
-
- rtnl_lock();
- if (ns->num_vfs == num_vfs)
- goto exit_good;
- if (ns->num_vfs && num_vfs) {
- ret = -EBUSY;
- goto exit_unlock;
- }
-
- if (num_vfs) {
- ret = nsim_vfs_enable(ns, num_vfs);
- if (ret)
- goto exit_unlock;
- } else {
- nsim_vfs_disable(ns);
- }
-exit_good:
- ret = count;
-exit_unlock:
- rtnl_unlock();
-
- return ret;
-}
-
-static ssize_t
-nsim_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct netdevsim *ns = to_nsim(dev);
-
- return sprintf(buf, "%u\n", ns->num_vfs);
-}
-
-static struct device_attribute nsim_numvfs_attr =
- __ATTR(sriov_numvfs, 0664, nsim_numvfs_show, nsim_numvfs_store);
-
-static struct attribute *nsim_dev_attrs[] = {
- &nsim_numvfs_attr.attr,
- NULL,
-};
-
-static const struct attribute_group nsim_dev_attr_group = {
- .attrs = nsim_dev_attrs,
-};
-
-static const struct attribute_group *nsim_dev_attr_groups[] = {
- &nsim_dev_attr_group,
- NULL,
-};
-
-static void nsim_dev_release(struct device *dev)
-{
- struct netdevsim *ns = to_nsim(dev);
-
- nsim_vfs_disable(ns);
- free_netdev(ns->netdev);
-}
-
-static struct device_type nsim_dev_type = {
- .groups = nsim_dev_attr_groups,
- .release = nsim_dev_release,
-};
-
-static int nsim_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct netdevsim *ns = netdev_priv(dev);
-
- ppid->id_len = sizeof(ns->sdev->switch_id);
- memcpy(&ppid->id, &ns->sdev->switch_id, ppid->id_len);
- return 0;
-}
-
-static int nsim_init(struct net_device *dev)
-{
- char sdev_ddir_name[10], sdev_link_name[32];
- struct netdevsim *ns = netdev_priv(dev);
- int err;
-
- ns->netdev = dev;
- ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
- if (IS_ERR_OR_NULL(ns->ddir))
- return -ENOMEM;
-
- if (!ns->sdev) {
- ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL);
- if (!ns->sdev) {
- err = -ENOMEM;
- goto err_debugfs_destroy;
- }
- ns->sdev->refcnt = 1;
- ns->sdev->switch_id = nsim_dev_id;
- sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
- ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name,
- nsim_sdev_ddir);
- if (IS_ERR_OR_NULL(ns->sdev->ddir)) {
- err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL;
- goto err_sdev_free;
- }
- } else {
- sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
- ns->sdev->refcnt++;
- }
-
- sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%s", sdev_ddir_name);
- debugfs_create_symlink("sdev", ns->ddir, sdev_link_name);
-
- err = nsim_bpf_init(ns);
- if (err)
- goto err_sdev_destroy;
-
- ns->dev.id = nsim_dev_id++;
- ns->dev.bus = &nsim_bus;
- ns->dev.type = &nsim_dev_type;
- err = device_register(&ns->dev);
- if (err)
- goto err_bpf_uninit;
-
- SET_NETDEV_DEV(dev, &ns->dev);
-
- err = nsim_devlink_setup(ns);
- if (err)
- goto err_unreg_dev;
-
- nsim_ipsec_init(ns);
-
- return 0;
-
-err_unreg_dev:
- device_unregister(&ns->dev);
-err_bpf_uninit:
- nsim_bpf_uninit(ns);
-err_sdev_destroy:
- if (!--ns->sdev->refcnt) {
- debugfs_remove_recursive(ns->sdev->ddir);
-err_sdev_free:
- kfree(ns->sdev);
- }
-err_debugfs_destroy:
- debugfs_remove_recursive(ns->ddir);
- return err;
-}
-
-static void nsim_uninit(struct net_device *dev)
-{
- struct netdevsim *ns = netdev_priv(dev);
-
- nsim_ipsec_teardown(ns);
- nsim_devlink_teardown(ns);
- debugfs_remove_recursive(ns->ddir);
- nsim_bpf_uninit(ns);
- if (!--ns->sdev->refcnt) {
- debugfs_remove_recursive(ns->sdev->ddir);
- kfree(ns->sdev);
- }
-}
-
-static void nsim_free(struct net_device *dev)
-{
- struct netdevsim *ns = netdev_priv(dev);
-
- device_unregister(&ns->dev);
- /* netdev and vf state will be freed out of device_release() */
-}
-
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
@@ -325,11 +101,12 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
/* Only refuse multicast addresses, zero address can mean unset/any. */
- if (vf >= ns->num_vfs || is_multicast_ether_addr(mac))
+ if (vf >= nsim_bus_dev->num_vfs || is_multicast_ether_addr(mac))
return -EINVAL;
- memcpy(ns->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
+ memcpy(nsim_bus_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
return 0;
}
@@ -338,13 +115,14 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf,
u16 vlan, u8 qos, __be16 vlan_proto)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs || vlan > 4095 || qos > 7)
+ if (vf >= nsim_bus_dev->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
- ns->vfconfigs[vf].vlan = vlan;
- ns->vfconfigs[vf].qos = qos;
- ns->vfconfigs[vf].vlan_proto = vlan_proto;
+ nsim_bus_dev->vfconfigs[vf].vlan = vlan;
+ nsim_bus_dev->vfconfigs[vf].qos = qos;
+ nsim_bus_dev->vfconfigs[vf].vlan_proto = vlan_proto;
return 0;
}
@@ -352,12 +130,13 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf,
static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
- ns->vfconfigs[vf].min_tx_rate = min;
- ns->vfconfigs[vf].max_tx_rate = max;
+ nsim_bus_dev->vfconfigs[vf].min_tx_rate = min;
+ nsim_bus_dev->vfconfigs[vf].max_tx_rate = max;
return 0;
}
@@ -365,10 +144,11 @@ static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
- ns->vfconfigs[vf].spoofchk_enabled = val;
+ nsim_bus_dev->vfconfigs[vf].spoofchk_enabled = val;
return 0;
}
@@ -376,10 +156,11 @@ static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
- ns->vfconfigs[vf].rss_query_enabled = val;
+ nsim_bus_dev->vfconfigs[vf].rss_query_enabled = val;
return 0;
}
@@ -387,10 +168,11 @@ static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
- ns->vfconfigs[vf].trusted = val;
+ nsim_bus_dev->vfconfigs[vf].trusted = val;
return 0;
}
@@ -399,21 +181,22 @@ static int
nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
ivi->vf = vf;
- ivi->linkstate = ns->vfconfigs[vf].link_state;
- ivi->min_tx_rate = ns->vfconfigs[vf].min_tx_rate;
- ivi->max_tx_rate = ns->vfconfigs[vf].max_tx_rate;
- ivi->vlan = ns->vfconfigs[vf].vlan;
- ivi->vlan_proto = ns->vfconfigs[vf].vlan_proto;
- ivi->qos = ns->vfconfigs[vf].qos;
- memcpy(&ivi->mac, ns->vfconfigs[vf].vf_mac, ETH_ALEN);
- ivi->spoofchk = ns->vfconfigs[vf].spoofchk_enabled;
- ivi->trusted = ns->vfconfigs[vf].trusted;
- ivi->rss_query_en = ns->vfconfigs[vf].rss_query_enabled;
+ ivi->linkstate = nsim_bus_dev->vfconfigs[vf].link_state;
+ ivi->min_tx_rate = nsim_bus_dev->vfconfigs[vf].min_tx_rate;
+ ivi->max_tx_rate = nsim_bus_dev->vfconfigs[vf].max_tx_rate;
+ ivi->vlan = nsim_bus_dev->vfconfigs[vf].vlan;
+ ivi->vlan_proto = nsim_bus_dev->vfconfigs[vf].vlan_proto;
+ ivi->qos = nsim_bus_dev->vfconfigs[vf].qos;
+ memcpy(&ivi->mac, nsim_bus_dev->vfconfigs[vf].vf_mac, ETH_ALEN);
+ ivi->spoofchk = nsim_bus_dev->vfconfigs[vf].spoofchk_enabled;
+ ivi->trusted = nsim_bus_dev->vfconfigs[vf].trusted;
+ ivi->rss_query_en = nsim_bus_dev->vfconfigs[vf].rss_query_enabled;
return 0;
}
@@ -421,8 +204,9 @@ nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
switch (state) {
@@ -434,7 +218,7 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
return -EINVAL;
}
- ns->vfconfigs[vf].link_state = state;
+ nsim_bus_dev->vfconfigs[vf].link_state = state;
return 0;
}
@@ -461,9 +245,14 @@ nsim_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
+static struct devlink_port *nsim_get_devlink_port(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ return &ns->nsim_dev_port->devlink_port;
+}
+
static const struct net_device_ops nsim_netdev_ops = {
- .ndo_init = nsim_init,
- .ndo_uninit = nsim_uninit,
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
@@ -481,7 +270,7 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
- .ndo_get_port_parent_id = nsim_get_port_parent_id,
+ .ndo_get_devlink_port = nsim_get_devlink_port,
};
static void nsim_setup(struct net_device *dev)
@@ -489,9 +278,6 @@ static void nsim_setup(struct net_device *dev)
ether_setup(dev);
eth_hw_addr_random(dev);
- dev->netdev_ops = &nsim_netdev_ops;
- dev->priv_destructor = nsim_free;
-
dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
@@ -506,104 +292,102 @@ static void nsim_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
}
-static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+struct netdevsim *
+nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
{
- if (tb[IFLA_ADDRESS]) {
- if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
- return -EINVAL;
- if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
- return -EADDRNOTAVAIL;
- }
- return 0;
+ struct net_device *dev;
+ struct netdevsim *ns;
+ int err;
+
+ dev = alloc_netdev(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ ns = netdev_priv(dev);
+ ns->netdev = dev;
+ ns->nsim_dev = nsim_dev;
+ ns->nsim_dev_port = nsim_dev_port;
+ ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
+ dev->netdev_ops = &nsim_netdev_ops;
+
+ rtnl_lock();
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_free_netdev;
+
+ nsim_ipsec_init(ns);
+
+ err = register_netdevice(dev);
+ if (err)
+ goto err_ipsec_teardown;
+ rtnl_unlock();
+
+ return ns;
+
+err_ipsec_teardown:
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+ rtnl_unlock();
+err_free_netdev:
+ free_netdev(dev);
+ return ERR_PTR(err);
}
-static int nsim_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+void nsim_destroy(struct netdevsim *ns)
{
- struct netdevsim *ns = netdev_priv(dev);
-
- if (tb[IFLA_LINK]) {
- struct net_device *joindev;
- struct netdevsim *joinns;
-
- joindev = __dev_get_by_index(src_net,
- nla_get_u32(tb[IFLA_LINK]));
- if (!joindev)
- return -ENODEV;
- if (joindev->netdev_ops != &nsim_netdev_ops)
- return -EINVAL;
-
- joinns = netdev_priv(joindev);
- if (!joinns->sdev || !joinns->sdev->refcnt)
- return -EINVAL;
- ns->sdev = joinns->sdev;
- }
+ struct net_device *dev = ns->netdev;
- return register_netdevice(dev);
+ rtnl_lock();
+ unregister_netdevice(dev);
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+ rtnl_unlock();
+ free_netdev(dev);
}
-static void nsim_dellink(struct net_device *dev, struct list_head *head)
+static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
- unregister_netdevice_queue(dev, head);
+ NL_SET_ERR_MSG_MOD(extack, "Please use: echo \"[ID] [PORT_COUNT]\" > /sys/bus/netdevsim/new_device");
+ return -EOPNOTSUPP;
}
static struct rtnl_link_ops nsim_link_ops __read_mostly = {
.kind = DRV_NAME,
- .priv_size = sizeof(struct netdevsim),
- .setup = nsim_setup,
.validate = nsim_validate,
- .newlink = nsim_newlink,
- .dellink = nsim_dellink,
};
static int __init nsim_module_init(void)
{
int err;
- nsim_ddir = debugfs_create_dir(DRV_NAME, NULL);
- if (IS_ERR_OR_NULL(nsim_ddir))
- return -ENOMEM;
-
- nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
- if (IS_ERR_OR_NULL(nsim_sdev_ddir)) {
- err = -ENOMEM;
- goto err_debugfs_destroy;
- }
-
- err = bus_register(&nsim_bus);
+ err = nsim_dev_init();
if (err)
- goto err_sdir_destroy;
+ return err;
- err = nsim_devlink_init();
+ err = nsim_bus_init();
if (err)
- goto err_unreg_bus;
+ goto err_dev_exit;
err = rtnl_link_register(&nsim_link_ops);
if (err)
- goto err_dl_fini;
+ goto err_bus_exit;
return 0;
-err_dl_fini:
- nsim_devlink_exit();
-err_unreg_bus:
- bus_unregister(&nsim_bus);
-err_sdir_destroy:
- debugfs_remove_recursive(nsim_sdev_ddir);
-err_debugfs_destroy:
- debugfs_remove_recursive(nsim_ddir);
+err_bus_exit:
+ nsim_bus_exit();
+err_dev_exit:
+ nsim_dev_exit();
return err;
}
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
- nsim_devlink_exit();
- bus_unregister(&nsim_bus);
- debugfs_remove_recursive(nsim_sdev_ddir);
- debugfs_remove_recursive(nsim_ddir);
+ nsim_bus_exit();
+ nsim_dev_exit();
}
module_init(nsim_module_init);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 384c254fafc5..3f398797c2bc 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/devlink.h>
#include <net/xdp.h>
#define DRV_NAME "netdevsim"
@@ -26,26 +27,6 @@
#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg)
-struct bpf_prog;
-struct bpf_offload_dev;
-struct dentry;
-struct nsim_vf_config;
-
-struct netdevsim_shared_dev {
- unsigned int refcnt;
- u32 switch_id;
-
- struct dentry *ddir;
-
- struct bpf_offload_dev *bpf_dev;
-
- struct dentry *ddir_bpf_bound_progs;
- u32 prog_id_gen;
-
- struct list_head bpf_bound_progs;
- struct list_head bpf_bound_maps;
-};
-
#define NSIM_IPSEC_MAX_SA_COUNT 33
#define NSIM_IPSEC_VALID BIT(31)
@@ -69,18 +50,14 @@ struct nsim_ipsec {
struct netdevsim {
struct net_device *netdev;
+ struct nsim_dev *nsim_dev;
+ struct nsim_dev_port *nsim_dev_port;
u64 tx_packets;
u64 tx_bytes;
struct u64_stats_sync syncp;
- struct device dev;
- struct netdevsim_shared_dev *sdev;
-
- struct dentry *ddir;
-
- unsigned int num_vfs;
- struct nsim_vf_config *vfconfigs;
+ struct nsim_bus_dev *nsim_bus_dev;
struct bpf_prog *bpf_offloaded;
u32 bpf_offloaded_id;
@@ -88,22 +65,22 @@ struct netdevsim {
struct xdp_attachment_info xdp;
struct xdp_attachment_info xdp_hw;
- bool bpf_bind_accept;
- u32 bpf_bind_verifier_delay;
-
bool bpf_tc_accept;
bool bpf_tc_non_bound_accept;
bool bpf_xdpdrv_accept;
bool bpf_xdpoffload_accept;
bool bpf_map_accept;
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
- struct devlink *devlink;
-#endif
struct nsim_ipsec ipsec;
};
+struct netdevsim *
+nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
+void nsim_destroy(struct netdevsim *ns);
+
#ifdef CONFIG_BPF_SYSCALL
+int nsim_bpf_dev_init(struct nsim_dev *nsim_dev);
+void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev);
int nsim_bpf_init(struct netdevsim *ns);
void nsim_bpf_uninit(struct netdevsim *ns);
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf);
@@ -111,6 +88,15 @@ int nsim_bpf_disable_tc(struct netdevsim *ns);
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv);
#else
+
+static inline int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
+{
+ return 0;
+}
+
+static inline void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
+{
+}
static inline int nsim_bpf_init(struct netdevsim *ns)
{
return 0;
@@ -138,7 +124,6 @@ nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
#endif
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
enum nsim_resource_id {
NSIM_RESOURCE_NONE, /* DEVLINK_RESOURCE_ID_PARENT_TOP */
NSIM_RESOURCE_IPV4,
@@ -149,36 +134,47 @@ enum nsim_resource_id {
NSIM_RESOURCE_IPV6_FIB_RULES,
};
-int nsim_devlink_setup(struct netdevsim *ns);
-void nsim_devlink_teardown(struct netdevsim *ns);
+struct nsim_dev_port {
+ struct list_head list;
+ struct devlink_port devlink_port;
+ unsigned int port_index;
+ struct dentry *ddir;
+ struct netdevsim *ns;
+};
-int nsim_devlink_init(void);
-void nsim_devlink_exit(void);
+struct nsim_dev {
+ struct nsim_bus_dev *nsim_bus_dev;
+ struct nsim_fib_data *fib_data;
+ struct dentry *ddir;
+ struct dentry *ports_ddir;
+ struct bpf_offload_dev *bpf_dev;
+ bool bpf_bind_accept;
+ u32 bpf_bind_verifier_delay;
+ struct dentry *ddir_bpf_bound_progs;
+ u32 prog_id_gen;
+ struct list_head bpf_bound_progs;
+ struct list_head bpf_bound_maps;
+ struct netdev_phys_item_id switch_id;
+ struct list_head port_list;
+ struct mutex port_list_lock; /* protects port list */
+};
-int nsim_fib_init(void);
-void nsim_fib_exit(void);
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
+int nsim_dev_init(void);
+void nsim_dev_exit(void);
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
+void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev);
+int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index);
+int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index);
+
+struct nsim_fib_data *nsim_fib_create(void);
+void nsim_fib_destroy(struct nsim_fib_data *fib_data);
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max);
+int nsim_fib_set_max(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, u64 val,
struct netlink_ext_ack *extack);
-#else
-static inline int nsim_devlink_setup(struct netdevsim *ns)
-{
- return 0;
-}
-
-static inline void nsim_devlink_teardown(struct netdevsim *ns)
-{
-}
-
-static inline int nsim_devlink_init(void)
-{
- return 0;
-}
-
-static inline void nsim_devlink_exit(void)
-{
-}
-#endif
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
void nsim_ipsec_init(struct netdevsim *ns);
@@ -199,7 +195,26 @@ static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
}
#endif
-static inline struct netdevsim *to_nsim(struct device *ptr)
-{
- return container_of(ptr, struct netdevsim, dev);
-}
+struct nsim_vf_config {
+ int link_state;
+ u16 min_tx_rate;
+ u16 max_tx_rate;
+ u16 vlan;
+ __be16 vlan_proto;
+ u16 qos;
+ u8 vf_mac[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool trusted;
+ bool rss_query_enabled;
+};
+
+struct nsim_bus_dev {
+ struct device dev;
+ struct list_head list;
+ unsigned int port_count;
+ unsigned int num_vfs;
+ struct nsim_vf_config *vfconfigs;
+};
+
+int nsim_bus_init(void);
+void nsim_bus_exit(void);
diff --git a/drivers/net/netdevsim/sdev.c b/drivers/net/netdevsim/sdev.c
new file mode 100644
index 000000000000..6712da3340d6
--- /dev/null
+++ b/drivers/net/netdevsim/sdev.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "netdevsim.h"
+
+static struct dentry *nsim_sdev_ddir;
+
+static u32 nsim_sdev_id;
+
+struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns)
+{
+ struct netdevsim_shared_dev *sdev;
+ char sdev_ddir_name[10];
+ int err;
+
+ if (joinns) {
+ if (WARN_ON(!joinns->sdev))
+ return ERR_PTR(-EINVAL);
+ sdev = joinns->sdev;
+ sdev->refcnt++;
+ return sdev;
+ }
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return ERR_PTR(-ENOMEM);
+ sdev->refcnt = 1;
+ sdev->switch_id = nsim_sdev_id++;
+
+ sprintf(sdev_ddir_name, "%u", sdev->switch_id);
+ sdev->ddir = debugfs_create_dir(sdev_ddir_name, nsim_sdev_ddir);
+ if (IS_ERR_OR_NULL(sdev->ddir)) {
+ err = PTR_ERR_OR_ZERO(sdev->ddir) ?: -EINVAL;
+ goto err_sdev_free;
+ }
+
+ return sdev;
+
+err_sdev_free:
+ nsim_sdev_id--;
+ kfree(sdev);
+ return ERR_PTR(err);
+}
+
+void nsim_sdev_put(struct netdevsim_shared_dev *sdev)
+{
+ if (--sdev->refcnt)
+ return;
+ debugfs_remove_recursive(sdev->ddir);
+ kfree(sdev);
+}
+
+int nsim_sdev_init(void)
+{
+ nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
+ if (IS_ERR_OR_NULL(nsim_sdev_ddir))
+ return -ENOMEM;
+ return 0;
+}
+
+void nsim_sdev_exit(void)
+{
+ debugfs_remove_recursive(nsim_sdev_ddir);
+}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 520657945b82..d6299710d634 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -76,6 +76,17 @@ config MDIO_BUS_MUX_GPIO
several child MDIO busses to a parent bus. Child bus
selection is under the control of GPIO lines.
+config MDIO_BUS_MUX_MESON_G12A
+ tristate "Amlogic G12a based MDIO bus multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+ select MDIO_BUS_MUX
+ default m if ARCH_MESON
+ help
+ This module provides a driver for the MDIO multiplexer/glue of
+ the Amlogic G12a SoC. The multiplexer connects either the external
+ or the internal MDIO bus to the parent bus.
+
config MDIO_BUS_MUX_MMIOREG
tristate "MMIO device-controlled MDIO bus multiplexers"
depends on OF_MDIO && HAS_IOMEM
@@ -273,13 +284,13 @@ config BCM87XX_PHY
Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
config BCM_CYGNUS_PHY
- tristate "Broadcom Cygnus SoC internal PHY"
- depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ tristate "Broadcom Cygnus/Omega SoC internal PHY"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on MDIO_BCM_IPROC
select BCM_NET_PHYLIB
---help---
This PHY driver is for the 1G internal PHYs of the Broadcom
- Cygnus Family SoC.
+ Cygnus and Omega family SoCs.
Currently supports internal PHY's used in the BCM11300,
BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
@@ -397,7 +408,7 @@ config MICROCHIP_T1_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
---help---
- Currently supports VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
+ Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index ece5dae67174..27d7f9f3b0de 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 65b4b0960b1e..eef35f8c8d45 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -60,7 +60,7 @@ static struct phy_driver am79c_driver[] = { {
.phy_id = PHY_ID_AM79C874,
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = am79c_config_init,
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
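
The aquantia_main.c diff that follows converts the hand-shifted rate constants to GENMASK()/FIELD_GET() and uses the same idiom for the new vendor registers. A minimal, self-contained illustration of that decoding step (the register name and value here are hypothetical, chosen only to mirror the rate field in the diff below):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_RATE_MASK	GENMASK(3, 1)	/* rate code lives in bits 3:1 */

static unsigned int example_decode_rate(unsigned int reg_val)
{
	/* FIELD_GET() masks and shifts in one step: for reg_val = 0x6
	 * (binary 0110) it returns 3, matching the 10GBASET code below,
	 * which the old macros spelled as (0x3 << 1).
	 */
	return FIELD_GET(EXAMPLE_RATE_MASK, reg_val);
}
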
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 37218e5d7cc9..eed4fe3d871f 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/bitfield.h>
#include <linux/phy.h>
#include "aquantia.h"
@@ -22,20 +23,33 @@
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
+
#define MDIO_AN_VEND_PROV 0xc400
#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
#define MDIO_AN_TX_VEND_STATUS1 0xc800
-#define MDIO_AN_TX_VEND_STATUS1_10BASET (0x0 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_100BASETX (0x1 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_1000BASET (0x2 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_10GBASET (0x3 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_2500BASET (0x4 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_5000BASET (0x5 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK (0x7 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK GENMASK(3, 1)
+#define MDIO_AN_TX_VEND_STATUS1_10BASET 0
+#define MDIO_AN_TX_VEND_STATUS1_100BASETX 1
+#define MDIO_AN_TX_VEND_STATUS1_1000BASET 2
+#define MDIO_AN_TX_VEND_STATUS1_10GBASET 3
+#define MDIO_AN_TX_VEND_STATUS1_2500BASET 4
+#define MDIO_AN_TX_VEND_STATUS1_5000BASET 5
#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX BIT(0)
+#define MDIO_AN_TX_VEND_INT_STATUS1 0xcc00
+#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1)
+
#define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
@@ -44,8 +58,42 @@
#define MDIO_AN_RX_LP_STAT1 0xe820
#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL BIT(15)
#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF BIT(14)
+#define MDIO_AN_RX_LP_STAT1_SHORT_REACH BIT(13)
+#define MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT BIT(12)
+#define MDIO_AN_RX_LP_STAT1_AQ_PHY BIT(2)
+
+#define MDIO_AN_RX_LP_STAT4 0xe823
+#define MDIO_AN_RX_LP_STAT4_FW_MAJOR GENMASK(15, 8)
+#define MDIO_AN_RX_LP_STAT4_FW_MINOR GENMASK(7, 0)
+
+#define MDIO_AN_RX_VEND_STAT3 0xe832
+#define MDIO_AN_RX_VEND_STAT3_AFR BIT(0)
+
+/* MDIO_MMD_C22EXT */
+#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292
+#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294
+#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297
+#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313
+#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315
+#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317
+#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318
+#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319
+#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
+#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
/* Vendor specific 1, MDIO_MMD_VEND1 */
+#define VEND1_GLOBAL_FW_ID 0x0020
+#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
+#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+
+#define VEND1_GLOBAL_RSVD_STAT1 0xc885
+#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
+#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
+
+#define VEND1_GLOBAL_RSVD_STAT9 0xc88d
+#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
+#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
+
#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
@@ -72,6 +120,88 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+struct aqr107_hw_stat {
+ const char *name;
+ int reg;
+ int size;
+};
+
+#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
+static const struct aqr107_hw_stat aqr107_hw_stats[] = {
+ SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16),
+ SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22),
+};
+#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+
+struct aqr107_priv {
+ u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+};
+
+static int aqr107_get_sset_count(struct phy_device *phydev)
+{
+ return AQR107_SGMII_STAT_SZ;
+}
+
+static void aqr107_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < AQR107_SGMII_STAT_SZ; i++)
+ strscpy(data + i * ETH_GSTRING_LEN, aqr107_hw_stats[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static u64 aqr107_get_stat(struct phy_device *phydev, int index)
+{
+ const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
+ int len_l = min(stat->size, 16);
+ int len_h = stat->size - len_l;
+ u64 ret;
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
+ if (val < 0)
+ return U64_MAX;
+
+ ret = val & GENMASK(len_l - 1, 0);
+ if (len_h) {
+ val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
+ if (val < 0)
+ return U64_MAX;
+
+ ret += (val & GENMASK(len_h - 1, 0)) << 16;
+ }
+
+ return ret;
+}
+
+static void aqr107_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ u64 val;
+ int i;
+
+ for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) {
+ val = aqr107_get_stat(phydev, i);
+ if (val == U64_MAX)
+ phydev_err(phydev, "Reading HW Statistics failed for %s\n",
+ aqr107_hw_stats[i].name);
+ else
+ priv->sgmii_stats[i] += val;
+
+ data[i] = priv->sgmii_stats[i];
+ }
+}
+
static int aqr_config_aneg(struct phy_device *phydev)
{
bool changed = false;
@@ -112,41 +242,22 @@ static int aqr_config_aneg(struct phy_device *phydev)
static int aqr_config_intr(struct phy_device *phydev)
{
+ bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- err = phy_write_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_MASK2,
- MDIO_AN_TX_VEND_INT_MASK2_LINK);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_STD_MASK,
- VEND1_GLOBAL_INT_STD_MASK_ALL);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_VEND_MASK,
- VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
- VEND1_GLOBAL_INT_VEND_MASK_AN);
- } else {
- err = phy_write_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_MASK2, 0);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_STD_MASK, 0);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_VEND_MASK, 0);
- }
+ err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
+ en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_STD_MASK,
+ en ? VEND1_GLOBAL_INT_STD_MASK_ALL : 0);
+ if (err < 0)
+ return err;
- return err;
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
+ en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
+ VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
}
static int aqr_ack_interrupt(struct phy_device *phydev)
@@ -178,21 +289,315 @@ static int aqr_read_status(struct phy_device *phydev)
return genphy_c45_read_status(phydev);
}
+static int aqr107_read_downshift_event(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS1);
+ if (val < 0)
+ return val;
+
+ return !!(val & MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT);
+}
+
+static int aqr107_read_rate(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
+ if (val < 0)
+ return val;
+
+ switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
+ case MDIO_AN_TX_VEND_STATUS1_10BASET:
+ phydev->speed = SPEED_10;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_100BASETX:
+ phydev->speed = SPEED_100;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_1000BASET:
+ phydev->speed = SPEED_1000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_2500BASET:
+ phydev->speed = SPEED_2500;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_5000BASET:
+ phydev->speed = SPEED_5000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_10GBASET:
+ phydev->speed = SPEED_10000;
+ break;
+ default:
+ phydev->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ return 0;
+}
+
+static int aqr107_read_status(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = aqr_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
+ return 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS);
+ if (val < 0)
+ return val;
+
+ switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ default:
+ phydev->interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ val = aqr107_read_downshift_event(phydev);
+ if (val <= 0)
+ return val;
+
+ phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
+
+ /* Read downshifted rate from vendor register */
+ return aqr107_read_rate(phydev);
+}
+
+static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
+
+ *data = enable && cnt ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int aqr107_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val = 0;
+
+ if (!FIELD_FIT(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt))
+ return -E2BIG;
+
+ if (cnt != DOWNSHIFT_DEV_DISABLE) {
+ val = MDIO_AN_VEND_PROV_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt);
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_DOWNSHIFT_EN |
+ MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
+}
+
+static int aqr107_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return aqr107_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aqr107_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return aqr107_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* If we configure settings whilst the firmware is still initializing the
+ * chip, those settings may be overwritten. Therefore make sure chip
+ * initialization has completed before configuring. Use the presence of
+ * the firmware ID as an indicator that initialization has completed.
+ * The chip also provides a "reset completed" bit, but it is cleared on
+ * read, so this function would time out if it were called a second time.
+ */
+static int aqr107_wait_reset_complete(struct phy_device *phydev)
+{
+ int val, retries = 100;
+
+ do {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+ if (val < 0)
+ return val;
+ msleep(20);
+ } while (!val && --retries);
+
+ return val ? 0 : -ETIMEDOUT;
+}
+
+static void aqr107_chip_info(struct phy_device *phydev)
+{
+ u8 fw_major, fw_minor, build_id, prov_id;
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+ if (val < 0)
+ return;
+
+ fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
+ fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
+ if (val < 0)
+ return;
+
+ build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
+ prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
+
+ phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
+ fw_major, fw_minor, build_id, prov_id);
+}
+
+static int aqr107_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Check that the PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
+ phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ return -ENODEV;
+
+ ret = aqr107_wait_reset_complete(phydev);
+ if (!ret)
+ aqr107_chip_info(phydev);
+
+ /* ensure that a latched downshift event is cleared */
+ aqr107_read_downshift_event(phydev);
+
+ return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
static int aqcs109_config_init(struct phy_device *phydev)
{
+ int ret;
+
+ /* Check that the PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
+ return -ENODEV;
+
+ ret = aqr107_wait_reset_complete(phydev);
+ if (!ret)
+ aqr107_chip_info(phydev);
+
/* AQCS109 belongs to a chip family partially supporting 10G and 5G.
* PMA speed ability bits are the same for all members of the family,
* AQCS109 however supports speeds up to 2.5G only.
*/
- return phy_set_max_speed(phydev, SPEED_2500);
+ ret = phy_set_max_speed(phydev, SPEED_2500);
+ if (ret)
+ return ret;
+
+ /* ensure that a latched downshift event is cleared */
+ aqr107_read_downshift_event(phydev);
+
+ return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
+static void aqr107_link_change_notify(struct phy_device *phydev)
+{
+ u8 fw_major, fw_minor;
+ bool downshift, short_reach, afr;
+ int mode, val;
+
+ if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE)
+ return;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
+ /* read failed or the link partner is not an Aquantia PHY */
+ if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY))
+ return;
+
+ short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH;
+ downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4);
+ if (val < 0)
+ return;
+
+ fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val);
+ fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3);
+ if (val < 0)
+ return;
+
+ afr = val & MDIO_AN_RX_VEND_STAT3_AFR;
+
+ phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n",
+ fw_major, fw_minor,
+ short_reach ? ", short reach mode" : "",
+ downshift ? ", fast-retrain downshift advertised" : "",
+ afr ? ", fast reframe advertised" : "");
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9);
+ if (val < 0)
+ return;
+
+ mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val);
+ if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2)
+ phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
+}
+
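+/* Suspend/resume are implemented by setting/clearing the low-power bit
+ * in the Vendor 1 copy of MDIO_CTRL1.
+ */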
+static int aqr107_suspend(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int aqr107_resume(struct phy_device *phydev)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int aqr107_probe(struct phy_device *phydev)
+{
+ phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+ sizeof(struct aqr107_priv), GFP_KERNEL);
+ if (!phydev->priv)
+ return -ENOMEM;
+
+ return aqr_hwmon_probe(phydev);
}
static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
.name = "Aquantia AQ1202",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
@@ -201,8 +606,6 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQ2104),
.name = "Aquantia AQ2104",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
@@ -211,8 +614,6 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
.name = "Aquantia AQR105",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
@@ -221,8 +622,6 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
.name = "Aquantia AQR106",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
@@ -231,31 +630,42 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
- .probe = aqr_hwmon_probe,
+ .probe = aqr107_probe,
+ .config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
- .read_status = aqr_read_status,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
- .probe = aqr_hwmon_probe,
+ .probe = aqr107_probe,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
- .read_status = aqr_read_status,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
.name = "Aquantia AQR405",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c
index f14ba5366b91..79bf7ef1fcfd 100644
--- a/drivers/net/phy/asix.c
+++ b/drivers/net/phy/asix.c
@@ -43,7 +43,7 @@ static struct phy_driver asix_driver[] = { {
.phy_id = PHY_ID_ASIX_AX88796B,
.name = "Asix Electronics AX88796B",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.soft_reset = asix_soft_reset,
} };
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f3e96191eb6f..222ccd9ecfce 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -324,8 +324,6 @@ static int at803x_config_intr(struct phy_device *phydev)
static void at803x_link_change_notify(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
-
/*
* Conduct a hardware reset for AT8030 every time a link loss is
* signalled. This is necessary to circumvent a hardware bug that
@@ -333,25 +331,19 @@ static void at803x_link_change_notify(struct phy_device *phydev)
* in the FIFO. In such cases, the FIFO enters an error mode it
* cannot recover from by software.
*/
- if (phydev->state == PHY_NOLINK) {
- if (phydev->mdio.reset && !priv->phy_reset) {
- struct at803x_context context;
+ if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
+ struct at803x_context context;
- at803x_context_save(phydev, &context);
+ at803x_context_save(phydev, &context);
- phy_device_reset(phydev, 1);
- msleep(1);
- phy_device_reset(phydev, 0);
- msleep(1);
+ phy_device_reset(phydev, 1);
+ msleep(1);
+ phy_device_reset(phydev, 0);
+ msleep(1);
- at803x_context_restore(phydev, &context);
+ at803x_context_restore(phydev, &context);
- phydev_dbg(phydev, "%s(): phy was reset\n",
- __func__);
- priv->phy_reset = true;
- }
- } else {
- priv->phy_reset = false;
+ phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
}
}
@@ -397,7 +389,7 @@ static struct phy_driver at803x_driver[] = {
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -412,7 +404,7 @@ static struct phy_driver at803x_driver[] = {
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -426,7 +418,7 @@ static struct phy_driver at803x_driver[] = {
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index ab8e12922bf9..9ccf28b0a04d 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -10,6 +10,10 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
+struct bcm_omega_phy_priv {
+ u64 *stats;
+};
+
/* Broadcom Cygnus Phy specific registers */
#define MII_BCM_CYGNUS_AFE_VDAC_ICTRL_0 0x91E5 /* VDAL Control register */
@@ -121,21 +125,162 @@ static int bcm_cygnus_resume(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int bcm_omega_config_init(struct phy_device *phydev)
+{
+ u8 count, rev;
+ int ret = 0;
+
+ rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+
+ pr_info_once("%s: %s PHY revision: 0x%02x\n",
+ phydev_name(phydev), phydev->drv->name, rev);
+
+ /* Dummy read of a register to work around an issue upon reset where the
+ * internal inverter may not allow the first MDIO transaction to pass
+ * the MDIO management controller, making such reads return 0xffff.
+ */
+ phy_read(phydev, MII_BMSR);
+
+ switch (rev) {
+ case 0x00:
+ ret = bcm_phy_28nm_a0b0_afe_config_init(phydev);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_downshift_get(phydev, &count);
+ if (ret)
+ return ret;
+
+ /* Only enable EEE if Wirespeed/downshift is disabled */
+ ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE);
+ if (ret)
+ return ret;
+
+ return bcm_phy_enable_apd(phydev, true);
+}
+
+static int bcm_omega_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Re-apply workarounds coming out of suspend/resume */
+ ret = bcm_omega_config_init(phydev);
+ if (ret)
+ return ret;
+
+ /* 28nm Gigabit PHYs come out of reset without any half-duplex
+ * or "hub" compliant advertised mode; fix that. This does not
+ * cause any problems with the PHY library since genphy_config_aneg()
+ * gracefully handles auto-negotiated and forced modes.
+ */
+ return genphy_config_aneg(phydev);
+}
+
+static int bcm_omega_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return bcm_phy_downshift_get(phydev, (u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bcm_omega_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna,
+ const void *data)
+{
+ u8 count = *(u8 *)data;
+ int ret;
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ ret = bcm_phy_downshift_set(phydev, count);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Disable EEE advertisement since this prevents the PHY
+ * from successfully linking up; then trigger an auto-negotiation
+ * restart to let the MAC decide what to do.
+ */
+ ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE);
+ if (ret)
+ return ret;
+
+ return genphy_restart_aneg(phydev);
+}
+
+static void bcm_omega_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm_omega_phy_priv *priv = phydev->priv;
+
+ bcm_phy_get_stats(phydev, priv->stats, stats, data);
+}
+
+static int bcm_omega_probe(struct phy_device *phydev)
+{
+ struct bcm_omega_phy_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
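+ /* Snapshot buffer passed to bcm_phy_get_stats() by the get_stats hook */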
+ priv->stats = devm_kcalloc(&phydev->mdio.dev,
+ bcm_phy_get_sset_count(phydev), sizeof(u64),
+ GFP_KERNEL);
+ if (!priv->stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
static struct phy_driver bcm_cygnus_phy_driver[] = {
{
.phy_id = PHY_ID_BCM_CYGNUS,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom Cygnus PHY",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm_cygnus_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
.suspend = genphy_suspend,
.resume = bcm_cygnus_resume,
-} };
+}, {
+ .phy_id = PHY_ID_BCM_OMEGA,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom Omega Combo GPHY",
+ /* PHY_GBIT_FEATURES */
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm_omega_config_init,
+ .suspend = genphy_suspend,
+ .resume = bcm_omega_resume,
+ .get_tunable = bcm_omega_get_tunable,
+ .set_tunable = bcm_omega_set_tunable,
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm_omega_get_phy_stats,
+ .probe = bcm_omega_probe,
+}
+};
static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
+ { PHY_ID_BCM_OMEGA, 0xfffffff0, },
{ }
};
MODULE_DEVICE_TABLE(mdio, bcm_cygnus_phy_tbl);
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index a75642051b8b..e0d3310957ff 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -371,6 +371,58 @@ void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
}
EXPORT_SYMBOL_GPL(bcm_phy_get_stats);
+void bcm_phy_r_rc_cal_reset(struct phy_device *phydev)
+{
+ /* Reset R_CAL/RC_CAL Engine */
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_AL/RC_CAL Engine */
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_r_rc_cal_reset);
+
+int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev)
+{
+ /* Increase VCO range to prevent unlocking problem of PLL at low
+ * temp
+ */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
+
+ /* Change Ki to 011 */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
+
+ /* Disable loading of TVCO buffer to bandgap, set bandgap trim
+ * to 111
+ */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
+
+ /* Adjust bias current trim by -3 */
+ bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
+
+ /* Switch to CORE_BASE1E */
+ phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);
+
+ bcm_phy_r_rc_cal_reset(phydev);
+
+ /* write AFE_RXCONFIG_0 */
+ bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
+
+ /* write AFE_RXCONFIG_1 */
+ bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
+
+ /* write AFE_RX_LP_COUNTER */
+ bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+ /* write AFE_HPF_TRIM_OTHERS */
+ bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
+
+ /* write AFE_TX_CONFIG */
+ bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init);
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 17faaefcfd60..5ecacb4e64f0 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -9,6 +9,24 @@
#include <linux/brcmphy.h>
#include <linux/phy.h>
+/* 28nm only register definitions */
+#define MISC_ADDR(base, channel) base, channel
+
+#define DSP_TAP10 MISC_ADDR(0x0a, 0)
+#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
+#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
+#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
+
+#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
+#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
+#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2)
+#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
+#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
+#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1)
+#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
+#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
+
int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
@@ -45,5 +63,7 @@ int bcm_phy_get_sset_count(struct phy_device *phydev);
void bcm_phy_get_strings(struct phy_device *phydev, u8 *data);
void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
struct ethtool_stats *stats, u64 *data);
+void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
+int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 44e6cff419a0..23f1958ba6ad 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -64,7 +64,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id = 0x00406000,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
@@ -73,7 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b8415f8fae14..8fc33867e524 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -37,77 +37,10 @@
#define MII_BCM7XXX_SHD_3_TL4 0x23
#define MII_BCM7XXX_TL4_RST_MSK (BIT(2) | BIT(1))
-/* 28nm only register definitions */
-#define MISC_ADDR(base, channel) base, channel
-
-#define DSP_TAP10 MISC_ADDR(0x0a, 0)
-#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
-#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
-#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
-
-#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
-#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
-#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2)
-#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
-#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
-#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1)
-#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
-#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
-
struct bcm7xxx_phy_priv {
u64 *stats;
};
-static void r_rc_cal_reset(struct phy_device *phydev)
-{
- /* Reset R_CAL/RC_CAL Engine */
- bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
-
- /* Disable Reset R_AL/RC_CAL Engine */
- bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
-}
-
-static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
-{
- /* Increase VCO range to prevent unlocking problem of PLL at low
- * temp
- */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
-
- /* Change Ki to 011 */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
-
- /* Disable loading of TVCO buffer to bandgap, set bandgap trim
- * to 111
- */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
-
- /* Adjust bias current trim by -3 */
- bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
-
- /* Switch to CORE_BASE1E */
- phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);
-
- r_rc_cal_reset(phydev);
-
- /* write AFE_RXCONFIG_0 */
- bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
-
- /* write AFE_RXCONFIG_1 */
- bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
-
- /* write AFE_RX_LP_COUNTER */
- bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
-
- /* write AFE_HPF_TRIM_OTHERS */
- bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
-
- /* write AFTE_TX_CONFIG */
- bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
-
- return 0;
-}
-
static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
{
/* AFE_RXCONFIG_0 */
@@ -143,7 +76,7 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
/* Reset R_CAL/RC_CAL engine */
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -171,7 +104,7 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
/* Reset R_CAL/RC_CAL engine */
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -196,7 +129,7 @@ static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev)
/* Enable ffe zero detection for Vitesse interoperability */
bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015);
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -227,7 +160,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
switch (rev) {
case 0xa0:
case 0xb0:
- ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
+ ret = bcm_phy_28nm_a0b0_afe_config_init(phydev);
break;
case 0xd0:
ret = bcm7xxx_28nm_d0_afe_config_init(phydev);
@@ -605,7 +538,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.phy_id = (_oui), \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_GBIT_FEATURES, \
+ /* PHY_GBIT_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_config_init, \
.resume = bcm7xxx_28nm_resume, \
@@ -622,7 +555,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.phy_id = (_oui), \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_BASIC_FEATURES, \
+ /* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_ephy_config_init, \
.resume = bcm7xxx_28nm_ephy_resume, \
@@ -637,7 +570,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.phy_id = (_oui), \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_BASIC_FEATURES, \
+ /* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_config_init, \
.suspend = bcm7xxx_suspend, \
@@ -657,7 +590,6 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
- BCM7XXX_28NM_GPHY(PHY_ID_BCM_OMEGA, "Broadcom Omega Combo GPHY"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index cb86a3e90c7d..67fa05d67523 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -610,7 +610,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5411,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -618,7 +618,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -626,7 +626,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM54210E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -634,7 +634,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -642,7 +642,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM54612E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -650,7 +650,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -659,7 +659,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -667,7 +667,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -676,7 +676,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM54810,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -685,7 +685,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm5482_config_init,
.read_status = bcm5482_read_status,
.ack_interrupt = bcm_phy_ack_intr,
@@ -694,7 +694,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -702,7 +702,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -710,7 +710,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -718,7 +718,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -726,7 +726,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -735,7 +735,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5395",
.flags = PHY_IS_INTERNAL,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
.get_strings = bcm_phy_get_strings,
.get_stats = bcm53xx_phy_get_stats,
@@ -744,7 +744,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 108ed24f8489..9d1612a4d7e6 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -102,7 +102,7 @@ static struct phy_driver cis820x_driver[] = {
.phy_id = 0x000fc410,
.name = "Cicada Cis8201",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
@@ -110,7 +110,7 @@ static struct phy_driver cis820x_driver[] = {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index bf39baa7f2c8..942f277463a4 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -144,7 +144,7 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id = 0x0181b880,
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -153,7 +153,7 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id = 0x0181b8b0,
.name = "Davicom DM9161B/C",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -162,7 +162,7 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -171,7 +171,7 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id = 0x00181b80,
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
} };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2fe2ebaf62d1..6580094161a9 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1514,7 +1514,7 @@ static struct phy_driver dp83640_driver = {
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = dp83640_probe,
.remove = dp83640_remove,
.soft_reset = dp83640_soft_reset,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 97d45bd5b38e..7ed4760fb155 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -310,7 +310,7 @@ static int dp83822_resume(struct phy_device *phydev)
{ \
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
- .features = PHY_BASIC_FEATURES, \
+ /* PHY_BASIC_FEATURES */ \
.soft_reset = dp83822_phy_reset, \
.config_init = dp83822_config_init, \
.get_wol = dp83822_get_wol, \
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index f55dc907c2f3..6f9bc7d91f17 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.phy_id = _id, \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_BASIC_FEATURES, \
+ /* PHY_BASIC_FEATURES */ \
\
.soft_reset = genphy_soft_reset, \
.config_init = _config_init, \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8448d01819ef..fd35131a0c39 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -315,7 +315,7 @@ static struct phy_driver dp83867_driver[] = {
.phy_id = DP83867_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "TI DP83867",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index e9704af1d239..ac27da16824d 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -338,7 +338,7 @@ static struct phy_driver dp83811_driver[] = {
.phy_id = DP83TC811_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "TI DP83TC811",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = dp83811_config_init,
.config_aneg = dp83811_config_aneg,
.soft_reset = dp83811_phy_reset,
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 2aa367c04a8e..09e07b902d3a 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -86,7 +86,7 @@ static struct phy_driver et1011c_driver[] = { {
.phy_id = 0x0282f014,
.name = "ET1011C",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_aneg = et1011c_config_aneg,
.read_status = et1011c_read_status,
} };
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 1acd8bfdb3bc..3ffe46df249e 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -301,7 +301,7 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
phy->supported);
}
- linkmode_copy(phy->advertising, phy->supported);
+ phy_advertise_supported(phy);
ret = phy_device_register(phy);
if (ret) {
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index ebef8354bc81..d6e8516cd146 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -311,7 +311,7 @@ static struct phy_driver icplus_driver[] = {
.phy_id = 0x02430d80,
.name = "ICPlus IP175C",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = &ip175c_config_init,
.config_aneg = &ip175c_config_aneg,
.read_status = &ip175c_read_status,
@@ -321,7 +321,7 @@ static struct phy_driver icplus_driver[] = {
.phy_id = 0x02430d90,
.name = "ICPlus IP1001",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &ip1001_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -329,7 +329,7 @@ static struct phy_driver icplus_driver[] = {
.phy_id = 0x02430c54,
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = ip101a_g_probe,
.config_intr = ip101a_g_config_intr,
.did_interrupt = ip101a_g_did_interrupt,
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 02d9713318b6..b7875b36097f 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -232,7 +232,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_3,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -244,7 +244,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_3,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.3",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -256,7 +256,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_4,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -268,7 +268,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_4,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.4",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -280,7 +280,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_5,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -291,7 +291,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_5,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -302,7 +302,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_VR9_1_1,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.1 integrated)",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -313,7 +313,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_VR9_1_1,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.1 integrated)",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -324,7 +324,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_VR9_1_2,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.2 integrated)",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -335,7 +335,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_VR9_1_2,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.2 integrated)",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index a93d673baf35..314486288119 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -251,7 +251,7 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id = 0x78100000,
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = lxt970_config_init,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
@@ -259,14 +259,14 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id = 0x001378e0,
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
}, {
.phy_id = 0x00137a10,
.name = "LXT973-A2",
.phy_id_mask = 0xffffffff,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
@@ -275,7 +275,7 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id = 0x00137a10,
.name = "LXT973",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 3ccba37bd6dd..a7796134e3be 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -29,6 +29,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/io.h>
@@ -91,6 +92,14 @@
#define MII_88E1510_TEMP_SENSOR 0x1b
#define MII_88E1510_TEMP_SENSOR_MASK 0xff
+#define MII_88E1540_COPPER_CTRL3 0x1a
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK GENMASK(11, 10)
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS 0
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS 1
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS 2
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS 3
+#define MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN BIT(9)
+
#define MII_88E6390_MISC_TEST 0x1b
#define MII_88E6390_MISC_TEST_SAMPLE_1S 0
#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14)
@@ -128,6 +137,7 @@
#define MII_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_DEF 0x0030
#define MII_88E1510_PHY_LED_DEF 0x1177
+#define MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE 0x1040
#define MII_M1011_PHY_STATUS 0x11
#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -624,7 +634,10 @@ static void marvell_config_led(struct phy_device *phydev)
* LED[2] .. Blink, Activity
*/
case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
- def_config = MII_88E1510_PHY_LED_DEF;
+ if (phydev->dev_flags & MARVELL_PHY_LED0_LINK_LED1_ACTIVE)
+ def_config = MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE;
+ else
+ def_config = MII_88E1510_PHY_LED_DEF;
break;
default:
return;
@@ -1025,6 +1038,101 @@ static int m88e1145_config_init(struct phy_device *phydev)
return 0;
}
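+/* "Fast Link Down" shortens the delay before the PHY reports loss of link.
+ * It is exposed via the ETHTOOL_PHY_FAST_LINK_DOWN tunable; the requested
+ * delay in milliseconds is rounded to the nearest supported value
+ * (0, 10, 20 or 40 ms), or the feature is switched off entirely.
+ */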
+static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
+{
+ int val;
+
+ val = phy_read(phydev, MII_88E1540_COPPER_CTRL3);
+ if (val < 0)
+ return val;
+
+ if (!(val & MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN)) {
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_OFF;
+ return 0;
+ }
+
+ val = FIELD_GET(MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+
+ switch (val) {
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS:
+ *msecs = 0;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS:
+ *msecs = 10;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS:
+ *msecs = 20;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS:
+ *msecs = 40;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
+{
+ struct ethtool_eee eee;
+ int val, ret;
+
+ if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
+ return phy_clear_bits(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN);
+
+ /* According to the Marvell data sheet, EEE must be disabled for
+ * Fast Link Down detection to work properly.
+ */
+ ret = phy_ethtool_get_eee(phydev, &eee);
+ if (!ret && eee.eee_enabled) {
+ phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
+ return -EBUSY;
+ }
+
+ if (*msecs <= 5)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS;
+ else if (*msecs <= 15)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS;
+ else if (*msecs <= 30)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS;
+ else
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS;
+
+ val = FIELD_PREP(MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+
+ ret = phy_modify(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+ if (ret)
+ return ret;
+
+ return phy_set_bits(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN);
+}
+
+static int m88e1540_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return m88e1540_get_fld(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1540_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return m88e1540_set_fld(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
/* The VOD can be out of specification on link up. Poke an
* undocumented register, in an undocumented page, with a magic value
* to fix this.
@@ -1489,9 +1597,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
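+ /* Only report the counters this PHY variant actually implements */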
+ int count = marvell_get_sset_count(phydev);
int i;
- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
+ for (i = 0; i < count; i++) {
strlcpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
@@ -1519,9 +1628,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
+ int count = marvell_get_sset_count(phydev);
int i;
- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
+ for (i = 0; i < count; i++)
data[i] = marvell_get_stat(phydev, i);
}
@@ -2022,7 +2132,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1101,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1101",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2040,7 +2150,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1112,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1112",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2058,7 +2168,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1111,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1111",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2077,7 +2187,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1118,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1118",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1118_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2095,7 +2205,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1121R,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1121R",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = &m88e1121_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1121_config_aneg,
@@ -2115,7 +2225,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1318S,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1318S",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1318_config_init,
.config_aneg = &m88e1318_config_aneg,
@@ -2137,7 +2247,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1145,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1145",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1145_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2156,7 +2266,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1149R,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1149R",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1149_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2174,7 +2284,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1240,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2192,7 +2302,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1116R,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1116R",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1116r_config_init,
.ack_interrupt = &marvell_ack_interrupt,
@@ -2232,7 +2342,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1540,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1540",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = m88e1510_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2247,13 +2357,15 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2272,7 +2384,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E3016,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E3016",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = marvell_probe,
.config_init = &m88e3016_config_init,
.aneg_done = &marvell_aneg_done,
@@ -2292,7 +2404,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E6390,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6390",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e6390_config_aneg,
@@ -2307,6 +2419,8 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
},
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 100b401b1f4a..238a20e13d6a 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -48,6 +48,8 @@ enum {
MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */
/* Vendor2 MMD registers */
+ MV_V2_PORT_CTRL = 0xf001,
+ MV_V2_PORT_CTRL_PWRDOWN = 0x0800,
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
MV_V2_TEMP_CTRL_SAMPLE = 0x0000,
@@ -226,11 +228,19 @@ static int mv3310_probe(struct phy_device *phydev)
static int mv3310_suspend(struct phy_device *phydev)
{
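+ /* Power down the port while the interface is suspended */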
- return 0;
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
}
static int mv3310_resume(struct phy_device *phydev)
{
+ int ret;
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+ if (ret)
+ return ret;
+
return mv3310_hwmon_config(phydev, true);
}
@@ -472,8 +482,9 @@ static struct phy_driver mv3310_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E2110,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x2110",
- .get_features = genphy_c45_pma_read_abilities,
.probe = mv3310_probe,
+ .suspend = mv3310_suspend,
+ .resume = mv3310_resume,
.soft_reset = genphy_no_soft_reset,
.config_init = mv3310_config_init,
.config_aneg = mv3310_config_aneg,
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 8295bc7c8c20..4a28fb29adaa 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -92,10 +92,7 @@ static int unimac_mdio_poll(void *wait_func_data)
usleep_range(1000, 2000);
} while (--timeout);
- if (!timeout)
- return -ETIMEDOUT;
-
- return 0;
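+ /* Success returns from within the loop, so getting here means timeout */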
+ return -ETIMEDOUT;
}
static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -292,7 +289,7 @@ static int unimac_mdio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base);
+ dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus\n");
return 0;
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
new file mode 100644
index 000000000000..6644762ff2ab
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-meson-g12a.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Baylibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#define ETH_PLL_STS 0x40
+#define ETH_PLL_CTL0 0x44
+#define PLL_CTL0_LOCK_DIG BIT(30)
+#define PLL_CTL0_RST BIT(29)
+#define PLL_CTL0_EN BIT(28)
+#define PLL_CTL0_SEL BIT(23)
+#define PLL_CTL0_N GENMASK(14, 10)
+#define PLL_CTL0_M GENMASK(8, 0)
+#define PLL_LOCK_TIMEOUT 1000000
+#define PLL_MUX_NUM_PARENT 2
+#define ETH_PLL_CTL1 0x48
+#define ETH_PLL_CTL2 0x4c
+#define ETH_PLL_CTL3 0x50
+#define ETH_PLL_CTL4 0x54
+#define ETH_PLL_CTL5 0x58
+#define ETH_PLL_CTL6 0x5c
+#define ETH_PLL_CTL7 0x60
+
+#define ETH_PHY_CNTL0 0x80
+#define EPHY_G12A_ID 0x33010180
+#define ETH_PHY_CNTL1 0x84
+#define PHY_CNTL1_ST_MODE GENMASK(2, 0)
+#define PHY_CNTL1_ST_PHYADD GENMASK(7, 3)
+#define EPHY_DFLT_ADD 8
+#define PHY_CNTL1_MII_MODE GENMASK(15, 14)
+#define EPHY_MODE_RMII 0x1
+#define PHY_CNTL1_CLK_EN BIT(16)
+#define PHY_CNTL1_CLKFREQ BIT(17)
+#define PHY_CNTL1_PHY_ENB BIT(18)
+#define ETH_PHY_CNTL2 0x88
+#define PHY_CNTL2_USE_INTERNAL BIT(5)
+#define PHY_CNTL2_SMI_SRC_MAC BIT(6)
+#define PHY_CNTL2_RX_CLK_EPHY BIT(9)
+
+#define MESON_G12A_MDIO_EXTERNAL_ID 0
+#define MESON_G12A_MDIO_INTERNAL_ID 1
+
+struct g12a_mdio_mux {
+ bool pll_is_enabled;
+ void __iomem *regs;
+ void *mux_handle;
+ struct clk *pclk;
+ struct clk *pll;
+};
+
+struct g12a_ephy_pll {
+ void __iomem *base;
+ struct clk_hw hw;
+};
+
+#define g12a_ephy_pll_to_dev(_hw) \
+ container_of(_hw, struct g12a_ephy_pll, hw)
+
+static unsigned long g12a_ephy_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+ u32 val, m, n;
+
+ val = readl(pll->base + ETH_PLL_CTL0);
+ m = FIELD_GET(PLL_CTL0_M, val);
+ n = FIELD_GET(PLL_CTL0_N, val);
+
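+ /* Output rate is the parent rate scaled by the M/N divider factors */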
+ return parent_rate * m / n;
+}
+
+static int g12a_ephy_pll_enable(struct clk_hw *hw)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+ u32 val = readl(pll->base + ETH_PLL_CTL0);
+
+ /* Apply both enable and reset */
+ val |= PLL_CTL0_RST | PLL_CTL0_EN;
+ writel(val, pll->base + ETH_PLL_CTL0);
+
+ /* Clear the reset to let PLL lock */
+ val &= ~PLL_CTL0_RST;
+ writel(val, pll->base + ETH_PLL_CTL0);
+
+ /* Poll on the digital lock instead of the usual analog lock.
+ * This is done because bit 31 is unreliable on some SoCs: it
+ * may indicate that the PLL is not locked even though the clock
+ * is actually running.
+ */
+ return readl_poll_timeout(pll->base + ETH_PLL_CTL0, val,
+ val & PLL_CTL0_LOCK_DIG, 0, PLL_LOCK_TIMEOUT);
+}
+
+static void g12a_ephy_pll_disable(struct clk_hw *hw)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+ u32 val;
+
+ val = readl(pll->base + ETH_PLL_CTL0);
+ val &= ~PLL_CTL0_EN;
+ val |= PLL_CTL0_RST;
+ writel(val, pll->base + ETH_PLL_CTL0);
+}
+
+static int g12a_ephy_pll_is_enabled(struct clk_hw *hw)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+ unsigned int val;
+
+ val = readl(pll->base + ETH_PLL_CTL0);
+
+ return (val & PLL_CTL0_LOCK_DIG) ? 1 : 0;
+}
+
+static void g12a_ephy_pll_init(struct clk_hw *hw)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+
+ /* Apply PLL HW settings */
+ writel(0x29c0040a, pll->base + ETH_PLL_CTL0);
+ writel(0x927e0000, pll->base + ETH_PLL_CTL1);
+ writel(0xac5f49e5, pll->base + ETH_PLL_CTL2);
+ writel(0x00000000, pll->base + ETH_PLL_CTL3);
+ writel(0x00000000, pll->base + ETH_PLL_CTL4);
+ writel(0x20200000, pll->base + ETH_PLL_CTL5);
+ writel(0x0000c002, pll->base + ETH_PLL_CTL6);
+ writel(0x00000023, pll->base + ETH_PLL_CTL7);
+}
+
+static const struct clk_ops g12a_ephy_pll_ops = {
+ .recalc_rate = g12a_ephy_pll_recalc_rate,
+ .is_enabled = g12a_ephy_pll_is_enabled,
+ .enable = g12a_ephy_pll_enable,
+ .disable = g12a_ephy_pll_disable,
+ .init = g12a_ephy_pll_init,
+};
+
+static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+{
+ int ret;
+
+ /* Enable the phy clock */
+ if (!priv->pll_is_enabled) {
+ ret = clk_prepare_enable(priv->pll);
+ if (ret)
+ return ret;
+ }
+
+ priv->pll_is_enabled = true;
+
+ /* Initialize ephy control */
+ writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
+ writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+ PHY_CNTL1_CLK_EN |
+ PHY_CNTL1_CLKFREQ |
+ PHY_CNTL1_PHY_ENB,
+ priv->regs + ETH_PHY_CNTL1);
+ writel(PHY_CNTL2_USE_INTERNAL |
+ PHY_CNTL2_SMI_SRC_MAC |
+ PHY_CNTL2_RX_CLK_EPHY,
+ priv->regs + ETH_PHY_CNTL2);
+
+ return 0;
+}
+
+static int g12a_enable_external_mdio(struct g12a_mdio_mux *priv)
+{
+ /* Reset the mdio bus mux */
+ writel_relaxed(0x0, priv->regs + ETH_PHY_CNTL2);
+
+ /* Disable the phy clock if enabled */
+ if (priv->pll_is_enabled) {
+ clk_disable_unprepare(priv->pll);
+ priv->pll_is_enabled = false;
+ }
+
+ return 0;
+}
+
+static int g12a_mdio_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct g12a_mdio_mux *priv = dev_get_drvdata(data);
+
+ if (current_child == desired_child)
+ return 0;
+
+ switch (desired_child) {
+ case MESON_G12A_MDIO_EXTERNAL_ID:
+ return g12a_enable_external_mdio(priv);
+ case MESON_G12A_MDIO_INTERNAL_ID:
+ return g12a_enable_internal_mdio(priv);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct of_device_id g12a_mdio_mux_match[] = {
+ { .compatible = "amlogic,g12a-mdio-mux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, g12a_mdio_mux_match);
+
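+/* Register the PLL input mux and the ePHY PLL itself with the common
+ * clock framework. The PLL clocks the internal PHY and is only enabled
+ * while the internal MDIO path is selected.
+ */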
+static int g12a_ephy_glue_clk_register(struct device *dev)
+{
+ struct g12a_mdio_mux *priv = dev_get_drvdata(dev);
+ const char *parent_names[PLL_MUX_NUM_PARENT];
+ struct clk_init_data init;
+ struct g12a_ephy_pll *pll;
+ struct clk_mux *mux;
+ struct clk *clk;
+ char *name;
+ int i;
+
+ /* get the mux parents */
+ for (i = 0; i < PLL_MUX_NUM_PARENT; i++) {
+ char in_name[8];
+
+ snprintf(in_name, sizeof(in_name), "clkin%d", i);
+ clk = devm_clk_get(dev, in_name);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "Missing clock %s\n", in_name);
+ return PTR_ERR(clk);
+ }
+
+ parent_names[i] = __clk_get_name(clk);
+ }
+
+ /* create the input mux */
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ name = kasprintf(GFP_KERNEL, "%s#mux", dev_name(dev));
+ if (!name)
+ return -ENOMEM;
+
+ init.name = name;
+ init.ops = &clk_mux_ro_ops;
+ init.flags = 0;
+ init.parent_names = parent_names;
+ init.num_parents = PLL_MUX_NUM_PARENT;
+
+ mux->reg = priv->regs + ETH_PLL_CTL0;
+ mux->shift = __ffs(PLL_CTL0_SEL);
+ mux->mask = PLL_CTL0_SEL >> mux->shift;
+ mux->hw.init = &init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ kfree(name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register input mux\n");
+ return PTR_ERR(clk);
+ }
+
+ /* create the pll */
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ name = kasprintf(GFP_KERNEL, "%s#pll", dev_name(dev));
+ if (!name)
+ return -ENOMEM;
+
+ init.name = name;
+ init.ops = &g12a_ephy_pll_ops;
+ init.flags = 0;
+ parent_names[0] = __clk_get_name(clk);
+ init.parent_names = parent_names;
+ init.num_parents = 1;
+
+ pll->base = priv->regs;
+ pll->hw.init = &init;
+
+ clk = devm_clk_register(dev, &pll->hw);
+ kfree(name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register ephy pll\n");
+ return PTR_ERR(clk);
+ }
+
+ priv->pll = clk;
+
+ return 0;
+}
+
+static int g12a_mdio_mux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct g12a_mdio_mux *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get peripheral clock\n");
+ return ret;
+ }
+
+ /* Make sure the device registers are clocked */
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dev, "failed to enable peripheral clock");
+ return ret;
+ }
+
+ /* Register PLL in CCF */
+ ret = g12a_ephy_glue_clk_register(dev);
+ if (ret)
+ goto err;
+
+ ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
+ &priv->mux_handle, dev, NULL);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "mdio multiplexer init failed: %d", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ clk_disable_unprepare(priv->pclk);
+ return ret;
+}
+
+static int g12a_mdio_mux_remove(struct platform_device *pdev)
+{
+ struct g12a_mdio_mux *priv = platform_get_drvdata(pdev);
+
+ mdio_mux_uninit(priv->mux_handle);
+
+ if (priv->pll_is_enabled)
+ clk_disable_unprepare(priv->pll);
+
+ clk_disable_unprepare(priv->pclk);
+
+ return 0;
+}
+
+static struct platform_driver g12a_mdio_mux_driver = {
+ .probe = g12a_mdio_mux_probe,
+ .remove = g12a_mdio_mux_remove,
+ .driver = {
+ .name = "g12a-mdio_mux",
+ .of_match_table = g12a_mdio_mux_match,
+ },
+};
+module_platform_driver(g12a_mdio_mux_driver);
+
+MODULE_DESCRIPTION("Amlogic G12a MDIO multiplexer driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4be4cc09eb90..bd04fe762056 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -24,6 +24,7 @@
#include <linux/of_gpio.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/reset.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
@@ -55,10 +56,25 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
return PTR_ERR(gpiod);
}
- mdiodev->reset = gpiod;
+ mdiodev->reset_gpio = gpiod;
- /* Assert the reset signal again */
- mdio_device_reset(mdiodev, 1);
+ return 0;
+}
+
+static int mdiobus_register_reset(struct mdio_device *mdiodev)
+{
+ struct reset_control *reset = NULL;
+
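+ /* The "phy" reset control is optional: -ENOENT and -ENOTSUPP simply
+ * mean that no reset line is described, which is not an error.
+ */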
+ if (mdiodev->dev.of_node)
+ reset = devm_reset_control_get_exclusive(&mdiodev->dev,
+ "phy");
+ if (PTR_ERR(reset) == -ENOENT ||
+ PTR_ERR(reset) == -ENOTSUPP)
+ reset = NULL;
+ else if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ mdiodev->reset_ctrl = reset;
return 0;
}
@@ -74,6 +90,13 @@ int mdiobus_register_device(struct mdio_device *mdiodev)
err = mdiobus_register_gpiod(mdiodev);
if (err)
return err;
+
+ err = mdiobus_register_reset(mdiodev);
+ if (err)
+ return err;
+
+ /* Assert the reset signal */
+ mdio_device_reset(mdiodev, 1);
}
mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev;
@@ -446,8 +469,8 @@ void mdiobus_unregister(struct mii_bus *bus)
if (!mdiodev)
continue;
- if (mdiodev->reset)
- gpiod_put(mdiodev->reset);
+ if (mdiodev->reset_gpio)
+ gpiod_put(mdiodev->reset_gpio);
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 887076292e50..e282600bd83e 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -16,6 +16,7 @@
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/unistd.h>
@@ -116,10 +117,18 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
{
unsigned int d;
- if (!mdiodev->reset)
+ if (!mdiodev->reset_gpio && !mdiodev->reset_ctrl)
return;
- gpiod_set_value(mdiodev->reset, value);
+ if (mdiodev->reset_gpio)
+ gpiod_set_value(mdiodev->reset_gpio, value);
+
+ if (mdiodev->reset_ctrl) {
+ if (value)
+ reset_control_assert(mdiodev->reset_ctrl);
+ else
+ reset_control_deassert(mdiodev->reset_ctrl);
+ }
d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
if (d)
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 0eec2913c289..fa80d6dce8ee 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -224,24 +224,33 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
static struct phy_driver meson_gxl_phy[] = {
{
- .phy_id = 0x01814400,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_EXACT(0x01814400),
.name = "Meson GXL Internal PHY",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.soft_reset = genphy_soft_reset,
.config_init = meson_gxl_config_init,
- .aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
.ack_interrupt = meson_gxl_ack_interrupt,
.config_intr = meson_gxl_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ }, {
+ PHY_ID_MATCH_EXACT(0x01803301),
+ .name = "Meson G12A Internal PHY",
+ /* PHY_BASIC_FEATURES */
+ .flags = PHY_IS_INTERNAL,
+ .soft_reset = genphy_soft_reset,
+ .ack_interrupt = meson_gxl_ack_interrupt,
+ .config_intr = meson_gxl_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
},
};
static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
- { 0x01814400, 0xfffffff0 },
+ { PHY_ID_MATCH_VENDOR(0x01814400) },
+ { PHY_ID_MATCH_VENDOR(0x01803301) },
{ }
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 352da24f1f33..3c8186f269f9 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -28,6 +28,7 @@
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
+#define KSZPHY_OMSO_FACTORY_TEST BIT(15)
#define KSZPHY_OMSO_B_CAST_OFF BIT(9)
#define KSZPHY_OMSO_NAND_TREE_ON BIT(5)
#define KSZPHY_OMSO_RMII_OVERRIDE BIT(1)
@@ -340,6 +341,18 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz8081_config_init(struct phy_device *phydev)
+{
+ /* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
+ * based on the RXER (KSZ8081RNA/RND) or TXC (KSZ8081MNX/RNB) pin. If a
+ * pull-down is missing, the factory test mode should be cleared by
+ * manually writing a 0.
+ */
+ phy_clear_bits(phydev, MII_KSZPHY_OMSO, KSZPHY_OMSO_FACTORY_TEST);
+
+ return kszphy_config_init(phydev);
+}
+
static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
@@ -738,6 +751,31 @@ static int ksz8873mll_read_status(struct phy_device *phydev)
return 0;
}
+static int ksz9031_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_abilities(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Silicon Errata Sheet (DS80000691D or DS80000692D):
+ * Whenever the device's Asymmetric Pause capability is set to 1,
+ * link-up may fail after a link-up to link-down transition.
+ *
+ * Workaround:
+ * Do not enable the Asymmetric Pause capability bit.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+
+ /* We force setting the Pause capability as the core will force the
+ * Asymmetric Pause capability to 1 otherwise.
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+
+ return 0;
+}
+
static int ksz9031_read_status(struct phy_device *phydev)
{
int err;
@@ -908,7 +946,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KS8737,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.ack_interrupt = kszphy_ack_interrupt,
@@ -919,7 +957,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021 or KSZ8031",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -934,7 +972,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8031,
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8031",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -949,7 +987,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
@@ -965,7 +1003,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -980,7 +1018,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8051,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -995,7 +1033,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8001,
.name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00fffffc,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1010,10 +1048,10 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
- .config_init = kszphy_config_init,
+ .config_init = ksz8081_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -1025,7 +1063,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = ksz8061_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1035,7 +1073,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
@@ -1052,9 +1090,9 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
- .features = PHY_GBIT_FEATURES,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
+ .get_features = ksz9031_get_features,
.config_init = ksz9031_config_init,
.soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
@@ -1069,7 +1107,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@ -1085,7 +1123,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
@@ -1095,7 +1133,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1103,7 +1141,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8795,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8795",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
@@ -1113,7 +1151,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9477,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9477",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index c6cbb3aa8ae0..eb1b3287fe08 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -333,7 +333,7 @@ static struct phy_driver microchip_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Microchip LAN88xx",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = lan88xx_probe,
.remove = lan88xx_remove,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index db50efb30df5..28676af97b42 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -85,12 +85,49 @@ enum rgmii_rx_clock_delay {
#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
+#define MSCC_EXT_PAGE_CSR_CNTL_17 17
+#define MSCC_EXT_PAGE_CSR_CNTL_18 18
+
+#define MSCC_EXT_PAGE_CSR_CNTL_19 19
+#define MSCC_PHY_CSR_CNTL_19_REG_ADDR(x) (x)
+#define MSCC_PHY_CSR_CNTL_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_CSR_CNTL_19_READ BIT(14)
+#define MSCC_PHY_CSR_CNTL_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_CSR_CNTL_20 20
+#define MSCC_PHY_CSR_CNTL_20_TARGET(x) (x)
+
+#define PHY_MCB_TARGET 0x07
+#define PHY_MCB_S6G_WRITE BIT(31)
+#define PHY_MCB_S6G_READ BIT(30)
+
+#define PHY_S6G_PLL5G_CFG0 0x06
+#define PHY_S6G_LCPLL_CFG 0x11
+#define PHY_S6G_PLL_CFG 0x2b
+#define PHY_S6G_COMMON_CFG 0x2c
+#define PHY_S6G_GPC_CFG 0x2e
+#define PHY_S6G_MISC_CFG 0x3b
+#define PHY_MCB_S6G_CFG 0x3f
+#define PHY_S6G_DFT_CFG2 0x3e
+#define PHY_S6G_PLL_STATUS 0x31
+#define PHY_S6G_IB_STATUS0 0x2f
+
+#define PHY_S6G_SYS_RST_POS 31
+#define PHY_S6G_ENA_LANE_POS 18
+#define PHY_S6G_ENA_LOOP_POS 8
+#define PHY_S6G_QRATE_POS 6
+#define PHY_S6G_IF_MODE_POS 4
+#define PHY_S6G_PLL_ENA_OFFS_POS 21
+#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
+#define PHY_S6G_PLL_FSM_ENA_POS 7
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
+#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
* in the same package.
*/
@@ -216,6 +253,7 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_TR_MSB 18
/* Microsemi PHY ID's */
+#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8540 0x00070760
@@ -1742,6 +1780,386 @@ static int vsc8584_did_interrupt(struct phy_device *phydev)
return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK;
}
+static int vsc8514_config_pre_init(struct phy_device *phydev)
+{
+ /* These settings override the silicon default values to improve the
+ * hardware performance of the PHY. They are applied at power-on and
+ * remain in effect until the PHY is reset.
+ */
+ const struct reg_val pre_init1[] = {
+ {0x0f90, 0x00688980},
+ {0x0786, 0x00000003},
+ {0x07fa, 0x0050100f},
+ {0x0f82, 0x0012b002},
+ {0x1686, 0x00000004},
+ {0x168c, 0x00d2c46f},
+ {0x17a2, 0x00000620},
+ {0x16a0, 0x00eeffdd},
+ {0x16a6, 0x00071448},
+ {0x16a4, 0x0013132f},
+ {0x16a8, 0x00000000},
+ {0x0ffc, 0x00c0a028},
+ {0x0fe8, 0x0091b06c},
+ {0x0fea, 0x00041600},
+ {0x0f80, 0x00fffaff},
+ {0x0fec, 0x00901809},
+ {0x0ffe, 0x00b01007},
+ {0x16b0, 0x00eeff00},
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ };
+ unsigned int i;
+ u16 reg;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* all writes below are broadcasted to all PHYs in the same package */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg |= SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg |= BIT(15);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
+ vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg &= ~BIT(15);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg &= ~SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ return 0;
+}
+
+static u32 vsc85xx_csr_ctrl_phy_read(struct phy_device *phydev,
+ u32 target, u32 reg)
+{
+ unsigned long deadline;
+ u32 val, val_l, val_h;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+
+ /* CSR registers are grouped under different Target IDs.
+ * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+ * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+ * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+ * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
+ */
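+ /* e.g. target 0x7 (PHY_MCB_TARGET) is split into 0x7 >> 2 = 0x1 for
+ * MSCC_EXT_PAGE_CSR_CNTL_20 and 0x7 & 0x3 = 0x3 for bits[13:12] of
+ * MSCC_EXT_PAGE_CSR_CNTL_19.
+ */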
+
+ /* Setup the Target ID */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+ MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+
+ /* Trigger CSR Action - Read into the CSRs */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+ MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
+ MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+ MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
+
+ /* Wait for register access */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+ } while (time_before(jiffies, deadline) &&
+ !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+
+ if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+ return 0xffffffff;
+
+ /* Read the Least Significant Word (LSW) (17) */
+ val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
+
+ /* Read the Most Significant Word (MSW) (18) */
+ val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ return (val_h << 16) | val_l;
+}
+
+static int vsc85xx_csr_ctrl_phy_write(struct phy_device *phydev,
+ u32 target, u32 reg, u32 val)
+{
+ unsigned long deadline;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+
+ /* CSR registers are grouped under different Target IDs.
+ * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+ * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+ * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+ * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
+ */
+
+ /* Setup the Target ID */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+ MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+
+ /* Write the Least Significant Word (LSW) (17) */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
+
+ /* Write the Most Significant Word (MSW) (18) */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
+
+ /* Trigger CSR Action - Write into the CSRs */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+ MSCC_PHY_CSR_CNTL_19_CMD |
+ MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+ MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
+
+ /* Wait for register access */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+ } while (time_before(jiffies, deadline) &&
+ !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+
+ if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+ return -ETIMEDOUT;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
+static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
+ u32 op)
+{
+ unsigned long deadline;
+ u32 val;
+ int ret;
+
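+ /* Trigger the operation on the selected MCB instance; the op bit is
+ * polled below until the hardware clears it, indicating completion.
+ */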
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, reg,
+ op | (1 << mcb));
+ if (ret)
+ return -EINVAL;
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, reg);
+
+ if (val == 0xffffffff)
+ return -EIO;
+
+ } while (time_before(jiffies, deadline) && (val & op));
+
+ if (val & op)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Trigger a read to the specified MCB */
+static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+{
+ return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
+}
+
+/* Trigger a write to the specified MCB */
+static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+{
+ return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
+}
+
+static int vsc8514_config_init(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ unsigned long deadline;
+ u16 val, addr;
+ int ret, i;
+ u32 reg;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+
+ __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+
+ addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4);
+ addr >>= PHY_CNTL_4_ADDR_POS;
+
+ val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
+
+ if (val & PHY_ADDR_REVERSED)
+ vsc8531->base_addr = phydev->mdio.addr + addr;
+ else
+ vsc8531->base_addr = phydev->mdio.addr - addr;
+
+ /* Some parts of the init sequence are identical for every PHY in the
+ * package. Some parts modify the GPIO register bank, which is shared by
+ * all PHYs in the package, and a few reset the microprocessor common to
+ * all PHYs.
+ * The interrupt mask register of every PHY has to be zeroed before any
+ * PHY's interrupt is enabled in this register.
+ * For all these reasons, the init sequence must be run once and only
+ * once, by whichever PHY in the package happens to be initialized first,
+ * and all package-critical settings are handled in this pre-init
+ * function.
+ */
+ if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0))
+ vsc8514_config_pre_init(phydev);
+
+ vsc8531->pkg_init = true;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+
+ val &= ~MAC_CFG_MASK;
+ val |= MAC_CFG_QSGMII;
+ ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+
+ if (ret)
+ goto err;
+
+ ret = vsc8584_cmd(phydev,
+ PROC_CMD_MCB_ACCESS_MAC_CONF |
+ PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
+ if (ret)
+ goto err;
+
+ /* 6g mcb */
+ phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ /* lcpll mcb */
+ phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+ /* pll5gcfg0 */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_PLL5G_CFG0, 0x7036f145);
+ if (ret)
+ goto err;
+
+ phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+ /* pllcfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_PLL_CFG,
+ (3 << PHY_S6G_PLL_ENA_OFFS_POS) |
+ (120 << PHY_S6G_PLL_FSM_CTRL_DATA_POS)
+ | (0 << PHY_S6G_PLL_FSM_ENA_POS));
+ if (ret)
+ goto err;
+
+ /* commoncfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_COMMON_CFG,
+ (0 << PHY_S6G_SYS_RST_POS) |
+ (0 << PHY_S6G_ENA_LANE_POS) |
+ (0 << PHY_S6G_ENA_LOOP_POS) |
+ (0 << PHY_S6G_QRATE_POS) |
+ (3 << PHY_S6G_IF_MODE_POS));
+ if (ret)
+ goto err;
+
+ /* misccfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_MISC_CFG, 1);
+ if (ret)
+ goto err;
+
+ /* gpcfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_GPC_CFG, 768);
+ if (ret)
+ goto err;
+
+ phy_commit_mcb_s6g(phydev, PHY_S6G_DFT_CFG2, 0);
+
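+ /* Poll PHY_S6G_PLL_STATUS until bit 12 clears or the poll times out */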
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
+ 0); /* read 6G MCB into CSRs */
+ reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
+ PHY_S6G_PLL_STATUS);
+ if (reg == 0xffffffff) {
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return -EIO;
+ }
+
+ } while (time_before(jiffies, deadline) && (reg & BIT(12)));
+
+ if (reg & BIT(12)) {
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return -ETIMEDOUT;
+ }
+
+ /* misccfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_MISC_CFG, 0);
+ if (ret)
+ goto err;
+
+ phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+
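+ /* Poll PHY_S6G_IB_STATUS0 until bit 8 is set or the poll times out */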
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
+ 0); /* read 6G MCB into CSRs */
+ reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
+ PHY_S6G_IB_STATUS0);
+ if (reg == 0xffffffff) {
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return -EIO;
+ }
+
+ } while (time_before(jiffies, deadline) && !(reg & BIT(8)));
+
+ if (!(reg & BIT(8))) {
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return -ETIMEDOUT;
+ }
+
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ ret = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MSCC_PHY_EXT_PHY_CNTL_1, MEDIA_OP_MODE_MASK,
+ MEDIA_OP_MODE_COPPER);
+
+ if (ret)
+ return ret;
+
+ ret = genphy_soft_reset(phydev);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vsc8531->nleds; i++) {
+ ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+
+err:
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return ret;
+}
+
static int vsc85xx_ack_interrupt(struct phy_device *phydev)
{
int rc = 0;
@@ -1791,6 +2209,31 @@ static int vsc85xx_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
+static int vsc8514_probe(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531;
+ u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
+ VSC8531_DUPLEX_COLLISION};
+
+ vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+ if (!vsc8531)
+ return -ENOMEM;
+
+ phydev->priv = vsc8531;
+
+ vsc8531->nleds = 4;
+ vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc85xx_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+ vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+}
+
static int vsc8574_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
@@ -1879,10 +2322,33 @@ static int vsc85xx_probe(struct phy_device *phydev)
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8514,
+ .name = "Microsemi GE VSC8514 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8514_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8514_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8530,
.name = "Microsemi FE VSC8530",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1907,7 +2373,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8531,
.name = "Microsemi VSC8531",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1932,7 +2398,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8540,
.name = "Microsemi FE VSC8540 SyncE",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1957,7 +2423,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8541,
.name = "Microsemi VSC8541 SyncE",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1982,7 +2448,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8574,
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -2008,7 +2474,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8584,
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -2034,6 +2500,7 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+ { PHY_ID_VSC8514, 0xfffffff0, },
{ PHY_ID_VSC8530, 0xfffffff0, },
{ PHY_ID_VSC8531, 0xfffffff0, },
{ PHY_ID_VSC8540, 0xfffffff0, },
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 42282a86b680..a221dd552c3c 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -128,7 +128,7 @@ static struct phy_driver dp83865_driver[] = { {
.phy_id = DP83865_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83865",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = ns_config_init,
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 9e24d9569424..abe13dfe50ad 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -262,12 +262,30 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
{
int val;
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
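+ /* If autoneg has not completed, the abilities reported in the LPA
+ * registers are not valid yet, so report no link partner abilities.
+ */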
+ if (!(val & MDIO_AN_STAT1_COMPLETE)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->lp_advertising);
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, 0);
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ return 0;
+ }
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising,
+ val & MDIO_AN_STAT1_LPABLE);
+
/* Read the link partner's base page advertisement */
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
if (val < 0)
return val;
- mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, val);
phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
@@ -498,21 +516,10 @@ int gen10g_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(gen10g_config_aneg);
-static int gen10g_read_status(struct phy_device *phydev)
-{
- /* For now just lie and say it's 10G all the time */
- phydev->speed = SPEED_10000;
- phydev->duplex = DUPLEX_FULL;
-
- return genphy_c45_read_link(phydev);
-}
-
-struct phy_driver genphy_10g_driver = {
+struct phy_driver genphy_c45_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
- .name = "Generic 10G PHY",
+ .name = "Generic Clause 45 PHY",
.soft_reset = genphy_no_soft_reset,
- .features = PHY_10GBIT_FEATURES,
- .config_aneg = gen10g_config_aneg,
- .read_status = gen10g_read_status,
+ .read_status = genphy_c45_read_status,
};
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 5016cd5fd7c7..3daf0214a242 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,6 +8,11 @@
const char *phy_speed_to_str(int speed)
{
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 67,
+ "Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
+ "If a speed or mode has been added please update phy_speed_to_str "
+ "and the PHY settings array.\n");
+
switch (speed) {
case SPEED_10:
return "10Mbps";
@@ -35,6 +40,8 @@ const char *phy_speed_to_str(int speed)
return "56Gbps";
case SPEED_100000:
return "100Gbps";
+ case SPEED_200000:
+ return "200Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -58,222 +65,81 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
/* A mapping of all SUPPORTED settings to speed/duplex. This table
* must be grouped by speed and sorted in descending match priority
* - iow, descending speed. */
+
+#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
+ .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
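+/* e.g. PHY_SETTING(1000, FULL, 1000baseT_Full) expands to
+ * { .speed = SPEED_1000, .duplex = DUPLEX_FULL,
+ *   .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT }
+ */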
+
static const struct phy_setting settings[] = {
+ /* 200G */
+ PHY_SETTING( 200000, FULL, 200000baseCR4_Full ),
+ PHY_SETTING( 200000, FULL, 200000baseKR4_Full ),
+ PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ),
+ PHY_SETTING( 200000, FULL, 200000baseDR4_Full ),
+ PHY_SETTING( 200000, FULL, 200000baseSR4_Full ),
/* 100G */
- {
- .speed = SPEED_100000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- },
- {
- .speed = SPEED_100000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- },
- {
- .speed = SPEED_100000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- },
- {
- .speed = SPEED_100000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- },
+ PHY_SETTING( 100000, FULL, 100000baseCR4_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseKR4_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseLR4_ER4_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseSR4_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseCR2_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseKR2_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseDR2_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseSR2_Full ),
/* 56G */
- {
- .speed = SPEED_56000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
- },
- {
- .speed = SPEED_56000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
- },
- {
- .speed = SPEED_56000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
- },
- {
- .speed = SPEED_56000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
- },
+ PHY_SETTING( 56000, FULL, 56000baseCR4_Full ),
+ PHY_SETTING( 56000, FULL, 56000baseKR4_Full ),
+ PHY_SETTING( 56000, FULL, 56000baseLR4_Full ),
+ PHY_SETTING( 56000, FULL, 56000baseSR4_Full ),
/* 50G */
- {
- .speed = SPEED_50000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- },
- {
- .speed = SPEED_50000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- },
- {
- .speed = SPEED_50000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- },
+ PHY_SETTING( 50000, FULL, 50000baseCR2_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseKR2_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseSR2_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseCR_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseKR_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseLR_ER_FR_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseDR_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseSR_Full ),
/* 40G */
- {
- .speed = SPEED_40000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- },
- {
- .speed = SPEED_40000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- },
- {
- .speed = SPEED_40000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- },
- {
- .speed = SPEED_40000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- },
+ PHY_SETTING( 40000, FULL, 40000baseCR4_Full ),
+ PHY_SETTING( 40000, FULL, 40000baseKR4_Full ),
+ PHY_SETTING( 40000, FULL, 40000baseLR4_Full ),
+ PHY_SETTING( 40000, FULL, 40000baseSR4_Full ),
/* 25G */
- {
- .speed = SPEED_25000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- },
- {
- .speed = SPEED_25000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- },
- {
- .speed = SPEED_25000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- },
-
+ PHY_SETTING( 25000, FULL, 25000baseCR_Full ),
+ PHY_SETTING( 25000, FULL, 25000baseKR_Full ),
+ PHY_SETTING( 25000, FULL, 25000baseSR_Full ),
/* 20G */
- {
- .speed = SPEED_20000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
- },
- {
- .speed = SPEED_20000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
- },
+ PHY_SETTING( 20000, FULL, 20000baseKR2_Full ),
+ PHY_SETTING( 20000, FULL, 20000baseMLD2_Full ),
/* 10G */
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- },
+ PHY_SETTING( 10000, FULL, 10000baseCR_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseER_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseKR_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseKX4_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseLR_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseLRM_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseR_FEC ),
+ PHY_SETTING( 10000, FULL, 10000baseSR_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseT_Full ),
/* 5G */
- {
- .speed = SPEED_5000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- },
-
+ PHY_SETTING( 5000, FULL, 5000baseT_Full ),
/* 2.5G */
- {
- .speed = SPEED_2500,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- },
- {
- .speed = SPEED_2500,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
- },
+ PHY_SETTING( 2500, FULL, 2500baseT_Full ),
+ PHY_SETTING( 2500, FULL, 2500baseX_Full ),
/* 1G */
- {
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- },
- {
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- },
- {
- .speed = SPEED_1000,
- .duplex = DUPLEX_HALF,
- .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- },
- {
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- },
+ PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
+ PHY_SETTING( 1000, FULL, 1000baseT_Full ),
+ PHY_SETTING( 1000, HALF, 1000baseT_Half ),
+ PHY_SETTING( 1000, FULL, 1000baseX_Full ),
/* 100M */
- {
- .speed = SPEED_100,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- },
- {
- .speed = SPEED_100,
- .duplex = DUPLEX_HALF,
- .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- },
+ PHY_SETTING( 100, FULL, 100baseT_Full ),
+ PHY_SETTING( 100, HALF, 100baseT_Half ),
/* 10M */
- {
- .speed = SPEED_10,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- },
- {
- .speed = SPEED_10,
- .duplex = DUPLEX_HALF,
- .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- },
+ PHY_SETTING( 10, FULL, 10baseT_Full ),
+ PHY_SETTING( 10, HALF, 10baseT_Half ),
};
+#undef PHY_SETTING
/**
* phy_lookup_setting - lookup a PHY setting
@@ -362,7 +228,7 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
if (err)
return err;
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_advertise_supported(phydev);
return 0;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3745220c5c98..e8885429293a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -43,7 +43,6 @@ static const char *phy_state_to_str(enum phy_state st)
PHY_STATE_STR(NOLINK)
PHY_STATE_STR(FORCING)
PHY_STATE_STR(HALTED)
- PHY_STATE_STR(RESUMING)
}
return NULL;
@@ -61,6 +60,32 @@ static void phy_link_down(struct phy_device *phydev, bool do_carrier)
phy_led_trigger_change_speed(phydev);
}
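+/* Derive the printable flow control mode ("rx/tx", "rx", "tx" or "off")
+ * from the local pause advertisement and the link partner's abilities.
+ */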
+static const char *phy_pause_str(struct phy_device *phydev)
+{
+ bool local_pause, local_asym_pause;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ goto no_pause;
+
+ local_pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ local_asym_pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
+ if (local_pause && phydev->pause)
+ return "rx/tx";
+
+ if (local_asym_pause && phydev->asym_pause) {
+ if (local_pause)
+ return "rx";
+ if (phydev->pause)
+ return "tx";
+ }
+
+no_pause:
+ return "off";
+}
+
/**
* phy_print_status - Convenience function to print out the current phy status
* @phydev: the phy_device struct
@@ -72,7 +97,7 @@ void phy_print_status(struct phy_device *phydev)
"Link is Up - %s/%s - flow control %s\n",
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
- phydev->pause ? "rx/tx" : "off");
+ phy_pause_str(phydev));
} else {
netdev_info(phydev->attached_dev, "Link is Down\n");
}
@@ -214,10 +239,6 @@ static void phy_sanitize_settings(struct phy_device *phydev)
{
const struct phy_setting *setting;
- /* Sanitize settings based on PHY capabilities */
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported))
- phydev->autoneg = AUTONEG_DISABLE;
-
setting = phy_find_valid(phydev->speed, phydev->duplex,
phydev->supported);
if (setting) {
@@ -863,10 +884,7 @@ void phy_start(struct phy_device *phydev)
goto out;
}
- if (phydev->state == PHY_READY)
- phydev->state = PHY_UP;
- else
- phydev->state = PHY_RESUMING;
+ phydev->state = PHY_UP;
phy_start_machine(phydev);
out:
@@ -891,9 +909,6 @@ void phy_state_machine(struct work_struct *work)
old_state = phydev->state;
- if (phydev->drv && phydev->drv->link_change_notify)
- phydev->drv->link_change_notify(phydev);
-
switch (phydev->state) {
case PHY_DOWN:
case PHY_READY:
@@ -904,7 +919,6 @@ void phy_state_machine(struct work_struct *work)
break;
case PHY_NOLINK:
case PHY_RUNNING:
- case PHY_RESUMING:
err = phy_check_link_status(phydev);
break;
case PHY_FORCING:
@@ -940,10 +954,13 @@ void phy_state_machine(struct work_struct *work)
if (err < 0)
phy_error(phydev);
- if (old_state != phydev->state)
+ if (old_state != phydev->state) {
phydev_dbg(phydev, "PHY state change %s -> %s\n",
phy_state_to_str(old_state),
phy_state_to_str(phydev->state));
+ if (phydev->drv && phydev->drv->link_change_notify)
+ phydev->drv->link_change_notify(phydev);
+ }
/* Only re-schedule a PHY state machine change if we are polling the
* PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 77068c545de0..dcc93a873174 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -225,7 +225,7 @@ static void phy_mdio_device_remove(struct mdio_device *mdiodev)
}
static struct phy_driver genphy_driver;
-extern struct phy_driver genphy_10g_driver;
+extern struct phy_driver genphy_c45_driver;
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
@@ -1174,7 +1174,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
*/
if (!d->driver) {
if (phydev->is_c45)
- d->driver = &genphy_10g_driver.mdiodrv.driver;
+ d->driver = &genphy_c45_driver.mdiodrv.driver;
else
d->driver = &genphy_driver.mdiodrv.driver;
@@ -1335,7 +1335,7 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy);
bool phy_driver_is_genphy_10g(struct phy_device *phydev)
{
return phy_driver_is_genphy_kind(phydev,
- &genphy_10g_driver.mdiodrv.driver);
+ &genphy_c45_driver.mdiodrv.driver);
}
EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
@@ -1710,23 +1710,19 @@ int genphy_update_link(struct phy_device *phydev)
*/
if (!phy_polling_mode(phydev)) {
status = phy_read(phydev, MII_BMSR);
- if (status < 0) {
+ if (status < 0)
return status;
- } else if (status & BMSR_LSTATUS) {
- phydev->link = 1;
- return 0;
- }
+ else if (status & BMSR_LSTATUS)
+ goto done;
}
/* Read link and autonegotiation status */
status = phy_read(phydev, MII_BMSR);
if (status < 0)
return status;
-
- if ((status & BMSR_LSTATUS) == 0)
- phydev->link = 0;
- else
- phydev->link = 1;
+done:
+ phydev->link = status & BMSR_LSTATUS ? 1 : 0;
+ phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0;
return 0;
}
@@ -1743,23 +1739,26 @@ EXPORT_SYMBOL(genphy_update_link);
*/
int genphy_read_status(struct phy_device *phydev)
{
- int adv;
- int err;
- int lpa;
- int lpagb = 0;
+ int adv, lpa, lpagb, err, old_link = phydev->link;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
linkmode_zero(phydev->lp_advertising);
- if (AUTONEG_ENABLE == phydev->autoneg) {
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->supported) ||
- linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->supported)) {
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ if (phydev->is_gigabit_capable) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
return lpagb;
@@ -1785,14 +1784,8 @@ int genphy_read_status(struct phy_device *phydev)
return lpa;
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
phy_resolve_aneg_linkmode(phydev);
- } else {
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
int bmcr = phy_read(phydev, MII_BMCR);
if (bmcr < 0)
@@ -1809,9 +1802,6 @@ int genphy_read_status(struct phy_device *phydev)
phydev->speed = SPEED_100;
else
phydev->speed = SPEED_10;
-
- phydev->pause = 0;
- phydev->asym_pause = 0;
}
return 0;
@@ -1829,13 +1819,25 @@ EXPORT_SYMBOL(genphy_read_status);
*/
int genphy_soft_reset(struct phy_device *phydev)
{
+ u16 res = BMCR_RESET;
int ret;
- ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ res |= BMCR_ANRESTART;
+
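+ /* Issue the reset while making sure the isolate bit is cleared */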
+ ret = phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, res);
if (ret < 0)
return ret;
- return phy_poll_reset(phydev);
+ ret = phy_poll_reset(phydev);
+ if (ret)
+ return ret;
+
+ /* BMCR may be reset to defaults */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ ret = genphy_setup_forced(phydev);
+
+ return ret;
}
EXPORT_SYMBOL(genphy_soft_reset);
@@ -1887,6 +1889,54 @@ int genphy_config_init(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_init);
+/**
+ * genphy_read_abilities - read PHY abilities from Clause 22 registers
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the PHY's abilities and populates
+ * phydev->supported accordingly.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_read_abilities(struct phy_device *phydev)
+{
+ int val;
+
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
+
+ val = phy_read(phydev, MII_BMSR);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported,
+ val & BMSR_ANEGCAPABLE);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported,
+ val & BMSR_100FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported,
+ val & BMSR_100HALF);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported,
+ val & BMSR_10FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported,
+ val & BMSR_10HALF);
+
+ if (val & BMSR_ESTATEN) {
+ val = phy_read(phydev, MII_ESTATUS);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported, val & ESTATUS_1000_TFULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported, val & ESTATUS_1000_THALF);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_read_abilities);
+
/* This is used for the phy device which doesn't support the MMD extended
* register access, but it does have side effect when we are trying to access
* the MMD register via indirect method.
@@ -1935,10 +1985,35 @@ EXPORT_SYMBOL(genphy_loopback);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode)
{
linkmode_clear_bit(link_mode, phydev->supported);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_advertise_supported(phydev);
}
EXPORT_SYMBOL(phy_remove_link_mode);
+static void phy_copy_pause_bits(unsigned long *dst, unsigned long *src)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dst,
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, src));
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, dst,
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, src));
+}
+
+/**
+ * phy_advertise_supported - Advertise all supported modes
+ * @phydev: target phy_device struct
+ *
+ * Description: Called to advertise all supported modes; the pause mode
+ * advertisement is left untouched.
+ */
+void phy_advertise_supported(struct phy_device *phydev)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(new);
+
+ linkmode_copy(new, phydev->supported);
+ phy_copy_pause_bits(new, phydev->advertising);
+ linkmode_copy(phydev->advertising, new);
+}
+EXPORT_SYMBOL(phy_advertise_supported);
+
/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
@@ -1949,8 +2024,7 @@ EXPORT_SYMBOL(phy_remove_link_mode);
void phy_support_sym_pause(struct phy_device *phydev)
{
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_copy_pause_bits(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_sym_pause);
@@ -1962,9 +2036,7 @@ EXPORT_SYMBOL(phy_support_sym_pause);
*/
void phy_support_asym_pause(struct phy_device *phydev)
{
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_copy_pause_bits(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_asym_pause);
@@ -2044,11 +2116,14 @@ bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp)
{
if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->supported) ||
- (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->supported) &&
- pp->rx_pause != pp->tx_pause))
+ phydev->supported) && pp->rx_pause)
+ return false;
+
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported) &&
+ pp->rx_pause != pp->tx_pause)
return false;
+
return true;
}
EXPORT_SYMBOL(phy_validate_pause);
@@ -2104,14 +2179,30 @@ static int phy_probe(struct device *dev)
*/
if (phydrv->features) {
linkmode_copy(phydev->supported, phydrv->features);
- } else {
+ } else if (phydrv->get_features) {
err = phydrv->get_features(phydev);
- if (err)
- goto out;
+ } else if (phydev->is_c45) {
+ err = genphy_c45_pma_read_abilities(phydev);
+ } else {
+ err = genphy_read_abilities(phydev);
}
+ if (err)
+ goto out;
+
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported))
+ phydev->autoneg = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported))
+ phydev->is_gigabit_capable = 1;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported))
+ phydev->is_gigabit_capable = 1;
+
of_set_phy_supported(phydev);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_advertise_supported(phydev);
/* Get the EEE modes we want to prohibit. We will ask
* the PHY stop advertising these mode later on
@@ -2177,11 +2268,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
int retval;
/* Either the features are hard coded, or dynamically
- * determine. It cannot be both or neither
+ * determined. It cannot be both.
*/
- if (WARN_ON((!new_driver->features && !new_driver->get_features) ||
- (new_driver->features && new_driver->get_features))) {
- pr_err("%s: Driver features are missing\n", new_driver->name);
+ if (WARN_ON(new_driver->features && new_driver->get_features)) {
+ pr_err("%s: features and get_features must not both be set\n",
+ new_driver->name);
return -EINVAL;
}
@@ -2243,8 +2334,7 @@ static struct phy_driver genphy_driver = {
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
.soft_reset = genphy_no_soft_reset,
- .config_init = genphy_config_init,
- .features = PHY_GBIT_ALL_PORTS_FEATURES,
+ .get_features = genphy_read_abilities,
.aneg_done = genphy_aneg_done,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -2261,14 +2351,14 @@ static int __init phy_init(void)
features_init();
- rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE);
+ rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
if (rc)
- goto err_10g;
+ goto err_c45;
rc = phy_driver_register(&genphy_driver, THIS_MODULE);
if (rc) {
- phy_driver_unregister(&genphy_10g_driver);
-err_10g:
+ phy_driver_unregister(&genphy_c45_driver);
+err_c45:
mdio_bus_exit();
}
@@ -2277,7 +2367,7 @@ err_10g:
static void __exit phy_exit(void)
{
- phy_driver_unregister(&genphy_10g_driver);
+ phy_driver_unregister(&genphy_c45_driver);
phy_driver_unregister(&genphy_driver);
mdio_bus_exit();
}
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 5486f6fb2ab2..1b15a991ee06 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -110,7 +110,7 @@ static struct phy_driver qs6612_driver[] = { {
.phy_id = 0x00181440,
.name = "QS6612",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = qs6612_config_init,
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 10df52ccddfe..a669945eb829 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,11 +23,15 @@
#define RTL821x_INSR 0x13
+#define RTL821x_EXT_PAGE_SELECT 0x1e
#define RTL821x_PAGE_SELECT 0x1f
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
+#define RTL8211E_TX_DELAY BIT(1)
+#define RTL8211E_RX_DELAY BIT(2)
+#define RTL8211E_MODE_MII_GMII BIT(3)
#define RTL8201F_ISR 0x1e
#define RTL8201F_IER 0x13
@@ -151,29 +155,79 @@ static int rtl8211_config_aneg(struct phy_device *phydev)
static int rtl8211c_config_init(struct phy_device *phydev)
{
/* RTL8211C has an issue when operating in Gigabit slave mode */
- phy_set_bits(phydev, MII_CTRL1000,
- CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
-
- return genphy_config_init(phydev);
+ return phy_set_bits(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
}
static int rtl8211f_config_init(struct phy_device *phydev)
{
- int ret;
- u16 val = 0;
-
- ret = genphy_config_init(phydev);
- if (ret < 0)
- return ret;
+ u16 val;
- /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ /* enable TX-delay for rgmii-{id,txid}, and disable it for rgmii and
+ * rgmii-rxid. The RX-delay can be enabled by the external RXDLY pin.
+ */
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val = RTL8211F_TX_DELAY;
+ break;
+ default: /* the rest of the modes imply leaving delay as is. */
+ return 0;
+ }
return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
}
+static int rtl8211e_config_init(struct phy_device *phydev)
+{
+ int ret = 0, oldpage;
+ u16 val;
+
+ /* enable TX/RX delay for rgmii-* modes, and disable them for rgmii. */
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val = RTL8211E_TX_DELAY | RTL8211E_RX_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = RTL8211E_RX_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = RTL8211E_TX_DELAY;
+ break;
+ default: /* the rest of the modes imply leaving delays as is. */
+ return 0;
+ }
+
+ /* According to a sample driver there is a 0x1c config register on the
+ * 0xa4 extension page (0x7) layout. It can be used to disable/enable
+ * the RX/TX delays otherwise controlled by RXDLY/TXDLY pins. It can
+ * also be used to customize the whole configuration register:
+ * 8:6 = PHY Address, 5:4 = Auto-Negotiation, 3 = Interface Mode Select,
+ * 2 = RX Delay, 1 = TX Delay, 0 = SELRGV (see original PHY datasheet
+ * for details).
+ */
+ oldpage = phy_select_page(phydev, 0x7);
+ if (oldpage < 0)
+ goto err_restore_page;
+
+ ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4);
+ if (ret)
+ goto err_restore_page;
+
+ ret = __phy_modify(phydev, 0x1c, RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
+ val);
+
+err_restore_page:
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
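
rtl8211e_config_init() above follows phylib's guarded page-access idiom; a condensed sketch of that idiom, reusing the page (0x7) and register (0x1c) values from this driver with a made-up foo_* helper name:

static int foo_modify_paged_reg(struct phy_device *phydev, u16 mask, u16 set)
{
        int ret = 0, oldpage;

        /* takes the MDIO bus lock and switches to the extension page */
        oldpage = phy_select_page(phydev, 0x7);
        if (oldpage < 0)
                goto restore;

        ret = __phy_modify(phydev, 0x1c, mask, set);

restore:
        /* must always be called with phy_select_page()'s return value:
         * it restores the old page, drops the bus lock and folds any
         * earlier error into the return code
         */
        return phy_restore_page(phydev, oldpage, ret);
}

The unconditional restore call is the reason for the goto in the driver code: even a failed page select leaves the bus lock held until phy_restore_page() runs.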
static int rtl8211b_suspend(struct phy_device *phydev)
{
phy_write(phydev, MII_MMD_DATA, BIT(9));
@@ -192,10 +246,6 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
{
int ret;
- ret = genphy_config_init(phydev);
- if (ret < 0)
- return ret;
-
ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE,
RTL8366RB_POWER_SAVE_ON);
if (ret) {
@@ -210,11 +260,9 @@ static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
- .features = PHY_BASIC_FEATURES,
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .features = PHY_BASIC_FEATURES,
.ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
.suspend = genphy_suspend,
@@ -224,47 +272,52 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_aneg = rtl8211_config_aneg,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
.suspend = rtl8211b_suspend,
.resume = rtl8211b_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc913),
.name = "RTL8211C Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = rtl8211c_config_init,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
+ .config_init = &rtl8211e_config_init,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = &rtl8211f_config_init,
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
@@ -275,8 +328,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc800),
.name = "Generic Realtek PHY",
- .features = PHY_GBIT_FEATURES,
- .config_init = genphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -284,7 +335,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = &rtl8366rb_config_init,
/* These interrupts are handled by the irq controller
* embedded inside the RTL8366RB, they get unmasked when the
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index 95abf7072f32..52f1f65320fe 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -104,41 +104,14 @@ static int rockchip_integrated_phy_config_init(struct phy_device *phydev)
static void rockchip_link_change_notify(struct phy_device *phydev)
{
- int speed = SPEED_10;
-
- if (phydev->autoneg == AUTONEG_ENABLE) {
- int reg = phy_read(phydev, MII_SPECIAL_CONTROL_STATUS);
-
- if (reg < 0) {
- phydev_err(phydev, "phy_read err: %d.\n", reg);
- return;
- }
-
- if (reg & MII_SPEED_100)
- speed = SPEED_100;
- else if (reg & MII_SPEED_10)
- speed = SPEED_10;
- } else {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0) {
- phydev_err(phydev, "phy_read err: %d.\n", bmcr);
- return;
- }
-
- if (bmcr & BMCR_SPEED100)
- speed = SPEED_100;
- else
- speed = SPEED_10;
- }
-
/*
* If mode switch happens from 10BT to 100BT, all DSP/AFE
* registers are set to default values. So any AFE/DSP
* registers have to be re-initialized in this case.
*/
- if ((phydev->speed == SPEED_10) && (speed == SPEED_100)) {
+ if (phydev->state == PHY_RUNNING && phydev->speed == SPEED_100) {
int ret = rockchip_integrated_phy_analog_init(phydev);
+
if (ret)
phydev_err(phydev, "rockchip_integrated_phy_analog_init err: %d.\n",
ret);
@@ -202,7 +175,7 @@ static struct phy_driver rockchip_phy_driver[] = {
.phy_id = INTERNAL_EPHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "Rockchip integrated EPHY",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = 0,
.link_change_notify = rockchip_link_change_notify,
.soft_reset = genphy_soft_reset,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c94d3bfbc772..dc3d92d340c4 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -214,7 +214,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN83C185",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
@@ -233,7 +233,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8187",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
@@ -257,7 +257,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
@@ -282,7 +282,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN911x Internal PHY",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
@@ -300,7 +300,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
@@ -326,7 +326,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8740",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 92b64e254b44..7475cef17cf7 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -159,6 +159,14 @@ static const struct spi_device_id ks8995_id[] = {
};
MODULE_DEVICE_TABLE(spi, ks8995_id);
+static const struct of_device_id ks8895_spi_of_match[] = {
+ { .compatible = "micrel,ks8995" },
+ { .compatible = "micrel,ksz8864" },
+ { .compatible = "micrel,ksz8795" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
+
static inline u8 get_chip_id(u8 val)
{
return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
@@ -526,6 +534,7 @@ static int ks8995_remove(struct spi_device *spi)
static struct spi_driver ks8995_driver = {
.driver = {
.name = "spi-ks8995",
+ .of_match_table = of_match_ptr(ks8895_spi_of_match),
},
.probe = ks8995_probe,
.remove = ks8995_remove,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 5b6acf431f98..d735a01380ed 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -81,7 +81,7 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id = STE101P_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
@@ -91,7 +91,7 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id = STE100P_PHY_ID,
.phy_id_mask = 0xffffffff,
.name = "STe100p",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 219fc7cdc2b3..a32b3fd8a370 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -87,7 +87,7 @@ static struct phy_driver upd60620_driver[1] = { {
.phy_id = UPD60620_PHY_ID,
.phy_id_mask = 0xfffffffe,
.name = "Renesas uPD60620",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = 0,
.config_init = upd60620_config_init,
.read_status = upd60620_read_status,
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index dc0dd87a6694..43691b1acfd9 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -61,7 +61,6 @@
#define PHY_ID_VSC8234 0x000fc620
#define PHY_ID_VSC8244 0x000fc6c0
-#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8601 0x00070420
#define PHY_ID_VSC7385 0x00070450
@@ -293,7 +292,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
err = phy_write(phydev, MII_VSC8244_IMASK,
(phydev->drv->phy_id == PHY_ID_VSC8234 ||
phydev->drv->phy_id == PHY_ID_VSC8244 ||
- phydev->drv->phy_id == PHY_ID_VSC8514 ||
phydev->drv->phy_id == PHY_ID_VSC8572 ||
phydev->drv->phy_id == PHY_ID_VSC8601) ?
MII_VSC8244_IMASK_MASK :
@@ -389,7 +387,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8234,
.name = "Vitesse VSC8234",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -398,16 +396,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8244,
.name = "Vitesse VSC8244",
.phy_id_mask = 0x000fffc0,
- .features = PHY_GBIT_FEATURES,
- .config_init = &vsc824x_config_init,
- .config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc82xx_config_intr,
-}, {
- .phy_id = PHY_ID_VSC8514,
- .name = "Vitesse VSC8514",
- .phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -416,7 +405,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8572,
.name = "Vitesse VSC8572",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -425,7 +414,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8601,
.name = "Vitesse VSC8601",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc8601_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -433,7 +422,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC7385,
.name = "Vitesse VSC7385",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
@@ -442,7 +431,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC7388,
.name = "Vitesse VSC7388",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
@@ -451,7 +440,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC7395,
.name = "Vitesse VSC7395",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
@@ -460,7 +449,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC7398,
.name = "Vitesse VSC7398",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
@@ -469,7 +458,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -479,7 +468,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8221,
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8221",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -488,7 +477,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8211,
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8211",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -499,7 +488,6 @@ module_phy_driver(vsc82xx_driver);
static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
- { PHY_ID_VSC8514, 0x000ffff0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
{ PHY_ID_VSC7385, 0x000ffff0 },
{ PHY_ID_VSC7388, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 7ccdc62c6052..ff61dd8748de 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -222,7 +222,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out_free;
}
state->sha1->tfm = shash;
- state->sha1->flags = 0;
digestsize = crypto_shash_digestsize(shash);
if (digestsize < MPPE_MAX_KEY_LEN)
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 941cfa8f1c2a..627b3a4405ad 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -316,7 +316,7 @@ static int
card_send_command(const int ioaddr[], const char* name,
const unsigned char out[], unsigned char in[])
{
- int status, x;
+ int status;
if ((status = card_wait_for_busy_clear(ioaddr, name)))
return status;
@@ -345,9 +345,7 @@ card_send_command(const int ioaddr[], const char* name,
out[0], out[1], out[2], out[3], out[4], out[5]);
}
- if (out[1] == 0x1b) {
- x = (out[2] == 0x02);
- } else {
+ if (out[1] != 0x1b) {
if (out[0] >= 0x80 && in[0] != (out[1] | 0x80))
return -EIO;
}
@@ -490,14 +488,13 @@ sb1000_check_CRC(const int ioaddr[], const char* name)
static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
- int crc, status;
+ int status;
/* check CRC */
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
if (st[1] != st[3] || st[2] != st[4])
return -EIO;
- crc = st[1] << 8 | st[2];
return 0;
}
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index f4e93f5fc204..ea90db3c7705 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -153,7 +153,7 @@ out_fail:
void
slhc_free(struct slcompress *comp)
{
- if ( comp == NULLSLCOMPR )
+ if ( IS_ERR_OR_NULL(comp) )
return;
if ( comp->tstate != NULLSLSTATE )
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6ed96fdfd96d..2106045b3e16 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -38,13 +38,11 @@
* Helpers
**********/
-#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
-
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
struct team_port *port = rtnl_dereference(dev->rx_handler_data);
- return team_port_exists(dev) ? port : NULL;
+ return netif_is_team_port(dev) ? port : NULL;
}
/*
@@ -1143,7 +1141,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EINVAL;
}
- if (team_port_exists(port_dev)) {
+ if (netif_is_team_port(port_dev)) {
NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
netdev_err(dev, "Device %s is already a port "
"of a team device\n", portname);
@@ -1156,6 +1154,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EINVAL;
}
+ if (netdev_has_upper_dev(dev, port_dev)) {
+ NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
+ netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+ portname);
+ return -EBUSY;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
@@ -1246,6 +1251,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
goto err_option_port_add;
}
+ /* set promiscuity level to new slave */
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(port_dev, 1);
+ if (err)
+ goto err_set_slave_promisc;
+ }
+
+ /* set allmulti level to new slave */
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(port_dev, 1);
+ if (err) {
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(port_dev, -1);
+ goto err_set_slave_promisc;
+ }
+ }
+
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
@@ -1262,6 +1284,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return 0;
+err_set_slave_promisc:
+ __team_option_inst_del_port(team, port);
+
err_option_port_add:
team_upper_dev_unlink(team, port);
@@ -1307,6 +1332,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
team_port_disable(team, port);
list_del_rcu(&port->list);
+
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(port_dev, -1);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(port_dev, -1);
+
team_upper_dev_unlink(team, port);
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
@@ -1691,8 +1722,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -2260,7 +2290,7 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
if (err)
return err;
- option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
if (!option_item)
return -EMSGSIZE;
@@ -2374,7 +2404,7 @@ start_again:
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
- option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+ option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
if (!option_list)
goto nla_put_failure;
@@ -2480,9 +2510,11 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
err = -EINVAL;
goto team_put;
}
- err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
- nl_option, team_nl_option_policy,
- info->extack);
+ err = nla_parse_nested_deprecated(opt_attrs,
+ TEAM_ATTR_OPTION_MAX,
+ nl_option,
+ team_nl_option_policy,
+ info->extack);
if (err)
goto team_put;
if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
@@ -2596,7 +2628,7 @@ static int team_nl_fill_one_port_get(struct sk_buff *skb,
{
struct nlattr *port_item;
- port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+ port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
if (!port_item)
goto nest_cancel;
if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
@@ -2651,7 +2683,7 @@ start_again:
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
- port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+ port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
if (!port_list)
goto nla_put_failure;
@@ -2725,25 +2757,25 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
static const struct genl_ops team_nl_ops[] = {
{
.cmd = TEAM_CMD_NOOP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = team_nl_cmd_noop,
- .policy = team_nl_policy,
},
{
.cmd = TEAM_CMD_OPTIONS_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = team_nl_cmd_options_set,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = TEAM_CMD_OPTIONS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = team_nl_cmd_options_get,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = TEAM_CMD_PORT_LIST_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = team_nl_cmd_port_list_get,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -2756,6 +2788,7 @@ static struct genl_family team_nl_family __ro_after_init = {
.name = TEAM_GENL_NAME,
.version = TEAM_GENL_VERSION,
.maxattr = TEAM_ATTR_MAX,
+ .policy = team_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = team_nl_ops,
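
The block above tracks a generic netlink API change: the per-op .policy pointers move into the family, and ops opt out of the new strict validation explicitly. A minimal sketch of the resulting shape for a hypothetical "foo" family (all FOO_* and foo_* names are placeholders):

#include <net/genetlink.h>

enum {
        FOO_ATTR_UNSPEC,
        FOO_ATTR_IFINDEX,
        __FOO_ATTR_MAX,
};
#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)

static const struct nla_policy foo_nl_policy[FOO_ATTR_MAX + 1] = {
        [FOO_ATTR_IFINDEX]      = { .type = NLA_U32 },
};

static int foo_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
        return 0;
}

static const struct genl_ops foo_nl_ops[] = {
        {
                .cmd = 1,       /* hypothetical FOO_CMD_NOOP */
                /* keep the old permissive behaviour for this op */
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = foo_nl_cmd_noop,
        },
};

static struct genl_family foo_nl_family __ro_after_init = {
        .name           = "foo",
        .version        = 1,
        .maxattr        = FOO_ATTR_MAX,
        .policy         = foo_nl_policy,  /* family-wide, replaces per-op .policy */
        .module         = THIS_MODULE,
        .ops            = foo_nl_ops,
        .n_ops          = ARRAY_SIZE(foo_nl_ops),
};

With this layout the core validates attributes against the family policy before doit/dumpit runs, unless an op opts out through its .validate flags as the team ops do above.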
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index c48c3a1eb1f8..fcf31335a8b6 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1282,6 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
tbnet_tear_down(net, true);
}
+ tb_unregister_protocol_handler(&net->handler);
return 0;
}
@@ -1290,6 +1291,8 @@ static int __maybe_unused tbnet_resume(struct device *dev)
struct tb_service *svc = tb_to_service(dev);
struct tbnet *net = tb_service_get_drvdata(svc);
+ tb_register_protocol_handler(&net->handler);
+
netif_carrier_off(net->dev);
if (netif_running(net->dev)) {
netif_device_attach(net->dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e9ca1c088d0b..abae165dcca5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -596,18 +596,22 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
struct tun_prog *prog;
+ u32 numqueues;
u16 ret = 0;
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues)
+ return 0;
+
prog = rcu_dereference(tun->steering_prog);
if (prog)
ret = bpf_prog_run_clear_cb(prog->prog, skb);
- return ret % tun->numqueues;
+ return ret % numqueues;
}
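
The snapshot above is the point of the change: tun->numqueues can drop to zero under RCU while the steering program runs, so the count is sampled once and reused. A compressed sketch of the idiom (foo_pick_queue is a made-up helper):

static u16 foo_pick_queue(struct tun_struct *tun, u32 hash)
{
        u32 numqueues = READ_ONCE(tun->numqueues);

        /* one read: the check and the modulo see the same value, so a
         * concurrent queue detach cannot turn this into a divide by zero
         */
        return numqueues ? hash % numqueues : 0;
}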
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct tun_struct *tun = netdev_priv(dev);
u16 ret;
@@ -700,6 +704,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
+ rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
+ NULL);
--tun->numqueues;
if (clean) {
@@ -1043,7 +1049,7 @@ static int tun_net_close(struct net_device *dev)
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
+ if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
@@ -1082,7 +1088,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
tfile = rcu_dereference(tun->tfiles[txq]);
/* Drop packet if interface is not attached */
- if (txq >= tun->numqueues)
+ if (!tfile)
goto drop;
if (!rcu_dereference(tun->steering_prog))
@@ -1305,6 +1311,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
rcu_read_lock();
+resample:
numqueues = READ_ONCE(tun->numqueues);
if (!numqueues) {
rcu_read_unlock();
@@ -1313,6 +1320,8 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
numqueues]);
+ if (unlikely(!tfile))
+ goto resample;
spin_lock(&tfile->tx_ring.producer_lock);
for (i = 0; i < n; i++) {
@@ -1966,7 +1975,8 @@ drop:
if (frags) {
/* Exercise flow dissector code path. */
- u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
+ u32 headlen = eth_get_headlen(tun->dev, skb->data,
+ skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
@@ -2873,8 +2883,7 @@ err_free_dev:
return err;
}
-static void tun_get_iff(struct net *net, struct tun_struct *tun,
- struct ifreq *ifr)
+static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
tun_debug(KERN_INFO, tun, "tun_get_iff\n");
@@ -3103,10 +3112,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
+ net = dev_net(tun->dev);
ret = 0;
switch (cmd) {
case TUNGETIFF:
- tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ tun_get_iff(tun, &ifr);
if (tfile->detached)
ifr.ifr_flags |= IFF_DETACH_QUEUE;
@@ -3328,6 +3338,13 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = tun_net_change_carrier(tun->dev, (bool)carrier);
break;
+ case TUNGETDEVNETNS:
+ ret = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ goto unlock;
+ ret = open_related_ns(&net->ns, get_net_ns);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -3457,7 +3474,7 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
rtnl_lock();
tun = tun_get(tfile);
if (tun)
- tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ tun_get_iff(tun, &ifr);
rtnl_unlock();
if (tun)
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index aff995be2a31..b86c5ce9a92a 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -320,6 +320,7 @@ static int aqc111_get_link_ksettings(struct net_device *net,
static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed)
{
struct aqc111_data *aqc111_data = dev->driver_priv;
+ u32 phy_on_the_wire;
aqc111_data->phy_cfg &= ~AQ_ADV_MASK;
aqc111_data->phy_cfg |= AQ_PAUSE;
@@ -361,7 +362,8 @@ static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed)
}
}
- aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg);
+ phy_on_the_wire = aqc111_data->phy_cfg;
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &phy_on_the_wire);
}
static int aqc111_set_link_ksettings(struct net_device *net,
@@ -453,6 +455,8 @@ static int aqc111_change_mtu(struct net_device *net, int new_mtu)
reg16 = 0x1420;
else if (dev->net->mtu <= 16334)
reg16 = 0x1A20;
+ else
+ return 0;
aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW,
2, &reg16);
@@ -753,6 +757,7 @@ static void aqc111_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct aqc111_data *aqc111_data = dev->driver_priv;
u16 reg16;
+ u32 phy_on_the_wire;
/* Force bz */
reg16 = SFR_PHYPWR_RSTCTL_BZ;
@@ -766,8 +771,9 @@ static void aqc111_unbind(struct usbnet *dev, struct usb_interface *intf)
aqc111_data->phy_cfg &= ~AQ_ADV_MASK;
aqc111_data->phy_cfg |= AQ_LOW_POWER;
aqc111_data->phy_cfg &= ~AQ_PHY_POWER_EN;
+ phy_on_the_wire = aqc111_data->phy_cfg;
aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0,
- &aqc111_data->phy_cfg);
+ &phy_on_the_wire);
kfree(aqc111_data);
}
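
The phy_on_the_wire temporaries added throughout this file (and the oldreg16 copy in the resume path further down) all guard the same hazard, assuming the 16/32-bit write helpers convert their buffer to bus byte order in place: handing them the cached phy_cfg directly would corrupt the CPU-order copy the driver keeps. A minimal sketch of the idiom, with a made-up helper name:

static void foo_push_phy_cfg(struct usbnet *dev, struct aqc111_data *priv)
{
        u32 on_the_wire = priv->phy_cfg;  /* scratch copy may get byte-swapped */

        aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &on_the_wire);
        /* priv->phy_cfg is still valid in CPU byte order */
}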
@@ -990,6 +996,7 @@ static int aqc111_reset(struct usbnet *dev)
{
struct aqc111_data *aqc111_data = dev->driver_priv;
u8 reg8 = 0;
+ u32 phy_on_the_wire;
dev->rx_urb_size = URB_SIZE;
@@ -1002,8 +1009,9 @@ static int aqc111_reset(struct usbnet *dev)
/* Power up ethernet PHY */
aqc111_data->phy_cfg = AQ_PHY_POWER_EN;
+ phy_on_the_wire = aqc111_data->phy_cfg;
aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
- &aqc111_data->phy_cfg);
+ &phy_on_the_wire);
/* Set the MAC address */
aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN,
@@ -1034,6 +1042,7 @@ static int aqc111_stop(struct usbnet *dev)
{
struct aqc111_data *aqc111_data = dev->driver_priv;
u16 reg16 = 0;
+ u32 phy_on_the_wire;
aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
2, &reg16);
@@ -1045,8 +1054,9 @@ static int aqc111_stop(struct usbnet *dev)
/* Put PHY to low power*/
aqc111_data->phy_cfg |= AQ_LOW_POWER;
+ phy_on_the_wire = aqc111_data->phy_cfg;
aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
- &aqc111_data->phy_cfg);
+ &phy_on_the_wire);
netif_carrier_off(dev->net);
@@ -1322,6 +1332,7 @@ static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
u16 temp_rx_ctrl = 0x00;
u16 reg16;
u8 reg8;
+ u32 phy_on_the_wire;
usbnet_suspend(intf, message);
@@ -1393,12 +1404,14 @@ static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0,
WOL_CFG_SIZE, &wol_cfg);
+ phy_on_the_wire = aqc111_data->phy_cfg;
aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
- &aqc111_data->phy_cfg);
+ &phy_on_the_wire);
} else {
aqc111_data->phy_cfg |= AQ_LOW_POWER;
+ phy_on_the_wire = aqc111_data->phy_cfg;
aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
- &aqc111_data->phy_cfg);
+ &phy_on_the_wire);
/* Disable RX path */
aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
@@ -1415,7 +1428,7 @@ static int aqc111_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct aqc111_data *aqc111_data = dev->driver_priv;
- u16 reg16;
+ u16 reg16, oldreg16;
u8 reg8;
netif_carrier_off(dev->net);
@@ -1431,9 +1444,11 @@ static int aqc111_resume(struct usb_interface *intf)
/* Configure RX control register => start operation */
reg16 = aqc111_data->rxctl;
reg16 &= ~SFR_RX_CTL_START;
+ /* needs to be saved in case endianness is swapped */
+ oldreg16 = reg16;
aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
- reg16 |= SFR_RX_CTL_START;
+ reg16 = oldreg16 | SFR_RX_CTL_START;
aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
aqc111_set_phy_speed(dev, aqc111_data->autoneg,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 0362acd5cdca..28321aca48fe 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -23,6 +23,7 @@
#include <linux/usb/cdc_ncm.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
/* alternative VLAN for IP session 0 if not untagged */
#define MBIM_IPS0_VID 4094
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 3d8a70d3ea9b..c247aed2dceb 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -54,17 +54,6 @@
#include <linux/workqueue.h>
#define USB_VENDOR_APPLE 0x05ac
-#define USB_PRODUCT_IPHONE 0x1290
-#define USB_PRODUCT_IPHONE_3G 0x1292
-#define USB_PRODUCT_IPHONE_3GS 0x1294
-#define USB_PRODUCT_IPHONE_4 0x1297
-#define USB_PRODUCT_IPAD 0x129a
-#define USB_PRODUCT_IPAD_2 0x12a2
-#define USB_PRODUCT_IPAD_3 0x12a6
-#define USB_PRODUCT_IPAD_MINI 0x12ab
-#define USB_PRODUCT_IPHONE_4_VZW 0x129c
-#define USB_PRODUCT_IPHONE_4S 0x12a0
-#define USB_PRODUCT_IPHONE_5 0x12a8
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -88,50 +77,9 @@
#define IPHETH_CARRIER_ON 0x04
static const struct usb_device_id ipheth_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
+ { USB_VENDOR_AND_INTERFACE_INFO(USB_VENDOR_APPLE, IPHETH_USBINTF_CLASS,
+ IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -293,8 +241,6 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
struct usb_device *udev;
int retval;
- if (!dev)
- return 0;
if (!dev->confirmed_pairing)
return 0;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..5c3ac97519b7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -63,6 +63,7 @@ enum qmi_wwan_flags {
enum qmi_wwan_quirks {
QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
+ QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
};
struct qmimux_hdr {
@@ -845,6 +846,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
.data = QMI_WWAN_QUIRK_DTR,
};
+static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
+ .description = "WWAN/QMI device",
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = qmi_wwan_bind,
+ .unbind = qmi_wwan_unbind,
+ .manage_power = qmi_wwan_manage_power,
+ .rx_fixup = qmi_wwan_rx_fixup,
+ .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
+};
+
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
@@ -865,6 +876,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
#define QMI_GOBI_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 0)
+/* Quectel does not use fixed interface numbers on at least some of their
+ * devices. We need to check the number of endpoints to ensure that we bind to
+ * the correct interface.
+ */
+#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+ USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
+ USB_SUBCLASS_VENDOR_SPEC, 0xff), \
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+
static const struct usb_device_id products[] = {
/* 1. CDC ECM like devices match on the control interface */
{ /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -969,20 +989,9 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
- { /* Quectel EP06/EG06/EM06 */
- USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
- USB_CLASS_VENDOR_SPEC,
- USB_SUBCLASS_VENDOR_SPEC,
- 0xff),
- .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
- },
- { /* Quectel EG12/EM12 */
- USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
- USB_CLASS_VENDOR_SPEC,
- USB_SUBCLASS_VENDOR_SPEC,
- 0xff),
- .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
- },
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1122,9 +1131,16 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
+ {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */
+ {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */
+ {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */
+ {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */
+ {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */
+ {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
{QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -1180,6 +1196,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
{QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
+ {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */
{QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
@@ -1200,9 +1217,12 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
+ {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
@@ -1270,7 +1290,6 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
- {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
@@ -1350,27 +1369,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
return false;
}
-static bool quectel_diag_detected(struct usb_interface *intf)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
- u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
- u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
-
- if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
- return false;
-
- if (id_product == 0x0306 || id_product == 0x0512)
- return true;
- else
- return false;
-}
-
static int qmi_wwan_probe(struct usb_interface *intf,
const struct usb_device_id *prod)
{
struct usb_device_id *id = (struct usb_device_id *)prod;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
+ const struct driver_info *info;
/* Workaround to enable dynamic IDs. This disables usbnet
* blacklisting functionality. Which, if required, can be
@@ -1404,10 +1408,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
* we need to match on class/subclass/protocol. These values are
* identical for the diagnostic- and QMI-interface, but bNumEndpoints is
* different. Ignore the current interface if the number of endpoints
- * the number for the diag interface (two).
+ * equals the number for the diag interface (two).
*/
- if (quectel_diag_detected(intf))
- return -ENODEV;
+ info = (void *)&id->driver_info;
+
+ if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+ if (desc->bNumEndpoints == 2)
+ return -ENODEV;
+ }
return usbnet_probe(intf, id);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 86c8c64fbb0f..b01bfa63860d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1212,7 +1212,6 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
goto amacout;
}
memcpy(sa->sa_data, buf, 6);
- ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
netif_info(tp, probe, tp->netdev,
"Using pass-thru MAC addr %pM\n", sa->sa_data);
@@ -1221,43 +1220,57 @@ amacout:
return ret;
}
-static int set_ethernet_addr(struct r8152 *tp)
+static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
{
struct net_device *dev = tp->netdev;
- struct sockaddr sa;
int ret;
+ sa->sa_family = dev->type;
+
if (tp->version == RTL_VER_01) {
- ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
+ ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
} else {
/* if device doesn't support MAC pass through this will
* be expected to be non-zero
*/
- ret = vendor_mac_passthru_addr_read(tp, &sa);
+ ret = vendor_mac_passthru_addr_read(tp, sa);
if (ret < 0)
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa->sa_data);
}
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
- } else if (!is_valid_ether_addr(sa.sa_data)) {
+ } else if (!is_valid_ether_addr(sa->sa_data)) {
netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
- sa.sa_data);
+ sa->sa_data);
eth_hw_addr_random(dev);
- ether_addr_copy(sa.sa_data, dev->dev_addr);
- ret = rtl8152_set_mac_address(dev, &sa);
+ ether_addr_copy(sa->sa_data, dev->dev_addr);
netif_info(tp, probe, dev, "Random ether addr %pM\n",
- sa.sa_data);
- } else {
- if (tp->version == RTL_VER_01)
- ether_addr_copy(dev->dev_addr, sa.sa_data);
- else
- ret = rtl8152_set_mac_address(dev, &sa);
+ sa->sa_data);
+ return 0;
}
return ret;
}
+static int set_ethernet_addr(struct r8152 *tp)
+{
+ struct net_device *dev = tp->netdev;
+ struct sockaddr sa;
+ int ret;
+
+ ret = determine_ethernet_addr(tp, &sa);
+ if (ret < 0)
+ return ret;
+
+ if (tp->version == RTL_VER_01)
+ ether_addr_copy(dev->dev_addr, sa.sa_data);
+ else
+ ret = rtl8152_set_mac_address(dev, &sa);
+
+ return ret;
+}
+
static void read_bulk_callback(struct urb *urb)
{
struct net_device *netdev;
@@ -4264,10 +4277,18 @@ static int rtl8152_post_reset(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
+ struct sockaddr sa;
if (!tp)
return 0;
+ /* reset the MAC address in case of policy change */
+ if (determine_ethernet_addr(tp, &sa) >= 0) {
+ rtnl_lock();
+ dev_set_mac_address(tp->netdev, &sa, NULL);
+ rtnl_unlock();
+ }
+
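
The rtnl_lock()/rtnl_unlock() pair around dev_set_mac_address() above is there because the core helper, which since the extack conversion takes (dev, sa, extack), is expected to be called with RTNL held. A minimal sketch of the same call from a driver's own recovery path (foo_refresh_mac is a made-up name):

static int foo_refresh_mac(struct net_device *ndev, struct sockaddr *sa)
{
        int ret;

        rtnl_lock();
        /* extack may be NULL when there is no netlink request to report to */
        ret = dev_set_mac_address(ndev, sa, NULL);
        rtnl_unlock();

        return ret;
}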
netdev = tp->netdev;
if (!netif_running(netdev))
return 0;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index ec287c9741e8..e4c2f3afce60 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -774,8 +774,8 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
/* maybe the boot loader passed the MAC address in devicetree */
mac_addr = of_get_mac_address(dev->udev->dev.of_node);
- if (mac_addr) {
- memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(dev->net->dev_addr, mac_addr);
return;
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index e3d08626828e..a0e119907c84 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -917,8 +917,8 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
/* maybe the boot loader passed the MAC address in devicetree */
mac_addr = of_get_mac_address(dev->udev->dev.of_node);
- if (mac_addr) {
- memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(dev->net->dev_addr, mac_addr);
return;
}
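
Both hunks above adapt to of_get_mac_address() now reporting failure via ERR_PTR() instead of NULL; a minimal sketch of the updated calling convention (foo_* names are illustrative):

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void foo_init_mac_from_dt(struct net_device *ndev, struct device_node *np)
{
        const void *mac = of_get_mac_address(np);

        if (!IS_ERR(mac))
                ether_addr_copy(ndev->dev_addr, mac);   /* valid DT-provided address */
        else
                eth_hw_addr_random(ndev);               /* fall back to a random one */
}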
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 569e87a51a33..09a1433b0833 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -162,18 +162,6 @@ static void veth_get_ethtool_stats(struct net_device *dev,
}
}
-static int veth_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
-
- return 0;
-}
-
static const struct ethtool_ops veth_ethtool_ops = {
.get_drvinfo = veth_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -181,7 +169,7 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_sset_count = veth_get_sset_count,
.get_ethtool_stats = veth_get_ethtool_stats,
.get_link_ksettings = veth_get_link_ksettings,
- .get_ts_info = veth_get_ts_info,
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* general routines */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7eb38ea9ba56..559c48e66afc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,7 +31,6 @@
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
-#include <linux/pci.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
@@ -1568,7 +1567,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct send_queue *sq = &vi->sq[qnum];
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
- bool kick = !skb->xmit_more;
+ bool kick = !netdev_xmit_more();
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
@@ -1588,7 +1587,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
- "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
+ "Unexpected TXQ (%d) queue failure: %d\n",
+ qnum, err);
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -1925,7 +1925,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
+static void virtnet_clean_affinity(struct virtnet_info *vi)
{
int i;
@@ -1949,7 +1949,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
int stride;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
- virtnet_clean_affinity(vi, -1);
+ virtnet_clean_affinity(vi);
return;
}
@@ -1999,7 +1999,7 @@ static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
node);
- virtnet_clean_affinity(vi, cpu);
+ virtnet_clean_affinity(vi);
return 0;
}
@@ -2384,7 +2384,7 @@ static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
- dev_warn(&vi->dev->dev, "Fail to set guest offload. \n");
+ dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
return -EINVAL;
}
@@ -2735,7 +2735,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
{
struct virtio_device *vdev = vi->vdev;
- virtnet_clean_affinity(vi, -1);
+ virtnet_clean_affinity(vi);
vdev->config->del_vqs(vdev);
@@ -3115,8 +3115,9 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Should never trigger: MTU was previously validated
* in virtnet_validate.
*/
- dev_err(&vdev->dev, "device MTU appears to have changed "
- "it is now %d < %d", mtu, dev->min_mtu);
+ dev_err(&vdev->dev,
+ "device MTU appears to have changed it is now %d < %d",
+ mtu, dev->min_mtu);
goto free;
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..cf7e6a92e73c 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -370,7 +370,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
if (!IS_ERR(neigh)) {
sock_confirm_neigh(skb, neigh);
- ret = neigh_output(neigh, skb);
+ ret = neigh_output(neigh, skb, false);
rcu_read_unlock_bh();
return ret;
}
@@ -549,7 +549,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
- u32 nexthop;
+ bool is_v6gw = false;
int ret = -EINVAL;
nf_reset(skb);
@@ -572,13 +572,11 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
rcu_read_lock_bh();
- nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
- neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
- if (unlikely(!neigh))
- neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+ neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
if (!IS_ERR(neigh)) {
sock_confirm_neigh(skb, neigh);
- ret = neigh_output(neigh, skb);
+ /* if crossing protocols, can not use the cached header */
+ ret = neigh_output(neigh, skb, is_v6gw);
rcu_read_unlock_bh();
return ret;
}
@@ -875,6 +873,7 @@ static const struct net_device_ops vrf_netdev_ops = {
.ndo_init = vrf_dev_init,
.ndo_uninit = vrf_dev_uninit,
.ndo_start_xmit = vrf_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = vrf_get_stats64,
.ndo_add_slave = vrf_add_slave,
.ndo_del_slave = vrf_del_slave,
@@ -1273,9 +1272,15 @@ static void vrf_setup(struct net_device *dev)
/* default to no qdisc; user can add if desired */
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_RX_HANDLER;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- dev->min_mtu = 0;
- dev->max_mtu = 0;
+ /* VRF devices do not care about MTU, but if the MTU is set
+ * too low then the ipv4 and ipv6 protocols are disabled,
+ * which breaks networking.
+ */
+ dev->min_mtu = IPV6_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d76dfed8d9bb..5994d5415a03 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -20,6 +20,7 @@
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
+#include <net/ipv6_stubs.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 5c60dc60a8e6..46a05b6540b8 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -22,8 +22,8 @@
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index ef298d8525c5..4fe7c7e132c4 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -352,6 +352,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
case I2400M_SS_IDLE:
d_printf(1, dev, "entering BS-negotiated idle mode\n");
+ /* Fall through */
case I2400M_SS_DISCONNECTING:
case I2400M_SS_DATA_PATH_CONNECTED:
wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index f20886ade1cc..ebd64e083726 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -640,8 +640,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
* figure out where the next TX message starts (and where the
* offset to the moved header is).
*/
- hdr_size = sizeof(*tx_msg)
- + le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
+ hdr_size = struct_size(tx_msg, pld, le16_to_cpu(tx_msg->num_pls));
hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
tx_msg_moved = (void *) tx_msg + tx_msg->offset;
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 24b983edb357..eca87f7c5b6c 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1855,7 +1855,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_ce_crash_data ce_data;
u32 addr, id;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
ath10k_err(ar, "Copy Engine register dump:\n");
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 835b8de92d55..aff585658fc0 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3119,6 +3119,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
goto err_free_wq;
mutex_init(&ar->conf_mutex);
+ mutex_init(&ar->dump_mutex);
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->peers);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e08a17b01e03..e35aae5146f1 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1063,6 +1063,9 @@ struct ath10k {
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
+ /* protects coredump data */
+ struct mutex dump_mutex;
+
/* protects shared structure data */
spinlock_t data_lock;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 33838d9c1cb6..45a355fb62b9 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -1102,7 +1102,7 @@ struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
if (ath10k_coredump_mask == 0)
/* coredump disabled */
@@ -1146,7 +1146,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
if (!buf)
return NULL;
- spin_lock_bh(&ar->data_lock);
+ mutex_lock(&ar->dump_mutex);
dump_data = (struct ath10k_dump_file_data *)(buf);
strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
@@ -1213,7 +1213,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
}
- spin_unlock_bh(&ar->data_lock);
+ mutex_unlock(&ar->dump_mutex);
return dump_data;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index a20ea270d519..1acc622d2183 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
num_msdus++;
num_bytes += ret;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ieee80211_txq_schedule_end(hw, txq->ac);
record->num_msdus = cpu_to_le16(num_msdus);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index b73c23d4ce86..9c703d287333 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
if (ret < 0)
break;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ath10k_htt_tx_txq_update(hw, txq);
if (ret == -EBUSY)
break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
if (ret < 0)
break;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ath10k_htt_tx_txq_update(hw, txq);
out:
ieee80211_txq_schedule_end(hw, ac);
@@ -5774,7 +5774,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_MCAST_RATE &&
- !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
+ !ath10k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
rateidx = vif->bss_conf.mcast_rate[band] - 1;
@@ -5812,7 +5812,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_BASIC_RATES) {
- if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
+ if (ath10k_mac_vif_chan(vif, &def)) {
mutex_unlock(&ar->conf_mutex);
return;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 271f92c24d44..2c27f407a851 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1441,7 +1441,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
int i, ret;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
hi_failure_state,
@@ -1656,7 +1656,7 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
int ret, i;
u8 *buf;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
if (!crash_data)
return;
@@ -1734,14 +1734,19 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
}
}
-static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+static void ath10k_pci_fw_dump_work(struct work_struct *work)
{
+ struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
+ dump_work);
struct ath10k_fw_crash_data *crash_data;
+ struct ath10k *ar = ar_pci->ar;
char guid[UUID_STRING_LEN + 1];
- spin_lock_bh(&ar->data_lock);
+ mutex_lock(&ar->dump_mutex);
+ spin_lock_bh(&ar->data_lock);
ar->stats.fw_crash_counter++;
+ spin_unlock_bh(&ar->data_lock);
crash_data = ath10k_coredump_new(ar);
@@ -1756,11 +1761,18 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ath10k_ce_dump_registers(ar, crash_data);
ath10k_pci_dump_memory(ar, crash_data);
- spin_unlock_bh(&ar->data_lock);
+ mutex_unlock(&ar->dump_mutex);
queue_work(ar->workqueue, &ar->restart_work);
}
+static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ queue_work(ar->workqueue, &ar_pci->dump_work);
+}
+
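The crash dump now runs from a dedicated work item, so it can sleep under the new dump_mutex while data_lock stays a short-held spinlock for the counter update. A self-contained userspace sketch of that defer-heavy-work pattern follows; the names and the pthread plumbing are illustrative stand-ins, not ath10k code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dump_mutex = PTHREAD_MUTEX_INITIALIZER;

/* The deferred work: free to sleep, so it may hold a mutex while dumping. */
static void *sketch_dump_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&dump_mutex);
	printf("collecting coredump (may sleep)\n");
	pthread_mutex_unlock(&dump_mutex);
	return NULL;
}

/* The "firmware crashed" notification: cheap, just schedules the work. */
static void sketch_fw_crashed(pthread_t *worker)
{
	pthread_create(worker, NULL, sketch_dump_work, NULL);
}

int main(void)
{
	pthread_t worker;

	sketch_fw_crashed(&worker);   /* analogous to queue_work() */
	pthread_join(worker, NULL);   /* the real driver lets the workqueue run it */
	return 0;
}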
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
@@ -3442,6 +3454,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
spin_lock_init(&ar_pci->ps_lock);
mutex_init(&ar_pci->ce_diag_mutex);
+ INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
+
timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 3773c79f322f..4455ed6c5275 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -121,6 +121,8 @@ struct ath10k_pci {
/* For protecting ce_diag */
struct mutex ce_diag_mutex;
+ struct work_struct dump_work;
+
struct ath10k_ce ce;
struct timer_list rx_post_retry;
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 6433ff10d80e..a29cfb9c72c2 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -416,8 +416,8 @@ int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
int ret;
- ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, ath10k_tm_policy,
- NULL);
+ ret = nla_parse_deprecated(tb, ATH10K_TM_ATTR_MAX, data, len,
+ ath10k_tm_policy, NULL);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a2351ef45ae0..65a4c142640d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -837,7 +837,6 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
txq->link = &ds->ds_link;
ath5k_hw_start_tx_dma(ah, txq->qnum);
- mmiowb();
spin_unlock_bh(&txq->lock);
return 0;
@@ -2174,7 +2173,6 @@ ath5k_beacon_config(struct ath5k_hw *ah)
}
ath5k_hw_set_imr(ah, ah->imask);
- mmiowb();
spin_unlock_bh(&ah->block);
}
@@ -2779,7 +2777,6 @@ int ath5k_start(struct ieee80211_hw *hw)
ret = 0;
done:
- mmiowb();
mutex_unlock(&ah->lock);
set_bit(ATH_STAT_STARTED, ah->status);
@@ -2839,7 +2836,6 @@ void ath5k_stop(struct ieee80211_hw *hw)
"putting device to sleep\n");
}
- mmiowb();
mutex_unlock(&ah->lock);
ath5k_stop_tasklets(ah);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 16e052d02c94..5e866a193ed0 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -263,7 +263,6 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = 0;
ath5k_hw_set_bssid(ah);
- mmiowb();
}
if (changes & BSS_CHANGED_BEACON_INT)
@@ -528,7 +527,6 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
- mmiowb();
mutex_unlock(&ah->lock);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index d8dcacda9add..f3906dbe5495 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -74,8 +74,8 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
int err, buf_len;
void *buf;
- err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, ath6kl_tm_policy,
- NULL);
+ err = nla_parse_deprecated(tb, ATH6KL_TM_ATTR_MAX, data, len,
+ ath6kl_tm_policy, NULL);
if (err)
return err;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 98141b699c88..a04d8616fe09 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -642,7 +642,7 @@ static int ath9k_of_init(struct ath_softc *sc)
}
mac = of_get_mac_address(np);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(common->macaddr, mac);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 773d428ff1b0..b17e1ca40995 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
goto out;
while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+ bool force;
+
tid = (struct ath_atx_tid *)queue->drv_priv;
ret = ath_tx_sched_aggr(sc, txq, tid);
ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
- ieee80211_return_txq(hw, queue);
+ force = !skb_queue_empty(&tid->retry_q);
+ ieee80211_return_txq(hw, queue, force);
}
out:
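Passing force = !skb_queue_empty(&tid->retry_q) keeps the queue schedulable when the driver still holds frames internally even though mac80211 sees it as empty. A minimal standalone sketch of that scheduling decision, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Decide whether a tx queue should stay on the schedule after servicing it. */
static bool sketch_requeue_txq(unsigned int mac80211_frames,
			       unsigned int driver_retry_frames)
{
	bool force = driver_retry_frames > 0;  /* driver-internal backlog */

	return mac80211_frames > 0 || force;
}

int main(void)
{
	printf("%d %d %d\n",
	       sketch_requeue_txq(0, 0),   /* truly empty: drop from schedule */
	       sketch_requeue_txq(3, 0),   /* mac80211 backlog: keep it */
	       sketch_requeue_txq(0, 2));  /* only retry_q frames: force keep */
	return 0;
}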
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c
index 51a038022c8b..7ae14b4d2d0e 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.c
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.c
@@ -132,8 +132,8 @@ int wcn36xx_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned short attr;
wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "Data:", data, len);
- ret = nla_parse(tb, WCN36XX_TM_ATTR_MAX, data, len,
- wcn36xx_tm_policy, NULL);
+ ret = nla_parse_deprecated(tb, WCN36XX_TM_ATTR_MAX, data, len,
+ wcn36xx_tm_policy, NULL);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index a1e226652b4a..804955d24b30 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -465,7 +465,7 @@ static int wil_cfg80211_validate_add_iface(struct wil6210_priv *wil,
.num_different_channels = 1,
};
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
if (wil->vifs[i]) {
wdev = vif_to_wdev(wil->vifs[i]);
params.iftype_num[wdev->iftype]++;
@@ -486,7 +486,7 @@ static int wil_cfg80211_validate_change_iface(struct wil6210_priv *wil,
};
bool check_combos = false;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif_pos = wil->vifs[i];
if (vif_pos && vif != vif_pos) {
@@ -1274,7 +1274,12 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
params->wait);
out:
+ /* When the sent packet was not acked by the receiver (ACK=0), rc will
+ * be -EAGAIN. In this case this function needs to return success, and
+ * the ACK=0 result will be reflected in tx_status.
+ */
tx_status = (rc == 0);
+ rc = (rc == -EAGAIN) ? 0 : rc;
cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
tx_status, GFP_KERNEL);
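The comment above boils down to a small mapping: a no-ack result (-EAGAIN) is still a successful transmit attempt from cfg80211's point of view, with the missing ACK reported through tx_status. A standalone sketch of that mapping, assuming hypothetical helper names:

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

/* Hypothetical helper: rc is the firmware call result, *acked reports
 * whether the frame was acked, the return value is what cfg80211 sees.
 */
static int sketch_mgmt_tx_result(int rc, bool *acked)
{
	*acked = (rc == 0);               /* acked only on full success */
	return (rc == -EAGAIN) ? 0 : rc;  /* no-ack is not a host error */
}

int main(void)
{
	bool acked;

	assert(sketch_mgmt_tx_result(0, &acked) == 0 && acked);
	assert(sketch_mgmt_tx_result(-EAGAIN, &acked) == 0 && !acked);
	assert(sketch_mgmt_tx_result(-EINVAL, &acked) == -EINVAL && !acked);
	return 0;
}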
@@ -1806,7 +1811,7 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
int rc, i;
struct wiphy *wiphy = wil_to_wiphy(wil);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
struct net_device *ndev;
struct cfg80211_beacon_data bcon = {};
@@ -2620,8 +2625,8 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
- rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
- wil_rf_sector_policy, NULL);
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
@@ -2679,13 +2684,13 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
QCA_ATTR_PAD))
goto nla_put_failure;
- nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
+ nl_cfgs = nla_nest_start_noflag(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
if (!nl_cfgs)
goto nla_put_failure;
for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
if (!(rf_modules_vec & BIT(i)))
continue;
- nl_cfg = nla_nest_start(msg, i);
+ nl_cfg = nla_nest_start_noflag(msg, i);
if (!nl_cfg)
goto nla_put_failure;
si = &reply.evt.sectors_info[i];
@@ -2740,8 +2745,8 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
- rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
- wil_rf_sector_policy, NULL);
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
@@ -2773,9 +2778,11 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
cmd.sector_type = sector_type;
nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
tmp) {
- rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
- nl_cfg, wil_rf_sector_cfg_policy,
- NULL);
+ rc = nla_parse_nested_deprecated(tb2,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
+ nl_cfg,
+ wil_rf_sector_cfg_policy,
+ NULL);
if (rc) {
wil_err(wil, "invalid sector cfg\n");
return -EINVAL;
@@ -2847,8 +2854,8 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy,
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
- rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
- wil_rf_sector_policy, NULL);
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
@@ -2955,8 +2962,8 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy,
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
- rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
- wil_rf_sector_policy, NULL);
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 7ad4e5328439..df2adff6c33a 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -207,6 +207,8 @@ static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "???\n");
}
seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
+ seq_printf(s, " invalid_buff_id_cnt = %d\n",
+ sring->invalid_buff_id_cnt);
if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
@@ -258,6 +260,11 @@ static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
wil_halp_vote(wil);
+ if (wil_mem_access_lock(wil)) {
+ wil_halp_unvote(wil);
+ return;
+ }
+
wil_memcpy_fromio_32(&r, off, sizeof(r));
wil_mbox_ring_le2cpus(&r);
/*
@@ -323,6 +330,7 @@ static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
}
out:
seq_puts(s, "}\n");
+ wil_mem_access_unlock(wil);
wil_halp_unvote(wil);
}
@@ -601,6 +609,12 @@ static int memread_show(struct seq_file *s, void *data)
if (ret < 0)
return ret;
+ ret = wil_mem_access_lock(wil);
+ if (ret) {
+ wil_pm_runtime_put(wil);
+ return ret;
+ }
+
a = wmi_buffer(wil, cpu_to_le32(mem_addr));
if (a)
@@ -608,6 +622,7 @@ static int memread_show(struct seq_file *s, void *data)
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+ wil_mem_access_unlock(wil);
wil_pm_runtime_put(wil);
return 0;
@@ -626,10 +641,6 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t unaligned_bytes, aligned_count, ret;
int rc;
- if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
- test_bit(wil_status_suspended, wil_blob->wil->status))
- return 0;
-
if (pos < 0)
return -EINVAL;
@@ -656,11 +667,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
return rc;
}
+ rc = wil_mem_access_lock(wil);
+ if (rc) {
+ kfree(buf);
+ wil_pm_runtime_put(wil);
+ return rc;
+ }
+
wil_memcpy_fromio_32(buf, (const void __iomem *)
wil_blob->blob.data + aligned_pos, aligned_count);
ret = copy_to_user(user_buf, buf + unaligned_bytes, count);
+ wil_mem_access_unlock(wil);
wil_pm_runtime_put(wil);
kfree(buf);
@@ -1364,7 +1383,7 @@ static int link_show(struct seq_file *s, void *data)
if (p->status != wil_sta_connected)
continue;
- vif = (mid < wil->max_vifs) ? wil->vifs[mid] : NULL;
+ vif = (mid < GET_MAX_VIFS(wil)) ? wil->vifs[mid] : NULL;
if (vif) {
rc = wil_cid_fill_sinfo(vif, i, sinfo);
if (rc)
@@ -1562,7 +1581,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
break;
}
mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
- if (mid < wil->max_vifs) {
+ if (mid < GET_MAX_VIFS(wil)) {
struct wil6210_vif *vif = wil->vifs[mid];
if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
@@ -1628,7 +1647,7 @@ static int mids_show(struct seq_file *s, void *data)
int i;
mutex_lock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
@@ -1849,7 +1868,7 @@ static int wil_link_stats_debugfs_show(struct seq_file *s, void *data)
/* iterate over all MIDs and show per-cid statistics. Then show the
* global statistics
*/
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
seq_printf(s, "MID %d ", i);
@@ -1905,7 +1924,7 @@ static ssize_t wil_link_stats_write(struct file *file, const char __user *buf,
if (rc)
return rc;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (!vif)
continue;
@@ -2375,6 +2394,7 @@ static const struct dbg_off dbg_wil_regs[] = {
{"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
doff_io32},
{"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
+ {"RGF_USER_USAGE_2", 0444, HOSTADDR(RGF_USER_USAGE_2), doff_io32},
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 388b3d4717ca..3ec0f2fab9b7 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -647,6 +647,8 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
out:
release_firmware(fw);
+ if (rc)
+ wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
return rc;
}
@@ -741,6 +743,8 @@ int wil_request_board(struct wil6210_priv *wil, const char *name)
out:
release_firmware(brd);
+ if (rc)
+ wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 277abfdf3322..9b9c9ec01536 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -184,6 +184,28 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
}
}
+/* Device memory access is prohibited during reset or suspend.
+ * wil_mem_access_lock protects device memory access in these cases.
+ */
+int wil_mem_access_lock(struct wil6210_priv *wil)
+{
+ if (!down_read_trylock(&wil->mem_lock))
+ return -EBUSY;
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status)) {
+ up_read(&wil->mem_lock);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void wil_mem_access_unlock(struct wil6210_priv *wil)
+{
+ up_read(&wil->mem_lock);
+}
+
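mem_lock is a plain reader/writer split: debugfs and crash-dump readers take it shared and back off if the device is suspending, while reset and suspend take it exclusively. A self-contained userspace sketch of the same pattern using pthread rwlocks; the names and the suspending flag are stand-ins, not the driver's API:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool suspending; /* illustrative stand-in for wil_status_suspending */

/* Readers fail fast instead of sleeping, mirroring down_read_trylock(). */
static int mem_access_lock(void)
{
	if (pthread_rwlock_tryrdlock(&mem_lock))
		return -EBUSY;
	if (suspending) {
		pthread_rwlock_unlock(&mem_lock);
		return -EBUSY;
	}
	return 0;
}

static void mem_access_unlock(void)
{
	pthread_rwlock_unlock(&mem_lock);
}

/* Writer side (reset/suspend): exclusive while touching the device. */
static void do_reset(void)
{
	pthread_rwlock_wrlock(&mem_lock);
	/* device reset would happen here */
	pthread_rwlock_unlock(&mem_lock);
}

int main(void)
{
	if (!mem_access_lock()) {
		/* safe to read device memory here */
		mem_access_unlock();
	}
	do_reset();
	printf("ok\n");
	return 0;
}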
static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
{
struct wil_ring *ring = &wil->ring_tx[id];
@@ -663,7 +685,7 @@ void wil_bcast_fini_all(struct wil6210_priv *wil)
int i;
struct wil6210_vif *vif;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif)
wil_bcast_fini(vif);
@@ -703,6 +725,7 @@ int wil_priv_init(struct wil6210_priv *wil)
spin_lock_init(&wil->wmi_ev_lock);
spin_lock_init(&wil->net_queue_lock);
init_waitqueue_head(&wil->wq);
+ init_rwsem(&wil->mem_lock);
wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
if (!wil->wmi_wq)
@@ -1390,13 +1413,22 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
u8 mac[8];
int mac_addr;
- if (wil->hw_version >= HW_VER_TALYN_MB)
- mac_addr = RGF_OTP_MAC_TALYN_MB;
- else
- mac_addr = RGF_OTP_MAC;
+ /* OEM MAC has precedence */
+ mac_addr = RGF_OTP_OEM_MAC;
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr), sizeof(mac));
+
+ if (is_valid_ether_addr(mac)) {
+ wil_info(wil, "using OEM MAC %pM\n", mac);
+ } else {
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ mac_addr = RGF_OTP_MAC_TALYN_MB;
+ else
+ mac_addr = RGF_OTP_MAC;
+
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
+ sizeof(mac));
+ }
- wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
- sizeof(mac));
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "Invalid MAC %pM\n", mac);
return -EINVAL;
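The new OTP read order is simply: use the OEM-provisioned MAC when it is a valid address, otherwise fall back to the factory location. A hedged standalone sketch of that selection, with a simplified validity check in place of is_valid_ether_addr():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified validity test: not all-zero and not multicast (bit 0 of byte 0). */
static bool sketch_valid_ether_addr(const uint8_t *mac)
{
	static const uint8_t zero[6];

	return memcmp(mac, zero, 6) != 0 && !(mac[0] & 0x01);
}

/* Pick the OEM-provisioned MAC when valid, else the factory OTP MAC. */
static const uint8_t *sketch_pick_mac(const uint8_t *oem, const uint8_t *otp)
{
	return sketch_valid_ether_addr(oem) ? oem : otp;
}

int main(void)
{
	uint8_t oem[6] = { 0 };  /* unprogrammed OEM field */
	uint8_t otp[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t *mac = sketch_pick_mac(oem, otp);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}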
@@ -1460,7 +1492,7 @@ void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync)
lockdep_assert_held(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif)
@@ -1500,11 +1532,6 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
if (wil->hw_version < HW_VER_TALYN_MB) {
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
- } else {
- wil_s(wil,
- RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0);
- wil_w(wil, RGF_CAF_ICR_TALYN_MB +
- offsetof(struct RGF_ICR, IMV), ~0);
}
/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
* In Talyn-MB host cannot access this register due to
@@ -1528,7 +1555,7 @@ static int wil_restore_vifs(struct wil6210_priv *wil)
struct wireless_dev *wdev;
int i, rc;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (!vif)
continue;
@@ -1580,7 +1607,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
- if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+ if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa) &&
+ wil->hw_version < HW_VER_TALYN_MB) {
wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
}
@@ -1599,20 +1627,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
set_bit(wil_status_resetting, wil->status);
- if (test_bit(wil_status_collecting_dumps, wil->status)) {
- /* Device collects crash dump, cancel the reset.
- * following crash dump collection, reset would take place.
- */
- wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
- rc = -EBUSY;
- goto out;
- }
-
mutex_lock(&wil->vif_mutex);
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
cancel_work_sync(&vif->disconnect_worker);
@@ -1782,7 +1801,9 @@ int __wil_up(struct wil6210_priv *wil)
WARN_ON(!mutex_is_locked(&wil->mutex));
+ down_write(&wil->mem_lock);
rc = wil_reset(wil, true);
+ up_write(&wil->mem_lock);
if (rc)
return rc;
@@ -1854,6 +1875,7 @@ int wil_up(struct wil6210_priv *wil)
int __wil_down(struct wil6210_priv *wil)
{
+ int rc;
WARN_ON(!mutex_is_locked(&wil->mutex));
set_bit(wil_status_resetting, wil->status);
@@ -1873,7 +1895,11 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
- return wil_reset(wil, false);
+ down_write(&wil->mem_lock);
+ rc = wil_reset(wil, false);
+ up_write(&wil->mem_lock);
+
+ return rc;
}
int wil_down(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index b4e0eb1585b9..59f041d708fe 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,7 +27,7 @@ bool wil_has_other_active_ifaces(struct wil6210_priv *wil,
struct wil6210_vif *vif;
struct net_device *ndev_i;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
ndev_i = vif_to_ndev(vif);
@@ -155,7 +155,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
struct wil6210_vif *vif;
if (!ring->va || !txdata->enabled ||
- txdata->mid >= wil->max_vifs)
+ txdata->mid >= GET_MAX_VIFS(wil))
continue;
vif = wil->vifs[txdata->mid];
@@ -294,7 +294,7 @@ static u8 wil_vif_find_free_mid(struct wil6210_priv *wil)
{
u8 i;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
if (!wil->vifs[i])
return i;
}
@@ -500,7 +500,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
bool any_active = wil_has_active_ifaces(wil, true, false);
ASSERT_RTNL();
- if (mid >= wil->max_vifs) {
+ if (mid >= GET_MAX_VIFS(wil)) {
wil_err(wil, "invalid MID: %d\n", mid);
return;
}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index c8c6613371d1..3b82d6cfc218 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -176,7 +176,7 @@ static void wil_remove_all_additional_vifs(struct wil6210_priv *wil)
struct wil6210_vif *vif;
int i;
- for (i = 1; i < wil->max_vifs; i++) {
+ for (i = 1; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
wil_vif_prepare_stop(vif);
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 75fe9323547c..56143e7670ed 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -26,7 +26,7 @@ static void wil_pm_wake_connected_net_queues(struct wil6210_priv *wil)
int i;
mutex_lock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif && test_bit(wil_vif_fwconnected, vif->status))
@@ -40,7 +40,7 @@ static void wil_pm_stop_all_net_queues(struct wil6210_priv *wil)
int i;
mutex_lock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif)
@@ -123,7 +123,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
/* interface is running */
mutex_lock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (!vif)
@@ -195,14 +195,18 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
wil_dbg_pm(wil, "suspend keep radio on\n");
/* Prevent handling of new tx and wmi commands */
- set_bit(wil_status_suspending, wil->status);
- if (test_bit(wil_status_collecting_dumps, wil->status)) {
- /* Device collects crash dump, cancel the suspend */
- wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
- clear_bit(wil_status_suspending, wil->status);
+ rc = down_write_trylock(&wil->mem_lock);
+ if (!rc) {
+ wil_err(wil,
+ "device is busy. down_write_trylock failed, returned (0x%x)\n",
+ rc);
wil->suspend_stats.rejected_by_host++;
return -EBUSY;
}
+
+ set_bit(wil_status_suspending, wil->status);
+ up_write(&wil->mem_lock);
+
wil_pm_stop_all_net_queues(wil);
if (!wil_is_tx_idle(wil)) {
@@ -310,15 +314,18 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
wil_dbg_pm(wil, "suspend radio off\n");
- set_bit(wil_status_suspending, wil->status);
- if (test_bit(wil_status_collecting_dumps, wil->status)) {
- /* Device collects crash dump, cancel the suspend */
- wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
- clear_bit(wil_status_suspending, wil->status);
+ rc = down_write_trylock(&wil->mem_lock);
+ if (!rc) {
+ wil_err(wil,
+ "device is busy. down_write_trylock failed, returned (0x%x)\n",
+ rc);
wil->suspend_stats.rejected_by_host++;
return -EBUSY;
}
+ set_bit(wil_status_suspending, wil->status);
+ up_write(&wil->mem_lock);
+
/* if netif up, hardware is alive, shut it down */
mutex_lock(&wil->vif_mutex);
active_ifaces = wil_has_active_ifaces(wil, true, false);
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index c38773878ae3..f6fce6ff73d9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -29,6 +29,7 @@
#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
+#define MAX_INVALID_BUFF_ID_RETRY (3)
static void wil_tx_desc_unmap_edma(struct device *dev,
union wil_tx_desc *desc,
@@ -312,7 +313,8 @@ static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
struct list_head *free = &wil->rx_buff_mgmt.free;
int i;
- wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
+ wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
+ sizeof(struct wil_rx_buff),
GFP_KERNEL);
if (!wil->rx_buff_mgmt.buff_arr)
return -ENOMEM;
@@ -321,14 +323,16 @@ static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
INIT_LIST_HEAD(active);
INIT_LIST_HEAD(free);
- /* Linkify the list */
+ /* Link the free list together.
+ * Buffer id 0 must not be used; it marks an invalid id.
+ */
buff_arr = wil->rx_buff_mgmt.buff_arr;
- for (i = 0; i < size; i++) {
+ for (i = 1; i <= size; i++) {
list_add(&buff_arr[i].list, free);
buff_arr[i].id = i;
}
- wil->rx_buff_mgmt.size = size;
+ wil->rx_buff_mgmt.size = size + 1;
return 0;
}
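Reserving buffer id 0 means the array is sized size + 1 and the free list is populated from index 1, so a zeroed status descriptor can never alias a real buffer. A small standalone sketch of the same indexing scheme (hypothetical types, no DMA):

#include <stdio.h>
#include <stdlib.h>

struct sketch_rx_buff {
	unsigned int id;   /* 0 is reserved: "invalid / not filled yet" */
};

/* Allocate size + 1 entries and hand out ids 1..size, as the patch does. */
static struct sketch_rx_buff *sketch_init_buff_arr(size_t size)
{
	struct sketch_rx_buff *arr = calloc(size + 1, sizeof(*arr));
	size_t i;

	if (!arr)
		return NULL;
	for (i = 1; i <= size; i++)
		arr[i].id = i;
	return arr;
}

int main(void)
{
	size_t size = 4;
	struct sketch_rx_buff *arr = sketch_init_buff_arr(size);

	if (!arr)
		return 1;
	/* id 0 is never handed out, so a zeroed descriptor cannot match it. */
	printf("slot 0 id=%u, slot 1 id=%u, last id=%u\n",
	       arr[0].id, arr[1].id, arr[size].id);
	free(arr);
	return 0;
}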
@@ -428,6 +432,9 @@ static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
&ring->pa, ring->ctx);
wil_move_all_rx_buff_to_free_list(wil, ring);
+ dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
+ ring->edma_rx_swtail.va,
+ ring->edma_rx_swtail.pa);
goto out;
}
@@ -804,18 +811,9 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
struct sk_buff *skb,
struct wil_net_stats *stats)
{
- int error;
int l2_rx_status;
- int l3_rx_status;
- int l4_rx_status;
void *msg = wil_skb_rxstatus(skb);
- error = wil_rx_status_get_error(msg);
- if (!error) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return 0;
- }
-
l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
if (l2_rx_status != 0) {
wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
@@ -844,17 +842,7 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
return -EFAULT;
}
- l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
- l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
- if (!l3_rx_status && !l4_rx_status)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* If HW reports bad checksum, let IP stack re-check it
- * For example, HW don't understand Microsoft IP stack that
- * mis-calculates TCP checksum - if it should be 0x0,
- * it writes 0xffff in violation of RFC 1624
- */
- else
- stats->rx_csum_err++;
+ skb->ip_summed = wil_rx_status_get_checksum(msg, stats);
return 0;
}
@@ -892,26 +880,50 @@ again:
/* Extract the buffer ID from the status message */
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
- if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
+ int invalid_buff_id_retry = 0;
+
+ while (!buff_id) {
+ struct wil_rx_status_extended *s;
+
+ wil_dbg_txrx(wil,
+ "buff_id is not updated yet by HW (swhead 0x%x)\n",
+ sring->swhead);
+ if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
+ break;
+
+ /* Read the status message again */
+ s = (struct wil_rx_status_extended *)
+ (sring->va + (sring->elem_size * sring->swhead));
+ *(struct wil_rx_status_extended *)msg = *s;
+ buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ }
+
+ if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
buff_id, sring->swhead);
+ wil_rx_status_reset_buff_id(sring);
wil_sring_advance_swhead(sring);
+ sring->invalid_buff_id_cnt++;
goto again;
}
- wil_sring_advance_swhead(sring);
-
/* Extract the SKB from the rx_buff management array */
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
if (!skb) {
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ wil_rx_status_reset_buff_id(sring);
/* Move the buffer from the active list to the free list */
- list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
- &wil->rx_buff_mgmt.free);
+ list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+ wil_sring_advance_swhead(sring);
+ sring->invalid_buff_id_cnt++;
goto again;
}
+ wil_rx_status_reset_buff_id(sring);
+ wil_sring_advance_swhead(sring);
+
memcpy(&pa, skb->cb, sizeof(pa));
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
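The loop above re-reads the status descriptor a few times because the buff_id field can lag the rest of the descriptor write; once the retry budget is spent the entry is treated as corrupt. A minimal sketch of that bounded-retry shape (note the counter is initialized once, outside the loop), using a fake descriptor source:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_RETRY 3

/* Stand-in for re-reading the status ring entry; returns the buff_id field. */
static uint16_t sketch_read_buff_id(const volatile uint16_t *desc)
{
	return *desc;
}

static int sketch_wait_buff_id(const volatile uint16_t *desc, uint16_t *buff_id)
{
	int retry = 0;   /* initialized once, outside the loop */

	*buff_id = sketch_read_buff_id(desc);
	while (!*buff_id) {
		if (++retry > SKETCH_MAX_RETRY)
			return -1;   /* give up, treat the entry as corrupt */
		*buff_id = sketch_read_buff_id(desc);
	}
	return 0;
}

int main(void)
{
	uint16_t desc = 0, id;

	/* With the descriptor stuck at 0 the loop ends after three retries. */
	printf("rc=%d\n", sketch_wait_buff_id(&desc, &id));
	desc = 7;
	printf("rc=%d id=%u\n", sketch_wait_buff_id(&desc, &id), id);
	return 0;
}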
@@ -926,8 +938,8 @@ again:
sizeof(struct wil_rx_status_extended), false);
/* Move the buffer from the active list to the free list */
- list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
- &wil->rx_buff_mgmt.free);
+ list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
eop = wil_rx_status_get_eop(msg);
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index 343516a03a1e..bb4ff28b73e5 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2016,2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -427,6 +427,12 @@ static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
30, 30);
}
+static inline void wil_rx_status_reset_buff_id(struct wil_status_ring *s)
+{
+ ((struct wil_rx_status_compressed *)
+ (s->va + (s->elem_size * s->swhead)))->buff_id = 0;
+}
+
static inline __le16 wil_rx_status_get_buff_id(void *msg)
{
return ((struct wil_rx_status_compressed *)msg)->buff_id;
@@ -511,6 +517,45 @@ static inline int wil_rx_status_get_l4_rx_status(void *msg)
5, 6);
}
+/* L4  L3   Expected result
+ * 0   0    Ok. No L3 and no L4 known protocols found.
+ *          Treated as L2 packet. (no offloads on this packet)
+ * 0   1    Ok. It means that L3 was found, and checksum check passed.
+ *          No known L4 protocol was found.
+ * 0   2    It means that L3 protocol was found, and checksum check failed.
+ *          No L4 known protocol was found.
+ * 1   any  Ok. It means that L4 was found, and checksum check passed.
+ * 3   0    Not a possible scenario.
+ * 3   1    Recalculate. It means that L3 protocol was found, and checksum
+ *          passed. But L4 checksum failed. Need to see if really failed,
+ *          or due to fragmentation.
+ * 3   2    Both L3 and L4 checksum check failed.
+ */
+static inline int wil_rx_status_get_checksum(void *msg,
+ struct wil_net_stats *stats)
+{
+ int l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
+ int l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
+
+ if (l4_rx_status == 1)
+ return CHECKSUM_UNNECESSARY;
+
+ if (l4_rx_status == 0 && l3_rx_status == 1)
+ return CHECKSUM_UNNECESSARY;
+
+ if (l3_rx_status == 0 && l4_rx_status == 0)
+ /* L2 packet */
+ return CHECKSUM_NONE;
+
+ /* If HW reports bad checksum, let IP stack re-check it
+ * For example, HW doesn't understand Microsoft IP stack that
+ * mis-calculates TCP checksum - if it should be 0x0,
+ * it writes 0xffff in violation of RFC 1624
+ */
+ stats->rx_csum_err++;
+ return CHECKSUM_NONE;
+}
+
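The table reduces to three outcomes: trust the hardware checksum, treat the frame as plain L2, or count an error and let the IP stack re-verify. A standalone sketch of the same decision, with integer stand-ins for the kernel's CHECKSUM_* values:

#include <stdio.h>

enum { SKETCH_CSUM_NONE, SKETCH_CSUM_UNNECESSARY };

static int sketch_rx_checksum(int l3, int l4, unsigned long *csum_err)
{
	if (l4 == 1)                 /* L4 found, checksum passed */
		return SKETCH_CSUM_UNNECESSARY;
	if (l4 == 0 && l3 == 1)      /* only L3 found, checksum passed */
		return SKETCH_CSUM_UNNECESSARY;
	if (l4 == 0 && l3 == 0)      /* plain L2 frame, nothing to offload */
		return SKETCH_CSUM_NONE;

	(*csum_err)++;               /* HW says bad; let the IP stack re-check */
	return SKETCH_CSUM_NONE;
}

int main(void)
{
	unsigned long err = 0;

	printf("%d %d %d %d, errors=%lu\n",
	       sketch_rx_checksum(0, 0, &err),   /* L2 only   -> NONE */
	       sketch_rx_checksum(1, 0, &err),   /* L3 ok     -> UNNECESSARY */
	       sketch_rx_checksum(0, 1, &err),   /* L4 ok     -> UNNECESSARY */
	       sketch_rx_checksum(1, 3, &err),   /* L4 failed -> NONE + err */
	       err);
	return 0;
}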
static inline int wil_rx_status_get_security(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index e1b1039b13ab..8724d9975606 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -185,6 +185,7 @@ struct RGF_ICR {
/* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004)
+#define RGF_USER_USAGE_2 (0x880008)
#define RGF_USER_USAGE_6 (0x880018)
#define BIT_USER_OOB_MODE BIT(31)
#define BIT_USER_OOB_R2_MODE BIT(30)
@@ -367,6 +368,7 @@ struct RGF_ICR {
#define REVISION_ID_SPARROW_D0 (0x3)
#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
+#define RGF_OTP_OEM_MAC (0x8a0334)
#define RGF_OTP_MAC (0x8a0620)
/* Talyn-MB */
@@ -566,10 +568,11 @@ struct wil_status_ring {
bool is_rx;
u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
struct wil_ring_rx_data rx_data;
+ u32 invalid_buff_id_cnt; /* relevant only for RX */
};
#define WIL_STA_TID_NUM (16)
-#define WIL_MCS_MAX (12) /* Maximum MCS supported */
+#define WIL_MCS_MAX (15) /* Maximum MCS supported */
struct wil_net_stats {
unsigned long rx_packets;
@@ -660,7 +663,6 @@ enum { /* for wil6210_priv.status */
wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
wil_status_resuming, /* resume in progress */
- wil_status_collecting_dumps, /* crashdump collection in progress */
wil_status_last /* keep last */
};
@@ -992,6 +994,8 @@ struct wil6210_priv {
struct wil_txrx_ops txrx_ops;
struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+ /* for synchronizing device memory access while reset or suspend */
+ struct rw_semaphore mem_lock;
/* statistics */
atomic_t isr_count_rx, isr_count_tx;
/* debugfs */
@@ -1060,6 +1064,7 @@ struct wil6210_priv {
#define vif_to_wil(v) (v->wil)
#define vif_to_ndev(v) (v->ndev)
#define vif_to_wdev(v) (&v->wdev)
+#define GET_MAX_VIFS(wil) min_t(int, (wil)->max_vifs, WIL_MAX_VIFS)
static inline struct wil6210_vif *wdev_to_vif(struct wil6210_priv *wil,
struct wireless_dev *wdev)
@@ -1176,6 +1181,8 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
+int wil_mem_access_lock(struct wil6210_priv *wil);
+void wil_mem_access_unlock(struct wil6210_priv *wil);
struct wil6210_vif *
wil_vif_alloc(struct wil6210_priv *wil, const char *name,
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index dc33a0b4c3fa..772cb00c2002 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -57,7 +57,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
{
- int i;
+ int i, rc;
const struct fw_map *map;
void *data;
u32 host_min, dump_size, offset, len;
@@ -73,14 +73,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
- set_bit(wil_status_collecting_dumps, wil->status);
- if (test_bit(wil_status_suspending, wil->status) ||
- test_bit(wil_status_suspended, wil->status) ||
- test_bit(wil_status_resetting, wil->status)) {
- wil_err(wil, "cannot collect fw dump during suspend/reset\n");
- clear_bit(wil_status_collecting_dumps, wil->status);
- return -EINVAL;
- }
+ rc = wil_mem_access_lock(wil);
+ if (rc)
+ return rc;
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -100,8 +95,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
}
-
- clear_bit(wil_status_collecting_dumps, wil->status);
+ wil_mem_access_unlock(wil);
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index bda4a9712f91..d89cd41e78ac 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -41,6 +41,7 @@ MODULE_PARM_DESC(led_id,
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
#define WIL_WMI_CALL_GENERAL_TO_MS 100
+#define WIL_WMI_PCP_STOP_TO_MS 5000
/**
* WMI event receiving - theory of operations
@@ -2195,7 +2196,8 @@ int wmi_pcp_stop(struct wil6210_vif *vif)
return rc;
return wmi_call(wil, WMI_PCP_STOP_CMDID, vif->mid, NULL, 0,
- WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
+ WMI_PCP_STOPPED_EVENTID, NULL, 0,
+ WIL_WMI_PCP_STOP_TO_MS);
}
int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid)
@@ -2957,6 +2959,10 @@ static const char *suspend_status2name(u8 status)
switch (status) {
case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
return "LINK_NOT_IDLE";
+ case WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT:
+ return "DISCONNECT";
+ case WMI_TRAFFIC_SUSPEND_REJECTED_OTHER:
+ return "OTHER";
default:
return "Untracked status";
}
@@ -3046,6 +3052,9 @@ static void resume_triggers2string(u32 triggers, char *string, int str_size)
if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
strlcat(string, " WMI_EVT", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_DISCONNECT)
+ strlcat(string, " DISCONNECT", str_size);
}
int wmi_resume(struct wil6210_priv *wil)
@@ -3196,7 +3205,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
if (mid == MID_BROADCAST)
mid = 0;
- if (mid >= ARRAY_SIZE(wil->vifs) || mid >= wil->max_vifs) {
+ if (mid >= GET_MAX_VIFS(wil)) {
wil_dbg_wmi(wil, "invalid mid %d, event skipped\n",
mid);
return;
@@ -3502,8 +3511,9 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
- wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status);
- rc = -EINVAL;
+ wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
+ evt.evt.status);
+ rc = -EAGAIN;
}
kfree(cmd);
@@ -3555,9 +3565,9 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
- wil_err(wil, "mgmt_tx_ext failed with status %d\n",
- evt.evt.status);
- rc = -EINVAL;
+ wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
+ evt.evt.status);
+ rc = -EAGAIN;
}
kfree(cmd);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index b668758da994..da46fc8d39cf 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity
*
@@ -104,6 +104,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RAW_MODE = 24,
WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
WMI_FW_CAPABILITY_CHANNEL_4 = 26,
+ WMI_FW_CAPABILITY_IPA = 27,
WMI_FW_CAPABILITY_MAX,
};
@@ -294,6 +295,7 @@ enum wmi_command_id {
WMI_SET_AP_SLOT_SIZE_CMDID = 0xA0F,
WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10,
WMI_SET_VRING_PRIORITY_CMDID = 0xA11,
+ WMI_RBUFCAP_CFG_CMDID = 0xA12,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -979,10 +981,22 @@ enum wmi_rx_msg_type {
WMI_RX_MSG_TYPE_EXTENDED = 0x01,
};
+enum wmi_ring_add_irq_mode {
+ /* Backwards compatibility
+ * for DESC ring - interrupt disabled
+ * for STATUS ring - interrupt enabled
+ */
+ WMI_RING_ADD_IRQ_MODE_BWC = 0x00,
+ WMI_RING_ADD_IRQ_MODE_DISABLE = 0x01,
+ WMI_RING_ADD_IRQ_MODE_ENABLE = 0x02,
+};
+
struct wmi_tx_status_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
u8 irq_index;
- u8 reserved[3];
+ /* wmi_ring_add_irq_mode */
+ u8 irq_mode;
+ u8 reserved[2];
} __packed;
struct wmi_rx_status_ring_add_cmd {
@@ -1016,7 +1030,10 @@ struct wmi_tx_desc_ring_add_cmd {
u8 mac_ctrl;
u8 to_resolution;
u8 agg_max_wsize;
- u8 reserved[3];
+ u8 irq_index;
+ /* wmi_ring_add_irq_mode */
+ u8 irq_mode;
+ u8 reserved;
struct wmi_vring_cfg_schd schd_params;
} __packed;
@@ -1982,6 +1999,7 @@ enum wmi_event_id {
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_BF_TRIG_EVENTID = 0x183A,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
@@ -2082,6 +2100,7 @@ enum wmi_event_id {
WMI_SET_AP_SLOT_SIZE_EVENTID = 0x1A0F,
WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10,
WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11,
+ WMI_RBUFCAP_CFG_EVENTID = 0x1A12,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -2267,7 +2286,9 @@ struct wmi_notify_req_done_event {
__le32 status;
__le64 tsf;
s8 rssi;
- u8 reserved0[3];
+ /* enum wmi_edmg_tx_mode */
+ u8 tx_mode;
+ u8 reserved0[2];
__le32 tx_tpt;
__le32 tx_goodput;
__le32 rx_goodput;
@@ -2316,6 +2337,7 @@ enum wmi_disconnect_reason {
WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C,
WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D,
WMI_DIS_REASON_IBSS_MERGE = 0x0E,
+ WMI_DIS_REASON_HIGH_TEMPERATURE = 0x0F,
};
/* WMI_DISCONNECT_EVENTID */
@@ -3168,6 +3190,30 @@ struct wmi_brp_set_ant_limit_event {
u8 reserved[3];
} __packed;
+enum wmi_bf_type {
+ WMI_BF_TYPE_SLS = 0x00,
+ WMI_BF_TYPE_BRP_RX = 0x01,
+};
+
+/* WMI_BF_TRIG_CMDID */
+struct wmi_bf_trig_cmd {
+ /* enum wmi_bf_type - type of requested beamforming */
+ u8 bf_type;
+ /* used only for WMI_BF_TYPE_BRP_RX */
+ u8 cid;
+ /* used only for WMI_BF_TYPE_SLS */
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved[4];
+} __packed;
+
+/* WMI_BF_TRIG_EVENTID */
+struct wmi_bf_trig_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 cid;
+ u8 reserved[2];
+} __packed;
+
/* broadcast connection ID */
#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
@@ -3263,6 +3309,8 @@ struct wmi_link_maintain_cfg_read_done_event {
enum wmi_traffic_suspend_status {
WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE = 0x1,
+ WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT = 0x2,
+ WMI_TRAFFIC_SUSPEND_REJECTED_OTHER = 0x3,
};
/* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -3282,6 +3330,7 @@ enum wmi_resume_trigger {
WMI_RESUME_TRIGGER_UCAST_RX = 0x2,
WMI_RESUME_TRIGGER_BCAST_RX = 0x4,
WMI_RESUME_TRIGGER_WMI_EVT = 0x8,
+ WMI_RESUME_TRIGGER_DISCONNECT = 0x10,
};
/* WMI_TRAFFIC_RESUME_EVENTID */
@@ -4057,4 +4106,38 @@ struct wmi_set_vring_priority_event {
u8 reserved[3];
} __packed;
+/* WMI_RADAR_PCI_CTRL_BLOCK struct */
+struct wmi_radar_pci_ctrl_block {
+ /* last fw tail address index */
+ __le32 fw_tail_index;
+ /* last SW head address index known to FW */
+ __le32 sw_head_index;
+ __le32 last_wr_pulse_tsf_low;
+ __le32 last_wr_pulse_count;
+ __le32 last_wr_in_bytes;
+ __le32 last_wr_pulse_id;
+ __le32 last_wr_burst_id;
+ /* When pre-overflow is detected, advance sw head in units of pulses */
+ __le32 sw_head_inc;
+ __le32 reserved[8];
+} __packed;
+
+/* WMI_RBUFCAP_CFG_CMD */
+struct wmi_rbufcap_cfg_cmd {
+ u8 enable;
+ u8 reserved;
+ /* RBUFCAP indicates rx space unavailable when the number of rx
+ * descriptors drops below this threshold. Set to 0 to use the
+ * system default.
+ */
+ __le16 rx_desc_threshold;
+} __packed;
+
+/* WMI_RBUFCAP_CFG_EVENTID */
+struct wmi_rbufcap_cfg_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index e99e766a3028..1cabae424839 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void)
if (result < 0)
printk(KERN_ERR DRIVER_NAME
": usb_register failed (status %d)\n", result);
-
- led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
+ else
+ led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
return result;
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 74be3c809225..4c7980f84591 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -485,7 +485,6 @@ static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val)
val = swab32(val);
b43_write32(dev, B43_MMIO_RAM_CONTROL, offset);
- mmiowb();
b43_write32(dev, B43_MMIO_RAM_DATA, val);
}
@@ -656,9 +655,7 @@ static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf)
/* The hardware guarantees us an atomic write, if we
* write the low register first. */
b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, low);
- mmiowb();
b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, high);
- mmiowb();
}
void b43_tsf_write(struct b43_wldev *dev, u64 tsf)
@@ -1822,11 +1819,9 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
if (b43_bus_host_is_sdio(dev->dev)) {
/* wl->mutex is enough. */
b43_do_beacon_update_trigger_work(dev);
- mmiowb();
} else {
spin_lock_irq(&wl->hardirq_lock);
b43_do_beacon_update_trigger_work(dev);
- mmiowb();
spin_unlock_irq(&wl->hardirq_lock);
}
}
@@ -2078,7 +2073,6 @@ static irqreturn_t b43_interrupt_thread_handler(int irq, void *dev_id)
mutex_lock(&dev->wl->mutex);
b43_do_interrupt_thread(dev);
- mmiowb();
mutex_unlock(&dev->wl->mutex);
return IRQ_HANDLED;
@@ -2143,7 +2137,6 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
spin_lock(&dev->wl->hardirq_lock);
ret = b43_do_interrupt(dev);
- mmiowb();
spin_unlock(&dev->wl->hardirq_lock);
return ret;
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 46408a560814..6b7f0238723f 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -1826,16 +1826,10 @@ static void lpphy_stop_tx_tone(struct b43_wldev *dev)
}
-static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
- int mode, bool useindex, u8 index)
-{
- //TODO
-}
-
static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
- struct lpphy_tx_gains gains, oldgains;
+ struct lpphy_tx_gains oldgains;
int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
lpphy_read_tx_pctl_mode_from_hardware(dev);
@@ -1848,11 +1842,6 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
- if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
- lpphy_papd_cal(dev, gains, 0, 1, 30);
- else
- lpphy_papd_cal(dev, gains, 0, 1, 65);
-
if (old_afe_ovr)
lpphy_set_tx_gains(dev, oldgains);
lpphy_set_bb_mult(dev, old_bbmult);
diff --git a/drivers/net/wireless/broadcom/b43/sysfs.c b/drivers/net/wireless/broadcom/b43/sysfs.c
index 3190493bd07f..93d03b673670 100644
--- a/drivers/net/wireless/broadcom/b43/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43/sysfs.c
@@ -129,7 +129,6 @@ static ssize_t b43_attr_interfmode_store(struct device *dev,
} else
err = -ENOSYS;
- mmiowb();
mutex_unlock(&wldev->wl->mutex);
return err ? err : count;
diff --git a/drivers/net/wireless/broadcom/b43legacy/ilt.c b/drivers/net/wireless/broadcom/b43legacy/ilt.c
index ee5682e54204..6d15fb4d30c6 100644
--- a/drivers/net/wireless/broadcom/b43legacy/ilt.c
+++ b/drivers/net/wireless/broadcom/b43legacy/ilt.c
@@ -315,14 +315,12 @@ const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = {
void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
- mmiowb();
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val);
}
void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
- mmiowb();
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2,
(val & 0xFFFF0000) >> 16);
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1,
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 55f411925960..c777efc6dc13 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -264,7 +264,6 @@ static void b43legacy_ram_write(struct b43legacy_wldev *dev, u16 offset,
val = swab32(val);
b43legacy_write32(dev, B43legacy_MMIO_RAM_CONTROL, offset);
- mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_RAM_DATA, val);
}
@@ -341,14 +340,11 @@ void b43legacy_shm_write32(struct b43legacy_wldev *dev,
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
- mmiowb();
b43legacy_write16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED,
(value >> 16) & 0xffff);
- mmiowb();
b43legacy_shm_control_word(dev, routing,
(offset >> 2) + 1);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA,
value & 0xffff);
return;
@@ -356,7 +352,6 @@ void b43legacy_shm_write32(struct b43legacy_wldev *dev,
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
- mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, value);
}
@@ -368,7 +363,6 @@ void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset,
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
- mmiowb();
b43legacy_write16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED,
value);
@@ -377,7 +371,6 @@ void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset,
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value);
}
@@ -471,7 +464,6 @@ static void b43legacy_time_lock(struct b43legacy_wldev *dev)
status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
status |= B43legacy_MACCTL_TBTTHOLD;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
- mmiowb();
}
static void b43legacy_time_unlock(struct b43legacy_wldev *dev)
@@ -494,10 +486,8 @@ static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf)
u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32;
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, 0);
- mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_HIGH,
hi);
- mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW,
lo);
} else {
@@ -507,13 +497,9 @@ static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf)
u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48;
b43legacy_write16(dev, B43legacy_MMIO_TSF_0, 0);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_3, v3);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_2, v2);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_1, v1);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_0, v0);
}
}
@@ -1250,7 +1236,6 @@ static void b43legacy_beacon_update_trigger_work(struct work_struct *work)
/* The handler might have updated the IRQ mask. */
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK,
dev->irq_mask);
- mmiowb();
spin_unlock_irq(&wl->irq_lock);
}
mutex_unlock(&wl->mutex);
@@ -1346,7 +1331,6 @@ static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
b43legacy_controller_restart(dev, "DMA error");
- mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
return;
}
@@ -1396,7 +1380,6 @@ static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
handle_irq_transmit_status(dev);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
- mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}
@@ -1488,7 +1471,6 @@ static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id)
dev->irq_reason = reason;
tasklet_schedule(&dev->isr_tasklet);
out:
- mmiowb();
spin_unlock(&dev->wl->irq_lock);
return ret;
@@ -2781,7 +2763,6 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
- mmiowb();
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
@@ -2900,7 +2881,6 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
/* XXX: why? */
- mmiowb();
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
index 995c7d0c212a..f949766d27ca 100644
--- a/drivers/net/wireless/broadcom/b43legacy/phy.c
+++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
@@ -134,7 +134,6 @@ u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset)
void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_PHY_DATA, val);
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/pio.h b/drivers/net/wireless/broadcom/b43legacy/pio.h
index 1cd1b9ca5e9c..08cd02282beb 100644
--- a/drivers/net/wireless/broadcom/b43legacy/pio.h
+++ b/drivers/net/wireless/broadcom/b43legacy/pio.h
@@ -92,7 +92,6 @@ void b43legacy_pio_write(struct b43legacy_pioqueue *queue,
u16 offset, u16 value)
{
b43legacy_write16(queue->dev, queue->mmio_base + offset, value);
- mmiowb();
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/radio.c b/drivers/net/wireless/broadcom/b43legacy/radio.c
index eab1c9387846..c6db444ea07e 100644
--- a/drivers/net/wireless/broadcom/b43legacy/radio.c
+++ b/drivers/net/wireless/broadcom/b43legacy/radio.c
@@ -95,7 +95,6 @@ void b43legacy_radio_lock(struct b43legacy_wldev *dev)
B43legacy_WARN_ON(status & B43legacy_MACCTL_RADIOLOCK);
status |= B43legacy_MACCTL_RADIOLOCK;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
- mmiowb();
udelay(10);
}
@@ -108,7 +107,6 @@ void b43legacy_radio_unlock(struct b43legacy_wldev *dev)
B43legacy_WARN_ON(!(status & B43legacy_MACCTL_RADIOLOCK));
status &= ~B43legacy_MACCTL_RADIOLOCK;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
- mmiowb();
}
u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset)
@@ -141,7 +139,6 @@ u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset)
void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_RADIO_DATA_LOW, val);
}
@@ -333,7 +330,6 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev)
void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset);
- mmiowb();
b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val);
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
index 2a1da15c913b..2db83eec7a11 100644
--- a/drivers/net/wireless/broadcom/b43legacy/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
@@ -143,7 +143,6 @@ static ssize_t b43legacy_attr_interfmode_store(struct device *dev,
if (err)
b43legacyerr(wldev->wl, "Interference Mitigation not "
"supported by device\n");
- mmiowb();
spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
mutex_unlock(&wldev->wl->mutex);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 73d3c1a0a7c9..98b168736df0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -490,11 +490,18 @@ fail:
return -ENOMEM;
}
-void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
+void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr)
+{
+ struct brcmf_bcdc *bcdc = drvr->proto->pd;
+
+ brcmf_fws_detach_pre_delif(bcdc->fws);
+}
+
+void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr)
{
struct brcmf_bcdc *bcdc = drvr->proto->pd;
drvr->proto->pd = NULL;
- brcmf_fws_detach(bcdc->fws);
+ brcmf_fws_detach_post_delif(bcdc->fws);
kfree(bcdc);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
index 3b0e9eff21b5..4bc52240ccea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
@@ -18,14 +18,16 @@
#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
-void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr);
void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state);
void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
bool success);
struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr);
#else
static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
-static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
+static inline void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {}
+static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {}
#endif
#endif /* BRCMFMAC_BCDC_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ec129864cc9c..60aede5abb4d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -628,15 +628,13 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (err)
- return err;
+ goto out;
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- if (!err)
- err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr,
- mypkt);
-
+ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
+out:
brcmu_pkt_buf_free_skb(mypkt);
return err;
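
The hunk above turns an early return that leaked mypkt into a single exit path. A minimal sketch of the same goto-cleanup idiom, using invented example_* names rather than the driver's own helpers:

#include <linux/skbuff.h>

struct example_dev;
int example_set_window(struct example_dev *dev);
int example_write(struct example_dev *dev, struct sk_buff *pkt);
void example_free_pkt(struct sk_buff *pkt);

/* Hypothetical sketch of the single-exit cleanup pattern: every failure
 * path falls through to one label, so the packet is freed exactly once
 * whether or not the transfer was attempted.
 */
static int example_send(struct example_dev *dev, struct sk_buff *pkt)
{
	int err;

	err = example_set_window(dev);
	if (err)
		goto out;		/* do not leak pkt on early failure */

	err = example_write(dev, pkt);
out:
	example_free_pkt(pkt);		/* single release point */
	return err;
}
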
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 3d441c5c745c..2fe167eae22c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -91,6 +91,7 @@ struct brcmf_bus_ops {
int (*get_fwname)(struct device *dev, const char *ext,
unsigned char *fw_name);
void (*debugfs_create)(struct device *dev);
+ int (*reset)(struct device *dev);
};
@@ -245,6 +246,15 @@ void brcmf_bus_debugfs_create(struct brcmf_bus *bus)
return bus->ops->debugfs_create(bus->dev);
}
+static inline
+int brcmf_bus_reset(struct brcmf_bus *bus)
+{
+ if (!bus->ops->reset)
+ return -EOPNOTSUPP;
+
+ return bus->ops->reset(bus->dev);
+}
+
/*
* interface functions from common layer
*/
@@ -262,6 +272,8 @@ void brcmf_detach(struct device *dev);
void brcmf_dev_reset(struct device *dev);
/* Request from bus module to initiate a coredump */
void brcmf_dev_coredump(struct device *dev);
+/* Indication that firmware has halted or crashed */
+void brcmf_fw_crashed(struct device *dev);
/* Configure the "global" bus state used by upper layers */
void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
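
brcmf_bus_reset() above deliberately treats the new .reset callback as optional and returns -EOPNOTSUPP when a bus implementation does not provide one. A hedged sketch of a caller handling that result; the caller itself is hypothetical and assumes the brcmfmac bus.h declarations are in scope:

#include <linux/errno.h>
#include <linux/printk.h>

/* Hypothetical caller: a missing .reset op is not a hard error, it simply
 * means this bus type cannot be recovered this way.
 */
static void example_try_recover(struct brcmf_bus *bus)
{
	int err = brcmf_bus_reset(bus);

	if (err == -EOPNOTSUPP)
		pr_debug("bus has no reset support, skipping recovery\n");
	else if (err)
		pr_err("bus reset failed: %d\n", err);
}
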
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index e92f6351bd22..8ee8af4e7ec4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5464,6 +5464,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
conn_info->req_ie =
kmemdup(cfg->extra_buf, conn_info->req_ie_len,
GFP_KERNEL);
+ if (!conn_info->req_ie)
+ conn_info->req_ie_len = 0;
} else {
conn_info->req_ie_len = 0;
conn_info->req_ie = NULL;
@@ -5480,6 +5482,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
conn_info->resp_ie =
kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
GFP_KERNEL);
+ if (!conn_info->resp_ie)
+ conn_info->resp_ie_len = 0;
} else {
conn_info->resp_ie_len = 0;
conn_info->resp_ie = NULL;
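
Both hunks above keep the stored IE length consistent with a failed kmemdup(), so req_ie_len/resp_ie_len can never be non-zero while the pointer is NULL. A minimal, self-contained sketch of that invariant with hypothetical names:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_conn {
	u8 *ie;
	size_t ie_len;
};

/* Sketch: treat (buffer, length) as one unit -- when the copy fails, the
 * stored length is zeroed too, so no consumer can pair a NULL pointer with
 * a non-zero length.
 */
static void example_store_ies(struct example_conn *conn,
			      const u8 *ies, size_t ies_len)
{
	conn->ie = kmemdup(ies, ies_len, GFP_KERNEL);
	conn->ie_len = conn->ie ? ies_len : 0;
}
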
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 4fbe8791f674..7d6a08779693 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -841,17 +841,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
bool rtnl_locked)
{
struct brcmf_if *ifp;
+ int ifidx;
ifp = drvr->iflist[bsscfgidx];
- drvr->iflist[bsscfgidx] = NULL;
if (!ifp) {
bphy_err(drvr, "Null interface, bsscfgidx=%d\n", bsscfgidx);
return;
}
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
ifp->ifidx);
- if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
- drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
+ ifidx = ifp->ifidx;
+
if (ifp->ndev) {
if (bsscfgidx == 0) {
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
@@ -879,6 +879,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
brcmf_p2p_ifp_removed(ifp, rtnl_locked);
kfree(ifp);
}
+
+ drvr->iflist[bsscfgidx] = NULL;
+ if (drvr->if2bss[ifidx] == bsscfgidx)
+ drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
}
void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
@@ -1084,6 +1088,14 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data)
return 0;
}
+static void brcmf_core_bus_reset(struct work_struct *work)
+{
+ struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
+ bus_reset);
+
+ brcmf_bus_reset(drvr->bus_if);
+}
+
static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
{
int ret = -1;
@@ -1155,6 +1167,8 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
#endif
#endif /* CONFIG_INET */
+ INIT_WORK(&drvr->bus_reset, brcmf_core_bus_reset);
+
/* populate debugfs */
brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
brcmf_feat_debugfs_create(drvr);
@@ -1273,6 +1287,18 @@ void brcmf_dev_coredump(struct device *dev)
brcmf_dbg(TRACE, "failed to create coredump\n");
}
+void brcmf_fw_crashed(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+
+ bphy_err(drvr, "Firmware has halted or crashed\n");
+
+ brcmf_dev_coredump(dev);
+
+ schedule_work(&drvr->bus_reset);
+}
+
void brcmf_detach(struct device *dev)
{
s32 i;
@@ -1299,6 +1325,8 @@ void brcmf_detach(struct device *dev)
brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+ brcmf_proto_detach_pre_delif(drvr);
+
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
brcmf_remove_interface(drvr->iflist[i], false);
@@ -1308,7 +1336,7 @@ void brcmf_detach(struct device *dev)
brcmf_bus_stop(drvr->bus_if);
- brcmf_proto_detach(drvr);
+ brcmf_proto_detach_post_delif(drvr);
bus_if->drvr = NULL;
wiphy_free(drvr->wiphy);
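
brcmf_fw_crashed() can be called from interrupt or notification context, so the heavyweight bus reset is deferred to the bus_reset work item initialized in brcmf_bus_started(). A minimal sketch of that deferral pattern, with hypothetical example_* names standing in for the driver structures:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_drvr {
	struct work_struct recovery_work;
};

static void example_bus_reset(struct example_drvr *drvr);	/* hypothetical */

static void example_recovery_fn(struct work_struct *work)
{
	struct example_drvr *drvr =
		container_of(work, struct example_drvr, recovery_work);

	example_bus_reset(drvr);	/* heavy teardown in process context */
}

static void example_init(struct example_drvr *drvr)
{
	INIT_WORK(&drvr->recovery_work, example_recovery_fn);
}

static void example_on_crash(struct example_drvr *drvr)
{
	/* safe from IRQ or notification context: this only queues the work */
	schedule_work(&drvr->recovery_work);
}
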
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index d8085ce579f4..9f09aa31eeda 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -143,6 +143,8 @@ struct brcmf_pub {
struct notifier_block inet6addr_notifier;
struct brcmf_mp_device *settings;
+ struct work_struct bus_reset;
+
u8 clmver[BRCMF_DCMD_SMLEN];
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 7535cb0d4ac0..9f1417e00073 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -31,6 +31,10 @@ struct brcmf_dmi_data {
/* NOTE: Please keep all entries sorted alphabetically */
+static const struct brcmf_dmi_data acepc_t8_data = {
+ BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
+};
+
static const struct brcmf_dmi_data gpd_win_pocket_data = {
BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
};
@@ -49,6 +53,28 @@ static const struct brcmf_dmi_data pov_tab_p1006w_data = {
static const struct dmi_system_id dmi_platform_data[] = {
{
+ /* ACEPC T8 Cherry Trail Z8350 mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
+ /* ACEPC T11 Cherry Trail Z8350 mini PC, same wifi as the T8 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Match for the GPDwin which unfortunately uses somewhat
* generic dmi strings, which is why we test for 4 strings.
* Comparing against 23 other byt/cht boards, board_vendor
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 8209a42dea72..6a333dd80b2d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -711,7 +711,6 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
size_t mp_path_len;
u32 i, j;
char end = '\0';
- size_t reqsz;
for (i = 0; i < table_size; i++) {
if (mapping_table[i].chipid == chip &&
@@ -726,8 +725,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
return NULL;
}
- reqsz = sizeof(*fwreq) + n_fwnames * sizeof(struct brcmf_fw_item);
- fwreq = kzalloc(reqsz, GFP_KERNEL);
+ fwreq = kzalloc(struct_size(fwreq, items, n_fwnames), GFP_KERNEL);
if (!fwreq)
return NULL;
@@ -743,6 +741,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
for (j = 0; j < n_fwnames; j++) {
fwreq->items[j].path = fwnames[j].path;
+ fwnames[j].path[0] = '\0';
/* check if firmware path is provided by module parameter */
if (brcmf_mp_global.firmware_path[0] != '\0') {
strlcpy(fwnames[j].path, mp_path,
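
The hunk above replaces the open-coded sizeof(*fwreq) + n * sizeof(item) calculation with struct_size(). A small sketch with a hypothetical flexible-array struct shows why this is preferred: the helper saturates instead of wrapping on overflow, so kzalloc() fails cleanly for an oversized count.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_item {
	const char *path;
};

struct example_req {
	u32 n_items;
	struct example_item items[];	/* flexible array member */
};

/* struct_size(req, items, n) == sizeof(*req) + n * sizeof(req->items[0]),
 * but saturates to SIZE_MAX on overflow rather than wrapping.
 */
static struct example_req *example_alloc(u32 n_items)
{
	struct example_req *req;

	req = kzalloc(struct_size(req, items, n_items), GFP_KERNEL);
	if (req)
		req->n_items = n_items;
	return req;
}
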
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index abeb305492e0..c22c49ae552e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -580,24 +580,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
return ifidx == *(int *)arg;
}
-static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
- int ifidx)
-{
- bool (*matchfn)(struct sk_buff *, void *) = NULL;
- struct sk_buff *skb;
- int prec;
-
- if (ifidx != -1)
- matchfn = brcmf_fws_ifidx_match;
- for (prec = 0; prec < q->num_prec; prec++) {
- skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
- while (skb) {
- brcmu_pkt_buf_free_skb(skb);
- skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
- }
- }
-}
-
static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
{
int i;
@@ -669,6 +651,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
return 0;
}
+static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
+ int ifidx)
+{
+ bool (*matchfn)(struct sk_buff *, void *) = NULL;
+ struct sk_buff *skb;
+ int prec;
+ u32 hslot;
+
+ if (ifidx != -1)
+ matchfn = brcmf_fws_ifidx_match;
+ for (prec = 0; prec < q->num_prec; prec++) {
+ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+ while (skb) {
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
+ true);
+ brcmu_pkt_buf_free_skb(skb);
+ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+ }
+ }
+}
+
static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
u32 slot_id)
{
@@ -2200,6 +2204,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
brcmf_fws_lock(fws);
ifp->fws_desc = NULL;
brcmf_dbg(TRACE, "deleting %s\n", entry->name);
+ brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx],
+ ifp->ifidx);
brcmf_fws_macdesc_deinit(entry);
brcmf_fws_cleanup(fws, ifp->ifidx);
brcmf_fws_unlock(fws);
@@ -2437,17 +2443,25 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
return fws;
fail:
- brcmf_fws_detach(fws);
+ brcmf_fws_detach_pre_delif(fws);
+ brcmf_fws_detach_post_delif(fws);
return ERR_PTR(rc);
}
-void brcmf_fws_detach(struct brcmf_fws_info *fws)
+void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws)
{
if (!fws)
return;
-
- if (fws->fws_wq)
+ if (fws->fws_wq) {
destroy_workqueue(fws->fws_wq);
+ fws->fws_wq = NULL;
+ }
+}
+
+void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws)
+{
+ if (!fws)
+ return;
/* cleanup */
brcmf_fws_lock(fws);
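
The new brcmf_fws_detach_pre_delif()/_post_delif() pair splits teardown into "quiesce asynchronous work" and "free the remaining state", mirroring the interface-removal ordering in brcmf_detach(). A hypothetical sketch of such a two-phase teardown:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_fws {
	struct workqueue_struct *wq;
};

/* Phase 1: stop anything that could still run asynchronously and touch the
 * interfaces that are about to be deleted.
 */
static void example_detach_pre_delif(struct example_fws *fws)
{
	if (!fws)
		return;
	if (fws->wq) {
		destroy_workqueue(fws->wq);
		fws->wq = NULL;		/* phase 2 must not free it again */
	}
}

/* Phase 2: once the interfaces are gone, release what is left. */
static void example_detach_post_delif(struct example_fws *fws)
{
	if (!fws)
		return;
	kfree(fws);
}
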
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 4e6835766d5d..749c06dcdc17 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -19,7 +19,8 @@
#define FWSIGNAL_H_
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
-void brcmf_fws_detach(struct brcmf_fws_info *fws);
+void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws);
+void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws);
void brcmf_fws_debugfs_create(struct brcmf_pub *drvr);
bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index d3780eae7f19..9d1f9ff25bfa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -375,7 +375,7 @@ brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
struct brcmf_msgbuf_pktid *pktid;
struct sk_buff *skb;
- if (idx >= pktids->array_size) {
+ if (idx < 0 || idx >= pktids->array_size) {
brcmf_err("Invalid packet id %d (max %d)\n", idx,
pktids->array_size);
return NULL;
@@ -747,7 +747,7 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
- tx_msghdr->msg.request_id = cpu_to_le32(pktid);
+ tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
tx_msghdr->flags |= (skb->priority & 0x07) <<
@@ -884,7 +884,7 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
u16 flowid;
tx_status = (struct msgbuf_tx_status *)buf;
- idx = le32_to_cpu(tx_status->msg.request_id);
+ idx = le32_to_cpu(tx_status->msg.request_id) - 1;
flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
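
The pktid + 1 / request_id - 1 pair above keeps 0 off the wire as a request id, so a zeroed or stale completion decodes to -1 and is rejected by the new idx < 0 bounds check. A hypothetical sketch of the same encode/validate scheme:

#include <asm/byteorder.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct example_pktids {
	int array_size;
	struct sk_buff **array;
};

static __le32 example_encode_reqid(int idx)
{
	return cpu_to_le32((u32)idx + 1);	/* id 0 is never valid */
}

static struct sk_buff *example_complete(struct example_pktids *ids,
					__le32 request_id)
{
	int idx = le32_to_cpu(request_id) - 1;

	/* a bogus id of 0 becomes -1 here and is caught by the range check */
	if (idx < 0 || idx >= ids->array_size)
		return NULL;
	return ids->array[idx];
}
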
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 58a6bc379358..83e4938527f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -345,6 +345,10 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
+static void brcmf_pcie_setup(struct device *dev, int ret,
+ struct brcmf_fw_request *fwreq);
+static struct brcmf_fw_request *
+brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
@@ -671,6 +675,7 @@ static int
brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
{
struct brcmf_pcie_shared_info *shared;
+ struct brcmf_core *core;
u32 addr;
u32 cur_htod_mb_data;
u32 i;
@@ -694,7 +699,11 @@ brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
- pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
+
+ /* Send mailbox interrupt twice as a hardware workaround */
+ core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
+ if (core->rev <= 13)
+ pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
return 0;
}
@@ -730,7 +739,7 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
}
if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
- brcmf_dev_coredump(&devinfo->pdev->dev);
+ brcmf_fw_crashed(&devinfo->pdev->dev);
}
}
@@ -755,15 +764,22 @@ static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
console->base_addr, console->buf_addr, console->bufsize);
}
-
-static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
+/**
+ * brcmf_pcie_bus_console_read - reads firmware messages
+ *
+ * @devinfo: pointer to the PCIe device information of this bus
+ * @error: specifies if error has occurred (prints messages unconditionally)
+ */
+static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
+ bool error)
{
+ struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
struct brcmf_pcie_console *console;
u32 addr;
u8 ch;
u32 newidx;
- if (!BRCMF_FWCON_ON())
+ if (!error && !BRCMF_FWCON_ON())
return;
console = &devinfo->shared.console;
@@ -787,7 +803,10 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
}
if (ch == '\n') {
console->log_str[console->log_idx] = 0;
- pr_debug("CONSOLE: %s", console->log_str);
+ if (error)
+ brcmf_err(bus, "CONSOLE: %s", console->log_str);
+ else
+ pr_debug("CONSOLE: %s", console->log_str);
console->log_idx = 0;
}
}
@@ -848,7 +867,7 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
&devinfo->pdev->dev);
}
}
- brcmf_pcie_bus_console_read(devinfo);
+ brcmf_pcie_bus_console_read(devinfo, false);
if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
brcmf_pcie_intr_enable(devinfo);
devinfo->in_irq = false;
@@ -1409,6 +1428,38 @@ int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
return 0;
}
+static int brcmf_pcie_reset(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+ struct brcmf_fw_request *fwreq;
+ int err;
+
+ brcmf_pcie_bus_console_read(devinfo, true);
+
+ brcmf_detach(dev);
+
+ brcmf_pcie_release_irq(devinfo);
+ brcmf_pcie_release_scratchbuffers(devinfo);
+ brcmf_pcie_release_ringbuffers(devinfo);
+ brcmf_pcie_reset_device(devinfo);
+
+ fwreq = brcmf_pcie_prepare_fw_request(devinfo);
+ if (!fwreq) {
+ dev_err(dev, "Failed to prepare FW request\n");
+ return -ENOMEM;
+ }
+
+ err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
+ if (err) {
+ dev_err(dev, "Failed to prepare FW request\n");
+ kfree(fwreq);
+ }
+
+ return err;
+}
+
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.txdata = brcmf_pcie_tx,
.stop = brcmf_pcie_down,
@@ -1418,6 +1469,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.get_ramsize = brcmf_pcie_get_ramsize,
.get_memdump = brcmf_pcie_get_memdump,
.get_fwname = brcmf_pcie_get_fwname,
+ .reset = brcmf_pcie_reset,
};
@@ -1778,7 +1830,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0)
return;
- brcmf_pcie_bus_console_read(devinfo);
+ brcmf_pcie_bus_console_read(devinfo, false);
fail:
device_release_driver(dev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
index 024c643052bc..c7964ccdda69 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
@@ -67,16 +67,22 @@ fail:
return -ENOMEM;
}
-void brcmf_proto_detach(struct brcmf_pub *drvr)
+void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr)
{
brcmf_dbg(TRACE, "Enter\n");
if (drvr->proto) {
if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
- brcmf_proto_bcdc_detach(drvr);
+ brcmf_proto_bcdc_detach_post_delif(drvr);
else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF)
brcmf_proto_msgbuf_detach(drvr);
kfree(drvr->proto);
drvr->proto = NULL;
}
}
+
+void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr)
+{
+ if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
+ brcmf_proto_bcdc_detach_pre_delif(drvr);
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index d3c3b9a815ad..72355aea9028 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -54,7 +54,8 @@ struct brcmf_proto {
int brcmf_proto_attach(struct brcmf_pub *drvr);
-void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr);
+void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr);
static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
struct sk_buff *skb,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 4d104ab80fd8..22b73da42822 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -622,6 +622,7 @@ BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
/* Note the names are not postfixed with a1 for backward compatibility */
BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio");
BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
+BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
@@ -642,7 +643,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0),
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1),
- BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
+ BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
+ BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
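
The 4345 entry is split so that 0x00000200 (BIT(9)) selects the 43456 image for chip revision 9, while the remaining revisions covered by 0xFFFFFDC0 keep the 43455 image. A hedged sketch of how such a (chipid, revmask) table is typically consulted; the helper name is invented, though the firmware.c hunk earlier in this patch shows the chipid comparison half of the test:

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch: one bit per chip revision; an entry matches when the chip id is
 * equal and the revision's bit is set in the entry's mask.
 */
static bool example_fw_entry_matches(u32 chipid, u32 chiprev,
				     u32 entry_chipid, u32 entry_revmask)
{
	return chipid == entry_chipid && (entry_revmask & BIT(chiprev));
}
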
@@ -1090,8 +1092,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
/* dongle indicates the firmware has halted/crashed */
if (hmb_data & HMB_DATA_FWHALT) {
- brcmf_err("mailbox indicates firmware halted\n");
- brcmf_dev_coredump(&sdiod->func1->dev);
+ brcmf_dbg(SDIO, "mailbox indicates firmware halted\n");
+ brcmf_fw_crashed(&sdiod->func1->dev);
}
/* Dongle recomposed rx frames, accept them again */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index e9cbfd077710..75fcd6752edc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -160,7 +160,7 @@ struct brcmf_usbdev_info {
struct usb_device *usbdev;
struct device *dev;
- struct mutex dev_init_lock;
+ struct completion dev_init_done;
int ctl_in_pipe, ctl_out_pipe;
struct urb *ctl_urb; /* URB for control endpoint */
@@ -445,22 +445,17 @@ fail:
}
-static void brcmf_usb_free_q(struct list_head *q, bool pending)
+static void brcmf_usb_free_q(struct list_head *q)
{
struct brcmf_usbreq *req, *next;
- int i = 0;
+
list_for_each_entry_safe(req, next, q, list) {
if (!req->urb) {
brcmf_err("bad req\n");
break;
}
- i++;
- if (pending) {
- usb_kill_urb(req->urb);
- } else {
- usb_free_urb(req->urb);
- list_del_init(&req->list);
- }
+ usb_free_urb(req->urb);
+ list_del_init(&req->list);
}
}
@@ -682,12 +677,18 @@ static int brcmf_usb_up(struct device *dev)
static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
{
+ int i;
+
if (devinfo->ctl_urb)
usb_kill_urb(devinfo->ctl_urb);
if (devinfo->bulk_urb)
usb_kill_urb(devinfo->bulk_urb);
- brcmf_usb_free_q(&devinfo->tx_postq, true);
- brcmf_usb_free_q(&devinfo->rx_postq, true);
+ if (devinfo->tx_reqs)
+ for (i = 0; i < devinfo->bus_pub.ntxq; i++)
+ usb_kill_urb(devinfo->tx_reqs[i].urb);
+ if (devinfo->rx_reqs)
+ for (i = 0; i < devinfo->bus_pub.nrxq; i++)
+ usb_kill_urb(devinfo->rx_reqs[i].urb);
}
static void brcmf_usb_down(struct device *dev)
@@ -1023,8 +1024,8 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
/* free the URBS */
- brcmf_usb_free_q(&devinfo->rx_freeq, false);
- brcmf_usb_free_q(&devinfo->tx_freeq, false);
+ brcmf_usb_free_q(&devinfo->rx_freeq);
+ brcmf_usb_free_q(&devinfo->tx_freeq);
usb_free_urb(devinfo->ctl_urb);
usb_free_urb(devinfo->bulk_urb);
@@ -1193,11 +1194,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
if (ret)
goto error;
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
return;
error:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
device_release_driver(dev);
}
@@ -1265,7 +1266,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
if (ret)
goto fail;
/* we are done */
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
return 0;
}
bus->chip = bus_pub->devid;
@@ -1325,11 +1326,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
devinfo->usbdev = usb;
devinfo->dev = &usb->dev;
- /* Take an init lock, to protect for disconnect while still loading.
+ /* Init completion, to protect against disconnect while still loading.
* Necessary because of the asynchronous firmware load construction
*/
- mutex_init(&devinfo->dev_init_lock);
- mutex_lock(&devinfo->dev_init_lock);
+ init_completion(&devinfo->dev_init_done);
usb_set_intfdata(intf, devinfo);
@@ -1407,7 +1407,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
return 0;
fail:
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
kfree(devinfo);
usb_set_intfdata(intf, NULL);
return ret;
@@ -1422,7 +1422,7 @@ brcmf_usb_disconnect(struct usb_interface *intf)
devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
if (devinfo) {
- mutex_lock(&devinfo->dev_init_lock);
+ wait_for_completion(&devinfo->dev_init_done);
/* Make sure that devinfo still exists. Firmware probe routines
* may have released the device and cleared the intfdata.
*/
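
Replacing dev_init_lock with dev_init_done avoids unlocking a mutex from a different context than the one that locked it: a completion has no owner, so the asynchronous firmware callback may signal it while disconnect waits. A minimal sketch with hypothetical names:

#include <linux/completion.h>

struct example_devinfo {
	struct completion init_done;
};

static void example_probe_start(struct example_devinfo *di)
{
	init_completion(&di->init_done);
	/* asynchronous firmware load is kicked off after this point */
}

/* Runs in the async firmware callback, on success and on failure alike. */
static void example_probe_finished(struct example_devinfo *di)
{
	complete(&di->init_done);
}

static void example_disconnect(struct example_devinfo *di)
{
	wait_for_completion(&di->init_done);	/* probe finished either way */
	/* safe to tear the device down now */
}
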
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
index 8eff2753abad..d493021f6031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
@@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
struct brcmf_if *ifp;
const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
struct sk_buff *reply;
- int ret, payload, ret_len;
+ unsigned int payload, ret_len;
void *dcmd_buf = NULL, *wr_pointer;
u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
+ int ret;
if (len < sizeof(*cmdhdr)) {
brcmf_err("vendor command too short: %d\n", len);
@@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
brcmf_err("oversize return buffer %d\n", ret_len);
ret_len = BRCMF_DCMD_MAXLEN;
}
- payload = max(ret_len, len) + 1;
+ payload = max_t(unsigned int, ret_len, len) + 1;
dcmd_buf = vzalloc(payload);
if (NULL == dcmd_buf)
return -ENOMEM;
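
Making payload and ret_len unsigned removes a signed/unsigned mix in the size computation, and max_t() makes the comparison type explicit because len is still a signed int. A tiny sketch of the idiom (hypothetical wrapper function):

#include <linux/kernel.h>

/* Sketch: plain max() warns on mixed-signedness operands; max_t() casts
 * both values to the named type before comparing.
 */
static unsigned int example_reply_size(unsigned int ret_len, int len)
{
	return max_t(unsigned int, ret_len, len) + 1;
}
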
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
index a2960032be81..4b912e707f38 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
@@ -185,7 +185,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Rx - CCK:");
pos +=
scnprintf(buf + pos, bufsz - pos,
@@ -273,7 +273,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Rx - GENERAL:");
pos +=
scnprintf(buf + pos, bufsz - pos,
@@ -346,7 +346,7 @@ il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Tx:");
pos +=
scnprintf(buf + pos, bufsz - pos,
@@ -447,7 +447,7 @@ il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_General:");
pos +=
scnprintf(buf + pos, bufsz - pos,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index ce4144a89217..a20b6c885047 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -577,7 +577,6 @@ il4965_math_div_round(s32 num, s32 denom, s32 * res)
sign = -sign;
denom = -denom;
}
- *res = 1;
*res = ((num * 2 + denom) / (denom * 2)) * sign;
return 1;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index b079c64ca014..986646af8dfd 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2030,13 +2030,6 @@ static inline void
_il_release_nic_access(struct il_priv *il)
{
_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- /*
- * In above we are reading CSR_GP_CNTRL register, what will flush any
- * previous writes, but still want write, which clear MAC_ACCESS_REQ
- * bit, be performed on PCI bus before any other writes scheduled on
- * different CPUs (after we drop reg_lock).
- */
- mmiowb();
}
static inline u32
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index fdc56f821b5a..a9c846c59289 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -56,7 +56,7 @@
#include "iwl-config.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 46
+#define IWL_22000_UCODE_API_MAX 48
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -80,14 +80,15 @@
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
-#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
#define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
+#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -103,10 +104,8 @@
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
- IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
-#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
- IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@@ -179,7 +178,11 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dbgc_supported = true, \
.min_umac_error_event_table = 0x400000, \
.d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024
+ .d3_debug_data_length = 60 * 1024, \
+ .fw_mon_smem_write_ptr_addr = 0xa0c16c, \
+ .fw_mon_smem_write_ptr_msk = 0xfffff, \
+ .fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \
+ .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
#define IWL_DEVICE_AX200_COMMON \
IWL_DEVICE_22000_COMMON, \
@@ -189,7 +192,8 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
IWL_DEVICE_22000_COMMON, \
.device_family = IWL_DEVICE_FAMILY_22000, \
.base_params = &iwl_22000_base_params, \
- .csr = &iwl_csr_v1
+ .csr = &iwl_csr_v1, \
+ .gp2_reg_addr = 0xa02c68
#define IWL_DEVICE_22560 \
IWL_DEVICE_22000_COMMON, \
@@ -200,9 +204,11 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
#define IWL_DEVICE_AX210 \
IWL_DEVICE_AX200_COMMON, \
.device_family = IWL_DEVICE_FAMILY_AX210, \
- .base_params = &iwl_22000_base_params, \
+ .base_params = &iwl_22560_base_params, \
.csr = &iwl_csr_v1, \
- .min_txq_size = 128
+ .min_txq_size = 128, \
+ .gp2_reg_addr = 0xd02c68, \
+ .min_256_ba_txq_size = 512
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
@@ -235,8 +241,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22260_2ax_cfg = {
- .name = "Intel(R) Wireless-AX 22260",
+const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX101",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax200_cfg_cc = {
+ .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -249,7 +267,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
};
const struct iwl_cfg killer1650x_2ax_cfg = {
- .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+ .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -262,7 +280,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
};
const struct iwl_cfg killer1650w_2ax_cfg = {
- .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+ .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -328,7 +346,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -340,7 +358,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -399,19 +417,6 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
- .name = "Intel(R) Dual Band Wireless AX 22560",
- .fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
- IWL_DEVICE_22560,
- .cdb = true,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
-};
-
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
@@ -427,12 +432,20 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
+ .uhb_supported = true,
IWL_DEVICE_AX210,
};
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+};
+
+const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
IWL_DEVICE_AX210,
};
@@ -442,8 +455,8 @@ MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index 575a7022d045..3846064d51a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -136,6 +136,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
+ .csr = &iwl_csr_v1,
};
#define IWL_DEVICE_5150 \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 3225b64eb845..41bdd0eaf62c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -148,7 +148,11 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.d3_debug_data_length = 92 * 1024, \
.ht_params = &iwl9000_ht_params, \
.nvm_ver = IWL9000_NVM_VERSION, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .fw_mon_smem_write_ptr_addr = 0xa0476c, \
+ .fw_mon_smem_write_ptr_msk = 0xfffff, \
+ .fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \
+ .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
const struct iwl_cfg iwl9160_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 86ea0784e1a3..31231b223aae 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -214,7 +214,7 @@ struct iwl_proto_offload_cmd_v3_large {
#define IWL_WOWLAN_MIN_PATTERN_LEN 16
#define IWL_WOWLAN_MAX_PATTERN_LEN 128
-struct iwl_wowlan_pattern {
+struct iwl_wowlan_pattern_v1 {
u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
u8 mask_size;
@@ -227,7 +227,7 @@ struct iwl_wowlan_pattern {
/**
* struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
*/
-struct iwl_wowlan_patterns_cmd {
+struct iwl_wowlan_patterns_cmd_v1 {
/**
* @n_patterns: number of patterns
*/
@@ -236,9 +236,129 @@ struct iwl_wowlan_patterns_cmd {
/**
* @patterns: the patterns, array length in @n_patterns
*/
- struct iwl_wowlan_pattern patterns[];
+ struct iwl_wowlan_pattern_v1 patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
+#define IPV4_ADDR_SIZE 4
+#define IPV6_ADDR_SIZE 16
+
+enum iwl_wowlan_pattern_type {
+ WOWLAN_PATTERN_TYPE_BITMASK,
+ WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN,
+ WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN,
+ WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN_WILDCARD,
+ WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN_WILDCARD,
+}; /* WOWLAN_PATTERN_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_wowlan_ipv4_tcp_syn - WoWLAN IPv4 TCP SYN pattern data
+ */
+struct iwl_wowlan_ipv4_tcp_syn {
+ /**
+ * @src_addr: source IP address to match
+ */
+ u8 src_addr[IPV4_ADDR_SIZE];
+
+ /**
+ * @dst_addr: destination IP address to match
+ */
+ u8 dst_addr[IPV4_ADDR_SIZE];
+
+ /**
+ * @src_port: source TCP port to match
+ */
+ __le16 src_port;
+
+ /**
+ * @dst_port: destination TCP port to match
+ */
+ __le16 dst_port;
+} __packed; /* WOWLAN_IPV4_TCP_SYN_API_S_VER_1 */
+
+/**
+ * struct iwl_wowlan_ipv6_tcp_syn - WoWLAN Ipv6 TCP SYN pattern data
+ */
+struct iwl_wowlan_ipv6_tcp_syn {
+ /**
+ * @src_addr: source IP address to match
+ */
+ u8 src_addr[IPV6_ADDR_SIZE];
+
+ /**
+ * @dst_addr: destination IP address to match
+ */
+ u8 dst_addr[IPV6_ADDR_SIZE];
+
+ /**
+ * @src_port: source TCP port to match
+ */
+ __le16 src_port;
+
+ /**
+ * @dst_port: destination TCP port to match
+ */
+ __le16 dst_port;
+} __packed; /* WOWLAN_IPV6_TCP_SYN_API_S_VER_1 */
+
+/**
+ * union iwl_wowlan_pattern_data - Data for the different pattern types
+ *
+ * If wildcard addresses/ports are to be used, the union can be left
+ * undefined.
+ */
+union iwl_wowlan_pattern_data {
+ /**
+ * @bitmask: bitmask pattern data
+ */
+ struct iwl_wowlan_pattern_v1 bitmask;
+
+ /**
+ * @ipv4_tcp_syn: IPv4 TCP SYN pattern data
+ */
+ struct iwl_wowlan_ipv4_tcp_syn ipv4_tcp_syn;
+
+ /**
+ * @ipv6_tcp_syn: IPv6 TCP SYN pattern data
+ */
+ struct iwl_wowlan_ipv6_tcp_syn ipv6_tcp_syn;
+}; /* WOWLAN_PATTERN_API_U_VER_1 */
+
+/**
+ * struct iwl_wowlan_pattern_v2 - Pattern entry for the WoWLAN wakeup patterns
+ */
+struct iwl_wowlan_pattern_v2 {
+ /**
+ * @pattern_type: defines the struct type to be used in the union
+ */
+ u8 pattern_type;
+
+ /**
+ * @reserved: reserved for alignment
+ */
+ u8 reserved[3];
+
+ /**
+ * @u: the union containing the match data, or undefined for
+ * wildcard matches
+ */
+ union iwl_wowlan_pattern_data u;
+} __packed; /* WOWLAN_PATTERN_API_S_VER_2 */
+
+/**
+ * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns command
+ */
+struct iwl_wowlan_patterns_cmd {
+ /**
+ * @n_patterns: number of patterns
+ */
+ __le32 n_patterns;
+
+ /**
+ * @patterns: the patterns, array length in @n_patterns
+ */
+ struct iwl_wowlan_pattern_v2 patterns[];
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */
+
enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
@@ -383,7 +503,11 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),
-
+ IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC = BIT(17),
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN = BIT(18),
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD = BIT(19),
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN = BIT(20),
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD = BIT(21),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
struct iwl_wowlan_gtk_status_v1 {
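
A hedged sketch, not taken from the driver, of how one v2 wakeup pattern for an IPv4 TCP SYN match could be filled in using the structures defined above; it assumes the d3.h definitions are in scope and that the reserved bytes only need to be zeroed:

#include <linux/string.h>
#include <linux/types.h>

static void example_fill_syn_pattern(struct iwl_wowlan_pattern_v2 *pat,
				     const u8 src[IPV4_ADDR_SIZE],
				     const u8 dst[IPV4_ADDR_SIZE],
				     u16 src_port, u16 dst_port)
{
	memset(pat, 0, sizeof(*pat));	/* clears reserved[] and the union */
	pat->pattern_type = WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN;
	memcpy(pat->u.ipv4_tcp_syn.src_addr, src, IPV4_ADDR_SIZE);
	memcpy(pat->u.ipv4_tcp_syn.dst_addr, dst, IPV4_ADDR_SIZE);
	pat->u.ipv4_tcp_syn.src_port = cpu_to_le16(src_port);
	pat->u.ipv4_tcp_syn.dst_port = cpu_to_le16(dst_port);
}
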
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 33858787817b..f4202bc231a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -60,12 +60,13 @@
#include <linux/bitops.h>
-/*
+/**
* struct iwl_fw_ini_header: Common Header for all debug group TLV's structures
+ *
* @tlv_version: version info
* @apply_point: &enum iwl_fw_ini_apply_point
* @data: TLV data followed
- **/
+ */
struct iwl_fw_ini_header {
__le32 tlv_version;
__le32 apply_point;
@@ -73,7 +74,7 @@ struct iwl_fw_ini_header {
} __packed; /* FW_DEBUG_TLV_HEADER_S */
/**
- * struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION)
+ * struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION)
* buffer allocation TLV - for debug
*
* @iwl_fw_ini_header: header
@@ -84,7 +85,7 @@ struct iwl_fw_ini_header {
* @max_fragments: the maximum allowed fragmentation in the desired memory
* allocation above
* @min_frag_size: the minimum allowed fragmentation size in bytes
-*/
+ */
struct iwl_fw_ini_allocation_tlv {
struct iwl_fw_ini_header header;
__le32 allocation_id;
@@ -95,33 +96,52 @@ struct iwl_fw_ini_allocation_tlv {
} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */
/**
- * struct iwl_fw_ini_hcmd (IWL_FW_INI_TLV_TYPE_HCMD)
- * Generic Host command pass through TLV
+ * enum iwl_fw_ini_dbg_domain - debug domains
+ * allows to send host cmd or collect memory region if a given domain is enabled
+ *
+ * @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on
+ * @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain
+ */
+enum iwl_fw_ini_dbg_domain {
+ IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0,
+ IWL_FW_INI_DBG_DOMAIN_REPORT_PS,
+}; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */
+
+/**
+ * struct iwl_fw_ini_hcmd
*
* @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC
* @group: the desired cmd group
- * @padding: all zeros for dword alignment
- * @data: all of the relevant command (0xf6/0xf5) to be sent
-*/
+ * @reserved: to align to FW struct
+ * @data: all of the relevant command data to be sent
+ */
struct iwl_fw_ini_hcmd {
u8 id;
u8 group;
- __le16 padding;
+ __le16 reserved;
u8 data[0];
-} __packed; /* FW_DEBUG_TLV_HCMD_DATA_S */
+} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
/**
- * struct iwl_fw_ini_hcmd_tlv
+ * struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD)
+ * Generic Host command pass through TLV
+ *
* @header: header
+ * @domain: send command only if the specific domain is enabled
+ * &enum iwl_fw_ini_dbg_domain
+ * @period_msec: period in which the hcmd will be sent to FW. Measured in msec
+ * (0 = one time command).
* @hcmd: a variable length host-command to be sent to apply the configuration.
*/
struct iwl_fw_ini_hcmd_tlv {
struct iwl_fw_ini_header header;
+ __le32 domain;
+ __le32 period_msec;
struct iwl_fw_ini_hcmd hcmd;
-} __packed; /* FW_DEBUG_TLV_HCMD_S_VER_1 */
+} __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */
-/*
- * struct iwl_fw_ini_debug_flow_tlv (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW)
+/**
+ * struct iwl_fw_ini_debug_flow_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_FLOW)
*
* @header: header
* @debug_flow_cfg: &enum iwl_fw_ini_debug_flow
@@ -135,7 +155,19 @@ struct iwl_fw_ini_debug_flow_tlv {
#define IWL_FW_INI_MAX_NAME 32
/**
+ * struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump.
+ *
+ * @id_and_grp: id and group of dhc response.
+ * @desc: dhc response descriptor.
+ */
+struct iwl_fw_ini_region_cfg_dhc {
+ __le32 id_and_grp;
+ __le32 desc;
+} __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */
+
+/**
* struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region
+ *
* @num_of_range: the amount of ranges in the region
* @range_data_size: size of the data to read per range, in bytes.
*/
@@ -146,6 +178,7 @@ struct iwl_fw_ini_region_cfg_internal {
/**
* struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region
+ *
* @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region
* @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region.
* It is unused for tx.
@@ -163,34 +196,43 @@ struct iwl_fw_ini_region_cfg_fifos {
/**
* struct iwl_fw_ini_region_cfg
+ *
* @region_id: ID of this dump configuration
* @region_type: &enum iwl_fw_ini_region_type
- * @num_regions: amount of regions in the address array.
+ * @domain: dump this region only if the specific domain is enabled
+ * &enum iwl_fw_ini_dbg_domain
* @name_len: name length
* @name: file name to use for this region
* @internal: used in case the region uses internal memory.
* @allocation_id: For DRAM type field substitutes for allocation_id
* @fifos: used in case of fifos region.
+ * @dhc_desc: dhc response descriptor.
+ * @notif_id_and_grp: dump this region only if the specific notification
+ * occurred.
* @offset: offset to use for each memory base address
* @start_addr: array of addresses.
*/
struct iwl_fw_ini_region_cfg {
__le32 region_id;
__le32 region_type;
+ __le32 domain;
__le32 name_len;
u8 name[IWL_FW_INI_MAX_NAME];
union {
struct iwl_fw_ini_region_cfg_internal internal;
__le32 allocation_id;
struct iwl_fw_ini_region_cfg_fifos fifos;
- };
+ struct iwl_fw_ini_region_cfg_dhc dhc_desc;
+ __le32 notif_id_and_grp;
+ }; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */
__le32 offset;
__le32 start_addr[];
-} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_S */
+} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG)
- * DUMP sections define IDs and triggers that use those IDs TLV
+ * struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS)
+ * defines memory regions to dump
+ *
* @header: header
* @num_regions: how many different region section and IDs are coming next
* @region_config: list of dump configurations
@@ -199,13 +241,12 @@ struct iwl_fw_ini_region_tlv {
struct iwl_fw_ini_header header;
__le32 num_regions;
struct iwl_fw_ini_region_cfg region_config[];
-} __packed; /* FW_DEBUG_TLV_REGIONS_S_VER_1 */
+} __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */
/**
- * struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG)
- * Region sections define IDs and triggers that use those IDs TLV
+ * struct iwl_fw_ini_trigger
*
- * @trigger_id: enum &iwl_fw_ini_tigger_id
+ * @trigger_id: &enum iwl_fw_ini_trigger_id
* @override_trig: determines how apply trigger in case a trigger with the
* same id is already in use. Using the first 2 bytes:
* Byte 0: if 0, override trigger configuration, otherwise use the
@@ -214,6 +255,7 @@ struct iwl_fw_ini_region_tlv {
* existing trigger.
* @dump_delay: delay from trigger fire to dump, in usec
* @occurrences: max amount of times to be fired
+ * @reserved: to align to FW struct
* @ignore_consec: ignore consecutive triggers, in usec
* @force_restart: force FW restart
* @multi_dut: initiate debug dump data on several DUTs
@@ -226,17 +268,18 @@ struct iwl_fw_ini_trigger {
__le32 override_trig;
__le32 dump_delay;
__le32 occurrences;
+ __le32 reserved;
__le32 ignore_consec;
__le32 force_restart;
__le32 multi_dut;
__le32 trigger_data;
__le32 num_regions;
__le32 data[];
-} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_S */
+} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */
/**
- * struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG)
- * DUMP sections define IDs and triggers that use those IDs TLV
+ * struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS)
+ * Triggers that hold memory regions to dump in case a trigger fires
*
* @header: header
* @num_triggers: how many different triggers section and IDs are coming next
@@ -246,16 +289,18 @@ struct iwl_fw_ini_trigger_tlv {
struct iwl_fw_ini_header header;
__le32 num_triggers;
struct iwl_fw_ini_trigger trigger_config[];
-} __packed; /* FW_TLV_DEBUG_TRIGGERS_S_VER_1 */
+} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */
/**
* enum iwl_fw_ini_trigger_id
+ *
* @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
* @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
* @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
* @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification
- * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION: FW generic notification
+ * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification
* @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
+ * @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically
* @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
* @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
* threshold was crossed
@@ -299,47 +344,51 @@ enum iwl_fw_ini_trigger_id {
/* FW triggers */
IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4,
- IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION = 5,
+ IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5,
/* User trigger */
IWL_FW_TRIGGER_ID_USER_TRIGGER = 6,
+ /* periodic uses the data field for the interval time */
+ IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7,
+
/* Host triggers */
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 7,
- IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 8,
- IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 9,
- IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 10,
- IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 11,
- IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 12,
- IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 13,
- IWL_FW_TRIGGER_ID_HOST_SCAN_START = 14,
- IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 15,
- IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 16,
- IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 17,
- IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 18,
- IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 19,
- IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 20,
- IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 21,
- IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 22,
- IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 23,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 24,
- IWL_FW_TRIGGER_ID_HOST_D3_START = 25,
- IWL_FW_TRIGGER_ID_HOST_D3_END = 26,
- IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 27,
- IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 28,
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 29,
- IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 30,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 31,
- IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 32,
- IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 33,
- IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 34,
- IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 35,
+ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8,
+ IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9,
+ IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10,
+ IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11,
+ IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12,
+ IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13,
+ IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17,
+ IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18,
+ IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19,
+ IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20,
+ IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21,
+ IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23,
+ IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25,
+ IWL_FW_TRIGGER_ID_HOST_D3_START = 26,
+ IWL_FW_TRIGGER_ID_HOST_D3_END = 27,
+ IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28,
+ IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29,
+ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30,
+ IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34,
+ IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35,
+ IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36,
IWL_FW_TRIGGER_ID_NUM,
}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */
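The new IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER entry shifts every host trigger id up by one, and the in-enum comment says the trigger's data field carries its interval. A hedged sketch of rearming such a trigger: which word holds the interval and its unit (microseconds here, mirroring how dump_delay is consumed in dbg.c) are both assumptions, and rearm_periodic_ini_trigger() is a hypothetical helper.

static void rearm_periodic_ini_trigger(struct delayed_work *wk,
				       struct iwl_fw_ini_trigger *trig)
{
	/* assumed: interval in data[0], in microseconds */
	u32 interval = le32_to_cpu(trig->data[0]);

	if (interval)
		schedule_delayed_work(wk, usecs_to_jiffies(interval));
}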
/**
* enum iwl_fw_ini_apply_point
+ *
* @IWL_FW_INI_APPLY_INVALID: invalid
* @IWL_FW_INI_APPLY_EARLY: pre loading FW
* @IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive
@@ -360,6 +409,7 @@ enum iwl_fw_ini_apply_point {
/**
* enum iwl_fw_ini_allocation_id
+ *
* @IWL_FW_INI_ALLOCATION_INVALID: invalid
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
@@ -380,18 +430,22 @@ enum iwl_fw_ini_allocation_id {
/**
* enum iwl_fw_ini_buffer_location
+ *
* @IWL_FW_INI_LOCATION_INVALID: invalid
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
+ * @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
*/
enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_INVALID,
IWL_FW_INI_LOCATION_SRAM_PATH,
IWL_FW_INI_LOCATION_DRAM_PATH,
+ IWL_FW_INI_LOCATION_NPK_PATH,
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**
* enum iwl_fw_ini_debug_flow
+ *
* @IWL_FW_INI_DEBUG_INVALID: invalid
* @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
* @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
@@ -404,6 +458,7 @@ enum iwl_fw_ini_debug_flow {
/**
* enum iwl_fw_ini_region_type
+ *
* @IWL_FW_INI_REGION_INVALID: invalid
* @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
* @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
@@ -416,6 +471,10 @@ enum iwl_fw_ini_debug_flow {
* @IWL_FW_INI_REGION_RXF: RX fifo
* @IWL_FW_INI_REGION_PAGING: paging memory
* @IWL_FW_INI_REGION_CSR: CSR registers
+ * @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
+ * @IWL_FW_INI_REGION_DHC: dhc response to dump
+ * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
+ * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
@@ -431,6 +490,10 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_RXF,
IWL_FW_INI_REGION_PAGING,
IWL_FW_INI_REGION_CSR,
+ IWL_FW_INI_REGION_NOTIFICATION,
+ IWL_FW_INI_REGION_DHC,
+ IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
IWL_FW_INI_REGION_NUM
}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 5dddb21c1c4d..8d78b0e671c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -675,6 +675,59 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */
/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v4 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current AP's measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that the last measurement took for the
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] at which the Channel Estimation
+ * notification was uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @rx_rate_n_flags: rate and flags of the last FTM frame received from this
+ * responder
+ * @tx_rate_n_flags: rate and flags of the last ack sent to this responder
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Calib val that was used for this AP measurement
+ * @specific_calib: val that was used for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 rx_rate_n_flags;
+ __le32 tx_rate_n_flags;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
+
+/**
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
@@ -704,6 +757,8 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
* @specific_calib: val that was used in for this AP measurement
* @papd_calib_output: The result of the tof papd calibration that was injected
* into the algorithm.
+ * @rttConfidence: a value between 0 and 31 that represents the rtt accuracy.
+ * @reserved: for alignment
*/
struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
@@ -725,7 +780,9 @@ struct iwl_tof_range_rsp_ap_entry_ntfy {
__le16 common_calib;
__le16 specific_calib;
__le32 papd_calib_output;
-} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
+ u8 rttConfidence;
+ u8 reserved[3];
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
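Both AP entry versions above report rtt in picoseconds, so a rough one-way distance follows from a single multiply and divide (light covers about 0.03 cm per picosecond). A sketch only; iwl_ftm_rtt_to_cm() is a hypothetical helper, not something the driver defines.

static inline u32 iwl_ftm_rtt_to_cm(__le32 rtt)
{
	/* one-way distance ~= rtt[ps] * 0.015 cm = rtt * 3 / 200 */
	return (u32)div_u64((u64)le32_to_cpu(rtt) * 3, 200);
}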
/**
* enum iwl_tof_response_status - tof response status
@@ -761,6 +818,22 @@ struct iwl_tof_range_rsp_ntfy_v5 {
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */
/**
+ * struct iwl_tof_range_rsp_ntfy_v6 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v6 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
+
+/**
* struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
@@ -774,7 +847,7 @@ struct iwl_tof_range_rsp_ntfy {
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 941c50477003..85c5e367cbf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -542,6 +542,66 @@ enum iwl_he_htc_flags {
#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
/**
+ * struct iwl_he_sta_context_cmd_v1 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see %iwl_11ax_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd_v1 {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 reserved3;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
+
+/**
* struct iwl_he_sta_context_cmd - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
@@ -564,6 +624,14 @@ enum iwl_he_htc_flags {
* @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
* @reserved3: reserved byte for future use
* @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ * @max_bssid_indicator: indicator of the max bssid supported on the associated
+ * bss
+ * @bssid_index: index of the associated VAP
+ * @ema_ap: AP supports enhanced Multi BSSID advertisement
+ * @profile_periodicity: number of Beacon periods that are needed to receive the
+ * complete VAPs info
+ * @bssid_count: actual number of VAPs in the MultiBSS Set
+ * @reserved4: alignment
*/
struct iwl_he_sta_context_cmd {
u8 sta_id;
@@ -599,7 +667,14 @@ struct iwl_he_sta_context_cmd {
/* The below fields are set via MU EDCA parameter set element */
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
-} __packed; /* STA_CONTEXT_DOT11AX_API_S */
+
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+ u8 ema_ap;
+ u8 profile_periodicity;
+ u8 bssid_count;
+ u8 reserved4[3];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
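The VER_2 command keeps the _v1 layout as a strict prefix and appends five u8 fields plus a 3-byte pad, i.e. exactly 8 bytes. A compile-time check of that relationship, as a sketch rather than anything the patch adds:

static inline void iwl_he_sta_ctxt_layout_check(void)
{
	/* both structs are __packed, so the delta is exactly the new bytes */
	BUILD_BUG_ON(sizeof(struct iwl_he_sta_context_cmd) !=
		     sizeof(struct iwl_he_sta_context_cmd_v1) + 8);
}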
/**
* struct iwl_he_monitor_cmd - configure air sniffer for HE
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 93b392f0c6a4..97b49843e318 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -233,7 +233,8 @@ struct iwl_nvm_get_info_phy {
__le32 rx_chains;
} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
-#define IWL_NUM_CHANNELS (51)
+#define IWL_NUM_CHANNELS_V1 51
+#define IWL_NUM_CHANNELS 110
/**
* struct iwl_nvm_get_info_regulatory - regulatory information
@@ -241,13 +242,39 @@ struct iwl_nvm_get_info_phy {
* @channel_profile: regulatory data of this channel
* @reserved: reserved
*/
-struct iwl_nvm_get_info_regulatory {
+struct iwl_nvm_get_info_regulatory_v1 {
__le32 lar_enabled;
- __le16 channel_profile[IWL_NUM_CHANNELS];
+ __le16 channel_profile[IWL_NUM_CHANNELS_V1];
__le16 reserved;
} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
/**
+ * struct iwl_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @n_channels: number of valid channels in the array
+ * @channel_profile: regulatory data of this channel
+ */
+struct iwl_nvm_get_info_regulatory {
+ __le32 lar_enabled;
+ __le32 n_channels;
+ __le32 channel_profile[IWL_NUM_CHANNELS];
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */
+
+/**
+ * struct iwl_nvm_get_info_rsp_v3 - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwl_nvm_get_info_rsp_v3 {
+ struct iwl_nvm_get_info_general general;
+ struct iwl_nvm_get_info_sku mac_sku;
+ struct iwl_nvm_get_info_phy phy_sku;
+ struct iwl_nvm_get_info_regulatory_v1 regulatory;
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
+
+/**
* struct iwl_nvm_get_info_rsp - response to get NVM data
* @general: general NVM data
* @mac_sku: data relating to MAC sku
@@ -259,7 +286,7 @@ struct iwl_nvm_get_info_rsp {
struct iwl_nvm_get_info_sku mac_sku;
struct iwl_nvm_get_info_phy phy_sku;
struct iwl_nvm_get_info_regulatory regulatory;
-} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */
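Since the regulatory block grew from 51 __le16 channel words to a counted array of 110 __le32 entries, the VER_3 and VER_4 responses differ in size and could be told apart by payload length alone. The sketch below only illustrates that the layouts are unambiguous; the driver may instead key off a firmware API/capability flag.

static inline bool iwl_nvm_rsp_is_v4(u32 payload_len)
{
	/* sizeof(v4) != sizeof(v3) because of the larger channel array */
	return payload_len == sizeof(struct iwl_nvm_get_info_rsp);
}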
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 6e8224ce8906..d55312ef58c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -688,13 +688,6 @@ struct iwl_rx_mpdu_desc {
#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
-#define IWL_CD_STTS_OPTIMIZED_POS 0
-#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
-#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
-#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
-#define IWL_CD_STTS_WIFI_STATUS_POS 4
-#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
-
#define RX_NO_DATA_CHAIN_A_POS 0
#define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS)
#define RX_NO_DATA_CHAIN_B_POS 8
@@ -747,62 +740,6 @@ struct iwl_rx_no_data {
__le32 rx_vec[2];
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
-/**
- * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
- * @IWL_CD_STTS_UNUSED: unused
- * @IWL_CD_STTS_UNUSED_2: unused
- * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
- * In sniffer mode, when split is used, set in last CD completion. (RX)
- * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
- * all CD completion. (RX)
- * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
- * @IWL_CD_STTS_ERROR: general error (RX)
- */
-enum iwl_completion_desc_transfer_status {
- IWL_CD_STTS_UNUSED,
- IWL_CD_STTS_UNUSED_2,
- IWL_CD_STTS_END_TRANSFER,
- IWL_CD_STTS_OVERFLOW,
- IWL_CD_STTS_ABORTED,
- IWL_CD_STTS_ERROR,
-};
-
-/**
- * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
- * @IWL_CD_STTS_VALID: the packet is valid (RX)
- * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
- * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
- * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
- * @IWL_CD_STTS_DUP: duplicate packet (RX)
- * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
- * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
- * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
- * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
- * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
- * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
- * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
- * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
- * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
- * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
- */
-enum iwl_completion_desc_wifi_status {
- IWL_CD_STTS_VALID,
- IWL_CD_STTS_FCS_ERR,
- IWL_CD_STTS_SEC_KEY_ERR,
- IWL_CD_STTS_DECRYPTION_ERR,
- IWL_CD_STTS_DUP,
- IWL_CD_STTS_ICV_MIC_ERR,
- IWL_CD_STTS_INTERNAL_SNAP_ERR,
- IWL_CD_STTS_SEC_PORT_FAIL,
- IWL_CD_STTS_BA_OLD_SN,
- IWL_CD_STTS_QOS_NULL,
- IWL_CD_STTS_MAC_HDR_ERR,
- IWL_CD_STTS_MAX_RETRANS,
- IWL_CD_STTS_EX_LIFETIME,
- IWL_CD_STTS_NOT_USED,
- IWL_CD_STTS_REPLAY_ERR,
-};
-
struct iwl_frame_release {
u8 baid;
u8 reserved;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 890a939c463d..1a67a2a439ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -788,7 +788,53 @@ struct iwl_umac_scan_complete {
__le32 reserved;
} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
-#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 5
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 7
+
+/**
+ * struct iwl_scan_offload_profile_match_v1 - match information
+ * @bssid: matched bssid
+ * @reserved: reserved
+ * @channel: channel where the match occurred
+ * @energy: energy
+ * @matching_feature: feature matches
+ * @matching_channels: bitmap of channels that matched, referencing
+ * the channels passed in the scan offload request.
+ */
+struct iwl_scan_offload_profile_match_v1 {
+ u8 bssid[ETH_ALEN];
+ __le16 reserved;
+ u8 channel;
+ u8 energy;
+ u8 matching_feature;
+ u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwl_scan_offload_profiles_query_v1 - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwl_scan_offload_profiles_query_v1 {
+ __le32 matched_profiles;
+ __le32 last_scan_age;
+ __le32 n_scans_done;
+ __le32 gp2_d0u;
+ __le32 gp2_invoked;
+ u8 resume_while_scanning;
+ u8 self_recovery;
+ __le16 reserved;
+ struct iwl_scan_offload_profile_match_v1 matches[IWL_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
/**
* struct iwl_scan_offload_profile_match - match information
* @bssid: matched bssid
@@ -797,7 +843,7 @@ struct iwl_umac_scan_complete {
* @energy: energy
* @matching_feature: feature matches
* @matching_channels: bitmap of channels that matched, referencing
- * the channels passed in tue scan offload request
+ * the channels passed in the scan offload request.
*/
struct iwl_scan_offload_profile_match {
u8 bssid[ETH_ALEN];
@@ -806,7 +852,7 @@ struct iwl_scan_offload_profile_match {
u8 energy;
u8 matching_feature;
u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
-} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */
/**
* struct iwl_scan_offload_profiles_query - match results query response
@@ -831,7 +877,7 @@ struct iwl_scan_offload_profiles_query {
u8 self_recovery;
__le16 reserved;
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
-} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */
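matching_channels is a bitmap indexed by the channel's position in the scan offload request; the new 7-byte array covers up to 56 channels. A sketch of testing one bit, assuming LSB-first ordering within each byte (the header does not spell the ordering out) and using a hypothetical helper name:

static inline bool
iwl_scan_offload_channel_matched(const struct iwl_scan_offload_profile_match *m,
				 unsigned int idx)
{
	if (idx >= SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * BITS_PER_BYTE)
		return false;

	/* assumed LSB-first bit order within each byte */
	return m->matching_channels[idx / 8] & BIT(idx % 8);
}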
/**
* struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 6ac240b6eace..73196cbc7fbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -8,6 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -133,6 +135,7 @@ enum iwl_tx_queue_cfg_actions {
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
+#define IWL_CMD_QUEUE_SIZE 32
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index f119c49cd39c..5f52e40a2903 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -545,6 +545,7 @@ static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
{ .start = 0x00a04590, .end = 0x00a04590 },
{ .start = 0x00a04598, .end = 0x00a04598 },
{ .start = 0x00a045c0, .end = 0x00a045f4 },
+ { .start = 0x00a05c18, .end = 0x00a05c1c },
{ .start = 0x00a0c000, .end = 0x00a0c018 },
{ .start = 0x00a0c020, .end = 0x00a0c028 },
{ .start = 0x00a0c038, .end = 0x00a0c094 },
@@ -557,6 +558,12 @@ static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
{ .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
};
+static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = {
+ { .start = 0x00d03c00, .end = 0x00d03c64 },
+ { .start = 0x00d05c18, .end = 0x00d05c1c },
+ { .start = 0x00d0c000, .end = 0x00d0c174 },
+};
+
static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
u32 len_bytes, __le32 *data)
{
@@ -675,7 +682,8 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
u32 range_len;
if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- /* TODO */
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210);
+ handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr);
} else if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
@@ -804,8 +812,8 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
}
static struct iwl_fw_error_dump_file *
-_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dump_ptrs *fw_error_dump)
+iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dump_ptrs *fw_error_dump)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
@@ -909,11 +917,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
- dump_info->device_family =
- fwrt->trans->cfg->device_family ==
- IWL_DEVICE_FAMILY_7000 ?
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+ dump_info->hw_type =
+ cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
dump_info->hw_step =
cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
@@ -967,10 +972,11 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (fifo_len) {
iwl_fw_dump_rxf(fwrt, &dump_data);
iwl_fw_dump_txf(fwrt, &dump_data);
- if (radio_len)
- iwl_read_radio_regs(fwrt, &dump_data);
}
+ if (radio_len)
+ iwl_read_radio_regs(fwrt, &dump_data);
+
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
@@ -1049,14 +1055,14 @@ static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 addr, prph_val, offset = le32_to_cpu(reg->offset);
+ u32 prph_val;
+ u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
int i;
- range->start_addr = reg->start_addr[idx];
+ range->start_addr = cpu_to_le64(addr);
range->range_data_size = reg->internal.range_data_size;
for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
- addr = le32_to_cpu(range->start_addr) + i;
- prph_val = iwl_read_prph(fwrt->trans, addr + offset);
+ prph_val = iwl_read_prph(fwrt->trans, addr + i);
if (prph_val == 0x5a5a5a5a)
return -EBUSY;
*val++ = cpu_to_le32(prph_val);
@@ -1071,16 +1077,13 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 addr, offset = le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
int i;
- range->start_addr = reg->start_addr[idx];
+ range->start_addr = cpu_to_le64(addr);
range->range_data_size = reg->internal.range_data_size;
- for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
- addr = le32_to_cpu(range->start_addr) + i;
- *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans,
- addr + offset));
- }
+ for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4)
+ *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -1090,12 +1093,11 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
void *range_ptr, int idx)
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
- u32 addr = le32_to_cpu(range->start_addr);
- u32 offset = le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
- range->start_addr = reg->start_addr[idx];
+ range->start_addr = cpu_to_le64(addr);
range->range_data_size = reg->internal.range_data_size;
- iwl_trans_read_mem_bytes(fwrt->trans, addr + offset, range->data,
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
le32_to_cpu(reg->internal.range_data_size));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
@@ -1109,7 +1111,7 @@ iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_error_dump_range *range = range_ptr;
u32 page_size = fwrt->trans->init_dram.paging[idx].size;
- range->start_addr = cpu_to_le32(idx);
+ range->start_addr = cpu_to_le64(idx);
range->range_data_size = cpu_to_le32(page_size);
memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
page_size);
@@ -1129,7 +1131,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
- range->start_addr = cpu_to_le32(idx);
+ range->start_addr = cpu_to_le64(idx);
range->range_data_size = cpu_to_le32(page_size);
dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size,
DMA_BIDIRECTIONAL);
@@ -1152,7 +1154,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
if (start_addr == 0x5a5a5a5a)
return -EBUSY;
- range->start_addr = cpu_to_le32(start_addr);
+ range->start_addr = cpu_to_le64(start_addr);
range->range_data_size = cpu_to_le32(fwrt->trans->fw_mon[idx].size);
memcpy(range->data, fwrt->trans->fw_mon[idx].block,
@@ -1228,10 +1230,11 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
struct iwl_ini_txf_iter_data *iter;
+ struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
u32 offs = le32_to_cpu(reg->offset), addr;
u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
- __le32 *val = range->data;
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ __le32 *data;
unsigned long flags;
int i;
@@ -1249,11 +1252,18 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
- /* read txf registers */
+ /*
+ * read txf registers. for each register, write to the dump the
+ * register address and its value
+ */
for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
addr = le32_to_cpu(reg->start_addr[i]) + offs;
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ reg_dump->addr = cpu_to_le32(addr);
+ reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
+ addr));
+
+ reg_dump++;
}
if (reg->fifos.header_only) {
@@ -1270,8 +1280,9 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
/* Read FIFO */
addr = TXF_READ_MODIFY_DATA + offs;
- for (i = 0; i < iter->fifo_size; i += sizeof(__le32))
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ data = (void *)reg_dump;
+ for (i = 0; i < iter->fifo_size; i += sizeof(*data))
+ *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
out:
iwl_trans_release_nic_access(fwrt->trans, &flags);
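The TXF/RXF iterators now emit an (address, value) pair per register ahead of the FIFO payload, which is why the size helpers further down count sizeof(__le32) * 2 per register. The stand-in layout and reader below are purely illustrative; the driver's real type is struct iwl_fw_ini_error_dump_register.

struct ini_reg_pair {
	__le32 addr;
	__le32 data;
} __packed;

static void iwl_print_ini_fifo_regs(const void *range_data, u32 num_regs)
{
	const struct ini_reg_pair *p = range_data;
	u32 i;

	for (i = 0; i < num_regs; i++, p++)
		pr_debug("fifo reg 0x%x = 0x%x\n",
			 le32_to_cpu(p->addr), le32_to_cpu(p->data));
}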
@@ -1327,10 +1338,11 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
struct iwl_ini_rxf_data rxf_data;
+ struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
u32 offs = le32_to_cpu(reg->offset), addr;
u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
- __le32 *val = range->data;
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ __le32 *data;
unsigned long flags;
int i;
@@ -1341,17 +1353,22 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return -EBUSY;
- offs += rxf_data.offset;
-
range->fifo_num = cpu_to_le32(rxf_data.fifo_num);
range->num_of_registers = reg->fifos.num_of_registers;
range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
- /* read rxf registers */
+ /*
+ * read rxf registers. for each register, write to the dump the
+ * register address and its value
+ */
for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
addr = le32_to_cpu(reg->start_addr[i]) + offs;
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ reg_dump->addr = cpu_to_le32(addr);
+ reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
+ addr));
+
+ reg_dump++;
}
if (reg->fifos.header_only) {
@@ -1359,6 +1376,12 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
goto out;
}
+ /*
+	 * region registers have absolute values, so apply the rxf offset
+	 * after reading the registers
+ */
+ offs += rxf_data.offset;
+
/* Lock fence */
iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
/* Set fence pointer to the same place like WR pointer */
@@ -1369,8 +1392,9 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
/* Read FIFO */
addr = RXF_FIFO_RD_FENCE_INC + offs;
- for (i = 0; i < rxf_data.size; i += sizeof(__le32))
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ data = (void *)reg_dump;
+ for (i = 0; i < rxf_data.size; i += sizeof(*data))
+ *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
out:
iwl_trans_release_nic_access(fwrt->trans, &flags);
@@ -1384,32 +1408,86 @@ static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump *dump = data;
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_MEM_VER);
+
return dump->ranges;
}
static void
-*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+*iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_fw_ini_monitor_dump *data,
+ u32 write_ptr_addr, u32 write_ptr_msk,
+ u32 cycle_cnt_addr, u32 cycle_cnt_msk)
{
- struct iwl_fw_ini_monitor_dram_dump *mon_dump = (void *)data;
u32 write_ptr, cycle_cnt;
unsigned long flags;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
- IWL_ERR(fwrt, "Failed to get DRAM monitor header\n");
+ IWL_ERR(fwrt, "Failed to get monitor header\n");
return NULL;
}
- write_ptr = iwl_read_umac_prph_no_grab(fwrt->trans,
- MON_BUFF_WRPTR_VER2);
- cycle_cnt = iwl_read_umac_prph_no_grab(fwrt->trans,
- MON_BUFF_CYCLE_CNT_VER2);
+
+ write_ptr = iwl_read_prph_no_grab(fwrt->trans, write_ptr_addr);
+ cycle_cnt = iwl_read_prph_no_grab(fwrt->trans, cycle_cnt_addr);
+
iwl_trans_release_nic_access(fwrt->trans, &flags);
- mon_dump->write_ptr = cpu_to_le32(write_ptr);
- mon_dump->cycle_cnt = cpu_to_le32(cycle_cnt);
+ data->header.version = cpu_to_le32(IWL_INI_DUMP_MONITOR_VER);
+ data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk);
+ data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk);
+
+ return data->ranges;
+}
+
+static void
+*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *data)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk;
+
+ switch (fwrt->trans->cfg->device_family) {
+ case IWL_DEVICE_FAMILY_9000:
+ case IWL_DEVICE_FAMILY_22000:
+ write_ptr_addr = MON_BUFF_WRPTR_VER2;
+ write_ptr_msk = -1;
+ cycle_cnt_addr = MON_BUFF_CYCLE_CNT_VER2;
+ cycle_cnt_msk = -1;
+ break;
+ default:
+ IWL_ERR(fwrt, "Unsupported device family %d\n",
+ fwrt->trans->cfg->device_family);
+ return NULL;
+ }
+
+ return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, write_ptr_addr,
+ write_ptr_msk, cycle_cnt_addr,
+ cycle_cnt_msk);
+}
+
+static void
+*iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *data)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ const struct iwl_cfg *cfg = fwrt->trans->cfg;
+
+ if (fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
+ fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
+ IWL_ERR(fwrt, "Unsupported device family %d\n",
+ fwrt->trans->cfg->device_family);
+ return NULL;
+ }
+
+ return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump,
+ cfg->fw_mon_smem_write_ptr_addr,
+ cfg->fw_mon_smem_write_ptr_msk,
+ cfg->fw_mon_smem_cycle_cnt_ptr_addr,
+ cfg->fw_mon_smem_cycle_cnt_ptr_msk);
- return mon_dump->ranges;
}
static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt,
@@ -1418,6 +1496,8 @@ static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_fifo_error_dump *dump = data;
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_FIFO_VER);
+
return dump->ranges;
}
@@ -1509,7 +1589,8 @@ static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_cfg *reg)
{
- u32 size = sizeof(struct iwl_fw_ini_monitor_dram_dump);
+ u32 size = sizeof(struct iwl_fw_ini_monitor_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
if (fwrt->trans->num_blocks)
size += fwrt->trans->fw_mon[0].size;
@@ -1517,6 +1598,15 @@ static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
+static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ return sizeof(struct iwl_fw_ini_monitor_dump) +
+ iwl_dump_ini_mem_ranges(fwrt, reg) *
+ (sizeof(struct iwl_fw_ini_error_dump_range) +
+ le32_to_cpu(reg->internal.range_data_size));
+}
+
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_cfg *reg)
{
@@ -1524,7 +1614,7 @@ static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
void *fifo_iter = fwrt->dump.fifo_iter;
u32 size = 0;
u32 fifo_hdr = sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2;
fwrt->dump.fifo_iter = &iter;
while (iwl_ini_txf_iter(fwrt, reg)) {
@@ -1547,7 +1637,7 @@ static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_ini_rxf_data rx_data;
u32 size = sizeof(struct iwl_fw_ini_fifo_error_dump) +
sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2;
if (reg->fifos.header_only)
return size;
@@ -1584,27 +1674,31 @@ struct iwl_dump_ini_mem_ops {
* @fwrt: fw runtime struct.
* @data: dump memory data.
* @reg: region to copy to the dump.
+ * @ops: memory dump operations.
*/
static void
iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_region_type type,
struct iwl_fw_error_dump_data **data,
struct iwl_fw_ini_region_cfg *reg,
struct iwl_dump_ini_mem_ops *ops)
{
struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data;
+ u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type);
void *range;
- u32 num_of_ranges, i;
if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size ||
!ops->fill_mem_hdr || !ops->fill_range))
return;
+ IWL_DEBUG_FW(fwrt, "WRT: collecting region: id=%d, type=%d\n",
+ le32_to_cpu(reg->region_id), type);
+
num_of_ranges = ops->get_num_of_ranges(fwrt, reg);
(*data)->type = cpu_to_le32(type | INI_DUMP_BIT);
(*data)->len = cpu_to_le32(ops->get_size(fwrt, reg));
+ header->region_id = reg->region_id;
header->num_of_ranges = cpu_to_le32(num_of_ranges);
header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME,
le32_to_cpu(reg->name_len)));
@@ -1612,8 +1706,10 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
range = ops->fill_mem_hdr(fwrt, reg, header);
if (!range) {
- IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
+ IWL_ERR(fwrt,
+ "WRT: failed to fill region header: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
+ memset(*data, 0, le32_to_cpu((*data)->len));
return;
}
@@ -1621,8 +1717,10 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
int range_size = ops->fill_range(fwrt, reg, range, i);
if (range_size < 0) {
- IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
+ IWL_ERR(fwrt,
+ "WRT: failed to dump region: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
+ memset(*data, 0, le32_to_cpu((*data)->len));
return;
}
range = range + range_size;
@@ -1641,23 +1739,30 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) {
u32 reg_id = le32_to_cpu(trigger->data[i]);
struct iwl_fw_ini_region_cfg *reg;
- enum iwl_fw_ini_region_type type;
if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
continue;
reg = fwrt->dump.active_regs[reg_id];
- if (WARN(!reg, "Unassigned region %d\n", reg_id))
+ if (!reg) {
+ IWL_WARN(fwrt,
+ "WRT: unassigned region id %d, skipping\n",
+ reg_id);
continue;
+ }
- type = le32_to_cpu(reg->region_type);
- switch (type) {
+ /* currently the driver supports always on domain only */
+ if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
+ continue;
+
+ switch (le32_to_cpu(reg->region_type)) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:
case IWL_FW_INI_REGION_PERIPHERY_MAC:
case IWL_FW_INI_REGION_PERIPHERY_PHY:
case IWL_FW_INI_REGION_PERIPHERY_AUX:
- case IWL_FW_INI_REGION_INTERNAL_BUFFER:
case IWL_FW_INI_REGION_CSR:
+ case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
+ case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg);
break;
case IWL_FW_INI_REGION_TXF:
@@ -1666,7 +1771,7 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_REGION_RXF:
size += hdr_len + iwl_dump_ini_rxf_get_size(fwrt, reg);
break;
- case IWL_FW_INI_REGION_PAGING: {
+ case IWL_FW_INI_REGION_PAGING:
size += hdr_len;
if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
size += iwl_dump_ini_paging_get_size(fwrt, reg);
@@ -1675,13 +1780,16 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
reg);
}
break;
- }
case IWL_FW_INI_REGION_DRAM_BUFFER:
if (!fwrt->trans->num_blocks)
break;
size += hdr_len +
iwl_dump_ini_mon_dram_get_size(fwrt, reg);
break;
+ case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ size += hdr_len +
+ iwl_dump_ini_mon_smem_get_size(fwrt, reg);
+ break;
case IWL_FW_INI_REGION_DRAM_IMR:
/* Undefined yet */
default:
@@ -1699,7 +1807,6 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
for (i = 0; i < num; i++) {
u32 reg_id = le32_to_cpu(trigger->data[i]);
- enum iwl_fw_ini_region_type type;
struct iwl_fw_ini_region_cfg *reg;
struct iwl_dump_ini_mem_ops ops;
@@ -1711,15 +1818,19 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
if (!reg)
continue;
- type = le32_to_cpu(reg->region_type);
- switch (type) {
+ /* currently the driver supports always on domain only */
+ if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
+ continue;
+
+ switch (le32_to_cpu(reg->region_type)) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:
- case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
+ case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
ops.get_size = iwl_dump_ini_mem_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
ops.fill_range = iwl_dump_ini_dev_mem_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
case IWL_FW_INI_REGION_PERIPHERY_MAC:
case IWL_FW_INI_REGION_PERIPHERY_PHY:
@@ -1728,16 +1839,23 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
ops.get_size = iwl_dump_ini_mem_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
ops.fill_range = iwl_dump_ini_prph_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
case IWL_FW_INI_REGION_DRAM_BUFFER:
ops.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges;
ops.get_size = iwl_dump_ini_mon_dram_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header;
ops.fill_range = iwl_dump_ini_mon_dram_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
- case IWL_FW_INI_REGION_PAGING: {
+ case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
+ ops.get_size = iwl_dump_ini_mon_smem_get_size;
+ ops.fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header;
+ ops.fill_range = iwl_dump_ini_dev_mem_iter;
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
+ break;
+ case IWL_FW_INI_REGION_PAGING:
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
ops.get_num_of_ranges =
@@ -1752,9 +1870,8 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
ops.fill_range = iwl_dump_ini_paging_gen2_iter;
}
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
- }
case IWL_FW_INI_REGION_TXF: {
struct iwl_ini_txf_iter_data iter = { .init = true };
void *fifo_iter = fwrt->dump.fifo_iter;
@@ -1764,7 +1881,7 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
ops.get_size = iwl_dump_ini_txf_get_size;
ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
ops.fill_range = iwl_dump_ini_txf_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
fwrt->dump.fifo_iter = fifo_iter;
break;
}
@@ -1773,14 +1890,14 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
ops.get_size = iwl_dump_ini_rxf_get_size;
ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
ops.fill_range = iwl_dump_ini_rxf_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
case IWL_FW_INI_REGION_CSR:
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
ops.get_size = iwl_dump_ini_mem_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
ops.fill_range = iwl_dump_ini_csr_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
case IWL_FW_INI_REGION_DRAM_IMR:
/* This is undefined yet */
@@ -1791,34 +1908,29 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
}
static struct iwl_fw_error_dump_file *
-_iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dump_ptrs *fw_error_dump)
+iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt)
{
- int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type);
+ int size;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_ini_trigger *trigger;
-
- if (id == FW_DBG_TRIGGER_FW_ASSERT)
- id = IWL_FW_TRIGGER_ID_FW_ASSERT;
+ enum iwl_fw_ini_trigger_id id = fwrt->dump.ini_trig_id;
if (!iwl_fw_ini_trigger_on(fwrt, id))
return NULL;
trigger = fwrt->dump.active_trigs[id].trig;
- size = sizeof(*dump_file);
- size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-
+ size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
if (!size)
return NULL;
+ size += sizeof(*dump_file);
+
dump_file = vzalloc(size);
if (!dump_file)
return NULL;
- fw_error_dump->fwrt_ptr = dump_file;
-
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
dump_file->file_len = cpu_to_le32(size);
@@ -1828,47 +1940,27 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
return dump_file;
}
-void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
{
- struct iwl_fw_dump_ptrs *fw_error_dump;
+ struct iwl_fw_dump_ptrs fw_error_dump = {};
struct iwl_fw_error_dump_file *dump_file;
struct scatterlist *sg_dump_data;
u32 file_len;
u32 dump_mask = fwrt->fw->dbg.dump_mask;
- IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
-
- /* there's no point in fw dump if the bus is dead */
- if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
- IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
- goto out;
- }
-
- fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
- if (!fw_error_dump)
- goto out;
-
- if (fwrt->trans->ini_valid)
- dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump);
- else
- dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
-
- if (!dump_file) {
- kfree(fw_error_dump);
+ dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump);
+ if (!dump_file)
goto out;
- }
if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
- if (!fwrt->trans->ini_valid)
- fw_error_dump->trans_ptr =
- iwl_trans_dump_data(fwrt->trans, dump_mask);
-
+ fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
file_len = le32_to_cpu(dump_file->file_len);
- fw_error_dump->fwrt_len = file_len;
- if (fw_error_dump->trans_ptr) {
- file_len += fw_error_dump->trans_ptr->len;
+ fw_error_dump.fwrt_len = file_len;
+
+ if (fw_error_dump.trans_ptr) {
+ file_len += fw_error_dump.trans_ptr->len;
dump_file->file_len = cpu_to_le32(file_len);
}
@@ -1876,27 +1968,49 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (sg_dump_data) {
sg_pcopy_from_buffer(sg_dump_data,
sg_nents(sg_dump_data),
- fw_error_dump->fwrt_ptr,
- fw_error_dump->fwrt_len, 0);
- if (fw_error_dump->trans_ptr)
+ fw_error_dump.fwrt_ptr,
+ fw_error_dump.fwrt_len, 0);
+ if (fw_error_dump.trans_ptr)
sg_pcopy_from_buffer(sg_dump_data,
sg_nents(sg_dump_data),
- fw_error_dump->trans_ptr->data,
- fw_error_dump->trans_ptr->len,
- fw_error_dump->fwrt_len);
+ fw_error_dump.trans_ptr->data,
+ fw_error_dump.trans_ptr->len,
+ fw_error_dump.fwrt_len);
dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
GFP_KERNEL);
}
- vfree(fw_error_dump->fwrt_ptr);
- vfree(fw_error_dump->trans_ptr);
- kfree(fw_error_dump);
+ vfree(fw_error_dump.fwrt_ptr);
+ vfree(fw_error_dump.trans_ptr);
out:
iwl_fw_free_dump_desc(fwrt);
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
- IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
}
-IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
+
+static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_fw_error_dump_file *dump_file;
+ struct scatterlist *sg_dump_data;
+ u32 file_len;
+
+ dump_file = iwl_fw_error_ini_dump_file(fwrt);
+ if (!dump_file)
+ goto out;
+
+ file_len = le32_to_cpu(dump_file->file_len);
+
+ sg_dump_data = alloc_sgtable(file_len);
+ if (sg_dump_data) {
+ sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data),
+ dump_file, file_len, 0);
+ dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
+ GFP_KERNEL);
+ }
+ vfree(dump_file);
+out:
+ fwrt->dump.ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
+ clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
.trig_desc = {
@@ -1910,6 +2024,17 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
+ u32 trig_type = le32_to_cpu(desc->trig_desc.type);
+ int ret;
+
+ if (fwrt->trans->ini_valid) {
+ ret = iwl_fw_dbg_ini_collect(fwrt, trig_type);
+ if (!ret)
+ iwl_fw_free_dump_desc(fwrt);
+
+ return ret;
+ }
+
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return -EBUSY;
@@ -1942,23 +2067,19 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
iwl_dump_error_desc->len = 0;
ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
- if (ret) {
+ if (ret)
kfree(iwl_dump_error_desc);
- } else {
- set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-
- /* trigger nmi to halt the fw */
- iwl_force_nmi(fwrt->trans);
- }
+ else
+ iwl_trans_sync_nmi(fwrt->trans);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
-int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len,
- struct iwl_fw_dbg_trigger_tlv *trigger)
+int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig,
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_fw_dump_desc *desc;
unsigned int delay = 0;
@@ -1995,50 +2116,73 @@ int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
}
-IWL_EXPORT_SYMBOL(_iwl_fw_dbg_collect);
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
-int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- u32 id, const char *str, size_t len)
+int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_trigger_id id)
{
- struct iwl_fw_dump_desc *desc;
struct iwl_fw_ini_active_triggers *active;
u32 occur, delay;
- if (!fwrt->trans->ini_valid)
- return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL);
-
- if (id == FW_DBG_TRIGGER_USER)
- id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
+ if (WARN_ON(!iwl_fw_ini_trigger_on(fwrt, id)))
+ return -EINVAL;
- active = &fwrt->dump.active_trigs[id];
+ if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ return -EBUSY;
- if (WARN_ON(!active->active))
+ if (!iwl_fw_ini_trigger_on(fwrt, id)) {
+ IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
+ id);
return -EINVAL;
+ }
+ active = &fwrt->dump.active_trigs[id];
delay = le32_to_cpu(active->trig->dump_delay);
occur = le32_to_cpu(active->trig->occurrences);
if (!occur)
return 0;
+ active->trig->occurrences = cpu_to_le32(--occur);
+
if (le32_to_cpu(active->trig->force_restart)) {
- IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
+ IWL_WARN(fwrt, "WRT: force restart: trigger %d fired.\n", id);
iwl_force_nmi(fwrt->trans);
return 0;
}
- desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
- if (!desc)
- return -ENOMEM;
+ if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ return -EBUSY;
- active->trig->occurrences = cpu_to_le32(--occur);
+ fwrt->dump.ini_trig_id = id;
- desc->len = len;
- desc->trig_desc.type = cpu_to_le32(id);
- memcpy(desc->trig_desc.data, str, len);
+ IWL_WARN(fwrt, "WRT: collecting data: ini trigger %d fired.\n", id);
+
+ schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay));
- return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay);
+ return 0;
}
-IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
+IWL_EXPORT_SYMBOL(_iwl_fw_dbg_ini_collect);
+
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id)
+{
+ int id;
+
+ switch (legacy_trigger_id) {
+ case FW_DBG_TRIGGER_FW_ASSERT:
+ case FW_DBG_TRIGGER_ALIVE_TIMEOUT:
+ case FW_DBG_TRIGGER_DRIVER:
+ id = IWL_FW_TRIGGER_ID_FW_ASSERT;
+ break;
+ case FW_DBG_TRIGGER_USER:
+ id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
+ break;
+ default:
+ return -EIO;
+ }
+
+ return _iwl_fw_dbg_ini_collect(fwrt, id);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_ini_collect);
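With this split, legacy call sites can keep passing FW_DBG_TRIGGER_* ids and let iwl_fw_dbg_ini_collect() translate them, while non-ini configurations still go through iwl_fw_dbg_collect(). A hypothetical caller, just to show the two paths side by side:

static int iwl_collect_user_dump(struct iwl_fw_runtime *fwrt)
{
	const char *reason = "user requested dump";

	if (fwrt->trans->ini_valid)
		return iwl_fw_dbg_ini_collect(fwrt, FW_DBG_TRIGGER_USER);

	return iwl_fw_dbg_collect(fwrt, FW_DBG_TRIGGER_USER, reason,
				  strlen(reason) + 1, NULL);
}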
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
@@ -2066,8 +2210,8 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
len = strlen(buf) + 1;
}
- ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
- trigger);
+ ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
+ trigger);
if (ret)
return ret;
@@ -2141,9 +2285,20 @@ void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
return;
}
+ /* there's no point in fw dump if the bus is dead */
+ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
+ IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
+ return;
+ }
+
iwl_fw_dbg_stop_recording(fwrt, &params);
- iwl_fw_error_dump(fwrt);
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection start\n");
+ if (fwrt->trans->ini_valid)
+ iwl_fw_error_ini_dump(fwrt);
+ else
+ iwl_fw_error_dump(fwrt);
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection done\n");
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
@@ -2213,12 +2368,14 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size)
if (!virtual_addr)
IWL_ERR(fwrt, "Failed to allocate debug memory\n");
+ IWL_DEBUG_FW(trans,
+ "Allocated DRAM buffer[%d], size=0x%x\n",
+ trans->num_blocks, size);
+
trans->fw_mon[trans->num_blocks].block = virtual_addr;
trans->fw_mon[trans->num_blocks].physical = phys_addr;
trans->fw_mon[trans->num_blocks].size = size;
trans->num_blocks++;
-
- IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
}
static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
@@ -2241,11 +2398,15 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) {
if (!WARN(pnt != IWL_FW_INI_APPLY_EARLY,
- "Invalid apply point %d for SMEM buffer allocation",
- pnt))
+ "WRT: Invalid apply point %d for SMEM buffer allocation, aborting\n",
+ pnt)) {
+ IWL_DEBUG_FW(trans,
+ "WRT: applying SMEM buffer destination\n");
+
/* set sram monitor by enabling bit 7 */
iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+ }
return;
}
@@ -2264,6 +2425,9 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
if (trans->num_blocks == 1)
return;
+ IWL_DEBUG_FW(trans,
+ "WRT: applying DRAM buffer[%d] destination\n", block_idx);
+
cmd->num_frags = cpu_to_le32(1);
cmd->fragments[0].address =
cpu_to_le64(trans->fw_mon[block_idx].physical);
@@ -2275,7 +2439,8 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
}
static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
- struct iwl_ucode_tlv *tlv)
+ struct iwl_ucode_tlv *tlv,
+ bool ext)
{
struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0];
struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd;
@@ -2287,6 +2452,14 @@ static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
.data = { data->data, },
};
+ /* currently the driver supports the always-on domain only */
+ if (le32_to_cpu(hcmd_tlv->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
+ return;
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Sending host command id=0x%x, group=0x%x\n",
+ ext, data->id, data->group);
+
iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
@@ -2296,24 +2469,32 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
{
void *iter = (void *)tlv->region_config;
int i, size = le32_to_cpu(tlv->num_regions);
+ const char *err_st =
+ "WRT: ext=%d. Invalid region %s %d for apply point %d\n";
for (i = 0; i < size; i++) {
struct iwl_fw_ini_region_cfg *reg = iter, **active;
int id = le32_to_cpu(reg->region_id);
u32 type = le32_to_cpu(reg->region_type);
- if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
- "Invalid region id %d for apply point %d\n", id, pnt))
+ if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), err_st, ext,
+ "id", id, pnt))
+ break;
+
+ if (WARN(type == 0 || type >= IWL_FW_INI_REGION_NUM, err_st,
+ ext, "type", type, pnt))
break;
active = &fwrt->dump.active_regs[id];
if (*active)
- IWL_WARN(fwrt->trans, "region TLV %d override\n", id);
+ IWL_WARN(fwrt->trans,
+ "WRT: ext=%d. Region id %d override\n",
+ ext, id);
IWL_DEBUG_FW(fwrt,
- "%s: apply point %d, activating region ID %d\n",
- __func__, pnt, id);
+ "WRT: ext=%d. Activating region id %d\n",
+ ext, id);
*active = reg;
@@ -2321,7 +2502,15 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
type == IWL_FW_INI_REGION_RXF)
iter += le32_to_cpu(reg->fifos.num_of_registers) *
sizeof(__le32);
- else if (type != IWL_FW_INI_REGION_DRAM_BUFFER)
+ else if (type == IWL_FW_INI_REGION_DEVICE_MEMORY ||
+ type == IWL_FW_INI_REGION_PERIPHERY_MAC ||
+ type == IWL_FW_INI_REGION_PERIPHERY_PHY ||
+ type == IWL_FW_INI_REGION_PERIPHERY_AUX ||
+ type == IWL_FW_INI_REGION_INTERNAL_BUFFER ||
+ type == IWL_FW_INI_REGION_PAGING ||
+ type == IWL_FW_INI_REGION_CSR ||
+ type == IWL_FW_INI_REGION_LMAC_ERROR_TABLE ||
+ type == IWL_FW_INI_REGION_UMAC_ERROR_TABLE)
iter += le32_to_cpu(reg->internal.num_of_ranges) *
sizeof(__le32);
@@ -2340,7 +2529,8 @@ static int iwl_fw_dbg_trig_realloc(struct iwl_fw_runtime *fwrt,
ptr = krealloc(active->trig, size, GFP_KERNEL);
if (!ptr) {
- IWL_ERR(fwrt, "Failed to allocate memory for trigger %d\n", id);
+ IWL_ERR(fwrt, "WRT: Failed to allocate memory for trigger %d\n",
+ id);
return -ENOMEM;
}
active->trig = ptr;
@@ -2364,7 +2554,9 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
u32 trig_regs_size = le32_to_cpu(trig->num_regions) *
sizeof(__le32);
- if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
+ if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_trigs),
+ "WRT: ext=%d. Invalid trigger id %d for apply point %d\n",
+ ext, id, apply_point))
break;
active = &fwrt->dump.active_trigs[id];
@@ -2372,6 +2564,10 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
if (!active->active) {
size_t trig_size = sizeof(*trig) + trig_regs_size;
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Activating trigger %d\n",
+ ext, id);
+
if (iwl_fw_dbg_trig_realloc(fwrt, active, id,
trig_size))
goto next;
@@ -2390,8 +2586,16 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
int mem_to_add = trig_regs_size;
if (region_override) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Trigger %d regions override\n",
+ ext, id);
+
mem_to_add -= active_regs * sizeof(__le32);
} else {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Trigger %d regions appending\n",
+ ext, id);
+
offset += active_regs;
new_regs += active_regs;
}
@@ -2400,8 +2604,13 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
active->size + mem_to_add))
goto next;
- if (conf_override)
+ if (conf_override) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Trigger %d configuration override\n",
+ ext, id);
+
memcpy(active->trig, trig, sizeof(*trig));
+ }
memcpy(active->trig->data + offset, trig->data,
trig_regs_size);
@@ -2413,6 +2622,20 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
active->trig->occurrences = cpu_to_le32(-1);
active->active = true;
+
+ if (id == IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER) {
+ u32 collect_interval = le32_to_cpu(trig->trigger_data);
+
+ /* the minimum allowed interval is 50ms */
+ if (collect_interval < 50) {
+ collect_interval = 50;
+ trig->trigger_data =
+ cpu_to_le32(collect_interval);
+ }
+
+ mod_timer(&fwrt->dump.periodic_trig,
+ jiffies + msecs_to_jiffies(collect_interval));
+ }
next:
iter += sizeof(*trig) + trig_regs_size;
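
The periodic trigger path above clamps the requested collection interval to a 50 ms floor before converting it to jiffies for the timer. Below is a minimal userspace-style sketch of that clamp-and-convert step, assuming an illustrative tick rate; MY_HZ and the helper names are local to the sketch, not kernel API.

/* Sketch of the interval clamp used for the periodic trigger. The 50 ms
 * floor mirrors the hunk above; the conversion helper rounds up the way
 * msecs_to_jiffies() does, but with an assumed tick rate.
 */
#include <stdint.h>

#define MY_HZ 250u                      /* assumed tick rate for the example */
#define MIN_COLLECT_INTERVAL_MS 50u     /* floor enforced by the driver */

static uint32_t example_msecs_to_jiffies(uint32_t ms)
{
	return (ms * MY_HZ + 999u) / 1000u;   /* round up to whole ticks */
}

static uint32_t example_periodic_delay(uint32_t requested_ms)
{
	if (requested_ms < MIN_COLLECT_INTERVAL_MS)
		requested_ms = MIN_COLLECT_INTERVAL_MS;
	return example_msecs_to_jiffies(requested_ms);
}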
@@ -2442,11 +2665,11 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
case IWL_UCODE_TLV_TYPE_HCMD:
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
IWL_ERR(fwrt,
- "Invalid apply point %x for host command\n",
- pnt);
+ "WRT: ext=%d. Invalid apply point %d for host command\n",
+ ext, pnt);
goto next;
}
- iwl_fw_dbg_send_hcmd(fwrt, tlv);
+ iwl_fw_dbg_send_hcmd(fwrt, tlv, ext);
break;
case IWL_UCODE_TLV_TYPE_REGIONS:
iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
@@ -2457,7 +2680,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
break;
default:
- WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
+ WARN_ONCE(1,
+ "WRT: ext=%d. Invalid TLV 0x%x for apply point\n",
+ ext, type);
break;
}
next:
@@ -2471,6 +2696,8 @@ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
void *data = &fwrt->trans->apply_points[apply_point];
int i;
+ IWL_DEBUG_FW(fwrt, "WRT: enabling apply point %d\n", apply_point);
+
if (apply_point == IWL_FW_INI_APPLY_EARLY) {
for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++)
fwrt->dump.active_regs[i] = NULL;
@@ -2489,24 +2716,34 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
{
- /* if the wait event timeout elapses instead of wake up then
- * the driver did not receive NMI interrupt and can not assume the FW
- * is halted
- */
- int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
- !test_bit(STATUS_FW_WAIT_DUMP,
- &fwrt->trans->status),
- msecs_to_jiffies(2000));
- if (!ret) {
- /* failed to receive NMI interrupt, assuming the FW is stuck */
- set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
-
- clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
- }
-
- /* Assuming the op mode mutex is held at this point */
+ del_timer(&fwrt->dump.periodic_trig);
iwl_fw_dbg_collect_sync(fwrt);
iwl_trans_stop_device(fwrt->trans);
}
IWL_EXPORT_SYMBOL(iwl_fwrt_stop_device);
+
+void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t)
+{
+ struct iwl_fw_runtime *fwrt;
+ enum iwl_fw_ini_trigger_id id = IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER;
+ int ret;
+ typeof(fwrt->dump) *dump_ptr = container_of(t, typeof(fwrt->dump),
+ periodic_trig);
+
+ fwrt = container_of(dump_ptr, typeof(*fwrt), dump);
+
+ ret = _iwl_fw_dbg_ini_collect(fwrt, id);
+ if (!ret || ret == -EBUSY) {
+ struct iwl_fw_ini_trigger *trig =
+ fwrt->dump.active_trigs[id].trig;
+ u32 occur = le32_to_cpu(trig->occurrences);
+ u32 collect_interval = le32_to_cpu(trig->trigger_data);
+
+ if (!occur)
+ return;
+
+ mod_timer(&fwrt->dump.periodic_trig,
+ jiffies + msecs_to_jiffies(collect_interval));
+ }
+}
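
The handler above recovers the runtime structure in two steps because the timer is embedded in an anonymous dump struct inside it. A self-contained sketch of that double container_of() pattern follows, using simplified stand-in types and the minimal offsetof()-based macro rather than the kernel's type-checked version.

/* Stand-alone illustration of recovering the outer object from a nested
 * member: first the enclosing dump struct from the timer, then the runtime
 * from the dump. The struct names are placeholders for this sketch.
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_timer { int dummy; };

struct example_dump {
	struct example_timer periodic_trig;
};

struct example_runtime {
	struct example_dump dump;
};

static struct example_runtime *runtime_from_timer(struct example_timer *t)
{
	struct example_dump *dump =
		container_of(t, struct example_dump, periodic_trig);

	return container_of(dump, struct example_runtime, dump);
}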
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index a199056234d3..2a9e560a906b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -108,18 +108,17 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
fwrt->dump.umac_err_id = 0;
}
-void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
bool monitor_only, unsigned int delay);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type);
-int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len,
- struct iwl_fw_dbg_trigger_tlv *trigger);
+int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_trigger_id id);
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- u32 id, const char *str, size_t len);
+ enum iwl_fw_dbg_trigger trig, const char *str,
+ size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...) __printf(3, 4);
@@ -229,10 +228,8 @@ iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger *trig;
u32 usec;
-
-
- if (!fwrt->trans->ini_valid || id >= IWL_FW_TRIGGER_ID_NUM ||
- !fwrt->dump.active_trigs[id].active)
+ if (!fwrt->trans->ini_valid || id == IWL_FW_TRIGGER_ID_INVALID ||
+ id >= IWL_FW_TRIGGER_ID_NUM || !fwrt->dump.active_trigs[id].active)
return false;
trig = fwrt->dump.active_trigs[id].trig;
@@ -388,11 +385,13 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
{
+ del_timer(&fwrt->dump.periodic_trig);
flush_delayed_work(&fwrt->dump.wk);
}
static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
{
+ del_timer(&fwrt->dump.periodic_trig);
cancel_delayed_work_sync(&fwrt->dump.wk);
}
@@ -461,4 +460,15 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
/* This bit is used to differentiate the legacy dump from the ini dump */
#define INI_DUMP_BIT BIT(31)
+static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
+{
+ if (fwrt->trans->ini_valid && fwrt->trans->hw_error) {
+ _iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR);
+ fwrt->trans->hw_error = false;
+ } else {
+ iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ }
+}
+
+void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 9b5077bd46c3..0feff4c33e39 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -184,7 +184,7 @@ enum iwl_fw_error_dump_family {
/**
* struct iwl_fw_error_dump_info - info on the device / firmware
- * @device_family: the family of the device (7 / 8)
+ * @hw_type: the type of the device
* @hw_step: the step of the device
* @fw_human_readable: human readable FW version
* @dev_human_readable: name of the device
@@ -196,7 +196,7 @@ enum iwl_fw_error_dump_family {
* if the dump collection was not initiated by an assert, the value is 0
*/
struct iwl_fw_error_dump_info {
- __le32 device_family;
+ __le32 hw_type;
__le32 hw_step;
u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
u8 dev_human_readable[64];
@@ -211,6 +211,9 @@ struct iwl_fw_error_dump_info {
* @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
* @fw_mon_base_ptr: base pointer of the data
* @fw_mon_cycle_cnt: number of wraparounds
+ * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bits,
+ * so fw_mon_base_ptr holds LSB 32 bits and fw_mon_base_high_ptr holds
+ * MSB 32 bits
* @reserved: for future use
* @data: captured data
*/
@@ -218,7 +221,8 @@ struct iwl_fw_error_dump_fw_mon {
__le32 fw_mon_wr_ptr;
__le32 fw_mon_base_ptr;
__le32 fw_mon_cycle_cnt;
- __le32 reserved[3];
+ __le32 fw_mon_base_high_ptr;
+ __le32 reserved[2];
u8 data[];
} __packed;
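
The new fw_mon_base_high_ptr field splits a 64-bit monitor base address across two little-endian 32-bit words. A hedged sketch of how a dump consumer could reassemble it; read_le32() and the ex_fw_mon layout are illustrative, only the LSB/MSB split comes from the comment above.

#include <stdint.h>

static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

struct ex_fw_mon {
	uint8_t fw_mon_base_ptr[4];       /* LSB 32 bits */
	uint8_t fw_mon_base_high_ptr[4];  /* MSB 32 bits */
};

static uint64_t fw_mon_base(const struct ex_fw_mon *m)
{
	return ((uint64_t)read_le32(m->fw_mon_base_high_ptr) << 32) |
	       read_le32(m->fw_mon_base_ptr);
}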
@@ -274,25 +278,33 @@ struct iwl_fw_error_dump_mem {
u8 data[];
};
+#define IWL_INI_DUMP_MEM_VER 1
+#define IWL_INI_DUMP_MONITOR_VER 1
+#define IWL_INI_DUMP_FIFO_VER 1
+
/**
* struct iwl_fw_ini_error_dump_range - range of memory
- * @start_addr: the start address of this range
* @range_data_size: the size of this range, in bytes
+ * @start_addr: the start address of this range
* @data: the actual memory
*/
struct iwl_fw_ini_error_dump_range {
- __le32 start_addr;
__le32 range_data_size;
+ __le64 start_addr;
__le32 data[];
} __packed;
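
Each ini dump range now carries a 64-bit start address and its own range_data_size, so a parser can walk ranges back to back without a fixed stride. A minimal sketch of that walk, assuming the packed 12-byte header layout implied by the struct and ranges laid out consecutively; the helper names are hypothetical.

#include <stdint.h>
#include <stddef.h>

#define EX_RANGE_HDR_SIZE (4 + 8)  /* __le32 range_data_size + __le64 start_addr, packed */

static uint32_t ex_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Count how many complete ranges fit in a buffer of len bytes. */
static size_t ex_count_ranges(const uint8_t *buf, size_t len)
{
	size_t off = 0, n = 0;

	while (off + EX_RANGE_HDR_SIZE <= len) {
		uint32_t data_size = ex_le32(buf + off);

		off += EX_RANGE_HDR_SIZE + data_size;
		if (off > len)
			break;
		n++;
	}
	return n;
}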
/**
* struct iwl_fw_ini_error_dump_header - ini region dump header
+ * @version: dump version
+ * @region_id: id of the region
* @num_of_ranges: number of ranges in this region
* @name_len: number of bytes allocated to the name string of this region
* @name: name of the region
*/
struct iwl_fw_ini_error_dump_header {
+ __le32 version;
+ __le32 region_id;
__le32 num_of_ranges;
__le32 name_len;
u8 name[IWL_FW_INI_MAX_NAME];
@@ -312,12 +324,23 @@ struct iwl_fw_ini_error_dump {
#define IWL_RXF_UMAC_BIT BIT(31)
/**
+ * struct iwl_fw_ini_error_dump_register - ini register dump
+ * @addr: address of the register
+ * @data: data of the register
+ */
+struct iwl_fw_ini_error_dump_register {
+ __le32 addr;
+ __le32 data;
+} __packed;
+
+/**
* struct iwl_fw_ini_fifo_error_dump_range - ini fifo range dump
* @fifo_num: the fifo num. In case of rxf and umac rxf, set BIT(31) to
* distinguish between lmac and umac
* @num_of_registers: num of registers to dump, dword size each
- * @range_data_size: the size of the registers and fifo data
- * @data: fifo data
+ * @range_data_size: the size of the data
+ * @data: consists of
+ * num_of_registers * (register address + register value) + fifo data
*/
struct iwl_fw_ini_fifo_error_dump_range {
__le32 fifo_num;
@@ -351,13 +374,13 @@ struct iwl_fw_error_dump_rb {
};
/**
- * struct iwl_fw_ini_monitor_dram_dump - ini dram monitor dump
+ * struct iwl_fw_ini_monitor_dump - ini monitor dump
* @header - header of the region
- * @write_ptr - write pointer position in the dram
+ * @write_ptr - write pointer position in the buffer
* @cycle_cnt - cycles count
* @ranges - the memory ranges of this region
*/
-struct iwl_fw_ini_monitor_dram_dump {
+struct iwl_fw_ini_monitor_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 write_ptr;
__le32 cycle_cnt;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 641c95d03b15..de9243d30135 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -93,7 +93,7 @@ struct iwl_ucode_header {
} u;
};
-#define IWL_UCODE_INI_TLV_GROUP BIT(24)
+#define IWL_UCODE_INI_TLV_GROUP 0x1000000
/*
* new TLV uCode file layout
@@ -142,17 +142,22 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
+ IWL_UCODE_TLV_CMD_VERSIONS = 48,
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
- IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
- IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
- IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
- IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
- IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,
+ IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
+
+ IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1,
+ IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
+ IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP + 0x2,
+ IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP + 0x3,
+ IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP + 0x4,
+ IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP + 0x5,
+ IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW,
/* TLVs 0x1000-0x2000 are for internal driver usage */
IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
@@ -272,8 +277,15 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* version of the beacon notification.
* @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of
* BEACON_FILTER_CONFIG_API_S_VER_4.
+ * @IWL_UCODE_TLV_API_REGULATORY_NVM_INFO: This ucode supports v4 of
+ * REGULATORY_NVM_GET_INFO_RSP_API_S.
* @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of
* LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S.
+ * @IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS: This ucode supports v2 of
+ * SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S and v3 of
+ * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
+ * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
+ * STA_CONTEXT_DOT11AX_API_S
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -300,7 +312,12 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45,
IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46,
IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47,
+ IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = (__force iwl_ucode_tlv_api_t)48,
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50,
+ IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
+ IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -350,6 +367,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command
* @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
* (6 GHz).
+ * @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -420,6 +438,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46,
IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48,
IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47,
+ IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@@ -925,4 +944,20 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
+#define IWL_FW_CMD_VER_UNKNOWN 99
+
+/**
+ * struct iwl_fw_cmd_version - firmware command version entry
+ * @cmd: command ID
+ * @group: group ID
+ * @cmd_ver: command version
+ * @notif_ver: notification version
+ */
+struct iwl_fw_cmd_version {
+ u8 cmd;
+ u8 group;
+ u8 cmd_ver;
+ u8 notif_ver;
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
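
The new IWL_UCODE_TLV_CMD_VERSIONS table is an array of 4-byte (cmd, group, cmd_ver, notif_ver) entries whose count comes only from the TLV length. A hedged sketch of a lookup over such a table, returning a sentinel like IWL_FW_CMD_VER_UNKNOWN when no entry matches; the helper and its types are illustrative, not the driver's.

#include <stdint.h>
#include <stddef.h>

#define EX_CMD_VER_UNKNOWN 99  /* mirrors IWL_FW_CMD_VER_UNKNOWN above */

struct ex_cmd_version {
	uint8_t cmd;
	uint8_t group;
	uint8_t cmd_ver;
	uint8_t notif_ver;
};

/* Linear scan for the (group, cmd) pair; the table is small. */
static uint8_t ex_lookup_cmd_ver(const struct ex_cmd_version *tbl, size_t n,
				 uint8_t group, uint8_t cmd)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].group == group && tbl[i].cmd == cmd)
			return tbl[i].cmd_ver;
	return EX_CMD_VER_UNKNOWN;
}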
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f4c5a4d73206..18ca5f152be6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -8,7 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,6 +109,9 @@ struct iwl_ucode_capabilities {
u32 error_log_size;
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
+
+ const struct iwl_fw_cmd_version *cmd_versions;
+ u32 n_cmd_versions;
};
static inline bool
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 7adf4e4e841a..4435c0ce3013 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -76,7 +76,8 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
- init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
+ timer_setup(&fwrt->dump.periodic_trig,
+ iwl_fw_dbg_periodic_trig_handler, 0);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index a5fe1a8ca426..a6402a0b3854 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -145,6 +145,8 @@ struct iwl_fw_runtime {
u32 lmac_err_id[MAX_NUM_LMAC];
u32 umac_err_id;
void *fifo_iter;
+ enum iwl_fw_ini_trigger_id ini_trig_id;
+ struct timer_list periodic_trig;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f5f87773667b..f3e69edf8907 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -383,6 +383,9 @@ struct iwl_csr_params {
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @min_txq_size: minimum number of slots required in a TX queue
* @umac_prph_offset: offset to add to UMAC periphery address
+ * @uhb_supported: ultra high band channels supported
+ * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
+ * supports 256 BA aggregation
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -433,7 +436,8 @@ struct iwl_cfg {
gen2:1,
cdb:1,
dbgc_supported:1,
- bisr_workaround:1;
+ bisr_workaround:1,
+ uhb_supported:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@@ -450,6 +454,12 @@ struct iwl_cfg {
u32 d3_debug_data_length;
u32 min_txq_size;
u32 umac_prph_offset;
+ u32 fw_mon_smem_write_ptr_addr;
+ u32 fw_mon_smem_write_ptr_msk;
+ u32 fw_mon_smem_cycle_cnt_ptr_addr;
+ u32 fw_mon_smem_cycle_cnt_ptr_msk;
+ u32 gp2_reg_addr;
+ u32 min_256_ba_txq_size;
};
extern const struct iwl_csr_params iwl_csr_v1;
@@ -549,8 +559,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
@@ -567,11 +578,11 @@ extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
-extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
+extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index aea6d03e545a..553554846009 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -8,7 +8,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -290,6 +290,7 @@
/* HW REV */
#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+#define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4)
/* HW RFID */
#define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)
@@ -327,6 +328,7 @@ enum {
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
+#define CSR_HW_REV_TYPE_QUZ (0x0000354)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
#define CSR_HW_REV_TYPE_SO (0x0000370)
#define CSR_HW_REV_TYPE_TY (0x0000420)
@@ -336,6 +338,7 @@ enum {
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
+#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 5798f434f68f..ba66f7fba064 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,7 @@
*
* BSD LICENSE
*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,6 +73,9 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv);
int offset_size = copy_size;
+ if (le32_to_cpu(header->tlv_version) != 1)
+ return;
+
if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM,
"Invalid apply point id %d\n", apply_point))
return;
@@ -126,13 +129,17 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
len -= ALIGN(tlv_len, 4);
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
- if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP))
+ if (tlv_type < IWL_UCODE_TLV_DEBUG_BASE ||
+ tlv_type > IWL_UCODE_TLV_DEBUG_MAX)
continue;
hdr = (void *)&tlv->data[0];
apply = le32_to_cpu(hdr->apply_point);
- IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n",
+ if (le32_to_cpu(hdr->tlv_version) != 1)
+ continue;
+
+ IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n",
le32_to_cpu(tlv->type), apply);
if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index 655ff5694560..d3ba6a1422ee 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -218,5 +218,7 @@ do { \
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
+#define IWL_DEBUG_FW_INFO(p, f, a...) \
+ IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 689a65b11cc3..852d3cbfc719 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -179,6 +179,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg.trigger_tlv[i]);
kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
+ kfree(drv->fw.ucode_capa.cmd_versions);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -252,8 +253,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
cfg->fw_name_pre, tag);
- IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
- drv->firmware_name);
+ IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
+ drv->firmware_name);
return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
drv->trans->dev,
@@ -1144,6 +1145,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (iwlwifi_mod_params.enable_ini)
iwl_fw_dbg_copy_tlv(drv->trans, tlv, false);
break;
+ case IWL_UCODE_TLV_CMD_VERSIONS:
+ if (tlv_len % sizeof(struct iwl_fw_cmd_version)) {
+ IWL_ERR(drv,
+ "Invalid length for command versions: %u\n",
+ tlv_len);
+ tlv_len /= sizeof(struct iwl_fw_cmd_version);
+ tlv_len *= sizeof(struct iwl_fw_cmd_version);
+ }
+ if (WARN_ON(capa->cmd_versions))
+ return -EINVAL;
+ capa->cmd_versions = kmemdup(tlv_data, tlv_len,
+ GFP_KERNEL);
+ if (!capa->cmd_versions)
+ return -ENOMEM;
+ capa->n_cmd_versions =
+ tlv_len / sizeof(struct iwl_fw_cmd_version);
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
@@ -1318,8 +1336,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (!ucode_raw)
goto try_again;
- IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
- drv->firmware_name, ucode_raw->size);
+ IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
+ drv->firmware_name, ucode_raw->size);
/* Make sure that we got at least the API version number */
if (ucode_raw->size < 4) {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 87d6de7efdd2..d87a6bb3e456 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -130,7 +130,7 @@ enum nvm_sku_bits {
/*
* These are the channel numbers in the order that they are stored in the NVM
*/
-static const u8 iwl_nvm_channels[] = {
+static const u16 iwl_nvm_channels[] = {
/* 2.4 GHz */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz */
@@ -139,7 +139,7 @@ static const u8 iwl_nvm_channels[] = {
149, 153, 157, 161, 165
};
-static const u8 iwl_ext_nvm_channels[] = {
+static const u16 iwl_ext_nvm_channels[] = {
/* 2.4 GHz */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz */
@@ -148,14 +148,27 @@ static const u8 iwl_ext_nvm_channels[] = {
149, 153, 157, 161, 165, 169, 173, 177, 181
};
+static const u16 iwl_uhb_nvm_channels[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165, 169, 173, 177, 181,
+ /* 6-7 GHz */
+ 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241,
+ 245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297,
+ 301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353,
+ 357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409,
+ 413, 417, 421
+};
+
#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
+#define IWL_NVM_NUM_CHANNELS_UHB ARRAY_SIZE(iwl_uhb_nvm_channels)
#define NUM_2GHZ_CHANNELS 14
-#define NUM_2GHZ_CHANNELS_EXT 14
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
-#define LAST_5GHZ_HT 165
-#define LAST_5GHZ_HT_FAMILY_8000 181
#define N_HW_ADDR_MASK 0xF
/* rate data (static) */
@@ -213,7 +226,7 @@ enum iwl_nvm_channel_flags {
};
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
- int chan, u16 flags)
+ int chan, u32 flags)
{
#define CHECK_AND_PRINT_I(x) \
((flags & NVM_CHANNEL_##x) ? " " #x : "")
@@ -244,20 +257,16 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
}
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
- u16 nvm_flags, const struct iwl_cfg *cfg)
+ u32 nvm_flags, const struct iwl_cfg *cfg)
{
u32 flags = IEEE80211_CHAN_NO_HT40;
- u32 last_5ghz_ht = LAST_5GHZ_HT;
-
- if (cfg->nvm_type == IWL_NVM_EXT)
- last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
if (ch_num <= LAST_2GHZ_HT_PLUS)
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
if (ch_num >= FIRST_2GHZ_HT_MINUS)
flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
- } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
else
@@ -292,30 +301,36 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
- const __le16 * const nvm_ch_flags,
- u32 sbands_flags)
+ const void * const nvm_ch_flags,
+ u32 sbands_flags, bool v4)
{
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
- u16 ch_flags;
- int num_of_ch, num_2ghz_channels;
- const u8 *nvm_chan;
-
- if (cfg->nvm_type != IWL_NVM_EXT) {
- num_of_ch = IWL_NVM_NUM_CHANNELS;
- nvm_chan = &iwl_nvm_channels[0];
- num_2ghz_channels = NUM_2GHZ_CHANNELS;
- } else {
+ u32 ch_flags;
+ int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS;
+ const u16 *nvm_chan;
+
+ if (cfg->uhb_supported) {
+ num_of_ch = IWL_NVM_NUM_CHANNELS_UHB;
+ nvm_chan = iwl_uhb_nvm_channels;
+ } else if (cfg->nvm_type == IWL_NVM_EXT) {
num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
- nvm_chan = &iwl_ext_nvm_channels[0];
- num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
+ nvm_chan = iwl_ext_nvm_channels;
+ } else {
+ num_of_ch = IWL_NVM_NUM_CHANNELS;
+ nvm_chan = iwl_nvm_channels;
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
bool is_5ghz = (ch_idx >= num_2ghz_channels);
- ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+ if (v4)
+ ch_flags =
+ __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
+ else
+ ch_flags =
+ __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
if (is_5ghz && !data->sku_cap_band_52ghz_enable)
continue;
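
With REGULATORY_NVM_INFO, the channel profile switches from 16-bit to 32-bit flag words, which is why the channel map code above now takes a void pointer plus a v4 flag. A plain-C sketch of that width switch under the assumption of a little-endian buffer; the decode helpers are local to the example.

#include <stdint.h>

static uint32_t ex_get16_le(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8);
}

static uint32_t ex_get32_le(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* One flag word per channel: 4 bytes in v4, 2 bytes in older formats. */
static uint32_t ex_channel_flags(const uint8_t *profile, int ch_idx, int v4)
{
	return v4 ? ex_get32_le(profile + 4 * ch_idx)
		  : ex_get16_le(profile + 2 * ch_idx);
}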
@@ -636,12 +651,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
u8 tx_chains, u8 rx_chains)
{
- if (sband->band == NL80211_BAND_2GHZ ||
- sband->band == NL80211_BAND_5GHZ)
- sband->iftype_data = iwl_he_capa;
- else
- return;
-
+ sband->iftype_data = iwl_he_capa;
sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
@@ -661,15 +671,15 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
- const __le16 *nvm_ch_flags, u8 tx_chains,
- u8 rx_chains, u32 sbands_flags)
+ const void *nvm_ch_flags, u8 tx_chains,
+ u8 rx_chains, u32 sbands_flags, bool v4)
{
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
- sbands_flags);
+ sbands_flags, v4);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -1006,22 +1016,18 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
- sbands_flags);
+ sbands_flags, false);
data->calib_version = 255;
return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
-static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
+static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
- u32 last_5ghz_ht = LAST_5GHZ_HT;
-
- if (cfg->nvm_type == IWL_NVM_EXT)
- last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (ch_idx < NUM_2GHZ_CHANNELS &&
(nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -1029,8 +1035,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
flags &= ~NL80211_RRF_NO_HT40PLUS;
if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
flags &= ~NL80211_RRF_NO_HT40MINUS;
- } else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
- (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
flags &= ~NL80211_RRF_NO_HT40PLUS;
else
@@ -1074,18 +1079,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int ch_idx;
u16 ch_flags;
u32 reg_rule_flags, prev_reg_rule_flags = 0;
- const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
- iwl_ext_nvm_channels : iwl_nvm_channels;
+ const u16 *nvm_chan;
struct ieee80211_regdomain *regd, *copy_rd;
- int size_of_regd, regd_to_copy;
struct ieee80211_reg_rule *rule;
struct regdb_ptrs *regdb_ptrs;
enum nl80211_band band;
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
- int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
- IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
+ int max_num_ch;
+
+ if (cfg->uhb_supported) {
+ max_num_ch = IWL_NVM_NUM_CHANNELS_UHB;
+ nvm_chan = iwl_uhb_nvm_channels;
+ } else if (cfg->nvm_type == IWL_NVM_EXT) {
+ max_num_ch = IWL_NVM_NUM_CHANNELS_EXT;
+ nvm_chan = iwl_ext_nvm_channels;
+ } else {
+ max_num_ch = IWL_NVM_NUM_CHANNELS;
+ nvm_chan = iwl_nvm_channels;
+ }
if (WARN_ON(num_of_ch > max_num_ch))
num_of_ch = max_num_ch;
@@ -1097,11 +1110,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
num_of_ch);
/* build a regdomain rule for every valid channel */
- size_of_regd =
- sizeof(struct ieee80211_regdomain) +
- num_of_ch * sizeof(struct ieee80211_reg_rule);
-
- regd = kzalloc(size_of_regd, GFP_KERNEL);
+ regd = kzalloc(struct_size(regd, reg_rules, num_of_ch), GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
@@ -1177,14 +1186,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
* Narrow down regdom for unused regulatory rules to prevent hole
* between reg rules to wmm rules.
*/
- regd_to_copy = sizeof(struct ieee80211_regdomain) +
- valid_rules * sizeof(struct ieee80211_reg_rule);
-
- copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
- if (!copy_rd) {
+ copy_rd = kmemdup(regd, struct_size(regd, reg_rules, valid_rules),
+ GFP_KERNEL);
+ if (!copy_rd)
copy_rd = ERR_PTR(-ENOMEM);
- goto out;
- }
out:
kfree(regdb_ptrs);
@@ -1393,7 +1398,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw)
{
struct iwl_nvm_get_info cmd = {};
- struct iwl_nvm_get_info_rsp *rsp;
struct iwl_nvm_data *nvm;
struct iwl_host_cmd hcmd = {
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
@@ -1408,12 +1412,24 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
+ /*
+ * All the values in iwl_nvm_get_info_rsp v4 are the same as
+ * in v3, except for the channel profile part of the
+ * regulatory. So we can access everything through the new struct
+ * and only the channel profile pointer is selected by version below.
+ */
+ struct iwl_nvm_get_info_rsp *rsp;
+ struct iwl_nvm_get_info_rsp_v3 *rsp_v3;
+ bool v4 = fw_has_api(&fw->ucode_capa,
+ IWL_UCODE_TLV_API_REGULATORY_NVM_INFO);
+ size_t rsp_size = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
+ void *channel_profile;
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
return ERR_PTR(ret);
- if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
+ if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != rsp_size,
"Invalid payload len in NVM response from FW %d",
iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
ret = -EINVAL;
@@ -1475,11 +1491,15 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
}
+ rsp_v3 = (void *)rsp;
+ channel_profile = v4 ? (void *)rsp->regulatory.channel_profile :
+ (void *)rsp_v3->regulatory.channel_profile;
+
iwl_init_sbands(trans->dev, trans->cfg, nvm,
- rsp->regulatory.channel_profile,
+ channel_profile,
nvm->valid_tx_ant & fw->valid_tx_ant,
nvm->valid_rx_ant & fw->valid_rx_ant,
- sbands_flags);
+ sbands_flags, v4);
iwl_free_resp(&hcmd);
return nvm;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 1af9f9e1ecd4..8e6a0c363c0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -368,6 +368,12 @@
#define MON_BUFF_WRPTR_VER2 (0xa03c24)
#define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28)
#define MON_BUFF_SHIFT_VER2 (0x8)
+/* FW monitor family AX210 and on */
+#define DBGC_CUR_DBGBUF_BASE_ADDR_LSB (0xd03c20)
+#define DBGC_CUR_DBGBUF_BASE_ADDR_MSB (0xd03c24)
+#define DBGC_CUR_DBGBUF_STATUS (0xd03c1c)
+#define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c)
+#define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff)
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index bbebbf3efd57..1e4c9ef548cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -274,7 +274,6 @@ struct iwl_rx_cmd_buffer {
bool _page_stolen;
u32 _rx_page_order;
unsigned int truesize;
- u8 status;
};
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
@@ -338,7 +337,6 @@ enum iwl_d3_status {
* are sent
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -351,7 +349,6 @@ enum iwl_trans_status {
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
- STATUS_FW_WAIT_DUMP,
};
static inline int
@@ -618,6 +615,7 @@ struct iwl_trans_ops {
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
u32 dump_mask);
void (*debugfs_cleanup)(struct iwl_trans *trans);
+ void (*sync_nmi)(struct iwl_trans *trans);
};
/**
@@ -769,6 +767,7 @@ struct iwl_self_init_dram {
* @umac_error_event_table: addr of umac error table
* @error_event_table_tlv_status: bitmap that indicates what error table
* pointers was recevied via TLV. use enum &iwl_error_event_table_status
+ * @hw_error: equals true if hw error interrupt was received from the FW
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -831,7 +830,7 @@ struct iwl_trans {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
unsigned int error_event_table_tlv_status;
- wait_queue_head_t fw_halt_waitq;
+ bool hw_error;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -1239,10 +1238,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
iwl_op_mode_nic_error(trans->op_mode);
+}
- if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
- wake_up(&trans->fw_halt_waitq);
-
+static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+ if (trans->ops->sync_nmi)
+ trans->ops->sync_nmi(trans);
}
/*****************************************************
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 808bc6f363d0..60f5d337f16d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -385,10 +385,10 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
}
}
-static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
- struct cfg80211_wowlan *wowlan)
+static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan)
{
- struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
@@ -399,7 +399,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return 0;
cmd.len[0] = sizeof(*pattern_cmd) +
- wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
+ wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v1);
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
@@ -426,6 +426,50 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ pattern_cmd->patterns[i].pattern_type =
+ WOWLAN_PATTERN_TYPE_BITMASK;
+
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
+ pattern_cmd->patterns[i].u.bitmask.pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(pattern_cmd);
+ return err;
+}
+
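
The v2 pattern command sizes each bitmask as one bit per pattern byte, rounded up to whole bytes via DIV_ROUND_UP(pattern_len, 8). A tiny standalone example of that arithmetic; nothing here touches driver structures.

#include <stdio.h>

#define EX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pattern_len = 20;  /* bytes in the wake pattern */
	unsigned int mask_len = EX_DIV_ROUND_UP(pattern_len, 8);

	/* 20 pattern bytes need 20 mask bits, i.e. a 3-byte mask */
	printf("pattern_len=%u -> mask_len=%u\n", pattern_len, mask_len);
	return 0;
}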
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *ap_sta)
{
@@ -851,7 +895,11 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (ret)
return ret;
- ret = iwl_mvm_send_patterns(mvm, wowlan);
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
+ ret = iwl_mvm_send_patterns(mvm, wowlan);
+ else
+ ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
return ret;
@@ -1728,9 +1776,12 @@ void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
}
+#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
+ IWL_SCAN_MAX_PROFILES)
+
struct iwl_mvm_nd_query_results {
u32 matched_profiles;
- struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+ u8 matches[ND_QUERY_BUF_LEN];
};
static int
@@ -1743,6 +1794,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
.flags = CMD_WANT_SKB,
};
int ret, len;
+ size_t query_len, matches_len;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
@@ -1750,8 +1802,19 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
return ret;
}
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ query_len = sizeof(struct iwl_scan_offload_profiles_query);
+ matches_len = sizeof(struct iwl_scan_offload_profile_match) *
+ IWL_SCAN_MAX_PROFILES;
+ } else {
+ query_len = sizeof(struct iwl_scan_offload_profiles_query_v1);
+ matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) *
+ IWL_SCAN_MAX_PROFILES;
+ }
+
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
- if (len < sizeof(*query)) {
+ if (len < query_len) {
IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
ret = -EIO;
goto out_free_resp;
@@ -1760,7 +1823,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
query = (void *)cmd.resp_pkt->data;
results->matched_profiles = le32_to_cpu(query->matched_profiles);
- memcpy(results->matches, query->matches, sizeof(results->matches));
+ memcpy(results->matches, query->matches, matches_len);
#ifdef CONFIG_IWLWIFI_DEBUGFS
mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
@@ -1771,6 +1834,57 @@ out_free_resp:
return ret;
}
+static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_query_results *query,
+ int idx)
+{
+ int n_chans = 0, i;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ struct iwl_scan_offload_profile_match *matches =
+ (struct iwl_scan_offload_profile_match *)query->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
+ n_chans += hweight8(matches[idx].matching_channels[i]);
+ } else {
+ struct iwl_scan_offload_profile_match_v1 *matches =
+ (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
+ n_chans += hweight8(matches[idx].matching_channels[i]);
+ }
+
+ return n_chans;
+}
+
+static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_query_results *query,
+ struct cfg80211_wowlan_nd_match *match,
+ int idx)
+{
+ int i;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ struct iwl_scan_offload_profile_match *matches =
+ (struct iwl_scan_offload_profile_match *)query->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+ match->channels[match->n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ } else {
+ struct iwl_scan_offload_profile_match_v1 *matches =
+ (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+ match->channels[match->n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ }
+}
+
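
The two helpers above count and enumerate matched channels from a per-profile bitmap whose length differs between the v1 and v2 firmware APIs. A self-contained sketch of the same bit counting and bit-to-channel-index walk, with the bitmap length passed in; the callback-based iterator is an illustrative shape, not the driver's.

#include <stdint.h>
#include <stddef.h>

static int ex_popcount8(uint8_t v)
{
	int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

/* Number of matched channels: one set bit per channel. */
static int ex_num_matched(const uint8_t *bitmap, size_t len)
{
	int n = 0;
	size_t i;

	for (i = 0; i < len; i++)
		n += ex_popcount8(bitmap[i]);
	return n;
}

/* Invoke cb for every set bit, passing the channel index it encodes. */
static void ex_for_each_match(const uint8_t *bitmap, size_t len,
			      void (*cb)(size_t chan_idx, void *ctx), void *ctx)
{
	size_t i;

	for (i = 0; i < len * 8; i++)
		if (bitmap[i / 8] & (1u << (i % 8)))
			cb(i, ctx);
}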
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -1783,7 +1897,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct iwl_wowlan_status *fw_status;
unsigned long matched_profiles;
u32 reasons = 0;
- int i, j, n_matches, ret;
+ int i, n_matches, ret;
fw_status = iwl_mvm_get_wakeup_status(mvm);
if (!IS_ERR_OR_NULL(fw_status)) {
@@ -1817,14 +1931,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
goto out_report_nd;
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
- struct iwl_scan_offload_profile_match *fw_match;
struct cfg80211_wowlan_nd_match *match;
int idx, n_channels = 0;
- fw_match = &query.matches[i];
-
- for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
- n_channels += hweight8(fw_match->matching_channels[j]);
+ n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);
match = kzalloc(struct_size(match, channels, n_channels),
GFP_KERNEL);
@@ -1844,10 +1954,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (mvm->n_nd_channels < n_channels)
continue;
- for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
- if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
- match->channels[match->n_channels++] =
- mvm->nd_channels[j]->center_freq;
+ iwl_mvm_query_set_freqs(mvm, &query, match, i);
}
out_report_nd:
@@ -2030,7 +2137,6 @@ out:
* 2. We are using a unified image but had an error while exiting D3
*/
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
- set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
/*
* When switching images we return 1, which causes mac80211
* to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 2453ceabf00d..f043eefabb4e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -743,9 +743,8 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, vif, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
@@ -774,8 +773,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
-
- if (!mvmvif->dbgfs_dir) {
+ if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
dbgfs_dir);
return;
@@ -812,12 +810,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
mvm->debugfs_dir, buf);
- if (!mvmvif->dbgfs_slink)
- IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n",
- dbgfs_dir);
- return;
-err:
- IWL_ERR(mvm, "Can't create debugfs entity\n");
}
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 776b24f54200..d4ff6b44de2c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1349,7 +1349,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return 0;
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
- (count - 1));
+ (count - 1), NULL);
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
@@ -1696,9 +1696,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
- if (!debugfs_create_file(alias, mode, parent, mvm, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(alias, mode, parent, mvm, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
@@ -1709,9 +1708,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \
- if (!debugfs_create_file(alias, mode, parent, sta, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(alias, mode, parent, sta, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
@@ -2092,13 +2090,9 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
if (iwl_mvm_has_tlc_offload(mvm))
MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400);
-
- return;
-err:
- IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
}
-int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
struct dentry *bcast_dir __maybe_unused;
char buf[100];
@@ -2142,14 +2136,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
#endif
MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600);
- if (!debugfs_create_bool("enable_scan_iteration_notif",
- 0600,
- mvm->debugfs_dir,
- &mvm->scan_iter_notif_enabled))
- goto err;
- if (!debugfs_create_bool("drop_bcn_ap_mode", 0600,
- mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
- goto err;
+ debugfs_create_bool("enable_scan_iteration_notif", 0600,
+ mvm->debugfs_dir, &mvm->scan_iter_notif_enabled);
+ debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir,
+ &mvm->drop_bcn_ap_mode);
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
@@ -2157,13 +2147,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
bcast_dir = debugfs_create_dir("bcast_filtering",
mvm->debugfs_dir);
- if (!bcast_dir)
- goto err;
- if (!debugfs_create_bool("override", 0600,
- bcast_dir,
- &mvm->dbgfs_bcast_filtering.override))
- goto err;
+ debugfs_create_bool("override", 0600, bcast_dir,
+ &mvm->dbgfs_bcast_filtering.override);
MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
bcast_dir, 0600);
@@ -2175,35 +2161,26 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
- if (!debugfs_create_bool("d3_wake_sysassert", 0600,
- mvm->debugfs_dir, &mvm->d3_wake_sysassert))
- goto err;
- if (!debugfs_create_u32("last_netdetect_scans", 0400,
- mvm->debugfs_dir, &mvm->last_netdetect_scans))
- goto err;
+ debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
+ &mvm->d3_wake_sysassert);
+ debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
+ &mvm->last_netdetect_scans);
#endif
- if (!debugfs_create_u8("ps_disabled", 0400,
- mvm->debugfs_dir, &mvm->ps_disabled))
- goto err;
- if (!debugfs_create_blob("nvm_hw", 0400,
- mvm->debugfs_dir, &mvm->nvm_hw_blob))
- goto err;
- if (!debugfs_create_blob("nvm_sw", 0400,
- mvm->debugfs_dir, &mvm->nvm_sw_blob))
- goto err;
- if (!debugfs_create_blob("nvm_calib", 0400,
- mvm->debugfs_dir, &mvm->nvm_calib_blob))
- goto err;
- if (!debugfs_create_blob("nvm_prod", 0400,
- mvm->debugfs_dir, &mvm->nvm_prod_blob))
- goto err;
- if (!debugfs_create_blob("nvm_phy_sku", 0400,
- mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
- goto err;
- if (!debugfs_create_blob("nvm_reg", S_IRUSR,
- mvm->debugfs_dir, &mvm->nvm_reg_blob))
- goto err;
+ debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir,
+ &mvm->ps_disabled);
+ debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir,
+ &mvm->nvm_hw_blob);
+ debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir,
+ &mvm->nvm_sw_blob);
+ debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir,
+ &mvm->nvm_calib_blob);
+ debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir,
+ &mvm->nvm_prod_blob);
+ debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir,
+ &mvm->nvm_phy_sku_blob);
+ debugfs_create_blob("nvm_reg", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_reg_blob);
debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
@@ -2212,11 +2189,5 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
* exists (before the opmode exists which removes the target.)
*/
snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
- if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
- goto err;
-
- return 0;
-err:
- IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
- return -ENOMEM;
+ debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 94132cfd1f56..fec38a47696e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -187,12 +187,24 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 0; i < ETH_ALEN; i++)
cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
- if (vif->bss_conf.assoc)
+ if (vif->bss_conf.assoc) {
memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
- else
+
+ /* AP's TSF is only relevant if associated */
+ for (i = 0; i < req->n_peers; i++) {
+ if (req->peers[i].report_ap_tsf) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
+ return;
+ }
+ }
+ } else {
eth_broadcast_addr(cmd->range_req_bssid);
+ }
- /* TODO: fill in tsf_mac_id if needed */
+ /* Don't report AP's TSF */
cmd->tsf_mac_id = cpu_to_le32(0xff);
}
@@ -480,6 +492,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
int i;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
@@ -519,8 +532,15 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
int peer_idx;
if (new_api) {
- fw_ap = &fw_resp->ap[i];
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+ fw_ap = &fw_resp->ap[i];
+ else
+ fw_ap = (void *)&fw_resp_v6->ap[i];
+
result.final = fw_resp->ap[i].last_burst;
+ result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
+ result.ap_tsf_valid = 1;
} else {
/* the first part is the same for old and new APIs */
fw_ap = (void *)&fw_resp_v5->ap[i];
@@ -588,6 +608,11 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
mvm->ftm_initiator.req,
&result, GFP_KERNEL);
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+ IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n",
+ fw_ap->rttConfidence);
+
iwl_mvm_debug_range_resp(mvm, i, &result);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 00a47f6f1d81..ab68b5d53ec9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1121,7 +1121,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
+ if (ret != -ERFKILL)
+ iwl_fw_dbg_error_collect(&mvm->fwrt,
+ FW_DBG_TRIGGER_DRIVER);
goto error;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 6a70dece447d..53c217af13c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -262,9 +262,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
.preferred_tsf = NUM_TSF_IDS,
.found_vif = false,
};
- u32 ac;
- int ret, i, queue_limit;
- unsigned long used_hw_queues;
+ int ret, i;
lockdep_assert_held(&mvm->mutex);
@@ -341,37 +339,9 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvmvif->time_event_data.list);
mvmvif->time_event_data.id = TE_MAX;
- /* No need to allocate data queues to P2P Device MAC.*/
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
-
+ /* No need to allocate data queues to P2P Device MAC and NAN.*/
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
return 0;
- }
-
- /*
- * queues in mac80211 almost entirely independent of
- * the ones here - no real limit
- */
- queue_limit = IEEE80211_MAX_QUEUES;
-
- /*
- * Find available queues, and allocate them to the ACs. When in
- * DQA-mode they aren't really used, and this is done only so the
- * mac80211 ieee80211_check_queues() function won't fail
- */
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- u8 queue = find_first_zero_bit(&used_hw_queues, queue_limit);
-
- if (queue >= queue_limit) {
- IWL_ERR(mvm, "Failed to allocate queue\n");
- ret = -EIO;
- goto exit_fail;
- }
-
- __set_bit(queue, &used_hw_queues);
- vif->hw_queue[ac] = queue;
- }
/* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP ||
@@ -1143,9 +1113,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
ieee80211_tu_to_usec(data.beacon_int * rand /
100);
} else {
- mvmvif->ap_beacon_time =
- iwl_read_prph(mvm->trans,
- DEVICE_SYSTEM_TIME_REG);
+ mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm);
}
}
@@ -1573,6 +1541,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
rcu_read_lock();
vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]);
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
switch (vif->type) {
case NL80211_IFTYPE_AP:
@@ -1581,7 +1550,6 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
csa_vif != vif))
goto out_unlock;
- mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
if (WARN(csa_id != id_n_color,
"channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
@@ -1602,6 +1570,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
return;
case NL80211_IFTYPE_STATION:
iwl_mvm_csa_client_absent(mvm, vif);
+ cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
break;
default:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3a92c09d4692..5c52469288be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -420,6 +420,7 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
const static u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
@@ -597,6 +598,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ hw->wiphy->features |= NL80211_FEATURE_HT_IBSS;
+
hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
if (iwl_mvm_is_lar_supported(mvm))
hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
@@ -732,6 +736,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
hw->wiphy->num_iftype_ext_capab =
ARRAY_SIZE(he_iftypes_ext_capa);
+
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
}
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
@@ -1191,15 +1198,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
- /* clear the D3 reconfig, we only need it to avoid dumping a
- * firmware coredump on reconfiguration, we shouldn't do that
- * on D3->D0 transition
- */
- if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
- mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
- iwl_fw_error_dump(&mvm->fwrt);
- }
-
/* cleanup all stale references (scan, roc), but keep the
* ucode_down ref until reconfig is complete
*/
@@ -1500,6 +1498,91 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
+static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvmvif->csa_failed) {
+ mvmvif->csa_failed = false;
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct iwl_mvm_sta *mvmsta;
+
+ mvmvif->csa_bcn_pending = false;
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
+ mvmvif->ap_sta_id);
+
+ if (WARN_ON(!mvmsta)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_unlock;
+
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ mvmvif->ps_disabled = false;
+
+ ret = iwl_mvm_power_update_ps(mvm);
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_chan_switch_te_cmd cmd = {
+ .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+
+ IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
+
+ mutex_lock(&mvm->mutex);
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ 0, sizeof(cmd), &cmd));
+ mutex_unlock(&mvm->mutex);
+
+ WARN_ON(iwl_mvm_post_channel_switch(hw, vif));
+}
+
+static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+
+ mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work);
+ vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+ mvm = mvmvif->mvm;
+
+ iwl_mvm_abort_channel_switch(mvm->hw, vif);
+ ieee80211_chswitch_done(vif, false);
+}
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1626,6 +1709,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
}
iwl_mvm_tcm_add_vif(mvm, vif);
+ INIT_DELAYED_WORK(&mvmvif->csa_work,
+ iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR)
mvm->monitor_on = true;
@@ -2127,6 +2212,10 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
};
+ int size = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_MBSSID_HE) ?
+ sizeof(sta_ctxt_cmd) :
+ sizeof(struct iwl_he_sta_context_cmd_v1);
struct ieee80211_sta *sta;
u32 flags;
int i;
@@ -2254,16 +2343,18 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
/* Set the PPE thresholds accordingly */
if (low_th >= 0 && high_th >= 0) {
- u8 ***pkt_ext_qam =
- (void *)sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th;
+ struct iwl_he_pkt_ext *pkt_ext =
+ (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
u8 bw;
for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
bw++) {
- pkt_ext_qam[i][bw][0] = low_th;
- pkt_ext_qam[i][bw][1] = high_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][0] =
+ low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] =
+ high_th;
}
}
@@ -2308,13 +2399,23 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
}
- /* TODO: support Multi BSSID IE */
+ if (vif->bss_conf.nontransmitted) {
+ flags |= STA_CTXT_HE_REF_BSSID_VALID;
+ ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr,
+ vif->bss_conf.transmitter_bssid);
+ sta_ctxt_cmd.max_bssid_indicator =
+ vif->bss_conf.bssid_indicator;
+ sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index;
+ sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap;
+ sta_ctxt_cmd.profile_periodicity =
+ vif->bss_conf.profile_periodicity;
+ }
sta_ctxt_cmd.flags = cpu_to_le32(flags);
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
DATA_PATH_GROUP, 0),
- 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+ 0, size, &sta_ctxt_cmd))
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
}
@@ -2714,9 +2815,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_remove(mvm, vif);
- kfree(mvmvif->ap_wep_key);
- mvmvif->ap_wep_key = NULL;
-
mutex_unlock(&mvm->mutex);
}
@@ -3183,24 +3281,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_update_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
- /* if wep is used, need to set the key for the station now */
- if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
- mvm_sta->wep_key =
- kmemdup(mvmvif->ap_wep_key,
- sizeof(*mvmvif->ap_wep_key) +
- mvmvif->ap_wep_key->keylen,
- GFP_KERNEL);
- if (!mvm_sta->wep_key) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- ret = iwl_mvm_set_sta_key(mvm, vif, sta,
- mvm_sta->wep_key,
- STA_KEY_IDX_INVALID);
- } else {
- ret = 0;
- }
+ ret = 0;
/* we don't support TDLS during DCM */
if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3242,17 +3323,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
NL80211_TDLS_DISABLE_LINK);
}
- /* Remove STA key if this is an AP using WEP */
- if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
- int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
- mvm_sta->wep_key);
-
- if (!ret)
- ret = rm_ret;
- kfree(mvm_sta->wep_key);
- mvm_sta->wep_key = NULL;
- }
-
if (unlikely(ret &&
test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status)))
@@ -3289,6 +3359,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u32 changed)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (changed & (IEEE80211_RC_BW_CHANGED |
+ IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED))
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ true);
if (vif->type == NL80211_IFTYPE_STATION &&
changed & IEEE80211_RC_NSS_CHANGED)
@@ -3439,20 +3516,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- if (vif->type == NL80211_IFTYPE_AP) {
- struct iwl_mvm_vif *mvmvif =
- iwl_mvm_vif_from_mac80211(vif);
-
- mvmvif->ap_wep_key = kmemdup(key,
- sizeof(*key) + key->keylen,
- GFP_KERNEL);
- if (!mvmvif->ap_wep_key)
- return -ENOMEM;
- }
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return 0;
- break;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ break;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -EOPNOTSUPP;
+ /* support HW crypto on TX */
+ return 0;
default:
/* currently FW supports only one optional cipher scheme */
if (hw->n_cipher_schemes &&
@@ -3540,12 +3609,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
if (ret) {
IWL_WARN(mvm, "set key failed\n");
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
/*
* can't add key for RX, but we don't need it
- * in the device for TX so still return 0
+ * in the device for TX so still return 0,
+ * unless we have new TX API where we cannot
+ * put key material into the TX_CMD
*/
- key->hw_key_idx = STA_KEY_IDX_INVALID;
- ret = 0;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ ret = -EOPNOTSUPP;
+ else
+ ret = 0;
}
break;
@@ -3639,7 +3713,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
int duration)
{
- int res, time_reg = DEVICE_SYSTEM_TIME_REG;
+ int res;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
static const u16 time_event_response[] = { HOT_SPOT_CMD };
@@ -3665,7 +3739,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
0);
/* Set the time and duration */
- tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg));
+ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm));
delay = AUX_ROC_MIN_DELAY;
req_dur = MSEC_TO_TU(duration);
@@ -4391,8 +4465,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
int err;
u32 noa_duration;
- err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
- NULL);
+ err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len,
+ iwl_mvm_tm_policy, NULL);
if (err)
return err;
@@ -4469,16 +4543,22 @@ static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm,
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.tsf = cpu_to_le32(chsw->timestamp),
.cs_count = chsw->count,
+ .cs_mode = chsw->block_tx,
};
lockdep_assert_held(&mvm->mutex);
+ if (chsw->delay)
+ cmd.cs_delayed_bcn_count =
+ DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int);
+
return iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP,
CHANNEL_SWITCH_TIME_EVENT_CMD),
0, sizeof(cmd), &cmd);
}
+#define IWL_MAX_CSA_BLOCK_TX 1500
static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
@@ -4543,8 +4623,18 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
((vif->bss_conf.beacon_int * (chsw->count - 1) -
IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
- if (chsw->block_tx)
+ if (chsw->block_tx) {
iwl_mvm_csa_client_absent(mvm, vif);
+ /*
+ * If the switch time is undetermined or too long while TX is
+ * blocked (immediate quiet), schedule work to gracefully disconnect
+ */
+ if (!chsw->count ||
+ chsw->count * vif->bss_conf.beacon_int >
+ IWL_MAX_CSA_BLOCK_TX)
+ schedule_delayed_work(&mvmvif->csa_work,
+ msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX));
+ }
if (mvmvif->bf_data.bf_enabled) {
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
@@ -4559,6 +4649,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
iwl_mvm_schedule_csa_period(mvm, vif,
vif->bss_conf.beacon_int,
apply_time);
+
+ mvmvif->csa_count = chsw->count;
+ mvmvif->csa_misbehave = false;
break;
default:
break;
@@ -4579,52 +4672,42 @@ out_unlock:
return ret;
}
-static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
-
- if (mvmvif->csa_failed) {
- mvmvif->csa_failed = false;
- ret = -EIO;
- goto out_unlock;
- }
-
- if (vif->type == NL80211_IFTYPE_STATION) {
- struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_chan_switch_te_cmd cmd = {
+ .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
+ .tsf = cpu_to_le32(chsw->timestamp),
+ .cs_count = chsw->count,
+ .cs_mode = chsw->block_tx,
+ };
- mvmvif->csa_bcn_pending = false;
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
- mvmvif->ap_sta_id);
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
+ return;
- if (WARN_ON(!mvmsta)) {
- ret = -EIO;
- goto out_unlock;
+ if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
+ if (mvmvif->csa_misbehave) {
+ /* Second time, give up on this AP */
+ iwl_mvm_abort_channel_switch(hw, vif);
+ ieee80211_chswitch_done(vif, false);
+ mvmvif->csa_misbehave = false;
+ return;
}
-
- iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
-
- iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
-
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
- if (ret)
- goto out_unlock;
-
- iwl_mvm_stop_session_protection(mvm, vif);
+ mvmvif->csa_misbehave = true;
}
+ mvmvif->csa_count = chsw->count;
- mvmvif->ps_disabled = false;
-
- ret = iwl_mvm_power_update_ps(mvm);
+ IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id);
-out_unlock:
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ CMD_ASYNC, sizeof(cmd), &cmd));
}
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
@@ -5083,6 +5166,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.channel_switch = iwl_mvm_channel_switch,
.pre_channel_switch = iwl_mvm_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
+ .abort_channel_switch = iwl_mvm_abort_channel_switch,
+ .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index bca6f6b536d9..8dc2a9850bc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -490,6 +490,9 @@ struct iwl_mvm_vif {
bool csa_countdown;
bool csa_failed;
u16 csa_target_freq;
+ u16 csa_count;
+ u16 csa_misbehave;
+ struct delayed_work csa_work;
/* Indicates that we are waiting for a beacon on a new channel */
bool csa_bcn_pending;
@@ -498,7 +501,6 @@ struct iwl_mvm_vif {
netdev_features_t features;
struct iwl_probe_resp_data __rcu *probe_resp_data;
- struct ieee80211_key_conf *ap_wep_key;
};
static inline struct iwl_mvm_vif *
@@ -1200,7 +1202,6 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
- * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
*/
@@ -1212,7 +1213,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
- IWL_MVM_STATUS_D3_RECONFIG,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
};
@@ -1538,6 +1538,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
+u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -1650,8 +1651,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
-void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
- struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
@@ -1785,14 +1786,13 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
+void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#else
-static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
- struct dentry *dbgfs_dir)
+static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
+ struct dentry *dbgfs_dir)
{
- return 0;
}
static inline void
iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2024,17 +2024,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
- /* If IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set then we received
- * an assert. Since we failed to bring the interface up, mac80211
- * will not attempt to reconfig the device,
- * which handles the dump collection in assert flow,
- * so trigger dump collection here.
- */
- if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status))
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
-
iwl_fw_cancel_timestamp(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
iwl_fwrt_stop_device(&mvm->fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ba27dce4c2bb..acd2fda12466 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -834,7 +834,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
- if (err)
+ if (err && err != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
@@ -862,9 +862,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
min_backoff = iwl_mvm_min_backoff(mvm);
iwl_mvm_thermal_initialize(mvm, min_backoff);
- err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
- if (err)
- goto out_unregister;
+ iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
if (!iwl_mvm_has_new_rx_stats_api(mvm))
memset(&mvm->rx_stats_v3, 0,
@@ -881,14 +879,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return op_mode;
- out_unregister:
- if (iwlmvm_mod_params.init_dbg)
- return op_mode;
-
- ieee80211_unregister_hw(mvm->hw);
- mvm->hw_registered = false;
- iwl_mvm_leds_exit(mvm);
- iwl_mvm_thermal_exit(mvm);
out_free:
iwl_fw_flush_dump(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
@@ -1105,7 +1095,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
- iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0);
+ iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
@@ -1271,6 +1261,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
iwl_abort_notification_waits(&mvm->notif_wait);
+ del_timer(&mvm->fwrt.dump.periodic_trig);
/*
* This is a bit racy, but worst case we tell mac80211 about
@@ -1291,8 +1282,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
* can't recover this since we're already half suspended.
*/
if (!mvm->fw_restart && fw_error) {
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ iwl_fw_error_collect(&mvm->fwrt);
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
@@ -1340,6 +1330,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
}
}
+ iwl_fw_error_collect(&mvm->fwrt);
+
if (fw_error && mvm->fw_restart > 0)
mvm->fw_restart--;
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index a28283ff7295..659e21b2d4e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -116,8 +116,9 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
return supp;
}
-static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
+static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct ieee80211_supported_band *sband)
{
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
@@ -147,6 +148,12 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+ /* consider our LDPC support in case of HE */
+ if (sband->iftype_data && sband->iftype_data->he_cap.has_he &&
+ !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
if (he_cap && he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
@@ -223,19 +230,43 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
static void
rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
- const struct ieee80211_sta_he_cap *he_cap,
+ struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
{
- u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
- u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ u16 tx_mcs_80 =
+ le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80);
+ u16 tx_mcs_160 =
+ le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
int i;
for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
-
+ u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
+ u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
+
+ /* If one side doesn't support - mark both as not supporting */
+ if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_80 > _tx_mcs_80)
+ _mcs_80 = _tx_mcs_80;
cmd->ht_rates[i][0] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+
+ /* If one side doesn't support - mark both as not supporting */
+ if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_160 > _tx_mcs_160)
+ _mcs_160 = _tx_mcs_160;
cmd->ht_rates[i][1] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
}
@@ -264,7 +295,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
/* HT/VHT rates */
if (he_cap && he_cap->has_he) {
cmd->mode = IWL_TLC_MNG_MODE_HE;
- rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+ rs_fw_he_set_enabled_rates(sta, sband, cmd);
} else if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
@@ -345,6 +376,37 @@ out:
rcu_read_unlock();
}
+static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+
+ if (vht_cap && vht_cap->vht_supported) {
+ switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ return IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+ return IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ default:
+ return IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ }
+
+ } else if (ht_cap && ht_cap->ht_supported) {
+ if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
+ /*
+ * aggregation is offloaded, so we need to assume that
+ * aggregation is enabled and the max MPDU in an A-MPDU is 4095
+ * (spec 802.11-2016 9.3.2.1)
+ */
+ return IEEE80211_MAX_MPDU_LEN_HT_BA;
+ else
+ return IEEE80211_MAX_MPDU_LEN_HT_3839;
+ }
+
+ /* in legacy mode no amsdu is enabled so return zero */
+ return 0;
+}
+
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool update)
{
@@ -352,15 +414,16 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
- struct ieee80211_supported_band *sband;
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+ u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd cfg_cmd = {
.sta_id = mvmsta->sta_id,
.max_ch_width = update ?
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
- .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+ .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
- .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+ .max_mpdu_len = cpu_to_le16(max_amsdu_len),
.amsdu = iwl_mvm_is_csum_supported(mvm),
};
int ret;
@@ -370,9 +433,14 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm);
#endif
- sband = hw->wiphy->bands[band];
rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+ /*
+ * since TLC offload works with one mode, we can assume
+ * that only VHT/HT is used and also set it as the station max amsdu
+ */
+ sta->max_amsdu_len = max_amsdu_len;
+
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index e231a44d2423..c182821ab22b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4078,9 +4078,8 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, lq_sta, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, lq_sta, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
@@ -4108,9 +4107,6 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
&lq_sta->pers.dbg_fixed_txp_reduction);
MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600);
- return;
-err:
- IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
}
void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1e03acf30762..1824566d08fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -169,9 +169,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
}
/* iwl_mvm_create_skb Adds the rxb to a new skb */
-static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
- u16 len, u8 crypt_len,
- struct iwl_rx_cmd_buffer *rxb)
+static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
@@ -204,6 +204,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
* present before copying packet data.
*/
hdrlen += crypt_len;
+
+ if (WARN_ONCE(headlen < hdrlen,
+ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+ hdrlen, len, crypt_len)) {
+ /*
+ * We warn and trace because we want to be able to see
+ * it in trace-cmd as well.
+ */
+ IWL_DEBUG_RX(mvm,
+ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+ hdrlen, len, crypt_len);
+ return -EINVAL;
+ }
+
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
@@ -216,6 +230,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
}
+
+ return 0;
}
static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
@@ -1671,7 +1687,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->boottime_ns = ktime_get_boot_ns();
}
- iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
+ if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
sta, csi);
@@ -1679,8 +1699,8 @@ out:
rcu_read_unlock();
}
-void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
- struct iwl_rx_cmd_buffer *rxb, int queue)
+void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
{
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1701,10 +1721,6 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- /* Currently only NDP type is supported */
- if (info_type != RX_NO_DATA_INFO_TYPE_NDP)
- return;
-
energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
@@ -1726,9 +1742,22 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
/* 0-length PSDU */
rx_status->flag |= RX_FLAG_NO_PSDU;
- /* currently this is the only type for which we get this notif */
- rx_status->zero_length_psdu_type =
- IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+
+ switch (info_type) {
+ case RX_NO_DATA_INFO_TYPE_NDP:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+ break;
+ case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
+ case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
+ break;
+ default:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
+ break;
+ }
/* This may be overridden by iwl_mvm_rx_he() to HE_RU */
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 78694bc38e76..d9ddf9ff6428 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1082,21 +1082,23 @@ static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
dwell->extended = IWL_SCAN_DWELL_EXTENDED;
}
-static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
+static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels,
+ u32 max_channels)
{
struct ieee80211_supported_band *band;
int i, j = 0;
band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
- for (i = 0; i < band->n_channels; i++, j++)
+ for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
channels[j] = band->channels[i].hw_value;
band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
- for (i = 0; i < band->n_channels; i++, j++)
+ for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
channels[j] = band->channels[i].hw_value;
}
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags)
+ u32 flags, u8 channel_flags,
+ u32 max_channels)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
struct iwl_scan_config_v1 *cfg = config;
@@ -1115,11 +1117,12 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
- iwl_mvm_fill_channels(mvm, cfg->channel_array);
+ iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
}
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags)
+ u32 flags, u8 channel_flags,
+ u32 max_channels)
{
struct iwl_scan_config *cfg = config;
@@ -1162,7 +1165,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
- iwl_mvm_fill_channels(mvm, cfg->channel_array);
+ iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
}
int iwl_mvm_config_scan(struct iwl_mvm *mvm)
@@ -1181,7 +1184,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
u8 channel_flags;
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
- return -ENOBUFS;
+ num_channels = mvm->fw->ucode_capa.n_scan_channels;
if (iwl_mvm_is_cdb_supported(mvm)) {
type = iwl_mvm_get_scan_type_band(mvm, NULL,
@@ -1234,9 +1237,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
- iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+ iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
+ num_channels);
} else {
- iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
+ iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
+ num_channels);
}
cmd.data[0] = cfg;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 498c315291cf..f545a737a92d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -746,7 +746,8 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
- int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+ int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
+ mvm->trans->cfg->min_256_ba_txq_size);
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
@@ -1399,7 +1400,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
list_del_init(&mvmtxq->list);
+ local_bh_disable();
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ local_bh_enable();
}
mutex_unlock(&mvm->mutex);
@@ -2107,12 +2110,14 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (!iwl_mvm_has_new_tx_api(mvm)) {
if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)
+ vif->type == NL80211_IFTYPE_ADHOC) {
queue = mvm->probe_queue;
- else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
queue = mvm->p2p_dev_queue;
- else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
+ } else {
+ WARN(1, "Missing required TXQ for adding bcast STA\n");
return -EINVAL;
+ }
bsta->tfd_queue_msk |= BIT(queue);
@@ -2275,7 +2280,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
const u8 *maddr = _maddr;
struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = IWL_MVM_TX_FIFO_MCAST,
+ .fifo = vif->type == NL80211_IFTYPE_AP ?
+ IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
.sta_id = msta->sta_id,
.tid = 0,
.aggregate = false,
@@ -2333,21 +2339,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
timeout);
- if (mvmvif->ap_wep_key) {
- u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
- __set_bit(key_offset, mvm->fw_key_table);
-
- if (key_offset == STA_KEY_IDX_INVALID)
- return -ENOSPC;
-
- ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
- mvmvif->ap_wep_key, true, 0, NULL, 0,
- key_offset, 0);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -2419,28 +2410,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
- if (mvmvif->ap_wep_key) {
- int i;
-
- if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
- mvm->fw_key_table)) {
- IWL_ERR(mvm, "offset %d not used in fw key table.\n",
- mvmvif->ap_wep_key->hw_key_idx);
- return -ENOENT;
- }
-
- /* track which key was deleted last */
- for (i = 0; i < STA_KEY_MAX_NUM; i++) {
- if (mvm->fw_key_deleted[i] < U8_MAX)
- mvm->fw_key_deleted[i]++;
- }
- mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
- ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
- mvmvif->ap_wep_key, true);
- if (ret)
- return ret;
- }
-
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 79700c7310a1..b4d4071b865d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
* the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
- * @wep_key: used in AP mode. Is a duplicate of the WEP key.
* @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
- struct ieee80211_key_conf *wep_key;
-
u8 reserved_queue;
/* Temporary, until the new TLC will control the Tx protection */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 859aa5a4e6b5..9df21a8d1fc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 Intel Corporation
+ * Copyright(C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 Intel Corporation
+ * Copyright(C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -252,8 +252,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
/* we only send requests to our switching peer - update sent time */
if (state == IWL_MVM_TDLS_SW_REQ_SENT)
- mvm->tdls_cs.peer.sent_timestamp =
- iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);
if (state == IWL_MVM_TDLS_SW_IDLE)
mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 9693fa4cdc39..4d34e5ab1bff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -234,6 +234,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
break;
}
iwl_mvm_csa_client_absent(mvm, te_data->vif);
+ cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(te_data->vif, true);
break;
default:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 4649327abb45..b9914efc55c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1418,6 +1418,16 @@ void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
}
+u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
+{
+ u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
+ mvm->trans->cfg->gp2_reg_addr)
+ reg_addr = mvm->trans->cfg->gp2_reg_addr;
+
+ return iwl_read_prph(mvm->trans, reg_addr);
+}
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
@@ -1432,7 +1442,7 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
iwl_mvm_power_update_device(mvm);
}
- *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ *gp2 = iwl_mvm_get_systime(mvm);
*boottime = ktime_get_boot_ns();
if (!ps_disabled) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 1e36459948db..f496d1bcb643 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -66,7 +66,8 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
void *iml_img;
u32 control_flags = 0;
int ret;
- int cmdq_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
+ int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->cfg->min_txq_size);
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 9274e317cc77..8969b47bacf2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -210,7 +210,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
- TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
+ TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2b94e4cef56c..cd035061cdd5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -928,11 +928,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)},
@@ -953,17 +948,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
-
- {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
- {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
@@ -1046,9 +1039,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* register transport layer debugfs here */
- ret = iwl_trans_pcie_dbgfs_register(iwl_trans);
- if (ret)
- goto out_free_drv;
+ iwl_trans_pcie_dbgfs_register(iwl_trans);
/* if RTPM is in use, enable it in our device */
if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
@@ -1077,8 +1068,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-out_free_drv:
- iwl_drv_stop(iwl_trans->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index bf8b61a476c5..b513037dc066 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -106,7 +106,6 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
- * @size: size used from the buffer
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -114,7 +113,6 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
- u32 size;
};
/**
@@ -135,46 +133,32 @@ struct isr_statistics {
u32 unhandled;
};
-#define IWL_RX_TD_TYPE_MSK 0xff000000
-#define IWL_RX_TD_SIZE_MSK 0x00ffffff
-#define IWL_RX_TD_SIZE_2K BIT(11)
-#define IWL_RX_TD_TYPE 0
-
/**
* struct iwl_rx_transfer_desc - transfer descriptor
- * @type_n_size: buffer type (bit 0: external buff valid,
- * bit 1: optional footer valid, bit 2-7: reserved)
- * and buffer size
* @addr: ptr to free buffer start address
* @rbid: unique tag of the buffer
* @reserved: reserved
*/
struct iwl_rx_transfer_desc {
- __le32 type_n_size;
- __le64 addr;
__le16 rbid;
- __le16 reserved;
+ __le16 reserved[3];
+ __le64 addr;
} __packed;
-#define IWL_RX_CD_SIZE 0xffffff00
+#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
/**
* struct iwl_rx_completion_desc - completion descriptor
- * @type: buffer type (bit 0: external buff valid,
- * bit 1: optional footer valid, bit 2-7: reserved)
- * @status: status of the completion
* @reserved1: reserved
* @rbid: unique tag of the received buffer
- * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @flags: flags (0: fragmented, all others: reserved)
* @reserved2: reserved
*/
struct iwl_rx_completion_desc {
- u8 type;
- u8 status;
- __le16 reserved1;
+ __le32 reserved1;
__le16 rbid;
- __le32 size;
- u8 reserved2[22];
+ u8 flags;
+ u8 reserved2[25];
} __packed;
/**
@@ -306,10 +290,6 @@ struct iwl_cmd_meta {
u32 tbs;
};
-
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
/*
* The FH will write back to the first TB only, so we need to copy some data
* into the buffer regardless of whether it should be mapped or not.
@@ -556,7 +536,7 @@ struct iwl_trans_pcie {
int ict_index;
bool use_ict;
bool is_down, opmode_down;
- bool debug_rfkill;
+ s8 debug_rfkill;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
@@ -1002,7 +982,7 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
lockdep_assert_held(&trans_pcie->mutex);
- if (trans_pcie->debug_rfkill)
+ if (trans_pcie->debug_rfkill == 1)
return true;
return !(iwl_read32(trans, CSR_GP_CNTRL) &
@@ -1043,15 +1023,12 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
+void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
-static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
-{
- return 0;
-}
+static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
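Note on the debug_rfkill change above (and the matching hunks in trans.c further down): the field becomes a signed tri-state so that -1 can mean "never written from debugfs", while only an explicit 1 overrides the hardware state in iwl_is_rfkill_set(). A minimal sketch of that check, with hypothetical names (demo_is_rfkill_set, hw_asserted) that are not part of the patch:

#include <linux/types.h>

/*
 * Illustrative only: -1 = debugfs knob never written, 0/1 = last value
 * written; only 1 forces rfkill on, anything else reports the real
 * hardware state.
 */
static bool demo_is_rfkill_set(s8 debug_rfkill, bool hw_asserted)
{
	if (debug_rfkill == 1)
		return true;
	return hw_asserted;
}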
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 8d4f0628622b..31b3591f71d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -282,9 +282,8 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
- bd[rxq->write].type_n_size =
- cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
- ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+ BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
+
bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
} else {
@@ -435,7 +434,7 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
/*
* Issue an error if we don't have enough pre-allocated
* buffers.
-` */
+ */
if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
IWL_CRIT(trans,
"Failed to alloc_pages\n");
@@ -1265,9 +1264,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
.truesize = max_len,
};
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- rxcb.status = rxq->cd[i].status;
-
pkt = rxb_addr(&rxcb);
if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
@@ -1394,6 +1390,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb;
u16 vid;
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+
if (!trans->cfg->mq_rx_supported) {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
@@ -1415,9 +1413,6 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
-
rxb->invalid = true;
return rxb;
@@ -1434,10 +1429,15 @@ out_err:
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
+ struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;
+ if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
+ return;
+
+ rxq = &trans_pcie->rxq[queue];
+
restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
@@ -2212,6 +2212,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
"Hardware error detected. Restarting.\n");
isr_stats->hw++;
+ trans->hw_error = true;
iwl_pcie_irq_handle_error(trans);
}
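The two BUILD_BUG_ON() checks added in this file pin the descriptor layouts at compile time now that the size/type fields are gone. A self-contained sketch of the same idiom, using a hypothetical demo_desc that merely mirrors the 16-byte transfer-descriptor layout from internal.h above:

#include <linux/build_bug.h>
#include <linux/types.h>

/* Illustrative only: a packed DMA descriptor whose size must match what
 * the hardware expects; the check fails the build if the layout drifts. */
struct demo_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

static inline void demo_check_layout(void)
{
	BUILD_BUG_ON(sizeof(struct demo_desc) != 2 * sizeof(u64));
}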
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 9c203ca75de9..8507a7bdcfdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -234,7 +234,8 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int queue_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
+ int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->cfg->min_txq_size);
/* TODO: most of the logic can be removed in A0 - but not in Z0 */
spin_lock(&trans_pcie->irq_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index fe8269d023de..803fcbac4152 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -896,6 +896,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
if (!trans->num_blocks)
return;
+ IWL_DEBUG_FW(trans,
+ "WRT: applying DRAM buffer[0] destination\n");
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
trans->fw_mon[0].physical >>
MON_BUFF_SHIFT_VER2);
@@ -2067,7 +2069,6 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* MAC_ACCESS_REQ bit to be performed before any other writes
* scheduled on different CPUs (after we drop reg_lock).
*/
- mmiowb();
out:
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
@@ -2442,9 +2443,8 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, trans, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, trans, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
/* file operation */
@@ -2687,16 +2687,17 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool old = trans_pcie->debug_rfkill;
+ bool new_value;
int ret;
- ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill);
+ ret = kstrtobool_from_user(user_buf, count, &new_value);
if (ret)
return ret;
- if (old == trans_pcie->debug_rfkill)
+ if (new_value == trans_pcie->debug_rfkill)
return count;
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
- old, trans_pcie->debug_rfkill);
+ trans_pcie->debug_rfkill, new_value);
+ trans_pcie->debug_rfkill = new_value;
iwl_pcie_handle_rfkill_irq(trans);
return count;
@@ -2847,7 +2848,7 @@ static const struct file_operations iwl_dbgfs_monitor_data_ops = {
};
/* Create the debugfs files and directories */
-int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
+void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
struct dentry *dir = trans->dbgfs_dir;
@@ -2858,11 +2859,6 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
- return 0;
-
-err:
- IWL_ERR(trans, "failed to create the trans debugfs entry\n");
- return -ENOMEM;
}
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
@@ -3012,10 +3008,14 @@ static void
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
struct iwl_fw_error_dump_fw_mon *fw_mon_data)
{
- u32 base, write_ptr, wrap_cnt;
+ u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
- /* If there was a dest TLV - use the values from there */
- if (trans->ini_valid) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
+ base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
+ write_ptr = DBGC_CUR_DBGBUF_STATUS;
+ wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
+ } else if (trans->ini_valid) {
base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2);
write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2);
wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2);
@@ -3028,12 +3028,18 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
write_ptr = MON_BUFF_WRPTR;
wrap_cnt = MON_BUFF_CYCLE_CNT;
}
- fw_mon_data->fw_mon_wr_ptr =
- cpu_to_le32(iwl_read_prph(trans, write_ptr));
+
+ write_ptr_val = iwl_read_prph(trans, write_ptr);
fw_mon_data->fw_mon_cycle_cnt =
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
cpu_to_le32(iwl_read_prph(trans, base));
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ fw_mon_data->fw_mon_base_high_ptr =
+ cpu_to_le32(iwl_read_prph(trans, base_high));
+ write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
+ }
+ fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
}
static u32
@@ -3044,9 +3050,10 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
u32 len = 0;
if ((trans->num_blocks &&
- trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
- (trans->dbg_dest_tlv && !trans->ini_valid) ||
- (trans->ini_valid && trans->num_blocks)) {
+ (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
+ trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210 ||
+ trans->ini_valid)) ||
+ (trans->dbg_dest_tlv && !trans->ini_valid)) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
@@ -3165,8 +3172,10 @@ static struct iwl_trans_dump_data
len = sizeof(*dump_data);
/* host commands */
- len += sizeof(*data) +
- cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD))
+ len += sizeof(*data) +
+ cmdq->n_window * (sizeof(*txcmd) +
+ TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
@@ -3318,7 +3327,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.unref = iwl_trans_pcie_unref, \
.dump_data = iwl_trans_pcie_dump_data, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
- .d3_resume = iwl_trans_pcie_d3_resume
+ .d3_resume = iwl_trans_pcie_d3_resume, \
+ .sync_nmi = iwl_trans_pcie_sync_nmi
#ifdef CONFIG_PM_SLEEP
#define IWL_TRANS_PM_OPS \
@@ -3411,7 +3421,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
ret = -ENOMEM;
goto out_no_pci;
}
-
+ trans_pcie->debug_rfkill = -1;
if (!cfg->base_params->pcie_l1_allowed) {
/*
@@ -3539,9 +3549,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
+ trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
@@ -3560,7 +3577,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- (trans->cfg != &iwl22260_2ax_cfg ||
+ (trans->cfg != &iwl_ax200_cfg_cc ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
@@ -3637,22 +3654,29 @@ out_no_pci:
return ERR_PTR(ret);
}
-void iwl_trans_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+ u32 inta_addr, sw_err_bit;
+
+ if (trans_pcie->msix_enabled) {
+ inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
+ sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
+ } else {
+ inta_addr = CSR_INT;
+ sw_err_bit = CSR_INT_BIT_SW_ERR;
+ }
iwl_disable_interrupts(trans);
iwl_force_nmi(trans);
while (time_after(timeout, jiffies)) {
- u32 inta_hw = iwl_read32(trans,
- CSR_MSIX_HW_INT_CAUSES_AD);
+ u32 inta_hw = iwl_read32(trans, inta_addr);
/* Error detected by uCode */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) {
+ if (inta_hw & sw_err_bit) {
/* Clear causes register */
- iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
- inta_hw &
- MSIX_HW_INT_CAUSES_REG_SW_ERR);
+ iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
break;
}
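For context on the iwl_trans_pcie_sync_nmi() rework above: it now picks the interrupt-cause register and SW-error bit based on whether MSI-X is enabled, then polls with a jiffies deadline. A generic sketch of that deadline-poll shape, assuming hypothetical read_status/bit arguments and a 250 ms budget (none of which come from the patch):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/types.h>

/* Illustrative only: poll a status source until a bit is set or the
 * deadline expires; must run in process context because it sleeps. */
static bool demo_poll_for_bit(u32 (*read_status)(void), u32 bit)
{
	unsigned long deadline = jiffies + HZ / 4;

	while (time_after(deadline, jiffies)) {
		if (read_status() & bit)
			return true;
		usleep_range(1000, 2000);	/* back off between polls */
	}
	return false;
}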
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 88530d9f4a54..38d110338987 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
cmd_str);
ret = -ETIMEDOUT;
- iwl_trans_sync_nmi(trans);
+ iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 9fbd37d23e85..fa4245d0d4a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -996,10 +996,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
if (cmd_queue)
- slots_num = max_t(u32, TFD_CMD_SLOTS,
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
- slots_num = TFD_TX_CMD_SLOTS;
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
+ trans->cfg->min_256_ba_txq_size);
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
@@ -1049,10 +1050,11 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
if (cmd_queue)
- slots_num = max_t(u32, TFD_CMD_SLOTS,
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
- slots_num = TFD_TX_CMD_SLOTS;
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
+ trans->cfg->min_256_ba_txq_size);
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
if (ret) {
@@ -1960,7 +1962,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id));
ret = -ETIMEDOUT;
- iwl_trans_sync_nmi(trans);
+ iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index 67b0c05afbdb..a324bc4b7938 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -65,7 +65,6 @@ int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
hdr[ETH_ALEN * 2 + 3] = 0;
desc->tfm = tfm_michael;
- desc->flags = 0;
err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
if (err)
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 27a49068d32d..57ad56435dda 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n");
- return err;
+ goto err_put;
}
mem_addr = pci_resource_start(pdev, 0);
@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
err_disable_dev:
pci_disable_device(pdev);
+err_put:
pci_dev_put(pdev);
return err;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0838af04d681..60ca13e0f15b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -409,8 +409,8 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy,
int err;
u32 val;
- err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len,
- hwsim_vendor_test_policy, NULL);
+ err = nla_parse_deprecated(tb, QCA_WLAN_VENDOR_ATTR_MAX, data,
+ data_len, hwsim_vendor_test_policy, NULL);
if (err)
return err;
if (!tb[QCA_WLAN_VENDOR_ATTR_TEST])
@@ -521,7 +521,7 @@ struct mac80211_hwsim_data {
unsigned int rx_filter;
bool started, idle, scanning;
struct mutex mutex;
- struct tasklet_hrtimer beacon_timer;
+ struct hrtimer beacon_timer;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
} ps;
@@ -1460,7 +1460,7 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
data->started = false;
- tasklet_hrtimer_cancel(&data->beacon_timer);
+ hrtimer_cancel(&data->beacon_timer);
wiphy_dbg(hw->wiphy, "%s\n", __func__);
}
@@ -1583,14 +1583,12 @@ static enum hrtimer_restart
mac80211_hwsim_beacon(struct hrtimer *timer)
{
struct mac80211_hwsim_data *data =
- container_of(timer, struct mac80211_hwsim_data,
- beacon_timer.timer);
+ container_of(timer, struct mac80211_hwsim_data, beacon_timer);
struct ieee80211_hw *hw = data->hw;
u64 bcn_int = data->beacon_int;
- ktime_t next_bcn;
if (!data->started)
- goto out;
+ return HRTIMER_NORESTART;
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -1601,12 +1599,9 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
bcn_int -= data->bcn_delta;
data->bcn_delta = 0;
}
-
- next_bcn = ktime_add(hrtimer_get_expires(timer),
- ns_to_ktime(bcn_int * 1000));
- tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS);
-out:
- return HRTIMER_NORESTART;
+ hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
+ ns_to_ktime(bcn_int * NSEC_PER_USEC));
+ return HRTIMER_RESTART;
}
static const char * const hwsim_chanwidths[] = {
@@ -1680,15 +1675,15 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&data->mutex);
if (!data->started || !data->beacon_int)
- tasklet_hrtimer_cancel(&data->beacon_timer);
- else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
+ hrtimer_cancel(&data->beacon_timer);
+ else if (!hrtimer_is_queued(&data->beacon_timer)) {
u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
u32 bcn_int = data->beacon_int;
u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
- tasklet_hrtimer_start(&data->beacon_timer,
- ns_to_ktime(until_tbtt * 1000),
- HRTIMER_MODE_REL);
+ hrtimer_start(&data->beacon_timer,
+ ns_to_ktime(until_tbtt * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
}
return 0;
@@ -1751,7 +1746,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
info->enable_beacon, info->beacon_int);
vp->bcn_en = info->enable_beacon;
if (data->started &&
- !hrtimer_is_queued(&data->beacon_timer.timer) &&
+ !hrtimer_is_queued(&data->beacon_timer) &&
info->enable_beacon) {
u64 tsf, until_tbtt;
u32 bcn_int;
@@ -1759,9 +1754,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
tsf = mac80211_hwsim_get_tsf(hw, vif);
bcn_int = data->beacon_int;
until_tbtt = bcn_int - do_div(tsf, bcn_int);
- tasklet_hrtimer_start(&data->beacon_timer,
- ns_to_ktime(until_tbtt * 1000),
- HRTIMER_MODE_REL);
+
+ hrtimer_start(&data->beacon_timer,
+ ns_to_ktime(until_tbtt * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
} else if (!info->enable_beacon) {
unsigned int count = 0;
ieee80211_iterate_active_interfaces_atomic(
@@ -1770,7 +1766,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u",
count);
if (count == 0) {
- tasklet_hrtimer_cancel(&data->beacon_timer);
+ hrtimer_cancel(&data->beacon_timer);
data->beacon_int = 0;
}
}
@@ -1936,8 +1932,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
struct sk_buff *skb;
int err, ps;
- err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len,
- hwsim_testmode_policy, NULL);
+ err = nla_parse_deprecated(tb, HWSIM_TM_ATTR_MAX, data, len,
+ hwsim_testmode_policy, NULL);
if (err)
return err;
@@ -2644,7 +2640,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
enum nl80211_band band;
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
struct net *net;
- int idx;
+ int idx, i;
int n_limits = 0;
if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2768,12 +2764,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
goto failed_hw;
}
+ data->if_combination.max_interfaces = 0;
+ for (i = 0; i < n_limits; i++)
+ data->if_combination.max_interfaces +=
+ data->if_limits[i].max;
+
data->if_combination.n_limits = n_limits;
- data->if_combination.max_interfaces = 2048;
data->if_combination.limits = data->if_limits;
- hw->wiphy->iface_combinations = &data->if_combination;
- hw->wiphy->n_iface_combinations = 1;
+ /*
+ * If we actually were asked to support combinations,
+ * advertise them - if there's only a single thing like
+ * only IBSS then don't advertise it as combinations.
+ */
+ if (data->if_combination.max_interfaces > 1) {
+ hw->wiphy->iface_combinations = &data->if_combination;
+ hw->wiphy->n_iface_combinations = 1;
+ }
if (param->ciphers) {
memcpy(data->ciphers, param->ciphers,
@@ -2799,6 +2806,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
+
+ /* We only have SW crypto and only implement the A-MPDU API
+ * (but don't really build A-MPDUs) so can have extended key
+ * support
+ */
+ ieee80211_hw_set(hw, EXT_KEY_ID_NATIVE);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
@@ -2922,9 +2935,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- tasklet_hrtimer_init(&data->beacon_timer,
- mac80211_hwsim_beacon,
- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_SOFT);
+ data->beacon_timer.function = mac80211_hwsim_beacon;
err = ieee80211_register_hw(hw);
if (err < 0) {
@@ -3620,35 +3633,35 @@ done:
static const struct genl_ops hwsim_ops[] = {
{
.cmd = HWSIM_CMD_REGISTER,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_register_received_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_FRAME,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_cloned_frame_received_nl,
},
{
.cmd = HWSIM_CMD_TX_INFO_FRAME,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_tx_info_frame_received_nl,
},
{
.cmd = HWSIM_CMD_NEW_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_new_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_DEL_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_del_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_GET_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_get_radio_nl,
.dumpit = hwsim_dump_radio_nl,
},
@@ -3658,6 +3671,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.name = "MAC80211_HWSIM",
.version = 1,
.maxattr = HWSIM_ATTR_MAX,
+ .policy = hwsim_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = hwsim_ops,
@@ -3894,6 +3908,8 @@ static int __init init_mac80211_hwsim(void)
param.p2p_device = support_p2p_device;
param.use_chanctx = channels > 1;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
+ if (param.p2p_device)
+ param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
err = mac80211_hwsim_new_radio(NULL, &param);
if (err < 0)
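The hwsim hunks above migrate the beacon timer from tasklet_hrtimer to a plain hrtimer in _SOFT mode; the callback re-arms itself via hrtimer_forward() and returns HRTIMER_RESTART instead of restarting the timer by hand. A minimal sketch of that periodic soft-hrtimer pattern, with a hypothetical my_timer_ctx and 100 ms period that are not taken from the driver:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/kernel.h>

struct my_timer_ctx {
	struct hrtimer timer;
	bool running;
};

static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
	struct my_timer_ctx *ctx = container_of(timer, struct my_timer_ctx, timer);

	if (!ctx->running)
		return HRTIMER_NORESTART;

	/* periodic work goes here; _SOFT callbacks run in softirq context */

	/* advance the expiry by one period and let the core re-arm it */
	hrtimer_forward(timer, hrtimer_get_expires(timer), ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void my_timer_start(struct my_timer_ctx *ctx)
{
	hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT);
	ctx->timer.function = my_timer_fn;
	ctx->running = true;
	hrtimer_start(&ctx->timer, ms_to_ktime(100), HRTIMER_MODE_REL_SOFT);
}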
diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig
index 524fd565cb2a..572d187a99f4 100644
--- a/drivers/net/wireless/marvell/mwifiex/Kconfig
+++ b/drivers/net/wireless/marvell/mwifiex/Kconfig
@@ -9,13 +9,13 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
- 8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
+ 8786/8787/8797/8887/8897/8977/8987/8997 chipsets with SDIO interface.
If you choose to build it as a module, it will be called
mwifiex_sdio.
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index c46f0a54a0c7..e11a4bb67172 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -4059,8 +4059,8 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!priv)
return -EINVAL;
- err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len, mwifiex_tm_policy,
- NULL);
+ err = nla_parse_deprecated(tb, MWIFIEX_TM_ATTR_MAX, data, len,
+ mwifiex_tm_policy, NULL);
if (err)
return err;
@@ -4082,16 +4082,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
+ kfree(hostcmd);
return -EFAULT;
}
/* process hostcmd response*/
skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
- if (!skb)
+ if (!skb) {
+ kfree(hostcmd);
return -ENOMEM;
+ }
err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
hostcmd->len, hostcmd->cmd);
if (err) {
+ kfree(hostcmd);
kfree_skb(skb);
return -EMSGSIZE;
}
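The cfg80211.c hunk above fixes a hostcmd memory leak by adding kfree() on each early return. Purely for illustration, the same fix expressed as a single-exit unwind, using hypothetical demo_* helpers that stand in for the driver calls; this is not how the patch itself is written:

#include <linux/slab.h>

/* Hypothetical stand-ins for the command send / reply-build steps. */
static int demo_send(void *cmd) { return 0; }
static int demo_reply(void *cmd) { return 0; }

static int demo_tm_cmd(void)
{
	void *hostcmd;
	int err;

	hostcmd = kzalloc(64, GFP_KERNEL);
	if (!hostcmd)
		return -ENOMEM;

	err = demo_send(hostcmd);
	if (err)
		goto out_free;
	err = demo_reply(hostcmd);
out_free:
	kfree(hostcmd);		/* freed on every path, matching the fix above */
	return err;
}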
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index bfe84e55df77..f1522fb1c1e8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -531,5 +531,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
rx_rate - 1 : rx_rate;
+ if (rate_index >= MWIFIEX_MAX_AC_RX_RATES)
+ rate_index = MWIFIEX_MAX_AC_RX_RATES - 1;
+
return rate_index;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 60db2b969e20..8c35441fd9b7 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -341,6 +341,12 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
sleep_cfm_tmp =
dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
+ MWIFIEX_TYPE_LEN);
+ if (!sleep_cfm_tmp) {
+ mwifiex_dbg(adapter, ERROR,
+ "SLEEP_CFM: dev_alloc_skb failed\n");
+ return -ENOMEM;
+ }
+
skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm)
+ MWIFIEX_TYPE_LEN);
put_unaligned_le32(MWIFIEX_USB_TYPE_CMD, sleep_cfm_tmp->data);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 20cee5c397fb..f6da8edab7f1 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1282,8 +1282,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index a85648342d15..24c041dad9f6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter = card->adapter;
- if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"device already resumed\n");
return 0;
@@ -491,6 +491,8 @@ static void mwifiex_sdio_coredump(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
/* Device ID for SD8977 */
#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145)
+/* Device ID for SD8987 */
+#define SDIO_DEVICE_ID_MARVELL_8987 (0x9149)
/* Device ID for SD8997 */
#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)
@@ -511,6 +513,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977),
.driver_data = (unsigned long)&mwifiex_sdio_sd8977},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8987},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
@@ -2731,4 +2735,5 @@ MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 912de2cde8d9..f672bdf52cc1 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -37,6 +37,7 @@
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
+#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
#define BLOCK_MODE 1
@@ -526,6 +527,58 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
0x68, 0x69, 0x6a},
};
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf9,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xE8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
+ 0x61, 0x62, 0x64, 0x65, 0x66,
+ 0x68, 0x69, 0x6a},
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.firmware = SD8786_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd87xx,
@@ -633,6 +686,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.can_ext_scan = true,
};
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
+ .firmware = SD8987_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8987,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = true,
+ .can_ext_scan = true,
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
.firmware = SD8801_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd87xx,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 69e3b624adbb..24b33e20e7a9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1025,17 +1025,14 @@ mwifiex_create_custom_regdomain(struct mwifiex_private *priv,
struct ieee80211_regdomain *regd;
struct ieee80211_reg_rule *rule;
bool new_rule;
- int regd_size, idx, freq, prev_freq = 0;
+ int idx, freq, prev_freq = 0;
u32 bw, prev_bw = 0;
u8 chflags, prev_chflags = 0, valid_rules = 0;
if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
- regd_size = sizeof(struct ieee80211_regdomain) +
- num_chan * sizeof(struct ieee80211_reg_rule);
-
- regd = kzalloc(regd_size, GFP_KERNEL);
+ regd = kzalloc(struct_size(regd, reg_rules, num_chan), GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
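The sta_cmdresp.c hunk above replaces the open-coded sizeof(regdomain) + n * sizeof(reg_rule) with struct_size(), which does the same arithmetic on the trailing flexible-array member but saturates instead of wrapping on overflow. A small sketch of the idiom with hypothetical demo_* types (not from the driver):

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_rule { int freq; };

struct demo_regd {
	int n_rules;
	struct demo_rule rules[];	/* flexible array member */
};

static struct demo_regd *demo_alloc(int n)
{
	struct demo_regd *regd;

	/* same as sizeof(*regd) + n * sizeof(regd->rules[0]), but the
	 * multiplication saturates at SIZE_MAX instead of wrapping */
	regd = kzalloc(struct_size(regd, rules, n), GFP_KERNEL);
	if (regd)
		regd->n_rules = n;
	return regd;
}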
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index a327fc5b36e3..8b3123cb84c8 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -27,9 +27,9 @@
#define MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE 12
-static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
- struct mwifiex_sta_node *sta_ptr,
- struct sk_buff *event)
+static int mwifiex_check_ibss_peer_capabilities(struct mwifiex_private *priv,
+ struct mwifiex_sta_node *sta_ptr,
+ struct sk_buff *event)
{
int evt_len, ele_len;
u8 *curr;
@@ -42,7 +42,7 @@ static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
evt_len = event->len;
curr = event->data;
- mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilties:",
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilities:",
event->data, event->len);
skb_push(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
@@ -937,8 +937,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
ibss_sta_addr);
sta_ptr = mwifiex_add_sta_entry(priv, ibss_sta_addr);
if (sta_ptr && adapter->adhoc_11n_enabled) {
- mwifiex_check_ibss_peer_capabilties(priv, sta_ptr,
- adapter->event_skb);
+ mwifiex_check_ibss_peer_capabilities(priv, sta_ptr,
+ adapter->event_skb);
if (sta_ptr->is_11n_enabled)
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] =
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index fb28a5c7f441..52a2ce2e78b0 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -250,7 +250,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
local_rx_pd->nf);
}
} else {
- if (rx_pkt_type != PKT_TYPE_BAR)
+ if (rx_pkt_type != PKT_TYPE_BAR &&
+ local_rx_pd->priority < MAX_NUM_TID)
priv->rx_seq[local_rx_pd->priority] = seq_num;
memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
ETH_ALEN);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index ca759d9c0253..86bfa1b9ef9d 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -23,8 +23,8 @@
#define MWIFIEX_BSS_START_EVT_FIX_SIZE 12
-static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
- struct sk_buff *event)
+static int mwifiex_check_uap_capabilities(struct mwifiex_private *priv,
+ struct sk_buff *event)
{
int evt_len;
u8 *curr;
@@ -38,7 +38,7 @@ static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
evt_len = event->len;
curr = event->data;
- mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:",
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilities:",
event->data, event->len);
skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
@@ -201,7 +201,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
ETH_ALEN);
if (priv->hist_data)
mwifiex_hist_data_reset(priv);
- mwifiex_check_uap_capabilties(priv, adapter->event_skb);
+ mwifiex_check_uap_capabilities(priv, adapter->event_skb);
break;
case EVENT_UAP_MIC_COUNTERMEASURES:
/* For future development */
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 8e4e9b6919e0..c4db6417748f 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -441,6 +441,9 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_UPDATE_STADB 0x1123
#define MWL8K_CMD_BASTREAM 0x1125
+#define MWL8K_LEGACY_5G_RATE_OFFSET \
+ (ARRAY_SIZE(mwl8k_rates_24) - ARRAY_SIZE(mwl8k_rates_50))
+
static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
{
u16 command = le16_to_cpu(cmd);
@@ -1016,8 +1019,9 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
if (rxd->channel > 14) {
status->band = NL80211_BAND_5GHZ;
- if (!(status->encoding == RX_ENC_HT))
- status->rate_idx -= 5;
+ if (!(status->encoding == RX_ENC_HT) &&
+ status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET)
+ status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET;
} else {
status->band = NL80211_BAND_2GHZ;
}
@@ -1124,8 +1128,9 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
if (rxd->channel > 14) {
status->band = NL80211_BAND_5GHZ;
- if (!(status->encoding == RX_ENC_HT))
- status->rate_idx -= 5;
+ if (!(status->encoding == RX_ENC_HT) &&
+ status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET)
+ status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET;
} else {
status->band = NL80211_BAND_2GHZ;
}
@@ -2234,8 +2239,10 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
dma_size = le16_to_cpu(cmd->length);
dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(priv->pdev, dma_addr))
- return -ENOMEM;
+ if (pci_dma_mapping_error(priv->pdev, dma_addr)) {
+ rc = -ENOMEM;
+ goto exit;
+ }
priv->hostcmd_wait = &cmd_wait;
iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
@@ -2275,6 +2282,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
ms);
}
+exit:
if (bitmap)
mwl8k_enable_bsses(hw, true, bitmap);
@@ -4631,7 +4639,7 @@ static void mwl8k_tx_poll(unsigned long data)
limit = 32;
- spin_lock_bh(&priv->tx_lock);
+ spin_lock(&priv->tx_lock);
for (i = 0; i < mwl8k_tx_queues(priv); i++)
limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
@@ -4641,7 +4649,7 @@ static void mwl8k_tx_poll(unsigned long data)
priv->tx_wait = NULL;
}
- spin_unlock_bh(&priv->tx_lock);
+ spin_unlock(&priv->tx_lock);
if (limit) {
writel(~MWL8K_A2H_INT_TX_DONE,
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index dbe8c70a8f73..30e44e4c3c7d 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -22,3 +22,4 @@ config MT76x02_USB
source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 3fd1b64b4aa7..7beae2354a24 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -16,10 +16,11 @@ CFLAGS_mt76x02_trace.o := -I$(src)
mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
- mt76x02_dfs.o
+ mt76x02_dfs.o mt76x02_beacon.o
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/
+obj-$(CONFIG_MT7615E) += mt7615/
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 73c8b2805c97..27e3ff039c48 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -135,7 +135,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
return;
status->tid = le16_to_cpu(bar->control) >> 12;
- seqno = le16_to_cpu(bar->start_seq_num) >> 4;
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
tid = rcu_dereference(wcid->aggr[status->tid]);
if (!tid)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index a5adf22c3ffa..c6a9fe2aef9d 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -43,14 +43,15 @@ mt76_queues_read(struct seq_file *s, void *data)
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- struct mt76_queue *q = &dev->q_tx[i];
+ struct mt76_sw_queue *q = &dev->q_tx[i];
- if (!q->ndesc)
+ if (!q->q)
continue;
seq_printf(s,
"%d: queued=%d head=%d tail=%d swq_queued=%d\n",
- i, q->queued, q->head, q->tail, q->swq_queued);
+ i, q->q->queued, q->q->head, q->q->tail,
+ q->swq_queued);
}
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 76629b98c78d..4381155375e1 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -18,16 +18,20 @@
#include "mt76.h"
#include "dma.h"
-#define DMA_DUMMY_TXWI ((void *) ~0)
-
static int
-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
{
int size;
int i;
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
+
+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
size = q->ndesc * sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
@@ -43,10 +47,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
- iowrite32(q->desc_dma, &q->regs->desc_base);
- iowrite32(0, &q->regs->cpu_idx);
- iowrite32(0, &q->regs->dma_idx);
- iowrite32(q->ndesc, &q->regs->ring_size);
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(0, &q->regs->cpu_idx);
+ writel(0, &q->regs->dma_idx);
+ writel(q->ndesc, &q->regs->ring_size);
return 0;
}
@@ -61,7 +65,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
int i, idx = -1;
if (txwi)
- q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+ q->entry[q->head].txwi = DMA_DUMMY_DATA;
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
@@ -120,9 +124,12 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
DMA_TO_DEVICE);
}
- if (e->txwi == DMA_DUMMY_TXWI)
+ if (e->txwi == DMA_DUMMY_DATA)
e->txwi = NULL;
+ if (e->skb == DMA_DUMMY_DATA)
+ e->skb = NULL;
+
*prev_e = *e;
memset(e, 0, sizeof(*e));
}
@@ -130,56 +137,64 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
- iowrite32(q->desc_dma, &q->regs->desc_base);
- iowrite32(q->ndesc, &q->regs->ring_size);
- q->head = ioread32(&q->regs->dma_idx);
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(q->ndesc, &q->regs->ring_size);
+ q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
- iowrite32(q->head, &q->regs->cpu_idx);
+ writel(q->head, &q->regs->cpu_idx);
}
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
+ unsigned int n_swq_queued[4] = {};
+ unsigned int n_queued = 0;
bool wake = false;
- int last;
+ int i, last;
- if (!q->ndesc)
+ if (!q)
return;
- spin_lock_bh(&q->lock);
if (flush)
last = -1;
else
- last = ioread32(&q->regs->dma_idx);
+ last = readl(&q->regs->dma_idx);
- while (q->queued && q->tail != last) {
+ while ((q->queued > n_queued) && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule)
- q->swq_queued--;
+ n_swq_queued[entry.qid]++;
q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
+ n_queued++;
- if (entry.skb) {
- spin_unlock_bh(&q->lock);
- dev->drv->tx_complete_skb(dev, q, &entry, flush);
- spin_lock_bh(&q->lock);
- }
+ if (entry.skb)
+ dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) {
- mt76_put_txwi(dev, entry.txwi);
+ if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
+ mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
if (!flush && q->tail == last)
- last = ioread32(&q->regs->dma_idx);
+ last = readl(&q->regs->dma_idx);
}
- if (!flush)
- mt76_txq_schedule(dev, q);
- else
+ spin_lock_bh(&q->lock);
+
+ q->queued -= n_queued;
+ for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+ if (!n_swq_queued[i])
+ continue;
+
+ dev->q_tx[i].swq_queued -= n_swq_queued[i];
+ }
+
+ if (flush)
mt76_dma_sync_idx(dev, q);
wake = wake && q->stopped &&
@@ -244,20 +259,20 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- iowrite32(q->head, &q->regs->cpu_idx);
+ writel(q->head, &q->regs->cpu_idx);
}
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_queue_buf buf;
dma_addr_t addr;
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
return -ENOMEM;
buf.addr = addr;
@@ -271,80 +286,85 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
return 0;
}
-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+static int
+mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
{
+ struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
+ int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
- struct mt76_queue_buf buf[32];
struct sk_buff *iter;
dma_addr_t addr;
- int len;
- u32 tx_info = 0;
- int n, ret;
+ u8 *txwi;
t = mt76_get_txwi(dev);
if (!t) {
ieee80211_free_txskb(dev->hw, skb);
return -ENOMEM;
}
+ txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
- &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- if (ret < 0)
- goto free;
+ if (dev->drv->tx_aligned4_skbs)
+ mt76_insert_hdr_pad(skb);
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = -ENOMEM;
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
goto free;
- }
- n = 0;
- buf[n].addr = t->dma_addr;
- buf[n++].len = dev->drv->txwi_size;
- buf[n].addr = addr;
- buf[n++].len = len;
+ tx_info.buf[n].addr = t->dma_addr;
+ tx_info.buf[n++].len = dev->drv->txwi_size;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = len;
skb_walk_frags(skb, iter) {
- if (n == ARRAY_SIZE(buf))
+ if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
addr = dma_map_single(dev->dev, iter->data, iter->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
goto unmap;
- buf[n].addr = addr;
- buf[n++].len = iter->len;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = iter->len;
}
+ tx_info.nbuf = n;
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto unmap;
- if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+ ret = -ENOMEM;
goto unmap;
+ }
- return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
+ return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+ tx_info.info, tx_info.skb, t);
unmap:
- ret = -ENOMEM;
for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
- DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+ tx_info.buf[n].len, DMA_TO_DEVICE);
free:
- e.skb = skb;
+ e.skb = tx_info.skb;
e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
+ dev->drv->tx_complete_skb(dev, qid, &e);
mt76_put_txwi(dev, t);
return ret;
}
-EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
@@ -366,7 +386,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
break;
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
+ if (unlikely(dma_mapping_error(dev->dev, addr))) {
skb_free_frag(buf);
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index e3292df5e9b2..03dd2bafa4e8 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -16,6 +16,8 @@
#ifndef __MT76_DMA_H
#define __MT76_DMA_H
+#define DMA_DUMMY_DATA ((void *)~0)
+
#define MT_RING_SIZE 0x10
#define MT_DMA_CTL_SD_LEN1 GENMASK(13, 0)
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index a1529920d877..b7a49ae6b327 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -94,8 +94,8 @@ mt76_eeprom_override(struct mt76_dev *dev)
return;
mac = of_get_mac_address(np);
- if (mac)
- memcpy(dev->macaddr, mac, ETH_ALEN);
+ if (!IS_ERR(mac))
+ ether_addr_copy(dev->macaddr, mac);
#endif
if (!is_valid_ether_addr(dev->macaddr)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 316167404729..5b6a81ee457e 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -214,6 +214,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
+ IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
return 0;
@@ -369,10 +371,16 @@ void mt76_unregister_device(struct mt76_dev *dev)
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
- mt76_tx_free(dev);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
+void mt76_free_device(struct mt76_dev *dev)
+{
+ mt76_tx_free(dev);
+ ieee80211_free_hw(dev->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_free_device);
+
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
@@ -384,17 +392,20 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_rx);
-static bool mt76_has_tx_pending(struct mt76_dev *dev)
+bool mt76_has_tx_pending(struct mt76_dev *dev)
{
+ struct mt76_queue *q;
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- if (dev->q_tx[i].queued)
+ q = dev->q_tx[i].q;
+ if (q && q->queued)
return true;
}
return false;
}
+EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
void mt76_set_channel(struct mt76_dev *dev)
{
@@ -560,6 +571,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt76_wcid *wcid = status->wcid;
bool ps;
+ int i;
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
@@ -606,6 +618,20 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
dev->drv->sta_ps(dev, sta, ps);
ieee80211_sta_ps_transition(sta, ps);
+
+ if (ps)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct mt76_txq *mtxq;
+
+ if (!sta->txq[i])
+ continue;
+
+ mtxq = (struct mt76_txq *) sta->txq[i]->drv_priv;
+ if (!skb_queue_empty(&mtxq->retry_q))
+ ieee80211_schedule_txq(dev->hw, sta->txq[i]);
+ }
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
@@ -737,7 +763,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_dev *dev = hw->priv;
int n_chains = hweight8(dev->antenna_mask);
- *dbm = dev->txpower_cur / 2;
+ *dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
/* convert from per-chain power to combined
* output on 2x2 devices
@@ -787,3 +813,10 @@ void mt76_csa_check(struct mt76_dev *dev)
__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
+
+int
+mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_set_tim);
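
With this split, mt76_unregister_device() only detaches from mac80211, while the new mt76_free_device() releases the tx/txwi state and the ieee80211_hw. A rough sketch of the expected teardown order, with mydrv_dev and mydrv_dma_cleanup() as placeholders for driver-specific state (mt7603_unregister_device() further down follows the same order):

/* Sketch only: mydrv_dev embeds struct mt76_dev as its first member,
 * mydrv_dma_cleanup() stands for the driver's own ring teardown. */
static void mydrv_unregister(struct mydrv_dev *dev)
{
	mt76_unregister_device(&dev->mt76);	/* detach from mac80211 only */
	mydrv_dma_cleanup(dev);			/* driver-specific cleanup */
	mt76_free_device(&dev->mt76);		/* tx state + ieee80211_free_hw() */
}
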
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 1d6bbce76041..38368d19aa6f 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -21,7 +21,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
{
u32 val;
- val = ioread32(dev->mmio.regs + offset);
+ val = readl(dev->mmio.regs + offset);
trace_reg_rr(dev, offset, val);
return val;
@@ -30,7 +30,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
trace_reg_wr(dev, offset, val);
- iowrite32(val, dev->mmio.regs + offset);
+ writel(val, dev->mmio.regs + offset);
}
static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
@@ -70,6 +70,19 @@ static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
return 0;
}
+void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
+ u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mmio.irq_lock, flags);
+ dev->mmio.irqmask &= ~clear;
+ dev->mmio.irqmask |= set;
+ mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
+}
+EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
+
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
static const struct mt76_bus_ops mt76_mmio_ops = {
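
mt76_set_irq_mask() centralizes the clear/set of the cached irqmask under mmio.irq_lock and writes the result to the given register. A sketch of the usual pattern built on it, masking a source in the hard IRQ and unmasking from the bottom half; MYDRV_INT_MASK_CSR, MYDRV_INT_TX_DONE and struct mydrv_dev are placeholder names, not mt76 symbols:

/* Sketch only: placeholder register/bit names and driver struct. */
struct mydrv_dev {
	struct mt76_dev mt76;		/* must come first */
};

static irqreturn_t mydrv_irq_handler(int irq, void *dev_instance)
{
	struct mydrv_dev *dev = dev_instance;

	/* mask the tx-done source while the bottom half runs */
	mt76_set_irq_mask(&dev->mt76, MYDRV_INT_MASK_CSR,
			  MYDRV_INT_TX_DONE, 0);
	tasklet_schedule(&dev->mt76.tx_tasklet);

	return IRQ_HANDLED;
}

static void mydrv_tx_tasklet(unsigned long data)
{
	struct mydrv_dev *dev = (struct mydrv_dev *)data;

	/* ... clean up the tx rings here ... */

	/* unmask the source again once cleanup is done */
	mt76_set_irq_mask(&dev->mt76, MYDRV_INT_MASK_CSR,
			  0, MYDRV_INT_TX_DONE);
}
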
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index bcbfd3c4a44b..8ecbf81a906f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -69,6 +69,7 @@ enum mt76_txq_id {
MT_TXQ_MCU,
MT_TXQ_BEACON,
MT_TXQ_CAB,
+ MT_TXQ_FWDL,
__MT_TXQ_MAX
};
@@ -83,12 +84,11 @@ struct mt76_queue_buf {
int len;
};
-struct mt76u_buf {
- struct mt76_dev *dev;
- struct urb *urb;
- size_t len;
- void *buf;
- bool done;
+struct mt76_tx_info {
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *skb;
+ int nbuf;
+ u32 info;
};
struct mt76_queue_entry {
@@ -98,9 +98,11 @@ struct mt76_queue_entry {
};
union {
struct mt76_txwi_cache *txwi;
- struct mt76u_buf ubuf;
+ struct urb *urb;
};
+ enum mt76_txq_id qid;
bool schedule;
+ bool done;
};
struct mt76_queue_regs {
@@ -117,9 +119,6 @@ struct mt76_queue {
struct mt76_queue_entry *entry;
struct mt76_desc *desc;
- struct list_head swq;
- int swq_queued;
-
u16 first;
u16 head;
u16 tail;
@@ -134,7 +133,13 @@ struct mt76_queue {
dma_addr_t desc_dma;
struct sk_buff *rx_head;
struct page_frag_cache rx_page;
- spinlock_t rx_page_lock;
+};
+
+struct mt76_sw_queue {
+ struct mt76_queue *q;
+
+ struct list_head swq;
+ int swq_queued;
};
struct mt76_mcu_ops {
@@ -150,13 +155,15 @@ struct mt76_mcu_ops {
struct mt76_queue_ops {
int (*init)(struct mt76_dev *dev);
- int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
+ int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base);
int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi);
- int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
@@ -183,6 +190,11 @@ enum mt76_wcid_flags {
DECLARE_EWMA(signal, 10, 8);
+#define MT_WCID_TX_INFO_RATE GENMASK(15, 0)
+#define MT_WCID_TX_INFO_NSS GENMASK(17, 16)
+#define MT_WCID_TX_INFO_TXPWR_ADJ GENMASK(25, 18)
+#define MT_WCID_TX_INFO_SET BIT(31)
+
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
@@ -201,18 +213,14 @@ struct mt76_wcid {
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
- __le16 tx_rate;
- bool tx_rate_set;
- u8 tx_rate_nss;
- s8 max_txpwr_adj;
+ u32 tx_info;
bool sw_iv;
u8 packet_id;
};
struct mt76_txq {
- struct list_head list;
- struct mt76_queue *hwq;
+ struct mt76_sw_queue *swq;
struct mt76_wcid *wcid;
struct sk_buff_head retry_q;
@@ -223,11 +231,11 @@ struct mt76_txq {
};
struct mt76_txwi_cache {
- u32 txwi[8];
- dma_addr_t dma_addr;
struct list_head list;
-};
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
struct mt76_rx_tid {
struct rcu_head rcu_head;
@@ -280,18 +288,22 @@ struct mt76_hw_cap {
bool has_5ghz;
};
+#define MT_TXWI_NO_FREE BIT(0)
+
struct mt76_driver_ops {
+ bool tx_aligned4_skbs;
+ u32 txwi_flags;
u16 txwi_size;
void (*update_survey)(struct mt76_dev *dev);
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid,
- struct ieee80211_sta *sta, u32 *tx_info);
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
- void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+ void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
@@ -378,7 +390,6 @@ struct mt76_usb {
u8 data[32];
struct tasklet_struct rx_tasklet;
- struct tasklet_struct tx_tasklet;
struct delayed_work stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
@@ -435,11 +446,14 @@ struct mt76_dev {
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
- struct mt76_queue q_tx[__MT_TXQ_MAX];
+ struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
+ struct tasklet_struct tx_tasklet;
+ struct delayed_work mac_work;
+
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
@@ -455,6 +469,10 @@ struct mt76_dev {
u8 antenna_mask;
u16 chainmask;
+ struct tasklet_struct pre_tbtt_tasklet;
+ int beacon_int;
+ u8 beacon_mask;
+
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
struct debugfs_blob_wrapper eeprom;
@@ -529,6 +547,9 @@ struct mt76_rx_status {
#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
+#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
+#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
+#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
@@ -572,6 +593,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
@@ -597,6 +619,7 @@ struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
+void mt76_free_device(struct mt76_dev *dev);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
@@ -605,6 +628,12 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+static inline u8 *
+mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ return (u8 *)t - dev->drv->txwi_size;
+}
+
/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
@@ -645,9 +674,19 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -657,13 +696,14 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool send_bar);
-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
+void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
+bool mt76_has_tx_pending(struct mt76_dev *dev);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
@@ -708,6 +748,8 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
+int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
@@ -738,8 +780,7 @@ static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_device *udev = to_usb_device(dev->dev);
struct mt76_usb *usb = &dev->usb;
unsigned int pipe;
@@ -757,10 +798,10 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
-int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
-void mt76u_stop_queues(struct mt76_dev *dev);
-void mt76u_stop_stat_wk(struct mt76_dev *dev);
+void mt76u_stop_tx(struct mt76_dev *dev);
+void mt76u_stop_rx(struct mt76_dev *dev);
+int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
@@ -770,4 +811,6 @@ void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
+void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
+
#endif
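
The old tx_rate/tx_rate_set/tx_rate_nss/max_txpwr_adj fields of struct mt76_wcid are collapsed into a single tx_info word laid out by the MT_WCID_TX_INFO_* masks above. A small sketch of how a driver can pack and unpack it with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>; the mydrv_* helpers are illustrative only:

/* Sketch only: packing per-station rate state into wcid->tx_info. */
static void mydrv_wcid_set_rate(struct mt76_wcid *wcid, u16 rateval,
				u8 nss, u8 txpwr_adj)
{
	wcid->tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
			FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
			FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, txpwr_adj) |
			MT_WCID_TX_INFO_SET;
}

static bool mydrv_wcid_get_rate(struct mt76_wcid *wcid, u16 *rateval)
{
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))	/* replaces tx_rate_set */
		return false;

	*rateval = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid->tx_info);
	return true;
}
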
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index 4dcb465095d1..58e68fbdbf75 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -16,21 +16,20 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
struct sk_buff *skb = NULL;
- if (!(dev->beacon_mask & BIT(mvif->idx)))
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
if (!skb)
return;
- mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb,
- &mvif->sta.wcid, NULL);
+ mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
spin_lock_bh(&dev->ps_lock);
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
- dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) |
+ dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
@@ -49,7 +48,7 @@ mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- if (!(dev->beacon_mask & BIT(mvif->idx)))
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
@@ -73,10 +72,13 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
struct sk_buff *skb;
int i, nframes;
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
data.dev = dev;
__skb_queue_head_init(&data.q);
- q = &dev->mt76.q_tx[MT_TXQ_BEACON];
+ q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -93,7 +95,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
if (dev->mt76.csa_complete)
goto out;
- q = &dev->mt76.q_tx[MT_TXQ_CAB];
+ q = dev->mt76.q_tx[MT_TXQ_CAB].q;
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
@@ -118,8 +120,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid,
- NULL);
+ mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
}
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
@@ -135,7 +136,8 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
- if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
+ if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
+ hweight8(dev->mt76.beacon_mask))
dev->beacon_check++;
}
@@ -145,19 +147,19 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
if (idx >= 0) {
if (intval)
- dev->beacon_mask |= BIT(idx);
+ dev->mt76.beacon_mask |= BIT(idx);
else
- dev->beacon_mask &= ~BIT(idx);
+ dev->mt76.beacon_mask &= ~BIT(idx);
}
- if (!dev->beacon_mask || (!intval && idx < 0)) {
+ if (!dev->mt76.beacon_mask || (!intval && idx < 0)) {
mt7603_irq_disable(dev, MT_INT_MAC_IRQ3);
mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK);
mt76_wr(dev, MT_HW_INT_MASK(3), 0);
return;
}
- dev->beacon_int = intval;
+ dev->mt76.beacon_int = intval;
mt76_wr(dev, MT_TBTT,
FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE);
@@ -175,10 +177,11 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
mt76_set(dev, MT_WF_ARB_BCN_START,
MT_WF_ARB_BCN_START_BSSn(0) |
- ((dev->beacon_mask >> 1) * MT_WF_ARB_BCN_START_BSS0n(1)));
+ ((dev->mt76.beacon_mask >> 1) *
+ MT_WF_ARB_BCN_START_BSS0n(1)));
mt7603_irq_enable(dev, MT_INT_MAC_IRQ3);
- if (dev->beacon_mask & ~BIT(0))
+ if (dev->mt76.beacon_mask & ~BIT(0))
mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
else
mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
index 1086dcd376a0..37e5644b45ef 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -2,17 +2,6 @@
#include "mt7603.h"
-void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
- dev->mt76.mmio.irqmask &= ~clear;
- dev->mt76.mmio.irqmask |= set;
- mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
- spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
-}
-
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
@@ -38,7 +27,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
if (hwintr & MT_HW_INT3_PRE_TBTT0)
- tasklet_schedule(&dev->pre_tbtt_tasklet);
+ tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
@@ -46,7 +35,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
if (intr & MT_INT_TX_DONE_ALL) {
mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
- tasklet_schedule(&dev->tx_tasklet);
+ tasklet_schedule(&dev->mt76.tx_tasklet);
}
if (intr & MT_INT_RX_DONE(0)) {
@@ -64,8 +53,8 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
{
- u32 base = addr & GENMASK(31, 19);
- u32 offset = addr & GENMASK(18, 0);
+ u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index b3ae0aaea62a..27e2d9f90553 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -5,18 +5,22 @@
#include "../dma.h"
static int
-mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
+mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
int idx, int n_desc)
{
- int ret;
+ struct mt76_queue *hwq;
+ int err;
- q->hw_idx = idx;
- q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
@@ -119,15 +123,12 @@ static int
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
{
- int ret;
+ int err;
- q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->buf_size = bufsize;
-
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
+ err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
+ MT_RX_RING_BASE);
+ if (err < 0)
+ return err;
mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
@@ -144,6 +145,8 @@ mt7603_tx_tasklet(unsigned long data)
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
+ mt76_txq_schedule_all(&dev->mt76);
+
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
}
@@ -163,7 +166,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
init_waitqueue_head(&dev->mt76.mmio.mcu.wait);
skb_queue_head_init(&dev->mt76.mmio.mcu.res_q);
- tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
+ tasklet_init(&dev->mt76.tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
mt76_clear(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_EN |
@@ -223,6 +226,6 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN |
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
- tasklet_kill(&dev->tx_tasklet);
+ tasklet_kill(&dev->mt76.tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index d54dda67d036..78cdbb70e178 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -167,7 +167,8 @@ mt7603_mac_init(struct mt7603_dev *dev)
FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) |
FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15));
- mt76_rmw(dev, MT_DMA_DCR0, ~0xfffc, 4096);
+ mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
+ FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 4096));
mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13));
mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13));
@@ -488,6 +489,7 @@ mt7603_init_txpower(struct mt7603_dev *dev,
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
chan->max_power = target_power;
+ chan->orig_mpwr = target_power;
}
}
@@ -510,8 +512,10 @@ int mt7603_register_device(struct mt7603_dev *dev)
bus_ops->rmw = mt7603_rmw;
dev->mt76.bus = bus_ops;
- INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
- tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
+ spin_lock_init(&dev->ps_lock);
+
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
+ tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
(unsigned long)dev);
/* Check for 7688, which only has 1SS */
@@ -570,9 +574,9 @@ int mt7603_register_device(struct mt7603_dev *dev)
void mt7603_unregister_device(struct mt7603_dev *dev)
{
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76_unregister_device(&dev->mt76);
mt7603_mcu_exit(dev);
mt7603_dma_cleanup(dev);
- ieee80211_free_hw(mt76_hw(dev));
+ mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 5e31d7da96fc..6d506e34c3ee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
MT_BA_CONTROL_1_RESET));
}
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
int ba_size)
{
u32 addr = mt7603_wtbl2_addr(wcid);
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
mt76_clear(dev, addr + (15 * 4), tid_mask);
return;
}
- mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
- mt7603_mac_stop(dev);
- switch (tid) {
- case 0:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
- break;
- case 1:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
- break;
- case 2:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
- ssn);
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
- ssn >> 8);
- break;
- case 3:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
- break;
- case 4:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
- break;
- case 5:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
- ssn);
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
- ssn >> 4);
- break;
- case 6:
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
- break;
- case 7:
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
- break;
- }
- mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
- mt7603_mac_start(dev);
for (i = 7; i > 0; i--) {
if (ba_size >= MT_AGG_SIZE_LIMIT(i))
@@ -627,7 +590,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(hdr->frame_control);
status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- status->seqno = hdr->seq_ctrl >> 4;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
return 0;
}
@@ -754,11 +717,11 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
MT_WTBL_UPDATE_RATE_UPDATE |
MT_WTBL_UPDATE_TX_COUNT_CLEAR);
- if (!sta->wcid.tx_rate_set)
+ if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
- sta->wcid.tx_rate_set = true;
+ sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}
static enum mt7603_cipher_type
@@ -820,20 +783,23 @@ int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_queue *q,
+ struct sk_buff *skb, enum mt76_txq_id qid,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
int pid, struct ieee80211_key_conf *key)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_queue *q = dev->mt76.q_tx[qid].q;
struct mt7603_vif *mvif;
int wlan_idx;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
int tx_count = 8;
u8 frame_type, frame_subtype;
u16 fc = le16_to_cpu(hdr->frame_control);
+ u16 seqno = 0;
u8 vif_idx = 0;
u32 val;
u8 bw;
@@ -841,7 +807,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
if (vif) {
mvif = (struct mt7603_vif *)vif->drv_priv;
vif_idx = mvif->idx;
- if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
+ if (vif_idx && qid >= MT_TXQ_BEACON)
vif_idx += 0x10;
}
@@ -915,11 +881,21 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
}
/* use maximum tx count for beacons and buffered multicast */
- if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
+ if (qid >= MT_TXQ_BEACON)
tx_count = 0x1f;
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
- FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+ MT_TXD3_SN_VALID;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ seqno = le16_to_cpu(hdr->seq_ctrl);
+ else if (ieee80211_is_back_req(hdr->frame_control))
+ seqno = le16_to_cpu(bar->start_seq_num);
+ else
+ val &= ~MT_TXD3_SN_VALID;
+
+ val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
txwi[3] = cpu_to_le32(val);
if (key) {
@@ -936,13 +912,13 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
}
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
int pid;
@@ -958,7 +934,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt7603_wtbl_set_ps(dev, msta, false);
}
- pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
spin_lock_bh(&dev->mt76.lock);
@@ -968,7 +944,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
spin_unlock_bh(&dev->mt76.lock);
}
- mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key);
+ mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
+ sta, pid, key);
return 0;
}
@@ -1167,8 +1144,8 @@ out:
rcu_read_unlock();
}
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct sk_buff *skb = e->skb;
@@ -1178,7 +1155,7 @@ void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
return;
}
- if (q - dev->mt76.q_tx < 4)
+ if (qid < 4)
dev->tx_hang_check = 0;
mt76_tx_complete_skb(mdev, skb);
@@ -1291,7 +1268,7 @@ static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
- int beacon_int = dev->beacon_int;
+ int beacon_int = dev->mt76.beacon_int;
u32 mask = dev->mt76.mmio.irqmask;
int i;
@@ -1301,8 +1278,8 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mt76);
- tasklet_disable(&dev->tx_tasklet);
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.tx_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
@@ -1348,10 +1325,10 @@ skip_dma_reset:
clear_bit(MT76_RESET, &dev->mt76.state);
mutex_unlock(&dev->mt76.mutex);
- tasklet_enable(&dev->tx_tasklet);
- tasklet_schedule(&dev->tx_tasklet);
+ tasklet_enable(&dev->mt76.tx_tasklet);
+ tasklet_schedule(&dev->mt76.tx_tasklet);
- tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, -1, beacon_int);
napi_enable(&dev->mt76.napi[0]);
@@ -1410,17 +1387,17 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
int i;
for (i = 0; i < 4; i++) {
- q = &dev->mt76.q_tx[i];
+ q = dev->mt76.q_tx[i].q;
if (!q->queued)
continue;
prev_dma_idx = dev->tx_dma_idx[i];
- dma_idx = ioread32(&q->regs->dma_idx);
+ dma_idx = readl(&q->regs->dma_idx);
dev->tx_dma_idx[i] = dma_idx;
if (dma_idx == prev_dma_idx &&
- dma_idx != ioread32(&q->regs->cpu_idx))
+ dma_idx != readl(&q->regs->cpu_idx))
break;
}
@@ -1691,7 +1668,7 @@ out:
void mt7603_mac_work(struct work_struct *work)
{
struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
- mac_work.work);
+ mt76.mac_work.work);
bool reset = false;
mt76_tx_status_check(&dev->mt76, NULL, false);
@@ -1744,6 +1721,6 @@ void mt7603_mac_work(struct work_struct *work)
if (reset)
mt7603_mac_watchdog_reset(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}
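
The sequence-number handling above relies on the 802.11 sequence control field keeping the 12-bit sequence number in bits 4..15: the txwi stores seq_ctrl >> 4, the rx path uses IEEE80211_SEQ_TO_SN(), and main.c below uses IEEE80211_SN_TO_SEQ() for mtxq->agg_ssn. For reference, a sketch of the two conversions (macros from include/linux/ieee80211.h); the mydrv_* wrappers are illustrative only:

/* Sketch only: seq_ctrl <-> SN conversions used by the new code. */
static u16 mydrv_hdr_sn(struct ieee80211_hdr *hdr)
{
	/* bits 4..15 of the sequence control field hold the 12-bit SN */
	return IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
}

static u16 mydrv_sn_to_seq(u16 sn)
{
	/* inverse direction, as used for mtxq->agg_ssn in main.c below */
	return IEEE80211_SN_TO_SEQ(sn);
}
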
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index cc0fe0933b2d..0a0334dc40d5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -16,7 +16,7 @@ mt7603_start(struct ieee80211_hw *hw)
mt7603_mac_start(dev);
dev->survey_time = ktime_get_boottime();
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- mt7603_mac_work(&dev->mac_work.work);
+ mt7603_mac_work(&dev->mt76.mac_work.work);
return 0;
}
@@ -27,7 +27,7 @@ mt7603_stop(struct ieee80211_hw *hw)
struct mt7603_dev *dev = hw->priv;
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
mt7603_mac_stop(dev);
}
@@ -132,11 +132,13 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
u8 bw = MT_BW_20;
bool failed = false;
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &dev->mt76.state);
+ mt7603_beacon_set_timer(dev, -1, 0);
mt76_set_channel(&dev->mt76);
mt7603_mac_stop(dev);
@@ -171,7 +173,7 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt76_txq_schedule_all(&dev->mt76);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT7603_WATCHDOG_TIME);
/* reset channel stats */
@@ -186,10 +188,14 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt7603_init_edcca(dev);
out:
+ if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
mutex_unlock(&dev->mt76.mutex);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
if (failed)
- mt7603_mac_work(&dev->mac_work.work);
+ mt7603_mac_work(&dev->mt76.mac_work.work);
return ret;
}
@@ -294,9 +300,9 @@ mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
int beacon_int = !!info->enable_beacon * info->beacon_int;
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
- tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
}
mutex_unlock(&dev->mt76.mutex);
@@ -372,7 +378,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct sk_buff_head list;
- mt76_stop_tx_queues(&dev->mt76, sta, false);
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
mt7603_wtbl_set_ps(dev, msta, ps);
if (ps)
return;
@@ -492,7 +498,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
u16 cw_max = (1 << 10) - 1;
u32 val;
- queue = dev->mt76.q_tx[queue].hw_idx;
+ queue = dev->mt76.q_tx[queue].q->hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
@@ -535,7 +541,6 @@ mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7603_dev *dev = hw->priv;
set_bit(MT76_SCANNING, &dev->mt76.state);
- mt7603_beacon_set_timer(dev, -1, 0);
}
static void
@@ -544,7 +549,6 @@ mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct mt7603_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
- mt7603_beacon_set_timer(dev, -1, dev->beacon_int);
}
static void
@@ -584,21 +588,21 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
break;
case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -664,12 +668,6 @@ static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *cont
mt76_tx(&dev->mt76, control->sta, wcid, skb);
}
-static int
-mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
-{
- return 0;
-}
-
const struct ieee80211_ops mt7603_ops = {
.tx = mt7603_tx,
.start = mt7603_start,
@@ -691,7 +689,7 @@ const struct ieee80211_ops mt7603_ops = {
.sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
.release_buffered_frames = mt7603_release_buffered_frames,
.set_coverage_class = mt7603_set_coverage_class,
- .set_tim = mt7603_set_tim,
+ .set_tim = mt76_set_tim,
.get_survey = mt76_get_survey,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index d06905ea8cc6..6357b5658a32 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -14,17 +14,14 @@ struct mt7603_fw_trailer {
} __packed;
static int
-__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
- int query, int *wait_seq)
+__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
{
int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
struct mt76_dev *mdev = &dev->mt76;
struct mt7603_mcu_txd *txd;
u8 seq;
- if (!skb)
- return -EINVAL;
-
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
if (!seq)
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
@@ -42,15 +39,14 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
if (cmd < 0) {
txd->cid = -cmd;
+ txd->set_query = MCU_Q_NA;
} else {
txd->cid = MCU_CMD_EXT_CID;
txd->ext_cid = cmd;
- if (query != MCU_Q_NA)
- txd->ext_cid_ack = 1;
+ txd->set_query = MCU_Q_SET;
+ txd->ext_cid_ack = 1;
}
- txd->set_query = query;
-
if (wait_seq)
*wait_seq = seq;
@@ -58,21 +54,26 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
}
static int
-mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
- int query)
+mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
{
- struct mt76_dev *mdev = &dev->mt76;
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
unsigned long expires = jiffies + 3 * HZ;
struct mt7603_mcu_rxd *rxd;
+ struct sk_buff *skb;
int ret, seq;
+ skb = mt7603_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
+
mutex_lock(&mdev->mmio.mcu.mutex);
- ret = __mt7603_mcu_msg_send(dev, skb, cmd, query, &seq);
+ ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
if (ret)
goto out;
- while (1) {
+ while (wait_resp) {
bool check_seq = false;
skb = mt76_mcu_get_response(&dev->mt76, expires);
@@ -113,28 +114,22 @@ mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
.len = cpu_to_le32(len),
.mode = cpu_to_le32(BIT(31)),
};
- struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
- return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
- MCU_Q_NA);
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ &req, sizeof(req), true);
}
static int
mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
{
- struct sk_buff *skb;
- int ret = 0;
+ int cur_len, ret = 0;
while (len > 0) {
- int cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
- len);
-
- skb = mt7603_mcu_msg_alloc(data, cur_len);
- if (!skb)
- return -ENOMEM;
+ cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
+ len);
- ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
- MCU_Q_NA, NULL);
+ ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ data, cur_len, false);
if (ret)
break;
@@ -155,23 +150,19 @@ mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
.override = cpu_to_le32(addr ? 1 : 0),
.addr = cpu_to_le32(addr),
};
- struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
- return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
- MCU_Q_NA);
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+ &req, sizeof(req), true);
}
static int
-mt7603_mcu_restart(struct mt7603_dev *dev)
+mt7603_mcu_restart(struct mt76_dev *dev)
{
- struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0);
-
- return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
- MCU_Q_NA);
+ return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ,
+ NULL, 0, true);
}
-static int
-mt7603_load_firmware(struct mt7603_dev *dev)
+static int mt7603_load_firmware(struct mt7603_dev *dev)
{
const struct firmware *fw;
const struct mt7603_fw_trailer *hdr;
@@ -261,6 +252,9 @@ running:
mt76_clear(dev, MT_SCH_4, BIT(8));
dev->mcu_running = true;
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
dev_info(dev->mt76.dev, "firmware init done\n");
out:
@@ -271,14 +265,18 @@ out:
int mt7603_mcu_init(struct mt7603_dev *dev)
{
- mutex_init(&dev->mt76.mmio.mcu.mutex);
+ static const struct mt76_mcu_ops mt7603_mcu_ops = {
+ .mcu_send_msg = mt7603_mcu_msg_send,
+ .mcu_restart = mt7603_mcu_restart,
+ };
+ dev->mt76.mcu_ops = &mt7603_mcu_ops;
return mt7603_load_firmware(dev);
}
void mt7603_mcu_exit(struct mt7603_dev *dev)
{
- mt7603_mcu_restart(dev);
+ __mt76_mcu_restart(&dev->mt76);
skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
}
@@ -360,27 +358,30 @@ int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
.buffer_mode = 1,
.len = ARRAY_SIZE(req_fields) - 1,
};
- struct sk_buff *skb;
- struct req_data *data;
const int size = 0xff * sizeof(struct req_data);
- u8 *eep = (u8 *)dev->mt76.eeprom.data;
- int i;
+ u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+ int i, ret, len = sizeof(req_hdr) + size;
+ struct req_data *data;
BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size);
- skb = mt7603_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
- memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
- data = (struct req_data *)skb_put(skb, size);
- memset(data, 0, size);
+ req = kmalloc(len, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memcpy(req, &req_hdr, sizeof(req_hdr));
+ data = (struct req_data *)(req + sizeof(req_hdr));
+ memset(data, 0, size);
for (i = 0; i < ARRAY_SIZE(req_fields); i++) {
data[i].addr = cpu_to_le16(req_fields[i]);
data[i].val = eep[req_fields[i]];
- data[i].pad = 0;
}
- return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
- MCU_Q_SET);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ req, len, true);
+ kfree(req);
+
+ return ret;
}
static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
@@ -415,7 +416,6 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
},
#undef EEP_VAL
};
- struct sk_buff *skb;
u8 *eep = (u8 *)dev->mt76.eeprom.data;
memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
@@ -424,9 +424,8 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
sizeof(req.temp_comp_power));
- skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
- return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_TX_POWER_CTRL,
- MCU_Q_SET);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
+ &req, sizeof(req), true);
}
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
@@ -450,10 +449,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
.tx_streams = n_chains,
.rx_streams = n_chains,
};
- struct sk_buff *skb;
s8 tx_power;
- int ret;
- int i;
+ int i, ret;
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
req.bw = MT_BW_40;
@@ -473,9 +470,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
req.txpower[i] = tx_power;
- skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
- ret = mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
- MCU_Q_SET);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
+ &req, sizeof(req), true);
if (ret)
return ret;
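
All MCU commands now go through the shared mt76_mcu_ops hooks, so a command boils down to a request struct handed to __mt76_mcu_send_msg() with a flag selecting whether to wait for the MCU response. A sketch under that assumption; MCU_EXT_CMD_EXAMPLE and the request layout are made up for illustration (the real commands are the MCU_EXT_CMD_*/-MCU_CMD_* values used above):

/* Sketch only: hypothetical command id and request layout. */
static int mydrv_mcu_set_example(struct mt7603_dev *dev, u32 val)
{
	struct {
		__le32 value;
	} __packed req = {
		.value = cpu_to_le32(val),
	};

	/* last argument: true = block until the MCU acknowledges */
	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EXAMPLE,
				   &req, sizeof(req), true);
}
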
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 79f332429432..fa64bbaab0d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -109,7 +109,6 @@ struct mt7603_dev {
ktime_t survey_time;
ktime_t ed_time;
- int beacon_int;
struct mt76_queue q_rx;
@@ -126,8 +125,6 @@ struct mt7603_dev {
s8 sensitivity;
- u8 beacon_mask;
-
u8 beacon_check;
u8 tx_hang_check;
u8 tx_dma_check;
@@ -143,10 +140,6 @@ struct mt7603_dev {
u32 reset_test;
unsigned int reset_cause[__RESET_CAUSE_MAX];
-
- struct delayed_work mac_work;
- struct tasklet_struct tx_tasklet;
- struct tasklet_struct pre_tbtt_tasklet;
};
extern const struct mt76_driver_ops mt7603_drv_ops;
@@ -179,16 +172,14 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev);
int mt7603_mcu_init(struct mt7603_dev *dev);
void mt7603_init_debugfs(struct mt7603_dev *dev);
-void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set);
-
static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
{
- mt7603_set_irq_mask(dev, 0, mask);
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
}
static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
{
- mt7603_set_irq_mask(dev, mask, 0);
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
void mt7603_mac_dma_start(struct mt7603_dev *dev);
@@ -200,7 +191,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
int ba_size);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
@@ -225,12 +216,12 @@ void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index da6827ae6cee..9d257d5c309d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -233,6 +233,10 @@
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 0)
+#define MT_DMA_DCR0_DAMSDU BIT(16)
+#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+
#define MT_DMA_DCR1 MT_WF_DMA(0x004)
#define MT_DMA_FQCR0 MT_WF_DMA(0x008)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
new file mode 100644
index 000000000000..3b8aba09bd5e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -0,0 +1,7 @@
+config MT7615E
+ tristate "MediaTek MT7615E (PCIe) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7615-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
new file mode 100644
index 000000000000..6397552f6ee3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7615E) += mt7615e.o
+
+mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
new file mode 100644
index 000000000000..3ec6582afd8f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int
+mt7615_init_tx_queues(struct mt7615_dev *dev, int n_desc)
+{
+ struct mt76_sw_queue *q;
+ struct mt76_queue *hwq;
+ int err, i;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, 0, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < MT_TXQ_MCU; i++) {
+ q = &dev->mt76.q_tx[i];
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
+ }
+
+ return 0;
+}
+
+static int
+mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
+ int idx, int n_desc)
+{
+ struct mt76_queue *hwq;
+ int err;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
+
+ return 0;
+}
+
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+ switch (type) {
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt76_mcu_rx_event(&dev->mt76, skb);
+ break;
+ case PKT_TYPE_NORMAL:
+ if (!mt7615_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ /* fall through */
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+static void mt7615_tx_tasklet(unsigned long data)
+{
+ struct mt7615_dev *dev = (struct mt7615_dev *)data;
+ static const u8 queue_map[] = {
+ MT_TXQ_MCU,
+ MT_TXQ_BE
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++)
+ mt76_queue_tx_cleanup(dev, queue_map[i], false);
+
+ mt76_txq_schedule_all(&dev->mt76);
+
+ mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt7615_dma_init(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt76_dma_attach(&dev->mt76);
+
+ tasklet_init(&dev->mt76.tx_tasklet, mt7615_tx_tasklet,
+ (unsigned long)dev);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
+ MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
+ MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY |
+ MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21, 0x1);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
+ mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
+ mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
+ mt76_set(dev, 0x7158, BIT(16));
+ mt76_clear(dev, 0x7000, BIT(23));
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ ret = mt7615_init_tx_queues(dev, MT7615_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7615_TXQ_MCU,
+ MT7615_TX_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ MT7615_TXQ_FWDL,
+ MT7615_TX_FWDL_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* init rx queues */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
+ MT_RX_RING_BASE);
+ if (ret)
+ return ret;
+
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+ MT7615_RX_RING_SIZE, MT_RX_BUF_SIZE,
+ MT_RX_RING_BASE);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_DELAY_INT_CFG, 0);
+
+ ret = mt76_init_queues(dev);
+ if (ret < 0)
+ return ret;
+
+ mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
+
+ /* start dma engine */
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+ /* enable interrupts for TX/RX rings */
+ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+
+ return 0;
+}
+
+void mt7615_dma_cleanup(struct mt7615_dev *dev)
+{
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
+
+ tasklet_kill(&dev->mt76.tx_tasklet);
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
new file mode 100644
index 000000000000..dd5ab46a4f66
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt7615.h"
+#include "eeprom.h"
+
+static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
+ u16 addr, u8 *data)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, base + MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
+ WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
+ memset(data, 0x0, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int mt7615_efuse_init(struct mt7615_dev *dev)
+{
+ u32 base = mt7615_reg_map(dev, MT_EFUSE_BASE);
+ int len = MT7615_EEPROM_SIZE;
+ int ret, i;
+ void *buf;
+
+ if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
+ return -EINVAL;
+
+ dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+ dev->mt76.otp.size = len;
+ if (!dev->mt76.otp.data)
+ return -ENOMEM;
+
+ buf = dev->mt76.otp.data;
+ for (i = 0; i + 16 <= len; i += 16) {
+ ret = mt7615_efuse_read(dev, base, i, buf + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mt7615_eeprom_load(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return mt7615_efuse_init(dev);
+}
+
+int mt7615_eeprom_init(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt7615_eeprom_load(dev);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, MT7615_EEPROM_SIZE);
+
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+
+ memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+
+ mt76_eeprom_override(&dev->mt76);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
new file mode 100644
index 000000000000..a4cf16688171
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_EEPROM_H
+#define __MT7615_EEPROM_H
+
+#include "mt7615.h"
+
+enum mt7615_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_NIC_CONF_0 = 0x034,
+
+ __MT_EE_MAX = 0x3bf
+};
+
+#endif
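The header above only pins down byte offsets inside the flat EEPROM/OTP image; consumers index the buffer directly, as mt7615_eeprom_init() does for the MAC address at MT_EE_MAC_ADDR. A stand-alone sketch of that access pattern (the image contents here are dummy values):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MT_EE_MAC_ADDR	0x004
#define ETH_ALEN	6

int main(void)
{
	uint8_t eeprom[0x400] = { 0 };
	uint8_t macaddr[ETH_ALEN];

	/* Fake image: a MAC address stored at offset 0x004. */
	static const uint8_t mac[ETH_ALEN] = {
		0x00, 0x0c, 0x43, 0x26, 0x60, 0x00
	};
	memcpy(eeprom + MT_EE_MAC_ADDR, mac, ETH_ALEN);

	/* What the driver does with dev->mt76.eeprom.data. */
	memcpy(macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       macaddr[0], macaddr[1], macaddr[2],
	       macaddr[3], macaddr[4], macaddr[5]);
	return 0;
}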
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
new file mode 100644
index 000000000000..3ab3ff553ef2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/etherdevice.h>
+#include "mt7615.h"
+#include "mac.h"
+
+static void mt7615_phy_init(struct mt7615_dev *dev)
+{
+ /* disable band 0 rf low power beacon mode */
+ mt76_rmw(dev, MT_WF_PHY_WF2_RFCTRL0, MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN,
+ MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+}
+
+static void mt7615_mac_init(struct mt7615_dev *dev)
+{
+ /* enable band 0 clk */
+ mt76_rmw(dev, MT_CFG_CCR,
+ MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN,
+ MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN);
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
+ mt76_rmw(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mt7615_mcu_set_rts_thresh(dev, 0x92b);
+
+ mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
+ MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
+
+ mt7615_mcu_init_mac(dev);
+
+ mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
+ FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
+
+ mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7));
+ mt76_wr(dev, MT_AGG_ARDCR,
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1),
+ max_t(int, 0, MT7615_RATE_RETRY - 2)) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
+
+ mt76_wr(dev, MT_AGG_ARCR,
+ (MT_AGG_ARCR_INIT_RATE1 |
+ FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+ MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+ FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+ FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
+
+ dev->mt76.global_wcid.idx = MT7615_WTBL_RESERVED;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ rcu_assign_pointer(dev->mt76.wcid[MT7615_WTBL_RESERVED],
+ &dev->mt76.global_wcid);
+}
+
+static int mt7615_init_hardware(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+
+ ret = mt7615_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7615_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ ret = mt7615_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_ctrl_pm_state(dev, 0);
+ mt7615_mcu_del_wtbl_all(dev);
+
+ return 0;
+}
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+static struct ieee80211_rate mt7615_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
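The CCK_RATE()/OFDM_RATE() entries rely on a small encoding: hw_value carries the PHY type in the upper byte and the hardware rate index in the lower byte, with the CCK short-preamble variants offset by 4. mt7615_mac_tx_rate_val() in mac.c later undoes this with a shift and a mask. A stand-alone round trip of that encoding (assuming the MT_PHY_TYPE_CCK/OFDM values used by the driver):

#include <stdint.h>
#include <stdio.h>

enum { MT_PHY_TYPE_CCK = 0, MT_PHY_TYPE_OFDM = 1 };

/* Same packing as the CCK_RATE()/OFDM_RATE() macros. */
static uint16_t rate_hw_value(int phy, int idx)
{
	return (phy << 8) | idx;
}

int main(void)
{
	/* 5.5 Mbit/s CCK, short preamble: index 2 shifted up by 4. */
	uint16_t val = rate_hw_value(MT_PHY_TYPE_CCK, 4 + 2);

	/* The reverse mapping used when building the TX descriptor. */
	int phy = val >> 8;
	int idx = val & 0xff;

	printf("hw_value 0x%04x -> phy %d, rate index %d\n",
	       (unsigned)val, phy, idx);
	return 0;
}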
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = MT7615_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION)
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ }
+};
+
+static int mt7615_init_debugfs(struct mt7615_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt7615_register_device(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ ret = mt7615_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+
+ hw->queues = 4;
+ hw->max_rates = 3;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 11;
+
+ hw->sta_data_size = sizeof(struct mt7615_sta);
+ hw->vif_data_size = sizeof(struct mt7615_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+
+ dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ dev->mt76.chainmask = 0x404;
+ dev->mt76.antenna_mask = 0xf;
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP);
+
+ ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
+ ARRAY_SIZE(mt7615_rates));
+ if (ret)
+ return ret;
+
+ hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+
+ return mt7615_init_debugfs(dev);
+}
+
+void mt7615_unregister_device(struct mt7615_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7615_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb)
+ dev_kfree_skb_any(txwi->skb);
+ mt76_put_txwi(&dev->mt76, txwi);
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+ mt76_unregister_device(&dev->mt76);
+ mt7615_mcu_exit(dev);
+ mt7615_dma_cleanup(dev);
+
+ ieee80211_free_hw(mt76_hw(dev));
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
new file mode 100644
index 000000000000..b8f48d10f27a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
+ u8 idx, bool unicast)
+{
+ struct mt7615_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7615_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+static int mt7615_get_rate(struct mt7615_dev *dev,
+ struct ieee80211_supported_band *sband,
+ int idx, bool cck)
+{
+ int offset = 0;
+ int len = sband->n_bitrates;
+ int i;
+
+ if (cck) {
+ if (sband == &dev->mt76.sband_5g.sband)
+ return 0;
+
+ idx &= ~BIT(2); /* short preamble */
+ } else if (sband == &dev->mt76.sband_2g.sband) {
+ offset = 4;
+ }
+
+ for (i = offset; i < len; i++) {
+ if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
+ return i;
+ }
+
+ return 0;
+}
+
+static void mt7615_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u8 *pn = status->iv;
+ u8 *hdr;
+
+ __skb_push(skb, 8);
+ memmove(skb->data, skb->data + 8, hdr_len);
+ hdr = skb->data + hdr_len;
+
+ hdr[0] = pn[5];
+ hdr[1] = pn[4];
+ hdr[2] = 0;
+ hdr[3] = 0x20 | (key_id << 6);
+ hdr[4] = pn[3];
+ hdr[5] = pn[2];
+ hdr[6] = pn[1];
+ hdr[7] = pn[0];
+
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+}
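mt7615_insert_ccmp_hdr() above re-creates the 8-byte CCMP header that the hardware strips on receive: it opens a gap behind the 802.11 header and rebuilds the header from the saved packet number, with the Ext IV bit and key index in byte 3. The same byte layout in stand-alone form (packet number and key index are made up for the example):

#include <stdint.h>
#include <stdio.h>

/*
 * Rebuild a CCMP header from a 48-bit packet number stored with
 * pn[0] as the most significant byte, mirroring how the RX path
 * saves it in status->iv[].
 */
static void build_ccmp_hdr(uint8_t hdr[8], const uint8_t pn[6],
			   uint8_t key_id)
{
	hdr[0] = pn[5];			/* PN0 */
	hdr[1] = pn[4];			/* PN1 */
	hdr[2] = 0;			/* reserved */
	hdr[3] = 0x20 | (key_id << 6);	/* Ext IV + key id */
	hdr[4] = pn[3];			/* PN2 */
	hdr[5] = pn[2];			/* PN3 */
	hdr[6] = pn[1];			/* PN4 */
	hdr[7] = pn[0];			/* PN5 */
}

int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x12, 0x34 };
	uint8_t hdr[8];
	int i;

	build_ccmp_hdr(hdr, pn, 1);
	for (i = 0; i < 8; i++)
		printf("%02x ", hdr[i]);
	printf("\n");			/* 34 12 00 60 00 00 00 00 */
	return 0;
}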
+
+int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hdr *hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ bool unicast, remove_pad, insert_ccmp_hdr = false;
+ int i, idx;
+
+ memset(status, 0, sizeof(*status));
+
+ unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
+ idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
+ status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
+
+ /* TODO: properly support DBDC */
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &dev->mt76.sband_5g.sband;
+ else
+ sband = &dev->mt76.sband_2g.sband;
+
+ if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
+ !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ rxd += 4;
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ }
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg0 = le32_to_cpu(rxd[0]);
+ u32 rxdg1 = le32_to_cpu(rxd[1]);
+ u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
+ bool cck = false;
+
+ i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
+ switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ /* fall through */
+ case MT_PHY_TYPE_OFDM:
+ i = mt7615_get_rate(dev, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
+ status->encoding = RX_ENC_VHT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case MT_PHY_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rxdg0 & MT_RXV1_HT_SHORT_GI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rxdg0 & MT_RXV1_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+
+ /* TODO: RSSI */
+ rxd += 6;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt7615_insert_ccmp_hdr(skb, key_id);
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control);
+ status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ return 0;
+}
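mt7615_mac_fill_rx() above walks a variable-length receive descriptor: four mandatory 32-bit words, then optional groups of 4, 4, 2 and 6 words whose presence is flagged in RXD0, and finally an skb_pull() over whatever was consumed. A stand-alone calculation of that descriptor length, using the MT_RXD0_NORMAL_GROUP_* bit positions defined in mac.h later in this patch:

#include <stdint.h>
#include <stdio.h>

#define RXD0_GROUP_1 (1u << 25)
#define RXD0_GROUP_2 (1u << 26)
#define RXD0_GROUP_3 (1u << 27)
#define RXD0_GROUP_4 (1u << 28)

/* Number of 32-bit descriptor words preceding the 802.11 frame. */
static int rxd_words(uint32_t rxd0)
{
	int words = 4;			/* mandatory RXD0..RXD3 */

	if (rxd0 & RXD0_GROUP_4)
		words += 4;
	if (rxd0 & RXD0_GROUP_1)
		words += 4;		/* security IV / PN */
	if (rxd0 & RXD0_GROUP_2)
		words += 2;
	if (rxd0 & RXD0_GROUP_3)
		words += 6;		/* RX vector (rate info) */
	return words;
}

int main(void)
{
	uint32_t rxd0 = RXD0_GROUP_1 | RXD0_GROUP_3;

	/* (4 + 4 + 6) * 4 = 56 bytes */
	printf("descriptor length: %d bytes\n", rxd_words(rxd0) * 4);
	return 0;
}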
+
+void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+}
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
+{
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7615_dev *dev;
+ struct mt7615_txp *txp;
+ u8 *txwi_ptr;
+
+ txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
+ txp = (struct mt7615_txp *)(txwi_ptr + MT_TXD_SIZE);
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ spin_lock_bh(&dev->token_lock);
+ t = idr_remove(&dev->token, le16_to_cpu(txp->token));
+ spin_unlock_bh(&dev->token_lock);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->skb);
+}
+
+u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+ const struct ieee80211_tx_rate *rate,
+ bool stbc, u8 *bw)
+{
+ u8 phy, nss, rate_idx;
+ u16 rateval;
+
+ *bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ nss = ieee80211_rate_get_vht_nss(rate);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *bw = 1;
+ else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ *bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ *bw = 3;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ nss = 1;
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ }
+
+ rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
+ FIELD_PREP(MT_TX_RATE_MODE, phy) |
+ FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
+
+ if (stbc && nss == 1)
+ rateval |= MT_TX_RATE_STBC;
+
+ return rateval;
+}
+
+int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int pid,
+ struct ieee80211_key_conf *key)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
+ int tx_count = 8;
+ u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0;
+ __le16 fc = hdr->frame_control;
+ u16 seqno = 0;
+ u32 val;
+
+ if (vif) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ omac_idx = mvif->omac_idx;
+ }
+
+ if (sta) {
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ tx_count = msta->rate_count;
+ }
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+ if (ieee80211_is_data(fc)) {
+ q_idx = skb_get_queue_mapping(skb);
+ p_fmt = MT_TX_TYPE_CT;
+ } else if (ieee80211_is_beacon(fc)) {
+ q_idx = MT_LMAC_BCN0;
+ p_fmt = MT_TX_TYPE_FW;
+ } else {
+ q_idx = MT_LMAC_ALTX0;
+ p_fmt = MT_TX_TYPE_CT;
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID,
+ skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+ FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+ txwi[1] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD2_MULTICAST,
+ is_multicast_ether_addr(hdr->addr1));
+ txwi[2] = cpu_to_le32(val);
+
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ txwi[4] = 0;
+ txwi[6] = 0;
+
+ if (rate->idx >= 0 && rate->count &&
+ !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+ bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
+ u8 bw;
+ u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
+
+ txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_BW, bw) |
+ FIELD_PREP(MT_TXD6_TX_RATE, rateval);
+ txwi[6] |= cpu_to_le32(val);
+
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
+
+ if (!(rate->flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS)))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ tx_count = rate->count;
+ }
+
+ if (!ieee80211_is_beacon(fc)) {
+ val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
+ FIELD_PREP(MT_TXD5_PID, pid);
+ txwi[5] = cpu_to_le32(val);
+ } else {
+ txwi[5] = 0;
+ /* use maximum tx count for beacons */
+ tx_count = 0x1f;
+ }
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ val |= MT_TXD3_SN_VALID;
+ } else if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
+
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
+ val |= MT_TXD3_SN_VALID;
+ }
+ val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
+
+ txwi[3] = cpu_to_le32(val);
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
+
+ if (key)
+ txwi[3] |= cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+
+ txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+
+ return 0;
+}
+
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7615_txp *txp;
+ u8 *txwi;
+ int i;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+ txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+ for (i = 1; i < txp->nbuf; i++)
+ dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ int i, pid, id, nbuf = tx_info->nbuf - 1;
+ u8 *txwi = (u8 *)txwi_ptr;
+ struct mt76_txwi_cache *t;
+ struct mt7615_txp *txp;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ spin_lock_bh(&dev->mt76.lock);
+ msta->rate_probe = true;
+ mt7615_mcu_set_rates(dev, msta, &info->control.rates[0],
+ msta->rates);
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
+ pid, key);
+
+ txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ }
+ txp->nbuf = nbuf;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+ txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+
+ if (!key)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+ if (vif) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ txp->bss_idx = mvif->idx;
+ }
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ spin_lock_bh(&dev->token_lock);
+ id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
+ spin_unlock_bh(&dev->token_lock);
+ if (id < 0)
+ return id;
+
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ return 0;
+}
+
+static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ struct ieee80211_tx_info *info, __le32 *txs_data)
+{
+ struct ieee80211_supported_band *sband;
+ int i, idx, count, final_idx = 0;
+ bool fixed_rate, final_mpdu, ack_timeout;
+ bool probe, ampdu, cck = false;
+ u32 final_rate, final_rate_flags, final_nss, txs;
+ u8 pid;
+
+ fixed_rate = info->status.rates[0].count;
+ probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+
+ txs = le32_to_cpu(txs_data[1]);
+ final_mpdu = txs & MT_TXS1_ACKED_MPDU;
+ ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
+
+ txs = le32_to_cpu(txs_data[3]);
+ count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
+
+ txs = le32_to_cpu(txs_data[0]);
+ pid = FIELD_GET(MT_TXS0_PID, txs);
+ final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+ ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
+
+ if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
+ return false;
+
+ if (txs & MT_TXS0_QUEUE_TIMEOUT)
+ return false;
+
+ if (!ack_timeout)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
+
+ if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
+ info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
+
+ if (fixed_rate && !probe) {
+ info->status.rates[0].count = count;
+ goto out;
+ }
+
+ for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+ int cur_count = min_t(int, count, 2 * MT7615_RATE_RETRY);
+
+ if (!i && probe) {
+ cur_count = 1;
+ } else {
+ info->status.rates[i] = sta->rates[idx];
+ idx++;
+ }
+
+ if (i && info->status.rates[i].idx < 0) {
+ info->status.rates[i - 1].count += count;
+ break;
+ }
+
+ if (!count) {
+ info->status.rates[i].idx = -1;
+ break;
+ }
+
+ info->status.rates[i].count = cur_count;
+ final_idx = i;
+ count -= cur_count;
+ }
+
+out:
+ final_rate_flags = info->status.rates[final_idx].flags;
+
+ switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ /* fall through */
+ case MT_PHY_TYPE_OFDM:
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &dev->mt76.sband_5g.sband;
+ else
+ sband = &dev->mt76.sband_2g.sband;
+ final_rate &= MT_TX_RATE_IDX;
+ final_rate = mt7615_get_rate(dev, sband, final_rate, cck);
+ final_rate_flags = 0;
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ final_rate_flags |= IEEE80211_TX_RC_MCS;
+ final_rate &= MT_TX_RATE_IDX;
+ if (final_rate > 31)
+ return false;
+ break;
+ case MT_PHY_TYPE_VHT:
+ final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
+ final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
+ final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
+ break;
+ default:
+ return false;
+ }
+
+ info->status.rates[final_idx].idx = final_rate;
+ info->status.rates[final_idx].flags = final_rate_flags;
+
+ return true;
+}
+
+static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
+ struct mt7615_sta *sta, int pid,
+ __le32 *txs_data)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ if (pid < MT_PACKET_ID_FIRST)
+ return false;
+
+ mt76_tx_status_lock(mdev, &list);
+ skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ spin_lock_bh(&dev->mt76.lock);
+ if (sta->rate_probe) {
+ mt7615_mcu_set_rates(dev, sta, NULL,
+ sta->rates);
+ sta->rate_probe = false;
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ }
+
+ mt76_tx_status_skb_done(mdev, skb, &list);
+ }
+ mt76_tx_status_unlock(mdev, &list);
+
+ return !!skb;
+}
+
+void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt7615_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ __le32 *txs_data = data;
+ u32 txs;
+ u8 wcidx;
+ u8 pid;
+
+ txs = le32_to_cpu(txs_data[0]);
+ pid = FIELD_GET(MT_TXS0_PID, txs);
+ txs = le32_to_cpu(txs_data[2]);
+ wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+
+ if (pid == MT_PACKET_ID_NO_ACK)
+ return;
+
+ if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
+
+ msta = container_of(wcid, struct mt7615_sta, wcid);
+ sta = wcid_to_sta(wcid);
+
+ if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
+ goto out;
+
+ if (wcidx >= MT7615_WTBL_STA || !sta)
+ goto out;
+
+ if (mt7615_fill_txs(dev, msta, &info, txs_data))
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ u8 i, count;
+
+ count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+ for (i = 0; i < count; i++) {
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ continue;
+
+ mt7615_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ mt76_tx_complete_skb(mdev, txwi->skb);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+ }
+ dev_kfree_skb(skb);
+}
+
+void mt7615_mac_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, mt76.mac_work.work);
+
+ mt76_tx_status_check(&dev->mt76, NULL, false);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
new file mode 100644
index 000000000000..18ad4b8a3807
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_MAC_H
+#define __MT7615_MAC_H
+
+#define MT_CT_PARSE_LEN 72
+#define MT_CT_DMA_BUF_NUM 2
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+#define MT_RXD0_NORMAL_GROUP_1 BIT(25)
+#define MT_RXD0_NORMAL_GROUP_2 BIT(26)
+#define MT_RXD0_NORMAL_GROUP_3 BIT(27)
+#define MT_RXD0_NORMAL_GROUP_4 BIT(28)
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS,
+ PKT_TYPE_TXRXV,
+ PKT_TYPE_NORMAL,
+ PKT_TYPE_RX_DUP_RFB,
+ PKT_TYPE_RX_TMR,
+ PKT_TYPE_RETRIEVE,
+ PKT_TYPE_TXRX_NOTIFY,
+ PKT_TYPE_RX_EVENT
+};
+
+#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
+#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
+#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
+#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
+#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
+#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6)
+#define MT_RXD1_NORMAL_BEACON_UC BIT(5)
+#define MT_RXD1_NORMAL_BEACON_MC BIT(4)
+#define MT_RXD1_NORMAL_BF_REPORT BIT(3)
+#define MT_RXD1_NORMAL_ADDR_TYPE GENMASK(2, 1)
+#define MT_RXD1_NORMAL_BCAST GENMASK(2, 1)
+#define MT_RXD1_NORMAL_MCAST BIT(2)
+#define MT_RXD1_NORMAL_U2M BIT(1)
+#define MT_RXD1_NORMAL_HTC_VLD BIT(0)
+
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(31)
+#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22)
+#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21)
+#define MT_RXD2_NORMAL_ICV_ERR BIT(20)
+#define MT_RXD2_NORMAL_CLM BIT(19)
+#define MT_RXD2_NORMAL_CM BIT(18)
+#define MT_RXD2_NORMAL_FCS_ERR BIT(17)
+#define MT_RXD2_NORMAL_SW_BIT BIT(16)
+#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12)
+#define MT_RXD2_NORMAL_TID GENMASK(11, 8)
+#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0)
+
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD3_NORMAL_CLS BIT(10)
+#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+
+#define MT_RXV1_ACID_DET_H BIT(31)
+#define MT_RXV1_ACID_DET_L BIT(30)
+#define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24)
+#define MT_RXV1_NUM_RX GENMASK(23, 22)
+#define MT_RXV1_HT_NO_SOUND BIT(21)
+#define MT_RXV1_HT_SMOOTH BIT(20)
+#define MT_RXV1_HT_SHORT_GI BIT(19)
+#define MT_RXV1_HT_AGGR BIT(18)
+#define MT_RXV1_VHTA1_B22 BIT(17)
+#define MT_RXV1_FRAME_MODE GENMASK(16, 15)
+#define MT_RXV1_TX_MODE GENMASK(14, 12)
+#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10)
+#define MT_RXV1_HT_AD_CODE BIT(9)
+#define MT_RXV1_HT_STBC GENMASK(8, 7)
+#define MT_RXV1_TX_RATE GENMASK(6, 0)
+
+#define MT_RXV2_SEL_ANT BIT(31)
+#define MT_RXV2_VALID_BIT BIT(30)
+#define MT_RXV2_NSTS GENMASK(29, 27)
+#define MT_RXV2_GROUP_ID GENMASK(26, 21)
+#define MT_RXV2_LENGTH GENMASK(20, 0)
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum tx_pkt_queue_idx {
+ MT_LMAC_AC00,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+ MT_LMAC_PSMP0,
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x1e
+};
+
+enum tx_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+ MT_PHY_BW_160,
+};
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_TXD0_P_IDX BIT(31)
+#define MT_TXD0_Q_IDX GENMASK(30, 26)
+#define MT_TXD0_UDP_TCP_SUM BIT(24)
+#define MT_TXD0_IP_SUM BIT(23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_OWN_MAC GENMASK(31, 26)
+#define MT_TXD1_PKT_FMT GENMASK(25, 24)
+#define MT_TXD1_TID GENMASK(23, 21)
+#define MT_TXD1_AMSDU BIT(20)
+#define MT_TXD1_UNXV BIT(19)
+#define MT_TXD1_HDR_PAD GENMASK(18, 17)
+#define MT_TXD1_TXD_LEN BIT(16)
+#define MT_TXD1_LONG_FORMAT BIT(15)
+#define MT_TXD1_HDR_FORMAT GENMASK(14, 13)
+#define MT_TXD1_HDR_INFO GENMASK(12, 8)
+#define MT_TXD1_WLAN_IDX GENMASK(7, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_TIMING_MEASURE BIT(30)
+#define MT_TXD2_BA_DISABLE BIT(29)
+#define MT_TXD2_POWER_OFFSET GENMASK(28, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_SW_POWER_MGMT BIT(13)
+#define MT_TXD5_DA_SELECT BIT(11)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_FIXED_RATE BIT(31)
+#define MT_TXD6_SGI BIT(30)
+#define MT_TXD6_LDPC BIT(29)
+#define MT_TXD6_TX_BF BIT(28)
+#define MT_TXD6_TX_RATE GENMASK(27, 16)
+#define MT_TXD6_ANT_ID GENMASK(15, 4)
+#define MT_TXD6_DYN_BW BIT(3)
+#define MT_TXD6_FIXED_BW BIT(2)
+#define MT_TXD6_BW GENMASK(1, 0)
+
+#define MT_TXD7_TYPE GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+
+#define MT_TX_RATE_STBC BIT(11)
+#define MT_TX_RATE_NSS GENMASK(10, 9)
+#define MT_TX_RATE_MODE GENMASK(8, 6)
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXP_MAX_BUF_NUM 6
+
+struct mt7615_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ u8 rept_wds_wcid;
+ u8 rsv;
+ u8 nbuf;
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed;
+
+struct mt7615_tx_free {
+ __le16 rx_byte_cnt;
+ __le16 ctrl;
+ u8 txd_cnt;
+ u8 rsv[3];
+ __le16 token[];
+} __packed;
+
+#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
+
+#define MT_TXS0_PID GENMASK(31, 24)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TXS_FORMAT BIT(13)
+#define MT_TXS0_FIXED_RATE BIT(12)
+#define MT_TXS0_TX_RATE GENMASK(11, 0)
+
+#define MT_TXS1_ANT_ID GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE GENMASK(19, 16)
+#define MT_TXS1_BW GENMASK(15, 14)
+#define MT_TXS1_I_TXBF BIT(13)
+#define MT_TXS1_E_TXBF BIT(12)
+#define MT_TXS1_TID GENMASK(11, 9)
+#define MT_TXS1_AMPDU BIT(8)
+#define MT_TXS1_ACKED_MPDU BIT(7)
+#define MT_TXS1_TX_POWER_DBM GENMASK(6, 0)
+
+#define MT_TXS2_WCID GENMASK(31, 24)
+#define MT_TXS2_RXV_SEQNO GENMASK(23, 16)
+#define MT_TXS2_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS3_LAST_TX_RATE GENMASK(31, 29)
+#define MT_TXS3_TX_COUNT GENMASK(28, 24)
+#define MT_TXS3_F1_TSSI1 GENMASK(23, 12)
+#define MT_TXS3_F1_TSSI0 GENMASK(11, 0)
+#define MT_TXS3_F0_SEQNO GENMASK(11, 0)
+
+#define MT_TXS4_F0_TIMESTAMP GENMASK(31, 0)
+#define MT_TXS4_F1_TSSI3 GENMASK(23, 12)
+#define MT_TXS4_F1_TSSI2 GENMASK(11, 0)
+
+#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0)
+#define MT_TXS5_F1_NOISE_2 GENMASK(23, 16)
+#define MT_TXS5_F1_NOISE_1 GENMASK(15, 8)
+#define MT_TXS5_F1_NOISE_0 GENMASK(7, 0)
+
+#define MT_TXS6_F1_RCPI_3 GENMASK(31, 24)
+#define MT_TXS6_F1_RCPI_2 GENMASK(23, 16)
+#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8)
+#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0)
+
+#endif
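Every definition in this header is consumed through the GENMASK()/FIELD_PREP()/FIELD_GET() bitfield helpers; mac.c packs MT_TX_RATE_* into a rate value and pulls MT_TXS0_PID and MT_TXS2_WCID back out of the TX status words the same way. The sketch below re-implements just enough of those helpers (simplified 32-bit versions using the GCC/Clang __builtin_ctz, not the kernel implementation) to show the round trip:

#include <stdint.h>
#include <stdio.h>

/* Simplified 32-bit stand-ins for the kernel helpers, illustration only. */
#define GENMASK(h, l)	((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg) \
	(((reg) & (mask)) >> __builtin_ctz(mask))

/* Masks copied from the header above. */
#define MT_TX_RATE_NSS	GENMASK(10, 9)
#define MT_TX_RATE_MODE	GENMASK(8, 6)
#define MT_TX_RATE_IDX	GENMASK(5, 0)
#define MT_TXS0_PID	GENMASK(31, 24)
#define MT_TXS2_WCID	GENMASK(31, 24)

int main(void)
{
	/* Pack a fixed rate: mode 4 (VHT), MCS 7, two spatial streams. */
	uint32_t rateval = FIELD_PREP(MT_TX_RATE_MODE, 4) |
			   FIELD_PREP(MT_TX_RATE_IDX, 7) |
			   FIELD_PREP(MT_TX_RATE_NSS, 2 - 1);

	/* Unpack TX status words the way mt7615_mac_add_txs() does. */
	uint32_t txs0 = 0x2a000000;	/* PID 0x2a in the top byte */
	uint32_t txs2 = 0x05000000;	/* WCID 5 in the top byte */

	printf("rateval = 0x%03x\n", (unsigned)rateval);
	printf("pid = %u, wcid = %u\n",
	       (unsigned)FIELD_GET(MT_TXS0_PID, txs0),
	       (unsigned)FIELD_GET(MT_TXS2_WCID, txs2));
	return 0;
}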
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
new file mode 100644
index 000000000000..80e6b211f60b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7615.h"
+
+static int mt7615_start(struct ieee80211_hw *hw)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+
+ return 0;
+}
+
+static void mt7615_stop(struct ieee80211_hw *hw)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+}
+
+static int get_omac_idx(enum nl80211_iftype type, u32 mask)
+{
+ int i;
+
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ /* ap use hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
+ if (~mask & BIT(i))
+ return i;
+
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* sta use hw bssid other than 0 */
+ for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
+ if (~mask & BIT(i))
+ return i;
+
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -1;
+}
+
+static int mt7615_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = hw->priv;
+ struct mt76_txq *mtxq;
+ int idx, ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mvif->idx = ffs(~dev->vif_mask) - 1;
+ if (mvif->idx >= MT7615_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
+ if (mvif->omac_idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* TODO: DBDC support. Use band 0 and wmm 0 for now */
+ mvif->band_idx = 0;
+ mvif->wmm_idx = 0;
+
+ ret = mt7615_mcu_set_dev_info(dev, vif, 1);
+ if (ret)
+ goto out;
+
+ dev->vif_mask |= BIT(mvif->idx);
+ dev->omac_mask |= BIT(mvif->omac_idx);
+ idx = MT7615_WTBL_RESERVED - 1 - mvif->idx;
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ mt76_txq_init(&dev->mt76, vif->txq);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
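Both pieces of interface bookkeeping above, mvif->idx taken from dev->vif_mask via ffs() and the OMAC index picked out of dev->omac_mask by get_omac_idx(), are the same idea: a bitmap of indices in use, where adding an interface claims the lowest clear bit and removing it clears the bit again. A stand-alone model of that allocator (a plain loop in place of the kernel's ffs(), no locking, purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Claim the lowest clear bit in *mask, or return -1 if all are taken. */
static int claim_idx(uint32_t *mask, int max)
{
	int idx;

	for (idx = 0; idx < max; idx++) {
		if (!(*mask & (1u << idx))) {
			*mask |= 1u << idx;
			return idx;
		}
	}
	return -1;
}

static void release_idx(uint32_t *mask, int idx)
{
	*mask &= ~(1u << idx);
}

int main(void)
{
	uint32_t vif_mask = 0;
	int a = claim_idx(&vif_mask, 4);
	int b = claim_idx(&vif_mask, 4);

	release_idx(&vif_mask, a);
	/* Prints: first=0 second=1 reused=0 */
	printf("first=%d second=%d reused=%d\n",
	       a, b, claim_idx(&vif_mask, 4));
	return 0;
}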
+
+static void mt7615_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = hw->priv;
+ int idx = mvif->sta.wcid.idx;
+
+ /* TODO: disable beacon for the bss */
+
+ mt7615_mcu_set_dev_info(dev, vif, 0);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ mt76_txq_remove(&dev->mt76, vif->txq);
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->vif_mask &= ~BIT(mvif->idx);
+ dev->omac_mask &= ~BIT(mvif->omac_idx);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static int mt7615_set_channel(struct mt7615_dev *dev,
+ struct cfg80211_chan_def *def)
+{
+ int ret;
+
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_set_channel(&dev->mt76);
+
+ ret = mt7615_mcu_set_channel(dev);
+ if (ret)
+ return ret;
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_txq_schedule_all(&dev->mt76);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+ return 0;
+}
+
+static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7615_dev *dev = hw->priv;
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ int idx = key->keyidx;
+
+ /* The hardware does not support per-STA RX GTK, fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ return mt7615_mcu_set_wtbl_key(dev, wcid->idx, key, cmd);
+}
+
+static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7615_dev *dev = hw->priv;
+ int ret = 0;
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ mutex_lock(&dev->mt76.mutex);
+
+ ieee80211_stop_queues(hw);
+ ret = mt7615_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+
+ mutex_unlock(&dev->mt76.mutex);
+ }
+ return ret;
+}
+
+static int
+mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7615_dev *dev = hw->priv;
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+
+ /* TODO: hw wmm_set 1~3 */
+ return mt7615_mcu_set_wmm(dev, wmm_queue_map[queue], params);
+}
+
+static void mt7615_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mt7615_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->mt76.rxfilter &= ~(_hw); \
+ dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+}
+
+static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ /* TODO: sta mode connect/disconnect
+ * BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID
+ */
+
+ /* TODO: update beacon content
+ * BSS_CHANGED_BEACON
+ */
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (info->enable_beacon) {
+ mt7615_mcu_set_bss_info(dev, vif, 1);
+ mt7615_mcu_add_wtbl_bmc(dev, vif);
+ mt7615_mcu_set_sta_rec_bmc(dev, vif, 1);
+ mt7615_mcu_set_bcn(dev, vif, 1);
+ } else {
+ mt7615_mcu_set_sta_rec_bmc(dev, vif, 0);
+ mt7615_mcu_del_wtbl_bmc(dev, vif);
+ mt7615_mcu_set_bss_info(dev, vif, 0);
+ mt7615_mcu_set_bcn(dev, vif, 0);
+ }
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ int idx;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+
+ mt7615_mcu_add_wtbl(dev, vif, sta);
+ mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
+
+ return 0;
+}
+
+void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ if (sta->ht_cap.ht_supported)
+ mt7615_mcu_set_ht_cap(dev, vif, sta);
+}
+
+void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
+ mt7615_mcu_del_wtbl(dev, vif, sta);
+}
+
+static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = hw->priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
+ int i;
+
+ spin_lock_bh(&dev->mt76.lock);
+ for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
+ msta->rates[i].idx = sta_rates->rate[i].idx;
+ msta->rates[i].count = sta_rates->rate[i].count;
+ msta->rates[i].flags = sta_rates->rate[i].flags;
+
+ if (msta->rates[i].idx < 0 || !msta->rates[i].count)
+ break;
+ }
+ msta->n_rates = i;
+ mt7615_mcu_set_rates(dev, msta, NULL, msta->rates);
+ msta->rate_probe = false;
+ spin_unlock_bh(&dev->mt76.lock);
+}
+
+static void mt7615_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mt7615_dev *dev = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+ if (control->sta) {
+ struct mt7615_sta *sta;
+
+ sta = (struct mt7615_sta *)control->sta->drv_priv;
+ wcid = &sta->wcid;
+ }
+
+ if (vif && !control->sta) {
+ struct mt7615_vif *mvif;
+
+ mvif = (struct mt7615_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7615_mcu_set_rts_thresh(dev, val);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7615_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
+ params->buf_size);
+ mt7615_mcu_set_rx_ba(dev, params, 1);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt7615_mcu_set_rx_ba(dev, params, 0);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ mt7615_mcu_set_tx_ba(dev, params, 1);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ mt7615_mcu_set_tx_ba(dev, params, 0);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ mt7615_mcu_set_tx_ba(dev, params, 0);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt7615_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt7615_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+const struct ieee80211_ops mt7615_ops = {
+ .tx = mt7615_tx,
+ .start = mt7615_start,
+ .stop = mt7615_stop,
+ .add_interface = mt7615_add_interface,
+ .remove_interface = mt7615_remove_interface,
+ .config = mt7615_config,
+ .conf_tx = mt7615_conf_tx,
+ .configure_filter = mt7615_configure_filter,
+ .bss_info_changed = mt7615_bss_info_changed,
+ .sta_state = mt76_sta_state,
+ .set_key = mt7615_set_key,
+ .ampdu_action = mt7615_ampdu_action,
+ .set_rts_threshold = mt7615_set_rts_threshold,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
+ .sw_scan_start = mt7615_sw_scan,
+ .sw_scan_complete = mt7615_sw_scan_complete,
+ .release_buffered_frames = mt76_release_buffered_frames,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
new file mode 100644
index 000000000000..ea67c6022fe6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -0,0 +1,1655 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/firmware.h>
+#include "mt7615.h"
+#include "mcu.h"
+#include "mac.h"
+#include "eeprom.h"
+
+struct mt7615_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+} __packed;
+
+struct mt7615_fw_trailer {
+ __le32 addr;
+ u8 chip_id;
+ u8 feature_set;
+ u8 eco_code;
+ char fw_ver[10];
+ char build_date[15];
+ __le32 len;
+} __packed;
+
+#define MCU_PATCH_ADDRESS 0x80000
+
+#define N9_REGION_NUM 2
+#define CR4_REGION_NUM 1
+
+#define IMG_CRC_LEN 4
+
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int query, int dest, int *wait_seq)
+{
+ struct mt7615_mcu_txd *mcu_txd;
+ u8 seq, q_idx, pkt_fmt;
+ enum mt76_txq_id qid;
+ u32 val;
+ __le32 *txd;
+
+ if (!skb)
+ return -EINVAL;
+
+ seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+
+ mcu_txd = (struct mt7615_mcu_txd *)skb_push(skb,
+ sizeof(struct mt7615_mcu_txd));
+ memset(mcu_txd, 0, sizeof(struct mt7615_mcu_txd));
+
+ if (cmd != -MCU_CMD_FW_SCATTER) {
+ q_idx = MT_TX_MCU_PORT_RX_Q0;
+ pkt_fmt = MT_TX_TYPE_CMD;
+ } else {
+ q_idx = MT_TX_MCU_PORT_RX_FWDL;
+ pkt_fmt = MT_TX_TYPE_FW;
+ }
+
+ txd = mcu_txd->txd;
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txd[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD) |
+ FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt);
+ txd[1] = cpu_to_le32(val);
+
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx));
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+
+ if (cmd < 0) {
+ mcu_txd->cid = -cmd;
+ } else {
+ mcu_txd->cid = MCU_CMD_EXT_CID;
+ mcu_txd->ext_cid = cmd;
+ if (query != MCU_Q_NA)
+ mcu_txd->ext_cid_ack = 1;
+ }
+
+ mcu_txd->set_query = query;
+ mcu_txd->s2d_index = dest;
+
+ if (wait_seq)
+ *wait_seq = seq;
+
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state))
+ qid = MT_TXQ_MCU;
+ else
+ qid = MT_TXQ_FWDL;
+
+ return mt76_tx_queue_skb_raw(dev, qid, skb, 0);
+}
+
+static int mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int query, int dest,
+ struct sk_buff **skb_ret)
+{
+ unsigned long expires = jiffies + 10 * HZ;
+ struct mt7615_mcu_rxd *rxd;
+ int ret, seq;
+
+ mutex_lock(&dev->mt76.mmio.mcu.mutex);
+
+ ret = __mt7615_mcu_msg_send(dev, skb, cmd, query, dest, &seq);
+ if (ret)
+ goto out;
+
+ while (1) {
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
+ if (!skb) {
+ dev_err(dev->mt76.dev, "Message %d (seq %d) timeout\n",
+ cmd, seq);
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ rxd = (struct mt7615_mcu_rxd *)skb->data;
+ if (seq != rxd->seq)
+ continue;
+
+ if (skb_ret) {
+ int hdr_len = sizeof(*rxd);
+
+ if (!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mt76.state))
+ hdr_len -= 4;
+ skb_pull(skb, hdr_len);
+ *skb_ret = skb;
+ } else {
+ dev_kfree_skb(skb);
+ }
+
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->mt76.mmio.mcu.mutex);
+
+ return ret;
+}
+
+static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
+ u32 len, u32 mode)
+{
+ struct {
+ __le32 addr;
+ __le32 len;
+ __le32 mode;
+ } req = {
+ .addr = cpu_to_le32(addr),
+ .len = cpu_to_le32(len),
+ .mode = cpu_to_le32(mode),
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+}
+
+static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
+ int len)
+{
+ struct sk_buff *skb;
+ int ret = 0;
+
+ while (len > 0) {
+ int cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd),
+ len);
+
+ skb = mt7615_mcu_msg_alloc(data, cur_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = __mt7615_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+ if (ret)
+ break;
+
+ data += cur_len;
+ len -= cur_len;
+ }
+
+ return ret;
+}
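mt7615_mcu_send_firmware() above streams the image to the MCU in scatter chunks sized so that payload plus MCU TX descriptor stays within a 4096-byte message. The chunking itself is a bounded walk over the buffer; a stand-alone sketch of that loop (the chunk limit and send callback are stand-ins for the real message path):

#include <stddef.h>
#include <stdio.h>

#define MSG_SIZE	4096
#define TXD_SIZE	64	/* stand-in for sizeof(struct mt7615_mcu_txd) */

/* Stand-in for __mt7615_mcu_msg_send(): just report the chunk size. */
static int send_chunk(const void *data, size_t len)
{
	(void)data;
	printf("sending %zu bytes\n", len);
	return 0;
}

static int send_firmware(const void *data, size_t len)
{
	const char *p = data;
	int ret = 0;

	while (len > 0) {
		size_t cur = len < MSG_SIZE - TXD_SIZE ?
			     len : MSG_SIZE - TXD_SIZE;

		ret = send_chunk(p, cur);
		if (ret)
			break;
		p += cur;
		len -= cur;
	}
	return ret;
}

int main(void)
{
	static char image[10000];

	/* Prints three chunks: 4032, 4032 and 1936 bytes. */
	return send_firmware(image, sizeof(image));
}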
+
+static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
+ u32 option)
+{
+ struct {
+ __le32 option;
+ __le32 addr;
+ } req = {
+ .option = cpu_to_le32(option),
+ .addr = cpu_to_le32(addr),
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+}
+
+static int mt7615_mcu_restart(struct mt7615_dev *dev)
+{
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(NULL, 0);
+
+ return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+}
+
+static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
+{
+ struct {
+ __le32 operation;
+ } req = {
+ .operation = cpu_to_le32(get ? PATCH_SEM_GET :
+ PATCH_SEM_RELEASE),
+ };
+ struct event {
+ u8 status;
+ u8 reserved[3];
+ } *resp;
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ struct sk_buff *skb_ret;
+ int ret;
+
+ ret = mt7615_mcu_msg_send(dev, skb, -MCU_CMD_PATCH_SEM_CONTROL,
+ MCU_Q_NA, MCU_S2D_H2N, &skb_ret);
+ if (ret)
+ goto out;
+
+ resp = (struct event *)(skb_ret->data);
+ ret = resp->status;
+ dev_kfree_skb(skb_ret);
+
+out:
+ return ret;
+}
+
+static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
+{
+ struct {
+ u8 check_crc;
+ u8 reserved[3];
+ } req = {
+ .check_crc = 0,
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_PATCH_FINISH_REQ,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+}
+
+static int mt7615_driver_own(struct mt7615_dev *dev)
+{
+ mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
+ MT_CFG_LPCR_HOST_FW_OWN, 0, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for driver own\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mt7615_load_patch(struct mt7615_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt7615_patch_hdr *hdr;
+ const char *firmware = MT7615_ROM_PATCH;
+ int len, ret, sem;
+
+ sem = mt7615_mcu_patch_sem_ctrl(dev, 1);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = request_firmware(&fw, firmware, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7615_patch_hdr *)(fw->data);
+
+ dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+ len = fw->size - sizeof(*hdr);
+
+ ret = mt7615_mcu_init_download(dev, MCU_PATCH_ADDRESS, len,
+ DL_MODE_NEED_RSP);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + sizeof(*hdr), len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_start_patch(dev);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+ release_firmware(fw);
+
+ sem = mt7615_mcu_patch_sem_ctrl(dev, 0);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+ ret = -EAGAIN;
+ dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+ break;
+ }
+
+ return ret;
+}
+
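+/* translate the per-region feature_set from the firmware trailer into DL_MODE_* flags */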
+static u32 gen_dl_mode(u8 feature_set, bool is_cr4)
+{
+ u32 ret = 0;
+
+ ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
+ (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
+
+static int mt7615_load_ram(struct mt7615_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt7615_fw_trailer *hdr;
+ const char *n9_firmware = MT7615_FIRMWARE_N9;
+ const char *cr4_firmware = MT7615_FIRMWARE_CR4;
+ u32 n9_ilm_addr, offset;
+ int i, ret;
+
+ ret = request_firmware(&fw, n9_firmware, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < N9_REGION_NUM * sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
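+ /* the per-region trailers are appended at the end of the N9 image */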
+ hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
+ N9_REGION_NUM * sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ n9_ilm_addr = le32_to_cpu(hdr->addr);
+
+ for (offset = 0, i = 0; i < N9_REGION_NUM; i++) {
+ u32 len, addr, mode;
+
+ len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
+ addr = le32_to_cpu(hdr[i].addr);
+ mode = gen_dl_mode(hdr[i].feature_set, false);
+
+ ret = mt7615_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ offset += len;
+ }
+
+ ret = mt7615_mcu_start_firmware(dev, n9_ilm_addr, FW_START_OVERRIDE);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
+ goto out;
+ }
+
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, cr4_firmware, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < CR4_REGION_NUM * sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
+ CR4_REGION_NUM * sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "CR4 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ for (offset = 0, i = 0; i < CR4_REGION_NUM; i++) {
+ u32 len, addr, mode;
+
+ len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
+ addr = le32_to_cpu(hdr[i].addr);
+ mode = gen_dl_mode(hdr[i].feature_set, true);
+
+ ret = mt7615_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ offset += len;
+ }
+
+ ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int mt7615_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+ u32 val;
+
+ val = mt76_get_field(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE);
+
+ if (val != FW_STATE_FW_DOWNLOAD) {
+ dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_ram(dev);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE,
+ FIELD_PREP(MT_TOP_MISC2_FW_STATE,
+ FW_STATE_CR4_RDY), 500)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+
+ return 0;
+}
+
+int mt7615_mcu_init(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt7615_driver_own(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+ return 0;
+}
+
+void mt7615_mcu_exit(struct mt7615_dev *dev)
+{
+ mt7615_mcu_restart(dev);
+ mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
+ skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+}
+
+int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
+{
+ struct req_data {
+ u8 val;
+ } __packed;
+ struct {
+ u8 buffer_mode;
+ u8 pad;
+ u16 len;
+ } __packed req_hdr = {
+ .buffer_mode = 1,
+ .len = __MT_EE_MAX - MT_EE_NIC_CONF_0,
+ };
+ struct sk_buff *skb;
+ struct req_data *data;
+ const int size = (__MT_EE_MAX - MT_EE_NIC_CONF_0) *
+ sizeof(struct req_data);
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ u16 off;
+
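+ /* push the EEPROM content from MT_EE_NIC_CONF_0 onward in buffer mode */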
+ skb = mt7615_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+ data = (struct req_data *)skb_put(skb, size);
+ memset(data, 0, size);
+
+ for (off = MT_EE_NIC_CONF_0; off < __MT_EE_MAX; off++)
+ data[off - MT_EE_NIC_CONF_0].val = eep[off];
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_init_mac(struct mt7615_dev *dev)
+{
+ struct {
+ u8 enable;
+ u8 band;
+ u8 rsv[2];
+ } __packed req = {
+ .enable = 1,
+ .band = 0,
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_MAC_INIT_CTRL,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
+{
+ struct {
+ u8 prot_idx;
+ u8 band;
+ u8 rsv[2];
+ __le32 len_thresh;
+ __le32 pkt_thresh;
+ } __packed req = {
+ .prot_idx = 1,
+ .band = 0,
+ .len_thresh = cpu_to_le32(val),
+ .pkt_thresh = cpu_to_le32(0x2),
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_PROTECT_CTRL,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+ struct req_data {
+ u8 number;
+ u8 rsv[3];
+ u8 queue;
+ u8 valid;
+ u8 aifs;
+ u8 cw_min;
+ __le16 cw_max;
+ __le16 txop;
+ } __packed req = {
+ .number = 1,
+ .queue = queue,
+ .valid = WMM_AIFS_SET | WMM_TXOP_SET,
+ .aifs = params->aifs,
+ .txop = cpu_to_le16(params->txop),
+ };
+ struct sk_buff *skb;
+
+ if (params->cw_min) {
+ req.valid |= WMM_CW_MIN_SET;
+ req.cw_min = params->cw_min;
+ }
+ if (params->cw_max) {
+ req.valid |= WMM_CW_MAX_SET;
+ req.cw_max = cpu_to_le16(params->cw_max);
+ }
+
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_EDCA_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
+{
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 rsv[3];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = 0,
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_PM_STATE_CTRL,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+static int __mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
+ struct dev_info *dev_info)
+{
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed req_hdr = {0};
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 band_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed;
+ struct sk_buff *skb;
+ u16 tlv_num = 0;
+
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) +
+ sizeof(struct req_tlv));
+ skb_reserve(skb, sizeof(req_hdr));
+
+ if (dev_info->feature & BIT(DEV_INFO_ACTIVE)) {
+ struct req_tlv req_tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(req_tlv)),
+ .active = dev_info->enable,
+ .band_idx = dev_info->band_idx,
+ };
+ memcpy(req_tlv.omac_addr, dev_info->omac_addr, ETH_ALEN);
+ memcpy(skb_put(skb, sizeof(req_tlv)), &req_tlv,
+ sizeof(req_tlv));
+ tlv_num++;
+ }
+
+ req_hdr.omac_idx = dev_info->omac_idx;
+ req_hdr.band_idx = dev_info->band_idx;
+ req_hdr.tlv_num = cpu_to_le16(tlv_num);
+ req_hdr.is_tlv_append = tlv_num ? 1 : 0;
+
+ memcpy(skb_push(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct dev_info dev_info = {0};
+
+ dev_info.omac_idx = mvif->omac_idx;
+ memcpy(dev_info.omac_addr, vif->addr, ETH_ALEN);
+ dev_info.band_idx = mvif->band_idx;
+ dev_info.enable = en;
+ dev_info.feature = BIT(DEV_INFO_ACTIVE);
+
+ return __mt7615_mcu_set_dev_info(dev, &dev_info);
+}
+
+static void bss_info_omac_handler(struct mt7615_dev *dev,
+ struct bss_info *bss_info,
+ struct sk_buff *skb)
+{
+ struct bss_info_omac tlv = {0};
+
+ tlv.tag = cpu_to_le16(BSS_INFO_OMAC);
+ tlv.len = cpu_to_le16(sizeof(tlv));
+ tlv.hw_bss_idx = (bss_info->omac_idx > EXT_BSSID_START) ?
+ HW_BSSID_0 : bss_info->omac_idx;
+ tlv.omac_idx = bss_info->omac_idx;
+ tlv.band_idx = bss_info->band_idx;
+ tlv.conn_type = cpu_to_le32(bss_info->conn_type);
+
+ memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+}
+
+static void bss_info_basic_handler(struct mt7615_dev *dev,
+ struct bss_info *bss_info,
+ struct sk_buff *skb)
+{
+ struct bss_info_basic tlv = {0};
+
+ tlv.tag = cpu_to_le16(BSS_INFO_BASIC);
+ tlv.len = cpu_to_le16(sizeof(tlv));
+ tlv.network_type = cpu_to_le32(bss_info->network_type);
+ tlv.active = bss_info->enable;
+ tlv.bcn_interval = cpu_to_le16(bss_info->bcn_interval);
+ memcpy(tlv.bssid, bss_info->bssid, ETH_ALEN);
+ tlv.wmm_idx = bss_info->wmm_idx;
+ tlv.dtim_period = bss_info->dtim_period;
+ tlv.bmc_tx_wlan_idx = bss_info->bmc_tx_wlan_idx;
+
+ memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+}
+
+static void bss_info_ext_bss_handler(struct mt7615_dev *dev,
+ struct bss_info *bss_info,
+ struct sk_buff *skb)
+{
+/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (3906us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+ struct bss_info_ext_bss tlv = {0};
+ int ext_bss_idx;
+
+ ext_bss_idx = bss_info->omac_idx - EXT_BSSID_START;
+
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv.tag = cpu_to_le16(BSS_INFO_EXT_BSS);
+ tlv.len = cpu_to_le16(sizeof(tlv));
+ tlv.mbss_tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+
+ memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+}
+
+static struct bss_info_tag_handler bss_info_tag_handler[] = {
+ {BSS_INFO_OMAC, sizeof(struct bss_info_omac), bss_info_omac_handler},
+ {BSS_INFO_BASIC, sizeof(struct bss_info_basic), bss_info_basic_handler},
+ {BSS_INFO_RF_CH, sizeof(struct bss_info_rf_ch), NULL},
+ {BSS_INFO_PM, 0, NULL},
+ {BSS_INFO_UAPSD, 0, NULL},
+ {BSS_INFO_ROAM_DETECTION, 0, NULL},
+ {BSS_INFO_LQ_RM, 0, NULL},
+ {BSS_INFO_EXT_BSS, sizeof(struct bss_info_ext_bss), bss_info_ext_bss_handler},
+ {BSS_INFO_BMC_INFO, 0, NULL},
+ {BSS_INFO_SYNC_MODE, 0, NULL},
+ {BSS_INFO_RA, 0, NULL},
+ {BSS_INFO_MAX_NUM, 0, NULL},
+};
+
+static int __mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
+ struct bss_info *bss_info)
+{
+ struct req_hdr {
+ u8 bss_idx;
+ u8 rsv0;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv1[3];
+ } __packed req_hdr = {0};
+ struct sk_buff *skb;
+ u16 tlv_num = 0;
+ u32 size = 0;
+ int i;
+
+ for (i = 0; i < BSS_INFO_MAX_NUM; i++)
+ if ((BIT(bss_info_tag_handler[i].tag) & bss_info->feature) &&
+ bss_info_tag_handler[i].handler) {
+ tlv_num++;
+ size += bss_info_tag_handler[i].len;
+ }
+
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + size);
+
+ req_hdr.bss_idx = bss_info->bss_idx;
+ req_hdr.tlv_num = cpu_to_le16(tlv_num);
+ req_hdr.is_tlv_append = tlv_num ? 1 : 0;
+
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+
+ for (i = 0; i < BSS_INFO_MAX_NUM; i++)
+ if ((BIT(bss_info_tag_handler[i].tag) & bss_info->feature) &&
+ bss_info_tag_handler[i].handler)
+ bss_info_tag_handler[i].handler(dev, bss_info, skb);
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BSS_INFO_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+static void bss_info_convert_vif_type(enum nl80211_iftype type,
+ u32 *network_type, u32 *conn_type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ if (network_type)
+ *network_type = NETWORK_INFRA;
+ if (conn_type)
+ *conn_type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (network_type)
+ *network_type = NETWORK_INFRA;
+ if (conn_type)
+ *conn_type = CONNECTION_INFRA_STA;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct bss_info bss_info = {0};
+ u8 bmc_tx_wlan_idx = 0;
+ u32 network_type = 0, conn_type = 0;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ bmc_tx_wlan_idx = mvif->sta.wcid.idx;
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ /* find the unicast entry for sta mode bmc tx */
+ struct ieee80211_sta *ap_sta;
+ struct mt7615_sta *msta;
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!ap_sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7615_sta *)ap_sta->drv_priv;
+ bmc_tx_wlan_idx = msta->wcid.idx;
+
+ rcu_read_unlock();
+ } else {
+ WARN_ON(1);
+ }
+
+ bss_info_convert_vif_type(vif->type, &network_type, &conn_type);
+
+ bss_info.bss_idx = mvif->idx;
+ memcpy(bss_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss_info.omac_idx = mvif->omac_idx;
+ bss_info.band_idx = mvif->band_idx;
+ bss_info.bmc_tx_wlan_idx = bmc_tx_wlan_idx;
+ bss_info.wmm_idx = mvif->wmm_idx;
+ bss_info.network_type = network_type;
+ bss_info.conn_type = conn_type;
+ bss_info.bcn_interval = vif->bss_conf.beacon_int;
+ bss_info.dtim_period = vif->bss_conf.dtim_period;
+ bss_info.enable = en;
+ bss_info.feature = BIT(BSS_INFO_BASIC);
+ if (en) {
+ bss_info.feature |= BIT(BSS_INFO_OMAC);
+ if (mvif->omac_idx > EXT_BSSID_START)
+ bss_info.feature |= BIT(BSS_INFO_EXT_BSS);
+ }
+
+ return __mt7615_mcu_set_bss_info(dev, &bss_info);
+}
+
+static int __mt7615_mcu_set_wtbl(struct mt7615_dev *dev, int wlan_idx,
+ int operation, void *buf, int buf_len)
+{
+ struct req_hdr {
+ u8 wlan_idx;
+ u8 operation;
+ __le16 tlv_num;
+ u8 rsv[4];
+ } __packed req_hdr = {0};
+ struct tlv {
+ __le16 tag;
+ __le16 len;
+ u8 buf[0];
+ } __packed;
+ struct sk_buff *skb;
+ u16 tlv_num = 0;
+ int offset = 0;
+
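+ /* walk the caller-provided buffer to count the TLVs for the request header */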
+ while (offset < buf_len) {
+ struct tlv *tlv = (struct tlv *)((u8 *)buf + offset);
+
+ tlv_num++;
+ offset += tlv->len;
+ }
+
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + buf_len);
+
+ req_hdr.wlan_idx = wlan_idx;
+ req_hdr.operation = operation;
+ req_hdr.tlv_num = cpu_to_le16(tlv_num);
+
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+
+ if (buf && buf_len)
+ memcpy(skb_put(skb, buf_len), buf, buf_len);
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_WTBL_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+static enum mt7615_cipher_type
+mt7615_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ if (!key || key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(key_data + 16, key->key + 24, 8);
+ memcpy(key_data + 24, key->key + 16, 8);
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct wtbl_sec_key wtbl_sec_key = {0};
+ int buf_len = sizeof(struct wtbl_sec_key);
+ u8 cipher;
+
+ wtbl_sec_key.tag = cpu_to_le16(WTBL_SEC_KEY);
+ wtbl_sec_key.len = cpu_to_le16(buf_len);
+ wtbl_sec_key.add = cmd;
+
+ if (cmd == SET_KEY) {
+ cipher = mt7615_get_key_info(key, wtbl_sec_key.key_material);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ wtbl_sec_key.cipher_id = cipher;
+ wtbl_sec_key.key_id = key->keyidx;
+ wtbl_sec_key.key_len = key->keylen;
+ } else {
+ wtbl_sec_key.key_len = sizeof(wtbl_sec_key.key_material);
+ }
+
+ return __mt7615_mcu_set_wtbl(dev, wcid, WTBL_SET, &wtbl_sec_key,
+ buf_len);
+}
+
+int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_generic *wtbl_generic;
+ struct wtbl_rx *wtbl_rx;
+ int buf_len, ret;
+ u8 *buf;
+
+ buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wtbl_generic = (struct wtbl_generic *)buf;
+ buf_len = sizeof(*wtbl_generic);
+ wtbl_generic->tag = cpu_to_le16(WTBL_GENERIC);
+ wtbl_generic->len = cpu_to_le16(buf_len);
+ eth_broadcast_addr(wtbl_generic->peer_addr);
+ wtbl_generic->muar_idx = 0xe;
+
+ wtbl_rx = (struct wtbl_rx *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_rx);
+ wtbl_rx->tag = cpu_to_le16(WTBL_RX);
+ wtbl_rx->len = cpu_to_le16(sizeof(*wtbl_rx));
+ wtbl_rx->rca1 = 1;
+ wtbl_rx->rca2 = 1;
+ wtbl_rx->rv = 1;
+
+ ret = __mt7615_mcu_set_wtbl(dev, mvif->sta.wcid.idx,
+ WTBL_RESET_AND_SET, buf, buf_len);
+
+ kfree(buf);
+ return ret;
+}
+
+int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ return __mt7615_mcu_set_wtbl(dev, mvif->sta.wcid.idx,
+ WTBL_RESET_AND_SET, NULL, 0);
+}
+
+int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct wtbl_generic *wtbl_generic;
+ struct wtbl_rx *wtbl_rx;
+ int buf_len, ret;
+ u8 *buf;
+
+ buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wtbl_generic = (struct wtbl_generic *)buf;
+ buf_len = sizeof(*wtbl_generic);
+ wtbl_generic->tag = cpu_to_le16(WTBL_GENERIC);
+ wtbl_generic->len = cpu_to_le16(buf_len);
+ memcpy(wtbl_generic->peer_addr, sta->addr, ETH_ALEN);
+ wtbl_generic->muar_idx = mvif->omac_idx;
+ wtbl_generic->qos = sta->wme;
+ wtbl_generic->partial_aid = cpu_to_le16(sta->aid);
+
+ wtbl_rx = (struct wtbl_rx *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_rx);
+ wtbl_rx->tag = cpu_to_le16(WTBL_RX);
+ wtbl_rx->len = cpu_to_le16(sizeof(*wtbl_rx));
+ wtbl_rx->rca1 = (vif->type == NL80211_IFTYPE_AP) ? 0 : 1;
+ wtbl_rx->rca2 = 1;
+ wtbl_rx->rv = 1;
+
+ ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx,
+ WTBL_RESET_AND_SET, buf, buf_len);
+
+ kfree(buf);
+ return ret;
+}
+
+int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ return __mt7615_mcu_set_wtbl(dev, msta->wcid.idx,
+ WTBL_RESET_AND_SET, NULL, 0);
+}
+
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
+{
+ return __mt7615_mcu_set_wtbl(dev, 0, WTBL_RESET_ALL, NULL, 0);
+}
+
+static int __mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, int bss_idx,
+ int wlan_idx, int muar_idx, void *buf,
+ int buf_len)
+{
+ struct req_hdr {
+ u8 bss_idx;
+ u8 wlan_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 muar_idx;
+ u8 rsv[2];
+ } __packed req_hdr = {0};
+ struct tlv {
+ __le16 tag;
+ __le16 len;
+ u8 buf[0];
+ } __packed;
+ struct sk_buff *skb;
+ u16 tlv_num = 0;
+ int offset = 0;
+
+ while (offset < buf_len) {
+ struct tlv *tlv = (struct tlv *)((u8 *)buf + offset);
+
+ tlv_num++;
+ offset += tlv->len;
+ }
+
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + buf_len);
+
+ req_hdr.bss_idx = bss_idx;
+ req_hdr.wlan_idx = wlan_idx;
+ req_hdr.tlv_num = cpu_to_le16(tlv_num);
+ req_hdr.is_tlv_append = tlv_num ? 1 : 0;
+ req_hdr.muar_idx = muar_idx;
+
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+
+ if (buf && buf_len)
+ memcpy(skb_put(skb, buf_len), buf, buf_len);
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_STA_REC_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool en)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct sta_rec_basic sta_rec_basic = {0};
+ int buf_len = sizeof(struct sta_rec_basic);
+
+ sta_rec_basic.tag = cpu_to_le16(STA_REC_BASIC);
+ sta_rec_basic.len = cpu_to_le16(buf_len);
+ sta_rec_basic.conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
+ eth_broadcast_addr(sta_rec_basic.peer_addr);
+ if (en) {
+ sta_rec_basic.conn_state = CONN_STATE_PORT_SECURE;
+ sta_rec_basic.extra_info =
+ cpu_to_le16(EXTRA_INFO_VER | EXTRA_INFO_NEW);
+ } else {
+ sta_rec_basic.conn_state = CONN_STATE_DISCONNECT;
+ sta_rec_basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+ }
+
+ return __mt7615_mcu_set_sta_rec(dev, mvif->idx, mvif->sta.wcid.idx,
+ mvif->omac_idx, &sta_rec_basic,
+ buf_len);
+}
+
+static void sta_rec_convert_vif_type(enum nl80211_iftype type, u32 *conn_type)
+{
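+ /* the sta_rec describes the peer: an AP vif connects to STAs, a STA vif to its AP */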
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ if (conn_type)
+ *conn_type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (conn_type)
+ *conn_type = CONNECTION_INFRA_AP;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool en)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct sta_rec_basic sta_rec_basic = {0};
+ int buf_len = sizeof(struct sta_rec_basic);
+ u32 conn_type = 0;
+
+ sta_rec_convert_vif_type(vif->type, &conn_type);
+
+ sta_rec_basic.tag = cpu_to_le16(STA_REC_BASIC);
+ sta_rec_basic.len = cpu_to_le16(buf_len);
+ sta_rec_basic.conn_type = cpu_to_le32(conn_type);
+ sta_rec_basic.qos = sta->wme;
+ sta_rec_basic.aid = cpu_to_le16(sta->aid);
+ memcpy(sta_rec_basic.peer_addr, sta->addr, ETH_ALEN);
+
+ if (en) {
+ sta_rec_basic.conn_state = CONN_STATE_PORT_SECURE;
+ sta_rec_basic.extra_info =
+ cpu_to_le16(EXTRA_INFO_VER | EXTRA_INFO_NEW);
+ } else {
+ sta_rec_basic.conn_state = CONN_STATE_DISCONNECT;
+ sta_rec_basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+ }
+
+ return __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
+ mvif->omac_idx, &sta_rec_basic,
+ buf_len);
+}
+
+int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en)
+{
+ struct req {
+ u8 omac_idx;
+ u8 enable;
+ u8 wlan_idx;
+ u8 band_idx;
+ u8 pkt_type;
+ u8 need_pre_tbtt_int;
+ __le16 csa_ie_pos;
+ __le16 pkt_len;
+ __le16 tim_ie_pos;
+ u8 pkt[512];
+ u8 csa_cnt;
+ /* bss color change */
+ u8 bcc_cnt;
+ __le16 bcc_ie_pos;
+ } __packed req = {0};
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct sk_buff *skb;
+ u16 tim_off, tim_len;
+
+ skb = ieee80211_beacon_get_tim(mt76_hw(dev), vif, &tim_off, &tim_len);
+
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
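+ /* prepend the hardware txwi, then copy the beacon template right after it */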
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
+ 0, NULL);
+ memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ dev_kfree_skb(skb);
+
+ req.omac_idx = mvif->omac_idx;
+ req.enable = en;
+ req.wlan_idx = wcid->idx;
+ req.band_idx = mvif->band_idx;
+ /* pkt_type: 0 for bcn, 1 for tim */
+ req.pkt_type = 0;
+ req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + tim_off);
+
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BCN_OFFLOAD,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_channel(struct mt7615_dev *dev)
+{
+ struct cfg80211_chan_def *chdef = &dev->mt76.chandef;
+ struct {
+ u8 control_chan;
+ u8 center_chan;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams_mask;
+ u8 switch_reason;
+ u8 band_idx;
+ /* for 80+80 only */
+ u8 center_chan2;
+ __le16 cac_case;
+ u8 channel_band;
+ u8 rsv0;
+ __le32 outband_freq;
+ u8 txpower_drop;
+ u8 rsv1[3];
+ u8 txpower_sku[53];
+ u8 rsv2[3];
+ } req = {0};
+ struct sk_buff *skb;
+ int ret;
+
+ req.control_chan = chdef->chan->hw_value;
+ req.center_chan = ieee80211_frequency_to_channel(chdef->center_freq1);
+ req.tx_streams = (dev->mt76.chainmask >> 8) & 0xf;
+ req.rx_streams_mask = dev->mt76.antenna_mask;
+ req.switch_reason = CH_SWITCH_NORMAL;
+ req.band_idx = 0;
+ req.center_chan2 = ieee80211_frequency_to_channel(chdef->center_freq2);
+ req.txpower_drop = 0;
+
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ req.bw = CMD_CBW_40MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ req.bw = CMD_CBW_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ req.bw = CMD_CBW_8080MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ req.bw = CMD_CBW_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ req.bw = CMD_CBW_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ req.bw = CMD_CBW_10MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ req.bw = CMD_CBW_20MHZ;
+ }
+
+ memset(req.txpower_sku, 0x3f, 49);
+
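+ /* the same descriptor is sent twice: channel switch first, then RX path setup */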
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ ret = mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+ if (ret)
+ return ret;
+
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_RX_PATH,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_ht *wtbl_ht;
+ struct wtbl_raw *wtbl_raw;
+ struct sta_rec_ht *sta_rec_ht;
+ int buf_len, ret;
+ u32 msk, val = 0;
+ u8 *buf;
+
+ buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* ht basic */
+ buf_len = sizeof(*wtbl_ht);
+ wtbl_ht = (struct wtbl_ht *)buf;
+ wtbl_ht->tag = cpu_to_le16(WTBL_HT);
+ wtbl_ht->len = cpu_to_le16(sizeof(*wtbl_ht));
+ wtbl_ht->ht = 1;
+ wtbl_ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ wtbl_ht->af = sta->ht_cap.ampdu_factor;
+ wtbl_ht->mm = sta->ht_cap.ampdu_density;
+
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ val |= MT_WTBL_W5_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ val |= MT_WTBL_W5_SHORT_GI_40;
+
+ /* vht basic */
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *wtbl_vht;
+
+ wtbl_vht = (struct wtbl_vht *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_vht);
+ wtbl_vht->tag = cpu_to_le16(WTBL_VHT);
+ wtbl_vht->len = cpu_to_le16(sizeof(*wtbl_vht));
+ wtbl_vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
+ wtbl_vht->vht = 1;
+
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ val |= MT_WTBL_W5_SHORT_GI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ val |= MT_WTBL_W5_SHORT_GI_160;
+ }
+
+ /* smps */
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
+ struct wtbl_smps *wtbl_smps;
+
+ wtbl_smps = (struct wtbl_smps *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_smps);
+ wtbl_smps->tag = cpu_to_le16(WTBL_SMPS);
+ wtbl_smps->len = cpu_to_le16(sizeof(*wtbl_smps));
+ wtbl_smps->smps = 1;
+ }
+
+ /* sgi */
+ msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
+ MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
+
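+ /* program the short GI bits through a raw write to WTBL DW5 */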
+ wtbl_raw = (struct wtbl_raw *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_raw);
+ wtbl_raw->tag = cpu_to_le16(WTBL_RAW_DATA);
+ wtbl_raw->len = cpu_to_le16(sizeof(*wtbl_raw));
+ wtbl_raw->wtbl_idx = 1;
+ wtbl_raw->dw = 5;
+ wtbl_raw->msk = cpu_to_le32(~msk);
+ wtbl_raw->val = cpu_to_le32(val);
+
+ ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET, buf,
+ buf_len);
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+
+ memset(buf, 0, MT7615_WTBL_UPDATE_MAX_SIZE);
+
+ buf_len = sizeof(*sta_rec_ht);
+ sta_rec_ht = (struct sta_rec_ht *)buf;
+ sta_rec_ht->tag = cpu_to_le16(STA_REC_HT);
+ sta_rec_ht->len = cpu_to_le16(sizeof(*sta_rec_ht));
+ sta_rec_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *sta_rec_vht;
+
+ sta_rec_vht = (struct sta_rec_vht *)(buf + buf_len);
+ buf_len += sizeof(*sta_rec_vht);
+ sta_rec_vht->tag = cpu_to_le16(STA_REC_VHT);
+ sta_rec_vht->len = cpu_to_le16(sizeof(*sta_rec_vht));
+ sta_rec_vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ sta_rec_vht->vht_rx_mcs_map =
+ cpu_to_le16(sta->vht_cap.vht_mcs.rx_mcs_map);
+ sta_rec_vht->vht_tx_mcs_map =
+ cpu_to_le16(sta->vht_cap.vht_mcs.tx_mcs_map);
+ }
+
+ ret = __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
+ mvif->omac_idx, buf, buf_len);
+ kfree(buf);
+ return ret;
+}
+
+int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add)
+{
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ u8 ba_range[8] = {4, 8, 12, 24, 36, 48, 54, 64};
+ u16 tid = params->tid;
+ u16 ba_size = params->buf_size;
+ u16 ssn = params->ssn;
+ struct wtbl_ba wtbl_ba = {0};
+ struct sta_rec_ba sta_rec_ba = {0};
+ int ret, buf_len;
+
+ buf_len = sizeof(struct wtbl_ba);
+
+ wtbl_ba.tag = cpu_to_le16(WTBL_BA);
+ wtbl_ba.len = cpu_to_le16(buf_len);
+ wtbl_ba.tid = tid;
+ wtbl_ba.ba_type = MT_BA_TYPE_ORIGINATOR;
+
+ if (add) {
+ u8 idx;
+
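+ /* pick the largest BA window bucket that does not exceed the negotiated size */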
+ for (idx = 7; idx > 0; idx--) {
+ if (ba_size >= ba_range[idx])
+ break;
+ }
+
+ wtbl_ba.sn = cpu_to_le16(ssn);
+ wtbl_ba.ba_en = 1;
+ wtbl_ba.ba_winsize_idx = idx;
+ }
+
+ ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET, &wtbl_ba,
+ buf_len);
+ if (ret)
+ return ret;
+
+ buf_len = sizeof(struct sta_rec_ba);
+
+ sta_rec_ba.tag = cpu_to_le16(STA_REC_BA);
+ sta_rec_ba.len = cpu_to_le16(buf_len);
+ sta_rec_ba.tid = tid;
+ sta_rec_ba.ba_type = MT_BA_TYPE_ORIGINATOR;
+ sta_rec_ba.amsdu = params->amsdu;
+ sta_rec_ba.ba_en = add << tid;
+ sta_rec_ba.ssn = cpu_to_le16(ssn);
+ sta_rec_ba.winsize = cpu_to_le16(ba_size);
+
+ return __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
+ mvif->omac_idx, &sta_rec_ba, buf_len);
+}
+
+int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add)
+{
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ u16 tid = params->tid;
+ struct wtbl_ba wtbl_ba = {0};
+ struct sta_rec_ba sta_rec_ba = {0};
+ int ret, buf_len;
+
+ buf_len = sizeof(struct sta_rec_ba);
+
+ sta_rec_ba.tag = cpu_to_le16(STA_REC_BA);
+ sta_rec_ba.len = cpu_to_le16(buf_len);
+ sta_rec_ba.tid = tid;
+ sta_rec_ba.ba_type = MT_BA_TYPE_RECIPIENT;
+ sta_rec_ba.amsdu = params->amsdu;
+ sta_rec_ba.ba_en = add << tid;
+ sta_rec_ba.ssn = cpu_to_le16(params->ssn);
+ sta_rec_ba.winsize = cpu_to_le16(params->buf_size);
+
+ ret = __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
+ mvif->omac_idx, &sta_rec_ba, buf_len);
+ if (ret || !add)
+ return ret;
+
+ buf_len = sizeof(struct wtbl_ba);
+
+ wtbl_ba.tag = cpu_to_le16(WTBL_BA);
+ wtbl_ba.len = cpu_to_le16(buf_len);
+ wtbl_ba.tid = tid;
+ wtbl_ba.ba_type = MT_BA_TYPE_RECIPIENT;
+ memcpy(wtbl_ba.peer_addr, sta->addr, ETH_ALEN);
+ wtbl_ba.rst_ba_tid = tid;
+ wtbl_ba.rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ wtbl_ba.rst_ba_sb = 1;
+
+ return __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET,
+ &wtbl_ba, buf_len);
+}
+
+void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates)
+{
+ int wcid = sta->wcid.idx;
+ u32 addr = MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
+ bool stbc = false;
+ int n_rates = sta->n_rates;
+ u8 bw, bw_prev, bw_idx = 0;
+ u16 val[4];
+ u16 probe_val;
+ u32 w5, w27;
+ int i;
+
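+ /* wait for the WTBL update engine to go idle before programming new rates */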
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return;
+
+ for (i = n_rates; i < 4; i++)
+ rates[i] = rates[n_rates - 1];
+
+ val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+ bw_prev = bw;
+
+ if (probe_rate) {
+ probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+ if (bw)
+ bw_idx = 1;
+ else
+ bw_prev = 0;
+ } else {
+ probe_val = val[0];
+ }
+
+ val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+ if (bw_prev) {
+ bw_idx = 3;
+ bw_prev = bw;
+ }
+
+ val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+ if (bw_prev) {
+ bw_idx = 5;
+ bw_prev = bw;
+ }
+
+ val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+ if (bw_prev)
+ bw_idx = 7;
+
+ w27 = mt76_rr(dev, addr + 27 * 4);
+ w27 &= ~MT_WTBL_W27_CC_BW_SEL;
+ w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw);
+
+ w5 = mt76_rr(dev, addr + 5 * 4);
+ w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE);
+ w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) |
+ FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7);
+
+ mt76_wr(dev, MT_WTBL_RIUCR0, w5);
+
+ mt76_wr(dev, MT_WTBL_RIUCR1,
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR2,
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR3,
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
+
+ mt76_wr(dev, MT_WTBL_UPDATE,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
+ MT_WTBL_UPDATE_RATE_UPDATE |
+ MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+ mt76_wr(dev, addr + 27 * 4, w27);
+
+ if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
+ sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
new file mode 100644
index 000000000000..9455f8fa475d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_MCU_H
+#define __MT7615_MCU_H
+
+struct mt7615_mcu_txd {
+ __le32 txd[8];
+
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+ u8 set_query; /* FW doesn't care */
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 s2d_index;
+ u8 ext_cid_ack;
+
+ u32 reserved[5];
+} __packed __aligned(4);
+
+struct mt7615_mcu_rxd {
+ __le32 rxd[4];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ __le16 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[2];
+ u8 s2d_index;
+};
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_S2D_H2N,
+ MCU_S2D_C2N,
+ MCU_S2D_H2C,
+ MCU_S2D_H2CN
+};
+
+enum {
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+ MCU_CMD_FW_START_REQ = 0x02,
+ MCU_CMD_INIT_ACCESS_REG = 0x3,
+ MCU_CMD_PATCH_START_REQ = 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_EXT_CID = 0xED,
+ MCU_CMD_FW_SCATTER = 0xEE,
+ MCU_CMD_RESTART_DL_REQ = 0xEF,
+};
+
+enum {
+ MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+ MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
+ MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
+ MCU_EXT_CMD_EDCA_UPDATE = 0x27,
+ MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
+ MCU_EXT_CMD_WTBL_UPDATE = 0x32,
+ MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
+ MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
+ MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+};
+
+enum {
+ PATCH_SEM_RELEASE = 0x0,
+ PATCH_SEM_GET = 0x1
+};
+
+enum {
+ PATCH_NOT_DL_SEM_FAIL = 0x0,
+ PATCH_IS_DL = 0x1,
+ PATCH_NOT_DL_SEM_SUCCESS = 0x2,
+ PATCH_REL_SEM_SUCCESS = 0x3
+};
+
+enum {
+ FW_STATE_INITIAL = 0,
+ FW_STATE_FW_DOWNLOAD = 1,
+ FW_STATE_NORMAL_OPERATION = 2,
+ FW_STATE_NORMAL_TRX = 3,
+ FW_STATE_CR4_RDY = 7
+};
+
+#define STA_TYPE_STA BIT(0)
+#define STA_TYPE_AP BIT(1)
+#define STA_TYPE_ADHOC BIT(2)
+#define STA_TYPE_TDLS BIT(3)
+#define STA_TYPE_WDS BIT(4)
+#define STA_TYPE_BC BIT(5)
+
+#define NETWORK_INFRA BIT(16)
+#define NETWORK_P2P BIT(17)
+#define NETWORK_IBSS BIT(18)
+#define NETWORK_MESH BIT(19)
+#define NETWORK_BOW BIT(20)
+#define NETWORK_WDS BIT(21)
+
+#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
+#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
+#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
+#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
+#define CONNECTION_MESH_STA (STA_TYPE_STA | NETWORK_MESH)
+#define CONNECTION_MESH_AP (STA_TYPE_AP | NETWORK_MESH)
+#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
+#define CONNECTION_TDLS (STA_TYPE_STA | NETWORK_INFRA | STA_TYPE_TDLS)
+#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
+#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
+
+#define CONN_STATE_DISCONNECT 0
+#define CONN_STATE_CONNECT 1
+#define CONN_STATE_PORT_SECURE 2
+
+struct dev_info {
+ u8 omac_idx;
+ u8 omac_addr[ETH_ALEN];
+ u8 band_idx;
+ u8 enable;
+ u32 feature;
+};
+
+enum {
+ DEV_INFO_ACTIVE,
+ DEV_INFO_MAX_NUM
+};
+
+struct bss_info {
+ u8 bss_idx;
+ u8 bssid[ETH_ALEN];
+ u8 omac_idx;
+ u8 band_idx;
+ u8 bmc_tx_wlan_idx; /* for bmc tx (sta mode uses the uc entry) */
+ u8 wmm_idx;
+ u32 network_type;
+ u32 conn_type;
+ u16 bcn_interval;
+ u8 dtim_period;
+ u8 enable;
+ u32 feature;
+};
+
+struct bss_info_tag_handler {
+ u32 tag;
+ u32 len;
+ void (*handler)(struct mt7615_dev *dev,
+ struct bss_info *bss_info, struct sk_buff *skb);
+};
+
+struct bss_info_omac {
+ __le16 tag;
+ __le16 len;
+ u8 hw_bss_idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 rsv0;
+ __le32 conn_type;
+ u32 rsv1;
+} __packed;
+
+struct bss_info_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 network_type;
+ u8 active;
+ u8 rsv0;
+ __le16 bcn_interval;
+ u8 bssid[ETH_ALEN];
+ u8 wmm_idx;
+ u8 dtim_period;
+ u8 bmc_tx_wlan_idx;
+ u8 cipher; /* not used */
+ u8 phymode; /* not used */
+ u8 rsv1[5];
+} __packed;
+
+struct bss_info_rf_ch {
+ __le16 tag;
+ __le16 len;
+ u8 pri_ch;
+ u8 central_ch0;
+ u8 central_ch1;
+ u8 bw;
+} __packed;
+
+struct bss_info_ext_bss {
+ __le16 tag;
+ __le16 len;
+ __le32 mbss_tsf_offset; /* in units of us */
+ u8 rsv[8];
+} __packed;
+
+enum {
+ BSS_INFO_OMAC,
+ BSS_INFO_BASIC,
+ BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
+ BSS_INFO_PM, /* sta only */
+ BSS_INFO_UAPSD, /* sta only */
+ BSS_INFO_ROAM_DETECTION, /* obsoleted */
+ BSS_INFO_LQ_RM, /* obsoleted */
+ BSS_INFO_EXT_BSS,
+ BSS_INFO_BMC_INFO, /* for bmc rate control in CR4 */
+ BSS_INFO_SYNC_MODE, /* obsoleted */
+ BSS_INFO_RA,
+ BSS_INFO_MAX_NUM
+};
+
+enum {
+ WTBL_RESET_AND_SET = 1,
+ WTBL_SET,
+ WTBL_QUERY,
+ WTBL_RESET_ALL
+};
+
+struct wtbl_generic {
+ __le16 tag;
+ __le16 len;
+ u8 peer_addr[ETH_ALEN];
+ u8 muar_idx;
+ u8 skip_tx;
+ u8 cf_ack;
+ u8 qos;
+ u8 mesh;
+ u8 adm;
+ __le16 partial_aid;
+ u8 baf_en;
+ u8 aad_om;
+} __packed;
+
+struct wtbl_rx {
+ __le16 tag;
+ __le16 len;
+ u8 rcid;
+ u8 rca1;
+ u8 rca2;
+ u8 rv;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_ht {
+ __le16 tag;
+ __le16 len;
+ u8 ht;
+ u8 ldpc;
+ u8 af;
+ u8 mm;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_vht {
+ __le16 tag;
+ __le16 len;
+ u8 ldpc;
+ u8 dyn_bw;
+ u8 vht;
+ u8 txop_ps;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_tx_ps {
+ __le16 tag;
+ __le16 len;
+ u8 txps;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_hdr_trans {
+ __le16 tag;
+ __le16 len;
+ u8 to_ds;
+ u8 from_ds;
+ u8 disable_rx_trans;
+ u8 rsv;
+} __packed;
+
+enum mt7615_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_TKIP_NO_MIC,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_BIP_CMAC_128,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_WAPI,
+ MT_CIPHER_CCMP_256 = 10,
+ MT_CIPHER_GCMP,
+ MT_CIPHER_GCMP_256,
+};
+
+struct wtbl_sec_key {
+ __le16 tag;
+ __le16 len;
+ u8 add; /* 0: add, 1: remove */
+ u8 rkv;
+ u8 ikv;
+ u8 cipher_id;
+ u8 key_id;
+ u8 key_len;
+ u8 rsv[2];
+ u8 key_material[32];
+} __packed;
+
+enum {
+ MT_BA_TYPE_INVALID,
+ MT_BA_TYPE_ORIGINATOR,
+ MT_BA_TYPE_RECIPIENT
+};
+
+enum {
+ RST_BA_MAC_TID_MATCH,
+ RST_BA_MAC_MATCH,
+ RST_BA_NO_MATCH
+};
+
+struct wtbl_ba {
+ __le16 tag;
+ __le16 len;
+ /* common */
+ u8 tid;
+ u8 ba_type;
+ u8 rsv0[2];
+ /* originator only */
+ __le16 sn;
+ u8 ba_en;
+ u8 ba_winsize_idx;
+ __le16 ba_winsize;
+ /* recipient only */
+ u8 peer_addr[ETH_ALEN];
+ u8 rst_ba_tid;
+ u8 rst_ba_sel;
+ u8 rst_ba_sb;
+ u8 band_idx;
+ u8 rsv1[4];
+} __packed;
+
+struct wtbl_bf {
+ __le16 tag;
+ __le16 len;
+ u8 ibf;
+ u8 ebf;
+ u8 ibf_vht;
+ u8 ebf_vht;
+ u8 gid;
+ u8 pfmu_idx;
+ u8 rsv[2];
+} __packed;
+
+struct wtbl_smps {
+ __le16 tag;
+ __le16 len;
+ u8 smps;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_pn {
+ __le16 tag;
+ __le16 len;
+ u8 pn[6];
+ u8 rsv[2];
+} __packed;
+
+struct wtbl_spe {
+ __le16 tag;
+ __le16 len;
+ u8 spe_idx;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_raw {
+ __le16 tag;
+ __le16 len;
+ u8 wtbl_idx;
+ u8 dw;
+ u8 rsv[2];
+ __le32 msk;
+ __le32 val;
+} __packed;
+
+#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_tx_ps) + \
+ sizeof(struct wtbl_hdr_trans) + \
+ sizeof(struct wtbl_sec_key) + \
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_bf) + \
+ sizeof(struct wtbl_smps) + \
+ sizeof(struct wtbl_pn) + \
+ sizeof(struct wtbl_spe))
+
+enum {
+ WTBL_GENERIC,
+ WTBL_RX,
+ WTBL_HT,
+ WTBL_VHT,
+ WTBL_PEER_PS, /* not used */
+ WTBL_TX_PS,
+ WTBL_HDR_TRANS,
+ WTBL_SEC_KEY,
+ WTBL_BA,
+ WTBL_RDG, /* obsoleted */
+ WTBL_PROTECT, /* not used */
+ WTBL_CLEAR, /* not used */
+ WTBL_BF,
+ WTBL_SMPS,
+ WTBL_RAW_DATA, /* debug only */
+ WTBL_PN,
+ WTBL_SPE,
+ WTBL_MAX_NUM
+};
+
+struct sta_rec_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 qos;
+ __le16 aid;
+ u8 peer_addr[ETH_ALEN];
+#define EXTRA_INFO_VER BIT(0)
+#define EXTRA_INFO_NEW BIT(1)
+ __le16 extra_info;
+} __packed;
+
+struct sta_rec_ht {
+ __le16 tag;
+ __le16 len;
+ __le16 ht_cap;
+ u16 rsv;
+} __packed;
+
+struct sta_rec_vht {
+ __le16 tag;
+ __le16 len;
+ __le32 vht_cap;
+ __le16 vht_rx_mcs_map;
+ __le16 vht_tx_mcs_map;
+} __packed;
+
+struct sta_rec_ba {
+ __le16 tag;
+ __le16 len;
+ u8 tid;
+ u8 ba_type;
+ u8 amsdu;
+ u8 ba_en;
+ __le16 ssn;
+ __le16 winsize;
+} __packed;
+
+#define MT7615_STA_REC_UPDATE_MAX_SIZE (sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_vht))
+
+enum {
+ STA_REC_BASIC,
+ STA_REC_RA,
+ STA_REC_RA_CMM_INFO,
+ STA_REC_RA_UPDATE,
+ STA_REC_BF,
+ STA_REC_AMSDU, /* for CR4 */
+ STA_REC_BA,
+ STA_REC_RED, /* not used */
+ STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
+ STA_REC_HT,
+ STA_REC_VHT,
+ STA_REC_APPS,
+ STA_REC_MAX_NUM
+};
+
+enum {
+ CMD_CBW_20MHZ,
+ CMD_CBW_40MHZ,
+ CMD_CBW_80MHZ,
+ CMD_CBW_160MHZ,
+ CMD_CBW_10MHZ,
+ CMD_CBW_5MHZ,
+ CMD_CBW_8080MHZ
+};
+
+enum {
+ CH_SWITCH_NORMAL = 0,
+ CH_SWITCH_SCAN = 3,
+ CH_SWITCH_MCC = 4,
+ CH_SWITCH_DFS = 5,
+ CH_SWITCH_BACKGROUND_SCAN_START = 6,
+ CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
+ CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
+ CH_SWITCH_SCAN_BYPASS_DPD = 9
+};
+
+static inline struct sk_buff *
+mt7615_mcu_msg_alloc(const void *data, int len)
+{
+ return mt76_mcu_msg_alloc(data, sizeof(struct mt7615_mcu_txd),
+ len, 0);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
new file mode 100644
index 000000000000..895c2904d7eb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_H
+#define __MT7615_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7615_MAX_INTERFACES 4
+#define MT7615_WTBL_SIZE 128
+#define MT7615_WTBL_RESERVED (MT7615_WTBL_SIZE - 1)
+#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \
+ MT7615_MAX_INTERFACES)
+
+#define MT7615_WATCHDOG_TIME 100 /* ms */
+#define MT7615_RATE_RETRY 2
+
+#define MT7615_TX_RING_SIZE 1024
+#define MT7615_TX_MCU_RING_SIZE 128
+#define MT7615_TX_FWDL_RING_SIZE 128
+
+#define MT7615_RX_RING_SIZE 1024
+#define MT7615_RX_MCU_RING_SIZE 512
+
+#define MT7615_FIRMWARE_CR4 "mt7615_cr4.bin"
+#define MT7615_FIRMWARE_N9 "mt7615_n9.bin"
+#define MT7615_ROM_PATCH "mt7615_rom_patch.bin"
+
+#define MT7615_EEPROM_SIZE 1024
+#define MT7615_TOKEN_SIZE 4096
+
+struct mt7615_vif;
+struct mt7615_sta;
+
+enum mt7615_hw_txq_id {
+ MT7615_TXQ_MAIN,
+ MT7615_TXQ_EXT,
+ MT7615_TXQ_MCU,
+ MT7615_TXQ_FWDL,
+};
+
+struct mt7615_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7615_vif *vif;
+
+ struct ieee80211_tx_rate rates[8];
+ u8 rate_count;
+ u8 n_rates;
+
+ u8 rate_probe;
+};
+
+struct mt7615_vif {
+ u8 idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 wmm_idx;
+
+ struct mt7615_sta sta;
+};
+
+struct mt7615_dev {
+ struct mt76_dev mt76; /* must be first */
+ u32 vif_mask;
+ u32 omac_mask;
+
+ spinlock_t token_lock;
+ struct idr token;
+};
+
+enum {
+ HW_BSSID_0 = 0x0,
+ HW_BSSID_1,
+ HW_BSSID_2,
+ HW_BSSID_3,
+ HW_BSSID_MAX,
+ EXT_BSSID_START = 0x10,
+ EXT_BSSID_1,
+ EXT_BSSID_2,
+ EXT_BSSID_3,
+ EXT_BSSID_4,
+ EXT_BSSID_5,
+ EXT_BSSID_6,
+ EXT_BSSID_7,
+ EXT_BSSID_8,
+ EXT_BSSID_9,
+ EXT_BSSID_10,
+ EXT_BSSID_11,
+ EXT_BSSID_12,
+ EXT_BSSID_13,
+ EXT_BSSID_14,
+ EXT_BSSID_15,
+ EXT_BSSID_END
+};
+
+extern const struct ieee80211_ops mt7615_ops;
+extern struct pci_driver mt7615_pci_driver;
+
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+
+int mt7615_register_device(struct mt7615_dev *dev);
+void mt7615_unregister_device(struct mt7615_dev *dev);
+int mt7615_eeprom_init(struct mt7615_dev *dev);
+int mt7615_dma_init(struct mt7615_dev *dev);
+void mt7615_dma_cleanup(struct mt7615_dev *dev);
+int mt7615_mcu_init(struct mt7615_dev *dev);
+int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en);
+int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en);
+int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd);
+void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates);
+int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
+int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
+int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
+int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool en);
+int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool en);
+int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en);
+int mt7615_mcu_set_channel(struct mt7615_dev *dev);
+int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
+ const struct ieee80211_tx_queue_params *params);
+int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+ const struct ieee80211_tx_rate *rate,
+ bool stbc, u8 *bw);
+int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int pid,
+ struct ieee80211_key_conf *key);
+int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
+void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
+
+int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
+int mt7615_mcu_init_mac(struct mt7615_dev *dev);
+int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
+int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
+void mt7615_mcu_exit(struct mt7615_dev *dev);
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
+
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7615_mac_work(struct work_struct *work);
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *txwi);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
new file mode 100644
index 000000000000..11122bd2d727
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7615.h"
+#include "mac.h"
+
+static const struct pci_device_id mt7615_pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7615) },
+ { },
+};
+
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+ u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
+
+void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7615_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return IRQ_NONE;
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ tasklet_schedule(&dev->mt76.tx_tasklet);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mt7615_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
+ .txwi_flags = MT_TXWI_NO_FREE,
+ .tx_prepare_skb = mt7615_tx_prepare_skb,
+ .tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_skb = mt7615_queue_rx_skb,
+ .rx_poll_complete = mt7615_rx_poll_complete,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7615_sta_add,
+ .sta_assoc = mt7615_sta_assoc,
+ .sta_remove = mt7615_sta_remove,
+ };
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7615_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static void mt7615_pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_unregister_device(dev);
+}
+
+struct pci_driver mt7615_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7615_pci_device_table,
+ .probe = mt7615_pci_probe,
+ .remove = mt7615_pci_remove,
+};
+
+module_pci_driver(mt7615_pci_driver);
+
+MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
+MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
+MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7615_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
new file mode 100644
index 000000000000..70e5ace33cc3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_REGS_H
+#define __MT7615_REGS_H
+
+#define MT_HW_REV 0x1000
+#define MT_HW_CHIPID 0x1008
+#define MT_TOP_MISC2 0x1134
+#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
+
+#define MT_MCU_BASE 0x2000
+#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
+
+#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500)
+#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0)
+#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
+#define MT_PCIE_REMAP_BASE_1 0x40000
+
+#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504)
+#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
+#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
+#define MT_PCIE_REMAP_BASE_2 0x80000
+
+#define MT_HIF_BASE 0x4000
+#define MT_HIF(ofs) (MT_HIF_BASE + (ofs))
+
+#define MT_CFG_LPCR_HOST MT_HIF(0x1f0)
+#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
+#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
+
+#define MT_INT_SOURCE_CSR MT_HIF(0x200)
+#define MT_INT_MASK_CSR MT_HIF(0x204)
+#define MT_DELAY_INT_CFG MT_HIF(0x210)
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(7, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+
+#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0 BIT(9)
+#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21 GENMASK(23, 22)
+#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MT_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
+
+#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
+
+#define MT_TX_RING_BASE MT_HIF(0x300)
+#define MT_RX_RING_BASE MT_HIF(0x400)
+
+#define MT_WPDMA_GLO_CFG1 MT_HIF(0x500)
+#define MT_WPDMA_TX_PRE_CFG MT_HIF(0x510)
+#define MT_WPDMA_RX_PRE_CFG MT_HIF(0x520)
+#define MT_WPDMA_ABT_CFG MT_HIF(0x530)
+#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534)
+
+#define MT_WF_PHY_BASE 0x10000
+#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
+
+#define MT_WF_PHY_WF2_RFCTRL0 MT_WF_PHY(0x1900)
+#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
+
+#define MT_WF_CFG_BASE 0x20200
+#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs))
+
+#define MT_CFG_CCR MT_WF_CFG(0x000)
+#define MT_CFG_CCR_MAC_D1_1X_GC_EN BIT(24)
+#define MT_CFG_CCR_MAC_D0_1X_GC_EN BIT(25)
+#define MT_CFG_CCR_MAC_D1_2X_GC_EN BIT(30)
+#define MT_CFG_CCR_MAC_D0_2X_GC_EN BIT(31)
+
+#define MT_WF_AGG_BASE 0x20a00
+#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
+
+#define MT_AGG_ARCR MT_WF_AGG(0x010)
+#define MT_AGG_ARCR_INIT_RATE1 BIT(0)
+#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
+#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
+
+#define MT_AGG_ARUCR MT_WF_AGG(0x018)
+#define MT_AGG_ARDCR MT_WF_AGG(0x01c)
+#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
+#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+
+#define MT_AGG_SCR MT_WF_AGG(0x0fc)
+#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3)
+
+#define MT_WF_TMAC_BASE 0x21000
+#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
+
+#define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4)
+#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
+#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12)
+#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
+#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
+
+#define MT_WF_RMAC_BASE 0x21200
+#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
+
+#define MT_WF_RFCR MT_WF_RMAC(0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_WF_DMA_BASE 0x21800
+#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
+#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+
+#define MT_WTBL_BASE 0x30000
+#define MT_WTBL_ENTRY_SIZE 256
+
+#define MT_WTBL_OFF_BASE 0x23400
+#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
+
+#define MT_WTBL_UPDATE MT_WTBL_OFF(0x030)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
+#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
+#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
+#define MT_WTBL_UPDATE_BUSY BIT(31)
+
+#define MT_WTBL_ON_BASE 0x23000
+#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
+
+#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x020)
+
+#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x024)
+#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0)
+#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12)
+#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24)
+
+#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x028)
+#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0)
+#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4)
+#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16)
+#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28)
+
+#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x02c)
+#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0)
+#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
+#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
+
+#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5)
+#define MT_WTBL_W5_SHORT_GI_20 BIT(8)
+#define MT_WTBL_W5_SHORT_GI_40 BIT(9)
+#define MT_WTBL_W5_SHORT_GI_80 BIT(10)
+#define MT_WTBL_W5_SHORT_GI_160 BIT(11)
+#define MT_WTBL_W5_BW_CAP GENMASK(13, 12)
+#define MT_WTBL_W27_CC_BW_SEL GENMASK(6, 5)
+
+#define MT_EFUSE_BASE 0x81070000
+#define MT_EFUSE_BASE_CTRL 0x000
+#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
+
+#define MT_EFUSE_CTRL 0x008
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_VALID BIT(29)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
+#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index bcb72e019fd2..57e46d57b449 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -259,7 +259,6 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
return ret;
mt76x0_phy_init(dev);
- mt76x02_init_beacon_config(dev);
return 0;
}
@@ -281,6 +280,7 @@ mt76x0_init_txpower(struct mt76x02_dev *dev,
mt76x0_get_power_info(dev, chan, &tp);
chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2;
+ chan->orig_mpwr = chan->max_power;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index fee16ab21edb..691984037f98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -22,10 +22,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
int ret;
cancel_delayed_work_sync(&dev->cal_work);
- if (mt76_is_mmio(dev)) {
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ dev->beacon_ops->pre_tbtt_enable(dev, false);
+ if (mt76_is_mmio(dev))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- }
mt76_set_channel(&dev->mt76);
ret = mt76x0_phy_set_channel(dev, chandef);
@@ -38,9 +37,10 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
if (mt76_is_mmio(dev)) {
mt76x02_dfs_init_params(dev);
- tasklet_enable(&dev->pre_tbtt_tasklet);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
}
+ dev->beacon_ops->pre_tbtt_enable(dev, true);
+
mt76_txq_schedule_all(&dev->mt76);
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index f302162036d0..4585e1b756c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -25,25 +25,21 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
-
mt76x02_mac_start(dev);
mt76x0_phy_calibrate(dev, true);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- mutex_unlock(&dev->mt76.mutex);
-
return 0;
}
static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
0, 1000))
@@ -62,10 +58,8 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x0e_stop_hw(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static void
@@ -74,13 +68,6 @@ mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
}
-static int
-mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
- bool set)
-{
- return 0;
-}
-
static const struct ieee80211_ops mt76x0e_ops = {
.tx = mt76x02_tx,
.start = mt76x0e_start,
@@ -101,7 +88,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.get_survey = mt76_get_survey,
.get_txpower = mt76_get_txpower,
.flush = mt76x0e_flush,
- .set_tim = mt76x0e_set_tim,
+ .set_tim = mt76_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x02_set_coverage_class,
.set_rts_threshold = mt76x02_set_rts_threshold,
@@ -128,6 +115,8 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
+ mt76x02e_init_beacon_config(dev);
+
if (mt76_chip(&dev->mt76) == 0x7610) {
u16 val;
@@ -164,6 +153,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_aligned4_skbs = true,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
@@ -223,7 +213,7 @@ error:
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
mt76x02_dma_cleanup(dev);
@@ -238,7 +228,7 @@ mt76x0e_remove(struct pci_dev *pdev)
mt76_unregister_device(mdev);
mt76x0e_cleanup(dev);
- ieee80211_free_hw(mdev->hw);
+ mt76_free_device(mdev);
}
static const struct pci_device_id mt76x0e_device_table[] = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index e5a06f74a6f7..7c38ec4418db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -81,20 +81,19 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
mt76u_queues_deinit(&dev->mt76);
}
-static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
+static void mt76x0u_stop(struct ieee80211_hw *hw)
{
+ struct mt76x02_dev *dev = hw->priv;
+
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
- mt76u_stop_stat_wk(&dev->mt76);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ mt76u_stop_tx(&dev->mt76);
+ mt76x02u_exit_beacon_config(dev);
if (test_bit(MT76_REMOVED, &dev->mt76.state))
return;
- mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_BEACON_TX);
-
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
@@ -109,31 +108,17 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x0_mac_start(dev);
if (ret)
- goto out;
+ return ret;
mt76x0_phy_calibrate(dev, true);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
-}
-
-static void mt76x0u_stop(struct ieee80211_hw *hw)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
- mt76x0u_mac_stop(dev);
- mutex_unlock(&dev->mt76.mutex);
+ return 0;
}
static const struct ieee80211_ops mt76x0u_ops = {
@@ -155,6 +140,8 @@ static const struct ieee80211_ops mt76x0u_ops = {
.set_rts_threshold = mt76x02_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
.get_txpower = mt76_get_txpower,
+ .set_tim = mt76_set_tim,
+ .release_buffered_frames = mt76_release_buffered_frames,
};
static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
@@ -175,6 +162,8 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
if (err < 0)
return err;
+ mt76x02u_init_beacon_config(dev);
+
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG,
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
@@ -223,6 +212,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
+ .sta_ps = mt76x02_sta_ps,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
};
@@ -232,7 +222,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
u32 mac_rev;
int ret;
- mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
+ mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
@@ -311,8 +301,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
- mt76u_stop_queues(&dev->mt76);
- mt76x0u_mac_stop(dev);
+ mt76u_stop_rx(&dev->mt76);
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
mt76x0_chip_onoff(dev, false, false);
@@ -322,16 +311,12 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
- struct mt76_usb *usb = &dev->mt76.usb;
int ret;
- ret = mt76u_submit_rx_buffers(&dev->mt76);
+ ret = mt76u_resume_rx(&dev->mt76);
if (ret < 0)
goto err;
- tasklet_enable(&usb->rx_tasklet);
- tasklet_enable(&usb->tx_tasklet);
-
ret = mt76x0u_init_hardware(dev);
if (ret)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 07061eb4d1e1..687bd14b2d77 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -68,6 +68,13 @@ struct mt76x02_calibration {
s8 tssi_dc;
};
+struct mt76x02_beacon_ops {
+ unsigned int nslots;
+ unsigned int slot_size;
+ void (*pre_tbtt_enable) (struct mt76x02_dev *, bool);
+ void (*beacon_enable) (struct mt76x02_dev *, bool);
+};
+
struct mt76x02_dev {
struct mt76_dev mt76; /* must be first */
@@ -79,23 +86,25 @@ struct mt76x02_dev {
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
+ spinlock_t txstatus_fifo_lock;
struct sk_buff *rx_head;
- struct tasklet_struct tx_tasklet;
- struct tasklet_struct pre_tbtt_tasklet;
+ struct napi_struct tx_napi;
struct delayed_work cal_work;
- struct delayed_work mac_work;
struct delayed_work wdt_work;
+ struct hrtimer pre_tbtt_timer;
+ struct work_struct pre_tbtt_work;
+
+ const struct mt76x02_beacon_ops *beacon_ops;
+
u32 aggr_stats[32];
struct sk_buff *beacons[8];
- u8 beacon_mask;
u8 beacon_data_mask;
u8 tbtt_count;
- u16 beacon_int;
u32 tx_hang_reset;
u8 tx_hang_check;
@@ -163,7 +172,6 @@ void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class);
int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
-int mt76x02_insert_hdr_pad(struct sk_buff *skb);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@@ -173,9 +181,9 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
@@ -185,9 +193,19 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed);
-extern const u16 mt76x02_beacon_offsets[16];
+struct beacon_bc_data {
+ struct mt76x02_dev *dev;
+ struct sk_buff_head q;
+ struct sk_buff *tail[8];
+};
void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
-void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
+void mt76x02e_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev);
+void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
+void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
+ struct beacon_bc_data *data,
+ int max_nframes);
+
void mt76x02_mac_start(struct mt76x02_dev *dev);
void mt76x02_init_debugfs(struct mt76x02_dev *dev);
@@ -208,12 +226,12 @@ static inline bool is_mt76x2(struct mt76x02_dev *dev)
static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
{
- mt76x02_set_irq_mask(dev, 0, mask);
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
}
static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
{
- mt76x02_set_irq_mask(dev, mask, 0);
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
static inline bool
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
new file mode 100644
index 000000000000..e196b9c0a686
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x02.h"
+
+static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
+{
+ u32 regs[4] = {};
+ u16 val;
+ int i;
+
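+ /* Each MT_BCN_OFFSET register packs four 8-bit slot offsets, in 64-byte units */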
+ for (i = 0; i < dev->beacon_ops->nslots; i++) {
+ val = i * dev->beacon_ops->slot_size;
+ regs[i / 4] |= (val / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int
+mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_ops->slot_size;
+ struct mt76x02_txwi txwi;
+
+ if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
+ return -ENOSPC;
+
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
+}
+
+static int
+__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
+ struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_ops->slot_size;
+ int beacon_addr = MT_BEACON_BASE + (beacon_len * bcn_idx);
+ int ret = 0;
+ int i;
+
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+ if (skb) {
+ ret = mt76x02_write_beacon(dev, beacon_addr, skb);
+ if (!ret)
+ dev->beacon_data_mask |= BIT(bcn_idx);
+ } else {
+ dev->beacon_data_mask &= ~BIT(bcn_idx);
+ for (i = 0; i < beacon_len; i += 4)
+ mt76_wr(dev, beacon_addr + i, 0);
+ }
+
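+ /* Re-enable transmission only for slots that hold valid beacon data */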
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+ return ret;
+}
+
+int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+ struct sk_buff *skb)
+{
+ bool force_update = false;
+ int bcn_idx = 0;
+ int i;
+
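+ /* bcn_idx advances only for vifs that have a beacon, keeping hardware slots contiguous */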
+ for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (vif_idx == i) {
+ force_update = !!dev->beacons[i] ^ !!skb;
+
+ if (dev->beacons[i])
+ dev_kfree_skb(dev->beacons[i]);
+
+ dev->beacons[i] = skb;
+ __mt76x02_mac_set_beacon(dev, bcn_idx, skb);
+ } else if (force_update && dev->beacons[i]) {
+ __mt76x02_mac_set_beacon(dev, bcn_idx,
+ dev->beacons[i]);
+ }
+
+ bcn_idx += !!dev->beacons[i];
+ }
+
+ for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (!(dev->beacon_data_mask & BIT(i)))
+ break;
+
+ __mt76x02_mac_set_beacon(dev, i, NULL);
+ }
+
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+ bcn_idx - 1);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
+
+static void
+__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
+ bool val, struct sk_buff *skb)
+{
+ u8 old_mask = dev->mt76.beacon_mask;
+ bool en;
+ u32 reg;
+
+ if (val) {
+ dev->mt76.beacon_mask |= BIT(vif_idx);
+ if (skb)
+ mt76x02_mac_set_beacon(dev, vif_idx, skb);
+ } else {
+ dev->mt76.beacon_mask &= ~BIT(vif_idx);
+ mt76x02_mac_set_beacon(dev, vif_idx, NULL);
+ }
+
+ if (!!old_mask == !!dev->mt76.beacon_mask)
+ return;
+
+ en = dev->mt76.beacon_mask;
+
+ reg = MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_TIMER_EN;
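+ /* reg * en sets all three bits when enabling and clears them when disabling */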
+ mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+ dev->beacon_ops->beacon_enable(dev, en);
+}
+
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ struct ieee80211_vif *vif, bool val)
+{
+ u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
+ struct sk_buff *skb = NULL;
+
+ dev->beacon_ops->pre_tbtt_enable(dev, false);
+
+ if (mt76_is_usb(dev))
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+
+ if (!dev->mt76.beacon_mask)
+ dev->tbtt_count = 0;
+
+ __mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
+
+ dev->beacon_ops->pre_tbtt_enable(dev, true);
+}
+
+void
+mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
+{
+ u32 timer_val = dev->mt76.beacon_int << 4;
+
+ dev->tbtt_count++;
+
+ /*
+ * Beacon timer drifts by 1us every tick; the timer is configured
+ * in 1/16 TU (64us) units.
+ */
+ if (dev->tbtt_count < 63)
+ return;
+
+ /*
+ * The updated beacon interval takes effect after two TBTTs, because
+ * at this point the original interval has already been loaded into
+ * the next TBTT_TIMER value.
+ */
+ if (dev->tbtt_count == 63)
+ timer_val -= 1;
+
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL, timer_val);
+
+ if (dev->tbtt_count >= 64) {
+ dev->tbtt_count = 0;
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer);
+
+void
+mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
+
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ mt76x02_mac_set_beacon(dev, mvif->idx, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);
+
+static void
+mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct beacon_bc_data *data = priv;
+ struct mt76x02_dev *dev = data->dev;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ mt76_skb_set_moredata(skb, true);
+ __skb_queue_tail(&data->q, skb);
+ data->tail[mvif->idx] = skb;
+}
+
+void
+mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, struct beacon_bc_data *data,
+ int max_nframes)
+{
+ int i, nframes;
+
+ data->dev = dev;
+ __skb_queue_head_init(&data->q);
+
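+ /* Keep iterating while new buffered frames get queued and the limit is not reached */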
+ do {
+ nframes = skb_queue_len(&data->q);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_add_buffered_bc, data);
+ } while (nframes != skb_queue_len(&data->q) &&
+ skb_queue_len(&data->q) < max_nframes);
+
+ if (!skb_queue_len(&data->q))
+ return;
+
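+ /* Clear the more-data bit on the last buffered frame of each vif */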
+ for (i = 0; i < ARRAY_SIZE(data->tail); i++) {
+ if (!data->tail[i])
+ continue;
+ mt76_skb_set_moredata(data->tail[i], false);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);
+
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+ mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+
+ for (i = 0; i < 8; i++)
+ mt76x02_mac_set_beacon(dev, i, NULL);
+
+ mt76x02_set_beacon_offsets(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
+
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 9ed231abe916..56510a1a843a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -218,10 +218,17 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate)
{
- spin_lock_bh(&dev->mt76.lock);
- wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
- wcid->tx_rate_set = true;
- spin_unlock_bh(&dev->mt76.lock);
+ s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+ __le16 rateval;
+ u32 tx_info;
+ s8 nss;
+
+ rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+ tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
+ FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
+ FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
+ MT_WCID_TX_INFO_SET;
+ wcid->tx_info = tx_info;
}
void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
@@ -323,6 +330,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_key_conf *key = info->control.hw_key;
+ u32 wcid_tx_info;
u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
u16 txwi_flags = 0;
u8 nss;
@@ -357,16 +365,16 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->eiv = *((__le32 *)&ccmp_pn[4]);
}
- spin_lock_bh(&dev->mt76.lock);
if (wcid && (rate->idx < 0 || !rate->count)) {
- txwi->rate = wcid->tx_rate;
- max_txpwr_adj = wcid->max_txpwr_adj;
- nss = wcid->tx_rate_nss;
+ wcid_tx_info = wcid->tx_info;
+ txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
+ max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
+ wcid_tx_info);
+ nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
} else {
txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
- spin_unlock_bh(&dev->mt76.lock);
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
max_txpwr_adj);
@@ -466,7 +474,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
return;
rcu_read_lock();
- mt76_tx_status_lock(mdev, &list);
if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -479,6 +486,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
drv_priv);
}
+ mt76_tx_status_lock(mdev, &list);
+
if (wcid) {
if (stat->pktid >= MT_PACKET_ID_FIRST)
status.skb = mt76_tx_status_skb_get(mdev, wcid,
@@ -498,7 +507,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (*update == 0 && stat_val == stat_cache &&
stat->wcid == msta->status.wcid && msta->n_frames < 32) {
msta->n_frames++;
- goto out;
+ mt76_tx_status_unlock(mdev, &list);
+ rcu_read_unlock();
+ return;
}
mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
@@ -514,11 +525,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (status.skb)
mt76_tx_status_skb_done(mdev, status.skb, &list);
- else
- ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
mt76_tx_status_unlock(mdev, &list);
+
+ if (!status.skb)
+ ieee80211_tx_status_ext(mt76_hw(dev), &status);
rcu_read_unlock();
}
@@ -729,7 +739,6 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
struct mt76x02_tx_status stat = {};
- unsigned long flags;
u8 update = 1;
bool ret;
@@ -739,9 +748,11 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
trace_mac_txstat_poll(dev);
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
- spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+ if (!spin_trylock(&dev->txstatus_fifo_lock))
+ break;
+
ret = mt76x02_mac_load_tx_status(dev, &stat);
- spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
+ spin_unlock(&dev->txstatus_fifo_lock);
if (!ret)
break;
@@ -755,11 +766,12 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
}
}
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
+ u8 *txwi_ptr;
if (!e->txwi) {
dev_kfree_skb_any(e->skb);
@@ -768,7 +780,8 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
mt76x02_mac_poll_tx_status(dev, false);
- txwi = (struct mt76x02_txwi *) &e->txwi->txwi;
+ txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
+ txwi = (struct mt76x02_txwi *)txwi_ptr;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
mt76_tx_complete_skb(mdev, e->skb);
@@ -1019,7 +1032,7 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
void mt76x02_mac_work(struct work_struct *work)
{
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
- mac_work.work);
+ mt76.mac_work.work);
int i, idx;
mutex_lock(&dev->mt76.mutex);
@@ -1032,7 +1045,7 @@ void mt76x02_mac_work(struct work_struct *work)
dev->aggr_stats[idx++] += val >> 16;
}
- if (!dev->beacon_mask)
+ if (!dev->mt76.beacon_mask)
mt76x02_check_mac_err(dev);
if (dev->ed_monitor)
@@ -1042,7 +1055,7 @@ void mt76x02_mac_work(struct work_struct *work)
mt76_tx_status_check(&dev->mt76, NULL, false);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
}
@@ -1053,141 +1066,3 @@ void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
get_unaligned_le16(addr + 4));
}
-
-static int
-mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
-{
- int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
- struct mt76x02_txwi txwi;
-
- if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
- return -ENOSPC;
-
- mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
-
- mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
- offset += sizeof(txwi);
-
- mt76_wr_copy(dev, offset, skb->data, skb->len);
- return 0;
-}
-
-static int
-__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
- struct sk_buff *skb)
-{
- int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
- int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
- int ret = 0;
- int i;
-
- /* Prevent corrupt transmissions during update */
- mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
-
- if (skb) {
- ret = mt76x02_write_beacon(dev, beacon_addr, skb);
- if (!ret)
- dev->beacon_data_mask |= BIT(bcn_idx);
- } else {
- dev->beacon_data_mask &= ~BIT(bcn_idx);
- for (i = 0; i < beacon_len; i += 4)
- mt76_wr(dev, beacon_addr + i, 0);
- }
-
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
-
- return ret;
-}
-
-int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb)
-{
- bool force_update = false;
- int bcn_idx = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
- if (vif_idx == i) {
- force_update = !!dev->beacons[i] ^ !!skb;
-
- if (dev->beacons[i])
- dev_kfree_skb(dev->beacons[i]);
-
- dev->beacons[i] = skb;
- __mt76x02_mac_set_beacon(dev, bcn_idx, skb);
- } else if (force_update && dev->beacons[i]) {
- __mt76x02_mac_set_beacon(dev, bcn_idx,
- dev->beacons[i]);
- }
-
- bcn_idx += !!dev->beacons[i];
- }
-
- for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
- if (!(dev->beacon_data_mask & BIT(i)))
- break;
-
- __mt76x02_mac_set_beacon(dev, i, NULL);
- }
-
- mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
- bcn_idx - 1);
- return 0;
-}
-
-static void
-__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
- bool val, struct sk_buff *skb)
-{
- u8 old_mask = dev->beacon_mask;
- bool en;
- u32 reg;
-
- if (val) {
- dev->beacon_mask |= BIT(vif_idx);
- if (skb)
- mt76x02_mac_set_beacon(dev, vif_idx, skb);
- } else {
- dev->beacon_mask &= ~BIT(vif_idx);
- mt76x02_mac_set_beacon(dev, vif_idx, NULL);
- }
-
- if (!!old_mask == !!dev->beacon_mask)
- return;
-
- en = dev->beacon_mask;
-
- reg = MT_BEACON_TIME_CFG_BEACON_TX |
- MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_TIMER_EN;
- mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
-
- if (mt76_is_usb(dev))
- return;
-
- mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
- if (en)
- mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
- else
- mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
-}
-
-void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
- struct ieee80211_vif *vif, bool val)
-{
- u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
- struct sk_buff *skb = NULL;
-
- if (mt76_is_mmio(dev))
- tasklet_disable(&dev->pre_tbtt_tasklet);
- else if (val)
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
-
- if (!dev->beacon_mask)
- dev->tbtt_count = 0;
-
- __mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
-
- if (mt76_is_mmio(dev))
- tasklet_enable(&dev->pre_tbtt_tasklet);
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index caeeef96c42f..e4a9e0d0924b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -198,8 +198,8 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index daaed1220147..7b7163bc3b62 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -22,96 +22,18 @@
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"
-struct beacon_bc_data {
- struct mt76x02_dev *dev;
- struct sk_buff_head q;
- struct sk_buff *tail[8];
-};
-
-static void
-mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- struct sk_buff *skb = NULL;
-
- if (!(dev->beacon_mask & BIT(mvif->idx)))
- return;
-
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
- if (!skb)
- return;
-
- mt76x02_mac_set_beacon(dev, mvif->idx, skb);
-}
-
-static void
-mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct beacon_bc_data *data = priv;
- struct mt76x02_dev *dev = data->dev;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- struct ieee80211_tx_info *info;
- struct sk_buff *skb;
-
- if (!(dev->beacon_mask & BIT(mvif->idx)))
- return;
-
- skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
- if (!skb)
- return;
-
- info = IEEE80211_SKB_CB(skb);
- info->control.vif = vif;
- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
- mt76_skb_set_moredata(skb, true);
- __skb_queue_tail(&data->q, skb);
- data->tail[mvif->idx] = skb;
-}
-
-static void
-mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
-{
- u32 timer_val = dev->beacon_int << 4;
-
- dev->tbtt_count++;
-
- /*
- * Beacon timer drifts by 1us every tick, the timer is configured
- * in 1/16 TU (64us) units.
- */
- if (dev->tbtt_count < 63)
- return;
-
- /*
- * The updated beacon interval takes effect after two TBTT, because
- * at this point the original interval has already been loaded into
- * the next TBTT_TIMER value
- */
- if (dev->tbtt_count == 63)
- timer_val -= 1;
-
- mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
- MT_BEACON_TIME_CFG_INTVAL, timer_val);
-
- if (dev->tbtt_count >= 64) {
- dev->tbtt_count = 0;
- return;
- }
-}
-
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
- struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+ struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
struct beacon_bc_data data = {};
struct sk_buff *skb;
- int i, nframes;
+ int i;
- mt76x02_resync_beacon_timer(dev);
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
- data.dev = dev;
- __skb_queue_head_init(&data.q);
+ mt76x02_resync_beacon_timer(dev);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -122,13 +44,7 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
if (dev->mt76.csa_complete)
return;
- do {
- nframes = skb_queue_len(&data.q);
- ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt76x02_add_buffered_bc, &data);
- } while (nframes != skb_queue_len(&data.q) &&
- skb_queue_len(&data.q) < 8);
+ mt76x02_enqueue_buffered_bc(dev, &data, 8);
if (!skb_queue_len(&data.q))
return;
@@ -146,25 +62,67 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
- NULL);
+ mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
+ NULL);
}
spin_unlock_bh(&q->lock);
}
+static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
+{
+ if (en)
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+ else
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+}
+
+static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
+{
+ mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+ if (en)
+ mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ else
+ mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
+{
+ static const struct mt76x02_beacon_ops beacon_ops = {
+ .nslots = 8,
+ .slot_size = 1024,
+ .pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
+ .beacon_enable = mt76x02e_beacon_enable,
+ };
+
+ dev->beacon_ops = &beacon_ops;
+
+ /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, 8 << 4);
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+ MT_DFS_GP_INTERVAL);
+ mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+ mt76x02_init_beacon_config(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
+
static int
-mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
int idx, int n_desc)
{
- int ret;
+ struct mt76_queue *hwq;
+ int err;
- q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->hw_idx = idx;
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
@@ -175,15 +133,12 @@ static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
{
- int ret;
-
- q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->buf_size = bufsize;
+ int err;
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
+ err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
+ MT_RX_RING_BASE);
+ if (err < 0)
+ return err;
mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));
@@ -202,15 +157,32 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
static void mt76x02_tx_tasklet(unsigned long data)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
- int i;
+ mt76x02_mac_poll_tx_status(dev, false);
mt76x02_process_tx_status_fifo(dev);
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, tx_napi);
+ int i;
+
+ mt76x02_mac_poll_tx_status(dev, false);
+
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
- mt76x02_mac_poll_tx_status(dev, false);
- mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+ if (napi_complete_done(napi, 0))
+ mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
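+ /* Clean up again after re-enabling the interrupt to catch completions
+ * that raced with napi_complete_done()
+ */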
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ tasklet_schedule(&dev->mt76.tx_tasklet);
+
+ return 0;
}
int mt76x02_dma_init(struct mt76x02_dev *dev)
@@ -220,7 +192,6 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
struct mt76_queue *q;
void *status_fifo;
- BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
@@ -228,10 +199,12 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (!status_fifo)
return -ENOMEM;
- tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
- tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
+ tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
+ (unsigned long) dev);
+ tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
(unsigned long)dev);
+ spin_lock_init(&dev->txstatus_fifo_lock);
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
mt76_dma_attach(&dev->mt76);
@@ -268,7 +241,15 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- return mt76_init_queues(dev);
+ ret = mt76_init_queues(dev);
+ if (ret)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->tx_napi, mt76x02_poll_tx,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&dev->tx_napi);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);
@@ -296,11 +277,6 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
intr &= dev->mt76.mmio.irqmask;
- if (intr & MT_INT_TX_DONE_ALL) {
- mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
- tasklet_schedule(&dev->tx_tasklet);
- }
-
if (intr & MT_INT_RX_DONE(0)) {
mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
napi_schedule(&dev->mt76.napi[0]);
@@ -312,19 +288,22 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
}
if (intr & MT_INT_PRE_TBTT)
- tasklet_schedule(&dev->pre_tbtt_tasklet);
+ tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
/* send buffered multicast frames now */
if (intr & MT_INT_TBTT) {
if (dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
else
- mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+ mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
}
- if (intr & MT_INT_TX_STAT) {
+ if (intr & MT_INT_TX_STAT)
mt76x02_mac_poll_tx_status(dev, true);
- tasklet_schedule(&dev->tx_tasklet);
+
+ if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
+ mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ napi_schedule(&dev->tx_napi);
}
if (intr & MT_INT_GPTIMER) {
@@ -336,18 +315,6 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
-void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
- dev->mt76.mmio.irqmask &= ~clear;
- dev->mt76.mmio.irqmask |= set;
- mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
- spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
-}
-EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);
-
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
u32 val;
@@ -366,7 +333,8 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
- tasklet_kill(&dev->tx_tasklet);
+ tasklet_kill(&dev->mt76.tx_tasklet);
+ netif_napi_del(&dev->tx_napi);
mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
@@ -403,13 +371,13 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
int i;
for (i = 0; i < 4; i++) {
- q = &dev->mt76.q_tx[i];
+ q = dev->mt76.q_tx[i].q;
if (!q->queued)
continue;
prev_dma_idx = dev->mt76.tx_dma_idx[i];
- dma_idx = ioread32(&q->regs->dma_idx);
+ dma_idx = readl(&q->regs->dma_idx);
dev->mt76.tx_dma_idx[i] = dma_idx;
if (prev_dma_idx == dma_idx)
@@ -472,7 +440,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
}
dev->vif_mask = 0;
- dev->beacon_mask = 0;
+ dev->mt76.beacon_mask = 0;
}
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
@@ -484,8 +452,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
ieee80211_stop_queues(dev->mt76.hw);
set_bit(MT76_RESET, &dev->mt76.state);
- tasklet_disable(&dev->pre_tbtt_tasklet);
- tasklet_disable(&dev->tx_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.tx_tasklet);
+ napi_disable(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
napi_disable(&dev->mt76.napi[i]);
@@ -495,7 +464,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (restart)
mt76x02_reset_state(dev);
- if (dev->beacon_mask)
+ if (dev->mt76.beacon_mask)
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN);
@@ -514,7 +483,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mt76_set(dev, 0x734, 0x3);
if (restart)
- dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
+ mt76_mcu_restart(dev);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
mt76_queue_tx_cleanup(dev, i, true);
@@ -527,7 +496,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (dev->ed_monitor)
mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
- if (dev->beacon_mask && !restart)
+ if (dev->mt76.beacon_mask && !restart)
mt76_set(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN);
@@ -538,10 +507,11 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
clear_bit(MT76_RESET, &dev->mt76.state);
- tasklet_enable(&dev->tx_tasklet);
- tasklet_schedule(&dev->tx_tasklet);
+ tasklet_enable(&dev->mt76.tx_tasklet);
+ napi_enable(&dev->tx_napi);
+ napi_schedule(&dev->tx_napi);
- tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
napi_enable(&dev->mt76.napi[i]);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 7401cb94fb72..2ce05b543dff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -356,7 +356,10 @@
#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
#define MT_TBTT_SYNC_CFG 0x1118
-#define MT_TBTT_TIMER_CFG 0x1124
+#define MT_TSF_TIMER_DW0 0x111c
+#define MT_TSF_TIMER_DW1 0x1120
+#define MT_TBTT_TIMER 0x1124
+#define MT_TBTT_TIMER_VAL GENMASK(16, 0)
#define MT_INT_TIMER_CFG 0x1128
#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 94f47248c59f..cf7abd9b7d2e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -147,36 +147,33 @@ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
struct mt76x02_txwi *txwi = txwi_ptr;
- int qsel = MT_QSEL_EDCA;
- int pid;
- int ret;
+ int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
- if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+ if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
- mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ len = tx_info->skb->len - (hdrlen & 2);
+ mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
- pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
txwi->pktid = pid;
- ret = mt76x02_insert_hdr_pad(skb);
- if (ret < 0)
- return ret;
-
if (pid >= MT_PACKET_ID_FIRST)
qsel = MT_QSEL_MGMT;
- *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
- MT_TXD_INFO_80211;
+ tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
- *tx_info |= MT_TXD_INFO_WIV;
+ tx_info->info |= MT_TXD_INFO_WIV;
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index 0126e51d77ed..7b53f9e57f29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -26,9 +26,11 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
+void mt76x02u_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev);
#endif /* __MT76x02_USB_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 6fb52b596d42..6b89f7eab26c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -26,8 +26,8 @@ static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
mt76x02_remove_hdr_pad(skb, 2);
}
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
mt76x02u_remove_dma_hdr(e->skb);
mt76_tx_complete_skb(mdev, e->skb);
@@ -72,27 +72,26 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
struct mt76x02_txwi *txwi;
enum mt76_qsel qsel;
- int len = skb->len;
u32 flags;
- int pid;
- mt76x02_insert_hdr_pad(skb);
+ mt76_insert_hdr_pad(tx_info->skb);
- txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
- mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
- skb_push(skb, sizeof(struct mt76x02_txwi));
+ txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
+ mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
+ skb_push(tx_info->skb, sizeof(*txwi));
- pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
txwi->pktid = pid;
- if (pid >= MT_PACKET_ID_FIRST || q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
+ if (pid >= MT_PACKET_ID_FIRST || ep == MT_EP_OUT_HCCA)
qsel = MT_QSEL_MGMT;
else
qsel = MT_QSEL_EDCA;
@@ -102,6 +101,167 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
flags |= MT_TXD_INFO_WIV;
- return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
+ return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
+
+/* Trigger pre-TBTT event 8 ms before TBTT */
+#define PRE_TBTT_USEC 8000
+
+/* Beacon SRAM is limited to 8kB. We need to send PS buffered frames
+ * (which can be up to 1500 bytes) via beacon memory, which limits the
+ * number of slots to 5. TODO: dynamically calculate offsets in beacon SRAM.
+ */
+#define N_BCN_SLOTS 5
+
+static void mt76x02u_start_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ u64 time;
+ u32 tbtt;
+
+ /* Get remaining TBTT in usec */
+ tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
+ tbtt *= 32;
+
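+ /* TBTT is already within the pre-TBTT window, run the work right away */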
+ if (tbtt <= PRE_TBTT_USEC) {
+ queue_work(system_highpri_wq, &dev->pre_tbtt_work);
+ return;
+ }
+
+ time = (tbtt - PRE_TBTT_USEC) * 1000ull;
+ hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
+}
+
+static void mt76x02u_restart_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ u32 tbtt, dw0, dw1;
+ u64 tsf, time;
+
+ /* Get remaining TBTT in usec */
+ tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
+ tbtt *= 32;
+
+ dw0 = mt76_rr(dev, MT_TSF_TIMER_DW0);
+ dw1 = mt76_rr(dev, MT_TSF_TIMER_DW1);
+ tsf = (u64)dw0 << 32 | dw1;
+ dev_dbg(dev->mt76.dev, "TSF: %llu us TBTT %u us\n", tsf, tbtt);
+
+ /* Convert beacon interval in TU (1024 usec) to nsec */
+ time = ((1000000000ull * dev->mt76.beacon_int) >> 10);
+
+ /* Adjust time to trigger hrtimer 8ms before TBTT */
+ if (tbtt < PRE_TBTT_USEC)
+ time -= (PRE_TBTT_USEC - tbtt) * 1000ull;
+ else
+ time += (tbtt - PRE_TBTT_USEC) * 1000ull;
+
+ hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
+}
+
+static void mt76x02u_stop_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ do {
+ hrtimer_cancel(&dev->pre_tbtt_timer);
+ cancel_work_sync(&dev->pre_tbtt_work);
+ /* The timer can be re-armed by the work item. */
+ } while (hrtimer_active(&dev->pre_tbtt_timer));
+}
+
+static void mt76x02u_pre_tbtt_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev =
+ container_of(work, struct mt76x02_dev, pre_tbtt_work);
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int i, nbeacons;
+
+ if (!dev->mt76.beacon_mask)
+ return;
+
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ mt76x02_resync_beacon_timer(dev);
+
+ ieee80211_iterate_active_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_update_beacon_iter, dev);
+
+ nbeacons = hweight8(dev->mt76.beacon_mask);
+ mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);
+
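+ /* Write buffered frames into the remaining beacon slots; a NULL skb clears an unused slot */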
+ for (i = nbeacons; i < N_BCN_SLOTS; i++) {
+ skb = __skb_dequeue(&data.q);
+ mt76x02_mac_set_beacon(dev, i, skb);
+ }
+
+ mt76x02u_restart_pre_tbtt_timer(dev);
+}
+
+static enum hrtimer_restart mt76x02u_pre_tbtt_interrupt(struct hrtimer *timer)
+{
+ struct mt76x02_dev *dev =
+ container_of(timer, struct mt76x02_dev, pre_tbtt_timer);
+
+ queue_work(system_highpri_wq, &dev->pre_tbtt_work);
+
+ return HRTIMER_NORESTART;
+}
+
+static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
+{
+ if (en && dev->mt76.beacon_mask &&
+ !hrtimer_active(&dev->pre_tbtt_timer))
+ mt76x02u_start_pre_tbtt_timer(dev);
+ if (!en)
+ mt76x02u_stop_pre_tbtt_timer(dev);
+}
+
+static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!dev->mt76.beacon_int))
+ return;
+
+ if (en) {
+ mt76x02u_start_pre_tbtt_timer(dev);
+ } else {
+ /* The timer is already stopped; only clean up
+ * any PS buffered frames.
+ */
+ for (i = 0; i < N_BCN_SLOTS; i++)
+ mt76x02_mac_set_beacon(dev, i, NULL);
+ }
+}
+
+void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
+{
+ static const struct mt76x02_beacon_ops beacon_ops = {
+ .nslots = N_BCN_SLOTS,
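+ /* Split the 8kB beacon SRAM evenly across slots, aligned down to 64 bytes */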
+ .slot_size = (8192 / N_BCN_SLOTS) & ~63,
+ .pre_tbtt_enable = mt76x02u_pre_tbtt_enable,
+ .beacon_enable = mt76x02u_beacon_enable,
+ };
+ dev->beacon_ops = &beacon_ops;
+
+ hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
+ INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
+
+ mt76x02_init_beacon_config(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);
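A quick sanity check on the slot layout configured above (worked out from the code, not part of the patch): the 8 KiB of beacon SRAM is split into N_BCN_SLOTS slots and each slot is rounded down to a 64-byte boundary, i.e.

	(8192 / 5) & ~63 = 1638 & ~63 = 1600 bytes per slot,

so the five slots occupy 5 * 1600 = 8000 of the 8192 available bytes.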
+
+void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
+{
+ if (!test_bit(MT76_REMOVED, &dev->mt76.state))
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ mt76x02u_stop_pre_tbtt_timer(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_exit_beacon_config);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index cd072ac614f7..ad5323447ed4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -132,7 +132,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
- INIT_DELAYED_WORK(&dev->mac_work, mt76x02_mac_work);
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt76x02_mac_work);
hw->queues = 4;
hw->max_rates = 1;
@@ -142,6 +142,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
@@ -158,7 +159,6 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
wiphy->reg_notifier = mt76x02_regd_notifier;
wiphy->iface_combinations = mt76x02_if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
- wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
/* init led callbacks */
@@ -378,7 +378,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
break;
case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -424,6 +424,16 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
+ /*
+ * In USB AP mode, broadcast/multicast frames are set up in the beacon
+ * data registers and sent via the HW beacon engine, so they must
+ * already be encrypted.
+ */
+ if (mt76_is_usb(dev) &&
+ vif->type == NL80211_IFTYPE_AP &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
msta = sta ? (struct mt76x02_sta *) sta->drv_priv : NULL;
wcid = msta ? &msta->wcid : &mvif->group_wcid;
@@ -465,7 +475,7 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->mt76.q_tx[queue].hw_idx;
+ qid = dev->mt76.q_tx[queue].q->hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
@@ -562,26 +572,9 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
rate.idx = rates->rate[0].idx;
rate.flags = rates->rate[0].flags;
mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate);
- msta->wcid.max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, &rate);
}
EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update);
-int mt76x02_insert_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
-
- if (len % 4 == 0)
- return 0;
-
- skb_push(skb, 2);
- memmove(skb->data, skb->data + 2, len);
-
- skb->data[len] = 0;
- skb->data[len + 1] = 0;
- return 2;
-}
-EXPORT_SYMBOL_GPL(mt76x02_insert_hdr_pad);
-
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
{
int hdrlen;
@@ -600,8 +593,6 @@ void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76x02_dev *dev = hw->priv;
- if (mt76_is_mmio(dev))
- tasklet_disable(&dev->pre_tbtt_tasklet);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x02_sw_scan);
@@ -612,9 +603,6 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
- if (mt76_is_mmio(dev))
- tasklet_enable(&dev->pre_tbtt_tasklet);
-
if (dev->cal.gain_init_done) {
/* Restore AGC gain and resume calibration after scanning. */
dev->cal.low_gain = -1;
@@ -631,72 +619,11 @@ void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
- mt76x02_mac_wcid_set_drop(dev, idx, ps);
+ if (mt76_is_mmio(dev))
+ mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
-const u16 mt76x02_beacon_offsets[16] = {
- /* 1024 byte per beacon */
- 0xc000,
- 0xc400,
- 0xc800,
- 0xcc00,
- 0xd000,
- 0xd400,
- 0xd800,
- 0xdc00,
- /* BSS idx 8-15 not used for beacons */
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
-};
-
-static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
-{
- u16 val, base = MT_BEACON_BASE;
- u32 regs[4] = {};
- int i;
-
- for (i = 0; i < 16; i++) {
- val = mt76x02_beacon_offsets[i] - base;
- regs[i / 4] |= (val / 64) << (8 * (i % 4));
- }
-
- for (i = 0; i < 4; i++)
- mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
-}
-
-void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
-{
- int i;
-
- if (mt76_is_mmio(dev)) {
- /* Fire a pre-TBTT interrupt 8 ms before TBTT */
- mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
- 8 << 4);
- mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
- MT_DFS_GP_INTERVAL);
- mt76_wr(dev, MT_INT_TIMER_EN, 0);
- }
-
- mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_BEACON_TX));
- mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
-
- for (i = 0; i < 8; i++)
- mt76x02_mac_set_beacon(dev, i, NULL);
-
- mt76x02_set_beacon_offsets(dev);
-}
-EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
-
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -718,7 +645,7 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
- dev->beacon_int = info->beacon_int;
+ dev->mt76.beacon_int = info->beacon_int;
}
if (changed & BSS_CHANGED_BEACON_ENABLED)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index a30ef2c5a9db..c6078e90ca43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -165,27 +165,21 @@ void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_channel *chan;
struct mt76x2_tx_power_info txp;
struct mt76_rate_power t = {};
- int target_power;
int i;
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
mt76x2_get_power_info(dev, &txp, chan);
-
- target_power = max_t(int, (txp.chain[0].target_power +
- txp.chain[0].delta),
- (txp.chain[1].target_power +
- txp.chain[1].delta));
-
mt76x2_get_rate_power(dev, &t, chan);
chan->max_power = mt76x02_get_max_rate_power(&t) +
- target_power;
- chan->max_power /= 2;
+ txp.target_power;
+ chan->max_power = DIV_ROUND_UP(chan->max_power, 2);
/* convert to combined output power on 2x2 devices */
chan->max_power += 3;
+ chan->orig_mpwr = chan->max_power;
}
}
EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
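A worked example of the max_power computation above (a sketch; it assumes, as the divide-by-two conversion suggests, that both the per-rate power table and txp.target_power are in 0.5 dB units):

/* e.g. max rate power 8 (4 dB) and target power 34 (17 dBm):
 *   DIV_ROUND_UP(8 + 34, 2) = 21 dBm, then +3 dB combined 2x2 output = 24 dBm
 */
static int chan_max_power_dbm(int max_rate_power_halfdb, int target_power_halfdb)
{
	return DIV_ROUND_UP(max_rate_power_halfdb + target_power_halfdb, 2) + 3;
}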
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 6274655e1f7e..e84d5c5911ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -32,6 +32,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_aligned4_skbs = true,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
@@ -106,7 +107,7 @@ mt76pci_remove(struct pci_dev *pdev)
mt76_unregister_device(mdev);
mt76x2_cleanup(dev);
- ieee80211_free_hw(mdev->hw);
+ mt76_free_device(mdev);
}
MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index d3927a13e92e..71aea2832644 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -120,7 +120,7 @@ int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
mt76x02_mac_setaddr(dev, macaddr);
- mt76x02_init_beacon_config(dev);
+ mt76x02e_init_beacon_config(dev);
if (!hard)
return 0;
@@ -291,7 +291,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
cancel_delayed_work_sync(&dev->wdt_work);
mt76x02_mcu_set_radio_state(dev, false);
mt76x2_mac_stop(dev, false);
@@ -300,7 +300,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
void mt76x2_cleanup(struct mt76x02_dev *dev)
{
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
mt76x02_dma_cleanup(dev);
mt76x02_mcu_cleanup(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 878ce92405ed..e416eee6a306 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -22,26 +22,21 @@ mt76x2_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x2_mac_start(dev);
if (ret)
- goto out;
+ return ret;
ret = mt76x2_phy_start(dev);
if (ret)
- goto out;
+ return ret;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
MT_WATCHDOG_TIME);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
+ return 0;
}
static void
@@ -49,10 +44,8 @@ mt76x2_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x2_stop_hardware(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static int
@@ -66,7 +59,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_set_channel(&dev->mt76);
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mt76x2_mac_stop(dev, true);
@@ -80,7 +73,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x2_mac_resume(dev);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
- tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
clear_bit(MT76_RESET, &dev->mt76.state);
@@ -135,12 +128,6 @@ mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
}
-static int
-mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
-{
- return 0;
-}
-
static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
u32 rx_ant)
{
@@ -197,7 +184,7 @@ const struct ieee80211_ops mt76x2_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x02_set_coverage_class,
.get_survey = mt76_get_survey,
- .set_tim = mt76x2_set_tim,
+ .set_tim = mt76_set_tim,
.set_antenna = mt76x2_set_antenna,
.get_antenna = mt76x2_get_antenna,
.set_rts_threshold = mt76x02_set_rts_threshold,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index 769a9b972044..cdedf95ca4f5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -161,12 +161,12 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
delta = txp.delta_bw80;
mt76x2_get_rate_power(dev, &t, chan);
- mt76x02_add_rate_power_offset(&t, txp.chain[0].target_power);
+ mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
base_power = mt76x2_get_min_rate_power(&t);
- delta += base_power - txp.chain[0].target_power;
+ delta = base_power - txp.target_power;
txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
@@ -182,7 +182,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
}
mt76x02_add_rate_power_offset(&t, -base_power);
- dev->target_power = txp.chain[0].target_power;
+ dev->target_power = txp.target_power;
dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
dev->mt76.rate_power = t;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index ac0f13d46299..7a994a783510 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -40,6 +40,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
+ .sta_ps = mt76x02_sta_ps,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
};
@@ -48,7 +49,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
struct mt76_dev *mdev;
int err;
- mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
+ mdev = mt76_alloc_device(&udev->dev, sizeof(*dev), &mt76x2u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
@@ -58,6 +59,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
udev = usb_get_dev(udev);
usb_reset_device(udev);
+ usb_set_intfdata(intf, dev);
+
mt76x02u_init_mcu(mdev);
err = mt76u_init(mdev, intf);
if (err < 0)
@@ -104,8 +107,7 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
{
struct mt76x02_dev *dev = usb_get_intfdata(intf);
- mt76u_stop_queues(&dev->mt76);
- mt76x2u_stop_hw(dev);
+ mt76u_stop_rx(&dev->mt76);
return 0;
}
@@ -113,16 +115,12 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
{
struct mt76x02_dev *dev = usb_get_intfdata(intf);
- struct mt76_usb *usb = &dev->mt76.usb;
int err;
- err = mt76u_submit_rx_buffers(&dev->mt76);
+ err = mt76u_resume_rx(&dev->mt76);
if (err < 0)
goto err;
- tasklet_enable(&usb->rx_tasklet);
- tasklet_enable(&usb->tx_tasklet);
-
err = mt76x2u_init_hardware(dev);
if (err < 0)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 1da90e58d942..f2c57d5b87f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -183,7 +183,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
mt76x02_mac_shared_key_setup(dev, i, k, NULL);
}
- mt76x02_init_beacon_config(dev);
+ mt76x02u_init_beacon_config(dev);
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
@@ -244,9 +244,8 @@ fail:
void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
- mt76u_stop_stat_wk(&dev->mt76);
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
mt76x2u_mac_stop(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 2ac78e4dc41a..97bcf6494ec1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -21,29 +21,24 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x2u_mac_start(dev);
if (ret)
- goto out;
+ return ret;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
+ return 0;
}
static void mt76x2u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76u_stop_tx(&dev->mt76);
mt76x2u_stop_hw(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static int
@@ -57,6 +52,8 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76_set_channel(&dev->mt76);
+ dev->beacon_ops->pre_tbtt_enable(dev, false);
+
mt76x2_mac_stop(dev, false);
err = mt76x2u_phy_set_channel(dev, chandef);
@@ -64,6 +61,8 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76x2_mac_resume(dev);
mt76x02_edcca_init(dev, true);
+ dev->beacon_ops->pre_tbtt_enable(dev, true);
+
clear_bit(MT76_RESET, &dev->mt76.state);
mt76_txq_schedule_all(&dev->mt76);
@@ -125,4 +124,6 @@ const struct ieee80211_ops mt76x2u_ops = {
.sw_scan_complete = mt76x02_sw_scan_complete,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.get_txpower = mt76_get_txpower,
+ .set_tim = mt76_set_tim,
+ .release_buffered_frames = mt76_release_buffered_frames,
};
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 2585df512335..5397827668b9 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -21,15 +21,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
dma_addr_t addr;
+ u8 *txwi;
int size;
- size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
- t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
- if (!t)
+ size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+ txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ if (!txwi)
return NULL;
- addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+ addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
DMA_TO_DEVICE);
+ t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
t->dma_addr = addr;
return t;
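The reworked allocation above places the DMA-mapped txwi buffer and its bookkeeping entry in a single cache-line-aligned allocation; a sketch of the resulting layout (illustrative comment only, not part of the patch):

/*
 * devm_kzalloc(L1_CACHE_ALIGN(drv->txwi_size + sizeof(*t))):
 *
 *   txwi                                   t
 *   |<----- drv->txwi_size (DMA-mapped) -->|<- struct mt76_txwi_cache ->| pad |
 *
 * Only the first txwi_size bytes are handed to dma_map_single(); the cache
 * entry lives just past the hardware descriptor, hence
 * t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size).
 */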
@@ -72,13 +74,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
list_add(&t->list, &dev->txwi_cache);
spin_unlock_bh(&dev->lock);
}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
void mt76_tx_free(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
while ((t = __mt76_get_txwi(dev)) != NULL)
- dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
+ dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
}
@@ -266,7 +269,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
skb_set_queue_mapping(skb, qid);
}
- if (!wcid->tx_rate_set)
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
@@ -283,10 +286,10 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
mt76_check_agg_ssn(mtxq, skb);
}
- q = &dev->q_tx[qid];
+ q = dev->q_tx[qid].q;
spin_lock_bh(&q->lock);
- dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8 && !q->stopped) {
@@ -327,7 +330,6 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
{
struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
if (last)
@@ -335,7 +337,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
IEEE80211_TX_CTL_REQ_TX_STATUS;
mt76_skb_set_moredata(skb, !last);
- dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}
void
@@ -346,7 +348,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
{
struct mt76_dev *dev = hw->priv;
struct sk_buff *last_skb = NULL;
- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+ struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
int i;
spin_lock_bh(&hwq->lock);
@@ -386,12 +388,14 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
struct mt76_txq *mtxq, bool *empty)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
- struct ieee80211_tx_info *info;
+ enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
+ struct mt76_queue *hwq = sq->q;
+ struct ieee80211_tx_info *info;
struct sk_buff *skb;
int n_frames = 1, limit;
struct ieee80211_tx_rate tx_rate;
@@ -411,7 +415,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
}
info = IEEE80211_SKB_CB(skb);
- if (!wcid->tx_rate_set)
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
tx_rate = info->control.rates[0];
@@ -423,7 +427,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
if (idx < 0)
return idx;
@@ -458,7 +462,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (cur_ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
txq->sta);
if (idx < 0)
return idx;
@@ -467,8 +471,9 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
} while (n_frames < limit);
if (!probe) {
- hwq->swq_queued++;
+ hwq->entry[idx].qid = sq - dev->q_tx;
hwq->entry[idx].schedule = true;
+ sq->swq_queued++;
}
dev->queue_ops->kick(dev, hwq);
@@ -477,22 +482,37 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
}
static int
-mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
- struct mt76_txq *mtxq, *mtxq_last;
- int len = 0;
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *hwq = sq->q;
+ struct ieee80211_txq *txq;
+ struct mt76_txq *mtxq;
+ struct mt76_wcid *wcid;
+ int ret = 0;
-restart:
- mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
- while (!list_empty(&hwq->swq)) {
+ spin_lock_bh(&hwq->lock);
+ while (1) {
bool empty = false;
- int cur;
+
+ if (sq->swq_queued >= 4)
+ break;
if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
- test_bit(MT76_RESET, &dev->state))
- return -EBUSY;
+ test_bit(MT76_RESET, &dev->state)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ txq = ieee80211_next_txq(dev->hw, qid);
+ if (!txq)
+ break;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+ wcid = mtxq->wcid;
+ if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+ continue;
- mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
@@ -504,38 +524,37 @@ restart:
spin_unlock_bh(&hwq->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
spin_lock_bh(&hwq->lock);
- goto restart;
}
- list_del_init(&mtxq->list);
-
- cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
- if (!empty)
- list_add_tail(&mtxq->list, &hwq->swq);
-
- if (cur < 0)
- return cur;
-
- len += cur;
-
- if (mtxq == mtxq_last)
- break;
+ ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
+ if (skb_queue_empty(&mtxq->retry_q))
+ empty = true;
+ ieee80211_return_txq(dev->hw, txq, !empty);
}
+ spin_unlock_bh(&hwq->lock);
- return len;
+ return ret;
}
-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
{
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
int len;
+ if (qid >= 4)
+ return;
+
+ if (sq->swq_queued >= 4)
+ return;
+
rcu_read_lock();
- do {
- if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
- break;
- len = mt76_txq_schedule_list(dev, hwq);
+ do {
+ ieee80211_txq_schedule_start(dev->hw, qid);
+ len = mt76_txq_schedule_list(dev, qid);
+ ieee80211_txq_schedule_end(dev->hw, qid);
} while (len > 0);
+
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
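The scheduling rework above moves the driver onto mac80211's TXQ API as used in this series; the pattern it follows is roughly the one sketched below (illustrative only; drv_send_burst() is a hypothetical stand-in for mt76_txq_send_burst()):

static void drv_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
		int sent = drv_send_burst(hw, txq);	/* hypothetical helper */

		/* the last argument asks mac80211 to requeue this txq */
		ieee80211_return_txq(hw, txq, sent > 0);
	}
	ieee80211_txq_schedule_end(hw, ac);
}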
@@ -544,13 +563,8 @@ void mt76_txq_schedule_all(struct mt76_dev *dev)
{
int i;
- for (i = 0; i <= MT_TXQ_BK; i++) {
- struct mt76_queue *q = &dev->q_tx[i];
-
- spin_lock_bh(&q->lock);
- mt76_txq_schedule(dev, q);
- spin_unlock_bh(&q->lock);
- }
+ for (i = 0; i <= MT_TXQ_BK; i++)
+ mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
@@ -561,18 +575,18 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_queue *hwq;
struct mt76_txq *mtxq;
if (!txq)
continue;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ hwq = mtxq->swq->q;
- spin_lock_bh(&mtxq->hwq->lock);
+ spin_lock_bh(&hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
- if (!list_empty(&mtxq->list))
- list_del_init(&mtxq->list);
- spin_unlock_bh(&mtxq->hwq->lock);
+ spin_unlock_bh(&hwq->lock);
}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
@@ -580,36 +594,23 @@ EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
struct mt76_dev *dev = hw->priv;
- struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
- struct mt76_queue *hwq = mtxq->hwq;
if (!test_bit(MT76_STATE_RUNNING, &dev->state))
return;
- spin_lock_bh(&hwq->lock);
- if (list_empty(&mtxq->list))
- list_add_tail(&mtxq->list, &hwq->swq);
- mt76_txq_schedule(dev, hwq);
- spin_unlock_bh(&hwq->lock);
+ tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq;
- struct mt76_queue *hwq;
struct sk_buff *skb;
if (!txq)
return;
mtxq = (struct mt76_txq *) txq->drv_priv;
- hwq = mtxq->hwq;
-
- spin_lock_bh(&hwq->lock);
- if (!list_empty(&mtxq->list))
- list_del_init(&mtxq->list);
- spin_unlock_bh(&hwq->lock);
while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
ieee80211_free_txskb(dev->hw, skb);
@@ -620,10 +621,9 @@ void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
- INIT_LIST_HEAD(&mtxq->list);
skb_queue_head_init(&mtxq->retry_q);
- mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+ mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 4c1abd492405..bbaa1365bbda 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -31,8 +31,7 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_device *udev = to_usb_device(dev->dev);
unsigned int pipe;
int i, ret;
@@ -247,8 +246,7 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
static bool mt76u_check_sg(struct mt76_dev *dev)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_device *udev = to_usb_device(dev->dev);
return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
(udev->bus->no_sg_constraint ||
@@ -285,28 +283,24 @@ mt76u_set_endpoints(struct usb_interface *intf,
}
static int
-mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
- int nsgs, int len, int sglen)
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
+ int nsgs, gfp_t gfp)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- struct urb *urb = buf->urb;
int i;
- spin_lock_bh(&q->rx_page_lock);
for (i = 0; i < nsgs; i++) {
struct page *page;
void *data;
int offset;
- data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
+ data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
if (!data)
break;
page = virt_to_head_page(data);
offset = data - page_address(page);
- sg_set_page(&urb->sg[i], page, sglen, offset);
+ sg_set_page(&urb->sg[i], page, q->buf_size, offset);
}
- spin_unlock_bh(&q->rx_page_lock);
if (i < nsgs) {
int j;
@@ -317,72 +311,78 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
}
urb->num_sgs = max_t(int, i, urb->num_sgs);
- buf->len = urb->num_sgs * sglen,
+ urb->transfer_buffer_length = urb->num_sgs * q->buf_size,
sg_init_marker(urb->sg, urb->num_sgs);
return i ? : -ENOMEM;
}
static int
-mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76u_buf *buf, int nsgs, gfp_t gfp)
+mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+
if (dev->usb.sg_en) {
- return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
- SKB_WITH_OVERHEAD(q->buf_size));
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
} else {
- buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
- return buf->buf ? 0 : -ENOMEM;
+ urb->transfer_buffer_length = q->buf_size;
+ urb->transfer_buffer = page_frag_alloc(&q->rx_page,
+ q->buf_size, gfp);
+ return urb->transfer_buffer ? 0 : -ENOMEM;
}
}
static int
-mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf)
+mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned int size = sizeof(struct urb);
- buf->len = SKB_WITH_OVERHEAD(q->buf_size);
- buf->dev = dev;
+ if (dev->usb.sg_en)
+ size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);
- buf->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!buf->urb)
+ e->urb = kzalloc(size, GFP_KERNEL);
+ if (!e->urb)
return -ENOMEM;
- if (dev->usb.sg_en) {
- buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
- sizeof(*buf->urb->sg),
- GFP_KERNEL);
- if (!buf->urb->sg)
- return -ENOMEM;
+ usb_init_urb(e->urb);
- sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE);
- }
+ if (dev->usb.sg_en)
+ e->urb->sg = (struct scatterlist *)(e->urb + 1);
- return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL);
+ return 0;
}
-static void mt76u_buf_free(struct mt76u_buf *buf)
+static int
+mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
+{
+ int err;
+
+ err = mt76u_urb_alloc(dev, e);
+ if (err)
+ return err;
+
+ return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
+}
+
+static void mt76u_urb_free(struct urb *urb)
{
- struct urb *urb = buf->urb;
int i;
for (i = 0; i < urb->num_sgs; i++)
skb_free_frag(sg_virt(&urb->sg[i]));
- if (buf->buf)
- skb_free_frag(buf->buf);
+ if (urb->transfer_buffer)
+ skb_free_frag(urb->transfer_buffer);
- usb_free_urb(buf->urb);
+ usb_free_urb(urb);
}
static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
- struct mt76u_buf *buf, usb_complete_t complete_fn,
+ struct urb *urb, usb_complete_t complete_fn,
void *context)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
- u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
+ struct usb_device *udev = to_usb_device(dev->dev);
unsigned int pipe;
if (dir == USB_DIR_IN)
@@ -390,37 +390,28 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
else
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
- usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
- complete_fn, context);
+ urb->dev = udev;
+ urb->pipe = pipe;
+ urb->complete = complete_fn;
+ urb->context = context;
}
-static int
-mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
- struct mt76u_buf *buf, gfp_t gfp,
- usb_complete_t complete_fn, void *context)
+static inline struct urb *
+mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
- mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn,
- context);
- trace_submit_urb(dev, buf->urb);
-
- return usb_submit_urb(buf->urb, gfp);
-}
-
-static inline struct mt76u_buf
-*mt76u_get_next_rx_entry(struct mt76_queue *q)
-{
- struct mt76u_buf *buf = NULL;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct urb *urb = NULL;
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->queued > 0) {
- buf = &q->entry[q->head].ubuf;
+ urb = q->entry[q->head].urb;
q->head = (q->head + 1) % q->ndesc;
q->queued--;
}
spin_unlock_irqrestore(&q->lock, flags);
- return buf;
+ return urb;
}
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
@@ -439,12 +430,12 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
}
static int
-mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- struct urb *urb = buf->urb;
- u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
- int data_len, len, nsgs = 1;
+ u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
+ int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
+ int len, nsgs = 1;
struct sk_buff *skb;
if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
@@ -454,10 +445,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
if (len < 0)
return 0;
- data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
- if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
+ if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
+ dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
return 0;
+ }
skb = build_skb(data, q->buf_size);
if (!skb)
@@ -503,7 +495,7 @@ static void mt76u_complete_rx(struct urb *urb)
}
spin_lock_irqsave(&q->lock, flags);
- if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+ if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
goto out;
q->tail = (q->tail + 1) % q->ndesc;
@@ -513,37 +505,43 @@ out:
spin_unlock_irqrestore(&q->lock, flags);
}
+static int
+mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
+{
+ mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
+ mt76u_complete_rx, dev);
+ trace_submit_urb(dev, urb);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
static void mt76u_rx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- struct mt76u_buf *buf;
+ struct urb *urb;
int err, count;
rcu_read_lock();
while (true) {
- buf = mt76u_get_next_rx_entry(q);
- if (!buf)
+ urb = mt76u_get_next_rx_entry(dev);
+ if (!urb)
break;
- count = mt76u_process_rx_entry(dev, buf);
+ count = mt76u_process_rx_entry(dev, urb);
if (count > 0) {
- err = mt76u_refill_rx(dev, q, buf, count,
- GFP_ATOMIC);
+ err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
if (err < 0)
break;
}
- mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
- buf, GFP_ATOMIC,
- mt76u_complete_rx, dev);
+ mt76u_submit_rx_buf(dev, urb);
}
mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
rcu_read_unlock();
}
-int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
unsigned long flags;
@@ -551,9 +549,7 @@ int mt76u_submit_rx_buffers(struct mt76_dev *dev)
spin_lock_irqsave(&q->lock, flags);
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
- &q->entry[i].ubuf, GFP_ATOMIC,
- mt76u_complete_rx, dev);
+ err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
if (err < 0)
break;
}
@@ -563,7 +559,6 @@ int mt76u_submit_rx_buffers(struct mt76_dev *dev)
return err;
}
-EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
@@ -575,7 +570,6 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
if (!usb->mcu.data)
return -ENOMEM;
- spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -586,7 +580,7 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
q->ndesc = MT_NUM_RX_ENTRIES;
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_buf_alloc(dev, &q->entry[i].ubuf);
+ err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
if (err < 0)
return err;
}
@@ -601,60 +595,76 @@ static void mt76u_free_rx(struct mt76_dev *dev)
int i;
for (i = 0; i < q->ndesc; i++)
- mt76u_buf_free(&q->entry[i].ubuf);
+ mt76u_urb_free(q->entry[i].urb);
- spin_lock_bh(&q->rx_page_lock);
if (!q->rx_page.va)
- goto out;
+ return;
page = virt_to_page(q->rx_page.va);
__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
memset(&q->rx_page, 0, sizeof(q->rx_page));
-out:
- spin_unlock_bh(&q->rx_page_lock);
}
-static void mt76u_stop_rx(struct mt76_dev *dev)
+void mt76u_stop_rx(struct mt76_dev *dev)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i;
for (i = 0; i < q->ndesc; i++)
- usb_kill_urb(q->entry[i].ubuf.urb);
+ usb_poison_urb(q->entry[i].urb);
+
+ tasklet_kill(&dev->usb.rx_tasklet);
}
+EXPORT_SYMBOL_GPL(mt76u_stop_rx);
+
+int mt76u_resume_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ usb_unpoison_urb(q->entry[i].urb);
+
+ return mt76u_submit_rx_buffers(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_resume_rx);
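The stop/resume pair above relies on URB poisoning in the USB core: usb_poison_urb() waits for a running URB to complete and rejects further submissions until usb_unpoison_urb() is called, which is what makes the resubmit in mt76u_resume_rx() safe. A minimal sketch of that contract (illustrative only, not part of the patch):

#include <linux/usb.h>

static void quiesce_and_reuse_urb(struct urb *urb)
{
	usb_poison_urb(urb);	/* waits for completion, blocks new submissions */
	/* ... device quiesced here (suspend, reset, ...) ... */
	usb_unpoison_urb(urb);	/* the URB may be submitted again */
}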
static void mt76u_tx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
struct mt76_queue_entry entry;
- struct mt76u_buf *buf;
+ struct mt76_sw_queue *sq;
struct mt76_queue *q;
bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ u32 n_dequeued = 0, n_sw_dequeued = 0;
- spin_lock_bh(&q->lock);
- while (true) {
- buf = &q->entry[q->head].ubuf;
- if (!buf->done || !q->queued)
+ sq = &dev->q_tx[i];
+ q = sq->q;
+
+ while (q->queued > n_dequeued) {
+ if (!q->entry[q->head].done)
break;
if (q->entry[q->head].schedule) {
q->entry[q->head].schedule = false;
- q->swq_queued--;
+ n_sw_dequeued++;
}
entry = q->entry[q->head];
+ q->entry[q->head].done = false;
q->head = (q->head + 1) % q->ndesc;
- q->queued--;
+ n_dequeued++;
- spin_unlock_bh(&q->lock);
- dev->drv->tx_complete_skb(dev, q, &entry, false);
- spin_lock_bh(&q->lock);
+ dev->drv->tx_complete_skb(dev, i, &entry);
}
- mt76_txq_schedule(dev, q);
+
+ spin_lock_bh(&q->lock);
+
+ sq->swq_queued -= n_sw_dequeued;
+ q->queued -= n_dequeued;
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
@@ -665,6 +675,8 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_unlock_bh(&q->lock);
+ mt76_txq_schedule(dev, i);
+
if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
ieee80211_queue_delayed_work(dev->hw,
&dev->usb.stat_work,
@@ -703,34 +715,43 @@ static void mt76u_tx_status_data(struct work_struct *work)
static void mt76u_complete_tx(struct urb *urb)
{
- struct mt76u_buf *buf = urb->context;
- struct mt76_dev *dev = buf->dev;
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue_entry *e = urb->context;
if (mt76u_urb_error(urb))
dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
- buf->done = true;
+ e->done = true;
- tasklet_schedule(&dev->usb.tx_tasklet);
+ tasklet_schedule(&dev->tx_tasklet);
}
static int
-mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
- struct urb *urb)
+mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
+ struct urb *urb)
{
- if (!dev->usb.sg_en)
- return 0;
+ urb->transfer_buffer_length = skb->len;
- sg_init_table(urb->sg, MT_SG_MAX_SIZE);
- urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
- return urb->num_sgs;
+ if (!dev->usb.sg_en) {
+ urb->transfer_buffer = skb->data;
+ return 0;
+ } else {
+ sg_init_table(urb->sg, MT_SG_MAX_SIZE);
+ urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
+ if (urb->num_sgs == 0)
+ return -ENOMEM;
+ return urb->num_sgs;
+ }
}
static int
-mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76u_buf *buf;
+ struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
u16 idx = q->tail;
int err;
@@ -738,24 +759,20 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return -ENOSPC;
skb->prev = skb->next = NULL;
- err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
if (err < 0)
return err;
- buf = &q->entry[idx].ubuf;
- buf->buf = skb->data;
- buf->len = skb->len;
- buf->done = false;
-
- err = mt76u_tx_build_sg(dev, skb, buf->urb);
+ err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
if (err < 0)
return err;
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
- buf, mt76u_complete_tx, buf);
+ q->entry[idx].urb, mt76u_complete_tx,
+ &q->entry[idx]);
q->tail = (q->tail + 1) % q->ndesc;
- q->entry[idx].skb = skb;
+ q->entry[idx].skb = tx_info.skb;
q->queued++;
return idx;
@@ -763,14 +780,14 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76u_buf *buf;
+ struct urb *urb;
int err;
while (q->first != q->tail) {
- buf = &q->entry[q->first].ubuf;
+ urb = q->entry[q->first].urb;
- trace_submit_urb(dev, buf->urb);
- err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+ trace_submit_urb(dev, urb);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
if (err == -ENODEV)
set_bit(MT76_REMOVED, &dev->state);
@@ -785,15 +802,24 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
- struct mt76u_buf *buf;
struct mt76_queue *q;
- int i, j;
+ int i, j, err;
+
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
+ INIT_LIST_HEAD(&dev->q_tx[i].swq);
+
+ if (i >= IEEE80211_NUM_ACS) {
+ dev->q_tx[i].q = dev->q_tx[0].q;
+ continue;
+ }
+
+ q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
q->hw_idx = mt76_ac_to_hwq(i);
+ dev->q_tx[i].q = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
@@ -803,22 +829,9 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
q->ndesc = MT_NUM_TX_ENTRIES;
for (j = 0; j < q->ndesc; j++) {
- buf = &q->entry[j].ubuf;
- buf->dev = dev;
-
- buf->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!buf->urb)
- return -ENOMEM;
-
- if (dev->usb.sg_en) {
- size_t size = MT_SG_MAX_SIZE *
- sizeof(struct scatterlist);
-
- buf->urb->sg = devm_kzalloc(dev->dev, size,
- GFP_KERNEL);
- if (!buf->urb->sg)
- return -ENOMEM;
- }
+ err = mt76u_urb_alloc(dev, &q->entry[j]);
+ if (err < 0)
+ return err;
}
}
return 0;
@@ -830,44 +843,60 @@ static void mt76u_free_tx(struct mt76_dev *dev)
int i, j;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ q = dev->q_tx[i].q;
for (j = 0; j < q->ndesc; j++)
- usb_free_urb(q->entry[j].ubuf.urb);
+ usb_free_urb(q->entry[j].urb);
}
}
-static void mt76u_stop_tx(struct mt76_dev *dev)
+void mt76u_stop_tx(struct mt76_dev *dev)
{
+ struct mt76_queue_entry entry;
struct mt76_queue *q;
- int i, j;
+ int i, j, ret;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
- for (j = 0; j < q->ndesc; j++)
- usb_kill_urb(q->entry[j].ubuf.urb);
- }
-}
+ ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ/5);
+ if (!ret) {
+ dev_err(dev->dev, "timed out waiting for pending tx\n");
-void mt76u_stop_queues(struct mt76_dev *dev)
-{
- tasklet_disable(&dev->usb.rx_tasklet);
- tasklet_disable(&dev->usb.tx_tasklet);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i].q;
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].urb);
+ }
- mt76u_stop_rx(dev);
- mt76u_stop_tx(dev);
-}
-EXPORT_SYMBOL_GPL(mt76u_stop_queues);
+ tasklet_kill(&dev->tx_tasklet);
+
+ /* On device removal we might queue skb's, but mt76u_tx_kick()
+ * will fail to submit the urb; clean up those skb's manually.
+ */
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i].q;
+
+ /* Ensure we are in sync with the killed tasklet. */
+ spin_lock_bh(&q->lock);
+ while (q->queued) {
+ entry = q->entry[q->head];
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+
+ dev->drv->tx_complete_skb(dev, i, &entry);
+ }
+ spin_unlock_bh(&q->lock);
+ }
+ }
-void mt76u_stop_stat_wk(struct mt76_dev *dev)
-{
cancel_delayed_work_sync(&dev->usb.stat_work);
clear_bit(MT76_READING_STATS, &dev->state);
+
+ mt76_tx_status_check(dev, NULL, true);
}
-EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
+EXPORT_SYMBOL_GPL(mt76u_stop_tx);
void mt76u_queues_deinit(struct mt76_dev *dev)
{
- mt76u_stop_queues(dev);
+ mt76u_stop_rx(dev);
+ mt76u_stop_tx(dev);
mt76u_free_rx(dev);
mt76u_free_tx(dev);
@@ -906,7 +935,7 @@ int mt76u_init(struct mt76_dev *dev,
struct mt76_usb *usb = &dev->usb;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
- tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
+ tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 14b569b6d1b5..7cea08f71838 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -13,12 +13,11 @@
#define QTNF_MAX_MAC 3
enum qtnf_fw_state {
- QTNF_FW_STATE_RESET,
- QTNF_FW_STATE_FW_DNLD_DONE,
+ QTNF_FW_STATE_DETACHED,
QTNF_FW_STATE_BOOT_DONE,
QTNF_FW_STATE_ACTIVE,
- QTNF_FW_STATE_DETACHED,
- QTNF_FW_STATE_EP_DEAD,
+ QTNF_FW_STATE_RUNNING,
+ QTNF_FW_STATE_DEAD,
};
struct qtnf_bus;
@@ -50,6 +49,7 @@ struct qtnf_bus {
struct napi_struct mux_napi;
struct net_device mux_dev;
struct workqueue_struct *workqueue;
+ struct workqueue_struct *hprio_workqueue;
struct work_struct fw_work;
struct work_struct event_work;
struct mutex bus_lock; /* lock during command/event processing */
@@ -58,6 +58,23 @@ struct qtnf_bus {
char bus_priv[0] __aligned(sizeof(void *));
};
+static inline bool qtnf_fw_is_up(struct qtnf_bus *bus)
+{
+ enum qtnf_fw_state state = bus->fw_state;
+
+ return ((state == QTNF_FW_STATE_ACTIVE) ||
+ (state == QTNF_FW_STATE_RUNNING));
+}
+
+static inline bool qtnf_fw_is_attached(struct qtnf_bus *bus)
+{
+ enum qtnf_fw_state state = bus->fw_state;
+
+ return ((state == QTNF_FW_STATE_ACTIVE) ||
+ (state == QTNF_FW_STATE_RUNNING) ||
+ (state == QTNF_FW_STATE_DEAD));
+}
+
static inline void *get_bus_priv(struct qtnf_bus *bus)
{
if (WARN(!bus, "qtnfmac: invalid bus pointer"))
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index dcb0991432f4..d90016125dfc 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -144,6 +144,7 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct net_device *netdev = wdev->netdev;
struct qtnf_vif *vif;
+ struct sk_buff *skb;
if (WARN_ON(!netdev))
return -EFAULT;
@@ -157,6 +158,11 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
if (netif_carrier_ok(netdev))
netif_carrier_off(netdev);
+ while ((skb = skb_dequeue(&vif->high_pri_tx_queue)))
+ dev_kfree_skb_any(skb);
+
+ cancel_work_sync(&vif->high_pri_tx_work);
+
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdevice(netdev);
@@ -424,13 +430,13 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
*cookie = short_cookie;
if (params->offchan)
- flags |= QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN;
+ flags |= QLINK_FRAME_TX_FLAG_OFFCHAN;
if (params->no_cck)
- flags |= QLINK_MGMT_FRAME_TX_FLAG_NO_CCK;
+ flags |= QLINK_FRAME_TX_FLAG_NO_CCK;
if (params->dont_wait_for_ack)
- flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;
+ flags |= QLINK_FRAME_TX_FLAG_ACK_NOWAIT;
/* If channel is not specified, pass "freq = 0" to tell device
* firmware to use current channel.
@@ -445,9 +451,8 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
params->len, short_cookie, flags);
- return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
- freq,
- params->buf, params->len);
+ return qtnf_cmd_send_frame(vif, short_cookie, flags,
+ freq, params->buf, params->len);
}
static int
@@ -993,53 +998,31 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
#endif
};
-static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
+static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
{
- struct qtnf_wmac *mac = wiphy_priv(wiphy_in);
- struct qtnf_bus *bus = mac->bus;
- struct wiphy *wiphy;
- unsigned int mac_idx;
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
enum nl80211_band band;
int ret;
pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
req->alpha2[0], req->alpha2[1]);
- ret = qtnf_cmd_reg_notify(bus, req);
+ ret = qtnf_cmd_reg_notify(mac, req, qtnf_mac_slave_radar_get(wiphy));
if (ret) {
- if (ret == -EOPNOTSUPP) {
- pr_warn("reg update not supported\n");
- } else if (ret == -EALREADY) {
- pr_info("regulatory domain is already set to %c%c",
- req->alpha2[0], req->alpha2[1]);
- } else {
- pr_err("failed to update reg domain to %c%c\n",
- req->alpha2[0], req->alpha2[1]);
- }
-
+ pr_err("MAC%u: failed to update region to %c%c: %d\n",
+ mac->macid, req->alpha2[0], req->alpha2[1], ret);
return;
}
- for (mac_idx = 0; mac_idx < QTNF_MAX_MAC; ++mac_idx) {
- if (!(bus->hw_info.mac_bitmap & (1 << mac_idx)))
+ for (band = 0; band < NUM_NL80211_BANDS; ++band) {
+ if (!wiphy->bands[band])
continue;
- mac = bus->mac[mac_idx];
- if (!mac)
- continue;
-
- wiphy = priv_to_wiphy(mac);
-
- for (band = 0; band < NUM_NL80211_BANDS; ++band) {
- if (!wiphy->bands[band])
- continue;
-
- ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
- if (ret)
- pr_err("failed to get chan info for mac %u band %u\n",
- mac_idx, band);
- }
+ ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
+ if (ret)
+ pr_err("MAC%u: failed to update band %u\n",
+ mac->macid, band);
}
}
@@ -1095,6 +1078,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
struct wiphy *wiphy = priv_to_wiphy(mac);
struct qtnf_mac_info *macinfo = &mac->macinfo;
int ret;
+ bool regdomain_is_known;
if (!wiphy) {
pr_err("invalid wiphy pointer\n");
@@ -1127,7 +1111,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
- WIPHY_FLAG_4ADDR_STATION;
+ WIPHY_FLAG_4ADDR_STATION |
+ WIPHY_FLAG_NETNS_OK;
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
@@ -1166,11 +1151,19 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->wowlan = macinfo->wowlan;
#endif
+ regdomain_is_known = isalpha(mac->rd->alpha2[0]) &&
+ isalpha(mac->rd->alpha2[1]);
+
if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
- wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
- REGULATORY_CUSTOM_REG;
wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
- wiphy_apply_custom_regulatory(wiphy, hw_info->rd);
+
+ if (mac->rd->alpha2[0] == '9' && mac->rd->alpha2[1] == '9') {
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_STRICT_REG;
+ wiphy_apply_custom_regulatory(wiphy, mac->rd);
+ } else if (regdomain_is_known) {
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+ }
} else {
wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
}
@@ -1193,10 +1186,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
goto out;
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
- ret = regulatory_set_wiphy_regd(wiphy, hw_info->rd);
- else if (isalpha(hw_info->rd->alpha2[0]) &&
- isalpha(hw_info->rd->alpha2[1]))
- ret = regulatory_hint(wiphy, hw_info->rd->alpha2);
+ ret = regulatory_set_wiphy_regd(wiphy, mac->rd);
+ else if (regdomain_is_known)
+ ret = regulatory_hint(wiphy, mac->rd->alpha2);
out:
return ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 85a2a58f4c16..459f6b81d2eb 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -11,6 +11,13 @@
#include "bus.h"
#include "commands.h"
+#define QTNF_SCAN_TIME_AUTO 0
+
+/* Let device itself to select best values for current conditions */
+#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT QTNF_SCAN_TIME_AUTO
+#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT QTNF_SCAN_TIME_AUTO
+#define QTNF_SCAN_SAMPLE_DURATION_DEFAULT QTNF_SCAN_TIME_AUTO
+
static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp,
u16 cmd_id, u8 mac_id, u8 vif_id,
size_t resp_size)
@@ -89,8 +96,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, cmd_id);
- if (bus->fw_state != QTNF_FW_STATE_ACTIVE &&
- cmd_id != QLINK_CMD_FW_INIT) {
+ if (!qtnf_fw_is_up(bus) && cmd_id != QLINK_CMD_FW_INIT) {
pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
mac_id, vif_id, cmd_id, bus->fw_state);
dev_kfree_skb(cmd_skb);
@@ -177,14 +183,6 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
memcpy(tlv->ie_data, buf, len);
}
-static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl)
-{
- size_t size = sizeof(struct qlink_acl_data) +
- acl->n_acl_entries * sizeof(struct qlink_mac_address);
-
- return size;
-}
-
static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
const struct cfg80211_ap_settings *s)
{
@@ -203,7 +201,7 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
if (s->acl)
len += sizeof(struct qlink_tlv_hdr) +
- qtnf_cmd_acl_data_size(s->acl);
+ struct_size(s->acl, mac_addrs, s->acl->n_acl_entries);
if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
pr_err("VIF%u.%u: can not fit AP settings: %u\n",
@@ -310,7 +308,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
}
if (s->acl) {
- size_t acl_size = qtnf_cmd_acl_data_size(s->acl);
+ size_t acl_size = struct_size(s->acl, mac_addrs,
+ s->acl->n_acl_entries);
struct qlink_tlv_hdr *tlv =
skb_put(cmd_skb, sizeof(*tlv) + acl_size);
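struct_size() replaces the open-coded size helper removed above; for a struct ending in a flexible array it computes sizeof(*p) + n * sizeof(p->member[0]) with overflow checking. A small self-contained sketch (the struct below is a hypothetical stand-in for qlink_acl_data, whose real layout lives in qlink.h):

#include <linux/overflow.h>
#include <linux/if_ether.h>

struct acl_example {
	__le32 policy;
	__le32 num_entries;
	struct { u8 addr[ETH_ALEN]; } mac_addrs[];	/* flexible array member */
};

static size_t acl_example_size(const struct acl_example *acl, u32 n)
{
	/* == sizeof(*acl) + n * sizeof(acl->mac_addrs[0]), overflow-checked */
	return struct_size(acl, mac_addrs, n);
}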
@@ -382,11 +381,11 @@ out:
return ret;
}
-int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
- u16 freq, const u8 *buf, size_t len)
+int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
+ u16 freq, const u8 *buf, size_t len)
{
struct sk_buff *cmd_skb;
- struct qlink_cmd_mgmt_frame_tx *cmd;
+ struct qlink_cmd_frame_tx *cmd;
int ret;
if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
@@ -396,14 +395,14 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
}
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
- QLINK_CMD_SEND_MGMT_FRAME,
+ QLINK_CMD_SEND_FRAME,
sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
- cmd = (struct qlink_cmd_mgmt_frame_tx *)cmd_skb->data;
+ cmd = (struct qlink_cmd_frame_tx *)cmd_skb->data;
cmd->cookie = cpu_to_le32(cookie);
cmd->freq = cpu_to_le16(freq);
cmd->flags = cpu_to_le16(flags);
@@ -786,8 +785,25 @@ int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
int use4addr,
u8 *mac_addr)
{
- return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
- QLINK_CMD_CHANGE_INTF);
+ int ret;
+
+ ret = qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
+ QLINK_CMD_CHANGE_INTF);
+
+ /* Regulatory settings may be different for different interface types */
+ if (ret == 0 && vif->wdev.iftype != iftype) {
+ enum nl80211_band band;
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+
+ for (band = 0; band < NUM_NL80211_BANDS; ++band) {
+ if (!wiphy->bands[band])
+ continue;
+
+ qtnf_cmd_band_info_get(vif->mac, wiphy->bands[band]);
+ }
+ }
+
+ return ret;
}
int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
@@ -831,55 +847,6 @@ out:
return ret;
}
-static u32 qtnf_cmd_resp_reg_rule_flags_parse(u32 qflags)
-{
- u32 flags = 0;
-
- if (qflags & QLINK_RRF_NO_OFDM)
- flags |= NL80211_RRF_NO_OFDM;
-
- if (qflags & QLINK_RRF_NO_CCK)
- flags |= NL80211_RRF_NO_CCK;
-
- if (qflags & QLINK_RRF_NO_INDOOR)
- flags |= NL80211_RRF_NO_INDOOR;
-
- if (qflags & QLINK_RRF_NO_OUTDOOR)
- flags |= NL80211_RRF_NO_OUTDOOR;
-
- if (qflags & QLINK_RRF_DFS)
- flags |= NL80211_RRF_DFS;
-
- if (qflags & QLINK_RRF_PTP_ONLY)
- flags |= NL80211_RRF_PTP_ONLY;
-
- if (qflags & QLINK_RRF_PTMP_ONLY)
- flags |= NL80211_RRF_PTMP_ONLY;
-
- if (qflags & QLINK_RRF_NO_IR)
- flags |= NL80211_RRF_NO_IR;
-
- if (qflags & QLINK_RRF_AUTO_BW)
- flags |= NL80211_RRF_AUTO_BW;
-
- if (qflags & QLINK_RRF_IR_CONCURRENT)
- flags |= NL80211_RRF_IR_CONCURRENT;
-
- if (qflags & QLINK_RRF_NO_HT40MINUS)
- flags |= NL80211_RRF_NO_HT40MINUS;
-
- if (qflags & QLINK_RRF_NO_HT40PLUS)
- flags |= NL80211_RRF_NO_HT40PLUS;
-
- if (qflags & QLINK_RRF_NO_80MHZ)
- flags |= NL80211_RRF_NO_80MHZ;
-
- if (qflags & QLINK_RRF_NO_160MHZ)
- flags |= NL80211_RRF_NO_160MHZ;
-
- return flags;
-}
-
static int
qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
const struct qlink_resp_get_hw_info *resp,
@@ -887,7 +854,6 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
{
struct qtnf_hw_info *hwinfo = &bus->hw_info;
const struct qlink_tlv_hdr *tlv;
- const struct qlink_tlv_reg_rule *tlv_rule;
const char *bld_name = NULL;
const char *bld_rev = NULL;
const char *bld_type = NULL;
@@ -898,19 +864,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
const char *calibration_ver = NULL;
const char *uboot_ver = NULL;
u32 hw_ver = 0;
- struct ieee80211_reg_rule *rule;
u16 tlv_type;
u16 tlv_value_len;
- unsigned int rule_idx = 0;
-
- if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
- return -E2BIG;
-
- hwinfo->rd = kzalloc(struct_size(hwinfo->rd, reg_rules,
- resp->n_reg_rules), GFP_KERNEL);
-
- if (!hwinfo->rd)
- return -ENOMEM;
hwinfo->num_mac = resp->num_mac;
hwinfo->mac_bitmap = resp->mac_bitmap;
@@ -919,30 +874,11 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->total_tx_chain = resp->total_tx_chain;
hwinfo->total_rx_chain = resp->total_rx_chain;
hwinfo->hw_capab = le32_to_cpu(resp->hw_capab);
- hwinfo->rd->n_reg_rules = resp->n_reg_rules;
- hwinfo->rd->alpha2[0] = resp->alpha2[0];
- hwinfo->rd->alpha2[1] = resp->alpha2[1];
bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
plat_id = le32_to_cpu(resp->plat_id);
hw_ver = le32_to_cpu(resp->hw_ver);
- switch (resp->dfs_region) {
- case QLINK_DFS_FCC:
- hwinfo->rd->dfs_region = NL80211_DFS_FCC;
- break;
- case QLINK_DFS_ETSI:
- hwinfo->rd->dfs_region = NL80211_DFS_ETSI;
- break;
- case QLINK_DFS_JP:
- hwinfo->rd->dfs_region = NL80211_DFS_JP;
- break;
- case QLINK_DFS_UNSET:
- default:
- hwinfo->rd->dfs_region = NL80211_DFS_UNSET;
- break;
- }
-
tlv = (const struct qlink_tlv_hdr *)resp->info;
while (info_len >= sizeof(*tlv)) {
@@ -956,37 +892,6 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
}
switch (tlv_type) {
- case QTN_TLV_ID_REG_RULE:
- if (rule_idx >= resp->n_reg_rules) {
- pr_warn("unexpected number of rules: %u\n",
- resp->n_reg_rules);
- return -EINVAL;
- }
-
- if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
-
- tlv_rule = (const struct qlink_tlv_reg_rule *)tlv;
- rule = &hwinfo->rd->reg_rules[rule_idx++];
-
- rule->freq_range.start_freq_khz =
- le32_to_cpu(tlv_rule->start_freq_khz);
- rule->freq_range.end_freq_khz =
- le32_to_cpu(tlv_rule->end_freq_khz);
- rule->freq_range.max_bandwidth_khz =
- le32_to_cpu(tlv_rule->max_bandwidth_khz);
- rule->power_rule.max_antenna_gain =
- le32_to_cpu(tlv_rule->max_antenna_gain);
- rule->power_rule.max_eirp =
- le32_to_cpu(tlv_rule->max_eirp);
- rule->dfs_cac_ms =
- le32_to_cpu(tlv_rule->dfs_cac_ms);
- rule->flags = qtnf_cmd_resp_reg_rule_flags_parse(
- le32_to_cpu(tlv_rule->flags));
- break;
case QTN_TLV_ID_BUILD_NAME:
bld_name = (const void *)tlv->val;
break;
@@ -1019,17 +924,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (rule_idx != resp->n_reg_rules) {
- pr_warn("unexpected number of rules: expected %u got %u\n",
- resp->n_reg_rules, rule_idx);
- kfree(hwinfo->rd);
- hwinfo->rd = NULL;
- return -EINVAL;
- }
-
- pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u, capab=0x%x\n",
+ pr_info("fw_version=%d, MACs map %#x, chains Tx=%u Rx=%u, capab=0x%x\n",
hwinfo->fw_ver, hwinfo->mac_bitmap,
- hwinfo->rd->alpha2[0], hwinfo->rd->alpha2[1],
hwinfo->total_tx_chain, hwinfo->total_rx_chain,
hwinfo->hw_capab);
@@ -1042,7 +938,7 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
"\nHardware ID: %s" \
"\nCalibration version: %s" \
"\nU-Boot version: %s" \
- "\nHardware version: 0x%08x",
+ "\nHardware version: 0x%08x\n",
bld_name, bld_rev, bld_type, bld_label,
(unsigned long)bld_tmstamp,
(unsigned long)plat_id,
@@ -1085,9 +981,12 @@ qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
}
}
-static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
- const u8 *tlv_buf, size_t tlv_buf_size)
+static int
+qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
+ const struct qlink_resp_get_mac_info *resp,
+ size_t tlv_buf_size)
{
+ const u8 *tlv_buf = resp->var_info;
struct ieee80211_iface_combination *comb = NULL;
size_t n_comb = 0;
struct ieee80211_iface_limit *limits;
@@ -1105,6 +1004,38 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
u8 ext_capa_len = 0;
u8 ext_capa_mask_len = 0;
int i = 0;
+ struct ieee80211_reg_rule *rule;
+ unsigned int rule_idx = 0;
+ const struct qlink_tlv_reg_rule *tlv_rule;
+
+ if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
+ return -E2BIG;
+
+ mac->rd = kzalloc(sizeof(*mac->rd) +
+ sizeof(struct ieee80211_reg_rule) *
+ resp->n_reg_rules, GFP_KERNEL);
+ if (!mac->rd)
+ return -ENOMEM;
+
+ mac->rd->n_reg_rules = resp->n_reg_rules;
+ mac->rd->alpha2[0] = resp->alpha2[0];
+ mac->rd->alpha2[1] = resp->alpha2[1];
+
+ switch (resp->dfs_region) {
+ case QLINK_DFS_FCC:
+ mac->rd->dfs_region = NL80211_DFS_FCC;
+ break;
+ case QLINK_DFS_ETSI:
+ mac->rd->dfs_region = NL80211_DFS_ETSI;
+ break;
+ case QLINK_DFS_JP:
+ mac->rd->dfs_region = NL80211_DFS_JP;
+ break;
+ case QLINK_DFS_UNSET:
+ default:
+ mac->rd->dfs_region = NL80211_DFS_UNSET;
+ break;
+ }
tlv = (const struct qlink_tlv_hdr *)tlv_buf;
while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
@@ -1225,6 +1156,23 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
mac->macinfo.wowlan = NULL;
qtnf_parse_wowlan_info(mac, wowlan);
break;
+ case QTN_TLV_ID_REG_RULE:
+ if (rule_idx >= resp->n_reg_rules) {
+ pr_warn("unexpected number of rules: %u\n",
+ resp->n_reg_rules);
+ return -EINVAL;
+ }
+
+ if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) {
+ pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
+ tlv_type, tlv_value_len);
+ return -EINVAL;
+ }
+
+ tlv_rule = (const struct qlink_tlv_reg_rule *)tlv;
+ rule = &mac->rd->reg_rules[rule_idx++];
+ qlink_utils_regrule_q2nl(rule, tlv_rule);
+ break;
default:
pr_warn("MAC%u: unknown TLV type %u\n",
mac->macid, tlv_type);
@@ -1253,6 +1201,12 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
return -EINVAL;
}
+ if (rule_idx != resp->n_reg_rules) {
+ pr_warn("unexpected number of rules: expected %u got %u\n",
+ resp->n_reg_rules, rule_idx);
+ return -EINVAL;
+ }
+
if (ext_capa_len > 0) {
ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL);
if (!ext_capa)
@@ -1663,7 +1617,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
qtnf_cmd_resp_proc_mac_info(mac, resp);
- ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len);
+ ret = qtnf_parse_variable_mac_info(mac, resp, var_data_len);
out:
qtnf_bus_unlock(mac->bus);
@@ -1709,21 +1663,7 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
struct qlink_resp_band_info_get *resp;
size_t info_len = 0;
int ret = 0;
- u8 qband;
-
- switch (band->band) {
- case NL80211_BAND_2GHZ:
- qband = QLINK_BAND_2GHZ;
- break;
- case NL80211_BAND_5GHZ:
- qband = QLINK_BAND_5GHZ;
- break;
- case NL80211_BAND_60GHZ:
- qband = QLINK_BAND_60GHZ;
- break;
- default:
- return -EINVAL;
- }
+ u8 qband = qlink_utils_band_cfg2q(band->band);
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
QLINK_CMD_BAND_INFO_GET,
@@ -2107,22 +2047,23 @@ out:
static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
const struct ieee80211_channel *sc)
{
- struct qlink_tlv_channel *qchan;
- u32 flags = 0;
-
- qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
- qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
- qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr));
- qchan->chan.center_freq = cpu_to_le16(sc->center_freq);
- qchan->chan.hw_value = cpu_to_le16(sc->hw_value);
-
- if (sc->flags & IEEE80211_CHAN_NO_IR)
- flags |= QLINK_CHAN_NO_IR;
-
- if (sc->flags & IEEE80211_CHAN_RADAR)
- flags |= QLINK_CHAN_RADAR;
-
- qchan->chan.flags = cpu_to_le32(flags);
+ struct qlink_tlv_channel *tlv;
+ struct qlink_channel *qch;
+
+ tlv = skb_put_zero(cmd_skb, sizeof(*tlv));
+ qch = &tlv->chan;
+ tlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
+ tlv->hdr.len = cpu_to_le16(sizeof(*qch));
+
+ qch->center_freq = cpu_to_le16(sc->center_freq);
+ qch->hw_value = cpu_to_le16(sc->hw_value);
+ qch->band = qlink_utils_band_cfg2q(sc->band);
+ qch->max_power = sc->max_power;
+ qch->max_reg_power = sc->max_reg_power;
+ qch->max_antenna_gain = sc->max_antenna_gain;
+ qch->beacon_found = sc->beacon_found;
+ qch->dfs_state = qlink_utils_dfs_state_cfg2q(sc->dfs_state);
+ qch->flags = cpu_to_le32(qlink_utils_chflags_cfg2q(sc->flags));
}
static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
@@ -2141,6 +2082,35 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
}
+static void qtnf_cmd_scan_set_dwell(struct qtnf_wmac *mac,
+ struct sk_buff *cmd_skb)
+{
+ struct cfg80211_scan_request *scan_req = mac->scan_req;
+ u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
+ u16 dwell_passive = QTNF_SCAN_DWELL_PASSIVE_DEFAULT;
+ u16 duration = QTNF_SCAN_SAMPLE_DURATION_DEFAULT;
+
+ if (scan_req->duration) {
+ dwell_active = scan_req->duration;
+ dwell_passive = scan_req->duration;
+ }
+
+ pr_debug("MAC%u: %s scan dwell active=%u, passive=%u, duration=%u\n",
+ mac->macid,
+ scan_req->duration_mandatory ? "mandatory" : "max",
+ dwell_active, dwell_passive, duration);
+
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb,
+ QTN_TLV_ID_SCAN_DWELL_ACTIVE,
+ dwell_active);
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb,
+ QTN_TLV_ID_SCAN_DWELL_PASSIVE,
+ dwell_passive);
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb,
+ QTN_TLV_ID_SCAN_SAMPLE_DURATION,
+ duration);
+}
+
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb;
@@ -2192,6 +2162,8 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
}
}
+ qtnf_cmd_scan_set_dwell(mac, cmd_skb);
+
if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
mac->macid,
@@ -2207,15 +2179,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
}
- if (scan_req->duration) {
- pr_debug("MAC%u: %s scan duration %u\n", mac->macid,
- scan_req->duration_mandatory ? "mandatory" : "max",
- scan_req->duration);
-
- qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL,
- scan_req->duration);
- }
-
ret = qtnf_cmd_send(mac->bus, cmd_skb);
if (ret)
goto out;
@@ -2404,13 +2367,18 @@ out:
return ret;
}
-int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
+int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
+ bool slave_radar)
{
+ struct wiphy *wiphy = priv_to_wiphy(mac);
+ struct qtnf_bus *bus = mac->bus;
struct sk_buff *cmd_skb;
int ret;
struct qlink_cmd_reg_notify *cmd;
+ enum nl80211_band band;
+ const struct ieee80211_supported_band *cfg_band;
- cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_REG_NOTIFY,
sizeof(*cmd));
if (!cmd_skb)
@@ -2447,12 +2415,41 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
break;
}
+ switch (req->dfs_region) {
+ case NL80211_DFS_FCC:
+ cmd->dfs_region = QLINK_DFS_FCC;
+ break;
+ case NL80211_DFS_ETSI:
+ cmd->dfs_region = QLINK_DFS_ETSI;
+ break;
+ case NL80211_DFS_JP:
+ cmd->dfs_region = QLINK_DFS_JP;
+ break;
+ default:
+ cmd->dfs_region = QLINK_DFS_UNSET;
+ break;
+ }
+
+ cmd->slave_radar = slave_radar;
+ cmd->num_channels = 0;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ unsigned int i;
+
+ cfg_band = wiphy->bands[band];
+ if (!cfg_band)
+ continue;
+
+ cmd->num_channels += cfg_band->n_channels;
+
+ for (i = 0; i < cfg_band->n_channels; ++i) {
+ qtnf_cmd_channel_tlv_add(cmd_skb,
+ &cfg_band->channels[i]);
+ }
+ }
+
qtnf_bus_lock(bus);
ret = qtnf_cmd_send(bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(bus);
return ret;
@@ -2592,7 +2589,7 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
struct qtnf_bus *bus = vif->mac->bus;
struct sk_buff *cmd_skb;
struct qlink_tlv_hdr *tlv;
- size_t acl_size = qtnf_cmd_acl_data_size(params);
+ size_t acl_size = struct_size(params, mac_addrs, params->n_acl_entries);
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
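For reference, here is a minimal standalone sketch (not part of the patch) of what the struct_size() conversion above computes for a flexible-array structure such as qlink_acl_data. The type names are simplified stand-ins; the kernel's struct_size() additionally saturates to SIZE_MAX on overflow instead of wrapping, which is the point of the conversion.

#include <stddef.h>
#include <stdio.h>

struct mac_address {
	unsigned char addr[6];
};

struct acl_data {			/* stand-in for struct qlink_acl_data */
	unsigned int policy;
	unsigned int n_entries;
	struct mac_address mac_addrs[];	/* flexible array member */
};

/* open-coded equivalent of struct_size(acl, mac_addrs, n) */
static size_t acl_size(unsigned int n)
{
	return sizeof(struct acl_data) + (size_t)n * sizeof(struct mac_address);
}

int main(void)
{
	printf("ACL payload for 4 entries: %zu bytes\n", acl_size(4));
	return 0;
}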
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 64f0b9dc8a14..88d7a3cd90d2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -27,8 +27,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
const struct cfg80211_ap_settings *s);
int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif);
int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg);
-int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
- u16 freq, const u8 *buf, size_t len);
+int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
+ u16 freq, const u8 *buf, size_t len);
int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
const u8 *buf, size_t len);
int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
@@ -57,7 +57,8 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif,
u16 reason_code);
int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
bool up);
-int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req);
+int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
+ bool slave_radar);
int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct qtnf_chan_stats *stats);
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index ee1b75fda1dd..8d699cc03d26 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -16,6 +16,12 @@
#define QTNF_DMP_MAX_LEN 48
#define QTNF_PRIMARY_VIF_IDX 0
+static bool slave_radar = true;
+module_param(slave_radar, bool, 0644);
+MODULE_PARM_DESC(slave_radar, "set 0 to disable radar detection in slave mode");
+
+static struct dentry *qtnf_debugfs_dir;
+
struct qtnf_frame_meta_info {
u8 magic_s;
u8 ifidx;
@@ -368,6 +374,23 @@ static void qtnf_mac_scan_timeout(struct work_struct *work)
qtnf_mac_scan_finish(mac, true);
}
+static void qtnf_vif_send_data_high_pri(struct work_struct *work)
+{
+ struct qtnf_vif *vif =
+ container_of(work, struct qtnf_vif, high_pri_tx_work);
+ struct sk_buff *skb;
+
+ if (!vif->netdev ||
+ vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)
+ return;
+
+ while ((skb = skb_dequeue(&vif->high_pri_tx_queue))) {
+ qtnf_cmd_send_frame(vif, 0, QLINK_FRAME_TX_FLAG_8023,
+ 0, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+}
+
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
unsigned int macid)
{
@@ -395,7 +418,8 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
vif->mac = mac;
vif->vifid = i;
qtnf_sta_list_init(&vif->sta_list);
-
+ INIT_WORK(&vif->high_pri_tx_work, qtnf_vif_send_data_high_pri);
+ skb_queue_head_init(&vif->high_pri_tx_queue);
vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!vif->stats64)
pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
@@ -408,6 +432,11 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
return mac;
}
+bool qtnf_mac_slave_radar_get(struct wiphy *wiphy)
+{
+ return slave_radar;
+}
+
static const struct ethtool_ops qtnf_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
};
@@ -499,6 +528,8 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
qtnf_mac_iface_comb_free(mac);
qtnf_mac_ext_caps_free(mac);
kfree(mac->macinfo.wowlan);
+ kfree(mac->rd);
+ mac->rd = NULL;
wiphy_free(wiphy);
bus->mac[macid] = NULL;
}
@@ -587,8 +618,6 @@ int qtnf_core_attach(struct qtnf_bus *bus)
int ret;
qtnf_trans_init(bus);
-
- bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
qtnf_bus_data_rx_start(bus);
bus->workqueue = alloc_ordered_workqueue("QTNF_BUS", 0);
@@ -598,6 +627,13 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
+ bus->hprio_workqueue = alloc_workqueue("QTNF_HPRI", WQ_HIGHPRI, 0);
+ if (!bus->hprio_workqueue) {
+ pr_err("failed to alloc high prio workqueue\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
INIT_WORK(&bus->event_work, qtnf_event_work_handler);
ret = qtnf_cmd_send_init_fw(bus);
@@ -607,7 +643,6 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
bus->fw_state = QTNF_FW_STATE_ACTIVE;
-
ret = qtnf_cmd_get_hw_info(bus);
if (ret) {
pr_err("failed to get HW info: %d\n", ret);
@@ -637,11 +672,11 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
}
+ bus->fw_state = QTNF_FW_STATE_RUNNING;
return 0;
error:
qtnf_core_detach(bus);
-
return ret;
}
EXPORT_SYMBOL_GPL(qtnf_core_attach);
@@ -655,7 +690,7 @@ void qtnf_core_detach(struct qtnf_bus *bus)
for (macid = 0; macid < QTNF_MAX_MAC; macid++)
qtnf_core_mac_detach(bus, macid);
- if (bus->fw_state == QTNF_FW_STATE_ACTIVE)
+ if (qtnf_fw_is_up(bus))
qtnf_cmd_send_deinit_fw(bus);
bus->fw_state = QTNF_FW_STATE_DETACHED;
@@ -663,10 +698,14 @@ void qtnf_core_detach(struct qtnf_bus *bus)
if (bus->workqueue) {
flush_workqueue(bus->workqueue);
destroy_workqueue(bus->workqueue);
+ bus->workqueue = NULL;
}
- kfree(bus->hw_info.rd);
- bus->hw_info.rd = NULL;
+ if (bus->hprio_workqueue) {
+ flush_workqueue(bus->hprio_workqueue);
+ destroy_workqueue(bus->hprio_workqueue);
+ bus->hprio_workqueue = NULL;
+ }
qtnf_trans_free(bus);
}
@@ -684,6 +723,9 @@ struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
struct qtnf_wmac *mac;
struct qtnf_vif *vif;
+ if (unlikely(bus->fw_state != QTNF_FW_STATE_RUNNING))
+ return NULL;
+
meta = (struct qtnf_frame_meta_info *)
(skb_tail_pointer(skb) - sizeof(*meta));
@@ -799,6 +841,39 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
+void qtnf_packet_send_hi_pri(struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
+
+ skb_queue_tail(&vif->high_pri_tx_queue, skb);
+ queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
+}
+EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri);
+
+struct dentry *qtnf_get_debugfs_dir(void)
+{
+ return qtnf_debugfs_dir;
+}
+EXPORT_SYMBOL_GPL(qtnf_get_debugfs_dir);
+
+static int __init qtnf_core_register(void)
+{
+ qtnf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ if (IS_ERR(qtnf_debugfs_dir))
+ qtnf_debugfs_dir = NULL;
+
+ return 0;
+}
+
+static void __exit qtnf_core_exit(void)
+{
+ debugfs_remove(qtnf_debugfs_dir);
+}
+
+module_init(qtnf_core_register);
+module_exit(qtnf_core_exit);
+
MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver.");
MODULE_LICENSE("GPL");
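As a reading aid for the fw_state changes in this file and in pcie.c further down, the sketch below lists the firmware states referenced by the series in the order they appear to be reached. This is an assumption reconstructed from the constants visible in the diff; the authoritative enum lives in qtnfmac/bus.h, which is not part of this excerpt, so the names are kept but ordering, values and comments are illustrative only.

enum example_qtnf_fw_state {		/* sketch only, see bus.h */
	QTNF_FW_STATE_DETACHED,		/* set at probe, before firmware boot */
	QTNF_FW_STATE_BOOT_DONE,	/* EP firmware booted, core attaching */
	QTNF_FW_STATE_ACTIVE,		/* QLINK_CMD_FW_INIT acknowledged */
	QTNF_FW_STATE_RUNNING,		/* all MACs registered, data path open */
	QTNF_FW_STATE_DEAD,		/* EP stopped responding to commands */
};

Note that qtnf_classify_skb() above drops RX frames unless the state is QTNF_FW_STATE_RUNNING, which is only set once every MAC has been registered.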
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index a31cff46e964..322858df600c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -63,6 +63,8 @@ struct qtnf_vif {
struct qtnf_wmac *mac;
struct work_struct reset_work;
+ struct work_struct high_pri_tx_work;
+ struct sk_buff_head high_pri_tx_queue;
struct qtnf_sta_list sta_list;
unsigned long cons_tx_timeout_cnt;
int generation;
@@ -112,6 +114,7 @@ struct qtnf_wmac {
struct cfg80211_scan_request *scan_req;
struct mutex mac_lock; /* lock during wmac specific ops */
struct delayed_work scan_timeout;
+ struct ieee80211_regdomain *rd;
};
struct qtnf_hw_info {
@@ -120,7 +123,6 @@ struct qtnf_hw_info {
u8 mac_bitmap;
u32 fw_ver;
u32 hw_capab;
- struct ieee80211_regdomain *rd;
u8 total_tx_chain;
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
@@ -132,6 +134,7 @@ struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
+bool qtnf_mac_slave_radar_get(struct wiphy *wiphy);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
@@ -149,6 +152,8 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev);
void qtnf_netdev_updown(struct net_device *ndev, bool up);
void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
+void qtnf_packet_send_hi_pri(struct sk_buff *skb);
+struct dentry *qtnf_get_debugfs_dir(void);
static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.c b/drivers/net/wireless/quantenna/qtnfmac/debug.c
index 598ece753a4b..2d3574c1f10e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/debug.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/debug.c
@@ -5,7 +5,9 @@
void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name)
{
- bus->dbg_dir = debugfs_create_dir(name, NULL);
+ struct dentry *parent = qtnf_get_debugfs_dir();
+
+ bus->dbg_dir = debugfs_create_dir(name, parent);
}
void qtnf_debugfs_remove(struct qtnf_bus *bus)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 6c1b886339ac..b57c8c18a8d0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -493,14 +493,20 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
for (i = 0; i < QTNF_MAX_INTF; i++) {
vif = &mac->iflist[i];
+
if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)
continue;
- if (vif->netdev) {
- mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->netdev, &chandef);
- mutex_unlock(&vif->wdev.mtx);
- }
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
+ !vif->wdev.current_bss)
+ continue;
+
+ if (!vif->netdev)
+ continue;
+
+ mutex_lock(&vif->wdev.mtx);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef);
+ mutex_unlock(&vif->wdev.mtx);
}
return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index c3a32effa6f0..e4e9344b6982 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -56,7 +56,7 @@ int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
if (ret == -ETIMEDOUT) {
pr_err("EP firmware is dead\n");
- bus->fw_state = QTNF_FW_STATE_EP_DEAD;
+ bus->fw_state = QTNF_FW_STATE_DEAD;
}
return ret;
@@ -128,32 +128,22 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
return 0;
}
-void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success)
+int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
{
- struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
- struct pci_dev *pdev = priv->pdev;
int ret;
- if (boot_success) {
- bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
-
- ret = qtnf_core_attach(bus);
- if (ret) {
- pr_err("failed to attach core\n");
- boot_success = false;
- }
- }
-
- if (boot_success) {
+ bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
+ ret = qtnf_core_attach(bus);
+ if (ret) {
+ pr_err("failed to attach core\n");
+ } else {
qtnf_debugfs_init(bus, DRV_NAME);
qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
- } else {
- bus->fw_state = QTNF_FW_STATE_DETACHED;
}
- put_device(&pdev->dev);
+ return ret;
}
static void qtnf_tune_pcie_mps(struct pci_dev *pdev)
@@ -344,7 +334,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv = get_bus_priv(bus);
pci_set_drvdata(pdev, bus);
bus->dev = &pdev->dev;
- bus->fw_state = QTNF_FW_STATE_RESET;
+ bus->fw_state = QTNF_FW_STATE_DETACHED;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
pcie_priv->rx_bd_num = rx_bd_size_param;
@@ -364,6 +354,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->pcie_irq_count = 0;
pcie_priv->tx_reclaim_done = 0;
pcie_priv->tx_reclaim_req = 0;
+ pcie_priv->tx_eapol = 0;
pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
if (!pcie_priv->workqueue) {
@@ -419,8 +410,7 @@ static void qtnf_pcie_remove(struct pci_dev *dev)
cancel_work_sync(&bus->fw_work);
- if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
- bus->fw_state == QTNF_FW_STATE_EP_DEAD)
+ if (qtnf_fw_is_attached(bus))
qtnf_core_detach(bus);
netif_napi_del(&bus->mux_napi);
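The qtnf_fw_is_attached() test above, like qtnf_fw_is_up() used in core.c earlier, is a helper whose definition is not part of this diff. Under the state model sketched after core.c, a plausible reading of the two predicates is the following; treat it as an illustration, not as the bus.h implementation.

/* illustrative only; the real helpers live in qtnfmac/bus.h */
static inline bool example_fw_is_up(const struct qtnf_bus *bus)
{
	/* firmware answered FW_INIT and has not been declared dead */
	return bus->fw_state == QTNF_FW_STATE_ACTIVE ||
	       bus->fw_state == QTNF_FW_STATE_RUNNING;
}

static inline bool example_fw_is_attached(const struct qtnf_bus *bus)
{
	/* the core was attached at some point, even if the EP died later */
	return bus->fw_state == QTNF_FW_STATE_ACTIVE ||
	       bus->fw_state == QTNF_FW_STATE_RUNNING ||
	       bus->fw_state == QTNF_FW_STATE_DEAD;
}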
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
index bbc074e1f34d..5e8b9cb68419 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
@@ -62,6 +62,7 @@ struct qtnf_pcie_bus_priv {
u32 tx_done_count;
u32 tx_reclaim_done;
u32 tx_reclaim_req;
+ u32 tx_eapol;
u8 msi_enabled;
u8 tx_stopped;
@@ -70,7 +71,7 @@ struct qtnf_pcie_bus_priv {
int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb);
int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv);
-void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success);
+int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus);
void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 1f5facbb8905..3aa3714d4dfd 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -980,12 +980,11 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
{
struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+ u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
+ const char *fwname = QTN_PCI_PEARL_FW_NAME;
struct pci_dev *pdev = ps->base.pdev;
const struct firmware *fw;
int ret;
- u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
- const char *fwname = QTN_PCI_PEARL_FW_NAME;
- bool fw_boot_success = false;
if (ps->base.flashboot) {
state |= QTN_RC_FW_FLASHBOOT;
@@ -1031,23 +1030,23 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
goto fw_load_exit;
}
- pr_info("firmware is up and running\n");
-
if (qtnf_poll_state(&ps->bda->bda_ep_state,
QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
pr_err("firmware runtime failure\n");
goto fw_load_exit;
}
- fw_boot_success = true;
+ pr_info("firmware is up and running\n");
-fw_load_exit:
- qtnf_pcie_fw_boot_done(bus, fw_boot_success);
+ ret = qtnf_pcie_fw_boot_done(bus);
+ if (ret)
+ goto fw_load_exit;
- if (fw_boot_success) {
- qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
- qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
- }
+ qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
+ qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
+
+fw_load_exit:
+ put_device(&pdev->dev);
}
static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index cbcda57105f3..9a4380ed7f1b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -498,6 +498,13 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
int len;
int i;
+ if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
+ qtnf_packet_send_hi_pri(skb);
+ qtnf_update_tx_stats(skb->dev, skb);
+ priv->tx_eapol++;
+ return NETDEV_TX_OK;
+ }
+
spin_lock_irqsave(&priv->tx_lock, flags);
if (!qtnf_tx_queue_ready(ts)) {
@@ -761,6 +768,7 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
+ seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);
seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
@@ -1023,8 +1031,9 @@ static void qtnf_topaz_fw_work_handler(struct work_struct *work)
{
struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
- int ret;
int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
+ struct pci_dev *pdev = ts->base.pdev;
+ int ret;
qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);
@@ -1073,19 +1082,23 @@ static void qtnf_topaz_fw_work_handler(struct work_struct *work)
}
}
+ ret = qtnf_post_init_ep(ts);
+ if (ret) {
+ pr_err("FW runtime failure\n");
+ goto fw_load_exit;
+ }
+
pr_info("firmware is up and running\n");
- ret = qtnf_post_init_ep(ts);
+ ret = qtnf_pcie_fw_boot_done(bus);
if (ret)
- pr_err("FW runtime failure\n");
+ goto fw_load_exit;
-fw_load_exit:
- qtnf_pcie_fw_boot_done(bus, ret ? false : true);
+ qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
+ qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
- if (ret == 0) {
- qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
- qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
- }
+fw_load_exit:
+ put_device(&pdev->dev);
}
static void qtnf_reclaim_tasklet_fn(unsigned long data)
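The EAPOL special case added to the TX path above rests on a single Ethertype comparison: skb->protocol is stored in network byte order, so 802.1X frames are matched against htons(ETH_P_PAE). A self-contained sketch of just that classification, using userspace headers and simplified types:

#include <arpa/inet.h>	/* htons() */
#include <stdint.h>
#include <stdio.h>

#define ETH_P_PAE 0x888E	/* IEEE 802.1X (EAPOL) Ethertype */

/* protocol_be mirrors skb->protocol: a big-endian 16-bit Ethertype */
static int is_eapol(uint16_t protocol_be)
{
	return protocol_be == htons(ETH_P_PAE);
}

int main(void)
{
	printf("EAPOL frame: %d\n", is_eapol(htons(0x888E)));	/* 1 */
	printf("IPv4 frame:  %d\n", is_eapol(htons(0x0800)));	/* 0 */
	return 0;
}

Diverting these frames through qtnf_packet_send_hi_pri() (core.c above) appears intended to keep 4-way handshake frames from queueing behind bulk data on the normal DMA ring.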
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 7798edcf7980..8a3c6344fa8e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -6,7 +6,7 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 13
+#define QLINK_PROTO_VER 15
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -206,6 +206,8 @@ struct qlink_sta_info_state {
* execution status (one of &enum qlink_cmd_result). Reply message
* may also contain data payload specific to the command type.
*
+ * @QLINK_CMD_SEND_FRAME: send the specified frame over the air; the firmware
+ * will encapsulate an 802.3 packet into an 802.11 frame automatically.
* @QLINK_CMD_BAND_INFO_GET: for the specified MAC and specified band, get
* the band's description including number of operational channels and
* info on each channel, HT/VHT capabilities, supported rates etc.
@@ -220,7 +222,7 @@ enum qlink_cmd_type {
QLINK_CMD_FW_INIT = 0x0001,
QLINK_CMD_FW_DEINIT = 0x0002,
QLINK_CMD_REGISTER_MGMT = 0x0003,
- QLINK_CMD_SEND_MGMT_FRAME = 0x0004,
+ QLINK_CMD_SEND_FRAME = 0x0004,
QLINK_CMD_MGMT_SET_APPIE = 0x0005,
QLINK_CMD_PHY_PARAMS_GET = 0x0011,
QLINK_CMD_PHY_PARAMS_SET = 0x0012,
@@ -321,22 +323,26 @@ struct qlink_cmd_mgmt_frame_register {
u8 do_register;
} __packed;
-enum qlink_mgmt_frame_tx_flags {
- QLINK_MGMT_FRAME_TX_FLAG_NONE = 0,
- QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN = BIT(0),
- QLINK_MGMT_FRAME_TX_FLAG_NO_CCK = BIT(1),
- QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2),
+/**
+ * @QLINK_FRAME_TX_FLAG_8023: frame has an 802.3 header; if not set, the
+ * frame is 802.11 encapsulated.
+ */
+enum qlink_frame_tx_flags {
+ QLINK_FRAME_TX_FLAG_OFFCHAN = BIT(0),
+ QLINK_FRAME_TX_FLAG_NO_CCK = BIT(1),
+ QLINK_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2),
+ QLINK_FRAME_TX_FLAG_8023 = BIT(3),
};
/**
- * struct qlink_cmd_mgmt_frame_tx - data for QLINK_CMD_SEND_MGMT_FRAME command
+ * struct qlink_cmd_frame_tx - data for QLINK_CMD_SEND_FRAME command
*
* @cookie: opaque request identifier.
* @freq: Frequency to use for frame transmission.
- * @flags: Transmission flags, one of &enum qlink_mgmt_frame_tx_flags.
+ * @flags: Transmission flags, one of &enum qlink_frame_tx_flags.
* @frame_data: frame to transmit.
*/
-struct qlink_cmd_mgmt_frame_tx {
+struct qlink_cmd_frame_tx {
struct qlink_cmd chdr;
__le32 cookie;
__le16 freq;
@@ -580,12 +586,22 @@ enum qlink_user_reg_hint_type {
* @initiator: which entity sent the request, one of &enum qlink_reg_initiator.
* @user_reg_hint_type: type of hint for QLINK_REGDOM_SET_BY_USER request, one
* of &enum qlink_user_reg_hint_type.
+ * @num_channels: number of &struct qlink_tlv_channel entries in the variable
+ * portion of the payload.
+ * @slave_radar: whether the slave device should enable radar detection.
+ * @dfs_region: one of &enum qlink_dfs_regions.
+ * @info: variable portion of the regulatory notify command payload.
*/
struct qlink_cmd_reg_notify {
struct qlink_cmd chdr;
u8 alpha2[2];
u8 initiator;
u8 user_reg_hint_type;
+ u8 num_channels;
+ u8 dfs_region;
+ u8 slave_radar;
+ u8 rsvd[1];
+ u8 info[0];
} __packed;
/**
@@ -765,6 +781,18 @@ struct qlink_resp {
} __packed;
/**
+ * enum qlink_dfs_regions - regulatory DFS regions
+ *
+ * Corresponds to &enum nl80211_dfs_regions.
+ */
+enum qlink_dfs_regions {
+ QLINK_DFS_UNSET = 0,
+ QLINK_DFS_FCC = 1,
+ QLINK_DFS_ETSI = 2,
+ QLINK_DFS_JP = 3,
+};
+
+/**
* struct qlink_resp_get_mac_info - response for QLINK_CMD_MAC_INFO command
*
* Data describing specific physical device providing wireless MAC
@@ -779,6 +807,10 @@ struct qlink_resp {
* @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band.
* @max_ap_assoc_sta: Maximum number of associations supported by WMAC.
* @radar_detect_widths: bitmask of channels BW for which WMAC can detect radar.
+ * @alpha2: country code ID the firmware is configured to.
+ * @n_reg_rules: number of regulatory rule TLVs in the variable portion of
+ * the message.
+ * @dfs_region: regulatory DFS region, one of &enum qlink_dfs_regions.
* @var_info: variable-length WMAC info data.
*/
struct qlink_resp_get_mac_info {
@@ -792,23 +824,14 @@ struct qlink_resp_get_mac_info {
__le16 radar_detect_widths;
__le32 max_acl_mac_addrs;
u8 bands_cap;
+ u8 alpha2[2];
+ u8 n_reg_rules;
+ u8 dfs_region;
u8 rsvd[1];
u8 var_info[0];
} __packed;
/**
- * enum qlink_dfs_regions - regulatory DFS regions
- *
- * Corresponds to &enum nl80211_dfs_regions.
- */
-enum qlink_dfs_regions {
- QLINK_DFS_UNSET = 0,
- QLINK_DFS_FCC = 1,
- QLINK_DFS_ETSI = 2,
- QLINK_DFS_JP = 3,
-};
-
-/**
* struct qlink_resp_get_hw_info - response for QLINK_CMD_GET_HW_INFO command
*
* Description of wireless hardware capabilities and features.
@@ -820,11 +843,7 @@ enum qlink_dfs_regions {
* @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware.
* @total_tx_chains: total number of transmit chains used by device.
* @total_rx_chains: total number of receive chains.
- * @alpha2: country code ID firmware is configured to.
- * @n_reg_rules: number of regulatory rules TLVs in variable portion of the
- * message.
- * @dfs_region: regulatory DFS region, one of @enum qlink_dfs_region.
- * @info: variable-length HW info, can contain QTN_TLV_ID_REG_RULE.
+ * @info: variable-length HW info.
*/
struct qlink_resp_get_hw_info {
struct qlink_resp rhdr;
@@ -838,9 +857,6 @@ struct qlink_resp_get_hw_info {
u8 mac_bitmap;
u8 total_tx_chain;
u8 total_rx_chain;
- u8 alpha2[2];
- u8 n_reg_rules;
- u8 dfs_region;
u8 info[0];
} __packed;
@@ -1148,6 +1164,13 @@ struct qlink_event_external_auth {
* carried by QTN_TLV_ID_STA_STATS_MAP.
* @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
* for in any given scan.
+ * @QTN_TLV_ID_SCAN_DWELL_ACTIVE: time spent on a single channel for an active
+ * scan.
+ * @QTN_TLV_ID_SCAN_DWELL_PASSIVE: time spent on a single channel for a passive
+ * scan.
+ * @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel
+ * during a scan including off-channel dwell time and operating channel
+ * time.
*/
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1180,7 +1203,9 @@ enum qlink_tlv_id {
QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
QTN_TLV_ID_SCAN_FLUSH = 0x0412,
- QTN_TLV_ID_SCAN_DWELL = 0x0413,
+ QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413,
+ QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416,
+ QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417,
};
struct qlink_tlv_hdr {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index 72bfd17cb687..1a972bce7b8b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -182,3 +182,120 @@ void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
memcpy(qacl->mac_addrs, acl->mac_addrs,
acl->n_acl_entries * sizeof(*qacl->mac_addrs));
}
+
+enum qlink_band qlink_utils_band_cfg2q(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return QLINK_BAND_2GHZ;
+ case NL80211_BAND_5GHZ:
+ return QLINK_BAND_5GHZ;
+ case NL80211_BAND_60GHZ:
+ return QLINK_BAND_60GHZ;
+ default:
+ return -EINVAL;
+ }
+}
+
+enum qlink_dfs_state qlink_utils_dfs_state_cfg2q(enum nl80211_dfs_state state)
+{
+ switch (state) {
+ case NL80211_DFS_USABLE:
+ return QLINK_DFS_USABLE;
+ case NL80211_DFS_AVAILABLE:
+ return QLINK_DFS_AVAILABLE;
+ case NL80211_DFS_UNAVAILABLE:
+ default:
+ return QLINK_DFS_UNAVAILABLE;
+ }
+}
+
+u32 qlink_utils_chflags_cfg2q(u32 cfgflags)
+{
+ u32 flags = 0;
+
+ if (cfgflags & IEEE80211_CHAN_DISABLED)
+ flags |= QLINK_CHAN_DISABLED;
+
+ if (cfgflags & IEEE80211_CHAN_NO_IR)
+ flags |= QLINK_CHAN_NO_IR;
+
+ if (cfgflags & IEEE80211_CHAN_RADAR)
+ flags |= QLINK_CHAN_RADAR;
+
+ if (cfgflags & IEEE80211_CHAN_NO_HT40PLUS)
+ flags |= QLINK_CHAN_NO_HT40PLUS;
+
+ if (cfgflags & IEEE80211_CHAN_NO_HT40MINUS)
+ flags |= QLINK_CHAN_NO_HT40MINUS;
+
+ if (cfgflags & IEEE80211_CHAN_NO_80MHZ)
+ flags |= QLINK_CHAN_NO_80MHZ;
+
+ if (cfgflags & IEEE80211_CHAN_NO_160MHZ)
+ flags |= QLINK_CHAN_NO_160MHZ;
+
+ return flags;
+}
+
+static u32 qtnf_reg_rule_flags_parse(u32 qflags)
+{
+ u32 flags = 0;
+
+ if (qflags & QLINK_RRF_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (qflags & QLINK_RRF_NO_CCK)
+ flags |= NL80211_RRF_NO_CCK;
+
+ if (qflags & QLINK_RRF_NO_INDOOR)
+ flags |= NL80211_RRF_NO_INDOOR;
+
+ if (qflags & QLINK_RRF_NO_OUTDOOR)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (qflags & QLINK_RRF_DFS)
+ flags |= NL80211_RRF_DFS;
+
+ if (qflags & QLINK_RRF_PTP_ONLY)
+ flags |= NL80211_RRF_PTP_ONLY;
+
+ if (qflags & QLINK_RRF_PTMP_ONLY)
+ flags |= NL80211_RRF_PTMP_ONLY;
+
+ if (qflags & QLINK_RRF_NO_IR)
+ flags |= NL80211_RRF_NO_IR;
+
+ if (qflags & QLINK_RRF_AUTO_BW)
+ flags |= NL80211_RRF_AUTO_BW;
+
+ if (qflags & QLINK_RRF_IR_CONCURRENT)
+ flags |= NL80211_RRF_IR_CONCURRENT;
+
+ if (qflags & QLINK_RRF_NO_HT40MINUS)
+ flags |= NL80211_RRF_NO_HT40MINUS;
+
+ if (qflags & QLINK_RRF_NO_HT40PLUS)
+ flags |= NL80211_RRF_NO_HT40PLUS;
+
+ if (qflags & QLINK_RRF_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (qflags & QLINK_RRF_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
+
+void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
+ const struct qlink_tlv_reg_rule *tlv)
+{
+ rule->freq_range.start_freq_khz = le32_to_cpu(tlv->start_freq_khz);
+ rule->freq_range.end_freq_khz = le32_to_cpu(tlv->end_freq_khz);
+ rule->freq_range.max_bandwidth_khz =
+ le32_to_cpu(tlv->max_bandwidth_khz);
+ rule->power_rule.max_antenna_gain = le32_to_cpu(tlv->max_antenna_gain);
+ rule->power_rule.max_eirp = le32_to_cpu(tlv->max_eirp);
+ rule->dfs_cac_ms = le32_to_cpu(tlv->dfs_cac_ms);
+ rule->flags = qtnf_reg_rule_flags_parse(le32_to_cpu(tlv->flags));
+}
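Much of the response parsing in commands.c above walks qlink TLV lists (the var_info/info arrays) using the qlink_tlv_hdr layout from qlink.h. Below is a minimal standalone sketch of that walk with simplified types; the real code uses __le16 accessors, whereas the sample buffer here assumes a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {		/* stand-in for struct qlink_tlv_hdr */
	uint16_t type;
	uint16_t len;		/* length of the value, header excluded */
	uint8_t val[];
};

static void walk_tlvs(const uint8_t *buf, size_t buf_len)
{
	while (buf_len >= sizeof(struct tlv_hdr)) {
		struct tlv_hdr hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		if (sizeof(hdr) + hdr.len > buf_len) {
			printf("malformed TLV 0x%04x; len %u\n",
			       hdr.type, (unsigned int)hdr.len);
			return;
		}
		printf("TLV type 0x%04x, %u value bytes\n",
		       hdr.type, (unsigned int)hdr.len);
		buf += sizeof(hdr) + hdr.len;
		buf_len -= sizeof(hdr) + hdr.len;
	}
}

int main(void)
{
	/* QTN_TLV_ID_SCAN_DWELL_ACTIVE (0x0413, 2-byte value) followed by
	 * QTN_TLV_ID_SCAN_FLUSH (0x0412, empty value) */
	const uint8_t buf[] = { 0x13, 0x04, 0x02, 0x00, 0x2c, 0x01,
				0x12, 0x04, 0x00, 0x00 };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}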
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 781ea7fe79f2..f873beed2ae7 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -79,5 +79,10 @@ bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
unsigned int arr_max_len);
void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
struct qlink_acl_data *qacl);
+enum qlink_band qlink_utils_band_cfg2q(enum nl80211_band band);
+enum qlink_dfs_state qlink_utils_dfs_state_cfg2q(enum nl80211_dfs_state state);
+u32 qlink_utils_chflags_cfg2q(u32 cfgflags);
+void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
+ const struct qlink_tlv_reg_rule *tlv_rule);
#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index b05ed2f3025a..06c38bafd2ca 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -48,7 +48,8 @@
* RF2853 2.4G/5G 3T3R
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
- * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF3053 2.4G/5G 3T3R(RT3563/RT3573/RT3593)
+ * RF3853 2.4G/5G 3T3R(RT3883/RT3662)
* RF5592 2.4G/5G 2T2R
* RF3070 2.4G 1T1R
* RF5360 2.4G 1T1R
@@ -72,6 +73,7 @@
#define RF5592 0x000f
#define RF3070 0x3070
#define RF3290 0x3290
+#define RF3853 0x3853
#define RF5350 0x5350
#define RF5360 0x5360
#define RF5362 0x5362
@@ -1726,6 +1728,20 @@
#define TX_PWR_CFG_9B_STBC_MCS7 FIELD32(0x000000ff)
/*
+ * TX_TXBF_CFG:
+ */
+#define TX_TXBF_CFG_0 0x138c
+#define TX_TXBF_CFG_1 0x13a4
+#define TX_TXBF_CFG_2 0x13a8
+#define TX_TXBF_CFG_3 0x13ac
+
+/*
+ * TX_FBK_CFG_3S:
+ */
+#define TX_FBK_CFG_3S_0 0x13c4
+#define TX_FBK_CFG_3S_1 0x13c8
+
+/*
* RX_FILTER_CFG: RX configuration register.
*/
#define RX_FILTER_CFG 0x1400
@@ -2296,6 +2312,7 @@ struct mac_iveiv_entry {
/*
* RFCSR 2:
*/
+#define RFCSR2_RESCAL_BP FIELD8(0x40)
#define RFCSR2_RESCAL_EN FIELD8(0x80)
#define RFCSR2_RX2_EN_MT7620 FIELD8(0x02)
#define RFCSR2_TX2_EN_MT7620 FIELD8(0x20)
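The new RFCSR2_RESCAL_BP definition above follows the rt2x00 FIELD8() register-field convention used throughout this header. As a rough illustration of the idea only (this is not the real rt2x00 macro, whose internals differ), a field boils down to a mask plus the shift implied by its lowest set bit:

#include <stdint.h>
#include <stdio.h>

struct field8 {
	uint8_t mask;
	uint8_t shift;
};

#define FIELD8(m) { .mask = (m), .shift = (uint8_t)__builtin_ctz(m) }

static void set_field8(uint8_t *reg, struct field8 f, uint8_t value)
{
	*reg = (*reg & ~f.mask) | ((uint8_t)(value << f.shift) & f.mask);
}

static uint8_t get_field8(uint8_t reg, struct field8 f)
{
	return (reg & f.mask) >> f.shift;
}

int main(void)
{
	const struct field8 rescal_bp = FIELD8(0x40);
	uint8_t rfcsr2 = 0x00;

	set_field8(&rfcsr2, rescal_bp, 1);
	printf("RFCSR2 = 0x%02x, RESCAL_BP = %u\n",
	       rfcsr2, get_field8(rfcsr2, rescal_bp));	/* 0x40, 1 */
	return 0;
}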
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a03b5284a050..c8f2bf1243fd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -381,7 +381,8 @@ static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev,
wiphy_name(rt2x00dev->hw->wiphy), word))
return 0;
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
map = rt2800_eeprom_map_ext;
else
map = rt2800_eeprom_map;
@@ -590,6 +591,7 @@ void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
{
switch (rt2x00dev->chip.rt) {
case RT3593:
+ case RT3883:
*txwi_size = TXWI_DESC_SIZE_4WORDS;
*rxwi_size = RXWI_DESC_SIZE_5WORDS;
break;
@@ -1100,7 +1102,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
-void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
+void rt2800_txdone(struct rt2x00_dev *rt2x00dev, unsigned int quota)
{
struct data_queue *queue;
struct queue_entry *entry;
@@ -1108,7 +1110,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
u8 qid;
bool match;
- while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
+ while (quota-- > 0 && kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
/*
* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is
* guaranteed to be one of the TX QIDs .
@@ -1164,15 +1166,6 @@ bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
struct queue_entry *entry;
- if (!test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags)) {
- unsigned long tout = msecs_to_jiffies(1000);
-
- if (time_before(jiffies, rt2x00dev->last_nostatus_check + tout))
- return false;
- }
-
- rt2x00dev->last_nostatus_check = jiffies;
-
tx_queue_for_each(rt2x00dev, queue) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
if (rt2800_entry_txstatus_timeout(rt2x00dev, entry))
@@ -1183,6 +1176,23 @@ bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_txstatus_timeout);
+/*
+ * test if there is an entry in any TX queue for which DMA is done
+ * but the TX status has not been returned yet
+ */
+bool rt2800_txstatus_pending(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue) {
+ if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) !=
+ rt2x00queue_get_entry(queue, Q_INDEX_DONE))
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(rt2800_txstatus_pending);
+
void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
@@ -2172,7 +2182,8 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_write(rt2x00dev, 3, r3);
rt2800_bbp_write(rt2x00dev, 1, r1);
- if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
if (ant->rx_chain_num == 1)
rt2800_bbp_write(rt2x00dev, 86, 0x00);
else
@@ -2194,7 +2205,8 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_LNA);
lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
} else if (libconf->rf.channel <= 128) {
- if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
lna_gain = rt2x00_get_field16(eeprom,
EEPROM_EXT_LNA2_A1);
@@ -2204,7 +2216,8 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
EEPROM_RSSI_BG2_LNA_A1);
}
} else {
- if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
lna_gain = rt2x00_get_field16(eeprom,
EEPROM_EXT_LNA2_A2);
@@ -2872,6 +2885,211 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
}
}
+static void rt2800_config_channel_rf3853(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+ u8 bbp;
+ u8 pwr1, pwr2, pwr3;
+
+ const bool txbf_enabled = false; /* TODO */
+
+ /* TODO: add band selection */
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+ else if (rf->channel < 132)
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x80);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x46);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x48);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x1a);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x52);
+
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 3:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+ /* fallthrough */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ /* fallthrough */
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+ break;
+ }
+
+ switch (rt2x00dev->default_ant.rx_chain_num) {
+ case 3:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+ /* fallthrough */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ /* fallthrough */
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+ break;
+ }
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_freq_cal_mode1(rt2x00dev);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 30);
+ if (!conf_is_ht40(conf))
+ rfcsr &= ~(0x06);
+ else
+ rfcsr |= 0x06;
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 31, 0xa0);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+
+ if (conf_is_ht40(conf))
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 32, 0xd8);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x3c);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x20);
+
+ /* loopback RF_BS */
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 36);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0);
+ rt2800_rfcsr_write(rt2x00dev, 36, rfcsr);
+
+ if (rf->channel <= 14)
+ rfcsr = 0x23;
+ else if (rf->channel < 100)
+ rfcsr = 0x36;
+ else if (rf->channel < 132)
+ rfcsr = 0x32;
+ else
+ rfcsr = 0x30;
+
+ if (txbf_enabled)
+ rfcsr |= 0x40;
+
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x9b);
+
+ if (rf->channel <= 14)
+ rfcsr = 0xbb;
+ else if (rf->channel < 100)
+ rfcsr = 0xeb;
+ else if (rf->channel < 132)
+ rfcsr = 0xb3;
+ else
+ rfcsr = 0x9b;
+ rt2800_rfcsr_write(rt2x00dev, 45, rfcsr);
+
+ if (rf->channel <= 14)
+ rfcsr = 0x8e;
+ else
+ rfcsr = 0x8a;
+
+ if (txbf_enabled)
+ rfcsr |= 0x20;
+
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 51);
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x75);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x51);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 52);
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
+
+ if (rf->channel <= 14) {
+ pwr1 = info->default_power1 & 0x1f;
+ pwr2 = info->default_power2 & 0x1f;
+ pwr3 = info->default_power3 & 0x1f;
+ } else {
+ pwr1 = 0x48 | ((info->default_power1 & 0x18) << 1) |
+ (info->default_power1 & 0x7);
+ pwr2 = 0x48 | ((info->default_power2 & 0x18) << 1) |
+ (info->default_power2 & 0x7);
+ pwr3 = 0x48 | ((info->default_power3 & 0x18) << 1) |
+ (info->default_power3 & 0x7);
+ }
+
+ rt2800_rfcsr_write(rt2x00dev, 53, pwr1);
+ rt2800_rfcsr_write(rt2x00dev, 54, pwr2);
+ rt2800_rfcsr_write(rt2x00dev, 55, pwr3);
+
+ rt2x00_dbg(rt2x00dev, "Channel:%d, pwr1:%02x, pwr2:%02x, pwr3:%02x\n",
+ rf->channel, pwr1, pwr2, pwr3);
+
+ bbp = (info->default_power1 >> 5) |
+ ((info->default_power2 & 0xe0) >> 1);
+ rt2800_bbp_write(rt2x00dev, 109, bbp);
+
+ bbp = rt2800_bbp_read(rt2x00dev, 110);
+ bbp &= 0x0f;
+ bbp |= (info->default_power3 & 0xe0) >> 1;
+ rt2800_bbp_write(rt2x00dev, 110, bbp);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 57);
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x6e);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x3e);
+
+ /* Enable RF tuning */
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3);
+ rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+
+ udelay(2000);
+
+ bbp = rt2800_bbp_read(rt2x00dev, 49);
+ /* clear update flag */
+ rt2800_bbp_write(rt2x00dev, 49, bbp & 0xfe);
+ rt2800_bbp_write(rt2x00dev, 49, bbp);
+
+ /* TODO: add calibration for TxBF */
+}
+
#define POWER_BOUND 0x27
#define POWER_BOUND_5G 0x2b
@@ -3675,19 +3893,51 @@ static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
unsigned int channel,
char txpower)
{
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
if (channel <= 14)
return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
MAX_A_TXPOWER_3593);
else
return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
}
+static void rt3883_bbp_adjust(struct rt2x00_dev *rt2x00dev,
+ struct rf_channel *rf)
+{
+ u8 bbp;
+
+ bbp = (rf->channel > 14) ? 0x48 : 0x38;
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+
+ if (rf->channel <= 14) {
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+ } else {
+ /* Disable CCK packet detection */
+ rt2800_bbp_write(rt2x00dev, 70, 0x00);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+
+ if (rf->channel > 14) {
+ rt2800_bbp_write(rt2x00dev, 62, 0x1d);
+ rt2800_bbp_write(rt2x00dev, 63, 0x1d);
+ rt2800_bbp_write(rt2x00dev, 64, 0x1d);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 62, 0x2d);
+ rt2800_bbp_write(rt2x00dev, 63, 0x2d);
+ rt2800_bbp_write(rt2x00dev, 64, 0x2d);
+ }
+}
+
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -3706,6 +3956,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_txpower_to_dev(rt2x00dev, rf->channel,
info->default_power3);
+ switch (rt2x00dev->chip.rt) {
+ case RT3883:
+ rt3883_bbp_adjust(rt2x00dev, rf);
+ break;
+ }
+
switch (rt2x00dev->chip.rf) {
case RF2020:
case RF3020:
@@ -3726,6 +3982,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3322:
rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
break;
+ case RF3853:
+ rt2800_config_channel_rf3853(rt2x00dev, conf, rf, info);
+ break;
case RF3070:
case RF5350:
case RF5360:
@@ -3807,6 +4066,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 77, 0x98);
+ } else if (rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+
+ if (rt2x00dev->default_ant.rx_chain_num > 1)
+ rt2800_bbp_write(rt2x00dev, 86, 0x46);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0);
} else {
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
@@ -3820,6 +4088,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
!rt2x00_rt(rt2x00dev, RT6352)) {
if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
if (rt2x00_rt(rt2x00dev, RT3593))
@@ -3828,19 +4097,22 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 82, 0x84);
rt2800_bbp_write(rt2x00dev, 75, 0x50);
}
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
rt2800_bbp_write(rt2x00dev, 83, 0x8a);
}
} else {
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_bbp_write(rt2x00dev, 82, 0x94);
- else if (rt2x00_rt(rt2x00dev, RT3593))
+ else if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
rt2800_bbp_write(rt2x00dev, 82, 0x82);
else if (!rt2x00_rt(rt2x00dev, RT6352))
rt2800_bbp_write(rt2x00dev, 82, 0xf2);
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
rt2800_bbp_write(rt2x00dev, 83, 0x9a);
if (rt2x00_has_cap_external_lna_a(rt2x00dev))
@@ -3976,6 +4248,23 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
usleep_range(1000, 1500);
}
+ if (rt2x00_rt(rt2x00dev, RT3883)) {
+ if (!conf_is_ht40(conf))
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
+ else
+ rt2800_bbp_write(rt2x00dev, 105, 0x04);
+
+ /* AGC init */
+ if (rf->channel <= 14)
+ reg = 0x2e + rt2x00dev->lna_gain;
+ else
+ reg = 0x20 + ((rt2x00dev->lna_gain * 5) / 3);
+
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+
+ usleep_range(1000, 1500);
+ }
+
if (rt2x00_rt(rt2x00dev, RT5592) || rt2x00_rt(rt2x00dev, RT6352)) {
reg = 0x10;
if (!conf_is_ht40(conf)) {
@@ -4235,6 +4524,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
if (rt2x00_rt(rt2x00dev, RT3593))
return min_t(u8, txpower, 0xc);
+ if (rt2x00_rt(rt2x00dev, RT3883))
+ return min_t(u8, txpower, 0xf);
+
if (rt2x00_has_cap_power_limit(rt2x00dev)) {
/*
* Check if eirp txpower exceed txpower_limit.
@@ -4996,7 +5288,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
struct ieee80211_channel *chan,
int power_level)
{
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
else if (rt2x00_rt(rt2x00dev, RT6352))
rt2800_config_txpower_rt6352(rt2x00dev, chan, power_level);
@@ -5043,6 +5336,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
case RF3053:
case RF3070:
case RF3290:
+ case RF3853:
case RF5350:
case RF5360:
case RF5362:
@@ -5243,7 +5537,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
else
vgc = 0x2e + rt2x00dev->lna_gain;
} else { /* 5GHZ band */
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
else if (rt2x00_rt(rt2x00dev, RT5592))
vgc = 0x24 + (2 * rt2x00dev->lna_gain);
@@ -5263,7 +5558,8 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
{
if (qual->vgc_level != vgc_level) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
vgc_level);
} else if (rt2x00_rt(rt2x00dev, RT5592)) {
@@ -5310,6 +5606,11 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
}
break;
+ case RT3883:
+ if (qual->rssi > -65)
+ vgc += 0x10;
+ break;
+
case RT5592:
if (qual->rssi > -65)
vgc += 0x20;
@@ -5462,6 +5763,12 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x00000000);
}
+ } else if (rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00040000);
+ rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21);
+ rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT6352)) {
@@ -5675,6 +5982,11 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
reg = rt2x00_rt(rt2x00dev, RT5592) ? 0x00000082 : 0x00000002;
rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
+ if (rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2800_register_write(rt2x00dev, TX_FBK_CFG_3S_0, 0x12111008);
+ rt2800_register_write(rt2x00dev, TX_FBK_CFG_3S_1, 0x16151413);
+ }
+
reg = rt2800_register_read(rt2x00dev, TX_RTS_CFG);
rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 7);
rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
@@ -6291,6 +6603,47 @@ static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
}
+static void rt2800_init_bbp_3883(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_init_bbp_early(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 4, 0x50);
+ rt2800_bbp_write(rt2x00dev, 47, 0x48);
+
+ rt2800_bbp_write(rt2x00dev, 86, 0x46);
+ rt2800_bbp_write(rt2x00dev, 88, 0x90);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
+ rt2800_bbp_write(rt2x00dev, 106, 0x12);
+ rt2800_bbp_write(rt2x00dev, 120, 0x50);
+ rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+ rt2800_bbp_write(rt2x00dev, 163, 0x9d);
+
+ /* Set ITxBF timeout to 0x9C40=1000msec */
+ rt2800_bbp_write(rt2x00dev, 179, 0x02);
+ rt2800_bbp_write(rt2x00dev, 180, 0x00);
+ rt2800_bbp_write(rt2x00dev, 182, 0x40);
+ rt2800_bbp_write(rt2x00dev, 180, 0x01);
+ rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+
+ rt2800_bbp_write(rt2x00dev, 179, 0x00);
+
+ /* Reprogram the inband interface to put right values in RXWI */
+ rt2800_bbp_write(rt2x00dev, 142, 0x04);
+ rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 142, 0x06);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+ rt2800_bbp_write(rt2x00dev, 142, 0x07);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+ rt2800_bbp_write(rt2x00dev, 142, 0x08);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+ rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+}
+
static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
{
int ant, div_mode;
@@ -6735,6 +7088,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
case RT3593:
rt2800_init_bbp_3593(rt2x00dev);
return;
+ case RT3883:
+ rt2800_init_bbp_3883(rt2x00dev);
+ return;
case RT5390:
case RT5392:
rt2800_init_bbp_53xx(rt2x00dev);
@@ -7606,6 +7962,144 @@ static void rt2800_init_rfcsr_5350(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
}
+static void rt2800_init_rfcsr_3883(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfcsr;
+
+ /* TODO: get the actual ECO value from the SoC */
+ const unsigned int eco = 5;
+
+ rt2800_rf_init_calibration(rt2x00dev, 2);
+
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x5b);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x48);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x1a);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+
+ /* RFCSR 17 will be initialized later based on the
+ * frequency offset stored in the EEPROM
+ */
+
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x86);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x8e);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x51);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x76);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x76);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x76);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x3e);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
+
+ /* TODO: rx filter calibration? */
+
+ rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
+ rt2800_bbp_write(rt2x00dev, 163, 0x9d);
+
+ rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+ rt2800_bbp_write(rt2x00dev, 179, 0x02);
+ rt2800_bbp_write(rt2x00dev, 180, 0x00);
+ rt2800_bbp_write(rt2x00dev, 182, 0x40);
+ rt2800_bbp_write(rt2x00dev, 180, 0x01);
+ rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+
+ rt2800_bbp_write(rt2x00dev, 179, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 142, 0x04);
+ rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 142, 0x06);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+ rt2800_bbp_write(rt2x00dev, 142, 0x07);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+ rt2800_bbp_write(rt2x00dev, 142, 0x08);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+ rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+
+ if (eco == 5) {
+ rt2800_rfcsr_write(rt2x00dev, 32, 0xd8);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x32);
+ }
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 2);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_BP, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 6);
+ rfcsr |= 0xc0;
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 22);
+ rfcsr |= 0x20;
+ rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 46);
+ rfcsr |= 0x20;
+ rt2800_rfcsr_write(rt2x00dev, 46, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 20);
+ rfcsr &= ~0xee;
+ rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
+}
+
static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
{
rt2800_rf_init_calibration(rt2x00dev, 2);
@@ -8448,6 +8942,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
case RT3390:
rt2800_init_rfcsr_3390(rt2x00dev);
break;
+ case RT3883:
+ rt2800_init_rfcsr_3883(rt2x00dev);
+ break;
case RT3572:
rt2800_init_rfcsr_3572(rt2x00dev);
break;
@@ -8653,7 +9150,8 @@ static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev)
{
u16 word;
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
return 0;
word = rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG);
@@ -8667,7 +9165,8 @@ static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev)
{
u16 word;
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
return 0;
word = rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A);
@@ -8773,7 +9272,8 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
- if (!rt2x00_rt(rt2x00dev, RT3593)) {
+ if (!rt2x00_rt(rt2x00dev, RT3593) &&
+ !rt2x00_rt(rt2x00dev, RT3883)) {
if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
@@ -8793,7 +9293,8 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
- if (!rt2x00_rt(rt2x00dev, RT3593)) {
+ if (!rt2x00_rt(rt2x00dev, RT3593) &&
+ !rt2x00_rt(rt2x00dev, RT3883)) {
if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
@@ -8801,7 +9302,8 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
}
rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
- if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
word = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 ||
rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff)
@@ -8840,6 +9342,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rf = rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID);
else if (rt2x00_rt(rt2x00dev, RT3352))
rf = RF3322;
+ else if (rt2x00_rt(rt2x00dev, RT3883))
+ rf = RF3853;
else if (rt2x00_rt(rt2x00dev, RT5350))
rf = RF5350;
else
@@ -8860,6 +9364,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3290:
case RF3320:
case RF3322:
+ case RF3853:
case RF5350:
case RF5360:
case RF5362:
@@ -9146,6 +9651,66 @@ static const struct rf_channel rf_vals_3x_xtal20[] = {
{14, 0xF0, 2, 0x18},
};
+static const struct rf_channel rf_vals_3853[] = {
+ {1, 241, 6, 2},
+ {2, 241, 6, 7},
+ {3, 242, 6, 2},
+ {4, 242, 6, 7},
+ {5, 243, 6, 2},
+ {6, 243, 6, 7},
+ {7, 244, 6, 2},
+ {8, 244, 6, 7},
+ {9, 245, 6, 2},
+ {10, 245, 6, 7},
+ {11, 246, 6, 2},
+ {12, 246, 6, 7},
+ {13, 247, 6, 2},
+ {14, 248, 6, 4},
+
+ {36, 0x56, 8, 4},
+ {38, 0x56, 8, 6},
+ {40, 0x56, 8, 8},
+ {44, 0x57, 8, 0},
+ {46, 0x57, 8, 2},
+ {48, 0x57, 8, 4},
+ {52, 0x57, 8, 8},
+ {54, 0x57, 8, 10},
+ {56, 0x58, 8, 0},
+ {60, 0x58, 8, 4},
+ {62, 0x58, 8, 6},
+ {64, 0x58, 8, 8},
+
+ {100, 0x5b, 8, 8},
+ {102, 0x5b, 8, 10},
+ {104, 0x5c, 8, 0},
+ {108, 0x5c, 8, 4},
+ {110, 0x5c, 8, 6},
+ {112, 0x5c, 8, 8},
+ {114, 0x5c, 8, 10},
+ {116, 0x5d, 8, 0},
+ {118, 0x5d, 8, 2},
+ {120, 0x5d, 8, 4},
+ {124, 0x5d, 8, 8},
+ {126, 0x5d, 8, 10},
+ {128, 0x5e, 8, 0},
+ {132, 0x5e, 8, 4},
+ {134, 0x5e, 8, 6},
+ {136, 0x5e, 8, 8},
+ {140, 0x5f, 8, 0},
+
+ {149, 0x5f, 8, 9},
+ {151, 0x5f, 8, 11},
+ {153, 0x60, 8, 1},
+ {157, 0x60, 8, 5},
+ {159, 0x60, 8, 7},
+ {161, 0x60, 8, 9},
+ {165, 0x61, 8, 1},
+ {167, 0x61, 8, 3},
+ {169, 0x61, 8, 5},
+ {171, 0x61, 8, 7},
+ {173, 0x61, 8, 9},
+};
+
static const struct rf_channel rf_vals_5592_xtal20[] = {
/* Channel, N, K, mod, R */
{1, 482, 4, 10, 3},
@@ -9409,6 +9974,11 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels = rf_vals_3x;
break;
+ case RF3853:
+ spec->num_channels = ARRAY_SIZE(rf_vals_3853);
+ spec->channels = rf_vals_3853;
+ break;
+
case RF5592:
reg = rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX);
if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
@@ -9528,6 +10098,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3053:
case RF3070:
case RF3290:
+ case RF3853:
case RF5350:
case RF5360:
case RF5362:
@@ -9570,6 +10141,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
case RT3390:
case RT3572:
case RT3593:
+ case RT3883:
case RT5350:
case RT5390:
case RT5392:
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 0dff2c7b3010..759eab2b70c3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -195,9 +195,10 @@ void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *tx
void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
bool match);
-void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
+void rt2800_txdone(struct rt2x00_dev *rt2x00dev, unsigned int quota);
void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev);
bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev);
+bool rt2800_txstatus_pending(struct rt2x00_dev *rt2x00dev);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
void rt2800_clear_beacon(struct queue_entry *entry);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index ddb88cfeace2..ecc4c9332ec7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -255,26 +255,12 @@ void rt2800mmio_autowake_tasklet(unsigned long data)
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
-static void rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
-{
- bool timeout = false;
-
- while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
- (timeout = rt2800_txstatus_timeout(rt2x00dev))) {
-
- rt2800_txdone(rt2x00dev);
-
- if (timeout)
- rt2800_txdone_nostatus(rt2x00dev);
- }
-}
-
-static bool rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
+static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
u32 status;
- bool more = false;
+ unsigned long flags;
- /* FIXEME: rewrite this comment
+ /*
* The TX_FIFO_STATUS interrupt needs special care. We should
* read TX_STA_FIFO but we should do it immediately as otherwise
* the register can overflow and we would lose status reports.
@@ -285,34 +271,32 @@ static bool rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
* because we can schedule the tasklet multiple times (when the
* interrupt fires again during tx status processing).
*
- * txstatus tasklet is called with INT_SOURCE_CSR_TX_FIFO_STATUS
- * disabled so have only one producer and one consumer - we don't
- * need to lock the kfifo.
+ * We also read statuses from tx status timeout timer, use
+	 * lock to prevent concurrent writes to fifo.
*/
+
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
break;
kfifo_put(&rt2x00dev->txstatus_fifo, status);
- more = true;
}
- return more;
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}
void rt2800mmio_txstatus_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- do {
- rt2800mmio_txdone(rt2x00dev);
+ rt2800_txdone(rt2x00dev, 16);
- } while (rt2800mmio_fetch_txstatus(rt2x00dev));
+ if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800mmio_enable_interrupt(rt2x00dev,
- INT_SOURCE_CSR_TX_FIFO_STATUS);
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
@@ -339,8 +323,10 @@ irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
mask = ~reg;
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
rt2800mmio_fetch_txstatus(rt2x00dev);
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
@@ -440,6 +426,9 @@ void rt2800mmio_start_queue(struct data_queue *queue)
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
+/* 200 ms */
+#define TXSTATUS_TIMEOUT 200000000
+
void rt2800mmio_kick_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
@@ -454,6 +443,8 @@ void rt2800mmio_kick_queue(struct data_queue *queue)
entry = rt2x00queue_get_entry(queue, Q_INDEX);
rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
entry->entry_idx);
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
break;
case QID_MGMT:
entry = rt2x00queue_get_entry(queue, Q_INDEX);
@@ -498,11 +489,8 @@ void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
* For TX queues schedule completion tasklet to catch
* tx status timeouts, othewise just wait.
*/
- if (tx_queue) {
- tasklet_disable(&rt2x00dev->txstatus_tasklet);
- rt2800mmio_txdone(rt2x00dev);
- tasklet_enable(&rt2x00dev->txstatus_tasklet);
- }
+ if (tx_queue)
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
/*
* Wait for a little while to give the driver
@@ -640,6 +628,10 @@ void rt2800mmio_clear_entry(struct queue_entry *entry)
word = rt2x00_desc_read(entry_priv->desc, 1);
rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /* If last entry stop txstatus timer */
+ if (entry->queue->length == 1)
+ hrtimer_cancel(&rt2x00dev->txstatus_timer);
}
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
@@ -772,6 +764,70 @@ int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
+static void rt2800mmio_work_txdone(struct work_struct *work)
+{
+ struct rt2x00_dev *rt2x00dev =
+ container_of(work, struct rt2x00_dev, txdone_work);
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return;
+
+ while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
+ rt2800_txstatus_timeout(rt2x00dev)) {
+
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ rt2800_txdone(rt2x00dev, UINT_MAX);
+ rt2800_txdone_nostatus(rt2x00dev);
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ }
+
+ if (rt2800_txstatus_pending(rt2x00dev))
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
+}
+
+static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer)
+{
+ struct rt2x00_dev *rt2x00dev =
+ container_of(timer, struct rt2x00_dev, txstatus_timer);
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ goto out;
+
+ if (!rt2800_txstatus_pending(rt2x00dev))
+ goto out;
+
+ rt2800mmio_fetch_txstatus(rt2x00dev);
+ if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ else
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
+out:
+ return HRTIMER_NORESTART;
+}
+
+int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
+{
+ int retval;
+
+ retval = rt2800_probe_hw(rt2x00dev);
+ if (retval)
+ return retval;
+
+ /*
+ * Set txstatus timer function.
+ */
+ rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;
+
+ /*
+ * Overwrite TX done handler
+ */
+ INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw);
+
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
index 3a513273f414..ca58e6c3a4e5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
@@ -153,6 +153,7 @@ void rt2800mmio_stop_queue(struct data_queue *queue);
void rt2800mmio_queue_init(struct data_queue *queue);
/* Initialization functions */
+int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev);
bool rt2800mmio_get_entry_state(struct queue_entry *entry);
void rt2800mmio_clear_entry(struct queue_entry *entry);
int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index 0291441ac548..43e1b1ee96bf 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -346,7 +346,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.tbtt_tasklet = rt2800mmio_tbtt_tasklet,
.rxdone_tasklet = rt2800mmio_rxdone_tasklet,
.autowake_tasklet = rt2800mmio_autowake_tasklet,
- .probe_hw = rt2800_probe_hw,
+ .probe_hw = rt2800mmio_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
.load_firmware = rt2800_load_firmware,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index a502816214ab..4e9e38771a56 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -51,9 +51,16 @@ static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev)
{
+ u32 reg;
+
rt2800_disable_radio(rt2x00dev);
rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
+
+ reg = 0;
+ if (rt2x00_rt(rt2x00dev, RT3883))
+ rt2x00_set_field32(&reg, TX_PIN_CFG_RFTR_EN, 1);
+
+ rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, reg);
}
static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev,
@@ -185,7 +192,7 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
.tbtt_tasklet = rt2800mmio_tbtt_tasklet,
.rxdone_tasklet = rt2800mmio_rxdone_tasklet,
.autowake_tasklet = rt2800mmio_autowake_tasklet,
- .probe_hw = rt2800_probe_hw,
+ .probe_hw = rt2800mmio_probe_hw,
.get_firmware_name = rt2800soc_get_firmware_name,
.check_firmware = rt2800soc_check_firmware,
.load_firmware = rt2800soc_load_firmware,
@@ -203,7 +210,7 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
.start_queue = rt2800mmio_start_queue,
.kick_queue = rt2800mmio_kick_queue,
.stop_queue = rt2800mmio_stop_queue,
- .flush_queue = rt2x00mmio_flush_queue,
+ .flush_queue = rt2800mmio_flush_queue,
.write_tx_desc = rt2800mmio_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 19eabf16147b..b5f75df9b563 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -100,22 +100,6 @@ static void rt2800usb_stop_queue(struct data_queue *queue)
}
}
-/*
- * test if there is an entry in any TX queue for which DMA is done
- * but the TX status has not been returned yet
- */
-static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- tx_queue_for_each(rt2x00dev, queue) {
- if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) !=
- rt2x00queue_get_entry(queue, Q_INDEX_DONE))
- return true;
- }
- return false;
-}
-
#define TXSTATUS_READ_INTERVAL 1000000
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
@@ -145,7 +129,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
if (rt2800_txstatus_timeout(rt2x00dev))
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
- if (rt2800usb_txstatus_pending(rt2x00dev)) {
+ if (rt2800_txstatus_pending(rt2x00dev)) {
/* Read register after 1 ms */
hrtimer_start(&rt2x00dev->txstatus_timer,
TXSTATUS_READ_INTERVAL,
@@ -160,7 +144,7 @@ stop_reading:
* clear_bit someone could do rt2x00usb_interrupt_txdone, so recheck
* here again if status reading is needed.
*/
- if (rt2800usb_txstatus_pending(rt2x00dev) &&
+ if (rt2800_txstatus_pending(rt2x00dev) &&
!test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
return true;
else
@@ -480,7 +464,7 @@ static void rt2800usb_work_txdone(struct work_struct *work)
while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
rt2800_txstatus_timeout(rt2x00dev)) {
- rt2800_txdone(rt2x00dev);
+ rt2800_txdone(rt2x00dev, UINT_MAX);
rt2800_txdone_nostatus(rt2x00dev);
@@ -489,7 +473,7 @@ static void rt2800usb_work_txdone(struct work_struct *work)
* if the medium is busy, thus the TX_STA_FIFO entry is
* also delayed -> use a timer to retrieve it.
*/
- if (rt2800usb_txstatus_pending(rt2x00dev))
+ if (rt2800_txstatus_pending(rt2x00dev))
rt2800usb_async_read_tx_status(rt2x00dev);
}
}
@@ -562,13 +546,13 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
* stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
-
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
rxdesc->flags |= RX_FLAG_DECRYPTED;
} else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
/*
* In order to check the Michael Mic, the packet must have
- * been decrypted. Mac80211 doesnt check the MMIC failure
+ * been decrypted. Mac80211 doesnt check the MMIC failure
* flag to initiate MMIC countermeasures if the decoded flag
* has not been set.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 4b1744e9fb78..9c6ef0ca932b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -69,10 +69,10 @@
printk(KERN_ERR KBUILD_MODNAME ": %s: Error - " fmt, \
__func__, ##__VA_ARGS__)
#define rt2x00_err(dev, fmt, ...) \
- wiphy_err((dev)->hw->wiphy, "%s: Error - " fmt, \
+ wiphy_err_ratelimited((dev)->hw->wiphy, "%s: Error - " fmt, \
__func__, ##__VA_ARGS__)
#define rt2x00_warn(dev, fmt, ...) \
- wiphy_warn((dev)->hw->wiphy, "%s: Warning - " fmt, \
+ wiphy_warn_ratelimited((dev)->hw->wiphy, "%s: Warning - " fmt, \
__func__, ##__VA_ARGS__)
#define rt2x00_info(dev, fmt, ...) \
wiphy_info((dev)->hw->wiphy, "%s: Info - " fmt, \
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
CONFIG_CHANNEL_HT40,
CONFIG_POWERSAVING,
CONFIG_HT_DISABLED,
- CONFIG_QOS_DISABLED,
CONFIG_MONITORING,
/*
@@ -981,8 +980,6 @@ struct rt2x00_dev {
*/
DECLARE_KFIFO_PTR(txstatus_fifo, u32);
- unsigned long last_nostatus_check;
-
/*
* Timer to ensure tx status reports are read (rt2800usb).
*/
@@ -1017,6 +1014,7 @@ struct rt2x00_dev {
unsigned int extra_tx_headroom;
struct usb_anchor *anchor;
+ unsigned int num_proto_errs;
/* Clock for System On Chip devices. */
struct clk *clk;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 357c0941aaad..1b08b01db27b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1007,7 +1007,7 @@ void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr
const char *mac_addr;
mac_addr = of_get_mac_address(rt2x00dev->dev->of_node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(eeprom_mac_addr, mac_addr);
if (!is_valid_ether_addr(eeprom_mac_addr)) {
@@ -1391,6 +1391,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
mutex_init(&rt2x00dev->conf_mutex);
INIT_LIST_HEAD(&rt2x00dev->bar_list);
spin_lock_init(&rt2x00dev->bar_list_lock);
+ hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -1515,6 +1517,8 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
+ hrtimer_cancel(&rt2x00dev->txstatus_timer);
+
/*
* Kill the tx status tasklet.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 2825560e2424..e8462f25d252 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
rt2x00dev->intf_associated--;
rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
- clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
}
/*
- * Check for access point which do not support 802.11e . We have to
- * generate data frames sequence number in S/W for such AP, because
- * of H/W bug.
- */
- if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
- set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
- /*
* When the erp information has changed, we should perform
* additional configuration steps. For all other changes we are done.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
index 184a4148b2f8..03e6cdb0b5a4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
@@ -80,8 +80,6 @@ int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
*
* @desc: Pointer to device descriptor
* @desc_dma: DMA pointer to &desc.
- * @data: Pointer to device's entry memory.
- * @data_dma: DMA pointer to &data.
*/
struct queue_entry_priv_mmio {
__le32 *desc;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 92ddc19e7bf7..03b206440208 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
/*
* rt2800 has a H/W (or F/W) bug, device incorrectly increase
- * seqno on retransmited data (non-QOS) frames. To workaround
- * the problem let's generate seqno in software if QOS is
- * disabled.
+ * seqno on retransmitted data (non-QOS) and management frames.
+	 * To work around the problem, let's generate seqno in software.
+ * Except for beacons which are transmitted periodically by H/W
+ * hence hardware has to assign seqno for them.
*/
- if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
- __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
- else
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
/* H/W will generate sequence number */
return;
+ }
+
+ __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
/*
@@ -671,7 +674,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
spin_lock(&queue->tx_lock);
if (unlikely(rt2x00queue_full(queue))) {
- rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
+ rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
queue->qid);
ret = -ENOBUFS;
goto out;
@@ -1039,7 +1042,6 @@ void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
*/
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_start_queue(queue);
- rt2x00dev->last_nostatus_check = jiffies;
rt2x00queue_start_queue(rt2x00dev->rx);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
index a15bae29917b..20113f861b9e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
@@ -361,7 +361,6 @@ enum queue_entry_flags {
ENTRY_DATA_PENDING,
ENTRY_DATA_IO_FAILED,
ENTRY_DATA_STATUS_PENDING,
- ENTRY_DATA_STATUS_SET,
};
/**
@@ -387,8 +386,6 @@ struct queue_entry {
unsigned int entry_idx;
- u32 status;
-
void *priv_data;
};
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 086aad22743d..9cdd7f2c92b5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -31,6 +31,22 @@
#include "rt2x00.h"
#include "rt2x00usb.h"
+static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
+{
+ if (status == -ENODEV || status == -ENOENT)
+ return true;
+
+ if (status == -EPROTO || status == -ETIMEDOUT)
+ rt2x00dev->num_proto_errs++;
+ else
+ rt2x00dev->num_proto_errs = 0;
+
+ if (rt2x00dev->num_proto_errs > 3)
+ return true;
+
+ return false;
+}
+
/*
* Interfacing with the HW.
*/
@@ -57,7 +73,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
if (status >= 0)
return 0;
- if (status == -ENODEV || status == -ENOENT) {
+ if (rt2x00usb_check_usb_error(rt2x00dev, status)) {
/* Device has disappeared. */
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
break;
@@ -321,7 +337,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- if (status == -ENODEV || status == -ENOENT)
+ if (rt2x00usb_check_usb_error(rt2x00dev, status))
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
@@ -410,7 +426,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- if (status == -ENODEV || status == -ENOENT)
+ if (rt2x00usb_check_usb_error(rt2x00dev, status))
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 44a943d18b84..ee4d8106bba5 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2795,6 +2795,8 @@ static int __init init_ray_cs(void)
rc = pcmcia_register_driver(&ray_driver);
pr_debug("raylink init_module register_pcmcia_driver returns 0x%x\n",
rc);
+ if (rc)
+ return rc;
#ifdef CONFIG_PROC_FS
proc_mkdir("driver/ray_cs", NULL);
@@ -2818,11 +2820,7 @@ static void __exit exit_ray_cs(void)
pr_debug("ray_cs: cleanup_module\n");
#ifdef CONFIG_PROC_FS
- remove_proc_entry("driver/ray_cs/ray_cs", NULL);
- remove_proc_entry("driver/ray_cs/essid", NULL);
- remove_proc_entry("driver/ray_cs/net_type", NULL);
- remove_proc_entry("driver/ray_cs/translate", NULL);
- remove_proc_entry("driver/ray_cs", NULL);
+ remove_proc_subtree("driver/ray_cs", NULL);
#endif
pcmcia_unregister_driver(&ray_driver);
diff --git a/drivers/net/wireless/realtek/Kconfig b/drivers/net/wireless/realtek/Kconfig
index 3db988e689d7..9189fd672578 100644
--- a/drivers/net/wireless/realtek/Kconfig
+++ b/drivers/net/wireless/realtek/Kconfig
@@ -14,5 +14,6 @@ if WLAN_VENDOR_REALTEK
source "drivers/net/wireless/realtek/rtl818x/Kconfig"
source "drivers/net/wireless/realtek/rtlwifi/Kconfig"
source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig"
+source "drivers/net/wireless/realtek/rtw88/Kconfig"
endif # WLAN_VENDOR_REALTEK
diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile
index 9c78deb5eea9..118af9963d61 100644
--- a/drivers/net/wireless/realtek/Makefile
+++ b/drivers/net/wireless/realtek/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_RTL8180) += rtl818x/
obj-$(CONFIG_RTL8187) += rtl818x/
obj-$(CONFIG_RTLWIFI) += rtlwifi/
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu/
+obj-$(CONFIG_RTW88) += rtw88/
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 217d2a7a43c7..ac746c322554 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -448,6 +448,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
/* <2> work queue */
rtlpriv->works.hw = hw;
rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+ if (unlikely(!rtlpriv->works.rtl_wq)) {
+ pr_err("Failed to allocate work queue\n");
+ return;
+ }
+
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
(void *)rtl_watchdog_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 48ca52102cef..4055e0ab75ba 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -499,16 +499,16 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
- spin_lock_bh(&rtlpriv->locks.waitq_lock);
+ spin_lock(&rtlpriv->locks.waitq_lock);
if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
(ring->entries - skb_queue_len(&ring->queue) >
rtlhal->max_earlymode_num)) {
skb = skb_dequeue(&mac->skb_waitq[tid]);
} else {
- spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+ spin_unlock(&rtlpriv->locks.waitq_lock);
break;
}
- spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+ spin_unlock(&rtlpriv->locks.waitq_lock);
/* Some macaddr can't do early mode. like
* multicast/broadcast/no_qos data
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index 203e7b574e84..e2e0bfbc24fe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -600,6 +600,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 106011a24827..483dc8bdc555 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -372,8 +372,9 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_88e *p_drvinfo;
struct ieee80211_hdr *hdr;
-
+ u8 wake_match;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
if (status->packet_report_type == TX_REPORT2)
status->length = (u16)GET_RX_RPT2_DESC_PKT_LEN(pdesc);
@@ -399,18 +400,18 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate);
status->macid = GET_RX_DESC_MACID(pdesc);
- if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(2);
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ wake_match = BIT(2);
else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(1);
+ wake_match = BIT(1);
else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
- status->wake_match = BIT(0);
+ wake_match = BIT(0);
else
- status->wake_match = 0;
- if (status->wake_match)
+ wake_match = 0;
+ if (wake_match)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
"GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index 18c76990a089..86b1b88cc4ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -623,6 +623,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
if (cmd_send_packet)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index f3a336e0ea98..d259794a308b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -42,12 +42,9 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
- bool turbo_scanoff = false;
u8 idx1, idx2;
u8 *ptr;
- if ((rtlefuse->eeprom_regulatory != 0) || (rtlefuse->external_pa))
- turbo_scanoff = true;
if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 7c5b54b71a92..67305ce915ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -744,6 +744,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 09cf8180e4ff..d297cfc0fd2b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -331,6 +331,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
struct rx_fwinfo *p_drvinfo;
struct ieee80211_hdr *hdr;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ u8 wake_match;
if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0)
status->packet_report_type = NORMAL_RX;
@@ -350,18 +351,18 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
status->macid = GET_RX_DESC_MACID(pdesc);
- if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(2);
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ wake_match = BIT(2);
else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(1);
+ wake_match = BIT(1);
else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
- status->wake_match = BIT(0);
+ wake_match = BIT(0);
else
- status->wake_match = 0;
- if (status->wake_match)
+ wake_match = 0;
+ if (wake_match)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
"GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index 514891ea2c64..d8260c7afe09 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -663,7 +663,7 @@ void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
}
-void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+static void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index be451a6f7dbe..33481232fad0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -448,6 +448,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 6bab162e1bb8..655460f61bbc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1675,6 +1675,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
rtlhal->oem_id = RT_CID_819X_LENOVO;
break;
}
+ break;
case 0x1025:
rtlhal->oem_id = RT_CID_819X_ACER;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index 4d7fa27f55ca..aa56058af56e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -562,6 +562,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
u1rsvdpageloc, sizeof(u1rsvdpageloc));
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 9ada9a06c6ea..d87ba03fe78f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -300,7 +300,7 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_8723be *p_drvinfo;
struct ieee80211_hdr *hdr;
-
+ u8 wake_match;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
@@ -329,18 +329,18 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
status->packet_report_type = NORMAL_RX;
- if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(2);
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ wake_match = BIT(2);
else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(1);
+ wake_match = BIT(1);
else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
- status->wake_match = BIT(0);
+ wake_match = BIT(0);
else
- status->wake_match = 0;
- if (status->wake_match)
+ wake_match = 0;
+ if (wake_match)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
"GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index dc0eb692088f..fe32d397d287 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -1623,6 +1623,8 @@ out:
&reserved_page_packet_8812[0], totalpacketlen);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -1759,6 +1761,8 @@ out:
&reserved_page_packet_8821[0], totalpacketlen);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index db5e628b17ed..7b6faf38e09c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -436,7 +436,7 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_8821ae *p_drvinfo;
struct ieee80211_hdr *hdr;
-
+ u8 wake_match;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
@@ -473,18 +473,18 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
status->packet_report_type = NORMAL_RX;
if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
- status->wake_match = BIT(2);
+ wake_match = BIT(2);
else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(1);
+ wake_match = BIT(1);
else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
- status->wake_match = BIT(0);
+ wake_match = BIT(0);
else
- status->wake_match = 0;
+ wake_match = 0;
- if (status->wake_match)
+ if (wake_match)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
"GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index e32e9ffa3192..518aaa875361 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2138,7 +2138,6 @@ struct rtl_stats {
u8 packet_report_type;
u32 macid;
- u8 wake_match;
u32 bt_rx_rssi_percentage;
u32 macid_valid_entry[2];
};
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
new file mode 100644
index 000000000000..55b1bf3dd9b6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -0,0 +1,54 @@
+menuconfig RTW88
+ tristate "Realtek 802.11ac wireless chips support"
+ depends on MAC80211
+ help
+ This module adds support for mac80211-based wireless drivers that
+	  enable Realtek IEEE 802.11ac wireless chipsets.
+
+	  If you choose to build it as a module, it will be called rtw88.
+
+if RTW88
+
+config RTW88_CORE
+ tristate
+
+config RTW88_PCI
+ tristate
+
+config RTW88_8822BE
+ bool "Realtek 8822BE PCI wireless network adapter"
+ depends on PCI
+ select RTW88_CORE
+ select RTW88_PCI
+ help
+	  Selecting this option will enable support for the 8822BE chipset:
+
+	  an 802.11ac PCIe wireless network adapter
+
+config RTW88_8822CE
+ bool "Realtek 8822CE PCI wireless network adapter"
+ depends on PCI
+ select RTW88_CORE
+ select RTW88_PCI
+ help
+	  Selecting this option will enable support for the 8822CE chipset:
+
+	  an 802.11ac PCIe wireless network adapter
+
+config RTW88_DEBUG
+ bool "Realtek rtw88 debug support"
+ depends on RTW88_CORE
+ help
+ Enable debug support
+
+ If unsure, say Y to simplify debug problems
+
+config RTW88_DEBUGFS
+ bool "Realtek rtw88 debugfs support"
+ depends on RTW88_CORE
+ help
+	  Enable debugfs support
+
+ If unsure, say Y to simplify debug problems
+
+endif
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
new file mode 100644
index 000000000000..e0bfefd154af
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+obj-$(CONFIG_RTW88_CORE) += rtw88.o
+rtw88-y += main.o \
+ mac80211.o \
+ util.o \
+ debug.o \
+ tx.o \
+ rx.o \
+ mac.o \
+ phy.o \
+ efuse.o \
+ fw.o \
+ ps.o \
+ sec.o \
+ regd.o
+
+rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
+rtw88-$(CONFIG_RTW88_8822CE) += rtw8822c.o rtw8822c_table.o
+
+obj-$(CONFIG_RTW88_PCI) += rtwpci.o
+rtwpci-objs := pci.o
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
new file mode 100644
index 000000000000..f0ae26018f97
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "main.h"
+#include "sec.h"
+#include "fw.h"
+#include "debug.h"
+
+#ifdef CONFIG_RTW88_DEBUGFS
+
+struct rtw_debugfs_priv {
+ struct rtw_dev *rtwdev;
+ int (*cb_read)(struct seq_file *m, void *v);
+ ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *loff);
+ union {
+ u32 cb_data;
+ u8 *buf;
+ struct {
+ u32 page_offset;
+ u32 page_num;
+ } rsvd_page;
+ struct {
+ u8 rf_path;
+ u32 rf_addr;
+ u32 rf_mask;
+ };
+ struct {
+ u32 addr;
+ u32 len;
+ } read_reg;
+ };
+};
+
+static int rtw_debugfs_single_show(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+
+ return debugfs_priv->cb_read(m, v);
+}
+
+static ssize_t rtw_debugfs_common_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
+
+ return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static ssize_t rtw_debugfs_single_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+
+ return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static int rtw_debugfs_single_open_rw(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, rtw_debugfs_single_show, inode->i_private);
+}
+
+static int rtw_debugfs_close(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations file_ops_single_r = {
+ .owner = THIS_MODULE,
+ .open = rtw_debugfs_single_open_rw,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct file_operations file_ops_single_rw = {
+ .owner = THIS_MODULE,
+ .open = rtw_debugfs_single_open_rw,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = rtw_debugfs_single_write,
+};
+
+static const struct file_operations file_ops_common_write = {
+ .owner = THIS_MODULE,
+ .write = rtw_debugfs_common_write,
+ .open = simple_open,
+ .release = rtw_debugfs_close,
+};
+
+static int rtw_debugfs_get_read_reg(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val, len, addr;
+
+ len = debugfs_priv->read_reg.len;
+ addr = debugfs_priv->read_reg.addr;
+ switch (len) {
+ case 1:
+ val = rtw_read8(rtwdev, addr);
+ seq_printf(m, "reg 0x%03x: 0x%02x\n", addr, val);
+ break;
+ case 2:
+ val = rtw_read16(rtwdev, addr);
+ seq_printf(m, "reg 0x%03x: 0x%04x\n", addr, val);
+ break;
+ case 4:
+ val = rtw_read32(rtwdev, addr);
+ seq_printf(m, "reg 0x%03x: 0x%08x\n", addr, val);
+ break;
+ }
+ return 0;
+}
+
+static int rtw_debugfs_get_rf_read(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val, addr, mask;
+ u8 path;
+
+ path = debugfs_priv->rf_path;
+ addr = debugfs_priv->rf_addr;
+ mask = debugfs_priv->rf_mask;
+
+ val = rtw_read_rf(rtwdev, path, addr, mask);
+
+ seq_printf(m, "rf_read path:%d addr:0x%08x mask:0x%08x val=0x%08x\n",
+ path, addr, mask, val);
+
+ return 0;
+}
+
+static int rtw_debugfs_copy_from_user(char tmp[], int size,
+ const char __user *buffer, size_t count,
+ int num)
+{
+ int tmp_len;
+
+ if (count < num)
+ return -EFAULT;
+
+ tmp_len = (count > size - 1 ? size - 1 : count);
+
+ if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+ return count;
+
+ tmp[tmp_len] = '\0';
+
+ return 0;
+}
+
+static ssize_t rtw_debugfs_set_read_reg(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 addr, len;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
+
+ num = sscanf(tmp, "%x %x", &addr, &len);
+
+ if (num != 2)
+ return count;
+
+ if (len != 1 && len != 2 && len != 4) {
+ rtw_warn(rtwdev, "read reg setting wrong len\n");
+ return -EINVAL;
+ }
+ debugfs_priv->read_reg.addr = addr;
+ debugfs_priv->read_reg.len = len;
+
+ return count;
+}
+
+static int rtw_debugfs_get_dump_cam(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val, command;
+ u32 hw_key_idx = debugfs_priv->cb_data << RTW_SEC_CAM_ENTRY_SHIFT;
+ u32 read_cmd = RTW_SEC_CMD_POLLING;
+ int i;
+
+ seq_printf(m, "cam entry%d\n", debugfs_priv->cb_data);
+ seq_puts(m, "0x0 0x1 0x2 0x3 ");
+ seq_puts(m, "0x4 0x5\n");
+ mutex_lock(&rtwdev->mutex);
+ for (i = 0; i <= 5; i++) {
+ command = read_cmd | (hw_key_idx + i);
+ rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
+ val = rtw_read32(rtwdev, RTW_SEC_READ_REG);
+ seq_printf(m, "%8.8x", val);
+ if (i < 2)
+ seq_puts(m, " ");
+ }
+ seq_puts(m, "\n");
+ mutex_unlock(&rtwdev->mutex);
+ return 0;
+}
+
+static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u8 page_size = rtwdev->chip->page_size;
+ u32 buf_size = debugfs_priv->rsvd_page.page_num * page_size;
+ u32 offset = debugfs_priv->rsvd_page.page_offset * page_size;
+ u8 *buf;
+ int i;
+ int ret;
+
+ buf = vzalloc(buf_size);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = rtw_dump_drv_rsvd_page(rtwdev, offset, buf_size, (u32 *)buf);
+ if (ret) {
+ rtw_err(rtwdev, "failed to dump rsvd page\n");
+ vfree(buf);
+ return ret;
+ }
+
+ for (i = 0 ; i < buf_size ; i += 8) {
+ if (i % page_size == 0)
+ seq_printf(m, "PAGE %d\n", (i + offset) / page_size);
+ seq_printf(m, "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
+ *(buf + i), *(buf + i + 1),
+ *(buf + i + 2), *(buf + i + 3),
+ *(buf + i + 4), *(buf + i + 5),
+ *(buf + i + 6), *(buf + i + 7));
+ }
+ vfree(buf);
+
+ return 0;
+}
+
+static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 offset, page_num;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
+
+ num = sscanf(tmp, "%d %d", &offset, &page_num);
+
+ if (num != 2) {
+ rtw_warn(rtwdev, "invalid arguments\n");
+ return num;
+ }
+
+ debugfs_priv->rsvd_page.page_offset = offset;
+ debugfs_priv->rsvd_page.page_num = page_num;
+
+ return count;
+}
+
+static ssize_t rtw_debugfs_set_single_input(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 input;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+
+ num = kstrtou32(tmp, 0, &input);
+
+ if (num) {
+ rtw_warn(rtwdev, "kstrtou32 failed\n");
+ return num;
+ }
+
+ debugfs_priv->cb_data = input;
+
+ return count;
+}
+
+static ssize_t rtw_debugfs_set_write_reg(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 addr, val, len;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+
+ /* write BB/MAC register */
+ num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+ if (num != 3)
+ return count;
+
+ switch (len) {
+ case 1:
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "reg write8 0x%03x: 0x%08x\n", addr, val);
+ rtw_write8(rtwdev, addr, (u8)val);
+ break;
+ case 2:
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "reg write16 0x%03x: 0x%08x\n", addr, val);
+ rtw_write16(rtwdev, addr, (u16)val);
+ break;
+ case 4:
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "reg write32 0x%03x: 0x%08x\n", addr, val);
+ rtw_write32(rtwdev, addr, (u32)val);
+ break;
+ default:
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "error write length = %d\n", len);
+ break;
+ }
+
+ return count;
+}
+
+static ssize_t rtw_debugfs_set_rf_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 path, addr, mask, val;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 4);
+
+ num = sscanf(tmp, "%x %x %x %x", &path, &addr, &mask, &val);
+
+ if (num != 4) {
+ rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n");
+ return count;
+ }
+
+ rtw_write_rf(rtwdev, path, addr, mask, val);
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "write_rf path:%d addr:0x%08x mask:0x%08x, val:0x%08x\n",
+ path, addr, mask, val);
+
+ return count;
+}
+
+static ssize_t rtw_debugfs_set_rf_read(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 path, addr, mask;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+
+ num = sscanf(tmp, "%x %x %x", &path, &addr, &mask);
+
+ if (num != 3) {
+ rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n");
+ return count;
+ }
+
+ debugfs_priv->rf_path = path;
+ debugfs_priv->rf_addr = addr;
+ debugfs_priv->rf_mask = mask;
+
+ return count;
+}
+
+static int rtw_debug_get_mac_page(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val;
+ u32 page = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0xff;
+
+ val = rtw_read32(rtwdev, debugfs_priv->cb_data);
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtw_read32(rtwdev, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int rtw_debug_get_bb_page(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val;
+ u32 page = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0xff;
+
+ val = rtw_read32(rtwdev, debugfs_priv->cb_data);
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtw_read32(rtwdev, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int rtw_debug_get_rf_dump(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 addr, offset, data;
+ u8 path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ seq_printf(m, "RF path:%d\n", path);
+ for (addr = 0; addr < 0x100; addr += 4) {
+ seq_printf(m, "%8.8x ", addr);
+ for (offset = 0; offset < 4; offset++) {
+ data = rtw_read_rf(rtwdev, path, addr + offset,
+ 0xffffffff);
+ seq_printf(m, "%8.8x ", data);
+ }
+ seq_puts(m, "\n");
+ }
+ seq_puts(m, "\n");
+ }
+
+ return 0;
+}
+
+#define rtw_debug_impl_mac(page, addr) \
+static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
+ .cb_read = rtw_debug_get_mac_page, \
+ .cb_data = addr, \
+}
+
+rtw_debug_impl_mac(0, 0x0000);
+rtw_debug_impl_mac(1, 0x0100);
+rtw_debug_impl_mac(2, 0x0200);
+rtw_debug_impl_mac(3, 0x0300);
+rtw_debug_impl_mac(4, 0x0400);
+rtw_debug_impl_mac(5, 0x0500);
+rtw_debug_impl_mac(6, 0x0600);
+rtw_debug_impl_mac(7, 0x0700);
+rtw_debug_impl_mac(10, 0x1000);
+rtw_debug_impl_mac(11, 0x1100);
+rtw_debug_impl_mac(12, 0x1200);
+rtw_debug_impl_mac(13, 0x1300);
+rtw_debug_impl_mac(14, 0x1400);
+rtw_debug_impl_mac(15, 0x1500);
+rtw_debug_impl_mac(16, 0x1600);
+rtw_debug_impl_mac(17, 0x1700);
+
+#define rtw_debug_impl_bb(page, addr) \
+static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = { \
+ .cb_read = rtw_debug_get_bb_page, \
+ .cb_data = addr, \
+}
+
+rtw_debug_impl_bb(8, 0x0800);
+rtw_debug_impl_bb(9, 0x0900);
+rtw_debug_impl_bb(a, 0x0a00);
+rtw_debug_impl_bb(b, 0x0b00);
+rtw_debug_impl_bb(c, 0x0c00);
+rtw_debug_impl_bb(d, 0x0d00);
+rtw_debug_impl_bb(e, 0x0e00);
+rtw_debug_impl_bb(f, 0x0f00);
+rtw_debug_impl_bb(18, 0x1800);
+rtw_debug_impl_bb(19, 0x1900);
+rtw_debug_impl_bb(1a, 0x1a00);
+rtw_debug_impl_bb(1b, 0x1b00);
+rtw_debug_impl_bb(1c, 0x1c00);
+rtw_debug_impl_bb(1d, 0x1d00);
+rtw_debug_impl_bb(1e, 0x1e00);
+rtw_debug_impl_bb(1f, 0x1f00);
+rtw_debug_impl_bb(2c, 0x2c00);
+rtw_debug_impl_bb(2d, 0x2d00);
+rtw_debug_impl_bb(40, 0x4000);
+rtw_debug_impl_bb(41, 0x4100);
+
+static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = {
+ .cb_read = rtw_debug_get_rf_dump,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_write_reg = {
+ .cb_write = rtw_debugfs_set_write_reg,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_rf_write = {
+ .cb_write = rtw_debugfs_set_rf_write,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_rf_read = {
+ .cb_write = rtw_debugfs_set_rf_read,
+ .cb_read = rtw_debugfs_get_rf_read,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_read_reg = {
+ .cb_write = rtw_debugfs_set_read_reg,
+ .cb_read = rtw_debugfs_get_read_reg,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = {
+ .cb_write = rtw_debugfs_set_single_input,
+ .cb_read = rtw_debugfs_get_dump_cam,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
+ .cb_write = rtw_debugfs_set_rsvd_page,
+ .cb_read = rtw_debugfs_get_rsvd_page,
+};
+
+#define rtw_debugfs_add_core(name, mode, fopname, parent) \
+ do { \
+ rtw_debug_priv_ ##name.rtwdev = rtwdev; \
+ if (!debugfs_create_file(#name, mode, \
+ parent, &rtw_debug_priv_ ##name,\
+ &file_ops_ ##fopname)) \
+ pr_debug("Unable to initialize debugfs:%s\n", \
+ #name); \
+ } while (0)
+
+#define rtw_debugfs_add_w(name) \
+ rtw_debugfs_add_core(name, S_IFREG | 0222, common_write, debugfs_topdir)
+#define rtw_debugfs_add_rw(name) \
+ rtw_debugfs_add_core(name, S_IFREG | 0666, single_rw, debugfs_topdir)
+#define rtw_debugfs_add_r(name) \
+ rtw_debugfs_add_core(name, S_IFREG | 0444, single_r, debugfs_topdir)
+
+void rtw_debugfs_init(struct rtw_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir;
+
+ debugfs_topdir = debugfs_create_dir("rtw88",
+ rtwdev->hw->wiphy->debugfsdir);
+ rtw_debugfs_add_w(write_reg);
+ rtw_debugfs_add_rw(read_reg);
+ rtw_debugfs_add_w(rf_write);
+ rtw_debugfs_add_rw(rf_read);
+ rtw_debugfs_add_rw(dump_cam);
+ rtw_debugfs_add_rw(rsvd_page);
+ rtw_debugfs_add_r(mac_0);
+ rtw_debugfs_add_r(mac_1);
+ rtw_debugfs_add_r(mac_2);
+ rtw_debugfs_add_r(mac_3);
+ rtw_debugfs_add_r(mac_4);
+ rtw_debugfs_add_r(mac_5);
+ rtw_debugfs_add_r(mac_6);
+ rtw_debugfs_add_r(mac_7);
+ rtw_debugfs_add_r(bb_8);
+ rtw_debugfs_add_r(bb_9);
+ rtw_debugfs_add_r(bb_a);
+ rtw_debugfs_add_r(bb_b);
+ rtw_debugfs_add_r(bb_c);
+ rtw_debugfs_add_r(bb_d);
+ rtw_debugfs_add_r(bb_e);
+ rtw_debugfs_add_r(bb_f);
+ rtw_debugfs_add_r(mac_10);
+ rtw_debugfs_add_r(mac_11);
+ rtw_debugfs_add_r(mac_12);
+ rtw_debugfs_add_r(mac_13);
+ rtw_debugfs_add_r(mac_14);
+ rtw_debugfs_add_r(mac_15);
+ rtw_debugfs_add_r(mac_16);
+ rtw_debugfs_add_r(mac_17);
+ rtw_debugfs_add_r(bb_18);
+ rtw_debugfs_add_r(bb_19);
+ rtw_debugfs_add_r(bb_1a);
+ rtw_debugfs_add_r(bb_1b);
+ rtw_debugfs_add_r(bb_1c);
+ rtw_debugfs_add_r(bb_1d);
+ rtw_debugfs_add_r(bb_1e);
+ rtw_debugfs_add_r(bb_1f);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
+ rtw_debugfs_add_r(bb_2c);
+ rtw_debugfs_add_r(bb_2d);
+ rtw_debugfs_add_r(bb_40);
+ rtw_debugfs_add_r(bb_41);
+ }
+ rtw_debugfs_add_r(rf_dump);
+}
+
+#endif /* CONFIG_RTW88_DEBUGFS */
+
+#ifdef CONFIG_RTW88_DEBUG
+
+void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ if (rtw_debug_mask & mask)
+ dev_printk(KERN_DEBUG, rtwdev->dev, "%pV", &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__rtw_dbg);
+
+#endif /* CONFIG_RTW88_DEBUG */
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
new file mode 100644
index 000000000000..45851cbbd2ab
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_DEBUG_H
+#define __RTW_DEBUG_H
+
+enum rtw_debug_mask {
+ RTW_DBG_PCI = 0x00000001,
+ RTW_DBG_TX = 0x00000002,
+ RTW_DBG_RX = 0x00000004,
+ RTW_DBG_PHY = 0x00000008,
+ RTW_DBG_FW = 0x00000010,
+ RTW_DBG_EFUSE = 0x00000020,
+ RTW_DBG_COEX = 0x00000040,
+ RTW_DBG_RFK = 0x00000080,
+ RTW_DBG_REGD = 0x00000100,
+ RTW_DBG_DEBUGFS = 0x00000200,
+
+ RTW_DBG_ALL = 0xffffffff
+};
+
+#ifdef CONFIG_RTW88_DEBUGFS
+
+void rtw_debugfs_init(struct rtw_dev *rtwdev);
+
+#else
+
+static inline void rtw_debugfs_init(struct rtw_dev *rtwdev) {}
+
+#endif /* CONFIG_RTW88_DEBUGFS */
+
+#ifdef CONFIG_RTW88_DEBUG
+
+__printf(3, 4)
+void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
+ const char *fmt, ...);
+
+#define rtw_dbg(rtwdev, a...) __rtw_dbg(rtwdev, ##a)
+
+#else
+
+static inline void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
+ const char *fmt, ...) {}
+
+#endif /* CONFIG_RTW88_DEBUG */
+
+#define rtw_info(rtwdev, a...) dev_info(rtwdev->dev, ##a)
+#define rtw_warn(rtwdev, a...) dev_warn(rtwdev->dev, ##a)
+#define rtw_err(rtwdev, a...) dev_err(rtwdev->dev, ##a)
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.c b/drivers/net/wireless/realtek/rtw88/efuse.c
new file mode 100644
index 000000000000..212c8376a8c9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/efuse.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "efuse.h"
+#include "reg.h"
+#include "debug.h"
+
+#define RTW_EFUSE_BANK_WIFI 0x0
+
+static void switch_efuse_bank(struct rtw_dev *rtwdev)
+{
+ rtw_write32_mask(rtwdev, REG_LDO_EFUSE_CTRL, BIT_MASK_EFUSE_BANK_SEL,
+ RTW_EFUSE_BANK_WIFI);
+}
+
+#define invalid_efuse_header(hdr1, hdr2) \
+ ((hdr1) == 0xff || (((hdr1) & 0x1f) == 0xf && (hdr2) == 0xff))
+#define invalid_efuse_content(word_en, i) \
+ (((word_en) & BIT(i)) != 0x0)
+#define get_efuse_blk_idx_2_byte(hdr1, hdr2) \
+ ((((hdr2) & 0xf0) >> 1) | (((hdr1) >> 5) & 0x07))
+#define get_efuse_blk_idx_1_byte(hdr1) \
+ (((hdr1) & 0xf0) >> 4)
+#define block_idx_to_logical_idx(blk_idx, i) \
+ (((blk_idx) << 3) + ((i) << 1))
+
+/* efuse header format
+ *
+ * | 7 5 4 0 | 7 4 3 0 | 15 8 7 0 |
+ * block[2:0] 0 1111 block[6:3] word_en[3:0] byte0 byte1
+ * | header 1 (optional) | header 2 | word N |
+ *
+ * word_en: 4 bits each word. 0 -> write; 1 -> not write
+ * N: 1~4, depends on word_en
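+ *
+ * e.g. a 1-byte header 0x25 selects block 2 with word_en 0x5, so the two
+ * data words that follow fill logical offsets 0x12-0x13 and 0x16-0x17
+ * (words 0 and 2 are masked out)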
+ */
+static int rtw_dump_logical_efuse_map(struct rtw_dev *rtwdev, u8 *phy_map,
+ u8 *log_map)
+{
+ u32 physical_size = rtwdev->efuse.physical_size;
+ u32 protect_size = rtwdev->efuse.protect_size;
+ u32 logical_size = rtwdev->efuse.logical_size;
+ u32 phy_idx, log_idx;
+ u8 hdr1, hdr2;
+ u8 blk_idx;
+ u8 word_en;
+ int i;
+
+ for (phy_idx = 0; phy_idx < physical_size - protect_size;) {
+ hdr1 = phy_map[phy_idx];
+ hdr2 = phy_map[phy_idx + 1];
+ if (invalid_efuse_header(hdr1, hdr2))
+ break;
+
+ if ((hdr1 & 0x1f) == 0xf) {
+ /* 2-byte header format */
+ blk_idx = get_efuse_blk_idx_2_byte(hdr1, hdr2);
+ word_en = hdr2 & 0xf;
+ phy_idx += 2;
+ } else {
+ /* 1-byte header format */
+ blk_idx = get_efuse_blk_idx_1_byte(hdr1);
+ word_en = hdr1 & 0xf;
+ phy_idx += 1;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (invalid_efuse_content(word_en, i))
+ continue;
+
+ log_idx = block_idx_to_logical_idx(blk_idx, i);
+ if (phy_idx + 1 > physical_size - protect_size ||
+ log_idx + 1 > logical_size)
+ return -EINVAL;
+
+ log_map[log_idx] = phy_map[phy_idx];
+ log_map[log_idx + 1] = phy_map[phy_idx + 1];
+ phy_idx += 2;
+ }
+ }
+ return 0;
+}
+
+static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 size = rtwdev->efuse.physical_size;
+ u32 efuse_ctl;
+ u32 addr;
+ u32 cnt;
+
+ switch_efuse_bank(rtwdev);
+
+ /* disable 2.5V LDO */
+ chip->ops->cfg_ldo25(rtwdev, false);
+
+ efuse_ctl = rtw_read32(rtwdev, REG_EFUSE_CTRL);
+
+ for (addr = 0; addr < size; addr++) {
+ efuse_ctl &= ~(BIT_MASK_EF_DATA | BITS_EF_ADDR);
+ efuse_ctl |= (addr & BIT_MASK_EF_ADDR) << BIT_SHIFT_EF_ADDR;
+ rtw_write32(rtwdev, REG_EFUSE_CTRL, efuse_ctl & (~BIT_EF_FLAG));
+
+ cnt = 1000000;
+ do {
+ udelay(1);
+ efuse_ctl = rtw_read32(rtwdev, REG_EFUSE_CTRL);
+ if (--cnt == 0)
+ return -EBUSY;
+ } while (!(efuse_ctl & BIT_EF_FLAG));
+
+ *(map + addr) = (u8)(efuse_ctl & BIT_MASK_EF_DATA);
+ }
+
+ return 0;
+}
+
+int rtw_parse_efuse_map(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u32 phy_size = efuse->physical_size;
+ u32 log_size = efuse->logical_size;
+ u8 *phy_map = NULL;
+ u8 *log_map = NULL;
+ int ret = 0;
+
+ phy_map = kmalloc(phy_size, GFP_KERNEL);
+ log_map = kmalloc(log_size, GFP_KERNEL);
+ if (!phy_map || !log_map) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ ret = rtw_dump_physical_efuse_map(rtwdev, phy_map);
+ if (ret) {
+ rtw_err(rtwdev, "failed to dump efuse physical map\n");
+ goto out_free;
+ }
+
+ memset(log_map, 0xff, log_size);
+ ret = rtw_dump_logical_efuse_map(rtwdev, phy_map, log_map);
+ if (ret) {
+ rtw_err(rtwdev, "failed to dump efuse logical map\n");
+ goto out_free;
+ }
+
+ ret = chip->ops->read_efuse(rtwdev, log_map);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read efuse map\n");
+ goto out_free;
+ }
+
+out_free:
+ kfree(log_map);
+ kfree(phy_map);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.h b/drivers/net/wireless/realtek/rtw88/efuse.h
new file mode 100644
index 000000000000..115bbe85946a
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/efuse.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_EFUSE_H__
+#define __RTW_EFUSE_H__
+
+#define EFUSE_HW_CAP_IGNORE 0
+#define EFUSE_HW_CAP_PTCL_VHT 3
+#define EFUSE_HW_CAP_SUPP_BW80 7
+#define EFUSE_HW_CAP_SUPP_BW40 6
+
+#define GET_EFUSE_HW_CAP_HCI(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(3, 0))
+#define GET_EFUSE_HW_CAP_BW(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(18, 16))
+#define GET_EFUSE_HW_CAP_NSS(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(20, 19))
+#define GET_EFUSE_HW_CAP_ANT_NUM(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(23, 21))
+#define GET_EFUSE_HW_CAP_PTCL(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(27, 26))
+
+int rtw_parse_efuse_map(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
new file mode 100644
index 000000000000..cf4265cda224
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "tx.h"
+#include "reg.h"
+#include "debug.h"
+
+void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_c2h_cmd *c2h;
+ u8 sub_cmd_id;
+
+ c2h = get_c2h_from_skb(skb);
+ sub_cmd_id = c2h->payload[0];
+
+ switch (sub_cmd_id) {
+ case C2H_CCX_RPT:
+ rtw_tx_report_handle(rtwdev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_c2h_cmd *c2h;
+ u32 pkt_offset;
+ u8 len;
+
+ pkt_offset = *((u32 *)skb->cb);
+ c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
+ len = skb->len - pkt_offset - 2;
+
+ rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
+ c2h->id, c2h->seq, len);
+
+ switch (c2h->id) {
+ case C2H_HALMAC:
+ rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, u8 *h2c)
+{
+ u8 box;
+ u8 box_state;
+ u32 box_reg, box_ex_reg;
+ u32 h2c_wait;
+ int idx;
+
+ rtw_dbg(rtwdev, RTW_DBG_FW,
+ "send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ h2c[3], h2c[2], h2c[1], h2c[0],
+ h2c[7], h2c[6], h2c[5], h2c[4]);
+
+ spin_lock(&rtwdev->h2c.lock);
+
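+ /* H2C commands go out through four rotating mailbox register pairs
+  * (HMEBOX/HMEBOX_EX); wait until the firmware has consumed the selected
+  * box, i.e. its bit in REG_HMETFR clears, before writing the 8 bytes.
+  */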
+ box = rtwdev->h2c.last_box_num;
+ switch (box) {
+ case 0:
+ box_reg = REG_HMEBOX0;
+ box_ex_reg = REG_HMEBOX0_EX;
+ break;
+ case 1:
+ box_reg = REG_HMEBOX1;
+ box_ex_reg = REG_HMEBOX1_EX;
+ break;
+ case 2:
+ box_reg = REG_HMEBOX2;
+ box_ex_reg = REG_HMEBOX2_EX;
+ break;
+ case 3:
+ box_reg = REG_HMEBOX3;
+ box_ex_reg = REG_HMEBOX3_EX;
+ break;
+ default:
+ WARN(1, "invalid h2c mail box number\n");
+ goto out;
+ }
+
+ h2c_wait = 20;
+ do {
+ box_state = rtw_read8(rtwdev, REG_HMETFR);
+ } while ((box_state >> box) & 0x1 && --h2c_wait > 0);
+
+ if (!h2c_wait) {
+ rtw_err(rtwdev, "failed to send h2c command\n");
+ goto out;
+ }
+
+ for (idx = 0; idx < 4; idx++)
+ rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
+ for (idx = 0; idx < 4; idx++)
+ rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);
+
+ if (++rtwdev->h2c.last_box_num >= 4)
+ rtwdev->h2c.last_box_num = 0;
+
+out:
+ spin_unlock(&rtwdev->h2c.lock);
+}
+
+static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
+{
+ int ret;
+
+ spin_lock(&rtwdev->h2c.lock);
+
+ FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
+ ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
+ if (ret)
+ rtw_err(rtwdev, "failed to send h2c packet\n");
+ rtwdev->h2c.seq++;
+
+ spin_unlock(&rtwdev->h2c.lock);
+}
+
+void
+rtw_fw_send_general_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + 4;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
+
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+
+ GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
+ fifo->rsvd_fw_txbuf_addr -
+ fifo->rsvd_boundary);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void
+rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + 8;
+ u8 fw_rf_type = 0;
+
+ if (hal->rf_type == RF_1T1R)
+ fw_rf_type = FW_RF_1T1R;
+ else if (hal->rf_type == RF_2T2R)
+ fw_rf_type = FW_RF_2T2R;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);
+
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+ PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
+ PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
+ PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
+ PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
+ PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + 1;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+ IQK_SET_CLEAR(h2c_pkt, para->clear);
+ IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 rssi = ewma_rssi_read(&si->avg_rssi);
+ bool stbc_en = si->stbc_en ? true : false;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);
+
+ SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
+ SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
+ SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ bool no_update = si->updated;
+ bool disable_pt = true;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
+
+ SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
+ SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
+ SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
+ SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
+ SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
+ SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en);
+ SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
+ SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
+ SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
+ SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
+ SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
+ SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
+ SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);
+
+ si->init_ra_lv = 0;
+ si->updated = true;
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
+ MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
+ MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);
+
+ SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
+ SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
+ SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
+ SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
+ SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
+ SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
+ enum rtw_rsvd_packet_type type)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+ u8 location = 0;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (type == rsvd_pkt->type)
+ location = rsvd_pkt->page;
+ }
+
+ return location;
+}
+
+void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 location = 0;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);
+
+ location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
+ *(h2c_pkt + 1) = location;
+ rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);
+
+ location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
+ *(h2c_pkt + 2) = location;
+ rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);
+
+ location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
+ *(h2c_pkt + 3) = location;
+ rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);
+
+ location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
+ *(h2c_pkt + 4) = location;
+ rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+static struct sk_buff *
+rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb_new;
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC &&
+ !ieee80211_vif_is_mesh(vif)) {
+ skb_new = alloc_skb(1, GFP_KERNEL);
+ if (!skb_new)
+ return NULL;
+ skb_put(skb_new, 1);
+ } else {
+ skb_new = ieee80211_beacon_get(hw, vif);
+ }
+
+ return skb_new;
+}
+
+static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum rtw_rsvd_packet_type type)
+{
+ struct sk_buff *skb_new;
+
+ switch (type) {
+ case RSVD_BEACON:
+ skb_new = rtw_beacon_get(hw, vif);
+ break;
+ case RSVD_PS_POLL:
+ skb_new = ieee80211_pspoll_get(hw, vif);
+ break;
+ case RSVD_PROBE_RESP:
+ skb_new = ieee80211_proberesp_get(hw, vif);
+ break;
+ case RSVD_NULL:
+ skb_new = ieee80211_nullfunc_get(hw, vif, false);
+ break;
+ case RSVD_QOS_NULL:
+ skb_new = ieee80211_nullfunc_get(hw, vif, true);
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!skb_new)
+ return NULL;
+
+ return skb_new;
+}
+
+static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_tx_pkt_info pkt_info;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 *pkt_desc;
+
+ memset(&pkt_info, 0, sizeof(pkt_info));
+ rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb);
+ pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
+ memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
+ rtw_tx_fill_tx_desc(&pkt_info, skb);
+}
+
+static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
+{
+ return DIV_ROUND_UP(len, page_size);
+}
+
+static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
+ u8 page_margin, u32 page, u8 *buf,
+ struct rtw_rsvd_page *rsvd_pkt)
+{
+ struct sk_buff *skb = rsvd_pkt->skb;
+
+ if (rsvd_pkt->add_txdesc)
+ rtw_fill_rsvd_page_desc(rtwdev, skb);
+
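+ /* Only page_margin bytes are reserved for the first packet (the
+  * beacon); its TX descriptor is presumably prepended later by the HCI
+  * download path, so every following packet starts at
+  * page_margin + (page - 1) * page_size, matching the buffer size
+  * computed in rtw_build_rsvd_page().
+  */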
+ if (page >= 1)
+ memcpy(buf + page_margin + page_size * (page - 1),
+ skb->data, skb->len);
+ else
+ memcpy(buf, skb->data, skb->len);
+}
+
+void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
+ bool txdesc)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type == type)
+ return;
+ }
+
+ rsvd_pkt = kmalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
+ if (!rsvd_pkt)
+ return;
+
+ rsvd_pkt->type = type;
+ rsvd_pkt->add_txdesc = txdesc;
+ list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+}
+
+void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
+{
+ struct rtw_rsvd_page *rsvd_pkt, *tmp;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type == RSVD_BEACON)
+ continue;
+ list_del(&rsvd_pkt->list);
+ kfree(rsvd_pkt);
+ }
+}
+
+int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
+ u8 *buf, u32 size)
+{
+ u8 bckp[2];
+ u8 val;
+ u16 rsvd_pg_head;
+ int ret;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ if (!size)
+ return -EINVAL;
+
+ pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr | BIT_BCN_VALID_V1);
+
+ val = rtw_read8(rtwdev, REG_CR + 1);
+ bckp[0] = val;
+ val |= BIT_ENSWBCN >> 8;
+ rtw_write8(rtwdev, REG_CR + 1, val);
+
+ val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
+ bckp[1] = val;
+ val &= ~(BIT_EN_BCNQ_DL >> 16);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+
+ ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write data to rsvd page\n");
+ goto restore;
+ }
+
+ if (!check_hw_ready(rtwdev, REG_FIFOPAGE_CTRL_2, BIT_BCN_VALID_V1, 1)) {
+ rtw_err(rtwdev, "error beacon valid\n");
+ ret = -EBUSY;
+ }
+
+restore:
+ rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
+ rsvd_pg_head | BIT_BCN_VALID_V1);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
+ rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
+
+ return ret;
+}
+
+static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+{
+ u32 pg_size;
+ u32 pg_num = 0;
+ u16 pg_addr = 0;
+
+ pg_size = rtwdev->chip->page_size;
+ pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
+ if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
+ return -ENOMEM;
+
+ pg_addr = rtwdev->fifo.rsvd_drv_addr;
+
+ return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
+}
+
+static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif, u32 *size)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct sk_buff *iter;
+ struct rtw_rsvd_page *rsvd_pkt;
+ u32 page = 0;
+ u8 total_page = 0;
+ u8 page_size, page_margin, tx_desc_sz;
+ u8 *buf;
+
+ page_size = chip->page_size;
+ tx_desc_sz = chip->tx_pkt_desc_sz;
+ page_margin = page_size - tx_desc_sz;
+
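+ /* first pass: build each reserved packet, record its starting page and
+  * accumulate the total page count
+  */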
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
+ if (!iter) {
+ rtw_err(rtwdev, "fail to build rsvd packet\n");
+ goto release_skb;
+ }
+ rsvd_pkt->skb = iter;
+ rsvd_pkt->page = total_page;
+ if (rsvd_pkt->add_txdesc)
+ total_page += rtw_len_to_page(iter->len + tx_desc_sz,
+ page_size);
+ else
+ total_page += rtw_len_to_page(iter->len, page_size);
+ }
+
+ if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
+ rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
+ goto release_skb;
+ }
+
+ *size = (total_page - 1) * page_size + page_margin;
+ buf = kzalloc(*size, GFP_KERNEL);
+ if (!buf)
+ goto release_skb;
+
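+ /* second pass: copy every packet into the buffer at its page offset */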
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
+ page, buf, rsvd_pkt);
+ page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
+ }
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ kfree_skb(rsvd_pkt->skb);
+
+ return buf;
+
+release_skb:
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ kfree_skb(rsvd_pkt->skb);
+
+ return NULL;
+}
+
+static int
+rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct sk_buff *skb;
+ int ret = 0;
+
+ skb = rtw_beacon_get(hw, vif);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to get beacon skb\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
+ if (ret)
+ rtw_err(rtwdev, "failed to download drv rsvd page\n");
+
+ dev_kfree_skb(skb);
+
+out:
+ return ret;
+}
+
+int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ u8 *buf;
+ u32 size;
+ int ret;
+
+ buf = rtw_build_rsvd_page(rtwdev, vif, &size);
+ if (!buf) {
+ rtw_err(rtwdev, "failed to build rsvd page pkt\n");
+ return -ENOMEM;
+ }
+
+ ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download drv rsvd page\n");
+ goto free;
+ }
+
+ ret = rtw_download_beacon(rtwdev, vif);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download beacon\n");
+ goto free;
+ }
+
+free:
+ kfree(buf);
+
+ return ret;
+}
+
+int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
+ u32 offset, u32 size, u32 *buf)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ u32 residue, i;
+ u16 start_pg;
+ u16 idx = 0;
+ u16 ctl;
+ u8 rcr;
+
+ if (size & 0x3) {
+ rtw_warn(rtwdev, "should be 4-byte aligned\n");
+ return -EINVAL;
+ }
+
+ offset += fifo->rsvd_boundary << TX_PAGE_SIZE_SHIFT;
+ residue = offset & (FIFO_PAGE_SIZE - 1);
+ start_pg = offset >> FIFO_PAGE_SIZE_SHIFT;
+ start_pg += RSVD_PAGE_START_ADDR;
+
+ rcr = rtw_read8(rtwdev, REG_RCR + 2);
+ ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
+
+ /* disable rx clock gate */
+ rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));
+
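+ /* dump one 4 KB page at a time: select the page through
+  * REG_PKTBUF_DBG_CTRL, then read it back via the FIFO_DUMP_ADDR window
+  * in 4-byte chunks, starting from the requested offset
+  */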
+ do {
+ rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);
+
+ for (i = FIFO_DUMP_ADDR + residue;
+ i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
+ buf[idx++] = rtw_read32(rtwdev, i);
+ size -= 4;
+ if (size == 0)
+ goto out;
+ }
+
+ residue = 0;
+ start_pg++;
+ } while (size);
+
+out:
+ rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
+ rtw_write8(rtwdev, REG_RCR + 2, rcr);
+ return 0;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
new file mode 100644
index 000000000000..703466393ecb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_FW_H_
+#define __RTW_FW_H_
+
+#define H2C_PKT_SIZE 32
+#define H2C_PKT_HDR_SIZE 8
+
+/* FW bin information */
+#define FW_HDR_SIZE 64
+#define FW_HDR_CHKSUM_SIZE 8
+#define FW_HDR_VERSION 4
+#define FW_HDR_SUBVERSION 6
+#define FW_HDR_SUBINDEX 7
+#define FW_HDR_MONTH 16
+#define FW_HDR_DATE 17
+#define FW_HDR_HOUR 18
+#define FW_HDR_MIN 19
+#define FW_HDR_YEAR 20
+#define FW_HDR_MEM_USAGE 24
+#define FW_HDR_H2C_FMT_VER 28
+#define FW_HDR_DMEM_ADDR 32
+#define FW_HDR_DMEM_SIZE 36
+#define FW_HDR_IMEM_SIZE 48
+#define FW_HDR_EMEM_SIZE 52
+#define FW_HDR_EMEM_ADDR 56
+#define FW_HDR_IMEM_ADDR 60
+
+#define FIFO_PAGE_SIZE_SHIFT 12
+#define FIFO_PAGE_SIZE 4096
+#define RSVD_PAGE_START_ADDR 0x780
+#define FIFO_DUMP_ADDR 0x8000
+
+enum rtw_c2h_cmd_id {
+ C2H_BT_INFO = 0x09,
+ C2H_HW_FEATURE_REPORT = 0x19,
+ C2H_HW_FEATURE_DUMP = 0xfd,
+ C2H_HALMAC = 0xff,
+};
+
+enum rtw_c2h_cmd_id_ext {
+ C2H_CCX_RPT = 0x0f,
+};
+
+struct rtw_c2h_cmd {
+ u8 id;
+ u8 seq;
+ u8 payload[0];
+} __packed;
+
+enum rtw_rsvd_packet_type {
+ RSVD_BEACON,
+ RSVD_PS_POLL,
+ RSVD_PROBE_RESP,
+ RSVD_NULL,
+ RSVD_QOS_NULL,
+};
+
+enum rtw_fw_rf_type {
+ FW_RF_1T2R = 0,
+ FW_RF_2T4R = 1,
+ FW_RF_2T2R = 2,
+ FW_RF_2T3R = 3,
+ FW_RF_1T1R = 4,
+ FW_RF_2T2R_GREEN = 5,
+ FW_RF_3T3R = 6,
+ FW_RF_3T4R = 7,
+ FW_RF_4T4R = 8,
+ FW_RF_MAX_TYPE = 0xF,
+};
+
+struct rtw_iqk_para {
+ u8 clear;
+ u8 segment_iqk;
+};
+
+struct rtw_rsvd_page {
+ struct list_head list;
+ struct sk_buff *skb;
+ enum rtw_rsvd_packet_type type;
+ u8 page;
+ bool add_txdesc;
+};
+
+/* C2H */
+#define GET_CCX_REPORT_SEQNUM(c2h_payload) (c2h_payload[8] & 0xfc)
+#define GET_CCX_REPORT_STATUS(c2h_payload) (c2h_payload[9] & 0xc0)
+
+/* PKT H2C */
+#define H2C_PKT_CMD_ID 0xFF
+#define H2C_PKT_CATEGORY 0x01
+
+#define H2C_PKT_GENERAL_INFO 0x0D
+#define H2C_PKT_PHYDM_INFO 0x11
+#define H2C_PKT_IQK 0x0E
+
+#define SET_PKT_H2C_CATEGORY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(6, 0))
+#define SET_PKT_H2C_CMD_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_PKT_H2C_SUB_CMD_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 16))
+#define SET_PKT_H2C_TOTAL_LEN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 0))
+
+static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
+{
+ SET_PKT_H2C_CATEGORY(h2c_pkt, H2C_PKT_CATEGORY);
+ SET_PKT_H2C_CMD_ID(h2c_pkt, H2C_PKT_CMD_ID);
+ SET_PKT_H2C_SUB_CMD_ID(h2c_pkt, sub_id);
+}
+
+#define FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 16))
+#define GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+
+#define PHYDM_INFO_SET_REF_TYPE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(7, 0))
+#define PHYDM_INFO_SET_RF_TYPE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8))
+#define PHYDM_INFO_SET_CUT_VER(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+#define PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24))
+#define PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 28))
+#define IQK_SET_CLEAR(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0))
+#define IQK_SET_SEGMENT_IQK(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+
+/* Command H2C */
+#define H2C_CMD_RSVD_PAGE 0x0
+#define H2C_CMD_MEDIA_STATUS_RPT 0x01
+#define H2C_CMD_SET_PWR_MODE 0x20
+#define H2C_CMD_RA_INFO 0x40
+#define H2C_CMD_RSSI_MONITOR 0x42
+
+#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
+
+#define MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
+#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
+#define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(19, 16))
+#define SET_PWR_MODE_SET_SMART_PS(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 20))
+#define SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_PWR_MODE_SET_PORT_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 5))
+#define SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_RSSI_INFO_STBC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, BIT(1))
+#define SET_RA_INFO_MACID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_RA_INFO_RATE_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(20, 16))
+#define SET_RA_INFO_INIT_RA_LVL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(22, 21))
+#define SET_RA_INFO_SGI_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(23))
+#define SET_RA_INFO_BW_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(25, 24))
+#define SET_RA_INFO_LDPC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(26))
+#define SET_RA_INFO_NO_UPDATE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(27))
+#define SET_RA_INFO_VHT_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(29, 28))
+#define SET_RA_INFO_DIS_PT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(30))
+#define SET_RA_INFO_RA_MASK0(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_RA_INFO_RA_MASK1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_RA_INFO_RA_MASK2(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_RA_INFO_RA_MASK3(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 24))
+
+static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
+{
+ u32 pkt_offset;
+
+ pkt_offset = *((u32 *)skb->cb);
+ return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
+}
+
+void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_fw_send_general_info(struct rtw_dev *rtwdev);
+void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
+
+void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
+void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
+void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
+void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
+ bool txdesc);
+int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
+ u8 *buf, u32 size);
+void rtw_reset_rsvd_page(struct rtw_dev *rtwdev);
+int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif);
+void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
+int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
+ u32 offset, u32 size, u32 *buf);
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
new file mode 100644
index 000000000000..2676582a85a0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_HCI_H__
+#define __RTW_HCI_H__
+
+/* ops for PCI, USB and SDIO */
+struct rtw_hci_ops {
+ int (*tx)(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb);
+ int (*setup)(struct rtw_dev *rtwdev);
+ int (*start)(struct rtw_dev *rtwdev);
+ void (*stop)(struct rtw_dev *rtwdev);
+
+ int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
+ int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
+
+ u8 (*read8)(struct rtw_dev *rtwdev, u32 addr);
+ u16 (*read16)(struct rtw_dev *rtwdev, u32 addr);
+ u32 (*read32)(struct rtw_dev *rtwdev, u32 addr);
+ void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val);
+ void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val);
+ void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
+};
+
+static inline int rtw_hci_tx(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
+{
+ return rtwdev->hci.ops->tx(rtwdev, pkt_info, skb);
+}
+
+static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
+{
+ return rtwdev->hci.ops->setup(rtwdev);
+}
+
+static inline int rtw_hci_start(struct rtw_dev *rtwdev)
+{
+ return rtwdev->hci.ops->start(rtwdev);
+}
+
+static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
+{
+ rtwdev->hci.ops->stop(rtwdev);
+}
+
+static inline int
+rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+{
+ return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size);
+}
+
+static inline int
+rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+{
+ return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size);
+}
+
+static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read8(rtwdev, addr);
+}
+
+static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read16(rtwdev, addr);
+}
+
+static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read32(rtwdev, addr);
+}
+
+static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
+{
+ rtwdev->hci.ops->write8(rtwdev, addr, val);
+}
+
+static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
+{
+ rtwdev->hci.ops->write16(rtwdev, addr, val);
+}
+
+static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
+{
+ rtwdev->hci.ops->write32(rtwdev, addr, val);
+}
+
+static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
+{
+ u8 val;
+
+ val = rtw_read8(rtwdev, addr);
+ rtw_write8(rtwdev, addr, val | bit);
+}
+
+static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
+{
+ u16 val;
+
+ val = rtw_read16(rtwdev, addr);
+ rtw_write16(rtwdev, addr, val | bit);
+}
+
+static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
+{
+ u32 val;
+
+ val = rtw_read32(rtwdev, addr);
+ rtw_write32(rtwdev, addr, val | bit);
+}
+
+static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit)
+{
+ u8 val;
+
+ val = rtw_read8(rtwdev, addr);
+ rtw_write8(rtwdev, addr, val & ~bit);
+}
+
+static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit)
+{
+ u16 val;
+
+ val = rtw_read16(rtwdev, addr);
+ rtw_write16(rtwdev, addr, val & ~bit);
+}
+
+static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
+{
+ u32 val;
+
+ val = rtw_read32(rtwdev, addr);
+ rtw_write32(rtwdev, addr, val & ~bit);
+}
+
+static inline u32
+rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&rtwdev->rf_lock, flags);
+ val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);
+ spin_unlock_irqrestore(&rtwdev->rf_lock, flags);
+
+ return val;
+}
+
+static inline void
+rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwdev->rf_lock, flags);
+ rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
+ spin_unlock_irqrestore(&rtwdev->rf_lock, flags);
+}
+
+static inline u32
+rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw_read32(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
+static inline void
+rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 set;
+
+ WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);
+
+ orig = rtw_read32(rtwdev, addr);
+ set = (orig & ~mask) | ((data << shift) & mask);
+ rtw_write32(rtwdev, addr, set);
+}
+
+static inline void
+rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data)
+{
+ u32 shift;
+ u8 orig, set;
+
+ mask &= 0xff;
+ shift = __ffs(mask);
+
+ orig = rtw_read8(rtwdev, addr);
+ set = (orig & ~mask) | ((data << shift) & mask);
+ rtw_write8(rtwdev, addr, set);
+}
+
+static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
+{
+ return rtwdev->hci.type;
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
new file mode 100644
index 000000000000..25a923bc6366
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "mac.h"
+#include "reg.h"
+#include "fw.h"
+#include "debug.h"
+
+void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ u8 txsc40 = 0, txsc20 = 0;
+ u32 value32;
+ u8 value8;
+
+ txsc20 = primary_ch_idx;
+ if (txsc20 == 1 || txsc20 == 3)
+ txsc40 = 9;
+ else
+ txsc40 = 10;
+ rtw_write8(rtwdev, REG_DATA_SC,
+ BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
+
+ value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
+ value32 &= ~BIT_RFMOD;
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_80:
+ value32 |= BIT_RFMOD_80M;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ value32 |= BIT_RFMOD_40M;
+ break;
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ break;
+ }
+ rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);
+
+ value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
+ value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
+ rtw_write32(rtwdev, REG_AFE_CTRL1, value32);
+
+ rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
+ rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);
+
+ value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
+ value8 = value8 & ~BIT_CHECK_CCK_EN;
+ if (channel > 35)
+ value8 |= BIT_CHECK_CCK_EN;
+ rtw_write8(rtwdev, REG_CCK_CHECK, value8);
+}
+
+static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
+{
+ u32 value32;
+ u8 value8;
+
+ rtw_write8(rtwdev, REG_RSV_CTRL, 0);
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
+ break;
+ case RTW_HCI_TYPE_USB:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* config PIN Mux */
+ value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
+ value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
+ rtw_write32(rtwdev, REG_PAD_CTRL1, value32);
+
+ value32 = rtw_read32(rtwdev, REG_LED_CFG);
+ value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
+ rtw_write32(rtwdev, REG_LED_CFG, value32);
+
+ value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
+ value32 |= BIT_WLRFE_4_5_EN;
+ rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);
+
+ /* disable BB/RF */
+ value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
+ value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
+ rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);
+
+ value8 = rtw_read8(rtwdev, REG_RF_CTRL);
+ value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
+ rtw_write8(rtwdev, REG_RF_CTRL, value8);
+
+ value32 = rtw_read32(rtwdev, REG_WLRF1);
+ value32 &= ~BIT_WLRF1_BBRF_EN;
+ rtw_write32(rtwdev, REG_WLRF1, value32);
+
+ return 0;
+}
+
+static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
+ struct rtw_pwr_seq_cmd *cmd)
+{
+ u8 value;
+ u8 flag = 0;
+ u32 offset;
+ u32 cnt = RTW_PWR_POLLING_CNT;
+
+ if (cmd->base == RTW_PWR_ADDR_SDIO)
+ offset = cmd->offset | SDIO_LOCAL_OFFSET;
+ else
+ offset = cmd->offset;
+
+ do {
+ cnt--;
+ value = rtw_read8(rtwdev, offset);
+ value &= cmd->mask;
+ if (value == (cmd->value & cmd->mask))
+ return 0;
+ if (cnt == 0) {
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
+ flag == 0) {
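+ /* PCIe gets one retry: toggle BIT(3) of
+  * REG_SYS_PW_CTRL and restart the polling loop
+  */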
+ value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
+ value |= BIT(3);
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
+ value &= ~BIT(3);
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
+ cnt = RTW_PWR_POLLING_CNT;
+ flag = 1;
+ } else {
+ return -EBUSY;
+ }
+ } else {
+ udelay(50);
+ }
+ } while (1);
+}
+
+static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
+ u8 cut_mask, struct rtw_pwr_seq_cmd *cmd)
+{
+ struct rtw_pwr_seq_cmd *cur_cmd;
+ u32 offset;
+ u8 value;
+
+ for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
+ if (!(cur_cmd->intf_mask & intf_mask) ||
+ !(cur_cmd->cut_mask & cut_mask))
+ continue;
+
+ switch (cur_cmd->cmd) {
+ case RTW_PWR_CMD_WRITE:
+ offset = cur_cmd->offset;
+
+ if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
+ offset |= SDIO_LOCAL_OFFSET;
+
+ value = rtw_read8(rtwdev, offset);
+ value &= ~cur_cmd->mask;
+ value |= (cur_cmd->value & cur_cmd->mask);
+ rtw_write8(rtwdev, offset, value);
+ break;
+ case RTW_PWR_CMD_POLLING:
+ if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
+ return -EBUSY;
+ break;
+ case RTW_PWR_CMD_DELAY:
+ if (cur_cmd->value == RTW_PWR_DELAY_US)
+ udelay(cur_cmd->offset);
+ else
+ mdelay(cur_cmd->offset);
+ break;
+ case RTW_PWR_CMD_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
+ struct rtw_pwr_seq_cmd **cmd_seq)
+{
+ u8 cut_mask;
+ u8 intf_mask;
+ u8 cut;
+ u32 idx = 0;
+ struct rtw_pwr_seq_cmd *cmd;
+ int ret;
+
+ cut = rtwdev->hal.cut_version;
+ cut_mask = cut_version_to_mask(cut);
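+ /* each power-sequence command is tagged with the interfaces and chip
+  * cuts it applies to; build the masks used to filter the commands
+  */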
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ intf_mask = BIT(2);
+ break;
+ case RTW_HCI_TYPE_USB:
+ intf_mask = BIT(1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ do {
+ cmd = cmd_seq[idx];
+ if (!cmd)
+ break;
+
+ ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
+ if (ret)
+ return -EBUSY;
+
+ idx++;
+ } while (1);
+
+ return 0;
+}
+
+static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pwr_seq_cmd **pwr_seq;
+ u8 rpwm;
+ bool cur_pwr;
+
+ rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+
+ /* check whether the firmware still exists; if so, toggle RPWM */
+ if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
+ rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
+ rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
+ }
+
+ if (rtw_read8(rtwdev, REG_CR) == 0xea)
+ cur_pwr = false;
+ else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
+ cur_pwr = false;
+ else
+ cur_pwr = true;
+
+ if (pwr_on && cur_pwr)
+ return -EALREADY;
+
+ pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
+ if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
+{
+ u8 sys_func_en = rtwdev->chip->sys_func_en;
+ u8 value8;
+ u32 value, tmp;
+
+ value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
+ value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
+ rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);
+
+ rtw_write8(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
+ value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
+ rtw_write8(rtwdev, REG_CR_EXT + 3, value8);
+
+ /* disable boot-from-flash for driver's DL FW */
+ tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
+ if (tmp & BIT_BOOT_FSPI_EN) {
+ rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
+ value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
+ rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
+ }
+
+ return 0;
+}
+
+int rtw_mac_power_on(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ ret = rtw_mac_pre_system_cfg(rtwdev);
+ if (ret)
+ goto err;
+
+ ret = rtw_mac_power_switch(rtwdev, true);
+ if (ret)
+ goto err;
+
+ ret = rtw_mac_init_system_cfg(rtwdev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ rtw_err(rtwdev, "mac power on failed");
+ return ret;
+}
+
+void rtw_mac_power_off(struct rtw_dev *rtwdev)
+{
+ rtw_mac_power_switch(rtwdev, false);
+}
+
+static bool check_firmware_size(const u8 *data, u32 size)
+{
+ u32 dmem_size;
+ u32 imem_size;
+ u32 emem_size;
+ u32 real_size;
+
+ dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
+ imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
+ emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
+ le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+
+ dmem_size += FW_HDR_CHKSUM_SIZE;
+ imem_size += FW_HDR_CHKSUM_SIZE;
+ emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
+ real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
+ if (real_size != size)
+ return false;
+
+ return true;
+}
+
+static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
+{
+ if (enable) {
+ /* cpu io interface enable */
+ rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
+
+ /* cpu enable */
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
+ } else {
+ /* cpu disable */
+ rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
+
+ /* cpu io interface disable */
+ rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
+ }
+}
+
+#define DLFW_RESTORE_REG_NUM 6
+
+static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *bckp)
+{
+ u8 tmp;
+ u8 bckp_idx = 0;
+
+ /* set HIQ to hi priority */
+ bckp[bckp_idx].len = 1;
+ bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
+ bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
+ bckp_idx++;
+ tmp = RTW_DMA_MAPPING_HIGH << 6;
+ rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);
+
+ /* DLFW only use HIQ, map HIQ to hi priority */
+ bckp[bckp_idx].len = 1;
+ bckp[bckp_idx].reg = REG_CR;
+ bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
+ bckp_idx++;
+ bckp[bckp_idx].len = 4;
+ bckp[bckp_idx].reg = REG_H2CQ_CSR;
+ bckp[bckp_idx].val = BIT_H2CQ_FULL;
+ bckp_idx++;
+ tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
+ rtw_write8(rtwdev, REG_CR, tmp);
+ rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
+
+ /* Config hi priority queue and public priority queue page number */
+ bckp[bckp_idx].len = 2;
+ bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
+ bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
+ bckp_idx++;
+ bckp[bckp_idx].len = 4;
+ bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
+ bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
+ bckp_idx++;
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
+ rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);
+
+ /* Disable beacon related functions */
+ tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
+ bckp[bckp_idx].len = 1;
+ bckp[bckp_idx].reg = REG_BCN_CTRL;
+ bckp[bckp_idx].val = tmp;
+ bckp_idx++;
+ tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
+ rtw_write8(rtwdev, REG_BCN_CTRL, tmp);
+
+ WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
+}
+
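+/* Toggle the WLAN platform reset and CPU clock enable bits (clear then set)
+ * so the MCU platform is reset before the new image is written.
+ */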
+static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
+{
+ rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
+ rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
+ rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
+ rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
+}
+
+static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *bckp,
+ u8 bckp_num)
+{
+ rtw_restore_reg(rtwdev, bckp, bckp_num);
+}
+
+#define TX_DESC_SIZE 48
+
+static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
+ const u8 *data, u32 size)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
+ kfree(buf);
+ return ret;
+}
+
+static int
+send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
+{
+ int ret;
+
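+ /* pad by one byte if the total length (data + TX descriptor) would
+ * land exactly on a 512-byte boundary for USB transfers
+ */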
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ !((size + TX_DESC_SIZE) & (512 - 1)))
+ size += 1;
+
+ ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
+ if (ret)
+ rtw_err(rtwdev, "failed to download rsvd page\n");
+
+ return ret;
+}
+
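+/* Start one transfer on DDMA channel 0: program the source, destination and
+ * control words, then wait for the hardware to clear the OWN bit.
+ */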
+static int
+iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
+{
+ rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
+ rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
+ rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);
+
+ if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
+ u32 len, u8 first)
+{
+ u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;
+
+ if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
+ return -EBUSY;
+
+ ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
+ if (!first)
+ ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;
+
+ if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
+ return -EBUSY;
+
+ return 0;
+}
+
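+/* The DDMA engine reports the accumulated checksum result in CH0CTRL; latch
+ * the per-segment download/checksum status bits into REG_MCUFW_CTRL.
+ * Destination addresses below the DMEM base are handled as IMEM.
+ */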
+static bool
+check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
+{
+ u8 fw_ctrl;
+
+ fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);
+
+ if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
+ if (addr < OCPBASE_DMEM_88XX) {
+ fw_ctrl |= BIT_IMEM_DW_OK;
+ fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+ } else {
+ fw_ctrl |= BIT_DMEM_DW_OK;
+ fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+ }
+
+ rtw_err(rtwdev, "invalid fw checksum\n");
+
+ return false;
+ }
+
+ if (addr < OCPBASE_DMEM_88XX) {
+ fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+ } else {
+ fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+ }
+
+ return true;
+}
+
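+/* Copy one firmware segment into MCU memory: push it to the TX buffer
+ * through reserved pages in chunks of at most 4K, DDMA each chunk from the
+ * TX buffer to its destination, and verify the checksum at the end.
+ */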
+static int
+download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
+ u32 src, u32 dst, u32 size)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 desc_size = chip->tx_pkt_desc_sz;
+ u8 first_part;
+ u32 mem_offset;
+ u32 residue_size;
+ u32 pkt_size;
+ u32 max_size = 0x1000;
+ u32 val;
+ int ret;
+
+ mem_offset = 0;
+ first_part = 1;
+ residue_size = size;
+
+ val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
+ val |= BIT_DDMACH0_RESET_CHKSUM_STS;
+ rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);
+
+ while (residue_size) {
+ if (residue_size >= max_size)
+ pkt_size = max_size;
+ else
+ pkt_size = residue_size;
+
+ ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
+ data + mem_offset, pkt_size);
+ if (ret)
+ return ret;
+
+ ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
+ src + desc_size,
+ dst + mem_offset, pkt_size,
+ first_part);
+ if (ret)
+ return ret;
+
+ first_part = 0;
+ mem_offset += pkt_size;
+ residue_size -= pkt_size;
+ }
+
+ if (!check_fw_checksum(rtwdev, dst))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_firmware_info(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ const u8 *data = fw->firmware->data;
+
+ fw->h2c_version =
+ le16_to_cpu(*((__le16 *)(data + FW_HDR_H2C_FMT_VER)));
+ fw->version =
+ le16_to_cpu(*((__le16 *)(data + FW_HDR_VERSION)));
+ fw->sub_version = *(data + FW_HDR_SUBVERSION);
+ fw->sub_index = *(data + FW_HDR_SUBINDEX);
+
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw h2c version: %x\n", fw->h2c_version);
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw version: %x\n", fw->version);
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub version: %x\n", fw->sub_version);
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub index: %x\n", fw->sub_index);
+}
+
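+/* Parse the segment sizes and load addresses from the firmware header,
+ * enable MCU firmware download and transfer DMEM, IMEM and, if present,
+ * EMEM one after another.
+ */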
+static int
+start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
+{
+ const u8 *cur_fw;
+ u16 val;
+ u32 imem_size;
+ u32 dmem_size;
+ u32 emem_size;
+ u32 addr;
+ int ret;
+
+ dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
+ imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
+ emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
+ le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+ dmem_size += FW_HDR_CHKSUM_SIZE;
+ imem_size += FW_HDR_CHKSUM_SIZE;
+ emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
+
+ val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
+ val |= BIT_MCUFWDL_EN;
+ rtw_write16(rtwdev, REG_MCUFW_CTRL, val);
+
+ cur_fw = data + FW_HDR_SIZE;
+ addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_ADDR)));
+ addr &= ~BIT(31);
+ ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
+ if (ret)
+ return ret;
+
+ cur_fw = data + FW_HDR_SIZE + dmem_size;
+ addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_ADDR)));
+ addr &= ~BIT(31);
+ ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
+ if (ret)
+ return ret;
+
+ if (emem_size) {
+ cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
+ addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_ADDR)));
+ addr &= ~BIT(31);
+ ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
+ emem_size);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int download_firmware_validate(struct rtw_dev *rtwdev)
+{
+ u32 fw_key;
+
+ if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
+ fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
+ if (fw_key == ILLEGAL_KEY_GROUP)
+ rtw_err(rtwdev, "invalid fw key\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void download_firmware_end_flow(struct rtw_dev *rtwdev)
+{
+ u16 fw_ctrl;
+
+ rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);
+
+ /* Check IMEM & DMEM checksum is OK or not */
+ fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
+ if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
+ return;
+
+ fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
+ rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+}
+
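+/* Download flow: validate the image, save the LTE coex register, halt the
+ * WLAN CPU, back up and reconfigure the TX path, move the image into MCU
+ * memory via DDMA, restore the registers, restart the CPU and wait for the
+ * firmware ready indication.
+ */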
+int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
+{
+ struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
+ const u8 *data = fw->firmware->data;
+ u32 size = fw->firmware->size;
+ u32 ltecoex_bckp;
+ int ret;
+
+ if (!check_firmware_size(data, size))
+ return -EINVAL;
+
+ if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
+ return -EBUSY;
+
+ wlan_cpu_enable(rtwdev, false);
+
+ download_firmware_reg_backup(rtwdev, bckp);
+ download_firmware_reset_platform(rtwdev);
+
+ ret = start_download_firmware(rtwdev, data, size);
+ if (ret)
+ goto dlfw_fail;
+
+ download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);
+
+ download_firmware_end_flow(rtwdev);
+
+ wlan_cpu_enable(rtwdev, true);
+
+ if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
+ return -EBUSY;
+
+ ret = download_firmware_validate(rtwdev);
+ if (ret)
+ goto dlfw_fail;
+
+ update_firmware_info(rtwdev, fw);
+
+ /* reset desc and index */
+ rtw_hci_setup(rtwdev);
+
+ rtwdev->h2c.last_box_num = 0;
+ rtwdev->h2c.seq = 0;
+
+ rtw_fw_send_general_info(rtwdev);
+ rtw_fw_send_phydm_info(rtwdev);
+
+ rtw_flag_set(rtwdev, RTW_FLAG_FW_RUNNING);
+
+ return 0;
+
+dlfw_fail:
+ /* Disable FWDL_EN */
+ rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
+
+ return ret;
+}
+
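+/* Select the RQPN entry for this host interface (PCIe, or USB by bulk-out
+ * endpoint count), program how each TX queue maps onto the DMA rings and
+ * then re-enable MAC TX/RX.
+ */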
+static int txdma_queue_mapping(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_rqpn *rqpn = NULL;
+ u16 txdma_pq_map = 0;
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rqpn = &chip->rqpn_table[1];
+ break;
+ case RTW_HCI_TYPE_USB:
+ if (rtwdev->hci.bulkout_num == 2)
+ rqpn = &chip->rqpn_table[2];
+ else if (rtwdev->hci.bulkout_num == 3)
+ rqpn = &chip->rqpn_table[3];
+ else if (rtwdev->hci.bulkout_num == 4)
+ rqpn = &chip->rqpn_table[4];
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
+ txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
+ txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
+ txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
+ txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
+ txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
+ rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);
+
+ rtw_write8(rtwdev, REG_CR, 0);
+ rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
+ rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
+
+ return 0;
+}
+
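+/* Lay out the reserved pages from the top of the TX FIFO downwards: CSI
+ * buffer, firmware TX buffer, CPU instructions, H2C queue, H2C static/extra
+ * info and driver pages. The pages below the reserved boundary are left for
+ * the AC queues.
+ */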
+static int set_trx_fifo_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u16 cur_pg_addr;
+ u8 csi_buf_pg_num = chip->csi_buf_pg_num;
+
+ /* config rsvd page num */
+ fifo->rsvd_drv_pg_num = 8;
+ fifo->txff_pg_num = chip->txff_size >> 7;
+ fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
+ RSVD_PG_H2C_EXTRAINFO_NUM +
+ RSVD_PG_H2C_STATICINFO_NUM +
+ RSVD_PG_H2CQ_NUM +
+ RSVD_PG_CPU_INSTRUCTION_NUM +
+ RSVD_PG_FW_TXBUF_NUM +
+ csi_buf_pg_num;
+
+ if (fifo->rsvd_pg_num > fifo->txff_pg_num)
+ return -ENOMEM;
+
+ fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
+ fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;
+
+ cur_pg_addr = fifo->txff_pg_num;
+ cur_pg_addr -= csi_buf_pg_num;
+ fifo->rsvd_csibuf_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
+ fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
+ fifo->rsvd_cpu_instr_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2CQ_NUM;
+ fifo->rsvd_h2cq_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
+ fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
+ fifo->rsvd_h2c_info_addr = cur_pg_addr;
+ cur_pg_addr -= fifo->rsvd_drv_pg_num;
+ fifo->rsvd_drv_addr = cur_pg_addr;
+
+ if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
+ rtw_err(rtwdev, "wrong rsvd driver address\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
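+/* Distribute the non-reserved pages among the high/low/normal/extra queues
+ * according to the chip's page table, give the remainder to the public
+ * pool, program the reserved page boundaries and let the hardware
+ * auto-initialize the LLT.
+ */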
+static int priority_queue_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_page_table *pg_tbl = NULL;
+ u16 pubq_num;
+ int ret;
+
+ ret = set_trx_fifo_info(rtwdev);
+ if (ret)
+ return ret;
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ pg_tbl = &chip->page_table[1];
+ break;
+ case RTW_HCI_TYPE_USB:
+ if (rtwdev->hci.bulkout_num == 2)
+ pg_tbl = &chip->page_table[2];
+ else if (rtwdev->hci.bulkout_num == 3)
+ pg_tbl = &chip->page_table[3];
+ else if (rtwdev->hci.bulkout_num == 4)
+ pg_tbl = &chip->page_table[4];
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
+ pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
+ rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);
+
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
+ rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);
+
+ rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
+ rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
+ rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
+
+ if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
+ return -EBUSY;
+
+ rtw_write8(rtwdev, REG_CR + 3, 0);
+
+ return 0;
+}
+
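+/* Point the H2C queue head/read/tail registers at the reserved H2CQ pages
+ * and check that the ring comes up with its full size free.
+ */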
+static int init_h2c(struct rtw_dev *rtwdev)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ u8 value8;
+ u32 value32;
+ u32 h2cq_addr;
+ u32 h2cq_size;
+ u32 h2cq_free;
+ u32 wp, rp;
+
+ h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
+ h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;
+
+ value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
+ value32 = (value32 & 0xFFFC0000) | h2cq_addr;
+ rtw_write32(rtwdev, REG_H2C_HEAD, value32);
+
+ value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
+ value32 = (value32 & 0xFFFC0000) | h2cq_addr;
+ rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);
+
+ value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
+ value32 &= 0xFFFC0000;
+ value32 |= (h2cq_addr + h2cq_size);
+ rtw_write32(rtwdev, REG_H2C_TAIL, value32);
+
+ value8 = rtw_read8(rtwdev, REG_H2C_INFO);
+ value8 = (u8)((value8 & 0xFC) | 0x01);
+ rtw_write8(rtwdev, REG_H2C_INFO, value8);
+
+ value8 = rtw_read8(rtwdev, REG_H2C_INFO);
+ value8 = (u8)((value8 & 0xFB) | 0x04);
+ rtw_write8(rtwdev, REG_H2C_INFO, value8);
+
+ value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
+ value8 = (u8)((value8 & 0x7f) | 0x80);
+ rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);
+
+ wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
+ rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
+ h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;
+
+ if (h2cq_size != h2cq_free) {
+ rtw_err(rtwdev, "H2C queue mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = txdma_queue_mapping(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = priority_queue_cfg(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = init_h2c(rtwdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
+{
+ u8 value8;
+
+ rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
+ value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
+ value8 &= 0xF0;
+ /* workaround for the rxdesc len = 0 issue */
+ value8 |= 0xF;
+ rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
+ rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
+ rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));
+
+ return 0;
+}
+
+int rtw_mac_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ int ret;
+
+ ret = rtw_init_trx_cfg(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = chip->ops->mac_init(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = rtw_drv_info_cfg(rtwdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
new file mode 100644
index 000000000000..efe6f731f240
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_MAC_H__
+#define __RTW_MAC_H__
+
+#define RTW_HW_PORT_NUM 5
+#define cut_version_to_mask(cut) (0x1 << ((cut) + 1))
+#define SDIO_LOCAL_OFFSET 0x10250000
+#define DDMA_POLLING_COUNT 1000
+#define C2H_PKT_BUF 256
+#define PHY_STATUS_SIZE 4
+#define ILLEGAL_KEY_GROUP 0xFAAAAA00
+
+/* HW memory address */
+#define OCPBASE_TXBUF_88XX 0x18780000
+#define OCPBASE_DMEM_88XX 0x00200000
+#define OCPBASE_EMEM_88XX 0x00100000
+
+#define RSVD_PG_DRV_NUM 16
+#define RSVD_PG_H2C_EXTRAINFO_NUM 24
+#define RSVD_PG_H2C_STATICINFO_NUM 8
+#define RSVD_PG_H2CQ_NUM 8
+#define RSVD_PG_CPU_INSTRUCTION_NUM 0
+#define RSVD_PG_FW_TXBUF_NUM 4
+
+void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx);
+int rtw_mac_power_on(struct rtw_dev *rtwdev);
+void rtw_mac_power_off(struct rtw_dev *rtwdev);
+int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
+int rtw_mac_init(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
new file mode 100644
index 000000000000..abded63f138d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "sec.h"
+#include "tx.h"
+#include "fw.h"
+#include "mac.h"
+#include "ps.h"
+#include "reg.h"
+#include "debug.h"
+
+static void rtw_ops_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_tx_pkt_info pkt_info = {0};
+
+ if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
+ goto out;
+
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
+ if (rtw_hci_tx(rtwdev, &pkt_info, skb))
+ goto out;
+
+ return;
+
+out:
+ ieee80211_free_txskb(hw, skb);
+}
+
+static int rtw_ops_start(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw_core_start(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw_ops_stop(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_core_stop(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (hw->conf.flags & IEEE80211_CONF_IDLE) {
+ rtw_enter_ips(rtwdev);
+ } else {
+ ret = rtw_leave_ips(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave idle state\n");
+ goto out;
+ }
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ rtw_set_channel(rtwdev);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+ return ret;
+}
+
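+/* per-port register offsets and masks for MAC address, BSSID, network type
+ * and AID configuration
+ */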
+static const struct rtw_vif_port rtw_vif_port[] = {
+ [0] = {
+ .mac_addr = {.addr = 0x0610},
+ .bssid = {.addr = 0x0618},
+ .net_type = {.addr = 0x0100, .mask = 0x30000},
+ .aid = {.addr = 0x06a8, .mask = 0x7ff},
+ },
+ [1] = {
+ .mac_addr = {.addr = 0x0700},
+ .bssid = {.addr = 0x0708},
+ .net_type = {.addr = 0x0100, .mask = 0xc0000},
+ .aid = {.addr = 0x0710, .mask = 0x7ff},
+ },
+ [2] = {
+ .mac_addr = {.addr = 0x1620},
+ .bssid = {.addr = 0x1628},
+ .net_type = {.addr = 0x1100, .mask = 0x3},
+ .aid = {.addr = 0x1600, .mask = 0x7ff},
+ },
+ [3] = {
+ .mac_addr = {.addr = 0x1630},
+ .bssid = {.addr = 0x1638},
+ .net_type = {.addr = 0x1100, .mask = 0xc},
+ .aid = {.addr = 0x1604, .mask = 0x7ff},
+ },
+ [4] = {
+ .mac_addr = {.addr = 0x1640},
+ .bssid = {.addr = 0x1648},
+ .net_type = {.addr = 0x1100, .mask = 0x30},
+ .aid = {.addr = 0x1608, .mask = 0x7ff},
+ },
+};
+
+static int rtw_ops_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ enum rtw_net_type net_type;
+ u32 config = 0;
+ u8 port = 0;
+
+ rtwvif->port = port;
+ rtwvif->vif = vif;
+ rtwvif->stats.tx_unicast = 0;
+ rtwvif->stats.rx_unicast = 0;
+ rtwvif->stats.tx_cnt = 0;
+ rtwvif->stats.rx_cnt = 0;
+ rtwvif->in_lps = false;
+ rtwvif->conf = &rtw_vif_port[port];
+
+ mutex_lock(&rtwdev->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ net_type = RTW_NET_AP_MODE;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ net_type = RTW_NET_AD_HOC;
+ break;
+ case NL80211_IFTYPE_STATION:
+ default:
+ net_type = RTW_NET_NO_LINK;
+ break;
+ }
+
+ ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ config |= PORT_SET_MAC_ADDR;
+ rtwvif->net_type = net_type;
+ config |= PORT_SET_NET_TYPE;
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ return 0;
+}
+
+static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = 0;
+
+ rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+
+ mutex_lock(&rtwdev->mutex);
+
+ eth_zero_addr(rtwvif->mac_addr);
+ config |= PORT_SET_MAC_ADDR;
+ rtwvif->net_type = RTW_NET_NO_LINK;
+ config |= PORT_SET_NET_TYPE;
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags,
+ u64 multicast)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ *new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
+ FIF_BCN_PRBRESP_PROMISC;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI)
+ rtwdev->hal.rcr |= BIT_AM | BIT_AB;
+ else
+ rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
+ }
+ if (changed_flags & FIF_FCSFAIL) {
+ if (*new_flags & FIF_FCSFAIL)
+ rtwdev->hal.rcr |= BIT_ACRC32;
+ else
+ rtwdev->hal.rcr &= ~(BIT_ACRC32);
+ }
+ if (changed_flags & FIF_OTHER_BSS) {
+ if (*new_flags & FIF_OTHER_BSS)
+ rtwdev->hal.rcr |= BIT_AAP;
+ else
+ rtwdev->hal.rcr &= ~(BIT_AAP);
+ }
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
+ if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
+ rtwdev->hal.rcr &= ~(BIT_CBSSID_BCN | BIT_CBSSID_DATA);
+ else
+ rtwdev->hal.rcr |= BIT_CBSSID_BCN;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RX,
+ "config rx filter, changed=0x%08x, new=0x%08x, rcr=0x%08x\n",
+ changed_flags, *new_flags, rtwdev->hal.rcr);
+
+ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u32 changed)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = 0;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ struct rtw_chip_info *chip = rtwdev->chip;
+ enum rtw_net_type net_type;
+
+ if (conf->assoc) {
+ net_type = RTW_NET_MGD_LINKED;
+ chip->ops->do_iqk(rtwdev);
+
+ rtwvif->aid = conf->aid;
+ rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
+ rtw_fw_download_rsvd_page(rtwdev, vif);
+ rtw_send_rsvd_page_h2c(rtwdev);
+ } else {
+ net_type = RTW_NET_NO_LINK;
+ rtwvif->aid = 0;
+ rtw_reset_rsvd_page(rtwdev);
+ }
+
+ rtwvif->net_type = net_type;
+ config |= PORT_SET_NET_TYPE;
+ config |= PORT_SET_AID;
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ ether_addr_copy(rtwvif->bssid, conf->bssid);
+ config |= PORT_SET_BSSID;
+ }
+
+ if (changed & BSS_CHANGED_BEACON)
+ rtw_fw_download_rsvd_page(rtwdev, vif);
+
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+{
+ unsigned long mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ if (mac_id < RTW_MAX_MAC_ID_NUM)
+ set_bit(mac_id, rtwdev->mac_id_map);
+
+ return mac_id;
+}
+
+static void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
+{
+ clear_bit(mac_id, rtwdev->mac_id_map);
+}
+
+static int rtw_ops_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&rtwdev->mutex);
+
+ si->mac_id = rtw_acquire_macid(rtwdev);
+ if (si->mac_id >= RTW_MAX_MAC_ID_NUM) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ si->sta = sta;
+ si->vif = vif;
+ si->init_ra_lv = 1;
+ ewma_rssi_init(&si->avg_rssi);
+
+ rtw_update_sta_info(rtwdev, si);
+ rtw_fw_media_status_report(rtwdev, si->mac_id, true);
+
+ rtwdev->sta_cnt++;
+
+ rtw_info(rtwdev, "sta %pM joined with macid %d\n",
+ sta->addr, si->mac_id);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+ return ret;
+}
+
+static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+
+ rtw_release_macid(rtwdev, si->mac_id);
+ rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+
+ rtwdev->sta_cnt--;
+
+ rtw_info(rtwdev, "sta %pM with macid %d left\n",
+ sta->addr, si->mac_id);
+
+ mutex_unlock(&rtwdev->mutex);
+ return 0;
+}
+
+static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+ u8 hw_key_type;
+ u8 hw_key_idx;
+ int ret = 0;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ hw_key_type = RTW_CAM_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ hw_key_type = RTW_CAM_WEP104;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ hw_key_type = RTW_CAM_TKIP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ hw_key_type = RTW_CAM_AES;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ /* suppress error messages */
+ return -EOPNOTSUPP;
+ default:
+ return -ENOTSUPP;
+ }
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ hw_key_idx = rtw_sec_get_free_cam(sec);
+ } else {
+ /* multiple interfaces? */
+ hw_key_idx = key->keyidx;
+ }
+
+ if (hw_key_idx > sec->total_cam_num) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ /* need sw generated IV */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ key->hw_key_idx = hw_key_idx;
+ rtw_sec_write_cam(rtwdev, sec, sta, key,
+ hw_key_type, hw_key_idx);
+ break;
+ case DISABLE_KEY:
+ rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
+ break;
+ }
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ieee80211_sta *sta = params->sta;
+ u16 tid = params->tid;
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ rtw_leave_lps(rtwdev, rtwvif);
+
+ rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
+ rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
+}
+
+static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ rtw_flag_clear(rtwdev, RTW_FLAG_SCANNING);
+ rtw_flag_clear(rtwdev, RTW_FLAG_DIG_DISABLE);
+}
+
+const struct ieee80211_ops rtw_ops = {
+ .tx = rtw_ops_tx,
+ .start = rtw_ops_start,
+ .stop = rtw_ops_stop,
+ .config = rtw_ops_config,
+ .add_interface = rtw_ops_add_interface,
+ .remove_interface = rtw_ops_remove_interface,
+ .configure_filter = rtw_ops_configure_filter,
+ .bss_info_changed = rtw_ops_bss_info_changed,
+ .sta_add = rtw_ops_sta_add,
+ .sta_remove = rtw_ops_sta_remove,
+ .set_key = rtw_ops_set_key,
+ .ampdu_action = rtw_ops_ampdu_action,
+ .sw_scan_start = rtw_ops_sw_scan_start,
+ .sw_scan_complete = rtw_ops_sw_scan_complete,
+};
+EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
new file mode 100644
index 000000000000..f447361f7573
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "regd.h"
+#include "fw.h"
+#include "ps.h"
+#include "sec.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "efuse.h"
+#include "debug.h"
+
+static bool rtw_fw_support_lps;
+unsigned int rtw_debug_mask;
+EXPORT_SYMBOL(rtw_debug_mask);
+
+module_param_named(support_lps, rtw_fw_support_lps, bool, 0644);
+module_param_named(debug_mask, rtw_debug_mask, uint, 0644);
+
+MODULE_PARM_DESC(support_lps, "Set Y to enable LPS support");
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+static struct ieee80211_channel rtw_channeltable_2g[] = {
+ {.center_freq = 2412, .hw_value = 1,},
+ {.center_freq = 2417, .hw_value = 2,},
+ {.center_freq = 2422, .hw_value = 3,},
+ {.center_freq = 2427, .hw_value = 4,},
+ {.center_freq = 2432, .hw_value = 5,},
+ {.center_freq = 2437, .hw_value = 6,},
+ {.center_freq = 2442, .hw_value = 7,},
+ {.center_freq = 2447, .hw_value = 8,},
+ {.center_freq = 2452, .hw_value = 9,},
+ {.center_freq = 2457, .hw_value = 10,},
+ {.center_freq = 2462, .hw_value = 11,},
+ {.center_freq = 2467, .hw_value = 12,},
+ {.center_freq = 2472, .hw_value = 13,},
+ {.center_freq = 2484, .hw_value = 14,},
+};
+
+static struct ieee80211_channel rtw_channeltable_5g[] = {
+ {.center_freq = 5180, .hw_value = 36,},
+ {.center_freq = 5200, .hw_value = 40,},
+ {.center_freq = 5220, .hw_value = 44,},
+ {.center_freq = 5240, .hw_value = 48,},
+ {.center_freq = 5260, .hw_value = 52,},
+ {.center_freq = 5280, .hw_value = 56,},
+ {.center_freq = 5300, .hw_value = 60,},
+ {.center_freq = 5320, .hw_value = 64,},
+ {.center_freq = 5500, .hw_value = 100,},
+ {.center_freq = 5520, .hw_value = 104,},
+ {.center_freq = 5540, .hw_value = 108,},
+ {.center_freq = 5560, .hw_value = 112,},
+ {.center_freq = 5580, .hw_value = 116,},
+ {.center_freq = 5600, .hw_value = 120,},
+ {.center_freq = 5620, .hw_value = 124,},
+ {.center_freq = 5640, .hw_value = 128,},
+ {.center_freq = 5660, .hw_value = 132,},
+ {.center_freq = 5680, .hw_value = 136,},
+ {.center_freq = 5700, .hw_value = 140,},
+ {.center_freq = 5745, .hw_value = 149,},
+ {.center_freq = 5765, .hw_value = 153,},
+ {.center_freq = 5785, .hw_value = 157,},
+ {.center_freq = 5805, .hw_value = 161,},
+ {.center_freq = 5825, .hw_value = 165,
+ .flags = IEEE80211_CHAN_NO_HT40MINUS},
+};
+
+static struct ieee80211_rate rtw_ratetable[] = {
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static struct ieee80211_supported_band rtw_band_2ghz = {
+ .band = NL80211_BAND_2GHZ,
+
+ .channels = rtw_channeltable_2g,
+ .n_channels = ARRAY_SIZE(rtw_channeltable_2g),
+
+ .bitrates = rtw_ratetable,
+ .n_bitrates = ARRAY_SIZE(rtw_ratetable),
+
+ .ht_cap = {0},
+ .vht_cap = {0},
+};
+
+static struct ieee80211_supported_band rtw_band_5ghz = {
+ .band = NL80211_BAND_5GHZ,
+
+ .channels = rtw_channeltable_5g,
+ .n_channels = ARRAY_SIZE(rtw_channeltable_5g),
+
+ /* 5G has no CCK rates */
+ .bitrates = rtw_ratetable + 4,
+ .n_bitrates = ARRAY_SIZE(rtw_ratetable) - 4,
+
+ .ht_cap = {0},
+ .vht_cap = {0},
+};
+
+struct rtw_watch_dog_iter_data {
+ struct rtw_vif *rtwvif;
+ bool active;
+ u8 assoc_cnt;
+};
+
+static void rtw_vif_watch_dog_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_watch_dog_iter_data *iter_data = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->bss_conf.assoc) {
+ iter_data->assoc_cnt++;
+ iter_data->rtwvif = rtwvif;
+ }
+ if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD ||
+ rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
+ iter_data->active = true;
+ } else {
+ /* only STATION mode can enter lps */
+ iter_data->active = true;
+ }
+
+ rtwvif->stats.tx_unicast = 0;
+ rtwvif->stats.rx_unicast = 0;
+ rtwvif->stats.tx_cnt = 0;
+ rtwvif->stats.rx_cnt = 0;
+}
+
+/* process TX/RX statistics periodically for hardware,
+ * the information helps hardware to enhance performance
+ */
+static void rtw_watch_dog_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ watch_dog_work.work);
+ struct rtw_watch_dog_iter_data data = {};
+
+ if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
+ return;
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
+ RTW_WATCH_DOG_DELAY_TIME);
+
+ /* reset tx/rx statistics */
+ rtwdev->stats.tx_unicast = 0;
+ rtwdev->stats.rx_unicast = 0;
+ rtwdev->stats.tx_cnt = 0;
+ rtwdev->stats.rx_cnt = 0;
+
+ rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data);
+
+ /* fw supports lps only when a single station vif is associated; if
+ * more than one vif is associated we cannot enter lps, because fw
+ * does not handle the overlapped beacon intervals
+ */
+ if (rtw_fw_support_lps &&
+ data.rtwvif && !data.active && data.assoc_cnt == 1)
+ rtw_enter_lps(rtwdev, data.rtwvif);
+
+ if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
+ return;
+
+ rtw_phy_dynamic_mechanism(rtwdev);
+
+ rtwdev->watch_dog_cnt++;
+}
+
+static void rtw_c2h_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work);
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
+ skb_unlink(skb, &rtwdev->c2h_queue);
+ rtw_fw_c2h_cmd_handle(rtwdev, skb);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
+ struct rtw_channel_params *chan_params)
+{
+ struct ieee80211_channel *channel = chandef->chan;
+ enum nl80211_chan_width width = chandef->width;
+ u32 primary_freq, center_freq;
+ u8 center_chan;
+ u8 bandwidth = RTW_CHANNEL_WIDTH_20;
+ u8 primary_chan_idx = 0;
+
+ center_chan = channel->hw_value;
+ primary_freq = channel->center_freq;
+ center_freq = chandef->center_freq1;
+
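+ /* derive the hardware center channel and primary channel index from
+ * the offset between the primary and center frequencies
+ */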
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ bandwidth = RTW_CHANNEL_WIDTH_20;
+ primary_chan_idx = 0;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bandwidth = RTW_CHANNEL_WIDTH_40;
+ if (primary_freq > center_freq) {
+ primary_chan_idx = 1;
+ center_chan -= 2;
+ } else {
+ primary_chan_idx = 2;
+ center_chan += 2;
+ }
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bandwidth = RTW_CHANNEL_WIDTH_80;
+ if (primary_freq > center_freq) {
+ if (primary_freq - center_freq == 10) {
+ primary_chan_idx = 1;
+ center_chan -= 2;
+ } else {
+ primary_chan_idx = 3;
+ center_chan -= 6;
+ }
+ } else {
+ if (center_freq - primary_freq == 10) {
+ primary_chan_idx = 2;
+ center_chan += 2;
+ } else {
+ primary_chan_idx = 4;
+ center_chan += 6;
+ }
+ }
+ break;
+ default:
+ center_chan = 0;
+ break;
+ }
+
+ chan_params->center_chan = center_chan;
+ chan_params->bandwidth = bandwidth;
+ chan_params->primary_chan_idx = primary_chan_idx;
+}
+
+void rtw_set_channel(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_channel_params ch_param;
+ u8 center_chan, bandwidth, primary_chan_idx;
+
+ rtw_get_channel_params(&hw->conf.chandef, &ch_param);
+ if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
+ return;
+
+ center_chan = ch_param.center_chan;
+ bandwidth = ch_param.bandwidth;
+ primary_chan_idx = ch_param.primary_chan_idx;
+
+ hal->current_band_width = bandwidth;
+ hal->current_channel = center_chan;
+ hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
+
+ rtw_phy_set_tx_power_level(rtwdev, center_chan);
+}
+
+static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(rtwdev, start + i, addr[i]);
+}
+
+void rtw_vif_port_config(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ u32 config)
+{
+ u32 addr, mask;
+
+ if (config & PORT_SET_MAC_ADDR) {
+ addr = rtwvif->conf->mac_addr.addr;
+ rtw_vif_write_addr(rtwdev, addr, rtwvif->mac_addr);
+ }
+ if (config & PORT_SET_BSSID) {
+ addr = rtwvif->conf->bssid.addr;
+ rtw_vif_write_addr(rtwdev, addr, rtwvif->bssid);
+ }
+ if (config & PORT_SET_NET_TYPE) {
+ addr = rtwvif->conf->net_type.addr;
+ mask = rtwvif->conf->net_type.mask;
+ rtw_write32_mask(rtwdev, addr, mask, rtwvif->net_type);
+ }
+ if (config & PORT_SET_AID) {
+ addr = rtwvif->conf->aid.addr;
+ mask = rtwvif->conf->aid.mask;
+ rtw_write32_mask(rtwdev, addr, mask, rtwvif->aid);
+ }
+}
+
+static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
+{
+ u8 bw = 0;
+
+ switch (bw_cap) {
+ case EFUSE_HW_CAP_IGNORE:
+ case EFUSE_HW_CAP_SUPP_BW80:
+ bw |= BIT(RTW_CHANNEL_WIDTH_80);
+ /* fall through */
+ case EFUSE_HW_CAP_SUPP_BW40:
+ bw |= BIT(RTW_CHANNEL_WIDTH_40);
+ /* fall through */
+ default:
+ bw |= BIT(RTW_CHANNEL_WIDTH_20);
+ break;
+ }
+
+ return bw;
+}
+
+static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ if (hw_ant_num == EFUSE_HW_CAP_IGNORE ||
+ hw_ant_num >= hal->rf_path_num)
+ return;
+
+ switch (hw_ant_num) {
+ case 1:
+ hal->rf_type = RF_1T1R;
+ hal->rf_path_num = 1;
+ hal->antenna_tx = BB_PATH_A;
+ hal->antenna_rx = BB_PATH_A;
+ break;
+ default:
+ WARN(1, "invalid hw configuration from efuse\n");
+ break;
+ }
+}
+
+static u64 get_vht_ra_mask(struct ieee80211_sta *sta)
+{
+ u64 ra_mask = 0;
+ u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ u8 vht_mcs_cap;
+ int i, nss;
+
+ /* 4SS, every two bits for MCS7/8/9 */
+ for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 10) {
+ vht_mcs_cap = mcs_map & 0x3;
+ switch (vht_mcs_cap) {
+ case 2: /* MCS9 */
+ ra_mask |= 0x3ffULL << nss;
+ break;
+ case 1: /* MCS8 */
+ ra_mask |= 0x1ffULL << nss;
+ break;
+ case 0: /* MCS7 */
+ ra_mask |= 0x0ffULL << nss;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return ra_mask;
+}
+
+static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
+{
+ u8 rate_id = 0;
+
+ switch (wireless_set) {
+ case WIRELESS_CCK:
+ rate_id = RTW_RATEID_B_20M;
+ break;
+ case WIRELESS_OFDM:
+ rate_id = RTW_RATEID_G;
+ break;
+ case WIRELESS_CCK | WIRELESS_OFDM:
+ rate_id = RTW_RATEID_BG;
+ break;
+ case WIRELESS_OFDM | WIRELESS_HT:
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_GN_N1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_GN_N2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR5_N_3SS;
+ break;
+ case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT:
+ if (bw_mode == RTW_CHANNEL_WIDTH_40) {
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_BGN_40M_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_BGN_40M_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR5_N_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR7_N_4SS;
+ } else {
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_BGN_20M_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_BGN_20M_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR5_N_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR7_N_4SS;
+ }
+ break;
+ case WIRELESS_OFDM | WIRELESS_VHT:
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_ARFR1_AC_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_ARFR0_AC_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR4_AC_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR6_AC_4SS;
+ break;
+ case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_VHT:
+ if (bw_mode >= RTW_CHANNEL_WIDTH_80) {
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_ARFR1_AC_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_ARFR0_AC_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR4_AC_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR6_AC_4SS;
+ } else {
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_ARFR2_AC_2G_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_ARFR3_AC_2G_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR4_AC_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR6_AC_4SS;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return rate_id;
+}
+
+#define RA_MASK_CCK_RATES 0x0000f
+#define RA_MASK_OFDM_RATES 0x00ff0
+#define RA_MASK_HT_RATES_1SS (0xff000ULL << 0)
+#define RA_MASK_HT_RATES_2SS (0xff000ULL << 8)
+#define RA_MASK_HT_RATES_3SS (0xff000ULL << 16)
+#define RA_MASK_HT_RATES (RA_MASK_HT_RATES_1SS | \
+ RA_MASK_HT_RATES_2SS | \
+ RA_MASK_HT_RATES_3SS)
+#define RA_MASK_VHT_RATES_1SS (0x3ff000ULL << 0)
+#define RA_MASK_VHT_RATES_2SS (0x3ff000ULL << 10)
+#define RA_MASK_VHT_RATES_3SS (0x3ff000ULL << 20)
+#define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \
+ RA_MASK_VHT_RATES_2SS | \
+ RA_MASK_VHT_RATES_3SS)
+#define RA_MASK_CCK_IN_HT 0x00005
+#define RA_MASK_CCK_IN_VHT 0x00005
+#define RA_MASK_OFDM_IN_VHT 0x00010
+#define RA_MASK_OFDM_IN_HT_2G 0x00010
+#define RA_MASK_OFDM_IN_HT_5G 0x00030
+
+void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+{
+ struct ieee80211_sta *sta = si->sta;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rssi_level;
+ u8 wireless_set;
+ u8 bw_mode;
+ u8 rate_id;
+ u8 rf_type = RF_1T1R;
+ u8 stbc_en = 0;
+ u8 ldpc_en = 0;
+ u8 tx_num = 1;
+ u64 ra_mask = 0;
+ bool is_vht_enable = false;
+ bool is_support_sgi = false;
+
+ if (sta->vht_cap.vht_supported) {
+ is_vht_enable = true;
+ ra_mask |= get_vht_ra_mask(sta);
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ stbc_en = VHT_STBC_EN;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ ldpc_en = VHT_LDPC_EN;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ is_support_sgi = true;
+ } else if (sta->ht_cap.ht_supported) {
+ ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) |
+ (sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12);
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ stbc_en = HT_STBC_EN;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ ldpc_en = HT_LDPC_EN;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 ||
+ sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ is_support_sgi = true;
+ }
+
+ if (hal->current_band_type == RTW_BAND_5G) {
+ ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ if (sta->vht_cap.vht_supported) {
+ ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT;
+ wireless_set = WIRELESS_OFDM | WIRELESS_VHT;
+ } else if (sta->ht_cap.ht_supported) {
+ ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G;
+ wireless_set = WIRELESS_OFDM | WIRELESS_HT;
+ } else {
+ wireless_set = WIRELESS_OFDM;
+ }
+ } else if (hal->current_band_type == RTW_BAND_2G) {
+ ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
+ if (sta->vht_cap.vht_supported) {
+ ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT |
+ RA_MASK_OFDM_IN_VHT;
+ wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
+ WIRELESS_HT | WIRELESS_VHT;
+ } else if (sta->ht_cap.ht_supported) {
+ ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT |
+ RA_MASK_OFDM_IN_HT_2G;
+ wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
+ WIRELESS_HT;
+ } else if (sta->supp_rates[0] <= 0xf) {
+ wireless_set = WIRELESS_CCK;
+ } else {
+ wireless_set = WIRELESS_CCK | WIRELESS_OFDM;
+ }
+ } else {
+ rtw_err(rtwdev, "Unknown band type\n");
+ wireless_set = 0;
+ }
+
+ if (efuse->hw_cap.nss == 1) {
+ ra_mask &= RA_MASK_VHT_RATES_1SS;
+ ra_mask &= RA_MASK_HT_RATES_1SS;
+ }
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_80:
+ bw_mode = RTW_CHANNEL_WIDTH_80;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw_mode = RTW_CHANNEL_WIDTH_40;
+ break;
+ default:
+ bw_mode = RTW_CHANNEL_WIDTH_20;
+ break;
+ }
+
+ if (sta->vht_cap.vht_supported && ra_mask & 0xffc00000) {
+ tx_num = 2;
+ rf_type = RF_2T2R;
+ } else if (sta->ht_cap.ht_supported && ra_mask & 0xfff00000) {
+ tx_num = 2;
+ rf_type = RF_2T2R;
+ }
+
+ rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
+
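+ /* prune the lowest legacy/MCS rates from the rate mask according to
+ * the current RSSI level
+ */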
+ if (wireless_set != WIRELESS_CCK) {
+ rssi_level = si->rssi_level;
+ if (rssi_level == 0)
+ ra_mask &= 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ ra_mask &= 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ ra_mask &= 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ ra_mask &= 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ ra_mask &= 0xffffffffffff8f80ULL;
+ else if (rssi_level >= 5)
+ ra_mask &= 0xffffffffffff0f00ULL;
+ }
+
+ si->bw_mode = bw_mode;
+ si->stbc_en = stbc_en;
+ si->ldpc_en = ldpc_en;
+ si->rf_type = rf_type;
+ si->wireless_set = wireless_set;
+ si->sgi_enable = is_support_sgi;
+ si->vht_enable = is_vht_enable;
+ si->ra_mask = ra_mask;
+ si->rate_id = rate_id;
+
+ rtw_fw_send_ra_info(rtwdev, si);
+}
+
+static int rtw_power_on(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ int ret;
+
+ ret = rtw_hci_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup hci\n");
+ goto err;
+ }
+
+ /* power on MAC before the firmware is downloaded */
+ ret = rtw_mac_power_on(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to power on mac\n");
+ goto err;
+ }
+
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware) {
+ ret = -EINVAL;
+ rtw_err(rtwdev, "failed to load firmware\n");
+ goto err;
+ }
+
+ ret = rtw_download_firmware(rtwdev, fw);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download firmware\n");
+ goto err_off;
+ }
+
+ /* config mac after firmware downloaded */
+ ret = rtw_mac_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to configure mac\n");
+ goto err_off;
+ }
+
+ chip->ops->phy_set_param(rtwdev);
+
+ ret = rtw_hci_start(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to start hci\n");
+ goto err_off;
+ }
+
+ return 0;
+
+err_off:
+ rtw_mac_power_off(rtwdev);
+
+err:
+ return ret;
+}
+
+int rtw_core_start(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_power_on(rtwdev);
+ if (ret)
+ return ret;
+
+ rtw_sec_enable_sec_engine(rtwdev);
+
+ /* rcr reset after powered on */
+ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
+ RTW_WATCH_DOG_DELAY_TIME);
+
+ rtw_flag_set(rtwdev, RTW_FLAG_RUNNING);
+
+ return 0;
+}
+
+static void rtw_power_off(struct rtw_dev *rtwdev)
+{
+ rtwdev->hci.ops->stop(rtwdev);
+ rtw_mac_power_off(rtwdev);
+}
+
+void rtw_core_stop(struct rtw_dev *rtwdev)
+{
+ rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING);
+ rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING);
+
+ cancel_delayed_work_sync(&rtwdev->watch_dog_work);
+
+ rtw_power_off(rtwdev);
+}
+
+static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ ht_cap->ht_supported = true;
+ ht_cap->cap = 0;
+ ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_MAX_AMSDU |
+ IEEE80211_HT_CAP_LDPC_CODING |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40))
+ ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_SGI_40;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (efuse->hw_cap.nss > 1) {
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+ ht_cap->mcs.rx_highest = cpu_to_le16(300);
+ } else {
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0x00;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+ ht_cap->mcs.rx_highest = cpu_to_le16(150);
+ }
+}
+
+static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
+ struct ieee80211_sta_vht_cap *vht_cap)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u16 mcs_map;
+ __le16 highest;
+
+ if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE &&
+ efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT)
+ return;
+
+ vht_cap->vht_supported = true;
+ vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_HTC_VHT |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ 0;
+ mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+ if (efuse->hw_cap.nss > 1) {
+ highest = cpu_to_le16(780);
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2;
+ } else {
+ highest = cpu_to_le16(390);
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2;
+ }
+
+ vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.rx_highest = highest;
+ vht_cap->vht_mcs.tx_highest = highest;
+}
+
+static void rtw_set_supported_band(struct ieee80211_hw *hw,
+ struct rtw_chip_info *chip)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct ieee80211_supported_band *sband;
+
+ if (chip->band & RTW_BAND_2G) {
+ sband = kmemdup(&rtw_band_2ghz, sizeof(*sband), GFP_KERNEL);
+ if (!sband)
+ goto err_out;
+ if (chip->ht_supported)
+ rtw_init_ht_cap(rtwdev, &sband->ht_cap);
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
+ }
+
+ if (chip->band & RTW_BAND_5G) {
+ sband = kmemdup(&rtw_band_5ghz, sizeof(*sband), GFP_KERNEL);
+ if (!sband)
+ goto err_out;
+ if (chip->ht_supported)
+ rtw_init_ht_cap(rtwdev, &sband->ht_cap);
+ if (chip->vht_supported)
+ rtw_init_vht_cap(rtwdev, &sband->vht_cap);
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
+ }
+
+ return;
+
+err_out:
+ rtw_err(rtwdev, "failed to set supported band\n");
+ kfree(sband);
+}
+
+static void rtw_unset_supported_band(struct ieee80211_hw *hw,
+ struct rtw_chip_info *chip)
+{
+ kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
+ kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
+}
+
+static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
+{
+ struct rtw_dev *rtwdev = context;
+ struct rtw_fw_state *fw = &rtwdev->fw;
+
+ if (!firmware)
+ rtw_err(rtwdev, "failed to request firmware\n");
+
+ fw->firmware = firmware;
+ complete_all(&fw->completion);
+}
+
+static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
+{
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ int ret;
+
+ init_completion(&fw->completion);
+
+ ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
+ GFP_KERNEL, rtwdev, rtw_load_firmware_cb);
+ if (ret) {
+ rtw_err(rtwdev, "async firmware request failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u32 wl_bt_pwr_ctrl;
+ int ret = 0;
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtwdev->hci.rpwm_addr = 0x03d9;
+ break;
+ default:
+ rtw_err(rtwdev, "unsupported hci type\n");
+ return -EINVAL;
+ }
+
+ wl_bt_pwr_ctrl = rtw_read32(rtwdev, REG_WL_BT_PWR_CTRL);
+ if (wl_bt_pwr_ctrl & BIT_BT_FUNC_EN)
+ rtwdev->efuse.btcoex = true;
+ hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1);
+ hal->fab_version = BIT_GET_VENDOR_ID(hal->chip_version) >> 2;
+ hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version);
+ hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 0 : 1;
+ if (hal->chip_version & BIT_RF_TYPE_ID) {
+ hal->rf_type = RF_2T2R;
+ hal->rf_path_num = 2;
+ hal->antenna_tx = BB_PATH_AB;
+ hal->antenna_rx = BB_PATH_AB;
+ } else {
+ hal->rf_type = RF_1T1R;
+ hal->rf_path_num = 1;
+ hal->antenna_tx = BB_PATH_A;
+ hal->antenna_rx = BB_PATH_A;
+ }
+
+ if (hal->fab_version == 2)
+ hal->fab_version = 1;
+ else if (hal->fab_version == 1)
+ hal->fab_version = 2;
+
+ efuse->physical_size = chip->phy_efuse_size;
+ efuse->logical_size = chip->log_efuse_size;
+ efuse->protect_size = chip->ptct_efuse_size;
+
+ /* default use ack */
+ rtwdev->hal.rcr |= BIT_VHT_DACK;
+
+ return ret;
+}
+
+static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev)
+{
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ int ret;
+
+ ret = rtw_hci_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup hci\n");
+ goto err;
+ }
+
+ ret = rtw_mac_power_on(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to power on mac\n");
+ goto err;
+ }
+
+ rtw_write8(rtwdev, REG_C2HEVT, C2H_HW_FEATURE_DUMP);
+
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware) {
+ ret = -EINVAL;
+ rtw_err(rtwdev, "failed to load firmware\n");
+ goto err;
+ }
+
+ ret = rtw_download_firmware(rtwdev, fw);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download firmware\n");
+ goto err_off;
+ }
+
+ return 0;
+
+err_off:
+ rtw_mac_power_off(rtwdev);
+
+err:
+ return ret;
+}
+
+static int rtw_dump_hw_feature(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 hw_feature[HW_FEATURE_LEN];
+ u8 id;
+ u8 bw;
+ int i;
+
+ id = rtw_read8(rtwdev, REG_C2HEVT);
+ if (id != C2H_HW_FEATURE_REPORT) {
+ rtw_err(rtwdev, "failed to read hw feature report\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < HW_FEATURE_LEN; i++)
+ hw_feature[i] = rtw_read8(rtwdev, REG_C2HEVT + 2 + i);
+
+ rtw_write8(rtwdev, REG_C2HEVT, 0);
+
+ bw = GET_EFUSE_HW_CAP_BW(hw_feature);
+ efuse->hw_cap.bw = hw_bw_cap_to_bitamp(bw);
+ efuse->hw_cap.hci = GET_EFUSE_HW_CAP_HCI(hw_feature);
+ efuse->hw_cap.nss = GET_EFUSE_HW_CAP_NSS(hw_feature);
+ efuse->hw_cap.ptcl = GET_EFUSE_HW_CAP_PTCL(hw_feature);
+ efuse->hw_cap.ant_num = GET_EFUSE_HW_CAP_ANT_NUM(hw_feature);
+
+ rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num);
+
+ if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE)
+ efuse->hw_cap.nss = rtwdev->hal.rf_path_num;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n",
+ efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl,
+ efuse->hw_cap.ant_num, efuse->hw_cap.nss);
+
+ return 0;
+}
+
+static void rtw_chip_efuse_disable(struct rtw_dev *rtwdev)
+{
+ rtw_hci_stop(rtwdev);
+ rtw_mac_power_off(rtwdev);
+}
+
+static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+
+ /* power on mac to read efuse */
+ ret = rtw_chip_efuse_enable(rtwdev);
+ if (ret)
+ goto out;
+
+ ret = rtw_parse_efuse_map(rtwdev);
+ if (ret)
+ goto out;
+
+ ret = rtw_dump_hw_feature(rtwdev);
+ if (ret)
+ goto out;
+
+ ret = rtw_check_supported_rfe(rtwdev);
+ if (ret)
+ goto out;
+
+ if (efuse->crystal_cap == 0xff)
+ efuse->crystal_cap = 0;
+ if (efuse->pa_type_2g == 0xff)
+ efuse->pa_type_2g = 0;
+ if (efuse->pa_type_5g == 0xff)
+ efuse->pa_type_5g = 0;
+ if (efuse->lna_type_2g == 0xff)
+ efuse->lna_type_2g = 0;
+ if (efuse->lna_type_5g == 0xff)
+ efuse->lna_type_5g = 0;
+ if (efuse->channel_plan == 0xff)
+ efuse->channel_plan = 0x7f;
+ if (efuse->bt_setting & BIT(0))
+ efuse->share_ant = true;
+ if (efuse->regd == 0xff)
+ efuse->regd = 0;
+
+ efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0;
+ efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0;
+ efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
+ efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+
+ rtw_chip_efuse_disable(rtwdev);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+ return ret;
+}
+
+static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
+
+ if (!rfe_def)
+ return -ENODEV;
+
+ rtw_phy_setup_phy_cond(rtwdev, 0);
+
+ rtw_hw_init_tx_power(hal);
+ rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
+ rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
+ rtw_phy_tx_power_by_rate_config(hal);
+ rtw_phy_tx_power_limit_config(hal);
+
+ return 0;
+}
+
+int rtw_chip_info_setup(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_chip_parameter_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup chip parameters\n");
+ goto err_out;
+ }
+
+ ret = rtw_chip_efuse_info_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup chip efuse info\n");
+ goto err_out;
+ }
+
+ ret = rtw_chip_board_info_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup chip board info\n");
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ return ret;
+}
+EXPORT_SYMBOL(rtw_chip_info_setup);
+
+int rtw_core_init(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
+
+ timer_setup(&rtwdev->tx_report.purge_timer,
+ rtw_tx_report_purge_timer, 0);
+
+ INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
+ INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work);
+ INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ skb_queue_head_init(&rtwdev->c2h_queue);
+ skb_queue_head_init(&rtwdev->tx_report.queue);
+
+ spin_lock_init(&rtwdev->dm_lock);
+ spin_lock_init(&rtwdev->rf_lock);
+ spin_lock_init(&rtwdev->h2c.lock);
+ spin_lock_init(&rtwdev->tx_report.q_lock);
+
+ mutex_init(&rtwdev->mutex);
+ mutex_init(&rtwdev->hal.tx_power_mutex);
+
+ rtwdev->sec.total_cam_num = 32;
+ rtwdev->hal.current_channel = 1;
+ set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_add_rsvd_page(rtwdev, RSVD_BEACON, false);
+ mutex_unlock(&rtwdev->mutex);
+
+ /* default rx filter setting */
+ rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV |
+ BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
+ BIT_AB | BIT_AM | BIT_APM;
+
+ ret = rtw_load_firmware(rtwdev, rtwdev->chip->fw_name);
+ if (ret) {
+ rtw_warn(rtwdev, "no firmware loaded\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw_core_init);
+
+void rtw_core_deinit(struct rtw_dev *rtwdev)
+{
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_rsvd_page *rsvd_pkt, *tmp;
+ unsigned long flags;
+
+ if (fw->firmware)
+ release_firmware(fw->firmware);
+
+ spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
+ skb_queue_purge(&rtwdev->tx_report.queue);
+ spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
+
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
+ list_del(&rsvd_pkt->list);
+ kfree(rsvd_pkt);
+ }
+
+ mutex_destroy(&rtwdev->mutex);
+ mutex_destroy(&rtwdev->hal.tx_power_mutex);
+}
+EXPORT_SYMBOL(rtw_core_deinit);
+
+int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
+{
+ int max_tx_headroom = 0;
+ int ret;
+
+ /* TODO: USB & SDIO may need extra room? */
+ max_tx_headroom = rtwdev->chip->tx_pkt_desc_sz;
+
+ hw->extra_tx_headroom = max_tx_headroom;
+ hw->queues = IEEE80211_NUM_ACS;
+ hw->sta_data_size = sizeof(struct rtw_sta_info);
+ hw->vif_data_size = sizeof(struct rtw_vif);
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
+ rtw_set_supported_band(hw, rtwdev->chip);
+ SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
+
+ rtw_regd_init(rtwdev, rtw_regd_notifier);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret) {
+ rtw_err(rtwdev, "failed to register hw\n");
+ return ret;
+ }
+
+ if (regulatory_hint(hw->wiphy, rtwdev->regd.alpha2))
+ rtw_err(rtwdev, "regulatory_hint fail\n");
+
+ rtw_debugfs_init(rtwdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw_register_hw);
+
+void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ ieee80211_unregister_hw(hw);
+ rtw_unset_supported_band(hw, chip);
+}
+EXPORT_SYMBOL(rtw_unregister_hw);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless core module");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
new file mode 100644
index 000000000000..00fc77fb9b54
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -0,0 +1,1104 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTK_MAIN_H_
+#define __RTK_MAIN_H_
+
+#include <net/mac80211.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+#include <linux/average.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "util.h"
+
+#define RTW_MAX_MAC_ID_NUM 32
+#define RTW_MAX_SEC_CAM_NUM 32
+
+#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
+
+#define RFREG_MASK 0xfffff
+#define INV_RF_DATA 0xffffffff
+#define TX_PAGE_SIZE_SHIFT 7
+
+#define RTW_CHANNEL_WIDTH_MAX 3
+#define RTW_RF_PATH_MAX 4
+#define HW_FEATURE_LEN 13
+
+extern unsigned int rtw_debug_mask;
+extern const struct ieee80211_ops rtw_ops;
+extern struct rtw_chip_info rtw8822b_hw_spec;
+extern struct rtw_chip_info rtw8822c_hw_spec;
+
+#define RTW_MAX_CHANNEL_NUM_2G 14
+#define RTW_MAX_CHANNEL_NUM_5G 49
+
+struct rtw_dev;
+
+enum rtw_hci_type {
+ RTW_HCI_TYPE_PCIE,
+ RTW_HCI_TYPE_USB,
+ RTW_HCI_TYPE_SDIO,
+
+ RTW_HCI_TYPE_UNDEFINE,
+};
+
+struct rtw_hci {
+ struct rtw_hci_ops *ops;
+ enum rtw_hci_type type;
+
+ u32 rpwm_addr;
+
+ u8 bulkout_num;
+};
+
+enum rtw_supported_band {
+ RTW_BAND_2G = 1 << 0,
+ RTW_BAND_5G = 1 << 1,
+ RTW_BAND_60G = 1 << 2,
+
+ RTW_BAND_MAX,
+};
+
+enum rtw_bandwidth {
+ RTW_CHANNEL_WIDTH_20 = 0,
+ RTW_CHANNEL_WIDTH_40 = 1,
+ RTW_CHANNEL_WIDTH_80 = 2,
+ RTW_CHANNEL_WIDTH_160 = 3,
+ RTW_CHANNEL_WIDTH_80_80 = 4,
+ RTW_CHANNEL_WIDTH_5 = 5,
+ RTW_CHANNEL_WIDTH_10 = 6,
+};
+
+enum rtw_net_type {
+ RTW_NET_NO_LINK = 0,
+ RTW_NET_AD_HOC = 1,
+ RTW_NET_MGD_LINKED = 2,
+ RTW_NET_AP_MODE = 3,
+};
+
+enum rtw_rf_type {
+ RF_1T1R = 0,
+ RF_1T2R = 1,
+ RF_2T2R = 2,
+ RF_2T3R = 3,
+ RF_2T4R = 4,
+ RF_3T3R = 5,
+ RF_3T4R = 6,
+ RF_4T4R = 7,
+ RF_TYPE_MAX,
+};
+
+enum rtw_rf_path {
+ RF_PATH_A = 0,
+ RF_PATH_B = 1,
+ RF_PATH_C = 2,
+ RF_PATH_D = 3,
+};
+
+enum rtw_bb_path {
+ BB_PATH_A = BIT(0),
+ BB_PATH_B = BIT(1),
+ BB_PATH_C = BIT(2),
+ BB_PATH_D = BIT(3),
+
+ BB_PATH_AB = (BB_PATH_A | BB_PATH_B),
+ BB_PATH_AC = (BB_PATH_A | BB_PATH_C),
+ BB_PATH_AD = (BB_PATH_A | BB_PATH_D),
+ BB_PATH_BC = (BB_PATH_B | BB_PATH_C),
+ BB_PATH_BD = (BB_PATH_B | BB_PATH_D),
+ BB_PATH_CD = (BB_PATH_C | BB_PATH_D),
+
+ BB_PATH_ABC = (BB_PATH_A | BB_PATH_B | BB_PATH_C),
+ BB_PATH_ABD = (BB_PATH_A | BB_PATH_B | BB_PATH_D),
+ BB_PATH_ACD = (BB_PATH_A | BB_PATH_C | BB_PATH_D),
+ BB_PATH_BCD = (BB_PATH_B | BB_PATH_C | BB_PATH_D),
+
+ BB_PATH_ABCD = (BB_PATH_A | BB_PATH_B | BB_PATH_C | BB_PATH_D),
+};
+
+enum rtw_rate_section {
+ RTW_RATE_SECTION_CCK = 0,
+ RTW_RATE_SECTION_OFDM,
+ RTW_RATE_SECTION_HT_1S,
+ RTW_RATE_SECTION_HT_2S,
+ RTW_RATE_SECTION_VHT_1S,
+ RTW_RATE_SECTION_VHT_2S,
+
+ /* keep last */
+ RTW_RATE_SECTION_MAX,
+};
+
+enum rtw_wireless_set {
+ WIRELESS_CCK = 0x00000001,
+ WIRELESS_OFDM = 0x00000002,
+ WIRELESS_HT = 0x00000004,
+ WIRELESS_VHT = 0x00000008,
+};
+
+#define HT_STBC_EN BIT(0)
+#define VHT_STBC_EN BIT(1)
+#define HT_LDPC_EN BIT(0)
+#define VHT_LDPC_EN BIT(1)
+
+enum rtw_chip_type {
+ RTW_CHIP_TYPE_8822B,
+ RTW_CHIP_TYPE_8822C,
+};
+
+enum rtw_tx_queue_type {
+ /* the order of AC queues matters */
+ RTW_TX_QUEUE_BK = 0x0,
+ RTW_TX_QUEUE_BE = 0x1,
+ RTW_TX_QUEUE_VI = 0x2,
+ RTW_TX_QUEUE_VO = 0x3,
+
+ RTW_TX_QUEUE_BCN = 0x4,
+ RTW_TX_QUEUE_MGMT = 0x5,
+ RTW_TX_QUEUE_HI0 = 0x6,
+ RTW_TX_QUEUE_H2C = 0x7,
+ /* keep it last */
+ RTK_MAX_TX_QUEUE_NUM
+};
+
+enum rtw_rx_queue_type {
+ RTW_RX_QUEUE_MPDU = 0x0,
+ RTW_RX_QUEUE_C2H = 0x1,
+ /* keep it last */
+ RTK_MAX_RX_QUEUE_NUM
+};
+
+enum rtw_rate_index {
+ RTW_RATEID_BGN_40M_2SS = 0,
+ RTW_RATEID_BGN_40M_1SS = 1,
+ RTW_RATEID_BGN_20M_2SS = 2,
+ RTW_RATEID_BGN_20M_1SS = 3,
+ RTW_RATEID_GN_N2SS = 4,
+ RTW_RATEID_GN_N1SS = 5,
+ RTW_RATEID_BG = 6,
+ RTW_RATEID_G = 7,
+ RTW_RATEID_B_20M = 8,
+ RTW_RATEID_ARFR0_AC_2SS = 9,
+ RTW_RATEID_ARFR1_AC_1SS = 10,
+ RTW_RATEID_ARFR2_AC_2G_1SS = 11,
+ RTW_RATEID_ARFR3_AC_2G_2SS = 12,
+ RTW_RATEID_ARFR4_AC_3SS = 13,
+ RTW_RATEID_ARFR5_N_3SS = 14,
+ RTW_RATEID_ARFR7_N_4SS = 15,
+ RTW_RATEID_ARFR6_AC_4SS = 16
+};
+
+enum rtw_trx_desc_rate {
+ DESC_RATE1M = 0x00,
+ DESC_RATE2M = 0x01,
+ DESC_RATE5_5M = 0x02,
+ DESC_RATE11M = 0x03,
+
+ DESC_RATE6M = 0x04,
+ DESC_RATE9M = 0x05,
+ DESC_RATE12M = 0x06,
+ DESC_RATE18M = 0x07,
+ DESC_RATE24M = 0x08,
+ DESC_RATE36M = 0x09,
+ DESC_RATE48M = 0x0a,
+ DESC_RATE54M = 0x0b,
+
+ DESC_RATEMCS0 = 0x0c,
+ DESC_RATEMCS1 = 0x0d,
+ DESC_RATEMCS2 = 0x0e,
+ DESC_RATEMCS3 = 0x0f,
+ DESC_RATEMCS4 = 0x10,
+ DESC_RATEMCS5 = 0x11,
+ DESC_RATEMCS6 = 0x12,
+ DESC_RATEMCS7 = 0x13,
+ DESC_RATEMCS8 = 0x14,
+ DESC_RATEMCS9 = 0x15,
+ DESC_RATEMCS10 = 0x16,
+ DESC_RATEMCS11 = 0x17,
+ DESC_RATEMCS12 = 0x18,
+ DESC_RATEMCS13 = 0x19,
+ DESC_RATEMCS14 = 0x1a,
+ DESC_RATEMCS15 = 0x1b,
+ DESC_RATEMCS16 = 0x1c,
+ DESC_RATEMCS17 = 0x1d,
+ DESC_RATEMCS18 = 0x1e,
+ DESC_RATEMCS19 = 0x1f,
+ DESC_RATEMCS20 = 0x20,
+ DESC_RATEMCS21 = 0x21,
+ DESC_RATEMCS22 = 0x22,
+ DESC_RATEMCS23 = 0x23,
+ DESC_RATEMCS24 = 0x24,
+ DESC_RATEMCS25 = 0x25,
+ DESC_RATEMCS26 = 0x26,
+ DESC_RATEMCS27 = 0x27,
+ DESC_RATEMCS28 = 0x28,
+ DESC_RATEMCS29 = 0x29,
+ DESC_RATEMCS30 = 0x2a,
+ DESC_RATEMCS31 = 0x2b,
+
+ DESC_RATEVHT1SS_MCS0 = 0x2c,
+ DESC_RATEVHT1SS_MCS1 = 0x2d,
+ DESC_RATEVHT1SS_MCS2 = 0x2e,
+ DESC_RATEVHT1SS_MCS3 = 0x2f,
+ DESC_RATEVHT1SS_MCS4 = 0x30,
+ DESC_RATEVHT1SS_MCS5 = 0x31,
+ DESC_RATEVHT1SS_MCS6 = 0x32,
+ DESC_RATEVHT1SS_MCS7 = 0x33,
+ DESC_RATEVHT1SS_MCS8 = 0x34,
+ DESC_RATEVHT1SS_MCS9 = 0x35,
+
+ DESC_RATEVHT2SS_MCS0 = 0x36,
+ DESC_RATEVHT2SS_MCS1 = 0x37,
+ DESC_RATEVHT2SS_MCS2 = 0x38,
+ DESC_RATEVHT2SS_MCS3 = 0x39,
+ DESC_RATEVHT2SS_MCS4 = 0x3a,
+ DESC_RATEVHT2SS_MCS5 = 0x3b,
+ DESC_RATEVHT2SS_MCS6 = 0x3c,
+ DESC_RATEVHT2SS_MCS7 = 0x3d,
+ DESC_RATEVHT2SS_MCS8 = 0x3e,
+ DESC_RATEVHT2SS_MCS9 = 0x3f,
+
+ DESC_RATEVHT3SS_MCS0 = 0x40,
+ DESC_RATEVHT3SS_MCS1 = 0x41,
+ DESC_RATEVHT3SS_MCS2 = 0x42,
+ DESC_RATEVHT3SS_MCS3 = 0x43,
+ DESC_RATEVHT3SS_MCS4 = 0x44,
+ DESC_RATEVHT3SS_MCS5 = 0x45,
+ DESC_RATEVHT3SS_MCS6 = 0x46,
+ DESC_RATEVHT3SS_MCS7 = 0x47,
+ DESC_RATEVHT3SS_MCS8 = 0x48,
+ DESC_RATEVHT3SS_MCS9 = 0x49,
+
+ DESC_RATEVHT4SS_MCS0 = 0x4a,
+ DESC_RATEVHT4SS_MCS1 = 0x4b,
+ DESC_RATEVHT4SS_MCS2 = 0x4c,
+ DESC_RATEVHT4SS_MCS3 = 0x4d,
+ DESC_RATEVHT4SS_MCS4 = 0x4e,
+ DESC_RATEVHT4SS_MCS5 = 0x4f,
+ DESC_RATEVHT4SS_MCS6 = 0x50,
+ DESC_RATEVHT4SS_MCS7 = 0x51,
+ DESC_RATEVHT4SS_MCS8 = 0x52,
+ DESC_RATEVHT4SS_MCS9 = 0x53,
+
+ DESC_RATE_MAX,
+};
+
+enum rtw_regulatory_domains {
+ RTW_REGD_FCC = 0,
+ RTW_REGD_MKK = 1,
+ RTW_REGD_ETSI = 2,
+ RTW_REGD_WW = 3,
+
+ RTW_REGD_MAX
+};
+
+enum rtw_flags {
+ RTW_FLAG_RUNNING,
+ RTW_FLAG_FW_RUNNING,
+ RTW_FLAG_SCANNING,
+ RTW_FLAG_INACTIVE_PS,
+ RTW_FLAG_LEISURE_PS,
+ RTW_FLAG_DIG_DISABLE,
+
+ NUM_OF_RTW_FLAGS,
+};
+
+/* the power index is represented by differences: cck-1s and ht40-1s are the
+ * base values, so for the 1ss case only the ht20 and ofdm differences are
+ * needed
+ */
+struct rtw_2g_1s_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 ofdm:4;
+ s8 bw20:4;
+#else
+ s8 bw20:4;
+ s8 ofdm:4;
+#endif
+} __packed;
+
+struct rtw_2g_ns_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 bw20:4;
+ s8 bw40:4;
+ s8 cck:4;
+ s8 ofdm:4;
+#else
+ s8 ofdm:4;
+ s8 cck:4;
+ s8 bw40:4;
+ s8 bw20:4;
+#endif
+} __packed;
+
+struct rtw_2g_txpwr_idx {
+ u8 cck_base[6];
+ u8 bw40_base[5];
+ struct rtw_2g_1s_pwr_idx_diff ht_1s_diff;
+ struct rtw_2g_ns_pwr_idx_diff ht_2s_diff;
+ struct rtw_2g_ns_pwr_idx_diff ht_3s_diff;
+ struct rtw_2g_ns_pwr_idx_diff ht_4s_diff;
+};
+
+struct rtw_5g_ht_1s_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 ofdm:4;
+ s8 bw20:4;
+#else
+ s8 bw20:4;
+ s8 ofdm:4;
+#endif
+} __packed;
+
+struct rtw_5g_ht_ns_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 bw20:4;
+ s8 bw40:4;
+#else
+ s8 bw40:4;
+ s8 bw20:4;
+#endif
+} __packed;
+
+struct rtw_5g_ofdm_ns_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 ofdm_3s:4;
+ s8 ofdm_2s:4;
+ s8 ofdm_4s:4;
+ s8 res:4;
+#else
+ s8 res:4;
+ s8 ofdm_4s:4;
+ s8 ofdm_2s:4;
+ s8 ofdm_3s:4;
+#endif
+} __packed;
+
+struct rtw_5g_vht_ns_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 bw160:4;
+ s8 bw80:4;
+#else
+ s8 bw80:4;
+ s8 bw160:4;
+#endif
+} __packed;
+
+struct rtw_5g_txpwr_idx {
+ u8 bw40_base[14];
+ struct rtw_5g_ht_1s_pwr_idx_diff ht_1s_diff;
+ struct rtw_5g_ht_ns_pwr_idx_diff ht_2s_diff;
+ struct rtw_5g_ht_ns_pwr_idx_diff ht_3s_diff;
+ struct rtw_5g_ht_ns_pwr_idx_diff ht_4s_diff;
+ struct rtw_5g_ofdm_ns_pwr_idx_diff ofdm_diff;
+ struct rtw_5g_vht_ns_pwr_idx_diff vht_1s_diff;
+ struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff;
+ struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff;
+ struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff;
+};
+
+struct rtw_txpwr_idx {
+ struct rtw_2g_txpwr_idx pwr_idx_2g;
+ struct rtw_5g_txpwr_idx pwr_idx_5g;
+};
+
+struct rtw_timer_list {
+ struct timer_list timer;
+ void (*function)(void *data);
+ void *args;
+};
+
+struct rtw_channel_params {
+ u8 center_chan;
+ u8 bandwidth;
+ u8 primary_chan_idx;
+};
+
+struct rtw_hw_reg {
+ u32 addr;
+ u32 mask;
+};
+
+struct rtw_backup_info {
+ u8 len;
+ u32 reg;
+ u32 val;
+};
+
+enum rtw_vif_port_set {
+ PORT_SET_MAC_ADDR = BIT(0),
+ PORT_SET_BSSID = BIT(1),
+ PORT_SET_NET_TYPE = BIT(2),
+ PORT_SET_AID = BIT(3),
+};
+
+struct rtw_vif_port {
+ struct rtw_hw_reg mac_addr;
+ struct rtw_hw_reg bssid;
+ struct rtw_hw_reg net_type;
+ struct rtw_hw_reg aid;
+};
+
+struct rtw_tx_pkt_info {
+ u32 tx_pkt_size;
+ u8 offset;
+ u8 pkt_offset;
+ u8 mac_id;
+ u8 rate_id;
+ u8 rate;
+ u8 qsel;
+ u8 bw;
+ u8 sec_type;
+ u8 sn;
+ bool ampdu_en;
+ u8 ampdu_factor;
+ u8 ampdu_density;
+ u16 seq;
+ bool stbc;
+ bool ldpc;
+ bool dis_rate_fallback;
+ bool bmc;
+ bool use_rate;
+ bool ls;
+ bool fs;
+ bool short_gi;
+ bool report;
+};
+
+struct rtw_rx_pkt_stat {
+ bool phy_status;
+ bool icv_err;
+ bool crc_err;
+ bool decrypted;
+ bool is_c2h;
+
+ s32 signal_power;
+ u16 pkt_len;
+ u8 bw;
+ u8 drv_info_sz;
+ u8 shift;
+ u8 rate;
+ u8 mac_id;
+ u8 cam_id;
+ u8 ppdu_cnt;
+ u32 tsf_low;
+ s8 rx_power[RTW_RF_PATH_MAX];
+ u8 rssi;
+ u8 rxsc;
+ struct rtw_sta_info *si;
+ struct ieee80211_vif *vif;
+};
+
+struct rtw_traffic_stats {
+ /* units in bytes */
+ u64 tx_unicast;
+ u64 rx_unicast;
+
+ /* count for packets */
+ u64 tx_cnt;
+ u64 rx_cnt;
+
+ /* units in Mbps */
+ u32 tx_throughput;
+ u32 rx_throughput;
+};
+
+enum rtw_lps_mode {
+ RTW_MODE_ACTIVE = 0,
+ RTW_MODE_LPS = 1,
+ RTW_MODE_WMM_PS = 2,
+};
+
+enum rtw_pwr_state {
+ RTW_RF_OFF = 0x0,
+ RTW_RF_ON = 0x4,
+ RTW_ALL_ON = 0xc,
+};
+
+struct rtw_lps_conf {
+ /* the interface to enter lps */
+ struct rtw_vif *rtwvif;
+ enum rtw_lps_mode mode;
+ enum rtw_pwr_state state;
+ u8 awake_interval;
+ u8 rlbm;
+ u8 smart_ps;
+ u8 port_id;
+};
+
+enum rtw_hw_key_type {
+ RTW_CAM_NONE = 0,
+ RTW_CAM_WEP40 = 1,
+ RTW_CAM_TKIP = 2,
+ RTW_CAM_AES = 4,
+ RTW_CAM_WEP104 = 5,
+};
+
+struct rtw_cam_entry {
+ bool valid;
+ bool group;
+ u8 addr[ETH_ALEN];
+ u8 hw_key_type;
+ struct ieee80211_key_conf *key;
+};
+
+struct rtw_sec_desc {
+ /* search strategy */
+ bool default_key_search;
+
+ u32 total_cam_num;
+ struct rtw_cam_entry cam_table[RTW_MAX_SEC_CAM_NUM];
+ DECLARE_BITMAP(cam_map, RTW_MAX_SEC_CAM_NUM);
+};
+
+struct rtw_tx_report {
+ /* protect the tx report queue */
+ spinlock_t q_lock;
+ struct sk_buff_head queue;
+ atomic_t sn;
+ struct timer_list purge_timer;
+};
+
+#define RTW_BC_MC_MACID 1
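+/* exponentially weighted moving average of rssi (precision 10, weight 16) */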
+DECLARE_EWMA(rssi, 10, 16);
+
+struct rtw_sta_info {
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+
+ struct ewma_rssi avg_rssi;
+ u8 rssi_level;
+
+ u8 mac_id;
+ u8 rate_id;
+ enum rtw_bandwidth bw_mode;
+ enum rtw_rf_type rf_type;
+ enum rtw_wireless_set wireless_set;
+ u8 stbc_en:2;
+ u8 ldpc_en:2;
+ bool sgi_enable;
+ bool vht_enable;
+ bool updated;
+ u8 init_ra_lv;
+ u64 ra_mask;
+};
+
+struct rtw_vif {
+ struct ieee80211_vif *vif;
+ enum rtw_net_type net_type;
+ u16 aid;
+ u8 mac_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 port;
+ const struct rtw_vif_port *conf;
+
+ struct rtw_traffic_stats stats;
+ bool in_lps;
+};
+
+struct rtw_regulatory {
+ char alpha2[2];
+ u8 chplan;
+ u8 txpwr_regd;
+};
+
+struct rtw_chip_ops {
+ int (*mac_init)(struct rtw_dev *rtwdev);
+ int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
+ void (*phy_set_param)(struct rtw_dev *rtwdev);
+ void (*set_channel)(struct rtw_dev *rtwdev, u8 channel,
+ u8 bandwidth, u8 primary_chan_idx);
+ void (*query_rx_desc)(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status);
+ u32 (*read_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask);
+ bool (*write_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+ void (*set_tx_power_index)(struct rtw_dev *rtwdev);
+ int (*rsvd_page_dump)(struct rtw_dev *rtwdev, u8 *buf, u32 offset,
+ u32 size);
+ void (*set_antenna)(struct rtw_dev *rtwdev, u8 antenna_tx,
+ u8 antenna_rx);
+ void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
+ void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
+ void (*do_iqk)(struct rtw_dev *rtwdev);
+};
+
+#define RTW_PWR_POLLING_CNT 20000
+
+#define RTW_PWR_CMD_READ 0x00
+#define RTW_PWR_CMD_WRITE 0x01
+#define RTW_PWR_CMD_POLLING 0x02
+#define RTW_PWR_CMD_DELAY 0x03
+#define RTW_PWR_CMD_END 0x04
+
+/* define the base address of each block */
+#define RTW_PWR_ADDR_MAC 0x00
+#define RTW_PWR_ADDR_USB 0x01
+#define RTW_PWR_ADDR_PCIE 0x02
+#define RTW_PWR_ADDR_SDIO 0x03
+
+#define RTW_PWR_INTF_SDIO_MSK BIT(0)
+#define RTW_PWR_INTF_USB_MSK BIT(1)
+#define RTW_PWR_INTF_PCI_MSK BIT(2)
+#define RTW_PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define RTW_PWR_CUT_A_MSK BIT(1)
+#define RTW_PWR_CUT_B_MSK BIT(2)
+#define RTW_PWR_CUT_C_MSK BIT(3)
+#define RTW_PWR_CUT_D_MSK BIT(4)
+#define RTW_PWR_CUT_E_MSK BIT(5)
+#define RTW_PWR_CUT_F_MSK BIT(6)
+#define RTW_PWR_CUT_G_MSK BIT(7)
+#define RTW_PWR_CUT_ALL_MSK 0xFF
+
+enum rtw_pwr_seq_cmd_delay_unit {
+ RTW_PWR_DELAY_US,
+ RTW_PWR_DELAY_MS,
+};
+
+struct rtw_pwr_seq_cmd {
+ u16 offset;
+ u8 cut_mask;
+ u8 intf_mask;
+ u8 base:4;
+ u8 cmd:4;
+ u8 mask;
+ u8 value;
+};
+
+enum rtw_chip_ver {
+ RTW_CHIP_VER_CUT_A = 0x00,
+ RTW_CHIP_VER_CUT_B = 0x01,
+ RTW_CHIP_VER_CUT_C = 0x02,
+ RTW_CHIP_VER_CUT_D = 0x03,
+ RTW_CHIP_VER_CUT_E = 0x04,
+ RTW_CHIP_VER_CUT_F = 0x05,
+ RTW_CHIP_VER_CUT_G = 0x06,
+};
+
+#define RTW_INTF_PHY_PLATFORM_ALL 0
+
+enum rtw_intf_phy_cut {
+ RTW_INTF_PHY_CUT_A = BIT(0),
+ RTW_INTF_PHY_CUT_B = BIT(1),
+ RTW_INTF_PHY_CUT_C = BIT(2),
+ RTW_INTF_PHY_CUT_D = BIT(3),
+ RTW_INTF_PHY_CUT_E = BIT(4),
+ RTW_INTF_PHY_CUT_F = BIT(5),
+ RTW_INTF_PHY_CUT_G = BIT(6),
+ RTW_INTF_PHY_CUT_ALL = 0xFFFF,
+};
+
+enum rtw_ip_sel {
+ RTW_IP_SEL_PHY = 0,
+ RTW_IP_SEL_MAC = 1,
+ RTW_IP_SEL_DBI = 2,
+
+ RTW_IP_SEL_UNDEF = 0xFFFF
+};
+
+enum rtw_pq_map_id {
+ RTW_PQ_MAP_VO = 0x0,
+ RTW_PQ_MAP_VI = 0x1,
+ RTW_PQ_MAP_BE = 0x2,
+ RTW_PQ_MAP_BK = 0x3,
+ RTW_PQ_MAP_MG = 0x4,
+ RTW_PQ_MAP_HI = 0x5,
+ RTW_PQ_MAP_NUM = 0x6,
+
+ RTW_PQ_MAP_UNDEF,
+};
+
+enum rtw_dma_mapping {
+ RTW_DMA_MAPPING_EXTRA = 0,
+ RTW_DMA_MAPPING_LOW = 1,
+ RTW_DMA_MAPPING_NORMAL = 2,
+ RTW_DMA_MAPPING_HIGH = 3,
+
+ RTW_DMA_MAPPING_UNDEF,
+};
+
+struct rtw_rqpn {
+ enum rtw_dma_mapping dma_map_vo;
+ enum rtw_dma_mapping dma_map_vi;
+ enum rtw_dma_mapping dma_map_be;
+ enum rtw_dma_mapping dma_map_bk;
+ enum rtw_dma_mapping dma_map_mg;
+ enum rtw_dma_mapping dma_map_hi;
+};
+
+struct rtw_page_table {
+ u16 hq_num;
+ u16 nq_num;
+ u16 lq_num;
+ u16 exq_num;
+ u16 gapq_num;
+};
+
+struct rtw_intf_phy_para {
+ u16 offset;
+ u16 value;
+ u16 ip_sel;
+ u16 cut_mask;
+ u16 platform;
+};
+
+struct rtw_intf_phy_para_table {
+ struct rtw_intf_phy_para *usb2_para;
+ struct rtw_intf_phy_para *usb3_para;
+ struct rtw_intf_phy_para *gen1_para;
+ struct rtw_intf_phy_para *gen2_para;
+ u8 n_usb2_para;
+ u8 n_usb3_para;
+ u8 n_gen1_para;
+ u8 n_gen2_para;
+};
+
+struct rtw_table {
+ const void *data;
+ const u32 size;
+ void (*parse)(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
+ void (*do_cfg)(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+ enum rtw_rf_path rf_path;
+};
+
+static inline void rtw_load_table(struct rtw_dev *rtwdev,
+ const struct rtw_table *tbl)
+{
+ (*tbl->parse)(rtwdev, tbl);
+}
+
+enum rtw_rfe_fem {
+ RTW_RFE_IFEM,
+ RTW_RFE_EFEM,
+ RTW_RFE_IFEM2G_EFEM5G,
+ RTW_RFE_NUM,
+};
+
+struct rtw_rfe_def {
+ const struct rtw_table *phy_pg_tbl;
+ const struct rtw_table *txpwr_lmt_tbl;
+};
+
+#define RTW_DEF_RFE(chip, bb_pg, pwrlmt) { \
+ .phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \
+ .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \
+ }
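+/* for example (values for illustration only), RTW_DEF_RFE(8822b, 2, 5)
+ * expands to &rtw8822b_bb_pg_type2_tbl and &rtw8822b_txpwr_lmt_type5_tbl
+ */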
+
+/* hardware configuration for each IC */
+struct rtw_chip_info {
+ struct rtw_chip_ops *ops;
+ u8 id;
+
+ const char *fw_name;
+ u8 tx_pkt_desc_sz;
+ u8 tx_buf_desc_sz;
+ u8 rx_pkt_desc_sz;
+ u8 rx_buf_desc_sz;
+ u32 phy_efuse_size;
+ u32 log_efuse_size;
+ u32 ptct_efuse_size;
+ u32 txff_size;
+ u32 rxff_size;
+ u8 band;
+ u8 page_size;
+ u8 csi_buf_pg_num;
+ u8 dig_max;
+ u8 dig_min;
+ u8 txgi_factor;
+ bool is_pwr_by_rate_dec;
+ u8 max_power_index;
+
+ bool ht_supported;
+ bool vht_supported;
+
+ /* init values */
+ u8 sys_func_en;
+ struct rtw_pwr_seq_cmd **pwr_on_seq;
+ struct rtw_pwr_seq_cmd **pwr_off_seq;
+ struct rtw_rqpn *rqpn_table;
+ struct rtw_page_table *page_table;
+ struct rtw_intf_phy_para_table *intf_table;
+
+ struct rtw_hw_reg *dig;
+ u32 rf_base_addr[2];
+ u32 rf_sipi_addr[2];
+
+ const struct rtw_table *mac_tbl;
+ const struct rtw_table *agc_tbl;
+ const struct rtw_table *bb_tbl;
+ const struct rtw_table *rf_tbl[RTW_RF_PATH_MAX];
+ const struct rtw_table *rfk_init_tbl;
+
+ const struct rtw_rfe_def *rfe_defs;
+ u32 rfe_defs_size;
+};
+
+struct rtw_dm_info {
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+ u32 total_fa_cnt;
+ u8 min_rssi;
+ u8 pre_min_rssi;
+ u16 fa_history[4];
+ u8 igi_history[4];
+ u8 igi_bitmap;
+ bool damping;
+ u8 damping_cnt;
+ u8 damping_rssi;
+
+ u8 cck_gi_u_bnd;
+ u8 cck_gi_l_bnd;
+};
+
+struct rtw_efuse {
+ u32 size;
+ u32 physical_size;
+ u32 logical_size;
+ u32 protect_size;
+
+ u8 addr[ETH_ALEN];
+ u8 channel_plan;
+ u8 country_code[2];
+ u8 rfe_option;
+ u8 thermal_meter;
+ u8 crystal_cap;
+ u8 ant_div_cfg;
+ u8 ant_div_type;
+ u8 regd;
+
+ u8 lna_type_2g;
+ u8 lna_type_5g;
+ u8 glna_type;
+ u8 alna_type;
+ bool ext_lna_2g;
+ bool ext_lna_5g;
+ u8 pa_type_2g;
+ u8 pa_type_5g;
+ u8 gpa_type;
+ u8 apa_type;
+ bool ext_pa_2g;
+ bool ext_pa_5g;
+
+ bool btcoex;
+ /* bt shares the antenna with wifi */
+ bool share_ant;
+ u8 bt_setting;
+
+ struct {
+ u8 hci;
+ u8 bw;
+ u8 ptcl;
+ u8 nss;
+ u8 ant_num;
+ } hw_cap;
+
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+};
+
+struct rtw_phy_cond {
+#ifdef __LITTLE_ENDIAN
+ u32 rfe:8;
+ u32 intf:4;
+ u32 pkg:4;
+ u32 plat:4;
+ u32 intf_rsvd:4;
+ u32 cut:4;
+ u32 branch:2;
+ u32 neg:1;
+ u32 pos:1;
+#else
+ u32 pos:1;
+ u32 neg:1;
+ u32 branch:2;
+ u32 cut:4;
+ u32 intf_rsvd:4;
+ u32 plat:4;
+ u32 pkg:4;
+ u32 intf:4;
+ u32 rfe:8;
+#endif
+ /* for intf:4 */
+ #define INTF_PCIE BIT(0)
+ #define INTF_USB BIT(1)
+ #define INTF_SDIO BIT(2)
+ /* for branch:2 */
+ #define BRANCH_IF 0
+ #define BRANCH_ELIF 1
+ #define BRANCH_ELSE 2
+ #define BRANCH_ENDIF 3
+};
+
+struct rtw_fifo_conf {
+ /* tx fifo information */
+ u16 rsvd_boundary;
+ u16 rsvd_pg_num;
+ u16 rsvd_drv_pg_num;
+ u16 txff_pg_num;
+ u16 acq_pg_num;
+ u16 rsvd_drv_addr;
+ u16 rsvd_h2c_info_addr;
+ u16 rsvd_h2c_sta_info_addr;
+ u16 rsvd_h2cq_addr;
+ u16 rsvd_cpu_instr_addr;
+ u16 rsvd_fw_txbuf_addr;
+ u16 rsvd_csibuf_addr;
+ enum rtw_dma_mapping pq_map[RTW_PQ_MAP_NUM];
+};
+
+struct rtw_fw_state {
+ const struct firmware *firmware;
+ struct completion completion;
+ u16 version;
+ u8 sub_version;
+ u8 sub_index;
+ u16 h2c_version;
+};
+
+struct rtw_hal {
+ u32 rcr;
+
+ u32 chip_version;
+ u8 fab_version;
+ u8 cut_version;
+ u8 mp_chip;
+ u8 oem_id;
+ struct rtw_phy_cond phy_cond;
+
+ u8 ps_mode;
+ u8 current_channel;
+ u8 current_band_width;
+ u8 current_band_type;
+ u8 sec_ch_offset;
+ u8 rf_type;
+ u8 rf_path_num;
+ u8 antenna_tx;
+ u8 antenna_rx;
+
+ /* protect tx power section */
+ struct mutex tx_power_mutex;
+ s8 tx_pwr_by_rate_offset_2g[RTW_RF_PATH_MAX]
+ [DESC_RATE_MAX];
+ s8 tx_pwr_by_rate_offset_5g[RTW_RF_PATH_MAX]
+ [DESC_RATE_MAX];
+ s8 tx_pwr_by_rate_base_2g[RTW_RF_PATH_MAX]
+ [RTW_RATE_SECTION_MAX];
+ s8 tx_pwr_by_rate_base_5g[RTW_RF_PATH_MAX]
+ [RTW_RATE_SECTION_MAX];
+ s8 tx_pwr_limit_2g[RTW_REGD_MAX]
+ [RTW_CHANNEL_WIDTH_MAX]
+ [RTW_RATE_SECTION_MAX]
+ [RTW_MAX_CHANNEL_NUM_2G];
+ s8 tx_pwr_limit_5g[RTW_REGD_MAX]
+ [RTW_CHANNEL_WIDTH_MAX]
+ [RTW_RATE_SECTION_MAX]
+ [RTW_MAX_CHANNEL_NUM_5G];
+ s8 tx_pwr_tbl[RTW_RF_PATH_MAX]
+ [DESC_RATE_MAX];
+};
+
+struct rtw_dev {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+
+ struct rtw_hci hci;
+
+ struct rtw_chip_info *chip;
+ struct rtw_hal hal;
+ struct rtw_fifo_conf fifo;
+ struct rtw_fw_state fw;
+ struct rtw_efuse efuse;
+ struct rtw_sec_desc sec;
+ struct rtw_traffic_stats stats;
+ struct rtw_regulatory regd;
+
+ struct rtw_dm_info dm_info;
+
+ /* ensures exclusive access from mac80211 callbacks */
+ struct mutex mutex;
+
+ /* lock for dm to use */
+ spinlock_t dm_lock;
+
+ /* read/write rf register */
+ spinlock_t rf_lock;
+
+ /* watch dog every 2 sec */
+ struct delayed_work watch_dog_work;
+ u32 watch_dog_cnt;
+
+ struct list_head rsvd_page_list;
+
+ /* c2h cmd queue & handler work */
+ struct sk_buff_head c2h_queue;
+ struct work_struct c2h_work;
+
+ struct rtw_tx_report tx_report;
+
+ struct {
+ /* indicate the mailbox to use with fw */
+ u8 last_box_num;
+ /* protect to send h2c to fw */
+ spinlock_t lock;
+ u32 seq;
+ } h2c;
+
+ /* lps power state & handler work */
+ struct rtw_lps_conf lps_conf;
+ struct delayed_work lps_work;
+
+ struct dentry *debugfs;
+
+ u8 sta_cnt;
+
+ DECLARE_BITMAP(mac_id_map, RTW_MAX_MAC_ID_NUM);
+ DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
+
+ u8 mp_mode;
+
+ /* hci related data, must be last */
+ u8 priv[0] __aligned(sizeof(void *));
+};
+
+#include "hci.h"
+
+static inline bool rtw_flag_check(struct rtw_dev *rtwdev, enum rtw_flags flag)
+{
+ return test_bit(flag, rtwdev->flags);
+}
+
+static inline void rtw_flag_clear(struct rtw_dev *rtwdev, enum rtw_flags flag)
+{
+ clear_bit(flag, rtwdev->flags);
+}
+
+static inline void rtw_flag_set(struct rtw_dev *rtwdev, enum rtw_flags flag)
+{
+ set_bit(flag, rtwdev->flags);
+}
+
+void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
+ struct rtw_channel_params *ch_param);
+bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
+bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val);
+bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value);
+void rtw_restore_reg(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *bckp, u32 num);
+void rtw_set_channel(struct rtw_dev *rtwdev);
+void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
+ u32 config);
+void rtw_tx_report_purge_timer(struct timer_list *t);
+void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+int rtw_core_start(struct rtw_dev *rtwdev);
+void rtw_core_stop(struct rtw_dev *rtwdev);
+int rtw_chip_info_setup(struct rtw_dev *rtwdev);
+int rtw_core_init(struct rtw_dev *rtwdev);
+void rtw_core_deinit(struct rtw_dev *rtwdev);
+int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
+void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
new file mode 100644
index 000000000000..cfe05ba7280d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "main.h"
+#include "pci.h"
+#include "tx.h"
+#include "rx.h"
+#include "debug.h"
+
+static u32 rtw_pci_tx_queue_idx_addr[] = {
+ [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
+ [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
+ [RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
+ [RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ,
+ [RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ,
+ [RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q,
+ [RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
+};
+
+static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
+{
+ switch (queue) {
+ case RTW_TX_QUEUE_BCN:
+ return TX_DESC_QSEL_BEACON;
+ case RTW_TX_QUEUE_H2C:
+ return TX_DESC_QSEL_H2C;
+ case RTW_TX_QUEUE_MGMT:
+ return TX_DESC_QSEL_MGMT;
+ case RTW_TX_QUEUE_HI0:
+ return TX_DESC_QSEL_HIGH;
+ default:
+ return skb->priority;
+ }
+}
+
+static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ return readb(rtwpci->mmap + addr);
+}
+
+static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ return readw(rtwpci->mmap + addr);
+}
+
+static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ return readl(rtwpci->mmap + addr);
+}
+
+static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ writeb(val, rtwpci->mmap + addr);
+}
+
+static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ writew(val, rtwpci->mmap + addr);
+}
+
+static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ writel(val, rtwpci->mmap + addr);
+}
+
+static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
+{
+ int offset = tx_ring->r.desc_size * idx;
+
+ return tx_ring->r.head + offset;
+}
+
+static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ struct rtw_pci_tx_data *tx_data;
+ struct sk_buff *skb, *tmp;
+ dma_addr_t dma;
+ u8 *head = tx_ring->r.head;
+ u32 len = tx_ring->r.len;
+ int ring_sz = len * tx_ring->r.desc_size;
+
+ /* free every skb remaining in the tx list */
+ skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
+ __skb_unlink(skb, &tx_ring->queue);
+ tx_data = rtw_pci_get_tx_data(skb);
+ dma = tx_data->dma;
+
+ pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* free the ring itself */
+ pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
+ tx_ring->r.head = NULL;
+}
+
+static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u8 *head = rx_ring->r.head;
+ int buf_sz = RTK_PCI_RX_BUF_SIZE;
+ int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
+ int i;
+
+ for (i = 0; i < rx_ring->r.len; i++) {
+ skb = rx_ring->buf[i];
+ if (!skb)
+ continue;
+
+ dma = *((dma_addr_t *)skb->cb);
+ pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ rx_ring->buf[i] = NULL;
+ }
+
+ pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
+}
+
+static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *tx_ring;
+ struct rtw_pci_rx_ring *rx_ring;
+ int i;
+
+ for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ rtw_pci_free_tx_ring(rtwdev, tx_ring);
+ }
+
+ for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ rtw_pci_free_rx_ring(rtwdev, rx_ring);
+ }
+}
+
+static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_tx_ring *tx_ring,
+ u8 desc_size, u32 len)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ int ring_sz = desc_size * len;
+ dma_addr_t dma;
+ u8 *head;
+
+ head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+ if (!head) {
+ rtw_err(rtwdev, "failed to allocate tx ring\n");
+ return -ENOMEM;
+ }
+
+ skb_queue_head_init(&tx_ring->queue);
+ tx_ring->r.head = head;
+ tx_ring->r.dma = dma;
+ tx_ring->r.len = len;
+ tx_ring->r.desc_size = desc_size;
+ tx_ring->r.wp = 0;
+ tx_ring->r.rp = 0;
+
+ return 0;
+}
+
+static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct rtw_pci_rx_ring *rx_ring,
+ u32 idx, u32 desc_sz)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ struct rtw_pci_rx_buffer_desc *buf_desc;
+ int buf_sz = RTK_PCI_RX_BUF_SIZE;
+ dma_addr_t dma;
+
+ if (!skb)
+ return -EINVAL;
+
+ dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, dma))
+ return -EBUSY;
+
+ *((dma_addr_t *)skb->cb) = dma;
+ buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
+ idx * desc_sz);
+ memset(buf_desc, 0, sizeof(*buf_desc));
+ buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
+ buf_desc->dma = cpu_to_le32(dma);
+
+ return 0;
+}
+
+static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_rx_ring *rx_ring,
+ u8 desc_size, u32 len)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ struct sk_buff *skb = NULL;
+ dma_addr_t dma;
+ u8 *head;
+ int ring_sz = desc_size * len;
+ int buf_sz = RTK_PCI_RX_BUF_SIZE;
+ int i, allocated;
+ int ret = 0;
+
+ head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+ if (!head) {
+ rtw_err(rtwdev, "failed to allocate rx ring\n");
+ return -ENOMEM;
+ }
+ rx_ring->r.head = head;
+
+ for (i = 0; i < len; i++) {
+ skb = dev_alloc_skb(buf_sz);
+ if (!skb) {
+ allocated = i;
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skb->data, 0, buf_sz);
+ rx_ring->buf[i] = skb;
+ ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
+ if (ret) {
+ allocated = i;
+ dev_kfree_skb_any(skb);
+ goto err_out;
+ }
+ }
+
+ rx_ring->r.dma = dma;
+ rx_ring->r.len = len;
+ rx_ring->r.desc_size = desc_size;
+ rx_ring->r.wp = 0;
+ rx_ring->r.rp = 0;
+
+ return 0;
+
+err_out:
+ for (i = 0; i < allocated; i++) {
+ skb = rx_ring->buf[i];
+ if (!skb)
+ continue;
+ dma = *((dma_addr_t *)skb->cb);
+ pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ rx_ring->buf[i] = NULL;
+ }
+ pci_free_consistent(pdev, ring_sz, head, dma);
+
+ rtw_err(rtwdev, "failed to init rx buffer\n");
+
+ return ret;
+}
+
+static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *tx_ring;
+ struct rtw_pci_rx_ring *rx_ring;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
+ int tx_desc_size, rx_desc_size;
+ u32 len;
+ int ret;
+
+ tx_desc_size = chip->tx_buf_desc_sz;
+
+ for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ len = max_num_of_tx_queue(i);
+ ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
+ if (ret)
+ goto out;
+ }
+
+ rx_desc_size = chip->rx_buf_desc_sz;
+
+ for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
+ rx_ring = &rtwpci->rx_rings[j];
+ ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
+ RTK_MAX_RX_DESC_NUM);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ tx_alloced = i;
+ for (i = 0; i < tx_alloced; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ rtw_pci_free_tx_ring(rtwdev, tx_ring);
+ }
+
+ rx_alloced = j;
+ for (j = 0; j < rx_alloced; j++) {
+ rx_ring = &rtwpci->rx_rings[j];
+ rtw_pci_free_rx_ring(rtwdev, rx_ring);
+ }
+
+ return ret;
+}
+
+static void rtw_pci_deinit(struct rtw_dev *rtwdev)
+{
+ rtw_pci_free_trx_ring(rtwdev);
+}
+
+static int rtw_pci_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ int ret = 0;
+
+ rtwpci->irq_mask[0] = IMR_HIGHDOK |
+ IMR_MGNTDOK |
+ IMR_BKDOK |
+ IMR_BEDOK |
+ IMR_VIDOK |
+ IMR_VODOK |
+ IMR_ROK |
+ IMR_BCNDMAINT_E |
+ 0;
+ rtwpci->irq_mask[1] = IMR_TXFOVW |
+ 0;
+ rtwpci->irq_mask[3] = IMR_H2CDOK |
+ 0;
+ spin_lock_init(&rtwpci->irq_lock);
+ ret = rtw_pci_init_trx_ring(rtwdev);
+
+ return ret;
+}
+
+static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ u32 len;
+ u8 tmp;
+ dma_addr_t dma;
+
+ tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
+ rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
+
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
+
+ len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
+ dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
+ rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
+ rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
+ rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
+
+ /* reset read/write point */
+ rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
+
+ /* reset H2C queue index */
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
+}
+
+static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
+{
+ rtw_pci_reset_buf_desc(rtwdev);
+}
+
+static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
+ struct rtw_pci *rtwpci)
+{
+ rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
+ rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
+ rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
+ rtwpci->irq_enabled = true;
+}
+
+static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
+ struct rtw_pci *rtwpci)
+{
+ rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
+ rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
+ rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
+ rtwpci->irq_enabled = false;
+}
+
+static int rtw_pci_setup(struct rtw_dev *rtwdev)
+{
+ rtw_pci_reset_trx_ring(rtwdev);
+
+ return 0;
+}
+
+static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
+{
+ /* reset dma and rx tag */
+ rtw_write32_set(rtwdev, RTK_PCI_CTRL,
+ BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
+ rtwpci->rx_tag = 0;
+}
+
+static int rtw_pci_start(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ rtw_pci_dma_reset(rtwdev, rtwpci);
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw_pci_enable_interrupt(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+
+ return 0;
+}
+
+static void rtw_pci_stop(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw_pci_disable_interrupt(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
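+/* map mac80211 AC index (IEEE80211_AC_VO..IEEE80211_AC_BK) to hw tx queue */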
+static u8 ac_to_hwq[] = {
+ [0] = RTW_TX_QUEUE_VO,
+ [1] = RTW_TX_QUEUE_VI,
+ [2] = RTW_TX_QUEUE_BE,
+ [3] = RTW_TX_QUEUE_BK,
+};
+
+static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u8 q_mapping = skb_get_queue_mapping(skb);
+ u8 queue;
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ queue = RTW_TX_QUEUE_BCN;
+ else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
+ queue = RTW_TX_QUEUE_MGMT;
+ else
+ queue = ac_to_hwq[q_mapping];
+
+ return queue;
+}
+
+static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
+ struct rtw_pci_tx_ring *ring)
+{
+ struct sk_buff *prev = skb_dequeue(&ring->queue);
+ struct rtw_pci_tx_data *tx_data;
+ dma_addr_t dma;
+
+ if (!prev)
+ return;
+
+ tx_data = rtw_pci_get_tx_data(prev);
+ dma = tx_data->dma;
+ pci_unmap_single(rtwpci->pdev, dma, prev->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(prev);
+}
+
+static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
+ struct rtw_pci_rx_ring *rx_ring,
+ u32 idx)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pci_rx_buffer_desc *buf_desc;
+ u32 desc_sz = chip->rx_buf_desc_sz;
+ u16 total_pkt_size;
+
+ buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
+ idx * desc_sz);
+ total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
+
+ /* rx tag mismatch, throw a warning */
+ if (total_pkt_size != rtwpci->rx_tag)
+ rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
+
+ rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
+}
+
+static int rtw_pci_xmit(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb, u8 queue)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pci_tx_ring *ring;
+ struct rtw_pci_tx_data *tx_data;
+ dma_addr_t dma;
+ u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
+ u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
+ u32 size;
+ u32 psb_len;
+ u8 *pkt_desc;
+ struct rtw_pci_tx_buffer_desc *buf_desc;
+ u32 bd_idx;
+
+ ring = &rtwpci->tx_rings[queue];
+
+ size = skb->len;
+
+ if (queue == RTW_TX_QUEUE_BCN)
+ rtw_pci_release_rsvd_page(rtwpci, ring);
+ else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
+ return -ENOSPC;
+
+ pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
+ memset(pkt_desc, 0, tx_pkt_desc_sz);
+ pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
+ rtw_tx_fill_tx_desc(pkt_info, skb);
+ dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(rtwpci->pdev, dma))
+ return -EBUSY;
+
+ /* after this the dma is mapped, there is no way back */
+ buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
+ memset(buf_desc, 0, tx_buf_desc_sz);
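+ /* psb_len: buffer length in 128-byte blocks, rounded up */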
+ psb_len = (skb->len - 1) / 128 + 1;
+ if (queue == RTW_TX_QUEUE_BCN)
+ psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
+
+ buf_desc[0].psb_len = cpu_to_le16(psb_len);
+ buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
+ buf_desc[0].dma = cpu_to_le32(dma);
+ buf_desc[1].buf_size = cpu_to_le16(size);
+ buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
+
+ tx_data = rtw_pci_get_tx_data(skb);
+ tx_data->dma = dma;
+ tx_data->sn = pkt_info->sn;
+ skb_queue_tail(&ring->queue, skb);
+
+ /* kick off tx queue */
+ if (queue != RTW_TX_QUEUE_BCN) {
+ if (++ring->r.wp >= ring->r.len)
+ ring->r.wp = 0;
+ bd_idx = rtw_pci_tx_queue_idx_addr[queue];
+ rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
+ } else {
+ u32 reg_bcn_work;
+
+ reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
+ reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
+ rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
+ }
+
+ return 0;
+}
+
+static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
+ u32 size)
+{
+ struct sk_buff *skb;
+ struct rtw_tx_pkt_info pkt_info;
+ u32 tx_pkt_desc_sz;
+ u32 length;
+
+ tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
+ length = size + tx_pkt_desc_sz;
+ skb = dev_alloc_skb(length);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, tx_pkt_desc_sz);
+ memcpy((u8 *)skb_put(skb, size), buf, size);
+ memset(&pkt_info, 0, sizeof(pkt_info));
+ pkt_info.tx_pkt_size = size;
+ pkt_info.offset = tx_pkt_desc_sz;
+
+ return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
+}
+
+static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+{
+ struct sk_buff *skb;
+ struct rtw_tx_pkt_info pkt_info;
+ u32 tx_pkt_desc_sz;
+ u32 length;
+
+ tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
+ length = size + tx_pkt_desc_sz;
+ skb = dev_alloc_skb(length);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, tx_pkt_desc_sz);
+ memcpy((u8 *)skb_put(skb, size), buf, size);
+ memset(&pkt_info, 0, sizeof(pkt_info));
+ pkt_info.tx_pkt_size = size;
+
+ return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
+}
+
+static int rtw_pci_tx(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *ring;
+ u8 queue = rtw_hw_queue_mapping(skb);
+ int ret;
+
+ ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
+ if (ret)
+ return ret;
+
+ ring = &rtwpci->tx_rings[queue];
+ if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
+ ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
+ ring->queue_stopped = true;
+ }
+
+ return 0;
+}
+
+static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ u8 hw_queue)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct ieee80211_tx_info *info;
+ struct rtw_pci_tx_ring *ring;
+ struct rtw_pci_tx_data *tx_data;
+ struct sk_buff *skb;
+ u32 count;
+ u32 bd_idx_addr;
+ u32 bd_idx, cur_rp;
+ u16 q_map;
+
+ ring = &rtwpci->tx_rings[hw_queue];
+
+ bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
+ bd_idx = rtw_read32(rtwdev, bd_idx_addr);
+ cur_rp = bd_idx >> 16;
+ cur_rp &= 0xfff;
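+ /* descriptors completed by hw since last isr, with ring wrap handled */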
+ if (cur_rp >= ring->r.rp)
+ count = cur_rp - ring->r.rp;
+ else
+ count = ring->r.len - (ring->r.rp - cur_rp);
+
+ while (count--) {
+ skb = skb_dequeue(&ring->queue);
+ tx_data = rtw_pci_get_tx_data(skb);
+ pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* just free command packets from host to card */
+ if (hw_queue == RTW_TX_QUEUE_H2C) {
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+
+ if (ring->queue_stopped &&
+ avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
+ q_map = skb_get_queue_mapping(skb);
+ ieee80211_wake_queue(hw, q_map);
+ ring->queue_stopped = false;
+ }
+
+ skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
+
+ info = IEEE80211_SKB_CB(skb);
+
+ /* enqueue to wait for tx report */
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
+ continue;
+ }
+
+ /* always report ACK for the others so they won't be marked as dropped */
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_info_clear_status(info);
+ ieee80211_tx_status_irqsafe(hw, skb);
+ }
+
+ ring->r.rp = cur_rp;
+}
+
+static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ u8 hw_queue)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pci_rx_ring *ring;
+ struct rtw_rx_pkt_stat pkt_stat;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb, *new;
+ u32 cur_wp, cur_rp, tmp;
+ u32 count;
+ u32 pkt_offset;
+ u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
+ u32 buf_desc_sz = chip->rx_buf_desc_sz;
+ u8 *rx_desc;
+ dma_addr_t dma;
+
+ ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
+
+ tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
+ cur_wp = tmp >> 16;
+ cur_wp &= 0xfff;
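+ /* rx buffers filled by hw since last isr, with ring wrap handled */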
+ if (cur_wp >= ring->r.wp)
+ count = cur_wp - ring->r.wp;
+ else
+ count = ring->r.len - (ring->r.wp - cur_wp);
+
+ cur_rp = ring->r.rp;
+ while (count--) {
+ rtw_pci_dma_check(rtwdev, ring, cur_rp);
+ skb = ring->buf[cur_rp];
+ dma = *((dma_addr_t *)skb->cb);
+ pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rx_desc = skb->data;
+ chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
+
+ /* offset from rx_desc to payload */
+ pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
+ pkt_stat.shift;
+
+ if (pkt_stat.is_c2h) {
+ /* keep rx_desc, halmac needs it */
+ skb_put(skb, pkt_stat.pkt_len + pkt_offset);
+
+ /* pass offset for further operation */
+ *((u32 *)skb->cb) = pkt_offset;
+ skb_queue_tail(&rtwdev->c2h_queue, skb);
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ } else {
+ /* remove rx_desc, maybe use skb_pull? */
+ skb_put(skb, pkt_stat.pkt_len);
+ skb_reserve(skb, pkt_offset);
+
+ /* alloc a smaller skb to hand to mac80211 */
+ new = dev_alloc_skb(pkt_stat.pkt_len);
+ if (!new) {
+ new = skb;
+ } else {
+ skb_put_data(new, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+ /* TODO: merge into rx.c */
+ rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
+ memcpy(new->cb, &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(rtwdev->hw, new);
+ }
+
+ /* skb delivered to mac80211, alloc a new one in rx ring */
+ new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
+ if (WARN(!new, "rx routine starvation\n"))
+ return;
+
+ ring->buf[cur_rp] = new;
+ rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);
+
+ /* host reads the next element in the ring */
+ if (++cur_rp >= ring->r.len)
+ cur_rp = 0;
+ }
+
+ ring->r.rp = cur_rp;
+ ring->r.wp = cur_wp;
+ rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
+}
+
+static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
+ struct rtw_pci *rtwpci, u32 *irq_status)
+{
+ irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
+ irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
+ irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
+ irq_status[0] &= rtwpci->irq_mask[0];
+ irq_status[1] &= rtwpci->irq_mask[1];
+ irq_status[3] &= rtwpci->irq_mask[3];
+ rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
+ rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
+ rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
+}
+
+static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
+{
+ struct rtw_dev *rtwdev = dev;
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ u32 irq_status[4];
+
+ spin_lock(&rtwpci->irq_lock);
+ if (!rtwpci->irq_enabled)
+ goto out;
+
+ rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
+
+ if (irq_status[0] & IMR_MGNTDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
+ if (irq_status[0] & IMR_HIGHDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
+ if (irq_status[0] & IMR_BEDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
+ if (irq_status[0] & IMR_BKDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
+ if (irq_status[0] & IMR_VODOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
+ if (irq_status[0] & IMR_VIDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
+ if (irq_status[3] & IMR_H2CDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
+ if (irq_status[0] & IMR_ROK)
+ rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
+
+out:
+ spin_unlock(&rtwpci->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long len;
+ u8 bar_id = 2;
+ int ret;
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ rtw_err(rtwdev, "failed to request pci regions\n");
+ return ret;
+ }
+
+ len = pci_resource_len(pdev, bar_id);
+ rtwpci->mmap = pci_iomap(pdev, bar_id, len);
+ if (!rtwpci->mmap) {
+ rtw_err(rtwdev, "failed to map pci memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ if (rtwpci->mmap) {
+ pci_iounmap(pdev, rtwpci->mmap);
+ pci_release_regions(pdev);
+ }
+}
+
+static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
+{
+ u16 write_addr;
+ u16 remainder = addr & 0x3;
+ u8 flag;
+ u8 cnt = 20;
+
+ write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
+ rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
+ rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
+ rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
+
+ flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
+ while (flag && (cnt != 0)) {
+ udelay(10);
+ flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
+ cnt--;
+ }
+
+ WARN(flag, "DBI write fail\n");
+}
+
+static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
+{
+ u8 page;
+ u8 wflag;
+ u8 cnt;
+
+ rtw_write16(rtwdev, REG_MDIO_V1, data);
+
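+ /* registers are grouped in pages of 0x20; gen1 uses pages 0/1, gen2 2/3 */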
+ page = addr < 0x20 ? 0 : 1;
+ page += g1 ? 0 : 2;
+ rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
+ rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
+
+ rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
+ wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
+
+ cnt = 20;
+ while (wflag && (cnt != 0)) {
+ udelay(10);
+ wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
+ BIT_MDIO_WFLAG_V1);
+ cnt--;
+ }
+
+ WARN(wflag, "MDIO write fail\n");
+}
+
+static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_intf_phy_para *para;
+ u16 cut;
+ u16 value;
+ u16 offset;
+ u16 ip_sel;
+ int i;
+
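+ /* each chip cut corresponds to one bit of the table's cut_mask */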
+ cut = BIT(0) << rtwdev->hal.cut_version;
+
+ for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
+ para = &chip->intf_table->gen1_para[i];
+ if (!(para->cut_mask & cut))
+ continue;
+ if (para->offset == 0xffff)
+ break;
+ offset = para->offset;
+ value = para->value;
+ ip_sel = para->ip_sel;
+ if (para->ip_sel == RTW_IP_SEL_PHY)
+ rtw_mdio_write(rtwdev, offset, value, true);
+ else
+ rtw_dbi_write8(rtwdev, offset, value);
+ }
+
+ for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
+ para = &chip->intf_table->gen2_para[i];
+ if (!(para->cut_mask & cut))
+ continue;
+ if (para->offset == 0xffff)
+ break;
+ offset = para->offset;
+ value = para->value;
+ ip_sel = para->ip_sel;
+ if (para->ip_sel == RTW_IP_SEL_PHY)
+ rtw_mdio_write(rtwdev, offset, value, false);
+ else
+ rtw_dbi_write8(rtwdev, offset, value);
+ }
+}
+
+static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to enable pci device\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, rtwdev->hw);
+ SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
+
+ return 0;
+}
+
+static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+ struct rtw_pci *rtwpci;
+ int ret;
+
+ rtwpci = (struct rtw_pci *)rtwdev->priv;
+ rtwpci->pdev = pdev;
+
+ /* after this, the driver can access the hw registers */
+ ret = rtw_pci_io_mapping(rtwdev, pdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to request pci io region\n");
+ goto err_out;
+ }
+
+ ret = rtw_pci_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to allocate pci resources\n");
+ goto err_io_unmap;
+ }
+
+ rtw_pci_phy_cfg(rtwdev);
+
+ return 0;
+
+err_io_unmap:
+ rtw_pci_io_unmapping(rtwdev, pdev);
+
+err_out:
+ return ret;
+}
+
+static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+ rtw_pci_deinit(rtwdev);
+ rtw_pci_io_unmapping(rtwdev, pdev);
+}
+
+static struct rtw_hci_ops rtw_pci_ops = {
+ .tx = rtw_pci_tx,
+ .setup = rtw_pci_setup,
+ .start = rtw_pci_start,
+ .stop = rtw_pci_stop,
+
+ .read8 = rtw_pci_read8,
+ .read16 = rtw_pci_read16,
+ .read32 = rtw_pci_read32,
+ .write8 = rtw_pci_write8,
+ .write16 = rtw_pci_write16,
+ .write32 = rtw_pci_write32,
+ .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
+ .write_data_h2c = rtw_pci_write_data_h2c,
+};
+
+static int rtw_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct ieee80211_hw *hw;
+ struct rtw_dev *rtwdev;
+ int drv_data_size;
+ int ret;
+
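+ /* struct rtw_pci is carved from the priv area at the end of struct rtw_dev */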
+ drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
+ hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
+ if (!hw) {
+ dev_err(&pdev->dev, "failed to allocate hw\n");
+ return -ENOMEM;
+ }
+
+ rtwdev = hw->priv;
+ rtwdev->hw = hw;
+ rtwdev->dev = &pdev->dev;
+ rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
+ rtwdev->hci.ops = &rtw_pci_ops;
+ rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
+
+ ret = rtw_core_init(rtwdev);
+ if (ret)
+ goto err_release_hw;
+
+ rtw_dbg(rtwdev, RTW_DBG_PCI,
+ "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
+ pdev->vendor, pdev->device, pdev->revision);
+
+ ret = rtw_pci_claim(rtwdev, pdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to claim pci device\n");
+ goto err_deinit_core;
+ }
+
+ ret = rtw_pci_setup_resource(rtwdev, pdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup pci resources\n");
+ goto err_pci_declaim;
+ }
+
+ ret = rtw_chip_info_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup chip information\n");
+ goto err_destroy_pci;
+ }
+
+ ret = rtw_register_hw(rtwdev, hw);
+ if (ret) {
+ rtw_err(rtwdev, "failed to register hw\n");
+ goto err_destroy_pci;
+ }
+
+ ret = request_irq(pdev->irq, &rtw_pci_interrupt_handler,
+ IRQF_SHARED, KBUILD_MODNAME, rtwdev);
+ if (ret) {
+ ieee80211_unregister_hw(hw);
+ goto err_destroy_pci;
+ }
+
+ return 0;
+
+err_destroy_pci:
+ rtw_pci_destroy(rtwdev, pdev);
+
+err_pci_declaim:
+ rtw_pci_declaim(rtwdev, pdev);
+
+err_deinit_core:
+ rtw_core_deinit(rtwdev);
+
+err_release_hw:
+ ieee80211_free_hw(hw);
+
+ return ret;
+}
+
+static void rtw_pci_remove(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtw_dev *rtwdev;
+ struct rtw_pci *rtwpci;
+
+ if (!hw)
+ return;
+
+ rtwdev = hw->priv;
+ rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ rtw_unregister_hw(rtwdev, hw);
+ rtw_pci_disable_interrupt(rtwdev, rtwpci);
+ rtw_pci_destroy(rtwdev, pdev);
+ rtw_pci_declaim(rtwdev, pdev);
+ free_irq(rtwpci->pdev->irq, rtwdev);
+ rtw_core_deinit(rtwdev);
+ ieee80211_free_hw(hw);
+}
+
+static const struct pci_device_id rtw_pci_id_table[] = {
+#ifdef CONFIG_RTW88_8822BE
+ { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
+#endif
+#ifdef CONFIG_RTW88_8822CE
+ { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
+
+static struct pci_driver rtw_pci_driver = {
+ .name = "rtw_pci",
+ .id_table = rtw_pci_id_table,
+ .probe = rtw_pci_probe,
+ .remove = rtw_pci_remove,
+};
+module_pci_driver(rtw_pci_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
new file mode 100644
index 000000000000..87824a4caba9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTK_PCI_H_
+#define __RTK_PCI_H_
+
+#define RTK_PCI_DEVICE(vend, dev, hw_config) \
+ PCI_DEVICE(vend, dev), \
+ .driver_data = (kernel_ulong_t)&(hw_config),
+
+#define RTK_DEFAULT_TX_DESC_NUM 128
+#define RTK_BEQ_TX_DESC_NUM 256
+
+#define RTK_MAX_RX_DESC_NUM 512
+/* 8K + rx desc size */
+#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
+
+#define RTK_PCI_CTRL 0x300
+#define BIT_RST_TRXDMA_INTF BIT(20)
+#define BIT_RX_TAG_EN BIT(15)
+#define REG_DBI_WDATA_V1 0x03E8
+#define REG_DBI_FLAG_V1 0x03F0
+#define REG_MDIO_V1 0x03F4
+#define REG_PCIE_MIX_CFG 0x03F8
+#define BIT_MDIO_WFLAG_V1 BIT(5)
+
+#define BIT_PCI_BCNQ_FLAG BIT(4)
+#define RTK_PCI_TXBD_DESA_BCNQ 0x308
+#define RTK_PCI_TXBD_DESA_H2CQ 0x1320
+#define RTK_PCI_TXBD_DESA_MGMTQ 0x310
+#define RTK_PCI_TXBD_DESA_BKQ 0x330
+#define RTK_PCI_TXBD_DESA_BEQ 0x328
+#define RTK_PCI_TXBD_DESA_VIQ 0x320
+#define RTK_PCI_TXBD_DESA_VOQ 0x318
+#define RTK_PCI_TXBD_DESA_HI0Q 0x340
+#define RTK_PCI_RXBD_DESA_MPDUQ 0x338
+
+/* BCNQ is dedicated to the rsvd page; it does not need a TXBD number register */
+#define RTK_PCI_TXBD_NUM_H2CQ 0x1328
+#define RTK_PCI_TXBD_NUM_MGMTQ 0x380
+#define RTK_PCI_TXBD_NUM_BKQ 0x38A
+#define RTK_PCI_TXBD_NUM_BEQ 0x388
+#define RTK_PCI_TXBD_NUM_VIQ 0x386
+#define RTK_PCI_TXBD_NUM_VOQ 0x384
+#define RTK_PCI_TXBD_NUM_HI0Q 0x38C
+#define RTK_PCI_RXBD_NUM_MPDUQ 0x382
+#define RTK_PCI_TXBD_IDX_H2CQ 0x132C
+#define RTK_PCI_TXBD_IDX_MGMTQ 0x3B0
+#define RTK_PCI_TXBD_IDX_BKQ 0x3AC
+#define RTK_PCI_TXBD_IDX_BEQ 0x3A8
+#define RTK_PCI_TXBD_IDX_VIQ 0x3A4
+#define RTK_PCI_TXBD_IDX_VOQ 0x3A0
+#define RTK_PCI_TXBD_IDX_HI0Q 0x3B8
+#define RTK_PCI_RXBD_IDX_MPDUQ 0x3B4
+
+#define RTK_PCI_TXBD_RWPTR_CLR 0x39C
+#define RTK_PCI_TXBD_H2CQ_CSR 0x1330
+
+#define BIT_CLR_H2CQ_HOST_IDX BIT(16)
+#define BIT_CLR_H2CQ_HW_IDX BIT(8)
+
+#define RTK_PCI_HIMR0 0x0B0
+#define RTK_PCI_HISR0 0x0B4
+#define RTK_PCI_HIMR1 0x0B8
+#define RTK_PCI_HISR1 0x0BC
+#define RTK_PCI_HIMR2 0x10B0
+#define RTK_PCI_HISR2 0x10B4
+#define RTK_PCI_HIMR3 0x10B8
+#define RTK_PCI_HISR3 0x10BC
+/* IMR 0 */
+#define IMR_TIMER2 BIT(31)
+#define IMR_TIMER1 BIT(30)
+#define IMR_PSTIMEOUT BIT(29)
+#define IMR_GTINT4 BIT(28)
+#define IMR_GTINT3 BIT(27)
+#define IMR_TBDER BIT(26)
+#define IMR_TBDOK BIT(25)
+#define IMR_TSF_BIT32_TOGGLE BIT(24)
+#define IMR_BCNDMAINT0 BIT(20)
+#define IMR_BCNDOK0 BIT(16)
+#define IMR_HSISR_IND_ON_INT BIT(15)
+#define IMR_BCNDMAINT_E BIT(14)
+#define IMR_ATIMEND BIT(12)
+#define IMR_HISR1_IND_INT BIT(11)
+#define IMR_C2HCMD BIT(10)
+#define IMR_CPWM2 BIT(9)
+#define IMR_CPWM BIT(8)
+#define IMR_HIGHDOK BIT(7)
+#define IMR_MGNTDOK BIT(6)
+#define IMR_BKDOK BIT(5)
+#define IMR_BEDOK BIT(4)
+#define IMR_VIDOK BIT(3)
+#define IMR_VODOK BIT(2)
+#define IMR_RDU BIT(1)
+#define IMR_ROK BIT(0)
+/* IMR 1 */
+#define IMR_TXFIFO_TH_INT BIT(30)
+#define IMR_BTON_STS_UPDATE BIT(29)
+#define IMR_MCUERR BIT(28)
+#define IMR_BCNDMAINT7 BIT(27)
+#define IMR_BCNDMAINT6 BIT(26)
+#define IMR_BCNDMAINT5 BIT(25)
+#define IMR_BCNDMAINT4 BIT(24)
+#define IMR_BCNDMAINT3 BIT(23)
+#define IMR_BCNDMAINT2 BIT(22)
+#define IMR_BCNDMAINT1 BIT(21)
+#define IMR_BCNDOK7 BIT(20)
+#define IMR_BCNDOK6 BIT(19)
+#define IMR_BCNDOK5 BIT(18)
+#define IMR_BCNDOK4 BIT(17)
+#define IMR_BCNDOK3 BIT(16)
+#define IMR_BCNDOK2 BIT(15)
+#define IMR_BCNDOK1 BIT(14)
+#define IMR_ATIMEND_E BIT(13)
+#define IMR_ATIMEND BIT(12)
+#define IMR_TXERR BIT(11)
+#define IMR_RXERR BIT(10)
+#define IMR_TXFOVW BIT(9)
+#define IMR_RXFOVW BIT(8)
+#define IMR_CPU_MGQ_TXDONE BIT(5)
+#define IMR_PS_TIMER_C BIT(4)
+#define IMR_PS_TIMER_B BIT(3)
+#define IMR_PS_TIMER_A BIT(2)
+#define IMR_CPUMGQ_TX_TIMER BIT(1)
+/* IMR 3 */
+#define IMR_H2CDOK BIT(16)
+
+/* one element is reserved so a full ring can be distinguished from an empty one */
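+/* e.g. avail_desc(5, 3, 128) = 125: with wp = 5 and rp = 3, only 125 of the
+ * 128 descriptors may be used, since one slot always stays empty
+ */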
+static inline int avail_desc(u32 wp, u32 rp, u32 len)
+{
+ if (rp > wp)
+ return rp - wp - 1;
+ else
+ return len - wp + rp - 1;
+}
+
+#define RTK_PCI_TXBD_OWN_OFFSET 15
+#define RTK_PCI_TXBD_BCN_WORK 0x383
+
+struct rtw_pci_tx_buffer_desc {
+ __le16 buf_size;
+ __le16 psb_len;
+ __le32 dma;
+};
+
+struct rtw_pci_tx_data {
+ dma_addr_t dma;
+ u8 sn;
+};
+
+struct rtw_pci_ring {
+ u8 *head;
+ dma_addr_t dma;
+
+ u8 desc_size;
+
+ u32 len;
+ u32 wp;
+ u32 rp;
+};
+
+struct rtw_pci_tx_ring {
+ struct rtw_pci_ring r;
+ struct sk_buff_head queue;
+ bool queue_stopped;
+};
+
+struct rtw_pci_rx_buffer_desc {
+ __le16 buf_size;
+ __le16 total_pkt_size;
+ __le32 dma;
+};
+
+struct rtw_pci_rx_ring {
+ struct rtw_pci_ring r;
+ struct sk_buff *buf[RTK_MAX_RX_DESC_NUM];
+};
+
+#define RX_TAG_MAX 8192
+
+struct rtw_pci {
+ struct pci_dev *pdev;
+
+ /* used for pci interrupt */
+ spinlock_t irq_lock;
+ u32 irq_mask[4];
+ bool irq_enabled;
+
+ u16 rx_tag;
+ struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
+ struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
+
+ void __iomem *mmap;
+};
+
+static u32 max_num_of_tx_queue(u8 queue)
+{
+ u32 max_num;
+
+ switch (queue) {
+ case RTW_TX_QUEUE_BE:
+ max_num = RTK_BEQ_TX_DESC_NUM;
+ break;
+ case RTW_TX_QUEUE_BCN:
+ max_num = 1;
+ break;
+ default:
+ max_num = RTK_DEFAULT_TX_DESC_NUM;
+ break;
+ }
+
+ return max_num;
+}
+
+static inline struct
+rtw_pci_tx_data *rtw_pci_get_tx_data(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ BUILD_BUG_ON(sizeof(struct rtw_pci_tx_data) >
+ sizeof(info->status.status_driver_data));
+
+ return (struct rtw_pci_tx_data *)info->status.status_driver_data;
+}
+
+static inline
+struct rtw_pci_tx_buffer_desc *get_tx_buffer_desc(struct rtw_pci_tx_ring *ring,
+ u32 size)
+{
+ u8 *buf_desc;
+
+ buf_desc = ring->r.head + ring->r.wp * size;
+ return (struct rtw_pci_tx_buffer_desc *)buf_desc;
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
new file mode 100644
index 000000000000..4381b360b5b5
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -0,0 +1,1727 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/bcd.h>
+
+#include "main.h"
+#include "reg.h"
+#include "fw.h"
+#include "phy.h"
+#include "debug.h"
+
+struct phy_cfg_pair {
+ u32 addr;
+ u32 data;
+};
+
+union phy_table_tile {
+ struct rtw_phy_cond cond;
+ struct phy_cfg_pair cfg;
+};
+
+struct phy_pg_cfg_pair {
+ u32 band;
+ u32 rf_path;
+ u32 tx_num;
+ u32 addr;
+ u32 bitmask;
+ u32 data;
+};
+
+struct txpwr_lmt_cfg_pair {
+ u8 regd;
+ u8 band;
+ u8 bw;
+ u8 rs;
+ u8 ch;
+ s8 txpwr_lmt;
+};
+
+static const u32 db_invert_table[12][8] = {
+ {10, 13, 16, 20,
+ 25, 32, 40, 50},
+ {64, 80, 101, 128,
+ 160, 201, 256, 318},
+ {401, 505, 635, 800,
+ 1007, 1268, 1596, 2010},
+ {316, 398, 501, 631,
+ 794, 1000, 1259, 1585},
+ {1995, 2512, 3162, 3981,
+ 5012, 6310, 7943, 10000},
+ {12589, 15849, 19953, 25119,
+ 31623, 39811, 50119, 63098},
+ {79433, 100000, 125893, 158489,
+ 199526, 251189, 316228, 398107},
+ {501187, 630957, 794328, 1000000,
+ 1258925, 1584893, 1995262, 2511886},
+ {3162278, 3981072, 5011872, 6309573,
+ 7943282, 10000000, 12589254, 15848932},
+ {19952623, 25118864, 31622777, 39810717,
+ 50118723, 63095734, 79432823, 100000000},
+ {125892541, 158489319, 199526232, 251188643,
+ 316227766, 398107171, 501187234, 630957345},
+ {794328235, 1000000000, 1258925412, 1584893192,
+ 1995262315, 2511886432U, 3162277660U, 3981071706U}
+};
+
+enum rtw_phy_band_type {
+ PHY_BAND_2G = 0,
+ PHY_BAND_5G = 1,
+};
+
+void rtw_phy_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 addr, mask;
+
+ dm_info->fa_history[3] = 0;
+ dm_info->fa_history[2] = 0;
+ dm_info->fa_history[1] = 0;
+ dm_info->fa_history[0] = 0;
+ dm_info->igi_bitmap = 0;
+ dm_info->igi_history[3] = 0;
+ dm_info->igi_history[2] = 0;
+ dm_info->igi_history[1] = 0;
+
+ addr = chip->dig[0].addr;
+ mask = chip->dig[0].mask;
+ dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
+}
+
+void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 addr, mask;
+ u8 path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ addr = chip->dig[path].addr;
+ mask = chip->dig[path].mask;
+ rtw_write32_mask(rtwdev, addr, mask, igi);
+ }
+}
+
+static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->false_alarm_statistics(rtwdev);
+}
+
+#define RA_FLOOR_TABLE_SIZE 7
+#define RA_FLOOR_UP_GAP 3
+
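+/* thresholds at or above the current level are raised by RA_FLOOR_UP_GAP,
+ * so the RSSI must improve by an extra margin before the level is bumped
+ * up; this hysteresis avoids bouncing between adjacent levels
+ */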
+static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
+{
+ u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
+ u8 new_level = 0;
+ int i;
+
+ for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
+ if (i >= old_level)
+ table[i] += RA_FLOOR_UP_GAP;
+
+ for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
+ if (rssi < table[i]) {
+ new_level = i;
+ break;
+ }
+ }
+
+ return new_level;
+}
+
+struct rtw_phy_stat_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 min_rssi;
+};
+
+static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_phy_stat_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ u8 rssi, rssi_level;
+
+ rssi = ewma_rssi_read(&si->avg_rssi);
+ rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);
+
+ rtw_fw_send_rssi_info(rtwdev, si);
+
+ iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
+}
+
+static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_phy_stat_iter_data data = {};
+
+ data.rtwdev = rtwdev;
+ data.min_rssi = U8_MAX;
+ rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);
+
+ dm_info->pre_min_rssi = dm_info->min_rssi;
+ dm_info->min_rssi = data.min_rssi;
+}
+
+static void rtw_phy_statistics(struct rtw_dev *rtwdev)
+{
+ rtw_phy_stat_rssi(rtwdev);
+ rtw_phy_stat_false_alarm(rtwdev);
+}
+
+#define DIG_PERF_FA_TH_LOW 250
+#define DIG_PERF_FA_TH_HIGH 500
+#define DIG_PERF_FA_TH_EXTRA_HIGH 750
+#define DIG_PERF_MAX 0x5a
+#define DIG_PERF_MID 0x40
+#define DIG_CVRG_FA_TH_LOW 2000
+#define DIG_CVRG_FA_TH_HIGH 4000
+#define DIG_CVRG_FA_TH_EXTRA_HIGH 5000
+#define DIG_CVRG_MAX 0x2a
+#define DIG_CVRG_MID 0x26
+#define DIG_CVRG_MIN 0x1c
+#define DIG_RSSI_GAIN_OFFSET 15
+
+static bool
+rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
+{
+ u16 fa_lo = DIG_PERF_FA_TH_LOW;
+ u16 fa_hi = DIG_PERF_FA_TH_HIGH;
+ u16 *fa_history;
+ u8 *igi_history;
+ u8 damping_rssi;
+ u8 min_rssi;
+ u8 diff;
+ u8 igi_bitmap;
+ bool damping = false;
+
+ min_rssi = dm_info->min_rssi;
+ if (dm_info->damping) {
+ damping_rssi = dm_info->damping_rssi;
+ diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
+ damping_rssi - min_rssi;
+ if (diff > 3 || dm_info->damping_cnt++ > 20) {
+ dm_info->damping = false;
+ return false;
+ }
+
+ return true;
+ }
+
+ igi_history = dm_info->igi_history;
+ fa_history = dm_info->fa_history;
+ igi_bitmap = dm_info->igi_bitmap & 0xf;
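+ /* the low four bits record whether each of the last four IGI updates
+ * moved up (1) or down (0); 0x5 and 0x9 are the oscillation patterns
+ * that indicate damping
+ */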
+ switch (igi_bitmap) {
+ case 5:
+ /* down -> up -> down -> up */
+ if (igi_history[0] > igi_history[1] &&
+ igi_history[2] > igi_history[3] &&
+ igi_history[0] - igi_history[1] >= 2 &&
+ igi_history[2] - igi_history[3] >= 2 &&
+ fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
+ fa_history[2] > fa_hi && fa_history[3] < fa_lo)
+ damping = true;
+ break;
+ case 9:
+ /* up -> down -> down -> up */
+ if (igi_history[0] > igi_history[1] &&
+ igi_history[3] > igi_history[2] &&
+ igi_history[0] - igi_history[1] >= 4 &&
+ igi_history[3] - igi_history[2] >= 2 &&
+ fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
+ fa_history[2] < fa_lo && fa_history[3] > fa_hi)
+ damping = true;
+ break;
+ default:
+ return false;
+ }
+
+ if (damping) {
+ dm_info->damping = true;
+ dm_info->damping_cnt = 0;
+ dm_info->damping_rssi = min_rssi;
+ }
+
+ return damping;
+}
+
+static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
+ u8 *upper, u8 *lower, bool linked)
+{
+ u8 dig_max, dig_min, dig_mid;
+ u8 min_rssi;
+
+ if (linked) {
+ dig_max = DIG_PERF_MAX;
+ dig_mid = DIG_PERF_MID;
+ /* dig_min is 0x1c for 8822B and 0x20 for 8822C */
+ dig_min = 0x1c;
+ min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
+ } else {
+ dig_max = DIG_CVRG_MAX;
+ dig_mid = DIG_CVRG_MID;
+ dig_min = DIG_CVRG_MIN;
+ min_rssi = dig_min;
+ }
+
+ /* DIG MAX should be bounded by minimum RSSI with offset +15 */
+ dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);
+
+ *lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
+ *upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
+}
+
+static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
+ u16 *fa_th, u8 *step, bool linked)
+{
+ u8 min_rssi, pre_min_rssi;
+
+ min_rssi = dm_info->min_rssi;
+ pre_min_rssi = dm_info->pre_min_rssi;
+ step[0] = 4;
+ step[1] = 3;
+ step[2] = 2;
+
+ if (linked) {
+ fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
+ fa_th[1] = DIG_PERF_FA_TH_HIGH;
+ fa_th[2] = DIG_PERF_FA_TH_LOW;
+ if (pre_min_rssi > min_rssi) {
+ step[0] = 6;
+ step[1] = 4;
+ step[2] = 2;
+ }
+ } else {
+ fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
+ fa_th[1] = DIG_CVRG_FA_TH_HIGH;
+ fa_th[2] = DIG_CVRG_FA_TH_LOW;
+ }
+}
+
+static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
+{
+ u8 *igi_history;
+ u16 *fa_history;
+ u8 igi_bitmap;
+ bool up;
+
+ igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
+ igi_history = dm_info->igi_history;
+ fa_history = dm_info->fa_history;
+
+ up = igi > igi_history[0];
+ igi_bitmap |= up;
+
+ igi_history[3] = igi_history[2];
+ igi_history[2] = igi_history[1];
+ igi_history[1] = igi_history[0];
+ igi_history[0] = igi;
+
+ fa_history[3] = fa_history[2];
+ fa_history[2] = fa_history[1];
+ fa_history[1] = fa_history[0];
+ fa_history[0] = fa;
+
+ dm_info->igi_bitmap = igi_bitmap;
+}
+
+static void rtw_phy_dig(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 upper_bound, lower_bound;
+ u8 pre_igi, cur_igi;
+ u16 fa_th[3], fa_cnt;
+ u8 level;
+ u8 step[3];
+ bool linked;
+
+ if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
+ return;
+
+ if (rtw_phy_dig_check_damping(dm_info))
+ return;
+
+ linked = !!rtwdev->sta_cnt;
+
+ fa_cnt = dm_info->total_fa_cnt;
+ pre_igi = dm_info->igi_history[0];
+
+ rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);
+
+ /* test the false alarm count against the thresholds from the highest
+ * level first, and increase the IGI by the corresponding step size
+ *
+ * note that the step sizes carry a +2 bias, which is compensated for
+ * afterwards
+ */
+ cur_igi = pre_igi;
+ for (level = 0; level < 3; level++) {
+ if (fa_cnt > fa_th[level]) {
+ cur_igi += step[level];
+ break;
+ }
+ }
+ cur_igi -= 2;
+
+ /* calculate the upper/lower bounds from the minimum RSSI among the
+ * peers connected to us, and make sure the IGI value does not go
+ * beyond the hardware limits
+ */
+ rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
+ cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);
+
+ /* record the current IGI value and false alarm statistics for later
+ * damping checks, along with the trend of IGI values
+ */
+ rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);
+
+ if (cur_igi != pre_igi)
+ rtw_phy_dig_write(rtwdev, cur_igi);
+}
+
+static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ rtw_update_sta_info(rtwdev, si);
+}
+
+static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->watch_dog_cnt & 0x3)
+ return;
+
+ rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
+}
+
+void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
+{
+ /* gather statistics used by the dynamic mechanisms below */
+ rtw_phy_statistics(rtwdev);
+ rtw_phy_dig(rtwdev);
+ rtw_phy_ra_info_update(rtwdev);
+}
+
+#define FRAC_BITS 3
+
+static u8 rtw_phy_power_2_db(s8 power)
+{
+ if (power <= -100 || power >= 20)
+ return 0;
+ else if (power >= 0)
+ return 100;
+ else
+ return 100 + power;
+}
+
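+/* rows 0..2 of db_invert_table store 10^(dB/10) pre-scaled by 2^FRAC_BITS,
+ * while the remaining rows store the unscaled value and are shifted here
+ * instead, e.g. power_db = 20 -> i = 2, j = 3 -> 800 (100 * 8)
+ */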
+static u64 rtw_phy_db_2_linear(u8 power_db)
+{
+ u8 i, j;
+ u64 linear;
+
+ /* 1dB ~ 96dB */
+ i = (power_db - 1) >> 3;
+ j = (power_db - 1) - (i << 3);
+
+ linear = db_invert_table[i][j];
+ linear = i > 2 ? linear << FRAC_BITS : linear;
+
+ return linear;
+}
+
+static u8 rtw_phy_linear_2_db(u64 linear)
+{
+ u8 i;
+ u8 j;
+ u32 dB;
+
+ if (linear >= db_invert_table[11][7])
+ return 96; /* maximum 96 dB */
+
+ for (i = 0; i < 12; i++) {
+ if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
+ break;
+ else if (i > 2 && linear <= db_invert_table[i][7])
+ break;
+ }
+
+ for (j = 0; j < 8; j++) {
+ if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
+ break;
+ else if (i > 2 && linear <= db_invert_table[i][j])
+ break;
+ }
+
+ if (j == 0 && i == 0)
+ goto end;
+
+ if (j == 0) {
+ if (i != 3) {
+ if (db_invert_table[i][0] - linear >
+ linear - db_invert_table[i - 1][7]) {
+ i = i - 1;
+ j = 7;
+ }
+ } else {
+ if (db_invert_table[3][0] - linear >
+ linear - db_invert_table[2][7]) {
+ i = 2;
+ j = 7;
+ }
+ }
+ } else {
+ if (db_invert_table[i][j] - linear >
+ linear - db_invert_table[i][j - 1]) {
+ j = j - 1;
+ }
+ }
+end:
+ dB = (i << 3) + j + 1;
+
+ return dB;
+}
+
+u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
+{
+ s8 power;
+ u8 power_db;
+ u64 linear;
+ u64 sum = 0;
+ u8 path;
+
+ for (path = 0; path < path_num; path++) {
+ power = rf_power[path];
+ power_db = rtw_phy_power_2_db(power);
+ linear = rtw_phy_db_2_linear(power_db);
+ sum += linear;
+ }
+
+ sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
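+ /* average the rounded linear sum over the antenna paths; the 3-path
+ * case uses (sum * 11) >> 5, i.e. 11/32, as a cheap approximation of
+ * dividing by 3
+ */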
+ switch (path_num) {
+ case 2:
+ sum >>= 1;
+ break;
+ case 3:
+ sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
+ break;
+ case 4:
+ sum >>= 2;
+ break;
+ default:
+ break;
+ }
+
+ return rtw_phy_linear_2_db(sum);
+}
+
+u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const u32 *base_addr = chip->rf_base_addr;
+ u32 val, direct_addr;
+
+ if (rf_path >= hal->rf_path_num) {
+ rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ addr &= 0xff;
+ direct_addr = base_addr[rf_path] + (addr << 2);
+ mask &= RFREG_MASK;
+
+ val = rtw_read32_mask(rtwdev, direct_addr, mask);
+
+ return val;
+}
+
+bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 *sipi_addr = chip->rf_sipi_addr;
+ u32 data_and_addr;
+ u32 old_data = 0;
+ u32 shift;
+
+ if (rf_path >= hal->rf_path_num) {
+ rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ addr &= 0xff;
+ mask &= RFREG_MASK;
+
+ if (mask != RFREG_MASK) {
+ old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);
+
+ if (old_data == INV_RF_DATA) {
+ rtw_err(rtwdev, "Write fail, rf is disabled\n");
+ return false;
+ }
+
+ shift = __ffs(mask);
+ data = ((old_data) & (~mask)) | (data << shift);
+ }
+
+ data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;
+
+ rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);
+
+ udelay(13);
+
+ return true;
+}
+
+bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const u32 *base_addr = chip->rf_base_addr;
+ u32 direct_addr;
+
+ if (rf_path >= hal->rf_path_num) {
+ rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ addr &= 0xff;
+ direct_addr = base_addr[rf_path] + (addr << 2);
+ mask &= RFREG_MASK;
+
+ rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
+ rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
+ rtw_write32_mask(rtwdev, direct_addr, mask, data);
+
+ udelay(1);
+
+ rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
+ rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
+
+ return true;
+}
+
+bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ if (addr != 0x00)
+ return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);
+
+ return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
+}
+
+void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_phy_cond cond = {0};
+
+ cond.cut = hal->cut_version ? hal->cut_version : 15;
+ cond.pkg = pkg ? pkg : 15;
+ cond.plat = 0x04;
+ cond.rfe = efuse->rfe_option;
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_USB:
+ cond.intf = INTF_USB;
+ break;
+ case RTW_HCI_TYPE_SDIO:
+ cond.intf = INTF_SDIO;
+ break;
+ case RTW_HCI_TYPE_PCIE:
+ default:
+ cond.intf = INTF_PCIE;
+ break;
+ }
+
+ hal->phy_cond = cond;
+
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
+}
+
+static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_phy_cond drv_cond = hal->phy_cond;
+
+ if (cond.cut && cond.cut != drv_cond.cut)
+ return false;
+
+ if (cond.pkg && cond.pkg != drv_cond.pkg)
+ return false;
+
+ if (cond.intf && cond.intf != drv_cond.intf)
+ return false;
+
+ if (cond.rfe != drv_cond.rfe)
+ return false;
+
+ return true;
+}
+
+void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
+{
+ const union phy_table_tile *p = tbl->data;
+ const union phy_table_tile *end = p + tbl->size / 2;
+ struct rtw_phy_cond pos_cond = {0};
+ bool is_matched = true, is_skipped = false;
+
+ BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));
+
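+ /* the table is a flat list of tiles: a tile with .pos set carries a
+ * branch opcode (IF/ELIF/ELSE/ENDIF), the paired tile with .neg set
+ * triggers evaluation of the stored condition, and every other tile
+ * is a plain register write applied only while the branch matches
+ */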
+ for (; p < end; p++) {
+ if (p->cond.pos) {
+ switch (p->cond.branch) {
+ case BRANCH_ENDIF:
+ is_matched = true;
+ is_skipped = false;
+ break;
+ case BRANCH_ELSE:
+ is_matched = is_skipped ? false : true;
+ break;
+ case BRANCH_IF:
+ case BRANCH_ELIF:
+ default:
+ pos_cond = p->cond;
+ break;
+ }
+ } else if (p->cond.neg) {
+ if (!is_skipped) {
+ if (check_positive(rtwdev, pos_cond)) {
+ is_matched = true;
+ is_skipped = true;
+ } else {
+ is_matched = false;
+ is_skipped = false;
+ }
+ } else {
+ is_matched = false;
+ }
+ } else if (is_matched) {
+ (*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
+ }
+ }
+}
+
+void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
+{
+ const struct phy_pg_cfg_pair *p = tbl->data;
+ const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
+
+ BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);
+
+ for (; p < end; p++) {
+ if (p->addr == 0xfe || p->addr == 0xffe) {
+ msleep(50);
+ continue;
+ }
+ phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
+ p->tx_num, p->addr, p->bitmask,
+ p->data);
+ }
+}
+
+void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
+ const struct rtw_table *tbl)
+{
+ const struct txpwr_lmt_cfg_pair *p = tbl->data;
+ const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6;
+
+ BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);
+
+ for (; p < end; p++) {
+ phy_set_tx_power_limit(rtwdev, p->regd, p->band,
+ p->bw, p->rs,
+ p->ch, p->txpwr_lmt);
+ }
+}
+
+void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data)
+{
+ rtw_write8(rtwdev, addr, data);
+}
+
+void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data)
+{
+ rtw_write32(rtwdev, addr, data);
+}
+
+void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data)
+{
+ if (addr == 0xfe)
+ msleep(50);
+ else if (addr == 0xfd)
+ mdelay(5);
+ else if (addr == 0xfc)
+ mdelay(1);
+ else if (addr == 0xfb)
+ usleep_range(50, 60);
+ else if (addr == 0xfa)
+ udelay(5);
+ else if (addr == 0xf9)
+ udelay(1);
+ else
+ rtw_write32(rtwdev, addr, data);
+}
+
+void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data)
+{
+ if (addr == 0xffe) {
+ msleep(50);
+ } else if (addr == 0xfe) {
+ usleep_range(100, 110);
+ } else {
+ rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
+ udelay(1);
+ }
+}
+
+static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (!chip->rfk_init_tbl)
+ return;
+
+ rtw_load_table(rtwdev, chip->rfk_init_tbl);
+}
+
+void rtw_phy_load_tables(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 rf_path;
+
+ rtw_load_table(rtwdev, chip->mac_tbl);
+ rtw_load_table(rtwdev, chip->bb_tbl);
+ rtw_load_table(rtwdev, chip->agc_tbl);
+ rtw_load_rfk_table(rtwdev);
+
+ for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
+ const struct rtw_table *tbl;
+
+ tbl = chip->rf_tbl[rf_path];
+ rtw_load_table(rtwdev, tbl);
+ }
+}
+
+#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
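+/* e.g. bcd_to_dec_pwr_by_rate(0x32343638, 3) takes byte 3 (0x32) of the
+ * register value and decodes the BCD digits to 32
+ */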
+
+#define RTW_MAX_POWER_INDEX 0x3F
+
+u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
+u8 rtw_ofdm_rates[] = {
+ DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
+ DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
+ DESC_RATE48M, DESC_RATE54M
+};
+u8 rtw_ht_1s_rates[] = {
+ DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
+ DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
+ DESC_RATEMCS6, DESC_RATEMCS7
+};
+u8 rtw_ht_2s_rates[] = {
+ DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
+ DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
+ DESC_RATEMCS14, DESC_RATEMCS15
+};
+u8 rtw_vht_1s_rates[] = {
+ DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
+ DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
+ DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
+ DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
+ DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
+};
+u8 rtw_vht_2s_rates[] = {
+ DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
+ DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
+ DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
+ DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
+ DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
+};
+u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
+u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
+u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
+u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
+u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
+u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
+u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
+ rtw_cck_rates, rtw_ofdm_rates,
+ rtw_ht_1s_rates, rtw_ht_2s_rates,
+ rtw_vht_1s_rates, rtw_vht_2s_rates
+};
+u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
+ ARRAY_SIZE(rtw_cck_rates),
+ ARRAY_SIZE(rtw_ofdm_rates),
+ ARRAY_SIZE(rtw_ht_1s_rates),
+ ARRAY_SIZE(rtw_ht_2s_rates),
+ ARRAY_SIZE(rtw_vht_1s_rates),
+ ARRAY_SIZE(rtw_vht_2s_rates)
+};
+
+static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
+ 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
+ 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
+ 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
+ 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
+ 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
+ 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
+ 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
+
+static int rtw_channel_to_idx(u8 band, u8 channel)
+{
+ int ch_idx;
+ u8 n_channel;
+
+ if (band == PHY_BAND_2G) {
+ ch_idx = channel - 1;
+ n_channel = RTW_MAX_CHANNEL_NUM_2G;
+ } else if (band == PHY_BAND_5G) {
+ n_channel = RTW_MAX_CHANNEL_NUM_5G;
+ for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
+ if (rtw_channel_idx_5g[ch_idx] == channel)
+ break;
+ } else {
+ return -1;
+ }
+
+ if (ch_idx >= n_channel)
+ return -1;
+
+ return ch_idx;
+}
+
+static u8 rtw_get_channel_group(u8 channel)
+{
+ switch (channel) {
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case 1:
+ case 2:
+ case 36:
+ case 38:
+ case 40:
+ case 42:
+ return 0;
+ case 3:
+ case 4:
+ case 5:
+ case 44:
+ case 46:
+ case 48:
+ case 50:
+ return 1;
+ case 6:
+ case 7:
+ case 8:
+ case 52:
+ case 54:
+ case 56:
+ case 58:
+ return 2;
+ case 9:
+ case 10:
+ case 11:
+ case 60:
+ case 62:
+ case 64:
+ return 3;
+ case 12:
+ case 13:
+ case 100:
+ case 102:
+ case 104:
+ case 106:
+ return 4;
+ case 14:
+ case 108:
+ case 110:
+ case 112:
+ case 114:
+ return 5;
+ case 116:
+ case 118:
+ case 120:
+ case 122:
+ return 6;
+ case 124:
+ case 126:
+ case 128:
+ case 130:
+ return 7;
+ case 132:
+ case 134:
+ case 136:
+ case 138:
+ return 8;
+ case 140:
+ case 142:
+ case 144:
+ return 9;
+ case 149:
+ case 151:
+ case 153:
+ case 155:
+ return 10;
+ case 157:
+ case 159:
+ case 161:
+ return 11;
+ case 165:
+ case 167:
+ case 169:
+ case 171:
+ return 12;
+ case 173:
+ case 175:
+ case 177:
+ return 13;
+ }
+}
+
+static u8 phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
+ struct rtw_2g_txpwr_idx *pwr_idx_2g,
+ enum rtw_bandwidth bandwidth,
+ u8 rate, u8 group)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 tx_power;
+ bool mcs_rate;
+ bool above_2ss;
+ u8 factor = chip->txgi_factor;
+
+ if (rate <= DESC_RATE11M)
+ tx_power = pwr_idx_2g->cck_base[group];
+ else
+ tx_power = pwr_idx_2g->bw40_base[group];
+
+ if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
+ tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
+
+ mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
+ (rate >= DESC_RATEVHT1SS_MCS0 &&
+ rate <= DESC_RATEVHT2SS_MCS9);
+ above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
+ (rate >= DESC_RATEVHT2SS_MCS0);
+
+ if (!mcs_rate)
+ return tx_power;
+
+ switch (bandwidth) {
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case RTW_CHANNEL_WIDTH_20:
+ tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
+ if (above_2ss)
+ tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ /* bw40 is the base power */
+ if (above_2ss)
+ tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
+ break;
+ }
+
+ return tx_power;
+}
+
+static u8 phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
+ struct rtw_5g_txpwr_idx *pwr_idx_5g,
+ enum rtw_bandwidth bandwidth,
+ u8 rate, u8 group)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 tx_power;
+ u8 upper, lower;
+ bool mcs_rate;
+ bool above_2ss;
+ u8 factor = chip->txgi_factor;
+
+ tx_power = pwr_idx_5g->bw40_base[group];
+
+ mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
+ (rate >= DESC_RATEVHT1SS_MCS0 &&
+ rate <= DESC_RATEVHT2SS_MCS9);
+ above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
+ (rate >= DESC_RATEVHT2SS_MCS0);
+
+ if (!mcs_rate) {
+ tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
+ return tx_power;
+ }
+
+ switch (bandwidth) {
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case RTW_CHANNEL_WIDTH_20:
+ tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
+ if (above_2ss)
+ tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ /* bw40 is the base power */
+ if (above_2ss)
+ tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ /* the base idx of bw80 is the average of bw40+/bw40- */
+ lower = pwr_idx_5g->bw40_base[group];
+ upper = pwr_idx_5g->bw40_base[group + 1];
+
+ tx_power = (lower + upper) / 2;
+ tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
+ if (above_2ss)
+ tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
+ break;
+ }
+
+ return tx_power;
+}
+
+/* set tx power level by path for each rate; note that the order of the
+ * rates is *very* important, because 8822B/8821C combines every four bytes
+ * of tx power index into a four-byte power index register, and calls
+ * set_tx_agc to write these values into hardware
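+ * (illustration only: per-rate indexes 0x20/0x22/0x24/0x26 for the 6M..18M
+ * rates would be packed as 0x26242220, lowest rate in the lowest byte)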
+ */
+static
+void phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, u8 ch, u8 path)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rs;
+
+ /* do not need cck rates if we are not in 2.4G */
+ if (hal->current_band_type == RTW_BAND_2G)
+ rs = RTW_RATE_SECTION_CCK;
+ else
+ rs = RTW_RATE_SECTION_OFDM;
+
+ for (; rs < RTW_RATE_SECTION_MAX; rs++)
+ phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
+}
+
+void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 path;
+
+ mutex_lock(&hal->tx_power_mutex);
+
+ for (path = 0; path < hal->rf_path_num; path++)
+ phy_set_tx_power_level_by_path(rtwdev, channel, path);
+
+ chip->ops->set_tx_power_index(rtwdev);
+ mutex_unlock(&hal->tx_power_mutex);
+}
+
+s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
+ enum rtw_bandwidth bandwidth, u8 rf_path,
+ u8 rate, u8 channel, u8 regd);
+
+static
+u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
+ enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
+{
+ struct rtw_dev *rtwdev = adapter;
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_txpwr_idx *pwr_idx;
+ u8 tx_power;
+ u8 group;
+ u8 band;
+ s8 offset, limit;
+
+ pwr_idx = &rtwdev->efuse.txpwr_idx_table[rf_path];
+ group = rtw_get_channel_group(channel);
+
+ /* base power index for 2.4G/5G */
+ if (channel <= 14) {
+ band = PHY_BAND_2G;
+ tx_power = phy_get_2g_tx_power_index(rtwdev,
+ &pwr_idx->pwr_idx_2g,
+ bandwidth, rate, group);
+ offset = hal->tx_pwr_by_rate_offset_2g[rf_path][rate];
+ } else {
+ band = PHY_BAND_5G;
+ tx_power = phy_get_5g_tx_power_index(rtwdev,
+ &pwr_idx->pwr_idx_5g,
+ bandwidth, rate, group);
+ offset = hal->tx_pwr_by_rate_offset_5g[rf_path][rate];
+ }
+
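+ /* the per-rate offset may not exceed the regulatory limit for this
+ * channel and rate; the smaller of the two is added to the base index
+ */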
+ limit = phy_get_tx_power_limit(rtwdev, band, bandwidth, rf_path,
+ rate, channel, regd);
+
+ if (offset > limit)
+ offset = limit;
+
+ tx_power += offset;
+
+ if (tx_power > rtwdev->chip->max_power_index)
+ tx_power = rtwdev->chip->max_power_index;
+
+ return tx_power;
+}
+
+void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
+{
+ struct rtw_dev *rtwdev = adapter;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 regd = rtwdev->regd.txpwr_regd;
+ u8 *rates;
+ u8 size;
+ u8 rate;
+ u8 pwr_idx;
+ u8 bw;
+ int i;
+
+ if (rs >= RTW_RATE_SECTION_MAX)
+ return;
+
+ rates = rtw_rate_section[rs];
+ size = rtw_rate_size[rs];
+ bw = hal->current_band_width;
+ for (i = 0; i < size; i++) {
+ rate = rates[i];
+ pwr_idx = phy_get_tx_power_index(adapter, path, rate, bw, ch,
+ regd);
+ hal->tx_pwr_tbl[path][rate] = pwr_idx;
+ }
+}
+
+static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
+{
+ if (rtwdev->chip->is_pwr_by_rate_dec)
+ return bcd_to_dec_pwr_by_rate(hex, i);
+ else
+ return (hex >> (i * 8)) & 0xFF;
+}
+
+static void phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
+ u32 addr, u32 mask,
+ u32 val, u8 *rate,
+ u8 *pwr_by_rate, u8 *rate_num)
+{
+ int i;
+
+ switch (addr) {
+ case 0xE00:
+ case 0x830:
+ rate[0] = DESC_RATE6M;
+ rate[1] = DESC_RATE9M;
+ rate[2] = DESC_RATE12M;
+ rate[3] = DESC_RATE18M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE04:
+ case 0x834:
+ rate[0] = DESC_RATE24M;
+ rate[1] = DESC_RATE36M;
+ rate[2] = DESC_RATE48M;
+ rate[3] = DESC_RATE54M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE08:
+ rate[0] = DESC_RATE1M;
+ pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
+ *rate_num = 1;
+ break;
+ case 0x86C:
+ if (mask == 0xffffff00) {
+ rate[0] = DESC_RATE2M;
+ rate[1] = DESC_RATE5_5M;
+ rate[2] = DESC_RATE11M;
+ for (i = 1; i < 4; ++i)
+ pwr_by_rate[i - 1] =
+ tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 3;
+ } else if (mask == 0x000000ff) {
+ rate[0] = DESC_RATE11M;
+ pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
+ *rate_num = 1;
+ }
+ break;
+ case 0xE10:
+ case 0x83C:
+ rate[0] = DESC_RATEMCS0;
+ rate[1] = DESC_RATEMCS1;
+ rate[2] = DESC_RATEMCS2;
+ rate[3] = DESC_RATEMCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE14:
+ case 0x848:
+ rate[0] = DESC_RATEMCS4;
+ rate[1] = DESC_RATEMCS5;
+ rate[2] = DESC_RATEMCS6;
+ rate[3] = DESC_RATEMCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE18:
+ case 0x84C:
+ rate[0] = DESC_RATEMCS8;
+ rate[1] = DESC_RATEMCS9;
+ rate[2] = DESC_RATEMCS10;
+ rate[3] = DESC_RATEMCS11;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE1C:
+ case 0x868:
+ rate[0] = DESC_RATEMCS12;
+ rate[1] = DESC_RATEMCS13;
+ rate[2] = DESC_RATEMCS14;
+ rate[3] = DESC_RATEMCS15;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+
+ break;
+ case 0x838:
+ rate[0] = DESC_RATE1M;
+ rate[1] = DESC_RATE2M;
+ rate[2] = DESC_RATE5_5M;
+ for (i = 1; i < 4; ++i)
+ pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
+ val, i);
+ *rate_num = 3;
+ break;
+ case 0xC20:
+ case 0xE20:
+ case 0x1820:
+ case 0x1A20:
+ rate[0] = DESC_RATE1M;
+ rate[1] = DESC_RATE2M;
+ rate[2] = DESC_RATE5_5M;
+ rate[3] = DESC_RATE11M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC24:
+ case 0xE24:
+ case 0x1824:
+ case 0x1A24:
+ rate[0] = DESC_RATE6M;
+ rate[1] = DESC_RATE9M;
+ rate[2] = DESC_RATE12M;
+ rate[3] = DESC_RATE18M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC28:
+ case 0xE28:
+ case 0x1828:
+ case 0x1A28:
+ rate[0] = DESC_RATE24M;
+ rate[1] = DESC_RATE36M;
+ rate[2] = DESC_RATE48M;
+ rate[3] = DESC_RATE54M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC2C:
+ case 0xE2C:
+ case 0x182C:
+ case 0x1A2C:
+ rate[0] = DESC_RATEMCS0;
+ rate[1] = DESC_RATEMCS1;
+ rate[2] = DESC_RATEMCS2;
+ rate[3] = DESC_RATEMCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC30:
+ case 0xE30:
+ case 0x1830:
+ case 0x1A30:
+ rate[0] = DESC_RATEMCS4;
+ rate[1] = DESC_RATEMCS5;
+ rate[2] = DESC_RATEMCS6;
+ rate[3] = DESC_RATEMCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC34:
+ case 0xE34:
+ case 0x1834:
+ case 0x1A34:
+ rate[0] = DESC_RATEMCS8;
+ rate[1] = DESC_RATEMCS9;
+ rate[2] = DESC_RATEMCS10;
+ rate[3] = DESC_RATEMCS11;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC38:
+ case 0xE38:
+ case 0x1838:
+ case 0x1A38:
+ rate[0] = DESC_RATEMCS12;
+ rate[1] = DESC_RATEMCS13;
+ rate[2] = DESC_RATEMCS14;
+ rate[3] = DESC_RATEMCS15;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC3C:
+ case 0xE3C:
+ case 0x183C:
+ case 0x1A3C:
+ rate[0] = DESC_RATEVHT1SS_MCS0;
+ rate[1] = DESC_RATEVHT1SS_MCS1;
+ rate[2] = DESC_RATEVHT1SS_MCS2;
+ rate[3] = DESC_RATEVHT1SS_MCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC40:
+ case 0xE40:
+ case 0x1840:
+ case 0x1A40:
+ rate[0] = DESC_RATEVHT1SS_MCS4;
+ rate[1] = DESC_RATEVHT1SS_MCS5;
+ rate[2] = DESC_RATEVHT1SS_MCS6;
+ rate[3] = DESC_RATEVHT1SS_MCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC44:
+ case 0xE44:
+ case 0x1844:
+ case 0x1A44:
+ rate[0] = DESC_RATEVHT1SS_MCS8;
+ rate[1] = DESC_RATEVHT1SS_MCS9;
+ rate[2] = DESC_RATEVHT2SS_MCS0;
+ rate[3] = DESC_RATEVHT2SS_MCS1;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC48:
+ case 0xE48:
+ case 0x1848:
+ case 0x1A48:
+ rate[0] = DESC_RATEVHT2SS_MCS2;
+ rate[1] = DESC_RATEVHT2SS_MCS3;
+ rate[2] = DESC_RATEVHT2SS_MCS4;
+ rate[3] = DESC_RATEVHT2SS_MCS5;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC4C:
+ case 0xE4C:
+ case 0x184C:
+ case 0x1A4C:
+ rate[0] = DESC_RATEVHT2SS_MCS6;
+ rate[1] = DESC_RATEVHT2SS_MCS7;
+ rate[2] = DESC_RATEVHT2SS_MCS8;
+ rate[3] = DESC_RATEVHT2SS_MCS9;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCD8:
+ case 0xED8:
+ case 0x18D8:
+ case 0x1AD8:
+ rate[0] = DESC_RATEMCS16;
+ rate[1] = DESC_RATEMCS17;
+ rate[2] = DESC_RATEMCS18;
+ rate[3] = DESC_RATEMCS19;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCDC:
+ case 0xEDC:
+ case 0x18DC:
+ case 0x1ADC:
+ rate[0] = DESC_RATEMCS20;
+ rate[1] = DESC_RATEMCS21;
+ rate[2] = DESC_RATEMCS22;
+ rate[3] = DESC_RATEMCS23;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE0:
+ case 0xEE0:
+ case 0x18E0:
+ case 0x1AE0:
+ rate[0] = DESC_RATEVHT3SS_MCS0;
+ rate[1] = DESC_RATEVHT3SS_MCS1;
+ rate[2] = DESC_RATEVHT3SS_MCS2;
+ rate[3] = DESC_RATEVHT3SS_MCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE4:
+ case 0xEE4:
+ case 0x18E4:
+ case 0x1AE4:
+ rate[0] = DESC_RATEVHT3SS_MCS4;
+ rate[1] = DESC_RATEVHT3SS_MCS5;
+ rate[2] = DESC_RATEVHT3SS_MCS6;
+ rate[3] = DESC_RATEVHT3SS_MCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE8:
+ case 0xEE8:
+ case 0x18E8:
+ case 0x1AE8:
+ rate[0] = DESC_RATEVHT3SS_MCS8;
+ rate[1] = DESC_RATEVHT3SS_MCS9;
+ for (i = 0; i < 2; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 2;
+ break;
+ default:
+ rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
+ break;
+ }
+}
+
+void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtw_dev *rtwdev = adapter;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rate_num = 0;
+ u8 rate;
+ u8 rates[RTW_RF_PATH_MAX] = {0};
+ s8 offset;
+ s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
+ int i;
+
+ phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
+ rates, pwr_by_rate, &rate_num);
+
+ if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
+ (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
+ rate_num > RTW_RF_PATH_MAX))
+ return;
+
+ for (i = 0; i < rate_num; i++) {
+ offset = pwr_by_rate[i];
+ rate = rates[i];
+ if (band == PHY_BAND_2G)
+ hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
+ else if (band == PHY_BAND_5G)
+ hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
+ else
+ continue;
+ }
+}
+
+static
+void phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
+ u8 rs, u8 size, u8 *rates)
+{
+ u8 rate;
+ u8 base_idx, rate_idx;
+ s8 base_2g, base_5g;
+
+ if (rs >= RTW_RATE_SECTION_VHT_1S)
+ base_idx = rates[size - 3];
+ else
+ base_idx = rates[size - 1];
+ base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
+ base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
+ hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
+ hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
+ for (rate = 0; rate < size; rate++) {
+ rate_idx = rates[rate];
+ hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
+ hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
+ }
+}
+
+void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
+{
+ u8 path;
+
+ for (path = 0; path < RTW_RF_PATH_MAX; path++) {
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_CCK,
+ rtw_cck_size, rtw_cck_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_OFDM,
+ rtw_ofdm_size, rtw_ofdm_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_HT_1S,
+ rtw_ht_1s_size, rtw_ht_1s_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_HT_2S,
+ rtw_ht_2s_size, rtw_ht_2s_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_VHT_1S,
+ rtw_vht_1s_size, rtw_vht_1s_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_VHT_2S,
+ rtw_vht_2s_size, rtw_vht_2s_rates);
+ }
+}
+
+static void
+phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
+{
+ s8 base, orig;
+ u8 ch;
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
+ base = hal->tx_pwr_by_rate_base_2g[0][rs];
+ orig = hal->tx_pwr_limit_2g[regd][bw][rs][ch];
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
+ }
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
+ base = hal->tx_pwr_by_rate_base_5g[0][rs];
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
+ }
+}
+
+void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
+{
+ u8 regd, bw, rs;
+
+ for (regd = 0; regd < RTW_REGD_MAX; regd++)
+ for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ phy_tx_power_limit_config(hal, regd, bw, rs);
+}
+
+static s8 get_tx_power_limit(struct rtw_hal *hal, u8 bw, u8 rs, u8 ch, u8 regd)
+{
+ if (regd > RTW_REGD_WW)
+ return RTW_MAX_POWER_INDEX;
+
+ return hal->tx_pwr_limit_2g[regd][bw][rs][ch];
+}
+
+s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
+ enum rtw_bandwidth bw, u8 rf_path,
+ u8 rate, u8 channel, u8 regd)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ s8 power_limit;
+ u8 rs;
+ int ch_idx;
+
+ if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
+ rs = RTW_RATE_SECTION_CCK;
+ else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
+ rs = RTW_RATE_SECTION_OFDM;
+ else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
+ rs = RTW_RATE_SECTION_HT_1S;
+ else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
+ rs = RTW_RATE_SECTION_HT_2S;
+ else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
+ rs = RTW_RATE_SECTION_VHT_1S;
+ else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
+ rs = RTW_RATE_SECTION_VHT_2S;
+ else
+ goto err;
+
+ ch_idx = rtw_channel_to_idx(band, channel);
+ if (ch_idx < 0)
+ goto err;
+
+ power_limit = get_tx_power_limit(hal, bw, rs, ch_idx, regd);
+
+ return power_limit;
+
+err:
+ WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
+ band, bw, rf_path, rate, channel);
+ return RTW_MAX_POWER_INDEX;
+}
+
+void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
+ u8 bw, u8 rs, u8 ch, s8 pwr_limit)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int ch_idx;
+
+ pwr_limit = clamp_t(s8, pwr_limit,
+ -RTW_MAX_POWER_INDEX, RTW_MAX_POWER_INDEX);
+ ch_idx = rtw_channel_to_idx(band, ch);
+
+ if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
+ rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
+ WARN(1,
+ "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
+ regd, band, bw, rs, ch_idx, pwr_limit);
+ return;
+ }
+
+ if (band == PHY_BAND_2G)
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
+ else if (band == PHY_BAND_5G)
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
+}
+
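+/* power limits default to the maximum index (i.e. unrestricted) until the
+ * regulatory tables overwrite them via phy_set_tx_power_limit()
+ */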
+static
+void rtw_hw_tx_power_limit_init(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
+{
+ u8 ch;
+
+ /* 2.4G channels */
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
+
+ /* 5G channels */
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
+}
+
+void rtw_hw_init_tx_power(struct rtw_hal *hal)
+{
+ u8 regd, path, rate, rs, bw;
+
+ /* init tx power by rate offset */
+ for (path = 0; path < RTW_RF_PATH_MAX; path++) {
+ for (rate = 0; rate < DESC_RATE_MAX; rate++) {
+ hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
+ hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
+ }
+ }
+
+ /* init tx power limit */
+ for (regd = 0; regd < RTW_REGD_MAX; regd++)
+ for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ rtw_hw_tx_power_limit_init(hal, regd, bw, rs);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
new file mode 100644
index 000000000000..ec03a2051e52
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_PHY_H_
+#define __RTW_PHY_H_
+
+#include "debug.h"
+
+extern u8 rtw_cck_rates[];
+extern u8 rtw_ofdm_rates[];
+extern u8 rtw_ht_1s_rates[];
+extern u8 rtw_ht_2s_rates[];
+extern u8 rtw_vht_1s_rates[];
+extern u8 rtw_vht_2s_rates[];
+extern u8 *rtw_rate_section[];
+extern u8 rtw_rate_size[];
+
+void rtw_phy_init(struct rtw_dev *rtwdev);
+void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev);
+u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num);
+u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask);
+bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
+ u32 regaddr, u32 bitmask, u32 data);
+void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
+ u8 bw, u8 rs, u8 ch, s8 pwr_limit);
+void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs);
+void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg);
+void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
+void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
+void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
+void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+void rtw_hw_init_tx_power(struct rtw_hal *hal);
+void rtw_phy_load_tables(struct rtw_dev *rtwdev);
+void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
+void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
+void rtw_phy_tx_power_limit_config(struct rtw_hal *hal);
+
+#define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \
+const struct rtw_table name ## _tbl = { \
+ .data = name, \
+ .size = ARRAY_SIZE(name), \
+ .parse = rtw_parse_tbl_phy_cond, \
+ .do_cfg = cfg, \
+ .rf_path = path, \
+}
+
+#define RTW_DECL_TABLE_PHY_COND(name, cfg) \
+ RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, 0)
+
+#define RTW_DECL_TABLE_RF_RADIO(name, path) \
+ RTW_DECL_TABLE_PHY_COND_CORE(name, rtw_phy_cfg_rf, RF_PATH_ ## path)
+
+#define RTW_DECL_TABLE_BB_PG(name) \
+const struct rtw_table name ## _tbl = { \
+ .data = name, \
+ .size = ARRAY_SIZE(name), \
+ .parse = rtw_parse_tbl_bb_pg, \
+}
+
+#define RTW_DECL_TABLE_TXPWR_LMT(name) \
+const struct rtw_table name ## _tbl = { \
+ .data = name, \
+ .size = ARRAY_SIZE(name), \
+ .parse = rtw_parse_tbl_txpwr_lmt, \
+}
+
+static inline const struct rtw_rfe_def *rtw_get_rfe_def(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const struct rtw_rfe_def *rfe_def = NULL;
+
+ if (chip->rfe_defs_size == 0)
+ return NULL;
+
+ if (efuse->rfe_option < chip->rfe_defs_size)
+ rfe_def = &chip->rfe_defs[efuse->rfe_option];
+
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "use rfe_def[%d]\n", efuse->rfe_option);
+ return rfe_def;
+}
+
+static inline int rtw_check_supported_rfe(struct rtw_dev *rtwdev)
+{
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
+
+ if (!rfe_def || !rfe_def->phy_pg_tbl || !rfe_def->txpwr_lmt_tbl) {
+ rtw_err(rtwdev, "rfe %d isn't supported\n",
+ rtwdev->efuse.rfe_option);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi);
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define RFREG_MASK 0xfffff
+
+#define MASK7BITS 0x7f
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASK20BITS 0xfffff
+#define MASK24BITS 0xffffff
+
+#define MASKH3BYTES 0xffffff00
+#define MASKL3BYTES 0x00ffffff
+#define MASKBYTE2HIGHNIBBLE 0x00f00000
+#define MASKBYTE3LOWNIBBLE 0x0f000000
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
new file mode 100644
index 000000000000..607bfa4317d9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "ps.h"
+#include "mac.h"
+#include "debug.h"
+
+static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_core_start(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "leave idle state failed\n");
+
+ rtw_set_channel(rtwdev);
+ rtw_flag_clear(rtwdev, RTW_FLAG_INACTIVE_PS);
+
+ return ret;
+}
+
+int rtw_enter_ips(struct rtw_dev *rtwdev)
+{
+ rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS);
+
+ rtw_core_stop(rtwdev);
+
+ return 0;
+}
+
+static void rtw_restore_port_cfg_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = ~0;
+
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+}
+
+int rtw_leave_ips(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_ips_pwr_up(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave ips state\n");
+ return ret;
+ }
+
+ rtw_iterate_vifs_atomic(rtwdev, rtw_restore_port_cfg_iter, rtwdev);
+
+ return 0;
+}
+
+static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ conf->state = RTW_ALL_ON;
+ conf->awake_interval = 1;
+ conf->rlbm = 0;
+ conf->smart_ps = 0;
+
+ rtw_fw_set_pwr_mode(rtwdev);
+ rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS);
+}
+
+static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ conf->state = RTW_RF_OFF;
+ conf->awake_interval = 1;
+ conf->rlbm = 1;
+ conf->smart_ps = 2;
+
+ rtw_fw_set_pwr_mode(rtwdev);
+ rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS);
+}
+
+void rtw_lps_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ lps_work.work);
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ struct rtw_vif *rtwvif = conf->rtwvif;
+
+ if (WARN_ON(!rtwvif))
+ return;
+
+ if (conf->mode == RTW_MODE_LPS)
+ rtw_enter_lps_core(rtwdev);
+ else
+ rtw_leave_lps_core(rtwdev);
+}
+
+void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ if (rtwvif->in_lps)
+ return;
+
+ conf->mode = RTW_MODE_LPS;
+ conf->rtwvif = rtwvif;
+ rtwvif->in_lps = true;
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
+}
+
+void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ if (!rtwvif->in_lps)
+ return;
+
+ conf->mode = RTW_MODE_ACTIVE;
+ conf->rtwvif = rtwvif;
+ rtwvif->in_lps = false;
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
+}
+
+bool rtw_in_lps(struct rtw_dev *rtwdev)
+{
+ return rtw_flag_check(rtwdev, RTW_FLAG_LEISURE_PS);
+}
+
+void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ if (WARN_ON(!rtwvif))
+ return;
+
+ if (rtwvif->in_lps)
+ return;
+
+ conf->mode = RTW_MODE_LPS;
+ conf->rtwvif = rtwvif;
+ rtwvif->in_lps = true;
+
+ rtw_enter_lps_core(rtwdev);
+}
+
+void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ if (WARN_ON(!rtwvif))
+ return;
+
+ if (!rtwvif->in_lps)
+ return;
+
+ conf->mode = RTW_MODE_ACTIVE;
+ conf->rtwvif = rtwvif;
+ rtwvif->in_lps = false;
+
+ rtw_leave_lps_core(rtwdev);
+}
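This file provides two flavours of the LPS switch: rtw_enter_lps()/rtw_leave_lps() change the power mode immediately (they end up in rtw_fw_set_pwr_mode(), so they are meant for process context), while the _irqsafe variants only record the request and queue lps_work, which performs the firmware command later. A minimal usage sketch with hypothetical caller names:

    /* Illustrative call sites only; the rtw_*_lps* helpers are the ones above. */
    static void example_vif_idle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
    {
            /* process context: program the firmware power mode right away */
            rtw_enter_lps(rtwdev, rtwvif);
    }

    static void example_traffic_hint_atomic(struct rtw_dev *rtwdev,
                                            struct rtw_vif *rtwvif)
    {
            /* atomic context: only queue lps_work, which issues the H2C later */
            rtw_leave_lps_irqsafe(rtwdev, rtwvif);
    }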
diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h
new file mode 100644
index 000000000000..09e57405dc1b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/ps.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_PS_H_
+#define __RTW_PS_H_
+
+#define RTW_LPS_THRESHOLD 2
+
+int rtw_enter_ips(struct rtw_dev *rtwdev);
+int rtw_leave_ips(struct rtw_dev *rtwdev);
+
+void rtw_lps_work(struct work_struct *work);
+void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
+void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
+void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
+void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
+bool rtw_in_lps(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
new file mode 100644
index 000000000000..e2628f05812c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_REG_DEF_H__
+#define __RTW_REG_DEF_H__
+
+#define REG_SYS_FUNC_EN 0x0002
+#define BIT_FEN_CPUEN BIT(2)
+#define BIT_FEN_BB_GLB_RST BIT(1)
+#define BIT_FEN_BB_RSTB BIT(0)
+#define REG_SYS_PW_CTRL 0x0004
+#define REG_SYS_CLK_CTRL 0x0008
+#define BIT_CPU_CLK_EN BIT(14)
+
+#define REG_RSV_CTRL 0x001C
+#define DISABLE_PI 0x3
+#define ENABLE_PI 0x2
+#define BITS_RFC_DIRECT (BIT(31) | BIT(30))
+#define BIT_WLMCU_IOIF BIT(0)
+#define REG_RF_CTRL 0x001F
+#define BIT_RF_SDM_RSTB BIT(2)
+#define BIT_RF_RSTB BIT(1)
+#define BIT_RF_EN BIT(0)
+
+#define REG_AFE_CTRL1 0x0024
+#define BIT_MAC_CLK_SEL (BIT(20) | BIT(21))
+#define REG_EFUSE_CTRL 0x0030
+#define BIT_EF_FLAG BIT(31)
+#define BIT_SHIFT_EF_ADDR 8
+#define BIT_MASK_EF_ADDR 0x3ff
+#define BIT_MASK_EF_DATA 0xff
+#define BITS_EF_ADDR (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR)
+
+#define REG_LDO_EFUSE_CTRL 0x0034
+#define BIT_MASK_EFUSE_BANK_SEL (BIT(8) | BIT(9))
+
+#define REG_GPIO_MUXCFG 0x0040
+#define BIT_FSPI_EN BIT(19)
+#define BIT_WLRFE_4_5_EN BIT(2)
+
+#define REG_LED_CFG 0x004C
+#define BIT_LNAON_SEL_EN BIT(26)
+#define BIT_PAPE_SEL_EN BIT(25)
+#define REG_PAD_CTRL1 0x0064
+#define BIT_PAPE_WLBT_SEL BIT(29)
+#define BIT_LNAON_WLBT_SEL BIT(28)
+#define REG_WL_BT_PWR_CTRL 0x0068
+#define BIT_BT_FUNC_EN BIT(18)
+#define BIT_BT_DIG_CLK_EN BIT(8)
+#define REG_HCI_OPT_CTRL 0x0074
+
+#define REG_MCUFW_CTRL 0x0080
+#define BIT_ANA_PORT_EN BIT(22)
+#define BIT_MAC_PORT_EN BIT(21)
+#define BIT_BOOT_FSPI_EN BIT(20)
+#define BIT_FW_INIT_RDY BIT(15)
+#define BIT_FW_DW_RDY BIT(14)
+#define BIT_RPWM_TOGGLE BIT(7)
+#define BIT_DMEM_CHKSUM_OK BIT(6)
+#define BIT_DMEM_DW_OK BIT(5)
+#define BIT_IMEM_CHKSUM_OK BIT(4)
+#define BIT_IMEM_DW_OK BIT(3)
+#define BIT_IMEM_BOOT_LOAD_CHECKSUM_OK BIT(2)
+#define BIT_MCUFWDL_EN BIT(0)
+#define BIT_CHECK_SUM_OK (BIT(4) | BIT(6))
+#define FW_READY (BIT_FW_INIT_RDY | BIT_FW_DW_RDY | \
+ BIT_IMEM_DW_OK | BIT_DMEM_DW_OK | \
+ BIT_CHECK_SUM_OK)
+#define FW_READY_MASK 0xffff
+
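FW_READY collects the per-memory download/checksum bits plus the init/download ready flags, and FW_READY_MASK restricts the comparison to the low 16 bits of REG_MCUFW_CTRL. A sketch of the kind of readiness poll these definitions enable, assuming the rtw_read32() accessor that accompanies the rtw_write32() helpers used elsewhere in the patch:

    /* Illustrative poll only, not the patch's own download path. */
    static int example_wait_fw_ready(struct rtw_dev *rtwdev)
    {
            int tries;

            for (tries = 0; tries < 100; tries++) {
                    u32 val = rtw_read32(rtwdev, REG_MCUFW_CTRL);

                    if ((val & FW_READY_MASK) == FW_READY)
                            return 0;
                    msleep(20);
            }

            return -EBUSY;
    }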
+#define REG_WLRF1 0x00EC
+#define REG_SYS_CFG1 0x00F0
+#define BIT_RTL_ID BIT(23)
+#define BIT_RF_TYPE_ID BIT(27)
+#define BIT_SHIFT_VENDOR_ID 16
+#define BIT_MASK_VENDOR_ID 0xf
+#define BIT_VENDOR_ID(x) (((x) & BIT_MASK_VENDOR_ID) << BIT_SHIFT_VENDOR_ID)
+#define BITS_VENDOR_ID (BIT_MASK_VENDOR_ID << BIT_SHIFT_VENDOR_ID)
+#define BIT_CLEAR_VENDOR_ID(x) ((x) & (~BITS_VENDOR_ID))
+#define BIT_GET_VENDOR_ID(x) (((x) >> BIT_SHIFT_VENDOR_ID) & BIT_MASK_VENDOR_ID)
+#define BIT_SHIFT_CHIP_VER 12
+#define BIT_MASK_CHIP_VER 0xf
+#define BIT_CHIP_VER(x) (((x) & BIT_MASK_CHIP_VER) << BIT_SHIFT_CHIP_VER)
+#define BITS_CHIP_VER (BIT_MASK_CHIP_VER << BIT_SHIFT_CHIP_VER)
+#define BIT_CLEAR_CHIP_VER(x) ((x) & (~BITS_CHIP_VER))
+#define BIT_GET_CHIP_VER(x) (((x) >> BIT_SHIFT_CHIP_VER) & BIT_MASK_CHIP_VER)
+#define REG_SYS_STATUS1 0x00F4
+#define REG_SYS_STATUS2 0x00F8
+#define REG_SYS_CFG2 0x00FC
+#define REG_WLRF1 0x00EC
+#define BIT_WLRF1_BBRF_EN (BIT(24) | BIT(25) | BIT(26))
+#define REG_CR 0x0100
+#define BIT_32K_CAL_TMR_EN BIT(10)
+#define BIT_MAC_SEC_EN BIT(9)
+#define BIT_ENSWBCN BIT(8)
+#define BIT_MACRXEN BIT(7)
+#define BIT_MACTXEN BIT(6)
+#define BIT_SCHEDULE_EN BIT(5)
+#define BIT_PROTOCOL_EN BIT(4)
+#define BIT_RXDMA_EN BIT(3)
+#define BIT_TXDMA_EN BIT(2)
+#define BIT_HCI_RXDMA_EN BIT(1)
+#define BIT_HCI_TXDMA_EN BIT(0)
+#define MAC_TRX_ENABLE (BIT_HCI_TXDMA_EN | BIT_HCI_RXDMA_EN | BIT_TXDMA_EN | \
+ BIT_RXDMA_EN | BIT_PROTOCOL_EN | BIT_SCHEDULE_EN | \
+ BIT_MACTXEN | BIT_MACRXEN)
+#define BIT_SHIFT_TXDMA_VOQ_MAP 4
+#define BIT_MASK_TXDMA_VOQ_MAP 0x3
+#define BIT_TXDMA_VOQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_VOQ_MAP) << BIT_SHIFT_TXDMA_VOQ_MAP)
+#define BIT_SHIFT_TXDMA_VIQ_MAP 6
+#define BIT_MASK_TXDMA_VIQ_MAP 0x3
+#define BIT_TXDMA_VIQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_VIQ_MAP) << BIT_SHIFT_TXDMA_VIQ_MAP)
+#define REG_TXDMA_PQ_MAP 0x010C
+#define BIT_SHIFT_TXDMA_BEQ_MAP 8
+#define BIT_MASK_TXDMA_BEQ_MAP 0x3
+#define BIT_TXDMA_BEQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_BEQ_MAP) << BIT_SHIFT_TXDMA_BEQ_MAP)
+#define BIT_SHIFT_TXDMA_BKQ_MAP 10
+#define BIT_MASK_TXDMA_BKQ_MAP 0x3
+#define BIT_TXDMA_BKQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_BKQ_MAP) << BIT_SHIFT_TXDMA_BKQ_MAP)
+#define BIT_SHIFT_TXDMA_MGQ_MAP 12
+#define BIT_MASK_TXDMA_MGQ_MAP 0x3
+#define BIT_TXDMA_MGQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_MGQ_MAP) << BIT_SHIFT_TXDMA_MGQ_MAP)
+#define BIT_SHIFT_TXDMA_HIQ_MAP 14
+#define BIT_MASK_TXDMA_HIQ_MAP 0x3
+#define BIT_TXDMA_HIQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_HIQ_MAP) << BIT_SHIFT_TXDMA_HIQ_MAP)
+#define BIT_SHIFT_TXSC_40M 4
+#define BIT_MASK_TXSC_40M 0xf
+#define BIT_TXSC_40M(x) \
+ (((x) & BIT_MASK_TXSC_40M) << BIT_SHIFT_TXSC_40M)
+#define BIT_SHIFT_TXSC_20M 0
+#define BIT_MASK_TXSC_20M 0xf
+#define BIT_TXSC_20M(x) \
+ (((x) & BIT_MASK_TXSC_20M) << BIT_SHIFT_TXSC_20M)
+#define BIT_SHIFT_MAC_CLK_SEL 20
+#define MAC_CLK_HW_DEF_80M 0
+#define MAC_CLK_HW_DEF_40M 1
+#define MAC_CLK_HW_DEF_20M 2
+#define MAC_CLK_SPEED 80
+
+#define REG_CR 0x0100
+#define REG_TRXFF_BNDY 0x0114
+#define REG_RXFF_BNDY 0x011C
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_C2HEVT 0x01A0
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX0 0x01D0
+#define REG_HMEBOX1 0x01D4
+#define REG_HMEBOX2 0x01D8
+#define REG_HMEBOX3 0x01DC
+#define REG_HMEBOX0_EX 0x01F0
+#define REG_HMEBOX1_EX 0x01F4
+#define REG_HMEBOX2_EX 0x01F8
+#define REG_HMEBOX3_EX 0x01FC
+
+#define REG_FIFOPAGE_CTRL_2 0x0204
+#define BIT_BCN_VALID_V1 BIT(15)
+#define BIT_MASK_BCN_HEAD_1_V1 0xfff
+#define REG_AUTO_LLT_V1 0x0208
+#define BIT_AUTO_INIT_LLT_V1 BIT(0)
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define BTI_PAGE_OVF BIT(2)
+#define REG_RQPN_CTRL_1 0x0228
+#define REG_RQPN_CTRL_2 0x022C
+#define BIT_LD_RQPN BIT(31)
+#define REG_FIFOPAGE_INFO_1 0x0230
+#define REG_FIFOPAGE_INFO_2 0x0234
+#define REG_FIFOPAGE_INFO_3 0x0238
+#define REG_FIFOPAGE_INFO_4 0x023C
+#define REG_FIFOPAGE_INFO_5 0x0240
+#define REG_H2C_HEAD 0x0244
+#define REG_H2C_TAIL 0x0248
+#define REG_H2C_READ_ADDR 0x024C
+#define REG_H2C_INFO 0x0254
+
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define BIT_EN_BCNQ_DL BIT(22)
+#define BIT_EN_WR_FREE_TAIL BIT(20)
+#define REG_BCNQ_BDNY_V1 0x0424
+#define REG_LIFETIME_EN 0x0426
+#define BIT_BA_PARSER_EN BIT(5)
+#define REG_SPEC_SIFS 0x0428
+#define REG_DARFRC 0x0430
+#define REG_DARFRCH 0x0434
+#define REG_RARFRCH 0x043C
+#define REG_ARFR0 0x0444
+#define REG_ARFRH0 0x0448
+#define REG_ARFR1_V1 0x044C
+#define REG_ARFRH1_V1 0x0450
+#define REG_CCK_CHECK 0x0454
+#define BIT_CHECK_CCK_EN BIT(7)
+#define REG_AMPDU_MAX_TIME_V1 0x0455
+#define REG_BCNQ1_BDNY_V1 0x0456
+#define REG_TX_HANG_CTRL 0x045E
+#define BIT_EN_EOF_V1 BIT(2)
+#define REG_DATA_SC 0x0483
+#define REG_ARFR4 0x049C
+#define REG_ARFRH4 0x04A0
+#define REG_ARFR5 0x04A4
+#define REG_ARFRH5 0x04A8
+#define REG_SW_AMPDU_BURST_MODE_CTRL 0x04BC
+#define BIT_PRE_TX_CMD BIT(6)
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_PRECNT_CTRL 0x04E5
+#define BIT_EN_PRECNT BIT(11)
+
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_PIFS 0x0512
+#define REG_SIFS 0x0514
+#define BIT_SHIFT_SIFS_OFDM_CTX 8
+#define BIT_SHIFT_SIFS_CCK_TRX 16
+#define BIT_SHIFT_SIFS_OFDM_TRX 24
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define BIT_SIFS_BK_EN BIT(12)
+#define REG_TXPAUSE 0x0522
+#define REG_RD_CTRL 0x0524
+#define BIT_DIS_TXOP_CFE BIT(10)
+#define BIT_DIS_LSIG_CFE BIT(9)
+#define BIT_DIS_STBC_CFE BIT(8)
+#define REG_TBTT_PROHIBIT 0x0540
+#define BIT_SHIFT_TBTT_HOLD_TIME_AP 8
+#define REG_RD_NAV_NXT 0x0544
+#define REG_BCN_CTRL 0x0550
+#define BIT_DIS_TSF_UDT BIT(4)
+#define BIT_EN_BCN_FUNCTION BIT(3)
+#define REG_BCN_CTRL_CLINT0 0x0551
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_USTIME_TSF 0x055C
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_MISC_CTRL 0x0577
+#define BIT_EN_FREE_CNT BIT(3)
+#define BIT_DIS_SECOND_CCA (BIT(0) | BIT(1))
+#define REG_TIMER0_SRC_SEL 0x05B4
+#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
+
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define BIT_APP_FCS BIT(31)
+#define BIT_APP_MIC BIT(30)
+#define BIT_APP_ICV BIT(29)
+#define BIT_APP_PHYSTS BIT(28)
+#define BIT_APP_BASSN BIT(27)
+#define BIT_VHT_DACK BIT(26)
+#define BIT_TCPOFLD_EN BIT(25)
+#define BIT_ENMBID BIT(24)
+#define BIT_LSIGEN BIT(23)
+#define BIT_MFBEN BIT(22)
+#define BIT_DISCHKPPDLLEN BIT(21)
+#define BIT_PKTCTL_DLEN BIT(20)
+#define BIT_TIM_PARSER_EN BIT(18)
+#define BIT_BC_MD_EN BIT(17)
+#define BIT_UC_MD_EN BIT(16)
+#define BIT_RXSK_PERPKT BIT(15)
+#define BIT_HTC_LOC_CTRL BIT(14)
+#define BIT_RPFM_CAM_ENABLE BIT(12)
+#define BIT_TA_BCN BIT(11)
+#define BIT_DISDECMYPKT BIT(10)
+#define BIT_AICV BIT(9)
+#define BIT_ACRC32 BIT(8)
+#define BIT_CBSSID_BCN BIT(7)
+#define BIT_CBSSID_DATA BIT(6)
+#define BIT_APWRMGT BIT(5)
+#define BIT_ADD3 BIT(4)
+#define BIT_AB BIT(3)
+#define BIT_AM BIT(2)
+#define BIT_APM BIT(1)
+#define BIT_AAP BIT(0)
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DRVINFO_SZ 0x060F
+#define BIT_APP_PHYSTS BIT(28)
+#define REG_USTIME_EDCA 0x0638
+#define REG_ACKTO_CCK 0x0639
+#define REG_RESP_SIFS_CCK 0x063C
+#define REG_RESP_SIFS_OFDM 0x063E
+#define REG_ACKTO 0x0640
+#define REG_EIFS 0x0642
+#define REG_NAV_CTRL 0x0650
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+#define BIT_RFMOD (BIT(7) | BIT(8))
+#define BIT_RFMOD_80M BIT(8)
+#define BIT_RFMOD_40M BIT(7)
+#define REG_WMAC_TRXPTCL_CTL_H 0x066C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BBPSF_CTRL 0x06DC
+
+#define REG_WMAC_OPTION_FUNCTION 0x07D0
+#define REG_WMAC_OPTION_FUNCTION_1 0x07D4
+
+#define REG_ANAPAR_XTAL_0 0x1040
+#define REG_CPU_DMEM_CON 0x1080
+#define BIT_WL_PLATFORM_RST BIT(16)
+#define BIT_WL_SECURITY_CLK BIT(15)
+#define BIT_DDMA_EN BIT(8)
+
+#define REG_H2C_PKT_READADDR 0x10D0
+#define REG_H2C_PKT_WRITEADDR 0x10D4
+#define REG_FW_DBG7 0x10FC
+#define FW_KEY_MASK 0xffffff00
+
+#define REG_CR_EXT 0x1100
+
+#define REG_DDMA_CH0SA 0x1200
+#define REG_DDMA_CH0DA 0x1204
+#define REG_DDMA_CH0CTRL 0x1208
+#define BIT_DDMACH0_OWN BIT(31)
+#define BIT_DDMACH0_CHKSUM_EN BIT(29)
+#define BIT_DDMACH0_CHKSUM_STS BIT(27)
+#define BIT_DDMACH0_RESET_CHKSUM_STS BIT(25)
+#define BIT_DDMACH0_CHKSUM_CONT BIT(24)
+#define BIT_MASK_DDMACH0_DLEN 0x3ffff
+
+#define REG_H2CQ_CSR 0x1330
+#define BIT_H2CQ_FULL BIT(31)
+#define REG_FAST_EDCA_VOVI_SETTING 0x1448
+#define REG_FAST_EDCA_BEBK_SETTING 0x144C
+
+#define REG_RXPSF_CTRL 0x1610
+#define BIT_RXGCK_FIFOTHR_EN BIT(28)
+
+#define BIT_SHIFT_RXGCK_VHT_FIFOTHR 26
+#define BIT_MASK_RXGCK_VHT_FIFOTHR 0x3
+#define BIT_RXGCK_VHT_FIFOTHR(x) \
+ (((x) & BIT_MASK_RXGCK_VHT_FIFOTHR) << BIT_SHIFT_RXGCK_VHT_FIFOTHR)
+#define BITS_RXGCK_VHT_FIFOTHR \
+ (BIT_MASK_RXGCK_VHT_FIFOTHR << BIT_SHIFT_RXGCK_VHT_FIFOTHR)
+
+#define BIT_SHIFT_RXGCK_HT_FIFOTHR 24
+#define BIT_MASK_RXGCK_HT_FIFOTHR 0x3
+#define BIT_RXGCK_HT_FIFOTHR(x) \
+ (((x) & BIT_MASK_RXGCK_HT_FIFOTHR) << BIT_SHIFT_RXGCK_HT_FIFOTHR)
+#define BITS_RXGCK_HT_FIFOTHR \
+ (BIT_MASK_RXGCK_HT_FIFOTHR << BIT_SHIFT_RXGCK_HT_FIFOTHR)
+
+#define BIT_SHIFT_RXGCK_OFDM_FIFOTHR 22
+#define BIT_MASK_RXGCK_OFDM_FIFOTHR 0x3
+#define BIT_RXGCK_OFDM_FIFOTHR(x) \
+ (((x) & BIT_MASK_RXGCK_OFDM_FIFOTHR) << BIT_SHIFT_RXGCK_OFDM_FIFOTHR)
+#define BITS_RXGCK_OFDM_FIFOTHR \
+ (BIT_MASK_RXGCK_OFDM_FIFOTHR << BIT_SHIFT_RXGCK_OFDM_FIFOTHR)
+
+#define BIT_SHIFT_RXGCK_CCK_FIFOTHR 20
+#define BIT_MASK_RXGCK_CCK_FIFOTHR 0x3
+#define BIT_RXGCK_CCK_FIFOTHR(x) \
+ (((x) & BIT_MASK_RXGCK_CCK_FIFOTHR) << BIT_SHIFT_RXGCK_CCK_FIFOTHR)
+#define BITS_RXGCK_CCK_FIFOTHR \
+ (BIT_MASK_RXGCK_CCK_FIFOTHR << BIT_SHIFT_RXGCK_CCK_FIFOTHR)
+
+#define BIT_RXGCK_OFDMCCA_EN BIT(16)
+
+#define BIT_SHIFT_RXPSF_PKTLENTHR 13
+#define BIT_MASK_RXPSF_PKTLENTHR 0x7
+#define BIT_RXPSF_PKTLENTHR(x) \
+ (((x) & BIT_MASK_RXPSF_PKTLENTHR) << BIT_SHIFT_RXPSF_PKTLENTHR)
+#define BITS_RXPSF_PKTLENTHR \
+ (BIT_MASK_RXPSF_PKTLENTHR << BIT_SHIFT_RXPSF_PKTLENTHR)
+#define BIT_CLEAR_RXPSF_PKTLENTHR(x) ((x) & (~BITS_RXPSF_PKTLENTHR))
+#define BIT_SET_RXPSF_PKTLENTHR(x, v) \
+ (BIT_CLEAR_RXPSF_PKTLENTHR(x) | BIT_RXPSF_PKTLENTHR(v))
+
+#define BIT_RXPSF_CTRLEN BIT(12)
+#define BIT_RXPSF_VHTCHKEN BIT(11)
+#define BIT_RXPSF_HTCHKEN BIT(10)
+#define BIT_RXPSF_OFDMCHKEN BIT(9)
+#define BIT_RXPSF_CCKCHKEN BIT(8)
+#define BIT_RXPSF_OFDMRST BIT(7)
+#define BIT_RXPSF_CCKRST BIT(6)
+#define BIT_RXPSF_MHCHKEN BIT(5)
+#define BIT_RXPSF_CONT_ERRCHKEN BIT(4)
+#define BIT_RXPSF_ALL_ERRCHKEN BIT(3)
+
+#define BIT_SHIFT_RXPSF_ERRTHR 0
+#define BIT_MASK_RXPSF_ERRTHR 0x7
+#define BIT_RXPSF_ERRTHR(x) \
+ (((x) & BIT_MASK_RXPSF_ERRTHR) << BIT_SHIFT_RXPSF_ERRTHR)
+#define BITS_RXPSF_ERRTHR (BIT_MASK_RXPSF_ERRTHR << BIT_SHIFT_RXPSF_ERRTHR)
+#define BIT_CLEAR_RXPSF_ERRTHR(x) ((x) & (~BITS_RXPSF_ERRTHR))
+#define BIT_GET_RXPSF_ERRTHR(x) \
+ (((x) >> BIT_SHIFT_RXPSF_ERRTHR) & BIT_MASK_RXPSF_ERRTHR)
+#define BIT_SET_RXPSF_ERRTHR(x, v) \
+ (BIT_CLEAR_RXPSF_ERRTHR(x) | BIT_RXPSF_ERRTHR(v))
+
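The RXPSF field macros follow the usual clear-then-insert pattern: BITS_* is the in-place mask, BIT_CLEAR_* removes the old field, and BIT_SET_* combines the clear with the shifted new value. A short read-modify-write sketch (again assuming a rtw_read32() accessor; the function name is made up):

    /* Illustrative update of the RXPSF error-threshold field. */
    static void example_set_rxpsf_errthr(struct rtw_dev *rtwdev, u8 thr)
    {
            u32 val = rtw_read32(rtwdev, REG_RXPSF_CTRL);

            /* clears bits 2..0 of the old value, then ORs in thr */
            val = BIT_SET_RXPSF_ERRTHR(val, thr);
            rtw_write32(rtwdev, REG_RXPSF_CTRL, val);
    }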
+#define REG_RXPSF_TYPE_CTRL 0x1614
+#define REG_GENERAL_OPTION 0x1664
+#define BIT_DUMMY_FCS_READY_MASK_EN BIT(9)
+
+#define REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1 0x1700
+#define REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1 0x1704
+#define REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1 0x1708
+#define LTECOEX_READY BIT(29)
+#define LTECOEX_ACCESS_CTRL REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1
+#define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1
+#define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1
+
+#define RF_DTXLOK 0x08
+#define RF_CFGCH 0x18
+#define RF_LUTWA 0x33
+#define RF_LUTWD1 0x3e
+#define RF_LUTWD0 0x3f
+#define RF_XTALX2 0xb8
+#define RF_MALSEL 0xbe
+#define RF_LUTDBG 0xdf
+#define RF_LUTWE2 0xee
+#define RF_LUTWE 0xef
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
new file mode 100644
index 000000000000..e7750a833a8e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "regd.h"
+#include "debug.h"
+#include "phy.h"
+
+#define COUNTRY_CHPLAN_ENT(_alpha2, _chplan, _txpwr_regd) \
+ {.alpha2 = (_alpha2), \
+ .chplan = (_chplan), \
+ .txpwr_regd = (_txpwr_regd) \
+ }
+
+/* If country code is not correctly defined in efuse,
+ * use worldwide country code and txpwr regd.
+ */
+static const struct rtw_regulatory rtw_defined_chplan =
+ COUNTRY_CHPLAN_ENT("00", RTW_CHPLAN_REALTEK_DEFINE, RTW_REGD_WW);
+
+static const struct rtw_regulatory all_chplan_map[] = {
+ COUNTRY_CHPLAN_ENT("AD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AF", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AO", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AR", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("AS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("AT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("AZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BB", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("BD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BO", RTW_CHPLAN_WORLD_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("BR", RTW_CHPLAN_FCC2_FCC1, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("BS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BZ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("DE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("DJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("DK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("DM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("DO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("DZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("EC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("EE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("EG", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("EH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ER", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ES", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ET", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("FI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("FJ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("FK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("FM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("FO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("FR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC1_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("GE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GP", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GT", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("GU", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("GW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GY", RTW_CHPLAN_FCC1_NCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("HN", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("HR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("HT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("HU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ID", RTW_CHPLAN_ETSI1_ETSI12, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IL", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JO", RTW_CHPLAN_WORLD_ETSI8, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JP", RTW_CHPLAN_MKK1_MKK1, RTW_REGD_MKK),
+ COUNTRY_CHPLAN_ENT("KE", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC2, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KW", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("KZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("LI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MA", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ME", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MF", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("MG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MH", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("MK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ML", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MP", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("MQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MV", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MX", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NG", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("NL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("OM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PA", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("PE", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("PF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PK", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("PT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("PY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RU", RTW_CHPLAN_WORLD_ETSI14, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("SE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SR", RTW_CHPLAN_FCC2_FCC17, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("ST", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("SV", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("SX", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("SZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("TZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("UA", RTW_CHPLAN_WORLD_ETSI3, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("UG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("US", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("UY", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("UZ", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("VA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("VC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("VE", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("VI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("VU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("WF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("WS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("YE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("YT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ZM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ZW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+};
+
+static void rtw_regd_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ enum nl80211_band band;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_reg_rule *reg_rule;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+
+ sband = wiphy->bands[band];
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ reg_rule = freq_reg_info(wiphy,
+ MHZ_TO_KHZ(ch->center_freq));
+ if (IS_ERR(reg_rule))
+ continue;
+
+ ch->flags &= ~IEEE80211_CHAN_DISABLED;
+
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
+ }
+ }
+}
+
+static void rtw_regd_apply_hw_cap_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ int i;
+
+ if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_80))
+ return;
+
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
+ if (!sband)
+ goto out_5g;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ ch->flags |= IEEE80211_CHAN_NO_80MHZ;
+ }
+
+out_5g:
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ ch->flags |= IEEE80211_CHAN_NO_80MHZ;
+ }
+}
+
+static void rtw_regd_apply_world_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ rtw_regd_apply_beaconing_flags(wiphy, initiator);
+}
+
+static struct rtw_regulatory rtw_regd_find_reg_by_name(char *alpha2)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(all_chplan_map); i++) {
+ if (!memcmp(all_chplan_map[i].alpha2, alpha2, 2))
+ return all_chplan_map[i];
+ }
+
+ return rtw_defined_chplan;
+}
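Any alpha2 that is not listed in all_chplan_map falls back to rtw_defined_chplan, i.e. the worldwide channel plan and TX-power regd described in the comment at the top of the file. Illustrative fragment; the wrapper name and its return type are made up:

    /* Unknown country codes map to the worldwide defaults. */
    static bool example_unknown_alpha2(void)
    {
            char alpha2[2] = { 'X', 'X' };  /* not in all_chplan_map */
            struct rtw_regulatory reg = rtw_regd_find_reg_by_name(alpha2);

            return reg.chplan == RTW_CHPLAN_REALTEK_DEFINE &&
                   reg.txpwr_regd == RTW_REGD_WW;
    }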
+
+static int rtw_regd_notifier_apply(struct rtw_dev *rtwdev,
+ struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ if (request->initiator == NL80211_REGDOM_SET_BY_USER)
+ return 0;
+ rtwdev->regd = rtw_regd_find_reg_by_name(request->alpha2);
+ rtw_regd_apply_world_flags(wiphy, request->initiator);
+
+ return 0;
+}
+
+static int
+rtw_regd_init_wiphy(struct rtw_regulatory *reg, struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ wiphy->reg_notifier = reg_notifier;
+
+ wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
+
+ rtw_regd_apply_hw_cap_flags(wiphy);
+
+ return 0;
+}
+
+int rtw_regd_init(struct rtw_dev *rtwdev,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ if (!wiphy)
+ return -EINVAL;
+
+ rtwdev->regd = rtw_regd_find_reg_by_name(rtwdev->efuse.country_code);
+ rtw_regd_init_wiphy(&rtwdev->regd, wiphy, reg_notifier);
+
+ return 0;
+}
+
+void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ rtw_regd_notifier_apply(rtwdev, wiphy, request);
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "get alpha2 %c%c from initiator %d, mapping to chplan 0x%x, txregd %d\n",
+ request->alpha2[0], request->alpha2[1], request->initiator,
+ rtwdev->regd.chplan, rtwdev->regd.txpwr_regd);
+
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+}
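A sketch of how the two exported entry points are expected to be wired together at registration time; the caller name is hypothetical, only rtw_regd_init() and rtw_regd_notifier() come from this file:

    /* Illustrative registration-time wiring. */
    static int example_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
    {
            int ret;

            /* pick the channel plan from the efuse country code and
             * install rtw_regd_notifier() as wiphy->reg_notifier
             */
            ret = rtw_regd_init(rtwdev, rtw_regd_notifier);
            if (ret) {
                    rtw_err(rtwdev, "failed to init regd\n");
                    return ret;
            }

            return ieee80211_register_hw(hw);
    }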
diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h
new file mode 100644
index 000000000000..7784bb6d3ba7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/regd.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_REGD_H_
+#define __RTW_REGD_H_
+
+#define IEEE80211_CHAN_NO_IBSS IEEE80211_CHAN_NO_IR
+#define IEEE80211_CHAN_PASSIVE_SCAN IEEE80211_CHAN_NO_IR
+enum rtw_chplan_id {
+ RTW_CHPLAN_WORLD_ETSI1 = 0x26,
+ RTW_CHPLAN_MKK1_MKK1 = 0x27,
+ RTW_CHPLAN_IC1_IC2 = 0x2B,
+ RTW_CHPLAN_WORLD_CHILE1 = 0x2D,
+ RTW_CHPLAN_WORLD_FCC3 = 0x30,
+ RTW_CHPLAN_WORLD_FCC5 = 0x32,
+ RTW_CHPLAN_FCC1_FCC7 = 0x34,
+ RTW_CHPLAN_WORLD_ETSI3 = 0x36,
+ RTW_CHPLAN_ETSI1_ETSI12 = 0x3D,
+ RTW_CHPLAN_KCC1_KCC2 = 0x3E,
+ RTW_CHPLAN_ETSI1_ETSI4 = 0x42,
+ RTW_CHPLAN_FCC1_NCC3 = 0x44,
+ RTW_CHPLAN_WORLD_ACMA1 = 0x45,
+ RTW_CHPLAN_WORLD_ETSI6 = 0x47,
+ RTW_CHPLAN_WORLD_ETSI7 = 0x48,
+ RTW_CHPLAN_WORLD_ETSI8 = 0x49,
+ RTW_CHPLAN_WORLD_ETSI10 = 0x51,
+ RTW_CHPLAN_WORLD_ETSI14 = 0x59,
+ RTW_CHPLAN_FCC2_FCC7 = 0x61,
+ RTW_CHPLAN_FCC2_FCC1 = 0x62,
+ RTW_CHPLAN_WORLD_FCC7 = 0x73,
+ RTW_CHPLAN_FCC2_FCC17 = 0x74,
+ RTW_CHPLAN_WORLD_ETSI20 = 0x75,
+ RTW_CHPLAN_FCC2_FCC11 = 0x76,
+ RTW_CHPLAN_REALTEK_DEFINE = 0x7f,
+};
+
+struct country_code_to_enum_rd {
+ u16 countrycode;
+ const char *iso_name;
+};
+
+enum country_code_type {
+ COUNTRY_CODE_FCC = 0,
+ COUNTRY_CODE_IC = 1,
+ COUNTRY_CODE_ETSI = 2,
+ COUNTRY_CODE_SPAIN = 3,
+ COUNTRY_CODE_FRANCE = 4,
+ COUNTRY_CODE_MKK = 5,
+ COUNTRY_CODE_MKK1 = 6,
+ COUNTRY_CODE_ISRAEL = 7,
+ COUNTRY_CODE_TELEC = 8,
+ COUNTRY_CODE_MIC = 9,
+ COUNTRY_CODE_GLOBAL_DOMAIN = 10,
+ COUNTRY_CODE_WORLD_WIDE_13 = 11,
+ COUNTRY_CODE_TELEC_NETGEAR = 12,
+ COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
+
+ /* new channel plan above this */
+ COUNTRY_CODE_MAX
+};
+
+int rtw_regd_init(struct rtw_dev *rtwdev,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
+void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
new file mode 100644
index 000000000000..1172f6c0605b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -0,0 +1,1594 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "tx.h"
+#include "rx.h"
+#include "phy.h"
+#include "rtw8822b.h"
+#include "rtw8822b_table.h"
+#include "mac.h"
+#include "reg.h"
+#include "debug.h"
+
+static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path);
+
+static void rtw8822be_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8822b_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
+static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8822b_efuse *map;
+ int i;
+
+ map = (struct rtw8822b_efuse *)log_map;
+
+ efuse->rfe_option = map->rfe_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->pa_type_2g = map->pa_type;
+ efuse->pa_type_5g = map->pa_type;
+ efuse->lna_type_2g = map->lna_type_2g[0];
+ efuse->lna_type_5g = map->lna_type_5g[0];
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8822be_efuse_parsing(efuse, map);
+ break;
+ default:
+ /* unsupported now */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtw8822b_phy_rfe_init(struct rtw_dev *rtwdev)
+{
+ /* chip top mux */
+ rtw_write32_mask(rtwdev, 0x64, BIT(29) | BIT(28), 0x3);
+ rtw_write32_mask(rtwdev, 0x4c, BIT(26) | BIT(25), 0x0);
+ rtw_write32_mask(rtwdev, 0x40, BIT(2), 0x1);
+
+ /* from s0 or s1 */
+ rtw_write32_mask(rtwdev, 0x1990, 0x3f, 0x30);
+ rtw_write32_mask(rtwdev, 0x1990, (BIT(11) | BIT(10)), 0x3);
+
+ /* input or output */
+ rtw_write32_mask(rtwdev, 0x974, 0x3f, 0x3f);
+ rtw_write32_mask(rtwdev, 0x974, (BIT(11) | BIT(10)), 0x3);
+}
+
+static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 crystal_cap;
+ bool is_tx2_path;
+
+ /* power on BB/RF domain */
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN,
+ BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
+ rtw_write8_set(rtwdev, REG_RF_CTRL,
+ BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+ rtw_write32_set(rtwdev, REG_WLRF1, BIT_WLRF1_BBRF_EN);
+
+ /* pre init before header files config */
+ rtw_write32_clr(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
+
+ rtw_phy_load_tables(rtwdev);
+
+ crystal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ rtw_write32_mask(rtwdev, 0x24, 0x7e000000, crystal_cap);
+ rtw_write32_mask(rtwdev, 0x28, 0x7e, crystal_cap);
+
+ /* post init after header files config */
+ rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
+
+ is_tx2_path = false;
+ rtw8822b_config_trx_mode(rtwdev, hal->antenna_tx, hal->antenna_rx,
+ is_tx2_path);
+ rtw_phy_init(rtwdev);
+
+ rtw8822b_phy_rfe_init(rtwdev);
+
+ /* wifi path controller */
+ rtw_write32_mask(rtwdev, 0x70, 0x4000000, 1);
+ /* BB control */
+ rtw_write32_mask(rtwdev, 0x4c, 0x01800000, 0x2);
+ /* antenna mux switch */
+ rtw_write8(rtwdev, 0x974, 0xff);
+ rtw_write32_mask(rtwdev, 0x1990, 0x300, 0);
+ rtw_write32_mask(rtwdev, 0xcbc, 0x80000, 0x0);
+ /* SW control */
+ rtw_write8(rtwdev, 0xcb4, 0x77);
+ /* switch to WL side controller and gnt_wl gnt_bt debug signal */
+ rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
+ /* gnt_wl = 1, gnt_bt = 0 */
+ rtw_write32(rtwdev, 0x1704, 0x7700);
+ rtw_write32(rtwdev, 0x1700, 0xc00f0038);
+ /* switch for WL 2G */
+ rtw_write8(rtwdev, 0xcbd, 0x2);
+}
+
+#define WLAN_SLOT_TIME 0x09
+#define WLAN_PIFS_TIME 0x19
+#define WLAN_SIFS_CCK_CONT_TX 0xA
+#define WLAN_SIFS_OFDM_CONT_TX 0xE
+#define WLAN_SIFS_CCK_TRX 0x10
+#define WLAN_SIFS_OFDM_TRX 0x10
+#define WLAN_VO_TXOP_LIMIT 0x186 /* unit : 32us */
+#define WLAN_VI_TXOP_LIMIT 0x3BC /* unit : 32us */
+#define WLAN_RDG_NAV 0x05
+#define WLAN_TXOP_NAV 0x1B
+#define WLAN_CCK_RX_TSF 0x30
+#define WLAN_OFDM_RX_TSF 0x30
+#define WLAN_TBTT_PROHIBIT 0x04 /* unit : 32us */
+#define WLAN_TBTT_HOLD_TIME 0x064 /* unit : 32us */
+#define WLAN_DRV_EARLY_INT 0x04
+#define WLAN_BCN_DMA_TIME 0x02
+
+#define WLAN_RX_FILTER0 0x0FFFFFFF
+#define WLAN_RX_FILTER2 0xFFFF
+#define WLAN_RCR_CFG 0xE400220E
+#define WLAN_RXPKT_MAX_SZ 12288
+#define WLAN_RXPKT_MAX_SZ_512 (WLAN_RXPKT_MAX_SZ >> 9)
+
+#define WLAN_AMPDU_MAX_TIME 0x70
+#define WLAN_RTS_LEN_TH 0xFF
+#define WLAN_RTS_TX_TIME_TH 0x08
+#define WLAN_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
+#define FAST_EDCA_VO_TH 0x06
+#define FAST_EDCA_VI_TH 0x06
+#define FAST_EDCA_BE_TH 0x06
+#define FAST_EDCA_BK_TH 0x06
+#define WLAN_BAR_RETRY_LIMIT 0x01
+#define WLAN_RA_TRY_RATE_AGG_LIMIT 0x08
+
+#define WLAN_TX_FUNC_CFG1 0x30
+#define WLAN_TX_FUNC_CFG2 0x30
+#define WLAN_MAC_OPT_NORM_FUNC1 0x98
+#define WLAN_MAC_OPT_LB_FUNC1 0x80
+#define WLAN_MAC_OPT_FUNC2 0x30810041
+
+#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
+ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
+ (WLAN_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX) | \
+ (WLAN_SIFS_OFDM_TRX << BIT_SHIFT_SIFS_OFDM_TRX))
+
+#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\
+ (WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP))
+
+#define WLAN_NAV_CFG (WLAN_RDG_NAV | (WLAN_TXOP_NAV << 16))
+#define WLAN_RX_TSF_CFG (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF) << 8)
+
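For reference, the composite constants above expand to the following register values (plain arithmetic on the per-field defines, written here as illustrative compile-time checks rather than as part of the patch):

    _Static_assert(WLAN_SIFS_CFG == 0x10100E0A, "0x10/0x10/0x0E/0x0A per the SIFS shifts");
    _Static_assert(WLAN_TBTT_TIME == 0x6404, "0x64 << 8 | 0x04");
    _Static_assert(WLAN_NAV_CFG == 0x001B0005, "0x1B << 16 | 0x05");
    _Static_assert(WLAN_RX_TSF_CFG == 0x3030, "0x30 << 8 | 0x30");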
+static int rtw8822b_mac_init(struct rtw_dev *rtwdev)
+{
+ u32 value32;
+
+ /* protocol configuration */
+ rtw_write8_clr(rtwdev, REG_SW_AMPDU_BURST_MODE_CTRL, BIT_PRE_TX_CMD);
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, WLAN_AMPDU_MAX_TIME);
+ rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_EOF_V1);
+ value32 = WLAN_RTS_LEN_TH | (WLAN_RTS_TX_TIME_TH << 8) |
+ (WLAN_MAX_AGG_PKT_LIMIT << 16) |
+ (WLAN_RTS_MAX_AGG_PKT_LIMIT << 24);
+ rtw_write32(rtwdev, REG_PROT_MODE_CTRL, value32);
+ rtw_write16(rtwdev, REG_BAR_MODE_CTRL + 2,
+ WLAN_BAR_RETRY_LIMIT | WLAN_RA_TRY_RATE_AGG_LIMIT << 8);
+ rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING, FAST_EDCA_VO_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING + 2, FAST_EDCA_VI_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING, FAST_EDCA_BE_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING + 2, FAST_EDCA_BK_TH);
+ /* EDCA configuration */
+ rtw_write8_clr(rtwdev, REG_TIMER0_SRC_SEL, BIT_TSFT_SEL_TIMER0);
+ rtw_write16(rtwdev, REG_TXPAUSE, 0x0000);
+ rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+ rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_TIME);
+ rtw_write32(rtwdev, REG_SIFS, WLAN_SIFS_CFG);
+ rtw_write16(rtwdev, REG_EDCA_VO_PARAM + 2, WLAN_VO_TXOP_LIMIT);
+ rtw_write16(rtwdev, REG_EDCA_VI_PARAM + 2, WLAN_VI_TXOP_LIMIT);
+ rtw_write32(rtwdev, REG_RD_NAV_NXT, WLAN_NAV_CFG);
+ rtw_write16(rtwdev, REG_RXTSF_OFFSET_CCK, WLAN_RX_TSF_CFG);
+ /* Set beacon control - enable TSF and other related functions */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+ /* Set send beacon related registers */
+ rtw_write32(rtwdev, REG_TBTT_PROHIBIT, WLAN_TBTT_TIME);
+ rtw_write8(rtwdev, REG_DRVERLYINT, WLAN_DRV_EARLY_INT);
+ rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
+ rtw_write8_clr(rtwdev, REG_TX_PTCL_CTRL + 1, BIT_SIFS_BK_EN >> 8);
+ /* WMAC configuration */
+ rtw_write32(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+ rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+ rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RXPKT_MAX_SZ_512);
+ rtw_write8(rtwdev, REG_TCR + 2, WLAN_TX_FUNC_CFG2);
+ rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
+ rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
+ rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1);
+
+ return 0;
+}
+
+static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ bool is_channel_2g = channel <= 14;
+
+ if (is_channel_2g) {
+ rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x705770);
+ rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
+ rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(4), 0);
+ } else {
+ rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x177517);
+ rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x75);
+ rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(5), 0);
+ }
+
+ rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
+
+ if (hal->antenna_rx == BB_PATH_AB ||
+ hal->antenna_tx == BB_PATH_AB) {
+ /* 2TX or 2RX */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa501);
+ } else if (hal->antenna_rx == hal->antenna_tx) {
+ /* TXA+RXA or TXB+RXB */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa500);
+ } else {
+ /* TXB+RXA or TXA+RXB */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa005);
+ }
+}
+
+static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ bool is_channel_2g = channel <= 14;
+
+ if (is_channel_2g) {
+ /* signal source */
+ rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x745774);
+ rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
+ } else {
+ /* signal source */
+ rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x477547);
+ rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x75);
+ }
+
+ rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
+
+ if (is_channel_2g) {
+ if (hal->antenna_rx == BB_PATH_AB ||
+ hal->antenna_tx == BB_PATH_AB) {
+ /* 2TX or 2RX */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa501);
+ } else if (hal->antenna_rx == hal->antenna_tx) {
+ /* TXA+RXA or TXB+RXB */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa500);
+ } else {
+ /* TXB+RXA or TXA+RXB */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa005);
+ }
+ } else {
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa5a5);
+ }
+}
+
+enum {
+ CCUT_IDX_1R_2G,
+ CCUT_IDX_2R_2G,
+ CCUT_IDX_1R_5G,
+ CCUT_IDX_2R_5G,
+ CCUT_IDX_NR,
+};
+
+struct cca_ccut {
+ u32 reg82c[CCUT_IDX_NR];
+ u32 reg830[CCUT_IDX_NR];
+ u32 reg838[CCUT_IDX_NR];
+};
+
+static const struct cca_ccut cca_ifem_ccut = {
+ {0x75C97010, 0x75C97010, 0x75C97010, 0x75C97010}, /*Reg82C*/
+ {0x79a0eaaa, 0x79A0EAAC, 0x79a0eaaa, 0x79a0eaaa}, /*Reg830*/
+ {0x87765541, 0x87746341, 0x87765541, 0x87746341}, /*Reg838*/
+};
+
+static const struct cca_ccut cca_efem_ccut = {
+ {0x75B86010, 0x75B76010, 0x75B86010, 0x75B76010}, /*Reg82C*/
+ {0x79A0EAA8, 0x79A0EAAC, 0x79A0EAA8, 0x79a0eaaa}, /*Reg830*/
+ {0x87766451, 0x87766431, 0x87766451, 0x87766431}, /*Reg838*/
+};
+
+static const struct cca_ccut cca_ifem_ccut_ext = {
+ {0x75da8010, 0x75da8010, 0x75da8010, 0x75da8010}, /*Reg82C*/
+ {0x79a0eaaa, 0x97A0EAAC, 0x79a0eaaa, 0x79a0eaaa}, /*Reg830*/
+ {0x87765541, 0x86666341, 0x87765561, 0x86666361}, /*Reg838*/
+};
+
+static void rtw8822b_get_cca_val(const struct cca_ccut *cca_ccut, u8 col,
+ u32 *reg82c, u32 *reg830, u32 *reg838)
+{
+ *reg82c = cca_ccut->reg82c[col];
+ *reg830 = cca_ccut->reg830[col];
+ *reg838 = cca_ccut->reg838[col];
+}
+
+struct rtw8822b_rfe_info {
+ const struct cca_ccut *cca_ccut_2g;
+ const struct cca_ccut *cca_ccut_5g;
+ enum rtw_rfe_fem fem;
+ bool ifem_ext;
+ void (*rtw_set_channel_rfe)(struct rtw_dev *rtwdev, u8 channel);
+};
+
+#define I2GE5G_CCUT(set_ch) { \
+ .cca_ccut_2g = &cca_ifem_ccut, \
+ .cca_ccut_5g = &cca_efem_ccut, \
+ .fem = RTW_RFE_IFEM2G_EFEM5G, \
+ .ifem_ext = false, \
+ .rtw_set_channel_rfe = &rtw8822b_set_channel_rfe_ ## set_ch, \
+ }
+#define IFEM_EXT_CCUT(set_ch) { \
+ .cca_ccut_2g = &cca_ifem_ccut_ext, \
+ .cca_ccut_5g = &cca_ifem_ccut_ext, \
+ .fem = RTW_RFE_IFEM, \
+ .ifem_ext = true, \
+ .rtw_set_channel_rfe = &rtw8822b_set_channel_rfe_ ## set_ch, \
+ }
+
+static const struct rtw8822b_rfe_info rtw8822b_rfe_info[] = {
+ [2] = I2GE5G_CCUT(efem),
+ [5] = IFEM_EXT_CCUT(ifem),
+};
+
+static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ const struct rtw8822b_rfe_info *rfe_info)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const struct cca_ccut *cca_ccut;
+ u8 col;
+ u32 reg82c, reg830, reg838;
+ bool is_efem_cca = false, is_ifem_cca = false, is_rfe_type = false;
+
+ if (channel <= 14) {
+ cca_ccut = rfe_info->cca_ccut_2g;
+
+ if (hal->antenna_rx == BB_PATH_A ||
+ hal->antenna_rx == BB_PATH_B)
+ col = CCUT_IDX_1R_2G;
+ else
+ col = CCUT_IDX_2R_2G;
+ } else {
+ cca_ccut = rfe_info->cca_ccut_5g;
+
+ if (hal->antenna_rx == BB_PATH_A ||
+ hal->antenna_rx == BB_PATH_B)
+ col = CCUT_IDX_1R_5G;
+ else
+ col = CCUT_IDX_2R_5G;
+ }
+
+ rtw8822b_get_cca_val(cca_ccut, col, &reg82c, &reg830, &reg838);
+
+ switch (rfe_info->fem) {
+ case RTW_RFE_IFEM:
+ default:
+ is_ifem_cca = true;
+ if (rfe_info->ifem_ext)
+ is_rfe_type = true;
+ break;
+ case RTW_RFE_EFEM:
+ is_efem_cca = true;
+ break;
+ case RTW_RFE_IFEM2G_EFEM5G:
+ if (channel <= 14)
+ is_ifem_cca = true;
+ else
+ is_efem_cca = true;
+ break;
+ }
+
+ if (is_ifem_cca) {
+ if ((hal->cut_version == RTW_CHIP_VER_CUT_B &&
+ (col == CCUT_IDX_2R_2G || col == CCUT_IDX_2R_5G) &&
+ bw == RTW_CHANNEL_WIDTH_40) ||
+ (!is_rfe_type && col == CCUT_IDX_2R_5G &&
+ bw == RTW_CHANNEL_WIDTH_40) ||
+ (efuse->rfe_option == 5 && col == CCUT_IDX_2R_5G))
+ reg830 = 0x79a0ea28;
+ }
+
+ rtw_write32_mask(rtwdev, REG_CCASEL, MASKDWORD, reg82c);
+ rtw_write32_mask(rtwdev, REG_PDMFTH, MASKDWORD, reg830);
+ rtw_write32_mask(rtwdev, REG_CCA2ND, MASKDWORD, reg838);
+
+ if (is_efem_cca && !(hal->cut_version == RTW_CHIP_VER_CUT_B))
+ rtw_write32_mask(rtwdev, REG_L1WT, MASKDWORD, 0x9194b2b9);
+
+ if (bw == RTW_CHANNEL_WIDTH_20 &&
+ ((channel >= 52 && channel <= 64) ||
+ (channel >= 100 && channel <= 144)))
+ rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf0, 0x4);
+}
+
+static const u8 low_band[15] = {0x7, 0x6, 0x6, 0x5, 0x0, 0x0, 0x7, 0xff, 0x6,
+ 0x5, 0x0, 0x0, 0x7, 0x6, 0x6};
+static const u8 middle_band[23] = {0x6, 0x5, 0x0, 0x0, 0x7, 0x6, 0x6, 0xff, 0x0,
+ 0x0, 0x7, 0x6, 0x6, 0x5, 0x0, 0xff, 0x7, 0x6,
+ 0x6, 0x5, 0x0, 0x0, 0x7};
+static const u8 high_band[15] = {0x5, 0x5, 0x0, 0x7, 0x7, 0x6, 0x5, 0xff, 0x0,
+ 0x7, 0x7, 0x6, 0x5, 0x5, 0x0};
+
+static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
+#define RF18_BAND_2G (0)
+#define RF18_BAND_5G (BIT(16) | BIT(8))
+#define RF18_CHANNEL_MASK (MASKBYTE0)
+#define RF18_RFSI_MASK (BIT(18) | BIT(17))
+#define RF18_RFSI_GE_CH80 (BIT(17))
+#define RF18_RFSI_GT_CH144 (BIT(18))
+#define RF18_BW_MASK (BIT(11) | BIT(10))
+#define RF18_BW_20M (BIT(11) | BIT(10))
+#define RF18_BW_40M (BIT(11))
+#define RF18_BW_80M (BIT(10))
+#define RFBE_MASK (BIT(17) | BIT(16) | BIT(15))
+
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 rf_reg18, rf_reg_be;
+
+ rf_reg18 = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK);
+
+ rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
+ RF18_BW_MASK);
+
+ rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (channel & RF18_CHANNEL_MASK);
+ if (channel > 144)
+ rf_reg18 |= RF18_RFSI_GT_CH144;
+ else if (channel >= 80)
+ rf_reg18 |= RF18_RFSI_GE_CH80;
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_5:
+ case RTW_CHANNEL_WIDTH_10:
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ rf_reg18 |= RF18_BW_20M;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rf_reg18 |= RF18_BW_40M;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ rf_reg18 |= RF18_BW_80M;
+ break;
+ }
+
+ if (channel <= 14)
+ rf_reg_be = 0x0;
+ else if (channel >= 36 && channel <= 64)
+ rf_reg_be = low_band[(channel - 36) >> 1];
+ else if (channel >= 100 && channel <= 144)
+ rf_reg_be = middle_band[(channel - 100) >> 1];
+ else if (channel >= 149 && channel <= 177)
+ rf_reg_be = high_band[(channel - 149) >> 1];
+ else
+ goto err;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_MALSEL, RFBE_MASK, rf_reg_be);
+
+ /* need to set 0xdf[18]=1 before writing RF18 when channel 144 */
+ if (channel == 144)
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(18), 0x1);
+ else
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(18), 0x0);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK, rf_reg18);
+ if (hal->rf_type > RF_1T1R)
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x18, RFREG_MASK, rf_reg18);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 0);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 1);
+
+ return;
+
+err:
+ WARN_ON(1);
+}
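As a worked example of the packing above (illustrative, not part of the patch): tuning to channel 36 at 80 MHz refills the cleared fields with

    RF18_BAND_5G | 36 | RF18_BW_80M
        = BIT(16) | BIT(8) | 0x24 | BIT(10)
        = 0x10524

Channel 36 is below 80, so neither RFSI bit is set, and every other bit keeps the value read back from RF register 0x18.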
+
+static void rtw8822b_toggle_igi(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 igi;
+
+ igi = rtw_read32_mask(rtwdev, REG_RXIGI_A, 0x7f);
+ rtw_write32_mask(rtwdev, REG_RXIGI_A, 0x7f, igi - 2);
+ rtw_write32_mask(rtwdev, REG_RXIGI_A, 0x7f, igi);
+ rtw_write32_mask(rtwdev, REG_RXIGI_B, 0x7f, igi - 2);
+ rtw_write32_mask(rtwdev, REG_RXIGI_B, 0x7f, igi);
+
+ rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0, 0x0);
+ rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0,
+ hal->antenna_rx | (hal->antenna_rx << 4));
+}
+
+static void rtw8822b_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
+{
+ if (bw == RTW_CHANNEL_WIDTH_40) {
+ /* RX DFIR for BW40 */
+ rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x1);
+ rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x0);
+ rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0);
+ } else if (bw == RTW_CHANNEL_WIDTH_80) {
+ /* RX DFIR for BW80 */
+ rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
+ rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x1);
+ rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0);
+ } else {
+ /* RX DFIR for BW20, BW10 and BW5 */
+ rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
+ rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x2);
+ rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x1);
+ }
+}
+
+static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 rfe_option = efuse->rfe_option;
+ u32 val32;
+
+ if (channel <= 14) {
+ rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x1);
+ rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x0);
+ rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x0);
+ rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 15);
+
+ rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x0);
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x96a);
+ if (channel == 14) {
+ rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x00006577);
+ rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000);
+ } else {
+ rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x384f6577);
+ rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x1525);
+ }
+
+ rtw_write32_mask(rtwdev, REG_RFEINV, 0x300, 0x2);
+ } else if (channel > 35) {
+ rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
+ rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x1);
+ rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x0);
+ rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 34);
+
+ if (channel >= 36 && channel <= 64)
+ rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x1);
+ else if (channel >= 100 && channel <= 144)
+ rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x2);
+ else if (channel >= 149)
+ rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x3);
+
+ if (channel >= 36 && channel <= 48)
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x494);
+ else if (channel >= 52 && channel <= 64)
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x453);
+ else if (channel >= 100 && channel <= 116)
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x452);
+ else if (channel >= 118 && channel <= 177)
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x412);
+
+ rtw_write32_mask(rtwdev, 0xcbc, 0x300, 0x1);
+ }
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xFFCFFC00;
+ val32 |= (RTW_CHANNEL_WIDTH_20);
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ if (primary_ch_idx == 1)
+ rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
+ else
+ rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
+
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xFF3FF300;
+ val32 |= (((primary_ch_idx & 0xf) << 2) | RTW_CHANNEL_WIDTH_40);
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xFCEFCF00;
+ val32 |= (((primary_ch_idx & 0xf) << 2) | RTW_CHANNEL_WIDTH_80);
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+
+ if (rfe_option == 2) {
+ rtw_write32_mask(rtwdev, REG_L1PKWT, 0x0000f000, 0x6);
+ rtw_write32_mask(rtwdev, REG_ADC40, BIT(10), 0x1);
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_5:
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xEFEEFE00;
+ val32 |= ((BIT(6) | RTW_CHANNEL_WIDTH_20));
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0);
+ rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1);
+ break;
+ case RTW_CHANNEL_WIDTH_10:
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xEFFEFF00;
+ val32 |= ((BIT(7) | RTW_CHANNEL_WIDTH_20));
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0);
+ rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1);
+ break;
+ }
+}
+
+static void rtw8822b_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_chan_idx)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const struct rtw8822b_rfe_info *rfe_info;
+
+ if (WARN(efuse->rfe_option >= ARRAY_SIZE(rtw8822b_rfe_info),
+ "rfe_option %d is out of boundary\n", efuse->rfe_option))
+ return;
+
+ rfe_info = &rtw8822b_rfe_info[efuse->rfe_option];
+
+ rtw8822b_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+ rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+ rtw8822b_set_channel_rf(rtwdev, channel, bw);
+ rtw8822b_set_channel_rxdfir(rtwdev, bw);
+ rtw8822b_toggle_igi(rtwdev);
+ rtw8822b_set_channel_cca(rtwdev, channel, bw, rfe_info);
+ (*rfe_info->rtw_set_channel_rfe)(rtwdev, channel);
+}
+
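+/* Apply a TX/RX antenna path configuration: set the per-path AGC and path
+ * selection registers, adjust MRC/antenna weighting for one- versus
+ * two-path RX, update the RF mode LUT (polling until the write takes
+ * effect), and finally refresh IGI/CCA/RFE for the current channel.
+ */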
+static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const struct rtw8822b_rfe_info *rfe_info;
+ u8 ch = rtwdev->hal.current_channel;
+ u8 tx_path_sel, rx_path_sel;
+ int counter;
+
+ if (WARN(efuse->rfe_option >= ARRAY_SIZE(rtw8822b_rfe_info),
+ "rfe_option %d is out of boundary\n", efuse->rfe_option))
+ return;
+
+ rfe_info = &rtw8822b_rfe_info[efuse->rfe_option];
+
+ if ((tx_path | rx_path) & BB_PATH_A)
+ rtw_write32_mask(rtwdev, REG_AGCTR_A, MASKLWORD, 0x3231);
+ else
+ rtw_write32_mask(rtwdev, REG_AGCTR_A, MASKLWORD, 0x1111);
+
+ if ((tx_path | rx_path) & BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_AGCTR_B, MASKLWORD, 0x3231);
+ else
+ rtw_write32_mask(rtwdev, REG_AGCTR_B, MASKLWORD, 0x1111);
+
+ rtw_write32_mask(rtwdev, REG_CDDTXP, (BIT(19) | BIT(18)), 0x3);
+ rtw_write32_mask(rtwdev, REG_TXPSEL, (BIT(29) | BIT(28)), 0x1);
+ rtw_write32_mask(rtwdev, REG_TXPSEL, BIT(30), 0x1);
+
+ if (tx_path & BB_PATH_A) {
+ rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x001);
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0x8);
+ } else if (tx_path & BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x002);
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0x4);
+ }
+
+ if (tx_path == BB_PATH_A || tx_path == BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_TXPSEL1, 0xfff0, 0x01);
+ else
+ rtw_write32_mask(rtwdev, REG_TXPSEL1, 0xfff0, 0x43);
+
+ tx_path_sel = (tx_path << 4) | tx_path;
+ rtw_write32_mask(rtwdev, REG_TXPSEL, MASKBYTE0, tx_path_sel);
+
+ if (tx_path != BB_PATH_A && tx_path != BB_PATH_B) {
+ if (is_tx2_path || rtwdev->mp_mode) {
+ rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x043);
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0xc);
+ }
+ }
+
+ rtw_write32_mask(rtwdev, REG_RXDESC, BIT(22), 0x0);
+ rtw_write32_mask(rtwdev, REG_RXDESC, BIT(18), 0x0);
+
+ if (rx_path & BB_PATH_A)
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0x0f000000, 0x0);
+ else if (rx_path & BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0x0f000000, 0x5);
+
+ rx_path_sel = (rx_path << 4) | rx_path;
+ rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0, rx_path_sel);
+
+ if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_ANTWT, BIT(16), 0x0);
+ rtw_write32_mask(rtwdev, REG_HTSTFWT, BIT(28), 0x0);
+ rtw_write32_mask(rtwdev, REG_MRC, BIT(23), 0x0);
+ } else {
+ rtw_write32_mask(rtwdev, REG_ANTWT, BIT(16), 0x1);
+ rtw_write32_mask(rtwdev, REG_HTSTFWT, BIT(28), 0x1);
+ rtw_write32_mask(rtwdev, REG_MRC, BIT(23), 0x1);
+ }
+
+ for (counter = 100; counter > 0; counter--) {
+ u32 rf_reg33;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00001);
+
+ udelay(2);
+ rf_reg33 = rtw_read_rf(rtwdev, RF_PATH_A, 0x33, RFREG_MASK);
+
+ if (rf_reg33 == 0x00001)
+ break;
+ }
+
+ if (WARN(counter <= 0, "failed to write RF mode table\n"))
+ return;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00001);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x00034);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x4080c);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000);
+
+ rtw8822b_toggle_igi(rtwdev);
+ rtw8822b_set_channel_cca(rtwdev, 1, RTW_CHANNEL_WIDTH_20, rfe_info);
+ (*rfe_info->rtw_set_channel_rfe)(rtwdev, ch);
+}
+
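+/* Phy status page 0 carries a single PWDB value (path A only); rx power is
+ * derived by subtracting the fixed 110 offset and the bandwidth is assumed
+ * to be 20 MHz.
+ */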
+static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ s8 min_rx_power = -120;
+ u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
+
+ pkt_stat->rx_power[RF_PATH_A] = pwdb - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+ pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+ min_rx_power);
+}
+
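+/* Phy status page 1 also reports the RX subchannel index (rxsc), from which
+ * the received bandwidth is inferred: 1-8 map to 20 MHz, 9-12 to 40 MHz,
+ * 13 and above to 80 MHz, and 0 falls back to the reported RF mode.
+ */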
+static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 rxsc, bw;
+ s8 min_rx_power = -120;
+
+ if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
+ rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
+ else
+ rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
+
+ if (rxsc >= 1 && rxsc <= 8)
+ bw = RTW_CHANNEL_WIDTH_20;
+ else if (rxsc >= 9 && rxsc <= 12)
+ bw = RTW_CHANNEL_WIDTH_40;
+ else if (rxsc >= 13)
+ bw = RTW_CHANNEL_WIDTH_80;
+ else
+ bw = GET_PHY_STAT_P1_RF_MODE(phy_status);
+
+ pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
+ pkt_stat->rx_power[RF_PATH_B] = GET_PHY_STAT_P1_PWDB_B(phy_status) - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 2);
+ pkt_stat->bw = bw;
+ pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
+ pkt_stat->rx_power[RF_PATH_B],
+ min_rx_power);
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 page;
+
+ page = *phy_status & 0xf;
+
+ switch (page) {
+ case 0:
+ query_phy_status_page0(rtwdev, phy_status, pkt_stat);
+ break;
+ case 1:
+ query_phy_status_page1(rtwdev, phy_status, pkt_stat);
+ break;
+ default:
+ rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
+ return;
+ }
+}
+
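+/* Parse the hardware RX descriptor: the 802.11 header follows the
+ * descriptor, the shift padding and the driver info area, while the phy
+ * status (when present) sits at the start of that driver info area.
+ */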
+static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u8 *phy_status = NULL;
+
+ memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+ pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+ pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+ pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+ pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+ pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+ pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+ pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+ pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+ pkt_stat->ppdu_cnt = GET_RX_DESC_PPDU_CNT(rx_desc);
+ pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+ /* drv_info_sz is in units of 8 bytes */
+ pkt_stat->drv_info_sz *= 8;
+
+ /* the rx/phy status of a c2h cmd pkt is of no interest */
+ if (pkt_stat->is_c2h)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+ pkt_stat->drv_info_sz);
+ if (pkt_stat->phy_status) {
+ phy_status = rx_desc + desc_sz + pkt_stat->shift;
+ query_phy_status(rtwdev, phy_status, pkt_stat);
+ }
+
+ rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+}
+
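+/* The TX AGC registers pack four one-byte power indexes per 32-bit word, so
+ * per-rate indexes are accumulated and flushed with one register write on
+ * every fourth rate.
+ */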
+static void
+rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
+ static u32 phy_pwr_idx;
+ u8 rate, rate_idx, pwr_index, shift;
+ int j;
+
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+ pwr_index = hal->tx_pwr_tbl[path][rate];
+ shift = rate & 0x3;
+ phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+ if (shift == 0x3) {
+ rate_idx = rate & 0xfc;
+ rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
+ phy_pwr_idx);
+ phy_pwr_idx = 0;
+ }
+ }
+}
+
+static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int rs, path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs);
+ }
+}
+
+static bool rtw8822b_check_rf_path(u8 antenna)
+{
+ switch (antenna) {
+ case BB_PATH_A:
+ case BB_PATH_B:
+ case BB_PATH_AB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void rtw8822b_set_antenna(struct rtw_dev *rtwdev, u8 antenna_tx,
+ u8 antenna_rx)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "config RF path, tx=0x%x rx=0x%x\n",
+ antenna_tx, antenna_rx);
+
+ if (!rtw8822b_check_rf_path(antenna_tx)) {
+ rtw_info(rtwdev, "unsupport tx path, set to default path ab\n");
+ antenna_tx = BB_PATH_AB;
+ }
+ if (!rtw8822b_check_rf_path(antenna_rx)) {
+ rtw_info(rtwdev, "unsupport rx path, set to default path ab\n");
+ antenna_rx = BB_PATH_AB;
+ }
+ hal->antenna_tx = antenna_tx;
+ hal->antenna_rx = antenna_rx;
+ rtw8822b_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
+}
+
+static void rtw8822b_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 ldo_pwr;
+
+ ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
+ ldo_pwr = enable ? ldo_pwr | BIT(7) : ldo_pwr & ~BIT(7);
+ rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
+}
+
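+/* Collect the CCK and OFDM false-alarm counters for the dynamic mechanisms,
+ * then toggle several PHY bits, presumably to reset the counters for the
+ * next measurement period.
+ */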
+static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_enable;
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+
+ cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28);
+ cck_fa_cnt = rtw_read16(rtwdev, 0xa5c);
+ ofdm_fa_cnt = rtw_read16(rtwdev, 0xf48);
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
+
+ rtw_write32_set(rtwdev, 0x9a4, BIT(17));
+ rtw_write32_clr(rtwdev, 0x9a4, BIT(17));
+ rtw_write32_clr(rtwdev, 0xa2c, BIT(15));
+ rtw_write32_set(rtwdev, 0xa2c, BIT(15));
+ rtw_write32_set(rtwdev, 0xb58, BIT(0));
+ rtw_write32_clr(rtwdev, 0xb58, BIT(0));
+}
+
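+/* Ask the firmware to run IQ calibration and poll RF_DTXLOK (up to roughly
+ * six seconds) for the 0xabcde pattern that appears to mark completion,
+ * then clear it and report the reload flag and failure mask.
+ */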
+static void rtw8822b_do_iqk(struct rtw_dev *rtwdev)
+{
+ static int do_iqk_cnt;
+ struct rtw_iqk_para para = {.clear = 0, .segment_iqk = 0};
+ u32 rf_reg, iqk_fail_mask;
+ int counter;
+ bool reload;
+
+ rtw_fw_do_iqk(rtwdev, &para);
+
+ for (counter = 0; counter < 300; counter++) {
+ rf_reg = rtw_read_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK);
+ if (rf_reg == 0xabcde)
+ break;
+ msleep(20);
+ }
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0);
+
+ reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16));
+ iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0));
+ rtw_dbg(rtwdev, RTW_DBG_PHY,
+ "iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n",
+ counter, reload, ++do_iqk_cnt, iqk_fail_mask);
+}
+
+static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4) | BIT(7), 0},
+ {0x0300,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
+ {0x0012,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0012,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0001,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0xFF1A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x10C3,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x10A8,
+ RTW_PWR_CUT_C_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x10A9,
+ RTW_PWR_CUT_C_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xef},
+ {0x10AA,
+ RTW_PWR_CUT_C_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x0c},
+ {0x0068,
+ RTW_PWR_CUT_C_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0029,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xF9},
+ {0x0024,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0074,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x00AF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
+ {0x0003,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0093,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x001F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x00EF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0xFF1A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x30},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x10C3,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x004F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0046,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0046,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0081,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7) | BIT(6), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0090,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0044,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0040,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x90},
+ {0x0041,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0042,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x04},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
+ trans_carddis_to_cardemu_8822b,
+ trans_cardemu_to_act_8822b,
+ NULL
+};
+
+static struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
+ trans_act_to_cardemu_8822b,
+ trans_cardemu_to_carddis_8822b,
+ NULL
+};
+
+static struct rtw_intf_phy_para usb2_param_8822b[] = {
+ {0xFFFF, 0x00,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para usb3_param_8822b[] = {
+ {0x0001, 0xA841,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_D,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
+ {0x0001, 0xA841,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0002, 0x60C6,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0008, 0x3596,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0009, 0x321C,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x000A, 0x9623,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0020, 0x94FF,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0021, 0xFFCF,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0026, 0xC006,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0029, 0xFF0E,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x002A, 0x1840,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
+ {0x0001, 0xA841,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0002, 0x60C6,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0008, 0x3597,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0009, 0x321C,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x000A, 0x9623,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0020, 0x94FF,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0021, 0xFFCF,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0026, 0xC006,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0029, 0xFF0E,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x002A, 0x3040,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para_table phy_para_table_8822b = {
+ .usb2_para = usb2_param_8822b,
+ .usb3_para = usb3_param_8822b,
+ .gen1_para = pcie_gen1_param_8822b,
+ .gen2_para = pcie_gen2_param_8822b,
+ .n_usb2_para = ARRAY_SIZE(usb2_param_8822b),
+ .n_usb3_para = ARRAY_SIZE(usb3_param_8822b),
+ .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8822b),
+ .n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8822b),
+};
+
+static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
+ [2] = RTW_DEF_RFE(8822b, 2, 2),
+ [5] = RTW_DEF_RFE(8822b, 5, 5),
+};
+
+static struct rtw_hw_reg rtw8822b_dig[] = {
+ [0] = { .addr = 0xc50, .mask = 0x7f },
+ [1] = { .addr = 0xe50, .mask = 0x7f },
+};
+
+static struct rtw_page_table page_table_8822b[] = {
+ {64, 64, 64, 64, 1},
+ {64, 64, 64, 64, 1},
+ {64, 64, 0, 0, 1},
+ {64, 64, 64, 0, 1},
+ {64, 64, 64, 64, 1},
+};
+
+static struct rtw_rqpn rqpn_table_8822b[] = {
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
+static struct rtw_chip_ops rtw8822b_ops = {
+ .phy_set_param = rtw8822b_phy_set_param,
+ .read_efuse = rtw8822b_read_efuse,
+ .query_rx_desc = rtw8822b_query_rx_desc,
+ .set_channel = rtw8822b_set_channel,
+ .mac_init = rtw8822b_mac_init,
+ .read_rf = rtw_phy_read_rf,
+ .write_rf = rtw_phy_write_rf_reg_sipi,
+ .set_tx_power_index = rtw8822b_set_tx_power_index,
+ .set_antenna = rtw8822b_set_antenna,
+ .cfg_ldo25 = rtw8822b_cfg_ldo25,
+ .false_alarm_statistics = rtw8822b_false_alarm_statistics,
+ .do_iqk = rtw8822b_do_iqk,
+};
+
+struct rtw_chip_info rtw8822b_hw_spec = {
+ .ops = &rtw8822b_ops,
+ .id = RTW_CHIP_TYPE_8822B,
+ .fw_name = "rtw88/rtw8822b_fw.bin",
+ .tx_pkt_desc_sz = 48,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 1024,
+ .log_efuse_size = 768,
+ .ptct_efuse_size = 96,
+ .txff_size = 262144,
+ .rxff_size = 24576,
+ .txgi_factor = 1,
+ .is_pwr_by_rate_dec = true,
+ .max_power_index = 0x3f,
+ .csi_buf_pg_num = 0,
+ .band = RTW_BAND_2G | RTW_BAND_5G,
+ .page_size = 128,
+ .dig_min = 0x1c,
+ .ht_supported = true,
+ .vht_supported = true,
+ .sys_func_en = 0xDC,
+ .pwr_on_seq = card_enable_flow_8822b,
+ .pwr_off_seq = card_disable_flow_8822b,
+ .page_table = page_table_8822b,
+ .rqpn_table = rqpn_table_8822b,
+ .intf_table = &phy_para_table_8822b,
+ .dig = rtw8822b_dig,
+ .rf_base_addr = {0x2800, 0x2c00},
+ .rf_sipi_addr = {0xc90, 0xe90},
+ .mac_tbl = &rtw8822b_mac_tbl,
+ .agc_tbl = &rtw8822b_agc_tbl,
+ .bb_tbl = &rtw8822b_bb_tbl,
+ .rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl},
+ .rfe_defs = rtw8822b_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs),
+};
+EXPORT_SYMBOL(rtw8822b_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8822b_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
new file mode 100644
index 000000000000..0cb93d7d4cfd
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8822B_H__
+#define __RTW8822B_H__
+
+#include <asm/byteorder.h>
+
+#define RCR_VHT_ACK BIT(26)
+
+struct rtw8822bu_efuse {
+ u8 res4[4]; /* 0xd0 */
+ u8 usb_optional_function;
+ u8 res5[0x1e];
+ u8 res6[2];
+ u8 serial[0x0b]; /* 0xf5 */
+ u8 vid; /* 0x100 */
+ u8 res7;
+ u8 pid;
+ u8 res8[4];
+ u8 mac_addr[ETH_ALEN]; /* 0x107 */
+ u8 res9[2];
+ u8 vendor_name[0x07];
+ u8 res10[2];
+ u8 device_name[0x14];
+ u8 res11[0xcf];
+ u8 package_type; /* 0x1fb */
+ u8 res12[0x4];
+};
+
+struct rtw8822be_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0xd0 */
+ u8 vender_id[2];
+ u8 device_id[2];
+ u8 sub_vender_id[2];
+ u8 sub_device_id[2];
+ u8 pmc[2];
+ u8 exp_device_cap[2];
+ u8 msi_cap;
+ u8 ltr_cap; /* 0xe3 */
+ u8 exp_link_control[2];
+ u8 link_cap[4];
+ u8 link_control[2];
+ u8 serial_number[8];
+ u8 res0:2; /* 0xf4 */
+ u8 ltr_en:1;
+ u8 res1:2;
+ u8 obff:2;
+ u8 res2:3;
+ u8 obff_cap:2;
+ u8 res3:4;
+ u8 res4[3];
+ u8 class_code[3];
+ u8 pci_pm_L1_2_supp:1;
+ u8 pci_pm_L1_1_supp:1;
+ u8 aspm_pm_L1_2_supp:1;
+ u8 aspm_pm_L1_1_supp:1;
+ u8 L1_pm_substates_supp:1;
+ u8 res5:3;
+ u8 port_common_mode_restore_time;
+ u8 port_t_power_on_scale:2;
+ u8 res6:1;
+ u8 port_t_power_on_value:5;
+ u8 res7;
+};
+
+struct rtw8822b_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g[2]; /* 0xbd */
+ u8 lna_type_5g[2];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 tx_bb_swing_setting_2g;
+ u8 tx_bb_swing_setting_5g;
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code[2];
+ u8 res[3];
+ union {
+ struct rtw8822bu_efuse u;
+ struct rtw8822be_efuse e;
+ };
+};
+
+static inline void
+_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+{
+ /* 0xC00-0xCFF and 0xE00-0xEFF have the same layout */
+ rtw_write32_mask(rtwdev, addr, mask, data);
+ rtw_write32_mask(rtwdev, addr + 0x200, mask, data);
+}
+
+#define rtw_write32s_mask(rtwdev, addr, mask, data) \
+ do { \
+ BUILD_BUG_ON((addr) < 0xC00 || (addr) >= 0xD00); \
+ \
+ _rtw_write32s_mask(rtwdev, addr, mask, data); \
+ } while (0)
+
+/* phy status page0 */
+#define GET_PHY_STAT_P0_PWDB(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+
+/* phy status page1 */
+#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
+#define GET_PHY_STAT_P1_RF_MODE(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(29, 28))
+#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
+#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+
+#define REG_HTSTFWT 0x800
+#define REG_RXPSEL 0x808
+#define BIT_RX_PSEL_RST (BIT(28) | BIT(29))
+#define REG_TXPSEL 0x80c
+#define REG_RXCCAMSK 0x814
+#define REG_CCASEL 0x82c
+#define REG_PDMFTH 0x830
+#define REG_CCA2ND 0x838
+#define REG_L1WT 0x83c
+#define REG_L1PKWT 0x840
+#define REG_MRC 0x850
+#define REG_CLKTRK 0x860
+#define REG_ADCCLK 0x8ac
+#define REG_ADC160 0x8c4
+#define REG_ADC40 0x8c8
+#define REG_CDDTXP 0x93c
+#define REG_TXPSEL1 0x940
+#define REG_ACBB0 0x948
+#define REG_ACBBRXFIR 0x94c
+#define REG_ACGG2TBL 0x958
+#define REG_RXSB 0xa00
+#define REG_ADCINI 0xa04
+#define REG_TXSF2 0xa24
+#define REG_TXSF6 0xa28
+#define REG_RXDESC 0xa2c
+#define REG_ENTXCCK 0xa80
+#define REG_AGCTR_A 0xc08
+#define REG_TXDFIR 0xc20
+#define REG_RXIGI_A 0xc50
+#define REG_TRSW 0xca0
+#define REG_RFESEL0 0xcb0
+#define REG_RFESEL8 0xcb4
+#define REG_RFECTL 0xcb8
+#define REG_RFEINV 0xcbc
+#define REG_AGCTR_B 0xe08
+#define REG_RXIGI_B 0xe50
+#define REG_ANTWT 0x1904
+#define REG_IQKFAILMSK 0x1bf0
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
new file mode 100644
index 000000000000..2d2dfb495ce1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
@@ -0,0 +1,20783 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8822b_table.h"
+
+static const u32 rtw8822b_mac[] = {
+ 0x029, 0x000000F9,
+ 0x420, 0x00000080,
+ 0x421, 0x0000001F,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x000000F0,
+ 0x446, 0x00000001,
+ 0x447, 0x000000FE,
+ 0x448, 0x00000000,
+ 0x449, 0x00000000,
+ 0x44A, 0x00000000,
+ 0x44B, 0x00000040,
+ 0x44C, 0x00000010,
+ 0x44D, 0x000000F0,
+ 0x44E, 0x0000003F,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x00000000,
+ 0x452, 0x00000000,
+ 0x453, 0x00000040,
+ 0x455, 0x00000070,
+ 0x45E, 0x00000004,
+ 0x49C, 0x00000010,
+ 0x49D, 0x000000F0,
+ 0x49E, 0x00000000,
+ 0x49F, 0x00000006,
+ 0x4A0, 0x000000E0,
+ 0x4A1, 0x00000003,
+ 0x4A2, 0x00000000,
+ 0x4A3, 0x00000040,
+ 0x4A4, 0x00000015,
+ 0x4A5, 0x000000F0,
+ 0x4A6, 0x00000000,
+ 0x4A7, 0x00000006,
+ 0x4A8, 0x000000E0,
+ 0x4A9, 0x00000000,
+ 0x4AA, 0x00000000,
+ 0x4AB, 0x00000000,
+ 0x7DA, 0x00000008,
+ 0x1448, 0x00000006,
+ 0x144A, 0x00000006,
+ 0x144C, 0x00000006,
+ 0x144E, 0x00000006,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CA, 0x00000020,
+ 0x4CB, 0x00000020,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x4CF, 0x00000008,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x521, 0x0000002F,
+ 0x525, 0x0000004F,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x577, 0x0000000B,
+ 0x5BE, 0x00000064,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x00000022,
+ 0x60C, 0x00000018,
+ 0x6A0, 0x000000FF,
+ 0x6A1, 0x000000FF,
+ 0x6A2, 0x000000FF,
+ 0x6A3, 0x000000FF,
+ 0x6A4, 0x000000FF,
+ 0x6A5, 0x000000FF,
+ 0x6DE, 0x00000084,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x718, 0x00000040,
+ 0x7D4, 0x00000098,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822b_mac, rtw_phy_cfg_mac);
+
+static const u32 rtw8822b_agc[] = {
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xDC000003,
+ 0x81C, 0xDB020003,
+ 0x81C, 0xDA040003,
+ 0x81C, 0xD9060003,
+ 0x81C, 0xD8080003,
+ 0x81C, 0xD70A0003,
+ 0x81C, 0xD60C0003,
+ 0x81C, 0xD50E0003,
+ 0x81C, 0xD4100003,
+ 0x81C, 0xD3120003,
+ 0x81C, 0xD2140003,
+ 0x81C, 0xD1160003,
+ 0x81C, 0xD0180003,
+ 0x81C, 0xB41A0003,
+ 0x81C, 0xB31C0003,
+ 0x81C, 0xB21E0003,
+ 0x81C, 0xB1200003,
+ 0x81C, 0xB0220003,
+ 0x81C, 0xAF240003,
+ 0x81C, 0xAE260003,
+ 0x81C, 0xAD280003,
+ 0x81C, 0xAC2A0003,
+ 0x81C, 0xAB2C0003,
+ 0x81C, 0x8C2E0003,
+ 0x81C, 0x8B300003,
+ 0x81C, 0x8A320003,
+ 0x81C, 0x89340003,
+ 0x81C, 0x88360003,
+ 0x81C, 0x87380003,
+ 0x81C, 0x863A0003,
+ 0x81C, 0x853C0003,
+ 0x81C, 0x693E0003,
+ 0x81C, 0x68400003,
+ 0x81C, 0x67420003,
+ 0x81C, 0x66440003,
+ 0x81C, 0x65460003,
+ 0x81C, 0x48480003,
+ 0x81C, 0x474A0003,
+ 0x81C, 0x464C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x27540003,
+ 0x81C, 0x26560003,
+ 0x81C, 0x25580003,
+ 0x81C, 0x245A0003,
+ 0x81C, 0x235C0003,
+ 0x81C, 0x045E0003,
+ 0x81C, 0x03600003,
+ 0x81C, 0x02620003,
+ 0x81C, 0x01640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFC000003,
+ 0x81C, 0xFB020003,
+ 0x81C, 0xFA040003,
+ 0x81C, 0xF9060003,
+ 0x81C, 0xF8080003,
+ 0x81C, 0xF70A0003,
+ 0x81C, 0xF60C0003,
+ 0x81C, 0xF50E0003,
+ 0x81C, 0xF4100003,
+ 0x81C, 0xF3120003,
+ 0x81C, 0xF2140003,
+ 0x81C, 0xF1160003,
+ 0x81C, 0xF0180003,
+ 0x81C, 0xEF1A0003,
+ 0x81C, 0xEE1C0003,
+ 0x81C, 0xED1E0003,
+ 0x81C, 0xEC200003,
+ 0x81C, 0xEB220003,
+ 0x81C, 0xEA240003,
+ 0x81C, 0xE9260003,
+ 0x81C, 0xE8280003,
+ 0x81C, 0xE72A0003,
+ 0x81C, 0xE62C0003,
+ 0x81C, 0xE52E0003,
+ 0x81C, 0xC8300003,
+ 0x81C, 0xC7320003,
+ 0x81C, 0xC6340003,
+ 0x81C, 0xC5360003,
+ 0x81C, 0xC4380003,
+ 0x81C, 0xC33A0003,
+ 0x81C, 0xC23C0003,
+ 0x81C, 0xC13E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0xA1460003,
+ 0x81C, 0xA0480003,
+ 0x81C, 0x684A0003,
+ 0x81C, 0x674C0003,
+ 0x81C, 0x664E0003,
+ 0x81C, 0x65500003,
+ 0x81C, 0x64520003,
+ 0x81C, 0x63540003,
+ 0x81C, 0x44560003,
+ 0x81C, 0x43580003,
+ 0x81C, 0x425A0003,
+ 0x81C, 0x415C0003,
+ 0x81C, 0x405E0003,
+ 0x81C, 0x23600003,
+ 0x81C, 0x22620003,
+ 0x81C, 0x21640003,
+ 0x81C, 0x03660003,
+ 0x81C, 0x02680003,
+ 0x81C, 0x016A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xDC000003,
+ 0x81C, 0xDB020003,
+ 0x81C, 0xDA040003,
+ 0x81C, 0xD9060003,
+ 0x81C, 0xD8080003,
+ 0x81C, 0xD70A0003,
+ 0x81C, 0xD60C0003,
+ 0x81C, 0xD50E0003,
+ 0x81C, 0xD4100003,
+ 0x81C, 0xD3120003,
+ 0x81C, 0xD2140003,
+ 0x81C, 0xD1160003,
+ 0x81C, 0xD0180003,
+ 0x81C, 0xB41A0003,
+ 0x81C, 0xB31C0003,
+ 0x81C, 0xB21E0003,
+ 0x81C, 0xB1200003,
+ 0x81C, 0xB0220003,
+ 0x81C, 0xAF240003,
+ 0x81C, 0xAE260003,
+ 0x81C, 0xAD280003,
+ 0x81C, 0xAC2A0003,
+ 0x81C, 0xAB2C0003,
+ 0x81C, 0x8C2E0003,
+ 0x81C, 0x8B300003,
+ 0x81C, 0x8A320003,
+ 0x81C, 0x89340003,
+ 0x81C, 0x88360003,
+ 0x81C, 0x87380003,
+ 0x81C, 0x863A0003,
+ 0x81C, 0x853C0003,
+ 0x81C, 0x693E0003,
+ 0x81C, 0x68400003,
+ 0x81C, 0x67420003,
+ 0x81C, 0x66440003,
+ 0x81C, 0x65460003,
+ 0x81C, 0x48480003,
+ 0x81C, 0x474A0003,
+ 0x81C, 0x464C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x27540003,
+ 0x81C, 0x26560003,
+ 0x81C, 0x25580003,
+ 0x81C, 0x245A0003,
+ 0x81C, 0x235C0003,
+ 0x81C, 0x045E0003,
+ 0x81C, 0x03600003,
+ 0x81C, 0x02620003,
+ 0x81C, 0x01640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xC5360103,
+ 0x81C, 0xC4380103,
+ 0x81C, 0xC33A0103,
+ 0x81C, 0xC23C0103,
+ 0x81C, 0xA53E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x83480103,
+ 0x81C, 0x824A0103,
+ 0x81C, 0x814C0103,
+ 0x81C, 0x804E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xC32C0103,
+ 0x81C, 0xC22E0103,
+ 0x81C, 0xC1300103,
+ 0x81C, 0xC0320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x06500103,
+ 0x81C, 0x05520103,
+ 0x81C, 0x04540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xC32C0103,
+ 0x81C, 0xC22E0103,
+ 0x81C, 0xC1300103,
+ 0x81C, 0xC0320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000103,
+ 0x81C, 0xFD020103,
+ 0x81C, 0xFC040103,
+ 0x81C, 0xFB060103,
+ 0x81C, 0xFA080103,
+ 0x81C, 0xF90A0103,
+ 0x81C, 0xF80C0103,
+ 0x81C, 0xF70E0103,
+ 0x81C, 0xF6100103,
+ 0x81C, 0xF5120103,
+ 0x81C, 0xF4140103,
+ 0x81C, 0xF3160103,
+ 0x81C, 0xF2180103,
+ 0x81C, 0xF11A0103,
+ 0x81C, 0xF01C0103,
+ 0x81C, 0xEF1E0103,
+ 0x81C, 0xEE200103,
+ 0x81C, 0xED220103,
+ 0x81C, 0xEC240103,
+ 0x81C, 0xEB260103,
+ 0x81C, 0xEA280103,
+ 0x81C, 0xE92A0103,
+ 0x81C, 0xE82C0103,
+ 0x81C, 0xE72E0103,
+ 0x81C, 0xE6300103,
+ 0x81C, 0xE5320103,
+ 0x81C, 0xE4340103,
+ 0x81C, 0xE3360103,
+ 0x81C, 0xC6380103,
+ 0x81C, 0xC53A0103,
+ 0x81C, 0xC43C0103,
+ 0x81C, 0xC33E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0xA04A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x80500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x605A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xC32C0103,
+ 0x81C, 0xC22E0103,
+ 0x81C, 0xC1300103,
+ 0x81C, 0xC0320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEF1C0103,
+ 0x81C, 0xEE1E0103,
+ 0x81C, 0xED200103,
+ 0x81C, 0xEC220103,
+ 0x81C, 0xEB240103,
+ 0x81C, 0xEA260103,
+ 0x81C, 0xE9280103,
+ 0x81C, 0xE82A0103,
+ 0x81C, 0xE72C0103,
+ 0x81C, 0xE62E0103,
+ 0x81C, 0xE5300103,
+ 0x81C, 0xE4320103,
+ 0x81C, 0xE3340103,
+ 0x81C, 0xE2360103,
+ 0x81C, 0xC5380103,
+ 0x81C, 0xC43A0103,
+ 0x81C, 0xC33C0103,
+ 0x81C, 0xC23E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0x834A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x64500103,
+ 0x81C, 0x63520103,
+ 0x81C, 0x62540103,
+ 0x81C, 0x61560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x065E0103,
+ 0x81C, 0x05600103,
+ 0x81C, 0x04620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000103,
+ 0x81C, 0xF9020103,
+ 0x81C, 0xF8040103,
+ 0x81C, 0xF7060103,
+ 0x81C, 0xF6080103,
+ 0x81C, 0xF50A0103,
+ 0x81C, 0xF40C0103,
+ 0x81C, 0xF30E0103,
+ 0x81C, 0xF2100103,
+ 0x81C, 0xF1120103,
+ 0x81C, 0xF0140103,
+ 0x81C, 0xEF160103,
+ 0x81C, 0xEE180103,
+ 0x81C, 0xED1A0103,
+ 0x81C, 0xEC1C0103,
+ 0x81C, 0xEB1E0103,
+ 0x81C, 0xEA200103,
+ 0x81C, 0xE9220103,
+ 0x81C, 0xE8240103,
+ 0x81C, 0xE7260103,
+ 0x81C, 0xE6280103,
+ 0x81C, 0xE52A0103,
+ 0x81C, 0xC42C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA4340103,
+ 0x81C, 0xA3360103,
+ 0x81C, 0xA2380103,
+ 0x81C, 0xA13A0103,
+ 0x81C, 0x833C0103,
+ 0x81C, 0x823E0103,
+ 0x81C, 0x81400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xE22C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x64420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x61480103,
+ 0x81C, 0x434A0103,
+ 0x81C, 0x424C0103,
+ 0x81C, 0x414E0103,
+ 0x81C, 0x40500103,
+ 0x81C, 0x22520103,
+ 0x81C, 0x21540103,
+ 0x81C, 0x20560103,
+ 0x81C, 0x04580103,
+ 0x81C, 0x035A0103,
+ 0x81C, 0x025C0103,
+ 0x81C, 0x015E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEF1C0103,
+ 0x81C, 0xEE1E0103,
+ 0x81C, 0xED200103,
+ 0x81C, 0xEC220103,
+ 0x81C, 0xEB240103,
+ 0x81C, 0xEA260103,
+ 0x81C, 0xE9280103,
+ 0x81C, 0xE82A0103,
+ 0x81C, 0xE72C0103,
+ 0x81C, 0xE62E0103,
+ 0x81C, 0xE5300103,
+ 0x81C, 0xE4320103,
+ 0x81C, 0xE3340103,
+ 0x81C, 0xC6360103,
+ 0x81C, 0xC5380103,
+ 0x81C, 0xC43A0103,
+ 0x81C, 0xC33C0103,
+ 0x81C, 0xC23E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0x834A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x245A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x04620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xE22C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x64420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x61480103,
+ 0x81C, 0x434A0103,
+ 0x81C, 0x424C0103,
+ 0x81C, 0x414E0103,
+ 0x81C, 0x40500103,
+ 0x81C, 0x22520103,
+ 0x81C, 0x21540103,
+ 0x81C, 0x20560103,
+ 0x81C, 0x04580103,
+ 0x81C, 0x035A0103,
+ 0x81C, 0x025C0103,
+ 0x81C, 0x015E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xC5360103,
+ 0x81C, 0xC4380103,
+ 0x81C, 0xC33A0103,
+ 0x81C, 0xC23C0103,
+ 0x81C, 0xA53E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x83480103,
+ 0x81C, 0x824A0103,
+ 0x81C, 0x814C0103,
+ 0x81C, 0x804E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000103,
+ 0x81C, 0xF8020103,
+ 0x81C, 0xF7040103,
+ 0x81C, 0xF6060103,
+ 0x81C, 0xF5080103,
+ 0x81C, 0xF40A0103,
+ 0x81C, 0xF30C0103,
+ 0x81C, 0xF20E0103,
+ 0x81C, 0xF1100103,
+ 0x81C, 0xF0120103,
+ 0x81C, 0xEF140103,
+ 0x81C, 0xEE160103,
+ 0x81C, 0xED180103,
+ 0x81C, 0xEC1A0103,
+ 0x81C, 0xEB1C0103,
+ 0x81C, 0xEA1E0103,
+ 0x81C, 0xE9200103,
+ 0x81C, 0xE8220103,
+ 0x81C, 0xE7240103,
+ 0x81C, 0xE6260103,
+ 0x81C, 0xE5280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA4340103,
+ 0x81C, 0xA3360103,
+ 0x81C, 0xA2380103,
+ 0x81C, 0xA13A0103,
+ 0x81C, 0xA03C0103,
+ 0x81C, 0x823E0103,
+ 0x81C, 0x81400103,
+ 0x81C, 0x80420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x61480103,
+ 0x81C, 0x604A0103,
+ 0x81C, 0x244C0103,
+ 0x81C, 0x234E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x05560103,
+ 0x81C, 0x04580103,
+ 0x81C, 0x035A0103,
+ 0x81C, 0x025C0103,
+ 0x81C, 0x015E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000103,
+ 0x81C, 0xFD020103,
+ 0x81C, 0xFC040103,
+ 0x81C, 0xFB060103,
+ 0x81C, 0xFA080103,
+ 0x81C, 0xF90A0103,
+ 0x81C, 0xF80C0103,
+ 0x81C, 0xF70E0103,
+ 0x81C, 0xF6100103,
+ 0x81C, 0xF5120103,
+ 0x81C, 0xF4140103,
+ 0x81C, 0xF3160103,
+ 0x81C, 0xF2180103,
+ 0x81C, 0xF11A0103,
+ 0x81C, 0xF01C0103,
+ 0x81C, 0xEF1E0103,
+ 0x81C, 0xEE200103,
+ 0x81C, 0xED220103,
+ 0x81C, 0xEC240103,
+ 0x81C, 0xEB260103,
+ 0x81C, 0xEA280103,
+ 0x81C, 0xE92A0103,
+ 0x81C, 0xE82C0103,
+ 0x81C, 0xE72E0103,
+ 0x81C, 0xE6300103,
+ 0x81C, 0xE5320103,
+ 0x81C, 0xE4340103,
+ 0x81C, 0xE3360103,
+ 0x81C, 0xC6380103,
+ 0x81C, 0xC53A0103,
+ 0x81C, 0xC43C0103,
+ 0x81C, 0xC33E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0xA04A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x80500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x605A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000103,
+ 0x81C, 0xFB020103,
+ 0x81C, 0xFA040103,
+ 0x81C, 0xF9060103,
+ 0x81C, 0xF8080103,
+ 0x81C, 0xF70A0103,
+ 0x81C, 0xF60C0103,
+ 0x81C, 0xF50E0103,
+ 0x81C, 0xF4100103,
+ 0x81C, 0xF3120103,
+ 0x81C, 0xF2140103,
+ 0x81C, 0xF1160103,
+ 0x81C, 0xF0180103,
+ 0x81C, 0xEE1A0103,
+ 0x81C, 0xED1C0103,
+ 0x81C, 0xEC1E0103,
+ 0x81C, 0xEB200103,
+ 0x81C, 0xEA220103,
+ 0x81C, 0xE9240103,
+ 0x81C, 0xE8260103,
+ 0x81C, 0xE7280103,
+ 0x81C, 0xE62A0103,
+ 0x81C, 0xE52C0103,
+ 0x81C, 0xE42E0103,
+ 0x81C, 0xE3300103,
+ 0x81C, 0xE2320103,
+ 0x81C, 0xE1340103,
+ 0x81C, 0xC5360103,
+ 0x81C, 0xC4380103,
+ 0x81C, 0xC33A0103,
+ 0x81C, 0xC23C0103,
+ 0x81C, 0xA53E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x83480103,
+ 0x81C, 0x824A0103,
+ 0x81C, 0x814C0103,
+ 0x81C, 0x804E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xC5360103,
+ 0x81C, 0xC4380103,
+ 0x81C, 0xC33A0103,
+ 0x81C, 0xC23C0103,
+ 0x81C, 0xA53E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x83480103,
+ 0x81C, 0x824A0103,
+ 0x81C, 0x814C0103,
+ 0x81C, 0x804E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000103,
+ 0x81C, 0xFB020103,
+ 0x81C, 0xFA040103,
+ 0x81C, 0xF9060103,
+ 0x81C, 0xF8080103,
+ 0x81C, 0xF70A0103,
+ 0x81C, 0xF60C0103,
+ 0x81C, 0xF50E0103,
+ 0x81C, 0xF4100103,
+ 0x81C, 0xF3120103,
+ 0x81C, 0xF2140103,
+ 0x81C, 0xF1160103,
+ 0x81C, 0xF0180103,
+ 0x81C, 0xEF1A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xE1360103,
+ 0x81C, 0xC3380103,
+ 0x81C, 0xC23A0103,
+ 0x81C, 0xC13C0103,
+ 0x81C, 0xC03E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x82480103,
+ 0x81C, 0x814A0103,
+ 0x81C, 0x804C0103,
+ 0x81C, 0x634E0103,
+ 0x81C, 0x62500103,
+ 0x81C, 0x61520103,
+ 0x81C, 0x42540103,
+ 0x81C, 0x41560103,
+ 0x81C, 0x24580103,
+ 0x81C, 0x235A0103,
+ 0x81C, 0x225C0103,
+ 0x81C, 0x215E0103,
+ 0x81C, 0x20600103,
+ 0x81C, 0x03620103,
+ 0x81C, 0x02640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000103,
+ 0x81C, 0xFD020103,
+ 0x81C, 0xFC040103,
+ 0x81C, 0xFB060103,
+ 0x81C, 0xFA080103,
+ 0x81C, 0xF90A0103,
+ 0x81C, 0xF80C0103,
+ 0x81C, 0xF70E0103,
+ 0x81C, 0xF6100103,
+ 0x81C, 0xF5120103,
+ 0x81C, 0xF4140103,
+ 0x81C, 0xF3160103,
+ 0x81C, 0xF2180103,
+ 0x81C, 0xF11A0103,
+ 0x81C, 0xF01C0103,
+ 0x81C, 0xEF1E0103,
+ 0x81C, 0xEE200103,
+ 0x81C, 0xED220103,
+ 0x81C, 0xEC240103,
+ 0x81C, 0xEB260103,
+ 0x81C, 0xEA280103,
+ 0x81C, 0xE92A0103,
+ 0x81C, 0xE82C0103,
+ 0x81C, 0xE72E0103,
+ 0x81C, 0xE6300103,
+ 0x81C, 0xE5320103,
+ 0x81C, 0xE4340103,
+ 0x81C, 0xE3360103,
+ 0x81C, 0xC6380103,
+ 0x81C, 0xC53A0103,
+ 0x81C, 0xC43C0103,
+ 0x81C, 0xC33E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0xA04A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x80500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x605A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xC32C0103,
+ 0x81C, 0xC22E0103,
+ 0x81C, 0xC1300103,
+ 0x81C, 0xC0320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFE000103,
+ 0x81C, 0xFD020103,
+ 0x81C, 0xFC040103,
+ 0x81C, 0xFB060103,
+ 0x81C, 0xFA080103,
+ 0x81C, 0xF90A0103,
+ 0x81C, 0xF80C0103,
+ 0x81C, 0xF70E0103,
+ 0x81C, 0xF6100103,
+ 0x81C, 0xF5120103,
+ 0x81C, 0xF4140103,
+ 0x81C, 0xF3160103,
+ 0x81C, 0xF2180103,
+ 0x81C, 0xF11A0103,
+ 0x81C, 0xF01C0103,
+ 0x81C, 0xEF1E0103,
+ 0x81C, 0xEE200103,
+ 0x81C, 0xED220103,
+ 0x81C, 0xEC240103,
+ 0x81C, 0xEB260103,
+ 0x81C, 0xEA280103,
+ 0x81C, 0xE92A0103,
+ 0x81C, 0xE82C0103,
+ 0x81C, 0xE72E0103,
+ 0x81C, 0xE6300103,
+ 0x81C, 0xE5320103,
+ 0x81C, 0xE4340103,
+ 0x81C, 0xE3360103,
+ 0x81C, 0xC6380103,
+ 0x81C, 0xC53A0103,
+ 0x81C, 0xC43C0103,
+ 0x81C, 0xC33E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0xA04A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x80500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x605A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xA63C0203,
+ 0x81C, 0xA53E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x63420203,
+ 0x81C, 0x62440203,
+ 0x81C, 0x61460203,
+ 0x81C, 0x60480203,
+ 0x81C, 0x424A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x06500203,
+ 0x81C, 0x05520203,
+ 0x81C, 0x04540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xC6360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xA63E0203,
+ 0x81C, 0xA5400203,
+ 0x81C, 0xA4420203,
+ 0x81C, 0xA3440203,
+ 0x81C, 0xA2460203,
+ 0x81C, 0xA1480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x60580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x215C0203,
+ 0x81C, 0x205E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xE1360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xC23E0203,
+ 0x81C, 0xC1400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0xA0480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x25580203,
+ 0x81C, 0x245A0203,
+ 0x81C, 0x235C0203,
+ 0x81C, 0x225E0203,
+ 0x81C, 0x21600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000203,
+ 0x81C, 0xF8020203,
+ 0x81C, 0xF7040203,
+ 0x81C, 0xF6060203,
+ 0x81C, 0xF5080203,
+ 0x81C, 0xF40A0203,
+ 0x81C, 0xF30C0203,
+ 0x81C, 0xF20E0203,
+ 0x81C, 0xF1100203,
+ 0x81C, 0xF0120203,
+ 0x81C, 0xEF140203,
+ 0x81C, 0xEE160203,
+ 0x81C, 0xED180203,
+ 0x81C, 0xEC1A0203,
+ 0x81C, 0xEB1C0203,
+ 0x81C, 0xEA1E0203,
+ 0x81C, 0xE9200203,
+ 0x81C, 0xE8220203,
+ 0x81C, 0xE7240203,
+ 0x81C, 0xE6260203,
+ 0x81C, 0xE5280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xC42C0203,
+ 0x81C, 0xC32E0203,
+ 0x81C, 0xC2300203,
+ 0x81C, 0xC1320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x65420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x424C0203,
+ 0x81C, 0x414E0203,
+ 0x81C, 0x40500203,
+ 0x81C, 0x22520203,
+ 0x81C, 0x21540203,
+ 0x81C, 0x20560203,
+ 0x81C, 0x04580203,
+ 0x81C, 0x035A0203,
+ 0x81C, 0x025C0203,
+ 0x81C, 0x015E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000203,
+ 0x81C, 0xFA020203,
+ 0x81C, 0xF9040203,
+ 0x81C, 0xF8060203,
+ 0x81C, 0xF7080203,
+ 0x81C, 0xF60A0203,
+ 0x81C, 0xF50C0203,
+ 0x81C, 0xF40E0203,
+ 0x81C, 0xF3100203,
+ 0x81C, 0xF2120203,
+ 0x81C, 0xF1140203,
+ 0x81C, 0xF0160203,
+ 0x81C, 0xEF180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xC23C0203,
+ 0x81C, 0xC13E0203,
+ 0x81C, 0xC0400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0xA0480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x60560203,
+ 0x81C, 0x24580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xC42C0203,
+ 0x81C, 0xC32E0203,
+ 0x81C, 0xC2300203,
+ 0x81C, 0xC1320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x65420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x424C0203,
+ 0x81C, 0x414E0203,
+ 0x81C, 0x40500203,
+ 0x81C, 0x22520203,
+ 0x81C, 0x21540203,
+ 0x81C, 0x20560203,
+ 0x81C, 0x04580203,
+ 0x81C, 0x035A0203,
+ 0x81C, 0x025C0203,
+ 0x81C, 0x015E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xA63C0203,
+ 0x81C, 0xA53E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000203,
+ 0x81C, 0xF8020203,
+ 0x81C, 0xF7040203,
+ 0x81C, 0xF6060203,
+ 0x81C, 0xF5080203,
+ 0x81C, 0xF40A0203,
+ 0x81C, 0xF30C0203,
+ 0x81C, 0xF20E0203,
+ 0x81C, 0xF1100203,
+ 0x81C, 0xF0120203,
+ 0x81C, 0xEF140203,
+ 0x81C, 0xEE160203,
+ 0x81C, 0xED180203,
+ 0x81C, 0xEC1A0203,
+ 0x81C, 0xEB1C0203,
+ 0x81C, 0xEA1E0203,
+ 0x81C, 0xE9200203,
+ 0x81C, 0xE8220203,
+ 0x81C, 0xE7240203,
+ 0x81C, 0xE6260203,
+ 0x81C, 0xE5280203,
+ 0x81C, 0xE42A0203,
+ 0x81C, 0xC42C0203,
+ 0x81C, 0xC32E0203,
+ 0x81C, 0xC2300203,
+ 0x81C, 0xC1320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x244C0203,
+ 0x81C, 0x234E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x05560203,
+ 0x81C, 0x04580203,
+ 0x81C, 0x035A0203,
+ 0x81C, 0x025C0203,
+ 0x81C, 0x015E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xC6360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xA63E0203,
+ 0x81C, 0xA5400203,
+ 0x81C, 0xA4420203,
+ 0x81C, 0xA3440203,
+ 0x81C, 0xA2460203,
+ 0x81C, 0xA1480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x60580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x215C0203,
+ 0x81C, 0x205E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xA63C0203,
+ 0x81C, 0xA53E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xA63C0203,
+ 0x81C, 0xA53E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xE1360203,
+ 0x81C, 0xE0380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xC23C0203,
+ 0x81C, 0xC13E0203,
+ 0x81C, 0xA3400203,
+ 0x81C, 0xA2420203,
+ 0x81C, 0xA1440203,
+ 0x81C, 0xA0460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x644E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x04600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xC6360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xA63E0203,
+ 0x81C, 0xA5400203,
+ 0x81C, 0xA4420203,
+ 0x81C, 0xA3440203,
+ 0x81C, 0xA2460203,
+ 0x81C, 0xA1480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x60580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x215C0203,
+ 0x81C, 0x205E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFD000203,
+ 0x81C, 0xFC020203,
+ 0x81C, 0xFB040203,
+ 0x81C, 0xFA060203,
+ 0x81C, 0xF9080203,
+ 0x81C, 0xF80A0203,
+ 0x81C, 0xF70C0203,
+ 0x81C, 0xF60E0203,
+ 0x81C, 0xF5100203,
+ 0x81C, 0xF4120203,
+ 0x81C, 0xF3140203,
+ 0x81C, 0xF2160203,
+ 0x81C, 0xF1180203,
+ 0x81C, 0xF01A0203,
+ 0x81C, 0xEF1C0203,
+ 0x81C, 0xEE1E0203,
+ 0x81C, 0xED200203,
+ 0x81C, 0xEC220203,
+ 0x81C, 0xEB240203,
+ 0x81C, 0xEA260203,
+ 0x81C, 0xE9280203,
+ 0x81C, 0xE82A0203,
+ 0x81C, 0xE72C0203,
+ 0x81C, 0xE62E0203,
+ 0x81C, 0xE5300203,
+ 0x81C, 0xE4320203,
+ 0x81C, 0xE3340203,
+ 0x81C, 0xC6360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xA63E0203,
+ 0x81C, 0xA5400203,
+ 0x81C, 0xA4420203,
+ 0x81C, 0xA3440203,
+ 0x81C, 0xA2460203,
+ 0x81C, 0xA1480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x60580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xA63C0303,
+ 0x81C, 0xA53E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x405A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xCA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x06500303,
+ 0x81C, 0x05520303,
+ 0x81C, 0x04540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xCA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x22500303,
+ 0x81C, 0x21520303,
+ 0x81C, 0x20540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xF0180303,
+ 0x81C, 0xEF1A0303,
+ 0x81C, 0xEE1C0303,
+ 0x81C, 0xED1E0303,
+ 0x81C, 0xEC200303,
+ 0x81C, 0xEB220303,
+ 0x81C, 0xEA240303,
+ 0x81C, 0xE9260303,
+ 0x81C, 0xE8280303,
+ 0x81C, 0xE72A0303,
+ 0x81C, 0xE62C0303,
+ 0x81C, 0xE52E0303,
+ 0x81C, 0xE4300303,
+ 0x81C, 0xE3320303,
+ 0x81C, 0xE2340303,
+ 0x81C, 0xC6360303,
+ 0x81C, 0xC5380303,
+ 0x81C, 0xC43A0303,
+ 0x81C, 0xC33C0303,
+ 0x81C, 0xA63E0303,
+ 0x81C, 0xA5400303,
+ 0x81C, 0xA4420303,
+ 0x81C, 0xA3440303,
+ 0x81C, 0xA2460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x80500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x225A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xCA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x22500303,
+ 0x81C, 0x21520303,
+ 0x81C, 0x20540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xF0160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xE1340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xC23C0303,
+ 0x81C, 0xC13E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x64500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x235A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000303,
+ 0x81C, 0xF8020303,
+ 0x81C, 0xF7040303,
+ 0x81C, 0xF6060303,
+ 0x81C, 0xF5080303,
+ 0x81C, 0xF40A0303,
+ 0x81C, 0xF30C0303,
+ 0x81C, 0xF20E0303,
+ 0x81C, 0xF1100303,
+ 0x81C, 0xF0120303,
+ 0x81C, 0xEF140303,
+ 0x81C, 0xEE160303,
+ 0x81C, 0xED180303,
+ 0x81C, 0xEC1A0303,
+ 0x81C, 0xEB1C0303,
+ 0x81C, 0xEA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xC0320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x22500303,
+ 0x81C, 0x21520303,
+ 0x81C, 0x20540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000303,
+ 0x81C, 0xF7020303,
+ 0x81C, 0xF6040303,
+ 0x81C, 0xF5060303,
+ 0x81C, 0xF4080303,
+ 0x81C, 0xF30A0303,
+ 0x81C, 0xF20C0303,
+ 0x81C, 0xF10E0303,
+ 0x81C, 0xF0100303,
+ 0x81C, 0xEF120303,
+ 0x81C, 0xEE140303,
+ 0x81C, 0xED160303,
+ 0x81C, 0xEC180303,
+ 0x81C, 0xEB1A0303,
+ 0x81C, 0xEA1C0303,
+ 0x81C, 0xE91E0303,
+ 0x81C, 0xCA200303,
+ 0x81C, 0xC9220303,
+ 0x81C, 0xC8240303,
+ 0x81C, 0xC7260303,
+ 0x81C, 0xC6280303,
+ 0x81C, 0xC52A0303,
+ 0x81C, 0xC42C0303,
+ 0x81C, 0xC32E0303,
+ 0x81C, 0xC2300303,
+ 0x81C, 0xC1320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x65420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x424C0303,
+ 0x81C, 0x414E0303,
+ 0x81C, 0x40500303,
+ 0x81C, 0x22520303,
+ 0x81C, 0x21540303,
+ 0x81C, 0x20560303,
+ 0x81C, 0x04580303,
+ 0x81C, 0x035A0303,
+ 0x81C, 0x025C0303,
+ 0x81C, 0x015E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xF0160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xC23C0303,
+ 0x81C, 0xC13E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x43540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x235A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000303,
+ 0x81C, 0xF7020303,
+ 0x81C, 0xF6040303,
+ 0x81C, 0xF5060303,
+ 0x81C, 0xF4080303,
+ 0x81C, 0xF30A0303,
+ 0x81C, 0xF20C0303,
+ 0x81C, 0xF10E0303,
+ 0x81C, 0xF0100303,
+ 0x81C, 0xEF120303,
+ 0x81C, 0xEE140303,
+ 0x81C, 0xED160303,
+ 0x81C, 0xEC180303,
+ 0x81C, 0xEB1A0303,
+ 0x81C, 0xEA1C0303,
+ 0x81C, 0xE91E0303,
+ 0x81C, 0xCA200303,
+ 0x81C, 0xC9220303,
+ 0x81C, 0xC8240303,
+ 0x81C, 0xC7260303,
+ 0x81C, 0xC6280303,
+ 0x81C, 0xC52A0303,
+ 0x81C, 0xC42C0303,
+ 0x81C, 0xC32E0303,
+ 0x81C, 0xC2300303,
+ 0x81C, 0xC1320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x65420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x424C0303,
+ 0x81C, 0x414E0303,
+ 0x81C, 0x40500303,
+ 0x81C, 0x22520303,
+ 0x81C, 0x21540303,
+ 0x81C, 0x20560303,
+ 0x81C, 0x04580303,
+ 0x81C, 0x035A0303,
+ 0x81C, 0x025C0303,
+ 0x81C, 0x015E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xA63C0303,
+ 0x81C, 0xA53E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x405A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000303,
+ 0x81C, 0xF7020303,
+ 0x81C, 0xF6040303,
+ 0x81C, 0xF5060303,
+ 0x81C, 0xF4080303,
+ 0x81C, 0xF30A0303,
+ 0x81C, 0xF20C0303,
+ 0x81C, 0xF10E0303,
+ 0x81C, 0xF0100303,
+ 0x81C, 0xEF120303,
+ 0x81C, 0xEE140303,
+ 0x81C, 0xED160303,
+ 0x81C, 0xEC180303,
+ 0x81C, 0xEB1A0303,
+ 0x81C, 0xEA1C0303,
+ 0x81C, 0xE91E0303,
+ 0x81C, 0xCA200303,
+ 0x81C, 0xC9220303,
+ 0x81C, 0xC8240303,
+ 0x81C, 0xC7260303,
+ 0x81C, 0xC6280303,
+ 0x81C, 0xC52A0303,
+ 0x81C, 0xC42C0303,
+ 0x81C, 0xC32E0303,
+ 0x81C, 0xC2300303,
+ 0x81C, 0xC1320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x234C0303,
+ 0x81C, 0x224E0303,
+ 0x81C, 0x21500303,
+ 0x81C, 0x20520303,
+ 0x81C, 0x06540303,
+ 0x81C, 0x05560303,
+ 0x81C, 0x04580303,
+ 0x81C, 0x035A0303,
+ 0x81C, 0x025C0303,
+ 0x81C, 0x015E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xF0180303,
+ 0x81C, 0xEF1A0303,
+ 0x81C, 0xEE1C0303,
+ 0x81C, 0xED1E0303,
+ 0x81C, 0xEC200303,
+ 0x81C, 0xEB220303,
+ 0x81C, 0xEA240303,
+ 0x81C, 0xE9260303,
+ 0x81C, 0xE8280303,
+ 0x81C, 0xE72A0303,
+ 0x81C, 0xE62C0303,
+ 0x81C, 0xE52E0303,
+ 0x81C, 0xE4300303,
+ 0x81C, 0xE3320303,
+ 0x81C, 0xE2340303,
+ 0x81C, 0xC6360303,
+ 0x81C, 0xC5380303,
+ 0x81C, 0xC43A0303,
+ 0x81C, 0xC33C0303,
+ 0x81C, 0xA63E0303,
+ 0x81C, 0xA5400303,
+ 0x81C, 0xA4420303,
+ 0x81C, 0xA3440303,
+ 0x81C, 0xA2460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x80500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x225A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xEF160303,
+ 0x81C, 0xEE180303,
+ 0x81C, 0xED1A0303,
+ 0x81C, 0xEC1C0303,
+ 0x81C, 0xEB1E0303,
+ 0x81C, 0xEA200303,
+ 0x81C, 0xE9220303,
+ 0x81C, 0xE8240303,
+ 0x81C, 0xE7260303,
+ 0x81C, 0xE6280303,
+ 0x81C, 0xE52A0303,
+ 0x81C, 0xE42C0303,
+ 0x81C, 0xE32E0303,
+ 0x81C, 0xE2300303,
+ 0x81C, 0xE1320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xA63C0303,
+ 0x81C, 0xA53E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x405A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xEF160303,
+ 0x81C, 0xEE180303,
+ 0x81C, 0xED1A0303,
+ 0x81C, 0xEC1C0303,
+ 0x81C, 0xEB1E0303,
+ 0x81C, 0xEA200303,
+ 0x81C, 0xE9220303,
+ 0x81C, 0xE8240303,
+ 0x81C, 0xE7260303,
+ 0x81C, 0xE6280303,
+ 0x81C, 0xE52A0303,
+ 0x81C, 0xE42C0303,
+ 0x81C, 0xE32E0303,
+ 0x81C, 0xE2300303,
+ 0x81C, 0xE1320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xA63C0303,
+ 0x81C, 0xA53E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x405A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xF0160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xE1340303,
+ 0x81C, 0xE0360303,
+ 0x81C, 0xC3380303,
+ 0x81C, 0xC23A0303,
+ 0x81C, 0xC13C0303,
+ 0x81C, 0xC03E0303,
+ 0x81C, 0xA3400303,
+ 0x81C, 0xA2420303,
+ 0x81C, 0xA1440303,
+ 0x81C, 0xA0460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x644E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x23580303,
+ 0x81C, 0x225A0303,
+ 0x81C, 0x215C0303,
+ 0x81C, 0x055E0303,
+ 0x81C, 0x04600303,
+ 0x81C, 0x03620303,
+ 0x81C, 0x02640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xF0180303,
+ 0x81C, 0xEF1A0303,
+ 0x81C, 0xEE1C0303,
+ 0x81C, 0xED1E0303,
+ 0x81C, 0xEC200303,
+ 0x81C, 0xEB220303,
+ 0x81C, 0xEA240303,
+ 0x81C, 0xE9260303,
+ 0x81C, 0xE8280303,
+ 0x81C, 0xE72A0303,
+ 0x81C, 0xE62C0303,
+ 0x81C, 0xE52E0303,
+ 0x81C, 0xE4300303,
+ 0x81C, 0xE3320303,
+ 0x81C, 0xE2340303,
+ 0x81C, 0xC6360303,
+ 0x81C, 0xC5380303,
+ 0x81C, 0xC43A0303,
+ 0x81C, 0xC33C0303,
+ 0x81C, 0xA63E0303,
+ 0x81C, 0xA5400303,
+ 0x81C, 0xA4420303,
+ 0x81C, 0xA3440303,
+ 0x81C, 0xA2460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x80500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x225A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xCA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x22500303,
+ 0x81C, 0x21520303,
+ 0x81C, 0x20540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xF0180303,
+ 0x81C, 0xEF1A0303,
+ 0x81C, 0xEE1C0303,
+ 0x81C, 0xED1E0303,
+ 0x81C, 0xEC200303,
+ 0x81C, 0xEB220303,
+ 0x81C, 0xEA240303,
+ 0x81C, 0xE9260303,
+ 0x81C, 0xE8280303,
+ 0x81C, 0xE72A0303,
+ 0x81C, 0xE62C0303,
+ 0x81C, 0xE52E0303,
+ 0x81C, 0xE4300303,
+ 0x81C, 0xE3320303,
+ 0x81C, 0xE2340303,
+ 0x81C, 0xC6360303,
+ 0x81C, 0xC5380303,
+ 0x81C, 0xC43A0303,
+ 0x81C, 0xC33C0303,
+ 0x81C, 0xA63E0303,
+ 0x81C, 0xA5400303,
+ 0x81C, 0xA4420303,
+ 0x81C, 0xA3440303,
+ 0x81C, 0xA2460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x80500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x235A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x03620303,
+ 0x81C, 0x02640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF6000403,
+ 0x81C, 0xF5020403,
+ 0x81C, 0xF4040403,
+ 0x81C, 0xF3060403,
+ 0x81C, 0xF2080403,
+ 0x81C, 0xF10A0403,
+ 0x81C, 0xF00C0403,
+ 0x81C, 0xEF0E0403,
+ 0x81C, 0xD6100403,
+ 0x81C, 0xD5120403,
+ 0x81C, 0xD4140403,
+ 0x81C, 0xD3160403,
+ 0x81C, 0xD2180403,
+ 0x81C, 0xD11A0403,
+ 0x81C, 0xD01C0403,
+ 0x81C, 0xCF1E0403,
+ 0x81C, 0x95200403,
+ 0x81C, 0x94220403,
+ 0x81C, 0x93240403,
+ 0x81C, 0x92260403,
+ 0x81C, 0x91280403,
+ 0x81C, 0x902A0403,
+ 0x81C, 0x8F2C0403,
+ 0x81C, 0x8E2E0403,
+ 0x81C, 0x8D300403,
+ 0x81C, 0x8C320403,
+ 0x81C, 0x8B340403,
+ 0x81C, 0x8A360403,
+ 0x81C, 0x89380403,
+ 0x81C, 0x883A0403,
+ 0x81C, 0x873C0403,
+ 0x81C, 0x863E0403,
+ 0x81C, 0x68400403,
+ 0x81C, 0x67420403,
+ 0x81C, 0x66440403,
+ 0x81C, 0x65460403,
+ 0x81C, 0x64480403,
+ 0x81C, 0x634A0403,
+ 0x81C, 0x484C0403,
+ 0x81C, 0x474E0403,
+ 0x81C, 0x46500403,
+ 0x81C, 0x45520403,
+ 0x81C, 0x44540403,
+ 0x81C, 0x27560403,
+ 0x81C, 0x26580403,
+ 0x81C, 0x255A0403,
+ 0x81C, 0x245C0403,
+ 0x81C, 0x235E0403,
+ 0x81C, 0x04600403,
+ 0x81C, 0x03620403,
+ 0x81C, 0x02640403,
+ 0x81C, 0x01660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF6000403,
+ 0x81C, 0xF5020403,
+ 0x81C, 0xF4040403,
+ 0x81C, 0xF3060403,
+ 0x81C, 0xF2080403,
+ 0x81C, 0xF10A0403,
+ 0x81C, 0xF00C0403,
+ 0x81C, 0xEF0E0403,
+ 0x81C, 0xD6100403,
+ 0x81C, 0xD5120403,
+ 0x81C, 0xD4140403,
+ 0x81C, 0xD3160403,
+ 0x81C, 0xD2180403,
+ 0x81C, 0xD11A0403,
+ 0x81C, 0xD01C0403,
+ 0x81C, 0xCF1E0403,
+ 0x81C, 0x95200403,
+ 0x81C, 0x94220403,
+ 0x81C, 0x93240403,
+ 0x81C, 0x92260403,
+ 0x81C, 0x91280403,
+ 0x81C, 0x902A0403,
+ 0x81C, 0x8F2C0403,
+ 0x81C, 0x8E2E0403,
+ 0x81C, 0x8D300403,
+ 0x81C, 0x8C320403,
+ 0x81C, 0x8B340403,
+ 0x81C, 0x8A360403,
+ 0x81C, 0x89380403,
+ 0x81C, 0x883A0403,
+ 0x81C, 0x873C0403,
+ 0x81C, 0x863E0403,
+ 0x81C, 0x68400403,
+ 0x81C, 0x67420403,
+ 0x81C, 0x66440403,
+ 0x81C, 0x65460403,
+ 0x81C, 0x64480403,
+ 0x81C, 0x634A0403,
+ 0x81C, 0x484C0403,
+ 0x81C, 0x474E0403,
+ 0x81C, 0x46500403,
+ 0x81C, 0x45520403,
+ 0x81C, 0x44540403,
+ 0x81C, 0x27560403,
+ 0x81C, 0x26580403,
+ 0x81C, 0x255A0403,
+ 0x81C, 0x245C0403,
+ 0x81C, 0x235E0403,
+ 0x81C, 0x04600403,
+ 0x81C, 0x03620403,
+ 0x81C, 0x02640403,
+ 0x81C, 0x01660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEE1C0503,
+ 0x81C, 0xED1E0503,
+ 0x81C, 0xEC200503,
+ 0x81C, 0xEB220503,
+ 0x81C, 0xEA240503,
+ 0x81C, 0xE9260503,
+ 0x81C, 0xE8280503,
+ 0x81C, 0xE72A0503,
+ 0x81C, 0xE62C0503,
+ 0x81C, 0xE52E0503,
+ 0x81C, 0xE4300503,
+ 0x81C, 0xE3320503,
+ 0x81C, 0xE2340503,
+ 0x81C, 0xC5360503,
+ 0x81C, 0xC4380503,
+ 0x81C, 0xC33A0503,
+ 0x81C, 0xC23C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBE000503,
+ 0x81C, 0xBD020503,
+ 0x81C, 0xBC040503,
+ 0x81C, 0xBB060503,
+ 0x81C, 0xBA080503,
+ 0x81C, 0xB90A0503,
+ 0x81C, 0xB80C0503,
+ 0x81C, 0xB70E0503,
+ 0x81C, 0xB6100503,
+ 0x81C, 0xB5120503,
+ 0x81C, 0xB4140503,
+ 0x81C, 0xB3160503,
+ 0x81C, 0xB2180503,
+ 0x81C, 0xB11A0503,
+ 0x81C, 0xB01C0503,
+ 0x81C, 0xAF1E0503,
+ 0x81C, 0xAE200503,
+ 0x81C, 0xAD220503,
+ 0x81C, 0xAC240503,
+ 0x81C, 0xAB260503,
+ 0x81C, 0x8D280503,
+ 0x81C, 0x8C2A0503,
+ 0x81C, 0x8B2C0503,
+ 0x81C, 0x8A2E0503,
+ 0x81C, 0x89300503,
+ 0x81C, 0x88320503,
+ 0x81C, 0x6A340503,
+ 0x81C, 0x69360503,
+ 0x81C, 0x68380503,
+ 0x81C, 0x673A0503,
+ 0x81C, 0x663C0503,
+ 0x81C, 0x653E0503,
+ 0x81C, 0x64400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x06500503,
+ 0x81C, 0x05520503,
+ 0x81C, 0x04540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007C0503,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000503,
+ 0x81C, 0xF7020503,
+ 0x81C, 0xF6040503,
+ 0x81C, 0xF5060503,
+ 0x81C, 0xF4080503,
+ 0x81C, 0xF30A0503,
+ 0x81C, 0xF20C0503,
+ 0x81C, 0xF10E0503,
+ 0x81C, 0xF0100503,
+ 0x81C, 0xEF120503,
+ 0x81C, 0xEE140503,
+ 0x81C, 0xED160503,
+ 0x81C, 0xEC180503,
+ 0x81C, 0xEB1A0503,
+ 0x81C, 0xEA1C0503,
+ 0x81C, 0xE91E0503,
+ 0x81C, 0xE8200503,
+ 0x81C, 0xE7220503,
+ 0x81C, 0xE6240503,
+ 0x81C, 0xE5260503,
+ 0x81C, 0xE4280503,
+ 0x81C, 0xE32A0503,
+ 0x81C, 0xC32C0503,
+ 0x81C, 0xC22E0503,
+ 0x81C, 0xC1300503,
+ 0x81C, 0xC0320503,
+ 0x81C, 0xA3340503,
+ 0x81C, 0xA2360503,
+ 0x81C, 0xA1380503,
+ 0x81C, 0xA03A0503,
+ 0x81C, 0x823C0503,
+ 0x81C, 0x813E0503,
+ 0x81C, 0x80400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000503,
+ 0x81C, 0xFD020503,
+ 0x81C, 0xFC040503,
+ 0x81C, 0xFB060503,
+ 0x81C, 0xFA080503,
+ 0x81C, 0xF90A0503,
+ 0x81C, 0xF80C0503,
+ 0x81C, 0xF70E0503,
+ 0x81C, 0xF6100503,
+ 0x81C, 0xF5120503,
+ 0x81C, 0xF4140503,
+ 0x81C, 0xF3160503,
+ 0x81C, 0xF2180503,
+ 0x81C, 0xF11A0503,
+ 0x81C, 0xF01C0503,
+ 0x81C, 0xEF1E0503,
+ 0x81C, 0xEE200503,
+ 0x81C, 0xED220503,
+ 0x81C, 0xEC240503,
+ 0x81C, 0xEB260503,
+ 0x81C, 0xEA280503,
+ 0x81C, 0xE92A0503,
+ 0x81C, 0xE82C0503,
+ 0x81C, 0xE72E0503,
+ 0x81C, 0xE6300503,
+ 0x81C, 0xE5320503,
+ 0x81C, 0xE4340503,
+ 0x81C, 0xE3360503,
+ 0x81C, 0xC6380503,
+ 0x81C, 0xC53A0503,
+ 0x81C, 0xC43C0503,
+ 0x81C, 0xC33E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0xA04A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x80500503,
+ 0x81C, 0x64520503,
+ 0x81C, 0x63540503,
+ 0x81C, 0x62560503,
+ 0x81C, 0x61580503,
+ 0x81C, 0x605A0503,
+ 0x81C, 0x235C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000503,
+ 0x81C, 0xF7020503,
+ 0x81C, 0xF6040503,
+ 0x81C, 0xF5060503,
+ 0x81C, 0xF4080503,
+ 0x81C, 0xF30A0503,
+ 0x81C, 0xF20C0503,
+ 0x81C, 0xF10E0503,
+ 0x81C, 0xF0100503,
+ 0x81C, 0xEF120503,
+ 0x81C, 0xEE140503,
+ 0x81C, 0xED160503,
+ 0x81C, 0xEC180503,
+ 0x81C, 0xEB1A0503,
+ 0x81C, 0xEA1C0503,
+ 0x81C, 0xE91E0503,
+ 0x81C, 0xE8200503,
+ 0x81C, 0xE7220503,
+ 0x81C, 0xE6240503,
+ 0x81C, 0xE5260503,
+ 0x81C, 0xE4280503,
+ 0x81C, 0xE32A0503,
+ 0x81C, 0xC32C0503,
+ 0x81C, 0xC22E0503,
+ 0x81C, 0xC1300503,
+ 0x81C, 0xC0320503,
+ 0x81C, 0xA3340503,
+ 0x81C, 0xA2360503,
+ 0x81C, 0xA1380503,
+ 0x81C, 0xA03A0503,
+ 0x81C, 0x823C0503,
+ 0x81C, 0x813E0503,
+ 0x81C, 0x80400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEF1C0503,
+ 0x81C, 0xEE1E0503,
+ 0x81C, 0xED200503,
+ 0x81C, 0xEC220503,
+ 0x81C, 0xEB240503,
+ 0x81C, 0xEA260503,
+ 0x81C, 0xE9280503,
+ 0x81C, 0xE82A0503,
+ 0x81C, 0xE72C0503,
+ 0x81C, 0xE62E0503,
+ 0x81C, 0xE5300503,
+ 0x81C, 0xE4320503,
+ 0x81C, 0xE3340503,
+ 0x81C, 0xE2360503,
+ 0x81C, 0xC5380503,
+ 0x81C, 0xC43A0503,
+ 0x81C, 0xC33C0503,
+ 0x81C, 0xC23E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0x834A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x64500503,
+ 0x81C, 0x63520503,
+ 0x81C, 0x62540503,
+ 0x81C, 0x61560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x065E0503,
+ 0x81C, 0x05600503,
+ 0x81C, 0x04620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000503,
+ 0x81C, 0xF9020503,
+ 0x81C, 0xF8040503,
+ 0x81C, 0xF7060503,
+ 0x81C, 0xF6080503,
+ 0x81C, 0xF50A0503,
+ 0x81C, 0xF40C0503,
+ 0x81C, 0xF30E0503,
+ 0x81C, 0xF2100503,
+ 0x81C, 0xF1120503,
+ 0x81C, 0xF0140503,
+ 0x81C, 0xEF160503,
+ 0x81C, 0xEE180503,
+ 0x81C, 0xED1A0503,
+ 0x81C, 0xEC1C0503,
+ 0x81C, 0xEB1E0503,
+ 0x81C, 0xEA200503,
+ 0x81C, 0xE9220503,
+ 0x81C, 0xE8240503,
+ 0x81C, 0xE7260503,
+ 0x81C, 0xE6280503,
+ 0x81C, 0xE52A0503,
+ 0x81C, 0xC42C0503,
+ 0x81C, 0xC32E0503,
+ 0x81C, 0xC2300503,
+ 0x81C, 0xC1320503,
+ 0x81C, 0xA4340503,
+ 0x81C, 0xA3360503,
+ 0x81C, 0xA2380503,
+ 0x81C, 0xA13A0503,
+ 0x81C, 0x833C0503,
+ 0x81C, 0x823E0503,
+ 0x81C, 0x81400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBF000503,
+ 0x81C, 0xBE020503,
+ 0x81C, 0xBD040503,
+ 0x81C, 0xBC060503,
+ 0x81C, 0xBB080503,
+ 0x81C, 0xBA0A0503,
+ 0x81C, 0xB90C0503,
+ 0x81C, 0xB80E0503,
+ 0x81C, 0xB7100503,
+ 0x81C, 0xB6120503,
+ 0x81C, 0xB5140503,
+ 0x81C, 0xB4160503,
+ 0x81C, 0xB3180503,
+ 0x81C, 0xB21A0503,
+ 0x81C, 0xB11C0503,
+ 0x81C, 0x931E0503,
+ 0x81C, 0x92200503,
+ 0x81C, 0x91220503,
+ 0x81C, 0x90240503,
+ 0x81C, 0x8F260503,
+ 0x81C, 0x8E280503,
+ 0x81C, 0x8D2A0503,
+ 0x81C, 0x8C2C0503,
+ 0x81C, 0x8B2E0503,
+ 0x81C, 0x8A300503,
+ 0x81C, 0x89320503,
+ 0x81C, 0x88340503,
+ 0x81C, 0x6A360503,
+ 0x81C, 0x69380503,
+ 0x81C, 0x683A0503,
+ 0x81C, 0x673C0503,
+ 0x81C, 0x663E0503,
+ 0x81C, 0x65400503,
+ 0x81C, 0x64420503,
+ 0x81C, 0x63440503,
+ 0x81C, 0x62460503,
+ 0x81C, 0x61480503,
+ 0x81C, 0x604A0503,
+ 0x81C, 0x424C0503,
+ 0x81C, 0x414E0503,
+ 0x81C, 0x40500503,
+ 0x81C, 0x06520503,
+ 0x81C, 0x05540503,
+ 0x81C, 0x04560503,
+ 0x81C, 0x03580503,
+ 0x81C, 0x025A0503,
+ 0x81C, 0x015C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEF1C0503,
+ 0x81C, 0xEE1E0503,
+ 0x81C, 0xED200503,
+ 0x81C, 0xEC220503,
+ 0x81C, 0xEB240503,
+ 0x81C, 0xEA260503,
+ 0x81C, 0xE9280503,
+ 0x81C, 0xE82A0503,
+ 0x81C, 0xE72C0503,
+ 0x81C, 0xE62E0503,
+ 0x81C, 0xE5300503,
+ 0x81C, 0xE4320503,
+ 0x81C, 0xE3340503,
+ 0x81C, 0xC6360503,
+ 0x81C, 0xC5380503,
+ 0x81C, 0xC43A0503,
+ 0x81C, 0xC33C0503,
+ 0x81C, 0xC23E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0x834A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x245A0503,
+ 0x81C, 0x235C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x04620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000503,
+ 0x81C, 0xF7020503,
+ 0x81C, 0xF6040503,
+ 0x81C, 0xF5060503,
+ 0x81C, 0xF4080503,
+ 0x81C, 0xF30A0503,
+ 0x81C, 0xF20C0503,
+ 0x81C, 0xF10E0503,
+ 0x81C, 0xF0100503,
+ 0x81C, 0xEF120503,
+ 0x81C, 0xEE140503,
+ 0x81C, 0xED160503,
+ 0x81C, 0xEC180503,
+ 0x81C, 0xEB1A0503,
+ 0x81C, 0xEA1C0503,
+ 0x81C, 0xE91E0503,
+ 0x81C, 0xE8200503,
+ 0x81C, 0xE7220503,
+ 0x81C, 0xE6240503,
+ 0x81C, 0xE5260503,
+ 0x81C, 0xE4280503,
+ 0x81C, 0xE32A0503,
+ 0x81C, 0xE22C0503,
+ 0x81C, 0xC32E0503,
+ 0x81C, 0xC2300503,
+ 0x81C, 0xC1320503,
+ 0x81C, 0xA3340503,
+ 0x81C, 0xA2360503,
+ 0x81C, 0xA1380503,
+ 0x81C, 0xA03A0503,
+ 0x81C, 0x823C0503,
+ 0x81C, 0x813E0503,
+ 0x81C, 0x80400503,
+ 0x81C, 0x64420503,
+ 0x81C, 0x63440503,
+ 0x81C, 0x62460503,
+ 0x81C, 0x61480503,
+ 0x81C, 0x434A0503,
+ 0x81C, 0x424C0503,
+ 0x81C, 0x414E0503,
+ 0x81C, 0x40500503,
+ 0x81C, 0x22520503,
+ 0x81C, 0x21540503,
+ 0x81C, 0x20560503,
+ 0x81C, 0x04580503,
+ 0x81C, 0x035A0503,
+ 0x81C, 0x025C0503,
+ 0x81C, 0x015E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEE1C0503,
+ 0x81C, 0xED1E0503,
+ 0x81C, 0xEC200503,
+ 0x81C, 0xEB220503,
+ 0x81C, 0xEA240503,
+ 0x81C, 0xE9260503,
+ 0x81C, 0xE8280503,
+ 0x81C, 0xE72A0503,
+ 0x81C, 0xE62C0503,
+ 0x81C, 0xE52E0503,
+ 0x81C, 0xE4300503,
+ 0x81C, 0xE3320503,
+ 0x81C, 0xE2340503,
+ 0x81C, 0xC5360503,
+ 0x81C, 0xC4380503,
+ 0x81C, 0xC33A0503,
+ 0x81C, 0xC23C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000503,
+ 0x81C, 0xF8020503,
+ 0x81C, 0xF7040503,
+ 0x81C, 0xF6060503,
+ 0x81C, 0xF5080503,
+ 0x81C, 0xF40A0503,
+ 0x81C, 0xF30C0503,
+ 0x81C, 0xF20E0503,
+ 0x81C, 0xF1100503,
+ 0x81C, 0xF0120503,
+ 0x81C, 0xEF140503,
+ 0x81C, 0xEE160503,
+ 0x81C, 0xED180503,
+ 0x81C, 0xEC1A0503,
+ 0x81C, 0xEB1C0503,
+ 0x81C, 0xEA1E0503,
+ 0x81C, 0xE9200503,
+ 0x81C, 0xE8220503,
+ 0x81C, 0xE7240503,
+ 0x81C, 0xE6260503,
+ 0x81C, 0xE5280503,
+ 0x81C, 0xE42A0503,
+ 0x81C, 0xE32C0503,
+ 0x81C, 0xC32E0503,
+ 0x81C, 0xC2300503,
+ 0x81C, 0xC1320503,
+ 0x81C, 0xA4340503,
+ 0x81C, 0xA3360503,
+ 0x81C, 0xA2380503,
+ 0x81C, 0xA13A0503,
+ 0x81C, 0xA03C0503,
+ 0x81C, 0x823E0503,
+ 0x81C, 0x81400503,
+ 0x81C, 0x80420503,
+ 0x81C, 0x63440503,
+ 0x81C, 0x62460503,
+ 0x81C, 0x61480503,
+ 0x81C, 0x604A0503,
+ 0x81C, 0x244C0503,
+ 0x81C, 0x234E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x05560503,
+ 0x81C, 0x04580503,
+ 0x81C, 0x035A0503,
+ 0x81C, 0x025C0503,
+ 0x81C, 0x015E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000503,
+ 0x81C, 0xFD020503,
+ 0x81C, 0xFC040503,
+ 0x81C, 0xFB060503,
+ 0x81C, 0xFA080503,
+ 0x81C, 0xF90A0503,
+ 0x81C, 0xF80C0503,
+ 0x81C, 0xF70E0503,
+ 0x81C, 0xF6100503,
+ 0x81C, 0xF5120503,
+ 0x81C, 0xF4140503,
+ 0x81C, 0xF3160503,
+ 0x81C, 0xF2180503,
+ 0x81C, 0xF11A0503,
+ 0x81C, 0xF01C0503,
+ 0x81C, 0xEF1E0503,
+ 0x81C, 0xEE200503,
+ 0x81C, 0xED220503,
+ 0x81C, 0xEC240503,
+ 0x81C, 0xEB260503,
+ 0x81C, 0xEA280503,
+ 0x81C, 0xE92A0503,
+ 0x81C, 0xE82C0503,
+ 0x81C, 0xE72E0503,
+ 0x81C, 0xE6300503,
+ 0x81C, 0xE5320503,
+ 0x81C, 0xE4340503,
+ 0x81C, 0xE3360503,
+ 0x81C, 0xC6380503,
+ 0x81C, 0xC53A0503,
+ 0x81C, 0xC43C0503,
+ 0x81C, 0xC33E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0xA04A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x80500503,
+ 0x81C, 0x64520503,
+ 0x81C, 0x63540503,
+ 0x81C, 0x62560503,
+ 0x81C, 0x61580503,
+ 0x81C, 0x605A0503,
+ 0x81C, 0x235C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEE1C0503,
+ 0x81C, 0xED1E0503,
+ 0x81C, 0xEC200503,
+ 0x81C, 0xEB220503,
+ 0x81C, 0xEA240503,
+ 0x81C, 0xE9260503,
+ 0x81C, 0xE8280503,
+ 0x81C, 0xE72A0503,
+ 0x81C, 0xE62C0503,
+ 0x81C, 0xE52E0503,
+ 0x81C, 0xE4300503,
+ 0x81C, 0xE3320503,
+ 0x81C, 0xE2340503,
+ 0x81C, 0xC5360503,
+ 0x81C, 0xC4380503,
+ 0x81C, 0xC33A0503,
+ 0x81C, 0xC23C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEE1C0503,
+ 0x81C, 0xED1E0503,
+ 0x81C, 0xEC200503,
+ 0x81C, 0xEB220503,
+ 0x81C, 0xEA240503,
+ 0x81C, 0xE9260503,
+ 0x81C, 0xE8280503,
+ 0x81C, 0xE72A0503,
+ 0x81C, 0xE62C0503,
+ 0x81C, 0xE52E0503,
+ 0x81C, 0xE4300503,
+ 0x81C, 0xE3320503,
+ 0x81C, 0xE2340503,
+ 0x81C, 0xC5360503,
+ 0x81C, 0xC4380503,
+ 0x81C, 0xC33A0503,
+ 0x81C, 0xC23C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBF000503,
+ 0x81C, 0xBF020503,
+ 0x81C, 0xBF040503,
+ 0x81C, 0xBF060503,
+ 0x81C, 0xBF080503,
+ 0x81C, 0xBF0A0503,
+ 0x81C, 0xBE0C0503,
+ 0x81C, 0xBD0E0503,
+ 0x81C, 0xBC100503,
+ 0x81C, 0xBB120503,
+ 0x81C, 0xBA140503,
+ 0x81C, 0xB9160503,
+ 0x81C, 0xB8180503,
+ 0x81C, 0xB71A0503,
+ 0x81C, 0xB61C0503,
+ 0x81C, 0xB51E0503,
+ 0x81C, 0xB2200503,
+ 0x81C, 0xB3220503,
+ 0x81C, 0xB2240503,
+ 0x81C, 0xB1260503,
+ 0x81C, 0xB0280503,
+ 0x81C, 0xAF2A0503,
+ 0x81C, 0xAE2C0503,
+ 0x81C, 0xAD2E0503,
+ 0x81C, 0xAC300503,
+ 0x81C, 0xAB320503,
+ 0x81C, 0xAA340503,
+ 0x81C, 0xC6360503,
+ 0x81C, 0xC5380503,
+ 0x81C, 0xC43A0503,
+ 0x81C, 0xC33C0503,
+ 0x81C, 0x883E0503,
+ 0x81C, 0x87400503,
+ 0x81C, 0x86420503,
+ 0x81C, 0x85440503,
+ 0x81C, 0x84460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x674A0503,
+ 0x81C, 0x664C0503,
+ 0x81C, 0x654E0503,
+ 0x81C, 0x64500503,
+ 0x81C, 0x27520503,
+ 0x81C, 0x26540503,
+ 0x81C, 0x25560503,
+ 0x81C, 0x24580503,
+ 0x81C, 0x235A0503,
+ 0x81C, 0x225C0503,
+ 0x81C, 0x215E0503,
+ 0x81C, 0x20600503,
+ 0x81C, 0x03620503,
+ 0x81C, 0x02640503,
+ 0x81C, 0x01660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000403,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEF1C0503,
+ 0x81C, 0xEE1E0503,
+ 0x81C, 0xED200503,
+ 0x81C, 0xEC220503,
+ 0x81C, 0xEB240503,
+ 0x81C, 0xEA260503,
+ 0x81C, 0xE9280503,
+ 0x81C, 0xE82A0503,
+ 0x81C, 0xE72C0503,
+ 0x81C, 0xE62E0503,
+ 0x81C, 0xE5300503,
+ 0x81C, 0xE4320503,
+ 0x81C, 0xE3340503,
+ 0x81C, 0xC6360503,
+ 0x81C, 0xC5380503,
+ 0x81C, 0xC43A0503,
+ 0x81C, 0xC33C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0xA0480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x64500503,
+ 0x81C, 0x63520503,
+ 0x81C, 0x62540503,
+ 0x81C, 0x61560503,
+ 0x81C, 0x60580503,
+ 0x81C, 0x235A0503,
+ 0x81C, 0x225C0503,
+ 0x81C, 0x215E0503,
+ 0x81C, 0x20600503,
+ 0x81C, 0x03620503,
+ 0x81C, 0x02640503,
+ 0x81C, 0x01660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000503,
+ 0x81C, 0xF7020503,
+ 0x81C, 0xF6040503,
+ 0x81C, 0xF5060503,
+ 0x81C, 0xF4080503,
+ 0x81C, 0xF30A0503,
+ 0x81C, 0xF20C0503,
+ 0x81C, 0xF10E0503,
+ 0x81C, 0xF0100503,
+ 0x81C, 0xEF120503,
+ 0x81C, 0xEE140503,
+ 0x81C, 0xED160503,
+ 0x81C, 0xEC180503,
+ 0x81C, 0xEB1A0503,
+ 0x81C, 0xEA1C0503,
+ 0x81C, 0xE91E0503,
+ 0x81C, 0xE8200503,
+ 0x81C, 0xE7220503,
+ 0x81C, 0xE6240503,
+ 0x81C, 0xE5260503,
+ 0x81C, 0xE4280503,
+ 0x81C, 0xE32A0503,
+ 0x81C, 0xC32C0503,
+ 0x81C, 0xC22E0503,
+ 0x81C, 0xC1300503,
+ 0x81C, 0xC0320503,
+ 0x81C, 0xA3340503,
+ 0x81C, 0xA2360503,
+ 0x81C, 0xA1380503,
+ 0x81C, 0xA03A0503,
+ 0x81C, 0x823C0503,
+ 0x81C, 0x813E0503,
+ 0x81C, 0x80400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFE000503,
+ 0x81C, 0xFD020503,
+ 0x81C, 0xFC040503,
+ 0x81C, 0xFB060503,
+ 0x81C, 0xFA080503,
+ 0x81C, 0xF90A0503,
+ 0x81C, 0xF80C0503,
+ 0x81C, 0xF70E0503,
+ 0x81C, 0xF6100503,
+ 0x81C, 0xF5120503,
+ 0x81C, 0xF4140503,
+ 0x81C, 0xF3160503,
+ 0x81C, 0xF2180503,
+ 0x81C, 0xF11A0503,
+ 0x81C, 0xF01C0503,
+ 0x81C, 0xEF1E0503,
+ 0x81C, 0xEE200503,
+ 0x81C, 0xED220503,
+ 0x81C, 0xEC240503,
+ 0x81C, 0xEB260503,
+ 0x81C, 0xEA280503,
+ 0x81C, 0xE92A0503,
+ 0x81C, 0xE82C0503,
+ 0x81C, 0xE72E0503,
+ 0x81C, 0xE6300503,
+ 0x81C, 0xE5320503,
+ 0x81C, 0xE4340503,
+ 0x81C, 0xE3360503,
+ 0x81C, 0xC6380503,
+ 0x81C, 0xC53A0503,
+ 0x81C, 0xC43C0503,
+ 0x81C, 0xC33E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0xA04A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x80500503,
+ 0x81C, 0x64520503,
+ 0x81C, 0x63540503,
+ 0x81C, 0x62560503,
+ 0x81C, 0x61580503,
+ 0x81C, 0x605A0503,
+ 0x81C, 0x235C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x42560603,
+ 0x81C, 0x41580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBD000603,
+ 0x81C, 0xBC020603,
+ 0x81C, 0xBB040603,
+ 0x81C, 0xBA060603,
+ 0x81C, 0xB9080603,
+ 0x81C, 0xB80A0603,
+ 0x81C, 0xB70C0603,
+ 0x81C, 0xB60E0603,
+ 0x81C, 0xB5100603,
+ 0x81C, 0xB4120603,
+ 0x81C, 0xB3140603,
+ 0x81C, 0xB2160603,
+ 0x81C, 0xB1180603,
+ 0x81C, 0xB01A0603,
+ 0x81C, 0xAF1C0603,
+ 0x81C, 0xAE1E0603,
+ 0x81C, 0xAD200603,
+ 0x81C, 0x8F220603,
+ 0x81C, 0x8E240603,
+ 0x81C, 0x8D260603,
+ 0x81C, 0x8C280603,
+ 0x81C, 0x8B2A0603,
+ 0x81C, 0x8A2C0603,
+ 0x81C, 0x892E0603,
+ 0x81C, 0x88300603,
+ 0x81C, 0x6B320603,
+ 0x81C, 0x6A340603,
+ 0x81C, 0x69360603,
+ 0x81C, 0x68380603,
+ 0x81C, 0x673A0603,
+ 0x81C, 0x663C0603,
+ 0x81C, 0x653E0603,
+ 0x81C, 0x64400603,
+ 0x81C, 0x63420603,
+ 0x81C, 0x62440603,
+ 0x81C, 0x61460603,
+ 0x81C, 0x60480603,
+ 0x81C, 0x424A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x06500603,
+ 0x81C, 0x05520603,
+ 0x81C, 0x04540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007C0603,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000603,
+ 0x81C, 0xF6020603,
+ 0x81C, 0xF5040603,
+ 0x81C, 0xF4060603,
+ 0x81C, 0xF3080603,
+ 0x81C, 0xF20A0603,
+ 0x81C, 0xF10C0603,
+ 0x81C, 0xF00E0603,
+ 0x81C, 0xEF100603,
+ 0x81C, 0xEE120603,
+ 0x81C, 0xED140603,
+ 0x81C, 0xEC160603,
+ 0x81C, 0xEB180603,
+ 0x81C, 0xEA1A0603,
+ 0x81C, 0xE91C0603,
+ 0x81C, 0xE81E0603,
+ 0x81C, 0xE7200603,
+ 0x81C, 0xE6220603,
+ 0x81C, 0xE5240603,
+ 0x81C, 0xE4260603,
+ 0x81C, 0xE3280603,
+ 0x81C, 0xC42A0603,
+ 0x81C, 0xC32C0603,
+ 0x81C, 0xC22E0603,
+ 0x81C, 0xC1300603,
+ 0x81C, 0xC0320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEF1A0603,
+ 0x81C, 0xEE1C0603,
+ 0x81C, 0xED1E0603,
+ 0x81C, 0xEC200603,
+ 0x81C, 0xEB220603,
+ 0x81C, 0xEA240603,
+ 0x81C, 0xE9260603,
+ 0x81C, 0xE8280603,
+ 0x81C, 0xE72A0603,
+ 0x81C, 0xE62C0603,
+ 0x81C, 0xE52E0603,
+ 0x81C, 0xE4300603,
+ 0x81C, 0xE3320603,
+ 0x81C, 0xE2340603,
+ 0x81C, 0xC6360603,
+ 0x81C, 0xC5380603,
+ 0x81C, 0xC43A0603,
+ 0x81C, 0xC33C0603,
+ 0x81C, 0xA63E0603,
+ 0x81C, 0xA5400603,
+ 0x81C, 0xA4420603,
+ 0x81C, 0xA3440603,
+ 0x81C, 0xA2460603,
+ 0x81C, 0xA1480603,
+ 0x81C, 0x834A0603,
+ 0x81C, 0x824C0603,
+ 0x81C, 0x814E0603,
+ 0x81C, 0x64500603,
+ 0x81C, 0x63520603,
+ 0x81C, 0x62540603,
+ 0x81C, 0x61560603,
+ 0x81C, 0x60580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x215C0603,
+ 0x81C, 0x205E0603,
+ 0x81C, 0x03600603,
+ 0x81C, 0x02620603,
+ 0x81C, 0x01640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000603,
+ 0x81C, 0xF6020603,
+ 0x81C, 0xF5040603,
+ 0x81C, 0xF4060603,
+ 0x81C, 0xF3080603,
+ 0x81C, 0xF20A0603,
+ 0x81C, 0xF10C0603,
+ 0x81C, 0xF00E0603,
+ 0x81C, 0xEF100603,
+ 0x81C, 0xEE120603,
+ 0x81C, 0xED140603,
+ 0x81C, 0xEC160603,
+ 0x81C, 0xEB180603,
+ 0x81C, 0xEA1A0603,
+ 0x81C, 0xE91C0603,
+ 0x81C, 0xE81E0603,
+ 0x81C, 0xE7200603,
+ 0x81C, 0xE6220603,
+ 0x81C, 0xE5240603,
+ 0x81C, 0xE4260603,
+ 0x81C, 0xE3280603,
+ 0x81C, 0xC42A0603,
+ 0x81C, 0xC32C0603,
+ 0x81C, 0xC22E0603,
+ 0x81C, 0xC1300603,
+ 0x81C, 0xC0320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEF1A0603,
+ 0x81C, 0xEE1C0603,
+ 0x81C, 0xED1E0603,
+ 0x81C, 0xEC200603,
+ 0x81C, 0xEB220603,
+ 0x81C, 0xEA240603,
+ 0x81C, 0xE9260603,
+ 0x81C, 0xE8280603,
+ 0x81C, 0xE72A0603,
+ 0x81C, 0xE62C0603,
+ 0x81C, 0xE52E0603,
+ 0x81C, 0xE4300603,
+ 0x81C, 0xE3320603,
+ 0x81C, 0xE2340603,
+ 0x81C, 0xE1360603,
+ 0x81C, 0xC5380603,
+ 0x81C, 0xC43A0603,
+ 0x81C, 0xC33C0603,
+ 0x81C, 0xC23E0603,
+ 0x81C, 0xC1400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0xA0480603,
+ 0x81C, 0x834A0603,
+ 0x81C, 0x824C0603,
+ 0x81C, 0x814E0603,
+ 0x81C, 0x64500603,
+ 0x81C, 0x63520603,
+ 0x81C, 0x62540603,
+ 0x81C, 0x61560603,
+ 0x81C, 0x25580603,
+ 0x81C, 0x245A0603,
+ 0x81C, 0x235C0603,
+ 0x81C, 0x225E0603,
+ 0x81C, 0x21600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000603,
+ 0x81C, 0xF8020603,
+ 0x81C, 0xF7040603,
+ 0x81C, 0xF6060603,
+ 0x81C, 0xF5080603,
+ 0x81C, 0xF40A0603,
+ 0x81C, 0xF30C0603,
+ 0x81C, 0xF20E0603,
+ 0x81C, 0xF1100603,
+ 0x81C, 0xF0120603,
+ 0x81C, 0xEF140603,
+ 0x81C, 0xEE160603,
+ 0x81C, 0xED180603,
+ 0x81C, 0xEC1A0603,
+ 0x81C, 0xEB1C0603,
+ 0x81C, 0xEA1E0603,
+ 0x81C, 0xE9200603,
+ 0x81C, 0xE8220603,
+ 0x81C, 0xE7240603,
+ 0x81C, 0xE6260603,
+ 0x81C, 0xE5280603,
+ 0x81C, 0xC42A0603,
+ 0x81C, 0xC32C0603,
+ 0x81C, 0xC22E0603,
+ 0x81C, 0xC1300603,
+ 0x81C, 0xC0320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBE000603,
+ 0x81C, 0xBD020603,
+ 0x81C, 0xBC040603,
+ 0x81C, 0xBB060603,
+ 0x81C, 0xBA080603,
+ 0x81C, 0xB90A0603,
+ 0x81C, 0xB80C0603,
+ 0x81C, 0xB70E0603,
+ 0x81C, 0xB6100603,
+ 0x81C, 0xB5120603,
+ 0x81C, 0xB4140603,
+ 0x81C, 0xB3160603,
+ 0x81C, 0xB2180603,
+ 0x81C, 0xB11A0603,
+ 0x81C, 0xB01C0603,
+ 0x81C, 0x921E0603,
+ 0x81C, 0x91200603,
+ 0x81C, 0x90220603,
+ 0x81C, 0x8F240603,
+ 0x81C, 0x8E260603,
+ 0x81C, 0x8D280603,
+ 0x81C, 0x8C2A0603,
+ 0x81C, 0x8B2C0603,
+ 0x81C, 0x8A2E0603,
+ 0x81C, 0x89300603,
+ 0x81C, 0x88320603,
+ 0x81C, 0x6B340603,
+ 0x81C, 0x6A360603,
+ 0x81C, 0x69380603,
+ 0x81C, 0x683A0603,
+ 0x81C, 0x673C0603,
+ 0x81C, 0x663E0603,
+ 0x81C, 0x65400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x424C0603,
+ 0x81C, 0x414E0603,
+ 0x81C, 0x40500603,
+ 0x81C, 0x06520603,
+ 0x81C, 0x05540603,
+ 0x81C, 0x04560603,
+ 0x81C, 0x03580603,
+ 0x81C, 0x025A0603,
+ 0x81C, 0x015C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000603,
+ 0x81C, 0xFA020603,
+ 0x81C, 0xF9040603,
+ 0x81C, 0xF8060603,
+ 0x81C, 0xF7080603,
+ 0x81C, 0xF60A0603,
+ 0x81C, 0xF50C0603,
+ 0x81C, 0xF40E0603,
+ 0x81C, 0xF3100603,
+ 0x81C, 0xF2120603,
+ 0x81C, 0xF1140603,
+ 0x81C, 0xF0160603,
+ 0x81C, 0xEF180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xC23C0603,
+ 0x81C, 0xC13E0603,
+ 0x81C, 0xC0400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0xA0480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x60560603,
+ 0x81C, 0x24580603,
+ 0x81C, 0x235A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x03620603,
+ 0x81C, 0x02640603,
+ 0x81C, 0x01660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000603,
+ 0x81C, 0xF7020603,
+ 0x81C, 0xF6040603,
+ 0x81C, 0xF5060603,
+ 0x81C, 0xF4080603,
+ 0x81C, 0xF30A0603,
+ 0x81C, 0xF20C0603,
+ 0x81C, 0xF10E0603,
+ 0x81C, 0xF0100603,
+ 0x81C, 0xEF120603,
+ 0x81C, 0xEE140603,
+ 0x81C, 0xED160603,
+ 0x81C, 0xEC180603,
+ 0x81C, 0xEB1A0603,
+ 0x81C, 0xEA1C0603,
+ 0x81C, 0xE91E0603,
+ 0x81C, 0xE8200603,
+ 0x81C, 0xE7220603,
+ 0x81C, 0xE6240603,
+ 0x81C, 0xE5260603,
+ 0x81C, 0xE4280603,
+ 0x81C, 0xE32A0603,
+ 0x81C, 0xC42C0603,
+ 0x81C, 0xC32E0603,
+ 0x81C, 0xC2300603,
+ 0x81C, 0xC1320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x65420603,
+ 0x81C, 0x64440603,
+ 0x81C, 0x63460603,
+ 0x81C, 0x62480603,
+ 0x81C, 0x614A0603,
+ 0x81C, 0x424C0603,
+ 0x81C, 0x414E0603,
+ 0x81C, 0x40500603,
+ 0x81C, 0x22520603,
+ 0x81C, 0x21540603,
+ 0x81C, 0x20560603,
+ 0x81C, 0x04580603,
+ 0x81C, 0x035A0603,
+ 0x81C, 0x025C0603,
+ 0x81C, 0x015E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x42560603,
+ 0x81C, 0x41580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000603,
+ 0x81C, 0xF8020603,
+ 0x81C, 0xF7040603,
+ 0x81C, 0xF6060603,
+ 0x81C, 0xF5080603,
+ 0x81C, 0xF40A0603,
+ 0x81C, 0xF30C0603,
+ 0x81C, 0xF20E0603,
+ 0x81C, 0xF1100603,
+ 0x81C, 0xF0120603,
+ 0x81C, 0xEF140603,
+ 0x81C, 0xEE160603,
+ 0x81C, 0xED180603,
+ 0x81C, 0xEC1A0603,
+ 0x81C, 0xEB1C0603,
+ 0x81C, 0xEA1E0603,
+ 0x81C, 0xE9200603,
+ 0x81C, 0xE8220603,
+ 0x81C, 0xE7240603,
+ 0x81C, 0xE6260603,
+ 0x81C, 0xE5280603,
+ 0x81C, 0xE42A0603,
+ 0x81C, 0xC42C0603,
+ 0x81C, 0xC32E0603,
+ 0x81C, 0xC2300603,
+ 0x81C, 0xC1320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x244C0603,
+ 0x81C, 0x234E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x05560603,
+ 0x81C, 0x04580603,
+ 0x81C, 0x035A0603,
+ 0x81C, 0x025C0603,
+ 0x81C, 0x015E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEF1A0603,
+ 0x81C, 0xEE1C0603,
+ 0x81C, 0xED1E0603,
+ 0x81C, 0xEC200603,
+ 0x81C, 0xEB220603,
+ 0x81C, 0xEA240603,
+ 0x81C, 0xE9260603,
+ 0x81C, 0xE8280603,
+ 0x81C, 0xE72A0603,
+ 0x81C, 0xE62C0603,
+ 0x81C, 0xE52E0603,
+ 0x81C, 0xE4300603,
+ 0x81C, 0xE3320603,
+ 0x81C, 0xE2340603,
+ 0x81C, 0xC6360603,
+ 0x81C, 0xC5380603,
+ 0x81C, 0xC43A0603,
+ 0x81C, 0xC33C0603,
+ 0x81C, 0xA63E0603,
+ 0x81C, 0xA5400603,
+ 0x81C, 0xA4420603,
+ 0x81C, 0xA3440603,
+ 0x81C, 0xA2460603,
+ 0x81C, 0xA1480603,
+ 0x81C, 0x834A0603,
+ 0x81C, 0x824C0603,
+ 0x81C, 0x814E0603,
+ 0x81C, 0x64500603,
+ 0x81C, 0x63520603,
+ 0x81C, 0x62540603,
+ 0x81C, 0x61560603,
+ 0x81C, 0x60580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x215C0603,
+ 0x81C, 0x205E0603,
+ 0x81C, 0x03600603,
+ 0x81C, 0x02620603,
+ 0x81C, 0x01640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x42560603,
+ 0x81C, 0x41580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x42560603,
+ 0x81C, 0x41580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBF000603,
+ 0x81C, 0xBF020603,
+ 0x81C, 0xBF040603,
+ 0x81C, 0xBF060603,
+ 0x81C, 0xBF080603,
+ 0x81C, 0xBE0A0603,
+ 0x81C, 0xBD0C0603,
+ 0x81C, 0xBC0E0603,
+ 0x81C, 0xBB100603,
+ 0x81C, 0xBA120603,
+ 0x81C, 0xB9140603,
+ 0x81C, 0xB8160603,
+ 0x81C, 0xB7180603,
+ 0x81C, 0xB61A0603,
+ 0x81C, 0xB51C0603,
+ 0x81C, 0xB41E0603,
+ 0x81C, 0xB1200603,
+ 0x81C, 0xB2220603,
+ 0x81C, 0xB1240603,
+ 0x81C, 0xB0260603,
+ 0x81C, 0xAF280603,
+ 0x81C, 0xAE2A0603,
+ 0x81C, 0xAD2C0603,
+ 0x81C, 0xAC2E0603,
+ 0x81C, 0xAB300603,
+ 0x81C, 0xAA320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0x883C0603,
+ 0x81C, 0x873E0603,
+ 0x81C, 0x86400603,
+ 0x81C, 0x85420603,
+ 0x81C, 0x84440603,
+ 0x81C, 0x83460603,
+ 0x81C, 0x67480603,
+ 0x81C, 0x664A0603,
+ 0x81C, 0x654C0603,
+ 0x81C, 0x644E0603,
+ 0x81C, 0x27500603,
+ 0x81C, 0x26520603,
+ 0x81C, 0x25540603,
+ 0x81C, 0x24560603,
+ 0x81C, 0x23580603,
+ 0x81C, 0x225A0603,
+ 0x81C, 0x215C0603,
+ 0x81C, 0x205E0603,
+ 0x81C, 0x03600603,
+ 0x81C, 0x02620603,
+ 0x81C, 0x01640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000403,
+ 0x81C, 0xFB000603,
+ 0x81C, 0xFA020603,
+ 0x81C, 0xF9040603,
+ 0x81C, 0xF8060603,
+ 0x81C, 0xF7080603,
+ 0x81C, 0xF60A0603,
+ 0x81C, 0xF50C0603,
+ 0x81C, 0xF40E0603,
+ 0x81C, 0xF3100603,
+ 0x81C, 0xF2120603,
+ 0x81C, 0xF1140603,
+ 0x81C, 0xF0160603,
+ 0x81C, 0xEF180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x644E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x60560603,
+ 0x81C, 0x40580603,
+ 0x81C, 0x215A0603,
+ 0x81C, 0x205C0603,
+ 0x81C, 0x035E0603,
+ 0x81C, 0x02600603,
+ 0x81C, 0x01620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000603,
+ 0x81C, 0xF6020603,
+ 0x81C, 0xF5040603,
+ 0x81C, 0xF4060603,
+ 0x81C, 0xF3080603,
+ 0x81C, 0xF20A0603,
+ 0x81C, 0xF10C0603,
+ 0x81C, 0xF00E0603,
+ 0x81C, 0xEF100603,
+ 0x81C, 0xEE120603,
+ 0x81C, 0xED140603,
+ 0x81C, 0xEC160603,
+ 0x81C, 0xEB180603,
+ 0x81C, 0xEA1A0603,
+ 0x81C, 0xE91C0603,
+ 0x81C, 0xE81E0603,
+ 0x81C, 0xE7200603,
+ 0x81C, 0xE6220603,
+ 0x81C, 0xE5240603,
+ 0x81C, 0xE4260603,
+ 0x81C, 0xE3280603,
+ 0x81C, 0xC42A0603,
+ 0x81C, 0xC32C0603,
+ 0x81C, 0xC22E0603,
+ 0x81C, 0xC1300603,
+ 0x81C, 0xC0320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFD000603,
+ 0x81C, 0xFC020603,
+ 0x81C, 0xFB040603,
+ 0x81C, 0xFA060603,
+ 0x81C, 0xF9080603,
+ 0x81C, 0xF80A0603,
+ 0x81C, 0xF70C0603,
+ 0x81C, 0xF60E0603,
+ 0x81C, 0xF5100603,
+ 0x81C, 0xF4120603,
+ 0x81C, 0xF3140603,
+ 0x81C, 0xF2160603,
+ 0x81C, 0xF1180603,
+ 0x81C, 0xF01A0603,
+ 0x81C, 0xEF1C0603,
+ 0x81C, 0xEE1E0603,
+ 0x81C, 0xED200603,
+ 0x81C, 0xEC220603,
+ 0x81C, 0xEB240603,
+ 0x81C, 0xEA260603,
+ 0x81C, 0xE9280603,
+ 0x81C, 0xE82A0603,
+ 0x81C, 0xE72C0603,
+ 0x81C, 0xE62E0603,
+ 0x81C, 0xE5300603,
+ 0x81C, 0xE4320603,
+ 0x81C, 0xE3340603,
+ 0x81C, 0xC6360603,
+ 0x81C, 0xC5380603,
+ 0x81C, 0xC43A0603,
+ 0x81C, 0xC33C0603,
+ 0x81C, 0xA63E0603,
+ 0x81C, 0xA5400603,
+ 0x81C, 0xA4420603,
+ 0x81C, 0xA3440603,
+ 0x81C, 0xA2460603,
+ 0x81C, 0xA1480603,
+ 0x81C, 0x834A0603,
+ 0x81C, 0x824C0603,
+ 0x81C, 0x814E0603,
+ 0x81C, 0x64500603,
+ 0x81C, 0x63520603,
+ 0x81C, 0x62540603,
+ 0x81C, 0x61560603,
+ 0x81C, 0x60580603,
+ 0x81C, 0x235A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x03620603,
+ 0x81C, 0x02640603,
+ 0x81C, 0x01660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x405A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBD000703,
+ 0x81C, 0xBC020703,
+ 0x81C, 0xBB040703,
+ 0x81C, 0xBA060703,
+ 0x81C, 0xB9080703,
+ 0x81C, 0xB80A0703,
+ 0x81C, 0xB70C0703,
+ 0x81C, 0xB60E0703,
+ 0x81C, 0xB5100703,
+ 0x81C, 0xB4120703,
+ 0x81C, 0xB3140703,
+ 0x81C, 0xB2160703,
+ 0x81C, 0xB1180703,
+ 0x81C, 0xB01A0703,
+ 0x81C, 0xAF1C0703,
+ 0x81C, 0xAE1E0703,
+ 0x81C, 0xAD200703,
+ 0x81C, 0xAC220703,
+ 0x81C, 0x8E240703,
+ 0x81C, 0x8D260703,
+ 0x81C, 0x8C280703,
+ 0x81C, 0x6F2A0703,
+ 0x81C, 0x6E2C0703,
+ 0x81C, 0x6D2E0703,
+ 0x81C, 0x6C300703,
+ 0x81C, 0x6B320703,
+ 0x81C, 0x6A340703,
+ 0x81C, 0x69360703,
+ 0x81C, 0x68380703,
+ 0x81C, 0x673A0703,
+ 0x81C, 0x663C0703,
+ 0x81C, 0x653E0703,
+ 0x81C, 0x64400703,
+ 0x81C, 0x63420703,
+ 0x81C, 0x62440703,
+ 0x81C, 0x61460703,
+ 0x81C, 0x60480703,
+ 0x81C, 0x424A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x06500703,
+ 0x81C, 0x05520703,
+ 0x81C, 0x04540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007C0703,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000703,
+ 0x81C, 0xF6020703,
+ 0x81C, 0xF5040703,
+ 0x81C, 0xF4060703,
+ 0x81C, 0xF3080703,
+ 0x81C, 0xF20A0703,
+ 0x81C, 0xF10C0703,
+ 0x81C, 0xF00E0703,
+ 0x81C, 0xEF100703,
+ 0x81C, 0xEE120703,
+ 0x81C, 0xED140703,
+ 0x81C, 0xEC160703,
+ 0x81C, 0xEB180703,
+ 0x81C, 0xEA1A0703,
+ 0x81C, 0xE91C0703,
+ 0x81C, 0xCA1E0703,
+ 0x81C, 0xC9200703,
+ 0x81C, 0xC8220703,
+ 0x81C, 0xC7240703,
+ 0x81C, 0xC6260703,
+ 0x81C, 0xC5280703,
+ 0x81C, 0xC42A0703,
+ 0x81C, 0xC32C0703,
+ 0x81C, 0xC22E0703,
+ 0x81C, 0xC1300703,
+ 0x81C, 0xA4320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x22500703,
+ 0x81C, 0x21520703,
+ 0x81C, 0x20540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xF0180703,
+ 0x81C, 0xEF1A0703,
+ 0x81C, 0xEE1C0703,
+ 0x81C, 0xED1E0703,
+ 0x81C, 0xEC200703,
+ 0x81C, 0xEB220703,
+ 0x81C, 0xEA240703,
+ 0x81C, 0xE9260703,
+ 0x81C, 0xE8280703,
+ 0x81C, 0xE72A0703,
+ 0x81C, 0xE62C0703,
+ 0x81C, 0xE52E0703,
+ 0x81C, 0xE4300703,
+ 0x81C, 0xE3320703,
+ 0x81C, 0xE2340703,
+ 0x81C, 0xC6360703,
+ 0x81C, 0xC5380703,
+ 0x81C, 0xC43A0703,
+ 0x81C, 0xC33C0703,
+ 0x81C, 0xA63E0703,
+ 0x81C, 0xA5400703,
+ 0x81C, 0xA4420703,
+ 0x81C, 0xA3440703,
+ 0x81C, 0xA2460703,
+ 0x81C, 0x84480703,
+ 0x81C, 0x834A0703,
+ 0x81C, 0x824C0703,
+ 0x81C, 0x814E0703,
+ 0x81C, 0x80500703,
+ 0x81C, 0x63520703,
+ 0x81C, 0x62540703,
+ 0x81C, 0x61560703,
+ 0x81C, 0x60580703,
+ 0x81C, 0x225A0703,
+ 0x81C, 0x055C0703,
+ 0x81C, 0x045E0703,
+ 0x81C, 0x03600703,
+ 0x81C, 0x02620703,
+ 0x81C, 0x01640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000703,
+ 0x81C, 0xF6020703,
+ 0x81C, 0xF5040703,
+ 0x81C, 0xF4060703,
+ 0x81C, 0xF3080703,
+ 0x81C, 0xF20A0703,
+ 0x81C, 0xF10C0703,
+ 0x81C, 0xF00E0703,
+ 0x81C, 0xEF100703,
+ 0x81C, 0xEE120703,
+ 0x81C, 0xED140703,
+ 0x81C, 0xEC160703,
+ 0x81C, 0xEB180703,
+ 0x81C, 0xEA1A0703,
+ 0x81C, 0xE91C0703,
+ 0x81C, 0xCA1E0703,
+ 0x81C, 0xC9200703,
+ 0x81C, 0xC8220703,
+ 0x81C, 0xC7240703,
+ 0x81C, 0xC6260703,
+ 0x81C, 0xC5280703,
+ 0x81C, 0xC42A0703,
+ 0x81C, 0xC32C0703,
+ 0x81C, 0xC22E0703,
+ 0x81C, 0xC1300703,
+ 0x81C, 0xA4320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x22500703,
+ 0x81C, 0x21520703,
+ 0x81C, 0x20540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xF0160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xE1340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xC23C0703,
+ 0x81C, 0xC13E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x64500703,
+ 0x81C, 0x63520703,
+ 0x81C, 0x62540703,
+ 0x81C, 0x61560703,
+ 0x81C, 0x60580703,
+ 0x81C, 0x235A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000703,
+ 0x81C, 0xF8020703,
+ 0x81C, 0xF7040703,
+ 0x81C, 0xF6060703,
+ 0x81C, 0xF5080703,
+ 0x81C, 0xF40A0703,
+ 0x81C, 0xF30C0703,
+ 0x81C, 0xF20E0703,
+ 0x81C, 0xF1100703,
+ 0x81C, 0xF0120703,
+ 0x81C, 0xEF140703,
+ 0x81C, 0xEE160703,
+ 0x81C, 0xED180703,
+ 0x81C, 0xEC1A0703,
+ 0x81C, 0xEB1C0703,
+ 0x81C, 0xEA1E0703,
+ 0x81C, 0xC9200703,
+ 0x81C, 0xC8220703,
+ 0x81C, 0xC7240703,
+ 0x81C, 0xC6260703,
+ 0x81C, 0xC5280703,
+ 0x81C, 0xC42A0703,
+ 0x81C, 0xC32C0703,
+ 0x81C, 0xC22E0703,
+ 0x81C, 0xC1300703,
+ 0x81C, 0xC0320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x22500703,
+ 0x81C, 0x21520703,
+ 0x81C, 0x20540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBE000703,
+ 0x81C, 0xBD020703,
+ 0x81C, 0xBC040703,
+ 0x81C, 0xBB060703,
+ 0x81C, 0xBA080703,
+ 0x81C, 0xB90A0703,
+ 0x81C, 0xB80C0703,
+ 0x81C, 0xB70E0703,
+ 0x81C, 0xB6100703,
+ 0x81C, 0xB5120703,
+ 0x81C, 0xB4140703,
+ 0x81C, 0xB3160703,
+ 0x81C, 0xB2180703,
+ 0x81C, 0xB11A0703,
+ 0x81C, 0xB01C0703,
+ 0x81C, 0x921E0703,
+ 0x81C, 0x91200703,
+ 0x81C, 0x90220703,
+ 0x81C, 0x8F240703,
+ 0x81C, 0x8E260703,
+ 0x81C, 0x8D280703,
+ 0x81C, 0x8C2A0703,
+ 0x81C, 0x6F2C0703,
+ 0x81C, 0x6E2E0703,
+ 0x81C, 0x6D300703,
+ 0x81C, 0x6C320703,
+ 0x81C, 0x6B340703,
+ 0x81C, 0x6A360703,
+ 0x81C, 0x69380703,
+ 0x81C, 0x683A0703,
+ 0x81C, 0x673C0703,
+ 0x81C, 0x663E0703,
+ 0x81C, 0x65400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x424C0703,
+ 0x81C, 0x414E0703,
+ 0x81C, 0x40500703,
+ 0x81C, 0x06520703,
+ 0x81C, 0x05540703,
+ 0x81C, 0x04560703,
+ 0x81C, 0x03580703,
+ 0x81C, 0x025A0703,
+ 0x81C, 0x015C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xF0160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xC23C0703,
+ 0x81C, 0xC13E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x43540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x235A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000703,
+ 0x81C, 0xF7020703,
+ 0x81C, 0xF6040703,
+ 0x81C, 0xF5060703,
+ 0x81C, 0xF4080703,
+ 0x81C, 0xF30A0703,
+ 0x81C, 0xF20C0703,
+ 0x81C, 0xF10E0703,
+ 0x81C, 0xF0100703,
+ 0x81C, 0xEF120703,
+ 0x81C, 0xEE140703,
+ 0x81C, 0xED160703,
+ 0x81C, 0xEC180703,
+ 0x81C, 0xEB1A0703,
+ 0x81C, 0xEA1C0703,
+ 0x81C, 0xE91E0703,
+ 0x81C, 0xCA200703,
+ 0x81C, 0xC9220703,
+ 0x81C, 0xC8240703,
+ 0x81C, 0xC7260703,
+ 0x81C, 0xC6280703,
+ 0x81C, 0xC52A0703,
+ 0x81C, 0xC42C0703,
+ 0x81C, 0xC32E0703,
+ 0x81C, 0xC2300703,
+ 0x81C, 0xC1320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x65420703,
+ 0x81C, 0x64440703,
+ 0x81C, 0x63460703,
+ 0x81C, 0x62480703,
+ 0x81C, 0x614A0703,
+ 0x81C, 0x424C0703,
+ 0x81C, 0x414E0703,
+ 0x81C, 0x40500703,
+ 0x81C, 0x22520703,
+ 0x81C, 0x21540703,
+ 0x81C, 0x20560703,
+ 0x81C, 0x04580703,
+ 0x81C, 0x035A0703,
+ 0x81C, 0x025C0703,
+ 0x81C, 0x015E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x405A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000703,
+ 0x81C, 0xF7020703,
+ 0x81C, 0xF6040703,
+ 0x81C, 0xF5060703,
+ 0x81C, 0xF4080703,
+ 0x81C, 0xF30A0703,
+ 0x81C, 0xF20C0703,
+ 0x81C, 0xF10E0703,
+ 0x81C, 0xF0100703,
+ 0x81C, 0xEF120703,
+ 0x81C, 0xEE140703,
+ 0x81C, 0xED160703,
+ 0x81C, 0xEC180703,
+ 0x81C, 0xEB1A0703,
+ 0x81C, 0xEA1C0703,
+ 0x81C, 0xE91E0703,
+ 0x81C, 0xCA200703,
+ 0x81C, 0xC9220703,
+ 0x81C, 0xC8240703,
+ 0x81C, 0xC7260703,
+ 0x81C, 0xC6280703,
+ 0x81C, 0xC52A0703,
+ 0x81C, 0xC42C0703,
+ 0x81C, 0xC32E0703,
+ 0x81C, 0xC2300703,
+ 0x81C, 0xC1320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x234C0703,
+ 0x81C, 0x224E0703,
+ 0x81C, 0x21500703,
+ 0x81C, 0x20520703,
+ 0x81C, 0x06540703,
+ 0x81C, 0x05560703,
+ 0x81C, 0x04580703,
+ 0x81C, 0x035A0703,
+ 0x81C, 0x025C0703,
+ 0x81C, 0x015E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xF0180703,
+ 0x81C, 0xEF1A0703,
+ 0x81C, 0xEE1C0703,
+ 0x81C, 0xED1E0703,
+ 0x81C, 0xEC200703,
+ 0x81C, 0xEB220703,
+ 0x81C, 0xEA240703,
+ 0x81C, 0xE9260703,
+ 0x81C, 0xE8280703,
+ 0x81C, 0xE72A0703,
+ 0x81C, 0xE62C0703,
+ 0x81C, 0xE52E0703,
+ 0x81C, 0xE4300703,
+ 0x81C, 0xE3320703,
+ 0x81C, 0xE2340703,
+ 0x81C, 0xC6360703,
+ 0x81C, 0xC5380703,
+ 0x81C, 0xC43A0703,
+ 0x81C, 0xC33C0703,
+ 0x81C, 0xA63E0703,
+ 0x81C, 0xA5400703,
+ 0x81C, 0xA4420703,
+ 0x81C, 0xA3440703,
+ 0x81C, 0xA2460703,
+ 0x81C, 0x84480703,
+ 0x81C, 0x834A0703,
+ 0x81C, 0x824C0703,
+ 0x81C, 0x814E0703,
+ 0x81C, 0x80500703,
+ 0x81C, 0x63520703,
+ 0x81C, 0x62540703,
+ 0x81C, 0x61560703,
+ 0x81C, 0x60580703,
+ 0x81C, 0x225A0703,
+ 0x81C, 0x055C0703,
+ 0x81C, 0x045E0703,
+ 0x81C, 0x03600703,
+ 0x81C, 0x02620703,
+ 0x81C, 0x01640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xEF160703,
+ 0x81C, 0xEE180703,
+ 0x81C, 0xED1A0703,
+ 0x81C, 0xEC1C0703,
+ 0x81C, 0xEB1E0703,
+ 0x81C, 0xEA200703,
+ 0x81C, 0xE9220703,
+ 0x81C, 0xE8240703,
+ 0x81C, 0xE7260703,
+ 0x81C, 0xE6280703,
+ 0x81C, 0xE52A0703,
+ 0x81C, 0xE42C0703,
+ 0x81C, 0xE32E0703,
+ 0x81C, 0xE2300703,
+ 0x81C, 0xE1320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x405A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xEF160703,
+ 0x81C, 0xEE180703,
+ 0x81C, 0xED1A0703,
+ 0x81C, 0xEC1C0703,
+ 0x81C, 0xEB1E0703,
+ 0x81C, 0xEA200703,
+ 0x81C, 0xE9220703,
+ 0x81C, 0xE8240703,
+ 0x81C, 0xE7260703,
+ 0x81C, 0xE6280703,
+ 0x81C, 0xE52A0703,
+ 0x81C, 0xE42C0703,
+ 0x81C, 0xE32E0703,
+ 0x81C, 0xE2300703,
+ 0x81C, 0xE1320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x405A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBF000703,
+ 0x81C, 0xBF020703,
+ 0x81C, 0xBF040703,
+ 0x81C, 0xBF060703,
+ 0x81C, 0xBF080703,
+ 0x81C, 0xBE0A0703,
+ 0x81C, 0xBD0C0703,
+ 0x81C, 0xBC0E0703,
+ 0x81C, 0xBB100703,
+ 0x81C, 0xBA120703,
+ 0x81C, 0xB9140703,
+ 0x81C, 0xB8160703,
+ 0x81C, 0xB7180703,
+ 0x81C, 0xB61A0703,
+ 0x81C, 0xB51C0703,
+ 0x81C, 0xB41E0703,
+ 0x81C, 0xB1200703,
+ 0x81C, 0xB2220703,
+ 0x81C, 0xB1240703,
+ 0x81C, 0xB0260703,
+ 0x81C, 0xAF280703,
+ 0x81C, 0xAE2A0703,
+ 0x81C, 0xAD2C0703,
+ 0x81C, 0xAC2E0703,
+ 0x81C, 0xAB300703,
+ 0x81C, 0xAA320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0x883C0703,
+ 0x81C, 0x873E0703,
+ 0x81C, 0x86400703,
+ 0x81C, 0x85420703,
+ 0x81C, 0x84440703,
+ 0x81C, 0x83460703,
+ 0x81C, 0x67480703,
+ 0x81C, 0x664A0703,
+ 0x81C, 0x654C0703,
+ 0x81C, 0x644E0703,
+ 0x81C, 0x27500703,
+ 0x81C, 0x26520703,
+ 0x81C, 0x25540703,
+ 0x81C, 0x24560703,
+ 0x81C, 0x23580703,
+ 0x81C, 0x225A0703,
+ 0x81C, 0x215C0703,
+ 0x81C, 0x205E0703,
+ 0x81C, 0x03600703,
+ 0x81C, 0x02620703,
+ 0x81C, 0x01640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000403,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xF0160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0x84460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x60560703,
+ 0x81C, 0x22580703,
+ 0x81C, 0x055A0703,
+ 0x81C, 0x045C0703,
+ 0x81C, 0x035E0703,
+ 0x81C, 0x02600703,
+ 0x81C, 0x01620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000703,
+ 0x81C, 0xF6020703,
+ 0x81C, 0xF5040703,
+ 0x81C, 0xF4060703,
+ 0x81C, 0xF3080703,
+ 0x81C, 0xF20A0703,
+ 0x81C, 0xF10C0703,
+ 0x81C, 0xF00E0703,
+ 0x81C, 0xEF100703,
+ 0x81C, 0xEE120703,
+ 0x81C, 0xED140703,
+ 0x81C, 0xEC160703,
+ 0x81C, 0xEB180703,
+ 0x81C, 0xEA1A0703,
+ 0x81C, 0xE91C0703,
+ 0x81C, 0xCA1E0703,
+ 0x81C, 0xC9200703,
+ 0x81C, 0xC8220703,
+ 0x81C, 0xC7240703,
+ 0x81C, 0xC6260703,
+ 0x81C, 0xC5280703,
+ 0x81C, 0xC42A0703,
+ 0x81C, 0xC32C0703,
+ 0x81C, 0xC22E0703,
+ 0x81C, 0xC1300703,
+ 0x81C, 0xA4320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x22500703,
+ 0x81C, 0x21520703,
+ 0x81C, 0x20540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xF0180703,
+ 0x81C, 0xEF1A0703,
+ 0x81C, 0xEE1C0703,
+ 0x81C, 0xED1E0703,
+ 0x81C, 0xEC200703,
+ 0x81C, 0xEB220703,
+ 0x81C, 0xEA240703,
+ 0x81C, 0xE9260703,
+ 0x81C, 0xE8280703,
+ 0x81C, 0xE72A0703,
+ 0x81C, 0xE62C0703,
+ 0x81C, 0xE52E0703,
+ 0x81C, 0xE4300703,
+ 0x81C, 0xE3320703,
+ 0x81C, 0xE2340703,
+ 0x81C, 0xC6360703,
+ 0x81C, 0xC5380703,
+ 0x81C, 0xC43A0703,
+ 0x81C, 0xC33C0703,
+ 0x81C, 0xA63E0703,
+ 0x81C, 0xA5400703,
+ 0x81C, 0xA4420703,
+ 0x81C, 0xA3440703,
+ 0x81C, 0xA2460703,
+ 0x81C, 0x84480703,
+ 0x81C, 0x834A0703,
+ 0x81C, 0x824C0703,
+ 0x81C, 0x814E0703,
+ 0x81C, 0x80500703,
+ 0x81C, 0x63520703,
+ 0x81C, 0x62540703,
+ 0x81C, 0x61560703,
+ 0x81C, 0x60580703,
+ 0x81C, 0x235A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x03620703,
+ 0x81C, 0x02640703,
+ 0x81C, 0x01660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0xA0000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0xB0000000, 0x00000000,
+
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822b_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8822b_bb[] = {
+ 0x800, 0x9020D010,
+ 0x804, 0x800181A0,
+ 0x808, 0x0E028233,
+ 0x80C, 0x10000013,
+ 0x810, 0x22101243,
+ 0x814, 0x020C3D11,
+ 0x818, 0x84A10385,
+ 0x81C, 0x1E1E081F,
+ 0x820, 0x0001AAAA,
+ 0x824, 0x00030FE0,
+ 0x828, 0x0000CCCC,
+ 0x82C, 0x75CB7010,
+ 0x830, 0x79A0EAAA,
+ 0x834, 0x072E6986,
+ 0x838, 0x87766441,
+ 0x83C, 0x9194B2B7,
+ 0x840, 0x171750E0,
+ 0x844, 0x4D3D7CDB,
+ 0x848, 0x4AD0408B,
+ 0x84C, 0x6AFBF7A5,
+ 0x850, 0x28A74706,
+ 0x854, 0x0001520C,
+ 0x858, 0x4060C000,
+ 0x85C, 0x74010160,
+ 0x860, 0x68A7C321,
+ 0x864, 0x79F27032,
+ 0x868, 0x8CA7A314,
+ 0x86C, 0x778C2878,
+ 0x870, 0x77777777,
+ 0x874, 0x27612C2E,
+ 0x878, 0xC0003152,
+ 0x87C, 0x5C8FC000,
+ 0x880, 0x00000000,
+ 0x884, 0x00000000,
+ 0x888, 0x00000000,
+ 0x88C, 0x00000000,
+ 0x890, 0x00000000,
+ 0x894, 0x00000000,
+ 0x898, 0x00000000,
+ 0x89C, 0x00000000,
+ 0x8A0, 0x00000013,
+ 0x8A4, 0x7F7F7F7F,
+ 0x8A8, 0x2202033E,
+ 0x8AC, 0xF00F000A,
+ 0x8B0, 0x00000600,
+ 0x8B4, 0x000FC080,
+ 0x8B8, 0xEC0057F7,
+ 0x8BC, 0xACB520A3,
+ 0x8C0, 0xFFE04020,
+ 0x8C4, 0x47C00000,
+ 0x8C8, 0x000251A5,
+ 0x8CC, 0x08108492,
+ 0x8D0, 0x0000B800,
+ 0x8D4, 0x860308A0,
+ 0x8D8, 0x29095612,
+ 0x8DC, 0x00000000,
+ 0x8E0, 0x32D16777,
+ 0x8E4, 0x4C098935,
+ 0x8E8, 0xFFFFC42C,
+ 0x8EC, 0x99999999,
+ 0x8F0, 0x00009999,
+ 0x8F4, 0x00D80FA1,
+ 0x8F8, 0x40000080,
+ 0x8FC, 0x00000130,
+ 0x900, 0x00800000,
+ 0x904, 0x00000000,
+ 0x908, 0x00000000,
+ 0x90C, 0xD3000000,
+ 0x910, 0x0000FC00,
+ 0x914, 0xC6380000,
+ 0x918, 0x1C1028C0,
+ 0x91C, 0x64B11A1C,
+ 0x920, 0xE0767233,
+ 0x924, 0x855A2500,
+ 0x928, 0x4AB0E4E4,
+ 0x92C, 0xFFFEB200,
+ 0x930, 0xFFFFFFFE,
+ 0x934, 0x001FFFFF,
+ 0x938, 0x00008480,
+ 0x93C, 0xE41C0642,
+ 0x940, 0x0E470430,
+ 0x944, 0x00000000,
+ 0x948, 0xAC000000,
+ 0x94C, 0x10000083,
+ 0x950, 0x32010080,
+ 0x954, 0x84510080,
+ 0x958, 0x00000001,
+ 0x95C, 0x04248000,
+ 0x960, 0x00000000,
+ 0x964, 0x00000000,
+ 0x968, 0x00000000,
+ 0x96C, 0x00000000,
+ 0x970, 0x00001FFF,
+ 0x974, 0x44000FFF,
+ 0x978, 0x00000000,
+ 0x97C, 0x00000000,
+ 0x980, 0x00000000,
+ 0x984, 0x00000000,
+ 0x988, 0x00000000,
+ 0x98C, 0x43440000,
+ 0x990, 0x27100000,
+ 0x994, 0xFFFF0100,
+ 0x998, 0xFFFFFF5C,
+ 0x99C, 0xFFFFFFFF,
+ 0x9A0, 0x000000FF,
+ 0x9A4, 0x80000088,
+ 0x9A8, 0x0C2F0000,
+ 0x9AC, 0x01560000,
+ 0x9B0, 0x70000000,
+ 0x9B4, 0x00000000,
+ 0x9B8, 0x00000000,
+ 0x9BC, 0x00000000,
+ 0x9C0, 0x00000000,
+ 0x9C4, 0x00000000,
+ 0x9C8, 0x00000000,
+ 0x9CC, 0x00000000,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E0, 0x00000000,
+ 0x9E4, 0x02000402,
+ 0x9E8, 0x000022D4,
+ 0x9EC, 0x00000000,
+ 0x9F0, 0x00010080,
+ 0x9F4, 0x00000000,
+ 0x9F8, 0x00000000,
+ 0x9FC, 0xEFFFF7F7,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x81FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E20100F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x84880000,
+ 0xA24, 0x384F6577,
+ 0xA28, 0x00001525,
+ 0xA2C, 0x00920000,
+ 0xA70, 0x101FFF00,
+ 0xA74, 0x00000148,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x218675B2,
+ 0xA84, 0x80208C00,
+ 0xA88, 0x040C0000,
+ 0xA8C, 0x12345678,
+ 0xA90, 0xABCDEF00,
+ 0xA94, 0x001B1B89,
+ 0xA98, 0x030A0000,
+ 0xA9C, 0x00060000,
+ 0xAA0, 0x00000000,
+ 0xAA4, 0x0004000F,
+ 0xAA8, 0x00000200,
+ 0xB00, 0xE1000440,
+ 0xB04, 0x00800000,
+ 0xB08, 0xFF02030B,
+ 0xB0C, 0x01EAA406,
+ 0xB10, 0x00030690,
+ 0xB14, 0x006000FA,
+ 0xB18, 0x00000002,
+ 0xB1C, 0x00000002,
+ 0xB20, 0x4B00001F,
+ 0xB24, 0x4E8E3E40,
+ 0xB28, 0x03020100,
+ 0xB2C, 0x07060504,
+ 0xB30, 0x0B0A0908,
+ 0xB34, 0x0F0E0D0C,
+ 0xB38, 0x13121110,
+ 0xB3C, 0x0000003A,
+ 0xB40, 0x00000000,
+ 0xB44, 0x80000000,
+ 0xB48, 0x3F0000FA,
+ 0xB4C, 0x88C80020,
+ 0xB50, 0x00000000,
+ 0xB54, 0x00004241,
+ 0xB58, 0xE0008208,
+ 0xB5C, 0x41EFFFF9,
+ 0xB60, 0x00000000,
+ 0xB64, 0x00200063,
+ 0xB68, 0x0000003A,
+ 0xB6C, 0x00000102,
+ 0xB70, 0x4E6D1870,
+ 0xB74, 0x03020100,
+ 0xB78, 0x07060504,
+ 0xB7C, 0x0B0A0908,
+ 0xB80, 0x0F0E0D0C,
+ 0xB84, 0x13121110,
+ 0xB88, 0x00000000,
+ 0xB8C, 0x00000000,
+ 0xC00, 0x00000007,
+ 0xC04, 0x00000020,
+ 0xC08, 0x60403231,
+ 0xC0C, 0x00012345,
+ 0xC10, 0x00000100,
+ 0xC14, 0x01000000,
+ 0xC18, 0x00000000,
+ 0xC1C, 0x40040053,
+ 0xC20, 0x40020103,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x00000000,
+ 0xC34, 0x00000000,
+ 0xC38, 0x00000000,
+ 0xC3C, 0x00000000,
+ 0xC40, 0x00000000,
+ 0xC44, 0x00000000,
+ 0xC48, 0x00000000,
+ 0xC4C, 0x00000000,
+ 0xC50, 0x00000020,
+ 0xC54, 0x00000000,
+ 0xC58, 0xD8020402,
+ 0xC5C, 0xDE000120,
+ 0xC68, 0x5979993F,
+ 0xC6C, 0x0000122A,
+ 0xC70, 0x99795979,
+ 0xC74, 0x99795979,
+ 0xC78, 0x99799979,
+ 0xC7C, 0x99791979,
+ 0xC80, 0x19791979,
+ 0xC84, 0x19791979,
+ 0xC88, 0x00000000,
+ 0xC8C, 0x07000000,
+ 0xC94, 0x01000100,
+ 0xC98, 0x201C8000,
+ 0xC9C, 0x00000000,
+ 0xCA0, 0x0000A555,
+ 0xCA4, 0x08040201,
+ 0xCA8, 0x80402010,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x77777777,
+ 0xCB4, 0x00007777,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x00000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x00000000,
+ 0xCDC, 0x00000000,
+ 0xCE0, 0x00000000,
+ 0xCE4, 0x00000000,
+ 0xCE8, 0x00000000,
+ 0xCEC, 0x00000000,
+ 0xE00, 0x00000007,
+ 0xE04, 0x00000020,
+ 0xE08, 0x60403231,
+ 0xE0C, 0x00012345,
+ 0xE10, 0x00000100,
+ 0xE14, 0x01000000,
+ 0xE18, 0x00000000,
+ 0xE1C, 0x40040053,
+ 0xE20, 0x40020103,
+ 0xE24, 0x00000000,
+ 0xE28, 0x00000000,
+ 0xE2C, 0x00000000,
+ 0xE30, 0x00000000,
+ 0xE34, 0x00000000,
+ 0xE38, 0x00000000,
+ 0xE3C, 0x00000000,
+ 0xE40, 0x00000000,
+ 0xE44, 0x00000000,
+ 0xE48, 0x00000000,
+ 0xE4C, 0x00000000,
+ 0xE50, 0x00000020,
+ 0xE54, 0x00000000,
+ 0xE58, 0xD8120402,
+ 0xE5C, 0xDE000120,
+ 0xE68, 0x5979993F,
+ 0xE6C, 0x0000122A,
+ 0xE70, 0x99795979,
+ 0xE74, 0x99795979,
+ 0xE78, 0x99799979,
+ 0xE7C, 0x99791979,
+ 0xE80, 0x19791979,
+ 0xE84, 0x19791979,
+ 0xE88, 0x00000000,
+ 0xE8C, 0x07000000,
+ 0xE94, 0x01000100,
+ 0xE98, 0x201C8000,
+ 0xE9C, 0x00000000,
+ 0xEA0, 0x0000A555,
+ 0xEA4, 0x08040201,
+ 0xEA8, 0x80402010,
+ 0xEAC, 0x00000000,
+ 0xEB0, 0x77777777,
+ 0xEB4, 0x00007777,
+ 0xEB8, 0x00000000,
+ 0xEBC, 0x00000000,
+ 0xEC0, 0x00000000,
+ 0xEC4, 0x00000000,
+ 0xEC8, 0x00000000,
+ 0xECC, 0x00000000,
+ 0xED0, 0x00000000,
+ 0xED4, 0x00000000,
+ 0xED8, 0x00000000,
+ 0xEDC, 0x00000000,
+ 0xEE0, 0x00000000,
+ 0xEE4, 0x00000000,
+ 0xEE8, 0x00000000,
+ 0xEEC, 0x00000000,
+ 0x1900, 0x00000000,
+ 0x1904, 0x00238000,
+ 0x1908, 0x00000000,
+ 0x190C, 0x00000000,
+ 0x1910, 0x00000000,
+ 0x1914, 0x00000000,
+ 0x1918, 0x00000000,
+ 0x191C, 0x00000000,
+ 0x1920, 0x00000000,
+ 0x1924, 0x00000000,
+ 0x1928, 0x00000000,
+ 0x192C, 0x00000000,
+ 0x1930, 0x00000000,
+ 0x1934, 0x00000000,
+ 0x1938, 0x00000000,
+ 0x193C, 0x00000000,
+ 0x1940, 0x00000000,
+ 0x1944, 0x00000000,
+ 0x1948, 0x00000000,
+ 0x194C, 0x00000000,
+ 0x1950, 0x00000000,
+ 0x1954, 0x00000000,
+ 0x1958, 0x00000000,
+ 0x195C, 0x00000000,
+ 0x1960, 0x00000000,
+ 0x1964, 0x00000000,
+ 0x1968, 0x00000000,
+ 0x196C, 0x00000000,
+ 0x1970, 0x00000000,
+ 0x1974, 0x00000000,
+ 0x1978, 0x00000000,
+ 0x197C, 0x00000000,
+ 0x1980, 0x00000000,
+ 0x1984, 0x03000000,
+ 0x1988, 0x21401E88,
+ 0x198C, 0x00004000,
+ 0x1990, 0x00000000,
+ 0x1994, 0x00000000,
+ 0x1998, 0x00000053,
+ 0x199C, 0x00000000,
+ 0x19A0, 0x00000000,
+ 0x19A4, 0x00000000,
+ 0x19A8, 0x00000000,
+ 0x19AC, 0x0E47E47F,
+ 0x19B0, 0x00000000,
+ 0x19B4, 0x0E47E47F,
+ 0x19B8, 0x00000000,
+ 0x19BC, 0x00000000,
+ 0x19C0, 0x00000000,
+ 0x19C4, 0x00000000,
+ 0x19C8, 0x00000000,
+ 0x19CC, 0x00000000,
+ 0x19D0, 0x00000000,
+ 0x19D4, 0xAAAAAAAA,
+ 0x19D8, 0x00000AAA,
+ 0x19DC, 0x133E0F37,
+ 0x19E0, 0x00000000,
+ 0x19E4, 0x00000000,
+ 0x19E8, 0x00000000,
+ 0x19EC, 0x00000000,
+ 0x19F0, 0x00000000,
+ 0x19F4, 0x00000000,
+ 0x19F8, 0x01A00000,
+ 0x19FC, 0x00000000,
+ 0x1C00, 0x00000100,
+ 0x1C04, 0x01000000,
+ 0x1C08, 0x00000100,
+ 0x1C0C, 0x01000000,
+ 0x1C10, 0x00000100,
+ 0x1C14, 0x01000000,
+ 0x1C18, 0x00000100,
+ 0x1C1C, 0x01000000,
+ 0x1C20, 0x00000100,
+ 0x1C24, 0x01000000,
+ 0x1C28, 0x00000100,
+ 0x1C2C, 0x01000000,
+ 0x1C30, 0x00000100,
+ 0x1C34, 0x01000000,
+ 0x1C38, 0x00000000,
+ 0x1C3C, 0x00000000,
+ 0x1C40, 0x000C0100,
+ 0x1C44, 0x000000F3,
+ 0x1C48, 0x1A8249A8,
+ 0x1C4C, 0x1461C826,
+ 0x1C50, 0x0001469E,
+ 0x1C54, 0x58D158D1,
+ 0x1C58, 0x04490088,
+ 0x1C5C, 0x04004400,
+ 0x1C60, 0x00000000,
+ 0x1C64, 0x04004400,
+ 0x1C68, 0x00000100,
+ 0x1C6C, 0x01000000,
+ 0x1C70, 0x00000100,
+ 0x1C74, 0x01000000,
+ 0x1C78, 0x00000000,
+ 0x1C7C, 0x00000010,
+ 0x1C80, 0x5FFF5FFF,
+ 0x1C84, 0x5FFF5FFF,
+ 0x1C88, 0x5FFF5FFF,
+ 0x1C8C, 0x5FFF5FFF,
+ 0x1C90, 0x5FFF5FFF,
+ 0x1C94, 0x5FFF5FFF,
+ 0x1C98, 0x5FFF5FFF,
+ 0x1C9C, 0x5FFF5FFF,
+ 0x1CA0, 0x00000100,
+ 0x1CA4, 0x01000000,
+ 0x1CA8, 0x00000100,
+ 0x1CAC, 0x5FFF5FFF,
+ 0x1CB0, 0x00000100,
+ 0x1CB4, 0x01000000,
+ 0x1CB8, 0x00000000,
+ 0x1CBC, 0x00000000,
+ 0x1CC0, 0x00000100,
+ 0x1CC4, 0x01000000,
+ 0x1CC8, 0x00000100,
+ 0x1CCC, 0x01000000,
+ 0x1CD0, 0x00000100,
+ 0x1CD4, 0x01000000,
+ 0x1CD8, 0x00000100,
+ 0x1CDC, 0x01000000,
+ 0x1CE0, 0x00000100,
+ 0x1CE4, 0x01000000,
+ 0x1CE8, 0x00000100,
+ 0x1CEC, 0x01000000,
+ 0x1CF0, 0x00000100,
+ 0x1CF4, 0x01000000,
+ 0x1CF8, 0x00000000,
+ 0x1CFC, 0x00000000,
+ 0xC60, 0x70038040,
+ 0xC60, 0x70038040,
+ 0xC60, 0x70146040,
+ 0xC60, 0x70246040,
+ 0xC60, 0x70346040,
+ 0xC60, 0x70446040,
+ 0xC60, 0x70532040,
+ 0xC60, 0x70646040,
+ 0xC60, 0x70738040,
+ 0xC60, 0x70838040,
+ 0xC60, 0x70938040,
+ 0xC60, 0x70A38040,
+ 0xC60, 0x70B36040,
+ 0xC60, 0x70C06040,
+ 0xC60, 0x70D06040,
+ 0xC60, 0x70E76040,
+ 0xC60, 0x70F06040,
+ 0xE60, 0x70038040,
+ 0xE60, 0x70038040,
+ 0xE60, 0x70146040,
+ 0xE60, 0x70246040,
+ 0xE60, 0x70346040,
+ 0xE60, 0x70446040,
+ 0xE60, 0x70532040,
+ 0xE60, 0x70646040,
+ 0xE60, 0x70738040,
+ 0xE60, 0x70838040,
+ 0xE60, 0x70938040,
+ 0xE60, 0x70A38040,
+ 0xE60, 0x70B36040,
+ 0xE60, 0x70C06040,
+ 0xE60, 0x70D06040,
+ 0xE60, 0x70E76040,
+ 0xE60, 0x70F06040,
+ 0xC64, 0x00800000,
+ 0xC64, 0x08800001,
+ 0xC64, 0x00800002,
+ 0xC64, 0x00800003,
+ 0xC64, 0x00800004,
+ 0xC64, 0x00800005,
+ 0xC64, 0x00800006,
+ 0xC64, 0x08800007,
+ 0xC64, 0x00004000,
+ 0xE64, 0x00800000,
+ 0xE64, 0x08800001,
+ 0xE64, 0x00800002,
+ 0xE64, 0x00800003,
+ 0xE64, 0x00800004,
+ 0xE64, 0x00800005,
+ 0xE64, 0x00800006,
+ 0xE64, 0x08800007,
+ 0xE64, 0x00004000,
+ 0x1B00, 0xF8000008,
+ 0x1B00, 0xF80A7008,
+ 0x1B00, 0xF8015008,
+ 0x1B00, 0xF8000008,
+ 0x1B04, 0xE24629D2,
+ 0x1B08, 0x00000080,
+ 0x1B0C, 0x00000000,
+ 0x1B10, 0x00011C00,
+ 0x1B14, 0x00000000,
+ 0x1B18, 0x00292903,
+ 0x1B1C, 0xA2193C32,
+ 0x1B20, 0x01840008,
+ 0x1B24, 0x01860008,
+ 0x1B28, 0x80060300,
+ 0x1B2C, 0x00000003,
+ 0x1B30, 0x20000000,
+ 0x1B34, 0x00000800,
+ 0x1B3C, 0x20000000,
+ 0x1BC0, 0x01000000,
+ 0x1BCC, 0x00000000,
+ 0x1B00, 0xF800000A,
+ 0x1B1C, 0xA2193C32,
+ 0x1B20, 0x01840008,
+ 0x1B24, 0x01860008,
+ 0x1B28, 0x80060300,
+ 0x1B2C, 0x00000003,
+ 0x1B30, 0x20000000,
+ 0x1B34, 0x00000800,
+ 0x1B3C, 0x20000000,
+ 0x1BC0, 0x01000000,
+ 0x1BCC, 0x00000000,
+ 0x1B00, 0xF8000000,
+ 0x1B80, 0x00000007,
+ 0x1B80, 0x090A0005,
+ 0x1B80, 0x090A0007,
+ 0x1B80, 0x0FFE0015,
+ 0x1B80, 0x0FFE0017,
+ 0x1B80, 0x00220025,
+ 0x1B80, 0x00220027,
+ 0x1B80, 0x00040035,
+ 0x1B80, 0x00040037,
+ 0x1B80, 0x05C00045,
+ 0x1B80, 0x05C00047,
+ 0x1B80, 0x00070055,
+ 0x1B80, 0x00070057,
+ 0x1B80, 0x64000065,
+ 0x1B80, 0x64000067,
+ 0x1B80, 0x00020075,
+ 0x1B80, 0x00020077,
+ 0x1B80, 0x00080085,
+ 0x1B80, 0x00080087,
+ 0x1B80, 0x80000095,
+ 0x1B80, 0x80000097,
+ 0x1B80, 0x090800A5,
+ 0x1B80, 0x090800A7,
+ 0x1B80, 0x0F0200B5,
+ 0x1B80, 0x0F0200B7,
+ 0x1B80, 0x002200C5,
+ 0x1B80, 0x002200C7,
+ 0x1B80, 0x000400D5,
+ 0x1B80, 0x000400D7,
+ 0x1B80, 0x05C000E5,
+ 0x1B80, 0x05C000E7,
+ 0x1B80, 0x000700F5,
+ 0x1B80, 0x000700F7,
+ 0x1B80, 0x64020105,
+ 0x1B80, 0x64020107,
+ 0x1B80, 0x00020115,
+ 0x1B80, 0x00020117,
+ 0x1B80, 0x00040125,
+ 0x1B80, 0x00040127,
+ 0x1B80, 0x4A000135,
+ 0x1B80, 0x4A000137,
+ 0x1B80, 0x4B040145,
+ 0x1B80, 0x4B040147,
+ 0x1B80, 0x85030155,
+ 0x1B80, 0x85030157,
+ 0x1B80, 0x40090165,
+ 0x1B80, 0x40090167,
+ 0x1B80, 0xE0280175,
+ 0x1B80, 0xE0280177,
+ 0x1B80, 0x4B050185,
+ 0x1B80, 0x4B050187,
+ 0x1B80, 0x86030195,
+ 0x1B80, 0x86030197,
+ 0x1B80, 0x400B01A5,
+ 0x1B80, 0x400B01A7,
+ 0x1B80, 0xE02801B5,
+ 0x1B80, 0xE02801B7,
+ 0x1B80, 0x4B0001C5,
+ 0x1B80, 0x4B0001C7,
+ 0x1B80, 0x000701D5,
+ 0x1B80, 0x000701D7,
+ 0x1B80, 0x4C0001E5,
+ 0x1B80, 0x4C0001E7,
+ 0x1B80, 0x000401F5,
+ 0x1B80, 0x000401F7,
+ 0x1B80, 0x4D040205,
+ 0x1B80, 0x4D040207,
+ 0x1B80, 0x2EF00215,
+ 0x1B80, 0x2EF00217,
+ 0x1B80, 0x00000225,
+ 0x1B80, 0x00000227,
+ 0x1B80, 0x20810235,
+ 0x1B80, 0x20810237,
+ 0x1B80, 0x23450245,
+ 0x1B80, 0x23450247,
+ 0x1B80, 0x4D000255,
+ 0x1B80, 0x4D000257,
+ 0x1B80, 0x00040265,
+ 0x1B80, 0x00040267,
+ 0x1B80, 0x30000275,
+ 0x1B80, 0x30000277,
+ 0x1B80, 0xE1D80285,
+ 0x1B80, 0xE1D80287,
+ 0x1B80, 0xF0110295,
+ 0x1B80, 0xF0110297,
+ 0x1B80, 0xF11102A5,
+ 0x1B80, 0xF11102A7,
+ 0x1B80, 0xF21102B5,
+ 0x1B80, 0xF21102B7,
+ 0x1B80, 0xF31102C5,
+ 0x1B80, 0xF31102C7,
+ 0x1B80, 0xF41102D5,
+ 0x1B80, 0xF41102D7,
+ 0x1B80, 0xF51102E5,
+ 0x1B80, 0xF51102E7,
+ 0x1B80, 0xF61102F5,
+ 0x1B80, 0xF61102F7,
+ 0x1B80, 0xF7110305,
+ 0x1B80, 0xF7110307,
+ 0x1B80, 0xF8110315,
+ 0x1B80, 0xF8110317,
+ 0x1B80, 0xF9110325,
+ 0x1B80, 0xF9110327,
+ 0x1B80, 0xFA110335,
+ 0x1B80, 0xFA110337,
+ 0x1B80, 0xFB110345,
+ 0x1B80, 0xFB110347,
+ 0x1B80, 0xFC110355,
+ 0x1B80, 0xFC110357,
+ 0x1B80, 0xFD110365,
+ 0x1B80, 0xFD110367,
+ 0x1B80, 0xFE110375,
+ 0x1B80, 0xFE110377,
+ 0x1B80, 0xFF110385,
+ 0x1B80, 0xFF110387,
+ 0x1B80, 0x00010395,
+ 0x1B80, 0x00010397,
+ 0x1B80, 0x305103A5,
+ 0x1B80, 0x305103A7,
+ 0x1B80, 0x306903B5,
+ 0x1B80, 0x306903B7,
+ 0x1B80, 0x30B403C5,
+ 0x1B80, 0x30B403C7,
+ 0x1B80, 0x30B703D5,
+ 0x1B80, 0x30B703D7,
+ 0x1B80, 0x306B03E5,
+ 0x1B80, 0x306B03E7,
+ 0x1B80, 0x307603F5,
+ 0x1B80, 0x307603F7,
+ 0x1B80, 0x30810405,
+ 0x1B80, 0x30810407,
+ 0x1B80, 0x30C10415,
+ 0x1B80, 0x30C10417,
+ 0x1B80, 0x30BB0425,
+ 0x1B80, 0x30BB0427,
+ 0x1B80, 0x30CF0435,
+ 0x1B80, 0x30CF0437,
+ 0x1B80, 0x30DA0445,
+ 0x1B80, 0x30DA0447,
+ 0x1B80, 0x30E50455,
+ 0x1B80, 0x30E50457,
+ 0x1B80, 0x304A0465,
+ 0x1B80, 0x304A0467,
+ 0x1B80, 0x31140475,
+ 0x1B80, 0x31140477,
+ 0x1B80, 0x31250485,
+ 0x1B80, 0x31250487,
+ 0x1B80, 0x313A0495,
+ 0x1B80, 0x313A0497,
+ 0x1B80, 0x4D0404A5,
+ 0x1B80, 0x4D0404A7,
+ 0x1B80, 0x2EF004B5,
+ 0x1B80, 0x2EF004B7,
+ 0x1B80, 0x000004C5,
+ 0x1B80, 0x000004C7,
+ 0x1B80, 0x208104D5,
+ 0x1B80, 0x208104D7,
+ 0x1B80, 0xA3B504E5,
+ 0x1B80, 0xA3B504E7,
+ 0x1B80, 0x4D0004F5,
+ 0x1B80, 0x4D0004F7,
+ 0x1B80, 0x30000505,
+ 0x1B80, 0x30000507,
+ 0x1B80, 0xE1650515,
+ 0x1B80, 0xE1650517,
+ 0x1B80, 0x4D040525,
+ 0x1B80, 0x4D040527,
+ 0x1B80, 0x20800535,
+ 0x1B80, 0x20800537,
+ 0x1B80, 0x00000545,
+ 0x1B80, 0x00000547,
+ 0x1B80, 0x4D000555,
+ 0x1B80, 0x4D000557,
+ 0x1B80, 0x55070565,
+ 0x1B80, 0x55070567,
+ 0x1B80, 0xE15D0575,
+ 0x1B80, 0xE15D0577,
+ 0x1B80, 0xE15D0585,
+ 0x1B80, 0xE15D0587,
+ 0x1B80, 0x4D040595,
+ 0x1B80, 0x4D040597,
+ 0x1B80, 0x208805A5,
+ 0x1B80, 0x208805A7,
+ 0x1B80, 0x020005B5,
+ 0x1B80, 0x020005B7,
+ 0x1B80, 0x4D0005C5,
+ 0x1B80, 0x4D0005C7,
+ 0x1B80, 0x550F05D5,
+ 0x1B80, 0x550F05D7,
+ 0x1B80, 0xE15D05E5,
+ 0x1B80, 0xE15D05E7,
+ 0x1B80, 0x4F0205F5,
+ 0x1B80, 0x4F0205F7,
+ 0x1B80, 0x4E000605,
+ 0x1B80, 0x4E000607,
+ 0x1B80, 0x53020615,
+ 0x1B80, 0x53020617,
+ 0x1B80, 0x52010625,
+ 0x1B80, 0x52010627,
+ 0x1B80, 0xE1610635,
+ 0x1B80, 0xE1610637,
+ 0x1B80, 0x4D080645,
+ 0x1B80, 0x4D080647,
+ 0x1B80, 0x57100655,
+ 0x1B80, 0x57100657,
+ 0x1B80, 0x57000665,
+ 0x1B80, 0x57000667,
+ 0x1B80, 0x4D000675,
+ 0x1B80, 0x4D000677,
+ 0x1B80, 0x00010685,
+ 0x1B80, 0x00010687,
+ 0x1B80, 0xE1650695,
+ 0x1B80, 0xE1650697,
+ 0x1B80, 0x000106A5,
+ 0x1B80, 0x000106A7,
+ 0x1B80, 0x308B06B5,
+ 0x1B80, 0x308B06B7,
+ 0x1B80, 0x002306C5,
+ 0x1B80, 0x002306C7,
+ 0x1B80, 0xE1CB06D5,
+ 0x1B80, 0xE1CB06D7,
+ 0x1B80, 0x000206E5,
+ 0x1B80, 0x000206E7,
+ 0x1B80, 0x54E906F5,
+ 0x1B80, 0x54E906F7,
+ 0x1B80, 0x0BA60705,
+ 0x1B80, 0x0BA60707,
+ 0x1B80, 0x00230715,
+ 0x1B80, 0x00230717,
+ 0x1B80, 0xE1CB0725,
+ 0x1B80, 0xE1CB0727,
+ 0x1B80, 0x00020735,
+ 0x1B80, 0x00020737,
+ 0x1B80, 0x4D300745,
+ 0x1B80, 0x4D300747,
+ 0x1B80, 0x30A40755,
+ 0x1B80, 0x30A40757,
+ 0x1B80, 0x30870765,
+ 0x1B80, 0x30870767,
+ 0x1B80, 0x00220775,
+ 0x1B80, 0x00220777,
+ 0x1B80, 0xE1CB0785,
+ 0x1B80, 0xE1CB0787,
+ 0x1B80, 0x00020795,
+ 0x1B80, 0x00020797,
+ 0x1B80, 0x54E807A5,
+ 0x1B80, 0x54E807A7,
+ 0x1B80, 0x0BA607B5,
+ 0x1B80, 0x0BA607B7,
+ 0x1B80, 0x002207C5,
+ 0x1B80, 0x002207C7,
+ 0x1B80, 0xE1CB07D5,
+ 0x1B80, 0xE1CB07D7,
+ 0x1B80, 0x000207E5,
+ 0x1B80, 0x000207E7,
+ 0x1B80, 0x4D3007F5,
+ 0x1B80, 0x4D3007F7,
+ 0x1B80, 0x30A40805,
+ 0x1B80, 0x30A40807,
+ 0x1B80, 0x63F10815,
+ 0x1B80, 0x63F10817,
+ 0x1B80, 0xE1650825,
+ 0x1B80, 0xE1650827,
+ 0x1B80, 0xE1CB0835,
+ 0x1B80, 0xE1CB0837,
+ 0x1B80, 0x63F40845,
+ 0x1B80, 0x63F40847,
+ 0x1B80, 0xE1650855,
+ 0x1B80, 0xE1650857,
+ 0x1B80, 0xE1CB0865,
+ 0x1B80, 0xE1CB0867,
+ 0x1B80, 0x0BA80875,
+ 0x1B80, 0x0BA80877,
+ 0x1B80, 0x63F80885,
+ 0x1B80, 0x63F80887,
+ 0x1B80, 0xE1650895,
+ 0x1B80, 0xE1650897,
+ 0x1B80, 0xE1CB08A5,
+ 0x1B80, 0xE1CB08A7,
+ 0x1B80, 0x0BA908B5,
+ 0x1B80, 0x0BA908B7,
+ 0x1B80, 0x63FC08C5,
+ 0x1B80, 0x63FC08C7,
+ 0x1B80, 0xE16508D5,
+ 0x1B80, 0xE16508D7,
+ 0x1B80, 0xE1CB08E5,
+ 0x1B80, 0xE1CB08E7,
+ 0x1B80, 0x63FF08F5,
+ 0x1B80, 0x63FF08F7,
+ 0x1B80, 0xE1650905,
+ 0x1B80, 0xE1650907,
+ 0x1B80, 0xE1CB0915,
+ 0x1B80, 0xE1CB0917,
+ 0x1B80, 0x63000925,
+ 0x1B80, 0x63000927,
+ 0x1B80, 0xE1650935,
+ 0x1B80, 0xE1650937,
+ 0x1B80, 0xE1CB0945,
+ 0x1B80, 0xE1CB0947,
+ 0x1B80, 0x63030955,
+ 0x1B80, 0x63030957,
+ 0x1B80, 0xE1650965,
+ 0x1B80, 0xE1650967,
+ 0x1B80, 0xE1CB0975,
+ 0x1B80, 0xE1CB0977,
+ 0x1B80, 0xF4D40985,
+ 0x1B80, 0xF4D40987,
+ 0x1B80, 0x63070995,
+ 0x1B80, 0x63070997,
+ 0x1B80, 0xE16509A5,
+ 0x1B80, 0xE16509A7,
+ 0x1B80, 0xE1CB09B5,
+ 0x1B80, 0xE1CB09B7,
+ 0x1B80, 0xF5DB09C5,
+ 0x1B80, 0xF5DB09C7,
+ 0x1B80, 0x630B09D5,
+ 0x1B80, 0x630B09D7,
+ 0x1B80, 0xE16509E5,
+ 0x1B80, 0xE16509E7,
+ 0x1B80, 0xE1CB09F5,
+ 0x1B80, 0xE1CB09F7,
+ 0x1B80, 0x630E0A05,
+ 0x1B80, 0x630E0A07,
+ 0x1B80, 0xE1650A15,
+ 0x1B80, 0xE1650A17,
+ 0x1B80, 0xE1CB0A25,
+ 0x1B80, 0xE1CB0A27,
+ 0x1B80, 0x4D300A35,
+ 0x1B80, 0x4D300A37,
+ 0x1B80, 0x55010A45,
+ 0x1B80, 0x55010A47,
+ 0x1B80, 0x57040A55,
+ 0x1B80, 0x57040A57,
+ 0x1B80, 0x57000A65,
+ 0x1B80, 0x57000A67,
+ 0x1B80, 0x96000A75,
+ 0x1B80, 0x96000A77,
+ 0x1B80, 0x57080A85,
+ 0x1B80, 0x57080A87,
+ 0x1B80, 0x57000A95,
+ 0x1B80, 0x57000A97,
+ 0x1B80, 0x95000AA5,
+ 0x1B80, 0x95000AA7,
+ 0x1B80, 0x4D000AB5,
+ 0x1B80, 0x4D000AB7,
+ 0x1B80, 0x6C070AC5,
+ 0x1B80, 0x6C070AC7,
+ 0x1B80, 0x7B200AD5,
+ 0x1B80, 0x7B200AD7,
+ 0x1B80, 0x7A000AE5,
+ 0x1B80, 0x7A000AE7,
+ 0x1B80, 0x79000AF5,
+ 0x1B80, 0x79000AF7,
+ 0x1B80, 0x7F200B05,
+ 0x1B80, 0x7F200B07,
+ 0x1B80, 0x7E000B15,
+ 0x1B80, 0x7E000B17,
+ 0x1B80, 0x7D000B25,
+ 0x1B80, 0x7D000B27,
+ 0x1B80, 0x00010B35,
+ 0x1B80, 0x00010B37,
+ 0x1B80, 0x62850B45,
+ 0x1B80, 0x62850B47,
+ 0x1B80, 0xE1650B55,
+ 0x1B80, 0xE1650B57,
+ 0x1B80, 0x00010B65,
+ 0x1B80, 0x00010B67,
+ 0x1B80, 0x5C320B75,
+ 0x1B80, 0x5C320B77,
+ 0x1B80, 0xE1C70B85,
+ 0x1B80, 0xE1C70B87,
+ 0x1B80, 0xE1930B95,
+ 0x1B80, 0xE1930B97,
+ 0x1B80, 0x00010BA5,
+ 0x1B80, 0x00010BA7,
+ 0x1B80, 0x5C320BB5,
+ 0x1B80, 0x5C320BB7,
+ 0x1B80, 0x63F40BC5,
+ 0x1B80, 0x63F40BC7,
+ 0x1B80, 0x62850BD5,
+ 0x1B80, 0x62850BD7,
+ 0x1B80, 0x0BB00BE5,
+ 0x1B80, 0x0BB00BE7,
+ 0x1B80, 0xE1650BF5,
+ 0x1B80, 0xE1650BF7,
+ 0x1B80, 0xE1CB0C05,
+ 0x1B80, 0xE1CB0C07,
+ 0x1B80, 0x5C320C15,
+ 0x1B80, 0x5C320C17,
+ 0x1B80, 0x63FC0C25,
+ 0x1B80, 0x63FC0C27,
+ 0x1B80, 0x62850C35,
+ 0x1B80, 0x62850C37,
+ 0x1B80, 0x0BB10C45,
+ 0x1B80, 0x0BB10C47,
+ 0x1B80, 0xE1650C55,
+ 0x1B80, 0xE1650C57,
+ 0x1B80, 0xE1CB0C65,
+ 0x1B80, 0xE1CB0C67,
+ 0x1B80, 0x63030C75,
+ 0x1B80, 0x63030C77,
+ 0x1B80, 0xE1650C85,
+ 0x1B80, 0xE1650C87,
+ 0x1B80, 0xE1CB0C95,
+ 0x1B80, 0xE1CB0C97,
+ 0x1B80, 0xF7040CA5,
+ 0x1B80, 0xF7040CA7,
+ 0x1B80, 0x630B0CB5,
+ 0x1B80, 0x630B0CB7,
+ 0x1B80, 0xE1650CC5,
+ 0x1B80, 0xE1650CC7,
+ 0x1B80, 0xE1CB0CD5,
+ 0x1B80, 0xE1CB0CD7,
+ 0x1B80, 0x00010CE5,
+ 0x1B80, 0x00010CE7,
+ 0x1B80, 0x30F30CF5,
+ 0x1B80, 0x30F30CF7,
+ 0x1B80, 0x00230D05,
+ 0x1B80, 0x00230D07,
+ 0x1B80, 0xE1D00D15,
+ 0x1B80, 0xE1D00D17,
+ 0x1B80, 0x00020D25,
+ 0x1B80, 0x00020D27,
+ 0x1B80, 0x54E90D35,
+ 0x1B80, 0x54E90D37,
+ 0x1B80, 0x0BA60D45,
+ 0x1B80, 0x0BA60D47,
+ 0x1B80, 0x00230D55,
+ 0x1B80, 0x00230D57,
+ 0x1B80, 0xE1D00D65,
+ 0x1B80, 0xE1D00D67,
+ 0x1B80, 0x00020D75,
+ 0x1B80, 0x00020D77,
+ 0x1B80, 0x4D100D85,
+ 0x1B80, 0x4D100D87,
+ 0x1B80, 0x30A40D95,
+ 0x1B80, 0x30A40D97,
+ 0x1B80, 0x30ED0DA5,
+ 0x1B80, 0x30ED0DA7,
+ 0x1B80, 0x00220DB5,
+ 0x1B80, 0x00220DB7,
+ 0x1B80, 0xE1D00DC5,
+ 0x1B80, 0xE1D00DC7,
+ 0x1B80, 0x00020DD5,
+ 0x1B80, 0x00020DD7,
+ 0x1B80, 0x54E80DE5,
+ 0x1B80, 0x54E80DE7,
+ 0x1B80, 0x0BA60DF5,
+ 0x1B80, 0x0BA60DF7,
+ 0x1B80, 0x00220E05,
+ 0x1B80, 0x00220E07,
+ 0x1B80, 0xE1D00E15,
+ 0x1B80, 0xE1D00E17,
+ 0x1B80, 0x00020E25,
+ 0x1B80, 0x00020E27,
+ 0x1B80, 0x4D100E35,
+ 0x1B80, 0x4D100E37,
+ 0x1B80, 0x30A40E45,
+ 0x1B80, 0x30A40E47,
+ 0x1B80, 0x5C320E55,
+ 0x1B80, 0x5C320E57,
+ 0x1B80, 0x54F00E65,
+ 0x1B80, 0x54F00E67,
+ 0x1B80, 0x67F10E75,
+ 0x1B80, 0x67F10E77,
+ 0x1B80, 0xE1930E85,
+ 0x1B80, 0xE1930E87,
+ 0x1B80, 0xE1D00E95,
+ 0x1B80, 0xE1D00E97,
+ 0x1B80, 0x67F40EA5,
+ 0x1B80, 0x67F40EA7,
+ 0x1B80, 0xE1930EB5,
+ 0x1B80, 0xE1930EB7,
+ 0x1B80, 0xE1D00EC5,
+ 0x1B80, 0xE1D00EC7,
+ 0x1B80, 0x5C320ED5,
+ 0x1B80, 0x5C320ED7,
+ 0x1B80, 0x54F10EE5,
+ 0x1B80, 0x54F10EE7,
+ 0x1B80, 0x0BA80EF5,
+ 0x1B80, 0x0BA80EF7,
+ 0x1B80, 0x67F80F05,
+ 0x1B80, 0x67F80F07,
+ 0x1B80, 0xE1930F15,
+ 0x1B80, 0xE1930F17,
+ 0x1B80, 0xE1D00F25,
+ 0x1B80, 0xE1D00F27,
+ 0x1B80, 0x5C320F35,
+ 0x1B80, 0x5C320F37,
+ 0x1B80, 0x54F10F45,
+ 0x1B80, 0x54F10F47,
+ 0x1B80, 0x0BA90F55,
+ 0x1B80, 0x0BA90F57,
+ 0x1B80, 0x67FC0F65,
+ 0x1B80, 0x67FC0F67,
+ 0x1B80, 0xE1930F75,
+ 0x1B80, 0xE1930F77,
+ 0x1B80, 0xE1D00F85,
+ 0x1B80, 0xE1D00F87,
+ 0x1B80, 0x67FF0F95,
+ 0x1B80, 0x67FF0F97,
+ 0x1B80, 0xE1930FA5,
+ 0x1B80, 0xE1930FA7,
+ 0x1B80, 0xE1D00FB5,
+ 0x1B80, 0xE1D00FB7,
+ 0x1B80, 0x5C320FC5,
+ 0x1B80, 0x5C320FC7,
+ 0x1B80, 0x54F20FD5,
+ 0x1B80, 0x54F20FD7,
+ 0x1B80, 0x67000FE5,
+ 0x1B80, 0x67000FE7,
+ 0x1B80, 0xE1930FF5,
+ 0x1B80, 0xE1930FF7,
+ 0x1B80, 0xE1D01005,
+ 0x1B80, 0xE1D01007,
+ 0x1B80, 0x67031015,
+ 0x1B80, 0x67031017,
+ 0x1B80, 0xE1931025,
+ 0x1B80, 0xE1931027,
+ 0x1B80, 0xE1D01035,
+ 0x1B80, 0xE1D01037,
+ 0x1B80, 0xF9CC1045,
+ 0x1B80, 0xF9CC1047,
+ 0x1B80, 0x67071055,
+ 0x1B80, 0x67071057,
+ 0x1B80, 0xE1931065,
+ 0x1B80, 0xE1931067,
+ 0x1B80, 0xE1D01075,
+ 0x1B80, 0xE1D01077,
+ 0x1B80, 0xFAD31085,
+ 0x1B80, 0xFAD31087,
+ 0x1B80, 0x5C321095,
+ 0x1B80, 0x5C321097,
+ 0x1B80, 0x54F310A5,
+ 0x1B80, 0x54F310A7,
+ 0x1B80, 0x670B10B5,
+ 0x1B80, 0x670B10B7,
+ 0x1B80, 0xE19310C5,
+ 0x1B80, 0xE19310C7,
+ 0x1B80, 0xE1D010D5,
+ 0x1B80, 0xE1D010D7,
+ 0x1B80, 0x670E10E5,
+ 0x1B80, 0x670E10E7,
+ 0x1B80, 0xE19310F5,
+ 0x1B80, 0xE19310F7,
+ 0x1B80, 0xE1D01105,
+ 0x1B80, 0xE1D01107,
+ 0x1B80, 0x4D101115,
+ 0x1B80, 0x4D101117,
+ 0x1B80, 0x30A41125,
+ 0x1B80, 0x30A41127,
+ 0x1B80, 0x00011135,
+ 0x1B80, 0x00011137,
+ 0x1B80, 0x6C001145,
+ 0x1B80, 0x6C001147,
+ 0x1B80, 0x00061155,
+ 0x1B80, 0x00061157,
+ 0x1B80, 0x53001165,
+ 0x1B80, 0x53001167,
+ 0x1B80, 0x57F71175,
+ 0x1B80, 0x57F71177,
+ 0x1B80, 0x58211185,
+ 0x1B80, 0x58211187,
+ 0x1B80, 0x592E1195,
+ 0x1B80, 0x592E1197,
+ 0x1B80, 0x5A3811A5,
+ 0x1B80, 0x5A3811A7,
+ 0x1B80, 0x5B4111B5,
+ 0x1B80, 0x5B4111B7,
+ 0x1B80, 0x000711C5,
+ 0x1B80, 0x000711C7,
+ 0x1B80, 0x5C0011D5,
+ 0x1B80, 0x5C0011D7,
+ 0x1B80, 0x4B0011E5,
+ 0x1B80, 0x4B0011E7,
+ 0x1B80, 0x4E8F11F5,
+ 0x1B80, 0x4E8F11F7,
+ 0x1B80, 0x4F151205,
+ 0x1B80, 0x4F151207,
+ 0x1B80, 0x00041215,
+ 0x1B80, 0x00041217,
+ 0x1B80, 0xE1B51225,
+ 0x1B80, 0xE1B51227,
+ 0x1B80, 0xAB001235,
+ 0x1B80, 0xAB001237,
+ 0x1B80, 0x00011245,
+ 0x1B80, 0x00011247,
+ 0x1B80, 0x6C001255,
+ 0x1B80, 0x6C001257,
+ 0x1B80, 0x00061265,
+ 0x1B80, 0x00061267,
+ 0x1B80, 0x53001275,
+ 0x1B80, 0x53001277,
+ 0x1B80, 0x57F71285,
+ 0x1B80, 0x57F71287,
+ 0x1B80, 0x58211295,
+ 0x1B80, 0x58211297,
+ 0x1B80, 0x592E12A5,
+ 0x1B80, 0x592E12A7,
+ 0x1B80, 0x5A3812B5,
+ 0x1B80, 0x5A3812B7,
+ 0x1B80, 0x5B4112C5,
+ 0x1B80, 0x5B4112C7,
+ 0x1B80, 0x000712D5,
+ 0x1B80, 0x000712D7,
+ 0x1B80, 0x5C0012E5,
+ 0x1B80, 0x5C0012E7,
+ 0x1B80, 0x4B4012F5,
+ 0x1B80, 0x4B4012F7,
+ 0x1B80, 0x4E971305,
+ 0x1B80, 0x4E971307,
+ 0x1B80, 0x4F111315,
+ 0x1B80, 0x4F111317,
+ 0x1B80, 0x00041325,
+ 0x1B80, 0x00041327,
+ 0x1B80, 0xE1B51335,
+ 0x1B80, 0xE1B51337,
+ 0x1B80, 0xAB001345,
+ 0x1B80, 0xAB001347,
+ 0x1B80, 0x8B001355,
+ 0x1B80, 0x8B001357,
+ 0x1B80, 0xAB001365,
+ 0x1B80, 0xAB001367,
+ 0x1B80, 0x8A191375,
+ 0x1B80, 0x8A191377,
+ 0x1B80, 0x301D1385,
+ 0x1B80, 0x301D1387,
+ 0x1B80, 0x00011395,
+ 0x1B80, 0x00011397,
+ 0x1B80, 0x6C0113A5,
+ 0x1B80, 0x6C0113A7,
+ 0x1B80, 0x000613B5,
+ 0x1B80, 0x000613B7,
+ 0x1B80, 0x530113C5,
+ 0x1B80, 0x530113C7,
+ 0x1B80, 0x57F713D5,
+ 0x1B80, 0x57F713D7,
+ 0x1B80, 0x582113E5,
+ 0x1B80, 0x582113E7,
+ 0x1B80, 0x592E13F5,
+ 0x1B80, 0x592E13F7,
+ 0x1B80, 0x5A381405,
+ 0x1B80, 0x5A381407,
+ 0x1B80, 0x5B411415,
+ 0x1B80, 0x5B411417,
+ 0x1B80, 0x00071425,
+ 0x1B80, 0x00071427,
+ 0x1B80, 0x5C001435,
+ 0x1B80, 0x5C001437,
+ 0x1B80, 0x4B001445,
+ 0x1B80, 0x4B001447,
+ 0x1B80, 0x4E871455,
+ 0x1B80, 0x4E871457,
+ 0x1B80, 0x4F111465,
+ 0x1B80, 0x4F111467,
+ 0x1B80, 0x00041475,
+ 0x1B80, 0x00041477,
+ 0x1B80, 0xE1B51485,
+ 0x1B80, 0xE1B51487,
+ 0x1B80, 0xAB001495,
+ 0x1B80, 0xAB001497,
+ 0x1B80, 0x000614A5,
+ 0x1B80, 0x000614A7,
+ 0x1B80, 0x577714B5,
+ 0x1B80, 0x577714B7,
+ 0x1B80, 0x000714C5,
+ 0x1B80, 0x000714C7,
+ 0x1B80, 0x4E8614D5,
+ 0x1B80, 0x4E8614D7,
+ 0x1B80, 0x000414E5,
+ 0x1B80, 0x000414E7,
+ 0x1B80, 0x000114F5,
+ 0x1B80, 0x000114F7,
+ 0x1B80, 0x00011505,
+ 0x1B80, 0x00011507,
+ 0x1B80, 0x7B241515,
+ 0x1B80, 0x7B241517,
+ 0x1B80, 0x7A401525,
+ 0x1B80, 0x7A401527,
+ 0x1B80, 0x79001535,
+ 0x1B80, 0x79001537,
+ 0x1B80, 0x55031545,
+ 0x1B80, 0x55031547,
+ 0x1B80, 0x315D1555,
+ 0x1B80, 0x315D1557,
+ 0x1B80, 0x7B1C1565,
+ 0x1B80, 0x7B1C1567,
+ 0x1B80, 0x7A401575,
+ 0x1B80, 0x7A401577,
+ 0x1B80, 0x550B1585,
+ 0x1B80, 0x550B1587,
+ 0x1B80, 0x315D1595,
+ 0x1B80, 0x315D1597,
+ 0x1B80, 0x7B2015A5,
+ 0x1B80, 0x7B2015A7,
+ 0x1B80, 0x7A0015B5,
+ 0x1B80, 0x7A0015B7,
+ 0x1B80, 0x551315C5,
+ 0x1B80, 0x551315C7,
+ 0x1B80, 0x740115D5,
+ 0x1B80, 0x740115D7,
+ 0x1B80, 0x740015E5,
+ 0x1B80, 0x740015E7,
+ 0x1B80, 0x8E0015F5,
+ 0x1B80, 0x8E0015F7,
+ 0x1B80, 0x00011605,
+ 0x1B80, 0x00011607,
+ 0x1B80, 0x57021615,
+ 0x1B80, 0x57021617,
+ 0x1B80, 0x57001625,
+ 0x1B80, 0x57001627,
+ 0x1B80, 0x97001635,
+ 0x1B80, 0x97001637,
+ 0x1B80, 0x00011645,
+ 0x1B80, 0x00011647,
+ 0x1B80, 0x4F781655,
+ 0x1B80, 0x4F781657,
+ 0x1B80, 0x53881665,
+ 0x1B80, 0x53881667,
+ 0x1B80, 0xE1731675,
+ 0x1B80, 0xE1731677,
+ 0x1B80, 0x54801685,
+ 0x1B80, 0x54801687,
+ 0x1B80, 0x54001695,
+ 0x1B80, 0x54001697,
+ 0x1B80, 0xE17316A5,
+ 0x1B80, 0xE17316A7,
+ 0x1B80, 0x548116B5,
+ 0x1B80, 0x548116B7,
+ 0x1B80, 0x540016C5,
+ 0x1B80, 0x540016C7,
+ 0x1B80, 0xE17316D5,
+ 0x1B80, 0xE17316D7,
+ 0x1B80, 0x548216E5,
+ 0x1B80, 0x548216E7,
+ 0x1B80, 0x540016F5,
+ 0x1B80, 0x540016F7,
+ 0x1B80, 0xE17E1705,
+ 0x1B80, 0xE17E1707,
+ 0x1B80, 0xBF1D1715,
+ 0x1B80, 0xBF1D1717,
+ 0x1B80, 0x301D1725,
+ 0x1B80, 0x301D1727,
+ 0x1B80, 0xE1511735,
+ 0x1B80, 0xE1511737,
+ 0x1B80, 0xE1561745,
+ 0x1B80, 0xE1561747,
+ 0x1B80, 0xE15A1755,
+ 0x1B80, 0xE15A1757,
+ 0x1B80, 0xE1611765,
+ 0x1B80, 0xE1611767,
+ 0x1B80, 0xE1C71775,
+ 0x1B80, 0xE1C71777,
+ 0x1B80, 0x55131785,
+ 0x1B80, 0x55131787,
+ 0x1B80, 0xE15D1795,
+ 0x1B80, 0xE15D1797,
+ 0x1B80, 0x551517A5,
+ 0x1B80, 0x551517A7,
+ 0x1B80, 0xE16117B5,
+ 0x1B80, 0xE16117B7,
+ 0x1B80, 0xE1C717C5,
+ 0x1B80, 0xE1C717C7,
+ 0x1B80, 0x000117D5,
+ 0x1B80, 0x000117D7,
+ 0x1B80, 0x54BF17E5,
+ 0x1B80, 0x54BF17E7,
+ 0x1B80, 0x54C017F5,
+ 0x1B80, 0x54C017F7,
+ 0x1B80, 0x54A31805,
+ 0x1B80, 0x54A31807,
+ 0x1B80, 0x54C11815,
+ 0x1B80, 0x54C11817,
+ 0x1B80, 0x54A41825,
+ 0x1B80, 0x54A41827,
+ 0x1B80, 0x4C181835,
+ 0x1B80, 0x4C181837,
+ 0x1B80, 0xBF071845,
+ 0x1B80, 0xBF071847,
+ 0x1B80, 0x54C21855,
+ 0x1B80, 0x54C21857,
+ 0x1B80, 0x54A41865,
+ 0x1B80, 0x54A41867,
+ 0x1B80, 0xBF041875,
+ 0x1B80, 0xBF041877,
+ 0x1B80, 0x54C11885,
+ 0x1B80, 0x54C11887,
+ 0x1B80, 0x54A31895,
+ 0x1B80, 0x54A31897,
+ 0x1B80, 0xBF0118A5,
+ 0x1B80, 0xBF0118A7,
+ 0x1B80, 0xE1D518B5,
+ 0x1B80, 0xE1D518B7,
+ 0x1B80, 0x54DF18C5,
+ 0x1B80, 0x54DF18C7,
+ 0x1B80, 0x000118D5,
+ 0x1B80, 0x000118D7,
+ 0x1B80, 0x54BF18E5,
+ 0x1B80, 0x54BF18E7,
+ 0x1B80, 0x54E518F5,
+ 0x1B80, 0x54E518F7,
+ 0x1B80, 0x050A1905,
+ 0x1B80, 0x050A1907,
+ 0x1B80, 0x54DF1915,
+ 0x1B80, 0x54DF1917,
+ 0x1B80, 0x00011925,
+ 0x1B80, 0x00011927,
+ 0x1B80, 0x7F201935,
+ 0x1B80, 0x7F201937,
+ 0x1B80, 0x7E001945,
+ 0x1B80, 0x7E001947,
+ 0x1B80, 0x7D001955,
+ 0x1B80, 0x7D001957,
+ 0x1B80, 0x55011965,
+ 0x1B80, 0x55011967,
+ 0x1B80, 0x5C311975,
+ 0x1B80, 0x5C311977,
+ 0x1B80, 0xE15D1985,
+ 0x1B80, 0xE15D1987,
+ 0x1B80, 0xE1611995,
+ 0x1B80, 0xE1611997,
+ 0x1B80, 0x548019A5,
+ 0x1B80, 0x548019A7,
+ 0x1B80, 0x540019B5,
+ 0x1B80, 0x540019B7,
+ 0x1B80, 0xE15D19C5,
+ 0x1B80, 0xE15D19C7,
+ 0x1B80, 0xE16119D5,
+ 0x1B80, 0xE16119D7,
+ 0x1B80, 0x548119E5,
+ 0x1B80, 0x548119E7,
+ 0x1B80, 0x540019F5,
+ 0x1B80, 0x540019F7,
+ 0x1B80, 0xE15D1A05,
+ 0x1B80, 0xE15D1A07,
+ 0x1B80, 0xE1611A15,
+ 0x1B80, 0xE1611A17,
+ 0x1B80, 0x54821A25,
+ 0x1B80, 0x54821A27,
+ 0x1B80, 0x54001A35,
+ 0x1B80, 0x54001A37,
+ 0x1B80, 0xE17E1A45,
+ 0x1B80, 0xE17E1A47,
+ 0x1B80, 0xBFE91A55,
+ 0x1B80, 0xBFE91A57,
+ 0x1B80, 0x301D1A65,
+ 0x1B80, 0x301D1A67,
+ 0x1B80, 0x00231A75,
+ 0x1B80, 0x00231A77,
+ 0x1B80, 0x7B201A85,
+ 0x1B80, 0x7B201A87,
+ 0x1B80, 0x7A001A95,
+ 0x1B80, 0x7A001A97,
+ 0x1B80, 0x79001AA5,
+ 0x1B80, 0x79001AA7,
+ 0x1B80, 0xE1CB1AB5,
+ 0x1B80, 0xE1CB1AB7,
+ 0x1B80, 0x00021AC5,
+ 0x1B80, 0x00021AC7,
+ 0x1B80, 0x00011AD5,
+ 0x1B80, 0x00011AD7,
+ 0x1B80, 0x00221AE5,
+ 0x1B80, 0x00221AE7,
+ 0x1B80, 0x7B201AF5,
+ 0x1B80, 0x7B201AF7,
+ 0x1B80, 0x7A001B05,
+ 0x1B80, 0x7A001B07,
+ 0x1B80, 0x79001B15,
+ 0x1B80, 0x79001B17,
+ 0x1B80, 0xE1CB1B25,
+ 0x1B80, 0xE1CB1B27,
+ 0x1B80, 0x00021B35,
+ 0x1B80, 0x00021B37,
+ 0x1B80, 0x00011B45,
+ 0x1B80, 0x00011B47,
+ 0x1B80, 0x74021B55,
+ 0x1B80, 0x74021B57,
+ 0x1B80, 0x003F1B65,
+ 0x1B80, 0x003F1B67,
+ 0x1B80, 0x74001B75,
+ 0x1B80, 0x74001B77,
+ 0x1B80, 0x00021B85,
+ 0x1B80, 0x00021B87,
+ 0x1B80, 0x00011B95,
+ 0x1B80, 0x00011B97,
+ 0x1B80, 0x4D041BA5,
+ 0x1B80, 0x4D041BA7,
+ 0x1B80, 0x2EF81BB5,
+ 0x1B80, 0x2EF81BB7,
+ 0x1B80, 0x00001BC5,
+ 0x1B80, 0x00001BC7,
+ 0x1B80, 0x23301BD5,
+ 0x1B80, 0x23301BD7,
+ 0x1B80, 0x00241BE5,
+ 0x1B80, 0x00241BE7,
+ 0x1B80, 0x23E01BF5,
+ 0x1B80, 0x23E01BF7,
+ 0x1B80, 0x003F1C05,
+ 0x1B80, 0x003F1C07,
+ 0x1B80, 0x23FC1C15,
+ 0x1B80, 0x23FC1C17,
+ 0x1B80, 0xBFCE1C25,
+ 0x1B80, 0xBFCE1C27,
+ 0x1B80, 0x2EF01C35,
+ 0x1B80, 0x2EF01C37,
+ 0x1B80, 0x00001C45,
+ 0x1B80, 0x00001C47,
+ 0x1B80, 0x4D001C55,
+ 0x1B80, 0x4D001C57,
+ 0x1B80, 0x00011C65,
+ 0x1B80, 0x00011C67,
+ 0x1B80, 0x549F1C75,
+ 0x1B80, 0x549F1C77,
+ 0x1B80, 0x54FF1C85,
+ 0x1B80, 0x54FF1C87,
+ 0x1B80, 0x54001C95,
+ 0x1B80, 0x54001C97,
+ 0x1B80, 0x00011CA5,
+ 0x1B80, 0x00011CA7,
+ 0x1B80, 0x5C311CB5,
+ 0x1B80, 0x5C311CB7,
+ 0x1B80, 0x07141CC5,
+ 0x1B80, 0x07141CC7,
+ 0x1B80, 0x54001CD5,
+ 0x1B80, 0x54001CD7,
+ 0x1B80, 0x5C321CE5,
+ 0x1B80, 0x5C321CE7,
+ 0x1B80, 0x00011CF5,
+ 0x1B80, 0x00011CF7,
+ 0x1B80, 0x5C321D05,
+ 0x1B80, 0x5C321D07,
+ 0x1B80, 0x07141D15,
+ 0x1B80, 0x07141D17,
+ 0x1B80, 0x54001D25,
+ 0x1B80, 0x54001D27,
+ 0x1B80, 0x5C311D35,
+ 0x1B80, 0x5C311D37,
+ 0x1B80, 0x00011D45,
+ 0x1B80, 0x00011D47,
+ 0x1B80, 0x4C981D55,
+ 0x1B80, 0x4C981D57,
+ 0x1B80, 0x4C181D65,
+ 0x1B80, 0x4C181D67,
+ 0x1B80, 0x00011D75,
+ 0x1B80, 0x00011D77,
+ 0x1B80, 0x5C321D85,
+ 0x1B80, 0x5C321D87,
+ 0x1B80, 0x62841D95,
+ 0x1B80, 0x62841D97,
+ 0x1B80, 0x66861DA5,
+ 0x1B80, 0x66861DA7,
+ 0x1B80, 0x6C031DB5,
+ 0x1B80, 0x6C031DB7,
+ 0x1B80, 0x7B201DC5,
+ 0x1B80, 0x7B201DC7,
+ 0x1B80, 0x7A001DD5,
+ 0x1B80, 0x7A001DD7,
+ 0x1B80, 0x79001DE5,
+ 0x1B80, 0x79001DE7,
+ 0x1B80, 0x7F201DF5,
+ 0x1B80, 0x7F201DF7,
+ 0x1B80, 0x7E001E05,
+ 0x1B80, 0x7E001E07,
+ 0x1B80, 0x7D001E15,
+ 0x1B80, 0x7D001E17,
+ 0x1B80, 0x09011E25,
+ 0x1B80, 0x09011E27,
+ 0x1B80, 0x0C011E35,
+ 0x1B80, 0x0C011E37,
+ 0x1B80, 0x0BA61E45,
+ 0x1B80, 0x0BA61E47,
+ 0x1B80, 0x00011E55,
+ 0x1B80, 0x00011E57,
+ 0x1B80, 0x00000006,
+ 0x1B80, 0x00000002,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822b_bb, rtw_phy_cfg_bb);
+
+static const u32 rtw8822b_bb_pg_type2[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
+ 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
+ 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
+ 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
+ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+ 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
+ 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
+ 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
+ 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
+ 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
+ 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
+ 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
+ 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
+ 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
+ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x40424446,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x32343638,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x38404244,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x30323436,
+ 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404244,
+ 1, 0, 1, 0x00000c38, 0xffffffff, 0x30323436,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x38404244,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x30323436,
+ 1, 0, 0, 0x00000c44, 0xffffffff, 0x42442628,
+ 1, 0, 1, 0x00000c48, 0xffffffff, 0x34363840,
+ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x26283032,
+ 1, 1, 0, 0x00000e24, 0xffffffff, 0x40424446,
+ 1, 1, 0, 0x00000e28, 0xffffffff, 0x32343638,
+ 1, 1, 0, 0x00000e2c, 0xffffffff, 0x38404244,
+ 1, 1, 0, 0x00000e30, 0xffffffff, 0x30323436,
+ 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404244,
+ 1, 1, 1, 0x00000e38, 0xffffffff, 0x30323436,
+ 1, 1, 0, 0x00000e3c, 0xffffffff, 0x38404244,
+ 1, 1, 0, 0x00000e40, 0xffffffff, 0x30323436,
+ 1, 1, 0, 0x00000e44, 0xffffffff, 0x42442628,
+ 1, 1, 1, 0x00000e48, 0xffffffff, 0x34363840,
+ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x26283032
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type2);
+
+static const u32 rtw8822b_bb_pg_type5[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
+ 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
+ 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
+ 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
+ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+ 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
+ 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
+ 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
+ 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
+ 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
+ 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
+ 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
+ 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
+ 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
+ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
+ 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638,
+ 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
+ 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022,
+ 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234,
+ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426,
+ 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840,
+ 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032,
+ 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638,
+ 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830,
+ 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638,
+ 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830,
+ 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638,
+ 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830,
+ 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022,
+ 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234,
+ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type5);
+
+static const u32 rtw8822b_rf_a[] = {
+ 0x000, 0x00030000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0xA0000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00010D24,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000002,
+ 0x03E, 0x0000003F,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000D0F4E,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C0F4E,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00000034,
+ 0x03F, 0x0004080E,
+ 0x0EF, 0x00080000,
+ 0x0DF, 0x00002449,
+ 0x033, 0x00000024,
+ 0x03E, 0x0000003F,
+ 0x03F, 0x00060FDE,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000037,
+ 0x03F, 0x0007EFCE,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000037,
+ 0x03F, 0x000DEFCE,
+ 0x0EF, 0x00000000,
+ 0x07F, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0xA0000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0xB0000000, 0x00000000,
+ 0x0B1, 0x0007DBE4,
+ 0x0B2, 0x000225D1,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C330,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0003C360,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0xA0000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0xB0000000, 0x00000000,
+ 0x0B4, 0x00099DD0,
+ 0x0B5, 0x000400FC,
+ 0x0B6, 0x000187F0,
+ 0x0B7, 0x00030018,
+ 0x0B8, 0x00080800,
+ 0x0B9, 0x00000000,
+ 0x0BA, 0x00008000,
+ 0x0BB, 0x00000000,
+ 0x0BC, 0x00040030,
+ 0x0BD, 0x00000000,
+ 0x0BE, 0x00000000,
+ 0x0BF, 0x00000000,
+ 0x0C0, 0x00000000,
+ 0x0C1, 0x00000000,
+ 0x0C2, 0x00000000,
+ 0x0C3, 0x00000000,
+ 0x0C4, 0x00002402,
+ 0x0C5, 0x00000009,
+ 0x0C6, 0x00040299,
+ 0x0C7, 0x00055555,
+ 0x0C8, 0x0000C16C,
+ 0x0C9, 0x0001C146,
+ 0x0CA, 0x00000000,
+ 0x0CB, 0x00000000,
+ 0x0CC, 0x00000000,
+ 0x0CD, 0x00000000,
+ 0x0CE, 0x00090C00,
+ 0x0CF, 0x0006D200,
+ 0x0DF, 0x00000009,
+ 0x018, 0x00010524,
+ 0x089, 0x00000207,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0xA0000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0xB0000000, 0x00000000,
+ 0x08B, 0x00061E3C,
+ 0x08C, 0x000112C7,
+ 0x08D, 0x000F4988,
+ 0x08E, 0x00064D40,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000007,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000006,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000005,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004084,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000004,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004108,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000003,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x0000490C,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000002,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005E00,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000001,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005862,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005948,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000F,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000D,
+ 0x03E, 0x000040C8,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000009,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000008,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000017,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000015,
+ 0x03E, 0x000040C8,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000014,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000013,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000012,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000011,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000010,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00004000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000001,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000006,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00000401,
+ 0x084, 0x00001209,
+ 0x086, 0x000001A0,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0xA0000000, 0x00000000,
+ 0x087, 0x000E8180,
+ 0xB0000000, 0x00000000,
+ 0x088, 0x00070020,
+ 0x0DE, 0x00000010,
+ 0x0EF, 0x00008000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x0000003C,
+ 0x033, 0x0000000E,
+ 0x03F, 0x00000038,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000030,
+ 0x033, 0x0000000C,
+ 0x03F, 0x00000028,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000020,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000018,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000010,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000007,
+ 0x03F, 0x0000003C,
+ 0x033, 0x00000006,
+ 0x03F, 0x00000038,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000028,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000020,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000018,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000010,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000008,
+ 0x0EF, 0x00000000,
+ 0x0B8, 0x00080A00,
+ 0x0FE, 0x00000000,
+ 0x0B0, 0x000FF0FA,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0CA, 0x00080000,
+ 0x0FE, 0x00000000,
+ 0x0C9, 0x0001C141,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x018, 0x00018D24,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x018, 0x00010D24,
+ 0x01B, 0x00075A40,
+ 0x0EE, 0x00000002,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000006,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000000,
+ 0x0EE, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0xA0000000, 0x00000000,
+ 0x061, 0x0005D3D0,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x0EF, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000303,
+ 0x030, 0x00001303,
+ 0x030, 0x00002303,
+ 0x030, 0x00003303,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x0EF, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x00004355,
+ 0x030, 0x00005355,
+ 0x030, 0x00006355,
+ 0x030, 0x00007355,
+ 0x030, 0x00008315,
+ 0x030, 0x00009315,
+ 0x030, 0x0000A315,
+ 0x030, 0x0000B315,
+ 0x0EF, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x0EF, 0x00000000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x0EF, 0x00000000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000384,
+ 0x030, 0x00001384,
+ 0x030, 0x00002384,
+ 0x030, 0x00003384,
+ 0x030, 0x00004425,
+ 0x030, 0x00005425,
+ 0x030, 0x00006425,
+ 0x030, 0x00007425,
+ 0x030, 0x000084A6,
+ 0x030, 0x000094A6,
+ 0x030, 0x0000A4A6,
+ 0x030, 0x0000B4A6,
+ 0x0EF, 0x00000000,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000463,
+ 0x030, 0x00001463,
+ 0x030, 0x00002463,
+ 0x030, 0x00003463,
+ 0x030, 0x00004545,
+ 0x030, 0x00005545,
+ 0x030, 0x00006545,
+ 0x030, 0x00007545,
+ 0x030, 0x00008565,
+ 0x030, 0x00009565,
+ 0x030, 0x0000A565,
+ 0x030, 0x0000B565,
+ 0x0EF, 0x00000000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x0EF, 0x00000000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x0EF, 0x00000000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000443,
+ 0x030, 0x00001443,
+ 0x030, 0x00002443,
+ 0x030, 0x00003443,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x0EF, 0x00000000,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000443,
+ 0x030, 0x00001443,
+ 0x030, 0x00002443,
+ 0x030, 0x00003443,
+ 0x030, 0x00004483,
+ 0x030, 0x00005483,
+ 0x030, 0x00006483,
+ 0x030, 0x00007483,
+ 0x030, 0x000084A4,
+ 0x030, 0x000094A4,
+ 0x030, 0x0000A4A4,
+ 0x030, 0x0000B4A4,
+ 0x0EF, 0x00000000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000361,
+ 0x030, 0x00001361,
+ 0x030, 0x00002361,
+ 0x030, 0x00003361,
+ 0x030, 0x00004443,
+ 0x030, 0x00005443,
+ 0x030, 0x00006443,
+ 0x030, 0x00007443,
+ 0x030, 0x00008424,
+ 0x030, 0x00009424,
+ 0x030, 0x0000A424,
+ 0x030, 0x0000B424,
+ 0x0EF, 0x00000000,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x0EF, 0x00000000,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000403,
+ 0x030, 0x00001403,
+ 0x030, 0x00002403,
+ 0x030, 0x00003403,
+ 0x030, 0x000044A2,
+ 0x030, 0x000054A2,
+ 0x030, 0x000064A2,
+ 0x030, 0x000074A2,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x0EF, 0x00000000,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x0EF, 0x00000000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x0EF, 0x00000000,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x0EF, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A1,
+ 0x030, 0x000012A1,
+ 0x030, 0x000022A1,
+ 0x030, 0x000032A1,
+ 0x030, 0x000042A1,
+ 0x030, 0x000052A1,
+ 0x030, 0x000062A1,
+ 0x030, 0x000072A1,
+ 0x030, 0x000082A1,
+ 0x030, 0x000092A1,
+ 0x030, 0x0000A2A1,
+ 0x030, 0x0000B2A1,
+ 0x0EF, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A0,
+ 0x030, 0x000013A0,
+ 0x030, 0x000023A0,
+ 0x030, 0x000033A0,
+ 0x030, 0x000043A1,
+ 0x030, 0x000053A1,
+ 0x030, 0x000063A1,
+ 0x030, 0x000073A1,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x0EF, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A1,
+ 0x030, 0x000012A1,
+ 0x030, 0x000022A1,
+ 0x030, 0x000032A1,
+ 0x030, 0x000042A1,
+ 0x030, 0x000052A1,
+ 0x030, 0x000062A1,
+ 0x030, 0x000072A1,
+ 0x030, 0x000082A1,
+ 0x030, 0x000092A1,
+ 0x030, 0x0000A2A1,
+ 0x030, 0x0000B2A1,
+ 0x0EF, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A0,
+ 0x030, 0x000013A0,
+ 0x030, 0x000023A0,
+ 0x030, 0x000033A0,
+ 0x030, 0x00004430,
+ 0x030, 0x00005430,
+ 0x030, 0x00006430,
+ 0x030, 0x00007430,
+ 0x030, 0x00008372,
+ 0x030, 0x00009372,
+ 0x030, 0x0000A372,
+ 0x030, 0x0000B372,
+ 0x0EF, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x0EF, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003D0,
+ 0x030, 0x000013D0,
+ 0x030, 0x000023D0,
+ 0x030, 0x000033D0,
+ 0x030, 0x000043D0,
+ 0x030, 0x000053D0,
+ 0x030, 0x000063D0,
+ 0x030, 0x000073D0,
+ 0x030, 0x000083D0,
+ 0x030, 0x000093D0,
+ 0x030, 0x0000A3D0,
+ 0x030, 0x0000B3D0,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x000043A3,
+ 0x030, 0x000053A3,
+ 0x030, 0x000063A3,
+ 0x030, 0x000073A3,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000776,
+ 0x030, 0x00001455,
+ 0x030, 0x00002335,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000660,
+ 0x030, 0x00001443,
+ 0x030, 0x00002221,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000767,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000765,
+ 0x030, 0x00001632,
+ 0x030, 0x00002451,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000777,
+ 0x030, 0x00001454,
+ 0x030, 0x00002224,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000775,
+ 0x030, 0x00001422,
+ 0x030, 0x00002210,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000775,
+ 0x030, 0x00001343,
+ 0x030, 0x00002210,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C29,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C69,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000CA8,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C0B,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0E,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C2B,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2E,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF7,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000086D,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000870,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000891,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000894,
+ 0x033, 0x00000029,
+ 0x03F, 0x000008B5,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000008F5,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000829,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000848,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084B,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000080B,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000080E,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000848,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000869,
+ 0x033, 0x00000064,
+ 0x03F, 0x000008A9,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C10,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C4A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CC9,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000086D,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000870,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000891,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000894,
+ 0x033, 0x00000069,
+ 0x03F, 0x000008B5,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000008F5,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8E,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF5,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8E,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF5,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CED,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000824,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000827,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000082A,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000082D,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C68,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6B,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CCA,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CCD,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C08,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0B,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0E,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2E,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C31,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CCA,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CCD,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000086A,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000086D,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000870,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000891,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000894,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000008B5,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000008F5,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000047,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000004A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000004D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000050,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000094,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF4,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000047,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000004A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000004D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000050,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000094,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF4,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000400,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x000008BB,
+ 0x033, 0x00000001,
+ 0x03F, 0x000008BB,
+ 0x033, 0x00000002,
+ 0x03F, 0x000008BB,
+ 0x033, 0x00000003,
+ 0x03F, 0x000008BB,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000001,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000002,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000003,
+ 0x03F, 0x000004BB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000F34,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0xA0000000, 0x00000000,
+ 0x081, 0x0000F000,
+ 0x087, 0x00016040,
+ 0x051, 0x00000C00,
+ 0x052, 0x0007C241,
+ 0x053, 0x0001C069,
+ 0x054, 0x00078032,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00058750,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000008,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000071,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000074,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000008,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000071,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000074,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0005142C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0005144B,
+ 0x033, 0x00000002,
+ 0x03F, 0x0005144E,
+ 0x033, 0x00000003,
+ 0x03F, 0x00051C69,
+ 0x033, 0x00000004,
+ 0x03F, 0x00051C6C,
+ 0x033, 0x00000005,
+ 0x03F, 0x00051C6F,
+ 0x033, 0x00000006,
+ 0x03F, 0x00051CEB,
+ 0x033, 0x00000007,
+ 0x03F, 0x00051CEE,
+ 0x033, 0x00000008,
+ 0x03F, 0x00051CF1,
+ 0x033, 0x00000009,
+ 0x03F, 0x00051CF4,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00051CF7,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000010,
+ 0x033, 0x00000000,
+ 0x008, 0x0009C060,
+ 0x033, 0x00000001,
+ 0x008, 0x0009C060,
+ 0x0EF, 0x00000000,
+ 0x033, 0x000000A2,
+ 0x0EF, 0x00080000,
+ 0x03E, 0x0000593F,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000D0F4F,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C0F4F,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x033, 0x000000A3,
+ 0x0EF, 0x00080000,
+ 0x03E, 0x00005934,
+ 0x03F, 0x0005AFCF,
+ 0x0EF, 0x00000000,
+ 0x83000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0xA0000000, 0x00000000,
+ 0x0CE, 0x00094C00,
+ 0xB0000000, 0x00000000,
+ 0x83000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00064700,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0xA0000000, 0x00000000,
+ 0x0CF, 0x00064700,
+ 0xB0000000, 0x00000000,
+ 0x83000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000096,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000056,
+ 0x0EF, 0x00000000,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000096,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0B0, 0x000FF0FC,
+ 0x0C4, 0x00081402,
+ 0x0CC, 0x00082000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_a, A);
+
+static const u32 rtw8822b_rf_b[] = {
+ 0x000, 0x00030000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0xA0000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00010D24,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000002,
+ 0x03E, 0x0000003F,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000D0F4E,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C0F4E,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00000034,
+ 0x03F, 0x0004080E,
+ 0x0EF, 0x00080000,
+ 0x0DF, 0x00002449,
+ 0x033, 0x00000024,
+ 0x03E, 0x0000003F,
+ 0x03F, 0x00060FDE,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000037,
+ 0x03F, 0x0007EFCE,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000037,
+ 0x03F, 0x000DEFCE,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x00000009,
+ 0x018, 0x00010524,
+ 0x089, 0x00000207,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0xA0000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0xB0000000, 0x00000000,
+ 0x08B, 0x00061E3C,
+ 0x08C, 0x000112C7,
+ 0x08D, 0x000F4988,
+ 0x08E, 0x00064D40,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000007,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000005,
+ 0x03E, 0x000040C8,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000004,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000003,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000002,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000001,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000F,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000D,
+ 0x8300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040D0,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000009,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000008,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000017,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000DFF86,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000DFF86,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000015,
+ 0x8300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040D0,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000014,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000013,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000012,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000011,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000010,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00004000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000001,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00000401,
+ 0x084, 0x00001209,
+ 0x086, 0x000001A0,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0xA0000000, 0x00000000,
+ 0x087, 0x000E8180,
+ 0xB0000000, 0x00000000,
+ 0x088, 0x00070020,
+ 0x0DE, 0x00000010,
+ 0x0EF, 0x00008000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x0000003C,
+ 0x033, 0x0000000E,
+ 0x03F, 0x00000038,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000030,
+ 0x033, 0x0000000C,
+ 0x03F, 0x00000028,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000020,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000018,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000010,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000007,
+ 0x03F, 0x0000003C,
+ 0x033, 0x00000006,
+ 0x03F, 0x00000038,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000028,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000020,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000018,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000010,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000008,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00018D24,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x018, 0x00010D24,
+ 0x01B, 0x00075A40,
+ 0x0EE, 0x00000002,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000006,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000000,
+ 0x0EE, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0xA0000000, 0x00000000,
+ 0x061, 0x0005D3D0,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A3,
+ 0x030, 0x000053A3,
+ 0x030, 0x000063A3,
+ 0x030, 0x000073A3,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000303,
+ 0x030, 0x00001303,
+ 0x030, 0x00002303,
+ 0x030, 0x00003303,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A4,
+ 0x030, 0x000014A4,
+ 0x030, 0x000024A4,
+ 0x030, 0x000034A4,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x000083A5,
+ 0x030, 0x000093A5,
+ 0x030, 0x0000A3A5,
+ 0x030, 0x0000B3A5,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002F4,
+ 0x030, 0x000012F4,
+ 0x030, 0x000022F4,
+ 0x030, 0x000032F4,
+ 0x030, 0x00004365,
+ 0x030, 0x00005365,
+ 0x030, 0x00006365,
+ 0x030, 0x00007365,
+ 0x030, 0x000082A4,
+ 0x030, 0x000092A4,
+ 0x030, 0x0000A2A4,
+ 0x030, 0x0000B2A4,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000382,
+ 0x030, 0x00001382,
+ 0x030, 0x00002382,
+ 0x030, 0x00003382,
+ 0x030, 0x00004445,
+ 0x030, 0x00005445,
+ 0x030, 0x00006445,
+ 0x030, 0x00007445,
+ 0x030, 0x00008425,
+ 0x030, 0x00009425,
+ 0x030, 0x0000A425,
+ 0x030, 0x0000B425,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A3,
+ 0x030, 0x000053A3,
+ 0x030, 0x000063A3,
+ 0x030, 0x000073A3,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000443,
+ 0x030, 0x00001443,
+ 0x030, 0x00002443,
+ 0x030, 0x00003443,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000343,
+ 0x030, 0x00001343,
+ 0x030, 0x00002343,
+ 0x030, 0x00003343,
+ 0x030, 0x00004483,
+ 0x030, 0x00005483,
+ 0x030, 0x00006483,
+ 0x030, 0x00007483,
+ 0x030, 0x000083A4,
+ 0x030, 0x000093A4,
+ 0x030, 0x0000A3A4,
+ 0x030, 0x0000B3A4,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x00004423,
+ 0x030, 0x00005423,
+ 0x030, 0x00006423,
+ 0x030, 0x00007423,
+ 0x030, 0x00008324,
+ 0x030, 0x00009324,
+ 0x030, 0x0000A324,
+ 0x030, 0x0000B324,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000303,
+ 0x030, 0x00001303,
+ 0x030, 0x00002303,
+ 0x030, 0x00003303,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000403,
+ 0x030, 0x00001403,
+ 0x030, 0x00002403,
+ 0x030, 0x00003403,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A1,
+ 0x030, 0x000012A1,
+ 0x030, 0x000022A1,
+ 0x030, 0x000032A1,
+ 0x030, 0x000042A1,
+ 0x030, 0x000052A1,
+ 0x030, 0x000062A1,
+ 0x030, 0x000072A1,
+ 0x030, 0x000082A1,
+ 0x030, 0x000092A1,
+ 0x030, 0x0000A2A1,
+ 0x030, 0x0000B2A1,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000043A1,
+ 0x030, 0x000053A1,
+ 0x030, 0x000063A1,
+ 0x030, 0x000073A1,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A1,
+ 0x030, 0x000012A1,
+ 0x030, 0x000022A1,
+ 0x030, 0x000032A1,
+ 0x030, 0x000042A1,
+ 0x030, 0x000052A1,
+ 0x030, 0x000062A1,
+ 0x030, 0x000072A1,
+ 0x030, 0x000082A1,
+ 0x030, 0x000092A1,
+ 0x030, 0x0000A2A1,
+ 0x030, 0x0000B2A1,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A1,
+ 0x030, 0x000014A1,
+ 0x030, 0x000024A1,
+ 0x030, 0x000034A1,
+ 0x030, 0x000043A1,
+ 0x030, 0x000053A1,
+ 0x030, 0x000063A1,
+ 0x030, 0x000073A1,
+ 0x030, 0x000083A1,
+ 0x030, 0x000093A1,
+ 0x030, 0x0000A3A1,
+ 0x030, 0x0000B3A1,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x000002D0,
+ 0x030, 0x000012D0,
+ 0x030, 0x000022D0,
+ 0x030, 0x000032D0,
+ 0x030, 0x000042D0,
+ 0x030, 0x000052D0,
+ 0x030, 0x000062D0,
+ 0x030, 0x000072D0,
+ 0x030, 0x000082D0,
+ 0x030, 0x000092D0,
+ 0x030, 0x0000A2D0,
+ 0x030, 0x0000B2D0,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001452,
+ 0x030, 0x00002220,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000660,
+ 0x030, 0x00001341,
+ 0x030, 0x00002220,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000767,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000765,
+ 0x030, 0x00001632,
+ 0x030, 0x00002451,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000776,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000775,
+ 0x030, 0x00001422,
+ 0x030, 0x00002210,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000775,
+ 0x030, 0x00001222,
+ 0x030, 0x00002210,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4E,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6E,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000889,
+ 0x033, 0x00000024,
+ 0x03F, 0x000008AA,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C25,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C28,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C2B,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C68,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C6B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6E,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF7,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEA,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000829,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000848,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084B,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000842,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000845,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000866,
+ 0x033, 0x00000063,
+ 0x03F, 0x000008A6,
+ 0x033, 0x00000064,
+ 0x03F, 0x000008C8,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C10,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C4A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CC9,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEA,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4E,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8F,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF5,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4E,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8F,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF5,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CED,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000826,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000082C,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000082F,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000086C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000080A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000080D,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000810,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000868,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C68,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6B,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CAB,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAE,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C08,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0B,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0E,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2E,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C31,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CAB,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAE,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CEA,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CED,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000047,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000004A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000004D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000050,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000094,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000047,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000004A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000004D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000050,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000094,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000400,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x000004FB,
+ 0x033, 0x00000001,
+ 0x03F, 0x000004FB,
+ 0x033, 0x00000002,
+ 0x03F, 0x000004FB,
+ 0x033, 0x00000003,
+ 0x03F, 0x000004FB,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000001,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000002,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000003,
+ 0x03F, 0x000004BB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000F34,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0xA0000000, 0x00000000,
+ 0x081, 0x0000F000,
+ 0x087, 0x00016040,
+ 0x051, 0x00000C00,
+ 0x052, 0x0007C241,
+ 0x053, 0x0001C069,
+ 0x054, 0x00078032,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00058750,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000008,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000071,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000074,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0005142C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0005142F,
+ 0x033, 0x00000002,
+ 0x03F, 0x00051432,
+ 0x033, 0x00000003,
+ 0x03F, 0x00051CA5,
+ 0x033, 0x00000004,
+ 0x03F, 0x00051CA8,
+ 0x033, 0x00000005,
+ 0x03F, 0x00051CAB,
+ 0x033, 0x00000006,
+ 0x03F, 0x00051CEB,
+ 0x033, 0x00000007,
+ 0x03F, 0x00051CEE,
+ 0x033, 0x00000008,
+ 0x03F, 0x00051CF1,
+ 0x033, 0x00000009,
+ 0x03F, 0x00051CF4,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00051CF7,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0005142C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0005144B,
+ 0x033, 0x00000002,
+ 0x03F, 0x00051868,
+ 0x033, 0x00000003,
+ 0x03F, 0x0005186B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0005186E,
+ 0x033, 0x00000005,
+ 0x03F, 0x00051871,
+ 0x033, 0x00000006,
+ 0x03F, 0x00051874,
+ 0x033, 0x00000007,
+ 0x03F, 0x00051895,
+ 0x033, 0x00000008,
+ 0x03F, 0x000518B6,
+ 0x033, 0x00000009,
+ 0x03F, 0x000518F6,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00051CF7,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000008,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000071,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000074,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0005142C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0005142F,
+ 0x033, 0x00000002,
+ 0x03F, 0x00051432,
+ 0x033, 0x00000003,
+ 0x03F, 0x00051C87,
+ 0x033, 0x00000004,
+ 0x03F, 0x00051C8A,
+ 0x033, 0x00000005,
+ 0x03F, 0x00051C8D,
+ 0x033, 0x00000006,
+ 0x03F, 0x00051CEB,
+ 0x033, 0x00000007,
+ 0x03F, 0x00051CEE,
+ 0x033, 0x00000008,
+ 0x03F, 0x00051CF1,
+ 0x033, 0x00000009,
+ 0x03F, 0x00051CF4,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00051CF7,
+ 0xB0000000, 0x00000000,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000010,
+ 0x033, 0x00000000,
+ 0x008, 0x0009C060,
+ 0x033, 0x00000001,
+ 0x008, 0x0009C060,
+ 0x0EF, 0x00000000,
+ 0x033, 0x000000A2,
+ 0x0EF, 0x00080000,
+ 0x03E, 0x0000593F,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000D0F4F,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C0F4F,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x033, 0x000000A3,
+ 0x0EF, 0x00080000,
+ 0x03E, 0x00005934,
+ 0x03F, 0x0005AFCF,
+ 0x0EF, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_b, B);
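The rtw8822b_rf_b array above is a flat sequence of (address, value) pairs in which words with the top nibble set appear to act as control markers rather than register writes: 0x8xxxxxxx/0x9xxxxxxx seem to open conditional branches, 0xA0000000 a default branch, and 0xB0000000 closes the block, with a 0x40000000 framing word following each branch header. A minimal standalone sketch of a walker for that assumed framing follows; it is not part of this patch, the names are hypothetical, and the selector match is only a placeholder for the driver's real condition logic (which keys off chip cut, package and RFE type).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical view of one table element: an address/value pair. */
struct rf_entry {
	uint32_t addr;
	uint32_t data;
};

/* Placeholder for the driver's hardware-matching rule: here a branch is
 * taken when its selector (low 28 bits of the marker) equals 'cond'. */
static bool cond_matches(uint32_t selector, uint32_t cond)
{
	return selector == cond;
}

/* Walk the flat table, honouring the assumed IF/ELSE-IF/ELSE/ENDIF
 * markers, and "apply" only the entries of the first matching branch. */
static void walk_rf_table(const struct rf_entry *tbl, size_t n, uint32_t cond)
{
	bool taken = false;	/* a branch of the current block matched */
	bool apply = true;	/* entries seen right now should be applied */
	size_t i;

	for (i = 0; i < n; i++) {
		uint32_t a = tbl[i].addr, d = tbl[i].data;
		uint32_t top = a & 0xF0000000;

		if (top == 0x80000000 || top == 0x90000000) {
			/* IF / ELSE-IF: selector in the low 28 bits */
			apply = !taken && cond_matches(a & 0x0FFFFFFF, cond);
			taken |= apply;
		} else if (a == 0xA0000000) {
			/* ELSE: applies only if no earlier branch matched */
			apply = !taken;
		} else if (a == 0xB0000000) {
			/* ENDIF: back to unconditional application */
			taken = false;
			apply = true;
		} else if (top) {
			/* Other framing words (e.g. the 0x40000000 that
			 * follows each branch header) carry no write. */
			continue;
		} else if (apply) {
			printf("write RF reg 0x%03X = 0x%08X\n",
			       (unsigned int)a, (unsigned int)d);
		}
	}
}

int main(void)
{
	/* Tiny excerpt-shaped sample: one conditional block, one plain write. */
	static const struct rf_entry sample[] = {
		{ 0x83000001, 0x00000000 }, { 0x40000000, 0x00000000 },
		{ 0x033, 0x00000000 },      { 0x03F, 0x00000745 },
		{ 0xA0000000, 0x00000000 },
		{ 0x033, 0x00000000 },      { 0x03F, 0x00000F34 },
		{ 0xB0000000, 0x00000000 },
		{ 0x0EF, 0x00000000 },
	};

	/* Pretend the hardware matches selector 0x3000001. */
	walk_rf_table(sample, sizeof(sample) / sizeof(sample[0]), 0x3000001);
	return 0;
}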
+
+static const u8 rtw8822b_txpwr_lmt_type2[] = {
+ 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
+ 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30,
+ 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30,
+ 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30,
+ 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30,
+ 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30,
+ 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30,
+ 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30,
+ 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30,
+ 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30,
+ 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30,
+ 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30,
+ 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28,
+ 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32,
+ 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34,
+ 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34,
+ 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34,
+ 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34,
+ 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34,
+ 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34,
+ 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34,
+ 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34,
+ 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34,
+ 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34,
+ 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34,
+ 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34,
+ 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34,
+ 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63,
+ 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34,
+ 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34,
+ 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34,
+ 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34,
+ 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34,
+ 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34,
+ 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34,
+ 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34,
+ 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34,
+ 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34,
+ 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34,
+ 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34,
+ 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34,
+ 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63,
+ 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30,
+ 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30,
+ 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30,
+ 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30,
+ 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30,
+ 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30,
+ 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30,
+ 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30,
+ 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30,
+ 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30,
+ 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30,
+ 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30,
+ 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30,
+ 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63,
+ 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63,
+ 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63,
+ 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34,
+ 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34,
+ 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34,
+ 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34,
+ 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34,
+ 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34,
+ 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34,
+ 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34,
+ 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34,
+ 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63,
+ 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63,
+ 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63,
+ 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63,
+ 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63,
+ 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30,
+ 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30,
+ 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30,
+ 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30,
+ 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30,
+ 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30,
+ 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30,
+ 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30,
+ 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30,
+ 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63,
+ 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63,
+ 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63,
+ 0, 1, 0, 1, 36, 36, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30,
+ 0, 1, 0, 1, 40, 38, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30,
+ 0, 1, 0, 1, 44, 38, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30,
+ 0, 1, 0, 1, 48, 38, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30,
+ 0, 1, 0, 1, 52, 38, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28,
+ 0, 1, 0, 1, 56, 38, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28,
+ 0, 1, 0, 1, 60, 38, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28,
+ 0, 1, 0, 1, 64, 34, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28,
+ 0, 1, 0, 1, 100, 32, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32,
+ 0, 1, 0, 1, 104, 38, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32,
+ 0, 1, 0, 1, 108, 38, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32,
+ 0, 1, 0, 1, 112, 38, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32,
+ 0, 1, 0, 1, 116, 38, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32,
+ 0, 1, 0, 1, 120, 38, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32,
+ 0, 1, 0, 1, 124, 38, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32,
+ 0, 1, 0, 1, 128, 38, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32,
+ 0, 1, 0, 1, 132, 38, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32,
+ 0, 1, 0, 1, 136, 38, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32,
+ 0, 1, 0, 1, 140, 34, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32,
+ 0, 1, 0, 1, 144, 34, 2, 1, 0, 1, 144, 32, 1, 1, 0, 1, 144, 63,
+ 0, 1, 0, 1, 149, 38, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63,
+ 0, 1, 0, 1, 153, 38, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63,
+ 0, 1, 0, 1, 157, 38, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63,
+ 0, 1, 0, 1, 161, 38, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63,
+ 0, 1, 0, 1, 165, 38, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63,
+ 0, 1, 0, 2, 36, 36, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28,
+ 0, 1, 0, 2, 40, 38, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28,
+ 0, 1, 0, 2, 44, 38, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28,
+ 0, 1, 0, 2, 48, 38, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28,
+ 0, 1, 0, 2, 52, 38, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28,
+ 0, 1, 0, 2, 56, 38, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28,
+ 0, 1, 0, 2, 60, 38, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28,
+ 0, 1, 0, 2, 64, 34, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28,
+ 0, 1, 0, 2, 100, 32, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32,
+ 0, 1, 0, 2, 104, 38, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32,
+ 0, 1, 0, 2, 108, 38, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32,
+ 0, 1, 0, 2, 112, 38, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32,
+ 0, 1, 0, 2, 116, 38, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32,
+ 0, 1, 0, 2, 120, 38, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32,
+ 0, 1, 0, 2, 124, 38, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32,
+ 0, 1, 0, 2, 128, 38, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32,
+ 0, 1, 0, 2, 132, 38, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32,
+ 0, 1, 0, 2, 136, 38, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32,
+ 0, 1, 0, 2, 140, 32, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32,
+ 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63,
+ 0, 1, 0, 2, 149, 38, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63,
+ 0, 1, 0, 2, 153, 38, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63,
+ 0, 1, 0, 2, 157, 38, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63,
+ 0, 1, 0, 2, 161, 38, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63,
+ 0, 1, 0, 2, 165, 38, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63,
+ 0, 1, 0, 3, 36, 34, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22,
+ 0, 1, 0, 3, 40, 36, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22,
+ 0, 1, 0, 3, 44, 36, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22,
+ 0, 1, 0, 3, 48, 36, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22,
+ 0, 1, 0, 3, 52, 36, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22,
+ 0, 1, 0, 3, 56, 36, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22,
+ 0, 1, 0, 3, 60, 36, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22,
+ 0, 1, 0, 3, 64, 34, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22,
+ 0, 1, 0, 3, 100, 32, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30,
+ 0, 1, 0, 3, 104, 36, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30,
+ 0, 1, 0, 3, 108, 38, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30,
+ 0, 1, 0, 3, 112, 38, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30,
+ 0, 1, 0, 3, 116, 38, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30,
+ 0, 1, 0, 3, 120, 38, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30,
+ 0, 1, 0, 3, 124, 38, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30,
+ 0, 1, 0, 3, 128, 38, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30,
+ 0, 1, 0, 3, 132, 38, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30,
+ 0, 1, 0, 3, 136, 36, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30,
+ 0, 1, 0, 3, 140, 32, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30,
+ 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63,
+ 0, 1, 0, 3, 149, 38, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63,
+ 0, 1, 0, 3, 153, 38, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63,
+ 0, 1, 0, 3, 157, 38, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63,
+ 0, 1, 0, 3, 161, 38, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63,
+ 0, 1, 0, 3, 165, 38, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63,
+ 0, 1, 1, 2, 38, 28, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30,
+ 0, 1, 1, 2, 46, 36, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30,
+ 0, 1, 1, 2, 54, 36, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30,
+ 0, 1, 1, 2, 62, 30, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30,
+ 0, 1, 1, 2, 102, 30, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30,
+ 0, 1, 1, 2, 110, 36, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30,
+ 0, 1, 1, 2, 118, 36, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30,
+ 0, 1, 1, 2, 126, 36, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30,
+ 0, 1, 1, 2, 134, 36, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30,
+ 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63,
+ 0, 1, 1, 2, 151, 36, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63,
+ 0, 1, 1, 2, 159, 36, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63,
+ 0, 1, 1, 3, 38, 26, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22,
+ 0, 1, 1, 3, 46, 36, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22,
+ 0, 1, 1, 3, 54, 36, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22,
+ 0, 1, 1, 3, 62, 28, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22,
+ 0, 1, 1, 3, 102, 28, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30,
+ 0, 1, 1, 3, 110, 36, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30,
+ 0, 1, 1, 3, 118, 36, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30,
+ 0, 1, 1, 3, 126, 36, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30,
+ 0, 1, 1, 3, 134, 36, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30,
+ 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63,
+ 0, 1, 1, 3, 151, 36, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63,
+ 0, 1, 1, 3, 159, 36, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63,
+ 0, 1, 2, 4, 42, 26, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28,
+ 0, 1, 2, 4, 58, 26, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28,
+ 0, 1, 2, 4, 106, 26, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30,
+ 0, 1, 2, 4, 122, 36, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30,
+ 0, 1, 2, 4, 138, 36, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63,
+ 0, 1, 2, 4, 155, 36, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63,
+ 0, 1, 2, 5, 42, 24, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22,
+ 0, 1, 2, 5, 58, 24, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22,
+ 0, 1, 2, 5, 106, 26, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30,
+ 0, 1, 2, 5, 122, 36, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30,
+ 0, 1, 2, 5, 138, 36, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63,
+ 0, 1, 2, 5, 155, 36, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type2);
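The power-limit arrays (rtw8822b_txpwr_lmt_type2 above and rtw8822b_txpwr_lmt_type5 below) are flat byte tables. Assuming each consecutive group of six bytes reads as {regulatory domain, band, bandwidth, rate section, channel, power limit}, with 63 apparently serving as a "not applicable" sentinel, a minimal standalone reader could look like the sketch below; it is illustrative only, and the struct and function names are hypothetical rather than the driver's API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical decoded view of one six-byte group from the flat table. */
struct txpwr_lmt_ent {
	uint8_t regd;   /* regulatory domain index               */
	uint8_t band;   /* 0 = 2.4 GHz, 1 = 5 GHz (assumed)      */
	uint8_t bw;     /* bandwidth index                       */
	uint8_t rs;     /* rate-section index                    */
	uint8_t ch;     /* channel number                        */
	uint8_t limit;  /* power limit; 63 looks like a sentinel */
};

/* Walk a flat u8 table in six-byte steps and hand each decoded entry
 * to the caller; trailing bytes short of a full group are ignored. */
static void for_each_txpwr_lmt(const uint8_t *tbl, size_t len,
			       void (*fn)(const struct txpwr_lmt_ent *))
{
	size_t i;

	for (i = 0; i + 6 <= len; i += 6) {
		struct txpwr_lmt_ent e = {
			.regd = tbl[i],     .band = tbl[i + 1],
			.bw = tbl[i + 2],   .rs = tbl[i + 3],
			.ch = tbl[i + 4],   .limit = tbl[i + 5],
		};
		fn(&e);
	}
}

static void print_ent(const struct txpwr_lmt_ent *e)
{
	printf("regd %u band %u bw %u rs %u ch %3u limit %u\n",
	       e->regd, e->band, e->bw, e->rs, e->ch, e->limit);
}

int main(void)
{
	/* First row of the table above, reproduced as sample input. */
	static const uint8_t sample[] = {
		0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
	};

	for_each_txpwr_lmt(sample, sizeof(sample), print_ent);
	return 0;
}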
+
+static const u8 rtw8822b_txpwr_lmt_type5[] = {
+ 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
+ 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30,
+ 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30,
+ 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30,
+ 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30,
+ 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30,
+ 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30,
+ 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30,
+ 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30,
+ 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30,
+ 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30,
+ 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30,
+ 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28,
+ 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32,
+ 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34,
+ 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34,
+ 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34,
+ 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34,
+ 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34,
+ 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34,
+ 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34,
+ 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34,
+ 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34,
+ 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34,
+ 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34,
+ 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34,
+ 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34,
+ 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63,
+ 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34,
+ 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34,
+ 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34,
+ 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34,
+ 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34,
+ 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34,
+ 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34,
+ 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34,
+ 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34,
+ 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34,
+ 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34,
+ 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34,
+ 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34,
+ 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63,
+ 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30,
+ 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30,
+ 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30,
+ 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30,
+ 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30,
+ 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30,
+ 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30,
+ 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30,
+ 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30,
+ 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30,
+ 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30,
+ 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30,
+ 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30,
+ 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63,
+ 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63,
+ 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63,
+ 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34,
+ 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34,
+ 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34,
+ 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34,
+ 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34,
+ 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34,
+ 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34,
+ 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34,
+ 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34,
+ 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63,
+ 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63,
+ 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63,
+ 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63,
+ 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63,
+ 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30,
+ 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30,
+ 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30,
+ 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30,
+ 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30,
+ 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30,
+ 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30,
+ 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30,
+ 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30,
+ 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63,
+ 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63,
+ 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63,
+ 0, 1, 0, 1, 36, 30, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30,
+ 0, 1, 0, 1, 40, 32, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30,
+ 0, 1, 0, 1, 44, 32, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30,
+ 0, 1, 0, 1, 48, 32, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30,
+ 0, 1, 0, 1, 52, 32, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28,
+ 0, 1, 0, 1, 56, 32, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28,
+ 0, 1, 0, 1, 60, 32, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28,
+ 0, 1, 0, 1, 64, 28, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28,
+ 0, 1, 0, 1, 100, 26, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32,
+ 0, 1, 0, 1, 104, 32, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32,
+ 0, 1, 0, 1, 108, 32, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32,
+ 0, 1, 0, 1, 112, 32, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32,
+ 0, 1, 0, 1, 116, 32, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32,
+ 0, 1, 0, 1, 120, 32, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32,
+ 0, 1, 0, 1, 124, 32, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32,
+ 0, 1, 0, 1, 128, 32, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32,
+ 0, 1, 0, 1, 132, 32, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32,
+ 0, 1, 0, 1, 136, 32, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32,
+ 0, 1, 0, 1, 140, 28, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32,
+ 0, 1, 0, 1, 144, 28, 2, 1, 0, 1, 144, 63, 1, 1, 0, 1, 144, 63,
+ 0, 1, 0, 1, 149, 32, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63,
+ 0, 1, 0, 1, 153, 32, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63,
+ 0, 1, 0, 1, 157, 32, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63,
+ 0, 1, 0, 1, 161, 32, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63,
+ 0, 1, 0, 1, 165, 32, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63,
+ 0, 1, 0, 2, 36, 30, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28,
+ 0, 1, 0, 2, 40, 32, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28,
+ 0, 1, 0, 2, 44, 32, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28,
+ 0, 1, 0, 2, 48, 32, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28,
+ 0, 1, 0, 2, 52, 32, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28,
+ 0, 1, 0, 2, 56, 32, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28,
+ 0, 1, 0, 2, 60, 32, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28,
+ 0, 1, 0, 2, 64, 28, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28,
+ 0, 1, 0, 2, 100, 26, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32,
+ 0, 1, 0, 2, 104, 32, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32,
+ 0, 1, 0, 2, 108, 32, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32,
+ 0, 1, 0, 2, 112, 32, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32,
+ 0, 1, 0, 2, 116, 32, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32,
+ 0, 1, 0, 2, 120, 32, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32,
+ 0, 1, 0, 2, 124, 32, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32,
+ 0, 1, 0, 2, 128, 32, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32,
+ 0, 1, 0, 2, 132, 32, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32,
+ 0, 1, 0, 2, 136, 32, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32,
+ 0, 1, 0, 2, 140, 26, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32,
+ 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63,
+ 0, 1, 0, 2, 149, 32, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63,
+ 0, 1, 0, 2, 153, 32, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63,
+ 0, 1, 0, 2, 157, 32, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63,
+ 0, 1, 0, 2, 161, 32, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63,
+ 0, 1, 0, 2, 165, 32, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63,
+ 0, 1, 0, 3, 36, 28, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22,
+ 0, 1, 0, 3, 40, 30, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22,
+ 0, 1, 0, 3, 44, 30, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22,
+ 0, 1, 0, 3, 48, 30, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22,
+ 0, 1, 0, 3, 52, 30, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22,
+ 0, 1, 0, 3, 56, 30, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22,
+ 0, 1, 0, 3, 60, 30, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22,
+ 0, 1, 0, 3, 64, 28, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22,
+ 0, 1, 0, 3, 100, 26, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30,
+ 0, 1, 0, 3, 104, 30, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30,
+ 0, 1, 0, 3, 108, 32, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30,
+ 0, 1, 0, 3, 112, 32, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30,
+ 0, 1, 0, 3, 116, 32, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30,
+ 0, 1, 0, 3, 120, 32, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30,
+ 0, 1, 0, 3, 124, 32, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30,
+ 0, 1, 0, 3, 128, 32, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30,
+ 0, 1, 0, 3, 132, 32, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30,
+ 0, 1, 0, 3, 136, 30, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30,
+ 0, 1, 0, 3, 140, 26, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30,
+ 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63,
+ 0, 1, 0, 3, 149, 32, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63,
+ 0, 1, 0, 3, 153, 32, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63,
+ 0, 1, 0, 3, 157, 32, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63,
+ 0, 1, 0, 3, 161, 32, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63,
+ 0, 1, 0, 3, 165, 32, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63,
+ 0, 1, 1, 2, 38, 22, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30,
+ 0, 1, 1, 2, 46, 30, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30,
+ 0, 1, 1, 2, 54, 30, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30,
+ 0, 1, 1, 2, 62, 24, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30,
+ 0, 1, 1, 2, 102, 24, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30,
+ 0, 1, 1, 2, 110, 30, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30,
+ 0, 1, 1, 2, 118, 30, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30,
+ 0, 1, 1, 2, 126, 30, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30,
+ 0, 1, 1, 2, 134, 30, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30,
+ 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63,
+ 0, 1, 1, 2, 151, 30, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63,
+ 0, 1, 1, 2, 159, 30, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63,
+ 0, 1, 1, 3, 38, 20, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22,
+ 0, 1, 1, 3, 46, 30, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22,
+ 0, 1, 1, 3, 54, 30, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22,
+ 0, 1, 1, 3, 62, 22, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22,
+ 0, 1, 1, 3, 102, 22, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30,
+ 0, 1, 1, 3, 110, 30, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30,
+ 0, 1, 1, 3, 118, 30, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30,
+ 0, 1, 1, 3, 126, 30, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30,
+ 0, 1, 1, 3, 134, 30, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30,
+ 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63,
+ 0, 1, 1, 3, 151, 30, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63,
+ 0, 1, 1, 3, 159, 30, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63,
+ 0, 1, 2, 4, 42, 20, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28,
+ 0, 1, 2, 4, 58, 20, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28,
+ 0, 1, 2, 4, 106, 20, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30,
+ 0, 1, 2, 4, 122, 30, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30,
+ 0, 1, 2, 4, 138, 30, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63,
+ 0, 1, 2, 4, 155, 30, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63,
+ 0, 1, 2, 5, 42, 18, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22,
+ 0, 1, 2, 5, 58, 18, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22,
+ 0, 1, 2, 5, 106, 20, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30,
+ 0, 1, 2, 5, 122, 30, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30,
+ 0, 1, 2, 5, 138, 30, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63,
+ 0, 1, 2, 5, 155, 30, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63,
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type5);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
new file mode 100644
index 000000000000..d4c268889368
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8822B_TABLE_H__
+#define __RTW8822B_TABLE_H__
+
+extern const struct rtw_table rtw8822b_mac_tbl;
+extern const struct rtw_table rtw8822b_agc_tbl;
+extern const struct rtw_table rtw8822b_bb_tbl;
+extern const struct rtw_table rtw8822b_bb_pg_type2_tbl;
+extern const struct rtw_table rtw8822b_bb_pg_type5_tbl;
+extern const struct rtw_table rtw8822b_rf_a_tbl;
+extern const struct rtw_table rtw8822b_rf_b_tbl;
+extern const struct rtw_table rtw8822b_txpwr_lmt_type2_tbl;
+extern const struct rtw_table rtw8822b_txpwr_lmt_type5_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
new file mode 100644
index 000000000000..b4f7242e5aa3
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -0,0 +1,1890 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "tx.h"
+#include "rx.h"
+#include "phy.h"
+#include "rtw8822c.h"
+#include "rtw8822c_table.h"
+#include "mac.h"
+#include "reg.h"
+#include "debug.h"
+
+static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path);
+
+static void rtw8822ce_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8822c_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
+static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8822c_efuse *map;
+ int i;
+
+ map = (struct rtw8822c_efuse *)log_map;
+
+ efuse->rfe_option = map->rfe_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8822ce_efuse_parsing(efuse, map);
+ break;
+ default:
+ /* unsupported now */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtw8822c_header_file_init(struct rtw_dev *rtwdev, bool pre)
+{
+ rtw_write32_set(rtwdev, REG_3WIRE, BIT_3WIRE_TX_EN | BIT_3WIRE_RX_EN);
+ rtw_write32_set(rtwdev, REG_3WIRE, BIT_3WIRE_PI_ON);
+ rtw_write32_set(rtwdev, REG_3WIRE2, BIT_3WIRE_TX_EN | BIT_3WIRE_RX_EN);
+ rtw_write32_set(rtwdev, REG_3WIRE2, BIT_3WIRE_PI_ON);
+
+ if (pre)
+ rtw_write32_clr(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
+ else
+ rtw_write32_set(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
+}
+
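+/* Snapshot the DACK-related baseband registers and RF register 0x8f on
+ * both paths so they can be restored once DAC calibration completes.
+ */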
+static void rtw8822c_dac_backup_reg(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *backup,
+ struct rtw_backup_info *backup_rf)
+{
+ u32 path, i;
+ u32 val;
+ u32 reg;
+ u32 rf_addr[DACK_RF_8822C] = {0x8f};
+ u32 addrs[DACK_REG_8822C] = {0x180c, 0x1810, 0x410c, 0x4110,
+ 0x1c3c, 0x1c24, 0x1d70, 0x9b4,
+ 0x1a00, 0x1a14, 0x1d58, 0x1c38,
+ 0x1e24, 0x1e28, 0x1860, 0x4160};
+
+ for (i = 0; i < DACK_REG_8822C; i++) {
+ backup[i].len = 4;
+ backup[i].reg = addrs[i];
+ backup[i].val = rtw_read32(rtwdev, addrs[i]);
+ }
+
+ for (path = 0; path < DACK_PATH_8822C; path++) {
+ for (i = 0; i < DACK_RF_8822C; i++) {
+ reg = rf_addr[i];
+ val = rtw_read_rf(rtwdev, path, reg, RFREG_MASK);
+ backup_rf[path * i + i].reg = reg;
+ backup_rf[path * i + i].val = val;
+ }
+ }
+}
+
+static void rtw8822c_dac_restore_reg(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *backup,
+ struct rtw_backup_info *backup_rf)
+{
+ u32 path, i;
+ u32 val;
+ u32 reg;
+
+ rtw_restore_reg(rtwdev, backup, DACK_REG_8822C);
+
+ for (path = 0; path < DACK_PATH_8822C; path++) {
+ for (i = 0; i < DACK_RF_8822C; i++) {
+ val = backup_rf[path * i + i].val;
+ reg = backup_rf[path * i + i].reg;
+ rtw_write_rf(rtwdev, path, reg, RFREG_MASK, val);
+ }
+ }
+}
+
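+/* DACK samples are 10-bit values: readings >= 0x200 encode negative
+ * offsets with magnitude 0x400 - value. Track the running min/max with
+ * that signed ordering.
+ */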
+static void rtw8822c_rf_minmax_cmp(struct rtw_dev *rtwdev, u32 value,
+ u32 *min, u32 *max)
+{
+ if (value >= 0x200) {
+ if (*min >= 0x200) {
+ if (*min > value)
+ *min = value;
+ } else {
+ *min = value;
+ }
+ if (*max >= 0x200) {
+ if (*max < value)
+ *max = value;
+ }
+ } else {
+ if (*min < 0x200) {
+ if (*min > value)
+ *min = value;
+ }
+
+ if (*max >= 0x200) {
+ *max = value;
+ } else {
+ if (*max < value)
+ *max = value;
+ }
+ }
+}
+
+static void swap_u32(u32 *v1, u32 *v2)
+{
+ u32 tmp;
+
+ tmp = *v1;
+ *v1 = *v2;
+ *v2 = tmp;
+}
+
+static void __rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *v1, u32 *v2)
+{
+ if (*v1 >= 0x200 && *v2 >= 0x200) {
+ if (*v1 > *v2)
+ swap_u32(v1, v2);
+ } else if (*v1 < 0x200 && *v2 < 0x200) {
+ if (*v1 > *v2)
+ swap_u32(v1, v2);
+ } else if (*v1 < 0x200 && *v2 >= 0x200) {
+ swap_u32(v1, v2);
+ }
+}
+
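+/* Bubble-sort the I and Q sample vectors (each independently), treating
+ * values >= 0x200 as negatives.
+ */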
+static void rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *iv, u32 *qv)
+{
+ u32 i, j;
+
+ for (i = 0; i < DACK_SN_8822C - 1; i++) {
+ for (j = 0; j < (DACK_SN_8822C - 1 - i) ; j++) {
+ __rtw8822c_dac_iq_sort(rtwdev, &iv[j], &iv[j + 1]);
+ __rtw8822c_dac_iq_sort(rtwdev, &qv[j], &qv[j + 1]);
+ }
+ }
+}
+
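+/* Average the middle DACK_SN_8822C - 20 samples (10 dropped from each end
+ * of the sorted vector) and return the mean offset, re-encoded as a 10-bit
+ * value when the result is negative.
+ */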
+static void rtw8822c_dac_iq_offset(struct rtw_dev *rtwdev, u32 *vec, u32 *val)
+{
+ u32 p, m, t, i;
+
+ m = 0;
+ p = 0;
+ for (i = 10; i < DACK_SN_8822C - 10; i++) {
+ if (vec[i] > 0x200)
+ m = (0x400 - vec[i]) + m;
+ else
+ p = vec[i] + p;
+ }
+
+ if (p > m) {
+ t = p - m;
+ t = t / (DACK_SN_8822C - 20);
+ } else {
+ t = m - p;
+ t = t / (DACK_SN_8822C - 20);
+ if (t != 0x0)
+ t = 0x400 - t;
+ }
+
+ *val = t;
+}
+
+static u32 rtw8822c_get_path_base_addr(u8 path)
+{
+ u32 base_addr;
+
+ switch (path) {
+ case RF_PATH_A:
+ base_addr = 0x1800;
+ break;
+ case RF_PATH_B:
+ base_addr = 0x4100;
+ break;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+
+ return base_addr;
+}
+
+static bool rtw8822c_dac_iq_check(struct rtw_dev *rtwdev, u32 value)
+{
+ bool ret = true;
+
+ if ((value >= 0x200 && (0x400 - value) > 0x64) ||
+ (value < 0x200 && value > 0x64)) {
+ ret = false;
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] Error overflow\n");
+ }
+
+ return ret;
+}
+
+static void rtw8822c_dac_cal_iq_sample(struct rtw_dev *rtwdev, u32 *iv, u32 *qv)
+{
+ u32 temp;
+ int i = 0, cnt = 0;
+
+ while (i < DACK_SN_8822C && cnt < 10000) {
+ cnt++;
+ temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
+ iv[i] = (temp & 0x3ff000) >> 12;
+ qv[i] = temp & 0x3ff;
+
+ if (rtw8822c_dac_iq_check(rtwdev, iv[i]) &&
+ rtw8822c_dac_iq_check(rtwdev, qv[i]))
+ i++;
+ }
+}
+
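+/* Replace the extreme samples with fresh readings until the I/Q spread is
+ * at most 5 (or 100 retries elapse), then reduce the settled samples to
+ * mean I/Q offsets.
+ */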
+static void rtw8822c_dac_cal_iq_search(struct rtw_dev *rtwdev,
+ u32 *iv, u32 *qv,
+ u32 *i_value, u32 *q_value)
+{
+ u32 i_max = 0, q_max = 0, i_min = 0, q_min = 0;
+ u32 i_delta, q_delta;
+ u32 temp;
+ int i, cnt = 0;
+
+ do {
+ i_min = iv[0];
+ i_max = iv[0];
+ q_min = qv[0];
+ q_max = qv[0];
+ for (i = 0; i < DACK_SN_8822C; i++) {
+ rtw8822c_rf_minmax_cmp(rtwdev, iv[i], &i_min, &i_max);
+ rtw8822c_rf_minmax_cmp(rtwdev, qv[i], &q_min, &q_max);
+ }
+
+ if (i_max < 0x200 && i_min < 0x200)
+ i_delta = i_max - i_min;
+ else if (i_max >= 0x200 && i_min >= 0x200)
+ i_delta = i_max - i_min;
+ else
+ i_delta = i_max + (0x400 - i_min);
+
+ if (q_max < 0x200 && q_min < 0x200)
+ q_delta = q_max - q_min;
+ else if (q_max >= 0x200 && q_min >= 0x200)
+ q_delta = q_max - q_min;
+ else
+ q_delta = q_max + (0x400 - q_min);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] i: min=0x%08x, max=0x%08x, delta=0x%08x\n",
+ i_min, i_max, i_delta);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] q: min=0x%08x, max=0x%08x, delta=0x%08x\n",
+ q_min, q_max, q_delta);
+
+ rtw8822c_dac_iq_sort(rtwdev, iv, qv);
+
+ if (i_delta > 5 || q_delta > 5) {
+ temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
+ iv[0] = (temp & 0x3ff000) >> 12;
+ qv[0] = temp & 0x3ff;
+ temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
+ iv[DACK_SN_8822C - 1] = (temp & 0x3ff000) >> 12;
+ qv[DACK_SN_8822C - 1] = temp & 0x3ff;
+ } else {
+ break;
+ }
+ } while (cnt++ < 100);
+
+ rtw8822c_dac_iq_offset(rtwdev, iv, i_value);
+ rtw8822c_dac_iq_offset(rtwdev, qv, q_value);
+}
+
+static void rtw8822c_dac_cal_rf_mode(struct rtw_dev *rtwdev,
+ u32 *i_value, u32 *q_value)
+{
+ u32 iv[DACK_SN_8822C], qv[DACK_SN_8822C];
+ u32 rf_a, rf_b;
+
+ mdelay(10);
+
+ rf_a = rtw_read_rf(rtwdev, RF_PATH_A, 0x0, RFREG_MASK);
+ rf_b = rtw_read_rf(rtwdev, RF_PATH_B, 0x0, RFREG_MASK);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] RF path-A=0x%05x\n", rf_a);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] RF path-B=0x%05x\n", rf_b);
+
+ rtw8822c_dac_cal_iq_sample(rtwdev, iv, qv);
+ rtw8822c_dac_cal_iq_search(rtwdev, iv, qv, i_value, q_value);
+}
+
+static void rtw8822c_dac_bb_setting(struct rtw_dev *rtwdev)
+{
+ rtw_write32_mask(rtwdev, 0x1d58, 0xff8, 0x1ff);
+ rtw_write32_mask(rtwdev, 0x1a00, 0x3, 0x2);
+ rtw_write32_mask(rtwdev, 0x1a14, 0x300, 0x3);
+ rtw_write32(rtwdev, 0x1d70, 0x7e7e7e7e);
+ rtw_write32_mask(rtwdev, 0x180c, 0x3, 0x0);
+ rtw_write32_mask(rtwdev, 0x410c, 0x3, 0x0);
+ rtw_write32(rtwdev, 0x1b00, 0x00000008);
+ rtw_write8(rtwdev, 0x1bcc, 0x3f);
+ rtw_write32(rtwdev, 0x1b00, 0x0000000a);
+ rtw_write8(rtwdev, 0x1bcc, 0x3f);
+ rtw_write32_mask(rtwdev, 0x1e24, BIT(31), 0x0);
+ rtw_write32_mask(rtwdev, 0x1e28, 0xf, 0x3);
+}
+
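+/* ADC offset calibration: measure the I/Q DC offset in RF mode, program
+ * its complement into the per-path compensation register (base + 0x68),
+ * and retry up to ten times until the residual offset is below 5.
+ */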
+static void rtw8822c_dac_cal_adc(struct rtw_dev *rtwdev,
+ u8 path, u32 *adc_ic, u32 *adc_qc)
+{
+ u32 ic = 0, qc = 0, temp = 0;
+ u32 base_addr;
+ u32 path_sel;
+ int i;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK path(%d)\n", path);
+
+ base_addr = rtw8822c_get_path_base_addr(path);
+ switch (path) {
+ case RF_PATH_A:
+ path_sel = 0xa0000;
+ break;
+ case RF_PATH_B:
+ path_sel = 0x80000;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ /* ADCK step1 */
+ rtw_write32_mask(rtwdev, base_addr + 0x30, BIT(30), 0x0);
+ if (path == RF_PATH_B)
+ rtw_write32(rtwdev, base_addr + 0x30, 0x30db8041);
+ rtw_write32(rtwdev, base_addr + 0x60, 0xf0040ff0);
+ rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02dd08c4);
+ rtw_write32(rtwdev, base_addr + 0x0c, 0x10000260);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x0, RFREG_MASK, 0x10000);
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x0, RFREG_MASK, 0x10000);
+ for (i = 0; i < 10; i++) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK count=%d\n", i);
+ rtw_write32(rtwdev, 0x1c3c, path_sel + 0x8003);
+ rtw_write32(rtwdev, 0x1c24, 0x00010002);
+ rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] before: i=0x%x, q=0x%x\n", ic, qc);
+
+ /* compensation value */
+ if (ic != 0x0) {
+ ic = 0x400 - ic;
+ *adc_ic = ic;
+ }
+ if (qc != 0x0) {
+ qc = 0x400 - qc;
+ *adc_qc = qc;
+ }
+ temp = (ic & 0x3ff) | ((qc & 0x3ff) << 10);
+ rtw_write32(rtwdev, base_addr + 0x68, temp);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK 0x%08x=0x08%x\n",
+ base_addr + 0x68, temp);
+ /* check ADC DC offset */
+ rtw_write32(rtwdev, 0x1c3c, path_sel + 0x8103);
+ rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] after: i=0x%08x, q=0x%08x\n", ic, qc);
+ if (ic >= 0x200)
+ ic = 0x400 - ic;
+ if (qc >= 0x200)
+ qc = 0x400 - qc;
+ if (ic < 5 && qc < 5)
+ break;
+ }
+
+ /* ADCK step2 */
+ rtw_write32(rtwdev, 0x1c3c, 0x00000003);
+ rtw_write32(rtwdev, base_addr + 0x0c, 0x10000260);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c4);
+
+ /* release pull low switch on IQ path */
+ rtw_write_rf(rtwdev, path, 0x8f, BIT(13), 0x1);
+}
+
+static void rtw8822c_dac_cal_step1(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 base_addr;
+
+ base_addr = rtw8822c_get_path_base_addr(path);
+
+ rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
+ if (path == RF_PATH_A) {
+ rtw_write32(rtwdev, base_addr + 0x60, 0xf0040ff0);
+ rtw_write32(rtwdev, 0x1c38, 0xffffffff);
+ }
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, 0x9b4, 0xdb66db00);
+ rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb88);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff81);
+ rtw_write32(rtwdev, base_addr + 0xc0, 0x0003d208);
+ rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb88);
+ rtw_write32(rtwdev, base_addr + 0xd8, 0x0008ff81);
+ rtw_write32(rtwdev, base_addr + 0xdc, 0x0003d208);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x60000000);
+ mdelay(2);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0x000aff8d);
+ mdelay(2);
+ rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb89);
+ rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb89);
+ mdelay(1);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x62000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xd4, 0x62000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x02000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff87);
+ rtw_write32(rtwdev, 0x9b4, 0xdb6db600);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff87);
+ rtw_write32(rtwdev, base_addr + 0x60, 0xf0000000);
+}
+
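+/* Step 2: measure the residual I/Q DC offset and scale it into the
+ * compensation code that step 3 programs into the DAC registers.
+ */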
+static void rtw8822c_dac_cal_step2(struct rtw_dev *rtwdev,
+ u8 path, u32 *ic_out, u32 *qc_out)
+{
+ u32 base_addr;
+ u32 ic, qc, ic_in, qc_in;
+
+ base_addr = rtw8822c_get_path_base_addr(path);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xf0000000, 0x0);
+ rtw_write32_mask(rtwdev, base_addr + 0xc0, 0xf, 0x8);
+ rtw_write32_mask(rtwdev, base_addr + 0xd8, 0xf0000000, 0x0);
+ rtw_write32_mask(rtwdev, base_addr + 0xdc, 0xf, 0x8);
+
+ rtw_write32(rtwdev, 0x1b00, 0x00000008);
+ rtw_write8(rtwdev, 0x1bcc, 0x03f);
+ rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, 0x1c3c, 0x00088103);
+
+ rtw8822c_dac_cal_rf_mode(rtwdev, &ic_in, &qc_in);
+ ic = ic_in;
+ qc = qc_in;
+
+ /* compensation value */
+ if (ic != 0x0)
+ ic = 0x400 - ic;
+ if (qc != 0x0)
+ qc = 0x400 - qc;
+ if (ic < 0x300) {
+ ic = ic * 2 * 6 / 5;
+ ic = ic + 0x80;
+ } else {
+ ic = (0x400 - ic) * 2 * 6 / 5;
+ ic = 0x7f - ic;
+ }
+ if (qc < 0x300) {
+ qc = qc * 2 * 6 / 5;
+ qc = qc + 0x80;
+ } else {
+ qc = (0x400 - qc) * 2 * 6 / 5;
+ qc = 0x7f - qc;
+ }
+
+ *ic_out = ic;
+ *qc_out = qc;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] before i=0x%x, q=0x%x\n", ic_in, qc_in);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] after i=0x%x, q=0x%x\n", ic, qc);
+}
+
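+/* Step 3: program the step-2 compensation codes, re-measure with a +0x10
+ * test offset applied through the ADC compensation register, and return
+ * both the raw result and its magnitude; the caller stops iterating once
+ * the magnitude drops below 5.
+ */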
+static void rtw8822c_dac_cal_step3(struct rtw_dev *rtwdev, u8 path,
+ u32 adc_ic, u32 adc_qc,
+ u32 *ic_in, u32 *qc_in,
+ u32 *i_out, u32 *q_out)
+{
+ u32 base_addr;
+ u32 ic, qc;
+ u32 temp;
+
+ base_addr = rtw8822c_get_path_base_addr(path);
+ ic = *ic_in;
+ qc = *qc_in;
+
+ rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, 0x9b4, 0xdb66db00);
+ rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb88);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0xc008ff81);
+ rtw_write32(rtwdev, base_addr + 0xc0, 0x0003d208);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xf0000000, ic & 0xf);
+ rtw_write32_mask(rtwdev, base_addr + 0xc0, 0xf, (ic & 0xf0) >> 4);
+ rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb88);
+ rtw_write32(rtwdev, base_addr + 0xd8, 0xe008ff81);
+ rtw_write32(rtwdev, base_addr + 0xdc, 0x0003d208);
+ rtw_write32_mask(rtwdev, base_addr + 0xd8, 0xf0000000, qc & 0xf);
+ rtw_write32_mask(rtwdev, base_addr + 0xdc, 0xf, (qc & 0xf0) >> 4);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x60000000);
+ mdelay(2);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xe, 0x6);
+ mdelay(2);
+ rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb89);
+ rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb89);
+ mdelay(1);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x62000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xd4, 0x62000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x02000000);
+ mdelay(20);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xe, 0x3);
+ rtw_write32(rtwdev, 0x9b4, 0xdb6db600);
+
+ /* check DAC DC offset */
+ temp = ((adc_ic + 0x10) & 0x3ff) | (((adc_qc + 0x10) & 0x3ff) << 10);
+ rtw_write32(rtwdev, base_addr + 0x68, temp);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, base_addr + 0x60, 0xf0000000);
+ rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
+ if (ic >= 0x10)
+ ic = ic - 0x10;
+ else
+ ic = 0x400 - (0x10 - ic);
+
+ if (qc >= 0x10)
+ qc = qc - 0x10;
+ else
+ qc = 0x400 - (0x10 - qc);
+
+ *i_out = ic;
+ *q_out = qc;
+
+ if (ic >= 0x200)
+ ic = 0x400 - ic;
+ if (qc >= 0x200)
+ qc = 0x400 - qc;
+
+ *ic_in = ic;
+ *qc_in = qc;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] after DACK i=0x%x, q=0x%x\n", *i_out, *q_out);
+}
+
+static void rtw8822c_dac_cal_step4(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 base_addr = rtw8822c_get_path_base_addr(path);
+
+ rtw_write32(rtwdev, base_addr + 0x68, 0x0);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c4);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0x1, 0x0);
+ rtw_write32_mask(rtwdev, base_addr + 0x30, BIT(30), 0x1);
+}
+
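+/* Top-level DAC calibration: back up registers, set up the baseband, run
+ * ADC compensation followed by up to ten rounds of steps 1-3 per path,
+ * finish with step 4 and restore the saved registers.
+ */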
+static void rtw8822c_rf_dac_cal(struct rtw_dev *rtwdev)
+{
+ struct rtw_backup_info backup_rf[DACK_RF_8822C * DACK_PATH_8822C];
+ struct rtw_backup_info backup[DACK_REG_8822C];
+ u32 ic = 0, qc = 0, i;
+ u32 i_a = 0x0, q_a = 0x0, i_b = 0x0, q_b = 0x0;
+ u32 ic_a = 0x0, qc_a = 0x0, ic_b = 0x0, qc_b = 0x0;
+ u32 adc_ic_a = 0x0, adc_qc_a = 0x0, adc_ic_b = 0x0, adc_qc_b = 0x0;
+
+ rtw8822c_dac_backup_reg(rtwdev, backup, backup_rf);
+
+ rtw8822c_dac_bb_setting(rtwdev);
+
+ /* path-A */
+ rtw8822c_dac_cal_adc(rtwdev, RF_PATH_A, &adc_ic_a, &adc_qc_a);
+ for (i = 0; i < 10; i++) {
+ rtw8822c_dac_cal_step1(rtwdev, RF_PATH_A);
+ rtw8822c_dac_cal_step2(rtwdev, RF_PATH_A, &ic, &qc);
+ ic_a = ic;
+ qc_a = qc;
+
+ rtw8822c_dac_cal_step3(rtwdev, RF_PATH_A, adc_ic_a, adc_qc_a,
+ &ic, &qc, &i_a, &q_a);
+
+ if (ic < 5 && qc < 5)
+ break;
+ }
+ rtw8822c_dac_cal_step4(rtwdev, RF_PATH_A);
+
+ /* path-B */
+ rtw8822c_dac_cal_adc(rtwdev, RF_PATH_B, &adc_ic_b, &adc_qc_b);
+ for (i = 0; i < 10; i++) {
+ rtw8822c_dac_cal_step1(rtwdev, RF_PATH_B);
+ rtw8822c_dac_cal_step2(rtwdev, RF_PATH_B, &ic, &qc);
+ ic_b = ic;
+ qc_b = qc;
+
+ rtw8822c_dac_cal_step3(rtwdev, RF_PATH_B, adc_ic_b, adc_qc_b,
+ &ic, &qc, &i_b, &q_b);
+
+ if (ic < 5 && qc < 5)
+ break;
+ }
+ rtw8822c_dac_cal_step4(rtwdev, RF_PATH_B);
+
+ rtw_write32(rtwdev, 0x1b00, 0x00000008);
+ rtw_write32_mask(rtwdev, 0x4130, BIT(30), 0x1);
+ rtw_write8(rtwdev, 0x1bcc, 0x0);
+ rtw_write32(rtwdev, 0x1b00, 0x0000000a);
+ rtw_write8(rtwdev, 0x1bcc, 0x0);
+
+ rtw8822c_dac_restore_reg(rtwdev, backup, backup_rf);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path A: ic=0x%x, qc=0x%x\n", ic_a, qc_a);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path B: ic=0x%x, qc=0x%x\n", ic_b, qc_b);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path A: i=0x%x, q=0x%x\n", i_a, q_a);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path B: i=0x%x, q=0x%x\n", i_b, q_b);
+}
+
+static void rtw8822c_rf_x2_check(struct rtw_dev *rtwdev)
+{
+ u8 x2k_busy;
+
+ mdelay(1);
+ x2k_busy = rtw_read_rf(rtwdev, RF_PATH_A, 0xb8, BIT(15));
+ if (x2k_busy == 1) {
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xb8, RFREG_MASK, 0xC4440);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xba, RFREG_MASK, 0x6840D);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xb8, RFREG_MASK, 0x80440);
+ mdelay(1);
+ }
+}
+
+static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
+{
+ rtw8822c_rf_dac_cal(rtwdev);
+ rtw8822c_rf_x2_check(rtwdev);
+}
+
+static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 crystal_cap;
+ u8 cck_gi_u_bnd_msb = 0;
+ u8 cck_gi_u_bnd_lsb = 0;
+ u8 cck_gi_l_bnd_msb = 0;
+ u8 cck_gi_l_bnd_lsb = 0;
+ bool is_tx2_path;
+
+ /* power on BB/RF domain */
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN,
+ BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
+ rtw_write8_set(rtwdev, REG_RF_CTRL,
+ BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+ rtw_write32_set(rtwdev, REG_WLRF1, BIT_WLRF1_BBRF_EN);
+
+ /* pre init before header files config */
+ rtw8822c_header_file_init(rtwdev, true);
+
+ rtw_phy_load_tables(rtwdev);
+
+ crystal_cap = rtwdev->efuse.crystal_cap & 0x7f;
+ rtw_write32_mask(rtwdev, REG_ANAPAR_XTAL_0, 0xfffc00,
+ crystal_cap | (crystal_cap << 7));
+
+ /* post init after header files config */
+ rtw8822c_header_file_init(rtwdev, false);
+
+ is_tx2_path = false;
+ rtw8822c_config_trx_mode(rtwdev, hal->antenna_tx, hal->antenna_rx,
+ is_tx2_path);
+ rtw_phy_init(rtwdev);
+
+ cck_gi_u_bnd_msb = (u8)rtw_read32_mask(rtwdev, 0x1a98, 0xc000);
+ cck_gi_u_bnd_lsb = (u8)rtw_read32_mask(rtwdev, 0x1aa8, 0xf0000);
+ cck_gi_l_bnd_msb = (u8)rtw_read32_mask(rtwdev, 0x1a98, 0xc0);
+ cck_gi_l_bnd_lsb = (u8)rtw_read32_mask(rtwdev, 0x1a70, 0x0f000000);
+
+ dm_info->cck_gi_u_bnd = ((cck_gi_u_bnd_msb << 4) | (cck_gi_u_bnd_lsb));
+ dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb));
+
+ rtw8822c_rf_init(rtwdev);
+ /* wifi path controller */
+ rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
+ rtw_write32_mask(rtwdev, 0x1704, 0xffffffff, 0x7700);
+ rtw_write32_mask(rtwdev, 0x1700, 0xffffffff, 0xc00f0038);
+ rtw_write32_mask(rtwdev, 0x6c0, 0xffffffff, 0xaaaaaaaa);
+ rtw_write32_mask(rtwdev, 0x6c4, 0xffffffff, 0xaaaaaaaa);
+}
+
+#define WLAN_TXQ_RPT_EN 0x1F
+#define WLAN_SLOT_TIME 0x09
+#define WLAN_PIFS_TIME 0x1C
+#define WLAN_SIFS_CCK_CONT_TX 0x0A
+#define WLAN_SIFS_OFDM_CONT_TX 0x0E
+#define WLAN_SIFS_CCK_TRX 0x0A
+#define WLAN_SIFS_OFDM_TRX 0x10
+#define WLAN_NAV_MAX 0xC8
+#define WLAN_RDG_NAV 0x05
+#define WLAN_TXOP_NAV 0x1B
+#define WLAN_CCK_RX_TSF 0x30
+#define WLAN_OFDM_RX_TSF 0x30
+#define WLAN_TBTT_PROHIBIT 0x04 /* unit : 32us */
+#define WLAN_TBTT_HOLD_TIME 0x064 /* unit : 32us */
+#define WLAN_DRV_EARLY_INT 0x04
+#define WLAN_BCN_CTRL_CLT0 0x10
+#define WLAN_BCN_DMA_TIME 0x02
+#define WLAN_BCN_MAX_ERR 0xFF
+#define WLAN_SIFS_CCK_DUR_TUNE 0x0A
+#define WLAN_SIFS_OFDM_DUR_TUNE 0x10
+#define WLAN_SIFS_CCK_CTX 0x0A
+#define WLAN_SIFS_CCK_IRX 0x0A
+#define WLAN_SIFS_OFDM_CTX 0x0E
+#define WLAN_SIFS_OFDM_IRX 0x0E
+#define WLAN_EIFS_DUR_TUNE 0x40
+#define WLAN_EDCA_VO_PARAM 0x002FA226
+#define WLAN_EDCA_VI_PARAM 0x005EA328
+#define WLAN_EDCA_BE_PARAM 0x005EA42B
+#define WLAN_EDCA_BK_PARAM 0x0000A44F
+
+#define WLAN_RX_FILTER0 0xFFFFFFFF
+#define WLAN_RX_FILTER2 0xFFFF
+#define WLAN_RCR_CFG 0xE400220E
+#define WLAN_RXPKT_MAX_SZ 12288
+#define WLAN_RXPKT_MAX_SZ_512 (WLAN_RXPKT_MAX_SZ >> 9)
+
+#define WLAN_AMPDU_MAX_TIME 0x70
+#define WLAN_RTS_LEN_TH 0xFF
+#define WLAN_RTS_TX_TIME_TH 0x08
+#define WLAN_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_PRE_TXCNT_TIME_TH 0x1E0
+#define FAST_EDCA_VO_TH 0x06
+#define FAST_EDCA_VI_TH 0x06
+#define FAST_EDCA_BE_TH 0x06
+#define FAST_EDCA_BK_TH 0x06
+#define WLAN_BAR_RETRY_LIMIT 0x01
+#define WLAN_BAR_ACK_TYPE 0x05
+#define WLAN_RA_TRY_RATE_AGG_LIMIT 0x08
+#define WLAN_RESP_TXRATE 0x84
+#define WLAN_ACK_TO 0x21
+#define WLAN_ACK_TO_CCK 0x6A
+#define WLAN_DATA_RATE_FB_CNT_1_4 0x01000000
+#define WLAN_DATA_RATE_FB_CNT_5_8 0x08070504
+#define WLAN_RTS_RATE_FB_CNT_5_8 0x08070504
+#define WLAN_DATA_RATE_FB_RATE0 0xFE01F010
+#define WLAN_DATA_RATE_FB_RATE0_H 0x40000000
+#define WLAN_RTS_RATE_FB_RATE1 0x003FF010
+#define WLAN_RTS_RATE_FB_RATE1_H 0x40000000
+#define WLAN_RTS_RATE_FB_RATE4 0x0600F010
+#define WLAN_RTS_RATE_FB_RATE4_H 0x400003E0
+#define WLAN_RTS_RATE_FB_RATE5 0x0600F015
+#define WLAN_RTS_RATE_FB_RATE5_H 0x000000E0
+
+#define WLAN_TX_FUNC_CFG1 0x30
+#define WLAN_TX_FUNC_CFG2 0x30
+#define WLAN_MAC_OPT_NORM_FUNC1 0x98
+#define WLAN_MAC_OPT_LB_FUNC1 0x80
+#define WLAN_MAC_OPT_FUNC2 0x30810041
+
+#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
+ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
+ (WLAN_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX) | \
+ (WLAN_SIFS_OFDM_TRX << BIT_SHIFT_SIFS_OFDM_TRX))
+
+#define WLAN_SIFS_DUR_TUNE (WLAN_SIFS_CCK_DUR_TUNE | \
+ (WLAN_SIFS_OFDM_DUR_TUNE << 8))
+
+#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\
+ (WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP))
+
+#define WLAN_NAV_CFG (WLAN_RDG_NAV | (WLAN_TXOP_NAV << 16))
+#define WLAN_RX_TSF_CFG (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF) << 8)
+
+#define MAC_CLK_SPEED 80 /* 80M */
+#define EFUSE_PCB_INFO_OFFSET 0xCA
+
+static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
+{
+ u8 value8;
+ u16 value16;
+ u32 value32;
+ u16 pre_txcnt;
+
+ /* txq control */
+ value8 = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL);
+ value8 |= (BIT(7) & ~BIT(1) & ~BIT(2));
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL, value8);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+ /* sifs control */
+ rtw_write16(rtwdev, REG_SPEC_SIFS, WLAN_SIFS_DUR_TUNE);
+ rtw_write32(rtwdev, REG_SIFS, WLAN_SIFS_CFG);
+ rtw_write16(rtwdev, REG_RESP_SIFS_CCK,
+ WLAN_SIFS_CCK_CTX | WLAN_SIFS_CCK_IRX << 8);
+ rtw_write16(rtwdev, REG_RESP_SIFS_OFDM,
+ WLAN_SIFS_OFDM_CTX | WLAN_SIFS_OFDM_IRX << 8);
+ /* rate fallback control */
+ rtw_write32(rtwdev, REG_DARFRC, WLAN_DATA_RATE_FB_CNT_1_4);
+ rtw_write32(rtwdev, REG_DARFRCH, WLAN_DATA_RATE_FB_CNT_5_8);
+ rtw_write32(rtwdev, REG_RARFRCH, WLAN_RTS_RATE_FB_CNT_5_8);
+ rtw_write32(rtwdev, REG_ARFR0, WLAN_DATA_RATE_FB_RATE0);
+ rtw_write32(rtwdev, REG_ARFRH0, WLAN_DATA_RATE_FB_RATE0_H);
+ rtw_write32(rtwdev, REG_ARFR1_V1, WLAN_RTS_RATE_FB_RATE1);
+ rtw_write32(rtwdev, REG_ARFRH1_V1, WLAN_RTS_RATE_FB_RATE1_H);
+ rtw_write32(rtwdev, REG_ARFR4, WLAN_RTS_RATE_FB_RATE4);
+ rtw_write32(rtwdev, REG_ARFRH4, WLAN_RTS_RATE_FB_RATE4_H);
+ rtw_write32(rtwdev, REG_ARFR5, WLAN_RTS_RATE_FB_RATE5);
+ rtw_write32(rtwdev, REG_ARFRH5, WLAN_RTS_RATE_FB_RATE5_H);
+ /* protocol configuration */
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, WLAN_AMPDU_MAX_TIME);
+ rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_EOF_V1);
+ pre_txcnt = WLAN_PRE_TXCNT_TIME_TH | BIT_EN_PRECNT;
+ rtw_write8(rtwdev, REG_PRECNT_CTRL, (u8)(pre_txcnt & 0xFF));
+ rtw_write8(rtwdev, REG_PRECNT_CTRL + 1, (u8)(pre_txcnt >> 8));
+ value32 = WLAN_RTS_LEN_TH | (WLAN_RTS_TX_TIME_TH << 8) |
+ (WLAN_MAX_AGG_PKT_LIMIT << 16) |
+ (WLAN_RTS_MAX_AGG_PKT_LIMIT << 24);
+ rtw_write32(rtwdev, REG_PROT_MODE_CTRL, value32);
+ rtw_write16(rtwdev, REG_BAR_MODE_CTRL + 2,
+ WLAN_BAR_RETRY_LIMIT | WLAN_RA_TRY_RATE_AGG_LIMIT << 8);
+ rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING, FAST_EDCA_VO_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING + 2, FAST_EDCA_VI_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING, FAST_EDCA_BE_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING + 2, FAST_EDCA_BK_TH);
+ /* close BA parser */
+ rtw_write8_clr(rtwdev, REG_LIFETIME_EN, BIT_BA_PARSER_EN);
+ rtw_write32_clr(rtwdev, REG_RRSR, BITS_RRSR_RSC);
+
+ /* EDCA configuration */
+ rtw_write32(rtwdev, REG_EDCA_VO_PARAM, WLAN_EDCA_VO_PARAM);
+ rtw_write32(rtwdev, REG_EDCA_VI_PARAM, WLAN_EDCA_VI_PARAM);
+ rtw_write32(rtwdev, REG_EDCA_BE_PARAM, WLAN_EDCA_BE_PARAM);
+ rtw_write32(rtwdev, REG_EDCA_BK_PARAM, WLAN_EDCA_BK_PARAM);
+ rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_TIME);
+ rtw_write8_clr(rtwdev, REG_TX_PTCL_CTRL + 1, BIT_SIFS_BK_EN >> 8);
+ rtw_write8_set(rtwdev, REG_RD_CTRL + 1,
+ (BIT_DIS_TXOP_CFE | BIT_DIS_LSIG_CFE |
+ BIT_DIS_STBC_CFE) >> 8);
+
+ /* MAC clock configuration */
+ rtw_write32_clr(rtwdev, REG_AFE_CTRL1, BIT_MAC_CLK_SEL);
+ rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
+ rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);
+
+ rtw_write8_set(rtwdev, REG_MISC_CTRL,
+ BIT_EN_FREE_CNT | BIT_DIS_SECOND_CCA);
+ rtw_write8_clr(rtwdev, REG_TIMER0_SRC_SEL, BIT_TSFT_SEL_TIMER0);
+ rtw_write16(rtwdev, REG_TXPAUSE, 0x0000);
+ rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+ rtw_write32(rtwdev, REG_RD_NAV_NXT, WLAN_NAV_CFG);
+ rtw_write16(rtwdev, REG_RXTSF_OFFSET_CCK, WLAN_RX_TSF_CFG);
+ /* Set beacon control - enable TSF and other related functions */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+ /* Set send beacon related registers */
+ rtw_write32(rtwdev, REG_TBTT_PROHIBIT, WLAN_TBTT_TIME);
+ rtw_write8(rtwdev, REG_DRVERLYINT, WLAN_DRV_EARLY_INT);
+ rtw_write8(rtwdev, REG_BCN_CTRL_CLINT0, WLAN_BCN_CTRL_CLT0);
+ rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
+ rtw_write8(rtwdev, REG_BCN_MAX_ERR, WLAN_BCN_MAX_ERR);
+
+ /* WMAC configuration */
+ rtw_write8(rtwdev, REG_BBPSF_CTRL + 2, WLAN_RESP_TXRATE);
+ rtw_write8(rtwdev, REG_ACKTO, WLAN_ACK_TO);
+ rtw_write8(rtwdev, REG_ACKTO_CCK, WLAN_ACK_TO_CCK);
+ rtw_write16(rtwdev, REG_EIFS, WLAN_EIFS_DUR_TUNE);
+ rtw_write8(rtwdev, REG_NAV_CTRL + 2, WLAN_NAV_MAX);
+ rtw_write8(rtwdev, REG_WMAC_TRXPTCL_CTL_H + 2, WLAN_BAR_ACK_TYPE);
+ rtw_write32(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+ rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+ rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RXPKT_MAX_SZ_512);
+ rtw_write8(rtwdev, REG_TCR + 2, WLAN_TX_FUNC_CFG2);
+ rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
+ rtw_write32_set(rtwdev, REG_GENERAL_OPTION, BIT_DUMMY_FCS_READY_MASK_EN);
+ rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
+ rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION_1, WLAN_MAC_OPT_NORM_FUNC1);
+
+ /* init low power */
+ value16 = rtw_read16(rtwdev, REG_RXPSF_CTRL + 2) & 0xF00F;
+ value16 |= (BIT_RXGCK_VHT_FIFOTHR(1) | BIT_RXGCK_HT_FIFOTHR(1) |
+ BIT_RXGCK_OFDM_FIFOTHR(1) | BIT_RXGCK_CCK_FIFOTHR(1)) >> 16;
+ rtw_write16(rtwdev, REG_RXPSF_CTRL + 2, value16);
+ value16 = 0;
+ value16 = BIT_SET_RXPSF_PKTLENTHR(value16, 1);
+ value16 |= BIT_RXPSF_CTRLEN | BIT_RXPSF_VHTCHKEN | BIT_RXPSF_HTCHKEN
+ | BIT_RXPSF_OFDMCHKEN | BIT_RXPSF_CCKCHKEN
+ | BIT_RXPSF_OFDMRST;
+ rtw_write16(rtwdev, REG_RXPSF_CTRL, value16);
+ rtw_write32(rtwdev, REG_RXPSF_TYPE_CTRL, 0xFFFFFFFF);
+ /* rx ignore configuration */
+ value16 = rtw_read16(rtwdev, REG_RXPSF_CTRL);
+ value16 &= ~(BIT_RXPSF_MHCHKEN | BIT_RXPSF_CCKRST |
+ BIT_RXPSF_CONT_ERRCHKEN);
+ value16 = BIT_SET_RXPSF_ERRTHR(value16, 0x07);
+ rtw_write16(rtwdev, REG_RXPSF_CTRL, value16);
+
+ return 0;
+}
+
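+/* Build the channel word from RF register 0x18 (band select, channel
+ * number, RFSI bits for the higher 5G channels, bandwidth field) and write
+ * it back through RF_CFGCH on both paths; the LUT writes set the
+ * bandwidth-dependent rf_rxbb value per path.
+ */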
+static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
+#define RF18_BAND_2G (0)
+#define RF18_BAND_5G (BIT(16) | BIT(8))
+#define RF18_CHANNEL_MASK (MASKBYTE0)
+#define RF18_RFSI_MASK (BIT(18) | BIT(17))
+#define RF18_RFSI_GE_CH80 (BIT(17))
+#define RF18_RFSI_GT_CH140 (BIT(18))
+#define RF18_BW_MASK (BIT(13) | BIT(12))
+#define RF18_BW_20M (BIT(13) | BIT(12))
+#define RF18_BW_40M (BIT(13))
+#define RF18_BW_80M (BIT(12))
+
+ u32 rf_reg18 = 0;
+ u32 rf_rxbb = 0;
+
+ rf_reg18 = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK);
+
+ rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
+ RF18_BW_MASK);
+
+ rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (channel & RF18_CHANNEL_MASK);
+ if (channel > 144)
+ rf_reg18 |= RF18_RFSI_GT_CH140;
+ else if (channel >= 80)
+ rf_reg18 |= RF18_RFSI_GE_CH80;
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_5:
+ case RTW_CHANNEL_WIDTH_10:
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ rf_reg18 |= RF18_BW_20M;
+ rf_rxbb = 0x18;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ /* RF bandwidth */
+ rf_reg18 |= RF18_BW_40M;
+ rf_rxbb = 0x10;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ rf_reg18 |= RF18_BW_80M;
+ rf_rxbb = 0x8;
+ break;
+ }
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, 0x1f, 0x12);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, 0xfffff, rf_rxbb);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x00);
+
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE2, 0x04, 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWA, 0x1f, 0x12);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWD0, 0xfffff, rf_rxbb);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE2, 0x04, 0x00);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_reg18);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_reg18);
+}
+
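+/* Briefly lower the initial gain index on both paths by 2 and then restore
+ * it; called after channel and TRX-mode changes.
+ */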
+static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
+{
+ u32 igi;
+
+ igi = rtw_read32_mask(rtwdev, REG_RXIGI, 0x7f);
+ rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f, igi - 2);
+ rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f00, igi - 2);
+ rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f, igi);
+ rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f00, igi);
+}
+
+static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ if (channel <= 14) {
+ rtw_write32_clr(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
+ rtw_write32_set(rtwdev, REG_TXF4, BIT(20));
+ rtw_write32_clr(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
+ rtw_write32_clr(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
+ rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0xF);
+
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x0);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x0);
+ if (channel == 13 || channel == 14)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x969);
+ else if (channel == 11 || channel == 12)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x96a);
+ else
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x9aa);
+ if (channel == 14) {
+ rtw_write32_mask(rtwdev, REG_TXF0, MASKHWORD, 0x3da0);
+ rtw_write32_mask(rtwdev, REG_TXF1, MASKDWORD,
+ 0x4962c931);
+ rtw_write32_mask(rtwdev, REG_TXF2, MASKLWORD, 0x6aa3);
+ rtw_write32_mask(rtwdev, REG_TXF3, MASKHWORD, 0xaa7b);
+ rtw_write32_mask(rtwdev, REG_TXF4, MASKLWORD, 0xf3d7);
+ rtw_write32_mask(rtwdev, REG_TXF5, MASKDWORD, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXF6, MASKDWORD,
+ 0xff012455);
+ rtw_write32_mask(rtwdev, REG_TXF7, MASKDWORD, 0xffff);
+ } else {
+ rtw_write32_mask(rtwdev, REG_TXF0, MASKHWORD, 0x5284);
+ rtw_write32_mask(rtwdev, REG_TXF1, MASKDWORD,
+ 0x3e18fec8);
+ rtw_write32_mask(rtwdev, REG_TXF2, MASKLWORD, 0x0a88);
+ rtw_write32_mask(rtwdev, REG_TXF3, MASKHWORD, 0xacc4);
+ rtw_write32_mask(rtwdev, REG_TXF4, MASKLWORD, 0xc8b2);
+ rtw_write32_mask(rtwdev, REG_TXF5, MASKDWORD,
+ 0x00faf0de);
+ rtw_write32_mask(rtwdev, REG_TXF6, MASKDWORD,
+ 0x00122344);
+ rtw_write32_mask(rtwdev, REG_TXF7, MASKDWORD,
+ 0x0fffffff);
+ }
+ if (channel == 13)
+ rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
+ else
+ rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x1);
+ } else if (channel > 35) {
+ rtw_write32_set(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
+ rtw_write32_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
+ rtw_write32_set(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
+ rtw_write32_clr(rtwdev, REG_TXF4, BIT(20));
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x0);
+ rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22);
+ rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
+ if (channel >= 36 && channel <= 64) {
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x1);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x1);
+ } else if (channel >= 100 && channel <= 144) {
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x2);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x2);
+ } else if (channel >= 149) {
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x3);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x3);
+ }
+
+ if (channel >= 36 && channel <= 51)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x494);
+ else if (channel >= 52 && channel <= 55)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x493);
+ else if (channel >= 56 && channel <= 111)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x453);
+ else if (channel >= 112 && channel <= 119)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x452);
+ else if (channel >= 120 && channel <= 172)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x412);
+ else if (channel >= 173 && channel <= 177)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x411);
+ }
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x19B);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x7);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x6);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write32_mask(rtwdev, REG_CCKSB, BIT(4),
+ (primary_ch_idx == 1 ? 1 : 0));
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x5);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
+ (primary_ch_idx | (primary_ch_idx << 4)));
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0xa);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
+ (primary_ch_idx | (primary_ch_idx << 4)));
+ break;
+ case RTW_CHANNEL_WIDTH_5:
+ rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x2AB);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x1);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x4);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x4);
+ break;
+ case RTW_CHANNEL_WIDTH_10:
+ rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x2AB);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x2);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x6);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x5);
+ break;
+ }
+}
+
+static void rtw8822c_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_chan_idx)
+{
+ rtw8822c_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+ rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+ rtw8822c_set_channel_rf(rtwdev, channel, bw);
+ rtw8822c_toggle_igi(rtwdev);
+}
+
+static void rtw8822c_config_cck_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
+{
+ if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_CCANRX, 0x00060000, 0x0);
+ rtw_write32_mask(rtwdev, REG_CCANRX, 0x00600000, 0x0);
+ } else if (rx_path == BB_PATH_AB) {
+ rtw_write32_mask(rtwdev, REG_CCANRX, 0x00600000, 0x1);
+ rtw_write32_mask(rtwdev, REG_CCANRX, 0x00060000, 0x1);
+ }
+
+ if (rx_path == BB_PATH_A)
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x0);
+ else if (rx_path == BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x5);
+ else if (rx_path == BB_PATH_AB)
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x1);
+}
+
+static void rtw8822c_config_ofdm_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
+{
+ if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x300, 0x0);
+ rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x600000, 0x0);
+ rtw_write32_mask(rtwdev, REG_AGCSWSH, BIT(17), 0x0);
+ rtw_write32_mask(rtwdev, REG_ANTWTPD, BIT(20), 0x0);
+ rtw_write32_mask(rtwdev, REG_MRCM, BIT(24), 0x0);
+ } else if (rx_path == BB_PATH_AB) {
+ rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x300, 0x1);
+ rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x600000, 0x1);
+ rtw_write32_mask(rtwdev, REG_AGCSWSH, BIT(17), 0x1);
+ rtw_write32_mask(rtwdev, REG_ANTWTPD, BIT(20), 0x1);
+ rtw_write32_mask(rtwdev, REG_MRCM, BIT(24), 0x1);
+ }
+
+ rtw_write32_mask(rtwdev, 0x824, 0x0f000000, rx_path);
+ rtw_write32_mask(rtwdev, 0x824, 0x000f0000, rx_path);
+}
+
+static void rtw8822c_config_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
+{
+ rtw8822c_config_cck_rx_path(rtwdev, rx_path);
+ rtw8822c_config_ofdm_rx_path(rtwdev, rx_path);
+}
+
+static void rtw8822c_config_cck_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+ bool is_tx2_path)
+{
+ if (tx_path == BB_PATH_A) {
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
+ } else if (tx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x4);
+ } else {
+ if (is_tx2_path)
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0xc);
+ else
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
+ }
+}
+
+static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+ bool is_tx2_path)
+{
+ if (tx_path == BB_PATH_A) {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x11);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
+ } else if (tx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x12);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
+ } else {
+ if (is_tx2_path) {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x33);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0404);
+ } else {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x31);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
+ }
+ }
+}
+
+static void rtw8822c_config_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+ bool is_tx2_path)
+{
+ rtw8822c_config_cck_tx_path(rtwdev, tx_path, is_tx2_path);
+ rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, is_tx2_path);
+}
+
+static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path)
+{
+ if ((tx_path | rx_path) & BB_PATH_A)
+ rtw_write32_mask(rtwdev, REG_ORITXCODE, MASK20BITS, 0x33312);
+ else
+ rtw_write32_mask(rtwdev, REG_ORITXCODE, MASK20BITS, 0x11111);
+ if ((tx_path | rx_path) & BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x33312);
+ else
+ rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x11111);
+
+ rtw8822c_config_rx_path(rtwdev, rx_path);
+ rtw8822c_config_tx_path(rtwdev, tx_path, is_tx2_path);
+
+ rtw8822c_toggle_igi(rtwdev);
+}
+
+static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 l_bnd, u_bnd;
+ u8 gain_a, gain_b;
+ s8 rx_power[RTW_RF_PATH_MAX];
+ s8 min_rx_power = -120;
+
+ rx_power[RF_PATH_A] = GET_PHY_STAT_P0_PWDB_A(phy_status);
+ rx_power[RF_PATH_B] = GET_PHY_STAT_P0_PWDB_B(phy_status);
+ l_bnd = dm_info->cck_gi_l_bnd;
+ u_bnd = dm_info->cck_gi_u_bnd;
+ gain_a = GET_PHY_STAT_P0_GAIN_A(phy_status);
+ gain_b = GET_PHY_STAT_P0_GAIN_B(phy_status);
+ if (gain_a < l_bnd)
+ rx_power[RF_PATH_A] += (l_bnd - gain_a) << 1;
+ else if (gain_a > u_bnd)
+ rx_power[RF_PATH_A] -= (gain_a - u_bnd) << 1;
+ if (gain_b < l_bnd)
+ rx_power[RF_PATH_A] += (l_bnd - gain_b) << 1;
+ else if (gain_b > u_bnd)
+ rx_power[RF_PATH_A] -= (gain_b - u_bnd) << 1;
+
+ rx_power[RF_PATH_A] -= 110;
+ rx_power[RF_PATH_B] -= 110;
+
+ pkt_stat->rx_power[RF_PATH_A] = max3(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B],
+ min_rx_power);
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+ pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+ min_rx_power);
+}
+
+static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 rxsc, bw;
+ s8 min_rx_power = -120;
+
+ if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
+ rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
+ else
+ rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
+
+ if (rxsc >= 9 && rxsc <= 12)
+ bw = RTW_CHANNEL_WIDTH_40;
+ else if (rxsc >= 13)
+ bw = RTW_CHANNEL_WIDTH_80;
+ else
+ bw = RTW_CHANNEL_WIDTH_20;
+
+ pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
+ pkt_stat->rx_power[RF_PATH_B] = GET_PHY_STAT_P1_PWDB_B(phy_status) - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 2);
+ pkt_stat->bw = bw;
+ pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
+ pkt_stat->rx_power[RF_PATH_B],
+ min_rx_power);
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 page;
+
+ page = *phy_status & 0xf;
+
+ switch (page) {
+ case 0:
+ query_phy_status_page0(rtwdev, phy_status, pkt_stat);
+ break;
+ case 1:
+ query_phy_status_page1(rtwdev, phy_status, pkt_stat);
+ break;
+ default:
+ rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
+ return;
+ }
+}
+
+static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u8 *phy_status = NULL;
+
+ memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+ pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+ pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+ pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+ pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+ pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+ pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+ pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+ pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+ pkt_stat->ppdu_cnt = GET_RX_DESC_PPDU_CNT(rx_desc);
+ pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+ /* drv_info_sz is in units of 8 bytes */
+ pkt_stat->drv_info_sz *= 8;
+
+ /* we are not interested in the rx/phy status of C2H command packets */
+ if (pkt_stat->is_c2h)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+ pkt_stat->drv_info_sz);
+ if (pkt_stat->phy_status) {
+ phy_status = rx_desc + desc_sz + pkt_stat->shift;
+ query_phy_status(rtwdev, phy_status, pkt_stat);
+ }
+
+ rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+}
+
+static void
+rtw8822c_set_write_tx_power_ref(struct rtw_dev *rtwdev, u8 *tx_pwr_ref_cck,
+ u8 *tx_pwr_ref_ofdm)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 txref_cck[2] = {0x18a0, 0x41a0};
+ u32 txref_ofdm[2] = {0x18e8, 0x41e8};
+ u8 path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0);
+ rtw_write32_mask(rtwdev, txref_cck[path], 0x7f0000,
+ tx_pwr_ref_cck[path]);
+ }
+ for (path = 0; path < hal->rf_path_num; path++) {
+ rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0);
+ rtw_write32_mask(rtwdev, txref_ofdm[path], 0x1fc00,
+ tx_pwr_ref_ofdm[path]);
+ }
+}
+
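+/* Write one 32-bit TX AGC entry covering a group of four rates: each byte
+ * holds the 7-bit power difference of one rate relative to the CCK/OFDM
+ * reference programmed by rtw8822c_set_write_tx_power_ref().
+ */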
+static void rtw8822c_set_tx_power_diff(struct rtw_dev *rtwdev, u8 rate,
+ s8 *diff_idx)
+{
+ u32 offset_txagc = 0x3a00;
+ u8 rate_idx = rate & 0xfc;
+ u8 pwr_idx[4];
+ u32 phy_pwr_idx;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ pwr_idx[i] = diff_idx[i] & 0x7f;
+
+ phy_pwr_idx = pwr_idx[0] |
+ (pwr_idx[1] << 8) |
+ (pwr_idx[2] << 16) |
+ (pwr_idx[3] << 24);
+
+ rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0x0);
+ rtw_write32_mask(rtwdev, offset_txagc + rate_idx, MASKDWORD,
+ phy_pwr_idx);
+}
+
+static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rs, rate, j;
+ u8 pwr_ref_cck[2] = {hal->tx_pwr_tbl[RF_PATH_A][DESC_RATE11M],
+ hal->tx_pwr_tbl[RF_PATH_B][DESC_RATE11M]};
+ u8 pwr_ref_ofdm[2] = {hal->tx_pwr_tbl[RF_PATH_A][DESC_RATEMCS7],
+ hal->tx_pwr_tbl[RF_PATH_B][DESC_RATEMCS7]};
+ s8 diff_a, diff_b;
+ u8 pwr_a, pwr_b;
+ s8 diff_idx[4];
+
+ rtw8822c_set_write_tx_power_ref(rtwdev, pwr_ref_cck, pwr_ref_ofdm);
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+ pwr_a = hal->tx_pwr_tbl[RF_PATH_A][rate];
+ pwr_b = hal->tx_pwr_tbl[RF_PATH_B][rate];
+ if (rs == 0) {
+ diff_a = (s8)pwr_a - (s8)pwr_ref_cck[0];
+ diff_b = (s8)pwr_b - (s8)pwr_ref_cck[1];
+ } else {
+ diff_a = (s8)pwr_a - (s8)pwr_ref_ofdm[0];
+ diff_b = (s8)pwr_b - (s8)pwr_ref_ofdm[1];
+ }
+ diff_idx[rate % 4] = min(diff_a, diff_b);
+ if (rate % 4 == 3)
+ rtw8822c_set_tx_power_diff(rtwdev, rate - 3,
+ diff_idx);
+ }
+ }
+}
+
+static void rtw8822c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 ldo_pwr;
+
+ ldo_pwr = rtw_read8(rtwdev, REG_ANAPARLDO_POW_MAC);
+ ldo_pwr = enable ? ldo_pwr | BIT_LDOE25_PON : ldo_pwr & ~BIT_LDOE25_PON;
+ rtw_write8(rtwdev, REG_ANAPARLDO_POW_MAC, ldo_pwr);
+}
+
+static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_enable;
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+ u32 ofdm_tx_counter;
+
+ cck_enable = rtw_read32(rtwdev, REG_ENCCK) & BIT_CCK_BLK_EN;
+ cck_fa_cnt = rtw_read16(rtwdev, REG_CCK_FACNT);
+ ofdm_fa_cnt = rtw_read16(rtwdev, REG_OFDM_FACNT);
+ ofdm_tx_counter = rtw_read16(rtwdev, REG_OFDM_TXCNT);
+ ofdm_fa_cnt -= ofdm_tx_counter;
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
+
+ rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0);
+ rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
+ rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
+ rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 2);
+ rtw_write32_set(rtwdev, REG_CNT_CTRL, BIT_ALL_CNT_RST);
+ rtw_write32_clr(rtwdev, REG_CNT_CTRL, BIT_ALL_CNT_RST);
+}
+
+static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
+{
+}
+
+static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x002E,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x002D,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x007F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4) | BIT(7), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0xFF1A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x002E,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0x0074,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0071,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)),
+ (BIT(7) | BIT(6) | BIT(5))},
+ {0x0061,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)), 0},
+ {0x001F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6)), BIT(7)},
+ {0x00EF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6)), BIT(7)},
+ {0x1045,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
+ {0x0093,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x001F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x00EF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x1045,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0xFF1A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x30},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0081,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7) | BIT(6), 0},
+ {0x0090,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
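+/* NULL-terminated lists of the sub-sequences that are run in order to
+ * power the card on and off
+ */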
+static struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
+ trans_carddis_to_cardemu_8822c,
+ trans_cardemu_to_act_8822c,
+ NULL
+};
+
+static struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
+ trans_act_to_cardemu_8822c,
+ trans_cardemu_to_carddis_8822c,
+ NULL
+};
+
+static struct rtw_intf_phy_para usb2_param_8822c[] = {
+ {0xFFFF, 0x00,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para usb3_param_8822c[] = {
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para_table phy_para_table_8822c = {
+ .usb2_para = usb2_param_8822c,
+ .usb3_para = usb3_param_8822c,
+ .gen1_para = pcie_gen1_param_8822c,
+ .gen2_para = pcie_gen2_param_8822c,
+ .n_usb2_para = ARRAY_SIZE(usb2_param_8822c),
+ .n_usb3_para = ARRAY_SIZE(usb3_param_8822c),
+ .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8822c),
+ .n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8822c),
+};
+
+static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
+ [0] = RTW_DEF_RFE(8822c, 0, 0),
+ [1] = RTW_DEF_RFE(8822c, 0, 0),
+ [2] = RTW_DEF_RFE(8822c, 0, 0),
+};
+
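+/* per-path initial gain (IGI) fields of REG_RXIGI (0x1d70) */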
+static struct rtw_hw_reg rtw8822c_dig[] = {
+ [0] = { .addr = 0x1d70, .mask = 0x7f },
+ [1] = { .addr = 0x1d70, .mask = 0x7f00 },
+};
+
+static struct rtw_page_table page_table_8822c[] = {
+ {64, 64, 64, 64, 1},
+ {64, 64, 64, 64, 1},
+ {64, 64, 0, 0, 1},
+ {64, 64, 64, 0, 1},
+ {64, 64, 64, 64, 1},
+};
+
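+/* TX queue to DMA ring mappings, one struct rtw_rqpn per supported
+ * configuration
+ */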
+static struct rtw_rqpn rqpn_table_8822c[] = {
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
+static struct rtw_chip_ops rtw8822c_ops = {
+ .phy_set_param = rtw8822c_phy_set_param,
+ .read_efuse = rtw8822c_read_efuse,
+ .query_rx_desc = rtw8822c_query_rx_desc,
+ .set_channel = rtw8822c_set_channel,
+ .mac_init = rtw8822c_mac_init,
+ .read_rf = rtw_phy_read_rf,
+ .write_rf = rtw_phy_write_rf_reg_mix,
+ .set_tx_power_index = rtw8822c_set_tx_power_index,
+ .cfg_ldo25 = rtw8822c_cfg_ldo25,
+ .false_alarm_statistics = rtw8822c_false_alarm_statistics,
+ .do_iqk = rtw8822c_do_iqk,
+};
+
+struct rtw_chip_info rtw8822c_hw_spec = {
+ .ops = &rtw8822c_ops,
+ .id = RTW_CHIP_TYPE_8822C,
+ .fw_name = "rtw88/rtw8822c_fw.bin",
+ .tx_pkt_desc_sz = 48,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 512,
+ .log_efuse_size = 768,
+ .ptct_efuse_size = 124,
+ .txff_size = 262144,
+ .rxff_size = 24576,
+ .txgi_factor = 2,
+ .is_pwr_by_rate_dec = false,
+ .max_power_index = 0x7f,
+ .csi_buf_pg_num = 50,
+ .band = RTW_BAND_2G | RTW_BAND_5G,
+ .page_size = 128,
+ .dig_min = 0x20,
+ .ht_supported = true,
+ .vht_supported = true,
+ .sys_func_en = 0xD8,
+ .pwr_on_seq = card_enable_flow_8822c,
+ .pwr_off_seq = card_disable_flow_8822c,
+ .page_table = page_table_8822c,
+ .rqpn_table = rqpn_table_8822c,
+ .intf_table = &phy_para_table_8822c,
+ .dig = rtw8822c_dig,
+ .rf_base_addr = {0x3c00, 0x4c00},
+ .rf_sipi_addr = {0x1808, 0x4108},
+ .mac_tbl = &rtw8822c_mac_tbl,
+ .agc_tbl = &rtw8822c_agc_tbl,
+ .bb_tbl = &rtw8822c_bb_tbl,
+ .rfk_init_tbl = &rtw8822c_array_mp_cal_init_tbl,
+ .rf_tbl = {&rtw8822c_rf_a_tbl, &rtw8822c_rf_b_tbl},
+ .rfe_defs = rtw8822c_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
+};
+EXPORT_SYMBOL(rtw8822c_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8822c_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
new file mode 100644
index 000000000000..d3bd9850baa0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8822C_H__
+#define __RTW8822C_H__
+
+#include <asm/byteorder.h>
+
+struct rtw8822cu_efuse {
+ u8 res0[0x30]; /* 0x120 */
+ u8 vid[2]; /* 0x150 */
+ u8 pid[2];
+ u8 res1[3];
+ u8 mac_addr[ETH_ALEN]; /* 0x157 */
+ u8 res2[0x3d];
+};
+
+struct rtw8822ce_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0x120 */
+ u8 vender_id[2];
+ u8 device_id[2];
+ u8 sub_vender_id[2];
+ u8 sub_device_id[2];
+ u8 pmc[2];
+ u8 exp_device_cap[2];
+ u8 msi_cap;
+ u8 ltr_cap; /* 0x133 */
+ u8 exp_link_control[2];
+ u8 link_cap[4];
+ u8 link_control[2];
+ u8 serial_number[8];
+ u8 res0:2; /* 0x144 */
+ u8 ltr_en:1;
+ u8 res1:2;
+ u8 obff:2;
+ u8 res2:3;
+ u8 obff_cap:2;
+ u8 res3:4;
+ u8 class_code[3];
+ u8 res4;
+ u8 pci_pm_L1_2_supp:1;
+ u8 pci_pm_L1_1_supp:1;
+ u8 aspm_pm_L1_2_supp:1;
+ u8 aspm_pm_L1_1_supp:1;
+ u8 L1_pm_substates_supp:1;
+ u8 res5:3;
+ u8 port_common_mode_restore_time;
+ u8 port_t_power_on_scale:2;
+ u8 res6:1;
+ u8 port_t_power_on_value:5;
+ u8 res7;
+};
+
+struct rtw8822c_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 res1;
+ u8 iqk_lck;
+ u8 res2[5]; /* 0xbc */
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 tx_bb_swing_setting_2g;
+ u8 tx_bb_swing_setting_5g;
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code[2];
+ u8 res3[3];
+ u8 path_a_thermal; /* 0xd0 */
+ u8 path_b_thermal;
+ u8 res4[2];
+ u8 rx_gain_gap_2g_ofdm;
+ u8 res5;
+ u8 rx_gain_gap_2g_cck;
+ u8 res6;
+ u8 rx_gain_gap_5gl;
+ u8 res7;
+ u8 rx_gain_gap_5gm;
+ u8 res8;
+ u8 rx_gain_gap_5gh;
+ u8 res9;
+ u8 res10[0x42];
+ union {
+ struct rtw8822cu_efuse u;
+ struct rtw8822ce_efuse e;
+ };
+};
+
+#define DACK_PATH_8822C 2
+#define DACK_REG_8822C 16
+#define DACK_RF_8822C 1
+#define DACK_SN_8822C 100
+
+/* phy status page0 */
+#define GET_PHY_STAT_P0_PWDB_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P0_PWDB_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P0_GAIN_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(21, 16))
+#define GET_PHY_STAT_P0_GAIN_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(29, 24))
+
+/* phy status page1 */
+#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
+#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
+#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+
+#define REG_ANAPARLDO_POW_MAC 0x0029
+#define BIT_LDOE25_PON BIT(0)
+#define REG_RRSR 0x0440
+#define BITS_RRSR_RSC (BIT(21) | BIT(22))
+
+#define REG_TXDFIR0 0x808
+#define REG_DFIRBW 0x810
+#define REG_ANTMAP0 0x820
+#define REG_ANTMAP 0x824
+#define REG_DYMPRITH 0x86c
+#define REG_DYMENTH0 0x870
+#define REG_DYMENTH 0x874
+#define REG_DYMTHMIN 0x8a4
+#define REG_TXBWCTL 0x9b0
+#define REG_TXCLK 0x9b4
+#define REG_SCOTRK 0xc30
+#define REG_MRCM 0xc38
+#define REG_AGCSWSH 0xc44
+#define REG_ANTWTPD 0xc54
+#define REG_ORITXCODE 0x1800
+#define REG_3WIRE 0x180c
+#define BIT_3WIRE_TX_EN BIT(0)
+#define BIT_3WIRE_RX_EN BIT(1)
+#define BIT_3WIRE_PI_ON BIT(28)
+#define REG_RXAGCCTL0 0x18ac
+#define REG_CCKSB 0x1a00
+#define REG_RXCCKSEL 0x1a04
+#define REG_BGCTRL 0x1a14
+#define BITS_RX_IQ_WEIGHT (BIT(8) | BIT(9))
+#define REG_TXF0 0x1a20
+#define REG_TXF1 0x1a24
+#define REG_TXF2 0x1a28
+#define REG_CCANRX 0x1a2c
+#define BIT_CCK_FA_RST (BIT(14) | BIT(15))
+#define BIT_OFDM_FA_RST (BIT(12) | BIT(13))
+#define REG_CCK_FACNT 0x1a5c
+#define REG_CCKTXONLY 0x1a80
+#define BIT_BB_CCK_CHECK_EN BIT(18)
+#define REG_TXF3 0x1a98
+#define REG_TXF4 0x1a9c
+#define REG_TXF5 0x1aa0
+#define REG_TXF6 0x1aac
+#define REG_TXF7 0x1ab0
+#define REG_TXANT 0x1c28
+#define REG_ENCCK 0x1c3c
+#define BIT_CCK_BLK_EN BIT(1)
+#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
+#define REG_CCAMSK 0x1c80
+#define REG_RXFNCTL 0x1d30
+#define REG_RXIGI 0x1d70
+#define REG_ENFN 0x1e24
+#define REG_TXANTSEG 0x1e28
+#define REG_TXLGMAP 0x1e2c
+#define REG_CCKPATH 0x1e5c
+#define REG_CNT_CTRL 0x1eb4
+#define BIT_ALL_CNT_RST BIT(25)
+#define REG_OFDM_FACNT 0x2d00
+#define REG_OFDM_TXCNT 0x2de0
+#define REG_ORITXCODE2 0x4100
+#define REG_3WIRE2 0x410c
+#define REG_RXAGCCTL 0x41ac
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
new file mode 100644
index 000000000000..49044f510c6c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -0,0 +1,11753 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8822c_table.h"
+
+static const u32 rtw8822c_mac[] = {
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822c_mac, rtw_phy_cfg_mac);
+
+static const u32 rtw8822c_agc[] = {
+ 0x1D90, 0x300001FF,
+ 0x1D90, 0x300101FF,
+ 0x1D90, 0x300201FE,
+ 0x1D90, 0x300301FD,
+ 0x1D90, 0x300401FC,
+ 0x1D90, 0x300501FB,
+ 0x1D90, 0x300601FA,
+ 0x1D90, 0x300701F9,
+ 0x1D90, 0x300801F8,
+ 0x1D90, 0x300901F7,
+ 0x1D90, 0x300A01F6,
+ 0x1D90, 0x300B01F5,
+ 0x1D90, 0x300C01F4,
+ 0x1D90, 0x300D01F3,
+ 0x1D90, 0x300E01F2,
+ 0x1D90, 0x300F01F1,
+ 0x1D90, 0x301001F0,
+ 0x1D90, 0x301101EF,
+ 0x1D90, 0x301201EE,
+ 0x1D90, 0x301301ED,
+ 0x1D90, 0x301401EC,
+ 0x1D90, 0x301501EB,
+ 0x1D90, 0x30160192,
+ 0x1D90, 0x30170191,
+ 0x1D90, 0x30180190,
+ 0x1D90, 0x3019018F,
+ 0x1D90, 0x301A018E,
+ 0x1D90, 0x301B018D,
+ 0x1D90, 0x301C018C,
+ 0x1D90, 0x301D018B,
+ 0x1D90, 0x301E018A,
+ 0x1D90, 0x301F0189,
+ 0x1D90, 0x30200188,
+ 0x1D90, 0x30210187,
+ 0x1D90, 0x30220186,
+ 0x1D90, 0x30230185,
+ 0x1D90, 0x3024014B,
+ 0x1D90, 0x3025014A,
+ 0x1D90, 0x30260149,
+ 0x1D90, 0x30270148,
+ 0x1D90, 0x30280147,
+ 0x1D90, 0x30290146,
+ 0x1D90, 0x302A0145,
+ 0x1D90, 0x302B0144,
+ 0x1D90, 0x302C0143,
+ 0x1D90, 0x302D0142,
+ 0x1D90, 0x302E00C8,
+ 0x1D90, 0x302F00C7,
+ 0x1D90, 0x303000C6,
+ 0x1D90, 0x303100C5,
+ 0x1D90, 0x303200C4,
+ 0x1D90, 0x30330088,
+ 0x1D90, 0x30340087,
+ 0x1D90, 0x30350086,
+ 0x1D90, 0x30360045,
+ 0x1D90, 0x30370044,
+ 0x1D90, 0x30380043,
+ 0x1D90, 0x30390023,
+ 0x1D90, 0x303A0022,
+ 0x1D90, 0x303B0021,
+ 0x1D90, 0x303C0020,
+ 0x1D90, 0x303D0002,
+ 0x1D90, 0x303E0001,
+ 0x1D90, 0x303F0000,
+ 0x1D90, 0x304000FF,
+ 0x1D90, 0x304100FF,
+ 0x1D90, 0x304200FF,
+ 0x1D90, 0x304300FF,
+ 0x1D90, 0x304400FE,
+ 0x1D90, 0x304500FD,
+ 0x1D90, 0x304600FC,
+ 0x1D90, 0x304700FB,
+ 0x1D90, 0x304800FA,
+ 0x1D90, 0x304900F9,
+ 0x1D90, 0x304A00F8,
+ 0x1D90, 0x304B00F7,
+ 0x1D90, 0x304C00F6,
+ 0x1D90, 0x304D00F5,
+ 0x1D90, 0x304E00F4,
+ 0x1D90, 0x304F00F3,
+ 0x1D90, 0x305000F2,
+ 0x1D90, 0x305100F1,
+ 0x1D90, 0x305200F0,
+ 0x1D90, 0x305300EF,
+ 0x1D90, 0x305400EE,
+ 0x1D90, 0x305500ED,
+ 0x1D90, 0x305600EC,
+ 0x1D90, 0x305700EB,
+ 0x1D90, 0x305800EA,
+ 0x1D90, 0x305900E9,
+ 0x1D90, 0x305A00E8,
+ 0x1D90, 0x305B00E7,
+ 0x1D90, 0x305C00E6,
+ 0x1D90, 0x305D00C7,
+ 0x1D90, 0x305E00C6,
+ 0x1D90, 0x305F00C5,
+ 0x1D90, 0x306000C4,
+ 0x1D90, 0x306100C3,
+ 0x1D90, 0x306200C2,
+ 0x1D90, 0x306300A4,
+ 0x1D90, 0x306400A3,
+ 0x1D90, 0x306500A2,
+ 0x1D90, 0x30660086,
+ 0x1D90, 0x30670085,
+ 0x1D90, 0x30680084,
+ 0x1D90, 0x30690083,
+ 0x1D90, 0x306A0082,
+ 0x1D90, 0x306B0069,
+ 0x1D90, 0x306C0068,
+ 0x1D90, 0x306D0067,
+ 0x1D90, 0x306E0066,
+ 0x1D90, 0x306F0065,
+ 0x1D90, 0x30700064,
+ 0x1D90, 0x30710063,
+ 0x1D90, 0x30720044,
+ 0x1D90, 0x30730043,
+ 0x1D90, 0x30740042,
+ 0x1D90, 0x30750025,
+ 0x1D90, 0x30760024,
+ 0x1D90, 0x30770023,
+ 0x1D90, 0x30780022,
+ 0x1D90, 0x30790021,
+ 0x1D90, 0x307A0020,
+ 0x1D90, 0x307B0003,
+ 0x1D90, 0x307C0002,
+ 0x1D90, 0x307D0001,
+ 0x1D90, 0x307E0000,
+ 0x1D90, 0x307F0000,
+ 0x1D90, 0x308000FF,
+ 0x1D90, 0x308100FF,
+ 0x1D90, 0x308200FF,
+ 0x1D90, 0x308300FF,
+ 0x1D90, 0x308400FE,
+ 0x1D90, 0x308500FD,
+ 0x1D90, 0x308600FC,
+ 0x1D90, 0x308700FB,
+ 0x1D90, 0x308800FA,
+ 0x1D90, 0x308900F9,
+ 0x1D90, 0x308A00F8,
+ 0x1D90, 0x308B00F7,
+ 0x1D90, 0x308C00F6,
+ 0x1D90, 0x308D00F5,
+ 0x1D90, 0x308E00F4,
+ 0x1D90, 0x308F00F3,
+ 0x1D90, 0x309000F2,
+ 0x1D90, 0x309100F1,
+ 0x1D90, 0x309200F0,
+ 0x1D90, 0x309300EF,
+ 0x1D90, 0x309400EE,
+ 0x1D90, 0x309500ED,
+ 0x1D90, 0x309600EC,
+ 0x1D90, 0x309700EB,
+ 0x1D90, 0x309800EA,
+ 0x1D90, 0x309900E9,
+ 0x1D90, 0x309A00E8,
+ 0x1D90, 0x309B00E7,
+ 0x1D90, 0x309C00E6,
+ 0x1D90, 0x309D00C7,
+ 0x1D90, 0x309E00C6,
+ 0x1D90, 0x309F00C5,
+ 0x1D90, 0x30A000C4,
+ 0x1D90, 0x30A100C3,
+ 0x1D90, 0x30A200C2,
+ 0x1D90, 0x30A300A4,
+ 0x1D90, 0x30A400A3,
+ 0x1D90, 0x30A500A2,
+ 0x1D90, 0x30A60086,
+ 0x1D90, 0x30A70085,
+ 0x1D90, 0x30A80084,
+ 0x1D90, 0x30A90083,
+ 0x1D90, 0x30AA0082,
+ 0x1D90, 0x30AB0069,
+ 0x1D90, 0x30AC0068,
+ 0x1D90, 0x30AD0067,
+ 0x1D90, 0x30AE0066,
+ 0x1D90, 0x30AF0065,
+ 0x1D90, 0x30B00064,
+ 0x1D90, 0x30B10063,
+ 0x1D90, 0x30B20044,
+ 0x1D90, 0x30B30043,
+ 0x1D90, 0x30B40042,
+ 0x1D90, 0x30B50025,
+ 0x1D90, 0x30B60024,
+ 0x1D90, 0x30B70023,
+ 0x1D90, 0x30B80022,
+ 0x1D90, 0x30B90021,
+ 0x1D90, 0x30BA0020,
+ 0x1D90, 0x30BB0003,
+ 0x1D90, 0x30BC0002,
+ 0x1D90, 0x30BD0001,
+ 0x1D90, 0x30BE0000,
+ 0x1D90, 0x30BF0000,
+ 0x1D90, 0x30C000FF,
+ 0x1D90, 0x30C100FF,
+ 0x1D90, 0x30C200FF,
+ 0x1D90, 0x30C300FF,
+ 0x1D90, 0x30C400FE,
+ 0x1D90, 0x30C500FD,
+ 0x1D90, 0x30C600FC,
+ 0x1D90, 0x30C700FB,
+ 0x1D90, 0x30C800FA,
+ 0x1D90, 0x30C900F9,
+ 0x1D90, 0x30CA00F8,
+ 0x1D90, 0x30CB00F7,
+ 0x1D90, 0x30CC00F6,
+ 0x1D90, 0x30CD00F5,
+ 0x1D90, 0x30CE00F4,
+ 0x1D90, 0x30CF00F3,
+ 0x1D90, 0x30D000F2,
+ 0x1D90, 0x30D100F1,
+ 0x1D90, 0x30D200F0,
+ 0x1D90, 0x30D300EF,
+ 0x1D90, 0x30D400EE,
+ 0x1D90, 0x30D500ED,
+ 0x1D90, 0x30D600EC,
+ 0x1D90, 0x30D700EB,
+ 0x1D90, 0x30D800EA,
+ 0x1D90, 0x30D900E9,
+ 0x1D90, 0x30DA00E8,
+ 0x1D90, 0x30DB00E7,
+ 0x1D90, 0x30DC00E6,
+ 0x1D90, 0x30DD00C7,
+ 0x1D90, 0x30DE00C6,
+ 0x1D90, 0x30DF00C5,
+ 0x1D90, 0x30E000C4,
+ 0x1D90, 0x30E100C3,
+ 0x1D90, 0x30E200C2,
+ 0x1D90, 0x30E300A4,
+ 0x1D90, 0x30E400A3,
+ 0x1D90, 0x30E500A2,
+ 0x1D90, 0x30E60086,
+ 0x1D90, 0x30E70085,
+ 0x1D90, 0x30E80084,
+ 0x1D90, 0x30E90083,
+ 0x1D90, 0x30EA0082,
+ 0x1D90, 0x30EB0069,
+ 0x1D90, 0x30EC0068,
+ 0x1D90, 0x30ED0067,
+ 0x1D90, 0x30EE0066,
+ 0x1D90, 0x30EF0065,
+ 0x1D90, 0x30F00064,
+ 0x1D90, 0x30F10063,
+ 0x1D90, 0x30F20044,
+ 0x1D90, 0x30F30043,
+ 0x1D90, 0x30F40042,
+ 0x1D90, 0x30F50025,
+ 0x1D90, 0x30F60024,
+ 0x1D90, 0x30F70023,
+ 0x1D90, 0x30F80022,
+ 0x1D90, 0x30F90021,
+ 0x1D90, 0x30FA0020,
+ 0x1D90, 0x30FB0003,
+ 0x1D90, 0x30FC0002,
+ 0x1D90, 0x30FD0001,
+ 0x1D90, 0x30FE0000,
+ 0x1D90, 0x30FF0000,
+ 0x1D90, 0x310001FF,
+ 0x1D90, 0x310101FF,
+ 0x1D90, 0x310201FF,
+ 0x1D90, 0x310301FF,
+ 0x1D90, 0x310401FF,
+ 0x1D90, 0x310501FF,
+ 0x1D90, 0x310601FF,
+ 0x1D90, 0x310701FF,
+ 0x1D90, 0x310801FF,
+ 0x1D90, 0x310901FE,
+ 0x1D90, 0x310A01FD,
+ 0x1D90, 0x310B01FC,
+ 0x1D90, 0x310C01FB,
+ 0x1D90, 0x310D01FA,
+ 0x1D90, 0x310E01F9,
+ 0x1D90, 0x310F01F8,
+ 0x1D90, 0x311001F7,
+ 0x1D90, 0x311101F6,
+ 0x1D90, 0x311201F5,
+ 0x1D90, 0x311301F4,
+ 0x1D90, 0x311401F3,
+ 0x1D90, 0x311501F2,
+ 0x1D90, 0x311601F1,
+ 0x1D90, 0x311701F0,
+ 0x1D90, 0x311801EF,
+ 0x1D90, 0x311901EE,
+ 0x1D90, 0x311A01ED,
+ 0x1D90, 0x311B01EC,
+ 0x1D90, 0x311C01EB,
+ 0x1D90, 0x311D0192,
+ 0x1D90, 0x311E0191,
+ 0x1D90, 0x311F0190,
+ 0x1D90, 0x3120018F,
+ 0x1D90, 0x3121018E,
+ 0x1D90, 0x3122018D,
+ 0x1D90, 0x3123018C,
+ 0x1D90, 0x3124018B,
+ 0x1D90, 0x3125018A,
+ 0x1D90, 0x31260189,
+ 0x1D90, 0x31270188,
+ 0x1D90, 0x31280187,
+ 0x1D90, 0x31290186,
+ 0x1D90, 0x312A0185,
+ 0x1D90, 0x312B0149,
+ 0x1D90, 0x312C0148,
+ 0x1D90, 0x312D0147,
+ 0x1D90, 0x312E0146,
+ 0x1D90, 0x312F0145,
+ 0x1D90, 0x31300144,
+ 0x1D90, 0x31310143,
+ 0x1D90, 0x31320142,
+ 0x1D90, 0x31330141,
+ 0x1D90, 0x31340140,
+ 0x1D90, 0x313500C7,
+ 0x1D90, 0x313600C6,
+ 0x1D90, 0x313700C5,
+ 0x1D90, 0x313800C4,
+ 0x1D90, 0x313900C3,
+ 0x1D90, 0x313A0088,
+ 0x1D90, 0x313B0087,
+ 0x1D90, 0x313C0086,
+ 0x1D90, 0x313D0045,
+ 0x1D90, 0x313E0044,
+ 0x1D90, 0x313F0043,
+ 0x1D90, 0x314001FF,
+ 0x1D90, 0x314101FF,
+ 0x1D90, 0x314201FF,
+ 0x1D90, 0x314301FF,
+ 0x1D90, 0x314401FF,
+ 0x1D90, 0x314501FF,
+ 0x1D90, 0x314601FF,
+ 0x1D90, 0x314701FE,
+ 0x1D90, 0x314801FD,
+ 0x1D90, 0x314901FC,
+ 0x1D90, 0x314A01FB,
+ 0x1D90, 0x314B01FA,
+ 0x1D90, 0x314C01F9,
+ 0x1D90, 0x314D01F8,
+ 0x1D90, 0x314E01F7,
+ 0x1D90, 0x314F01F6,
+ 0x1D90, 0x315001F5,
+ 0x1D90, 0x315101F4,
+ 0x1D90, 0x315201F3,
+ 0x1D90, 0x315301F2,
+ 0x1D90, 0x315401F1,
+ 0x1D90, 0x315501F0,
+ 0x1D90, 0x315601EF,
+ 0x1D90, 0x315701EE,
+ 0x1D90, 0x315801ED,
+ 0x1D90, 0x315901EC,
+ 0x1D90, 0x315A01EB,
+ 0x1D90, 0x315B01EA,
+ 0x1D90, 0x315C01E9,
+ 0x1D90, 0x315D018F,
+ 0x1D90, 0x315E018E,
+ 0x1D90, 0x315F018D,
+ 0x1D90, 0x3160018C,
+ 0x1D90, 0x3161018B,
+ 0x1D90, 0x3162018A,
+ 0x1D90, 0x31630189,
+ 0x1D90, 0x31640188,
+ 0x1D90, 0x31650187,
+ 0x1D90, 0x31660186,
+ 0x1D90, 0x31670185,
+ 0x1D90, 0x31680184,
+ 0x1D90, 0x31690183,
+ 0x1D90, 0x316A0182,
+ 0x1D90, 0x316B0149,
+ 0x1D90, 0x316C0148,
+ 0x1D90, 0x316D0147,
+ 0x1D90, 0x316E0146,
+ 0x1D90, 0x316F0145,
+ 0x1D90, 0x31700144,
+ 0x1D90, 0x31710143,
+ 0x1D90, 0x31720142,
+ 0x1D90, 0x31730141,
+ 0x1D90, 0x31740140,
+ 0x1D90, 0x317500C7,
+ 0x1D90, 0x317600C6,
+ 0x1D90, 0x317700C5,
+ 0x1D90, 0x317800C4,
+ 0x1D90, 0x317900C3,
+ 0x1D90, 0x317A0088,
+ 0x1D90, 0x317B0087,
+ 0x1D90, 0x317C0086,
+ 0x1D90, 0x317D0045,
+ 0x1D90, 0x317E0044,
+ 0x1D90, 0x317F0043,
+ 0x1D90, 0x318001FE,
+ 0x1D90, 0x318101FD,
+ 0x1D90, 0x318201FC,
+ 0x1D90, 0x318301FB,
+ 0x1D90, 0x318401FA,
+ 0x1D90, 0x318501F9,
+ 0x1D90, 0x318601F8,
+ 0x1D90, 0x318701F7,
+ 0x1D90, 0x318801F6,
+ 0x1D90, 0x318901F5,
+ 0x1D90, 0x318A01F4,
+ 0x1D90, 0x318B01F3,
+ 0x1D90, 0x318C01F2,
+ 0x1D90, 0x318D01F1,
+ 0x1D90, 0x318E01F0,
+ 0x1D90, 0x318F01EF,
+ 0x1D90, 0x319001EE,
+ 0x1D90, 0x319101ED,
+ 0x1D90, 0x319201EC,
+ 0x1D90, 0x319301EB,
+ 0x1D90, 0x319401EA,
+ 0x1D90, 0x319501E9,
+ 0x1D90, 0x3196018F,
+ 0x1D90, 0x3197018E,
+ 0x1D90, 0x3198018D,
+ 0x1D90, 0x3199018C,
+ 0x1D90, 0x319A018B,
+ 0x1D90, 0x319B018A,
+ 0x1D90, 0x319C0189,
+ 0x1D90, 0x319D0188,
+ 0x1D90, 0x319E0187,
+ 0x1D90, 0x319F0186,
+ 0x1D90, 0x31A00185,
+ 0x1D90, 0x31A10184,
+ 0x1D90, 0x31A20183,
+ 0x1D90, 0x31A30182,
+ 0x1D90, 0x31A40149,
+ 0x1D90, 0x31A50148,
+ 0x1D90, 0x31A60147,
+ 0x1D90, 0x31A70146,
+ 0x1D90, 0x31A80145,
+ 0x1D90, 0x31A90144,
+ 0x1D90, 0x31AA0143,
+ 0x1D90, 0x31AB0142,
+ 0x1D90, 0x31AC0141,
+ 0x1D90, 0x31AD0140,
+ 0x1D90, 0x31AE00C7,
+ 0x1D90, 0x31AF00C6,
+ 0x1D90, 0x31B000C5,
+ 0x1D90, 0x31B100C4,
+ 0x1D90, 0x31B200C3,
+ 0x1D90, 0x31B30088,
+ 0x1D90, 0x31B40087,
+ 0x1D90, 0x31B50086,
+ 0x1D90, 0x31B60045,
+ 0x1D90, 0x31B70044,
+ 0x1D90, 0x31B80043,
+ 0x1D90, 0x31B90023,
+ 0x1D90, 0x31BA0022,
+ 0x1D90, 0x31BB0021,
+ 0x1D90, 0x31BC0020,
+ 0x1D90, 0x31BD0002,
+ 0x1D90, 0x31BE0001,
+ 0x1D90, 0x31BF0000,
+ 0x1D70, 0x22222222,
+ 0x1D70, 0x20202020,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822c_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8822c_bb[] = {
+ 0x1D0C, 0x00410000,
+ 0x1C3C, 0x01038040,
+ 0x1C90, 0x00E49708,
+ 0x800, 0x00000000,
+ 0x804, 0xD6300000,
+ 0x808, 0x60956093,
+ 0x80C, 0x00000025,
+ 0x810, 0x11B019B0,
+ 0x814, 0x00904080,
+ 0x818, 0xC30056F1,
+ 0x81C, 0x00050000,
+ 0x820, 0x11111133,
+ 0x824, 0xC3C3CCC4,
+ 0x828, 0x30FB186C,
+ 0x82C, 0x185D6556,
+ 0x830, 0x1751145B,
+ 0x834, 0x776995D7,
+ 0x838, 0x74777A7D,
+ 0x83C, 0xF9AA9982,
+ 0x840, 0x89AA9ABB,
+ 0x844, 0x0DEEDDC1,
+ 0x848, 0xCDEEDEFF,
+ 0x84C, 0xFFFF5555,
+ 0x850, 0x6F7A727D,
+ 0x854, 0x6C776F7A,
+ 0x858, 0x6F7A6C77,
+ 0x85C, 0x69746974,
+ 0x860, 0x6F7A6C77,
+ 0x864, 0x6C776C77,
+ 0x868, 0x727D6F7A,
+ 0x86C, 0x69D7B196,
+ 0x870, 0x1A6D769B,
+ 0x874, 0x55823917,
+ 0x878, 0x00C025BD,
+ 0x87C, 0x4140557D,
+ 0x880, 0x9A1D9D47,
+ 0x884, 0x1DE7134F,
+ 0x888, 0x2857A857,
+ 0x88C, 0x520E8A24,
+ 0x890, 0x8F628C44,
+ 0x894, 0x72745F43,
+ 0x898, 0x03F02F0D,
+ 0x89C, 0x5DB6886F,
+ 0x8A0, 0x07DC309F,
+ 0x8A4, 0x09412495,
+ 0x8A8, 0x222222A9,
+ 0x8AC, 0x89628C44,
+ 0x8B0, 0x72745F43,
+ 0x8B4, 0x03F02F0D,
+ 0x8B8, 0x55B6886F,
+ 0x8BC, 0x07D0309F,
+ 0x8C0, 0x70404023,
+ 0x8C4, 0x00440001,
+ 0x8C8, 0x7A7A2E26,
+ 0x8CC, 0x25297777,
+ 0x8D0, 0x6CEB6DCE,
+ 0x8D4, 0x0005A632,
+ 0x8D8, 0x00000000,
+ 0x8DC, 0x00000000,
+ 0x8E0, 0x00000000,
+ 0x8E4, 0x00000000,
+ 0x8E8, 0x00000000,
+ 0x8EC, 0x00000000,
+ 0x8F0, 0x00000000,
+ 0x8F4, 0x00000000,
+ 0x8F8, 0x25239843,
+ 0x900, 0x00000000,
+ 0x904, 0x00000000,
+ 0x908, 0x000008CB,
+ 0x90C, 0x00000000,
+ 0x910, 0x00000000,
+ 0x914, 0x20000000,
+ 0x918, 0x20000000,
+ 0x91C, 0x20000000,
+ 0x920, 0x20000000,
+ 0x924, 0x00000000,
+ 0x928, 0x0000003A,
+ 0x92C, 0x0000003A,
+ 0x930, 0x0000003A,
+ 0x934, 0x0000003A,
+ 0x938, 0x0000000F,
+ 0x93C, 0x00000000,
+ 0x940, 0x4E1F3E81,
+ 0x944, 0x4E1F3E81,
+ 0x948, 0x4E1F3E81,
+ 0x94C, 0x4E1F3E81,
+ 0x950, 0x03020100,
+ 0x954, 0x07060504,
+ 0x958, 0x0B0A0908,
+ 0x95C, 0x0F0E0D0C,
+ 0x960, 0x13121110,
+ 0x964, 0x17161514,
+ 0x968, 0x03020100,
+ 0x96C, 0x07060504,
+ 0x970, 0x0B0A0908,
+ 0x974, 0x0F0E0D0C,
+ 0x978, 0x13121110,
+ 0x97C, 0x17161514,
+ 0x980, 0x03020100,
+ 0x984, 0x07060504,
+ 0x988, 0x0B0A0908,
+ 0x98C, 0x0F0E0D0C,
+ 0x990, 0x13121110,
+ 0x994, 0x17161514,
+ 0x998, 0x03020100,
+ 0x99C, 0x07060504,
+ 0x9A0, 0x0B0A0908,
+ 0x9A4, 0x0F0E0D0C,
+ 0x9A8, 0x13121110,
+ 0x9AC, 0x17161514,
+ 0x9B0, 0x00002200,
+ 0x9B4, 0xDB6FFF00,
+ 0x9B8, 0x00400064,
+ 0x9BC, 0x00000000,
+ 0x9C0, 0x01010101,
+ 0x9C4, 0x00640064,
+ 0x9C8, 0x00640064,
+ 0x9CC, 0x00007777,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E0, 0x00000000,
+ 0x9E4, 0x00000000,
+ 0x9E8, 0x00000000,
+ 0x9EC, 0x00000000,
+ 0x9F0, 0x100024E0,
+ 0x9F4, 0x00000000,
+ 0x9F8, 0x00000000,
+ 0xA00, 0x02001208,
+ 0xA04, 0x00000000,
+ 0xA08, 0x00000000,
+ 0xA0C, 0x00000000,
+ 0xA10, 0x00000000,
+ 0xA14, 0x00000000,
+ 0xA18, 0x00000000,
+ 0xA1C, 0x00000000,
+ 0xA20, 0xEB31B333,
+ 0xA24, 0x00275485,
+ 0xA28, 0x00166366,
+ 0xA2C, 0x00275485,
+ 0xA30, 0x00166366,
+ 0xA34, 0x00275485,
+ 0xA38, 0x00200400,
+ 0xA3C, 0x00200400,
+ 0xA40, 0xB35DC5BD,
+ 0xA44, 0x3033BEBD,
+ 0xA48, 0x2A521254,
+ 0xA4C, 0xA2733345,
+ 0xA50, 0x617BE003,
+ 0xA54, 0x50000968,
+ 0xA58, 0x00020000,
+ 0xA5C, 0x01000000,
+ 0xA60, 0x02000000,
+ 0xA64, 0x03000000,
+ 0xA68, 0x00020000,
+ 0xA6C, 0x00000000,
+ 0xA70, 0x00000000,
+ 0xA74, 0x00000000,
+ 0xA78, 0x00000000,
+ 0xA7C, 0x00000000,
+ 0xA80, 0x00000000,
+ 0xA84, 0x00000000,
+ 0xA88, 0x00000000,
+ 0xA8C, 0x00000000,
+ 0xA90, 0x00000000,
+ 0xA94, 0x00000000,
+ 0xA98, 0x00000000,
+ 0xA9C, 0x00000000,
+ 0xAA0, 0x00000000,
+ 0xAA4, 0x00000000,
+ 0xAA8, 0x00000000,
+ 0xAAC, 0x00000000,
+ 0xAB0, 0x00000000,
+ 0xAB4, 0x00000000,
+ 0xAB8, 0x00000000,
+ 0xABC, 0x00000000,
+ 0xAC0, 0x00000000,
+ 0xAC4, 0x00000000,
+ 0xAC8, 0x00000000,
+ 0xACC, 0x00000000,
+ 0xAD0, 0x00000000,
+ 0xAD4, 0x00000000,
+ 0xAD8, 0x00000000,
+ 0xADC, 0x00000000,
+ 0xAE0, 0x00000000,
+ 0xAE4, 0x00000000,
+ 0xAE8, 0x00000000,
+ 0xAEC, 0x00000000,
+ 0xAF0, 0x00000000,
+ 0xAF4, 0x00000000,
+ 0xAF8, 0x00000000,
+ 0xB00, 0x00000000,
+ 0xB04, 0x00000000,
+ 0xB08, 0x00000000,
+ 0xB0C, 0x00000000,
+ 0xB10, 0x00000000,
+ 0xB14, 0x00000000,
+ 0xB18, 0x00000000,
+ 0xB1C, 0x00000000,
+ 0xB20, 0x00000000,
+ 0xB24, 0x00000000,
+ 0xB28, 0x00000000,
+ 0xB2C, 0x00000000,
+ 0xB30, 0x00000000,
+ 0xB34, 0x00000000,
+ 0xB38, 0x00000000,
+ 0xB3C, 0x00000000,
+ 0xB40, 0x00000000,
+ 0xB44, 0x00000000,
+ 0xB48, 0x00000000,
+ 0xB4C, 0x00000000,
+ 0xB50, 0x00000000,
+ 0xB54, 0x00000000,
+ 0xB58, 0x00060100,
+ 0xB5C, 0x00000000,
+ 0xB60, 0x00000000,
+ 0xB64, 0x00000000,
+ 0xB68, 0x00000000,
+ 0xB6C, 0x00000000,
+ 0xB70, 0x00000000,
+ 0xB74, 0x00000000,
+ 0xB78, 0x00000000,
+ 0xB7C, 0x00000000,
+ 0xB80, 0x00000000,
+ 0xB84, 0x00000000,
+ 0xB88, 0x00000000,
+ 0xB8C, 0x00000000,
+ 0xB90, 0x00000000,
+ 0xB94, 0x00000000,
+ 0xB98, 0x00000000,
+ 0xB9C, 0x00000000,
+ 0xBA0, 0x00000000,
+ 0xBA4, 0x00000000,
+ 0xBA8, 0x00000000,
+ 0xBAC, 0x00000000,
+ 0xBB0, 0x00000000,
+ 0xBB4, 0x00000000,
+ 0xBB8, 0x00000000,
+ 0xBBC, 0x00000000,
+ 0xBC0, 0x00000000,
+ 0xBC4, 0x00000000,
+ 0xBC8, 0x00000000,
+ 0xBCC, 0x00000000,
+ 0xBD0, 0x00000000,
+ 0xBD4, 0x00000000,
+ 0xBD8, 0x00000000,
+ 0xBDC, 0x00000000,
+ 0xBE0, 0x00000000,
+ 0xBE4, 0x00000000,
+ 0xBE8, 0x00000000,
+ 0xBEC, 0x00000000,
+ 0xBF0, 0x00000000,
+ 0xBF4, 0x00000000,
+ 0xBF8, 0x00000000,
+ 0xC00, 0x1C8BA0D6,
+ 0xC04, 0x00000001,
+ 0xC08, 0x00000000,
+ 0xC0C, 0x02F1D8B7,
+ 0xC10, 0x000000B0,
+ 0xC14, 0x0000D891,
+ 0xC18, 0x00087672,
+ 0xC1C, 0x15260000,
+ 0xC20, 0x00000000,
+ 0xC24, 0x40600000,
+ 0xC28, 0x06400F76,
+ 0xC2C, 0xE30020E1,
+ 0xC30, 0x140C9494,
+ 0xC34, 0x00A04946,
+ 0xC38, 0x011D4820,
+ 0xC3C, 0x168DB61B,
+ 0xC40, 0x009C50F8,
+ 0xC44, 0x2013BAD1,
+ 0xC48, 0xFFFFF7CC,
+ 0xC4C, 0xA000FFFF,
+ 0xC50, 0x20D0F800,
+ 0xC54, 0x941A0200,
+ 0xC58, 0x18380111,
+ 0xC5C, 0x006E01B8,
+ 0xC60, 0x2CA5555B,
+ 0xC64, 0x0210005F,
+ 0xC68, 0x039A5300,
+ 0xC6C, 0x0265C2BA,
+ 0xC70, 0x000CEB21,
+ 0xC74, 0x0E149CA1,
+ 0xC78, 0x1AB4956B,
+ 0xC7C, 0x00000ABF,
+ 0xC80, 0xC02A8799,
+ 0xC84, 0x06C636C6,
+ 0xC88, 0x08090202,
+ 0xC8C, 0x00204048,
+ 0xC90, 0x00F85F85,
+ 0xC94, 0x00000F85,
+ 0xC98, 0x58385858,
+ 0xC9C, 0x18382838,
+ 0xCA0, 0x00002838,
+ 0xCA4, 0x3A253A3A,
+ 0xCA8, 0x10251A25,
+ 0xCAC, 0x00001025,
+ 0xCB0, 0x3A133A3A,
+ 0xCB4, 0x08130D13,
+ 0xCB8, 0x00000813,
+ 0xCBC, 0x001F1066,
+ 0xCC0, 0x88A00400,
+ 0xCC4, 0x00200400,
+ 0xCC8, 0x0B200400,
+ 0xCCC, 0x00600400,
+ 0xCD0, 0x00000092,
+ 0xCD4, 0x22220000,
+ 0xCD8, 0x22222222,
+ 0xCDC, 0x22222222,
+ 0xCE0, 0x22222222,
+ 0xCE4, 0x22222222,
+ 0xCE8, 0x00002222,
+ 0xCEC, 0x00000000,
+ 0xCF0, 0x00000000,
+ 0xCF4, 0x00000000,
+ 0xCF8, 0x00000000,
+ 0xD00, 0x1083A10A,
+ 0xD04, 0x0EC42948,
+ 0xD08, 0x10852108,
+ 0xD0C, 0x0CC41D08,
+ 0xD10, 0x108620EC,
+ 0xD14, 0x0CA42108,
+ 0xD18, 0x107620E8,
+ 0xD1C, 0x0E742108,
+ 0xD20, 0x0E8618C8,
+ 0xD24, 0x00000108,
+ 0xD28, 0x288C224C,
+ 0xD2C, 0x11C6320C,
+ 0xD30, 0x30CEBD98,
+ 0xD34, 0x10C31908,
+ 0xD38, 0x310A318C,
+ 0xD3C, 0x18C41D08,
+ 0xD40, 0x28CC4190,
+ 0xD44, 0x19062108,
+ 0xD48, 0x294A5A17,
+ 0xD4C, 0x00000108,
+ 0xD50, 0x10A3A908,
+ 0xD54, 0x10842148,
+ 0xD58, 0x14C5314A,
+ 0xD5C, 0x1086258C,
+ 0xD60, 0x10A42948,
+ 0xD64, 0x10842108,
+ 0xD68, 0x08C42108,
+ 0xD6C, 0x10842148,
+ 0xD70, 0x08822084,
+ 0xD74, 0x10841D04,
+ 0xD78, 0x08421088,
+ 0xD7C, 0x1083A104,
+ 0xD80, 0x10842108,
+ 0xD84, 0x1085294A,
+ 0xD88, 0x08822104,
+ 0xD8C, 0x10852948,
+ 0xD90, 0x08421084,
+ 0xD94, 0x10852104,
+ 0xD98, 0x08421084,
+ 0xD9C, 0x10863184,
+ 0xDA0, 0x1083B10A,
+ 0xDA4, 0x10842148,
+ 0xDA8, 0x1984718C,
+ 0xDAC, 0x108C33AF,
+ 0xDB0, 0x00000000,
+ 0xDB4, 0x00000000,
+ 0xDB8, 0x00000000,
+ 0xDBC, 0x00000000,
+ 0xDC0, 0x00000000,
+ 0xDC4, 0x00000000,
+ 0xDC8, 0x00000000,
+ 0xDCC, 0x00000000,
+ 0xDD0, 0x00000000,
+ 0xDD4, 0x00000000,
+ 0xDD8, 0x00000000,
+ 0xDDC, 0x00000000,
+ 0xDE0, 0x00000000,
+ 0xDE4, 0x00000000,
+ 0xDE8, 0x00000000,
+ 0xDEC, 0x00000000,
+ 0xDF0, 0x00000000,
+ 0xDF4, 0x00000000,
+ 0xDF8, 0x00000000,
+ 0x1800, 0x00033312,
+ 0x1804, 0x00033312,
+ 0x180C, 0x17F40060,
+ 0x1810, 0x62F508C4,
+ 0x1814, 0x506AA5B4,
+ 0x1818, 0x000014FF,
+ 0x181C, 0x00000000,
+ 0x1820, 0x02D508CC,
+ 0x1824, 0x506AA5B4,
+ 0x1828, 0x000004FD,
+ 0x182C, 0x00000000,
+ 0x1834, 0x00000000,
+ 0x1838, 0x20000000,
+ 0x183C, 0x00000000,
+ 0x1840, 0x00000000,
+ 0x1844, 0x00000000,
+ 0x1848, 0x00000000,
+ 0x184C, 0x00000000,
+ 0x1850, 0x00000000,
+ 0x1854, 0x00000000,
+ 0x1858, 0x00000000,
+ 0x185C, 0x00000000,
+ 0x1860, 0xF0040FF8,
+ 0x1864, 0x7F000000,
+ 0x1868, 0x00000000,
+ 0x186C, 0x0000FF00,
+ 0x1870, 0x00000000,
+ 0x1874, 0x00000000,
+ 0x1878, 0x00000000,
+ 0x187C, 0x00000000,
+ 0x1880, 0x00000000,
+ 0x1884, 0x02B00000,
+ 0x1888, 0x00000000,
+ 0x188C, 0x00000000,
+ 0x1890, 0x00000000,
+ 0x1894, 0x00000000,
+ 0x1898, 0x00000000,
+ 0x18A0, 0x00510000,
+ 0x18A4, 0x183C1F7F,
+ 0x18A8, 0x0A02C99A,
+ 0x18AC, 0x00004200,
+ 0x18B0, 0x0809FB08,
+ 0x18B0, 0x0809FB09,
+ 0x18B4, 0x00000000,
+ 0x18B8, 0x00000000,
+ 0x18BC, 0x00C3FF80,
+ 0x18C0, 0x0002D100,
+ 0x18C4, 0x00000004,
+ 0x18C8, 0x001FFFE0,
+ 0x18CC, 0x0809FB08,
+ 0x18CC, 0x0809FB09,
+ 0x18D0, 0x00000000,
+ 0x18D4, 0x00000000,
+ 0x18D8, 0x00C3FF80,
+ 0x18DC, 0x0002D100,
+ 0x18E0, 0x00000004,
+ 0x18E4, 0x001FFFE0,
+ 0x18E8, 0x00800000,
+ 0x18EC, 0x1EC08000,
+ 0x18F0, 0x7F000064,
+ 0x18F4, 0x1F7DE75C,
+ 0x18F8, 0x7F7F7F7F,
+ 0x18FC, 0x7F7F7F7F,
+ 0x1900, 0xA7A7A7A7,
+ 0x1904, 0x95959595,
+ 0x1908, 0x00777788,
+ 0x190C, 0x77776666,
+ 0x1910, 0x00033333,
+ 0x1914, 0xAAAC875A,
+ 0x1918, 0x2AA2A8A2,
+ 0x191C, 0x2AAAA8A2,
+ 0x1920, 0x00878766,
+ 0x1924, 0x000C4924,
+ 0x1928, 0x5669B6C0,
+ 0x192C, 0x00409190,
+ 0x1930, 0xB85C0492,
+ 0x1934, 0x00B4A298,
+ 0x1938, 0x00030151,
+ 0x193C, 0x0058C618,
+ 0x1940, 0x41000000,
+ 0x1944, 0x00000BCB,
+ 0x1948, 0xAAAAAAAA,
+ 0x194C, 0x00B99999,
+ 0x1950, 0x88886665,
+ 0x1954, 0x08888888,
+ 0x1958, 0x00000618,
+ 0x195C, 0x00000000,
+ 0x1960, 0x00000000,
+ 0x1964, 0x00000000,
+ 0x1968, 0x00000000,
+ 0x196C, 0x00000000,
+ 0x1970, 0x00000000,
+ 0x1974, 0x00000000,
+ 0x1978, 0x00000000,
+ 0x197C, 0x00000000,
+ 0x1980, 0x00000000,
+ 0x1984, 0x00000000,
+ 0x1988, 0x00000000,
+ 0x198C, 0x00000000,
+ 0x1990, 0x00000000,
+ 0x1994, 0x00000000,
+ 0x1998, 0x00000000,
+ 0x199C, 0x00000000,
+ 0x19A0, 0x00000000,
+ 0x19A4, 0x00000000,
+ 0x19A8, 0x00000000,
+ 0x19AC, 0x00000000,
+ 0x19B0, 0x00000000,
+ 0x19B4, 0x00000000,
+ 0x19B8, 0x00000000,
+ 0x19BC, 0x00000000,
+ 0x19C0, 0x00000000,
+ 0x19C4, 0x00000000,
+ 0x19C8, 0x00000000,
+ 0x19CC, 0x00000000,
+ 0x19D0, 0x00000000,
+ 0x19D4, 0x00000000,
+ 0x19D8, 0x00000000,
+ 0x19DC, 0x00000000,
+ 0x19E0, 0x00000000,
+ 0x19E4, 0x00000000,
+ 0x19E8, 0x00000000,
+ 0x19EC, 0x00000000,
+ 0x19F0, 0x00000000,
+ 0x19F4, 0x00000000,
+ 0x19F8, 0x00000000,
+ 0x1C00, 0x00000000,
+ 0x1C04, 0x00000000,
+ 0x1C08, 0x00000000,
+ 0x1C0C, 0x00000000,
+ 0x1C10, 0x00000000,
+ 0x1C14, 0x00000000,
+ 0x1C18, 0x00000000,
+ 0x1C1C, 0x00000000,
+ 0x1C20, 0x03C23F00,
+ 0x1C24, 0xF101F002,
+ 0x1C28, 0x0FFE0010,
+ 0x1C2C, 0x453090FF,
+ 0x1C30, 0xFE0090FE,
+ 0x1C34, 0xE4E42000,
+ 0x1C38, 0xFFA1005E,
+ 0x1C40, 0x8F588837,
+ 0x1C44, 0x04400300,
+ 0x1C48, 0x00000000,
+ 0x1C4C, 0x00000200,
+ 0x1C50, 0x8E588837,
+ 0x1C54, 0x04400300,
+ 0x1C58, 0x00000000,
+ 0x1C5C, 0xFFFFFFFF,
+ 0x1C60, 0x0F030032,
+ 0x1C64, 0x360F0000,
+ 0x1C68, 0x007F0000,
+ 0x1C6C, 0x00010000,
+ 0x1C70, 0x00037FFE,
+ 0x1C74, 0x00000000,
+ 0x1C78, 0x00020000,
+ 0x1C7C, 0x00310000,
+ 0x1C80, 0x0E38E000,
+ 0x1C84, 0x245120D4,
+ 0x1C88, 0xC8400483,
+ 0x1C8C, 0x40005A20,
+ 0x1C94, 0x00000000,
+ 0x1C98, 0x00000000,
+ 0x1C9C, 0x00000000,
+ 0x1CA0, 0x00000000,
+ 0x1CA4, 0x20000000,
+ 0x1CA8, 0x0E000000,
+ 0x1CAC, 0xE424A2CC,
+ 0x1CB0, 0x00000000,
+ 0x1CB4, 0x00000000,
+ 0x1CB8, 0x24800000,
+ 0x1CBC, 0x60004800,
+ 0x1CC0, 0x24800000,
+ 0x1CC4, 0x60004800,
+ 0x1CC8, 0xF0444900,
+ 0x1CCC, 0x030300F1,
+ 0x1CD0, 0x0F000000,
+ 0x1CD4, 0x02024B00,
+ 0x1CD8, 0x04000000,
+ 0x1CDC, 0x10000000,
+ 0x1CE0, 0x60000000,
+ 0x1CE4, 0x00000000,
+ 0x1CE8, 0xC0000000,
+ 0x1CEC, 0x00000000,
+ 0x1CF0, 0x00000000,
+ 0x1CF4, 0xE4000000,
+ 0x1CF8, 0x00000000,
+ 0x1D00, 0x00000000,
+ 0x1D04, 0x08A3C000,
+ 0x1D08, 0xA0000000,
+ 0x1D10, 0x08B5BBBB,
+ 0x1D14, 0x77777777,
+ 0x1D18, 0x99999999,
+ 0x1D1C, 0x99999999,
+ 0x1D20, 0x000081E0,
+ 0x1D24, 0x00000000,
+ 0x1D28, 0x00000000,
+ 0x1D2C, 0xC0000000,
+ 0x1D30, 0x50009C00,
+ 0x1D34, 0x00000000,
+ 0x1D38, 0x00000000,
+ 0x1D3C, 0xF8000000,
+ 0x1D40, 0x00000000,
+ 0x1D44, 0x74740000,
+ 0x1D48, 0x14147474,
+ 0x1D4C, 0x00FFFF14,
+ 0x1D50, 0x00000000,
+ 0x1D54, 0x03A00000,
+ 0x1D58, 0x80800000,
+ 0x1D5C, 0x00000000,
+ 0x1D60, 0x00000000,
+ 0x1D64, 0x88000000,
+ 0x1D68, 0x00000000,
+ 0x1D6C, 0x666D8001,
+ 0x1D70, 0x20202020,
+ 0x1D74, 0x4E4E4E4E,
+ 0x1D78, 0x18189818,
+ 0x1D7C, 0x0005A000,
+ 0x1D80, 0x00080000,
+ 0x1D84, 0x00080000,
+ 0x1D88, 0x000000EF,
+ 0x1D8C, 0x0C0C0C0C,
+ 0x1D90, 0x103F003F,
+ 0x1D94, 0x00000000,
+ 0x1D98, 0x00000000,
+ 0x1D9C, 0x00000000,
+ 0x1DA0, 0x00000000,
+ 0x1DA4, 0x00000000,
+ 0x1DA8, 0x00000000,
+ 0x1DAC, 0x00000000,
+ 0x1DB0, 0x00000000,
+ 0x1DB4, 0x00000000,
+ 0x1DB8, 0x00000000,
+ 0x1DBC, 0x00000000,
+ 0x1DC0, 0x00000000,
+ 0x1DC4, 0x00000000,
+ 0x1DC8, 0x00000000,
+ 0x1DCC, 0x00000000,
+ 0x1DD0, 0x00000000,
+ 0x1DD4, 0x00000000,
+ 0x1DD8, 0x00000000,
+ 0x1DDC, 0x1FDF0000,
+ 0x1DE0, 0x01010000,
+ 0x1DE4, 0x05210123,
+ 0x1DE8, 0xFFFF4848,
+ 0x1DEC, 0x00000000,
+ 0x1DF0, 0x00000000,
+ 0x1DF4, 0x80000002,
+ 0x1DF8, 0x00000000,
+ 0x1E00, 0x00000000,
+ 0x1E04, 0x00000000,
+ 0x1E08, 0x00000000,
+ 0x1E0C, 0x00000000,
+ 0x1E10, 0x00000000,
+ 0x1E14, 0x00000000,
+ 0x1E18, 0x00000000,
+ 0x1E1C, 0x00000000,
+ 0x1E20, 0x00000000,
+ 0x1E24, 0x80003000,
+ 0x1E28, 0x000CC0C3,
+ 0x1E2C, 0xE4E40404,
+ 0x1E30, 0xE4E4E4E4,
+ 0x1E34, 0xF3001234,
+ 0x1E38, 0x00000000,
+ 0x1E3C, 0x00000000,
+ 0x1E40, 0x00000000,
+ 0x1E44, 0x00000000,
+ 0x1E48, 0x00000000,
+ 0x1E4C, 0x00000000,
+ 0x1E50, 0x00000000,
+ 0x1E54, 0x00000000,
+ 0x1E58, 0x00000000,
+ 0x1E5C, 0xC1000000,
+ 0x1E60, 0x00000000,
+ 0x1E64, 0xF3A00001,
+ 0x1E68, 0x0028846E,
+ 0x1E6C, 0x40274906,
+ 0x1E70, 0x00001000,
+ 0x1E74, 0x00000000,
+ 0x1E78, 0x00000000,
+ 0x1E7C, 0x00000000,
+ 0x1E80, 0x00000000,
+ 0x1E84, 0x00000000,
+ 0x1E84, 0x40000000,
+ 0x1E84, 0x41000000,
+ 0x1E84, 0x42000000,
+ 0x1E84, 0x43000000,
+ 0x1E84, 0x44000000,
+ 0x1E84, 0x45000000,
+ 0x1E84, 0x46000000,
+ 0x1E84, 0x47000000,
+ 0x1E84, 0x48000000,
+ 0x1E84, 0x49000000,
+ 0x1E84, 0x4A000000,
+ 0x1E84, 0x4B000000,
+ 0x1E84, 0x4C000000,
+ 0x1E84, 0x4D000000,
+ 0x1E84, 0x4E000000,
+ 0x1E84, 0x4F000000,
+ 0x1E84, 0x50000000,
+ 0x1E84, 0x51000000,
+ 0x1E84, 0x52000000,
+ 0x1E84, 0x53000000,
+ 0x1E84, 0x54000000,
+ 0x1E84, 0x55000000,
+ 0x1E84, 0x56000000,
+ 0x1E84, 0x57000000,
+ 0x1E84, 0x58000000,
+ 0x1E84, 0x59000000,
+ 0x1E84, 0x5A000000,
+ 0x1E84, 0x5B000000,
+ 0x1E84, 0x5C000000,
+ 0x1E84, 0x5D000000,
+ 0x1E84, 0x5E000000,
+ 0x1E84, 0x5F000000,
+ 0x1E84, 0x60000000,
+ 0x1E84, 0x61000000,
+ 0x1E84, 0x62000000,
+ 0x1E84, 0x63000000,
+ 0x1E84, 0x64000000,
+ 0x1E84, 0x65000000,
+ 0x1E84, 0x66000000,
+ 0x1E84, 0x67000000,
+ 0x1E84, 0x68000000,
+ 0x1E84, 0x69000000,
+ 0x1E84, 0x6A000000,
+ 0x1E84, 0x6B000000,
+ 0x1E84, 0x6C000000,
+ 0x1E84, 0x6D000000,
+ 0x1E84, 0x6E000000,
+ 0x1E84, 0x6F000000,
+ 0x1E84, 0x70000000,
+ 0x1E84, 0x71000000,
+ 0x1E84, 0x72000000,
+ 0x1E84, 0x73000000,
+ 0x1E84, 0x74000000,
+ 0x1E84, 0x75000000,
+ 0x1E84, 0x76000000,
+ 0x1E84, 0x77000000,
+ 0x1E84, 0x78000000,
+ 0x1E84, 0x79000000,
+ 0x1E84, 0x7A000000,
+ 0x1E84, 0x7B000000,
+ 0x1E84, 0x7C000000,
+ 0x1E84, 0x7D000000,
+ 0x1E84, 0x7E000000,
+ 0x1E84, 0x7F000000,
+ 0x1E84, 0x80000000,
+ 0x1E84, 0x00000000,
+ 0x1E88, 0x0200FC1C,
+ 0x1E8C, 0x00000000,
+ 0x1E90, 0x00000000,
+ 0x1E94, 0x04000000,
+ 0x1E98, 0x00000000,
+ 0x1E9C, 0x00000000,
+ 0x1EA0, 0x00000000,
+ 0x1EA4, 0x00000000,
+ 0x1EA8, 0xAA464646,
+ 0x1EAC, 0x01800030,
+ 0x1EB0, 0x00003002,
+ 0x1EB4, 0x31800002,
+ 0x1EB8, 0x00000000,
+ 0x1EBC, 0x00000000,
+ 0x1EC0, 0x00000000,
+ 0x1EC4, 0x00000000,
+ 0x1EC8, 0x00000000,
+ 0x1ECC, 0x00000000,
+ 0x1ED0, 0x00000000,
+ 0x1ED4, 0x8000000A,
+ 0x1ED8, 0x800B03E8,
+ 0x1EDC, 0x83E90FFF,
+ 0x1EE0, 0x8000FFFF,
+ 0x1EE4, 0x70000000,
+ 0x1EE8, 0x00000000,
+ 0x1EEC, 0x0280A933,
+ 0x1EF0, 0x00000A80,
+ 0x1EF4, 0x00001266,
+ 0x1EF8, 0x01000100,
+ 0x3A00, 0x0004080C,
+ 0x3A04, 0x1C202428,
+ 0x3A08, 0x0C101418,
+ 0x3A0C, 0x181C2024,
+ 0x3A10, 0x080C1014,
+ 0x3A14, 0x181C2024,
+ 0x3A18, 0x080C1014,
+ 0x3A1C, 0x00000000,
+ 0x3A20, 0x00000000,
+ 0x3A24, 0x00000000,
+ 0x3A28, 0x00000000,
+ 0x3A2C, 0x181C2024,
+ 0x3A30, 0x080C1014,
+ 0x3A34, 0x20240004,
+ 0x3A38, 0x1014181C,
+ 0x3A3C, 0x0004080C,
+ 0x3A40, 0x00000000,
+ 0x3A44, 0x00000000,
+ 0x3A48, 0x00000000,
+ 0x3A4C, 0x00000000,
+ 0x3A50, 0x00000000,
+ 0x3A54, 0x00000000,
+ 0x3A58, 0x00000000,
+ 0x3A5C, 0x00000000,
+ 0x3A60, 0x00000000,
+ 0x3A64, 0x00000000,
+ 0x3A68, 0x00000000,
+ 0x3A6C, 0x00000000,
+ 0x3A70, 0x00000000,
+ 0x3A74, 0x00000000,
+ 0x3A78, 0x00000000,
+ 0x3A7C, 0x00000000,
+ 0x3A80, 0x00000000,
+ 0x3A84, 0x00000000,
+ 0x3A88, 0x00000000,
+ 0x3A8C, 0x00000000,
+ 0x3A90, 0x00000000,
+ 0x3A94, 0x00000000,
+ 0x3A98, 0x00000000,
+ 0x3A9C, 0x00000000,
+ 0x3AA0, 0x00000000,
+ 0x3AA4, 0x00000000,
+ 0x4000, 0xA6A6A6A6,
+ 0x4004, 0x95959595,
+ 0x4008, 0x00777777,
+ 0x400C, 0x77776666,
+ 0x4010, 0x00033333,
+ 0x4014, 0xAAAC875A,
+ 0x4018, 0x2AA2A8A2,
+ 0x401C, 0x2AAAA8A2,
+ 0x4020, 0x00878766,
+ 0x4024, 0x000C4924,
+ 0x4028, 0x5669B6C0,
+ 0x402C, 0x00409190,
+ 0x4030, 0xB85C0492,
+ 0x4034, 0x00B4A298,
+ 0x4038, 0x00030151,
+ 0x403C, 0x0058C618,
+ 0x4040, 0x41000000,
+ 0x4044, 0x00000BCB,
+ 0x4048, 0xAAAAAAAA,
+ 0x404C, 0x00B98989,
+ 0x4050, 0x88886665,
+ 0x4054, 0x08888888,
+ 0x4058, 0x00000618,
+ 0x405C, 0x00000000,
+ 0x4060, 0x00000000,
+ 0x4064, 0x00000000,
+ 0x4068, 0x00000000,
+ 0x406C, 0x00000000,
+ 0x4070, 0x00000000,
+ 0x4074, 0x00000000,
+ 0x4078, 0x00000000,
+ 0x407C, 0x00000000,
+ 0x4080, 0x00000000,
+ 0x4084, 0x00000000,
+ 0x4088, 0x00000000,
+ 0x408C, 0x00000000,
+ 0x4090, 0x00000000,
+ 0x4094, 0x00000000,
+ 0x4098, 0x00000000,
+ 0x409C, 0x00000000,
+ 0x40A0, 0x00000000,
+ 0x40A4, 0x00000000,
+ 0x40A8, 0x00000000,
+ 0x40AC, 0x00000000,
+ 0x40B0, 0x00000000,
+ 0x40B4, 0x00000000,
+ 0x40B8, 0x00000000,
+ 0x40BC, 0x00000000,
+ 0x40C0, 0x00000000,
+ 0x40C4, 0x00000000,
+ 0x40C8, 0x00000000,
+ 0x40CC, 0x00000000,
+ 0x40D0, 0x00000000,
+ 0x40D4, 0x00000000,
+ 0x40D8, 0x00000000,
+ 0x40DC, 0x00000000,
+ 0x40E0, 0x00000000,
+ 0x40E4, 0x00000000,
+ 0x40E8, 0x00000000,
+ 0x40EC, 0x00000000,
+ 0x40F0, 0x00000000,
+ 0x40F4, 0x00000000,
+ 0x40F8, 0x00000000,
+ 0x4100, 0x00033312,
+ 0x4104, 0x00033312,
+ 0x410C, 0x17F40060,
+ 0x4110, 0x62D508C4,
+ 0x4114, 0x506AA5B4,
+ 0x4118, 0x000014FF,
+ 0x411C, 0x00000000,
+ 0x4120, 0x02D508CC,
+ 0x4124, 0x506AA5B4,
+ 0x4128, 0x000004FD,
+ 0x412C, 0x00000000,
+ 0x4134, 0x00000000,
+ 0x4138, 0x20000000,
+ 0x413C, 0x00000000,
+ 0x4140, 0x00000000,
+ 0x4144, 0x00000000,
+ 0x4148, 0x00000000,
+ 0x414C, 0x00000000,
+ 0x4150, 0x00000000,
+ 0x4154, 0x00000000,
+ 0x4158, 0x00000000,
+ 0x415C, 0x00000000,
+ 0x4160, 0xF0040FF8,
+ 0x4164, 0x7F000000,
+ 0x4168, 0x00000000,
+ 0x416C, 0x00008000,
+ 0x4170, 0x00000000,
+ 0x4174, 0x00000000,
+ 0x4178, 0x00000000,
+ 0x417C, 0x00000000,
+ 0x4180, 0x00000000,
+ 0x4184, 0x02B00000,
+ 0x4188, 0x00000000,
+ 0x418C, 0x00000000,
+ 0x4190, 0x00000000,
+ 0x4194, 0x00000000,
+ 0x4198, 0x00000000,
+ 0x41A0, 0x00510000,
+ 0x41A4, 0x183C1F7F,
+ 0x41A8, 0x1402C99A,
+ 0x41AC, 0x00004200,
+ 0x41B0, 0x0809FB08,
+ 0x41B0, 0x0809FB09,
+ 0x41B4, 0x00000000,
+ 0x41B8, 0x00000000,
+ 0x41BC, 0x00C3FF80,
+ 0x41C0, 0x0002D100,
+ 0x41C4, 0x00000004,
+ 0x41C8, 0x001FFFE0,
+ 0x41CC, 0x0809FB08,
+ 0x41CC, 0x0809FB09,
+ 0x41D0, 0x00000000,
+ 0x41D4, 0x00000000,
+ 0x41D8, 0x00C3FF80,
+ 0x41DC, 0x0002D100,
+ 0x41E0, 0x00000004,
+ 0x41E4, 0x001FFFE0,
+ 0x41E8, 0x00000200,
+ 0x41EC, 0x1E008000,
+ 0x41F0, 0x7F000064,
+ 0x41F4, 0x1F7DE75C,
+ 0x41F8, 0x7F7F7F7F,
+ 0x41FC, 0x7F7F7F7F,
+ 0x1830, 0x700B8001,
+ 0x1830, 0x700B8001,
+ 0x1830, 0x70144001,
+ 0x1830, 0x70244001,
+ 0x1830, 0x70344001,
+ 0x1830, 0x70444001,
+ 0x1830, 0x705B8001,
+ 0x1830, 0x70644001,
+ 0x1830, 0x707B8001,
+ 0x1830, 0x708B8001,
+ 0x1830, 0x709B8001,
+ 0x1830, 0x70AB8001,
+ 0x1830, 0x70BB8001,
+ 0x1830, 0x70CB8001,
+ 0x1830, 0x70DB8001,
+ 0x1830, 0x70EB8001,
+ 0x1830, 0x70FB8001,
+ 0x1830, 0x70FB8001,
+ 0x4130, 0x700B8001,
+ 0x4130, 0x700B8001,
+ 0x4130, 0x70144001,
+ 0x4130, 0x70244001,
+ 0x4130, 0x70344001,
+ 0x4130, 0x70444001,
+ 0x4130, 0x705B8001,
+ 0x4130, 0x70644001,
+ 0x4130, 0x707B8001,
+ 0x4130, 0x708B8001,
+ 0x4130, 0x709B8001,
+ 0x4130, 0x70AB8001,
+ 0x4130, 0x70BB8001,
+ 0x4130, 0x70CB8001,
+ 0x4130, 0x70DB8001,
+ 0x4130, 0x70EB8001,
+ 0x4130, 0x70FB8001,
+ 0x4130, 0x70FB8001,
+ 0x1A00, 0x00D047C8,
+ 0x1A04, 0xC0000008,
+ 0x1A08, 0x88838300,
+ 0x1A0C, 0x2E20100F,
+ 0x1A10, 0x9500BB78,
+ 0x1A14, 0x111440A8,
+ 0x1A18, 0x00881117,
+ 0x1A1C, 0x89140F00,
+ 0x1A20, 0x52840000,
+ 0x1A24, 0x3E18FEC8,
+ 0x1A28, 0x00150A88,
+ 0x1A2C, 0x12988000,
+ 0x1A30, 0x10114007,
+ 0x1A34, 0x1011C007,
+ 0x1A38, 0x00000000,
+ 0x1A3C, 0x00000000,
+ 0x1A40, 0x00000000,
+ 0x1A44, 0x00000000,
+ 0x1A48, 0x000C0000,
+ 0x1A4C, 0xB00000C0,
+ 0x1A50, 0x22040700,
+ 0x1A54, 0x09003000,
+ 0x1A58, 0x00000881,
+ 0x1A5C, 0x00000128,
+ 0x1A60, 0x85830000,
+ 0x1A64, 0x00000128,
+ 0x1A68, 0x00222211,
+ 0x1A6C, 0x00000000,
+ 0x1A70, 0x00008000,
+ 0x1A74, 0x00000048,
+ 0x1A78, 0x000089F0,
+ 0x1A7C, 0x225B0606,
+ 0x1A80, 0x208A7532,
+ 0x1A84, 0x85200200,
+ 0x1A88, 0x048C0000,
+ 0x1A8C, 0x00000000,
+ 0x1A90, 0x00000000,
+ 0x1A94, 0x00000000,
+ 0x1A98, 0xACC4C040,
+ 0x1A9C, 0x0016C8B2,
+ 0x1AA0, 0x00FAF0DE,
+ 0x1AA4, 0x00020000,
+ 0x1AA8, 0xBA0F0004,
+ 0x1AAC, 0x00122344,
+ 0x1AB0, 0x0FFFFFFF,
+ 0x1AB4, 0x0F201402,
+ 0x1AB8, 0x00000000,
+ 0x1ABC, 0xC2008080,
+ 0x1AC0, 0x54D0A742,
+ 0x1AC4, 0x00000000,
+ 0x1AC8, 0x00000807,
+ 0x1ACC, 0x00000707,
+ 0x1AD0, 0xA33529AD,
+ 0x1AD4, 0x0D8D8452,
+ 0x1AD8, 0x08024024,
+ 0x1ADC, 0x000DB001,
+ 0x1AE0, 0x00600391,
+ 0x1AE4, 0x08000080,
+ 0x1AE8, 0x00000002,
+ 0x1AEC, 0x00000000,
+ 0x1AF0, 0x00000000,
+ 0x1AF4, 0x00000000,
+ 0x1AF8, 0x00000000,
+ 0x1AFC, 0x00000000,
+ 0x1D0C, 0x00400000,
+ 0x1D0C, 0x00410000,
+ 0x1EE8, 0x00000003,
+ 0xC0C, 0x02F1D8BF,
+ 0x1D94, 0x40000000,
+ 0x1D94, 0x40010000,
+ 0x1D94, 0x40020000,
+ 0x1D94, 0x40030000,
+ 0x1D94, 0x40040000,
+ 0x1D94, 0x40050000,
+ 0x1D94, 0x40060000,
+ 0x1D94, 0x40070000,
+ 0x1D94, 0x40080000,
+ 0x1D94, 0x40090000,
+ 0x1D94, 0x400A0000,
+ 0x1D94, 0x400B0000,
+ 0x1D94, 0x400C0000,
+ 0x1D94, 0x400D0000,
+ 0x1D94, 0x400E0000,
+ 0x1D94, 0x400F0000,
+ 0x1D94, 0x40100000,
+ 0x1D94, 0x40110000,
+ 0x1D94, 0x40120000,
+ 0x1D94, 0x40130000,
+ 0x1D94, 0x40140000,
+ 0x1D94, 0x40150000,
+ 0x1D94, 0x40160000,
+ 0x1D94, 0x40170000,
+ 0x1D94, 0x40180000,
+ 0x1D94, 0x40190000,
+ 0x1D94, 0x401A0000,
+ 0x1D94, 0x401B0000,
+ 0x1D94, 0x401C0000,
+ 0x1D94, 0x401D0000,
+ 0x1D94, 0x401E0000,
+ 0x1D94, 0x401F0000,
+ 0x1D94, 0x40200000,
+ 0x1D94, 0x40210000,
+ 0x1D94, 0x40220000,
+ 0x1D94, 0x40230000,
+ 0x1D94, 0x40240000,
+ 0x1D94, 0x40250000,
+ 0x1D94, 0x40260000,
+ 0x1D94, 0x40270000,
+ 0x1D94, 0x40280000,
+ 0x1D94, 0x40290000,
+ 0x1D94, 0x402A0000,
+ 0x1D94, 0x402B0000,
+ 0x1D94, 0x402C0000,
+ 0x1D94, 0x402D0000,
+ 0x1D94, 0x402E0000,
+ 0x1D94, 0x402F0000,
+ 0x1D94, 0x40300000,
+ 0x1D94, 0x40310000,
+ 0x1D94, 0x40320000,
+ 0x1D94, 0x40330000,
+ 0x1D94, 0x40340000,
+ 0x1D94, 0x40350000,
+ 0x1D94, 0x40360000,
+ 0x1D94, 0x40370000,
+ 0x1D94, 0x40380000,
+ 0x1D94, 0x40390000,
+ 0x1D94, 0x403A0000,
+ 0x1D94, 0x403B0000,
+ 0x1D94, 0x403C0000,
+ 0x1D94, 0x403D0000,
+ 0x1D94, 0x403E0000,
+ 0x1D94, 0x403F0000,
+ 0x1D94, 0x40400000,
+ 0x1D94, 0x40410000,
+ 0x1D94, 0x40420000,
+ 0x1D94, 0x40430000,
+ 0x1D94, 0x40440000,
+ 0x1D94, 0x40450000,
+ 0x1D94, 0x40460000,
+ 0x1D94, 0x40470000,
+ 0x1D94, 0x40480000,
+ 0x1D94, 0x40490000,
+ 0x1D94, 0x404A0000,
+ 0x1D94, 0x404B0000,
+ 0x1D94, 0x404C0000,
+ 0x1D94, 0x404D0000,
+ 0x1D94, 0x404E0000,
+ 0x1D94, 0x404F0000,
+ 0x1D94, 0x40500000,
+ 0x1D94, 0x40510000,
+ 0x1D94, 0x40520000,
+ 0x1D94, 0x40530000,
+ 0x1D94, 0x40540000,
+ 0x1D94, 0x40550000,
+ 0x1D94, 0x40560000,
+ 0x1D94, 0x40570000,
+ 0x1D94, 0x40580000,
+ 0x1D94, 0x40590000,
+ 0x1D94, 0x405A0000,
+ 0x1D94, 0x405B0000,
+ 0x1D94, 0x405C0000,
+ 0x1D94, 0x405D0000,
+ 0x1D94, 0x405E0000,
+ 0x1D94, 0x405F0000,
+ 0x1D94, 0x40600000,
+ 0x1D94, 0x40610000,
+ 0x1D94, 0x40620000,
+ 0x1D94, 0x40630000,
+ 0x1D94, 0x40640000,
+ 0x1D94, 0x40650000,
+ 0x1D94, 0x40660000,
+ 0x1D94, 0x40670000,
+ 0x1D94, 0x40680000,
+ 0x1D94, 0x40690000,
+ 0x1D94, 0x406A0000,
+ 0x1D94, 0x406B0000,
+ 0x1D94, 0x406C0000,
+ 0x1D94, 0x406D0000,
+ 0x1D94, 0x406E0000,
+ 0x1D94, 0x406F0000,
+ 0x1D94, 0x40700000,
+ 0x1D94, 0x40710000,
+ 0x1D94, 0x40720000,
+ 0x1D94, 0x40730000,
+ 0x1D94, 0x40740000,
+ 0x1D94, 0x40750000,
+ 0x1D94, 0x40760000,
+ 0x1D94, 0x40770000,
+ 0x1D94, 0x40780000,
+ 0x1D94, 0x40790000,
+ 0x1D94, 0x407A0000,
+ 0x1D94, 0x407B0000,
+ 0x1D94, 0x407C0000,
+ 0x1D94, 0x407D0000,
+ 0x1D94, 0x407E0000,
+ 0x1D94, 0x407F0000,
+ 0x1D94, 0x40800000,
+ 0x1D94, 0x40810000,
+ 0x1D94, 0x40820000,
+ 0x1D94, 0x40830000,
+ 0x1D94, 0x40840000,
+ 0x1D94, 0x40850000,
+ 0x1D94, 0x40860000,
+ 0x1D94, 0x40870000,
+ 0x1D94, 0x40880000,
+ 0x1D94, 0x40890000,
+ 0x1D94, 0x408A0000,
+ 0x1D94, 0x408B0000,
+ 0x1D94, 0x408C0000,
+ 0x1D94, 0x408D0000,
+ 0x1D94, 0x408E0000,
+ 0x1D94, 0x408F0000,
+ 0x1D94, 0x40900000,
+ 0x1D94, 0x40910000,
+ 0x1D94, 0x40920000,
+ 0x1D94, 0x40930000,
+ 0x1D94, 0x40940000,
+ 0x1D94, 0x40950000,
+ 0x1D94, 0x40960000,
+ 0x1D94, 0x40970000,
+ 0x1D94, 0x40980000,
+ 0x1D94, 0x40990000,
+ 0x1D94, 0x409A0000,
+ 0x1D94, 0x409B0000,
+ 0x1D94, 0x409C0000,
+ 0x1D94, 0x409D0000,
+ 0x1D94, 0x409E0000,
+ 0x1D94, 0x409F0000,
+ 0x1D94, 0x40A00000,
+ 0x1D94, 0x40A10000,
+ 0x1D94, 0x40A20000,
+ 0x1D94, 0x40A30000,
+ 0x1D94, 0x40A40000,
+ 0x1D94, 0x40A50000,
+ 0x1D94, 0x40A60000,
+ 0x1D94, 0x40A70000,
+ 0x1D94, 0x40A80000,
+ 0x1D94, 0x40A90000,
+ 0x1D94, 0x40AA0000,
+ 0x1D94, 0x40AB0000,
+ 0x1D94, 0x40AC0000,
+ 0x1D94, 0x40AD0000,
+ 0x1D94, 0x40AE0000,
+ 0x1D94, 0x40AF0000,
+ 0x1D94, 0x40B00000,
+ 0x1D94, 0x40B10000,
+ 0x1D94, 0x40B20000,
+ 0x1D94, 0x40B30000,
+ 0x1D94, 0x40B40000,
+ 0x1D94, 0x40B50000,
+ 0x1D94, 0x40B60000,
+ 0x1D94, 0x40B70000,
+ 0x1D94, 0x40B80000,
+ 0x1D94, 0x40B90000,
+ 0x1D94, 0x40BA0000,
+ 0x1D94, 0x40BB0000,
+ 0x1D94, 0x40BC0000,
+ 0x1D94, 0x40BD0000,
+ 0x1D94, 0x40BE0000,
+ 0x1D94, 0x40BF0000,
+ 0x1D94, 0x40C00000,
+ 0x1D94, 0x40C10000,
+ 0x1D94, 0x40C20000,
+ 0x1D94, 0x40C30000,
+ 0x1D94, 0x40C40000,
+ 0x1D94, 0x40C50000,
+ 0x1D94, 0x40C60000,
+ 0x1D94, 0x40C70000,
+ 0x1D94, 0x40C80000,
+ 0x1D94, 0x40C90000,
+ 0x1D94, 0x40CA0000,
+ 0x1D94, 0x40CB0000,
+ 0x1D94, 0x40CC0000,
+ 0x1D94, 0x40CD0000,
+ 0x1D94, 0x40CE0000,
+ 0x1D94, 0x40CF0000,
+ 0x1D94, 0x40D00000,
+ 0x1D94, 0x40D10000,
+ 0x1D94, 0x40D20000,
+ 0x1D94, 0x40D30000,
+ 0x1D94, 0x40D40000,
+ 0x1D94, 0x40D50000,
+ 0x1D94, 0x40D60000,
+ 0x1D94, 0x40D70000,
+ 0x1D94, 0x40D80000,
+ 0x1D94, 0x40D90000,
+ 0x1D94, 0x40DA0000,
+ 0x1D94, 0x40DB0000,
+ 0x1D94, 0x40DC0000,
+ 0x1D94, 0x40DD0000,
+ 0x1D94, 0x40DE0000,
+ 0x1D94, 0x40DF0000,
+ 0x1D94, 0x40E00000,
+ 0x1D94, 0x40E10000,
+ 0x1D94, 0x40E20000,
+ 0x1D94, 0x40E30000,
+ 0x1D94, 0x40E40000,
+ 0x1D94, 0x40E50000,
+ 0x1D94, 0x40E60000,
+ 0x1D94, 0x40E70000,
+ 0x1D94, 0x40E80000,
+ 0x1D94, 0x40E90000,
+ 0x1D94, 0x40EA0000,
+ 0x1D94, 0x40EB0000,
+ 0x1D94, 0x40EC0000,
+ 0x1D94, 0x40ED0000,
+ 0x1D94, 0x40EE0000,
+ 0x1D94, 0x40EF0000,
+ 0x1D94, 0x40F00000,
+ 0x1D94, 0x40F10000,
+ 0x1D94, 0x40F20000,
+ 0x1D94, 0x40F30000,
+ 0x1D94, 0x40F40000,
+ 0x1D94, 0x40F50000,
+ 0x1D94, 0x40F60000,
+ 0x1D94, 0x40F70000,
+ 0x1D94, 0x40F80000,
+ 0x1D94, 0x40F90000,
+ 0x1D94, 0x40FA0000,
+ 0x1D94, 0x40FB0000,
+ 0x1D94, 0x40FC0000,
+ 0x1D94, 0x40FD0000,
+ 0x1D94, 0x40FE0000,
+ 0x1D94, 0x40FF0000,
+ 0xC0C, 0x02F1D8B7,
+ 0x1EE8, 0x00000000,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822c_bb, rtw_phy_cfg_bb);
+
+static const u32 rtw8822c_bb_pg_type0[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
+ 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
+ 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
+ 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
+ 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
+ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
+ 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054,
+ 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
+ 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
+ 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
+ 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
+ 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
+ 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
+ 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
+ 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
+ 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
+ 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
+ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
+ 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
+ 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
+ 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
+ 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
+ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
+ 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
+ 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
+ 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
+ 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
+ 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
+ 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
+ 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
+ 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
+ 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
+ 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
+ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822c_bb_pg_type0);
+
+static const u32 rtw8822c_rf_a[] = {
+ 0x000, 0x00030000,
+ 0x018, 0x00013124,
+ 0x093, 0x0008483F,
+ 0x0DE, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000B9140,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000B9140,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0xA0000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0xB0000000, 0x00000000,
+ 0x081, 0x0000FC01,
+ 0x081, 0x0002FC01,
+ 0x081, 0x0003FC01,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000033,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000043,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000053,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x08A, 0x000E7DE3,
+ 0x08B, 0x0008FE00,
+ 0x0EE, 0x00000008,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000023,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000023,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00004000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000000F,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x01B, 0x00003A40,
+ 0x061, 0x0000D233,
+ 0x062, 0x0004D232,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x00000C02,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000233,
+ 0x030, 0x00001233,
+ 0x030, 0x00002233,
+ 0x030, 0x00003233,
+ 0x030, 0x00004203,
+ 0x030, 0x00005233,
+ 0x030, 0x00006233,
+ 0x030, 0x00007233,
+ 0x030, 0x00008203,
+ 0x030, 0x00009233,
+ 0x030, 0x0000A233,
+ 0x030, 0x0000B233,
+ 0x030, 0x0000C233,
+ 0x030, 0x0000D233,
+ 0x030, 0x0000E203,
+ 0x030, 0x0000F233,
+ 0x030, 0x00010233,
+ 0x030, 0x00011233,
+ 0x030, 0x00012203,
+ 0x030, 0x00013233,
+ 0x030, 0x00014233,
+ 0x030, 0x00015233,
+ 0x030, 0x00016203,
+ 0x030, 0x00017233,
+ 0x030, 0x00018203,
+ 0x030, 0x00019233,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000232,
+ 0x030, 0x00001232,
+ 0x030, 0x00002232,
+ 0x030, 0x00003232,
+ 0x030, 0x00004232,
+ 0x030, 0x00005232,
+ 0x030, 0x00006232,
+ 0x030, 0x00007232,
+ 0x030, 0x00008232,
+ 0x030, 0x00009232,
+ 0x030, 0x0000A232,
+ 0x030, 0x0000B232,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000770,
+ 0x030, 0x00001770,
+ 0x030, 0x00002440,
+ 0x030, 0x00003440,
+ 0x030, 0x00004330,
+ 0x030, 0x00005330,
+ 0x030, 0x00008770,
+ 0x030, 0x0000A440,
+ 0x030, 0x0000C330,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x051, 0x0003C800,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0xA0000000, 0x00000000,
+ 0x052, 0x000942CA,
+ 0xB0000000, 0x00000000,
+ 0x053, 0x000090F9,
+ 0x054, 0x00088000,
+ 0x057, 0x0004C80A,
+ 0x0EF, 0x00000020,
+ 0x033, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000010,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000018,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000028,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0xB0000000, 0x00000000,
+ 0x0EE, 0x00000000,
+ 0x05C, 0x000FCC00,
+ 0x067, 0x0000A505,
+ 0x0D3, 0x00000542,
+ 0x043, 0x00005000,
+ 0x07F, 0x00000000,
+ 0x0B0, 0x0001F0FC,
+ 0x0B1, 0x0007DBE4,
+ 0x0B2, 0x00022400,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0xA0000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0xB0000000, 0x00000000,
+ 0x0B4, 0x00099D40,
+ 0x0B5, 0x0004103F,
+ 0x0B6, 0x000187F8,
+ 0x0B7, 0x00030018,
+ 0x0BC, 0x00000008,
+ 0x0D3, 0x00000542,
+ 0x0DD, 0x00000500,
+ 0x0BB, 0x00040010,
+ 0x0B0, 0x0001F0FA,
+ 0x0FE, 0x00000000,
+ 0x0CA, 0x00080000,
+ 0x0CA, 0x00080001,
+ 0x0FE, 0x00000000,
+ 0x0B0, 0x0001F0F8,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0xA0000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001B124,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0xA0000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x0CC, 0x0000F000,
+ 0x0CD, 0x00089600,
+ 0x018, 0x00013108,
+ 0x0FE, 0x00000000,
+ 0x0B8, 0x000C0440,
+ 0x0BA, 0x000E840D,
+ 0x0FE, 0x00000000,
+ 0x018, 0x00013124,
+ 0x059, 0x000A0000,
+ 0x05A, 0x00060000,
+ 0x05B, 0x00014000,
+ 0x0ED, 0x00000008,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000000F,
+ 0x0ED, 0x00000000,
+ 0x0EE, 0x00000002,
+ 0x033, 0x00000017,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000018,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000000,
+ 0x033, 0x0000001A,
+ 0x03F, 0x0000003F,
+ 0x033, 0x0000001B,
+ 0x03F, 0x0000003F,
+ 0x033, 0x0000001C,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0x0ED, 0x00000200,
+ 0x033, 0x00000000,
+ 0x03F, 0x000F45A4,
+ 0x033, 0x00000001,
+ 0x03F, 0x000F49A4,
+ 0x033, 0x00000002,
+ 0x03F, 0x000F49A4,
+ 0x033, 0x00000003,
+ 0x03F, 0x000F69A4,
+ 0x033, 0x00000004,
+ 0x03F, 0x000F69A4,
+ 0x033, 0x00000005,
+ 0x03F, 0x000F69A4,
+ 0x033, 0x00000006,
+ 0x03F, 0x000F6DA4,
+ 0x033, 0x00000007,
+ 0x03F, 0x000F6DA4,
+ 0x033, 0x00000008,
+ 0x03F, 0x000F6DA4,
+ 0x033, 0x00000009,
+ 0x03F, 0x000F8DA4,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000F8DA4,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000F8DA4,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000F91A4,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000F91A4,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000F91A4,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000FB1A4,
+ 0x033, 0x00000010,
+ 0x03F, 0x000FB1A4,
+ 0x033, 0x00000011,
+ 0x03F, 0x000FB1A4,
+ 0x033, 0x00000012,
+ 0x03F, 0x000FB5A4,
+ 0x033, 0x00000013,
+ 0x03F, 0x000FB5A4,
+ 0x033, 0x00000014,
+ 0x03F, 0x000FD9A4,
+ 0x033, 0x00000015,
+ 0x03F, 0x000FD9A4,
+ 0x033, 0x00000016,
+ 0x03F, 0x000FF9A4,
+ 0x033, 0x00000017,
+ 0x03F, 0x000FF9A4,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FFDA4,
+ 0x033, 0x00000019,
+ 0x03F, 0x000FFDA4,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000FFDA4,
+ 0x0ED, 0x00000000,
+ 0x092, 0x00084800,
+ 0x092, 0x00084801,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x092, 0x00084800,
+ 0x08F, 0x0000182C,
+ 0x088, 0x0004326B,
+ 0x019, 0x00000005,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_a, A);
+
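+/*
+ * Conditional entries (assumed to follow the rtw88 conditional table
+ * format): words with an 0x8xxxxxxx/0x9xxxxxxx prefix appear to act as
+ * IF/ELSE-IF headers selecting values per chip cut and package, each
+ * followed by mask words; 0xA0000000, 0x00000000 introduces the default
+ * branch and 0xB0000000, 0x00000000 terminates the conditional block.
+ * All other entries are plain RF register offset/value pairs.
+ */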
+static const u32 rtw8822c_rf_b[] = {
+ 0x000, 0x00030000,
+ 0x018, 0x00013124,
+ 0x093, 0x0008483F,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000001,
+ 0x03F, 0x00091020,
+ 0x0EF, 0x00000000,
+ 0x0DE, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000B9140,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000B9140,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0xA0000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0xB0000000, 0x00000000,
+ 0x081, 0x0000FC01,
+ 0x081, 0x0002FC01,
+ 0x081, 0x0003FC01,
+ 0x085, 0x0006A06C,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000033,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000043,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000053,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x08A, 0x000E7DE3,
+ 0x08B, 0x0008FE00,
+ 0x0EE, 0x00000008,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000023,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000023,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00004000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000000F,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x01B, 0x00003A40,
+ 0x061, 0x0000D233,
+ 0x062, 0x0004D232,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x00000C02,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000233,
+ 0x030, 0x00001233,
+ 0x030, 0x00002233,
+ 0x030, 0x00003233,
+ 0x030, 0x00004203,
+ 0x030, 0x00005233,
+ 0x030, 0x00006233,
+ 0x030, 0x00007233,
+ 0x030, 0x00008203,
+ 0x030, 0x00009233,
+ 0x030, 0x0000A233,
+ 0x030, 0x0000B233,
+ 0x030, 0x0000C233,
+ 0x030, 0x0000D233,
+ 0x030, 0x0000E203,
+ 0x030, 0x0000F233,
+ 0x030, 0x00010233,
+ 0x030, 0x00011233,
+ 0x030, 0x00012203,
+ 0x030, 0x00013233,
+ 0x030, 0x00014233,
+ 0x030, 0x00015233,
+ 0x030, 0x00016203,
+ 0x030, 0x00017233,
+ 0x030, 0x00018203,
+ 0x030, 0x00019233,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000232,
+ 0x030, 0x00001232,
+ 0x030, 0x00002232,
+ 0x030, 0x00003232,
+ 0x030, 0x00004232,
+ 0x030, 0x00005232,
+ 0x030, 0x00006232,
+ 0x030, 0x00007232,
+ 0x030, 0x00008232,
+ 0x030, 0x00009232,
+ 0x030, 0x0000A232,
+ 0x030, 0x0000B232,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000770,
+ 0x030, 0x00001770,
+ 0x030, 0x00002440,
+ 0x030, 0x00003440,
+ 0x030, 0x00004330,
+ 0x030, 0x00005330,
+ 0x030, 0x00008770,
+ 0x030, 0x0000A440,
+ 0x030, 0x0000C330,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x051, 0x0003C800,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0xA0000000, 0x00000000,
+ 0x052, 0x000942C0,
+ 0xB0000000, 0x00000000,
+ 0x053, 0x000090F9,
+ 0x054, 0x00088000,
+ 0x057, 0x0004C80A,
+ 0x0EF, 0x00000020,
+ 0x033, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000010,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000018,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000028,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0xB0000000, 0x00000000,
+ 0x0EE, 0x00000000,
+ 0x05C, 0x000FCC00,
+ 0x067, 0x0000A505,
+ 0x0D3, 0x00000542,
+ 0x043, 0x00005000,
+ 0x059, 0x000A0000,
+ 0x05A, 0x00060000,
+ 0x05B, 0x00014000,
+ 0x001, 0x00040000,
+ 0x0EE, 0x00000002,
+ 0x033, 0x00000017,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000018,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000000,
+ 0x033, 0x0000001A,
+ 0x03F, 0x0000003F,
+ 0x033, 0x0000001B,
+ 0x03F, 0x0000003F,
+ 0x033, 0x0000001C,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0x092, 0x00084800,
+ 0x092, 0x00084801,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x092, 0x00084800,
+ 0x08F, 0x0000182C,
+ 0x088, 0x0004326B,
+ 0x019, 0x00000005,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
+
+static const u8 rtw8822c_txpwr_lmt_type0[] = {
+ 0, 0, 0, 0, 1, 72, 2, 0, 0, 0, 1, 60,
+ 0, 0, 0, 0, 2, 72, 2, 0, 0, 0, 2, 60,
+ 0, 0, 0, 0, 3, 76, 2, 0, 0, 0, 3, 60,
+ 0, 0, 0, 0, 4, 76, 2, 0, 0, 0, 4, 60,
+ 0, 0, 0, 0, 5, 76, 2, 0, 0, 0, 5, 60,
+ 0, 0, 0, 0, 6, 76, 2, 0, 0, 0, 6, 60,
+ 0, 0, 0, 0, 7, 76, 2, 0, 0, 0, 7, 60,
+ 0, 0, 0, 0, 8, 76, 2, 0, 0, 0, 8, 60,
+ 0, 0, 0, 0, 9, 76, 2, 0, 0, 0, 9, 60,
+ 0, 0, 0, 0, 10, 72, 2, 0, 0, 0, 10, 60,
+ 0, 0, 0, 0, 11, 72, 2, 0, 0, 0, 11, 60,
+ 0, 0, 0, 0, 12, 52, 2, 0, 0, 0, 12, 60,
+ 0, 0, 0, 0, 13, 48, 2, 0, 0, 0, 13, 60,
+ 0, 0, 0, 0, 14, 127, 2, 0, 0, 0, 14, 127,
+ 0, 0, 0, 1, 1, 52, 2, 0, 0, 1, 1, 60,
+ 0, 0, 0, 1, 2, 60, 2, 0, 0, 1, 2, 60,
+ 0, 0, 0, 1, 3, 64, 2, 0, 0, 1, 3, 60,
+ 0, 0, 0, 1, 4, 68, 2, 0, 0, 1, 4, 60,
+ 0, 0, 0, 1, 5, 76, 2, 0, 0, 1, 5, 60,
+ 0, 0, 0, 1, 6, 76, 2, 0, 0, 1, 6, 60,
+ 0, 0, 0, 1, 7, 76, 2, 0, 0, 1, 7, 60,
+ 0, 0, 0, 1, 8, 68, 2, 0, 0, 1, 8, 60,
+ 0, 0, 0, 1, 9, 64, 2, 0, 0, 1, 9, 60,
+ 0, 0, 0, 1, 10, 60, 2, 0, 0, 1, 10, 60,
+ 0, 0, 0, 1, 11, 52, 2, 0, 0, 1, 11, 60,
+ 0, 0, 0, 1, 12, 40, 2, 0, 0, 1, 12, 60,
+ 0, 0, 0, 1, 13, 28, 2, 0, 0, 1, 13, 60,
+ 0, 0, 0, 1, 14, 127, 2, 0, 0, 1, 14, 127,
+ 0, 0, 0, 2, 1, 52, 2, 0, 0, 2, 1, 60,
+ 0, 0, 0, 2, 2, 60, 2, 0, 0, 2, 2, 60,
+ 0, 0, 0, 2, 3, 64, 2, 0, 0, 2, 3, 60,
+ 0, 0, 0, 2, 4, 68, 2, 0, 0, 2, 4, 60,
+ 0, 0, 0, 2, 5, 76, 2, 0, 0, 2, 5, 60,
+ 0, 0, 0, 2, 6, 76, 2, 0, 0, 2, 6, 60,
+ 0, 0, 0, 2, 7, 76, 2, 0, 0, 2, 7, 60,
+ 0, 0, 0, 2, 8, 68, 2, 0, 0, 2, 8, 60,
+ 0, 0, 0, 2, 9, 64, 2, 0, 0, 2, 9, 60,
+ 0, 0, 0, 2, 10, 60, 2, 0, 0, 2, 10, 60,
+ 0, 0, 0, 2, 11, 52, 2, 0, 0, 2, 11, 60,
+ 0, 0, 0, 2, 12, 40, 2, 0, 0, 2, 12, 60,
+ 0, 0, 0, 2, 13, 28, 2, 0, 0, 2, 13, 60,
+ 0, 0, 0, 2, 14, 127, 2, 0, 0, 2, 14, 127,
+ 0, 0, 0, 3, 1, 52, 2, 0, 0, 3, 1, 36,
+ 0, 0, 0, 3, 2, 60, 2, 0, 0, 3, 2, 36,
+ 0, 0, 0, 3, 3, 64, 2, 0, 0, 3, 3, 36,
+ 0, 0, 0, 3, 4, 68, 2, 0, 0, 3, 4, 36,
+ 0, 0, 0, 3, 5, 76, 2, 0, 0, 3, 5, 36,
+ 0, 0, 0, 3, 6, 76, 2, 0, 0, 3, 6, 36,
+ 0, 0, 0, 3, 7, 76, 2, 0, 0, 3, 7, 36,
+ 0, 0, 0, 3, 8, 68, 2, 0, 0, 3, 8, 36,
+ 0, 0, 0, 3, 9, 64, 2, 0, 0, 3, 9, 36,
+ 0, 0, 0, 3, 10, 60, 2, 0, 0, 3, 10, 36,
+ 0, 0, 0, 3, 11, 52, 2, 0, 0, 3, 11, 36,
+ 0, 0, 0, 3, 12, 40, 2, 0, 0, 3, 12, 36,
+ 0, 0, 0, 3, 13, 28, 2, 0, 0, 3, 13, 36,
+ 0, 0, 0, 3, 14, 127, 2, 0, 0, 3, 14, 127,
+ 0, 0, 1, 2, 1, 127, 2, 0, 1, 2, 1, 127,
+ 0, 0, 1, 2, 2, 127, 2, 0, 1, 2, 2, 127,
+ 0, 0, 1, 2, 3, 52, 2, 0, 1, 2, 3, 60,
+ 0, 0, 1, 2, 4, 52, 2, 0, 1, 2, 4, 60,
+ 0, 0, 1, 2, 5, 60, 2, 0, 1, 2, 5, 60,
+ 0, 0, 1, 2, 6, 64, 2, 0, 1, 2, 6, 60,
+ 0, 0, 1, 2, 7, 60, 2, 0, 1, 2, 7, 60,
+ 0, 0, 1, 2, 8, 52, 2, 0, 1, 2, 8, 60,
+ 0, 0, 1, 2, 9, 52, 2, 0, 1, 2, 9, 60,
+ 0, 0, 1, 2, 10, 40, 2, 0, 1, 2, 10, 60,
+ 0, 0, 1, 2, 11, 28, 2, 0, 1, 2, 11, 60,
+ 0, 0, 1, 2, 12, 127, 2, 0, 1, 2, 12, 127,
+ 0, 0, 1, 2, 13, 127, 2, 0, 1, 2, 13, 127,
+ 0, 0, 1, 2, 14, 127, 2, 0, 1, 2, 14, 127,
+ 0, 0, 1, 3, 1, 127, 2, 0, 1, 3, 1, 127,
+ 0, 0, 1, 3, 2, 127, 2, 0, 1, 3, 2, 127,
+ 0, 0, 1, 3, 3, 48, 2, 0, 1, 3, 3, 36,
+ 0, 0, 1, 3, 4, 48, 2, 0, 1, 3, 4, 36,
+ 0, 0, 1, 3, 5, 60, 2, 0, 1, 3, 5, 36,
+ 0, 0, 1, 3, 6, 64, 2, 0, 1, 3, 6, 36,
+ 0, 0, 1, 3, 7, 60, 2, 0, 1, 3, 7, 36,
+ 0, 0, 1, 3, 8, 52, 2, 0, 1, 3, 8, 36,
+ 0, 0, 1, 3, 9, 52, 2, 0, 1, 3, 9, 36,
+ 0, 0, 1, 3, 10, 40, 2, 0, 1, 3, 10, 36,
+ 0, 0, 1, 3, 11, 26, 2, 0, 1, 3, 11, 36,
+ 0, 0, 1, 3, 12, 127, 2, 0, 1, 3, 12, 127,
+ 0, 0, 1, 3, 13, 127, 2, 0, 1, 3, 13, 127,
+ 0, 0, 1, 3, 14, 127, 2, 0, 1, 3, 14, 127,
+ 0, 1, 0, 1, 36, 74, 2, 1, 0, 1, 36, 62,
+ 0, 1, 0, 1, 40, 80, 2, 1, 0, 1, 40, 62,
+ 0, 1, 0, 1, 44, 80, 2, 1, 0, 1, 44, 62,
+ 0, 1, 0, 1, 48, 80, 2, 1, 0, 1, 48, 62,
+ 0, 1, 0, 1, 52, 80, 2, 1, 0, 1, 52, 62,
+ 0, 1, 0, 1, 56, 80, 2, 1, 0, 1, 56, 62,
+ 0, 1, 0, 1, 60, 80, 2, 1, 0, 1, 60, 62,
+ 0, 1, 0, 1, 64, 74, 2, 1, 0, 1, 64, 62,
+ 0, 1, 0, 1, 100, 72, 2, 1, 0, 1, 100, 62,
+ 0, 1, 0, 1, 104, 80, 2, 1, 0, 1, 104, 62,
+ 0, 1, 0, 1, 108, 80, 2, 1, 0, 1, 108, 62,
+ 0, 1, 0, 1, 112, 80, 2, 1, 0, 1, 112, 62,
+ 0, 1, 0, 1, 116, 80, 2, 1, 0, 1, 116, 62,
+ 0, 1, 0, 1, 120, 80, 2, 1, 0, 1, 120, 62,
+ 0, 1, 0, 1, 124, 80, 2, 1, 0, 1, 124, 62,
+ 0, 1, 0, 1, 128, 80, 2, 1, 0, 1, 128, 62,
+ 0, 1, 0, 1, 132, 80, 2, 1, 0, 1, 132, 62,
+ 0, 1, 0, 1, 136, 80, 2, 1, 0, 1, 136, 62,
+ 0, 1, 0, 1, 140, 72, 2, 1, 0, 1, 140, 62,
+ 0, 1, 0, 1, 144, 80, 2, 1, 0, 1, 144, 127,
+ 0, 1, 0, 1, 149, 80, 2, 1, 0, 1, 149, 127,
+ 0, 1, 0, 1, 153, 80, 2, 1, 0, 1, 153, 127,
+ 0, 1, 0, 1, 157, 80, 2, 1, 0, 1, 157, 127,
+ 0, 1, 0, 1, 161, 80, 2, 1, 0, 1, 161, 127,
+ 0, 1, 0, 1, 165, 80, 2, 1, 0, 1, 165, 127,
+ 0, 1, 0, 2, 36, 72, 2, 1, 0, 2, 36, 62,
+ 0, 1, 0, 2, 40, 80, 2, 1, 0, 2, 40, 62,
+ 0, 1, 0, 2, 44, 80, 2, 1, 0, 2, 44, 62,
+ 0, 1, 0, 2, 48, 80, 2, 1, 0, 2, 48, 62,
+ 0, 1, 0, 2, 52, 80, 2, 1, 0, 2, 52, 62,
+ 0, 1, 0, 2, 56, 80, 2, 1, 0, 2, 56, 62,
+ 0, 1, 0, 2, 60, 80, 2, 1, 0, 2, 60, 62,
+ 0, 1, 0, 2, 64, 74, 2, 1, 0, 2, 64, 62,
+ 0, 1, 0, 2, 100, 70, 2, 1, 0, 2, 100, 62,
+ 0, 1, 0, 2, 104, 80, 2, 1, 0, 2, 104, 62,
+ 0, 1, 0, 2, 108, 80, 2, 1, 0, 2, 108, 62,
+ 0, 1, 0, 2, 112, 80, 2, 1, 0, 2, 112, 62,
+ 0, 1, 0, 2, 116, 80, 2, 1, 0, 2, 116, 62,
+ 0, 1, 0, 2, 120, 80, 2, 1, 0, 2, 120, 62,
+ 0, 1, 0, 2, 124, 80, 2, 1, 0, 2, 124, 62,
+ 0, 1, 0, 2, 128, 80, 2, 1, 0, 2, 128, 62,
+ 0, 1, 0, 2, 132, 80, 2, 1, 0, 2, 132, 62,
+ 0, 1, 0, 2, 136, 80, 2, 1, 0, 2, 136, 62,
+ 0, 1, 0, 2, 140, 70, 2, 1, 0, 2, 140, 62,
+ 0, 1, 0, 2, 144, 80, 2, 1, 0, 2, 144, 127,
+ 0, 1, 0, 2, 149, 80, 2, 1, 0, 2, 149, 127,
+ 0, 1, 0, 2, 153, 80, 2, 1, 0, 2, 153, 127,
+ 0, 1, 0, 2, 157, 80, 2, 1, 0, 2, 157, 127,
+ 0, 1, 0, 2, 161, 80, 2, 1, 0, 2, 161, 127,
+ 0, 1, 0, 2, 165, 80, 2, 1, 0, 2, 165, 127,
+ 0, 1, 0, 3, 36, 68, 2, 1, 0, 3, 36, 38,
+ 0, 1, 0, 3, 40, 68, 2, 1, 0, 3, 40, 38,
+ 0, 1, 0, 3, 44, 68, 2, 1, 0, 3, 44, 38,
+ 0, 1, 0, 3, 48, 68, 2, 1, 0, 3, 48, 38,
+ 0, 1, 0, 3, 52, 68, 2, 1, 0, 3, 52, 38,
+ 0, 1, 0, 3, 56, 68, 2, 1, 0, 3, 56, 38,
+ 0, 1, 0, 3, 60, 66, 2, 1, 0, 3, 60, 38,
+ 0, 1, 0, 3, 64, 68, 2, 1, 0, 3, 64, 38,
+ 0, 1, 0, 3, 100, 60, 2, 1, 0, 3, 100, 38,
+ 0, 1, 0, 3, 104, 68, 2, 1, 0, 3, 104, 38,
+ 0, 1, 0, 3, 108, 68, 2, 1, 0, 3, 108, 38,
+ 0, 1, 0, 3, 112, 68, 2, 1, 0, 3, 112, 38,
+ 0, 1, 0, 3, 116, 68, 2, 1, 0, 3, 116, 38,
+ 0, 1, 0, 3, 120, 68, 2, 1, 0, 3, 120, 38,
+ 0, 1, 0, 3, 124, 68, 2, 1, 0, 3, 124, 38,
+ 0, 1, 0, 3, 128, 68, 2, 1, 0, 3, 128, 38,
+ 0, 1, 0, 3, 132, 68, 2, 1, 0, 3, 132, 38,
+ 0, 1, 0, 3, 136, 68, 2, 1, 0, 3, 136, 38,
+ 0, 1, 0, 3, 140, 60, 2, 1, 0, 3, 140, 38,
+ 0, 1, 0, 3, 144, 68, 2, 1, 0, 3, 144, 127,
+ 0, 1, 0, 3, 149, 80, 2, 1, 0, 3, 149, 127,
+ 0, 1, 0, 3, 153, 80, 2, 1, 0, 3, 153, 127,
+ 0, 1, 0, 3, 157, 80, 2, 1, 0, 3, 157, 127,
+ 0, 1, 0, 3, 161, 80, 2, 1, 0, 3, 161, 127,
+ 0, 1, 0, 3, 165, 80, 2, 1, 0, 3, 165, 127,
+ 0, 1, 1, 2, 38, 66, 2, 1, 1, 2, 38, 64,
+ 0, 1, 1, 2, 46, 72, 2, 1, 1, 2, 46, 64,
+ 0, 1, 1, 2, 54, 72, 2, 1, 1, 2, 54, 64,
+ 0, 1, 1, 2, 62, 64, 2, 1, 1, 2, 62, 64,
+ 0, 1, 1, 2, 102, 58, 2, 1, 1, 2, 102, 64,
+ 0, 1, 1, 2, 110, 74, 2, 1, 1, 2, 110, 64,
+ 0, 1, 1, 2, 118, 74, 2, 1, 1, 2, 118, 64,
+ 0, 1, 1, 2, 126, 74, 2, 1, 1, 2, 126, 64,
+ 0, 1, 1, 2, 134, 74, 2, 1, 1, 2, 134, 64,
+ 0, 1, 1, 2, 142, 74, 2, 1, 1, 2, 142, 127,
+ 0, 1, 1, 2, 151, 74, 2, 1, 1, 2, 151, 127,
+ 0, 1, 1, 2, 159, 74, 2, 1, 1, 2, 159, 127,
+ 0, 1, 1, 3, 38, 60, 2, 1, 1, 3, 38, 40,
+ 0, 1, 1, 3, 46, 68, 2, 1, 1, 3, 46, 40,
+ 0, 1, 1, 3, 54, 68, 2, 1, 1, 3, 54, 40,
+ 0, 1, 1, 3, 62, 58, 2, 1, 1, 3, 62, 40,
+ 0, 1, 1, 3, 102, 54, 2, 1, 1, 3, 102, 40,
+ 0, 1, 1, 3, 110, 68, 2, 1, 1, 3, 110, 40,
+ 0, 1, 1, 3, 118, 68, 2, 1, 1, 3, 118, 40,
+ 0, 1, 1, 3, 126, 68, 2, 1, 1, 3, 126, 40,
+ 0, 1, 1, 3, 134, 68, 2, 1, 1, 3, 134, 40,
+ 0, 1, 1, 3, 142, 68, 2, 1, 1, 3, 142, 127,
+ 0, 1, 1, 3, 151, 74, 2, 1, 1, 3, 151, 127,
+ 0, 1, 1, 3, 159, 74, 2, 1, 1, 3, 159, 127,
+ 0, 1, 2, 4, 42, 64, 2, 1, 2, 4, 42, 64,
+ 0, 1, 2, 4, 58, 62, 2, 1, 2, 4, 58, 64,
+ 0, 1, 2, 4, 106, 58, 2, 1, 2, 4, 106, 64,
+ 0, 1, 2, 4, 122, 72, 2, 1, 2, 4, 122, 64,
+ 0, 1, 2, 4, 138, 72, 2, 1, 2, 4, 138, 127,
+ 0, 1, 2, 4, 155, 72, 2, 1, 2, 4, 155, 127,
+ 0, 1, 2, 5, 42, 54, 2, 1, 2, 5, 42, 40,
+ 0, 1, 2, 5, 58, 52, 2, 1, 2, 5, 58, 40,
+ 0, 1, 2, 5, 106, 50, 2, 1, 2, 5, 106, 40,
+ 0, 1, 2, 5, 122, 66, 2, 1, 2, 5, 122, 40,
+ 0, 1, 2, 5, 138, 66, 2, 1, 2, 5, 138, 127,
+ 0, 1, 2, 5, 155, 62, 2, 1, 2, 5, 155, 127
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
+
+static const u32 rtw8822c_array_mp_cal_init[] = {
+ 0x1b00, 0x00000008,
+ 0x1b00, 0x00A70008,
+ 0x1b00, 0x00150008,
+ 0x1b00, 0x00000008,
+ 0x1b04, 0xE2462952,
+ 0x1b08, 0x00000080,
+ 0x1b0c, 0x00000000,
+ 0x1b10, 0x00010C00,
+ 0x1b14, 0x00000000,
+ 0x1b18, 0x00292903,
+ 0x1b1c, 0xA218FC32,
+ 0x1b20, 0x01040008,
+ 0x1b24, 0x00060008,
+ 0x1b28, 0x00060300,
+ 0x1b2C, 0x00180018,
+ 0x1b30, 0x40000000,
+ 0x1b34, 0x00000800,
+ 0x1b38, 0x40000000,
+ 0x1b3C, 0x40000000,
+ 0x1b98, 0x00000000,
+ 0x1b9c, 0x00000000,
+ 0x1bc0, 0x01000000,
+ 0x1bcc, 0x00000000,
+ 0x1be4, 0x00000000,
+ 0x1bec, 0x40000000,
+ 0x1b40, 0x40000000,
+ 0x1b44, 0x20004064,
+ 0x1b48, 0x0005002D,
+ 0x1b4c, 0x00000000,
+ 0x1b60, 0x1F100000,
+ 0x1b64, 0x12000000,
+ 0x1b4c, 0x00000000,
+ 0x1b4c, 0x008a0000,
+ 0x1b50, 0x000003BE,
+ 0x1b4c, 0x018a0000,
+ 0x1b50, 0x0000057A,
+ 0x1b4c, 0x028a0000,
+ 0x1b50, 0x000006C8,
+ 0x1b4c, 0x038a0000,
+ 0x1b50, 0x000007E0,
+ 0x1b4c, 0x048a0000,
+ 0x1b50, 0x000008D5,
+ 0x1b4c, 0x058a0000,
+ 0x1b50, 0x000009B2,
+ 0x1b4c, 0x068a0000,
+ 0x1b50, 0x00000A7D,
+ 0x1b4c, 0x078a0000,
+ 0x1b50, 0x00000B3A,
+ 0x1b4c, 0x088a0000,
+ 0x1b50, 0x00000BEB,
+ 0x1b4c, 0x098a0000,
+ 0x1b50, 0x00000C92,
+ 0x1b4c, 0x0A8a0000,
+ 0x1b50, 0x00000D31,
+ 0x1b4c, 0x0B8a0000,
+ 0x1b50, 0x00000DC9,
+ 0x1b4c, 0x0C8a0000,
+ 0x1b50, 0x00000E5A,
+ 0x1b4c, 0x0D8a0000,
+ 0x1b50, 0x00000EE6,
+ 0x1b4c, 0x0E8a0000,
+ 0x1b50, 0x00000F6D,
+ 0x1b4c, 0x0F8a0000,
+ 0x1b50, 0x00000FF0,
+ 0x1b4c, 0x108a0000,
+ 0x1b50, 0x0000106F,
+ 0x1b4c, 0x118a0000,
+ 0x1b50, 0x000010E9,
+ 0x1b4c, 0x128a0000,
+ 0x1b50, 0x00001161,
+ 0x1b4c, 0x138a0000,
+ 0x1b50, 0x000011D5,
+ 0x1b4c, 0x148a0000,
+ 0x1b50, 0x00001247,
+ 0x1b4c, 0x158a0000,
+ 0x1b50, 0x000012B5,
+ 0x1b4c, 0x168a0000,
+ 0x1b50, 0x00001322,
+ 0x1b4c, 0x178a0000,
+ 0x1b50, 0x0000138B,
+ 0x1b4c, 0x188a0000,
+ 0x1b50, 0x000013F3,
+ 0x1b4c, 0x198a0000,
+ 0x1b50, 0x00001459,
+ 0x1b4c, 0x1A8a0000,
+ 0x1b50, 0x000014BD,
+ 0x1b4c, 0x1B8a0000,
+ 0x1b50, 0x0000151E,
+ 0x1b4c, 0x1C8a0000,
+ 0x1b50, 0x0000157F,
+ 0x1b4c, 0x1D8a0000,
+ 0x1b50, 0x000015DD,
+ 0x1b4c, 0x1E8a0000,
+ 0x1b50, 0x0000163A,
+ 0x1b4c, 0x1F8a0000,
+ 0x1b50, 0x00001695,
+ 0x1b4c, 0x208a0000,
+ 0x1b50, 0x000016EF,
+ 0x1b4c, 0x218a0000,
+ 0x1b50, 0x00001748,
+ 0x1b4c, 0x228a0000,
+ 0x1b50, 0x0000179F,
+ 0x1b4c, 0x238a0000,
+ 0x1b50, 0x000017F5,
+ 0x1b4c, 0x248a0000,
+ 0x1b50, 0x0000184A,
+ 0x1b4c, 0x258a0000,
+ 0x1b50, 0x0000189E,
+ 0x1b4c, 0x268a0000,
+ 0x1b50, 0x000018F1,
+ 0x1b4c, 0x278a0000,
+ 0x1b50, 0x00001942,
+ 0x1b4c, 0x288a0000,
+ 0x1b50, 0x00001993,
+ 0x1b4c, 0x298a0000,
+ 0x1b50, 0x000019E2,
+ 0x1b4c, 0x2A8a0000,
+ 0x1b50, 0x00001A31,
+ 0x1b4c, 0x2B8a0000,
+ 0x1b50, 0x00001A7F,
+ 0x1b4c, 0x2C8a0000,
+ 0x1b50, 0x00001ACC,
+ 0x1b4c, 0x2D8a0000,
+ 0x1b50, 0x00001B18,
+ 0x1b4c, 0x2E8a0000,
+ 0x1b50, 0x00001B63,
+ 0x1b4c, 0x2F8a0000,
+ 0x1b50, 0x00001BAD,
+ 0x1b4c, 0x308a0000,
+ 0x1b50, 0x00001BF7,
+ 0x1b4c, 0x318a0000,
+ 0x1b50, 0x00001C40,
+ 0x1b4c, 0x328a0000,
+ 0x1b50, 0x00001C88,
+ 0x1b4c, 0x338a0000,
+ 0x1b50, 0x00001CCF,
+ 0x1b4c, 0x348a0000,
+ 0x1b50, 0x00001D16,
+ 0x1b4c, 0x358a0000,
+ 0x1b50, 0x00001D5C,
+ 0x1b4c, 0x368a0000,
+ 0x1b50, 0x00001DA2,
+ 0x1b4c, 0x378a0000,
+ 0x1b50, 0x00001DE6,
+ 0x1b4c, 0x388a0000,
+ 0x1b50, 0x00001E2B,
+ 0x1b4c, 0x398a0000,
+ 0x1b50, 0x00001E6E,
+ 0x1b4c, 0x3A8a0000,
+ 0x1b50, 0x00001EB1,
+ 0x1b4c, 0x3B8a0000,
+ 0x1b50, 0x00001EF4,
+ 0x1b4c, 0x3C8a0000,
+ 0x1b50, 0x00001F35,
+ 0x1b4c, 0x3D8a0000,
+ 0x1b50, 0x00001F77,
+ 0x1b4c, 0x3E8a0000,
+ 0x1b50, 0x00001FB8,
+ 0x1b4c, 0x3F8a0000,
+ 0x1b50, 0x00001FF8,
+ 0x1b4c, 0x00000000,
+ 0x1b50, 0x00000000,
+ 0x1b58, 0x00890000,
+ 0x1b5C, 0x3C6B3FFF,
+ 0x1b58, 0x02890000,
+ 0x1b5C, 0x35D9390A,
+ 0x1b58, 0x04890000,
+ 0x1b5C, 0x2FFE32D6,
+ 0x1b58, 0x06890000,
+ 0x1b5C, 0x2AC62D4F,
+ 0x1b58, 0x08890000,
+ 0x1b5C, 0x261F2862,
+ 0x1b58, 0x0A890000,
+ 0x1b5C, 0x21FA23FD,
+ 0x1b58, 0x0C890000,
+ 0x1b5C, 0x1E482013,
+ 0x1b58, 0x0E890000,
+ 0x1b5C, 0x1AFD1C96,
+ 0x1b58, 0x10890000,
+ 0x1b5C, 0x180E197B,
+ 0x1b58, 0x12890000,
+ 0x1b5C, 0x157016B5,
+ 0x1b58, 0x14890000,
+ 0x1b5C, 0x131B143D,
+ 0x1b58, 0x16890000,
+ 0x1b5C, 0x1107120A,
+ 0x1b58, 0x18890000,
+ 0x1b5C, 0x0F2D1013,
+ 0x1b58, 0x1A890000,
+ 0x1b5C, 0x0D870E54,
+ 0x1b58, 0x1C890000,
+ 0x1b5C, 0x0C0E0CC5,
+ 0x1b58, 0x1E890000,
+ 0x1b5C, 0x0ABF0B62,
+ 0x1b58, 0x20890000,
+ 0x1b5C, 0x09930A25,
+ 0x1b58, 0x22890000,
+ 0x1b5C, 0x0889090A,
+ 0x1b58, 0x24890000,
+ 0x1b5C, 0x079B080F,
+ 0x1b58, 0x26890000,
+ 0x1b5C, 0x06C7072E,
+ 0x1b58, 0x28890000,
+ 0x1b5C, 0x060B0666,
+ 0x1b58, 0x2A890000,
+ 0x1b5C, 0x056305B4,
+ 0x1b58, 0x2C890000,
+ 0x1b5C, 0x04CD0515,
+ 0x1b58, 0x2E890000,
+ 0x1b5C, 0x04470488,
+ 0x1b58, 0x30890000,
+ 0x1b5C, 0x03D0040A,
+ 0x1b58, 0x32890000,
+ 0x1b5C, 0x03660399,
+ 0x1b58, 0x34890000,
+ 0x1b5C, 0x03070335,
+ 0x1b58, 0x36890000,
+ 0x1b5C, 0x02B302DC,
+ 0x1b58, 0x38890000,
+ 0x1b5C, 0x0268028C,
+ 0x1b58, 0x3A890000,
+ 0x1b5C, 0x02250245,
+ 0x1b58, 0x3C890000,
+ 0x1b5C, 0x01E90206,
+ 0x1b58, 0x3E890000,
+ 0x1b5C, 0x01B401CE,
+ 0x1b58, 0x40890000,
+ 0x1b5C, 0x0185019C,
+ 0x1b58, 0x42890000,
+ 0x1b5C, 0x015A016F,
+ 0x1b58, 0x44890000,
+ 0x1b5C, 0x01350147,
+ 0x1b58, 0x46890000,
+ 0x1b5C, 0x01130123,
+ 0x1b58, 0x48890000,
+ 0x1b5C, 0x00F50104,
+ 0x1b58, 0x4A890000,
+ 0x1b5C, 0x00DA00E7,
+ 0x1b58, 0x4C890000,
+ 0x1b5C, 0x00C300CE,
+ 0x1b58, 0x4E890000,
+ 0x1b5C, 0x00AE00B8,
+ 0x1b58, 0x50890000,
+ 0x1b5C, 0x009B00A4,
+ 0x1b58, 0x52890000,
+ 0x1b5C, 0x008A0092,
+ 0x1b58, 0x54890000,
+ 0x1b5C, 0x007B0082,
+ 0x1b58, 0x56890000,
+ 0x1b5C, 0x006E0074,
+ 0x1b58, 0x58890000,
+ 0x1b5C, 0x00620067,
+ 0x1b58, 0x5A890000,
+ 0x1b5C, 0x0057005C,
+ 0x1b58, 0x5C890000,
+ 0x1b5C, 0x004E0052,
+ 0x1b58, 0x5E890000,
+ 0x1b5C, 0x00450049,
+ 0x1b58, 0x60890000,
+ 0x1b5C, 0x003E0041,
+ 0x1b58, 0x62890000,
+ 0x1b5C, 0x0037003A,
+ 0x1b58, 0x62010000,
+ 0x1b00, 0x0000000A,
+ 0x1b00, 0x00A7000A,
+ 0x1b00, 0x0015000A,
+ 0x1b00, 0x0000000A,
+ 0x1b04, 0xE2462952,
+ 0x1b08, 0x00000080,
+ 0x1b0c, 0x00000000,
+ 0x1b10, 0x00010C00,
+ 0x1b14, 0x00000000,
+ 0x1b18, 0x00292903,
+ 0x1b1c, 0xA218FC32,
+ 0x1b20, 0x01040008,
+ 0x1b24, 0x00060008,
+ 0x1b28, 0x00060300,
+ 0x1b2C, 0x00180018,
+ 0x1b30, 0x40000000,
+ 0x1b34, 0x00000800,
+ 0x1b38, 0x40000000,
+ 0x1b3C, 0x40000000,
+ 0x1b98, 0x00000000,
+ 0x1b9c, 0x00000000,
+ 0x1bc0, 0x01000000,
+ 0x1bcc, 0x00000000,
+ 0x1be4, 0x00000000,
+ 0x1bec, 0x40000000,
+ 0x1b60, 0x1F100000,
+ 0x1b64, 0x12000000,
+ 0x1b58, 0x00890000,
+ 0x1b5C, 0x3C6B3FFF,
+ 0x1b58, 0x02890000,
+ 0x1b5C, 0x35D9390A,
+ 0x1b58, 0x04890000,
+ 0x1b5C, 0x2FFE32D6,
+ 0x1b58, 0x06890000,
+ 0x1b5C, 0x2AC62D4F,
+ 0x1b58, 0x08890000,
+ 0x1b5C, 0x261F2862,
+ 0x1b58, 0x0A890000,
+ 0x1b5C, 0x21FA23FD,
+ 0x1b58, 0x0C890000,
+ 0x1b5C, 0x1E482013,
+ 0x1b58, 0x0E890000,
+ 0x1b5C, 0x1AFD1C96,
+ 0x1b58, 0x10890000,
+ 0x1b5C, 0x180E197B,
+ 0x1b58, 0x12890000,
+ 0x1b5C, 0x157016B5,
+ 0x1b58, 0x14890000,
+ 0x1b5C, 0x131B143D,
+ 0x1b58, 0x16890000,
+ 0x1b5C, 0x1107120A,
+ 0x1b58, 0x18890000,
+ 0x1b5C, 0x0F2D1013,
+ 0x1b58, 0x1A890000,
+ 0x1b5C, 0x0D870E54,
+ 0x1b58, 0x1C890000,
+ 0x1b5C, 0x0C0E0CC5,
+ 0x1b58, 0x1E890000,
+ 0x1b5C, 0x0ABF0B62,
+ 0x1b58, 0x20890000,
+ 0x1b5C, 0x09930A25,
+ 0x1b58, 0x22890000,
+ 0x1b5C, 0x0889090A,
+ 0x1b58, 0x24890000,
+ 0x1b5C, 0x079B080F,
+ 0x1b58, 0x26890000,
+ 0x1b5C, 0x06C7072E,
+ 0x1b58, 0x28890000,
+ 0x1b5C, 0x060B0666,
+ 0x1b58, 0x2A890000,
+ 0x1b5C, 0x056305B4,
+ 0x1b58, 0x2C890000,
+ 0x1b5C, 0x04CD0515,
+ 0x1b58, 0x2E890000,
+ 0x1b5C, 0x04470488,
+ 0x1b58, 0x30890000,
+ 0x1b5C, 0x03D0040A,
+ 0x1b58, 0x32890000,
+ 0x1b5C, 0x03660399,
+ 0x1b58, 0x34890000,
+ 0x1b5C, 0x03070335,
+ 0x1b58, 0x36890000,
+ 0x1b5C, 0x02B302DC,
+ 0x1b58, 0x38890000,
+ 0x1b5C, 0x0268028C,
+ 0x1b58, 0x3A890000,
+ 0x1b5C, 0x02250245,
+ 0x1b58, 0x3C890000,
+ 0x1b5C, 0x01E90206,
+ 0x1b58, 0x3E890000,
+ 0x1b5C, 0x01B401CE,
+ 0x1b58, 0x40890000,
+ 0x1b5C, 0x0185019C,
+ 0x1b58, 0x42890000,
+ 0x1b5C, 0x015A016F,
+ 0x1b58, 0x44890000,
+ 0x1b5C, 0x01350147,
+ 0x1b58, 0x46890000,
+ 0x1b5C, 0x01130123,
+ 0x1b58, 0x48890000,
+ 0x1b5C, 0x00F50104,
+ 0x1b58, 0x4A890000,
+ 0x1b5C, 0x00DA00E7,
+ 0x1b58, 0x4C890000,
+ 0x1b5C, 0x00C300CE,
+ 0x1b58, 0x4E890000,
+ 0x1b5C, 0x00AE00B8,
+ 0x1b58, 0x50890000,
+ 0x1b5C, 0x009B00A4,
+ 0x1b58, 0x52890000,
+ 0x1b5C, 0x008A0092,
+ 0x1b58, 0x54890000,
+ 0x1b5C, 0x007B0082,
+ 0x1b58, 0x56890000,
+ 0x1b5C, 0x006E0074,
+ 0x1b58, 0x58890000,
+ 0x1b5C, 0x00620067,
+ 0x1b58, 0x5A890000,
+ 0x1b5C, 0x0057005C,
+ 0x1b58, 0x5C890000,
+ 0x1b5C, 0x004E0052,
+ 0x1b58, 0x5E890000,
+ 0x1b5C, 0x00450049,
+ 0x1b58, 0x60890000,
+ 0x1b5C, 0x003E0041,
+ 0x1b58, 0x62890000,
+ 0x1b5C, 0x0037003A,
+ 0x1b58, 0x62010000,
+ 0x1b00, 0x0000000C,
+ 0x1bd4, 0x000000F0,
+ 0x1bb8, 0x20202020,
+ 0x1bbc, 0x20202020,
+ 0x1bc0, 0x20202020,
+ 0x1bc4, 0x20202020,
+ 0x1bc8, 0x04040404,
+ 0x1bcc, 0x04040404,
+ 0x1bd0, 0x04040404,
+ 0x1bd8, 0x04040404,
+ 0x1bdc, 0x20202020,
+ 0x1be0, 0x04040404,
+ 0x1be4, 0x77472F17,
+ 0x1be8, 0xEFBFA78F,
+ 0x1bec, 0x00000000,
+ 0x1bf0, 0x1F1F1939,
+ 0x1b04, 0x0000005B,
+ 0x1b08, 0xB000C000,
+ 0x1b5c, 0x0000005B,
+ 0x1b60, 0xB000C000,
+ 0x1bb4, 0x20000000,
+ 0x1b00, 0x00000008,
+ 0x1b80, 0x00000007,
+ 0x1b80, 0x00080005,
+ 0x1b80, 0x00080007,
+ 0x1b80, 0x80000015,
+ 0x1b80, 0x80000017,
+ 0x1b80, 0x09080025,
+ 0x1b80, 0x09080027,
+ 0x1b80, 0x0f020035,
+ 0x1b80, 0x0f020037,
+ 0x1b80, 0x00220045,
+ 0x1b80, 0x00220047,
+ 0x1b80, 0x00040055,
+ 0x1b80, 0x00040057,
+ 0x1b80, 0x05c00065,
+ 0x1b80, 0x05c00067,
+ 0x1b80, 0x00070075,
+ 0x1b80, 0x00070077,
+ 0x1b80, 0x64020085,
+ 0x1b80, 0x64020087,
+ 0x1b80, 0x00020095,
+ 0x1b80, 0x00020097,
+ 0x1b80, 0x000400a5,
+ 0x1b80, 0x000400a7,
+ 0x1b80, 0x4a0000b5,
+ 0x1b80, 0x4a0000b7,
+ 0x1b80, 0x4b0400c5,
+ 0x1b80, 0x4b0400c7,
+ 0x1b80, 0x860300d5,
+ 0x1b80, 0x860300d7,
+ 0x1b80, 0x400900e5,
+ 0x1b80, 0x400900e7,
+ 0x1b80, 0xe02700f5,
+ 0x1b80, 0xe02700f7,
+ 0x1b80, 0x4b050105,
+ 0x1b80, 0x4b050107,
+ 0x1b80, 0x87030115,
+ 0x1b80, 0x87030117,
+ 0x1b80, 0x400b0125,
+ 0x1b80, 0x400b0127,
+ 0x1b80, 0xe0270135,
+ 0x1b80, 0xe0270137,
+ 0x1b80, 0x4b060145,
+ 0x1b80, 0x4b060147,
+ 0x1b80, 0x88030155,
+ 0x1b80, 0x88030157,
+ 0x1b80, 0x400d0165,
+ 0x1b80, 0x400d0167,
+ 0x1b80, 0xe0270175,
+ 0x1b80, 0xe0270177,
+ 0x1b80, 0x4b000185,
+ 0x1b80, 0x4b000187,
+ 0x1b80, 0x00070195,
+ 0x1b80, 0x00070197,
+ 0x1b80, 0x4c0001a5,
+ 0x1b80, 0x4c0001a7,
+ 0x1b80, 0x000401b5,
+ 0x1b80, 0x000401b7,
+ 0x1b80, 0x400801c5,
+ 0x1b80, 0x400801c7,
+ 0x1b80, 0x505501d5,
+ 0x1b80, 0x505501d7,
+ 0x1b80, 0x090a01e5,
+ 0x1b80, 0x090a01e7,
+ 0x1b80, 0x0ffe01f5,
+ 0x1b80, 0x0ffe01f7,
+ 0x1b80, 0x00220205,
+ 0x1b80, 0x00220207,
+ 0x1b80, 0x00040215,
+ 0x1b80, 0x00040217,
+ 0x1b80, 0x05c00225,
+ 0x1b80, 0x05c00227,
+ 0x1b80, 0x00070235,
+ 0x1b80, 0x00070237,
+ 0x1b80, 0x64000245,
+ 0x1b80, 0x64000247,
+ 0x1b80, 0x00020255,
+ 0x1b80, 0x00020257,
+ 0x1b80, 0x30000265,
+ 0x1b80, 0x30000267,
+ 0x1b80, 0xa5100275,
+ 0x1b80, 0xa5100277,
+ 0x1b80, 0xe3520285,
+ 0x1b80, 0xe3520287,
+ 0x1b80, 0xf01d0295,
+ 0x1b80, 0xf01d0297,
+ 0x1b80, 0xf11d02a5,
+ 0x1b80, 0xf11d02a7,
+ 0x1b80, 0xf21d02b5,
+ 0x1b80, 0xf21d02b7,
+ 0x1b80, 0xf31d02c5,
+ 0x1b80, 0xf31d02c7,
+ 0x1b80, 0xf41d02d5,
+ 0x1b80, 0xf41d02d7,
+ 0x1b80, 0xf51d02e5,
+ 0x1b80, 0xf51d02e7,
+ 0x1b80, 0xf61d02f5,
+ 0x1b80, 0xf61d02f7,
+ 0x1b80, 0xf71d0305,
+ 0x1b80, 0xf71d0307,
+ 0x1b80, 0xf81d0315,
+ 0x1b80, 0xf81d0317,
+ 0x1b80, 0xf91d0325,
+ 0x1b80, 0xf91d0327,
+ 0x1b80, 0xfa1d0335,
+ 0x1b80, 0xfa1d0337,
+ 0x1b80, 0xfb1d0345,
+ 0x1b80, 0xfb1d0347,
+ 0x1b80, 0xfc1d0355,
+ 0x1b80, 0xfc1d0357,
+ 0x1b80, 0xfd1d0365,
+ 0x1b80, 0xfd1d0367,
+ 0x1b80, 0xf21d0375,
+ 0x1b80, 0xf21d0377,
+ 0x1b80, 0xf31d0385,
+ 0x1b80, 0xf31d0387,
+ 0x1b80, 0xf41d0395,
+ 0x1b80, 0xf41d0397,
+ 0x1b80, 0xf51d03a5,
+ 0x1b80, 0xf51d03a7,
+ 0x1b80, 0xf61d03b5,
+ 0x1b80, 0xf61d03b7,
+ 0x1b80, 0xf71d03c5,
+ 0x1b80, 0xf71d03c7,
+ 0x1b80, 0xf81d03d5,
+ 0x1b80, 0xf81d03d7,
+ 0x1b80, 0xf91d03e5,
+ 0x1b80, 0xf91d03e7,
+ 0x1b80, 0xfa1d03f5,
+ 0x1b80, 0xfa1d03f7,
+ 0x1b80, 0xfb1d0405,
+ 0x1b80, 0xfb1d0407,
+ 0x1b80, 0xfc1d0415,
+ 0x1b80, 0xfc1d0417,
+ 0x1b80, 0xfd1d0425,
+ 0x1b80, 0xfd1d0427,
+ 0x1b80, 0xfe1d0435,
+ 0x1b80, 0xfe1d0437,
+ 0x1b80, 0xff1d0445,
+ 0x1b80, 0xff1d0447,
+ 0x1b80, 0x00010455,
+ 0x1b80, 0x00010457,
+ 0x1b80, 0x30620465,
+ 0x1b80, 0x30620467,
+ 0x1b80, 0x307a0475,
+ 0x1b80, 0x307a0477,
+ 0x1b80, 0x307c0485,
+ 0x1b80, 0x307c0487,
+ 0x1b80, 0x30eb0495,
+ 0x1b80, 0x30eb0497,
+ 0x1b80, 0x308004a5,
+ 0x1b80, 0x308004a7,
+ 0x1b80, 0x308c04b5,
+ 0x1b80, 0x308c04b7,
+ 0x1b80, 0x309804c5,
+ 0x1b80, 0x309804c7,
+ 0x1b80, 0x307f04d5,
+ 0x1b80, 0x307f04d7,
+ 0x1b80, 0x308b04e5,
+ 0x1b80, 0x308b04e7,
+ 0x1b80, 0x309704f5,
+ 0x1b80, 0x309704f7,
+ 0x1b80, 0x30ef0505,
+ 0x1b80, 0x30ef0507,
+ 0x1b80, 0x30fa0515,
+ 0x1b80, 0x30fa0517,
+ 0x1b80, 0x31050525,
+ 0x1b80, 0x31050527,
+ 0x1b80, 0x316a0535,
+ 0x1b80, 0x316a0537,
+ 0x1b80, 0x307a0545,
+ 0x1b80, 0x307a0547,
+ 0x1b80, 0x30e90555,
+ 0x1b80, 0x30e90557,
+ 0x1b80, 0x31870565,
+ 0x1b80, 0x31870567,
+ 0x1b80, 0x31a00575,
+ 0x1b80, 0x31a00577,
+ 0x1b80, 0x31ba0585,
+ 0x1b80, 0x31ba0587,
+ 0x1b80, 0x31c20595,
+ 0x1b80, 0x31c20597,
+ 0x1b80, 0x31ca05a5,
+ 0x1b80, 0x31ca05a7,
+ 0x1b80, 0x31d205b5,
+ 0x1b80, 0x31d205b7,
+ 0x1b80, 0x31da05c5,
+ 0x1b80, 0x31da05c7,
+ 0x1b80, 0x31e905d5,
+ 0x1b80, 0x31e905d7,
+ 0x1b80, 0x31f805e5,
+ 0x1b80, 0x31f805e7,
+ 0x1b80, 0x31fe05f5,
+ 0x1b80, 0x31fe05f7,
+ 0x1b80, 0x32040605,
+ 0x1b80, 0x32040607,
+ 0x1b80, 0x320a0615,
+ 0x1b80, 0x320a0617,
+ 0x1b80, 0xe2eb0625,
+ 0x1b80, 0xe2eb0627,
+ 0x1b80, 0x4d040635,
+ 0x1b80, 0x4d040637,
+ 0x1b80, 0x20800645,
+ 0x1b80, 0x20800647,
+ 0x1b80, 0x00000655,
+ 0x1b80, 0x00000657,
+ 0x1b80, 0x4d000665,
+ 0x1b80, 0x4d000667,
+ 0x1b80, 0x55070675,
+ 0x1b80, 0x55070677,
+ 0x1b80, 0xe2e30685,
+ 0x1b80, 0xe2e30687,
+ 0x1b80, 0xe2e30695,
+ 0x1b80, 0xe2e30697,
+ 0x1b80, 0x4d0406a5,
+ 0x1b80, 0x4d0406a7,
+ 0x1b80, 0x208806b5,
+ 0x1b80, 0x208806b7,
+ 0x1b80, 0x020006c5,
+ 0x1b80, 0x020006c7,
+ 0x1b80, 0x4d0006d5,
+ 0x1b80, 0x4d0006d7,
+ 0x1b80, 0x550f06e5,
+ 0x1b80, 0x550f06e7,
+ 0x1b80, 0xe2e306f5,
+ 0x1b80, 0xe2e306f7,
+ 0x1b80, 0x4f020705,
+ 0x1b80, 0x4f020707,
+ 0x1b80, 0x4e000715,
+ 0x1b80, 0x4e000717,
+ 0x1b80, 0x53020725,
+ 0x1b80, 0x53020727,
+ 0x1b80, 0x52010735,
+ 0x1b80, 0x52010737,
+ 0x1b80, 0xe2e70745,
+ 0x1b80, 0xe2e70747,
+ 0x1b80, 0x4d080755,
+ 0x1b80, 0x4d080757,
+ 0x1b80, 0x57100765,
+ 0x1b80, 0x57100767,
+ 0x1b80, 0x57000775,
+ 0x1b80, 0x57000777,
+ 0x1b80, 0x4d000785,
+ 0x1b80, 0x4d000787,
+ 0x1b80, 0x00010795,
+ 0x1b80, 0x00010797,
+ 0x1b80, 0xe2eb07a5,
+ 0x1b80, 0xe2eb07a7,
+ 0x1b80, 0x000107b5,
+ 0x1b80, 0x000107b7,
+ 0x1b80, 0x620607c5,
+ 0x1b80, 0x620607c7,
+ 0x1b80, 0xe2eb07d5,
+ 0x1b80, 0xe2eb07d7,
+ 0x1b80, 0x000107e5,
+ 0x1b80, 0x000107e7,
+ 0x1b80, 0x620607f5,
+ 0x1b80, 0x620607f7,
+ 0x1b80, 0x30ad0805,
+ 0x1b80, 0x30ad0807,
+ 0x1b80, 0x00260815,
+ 0x1b80, 0x00260817,
+ 0x1b80, 0xe3450825,
+ 0x1b80, 0xe3450827,
+ 0x1b80, 0x00020835,
+ 0x1b80, 0x00020837,
+ 0x1b80, 0x54ec0845,
+ 0x1b80, 0x54ec0847,
+ 0x1b80, 0x0ba60855,
+ 0x1b80, 0x0ba60857,
+ 0x1b80, 0x00260865,
+ 0x1b80, 0x00260867,
+ 0x1b80, 0xe3450875,
+ 0x1b80, 0xe3450877,
+ 0x1b80, 0x00020885,
+ 0x1b80, 0x00020887,
+ 0x1b80, 0x63c30895,
+ 0x1b80, 0x63c30897,
+ 0x1b80, 0x30d908a5,
+ 0x1b80, 0x30d908a7,
+ 0x1b80, 0x620608b5,
+ 0x1b80, 0x620608b7,
+ 0x1b80, 0x30a508c5,
+ 0x1b80, 0x30a508c7,
+ 0x1b80, 0x002408d5,
+ 0x1b80, 0x002408d7,
+ 0x1b80, 0xe34508e5,
+ 0x1b80, 0xe34508e7,
+ 0x1b80, 0x000208f5,
+ 0x1b80, 0x000208f7,
+ 0x1b80, 0x54ea0905,
+ 0x1b80, 0x54ea0907,
+ 0x1b80, 0x0ba60915,
+ 0x1b80, 0x0ba60917,
+ 0x1b80, 0x00240925,
+ 0x1b80, 0x00240927,
+ 0x1b80, 0xe3450935,
+ 0x1b80, 0xe3450937,
+ 0x1b80, 0x00020945,
+ 0x1b80, 0x00020947,
+ 0x1b80, 0x63c30955,
+ 0x1b80, 0x63c30957,
+ 0x1b80, 0x30d90965,
+ 0x1b80, 0x30d90967,
+ 0x1b80, 0x62060975,
+ 0x1b80, 0x62060977,
+ 0x1b80, 0x6c100985,
+ 0x1b80, 0x6c100987,
+ 0x1b80, 0x6d0f0995,
+ 0x1b80, 0x6d0f0997,
+ 0x1b80, 0xe2eb09a5,
+ 0x1b80, 0xe2eb09a7,
+ 0x1b80, 0xe34509b5,
+ 0x1b80, 0xe34509b7,
+ 0x1b80, 0x6c2409c5,
+ 0x1b80, 0x6c2409c7,
+ 0x1b80, 0xe2eb09d5,
+ 0x1b80, 0xe2eb09d7,
+ 0x1b80, 0xe34509e5,
+ 0x1b80, 0xe34509e7,
+ 0x1b80, 0x6c4409f5,
+ 0x1b80, 0x6c4409f7,
+ 0x1b80, 0xe2eb0a05,
+ 0x1b80, 0xe2eb0a07,
+ 0x1b80, 0xe3450a15,
+ 0x1b80, 0xe3450a17,
+ 0x1b80, 0x6c640a25,
+ 0x1b80, 0x6c640a27,
+ 0x1b80, 0xe2eb0a35,
+ 0x1b80, 0xe2eb0a37,
+ 0x1b80, 0xe3450a45,
+ 0x1b80, 0xe3450a47,
+ 0x1b80, 0x0baa0a55,
+ 0x1b80, 0x0baa0a57,
+ 0x1b80, 0x6c840a65,
+ 0x1b80, 0x6c840a67,
+ 0x1b80, 0x6d0f0a75,
+ 0x1b80, 0x6d0f0a77,
+ 0x1b80, 0xe2eb0a85,
+ 0x1b80, 0xe2eb0a87,
+ 0x1b80, 0xe3450a95,
+ 0x1b80, 0xe3450a97,
+ 0x1b80, 0x6ca40aa5,
+ 0x1b80, 0x6ca40aa7,
+ 0x1b80, 0xe2eb0ab5,
+ 0x1b80, 0xe2eb0ab7,
+ 0x1b80, 0xe3450ac5,
+ 0x1b80, 0xe3450ac7,
+ 0x1b80, 0x0bac0ad5,
+ 0x1b80, 0x0bac0ad7,
+ 0x1b80, 0x6cc40ae5,
+ 0x1b80, 0x6cc40ae7,
+ 0x1b80, 0x6d0f0af5,
+ 0x1b80, 0x6d0f0af7,
+ 0x1b80, 0xe2eb0b05,
+ 0x1b80, 0xe2eb0b07,
+ 0x1b80, 0xe3450b15,
+ 0x1b80, 0xe3450b17,
+ 0x1b80, 0x6ce40b25,
+ 0x1b80, 0x6ce40b27,
+ 0x1b80, 0xe2eb0b35,
+ 0x1b80, 0xe2eb0b37,
+ 0x1b80, 0xe3450b45,
+ 0x1b80, 0xe3450b47,
+ 0x1b80, 0x6cf40b55,
+ 0x1b80, 0x6cf40b57,
+ 0x1b80, 0xe2eb0b65,
+ 0x1b80, 0xe2eb0b67,
+ 0x1b80, 0xe3450b75,
+ 0x1b80, 0xe3450b77,
+ 0x1b80, 0x6c0c0b85,
+ 0x1b80, 0x6c0c0b87,
+ 0x1b80, 0x6d000b95,
+ 0x1b80, 0x6d000b97,
+ 0x1b80, 0xe2eb0ba5,
+ 0x1b80, 0xe2eb0ba7,
+ 0x1b80, 0xe3450bb5,
+ 0x1b80, 0xe3450bb7,
+ 0x1b80, 0x6c1c0bc5,
+ 0x1b80, 0x6c1c0bc7,
+ 0x1b80, 0xe2eb0bd5,
+ 0x1b80, 0xe2eb0bd7,
+ 0x1b80, 0xe3450be5,
+ 0x1b80, 0xe3450be7,
+ 0x1b80, 0x6c3c0bf5,
+ 0x1b80, 0x6c3c0bf7,
+ 0x1b80, 0xe2eb0c05,
+ 0x1b80, 0xe2eb0c07,
+ 0x1b80, 0xe3450c15,
+ 0x1b80, 0xe3450c17,
+ 0x1b80, 0xf4bf0c25,
+ 0x1b80, 0xf4bf0c27,
+ 0x1b80, 0xf7be0c35,
+ 0x1b80, 0xf7be0c37,
+ 0x1b80, 0x6c5c0c45,
+ 0x1b80, 0x6c5c0c47,
+ 0x1b80, 0xe2eb0c55,
+ 0x1b80, 0xe2eb0c57,
+ 0x1b80, 0xe3450c65,
+ 0x1b80, 0xe3450c67,
+ 0x1b80, 0x6c7c0c75,
+ 0x1b80, 0x6c7c0c77,
+ 0x1b80, 0xe2eb0c85,
+ 0x1b80, 0xe2eb0c87,
+ 0x1b80, 0xe3450c95,
+ 0x1b80, 0xe3450c97,
+ 0x1b80, 0xf5c30ca5,
+ 0x1b80, 0xf5c30ca7,
+ 0x1b80, 0xf8c20cb5,
+ 0x1b80, 0xf8c20cb7,
+ 0x1b80, 0x6c9c0cc5,
+ 0x1b80, 0x6c9c0cc7,
+ 0x1b80, 0xe2eb0cd5,
+ 0x1b80, 0xe2eb0cd7,
+ 0x1b80, 0xe3450ce5,
+ 0x1b80, 0xe3450ce7,
+ 0x1b80, 0x6cbc0cf5,
+ 0x1b80, 0x6cbc0cf7,
+ 0x1b80, 0xe2eb0d05,
+ 0x1b80, 0xe2eb0d07,
+ 0x1b80, 0xe3450d15,
+ 0x1b80, 0xe3450d17,
+ 0x1b80, 0x6cdc0d25,
+ 0x1b80, 0x6cdc0d27,
+ 0x1b80, 0xe2eb0d35,
+ 0x1b80, 0xe2eb0d37,
+ 0x1b80, 0xe3450d45,
+ 0x1b80, 0xe3450d47,
+ 0x1b80, 0x6cf00d55,
+ 0x1b80, 0x6cf00d57,
+ 0x1b80, 0xe2eb0d65,
+ 0x1b80, 0xe2eb0d67,
+ 0x1b80, 0xe3450d75,
+ 0x1b80, 0xe3450d77,
+ 0x1b80, 0x63c30d85,
+ 0x1b80, 0x63c30d87,
+ 0x1b80, 0x55010d95,
+ 0x1b80, 0x55010d97,
+ 0x1b80, 0x57040da5,
+ 0x1b80, 0x57040da7,
+ 0x1b80, 0x57000db5,
+ 0x1b80, 0x57000db7,
+ 0x1b80, 0x96000dc5,
+ 0x1b80, 0x96000dc7,
+ 0x1b80, 0x57080dd5,
+ 0x1b80, 0x57080dd7,
+ 0x1b80, 0x57000de5,
+ 0x1b80, 0x57000de7,
+ 0x1b80, 0x95000df5,
+ 0x1b80, 0x95000df7,
+ 0x1b80, 0x4d000e05,
+ 0x1b80, 0x4d000e07,
+ 0x1b80, 0x63050e15,
+ 0x1b80, 0x63050e17,
+ 0x1b80, 0x7b400e25,
+ 0x1b80, 0x7b400e27,
+ 0x1b80, 0x7a000e35,
+ 0x1b80, 0x7a000e37,
+ 0x1b80, 0x79000e45,
+ 0x1b80, 0x79000e47,
+ 0x1b80, 0x7f400e55,
+ 0x1b80, 0x7f400e57,
+ 0x1b80, 0x7e000e65,
+ 0x1b80, 0x7e000e67,
+ 0x1b80, 0x7d000e75,
+ 0x1b80, 0x7d000e77,
+ 0x1b80, 0x00010e85,
+ 0x1b80, 0x00010e87,
+ 0x1b80, 0xe3170e95,
+ 0x1b80, 0xe3170e97,
+ 0x1b80, 0x00010ea5,
+ 0x1b80, 0x00010ea7,
+ 0x1b80, 0x5c320eb5,
+ 0x1b80, 0x5c320eb7,
+ 0x1b80, 0xe3410ec5,
+ 0x1b80, 0xe3410ec7,
+ 0x1b80, 0xe3170ed5,
+ 0x1b80, 0xe3170ed7,
+ 0x1b80, 0x00010ee5,
+ 0x1b80, 0x00010ee7,
+ 0x1b80, 0x31260ef5,
+ 0x1b80, 0x31260ef7,
+ 0x1b80, 0x00260f05,
+ 0x1b80, 0x00260f07,
+ 0x1b80, 0xe34a0f15,
+ 0x1b80, 0xe34a0f17,
+ 0x1b80, 0x00020f25,
+ 0x1b80, 0x00020f27,
+ 0x1b80, 0x54ec0f35,
+ 0x1b80, 0x54ec0f37,
+ 0x1b80, 0x0ba60f45,
+ 0x1b80, 0x0ba60f47,
+ 0x1b80, 0x00260f55,
+ 0x1b80, 0x00260f57,
+ 0x1b80, 0xe34a0f65,
+ 0x1b80, 0xe34a0f67,
+ 0x1b80, 0x00020f75,
+ 0x1b80, 0x00020f77,
+ 0x1b80, 0x63830f85,
+ 0x1b80, 0x63830f87,
+ 0x1b80, 0x30d90f95,
+ 0x1b80, 0x30d90f97,
+ 0x1b80, 0x311a0fa5,
+ 0x1b80, 0x311a0fa7,
+ 0x1b80, 0x00240fb5,
+ 0x1b80, 0x00240fb7,
+ 0x1b80, 0xe34a0fc5,
+ 0x1b80, 0xe34a0fc7,
+ 0x1b80, 0x00020fd5,
+ 0x1b80, 0x00020fd7,
+ 0x1b80, 0x54ea0fe5,
+ 0x1b80, 0x54ea0fe7,
+ 0x1b80, 0x0ba60ff5,
+ 0x1b80, 0x0ba60ff7,
+ 0x1b80, 0x00241005,
+ 0x1b80, 0x00241007,
+ 0x1b80, 0xe34a1015,
+ 0x1b80, 0xe34a1017,
+ 0x1b80, 0x00021025,
+ 0x1b80, 0x00021027,
+ 0x1b80, 0x63831035,
+ 0x1b80, 0x63831037,
+ 0x1b80, 0x30d91045,
+ 0x1b80, 0x30d91047,
+ 0x1b80, 0x5c321055,
+ 0x1b80, 0x5c321057,
+ 0x1b80, 0x54e61065,
+ 0x1b80, 0x54e61067,
+ 0x1b80, 0x6e101075,
+ 0x1b80, 0x6e101077,
+ 0x1b80, 0x6f0f1085,
+ 0x1b80, 0x6f0f1087,
+ 0x1b80, 0xe3171095,
+ 0x1b80, 0xe3171097,
+ 0x1b80, 0xe34a10a5,
+ 0x1b80, 0xe34a10a7,
+ 0x1b80, 0x5c3210b5,
+ 0x1b80, 0x5c3210b7,
+ 0x1b80, 0x54e710c5,
+ 0x1b80, 0x54e710c7,
+ 0x1b80, 0x6e2410d5,
+ 0x1b80, 0x6e2410d7,
+ 0x1b80, 0xe31710e5,
+ 0x1b80, 0xe31710e7,
+ 0x1b80, 0xe34a10f5,
+ 0x1b80, 0xe34a10f7,
+ 0x1b80, 0x5c321105,
+ 0x1b80, 0x5c321107,
+ 0x1b80, 0x54e81115,
+ 0x1b80, 0x54e81117,
+ 0x1b80, 0x6e441125,
+ 0x1b80, 0x6e441127,
+ 0x1b80, 0xe3171135,
+ 0x1b80, 0xe3171137,
+ 0x1b80, 0xe34a1145,
+ 0x1b80, 0xe34a1147,
+ 0x1b80, 0x5c321155,
+ 0x1b80, 0x5c321157,
+ 0x1b80, 0x54e91165,
+ 0x1b80, 0x54e91167,
+ 0x1b80, 0x6e641175,
+ 0x1b80, 0x6e641177,
+ 0x1b80, 0xe3171185,
+ 0x1b80, 0xe3171187,
+ 0x1b80, 0xe34a1195,
+ 0x1b80, 0xe34a1197,
+ 0x1b80, 0x5c3211a5,
+ 0x1b80, 0x5c3211a7,
+ 0x1b80, 0x54ea11b5,
+ 0x1b80, 0x54ea11b7,
+ 0x1b80, 0x0baa11c5,
+ 0x1b80, 0x0baa11c7,
+ 0x1b80, 0x6e8411d5,
+ 0x1b80, 0x6e8411d7,
+ 0x1b80, 0x6f0f11e5,
+ 0x1b80, 0x6f0f11e7,
+ 0x1b80, 0xe31711f5,
+ 0x1b80, 0xe31711f7,
+ 0x1b80, 0xe34a1205,
+ 0x1b80, 0xe34a1207,
+ 0x1b80, 0x5c321215,
+ 0x1b80, 0x5c321217,
+ 0x1b80, 0x54eb1225,
+ 0x1b80, 0x54eb1227,
+ 0x1b80, 0x6ea41235,
+ 0x1b80, 0x6ea41237,
+ 0x1b80, 0xe3171245,
+ 0x1b80, 0xe3171247,
+ 0x1b80, 0xe34a1255,
+ 0x1b80, 0xe34a1257,
+ 0x1b80, 0x5c321265,
+ 0x1b80, 0x5c321267,
+ 0x1b80, 0x54ec1275,
+ 0x1b80, 0x54ec1277,
+ 0x1b80, 0x0bac1285,
+ 0x1b80, 0x0bac1287,
+ 0x1b80, 0x6ec41295,
+ 0x1b80, 0x6ec41297,
+ 0x1b80, 0x6f0f12a5,
+ 0x1b80, 0x6f0f12a7,
+ 0x1b80, 0xe31712b5,
+ 0x1b80, 0xe31712b7,
+ 0x1b80, 0xe34a12c5,
+ 0x1b80, 0xe34a12c7,
+ 0x1b80, 0x5c3212d5,
+ 0x1b80, 0x5c3212d7,
+ 0x1b80, 0x54ed12e5,
+ 0x1b80, 0x54ed12e7,
+ 0x1b80, 0x6ee412f5,
+ 0x1b80, 0x6ee412f7,
+ 0x1b80, 0xe3171305,
+ 0x1b80, 0xe3171307,
+ 0x1b80, 0xe34a1315,
+ 0x1b80, 0xe34a1317,
+ 0x1b80, 0x5c321325,
+ 0x1b80, 0x5c321327,
+ 0x1b80, 0x54ee1335,
+ 0x1b80, 0x54ee1337,
+ 0x1b80, 0x6ef41345,
+ 0x1b80, 0x6ef41347,
+ 0x1b80, 0xe3171355,
+ 0x1b80, 0xe3171357,
+ 0x1b80, 0xe34a1365,
+ 0x1b80, 0xe34a1367,
+ 0x1b80, 0x5c321375,
+ 0x1b80, 0x5c321377,
+ 0x1b80, 0x54ef1385,
+ 0x1b80, 0x54ef1387,
+ 0x1b80, 0x6e0c1395,
+ 0x1b80, 0x6e0c1397,
+ 0x1b80, 0x6f0013a5,
+ 0x1b80, 0x6f0013a7,
+ 0x1b80, 0xe31713b5,
+ 0x1b80, 0xe31713b7,
+ 0x1b80, 0xe34a13c5,
+ 0x1b80, 0xe34a13c7,
+ 0x1b80, 0x5c3213d5,
+ 0x1b80, 0x5c3213d7,
+ 0x1b80, 0x54f013e5,
+ 0x1b80, 0x54f013e7,
+ 0x1b80, 0x6e1c13f5,
+ 0x1b80, 0x6e1c13f7,
+ 0x1b80, 0xe3171405,
+ 0x1b80, 0xe3171407,
+ 0x1b80, 0xe34a1415,
+ 0x1b80, 0xe34a1417,
+ 0x1b80, 0x5c321425,
+ 0x1b80, 0x5c321427,
+ 0x1b80, 0x54f11435,
+ 0x1b80, 0x54f11437,
+ 0x1b80, 0x6e3c1445,
+ 0x1b80, 0x6e3c1447,
+ 0x1b80, 0xe3171455,
+ 0x1b80, 0xe3171457,
+ 0x1b80, 0xe34a1465,
+ 0x1b80, 0xe34a1467,
+ 0x1b80, 0xfaa91475,
+ 0x1b80, 0xfaa91477,
+ 0x1b80, 0x5c321485,
+ 0x1b80, 0x5c321487,
+ 0x1b80, 0x54f21495,
+ 0x1b80, 0x54f21497,
+ 0x1b80, 0x6e5c14a5,
+ 0x1b80, 0x6e5c14a7,
+ 0x1b80, 0xe31714b5,
+ 0x1b80, 0xe31714b7,
+ 0x1b80, 0xe34a14c5,
+ 0x1b80, 0xe34a14c7,
+ 0x1b80, 0x5c3214d5,
+ 0x1b80, 0x5c3214d7,
+ 0x1b80, 0x54f314e5,
+ 0x1b80, 0x54f314e7,
+ 0x1b80, 0x6e7c14f5,
+ 0x1b80, 0x6e7c14f7,
+ 0x1b80, 0xe3171505,
+ 0x1b80, 0xe3171507,
+ 0x1b80, 0xe34a1515,
+ 0x1b80, 0xe34a1517,
+ 0x1b80, 0xfba91525,
+ 0x1b80, 0xfba91527,
+ 0x1b80, 0x5c321535,
+ 0x1b80, 0x5c321537,
+ 0x1b80, 0x54f41545,
+ 0x1b80, 0x54f41547,
+ 0x1b80, 0x6e9c1555,
+ 0x1b80, 0x6e9c1557,
+ 0x1b80, 0xe3171565,
+ 0x1b80, 0xe3171567,
+ 0x1b80, 0xe34a1575,
+ 0x1b80, 0xe34a1577,
+ 0x1b80, 0x5c321585,
+ 0x1b80, 0x5c321587,
+ 0x1b80, 0x54f51595,
+ 0x1b80, 0x54f51597,
+ 0x1b80, 0x6ebc15a5,
+ 0x1b80, 0x6ebc15a7,
+ 0x1b80, 0xe31715b5,
+ 0x1b80, 0xe31715b7,
+ 0x1b80, 0xe34a15c5,
+ 0x1b80, 0xe34a15c7,
+ 0x1b80, 0x5c3215d5,
+ 0x1b80, 0x5c3215d7,
+ 0x1b80, 0x54f615e5,
+ 0x1b80, 0x54f615e7,
+ 0x1b80, 0x6edc15f5,
+ 0x1b80, 0x6edc15f7,
+ 0x1b80, 0xe3171605,
+ 0x1b80, 0xe3171607,
+ 0x1b80, 0xe34a1615,
+ 0x1b80, 0xe34a1617,
+ 0x1b80, 0x5c321625,
+ 0x1b80, 0x5c321627,
+ 0x1b80, 0x54f71635,
+ 0x1b80, 0x54f71637,
+ 0x1b80, 0x6ef01645,
+ 0x1b80, 0x6ef01647,
+ 0x1b80, 0xe3171655,
+ 0x1b80, 0xe3171657,
+ 0x1b80, 0xe34a1665,
+ 0x1b80, 0xe34a1667,
+ 0x1b80, 0x63831675,
+ 0x1b80, 0x63831677,
+ 0x1b80, 0x30d91685,
+ 0x1b80, 0x30d91687,
+ 0x1b80, 0x00011695,
+ 0x1b80, 0x00011697,
+ 0x1b80, 0x000416a5,
+ 0x1b80, 0x000416a7,
+ 0x1b80, 0x550116b5,
+ 0x1b80, 0x550116b7,
+ 0x1b80, 0x5c3116c5,
+ 0x1b80, 0x5c3116c7,
+ 0x1b80, 0x5f8216d5,
+ 0x1b80, 0x5f8216d7,
+ 0x1b80, 0x660516e5,
+ 0x1b80, 0x660516e7,
+ 0x1b80, 0x000616f5,
+ 0x1b80, 0x000616f7,
+ 0x1b80, 0x5d801705,
+ 0x1b80, 0x5d801707,
+ 0x1b80, 0x09001715,
+ 0x1b80, 0x09001717,
+ 0x1b80, 0x0a011725,
+ 0x1b80, 0x0a011727,
+ 0x1b80, 0x0b401735,
+ 0x1b80, 0x0b401737,
+ 0x1b80, 0x0d001745,
+ 0x1b80, 0x0d001747,
+ 0x1b80, 0x0f011755,
+ 0x1b80, 0x0f011757,
+ 0x1b80, 0x002a1765,
+ 0x1b80, 0x002a1767,
+ 0x1b80, 0x055a1775,
+ 0x1b80, 0x055a1777,
+ 0x1b80, 0x05db1785,
+ 0x1b80, 0x05db1787,
+ 0x1b80, 0xe3351795,
+ 0x1b80, 0xe3351797,
+ 0x1b80, 0xe2e317a5,
+ 0x1b80, 0xe2e317a7,
+ 0x1b80, 0x000617b5,
+ 0x1b80, 0x000617b7,
+ 0x1b80, 0x06da17c5,
+ 0x1b80, 0x06da17c7,
+ 0x1b80, 0x07db17d5,
+ 0x1b80, 0x07db17d7,
+ 0x1b80, 0xe33517e5,
+ 0x1b80, 0xe33517e7,
+ 0x1b80, 0xe2e317f5,
+ 0x1b80, 0xe2e317f7,
+ 0x1b80, 0xe32c1805,
+ 0x1b80, 0xe32c1807,
+ 0x1b80, 0x00021815,
+ 0x1b80, 0x00021817,
+ 0x1b80, 0xe3311825,
+ 0x1b80, 0xe3311827,
+ 0x1b80, 0x5d001835,
+ 0x1b80, 0x5d001837,
+ 0x1b80, 0x00041845,
+ 0x1b80, 0x00041847,
+ 0x1b80, 0x5fa21855,
+ 0x1b80, 0x5fa21857,
+ 0x1b80, 0x00011865,
+ 0x1b80, 0x00011867,
+ 0x1b80, 0xe2571875,
+ 0x1b80, 0xe2571877,
+ 0x1b80, 0x74081885,
+ 0x1b80, 0x74081887,
+ 0x1b80, 0xe2a11895,
+ 0x1b80, 0xe2a11897,
+ 0x1b80, 0xe28318a5,
+ 0x1b80, 0xe28318a7,
+ 0x1b80, 0xe2c118b5,
+ 0x1b80, 0xe2c118b7,
+ 0x1b80, 0xb90018c5,
+ 0x1b80, 0xb90018c7,
+ 0x1b80, 0x990018d5,
+ 0x1b80, 0x990018d7,
+ 0x1b80, 0x000618e5,
+ 0x1b80, 0x000618e7,
+ 0x1b80, 0x770018f5,
+ 0x1b80, 0x770018f7,
+ 0x1b80, 0x00041905,
+ 0x1b80, 0x00041907,
+ 0x1b80, 0x49041915,
+ 0x1b80, 0x49041917,
+ 0x1b80, 0x4bb01925,
+ 0x1b80, 0x4bb01927,
+ 0x1b80, 0x00061935,
+ 0x1b80, 0x00061937,
+ 0x1b80, 0x75041945,
+ 0x1b80, 0x75041947,
+ 0x1b80, 0x77081955,
+ 0x1b80, 0x77081957,
+ 0x1b80, 0x00071965,
+ 0x1b80, 0x00071967,
+ 0x1b80, 0x77101975,
+ 0x1b80, 0x77101977,
+ 0x1b80, 0x00041985,
+ 0x1b80, 0x00041987,
+ 0x1b80, 0x44801995,
+ 0x1b80, 0x44801997,
+ 0x1b80, 0x45ff19a5,
+ 0x1b80, 0x45ff19a7,
+ 0x1b80, 0x463f19b5,
+ 0x1b80, 0x463f19b7,
+ 0x1b80, 0x473119c5,
+ 0x1b80, 0x473119c7,
+ 0x1b80, 0x400819d5,
+ 0x1b80, 0x400819d7,
+ 0x1b80, 0xe23e19e5,
+ 0x1b80, 0xe23e19e7,
+ 0x1b80, 0x000119f5,
+ 0x1b80, 0x000119f7,
+ 0x1b80, 0xe2571a05,
+ 0x1b80, 0xe2571a07,
+ 0x1b80, 0x74081a15,
+ 0x1b80, 0x74081a17,
+ 0x1b80, 0xe2b11a25,
+ 0x1b80, 0xe2b11a27,
+ 0x1b80, 0xe2831a35,
+ 0x1b80, 0xe2831a37,
+ 0x1b80, 0xe2c71a45,
+ 0x1b80, 0xe2c71a47,
+ 0x1b80, 0xb9001a55,
+ 0x1b80, 0xb9001a57,
+ 0x1b80, 0x99001a65,
+ 0x1b80, 0x99001a67,
+ 0x1b80, 0x00061a75,
+ 0x1b80, 0x00061a77,
+ 0x1b80, 0x77001a85,
+ 0x1b80, 0x77001a87,
+ 0x1b80, 0x00051a95,
+ 0x1b80, 0x00051a97,
+ 0x1b80, 0x61041aa5,
+ 0x1b80, 0x61041aa7,
+ 0x1b80, 0x63b01ab5,
+ 0x1b80, 0x63b01ab7,
+ 0x1b80, 0x00061ac5,
+ 0x1b80, 0x00061ac7,
+ 0x1b80, 0x75081ad5,
+ 0x1b80, 0x75081ad7,
+ 0x1b80, 0x77081ae5,
+ 0x1b80, 0x77081ae7,
+ 0x1b80, 0x00071af5,
+ 0x1b80, 0x00071af7,
+ 0x1b80, 0x77201b05,
+ 0x1b80, 0x77201b07,
+ 0x1b80, 0x00051b15,
+ 0x1b80, 0x00051b17,
+ 0x1b80, 0x5c801b25,
+ 0x1b80, 0x5c801b27,
+ 0x1b80, 0x5dff1b35,
+ 0x1b80, 0x5dff1b37,
+ 0x1b80, 0x5e3f1b45,
+ 0x1b80, 0x5e3f1b47,
+ 0x1b80, 0x5f311b55,
+ 0x1b80, 0x5f311b57,
+ 0x1b80, 0x00041b65,
+ 0x1b80, 0x00041b67,
+ 0x1b80, 0x400a1b75,
+ 0x1b80, 0x400a1b77,
+ 0x1b80, 0xe23e1b85,
+ 0x1b80, 0xe23e1b87,
+ 0x1b80, 0x00011b95,
+ 0x1b80, 0x00011b97,
+ 0x1b80, 0xe2571ba5,
+ 0x1b80, 0xe2571ba7,
+ 0x1b80, 0x74081bb5,
+ 0x1b80, 0x74081bb7,
+ 0x1b80, 0xe2a11bc5,
+ 0x1b80, 0xe2a11bc7,
+ 0x1b80, 0xe2831bd5,
+ 0x1b80, 0xe2831bd7,
+ 0x1b80, 0xe2c11be5,
+ 0x1b80, 0xe2c11be7,
+ 0x1b80, 0xe2cd1bf5,
+ 0x1b80, 0xe2cd1bf7,
+ 0x1b80, 0xe2101c05,
+ 0x1b80, 0xe2101c07,
+ 0x1b80, 0x00011c15,
+ 0x1b80, 0x00011c17,
+ 0x1b80, 0xe2571c25,
+ 0x1b80, 0xe2571c27,
+ 0x1b80, 0x74081c35,
+ 0x1b80, 0x74081c37,
+ 0x1b80, 0xe2b11c45,
+ 0x1b80, 0xe2b11c47,
+ 0x1b80, 0xe2831c55,
+ 0x1b80, 0xe2831c57,
+ 0x1b80, 0xe2c71c65,
+ 0x1b80, 0xe2c71c67,
+ 0x1b80, 0xe2cd1c75,
+ 0x1b80, 0xe2cd1c77,
+ 0x1b80, 0xe2261c85,
+ 0x1b80, 0xe2261c87,
+ 0x1b80, 0x00011c95,
+ 0x1b80, 0x00011c97,
+ 0x1b80, 0xe26d1ca5,
+ 0x1b80, 0xe26d1ca7,
+ 0x1b80, 0x74001cb5,
+ 0x1b80, 0x74001cb7,
+ 0x1b80, 0xe2a11cc5,
+ 0x1b80, 0xe2a11cc7,
+ 0x1b80, 0xe2921cd5,
+ 0x1b80, 0xe2921cd7,
+ 0x1b80, 0xe2c11ce5,
+ 0x1b80, 0xe2c11ce7,
+ 0x1b80, 0xe2cd1cf5,
+ 0x1b80, 0xe2cd1cf7,
+ 0x1b80, 0xe2101d05,
+ 0x1b80, 0xe2101d07,
+ 0x1b80, 0x00011d15,
+ 0x1b80, 0x00011d17,
+ 0x1b80, 0xe26d1d25,
+ 0x1b80, 0xe26d1d27,
+ 0x1b80, 0x74001d35,
+ 0x1b80, 0x74001d37,
+ 0x1b80, 0xe2b11d45,
+ 0x1b80, 0xe2b11d47,
+ 0x1b80, 0xe2921d55,
+ 0x1b80, 0xe2921d57,
+ 0x1b80, 0xe2c71d65,
+ 0x1b80, 0xe2c71d67,
+ 0x1b80, 0xe2cd1d75,
+ 0x1b80, 0xe2cd1d77,
+ 0x1b80, 0xe2261d85,
+ 0x1b80, 0xe2261d87,
+ 0x1b80, 0x00011d95,
+ 0x1b80, 0x00011d97,
+ 0x1b80, 0x00041da5,
+ 0x1b80, 0x00041da7,
+ 0x1b80, 0x445b1db5,
+ 0x1b80, 0x445b1db7,
+ 0x1b80, 0x47b01dc5,
+ 0x1b80, 0x47b01dc7,
+ 0x1b80, 0x47301dd5,
+ 0x1b80, 0x47301dd7,
+ 0x1b80, 0x47001de5,
+ 0x1b80, 0x47001de7,
+ 0x1b80, 0x00061df5,
+ 0x1b80, 0x00061df7,
+ 0x1b80, 0x77081e05,
+ 0x1b80, 0x77081e07,
+ 0x1b80, 0x00041e15,
+ 0x1b80, 0x00041e17,
+ 0x1b80, 0x49401e25,
+ 0x1b80, 0x49401e27,
+ 0x1b80, 0x4bb01e35,
+ 0x1b80, 0x4bb01e37,
+ 0x1b80, 0x00071e45,
+ 0x1b80, 0x00071e47,
+ 0x1b80, 0x54401e55,
+ 0x1b80, 0x54401e57,
+ 0x1b80, 0x00041e65,
+ 0x1b80, 0x00041e67,
+ 0x1b80, 0x40081e75,
+ 0x1b80, 0x40081e77,
+ 0x1b80, 0x00011e85,
+ 0x1b80, 0x00011e87,
+ 0x1b80, 0x00051e95,
+ 0x1b80, 0x00051e97,
+ 0x1b80, 0x5c5b1ea5,
+ 0x1b80, 0x5c5b1ea7,
+ 0x1b80, 0x5fb01eb5,
+ 0x1b80, 0x5fb01eb7,
+ 0x1b80, 0x5f301ec5,
+ 0x1b80, 0x5f301ec7,
+ 0x1b80, 0x5f001ed5,
+ 0x1b80, 0x5f001ed7,
+ 0x1b80, 0x00061ee5,
+ 0x1b80, 0x00061ee7,
+ 0x1b80, 0x77081ef5,
+ 0x1b80, 0x77081ef7,
+ 0x1b80, 0x00051f05,
+ 0x1b80, 0x00051f07,
+ 0x1b80, 0x61401f15,
+ 0x1b80, 0x61401f17,
+ 0x1b80, 0x63b01f25,
+ 0x1b80, 0x63b01f27,
+ 0x1b80, 0x00071f35,
+ 0x1b80, 0x00071f37,
+ 0x1b80, 0x54401f45,
+ 0x1b80, 0x54401f47,
+ 0x1b80, 0x00041f55,
+ 0x1b80, 0x00041f57,
+ 0x1b80, 0x40081f65,
+ 0x1b80, 0x40081f67,
+ 0x1b80, 0x00011f75,
+ 0x1b80, 0x00011f77,
+ 0x1b80, 0xe2571f85,
+ 0x1b80, 0xe2571f87,
+ 0x1b80, 0x74081f95,
+ 0x1b80, 0x74081f97,
+ 0x1b80, 0xe2a11fa5,
+ 0x1b80, 0xe2a11fa7,
+ 0x1b80, 0x00041fb5,
+ 0x1b80, 0x00041fb7,
+ 0x1b80, 0x40081fc5,
+ 0x1b80, 0x40081fc7,
+ 0x1b80, 0x00011fd5,
+ 0x1b80, 0x00011fd7,
+ 0x1b80, 0xe2571fe5,
+ 0x1b80, 0xe2571fe7,
+ 0x1b80, 0x74081ff5,
+ 0x1b80, 0x74081ff7,
+ 0x1b80, 0xe2b12005,
+ 0x1b80, 0xe2b12007,
+ 0x1b80, 0x00042015,
+ 0x1b80, 0x00042017,
+ 0x1b80, 0x40082025,
+ 0x1b80, 0x40082027,
+ 0x1b80, 0x00012035,
+ 0x1b80, 0x00012037,
+ 0x1b80, 0xe26d2045,
+ 0x1b80, 0xe26d2047,
+ 0x1b80, 0x74002055,
+ 0x1b80, 0x74002057,
+ 0x1b80, 0xe2a12065,
+ 0x1b80, 0xe2a12067,
+ 0x1b80, 0x00042075,
+ 0x1b80, 0x00042077,
+ 0x1b80, 0x40082085,
+ 0x1b80, 0x40082087,
+ 0x1b80, 0x00012095,
+ 0x1b80, 0x00012097,
+ 0x1b80, 0xe26d20a5,
+ 0x1b80, 0xe26d20a7,
+ 0x1b80, 0x740020b5,
+ 0x1b80, 0x740020b7,
+ 0x1b80, 0xe2b120c5,
+ 0x1b80, 0xe2b120c7,
+ 0x1b80, 0x000420d5,
+ 0x1b80, 0x000420d7,
+ 0x1b80, 0x400820e5,
+ 0x1b80, 0x400820e7,
+ 0x1b80, 0x000120f5,
+ 0x1b80, 0x000120f7,
+ 0x1b80, 0x00042105,
+ 0x1b80, 0x00042107,
+ 0x1b80, 0x49042115,
+ 0x1b80, 0x49042117,
+ 0x1b80, 0x4bb02125,
+ 0x1b80, 0x4bb02127,
+ 0x1b80, 0x00062135,
+ 0x1b80, 0x00062137,
+ 0x1b80, 0x75042145,
+ 0x1b80, 0x75042147,
+ 0x1b80, 0x77082155,
+ 0x1b80, 0x77082157,
+ 0x1b80, 0x00042165,
+ 0x1b80, 0x00042167,
+ 0x1b80, 0x44802175,
+ 0x1b80, 0x44802177,
+ 0x1b80, 0x45ff2185,
+ 0x1b80, 0x45ff2187,
+ 0x1b80, 0x463f2195,
+ 0x1b80, 0x463f2197,
+ 0x1b80, 0x473121a5,
+ 0x1b80, 0x473121a7,
+ 0x1b80, 0x400821b5,
+ 0x1b80, 0x400821b7,
+ 0x1b80, 0xe23e21c5,
+ 0x1b80, 0xe23e21c7,
+ 0x1b80, 0x000421d5,
+ 0x1b80, 0x000421d7,
+ 0x1b80, 0x400c21e5,
+ 0x1b80, 0x400c21e7,
+ 0x1b80, 0x000621f5,
+ 0x1b80, 0x000621f7,
+ 0x1b80, 0x75002205,
+ 0x1b80, 0x75002207,
+ 0x1b80, 0x00042215,
+ 0x1b80, 0x00042217,
+ 0x1b80, 0x445b2225,
+ 0x1b80, 0x445b2227,
+ 0x1b80, 0x47002235,
+ 0x1b80, 0x47002237,
+ 0x1b80, 0x40082245,
+ 0x1b80, 0x40082247,
+ 0x1b80, 0x00012255,
+ 0x1b80, 0x00012257,
+ 0x1b80, 0x00052265,
+ 0x1b80, 0x00052267,
+ 0x1b80, 0x61042275,
+ 0x1b80, 0x61042277,
+ 0x1b80, 0x63b02285,
+ 0x1b80, 0x63b02287,
+ 0x1b80, 0x00062295,
+ 0x1b80, 0x00062297,
+ 0x1b80, 0x750822a5,
+ 0x1b80, 0x750822a7,
+ 0x1b80, 0x770822b5,
+ 0x1b80, 0x770822b7,
+ 0x1b80, 0x000522c5,
+ 0x1b80, 0x000522c7,
+ 0x1b80, 0x5c8022d5,
+ 0x1b80, 0x5c8022d7,
+ 0x1b80, 0x5dff22e5,
+ 0x1b80, 0x5dff22e7,
+ 0x1b80, 0x5e3f22f5,
+ 0x1b80, 0x5e3f22f7,
+ 0x1b80, 0x5f312305,
+ 0x1b80, 0x5f312307,
+ 0x1b80, 0x00042315,
+ 0x1b80, 0x00042317,
+ 0x1b80, 0x400a2325,
+ 0x1b80, 0x400a2327,
+ 0x1b80, 0xe23e2335,
+ 0x1b80, 0xe23e2337,
+ 0x1b80, 0x00042345,
+ 0x1b80, 0x00042347,
+ 0x1b80, 0x400c2355,
+ 0x1b80, 0x400c2357,
+ 0x1b80, 0x00062365,
+ 0x1b80, 0x00062367,
+ 0x1b80, 0x75002375,
+ 0x1b80, 0x75002377,
+ 0x1b80, 0x00052385,
+ 0x1b80, 0x00052387,
+ 0x1b80, 0x5c5b2395,
+ 0x1b80, 0x5c5b2397,
+ 0x1b80, 0x5f0023a5,
+ 0x1b80, 0x5f0023a7,
+ 0x1b80, 0x000423b5,
+ 0x1b80, 0x000423b7,
+ 0x1b80, 0x400823c5,
+ 0x1b80, 0x400823c7,
+ 0x1b80, 0x000123d5,
+ 0x1b80, 0x000123d7,
+ 0x1b80, 0x000723e5,
+ 0x1b80, 0x000723e7,
+ 0x1b80, 0x4c1223f5,
+ 0x1b80, 0x4c1223f7,
+ 0x1b80, 0x4e202405,
+ 0x1b80, 0x4e202407,
+ 0x1b80, 0x00052415,
+ 0x1b80, 0x00052417,
+ 0x1b80, 0x598f2425,
+ 0x1b80, 0x598f2427,
+ 0x1b80, 0x40022435,
+ 0x1b80, 0x40022437,
+ 0x1b80, 0x4c012445,
+ 0x1b80, 0x4c012447,
+ 0x1b80, 0x4c002455,
+ 0x1b80, 0x4c002457,
+ 0x1b80, 0xab002465,
+ 0x1b80, 0xab002467,
+ 0x1b80, 0x40032475,
+ 0x1b80, 0x40032477,
+ 0x1b80, 0x49802485,
+ 0x1b80, 0x49802487,
+ 0x1b80, 0x56c02495,
+ 0x1b80, 0x56c02497,
+ 0x1b80, 0x540224a5,
+ 0x1b80, 0x540224a7,
+ 0x1b80, 0x4c0124b5,
+ 0x1b80, 0x4c0124b7,
+ 0x1b80, 0x4c0024c5,
+ 0x1b80, 0x4c0024c7,
+ 0x1b80, 0xab0024d5,
+ 0x1b80, 0xab0024d7,
+ 0x1b80, 0x540024e5,
+ 0x1b80, 0x540024e7,
+ 0x1b80, 0x000724f5,
+ 0x1b80, 0x000724f7,
+ 0x1b80, 0x4c002505,
+ 0x1b80, 0x4c002507,
+ 0x1b80, 0x4e002515,
+ 0x1b80, 0x4e002517,
+ 0x1b80, 0x00052525,
+ 0x1b80, 0x00052527,
+ 0x1b80, 0x40042535,
+ 0x1b80, 0x40042537,
+ 0x1b80, 0x4c012545,
+ 0x1b80, 0x4c012547,
+ 0x1b80, 0x4c002555,
+ 0x1b80, 0x4c002557,
+ 0x1b80, 0x00012565,
+ 0x1b80, 0x00012567,
+ 0x1b80, 0x00042575,
+ 0x1b80, 0x00042577,
+ 0x1b80, 0x44802585,
+ 0x1b80, 0x44802587,
+ 0x1b80, 0x4b002595,
+ 0x1b80, 0x4b002597,
+ 0x1b80, 0x000525a5,
+ 0x1b80, 0x000525a7,
+ 0x1b80, 0x5c8025b5,
+ 0x1b80, 0x5c8025b7,
+ 0x1b80, 0x630025c5,
+ 0x1b80, 0x630025c7,
+ 0x1b80, 0x000725d5,
+ 0x1b80, 0x000725d7,
+ 0x1b80, 0x780c25e5,
+ 0x1b80, 0x780c25e7,
+ 0x1b80, 0x791925f5,
+ 0x1b80, 0x791925f7,
+ 0x1b80, 0x7a002605,
+ 0x1b80, 0x7a002607,
+ 0x1b80, 0x7b822615,
+ 0x1b80, 0x7b822617,
+ 0x1b80, 0x7b022625,
+ 0x1b80, 0x7b022627,
+ 0x1b80, 0x78142635,
+ 0x1b80, 0x78142637,
+ 0x1b80, 0x79ee2645,
+ 0x1b80, 0x79ee2647,
+ 0x1b80, 0x7a012655,
+ 0x1b80, 0x7a012657,
+ 0x1b80, 0x7b832665,
+ 0x1b80, 0x7b832667,
+ 0x1b80, 0x7b032675,
+ 0x1b80, 0x7b032677,
+ 0x1b80, 0x78282685,
+ 0x1b80, 0x78282687,
+ 0x1b80, 0x79b42695,
+ 0x1b80, 0x79b42697,
+ 0x1b80, 0x7a0026a5,
+ 0x1b80, 0x7a0026a7,
+ 0x1b80, 0x7b0026b5,
+ 0x1b80, 0x7b0026b7,
+ 0x1b80, 0x000126c5,
+ 0x1b80, 0x000126c7,
+ 0x1b80, 0x000426d5,
+ 0x1b80, 0x000426d7,
+ 0x1b80, 0x448026e5,
+ 0x1b80, 0x448026e7,
+ 0x1b80, 0x4b0026f5,
+ 0x1b80, 0x4b0026f7,
+ 0x1b80, 0x00052705,
+ 0x1b80, 0x00052707,
+ 0x1b80, 0x5c802715,
+ 0x1b80, 0x5c802717,
+ 0x1b80, 0x63002725,
+ 0x1b80, 0x63002727,
+ 0x1b80, 0x00072735,
+ 0x1b80, 0x00072737,
+ 0x1b80, 0x78102745,
+ 0x1b80, 0x78102747,
+ 0x1b80, 0x79132755,
+ 0x1b80, 0x79132757,
+ 0x1b80, 0x7a002765,
+ 0x1b80, 0x7a002767,
+ 0x1b80, 0x7b802775,
+ 0x1b80, 0x7b802777,
+ 0x1b80, 0x7b002785,
+ 0x1b80, 0x7b002787,
+ 0x1b80, 0x78db2795,
+ 0x1b80, 0x78db2797,
+ 0x1b80, 0x790027a5,
+ 0x1b80, 0x790027a7,
+ 0x1b80, 0x7a0027b5,
+ 0x1b80, 0x7a0027b7,
+ 0x1b80, 0x7b8127c5,
+ 0x1b80, 0x7b8127c7,
+ 0x1b80, 0x7b0127d5,
+ 0x1b80, 0x7b0127d7,
+ 0x1b80, 0x782827e5,
+ 0x1b80, 0x782827e7,
+ 0x1b80, 0x79b427f5,
+ 0x1b80, 0x79b427f7,
+ 0x1b80, 0x7a002805,
+ 0x1b80, 0x7a002807,
+ 0x1b80, 0x7b002815,
+ 0x1b80, 0x7b002817,
+ 0x1b80, 0x00012825,
+ 0x1b80, 0x00012827,
+ 0x1b80, 0x00072835,
+ 0x1b80, 0x00072837,
+ 0x1b80, 0x783e2845,
+ 0x1b80, 0x783e2847,
+ 0x1b80, 0x79f92855,
+ 0x1b80, 0x79f92857,
+ 0x1b80, 0x7a012865,
+ 0x1b80, 0x7a012867,
+ 0x1b80, 0x7b822875,
+ 0x1b80, 0x7b822877,
+ 0x1b80, 0x7b022885,
+ 0x1b80, 0x7b022887,
+ 0x1b80, 0x78a92895,
+ 0x1b80, 0x78a92897,
+ 0x1b80, 0x79ed28a5,
+ 0x1b80, 0x79ed28a7,
+ 0x1b80, 0x7b8328b5,
+ 0x1b80, 0x7b8328b7,
+ 0x1b80, 0x7b0328c5,
+ 0x1b80, 0x7b0328c7,
+ 0x1b80, 0x782828d5,
+ 0x1b80, 0x782828d7,
+ 0x1b80, 0x79b428e5,
+ 0x1b80, 0x79b428e7,
+ 0x1b80, 0x7a0028f5,
+ 0x1b80, 0x7a0028f7,
+ 0x1b80, 0x7b002905,
+ 0x1b80, 0x7b002907,
+ 0x1b80, 0x00012915,
+ 0x1b80, 0x00012917,
+ 0x1b80, 0x00072925,
+ 0x1b80, 0x00072927,
+ 0x1b80, 0x78ae2935,
+ 0x1b80, 0x78ae2937,
+ 0x1b80, 0x79fa2945,
+ 0x1b80, 0x79fa2947,
+ 0x1b80, 0x7a012955,
+ 0x1b80, 0x7a012957,
+ 0x1b80, 0x7b802965,
+ 0x1b80, 0x7b802967,
+ 0x1b80, 0x7b002975,
+ 0x1b80, 0x7b002977,
+ 0x1b80, 0x787a2985,
+ 0x1b80, 0x787a2987,
+ 0x1b80, 0x79f12995,
+ 0x1b80, 0x79f12997,
+ 0x1b80, 0x7b8129a5,
+ 0x1b80, 0x7b8129a7,
+ 0x1b80, 0x7b0129b5,
+ 0x1b80, 0x7b0129b7,
+ 0x1b80, 0x782829c5,
+ 0x1b80, 0x782829c7,
+ 0x1b80, 0x79b429d5,
+ 0x1b80, 0x79b429d7,
+ 0x1b80, 0x7a0029e5,
+ 0x1b80, 0x7a0029e7,
+ 0x1b80, 0x7b0029f5,
+ 0x1b80, 0x7b0029f7,
+ 0x1b80, 0x00012a05,
+ 0x1b80, 0x00012a07,
+ 0x1b80, 0x00072a15,
+ 0x1b80, 0x00072a17,
+ 0x1b80, 0x75002a25,
+ 0x1b80, 0x75002a27,
+ 0x1b80, 0x76022a35,
+ 0x1b80, 0x76022a37,
+ 0x1b80, 0x77152a45,
+ 0x1b80, 0x77152a47,
+ 0x1b80, 0x00062a55,
+ 0x1b80, 0x00062a57,
+ 0x1b80, 0x74002a65,
+ 0x1b80, 0x74002a67,
+ 0x1b80, 0x76002a75,
+ 0x1b80, 0x76002a77,
+ 0x1b80, 0x77002a85,
+ 0x1b80, 0x77002a87,
+ 0x1b80, 0x75102a95,
+ 0x1b80, 0x75102a97,
+ 0x1b80, 0x75002aa5,
+ 0x1b80, 0x75002aa7,
+ 0x1b80, 0xb3002ab5,
+ 0x1b80, 0xb3002ab7,
+ 0x1b80, 0x93002ac5,
+ 0x1b80, 0x93002ac7,
+ 0x1b80, 0x00072ad5,
+ 0x1b80, 0x00072ad7,
+ 0x1b80, 0x76002ae5,
+ 0x1b80, 0x76002ae7,
+ 0x1b80, 0x77002af5,
+ 0x1b80, 0x77002af7,
+ 0x1b80, 0x00012b05,
+ 0x1b80, 0x00012b07,
+ 0x1b80, 0x00072b15,
+ 0x1b80, 0x00072b17,
+ 0x1b80, 0x75002b25,
+ 0x1b80, 0x75002b27,
+ 0x1b80, 0x76022b35,
+ 0x1b80, 0x76022b37,
+ 0x1b80, 0x77252b45,
+ 0x1b80, 0x77252b47,
+ 0x1b80, 0x00062b55,
+ 0x1b80, 0x00062b57,
+ 0x1b80, 0x74002b65,
+ 0x1b80, 0x74002b67,
+ 0x1b80, 0x76002b75,
+ 0x1b80, 0x76002b77,
+ 0x1b80, 0x77012b85,
+ 0x1b80, 0x77012b87,
+ 0x1b80, 0x75102b95,
+ 0x1b80, 0x75102b97,
+ 0x1b80, 0x75002ba5,
+ 0x1b80, 0x75002ba7,
+ 0x1b80, 0xb3002bb5,
+ 0x1b80, 0xb3002bb7,
+ 0x1b80, 0x93002bc5,
+ 0x1b80, 0x93002bc7,
+ 0x1b80, 0x00072bd5,
+ 0x1b80, 0x00072bd7,
+ 0x1b80, 0x76002be5,
+ 0x1b80, 0x76002be7,
+ 0x1b80, 0x77002bf5,
+ 0x1b80, 0x77002bf7,
+ 0x1b80, 0x00012c05,
+ 0x1b80, 0x00012c07,
+ 0x1b80, 0x00042c15,
+ 0x1b80, 0x00042c17,
+ 0x1b80, 0x44802c25,
+ 0x1b80, 0x44802c27,
+ 0x1b80, 0x47302c35,
+ 0x1b80, 0x47302c37,
+ 0x1b80, 0x00062c45,
+ 0x1b80, 0x00062c47,
+ 0x1b80, 0x776c2c55,
+ 0x1b80, 0x776c2c57,
+ 0x1b80, 0x00012c65,
+ 0x1b80, 0x00012c67,
+ 0x1b80, 0x00052c75,
+ 0x1b80, 0x00052c77,
+ 0x1b80, 0x5c802c85,
+ 0x1b80, 0x5c802c87,
+ 0x1b80, 0x5f302c95,
+ 0x1b80, 0x5f302c97,
+ 0x1b80, 0x00062ca5,
+ 0x1b80, 0x00062ca7,
+ 0x1b80, 0x776d2cb5,
+ 0x1b80, 0x776d2cb7,
+ 0x1b80, 0x00012cc5,
+ 0x1b80, 0x00012cc7,
+ 0x1b80, 0xb9002cd5,
+ 0x1b80, 0xb9002cd7,
+ 0x1b80, 0x99002ce5,
+ 0x1b80, 0x99002ce7,
+ 0x1b80, 0x00062cf5,
+ 0x1b80, 0x00062cf7,
+ 0x1b80, 0x77002d05,
+ 0x1b80, 0x77002d07,
+ 0x1b80, 0x98052d15,
+ 0x1b80, 0x98052d17,
+ 0x1b80, 0x00042d25,
+ 0x1b80, 0x00042d27,
+ 0x1b80, 0x40082d35,
+ 0x1b80, 0x40082d37,
+ 0x1b80, 0x4a022d45,
+ 0x1b80, 0x4a022d47,
+ 0x1b80, 0x30192d55,
+ 0x1b80, 0x30192d57,
+ 0x1b80, 0x00012d65,
+ 0x1b80, 0x00012d67,
+ 0x1b80, 0x7b482d75,
+ 0x1b80, 0x7b482d77,
+ 0x1b80, 0x7a902d85,
+ 0x1b80, 0x7a902d87,
+ 0x1b80, 0x79002d95,
+ 0x1b80, 0x79002d97,
+ 0x1b80, 0x55032da5,
+ 0x1b80, 0x55032da7,
+ 0x1b80, 0x32e32db5,
+ 0x1b80, 0x32e32db7,
+ 0x1b80, 0x7b382dc5,
+ 0x1b80, 0x7b382dc7,
+ 0x1b80, 0x7a802dd5,
+ 0x1b80, 0x7a802dd7,
+ 0x1b80, 0x550b2de5,
+ 0x1b80, 0x550b2de7,
+ 0x1b80, 0x32e32df5,
+ 0x1b80, 0x32e32df7,
+ 0x1b80, 0x7b402e05,
+ 0x1b80, 0x7b402e07,
+ 0x1b80, 0x7a002e15,
+ 0x1b80, 0x7a002e17,
+ 0x1b80, 0x55132e25,
+ 0x1b80, 0x55132e27,
+ 0x1b80, 0x74012e35,
+ 0x1b80, 0x74012e37,
+ 0x1b80, 0x74002e45,
+ 0x1b80, 0x74002e47,
+ 0x1b80, 0x8e002e55,
+ 0x1b80, 0x8e002e57,
+ 0x1b80, 0x00012e65,
+ 0x1b80, 0x00012e67,
+ 0x1b80, 0x57022e75,
+ 0x1b80, 0x57022e77,
+ 0x1b80, 0x57002e85,
+ 0x1b80, 0x57002e87,
+ 0x1b80, 0x97002e95,
+ 0x1b80, 0x97002e97,
+ 0x1b80, 0x00012ea5,
+ 0x1b80, 0x00012ea7,
+ 0x1b80, 0x4f782eb5,
+ 0x1b80, 0x4f782eb7,
+ 0x1b80, 0x53882ec5,
+ 0x1b80, 0x53882ec7,
+ 0x1b80, 0xe2f72ed5,
+ 0x1b80, 0xe2f72ed7,
+ 0x1b80, 0x54802ee5,
+ 0x1b80, 0x54802ee7,
+ 0x1b80, 0x54002ef5,
+ 0x1b80, 0x54002ef7,
+ 0x1b80, 0x54812f05,
+ 0x1b80, 0x54812f07,
+ 0x1b80, 0x54002f15,
+ 0x1b80, 0x54002f17,
+ 0x1b80, 0x54822f25,
+ 0x1b80, 0x54822f27,
+ 0x1b80, 0x54002f35,
+ 0x1b80, 0x54002f37,
+ 0x1b80, 0xe3022f45,
+ 0x1b80, 0xe3022f47,
+ 0x1b80, 0xbf1d2f55,
+ 0x1b80, 0xbf1d2f57,
+ 0x1b80, 0x30192f65,
+ 0x1b80, 0x30192f67,
+ 0x1b80, 0xe2d72f75,
+ 0x1b80, 0xe2d72f77,
+ 0x1b80, 0xe2dc2f85,
+ 0x1b80, 0xe2dc2f87,
+ 0x1b80, 0xe2e02f95,
+ 0x1b80, 0xe2e02f97,
+ 0x1b80, 0xe2e72fa5,
+ 0x1b80, 0xe2e72fa7,
+ 0x1b80, 0xe3412fb5,
+ 0x1b80, 0xe3412fb7,
+ 0x1b80, 0x55132fc5,
+ 0x1b80, 0x55132fc7,
+ 0x1b80, 0xe2e32fd5,
+ 0x1b80, 0xe2e32fd7,
+ 0x1b80, 0x55152fe5,
+ 0x1b80, 0x55152fe7,
+ 0x1b80, 0xe2e72ff5,
+ 0x1b80, 0xe2e72ff7,
+ 0x1b80, 0xe3413005,
+ 0x1b80, 0xe3413007,
+ 0x1b80, 0x00013015,
+ 0x1b80, 0x00013017,
+ 0x1b80, 0x54bf3025,
+ 0x1b80, 0x54bf3027,
+ 0x1b80, 0x54c03035,
+ 0x1b80, 0x54c03037,
+ 0x1b80, 0x54a33045,
+ 0x1b80, 0x54a33047,
+ 0x1b80, 0x54c13055,
+ 0x1b80, 0x54c13057,
+ 0x1b80, 0x54a43065,
+ 0x1b80, 0x54a43067,
+ 0x1b80, 0x4c183075,
+ 0x1b80, 0x4c183077,
+ 0x1b80, 0xbf073085,
+ 0x1b80, 0xbf073087,
+ 0x1b80, 0x54c23095,
+ 0x1b80, 0x54c23097,
+ 0x1b80, 0x54a430a5,
+ 0x1b80, 0x54a430a7,
+ 0x1b80, 0xbf0430b5,
+ 0x1b80, 0xbf0430b7,
+ 0x1b80, 0x54c130c5,
+ 0x1b80, 0x54c130c7,
+ 0x1b80, 0x54a330d5,
+ 0x1b80, 0x54a330d7,
+ 0x1b80, 0xbf0130e5,
+ 0x1b80, 0xbf0130e7,
+ 0x1b80, 0xe34f30f5,
+ 0x1b80, 0xe34f30f7,
+ 0x1b80, 0x54df3105,
+ 0x1b80, 0x54df3107,
+ 0x1b80, 0x00013115,
+ 0x1b80, 0x00013117,
+ 0x1b80, 0x54bf3125,
+ 0x1b80, 0x54bf3127,
+ 0x1b80, 0x54e53135,
+ 0x1b80, 0x54e53137,
+ 0x1b80, 0x050a3145,
+ 0x1b80, 0x050a3147,
+ 0x1b80, 0x54df3155,
+ 0x1b80, 0x54df3157,
+ 0x1b80, 0x00013165,
+ 0x1b80, 0x00013167,
+ 0x1b80, 0x7f403175,
+ 0x1b80, 0x7f403177,
+ 0x1b80, 0x7e003185,
+ 0x1b80, 0x7e003187,
+ 0x1b80, 0x7d003195,
+ 0x1b80, 0x7d003197,
+ 0x1b80, 0x550131a5,
+ 0x1b80, 0x550131a7,
+ 0x1b80, 0x5c3131b5,
+ 0x1b80, 0x5c3131b7,
+ 0x1b80, 0xe2e331c5,
+ 0x1b80, 0xe2e331c7,
+ 0x1b80, 0xe2e731d5,
+ 0x1b80, 0xe2e731d7,
+ 0x1b80, 0x548031e5,
+ 0x1b80, 0x548031e7,
+ 0x1b80, 0x540031f5,
+ 0x1b80, 0x540031f7,
+ 0x1b80, 0x54813205,
+ 0x1b80, 0x54813207,
+ 0x1b80, 0x54003215,
+ 0x1b80, 0x54003217,
+ 0x1b80, 0x54823225,
+ 0x1b80, 0x54823227,
+ 0x1b80, 0x54003235,
+ 0x1b80, 0x54003237,
+ 0x1b80, 0xe3023245,
+ 0x1b80, 0xe3023247,
+ 0x1b80, 0xbfed3255,
+ 0x1b80, 0xbfed3257,
+ 0x1b80, 0x30193265,
+ 0x1b80, 0x30193267,
+ 0x1b80, 0x74023275,
+ 0x1b80, 0x74023277,
+ 0x1b80, 0x003f3285,
+ 0x1b80, 0x003f3287,
+ 0x1b80, 0x74003295,
+ 0x1b80, 0x74003297,
+ 0x1b80, 0x000232a5,
+ 0x1b80, 0x000232a7,
+ 0x1b80, 0x000132b5,
+ 0x1b80, 0x000132b7,
+ 0x1b80, 0x000632c5,
+ 0x1b80, 0x000632c7,
+ 0x1b80, 0x5a8032d5,
+ 0x1b80, 0x5a8032d7,
+ 0x1b80, 0x5a0032e5,
+ 0x1b80, 0x5a0032e7,
+ 0x1b80, 0x920032f5,
+ 0x1b80, 0x920032f7,
+ 0x1b80, 0x00013305,
+ 0x1b80, 0x00013307,
+ 0x1b80, 0x5b8f3315,
+ 0x1b80, 0x5b8f3317,
+ 0x1b80, 0x5b0f3325,
+ 0x1b80, 0x5b0f3327,
+ 0x1b80, 0x91003335,
+ 0x1b80, 0x91003337,
+ 0x1b80, 0x00013345,
+ 0x1b80, 0x00013347,
+ 0x1b80, 0x00063355,
+ 0x1b80, 0x00063357,
+ 0x1b80, 0x5d803365,
+ 0x1b80, 0x5d803367,
+ 0x1b80, 0x5e563375,
+ 0x1b80, 0x5e563377,
+ 0x1b80, 0x00043385,
+ 0x1b80, 0x00043387,
+ 0x1b80, 0x4d083395,
+ 0x1b80, 0x4d083397,
+ 0x1b80, 0x571033a5,
+ 0x1b80, 0x571033a7,
+ 0x1b80, 0x570033b5,
+ 0x1b80, 0x570033b7,
+ 0x1b80, 0x4d0033c5,
+ 0x1b80, 0x4d0033c7,
+ 0x1b80, 0x000633d5,
+ 0x1b80, 0x000633d7,
+ 0x1b80, 0x5d0033e5,
+ 0x1b80, 0x5d0033e7,
+ 0x1b80, 0x000433f5,
+ 0x1b80, 0x000433f7,
+ 0x1b80, 0x00013405,
+ 0x1b80, 0x00013407,
+ 0x1b80, 0x549f3415,
+ 0x1b80, 0x549f3417,
+ 0x1b80, 0x54ff3425,
+ 0x1b80, 0x54ff3427,
+ 0x1b80, 0x54003435,
+ 0x1b80, 0x54003437,
+ 0x1b80, 0x00013445,
+ 0x1b80, 0x00013447,
+ 0x1b80, 0x5c313455,
+ 0x1b80, 0x5c313457,
+ 0x1b80, 0x07143465,
+ 0x1b80, 0x07143467,
+ 0x1b80, 0x54003475,
+ 0x1b80, 0x54003477,
+ 0x1b80, 0x5c323485,
+ 0x1b80, 0x5c323487,
+ 0x1b80, 0x00013495,
+ 0x1b80, 0x00013497,
+ 0x1b80, 0x5c3234a5,
+ 0x1b80, 0x5c3234a7,
+ 0x1b80, 0x071434b5,
+ 0x1b80, 0x071434b7,
+ 0x1b80, 0x540034c5,
+ 0x1b80, 0x540034c7,
+ 0x1b80, 0x5c3134d5,
+ 0x1b80, 0x5c3134d7,
+ 0x1b80, 0x000134e5,
+ 0x1b80, 0x000134e7,
+ 0x1b80, 0x4c9834f5,
+ 0x1b80, 0x4c9834f7,
+ 0x1b80, 0x4c183505,
+ 0x1b80, 0x4c183507,
+ 0x1b80, 0x00013515,
+ 0x1b80, 0x00013517,
+ 0x1b80, 0x5c323525,
+ 0x1b80, 0x5c323527,
+ 0x1b80, 0x62043535,
+ 0x1b80, 0x62043537,
+ 0x1b80, 0x63033545,
+ 0x1b80, 0x63033547,
+ 0x1b80, 0x66073555,
+ 0x1b80, 0x66073557,
+ 0x1b80, 0x7b403565,
+ 0x1b80, 0x7b403567,
+ 0x1b80, 0x7a003575,
+ 0x1b80, 0x7a003577,
+ 0x1b80, 0x79003585,
+ 0x1b80, 0x79003587,
+ 0x1b80, 0x7f403595,
+ 0x1b80, 0x7f403597,
+ 0x1b80, 0x7e0035a5,
+ 0x1b80, 0x7e0035a7,
+ 0x1b80, 0x7d0035b5,
+ 0x1b80, 0x7d0035b7,
+ 0x1b80, 0x090135c5,
+ 0x1b80, 0x090135c7,
+ 0x1b80, 0x0c0135d5,
+ 0x1b80, 0x0c0135d7,
+ 0x1b80, 0x0ba635e5,
+ 0x1b80, 0x0ba635e7,
+ 0x1b80, 0x000135f5,
+ 0x1b80, 0x000135f7,
+ 0x1b80, 0x00000006,
+ 0x1b80, 0x00000002,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822c_array_mp_cal_init, rtw_phy_cfg_bb);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
new file mode 100644
index 000000000000..06e207dd8e5f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8822C_TABLE_H__
+#define __RTW8822C_TABLE_H__
+
+extern const struct rtw_table rtw8822c_mac_tbl;
+extern const struct rtw_table rtw8822c_agc_tbl;
+extern const struct rtw_table rtw8822c_bb_tbl;
+extern const struct rtw_table rtw8822c_bb_pg_type0_tbl;
+extern const struct rtw_table rtw8822c_rf_a_tbl;
+extern const struct rtw_table rtw8822c_rf_b_tbl;
+extern const struct rtw_table rtw8822c_txpwr_lmt_type0_tbl;
+extern const struct rtw_table rtw8822c_array_mp_cal_init_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
new file mode 100644
index 000000000000..4d837f0c6d5f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "rx.h"
+#include "ps.h"
+
+void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rtw_vif *rtwvif;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ if (!is_broadcast_ether_addr(hdr->addr1) &&
+ !is_multicast_ether_addr(hdr->addr1)) {
+ rtwdev->stats.rx_unicast += skb->len;
+ rtwdev->stats.rx_cnt++;
+ if (vif) {
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ rtwvif->stats.rx_unicast += skb->len;
+ rtwvif->stats.rx_cnt++;
+ if (rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
+ rtw_leave_lps_irqsafe(rtwdev, rtwvif);
+ }
+ }
+}
+EXPORT_SYMBOL(rtw_rx_stats);
+
+struct rtw_rx_addr_match_data {
+ struct rtw_dev *rtwdev;
+ struct ieee80211_hdr *hdr;
+ struct rtw_rx_pkt_stat *pkt_stat;
+ u8 *bssid;
+};
+
+static void rtw_rx_addr_match_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_rx_addr_match_data *iter_data = data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = iter_data->hdr;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+ struct rtw_sta_info *si;
+ struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
+ u8 *bssid = iter_data->bssid;
+
+ if (ether_addr_equal(vif->bss_conf.bssid, bssid) &&
+ (ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
+ vif->addr);
+ else
+ return;
+
+ if (!sta)
+ return;
+
+ si = (struct rtw_sta_info *)sta->drv_priv;
+ ewma_rssi_add(&si->avg_rssi, pkt_stat->rssi);
+}
+
+static void rtw_rx_addr_match(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtw_rx_addr_match_data data = {};
+
+ if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status ||
+ ieee80211_is_ctl(hdr->frame_control))
+ return;
+
+ data.rtwdev = rtwdev;
+ data.hdr = hdr;
+ data.pkt_stat = pkt_stat;
+ data.bssid = get_hdr_bssid(hdr);
+
+ rtw_iterate_vifs_atomic(rtwdev, rtw_rx_addr_match_iter, &data);
+}
+
+void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ u8 *phy_status)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+ if (pkt_stat->crc_err)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (pkt_stat->decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+
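+ /* translate the hardware rate index into mac80211's rate_idx, which
+ * is relative to the first rate of its group (per-NSS VHT MCS, HT
+ * MCS, or the band's legacy rate table)
+ */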
+ if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0)
+ rx_status->encoding = RX_ENC_VHT;
+ else if (pkt_stat->rate >= DESC_RATEMCS0)
+ rx_status->encoding = RX_ENC_HT;
+
+ if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0 &&
+ pkt_stat->rate <= DESC_RATEVHT1SS_MCS9) {
+ rx_status->nss = 1;
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT1SS_MCS0;
+ } else if (pkt_stat->rate >= DESC_RATEVHT2SS_MCS0 &&
+ pkt_stat->rate <= DESC_RATEVHT2SS_MCS9) {
+ rx_status->nss = 2;
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT2SS_MCS0;
+ } else if (pkt_stat->rate >= DESC_RATEVHT3SS_MCS0 &&
+ pkt_stat->rate <= DESC_RATEVHT3SS_MCS9) {
+ rx_status->nss = 3;
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT3SS_MCS0;
+ } else if (pkt_stat->rate >= DESC_RATEVHT4SS_MCS0 &&
+ pkt_stat->rate <= DESC_RATEVHT4SS_MCS9) {
+ rx_status->nss = 4;
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT4SS_MCS0;
+ } else if (pkt_stat->rate >= DESC_RATEMCS0 &&
+ pkt_stat->rate <= DESC_RATEMCS15) {
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEMCS0;
+ } else if (rx_status->band == NL80211_BAND_5GHZ &&
+ pkt_stat->rate >= DESC_RATE6M &&
+ pkt_stat->rate <= DESC_RATE54M) {
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATE6M;
+ } else if (rx_status->band == NL80211_BAND_2GHZ &&
+ pkt_stat->rate >= DESC_RATE1M &&
+ pkt_stat->rate <= DESC_RATE54M) {
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATE1M;
+ } else {
+ rx_status->rate_idx = 0;
+ }
+
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+ rx_status->mactime = pkt_stat->tsf_low;
+
+ if (pkt_stat->bw == RTW_CHANNEL_WIDTH_80)
+ rx_status->bw = RATE_INFO_BW_80;
+ else if (pkt_stat->bw == RTW_CHANNEL_WIDTH_40)
+ rx_status->bw = RATE_INFO_BW_40;
+ else
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->signal = pkt_stat->signal_power;
+
+ rtw_rx_addr_match(rtwdev, pkt_stat, hdr);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
new file mode 100644
index 000000000000..383f3b2babc1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_RX_H_
+#define __RTW_RX_H_
+
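+/* rxdesc points to the little-endian RX descriptor; each accessor below
+ * extracts one field from the 32-bit word at the given word offset
+ */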
+#define GET_RX_DESC_PHYST(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(26))
+#define GET_RX_DESC_ICV_ERR(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(15))
+#define GET_RX_DESC_CRC32(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(14))
+#define GET_RX_DESC_SWDEC(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(27))
+#define GET_RX_DESC_C2H(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x02), BIT(28))
+#define GET_RX_DESC_PKT_LEN(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(13, 0))
+#define GET_RX_DESC_DRV_INFO_SIZE(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(19, 16))
+#define GET_RX_DESC_SHIFT(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(25, 24))
+#define GET_RX_DESC_RX_RATE(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x03), GENMASK(6, 0))
+#define GET_RX_DESC_MACID(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x01), GENMASK(6, 0))
+#define GET_RX_DESC_PPDU_CNT(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x02), GENMASK(30, 29))
+#define GET_RX_DESC_TSFL(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0))
+
+void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ u8 *phy_status);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/sec.c b/drivers/net/wireless/realtek/rtw88/sec.c
new file mode 100644
index 000000000000..c594fc02804d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/sec.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "sec.h"
+#include "reg.h"
+
+int rtw_sec_get_free_cam(struct rtw_sec_desc *sec)
+{
+ /* if default key search is enabled, the first 4 CAM entries
+ * map directly to group keys by their key->keyidx, so the
+ * driver should install pairwise keys in CAM entries after 4
+ */
+ if (sec->default_key_search)
+ return find_next_zero_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM,
+ RTW_SEC_DEFAULT_KEY_NUM);
+
+ return find_first_zero_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM);
+}
+
+void rtw_sec_write_cam(struct rtw_dev *rtwdev,
+ struct rtw_sec_desc *sec,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ u8 hw_key_type, u8 hw_key_idx)
+{
+ struct rtw_cam_entry *cam = &sec->cam_table[hw_key_idx];
+ u32 write_cmd;
+ u32 command;
+ u32 content;
+ u32 addr;
+ int i, j;
+
+ set_bit(hw_key_idx, sec->cam_map);
+ cam->valid = true;
+ cam->group = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ cam->hw_key_type = hw_key_type;
+ cam->key = key;
+ if (sta)
+ ether_addr_copy(cam->addr, sta->addr);
+ else
+ eth_broadcast_addr(cam->addr);
+
+ write_cmd = RTW_SEC_CMD_WRITE_ENABLE | RTW_SEC_CMD_POLLING;
+ addr = hw_key_idx << RTW_SEC_CAM_ENTRY_SHIFT;
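+ /* each CAM entry occupies 8 dwords; write the 6 used dwords from
+ * last to first: dword 0 holds keyid/type/group/valid and the first
+ * two MAC address bytes, dword 1 the remaining four MAC bytes, and
+ * dwords 2-5 the 16-byte key material
+ */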
+ for (i = 5; i >= 0; i--) {
+ switch (i) {
+ case 0:
+ content = ((key->keyidx & 0x3)) |
+ ((hw_key_type & 0x7) << 2) |
+ (cam->group << 6) |
+ (cam->valid << 15) |
+ (cam->addr[0] << 16) |
+ (cam->addr[1] << 24);
+ break;
+ case 1:
+ content = (cam->addr[2]) |
+ (cam->addr[3] << 8) |
+ (cam->addr[4] << 16) |
+ (cam->addr[5] << 24);
+ break;
+ default:
+ j = (i - 2) << 2;
+ content = (key->key[j]) |
+ (key->key[j + 1] << 8) |
+ (key->key[j + 2] << 16) |
+ (key->key[j + 3] << 24);
+ break;
+ }
+
+ command = write_cmd | (addr + i);
+ rtw_write32(rtwdev, RTW_SEC_WRITE_REG, content);
+ rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
+ }
+}
+
+void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
+ struct rtw_sec_desc *sec,
+ u8 hw_key_idx)
+{
+ struct rtw_cam_entry *cam = &sec->cam_table[hw_key_idx];
+ u32 write_cmd;
+ u32 command;
+ u32 addr;
+
+ clear_bit(hw_key_idx, sec->cam_map);
+ cam->valid = false;
+ cam->key = NULL;
+ eth_zero_addr(cam->addr);
+
+ write_cmd = RTW_SEC_CMD_WRITE_ENABLE | RTW_SEC_CMD_POLLING;
+ addr = hw_key_idx << RTW_SEC_CAM_ENTRY_SHIFT;
+ command = write_cmd | addr;
+ rtw_write32(rtwdev, RTW_SEC_WRITE_REG, 0);
+ rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
+}
+
+void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev)
+{
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+ u16 ctrl_reg;
+ u16 sec_config;
+
+ /* use default key search by default for now */
+ sec->default_key_search = true;
+
+ ctrl_reg = rtw_read16(rtwdev, REG_CR);
+ ctrl_reg |= RTW_SEC_ENGINE_EN;
+ rtw_write16(rtwdev, REG_CR, ctrl_reg);
+
+ sec_config = rtw_read16(rtwdev, RTW_SEC_CONFIG);
+
+ sec_config |= RTW_SEC_TX_DEC_EN | RTW_SEC_RX_DEC_EN;
+ if (sec->default_key_search)
+ sec_config |= RTW_SEC_TX_UNI_USE_DK | RTW_SEC_RX_UNI_USE_DK |
+ RTW_SEC_TX_BC_USE_DK | RTW_SEC_RX_BC_USE_DK;
+
+ rtw_write16(rtwdev, RTW_SEC_CONFIG, sec_config);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/sec.h b/drivers/net/wireless/realtek/rtw88/sec.h
new file mode 100644
index 000000000000..8c50a895c797
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/sec.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_SEC_H_
+#define __RTW_SEC_H_
+
+#define RTW_SEC_CMD_REG 0x670
+#define RTW_SEC_WRITE_REG 0x674
+#define RTW_SEC_READ_REG 0x678
+#define RTW_SEC_CONFIG 0x680
+
+#define RTW_SEC_CAM_ENTRY_SHIFT 3
+#define RTW_SEC_DEFAULT_KEY_NUM 4
+#define RTW_SEC_CMD_WRITE_ENABLE BIT(16)
+#define RTW_SEC_CMD_CLEAR BIT(30)
+#define RTW_SEC_CMD_POLLING BIT(31)
+
+#define RTW_SEC_TX_UNI_USE_DK BIT(0)
+#define RTW_SEC_RX_UNI_USE_DK BIT(1)
+#define RTW_SEC_TX_DEC_EN BIT(2)
+#define RTW_SEC_RX_DEC_EN BIT(3)
+#define RTW_SEC_TX_BC_USE_DK BIT(6)
+#define RTW_SEC_RX_BC_USE_DK BIT(7)
+
+#define RTW_SEC_ENGINE_EN BIT(9)
+
+int rtw_sec_get_free_cam(struct rtw_sec_desc *sec);
+void rtw_sec_write_cam(struct rtw_dev *rtwdev,
+ struct rtw_sec_desc *sec,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ u8 hw_key_type, u8 hw_key_idx);
+void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
+ struct rtw_sec_desc *sec,
+ u8 hw_key_idx);
+void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
new file mode 100644
index 000000000000..e32faf8bead9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "tx.h"
+#include "fw.h"
+#include "ps.h"
+
+static
+void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rtw_vif *rtwvif;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ if (!is_broadcast_ether_addr(hdr->addr1) &&
+ !is_multicast_ether_addr(hdr->addr1)) {
+ rtwdev->stats.tx_unicast += skb->len;
+ rtwdev->stats.tx_cnt++;
+ if (vif) {
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ rtwvif->stats.tx_unicast += skb->len;
+ rtwvif->stats.tx_cnt++;
+ if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD)
+ rtw_leave_lps_irqsafe(rtwdev, rtwvif);
+ }
+ }
+}
+
+void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
+{
+ __le32 *txdesc = (__le32 *)skb->data;
+
+ SET_TX_DESC_TXPKTSIZE(txdesc, pkt_info->tx_pkt_size);
+ SET_TX_DESC_OFFSET(txdesc, pkt_info->offset);
+ SET_TX_DESC_PKT_OFFSET(txdesc, pkt_info->pkt_offset);
+ SET_TX_DESC_QSEL(txdesc, pkt_info->qsel);
+ SET_TX_DESC_BMC(txdesc, pkt_info->bmc);
+ SET_TX_DESC_RATE_ID(txdesc, pkt_info->rate_id);
+ SET_TX_DESC_DATARATE(txdesc, pkt_info->rate);
+ SET_TX_DESC_DISDATAFB(txdesc, pkt_info->dis_rate_fallback);
+ SET_TX_DESC_USE_RATE(txdesc, pkt_info->use_rate);
+ SET_TX_DESC_SEC_TYPE(txdesc, pkt_info->sec_type);
+ SET_TX_DESC_DATA_BW(txdesc, pkt_info->bw);
+ SET_TX_DESC_SW_SEQ(txdesc, pkt_info->seq);
+ SET_TX_DESC_MAX_AGG_NUM(txdesc, pkt_info->ampdu_factor);
+ SET_TX_DESC_AMPDU_DENSITY(txdesc, pkt_info->ampdu_density);
+ SET_TX_DESC_DATA_STBC(txdesc, pkt_info->stbc);
+ SET_TX_DESC_DATA_LDPC(txdesc, pkt_info->ldpc);
+ SET_TX_DESC_AGG_EN(txdesc, pkt_info->ampdu_en);
+ SET_TX_DESC_LS(txdesc, pkt_info->ls);
+ SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
+ SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
+ SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
+}
+EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
+
+static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
+{
+ u8 exp = sta->ht_cap.ampdu_factor;
+
+ /* the minimum ampdu factor is 8K, and the value in the tx desc is the
+ * max aggregation num, which means val * 2 packets can be
+ * aggregated in an AMPDU, so here we should use 8/2=4 as the base
+ */
+ return (BIT(2) << exp) - 1;
+}
+
+static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
+{
+ return sta->ht_cap.ampdu_density;
+}
+
+static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
+ struct ieee80211_sta *sta)
+{
+ u8 rate;
+
+ if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0)
+ rate = DESC_RATEMCS15;
+ else
+ rate = DESC_RATEMCS7;
+
+ return rate;
+}
+
+static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
+ struct ieee80211_sta *sta)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 rate;
+ u16 tx_mcs_map;
+
+ tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
+ if (efuse->hw_cap.nss == 1) {
+ switch (tx_mcs_map & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ rate = DESC_RATEVHT1SS_MCS7;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ rate = DESC_RATEVHT1SS_MCS8;
+ break;
+ default:
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ rate = DESC_RATEVHT1SS_MCS9;
+ break;
+ }
+ } else if (efuse->hw_cap.nss >= 2) {
+ switch ((tx_mcs_map & 0xc) >> 2) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ rate = DESC_RATEVHT2SS_MCS7;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ rate = DESC_RATEVHT2SS_MCS8;
+ break;
+ default:
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ rate = DESC_RATEVHT2SS_MCS9;
+ break;
+ }
+ } else {
+ rate = DESC_RATEVHT1SS_MCS9;
+ }
+
+ return rate;
+}
+
+static void rtw_tx_report_enable(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info)
+{
+ struct rtw_tx_report *tx_report = &rtwdev->tx_report;
+
+ /* [11:8], reserved, filled with zero
+ * [7:2], tx report sequence number
+ * [1:0], firmware use, filled with zero
+ */
+ pkt_info->sn = (atomic_inc_return(&tx_report->sn) << 2) & 0xfc;
+ pkt_info->report = true;
+}
+
+void rtw_tx_report_purge_timer(struct timer_list *t)
+{
+ struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
+ struct rtw_tx_report *tx_report = &rtwdev->tx_report;
+ unsigned long flags;
+
+ if (skb_queue_len(&tx_report->queue) == 0)
+ return;
+
+ WARN(1, "purge skb(s) not reported by firmware\n");
+
+ spin_lock_irqsave(&tx_report->q_lock, flags);
+ skb_queue_purge(&tx_report->queue);
+ spin_unlock_irqrestore(&tx_report->q_lock, flags);
+}
+
+void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn)
+{
+ struct rtw_tx_report *tx_report = &rtwdev->tx_report;
+ unsigned long flags;
+ u8 *drv_data;
+
+ /* pass sn to tx report handler through driver data */
+ drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data;
+ *drv_data = sn;
+
+ spin_lock_irqsave(&tx_report->q_lock, flags);
+ __skb_queue_tail(&tx_report->queue, skb);
+ spin_unlock_irqrestore(&tx_report->q_lock, flags);
+
+ mod_timer(&tx_report->purge_timer, jiffies + RTW_TX_PROBE_TIMEOUT);
+}
+EXPORT_SYMBOL(rtw_tx_report_enqueue);
+
+static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
+ struct sk_buff *skb, bool acked)
+{
+ struct ieee80211_tx_info *info;
+
+ info = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(info);
+ if (acked)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
+}
+
+void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_tx_report *tx_report = &rtwdev->tx_report;
+ struct rtw_c2h_cmd *c2h;
+ struct sk_buff *cur, *tmp;
+ unsigned long flags;
+ u8 sn, st;
+ u8 *n;
+
+ c2h = get_c2h_from_skb(skb);
+
+ sn = GET_CCX_REPORT_SEQNUM(c2h->payload);
+ st = GET_CCX_REPORT_STATUS(c2h->payload);
+
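+ /* find the queued skb whose stored sequence number matches the
+ * firmware report and complete its tx status (acked when st == 0)
+ */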
+ spin_lock_irqsave(&tx_report->q_lock, flags);
+ skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
+ n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data;
+ if (*n == sn) {
+ __skb_unlink(cur, &tx_report->queue);
+ rtw_tx_report_tx_status(rtwdev, cur, st == 0);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&tx_report->q_lock, flags);
+}
+
+static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ pkt_info->use_rate = true;
+ pkt_info->rate_id = 6;
+ pkt_info->dis_rate_fallback = true;
+}
+
+static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtw_sta_info *si;
+ u16 seq;
+ u8 ampdu_factor = 0;
+ u8 ampdu_density = 0;
+ bool ampdu_en = false;
+ u8 rate = DESC_RATE6M;
+ u8 rate_id = 6;
+ u8 bw = RTW_CHANNEL_WIDTH_20;
+ bool stbc = false;
+ bool ldpc = false;
+
+ seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+ /* for broadcast/multicast, use default values */
+ if (!sta)
+ goto out;
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ampdu_en = true;
+ ampdu_factor = get_tx_ampdu_factor(sta);
+ ampdu_density = get_tx_ampdu_density(sta);
+ }
+
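+ /* pick a default data rate from the station's capabilities: the
+ * highest VHT or HT MCS if supported, otherwise a legacy rate
+ */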
+ if (sta->vht_cap.vht_supported)
+ rate = get_highest_vht_tx_rate(rtwdev, sta);
+ else if (sta->ht_cap.ht_supported)
+ rate = get_highest_ht_tx_rate(rtwdev, sta);
+ else if (sta->supp_rates[0] <= 0xf)
+ rate = DESC_RATE11M;
+ else
+ rate = DESC_RATE54M;
+
+ si = (struct rtw_sta_info *)sta->drv_priv;
+
+ bw = si->bw_mode;
+ rate_id = si->rate_id;
+ stbc = si->stbc_en;
+ ldpc = si->ldpc_en;
+
+out:
+ pkt_info->seq = seq;
+ pkt_info->ampdu_factor = ampdu_factor;
+ pkt_info->ampdu_density = ampdu_density;
+ pkt_info->ampdu_en = ampdu_en;
+ pkt_info->rate = rate;
+ pkt_info->rate_id = rate_id;
+ pkt_info->bw = bw;
+ pkt_info->stbc = stbc;
+ pkt_info->ldpc = ldpc;
+}
+
+void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rtw_sta_info *si;
+ struct ieee80211_vif *vif = NULL;
+ __le16 fc = hdr->frame_control;
+ u8 sec_type = 0;
+ bool bmc;
+
+ if (control->sta) {
+ si = (struct rtw_sta_info *)control->sta->drv_priv;
+ vif = si->vif;
+ }
+
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
+ rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, control, skb);
+ else if (ieee80211_is_data(fc))
+ rtw_tx_data_pkt_info_update(rtwdev, pkt_info, control, skb);
+
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *key = info->control.hw_key;
+
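+ /* map the mac80211 cipher suite to the sec_type encoding used in
+ * the tx descriptor: 1 for WEP/TKIP, 3 for CCMP
+ */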
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ sec_type = 0x01;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ sec_type = 0x03;
+ break;
+ default:
+ break;
+ }
+ }
+
+ bmc = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
+ rtw_tx_report_enable(rtwdev, pkt_info);
+
+ pkt_info->bmc = bmc;
+ pkt_info->sec_type = sec_type;
+ pkt_info->tx_pkt_size = skb->len;
+ pkt_info->offset = chip->tx_pkt_desc_sz;
+ pkt_info->qsel = skb->priority;
+ pkt_info->ls = true;
+
+ /* maybe merge with tx status ? */
+ rtw_tx_stats(rtwdev, vif, skb);
+}
+
+void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool bmc;
+
+ bmc = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+ pkt_info->use_rate = true;
+ pkt_info->rate_id = 6;
+ pkt_info->dis_rate_fallback = true;
+ pkt_info->bmc = bmc;
+ pkt_info->tx_pkt_size = skb->len;
+ pkt_info->offset = chip->tx_pkt_desc_sz;
+ pkt_info->qsel = skb->priority;
+ pkt_info->ls = true;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
new file mode 100644
index 000000000000..8338dbf55576
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_TX_H_
+#define __RTW_TX_H_
+
+#define RTK_TX_MAX_AGG_NUM_MASK 0x1f
+
+#define RTW_TX_PROBE_TIMEOUT msecs_to_jiffies(500)
+
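+/* txdesc points to the little-endian TX descriptor; each setter below
+ * replaces one field in the 32-bit word at the given word offset
+ */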
+#define SET_TX_DESC_TXPKTSIZE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, GENMASK(15, 0))
+#define SET_TX_DESC_OFFSET(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, GENMASK(23, 16))
+#define SET_TX_DESC_PKT_OFFSET(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(28, 24))
+#define SET_TX_DESC_QSEL(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(12, 8))
+#define SET_TX_DESC_BMC(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, BIT(24))
+#define SET_TX_DESC_RATE_ID(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(20, 16))
+#define SET_TX_DESC_DATARATE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x04, value, GENMASK(6, 0))
+#define SET_TX_DESC_DISDATAFB(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(10))
+#define SET_TX_DESC_USE_RATE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(8))
+#define SET_TX_DESC_SEC_TYPE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(23, 22))
+#define SET_TX_DESC_DATA_BW(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(6, 5))
+#define SET_TX_DESC_SW_SEQ(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12))
+#define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
+#define SET_TX_DESC_AMPDU_DENSITY(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, GENMASK(22, 20))
+#define SET_TX_DESC_DATA_STBC(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(9, 8))
+#define SET_TX_DESC_DATA_LDPC(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, BIT(7))
+#define SET_TX_DESC_AGG_EN(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(12))
+#define SET_TX_DESC_LS(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, BIT(26))
+#define SET_TX_DESC_DATA_SHORT(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, BIT(4))
+#define SET_TX_DESC_SPE_RPT(txdesc, value)                                     \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(19))
+#define SET_TX_DESC_SW_DEFINE(txdesc, value)                                   \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x06, value, GENMASK(11, 0))
+
+enum rtw_tx_desc_queue_select {
+ TX_DESC_QSEL_TID0 = 0,
+ TX_DESC_QSEL_TID1 = 1,
+ TX_DESC_QSEL_TID2 = 2,
+ TX_DESC_QSEL_TID3 = 3,
+ TX_DESC_QSEL_TID4 = 4,
+ TX_DESC_QSEL_TID5 = 5,
+ TX_DESC_QSEL_TID6 = 6,
+ TX_DESC_QSEL_TID7 = 7,
+ TX_DESC_QSEL_TID8 = 8,
+ TX_DESC_QSEL_TID9 = 9,
+ TX_DESC_QSEL_TID10 = 10,
+ TX_DESC_QSEL_TID11 = 11,
+ TX_DESC_QSEL_TID12 = 12,
+ TX_DESC_QSEL_TID13 = 13,
+ TX_DESC_QSEL_TID14 = 14,
+ TX_DESC_QSEL_TID15 = 15,
+ TX_DESC_QSEL_BEACON = 16,
+ TX_DESC_QSEL_HIGH = 17,
+ TX_DESC_QSEL_MGMT = 18,
+ TX_DESC_QSEL_H2C = 19,
+};
+
+void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
+void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
+void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
new file mode 100644
index 000000000000..212070c2baa8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "util.h"
+#include "reg.h"
+
+bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
+{
+ u32 cnt;
+
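+ /* poll the masked register up to 1000 times, 10us apart (~10ms),
+ * until it reads back the target value
+ */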
+ for (cnt = 0; cnt < 1000; cnt++) {
+ if (rtw_read32_mask(rtwdev, addr, mask) == target)
+ return true;
+
+ udelay(10);
+ }
+
+ return false;
+}
+
+bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
+{
+ if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1))
+ return false;
+
+ rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0x800F0000 | offset);
+ *val = rtw_read32(rtwdev, LTECOEX_READ_DATA);
+
+ return true;
+}
+
+bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value)
+{
+ if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1))
+ return false;
+
+ rtw_write32(rtwdev, LTECOEX_WRITE_DATA, value);
+ rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0xC00F0000 | offset);
+
+ return true;
+}
+
+void rtw_restore_reg(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *bckp, u32 num)
+{
+ u8 len;
+ u32 reg;
+ u32 val;
+ int i;
+
+ for (i = 0; i < num; i++, bckp++) {
+ len = bckp->len;
+ reg = bckp->reg;
+ val = bckp->val;
+
+ switch (len) {
+ case 1:
+ rtw_write8(rtwdev, reg, (u8)val);
+ break;
+ case 2:
+ rtw_write16(rtwdev, reg, (u16)val);
+ break;
+ case 4:
+ rtw_write32(rtwdev, reg, (u32)val);
+ break;
+ default:
+ break;
+ }
+ }
+}
diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h
new file mode 100644
index 000000000000..7bd2843b0bce
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/util.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_UTIL_H__
+#define __RTW_UTIL_H__
+
+struct rtw_dev;
+
+#define rtw_iterate_vifs(rtwdev, iterator, data) \
+ ieee80211_iterate_active_interfaces(rtwdev->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+#define rtw_iterate_vifs_atomic(rtwdev, iterator, data) \
+ ieee80211_iterate_active_interfaces_atomic(rtwdev->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+#define rtw_iterate_stas_atomic(rtwdev, iterator, data) \
+ ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data)
+
+static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+{
+ __le16 fc = hdr->frame_control;
+ u8 *bssid;
+
+ if (ieee80211_has_tods(fc))
+ bssid = hdr->addr1;
+ else if (ieee80211_has_fromds(fc))
+ bssid = hdr->addr2;
+ else
+ bssid = hdr->addr3;
+
+ return bssid;
+}
+
+#endif
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 51e4e92d95a0..e07a1152cec1 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1707,7 +1707,7 @@ static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
int len, ret, max_pmkids;
max_pmkids = priv->wdev.wiphy->max_num_pmkids;
- len = sizeof(*pmkids) + max_pmkids * sizeof(pmkids->bssid_info[0]);
+ len = struct_size(pmkids, bssid_info, max_pmkids);
pmkids = kzalloc(len, GFP_KERNEL);
if (!pmkids)
@@ -1740,7 +1740,7 @@ static int set_device_pmkids(struct usbnet *usbdev,
int ret, len, num_pmkids;
num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
- len = sizeof(*pmkids) + num_pmkids * sizeof(pmkids->bssid_info[0]);
+ len = struct_size(pmkids, bssid_info, num_pmkids);
pmkids->length = cpu_to_le32(len);
debug_print_pmkids(usbdev, pmkids, __func__);
@@ -1761,7 +1761,7 @@ static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
struct cfg80211_pmksa *pmksa,
int max_pmkids)
{
- int i, newlen, err;
+ int i, err;
unsigned int count;
count = le32_to_cpu(pmkids->bssid_info_count);
@@ -1786,9 +1786,7 @@ static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];
count--;
- newlen = sizeof(*pmkids) + count * sizeof(pmkids->bssid_info[0]);
-
- pmkids->length = cpu_to_le32(newlen);
+ pmkids->length = cpu_to_le32(struct_size(pmkids, bssid_info, count));
pmkids->bssid_info_count = cpu_to_le32(count);
return pmkids;
@@ -1831,7 +1829,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
}
/* add new pmkid */
- newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
+ newlen = struct_size(pmkids, bssid_info, count + 1);
new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
if (!new_pmkids) {
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 1dbaab2a96b7..f84250bdb8cf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -31,6 +31,13 @@ static struct ta_metadata metadata_flash_content[] = {
};
+static struct ta_metadata metadata[] = {{"pmemdata_dummy", 0x00000000},
+ {"rsi/rs9116_wlan.rps", 0x00000000},
+ {"rsi/rs9116_wlan_bt_classic.rps", 0x00000000},
+ {"rsi/pmemdata_dummy", 0x00000000},
+ {"rsi/rs9116_wlan_bt_classic.rps", 0x00000000}
+};
+
int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
@@ -829,21 +836,18 @@ static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
return 0;
}
-static int rsi_load_firmware(struct rsi_hw *adapter)
+static int rsi_hal_prepare_fwload(struct rsi_hw *adapter)
{
- struct rsi_common *common = adapter->priv;
struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
- const struct firmware *fw_entry = NULL;
- u32 regout_val = 0, content_size;
- u16 tmp_regout_val = 0;
- struct ta_metadata *metadata_p;
+ u32 regout_val = 0;
int status;
bl_start_cmd_timer(adapter, BL_CMD_TIMEOUT);
while (!adapter->blcmd_timer_expired) {
status = hif_ops->master_reg_read(adapter, SWBL_REGOUT,
- &regout_val, 2);
+ &regout_val,
+ RSI_COMMON_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE,
"%s: REGOUT read failed\n", __func__);
@@ -865,13 +869,26 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
(regout_val & 0xff));
status = hif_ops->master_reg_write(adapter, SWBL_REGOUT,
- (REGOUT_INVALID | REGOUT_INVALID << 8),
- 2);
- if (status < 0) {
+ (REGOUT_INVALID |
+ REGOUT_INVALID << 8),
+ RSI_COMMON_REG_SIZE);
+ if (status < 0)
rsi_dbg(ERR_ZONE, "%s: REGOUT writing failed..\n", __func__);
- return status;
- }
- mdelay(1);
+ else
+ rsi_dbg(INFO_ZONE,
+ "===> Device is ready to load firmware <===\n");
+
+ return status;
+}
+
+static int rsi_load_9113_firmware(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ const struct firmware *fw_entry = NULL;
+ u32 content_size;
+ u16 tmp_regout_val = 0;
+ struct ta_metadata *metadata_p;
+ int status;
status = bl_cmd(adapter, CONFIG_AUTO_READ_MODE, CMD_PASS,
"AUTO_READ_CMD");
@@ -902,13 +919,15 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
/* Get the firmware version */
common->lmac_ver.ver.info.fw_ver[0] =
- fw_entry->data[LMAC_VER_OFFSET] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET_9113] & 0xFF;
common->lmac_ver.ver.info.fw_ver[1] =
- fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF;
- common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET_9113 + 1] & 0xFF;
+ common->lmac_ver.major =
+ fw_entry->data[LMAC_VER_OFFSET_9113 + 2] & 0xFF;
common->lmac_ver.release_num =
- fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF;
- common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET_9113 + 3] & 0xFF;
+ common->lmac_ver.minor =
+ fw_entry->data[LMAC_VER_OFFSET_9113 + 4] & 0xFF;
common->lmac_ver.patch_num = 0;
rsi_print_version(common);
@@ -977,19 +996,161 @@ fail:
return status;
}
+static int rsi_load_9116_firmware(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
+ const struct firmware *fw_entry;
+ struct ta_metadata *metadata_p;
+ u8 *ta_firmware, *fw_p;
+ struct bootload_ds bootload_ds;
+ u32 instructions_sz, base_address;
+ u16 block_size = adapter->block_size;
+ u32 dest, len;
+ int status, cnt;
+
+ rsi_dbg(INIT_ZONE, "***** Load 9116 TA Instructions *****\n");
+
+ if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) {
+ status = bl_cmd(adapter, POLLING_MODE, CMD_PASS,
+ "POLLING_MODE");
+ if (status < 0)
+ return status;
+ }
+
+ status = hif_ops->master_reg_write(adapter, MEM_ACCESS_CTRL_FROM_HOST,
+ RAM_384K_ACCESS_FROM_TA,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to access full RAM memory\n",
+ __func__);
+ return status;
+ }
+
+ metadata_p = &metadata[adapter->priv->coex_mode];
+ rsi_dbg(INIT_ZONE, "%s: loading file %s\n", __func__, metadata_p->name);
+ status = request_firmware(&fw_entry, metadata_p->name, adapter->device);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to open file %s\n",
+ __func__, metadata_p->name);
+ return status;
+ }
+
+ ta_firmware = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!ta_firmware) {
+ status = -ENOMEM;
+ goto fail_release_fw;
+ }
+ fw_p = ta_firmware;
+ instructions_sz = fw_entry->size;
+ rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", instructions_sz);
+
+ common->lmac_ver.major = ta_firmware[LMAC_VER_OFFSET_9116];
+ common->lmac_ver.minor = ta_firmware[LMAC_VER_OFFSET_9116 + 1];
+ common->lmac_ver.release_num = ta_firmware[LMAC_VER_OFFSET_9116 + 2];
+ common->lmac_ver.patch_num = ta_firmware[LMAC_VER_OFFSET_9116 + 3];
+ common->lmac_ver.ver.info.fw_ver[0] =
+ ta_firmware[LMAC_VER_OFFSET_9116 + 4];
+
+ if (instructions_sz % FW_ALIGN_SIZE)
+ instructions_sz +=
+ (FW_ALIGN_SIZE - (instructions_sz % FW_ALIGN_SIZE));
+ rsi_dbg(INFO_ZONE, "instructions_sz : %d\n", instructions_sz);
+
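+ /* images that begin with the magic word carry a bootload descriptor
+ * table; load each (dst_addr, length) chunk until the LAST_ENTRY
+ * flag is seen, otherwise write the whole image to the base address
+ * from the metadata table
+ */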
+ if (*(u16 *)fw_p == RSI_9116_FW_MAGIC_WORD) {
+ memcpy(&bootload_ds, fw_p, sizeof(struct bootload_ds));
+ fw_p += le16_to_cpu(bootload_ds.offset);
+ rsi_dbg(INFO_ZONE, "FW start = %x\n", *(u32 *)fw_p);
+
+ cnt = 0;
+ do {
+ rsi_dbg(ERR_ZONE, "%s: Loading chunk %d\n",
+ __func__, cnt);
+
+ dest = le32_to_cpu(bootload_ds.bl_entry[cnt].dst_addr);
+ len = le32_to_cpu(bootload_ds.bl_entry[cnt].control) &
+ RSI_BL_CTRL_LEN_MASK;
+ rsi_dbg(INFO_ZONE, "length %d destination %x\n",
+ len, dest);
+
+ status = hif_ops->load_data_master_write(adapter, dest,
+ len,
+ block_size,
+ fw_p);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Failed to load chunk %d\n", cnt);
+ break;
+ }
+ fw_p += len;
+ if (le32_to_cpu(bootload_ds.bl_entry[cnt].control) &
+ RSI_BL_CTRL_LAST_ENTRY)
+ break;
+ cnt++;
+ } while (1);
+ } else {
+ base_address = metadata_p->address;
+ status = hif_ops->load_data_master_write(adapter,
+ base_address,
+ instructions_sz,
+ block_size,
+ ta_firmware);
+ }
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load %s blk\n",
+ __func__, metadata_p->name);
+ goto fail_free_fw;
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: Successfully loaded %s instructions\n",
+ __func__, metadata_p->name);
+
+ if (adapter->rsi_host_intf == RSI_HOST_INTF_SDIO) {
+ if (hif_ops->ta_reset(adapter))
+ rsi_dbg(ERR_ZONE, "Unable to put ta in reset\n");
+ } else {
+ if (bl_cmd(adapter, JUMP_TO_ZERO_PC,
+ CMD_PASS, "JUMP_TO_ZERO") < 0)
+ rsi_dbg(INFO_ZONE, "Jump to zero command failed\n");
+ else
+ rsi_dbg(INFO_ZONE, "Jump to zero command successful\n");
+ }
+
+fail_free_fw:
+ kfree(ta_firmware);
+fail_release_fw:
+ release_firmware(fw_entry);
+
+ return status;
+}
+
int rsi_hal_device_init(struct rsi_hw *adapter)
{
struct rsi_common *common = adapter->priv;
+ int status;
switch (adapter->device_model) {
case RSI_DEV_9113:
- if (rsi_load_firmware(adapter)) {
+ status = rsi_hal_prepare_fwload(adapter);
+ if (status < 0)
+ return status;
+ if (rsi_load_9113_firmware(adapter)) {
rsi_dbg(ERR_ZONE,
"%s: Failed to load TA instructions\n",
__func__);
return -EINVAL;
}
break;
+ case RSI_DEV_9116:
+ status = rsi_hal_prepare_fwload(adapter);
+ if (status < 0)
+ return status;
+ if (rsi_load_9116_firmware(adapter)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to load firmware to 9116 device\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 831046e760f8..49df3bb08d41 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -188,27 +188,27 @@ bool rsi_is_cipher_wep(struct rsi_common *common)
* @adapter: Pointer to the adapter structure.
* @band: Operating band to be set.
*
- * Return: None.
+ * Return: int - 0 on success, negative error on failure.
*/
-static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
+static int rsi_register_rates_channels(struct rsi_hw *adapter, int band)
{
struct ieee80211_supported_band *sbands = &adapter->sbands[band];
void *channels = NULL;
if (band == NL80211_BAND_2GHZ) {
- channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
- memcpy(channels,
- rsi_2ghz_channels,
- sizeof(rsi_2ghz_channels));
+ channels = kmemdup(rsi_2ghz_channels, sizeof(rsi_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
sbands->band = NL80211_BAND_2GHZ;
sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
sbands->bitrates = rsi_rates;
sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
} else {
- channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
- memcpy(channels,
- rsi_5ghz_channels,
- sizeof(rsi_5ghz_channels));
+ channels = kmemdup(rsi_5ghz_channels, sizeof(rsi_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
sbands->band = NL80211_BAND_5GHZ;
sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
sbands->bitrates = &rsi_rates[4];
@@ -227,6 +227,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
sbands->ht_cap.mcs.rx_mask[0] = 0xff;
sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
/* sbands->ht_cap.mcs.rx_highest = 0x82; */
+ return 0;
}
static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
@@ -2064,11 +2065,16 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->available_antennas_rx = 1;
wiphy->available_antennas_tx = 1;
- rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+ status = rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+ if (status)
+ return status;
wiphy->bands[NL80211_BAND_2GHZ] =
&adapter->sbands[NL80211_BAND_2GHZ];
if (common->num_supp_bands > 1) {
- rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
+ status = rsi_register_rates_channels(adapter,
+ NL80211_BAND_5GHZ);
+ if (status)
+ return status;
wiphy->bands[NL80211_BAND_5GHZ] =
&adapter->sbands[NL80211_BAND_5GHZ];
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 844f2fac298f..6c7f26ef6476 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -209,6 +209,59 @@ static struct bootup_params boot_params_40 = {
.beacon_resedue_alg_en = 0,
};
+static struct bootup_params_9116 boot_params_9116_20 = {
+ .magic_number = cpu_to_le16(LOADED_TOKEN),
+ .valid = cpu_to_le32(VALID_20),
+ .device_clk_info_9116 = {{
+ .pll_config_9116_g = {
+ .pll_ctrl_set_reg = cpu_to_le16(0xd518),
+ .pll_ctrl_clr_reg = cpu_to_le16(0x2ae7),
+ .pll_modem_conig_reg = cpu_to_le16(0x2000),
+ .soc_clk_config_reg = cpu_to_le16(0x0c18),
+ .adc_dac_strm1_config_reg = cpu_to_le16(0x1100),
+ .adc_dac_strm2_config_reg = cpu_to_le16(0x6600),
+ },
+ .switch_clk_9116_g = {
+ .switch_clk_info =
+ cpu_to_le32((RSI_SWITCH_TASS_CLK |
+ RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG |
+ RSI_SWITCH_BBP_LMAC_CLK_REG)),
+ .tass_clock_reg = cpu_to_le32(0x083C0503),
+ .wlan_bbp_lmac_clk_reg_val = cpu_to_le32(0x01042001),
+ .zbbt_bbp_lmac_clk_reg_val = cpu_to_le32(0x02010001),
+ .bbp_lmac_clk_en_val = cpu_to_le32(0x0000003b),
+ }
+ },
+ },
+};
+
+static struct bootup_params_9116 boot_params_9116_40 = {
+ .magic_number = cpu_to_le16(LOADED_TOKEN),
+ .valid = cpu_to_le32(VALID_40),
+ .device_clk_info_9116 = {{
+ .pll_config_9116_g = {
+ .pll_ctrl_set_reg = cpu_to_le16(0xd518),
+ .pll_ctrl_clr_reg = cpu_to_le16(0x2ae7),
+ .pll_modem_conig_reg = cpu_to_le16(0x3000),
+ .soc_clk_config_reg = cpu_to_le16(0x0c18),
+ .adc_dac_strm1_config_reg = cpu_to_le16(0x0000),
+ .adc_dac_strm2_config_reg = cpu_to_le16(0x6600),
+ },
+ .switch_clk_9116_g = {
+ .switch_clk_info =
+ cpu_to_le32((RSI_SWITCH_TASS_CLK |
+ RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG |
+ RSI_SWITCH_BBP_LMAC_CLK_REG |
+ RSI_MODEM_CLK_160MHZ)),
+ .tass_clock_reg = cpu_to_le32(0x083C0503),
+ .wlan_bbp_lmac_clk_reg_val = cpu_to_le32(0x01042002),
+ .zbbt_bbp_lmac_clk_reg_val = cpu_to_le32(0x04010002),
+ .bbp_lmac_clk_en_val = cpu_to_le32(0x0000003b),
+ }
+ },
+ },
+};
+
static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
/**
@@ -235,6 +288,14 @@ static void rsi_set_default_parameters(struct rsi_common *common)
common->obm_ant_sel_val = 2;
common->beacon_interval = RSI_BEACON_INTERVAL;
common->dtim_cnt = RSI_DTIM_COUNT;
+ common->w9116_features.pll_mode = 0x0;
+ common->w9116_features.rf_type = 1;
+ common->w9116_features.wireless_mode = 0;
+ common->w9116_features.enable_ppe = 0;
+ common->w9116_features.afe_type = 1;
+ common->w9116_features.dpd = 0;
+ common->w9116_features.sifs_tx_enable = 0;
+ common->w9116_features.ps_options = 0;
}
void init_bgscan_params(struct rsi_common *common)
@@ -363,6 +424,10 @@ static int rsi_load_radio_caps(struct rsi_common *common)
}
radio_caps->radio_info |= radio_id;
+ if (adapter->device_model == RSI_DEV_9116 &&
+ common->channel_width == BW_20MHZ)
+ radio_caps->radio_cfg_info &= ~0x3;
+
radio_caps->sifs_tx_11n = cpu_to_le16(SIFS_TX_11N_VALUE);
radio_caps->sifs_tx_11b = cpu_to_le16(SIFS_TX_11B_VALUE);
radio_caps->slot_rx_11n = cpu_to_le16(SHORT_SLOT_VALUE);
@@ -378,14 +443,16 @@ static int rsi_load_radio_caps(struct rsi_common *common)
}
for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
- radio_caps->qos_params[ii].cont_win_min_q =
- cpu_to_le16(common->edca_params[ii].cw_min);
- radio_caps->qos_params[ii].cont_win_max_q =
- cpu_to_le16(common->edca_params[ii].cw_max);
- radio_caps->qos_params[ii].aifsn_val_q =
- cpu_to_le16((common->edca_params[ii].aifs) << 8);
- radio_caps->qos_params[ii].txop_q =
- cpu_to_le16(common->edca_params[ii].txop);
+ if (common->edca_params[ii].cw_max > 0) {
+ radio_caps->qos_params[ii].cont_win_min_q =
+ cpu_to_le16(common->edca_params[ii].cw_min);
+ radio_caps->qos_params[ii].cont_win_max_q =
+ cpu_to_le16(common->edca_params[ii].cw_max);
+ radio_caps->qos_params[ii].aifsn_val_q =
+ cpu_to_le16(common->edca_params[ii].aifs << 8);
+ radio_caps->qos_params[ii].txop_q =
+ cpu_to_le16(common->edca_params[ii].txop);
+ }
}
radio_caps->qos_params[BROADCAST_HW_Q].txop_q = cpu_to_le16(0xffff);
@@ -893,6 +960,50 @@ static int rsi_load_bootup_params(struct rsi_common *common)
return rsi_send_internal_mgmt_frame(common, skb);
}
+static int rsi_load_9116_bootup_params(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_boot_params_9116 *boot_params;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_boot_params_9116));
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, sizeof(struct rsi_boot_params_9116));
+ boot_params = (struct rsi_boot_params_9116 *)skb->data;
+
+ if (common->channel_width == BW_40MHZ) {
+ memcpy(&boot_params->bootup_params,
+ &boot_params_9116_40,
+ sizeof(struct bootup_params_9116));
+ rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__,
+ UMAC_CLK_40BW);
+ boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40BW);
+ } else {
+ memcpy(&boot_params->bootup_params,
+ &boot_params_9116_20,
+ sizeof(struct bootup_params_9116));
+ if (boot_params_20.valid != cpu_to_le32(VALID_20)) {
+ boot_params->umac_clk = cpu_to_le16(UMAC_CLK_20BW);
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Packet 20MHZ <=== %d\n", __func__,
+ UMAC_CLK_20BW);
+ } else {
+ boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40MHZ);
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Packet 20MHZ <=== %d\n", __func__,
+ UMAC_CLK_40MHZ);
+ }
+ }
+ rsi_set_len_qno(&boot_params->desc_dword0.len_qno,
+ sizeof(struct bootup_params_9116), RSI_WIFI_MGMT_Q);
+ boot_params->desc_dword0.frame_type = BOOTUP_PARAMS_REQUEST;
+ skb_put(skb, sizeof(struct rsi_boot_params_9116));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
/**
* rsi_send_reset_mac() - This function prepares reset MAC request and sends an
* internal management frame to indicate it to firmware.
@@ -921,6 +1032,11 @@ static int rsi_send_reset_mac(struct rsi_common *common)
mgmt_frame->desc_word[1] = cpu_to_le16(RESET_MAC_REQ);
mgmt_frame->desc_word[4] = cpu_to_le16(RETRY_COUNT << 8);
+#define RSI_9116_DEF_TA_AGGR 3
+ if (common->priv->device_model == RSI_DEV_9116)
+ mgmt_frame->desc_word[3] |=
+ cpu_to_le16(RSI_9116_DEF_TA_AGGR << 8);
+
skb_put(skb, FRAME_DESC_SZ);
return rsi_send_internal_mgmt_frame(common, skb);
@@ -971,7 +1087,10 @@ int rsi_band_check(struct rsi_common *common,
}
if (common->channel_width != prev_bw) {
- status = rsi_load_bootup_params(common);
+ if (adapter->device_model == RSI_DEV_9116)
+ status = rsi_load_9116_bootup_params(common);
+ else
+ status = rsi_load_bootup_params(common);
if (status)
return status;
@@ -1546,6 +1665,47 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
return rsi_send_internal_mgmt_frame(common, skb);
}
+static int rsi_send_w9116_features(struct rsi_common *common)
+{
+ struct rsi_wlan_9116_features *w9116_features;
+ u16 frame_len = sizeof(struct rsi_wlan_9116_features);
+ struct sk_buff *skb;
+
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Sending wlan 9116 features\n", __func__);
+
+ skb = dev_alloc_skb(frame_len);
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, frame_len);
+
+ w9116_features = (struct rsi_wlan_9116_features *)skb->data;
+
+ w9116_features->pll_mode = common->w9116_features.pll_mode;
+ w9116_features->rf_type = common->w9116_features.rf_type;
+ w9116_features->wireless_mode = common->w9116_features.wireless_mode;
+ w9116_features->enable_ppe = common->w9116_features.enable_ppe;
+ w9116_features->afe_type = common->w9116_features.afe_type;
+ if (common->w9116_features.dpd)
+ w9116_features->feature_enable |= cpu_to_le32(RSI_DPD);
+ if (common->w9116_features.sifs_tx_enable)
+ w9116_features->feature_enable |=
+ cpu_to_le32(RSI_SIFS_TX_ENABLE);
+ if (common->w9116_features.ps_options & RSI_DUTY_CYCLING)
+ w9116_features->feature_enable |= cpu_to_le32(RSI_DUTY_CYCLING);
+ if (common->w9116_features.ps_options & RSI_END_OF_FRAME)
+ w9116_features->feature_enable |= cpu_to_le32(RSI_END_OF_FRAME);
+ w9116_features->feature_enable |=
+ cpu_to_le32((common->w9116_features.ps_options & ~0x3) << 2);
+
+ rsi_set_len_qno(&w9116_features->desc.desc_dword0.len_qno,
+ frame_len - FRAME_DESC_SZ, RSI_WIFI_MGMT_Q);
+ w9116_features->desc.desc_dword0.frame_type = FEATURES_ENABLE;
+ skb_put(skb, frame_len);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
/**
* rsi_set_antenna() - This function send antenna configuration request
* to device
@@ -1766,15 +1926,26 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n",
__func__);
if (common->fsm_state == FSM_BOOT_PARAMS_SENT) {
- adapter->eeprom.length = (IEEE80211_ADDR_LEN +
- WLAN_MAC_MAGIC_WORD_LEN +
- WLAN_HOST_MODE_LEN);
- adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR;
- if (rsi_eeprom_read(common)) {
- common->fsm_state = FSM_CARD_NOT_READY;
- goto out;
+ if (adapter->device_model == RSI_DEV_9116) {
+ common->band = NL80211_BAND_5GHZ;
+ common->num_supp_bands = 2;
+
+ if (rsi_send_reset_mac(common))
+ goto out;
+ else
+ common->fsm_state = FSM_RESET_MAC_SENT;
+ } else {
+ adapter->eeprom.length =
+ (IEEE80211_ADDR_LEN +
+ WLAN_MAC_MAGIC_WORD_LEN +
+ WLAN_HOST_MODE_LEN);
+ adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR;
+ if (rsi_eeprom_read(common)) {
+ common->fsm_state = FSM_CARD_NOT_READY;
+ goto out;
+ }
+ common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
}
- common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
} else {
rsi_dbg(INFO_ZONE,
"%s: Received bootup params cfm in %d state\n",
@@ -1853,6 +2024,12 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
case RADIO_CAPABILITIES:
if (common->fsm_state == FSM_RADIO_CAPS_SENT) {
common->rf_reset = 1;
+ if (adapter->device_model == RSI_DEV_9116 &&
+ rsi_send_w9116_features(common)) {
+ rsi_dbg(ERR_ZONE,
+ "Failed to send 9116 features\n");
+ goto out;
+ }
if (rsi_program_bb_rf(common)) {
goto out;
} else {
@@ -1925,6 +2102,8 @@ out:
int rsi_handle_card_ready(struct rsi_common *common, u8 *msg)
{
+ int status;
+
switch (common->fsm_state) {
case FSM_CARD_NOT_READY:
rsi_dbg(INIT_ZONE, "Card ready indication from Common HAL\n");
@@ -1936,14 +2115,29 @@ int rsi_handle_card_ready(struct rsi_common *common, u8 *msg)
case FSM_COMMON_DEV_PARAMS_SENT:
rsi_dbg(INIT_ZONE, "Card ready indication from WLAN HAL\n");
+ if (common->priv->device_model == RSI_DEV_9116) {
+ if (msg[16] != MAGIC_WORD) {
+ rsi_dbg(FSM_ZONE,
+ "%s: [EEPROM_READ] Invalid token\n",
+ __func__);
+ common->fsm_state = FSM_CARD_NOT_READY;
+ return -EINVAL;
+ }
+ memcpy(common->mac_addr, &msg[20], ETH_ALEN);
+ rsi_dbg(INIT_ZONE, "MAC Addr %pM", common->mac_addr);
+ }
/* Get usb buffer status register address */
common->priv->usb_buffer_status_reg = *(u32 *)&msg[8];
rsi_dbg(INFO_ZONE, "USB buffer status register = %x\n",
common->priv->usb_buffer_status_reg);
- if (rsi_load_bootup_params(common)) {
+ if (common->priv->device_model == RSI_DEV_9116)
+ status = rsi_load_9116_bootup_params(common);
+ else
+ status = rsi_load_bootup_params(common);
+ if (status < 0) {
common->fsm_state = FSM_CARD_NOT_READY;
- return -EINVAL;
+ return status;
}
common->fsm_state = FSM_BOOT_PARAMS_SENT;
break;
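
Editorial note on the rsi_91x_mgmt.c hunks: the new rsi_send_w9116_features() builds its little-endian feature_enable word by OR-ing individual flag bits (RSI_DPD, RSI_SIFS_TX_ENABLE, the two low ps_options bits) and then folding the remaining ps_options bits in above them. A minimal userspace C sketch of that accumulation, using the same bit values but hypothetical option inputs, might look like this:

#include <stdint.h>
#include <stdio.h>

#define RSI_DUTY_CYCLING   (1u << 0)
#define RSI_END_OF_FRAME   (1u << 1)
#define RSI_SIFS_TX_ENABLE (1u << 2)
#define RSI_DPD            (1u << 3)

/* Build the feature bitmap the way the hunk above does: per-option flags
 * first, then the remaining ps_options bits shifted past the flag bits. */
static uint32_t build_features(int dpd, int sifs_tx, uint32_t ps_options)
{
	uint32_t features = 0;

	if (dpd)
		features |= RSI_DPD;
	if (sifs_tx)
		features |= RSI_SIFS_TX_ENABLE;
	if (ps_options & RSI_DUTY_CYCLING)
		features |= RSI_DUTY_CYCLING;
	if (ps_options & RSI_END_OF_FRAME)
		features |= RSI_END_OF_FRAME;
	/* bits above the two consumed ps_options bits land after the flags */
	features |= (ps_options & ~0x3u) << 2;

	return features;
}

int main(void)
{
	printf("0x%08x\n", build_features(1, 0, 0x5));	/* prints 0x00000019 */
	return 0;
}

In the driver the result is additionally passed through cpu_to_le32() before it is placed in the frame, so the on-wire byte order is fixed regardless of host endianness.
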
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 3430d7a0899e..f9c67ed473d1 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -923,6 +923,70 @@ static int rsi_sdio_reinit_device(struct rsi_hw *adapter)
return 0;
}
+static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
+{
+ int status;
+ u32 addr;
+ u8 *data;
+
+ status = rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Unable to set ms word to common reg\n");
+ return status;
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: Bring TA out of reset\n", __func__);
+ put_unaligned_le32(TA_HOLD_THREAD_VALUE, data);
+ addr = TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER;
+ status = rsi_sdio_write_register_multiple(adapter, addr,
+ (u8 *)&data,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to hold TA threads\n");
+ return status;
+ }
+
+ put_unaligned_le32(TA_SOFT_RST_CLR, data);
+ addr = TA_SOFT_RESET_REG | RSI_SD_REQUEST_MASTER;
+ status = rsi_sdio_write_register_multiple(adapter, addr,
+ (u8 *)&data,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to get TA out of reset\n");
+ return status;
+ }
+
+ put_unaligned_le32(TA_PC_ZERO, data);
+ addr = TA_TH0_PC_REG | RSI_SD_REQUEST_MASTER;
+ status = rsi_sdio_write_register_multiple(adapter, addr,
+ (u8 *)&data,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to Reset TA PC value\n");
+ return -EINVAL;
+ }
+
+ put_unaligned_le32(TA_RELEASE_THREAD_VALUE, data);
+ addr = TA_RELEASE_THREAD_REG | RSI_SD_REQUEST_MASTER;
+ status = rsi_sdio_write_register_multiple(adapter, addr,
+ (u8 *)&data,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to release TA threads\n");
+ return status;
+ }
+
+ status = rsi_sdio_master_access_msword(adapter, MISC_CFG_BASE_ADDR);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to set ms word to common reg\n");
+ return status;
+ }
+ rsi_dbg(INIT_ZONE, "***** TA Reset done *****\n");
+
+ return 0;
+}
+
static struct rsi_host_intf_ops sdio_host_intf_ops = {
.write_pkt = rsi_sdio_host_intf_write_pkt,
.read_pkt = rsi_sdio_host_intf_read_pkt,
@@ -933,6 +997,7 @@ static struct rsi_host_intf_ops sdio_host_intf_ops = {
.master_reg_write = rsi_sdio_master_reg_write,
.load_data_master_write = rsi_sdio_load_data_master_write,
.reinit_device = rsi_sdio_reinit_device,
+ .ta_reset = rsi_sdio_ta_reset,
};
/**
@@ -949,7 +1014,7 @@ static int rsi_probe(struct sdio_func *pfunction,
{
struct rsi_hw *adapter;
struct rsi_91x_sdiodev *sdev;
- int status;
+ int status = -EINVAL;
rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
@@ -968,6 +1033,20 @@ static int rsi_probe(struct sdio_func *pfunction,
status = -EIO;
goto fail_free_adapter;
}
+
+ if (pfunction->device == RSI_SDIO_PID_9113) {
+ rsi_dbg(ERR_ZONE, "%s: 9113 module detected\n", __func__);
+ adapter->device_model = RSI_DEV_9113;
+ } else if (pfunction->device == RSI_SDIO_PID_9116) {
+ rsi_dbg(ERR_ZONE, "%s: 9116 module detected\n", __func__);
+ adapter->device_model = RSI_DEV_9116;
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unsupported RSI device id 0x%x\n", __func__,
+ pfunction->device);
+ goto fail_free_adapter;
+ }
+
sdev = (struct rsi_91x_sdiodev *)adapter->rsi_dev;
rsi_init_event(&sdev->rx_thread.event);
status = rsi_create_kthread(adapter->priv, &sdev->rx_thread,
@@ -1088,16 +1167,41 @@ static void rsi_reset_chip(struct rsi_hw *adapter)
* and any pending dma transfers to rf spi in device to finish.
*/
msleep(100);
-
- ulp_read_write(adapter, RSI_ULP_RESET_REG, RSI_ULP_WRITE_0, 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, RSI_ULP_WRITE_0, 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1, RSI_ULP_WRITE_50,
- 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2, RSI_ULP_WRITE_0,
- 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
- RSI_ULP_TIMER_ENABLE, 32);
+ if (adapter->device_model != RSI_DEV_9116) {
+ ulp_read_write(adapter, RSI_ULP_RESET_REG, RSI_ULP_WRITE_0, 32);
+ ulp_read_write(adapter,
+ RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32);
+ ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, RSI_ULP_WRITE_0,
+ 32);
+ ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1,
+ RSI_ULP_WRITE_50, 32);
+ ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2,
+ RSI_ULP_WRITE_0, 32);
+ ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
+ RSI_ULP_TIMER_ENABLE, 32);
+ } else {
+ if ((rsi_sdio_master_reg_write(adapter,
+ NWP_WWD_INTERRUPT_TIMER,
+ NWP_WWD_INT_TIMER_CLKS,
+ RSI_9116_REG_SIZE)) < 0) {
+ rsi_dbg(ERR_ZONE, "Failed to write to intr timer\n");
+ }
+ if ((rsi_sdio_master_reg_write(adapter,
+ NWP_WWD_SYSTEM_RESET_TIMER,
+ NWP_WWD_SYS_RESET_TIMER_CLKS,
+ RSI_9116_REG_SIZE)) < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Failed to write to system reset timer\n");
+ }
+ if ((rsi_sdio_master_reg_write(adapter,
+ NWP_WWD_MODE_AND_RSTART,
+ NWP_WWD_TIMER_DISABLE,
+ RSI_9116_REG_SIZE)) < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Failed to write to mode and restart\n");
+ }
+ rsi_dbg(ERR_ZONE, "***** Watch Dog Reset Successful *****\n");
+ }
/* This msleep will be sufficient for the ulp
* read write operations to complete for chip reset.
*/
@@ -1415,7 +1519,8 @@ static const struct dev_pm_ops rsi_pm_ops = {
#endif
static const struct sdio_device_id rsi_dev_table[] = {
- { SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) },
+ { SDIO_DEVICE(RSI_SDIO_VENDOR_ID, RSI_SDIO_PID_9113) },
+ { SDIO_DEVICE(RSI_SDIO_VENDOR_ID, RSI_SDIO_PID_9116) },
{ /* Blank */},
};
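
Editorial note on rsi_sdio_ta_reset(): each 32-bit register value is serialized with put_unaligned_le32() before being handed to rsi_sdio_write_register_multiple(). As quoted, `data` is only declared as `u8 *` and is never pointed at storage before being written through, and `(u8 *)&data` passes the address of the pointer variable itself; a standalone sketch of the intended little-endian serialization, writing into a properly sized local buffer (put_le32 below is an assumed stand-in for the kernel helper), might look like this:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for put_unaligned_le32(): store a 32-bit value LSB-first. */
static void put_le32(uint32_t val, uint8_t *buf)
{
	buf[0] = val & 0xff;
	buf[1] = (val >> 8) & 0xff;
	buf[2] = (val >> 16) & 0xff;
	buf[3] = (val >> 24) & 0xff;
}

int main(void)
{
	uint8_t reg_buf[4];		/* 4 bytes, i.e. RSI_9116_REG_SIZE */

	put_le32(0xF, reg_buf);		/* e.g. TA_HOLD_THREAD_VALUE */
	printf("%02x %02x %02x %02x\n",
	       reg_buf[0], reg_buf[1], reg_buf[2], reg_buf[3]);
	/* prints: 0f 00 00 00 */
	return 0;
}

This is also why the TA_RELEASE_THREAD_VALUE definition in rsi_sdio.h (further down) drops its cpu_to_le32() wrapper: the endianness conversion now happens at the put_unaligned_le32() call sites.
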
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index ac0ef5ea6ffb..f5048d4b8cb6 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -213,7 +213,7 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
*/
static int rsi_usb_reg_write(struct usb_device *usbdev,
u32 reg,
- u16 value,
+ u32 value,
u16 len)
{
u8 *usb_reg_buf;
@@ -226,17 +226,17 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
if (!usb_reg_buf)
return status;
- usb_reg_buf[0] = (value & 0x00ff);
- usb_reg_buf[1] = (value & 0xff00) >> 8;
- usb_reg_buf[2] = 0x0;
- usb_reg_buf[3] = 0x0;
+ usb_reg_buf[0] = (cpu_to_le32(value) & 0x00ff);
+ usb_reg_buf[1] = (cpu_to_le32(value) & 0xff00) >> 8;
+ usb_reg_buf[2] = (cpu_to_le32(value) & 0x00ff0000) >> 16;
+ usb_reg_buf[3] = (cpu_to_le32(value) & 0xff000000) >> 24;
status = usb_control_msg(usbdev,
usb_sndctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_WRITE,
RSI_USB_REQ_OUT,
- ((reg & 0xffff0000) >> 16),
- (reg & 0xffff),
+ ((cpu_to_le32(reg) & 0xffff0000) >> 16),
+ (cpu_to_le32(reg) & 0xffff),
(void *)usb_reg_buf,
len,
USB_CTRL_SET_TIMEOUT);
@@ -263,8 +263,10 @@ static void rsi_rx_done_handler(struct urb *urb)
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data;
int status = -EINVAL;
- if (urb->status)
- goto out;
+ if (urb->status) {
+ dev_kfree_skb(rx_cb->rx_skb);
+ return;
+ }
if (urb->actual_length <= 0 ||
urb->actual_length > rx_cb->rx_skb->len) {
@@ -698,26 +700,47 @@ static int rsi_reset_card(struct rsi_hw *adapter)
goto fail;
}
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1,
- RSI_ULP_WRITE_2, 32);
- if (ret < 0)
- goto fail;
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2,
- RSI_ULP_WRITE_0, 32);
- if (ret < 0)
- goto fail;
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1,
- RSI_ULP_WRITE_50, 32);
- if (ret < 0)
- goto fail;
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2,
- RSI_ULP_WRITE_0, 32);
- if (ret < 0)
- goto fail;
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
- RSI_ULP_TIMER_ENABLE, 32);
- if (ret < 0)
- goto fail;
+ if (adapter->device_model != RSI_DEV_9116) {
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1,
+ RSI_ULP_WRITE_2, 32);
+ if (ret < 0)
+ goto fail;
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2,
+ RSI_ULP_WRITE_0, 32);
+ if (ret < 0)
+ goto fail;
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1,
+ RSI_ULP_WRITE_50, 32);
+ if (ret < 0)
+ goto fail;
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2,
+ RSI_ULP_WRITE_0, 32);
+ if (ret < 0)
+ goto fail;
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
+ RSI_ULP_TIMER_ENABLE, 32);
+ if (ret < 0)
+ goto fail;
+ } else {
+ if ((rsi_usb_master_reg_write(adapter,
+ NWP_WWD_INTERRUPT_TIMER,
+ NWP_WWD_INT_TIMER_CLKS,
+ RSI_9116_REG_SIZE)) < 0) {
+ goto fail;
+ }
+ if ((rsi_usb_master_reg_write(adapter,
+ NWP_WWD_SYSTEM_RESET_TIMER,
+ NWP_WWD_SYS_RESET_TIMER_CLKS,
+ RSI_9116_REG_SIZE)) < 0) {
+ goto fail;
+ }
+ if ((rsi_usb_master_reg_write(adapter,
+ NWP_WWD_MODE_AND_RSTART,
+ NWP_WWD_TIMER_DISABLE,
+ RSI_9116_REG_SIZE)) < 0) {
+ goto fail;
+ }
+ }
rsi_dbg(INFO_ZONE, "Reset card done\n");
return ret;
@@ -763,6 +786,18 @@ static int rsi_probe(struct usb_interface *pfunction,
rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
+ if (id && id->idProduct == RSI_USB_PID_9113) {
+ rsi_dbg(INIT_ZONE, "%s: 9113 module detected\n", __func__);
+ adapter->device_model = RSI_DEV_9113;
+ } else if (id && id->idProduct == RSI_USB_PID_9116) {
+ rsi_dbg(INIT_ZONE, "%s: 9116 module detected\n", __func__);
+ adapter->device_model = RSI_DEV_9116;
+ } else {
+ rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
+ __func__, id->idProduct);
+ goto err1;
+ }
+
dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
status = rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2);
@@ -845,7 +880,8 @@ static int rsi_resume(struct usb_interface *intf)
#endif
static const struct usb_device_id rsi_dev_table[] = {
- { USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) },
+ { USB_DEVICE(RSI_USB_VENDOR_ID, RSI_USB_PID_9113) },
+ { USB_DEVICE(RSI_USB_VENDOR_ID, RSI_USB_PID_9116) },
{ /* Blank */},
};
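
Editorial note on rsi_usb_reg_write(): the rewritten helper carries a full 32-bit value and splits both the value and the register address into the byte and half-word fields of the vendor control transfer. Worth noting: masking the result of cpu_to_le32() only picks out the intended bytes on little-endian hosts; shifting the native value (or using put_unaligned_le32()) is endian-neutral. A small sketch of the portable packing, with illustrative register and value numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x00025000;	/* e.g. USB_INTERNAL_REG_1 */
	uint32_t value = 0x12345678;	/* illustrative value */
	uint16_t windex = reg >> 16;	/* high half -> wIndex */
	uint16_t wvalue = reg & 0xffff;	/* low half  -> wValue */
	uint8_t buf[4] = {		/* value serialized LSB-first */
		value & 0xff,
		(value >> 8) & 0xff,
		(value >> 16) & 0xff,
		(value >> 24) & 0xff,
	};

	printf("wIndex=0x%04x wValue=0x%04x buf=%02x %02x %02x %02x\n",
	       windex, wvalue, buf[0], buf[1], buf[2], buf[3]);
	/* prints: wIndex=0x0002 wValue=0x5000 buf=78 56 34 12 */
	return 0;
}
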
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
index ad903b22440e..c1cf19d1e376 100644
--- a/drivers/net/wireless/rsi/rsi_boot_params.h
+++ b/drivers/net/wireless/rsi/rsi_boot_params.h
@@ -80,6 +80,15 @@ struct pll_config {
struct afepll_info afepll_info_g;
} __packed;
+struct pll_config_9116 {
+ __le16 pll_ctrl_set_reg;
+ __le16 pll_ctrl_clr_reg;
+ __le16 pll_modem_conig_reg;
+ __le16 soc_clk_config_reg;
+ __le16 adc_dac_strm1_config_reg;
+ __le16 adc_dac_strm2_config_reg;
+} __packed;
+
/* structure to store configs related to UMAC clk programming */
struct switch_clk {
__le16 switch_clk_info;
@@ -93,11 +102,32 @@ struct switch_clk {
__le16 qspi_uart_clock_reg_config;
} __packed;
+#define RSI_SWITCH_TASS_CLK BIT(0)
+#define RSI_SWITCH_QSPI_CLK BIT(1)
+#define RSI_SWITCH_SLP_CLK_2_32 BIT(2)
+#define RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG BIT(3)
+#define RSI_SWITCH_ZBBT_BBP_LMAC_CLK_REG BIT(4)
+#define RSI_SWITCH_BBP_LMAC_CLK_REG BIT(5)
+#define RSI_MODEM_CLK_160MHZ BIT(6)
+
+struct switch_clk_9116 {
+ __le32 switch_clk_info;
+ __le32 tass_clock_reg;
+ __le32 wlan_bbp_lmac_clk_reg_val;
+ __le32 zbbt_bbp_lmac_clk_reg_val;
+ __le32 bbp_lmac_clk_en_val;
+} __packed;
+
struct device_clk_info {
struct pll_config pll_config_g;
struct switch_clk switch_clk_g;
} __packed;
+struct device_clk_info_9116 {
+ struct pll_config_9116 pll_config_9116_g;
+ struct switch_clk_9116 switch_clk_9116_g;
+} __packed;
+
struct bootup_params {
__le16 magic_number;
__le16 crystal_good_time;
@@ -127,4 +157,37 @@ struct bootup_params {
__le32 max_threshold_to_avoid_sleep;
u8 beacon_resedue_alg_en;
} __packed;
+
+struct bootup_params_9116 {
+ __le16 magic_number;
+#define LOADED_TOKEN 0x5AA5 /* Bootup params are installed by host
+ * or OTP/FLASH (Bootloader)
+ */
+#define ROM_TOKEN 0x55AA /* Bootup params are taken from ROM
+ * itself in MCU mode.
+ */
+ __le16 crystal_good_time;
+ __le32 valid;
+ __le32 reserved_for_valids;
+ __le16 bootup_mode_info;
+#define BT_COEXIST BIT(0)
+#define BOOTUP_MODE (BIT(2) | BIT(1))
+#define CUR_DEV_MODE_9116 (bootup_params_9116.bootup_mode_info >> 1)
+ __le16 digital_loop_back_params;
+ __le16 rtls_timestamp_en;
+ __le16 host_spi_intr_cfg;
+ struct device_clk_info_9116 device_clk_info_9116[1];
+ __le32 buckboost_wakeup_cnt;
+ __le16 pmu_wakeup_wait;
+ u8 shutdown_wait_time;
+ u8 pmu_slp_clkout_sel;
+ __le32 wdt_prog_value;
+ __le32 wdt_soc_rst_delay;
+ __le32 dcdc_operation_mode;
+ __le32 soc_reset_wait_cnt;
+ __le32 waiting_time_at_fresh_sleep;
+ __le32 max_threshold_to_avoid_sleep;
+ u8 beacon_resedue_alg_en;
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index 327638cdd30b..46e36df9e8e3 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -70,6 +70,21 @@
#define RSI_WATCH_DOG_DELAY_TIMER_2 0x16f
#define RSI_WATCH_DOG_TIMER_ENABLE 0x170
+/* Watchdog timer addresses for 9116 */
+#define NWP_AHB_BASE_ADDR 0x41300000
+#define NWP_WWD_INTERRUPT_TIMER (NWP_AHB_BASE_ADDR + 0x300)
+#define NWP_WWD_SYSTEM_RESET_TIMER (NWP_AHB_BASE_ADDR + 0x304)
+#define NWP_WWD_WINDOW_TIMER (NWP_AHB_BASE_ADDR + 0x308)
+#define NWP_WWD_TIMER_SETTINGS (NWP_AHB_BASE_ADDR + 0x30C)
+#define NWP_WWD_MODE_AND_RSTART (NWP_AHB_BASE_ADDR + 0x310)
+#define NWP_WWD_RESET_BYPASS (NWP_AHB_BASE_ADDR + 0x314)
+#define NWP_FSM_INTR_MASK_REG (NWP_AHB_BASE_ADDR + 0x104)
+
+/* Watchdog timer values */
+#define NWP_WWD_INT_TIMER_CLKS 5
+#define NWP_WWD_SYS_RESET_TIMER_CLKS 4
+#define NWP_WWD_TIMER_DISABLE 0xAA0001
+
#define RSI_ULP_WRITE_0 00
#define RSI_ULP_WRITE_2 02
#define RSI_ULP_WRITE_50 50
@@ -113,9 +128,18 @@
#define BBP_INFO_40MHZ 0x6
#define FW_FLASH_OFFSET 0x820
-#define LMAC_VER_OFFSET (FW_FLASH_OFFSET + 0x200)
+#define LMAC_VER_OFFSET_9113 (FW_FLASH_OFFSET + 0x200)
+#define LMAC_VER_OFFSET_9116 0x22C2
#define MAX_DWORD_ALIGN_BYTES 64
#define RSI_COMMON_REG_SIZE 2
+#define RSI_9116_REG_SIZE 4
+#define FW_ALIGN_SIZE 4
+#define RSI_9116_FW_MAGIC_WORD 0x5aa5
+
+#define MEM_ACCESS_CTRL_FROM_HOST 0x41300000
+#define RAM_384K_ACCESS_FROM_TA (BIT(2) | BIT(3) | BIT(4) | BIT(5) | \
+ BIT(20) | BIT(21) | BIT(22) | \
+ BIT(23) | BIT(24) | BIT(25))
struct bl_header {
__le32 flags;
@@ -130,6 +154,24 @@ struct ta_metadata {
unsigned int address;
};
+#define RSI_BL_CTRL_LEN_MASK 0xFFFFFF
+#define RSI_BL_CTRL_SPI_32BIT_MODE BIT(27)
+#define RSI_BL_CTRL_REL_TA_SOFTRESET BIT(28)
+#define RSI_BL_CTRL_START_FROM_ROM_PC BIT(29)
+#define RSI_BL_CTRL_SPI_8BIT_MODE BIT(30)
+#define RSI_BL_CTRL_LAST_ENTRY BIT(31)
+struct bootload_entry {
+ __le32 control;
+ __le32 dst_addr;
+} __packed;
+
+struct bootload_ds {
+ __le16 fixed_pattern;
+ __le16 offset;
+ __le32 reserved;
+ struct bootload_entry bl_entry[7];
+} __packed;
+
struct rsi_mgmt_desc {
__le16 len_qno;
u8 frame_type;
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 35d13f35e9b0..73a19e43106b 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -111,9 +111,13 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
#define RSI_WOW_ENABLED BIT(0)
#define RSI_WOW_NO_CONNECTION BIT(1)
-#define RSI_DEV_9113 1
#define RSI_MAX_RX_PKTS 64
+enum rsi_dev_model {
+ RSI_DEV_9113 = 0,
+ RSI_DEV_9116
+};
+
struct version_info {
u16 major;
u16 minor;
@@ -215,6 +219,17 @@ enum rsi_dfs_regions {
RSI_REGION_WORLD
};
+struct rsi_9116_features {
+ u8 pll_mode;
+ u8 rf_type;
+ u8 wireless_mode;
+ u8 afe_type;
+ u8 enable_ppe;
+ u8 dpd;
+ u32 sifs_tx_enable;
+ u32 ps_options;
+};
+
struct rsi_common {
struct rsi_hw *priv;
struct vif_priv vif_info[RSI_MAX_VIFS];
@@ -310,6 +325,7 @@ struct rsi_common {
struct cfg80211_scan_request *hwscan;
struct rsi_bgscan_params bgscan;
+ struct rsi_9116_features w9116_features;
u8 bgscan_en;
u8 mac_ops_resumed;
};
@@ -329,7 +345,7 @@ struct eeprom_read {
struct rsi_hw {
struct rsi_common *priv;
- u8 device_model;
+ enum rsi_dev_model device_model;
struct ieee80211_hw *hw;
struct ieee80211_vif *vifs[RSI_MAX_VIFS];
struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
@@ -381,6 +397,7 @@ struct rsi_host_intf_ops {
u32 instructions_size, u16 block_size,
u8 *fw);
int (*reinit_device)(struct rsi_hw *adapter);
+ int (*ta_reset)(struct rsi_hw *adapter);
};
enum rsi_host_intf rsi_get_host_intf(void *priv);
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index ea83faa15c7e..2ce2dcf57441 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -294,6 +294,7 @@ enum cmd_frame_type {
COMMON_DEV_CONFIG = 0x28,
RADIO_PARAMS_UPDATE = 0x29,
WOWLAN_CONFIG_PARAMS = 0x2B,
+ FEATURES_ENABLE = 0x33,
WOWLAN_WAKEUP_REASON = 0xc5
};
@@ -351,6 +352,15 @@ struct rsi_boot_params {
struct bootup_params bootup_params;
} __packed;
+struct rsi_boot_params_9116 {
+ struct rsi_cmd_desc_dword0 desc_dword0;
+ struct rsi_cmd_desc_dword1 desc_dword1;
+ struct rsi_cmd_desc_dword2 desc_dword2;
+ __le16 reserved;
+ __le16 umac_clk;
+ struct bootup_params_9116 bootup_params;
+} __packed;
+
struct rsi_peer_notify {
struct rsi_cmd_desc desc;
u8 mac_addr[6];
@@ -654,6 +664,22 @@ struct rsi_bgscan_probe {
__le16 probe_req_length;
} __packed;
+#define RSI_DUTY_CYCLING BIT(0)
+#define RSI_END_OF_FRAME BIT(1)
+#define RSI_SIFS_TX_ENABLE BIT(2)
+#define RSI_DPD BIT(3)
+struct rsi_wlan_9116_features {
+ struct rsi_cmd_desc desc;
+ u8 pll_mode;
+ u8 rf_type;
+ u8 wireless_mode;
+ u8 enable_ppe;
+ u8 afe_type;
+ u8 reserved1;
+ __le16 reserved2;
+ __le32 feature_enable;
+};
+
static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
{
return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 66dcd2ec9051..c5cfb6238f73 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -28,8 +28,9 @@
#include <linux/mmc/sdio_ids.h>
#include "rsi_main.h"
-#define RSI_SDIO_VID_9113 0x041B
+#define RSI_SDIO_VENDOR_ID 0x041B
#define RSI_SDIO_PID_9113 0x9330
+#define RSI_SDIO_PID_9116 0x9116
enum sdio_interrupt_type {
BUFFER_FULL = 0x0,
@@ -91,7 +92,7 @@ enum sdio_interrupt_type {
#define TA_SOFT_RST_SET BIT(0)
#define TA_PC_ZERO 0
#define TA_HOLD_THREAD_VALUE 0xF
-#define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF)
+#define TA_RELEASE_THREAD_VALUE 0xF
#define TA_BASE_ADDR 0x2200
#define MISC_CFG_BASE_ADDR 0x4105
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index 5b2eddd1a2ee..8702f434b569 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -22,8 +22,9 @@
#include "rsi_main.h"
#include "rsi_common.h"
-#define RSI_USB_VID_9113 0x1618
+#define RSI_USB_VENDOR_ID 0x1618
#define RSI_USB_PID_9113 0x9113
+#define RSI_USB_PID_9116 0x9116
#define USB_INTERNAL_REG_1 0x25000
#define RSI_USB_READY_MAGIC_NUM 0xab
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 90dc979f260b..c1608f0bf6d0 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
mutex_init(&priv->wsm_cmd_mux);
mutex_init(&priv->conf_mutex);
priv->workqueue = create_singlethread_workqueue("cw1200_wq");
+ if (!priv->workqueue) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
sema_init(&priv->scan.lock, 1);
INIT_WORK(&priv->scan.work, cw1200_scan_work);
INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 348be0aed97e..0415a064f6e2 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1700,14 +1700,14 @@ void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
- set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
+ __set_bit_le(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
}
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
{
struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL;
int ret = 0, i, b, ch_bit_idx;
- u32 tmp_ch_bitmap[2];
+ __le32 tmp_ch_bitmap[2] __aligned(sizeof(unsigned long));
struct wiphy *wiphy = wl->hw->wiphy;
struct ieee80211_supported_band *band;
bool timeout = false;
@@ -1717,7 +1717,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
wl1271_debug(DEBUG_CMD, "cmd reg domain config");
- memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));
+ memcpy(tmp_ch_bitmap, wl->reg_ch_conf_pending, sizeof(tmp_ch_bitmap));
for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) {
band = wiphy->bands[b];
@@ -1738,13 +1738,10 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
if (ch_bit_idx < 0)
continue;
- set_bit(ch_bit_idx, (long *)tmp_ch_bitmap);
+ __set_bit_le(ch_bit_idx, (long *)tmp_ch_bitmap);
}
}
- tmp_ch_bitmap[0] |= wl->reg_ch_conf_pending[0];
- tmp_ch_bitmap[1] |= wl->reg_ch_conf_pending[1];
-
if (!memcmp(tmp_ch_bitmap, wl->reg_ch_conf_last, sizeof(tmp_ch_bitmap)))
goto out;
@@ -1754,8 +1751,8 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
goto out;
}
- cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]);
- cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]);
+ cmd->ch_bit_map1 = tmp_ch_bitmap[0];
+ cmd->ch_bit_map2 = tmp_ch_bitmap[1];
cmd->dfs_region = wl->dfs_region;
wl1271_debug(DEBUG_CMD,
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index dcb2c8b0feb6..affefaaea1ea 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -372,8 +372,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 nla_cmd;
int err;
- err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy,
- NULL);
+ err = nla_parse_deprecated(tb, WL1271_TM_ATTR_MAX, data, len,
+ wl1271_tm_policy, NULL);
if (err)
return err;
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index 7f34ec077ee5..75756fb8e7b0 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -41,8 +41,8 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
if (!data)
return -EINVAL;
- ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
- wlcore_vendor_attr_policy, NULL);
+ ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
+ wlcore_vendor_attr_policy, NULL);
if (ret)
return ret;
@@ -122,8 +122,8 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
if (!data)
return -EINVAL;
- ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
- wlcore_vendor_attr_policy, NULL);
+ ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
+ wlcore_vendor_attr_policy, NULL);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index dd14850b0603..870eea3e7a27 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -320,9 +320,9 @@ struct wl1271 {
bool watchdog_recovery;
/* Reg domain last configuration */
- u32 reg_ch_conf_last[2] __aligned(8);
+ DECLARE_BITMAP(reg_ch_conf_last, 64);
/* Reg domain pending configuration */
- u32 reg_ch_conf_pending[2];
+ DECLARE_BITMAP(reg_ch_conf_pending, 64);
/* Pointer that holds DMA-friendly block for the mailbox */
void *mbox;
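
Editorial note on the wlcore changes: the regdomain channel maps move to DECLARE_BITMAP storage filled with __set_bit_le(), so the __le32 words copied into ch_bit_map1/ch_bit_map2 have the same byte layout on big- and little-endian hosts. A tiny sketch of what little-endian bit numbering means in byte terms (set_bit_le below is an illustrative helper, not the kernel API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Little-endian bitmap: bit N lives in byte N/8 at position N%8, so the
 * resulting byte image is host-independent. */
static void set_bit_le(unsigned int nr, uint8_t *addr)
{
	addr[nr / 8] |= 1u << (nr % 8);
}

int main(void)
{
	uint8_t ch_bitmap[8];

	memset(ch_bitmap, 0, sizeof(ch_bitmap));
	set_bit_le(0, ch_bitmap);	/* channel bit index 0  */
	set_bit_le(35, ch_bitmap);	/* channel bit index 35 */

	for (size_t i = 0; i < sizeof(ch_bitmap); i++)
		printf("%02x ", ch_bitmap[i]);
	printf("\n");			/* prints: 01 00 00 00 08 00 00 00 */
	return 0;
}

Because the pending map is now already in wire order, cmd.c can memcpy it straight into tmp_ch_bitmap and assign the two __le32 words without a cpu_to_le32() at the end.
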
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index c2cda3acd4af..a094d5b3383c 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1917,8 +1917,7 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
if (!urb)
return -ENOMEM;
- req_len = sizeof(struct usb_req_write_regs) +
- count * sizeof(struct reg_data);
+ req_len = struct_size(req, reg_writes, count);
req = kmalloc(req_len, GFP_KERNEL);
if (!req) {
r = -ENOMEM;
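
Editorial note on the zd1211rw hunk: struct_size() replaces an open-coded header-plus-elements length; it computes the same thing from a flexible-array struct and saturates instead of wrapping on overflow. A userspace approximation with an illustrative struct (the real usb_req_write_regs layout may differ):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct reg_data {
	uint16_t addr;
	uint16_t value;
};

struct write_regs_req {			/* illustrative, not the driver struct */
	uint16_t id;
	struct reg_data reg_writes[];	/* flexible array member */
};

int main(void)
{
	size_t count = 3;
	struct write_regs_req *req;
	/* What struct_size(req, reg_writes, count) computes: the header plus
	 * 'count' trailing elements (the kernel macro additionally saturates
	 * on overflow, which the open-coded form does not). */
	size_t req_len = sizeof(*req) + count * sizeof(req->reg_writes[0]);

	req = malloc(req_len);
	if (!req)
		return 1;
	printf("request length: %zu bytes\n", req_len);	/* 2 + 3 * 4 = 14 */
	free(req);
	return 0;
}
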
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 936c0b3e0ba2..05847eb91a1b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -248,6 +248,22 @@ struct xenvif_hash {
struct xenvif_hash_cache cache;
};
+struct backend_info {
+ struct xenbus_device *dev;
+ struct xenvif *vif;
+
+ /* This is the state that will be reflected in xenstore when any
+ * active hotplug script completes.
+ */
+ enum xenbus_state state;
+
+ enum xenbus_state frontend_state;
+ struct xenbus_watch hotplug_status_watch;
+ u8 have_hotplug_status_watch:1;
+
+ const char *hotplug_script;
+};
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -283,6 +299,8 @@ struct xenvif {
struct xenbus_watch credit_watch;
struct xenbus_watch mcast_ctrl_watch;
+ struct backend_info *be;
+
spinlock_t lock;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 6da12518e693..783198844dd7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,8 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
@@ -162,7 +161,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+ return netdev_pick_tx(dev, skb, NULL) %
+ dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 330ddb64930f..41c9e8f2e520 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -22,22 +22,6 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
-struct backend_info {
- struct xenbus_device *dev;
- struct xenvif *vif;
-
- /* This is the state that will be reflected in xenstore when any
- * active hotplug script completes.
- */
- enum xenbus_state state;
-
- enum xenbus_state frontend_state;
- struct xenbus_watch hotplug_status_watch;
- u8 have_hotplug_status_watch:1;
-
- const char *hotplug_script;
-};
-
static int connect_data_rings(struct backend_info *be,
struct xenvif_queue *queue);
static void connect(struct backend_info *be);
@@ -472,6 +456,7 @@ static int backend_create_xenvif(struct backend_info *be)
return err;
}
be->vif = vif;
+ vif->be = be;
kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
return 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c914c24f880b..8d33970a2950 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -543,8 +543,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
}
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
unsigned int num_queues = dev->real_num_tx_queues;
u32 hash;
@@ -2038,7 +2037,7 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* Fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
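
Editorial note: several hunks in this series (xen-netfront above, pn533 and st21nfca below) adjust switch statements so an intentional fall-through is marked by a comment placed directly before the next case label, which is what GCC's -Wimplicit-fallthrough looks for. A compact sketch of the pattern, modelled on the netback_changed() case above with illustrative state values:

#include <stdio.h>

enum state { STATE_CLOSING = 5, STATE_CLOSED = 6 };	/* illustrative values */

static void handle_backend_change(enum state backend, enum state own)
{
	switch (backend) {
	case STATE_CLOSED:
		if (own == STATE_CLOSED)
			break;
		/* Fall through - missed the backend's CLOSING state. */
	case STATE_CLOSING:
		printf("closing frontend\n");
		break;
	default:
		printf("no action\n");
		break;
	}
}

int main(void)
{
	handle_backend_change(STATE_CLOSED, STATE_CLOSING); /* closing frontend */
	return 0;
}
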
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 8a04c5e02999..0f43bb389566 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * MEI Library for mei bus nfc device access
- *
- * Copyright (C) 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (c) 2013, Intel Corporation.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * MEI Library for mei bus nfc device access
*/
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index eb5eddf1794e..5dad8847a9b3 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * HCI based Driver for Inside Secure microread NFC Chip
- *
- * Copyright (C) 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * HCI based Driver for Inside Secure microread NFC Chip
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index a0cc1cc45292..5961f14259e5 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2147,6 +2147,7 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
break;
}
+ /* fall through */
default:
/* jumbo frame ? */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
@@ -2273,6 +2274,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
break;
}
+ /* fall through */
default:
skb_put_u8(skb, 1); /*TG*/
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index ad57a8ec00d6..579bc599f545 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * HCI based Driver for NXP pn544 NFC Chip
- *
* Copyright (C) 2013 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * HCI based Driver for NXP pn544 NFC Chip
*/
#include <linux/module.h>
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index b7828fb252f2..b681073ae8ba 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -449,7 +449,6 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(desc, fw->image, image_size,
hash_data);
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index fd08be2917e6..c005997493af 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -400,6 +400,7 @@ static int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev,
default:
return 1;
}
+ break;
default:
return 1;
}
@@ -619,6 +620,7 @@ static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb,
switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_res->pfb)) {
case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU:
pr_err("Received a ACK/NACK PDU\n");
+ /* fall through */
case ST21NFCA_NFC_DEP_PFB_I_PDU:
info->dep_info.curr_nfc_dep_pni =
ST21NFCA_NFC_DEP_PFB_PNI(dep_res->pfb + 1);
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 2b26f762fbc3..99727a2edda0 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -781,9 +781,7 @@ static irqreturn_t st95hf_irq_thread_handler(int irq, void *st95hfcontext)
int result = 0;
int res_len;
static bool wtx;
- struct device *dev;
struct device *spidevice;
- struct nfc_digital_dev *nfcddev;
struct sk_buff *skb_resp;
struct st95hf_context *stcontext =
(struct st95hf_context *)st95hfcontext;
@@ -828,8 +826,6 @@ static irqreturn_t st95hf_irq_thread_handler(int irq, void *st95hfcontext)
goto end;
}
- dev = &stcontext->nfcdev->dev;
- nfcddev = stcontext->ddev;
if (skb_resp->data[2] == WTX_REQ_FROM_TAG) {
/* Request for new FWT from tag */
result = st95hf_handle_wtx(stcontext, true, skb_resp->data[3]);
@@ -1074,6 +1070,12 @@ static const struct spi_device_id st95hf_id[] = {
};
MODULE_DEVICE_TABLE(spi, st95hf_id);
+static const struct of_device_id st95hf_spi_of_match[] = {
+ { .compatible = "st,st95hf" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
+
static int st95hf_probe(struct spi_device *nfc_spi_dev)
{
int ret;
@@ -1260,6 +1262,7 @@ static struct spi_driver st95hf_driver = {
.driver = {
.name = "st95hf",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(st95hf_spi_of_match),
},
.id_table = st95hf_id,
.probe = st95hf_probe,
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 1dede87dd54f..dcf234680535 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -358,8 +358,6 @@ static void idt_sw_write(struct idt_ntb_dev *ndev,
iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
/* Put the new value of the register */
iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
- /* Make sure the PCIe transactions are executed */
- mmiowb();
/* Unlock GASA registers operations */
spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
}
@@ -750,7 +748,6 @@ static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev)
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata);
- mmiowb();
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Notify the peers by setting and clearing the global signal bit */
@@ -778,7 +775,6 @@ static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev)
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0);
- mmiowb();
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Notify the peers by setting and clearing the global signal bit */
@@ -1339,7 +1335,6 @@ static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));
idt_nt_write(ndev, IDT_NT_LUTUDATA, data);
- mmiowb();
spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
/* Limit address isn't specified since size is fixed for LUT */
}
@@ -1393,7 +1388,6 @@ static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
idt_nt_write(ndev, IDT_NT_LUTLDATA, 0);
idt_nt_write(ndev, IDT_NT_LUTMDATA, 0);
idt_nt_write(ndev, IDT_NT_LUTUDATA, 0);
- mmiowb();
spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
}
@@ -1812,7 +1806,6 @@ static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
/* Set the route and send the data */
idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
- mmiowb();
/* Unlock the messages routing table */
spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 2a9d6b0d1f19..11a6cd374004 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -284,11 +284,9 @@ static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_HDATA(perf->gidx),
upper_32_bits(data));
- mmiowb();
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_CMD(perf->gidx),
cmd);
- mmiowb();
ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
@@ -379,7 +377,6 @@ static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
upper_32_bits(data));
- mmiowb();
/* This call shall trigger peer message event */
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index b72a303176c7..9486acc08402 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -198,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
return NULL;
nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
- if (nd_btt->id < 0) {
- kfree(nd_btt);
- return NULL;
- }
+ if (nd_btt->id < 0)
+ goto out_nd_btt;
nd_btt->lbasize = lbasize;
- if (uuid)
+ if (uuid) {
uuid = kmemdup(uuid, 16, GFP_KERNEL);
+ if (!uuid)
+ goto out_put_id;
+ }
nd_btt->uuid = uuid;
dev = &nd_btt->dev;
dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -220,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
return NULL;
}
return dev;
+
+out_put_id:
+ ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+ kfree(nd_btt);
+ return NULL;
}
struct device *nd_btt_create(struct nd_region *nd_region)
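
Editorial note on __nd_btt_create(): the change converts early returns into a single goto unwind so that a failed kmemdup() also releases the ida id it had already claimed. The shape of that pattern in a standalone sketch (widget_create and its helpers are hypothetical stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct widget {
	int id;
	char *name;
};

static struct widget *widget_create(const char *name)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;

	w->id = 42;			/* stand-in for ida_simple_get() */

	if (name) {
		w->name = strdup(name);	/* stand-in for kmemdup() */
		if (!w->name)
			goto out_put_id;
	}
	return w;

out_put_id:
	/* the real code releases the id here (ida_simple_remove) */
	free(w);
	return NULL;
}

int main(void)
{
	struct widget *w = widget_create("btt0.1");

	if (!w)
		return 1;
	printf("%d %s\n", w->id, w->name);
	free(w->name);
	free(w);
	return 0;
}
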
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7bbff0af29b2..7ff684159f29 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -581,7 +581,7 @@ int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
struct device_driver *drv = &nd_drv->drv;
if (!nd_drv->type) {
- pr_debug("driver type bitmask not set (%pf)\n",
+ pr_debug("driver type bitmask not set (%ps)\n",
__builtin_return_address(0));
return -EINVAL;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 91b9abbf689c..ecbab2d66e38 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -58,7 +58,7 @@ static int validate_dimm(struct nvdimm_drvdata *ndd)
rc = nvdimm_check_config_data(ndd->dev);
if (rc)
- dev_dbg(ndd->dev, "%pf: %s error: %d\n",
+ dev_dbg(ndd->dev, "%ps: %s error: %d\n",
__builtin_return_address(0), __func__, rc);
return rc;
}
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index f3d753d3169c..2030805aa216 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -756,6 +756,17 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
return &guid_null;
}
+static void reap_victim(struct nd_mapping *nd_mapping,
+ struct nd_label_ent *victim)
+{
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ u32 slot = to_slot(ndd, victim->label);
+
+ dev_dbg(ndd->dev, "free: %d\n", slot);
+ nd_label_free_slot(ndd, slot);
+ victim->label = NULL;
+}
+
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
@@ -763,9 +774,9 @@ static int __pmem_label_update(struct nd_region *nd_region,
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_label_ent *label_ent, *victim = NULL;
struct nd_namespace_label *nd_label;
struct nd_namespace_index *nsindex;
+ struct nd_label_ent *label_ent;
struct nd_label_id label_id;
struct resource *res;
unsigned long *free;
@@ -834,18 +845,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label)
continue;
- if (memcmp(nspm->uuid, label_ent->label->uuid,
- NSLABEL_UUID_LEN) != 0)
- continue;
- victim = label_ent;
- list_move_tail(&victim->list, &nd_mapping->labels);
- break;
- }
- if (victim) {
- dev_dbg(ndd->dev, "free: %d\n", slot);
- slot = to_slot(ndd, victim->label);
- nd_label_free_slot(ndd, slot);
- victim->label = NULL;
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
+ || memcmp(nspm->uuid, label_ent->label->uuid,
+ NSLABEL_UUID_LEN) == 0)
+ reap_victim(nd_mapping, label_ent);
}
/* update index */
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 7849bf1812c4..d0214644e334 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1247,12 +1247,27 @@ static int namespace_update_uuid(struct nd_region *nd_region,
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_label_ent *label_ent;
struct resource *res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0)
sprintf((void *) res->name, "%s",
new_label_id.id);
+
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+ struct nd_namespace_label *nd_label = label_ent->label;
+ struct nd_label_id label_id;
+
+ if (!nd_label)
+ continue;
+ nd_label_gen_id(&label_id, nd_label->uuid,
+ __le32_to_cpu(nd_label->flags));
+ if (strcmp(old_label_id.id, label_id.id) == 0)
+ set_bit(ND_LABEL_REAP, &label_ent->flags);
+ }
+ mutex_unlock(&nd_mapping->lock);
}
kfree(*old_uuid);
out:
@@ -2249,9 +2264,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
if (!nsblk->uuid)
goto blk_err;
memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
- if (name[0])
+ if (name[0]) {
nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
GFP_KERNEL);
+ if (!nsblk->alt_name)
+ goto blk_err;
+ }
res = nsblk_add_resource(nd_region, ndd, nsblk,
__le64_to_cpu(nd_label->dpa));
if (!res)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index a5ac3b240293..191d62af0e51 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -113,8 +113,12 @@ struct nd_percpu_lane {
spinlock_t lock;
};
+enum nd_label_flags {
+ ND_LABEL_REAP,
+};
struct nd_label_ent {
struct list_head list;
+ unsigned long flags;
struct nd_namespace_label *label;
};
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index d271bd731af7..01f40672507f 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -391,7 +391,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
bb_present = badblocks_check(&nd_region->bb, meta_start,
meta_num, &first_bad, &num_bad);
if (bb_present) {
- dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %lx\n",
+ dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
num_bad, first_bad);
nsoff = ALIGN_DOWN((nd_region->ndr_start
+ (first_bad << 9)) - nsio->res.start,
@@ -410,7 +410,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
}
if (rc) {
dev_err(&nd_pfn->dev,
- "error clearing %x badblocks at %lx\n",
+ "error clearing %x badblocks at %llx\n",
num_bad, first_bad);
return rc;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index bc2f700feef8..0279eb1da3ef 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page,
while (len) {
mem = kmap_atomic(page);
- chunk = min_t(unsigned int, len, PAGE_SIZE);
+ chunk = min_t(unsigned int, len, PAGE_SIZE - off);
memcpy_flushcache(pmem_addr, mem + off, chunk);
kunmap_atomic(mem);
len -= chunk;
off = 0;
page++;
- pmem_addr += PAGE_SIZE;
+ pmem_addr += chunk;
}
}
@@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
while (len) {
mem = kmap_atomic(page);
- chunk = min_t(unsigned int, len, PAGE_SIZE);
+ chunk = min_t(unsigned int, len, PAGE_SIZE - off);
rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
kunmap_atomic(mem);
if (rem)
@@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
len -= chunk;
off = 0;
page++;
- pmem_addr += PAGE_SIZE;
+ pmem_addr += chunk;
}
return BLK_STS_OK;
}
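
Editorial note on the pmem read/write fixes: they matter when the copy starts at a non-zero offset inside the first page. The chunk must be capped at PAGE_SIZE - off and the pmem address advanced by the chunk actually copied; with the old code (chunk = PAGE_SIZE, pmem_addr += PAGE_SIZE) the first iteration overruns the mapped page and the destination drifts out of step. A worked loop with illustrative numbers:

#include <stdio.h>

#define PAGE_SIZE 4096u			/* illustrative, matches most arches */

int main(void)
{
	unsigned int off = 512, len = 8192;
	unsigned long dst = 0;		/* offset into pmem, for illustration */
	unsigned int page = 0;

	while (len) {
		unsigned int chunk = len < PAGE_SIZE - off ? len
							   : PAGE_SIZE - off;

		printf("page %u: copy %u bytes from offset %u to dst %lu\n",
		       page, chunk, off, dst);
		len -= chunk;
		dst += chunk;		/* advance by what was copied */
		off = 0;
		page++;
	}
	/* copies 3584 + 4096 + 512 = 8192 bytes with no gaps */
	return 0;
}
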
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index f8bb746a549f..a570f2263a42 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -22,6 +22,8 @@ static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
static void *key_data(struct key *key)
{
struct encrypted_key_payload *epayload = dereference_key_locked(key);
@@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
return key;
}
+static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
+ struct key **key)
+{
+ *key = nvdimm_request_key(nvdimm);
+ if (!*key)
+ return zero_key;
+
+ return key_data(*key);
+}
+
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
key_serial_t id, int subclass)
{
@@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
return key;
}
-static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm)
+static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
+ key_serial_t id, int subclass, struct key **key)
+{
+ *key = NULL;
+ if (id == 0) {
+ if (subclass == NVDIMM_BASE_KEY)
+ return zero_key;
+ else
+ return NULL;
+ }
+
+ *key = nvdimm_lookup_user_key(nvdimm, id, subclass);
+ if (!*key)
+ return NULL;
+
+ return key_data(*key);
+}
+
+
+static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
struct key *key;
int rc;
+ const void *data;
if (!nvdimm->sec.ops->change_key)
- return NULL;
+ return -EOPNOTSUPP;
- key = nvdimm_request_key(nvdimm);
- if (!key)
- return NULL;
+ data = nvdimm_get_key_payload(nvdimm, &key);
/*
* Send the same key to the hardware as new and old key to
* verify that the key is good.
*/
- rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key),
- key_data(key), NVDIMM_USER);
+ rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
if (rc < 0) {
nvdimm_put_key(key);
- key = NULL;
+ return rc;
}
- return key;
+
+ nvdimm_put_key(key);
+ nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+ return 0;
}
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- struct key *key = NULL;
+ struct key *key;
+ const void *data;
int rc;
/* The bus lock should be held at the top level of the call stack */
@@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
if (!key_revalidate)
return 0;
- key = nvdimm_key_revalidate(nvdimm);
- if (!key)
- return nvdimm_security_freeze(nvdimm);
+ return nvdimm_key_revalidate(nvdimm);
} else
- key = nvdimm_request_key(nvdimm);
+ data = nvdimm_get_key_payload(nvdimm, &key);
- if (!key)
- return -ENOKEY;
-
- rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key));
+ rc = nvdimm->sec.ops->unlock(nvdimm, data);
dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
@@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key;
int rc;
+ const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
return -EBUSY;
}
- key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
- if (!key)
+ data = nvdimm_get_user_key_payload(nvdimm, keyid,
+ NVDIMM_BASE_KEY, &key);
+ if (!data)
return -ENOKEY;
- rc = nvdimm->sec.ops->disable(nvdimm, key_data(key));
+ rc = nvdimm->sec.ops->disable(nvdimm, data);
dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
@@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key, *newkey;
int rc;
+ const void *data, *newdata;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
return -EIO;
}
- if (keyid == 0)
- key = NULL;
- else {
- key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
- if (!key)
- return -ENOKEY;
- }
+ data = nvdimm_get_user_key_payload(nvdimm, keyid,
+ NVDIMM_BASE_KEY, &key);
+ if (!data)
+ return -ENOKEY;
- newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY);
- if (!newkey) {
+ newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
+ NVDIMM_NEW_KEY, &newkey);
+ if (!newdata) {
nvdimm_put_key(key);
return -ENOKEY;
}
- rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL,
- key_data(newkey), pass_type);
+ rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
dev_dbg(dev, "key: %d %d update%s: %s\n",
key_serial(key), key_serial(newkey),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
@@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- struct key *key;
+ struct key *key = NULL;
int rc;
+ const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
return -EOPNOTSUPP;
}
- key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
- if (!key)
+ data = nvdimm_get_user_key_payload(nvdimm, keyid,
+ NVDIMM_BASE_KEY, &key);
+ if (!data)
return -ENOKEY;
- rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type);
+ rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
rc == 0 ? "success" : "fail");
@@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- struct key *key;
+ struct key *key = NULL;
int rc;
+ const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return -EBUSY;
}
- if (keyid == 0)
- key = NULL;
- else {
- key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
- if (!key)
- return -ENOKEY;
- }
+ data = nvdimm_get_user_key_payload(nvdimm, keyid,
+ NVDIMM_BASE_KEY, &key);
+ if (!data)
+ return -ENOKEY;
- rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL);
+ rc = nvdimm->sec.ops->overwrite(nvdimm, data);
dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
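
Editorial note on the security.c rework: payload lookups now go through helpers that hand back a static all-zero passphrase when key id 0 is given for a base key, so the per-operation `key == NULL` special cases disappear. A much-simplified sketch of that fallback (the subclass distinction and the real keyring lookup are omitted):

#include <stdio.h>
#include <string.h>

#define PASSPHRASE_LEN 32

static const unsigned char zero_key[PASSPHRASE_LEN];	/* implicitly all zeroes */

/* Roughly the shape of nvdimm_get_user_key_payload(): id 0 means "use the
 * zero key"; otherwise a (pretend) keyring lookup supplies the payload. */
static const void *get_key_payload(unsigned int id, const char **key_desc)
{
	static const unsigned char user_payload[PASSPHRASE_LEN] = "example";

	*key_desc = NULL;
	if (id == 0)
		return zero_key;

	*key_desc = "nvdimm-user-key";	/* stands in for the struct key */
	return user_payload;
}

int main(void)
{
	const char *desc;
	const void *data = get_key_payload(0, &desc);

	printf("zero-key fallback used: %s\n",
	       memcmp(data, zero_key, PASSPHRASE_LEN) == 0 ? "yes" : "no");
	return 0;
}
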
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 470601980794..a6644a2c3ef7 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
"Cancelling I/O %d", req->tag);
nvme_req(req)->status = NVME_SC_ABORT_REQ;
- blk_mq_complete_request(req);
+ blk_mq_complete_request_sync(req);
return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
@@ -388,7 +388,7 @@ static void nvme_free_ns_head(struct kref *ref)
nvme_mpath_remove_disk(head);
ida_simple_remove(&head->subsys->ns_ida, head->instance);
list_del_init(&head->entry);
- cleanup_srcu_struct_quiesced(&head->srcu);
+ cleanup_srcu_struct(&head->srcu);
nvme_put_subsystem(head->subsys);
kfree(head);
}
@@ -1105,7 +1105,7 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
if (error) {
- dev_warn(ctrl->device, "Identify namespace failed\n");
+ dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
kfree(id);
return NULL;
}
@@ -1588,9 +1588,13 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
- sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
unsigned short bs = 1 << ns->lba_shift;
+ if (ns->lba_shift > PAGE_SHIFT) {
+ /* unsupported block size, set capacity to 0 later */
+ bs = (1 << 9);
+ }
blk_mq_freeze_queue(disk->queue);
blk_integrity_unregister(disk);
@@ -1601,7 +1605,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
if (ns->ms && !ns->ext &&
(ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
nvme_init_integrity(disk, ns->ms, ns->pi_type);
- if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
+ if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
+ ns->lba_shift > PAGE_SHIFT)
capacity = 0;
set_capacity(disk, capacity);
@@ -2549,7 +2554,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->crdt[2] = le16_to_cpu(id->crdt3);
ctrl->oacs = le16_to_cpu(id->oacs);
- ctrl->oncs = le16_to_cpup(&id->oncs);
+ ctrl->oncs = le16_to_cpu(id->oncs);
ctrl->oaes = le32_to_cpu(id->oaes);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
@@ -3874,10 +3879,37 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
-int __init nvme_core_init(void)
+/*
+ * Check we didn't inadvertently grow the command structure sizes:
+ */
+static inline void _nvme_check_size(void)
+{
+ BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
+ BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
+}
+
+
+static int __init nvme_core_init(void)
{
int result = -ENOMEM;
+ _nvme_check_size();
+
nvme_wq = alloc_workqueue("nvme-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_wq)
@@ -3924,7 +3956,7 @@ out:
return result;
}
-void __exit nvme_core_exit(void)
+static void __exit nvme_core_exit(void)
{
ida_destroy(&nvme_subsystems_ida);
class_destroy(nvme_subsys_class);
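
In the nvme_update_disk_info() hunk above, the namespace size reported by Identify (nsze, counted in logical blocks) is converted to the 512-byte sectors the block layer expects by shifting with lba_shift - 9, and a namespace whose logical block size exceeds the host page size is given a capacity of 0 so it cannot be used. A stand-alone illustration of the arithmetic, with made-up numbers rather than values from any real controller:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long nsze = 1048576;	/* logical blocks, made up */
		unsigned int lba_shift = 12;		/* 4096-byte logical blocks */

		/* Block-layer capacity is counted in 512-byte sectors. */
		unsigned long long sectors = nsze << (lba_shift - 9);

		printf("%llu blocks of %u bytes = %llu sectors (%llu GiB)\n",
		       nsze, 1u << lba_shift, sectors, (sectors << 9) >> 30);
		return 0;
	}

With these values the namespace is 8388608 sectors, i.e. 4 GiB; on a 4 KiB-page host a hypothetical lba_shift of 13 would instead trip the lba_shift > PAGE_SHIFT check and zero the capacity.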
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index d4cb826f58ff..592d1e61ef7e 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -1188,6 +1188,7 @@ static void __exit nvmf_exit(void)
class_destroy(nvmf_class);
nvmf_host_put(nvmf_default_host);
+ BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index f3b9d91ba0df..9544eb60f725 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -14,7 +14,7 @@
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
-
+#include <scsi/scsi_transport_fc.h>
/* *************************** Data Structures/Defines ****************** */
@@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
memset(queue, 0, sizeof(*queue));
queue->ctrl = ctrl;
queue->qnum = idx;
- atomic_set(&queue->csn, 1);
+ atomic_set(&queue->csn, 0);
queue->dev = ctrl->dev;
if (idx > 0)
@@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
*/
queue->connection_id = 0;
- atomic_set(&queue->csn, 1);
+ atomic_set(&queue->csn, 0);
}
static void
@@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
{
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
- u32 csn;
int ret, opstate;
/*
@@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
/* format the FC-NVME CMD IU and fcp_req */
cmdiu->connection_id = cpu_to_be64(queue->connection_id);
- csn = atomic_inc_return(&queue->csn);
- cmdiu->csn = cpu_to_be32(csn);
cmdiu->data_len = cpu_to_be32(data_len);
switch (io_dir) {
case NVMEFC_FCP_WRITE:
@@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (!(op->flags & FCOP_FLAGS_AEN))
blk_mq_start_request(op->rq);
+ cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
&ctrl->rport->remoteport,
queue->lldd_handle, &op->fcp_req);
if (ret) {
+ /*
+ * If the lld fails to send the command, is there an issue with
+ * the csn value? If the command that fails is the Connect,
+ * no - as the connection won't be live. If it is a command
+ * post-connect, it's possible a gap in csn may be created.
+ * Does this matter? As Linux initiators don't send fused
+ * commands, no. The gap would exist, but as there's nothing
+ * that depends on csn order to be delivered on the target
+ * side, it shouldn't hurt. It would be difficult for a
+ * target to even detect the csn gap as it has no idea when the
+ * cmd with the csn was supposed to arrive.
+ */
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f0716f6ce41f..5c9429d41120 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -232,6 +232,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
+ /*
+ * The namespace might be going away and the bio might
+ * be moved to a different queue via blk_steal_bios(),
+ * so we need to use the bio_split pool from the original
+ * queue to allocate the bvecs from.
+ */
+ blk_queue_split(q, &bio);
+
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
if (likely(ns)) {
@@ -421,7 +429,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
unsigned *nr_change_groups = data;
struct nvme_ns *ns;
- dev_info(ctrl->device, "ANA group %d: %s.\n",
+ dev_dbg(ctrl->device, "ANA group %d: %s.\n",
le32_to_cpu(desc->grpid),
nvme_ana_state_names[desc->state]);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 527d64545023..5ee75b5ff83f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -577,7 +577,4 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
return dev_to_disk(dev)->private_data;
}
-int __init nvme_core_init(void);
-void __exit nvme_core_exit(void);
-
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a90cf5d63aac..3e4fb891a95a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -146,7 +146,7 @@ static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
static int queue_count_set(const char *val, const struct kernel_param *kp)
{
- int n = 0, ret;
+ int n, ret;
ret = kstrtoint(val, 10, &n);
if (ret)
@@ -177,7 +177,6 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
* commands and one for I/O commands).
*/
struct nvme_queue {
- struct device *q_dmadev;
struct nvme_dev *dev;
spinlock_t sq_lock;
struct nvme_command *sq_cmds;
@@ -189,7 +188,7 @@ struct nvme_queue {
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
u16 q_depth;
- s16 cq_vector;
+ u16 cq_vector;
u16 sq_tail;
u16 last_sq_tail;
u16 cq_head;
@@ -200,6 +199,7 @@ struct nvme_queue {
#define NVMEQ_ENABLED 0
#define NVMEQ_SQ_CMB 1
#define NVMEQ_DELETE_ERROR 2
+#define NVMEQ_POLLED 3
u32 *dbbuf_sq_db;
u32 *dbbuf_cq_db;
u32 *dbbuf_sq_ei;
@@ -208,10 +208,10 @@ struct nvme_queue {
};
/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_init_iod to ensure there's enough space
- * allocated to store the PRP list.
+ * The nvme_iod describes the data in an I/O.
+ *
+ * The sg pointer contains the list of PRP/SGL chunk allocations in addition
+ * to the actual struct scatterlist.
*/
struct nvme_iod {
struct nvme_request req;
@@ -220,33 +220,12 @@ struct nvme_iod {
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
int nents; /* Used in scatterlist */
- int length; /* Of data, in bytes */
dma_addr_t first_dma;
- struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
+ unsigned int dma_len; /* length of single DMA segment mapping */
+ dma_addr_t meta_dma;
struct scatterlist *sg;
- struct scatterlist inline_sg[0];
};
-/*
- * Check we didin't inadvertently grow the command struct
- */
-static inline void _nvme_check_size(void)
-{
- BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
- BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
- BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
- BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
-}
-
static unsigned int max_io_queues(void)
{
return num_possible_cpus() + write_queues + poll_queues;
@@ -372,12 +351,6 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
}
/*
- * Max size of iod being embedded in the request payload
- */
-#define NVME_INT_PAGES 2
-#define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->ctrl.page_size)
-
-/*
* Will slightly overestimate the number of pages needed. This is OK
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
@@ -411,15 +384,6 @@ static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
return alloc_size + sizeof(struct scatterlist) * nseg;
}
-static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
-{
- unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
- NVME_INT_BYTES(dev), NVME_INT_PAGES,
- use_sgl);
-
- return sizeof(struct nvme_iod) + alloc_size;
-}
-
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -584,37 +548,26 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
return true;
}
-static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
- int nseg = blk_rq_nr_phys_segments(rq);
- unsigned int size = blk_rq_payload_bytes(rq);
-
- iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ enum dma_data_direction dma_dir = rq_data_dir(req) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ int i;
- if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
- return BLK_STS_RESOURCE;
- } else {
- iod->sg = iod->inline_sg;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, dma_addr, iod->dma_len, dma_dir);
+ return;
}
- iod->aborted = 0;
- iod->npages = -1;
- iod->nents = 0;
- iod->length = size;
-
- return BLK_STS_OK;
-}
+ WARN_ON_ONCE(!iod->nents);
-static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ /* P2PDMA requests do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(iod->sg)))
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
- int i;
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
@@ -638,8 +591,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
dma_addr = next_dma_addr;
}
- if (iod->sg != iod->inline_sg)
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sg, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -829,80 +781,104 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
return BLK_STS_OK;
}
+static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
+ if (bv->bv_len > first_prp_len)
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+ return 0;
+}
+
+static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->flags = NVME_CMD_SGL_METABUF;
+ cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
+ cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
+ cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct request_queue *q = req->q;
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
- blk_status_t ret = BLK_STS_IOERR;
+ blk_status_t ret = BLK_STS_RESOURCE;
int nr_mapped;
+ if (blk_rq_nr_phys_segments(req) == 1) {
+ struct bio_vec bv = req_bvec(req);
+
+ if (!is_pci_p2pdma_page(bv.bv_page)) {
+ if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
+ return nvme_setup_prp_simple(dev, req,
+ &cmnd->rw, &bv);
+
+ if (iod->nvmeq->qid &&
+ dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+ return nvme_setup_sgl_simple(dev, req,
+ &cmnd->rw, &bv);
+ }
+ }
+
+ iod->dma_len = 0;
+ iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sg)
+ return BLK_STS_RESOURCE;
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(q, req, iod->sg);
+ iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
goto out;
- ret = BLK_STS_RESOURCE;
-
if (is_pci_p2pdma_page(sg_page(iod->sg)))
nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
- dma_dir);
+ rq_dma_dir(req));
else
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
- dma_dir, DMA_ATTR_NO_WARN);
+ rq_dma_dir(req), DMA_ATTR_NO_WARN);
if (!nr_mapped)
goto out;
+ iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-
- if (ret != BLK_STS_OK)
- goto out_unmap;
-
- ret = BLK_STS_IOERR;
- if (blk_integrity_rq(req)) {
- if (blk_rq_count_integrity_sg(q, req->bio) != 1)
- goto out_unmap;
-
- sg_init_table(&iod->meta_sg, 1);
- if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
- goto out_unmap;
-
- if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
- goto out_unmap;
-
- cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
- }
-
- return BLK_STS_OK;
-
-out_unmap:
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
+ if (ret != BLK_STS_OK)
+ nvme_unmap_data(dev, req);
return ret;
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+ struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- if (iod->nents) {
- /* P2PDMA requests do not need to be unmapped */
- if (!is_pci_p2pdma_page(sg_page(iod->sg)))
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
-
- if (blk_integrity_rq(req))
- dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
- }
- nvme_cleanup_cmd(req);
- nvme_free_iod(dev, req);
+ iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+ rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->meta_dma))
+ return BLK_STS_IOERR;
+ cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+ return 0;
}
/*
@@ -915,9 +891,14 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_queue *nvmeq = hctx->driver_data;
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_command cmnd;
blk_status_t ret;
+ iod->aborted = 0;
+ iod->npages = -1;
+ iod->nents = 0;
+
/*
* We should not need to do this, but we're still using this to
* ensure we can drain requests on a dying queue.
@@ -929,21 +910,23 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
- ret = nvme_init_iod(req, dev);
- if (ret)
- goto out_free_cmd;
-
if (blk_rq_nr_phys_segments(req)) {
ret = nvme_map_data(dev, req, &cmnd);
if (ret)
- goto out_cleanup_iod;
+ goto out_free_cmd;
+ }
+
+ if (blk_integrity_rq(req)) {
+ ret = nvme_map_metadata(dev, req, &cmnd);
+ if (ret)
+ goto out_unmap_data;
}
blk_mq_start_request(req);
nvme_submit_cmd(nvmeq, &cmnd, bd->last);
return BLK_STS_OK;
-out_cleanup_iod:
- nvme_free_iod(dev, req);
+out_unmap_data:
+ nvme_unmap_data(dev, req);
out_free_cmd:
nvme_cleanup_cmd(req);
return ret;
@@ -952,8 +935,14 @@ out_free_cmd:
static void nvme_pci_complete_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_dev *dev = iod->nvmeq->dev;
- nvme_unmap_data(iod->nvmeq->dev, req);
+ nvme_cleanup_cmd(req);
+ if (blk_integrity_rq(req))
+ dma_unmap_page(dev->dev, iod->meta_dma,
+ rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+ if (blk_rq_nr_phys_segments(req))
+ nvme_unmap_data(dev, req);
nvme_complete_rq(req);
}
@@ -1088,7 +1077,7 @@ static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
* using the CQ lock. For normal interrupt driven threads we have
* to disable the interrupt to avoid racing with it.
*/
- if (nvmeq->cq_vector == -1) {
+ if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
spin_lock(&nvmeq->cq_poll_lock);
found = nvme_process_cq(nvmeq, &start, &end, tag);
spin_unlock(&nvmeq->cq_poll_lock);
@@ -1148,7 +1137,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG;
- if (vector != -1)
+ if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
flags |= NVME_CQ_IRQ_ENABLED;
/*
@@ -1161,10 +1150,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
c.create_cq.cqid = cpu_to_le16(qid);
c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(flags);
- if (vector != -1)
- c.create_cq.irq_vector = cpu_to_le16(vector);
- else
- c.create_cq.irq_vector = 0;
+ c.create_cq.irq_vector = cpu_to_le16(vector);
return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
@@ -1271,6 +1257,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd;
+ bool shutdown = false;
u32 csts = readl(dev->bar + NVME_REG_CSTS);
/* If PCI error recovery process is happening, we cannot reset or
@@ -1307,12 +1294,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* shutdown, so we return BLK_EH_DONE.
*/
switch (dev->ctrl.state) {
+ case NVME_CTRL_DELETING:
+ shutdown = true;
case NVME_CTRL_CONNECTING:
case NVME_CTRL_RESETTING:
dev_warn_ratelimited(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, shutdown);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
return BLK_EH_DONE;
default:
@@ -1371,16 +1360,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
- dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
if (!nvmeq->sq_cmds)
return;
if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
- pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+ pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
} else {
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
}
}
@@ -1410,10 +1399,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->dev->online_queues--;
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
- if (nvmeq->cq_vector == -1)
- return 0;
- pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
- nvmeq->cq_vector = -1;
+ if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
+ pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
return 0;
}
@@ -1498,7 +1485,6 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
goto free_cqdma;
- nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev;
spin_lock_init(&nvmeq->sq_lock);
spin_lock_init(&nvmeq->cq_poll_lock);
@@ -1507,7 +1493,6 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->qid = qid;
- nvmeq->cq_vector = -1;
dev->ctrl.queue_count++;
return 0;
@@ -1552,7 +1537,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
struct nvme_dev *dev = nvmeq->dev;
int result;
- s16 vector;
+ u16 vector = 0;
clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
@@ -1563,7 +1548,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
if (!polled)
vector = dev->num_vecs == 1 ? 0 : qid;
else
- vector = -1;
+ set_bit(NVMEQ_POLLED, &nvmeq->flags);
result = adapter_alloc_cq(dev, qid, nvmeq, vector);
if (result)
@@ -1578,7 +1563,8 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
nvmeq->cq_vector = vector;
nvme_init_queue(nvmeq, qid);
- if (vector != -1) {
+ if (!polled) {
+ nvmeq->cq_vector = vector;
result = queue_request_irq(nvmeq);
if (result < 0)
goto release_sq;
@@ -1588,7 +1574,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
return result;
release_sq:
- nvmeq->cq_vector = -1;
dev->online_queues--;
adapter_delete_sq(dev, qid);
release_cq:
@@ -1639,7 +1624,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(dev->dev);
- dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
+ dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
@@ -1730,7 +1715,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
nvme_init_queue(nvmeq, 0);
result = queue_request_irq(nvmeq);
if (result) {
- nvmeq->cq_vector = -1;
+ dev->online_queues--;
return result;
}
@@ -2171,10 +2156,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* number of interrupts.
*/
result = queue_request_irq(adminq);
- if (result) {
- adminq->cq_vector = -1;
+ if (result)
return result;
- }
set_bit(NVMEQ_ENABLED, &adminq->flags);
result = nvme_create_io_queues(dev);
@@ -2286,11 +2269,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
- if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
- dev->tagset.cmd_size = max(dev->tagset.cmd_size,
- nvme_pci_cmd_size(dev, true));
- }
+ dev->tagset.cmd_size = sizeof(struct nvme_iod);
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
@@ -2438,8 +2417,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* must flush all entered requests to their failed completion to avoid
* deadlocking blk-mq hot-cpu notifier.
*/
- if (shutdown)
+ if (shutdown) {
nvme_start_queues(&dev->ctrl);
+ if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
+ blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ }
mutex_unlock(&dev->shutdown_lock);
}
@@ -2979,6 +2961,9 @@ static struct pci_driver nvme_driver = {
static int __init nvme_init(void)
{
+ BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
return pci_register_driver(&nvme_driver);
}
@@ -2987,7 +2972,6 @@ static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
flush_workqueue(nvme_wq);
- _nvme_check_size();
}
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
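
The largest pci.c change above adds a fast path for requests with a single physical segment: the bvec is mapped with dma_map_bvec() and described either by PRP1/PRP2 directly (when offset plus length fits within two controller pages) or by a single SGL data descriptor, so neither the scatterlist nor the PRP/SGL pools are touched. A worked example of the two-PRP case in nvme_setup_prp_simple(), using made-up numbers and assuming a 4 KiB controller page size:

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_size = 4096;
		unsigned int bv_offset = 512;		/* segment offset into its page */
		unsigned int bv_len = 6144;		/* single-segment length */
		unsigned long long first_dma = 0x1000200ULL;	/* from dma_map_bvec() */

		if (bv_offset + bv_len > 2 * page_size) {
			printf("needs a PRP list or SGL, not the simple path\n");
			return 0;
		}

		unsigned int first_prp_len = page_size - bv_offset;
		printf("PRP1 = %#llx\n", first_dma);
		if (bv_len > first_prp_len)
			printf("PRP2 = %#llx\n", first_dma + first_prp_len);
		return 0;
	}

Here first_prp_len is 3584, so PRP2 comes out page-aligned at 0x1001000, which is what the controller requires for the second PRP entry.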
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 11a5ecae78c8..e1824c2e0a1c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -914,8 +914,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
{
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
- &ctrl->ctrl);
+ if (ctrl->ctrl.admin_tagset)
+ blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
+ nvme_cancel_request, &ctrl->ctrl);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
}
@@ -926,8 +927,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
- &ctrl->ctrl);
+ if (ctrl->ctrl.tagset)
+ blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
+ nvme_cancel_request, &ctrl->ctrl);
if (remove)
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 68c49dd67210..2b107a1d152b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -473,7 +473,6 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
}
return 0;
-
}
static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
@@ -634,7 +633,6 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
nvme_end_request(rq, cpu_to_le16(status << 1), res);
}
-
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
@@ -1425,7 +1423,8 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
if (!ret) {
set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
} else {
- __nvme_tcp_stop_queue(&ctrl->queues[idx]);
+ if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
+ __nvme_tcp_stop_queue(&ctrl->queues[idx]);
dev_err(nctrl->device,
"failed to connect queue: %d ret=%d\n", idx, ret);
}
@@ -1535,7 +1534,7 @@ out_free_queue:
return ret;
}
-static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
@@ -1565,7 +1564,7 @@ static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
return nr_io_queues;
}
-static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
+static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
unsigned int nr_io_queues;
int ret;
@@ -1582,7 +1581,7 @@ static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
dev_info(ctrl->device,
"creating %d I/O queues.\n", nr_io_queues);
- return nvme_tcp_alloc_io_queues(ctrl);
+ return __nvme_tcp_alloc_io_queues(ctrl);
}
static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
@@ -1599,7 +1598,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
int ret;
- ret = nvme_alloc_io_queues(ctrl);
+ ret = nvme_tcp_alloc_io_queues(ctrl);
if (ret)
return ret;
@@ -1710,7 +1709,9 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
{
blk_mq_quiesce_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
- blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
+ if (ctrl->admin_tagset)
+ blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+ nvme_cancel_request, ctrl);
blk_mq_unquiesce_queue(ctrl->admin_q);
nvme_tcp_destroy_admin_queue(ctrl, remove);
}
@@ -1722,7 +1723,9 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
return;
nvme_stop_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
- blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
+ if (ctrl->tagset)
+ blk_mq_tagset_busy_iter(ctrl->tagset,
+ nvme_cancel_request, ctrl);
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index d94f25cde019..3ef0a4e5eed6 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -3,6 +3,7 @@ config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
depends on CONFIGFS_FS
+ select SGL_ALLOC
help
This enables target side support for the NVMe protocol, that is
it allows the Linux kernel to implement NVMe subsystems and
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 76250181fee0..9f72d515fc4b 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+ return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
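
The new nvmet_get_log_page_offset() simply decodes the 64-bit Log Page Offset (LPO) field of the Get Log Page command; its sibling nvmet_get_log_page_len(), whose tail is visible just above, derives the transfer length from NUMD, a zero-based dword count split across the NUMDL/NUMDU fields. A sketch of that length computation, assuming the standard command layout (the existing helper is not reproduced in this diff):

	/* Reassemble the zero-based dword count and convert it to bytes. */
	static u32 sketch_get_log_page_len(struct nvme_command *cmd)
	{
		u32 numd = le16_to_cpu(cmd->get_log_page.numdu);

		numd <<= 16;
		numd += le16_to_cpu(cmd->get_log_page.numdl);

		return (numd + 1) * sizeof(u32);	/* NUMD is 0's based */
	}

Together the two helpers let the discovery code further down build the whole log once and copy data_len bytes starting at the requested offset.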
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index adb79545cdd7..08dd5af357f7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -898,8 +898,8 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
}
subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
- if (!subsys)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(subsys))
+ return ERR_CAST(subsys);
config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b3e765a95af8..7734a6acff85 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -8,6 +8,7 @@
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
+#include <linux/scatterlist.h>
#include "nvmet.h"
@@ -214,6 +215,8 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ctrl *ctrl;
+ lockdep_assert_held(&subsys->lock);
+
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
@@ -494,13 +497,14 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
int ret;
mutex_lock(&subsys->lock);
- ret = -EMFILE;
- if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
- goto out_unlock;
ret = 0;
if (ns->enabled)
goto out_unlock;
+ ret = -EMFILE;
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
@@ -644,7 +648,7 @@ static void nvmet_update_sq_head(struct nvmet_req *req)
} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
old_sqhd);
}
- req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+ req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}
static void nvmet_set_error(struct nvmet_req *req, u16 status)
@@ -653,7 +657,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
struct nvme_error_slot *new_error_slot;
unsigned long flags;
- req->rsp->status = cpu_to_le16(status << 1);
+ req->cqe->status = cpu_to_le16(status << 1);
if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
return;
@@ -673,15 +677,15 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
spin_unlock_irqrestore(&ctrl->error_lock, flags);
/* set the more bit for this request */
- req->rsp->status |= cpu_to_le16(1 << 14);
+ req->cqe->status |= cpu_to_le16(1 << 14);
}
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
- req->rsp->sq_id = cpu_to_le16(req->sq->qid);
- req->rsp->command_id = req->cmd->common.command_id;
+ req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+ req->cqe->command_id = req->cmd->common.command_id;
if (unlikely(status))
nvmet_set_error(req, status);
@@ -838,8 +842,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg = NULL;
req->sg_cnt = 0;
req->transfer_len = 0;
- req->rsp->status = 0;
- req->rsp->sq_head = 0;
+ req->cqe->status = 0;
+ req->cqe->sq_head = 0;
req->ns = NULL;
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
@@ -1066,7 +1070,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
}
@@ -1087,7 +1091,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
pr_warn("could not find controller %d for subsys %s / host %s\n",
cntlid, subsysnqn, hostnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
out:
@@ -1185,7 +1189,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
goto out;
}
@@ -1194,7 +1198,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!nvmet_host_allowed(subsys, hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
hostnqn, subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
goto out_put_subsystem;
@@ -1364,7 +1368,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
if (!subsys)
- return NULL;
+ return ERR_PTR(-ENOMEM);
subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
/* generate a random serial number as our controllers are ephemeral: */
@@ -1380,14 +1384,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
default:
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
kfree(subsys);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
subsys->type = type;
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
GFP_KERNEL);
if (!subsys->subsysnqn) {
kfree(subsys);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
kref_init(&subsys->ref);
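
nvmet_subsys_alloc() now reports why it failed by returning ERR_PTR()-encoded errors instead of a bare NULL, which is why the nvmet_subsys_make() hunk above and the nvmet_init_discovery() hunk further down switch to IS_ERR()/ERR_CAST()/PTR_ERR(). A minimal illustration of the convention, not the real nvmet code:

	static struct nvmet_subsys *sketch_alloc(const char *nqn, int type)
	{
		struct nvmet_subsys *subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);

		if (!subsys)
			return ERR_PTR(-ENOMEM);	/* allocation failure */
		if (type != NVME_NQN_NVME && type != NVME_NQN_DISC) {
			kfree(subsys);
			return ERR_PTR(-EINVAL);	/* unknown subsystem type */
		}
		return subsys;
	}

	static struct config_group *sketch_make(const char *name)
	{
		struct nvmet_subsys *subsys = sketch_alloc(name, NVME_NQN_NVME);

		if (IS_ERR(subsys))
			return ERR_CAST(subsys);	/* propagate the encoded errno */
		/* ... normal setup continues ... */
		return NULL;
	}

ERR_CAST() only re-types the encoded pointer, so the caller of nvmet_subsys_make() still receives the precise errno chosen at the allocation site.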
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index c872b47a88f3..5baf269f3f8a 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -30,14 +30,17 @@ void nvmet_port_disc_changed(struct nvmet_port *port,
{
struct nvmet_ctrl *ctrl;
+ lockdep_assert_held(&nvmet_config_sem);
nvmet_genctr++;
+ mutex_lock(&nvmet_disc_subsys->lock);
list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
continue;
__nvmet_disc_changed(port, ctrl);
}
+ mutex_unlock(&nvmet_disc_subsys->lock);
}
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
@@ -46,12 +49,14 @@ static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
{
struct nvmet_ctrl *ctrl;
+ mutex_lock(&nvmet_disc_subsys->lock);
list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
continue;
__nvmet_disc_changed(port, ctrl);
}
+ mutex_unlock(&nvmet_disc_subsys->lock);
}
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
@@ -131,54 +136,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_subsys_link *p;
+ struct nvmet_port *r;
+ size_t entries = 0;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
+ if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+ continue;
+ entries++;
+ }
+ list_for_each_entry(r, &req->port->referrals, entry)
+ entries++;
+ return entries;
+}
+
static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
{
const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmf_disc_rsp_page_hdr *hdr;
+ u64 offset = nvmet_get_log_page_offset(req->cmd);
size_t data_len = nvmet_get_log_page_len(req->cmd);
- size_t alloc_len = max(data_len, sizeof(*hdr));
- int residual_len = data_len - sizeof(*hdr);
+ size_t alloc_len;
struct nvmet_subsys_link *p;
struct nvmet_port *r;
u32 numrec = 0;
u16 status = 0;
+ void *buffer;
+
+ /* Spec requires dword aligned offsets */
+ if (offset & 0x3) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
/*
* Make sure we're passing at least a buffer of response header size.
* If host provided data len is less than the header size, only the
* number of bytes requested by host will be sent to host.
*/
- hdr = kzalloc(alloc_len, GFP_KERNEL);
- if (!hdr) {
+ down_read(&nvmet_config_sem);
+ alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+ buffer = kzalloc(alloc_len, GFP_KERNEL);
+ if (!buffer) {
+ up_read(&nvmet_config_sem);
status = NVME_SC_INTERNAL;
goto out;
}
- down_read(&nvmet_config_sem);
+ hdr = buffer;
list_for_each_entry(p, &req->port->subsystems, entry) {
+ char traddr[NVMF_TRADDR_SIZE];
+
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
- if (residual_len >= entry_size) {
- char traddr[NVMF_TRADDR_SIZE];
-
- nvmet_set_disc_traddr(req, req->port, traddr);
- nvmet_format_discovery_entry(hdr, req->port,
- p->subsys->subsysnqn, traddr,
- NVME_NQN_NVME, numrec);
- residual_len -= entry_size;
- }
+
+ nvmet_set_disc_traddr(req, req->port, traddr);
+ nvmet_format_discovery_entry(hdr, req->port,
+ p->subsys->subsysnqn, traddr,
+ NVME_NQN_NVME, numrec);
numrec++;
}
list_for_each_entry(r, &req->port->referrals, entry) {
- if (residual_len >= entry_size) {
- nvmet_format_discovery_entry(hdr, r,
- NVME_DISC_SUBSYS_NAME,
- r->disc_addr.traddr,
- NVME_NQN_DISC, numrec);
- residual_len -= entry_size;
- }
+ nvmet_format_discovery_entry(hdr, r,
+ NVME_DISC_SUBSYS_NAME,
+ r->disc_addr.traddr,
+ NVME_NQN_DISC, numrec);
numrec++;
}
@@ -190,8 +217,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
up_read(&nvmet_config_sem);
- status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
- kfree(hdr);
+ status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+ kfree(buffer);
out:
nvmet_req_complete(req, status);
}
@@ -350,8 +377,8 @@ int __init nvmet_init_discovery(void)
{
nvmet_disc_subsys =
nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
- if (!nvmet_disc_subsys)
- return -ENOMEM;
+ if (IS_ERR(nvmet_disc_subsys))
+ return PTR_ERR(nvmet_disc_subsys);
return 0;
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 3a76ebc3d155..3b9f79aba98f 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -72,7 +72,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
offsetof(struct nvmf_property_get_command, attrib);
}
- req->rsp->result.u64 = cpu_to_le64(val);
+ req->cqe->result.u64 = cpu_to_le64(val);
nvmet_req_complete(req, status);
}
@@ -124,7 +124,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
req->sq->sqhd_disabled = true;
- req->rsp->sq_head = cpu_to_le16(0xffff);
+ req->cqe->sq_head = cpu_to_le16(0xffff);
}
if (ctrl->ops->install_queue) {
@@ -158,7 +158,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
/* zero out initial completion result, assign values as needed */
- req->rsp->result.u32 = 0;
+ req->cqe->result.u32 = 0;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
@@ -172,7 +172,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
@@ -195,7 +195,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
pr_info("creating controller %d for subsystem %s for NQN %s.\n",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
- req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
out:
kfree(d);
@@ -222,7 +222,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out;
/* zero out initial completion result, assign values as needed */
- req->rsp->result.u32 = 0;
+ req->cqe->result.u32 = 0;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
@@ -240,14 +240,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (unlikely(qid > ctrl->subsys->max_qid)) {
pr_warn("invalid queue id (%d)\n", qid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
goto out_ctrl_put;
}
status = nvmet_install_queue(ctrl, req);
if (status) {
/* pass back cntlid that had the issue of installing queue */
- req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
goto out_ctrl_put;
}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 98b7b1f4ee96..508661af0f50 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -128,12 +128,12 @@ struct nvmet_fc_tgt_queue {
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
struct nvmet_fc_tgt_assoc *assoc;
- struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
struct list_head fod_list;
struct list_head pending_cmd_list;
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
+ struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_tgt_assoc {
@@ -588,9 +588,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (qid > NVMET_NR_QUEUES)
return NULL;
- queue = kzalloc((sizeof(*queue) +
- (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
- GFP_KERNEL);
+ queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
if (!queue)
return NULL;
@@ -603,7 +601,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue->work_q)
goto out_a_put;
- queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
queue->qid = qid;
queue->sqsize = sqsize;
queue->assoc = assoc;
@@ -2187,7 +2184,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
}
fod->req.cmd = &fod->cmdiubuf.sqe;
- fod->req.rsp = &fod->rspiubuf.cqe;
+ fod->req.cqe = &fod->rspiubuf.cqe;
fod->req.port = tgtport->pe->port;
/* clear any response payload */
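
The fc.c hunks above turn the per-queue fcp_iod array into a C99 flexible array member sized with struct_size(), removing both the hand-written size arithmetic and the pointer fix-up into "&queue[1]". The general shape of the idiom, as a sketch rather than the actual nvmet-fc code:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct sketch_queue {
		unsigned int qid;
		unsigned int sqsize;
		/* ... other fixed-size fields ... */
		struct nvmet_fc_fcp_iod fod[];	/* flexible array member, must be last */
	};

	static struct sketch_queue *sketch_alloc_queue(unsigned int sqsize)
	{
		/*
		 * struct_size(q, fod, sqsize) is sizeof(*q) plus
		 * sqsize * sizeof(q->fod[0]), with the multiplication
		 * checked for overflow.
		 */
		struct sketch_queue *q = kzalloc(struct_size(q, fod, sqsize),
						 GFP_KERNEL);

		if (!q)
			return NULL;
		q->sqsize = sqsize;
		return q;	/* q->fod[i] is addressable directly, no cast needed */
	}

Because the array is part of the struct's type, q->fod no longer needs the separate assignment that the old "(struct nvmet_fc_fcp_iod *)&queue[1]" line performed.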
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index a065dbfc43b1..3efc52f9c309 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -196,7 +196,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
GFP_KERNEL, 0, bio);
if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range->slba);
- return blk_to_nvme_status(req, errno_to_blk_status(ret));
+ return errno_to_nvme_status(req, ret);
}
return NVME_SC_SUCCESS;
}
@@ -252,7 +252,6 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
struct bio *bio = NULL;
- u16 status = NVME_SC_SUCCESS;
sector_t sector;
sector_t nr_sector;
int ret;
@@ -264,13 +263,12 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
GFP_KERNEL, &bio, 0);
- status = blk_to_nvme_status(req, errno_to_blk_status(ret));
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
submit_bio(bio);
} else {
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, errno_to_nvme_status(req, ret));
}
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index bc6ebb51b0bf..05453f5d1448 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -49,7 +49,12 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
goto err;
ns->size = stat.size;
- ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+ /*
+ * i_blkbits can be greater than the universally accepted upper bound,
+ * so make sure we export a sane namespace lba_shift.
+ */
+ ns->blksize_shift = min_t(u8,
+ file_inode(ns->file)->i_blkbits, 12);
ns->bvec_cache = kmem_cache_create("nvmet-bvec",
NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index b9f623ab01f3..9e211ad6bdd3 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -18,7 +18,7 @@
struct nvme_loop_iod {
struct nvme_request nvme_req;
struct nvme_command cmd;
- struct nvme_completion rsp;
+ struct nvme_completion cqe;
struct nvmet_req req;
struct nvme_loop_queue *queue;
struct work_struct work;
@@ -94,7 +94,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
{
struct nvme_loop_queue *queue =
container_of(req->sq, struct nvme_loop_queue, nvme_sq);
- struct nvme_completion *cqe = req->rsp;
+ struct nvme_completion *cqe = req->cqe;
/*
* AEN requests are special as they don't time out and can
@@ -129,20 +129,6 @@ static void nvme_loop_execute_work(struct work_struct *work)
nvmet_req_execute(&iod->req);
}
-static enum blk_eh_timer_return
-nvme_loop_timeout(struct request *rq, bool reserved)
-{
- struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
-
- /* queue error recovery */
- nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
-
- /* fail with DNR on admin cmd timeout */
- nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
-
- return BLK_EH_DONE;
-}
-
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -207,7 +193,7 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
struct nvme_loop_iod *iod, unsigned int queue_idx)
{
iod->req.cmd = &iod->cmd;
- iod->req.rsp = &iod->rsp;
+ iod->req.cqe = &iod->cqe;
iod->queue = &ctrl->queues[queue_idx];
INIT_WORK(&iod->work, nvme_loop_execute_work);
return 0;
@@ -253,7 +239,6 @@ static const struct blk_mq_ops nvme_loop_mq_ops = {
.complete = nvme_loop_complete_rq,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_hctx,
- .timeout = nvme_loop_timeout,
};
static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
@@ -261,7 +246,6 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
.complete = nvme_loop_complete_rq,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_admin_hctx,
- .timeout = nvme_loop_timeout,
};
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 51e49efd7849..c25d88fc9dec 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -284,7 +284,7 @@ struct nvmet_fabrics_ops {
struct nvmet_req {
struct nvme_command *cmd;
- struct nvme_completion *rsp;
+ struct nvme_completion *cqe;
struct nvmet_sq *sq;
struct nvmet_cq *cq;
struct nvmet_ns *ns;
@@ -322,7 +322,7 @@ extern struct workqueue_struct *buffered_io_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
- req->rsp->result.u32 = cpu_to_le32(result);
+ req->cqe->result.u32 = cpu_to_le32(result);
}
/*
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ef893addf341..36d906a7f70d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -160,7 +160,7 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
return !nvme_is_write(rsp->req.cmd) &&
rsp->req.transfer_len &&
- !rsp->req.rsp->status &&
+ !rsp->req.cqe->status &&
!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
@@ -364,16 +364,17 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_rsp *r)
{
/* NVMe CQE / RDMA SEND */
- r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
- if (!r->req.rsp)
+ r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
+ if (!r->req.cqe)
goto out;
- r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
- sizeof(*r->req.rsp), DMA_TO_DEVICE);
+ r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
+ sizeof(*r->req.cqe), DMA_TO_DEVICE);
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- r->send_sge.length = sizeof(*r->req.rsp);
+ r->req.p2p_client = &ndev->device->dev;
+ r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
r->send_cqe.done = nvmet_rdma_send_done;
@@ -388,7 +389,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
return 0;
out_free_rsp:
- kfree(r->req.rsp);
+ kfree(r->req.cqe);
out:
return -ENOMEM;
}
@@ -397,8 +398,8 @@ static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_rsp *r)
{
ib_dma_unmap_single(ndev->device, r->send_sge.addr,
- sizeof(*r->req.rsp), DMA_TO_DEVICE);
- kfree(r->req.rsp);
+ sizeof(*r->req.cqe), DMA_TO_DEVICE);
+ kfree(r->req.cqe);
}
static int
@@ -763,8 +764,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->send_sge.addr, cmd->send_sge.length,
DMA_TO_DEVICE);
- cmd->req.p2p_client = &queue->dev->device->dev;
-
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index ad0df786fe93..69b83fa0c76c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -161,14 +161,14 @@ static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
- return nvmet_tcp_has_data_in(cmd) && !cmd->req.rsp->status;
+ return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}
static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
return !nvme_is_write(cmd->req.cmd) &&
cmd->req.transfer_len > 0 &&
- !cmd->req.rsp->status;
+ !cmd->req.cqe->status;
}
static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
@@ -371,13 +371,14 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
cmd->state = NVMET_TCP_SEND_DATA_PDU;
pdu->hdr.type = nvme_tcp_c2h_data;
- pdu->hdr.flags = NVME_TCP_F_DATA_LAST;
+ pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
+ NVME_TCP_F_DATA_SUCCESS : 0);
pdu->hdr.hlen = sizeof(*pdu);
pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
pdu->hdr.plen =
cpu_to_le32(pdu->hdr.hlen + hdgst +
cmd->req.transfer_len + ddgst);
- pdu->command_id = cmd->req.rsp->command_id;
+ pdu->command_id = cmd->req.cqe->command_id;
pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
@@ -542,8 +543,19 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
cmd->state = NVMET_TCP_SEND_DDGST;
cmd->offset = 0;
} else {
- nvmet_setup_response_pdu(cmd);
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
+ }
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
}
+
return 1;
}
@@ -619,7 +631,13 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
return ret;
cmd->offset += ret;
- nvmet_setup_response_pdu(cmd);
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
return 1;
}
@@ -756,12 +774,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
return -EPROTO;
}
- if (icreq->maxr2t != 0) {
- pr_err("queue %d: unsupported maxr2t %d\n", queue->idx,
- le32_to_cpu(icreq->maxr2t) + 1);
- return -EPROTO;
- }
-
queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
if (queue->hdr_digest || queue->data_digest) {
@@ -1206,7 +1218,7 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
if (!c->rsp_pdu)
goto out_free_cmd;
- c->req.rsp = &c->rsp_pdu->cqe;
+ c->req.cqe = &c->rsp_pdu->cqe;
c->data_pdu = page_frag_alloc(&queue->pf_cache,
sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 530d570724c9..6b2c4254c2fb 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -13,6 +13,16 @@ menuconfig NVMEM
if NVMEM
+config NVMEM_SYSFS
+ bool "/sys/bus/nvmem/devices/*/nvmem (sysfs interface)"
+ depends on SYSFS
+ default y
+ help
+ Say Y here to add a sysfs interface for NVMEM.
+
+ This interface is mostly used by userspace applications to
+ read/write directly into nvmem.
+
config NVMEM_IMX_IIM
tristate "i.MX IC Identification Module support"
depends on ARCH_MXC || COMPILE_TEST
@@ -25,8 +35,8 @@ config NVMEM_IMX_IIM
will be called nvmem-imx-iim.
config NVMEM_IMX_OCOTP
- tristate "i.MX6 On-Chip OTP Controller support"
- depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST
+ tristate "i.MX 6/7/8 On-Chip OTP Controller support"
+ depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
help
This is a driver for the On-Chip OTP Controller (OCOTP) available on
@@ -113,6 +123,16 @@ config NVMEM_BCM_OCOTP
This driver can also be built as a module. If so, the module
will be called nvmem-bcm-ocotp.
+config NVMEM_STM32_ROMEM
+ tristate "STMicroelectronics STM32 factory-programmed memory support"
+ depends on ARCH_STM32 || COMPILE_TEST
+ help
+ Say y here to enable read-only access for STMicroelectronics STM32
+ factory-programmed memory area.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-stm32-romem.
+
config NVMEM_SUNXI_SID
tristate "Allwinner SoCs SID support"
depends on ARCH_SUNXI
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 2ece8ffffdda..c1fe4768dfef 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -6,6 +6,9 @@
obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o
+obj-$(CONFIG_NVMEM_SYSFS) += nvmem_sysfs.o
+nvmem_sysfs-y := nvmem-sysfs.o
+
# Devices
obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
nvmem-bcm-ocotp-y := bcm-ocotp.o
@@ -26,6 +29,8 @@ nvmem_qfprom-y := qfprom.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
+nvmem_stm32_romem-y := stm32-romem.o
+obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
nvmem_sunxi_sid-y := sunxi_sid.o
obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
nvmem-uniphier-efuse-y := uniphier-efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index f24008b66826..c7892c3da91f 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -17,27 +17,7 @@
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>
-
-struct nvmem_device {
- struct module *owner;
- struct device dev;
- int stride;
- int word_size;
- int id;
- struct kref refcnt;
- size_t size;
- bool read_only;
- int flags;
- enum nvmem_type type;
- struct bin_attribute eeprom;
- struct device *base_dev;
- struct list_head cells;
- nvmem_reg_read_t reg_read;
- nvmem_reg_write_t reg_write;
- void *priv;
-};
-
-#define FLAG_COMPAT BIT(0)
+#include "nvmem.h"
struct nvmem_cell {
const char *name;
@@ -61,18 +41,7 @@ static LIST_HEAD(nvmem_lookup_list);
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
-static const char * const nvmem_type_str[] = {
- [NVMEM_TYPE_UNKNOWN] = "Unknown",
- [NVMEM_TYPE_EEPROM] = "EEPROM",
- [NVMEM_TYPE_OTP] = "OTP",
- [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
-};
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key eeprom_lock_key;
-#endif
-
-#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
@@ -91,187 +60,6 @@ static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
return -EINVAL;
}
-static ssize_t type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvmem_device *nvmem = to_nvmem_device(dev);
-
- return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
-}
-
-static DEVICE_ATTR_RO(type);
-
-static struct attribute *nvmem_attrs[] = {
- &dev_attr_type.attr,
- NULL,
-};
-
-static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t pos, size_t count)
-{
- struct device *dev;
- struct nvmem_device *nvmem;
- int rc;
-
- if (attr->private)
- dev = attr->private;
- else
- dev = container_of(kobj, struct device, kobj);
- nvmem = to_nvmem_device(dev);
-
- /* Stop the user from reading */
- if (pos >= nvmem->size)
- return 0;
-
- if (count < nvmem->word_size)
- return -EINVAL;
-
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
- count = round_down(count, nvmem->word_size);
-
- rc = nvmem_reg_read(nvmem, pos, buf, count);
-
- if (rc)
- return rc;
-
- return count;
-}
-
-static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t pos, size_t count)
-{
- struct device *dev;
- struct nvmem_device *nvmem;
- int rc;
-
- if (attr->private)
- dev = attr->private;
- else
- dev = container_of(kobj, struct device, kobj);
- nvmem = to_nvmem_device(dev);
-
- /* Stop the user from writing */
- if (pos >= nvmem->size)
- return -EFBIG;
-
- if (count < nvmem->word_size)
- return -EINVAL;
-
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
- count = round_down(count, nvmem->word_size);
-
- rc = nvmem_reg_write(nvmem, pos, buf, count);
-
- if (rc)
- return rc;
-
- return count;
-}
-
-/* default read/write permissions */
-static struct bin_attribute bin_attr_rw_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0644,
- },
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
-};
-
-static struct bin_attribute *nvmem_bin_rw_attributes[] = {
- &bin_attr_rw_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_rw_group = {
- .bin_attrs = nvmem_bin_rw_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_rw_dev_groups[] = {
- &nvmem_bin_rw_group,
- NULL,
-};
-
-/* read only permission */
-static struct bin_attribute bin_attr_ro_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0444,
- },
- .read = bin_attr_nvmem_read,
-};
-
-static struct bin_attribute *nvmem_bin_ro_attributes[] = {
- &bin_attr_ro_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_ro_group = {
- .bin_attrs = nvmem_bin_ro_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_ro_dev_groups[] = {
- &nvmem_bin_ro_group,
- NULL,
-};
-
-/* default read/write permissions, root only */
-static struct bin_attribute bin_attr_rw_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0600,
- },
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
-};
-
-static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
- &bin_attr_rw_root_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_rw_root_group = {
- .bin_attrs = nvmem_bin_rw_root_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
- &nvmem_bin_rw_root_group,
- NULL,
-};
-
-/* read only permission, root only */
-static struct bin_attribute bin_attr_ro_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0400,
- },
- .read = bin_attr_nvmem_read,
-};
-
-static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
- &bin_attr_ro_root_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_ro_root_group = {
- .bin_attrs = nvmem_bin_ro_root_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
- &nvmem_bin_ro_root_group,
- NULL,
-};
-
static void nvmem_release(struct device *dev)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
@@ -422,43 +210,6 @@ err:
return rval;
}
-/*
- * nvmem_setup_compat() - Create an additional binary entry in
- * drivers sys directory, to be backwards compatible with the older
- * drivers/misc/eeprom drivers.
- */
-static int nvmem_setup_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- int rval;
-
- if (!config->base_dev)
- return -EINVAL;
-
- if (nvmem->read_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_root_nvmem;
- nvmem->eeprom.attr.name = "eeprom";
- nvmem->eeprom.size = nvmem->size;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- nvmem->eeprom.attr.key = &eeprom_lock_key;
-#endif
- nvmem->eeprom.private = &nvmem->dev;
- nvmem->base_dev = config->base_dev;
-
- rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
- if (rval) {
- dev_err(&nvmem->dev,
- "Failed to create eeprom binary file %d\n", rval);
- return rval;
- }
-
- nvmem->flags |= FLAG_COMPAT;
-
- return 0;
-}
-
/**
* nvmem_register_notifier() - Register a notifier block for nvmem events.
*
@@ -651,14 +402,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
- if (config->root_only)
- nvmem->dev.groups = nvmem->read_only ?
- nvmem_ro_root_dev_groups :
- nvmem_rw_root_dev_groups;
- else
- nvmem->dev.groups = nvmem->read_only ?
- nvmem_ro_dev_groups :
- nvmem_rw_dev_groups;
+ nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);
device_initialize(&nvmem->dev);
@@ -669,7 +413,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
goto err_put_device;
if (config->compat) {
- rval = nvmem_setup_compat(nvmem, config);
+ rval = nvmem_sysfs_setup_compat(nvmem, config);
if (rval)
goto err_device_del;
}
@@ -696,7 +440,7 @@ err_remove_cells:
nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
if (config->compat)
- device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+ nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
device_del(&nvmem->dev);
err_put_device:
@@ -1166,7 +910,7 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put);
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
u8 *p, *b;
- int i, bit_offset = cell->bit_offset;
+ int i, extra, bit_offset = cell->bit_offset;
p = b = buf;
if (bit_offset) {
@@ -1181,11 +925,16 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
p = b;
*b++ >>= bit_offset;
}
-
- /* result fits in less bytes */
- if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
- *p-- = 0;
+ } else {
+ /* point to the msb */
+ p += cell->bytes - 1;
}
+
+ /* result fits in less bytes */
+ extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
+ while (--extra >= 0)
+ *p-- = 0;
+
/* clear msb bits if any leftover in the last byte */
*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
}
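
The rework above also zero-fills the unused most significant bytes when bit_offset is zero: p is first moved to the buffer's last byte, every byte beyond DIV_ROUND_UP(nbits, BITS_PER_BYTE) is cleared, and any leftover bits of the final byte are masked. A hedged, kernel-style sketch of that clean-up step in isolation (the helper name is hypothetical, buf[0] is assumed to be the least significant byte):

#include <linux/bits.h>
#include <linux/kernel.h>

/* Illustration only: zero the bytes above an nbits-wide payload, then mask
 * the leftover bits in the last used byte, mirroring the no-bit_offset path
 * of the change above. */
static void example_clear_unused_msb(u8 *buf, size_t bytes, unsigned int nbits)
{
	size_t used = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	u8 *p = buf + bytes - 1;		/* point to the msb */

	while (p >= buf + used)			/* result fits in fewer bytes */
		*p-- = 0;

	if (nbits % BITS_PER_BYTE)		/* clear leftover msb bits */
		*p &= GENMASK((nbits % BITS_PER_BYTE) - 1, 0);
}
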
@@ -1335,6 +1084,43 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
EXPORT_SYMBOL_GPL(nvmem_cell_write);
/**
+ * nvmem_cell_read_u16() - Read a cell value as an u16
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+
+ cell = nvmem_cell_get(dev, cell_id);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf)) {
+ nvmem_cell_put(cell);
+ return PTR_ERR(buf);
+ }
+ if (len != sizeof(*val)) {
+ kfree(buf);
+ nvmem_cell_put(cell);
+ return -EINVAL;
+ }
+ memcpy(val, buf, sizeof(*val));
+ kfree(buf);
+ nvmem_cell_put(cell);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
+
+/**
* nvmem_cell_read_u32() - Read a cell value as an u32
*
* @dev: Device that requests the nvmem cell.
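
A consumer would use the new helper much like the existing u32 variant: look up the cell by name, read it, and get the value copied out if it is exactly two bytes wide. A minimal sketch (the device pointer and the "calib" cell name are placeholders, not taken from this patch):

#include <linux/device.h>
#include <linux/nvmem-consumer.h>

static int example_read_calibration(struct device *dev, u16 *calib)
{
	int ret;

	/* Resolves the cell, reads it and verifies it is sizeof(u16) long. */
	ret = nvmem_cell_read_u16(dev, "calib", calib);
	if (ret)
		return ret;

	dev_info(dev, "calibration word: %#x\n", *calib);
	return 0;
}
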
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
index 6651e4cdc002..34582293b985 100644
--- a/drivers/nvmem/imx-iim.c
+++ b/drivers/nvmem/imx-iim.c
@@ -104,7 +104,6 @@ static int imx_iim_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
- struct resource *res;
struct iim_priv *iim;
struct nvmem_device *nvmem;
struct nvmem_config cfg = {};
@@ -114,8 +113,7 @@ static int imx_iim_probe(struct platform_device *pdev)
if (!iim)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iim->base = devm_ioremap_resource(dev, res);
+ iim->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iim->base))
return PTR_ERR(iim->base);
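
The conversions in this series replace the platform_get_resource() plus devm_ioremap_resource() pair with devm_platform_ioremap_resource(), which performs the same lookup and mapping internally. A minimal probe sketch using the helper (the function name is hypothetical):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Equivalent to platform_get_resource(pdev, IORESOURCE_MEM, 0)
	 * followed by devm_ioremap_resource() on the result. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
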
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 08a9b1ef8ae4..4cf7b61e4bf5 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -444,6 +444,12 @@ static const struct ocotp_params imx7ulp_params = {
.bank_address_words = 0,
};
+static const struct ocotp_params imx8mq_params = {
+ .nregs = 256,
+ .bank_address_words = 4,
+ .set_timing = imx_ocotp_set_imx7_timing,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
@@ -453,6 +459,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
{ .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
+ { .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
@@ -460,7 +467,6 @@ MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
static int imx_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct ocotp_priv *priv;
struct nvmem_device *nvmem;
@@ -470,8 +476,7 @@ static int imx_ocotp_probe(struct platform_device *pdev)
priv->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 53122f59c4b2..fbb7db6ee1f5 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -145,7 +145,6 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct mxs_data *data;
struct mxs_ocotp *otp;
- struct resource *res;
const struct of_device_id *match;
int ret;
@@ -157,8 +156,7 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
if (!otp)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- otp->base = devm_ioremap_resource(dev, res);
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(otp->base))
return PTR_ERR(otp->base);
diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
new file mode 100644
index 000000000000..6f303b91f6e7
--- /dev/null
+++ b/drivers/nvmem/nvmem-sysfs.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Limited
+ */
+#include "nvmem.h"
+
+static const char * const nvmem_type_str[] = {
+ [NVMEM_TYPE_UNKNOWN] = "Unknown",
+ [NVMEM_TYPE_EEPROM] = "EEPROM",
+ [NVMEM_TYPE_OTP] = "OTP",
+ [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key eeprom_lock_key;
+#endif
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *nvmem_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+
+static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int rc;
+
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = container_of(kobj, struct device, kobj);
+ nvmem = to_nvmem_device(dev);
+
+ /* Stop the user from reading */
+ if (pos >= nvmem->size)
+ return 0;
+
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int rc;
+
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = container_of(kobj, struct device, kobj);
+ nvmem = to_nvmem_device(dev);
+
+ /* Stop the user from writing */
+ if (pos >= nvmem->size)
+ return -EFBIG;
+
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+/* default read/write permissions */
+static struct bin_attribute bin_attr_rw_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0644,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_rw_attributes[] = {
+ &bin_attr_rw_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_rw_group = {
+ .bin_attrs = nvmem_bin_rw_attributes,
+ .attrs = nvmem_attrs,
+};
+
+static const struct attribute_group *nvmem_rw_dev_groups[] = {
+ &nvmem_bin_rw_group,
+ NULL,
+};
+
+/* read only permission */
+static struct bin_attribute bin_attr_ro_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0444,
+ },
+ .read = bin_attr_nvmem_read,
+};
+
+static struct bin_attribute *nvmem_bin_ro_attributes[] = {
+ &bin_attr_ro_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_ro_group = {
+ .bin_attrs = nvmem_bin_ro_attributes,
+ .attrs = nvmem_attrs,
+};
+
+static const struct attribute_group *nvmem_ro_dev_groups[] = {
+ &nvmem_bin_ro_group,
+ NULL,
+};
+
+/* default read/write permissions, root only */
+static struct bin_attribute bin_attr_rw_root_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0600,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
+ &bin_attr_rw_root_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_rw_root_group = {
+ .bin_attrs = nvmem_bin_rw_root_attributes,
+ .attrs = nvmem_attrs,
+};
+
+static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
+ &nvmem_bin_rw_root_group,
+ NULL,
+};
+
+/* read only permission, root only */
+static struct bin_attribute bin_attr_ro_root_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0400,
+ },
+ .read = bin_attr_nvmem_read,
+};
+
+static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
+ &bin_attr_ro_root_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_ro_root_group = {
+ .bin_attrs = nvmem_bin_ro_root_attributes,
+ .attrs = nvmem_attrs,
+};
+
+static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
+ &nvmem_bin_ro_root_group,
+ NULL,
+};
+
+const struct attribute_group **nvmem_sysfs_get_groups(
+ struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ if (config->root_only)
+ return nvmem->read_only ?
+ nvmem_ro_root_dev_groups :
+ nvmem_rw_root_dev_groups;
+
+ return nvmem->read_only ? nvmem_ro_dev_groups : nvmem_rw_dev_groups;
+}
+
+/*
+ * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
+ * driver's sysfs directory, to be backwards compatible with the older
+ * drivers/misc/eeprom drivers.
+ */
+int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ int rval;
+
+ if (!config->compat)
+ return 0;
+
+ if (!config->base_dev)
+ return -EINVAL;
+
+ if (nvmem->read_only)
+ nvmem->eeprom = bin_attr_ro_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_rw_root_nvmem;
+ nvmem->eeprom.attr.name = "eeprom";
+ nvmem->eeprom.size = nvmem->size;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ nvmem->eeprom.attr.key = &eeprom_lock_key;
+#endif
+ nvmem->eeprom.private = &nvmem->dev;
+ nvmem->base_dev = config->base_dev;
+
+ rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
+ if (rval) {
+ dev_err(&nvmem->dev,
+ "Failed to create eeprom binary file %d\n", rval);
+ return rval;
+ }
+
+ nvmem->flags |= FLAG_COMPAT;
+
+ return 0;
+}
+
+void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ if (config->compat)
+ device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+}
diff --git a/drivers/nvmem/nvmem.h b/drivers/nvmem/nvmem.h
new file mode 100644
index 000000000000..eb8ed7121fa3
--- /dev/null
+++ b/drivers/nvmem/nvmem.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DRIVERS_NVMEM_H
+#define _DRIVERS_NVMEM_H
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+
+struct nvmem_device {
+ struct module *owner;
+ struct device dev;
+ int stride;
+ int word_size;
+ int id;
+ struct kref refcnt;
+ size_t size;
+ bool read_only;
+ int flags;
+ enum nvmem_type type;
+ struct bin_attribute eeprom;
+ struct device *base_dev;
+ struct list_head cells;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ void *priv;
+};
+
+#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+#define FLAG_COMPAT BIT(0)
+
+#ifdef CONFIG_NVMEM_SYSFS
+const struct attribute_group **nvmem_sysfs_get_groups(
+ struct nvmem_device *nvmem,
+ const struct nvmem_config *config);
+int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config);
+void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config);
+#else
+static inline const struct attribute_group **nvmem_sysfs_get_groups(
+ struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ return NULL;
+}
+
+static inline int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ return -ENOSYS;
+}
+static inline void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+}
+#endif /* CONFIG_NVMEM_SYSFS */
+
+#endif /* _DRIVERS_NVMEM_H */
diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
new file mode 100644
index 000000000000..354be526897f
--- /dev/null
+++ b/drivers/nvmem/stm32-romem.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32 Factory-programmed memory read access driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com> for STMicroelectronics.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+
+/* BSEC secure service access from non-secure */
+#define STM32_SMC_BSEC 0x82001003
+#define STM32_SMC_READ_SHADOW 0x01
+#define STM32_SMC_PROG_OTP 0x02
+#define STM32_SMC_WRITE_SHADOW 0x03
+#define STM32_SMC_READ_OTP 0x04
+
+/* shadow registers offset */
+#define STM32MP15_BSEC_DATA0 0x200
+
+/* 32 (x 32-bits) lower shadow registers */
+#define STM32MP15_BSEC_NUM_LOWER 32
+
+struct stm32_romem_cfg {
+ int size;
+};
+
+struct stm32_romem_priv {
+ void __iomem *base;
+ struct nvmem_config cfg;
+};
+
+static int stm32_romem_read(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+ u8 *buf8 = buf;
+ int i;
+
+ for (i = offset; i < offset + bytes; i++)
+ *buf8++ = readb_relaxed(priv->base + i);
+
+ return 0;
+}
+
+static int stm32_bsec_smc(u8 op, u32 otp, u32 data, u32 *result)
+{
+#if IS_ENABLED(CONFIG_HAVE_ARM_SMCCC)
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(STM32_SMC_BSEC, op, otp, data, 0, 0, 0, 0, &res);
+ if (res.a0)
+ return -EIO;
+
+ if (result)
+ *result = (u32)res.a1;
+
+ return 0;
+#else
+ return -ENXIO;
+#endif
+}
+
+static int stm32_bsec_read(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+ struct device *dev = priv->cfg.dev;
+ u32 roffset, rbytes, val;
+ u8 *buf8 = buf, *val8 = (u8 *)&val;
+ int i, j = 0, ret, skip_bytes, size;
+
+ /* Round unaligned access to 32-bits */
+ roffset = rounddown(offset, 4);
+ skip_bytes = offset & 0x3;
+ rbytes = roundup(bytes + skip_bytes, 4);
+
+ if (roffset + rbytes > priv->cfg.size)
+ return -EINVAL;
+
+ for (i = roffset; (i < roffset + rbytes); i += 4) {
+ u32 otp = i >> 2;
+
+ if (otp < STM32MP15_BSEC_NUM_LOWER) {
+ /* read lower data from shadow registers */
+ val = readl_relaxed(
+ priv->base + STM32MP15_BSEC_DATA0 + i);
+ } else {
+ ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, otp, 0,
+ &val);
+ if (ret) {
+ dev_err(dev, "Can't read data%d (%d)\n", otp,
+ ret);
+ return ret;
+ }
+ }
+ /* skip first bytes in case of unaligned read */
+ if (skip_bytes)
+ size = min(bytes, (size_t)(4 - skip_bytes));
+ else
+ size = min(bytes, (size_t)4);
+ memcpy(&buf8[j], &val8[skip_bytes], size);
+ bytes -= size;
+ j += size;
+ skip_bytes = 0;
+ }
+
+ return 0;
+}
+
+static int stm32_bsec_write(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+ struct device *dev = priv->cfg.dev;
+ u32 *buf32 = buf;
+ int ret, i;
+
+ /* Allow only writing complete 32-bits aligned words */
+ if ((bytes % 4) || (offset % 4))
+ return -EINVAL;
+
+ for (i = offset; i < offset + bytes; i += 4) {
+ ret = stm32_bsec_smc(STM32_SMC_PROG_OTP, i >> 2, *buf32++,
+ NULL);
+ if (ret) {
+ dev_err(dev, "Can't write data%d (%d)\n", i >> 2, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int stm32_romem_probe(struct platform_device *pdev)
+{
+ const struct stm32_romem_cfg *cfg;
+ struct device *dev = &pdev->dev;
+ struct stm32_romem_priv *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->cfg.name = "stm32-romem";
+ priv->cfg.word_size = 1;
+ priv->cfg.stride = 1;
+ priv->cfg.dev = dev;
+ priv->cfg.priv = priv;
+ priv->cfg.owner = THIS_MODULE;
+
+ cfg = (const struct stm32_romem_cfg *)
+ of_match_device(dev->driver->of_match_table, dev)->data;
+ if (!cfg) {
+ priv->cfg.read_only = true;
+ priv->cfg.size = resource_size(res);
+ priv->cfg.reg_read = stm32_romem_read;
+ } else {
+ priv->cfg.size = cfg->size;
+ priv->cfg.reg_read = stm32_bsec_read;
+ priv->cfg.reg_write = stm32_bsec_write;
+ }
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &priv->cfg));
+}
+
+static const struct stm32_romem_cfg stm32mp15_bsec_cfg = {
+ .size = 384, /* 96 x 32-bits data words */
+};
+
+static const struct of_device_id stm32_romem_of_match[] = {
+ { .compatible = "st,stm32f4-otp", }, {
+ .compatible = "st,stm32mp15-bsec",
+ .data = (void *)&stm32mp15_bsec_cfg,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, stm32_romem_of_match);
+
+static struct platform_driver stm32_romem_driver = {
+ .probe = stm32_romem_probe,
+ .driver = {
+ .name = "stm32-romem",
+ .of_match_table = of_match_ptr(stm32_romem_of_match),
+ },
+};
+module_platform_driver(stm32_romem_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 RO-MEM");
+MODULE_ALIAS("platform:nvmem-stm32-romem");
+MODULE_LICENSE("GPL v2");
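
stm32_bsec_read() above rounds unaligned requests to whole 32-bit OTP words: for example, offset 6 and 3 bytes become roffset 4, skip_bytes 2 and rbytes 8, so two words are fetched and only the requested bytes are copied out. A standalone sketch of just that rounding step (illustration only, helper name hypothetical):

/* Round a byte-granular request to whole 32-bit words as stm32_bsec_read()
 * does: roffset/rbytes describe the word-aligned window, skip is the number
 * of leading bytes to discard from the first word. */
static void example_bsec_align(unsigned int offset, size_t bytes,
			       unsigned int *roffset, unsigned int *skip,
			       unsigned int *rbytes)
{
	*roffset = offset & ~0x3U;			/* rounddown(offset, 4) */
	*skip = offset & 0x3;
	*rbytes = (bytes + *skip + 3) & ~0x3U;		/* roundup(bytes + skip, 4) */
}
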
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 570a2e354f30..a079a80ddf2c 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Allwinner sunXi SoCs Security ID support.
*
* Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
* Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
@@ -35,13 +26,6 @@
#define SUN8I_SID_OP_LOCK (0xAC << 8)
#define SUN8I_SID_READ BIT(1)
-static struct nvmem_config econfig = {
- .name = "sunxi-sid",
- .read_only = true,
- .stride = 4,
- .word_size = 1,
-};
-
struct sunxi_sid_cfg {
u32 value_offset;
u32 size;
@@ -53,33 +37,12 @@ struct sunxi_sid {
u32 value_offset;
};
-/* We read the entire key, due to a 32 bit read alignment requirement. Since we
- * want to return the requested byte, this results in somewhat slower code and
- * uses 4 times more reads as needed but keeps code simpler. Since the SID is
- * only very rarely probed, this is not really an issue.
- */
-static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
- const unsigned int offset)
-{
- u32 sid_key;
-
- sid_key = ioread32be(sid->base + round_down(offset, 4));
- sid_key >>= (offset % 4) * 8;
-
- return sid_key; /* Only return the last byte */
-}
-
static int sunxi_sid_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
- u8 *buf = val;
- /* Offset the read operation to the real position of SID */
- offset += sid->value_offset;
-
- while (bytes--)
- *buf++ = sunxi_sid_read_byte(sid, offset++);
+ memcpy_fromio(val, sid->base + sid->value_offset + offset, bytes);
return 0;
}
@@ -115,36 +78,34 @@ static int sun8i_sid_register_readout(const struct sunxi_sid *sid,
* to be not reliable at all.
* Read by the registers instead.
*/
-static int sun8i_sid_read_byte_by_reg(const struct sunxi_sid *sid,
- const unsigned int offset,
- u8 *out)
-{
- u32 word;
- int ret;
-
- ret = sun8i_sid_register_readout(sid, offset & ~0x03, &word);
-
- if (ret)
- return ret;
-
- *out = (word >> ((offset & 0x3) * 8)) & 0xff;
-
- return 0;
-}
-
static int sun8i_sid_read_by_reg(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
- u8 *buf = val;
+ u32 word;
int ret;
- while (bytes--) {
- ret = sun8i_sid_read_byte_by_reg(sid, offset++, buf++);
+ /* .stride = 4 so offset is guaranteed to be aligned */
+ while (bytes >= 4) {
+ ret = sun8i_sid_register_readout(sid, offset, val);
if (ret)
return ret;
+
+ val += 4;
+ offset += 4;
+ bytes -= 4;
}
+ if (!bytes)
+ return 0;
+
+ /* Handle any trailing bytes */
+ ret = sun8i_sid_register_readout(sid, offset, &word);
+ if (ret)
+ return ret;
+
+ memcpy(val, &word, bytes);
+
return 0;
}
@@ -152,9 +113,10 @@ static int sunxi_sid_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
+ struct nvmem_config *nvmem_cfg;
struct nvmem_device *nvmem;
struct sunxi_sid *sid;
- int i, size;
+ int size;
char *randomness;
const struct sunxi_sid_cfg *cfg;
@@ -174,14 +136,23 @@ static int sunxi_sid_probe(struct platform_device *pdev)
size = cfg->size;
- econfig.size = size;
- econfig.dev = dev;
+ nvmem_cfg = devm_kzalloc(dev, sizeof(*nvmem_cfg), GFP_KERNEL);
+ if (!nvmem_cfg)
+ return -ENOMEM;
+
+ nvmem_cfg->dev = dev;
+ nvmem_cfg->name = "sunxi-sid";
+ nvmem_cfg->read_only = true;
+ nvmem_cfg->size = cfg->size;
+ nvmem_cfg->word_size = 1;
+ nvmem_cfg->stride = 4;
+ nvmem_cfg->priv = sid;
if (cfg->need_register_readout)
- econfig.reg_read = sun8i_sid_read_by_reg;
+ nvmem_cfg->reg_read = sun8i_sid_read_by_reg;
else
- econfig.reg_read = sunxi_sid_read;
- econfig.priv = sid;
- nvmem = devm_nvmem_register(dev, &econfig);
+ nvmem_cfg->reg_read = sunxi_sid_read;
+
+ nvmem = devm_nvmem_register(dev, nvmem_cfg);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
@@ -189,9 +160,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
if (!randomness)
return -ENOMEM;
- for (i = 0; i < size; i++)
- econfig.reg_read(sid, i, &randomness[i], 1);
-
+ nvmem_cfg->reg_read(sid, 0, randomness, size);
add_device_randomness(randomness, size);
kfree(randomness);
@@ -219,11 +188,19 @@ static const struct sunxi_sid_cfg sun50i_a64_cfg = {
.size = 0x100,
};
+static const struct sunxi_sid_cfg sun50i_h6_cfg = {
+ .value_offset = 0x200,
+ .size = 0x200,
+};
+
static const struct of_device_id sunxi_sid_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
{ .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
+ { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
{ .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 2270373b30ab..978427a9d5e6 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -569,6 +569,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
* relative to that node.
*/
static u64 __of_translate_address(struct device_node *dev,
+ struct device_node *(*get_parent)(const struct device_node *),
const __be32 *in_addr, const char *rprop,
struct device_node **host)
{
@@ -585,7 +586,7 @@ static u64 __of_translate_address(struct device_node *dev,
*host = NULL;
/* Get parent & match bus type */
- parent = of_get_parent(dev);
+ parent = get_parent(dev);
if (parent == NULL)
goto bail;
bus = of_match_bus(parent);
@@ -609,7 +610,7 @@ static u64 __of_translate_address(struct device_node *dev,
/* Switch to parent bus */
of_node_put(dev);
dev = parent;
- parent = of_get_parent(dev);
+ parent = get_parent(dev);
/* If root, we have finished */
if (parent == NULL) {
@@ -665,7 +666,8 @@ u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
struct device_node *host;
u64 ret;
- ret = __of_translate_address(dev, in_addr, "ranges", &host);
+ ret = __of_translate_address(dev, of_get_parent,
+ in_addr, "ranges", &host);
if (host) {
of_node_put(host);
return OF_BAD_ADDR;
@@ -675,12 +677,31 @@ u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
}
EXPORT_SYMBOL(of_translate_address);
+static struct device_node *__of_get_dma_parent(const struct device_node *np)
+{
+ struct of_phandle_args args;
+ int ret, index;
+
+ index = of_property_match_string(np, "interconnect-names", "dma-mem");
+ if (index < 0)
+ return of_get_parent(np);
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells",
+ index, &args);
+ if (ret < 0)
+ return of_get_parent(np);
+
+ return of_node_get(args.np);
+}
+
u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
struct device_node *host;
u64 ret;
- ret = __of_translate_address(dev, in_addr, "dma-ranges", &host);
+ ret = __of_translate_address(dev, __of_get_dma_parent,
+ in_addr, "dma-ranges", &host);
if (host) {
of_node_put(host);
@@ -736,7 +757,8 @@ static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
unsigned long port;
struct device_node *host;
- taddr = __of_translate_address(dev, in_addr, "ranges", &host);
+ taddr = __of_translate_address(dev, of_get_parent,
+ in_addr, "ranges", &host);
if (host) {
/* host-specific port access */
port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
@@ -908,9 +930,15 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
return -EINVAL;
while (1) {
+ struct device_node *parent;
+
naddr = of_n_addr_cells(node);
nsize = of_n_size_cells(node);
- node = of_get_next_parent(node);
+
+ parent = __of_get_dma_parent(node);
+ of_node_put(node);
+
+ node = parent;
if (!node)
break;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 5226e898476e..20e0e7ee4edf 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1350,8 +1350,9 @@ int of_phandle_iterator_next(struct of_phandle_iterator *it)
* property data length
*/
if (it->cur + count > it->list_end) {
- pr_err("%pOF: arguments longer than property\n",
- it->parent);
+ pr_err("%pOF: %s = %d found %d\n",
+ it->parent, it->cells_name,
+ count, it->cell_count);
goto err;
}
}
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 3717f2a20d0d..da8158392010 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -17,7 +17,7 @@
/**
* of_match_device - Tell if a struct device matches an of_device_id list
- * @ids: array of of device match structures to search in
+ * @matches: array of of device match structures to search in
* @dev: the of device structure to match against
*
* Used by a driver to check whether a platform_device present in the
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 4734223ab702..de893c9616a1 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1091,7 +1091,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
+ strlcpy(data, p, min(l, COMMAND_LINE_SIZE));
/*
* CONFIG_CMDLINE is meant to be a default in case nothing else
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index e1f6f392a4c0..7f84bb4903ca 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -500,7 +500,7 @@ void __init of_irq_init(const struct of_device_id *matches)
* pointer, interrupt-parent device_node etc.
*/
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (WARN_ON(!desc)) {
+ if (!desc) {
of_node_put(np);
goto err;
}
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 810ab0fbcccb..6f1be80e8c4e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -7,10 +7,11 @@
*/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
-#include <linux/nvmem-consumer.h>
#include <linux/of_net.h>
+#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/export.h>
+#include <linux/device.h>
/**
* of_get_phy_mode - Get phy mode for given device_node
@@ -48,12 +49,38 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name)
return NULL;
}
+static const void *of_get_mac_addr_nvmem(struct device_node *np)
+{
+ int ret;
+ const void *mac;
+ u8 nvmem_mac[ETH_ALEN];
+ struct platform_device *pdev = of_find_device_by_node(np);
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ret = nvmem_get_mac_address(&pdev->dev, &nvmem_mac);
+ if (ret) {
+ put_device(&pdev->dev);
+ return ERR_PTR(ret);
+ }
+
+ mac = devm_kmemdup(&pdev->dev, nvmem_mac, ETH_ALEN, GFP_KERNEL);
+ put_device(&pdev->dev);
+ if (!mac)
+ return ERR_PTR(-ENOMEM);
+
+ return mac;
+}
+
/**
* Search the device tree for the best MAC address to use. 'mac-address' is
* checked first, because that is supposed to contain the "most recent" MAC
* address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address. If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree.
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree. If none
+ * of those is set, then try to get the MAC address from the nvmem cell named
+ * 'mac-address'.
*
* Note that the 'address' property is supposed to contain a virtual address of
* the register set, but some DTS files have redefined that property to be the
@@ -65,6 +92,8 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name)
* addresses. Some older U-Boots only initialized 'local-mac-address'. In
* this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
* but is all zeros.
+ *
+ * Return: Will be a valid pointer on success and ERR_PTR in case of error.
*/
const void *of_get_mac_address(struct device_node *np)
{
@@ -78,6 +107,10 @@ const void *of_get_mac_address(struct device_node *np)
if (addr)
return addr;
- return of_get_mac_addr(np, "address");
+ addr = of_get_mac_addr(np, "address");
+ if (addr)
+ return addr;
+
+ return of_get_mac_addr_nvmem(np);
}
EXPORT_SYMBOL(of_get_mac_address);
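
With this change of_get_mac_address() returns an ERR_PTR() instead of NULL when no usable address is found, so callers need IS_ERR() checks rather than NULL checks. A hedged sketch of the calling convention (the net_device handling is a generic illustration, not taken from this patch):

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
	const void *mac = of_get_mac_address(np);

	if (!IS_ERR(mac) && is_valid_ether_addr(mac))
		ether_addr_copy(ndev->dev_addr, mac);
	else
		eth_hw_addr_random(ndev);	/* fall back to a random address */
}
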
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 6a36bc0b3d64..89e190e94af7 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -171,6 +171,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
extern const struct of_device_id __reservedmem_of_table[];
const struct of_device_id *i;
+ int ret = -ENOENT;
for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
reservedmem_of_init_fn initfn = i->data;
@@ -179,13 +180,14 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
continue;
- if (initfn(rmem) == 0) {
+ ret = initfn(rmem);
+ if (ret == 0) {
pr_info("initialized node %s, compatible id %s\n",
rmem->name, compat);
- return 0;
+ break;
}
}
- return -ENOENT;
+ return ret;
}
static int __init __rmem_cmp(const void *a, const void *b)
@@ -245,7 +247,9 @@ void __init fdt_init_reserved_mem(void)
int len;
const __be32 *prop;
int err = 0;
+ int nomap;
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
prop = of_get_flat_dt_prop(node, "phandle", &len);
if (!prop)
prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
@@ -255,8 +259,16 @@ void __init fdt_init_reserved_mem(void)
if (rmem->size == 0)
err = __reserved_mem_alloc_size(node, rmem->name,
&rmem->base, &rmem->size);
- if (err == 0)
- __reserved_mem_init_node(rmem);
+ if (err == 0) {
+ err = __reserved_mem_init_node(rmem);
+ if (err != 0 && err != -ENOENT) {
+ pr_info("node %s compatible matching fail\n",
+ rmem->name);
+ memblock_free(rmem->base, rmem->size);
+ if (nomap)
+ memblock_add(rmem->base, rmem->size);
+ }
+ }
}
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 8631efa1daa1..d7fa75e31f22 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -659,7 +659,7 @@ EXPORT_SYMBOL(of_graph_get_next_endpoint);
*
* Return: An 'endpoint' node pointer which is identified by reg and at the same
* is the child of a port node identified by port_reg. reg and port_reg are
- * ignored when they are -1.
+ * ignored when they are -1. Use of_node_put() on the pointer when done.
*/
struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index cccde756b510..3832a5de4602 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -344,7 +344,7 @@ static void __init of_unittest_check_phandles(void)
}
nh = kzalloc(sizeof(*nh), GFP_KERNEL);
- if (WARN_ON(!nh))
+ if (!nh)
return;
nh->np = np;
@@ -1199,12 +1199,9 @@ static int __init unittest_data_add(void)
/* creating copy */
unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
-
- if (!unittest_data) {
- pr_warn("%s: Failed to allocate memory for unittest_data; "
- "not running tests\n", __func__);
+ if (!unittest_data)
return -ENOMEM;
- }
+
of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
if (!unittest_data_node) {
pr_warn("%s: No tree to attach; not running tests\n", __func__);
@@ -1845,10 +1842,8 @@ static int unittest_i2c_bus_probe(struct platform_device *pdev)
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
std = devm_kzalloc(dev, sizeof(*std), GFP_KERNEL);
- if (!std) {
- dev_err(dev, "Failed to allocate unittest i2c data\n");
+ if (!std)
return -ENOMEM;
- }
/* link them together */
std->pdev = pdev;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 0420f7e8ad5b..0e7703fe733f 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -526,6 +526,60 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
+/**
+ * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
+ * target voltage.
+ * @dev: Device for which we do this operation.
+ * @u_volt: Target voltage.
+ *
+ * Search for the OPP with the highest (ceil) frequency whose voltage is <= u_volt.
+ *
+ * Return: matching *opp, else returns ERR_PTR in case of error which should be
+ * handled using IS_ERR.
+ *
+ * Error return values can be:
+ * EINVAL: bad parameters
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+ unsigned long u_volt)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ if (!dev || !u_volt) {
+ dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
+ u_volt);
+ return ERR_PTR(-EINVAL);
+ }
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available) {
+ if (temp_opp->supplies[0].u_volt > u_volt)
+ break;
+ opp = temp_opp;
+ }
+ }
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp))
+ dev_pm_opp_get(opp);
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
+
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
struct dev_pm_opp_supply *supply)
{
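
A consumer of the new helper looks up the fastest OPP that fits a given voltage budget and drops the reference when done. A minimal sketch (the microvolt budget passed in is an arbitrary example):

#include <linux/err.h>
#include <linux/pm_opp.h>

static unsigned long example_max_freq_at(struct device *dev, unsigned long u_volt)
{
	struct dev_pm_opp *opp;
	unsigned long freq;

	opp = dev_pm_opp_find_freq_ceil_by_volt(dev, u_volt);
	if (IS_ERR(opp))
		return 0;

	freq = dev_pm_opp_get_freq(opp);	/* highest frequency within budget */
	dev_pm_opp_put(opp);			/* drop the reference taken above */

	return freq;
}
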
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 1be571c20062..6bad04cbb1d3 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -157,8 +157,12 @@
#define DBG_IRT(x...)
#endif
+#ifdef CONFIG_64BIT
+#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa))
+#else
#define COMPARE_IRTE_ADDR(irte, hpa) \
- ((irte)->dest_iosapic_addr == F_EXTEND(hpa))
+ ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
+#endif
#define IOSAPIC_REG_SELECT 0x00
#define IOSAPIC_REG_WINDOW 0x10
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 0c6e8b44b4ed..c60b465f6fe4 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
break;
case DISPLAY_MODEL_LASI:
+ /* Skip LED registration when running on QEMU */
+ if (running_on_qemu)
+ return 1;
LED_DATA_REG = data_reg;
led_func_ptr = led_LASI_driver;
printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index f12b9da69255..90fb73575495 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -722,7 +722,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
return -EIO;
}
- /* fall through to NIBBLE */
+ /* fall through - to NIBBLE */
case IEEE1284_MODE_NIBBLE:
DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name);
fn = port->ops->nibble_read_data;
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index e9b52e4a4648..e77044c2bf62 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -158,8 +158,9 @@ static int parport_config(struct pcmcia_device *link)
return 0;
failed:
- parport_cs_release(link);
- return -ENODEV;
+ parport_cs_release(link);
+ kfree(link->priv);
+ return -ENODEV;
} /* parport_config */
static void parport_cs_release(struct pcmcia_device *link)
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index 62873070f988..b7a892791c3e 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -568,6 +568,7 @@ static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
/**
* parport_ip32_dma_start - begins a DMA transfer
+ * @p: parport to work on
* @dir: DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
* @addr: pointer to data buffer
* @count: buffer size
@@ -575,8 +576,8 @@ static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
* Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
* correctly balanced.
*/
-static int parport_ip32_dma_start(enum dma_data_direction dir,
- void *addr, size_t count)
+static int parport_ip32_dma_start(struct parport *p,
+ enum dma_data_direction dir, void *addr, size_t count)
{
unsigned int limit;
u64 ctrl;
@@ -601,7 +602,7 @@ static int parport_ip32_dma_start(enum dma_data_direction dir,
/* Prepare DMA pointers */
parport_ip32_dma.dir = dir;
- parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir);
+ parport_ip32_dma.buf = dma_map_single(&p->bus_dev, addr, count, dir);
parport_ip32_dma.len = count;
parport_ip32_dma.next = parport_ip32_dma.buf;
parport_ip32_dma.left = parport_ip32_dma.len;
@@ -625,11 +626,12 @@ static int parport_ip32_dma_start(enum dma_data_direction dir,
/**
* parport_ip32_dma_stop - ends a running DMA transfer
+ * @p: parport to work on
*
* Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
* correctly balanced.
*/
-static void parport_ip32_dma_stop(void)
+static void parport_ip32_dma_stop(struct parport *p)
{
u64 ctx_a;
u64 ctx_b;
@@ -685,8 +687,8 @@ static void parport_ip32_dma_stop(void)
enable_irq(MACEISA_PAR_CTXB_IRQ);
parport_ip32_dma.irq_on = 1;
- dma_unmap_single(NULL, parport_ip32_dma.buf, parport_ip32_dma.len,
- parport_ip32_dma.dir);
+ dma_unmap_single(&p->bus_dev, parport_ip32_dma.buf,
+ parport_ip32_dma.len, parport_ip32_dma.dir);
}
/**
@@ -1445,7 +1447,7 @@ static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
priv->irq_mode = PARPORT_IP32_IRQ_HERE;
- parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
+ parport_ip32_dma_start(p, DMA_TO_DEVICE, (void *)buf, len);
reinit_completion(&priv->irq_complete);
parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
@@ -1461,7 +1463,7 @@ static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
if (ecr & ECR_SERVINTR)
break; /* DMA transfer just finished */
}
- parport_ip32_dma_stop();
+ parport_ip32_dma_stop(p);
written = len - parport_ip32_dma_get_residue();
priv->irq_mode = PARPORT_IP32_IRQ_FWD;
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 657d642fcc67..28cdd8c0213a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -10,10 +10,10 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
-obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_ACPI) += pci-acpi.o
endif
+obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
obj-$(CONFIG_PCIEPORTBUS) += pcie/
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 5cb40b2518f9..495059d923f7 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -23,7 +23,7 @@ void pci_add_resource_offset(struct list_head *resources, struct resource *res,
entry = resource_list_create_entry(res, 0);
if (!entry) {
- printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
+ pr_err("PCI: can't add host bridge window %pR\n", res);
return;
}
@@ -288,8 +288,7 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
res->end = end;
res->flags &= ~IORESOURCE_UNSET;
orig_res.flags &= ~IORESOURCE_UNSET;
- pci_printk(KERN_DEBUG, dev, "%pR clipped to %pR\n",
- &orig_res, res);
+ pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);
return true;
}
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 6012f3059acd..011c57cae4b0 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -267,6 +267,7 @@ config PCIE_TANGO_SMP8759
config VMD
depends on PCI_MSI && X86_64 && SRCU
+ select X86_DEV_DMA_OPS
tristate "Intel Volume Management Device Driver"
---help---
Adds support for the Intel Volume Management Device (VMD). VMD is a
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 6ea74b1c0d94..a6ce1ee51b4c 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -103,15 +103,32 @@ config PCIE_SPEAR13XX
Say Y here if you want PCIe support on SPEAr13XX SoCs.
config PCI_KEYSTONE
- bool "TI Keystone PCIe controller"
- depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
+ bool
+
+config PCI_KEYSTONE_HOST
+ bool "PCI Keystone Host Mode"
+ depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select PCI_KEYSTONE
+ default y
help
- Say Y here if you want to enable PCI controller support on Keystone
- SoCs. The PCI controller on Keystone is based on DesignWare hardware
- and therefore the driver re-uses the DesignWare core functions to
- implement the driver.
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in host mode. The PCI controller on Keystone is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_KEYSTONE_EP
+ bool "PCI Keystone Endpoint Mode"
+ depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_KEYSTONE
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in endpoint mode. The PCI controller on Keystone is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index b5f3b83cc2b3..b085dfd4fab7 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -28,5 +28,6 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
# depending on whether ACPI, the DT driver, or both are enabled.
ifdef CONFIG_PCI
+obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index ae84a69ae63a..419451efd58c 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -247,6 +247,7 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops, pp);
+ of_node_put(pcie_intc_node);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
@@ -406,7 +407,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
return &dra7xx_pcie_epc_features;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = dra7xx_pcie_ep_init,
.raise_irq = dra7xx_pcie_raise_irq,
.get_features = dra7xx_pcie_get_features,
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 3d627f94a166..9b5cb5b70389 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -52,6 +52,7 @@ enum imx6_pcie_variants {
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
+#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
@@ -89,9 +90,8 @@ struct imx6_pcie {
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
-#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000
-#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP 0x50
@@ -104,34 +104,29 @@ struct imx6_pcie {
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
-#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
-#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
-#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
-#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
-#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
-#define PCIE_PHY_CTRL_DATA_LOC 0
-#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
-#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
-#define PCIE_PHY_CTRL_WR_LOC 18
-#define PCIE_PHY_CTRL_RD_LOC 19
+#define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define PCIE_PHY_CTRL_CAP_ADR BIT(16)
+#define PCIE_PHY_CTRL_CAP_DAT BIT(17)
+#define PCIE_PHY_CTRL_WR BIT(18)
+#define PCIE_PHY_CTRL_RD BIT(19)
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
-#define PCIE_PHY_STAT_ACK_LOC 16
+#define PCIE_PHY_STAT_ACK BIT(16)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
-#define PCIE_PHY_ATEOVRD_EN (0x1 << 2)
+#define PCIE_PHY_ATEOVRD_EN BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
-#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9)
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
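
The register macros above move from open-coded shifts to BIT()/FIELD_PREP(), where FIELD_PREP(mask, val) shifts val into the field described by a compile-time mask. A small illustration of the pattern (the helper name is hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Build a PHY control word: data in bits [15:0], optional capture-address bit. */
static inline u32 example_phy_ctrl_word(u16 data, bool cap_adr)
{
	u32 val = FIELD_PREP(GENMASK(15, 0), data);	/* PCIE_PHY_CTRL_DATA(data) */

	if (cap_adr)
		val |= BIT(16);				/* PCIE_PHY_CTRL_CAP_ADR */

	return val;
}
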
@@ -154,19 +149,19 @@ struct imx6_pcie {
#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
#define PHY_RX_OVRD_IN_LO 0x1005
-#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
-#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
struct dw_pcie *pci = imx6_pcie->pci;
- u32 val;
+ bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
- val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
- val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+ val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
+ PCIE_PHY_STAT_ACK;
wait_counter++;
if (val == exp_val)
@@ -184,27 +179,27 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
u32 val;
int ret;
- val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+ val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
- val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, 0);
+ return pcie_phy_poll_ack(imx6_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
{
struct dw_pcie *pci = imx6_pcie->pci;
- u32 val, phy_ctl;
+ u32 phy_ctl;
int ret;
ret = pcie_phy_wait_ack(imx6_pcie, addr);
@@ -212,23 +207,22 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
return ret;
/* assert Read signal */
- phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+ phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
- val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
- *data = val & 0xffff;
+ *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, 0);
+ return pcie_phy_poll_ack(imx6_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
{
struct dw_pcie *pci = imx6_pcie->pci;
u32 var;
@@ -240,41 +234,41 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
if (ret)
return ret;
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* capture data */
- var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+ var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
/* deassert cap data */
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
if (ret)
return ret;
/* assert wr signal */
- var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+ var = PCIE_PHY_CTRL_WR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
/* deassert wr signal */
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
if (ret)
return ret;
@@ -285,7 +279,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
- u32 tmp;
+ u16 tmp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return;
@@ -455,7 +449,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
* reset time is too short and cannot meet the requirement,
* so add a ~10 us delay here.
*/
- udelay(10);
+ usleep_range(10, 100);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
@@ -488,20 +482,14 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
u32 val;
- unsigned int retries;
struct device *dev = imx6_pcie->pci->dev;
- for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
- regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
-
- if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
- return;
-
- usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
- PHY_PLL_LOCK_WAIT_USLEEP_MAX);
- }
-
- dev_err(dev, "PCIe PLL lock timeout\n");
+ if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
}
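For reference, the helper used above behaves as follows (a restatement of the regmap API contract, not new driver code; PHY_PLL_LOCK_WAIT_TIMEOUT is defined earlier in this file):

/*
 * regmap_read_poll_timeout(map, reg, val, cond, sleep_us, timeout_us)
 * repeatedly does regmap_read(map, reg, &val), sleeping roughly
 * sleep_us between reads, until cond is true (returns 0), timeout_us
 * elapses (returns -ETIMEDOUT), or the read itself fails (returns that
 * error).  It replaces the removed open-coded retry loop, trading a
 * fixed retry count for a wall-clock timeout.
 */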
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
@@ -687,7 +675,7 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
int mult, div;
- u32 val;
+ u16 val;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return 0;
@@ -730,21 +718,6 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
return 0;
}
-static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
-{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
- return -ETIMEDOUT;
-}
-
static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -761,7 +734,7 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
}
dev_err(dev, "Speed change timeout\n");
- return -EINVAL;
+ return -ETIMEDOUT;
}
static void imx6_pcie_ltssm_enable(struct device *dev)
@@ -803,7 +776,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
- ret = imx6_pcie_wait_for_link(imx6_pcie);
+ ret = dw_pcie_wait_for_link(pci);
if (ret)
goto err_reset_phy;
@@ -841,7 +814,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
}
/* Make sure link training is finished as well! */
- ret = imx6_pcie_wait_for_link(imx6_pcie);
+ ret = dw_pcie_wait_for_link(pci);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
@@ -856,8 +829,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
err_reset_phy:
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
imx6_pcie_reset_phy(imx6_pcie);
return ret;
}
@@ -993,17 +966,11 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
}
}
-static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
-{
- return (imx6_pcie->drvdata->variant == IMX7D ||
- imx6_pcie->drvdata->variant == IMX6SX);
-}
-
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- if (!imx6_pcie_supports_suspend(imx6_pcie))
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
@@ -1019,7 +986,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
struct pcie_port *pp = &imx6_pcie->pci->pp;
- if (!imx6_pcie_supports_suspend(imx6_pcie))
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_assert_core_reset(imx6_pcie);
@@ -1249,7 +1216,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1258,6 +1226,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
[IMX7D] = {
.variant = IMX7D,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX8MQ] = {
.variant = IMX8MQ,
@@ -1279,6 +1248,7 @@ static struct platform_driver imx6_pcie_driver = {
.of_match_table = imx6_pcie_of_match,
.suppress_bind_attrs = true,
.pm = &imx6_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = imx6_pcie_probe,
.shutdown = imx6_pcie_shutdown,
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 14f2b0b4ed5e..af677254a072 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -18,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
@@ -26,6 +28,7 @@
#include <linux/resource.h>
#include <linux/signal.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define PCIE_VENDORID_MASK 0xffff
@@ -44,28 +47,34 @@
#define CFG_TYPE1 BIT(24)
#define OB_SIZE 0x030
-#define SPACE0_REMOTE_CFG_OFFSET 0x1000
#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
#define OB_ENABLEN BIT(0)
#define OB_WIN_SIZE 8 /* 8MB */
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET 0x64
+#define PCIE_EP_IRQ_CLR 0x68
+#define INT_ENABLE BIT(0)
+
/* IRQ register defines */
#define IRQ_EOI 0x050
-#define IRQ_STATUS 0x184
-#define IRQ_ENABLE_SET 0x188
-#define IRQ_ENABLE_CLR 0x18c
#define MSI_IRQ 0x054
-#define MSI0_IRQ_STATUS 0x104
-#define MSI0_IRQ_ENABLE_SET 0x108
-#define MSI0_IRQ_ENABLE_CLR 0x10c
-#define IRQ_STATUS 0x184
+#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET 4
+#define IRQ_STATUS(n) (0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
+#define INTx_EN BIT(0)
+
#define ERR_IRQ_STATUS 0x1c4
#define ERR_IRQ_ENABLE_SET 0x1c8
#define ERR_AER BIT(5) /* ECRC error */
+#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
#define ERR_CORR BIT(3) /* Correctable error */
#define ERR_NONFATAL BIT(2) /* Non-fatal error */
@@ -74,25 +83,45 @@
#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
ERR_NONFATAL | ERR_FATAL | ERR_SYS)
-#define MAX_MSI_HOST_IRQS 8
/* PCIE controller device IDs */
#define PCIE_RC_K2HK 0xb008
#define PCIE_RC_K2E 0xb009
#define PCIE_RC_K2L 0xb00a
#define PCIE_RC_K2G 0xb00b
+#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
+
+#define EP 0x0
+#define LEG_EP 0x1
+#define RC 0x2
+
+#define EXP_CAP_ID_OFFSET 0x70
+
+#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK 0x3
+#define AM654_WIN_SIZE SZ_64K
+
+#define APP_ADDR_SPACE_0 (16 * SZ_1K)
+
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+struct ks_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+ unsigned int version;
+};
+
struct keystone_pcie {
struct dw_pcie *pci;
/* PCI Device ID */
u32 device_id;
- int num_legacy_host_irqs;
int legacy_host_irqs[PCI_NUM_INTX];
struct device_node *legacy_intc_np;
- int num_msi_host_irqs;
- int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ int msi_host_irq;
int num_lanes;
u32 num_viewport;
struct phy **phy;
@@ -101,28 +130,12 @@ struct keystone_pcie {
struct irq_domain *legacy_irq_domain;
struct device_node *np;
- int error_irq;
-
/* Application register space */
void __iomem *va_app_base; /* DT 1st resource */
struct resource app;
+ bool is_am6;
};
-static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
- u32 *bit_pos)
-{
- *reg_offset = offset % 8;
- *bit_pos = offset >> 3;
-}
-
-static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- return ks_pcie->app.start + MSI_IRQ;
-}
-
static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
return readl(ks_pcie->va_app_base + offset);
@@ -134,81 +147,114 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
writel(val, ks_pcie->va_app_base + offset);
}
-static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- u32 pending, vector;
- int src, virq;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ u32 reg_offset;
+ u32 bit_pos;
- pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
- /*
- * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
- * shows 1, 9, 17, 25 and so forth
- */
- for (src = 0; src < 4; src++) {
- if (BIT(src) & pending) {
- vector = offset + (src << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
- src, vector, virq);
- generic_handle_irq(virq);
- }
- }
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
-static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- u32 reg_offset, bit_pos;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
struct dw_pcie *pci;
+ u64 msi_target;
pci = to_dw_pcie_from_pp(pp);
ks_pcie = to_keystone_pcie(pci);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
- BIT(bit_pos));
- ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+ msi_target = ks_pcie->app.start + MSI_IRQ;
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+ msg->data = data->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ return -EINVAL;
+}
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+static void ks_pcie_msi_mask(struct irq_data *data)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+static void ks_pcie_msi_unmask(struct irq_data *data)
{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
+static struct irq_chip ks_pcie_msi_irq_chip = {
+ .name = "KEYSTONE-PCI-MSI",
+ .irq_ack = ks_pcie_msi_irq_ack,
+ .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+ .irq_set_affinity = ks_pcie_msi_set_affinity,
+ .irq_mask = ks_pcie_msi_mask,
+ .irq_unmask = ks_pcie_msi_unmask,
+};
+
static int ks_pcie_msi_host_init(struct pcie_port *pp)
{
+ pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
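Setting pp->msi_irq_chip before dw_pcie_allocate_domains() is what keeps the Keystone-specific ack/mask/unmask callbacks in play: the core change later in this diff (dw_pcie_irq_domain_alloc() in pcie-designware-host.c) installs pp->msi_irq_chip, rather than the generic dw_pci_msi_bottom_irq_chip, when populating the MSI domain.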
-static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
-{
- int i;
-
- for (i = 0; i < PCI_NUM_INTX; i++)
- ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
-}
-
static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
int offset)
{
@@ -217,7 +263,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
u32 pending;
int virq;
- pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
if (BIT(0) & pending) {
virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
@@ -229,6 +275,14 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}
+/*
+ * Dummy function so that the DW core doesn't configure MSI
+ */
+static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
+{
+ return 0;
+}
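Returning 0 from a non-NULL msi_host_init is enough to keep the DWC core out of MSI handling on AM654: both dw_pcie_host_init() and dw_pcie_setup_rc() (changed later in this diff) skip their MSI domain setup and MSI_INTR0 programming whenever pp->ops->msi_host_init is provided, leaving MSIs to an external interrupt controller on that SoC.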
+
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
@@ -255,10 +309,10 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
if (reg & ERR_CORR)
dev_dbg(dev, "Correctable Error\n");
- if (reg & ERR_AXI)
+ if (!ks_pcie->is_am6 && (reg & ERR_AXI))
dev_err(dev, "AXI tag lookup fatal Error\n");
- if (reg & ERR_AER)
+ if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
dev_err(dev, "ECRC Error\n");
ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
@@ -356,6 +410,9 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
ks_pcie_clear_dbi_mode(ks_pcie);
+ if (ks_pcie->is_am6)
+ return;
+
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -445,68 +502,33 @@ static int ks_pcie_link_up(struct dw_pcie *pci)
return (val == PORT_LOGIC_LTSSM_STATE_L0);
}
-static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+static void ks_pcie_stop_link(struct dw_pcie *pci)
{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 val;
/* Disable Link training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-
- /* Initiate Link Training */
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}
-/**
- * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
- *
- * Ioremap the register resources, initialize legacy irq domain
- * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
- * PCI host controller.
- */
-static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
+static int ks_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
-
- /* Index 0 is the config reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /*
- * We set these same and is used in pcie rd/wr_other_conf
- * functions
- */
- pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- /* Index 1 is the application reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ks_pcie->va_app_base))
- return PTR_ERR(ks_pcie->va_app_base);
-
- ks_pcie->app = *res;
+ u32 val;
- /* Create legacy IRQ domain */
- ks_pcie->legacy_irq_domain =
- irq_domain_add_linear(ks_pcie->legacy_intc_np,
- PCI_NUM_INTX,
- &ks_pcie_legacy_irq_domain_ops,
- NULL);
- if (!ks_pcie->legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
- return -EINVAL;
+ if (dw_pcie_link_up(pci)) {
+ dev_dbg(dev, "link is already up\n");
+ return 0;
}
- return dw_pcie_host_init(pp);
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+
+ return 0;
}
static void ks_pcie_quirk(struct pci_dev *dev)
@@ -552,34 +574,16 @@ static void ks_pcie_quirk(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
-static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
-
- if (dw_pcie_link_up(pci)) {
- dev_info(dev, "Link already up\n");
- return 0;
- }
-
- ks_pcie_initiate_link_train(ks_pcie);
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- dev_err(dev, "phy link never came up\n");
- return -ETIMEDOUT;
-}
-
static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
+ unsigned int irq = desc->irq_data.hwirq;
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
- u32 offset = irq - ks_pcie->msi_host_irqs[0];
+ u32 offset = irq - ks_pcie->msi_host_irq;
struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 vector, virq, reg, pos;
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -589,7 +593,23 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_pcie_handle_msi_irq(ks_pcie, offset);
+
+ reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+ /*
+ * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
+ * shows 1, 9, 17, 25 and so forth
+ */
+ for (pos = 0; pos < 4; pos++) {
+ if (!(reg & BIT(pos)))
+ continue;
+
+ vector = offset + (pos << 3);
+ virq = irq_linear_revmap(pp->irq_domain, vector);
+ dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
+ virq);
+ generic_handle_irq(virq);
+ }
+
chained_irq_exit(chip, desc);
}
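The comment above describes the vector layout; a standalone arithmetic sketch of both directions of the mapping (status register n carries vectors n, n+8, n+16, n+24):

#include <stdio.h>

#define MSI_IRQ_STATUS(n)	(0x104 + ((n) << 4))	/* as defined above */

int main(void)
{
	unsigned int offset = 3, pos = 0;

	/* handler direction: host irq offset + status bit -> MSI vector */
	unsigned int vector = offset + (pos << 3);	/* 3 */

	/* ack direction: MSI vector -> status register and bit */
	unsigned int reg_offset = vector % 8;		/* 3 */
	unsigned int bit_pos = vector >> 3;		/* 0 */

	printf("offset %u, status bit %u -> vector %u\n", offset, pos, vector);
	printf("ack: write BIT(%u) to app reg 0x%x\n",
	       bit_pos, MSI_IRQ_STATUS(reg_offset));
	return 0;
}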
@@ -622,89 +642,119 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
- char *controller, int *num_irqs)
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
- int temp, max_host_irqs, legacy = 1, *host_irqs;
struct device *dev = ks_pcie->pci->dev;
- struct device_node *np_pcie = dev->of_node, **np_temp;
-
- if (!strcmp(controller, "msi-interrupt-controller"))
- legacy = 0;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ struct irq_data *irq_data;
+ int irq_count, irq, ret, i;
- if (legacy) {
- np_temp = &ks_pcie->legacy_intc_np;
- max_host_irqs = PCI_NUM_INTX;
- host_irqs = &ks_pcie->legacy_host_irqs[0];
- } else {
- np_temp = &ks_pcie->msi_intc_np;
- max_host_irqs = MAX_MSI_HOST_IRQS;
- host_irqs = &ks_pcie->msi_host_irqs[0];
- }
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ return 0;
- /* interrupt controller is in a child node */
- *np_temp = of_get_child_by_name(np_pcie, controller);
- if (!(*np_temp)) {
- dev_err(dev, "Node for %s is absent\n", controller);
+ intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+ if (!intc_np) {
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "msi-interrupt-controller node is absent\n");
return -EINVAL;
}
- temp = of_irq_count(*np_temp);
- if (!temp) {
- dev_err(dev, "No IRQ entries in %s\n", controller);
- of_node_put(*np_temp);
- return -EINVAL;
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
}
- if (temp > max_host_irqs)
- dev_warn(dev, "Too many %s interrupts defined %u\n",
- (legacy ? "legacy" : "MSI"), temp);
-
- /*
- * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
- * 7 (MSI)
- */
- for (temp = 0; temp < max_host_irqs; temp++) {
- host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
- if (!host_irqs[temp])
- break;
- }
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
- of_node_put(*np_temp);
+ if (!ks_pcie->msi_host_irq) {
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->msi_host_irq = irq_data->hwirq;
+ }
- if (temp) {
- *num_irqs = temp;
- return 0;
+ irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+ ks_pcie);
}
- return -EINVAL;
+ of_node_put(intc_np);
+ return 0;
+
+err:
+ of_node_put(intc_np);
+ return ret;
}
-static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
- int i;
+ struct device *dev = ks_pcie->pci->dev;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ int irq_count, irq, ret = 0, i;
+
+ intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!intc_np) {
+ /*
+ * Since legacy interrupts are modeled as edge interrupts in
+ * AM6, keep them disabled for now.
+ */
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
- /* Legacy IRQ */
- for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
- irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->legacy_host_irqs[i] = irq;
+
+ irq_set_chained_handler_and_data(irq,
ks_pcie_legacy_irq_handler,
ks_pcie);
}
- ks_pcie_enable_legacy_irqs(ks_pcie);
- /* MSI IRQ */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
- irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
- ks_pcie_msi_irq_handler,
- ks_pcie);
- }
+ legacy_irq_domain =
+ irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ &ks_pcie_legacy_irq_domain_ops, NULL);
+ if (!legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ ret = -EINVAL;
+ goto err;
}
+ ks_pcie->legacy_irq_domain = legacy_irq_domain;
+
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
- if (ks_pcie->error_irq > 0)
- ks_pcie_enable_error_irq(ks_pcie);
+err:
+ of_node_put(intc_np);
+ return ret;
}
+#ifdef CONFIG_ARM
/*
* When a PCI device does not exist during config cycles, keystone host gets a
* bus error instead of returning 0xffffffff. This handler always returns 0
@@ -724,6 +774,7 @@ static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
return 0;
}
+#endif
static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
@@ -742,8 +793,10 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
if (ret)
return ret;
+ dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+ dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@@ -754,11 +807,18 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
int ret;
+ ret = ks_pcie_config_legacy_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ret = ks_pcie_config_msi_irq(ks_pcie);
+ if (ret)
+ return ret;
+
dw_pcie_setup_rc(pp);
- ks_pcie_establish_link(ks_pcie);
+ ks_pcie_stop_link(pci);
ks_pcie_setup_rc_app_regs(ks_pcie);
- ks_pcie_setup_interrupts(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -766,12 +826,17 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
if (ret < 0)
return ret;
+#ifdef CONFIG_ARM
/*
* PCIe access errors that result into OCP errors are caught by ARM as
* "External aborts"
*/
hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
"Asynchronous external abort");
+#endif
+
+ ks_pcie_start_link(pci);
+ dw_pcie_wait_for_link(pci);
return 0;
}
@@ -780,14 +845,15 @@ static const struct dw_pcie_host_ops ks_pcie_host_ops = {
.rd_other_conf = ks_pcie_rd_other_conf,
.wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
- .msi_set_irq = ks_pcie_msi_set_irq,
- .msi_clear_irq = ks_pcie_msi_clear_irq,
- .get_msi_addr = ks_pcie_get_msi_addr,
.msi_host_init = ks_pcie_msi_host_init,
- .msi_irq_ack = ks_pcie_msi_irq_ack,
.scan_bus = ks_pcie_v3_65_scan_bus,
};
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+ .host_init = ks_pcie_host_init,
+ .msi_host_init = ks_pcie_am654_msi_host_init,
+};
+
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
@@ -801,41 +867,17 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
- ret = ks_pcie_get_irq_controller_info(ks_pcie,
- "legacy-interrupt-controller",
- &ks_pcie->num_legacy_host_irqs);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- ret = ks_pcie_get_irq_controller_info(ks_pcie,
- "msi-interrupt-controller",
- &ks_pcie->num_msi_host_irqs);
- if (ret)
- return ret;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
- /*
- * Index 0 is the platform interrupt for error interrupt
- * from RC. This is optional.
- */
- ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
- if (ks_pcie->error_irq <= 0)
- dev_info(dev, "no error IRQ defined\n");
- else {
- ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
- IRQF_SHARED, "pcie-error-irq", ks_pcie);
- if (ret < 0) {
- dev_err(dev, "failed to request error IRQ %d\n",
- ks_pcie->error_irq);
- return ret;
- }
- }
+ pp->va_cfg1_base = pp->va_cfg0_base;
- pp->ops = &ks_pcie_host_ops;
- ret = ks_pcie_dw_host_init(ks_pcie);
+ ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
return ret;
@@ -844,18 +886,139 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
return 0;
}
-static const struct of_device_id ks_pcie_of_match[] = {
- {
- .type = "pci",
- .compatible = "ti,keystone-pcie",
- },
- { },
-};
+static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 val;
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_read(base + reg, size, &val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+ return val;
+}
+
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_write(base + reg, size, val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+}
static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .start_link = ks_pcie_start_link,
+ .stop_link = ks_pcie_stop_link,
.link_up = ks_pcie_link_up,
+ .read_dbi2 = ks_pcie_am654_read_dbi2,
+ .write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ int flags;
+
+ ep->page_size = AM654_WIN_SIZE;
+ flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+ dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
+
+static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ u8 int_pin;
+
+ int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+ if (int_pin == 0 || int_pin > 4)
+ return;
+
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+ INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+ mdelay(1);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+ INT_ENABLE);
+}
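For context, a hedged usage note on how this path is reached (kernel context, written as a comment because the caller lives outside this file):

/*
 * An endpoint function driver such as pci-epf-test gets here through
 * the generic EPC API, e.g.
 *
 *	pci_epc_raise_irq(epf->epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
 *
 * and the enable/set/mdelay/clear/disable sequence above turns that
 * request into a single assert/deassert INTx pulse toward the host.
 */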
+
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ ks_pcie_am654_raise_legacy_irq(ks_pcie);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[2] = SZ_1M,
+ .bar_fixed_size[3] = SZ_64K,
+ .bar_fixed_size[4] = 256,
+ .bar_fixed_size[5] = SZ_1M,
+ .align = SZ_1M,
};
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+ return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+ .ep_init = ks_pcie_am654_ep_init,
+ .raise_irq = ks_pcie_am654_raise_irq,
+ .get_features = &ks_pcie_am654_get_features,
+};
+
+static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = ks_pcie->pci;
+
+ ep = &pci->ep;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
int num_lanes = ks_pcie->num_lanes;
@@ -873,6 +1036,10 @@ static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
int num_lanes = ks_pcie->num_lanes;
for (i = 0; i < num_lanes; i++) {
+ ret = phy_reset(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
ret = phy_init(ks_pcie->phy[i]);
if (ret < 0)
goto err_phy;
@@ -895,20 +1062,161 @@ err_phy:
return ret;
}
+static int ks_pcie_set_mode(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+ val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+ enum dw_pcie_device_mode mode)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ mask = AM654_PCIE_DEV_TYPE_MASK;
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ val = RC;
+ break;
+ case DW_PCIE_EP_TYPE:
+ val = EP;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
+{
+ u32 val;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
+ if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= link_speed;
+ dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
+ val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
+ if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= link_speed;
+ dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
+ val);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
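Only the Supported Link Speeds field (the low bits covered by PCI_EXP_LNKCAP_SLS) is rewritten. A standalone sketch with a made-up LNKCAP value, using the standard encoding 1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s:

#include <stdio.h>

#define PCI_EXP_LNKCAP_SLS	0x0000000f	/* Supported Link Speeds */

int main(void)
{
	unsigned int val = 0x00477c02;	/* made-up LNKCAP, currently Gen2 */
	int link_speed = 3;		/* e.g. max-link-speed = <3> in DT */

	if ((val & PCI_EXP_LNKCAP_SLS) != (unsigned int)link_speed) {
		val &= ~(unsigned int)PCI_EXP_LNKCAP_SLS;
		val |= link_speed;
	}
	printf("LNKCAP 0x%08x -> SLS %u (8 GT/s)\n",
	       val, val & PCI_EXP_LNKCAP_SLS);
	return 0;
}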
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+ .host_ops = &ks_pcie_host_ops,
+ .version = 0x365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+ .host_ops = &ks_pcie_am654_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
+ .version = 0x490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+ .ep_ops = &ks_pcie_am654_ep_ops,
+ .mode = DW_PCIE_EP_TYPE,
+ .version = 0x490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+ {
+ .type = "pci",
+ .data = &ks_pcie_rc_of_data,
+ .compatible = "ti,keystone-pcie",
+ },
+ {
+ .data = &ks_pcie_am654_rc_of_data,
+ .compatible = "ti,am654-pcie-rc",
+ },
+ {
+ .data = &ks_pcie_am654_ep_of_data,
+ .compatible = "ti,am654-pcie-ep",
+ },
+ { },
+};
+
static int __init ks_pcie_probe(struct platform_device *pdev)
{
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ const struct ks_pcie_of_data *data;
+ const struct of_device_id *match;
+ enum dw_pcie_device_mode mode;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
struct device_link **link;
+ struct gpio_desc *gpiod;
+ void __iomem *atu_base;
+ struct resource *res;
+ unsigned int version;
+ void __iomem *base;
u32 num_viewport;
struct phy **phy;
+ int link_speed;
u32 num_lanes;
char name[10];
int ret;
+ int irq;
int i;
+ match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
+ data = (struct ks_pcie_of_data *)match->data;
+ if (!data)
+ return -EINVAL;
+
+ version = data->version;
+ host_ops = data->host_ops;
+ ep_ops = data->ep_ops;
+ mode = data->mode;
+
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
return -ENOMEM;
@@ -917,12 +1225,38 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+ base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+ ks_pcie->is_am6 = true;
+
+ pci->dbi_base = base;
+ pci->dbi_base2 = base;
pci->dev = dev;
pci->ops = &ks_pcie_dw_pcie_ops;
+ pci->version = version;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "missing IRQ resource: %d\n", irq);
+ return irq;
+ }
- ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
if (ret < 0) {
- dev_err(dev, "unable to read *num-viewport* property\n");
+ dev_err(dev, "failed to request error IRQ %d\n",
+ irq);
return ret;
}
@@ -960,9 +1294,17 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ks_pcie->pci = pci;
ks_pcie->link = link;
ks_pcie->num_lanes = num_lanes;
- ks_pcie->num_viewport = num_viewport;
ks_pcie->phy = phy;
+ gpiod = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset GPIO\n");
+ goto err_link;
+ }
+
ret = ks_pcie_enable_phy(ks_pcie);
if (ret) {
dev_err(dev, "failed to enable phy\n");
@@ -977,9 +1319,79 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
- if (ret < 0)
- goto err_get_sync;
+ if (pci->version >= 0x480A) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ atu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(atu_base)) {
+ ret = PTR_ERR(atu_base);
+ goto err_get_sync;
+ }
+
+ pci->atu_base = atu_base;
+
+ ret = ks_pcie_am654_set_mode(dev, mode);
+ if (ret < 0)
+ goto err_get_sync;
+ } else {
+ ret = ks_pcie_set_mode(dev);
+ if (ret < 0)
+ goto err_get_sync;
+ }
+
+ link_speed = of_pci_get_max_link_speed(np);
+ if (link_speed < 0)
+ link_speed = 2;
+
+ ks_pcie_set_link_speed(pci, link_speed);
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ return ret;
+ }
+
+ /*
+ * "Power Sequencing and Reset Signal Timings" table in
+ * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+ * indicates PERST# should be deasserted after a minimum of 100 us
+ * once REFCLK is stable. The REFCLK to the connector in RC
+ * mode is selected while enabling the PHY. So deassert PERST#
+ * after 100 us.
+ */
+ if (gpiod) {
+ usleep_range(100, 200);
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+ ks_pcie->num_viewport = num_viewport;
+ pci->pp.ops = host_ops;
+ ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ pci->ep.ops = ep_ops;
+ ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+
+ ks_pcie_enable_error_irq(ks_pcie);
return 0;
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index a42c9c3ae1cc..be61d96cc95e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -79,7 +79,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ce45bde29bf8..3a5fa26d5e56 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -201,6 +201,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp)
return -EINVAL;
}
+ of_node_put(msi_node);
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
new file mode 100644
index 000000000000..3ab58f0584a8
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
+ * such as Graviton and Alpine)
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author: Jonathan Chocron <jonnyc@amazon.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci-acpi.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+struct al_pcie_acpi {
+ void __iomem *dbi_base;
+};
+
+static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct al_pcie_acpi *pcie = cfg->priv;
+ void __iomem *dbi_base = pcie->dbi_base;
+
+ if (bus->number == cfg->busr.start) {
+ /*
+ * The DW PCIe core doesn't filter out transactions to other
+ * devices/functions on the root bus num, so we do this here.
+ */
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+ else
+ return dbi_base + where;
+ }
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int al_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct al_pcie_acpi *al_pcie;
+ struct resource *res;
+ int ret;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc dbi base address for SEG %d\n",
+ root->segment);
+ return ret;
+ }
+
+ dev_dbg(dev, "Root port dbi res: %pR\n", res);
+
+ al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(al_pcie->dbi_base)) {
+ long err = PTR_ERR(al_pcie->dbi_base);
+
+ dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n",
+ res, err);
+ return err;
+ }
+
+ cfg->priv = al_pcie;
+
+ return 0;
+}
+
+struct pci_ecam_ops al_pcie_ops = {
+ .bus_shift = 20,
+ .init = al_pcie_init,
+ .pci_ops = {
+ .map_bus = al_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
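Note on how this new file is consumed: there is no platform driver here. al_pcie_ops is an ECAM ops table whose map_bus() steers root-bus config accesses (device 0 only) at the remapped DBI registers, while everything below the root port goes through the regular pci_ecam_map_bus() window; selecting this ops table for a given host bridge is presumably done by the ACPI MCFG quirk machinery, which is outside this hunk.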
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index dba83abfe764..d00252bd8fae 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -444,7 +444,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
};
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 24f5a775ad34..2bf5a35c0570 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -46,16 +46,19 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
u8 cap_id, next_cap_ptr;
u16 reg;
+ if (!cap_ptr)
+ return 0;
+
reg = dw_pcie_readw_dbi(pci, cap_ptr);
- next_cap_ptr = (reg & 0xff00) >> 8;
cap_id = (reg & 0x00ff);
- if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
+ if (cap_id > PCI_CAP_ID_MAX)
return 0;
if (cap_id == cap)
return cap_ptr;
+ next_cap_ptr = (reg & 0xff00) >> 8;
return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
}
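The reordering above lets a zero capability pointer terminate the walk before any register read. A standalone sketch of the same traversal over a fake config space (capability header: ID in the low byte, next pointer in the high byte):

#include <stdio.h>
#include <stdint.h>

/* fake config space: list head at 0x40 -> PM(0x01) -> MSI(0x05) -> end */
static uint8_t cfg[256] = {
	[0x34] = 0x40,			/* PCI_CAPABILITY_LIST	*/
	[0x40] = 0x01, [0x41] = 0x50,	/* PM cap, next = 0x50	*/
	[0x50] = 0x05, [0x51] = 0x00,	/* MSI cap, next = 0	*/
};

static uint8_t find_cap(uint8_t ptr, uint8_t cap)
{
	if (!ptr)			/* the new early-out */
		return 0;
	if (cfg[ptr] == cap)
		return ptr;
	return find_cap(cfg[ptr + 1], cap);
}

int main(void)
{
	printf("MSI cap at 0x%02x\n", find_cap(cfg[0x34], 0x05));
	return 0;
}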
@@ -67,9 +70,6 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
next_cap_ptr = (reg & 0x00ff);
- if (!next_cap_ptr)
- return 0;
-
return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
}
@@ -397,6 +397,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
+ unsigned int aligned_offset;
u16 msg_ctrl, msg_data;
u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
@@ -422,13 +423,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
reg = ep->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+ aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
+ msg_addr = ((u64)msg_addr_upper) << 32 |
+ (msg_addr_lower & ~aligned_offset);
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
if (ret)
return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
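The new aligned_offset handling maps only the page-aligned part of the MSI target and writes the message data at the sub-page offset, so MSI doorbell addresses that are not page aligned still work. A standalone sketch with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t msg_addr_lower = 0xfee01004;	/* example MSI address	*/
	uint32_t msg_addr_upper = 0;
	uint32_t page_size = 0x1000;		/* epc->mem->page_size	*/

	uint32_t aligned_offset = msg_addr_lower & (page_size - 1);
	uint64_t map_addr = ((uint64_t)msg_addr_upper << 32) |
			    (msg_addr_lower & ~aligned_offset);

	printf("map 0x%llx, write msg_data at msi_mem + 0x%x\n",
	       (unsigned long long)map_addr, aligned_offset);
	return 0;
}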
@@ -504,10 +507,32 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
pci_epc_mem_exit(epc);
}
+static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+{
+ u32 header;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ while (pos) {
+ header = dw_pcie_readl_dbi(pci, pos);
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (!pos)
+ break;
+ }
+
+ return 0;
+}
+
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
+ int i;
int ret;
+ u32 reg;
void *addr;
+ unsigned int nbars;
+ unsigned int offset;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
@@ -517,10 +542,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
return -EINVAL;
}
- if (pci->iatu_unroll_enabled && !pci->atu_base) {
- dev_err(dev, "atu_base is not populated\n");
- return -EINVAL;
- }
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {
@@ -595,6 +616,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
dw_pcie_setup(pci);
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 25087d3c9a82..77db32529319 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -126,18 +126,12 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
- if (pp->ops->get_msi_addr)
- msi_target = pp->ops->get_msi_addr(pp);
- else
- msi_target = (u64)pp->msi_data;
+ msi_target = (u64)pp->msi_data;
msg->address_lo = lower_32_bits(msi_target);
msg->address_hi = upper_32_bits(msi_target);
- if (pp->ops->get_msi_data)
- msg->data = pp->ops->get_msi_data(pp, d->hwirq);
- else
- msg->data = d->hwirq;
+ msg->data = d->hwirq;
dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
(int)d->hwirq, msg->address_hi, msg->address_lo);
@@ -157,17 +151,13 @@ static void dw_pci_bottom_mask(struct irq_data *d)
raw_spin_lock_irqsave(&pp->lock, flags);
- if (pp->ops->msi_clear_irq) {
- pp->ops->msi_clear_irq(pp, d->hwirq);
- } else {
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
- }
+ pp->irq_mask[ctrl] |= BIT(bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -180,17 +170,13 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
raw_spin_lock_irqsave(&pp->lock, flags);
- if (pp->ops->msi_set_irq) {
- pp->ops->msi_set_irq(pp, d->hwirq);
- } else {
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
- }
+ pp->irq_mask[ctrl] &= ~BIT(bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -199,20 +185,12 @@ static void dw_pci_bottom_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
- unsigned long flags;
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- raw_spin_lock_irqsave(&pp->lock, flags);
-
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
-
- if (pp->ops->msi_irq_ack)
- pp->ops->msi_irq_ack(d->hwirq, pp);
-
- raw_spin_unlock_irqrestore(&pp->lock, flags);
}
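Each MSI controller block covers MAX_MSI_IRQS_PER_CTRL (32) vectors, and its register group sits MSI_REG_CTRL_BLOCK_SIZE (12) bytes after the previous one (constants as defined in pcie-designware.h). A standalone sketch of the hwirq decomposition shared by the mask, unmask and ack paths:

#include <stdio.h>

#define MAX_MSI_IRQS_PER_CTRL	32
#define MSI_REG_CTRL_BLOCK_SIZE	12

int main(void)
{
	unsigned int hwirq = 37;	/* example MSI vector */
	unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;	/* 1  */
	unsigned int res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;	/* 12 */
	unsigned int bit = hwirq % MAX_MSI_IRQS_PER_CTRL;	/* 5  */

	printf("vector %u -> controller %u, register offset +%u, bit %u\n",
	       hwirq, ctrl, res, bit);
	return 0;
}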
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -245,7 +223,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, bit + i,
- &dw_pci_msi_bottom_irq_chip,
+ pp->msi_irq_chip,
pp, handle_edge_irq,
NULL, NULL);
@@ -298,25 +276,31 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
void dw_pcie_free_msi(struct pcie_port *pp)
{
- irq_set_chained_handler(pp->msi_irq, NULL);
- irq_set_handler_data(pp->msi_irq, NULL);
+ if (pp->msi_irq) {
+ irq_set_chained_handler(pp->msi_irq, NULL);
+ irq_set_handler_data(pp->msi_irq, NULL);
+ }
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
+
+ if (pp->msi_page)
+ __free_page(pp->msi_page);
}
void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct page *page;
u64 msi_target;
- page = alloc_page(GFP_KERNEL);
- pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ pp->msi_page = alloc_page(GFP_KERNEL);
+ pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(dev, pp->msi_data)) {
dev_err(dev, "Failed to map MSI data\n");
- __free_page(page);
+ __free_page(pp->msi_page);
+ pp->msi_page = NULL;
return;
}
msi_target = (u64)pp->msi_data;
@@ -335,7 +319,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win, *tmp;
- struct pci_bus *bus, *child;
+ struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
int ret;
@@ -352,7 +336,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
dev_err(dev, "Missing *config* reg space\n");
}
- bridge = pci_alloc_host_bridge(0);
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -363,7 +347,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
ret = devm_request_pci_bus_resources(dev, &bridge->windows);
if (ret)
- goto error;
+ return ret;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
@@ -407,8 +391,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
resource_size(pp->cfg));
if (!pci->dbi_base) {
dev_err(dev, "Error with ioremap\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -419,8 +402,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(dev, "Error with ioremap in function\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -430,8 +412,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(dev, "Error with ioremap\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -439,7 +420,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
pci->num_viewport = 2;
- if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
+ if (pci_msi_enabled()) {
/*
* If a specific SoC driver needs to change the
* default number of vectors, it needs to implement
@@ -454,14 +435,16 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->num_vectors == 0) {
dev_err(dev,
"Invalid number of vectors\n");
- goto error;
+ return -EINVAL;
}
}
if (!pp->ops->msi_host_init) {
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
ret = dw_pcie_allocate_domains(pp);
if (ret)
- goto error;
+ return ret;
if (pp->msi_irq)
irq_set_chained_handler_and_data(pp->msi_irq,
@@ -470,14 +453,14 @@ int dw_pcie_host_init(struct pcie_port *pp)
} else {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
- goto error;
+ return ret;
}
}
if (pp->ops->host_init) {
ret = pp->ops->host_init(pp);
if (ret)
- goto error;
+ goto err_free_msi;
}
pp->root_bus_nr = pp->busn->start;
@@ -491,24 +474,25 @@ int dw_pcie_host_init(struct pcie_port *pp)
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
- goto error;
+ goto err_free_msi;
- bus = bridge->bus;
+ pp->root_bus = bridge->bus;
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ pci_bus_size_bridges(pp->root_bus);
+ pci_bus_assign_resources(pp->root_bus);
- list_for_each_entry(child, &bus->children, node)
+ list_for_each_entry(child, &pp->root_bus->children, node)
pcie_bus_configure_settings(child);
- pci_bus_add_devices(bus);
+ pci_bus_add_devices(pp->root_bus);
return 0;
-error:
- pci_free_host_bridge(bridge);
+err_free_msi:
+ if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ dw_pcie_free_msi(pp);
return ret;
}
@@ -628,17 +612,6 @@ static struct pci_ops dw_pcie_ops = {
.write = dw_pcie_wr_conf,
};
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
- if (val == 0xffffffff)
- return 1;
-
- return 0;
-}
-
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val, ctrl, num_ctrls;
@@ -646,17 +619,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_setup(pci);
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- pp->irq_mask[ctrl] = ~0;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, pp->irq_mask[ctrl]);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
+ if (!pp->ops->msi_host_init) {
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ pp->irq_mask[ctrl] = ~0;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, pp->irq_mask[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, ~0);
+ }
}
/* Setup RC BARs */
@@ -690,14 +665,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
- /* Get iATU unroll support */
- pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
- dev_dbg(pci->dev, "iATU unroll: %s\n",
- pci->iatu_unroll_enabled ? "enabled" : "disabled");
-
- if (pci->iatu_unroll_enabled && !pci->atu_base)
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
-
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 932dbd0b34b6..b58fdcbc664b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -106,7 +106,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
return &dw_plat_pcie_epc_features;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
.get_features = dw_plat_pcie_get_features,
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 31f6331ca46f..9d7c51c32b3b 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -14,12 +14,6 @@
#include "pcie-designware.h"
-/* PCIe Port Logic registers */
-#define PLR_OFFSET 0x700
-#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
-#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
-
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -89,6 +83,37 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
dev_err(pci->dev, "Write DBI address failed\n");
}
+u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
+{
+ int ret;
+ u32 val;
+
+ if (pci->ops->read_dbi2)
+ return pci->ops->read_dbi2(pci, base, reg, size);
+
+ ret = dw_pcie_read(base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "read DBI address failed\n");
+
+ return val;
+}
+
+void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops->write_dbi2) {
+ pci->ops->write_dbi2(pci, base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "write DBI address failed\n");
+}
+
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
@@ -334,9 +359,20 @@ int dw_pcie_link_up(struct dw_pcie *pci)
if (pci->ops->link_up)
return pci->ops->link_up(pci);
- val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
- return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
- (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
+ val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+ return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
+ (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
+}
+
+static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+ if (val == 0xffffffff)
+ return 1;
+
+ return 0;
}
void dw_pcie_setup(struct dw_pcie *pci)
@@ -347,6 +383,16 @@ void dw_pcie_setup(struct dw_pcie *pci)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ if (pci->version >= 0x480A || (!pci->version &&
+ dw_pcie_iatu_unroll_enabled(pci))) {
+ pci->iatu_unroll_enabled = true;
+ if (!pci->atu_base)
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
+ "enabled" : "disabled");
+
ret = of_property_read_u32(np, "num-lanes", &lanes);
if (ret)
lanes = 0;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 377f4c0b52da..b8993f2b78df 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -41,6 +41,9 @@
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+#define PCIE_PORT_DEBUG1 0x72C
+#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE BIT(17)
@@ -145,14 +148,9 @@ struct dw_pcie_host_ops {
int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 val);
int (*host_init)(struct pcie_port *pp);
- void (*msi_set_irq)(struct pcie_port *pp, int irq);
- void (*msi_clear_irq)(struct pcie_port *pp, int irq);
- phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
- u32 (*get_msi_data)(struct pcie_port *pp, int pos);
void (*scan_bus)(struct pcie_port *pp);
void (*set_num_vectors)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp);
- void (*msi_irq_ack)(int irq, struct pcie_port *pp);
};
struct pcie_port {
@@ -179,8 +177,11 @@ struct pcie_port {
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
dma_addr_t msi_data;
+ struct page *msi_page;
+ struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
+ struct pci_bus *root_bus;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -200,7 +201,7 @@ struct dw_pcie_ep_ops {
struct dw_pcie_ep {
struct pci_epc *epc;
- struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ep_ops *ops;
phys_addr_t phys_base;
size_t addr_size;
size_t page_size;
@@ -222,6 +223,10 @@ struct dw_pcie_ops {
size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
+ u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size);
+ void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int (*link_up)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
@@ -238,6 +243,7 @@ struct dw_pcie {
struct pcie_port pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
+ unsigned int version;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -252,6 +258,10 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
size_t size);
void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
size_t size, u32 val);
+u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size);
+void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
@@ -295,12 +305,12 @@ static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
+ __dw_pcie_write_dbi2(pci, pci->dbi_base2, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
+ return __dw_pcie_read_dbi2(pci, pci->dbi_base2, reg, 0x4);
}
static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index a7f703556790..0ed235d560e3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1129,25 +1129,8 @@ err_deinit:
return ret;
}
-static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- /* the device class is not reported correctly from the register */
- if (where == PCI_CLASS_REVISION && size == 4) {
- *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
- *val &= 0xff; /* keep revision id */
- *val |= PCI_CLASS_BRIDGE_PCI << 16;
- return PCIBIOS_SUCCESSFUL;
- }
-
- return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.host_init = qcom_pcie_host_init,
- .rd_own_conf = qcom_pcie_rd_own_conf,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1309,6 +1292,12 @@ static const struct of_device_id qcom_pcie_match[] = {
{ }
};
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);
+
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
.driver = {
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index d5dc40289cce..3f30ee4a00b3 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -270,6 +270,7 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
struct device_node *np = pci->dev->of_node;
struct device_node *np_intc;
+ int ret = 0;
np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
if (!np_intc) {
@@ -280,20 +281,24 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
pp->irq = irq_of_parse_and_map(np_intc, 0);
if (!pp->irq) {
dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_node;
}
priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
if (!priv->legacy_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_node;
}
irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
pp);
- return 0;
+out_put_node:
+ of_node_put(np_intc);
+ return ret;
}
static int uniphier_pcie_host_init(struct pcie_port *pp)
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index eb58dfdaba1b..134e0306ff00 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -794,6 +794,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
struct irq_chip *irq_chip;
+ int ret = 0;
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
@@ -806,8 +807,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
dev_name(dev));
if (!irq_chip->name) {
- of_node_put(pcie_intc_node);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put_node;
}
irq_chip->irq_mask = advk_pcie_irq_mask;
@@ -819,11 +820,13 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
&advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put_node;
}
- return 0;
+out_put_node:
+ of_node_put(pcie_intc_node);
+ return ret;
}
static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
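
Both this hunk and the uniphier one above converge all exit paths on a single of_node_put(), so the child-node reference taken by of_get_next_child() or of_get_child_by_name() is dropped on success as well as on every error. A hedged kernel-style sketch of the same idiom (the function name and parameters are invented; only the of_* and irq_domain_add_linear() calls are real APIs):

#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/pci.h>

static int init_intx_domain(struct device_node *node,
			    const struct irq_domain_ops *ops, void *data,
			    struct irq_domain **domain)
{
	struct device_node *intc;
	int ret = 0;

	intc = of_get_next_child(node, NULL);	/* takes a reference */
	if (!intc)
		return -ENODEV;

	*domain = irq_domain_add_linear(intc, PCI_NUM_INTX, ops, data);
	if (!*domain)
		ret = -ENOMEM;

	of_node_put(intc);	/* dropped on success and on failure alike */
	return ret;
}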
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index dea3ec7592a2..75a2fb930d4b 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Simple, generic PCI host controller driver targetting firmware-initialised
+ * Simple, generic PCI host controller driver targeting firmware-initialised
* systems and virtual machines (e.g. the PCI emulation provided by kvmtool).
*
* Copyright (C) 2014 ARM Limited
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 95441a35eceb..82acd6155adf 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1486,6 +1486,21 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
}
}
+/*
+ * Remove entries in sysfs pci slot directory.
+ */
+static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (!hpdev->pci_slot)
+ continue;
+ pci_destroy_slot(hpdev->pci_slot);
+ hpdev->pci_slot = NULL;
+ }
+}
+
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@@ -1761,6 +1776,10 @@ static void pci_devices_present_work(struct work_struct *work)
hpdev = list_first_entry(&removed, struct hv_pci_dev,
list_entry);
list_del(&hpdev->list_entry);
+
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+
put_pcichild(hpdev);
}
@@ -1900,6 +1919,9 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
+ /* For the get_pcichild() in hv_pci_eject_device() */
+ put_pcichild(hpdev);
+ /* For the two refs got in new_pcichild_device() */
put_pcichild(hpdev);
put_pcichild(hpdev);
put_hvpcibus(hpdev->hbus);
@@ -2677,6 +2699,7 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_lock_rescan_remove();
pci_stop_root_bus(hbus->pci_bus);
pci_remove_root_bus(hbus->pci_bus);
+ hv_pci_remove_slots(hbus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_removed;
}
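
The hunk above balances every reference: one put for the get taken in hv_pci_eject_device(), two more for the references taken at child creation, and the sysfs slots are torn down before the root bus goes away. A small userspace sketch of the balance rule (all names and the initial count of two are illustrative, not the Hyper-V code):

/* Build with: cc -o ref_demo ref_demo.c */
#include <assert.h>
#include <stdio.h>

struct child {
	int refs;
};

static void get_child(struct child *c) { c->refs++; }
static void put_child(struct child *c) { c->refs--; }

static void eject_device(struct child *c)
{
	get_child(c);		/* reference held while the eject work is queued */
	/* ... queue work ... */
}

static void eject_work(struct child *c)
{
	/* ... send the eject packet ... */
	put_child(c);		/* for the get_child() in eject_device() */
	put_child(c);		/* for the two references taken at creation */
	put_child(c);
}

int main(void)
{
	struct child c = { .refs = 2 };	/* two refs from child creation */

	eject_device(&c);
	eject_work(&c);
	assert(c.refs == 0);		/* balanced: no leak, no underflow */
	printf("refs after eject: %d\n", c.refs);
	return 0;
}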
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index f4f53d092e00..464ba2538d52 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -231,9 +231,9 @@ struct tegra_msi {
struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
- unsigned long pages;
struct mutex lock;
- u64 phys;
+ void *virt;
+ dma_addr_t phys;
int irq;
};
@@ -1536,7 +1536,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
err = platform_get_irq_byname(pdev, "msi");
if (err < 0) {
dev_err(dev, "failed to get IRQ: %d\n", err);
- goto err;
+ goto free_irq_domain;
}
msi->irq = err;
@@ -1545,17 +1545,35 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(dev, "failed to request IRQ: %d\n", err);
- goto err;
+ goto free_irq_domain;
+ }
+
+ /* Although the PCIe controller can address a >32-bit address space,
+ * to accommodate endpoints that support only 32-bit MSI target
+ * addresses, set the coherent DMA mask to 32 bits so that the MSI
+ * target address is always a 32-bit address.
+ */
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err < 0) {
+ dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
+ goto free_irq;
+ }
+
+ msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!msi->virt) {
+ dev_err(dev, "failed to allocate DMA memory for MSI\n");
+ err = -ENOMEM;
+ goto free_irq;
}
- /* setup AFI/FPCI range */
- msi->pages = __get_free_pages(GFP_KERNEL, 0);
- msi->phys = virt_to_phys((void *)msi->pages);
host->msi = &msi->chip;
return 0;
-err:
+free_irq:
+ free_irq(msi->irq, pcie);
+free_irq_domain:
irq_domain_remove(msi->domain);
return err;
}
@@ -1592,7 +1610,8 @@ static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
struct tegra_msi *msi = &pcie->msi;
unsigned int i, irq;
- free_pages(msi->pages, 0);
+ dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
+ DMA_ATTR_NO_KERNEL_MAPPING);
if (msi->irq > 0)
free_irq(msi->irq, pcie);
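
The Tegra change above stops handing out a real kernel page as the MSI target and instead reserves a 32-bit-addressable DMA region that is never mapped or read by the CPU. A hedged sketch of that allocation pattern (struct and function names are invented; the dma_* calls and DMA_ATTR_NO_KERNEL_MAPPING are real APIs), assuming it runs in a driver probe path with a valid struct device:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct msi_target {
	void *virt;
	dma_addr_t phys;
};

static int msi_target_alloc(struct device *dev, struct msi_target *t)
{
	int err;

	/* keep the MSI target address below 4 GiB for 32-bit-only endpoints */
	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	t->virt = dma_alloc_attrs(dev, PAGE_SIZE, &t->phys, GFP_KERNEL,
				  DMA_ATTR_NO_KERNEL_MAPPING);
	return t->virt ? 0 : -ENOMEM;
}

static void msi_target_free(struct device *dev, struct msi_target *t)
{
	dma_free_attrs(dev, PAGE_SIZE, t->virt, t->phys,
		       DMA_ATTR_NO_KERNEL_MAPPING);
}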
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index cb3401a931f8..0a3f61be5625 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -367,7 +367,7 @@ static void iproc_msi_handler(struct irq_desc *desc)
/*
* Now go read the tail pointer again to see if there are new
- * oustanding events that came in during the above window.
+ * outstanding events that came in during the above window.
*/
} while (true);
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index c20fd6bd68fd..e3ca46497470 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -60,6 +60,10 @@
#define APB_ERR_EN_SHIFT 0
#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+#define CFG_RD_SUCCESS 0
+#define CFG_RD_UR 1
+#define CFG_RD_CRS 2
+#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -289,6 +293,9 @@ enum iproc_pcie_reg {
IPROC_PCIE_IARR4,
IPROC_PCIE_IMAP4,
+ /* config read status */
+ IPROC_PCIE_CFG_RD_STATUS,
+
/* link status */
IPROC_PCIE_LINK_STATUS,
@@ -350,6 +357,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
[IPROC_PCIE_IMAP3] = 0xe08,
[IPROC_PCIE_IARR4] = 0xe68,
[IPROC_PCIE_IMAP4] = 0xe70,
+ [IPROC_PCIE_CFG_RD_STATUS] = 0xee0,
[IPROC_PCIE_LINK_STATUS] = 0xf0c,
[IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
@@ -474,10 +482,12 @@ static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
return (pcie->base + offset);
}
-static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
+static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
+ void __iomem *cfg_data_p)
{
int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
unsigned int data;
+ u32 status;
/*
* As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
@@ -498,6 +508,15 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
+ /*
+ * CRS state is set in CFG_RD status register
+ * This will handle the case where CFG_RETRY_STATUS is
+ * valid config data.
+ */
+ status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
+ if (status != CFG_RD_CRS)
+ return data;
+
udelay(1);
data = readl(cfg_data_p);
}
@@ -576,7 +595,7 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
if (!cfg_data_p)
return PCIBIOS_DEVICE_NOT_FOUND;
- data = iproc_pcie_cfg_retry(cfg_data_p);
+ data = iproc_pcie_cfg_retry(pcie, cfg_data_p);
*val = data;
if (size <= 2)
@@ -936,8 +955,25 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
resource_size_t window_size =
ob_map->window_sizes[size_idx] * SZ_1M;
- if (size < window_size)
- continue;
+ /*
+ * Keep iterating until we reach the last window with the
+ * minimal window size at index zero. In that case, take a
+ * compromise and map the region using the minimum window
+ * size that can be supported.
+ */
+ if (size < window_size) {
+ if (size_idx > 0 || window_idx > 0)
+ continue;
+
+ /*
+ * Corner case: the last window with the minimal
+ * supported window size. Align the addresses down
+ * to that window size.
+ */
+ axi_addr = ALIGN_DOWN(axi_addr, window_size);
+ pci_addr = ALIGN_DOWN(pci_addr, window_size);
+ size = window_size;
+ }
if (!IS_ALIGNED(axi_addr, window_size) ||
!IS_ALIGNED(pci_addr, window_size)) {
@@ -1146,11 +1182,43 @@ err_ib:
return ret;
}
+static int iproc_pcie_add_dma_range(struct device *dev,
+ struct list_head *resources,
+ struct of_pci_range *range)
+{
+ struct resource *res;
+ struct resource_entry *entry, *tmp;
+ struct list_head *head = resources;
+
+ res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ resource_list_for_each_entry(tmp, resources) {
+ if (tmp->res->start < range->cpu_addr)
+ head = &tmp->node;
+ }
+
+ res->start = range->cpu_addr;
+ res->end = res->start + range->size - 1;
+
+ entry = resource_list_create_entry(res, 0);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->offset = res->start - range->cpu_addr;
+ resource_list_add(entry, head);
+
+ return 0;
+}
+
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct of_pci_range range;
struct of_pci_range_parser parser;
int ret;
+ LIST_HEAD(resources);
/* Get the dma-ranges from DT */
ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
@@ -1158,13 +1226,23 @@ static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
return ret;
for_each_of_pci_range(&parser, &range) {
+ ret = iproc_pcie_add_dma_range(pcie->dev,
+ &resources,
+ &range);
+ if (ret)
+ goto out;
/* Each range entry corresponds to an inbound mapping region */
ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
if (ret)
- return ret;
+ goto out;
}
+ list_splice_init(&resources, &host->dma_ranges);
+
return 0;
+out:
+ pci_free_resource_list(&resources);
+ return ret;
}
static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
@@ -1320,14 +1398,18 @@ static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
if (pcie->need_msi_steer) {
ret = iproc_pcie_msi_steer(pcie, msi_node);
if (ret)
- return ret;
+ goto out_put_node;
}
/*
* If another MSI controller is being used, the call below should fail
* but that is okay
*/
- return iproc_msi_init(pcie, msi_node);
+ ret = iproc_msi_init(pcie, msi_node);
+
+out_put_node:
+ of_node_put(msi_node);
+ return ret;
}
static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
@@ -1347,7 +1429,6 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB:
regs = iproc_pcie_reg_paxb;
- pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_ob_map;
@@ -1356,6 +1437,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB_V2:
regs = iproc_pcie_reg_paxb_v2;
+ pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_v2_ob_map;
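
The outbound-window change above handles regions smaller than the smallest supported window by rounding both addresses down to the window boundary and mapping a full window. A userspace demonstration of that arithmetic (the 128 MiB window and the addresses are example values, not iProc data):

/* Build with: cc -o align_demo align_demo.c */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))	/* a must be a power of two */

int main(void)
{
	uint64_t window_size = 128ULL << 20;		/* smallest window: 128 MiB */
	uint64_t axi_addr = 0x60A0000000ULL + 0x1000;	/* not window aligned */
	uint64_t pci_addr = 0x00A0000000ULL + 0x1000;
	uint64_t size = 4096;				/* smaller than any window */

	if (size < window_size) {
		axi_addr = ALIGN_DOWN(axi_addr, window_size);
		pci_addr = ALIGN_DOWN(pci_addr, window_size);
		size = window_size;
	}

	printf("axi=0x%llx pci=0x%llx size=0x%llx\n",
	       (unsigned long long)axi_addr, (unsigned long long)pci_addr,
	       (unsigned long long)size);
	return 0;
}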
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 0b6c72804e03..80601e1b939e 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -578,6 +578,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops, port);
+ of_node_put(pcie_intc_node);
if (!port->irq_domain) {
dev_err(dev, "failed to get INTx IRQ domain\n");
return -ENODEV;
@@ -915,49 +916,29 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
/* sys_ck might be divided into the following parts in some chips */
snprintf(name, sizeof(name), "ahb_ck%d", slot);
- port->ahb_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->ahb_ck)) {
- if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->ahb_ck = NULL;
- }
+ port->ahb_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->ahb_ck))
+ return PTR_ERR(port->ahb_ck);
snprintf(name, sizeof(name), "axi_ck%d", slot);
- port->axi_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->axi_ck)) {
- if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->axi_ck = NULL;
- }
+ port->axi_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->axi_ck))
+ return PTR_ERR(port->axi_ck);
snprintf(name, sizeof(name), "aux_ck%d", slot);
- port->aux_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->aux_ck)) {
- if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->aux_ck = NULL;
- }
+ port->aux_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->aux_ck))
+ return PTR_ERR(port->aux_ck);
snprintf(name, sizeof(name), "obff_ck%d", slot);
- port->obff_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->obff_ck)) {
- if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->obff_ck = NULL;
- }
+ port->obff_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->obff_ck))
+ return PTR_ERR(port->obff_ck);
snprintf(name, sizeof(name), "pipe_ck%d", slot);
- port->pipe_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->pipe_ck)) {
- if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->pipe_ck = NULL;
- }
+ port->pipe_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->pipe_ck))
+ return PTR_ERR(port->pipe_ck);
snprintf(name, sizeof(name), "pcie-rst%d", slot);
port->reset = devm_reset_control_get_optional_exclusive(dev, name);
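
The MediaTek hunk above replaces five copies of the "return on -EPROBE_DEFER, otherwise treat the clock as absent" dance with devm_clk_get_optional(), which returns NULL for a missing clock and an error pointer otherwise. A hedged sketch of the converted pattern (the helper name and the "ahb_ck0" clock id are placeholders):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int get_port_clock(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get_optional(dev, "ahb_ck0");
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);	/* real error or probe deferral */

	/* *clk may be NULL here; clk_prepare_enable(NULL) is a no-op */
	return 0;
}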
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index c8febb009454..f6a669a9af41 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -46,14 +46,15 @@
/* Transfer control */
#define PCIETCTLR 0x02000
-#define CFINIT 1
+#define DL_DOWN BIT(3)
+#define CFINIT BIT(0)
#define PCIETSTR 0x02004
-#define DATA_LINK_ACTIVE 1
+#define DATA_LINK_ACTIVE BIT(0)
#define PCIEERRFR 0x02020
#define UNSUPPORTED_REQUEST BIT(4)
#define PCIEMSIFR 0x02044
#define PCIEMSIALR 0x02048
-#define MSIFE 1
+#define MSIFE BIT(0)
#define PCIEMSIAUR 0x0204c
#define PCIEMSIIER 0x02050
@@ -94,6 +95,7 @@
#define MACCTLR 0x011058
#define SPEED_CHANGE BIT(24)
#define SCRAMBLE_DISABLE BIT(27)
+#define PMSR 0x01105c
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
#define SPCNGRSN BIT(31)
@@ -152,14 +154,13 @@ struct rcar_pcie {
struct rcar_msi msi;
};
-static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
- unsigned long reg)
+static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val,
+ unsigned int reg)
{
writel(val, pcie->base + reg);
}
-static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
- unsigned long reg)
+static u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg)
{
return readl(pcie->base + reg);
}
@@ -171,7 +172,7 @@ enum {
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
- int shift = 8 * (where & 3);
+ unsigned int shift = BITS_PER_BYTE * (where & 3);
u32 val = rcar_pci_read_reg(pcie, where & ~3);
val &= ~(mask << shift);
@@ -181,7 +182,7 @@ static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
- int shift = 8 * (where & 3);
+ unsigned int shift = BITS_PER_BYTE * (where & 3);
u32 val = rcar_pci_read_reg(pcie, where & ~3);
return val >> shift;
@@ -192,7 +193,7 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
unsigned char access_type, struct pci_bus *bus,
unsigned int devfn, int where, u32 *data)
{
- int dev, func, reg, index;
+ unsigned int dev, func, reg, index;
dev = PCI_SLOT(devfn);
func = PCI_FUNC(devfn);
@@ -281,12 +282,12 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
}
if (size == 1)
- *val = (*val >> (8 * (where & 3))) & 0xff;
+ *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
else if (size == 2)
- *val = (*val >> (8 * (where & 2))) & 0xffff;
+ *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
- dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
- bus->number, devfn, where, size, (unsigned long)*val);
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, *val);
return ret;
}
@@ -296,23 +297,24 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct rcar_pcie *pcie = bus->sysdata;
- int shift, ret;
+ unsigned int shift;
u32 data;
+ int ret;
ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
bus, devfn, where, &data);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
- dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
- bus->number, devfn, where, size, (unsigned long)val);
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, val);
if (size == 1) {
- shift = 8 * (where & 3);
+ shift = BITS_PER_BYTE * (where & 3);
data &= ~(0xff << shift);
data |= ((val & 0xff) << shift);
} else if (size == 2) {
- shift = 8 * (where & 2);
+ shift = BITS_PER_BYTE * (where & 2);
data &= ~(0xffff << shift);
data |= ((val & 0xffff) << shift);
} else
@@ -507,10 +509,10 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
}
static void phy_write_reg(struct rcar_pcie *pcie,
- unsigned int rate, unsigned int addr,
- unsigned int lane, unsigned int data)
+ unsigned int rate, u32 addr,
+ unsigned int lane, u32 data)
{
- unsigned long phyaddr;
+ u32 phyaddr;
phyaddr = WRITE_CMD |
((rate & 1) << RATE_POS) |
@@ -738,15 +740,15 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
while (reg) {
unsigned int index = find_first_bit(&reg, 32);
- unsigned int irq;
+ unsigned int msi_irq;
/* clear the interrupt */
rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
- irq = irq_find_mapping(msi->domain, index);
- if (irq) {
+ msi_irq = irq_find_mapping(msi->domain, index);
+ if (msi_irq) {
if (test_bit(index, msi->used))
- generic_handle_irq(irq);
+ generic_handle_irq(msi_irq);
else
dev_info(dev, "unhandled MSI\n");
} else {
@@ -890,7 +892,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct rcar_msi *msi = &pcie->msi;
- unsigned long base;
+ phys_addr_t base;
int err, i;
mutex_init(&msi->lock);
@@ -929,10 +931,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
/* setup MSI data target */
msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ if (!msi->pages) {
+ err = -ENOMEM;
+ goto err;
+ }
base = virt_to_phys((void *)msi->pages);
- rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
- rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
+ rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
/* enable all MSI interrupts */
rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
@@ -1118,7 +1124,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
- unsigned int data;
+ u32 data;
int err;
int (*phy_init_fn)(struct rcar_pcie *);
struct pci_host_bridge *bridge;
@@ -1130,6 +1136,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
if (err)
@@ -1221,10 +1228,28 @@ err_free_bridge:
return err;
}
+static int rcar_pcie_resume_noirq(struct device *dev)
+{
+ struct rcar_pcie *pcie = dev_get_drvdata(dev);
+
+ if (rcar_pci_read_reg(pcie, PMSR) &&
+ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
+ return 0;
+
+ /* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+ return rcar_pcie_wait_for_dl(pcie);
+}
+
+static const struct dev_pm_ops rcar_pcie_pm_ops = {
+ .resume_noirq = rcar_pcie_resume_noirq,
+};
+
static struct platform_driver rcar_pcie_driver = {
.driver = {
.name = "rcar-pcie",
.of_match_table = rcar_pcie_of_match,
+ .pm = &rcar_pcie_pm_ops,
.suppress_bind_attrs = true,
},
.probe = rcar_pcie_probe,
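
With the MSI target now written as lower_32_bits(base) | MSIFE and upper_32_bits(base), the R-Car driver can place the MSI page anywhere in a 64-bit physical address space. A userspace illustration of that register split (the base address is an arbitrary example):

/* Build with: cc -o split_demo split_demo.c */
#include <stdint.h>
#include <stdio.h>

#define MSIFE	(1U << 0)	/* MSI enable bit in PCIEMSIALR */

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t base = 0x000000047f5fe000ULL;	/* example page-aligned physical address */

	printf("PCIEMSIALR = 0x%08x\n", lower_32_bits(base) | MSIFE);
	printf("PCIEMSIAUR = 0x%08x\n", upper_32_bits(base));
	return 0;
}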
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index a5d799e2dff2..d743b0a48988 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -350,7 +350,7 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
struct rockchip_pcie *rockchip = &ep->rockchip;
u32 r = ep->max_regions - 1;
u32 offset;
- u16 status;
+ u32 status;
u8 msg_code;
if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 1372d270764f..8d20f1793a61 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -724,6 +724,7 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
&intx_domain_ops, rockchip);
+ of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
return -EINVAL;
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 81538d77f790..3b031f00a94a 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -438,11 +438,10 @@ static const struct irq_domain_ops legacy_domain_ops = {
#ifdef CONFIG_PCI_MSI
static struct irq_chip nwl_msi_irq_chip = {
.name = "nwl_pcie:msi",
- .irq_enable = unmask_msi_irq,
- .irq_disable = mask_msi_irq,
- .irq_mask = mask_msi_irq,
- .irq_unmask = unmask_msi_irq,
-
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
static struct msi_domain_info nwl_msi_domain_info = {
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 9bd1a35cd5d8..5bf3af3b28e6 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = {
* xilinx_pcie_enable_msi - Enable MSI support
* @port: PCIe port information
*/
-static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
{
phys_addr_t msg_addr;
port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+ if (!port->msi_pages)
+ return -ENOMEM;
+
msg_addr = virt_to_phys((void *)port->msi_pages);
pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+
+ return 0;
}
/* INTx Functions */
@@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
+ int ret;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
@@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
return -ENODEV;
}
- xilinx_pcie_enable_msi(port);
+ ret = xilinx_pcie_enable_msi(port);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index cf6816b55b5e..999a5509e57e 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -95,10 +95,8 @@ struct vmd_dev {
struct irq_domain *irq_domain;
struct pci_bus *bus;
-#ifdef CONFIG_X86_DEV_DMA_OPS
struct dma_map_ops dma_ops;
struct dma_domain dma_domain;
-#endif
};
static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -293,7 +291,6 @@ static struct msi_domain_info vmd_msi_domain_info = {
.chip = &vmd_msi_controller,
};
-#ifdef CONFIG_X86_DEV_DMA_OPS
/*
* VMD replaces the requester ID with its own. DMA mappings for devices in a
* VMD domain need to be mapped for the VMD, not the device requiring
@@ -438,10 +435,6 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
-#else
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
-static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
-#endif
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
unsigned int devfn, int reg, int len)
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index d0b91da49bf4..27806987e93b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -438,7 +438,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
epc_features = epf_test->epc_features;
base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
- test_reg_bar);
+ test_reg_bar, epc_features->align);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -453,7 +453,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
- base = pci_epf_alloc_space(epf, bar_size[bar], bar);
+ base = pci_epf_alloc_space(epf, bar_size[bar], bar,
+ epc_features->align);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -591,6 +592,11 @@ static int __init pci_epf_test_init(void)
kpcitest_workqueue = alloc_workqueue("kpcitest",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!kpcitest_workqueue) {
+ pr_err("Failed to allocate the kpcitest work queue\n");
+ return -ENOMEM;
+ }
+
ret = pci_epf_register_driver(&test_driver);
if (ret) {
pr_err("Failed to register pci epf test driver --> %d\n", ret);
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 8bfdcd291196..fb1306de8f40 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -109,10 +109,12 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* pci_epf_alloc_space() - allocate memory for the PCI EPF register space
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
+ * @align: alignment size for the allocation region
*
* Invoke to allocate memory for the PCI EPF register space.
*/
-void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ size_t align)
{
void *space;
struct device *dev = epf->epc->dev.parent;
@@ -120,7 +122,11 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
if (size < 128)
size = 128;
- size = roundup_pow_of_two(size);
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
if (!space) {
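
The new align parameter changes how pci_epf_alloc_space() sizes the BAR backing memory: with an explicit alignment the size is rounded up to that alignment, otherwise to the next power of two as before. A userspace check of that rule (the input sizes are arbitrary examples):

/* Build with: cc -o size_demo size_demo.c */
#include <stddef.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))	/* a must be a power of two */

static size_t roundup_pow_of_two(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static size_t epf_space_size(size_t size, size_t align)
{
	if (size < 128)
		size = 128;
	return align ? ALIGN(size, align) : roundup_pow_of_two(size);
}

int main(void)
{
	printf("size=520 align=0   -> %zu\n", epf_space_size(520, 0));		/* 1024 */
	printf("size=520 align=256 -> %zu\n", epf_space_size(520, 256));	/* 768 */
	return 0;
}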
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 506e1d923a1f..8c51a04b8083 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -25,36 +25,21 @@
#include "../pcie/portdrv.h"
-#define MY_NAME "pciehp"
-
extern bool pciehp_poll_mode;
extern int pciehp_poll_time;
-extern bool pciehp_debug;
-
-#define dbg(format, arg...) \
-do { \
- if (pciehp_debug) \
- printk(KERN_DEBUG "%s: " format, MY_NAME, ## arg); \
-} while (0)
-#define err(format, arg...) \
- printk(KERN_ERR "%s: " format, MY_NAME, ## arg)
-#define info(format, arg...) \
- printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
-#define warn(format, arg...) \
- printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
+/*
+ * Set CONFIG_DYNAMIC_DEBUG=y and boot with 'dyndbg="file pciehp* +p"' to
+ * enable debug messages.
+ */
#define ctrl_dbg(ctrl, format, arg...) \
- do { \
- if (pciehp_debug) \
- dev_printk(KERN_DEBUG, &ctrl->pcie->device, \
- format, ## arg); \
- } while (0)
+ pci_dbg(ctrl->pcie->port, format, ## arg)
#define ctrl_err(ctrl, format, arg...) \
- dev_err(&ctrl->pcie->device, format, ## arg)
+ pci_err(ctrl->pcie->port, format, ## arg)
#define ctrl_info(ctrl, format, arg...) \
- dev_info(&ctrl->pcie->device, format, ## arg)
+ pci_info(ctrl->pcie->port, format, ## arg)
#define ctrl_warn(ctrl, format, arg...) \
- dev_warn(&ctrl->pcie->device, format, ## arg)
+ pci_warn(ctrl->pcie->port, format, ## arg)
#define SLOT_NAME_SIZE 10
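
The logging rework above drops the module-private pciehp_debug flag in favour of dynamic debug: the dev_fmt/pr_fmt definitions at the top of each file prepend "pciehp: " to every message without touching the call sites. A userspace illustration of that prefixing trick (printf stands in for the kernel logging backend; builds with gcc or clang, which accept the ##__VA_ARGS__ extension):

/* Build with: cc -o fmt_demo fmt_demo.c */
#include <stdio.h>

#define pr_fmt(fmt)		"pciehp: " fmt
#define pr_debug(fmt, ...)	printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_debug("pcie_port_service_register = %d\n", 0);
	return 0;
}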
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index fc5366b50e95..6ad0d86762cb 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -17,6 +17,9 @@
* Dely Sy <dely.l.sy@intel.com>"
*/
+#define pr_fmt(fmt) "pciehp: " fmt
+#define dev_fmt pr_fmt
+
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -27,7 +30,6 @@
#include "../pci.h"
/* Global variables */
-bool pciehp_debug;
bool pciehp_poll_mode;
int pciehp_poll_time;
@@ -35,15 +37,11 @@ int pciehp_poll_time;
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
*/
-module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
-MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-#define PCIE_MODULE_NAME "pciehp"
-
static int set_attention_status(struct hotplug_slot *slot, u8 value);
static int get_power_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
@@ -182,14 +180,14 @@ static int pciehp_probe(struct pcie_device *dev)
if (!dev->port->subordinate) {
/* Can happen if we run out of bus numbers during probe */
- dev_err(&dev->device,
+ pci_err(dev->port,
"Hotplug bridge without secondary bus, ignoring\n");
return -ENODEV;
}
ctrl = pcie_init(dev);
if (!ctrl) {
- dev_err(&dev->device, "Controller initialization failed\n");
+ pci_err(dev->port, "Controller initialization failed\n");
return -ENODEV;
}
set_service_data(dev, ctrl);
@@ -307,7 +305,7 @@ static int pciehp_runtime_resume(struct pcie_device *dev)
#endif /* PM */
static struct pcie_port_service_driver hpdriver_portdrv = {
- .name = PCIE_MODULE_NAME,
+ .name = "pciehp",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_HP,
@@ -328,9 +326,9 @@ int __init pcie_hp_init(void)
int retval = 0;
retval = pcie_port_service_register(&hpdriver_portdrv);
- dbg("pcie_port_service_register = %d\n", retval);
+ pr_debug("pcie_port_service_register = %d\n", retval);
if (retval)
- dbg("Failure to register service\n");
+ pr_debug("Failure to register service\n");
return retval;
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 3f3df4c29f6e..631ced0ab28a 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -13,6 +13,8 @@
*
*/
+#define dev_fmt(fmt) "pciehp: " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
@@ -115,6 +117,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
* removed from the slot/adapter.
*/
msleep(1000);
+
+ /* Ignore link or presence changes caused by power off */
+ atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
+ &ctrl->pending_events);
}
/* turn off Green LED */
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 6a2365cd794e..bd990e3371e3 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -12,6 +12,8 @@
* Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*/
+#define dev_fmt(fmt) "pciehp: " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/jiffies.h>
@@ -46,7 +48,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
/* Installs the interrupt handler */
retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
- IRQF_SHARED, MY_NAME, ctrl);
+ IRQF_SHARED, "pciehp", ctrl);
if (retval)
ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
irq);
@@ -232,8 +234,8 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
delay -= step;
} while (delay > 0);
- if (count > 1 && pciehp_debug)
- printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
+ if (count > 1)
+ pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), count, step, l);
@@ -822,14 +824,11 @@ static inline void dbg_ctrl(struct controller *ctrl)
struct pci_dev *pdev = ctrl->pcie->port;
u16 reg16;
- if (!pciehp_debug)
- return;
-
- ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
+ ctrl_dbg(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
- ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
+ ctrl_dbg(ctrl, "Slot Status : 0x%04x\n", reg16);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
- ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
+ ctrl_dbg(ctrl, "Slot Control : 0x%04x\n", reg16);
}
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index b9c1396db6fe..d17f3bf36f70 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -13,6 +13,8 @@
*
*/
+#define dev_fmt(fmt) "pciehp: " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index e2356a9c7088..182f9e3443ee 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name)
if (rc == 0)
break;
}
+ of_node_put(parent);
return dn;
}
@@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
return np;
}
+/* Returns a device_node with its reference count incremented */
static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
{
struct device_node *dn;
@@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name)
rc = dlpar_add_phb(drc_name, dn);
break;
}
+ of_node_put(dn);
printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
exit:
@@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name)
rc = dlpar_remove_pci_slot(drc_name, dn);
break;
}
+ of_node_put(dn);
vm_unmap_aliases();
printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 5282aa3e33c5..93b4a945c55d 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -21,6 +21,7 @@
/* free up the memory used by a slot */
void dealloc_slot_struct(struct slot *slot)
{
+ of_node_put(slot->dn);
kfree(slot->name);
kfree(slot);
}
@@ -36,7 +37,7 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
goto error_slot;
- slot->dn = dn;
+ slot->dn = of_node_get(dn);
slot->index = drc_index;
slot->power_domain = power_domain;
slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 73986825d221..e039b740fe74 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1338,7 +1338,7 @@ irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
struct msi_desc *desc)
{
return (irq_hw_number_t)desc->msi_attrib.entry_nr |
- PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
+ pci_dev_id(dev) << 11 |
(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
}
@@ -1508,7 +1508,7 @@ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
{
struct device_node *of_node;
- u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
@@ -1531,7 +1531,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
struct irq_domain *dom;
- u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
dom = of_msi_map_get_device_domain(&pdev->dev, rid);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 3d32da15c215..73d5adec0a28 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -15,6 +15,7 @@
#include <linux/of_pci.h>
#include "pci.h"
+#ifdef CONFIG_PCI
void pci_set_of_node(struct pci_dev *dev)
{
if (!dev->bus->dev.of_node)
@@ -31,10 +32,16 @@ void pci_release_of_node(struct pci_dev *dev)
void pci_set_bus_of_node(struct pci_bus *bus)
{
- if (bus->self == NULL)
- bus->dev.of_node = pcibios_get_phb_of_node(bus);
- else
- bus->dev.of_node = of_node_get(bus->self->dev.of_node);
+ struct device_node *node;
+
+ if (bus->self == NULL) {
+ node = pcibios_get_phb_of_node(bus);
+ } else {
+ node = of_node_get(bus->self->dev.of_node);
+ if (node && of_property_read_bool(node, "external-facing"))
+ bus->self->untrusted = true;
+ }
+ bus->dev.of_node = node;
}
void pci_release_bus_of_node(struct pci_bus *bus)
@@ -197,27 +204,6 @@ int of_get_pci_domain_nr(struct device_node *node)
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
- * This function will try to find the limitation of link speed by finding
- * a property called "max-link-speed" of the given device node.
- *
- * @node: device tree node with the max link speed information
- *
- * Returns the associated max link speed from DT, or a negative value if the
- * required property is not found or is invalid.
- */
-int of_pci_get_max_link_speed(struct device_node *node)
-{
- u32 max_link_speed;
-
- if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
- max_link_speed > 4)
- return -EINVAL;
-
- return max_link_speed;
-}
-EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
-
-/**
* of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
* is present and valid
*/
@@ -537,3 +523,25 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
return err;
}
+#endif /* CONFIG_PCI */
+
+/**
+ * This function will try to find the limitation of link speed by finding
+ * a property called "max-link-speed" of the given device node.
+ *
+ * @node: device tree node with the max link speed information
+ *
+ * Returns the associated max link speed from DT, or a negative value if the
+ * required property is not found or is invalid.
+ */
+int of_pci_get_max_link_speed(struct device_node *node)
+{
+ u32 max_link_speed;
+
+ if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
+ max_link_speed > 4)
+ return -EINVAL;
+
+ return max_link_speed;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index c52298d76e64..742928d0053e 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -275,6 +275,30 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
}
/*
+ * If we can't find a common upstream bridge take a look at the root
+ * complex and compare it to a whitelist of known good hardware.
+ */
+static bool root_complex_whitelist(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+ struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
+ unsigned short vendor, device;
+
+ if (!root)
+ return false;
+
+ vendor = root->vendor;
+ device = root->device;
+ pci_dev_put(root);
+
+ /* AMD ZEN host bridges can do peer to peer */
+ if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450)
+ return true;
+
+ return false;
+}
+
+/*
* Find the distance through the nearest common upstream bridge between
* two PCI devices.
*
@@ -317,13 +341,13 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
* In this case, a list of all infringing bridge addresses will be
* populated in acs_list (assuming it's non-null) for printk purposes.
*/
-static int upstream_bridge_distance(struct pci_dev *a,
- struct pci_dev *b,
+static int upstream_bridge_distance(struct pci_dev *provider,
+ struct pci_dev *client,
struct seq_buf *acs_list)
{
+ struct pci_dev *a = provider, *b = client, *bb;
int dist_a = 0;
int dist_b = 0;
- struct pci_dev *bb = NULL;
int acs_cnt = 0;
/*
@@ -354,6 +378,14 @@ static int upstream_bridge_distance(struct pci_dev *a,
dist_a++;
}
+ /*
+ * Allow the connection if both devices are on a whitelisted root
+ * complex, but add an arbitary large value to the distance.
+ */
+ if (root_complex_whitelist(provider) &&
+ root_complex_whitelist(client))
+ return 0x1000 + dist_a + dist_b;
+
return -1;
check_b_path_acs:
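
The whitelist fallback above only permits peer-to-peer DMA through the root complex when both the provider and the client sit under known-good host bridges, and it reports a deliberately large distance so paths through a common upstream bridge are still preferred. A userspace sketch of that decision (device layout and hop counts are illustrative; the 0x1022:0x1450 ID matches the AMD host bridge named in the hunk):

/* Build with: cc -o p2p_demo p2p_demo.c */
#include <stdbool.h>
#include <stdio.h>

struct dev {
	unsigned short root_vendor, root_device;
	int hops_to_root;
};

static bool root_complex_whitelisted(const struct dev *d)
{
	/* e.g. AMD ZEN host bridge 0x1022:0x1450 */
	return d->root_vendor == 0x1022 && d->root_device == 0x1450;
}

static int upstream_distance(const struct dev *a, const struct dev *b)
{
	/* assume no common upstream bridge was found */
	if (root_complex_whitelisted(a) && root_complex_whitelisted(b))
		return 0x1000 + a->hops_to_root + b->hops_to_root;
	return -1;
}

int main(void)
{
	struct dev nvme  = { 0x1022, 0x1450, 2 };
	struct dev nic   = { 0x1022, 0x1450, 3 };
	struct dev other = { 0x8086, 0x0000, 2 };

	printf("whitelisted pair: %d\n", upstream_distance(&nvme, &nic));
	printf("mixed pair:       %d\n", upstream_distance(&nvme, &other));
	return 0;
}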
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e1949f7efd9c..c5e1a097d7e3 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -119,7 +119,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
}
static acpi_status decode_type0_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type0 *hpx0)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -132,16 +132,14 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record,
for (i = 2; i < 6; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t0 = &hpx->type0_data;
- hpx->t0->revision = revision;
- hpx->t0->cache_line_size = fields[2].integer.value;
- hpx->t0->latency_timer = fields[3].integer.value;
- hpx->t0->enable_serr = fields[4].integer.value;
- hpx->t0->enable_perr = fields[5].integer.value;
+ hpx0->revision = revision;
+ hpx0->cache_line_size = fields[2].integer.value;
+ hpx0->latency_timer = fields[3].integer.value;
+ hpx0->enable_serr = fields[4].integer.value;
+ hpx0->enable_perr = fields[5].integer.value;
break;
default:
- printk(KERN_WARNING
- "%s: Type 0 Revision %d record not supported\n",
+ pr_warn("%s: Type 0 Revision %d record not supported\n",
__func__, revision);
return AE_ERROR;
}
@@ -149,7 +147,7 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record,
}
static acpi_status decode_type1_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type1 *hpx1)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -162,15 +160,13 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record,
for (i = 2; i < 5; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t1 = &hpx->type1_data;
- hpx->t1->revision = revision;
- hpx->t1->max_mem_read = fields[2].integer.value;
- hpx->t1->avg_max_split = fields[3].integer.value;
- hpx->t1->tot_max_split = fields[4].integer.value;
+ hpx1->revision = revision;
+ hpx1->max_mem_read = fields[2].integer.value;
+ hpx1->avg_max_split = fields[3].integer.value;
+ hpx1->tot_max_split = fields[4].integer.value;
break;
default:
- printk(KERN_WARNING
- "%s: Type 1 Revision %d record not supported\n",
+ pr_warn("%s: Type 1 Revision %d record not supported\n",
__func__, revision);
return AE_ERROR;
}
@@ -178,7 +174,7 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record,
}
static acpi_status decode_type2_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type2 *hpx2)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -191,45 +187,102 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record,
for (i = 2; i < 18; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t2 = &hpx->type2_data;
- hpx->t2->revision = revision;
- hpx->t2->unc_err_mask_and = fields[2].integer.value;
- hpx->t2->unc_err_mask_or = fields[3].integer.value;
- hpx->t2->unc_err_sever_and = fields[4].integer.value;
- hpx->t2->unc_err_sever_or = fields[5].integer.value;
- hpx->t2->cor_err_mask_and = fields[6].integer.value;
- hpx->t2->cor_err_mask_or = fields[7].integer.value;
- hpx->t2->adv_err_cap_and = fields[8].integer.value;
- hpx->t2->adv_err_cap_or = fields[9].integer.value;
- hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
- hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
- hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
- hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
- hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
- hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
- hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
- hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
+ hpx2->revision = revision;
+ hpx2->unc_err_mask_and = fields[2].integer.value;
+ hpx2->unc_err_mask_or = fields[3].integer.value;
+ hpx2->unc_err_sever_and = fields[4].integer.value;
+ hpx2->unc_err_sever_or = fields[5].integer.value;
+ hpx2->cor_err_mask_and = fields[6].integer.value;
+ hpx2->cor_err_mask_or = fields[7].integer.value;
+ hpx2->adv_err_cap_and = fields[8].integer.value;
+ hpx2->adv_err_cap_or = fields[9].integer.value;
+ hpx2->pci_exp_devctl_and = fields[10].integer.value;
+ hpx2->pci_exp_devctl_or = fields[11].integer.value;
+ hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
+ hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
+ hpx2->sec_unc_err_sever_and = fields[14].integer.value;
+ hpx2->sec_unc_err_sever_or = fields[15].integer.value;
+ hpx2->sec_unc_err_mask_and = fields[16].integer.value;
+ hpx2->sec_unc_err_mask_or = fields[17].integer.value;
break;
default:
- printk(KERN_WARNING
- "%s: Type 2 Revision %d record not supported\n",
+ pr_warn("%s: Type 2 Revision %d record not supported\n",
__func__, revision);
return AE_ERROR;
}
return AE_OK;
}
-static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
+static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
+ union acpi_object *reg_fields)
+{
+ hpx3_reg->device_type = reg_fields[0].integer.value;
+ hpx3_reg->function_type = reg_fields[1].integer.value;
+ hpx3_reg->config_space_location = reg_fields[2].integer.value;
+ hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
+ hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
+ hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
+ hpx3_reg->dvsec_id = reg_fields[6].integer.value;
+ hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
+ hpx3_reg->match_offset = reg_fields[8].integer.value;
+ hpx3_reg->match_mask_and = reg_fields[9].integer.value;
+ hpx3_reg->match_value = reg_fields[10].integer.value;
+ hpx3_reg->reg_offset = reg_fields[11].integer.value;
+ hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
+ hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
+}
+
+static acpi_status program_type3_hpx_record(struct pci_dev *dev,
+ union acpi_object *record,
+ const struct hotplug_program_ops *hp_ops)
+{
+ union acpi_object *fields = record->package.elements;
+ u32 desc_count, expected_length, revision;
+ union acpi_object *reg_fields;
+ struct hpx_type3 hpx3;
+ int i;
+
+ revision = fields[1].integer.value;
+ switch (revision) {
+ case 1:
+ desc_count = fields[2].integer.value;
+ expected_length = 3 + desc_count * 14;
+
+ if (record->package.count != expected_length)
+ return AE_ERROR;
+
+ for (i = 2; i < expected_length; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+
+ for (i = 0; i < desc_count; i++) {
+ reg_fields = fields + 3 + i * 14;
+ parse_hpx3_register(&hpx3, reg_fields);
+ hp_ops->program_type3(dev, &hpx3);
+ }
+
+ break;
+ default:
+ pr_warn("%s: Type 3 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
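A revision 1 Type 3 record, as parsed above, is a flat package: a three-field header (type, revision, descriptor count) followed by fourteen integer fields per register descriptor, which is where the expected_length = 3 + desc_count * 14 check and the fields + 3 + i * 14 stride come from. As a rough stand-alone illustration of that fixed-stride walk (plain C with made-up values, no ACPI types involved):

#include <stdio.h>

#define HDR_FIELDS	3
#define DESC_FIELDS	14

int main(void)
{
	/* header: type = 3, revision = 1, descriptor count = 2; rest zeroed */
	unsigned long fields[HDR_FIELDS + 2 * DESC_FIELDS] = { 3, 1, 2 };
	unsigned long desc_count = fields[2];
	unsigned long expected_length = HDR_FIELDS + desc_count * DESC_FIELDS;
	unsigned long i;

	/* reject a record whose length does not match what its header claims */
	if (sizeof(fields) / sizeof(fields[0]) != expected_length)
		return 1;

	for (i = 0; i < desc_count; i++) {
		const unsigned long *reg_fields = fields + HDR_FIELDS + i * DESC_FIELDS;

		printf("descriptor %lu begins at field %td\n",
		       i, reg_fields - fields);
	}
	return 0;
}

The kernel code performs the same length check against record->package.count and additionally verifies that every field is an ACPI integer before handing each fourteen-field slice to parse_hpx3_register().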
+static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *package, *record, *fields;
+ struct hpp_type0 hpx0;
+ struct hpp_type1 hpx1;
+ struct hpp_type2 hpx2;
u32 type;
int i;
- /* Clear the return buffer with zeros */
- memset(hpx, 0, sizeof(struct hotplug_params));
-
status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
if (ACPI_FAILURE(status))
return status;
@@ -257,22 +310,33 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
type = fields[0].integer.value;
switch (type) {
case 0:
- status = decode_type0_hpx_record(record, hpx);
+ memset(&hpx0, 0, sizeof(hpx0));
+ status = decode_type0_hpx_record(record, &hpx0);
if (ACPI_FAILURE(status))
goto exit;
+ hp_ops->program_type0(dev, &hpx0);
break;
case 1:
- status = decode_type1_hpx_record(record, hpx);
+ memset(&hpx1, 0, sizeof(hpx1));
+ status = decode_type1_hpx_record(record, &hpx1);
if (ACPI_FAILURE(status))
goto exit;
+ hp_ops->program_type1(dev, &hpx1);
break;
case 2:
- status = decode_type2_hpx_record(record, hpx);
+ memset(&hpx2, 0, sizeof(hpx2));
+ status = decode_type2_hpx_record(record, &hpx2);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ hp_ops->program_type2(dev, &hpx2);
+ break;
+ case 3:
+ status = program_type3_hpx_record(dev, record, hp_ops);
if (ACPI_FAILURE(status))
goto exit;
break;
default:
- printk(KERN_ERR "%s: Type %d record not supported\n",
+ pr_err("%s: Type %d record not supported\n",
__func__, type);
status = AE_ERROR;
goto exit;
@@ -283,14 +347,16 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
return status;
}
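Rather than filling in one big struct hotplug_params, the reworked acpi_run_hpx() above decodes each _HPX record into its per-type structure and immediately passes it to a callback in the caller-supplied hotplug_program_ops table. That table is not shown in this hunk, so the following is only a stand-alone sketch of the dispatch pattern, using hypothetical demo_* names rather than the real PCI structures:

#include <stdio.h>

struct demo_type0 { unsigned int cache_line_size; };
struct demo_type2 { unsigned int unc_err_mask_and; };

/* one callback per record type, supplied by whoever drives the parser */
struct demo_program_ops {
	void (*program_type0)(const struct demo_type0 *hpp);
	void (*program_type2)(const struct demo_type2 *hpx);
};

static void demo_program_type0(const struct demo_type0 *hpp)
{
	printf("type 0: cache_line_size=%u\n", hpp->cache_line_size);
}

static void demo_program_type2(const struct demo_type2 *hpx)
{
	printf("type 2: unc_err_mask_and=%#x\n", hpx->unc_err_mask_and);
}

static const struct demo_program_ops demo_ops = {
	.program_type0 = demo_program_type0,
	.program_type2 = demo_program_type2,
};

/* decode a record, then program it right away instead of returning it */
static void demo_run(const struct demo_program_ops *ops)
{
	struct demo_type0 hpp0 = { .cache_line_size = 64 };
	struct demo_type2 hpx2 = { .unc_err_mask_and = 0xfff00000 };

	ops->program_type0(&hpp0);
	ops->program_type2(&hpx2);
}

int main(void)
{
	demo_run(&demo_ops);
	return 0;
}

Programming each record as soon as it is decoded is what lets Type 3 records, which carry a variable number of register descriptors, be supported without growing a fixed-size return structure.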
-static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
+static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package, *fields;
+ struct hpp_type0 hpp0;
int i;
- memset(hpp, 0, sizeof(struct hotplug_params));
+ memset(&hpp0, 0, sizeof(hpp0));
status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -311,12 +377,13 @@ static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
}
}
- hpp->t0 = &hpp->type0_data;
- hpp->t0->revision = 1;
- hpp->t0->cache_line_size = fields[0].integer.value;
- hpp->t0->latency_timer = fields[1].integer.value;
- hpp->t0->enable_serr = fields[2].integer.value;
- hpp->t0->enable_perr = fields[3].integer.value;
+ hpp0.revision = 1;
+ hpp0.cache_line_size = fields[0].integer.value;
+ hpp0.latency_timer = fields[1].integer.value;
+ hpp0.enable_serr = fields[2].integer.value;
+ hpp0.enable_perr = fields[3].integer.value;
+
+ hp_ops->program_type0(dev, &hpp0);
exit:
kfree(buffer.pointer);
@@ -328,7 +395,8 @@ exit:
- * @dev - the pci_dev for which we want parameters
- * @hpp - allocated by the caller
+ * @dev: the pci_dev for which we want parameters
+ * @hp_ops: hotplug operations for programming the parameters
*/
-int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
+int pci_acpi_program_hp_params(struct pci_dev *dev,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
acpi_handle handle, phandle;
@@ -351,10 +419,10 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
* this pci dev.
*/
while (handle) {
- status = acpi_run_hpx(handle, hpp);
+ status = acpi_run_hpx(dev, handle, hp_ops);
if (ACPI_SUCCESS(status))
return 0;
- status = acpi_run_hpp(handle, hpp);
+ status = acpi_run_hpp(dev, handle, hp_ops);
if (ACPI_SUCCESS(status))
return 0;
if (acpi_is_root_bridge(handle))
@@ -366,7 +434,6 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
}
return -ENODEV;
}
-EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
* pciehp_is_native - Check whether a hotplug port is handled by the OS
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 71853befd435..cae630fe6387 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -578,7 +578,7 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: Device state not saved by %pF\n",
+ "PCI PM: Device state not saved by %pS\n",
drv->suspend);
}
}
@@ -605,7 +605,7 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: Device state not saved by %pF\n",
+ "PCI PM: Device state not saved by %pS\n",
drv->suspend_late);
goto Fixup;
}
@@ -773,7 +773,7 @@ static int pci_pm_suspend(struct device *dev)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pF\n",
+ "PCI PM: State of device not saved by %pS\n",
pm->suspend);
}
}
@@ -821,7 +821,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pF\n",
+ "PCI PM: State of device not saved by %pS\n",
pm->suspend_noirq);
goto Fixup;
}
@@ -1260,11 +1260,11 @@ static int pci_pm_runtime_suspend(struct device *dev)
* log level.
*/
if (error == -EBUSY || error == -EAGAIN) {
- dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
+ dev_dbg(dev, "can't suspend now (%ps returned %d)\n",
pm->runtime_suspend, error);
return error;
} else if (error) {
- dev_err(dev, "can't suspend (%pf returned %d)\n",
+ dev_err(dev, "can't suspend (%ps returned %d)\n",
pm->runtime_suspend, error);
return error;
}
@@ -1276,7 +1276,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
&& !pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pF\n",
+ "PCI PM: State of device not saved by %pS\n",
pm->runtime_suspend);
return 0;
}
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 66f8a59fadbd..e408099fea52 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -66,20 +66,18 @@ static int __init pci_stub_init(void)
&class, &class_mask);
if (fields < 2) {
- printk(KERN_WARNING
- "pci-stub: invalid id string \"%s\"\n", id);
+ pr_warn("pci-stub: invalid ID string \"%s\"\n", id);
continue;
}
- printk(KERN_INFO
- "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
+ pr_info("pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
vendor, device, subvendor, subdevice, class, class_mask);
rc = pci_add_dynid(&stub_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
- printk(KERN_WARNING
- "pci-stub: failed to add dynamic id (%d)\n", rc);
+ pr_warn("pci-stub: failed to add dynamic ID (%d)\n",
+ rc);
}
return 0;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 25794c27c7a4..6d27475e39b2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1111,8 +1111,7 @@ legacy_io_err:
kfree(b->legacy_io);
b->legacy_io = NULL;
kzalloc_err:
- printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
- return;
+ dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}
void pci_remove_legacy_files(struct pci_bus *b)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7c1b362f599a..8abc843b1615 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -197,8 +197,8 @@ EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
/**
* pci_dev_str_match_path - test if a path string matches a device
- * @dev: the PCI device to test
- * @path: string to match the device against
+ * @dev: the PCI device to test
+ * @path: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) formatted as a
@@ -280,8 +280,8 @@ free_and_exit:
/**
* pci_dev_str_match - test if a string matches a device
- * @dev: the PCI device to test
- * @p: string to match the device against
+ * @dev: the PCI device to test
+ * @p: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) matches a specified
@@ -341,7 +341,7 @@ static int pci_dev_str_match(struct pci_dev *dev, const char *p,
} else {
/*
* PCI Bus, Device, Function IDs are specified
- * (optionally, may include a path of devfns following it)
+ * (optionally, may include a path of devfns following it)
*/
ret = pci_dev_str_match_path(dev, p, &p);
if (ret < 0)
@@ -425,7 +425,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
* Tell if a device supports a given PCI capability.
* Returns the address of the requested capability structure within the
* device's PCI configuration space or 0 in case the device does not
- * support it. Possible values for @cap:
+ * support it. Possible values for @cap include:
*
* %PCI_CAP_ID_PM Power Management
* %PCI_CAP_ID_AGP Accelerated Graphics Port
@@ -450,11 +450,11 @@ EXPORT_SYMBOL(pci_find_capability);
/**
* pci_bus_find_capability - query for devices' capabilities
- * @bus: the PCI bus to query
+ * @bus: the PCI bus to query
* @devfn: PCI device to query
- * @cap: capability code
+ * @cap: capability code
*
- * Like pci_find_capability() but works for pci devices that do not have a
+ * Like pci_find_capability() but works for PCI devices that do not have a
* pci_dev structure set up yet.
*
* Returns the address of the requested capability structure within the
@@ -535,7 +535,7 @@ EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
*
* Returns the address of the requested extended capability structure
* within the device's PCI configuration space or 0 if the device does
- * not support it. Possible values for @cap:
+ * not support it. Possible values for @cap include:
*
* %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
* %PCI_EXT_CAP_ID_VC Virtual Channel
@@ -618,12 +618,13 @@ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
- * pci_find_parent_resource - return resource region of parent bus of given region
+ * pci_find_parent_resource - return resource region of parent bus of given
+ * region
* @dev: PCI device structure contains resources to be searched
* @res: child resource record for which parent is sought
*
- * For given resource region of given device, return the resource
- * region of parent bus the given region is contained in.
+ * For given resource region of given device, return the resource region of
+ * parent bus the given region is contained in.
*/
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res)
@@ -800,7 +801,7 @@ static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
- * given PCI device
+ * given PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
*
@@ -826,7 +827,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state < PCI_D0 || state > PCI_D3hot)
return -EINVAL;
- /* Validate current state:
+ /*
+ * Validate current state:
* We can enter D0 from any state, but we can only go deeper
* to sleep if we're already in a low-power state
*/
@@ -837,14 +839,15 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EINVAL;
}
- /* check if this device supports the desired state */
+ /* Check if this device supports the desired state */
if ((state == PCI_D1 && !dev->d1_support)
|| (state == PCI_D2 && !dev->d2_support))
return -EIO;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- /* If we're (effectively) in D3, force entire word to 0.
+ /*
+ * If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
@@ -867,11 +870,13 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
break;
}
- /* enter specified state */
+ /* Enter specified state */
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- /* Mandatory power management transition delays */
- /* see PCI PM 1.1 5.6.1 table 18 */
+ /*
+ * Mandatory power management transition delays; see PCI PM 1.1
+ * 5.6.1 table 18
+ */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
@@ -1085,16 +1090,18 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
int error;
- /* bound the state we're entering */
+ /* Bound the state we're entering */
if (state > PCI_D3cold)
state = PCI_D3cold;
else if (state < PCI_D0)
state = PCI_D0;
else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
+
/*
- * If the device or the parent bridge do not support PCI PM,
- * ignore the request if we're doing anything other than putting
- * it into D0 (which would only happen on boot).
+ * If the device or the parent bridge does not support PCI
+ * PM, ignore the request if we're doing anything other
+ * than putting it into D0 (which would only happen on
+ * boot).
*/
return 0;
@@ -1104,8 +1111,10 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
__pci_start_power_transition(dev, state);
- /* This device is quirked not to be put into D3, so
- don't put it in D3 */
+ /*
+ * This device is quirked not to be put into D3, so don't put it in
+ * D3
+ */
if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
return 0;
@@ -1127,12 +1136,11 @@ EXPORT_SYMBOL(pci_set_power_state);
* pci_choose_state - Choose the power state of a PCI device
* @dev: PCI device to be suspended
* @state: target sleep state for the whole system. This is the value
- * that is passed to suspend() function.
+ * that is passed to suspend() function.
*
* Returns PCI power state suitable for given device and given system
* message.
*/
-
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
pci_power_t ret;
@@ -1310,8 +1318,9 @@ static void pci_restore_ltr_state(struct pci_dev *dev)
}
/**
- * pci_save_state - save the PCI configuration space of a device before suspending
- * @dev: - PCI device that we're dealing with
+ * pci_save_state - save the PCI configuration space of a device before
+ * suspending
+ * @dev: PCI device that we're dealing with
*/
int pci_save_state(struct pci_dev *dev)
{
@@ -1422,7 +1431,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
/**
* pci_restore_state - Restore the saved state of a PCI device
- * @dev: - PCI device that we're dealing with
+ * @dev: PCI device that we're dealing with
*/
void pci_restore_state(struct pci_dev *dev)
{
@@ -1599,8 +1608,8 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
* pci_reenable_device - Resume abandoned device
* @dev: PCI device to be resumed
*
- * Note this function is a backend of pci_default_resume and is not supposed
- * to be called by normal code, write proper resume handler and use it instead.
+ * NOTE: This function is a backend of pci_default_resume() and is not supposed
+ * to be called by normal code; write a proper resume handler and use it instead.
*/
int pci_reenable_device(struct pci_dev *dev)
{
@@ -1675,9 +1684,9 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
* pci_enable_device_io - Initialize a device for use with IO space
* @dev: PCI device to be initialized
*
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable I/O resources. Wake up the device if it was suspended.
- * Beware, this function can fail.
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O resources. Wake up the device if it was suspended.
+ * Beware, this function can fail.
*/
int pci_enable_device_io(struct pci_dev *dev)
{
@@ -1689,9 +1698,9 @@ EXPORT_SYMBOL(pci_enable_device_io);
* pci_enable_device_mem - Initialize a device for use with Memory space
* @dev: PCI device to be initialized
*
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable Memory resources. Wake up the device if it was suspended.
- * Beware, this function can fail.
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable Memory resources. Wake up the device if it was suspended.
+ * Beware, this function can fail.
*/
int pci_enable_device_mem(struct pci_dev *dev)
{
@@ -1703,12 +1712,12 @@ EXPORT_SYMBOL(pci_enable_device_mem);
* pci_enable_device - Initialize device before it's used by a driver.
* @dev: PCI device to be initialized
*
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable I/O and memory. Wake up the device if it was suspended.
- * Beware, this function can fail.
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ * Beware, this function can fail.
*
- * Note we don't actually enable the device many times if we call
- * this function repeatedly (we just increment the count).
+ * Note we don't actually enable the device many times if we call
+ * this function repeatedly (we just increment the count).
*/
int pci_enable_device(struct pci_dev *dev)
{
@@ -1717,8 +1726,8 @@ int pci_enable_device(struct pci_dev *dev)
EXPORT_SYMBOL(pci_enable_device);
/*
- * Managed PCI resources. This manages device on/off, intx/msi/msix
- * on/off and BAR regions. pci_dev itself records msi/msix status, so
+ * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
+ * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
* there's no need to track it separately. pci_devres is initialized
* when a device is enabled using managed PCI device enable interface.
*/
@@ -1836,7 +1845,8 @@ int __weak pcibios_add_device(struct pci_dev *dev)
}
/**
- * pcibios_release_device - provide arch specific hooks when releasing device dev
+ * pcibios_release_device - provide arch specific hooks when releasing
+ * device dev
* @dev: the PCI device being released
*
* Permits the platform to provide architecture specific functionality when
@@ -1927,8 +1937,7 @@ EXPORT_SYMBOL(pci_disable_device);
* @dev: the PCIe device reset
* @state: Reset state to enter into
*
- *
- * Sets the PCIe reset state for the device. This is the default
+ * Set the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1942,7 +1951,6 @@ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
* @dev: the PCIe device reset
* @state: Reset state to enter into
*
- *
* Sets the PCI reset state for the device.
*/
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
@@ -2339,7 +2347,8 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
}
/**
- * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
+ * pci_prepare_to_sleep - prepare PCI device for system-wide transition
+ * into a sleep state
* @dev: Device to handle.
*
* Choose the power state appropriate for the device depending on whether
@@ -2367,7 +2376,8 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
EXPORT_SYMBOL(pci_prepare_to_sleep);
/**
- * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
+ * pci_back_from_sleep - turn PCI device on during system-wide transition
+ * into working state
* @dev: Device to handle.
*
* Disable device's system wake-up capability and put it into D0.
@@ -2777,14 +2787,14 @@ void pci_pm_init(struct pci_dev *dev)
dev->d2_support = true;
if (dev->d1_support || dev->d2_support)
- pci_printk(KERN_DEBUG, dev, "supports%s%s\n",
+ pci_info(dev, "supports%s%s\n",
dev->d1_support ? " D1" : "",
dev->d2_support ? " D2" : "");
}
pmc &= PCI_PM_CAP_PME_MASK;
if (pmc) {
- pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n",
+ pci_info(dev, "PME# supported from%s%s%s%s%s\n",
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -2952,16 +2962,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
res->flags = flags;
if (bei <= PCI_EA_BEI_BAR5)
- pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
bei, res, prop);
else if (bei == PCI_EA_BEI_ROM)
- pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
res, prop);
else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
- pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
bei - PCI_EA_BEI_VF_BAR0, res, prop);
else
- pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
bei, res, prop);
out:
@@ -3005,7 +3015,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
/**
* _pci_add_cap_save_buffer - allocate buffer for saving given
- * capability registers
+ * capability registers
* @dev: the PCI device
* @cap: the capability to allocate the buffer for
* @extended: Standard or Extended capability ID
@@ -3186,7 +3196,7 @@ static void pci_disable_acs_redir(struct pci_dev *dev)
}
/**
- * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites
+ * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
*/
static void pci_std_enable_acs(struct pci_dev *dev)
@@ -3609,13 +3619,14 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
EXPORT_SYMBOL_GPL(pci_common_swizzle);
/**
- * pci_release_region - Release a PCI bar
- * @pdev: PCI device whose resources were previously reserved by pci_request_region
- * @bar: BAR to release
+ * pci_release_region - Release a PCI bar
+ * @pdev: PCI device whose resources were previously reserved by
+ * pci_request_region()
+ * @bar: BAR to release
*
- * Releases the PCI I/O and memory resources previously reserved by a
- * successful call to pci_request_region. Call this function only
- * after all use of the PCI regions has ceased.
+ * Releases the PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_region(). Call this function only
+ * after all use of the PCI regions has ceased.
*/
void pci_release_region(struct pci_dev *pdev, int bar)
{
@@ -3637,23 +3648,23 @@ void pci_release_region(struct pci_dev *pdev, int bar)
EXPORT_SYMBOL(pci_release_region);
/**
- * __pci_request_region - Reserved PCI I/O and memory resource
- * @pdev: PCI device whose resources are to be reserved
- * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
- * @exclusive: whether the region access is exclusive or not
+ * __pci_request_region - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource.
+ * @exclusive: whether the region access is exclusive or not
*
- * Mark the PCI region associated with PCI device @pdev BR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
*
- * If @exclusive is set, then the region is marked so that userspace
- * is explicitly not allowed to map the resource via /dev/mem or
- * sysfs MMIO access.
+ * If @exclusive is set, then the region is marked so that userspace
+ * is explicitly not allowed to map the resource via /dev/mem or
+ * sysfs MMIO access.
*
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
*/
static int __pci_request_region(struct pci_dev *pdev, int bar,
const char *res_name, int exclusive)
@@ -3687,18 +3698,18 @@ err_out:
}
/**
- * pci_request_region - Reserve PCI I/O and memory resource
- * @pdev: PCI device whose resources are to be reserved
- * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource
+ * pci_request_region - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
*
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
@@ -3707,31 +3718,6 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
EXPORT_SYMBOL(pci_request_region);
/**
- * pci_request_region_exclusive - Reserved PCI I/O and memory resource
- * @pdev: PCI device whose resources are to be reserved
- * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
- *
- * Mark the PCI region associated with PCI device @pdev BR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
- *
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
- *
- * The key difference that _exclusive makes it that userspace is
- * explicitly not allowed to map the resource via /dev/mem or
- * sysfs.
- */
-int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
- const char *res_name)
-{
- return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
-}
-EXPORT_SYMBOL(pci_request_region_exclusive);
-
-/**
* pci_release_selected_regions - Release selected PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved
* @bars: Bitmask of BARs to be released
@@ -3791,12 +3777,13 @@ int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
/**
- * pci_release_regions - Release reserved PCI I/O and memory resources
- * @pdev: PCI device whose resources were previously reserved by pci_request_regions
+ * pci_release_regions - Release reserved PCI I/O and memory resources
+ * @pdev: PCI device whose resources were previously reserved by
+ * pci_request_regions()
*
- * Releases all PCI I/O and memory resources previously reserved by a
- * successful call to pci_request_regions. Call this function only
- * after all use of the PCI regions has ceased.
+ * Releases all PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_regions(). Call this function only
+ * after all use of the PCI regions has ceased.
*/
void pci_release_regions(struct pci_dev *pdev)
@@ -3806,17 +3793,17 @@ void pci_release_regions(struct pci_dev *pdev)
EXPORT_SYMBOL(pci_release_regions);
/**
- * pci_request_regions - Reserved PCI I/O and memory resources
- * @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * pci_request_regions - Reserve PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @res_name: Name to be associated with resource.
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
*
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
*/
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
@@ -3825,20 +3812,19 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
EXPORT_SYMBOL(pci_request_regions);
/**
- * pci_request_regions_exclusive - Reserved PCI I/O and memory resources
- * @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @res_name: Name to be associated with resource.
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as being reserved
+ * by owner @res_name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
- * pci_request_regions_exclusive() will mark the region so that
- * /dev/mem and the sysfs MMIO access will not be allowed.
+ * pci_request_regions_exclusive() will mark the region so that /dev/mem
+ * and the sysfs MMIO access will not be allowed.
*
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
+ * Returns 0 on success, or %EBUSY on error. A warning message is also
+ * printed on failure.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
@@ -3849,7 +3835,7 @@ EXPORT_SYMBOL(pci_request_regions_exclusive);
/*
* Record the PCI IO range (expressed as CPU physical address + size).
- * Return a negative value if an error has occured, zero otherwise
+ * Return a negative value if an error has occurred, zero otherwise
*/
int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
resource_size_t size)
@@ -3905,14 +3891,14 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
}
/**
- * pci_remap_iospace - Remap the memory mapped I/O space
- * @res: Resource describing the I/O space
- * @phys_addr: physical address of range to be mapped
+ * pci_remap_iospace - Remap the memory mapped I/O space
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
*
- * Remap the memory mapped I/O space described by the @res
- * and the CPU physical address @phys_addr into virtual address space.
- * Only architectures that have memory mapped IO functions defined
- * (and the PCI_IOBASE value defined) should call this function.
+ * Remap the memory mapped I/O space described by the @res and the CPU
+ * physical address @phys_addr into virtual address space. Only
+ * architectures that have memory mapped IO functions defined (and the
+ * PCI_IOBASE value defined) should call this function.
*/
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
@@ -3928,8 +3914,10 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
pgprot_device(PAGE_KERNEL));
#else
- /* this architecture does not have memory mapped I/O space,
- so this function should never be called */
+ /*
+ * This architecture does not have memory mapped I/O space,
+ * so this function should never be called
+ */
WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
return -ENODEV;
#endif
@@ -3937,12 +3925,12 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
EXPORT_SYMBOL(pci_remap_iospace);
/**
- * pci_unmap_iospace - Unmap the memory mapped I/O space
- * @res: resource to be unmapped
+ * pci_unmap_iospace - Unmap the memory mapped I/O space
+ * @res: resource to be unmapped
*
- * Unmap the CPU virtual address @res from virtual address space.
- * Only architectures that have memory mapped IO functions defined
- * (and the PCI_IOBASE value defined) should call this function.
+ * Unmap the CPU virtual address @res from virtual address space. Only
+ * architectures that have memory mapped IO functions defined (and the
+ * PCI_IOBASE value defined) should call this function.
*/
void pci_unmap_iospace(struct resource *res)
{
@@ -4185,7 +4173,7 @@ int pci_set_cacheline_size(struct pci_dev *dev)
if (cacheline_size == pci_cache_line_size)
return 0;
- pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n",
+ pci_info(dev, "cache line size of %d is not supported\n",
pci_cache_line_size << 2);
return -EINVAL;
@@ -4288,7 +4276,7 @@ EXPORT_SYMBOL(pci_clear_mwi);
* @pdev: the PCI device to operate on
* @enable: boolean: whether to enable or disable PCI INTx
*
- * Enables/disables PCI INTx for device dev
+ * Enables/disables PCI INTx for device @pdev
*/
void pci_intx(struct pci_dev *pdev, int enable)
{
@@ -4364,9 +4352,8 @@ done:
* pci_check_and_mask_intx - mask INTx on pending interrupt
* @dev: the PCI device to operate on
*
- * Check if the device dev has its INTx line asserted, mask it and
- * return true in that case. False is returned if no interrupt was
- * pending.
+ * Check if the device dev has its INTx line asserted, mask it and return
+ * true in that case. False is returned if no interrupt was pending.
*/
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
@@ -4378,9 +4365,9 @@ EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
* pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
* @dev: the PCI device to operate on
*
- * Check if the device dev has its INTx line asserted, unmask it if not
- * and return true. False is returned and the mask remains active if
- * there was still an interrupt pending.
+ * Check if the device dev has its INTx line asserted, unmask it if not and
+ * return true. False is returned and the mask remains active if there was
+ * still an interrupt pending.
*/
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
@@ -4389,7 +4376,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
/**
- * pci_wait_for_pending_transaction - waits for pending transaction
+ * pci_wait_for_pending_transaction - wait for pending transaction
* @dev: the PCI device to operate on
*
* Return 0 if transaction is pending, 1 otherwise.
@@ -4447,7 +4434,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
/**
* pcie_has_flr - check if a device supports function level resets
- * @dev: device to check
+ * @dev: device to check
*
* Returns true if the device advertises support for PCIe function level
* resets.
@@ -4466,7 +4453,7 @@ EXPORT_SYMBOL_GPL(pcie_has_flr);
/**
* pcie_flr - initiate a PCIe function level reset
- * @dev: device to reset
+ * @dev: device to reset
*
* Initiate a function level reset on @dev. The caller should ensure the
* device supports FLR before calling this function, e.g. by using the
@@ -4810,6 +4797,7 @@ static void pci_dev_restore(struct pci_dev *dev)
*
* The device function is presumed to be unused and the caller is holding
* the device mutex lock when this function is called.
+ *
* Resetting the device will make the contents of PCI configuration space
* random, so any caller of this must be prepared to reinitialise the
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
@@ -5373,8 +5361,8 @@ EXPORT_SYMBOL_GPL(pci_reset_bus);
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
*
- * Returns mmrbc: maximum designed memory read count in bytes
- * or appropriate error value.
+ * Returns mmrbc: maximum designed memory read count in bytes or
+ * appropriate error value.
*/
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
@@ -5396,8 +5384,8 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
* pcix_get_mmrbc - get PCI-X maximum memory read byte count
* @dev: PCI device to query
*
- * Returns mmrbc: maximum memory read count in bytes
- * or appropriate error value.
+ * Returns mmrbc: maximum memory read count in bytes or appropriate error
+ * value.
*/
int pcix_get_mmrbc(struct pci_dev *dev)
{
@@ -5421,7 +5409,7 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
* @mmrbc: maximum memory read count in bytes
* valid values are 512, 1024, 2048, 4096
*
- * If possible sets maximum memory read byte count, some bridges have erratas
+ * If possible sets maximum memory read byte count, some bridges have errata
* that prevent this.
*/
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
@@ -5466,8 +5454,7 @@ EXPORT_SYMBOL(pcix_set_mmrbc);
* pcie_get_readrq - get PCI Express read request size
* @dev: PCI device to query
*
- * Returns maximum memory read request in bytes
- * or appropriate error value.
+ * Returns maximum memory read request in bytes or appropriate error value.
*/
int pcie_get_readrq(struct pci_dev *dev)
{
@@ -5495,10 +5482,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
return -EINVAL;
/*
- * If using the "performance" PCIe config, we clamp the
- * read rq size to the max packet size to prevent the
- * host bridge generating requests larger than we can
- * cope with
+ * If using the "performance" PCIe config, we clamp the read rq
+ * size to the max packet size to keep the host bridge from
+ * generating requests larger than we can cope with.
*/
if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
int mps = pcie_get_mps(dev);
@@ -6144,6 +6130,7 @@ static int of_pci_bus_find_domain_nr(struct device *parent)
if (parent)
domain = of_get_pci_domain_nr(parent->of_node);
+
/*
* Check DT domain and use_dt_domains values.
*
@@ -6262,11 +6249,9 @@ static int __init pci_setup(char *str)
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
- disable_acs_redir_param =
- kstrdup(str + 18, GFP_KERNEL);
+ disable_acs_redir_param = str + 18;
} else {
- printk(KERN_ERR "PCI: Unknown option `%s'\n",
- str);
+ pr_err("PCI: Unknown option `%s'\n", str);
}
}
str = k;
@@ -6274,3 +6259,19 @@ static int __init pci_setup(char *str)
return 0;
}
early_param("pci", pci_setup);
+
+/*
+ * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
+ * to data in the __initdata section which will be freed after the init
+ * sequence is complete. We can't allocate memory in pci_setup() because some
+ * architectures do not have any memory allocation service available during
+ * an early_param() call. So we allocate memory and copy the variable here
+ * before the init section is freed.
+ */
+static int __init pci_realloc_setup_params(void)
+{
+ disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+
+ return 0;
+}
+pure_initcall(pci_realloc_setup_params);
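The comment above spells out the ordering constraint: pci_setup() runs as an early_param() handler when no allocator can be assumed, so it only records a pointer into boot-time data, and a later initcall duplicates the string before that data is released. The same remember-now, copy-later shape in ordinary user-space C, with hypothetical names and strdup() standing in for kstrdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *disable_param;		/* must outlive the early phase */

/* early phase: no allocator assumed, just remember where the value lives */
static void parse_early(char *arg)
{
	if (strncmp(arg, "disable_acs_redir=", 18) == 0)
		disable_param = arg + 18;
}

/* later phase: allocation works now, copy before the source goes away */
static void realloc_setup_params(void)
{
	if (disable_param)
		disable_param = strdup(disable_param);
}

int main(void)
{
	char bootarg[] = "disable_acs_redir=0000:01:00.0";

	parse_early(bootarg);
	realloc_setup_params();
	memset(bootarg, 0, sizeof(bootarg));	/* original storage is gone */
	printf("kept: %s\n", disable_param ? disable_param : "(none)");
	free(disable_param);
	return 0;
}

In the kernel version, pure_initcall() provides that later phase: it runs once normal allocation is available and before the init sections mentioned in the comment are freed.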
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d994839a3e24..9cb99380c61e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -597,7 +597,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
void pci_aer_clear_device_status(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
+static inline void pci_aer_init(struct pci_dev *d) { }
static inline void pci_aer_exit(struct pci_dev *d) { }
static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 5cbdbca904ac..362eb8cfa53b 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -142,3 +142,11 @@ config PCIE_PTM
This is only useful if you have devices that support PTM, but it
is safe to enable even if you don't.
+
+config PCIE_BW
+ bool "PCI Express Bandwidth Change Notification"
+ depends on PCIEPORTBUS
+ help
+ This enables PCI Express Bandwidth Change Notification. If
+ you know link width or rate changes occur only to correct
+ unreliable links, you may answer Y.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index f1d7bc1e5efa..efb9d2e71e9e 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -3,7 +3,6 @@
# Makefile for PCI Express features and port driver
pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o
-pcieportdrv-y += bw_notification.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
@@ -13,3 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
obj-$(CONFIG_PCIE_PTM) += ptm.o
+obj-$(CONFIG_PCIE_BW) += bw_notification.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index f8fc2114ad39..b45bc47d04fe 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -12,6 +12,9 @@
* Andrew Patterson <andrew.patterson@hp.com>
*/
+#define pr_fmt(fmt) "AER: " fmt
+#define dev_fmt pr_fmt
+
#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
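Defining pr_fmt() and dev_fmt at the top of the file makes every pr_*() and pci_*()/dev_*() call in aer.c carry an "AER: " prefix automatically, which is why later hunks in this file drop the hand-written "AER:" from individual format strings. A minimal stand-alone sketch of the same call-site prefixing trick, with hypothetical log_fmt/log_info macros and printf standing in for the kernel loggers:

#include <stdio.h>

#define log_fmt(fmt)		"AER: " fmt
/* ", ##__VA_ARGS__" is the GNU extension the kernel logging macros also use */
#define log_info(fmt, ...)	printf(log_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints "AER: enabled with IRQ 25" without repeating the prefix here */
	log_info("enabled with IRQ %d\n", 25);
	return 0;
}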
@@ -779,10 +782,11 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
u8 bus = info->id >> 8;
u8 devfn = info->id & 0xff;
- pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
- info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity],
- pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -964,8 +968,7 @@ static bool find_source_device(struct pci_dev *parent,
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
if (!e_info->error_dev_num) {
- pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n",
- e_info->id);
+ pci_info(parent, "can't find device of ID%04x\n", e_info->id);
return false;
}
return true;
@@ -1377,25 +1380,24 @@ static int aer_probe(struct pcie_device *dev)
int status;
struct aer_rpc *rpc;
struct device *device = &dev->device;
+ struct pci_dev *port = dev->port;
rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
- if (!rpc) {
- dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
+ if (!rpc)
return -ENOMEM;
- }
- rpc->rpd = dev->port;
+
+ rpc->rpd = port;
set_service_data(dev, rpc);
status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
IRQF_SHARED, "aerdrv", dev);
if (status) {
- dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
- dev->irq);
+ pci_err(port, "request AER IRQ %d failed\n", dev->irq);
return status;
}
aer_enable_rootport(rpc);
- dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
+ pci_info(port, "enabled with IRQ %d\n", dev->irq);
return 0;
}
@@ -1419,7 +1421,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
rc = pci_bus_error_reset(dev);
- pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
+ pci_info(dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 95d4759664b3..043b8b0cfcc5 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -12,6 +12,8 @@
* Huang Ying <ying.huang@intel.com>
*/
+#define dev_fmt(fmt) "aer_inject: " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
@@ -332,14 +334,14 @@ static int aer_inject(struct aer_error_inj *einj)
return -ENODEV;
rpdev = pcie_find_root_port(dev);
if (!rpdev) {
- pci_err(dev, "aer_inject: Root port not found\n");
+ pci_err(dev, "Root port not found\n");
ret = -ENODEV;
goto out_put;
}
pos_cap_err = dev->aer_cap;
if (!pos_cap_err) {
- pci_err(dev, "aer_inject: Device doesn't support AER\n");
+ pci_err(dev, "Device doesn't support AER\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
@@ -350,7 +352,7 @@ static int aer_inject(struct aer_error_inj *einj)
rp_pos_cap_err = rpdev->aer_cap;
if (!rp_pos_cap_err) {
- pci_err(rpdev, "aer_inject: Root port doesn't support AER\n");
+ pci_err(rpdev, "Root port doesn't support AER\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
@@ -398,14 +400,14 @@ static int aer_inject(struct aer_error_inj *einj)
if (!aer_mask_override && einj->cor_status &&
!(einj->cor_status & ~cor_mask)) {
ret = -EINVAL;
- pci_warn(dev, "aer_inject: The correctable error(s) is masked by device\n");
+ pci_warn(dev, "The correctable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
if (!aer_mask_override && einj->uncor_status &&
!(einj->uncor_status & ~uncor_mask)) {
ret = -EINVAL;
- pci_warn(dev, "aer_inject: The uncorrectable error(s) is masked by device\n");
+ pci_warn(dev, "The uncorrectable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
@@ -460,19 +462,17 @@ static int aer_inject(struct aer_error_inj *einj)
if (device) {
edev = to_pcie_device(device);
if (!get_service_data(edev)) {
- dev_warn(&edev->device,
- "aer_inject: AER service is not initialized\n");
+ pci_warn(edev->port, "AER service is not initialized\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
- dev_info(&edev->device,
- "aer_inject: Injecting errors %08x/%08x into device %s\n",
+ pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
local_irq_disable();
generic_handle_irq(edev->irq);
local_irq_enable();
} else {
- pci_err(rpdev, "aer_inject: AER device not found\n");
+ pci_err(rpdev, "AER device not found\n");
ret = -ENODEV;
}
out_put:
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 727e3c1ef9a4..fd4cb75088f9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -196,6 +196,36 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_capable = (blacklist) ? 0 : capable;
}
+static bool pcie_retrain_link(struct pcie_link_state *link)
+{
+ struct pci_dev *parent = link->pdev;
+ unsigned long end_jiffies;
+ u16 reg16;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ reg16 |= PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ if (parent->clear_retrain_link) {
+ /*
+ * Due to an erratum in some devices the Retrain Link bit
+ * needs to be cleared again manually to allow the link
+ * training to succeed.
+ */
+ reg16 &= ~PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ }
+
+ /* Wait for link training end. Break out after waiting for timeout */
+ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+ do {
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+ msleep(1);
+ } while (time_before(jiffies, end_jiffies));
+ return !(reg16 & PCI_EXP_LNKSTA_LT);
+}
+
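pcie_retrain_link() above is a bounded poll: set the Retrain Link bit (and, on parts with the erratum, clear it again), then sample the Link Training status bit until it drops or LINK_RETRAIN_TIMEOUT expires, sleeping about a millisecond between samples, and finally report which of the two happened. The same idiom in portable C, with a fake status predicate instead of a config-space read and all names hypothetical:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* stand-in for reading the Link Training bit out of the link status register */
static bool link_training_active(int *remaining_polls)
{
	return --(*remaining_polls) > 0;
}

static double elapsed_sec(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) +
	       (now.tv_nsec - start->tv_nsec) / 1e9;
}

/* returns true if training finished before the deadline, false otherwise */
static bool wait_for_retrain(int *remaining_polls, double timeout_sec)
{
	struct timespec start;
	bool active;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		active = link_training_active(remaining_polls);
		if (!active)
			break;
	} while (elapsed_sec(&start) < timeout_sec);

	return !active;
}

int main(void)
{
	int remaining_polls = 5;

	printf("retrained: %s\n",
	       wait_for_retrain(&remaining_polls, 1.0) ? "yes" : "no");
	return 0;
}

Factoring the loop into a helper also lets the clear_retrain_link erratum handling sit next to the code that sets the bit, rather than complicating the caller in pcie_aspm_configure_common_clock().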
/*
* pcie_aspm_configure_common_clock: check if the 2 ends of a link
* could use common clock. If they are, configure them to use the
@@ -205,7 +235,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
int same_clock = 1;
u16 reg16, parent_reg, child_reg[8];
- unsigned long start_jiffies;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/*
@@ -263,21 +292,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
reg16 &= ~PCI_EXP_LNKCTL_CCC;
pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
- /* Retrain link */
- reg16 |= PCI_EXP_LNKCTL_RL;
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
-
- /* Wait for link training end. Break out after waiting for timeout */
- start_jiffies = jiffies;
- for (;;) {
- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
- break;
- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
- break;
- msleep(1);
- }
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ if (pcie_retrain_link(link))
return;
/* Training failed. Restore common clock configurations */
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index 4fa9e3523ee1..77e685771487 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -107,11 +107,25 @@ static void pcie_bandwidth_notification_remove(struct pcie_device *srv)
free_irq(srv->irq, srv);
}
+static int pcie_bandwidth_notification_suspend(struct pcie_device *srv)
+{
+ pcie_disable_link_bandwidth_notification(srv->port);
+ return 0;
+}
+
+static int pcie_bandwidth_notification_resume(struct pcie_device *srv)
+{
+ pcie_enable_link_bandwidth_notification(srv->port);
+ return 0;
+}
+
static struct pcie_port_service_driver pcie_bandwidth_notification_driver = {
.name = "pcie_bw_notification",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_BWNOTIF,
.probe = pcie_bandwidth_notification_probe,
+ .suspend = pcie_bandwidth_notification_suspend,
+ .resume = pcie_bandwidth_notification_resume,
.remove = pcie_bandwidth_notification_remove,
};
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 7b77754a82de..a32ec3487a8d 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -6,6 +6,8 @@
* Copyright (C) 2016 Intel Corp.
*/
+#define dev_fmt(fmt) "DPC: " fmt
+
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -100,7 +102,6 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
{
unsigned long timeout = jiffies + HZ;
struct pci_dev *pdev = dpc->dev->port;
- struct device *dev = &dpc->dev->device;
u16 cap = dpc->cap_pos, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
@@ -110,7 +111,7 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
}
if (status & PCI_EXP_DPC_RP_BUSY) {
- dev_warn(dev, "DPC root port still busy\n");
+ pci_warn(pdev, "root port still busy\n");
return -EBUSY;
}
return 0;
@@ -148,7 +149,6 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
{
- struct device *dev = &dpc->dev->device;
struct pci_dev *pdev = dpc->dev->port;
u16 cap = dpc->cap_pos, dpc_status, first_error;
u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
@@ -156,13 +156,13 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
- dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
+ pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
status, mask);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
- dev_err(dev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
+ pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
sev, syserr, exc);
/* Get First Error Pointer */
@@ -171,7 +171,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
if ((status & ~mask) & (1 << i))
- dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
+ pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
first_error == i ? " (First)" : "");
}
@@ -185,18 +185,18 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
&dw2);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
&dw3);
- dev_err(dev, "TLP Header: %#010x %#010x %#010x %#010x\n",
+ pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
dw0, dw1, dw2, dw3);
if (dpc->rp_log_size < 5)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
- dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log);
+ pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
for (i = 0; i < dpc->rp_log_size - 5; i++) {
pci_read_config_dword(pdev,
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
- dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
+ pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
@@ -229,18 +229,17 @@ static irqreturn_t dpc_handler(int irq, void *context)
struct aer_err_info info;
struct dpc_dev *dpc = context;
struct pci_dev *pdev = dpc->dev->port;
- struct device *dev = &dpc->dev->device;
u16 cap = dpc->cap_pos, status, source, reason, ext_reason;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
- dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n",
+ pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
status, source);
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
- dev_warn(dev, "DPC %s detected\n",
+ pci_warn(pdev, "%s detected\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
(reason == 2) ? "ERR_FATAL" :
@@ -307,7 +306,7 @@ static int dpc_probe(struct pcie_device *dev)
dpc_handler, IRQF_SHARED,
"pcie-dpc", dpc);
if (status) {
- dev_warn(device, "request IRQ%d failed: %d\n", dev->irq,
+ pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
status);
return status;
}
@@ -319,7 +318,7 @@ static int dpc_probe(struct pcie_device *dev)
if (dpc->rp_extensions) {
dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) {
- dev_err(device, "RP PIO log size %u is invalid\n",
+ pci_err(pdev, "RP PIO log size %u is invalid\n",
dpc->rp_log_size);
dpc->rp_log_size = 0;
}
@@ -328,11 +327,11 @@ static int dpc_probe(struct pcie_device *dev)
ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
- dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
- cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
- FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
- FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
- FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
+ pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
+ cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
+ FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
+ FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
+ FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
return status;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 54d593d10396..f38e6c19dd50 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -7,6 +7,8 @@
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*/
+#define dev_fmt(fmt) "PME: " fmt
+
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -194,14 +196,14 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
* assuming that the PME was reported by a PCIe-PCI bridge that
* used devfn different from zero.
*/
- pci_dbg(port, "PME interrupt generated for non-existent device %02x:%02x.%d\n",
- busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n",
+ busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
found = pcie_pme_from_pci_bridge(bus, 0);
}
out:
if (!found)
- pci_dbg(port, "Spurious native PME interrupt!\n");
+ pci_info(port, "Spurious native interrupt!\n");
}
/**
@@ -341,7 +343,7 @@ static int pcie_pme_probe(struct pcie_device *srv)
return ret;
}
- pci_info(port, "Signaling PME with IRQ %d\n", srv->irq);
+ pci_info(port, "Signaling with IRQ %d\n", srv->irq);
pcie_pme_mark_devices(port);
pcie_pme_interrupt_enable(port, true);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 1d50dc58ac40..944827a8c7d3 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -49,7 +49,11 @@ int pcie_dpc_init(void);
static inline int pcie_dpc_init(void) { return 0; }
#endif
+#ifdef CONFIG_PCIE_BW
int pcie_bandwidth_notification_init(void);
+#else
+static inline int pcie_bandwidth_notification_init(void) { return 0; }
+#endif
/* Port Type */
#define PCIE_ANY_PORT (~0)
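
The new pcie_bandwidth_notification_init() stub follows the usual Kconfig-stub idiom for optional port services: when the option is off, callers get a no-op static inline instead of sprouting #ifdefs. A generic sketch of the idiom (names are illustrative, not from the tree):

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_init(void);		/* real implementation built in */
#else
static inline int example_feature_init(void) { return 0; }
#endif

/* Callers stay #ifdef-free: */
static void example_register_services(void)
{
	example_feature_init();
}
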
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 7d04f9d087a6..1b330129089f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -55,7 +55,8 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
* 7.8.2, 7.10.10, 7.31.2.
*/
- if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
+ PCIE_PORT_SERVICE_BWNOTIF)) {
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
*pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
nvec = *pme + 1;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 7e12d0163863..0e8e2c186f50 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -317,7 +317,7 @@ fail:
res->flags = 0;
out:
if (res->flags)
- pci_printk(KERN_DEBUG, dev, "reg 0x%x: %pR\n", pos, res);
+ pci_info(dev, "reg 0x%x: %pR\n", pos, res);
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
@@ -435,7 +435,7 @@ static void pci_read_bridge_io(struct pci_bus *child)
region.start = base;
region.end = limit + io_granularity - 1;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
+ pci_info(dev, " bridge window %pR\n", res);
}
}
@@ -457,7 +457,7 @@ static void pci_read_bridge_mmio(struct pci_bus *child)
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
+ pci_info(dev, " bridge window %pR\n", res);
}
}
@@ -510,7 +510,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
+ pci_info(dev, " bridge window %pR\n", res);
}
}
@@ -540,8 +540,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
if (res && res->flags) {
pci_bus_add_resource(child, res,
PCI_SUBTRACTIVE_DECODE);
- pci_printk(KERN_DEBUG, dev,
- " bridge window %pR (subtractive decode)\n",
+ pci_info(dev, " bridge window %pR (subtractive decode)\n",
res);
}
}
@@ -586,16 +585,10 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(to_pci_host_bridge(dev));
}
-struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
- struct pci_host_bridge *bridge;
-
- bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
- if (!bridge)
- return NULL;
-
INIT_LIST_HEAD(&bridge->windows);
- bridge->dev.release = pci_release_host_bridge_dev;
+ INIT_LIST_HEAD(&bridge->dma_ranges);
/*
* We assume we can manage these PCIe features. Some systems may
@@ -608,6 +601,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
bridge->native_ltr = 1;
+}
+
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
+ if (!bridge)
+ return NULL;
+
+ pci_init_host_bridge(bridge);
+ bridge->dev.release = pci_release_host_bridge_dev;
return bridge;
}
@@ -622,7 +627,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
if (!bridge)
return NULL;
- INIT_LIST_HEAD(&bridge->windows);
+ pci_init_host_bridge(bridge);
bridge->dev.release = devm_pci_release_host_bridge_dev;
return bridge;
@@ -632,6 +637,7 @@ EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
pci_free_resource_list(&bridge->windows);
+ pci_free_resource_list(&bridge->dma_ranges);
kfree(bridge);
}
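
With the new dma_ranges list initialized in pci_init_host_bridge() and freed here, a host controller driver can record its inbound (DMA) windows directly on the bridge instead of a private list. A hypothetical driver-side helper, as a sketch only (example_add_dma_range() is not a real function):

static void example_add_dma_range(struct pci_host_bridge *bridge,
				  struct resource *res,
				  resource_size_t cpu_to_bus_offset)
{
	/* Queue an inbound window on the bridge's dma_ranges list */
	pci_add_resource_offset(&bridge->dma_ranges, res, cpu_to_bus_offset);
}
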
@@ -1081,6 +1087,36 @@ static void pci_enable_crs(struct pci_dev *pdev)
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
unsigned int available_buses);
+/**
+ * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus
+ * numbers from EA capability.
+ * @dev: Bridge
+ * @sec: updated with secondary bus number from EA
+ * @sub: updated with subordinate bus number from EA
+ *
+ * If @dev is a bridge with EA capability, update @sec and @sub with
+ * fixed bus numbers from the capability and return true. Otherwise,
+ * return false.
+ */
+static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
+{
+ int ea, offset;
+ u32 dw;
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+ return false;
+
+ /* find PCI EA capability in list */
+ ea = pci_find_capability(dev, PCI_CAP_ID_EA);
+ if (!ea)
+ return false;
+
+ offset = ea + PCI_EA_FIRST_ENT;
+ pci_read_config_dword(dev, offset, &dw);
+ *sec = dw & PCI_EA_SEC_BUS_MASK;
+ *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ return true;
+}
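
For reference, the dword read above is the bridge-only fixed Secondary/Subordinate Bus Numbers register that immediately follows the EA capability header. Assuming the usual mask values (secondary in bits 7:0, subordinate in bits 15:8), the decode amounts to:

/* Sketch only: the same decode as above, with the assumed layout spelled out */
static void example_decode_ea_busnrs(u32 dw, u8 *sec, u8 *sub)
{
	*sec = dw & 0xff;		/* PCI_EA_SEC_BUS_MASK, assumed 0xff */
	*sub = (dw >> 8) & 0xff;	/* PCI_EA_SUB_BUS_MASK >> PCI_EA_SUB_BUS_SHIFT */
}
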
/*
* pci_scan_bridge_extend() - Scan buses behind a bridge
@@ -1115,6 +1151,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
u16 bctl;
u8 primary, secondary, subordinate;
int broken = 0;
+ bool fixed_buses;
+ u8 fixed_sec, fixed_sub;
+ int next_busnr;
/*
* Make sure the bridge is powered on to be able to access config
@@ -1214,17 +1253,24 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
/* Clear errors */
pci_write_config_word(dev, PCI_STATUS, 0xffff);
+ /* Read bus numbers from EA Capability (if present) */
+ fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub);
+ if (fixed_buses)
+ next_busnr = fixed_sec;
+ else
+ next_busnr = max + 1;
+
/*
* Prevent assigning a bus number that already exists.
* This can happen when a bridge is hot-plugged, so in this
* case we only re-scan this bus.
*/
- child = pci_find_bus(pci_domain_nr(bus), max+1);
+ child = pci_find_bus(pci_domain_nr(bus), next_busnr);
if (!child) {
- child = pci_add_new_bus(bus, dev, max+1);
+ child = pci_add_new_bus(bus, dev, next_busnr);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max+1,
+ pci_bus_insert_busn_res(child, next_busnr,
bus->busn_res.end);
}
max++;
@@ -1285,7 +1331,13 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
max += i;
}
- /* Set subordinate bus number to its real value */
+ /*
+ * Set subordinate bus number to its real value.
+ * If a fixed subordinate bus number exists in the EA
+ * capability, use it.
+ */
+ if (fixed_buses)
+ max = fixed_sub;
pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
@@ -1690,7 +1742,7 @@ int pci_setup_device(struct pci_dev *dev)
dev->revision = class & 0xff;
dev->class = class >> 8; /* upper 3 bytes */
- pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n",
+ pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
dev->vendor, dev->device, dev->hdr_type, dev->class);
if (pci_early_dump)
@@ -2026,6 +2078,119 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
*/
}
+static u16 hpx3_device_type(struct pci_dev *dev)
+{
+ u16 pcie_type = pci_pcie_type(dev);
+ const int pcie_to_hpx3_type[] = {
+ [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
+ [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
+ [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
+ [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC,
+ [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT,
+ [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM,
+ [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM,
+ [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE,
+ [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
+ };
+
+ if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
+ return 0;
+
+ return pcie_to_hpx3_type[pcie_type];
+}
+
+static u8 hpx3_function_type(struct pci_dev *dev)
+{
+ if (dev->is_virtfn)
+ return HPX_FN_SRIOV_VIRT;
+ else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
+ return HPX_FN_SRIOV_PHYS;
+ else
+ return HPX_FN_NORMAL;
+}
+
+static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
+{
+ u8 cap_ver = hpx3_cap_id & 0xf;
+
+ if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
+ return true;
+ else if (cap_ver == pcie_cap_id)
+ return true;
+
+ return false;
+}
+
+static void program_hpx_type3_register(struct pci_dev *dev,
+ const struct hpx_type3 *reg)
+{
+ u32 match_reg, write_reg, header, orig_value;
+ u16 pos;
+
+ if (!(hpx3_device_type(dev) & reg->device_type))
+ return;
+
+ if (!(hpx3_function_type(dev) & reg->function_type))
+ return;
+
+ switch (reg->config_space_location) {
+ case HPX_CFG_PCICFG:
+ pos = 0;
+ break;
+ case HPX_CFG_PCIE_CAP:
+ pos = pci_find_capability(dev, reg->pci_exp_cap_id);
+ if (pos == 0)
+ return;
+
+ break;
+ case HPX_CFG_PCIE_CAP_EXT:
+ pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
+ if (pos == 0)
+ return;
+
+ pci_read_config_dword(dev, pos, &header);
+ if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
+ reg->pci_exp_cap_ver))
+ return;
+
+ break;
+ case HPX_CFG_VEND_CAP: /* Fall through */
+ case HPX_CFG_DVSEC: /* Fall through */
+ default:
+ pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
+ return;
+ }
+
+ pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
+
+ if ((match_reg & reg->match_mask_and) != reg->match_value)
+ return;
+
+ pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
+ orig_value = write_reg;
+ write_reg &= reg->reg_mask_and;
+ write_reg |= reg->reg_mask_or;
+
+ if (orig_value == write_reg)
+ return;
+
+ pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
+
+ pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n",
+ pos, orig_value, write_reg);
+}
+
+static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx3)
+{
+ if (!hpx3)
+ return;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ program_hpx_type3_register(dev, hpx3);
+}
+
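
To make the descriptor-driven flow above concrete, here is a hypothetical _HPX type 3 record as program_hpx_type3_register() would consume it. The field values are invented for illustration (real records come from the firmware's _HPX method), and the struct hpx_type3 layout is assumed from the code above:

/* Hypothetical record: set the Extended Tag enable bit on ordinary
 * endpoints whose Device Capabilities register advertises support.
 */
static const struct hpx_type3 example_hpx3 = {
	.device_type		= HPX_TYPE_ENDPOINT,
	.function_type		= HPX_FN_NORMAL,
	.config_space_location	= HPX_CFG_PCIE_CAP,
	.pci_exp_cap_id		= PCI_CAP_ID_EXP,
	.match_offset		= PCI_EXP_DEVCAP,
	.match_mask_and		= PCI_EXP_DEVCAP_EXT_TAG,
	.match_value		= PCI_EXP_DEVCAP_EXT_TAG,
	.reg_offset		= PCI_EXP_DEVCTL,
	.reg_mask_and		= 0xffffffff,
	.reg_mask_or		= PCI_EXP_DEVCTL_EXT_TAG,
};
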
int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
{
struct pci_host_bridge *host;
@@ -2206,8 +2371,12 @@ static void pci_configure_serr(struct pci_dev *dev)
static void pci_configure_device(struct pci_dev *dev)
{
- struct hotplug_params hpp;
- int ret;
+ static const struct hotplug_program_ops hp_ops = {
+ .program_type0 = program_hpp_type0,
+ .program_type1 = program_hpp_type1,
+ .program_type2 = program_hpp_type2,
+ .program_type3 = program_hpx_type3,
+ };
pci_configure_mps(dev);
pci_configure_extended_tags(dev, NULL);
@@ -2216,14 +2385,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_eetlp_prefix(dev);
pci_configure_serr(dev);
- memset(&hpp, 0, sizeof(hpp));
- ret = pci_get_hp_params(dev, &hpp);
- if (ret)
- return;
-
- program_hpp_type2(dev, hpp.t2);
- program_hpp_type1(dev, hpp.t1);
- program_hpp_type0(dev, hpp.t0);
+ pci_acpi_program_hp_params(dev, &hp_ops);
}
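
The hotplug_program_ops table turns the old fixed call sequence inside out: pci_acpi_program_hp_params() (not shown in this diff) is handed the table and invokes whichever callbacks match the record types it finds in _HPP/_HPX. A rough sketch of how a firmware back end might use the table (example_apply() is hypothetical):

static void example_apply(struct pci_dev *dev,
			  const struct hotplug_program_ops *hp_ops,
			  struct hpp_type0 *t0, struct hpx_type3 *t3)
{
	if (t0)
		hp_ops->program_type0(dev, t0);	/* legacy _HPP record */
	if (t3)
		hp_ops->program_type3(dev, t3);	/* _HPX type 3 record */
}
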
static void pci_release_capabilities(struct pci_dev *dev)
@@ -3086,7 +3248,7 @@ int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
conflict = request_resource_conflict(parent_res, res);
if (conflict)
- dev_printk(KERN_DEBUG, &b->dev,
+ dev_info(&b->dev,
"busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
res, pci_is_root_bus(b) ? "domain " : "",
parent_res, conflict->name, conflict);
@@ -3106,8 +3268,7 @@ int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
size = bus_max - res->start + 1;
ret = adjust_resource(res, res->start, size);
- dev_printk(KERN_DEBUG, &b->dev,
- "busn_res: %pR end %s updated to %02x\n",
+ dev_info(&b->dev, "busn_res: %pR end %s updated to %02x\n",
&old_res, ret ? "can not be" : "is", bus_max);
if (!ret && !res->parent)
@@ -3125,8 +3286,7 @@ void pci_bus_release_busn_res(struct pci_bus *b)
return;
ret = release_resource(res);
- dev_printk(KERN_DEBUG, &b->dev,
- "busn_res: %pR %s released\n",
+ dev_info(&b->dev, "busn_res: %pR %s released\n",
res, ret ? "can not be" : "is");
}
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 6fa1627ce08d..445b51db75b0 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -222,6 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
}
/* If arch decided it can't, fall through... */
#endif /* HAVE_PCI_MMAP */
+ /* fall through */
default:
ret = -EINVAL;
break;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a59ad09ce911..0f16acc323c6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -36,7 +36,7 @@ static ktime_t fixup_debug_start(struct pci_dev *dev,
void (*fn)(struct pci_dev *dev))
{
if (initcall_debug)
- pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current));
+ pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current));
return ktime_get();
}
@@ -51,7 +51,7 @@ static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
if (initcall_debug || duration > 10000)
- pci_info(dev, "%pF took %lld usecs\n", fn, duration);
+ pci_info(dev, "%pS took %lld usecs\n", fn, duration);
}
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
@@ -159,8 +159,7 @@ static int __init pci_apply_final_quirks(void)
u8 tmp;
if (pci_cache_line_size)
- printk(KERN_DEBUG "PCI: CLS %u bytes\n",
- pci_cache_line_size << 2);
+ pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);
pci_apply_fixup_final_quirks = true;
for_each_pci_dev(dev) {
@@ -177,16 +176,16 @@ static int __init pci_apply_final_quirks(void)
if (!tmp || cls == tmp)
continue;
- printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
- cls << 2, tmp << 2,
- pci_dfl_cache_line_size << 2);
+ pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
+ cls << 2, tmp << 2,
+ pci_dfl_cache_line_size << 2);
pci_cache_line_size = pci_dfl_cache_line_size;
}
}
if (!pci_cache_line_size) {
- printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
- cls << 2, pci_dfl_cache_line_size << 2);
+ pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
+ pci_dfl_cache_line_size << 2);
pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
}
@@ -2245,6 +2244,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+/*
+ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
+ * Link bit cleared after starting the link retrain process to allow this
+ * process to finish.
+ *
+ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
+ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
+ */
+static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
+{
+ dev->clear_retrain_link = 1;
+ pci_info(dev, "Enable PCIe Retrain Link quirk\n");
+}
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
+
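
The quirk only sets the flag; the consumer lives in the link-retraining path, which is changed elsewhere in this series and not visible in this hunk. Roughly, and only as a sketch, honoring the flag looks like this:

static void example_retrain_link(struct pci_dev *bridge)
{
	u16 lnkctl;

	pcie_capability_read_word(bridge, PCI_EXP_LNKCTL, &lnkctl);
	lnkctl |= PCI_EXP_LNKCTL_RL;
	pcie_capability_write_word(bridge, PCI_EXP_LNKCTL, lnkctl);

	if (bridge->clear_retrain_link) {
		/*
		 * Affected Pericom bridges never finish retraining
		 * unless the Retrain Link bit is cleared again.
		 */
		lnkctl &= ~PCI_EXP_LNKCTL_RL;
		pcie_capability_write_word(bridge, PCI_EXP_LNKCTL, lnkctl);
	}

	/* ...then wait for PCI_EXP_LNKSTA_LT in the Link Status register */
}
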
static void fixup_rev1_53c810(struct pci_dev *dev)
{
u32 class = dev->class;
@@ -2596,7 +2612,7 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
pci_read_config_dword(dev, 0x74, &cfg);
if (cfg & ((1 << 2) | (1 << 15))) {
- printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n");
+ pr_info("Rewriting IRQ routing register on MCP55\n");
cfg &= ~((1 << 2) | (1 << 15));
pci_write_config_dword(dev, 0x74, cfg);
}
@@ -3408,6 +3424,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
/*
* Root port on some Cavium CN8xxx chips do not successfully complete a bus
@@ -3877,6 +3894,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
quirk_dma_func1_alias);
@@ -4903,6 +4922,7 @@ static void quirk_no_ats(struct pci_dev *pdev)
/* AMD Stoney platform GPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
@@ -5120,3 +5140,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
+
+/*
+ * On Lenovo Thinkpad P50 SKUs with an Nvidia Quadro M1000M, the BIOS does
+ * not always reset the secondary Nvidia GPU between reboots if the system
+ * is configured to use Hybrid Graphics mode. This results in the GPU
+ * being left in whatever state it was in during the *previous* boot, which
+ * causes spurious interrupts from the GPU, which in turn causes us to
+ * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
+ * this also completely breaks nouveau.
+ *
+ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
+ * clean state and fixes all these issues.
+ *
+ * When the machine is configured in Dedicated display mode, the issue
+ * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
+ * mode, so we can detect that and avoid resetting it.
+ */
+static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
+{
+ void __iomem *map;
+ int ret;
+
+ if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
+ pdev->subsystem_device != 0x222e ||
+ !pdev->reset_fn)
+ return;
+
+ if (pci_enable_device_mem(pdev))
+ return;
+
+ /*
+ * Based on nvkm_device_ctor() in
+ * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+ */
+ map = pci_iomap(pdev, 0, 0x23000);
+ if (!map) {
+ pci_err(pdev, "Can't map MMIO space\n");
+ goto out_disable;
+ }
+
+ /*
+ * Make sure the GPU looks like it's been POSTed before resetting
+ * it.
+ */
+ if (ioread32(map + 0x2240c) & 0x2) {
+ pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
+ ret = pci_reset_function(pdev);
+ if (ret < 0)
+ pci_err(pdev, "Failed to reset GPU: %d\n", ret);
+ }
+
+ iounmap(map);
+out_disable:
+ pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
+ PCI_CLASS_DISPLAY_VGA, 8,
+ quirk_reset_lenovo_thinkpad_p50_nvgpu);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 2b5f720862d3..5c7922612733 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -33,7 +33,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
struct pci_bus *bus;
int ret;
- ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data);
+ ret = fn(pdev, pci_dev_id(pdev), data);
if (ret)
return ret;
@@ -88,9 +88,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
return ret;
continue;
case PCI_EXP_TYPE_PCIE_BRIDGE:
- ret = fn(tmp,
- PCI_DEVID(tmp->bus->number,
- tmp->devfn), data);
+ ret = fn(tmp, pci_dev_id(tmp), data);
if (ret)
return ret;
continue;
@@ -101,9 +99,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
PCI_DEVID(tmp->subordinate->number,
PCI_DEVFN(0, 0)), data);
else
- ret = fn(tmp,
- PCI_DEVID(tmp->bus->number,
- tmp->devfn), data);
+ ret = fn(tmp, pci_dev_id(tmp), data);
if (ret)
return ret;
}
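
pci_dev_id() is a small helper introduced alongside this cleanup (defined in include/linux/pci.h, not shown here); as used above it is expected to be nothing more than the familiar bus/devfn packing:

/* Sketch of the helper's expected definition */
static inline u16 example_pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}
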
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index ec44a0f3a7ac..0cdd5ff389de 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -49,17 +49,15 @@ static void free_list(struct list_head *head)
}
/**
- * add_to_list() - add a new resource tracker to the list
+ * add_to_list() - Add a new resource tracker to the list
* @head: Head of the list
- * @dev: device corresponding to which the resource
- * belongs
- * @res: The resource to be tracked
- * @add_size: additional size to be optionally added
- * to the resource
+ * @dev: Device to which the resource belongs
+ * @res: Resource to be tracked
+ * @add_size: Additional size to be optionally added to the resource
*/
-static int add_to_list(struct list_head *head,
- struct pci_dev *dev, struct resource *res,
- resource_size_t add_size, resource_size_t min_align)
+static int add_to_list(struct list_head *head, struct pci_dev *dev,
+ struct resource *res, resource_size_t add_size,
+ resource_size_t min_align)
{
struct pci_dev_resource *tmp;
@@ -80,8 +78,7 @@ static int add_to_list(struct list_head *head,
return 0;
}
-static void remove_from_list(struct list_head *head,
- struct resource *res)
+static void remove_from_list(struct list_head *head, struct resource *res)
{
struct pci_dev_resource *dev_res, *tmp;
@@ -158,7 +155,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
tmp->res = r;
tmp->dev = dev;
- /* fallback is smallest one or list is empty*/
+ /* Fallback is smallest one or list is empty */
n = head;
list_for_each_entry(dev_res, head, list) {
resource_size_t align;
@@ -171,21 +168,20 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
break;
}
}
- /* Insert it just before n*/
+ /* Insert it just before n */
list_add_tail(&tmp->list, n);
}
}
-static void __dev_sort_resources(struct pci_dev *dev,
- struct list_head *head)
+static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
u16 class = dev->class >> 8;
- /* Don't touch classless devices or host bridges or ioapics. */
+ /* Don't touch classless devices or host bridges or IOAPICs */
if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
return;
- /* Don't touch ioapic devices already enabled by firmware */
+ /* Don't touch IOAPIC devices already enabled by firmware */
if (class == PCI_CLASS_SYSTEM_PIC) {
u16 command;
pci_read_config_word(dev, PCI_COMMAND, &command);
@@ -204,19 +200,18 @@ static inline void reset_resource(struct resource *res)
}
/**
- * reassign_resources_sorted() - satisfy any additional resource requests
+ * reassign_resources_sorted() - Satisfy any additional resource requests
*
- * @realloc_head : head of the list tracking requests requiring additional
- * resources
- * @head : head of the list tracking requests with allocated
- * resources
+ * @realloc_head: Head of the list tracking requests requiring
+ * additional resources
+ * @head: Head of the list tracking requests with allocated
+ * resources
*
- * Walk through each element of the realloc_head and try to procure
- * additional resources for the element, provided the element
- * is in the head list.
+ * Walk through each element of the realloc_head and try to procure additional
+ * resources for the element, provided the element is in the head list.
*/
static void reassign_resources_sorted(struct list_head *realloc_head,
- struct list_head *head)
+ struct list_head *head)
{
struct resource *res;
struct pci_dev_resource *add_res, *tmp;
@@ -228,18 +223,18 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
bool found_match = false;
res = add_res->res;
- /* skip resource that has been reset */
+ /* Skip resource that has been reset */
if (!res->flags)
goto out;
- /* skip this resource if not found in head list */
+ /* Skip this resource if not found in head list */
list_for_each_entry(dev_res, head, list) {
if (dev_res->res == res) {
found_match = true;
break;
}
}
- if (!found_match)/* just skip */
+ if (!found_match) /* Just skip */
continue;
idx = res - &add_res->dev->resource[0];
@@ -255,10 +250,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
if (pci_reassign_resource(add_res->dev, idx,
add_size, align))
- pci_printk(KERN_DEBUG, add_res->dev,
- "failed to add %llx res[%d]=%pR\n",
- (unsigned long long)add_size,
- idx, res);
+ pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
+ (unsigned long long) add_size, idx,
+ res);
}
out:
list_del(&add_res->list);
@@ -267,14 +261,14 @@ out:
}
/**
- * assign_requested_resources_sorted() - satisfy resource requests
+ * assign_requested_resources_sorted() - Satisfy resource requests
*
- * @head : head of the list tracking requests for resources
- * @fail_head : head of the list tracking requests that could
- * not be allocated
+ * @head: Head of the list tracking requests for resources
+ * @fail_head: Head of the list tracking requests that could not be
+ * allocated
*
- * Satisfy resource requests of each element in the list. Add
- * requests that could not satisfied to the failed_list.
+ * Satisfy resource requests of each element in the list. Add requests that
+ * could not be satisfied to the failed_list.
*/
static void assign_requested_resources_sorted(struct list_head *head,
struct list_head *fail_head)
@@ -290,8 +284,9 @@ static void assign_requested_resources_sorted(struct list_head *head,
pci_assign_resource(dev_res->dev, idx)) {
if (fail_head) {
/*
- * if the failed res is for ROM BAR, and it will
- * be enabled later, don't add it to the list
+ * If the failed resource is a ROM BAR and
+ * it will be enabled later, don't add it
+ * to the list.
*/
if (!((idx == PCI_ROM_RESOURCE) &&
(!(res->flags & IORESOURCE_ROM_ENABLE))))
@@ -310,15 +305,14 @@ static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
struct pci_dev_resource *fail_res;
unsigned long mask = 0;
- /* check failed type */
+ /* Check failed type */
list_for_each_entry(fail_res, fail_head, list)
mask |= fail_res->flags;
/*
- * one pref failed resource will set IORESOURCE_MEM,
- * as we can allocate pref in non-pref range.
- * Will release all assigned non-pref sibling resources
- * according to that bit.
+ * One pref failed resource will set IORESOURCE_MEM, as we can
+ * allocate pref in non-pref range. We will release all assigned
+ * non-pref sibling resources according to that bit.
*/
return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
}
@@ -328,11 +322,11 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
if (res->flags & IORESOURCE_IO)
return !!(mask & IORESOURCE_IO);
- /* check pref at first */
+ /* Check pref at first */
if (res->flags & IORESOURCE_PREFETCH) {
if (mask & IORESOURCE_PREFETCH)
return true;
- /* count pref if its parent is non-pref */
+ /* Count pref if its parent is non-pref */
else if ((mask & IORESOURCE_MEM) &&
!(res->parent->flags & IORESOURCE_PREFETCH))
return true;
@@ -343,33 +337,33 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
if (res->flags & IORESOURCE_MEM)
return !!(mask & IORESOURCE_MEM);
- return false; /* should not get here */
+ return false; /* Should not get here */
}
static void __assign_resources_sorted(struct list_head *head,
- struct list_head *realloc_head,
- struct list_head *fail_head)
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
{
/*
- * Should not assign requested resources at first.
- * they could be adjacent, so later reassign can not reallocate
- * them one by one in parent resource window.
- * Try to assign requested + add_size at beginning
- * if could do that, could get out early.
- * if could not do that, we still try to assign requested at first,
- * then try to reassign add_size for some resources.
+ * We should not assign the requested resources first. They could be
+ * adjacent, so a later reassign cannot reallocate them one by one in
+ * the parent resource window.
+ *
+ * Try to assign requested + add_size first. If that succeeds, we can
+ * get out early. If it fails, we still try to assign the requested
+ * sizes first, then try to reassign add_size for some resources.
*
* Separate three resource type checking if we need to release
* assigned resource after requested + add_size try.
- * 1. if there is io port assign fail, will release assigned
- * io port.
- * 2. if there is pref mmio assign fail, release assigned
- * pref mmio.
- * if assigned pref mmio's parent is non-pref mmio and there
- * is non-pref mmio assign fail, will release that assigned
- * pref mmio.
- * 3. if there is non-pref mmio assign fail or pref mmio
- * assigned fail, will release assigned non-pref mmio.
+ *
+ * 1. If IO port assignment fails, release the assigned IO
+ * port.
+ * 2. If pref MMIO assignment fails, release the assigned pref
+ * MMIO. If the assigned pref MMIO's parent is non-pref MMIO
+ * and non-pref MMIO assignment fails, release that
+ * assigned pref MMIO.
+ * 3. If non-pref MMIO assignment fails or pref MMIO
+ * assignment fails, release the assigned non-pref MMIO.
*/
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
@@ -398,7 +392,7 @@ static void __assign_resources_sorted(struct list_head *head,
/*
* There are two kinds of additional resources in the list:
* 1. bridge resource -- IORESOURCE_STARTALIGN
- * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
+ * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
* Here just fix the additional alignment for bridge
*/
if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
@@ -407,10 +401,10 @@ static void __assign_resources_sorted(struct list_head *head,
add_align = get_res_add_align(realloc_head, dev_res->res);
/*
- * The "head" list is sorted by the alignment to make sure
- * resources with bigger alignment will be assigned first.
- * After we change the alignment of a dev_res in "head" list,
- * we need to reorder the list by alignment to make it
+ * The "head" list is sorted by alignment so resources with
+ * bigger alignment will be assigned first. After we
+ * change the alignment of a dev_res in "head" list, we
+ * need to reorder the list by alignment to make it
* consistent.
*/
if (add_align > dev_res->res->start) {
@@ -435,7 +429,7 @@ static void __assign_resources_sorted(struct list_head *head,
/* Try updated head list with add_size added */
assign_requested_resources_sorted(head, &local_fail_head);
- /* all assigned with add_size ? */
+ /* All assigned with add_size? */
if (list_empty(&local_fail_head)) {
/* Remove head list from realloc_head list */
list_for_each_entry(dev_res, head, list)
@@ -445,13 +439,13 @@ static void __assign_resources_sorted(struct list_head *head,
return;
}
- /* check failed type */
+ /* Check failed type */
fail_type = pci_fail_res_type_mask(&local_fail_head);
- /* remove not need to be released assigned res from head list etc */
+ /* Remove assigned resources that need not be released from head list etc */
list_for_each_entry_safe(dev_res, tmp_res, head, list)
if (dev_res->res->parent &&
!pci_need_to_release(fail_type, dev_res->res)) {
- /* remove it from realloc_head list */
+ /* Remove it from realloc_head list */
remove_from_list(realloc_head, dev_res->res);
remove_from_list(&save_head, dev_res->res);
list_del(&dev_res->list);
@@ -477,16 +471,15 @@ requested_and_reassign:
/* Satisfy the must-have resource requests */
assign_requested_resources_sorted(head, fail_head);
- /* Try to satisfy any additional optional resource
- requests */
+ /* Try to satisfy any additional optional resource requests */
if (realloc_head)
reassign_resources_sorted(realloc_head, head);
free_list(head);
}
static void pdev_assign_resources_sorted(struct pci_dev *dev,
- struct list_head *add_head,
- struct list_head *fail_head)
+ struct list_head *add_head,
+ struct list_head *fail_head)
{
LIST_HEAD(head);
@@ -563,17 +556,19 @@ void pci_setup_cardbus(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_setup_cardbus);
-/* Initialize bridges with base/limit values we have collected.
- PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
- requires that if there is no I/O ports or memory behind the
- bridge, corresponding range must be turned off by writing base
- value greater than limit to the bridge's base/limit registers.
-
- Note: care must be taken when updating I/O base/limit registers
- of bridges which support 32-bit I/O. This update requires two
- config space writes, so it's quite possible that an I/O window of
- the bridge will have some undesirable address (e.g. 0) after the
- first write. Ditto 64-bit prefetchable MMIO. */
+/*
+ * Initialize bridges with base/limit values we have collected. PCI-to-PCI
+ * Bridge Architecture Specification rev. 1.1 (1998) requires that if there
+ * are no I/O ports or memory behind the bridge, the corresponding range
+ * must be turned off by writing base value greater than limit to the
+ * bridge's base/limit registers.
+ *
+ * Note: care must be taken when updating I/O base/limit registers of
+ * bridges which support 32-bit I/O. This update requires two config space
+ * writes, so it's quite possible that an I/O window of the bridge will
+ * have some undesirable address (e.g. 0) after the first write. Ditto
+ * 64-bit prefetchable MMIO.
+ */
static void pci_setup_bridge_io(struct pci_dev *bridge)
{
struct resource *res;
@@ -587,7 +582,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
if (bridge->io_window_1k)
io_mask = PCI_IO_1K_RANGE_MASK;
- /* Set up the top and bottom of the PCI I/O segment for this bus. */
+ /* Set up the top and bottom of the PCI I/O segment for this bus */
res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
@@ -595,19 +590,19 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
io_base_lo = (region.start >> 8) & io_mask;
io_limit_lo = (region.end >> 8) & io_mask;
l = ((u16) io_limit_lo << 8) | io_base_lo;
- /* Set up upper 16 bits of I/O base/limit. */
+ /* Set up upper 16 bits of I/O base/limit */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
pci_info(bridge, " bridge window %pR\n", res);
} else {
- /* Clear upper 16 bits of I/O base/limit. */
+ /* Clear upper 16 bits of I/O base/limit */
io_upper16 = 0;
l = 0x00f0;
}
- /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
+ /* Temporarily disable the I/O range before updating PCI_IO_BASE */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
- /* Update lower 16 bits of I/O base/limit. */
+ /* Update lower 16 bits of I/O base/limit */
pci_write_config_word(bridge, PCI_IO_BASE, l);
- /* Update upper 16 bits of I/O base/limit. */
+ /* Update upper 16 bits of I/O base/limit */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
@@ -617,7 +612,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
struct pci_bus_region region;
u32 l;
- /* Set up the top and bottom of the PCI Memory segment for this bus. */
+ /* Set up the top and bottom of the PCI Memory segment for this bus */
res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
@@ -636,12 +631,14 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
struct pci_bus_region region;
u32 l, bu, lu;
- /* Clear out the upper 32 bits of PREF limit.
- If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
- disables PREF range, which is ok. */
+ /*
+ * Clear out the upper 32 bits of PREF limit. If
+ * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables
+ * PREF range, which is ok.
+ */
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
- /* Set up PREF base/limit. */
+ /* Set up PREF base/limit */
bu = lu = 0;
res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
pcibios_resource_to_bus(bridge->bus, &region, res);
@@ -658,7 +655,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- /* Set the upper 32 bits of PREF base & limit. */
+ /* Set the upper 32 bits of PREF base & limit */
pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
@@ -702,13 +699,13 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
return 0;
if (pci_claim_resource(bridge, i) == 0)
- return 0; /* claimed the window */
+ return 0; /* Claimed the window */
if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return 0;
if (!pci_bus_clip_resource(bridge, i))
- return -EINVAL; /* clipping didn't change anything */
+ return -EINVAL; /* Clipping didn't change anything */
switch (i - PCI_BRIDGE_RESOURCES) {
case 0:
@@ -725,14 +722,16 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
}
if (pci_claim_resource(bridge, i) == 0)
- return 0; /* claimed a smaller window */
+ return 0; /* Claimed a smaller window */
return -EINVAL;
}
-/* Check whether the bridge supports optional I/O and
- prefetchable memory ranges. If not, the respective
- base/limit registers must be read-only and read as 0. */
+/*
+ * Check whether the bridge supports optional I/O and prefetchable memory
+ * ranges. If not, the respective base/limit registers must be read-only
+ * and read as 0.
+ */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
@@ -752,12 +751,14 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
}
-/* Helper function for sizing routines: find first available
- bus resource of a given type. Note: we intentionally skip
- the bus resources which have already been assigned (that is,
- have non-NULL parent resource). */
+/*
+ * Helper function for sizing routines: find first available bus resource
+ * of a given type. Note: we intentionally skip the bus resources which
+ * have already been assigned (that is, have non-NULL parent resource).
+ */
static struct resource *find_free_bus_resource(struct pci_bus *bus,
- unsigned long type_mask, unsigned long type)
+ unsigned long type_mask,
+ unsigned long type)
{
int i;
struct resource *r;
@@ -772,19 +773,21 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus,
}
static resource_size_t calculate_iosize(resource_size_t size,
- resource_size_t min_size,
- resource_size_t size1,
- resource_size_t add_size,
- resource_size_t children_add_size,
- resource_size_t old_size,
- resource_size_t align)
+ resource_size_t min_size,
+ resource_size_t size1,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
+ resource_size_t old_size,
+ resource_size_t align)
{
if (size < min_size)
size = min_size;
if (old_size == 1)
old_size = 0;
- /* To be fixed in 2.5: we should have sort of HAVE_ISA
- flag in the struct pci_bus. */
+ /*
+ * To be fixed in 2.5: we should have some sort of HAVE_ISA flag in the
+ * struct pci_bus.
+ */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
@@ -797,11 +800,11 @@ static resource_size_t calculate_iosize(resource_size_t size,
}
static resource_size_t calculate_memsize(resource_size_t size,
- resource_size_t min_size,
- resource_size_t add_size,
- resource_size_t children_add_size,
- resource_size_t old_size,
- resource_size_t align)
+ resource_size_t min_size,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
+ resource_size_t old_size,
+ resource_size_t align)
{
if (size < min_size)
size = min_size;
@@ -824,8 +827,7 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
-static resource_size_t window_alignment(struct pci_bus *bus,
- unsigned long type)
+static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
{
resource_size_t align = 1, arch_align;
@@ -833,8 +835,8 @@ static resource_size_t window_alignment(struct pci_bus *bus,
align = PCI_P2P_DEFAULT_MEM_ALIGN;
else if (type & IORESOURCE_IO) {
/*
- * Per spec, I/O windows are 4K-aligned, but some
- * bridges have an extension to support 1K alignment.
+ * Per spec, I/O windows are 4K-aligned, but some bridges have
+ * an extension to support 1K alignment.
*/
if (bus->self->io_window_1k)
align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
@@ -847,20 +849,21 @@ static resource_size_t window_alignment(struct pci_bus *bus,
}
/**
- * pbus_size_io() - size the io window of a given bus
+ * pbus_size_io() - Size the I/O window of a given bus
*
- * @bus : the bus
- * @min_size : the minimum io window that must to be allocated
- * @add_size : additional optional io window
- * @realloc_head : track the additional io window on this list
+ * @bus: The bus
+ * @min_size: The minimum I/O window that must be allocated
+ * @add_size: Additional optional I/O window
+ * @realloc_head: Track the additional I/O window on this list
*
- * Sizing the IO windows of the PCI-PCI bridge is trivial,
- * since these windows have 1K or 4K granularity and the IO ranges
- * of non-bridge PCI devices are limited to 256 bytes.
- * We must be careful with the ISA aliasing though.
+ * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these
+ * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI
+ * devices are limited to 256 bytes. We must be careful with the ISA
+ * aliasing though.
*/
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
- resource_size_t add_size, struct list_head *realloc_head)
+ resource_size_t add_size,
+ struct list_head *realloc_head)
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
@@ -918,9 +921,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
- pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx\n",
- b_res, &bus->busn_res,
- (unsigned long long)size1-size0);
+ pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
+ b_res, &bus->busn_res,
+ (unsigned long long) size1 - size0);
}
}
@@ -947,33 +950,33 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
}
/**
- * pbus_size_mem() - size the memory window of a given bus
+ * pbus_size_mem() - Size the memory window of a given bus
*
- * @bus : the bus
- * @mask: mask the resource flag, then compare it with type
- * @type: the type of free resource from bridge
- * @type2: second match type
- * @type3: third match type
- * @min_size : the minimum memory window that must to be allocated
- * @add_size : additional optional memory window
- * @realloc_head : track the additional memory window on this list
+ * @bus: The bus
+ * @mask: Mask the resource flag, then compare it with type
+ * @type: The type of free resource from bridge
+ * @type2: Second match type
+ * @type3: Third match type
+ * @min_size: The minimum memory window that must be allocated
+ * @add_size: Additional optional memory window
+ * @realloc_head: Track the additional memory window on this list
*
- * Calculate the size of the bus and minimal alignment which
- * guarantees that all child resources fit in this size.
+ * Calculate the size of the bus and minimal alignment which guarantees
+ * that all child resources fit in this size.
*
- * Returns -ENOSPC if there's no available bus resource of the desired type.
- * Otherwise, sets the bus resource start/end to indicate the required
- * size, adds things to realloc_head (if supplied), and returns 0.
+ * Return -ENOSPC if there's no available bus resource of the desired
+ * type. Otherwise, set the bus resource start/end to indicate the
+ * required size, add things to realloc_head (if supplied), and return 0.
*/
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
unsigned long type, unsigned long type2,
- unsigned long type3,
- resource_size_t min_size, resource_size_t add_size,
+ unsigned long type3, resource_size_t min_size,
+ resource_size_t add_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
resource_size_t min_align, align, size, size0, size1;
- resource_size_t aligns[18]; /* Alignments from 1Mb to 128Gb */
+ resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus,
mask | IORESOURCE_PREFETCH, type);
@@ -1002,12 +1005,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
continue;
r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
- /* put SRIOV requested res to the optional list */
+ /* Put SRIOV requested res to the optional list */
if (realloc_head && i >= PCI_IOV_RESOURCES &&
i <= PCI_IOV_RESOURCE_END) {
add_align = max(pci_resource_alignment(dev, r), add_align);
r->end = r->start - 1;
- add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
+ add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
children_add_size += r_size;
continue;
}
@@ -1029,8 +1032,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
continue;
}
size += max(r_size, align);
- /* Exclude ranges with size > align from
- calculation of the alignment. */
+ /*
+ * Exclude ranges with size > align from calculation of
+ * the alignment.
+ */
if (r_size <= align)
aligns[order] += align;
if (order > max_order)
@@ -1063,7 +1068,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->flags |= IORESOURCE_STARTALIGN;
if (size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
- pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
+ pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
b_res, &bus->busn_res,
(unsigned long long) (size1 - size0),
(unsigned long long) add_align);
@@ -1081,7 +1086,7 @@ unsigned long pci_cardbus_resource_alignment(struct resource *res)
}
static void pci_bus_size_cardbus(struct pci_bus *bus,
- struct list_head *realloc_head)
+ struct list_head *realloc_head)
{
struct pci_dev *bridge = bus->self;
struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
@@ -1091,8 +1096,8 @@ static void pci_bus_size_cardbus(struct pci_bus *bus,
if (b_res[0].parent)
goto handle_b_res_1;
/*
- * Reserve some resources for CardBus. We reserve
- * a fixed amount of bus space for CardBus bridges.
+ * Reserve some resources for CardBus. We reserve a fixed amount
+ * of bus space for CardBus bridges.
*/
b_res[0].start = pci_cardbus_io_size;
b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1;
@@ -1116,7 +1121,7 @@ handle_b_res_1:
}
handle_b_res_2:
- /* MEM1 must not be pref mmio */
+ /* MEM1 must not be pref MMIO */
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
@@ -1124,10 +1129,7 @@ handle_b_res_2:
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
}
- /*
- * Check whether prefetchable memory is supported
- * by this bridge.
- */
+ /* Check whether prefetchable memory is supported by this bridge. */
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
@@ -1138,9 +1140,8 @@ handle_b_res_2:
if (b_res[2].parent)
goto handle_b_res_3;
/*
- * If we have prefetchable memory support, allocate
- * two regions. Otherwise, allocate one region of
- * twice the size.
+ * If we have prefetchable memory support, allocate two regions.
+ * Otherwise, allocate one region of twice the size.
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
b_res[2].start = pci_cardbus_mem_size;
@@ -1153,7 +1154,7 @@ handle_b_res_2:
pci_cardbus_mem_size, pci_cardbus_mem_size);
}
- /* reduce that to half */
+ /* Reduce that to half */
b_res_3_size = pci_cardbus_mem_size;
}
@@ -1204,7 +1205,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
switch (bus->self->hdr_type) {
case PCI_HEADER_TYPE_CARDBUS:
- /* don't size cardbuses yet. */
+ /* Don't size CardBuses yet */
break;
case PCI_HEADER_TYPE_BRIDGE:
@@ -1271,18 +1272,17 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
/*
* Compute the size required to put everything else in the
- * non-prefetchable window. This includes:
+ * non-prefetchable window. This includes:
*
* - all non-prefetchable resources
* - 32-bit prefetchable resources if there's a 64-bit
* prefetchable window or no prefetchable window at all
- * - 64-bit prefetchable resources if there's no
- * prefetchable window at all
+ * - 64-bit prefetchable resources if there's no prefetchable
+ * window at all
*
- * Note that the strategy in __pci_assign_resource() must
- * match that used here. Specifically, we cannot put a
- * 32-bit prefetchable resource in a 64-bit prefetchable
- * window.
+ * Note that the strategy in __pci_assign_resource() must match
+ * that used here. Specifically, we cannot put a 32-bit
+ * prefetchable resource in a 64-bit prefetchable window.
*/
pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
realloc_head ? 0 : additional_mem_size,
@@ -1315,8 +1315,8 @@ static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
}
/*
- * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they
- * are skipped by pbus_assign_resources_sorted().
+ * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are
+ * skipped by pbus_assign_resources_sorted().
*/
static void pdev_assign_fixed_resources(struct pci_dev *dev)
{
@@ -1427,10 +1427,9 @@ static void pci_bus_allocate_resources(struct pci_bus *b)
struct pci_bus *child;
/*
- * Carry out a depth-first search on the PCI bus
- * tree to allocate bridge apertures. Read the
- * programmed bridge bases and recursively claim
- * the respective bridge resources.
+ * Carry out a depth-first search on the PCI bus tree to allocate
+ * bridge apertures. Read the programmed bridge bases and
+ * recursively claim the respective bridge resources.
*/
if (b->self) {
pci_read_bridge_bases(b);
@@ -1484,7 +1483,7 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
IORESOURCE_MEM_64)
static void pci_bridge_release_resources(struct pci_bus *bus,
- unsigned long type)
+ unsigned long type)
{
struct pci_dev *dev = bus->self;
struct resource *r;
@@ -1495,16 +1494,14 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
/*
- * 1. if there is io port assign fail, will release bridge
- * io port.
- * 2. if there is non pref mmio assign fail, release bridge
- * nonpref mmio.
- * 3. if there is 64bit pref mmio assign fail, and bridge pref
- * is 64bit, release bridge pref mmio.
- * 4. if there is pref mmio assign fail, and bridge pref is
- * 32bit mmio, release bridge pref mmio
- * 5. if there is pref mmio assign fail, and bridge pref is not
- * assigned, release bridge nonpref mmio.
+ * 1. If IO port assignment fails, release bridge IO port.
+ * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO.
+ * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit,
+ * release bridge pref MMIO.
+ * 4. If pref MMIO assignment fails, and bridge pref is 32bit,
+ * release bridge pref MMIO.
+ * 5. If pref MMIO assignment fails, and bridge pref is not
+ * assigned, release bridge nonpref MMIO.
*/
if (type & IORESOURCE_IO)
idx = 0;
@@ -1524,25 +1521,22 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
if (!r->parent)
return;
- /*
- * if there are children under that, we should release them
- * all
- */
+ /* If there are children, release them all */
release_child_resources(r);
if (!release_resource(r)) {
type = old_flags = r->flags & PCI_RES_TYPE_MASK;
- pci_printk(KERN_DEBUG, dev, "resource %d %pR released\n",
- PCI_BRIDGE_RESOURCES + idx, r);
- /* keep the old size */
+ pci_info(dev, "resource %d %pR released\n",
+ PCI_BRIDGE_RESOURCES + idx, r);
+ /* Keep the old size */
r->end = resource_size(r) - 1;
r->start = 0;
r->flags = 0;
- /* avoiding touch the one without PREF */
+ /* Avoid touching the one without PREF */
if (type & IORESOURCE_PREFETCH)
type = IORESOURCE_PREFETCH;
__pci_setup_bridge(bus, type);
- /* for next child res under same bridge */
+ /* For next child res under same bridge */
r->flags = old_flags;
}
}
@@ -1551,9 +1545,10 @@ enum release_type {
leaf_only,
whole_subtree,
};
+
/*
- * try to release pci bridge resources that is from leaf bridge,
- * so we can allocate big new one later
+ * Try to release PCI bridge resources from leaf bridge, so we can allocate
+ * a larger window later.
*/
static void pci_bus_release_bridge_resources(struct pci_bus *bus,
unsigned long type,
@@ -1596,7 +1591,7 @@ static void pci_bus_dump_res(struct pci_bus *bus)
if (!res || !res->end || !res->flags)
continue;
- dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
+ dev_info(&bus->dev, "resource %d %pR\n", i, res);
}
}
@@ -1678,7 +1673,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
pcibios_resource_to_bus(dev->bus, &region, r);
if (!region.start) {
*unassigned = true;
- return 1; /* return early from pci_walk_bus() */
+ return 1; /* Return early from pci_walk_bus() */
}
}
@@ -1686,7 +1681,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
}
static enum enable_type pci_realloc_detect(struct pci_bus *bus,
- enum enable_type enable_local)
+ enum enable_type enable_local)
{
bool unassigned = false;
@@ -1701,21 +1696,21 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
}
#else
static enum enable_type pci_realloc_detect(struct pci_bus *bus,
- enum enable_type enable_local)
+ enum enable_type enable_local)
{
return enable_local;
}
#endif
/*
- * first try will not touch pci bridge res
- * second and later try will clear small leaf bridge res
- * will stop till to the max depth if can not find good one
+ * The first try will not touch PCI bridge resources.
+ * The second and later tries will clear small leaf bridge resources.
+ * Stop at the max depth if no good fit can be found.
*/
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
{
- LIST_HEAD(realloc_head); /* list of resources that
- want additional resources */
+ LIST_HEAD(realloc_head);
+ /* List of resources that want additional resources */
struct list_head *add_list = NULL;
int tried_times = 0;
enum release_type rel_type = leaf_only;
@@ -1724,26 +1719,26 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
int pci_try_num = 1;
enum enable_type enable_local;
- /* don't realloc if asked to do so */
+ /* Don't realloc if asked to do so */
enable_local = pci_realloc_detect(bus, pci_realloc_enable);
if (pci_realloc_enabled(enable_local)) {
int max_depth = pci_bus_get_depth(bus);
pci_try_num = max_depth + 1;
- dev_printk(KERN_DEBUG, &bus->dev,
- "max bus depth: %d pci_try_num: %d\n",
- max_depth, pci_try_num);
+ dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
}
again:
/*
- * last try will use add_list, otherwise will try good to have as
- * must have, so can realloc parent bridge resource
+ * The last try will use add_list; otherwise, treat good-to-have as
+ * must-have so the parent bridge resource can be reallocated.
*/
if (tried_times + 1 == pci_try_num)
add_list = &realloc_head;
- /* Depth first, calculate sizes and alignments of all
- subordinate buses. */
+ /*
+ * Depth first, calculate sizes and alignments of all subordinate buses.
+ */
__pci_bus_size_bridges(bus, add_list);
/* Depth last, allocate resources and update the hardware. */
@@ -1752,7 +1747,7 @@ again:
BUG_ON(!list_empty(add_list));
tried_times++;
- /* any device complain? */
+ /* Did any device complain? */
if (list_empty(&fail_head))
goto dump;
@@ -1766,23 +1761,23 @@ again:
goto dump;
}
- dev_printk(KERN_DEBUG, &bus->dev,
- "No. %d try to assign unassigned res\n", tried_times + 1);
+ dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
+ tried_times + 1);
- /* third times and later will not check if it is leaf */
+ /* The third and later tries will not check if it is a leaf */
if ((tried_times + 1) > 2)
rel_type = whole_subtree;
/*
* Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge
+ * child device under that bridge.
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
fail_res->flags & PCI_RES_TYPE_MASK,
rel_type);
- /* restore size and flags */
+ /* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
@@ -1797,7 +1792,7 @@ again:
goto again;
dump:
- /* dump the resource on buses */
+ /* Dump the resource on buses */
pci_bus_dump_resources(bus);
}
@@ -1808,14 +1803,15 @@ void __init pci_assign_unassigned_resources(void)
list_for_each_entry(root_bus, &pci_root_buses, node) {
pci_assign_unassigned_root_bus_resources(root_bus);
- /* Make sure the root bridge has a companion ACPI device: */
+ /* Make sure the root bridge has a companion ACPI device */
if (ACPI_HANDLE(root_bus->bridge))
acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
}
}
static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
- struct list_head *add_list, resource_size_t available)
+ struct list_head *add_list,
+ resource_size_t available)
{
struct pci_dev_resource *dev_res;
@@ -1839,8 +1835,10 @@ static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
}
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
- struct list_head *add_list, resource_size_t available_io,
- resource_size_t available_mmio, resource_size_t available_mmio_pref)
+ struct list_head *add_list,
+ resource_size_t available_io,
+ resource_size_t available_mmio,
+ resource_size_t available_mmio_pref)
{
resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref;
unsigned int normal_bridges = 0, hotplug_bridges = 0;
@@ -1864,7 +1862,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
/*
* Calculate the total amount of extra resource space we can
- * pass to bridges below this one. This is basically the
+ * pass to bridges below this one. This is basically the
* extra space reduced by the minimal required space for the
* non-hotplug bridges.
*/
@@ -1874,7 +1872,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
/*
* Calculate how many hotplug bridges and normal bridges there
- * are on this bus. We will distribute the additional available
+ * are on this bus. We will distribute the additional available
* resources between hotplug bridges.
*/
for_each_pci_bridge(dev, bus) {
@@ -1909,8 +1907,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
/*
* There is only one bridge on the bus so it gets all available
- * resources which it can then distribute to the possible
- * hotplug bridges below.
+ * resources which it can then distribute to the possible hotplug
+ * bridges below.
*/
if (hotplug_bridges + normal_bridges == 1) {
dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
@@ -1961,9 +1959,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
}
-static void
-pci_bridge_distribute_available_resources(struct pci_dev *bridge,
- struct list_head *add_list)
+static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ struct list_head *add_list)
{
resource_size_t available_io, available_mmio, available_mmio_pref;
const struct resource *res;
@@ -1980,14 +1977,17 @@ pci_bridge_distribute_available_resources(struct pci_dev *bridge,
available_mmio_pref = resource_size(res);
pci_bus_distribute_available_resources(bridge->subordinate,
- add_list, available_io, available_mmio, available_mmio_pref);
+ add_list, available_io,
+ available_mmio,
+ available_mmio_pref);
}
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
- LIST_HEAD(add_list); /* list of resources that
- want additional resources */
+ /* List of resources that want additional resources */
+ LIST_HEAD(add_list);
+
int tried_times = 0;
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
@@ -1997,9 +1997,9 @@ again:
__pci_bus_size_bridges(parent, &add_list);
/*
- * Distribute remaining resources (if any) equally between
- * hotplug bridges below. This makes it possible to extend the
- * hierarchy later without running out of resources.
+ * Distribute remaining resources (if any) equally between hotplug
+ * bridges below. This makes it possible to extend the hierarchy
+ * later without running out of resources.
*/
pci_bridge_distribute_available_resources(bridge, &add_list);
@@ -2011,7 +2011,7 @@ again:
goto enable_all;
if (tried_times >= 2) {
- /* still fail, don't need to try more */
+ /* Still failing, no need to try more */
free_list(&fail_head);
goto enable_all;
}
@@ -2020,15 +2020,15 @@ again:
tried_times + 1);
/*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge
+ * Try to release leaf bridge's resources that aren't big enough
+ * to contain child device resources.
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
fail_res->flags & PCI_RES_TYPE_MASK,
whole_subtree);
- /* restore size and flags */
+ /* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
@@ -2107,7 +2107,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
}
list_for_each_entry(dev_res, &saved, list) {
- /* Skip the bridge we just assigned resources for. */
+ /* Skip the bridge we just assigned resources for */
if (bridge == dev_res->dev)
continue;
@@ -2119,7 +2119,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
return 0;
cleanup:
- /* restore size and flags */
+ /* Restore size and flags */
list_for_each_entry(dev_res, &failed, list) {
struct resource *res = dev_res->res;
@@ -2151,8 +2151,8 @@ cleanup:
void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
{
struct pci_dev *dev;
- LIST_HEAD(add_list); /* list of resources that
- want additional resources */
+ /* List of resources that want additional resources */
+ LIST_HEAD(add_list);
down_read(&pci_bus_sem);
for_each_pci_bridge(dev, bus)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index c46d5e1ff536..f4d92b1afe7b 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -403,7 +403,7 @@ static int pci_slot_init(void)
pci_slots_kset = kset_create_and_add("slots", NULL,
&pci_bus_kset->kobj);
if (!pci_slots_kset) {
- printk(KERN_ERR "PCI: Slot initialization failure\n");
+ pr_err("PCI: Slot initialization failure\n");
return -ENOMEM;
}
return 0;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index e22766c79fe9..bebbde4ebec0 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -390,7 +390,7 @@ static int switchtec_dev_open(struct inode *inode, struct file *filp)
return PTR_ERR(stuser);
filp->private_data = stuser;
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
@@ -658,19 +658,25 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
static int ioctl_event_summary(struct switchtec_dev *stdev,
struct switchtec_user *stuser,
- struct switchtec_ioctl_event_summary __user *usum)
+ struct switchtec_ioctl_event_summary __user *usum,
+ size_t size)
{
- struct switchtec_ioctl_event_summary s = {0};
+ struct switchtec_ioctl_event_summary *s;
int i;
u32 reg;
+ int ret = 0;
- s.global = ioread32(&stdev->mmio_sw_event->global_summary);
- s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
- s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->global = ioread32(&stdev->mmio_sw_event->global_summary);
+ s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
- s.part[i] = reg;
+ s->part[i] = reg;
}
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
@@ -679,15 +685,19 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
break;
reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
- s.pff[i] = reg;
+ s->pff[i] = reg;
}
- if (copy_to_user(usum, &s, sizeof(s)))
- return -EFAULT;
+ if (copy_to_user(usum, s, size)) {
+ ret = -EFAULT;
+ goto error_case;
+ }
stuser->event_cnt = atomic_read(&stdev->event_cnt);
- return 0;
+error_case:
+ kfree(s);
+ return ret;
}
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
@@ -977,8 +987,9 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
case SWITCHTEC_IOCTL_FLASH_PART_INFO:
rc = ioctl_flash_part_info(stdev, argp);
break;
- case SWITCHTEC_IOCTL_EVENT_SUMMARY:
- rc = ioctl_event_summary(stdev, stuser, argp);
+ case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
+ rc = ioctl_event_summary(stdev, stuser, argp,
+ sizeof(struct switchtec_ioctl_event_summary_legacy));
break;
case SWITCHTEC_IOCTL_EVENT_CTL:
rc = ioctl_event_ctl(stdev, argp);
@@ -989,6 +1000,10 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
case SWITCHTEC_IOCTL_PORT_TO_PFF:
rc = ioctl_port_to_pff(stdev, argp);
break;
+ case SWITCHTEC_IOCTL_EVENT_SUMMARY:
+ rc = ioctl_event_summary(stdev, stuser, argp,
+ sizeof(struct switchtec_ioctl_event_summary));
+ break;
default:
rc = -ENOTTY;
break;
@@ -1162,7 +1177,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
- if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
+ if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
+ eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
return 0;
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index eba6e33147a2..d1b16cf3403f 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -291,8 +291,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
vector[i] = op.msix_entries[i].vector;
}
} else {
- printk(KERN_DEBUG "enable msix get value %x\n",
- op.value);
+ pr_info("enable msix get value %x\n", op.value);
err = op.value;
}
} else {
@@ -364,12 +363,12 @@ static void pci_frontend_disable_msi(struct pci_dev *dev)
err = do_pci_op(pdev, &op);
if (err == XEN_PCI_ERR_dev_not_found) {
/* XXX No response from backend, what shall we do? */
- printk(KERN_DEBUG "get no response from backend for disable MSI\n");
+ pr_info("get no response from backend for disable MSI\n");
return;
}
if (err)
/* how can pciback notify us fail? */
- printk(KERN_DEBUG "get fake response frombackend\n");
+ pr_info("get fake response from backend\n");
}
static struct xen_pci_frontend_ops pci_frontend_ops = {
@@ -1104,7 +1103,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
case XenbusStateClosed:
if (xdev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
dev_warn(&xdev->dev, "backend going away!\n");
pcifront_try_disconnect(pdev);
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index c2a17a79f0b2..267fb875e40f 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -22,7 +22,7 @@
#include <mach/hardware.h>
#include <asm/io.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/mux.h>
#include <mach/tc.h>
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index af9bc178495d..a94e586a58b2 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -52,6 +52,15 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
+config ARM_SMMU_V3_PMU
+ tristate "ARM SMMUv3 Performance Monitors Extension"
+ depends on ARM64 && ACPI && ARM_SMMU_V3
+ help
+ Provides support for the ARM SMMUv3 Performance Monitor Counter
+ Groups (PMCG), which provide monitoring of transactions passing
+ through the SMMU and allow the resulting information to be filtered
+ based on the Stream ID of the corresponding master.
+
config ARM_DSU_PMU
tristate "ARM DynamIQ Shared Unit (DSU) PMU"
depends on ARM64
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 909f27fd9db3..30489941f3d6 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ARM_CCN) += arm-ccn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
+obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index bfd03e023308..8f8606b9bc9e 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1684,21 +1684,24 @@ static int cci_pmu_probe(struct platform_device *pdev)
raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
mutex_init(&cci_pmu->reserve_mutex);
atomic_set(&cci_pmu->active_events, 0);
- cci_pmu->cpu = get_cpu();
-
- ret = cci_pmu_init(cci_pmu, pdev);
- if (ret) {
- put_cpu();
- return ret;
- }
+ cci_pmu->cpu = raw_smp_processor_id();
+ g_cci_pmu = cci_pmu;
cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
"perf/arm/cci:online", NULL,
cci_pmu_offline_cpu);
- put_cpu();
- g_cci_pmu = cci_pmu;
+
+ ret = cci_pmu_init(cci_pmu, pdev);
+ if (ret)
+ goto error_pmu_init;
+
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;
+
+error_pmu_init:
+ cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
+ g_cci_pmu = NULL;
+ return ret;
}
static int cci_pmu_remove(struct platform_device *pdev)
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 2ae76026e947..0bb52d9bdcf7 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
struct hrtimer hrtimer;
- cpumask_t cpu;
+ unsigned int cpu;
struct hlist_node node;
struct pmu pmu;
@@ -559,7 +559,7 @@ static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
{
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
- return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu);
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
}
static struct device_attribute arm_ccn_pmu_cpumask_attr =
@@ -759,7 +759,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
* mitigate this, we enforce CPU assignment to one, selected
* processor (the one described in the "cpumask" attribute).
*/
- event->cpu = cpumask_first(&ccn->dt.cpu);
+ event->cpu = ccn->dt.cpu;
node_xp = CCN_CONFIG_NODE(event->attr.config);
type = CCN_CONFIG_TYPE(event->attr.config);
@@ -1215,15 +1215,15 @@ static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
unsigned int target;
- if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
+ if (cpu != dt->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&dt->pmu, cpu, target);
- cpumask_set_cpu(target, &dt->cpu);
+ dt->cpu = target;
if (ccn->irq)
- WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
+ WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu)));
return 0;
}
@@ -1299,29 +1299,30 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
}
/* Pick one CPU which we will use to collect data from CCN... */
- cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
+ ccn->dt.cpu = raw_smp_processor_id();
/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
- err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
+ err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu));
if (err) {
dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
goto error_set_affinity;
}
}
+ cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ &ccn->dt.node);
+
err = perf_pmu_register(&ccn->dt.pmu, name, -1);
if (err)
goto error_pmu_register;
- cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
- &ccn->dt.node);
- put_cpu();
return 0;
error_pmu_register:
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ &ccn->dt.node);
error_set_affinity:
- put_cpu();
error_choose_name:
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
for (i = 0; i < ccn->num_xps; i++)
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
new file mode 100644
index 000000000000..da71c741cb46
--- /dev/null
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This driver adds support for perf events to use the Performance
+ * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
+ * to monitor that node.
+ *
+ * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
+ * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
+ * to 4K boundary. For example, the PMCG at 0xff88840000 is named
+ * smmuv3_pmcg_ff88840
+ *
+ * Filtering by StreamID is done by specifying filtering parameters
+ * with the event. The options are:
+ * filter_enable - 0 = no filtering, 1 = filtering enabled
+ * filter_span - 0 = exact match, 1 = pattern match
+ * filter_stream_id - pattern to filter against
+ *
+ * To match a partial StreamID where the X most-significant bits must match
+ * but the Y least-significant bits might differ, STREAMID is programmed
+ * with a value that contains:
+ * STREAMID[Y - 1] == 0.
+ * STREAMID[Y - 2:0] == 1 (where Y > 1).
+ * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
+ * contain a value to match from the corresponding bits of event StreamID.
+ *
+ * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
+ * filter_span=1,filter_stream_id=0x42/ -a netperf
+ * Applies filter pattern 0x42 to transaction events, which means events
+ * matching stream ids 0x42 and 0x43 are counted. Further filtering
+ * information is available in the SMMU documentation.
+ *
+ * SMMU events are not attributable to a CPU, so task mode and sampling
+ * are not supported.
+ */
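As a minimal sketch of the span-filter encoding described above (not part of this patch), the pattern value passed in filter_stream_id could be computed as follows; the helper name pmcg_span_pattern and its span_bits parameter are illustrative assumptions, not driver API:

#include <stdint.h>

/*
 * Hypothetical helper (not part of the driver): build a filter_stream_id
 * pattern that matches the upper StreamID bits of 'sid' while ignoring
 * the lowest 'span_bits' bits, following the encoding described above:
 * bit (span_bits - 1) is cleared and bits (span_bits - 2)..0 are set.
 */
uint32_t pmcg_span_pattern(uint32_t sid, unsigned int span_bits)
{
	uint32_t ignored = (span_bits >= 32) ? 0xffffffffu
					     : ((1u << span_bits) - 1u);

	return (sid & ~ignored) | (ignored >> 1);
}

For example, pmcg_span_pattern(0x42, 1) returns 0x42, which together with filter_span=1 matches StreamIDs 0x42 and 0x43, as in the perf invocation above.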
+
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/msi.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define SMMU_PMCG_EVCNTR0 0x0
+#define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride))
+#define SMMU_PMCG_EVTYPER0 0x400
+#define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4)
+#define SMMU_PMCG_SID_SPAN_SHIFT 29
+#define SMMU_PMCG_SMR0 0xA00
+#define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4)
+#define SMMU_PMCG_CNTENSET0 0xC00
+#define SMMU_PMCG_CNTENCLR0 0xC20
+#define SMMU_PMCG_INTENSET0 0xC40
+#define SMMU_PMCG_INTENCLR0 0xC60
+#define SMMU_PMCG_OVSCLR0 0xC80
+#define SMMU_PMCG_OVSSET0 0xCC0
+#define SMMU_PMCG_CFGR 0xE00
+#define SMMU_PMCG_CFGR_SID_FILTER_TYPE BIT(23)
+#define SMMU_PMCG_CFGR_MSI BIT(21)
+#define SMMU_PMCG_CFGR_RELOC_CTRS BIT(20)
+#define SMMU_PMCG_CFGR_SIZE GENMASK(13, 8)
+#define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0)
+#define SMMU_PMCG_CR 0xE04
+#define SMMU_PMCG_CR_ENABLE BIT(0)
+#define SMMU_PMCG_CEID0 0xE20
+#define SMMU_PMCG_CEID1 0xE28
+#define SMMU_PMCG_IRQ_CTRL 0xE50
+#define SMMU_PMCG_IRQ_CTRL_IRQEN BIT(0)
+#define SMMU_PMCG_IRQ_CFG0 0xE58
+#define SMMU_PMCG_IRQ_CFG1 0xE60
+#define SMMU_PMCG_IRQ_CFG2 0xE64
+
+/* MSI config fields */
+#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
+#define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1
+
+#define SMMU_PMCG_DEFAULT_FILTER_SPAN 1
+#define SMMU_PMCG_DEFAULT_FILTER_SID GENMASK(31, 0)
+
+#define SMMU_PMCG_MAX_COUNTERS 64
+#define SMMU_PMCG_ARCH_MAX_EVENTS 128
+
+#define SMMU_PMCG_PA_SHIFT 12
+
+#define SMMU_PMCG_EVCNTR_RDONLY BIT(0)
+
+static int cpuhp_state_num;
+
+struct smmu_pmu {
+ struct hlist_node node;
+ struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
+ DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
+ DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
+ unsigned int irq;
+ unsigned int on_cpu;
+ struct pmu pmu;
+ unsigned int num_counters;
+ struct device *dev;
+ void __iomem *reg_base;
+ void __iomem *reloc_base;
+ u64 counter_mask;
+ u32 options;
+ bool global_filter;
+ u32 global_filter_span;
+ u32 global_filter_sid;
+};
+
+#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
+
+#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end) \
+ static inline u32 get_##_name(struct perf_event *event) \
+ { \
+ return FIELD_GET(GENMASK_ULL(_end, _start), \
+ event->attr._config); \
+ } \
+
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
+
+static inline void smmu_pmu_enable(struct pmu *pmu)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+
+ writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
+ smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+ writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
+}
+
+static inline void smmu_pmu_disable(struct pmu *pmu)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+
+ writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
+ writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+}
+
+static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
+ u32 idx, u64 value)
+{
+ if (smmu_pmu->counter_mask & BIT(32))
+ writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
+ else
+ writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
+}
+
+static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+ u64 value;
+
+ if (smmu_pmu->counter_mask & BIT(32))
+ value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
+ else
+ value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
+
+ return value;
+}
+
+static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+ writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
+}
+
+static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+ writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
+}
+
+static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+ writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
+}
+
+static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
+ u32 idx)
+{
+ writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
+}
+
+static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
+ u32 val)
+{
+ writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
+}
+
+static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
+{
+ writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
+}
+
+static void smmu_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ u64 delta, prev, now;
+ u32 idx = hwc->idx;
+
+ do {
+ prev = local64_read(&hwc->prev_count);
+ now = smmu_pmu_counter_get_value(smmu_pmu, idx);
+ } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+ /* Handle overflow */
+ delta = now - prev;
+ delta &= smmu_pmu->counter_mask;
+
+ local64_add(delta, &event->count);
+}
+
+static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+ u64 new;
+
+ if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
+ /*
+ * On platforms that require this quirk, if the counter starts at
+ * less than half the max counter value and then wraps, the current
+ * overflow handling may not work. Such platforms are expected to
+ * implement the full 64 counter bits so that this possibility is
+ * remote (e.g. HiSilicon HIP08).
+ */
+ new = smmu_pmu_counter_get_value(smmu_pmu, idx);
+ } else {
+ /*
+ * We limit the max period to half the max counter value
+ * of the counter size, so that even in the case of extreme
+ * interrupt latency the counter will (hopefully) not wrap
+ * past its initial value.
+ */
+ new = smmu_pmu->counter_mask >> 1;
+ smmu_pmu_counter_set_value(smmu_pmu, idx, new);
+ }
+
+ local64_set(&hwc->prev_count, new);
+}
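To make the interplay between the half-range start value programmed here and the masked delta in smmu_pmu_event_update() concrete, the following standalone check walks one wrap of an assumed 32-bit counter (values are made up for illustration, not taken from the driver):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t counter_mask = 0xffffffffULL;		/* assume a 32-bit counter */
	uint64_t prev = counter_mask >> 1;		/* 0x7fffffff, the value set_period() programs */
	uint64_t now = 0x10;				/* value read back after one wrap */
	uint64_t delta = (now - prev) & counter_mask;	/* same masking as the update path */

	/* 0x80000001 events up to the wrap plus 0x10 after it. */
	assert(delta == 0x80000011ULL);
	return 0;
}

Starting at half the counter range leaves the other half as headroom, so even with large interrupt latency a single wrap still yields the correct masked delta.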
+
+static void smmu_pmu_set_event_filter(struct perf_event *event,
+ int idx, u32 span, u32 sid)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ u32 evtyper;
+
+ evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
+ smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
+ smmu_pmu_set_smr(smmu_pmu, idx, sid);
+}
+
+static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+ struct perf_event *event, int idx)
+{
+ u32 span, sid;
+ unsigned int num_ctrs = smmu_pmu->num_counters;
+ bool filter_en = !!get_filter_enable(event);
+
+ span = filter_en ? get_filter_span(event) :
+ SMMU_PMCG_DEFAULT_FILTER_SPAN;
+ sid = filter_en ? get_filter_stream_id(event) :
+ SMMU_PMCG_DEFAULT_FILTER_SID;
+
+ /* Support individual filter settings */
+ if (!smmu_pmu->global_filter) {
+ smmu_pmu_set_event_filter(event, idx, span, sid);
+ return 0;
+ }
+
+ /* Requested settings same as current global settings */
+ if (span == smmu_pmu->global_filter_span &&
+ sid == smmu_pmu->global_filter_sid)
+ return 0;
+
+ if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
+ return -EAGAIN;
+
+ smmu_pmu_set_event_filter(event, 0, span, sid);
+ smmu_pmu->global_filter_span = span;
+ smmu_pmu->global_filter_sid = sid;
+ return 0;
+}
+
+static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
+ struct perf_event *event)
+{
+ int idx, err;
+ unsigned int num_ctrs = smmu_pmu->num_counters;
+
+ idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
+ if (idx == num_ctrs)
+ /* The counters are all in use. */
+ return -EAGAIN;
+
+ err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
+ if (err)
+ return err;
+
+ set_bit(idx, smmu_pmu->used_counters);
+
+ return idx;
+}
+
+/*
+ * Implementation of abstract pmu functionality required by
+ * the core perf events code.
+ */
+
+static int smmu_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ struct device *dev = smmu_pmu->dev;
+ struct perf_event *sibling;
+ u16 event_id;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (hwc->sample_period) {
+ dev_dbg(dev, "Sampling not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0) {
+ dev_dbg(dev, "Per-task mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Verify specified event is supported on this PMU */
+ event_id = get_event(event);
+ if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
+ (!test_bit(event_id, smmu_pmu->supported_events))) {
+ dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
+ return -EINVAL;
+ }
+
+ /* Don't allow groups with mixed PMUs, except for s/w events */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader)) {
+ dev_dbg(dev, "Can't create mixed PMU group\n");
+ return -EINVAL;
+ }
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling)) {
+ dev_dbg(dev, "Can't create mixed PMU group\n");
+ return -EINVAL;
+ }
+ }
+
+ hwc->idx = -1;
+
+ /*
+ * Ensure all events are on the same cpu so all events are in the
+ * same cpu context, to avoid races on pmu_enable etc.
+ */
+ event->cpu = smmu_pmu->on_cpu;
+
+ return 0;
+}
+
+static void smmu_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ hwc->state = 0;
+
+ smmu_pmu_set_period(smmu_pmu, hwc);
+
+ smmu_pmu_counter_enable(smmu_pmu, idx);
+}
+
+static void smmu_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ smmu_pmu_counter_disable(smmu_pmu, idx);
+ /* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
+ smmu_pmu_event_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int smmu_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+
+ idx = smmu_pmu_get_event_idx(smmu_pmu, event);
+ if (idx < 0)
+ return idx;
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ smmu_pmu->events[idx] = event;
+ local64_set(&hwc->prev_count, 0);
+
+ smmu_pmu_interrupt_enable(smmu_pmu, idx);
+
+ if (flags & PERF_EF_START)
+ smmu_pmu_event_start(event, flags);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void smmu_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ int idx = hwc->idx;
+
+ smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
+ smmu_pmu_interrupt_disable(smmu_pmu, idx);
+ smmu_pmu->events[idx] = NULL;
+ clear_bit(idx, smmu_pmu->used_counters);
+
+ perf_event_update_userpage(event);
+}
+
+static void smmu_pmu_event_read(struct perf_event *event)
+{
+ smmu_pmu_event_update(event);
+}
+
+/* cpumask */
+
+static ssize_t smmu_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
+}
+
+static struct device_attribute smmu_pmu_cpumask_attr =
+ __ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);
+
+static struct attribute *smmu_pmu_cpumask_attrs[] = {
+ &smmu_pmu_cpumask_attr.attr,
+ NULL
+};
+
+static struct attribute_group smmu_pmu_cpumask_group = {
+ .attrs = smmu_pmu_cpumask_attrs,
+};
+
+/* Events */
+
+static ssize_t smmu_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define SMMU_EVENT_ATTR(name, config) \
+ PMU_EVENT_ATTR(name, smmu_event_attr_##name, \
+ config, smmu_pmu_event_show)
+SMMU_EVENT_ATTR(cycles, 0);
+SMMU_EVENT_ATTR(transaction, 1);
+SMMU_EVENT_ATTR(tlb_miss, 2);
+SMMU_EVENT_ATTR(config_cache_miss, 3);
+SMMU_EVENT_ATTR(trans_table_walk_access, 4);
+SMMU_EVENT_ATTR(config_struct_access, 5);
+SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6);
+SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7);
+
+static struct attribute *smmu_pmu_events[] = {
+ &smmu_event_attr_cycles.attr.attr,
+ &smmu_event_attr_transaction.attr.attr,
+ &smmu_event_attr_tlb_miss.attr.attr,
+ &smmu_event_attr_config_cache_miss.attr.attr,
+ &smmu_event_attr_trans_table_walk_access.attr.attr,
+ &smmu_event_attr_config_struct_access.attr.attr,
+ &smmu_event_attr_pcie_ats_trans_rq.attr.attr,
+ &smmu_event_attr_pcie_ats_trans_passed.attr.attr,
+ NULL
+};
+
+static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
+
+ if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
+ return attr->mode;
+
+ return 0;
+}
+
+static struct attribute_group smmu_pmu_events_group = {
+ .name = "events",
+ .attrs = smmu_pmu_events,
+ .is_visible = smmu_pmu_event_is_visible,
+};
+
+/* Formats */
+PMU_FORMAT_ATTR(event, "config:0-15");
+PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
+PMU_FORMAT_ATTR(filter_span, "config1:32");
+PMU_FORMAT_ATTR(filter_enable, "config1:33");
+
+static struct attribute *smmu_pmu_formats[] = {
+ &format_attr_event.attr,
+ &format_attr_filter_stream_id.attr,
+ &format_attr_filter_span.attr,
+ &format_attr_filter_enable.attr,
+ NULL
+};
+
+static struct attribute_group smmu_pmu_format_group = {
+ .name = "format",
+ .attrs = smmu_pmu_formats,
+};
+
+static const struct attribute_group *smmu_pmu_attr_grps[] = {
+ &smmu_pmu_cpumask_group,
+ &smmu_pmu_events_group,
+ &smmu_pmu_format_group,
+ NULL
+};
+
+/*
+ * Generic device handlers
+ */
+
+static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct smmu_pmu *smmu_pmu;
+ unsigned int target;
+
+ smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
+ if (cpu != smmu_pmu->on_cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
+ smmu_pmu->on_cpu = target;
+ WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));
+
+ return 0;
+}
+
+static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
+{
+ struct smmu_pmu *smmu_pmu = data;
+ u64 ovsr;
+ unsigned int idx;
+
+ ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
+ if (!ovsr)
+ return IRQ_NONE;
+
+ writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
+
+ for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
+ struct perf_event *event = smmu_pmu->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (WARN_ON_ONCE(!event))
+ continue;
+
+ smmu_pmu_event_update(event);
+ hwc = &event->hw;
+
+ smmu_pmu_set_period(smmu_pmu, hwc);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void smmu_pmu_free_msis(void *data)
+{
+ struct device *dev = data;
+
+ platform_msi_domain_free_irqs(dev);
+}
+
+static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ phys_addr_t doorbell;
+ struct device *dev = msi_desc_to_dev(desc);
+ struct smmu_pmu *pmu = dev_get_drvdata(dev);
+
+ doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
+ doorbell &= MSI_CFG0_ADDR_MASK;
+
+ writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
+ writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
+ writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
+ pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
+}
+
+static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
+{
+ struct msi_desc *desc;
+ struct device *dev = pmu->dev;
+ int ret;
+
+ /* Clear MSI address reg */
+ writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
+
+ /* MSI supported or not */
+ if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
+ return;
+
+ ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
+ if (ret) {
+ dev_warn(dev, "failed to allocate MSIs\n");
+ return;
+ }
+
+ desc = first_msi_entry(dev);
+ if (desc)
+ pmu->irq = desc->irq;
+
+ /* Add callback to free MSIs on teardown */
+ devm_add_action(dev, smmu_pmu_free_msis, dev);
+}
+
+static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
+{
+ unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
+ int irq, ret = -ENXIO;
+
+ smmu_pmu_setup_msi(pmu);
+
+ irq = pmu->irq;
+ if (irq)
+ ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
+ flags, "smmuv3-pmu", pmu);
+ return ret;
+}
+
+static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
+{
+ u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);
+
+ smmu_pmu_disable(&smmu_pmu->pmu);
+
+ /* Disable counter and interrupt */
+ writeq_relaxed(counter_present_mask,
+ smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
+ writeq_relaxed(counter_present_mask,
+ smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
+ writeq_relaxed(counter_present_mask,
+ smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
+}
+
+static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
+{
+ u32 model;
+
+ model = *(u32 *)dev_get_platdata(smmu_pmu->dev);
+
+ switch (model) {
+ case IORT_SMMU_V3_PMCG_HISI_HIP08:
+ /* HiSilicon Erratum 162001800 */
+ smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
+ break;
+ }
+
+ dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
+}
+
+static int smmu_pmu_probe(struct platform_device *pdev)
+{
+ struct smmu_pmu *smmu_pmu;
+ struct resource *res_0, *res_1;
+ u32 cfgr, reg_size;
+ u64 ceid_64[2];
+ int irq, err;
+ char *name;
+ struct device *dev = &pdev->dev;
+
+ smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
+ if (!smmu_pmu)
+ return -ENOMEM;
+
+ smmu_pmu->dev = dev;
+ platform_set_drvdata(pdev, smmu_pmu);
+
+ smmu_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = smmu_pmu_enable,
+ .pmu_disable = smmu_pmu_disable,
+ .event_init = smmu_pmu_event_init,
+ .add = smmu_pmu_event_add,
+ .del = smmu_pmu_event_del,
+ .start = smmu_pmu_event_start,
+ .stop = smmu_pmu_event_stop,
+ .read = smmu_pmu_event_read,
+ .attr_groups = smmu_pmu_attr_grps,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
+ if (IS_ERR(smmu_pmu->reg_base))
+ return PTR_ERR(smmu_pmu->reg_base);
+
+ cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
+
+ /* Determine if page 1 is present */
+ if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
+ res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
+ if (IS_ERR(smmu_pmu->reloc_base))
+ return PTR_ERR(smmu_pmu->reloc_base);
+ } else {
+ smmu_pmu->reloc_base = smmu_pmu->reg_base;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0)
+ smmu_pmu->irq = irq;
+
+ ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
+ ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
+ bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
+ SMMU_PMCG_ARCH_MAX_EVENTS);
+
+ smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;
+
+ smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
+
+ reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
+ smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
+
+ smmu_pmu_reset(smmu_pmu);
+
+ err = smmu_pmu_setup_irq(smmu_pmu);
+ if (err) {
+ dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
+ return err;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
+ (res_0->start) >> SMMU_PMCG_PA_SHIFT);
+ if (!name) {
+ dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
+ return -EINVAL;
+ }
+
+ smmu_pmu_get_acpi_options(smmu_pmu);
+
+ /* Pick one CPU to be the preferred one to use */
+ smmu_pmu->on_cpu = raw_smp_processor_id();
+ WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
+ cpumask_of(smmu_pmu->on_cpu)));
+
+ err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
+ &smmu_pmu->node);
+ if (err) {
+ dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
+ err, &res_0->start);
+ goto out_cpuhp_err;
+ }
+
+ err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
+ if (err) {
+ dev_err(dev, "Error %d registering PMU @%pa\n",
+ err, &res_0->start);
+ goto out_unregister;
+ }
+
+ dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
+ &res_0->start, smmu_pmu->num_counters,
+ smmu_pmu->global_filter ? "Global(Counter0)" :
+ "Individual");
+
+ return 0;
+
+out_unregister:
+ cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
+out_cpuhp_err:
+ put_cpu();
+ return err;
+}
+
+static int smmu_pmu_remove(struct platform_device *pdev)
+{
+ struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&smmu_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
+
+ return 0;
+}
+
+static void smmu_pmu_shutdown(struct platform_device *pdev)
+{
+ struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
+
+ smmu_pmu_disable(&smmu_pmu->pmu);
+}
+
+static struct platform_driver smmu_pmu_driver = {
+ .driver = {
+ .name = "arm-smmu-v3-pmcg",
+ },
+ .probe = smmu_pmu_probe,
+ .remove = smmu_pmu_remove,
+ .shutdown = smmu_pmu_shutdown,
+};
+
+static int __init arm_smmu_pmu_init(void)
+{
+ cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/arm/pmcg:online",
+ NULL,
+ smmu_pmu_offline_cpu);
+ if (cpuhp_state_num < 0)
+ return cpuhp_state_num;
+
+ return platform_driver_register(&smmu_pmu_driver);
+}
+module_init(arm_smmu_pmu_init);
+
+static void __exit arm_smmu_pmu_exit(void)
+{
+ platform_driver_unregister(&smmu_pmu_driver);
+ cpuhp_remove_multi_state(cpuhp_state_num);
+}
+
+module_exit(arm_smmu_pmu_exit);
+
+MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
+MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
+MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index fb1204bcc454..53772d35b36e 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -3,7 +3,8 @@
#
config PHY_SUN4I_USB
tristate "Allwinner sunxi SoC USB PHY driver"
- depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
depends on RESET_CONTROLLER
depends on EXTCON
depends on POWER_SUPPLY
@@ -19,7 +20,8 @@ config PHY_SUN4I_USB
config PHY_SUN6I_MIPI_DPHY
tristate "Allwinner A31 MIPI D-PHY Support"
- depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
depends on RESET_CONTROLLER
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
@@ -31,7 +33,8 @@ config PHY_SUN6I_MIPI_DPHY
config PHY_SUN9I_USB
tristate "Allwinner sun9i SoC USB PHY driver"
- depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
depends on RESET_CONTROLLER
depends on USB_SUPPORT
select USB_COMMON
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 4bbd9ede38c8..cc5af961778d 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -554,6 +554,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
struct sun4i_usb_phy_data *data =
container_of(work, struct sun4i_usb_phy_data, detect.work);
struct phy *phy0 = data->phys[0].phy;
+ struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
bool force_session_end, id_notify = false, vbus_notify = false;
int id_det, vbus_det;
@@ -610,6 +611,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
mutex_unlock(&phy0->mutex);
}
+ /* Enable PHY0 passby for host mode only. */
+ sun4i_usb_phy_passby(phy, !id_det);
+
/* Re-route PHY0 if necessary */
if (data->cfg->phy0_dual_route)
sun4i_usb_phy0_reroute(data, id_det);
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index 23fe1cda2f70..4c08c1ccdd04 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -36,3 +36,25 @@ config PHY_MESON_GXL_USB3
Enable this to support the Meson USB3 PHY and OTG detection
IP block found in Meson GXL and GXM SoCs.
If unsure, say N.
+
+config PHY_MESON_G12A_USB2
+ tristate "Meson G12A USB2 PHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Enable this to support the Meson USB2 PHYs found in Meson
+ G12A SoCs.
+ If unsure, say N.
+
+config PHY_MESON_G12A_USB3_PCIE
+ tristate "Meson G12A USB3+PCIE Combo PHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Enable this to support the Meson USB3 + PCIE Combo PHY found
+ in Meson G12A SoCs.
+ If unsure, say N.
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index 4fd8848c194d..fdd008e1b19b 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
+obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
obj-$(CONFIG_PHY_MESON_GXL_USB3) += phy-meson-gxl-usb3.o
+obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
new file mode 100644
index 000000000000..9065ffc85eb4
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Meson G12A USB2 PHY driver
+ *
+ * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define PHY_CTRL_R0 0x0
+#define PHY_CTRL_R1 0x4
+#define PHY_CTRL_R2 0x8
+#define PHY_CTRL_R3 0xc
+ #define PHY_CTRL_R3_SQUELCH_REF GENMASK(1, 0)
+ #define PHY_CTRL_R3_HSDIC_REF GENMASK(3, 2)
+ #define PHY_CTRL_R3_DISC_THRESH GENMASK(7, 4)
+
+#define PHY_CTRL_R4 0x10
+ #define PHY_CTRL_R4_CALIB_CODE_7_0 GENMASK(7, 0)
+ #define PHY_CTRL_R4_CALIB_CODE_15_8 GENMASK(15, 8)
+ #define PHY_CTRL_R4_CALIB_CODE_23_16 GENMASK(23, 16)
+ #define PHY_CTRL_R4_I_C2L_CAL_EN BIT(24)
+ #define PHY_CTRL_R4_I_C2L_CAL_RESET_N BIT(25)
+ #define PHY_CTRL_R4_I_C2L_CAL_DONE BIT(26)
+ #define PHY_CTRL_R4_TEST_BYPASS_MODE_EN BIT(27)
+ #define PHY_CTRL_R4_I_C2L_BIAS_TRIM_1_0 GENMASK(29, 28)
+ #define PHY_CTRL_R4_I_C2L_BIAS_TRIM_3_2 GENMASK(31, 30)
+
+#define PHY_CTRL_R5 0x14
+#define PHY_CTRL_R6 0x18
+#define PHY_CTRL_R7 0x1c
+#define PHY_CTRL_R8 0x20
+#define PHY_CTRL_R9 0x24
+#define PHY_CTRL_R10 0x28
+#define PHY_CTRL_R11 0x2c
+#define PHY_CTRL_R12 0x30
+#define PHY_CTRL_R13 0x34
+ #define PHY_CTRL_R13_CUSTOM_PATTERN_19 GENMASK(7, 0)
+ #define PHY_CTRL_R13_LOAD_STAT BIT(14)
+ #define PHY_CTRL_R13_UPDATE_PMA_SIGNALS BIT(15)
+ #define PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET GENMASK(20, 16)
+ #define PHY_CTRL_R13_CLEAR_HOLD_HS_DISCONNECT BIT(21)
+ #define PHY_CTRL_R13_BYPASS_HOST_DISCONNECT_VAL BIT(22)
+ #define PHY_CTRL_R13_BYPASS_HOST_DISCONNECT_EN BIT(23)
+ #define PHY_CTRL_R13_I_C2L_HS_EN BIT(24)
+ #define PHY_CTRL_R13_I_C2L_FS_EN BIT(25)
+ #define PHY_CTRL_R13_I_C2L_LS_EN BIT(26)
+ #define PHY_CTRL_R13_I_C2L_HS_OE BIT(27)
+ #define PHY_CTRL_R13_I_C2L_FS_OE BIT(28)
+ #define PHY_CTRL_R13_I_C2L_HS_RX_EN BIT(29)
+ #define PHY_CTRL_R13_I_C2L_FSLS_RX_EN BIT(30)
+
+#define PHY_CTRL_R14 0x38
+ #define PHY_CTRL_R14_I_RDP_EN BIT(0)
+ #define PHY_CTRL_R14_I_RPU_SW1_EN BIT(1)
+ #define PHY_CTRL_R14_I_RPU_SW2_EN GENMASK(2, 3)
+ #define PHY_CTRL_R14_PG_RSTN BIT(4)
+ #define PHY_CTRL_R14_I_C2L_DATA_16_8 BIT(5)
+ #define PHY_CTRL_R14_I_C2L_ASSERT_SINGLE_EN_ZERO BIT(6)
+ #define PHY_CTRL_R14_BYPASS_CTRL_7_0 GENMASK(15, 8)
+ #define PHY_CTRL_R14_BYPASS_CTRL_15_8 GENMASK(23, 16)
+
+#define PHY_CTRL_R15 0x3c
+#define PHY_CTRL_R16 0x40
+ #define PHY_CTRL_R16_MPLL_M GENMASK(8, 0)
+ #define PHY_CTRL_R16_MPLL_N GENMASK(14, 10)
+ #define PHY_CTRL_R16_MPLL_TDC_MODE BIT(20)
+ #define PHY_CTRL_R16_MPLL_SDM_EN BIT(21)
+ #define PHY_CTRL_R16_MPLL_LOAD BIT(22)
+ #define PHY_CTRL_R16_MPLL_DCO_SDM_EN BIT(23)
+ #define PHY_CTRL_R16_MPLL_LOCK_LONG GENMASK(25, 24)
+ #define PHY_CTRL_R16_MPLL_LOCK_F BIT(26)
+ #define PHY_CTRL_R16_MPLL_FAST_LOCK BIT(27)
+ #define PHY_CTRL_R16_MPLL_EN BIT(28)
+ #define PHY_CTRL_R16_MPLL_RESET BIT(29)
+ #define PHY_CTRL_R16_MPLL_LOCK BIT(30)
+ #define PHY_CTRL_R16_MPLL_LOCK_DIG BIT(31)
+
+#define PHY_CTRL_R17 0x44
+ #define PHY_CTRL_R17_MPLL_FRAC_IN GENMASK(13, 0)
+ #define PHY_CTRL_R17_MPLL_FIX_EN BIT(16)
+ #define PHY_CTRL_R17_MPLL_LAMBDA1 GENMASK(19, 17)
+ #define PHY_CTRL_R17_MPLL_LAMBDA0 GENMASK(22, 20)
+ #define PHY_CTRL_R17_MPLL_FILTER_MODE BIT(23)
+ #define PHY_CTRL_R17_MPLL_FILTER_PVT2 GENMASK(27, 24)
+ #define PHY_CTRL_R17_MPLL_FILTER_PVT1 GENMASK(31, 28)
+
+#define PHY_CTRL_R18 0x48
+ #define PHY_CTRL_R18_MPLL_LKW_SEL GENMASK(1, 0)
+ #define PHY_CTRL_R18_MPLL_LK_W GENMASK(5, 2)
+ #define PHY_CTRL_R18_MPLL_LK_S GENMASK(11, 6)
+ #define PHY_CTRL_R18_MPLL_DCO_M_EN BIT(12)
+ #define PHY_CTRL_R18_MPLL_DCO_CLK_SEL BIT(13)
+ #define PHY_CTRL_R18_MPLL_PFD_GAIN GENMASK(15, 14)
+ #define PHY_CTRL_R18_MPLL_ROU GENMASK(18, 16)
+ #define PHY_CTRL_R18_MPLL_DATA_SEL GENMASK(21, 19)
+ #define PHY_CTRL_R18_MPLL_BIAS_ADJ GENMASK(23, 22)
+ #define PHY_CTRL_R18_MPLL_BB_MODE GENMASK(25, 24)
+ #define PHY_CTRL_R18_MPLL_ALPHA GENMASK(28, 26)
+ #define PHY_CTRL_R18_MPLL_ADJ_LDO GENMASK(30, 29)
+ #define PHY_CTRL_R18_MPLL_ACG_RANGE BIT(31)
+
+#define PHY_CTRL_R19 0x4c
+#define PHY_CTRL_R20 0x50
+ #define PHY_CTRL_R20_USB2_IDDET_EN BIT(0)
+ #define PHY_CTRL_R20_USB2_OTG_VBUS_TRIM_2_0 GENMASK(3, 1)
+ #define PHY_CTRL_R20_USB2_OTG_VBUSDET_EN BIT(4)
+ #define PHY_CTRL_R20_USB2_AMON_EN BIT(5)
+ #define PHY_CTRL_R20_USB2_CAL_CODE_R5 BIT(6)
+ #define PHY_CTRL_R20_BYPASS_OTG_DET BIT(7)
+ #define PHY_CTRL_R20_USB2_DMON_EN BIT(8)
+ #define PHY_CTRL_R20_USB2_DMON_SEL_3_0 GENMASK(12, 9)
+ #define PHY_CTRL_R20_USB2_EDGE_DRV_EN BIT(13)
+ #define PHY_CTRL_R20_USB2_EDGE_DRV_TRIM_1_0 GENMASK(15, 14)
+ #define PHY_CTRL_R20_USB2_BGR_ADJ_4_0 GENMASK(20, 16)
+ #define PHY_CTRL_R20_USB2_BGR_START BIT(21)
+ #define PHY_CTRL_R20_USB2_BGR_VREF_4_0 GENMASK(28, 24)
+ #define PHY_CTRL_R20_USB2_BGR_DBG_1_0 GENMASK(30, 29)
+ #define PHY_CTRL_R20_BYPASS_CAL_DONE_R5 BIT(31)
+
+#define PHY_CTRL_R21 0x54
+ #define PHY_CTRL_R21_USB2_BGR_FORCE BIT(0)
+ #define PHY_CTRL_R21_USB2_CAL_ACK_EN BIT(1)
+ #define PHY_CTRL_R21_USB2_OTG_ACA_EN BIT(2)
+ #define PHY_CTRL_R21_USB2_TX_STRG_PD BIT(3)
+ #define PHY_CTRL_R21_USB2_OTG_ACA_TRIM_1_0 GENMASK(5, 4)
+ #define PHY_CTRL_R21_BYPASS_UTMI_CNTR GENMASK(15, 6)
+ #define PHY_CTRL_R21_BYPASS_UTMI_REG GENMASK(25, 20)
+
+#define PHY_CTRL_R22 0x58
+#define PHY_CTRL_R23 0x5c
+
+#define RESET_COMPLETE_TIME 1000
+#define PLL_RESET_COMPLETE_TIME 100
+
+struct phy_meson_g12a_usb2_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct reset_control *reset;
+};
+
+static const struct regmap_config phy_meson_g12a_usb2_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PHY_CTRL_R23,
+};
+
+static int phy_meson_g12a_usb2_init(struct phy *phy)
+{
+ struct phy_meson_g12a_usb2_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
+
+ udelay(RESET_COMPLETE_TIME);
+
+ /* usb2_otg_aca_en == 0 */
+ regmap_update_bits(priv->regmap, PHY_CTRL_R21,
+ PHY_CTRL_R21_USB2_OTG_ACA_EN, 0);
+
+ /* PLL Setup : 24MHz * 20 / 1 = 480MHz */
+ regmap_write(priv->regmap, PHY_CTRL_R16,
+ FIELD_PREP(PHY_CTRL_R16_MPLL_M, 20) |
+ FIELD_PREP(PHY_CTRL_R16_MPLL_N, 1) |
+ PHY_CTRL_R16_MPLL_LOAD |
+ FIELD_PREP(PHY_CTRL_R16_MPLL_LOCK_LONG, 1) |
+ PHY_CTRL_R16_MPLL_FAST_LOCK |
+ PHY_CTRL_R16_MPLL_EN |
+ PHY_CTRL_R16_MPLL_RESET);
+
+ regmap_write(priv->regmap, PHY_CTRL_R17,
+ FIELD_PREP(PHY_CTRL_R17_MPLL_FRAC_IN, 0) |
+ FIELD_PREP(PHY_CTRL_R17_MPLL_LAMBDA1, 7) |
+ FIELD_PREP(PHY_CTRL_R17_MPLL_LAMBDA0, 7) |
+ FIELD_PREP(PHY_CTRL_R17_MPLL_FILTER_PVT2, 2) |
+ FIELD_PREP(PHY_CTRL_R17_MPLL_FILTER_PVT1, 9));
+
+ regmap_write(priv->regmap, PHY_CTRL_R18,
+ FIELD_PREP(PHY_CTRL_R18_MPLL_LKW_SEL, 1) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_LK_W, 9) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_LK_S, 0x27) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_PFD_GAIN, 1) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_ROU, 7) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_DATA_SEL, 3) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_BIAS_ADJ, 1) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_BB_MODE, 0) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_ALPHA, 3) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_ADJ_LDO, 1) |
+ PHY_CTRL_R18_MPLL_ACG_RANGE);
+
+ udelay(PLL_RESET_COMPLETE_TIME);
+
+ /* UnReset PLL */
+ regmap_write(priv->regmap, PHY_CTRL_R16,
+ FIELD_PREP(PHY_CTRL_R16_MPLL_M, 20) |
+ FIELD_PREP(PHY_CTRL_R16_MPLL_N, 1) |
+ PHY_CTRL_R16_MPLL_LOAD |
+ FIELD_PREP(PHY_CTRL_R16_MPLL_LOCK_LONG, 1) |
+ PHY_CTRL_R16_MPLL_FAST_LOCK |
+ PHY_CTRL_R16_MPLL_EN);
+
+ /* PHY Tuning */
+ regmap_write(priv->regmap, PHY_CTRL_R20,
+ FIELD_PREP(PHY_CTRL_R20_USB2_OTG_VBUS_TRIM_2_0, 4) |
+ PHY_CTRL_R20_USB2_OTG_VBUSDET_EN |
+ FIELD_PREP(PHY_CTRL_R20_USB2_DMON_SEL_3_0, 15) |
+ PHY_CTRL_R20_USB2_EDGE_DRV_EN |
+ FIELD_PREP(PHY_CTRL_R20_USB2_EDGE_DRV_TRIM_1_0, 3) |
+ FIELD_PREP(PHY_CTRL_R20_USB2_BGR_ADJ_4_0, 0) |
+ FIELD_PREP(PHY_CTRL_R20_USB2_BGR_VREF_4_0, 0) |
+ FIELD_PREP(PHY_CTRL_R20_USB2_BGR_DBG_1_0, 0));
+
+ regmap_write(priv->regmap, PHY_CTRL_R4,
+ FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_7_0, 0xf) |
+ FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_15_8, 0xf) |
+ FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_23_16, 0xf) |
+ PHY_CTRL_R4_TEST_BYPASS_MODE_EN |
+ FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_1_0, 0) |
+ FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_3_2, 0));
+
+ /* Tuning Disconnect Threshold */
+ regmap_write(priv->regmap, PHY_CTRL_R3,
+ FIELD_PREP(PHY_CTRL_R3_SQUELCH_REF, 0) |
+ FIELD_PREP(PHY_CTRL_R3_HSDIC_REF, 1) |
+ FIELD_PREP(PHY_CTRL_R3_DISC_THRESH, 3));
+
+ /* Analog Settings */
+ regmap_write(priv->regmap, PHY_CTRL_R14, 0);
+ regmap_write(priv->regmap, PHY_CTRL_R13,
+ PHY_CTRL_R13_UPDATE_PMA_SIGNALS |
+ FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7));
+
+ return 0;
+}
+
+static int phy_meson_g12a_usb2_exit(struct phy *phy)
+{
+ struct phy_meson_g12a_usb2_priv *priv = phy_get_drvdata(phy);
+
+ return reset_control_reset(priv->reset);
+}
+
+/* set_mode is not needed, mode setting is handled via the UTMI bus */
+static const struct phy_ops phy_meson_g12a_usb2_ops = {
+ .init = phy_meson_g12a_usb2_init,
+ .exit = phy_meson_g12a_usb2_exit,
+ .owner = THIS_MODULE,
+};
+
+static int phy_meson_g12a_usb2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy_meson_g12a_usb2_priv *priv;
+ struct phy *phy;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ platform_set_drvdata(pdev, priv);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, base,
+ &phy_meson_g12a_usb2_regmap_conf);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->clk = devm_clk_get(dev, "xtal");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->reset = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
+
+ ret = reset_control_deassert(priv->reset);
+ if (ret)
+ return ret;
+
+ phy = devm_phy_create(dev, NULL, &phy_meson_g12a_usb2_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to create PHY\n");
+
+ return ret;
+ }
+
+ phy_set_bus_width(phy, 8);
+ phy_set_drvdata(phy, priv);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id phy_meson_g12a_usb2_of_match[] = {
+ { .compatible = "amlogic,g12a-usb2-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_meson_g12a_usb2_of_match);
+
+static struct platform_driver phy_meson_g12a_usb2_driver = {
+ .probe = phy_meson_g12a_usb2_probe,
+ .driver = {
+ .name = "phy-meson-g12a-usb2",
+ .of_match_table = phy_meson_g12a_usb2_of_match,
+ },
+};
+module_platform_driver(phy_meson_g12a_usb2_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Meson G12A USB2 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
new file mode 100644
index 000000000000..6233a7979a93
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Amlogic G12A USB3 + PCIE Combo PHY driver
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
+
+#define PHY_R0 0x00
+ #define PHY_R0_PCIE_POWER_STATE GENMASK(4, 0)
+ #define PHY_R0_PCIE_USB3_SWITCH GENMASK(6, 5)
+
+#define PHY_R1 0x04
+ #define PHY_R1_PHY_TX1_TERM_OFFSET GENMASK(4, 0)
+ #define PHY_R1_PHY_TX0_TERM_OFFSET GENMASK(9, 5)
+ #define PHY_R1_PHY_RX1_EQ GENMASK(12, 10)
+ #define PHY_R1_PHY_RX0_EQ GENMASK(15, 13)
+ #define PHY_R1_PHY_LOS_LEVEL GENMASK(20, 16)
+ #define PHY_R1_PHY_LOS_BIAS GENMASK(23, 21)
+ #define PHY_R1_PHY_REF_CLKDIV2 BIT(24)
+ #define PHY_R1_PHY_MPLL_MULTIPLIER GENMASK(31, 25)
+
+#define PHY_R2 0x08
+ #define PHY_R2_PCS_TX_DEEMPH_GEN2_6DB GENMASK(5, 0)
+ #define PHY_R2_PCS_TX_DEEMPH_GEN2_3P5DB GENMASK(11, 6)
+ #define PHY_R2_PCS_TX_DEEMPH_GEN1 GENMASK(17, 12)
+ #define PHY_R2_PHY_TX_VBOOST_LVL GENMASK(20, 18)
+
+#define PHY_R4 0x10
+ #define PHY_R4_PHY_CR_WRITE BIT(0)
+ #define PHY_R4_PHY_CR_READ BIT(1)
+ #define PHY_R4_PHY_CR_DATA_IN GENMASK(17, 2)
+ #define PHY_R4_PHY_CR_CAP_DATA BIT(18)
+ #define PHY_R4_PHY_CR_CAP_ADDR BIT(19)
+
+#define PHY_R5 0x14
+ #define PHY_R5_PHY_CR_DATA_OUT GENMASK(15, 0)
+ #define PHY_R5_PHY_CR_ACK BIT(16)
+ #define PHY_R5_PHY_BS_OUT BIT(17)
+
+struct phy_g12a_usb3_pcie_priv {
+ struct regmap *regmap;
+ struct regmap *regmap_cr;
+ struct clk *clk_ref;
+ struct reset_control *reset;
+ struct phy *phy;
+ unsigned int mode;
+};
+
+static const struct regmap_config phy_g12a_usb3_pcie_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PHY_R5,
+};
+
+static int phy_g12a_usb3_pcie_cr_bus_addr(struct phy_g12a_usb3_pcie_priv *priv,
+ unsigned int addr)
+{
+ unsigned int val, reg;
+ int ret;
+
+ reg = FIELD_PREP(PHY_R4_PHY_CR_DATA_IN, addr);
+
+ regmap_write(priv->regmap, PHY_R4, reg);
+ regmap_write(priv->regmap, PHY_R4, reg);
+
+ regmap_write(priv->regmap, PHY_R4, reg | PHY_R4_PHY_CR_CAP_ADDR);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_R5, val,
+ (val & PHY_R5_PHY_CR_ACK),
+ 5, 1000);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->regmap, PHY_R4, reg);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_R5, val,
+ !(val & PHY_R5_PHY_CR_ACK),
+ 5, 1000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_cr_bus_read(void *context, unsigned int addr,
+ unsigned int *data)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = context;
+ unsigned int val;
+ int ret;
+
+ ret = phy_g12a_usb3_pcie_cr_bus_addr(priv, addr);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->regmap, PHY_R4, 0);
+ regmap_write(priv->regmap, PHY_R4, PHY_R4_PHY_CR_READ);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_R5, val,
+ (val & PHY_R5_PHY_CR_ACK),
+ 5, 1000);
+ if (ret)
+ return ret;
+
+ *data = FIELD_GET(PHY_R5_PHY_CR_DATA_OUT, val);
+
+ regmap_write(priv->regmap, PHY_R4, 0);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_R5, val,
+ !(val & PHY_R5_PHY_CR_ACK),
+ 5, 1000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_cr_bus_write(void *context, unsigned int addr,
+ unsigned int data)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = context;
+ unsigned int val, reg;
+ int ret;
+
+ ret = phy_g12a_usb3_pcie_cr_bus_addr(priv, addr);
+ if (ret)
+ return ret;
+
+ reg = FIELD_PREP(PHY_R4_PHY_CR_DATA_IN, data);
+
+ regmap_write(priv->regmap, PHY_R4, reg);
+ regmap_write(priv->regmap, PHY_R4, reg);
+
+ regmap_write(priv->regmap, PHY_R4, reg | PHY_R4_PHY_CR_CAP_DATA);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_R5, val,
+ (val & PHY_R5_PHY_CR_ACK),
+ 5, 1000);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->regmap, PHY_R4, reg);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_R5, val,
+ (val & PHY_R5_PHY_CR_ACK) == 0,
+ 5, 1000);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->regmap, PHY_R4, reg);
+
+ regmap_write(priv->regmap, PHY_R4, reg | PHY_R4_PHY_CR_WRITE);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_R5, val,
+ (val & PHY_R5_PHY_CR_ACK),
+ 5, 1000);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->regmap, PHY_R4, reg);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_R5, val,
+ (val & PHY_R5_PHY_CR_ACK) == 0,
+ 5, 1000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct regmap_config phy_g12a_usb3_pcie_cr_regmap_conf = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_read = phy_g12a_usb3_pcie_cr_bus_read,
+ .reg_write = phy_g12a_usb3_pcie_cr_bus_write,
+ .max_register = 0xffff,
+ .fast_io = true,
+};
+
+static int phy_g12a_usb3_init(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+ int data, ret;
+
+ /* Switch PHY to USB3 */
+ /* TODO figure out how to handle when PCIe was set in the bootloader */
+ regmap_update_bits(priv->regmap, PHY_R0,
+ PHY_R0_PCIE_USB3_SWITCH,
+ PHY_R0_PCIE_USB3_SWITCH);
+
+ /*
+ * WORKAROUND: There is an SSPHY suspend bug due to which USB
+ * enumerates in HS mode instead of SS mode. Work around it by
+ * asserting LANE0.TX_ALT_BLOCK.EN_ALT_BUS so that TX uses the
+ * alt bus mode.
+ */
+ ret = regmap_update_bits(priv->regmap_cr, 0x102d, BIT(7), BIT(7));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(priv->regmap_cr, 0x1010, 0xff0, 20);
+ if (ret)
+ return ret;
+
+ /*
+ * Fix RX Equalization setting as follows
+ * LANE0.RX_OVRD_IN_HI. RX_EQ_EN set to 0
+ * LANE0.RX_OVRD_IN_HI.RX_EQ_EN_OVRD set to 1
+ * LANE0.RX_OVRD_IN_HI.RX_EQ set to 3
+ * LANE0.RX_OVRD_IN_HI.RX_EQ_OVRD set to 1
+ */
+ ret = regmap_read(priv->regmap_cr, 0x1006, &data);
+ if (ret)
+ return ret;
+
+ data &= ~BIT(6);
+ data |= BIT(7);
+ data &= ~(0x7 << 8);
+ data |= (0x3 << 8);
+ data |= (1 << 11);
+ ret = regmap_write(priv->regmap_cr, 0x1006, data);
+ if (ret)
+ return ret;
+
+ /*
+ * Set EQ and TX launch amplitudes as follows
+ * LANE0.TX_OVRD_DRV_LO.PREEMPH set to 22
+ * LANE0.TX_OVRD_DRV_LO.AMPLITUDE set to 127
+ * LANE0.TX_OVRD_DRV_LO.EN set to 1.
+ */
+ ret = regmap_read(priv->regmap_cr, 0x1002, &data);
+ if (ret)
+ return ret;
+
+ data &= ~0x3f80;
+ data |= (0x16 << 7);
+ data &= ~0x7f;
+ data |= (0x7f | BIT(14));
+ ret = regmap_write(priv->regmap_cr, 0x1002, data);
+ if (ret)
+ return ret;
+
+ /* MPLL_LOOP_CTL.PROP_CNTRL = 8 */
+ ret = regmap_update_bits(priv->regmap_cr, 0x30, 0xf << 4, 8 << 4);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(priv->regmap, PHY_R2,
+ PHY_R2_PHY_TX_VBOOST_LVL,
+ FIELD_PREP(PHY_R2_PHY_TX_VBOOST_LVL, 0x4));
+
+ regmap_update_bits(priv->regmap, PHY_R1,
+ PHY_R1_PHY_LOS_BIAS | PHY_R1_PHY_LOS_LEVEL,
+ FIELD_PREP(PHY_R1_PHY_LOS_BIAS, 4) |
+ FIELD_PREP(PHY_R1_PHY_LOS_LEVEL, 9));
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_init(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
+
+ if (priv->mode == PHY_TYPE_USB3)
+ return phy_g12a_usb3_init(phy);
+
+ /* Power UP PCIE */
+ /* TODO: handle the case where the bootloader has already set USB3 mode */
+ regmap_update_bits(priv->regmap, PHY_R0,
+ PHY_R0_PCIE_POWER_STATE,
+ FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_exit(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+ return reset_control_reset(priv->reset);
+}
+
+static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = dev_get_drvdata(dev);
+ unsigned int mode;
+
+ if (args->args_count < 1) {
+ dev_err(dev, "invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mode = args->args[0];
+
+ if (mode != PHY_TYPE_USB3 && mode != PHY_TYPE_PCIE) {
+ dev_err(dev, "invalid phy mode select argument\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv->mode = mode;
+
+ return priv->phy;
+}
+
+static const struct phy_ops phy_g12a_usb3_pcie_ops = {
+ .init = phy_g12a_usb3_pcie_init,
+ .exit = phy_g12a_usb3_pcie_exit,
+ .owner = THIS_MODULE,
+};
+
+static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct phy_g12a_usb3_pcie_priv *priv;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, base,
+ &phy_g12a_usb3_pcie_regmap_conf);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->regmap_cr = devm_regmap_init(dev, NULL, priv,
+ &phy_g12a_usb3_pcie_cr_regmap_conf);
+ if (IS_ERR(priv->regmap_cr))
+ return PTR_ERR(priv->regmap_cr);
+
+ priv->clk_ref = devm_clk_get(dev, "ref_clk");
+ if (IS_ERR(priv->clk_ref))
+ return PTR_ERR(priv->clk_ref);
+
+ ret = clk_prepare_enable(priv->clk_ref);
+ if (ret)
+ goto err_disable_clk_ref;
+
+ priv->reset = devm_reset_control_array_get(dev, false, false);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
+
+ priv->phy = devm_phy_create(dev, np, &phy_g12a_usb3_pcie_ops);
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to create PHY\n");
+
+ return ret;
+ }
+
+ phy_set_drvdata(priv->phy, priv);
+ dev_set_drvdata(dev, priv);
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ phy_g12a_usb3_pcie_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_disable_clk_ref:
+ clk_disable_unprepare(priv->clk_ref);
+
+ return ret;
+}
+
+static const struct of_device_id phy_g12a_usb3_pcie_of_match[] = {
+ { .compatible = "amlogic,g12a-usb3-pcie-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_g12a_usb3_pcie_of_match);
+
+static struct platform_driver phy_g12a_usb3_pcie_driver = {
+ .probe = phy_g12a_usb3_pcie_probe,
+ .driver = {
+ .name = "phy-g12a-usb3-pcie",
+ .of_match_table = phy_g12a_usb3_pcie_of_match,
+ },
+};
+module_platform_driver(phy_g12a_usb3_pcie_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic G12A USB3 + PCIE Combo PHY driver");
+MODULE_LICENSE("GPL v2");
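
A side note on the CR-bus programming above, not part of the patch: the RX-equalization comment in phy_g12a_usb3_init() names the LANE0.RX_OVRD_IN_HI fields symbolically while the code pokes raw bits at address 0x1006. Assuming the bit positions stated in that comment (the macro names below are invented for illustration; only the address and bit layout come from the patch), the same read-modify-write could be expressed with named masks through the CR-bus regmap:

    #define RX_OVRD_IN_HI			0x1006	/* hypothetical name */
    #define RX_OVRD_IN_HI_RX_EQ_EN		BIT(6)
    #define RX_OVRD_IN_HI_RX_EQ_EN_OVRD	BIT(7)
    #define RX_OVRD_IN_HI_RX_EQ		GENMASK(10, 8)
    #define RX_OVRD_IN_HI_RX_EQ_OVRD		BIT(11)

    /* clear RX_EQ_EN, set RX_EQ_EN_OVRD, RX_EQ = 3, set RX_EQ_OVRD */
    ret = regmap_update_bits(priv->regmap_cr, RX_OVRD_IN_HI,
			     RX_OVRD_IN_HI_RX_EQ_EN |
			     RX_OVRD_IN_HI_RX_EQ_EN_OVRD |
			     RX_OVRD_IN_HI_RX_EQ |
			     RX_OVRD_IN_HI_RX_EQ_OVRD,
			     RX_OVRD_IN_HI_RX_EQ_EN_OVRD |
			     FIELD_PREP(RX_OVRD_IN_HI_RX_EQ, 0x3) |
			     RX_OVRD_IN_HI_RX_EQ_OVRD);
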
diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
index 148ef0bdb9c1..4cbee412f2b0 100644
--- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
@@ -261,14 +261,9 @@ static int phy_meson_gxl_usb2_probe(struct platform_device *pdev)
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
- priv->clk = devm_clk_get(dev, "phy");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- if (ret == -ENOENT)
- priv->clk = NULL;
- else
- return ret;
- }
+ priv->clk = devm_clk_get_optional(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
priv->reset = devm_reset_control_get_optional_shared(dev, "phy");
if (IS_ERR(priv->reset))
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index aa917a61071d..f30f4819c3bb 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -10,6 +10,17 @@ config PHY_CYGNUS_PCIE
Enable this to support the Broadcom Cygnus PCIe PHY.
If unsure, say N.
+config PHY_BCM_SR_USB
+ tristate "Broadcom Stingray USB PHY driver"
+ depends on OF && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select GENERIC_PHY
+ default ARCH_BCM_IPROC
+ help
+ Enable this to support the Broadcom Stingray USB PHY
+ driver. It supports all versions of the SuperSpeed and
+ HighSpeed PHYs.
+ If unsure, say N.
+
config BCM_KONA_USB2_PHY
tristate "Broadcom Kona USB2 PHY Driver"
depends on HAS_IOMEM
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index 0f60184e6662..f453c7d3ffff 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_PHY_BRCM_USB) += phy-brcm-usb-dvr.o
phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o
obj-$(CONFIG_PHY_BCM_SR_PCIE) += phy-bcm-sr-pcie.o
+obj-$(CONFIG_PHY_BCM_SR_USB) += phy-bcm-sr-usb.o
diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c
new file mode 100644
index 000000000000..fe6c58910e4c
--- /dev/null
+++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Broadcom
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+enum bcm_usb_phy_version {
+ BCM_SR_USB_COMBO_PHY,
+ BCM_SR_USB_HS_PHY,
+};
+
+enum bcm_usb_phy_reg {
+ PLL_NDIV_FRAC,
+ PLL_NDIV_INT,
+ PLL_CTRL,
+ PHY_CTRL,
+ PHY_PLL_CTRL,
+};
+
+/* USB PHY registers */
+
+static const u8 bcm_usb_combo_phy_ss[] = {
+ [PLL_CTRL] = 0x18,
+ [PHY_CTRL] = 0x14,
+};
+
+static const u8 bcm_usb_combo_phy_hs[] = {
+ [PLL_NDIV_FRAC] = 0x04,
+ [PLL_NDIV_INT] = 0x08,
+ [PLL_CTRL] = 0x0c,
+ [PHY_CTRL] = 0x10,
+};
+
+#define HSPLL_NDIV_INT_VAL 0x13
+#define HSPLL_NDIV_FRAC_VAL 0x1005
+
+static const u8 bcm_usb_hs_phy[] = {
+ [PLL_NDIV_FRAC] = 0x0,
+ [PLL_NDIV_INT] = 0x4,
+ [PLL_CTRL] = 0x8,
+ [PHY_CTRL] = 0xc,
+};
+
+enum pll_ctrl_bits {
+ PLL_RESETB,
+ SSPLL_SUSPEND_EN,
+ PLL_SEQ_START,
+ PLL_LOCK,
+ PLL_PDIV,
+};
+
+static const u8 u3pll_ctrl[] = {
+ [PLL_RESETB] = 0,
+ [SSPLL_SUSPEND_EN] = 1,
+ [PLL_SEQ_START] = 2,
+ [PLL_LOCK] = 3,
+};
+
+#define HSPLL_PDIV_MASK 0xF
+#define HSPLL_PDIV_VAL 0x1
+
+static const u8 u2pll_ctrl[] = {
+ [PLL_PDIV] = 1,
+ [PLL_RESETB] = 5,
+ [PLL_LOCK] = 6,
+};
+
+enum bcm_usb_phy_ctrl_bits {
+ CORERDY,
+ AFE_LDO_PWRDWNB,
+ AFE_PLL_PWRDWNB,
+ AFE_BG_PWRDWNB,
+ PHY_ISO,
+ PHY_RESETB,
+ PHY_PCTL,
+};
+
+#define PHY_PCTL_MASK 0xffff
+/*
+ * The PCTL value 0x0806 sets the bits below:
+ * BIT-8: refclk divider 1
+ * BIT-3:2: device mode; the mode setting has no effect
+ * BIT-1: soft reset, active low
+ */
+#define HSPHY_PCTL_VAL 0x0806
+#define SSPHY_PCTL_VAL 0x0006
+
+static const u8 u3phy_ctrl[] = {
+ [PHY_RESETB] = 1,
+ [PHY_PCTL] = 2,
+};
+
+static const u8 u2phy_ctrl[] = {
+ [CORERDY] = 0,
+ [AFE_LDO_PWRDWNB] = 1,
+ [AFE_PLL_PWRDWNB] = 2,
+ [AFE_BG_PWRDWNB] = 3,
+ [PHY_ISO] = 4,
+ [PHY_RESETB] = 5,
+ [PHY_PCTL] = 6,
+};
+
+struct bcm_usb_phy_cfg {
+ uint32_t type;
+ uint32_t version;
+ void __iomem *regs;
+ struct phy *phy;
+ const u8 *offset;
+};
+
+#define PLL_LOCK_RETRY_COUNT 1000
+
+enum bcm_usb_phy_type {
+ USB_HS_PHY,
+ USB_SS_PHY,
+};
+
+#define NUM_BCM_SR_USB_COMBO_PHYS 2
+
+static inline void bcm_usb_reg32_clrbits(void __iomem *addr, uint32_t clear)
+{
+ writel(readl(addr) & ~clear, addr);
+}
+
+static inline void bcm_usb_reg32_setbits(void __iomem *addr, uint32_t set)
+{
+ writel(readl(addr) | set, addr);
+}
+
+static int bcm_usb_pll_lock_check(void __iomem *addr, u32 bit)
+{
+ int retry;
+ u32 rd_data;
+
+ retry = PLL_LOCK_RETRY_COUNT;
+ do {
+ rd_data = readl(addr);
+ if (rd_data & bit)
+ return 0;
+ udelay(1);
+ } while (--retry > 0);
+
+ pr_err("%s: FAIL\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int bcm_usb_ss_phy_init(struct bcm_usb_phy_cfg *phy_cfg)
+{
+ int ret = 0;
+ void __iomem *regs = phy_cfg->regs;
+ const u8 *offset;
+ u32 rd_data;
+
+ offset = phy_cfg->offset;
+
+ /* Set pctl with mode and soft reset */
+ rd_data = readl(regs + offset[PHY_CTRL]);
+ rd_data &= ~(PHY_PCTL_MASK << u3phy_ctrl[PHY_PCTL]);
+ rd_data |= (SSPHY_PCTL_VAL << u3phy_ctrl[PHY_PCTL]);
+ writel(rd_data, regs + offset[PHY_CTRL]);
+
+ bcm_usb_reg32_clrbits(regs + offset[PLL_CTRL],
+ BIT(u3pll_ctrl[SSPLL_SUSPEND_EN]));
+ bcm_usb_reg32_setbits(regs + offset[PLL_CTRL],
+ BIT(u3pll_ctrl[PLL_SEQ_START]));
+ bcm_usb_reg32_setbits(regs + offset[PLL_CTRL],
+ BIT(u3pll_ctrl[PLL_RESETB]));
+
+ /* Maximum timeout for PLL reset done */
+ msleep(30);
+
+ ret = bcm_usb_pll_lock_check(regs + offset[PLL_CTRL],
+ BIT(u3pll_ctrl[PLL_LOCK]));
+
+ return ret;
+}
+
+static int bcm_usb_hs_phy_init(struct bcm_usb_phy_cfg *phy_cfg)
+{
+ int ret = 0;
+ void __iomem *regs = phy_cfg->regs;
+ const u8 *offset;
+ u32 rd_data;
+
+ offset = phy_cfg->offset;
+
+ writel(HSPLL_NDIV_INT_VAL, regs + offset[PLL_NDIV_INT]);
+ writel(HSPLL_NDIV_FRAC_VAL, regs + offset[PLL_NDIV_FRAC]);
+
+ rd_data = readl(regs + offset[PLL_CTRL]);
+ rd_data &= ~(HSPLL_PDIV_MASK << u2pll_ctrl[PLL_PDIV]);
+ rd_data |= (HSPLL_PDIV_VAL << u2pll_ctrl[PLL_PDIV]);
+ writel(rd_data, regs + offset[PLL_CTRL]);
+
+ /* Set Core Ready high */
+ bcm_usb_reg32_setbits(regs + offset[PHY_CTRL],
+ BIT(u2phy_ctrl[CORERDY]));
+
+ /* Maximum timeout for Core Ready done */
+ msleep(30);
+
+ bcm_usb_reg32_setbits(regs + offset[PLL_CTRL],
+ BIT(u2pll_ctrl[PLL_RESETB]));
+ bcm_usb_reg32_setbits(regs + offset[PHY_CTRL],
+ BIT(u2phy_ctrl[PHY_RESETB]));
+
+
+ rd_data = readl(regs + offset[PHY_CTRL]);
+ rd_data &= ~(PHY_PCTL_MASK << u2phy_ctrl[PHY_PCTL]);
+ rd_data |= (HSPHY_PCTL_VAL << u2phy_ctrl[PHY_PCTL]);
+ writel(rd_data, regs + offset[PHY_CTRL]);
+
+ /* Maximum timeout for PLL reset done */
+ msleep(30);
+
+ ret = bcm_usb_pll_lock_check(regs + offset[PLL_CTRL],
+ BIT(u2pll_ctrl[PLL_LOCK]));
+
+ return ret;
+}
+
+static int bcm_usb_phy_reset(struct phy *phy)
+{
+ struct bcm_usb_phy_cfg *phy_cfg = phy_get_drvdata(phy);
+ void __iomem *regs = phy_cfg->regs;
+ const u8 *offset;
+
+ offset = phy_cfg->offset;
+
+ if (phy_cfg->type == USB_HS_PHY) {
+ bcm_usb_reg32_clrbits(regs + offset[PHY_CTRL],
+ BIT(u2phy_ctrl[CORERDY]));
+ bcm_usb_reg32_setbits(regs + offset[PHY_CTRL],
+ BIT(u2phy_ctrl[CORERDY]));
+ }
+
+ return 0;
+}
+
+static int bcm_usb_phy_init(struct phy *phy)
+{
+ struct bcm_usb_phy_cfg *phy_cfg = phy_get_drvdata(phy);
+ int ret = -EINVAL;
+
+ if (phy_cfg->type == USB_SS_PHY)
+ ret = bcm_usb_ss_phy_init(phy_cfg);
+ else if (phy_cfg->type == USB_HS_PHY)
+ ret = bcm_usb_hs_phy_init(phy_cfg);
+
+ return ret;
+}
+
+static struct phy_ops sr_phy_ops = {
+ .init = bcm_usb_phy_init,
+ .reset = bcm_usb_phy_reset,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *bcm_usb_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct bcm_usb_phy_cfg *phy_cfg;
+ int phy_idx;
+
+ phy_cfg = dev_get_drvdata(dev);
+ if (!phy_cfg)
+ return ERR_PTR(-EINVAL);
+
+ if (phy_cfg->version == BCM_SR_USB_COMBO_PHY) {
+ phy_idx = args->args[0];
+
+ if (WARN_ON(phy_idx > 1))
+ return ERR_PTR(-ENODEV);
+
+ return phy_cfg[phy_idx].phy;
+ } else
+ return phy_cfg->phy;
+}
+
+static int bcm_usb_phy_create(struct device *dev, struct device_node *node,
+ void __iomem *regs, uint32_t version)
+{
+ struct bcm_usb_phy_cfg *phy_cfg;
+ int idx;
+
+ if (version == BCM_SR_USB_COMBO_PHY) {
+ phy_cfg = devm_kzalloc(dev, NUM_BCM_SR_USB_COMBO_PHYS *
+ sizeof(struct bcm_usb_phy_cfg),
+ GFP_KERNEL);
+ if (!phy_cfg)
+ return -ENOMEM;
+
+ for (idx = 0; idx < NUM_BCM_SR_USB_COMBO_PHYS; idx++) {
+ phy_cfg[idx].regs = regs;
+ phy_cfg[idx].version = version;
+ if (idx == 0) {
+ phy_cfg[idx].offset = bcm_usb_combo_phy_hs;
+ phy_cfg[idx].type = USB_HS_PHY;
+ } else {
+ phy_cfg[idx].offset = bcm_usb_combo_phy_ss;
+ phy_cfg[idx].type = USB_SS_PHY;
+ }
+ phy_cfg[idx].phy = devm_phy_create(dev, node,
+ &sr_phy_ops);
+ if (IS_ERR(phy_cfg[idx].phy))
+ return PTR_ERR(phy_cfg[idx].phy);
+
+ phy_set_drvdata(phy_cfg[idx].phy, &phy_cfg[idx]);
+ }
+ } else if (version == BCM_SR_USB_HS_PHY) {
+ phy_cfg = devm_kzalloc(dev, sizeof(struct bcm_usb_phy_cfg),
+ GFP_KERNEL);
+ if (!phy_cfg)
+ return -ENOMEM;
+
+ phy_cfg->regs = regs;
+ phy_cfg->version = version;
+ phy_cfg->offset = bcm_usb_hs_phy;
+ phy_cfg->type = USB_HS_PHY;
+ phy_cfg->phy = devm_phy_create(dev, node, &sr_phy_ops);
+ if (IS_ERR(phy_cfg->phy))
+ return PTR_ERR(phy_cfg->phy);
+
+ phy_set_drvdata(phy_cfg->phy, phy_cfg);
+ } else
+ return -ENODEV;
+
+ dev_set_drvdata(dev, phy_cfg);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_usb_phy_of_match[] = {
+ {
+ .compatible = "brcm,sr-usb-combo-phy",
+ .data = (void *)BCM_SR_USB_COMBO_PHY,
+ },
+ {
+ .compatible = "brcm,sr-usb-hs-phy",
+ .data = (void *)BCM_SR_USB_HS_PHY,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm_usb_phy_of_match);
+
+static int bcm_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ const struct of_device_id *of_id;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+ enum bcm_usb_phy_version version;
+ struct phy_provider *phy_provider;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ of_id = of_match_node(bcm_usb_phy_of_match, dn);
+ if (of_id)
+ version = (enum bcm_usb_phy_version)of_id->data;
+ else
+ return -ENODEV;
+
+ ret = bcm_usb_phy_create(dev, dn, regs, version);
+ if (ret)
+ return ret;
+
+ phy_provider = devm_of_phy_provider_register(dev, bcm_usb_phy_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver bcm_usb_phy_driver = {
+ .driver = {
+ .name = "phy-bcm-sr-usb",
+ .of_match_table = bcm_usb_phy_of_match,
+ },
+ .probe = bcm_usb_phy_probe,
+};
+module_platform_driver(bcm_usb_phy_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom Stingray USB PHY driver");
+MODULE_LICENSE("GPL v2");
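
The PLL lock wait in bcm_usb_pll_lock_check() above is an open-coded busy poll of up to PLL_LOCK_RETRY_COUNT microseconds. Purely as an illustration (not part of the patch), the same wait could be written with the generic helper from <linux/iopoll.h>:

    #include <linux/iopoll.h>

    static int bcm_usb_pll_lock_check_sketch(void __iomem *addr, u32 bit)
    {
    	u32 rd_data;

    	/* sample every 1 us, give up after PLL_LOCK_RETRY_COUNT us */
    	return readl_poll_timeout_atomic(addr, rd_data, rd_data & bit,
    					 1, PLL_LOCK_RETRY_COUNT);
    }
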
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index d6ea5ce8afa5..0c4833da7be0 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#define PHY_CTRL0 0x0
#define PHY_CTRL0_REF_SSP_EN BIT(2)
@@ -24,6 +25,7 @@ struct imx8mq_usb_phy {
struct phy *phy;
struct clk *clk;
void __iomem *base;
+ struct regulator *vbus;
};
static int imx8mq_usb_phy_init(struct phy *phy)
@@ -55,6 +57,11 @@ static int imx8mq_usb_phy_init(struct phy *phy)
static int imx8mq_phy_power_on(struct phy *phy)
{
struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = regulator_enable(imx_phy->vbus);
+ if (ret)
+ return ret;
return clk_prepare_enable(imx_phy->clk);
}
@@ -64,6 +71,7 @@ static int imx8mq_phy_power_off(struct phy *phy)
struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
clk_disable_unprepare(imx_phy->clk);
+ regulator_disable(imx_phy->vbus);
return 0;
}
@@ -101,6 +109,10 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
if (IS_ERR(imx_phy->phy))
return PTR_ERR(imx_phy->phy);
+ imx_phy->vbus = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(imx_phy->vbus))
+ return PTR_ERR(imx_phy->vbus);
+
phy_set_drvdata(imx_phy->phy, imx_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
diff --git a/drivers/phy/hisilicon/Kconfig b/drivers/phy/hisilicon/Kconfig
index b40ee54a1a50..3c142f08987c 100644
--- a/drivers/phy/hisilicon/Kconfig
+++ b/drivers/phy/hisilicon/Kconfig
@@ -12,6 +12,16 @@ config PHY_HI6220_USB
To compile this driver as a module, choose M here.
+config PHY_HI3660_USB
+ tristate "hi3660 USB PHY support"
+ depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the HiSilicon Hi3660 USB PHY.
+
+ To compile this driver as a module, choose M here.
+
config PHY_HISTB_COMBPHY
tristate "HiSilicon STB SoCs COMBPHY support"
depends on (ARCH_HISI && ARM64) || COMPILE_TEST
diff --git a/drivers/phy/hisilicon/Makefile b/drivers/phy/hisilicon/Makefile
index f662a4fe18d8..75ba64e2faf8 100644
--- a/drivers/phy/hisilicon/Makefile
+++ b/drivers/phy/hisilicon/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_PHY_HI6220_USB) += phy-hi6220-usb.o
+obj-$(CONFIG_PHY_HI3660_USB) += phy-hi3660-usb3.o
obj-$(CONFIG_PHY_HISTB_COMBPHY) += phy-histb-combphy.o
obj-$(CONFIG_PHY_HISI_INNO_USB2) += phy-hisi-inno-usb2.o
obj-$(CONFIG_PHY_HIX5HD2_SATA) += phy-hix5hd2-sata.o
diff --git a/drivers/phy/hisilicon/phy-hi3660-usb3.c b/drivers/phy/hisilicon/phy-hi3660-usb3.c
new file mode 100644
index 000000000000..cc0af2c044d0
--- /dev/null
+++ b/drivers/phy/hisilicon/phy-hi3660-usb3.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phy provider for USB 3.0 controller on HiSilicon 3660 platform
+ *
+ * Copyright (C) 2017-2018 HiSilicon Electronics Co., Ltd.
+ * http://www.huawei.com
+ *
+ * Authors: Yu Chen <chenyu56@huawei.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define PERI_CRG_CLK_EN4 0x40
+#define PERI_CRG_CLK_DIS4 0x44
+#define GT_CLK_USB3OTG_REF BIT(0)
+#define GT_ACLK_USB3OTG BIT(1)
+
+#define PERI_CRG_RSTEN4 0x90
+#define PERI_CRG_RSTDIS4 0x94
+#define IP_RST_USB3OTGPHY_POR BIT(3)
+#define IP_RST_USB3OTG BIT(5)
+
+#define PERI_CRG_ISODIS 0x148
+#define USB_REFCLK_ISO_EN BIT(25)
+
+#define PCTRL_PERI_CTRL3 0x10
+#define PCTRL_PERI_CTRL3_MSK_START 16
+#define USB_TCXO_EN BIT(1)
+
+#define PCTRL_PERI_CTRL24 0x64
+#define SC_CLK_USB3PHY_3MUX1_SEL BIT(25)
+
+#define USBOTG3_CTRL0 0x00
+#define SC_USB3PHY_ABB_GT_EN BIT(15)
+
+#define USBOTG3_CTRL2 0x08
+#define USBOTG3CTRL2_POWERDOWN_HSP BIT(0)
+#define USBOTG3CTRL2_POWERDOWN_SSP BIT(1)
+
+#define USBOTG3_CTRL3 0x0C
+#define USBOTG3_CTRL3_VBUSVLDEXT BIT(6)
+#define USBOTG3_CTRL3_VBUSVLDEXTSEL BIT(5)
+
+#define USBOTG3_CTRL4 0x10
+
+#define USBOTG3_CTRL7 0x1c
+#define REF_SSP_EN BIT(16)
+
+/* This value configures the default txtune parameter of the USB 2.0 PHY */
+#define HI3660_USB_DEFAULT_PHY_PARAM 0x1c466e3
+
+struct hi3660_priv {
+ struct device *dev;
+ struct regmap *peri_crg;
+ struct regmap *pctrl;
+ struct regmap *otg_bc;
+ u32 eye_diagram_param;
+};
+
+static int hi3660_phy_init(struct phy *phy)
+{
+ struct hi3660_priv *priv = phy_get_drvdata(phy);
+ u32 val, mask;
+ int ret;
+
+ /* usb refclk iso disable */
+ ret = regmap_write(priv->peri_crg, PERI_CRG_ISODIS, USB_REFCLK_ISO_EN);
+ if (ret)
+ goto out;
+
+ /* enable usb_tcxo_en */
+ val = USB_TCXO_EN | (USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START);
+ ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3, val);
+ if (ret)
+ goto out;
+
+ /* assert phy */
+ val = IP_RST_USB3OTGPHY_POR | IP_RST_USB3OTG;
+ ret = regmap_write(priv->peri_crg, PERI_CRG_RSTEN4, val);
+ if (ret)
+ goto out;
+
+ /* enable phy ref clk */
+ val = SC_USB3PHY_ABB_GT_EN;
+ mask = val;
+ ret = regmap_update_bits(priv->otg_bc, USBOTG3_CTRL0, mask, val);
+ if (ret)
+ goto out;
+
+ val = REF_SSP_EN;
+ mask = val;
+ ret = regmap_update_bits(priv->otg_bc, USBOTG3_CTRL7, mask, val);
+ if (ret)
+ goto out;
+
+ /* exit from IDDQ mode */
+ mask = USBOTG3CTRL2_POWERDOWN_HSP | USBOTG3CTRL2_POWERDOWN_SSP;
+ ret = regmap_update_bits(priv->otg_bc, USBOTG3_CTRL2, mask, 0);
+ if (ret)
+ goto out;
+
+ /* delay for exit from IDDQ mode */
+ usleep_range(100, 120);
+
+ /* deassert phy */
+ val = IP_RST_USB3OTGPHY_POR | IP_RST_USB3OTG;
+ ret = regmap_write(priv->peri_crg, PERI_CRG_RSTDIS4, val);
+ if (ret)
+ goto out;
+
+ /* delay for phy deasserted */
+ usleep_range(10000, 15000);
+
+ /* fake vbus valid signal */
+ val = USBOTG3_CTRL3_VBUSVLDEXT | USBOTG3_CTRL3_VBUSVLDEXTSEL;
+ mask = val;
+ ret = regmap_update_bits(priv->otg_bc, USBOTG3_CTRL3, mask, val);
+ if (ret)
+ goto out;
+
+ /* delay for vbus valid */
+ usleep_range(100, 120);
+
+ ret = regmap_write(priv->otg_bc, USBOTG3_CTRL4,
+ priv->eye_diagram_param);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ dev_err(priv->dev, "failed to init phy ret: %d\n", ret);
+ return ret;
+}
+
+static int hi3660_phy_exit(struct phy *phy)
+{
+ struct hi3660_priv *priv = phy_get_drvdata(phy);
+ u32 val;
+ int ret;
+
+ /* assert phy */
+ val = IP_RST_USB3OTGPHY_POR;
+ ret = regmap_write(priv->peri_crg, PERI_CRG_RSTEN4, val);
+ if (ret)
+ goto out;
+
+ /* disable usb_tcxo_en */
+ val = USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START;
+ ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3, val);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ dev_err(priv->dev, "failed to exit phy ret: %d\n", ret);
+ return ret;
+}
+
+static struct phy_ops hi3660_phy_ops = {
+ .init = hi3660_phy_init,
+ .exit = hi3660_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int hi3660_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct phy *phy;
+ struct hi3660_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->peri_crg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "hisilicon,pericrg-syscon");
+ if (IS_ERR(priv->peri_crg)) {
+ dev_err(dev, "no hisilicon,pericrg-syscon\n");
+ return PTR_ERR(priv->peri_crg);
+ }
+
+ priv->pctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "hisilicon,pctrl-syscon");
+ if (IS_ERR(priv->pctrl)) {
+ dev_err(dev, "no hisilicon,pctrl-syscon\n");
+ return PTR_ERR(priv->pctrl);
+ }
+
+ /* node of hi3660 phy is a sub-node of usb3_otg_bc */
+ priv->otg_bc = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->otg_bc)) {
+ dev_err(dev, "no hisilicon,usb3-otg-bc-syscon\n");
+ return PTR_ERR(priv->otg_bc);
+ }
+
+ if (of_property_read_u32(dev->of_node, "hisilicon,eye-diagram-param",
+ &(priv->eye_diagram_param)))
+ priv->eye_diagram_param = HI3660_USB_DEFAULT_PHY_PARAM;
+
+ phy = devm_phy_create(dev, NULL, &hi3660_phy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id hi3660_phy_of_match[] = {
+ {.compatible = "hisilicon,hi3660-usb-phy",},
+ { }
+};
+MODULE_DEVICE_TABLE(of, hi3660_phy_of_match);
+
+static struct platform_driver hi3660_phy_driver = {
+ .probe = hi3660_phy_probe,
+ .driver = {
+ .name = "hi3660-usb-phy",
+ .of_match_table = hi3660_phy_of_match,
+ }
+};
+module_platform_driver(hi3660_phy_driver);
+
+MODULE_AUTHOR("Yu Chen <chenyu56@huawei.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon Hi3660 USB3 PHY Driver");
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index 94a29dea57af..ded900b06f5a 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -266,7 +266,6 @@ static struct platform_driver mvebu_a3700_utmi_driver = {
.probe = mvebu_a3700_utmi_phy_probe,
.driver = {
.name = "mvebu-a3700-utmi-phy",
- .owner = THIS_MODULE,
.of_match_table = mvebu_a3700_utmi_of_match,
},
};
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 8857d00b3c65..b5a89dbd3fe7 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -13,6 +13,16 @@ config PHY_MTK_TPHY
multi-ports is the first version, otherwise the second version,
so you can easily distinguish them by banks layout.
+config PHY_MTK_UFS
+ tristate "MediaTek UFS M-PHY driver"
+ depends on ARCH_MEDIATEK && OF
+ select GENERIC_PHY
+ help
+ Support for UFS M-PHY on MediaTek chipsets.
+ Enable this to provide vendor-specific probing,
+ initialization, power on and power off flow of
+ specified M-PHYs.
+
config PHY_MTK_XSPHY
tristate "MediaTek XS-PHY Driver"
depends on ARCH_MEDIATEK && OF
diff --git a/drivers/phy/mediatek/Makefile b/drivers/phy/mediatek/Makefile
index ee49edc97ee9..08a8e6a97b1e 100644
--- a/drivers/phy/mediatek/Makefile
+++ b/drivers/phy/mediatek/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_PHY_MTK_TPHY) += phy-mtk-tphy.o
+obj-$(CONFIG_PHY_MTK_UFS) += phy-mtk-ufs.o
obj-$(CONFIG_PHY_MTK_XSPHY) += phy-mtk-xsphy.o
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 5b6a470ca145..cb2ed3b25068 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -1103,13 +1103,9 @@ static int mtk_tphy_probe(struct platform_device *pdev)
}
/* it's deprecated, make it optional for backward compatibility */
- tphy->u3phya_ref = devm_clk_get(dev, "u3phya_ref");
- if (IS_ERR(tphy->u3phya_ref)) {
- if (PTR_ERR(tphy->u3phya_ref) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- tphy->u3phya_ref = NULL;
- }
+ tphy->u3phya_ref = devm_clk_get_optional(dev, "u3phya_ref");
+ if (IS_ERR(tphy->u3phya_ref))
+ return PTR_ERR(tphy->u3phya_ref);
tphy->src_ref_clk = U3P_REF_CLK;
tphy->src_coef = U3P_SLEW_RATE_COEF;
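
The devm_clk_get_optional() conversions in this patch (here and in the Amlogic GXL and Qualcomm QUSB2 hunks) rely on a contract worth spelling out: when the clock is not described in DT, the helper returns NULL rather than ERR_PTR(-ENOENT), and the clk API treats a NULL clk as a no-op, so the old open-coded fallback disappears while -EPROBE_DEFER still propagates. A minimal sketch of that contract (illustrative only, not taken from the patch):

    struct clk *clk;
    int ret;

    clk = devm_clk_get_optional(dev, "u3phya_ref");
    if (IS_ERR(clk))
    	return PTR_ERR(clk);	/* real errors, including -EPROBE_DEFER */

    /* clk may be NULL here; clk_prepare_enable(NULL) simply returns 0 */
    ret = clk_prepare_enable(clk);
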
diff --git a/drivers/phy/mediatek/phy-mtk-ufs.c b/drivers/phy/mediatek/phy-mtk-ufs.c
new file mode 100644
index 000000000000..cf94f5c35dc5
--- /dev/null
+++ b/drivers/phy/mediatek/phy-mtk-ufs.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Stanley Chu <stanley.chu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/* mphy register and offsets */
+#define MP_GLB_DIG_8C 0x008C
+#define FRC_PLL_ISO_EN BIT(8)
+#define PLL_ISO_EN BIT(9)
+#define FRC_FRC_PWR_ON BIT(10)
+#define PLL_PWR_ON BIT(11)
+
+#define MP_LN_DIG_RX_9C 0xA09C
+#define FSM_DIFZ_FRC BIT(18)
+
+#define MP_LN_DIG_RX_AC 0xA0AC
+#define FRC_RX_SQ_EN BIT(0)
+#define RX_SQ_EN BIT(1)
+
+#define MP_LN_RX_44 0xB044
+#define FRC_CDR_PWR_ON BIT(17)
+#define CDR_PWR_ON BIT(18)
+#define FRC_CDR_ISO_EN BIT(19)
+#define CDR_ISO_EN BIT(20)
+
+struct ufs_mtk_phy {
+ struct device *dev;
+ void __iomem *mmio;
+ struct clk *mp_clk;
+ struct clk *unipro_clk;
+};
+
+static inline u32 mphy_readl(struct ufs_mtk_phy *phy, u32 reg)
+{
+ return readl(phy->mmio + reg);
+}
+
+static inline void mphy_writel(struct ufs_mtk_phy *phy, u32 val, u32 reg)
+{
+ writel(val, phy->mmio + reg);
+}
+
+static void mphy_set_bit(struct ufs_mtk_phy *phy, u32 reg, u32 bit)
+{
+ u32 val;
+
+ val = mphy_readl(phy, reg);
+ val |= bit;
+ mphy_writel(phy, val, reg);
+}
+
+static void mphy_clr_bit(struct ufs_mtk_phy *phy, u32 reg, u32 bit)
+{
+ u32 val;
+
+ val = mphy_readl(phy, reg);
+ val &= ~bit;
+ mphy_writel(phy, val, reg);
+}
+
+static struct ufs_mtk_phy *get_ufs_mtk_phy(struct phy *generic_phy)
+{
+ return (struct ufs_mtk_phy *)phy_get_drvdata(generic_phy);
+}
+
+static int ufs_mtk_phy_clk_init(struct ufs_mtk_phy *phy)
+{
+ struct device *dev = phy->dev;
+
+ phy->unipro_clk = devm_clk_get(dev, "unipro");
+ if (IS_ERR(phy->unipro_clk)) {
+ dev_err(dev, "failed to get clock: unipro\n");
+ return PTR_ERR(phy->unipro_clk);
+ }
+
+ phy->mp_clk = devm_clk_get(dev, "mp");
+ if (IS_ERR(phy->mp_clk)) {
+ dev_err(dev, "failed to get clock: mp\n");
+ return PTR_ERR(phy->mp_clk);
+ }
+
+ return 0;
+}
+
+static void ufs_mtk_phy_set_active(struct ufs_mtk_phy *phy)
+{
+ /* release DA_MP_PLL_PWR_ON */
+ mphy_set_bit(phy, MP_GLB_DIG_8C, PLL_PWR_ON);
+ mphy_clr_bit(phy, MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
+
+ /* release DA_MP_PLL_ISO_EN */
+ mphy_clr_bit(phy, MP_GLB_DIG_8C, PLL_ISO_EN);
+ mphy_clr_bit(phy, MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
+
+ /* release DA_MP_CDR_PWR_ON */
+ mphy_set_bit(phy, MP_LN_RX_44, CDR_PWR_ON);
+ mphy_clr_bit(phy, MP_LN_RX_44, FRC_CDR_PWR_ON);
+
+ /* release DA_MP_CDR_ISO_EN */
+ mphy_clr_bit(phy, MP_LN_RX_44, CDR_ISO_EN);
+ mphy_clr_bit(phy, MP_LN_RX_44, FRC_CDR_ISO_EN);
+
+ /* release DA_MP_RX0_SQ_EN */
+ mphy_set_bit(phy, MP_LN_DIG_RX_AC, RX_SQ_EN);
+ mphy_clr_bit(phy, MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
+
+ /* delay 1us to wait DIFZ stable */
+ udelay(1);
+
+ /* release DIFZ */
+ mphy_clr_bit(phy, MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
+}
+
+static void ufs_mtk_phy_set_deep_hibern(struct ufs_mtk_phy *phy)
+{
+ /* force DIFZ */
+ mphy_set_bit(phy, MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
+
+ /* force DA_MP_RX0_SQ_EN */
+ mphy_set_bit(phy, MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
+ mphy_clr_bit(phy, MP_LN_DIG_RX_AC, RX_SQ_EN);
+
+ /* force DA_MP_CDR_ISO_EN */
+ mphy_set_bit(phy, MP_LN_RX_44, FRC_CDR_ISO_EN);
+ mphy_set_bit(phy, MP_LN_RX_44, CDR_ISO_EN);
+
+ /* force DA_MP_CDR_PWR_ON */
+ mphy_set_bit(phy, MP_LN_RX_44, FRC_CDR_PWR_ON);
+ mphy_clr_bit(phy, MP_LN_RX_44, CDR_PWR_ON);
+
+ /* force DA_MP_PLL_ISO_EN */
+ mphy_set_bit(phy, MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
+ mphy_set_bit(phy, MP_GLB_DIG_8C, PLL_ISO_EN);
+
+ /* force DA_MP_PLL_PWR_ON */
+ mphy_set_bit(phy, MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
+ mphy_clr_bit(phy, MP_GLB_DIG_8C, PLL_PWR_ON);
+}
+
+static int ufs_mtk_phy_power_on(struct phy *generic_phy)
+{
+ struct ufs_mtk_phy *phy = get_ufs_mtk_phy(generic_phy);
+ int ret;
+
+ ret = clk_prepare_enable(phy->unipro_clk);
+ if (ret) {
+ dev_err(phy->dev, "unipro_clk enable failed %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(phy->mp_clk);
+ if (ret) {
+ dev_err(phy->dev, "mp_clk enable failed %d\n", ret);
+ goto out_unprepare_unipro_clk;
+ }
+
+ ufs_mtk_phy_set_active(phy);
+
+ return 0;
+
+out_unprepare_unipro_clk:
+ clk_disable_unprepare(phy->unipro_clk);
+out:
+ return ret;
+}
+
+static int ufs_mtk_phy_power_off(struct phy *generic_phy)
+{
+ struct ufs_mtk_phy *phy = get_ufs_mtk_phy(generic_phy);
+
+ ufs_mtk_phy_set_deep_hibern(phy);
+
+ clk_disable_unprepare(phy->unipro_clk);
+ clk_disable_unprepare(phy->mp_clk);
+
+ return 0;
+}
+
+static const struct phy_ops ufs_mtk_phy_ops = {
+ .power_on = ufs_mtk_phy_power_on,
+ .power_off = ufs_mtk_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int ufs_mtk_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct ufs_mtk_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->mmio))
+ return PTR_ERR(phy->mmio);
+
+ phy->dev = dev;
+
+ ret = ufs_mtk_phy_clk_init(phy);
+ if (ret)
+ return ret;
+
+ generic_phy = devm_phy_create(dev, NULL, &ufs_mtk_phy_ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id ufs_mtk_phy_of_match[] = {
+ {.compatible = "mediatek,mt8183-ufsphy"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ufs_mtk_phy_of_match);
+
+static struct platform_driver ufs_mtk_phy_driver = {
+ .probe = ufs_mtk_phy_probe,
+ .driver = {
+ .of_match_table = ufs_mtk_phy_of_match,
+ .name = "ufs_mtk_phy",
+ },
+};
+module_platform_driver(ufs_mtk_phy_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) MediaTek MPHY");
+MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/motorola/Kconfig b/drivers/phy/motorola/Kconfig
index 82651524ffb9..718f8729701d 100644
--- a/drivers/phy/motorola/Kconfig
+++ b/drivers/phy/motorola/Kconfig
@@ -13,7 +13,7 @@ config PHY_CPCAP_USB
config PHY_MAPPHONE_MDM6600
tristate "Motorola Mapphone MDM6600 modem USB PHY driver"
- depends on OF && USB_SUPPORT
+ depends on OF && USB_SUPPORT && GPIOLIB
select GENERIC_PHY
help
Enable this for MDM6600 USB modem to work on Motorola phones
diff --git a/drivers/phy/mscc/phy-ocelot-serdes.c b/drivers/phy/mscc/phy-ocelot-serdes.c
index 77c46f639fbf..76f596365176 100644
--- a/drivers/phy/mscc/phy-ocelot-serdes.c
+++ b/drivers/phy/mscc/phy-ocelot-serdes.c
@@ -31,6 +31,238 @@ struct serdes_macro {
struct serdes_ctrl *ctrl;
};
+#define MCB_S6G_CFG_TIMEOUT 50
+
+static int __serdes_write_mcb_s6g(struct regmap *regmap, u8 macro, u32 op)
+{
+ unsigned int regval = 0;
+
+ regmap_write(regmap, HSIO_MCB_S6G_ADDR_CFG, op |
+ HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(BIT(macro)));
+
+ return regmap_read_poll_timeout(regmap, HSIO_MCB_S6G_ADDR_CFG, regval,
+ (regval & op) != op, 100,
+ MCB_S6G_CFG_TIMEOUT * 1000);
+}
+
+static int serdes_commit_mcb_s6g(struct regmap *regmap, u8 macro)
+{
+ return __serdes_write_mcb_s6g(regmap, macro,
+ HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT);
+}
+
+static int serdes_update_mcb_s6g(struct regmap *regmap, u8 macro)
+{
+ return __serdes_write_mcb_s6g(regmap, macro,
+ HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT);
+}
+
+static int serdes_init_s6g(struct regmap *regmap, u8 serdes, int mode)
+{
+ u32 pll_fsm_ctrl_data;
+ u32 ob_ena1v_mode;
+ u32 des_bw_ana;
+ u32 ob_ena_cas;
+ u32 if_mode;
+ u32 ob_lev;
+ u32 qrate;
+ int ret;
+
+ if (mode == PHY_INTERFACE_MODE_QSGMII) {
+ pll_fsm_ctrl_data = 120;
+ ob_ena1v_mode = 0;
+ ob_ena_cas = 0;
+ des_bw_ana = 5;
+ ob_lev = 24;
+ if_mode = 3;
+ qrate = 0;
+ } else {
+ pll_fsm_ctrl_data = 60;
+ ob_ena1v_mode = 1;
+ ob_ena_cas = 2;
+ des_bw_ana = 3;
+ ob_lev = 48;
+ if_mode = 1;
+ qrate = 1;
+ }
+
+ ret = serdes_update_mcb_s6g(regmap, serdes);
+ if (ret)
+ return ret;
+
+ /* Test pattern */
+
+ regmap_update_bits(regmap, HSIO_S6G_COMMON_CFG,
+ HSIO_S6G_COMMON_CFG_SYS_RST, 0);
+
+ regmap_update_bits(regmap, HSIO_S6G_PLL_CFG,
+ HSIO_S6G_PLL_CFG_PLL_FSM_ENA, 0);
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG,
+ HSIO_S6G_IB_CFG_IB_SIG_DET_ENA |
+ HSIO_S6G_IB_CFG_IB_REG_ENA |
+ HSIO_S6G_IB_CFG_IB_SAM_ENA |
+ HSIO_S6G_IB_CFG_IB_EQZ_ENA |
+ HSIO_S6G_IB_CFG_IB_CONCUR |
+ HSIO_S6G_IB_CFG_IB_CAL_ENA,
+ HSIO_S6G_IB_CFG_IB_SIG_DET_ENA |
+ HSIO_S6G_IB_CFG_IB_REG_ENA |
+ HSIO_S6G_IB_CFG_IB_SAM_ENA |
+ HSIO_S6G_IB_CFG_IB_EQZ_ENA |
+ HSIO_S6G_IB_CFG_IB_CONCUR);
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG1,
+ HSIO_S6G_IB_CFG1_IB_FRC_OFFSET |
+ HSIO_S6G_IB_CFG1_IB_FRC_LP |
+ HSIO_S6G_IB_CFG1_IB_FRC_MID |
+ HSIO_S6G_IB_CFG1_IB_FRC_HP |
+ HSIO_S6G_IB_CFG1_IB_FILT_OFFSET |
+ HSIO_S6G_IB_CFG1_IB_FILT_LP |
+ HSIO_S6G_IB_CFG1_IB_FILT_MID |
+ HSIO_S6G_IB_CFG1_IB_FILT_HP,
+ HSIO_S6G_IB_CFG1_IB_FILT_OFFSET |
+ HSIO_S6G_IB_CFG1_IB_FILT_HP |
+ HSIO_S6G_IB_CFG1_IB_FILT_LP |
+ HSIO_S6G_IB_CFG1_IB_FILT_MID);
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG2,
+ HSIO_S6G_IB_CFG2_IB_UREG_M,
+ HSIO_S6G_IB_CFG2_IB_UREG(4));
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG3,
+ HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M |
+ HSIO_S6G_IB_CFG3_IB_INI_LP_M |
+ HSIO_S6G_IB_CFG3_IB_INI_MID_M |
+ HSIO_S6G_IB_CFG3_IB_INI_HP_M,
+ HSIO_S6G_IB_CFG3_IB_INI_OFFSET(31) |
+ HSIO_S6G_IB_CFG3_IB_INI_LP(1) |
+ HSIO_S6G_IB_CFG3_IB_INI_MID(31) |
+ HSIO_S6G_IB_CFG3_IB_INI_HP(0));
+
+ regmap_update_bits(regmap, HSIO_S6G_MISC_CFG,
+ HSIO_S6G_MISC_CFG_LANE_RST,
+ HSIO_S6G_MISC_CFG_LANE_RST);
+
+ ret = serdes_commit_mcb_s6g(regmap, serdes);
+ if (ret)
+ return ret;
+
+ /* OB + DES + IB + SER CFG */
+ regmap_update_bits(regmap, HSIO_S6G_OB_CFG,
+ HSIO_S6G_OB_CFG_OB_IDLE |
+ HSIO_S6G_OB_CFG_OB_ENA1V_MODE |
+ HSIO_S6G_OB_CFG_OB_POST0_M |
+ HSIO_S6G_OB_CFG_OB_PREC_M,
+ (ob_ena1v_mode ? HSIO_S6G_OB_CFG_OB_ENA1V_MODE : 0) |
+ HSIO_S6G_OB_CFG_OB_POST0(0) |
+ HSIO_S6G_OB_CFG_OB_PREC(0));
+
+ regmap_update_bits(regmap, HSIO_S6G_OB_CFG1,
+ HSIO_S6G_OB_CFG1_OB_ENA_CAS_M |
+ HSIO_S6G_OB_CFG1_OB_LEV_M,
+ HSIO_S6G_OB_CFG1_OB_LEV(ob_lev) |
+ HSIO_S6G_OB_CFG1_OB_ENA_CAS(ob_ena_cas));
+
+ regmap_update_bits(regmap, HSIO_S6G_DES_CFG,
+ HSIO_S6G_DES_CFG_DES_PHS_CTRL_M |
+ HSIO_S6G_DES_CFG_DES_CPMD_SEL_M |
+ HSIO_S6G_DES_CFG_DES_BW_ANA_M,
+ HSIO_S6G_DES_CFG_DES_PHS_CTRL(2) |
+ HSIO_S6G_DES_CFG_DES_CPMD_SEL(0) |
+ HSIO_S6G_DES_CFG_DES_BW_ANA(des_bw_ana));
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG,
+ HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M |
+ HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M,
+ HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(0) |
+ HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(0));
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG1,
+ HSIO_S6G_IB_CFG1_IB_TSDET_M,
+ HSIO_S6G_IB_CFG1_IB_TSDET(16));
+
+ regmap_update_bits(regmap, HSIO_S6G_SER_CFG,
+ HSIO_S6G_SER_CFG_SER_ALISEL_M |
+ HSIO_S6G_SER_CFG_SER_ENALI,
+ HSIO_S6G_SER_CFG_SER_ALISEL(0));
+
+ regmap_update_bits(regmap, HSIO_S6G_PLL_CFG,
+ HSIO_S6G_PLL_CFG_PLL_DIV4 |
+ HSIO_S6G_PLL_CFG_PLL_ENA_ROT |
+ HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M |
+ HSIO_S6G_PLL_CFG_PLL_ROT_DIR |
+ HSIO_S6G_PLL_CFG_PLL_ROT_FRQ,
+ HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA
+ (pll_fsm_ctrl_data));
+
+ regmap_update_bits(regmap, HSIO_S6G_COMMON_CFG,
+ HSIO_S6G_COMMON_CFG_SYS_RST |
+ HSIO_S6G_COMMON_CFG_ENA_LANE |
+ HSIO_S6G_COMMON_CFG_PWD_RX |
+ HSIO_S6G_COMMON_CFG_PWD_TX |
+ HSIO_S6G_COMMON_CFG_HRATE |
+ HSIO_S6G_COMMON_CFG_QRATE |
+ HSIO_S6G_COMMON_CFG_ENA_ELOOP |
+ HSIO_S6G_COMMON_CFG_ENA_FLOOP |
+ HSIO_S6G_COMMON_CFG_IF_MODE_M,
+ HSIO_S6G_COMMON_CFG_SYS_RST |
+ HSIO_S6G_COMMON_CFG_ENA_LANE |
+ (qrate ? HSIO_S6G_COMMON_CFG_QRATE : 0) |
+ HSIO_S6G_COMMON_CFG_IF_MODE(if_mode));
+
+ regmap_update_bits(regmap, HSIO_S6G_MISC_CFG,
+ HSIO_S6G_MISC_CFG_LANE_RST |
+ HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA |
+ HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA |
+ HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA,
+ HSIO_S6G_MISC_CFG_LANE_RST |
+ HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA);
+
+
+ ret = serdes_commit_mcb_s6g(regmap, serdes);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(regmap, HSIO_S6G_PLL_CFG,
+ HSIO_S6G_PLL_CFG_PLL_FSM_ENA,
+ HSIO_S6G_PLL_CFG_PLL_FSM_ENA);
+
+ ret = serdes_commit_mcb_s6g(regmap, serdes);
+ if (ret)
+ return ret;
+
+ /* Wait for PLL bringup */
+ msleep(20);
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG,
+ HSIO_S6G_IB_CFG_IB_CAL_ENA,
+ HSIO_S6G_IB_CFG_IB_CAL_ENA);
+
+ regmap_update_bits(regmap, HSIO_S6G_MISC_CFG,
+ HSIO_S6G_MISC_CFG_LANE_RST, 0);
+
+ ret = serdes_commit_mcb_s6g(regmap, serdes);
+ if (ret)
+ return ret;
+
+ /* Wait for calibration */
+ msleep(60);
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG,
+ HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M |
+ HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M,
+ HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(0) |
+ HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(7));
+
+ regmap_update_bits(regmap, HSIO_S6G_IB_CFG1,
+ HSIO_S6G_IB_CFG1_IB_TSDET_M,
+ HSIO_S6G_IB_CFG1_IB_TSDET(3));
+
+ /* IB CFG */
+
+ return 0;
+}
+
#define MCB_S1G_CFG_TIMEOUT 50
static int __serdes_write_mcb_s1g(struct regmap *regmap, u8 macro, u32 op)
@@ -110,7 +342,7 @@ struct serdes_mux {
u32 mux;
};
-#define SERDES_MUX(_idx, _port, _mode, _submode, _mask, _mux) { \
+#define SERDES_MUX(_idx, _port, _mode, _submode, _mask, _mux) { \
.idx = _idx, \
.port = _port, \
.mode = _mode, \
@@ -191,8 +423,12 @@ static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
if (macro->idx <= SERDES1G_MAX)
return serdes_init_s1g(macro->ctrl->regs, macro->idx);
+ else if (macro->idx <= SERDES6G_MAX)
+ return serdes_init_s6g(macro->ctrl->regs,
+ macro->idx - (SERDES1G_MAX + 1),
+ ocelot_serdes_muxes[i].submode);
- /* SERDES6G and PCIe not supported yet */
+ /* PCIe not supported yet */
return -EOPNOTSUPP;
}
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index cb38f6e8614c..c147ba843f0b 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -384,10 +384,16 @@ int phy_reset(struct phy *phy)
if (!phy || !phy->ops->reset)
return 0;
+ ret = phy_pm_runtime_get_sync(phy);
+ if (ret < 0 && ret != -ENOTSUPP)
+ return ret;
+
mutex_lock(&phy->mutex);
ret = phy->ops->reset(phy);
mutex_unlock(&phy->mutex);
+ phy_pm_runtime_put(phy);
+
return ret;
}
EXPORT_SYMBOL_GPL(phy_reset);
@@ -564,6 +570,11 @@ void phy_put(struct phy *phy)
if (!phy || IS_ERR(phy))
return;
+ mutex_lock(&phy->mutex);
+ if (phy->ops->release)
+ phy->ops->release(phy);
+ mutex_unlock(&phy->mutex);
+
module_put(phy->ops->owner);
put_device(&phy->dev);
}
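
With the phy_put() change above, a PHY provider can supply a release callback that runs when the consumer relinquishes the PHY. A hedged sketch of such a provider (all foo_* names are assumptions; only the ops->release() call site comes from the hunk above):

    static void foo_phy_release(struct phy *phy)
    {
    	struct foo_phy_priv *priv = phy_get_drvdata(phy);

    	priv->consumer_attached = false;	/* hypothetical bookkeeping */
    }

    static const struct phy_ops foo_phy_ops = {
    	.init    = foo_phy_init,
    	.exit    = foo_phy_exit,
    	.release = foo_phy_release,
    	.owner   = THIS_MODULE,
    };
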
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 08d6f6f7f039..cd91b4179b10 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -242,6 +242,88 @@ static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0x0e),
};
+static const struct qmp_phy_init_tbl msm8998_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15),
+};
+
+static const struct qmp_phy_init_tbl msm8998_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8998_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40),
+};
+
+static const struct qmp_phy_init_tbl msm8998_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03),
+};
+
static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
@@ -897,6 +979,7 @@ struct qmp_phy {
* @init_count: phy common block initialization count
* @phy_initialized: indicate if PHY has been initialized
* @mode: current PHY mode
+ * @ufs_reset: optional UFS PHY reset handle
*/
struct qcom_qmp {
struct device *dev;
@@ -914,6 +997,8 @@ struct qcom_qmp {
int init_count;
bool phy_initialized;
enum phy_mode mode;
+
+ struct reset_control *ufs_reset;
};
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
@@ -1146,6 +1231,31 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.no_pcs_sw_reset = true,
};
+static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8998_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
+ .tx_tbl = msm8998_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8998_pcie_tx_tbl),
+ .rx_tbl = msm8998_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8998_pcie_rx_tbl),
+ .pcs_tbl = msm8998_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_com_pcs_ready = PCS_READY,
+};
+
static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -1314,6 +1424,7 @@ static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
return 0;
}
+ reset_control_assert(qmp->ufs_reset);
if (cfg->has_phy_com_ctrl) {
qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
SERDES_START | PCS_START);
@@ -1335,8 +1446,7 @@ static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
return 0;
}
-/* PHY Initialization */
-static int qcom_qmp_phy_init(struct phy *phy)
+static int qcom_qmp_phy_enable(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -1351,6 +1461,33 @@ static int qcom_qmp_phy_init(struct phy *phy)
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+ if (cfg->no_pcs_sw_reset) {
+ /*
+ * Get UFS reset, which is delayed until now to avoid a
+ * circular dependency where UFS needs its PHY, but the PHY
+ * needs this UFS reset.
+ */
+ if (!qmp->ufs_reset) {
+ qmp->ufs_reset =
+ devm_reset_control_get_exclusive(qmp->dev,
+ "ufsphy");
+
+ if (IS_ERR(qmp->ufs_reset)) {
+ ret = PTR_ERR(qmp->ufs_reset);
+ dev_err(qmp->dev,
+ "failed to get UFS reset: %d\n",
+ ret);
+
+ qmp->ufs_reset = NULL;
+ return ret;
+ }
+ }
+
+ ret = reset_control_assert(qmp->ufs_reset);
+ if (ret)
+ goto err_lane_rst;
+ }
+
ret = qcom_qmp_phy_com_init(qphy);
if (ret)
return ret;
@@ -1383,14 +1520,9 @@ static int qcom_qmp_phy_init(struct phy *phy)
cfg->rx_tbl, cfg->rx_tbl_num);
qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
-
- /*
- * UFS PHY requires the deassert of software reset before serdes start.
- * For UFS PHYs that do not have software reset control bits, defer
- * starting serdes until the power on callback.
- */
- if ((cfg->type == PHY_TYPE_UFS) && cfg->no_pcs_sw_reset)
- goto out;
+ ret = reset_control_deassert(qmp->ufs_reset);
+ if (ret)
+ goto err_lane_rst;
/*
* Pull out PHY from POWER DOWN state.
@@ -1403,7 +1535,9 @@ static int qcom_qmp_phy_init(struct phy *phy)
usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
/* Pull PHY out of reset state */
- qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (!cfg->no_pcs_sw_reset)
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
if (cfg->has_phy_dp_com_ctrl)
qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
@@ -1420,11 +1554,10 @@ static int qcom_qmp_phy_init(struct phy *phy)
goto err_pcs_ready;
}
qmp->phy_initialized = true;
-
-out:
- return ret;
+ return 0;
err_pcs_ready:
+ reset_control_assert(qmp->ufs_reset);
clk_disable_unprepare(qphy->pipe_clk);
err_clk_enable:
if (cfg->has_lane_rst)
@@ -1435,7 +1568,7 @@ err_lane_rst:
return ret;
}
-static int qcom_qmp_phy_exit(struct phy *phy)
+static int qcom_qmp_phy_disable(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -1463,44 +1596,6 @@ static int qcom_qmp_phy_exit(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_poweron(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qmp->cfg;
- void __iomem *pcs = qphy->pcs;
- void __iomem *status;
- unsigned int mask, val;
- int ret = 0;
-
- if (cfg->type != PHY_TYPE_UFS)
- return 0;
-
- /*
- * For UFS PHY that has not software reset control, serdes start
- * should only happen when UFS driver explicitly calls phy_power_on
- * after it deasserts software reset.
- */
- if (cfg->no_pcs_sw_reset && !qmp->phy_initialized &&
- (qmp->init_count != 0)) {
- /* start SerDes and Phy-Coding-Sublayer */
- qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
-
- status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
- mask = cfg->mask_pcs_ready;
-
- ret = readl_poll_timeout(status, val, !(val & mask), 1,
- PHY_INIT_COMPLETE_TIMEOUT);
- if (ret) {
- dev_err(qmp->dev, "phy initialization timed-out\n");
- return ret;
- }
- qmp->phy_initialized = true;
- }
-
- return ret;
-}
-
static int qcom_qmp_phy_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
@@ -1750,9 +1845,15 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
}
static const struct phy_ops qcom_qmp_phy_gen_ops = {
- .init = qcom_qmp_phy_init,
- .exit = qcom_qmp_phy_exit,
- .power_on = qcom_qmp_phy_poweron,
+ .init = qcom_qmp_phy_enable,
+ .exit = qcom_qmp_phy_disable,
+ .set_mode = qcom_qmp_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static const struct phy_ops qcom_qmp_ufs_ops = {
+ .power_on = qcom_qmp_phy_enable,
+ .power_off = qcom_qmp_phy_disable,
.set_mode = qcom_qmp_phy_set_mode,
.owner = THIS_MODULE,
};
@@ -1763,6 +1864,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
+ const struct phy_ops *ops = &qcom_qmp_phy_gen_ops;
char prop_name[MAX_PROP_NAME];
int ret;
@@ -1849,7 +1951,10 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
}
}
- generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_gen_ops);
+ if (qmp->cfg->type == PHY_TYPE_UFS)
+ ops = &qcom_qmp_ufs_ops;
+
+ generic_phy = devm_phy_create(dev, np, ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create qphy %d\n", ret);
@@ -1873,6 +1978,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,msm8996-qmp-usb3-phy",
.data = &msm8996_usb3phy_cfg,
}, {
+ .compatible = "qcom,msm8998-qmp-pcie-phy",
+ .data = &msm8998_pciephy_cfg,
+ }, {
.compatible = "qcom,msm8998-qmp-ufs-phy",
.data = &sdm845_ufsphy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index a1b6cdee9a08..335ea5d7ef40 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -241,6 +241,7 @@
#define QSERDES_V3_RX_RX_BAND 0x110
#define QSERDES_V3_RX_RX_INTERFACE_MODE 0x11c
#define QSERDES_V3_RX_RX_MODE_00 0x164
+#define QSERDES_V3_RX_RX_MODE_01 0x168
/* Only for QMP V3 PHY - PCS registers */
#define QPHY_V3_PCS_POWER_DOWN_CONTROL 0x004
@@ -280,6 +281,7 @@
#define QPHY_V3_PCS_TSYNC_RSYNC_TIME 0x08c
#define QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x0a0
#define QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK 0x0a4
+#define QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME 0x0a8
#define QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK 0x0b0
#define QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME 0x0b8
#define QPHY_V3_PCS_RXEQTRAINING_RUN_TIME 0x0bc
@@ -292,13 +294,23 @@
#define QPHY_V3_PCS_RX_MIN_HIBERN8_TIME 0x138
#define QPHY_V3_PCS_RX_SIGDET_CTRL1 0x13c
#define QPHY_V3_PCS_RX_SIGDET_CTRL2 0x140
+#define QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1a8
+#define QPHY_V3_PCS_OSC_DTCT_ACTIONS 0x1ac
+#define QPHY_V3_PCS_SIGDET_CNTRL 0x1b0
#define QPHY_V3_PCS_TX_MID_TERM_CTRL1 0x1bc
#define QPHY_V3_PCS_MULTI_LANE_CTRL1 0x1c4
#define QPHY_V3_PCS_RX_SIGDET_LVL 0x1d8
+#define QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc
+#define QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0
#define QPHY_V3_PCS_REFGEN_REQ_CONFIG1 0x20c
#define QPHY_V3_PCS_REFGEN_REQ_CONFIG2 0x210
/* Only for QMP V3 PHY - PCS_MISC registers */
#define QPHY_V3_PCS_MISC_CLAMP_ENABLE 0x0c
+#define QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2 0x2c
+#define QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1 0x44
+#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2 0x54
+#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
+#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 8fd7ce139772..1cbf1d6f28ce 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -822,14 +822,9 @@ static int qusb2_phy_probe(struct platform_device *pdev)
return ret;
}
- qphy->iface_clk = devm_clk_get(dev, "iface");
- if (IS_ERR(qphy->iface_clk)) {
- ret = PTR_ERR(qphy->iface_clk);
- if (ret == -EPROBE_DEFER)
- return ret;
- qphy->iface_clk = NULL;
- dev_dbg(dev, "failed to get iface clk, %d\n", ret);
- }
+ qphy->iface_clk = devm_clk_get_optional(dev, "iface");
+ if (IS_ERR(qphy->iface_clk))
+ return PTR_ERR(qphy->iface_clk);
qphy->phy_reset = devm_reset_control_get_by_index(&pdev->dev, 0);
if (IS_ERR(qphy->phy_reset)) {
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index f798fb64de94..109ddd67be82 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -96,11 +97,10 @@ struct ufs_qcom_phy {
char name[UFS_QCOM_PHY_NAME_LEN];
struct ufs_qcom_phy_calibration *cached_regs;
int cached_regs_table_size;
- bool is_powered_on;
- bool is_started;
struct ufs_qcom_phy_specific_ops *phy_spec_ops;
enum phy_mode mode;
+ struct reset_control *ufs_reset;
};
/**
@@ -115,6 +115,7 @@ struct ufs_qcom_phy {
* and writes to QSERDES_RX_SIGDET_CNTRL attribute
*/
struct ufs_qcom_phy_specific_ops {
+ int (*calibrate)(struct ufs_qcom_phy *ufs_qcom_phy, bool is_rate_B);
void (*start_serdes)(struct ufs_qcom_phy *phy);
int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
index 1e0d4f2046a4..4815546f228c 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
@@ -42,28 +42,6 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
}
-static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
- bool is_rate_B = false;
- int ret;
-
- if (phy_common->mode == PHY_MODE_UFS_HS_B)
- is_rate_B = true;
-
- ret = ufs_qcom_phy_qmp_14nm_phy_calibrate(phy_common, is_rate_B);
- if (!ret)
- /* phy calibrated, but yet to be started */
- phy_common->is_started = false;
-
- return ret;
-}
-
-static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
-{
- return 0;
-}
-
static
int ufs_qcom_phy_qmp_14nm_set_mode(struct phy *generic_phy,
enum phy_mode mode, int submode)
@@ -124,8 +102,6 @@ static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
}
static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
- .init = ufs_qcom_phy_qmp_14nm_init,
- .exit = ufs_qcom_phy_qmp_14nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.set_mode = ufs_qcom_phy_qmp_14nm_set_mode,
@@ -133,6 +109,7 @@ static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
};
static struct ufs_qcom_phy_specific_ops phy_14nm_ops = {
+ .calibrate = ufs_qcom_phy_qmp_14nm_phy_calibrate,
.start_serdes = ufs_qcom_phy_qmp_14nm_start_serdes,
.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
.set_tx_lane_enable = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
index aef40f7a41d4..f1cf819e12ea 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
@@ -61,28 +61,6 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
}
-static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
- bool is_rate_B = false;
- int ret;
-
- if (phy_common->mode == PHY_MODE_UFS_HS_B)
- is_rate_B = true;
-
- ret = ufs_qcom_phy_qmp_20nm_phy_calibrate(phy_common, is_rate_B);
- if (!ret)
- /* phy calibrated, but yet to be started */
- phy_common->is_started = false;
-
- return ret;
-}
-
-static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
-{
- return 0;
-}
-
static
int ufs_qcom_phy_qmp_20nm_set_mode(struct phy *generic_phy,
enum phy_mode mode, int submode)
@@ -182,8 +160,6 @@ static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
}
static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
- .init = ufs_qcom_phy_qmp_20nm_init,
- .exit = ufs_qcom_phy_qmp_20nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.set_mode = ufs_qcom_phy_qmp_20nm_set_mode,
@@ -191,6 +167,7 @@ static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
};
static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
+ .calibrate = ufs_qcom_phy_qmp_20nm_phy_calibrate,
.start_serdes = ufs_qcom_phy_qmp_20nm_start_serdes,
.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
.set_tx_lane_enable = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index f2979ccad00a..45404e31e672 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -147,6 +147,21 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
+static int ufs_qcom_phy_get_reset(struct ufs_qcom_phy *phy_common)
+{
+ struct reset_control *reset;
+
+ if (phy_common->ufs_reset)
+ return 0;
+
+ reset = devm_reset_control_get_exclusive_by_index(phy_common->dev, 0);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ phy_common->ufs_reset = reset;
+ return 0;
+}
+
static int __ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool err_print)
{
@@ -459,7 +474,7 @@ out:
}
/* Turn OFF M-PHY RMMI interface clocks */
-void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
+static void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
if (phy->is_iface_clk_enabled) {
clk_disable_unprepare(phy->tx_iface_clk);
@@ -528,23 +543,38 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
struct device *dev = phy_common->dev;
+ bool is_rate_B = false;
int err;
- if (phy_common->is_powered_on)
- return 0;
+ err = ufs_qcom_phy_get_reset(phy_common);
+ if (err)
+ return err;
- if (!phy_common->is_started) {
- err = ufs_qcom_phy_start_serdes(phy_common);
- if (err)
- return err;
+ err = reset_control_assert(phy_common->ufs_reset);
+ if (err)
+ return err;
- err = ufs_qcom_phy_is_pcs_ready(phy_common);
- if (err)
- return err;
+ if (phy_common->mode == PHY_MODE_UFS_HS_B)
+ is_rate_B = true;
- phy_common->is_started = true;
+ err = phy_common->phy_spec_ops->calibrate(phy_common, is_rate_B);
+ if (err)
+ return err;
+
+ err = reset_control_deassert(phy_common->ufs_reset);
+ if (err) {
+ dev_err(dev, "Failed to assert UFS PHY reset");
+ return err;
}
+ err = ufs_qcom_phy_start_serdes(phy_common);
+ if (err)
+ return err;
+
+ err = ufs_qcom_phy_is_pcs_ready(phy_common);
+ if (err)
+ return err;
+
err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
if (err) {
dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
@@ -587,7 +617,6 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
}
}
- phy_common->is_powered_on = true;
goto out;
out_disable_ref_clk:
@@ -607,9 +636,6 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
- if (!phy_common->is_powered_on)
- return 0;
-
phy_common->phy_spec_ops->power_control(phy_common, false);
if (phy_common->vddp_ref_clk.reg)
@@ -620,8 +646,7 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
- phy_common->is_powered_on = false;
-
+ reset_control_assert(phy_common->ufs_reset);
return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index e340a925bbb1..111bdcae775c 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -19,7 +19,7 @@ config PHY_RCAR_GEN3_PCIE
config PHY_RCAR_GEN3_USB2
tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
depends on ARCH_RENESAS
- depends on EXTCON
+ depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
depends on USB_SUPPORT
select GENERIC_PHY
select USB_COMMON
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 72eeb066912d..8dc5710d9c98 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
+ * Copyright (C) 2019 Renesas Electronics Corp.
*/
#include <linux/clk.h>
@@ -15,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#include <linux/of_device.h>
#define USBHS_LPSTS 0x02
#define USBHS_UGCTRL 0x80
@@ -35,6 +37,8 @@
#define USBHS_UGCTRL2_USB0SEL 0x00000030
#define USBHS_UGCTRL2_USB0SEL_PCI 0x00000010
#define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030
+#define USBHS_UGCTRL2_USB0SEL_USB20 0x00000010
+#define USBHS_UGCTRL2_USB0SEL_HS_USB20 0x00000020
/* USB General status register (UGSTS) */
#define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */
@@ -64,6 +68,11 @@ struct rcar_gen2_phy_driver {
struct rcar_gen2_channel *channels;
};
+struct rcar_gen2_phy_data {
+ const struct phy_ops *gen2_phy_ops;
+ const u32 (*select_value)[PHYS_PER_CHANNEL];
+};
+
static int rcar_gen2_phy_init(struct phy *p)
{
struct rcar_gen2_phy *phy = phy_get_drvdata(p);
@@ -180,6 +189,60 @@ static int rcar_gen2_phy_power_off(struct phy *p)
return 0;
}
+static int rz_g1c_phy_power_on(struct phy *p)
+{
+ struct rcar_gen2_phy *phy = phy_get_drvdata(p);
+ struct rcar_gen2_phy_driver *drv = phy->channel->drv;
+ void __iomem *base = drv->base;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&drv->lock, flags);
+
+ /* Power on USBHS PHY */
+ value = readl(base + USBHS_UGCTRL);
+ value &= ~USBHS_UGCTRL_PLLRESET;
+ writel(value, base + USBHS_UGCTRL);
+
+ /* As per the datasheet, wait 340 microseconds for the power to stabilize */
+ udelay(340);
+
+ if (phy->select_value == USBHS_UGCTRL2_USB0SEL_HS_USB20) {
+ value = readw(base + USBHS_LPSTS);
+ value |= USBHS_LPSTS_SUSPM;
+ writew(value, base + USBHS_LPSTS);
+ }
+
+ spin_unlock_irqrestore(&drv->lock, flags);
+
+ return 0;
+}
+
+static int rz_g1c_phy_power_off(struct phy *p)
+{
+ struct rcar_gen2_phy *phy = phy_get_drvdata(p);
+ struct rcar_gen2_phy_driver *drv = phy->channel->drv;
+ void __iomem *base = drv->base;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&drv->lock, flags);
+ /* Power off USBHS PHY */
+ if (phy->select_value == USBHS_UGCTRL2_USB0SEL_HS_USB20) {
+ value = readw(base + USBHS_LPSTS);
+ value &= ~USBHS_LPSTS_SUSPM;
+ writew(value, base + USBHS_LPSTS);
+ }
+
+ value = readl(base + USBHS_UGCTRL);
+ value |= USBHS_UGCTRL_PLLRESET;
+ writel(value, base + USBHS_UGCTRL);
+
+ spin_unlock_irqrestore(&drv->lock, flags);
+
+ return 0;
+}
+
static const struct phy_ops rcar_gen2_phy_ops = {
.init = rcar_gen2_phy_init,
.exit = rcar_gen2_phy_exit,
@@ -188,12 +251,55 @@ static const struct phy_ops rcar_gen2_phy_ops = {
.owner = THIS_MODULE,
};
+static const struct phy_ops rz_g1c_phy_ops = {
+ .init = rcar_gen2_phy_init,
+ .exit = rcar_gen2_phy_exit,
+ .power_on = rz_g1c_phy_power_on,
+ .power_off = rz_g1c_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const u32 pci_select_value[][PHYS_PER_CHANNEL] = {
+ [0] = { USBHS_UGCTRL2_USB0SEL_PCI, USBHS_UGCTRL2_USB0SEL_HS_USB },
+ [2] = { USBHS_UGCTRL2_USB2SEL_PCI, USBHS_UGCTRL2_USB2SEL_USB30 },
+};
+
+static const u32 usb20_select_value[][PHYS_PER_CHANNEL] = {
+ { USBHS_UGCTRL2_USB0SEL_USB20, USBHS_UGCTRL2_USB0SEL_HS_USB20 },
+};
+
+static const struct rcar_gen2_phy_data rcar_gen2_usb_phy_data = {
+ .gen2_phy_ops = &rcar_gen2_phy_ops,
+ .select_value = pci_select_value,
+};
+
+static const struct rcar_gen2_phy_data rz_g1c_usb_phy_data = {
+ .gen2_phy_ops = &rz_g1c_phy_ops,
+ .select_value = usb20_select_value,
+};
+
static const struct of_device_id rcar_gen2_phy_match_table[] = {
- { .compatible = "renesas,usb-phy-r8a7790" },
- { .compatible = "renesas,usb-phy-r8a7791" },
- { .compatible = "renesas,usb-phy-r8a7794" },
- { .compatible = "renesas,rcar-gen2-usb-phy" },
- { }
+ {
+ .compatible = "renesas,usb-phy-r8a77470",
+ .data = &rz_g1c_usb_phy_data,
+ },
+ {
+ .compatible = "renesas,usb-phy-r8a7790",
+ .data = &rcar_gen2_usb_phy_data,
+ },
+ {
+ .compatible = "renesas,usb-phy-r8a7791",
+ .data = &rcar_gen2_usb_phy_data,
+ },
+ {
+ .compatible = "renesas,usb-phy-r8a7794",
+ .data = &rcar_gen2_usb_phy_data,
+ },
+ {
+ .compatible = "renesas,rcar-gen2-usb-phy",
+ .data = &rcar_gen2_usb_phy_data,
+ },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_gen2_phy_match_table);
@@ -224,11 +330,6 @@ static const u32 select_mask[] = {
[2] = USBHS_UGCTRL2_USB2SEL,
};
-static const u32 select_value[][PHYS_PER_CHANNEL] = {
- [0] = { USBHS_UGCTRL2_USB0SEL_PCI, USBHS_UGCTRL2_USB0SEL_HS_USB },
- [2] = { USBHS_UGCTRL2_USB2SEL_PCI, USBHS_UGCTRL2_USB2SEL_USB30 },
-};
-
static int rcar_gen2_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -238,6 +339,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
struct clk *clk;
+ const struct rcar_gen2_phy_data *data;
int i = 0;
if (!dev->of_node) {
@@ -266,6 +368,10 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
drv->clk = clk;
drv->base = base;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
drv->num_channels = of_get_child_count(dev->of_node);
drv->channels = devm_kcalloc(dev, drv->num_channels,
sizeof(struct rcar_gen2_channel),
@@ -294,10 +400,10 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
phy->channel = channel;
phy->number = n;
- phy->select_value = select_value[channel_num][n];
+ phy->select_value = data->select_value[channel_num][n];
phy->phy = devm_phy_create(dev, NULL,
- &rcar_gen2_phy_ops);
+ data->gen2_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "Failed to create PHY\n");
return PTR_ERR(phy->phy);
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 0a34782aaaa2..1322185a00a2 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -37,11 +37,8 @@
/* INT_ENABLE */
#define USB2_INT_ENABLE_UCOM_INTEN BIT(3)
-#define USB2_INT_ENABLE_USBH_INTB_EN BIT(2)
-#define USB2_INT_ENABLE_USBH_INTA_EN BIT(1)
-#define USB2_INT_ENABLE_INIT (USB2_INT_ENABLE_UCOM_INTEN | \
- USB2_INT_ENABLE_USBH_INTB_EN | \
- USB2_INT_ENABLE_USBH_INTA_EN)
+#define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) /* For EHCI */
+#define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) /* For OHCI */
/* USBCTR */
#define USB2_USBCTR_DIRPD BIT(2)
@@ -78,10 +75,35 @@
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
+#define NUM_OF_PHYS 4
+enum rcar_gen3_phy_index {
+ PHY_INDEX_BOTH_HC,
+ PHY_INDEX_OHCI,
+ PHY_INDEX_EHCI,
+ PHY_INDEX_HSUSB
+};
+
+static const u32 rcar_gen3_int_enable[NUM_OF_PHYS] = {
+ USB2_INT_ENABLE_USBH_INTB_EN | USB2_INT_ENABLE_USBH_INTA_EN,
+ USB2_INT_ENABLE_USBH_INTA_EN,
+ USB2_INT_ENABLE_USBH_INTB_EN,
+ 0
+};
+
+struct rcar_gen3_phy {
+ struct phy *phy;
+ struct rcar_gen3_chan *ch;
+ u32 int_enable_bits;
+ bool initialized;
+ bool otg_initialized;
+ bool powered;
+};
+
struct rcar_gen3_chan {
void __iomem *base;
+ struct device *dev; /* platform_device's device */
struct extcon_dev *extcon;
- struct phy *phy;
+ struct rcar_gen3_phy rphys[NUM_OF_PHYS];
struct regulator *vbus;
struct work_struct work;
enum usb_dr_mode dr_mode;
@@ -120,7 +142,7 @@ static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_COMMCTRL);
- dev_vdbg(&ch->phy->dev, "%s: %08x, %d\n", __func__, val, host);
+ dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, host);
if (host)
val &= ~USB2_COMMCTRL_OTG_PERI;
else
@@ -133,7 +155,7 @@ static void rcar_gen3_set_linectrl(struct rcar_gen3_chan *ch, int dp, int dm)
void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_LINECTRL1);
- dev_vdbg(&ch->phy->dev, "%s: %08x, %d, %d\n", __func__, val, dp, dm);
+ dev_vdbg(ch->dev, "%s: %08x, %d, %d\n", __func__, val, dp, dm);
val &= ~(USB2_LINECTRL1_DP_RPD | USB2_LINECTRL1_DM_RPD);
if (dp)
val |= USB2_LINECTRL1_DP_RPD;
@@ -147,7 +169,7 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_ADPCTRL);
- dev_vdbg(&ch->phy->dev, "%s: %08x, %d\n", __func__, val, vbus);
+ dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus);
if (vbus)
val |= USB2_ADPCTRL_DRVVBUS;
else
@@ -249,6 +271,42 @@ static enum phy_mode rcar_gen3_get_phy_mode(struct rcar_gen3_chan *ch)
return PHY_MODE_USB_DEVICE;
}
+static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch)
+{
+ int i;
+
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ if (ch->rphys[i].initialized)
+ return true;
+ }
+
+ return false;
+}
+
+static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch)
+{
+ int i;
+
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ if (ch->rphys[i].otg_initialized)
+ return false;
+ }
+
+ return true;
+}
+
+static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch)
+{
+ int i;
+
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ if (ch->rphys[i].powered)
+ return false;
+ }
+
+ return true;
+}
+
static ssize_t role_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -256,7 +314,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
bool is_b_device;
enum phy_mode cur_mode, new_mode;
- if (!ch->is_otg_channel || !ch->phy->init_count)
+ if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
return -EIO;
if (!strncmp(buf, "host", strlen("host")))
@@ -294,7 +352,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- if (!ch->is_otg_channel || !ch->phy->init_count)
+ if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -328,37 +386,62 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
static int rcar_gen3_phy_usb2_init(struct phy *p)
{
- struct rcar_gen3_chan *channel = phy_get_drvdata(p);
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
+ u32 val;
/* Initialize USB2 part */
- writel(USB2_INT_ENABLE_INIT, usb2_base + USB2_INT_ENABLE);
+ val = readl(usb2_base + USB2_INT_ENABLE);
+ val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits;
+ writel(val, usb2_base + USB2_INT_ENABLE);
writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
/* Initialize otg part */
- if (channel->is_otg_channel)
- rcar_gen3_init_otg(channel);
+ if (channel->is_otg_channel) {
+ if (rcar_gen3_needs_init_otg(channel))
+ rcar_gen3_init_otg(channel);
+ rphy->otg_initialized = true;
+ }
+
+ rphy->initialized = true;
return 0;
}
static int rcar_gen3_phy_usb2_exit(struct phy *p)
{
- struct rcar_gen3_chan *channel = phy_get_drvdata(p);
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+
+ rphy->initialized = false;
+
+ if (channel->is_otg_channel)
+ rphy->otg_initialized = false;
- writel(0, channel->base + USB2_INT_ENABLE);
+ val = readl(usb2_base + USB2_INT_ENABLE);
+ val &= ~rphy->int_enable_bits;
+ if (!rcar_gen3_is_any_rphy_initialized(channel))
+ val &= ~USB2_INT_ENABLE_UCOM_INTEN;
+ writel(val, usb2_base + USB2_INT_ENABLE);
return 0;
}
static int rcar_gen3_phy_usb2_power_on(struct phy *p)
{
- struct rcar_gen3_chan *channel = phy_get_drvdata(p);
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
u32 val;
int ret;
+ if (!rcar_gen3_are_all_rphys_power_off(channel))
+ return 0;
+
if (channel->vbus) {
ret = regulator_enable(channel->vbus);
if (ret)
@@ -371,14 +454,22 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
val &= ~USB2_USBCTR_PLL_RST;
writel(val, usb2_base + USB2_USBCTR);
+ rphy->powered = true;
+
return 0;
}
static int rcar_gen3_phy_usb2_power_off(struct phy *p)
{
- struct rcar_gen3_chan *channel = phy_get_drvdata(p);
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
int ret = 0;
+ rphy->powered = false;
+
+ if (!rcar_gen3_are_all_rphys_power_off(channel))
+ return 0;
+
if (channel->vbus)
ret = regulator_disable(channel->vbus);
@@ -393,6 +484,12 @@ static const struct phy_ops rcar_gen3_phy_usb2_ops = {
.owner = THIS_MODULE,
};
+static const struct phy_ops rz_g1c_phy_usb2_ops = {
+ .init = rcar_gen3_phy_usb2_init,
+ .exit = rcar_gen3_phy_usb2_exit,
+ .owner = THIS_MODULE,
+};
+
static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
{
struct rcar_gen3_chan *ch = _ch;
@@ -401,7 +498,7 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
irqreturn_t ret = IRQ_NONE;
if (status & USB2_OBINT_BITS) {
- dev_vdbg(&ch->phy->dev, "%s: %08x\n", __func__, status);
+ dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
rcar_gen3_device_recognition(ch);
ret = IRQ_HANDLED;
@@ -411,11 +508,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
}
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
- { .compatible = "renesas,usb2-phy-r8a7795" },
- { .compatible = "renesas,usb2-phy-r8a7796" },
- { .compatible = "renesas,usb2-phy-r8a77965" },
- { .compatible = "renesas,rcar-gen3-usb2-phy" },
- { }
+ {
+ .compatible = "renesas,usb2-phy-r8a77470",
+ .data = &rz_g1c_phy_usb2_ops,
+ },
+ {
+ .compatible = "renesas,usb2-phy-r8a7795",
+ .data = &rcar_gen3_phy_usb2_ops,
+ },
+ {
+ .compatible = "renesas,usb2-phy-r8a7796",
+ .data = &rcar_gen3_phy_usb2_ops,
+ },
+ {
+ .compatible = "renesas,usb2-phy-r8a77965",
+ .data = &rcar_gen3_phy_usb2_ops,
+ },
+ {
+ .compatible = "renesas,rcar-gen3-usb2-phy",
+ .data = &rcar_gen3_phy_usb2_ops,
+ },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table);
@@ -425,13 +538,54 @@ static const unsigned int rcar_gen3_phy_cable[] = {
EXTCON_NONE,
};
+static struct phy *rcar_gen3_phy_usb2_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+
+ if (args->args_count == 0) /* For backward compatibility with old DTs */
+ return ch->rphys[PHY_INDEX_BOTH_HC].phy;
+ else if (args->args_count > 1) /* Reject an invalid args count */
+ return ERR_PTR(-ENODEV);
+
+ if (args->args[0] >= NUM_OF_PHYS)
+ return ERR_PTR(-ENODEV);
+
+ return ch->rphys[args->args[0]].phy;
+}
+
+static enum usb_dr_mode rcar_gen3_get_dr_mode(struct device_node *np)
+{
+ enum usb_dr_mode candidate = USB_DR_MODE_UNKNOWN;
+ int i;
+
+ /*
+ * If the device nodes disagree on dr_mode (values other than UNKNOWN),
+ * this function returns UNKNOWN. To keep backward compatibility, the
+ * loop starts at index 0.
+ */
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ enum usb_dr_mode mode = of_usb_get_dr_mode_by_phy(np, i);
+
+ if (mode != USB_DR_MODE_UNKNOWN) {
+ if (candidate == USB_DR_MODE_UNKNOWN)
+ candidate = mode;
+ else if (candidate != mode)
+ return USB_DR_MODE_UNKNOWN;
+ }
+ }
+
+ return candidate;
+}
+
static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_gen3_chan *channel;
struct phy_provider *provider;
struct resource *res;
- int irq, ret = 0;
+ const struct phy_ops *phy_usb2_ops;
+ int irq, ret = 0, i;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
@@ -457,7 +611,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "No irq handler (%d)\n", irq);
}
- channel->dr_mode = of_usb_get_dr_mode_by_phy(dev->of_node, 0);
+ channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
int ret;
@@ -481,11 +635,21 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
* And then, phy-core will manage runtime pm for this device.
*/
pm_runtime_enable(dev);
- channel->phy = devm_phy_create(dev, NULL, &rcar_gen3_phy_usb2_ops);
- if (IS_ERR(channel->phy)) {
- dev_err(dev, "Failed to create USB2 PHY\n");
- ret = PTR_ERR(channel->phy);
- goto error;
+ phy_usb2_ops = of_device_get_match_data(dev);
+ if (!phy_usb2_ops)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ channel->rphys[i].phy = devm_phy_create(dev, NULL,
+ phy_usb2_ops);
+ if (IS_ERR(channel->rphys[i].phy)) {
+ dev_err(dev, "Failed to create USB2 PHY\n");
+ ret = PTR_ERR(channel->rphys[i].phy);
+ goto error;
+ }
+ channel->rphys[i].ch = channel;
+ channel->rphys[i].int_enable_bits = rcar_gen3_int_enable[i];
+ phy_set_drvdata(channel->rphys[i].phy, &channel->rphys[i]);
}
channel->vbus = devm_regulator_get_optional(dev, "vbus");
@@ -498,9 +662,9 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, channel);
- phy_set_drvdata(channel->phy, channel);
+ channel->dev = dev;
- provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
ret = PTR_ERR(provider);
diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
index 19bf84f0bc67..b804c6bf4b55 100644
--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
+++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
@@ -87,6 +87,7 @@ struct rockchip_emmc_phy {
unsigned int reg_offset;
struct regmap *reg_base;
struct clk *emmcclk;
+ unsigned int drive_impedance;
};
static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
@@ -281,10 +282,10 @@ static int rockchip_emmc_phy_power_on(struct phy *phy)
{
struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
- /* Drive impedance: 50 Ohm */
+ /* Drive impedance: from DTS */
regmap_write(rk_phy->reg_base,
rk_phy->reg_offset + GRF_EMMCPHY_CON6,
- HIWORD_UPDATE(PHYCTRL_DR_50OHM,
+ HIWORD_UPDATE(rk_phy->drive_impedance,
PHYCTRL_DR_MASK,
PHYCTRL_DR_SHIFT));
@@ -314,6 +315,26 @@ static const struct phy_ops ops = {
.owner = THIS_MODULE,
};
+static u32 convert_drive_impedance_ohm(struct platform_device *pdev, u32 dr_ohm)
+{
+ switch (dr_ohm) {
+ case 100:
+ return PHYCTRL_DR_100OHM;
+ case 66:
+ return PHYCTRL_DR_66OHM;
+ case 50:
+ return PHYCTRL_DR_50OHM;
+ case 40:
+ return PHYCTRL_DR_40OHM;
+ case 33:
+ return PHYCTRL_DR_33OHM;
+ }
+
+ dev_warn(&pdev->dev, "Invalid value %u for drive-impedance-ohm.\n",
+ dr_ohm);
+ return PHYCTRL_DR_50OHM;
+}
+
static int rockchip_emmc_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -322,6 +343,7 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct regmap *grf;
unsigned int reg_offset;
+ u32 val;
if (!dev->parent || !dev->parent->of_node)
return -ENODEV;
@@ -344,6 +366,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
rk_phy->reg_offset = reg_offset;
rk_phy->reg_base = grf;
+ rk_phy->drive_impedance = PHYCTRL_DR_50OHM;
+
+ if (!of_property_read_u32(dev->of_node, "drive-impedance-ohm", &val))
+ rk_phy->drive_impedance = convert_drive_impedance_ohm(pdev, val);
generic_phy = devm_phy_create(dev, dev->of_node, &ops);
if (IS_ERR(generic_phy)) {
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index e32edeebcd63..8ad366ee6ada 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -400,7 +400,7 @@ struct phy_reg {
u32 addr;
};
-struct phy_reg usb3_pll_cfg[] = {
+static struct phy_reg usb3_pll_cfg[] = {
{ 0xf0, CMN_PLL0_VCOCAL_INIT },
{ 0x18, CMN_PLL0_VCOCAL_ITER },
{ 0xd0, CMN_PLL0_INTDIV },
@@ -417,7 +417,7 @@ struct phy_reg usb3_pll_cfg[] = {
{ 0x8, CMN_DIAG_PLL0_LF_PROG },
};
-struct phy_reg dp_pll_cfg[] = {
+static struct phy_reg dp_pll_cfg[] = {
{ 0xf0, CMN_PLL1_VCOCAL_INIT },
{ 0x18, CMN_PLL1_VCOCAL_ITER },
{ 0x30b9, CMN_PLL1_VCOCAL_START },
diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c
index b1b048be6166..50f379fc4e06 100644
--- a/drivers/phy/socionext/phy-uniphier-usb3hs.c
+++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c
@@ -335,13 +335,9 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_parent))
return PTR_ERR(priv->clk_parent);
- priv->clk_ext = devm_clk_get(dev, "phy-ext");
- if (IS_ERR(priv->clk_ext)) {
- if (PTR_ERR(priv->clk_ext) == -ENOENT)
- priv->clk_ext = NULL;
- else
- return PTR_ERR(priv->clk_ext);
- }
+ priv->clk_ext = devm_clk_get_optional(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext))
+ return PTR_ERR(priv->clk_ext);
priv->rst = devm_reset_control_get_shared(dev, "phy");
if (IS_ERR(priv->rst))
diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c
index 4be95679c7d8..ec231e40ef2a 100644
--- a/drivers/phy/socionext/phy-uniphier-usb3ss.c
+++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c
@@ -238,13 +238,9 @@ static int uniphier_u3ssphy_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- priv->clk_ext = devm_clk_get(dev, "phy-ext");
- if (IS_ERR(priv->clk_ext)) {
- if (PTR_ERR(priv->clk_ext) == -ENOENT)
- priv->clk_ext = NULL;
- else
- return PTR_ERR(priv->clk_ext);
- }
+ priv->clk_ext = devm_clk_get_optional(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext))
+ return PTR_ERR(priv->clk_ext);
priv->rst = devm_reset_control_get_shared(dev, "phy");
if (IS_ERR(priv->rst))
diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile
index 898589238fd9..a93cd9a499b2 100644
--- a/drivers/phy/tegra/Makefile
+++ b/drivers/phy/tegra/Makefile
@@ -4,3 +4,4 @@ phy-tegra-xusb-y += xusb.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_124_SOC) += xusb-tegra124.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_186_SOC) += xusb-tegra186.o
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
new file mode 100644
index 000000000000..6f3afaf9398f
--- /dev/null
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "xusb.h"
+
+/* FUSE USB_CALIB registers */
+#define HS_CURR_LEVEL_PADX_SHIFT(x) ((x) ? (11 + ((x) - 1) * 6) : 0)
+#define HS_CURR_LEVEL_PAD_MASK 0x3f
+#define HS_TERM_RANGE_ADJ_SHIFT 7
+#define HS_TERM_RANGE_ADJ_MASK 0xf
+#define HS_SQUELCH_SHIFT 29
+#define HS_SQUELCH_MASK 0x7
+
+#define RPD_CTRL_SHIFT 0
+#define RPD_CTRL_MASK 0x1f
+
+/* XUSB PADCTL registers */
+#define XUSB_PADCTL_USB2_PAD_MUX 0x4
+#define USB2_PORT_SHIFT(x) ((x) * 2)
+#define USB2_PORT_MASK 0x3
+#define PORT_XUSB 1
+#define HSIC_PORT_SHIFT(x) ((x) + 20)
+#define HSIC_PORT_MASK 0x1
+#define PORT_HSIC 0
+
+#define XUSB_PADCTL_USB2_PORT_CAP 0x8
+#define XUSB_PADCTL_SS_PORT_CAP 0xc
+#define PORTX_CAP_SHIFT(x) ((x) * 4)
+#define PORT_CAP_MASK 0x3
+#define PORT_CAP_DISABLED 0x0
+#define PORT_CAP_HOST 0x1
+#define PORT_CAP_DEVICE 0x2
+#define PORT_CAP_OTG 0x3
+
+#define XUSB_PADCTL_ELPG_PROGRAM 0x20
+#define USB2_PORT_WAKE_INTERRUPT_ENABLE(x) BIT(x)
+#define USB2_PORT_WAKEUP_EVENT(x) BIT((x) + 7)
+#define SS_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x) + 14)
+#define SS_PORT_WAKEUP_EVENT(x) BIT((x) + 21)
+#define USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x) + 28)
+#define USB2_HSIC_PORT_WAKEUP_EVENT(x) BIT((x) + 30)
+#define ALL_WAKE_EVENTS \
+ (USB2_PORT_WAKEUP_EVENT(0) | USB2_PORT_WAKEUP_EVENT(1) | \
+ USB2_PORT_WAKEUP_EVENT(2) | SS_PORT_WAKEUP_EVENT(0) | \
+ SS_PORT_WAKEUP_EVENT(1) | SS_PORT_WAKEUP_EVENT(2) | \
+ USB2_HSIC_PORT_WAKEUP_EVENT(0))
+
+#define XUSB_PADCTL_ELPG_PROGRAM_1 0x24
+#define SSPX_ELPG_CLAMP_EN(x) BIT(0 + (x) * 3)
+#define SSPX_ELPG_CLAMP_EN_EARLY(x) BIT(1 + (x) * 3)
+#define SSPX_ELPG_VCORE_DOWN(x) BIT(2 + (x) * 3)
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x88 + (x) * 0x40)
+#define HS_CURR_LEVEL(x) ((x) & 0x3f)
+#define TERM_SEL BIT(25)
+#define USB2_OTG_PD BIT(26)
+#define USB2_OTG_PD2 BIT(27)
+#define USB2_OTG_PD2_OVRD_EN BIT(28)
+#define USB2_OTG_PD_ZI BIT(29)
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL1(x) (0x8c + (x) * 0x40)
+#define USB2_OTG_PD_DR BIT(2)
+#define TERM_RANGE_ADJ(x) (((x) & 0xf) << 3)
+#define RPD_CTRL(x) (((x) & 0x1f) << 26)
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x284
+#define BIAS_PAD_PD BIT(11)
+#define HS_SQUELCH_LEVEL(x) (((x) & 0x7) << 0)
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1 0x288
+#define USB2_TRK_START_TIMER(x) (((x) & 0x7f) << 12)
+#define USB2_TRK_DONE_RESET_TIMER(x) (((x) & 0x7f) << 19)
+#define USB2_PD_TRK BIT(26)
+
+#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x300 + (x) * 0x20)
+#define HSIC_PD_TX_DATA0 BIT(1)
+#define HSIC_PD_TX_STROBE BIT(3)
+#define HSIC_PD_RX_DATA0 BIT(4)
+#define HSIC_PD_RX_STROBE BIT(6)
+#define HSIC_PD_ZI_DATA0 BIT(7)
+#define HSIC_PD_ZI_STROBE BIT(9)
+#define HSIC_RPD_DATA0 BIT(13)
+#define HSIC_RPD_STROBE BIT(15)
+#define HSIC_RPU_DATA0 BIT(16)
+#define HSIC_RPU_STROBE BIT(18)
+
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL0 0x340
+#define HSIC_TRK_START_TIMER(x) (((x) & 0x7f) << 5)
+#define HSIC_TRK_DONE_RESET_TIMER(x) (((x) & 0x7f) << 12)
+#define HSIC_PD_TRK BIT(19)
+
+#define USB2_VBUS_ID 0x360
+#define VBUS_OVERRIDE BIT(14)
+#define ID_OVERRIDE(x) (((x) & 0xf) << 18)
+#define ID_OVERRIDE_FLOATING ID_OVERRIDE(8)
+#define ID_OVERRIDE_GROUNDED ID_OVERRIDE(0)
+
+#define TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \
+ { \
+ .name = _name, \
+ .offset = _offset, \
+ .shift = _shift, \
+ .mask = _mask, \
+ .num_funcs = ARRAY_SIZE(tegra186_##_type##_functions), \
+ .funcs = tegra186_##_type##_functions, \
+ }
+
+struct tegra_xusb_fuse_calibration {
+ u32 *hs_curr_level;
+ u32 hs_squelch;
+ u32 hs_term_range_adj;
+ u32 rpd_ctrl;
+};
+
+struct tegra186_xusb_padctl {
+ struct tegra_xusb_padctl base;
+
+ struct tegra_xusb_fuse_calibration calib;
+
+ /* UTMI bias and tracking */
+ struct clk *usb2_trk_clk;
+ unsigned int bias_pad_enable;
+};
+
+static inline struct tegra186_xusb_padctl *
+to_tegra186_xusb_padctl(struct tegra_xusb_padctl *padctl)
+{
+ return container_of(padctl, struct tegra186_xusb_padctl, base);
+}
+
+/* USB 2.0 UTMI PHY support */
+static struct tegra_xusb_lane *
+tegra186_usb2_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_usb2_lane *usb2;
+ int err;
+
+ usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+ if (!usb2)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&usb2->base.list);
+ usb2->base.soc = &pad->soc->lanes[index];
+ usb2->base.index = index;
+ usb2->base.pad = pad;
+ usb2->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&usb2->base, np);
+ if (err < 0) {
+ kfree(usb2);
+ return ERR_PTR(err);
+ }
+
+ return &usb2->base;
+}
+
+static void tegra186_usb2_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+
+ kfree(usb2);
+}
+
+static const struct tegra_xusb_lane_ops tegra186_usb2_lane_ops = {
+ .probe = tegra186_usb2_lane_probe,
+ .remove = tegra186_usb2_lane_remove,
+};
+
+static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ struct device *dev = padctl->dev;
+ u32 value;
+ int err;
+
+ mutex_lock(&padctl->lock);
+
+ if (priv->bias_pad_enable++ > 0) {
+ mutex_unlock(&padctl->lock);
+ return;
+ }
+
+ err = clk_prepare_enable(priv->usb2_trk_clk);
+ if (err < 0)
+ dev_warn(dev, "failed to enable USB2 trk clock: %d\n", err);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ value &= ~USB2_TRK_START_TIMER(~0);
+ value |= USB2_TRK_START_TIMER(0x1e);
+ value &= ~USB2_TRK_DONE_RESET_TIMER(~0);
+ value |= USB2_TRK_DONE_RESET_TIMER(0xa);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+ value &= ~BIAS_PAD_PD;
+ value &= ~HS_SQUELCH_LEVEL(~0);
+ value |= HS_SQUELCH_LEVEL(priv->calib.hs_squelch);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+ udelay(1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ value &= ~USB2_PD_TRK;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+
+ mutex_unlock(&padctl->lock);
+}
+
+static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ if (WARN_ON(priv->bias_pad_enable == 0)) {
+ mutex_unlock(&padctl->lock);
+ return;
+ }
+
+ if (--priv->bias_pad_enable > 0) {
+ mutex_unlock(&padctl->lock);
+ return;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ value |= USB2_PD_TRK;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+
+ clk_disable_unprepare(priv->usb2_trk_clk);
+
+ mutex_unlock(&padctl->lock);
+}
+
+static void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane;
+ struct tegra_xusb_padctl *padctl;
+ struct tegra_xusb_usb2_port *port;
+ struct device *dev;
+ unsigned int index;
+ u32 value;
+
+ if (!phy)
+ return;
+
+ lane = phy_get_drvdata(phy);
+ padctl = lane->pad->padctl;
+ dev = padctl->dev;
+ index = lane->index;
+
+ port = tegra_xusb_find_usb2_port(padctl, index);
+ if (!port) {
+ dev_err(dev, "no port found for USB2 lane %u\n", index);
+ return;
+ }
+
+ tegra186_utmi_bias_pad_power_on(padctl);
+
+ udelay(2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+ value &= ~USB2_OTG_PD;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+ value &= ~USB2_OTG_PD_DR;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+}
+
+static void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane;
+ struct tegra_xusb_padctl *padctl;
+ unsigned int index;
+ u32 value;
+
+ if (!phy)
+ return;
+
+ lane = phy_get_drvdata(phy);
+ padctl = lane->pad->padctl;
+ index = lane->index;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+ value |= USB2_OTG_PD;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+ value |= USB2_OTG_PD_DR;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+
+ udelay(2);
+
+ tegra186_utmi_bias_pad_power_off(padctl);
+}
+
+static int tegra186_utmi_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ struct tegra_xusb_usb2_port *port;
+ unsigned int index = lane->index;
+ struct device *dev = padctl->dev;
+ u32 value;
+
+ port = tegra_xusb_find_usb2_port(padctl, index);
+ if (!port) {
+ dev_err(dev, "no port found for USB2 lane %u\n", index);
+ return -ENODEV;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
+ value &= ~(USB2_PORT_MASK << USB2_PORT_SHIFT(index));
+ value |= (PORT_XUSB << USB2_PORT_SHIFT(index));
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
+ value &= ~(PORT_CAP_MASK << PORTX_CAP_SHIFT(index));
+
+ if (port->mode == USB_DR_MODE_UNKNOWN)
+ value |= (PORT_CAP_DISABLED << PORTX_CAP_SHIFT(index));
+ else if (port->mode == USB_DR_MODE_PERIPHERAL)
+ value |= (PORT_CAP_DEVICE << PORTX_CAP_SHIFT(index));
+ else if (port->mode == USB_DR_MODE_HOST)
+ value |= (PORT_CAP_HOST << PORTX_CAP_SHIFT(index));
+ else if (port->mode == USB_DR_MODE_OTG)
+ value |= (PORT_CAP_OTG << PORTX_CAP_SHIFT(index));
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+ value &= ~USB2_OTG_PD_ZI;
+ value |= TERM_SEL;
+ value &= ~HS_CURR_LEVEL(~0);
+
+ if (usb2->hs_curr_level_offset) {
+ int hs_current_level;
+
+ hs_current_level = (int)priv->calib.hs_curr_level[index] +
+ usb2->hs_curr_level_offset;
+
+ if (hs_current_level < 0)
+ hs_current_level = 0;
+ if (hs_current_level > 0x3f)
+ hs_current_level = 0x3f;
+
+ value |= HS_CURR_LEVEL(hs_current_level);
+ } else {
+ value |= HS_CURR_LEVEL(priv->calib.hs_curr_level[index]);
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+ value &= ~TERM_RANGE_ADJ(~0);
+ value |= TERM_RANGE_ADJ(priv->calib.hs_term_range_adj);
+ value &= ~RPD_CTRL(~0);
+ value |= RPD_CTRL(priv->calib.rpd_ctrl);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+
+ /* TODO: pad power saving */
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ return 0;
+}
+
+static int tegra186_utmi_phy_power_off(struct phy *phy)
+{
+ /* TODO: pad power saving */
+ tegra_phy_xusb_utmi_pad_power_down(phy);
+
+ return 0;
+}
+
+static int tegra186_utmi_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb2_port *port;
+ unsigned int index = lane->index;
+ struct device *dev = padctl->dev;
+ int err;
+
+ port = tegra_xusb_find_usb2_port(padctl, index);
+ if (!port) {
+ dev_err(dev, "no port found for USB2 lane %u\n", index);
+ return -ENODEV;
+ }
+
+ if (port->supply && port->mode == USB_DR_MODE_HOST) {
+ err = regulator_enable(port->supply);
+ if (err) {
+ dev_err(dev, "failed to enable port %u VBUS: %d\n",
+ index, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra186_utmi_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb2_port *port;
+ unsigned int index = lane->index;
+ struct device *dev = padctl->dev;
+ int err;
+
+ port = tegra_xusb_find_usb2_port(padctl, index);
+ if (!port) {
+ dev_err(dev, "no port found for USB2 lane %u\n", index);
+ return -ENODEV;
+ }
+
+ if (port->supply && port->mode == USB_DR_MODE_HOST) {
+ err = regulator_disable(port->supply);
+ if (err) {
+ dev_err(dev, "failed to disable port %u VBUS: %d\n",
+ index, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct phy_ops utmi_phy_ops = {
+ .init = tegra186_utmi_phy_init,
+ .exit = tegra186_utmi_phy_exit,
+ .power_on = tegra186_utmi_phy_power_on,
+ .power_off = tegra186_utmi_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra186_usb2_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ struct tegra_xusb_usb2_pad *usb2;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+ if (!usb2)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &usb2->base;
+ pad->ops = &tegra186_usb2_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(usb2);
+ goto out;
+ }
+
+ priv->usb2_trk_clk = devm_clk_get(&pad->dev, "trk");
+ if (IS_ERR(priv->usb2_trk_clk)) {
+ err = PTR_ERR(priv->usb2_trk_clk);
+ dev_dbg(&pad->dev, "failed to get usb2 trk clock: %d\n", err);
+ goto unregister;
+ }
+
+ err = tegra_xusb_pad_register(pad, &utmi_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra186_usb2_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_usb2_pad *usb2 = to_usb2_pad(pad);
+
+ kfree(usb2);
+}
+
+static const struct tegra_xusb_pad_ops tegra186_usb2_pad_ops = {
+ .probe = tegra186_usb2_pad_probe,
+ .remove = tegra186_usb2_pad_remove,
+};
+
+static const char * const tegra186_usb2_functions[] = {
+ "xusb",
+};
+
+static const struct tegra_xusb_lane_soc tegra186_usb2_lanes[] = {
+ TEGRA186_LANE("usb2-0", 0, 0, 0, usb2),
+ TEGRA186_LANE("usb2-1", 0, 0, 0, usb2),
+ TEGRA186_LANE("usb2-2", 0, 0, 0, usb2),
+};
+
+static const struct tegra_xusb_pad_soc tegra186_usb2_pad = {
+ .name = "usb2",
+ .num_lanes = ARRAY_SIZE(tegra186_usb2_lanes),
+ .lanes = tegra186_usb2_lanes,
+ .ops = &tegra186_usb2_pad_ops,
+};
+
+static int tegra186_usb2_port_enable(struct tegra_xusb_port *port)
+{
+ return 0;
+}
+
+static void tegra186_usb2_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra186_usb2_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_find_lane(port->padctl, "usb2", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra186_usb2_port_ops = {
+ .enable = tegra186_usb2_port_enable,
+ .disable = tegra186_usb2_port_disable,
+ .map = tegra186_usb2_port_map,
+};
+
+/* SuperSpeed PHY support */
+static struct tegra_xusb_lane *
+tegra186_usb3_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_usb3_lane *usb3;
+ int err;
+
+ usb3 = kzalloc(sizeof(*usb3), GFP_KERNEL);
+ if (!usb3)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&usb3->base.list);
+ usb3->base.soc = &pad->soc->lanes[index];
+ usb3->base.index = index;
+ usb3->base.pad = pad;
+ usb3->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&usb3->base, np);
+ if (err < 0) {
+ kfree(usb3);
+ return ERR_PTR(err);
+ }
+
+ return &usb3->base;
+}
+
+static void tegra186_usb3_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_usb3_lane *usb3 = to_usb3_lane(lane);
+
+ kfree(usb3);
+}
+
+static const struct tegra_xusb_lane_ops tegra186_usb3_lane_ops = {
+ .probe = tegra186_usb3_lane_probe,
+ .remove = tegra186_usb3_lane_remove,
+};
+static int tegra186_usb3_port_enable(struct tegra_xusb_port *port)
+{
+ return 0;
+}
+
+static void tegra186_usb3_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra186_usb3_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_find_lane(port->padctl, "usb3", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra186_usb3_port_ops = {
+ .enable = tegra186_usb3_port_enable,
+ .disable = tegra186_usb3_port_disable,
+ .map = tegra186_usb3_port_map,
+};
+
+static int tegra186_usb3_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb3_port *port;
+ struct tegra_xusb_usb2_port *usb2;
+ unsigned int index = lane->index;
+ struct device *dev = padctl->dev;
+ u32 value;
+
+ port = tegra_xusb_find_usb3_port(padctl, index);
+ if (!port) {
+ dev_err(dev, "no port found for USB3 lane %u\n", index);
+ return -ENODEV;
+ }
+
+ usb2 = tegra_xusb_find_usb2_port(padctl, port->port);
+ if (!usb2) {
+ dev_err(dev, "no companion port found for USB3 lane %u\n",
+ index);
+ return -ENODEV;
+ }
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_CAP);
+ value &= ~(PORT_CAP_MASK << PORTX_CAP_SHIFT(index));
+
+ if (usb2->mode == USB_DR_MODE_UNKNOWN)
+ value |= (PORT_CAP_DISABLED << PORTX_CAP_SHIFT(index));
+ else if (usb2->mode == USB_DR_MODE_PERIPHERAL)
+ value |= (PORT_CAP_DEVICE << PORTX_CAP_SHIFT(index));
+ else if (usb2->mode == USB_DR_MODE_HOST)
+ value |= (PORT_CAP_HOST << PORTX_CAP_SHIFT(index));
+ else if (usb2->mode == USB_DR_MODE_OTG)
+ value |= (PORT_CAP_OTG << PORTX_CAP_SHIFT(index));
+
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_CAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value &= ~SSPX_ELPG_VCORE_DOWN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value &= ~SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value &= ~SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra186_usb3_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb3_port *port;
+ unsigned int index = lane->index;
+ struct device *dev = padctl->dev;
+ u32 value;
+
+ port = tegra_xusb_find_usb3_port(padctl, index);
+ if (!port) {
+ dev_err(dev, "no port found for USB3 lane %u\n", index);
+ return -ENODEV;
+ }
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value |= SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value |= SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ usleep_range(250, 350);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value |= SSPX_ELPG_VCORE_DOWN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra186_usb3_phy_init(struct phy *phy)
+{
+ return 0;
+}
+
+static int tegra186_usb3_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static const struct phy_ops usb3_phy_ops = {
+ .init = tegra186_usb3_phy_init,
+ .exit = tegra186_usb3_phy_exit,
+ .power_on = tegra186_usb3_phy_power_on,
+ .power_off = tegra186_usb3_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra186_usb3_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_usb3_pad *usb3;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ usb3 = kzalloc(sizeof(*usb3), GFP_KERNEL);
+ if (!usb3)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &usb3->base;
+ pad->ops = &tegra186_usb3_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(usb3);
+ goto out;
+ }
+
+ err = tegra_xusb_pad_register(pad, &usb3_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra186_usb3_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_usb3_pad *usb3;
+
+ usb3 = container_of(pad, struct tegra_xusb_usb3_pad, base);
+ kfree(usb3);
+}
+
+static const struct tegra_xusb_pad_ops tegra186_usb3_pad_ops = {
+ .probe = tegra186_usb3_pad_probe,
+ .remove = tegra186_usb3_pad_remove,
+};
+
+static const char * const tegra186_usb3_functions[] = {
+ "xusb",
+};
+
+static const struct tegra_xusb_lane_soc tegra186_usb3_lanes[] = {
+ TEGRA186_LANE("usb3-0", 0, 0, 0, usb3),
+ TEGRA186_LANE("usb3-1", 0, 0, 0, usb3),
+ TEGRA186_LANE("usb3-2", 0, 0, 0, usb3),
+};
+
+static const struct tegra_xusb_pad_soc tegra186_usb3_pad = {
+ .name = "usb3",
+ .num_lanes = ARRAY_SIZE(tegra186_usb3_lanes),
+ .lanes = tegra186_usb3_lanes,
+ .ops = &tegra186_usb3_pad_ops,
+};
+
+static const struct tegra_xusb_pad_soc * const tegra186_pads[] = {
+ &tegra186_usb2_pad,
+ &tegra186_usb3_pad,
+#if 0 /* TODO implement */
+ &tegra186_hsic_pad,
+#endif
+};
+
+static int
+tegra186_xusb_read_fuse_calibration(struct tegra186_xusb_padctl *padctl)
+{
+ struct device *dev = padctl->base.dev;
+ unsigned int i, count;
+ u32 value, *level;
+ int err;
+
+ count = padctl->base.soc->ports.usb2.count;
+
+ level = devm_kcalloc(dev, count, sizeof(u32), GFP_KERNEL);
+ if (!level)
+ return -ENOMEM;
+
+ err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
+ if (err) {
+ dev_err(dev, "failed to read calibration fuse: %d\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "FUSE_USB_CALIB_0 %#x\n", value);
+
+ for (i = 0; i < count; i++)
+ level[i] = (value >> HS_CURR_LEVEL_PADX_SHIFT(i)) &
+ HS_CURR_LEVEL_PAD_MASK;
+
+ padctl->calib.hs_curr_level = level;
+
+ padctl->calib.hs_squelch = (value >> HS_SQUELCH_SHIFT) &
+ HS_SQUELCH_MASK;
+ padctl->calib.hs_term_range_adj = (value >> HS_TERM_RANGE_ADJ_SHIFT) &
+ HS_TERM_RANGE_ADJ_MASK;
+
+ err = tegra_fuse_readl(TEGRA_FUSE_USB_CALIB_EXT_0, &value);
+ if (err) {
+ dev_err(dev, "failed to read calibration fuse: %d\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "FUSE_USB_CALIB_EXT_0 %#x\n", value);
+
+ padctl->calib.rpd_ctrl = (value >> RPD_CTRL_SHIFT) & RPD_CTRL_MASK;
+
+ return 0;
+}
+
+static struct tegra_xusb_padctl *
+tegra186_xusb_padctl_probe(struct device *dev,
+ const struct tegra_xusb_padctl_soc *soc)
+{
+ struct tegra186_xusb_padctl *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->base.dev = dev;
+ priv->base.soc = soc;
+
+ err = tegra186_xusb_read_fuse_calibration(priv);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return &priv->base;
+}
+
+static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
+{
+}
+
+static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
+ .probe = tegra186_xusb_padctl_probe,
+ .remove = tegra186_xusb_padctl_remove,
+};
+
+static const char * const tegra186_xusb_padctl_supply_names[] = {
+ "avdd-pll-erefeut",
+ "avdd-usb",
+ "vclamp-usb",
+ "vddio-hsic",
+};
+
+const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc = {
+ .num_pads = ARRAY_SIZE(tegra186_pads),
+ .pads = tegra186_pads,
+ .ports = {
+ .usb2 = {
+ .ops = &tegra186_usb2_port_ops,
+ .count = 3,
+ },
+#if 0 /* TODO implement */
+ .hsic = {
+ .ops = &tegra186_hsic_port_ops,
+ .count = 1,
+ },
+#endif
+ .usb3 = {
+ .ops = &tegra186_usb3_port_ops,
+ .count = 3,
+ },
+ },
+ .ops = &tegra186_xusb_padctl_ops,
+ .supply_names = tegra186_xusb_padctl_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra186_xusb_padctl_supply_names),
+};
+EXPORT_SYMBOL_GPL(tegra186_xusb_padctl_soc);
+
+MODULE_AUTHOR("JC Kuo <jckuo@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra186 XUSB Pad Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 5b3b8863363e..0417213ed68b 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -68,6 +68,12 @@ static const struct of_device_id tegra_xusb_padctl_of_match[] = {
.data = &tegra210_xusb_padctl_soc,
},
#endif
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+ {
+ .compatible = "nvidia,tegra186-xusb-padctl",
+ .data = &tegra186_xusb_padctl_soc,
+ },
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
@@ -313,6 +319,10 @@ static void tegra_xusb_lane_program(struct tegra_xusb_lane *lane)
const struct tegra_xusb_lane_soc *soc = lane->soc;
u32 value;
+ /* skip single function lanes */
+ if (soc->num_funcs < 2)
+ return;
+
/* choose function */
value = padctl_readl(padctl, soc->offset);
value &= ~(soc->mask << soc->shift);
@@ -542,13 +552,34 @@ static void tegra_xusb_port_unregister(struct tegra_xusb_port *port)
device_unregister(&port->dev);
}
+static const char *const modes[] = {
+ [USB_DR_MODE_UNKNOWN] = "",
+ [USB_DR_MODE_HOST] = "host",
+ [USB_DR_MODE_PERIPHERAL] = "peripheral",
+ [USB_DR_MODE_OTG] = "otg",
+};
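+
+/*
+ * The optional "mode" property of a USB2 port node selects the dual-role
+ * mode using one of the strings above, e.g. 'mode = "otg";'; ports
+ * without the property default to host mode (example value illustrative).
+ */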
+
static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
{
struct tegra_xusb_port *port = &usb2->base;
struct device_node *np = port->dev.of_node;
+ const char *mode;
usb2->internal = of_property_read_bool(np, "nvidia,internal");
+ if (!of_property_read_string(np, "mode", &mode)) {
+ int err = match_string(modes, ARRAY_SIZE(modes), mode);
+ if (err < 0) {
+ dev_err(&port->dev, "invalid value %s for \"mode\"\n",
+ mode);
+ usb2->mode = USB_DR_MODE_UNKNOWN;
+ } else {
+ usb2->mode = err;
+ }
+ } else {
+ usb2->mode = USB_DR_MODE_HOST;
+ }
+
usb2->supply = devm_regulator_get(&port->dev, "vbus");
return PTR_ERR_OR_ZERO(usb2->supply);
}
@@ -839,6 +870,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
struct resource *res;
+ unsigned int i;
int err;
/* for backwards compatibility with old device trees */
@@ -876,14 +908,38 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
goto remove;
}
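+ /*
+ * The supply names come from the per-SoC supply_names table; the
+ * regulator core resolves each one via a "<name>-supply" property on
+ * the pad controller node, and all supplies are enabled together
+ * before tegra_xusb_setup_pads() runs.
+ */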
+ padctl->supplies = devm_kcalloc(&pdev->dev, padctl->soc->num_supplies,
+ sizeof(*padctl->supplies), GFP_KERNEL);
+ if (!padctl->supplies) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < padctl->soc->num_supplies; i++)
+ padctl->supplies[i].supply = padctl->soc->supply_names[i];
+
+ err = devm_regulator_bulk_get(&pdev->dev, padctl->soc->num_supplies,
+ padctl->supplies);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
+ goto remove;
+ }
+
err = reset_control_deassert(padctl->rst);
if (err < 0)
goto remove;
+ err = regulator_bulk_enable(padctl->soc->num_supplies,
+ padctl->supplies);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable supplies: %d\n", err);
+ goto reset;
+ }
+
err = tegra_xusb_setup_pads(padctl);
if (err < 0) {
dev_err(&pdev->dev, "failed to setup pads: %d\n", err);
- goto reset;
+ goto power_down;
}
err = tegra_xusb_setup_ports(padctl);
@@ -896,6 +952,8 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
remove_pads:
tegra_xusb_remove_pads(padctl);
+power_down:
+ regulator_bulk_disable(padctl->soc->num_supplies, padctl->supplies);
reset:
reset_control_assert(padctl->rst);
remove:
@@ -911,6 +969,11 @@ static int tegra_xusb_padctl_remove(struct platform_device *pdev)
tegra_xusb_remove_ports(padctl);
tegra_xusb_remove_pads(padctl);
+ err = regulator_bulk_disable(padctl->soc->num_supplies,
+ padctl->supplies);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to disable supplies: %d\n", err);
+
err = reset_control_assert(padctl->rst);
if (err < 0)
dev_err(&pdev->dev, "failed to assert reset: %d\n", err);
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index b49dbc36efa3..e0028b9fe702 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -19,6 +19,8 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/usb/otg.h>
+
/* legacy entry points for backwards-compatibility */
int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev);
int tegra_xusb_padctl_legacy_remove(struct platform_device *pdev);
@@ -54,10 +56,21 @@ struct tegra_xusb_lane {
int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
struct device_node *np);
+struct tegra_xusb_usb3_lane {
+ struct tegra_xusb_lane base;
+};
+
+static inline struct tegra_xusb_usb3_lane *
+to_usb3_lane(struct tegra_xusb_lane *lane)
+{
+ return container_of(lane, struct tegra_xusb_usb3_lane, base);
+}
+
struct tegra_xusb_usb2_lane {
struct tegra_xusb_lane base;
u32 hs_curr_level_offset;
+ bool powered_on;
};
static inline struct tegra_xusb_usb2_lane *
@@ -168,6 +181,19 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
const struct phy_ops *ops);
void tegra_xusb_pad_unregister(struct tegra_xusb_pad *pad);
+struct tegra_xusb_usb3_pad {
+ struct tegra_xusb_pad base;
+
+ unsigned int enable;
+ struct mutex lock;
+};
+
+static inline struct tegra_xusb_usb3_pad *
+to_usb3_pad(struct tegra_xusb_pad *pad)
+{
+ return container_of(pad, struct tegra_xusb_usb3_pad, base);
+}
+
struct tegra_xusb_usb2_pad {
struct tegra_xusb_pad base;
@@ -271,6 +297,7 @@ struct tegra_xusb_usb2_port {
struct tegra_xusb_port base;
struct regulator *supply;
+ enum usb_dr_mode mode;
bool internal;
};
@@ -367,6 +394,9 @@ struct tegra_xusb_padctl_soc {
} ports;
const struct tegra_xusb_padctl_ops *ops;
+
+ const char * const *supply_names;
+ unsigned int num_supplies;
};
struct tegra_xusb_padctl {
@@ -390,6 +420,8 @@ struct tegra_xusb_padctl {
unsigned int enable;
struct clk *clk;
+
+ struct regulator_bulk_data *supplies;
};
static inline void padctl_writel(struct tegra_xusb_padctl *padctl, u32 value,
@@ -417,5 +449,8 @@ extern const struct tegra_xusb_padctl_soc tegra124_xusb_padctl_soc;
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
extern const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc;
#endif
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+extern const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc;
+#endif
#endif /* __PHY_TEGRA_XUSB_H */
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index 103efc456a12..781514efded3 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -20,6 +20,18 @@ config PHY_DM816X_USB
help
Enable this for dm816x USB to work.
+config PHY_AM654_SERDES
+ tristate "TI AM654 SERDES support"
+ depends on OF && ARCH_K3 || COMPILE_TEST
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ select MULTIPLEXER
+ select REGMAP_MMIO
+ select MUX_MMIO
+ help
+ This option enables support for the TI AM654 SerDes PHY used
+ for PCIe.
+
config OMAP_CONTROL_PHY
tristate "OMAP CONTROL PHY Driver"
depends on ARCH_OMAP2PLUS || COMPILE_TEST
@@ -37,7 +49,7 @@ config OMAP_USB2
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
- select OMAP_CONTROL_PHY if ARCH_OMAP2PLUS
+ select OMAP_CONTROL_PHY if ARCH_OMAP2PLUS || COMPILE_TEST
help
Enable this to support the transceiver that is part of SOC. This
driver takes care of all the PHY functionality apart from comparator.
diff --git a/drivers/phy/ti/Makefile b/drivers/phy/ti/Makefile
index bea8f25a137a..bff901eb0ecc 100644
--- a/drivers/phy/ti/Makefile
+++ b/drivers/phy/ti/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o
obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
+obj-$(CONFIG_PHY_AM654_SERDES) += phy-am654-serdes.o
obj-$(CONFIG_PHY_TI_GMII_SEL) += phy-gmii-sel.o
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
new file mode 100644
index 000000000000..d3769200cb9b
--- /dev/null
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe SERDES driver for AM654x SoC
+ *
+ * Copyright (C) 2018 - 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mux/consumer.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define CMU_R07C 0x7c
+
+#define COMLANE_R138 0xb38
+#define VERSION 0x70
+
+#define COMLANE_R190 0xb90
+
+#define COMLANE_R194 0xb94
+
+#define SERDES_CTRL 0x1fd0
+
+#define WIZ_LANEXCTL_STS 0x1fe0
+#define TX0_DISABLE_STATE 0x4
+#define TX0_SLEEP_STATE 0x5
+#define TX0_SNOOZE_STATE 0x6
+#define TX0_ENABLE_STATE 0x7
+
+#define RX0_DISABLE_STATE 0x4
+#define RX0_SLEEP_STATE 0x5
+#define RX0_SNOOZE_STATE 0x6
+#define RX0_ENABLE_STATE 0x7
+
+#define WIZ_PLL_CTRL 0x1ff4
+#define PLL_DISABLE_STATE 0x4
+#define PLL_SLEEP_STATE 0x5
+#define PLL_SNOOZE_STATE 0x6
+#define PLL_ENABLE_STATE 0x7
+
+#define PLL_LOCK_TIME 100000 /* in microseconds */
+#define SLEEP_TIME 100 /* in microseconds */
+
+#define LANE_USB3 0x0
+#define LANE_PCIE0_LANE0 0x1
+
+#define LANE_PCIE1_LANE0 0x0
+#define LANE_PCIE0_LANE1 0x1
+
+#define SERDES_NUM_CLOCKS 3
+
+#define AM654_SERDES_CTRL_CLKSEL_MASK GENMASK(7, 4)
+#define AM654_SERDES_CTRL_CLKSEL_SHIFT 4
+
+struct serdes_am654_clk_mux {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int reg;
+ int clk_id;
+ struct clk_init_data clk_data;
+};
+
+#define to_serdes_am654_clk_mux(_hw) \
+ container_of(_hw, struct serdes_am654_clk_mux, hw)
+
+static const struct regmap_config serdes_am654_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static const struct reg_field cmu_master_cdn_o = REG_FIELD(CMU_R07C, 24, 24);
+static const struct reg_field config_version = REG_FIELD(COMLANE_R138, 16, 23);
+static const struct reg_field l1_master_cdn_o = REG_FIELD(COMLANE_R190, 9, 9);
+static const struct reg_field cmu_ok_i_0 = REG_FIELD(COMLANE_R194, 19, 19);
+static const struct reg_field por_en = REG_FIELD(SERDES_CTRL, 29, 29);
+static const struct reg_field tx0_enable = REG_FIELD(WIZ_LANEXCTL_STS, 29, 31);
+static const struct reg_field rx0_enable = REG_FIELD(WIZ_LANEXCTL_STS, 13, 15);
+static const struct reg_field pll_enable = REG_FIELD(WIZ_PLL_CTRL, 29, 31);
+static const struct reg_field pll_ok = REG_FIELD(WIZ_PLL_CTRL, 28, 28);
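+
+/*
+ * Each REG_FIELD(reg, lsb, msb) names a bitfield; once wrapped with
+ * devm_regmap_field_alloc(), regmap_field_write()/regmap_field_read()
+ * touch only those bits. Writing 1 to por_en, for instance, sets bit 29
+ * of SERDES_CTRL and leaves the rest of the register alone.
+ */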
+
+struct serdes_am654 {
+ struct regmap *regmap;
+ struct regmap_field *cmu_master_cdn_o;
+ struct regmap_field *config_version;
+ struct regmap_field *l1_master_cdn_o;
+ struct regmap_field *cmu_ok_i_0;
+ struct regmap_field *por_en;
+ struct regmap_field *tx0_enable;
+ struct regmap_field *rx0_enable;
+ struct regmap_field *pll_enable;
+ struct regmap_field *pll_ok;
+
+ struct device *dev;
+ struct mux_control *control;
+ bool busy;
+ u32 type;
+ struct device_node *of_node;
+ struct clk_onecell_data clk_data;
+ struct clk *clks[SERDES_NUM_CLOCKS];
+};
+
+static int serdes_am654_enable_pll(struct serdes_am654 *phy)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_field_write(phy->pll_enable, PLL_ENABLE_STATE);
+ if (ret)
+ return ret;
+
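+ /* Poll the PLL_OK bit every 1000 us, for at most PLL_LOCK_TIME us. */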
+ return regmap_field_read_poll_timeout(phy->pll_ok, val, val, 1000,
+ PLL_LOCK_TIME);
+}
+
+static void serdes_am654_disable_pll(struct serdes_am654 *phy)
+{
+ struct device *dev = phy->dev;
+ int ret;
+
+ ret = regmap_field_write(phy->pll_enable, PLL_DISABLE_STATE);
+ if (ret)
+ dev_err(dev, "Failed to disable PLL\n");
+}
+
+static int serdes_am654_enable_txrx(struct serdes_am654 *phy)
+{
+ int ret;
+
+ /* Enable TX */
+ ret = regmap_field_write(phy->tx0_enable, TX0_ENABLE_STATE);
+ if (ret)
+ return ret;
+
+ /* Enable RX */
+ ret = regmap_field_write(phy->rx0_enable, RX0_ENABLE_STATE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int serdes_am654_disable_txrx(struct serdes_am654 *phy)
+{
+ int ret;
+
+ /* Disable TX */
+ ret = regmap_field_write(phy->tx0_enable, TX0_DISABLE_STATE);
+ if (ret)
+ return ret;
+
+ /* Disable RX */
+ ret = regmap_field_write(phy->rx0_enable, RX0_DISABLE_STATE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int serdes_am654_power_on(struct phy *x)
+{
+ struct serdes_am654 *phy = phy_get_drvdata(x);
+ struct device *dev = phy->dev;
+ int ret;
+ u32 val;
+
+ ret = serdes_am654_enable_pll(phy);
+ if (ret) {
+ dev_err(dev, "Failed to enable PLL\n");
+ return ret;
+ }
+
+ ret = serdes_am654_enable_txrx(phy);
+ if (ret) {
+ dev_err(dev, "Failed to enable TX RX\n");
+ return ret;
+ }
+
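+ /* Block until the CMU reports OK, or time out after PLL_LOCK_TIME. */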
+ return regmap_field_read_poll_timeout(phy->cmu_ok_i_0, val, val,
+ SLEEP_TIME, PLL_LOCK_TIME);
+}
+
+static int serdes_am654_power_off(struct phy *x)
+{
+ struct serdes_am654 *phy = phy_get_drvdata(x);
+
+ serdes_am654_disable_txrx(phy);
+ serdes_am654_disable_pll(phy);
+
+ return 0;
+}
+
+static int serdes_am654_init(struct phy *x)
+{
+ struct serdes_am654 *phy = phy_get_drvdata(x);
+ int ret;
+
+ ret = regmap_field_write(phy->config_version, VERSION);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(phy->cmu_master_cdn_o, 0x1);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(phy->l1_master_cdn_o, 0x1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int serdes_am654_reset(struct phy *x)
+{
+ struct serdes_am654 *phy = phy_get_drvdata(x);
+ int ret;
+
+ ret = regmap_field_write(phy->por_en, 0x1);
+ if (ret)
+ return ret;
+
+ mdelay(1);
+
+ ret = regmap_field_write(phy->por_en, 0x0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void serdes_am654_release(struct phy *x)
+{
+ struct serdes_am654 *phy = phy_get_drvdata(x);
+
+ phy->type = PHY_NONE;
+ phy->busy = false;
+ mux_control_deselect(phy->control);
+}
+
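+/*
+ * A consumer passes two cells: the PHY type (args[0], stored in
+ * phy->type) and the lane-function mux state (args[1]). An illustrative
+ * consumer entry, assuming "#phy-cells = <2>":
+ *
+ *     phys = <&serdes0 PHY_TYPE_PCIE LANE_PCIE0_LANE0>;
+ */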
+static struct phy *serdes_am654_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct serdes_am654 *am654_phy;
+ struct phy *phy;
+ int ret;
+
+ phy = of_phy_simple_xlate(dev, args);
+ if (IS_ERR(phy))
+ return phy;
+
+ am654_phy = phy_get_drvdata(phy);
+ if (am654_phy->busy)
+ return ERR_PTR(-EBUSY);
+
+ ret = mux_control_select(am654_phy->control, args->args[1]);
+ if (ret) {
+ dev_err(dev, "Failed to select SERDES Lane Function\n");
+ return ERR_PTR(ret);
+ }
+
+ am654_phy->busy = true;
+ am654_phy->type = args->args[0];
+
+ return phy;
+}
+
+static const struct phy_ops ops = {
+ .reset = serdes_am654_reset,
+ .init = serdes_am654_init,
+ .power_on = serdes_am654_power_on,
+ .power_off = serdes_am654_power_off,
+ .release = serdes_am654_release,
+ .owner = THIS_MODULE,
+};
+
+#define SERDES_NUM_MUX_COMBINATIONS 16
+
+#define LICLK 0
+#define EXT_REFCLK 1
+#define RICLK 2
+
+static const int
+serdes_am654_mux_table[SERDES_NUM_MUX_COMBINATIONS][SERDES_NUM_CLOCKS] = {
+ /*
+ * Each combination maps to one of
+ * "Figure 12-1986. SerDes Reference Clock Distribution"
+ * in TRM.
+ */
+ /* Parent of CMU refclk, Left output, Right output
+ * either of EXT_REFCLK, LICLK, RICLK
+ */
+ { EXT_REFCLK, EXT_REFCLK, EXT_REFCLK }, /* 0000 */
+ { RICLK, EXT_REFCLK, EXT_REFCLK }, /* 0001 */
+ { EXT_REFCLK, RICLK, LICLK }, /* 0010 */
+ { RICLK, RICLK, EXT_REFCLK }, /* 0011 */
+ { LICLK, EXT_REFCLK, EXT_REFCLK }, /* 0100 */
+ { EXT_REFCLK, EXT_REFCLK, EXT_REFCLK }, /* 0101 */
+ { LICLK, RICLK, LICLK }, /* 0110 */
+ { EXT_REFCLK, RICLK, LICLK }, /* 0111 */
+ { EXT_REFCLK, EXT_REFCLK, LICLK }, /* 1000 */
+ { RICLK, EXT_REFCLK, LICLK }, /* 1001 */
+ { EXT_REFCLK, RICLK, EXT_REFCLK }, /* 1010 */
+ { RICLK, RICLK, EXT_REFCLK }, /* 1011 */
+ { LICLK, EXT_REFCLK, LICLK }, /* 1100 */
+ { EXT_REFCLK, EXT_REFCLK, LICLK }, /* 1101 */
+ { LICLK, RICLK, EXT_REFCLK }, /* 1110 */
+ { EXT_REFCLK, RICLK, EXT_REFCLK }, /* 1111 */
+};
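+
+/*
+ * serdes_am654_clk_mux_set_parent() below only changes one column of the
+ * current row and then searches this table for a row that matches all
+ * three columns. For example, starting from row 0000 { EXT, EXT, EXT }
+ * and re-parenting the CMU refclk to RICLK lands on row 0001
+ * { RICLK, EXT, EXT }; the row index is what gets written into the
+ * CLKSEL field.
+ */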
+
+static u8 serdes_am654_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct serdes_am654_clk_mux *mux = to_serdes_am654_clk_mux(hw);
+ struct regmap *regmap = mux->regmap;
+ unsigned int reg = mux->reg;
+ unsigned int val;
+
+ regmap_read(regmap, reg, &val);
+ val &= AM654_SERDES_CTRL_CLKSEL_MASK;
+ val >>= AM654_SERDES_CTRL_CLKSEL_SHIFT;
+
+ return serdes_am654_mux_table[val][mux->clk_id];
+}
+
+static int serdes_am654_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct serdes_am654_clk_mux *mux = to_serdes_am654_clk_mux(hw);
+ struct regmap *regmap = mux->regmap;
+ unsigned int reg = mux->reg;
+ int clk_id = mux->clk_id;
+ int parents[SERDES_NUM_CLOCKS];
+ const int *p;
+ u32 val;
+ int found, i;
+ int ret;
+
+ /* get existing setting */
+ regmap_read(regmap, reg, &val);
+ val &= AM654_SERDES_CTRL_CLKSEL_MASK;
+ val >>= AM654_SERDES_CTRL_CLKSEL_SHIFT;
+
+ for (i = 0; i < SERDES_NUM_CLOCKS; i++)
+ parents[i] = serdes_am654_mux_table[val][i];
+
+ /* change parent of this clock. others left intact */
+ parents[clk_id] = index;
+
+ /* Find the match */
+ for (val = 0; val < SERDES_NUM_MUX_COMBINATIONS; val++) {
+ p = serdes_am654_mux_table[val];
+ found = 1;
+ for (i = 0; i < SERDES_NUM_CLOCKS; i++) {
+ if (parents[i] != p[i]) {
+ found = 0;
+ break;
+ }
+ }
+
+ if (found)
+ break;
+ }
+
+ if (!found) {
+ /*
+ * This can never happen, unless we missed
+ * a valid combination in serdes_am654_mux_table.
+ */
+ WARN(1, "Failed to find the parent of %s clock\n",
+ hw->init->name);
+ return -EINVAL;
+ }
+
+ val <<= AM654_SERDES_CTRL_CLKSEL_SHIFT;
+ ret = regmap_update_bits(regmap, reg, AM654_SERDES_CTRL_CLKSEL_MASK,
+ val);
+
+ return ret;
+}
+
+static const struct clk_ops serdes_am654_clk_mux_ops = {
+ .set_parent = serdes_am654_clk_mux_set_parent,
+ .get_parent = serdes_am654_clk_mux_get_parent,
+};
+
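+/*
+ * The mux clocks are described in the SERDES node itself: "clocks" must
+ * list at least two parents, "clock-output-names" supplies the three
+ * names read in probe, and "ti,serdes-clk" points at the syscon child
+ * that holds the CLKSEL register. Illustrative fragment (phandle and
+ * names are placeholders, not taken from the binding document):
+ *
+ *     ti,serdes-clk = <&serdes0_clk>;
+ *     clock-output-names = "cmu-refclk", "lo-refclk", "ro-refclk";
+ */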
+static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
+ const char *clock_name, int clock_num)
+{
+ struct device_node *node = am654_phy->of_node;
+ struct device *dev = am654_phy->dev;
+ struct serdes_am654_clk_mux *mux;
+ struct device_node *regmap_node;
+ const char **parent_names;
+ struct clk_init_data *init;
+ unsigned int num_parents;
+ struct regmap *regmap;
+ const __be32 *addr;
+ unsigned int reg;
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ init = &mux->clk_data;
+
+ regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0);
+ if (!regmap_node) {
+ dev_err(dev, "Failed to get serdes-clk node\n");
+ return -ENODEV;
+ }
+
+ regmap = syscon_node_to_regmap(regmap_node->parent);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to get syscon regmap\n");
+ of_node_put(regmap_node);
+ return PTR_ERR(regmap);
+ }
+
+ addr = of_get_address(regmap_node, 0, NULL, NULL);
+ if (!addr) {
+ of_node_put(regmap_node);
+ return -EINVAL;
+ }
+
+ reg = be32_to_cpu(*addr);
+
+ /* The node was only needed to locate the mux register; drop it now. */
+ of_node_put(regmap_node);
+
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents < 2) {
+ dev_err(dev, "SERDES clock must have at least 2 parents\n");
+ return -EINVAL;
+ }
+
+ parent_names = devm_kcalloc(dev, num_parents, sizeof(char *),
+ GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ of_clk_parent_fill(node, parent_names, num_parents);
+
+ init->ops = &serdes_am654_clk_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clock_name;
+
+ mux->regmap = regmap;
+ mux->reg = reg;
+ mux->clk_id = clock_num;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ am654_phy->clks[clock_num] = clk;
+
+ return 0;
+}
+
+static const struct of_device_id serdes_am654_id_table[] = {
+ {
+ .compatible = "ti,phy-am654-serdes",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, serdes_am654_id_table);
+
+static int serdes_am654_regfield_init(struct serdes_am654 *am654_phy)
+{
+ struct regmap *regmap = am654_phy->regmap;
+ struct device *dev = am654_phy->dev;
+
+ am654_phy->cmu_master_cdn_o = devm_regmap_field_alloc(dev, regmap,
+ cmu_master_cdn_o);
+ if (IS_ERR(am654_phy->cmu_master_cdn_o)) {
+ dev_err(dev, "CMU_MASTER_CDN_O reg field init failed\n");
+ return PTR_ERR(am654_phy->cmu_master_cdn_o);
+ }
+
+ am654_phy->config_version = devm_regmap_field_alloc(dev, regmap,
+ config_version);
+ if (IS_ERR(am654_phy->config_version)) {
+ dev_err(dev, "CONFIG_VERSION reg field init failed\n");
+ return PTR_ERR(am654_phy->config_version);
+ }
+
+ am654_phy->l1_master_cdn_o = devm_regmap_field_alloc(dev, regmap,
+ l1_master_cdn_o);
+ if (IS_ERR(am654_phy->l1_master_cdn_o)) {
+ dev_err(dev, "L1_MASTER_CDN_O reg field init failed\n");
+ return PTR_ERR(am654_phy->l1_master_cdn_o);
+ }
+
+ am654_phy->cmu_ok_i_0 = devm_regmap_field_alloc(dev, regmap,
+ cmu_ok_i_0);
+ if (IS_ERR(am654_phy->cmu_ok_i_0)) {
+ dev_err(dev, "CMU_OK_I_0 reg field init failed\n");
+ return PTR_ERR(am654_phy->cmu_ok_i_0);
+ }
+
+ am654_phy->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
+ if (IS_ERR(am654_phy->por_en)) {
+ dev_err(dev, "POR_EN reg field init failed\n");
+ return PTR_ERR(am654_phy->por_en);
+ }
+
+ am654_phy->tx0_enable = devm_regmap_field_alloc(dev, regmap,
+ tx0_enable);
+ if (IS_ERR(am654_phy->tx0_enable)) {
+ dev_err(dev, "TX0_ENABLE reg field init failed\n");
+ return PTR_ERR(am654_phy->tx0_enable);
+ }
+
+ am654_phy->rx0_enable = devm_regmap_field_alloc(dev, regmap,
+ rx0_enable);
+ if (IS_ERR(am654_phy->rx0_enable)) {
+ dev_err(dev, "RX0_ENABLE reg field init failed\n");
+ return PTR_ERR(am654_phy->rx0_enable);
+ }
+
+ am654_phy->pll_enable = devm_regmap_field_alloc(dev, regmap,
+ pll_enable);
+ if (IS_ERR(am654_phy->pll_enable)) {
+ dev_err(dev, "PLL_ENABLE reg field init failed\n");
+ return PTR_ERR(am654_phy->pll_enable);
+ }
+
+ am654_phy->pll_ok = devm_regmap_field_alloc(dev, regmap, pll_ok);
+ if (IS_ERR(am654_phy->pll_ok)) {
+ dev_err(dev, "PLL_OK reg field init failed\n");
+ return PTR_ERR(am654_phy->pll_ok);
+ }
+
+ return 0;
+}
+
+static int serdes_am654_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct clk_onecell_data *clk_data;
+ struct serdes_am654 *am654_phy;
+ struct mux_control *control;
+ const char *clock_name;
+ struct regmap *regmap;
+ void __iomem *base;
+ struct phy *phy;
+ int ret;
+ int i;
+
+ am654_phy = devm_kzalloc(dev, sizeof(*am654_phy), GFP_KERNEL);
+ if (!am654_phy)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &serdes_am654_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ control = devm_mux_control_get(dev, NULL);
+ if (IS_ERR(control))
+ return PTR_ERR(control);
+
+ am654_phy->dev = dev;
+ am654_phy->of_node = node;
+ am654_phy->regmap = regmap;
+ am654_phy->control = control;
+ am654_phy->type = PHY_NONE;
+
+ ret = serdes_am654_regfield_init(am654_phy);
+ if (ret) {
+ dev_err(dev, "Failed to initialize regfields\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, am654_phy);
+
+ for (i = 0; i < SERDES_NUM_CLOCKS; i++) {
+ ret = of_property_read_string_index(node, "clock-output-names",
+ i, &clock_name);
+ if (ret) {
+ dev_err(dev, "Failed to get clock name\n");
+ return ret;
+ }
+
+ ret = serdes_am654_clk_register(am654_phy, clock_name, i);
+ if (ret) {
+ dev_err(dev, "Failed to initialize clock %s\n",
+ clock_name);
+ return ret;
+ }
+ }
+
+ clk_data = &am654_phy->clk_data;
+ clk_data->clks = am654_phy->clks;
+ clk_data->clk_num = SERDES_NUM_CLOCKS;
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+
+ phy = devm_phy_create(dev, NULL, &ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ goto clk_err;
+ }
+
+ phy_set_drvdata(phy, am654_phy);
+ phy_provider = devm_of_phy_provider_register(dev, serdes_am654_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto clk_err;
+ }
+
+ return 0;
+
+clk_err:
+ of_clk_del_provider(node);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int serdes_am654_remove(struct platform_device *pdev)
+{
+ struct serdes_am654 *am654_phy = platform_get_drvdata(pdev);
+ struct device_node *node = am654_phy->of_node;
+
+ pm_runtime_disable(&pdev->dev);
+ of_clk_del_provider(node);
+
+ return 0;
+}
+
+static struct platform_driver serdes_am654_driver = {
+ .probe = serdes_am654_probe,
+ .remove = serdes_am654_remove,
+ .driver = {
+ .name = "phy-am654",
+ .of_match_table = serdes_am654_id_table,
+ },
+};
+module_platform_driver(serdes_am654_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("TI AM654x SERDES driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index 68ce4a082b9b..739aaa0eb0ef 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -56,51 +56,73 @@
#define SATA_PLL_SOFT_RESET BIT(18)
-#define PIPE3_PHY_PWRCTL_CLK_CMD_MASK 0x003FC000
+#define PIPE3_PHY_PWRCTL_CLK_CMD_MASK GENMASK(21, 14)
#define PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT 14
-#define PIPE3_PHY_PWRCTL_CLK_FREQ_MASK 0xFFC00000
+#define PIPE3_PHY_PWRCTL_CLK_FREQ_MASK GENMASK(31, 22)
#define PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT 22
-#define PIPE3_PHY_TX_RX_POWERON 0x3
-#define PIPE3_PHY_TX_RX_POWEROFF 0x0
+#define PIPE3_PHY_RX_POWERON (0x1 << PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT)
+#define PIPE3_PHY_TX_POWERON (0x2 << PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT)
#define PCIE_PCS_MASK 0xFF0000
#define PCIE_PCS_DELAY_COUNT_SHIFT 0x10
-#define PCIEPHYRX_ANA_PROGRAMMABILITY 0x0000000C
+#define PIPE3_PHY_RX_ANA_PROGRAMMABILITY 0x0000000C
#define INTERFACE_MASK GENMASK(31, 27)
#define INTERFACE_SHIFT 27
+#define INTERFACE_MODE_USBSS BIT(4)
+#define INTERFACE_MODE_SATA_1P5 BIT(3)
+#define INTERFACE_MODE_SATA_3P0 BIT(2)
+#define INTERFACE_MODE_PCIE BIT(0)
+
#define LOSD_MASK GENMASK(17, 14)
#define LOSD_SHIFT 14
#define MEM_PLLDIV GENMASK(6, 5)
-#define PCIEPHYRX_TRIM 0x0000001C
-#define MEM_DLL_TRIM_SEL GENMASK(31, 30)
+#define PIPE3_PHY_RX_TRIM 0x0000001C
+#define MEM_DLL_TRIM_SEL_MASK GENMASK(31, 30)
#define MEM_DLL_TRIM_SHIFT 30
-#define PCIEPHYRX_DLL 0x00000024
-#define MEM_DLL_PHINT_RATE GENMASK(31, 30)
+#define PIPE3_PHY_RX_DLL 0x00000024
+#define MEM_DLL_PHINT_RATE_MASK GENMASK(31, 30)
+#define MEM_DLL_PHINT_RATE_SHIFT 30
-#define PCIEPHYRX_DIGITAL_MODES 0x00000028
+#define PIPE3_PHY_RX_DIGITAL_MODES 0x00000028
+#define MEM_HS_RATE_MASK GENMASK(28, 27)
+#define MEM_HS_RATE_SHIFT 27
+#define MEM_OVRD_HS_RATE BIT(26)
+#define MEM_OVRD_HS_RATE_SHIFT 26
#define MEM_CDR_FASTLOCK BIT(23)
-#define MEM_CDR_LBW GENMASK(22, 21)
-#define MEM_CDR_STEPCNT GENMASK(20, 19)
+#define MEM_CDR_FASTLOCK_SHIFT 23
+#define MEM_CDR_LBW_MASK GENMASK(22, 21)
+#define MEM_CDR_LBW_SHIFT 21
+#define MEM_CDR_STEPCNT_MASK GENMASK(20, 19)
+#define MEM_CDR_STEPCNT_SHIFT 19
#define MEM_CDR_STL_MASK GENMASK(18, 16)
#define MEM_CDR_STL_SHIFT 16
#define MEM_CDR_THR_MASK GENMASK(15, 13)
#define MEM_CDR_THR_SHIFT 13
#define MEM_CDR_THR_MODE BIT(12)
-#define MEM_CDR_CDR_2NDO_SDM_MODE BIT(11)
-#define MEM_OVRD_HS_RATE BIT(26)
-
-#define PCIEPHYRX_EQUALIZER 0x00000038
-#define MEM_EQLEV GENMASK(31, 16)
-#define MEM_EQFTC GENMASK(15, 11)
-#define MEM_EQCTL GENMASK(10, 7)
+#define MEM_CDR_THR_MODE_SHIFT 12
+#define MEM_CDR_2NDO_SDM_MODE BIT(11)
+#define MEM_CDR_2NDO_SDM_MODE_SHIFT 11
+
+#define PIPE3_PHY_RX_EQUALIZER 0x00000038
+#define MEM_EQLEV_MASK GENMASK(31, 16)
+#define MEM_EQLEV_SHIFT 16
+#define MEM_EQFTC_MASK GENMASK(15, 11)
+#define MEM_EQFTC_SHIFT 11
+#define MEM_EQCTL_MASK GENMASK(10, 7)
#define MEM_EQCTL_SHIFT 7
#define MEM_OVRD_EQLEV BIT(2)
+#define MEM_OVRD_EQLEV_SHIFT 2
#define MEM_OVRD_EQFTC BIT(1)
+#define MEM_OVRD_EQFTC_SHIFT 1
+
+#define SATA_PHY_RX_IO_AND_A2D_OVERRIDES 0x44
+#define MEM_CDR_LOS_SOURCE_MASK GENMASK(10, 9)
+#define MEM_CDR_LOS_SOURCE_SHIFT 9
/*
* This is an Empirical value that works, need to confirm the actual
@@ -110,6 +132,10 @@
#define PLL_IDLE_TIME 100 /* in milliseconds */
#define PLL_LOCK_TIME 100 /* in milliseconds */
+enum pipe3_mode { PIPE3_MODE_PCIE = 1,
+ PIPE3_MODE_SATA,
+ PIPE3_MODE_USBSS };
+
struct pipe3_dpll_params {
u16 m;
u8 n;
@@ -123,6 +149,27 @@ struct pipe3_dpll_map {
struct pipe3_dpll_params params;
};
+struct pipe3_settings {
+ u8 ana_interface;
+ u8 ana_losd;
+ u8 dig_fastlock;
+ u8 dig_lbw;
+ u8 dig_stepcnt;
+ u8 dig_stl;
+ u8 dig_thr;
+ u8 dig_thr_mode;
+ u8 dig_2ndo_sdm_mode;
+ u8 dig_hs_rate;
+ u8 dig_ovrd_hs_rate;
+ u8 dll_trim_sel;
+ u8 dll_phint_rate;
+ u8 eq_lev;
+ u8 eq_ftc;
+ u8 eq_ctl;
+ u8 eq_ovrd_lev;
+ u8 eq_ovrd_ftc;
+};
+
struct ti_pipe3 {
void __iomem *pll_ctrl_base;
void __iomem *phy_rx;
@@ -141,6 +188,8 @@ struct ti_pipe3 {
unsigned int power_reg; /* power reg. index within syscon */
unsigned int pcie_pcs_reg; /* pcs reg. index in syscon */
bool sata_refclk_enabled;
+ enum pipe3_mode mode;
+ struct pipe3_settings settings;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -163,6 +212,89 @@ static struct pipe3_dpll_map dpll_map_sata[] = {
{ }, /* Terminator */
};
+struct pipe3_data {
+ enum pipe3_mode mode;
+ struct pipe3_dpll_map *dpll_map;
+ struct pipe3_settings settings;
+};
+
+static struct pipe3_data data_usb = {
+ .mode = PIPE3_MODE_USBSS,
+ .dpll_map = dpll_map_usb,
+ .settings = {
+ /* DRA75x TRM Table 26-17 Preferred USB3_PHY_RX SCP Register Settings */
+ .ana_interface = INTERFACE_MODE_USBSS,
+ .ana_losd = 0xa,
+ .dig_fastlock = 1,
+ .dig_lbw = 3,
+ .dig_stepcnt = 0,
+ .dig_stl = 0x3,
+ .dig_thr = 1,
+ .dig_thr_mode = 1,
+ .dig_2ndo_sdm_mode = 0,
+ .dig_hs_rate = 0,
+ .dig_ovrd_hs_rate = 1,
+ .dll_trim_sel = 0x2,
+ .dll_phint_rate = 0x3,
+ .eq_lev = 0,
+ .eq_ftc = 0,
+ .eq_ctl = 0x9,
+ .eq_ovrd_lev = 0,
+ .eq_ovrd_ftc = 0,
+ },
+};
+
+static struct pipe3_data data_sata = {
+ .mode = PIPE3_MODE_SATA,
+ .dpll_map = dpll_map_sata,
+ .settings = {
+ /* DRA75x TRM Table 26-9 Preferred SATA_PHY_RX SCP Register Settings */
+ .ana_interface = INTERFACE_MODE_SATA_3P0,
+ .ana_losd = 0x5,
+ .dig_fastlock = 1,
+ .dig_lbw = 3,
+ .dig_stepcnt = 0,
+ .dig_stl = 0x3,
+ .dig_thr = 1,
+ .dig_thr_mode = 1,
+ .dig_2ndo_sdm_mode = 0,
+ .dig_hs_rate = 0, /* Not in TRM preferred settings */
+ .dig_ovrd_hs_rate = 0, /* Not in TRM preferred settings */
+ .dll_trim_sel = 0x1,
+ .dll_phint_rate = 0x2, /* for 1.5 GHz DPLL clock */
+ .eq_lev = 0,
+ .eq_ftc = 0x1f,
+ .eq_ctl = 0,
+ .eq_ovrd_lev = 1,
+ .eq_ovrd_ftc = 1,
+ },
+};
+
+static struct pipe3_data data_pcie = {
+ .mode = PIPE3_MODE_PCIE,
+ .settings = {
+ /* DRA75x TRM Table 26-62 Preferred PCIe_PHY_RX SCP Register Settings */
+ .ana_interface = INTERFACE_MODE_PCIE,
+ .ana_losd = 0xa,
+ .dig_fastlock = 1,
+ .dig_lbw = 3,
+ .dig_stepcnt = 0,
+ .dig_stl = 0x3,
+ .dig_thr = 1,
+ .dig_thr_mode = 1,
+ .dig_2ndo_sdm_mode = 0,
+ .dig_hs_rate = 0,
+ .dig_ovrd_hs_rate = 0,
+ .dll_trim_sel = 0x2,
+ .dll_phint_rate = 0x3,
+ .eq_lev = 0,
+ .eq_ftc = 0x1f,
+ .eq_ctl = 1,
+ .eq_ovrd_lev = 0,
+ .eq_ovrd_ftc = 0,
+ },
+};
+
static inline u32 ti_pipe3_readl(void __iomem *addr, unsigned offset)
{
return __raw_readl(addr + offset);
@@ -196,7 +328,6 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
static int ti_pipe3_power_off(struct phy *x)
{
- u32 val;
int ret;
struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -205,13 +336,13 @@ static int ti_pipe3_power_off(struct phy *x)
return 0;
}
- val = PIPE3_PHY_TX_RX_POWEROFF << PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT;
-
ret = regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
- PIPE3_PHY_PWRCTL_CLK_CMD_MASK, val);
+ PIPE3_PHY_PWRCTL_CLK_CMD_MASK, 0);
return ret;
}
+static void ti_pipe3_calibrate(struct ti_pipe3 *phy);
+
static int ti_pipe3_power_on(struct phy *x)
{
u32 val;
@@ -219,6 +350,7 @@ static int ti_pipe3_power_on(struct phy *x)
int ret;
unsigned long rate;
struct ti_pipe3 *phy = phy_get_drvdata(x);
+ bool rx_pending = false;
if (!phy->phy_power_syscon) {
omap_control_phy_power(phy->control_dev, 1);
@@ -231,14 +363,35 @@ static int ti_pipe3_power_on(struct phy *x)
return -EINVAL;
}
rate = rate / 1000000;
- mask = OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK |
- OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK;
- val = PIPE3_PHY_TX_RX_POWERON << PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT;
- val |= rate << OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT;
-
+ mask = OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK;
+ val = rate << OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT;
ret = regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
mask, val);
- return ret;
+ /*
+ * For PCIe, TX and RX must be powered on simultaneously.
+ * For USB and SATA, TX must be powered on before RX
+ */
+ mask = OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK;
+ if (phy->mode == PIPE3_MODE_SATA || phy->mode == PIPE3_MODE_USBSS) {
+ val = PIPE3_PHY_TX_POWERON;
+ rx_pending = true;
+ } else {
+ val = PIPE3_PHY_TX_POWERON | PIPE3_PHY_RX_POWERON;
+ }
+
+ regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
+ mask, val);
+
+ if (rx_pending) {
+ val = PIPE3_PHY_TX_POWERON | PIPE3_PHY_RX_POWERON;
+ regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
+ mask, val);
+ }
+
+ if (phy->mode == PIPE3_MODE_PCIE)
+ ti_pipe3_calibrate(phy);
+
+ return 0;
}
static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
@@ -300,32 +453,55 @@ static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
{
u32 val;
+ struct pipe3_settings *s = &phy->settings;
- val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY);
+ val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_ANA_PROGRAMMABILITY);
val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
- val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
- ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val);
-
- val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES);
- val &= ~(MEM_CDR_STEPCNT | MEM_CDR_STL_MASK | MEM_CDR_THR_MASK |
- MEM_CDR_CDR_2NDO_SDM_MODE | MEM_OVRD_HS_RATE);
- val |= (MEM_CDR_FASTLOCK | MEM_CDR_LBW | 0x3 << MEM_CDR_STL_SHIFT |
- 0x1 << MEM_CDR_THR_SHIFT | MEM_CDR_THR_MODE);
- ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES, val);
-
- val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_TRIM);
- val &= ~MEM_DLL_TRIM_SEL;
- val |= 0x2 << MEM_DLL_TRIM_SHIFT;
- ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_TRIM, val);
-
- val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DLL);
- val |= MEM_DLL_PHINT_RATE;
- ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_DLL, val);
-
- val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_EQUALIZER);
- val &= ~(MEM_EQLEV | MEM_EQCTL | MEM_OVRD_EQLEV | MEM_OVRD_EQFTC);
- val |= MEM_EQFTC | 0x1 << MEM_EQCTL_SHIFT;
- ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_EQUALIZER, val);
+ val |= (s->ana_interface << INTERFACE_SHIFT | s->ana_losd << LOSD_SHIFT);
+ ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_ANA_PROGRAMMABILITY, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_DIGITAL_MODES);
+ val &= ~(MEM_HS_RATE_MASK | MEM_OVRD_HS_RATE | MEM_CDR_FASTLOCK |
+ MEM_CDR_LBW_MASK | MEM_CDR_STEPCNT_MASK | MEM_CDR_STL_MASK |
+ MEM_CDR_THR_MASK | MEM_CDR_THR_MODE | MEM_CDR_2NDO_SDM_MODE);
+ val |= s->dig_hs_rate << MEM_HS_RATE_SHIFT |
+ s->dig_ovrd_hs_rate << MEM_OVRD_HS_RATE_SHIFT |
+ s->dig_fastlock << MEM_CDR_FASTLOCK_SHIFT |
+ s->dig_lbw << MEM_CDR_LBW_SHIFT |
+ s->dig_stepcnt << MEM_CDR_STEPCNT_SHIFT |
+ s->dig_stl << MEM_CDR_STL_SHIFT |
+ s->dig_thr << MEM_CDR_THR_SHIFT |
+ s->dig_thr_mode << MEM_CDR_THR_MODE_SHIFT |
+ s->dig_2ndo_sdm_mode << MEM_CDR_2NDO_SDM_MODE_SHIFT;
+ ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_DIGITAL_MODES, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_TRIM);
+ val &= ~MEM_DLL_TRIM_SEL_MASK;
+ val |= s->dll_trim_sel << MEM_DLL_TRIM_SHIFT;
+ ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_TRIM, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_DLL);
+ val &= ~MEM_DLL_PHINT_RATE_MASK;
+ val |= s->dll_phint_rate << MEM_DLL_PHINT_RATE_SHIFT;
+ ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_DLL, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_EQUALIZER);
+ val &= ~(MEM_EQLEV_MASK | MEM_EQFTC_MASK | MEM_EQCTL_MASK |
+ MEM_OVRD_EQLEV | MEM_OVRD_EQFTC);
+ val |= s->eq_lev << MEM_EQLEV_SHIFT |
+ s->eq_ftc << MEM_EQFTC_SHIFT |
+ s->eq_ctl << MEM_EQCTL_SHIFT |
+ s->eq_ovrd_lev << MEM_OVRD_EQLEV_SHIFT |
+ s->eq_ovrd_ftc << MEM_OVRD_EQFTC_SHIFT;
+ ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_EQUALIZER, val);
+
+ if (phy->mode == PIPE3_MODE_SATA) {
+ val = ti_pipe3_readl(phy->phy_rx,
+ SATA_PHY_RX_IO_AND_A2D_OVERRIDES);
+ val &= ~MEM_CDR_LOS_SOURCE_MASK;
+ ti_pipe3_writel(phy->phy_rx, SATA_PHY_RX_IO_AND_A2D_OVERRIDES,
+ val);
+ }
}
static int ti_pipe3_init(struct phy *x)
@@ -340,7 +516,7 @@ static int ti_pipe3_init(struct phy *x)
* as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
* 18-1804.
*/
- if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
+ if (phy->mode == PIPE3_MODE_PCIE) {
if (!phy->pcs_syscon) {
omap_control_pcie_pcs(phy->control_dev, 0x96);
return 0;
@@ -349,12 +525,7 @@ static int ti_pipe3_init(struct phy *x)
val = 0x96 << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT;
ret = regmap_update_bits(phy->pcs_syscon, phy->pcie_pcs_reg,
PCIE_PCS_MASK, val);
- if (ret)
- return ret;
-
- ti_pipe3_calibrate(phy);
-
- return 0;
+ return ret;
}
/* Bring it out of IDLE if it is IDLE */
@@ -367,8 +538,7 @@ static int ti_pipe3_init(struct phy *x)
/* SATA has issues if re-programmed when locked */
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
- if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
- "ti,phy-pipe3-sata"))
+ if ((val & PLL_LOCK) && phy->mode == PIPE3_MODE_SATA)
return ret;
/* Program the DPLL */
@@ -378,6 +548,8 @@ static int ti_pipe3_init(struct phy *x)
return -EINVAL;
}
+ ti_pipe3_calibrate(phy);
+
return ret;
}
@@ -390,12 +562,11 @@ static int ti_pipe3_exit(struct phy *x)
/* If dpll_reset_syscon is not present we wont power down SATA DPLL
* due to Errata i783
*/
- if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
- !phy->dpll_reset_syscon)
+ if (phy->mode == PIPE3_MODE_SATA && !phy->dpll_reset_syscon)
return 0;
/* PCIe doesn't have internal DPLL */
- if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
+ if (phy->mode != PIPE3_MODE_PCIE) {
/* Put DPLL in IDLE mode */
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
val |= PLL_IDLE;
@@ -418,7 +589,7 @@ static int ti_pipe3_exit(struct phy *x)
}
/* i783: SATA needs control bit toggle after PLL unlock */
- if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
+ if (phy->mode == PIPE3_MODE_SATA) {
regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
@@ -443,7 +614,6 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
{
struct clk *clk;
struct device *dev = phy->dev;
- struct device_node *node = dev->of_node;
phy->refclk = devm_clk_get(dev, "refclk");
if (IS_ERR(phy->refclk)) {
@@ -451,11 +621,11 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
/* older DTBs have missing refclk in SATA PHY
* so don't bail out in case of SATA PHY.
*/
- if (!of_device_is_compatible(node, "ti,phy-pipe3-sata"))
+ if (phy->mode != PIPE3_MODE_SATA)
return PTR_ERR(phy->refclk);
}
- if (!of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+ if (phy->mode != PIPE3_MODE_SATA) {
phy->wkupclk = devm_clk_get(dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
dev_err(dev, "unable to get wkupclk\n");
@@ -465,8 +635,7 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
phy->wkupclk = ERR_PTR(-ENODEV);
}
- if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie") ||
- phy->phy_power_syscon) {
+ if (phy->mode != PIPE3_MODE_PCIE || phy->phy_power_syscon) {
phy->sys_clk = devm_clk_get(dev, "sysclk");
if (IS_ERR(phy->sys_clk)) {
dev_err(dev, "unable to get sysclk\n");
@@ -474,7 +643,7 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
}
}
- if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
+ if (phy->mode == PIPE3_MODE_PCIE) {
clk = devm_clk_get(dev, "dpll_ref");
if (IS_ERR(clk)) {
dev_err(dev, "unable to get dpll ref clk\n");
@@ -546,7 +715,7 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
phy->control_dev = &control_pdev->dev;
}
- if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
+ if (phy->mode == PIPE3_MODE_PCIE) {
phy->pcs_syscon = syscon_regmap_lookup_by_phandle(node,
"syscon-pcs");
if (IS_ERR(phy->pcs_syscon)) {
@@ -564,7 +733,7 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
}
}
- if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+ if (phy->mode == PIPE3_MODE_SATA) {
phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
"syscon-pllreset");
if (IS_ERR(phy->dpll_reset_syscon)) {
@@ -589,12 +758,8 @@ static int ti_pipe3_get_tx_rx_base(struct ti_pipe3 *phy)
{
struct resource *res;
struct device *dev = phy->dev;
- struct device_node *node = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie"))
- return 0;
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"phy_rx");
phy->phy_rx = devm_ioremap_resource(dev, res);
@@ -611,24 +776,12 @@ static int ti_pipe3_get_tx_rx_base(struct ti_pipe3 *phy)
static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy)
{
struct resource *res;
- const struct of_device_id *match;
struct device *dev = phy->dev;
- struct device_node *node = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- if (of_device_is_compatible(node, "ti,phy-pipe3-pcie"))
+ if (phy->mode == PIPE3_MODE_PCIE)
return 0;
- match = of_match_device(ti_pipe3_id_table, dev);
- if (!match)
- return -EINVAL;
-
- phy->dpll_map = (struct pipe3_dpll_map *)match->data;
- if (!phy->dpll_map) {
- dev_err(dev, "no DPLL data\n");
- return -EINVAL;
- }
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"pll_ctrl");
phy->pll_ctrl_base = devm_ioremap_resource(dev, res);
@@ -640,15 +793,29 @@ static int ti_pipe3_probe(struct platform_device *pdev)
struct ti_pipe3 *phy;
struct phy *generic_phy;
struct phy_provider *phy_provider;
- struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int ret;
+ const struct of_device_id *match;
+ struct pipe3_data *data;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- phy->dev = dev;
+ match = of_match_device(ti_pipe3_id_table, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct pipe3_data *)match->data;
+ if (!data) {
+ dev_err(dev, "no driver data\n");
+ return -EINVAL;
+ }
+
+ phy->dev = dev;
+ phy->mode = data->mode;
+ phy->dpll_map = data->dpll_map;
+ phy->settings = data->settings;
ret = ti_pipe3_get_pll_base(phy);
if (ret)
@@ -672,7 +839,7 @@ static int ti_pipe3_probe(struct platform_device *pdev)
/*
* Prevent auto-disable of refclk for SATA PHY due to Errata i783
*/
- if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+ if (phy->mode == PIPE3_MODE_SATA) {
if (!IS_ERR(phy->refclk)) {
clk_prepare_enable(phy->refclk);
phy->sata_refclk_enabled = true;
@@ -762,18 +929,19 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
static const struct of_device_id ti_pipe3_id_table[] = {
{
.compatible = "ti,phy-usb3",
- .data = dpll_map_usb,
+ .data = &data_usb,
},
{
.compatible = "ti,omap-usb3",
- .data = dpll_map_usb,
+ .data = &data_usb,
},
{
.compatible = "ti,phy-pipe3-sata",
- .data = dpll_map_sata,
+ .data = &data_sata,
},
{
.compatible = "ti,phy-pipe3-pcie",
+ .data = &data_pcie,
},
{}
};
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index c267afb68f07..176e16a36553 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -172,6 +172,7 @@ struct twl4030_usb {
int irq;
enum musb_vbus_id_status linkstat;
+ atomic_t connected;
bool vbus_supplied;
bool musb_mailbox_pending;
@@ -575,39 +576,29 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
{
struct twl4030_usb *twl = _twl;
enum musb_vbus_id_status status;
- bool status_changed = false;
int err;
status = twl4030_usb_linkstat(twl);
mutex_lock(&twl->lock);
- if (status >= 0 && status != twl->linkstat) {
- status_changed =
- cable_present(twl->linkstat) !=
- cable_present(status);
- twl->linkstat = status;
- }
+ twl->linkstat = status;
mutex_unlock(&twl->lock);
- if (status_changed) {
- /* FIXME add a set_power() method so that B-devices can
- * configure the charger appropriately. It's not always
- * correct to consume VBUS power, and how much current to
- * consume is a function of the USB configuration chosen
- * by the host.
- *
- * REVISIT usb_gadget_vbus_connect(...) as needed, ditto
- * its disconnect() sibling, when changing to/from the
- * USB_LINK_VBUS state. musb_hdrc won't care until it
- * starts to handle softconnect right.
- */
- if (cable_present(status)) {
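+ /*
+ * twl->connected acts as a 0/1 latch: atomic_add_unless() only
+ * succeeds on the first connect (0 -> 1) and the first disconnect
+ * (1 -> 0), so the runtime PM get/put below stays balanced even if
+ * the IRQ fires repeatedly for the same cable state.
+ */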
+ if (cable_present(status)) {
+ if (atomic_add_unless(&twl->connected, 1, 1)) {
+ dev_dbg(twl->dev, "%s: cable connected %i\n",
+ __func__, status);
pm_runtime_get_sync(twl->dev);
- } else {
+ twl->musb_mailbox_pending = true;
+ }
+ } else {
+ if (atomic_add_unless(&twl->connected, -1, 0)) {
+ dev_dbg(twl->dev, "%s: cable disconnected %i\n",
+ __func__, status);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
+ twl->musb_mailbox_pending = true;
}
- twl->musb_mailbox_pending = true;
}
if (twl->musb_mailbox_pending) {
err = musb_mailbox(status);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 2764d713fea8..ea798548b012 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -108,6 +108,14 @@ config PINCTRL_AMD
Requires ACPI/FDT device enumeration code to set up a platform
device.
+config PINCTRL_BM1880
+ bool "Bitmain BM1880 Pinctrl driver"
+ depends on OF && (ARCH_BITMAIN || COMPILE_TEST)
+ default ARCH_BITMAIN
+ select PINMUX
+ help
+ Pinctrl driver for the Bitmain BM1880 SoC.
+
config PINCTRL_DA850_PUPD
tristate "TI DA850/OMAP-L138/AM18XX pullup/pulldown groups"
depends on OF && (ARCH_DAVINCI_DA850 || COMPILE_TEST)
@@ -265,6 +273,20 @@ config PINCTRL_ST
select PINCONF
select GPIOLIB_IRQCHIP
+config PINCTRL_STMFX
+ tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
+ depends on I2C
+ depends on OF || COMPILE_TEST
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select MFD_STMFX
+ help
+ Driver for the STMicroelectronics Multi-Function eXpander (STMFX)
+ GPIO expander.
+ It provides a GPIO interface supporting inputs and outputs,
+ push-pull or open-drain configuration, and can also be used as
+ an interrupt controller.
+
config PINCTRL_U300
bool "U300 pin controller driver"
depends on ARCH_U300
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 712184b74a5c..ac537fdbc998 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
+obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
index 27013e5949bc..74af07e25174 100644
--- a/drivers/pinctrl/cirrus/Kconfig
+++ b/drivers/pinctrl/cirrus/Kconfig
@@ -1,3 +1,13 @@
+config PINCTRL_LOCHNAGAR
+ tristate "Cirrus Logic Lochnagar pinctrl driver"
+ depends on MFD_LOCHNAGAR
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ This driver supports the GPIO and other pin configuration of the
+ Cirrus Logic Lochnagar audio development board.
+
# This is all selected by the Madera MFD driver Kconfig options
config PINCTRL_MADERA
tristate
diff --git a/drivers/pinctrl/cirrus/Makefile b/drivers/pinctrl/cirrus/Makefile
index 6e4938cde9e3..20baebf438f6 100644
--- a/drivers/pinctrl/cirrus/Makefile
+++ b/drivers/pinctrl/cirrus/Makefile
@@ -1,4 +1,6 @@
# Cirrus Logic pinctrl drivers
+obj-$(CONFIG_PINCTRL_LOCHNAGAR) += pinctrl-lochnagar.o
+
pinctrl-madera-objs := pinctrl-madera-core.o
ifeq ($(CONFIG_PINCTRL_CS47L35),y)
pinctrl-madera-objs += pinctrl-cs47l35.o
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
new file mode 100644
index 000000000000..670ac53a3141
--- /dev/null
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -0,0 +1,1235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lochnagar pin and GPIO control
+ *
+ * Copyright (c) 2017-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include <linux/mfd/lochnagar.h>
+#include <linux/mfd/lochnagar1_regs.h>
+#include <linux/mfd/lochnagar2_regs.h>
+
+#include <dt-bindings/pinctrl/lochnagar.h>
+
+#include "../pinctrl-utils.h"
+
+#define LN2_NUM_GPIO_CHANNELS 16
+
+#define LN_CDC_AIF1_STR "codec-aif1"
+#define LN_CDC_AIF2_STR "codec-aif2"
+#define LN_CDC_AIF3_STR "codec-aif3"
+#define LN_DSP_AIF1_STR "dsp-aif1"
+#define LN_DSP_AIF2_STR "dsp-aif2"
+#define LN_PSIA1_STR "psia1"
+#define LN_PSIA2_STR "psia2"
+#define LN_GF_AIF1_STR "gf-aif1"
+#define LN_GF_AIF2_STR "gf-aif2"
+#define LN_GF_AIF3_STR "gf-aif3"
+#define LN_GF_AIF4_STR "gf-aif4"
+#define LN_SPDIF_AIF_STR "spdif-aif"
+#define LN_USB_AIF1_STR "usb-aif1"
+#define LN_USB_AIF2_STR "usb-aif2"
+#define LN_ADAT_AIF_STR "adat-aif"
+#define LN_SOUNDCARD_AIF_STR "soundcard-aif"
+
+#define LN_PIN_GPIO(REV, ID, NAME, REG, SHIFT, INVERT) \
+static const struct lochnagar_pin lochnagar##REV##_##ID##_pin = { \
+ .name = NAME, .type = LN_PTYPE_GPIO, .reg = LOCHNAGAR##REV##_##REG, \
+ .shift = LOCHNAGAR##REV##_##SHIFT##_SHIFT, .invert = INVERT, \
+}
+
+#define LN_PIN_SAIF(REV, ID, NAME) \
+static const struct lochnagar_pin lochnagar##REV##_##ID##_pin = \
+ { .name = NAME, .type = LN_PTYPE_AIF, }
+
+#define LN_PIN_AIF(REV, ID) \
+ LN_PIN_SAIF(REV, ID##_BCLK, LN_##ID##_STR"-bclk"); \
+ LN_PIN_SAIF(REV, ID##_LRCLK, LN_##ID##_STR"-lrclk"); \
+ LN_PIN_SAIF(REV, ID##_RXDAT, LN_##ID##_STR"-rxdat"); \
+ LN_PIN_SAIF(REV, ID##_TXDAT, LN_##ID##_STR"-txdat")
+
+#define LN1_PIN_GPIO(ID, NAME, REG, SHIFT, INVERT) \
+ LN_PIN_GPIO(1, ID, NAME, REG, SHIFT, INVERT)
+
+#define LN1_PIN_MUX(ID, NAME) \
+static const struct lochnagar_pin lochnagar1_##ID##_pin = \
+ { .name = NAME, .type = LN_PTYPE_MUX, .reg = LOCHNAGAR1_##ID, }
+
+#define LN1_PIN_AIF(ID) LN_PIN_AIF(1, ID)
+
+#define LN2_PIN_GPIO(ID, NAME, REG, SHIFT, INVERT) \
+ LN_PIN_GPIO(2, ID, NAME, REG, SHIFT, INVERT)
+
+#define LN2_PIN_MUX(ID, NAME) \
+static const struct lochnagar_pin lochnagar2_##ID##_pin = \
+ { .name = NAME, .type = LN_PTYPE_MUX, .reg = LOCHNAGAR2_GPIO_##ID, }
+
+#define LN2_PIN_AIF(ID) LN_PIN_AIF(2, ID)
+
+#define LN2_PIN_GAI(ID) \
+ LN2_PIN_MUX(ID##_BCLK, LN_##ID##_STR"-bclk"); \
+ LN2_PIN_MUX(ID##_LRCLK, LN_##ID##_STR"-lrclk"); \
+ LN2_PIN_MUX(ID##_RXDAT, LN_##ID##_STR"-rxdat"); \
+ LN2_PIN_MUX(ID##_TXDAT, LN_##ID##_STR"-txdat")
+
+#define LN_PIN(REV, ID) [LOCHNAGAR##REV##_PIN_##ID] = { \
+ .number = LOCHNAGAR##REV##_PIN_##ID, \
+ .name = lochnagar##REV##_##ID##_pin.name, \
+ .drv_data = (void *)&lochnagar##REV##_##ID##_pin, \
+}
+
+#define LN1_PIN(ID) LN_PIN(1, ID)
+#define LN2_PIN(ID) LN_PIN(2, ID)
+
+#define LN_PINS(REV, ID) \
+ LN_PIN(REV, ID##_BCLK), LN_PIN(REV, ID##_LRCLK), \
+ LN_PIN(REV, ID##_RXDAT), LN_PIN(REV, ID##_TXDAT)
+
+#define LN1_PINS(ID) LN_PINS(1, ID)
+#define LN2_PINS(ID) LN_PINS(2, ID)
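+
+/*
+ * For reference, LN1_PIN_GPIO(CDC_RESET, "codec-reset", RST, CDC_RESET, 1)
+ * below expands to roughly:
+ *
+ *     static const struct lochnagar_pin lochnagar1_CDC_RESET_pin = {
+ *         .name = "codec-reset", .type = LN_PTYPE_GPIO,
+ *         .reg = LOCHNAGAR1_RST,
+ *         .shift = LOCHNAGAR1_CDC_RESET_SHIFT, .invert = 1,
+ *     };
+ *
+ * and LN1_PIN(CDC_RESET) then produces the matching pinctrl_pin_desc
+ * entry, pointing its drv_data at that structure.
+ */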
+
+enum {
+ LOCHNAGAR1_PIN_GF_GPIO2 = LOCHNAGAR1_PIN_NUM_GPIOS,
+ LOCHNAGAR1_PIN_GF_GPIO3,
+ LOCHNAGAR1_PIN_GF_GPIO7,
+ LOCHNAGAR1_PIN_LED1,
+ LOCHNAGAR1_PIN_LED2,
+ LOCHNAGAR1_PIN_CDC_AIF1_BCLK,
+ LOCHNAGAR1_PIN_CDC_AIF1_LRCLK,
+ LOCHNAGAR1_PIN_CDC_AIF1_RXDAT,
+ LOCHNAGAR1_PIN_CDC_AIF1_TXDAT,
+ LOCHNAGAR1_PIN_CDC_AIF2_BCLK,
+ LOCHNAGAR1_PIN_CDC_AIF2_LRCLK,
+ LOCHNAGAR1_PIN_CDC_AIF2_RXDAT,
+ LOCHNAGAR1_PIN_CDC_AIF2_TXDAT,
+ LOCHNAGAR1_PIN_CDC_AIF3_BCLK,
+ LOCHNAGAR1_PIN_CDC_AIF3_LRCLK,
+ LOCHNAGAR1_PIN_CDC_AIF3_RXDAT,
+ LOCHNAGAR1_PIN_CDC_AIF3_TXDAT,
+ LOCHNAGAR1_PIN_DSP_AIF1_BCLK,
+ LOCHNAGAR1_PIN_DSP_AIF1_LRCLK,
+ LOCHNAGAR1_PIN_DSP_AIF1_RXDAT,
+ LOCHNAGAR1_PIN_DSP_AIF1_TXDAT,
+ LOCHNAGAR1_PIN_DSP_AIF2_BCLK,
+ LOCHNAGAR1_PIN_DSP_AIF2_LRCLK,
+ LOCHNAGAR1_PIN_DSP_AIF2_RXDAT,
+ LOCHNAGAR1_PIN_DSP_AIF2_TXDAT,
+ LOCHNAGAR1_PIN_PSIA1_BCLK,
+ LOCHNAGAR1_PIN_PSIA1_LRCLK,
+ LOCHNAGAR1_PIN_PSIA1_RXDAT,
+ LOCHNAGAR1_PIN_PSIA1_TXDAT,
+ LOCHNAGAR1_PIN_PSIA2_BCLK,
+ LOCHNAGAR1_PIN_PSIA2_LRCLK,
+ LOCHNAGAR1_PIN_PSIA2_RXDAT,
+ LOCHNAGAR1_PIN_PSIA2_TXDAT,
+ LOCHNAGAR1_PIN_SPDIF_AIF_BCLK,
+ LOCHNAGAR1_PIN_SPDIF_AIF_LRCLK,
+ LOCHNAGAR1_PIN_SPDIF_AIF_RXDAT,
+ LOCHNAGAR1_PIN_SPDIF_AIF_TXDAT,
+ LOCHNAGAR1_PIN_GF_AIF3_BCLK,
+ LOCHNAGAR1_PIN_GF_AIF3_RXDAT,
+ LOCHNAGAR1_PIN_GF_AIF3_LRCLK,
+ LOCHNAGAR1_PIN_GF_AIF3_TXDAT,
+ LOCHNAGAR1_PIN_GF_AIF4_BCLK,
+ LOCHNAGAR1_PIN_GF_AIF4_RXDAT,
+ LOCHNAGAR1_PIN_GF_AIF4_LRCLK,
+ LOCHNAGAR1_PIN_GF_AIF4_TXDAT,
+ LOCHNAGAR1_PIN_GF_AIF1_BCLK,
+ LOCHNAGAR1_PIN_GF_AIF1_RXDAT,
+ LOCHNAGAR1_PIN_GF_AIF1_LRCLK,
+ LOCHNAGAR1_PIN_GF_AIF1_TXDAT,
+ LOCHNAGAR1_PIN_GF_AIF2_BCLK,
+ LOCHNAGAR1_PIN_GF_AIF2_RXDAT,
+ LOCHNAGAR1_PIN_GF_AIF2_LRCLK,
+ LOCHNAGAR1_PIN_GF_AIF2_TXDAT,
+
+ LOCHNAGAR2_PIN_SPDIF_AIF_BCLK = LOCHNAGAR2_PIN_NUM_GPIOS,
+ LOCHNAGAR2_PIN_SPDIF_AIF_LRCLK,
+ LOCHNAGAR2_PIN_SPDIF_AIF_RXDAT,
+ LOCHNAGAR2_PIN_SPDIF_AIF_TXDAT,
+ LOCHNAGAR2_PIN_USB_AIF1_BCLK,
+ LOCHNAGAR2_PIN_USB_AIF1_LRCLK,
+ LOCHNAGAR2_PIN_USB_AIF1_RXDAT,
+ LOCHNAGAR2_PIN_USB_AIF1_TXDAT,
+ LOCHNAGAR2_PIN_USB_AIF2_BCLK,
+ LOCHNAGAR2_PIN_USB_AIF2_LRCLK,
+ LOCHNAGAR2_PIN_USB_AIF2_RXDAT,
+ LOCHNAGAR2_PIN_USB_AIF2_TXDAT,
+ LOCHNAGAR2_PIN_ADAT_AIF_BCLK,
+ LOCHNAGAR2_PIN_ADAT_AIF_LRCLK,
+ LOCHNAGAR2_PIN_ADAT_AIF_RXDAT,
+ LOCHNAGAR2_PIN_ADAT_AIF_TXDAT,
+ LOCHNAGAR2_PIN_SOUNDCARD_AIF_BCLK,
+ LOCHNAGAR2_PIN_SOUNDCARD_AIF_LRCLK,
+ LOCHNAGAR2_PIN_SOUNDCARD_AIF_RXDAT,
+ LOCHNAGAR2_PIN_SOUNDCARD_AIF_TXDAT,
+};
+
+enum lochnagar_pin_type {
+ LN_PTYPE_GPIO,
+ LN_PTYPE_MUX,
+ LN_PTYPE_AIF,
+ LN_PTYPE_COUNT,
+};
+
+struct lochnagar_pin {
+ const char name[20];
+
+ enum lochnagar_pin_type type;
+
+ unsigned int reg;
+ int shift;
+ bool invert;
+};
+
+LN1_PIN_GPIO(CDC_RESET, "codec-reset", RST, CDC_RESET, 1);
+LN1_PIN_GPIO(DSP_RESET, "dsp-reset", RST, DSP_RESET, 1);
+LN1_PIN_GPIO(CDC_CIF1MODE, "codec-cif1mode", I2C_CTRL, CDC_CIF_MODE, 0);
+LN1_PIN_MUX(GF_GPIO2, "gf-gpio2");
+LN1_PIN_MUX(GF_GPIO3, "gf-gpio3");
+LN1_PIN_MUX(GF_GPIO7, "gf-gpio7");
+LN1_PIN_MUX(LED1, "led1");
+LN1_PIN_MUX(LED2, "led2");
+LN1_PIN_AIF(CDC_AIF1);
+LN1_PIN_AIF(CDC_AIF2);
+LN1_PIN_AIF(CDC_AIF3);
+LN1_PIN_AIF(DSP_AIF1);
+LN1_PIN_AIF(DSP_AIF2);
+LN1_PIN_AIF(PSIA1);
+LN1_PIN_AIF(PSIA2);
+LN1_PIN_AIF(SPDIF_AIF);
+LN1_PIN_AIF(GF_AIF1);
+LN1_PIN_AIF(GF_AIF2);
+LN1_PIN_AIF(GF_AIF3);
+LN1_PIN_AIF(GF_AIF4);
+
+LN2_PIN_GPIO(CDC_RESET, "codec-reset", MINICARD_RESETS, CDC_RESET, 1);
+LN2_PIN_GPIO(DSP_RESET, "dsp-reset", MINICARD_RESETS, DSP_RESET, 1);
+LN2_PIN_GPIO(CDC_CIF1MODE, "codec-cif1mode", COMMS_CTRL4, CDC_CIF1MODE, 0);
+LN2_PIN_GPIO(CDC_LDOENA, "codec-ldoena", POWER_CTRL, PWR_ENA, 0);
+LN2_PIN_GPIO(SPDIF_HWMODE, "spdif-hwmode", SPDIF_CTRL, SPDIF_HWMODE, 0);
+LN2_PIN_GPIO(SPDIF_RESET, "spdif-reset", SPDIF_CTRL, SPDIF_RESET, 1);
+LN2_PIN_MUX(FPGA_GPIO1, "fpga-gpio1");
+LN2_PIN_MUX(FPGA_GPIO2, "fpga-gpio2");
+LN2_PIN_MUX(FPGA_GPIO3, "fpga-gpio3");
+LN2_PIN_MUX(FPGA_GPIO4, "fpga-gpio4");
+LN2_PIN_MUX(FPGA_GPIO5, "fpga-gpio5");
+LN2_PIN_MUX(FPGA_GPIO6, "fpga-gpio6");
+LN2_PIN_MUX(CDC_GPIO1, "codec-gpio1");
+LN2_PIN_MUX(CDC_GPIO2, "codec-gpio2");
+LN2_PIN_MUX(CDC_GPIO3, "codec-gpio3");
+LN2_PIN_MUX(CDC_GPIO4, "codec-gpio4");
+LN2_PIN_MUX(CDC_GPIO5, "codec-gpio5");
+LN2_PIN_MUX(CDC_GPIO6, "codec-gpio6");
+LN2_PIN_MUX(CDC_GPIO7, "codec-gpio7");
+LN2_PIN_MUX(CDC_GPIO8, "codec-gpio8");
+LN2_PIN_MUX(DSP_GPIO1, "dsp-gpio1");
+LN2_PIN_MUX(DSP_GPIO2, "dsp-gpio2");
+LN2_PIN_MUX(DSP_GPIO3, "dsp-gpio3");
+LN2_PIN_MUX(DSP_GPIO4, "dsp-gpio4");
+LN2_PIN_MUX(DSP_GPIO5, "dsp-gpio5");
+LN2_PIN_MUX(DSP_GPIO6, "dsp-gpio6");
+LN2_PIN_MUX(GF_GPIO2, "gf-gpio2");
+LN2_PIN_MUX(GF_GPIO3, "gf-gpio3");
+LN2_PIN_MUX(GF_GPIO7, "gf-gpio7");
+LN2_PIN_MUX(DSP_UART1_RX, "dsp-uart1-rx");
+LN2_PIN_MUX(DSP_UART1_TX, "dsp-uart1-tx");
+LN2_PIN_MUX(DSP_UART2_RX, "dsp-uart2-rx");
+LN2_PIN_MUX(DSP_UART2_TX, "dsp-uart2-tx");
+LN2_PIN_MUX(GF_UART2_RX, "gf-uart2-rx");
+LN2_PIN_MUX(GF_UART2_TX, "gf-uart2-tx");
+LN2_PIN_MUX(USB_UART_RX, "usb-uart-rx");
+LN2_PIN_MUX(CDC_PDMCLK1, "codec-pdmclk1");
+LN2_PIN_MUX(CDC_PDMDAT1, "codec-pdmdat1");
+LN2_PIN_MUX(CDC_PDMCLK2, "codec-pdmclk2");
+LN2_PIN_MUX(CDC_PDMDAT2, "codec-pdmdat2");
+LN2_PIN_MUX(CDC_DMICCLK1, "codec-dmicclk1");
+LN2_PIN_MUX(CDC_DMICDAT1, "codec-dmicdat1");
+LN2_PIN_MUX(CDC_DMICCLK2, "codec-dmicclk2");
+LN2_PIN_MUX(CDC_DMICDAT2, "codec-dmicdat2");
+LN2_PIN_MUX(CDC_DMICCLK3, "codec-dmicclk3");
+LN2_PIN_MUX(CDC_DMICDAT3, "codec-dmicdat3");
+LN2_PIN_MUX(CDC_DMICCLK4, "codec-dmicclk4");
+LN2_PIN_MUX(CDC_DMICDAT4, "codec-dmicdat4");
+LN2_PIN_MUX(DSP_DMICCLK1, "dsp-dmicclk1");
+LN2_PIN_MUX(DSP_DMICDAT1, "dsp-dmicdat1");
+LN2_PIN_MUX(DSP_DMICCLK2, "dsp-dmicclk2");
+LN2_PIN_MUX(DSP_DMICDAT2, "dsp-dmicdat2");
+LN2_PIN_MUX(I2C2_SCL, "i2c2-scl");
+LN2_PIN_MUX(I2C2_SDA, "i2c2-sda");
+LN2_PIN_MUX(I2C3_SCL, "i2c3-scl");
+LN2_PIN_MUX(I2C3_SDA, "i2c3-sda");
+LN2_PIN_MUX(I2C4_SCL, "i2c4-scl");
+LN2_PIN_MUX(I2C4_SDA, "i2c4-sda");
+LN2_PIN_MUX(DSP_STANDBY, "dsp-standby");
+LN2_PIN_MUX(CDC_MCLK1, "codec-mclk1");
+LN2_PIN_MUX(CDC_MCLK2, "codec-mclk2");
+LN2_PIN_MUX(DSP_CLKIN, "dsp-clkin");
+LN2_PIN_MUX(PSIA1_MCLK, "psia1-mclk");
+LN2_PIN_MUX(PSIA2_MCLK, "psia2-mclk");
+LN2_PIN_MUX(GF_GPIO1, "gf-gpio1");
+LN2_PIN_MUX(GF_GPIO5, "gf-gpio5");
+LN2_PIN_MUX(DSP_GPIO20, "dsp-gpio20");
+LN2_PIN_GAI(CDC_AIF1);
+LN2_PIN_GAI(CDC_AIF2);
+LN2_PIN_GAI(CDC_AIF3);
+LN2_PIN_GAI(DSP_AIF1);
+LN2_PIN_GAI(DSP_AIF2);
+LN2_PIN_GAI(PSIA1);
+LN2_PIN_GAI(PSIA2);
+LN2_PIN_GAI(GF_AIF1);
+LN2_PIN_GAI(GF_AIF2);
+LN2_PIN_GAI(GF_AIF3);
+LN2_PIN_GAI(GF_AIF4);
+LN2_PIN_AIF(SPDIF_AIF);
+LN2_PIN_AIF(USB_AIF1);
+LN2_PIN_AIF(USB_AIF2);
+LN2_PIN_AIF(ADAT_AIF);
+LN2_PIN_AIF(SOUNDCARD_AIF);
+
+static const struct pinctrl_pin_desc lochnagar1_pins[] = {
+ LN1_PIN(CDC_RESET), LN1_PIN(DSP_RESET), LN1_PIN(CDC_CIF1MODE),
+ LN1_PIN(GF_GPIO2), LN1_PIN(GF_GPIO3), LN1_PIN(GF_GPIO7),
+ LN1_PIN(LED1), LN1_PIN(LED2),
+ LN1_PINS(CDC_AIF1), LN1_PINS(CDC_AIF2), LN1_PINS(CDC_AIF3),
+ LN1_PINS(DSP_AIF1), LN1_PINS(DSP_AIF2),
+ LN1_PINS(PSIA1), LN1_PINS(PSIA2),
+ LN1_PINS(SPDIF_AIF),
+ LN1_PINS(GF_AIF1), LN1_PINS(GF_AIF2),
+ LN1_PINS(GF_AIF3), LN1_PINS(GF_AIF4),
+};
+
+static const struct pinctrl_pin_desc lochnagar2_pins[] = {
+ LN2_PIN(CDC_RESET), LN2_PIN(DSP_RESET), LN2_PIN(CDC_CIF1MODE),
+ LN2_PIN(CDC_LDOENA),
+ LN2_PIN(SPDIF_HWMODE), LN2_PIN(SPDIF_RESET),
+ LN2_PIN(FPGA_GPIO1), LN2_PIN(FPGA_GPIO2), LN2_PIN(FPGA_GPIO3),
+ LN2_PIN(FPGA_GPIO4), LN2_PIN(FPGA_GPIO5), LN2_PIN(FPGA_GPIO6),
+ LN2_PIN(CDC_GPIO1), LN2_PIN(CDC_GPIO2), LN2_PIN(CDC_GPIO3),
+ LN2_PIN(CDC_GPIO4), LN2_PIN(CDC_GPIO5), LN2_PIN(CDC_GPIO6),
+ LN2_PIN(CDC_GPIO7), LN2_PIN(CDC_GPIO8),
+ LN2_PIN(DSP_GPIO1), LN2_PIN(DSP_GPIO2), LN2_PIN(DSP_GPIO3),
+ LN2_PIN(DSP_GPIO4), LN2_PIN(DSP_GPIO5), LN2_PIN(DSP_GPIO6),
+ LN2_PIN(DSP_GPIO20),
+ LN2_PIN(GF_GPIO1), LN2_PIN(GF_GPIO2), LN2_PIN(GF_GPIO3),
+ LN2_PIN(GF_GPIO5), LN2_PIN(GF_GPIO7),
+ LN2_PINS(CDC_AIF1), LN2_PINS(CDC_AIF2), LN2_PINS(CDC_AIF3),
+ LN2_PINS(DSP_AIF1), LN2_PINS(DSP_AIF2),
+ LN2_PINS(PSIA1), LN2_PINS(PSIA2),
+ LN2_PINS(GF_AIF1), LN2_PINS(GF_AIF2),
+ LN2_PINS(GF_AIF3), LN2_PINS(GF_AIF4),
+ LN2_PIN(DSP_UART1_RX), LN2_PIN(DSP_UART1_TX),
+ LN2_PIN(DSP_UART2_RX), LN2_PIN(DSP_UART2_TX),
+ LN2_PIN(GF_UART2_RX), LN2_PIN(GF_UART2_TX),
+ LN2_PIN(USB_UART_RX),
+ LN2_PIN(CDC_PDMCLK1), LN2_PIN(CDC_PDMDAT1),
+ LN2_PIN(CDC_PDMCLK2), LN2_PIN(CDC_PDMDAT2),
+ LN2_PIN(CDC_DMICCLK1), LN2_PIN(CDC_DMICDAT1),
+ LN2_PIN(CDC_DMICCLK2), LN2_PIN(CDC_DMICDAT2),
+ LN2_PIN(CDC_DMICCLK3), LN2_PIN(CDC_DMICDAT3),
+ LN2_PIN(CDC_DMICCLK4), LN2_PIN(CDC_DMICDAT4),
+ LN2_PIN(DSP_DMICCLK1), LN2_PIN(DSP_DMICDAT1),
+ LN2_PIN(DSP_DMICCLK2), LN2_PIN(DSP_DMICDAT2),
+ LN2_PIN(I2C2_SCL), LN2_PIN(I2C2_SDA),
+ LN2_PIN(I2C3_SCL), LN2_PIN(I2C3_SDA),
+ LN2_PIN(I2C4_SCL), LN2_PIN(I2C4_SDA),
+ LN2_PIN(DSP_STANDBY),
+ LN2_PIN(CDC_MCLK1), LN2_PIN(CDC_MCLK2),
+ LN2_PIN(DSP_CLKIN),
+ LN2_PIN(PSIA1_MCLK), LN2_PIN(PSIA2_MCLK),
+ LN2_PINS(SPDIF_AIF),
+ LN2_PINS(USB_AIF1), LN2_PINS(USB_AIF2),
+ LN2_PINS(ADAT_AIF),
+ LN2_PINS(SOUNDCARD_AIF),
+};
+
+#define LN_AIF_PINS(REV, ID) \
+ LOCHNAGAR##REV##_PIN_##ID##_BCLK, \
+ LOCHNAGAR##REV##_PIN_##ID##_LRCLK, \
+ LOCHNAGAR##REV##_PIN_##ID##_TXDAT, \
+ LOCHNAGAR##REV##_PIN_##ID##_RXDAT,
+
+#define LN1_AIF(ID, CTRL) \
+static const struct lochnagar_aif lochnagar1_##ID##_aif = { \
+ .name = LN_##ID##_STR, \
+ .pins = { LN_AIF_PINS(1, ID) }, \
+ .src_reg = LOCHNAGAR1_##ID##_SEL, \
+ .src_mask = LOCHNAGAR1_SRC_MASK, \
+ .ctrl_reg = LOCHNAGAR1_##CTRL, \
+ .ena_mask = LOCHNAGAR1_##ID##_ENA_MASK, \
+ .master_mask = LOCHNAGAR1_##ID##_LRCLK_DIR_MASK | \
+ LOCHNAGAR1_##ID##_BCLK_DIR_MASK, \
+}
+
+#define LN2_AIF(ID) \
+static const struct lochnagar_aif lochnagar2_##ID##_aif = { \
+ .name = LN_##ID##_STR, \
+ .pins = { LN_AIF_PINS(2, ID) }, \
+ .src_reg = LOCHNAGAR2_##ID##_CTRL, \
+ .src_mask = LOCHNAGAR2_AIF_SRC_MASK, \
+ .ctrl_reg = LOCHNAGAR2_##ID##_CTRL, \
+ .ena_mask = LOCHNAGAR2_AIF_ENA_MASK, \
+ .master_mask = LOCHNAGAR2_AIF_LRCLK_DIR_MASK | \
+ LOCHNAGAR2_AIF_BCLK_DIR_MASK, \
+}
+
+struct lochnagar_aif {
+ const char name[16];
+
+ unsigned int pins[4];
+
+ u16 src_reg;
+ u16 src_mask;
+
+ u16 ctrl_reg;
+ u16 ena_mask;
+ u16 master_mask;
+};
+
+LN1_AIF(CDC_AIF1, CDC_AIF_CTRL1);
+LN1_AIF(CDC_AIF2, CDC_AIF_CTRL1);
+LN1_AIF(CDC_AIF3, CDC_AIF_CTRL2);
+LN1_AIF(DSP_AIF1, DSP_AIF);
+LN1_AIF(DSP_AIF2, DSP_AIF);
+LN1_AIF(PSIA1, PSIA_AIF);
+LN1_AIF(PSIA2, PSIA_AIF);
+LN1_AIF(GF_AIF1, GF_AIF1);
+LN1_AIF(GF_AIF2, GF_AIF2);
+LN1_AIF(GF_AIF3, GF_AIF1);
+LN1_AIF(GF_AIF4, GF_AIF2);
+LN1_AIF(SPDIF_AIF, EXT_AIF_CTRL);
+
+LN2_AIF(CDC_AIF1);
+LN2_AIF(CDC_AIF2);
+LN2_AIF(CDC_AIF3);
+LN2_AIF(DSP_AIF1);
+LN2_AIF(DSP_AIF2);
+LN2_AIF(PSIA1);
+LN2_AIF(PSIA2);
+LN2_AIF(GF_AIF1);
+LN2_AIF(GF_AIF2);
+LN2_AIF(GF_AIF3);
+LN2_AIF(GF_AIF4);
+LN2_AIF(SPDIF_AIF);
+LN2_AIF(USB_AIF1);
+LN2_AIF(USB_AIF2);
+LN2_AIF(ADAT_AIF);
+LN2_AIF(SOUNDCARD_AIF);
+
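+/*
+ * Source select values: LN2_OP_AIF routes a pin to its AIF, while
+ * LN2_OP_GPIO (optionally or'd with 1 for the level) drives it as a GPIO.
+ */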
+#define LN2_OP_AIF 0x00
+#define LN2_OP_GPIO 0xFE
+
+#define LN_FUNC(NAME, TYPE, OP) \
+ { .name = NAME, .type = LN_FTYPE_##TYPE, .op = OP }
+
+#define LN_FUNC_PIN(REV, ID, OP) \
+ LN_FUNC(lochnagar##REV##_##ID##_pin.name, PIN, OP)
+
+#define LN1_FUNC_PIN(ID, OP) LN_FUNC_PIN(1, ID, OP)
+#define LN2_FUNC_PIN(ID, OP) LN_FUNC_PIN(2, ID, OP)
+
+#define LN_FUNC_AIF(REV, ID, OP) \
+ LN_FUNC(lochnagar##REV##_##ID##_aif.name, AIF, OP)
+
+#define LN1_FUNC_AIF(ID, OP) LN_FUNC_AIF(1, ID, OP)
+#define LN2_FUNC_AIF(ID, OP) LN_FUNC_AIF(2, ID, OP)
+
+#define LN2_FUNC_GAI(ID, OP, BOP, LROP, RXOP, TXOP) \
+ LN2_FUNC_AIF(ID, OP), \
+ LN_FUNC(lochnagar2_##ID##_BCLK_pin.name, PIN, BOP), \
+ LN_FUNC(lochnagar2_##ID##_LRCLK_pin.name, PIN, LROP), \
+ LN_FUNC(lochnagar2_##ID##_RXDAT_pin.name, PIN, RXOP), \
+ LN_FUNC(lochnagar2_##ID##_TXDAT_pin.name, PIN, TXOP)
+
+enum lochnagar_func_type {
+ LN_FTYPE_PIN,
+ LN_FTYPE_AIF,
+ LN_FTYPE_COUNT,
+};
+
+struct lochnagar_func {
+ const char * const name;
+
+ enum lochnagar_func_type type;
+
+ u8 op;
+};
+
+static const struct lochnagar_func lochnagar1_funcs[] = {
+ LN_FUNC("dsp-gpio1", PIN, 0x01),
+ LN_FUNC("dsp-gpio2", PIN, 0x02),
+ LN_FUNC("dsp-gpio3", PIN, 0x03),
+ LN_FUNC("codec-gpio1", PIN, 0x04),
+ LN_FUNC("codec-gpio2", PIN, 0x05),
+ LN_FUNC("codec-gpio3", PIN, 0x06),
+ LN_FUNC("codec-gpio4", PIN, 0x07),
+ LN_FUNC("codec-gpio5", PIN, 0x08),
+ LN_FUNC("codec-gpio6", PIN, 0x09),
+ LN_FUNC("codec-gpio7", PIN, 0x0A),
+ LN_FUNC("codec-gpio8", PIN, 0x0B),
+ LN1_FUNC_PIN(GF_GPIO2, 0x0C),
+ LN1_FUNC_PIN(GF_GPIO3, 0x0D),
+ LN1_FUNC_PIN(GF_GPIO7, 0x0E),
+
+ LN1_FUNC_AIF(SPDIF_AIF, 0x01),
+ LN1_FUNC_AIF(PSIA1, 0x02),
+ LN1_FUNC_AIF(PSIA2, 0x03),
+ LN1_FUNC_AIF(CDC_AIF1, 0x04),
+ LN1_FUNC_AIF(CDC_AIF2, 0x05),
+ LN1_FUNC_AIF(CDC_AIF3, 0x06),
+ LN1_FUNC_AIF(DSP_AIF1, 0x07),
+ LN1_FUNC_AIF(DSP_AIF2, 0x08),
+ LN1_FUNC_AIF(GF_AIF3, 0x09),
+ LN1_FUNC_AIF(GF_AIF4, 0x0A),
+ LN1_FUNC_AIF(GF_AIF1, 0x0B),
+ LN1_FUNC_AIF(GF_AIF2, 0x0C),
+};
+
+static const struct lochnagar_func lochnagar2_funcs[] = {
+ LN_FUNC("aif", PIN, LN2_OP_AIF),
+ LN2_FUNC_PIN(FPGA_GPIO1, 0x01),
+ LN2_FUNC_PIN(FPGA_GPIO2, 0x02),
+ LN2_FUNC_PIN(FPGA_GPIO3, 0x03),
+ LN2_FUNC_PIN(FPGA_GPIO4, 0x04),
+ LN2_FUNC_PIN(FPGA_GPIO5, 0x05),
+ LN2_FUNC_PIN(FPGA_GPIO6, 0x06),
+ LN2_FUNC_PIN(CDC_GPIO1, 0x07),
+ LN2_FUNC_PIN(CDC_GPIO2, 0x08),
+ LN2_FUNC_PIN(CDC_GPIO3, 0x09),
+ LN2_FUNC_PIN(CDC_GPIO4, 0x0A),
+ LN2_FUNC_PIN(CDC_GPIO5, 0x0B),
+ LN2_FUNC_PIN(CDC_GPIO6, 0x0C),
+ LN2_FUNC_PIN(CDC_GPIO7, 0x0D),
+ LN2_FUNC_PIN(CDC_GPIO8, 0x0E),
+ LN2_FUNC_PIN(DSP_GPIO1, 0x0F),
+ LN2_FUNC_PIN(DSP_GPIO2, 0x10),
+ LN2_FUNC_PIN(DSP_GPIO3, 0x11),
+ LN2_FUNC_PIN(DSP_GPIO4, 0x12),
+ LN2_FUNC_PIN(DSP_GPIO5, 0x13),
+ LN2_FUNC_PIN(DSP_GPIO6, 0x14),
+ LN2_FUNC_PIN(GF_GPIO2, 0x15),
+ LN2_FUNC_PIN(GF_GPIO3, 0x16),
+ LN2_FUNC_PIN(GF_GPIO7, 0x17),
+ LN2_FUNC_PIN(GF_GPIO1, 0x18),
+ LN2_FUNC_PIN(GF_GPIO5, 0x19),
+ LN2_FUNC_PIN(DSP_GPIO20, 0x1A),
+ LN_FUNC("codec-clkout", PIN, 0x20),
+ LN_FUNC("dsp-clkout", PIN, 0x21),
+ LN_FUNC("pmic-32k", PIN, 0x22),
+ LN_FUNC("spdif-clkout", PIN, 0x23),
+ LN_FUNC("clk-12m288", PIN, 0x24),
+ LN_FUNC("clk-11m2986", PIN, 0x25),
+ LN_FUNC("clk-24m576", PIN, 0x26),
+ LN_FUNC("clk-22m5792", PIN, 0x27),
+ LN_FUNC("xmos-mclk", PIN, 0x29),
+ LN_FUNC("gf-clkout1", PIN, 0x2A),
+ LN_FUNC("gf-mclk1", PIN, 0x2B),
+ LN_FUNC("gf-mclk3", PIN, 0x2C),
+ LN_FUNC("gf-mclk2", PIN, 0x2D),
+ LN_FUNC("gf-clkout2", PIN, 0x2E),
+ LN2_FUNC_PIN(CDC_MCLK1, 0x2F),
+ LN2_FUNC_PIN(CDC_MCLK2, 0x30),
+ LN2_FUNC_PIN(DSP_CLKIN, 0x31),
+ LN2_FUNC_PIN(PSIA1_MCLK, 0x32),
+ LN2_FUNC_PIN(PSIA2_MCLK, 0x33),
+ LN_FUNC("spdif-mclk", PIN, 0x34),
+ LN_FUNC("codec-irq", PIN, 0x42),
+ LN2_FUNC_PIN(CDC_RESET, 0x43),
+ LN2_FUNC_PIN(DSP_RESET, 0x44),
+ LN_FUNC("dsp-irq", PIN, 0x45),
+ LN2_FUNC_PIN(DSP_STANDBY, 0x46),
+ LN2_FUNC_PIN(CDC_PDMCLK1, 0x90),
+ LN2_FUNC_PIN(CDC_PDMDAT1, 0x91),
+ LN2_FUNC_PIN(CDC_PDMCLK2, 0x92),
+ LN2_FUNC_PIN(CDC_PDMDAT2, 0x93),
+ LN2_FUNC_PIN(CDC_DMICCLK1, 0xA0),
+ LN2_FUNC_PIN(CDC_DMICDAT1, 0xA1),
+ LN2_FUNC_PIN(CDC_DMICCLK2, 0xA2),
+ LN2_FUNC_PIN(CDC_DMICDAT2, 0xA3),
+ LN2_FUNC_PIN(CDC_DMICCLK3, 0xA4),
+ LN2_FUNC_PIN(CDC_DMICDAT3, 0xA5),
+ LN2_FUNC_PIN(CDC_DMICCLK4, 0xA6),
+ LN2_FUNC_PIN(CDC_DMICDAT4, 0xA7),
+ LN2_FUNC_PIN(DSP_DMICCLK1, 0xA8),
+ LN2_FUNC_PIN(DSP_DMICDAT1, 0xA9),
+ LN2_FUNC_PIN(DSP_DMICCLK2, 0xAA),
+ LN2_FUNC_PIN(DSP_DMICDAT2, 0xAB),
+ LN2_FUNC_PIN(DSP_UART1_RX, 0xC0),
+ LN2_FUNC_PIN(DSP_UART1_TX, 0xC1),
+ LN2_FUNC_PIN(DSP_UART2_RX, 0xC2),
+ LN2_FUNC_PIN(DSP_UART2_TX, 0xC3),
+ LN2_FUNC_PIN(GF_UART2_RX, 0xC4),
+ LN2_FUNC_PIN(GF_UART2_TX, 0xC5),
+ LN2_FUNC_PIN(USB_UART_RX, 0xC6),
+ LN_FUNC("usb-uart-tx", PIN, 0xC7),
+ LN2_FUNC_PIN(I2C2_SCL, 0xE0),
+ LN2_FUNC_PIN(I2C2_SDA, 0xE1),
+ LN2_FUNC_PIN(I2C3_SCL, 0xE2),
+ LN2_FUNC_PIN(I2C3_SDA, 0xE3),
+ LN2_FUNC_PIN(I2C4_SCL, 0xE4),
+ LN2_FUNC_PIN(I2C4_SDA, 0xE5),
+
+ LN2_FUNC_AIF(SPDIF_AIF, 0x01),
+ LN2_FUNC_GAI(PSIA1, 0x02, 0x50, 0x51, 0x52, 0x53),
+ LN2_FUNC_GAI(PSIA2, 0x03, 0x54, 0x55, 0x56, 0x57),
+ LN2_FUNC_GAI(CDC_AIF1, 0x04, 0x59, 0x5B, 0x5A, 0x58),
+ LN2_FUNC_GAI(CDC_AIF2, 0x05, 0x5D, 0x5F, 0x5E, 0x5C),
+ LN2_FUNC_GAI(CDC_AIF3, 0x06, 0x61, 0x62, 0x63, 0x60),
+ LN2_FUNC_GAI(DSP_AIF1, 0x07, 0x65, 0x67, 0x66, 0x64),
+ LN2_FUNC_GAI(DSP_AIF2, 0x08, 0x69, 0x6B, 0x6A, 0x68),
+ LN2_FUNC_GAI(GF_AIF3, 0x09, 0x6D, 0x6F, 0x6C, 0x6E),
+ LN2_FUNC_GAI(GF_AIF4, 0x0A, 0x71, 0x73, 0x70, 0x72),
+ LN2_FUNC_GAI(GF_AIF1, 0x0B, 0x75, 0x77, 0x74, 0x76),
+ LN2_FUNC_GAI(GF_AIF2, 0x0C, 0x79, 0x7B, 0x78, 0x7A),
+ LN2_FUNC_AIF(USB_AIF1, 0x0D),
+ LN2_FUNC_AIF(USB_AIF2, 0x0E),
+ LN2_FUNC_AIF(ADAT_AIF, 0x0F),
+ LN2_FUNC_AIF(SOUNDCARD_AIF, 0x10),
+};
+
+#define LN_GROUP_PIN(REV, ID) { \
+ .name = lochnagar##REV##_##ID##_pin.name, \
+ .type = LN_FTYPE_PIN, \
+ .pins = &lochnagar##REV##_pins[LOCHNAGAR##REV##_PIN_##ID].number, \
+ .npins = 1, \
+ .priv = &lochnagar##REV##_pins[LOCHNAGAR##REV##_PIN_##ID], \
+}
+
+#define LN_GROUP_AIF(REV, ID) { \
+ .name = lochnagar##REV##_##ID##_aif.name, \
+ .type = LN_FTYPE_AIF, \
+ .pins = lochnagar##REV##_##ID##_aif.pins, \
+ .npins = ARRAY_SIZE(lochnagar##REV##_##ID##_aif.pins), \
+ .priv = &lochnagar##REV##_##ID##_aif, \
+}
+
+#define LN1_GROUP_PIN(ID) LN_GROUP_PIN(1, ID)
+#define LN2_GROUP_PIN(ID) LN_GROUP_PIN(2, ID)
+
+#define LN1_GROUP_AIF(ID) LN_GROUP_AIF(1, ID)
+#define LN2_GROUP_AIF(ID) LN_GROUP_AIF(2, ID)
+
+#define LN2_GROUP_GAI(ID) \
+ LN2_GROUP_AIF(ID), \
+ LN2_GROUP_PIN(ID##_BCLK), LN2_GROUP_PIN(ID##_LRCLK), \
+ LN2_GROUP_PIN(ID##_RXDAT), LN2_GROUP_PIN(ID##_TXDAT)
+
+struct lochnagar_group {
+ const char * const name;
+
+ enum lochnagar_func_type type;
+
+ const unsigned int *pins;
+ unsigned int npins;
+
+ const void *priv;
+};
+
+static const struct lochnagar_group lochnagar1_groups[] = {
+ LN1_GROUP_PIN(GF_GPIO2), LN1_GROUP_PIN(GF_GPIO3),
+ LN1_GROUP_PIN(GF_GPIO7),
+ LN1_GROUP_PIN(LED1), LN1_GROUP_PIN(LED2),
+ LN1_GROUP_AIF(CDC_AIF1), LN1_GROUP_AIF(CDC_AIF2),
+ LN1_GROUP_AIF(CDC_AIF3),
+ LN1_GROUP_AIF(DSP_AIF1), LN1_GROUP_AIF(DSP_AIF2),
+ LN1_GROUP_AIF(PSIA1), LN1_GROUP_AIF(PSIA2),
+ LN1_GROUP_AIF(GF_AIF1), LN1_GROUP_AIF(GF_AIF2),
+ LN1_GROUP_AIF(GF_AIF3), LN1_GROUP_AIF(GF_AIF4),
+ LN1_GROUP_AIF(SPDIF_AIF),
+};
+
+static const struct lochnagar_group lochnagar2_groups[] = {
+ LN2_GROUP_PIN(FPGA_GPIO1), LN2_GROUP_PIN(FPGA_GPIO2),
+ LN2_GROUP_PIN(FPGA_GPIO3), LN2_GROUP_PIN(FPGA_GPIO4),
+ LN2_GROUP_PIN(FPGA_GPIO5), LN2_GROUP_PIN(FPGA_GPIO6),
+ LN2_GROUP_PIN(CDC_GPIO1), LN2_GROUP_PIN(CDC_GPIO2),
+ LN2_GROUP_PIN(CDC_GPIO3), LN2_GROUP_PIN(CDC_GPIO4),
+ LN2_GROUP_PIN(CDC_GPIO5), LN2_GROUP_PIN(CDC_GPIO6),
+ LN2_GROUP_PIN(CDC_GPIO7), LN2_GROUP_PIN(CDC_GPIO8),
+ LN2_GROUP_PIN(DSP_GPIO1), LN2_GROUP_PIN(DSP_GPIO2),
+ LN2_GROUP_PIN(DSP_GPIO3), LN2_GROUP_PIN(DSP_GPIO4),
+ LN2_GROUP_PIN(DSP_GPIO5), LN2_GROUP_PIN(DSP_GPIO6),
+ LN2_GROUP_PIN(DSP_GPIO20),
+ LN2_GROUP_PIN(GF_GPIO1),
+ LN2_GROUP_PIN(GF_GPIO2), LN2_GROUP_PIN(GF_GPIO5),
+ LN2_GROUP_PIN(GF_GPIO3), LN2_GROUP_PIN(GF_GPIO7),
+ LN2_GROUP_PIN(DSP_UART1_RX), LN2_GROUP_PIN(DSP_UART1_TX),
+ LN2_GROUP_PIN(DSP_UART2_RX), LN2_GROUP_PIN(DSP_UART2_TX),
+ LN2_GROUP_PIN(GF_UART2_RX), LN2_GROUP_PIN(GF_UART2_TX),
+ LN2_GROUP_PIN(USB_UART_RX),
+ LN2_GROUP_PIN(CDC_PDMCLK1), LN2_GROUP_PIN(CDC_PDMDAT1),
+ LN2_GROUP_PIN(CDC_PDMCLK2), LN2_GROUP_PIN(CDC_PDMDAT2),
+ LN2_GROUP_PIN(CDC_DMICCLK1), LN2_GROUP_PIN(CDC_DMICDAT1),
+ LN2_GROUP_PIN(CDC_DMICCLK2), LN2_GROUP_PIN(CDC_DMICDAT2),
+ LN2_GROUP_PIN(CDC_DMICCLK3), LN2_GROUP_PIN(CDC_DMICDAT3),
+ LN2_GROUP_PIN(CDC_DMICCLK4), LN2_GROUP_PIN(CDC_DMICDAT4),
+ LN2_GROUP_PIN(DSP_DMICCLK1), LN2_GROUP_PIN(DSP_DMICDAT1),
+ LN2_GROUP_PIN(DSP_DMICCLK2), LN2_GROUP_PIN(DSP_DMICDAT2),
+ LN2_GROUP_PIN(I2C2_SCL), LN2_GROUP_PIN(I2C2_SDA),
+ LN2_GROUP_PIN(I2C3_SCL), LN2_GROUP_PIN(I2C3_SDA),
+ LN2_GROUP_PIN(I2C4_SCL), LN2_GROUP_PIN(I2C4_SDA),
+ LN2_GROUP_PIN(DSP_STANDBY),
+ LN2_GROUP_PIN(CDC_MCLK1), LN2_GROUP_PIN(CDC_MCLK2),
+ LN2_GROUP_PIN(DSP_CLKIN),
+ LN2_GROUP_PIN(PSIA1_MCLK), LN2_GROUP_PIN(PSIA2_MCLK),
+ LN2_GROUP_GAI(CDC_AIF1), LN2_GROUP_GAI(CDC_AIF2),
+ LN2_GROUP_GAI(CDC_AIF3),
+ LN2_GROUP_GAI(DSP_AIF1), LN2_GROUP_GAI(DSP_AIF2),
+ LN2_GROUP_GAI(PSIA1), LN2_GROUP_GAI(PSIA2),
+ LN2_GROUP_GAI(GF_AIF1), LN2_GROUP_GAI(GF_AIF2),
+ LN2_GROUP_GAI(GF_AIF3), LN2_GROUP_GAI(GF_AIF4),
+ LN2_GROUP_AIF(SPDIF_AIF),
+ LN2_GROUP_AIF(USB_AIF1), LN2_GROUP_AIF(USB_AIF2),
+ LN2_GROUP_AIF(ADAT_AIF),
+ LN2_GROUP_AIF(SOUNDCARD_AIF),
+};
+
+struct lochnagar_func_groups {
+ const char **groups;
+ unsigned int ngroups;
+};
+
+struct lochnagar_pin_priv {
+ struct lochnagar *lochnagar;
+ struct device *dev;
+
+ const struct lochnagar_func *funcs;
+ unsigned int nfuncs;
+
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+
+ const struct lochnagar_group *groups;
+ unsigned int ngroups;
+
+ struct lochnagar_func_groups func_groups[LN_FTYPE_COUNT];
+
+ struct gpio_chip gpio_chip;
+};
+
+static int lochnagar_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->ngroups;
+}
+
+static const char *lochnagar_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group_idx)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->groups[group_idx].name;
+}
+
+static int lochnagar_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group_idx,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = priv->groups[group_idx].pins;
+ *num_pins = priv->groups[group_idx].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops lochnagar_pin_group_ops = {
+ .get_groups_count = lochnagar_get_groups_count,
+ .get_group_name = lochnagar_get_group_name,
+ .get_group_pins = lochnagar_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int lochnagar_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->nfuncs;
+}
+
+static const char *lochnagar_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned int func_idx)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->funcs[func_idx].name;
+}
+
+static int lochnagar_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int func_idx,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ int func_type;
+
+ func_type = priv->funcs[func_idx].type;
+
+ *groups = priv->func_groups[func_type].groups;
+ *num_groups = priv->func_groups[func_type].ngroups;
+
+ return 0;
+}
+
+static int lochnagar2_get_gpio_chan(struct lochnagar_pin_priv *priv,
+ unsigned int op)
+{
+ struct regmap *regmap = priv->lochnagar->regmap;
+ unsigned int val;
+ int free = -1;
+ int i, ret;
+
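+ /* Reuse a channel already set to this source, otherwise note the first free one */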
+ for (i = 0; i < LN2_NUM_GPIO_CHANNELS; i++) {
+ ret = regmap_read(regmap, LOCHNAGAR2_GPIO_CHANNEL1 + i, &val);
+ if (ret)
+ return ret;
+
+ val &= LOCHNAGAR2_GPIO_CHANNEL_SRC_MASK;
+
+ if (val == op)
+ return i + 1;
+
+ if (free < 0 && !val)
+ free = i;
+ }
+
+ if (free >= 0) {
+ ret = regmap_update_bits(regmap,
+ LOCHNAGAR2_GPIO_CHANNEL1 + free,
+ LOCHNAGAR2_GPIO_CHANNEL_SRC_MASK, op);
+ if (ret)
+ return ret;
+
+ free++;
+
+ dev_dbg(priv->dev, "Set channel %d to 0x%x\n", free, op);
+
+ return free;
+ }
+
+ return -ENOSPC;
+}
+
+static int lochnagar_pin_set_mux(struct lochnagar_pin_priv *priv,
+ const struct lochnagar_pin *pin,
+ unsigned int op)
+{
+ int ret;
+
+ switch (priv->lochnagar->type) {
+ case LOCHNAGAR1:
+ break;
+ default:
+ ret = lochnagar2_get_gpio_chan(priv, op);
+ if (ret < 0) {
+ dev_err(priv->dev, "Failed to get channel for %s: %d\n",
+ pin->name, ret);
+ return ret;
+ }
+
+ op = ret;
+ break;
+ }
+
+ dev_dbg(priv->dev, "Set pin %s to 0x%x\n", pin->name, op);
+
+ ret = regmap_write(priv->lochnagar->regmap, pin->reg, op);
+ if (ret)
+ dev_err(priv->dev, "Failed to set %s mux: %d\n",
+ pin->name, ret);
+
+ return ret;
+}
+
+static int lochnagar_aif_set_mux(struct lochnagar_pin_priv *priv,
+ const struct lochnagar_group *group,
+ unsigned int op)
+{
+ struct regmap *regmap = priv->lochnagar->regmap;
+ const struct lochnagar_aif *aif = group->priv;
+ const struct lochnagar_pin *pin;
+ int i, ret;
+
+ ret = regmap_update_bits(regmap, aif->src_reg, aif->src_mask, op);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set %s source: %d\n",
+ group->name, ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(regmap, aif->ctrl_reg,
+ aif->ena_mask, aif->ena_mask);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set %s enable: %d\n",
+ group->name, ret);
+ return ret;
+ }
+
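+ /* Switch any mux-type pins in the group back to their AIF function */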
+ for (i = 0; i < group->npins; i++) {
+ pin = priv->pins[group->pins[i]].drv_data;
+
+ if (pin->type != LN_PTYPE_MUX)
+ continue;
+
+ dev_dbg(priv->dev, "Set pin %s to AIF\n", pin->name);
+
+ ret = regmap_update_bits(regmap, pin->reg,
+ LOCHNAGAR2_GPIO_SRC_MASK,
+ LN2_OP_AIF);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set %s to AIF: %d\n",
+ pin->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int lochnagar_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_idx, unsigned int group_idx)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ const struct lochnagar_func *func = &priv->funcs[func_idx];
+ const struct lochnagar_group *group = &priv->groups[group_idx];
+ const struct lochnagar_pin *pin;
+
+ switch (func->type) {
+ case LN_FTYPE_AIF:
+ dev_dbg(priv->dev, "Set group %s to %s\n",
+ group->name, func->name);
+
+ return lochnagar_aif_set_mux(priv, group, func->op);
+ case LN_FTYPE_PIN:
+ pin = priv->pins[*group->pins].drv_data;
+
+ dev_dbg(priv->dev, "Set pin %s to %s\n", pin->name, func->name);
+
+ return lochnagar_pin_set_mux(priv, pin, func->op);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lochnagar_gpio_request(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ struct lochnagar *lochnagar = priv->lochnagar;
+ const struct lochnagar_pin *pin = priv->pins[offset].drv_data;
+ int ret;
+
+ dev_dbg(priv->dev, "Requesting GPIO %s\n", pin->name);
+
+ if (lochnagar->type == LOCHNAGAR1 || pin->type != LN_PTYPE_MUX)
+ return 0;
+
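+ /*
+ * Reserve channels for both output levels now, so the set callback
+ * always has a channel available for LN2_OP_GPIO and LN2_OP_GPIO | 0x1.
+ */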
+ ret = lochnagar2_get_gpio_chan(priv, LN2_OP_GPIO);
+ if (ret < 0) {
+ dev_err(priv->dev, "Failed to get low channel: %d\n", ret);
+ return ret;
+ }
+
+ ret = lochnagar2_get_gpio_chan(priv, LN2_OP_GPIO | 0x1);
+ if (ret < 0) {
+ dev_err(priv->dev, "Failed to get high channel: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lochnagar_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset,
+ bool input)
+{
+ /* The GPIOs only support output */
+ if (input)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct pinmux_ops lochnagar_pin_mux_ops = {
+ .get_functions_count = lochnagar_get_funcs_count,
+ .get_function_name = lochnagar_get_func_name,
+ .get_function_groups = lochnagar_get_func_groups,
+ .set_mux = lochnagar_set_mux,
+
+ .gpio_request_enable = lochnagar_gpio_request,
+ .gpio_set_direction = lochnagar_gpio_set_direction,
+
+ .strict = true,
+};
+
+static int lochnagar_aif_set_master(struct lochnagar_pin_priv *priv,
+ unsigned int group_idx, bool master)
+{
+ struct regmap *regmap = priv->lochnagar->regmap;
+ const struct lochnagar_group *group = &priv->groups[group_idx];
+ const struct lochnagar_aif *aif = group->priv;
+ unsigned int val = 0;
+ int ret;
+
+ if (group->type != LN_FTYPE_AIF)
+ return -EINVAL;
+
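+ /* Clearing the LRCLK/BCLK direction bits selects master mode, setting them selects slave */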
+ if (!master)
+ val = aif->master_mask;
+
+ dev_dbg(priv->dev, "Set AIF %s to %s\n",
+ group->name, master ? "master" : "slave");
+
+ ret = regmap_update_bits(regmap, aif->ctrl_reg, aif->master_mask, val);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set %s mode: %d\n",
+ group->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lochnagar_conf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group_idx,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct lochnagar_pin_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ int i, ret;
+
+ for (i = 0; i < num_configs; i++) {
+ unsigned int param = pinconf_to_config_param(*configs);
+
+ switch (param) {
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ ret = lochnagar_aif_set_master(priv, group_idx, true);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ ret = lochnagar_aif_set_master(priv, group_idx, false);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ configs++;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops lochnagar_pin_conf_ops = {
+ .pin_config_group_set = lochnagar_conf_group_set,
+};
+
+static const struct pinctrl_desc lochnagar_pin_desc = {
+ .name = "lochnagar-pinctrl",
+ .owner = THIS_MODULE,
+
+ .pctlops = &lochnagar_pin_group_ops,
+ .pmxops = &lochnagar_pin_mux_ops,
+ .confops = &lochnagar_pin_conf_ops,
+};
+
+static void lochnagar_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct lochnagar_pin_priv *priv = gpiochip_get_data(chip);
+ struct lochnagar *lochnagar = priv->lochnagar;
+ const struct lochnagar_pin *pin = priv->pins[offset].drv_data;
+ int ret;
+
+ value = !!value;
+
+ dev_dbg(priv->dev, "Set GPIO %s to %s\n",
+ pin->name, value ? "high" : "low");
+
+ switch (pin->type) {
+ case LN_PTYPE_MUX:
+ value |= LN2_OP_GPIO;
+
+ ret = lochnagar_pin_set_mux(priv, pin, value);
+ break;
+ case LN_PTYPE_GPIO:
+ if (pin->invert)
+ value = !value;
+
+ ret = regmap_update_bits(lochnagar->regmap, pin->reg,
+ BIT(pin->shift), value << pin->shift);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0)
+ dev_err(chip->parent, "Failed to set %s value: %d\n",
+ pin->name, ret);
+}
+
+static int lochnagar_gpio_direction_out(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ lochnagar_gpio_set(chip, offset, value);
+
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int lochnagar_fill_func_groups(struct lochnagar_pin_priv *priv)
+{
+ struct lochnagar_func_groups *funcs;
+ int i;
+
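+ /* Count the groups per function type, allocate the name arrays, then fill them */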
+ for (i = 0; i < priv->ngroups; i++)
+ priv->func_groups[priv->groups[i].type].ngroups++;
+
+ for (i = 0; i < LN_FTYPE_COUNT; i++) {
+ funcs = &priv->func_groups[i];
+
+ if (!funcs->ngroups)
+ continue;
+
+ funcs->groups = devm_kcalloc(priv->dev, funcs->ngroups,
+ sizeof(*funcs->groups),
+ GFP_KERNEL);
+ if (!funcs->groups)
+ return -ENOMEM;
+
+ funcs->ngroups = 0;
+ }
+
+ for (i = 0; i < priv->ngroups; i++) {
+ funcs = &priv->func_groups[priv->groups[i].type];
+
+ funcs->groups[funcs->ngroups++] = priv->groups[i].name;
+ }
+
+ return 0;
+}
+
+static int lochnagar_pin_probe(struct platform_device *pdev)
+{
+ struct lochnagar *lochnagar = dev_get_drvdata(pdev->dev.parent);
+ struct lochnagar_pin_priv *priv;
+ struct pinctrl_desc *desc;
+ struct pinctrl_dev *pctl;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->lochnagar = lochnagar;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ *desc = lochnagar_pin_desc;
+
+ priv->gpio_chip.label = dev_name(dev);
+ priv->gpio_chip.request = gpiochip_generic_request;
+ priv->gpio_chip.free = gpiochip_generic_free;
+ priv->gpio_chip.direction_output = lochnagar_gpio_direction_out;
+ priv->gpio_chip.set = lochnagar_gpio_set;
+ priv->gpio_chip.can_sleep = true;
+ priv->gpio_chip.parent = dev;
+ priv->gpio_chip.base = -1;
+#ifdef CONFIG_OF_GPIO
+ priv->gpio_chip.of_node = dev->of_node;
+#endif
+
+ switch (lochnagar->type) {
+ case LOCHNAGAR1:
+ priv->funcs = lochnagar1_funcs;
+ priv->nfuncs = ARRAY_SIZE(lochnagar1_funcs);
+ priv->pins = lochnagar1_pins;
+ priv->npins = ARRAY_SIZE(lochnagar1_pins);
+ priv->groups = lochnagar1_groups;
+ priv->ngroups = ARRAY_SIZE(lochnagar1_groups);
+
+ priv->gpio_chip.ngpio = LOCHNAGAR1_PIN_NUM_GPIOS;
+ break;
+ case LOCHNAGAR2:
+ priv->funcs = lochnagar2_funcs;
+ priv->nfuncs = ARRAY_SIZE(lochnagar2_funcs);
+ priv->pins = lochnagar2_pins;
+ priv->npins = ARRAY_SIZE(lochnagar2_pins);
+ priv->groups = lochnagar2_groups;
+ priv->ngroups = ARRAY_SIZE(lochnagar2_groups);
+
+ priv->gpio_chip.ngpio = LOCHNAGAR2_PIN_NUM_GPIOS;
+ break;
+ default:
+ dev_err(dev, "Unknown Lochnagar type: %d\n", lochnagar->type);
+ return -EINVAL;
+ }
+
+ ret = lochnagar_fill_func_groups(priv);
+ if (ret < 0)
+ return ret;
+
+ desc->pins = priv->pins;
+ desc->npins = priv->npins;
+
+ pctl = devm_pinctrl_register(dev, desc, priv);
+ if (IS_ERR(pctl)) {
+ ret = PTR_ERR(pctl);
+ dev_err(priv->dev, "Failed to register pinctrl: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id lochnagar_of_match[] = {
+ { .compatible = "cirrus,lochnagar-pinctrl" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lochnagar_of_match);
+
+static struct platform_driver lochnagar_pin_driver = {
+ .driver = {
+ .name = "lochnagar-pinctrl",
+ .of_match_table = of_match_ptr(lochnagar_of_match),
+ },
+
+ .probe = lochnagar_pin_probe,
+};
+module_platform_driver(lochnagar_pin_driver);
+
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Pinctrl driver for Cirrus Logic Lochnagar Board");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 188001beb298..83ff9532bae6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -449,7 +449,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
}
} else {
pin_reg = &ipctl->pin_regs[pin_id];
- if (!pin_reg || pin_reg->conf_reg == -1) {
+ if (pin_reg->conf_reg == -1) {
seq_puts(s, "N/A");
return;
}
@@ -785,7 +785,6 @@ int imx_pinctrl_probe(struct platform_device *pdev,
struct pinctrl_desc *imx_pinctrl_desc;
struct device_node *np;
struct imx_pinctrl *ipctl;
- struct resource *res;
struct regmap *gpr;
int ret, i;
@@ -817,8 +816,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
ipctl->pin_regs[i].conf_reg = -1;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ipctl->base = devm_ioremap_resource(&pdev->dev, res);
+ ipctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ipctl->base))
return PTR_ERR(ipctl->base);
@@ -887,3 +885,22 @@ free:
return ret;
}
+
+static int __maybe_unused imx_pinctrl_suspend(struct device *dev)
+{
+ struct imx_pinctrl *ipctl = dev_get_drvdata(dev);
+
+ return pinctrl_force_sleep(ipctl->pctl);
+}
+
+static int __maybe_unused imx_pinctrl_resume(struct device *dev)
+{
+ struct imx_pinctrl *ipctl = dev_get_drvdata(dev);
+
+ return pinctrl_force_default(ipctl->pctl);
+}
+
+const struct dev_pm_ops imx_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend,
+ imx_pinctrl_resume)
+};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 98a4889af4ef..333d32b947b1 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -17,6 +17,7 @@
struct platform_device;
extern struct pinmux_ops imx_pmx_ops;
+extern const struct dev_pm_ops imx_pinctrl_pm_ops;
/**
* struct imx_pin_mmio - MMIO pin configurations
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
index 8d39af541d5f..50aa1c00c4b2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mq.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
@@ -339,6 +339,7 @@ static struct platform_driver imx8mq_pinctrl_driver = {
.driver = {
.name = "imx8mq-pinctrl",
.of_match_table = of_match_ptr(imx8mq_pinctrl_of_match),
+ .pm = &imx_pinctrl_pm_ops,
.suppress_bind_attrs = true,
},
.probe = imx8mq_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-scu.c b/drivers/pinctrl/freescale/pinctrl-scu.c
index 83e69c0f39e6..73bf1d9f9cc6 100644
--- a/drivers/pinctrl/freescale/pinctrl-scu.c
+++ b/drivers/pinctrl/freescale/pinctrl-scu.c
@@ -35,7 +35,7 @@ struct imx_sc_msg_resp_pad_get {
u32 val;
} __packed;
-struct imx_sc_ipc *pinctrl_ipc_handle;
+static struct imx_sc_ipc *pinctrl_ipc_handle;
int imx_pinctrl_sc_ipc_init(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 241384ead4ed..18d9ad504194 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1710,6 +1710,8 @@ static int byt_gpio_probe(struct byt_gpio *vg)
#ifdef CONFIG_PM_SLEEP
vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
sizeof(*vg->saved_context), GFP_KERNEL);
+ if (!vg->saved_context)
+ return -ENOMEM;
#endif
ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
if (ret) {
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
index b7d632f1dbf6..aa6f9040d3d8 100644
--- a/drivers/pinctrl/intel/pinctrl-cedarfork.c
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -91,13 +91,13 @@ static const struct pinctrl_pin_desc cdf_pins[] = {
PINCTRL_PIN(43, "MEMTRIP_N"),
PINCTRL_PIN(44, "UART0_RXD"),
PINCTRL_PIN(45, "UART0_TXD"),
- PINCTRL_PIN(46, "UART1_RXD"),
- PINCTRL_PIN(47, "UART1_TXD"),
+ PINCTRL_PIN(46, "GBE_UART_RXD"),
+ PINCTRL_PIN(47, "GBE_UART_TXD"),
/* WEST01 */
PINCTRL_PIN(48, "GBE_GPIO13"),
PINCTRL_PIN(49, "AUX_PWR"),
- PINCTRL_PIN(50, "CPU_GP_2"),
- PINCTRL_PIN(51, "CPU_GP_3"),
+ PINCTRL_PIN(50, "UART0_RTS"),
+ PINCTRL_PIN(51, "UART0_CTS"),
PINCTRL_PIN(52, "FAN_PWM_0"),
PINCTRL_PIN(53, "FAN_PWM_1"),
PINCTRL_PIN(54, "FAN_PWM_2"),
@@ -201,8 +201,8 @@ static const struct pinctrl_pin_desc cdf_pins[] = {
/* WESTF */
PINCTRL_PIN(145, "NAC_RMII_CLK"),
PINCTRL_PIN(146, "NAC_RGMII_CLK"),
- PINCTRL_PIN(147, "NAC_SPARE0"),
- PINCTRL_PIN(148, "NAC_SPARE1"),
+ PINCTRL_PIN(147, "NAC_GBE_SMB_CLK_TX_N2S"),
+ PINCTRL_PIN(148, "NAC_GBE_SMB_DATA_TX_N2S"),
PINCTRL_PIN(149, "NAC_SPARE2"),
PINCTRL_PIN(150, "NAC_INIT_SX_WAKE_N"),
PINCTRL_PIN(151, "NAC_GBE_GPIO0_S2N"),
@@ -219,8 +219,8 @@ static const struct pinctrl_pin_desc cdf_pins[] = {
PINCTRL_PIN(162, "NAC_NCSI_TXD1"),
PINCTRL_PIN(163, "NAC_NCSI_ARB_OUT"),
PINCTRL_PIN(164, "NAC_NCSI_OE_N"),
- PINCTRL_PIN(165, "NAC_GBE_SMB_CLK"),
- PINCTRL_PIN(166, "NAC_GBE_SMB_DATA"),
+ PINCTRL_PIN(165, "NAC_GBE_SMB_CLK_RX_S2N"),
+ PINCTRL_PIN(166, "NAC_GBE_SMB_DATA_RX_S2N"),
PINCTRL_PIN(167, "NAC_GBE_SMB_ALRT_N"),
/* EAST2 */
PINCTRL_PIN(168, "USB_OC0_N"),
@@ -232,7 +232,7 @@ static const struct pinctrl_pin_desc cdf_pins[] = {
PINCTRL_PIN(174, "GBE_GPIO5"),
PINCTRL_PIN(175, "GBE_GPIO6"),
PINCTRL_PIN(176, "GBE_GPIO7"),
- PINCTRL_PIN(177, "GBE_GPIO8"),
+ PINCTRL_PIN(177, "SPI_TPM_CS_N"),
PINCTRL_PIN(178, "GBE_GPIO9"),
PINCTRL_PIN(179, "GBE_GPIO10"),
PINCTRL_PIN(180, "GBE_GPIO11"),
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 3b1818184207..d7acbb79cdf7 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -81,6 +81,7 @@ struct intel_pad_context {
struct intel_community_context {
u32 *intmask;
+ u32 *hostown;
};
struct intel_pinctrl_context {
@@ -1284,7 +1285,7 @@ static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
for (i = 0; i < pctrl->ncommunities; i++) {
struct intel_community *community = &pctrl->communities[i];
- u32 *intmask;
+ u32 *intmask, *hostown;
intmask = devm_kcalloc(pctrl->dev, community->ngpps,
sizeof(*intmask), GFP_KERNEL);
@@ -1292,6 +1293,13 @@ static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
return -ENOMEM;
communities[i].intmask = intmask;
+
+ hostown = devm_kcalloc(pctrl->dev, community->ngpps,
+ sizeof(*hostown), GFP_KERNEL);
+ if (!hostown)
+ return -ENOMEM;
+
+ communities[i].hostown = hostown;
}
pctrl->context.pads = pads;
@@ -1466,7 +1474,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
return false;
}
-int intel_pinctrl_suspend(struct device *dev)
+int intel_pinctrl_suspend_noirq(struct device *dev)
{
struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
struct intel_community_context *communities;
@@ -1501,11 +1509,15 @@ int intel_pinctrl_suspend(struct device *dev)
base = community->regs + community->ie_offset;
for (gpp = 0; gpp < community->ngpps; gpp++)
communities[i].intmask[gpp] = readl(base + gpp * 4);
+
+ base = community->regs + community->hostown_offset;
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ communities[i].hostown[gpp] = readl(base + gpp * 4);
}
return 0;
}
-EXPORT_SYMBOL_GPL(intel_pinctrl_suspend);
+EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq);
static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
{
@@ -1527,7 +1539,32 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
}
}
-int intel_pinctrl_resume(struct device *dev)
+static u32
+intel_gpio_is_requested(struct gpio_chip *chip, int base, unsigned int size)
+{
+ u32 requested = 0;
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (gpiochip_is_requested(chip, base + i))
+ requested |= BIT(i);
+
+ return requested;
+}
+
+static u32
+intel_gpio_update_pad_mode(void __iomem *hostown, u32 mask, u32 value)
+{
+ u32 curr, updated;
+
+ curr = readl(hostown);
+ updated = (curr & ~mask) | (value & mask);
+ writel(updated, hostown);
+
+ return curr;
+}
+
+int intel_pinctrl_resume_noirq(struct device *dev)
{
struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
const struct intel_community_context *communities;
@@ -1585,11 +1622,30 @@ int intel_pinctrl_resume(struct device *dev)
dev_dbg(dev, "restored mask %d/%u %#08x\n", i, gpp,
readl(base + gpp * 4));
}
+
+ base = community->regs + community->hostown_offset;
+ for (gpp = 0; gpp < community->ngpps; gpp++) {
+ const struct intel_padgroup *padgrp = &community->gpps[gpp];
+ u32 requested = 0, value = 0;
+ u32 saved = communities[i].hostown[gpp];
+
+ if (padgrp->gpio_base < 0)
+ continue;
+
+ requested = intel_gpio_is_requested(&pctrl->chip,
+ padgrp->gpio_base, padgrp->size);
+ value = intel_gpio_update_pad_mode(base + gpp * 4,
+ requested, saved);
+ if ((value ^ saved) & requested) {
+ dev_warn(dev, "restore hostown %d/%u %#8x->%#8x\n",
+ i, gpp, value, saved);
+ }
+ }
}
return 0;
}
-EXPORT_SYMBOL_GPL(intel_pinctrl_resume);
+EXPORT_SYMBOL_GPL(intel_pinctrl_resume_noirq);
#endif
MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index b8a07d37d18f..a8e958f1dcf5 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -177,13 +177,14 @@ int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
#ifdef CONFIG_PM_SLEEP
-int intel_pinctrl_suspend(struct device *dev);
-int intel_pinctrl_resume(struct device *dev);
+int intel_pinctrl_suspend_noirq(struct device *dev);
+int intel_pinctrl_resume_noirq(struct device *dev);
#endif
-#define INTEL_PINCTRL_PM_OPS(_name) \
-const struct dev_pm_ops _name = { \
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend, intel_pinctrl_resume) \
+#define INTEL_PINCTRL_PM_OPS(_name) \
+const struct dev_pm_ops _name = { \
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend_noirq, \
+ intel_pinctrl_resume_noirq) \
}
#endif /* PINCTRL_INTEL_H */
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index a005cbccb4f7..26ed5dca1460 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -113,6 +113,13 @@ config PINCTRL_MT8183
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT8516
+ bool "Mediatek MT8516 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK
+
# For PMIC
config PINCTRL_MT6397
bool "Mediatek MT6397 pin control"
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 4b4e2eaf6f2d..a74325abd877 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -17,4 +17,5 @@ obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
+obj-$(CONFIG_PINCTRL_MT8516) += pinctrl-mt8516.o
obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8183.c b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
index 6262fd3678ea..2c7409ed16fa 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8183.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
@@ -472,6 +472,51 @@ static const struct mtk_pin_field_calc mt8183_pin_r1_range[] = {
PIN_FIELD_BASE(133, 133, 8, 0x0D0, 0x10, 13, 1),
};
+static const struct mtk_pin_field_calc mt8183_pin_e1e0en_range[] = {
+ PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 20, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 15, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 12, 1),
+ PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 7, 1),
+ PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 12, 1),
+ PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 9, 1),
+ PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 19, 1),
+ PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 22, 1),
+ PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 24, 1),
+ PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 14, 1),
+ PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 27, 1),
+ PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 17, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_e0_range[] = {
+ PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 21, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 16, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 13, 1),
+ PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 8, 1),
+ PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 13, 1),
+ PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 10, 1),
+ PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 20, 1),
+ PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 23, 1),
+ PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 25, 1),
+ PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 15, 1),
+ PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 28, 1),
+ PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 18, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_e1_range[] = {
+ PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 22, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 14, 1),
+ PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 9, 1),
+ PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 14, 1),
+ PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 11, 1),
+ PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 21, 1),
+ PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 24, 1),
+ PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 26, 1),
+ PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 16, 1),
+ PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 29, 1),
+ PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 19, 1),
+};
+
static const struct mtk_pin_reg_calc mt8183_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8183_pin_mode_range),
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8183_pin_dir_range),
@@ -485,6 +530,9 @@ static const struct mtk_pin_reg_calc mt8183_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8183_pin_pupd_range),
[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8183_pin_r0_range),
[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8183_pin_r1_range),
+ [PINCTRL_PIN_REG_DRV_EN] = MTK_RANGE(mt8183_pin_e1e0en_range),
+ [PINCTRL_PIN_REG_DRV_E0] = MTK_RANGE(mt8183_pin_e0_range),
+ [PINCTRL_PIN_REG_DRV_E1] = MTK_RANGE(mt8183_pin_e1_range),
};
static const char * const mt8183_pinctrl_register_base_names[] = {
@@ -517,6 +565,8 @@ static const struct mtk_pin_soc mt8183_data = {
.drive_get = mtk_pinconf_drive_get_rev1,
.adv_pull_get = mtk_pinconf_adv_pull_get,
.adv_pull_set = mtk_pinconf_adv_pull_set,
+ .adv_drive_get = mtk_pinconf_adv_drive_get,
+ .adv_drive_set = mtk_pinconf_adv_drive_set,
};
static const struct of_device_id mt8183_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8516.c b/drivers/pinctrl/mediatek/pinctrl-mt8516.c
new file mode 100644
index 000000000000..b375426aa61e
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8516.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Min.Guo <min.guo@mediatek.com>
+ */
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt8516.h"
+
+static const struct mtk_drv_group_desc mt8516_drv_grp[] = {
+ /* 0E4E8SR 4/8/12/16 */
+ MTK_DRV_GRP(4, 16, 1, 2, 4),
+ /* 0E2E4SR 2/4/6/8 */
+ MTK_DRV_GRP(2, 8, 1, 2, 2),
+ /* E8E4E2 2/4/6/8/10/12/14/16 */
+ MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
+
+static const struct mtk_pin_drv_grp mt8516_pin_drv[] = {
+ MTK_PIN_DRV_GRP(0, 0xd00, 0, 0),
+ MTK_PIN_DRV_GRP(1, 0xd00, 0, 0),
+ MTK_PIN_DRV_GRP(2, 0xd00, 0, 0),
+ MTK_PIN_DRV_GRP(3, 0xd00, 0, 0),
+ MTK_PIN_DRV_GRP(4, 0xd00, 0, 0),
+
+ MTK_PIN_DRV_GRP(5, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(6, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(7, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(8, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(9, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(10, 0xd00, 4, 0),
+
+ MTK_PIN_DRV_GRP(11, 0xd00, 8, 0),
+ MTK_PIN_DRV_GRP(12, 0xd00, 8, 0),
+ MTK_PIN_DRV_GRP(13, 0xd00, 8, 0),
+
+ MTK_PIN_DRV_GRP(14, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(15, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(16, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(17, 0xd00, 12, 2),
+
+ MTK_PIN_DRV_GRP(18, 0xd10, 0, 0),
+ MTK_PIN_DRV_GRP(19, 0xd10, 0, 0),
+ MTK_PIN_DRV_GRP(20, 0xd10, 0, 0),
+
+ MTK_PIN_DRV_GRP(21, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(22, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(23, 0xd00, 12, 2),
+
+ MTK_PIN_DRV_GRP(24, 0xd00, 8, 0),
+ MTK_PIN_DRV_GRP(25, 0xd00, 8, 0),
+
+ MTK_PIN_DRV_GRP(26, 0xd10, 4, 1),
+ MTK_PIN_DRV_GRP(27, 0xd10, 4, 1),
+ MTK_PIN_DRV_GRP(28, 0xd10, 4, 1),
+ MTK_PIN_DRV_GRP(29, 0xd10, 4, 1),
+ MTK_PIN_DRV_GRP(30, 0xd10, 4, 1),
+
+ MTK_PIN_DRV_GRP(31, 0xd10, 8, 1),
+ MTK_PIN_DRV_GRP(32, 0xd10, 8, 1),
+ MTK_PIN_DRV_GRP(33, 0xd10, 8, 1),
+
+ MTK_PIN_DRV_GRP(34, 0xd10, 12, 0),
+ MTK_PIN_DRV_GRP(35, 0xd10, 12, 0),
+
+ MTK_PIN_DRV_GRP(36, 0xd20, 0, 0),
+ MTK_PIN_DRV_GRP(37, 0xd20, 0, 0),
+ MTK_PIN_DRV_GRP(38, 0xd20, 0, 0),
+ MTK_PIN_DRV_GRP(39, 0xd20, 0, 0),
+
+ MTK_PIN_DRV_GRP(40, 0xd20, 4, 1),
+
+ MTK_PIN_DRV_GRP(41, 0xd20, 8, 1),
+ MTK_PIN_DRV_GRP(42, 0xd20, 8, 1),
+ MTK_PIN_DRV_GRP(43, 0xd20, 8, 1),
+
+ MTK_PIN_DRV_GRP(44, 0xd20, 12, 1),
+ MTK_PIN_DRV_GRP(45, 0xd20, 12, 1),
+ MTK_PIN_DRV_GRP(46, 0xd20, 12, 1),
+ MTK_PIN_DRV_GRP(47, 0xd20, 12, 1),
+
+ MTK_PIN_DRV_GRP(48, 0xd30, 0, 1),
+ MTK_PIN_DRV_GRP(49, 0xd30, 0, 1),
+ MTK_PIN_DRV_GRP(50, 0xd30, 0, 1),
+ MTK_PIN_DRV_GRP(51, 0xd30, 0, 1),
+
+ MTK_PIN_DRV_GRP(54, 0xd30, 8, 1),
+
+ MTK_PIN_DRV_GRP(55, 0xd30, 12, 1),
+ MTK_PIN_DRV_GRP(56, 0xd30, 12, 1),
+ MTK_PIN_DRV_GRP(57, 0xd30, 12, 1),
+
+ MTK_PIN_DRV_GRP(62, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(63, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(64, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(65, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(66, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(67, 0xd40, 8, 1),
+
+ MTK_PIN_DRV_GRP(68, 0xd40, 12, 2),
+
+ MTK_PIN_DRV_GRP(69, 0xd50, 0, 2),
+
+ MTK_PIN_DRV_GRP(70, 0xd50, 4, 2),
+ MTK_PIN_DRV_GRP(71, 0xd50, 4, 2),
+ MTK_PIN_DRV_GRP(72, 0xd50, 4, 2),
+ MTK_PIN_DRV_GRP(73, 0xd50, 4, 2),
+
+ MTK_PIN_DRV_GRP(100, 0xd50, 8, 1),
+ MTK_PIN_DRV_GRP(101, 0xd50, 8, 1),
+ MTK_PIN_DRV_GRP(102, 0xd50, 8, 1),
+ MTK_PIN_DRV_GRP(103, 0xd50, 8, 1),
+
+ MTK_PIN_DRV_GRP(104, 0xd50, 12, 2),
+
+ MTK_PIN_DRV_GRP(105, 0xd60, 0, 2),
+
+ MTK_PIN_DRV_GRP(106, 0xd60, 4, 2),
+ MTK_PIN_DRV_GRP(107, 0xd60, 4, 2),
+ MTK_PIN_DRV_GRP(108, 0xd60, 4, 2),
+ MTK_PIN_DRV_GRP(109, 0xd60, 4, 2),
+
+ MTK_PIN_DRV_GRP(110, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(111, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(112, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(113, 0xd70, 0, 2),
+
+ MTK_PIN_DRV_GRP(114, 0xd70, 4, 2),
+
+ MTK_PIN_DRV_GRP(115, 0xd60, 12, 2),
+
+ MTK_PIN_DRV_GRP(116, 0xd60, 8, 2),
+
+ MTK_PIN_DRV_GRP(117, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(118, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(119, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(120, 0xd70, 0, 2),
+};
+
+static const struct mtk_pin_spec_pupd_set_samereg mt8516_spec_pupd[] = {
+ MTK_PIN_PUPD_SPEC_SR(14, 0xe50, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(15, 0xe60, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(16, 0xe60, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(17, 0xe60, 10, 9, 8),
+
+ MTK_PIN_PUPD_SPEC_SR(21, 0xe60, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(22, 0xe70, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(23, 0xe70, 6, 5, 4),
+
+ MTK_PIN_PUPD_SPEC_SR(40, 0xe80, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(41, 0xe80, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(42, 0xe90, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(43, 0xe90, 6, 5, 4),
+
+ MTK_PIN_PUPD_SPEC_SR(68, 0xe50, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(69, 0xe50, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(70, 0xe40, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(71, 0xe40, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(72, 0xe40, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(73, 0xe50, 2, 1, 0),
+
+ MTK_PIN_PUPD_SPEC_SR(104, 0xe40, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(105, 0xe30, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(106, 0xe20, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(107, 0xe30, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(108, 0xe30, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(109, 0xe30, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(110, 0xe10, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(111, 0xe10, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(112, 0xe10, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(113, 0xe10, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(114, 0xe20, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(115, 0xe20, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(116, 0xe20, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(117, 0xe00, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(118, 0xe00, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(119, 0xe00, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(120, 0xe00, 2, 1, 0),
+};
+
+static int mt8516_spec_pull_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, bool isup, unsigned int r1r0)
+{
+ return mtk_pctrl_spec_pull_set_samereg(regmap, mt8516_spec_pupd,
+ ARRAY_SIZE(mt8516_spec_pupd), pin, align, isup, r1r0);
+}
+
+static const struct mtk_pin_ies_smt_set mt8516_ies_set[] = {
+ MTK_PIN_IES_SMT_SPEC(0, 6, 0x900, 2),
+ MTK_PIN_IES_SMT_SPEC(7, 10, 0x900, 3),
+ MTK_PIN_IES_SMT_SPEC(11, 13, 0x900, 12),
+ MTK_PIN_IES_SMT_SPEC(14, 17, 0x900, 13),
+ MTK_PIN_IES_SMT_SPEC(18, 20, 0x910, 10),
+ MTK_PIN_IES_SMT_SPEC(21, 23, 0x900, 13),
+ MTK_PIN_IES_SMT_SPEC(24, 25, 0x900, 12),
+ MTK_PIN_IES_SMT_SPEC(26, 30, 0x900, 0),
+ MTK_PIN_IES_SMT_SPEC(31, 33, 0x900, 1),
+ MTK_PIN_IES_SMT_SPEC(34, 39, 0x900, 2),
+ MTK_PIN_IES_SMT_SPEC(40, 40, 0x910, 11),
+ MTK_PIN_IES_SMT_SPEC(41, 43, 0x900, 10),
+ MTK_PIN_IES_SMT_SPEC(44, 47, 0x900, 11),
+ MTK_PIN_IES_SMT_SPEC(48, 51, 0x900, 14),
+ MTK_PIN_IES_SMT_SPEC(52, 53, 0x910, 0),
+ MTK_PIN_IES_SMT_SPEC(54, 54, 0x910, 2),
+ MTK_PIN_IES_SMT_SPEC(55, 57, 0x910, 4),
+ MTK_PIN_IES_SMT_SPEC(58, 59, 0x900, 15),
+ MTK_PIN_IES_SMT_SPEC(60, 61, 0x910, 1),
+ MTK_PIN_IES_SMT_SPEC(62, 65, 0x910, 5),
+ MTK_PIN_IES_SMT_SPEC(66, 67, 0x910, 6),
+ MTK_PIN_IES_SMT_SPEC(68, 68, 0x930, 2),
+ MTK_PIN_IES_SMT_SPEC(69, 69, 0x930, 1),
+ MTK_PIN_IES_SMT_SPEC(70, 70, 0x930, 6),
+ MTK_PIN_IES_SMT_SPEC(71, 71, 0x930, 5),
+ MTK_PIN_IES_SMT_SPEC(72, 72, 0x930, 4),
+ MTK_PIN_IES_SMT_SPEC(73, 73, 0x930, 3),
+ MTK_PIN_IES_SMT_SPEC(100, 103, 0x910, 7),
+ MTK_PIN_IES_SMT_SPEC(104, 104, 0x920, 12),
+ MTK_PIN_IES_SMT_SPEC(105, 105, 0x920, 11),
+ MTK_PIN_IES_SMT_SPEC(106, 106, 0x930, 0),
+ MTK_PIN_IES_SMT_SPEC(107, 107, 0x920, 15),
+ MTK_PIN_IES_SMT_SPEC(108, 108, 0x920, 14),
+ MTK_PIN_IES_SMT_SPEC(109, 109, 0x920, 13),
+ MTK_PIN_IES_SMT_SPEC(110, 110, 0x920, 9),
+ MTK_PIN_IES_SMT_SPEC(111, 111, 0x920, 8),
+ MTK_PIN_IES_SMT_SPEC(112, 112, 0x920, 7),
+ MTK_PIN_IES_SMT_SPEC(113, 113, 0x920, 6),
+ MTK_PIN_IES_SMT_SPEC(114, 114, 0x920, 10),
+ MTK_PIN_IES_SMT_SPEC(115, 115, 0x920, 1),
+ MTK_PIN_IES_SMT_SPEC(116, 116, 0x920, 0),
+ MTK_PIN_IES_SMT_SPEC(117, 117, 0x920, 5),
+ MTK_PIN_IES_SMT_SPEC(118, 118, 0x920, 4),
+ MTK_PIN_IES_SMT_SPEC(119, 119, 0x920, 3),
+ MTK_PIN_IES_SMT_SPEC(120, 120, 0x920, 2),
+ MTK_PIN_IES_SMT_SPEC(121, 124, 0x910, 9),
+};
+
+static const struct mtk_pin_ies_smt_set mt8516_smt_set[] = {
+ MTK_PIN_IES_SMT_SPEC(0, 6, 0xA00, 2),
+ MTK_PIN_IES_SMT_SPEC(7, 10, 0xA00, 3),
+ MTK_PIN_IES_SMT_SPEC(11, 13, 0xA00, 12),
+ MTK_PIN_IES_SMT_SPEC(14, 17, 0xA00, 13),
+ MTK_PIN_IES_SMT_SPEC(18, 20, 0xA10, 10),
+ MTK_PIN_IES_SMT_SPEC(21, 23, 0xA00, 13),
+ MTK_PIN_IES_SMT_SPEC(24, 25, 0xA00, 12),
+ MTK_PIN_IES_SMT_SPEC(26, 30, 0xA00, 0),
+ MTK_PIN_IES_SMT_SPEC(31, 33, 0xA00, 1),
+ MTK_PIN_IES_SMT_SPEC(34, 39, 0xA00, 2),
+ MTK_PIN_IES_SMT_SPEC(40, 40, 0xA10, 11),
+ MTK_PIN_IES_SMT_SPEC(41, 43, 0xA00, 10),
+ MTK_PIN_IES_SMT_SPEC(44, 47, 0xA00, 11),
+ MTK_PIN_IES_SMT_SPEC(48, 51, 0xA00, 14),
+ MTK_PIN_IES_SMT_SPEC(52, 53, 0xA10, 0),
+ MTK_PIN_IES_SMT_SPEC(54, 54, 0xA10, 2),
+ MTK_PIN_IES_SMT_SPEC(55, 57, 0xA10, 4),
+ MTK_PIN_IES_SMT_SPEC(58, 59, 0xA00, 15),
+ MTK_PIN_IES_SMT_SPEC(60, 61, 0xA10, 1),
+ MTK_PIN_IES_SMT_SPEC(62, 65, 0xA10, 5),
+ MTK_PIN_IES_SMT_SPEC(66, 67, 0xA10, 6),
+ MTK_PIN_IES_SMT_SPEC(68, 68, 0xA30, 2),
+ MTK_PIN_IES_SMT_SPEC(69, 69, 0xA30, 1),
+ MTK_PIN_IES_SMT_SPEC(70, 70, 0xA30, 3),
+ MTK_PIN_IES_SMT_SPEC(71, 71, 0xA30, 4),
+ MTK_PIN_IES_SMT_SPEC(72, 72, 0xA30, 5),
+ MTK_PIN_IES_SMT_SPEC(73, 73, 0xA30, 6),
+
+ MTK_PIN_IES_SMT_SPEC(100, 103, 0xA10, 7),
+ MTK_PIN_IES_SMT_SPEC(104, 104, 0xA20, 12),
+ MTK_PIN_IES_SMT_SPEC(105, 105, 0xA20, 11),
+ MTK_PIN_IES_SMT_SPEC(106, 106, 0xA30, 13),
+ MTK_PIN_IES_SMT_SPEC(107, 107, 0xA20, 14),
+ MTK_PIN_IES_SMT_SPEC(108, 108, 0xA20, 15),
+ MTK_PIN_IES_SMT_SPEC(109, 109, 0xA30, 0),
+ MTK_PIN_IES_SMT_SPEC(110, 110, 0xA20, 9),
+ MTK_PIN_IES_SMT_SPEC(111, 111, 0xA20, 8),
+ MTK_PIN_IES_SMT_SPEC(112, 112, 0xA20, 7),
+ MTK_PIN_IES_SMT_SPEC(113, 113, 0xA20, 6),
+ MTK_PIN_IES_SMT_SPEC(114, 114, 0xA20, 10),
+ MTK_PIN_IES_SMT_SPEC(115, 115, 0xA20, 1),
+ MTK_PIN_IES_SMT_SPEC(116, 116, 0xA20, 0),
+ MTK_PIN_IES_SMT_SPEC(117, 117, 0xA20, 5),
+ MTK_PIN_IES_SMT_SPEC(118, 118, 0xA20, 4),
+ MTK_PIN_IES_SMT_SPEC(119, 119, 0xA20, 3),
+ MTK_PIN_IES_SMT_SPEC(120, 120, 0xA20, 2),
+ MTK_PIN_IES_SMT_SPEC(121, 124, 0xA10, 9),
+};
+
+static int mt8516_ies_smt_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, int value, enum pin_config_param arg)
+{
+ if (arg == PIN_CONFIG_INPUT_ENABLE)
+ return mtk_pconf_spec_set_ies_smt_range(regmap, mt8516_ies_set,
+ ARRAY_SIZE(mt8516_ies_set), pin, align, value);
+ else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
+ return mtk_pconf_spec_set_ies_smt_range(regmap, mt8516_smt_set,
+ ARRAY_SIZE(mt8516_smt_set), pin, align, value);
+ return -EINVAL;
+}
+
+static const struct mtk_pinctrl_devdata mt8516_pinctrl_data = {
+ .pins = mtk_pins_mt8516,
+ .npins = ARRAY_SIZE(mtk_pins_mt8516),
+ .grp_desc = mt8516_drv_grp,
+ .n_grp_cls = ARRAY_SIZE(mt8516_drv_grp),
+ .pin_drv_grp = mt8516_pin_drv,
+ .n_pin_drv_grps = ARRAY_SIZE(mt8516_pin_drv),
+ .spec_pull_set = mt8516_spec_pull_set,
+ .spec_ies_smt_set = mt8516_ies_smt_set,
+ .dir_offset = 0x0000,
+ .pullen_offset = 0x0500,
+ .pullsel_offset = 0x0600,
+ .dout_offset = 0x0100,
+ .din_offset = 0x0200,
+ .pinmux_offset = 0x0300,
+ .type1_start = 125,
+ .type1_end = 125,
+ .port_shf = 4,
+ .port_mask = 0xf,
+ .port_align = 4,
+ .eint_hw = {
+ .port_mask = 7,
+ .ports = 6,
+ .ap_num = 169,
+ .db_cnt = 64,
+ },
+};
+
+static int mt8516_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_pctrl_init(pdev, &mt8516_pinctrl_data, NULL);
+}
+
+static const struct of_device_id mt8516_pctrl_match[] = {
+ {
+ .compatible = "mediatek,mt8516-pinctrl",
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, mt8516_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+ .probe = mt8516_pinctrl_probe,
+ .driver = {
+ .name = "mediatek-mt8516-pinctrl",
+ .of_match_table = mt8516_pctrl_match,
+ .pm = &mtk_eint_pm_ops,
+ },
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+ return platform_driver_register(&mtk_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index b1c368455d30..20e1c890e73b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -674,3 +674,52 @@ int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
return 0;
}
+
+int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg)
+{
+ int err;
+ int en = arg & 1;
+ int e0 = !!(arg & 2);
+ int e1 = !!(arg & 4);
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_EN, en);
+ if (err)
+ return err;
+
+ if (!en)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_E0, e0);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_E1, e1);
+ if (err)
+ return err;
+
+ return err;
+}
+
+int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *val)
+{
+ u32 en, e0, e1;
+ int err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_EN, &en);
+ if (err)
+ return err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_E0, &e0);
+ if (err)
+ return err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_E1, &e1);
+ if (err)
+ return err;
+
+ *val = (en | e0 << 1 | e1 << 2) & 0x7;
+
+ return 0;
+}
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 6d24522739d9..1b7da42aa1d5 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -63,6 +63,9 @@ enum {
PINCTRL_PIN_REG_IES,
PINCTRL_PIN_REG_PULLEN,
PINCTRL_PIN_REG_PULLSEL,
+ PINCTRL_PIN_REG_DRV_EN,
+ PINCTRL_PIN_REG_DRV_E0,
+ PINCTRL_PIN_REG_DRV_E1,
PINCTRL_PIN_REG_MAX,
};
@@ -224,6 +227,10 @@ struct mtk_pin_soc {
int (*adv_pull_get)(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
u32 *val);
+ int (*adv_drive_set)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+ int (*adv_drive_get)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *val);
/* Specific driver data */
void *driver_data;
@@ -287,5 +294,9 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
u32 *val);
+int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *val);
#endif /* __PINCTRL_MTK_COMMON_V2_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8516.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8516.h
new file mode 100644
index 000000000000..f7a4c6e4a026
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8516.h
@@ -0,0 +1,1182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+#ifndef __PINCTRL_MTK_MT8516_H
+#define __PINCTRL_MTK_MT8516_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
+static const struct mtk_desc_pin mtk_pins_mt8516[] = {
+ MTK_PIN(
+ PINCTRL_PIN(0, "EINT0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 0),
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "PWM_B"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "EXT_TXD0"),
+ MTK_FUNCTION(6, "SQICS"),
+ MTK_FUNCTION(7, "DBG_MON_A[6]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(1, "EINT1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 1),
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "PWM_C"),
+ MTK_FUNCTION(3, "I2S2_DI"),
+ MTK_FUNCTION(4, "EXT_TXD1"),
+ MTK_FUNCTION(5, "CONN_MCU_TDO"),
+ MTK_FUNCTION(6, "SQISO"),
+ MTK_FUNCTION(7, "DBG_MON_A[7]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(2, "EINT2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 2),
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "CLKM0"),
+ MTK_FUNCTION(3, "I2S2_LRCK"),
+ MTK_FUNCTION(4, "EXT_TXD2"),
+ MTK_FUNCTION(5, "CONN_MCU_DBGACK_N"),
+ MTK_FUNCTION(6, "SQISI"),
+ MTK_FUNCTION(7, "DBG_MON_A[8]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(3, "EINT3"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 3),
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "CLKM1"),
+ MTK_FUNCTION(3, "SPI_MI"),
+ MTK_FUNCTION(4, "EXT_TXD3"),
+ MTK_FUNCTION(5, "CONN_MCU_DBGI_N"),
+ MTK_FUNCTION(6, "SQIWP"),
+ MTK_FUNCTION(7, "DBG_MON_A[9]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(4, "EINT4"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 4),
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "CLKM2"),
+ MTK_FUNCTION(3, "SPI_MO"),
+ MTK_FUNCTION(4, "EXT_TXC"),
+ MTK_FUNCTION(5, "CONN_MCU_TCK"),
+ MTK_FUNCTION(6, "CONN_MCU_AICE_JCKC"),
+ MTK_FUNCTION(7, "DBG_MON_A[10]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(5, "EINT5"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 5),
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "UCTS2"),
+ MTK_FUNCTION(3, "SPI_CSB"),
+ MTK_FUNCTION(4, "EXT_RXER"),
+ MTK_FUNCTION(5, "CONN_MCU_TDI"),
+ MTK_FUNCTION(6, "CONN_TEST_CK"),
+ MTK_FUNCTION(7, "DBG_MON_A[11]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(6, "EINT6"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 6),
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "URTS2"),
+ MTK_FUNCTION(3, "SPI_CLK"),
+ MTK_FUNCTION(4, "EXT_RXC"),
+ MTK_FUNCTION(5, "CONN_MCU_TRST_B"),
+ MTK_FUNCTION(7, "DBG_MON_A[12]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(7, "EINT7"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 7),
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SQIRST"),
+ MTK_FUNCTION(3, "SDA1_0"),
+ MTK_FUNCTION(4, "EXT_RXDV"),
+ MTK_FUNCTION(5, "CONN_MCU_TMS"),
+ MTK_FUNCTION(6, "CONN_MCU_AICE_JMSC"),
+ MTK_FUNCTION(7, "DBG_MON_A[13]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(8, "EINT8"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 8),
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SQICK"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "SCL1_0"),
+ MTK_FUNCTION(4, "EXT_RXD0"),
+ MTK_FUNCTION(5, "ANT_SEL0"),
+ MTK_FUNCTION(7, "DBG_MON_A[14]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(9, "EINT9"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 9),
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "CLKM4"),
+ MTK_FUNCTION(2, "SDA2_0"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "EXT_RXD1"),
+ MTK_FUNCTION(5, "ANT_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_A[15]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(10, "EINT10"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 10),
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "CLKM5"),
+ MTK_FUNCTION(2, "SCL2_0"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "EXT_RXD2"),
+ MTK_FUNCTION(5, "ANT_SEL2"),
+ MTK_FUNCTION(7, "DBG_MON_A[16]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(11, "EINT11"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 11),
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "CLKM4"),
+ MTK_FUNCTION(2, "PWM_C"),
+ MTK_FUNCTION(3, "CONN_TEST_CK"),
+ MTK_FUNCTION(4, "ANT_SEL3"),
+ MTK_FUNCTION(6, "EXT_RXD3"),
+ MTK_FUNCTION(7, "DBG_MON_A[17]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(12, "EINT12"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 12),
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "CLKM5"),
+ MTK_FUNCTION(2, "PWM_A"),
+ MTK_FUNCTION(3, "SPDIF_OUT"),
+ MTK_FUNCTION(4, "ANT_SEL4"),
+ MTK_FUNCTION(6, "EXT_TXEN"),
+ MTK_FUNCTION(7, "DBG_MON_A[18]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(13, "EINT13"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 13),
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(3, "TSF_IN"),
+ MTK_FUNCTION(4, "ANT_SEL5"),
+ MTK_FUNCTION(6, "SPDIF_IN"),
+ MTK_FUNCTION(7, "DBG_MON_A[19]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(14, "EINT14"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 14),
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(2, "I2S_8CH_DO1"),
+ MTK_FUNCTION(3, "TDM_RX_MCK"),
+ MTK_FUNCTION(4, "ANT_SEL1"),
+ MTK_FUNCTION(5, "CONN_MCU_DBGACK_N"),
+ MTK_FUNCTION(6, "NCLE"),
+ MTK_FUNCTION(7, "DBG_MON_B[8]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(15, "EINT15"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 15),
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(2, "I2S_8CH_LRCK"),
+ MTK_FUNCTION(3, "TDM_RX_BCK"),
+ MTK_FUNCTION(4, "ANT_SEL2"),
+ MTK_FUNCTION(5, "CONN_MCU_DBGI_N"),
+ MTK_FUNCTION(6, "NCEB1"),
+ MTK_FUNCTION(7, "DBG_MON_B[9]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(16, "EINT16"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 16),
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(2, "I2S_8CH_BCK"),
+ MTK_FUNCTION(3, "TDM_RX_LRCK"),
+ MTK_FUNCTION(4, "ANT_SEL3"),
+ MTK_FUNCTION(5, "CONN_MCU_TRST_B"),
+ MTK_FUNCTION(6, "NCEB0"),
+ MTK_FUNCTION(7, "DBG_MON_B[10]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(17, "EINT17"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 17),
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(2, "I2S_8CH_MCK"),
+ MTK_FUNCTION(3, "TDM_RX_DI"),
+ MTK_FUNCTION(4, "IDDIG"),
+ MTK_FUNCTION(5, "ANT_SEL4"),
+ MTK_FUNCTION(6, "NREB"),
+ MTK_FUNCTION(7, "DBG_MON_B[11]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(18, "EINT18"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 18),
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "I2S3_LRCK"),
+ MTK_FUNCTION(4, "CLKM1"),
+ MTK_FUNCTION(5, "ANT_SEL3"),
+ MTK_FUNCTION(6, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[20]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(19, "EINT19"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 19),
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(3, "I2S3_BCK"),
+ MTK_FUNCTION(4, "CLKM2"),
+ MTK_FUNCTION(5, "ANT_SEL4"),
+ MTK_FUNCTION(6, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A[21]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(20, "EINT20"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 20),
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "URTS1"),
+ MTK_FUNCTION(3, "I2S3_DO"),
+ MTK_FUNCTION(4, "CLKM3"),
+ MTK_FUNCTION(5, "ANT_SEL5"),
+ MTK_FUNCTION(6, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[22]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(21, "EINT21"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 21),
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "NRNB"),
+ MTK_FUNCTION(2, "ANT_SEL0"),
+ MTK_FUNCTION(3, "I2S_8CH_DO4"),
+ MTK_FUNCTION(7, "DBG_MON_B[31]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(22, "EINT22"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 22),
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(2, "I2S_8CH_DO2"),
+ MTK_FUNCTION(3, "TSF_IN"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "SPDIF_OUT"),
+ MTK_FUNCTION(6, "NRE_C"),
+ MTK_FUNCTION(7, "DBG_MON_B[12]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(23, "EINT23"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 23),
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(2, "I2S_8CH_DO3"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, "IR"),
+ MTK_FUNCTION(5, "SPDIF_IN"),
+ MTK_FUNCTION(6, "NDQS_C"),
+ MTK_FUNCTION(7, "DBG_MON_B[13]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(24, "EINT24"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 24),
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(3, "ANT_SEL1"),
+ MTK_FUNCTION(4, "UCTS2"),
+ MTK_FUNCTION(5, "PWM_A"),
+ MTK_FUNCTION(6, "I2S0_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[0]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(25, "EINT25"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 25),
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(3, "ANT_SEL0"),
+ MTK_FUNCTION(4, "URTS2"),
+ MTK_FUNCTION(5, "PWM_B"),
+ MTK_FUNCTION(6, "I2S_8CH_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[1]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(26, "PWRAP_SPI0_MI"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 26),
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(27, "PWRAP_SPI0_MO"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 27),
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(28, "PWRAP_INT"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 28),
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "I2S0_MCK"),
+ MTK_FUNCTION(4, "I2S_8CH_MCK"),
+ MTK_FUNCTION(5, "I2S2_MCK"),
+ MTK_FUNCTION(6, "I2S3_MCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(29, "PWRAP_SPI0_CK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 29),
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(30, "PWRAP_SPI0_CSN"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 30),
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(31, "RTC32K_CK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 31),
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(32, "WATCHDOG"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 32),
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(33, "SRCLKENA"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 33),
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(34, "URXD2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 34),
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "URXD2"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(4, "DBG_SCL"),
+ MTK_FUNCTION(6, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[0]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(35, "UTXD2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 35),
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "UTXD2"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(4, "DBG_SDA"),
+ MTK_FUNCTION(6, "I2S3_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[1]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(36, "MRG_CLK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 36),
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "MRG_CLK"),
+ MTK_FUNCTION(3, "I2S0_BCK"),
+ MTK_FUNCTION(4, "I2S3_BCK"),
+ MTK_FUNCTION(5, "PCM0_CLK"),
+ MTK_FUNCTION(6, "IR"),
+ MTK_FUNCTION(7, "DBG_MON_A[2]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(37, "MRG_SYNC"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 37),
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "MRG_SYNC"),
+ MTK_FUNCTION(3, "I2S0_LRCK"),
+ MTK_FUNCTION(4, "I2S3_LRCK"),
+ MTK_FUNCTION(5, "PCM0_SYNC"),
+ MTK_FUNCTION(6, "EXT_COL"),
+ MTK_FUNCTION(7, "DBG_MON_A[3]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(38, "MRG_DI"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 38),
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "MRG_DI"),
+ MTK_FUNCTION(3, "I2S0_DI"),
+ MTK_FUNCTION(4, "I2S3_DO"),
+ MTK_FUNCTION(5, "PCM0_DI"),
+ MTK_FUNCTION(6, "EXT_MDIO"),
+ MTK_FUNCTION(7, "DBG_MON_A[4]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(39, "MRG_DO"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 39),
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "MRG_DO"),
+ MTK_FUNCTION(3, "I2S0_MCK"),
+ MTK_FUNCTION(4, "I2S3_MCK"),
+ MTK_FUNCTION(5, "PCM0_DO"),
+ MTK_FUNCTION(6, "EXT_MDC"),
+ MTK_FUNCTION(7, "DBG_MON_A[5]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(40, "KPROW0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 40),
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(7, "DBG_MON_B[4]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(41, "KPROW1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 41),
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_B[5]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(42, "KPCOL0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 42),
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_B[6]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(43, "KPCOL1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 43),
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "TSF_IN"),
+ MTK_FUNCTION(7, "DBG_MON_B[7]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(44, "JTMS"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 44),
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "JTMS"),
+ MTK_FUNCTION(2, "CONN_MCU_TMS"),
+ MTK_FUNCTION(3, "CONN_MCU_AICE_JMSC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(45, "JTCK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 45),
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "JTCK"),
+ MTK_FUNCTION(2, "CONN_MCU_TCK"),
+ MTK_FUNCTION(3, "CONN_MCU_AICE_JCKC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(46, "JTDI"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 46),
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "JTDI"),
+ MTK_FUNCTION(2, "CONN_MCU_TDI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(47, "JTDO"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 47),
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "JTDO"),
+ MTK_FUNCTION(2, "CONN_MCU_TDO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(48, "SPI_CS"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 48),
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "SPI_CSB"),
+ MTK_FUNCTION(3, "I2S0_DI"),
+ MTK_FUNCTION(4, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[23]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(49, "SPI_CK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 49),
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "SPI_CLK"),
+ MTK_FUNCTION(3, "I2S0_LRCK"),
+ MTK_FUNCTION(4, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A[24]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(50, "SPI_MI"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 50),
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "SPI_MI"),
+ MTK_FUNCTION(2, "SPI_MO"),
+ MTK_FUNCTION(3, "I2S0_BCK"),
+ MTK_FUNCTION(4, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[25]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(51, "SPI_MO"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 51),
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "SPI_MO"),
+ MTK_FUNCTION(2, "SPI_MI"),
+ MTK_FUNCTION(3, "I2S0_MCK"),
+ MTK_FUNCTION(4, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[26]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(52, "SDA1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 52),
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "SDA1_0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(53, "SCL1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 53),
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "SCL1_0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(54, "GPIO54"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 54),
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(2, "PWM_B"),
+ MTK_FUNCTION(7, "DBG_MON_B[2]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(55, "I2S_DATA_IN"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 55),
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "I2S0_DI"),
+ MTK_FUNCTION(2, "UCTS0"),
+ MTK_FUNCTION(3, "I2S3_DO"),
+ MTK_FUNCTION(4, "I2S_8CH_DO1"),
+ MTK_FUNCTION(5, "PWM_A"),
+ MTK_FUNCTION(6, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[28]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(56, "I2S_LRCK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 56),
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "I2S0_LRCK"),
+ MTK_FUNCTION(3, "I2S3_LRCK"),
+ MTK_FUNCTION(4, "I2S_8CH_LRCK"),
+ MTK_FUNCTION(5, "PWM_B"),
+ MTK_FUNCTION(6, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A[29]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(57, "I2S_BCK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 57),
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "I2S0_BCK"),
+ MTK_FUNCTION(2, "URTS0"),
+ MTK_FUNCTION(3, "I2S3_BCK"),
+ MTK_FUNCTION(4, "I2S_8CH_BCK"),
+ MTK_FUNCTION(5, "PWM_C"),
+ MTK_FUNCTION(6, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[30]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(58, "SDA0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 58),
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "SDA0_0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(59, "SCL0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 59),
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "SCL0_0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(60, "SDA2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 60),
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "SDA2_0"),
+ MTK_FUNCTION(2, "PWM_B")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(61, "SCL2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 61),
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "SCL2_0"),
+ MTK_FUNCTION(2, "PWM_C")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(62, "URXD0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 62),
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "UTXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(63, "UTXD0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 63),
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "URXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(64, "URXD1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 64),
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A[27]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(65, "UTXD1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 65),
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A[31]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(66, "LCM_RST"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 66),
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(3, "I2S0_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[3]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(67, "GPIO67"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 67),
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(3, "I2S_8CH_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[14]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(68, "MSDC2_CMD"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 68),
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "I2S_8CH_DO4"),
+ MTK_FUNCTION(3, "SDA1_0"),
+ MTK_FUNCTION(5, "USB_SDA"),
+ MTK_FUNCTION(6, "I2S3_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[15]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(69, "MSDC2_CLK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 69),
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "I2S_8CH_DO3"),
+ MTK_FUNCTION(3, "SCL1_0"),
+ MTK_FUNCTION(5, "USB_SCL"),
+ MTK_FUNCTION(6, "I2S3_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[16]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(70, "MSDC2_DAT0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 70),
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "I2S_8CH_DO2"),
+ MTK_FUNCTION(5, "UTXD0"),
+ MTK_FUNCTION(6, "I2S3_DO"),
+ MTK_FUNCTION(7, "DBG_MON_B[17]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(71, "MSDC2_DAT1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 71),
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "I2S_8CH_DO1"),
+ MTK_FUNCTION(3, "PWM_A"),
+ MTK_FUNCTION(4, "I2S3_MCK"),
+ MTK_FUNCTION(5, "URXD0"),
+ MTK_FUNCTION(6, "PWM_B"),
+ MTK_FUNCTION(7, "DBG_MON_B[18]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(72, "MSDC2_DAT2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 72),
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(2, "I2S_8CH_LRCK"),
+ MTK_FUNCTION(3, "SDA2_0"),
+ MTK_FUNCTION(5, "UTXD1"),
+ MTK_FUNCTION(6, "PWM_C"),
+ MTK_FUNCTION(7, "DBG_MON_B[19]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(73, "MSDC2_DAT3"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 73),
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(2, "I2S_8CH_BCK"),
+ MTK_FUNCTION(3, "SCL2_0"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "URXD1"),
+ MTK_FUNCTION(6, "PWM_A"),
+ MTK_FUNCTION(7, "DBG_MON_B[20]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(74, "TDN3"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 74),
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "TDN3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(75, "TDP3"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 75),
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "TDP3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(76, "TDN2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 76),
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "TDN2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(77, "TDP2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 77),
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "TDP2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(78, "TCN"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 78),
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "TCN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(79, "TCP"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 79),
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "TCP")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(80, "TDN1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 80),
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "TDN1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(81, "TDP1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 81),
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "TDP1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(82, "TDN0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 82),
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "TDN0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(83, "TDP0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 83),
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "TDP0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(84, "RDN0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 84),
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "RDN0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(85, "RDP0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 85),
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "RDP0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(86, "RDN1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 86),
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "RDN1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(87, "RDP1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 87),
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "RDP1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(88, "RCN"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 88),
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "RCN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(89, "RCP"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 89),
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "RCP")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(90, "RDN2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 90),
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "RDN2"),
+ MTK_FUNCTION(2, "CMDAT8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(91, "RDP2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 91),
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "RDP2"),
+ MTK_FUNCTION(2, "CMDAT9")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(92, "RDN3"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 92),
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "RDN3"),
+ MTK_FUNCTION(2, "CMDAT4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(93, "RDP3"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 93),
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "RDP3"),
+ MTK_FUNCTION(2, "CMDAT5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(94, "RCN_A"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 94),
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "RCN_A"),
+ MTK_FUNCTION(2, "CMDAT6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(95, "RCP_A"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 95),
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "RCP_A"),
+ MTK_FUNCTION(2, "CMDAT7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(96, "RDN1_A"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 96),
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "RDN1_A"),
+ MTK_FUNCTION(2, "CMDAT2"),
+ MTK_FUNCTION(3, "CMCSD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(97, "RDP1_A"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 97),
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "RDP1_A"),
+ MTK_FUNCTION(2, "CMDAT3"),
+ MTK_FUNCTION(3, "CMCSD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(98, "RDN0_A"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 98),
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "RDN0_A"),
+ MTK_FUNCTION(2, "CMHSYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(99, "RDP0_A"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 99),
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "RDP0_A"),
+ MTK_FUNCTION(2, "CMVSYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(100, "CMDAT0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 100),
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "CMDAT0"),
+ MTK_FUNCTION(2, "CMCSD0"),
+ MTK_FUNCTION(3, "ANT_SEL2"),
+ MTK_FUNCTION(5, "TDM_RX_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[21]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(101, "CMDAT1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 101),
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "CMDAT1"),
+ MTK_FUNCTION(2, "CMCSD1"),
+ MTK_FUNCTION(3, "ANT_SEL3"),
+ MTK_FUNCTION(4, "CMFLASH"),
+ MTK_FUNCTION(5, "TDM_RX_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[22]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(102, "CMMCLK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 102),
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "CMMCLK"),
+ MTK_FUNCTION(3, "ANT_SEL4"),
+ MTK_FUNCTION(5, "TDM_RX_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[23]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(103, "CMPCLK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 103),
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "CMPCLK"),
+ MTK_FUNCTION(2, "CMCSK"),
+ MTK_FUNCTION(3, "ANT_SEL5"),
+ MTK_FUNCTION(5, "TDM_RX_DI"),
+ MTK_FUNCTION(7, "DBG_MON_B[24]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(104, "MSDC1_CMD"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 104),
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(4, "SQICS"),
+ MTK_FUNCTION(7, "DBG_MON_B[25]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(105, "MSDC1_CLK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 105),
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(4, "SQISO"),
+ MTK_FUNCTION(7, "DBG_MON_B[26]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(106, "MSDC1_DAT0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 106),
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(4, "SQISI"),
+ MTK_FUNCTION(7, "DBG_MON_B[27]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(107, "MSDC1_DAT1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 107),
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(4, "SQIWP"),
+ MTK_FUNCTION(7, "DBG_MON_B[28]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(108, "MSDC1_DAT2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 108),
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(4, "SQIRST"),
+ MTK_FUNCTION(7, "DBG_MON_B[29]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(109, "MSDC1_DAT3"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 109),
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(4, "SQICK"),
+ MTK_FUNCTION(7, "DBG_MON_B[30]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(110, "MSDC0_DAT7"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 110),
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(4, "NLD7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(111, "MSDC0_DAT6"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 111),
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(4, "NLD6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(112, "MSDC0_DAT5"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 112),
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(4, "NLD4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(113, "MSDC0_DAT4"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 113),
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(4, "NLD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(114, "MSDC0_RSTB"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 114),
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "MSDC0_RSTB"),
+ MTK_FUNCTION(4, "NLD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(115, "MSDC0_CMD"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 115),
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "MSDC0_CMD"),
+ MTK_FUNCTION(4, "NALE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(116, "MSDC0_CLK"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 116),
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(4, "NWEB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(117, "MSDC0_DAT3"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 117),
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(4, "NLD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(118, "MSDC0_DAT2"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 118),
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(4, "NLD5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(119, "MSDC0_DAT1"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 119),
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(4, "NLD8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(120, "MSDC0_DAT0"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 120),
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(4, "WATCHDOG"),
+ MTK_FUNCTION(5, "NLD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(121, "GPIO121"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 121),
+ MTK_FUNCTION(0, "GPIO121")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(122, "GPIO122"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 122),
+ MTK_FUNCTION(0, "GPIO122")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(123, "GPIO123"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 123),
+ MTK_FUNCTION(0, "GPIO123")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(124, "GPIO124"),
+ NULL, "mt8516",
+ MTK_EINT_FUNCTION(0, 124),
+ MTK_FUNCTION(0, "GPIO124")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT8516_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index b59e10852bfb..d3b34e9a7507 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -20,12 +20,14 @@
#define MTK_PIN_CONFIG_RDSEL (PIN_CONFIG_END + 2)
#define MTK_PIN_CONFIG_PU_ADV (PIN_CONFIG_END + 3)
#define MTK_PIN_CONFIG_PD_ADV (PIN_CONFIG_END + 4)
+#define MTK_PIN_CONFIG_DRV_ADV (PIN_CONFIG_END + 5)
static const struct pinconf_generic_params mtk_custom_bindings[] = {
{"mediatek,tdsel", MTK_PIN_CONFIG_TDSEL, 0},
{"mediatek,rdsel", MTK_PIN_CONFIG_RDSEL, 0},
{"mediatek,pull-up-adv", MTK_PIN_CONFIG_PU_ADV, 1},
{"mediatek,pull-down-adv", MTK_PIN_CONFIG_PD_ADV, 1},
+ {"mediatek,drive-strength-adv", MTK_PIN_CONFIG_DRV_ADV, 2},
};
#ifdef CONFIG_DEBUG_FS
@@ -34,6 +36,7 @@ static const struct pin_config_item mtk_conf_items[] = {
PCONFDUMP(MTK_PIN_CONFIG_RDSEL, "rdsel", NULL, true),
PCONFDUMP(MTK_PIN_CONFIG_PU_ADV, "pu-adv", NULL, true),
PCONFDUMP(MTK_PIN_CONFIG_PD_ADV, "pd-adv", NULL, true),
+ PCONFDUMP(MTK_PIN_CONFIG_DRV_ADV, "drive-strength-adv", NULL, true),
};
#endif
@@ -176,6 +179,15 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
return -ENOTSUPP;
}
break;
+ case MTK_PIN_CONFIG_DRV_ADV:
+ if (hw->soc->adv_drive_get) {
+ err = hw->soc->adv_drive_get(hw, desc, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
default:
return -ENOTSUPP;
}
@@ -311,6 +323,15 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -ENOTSUPP;
}
break;
+ case MTK_PIN_CONFIG_DRV_ADV:
+ if (hw->soc->adv_drive_set) {
+ err = hw->soc->adv_drive_set(hw, desc, arg);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
default:
err = -ENOTSUPP;
}
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 6689995fa3aa..e18ebb2c78d9 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -930,8 +930,8 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2;
}
- ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
- KBUILD_MODNAME, gpio_dev);
+ ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index d89dc43c5757..e3239cf926f9 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -688,8 +688,9 @@ static void artpec6_pmx_select_func(struct pinctrl_dev *pctldev,
}
}
-int artpec6_pmx_enable(struct pinctrl_dev *pctldev, unsigned int function,
- unsigned int group)
+static int artpec6_pmx_set(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
{
struct artpec6_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
@@ -702,18 +703,6 @@ int artpec6_pmx_enable(struct pinctrl_dev *pctldev, unsigned int function,
return 0;
}
-void artpec6_pmx_disable(struct pinctrl_dev *pctldev, unsigned int function,
- unsigned int group)
-{
- struct artpec6_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
-
- dev_dbg(pmx->dev, "disabling %s function for pin group %s\n",
- artpec6_pmx_get_fname(pctldev, function),
- artpec6_get_group_name(pctldev, group));
-
- artpec6_pmx_select_func(pctldev, function, group, false);
-}
-
static int artpec6_pmx_request_gpio(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int pin)
@@ -737,7 +726,7 @@ static const struct pinmux_ops artpec6_pmx_ops = {
.get_functions_count = artpec6_pmx_get_functions_count,
.get_function_name = artpec6_pmx_get_fname,
.get_function_groups = artpec6_pmx_get_fgroups,
- .set_mux = artpec6_pmx_enable,
+ .set_mux = artpec6_pmx_set,
.gpio_request_enable = artpec6_pmx_request_gpio,
};
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index afd0b533c40a..4fcf7262bed9 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -366,6 +366,8 @@ static int axp20x_build_funcs_groups(struct platform_device *pdev)
pctl->funcs[i].groups = devm_kcalloc(&pdev->dev,
npins, sizeof(char *),
GFP_KERNEL);
+ if (!pctl->funcs[i].groups)
+ return -ENOMEM;
for (pin = 0; pin < npins; pin++)
pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
}
diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c
new file mode 100644
index 000000000000..446b07d8fbfc
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-bm1880.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Bitmain BM1880 SoC Pinctrl driver
+ *
+ * Copyright (c) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+
+#define BM1880_REG_MUX 0x20
+
+/**
+ * struct bm1880_pinctrl - driver data
+ * @base: Pinctrl base address
+ * @pctrldev: Pinctrl device
+ * @groups: Pingroups
+ * @ngroups: Number of @groups
+ * @funcs: Pinmux functions
+ * @nfuncs: Number of @funcs
+ */
+struct bm1880_pinctrl {
+ void __iomem *base;
+ struct pinctrl_dev *pctrldev;
+ const struct bm1880_pctrl_group *groups;
+ unsigned int ngroups;
+ const struct bm1880_pinmux_function *funcs;
+ unsigned int nfuncs;
+};
+
+/**
+ * struct bm1880_pctrl_group - pinctrl group
+ * @name: Name of the group
+ * @pins: Array of pins belonging to this group
+ * @npins: Number of @pins
+ */
+struct bm1880_pctrl_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned int npins;
+};
+
+/**
+ * struct bm1880_pinmux_function - a pinmux function
+ * @name: Name of the pinmux function.
+ * @groups: List of pingroups for this function.
+ * @ngroups: Number of entries in @groups.
+ * @mux_val: Selector for this function
+ * @mux_mask: Mask for function specific selector
+ * @mux: Offset of function specific mux
+ * @mux_shift: Shift for function specific selector
+ */
+struct bm1880_pinmux_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+ u32 mux_val;
+ u32 mux_mask;
+ u32 mux;
+ u8 mux_shift;
+};
+
+static const struct pinctrl_pin_desc bm1880_pins[] = {
+ PINCTRL_PIN(0, "MIO0"),
+ PINCTRL_PIN(1, "MIO1"),
+ PINCTRL_PIN(2, "MIO2"),
+ PINCTRL_PIN(3, "MIO3"),
+ PINCTRL_PIN(4, "MIO4"),
+ PINCTRL_PIN(5, "MIO5"),
+ PINCTRL_PIN(6, "MIO6"),
+ PINCTRL_PIN(7, "MIO7"),
+ PINCTRL_PIN(8, "MIO8"),
+ PINCTRL_PIN(9, "MIO9"),
+ PINCTRL_PIN(10, "MIO10"),
+ PINCTRL_PIN(11, "MIO11"),
+ PINCTRL_PIN(12, "MIO12"),
+ PINCTRL_PIN(13, "MIO13"),
+ PINCTRL_PIN(14, "MIO14"),
+ PINCTRL_PIN(15, "MIO15"),
+ PINCTRL_PIN(16, "MIO16"),
+ PINCTRL_PIN(17, "MIO17"),
+ PINCTRL_PIN(18, "MIO18"),
+ PINCTRL_PIN(19, "MIO19"),
+ PINCTRL_PIN(20, "MIO20"),
+ PINCTRL_PIN(21, "MIO21"),
+ PINCTRL_PIN(22, "MIO22"),
+ PINCTRL_PIN(23, "MIO23"),
+ PINCTRL_PIN(24, "MIO24"),
+ PINCTRL_PIN(25, "MIO25"),
+ PINCTRL_PIN(26, "MIO26"),
+ PINCTRL_PIN(27, "MIO27"),
+ PINCTRL_PIN(28, "MIO28"),
+ PINCTRL_PIN(29, "MIO29"),
+ PINCTRL_PIN(30, "MIO30"),
+ PINCTRL_PIN(31, "MIO31"),
+ PINCTRL_PIN(32, "MIO32"),
+ PINCTRL_PIN(33, "MIO33"),
+ PINCTRL_PIN(34, "MIO34"),
+ PINCTRL_PIN(35, "MIO35"),
+ PINCTRL_PIN(36, "MIO36"),
+ PINCTRL_PIN(37, "MIO37"),
+ PINCTRL_PIN(38, "MIO38"),
+ PINCTRL_PIN(39, "MIO39"),
+ PINCTRL_PIN(40, "MIO40"),
+ PINCTRL_PIN(41, "MIO41"),
+ PINCTRL_PIN(42, "MIO42"),
+ PINCTRL_PIN(43, "MIO43"),
+ PINCTRL_PIN(44, "MIO44"),
+ PINCTRL_PIN(45, "MIO45"),
+ PINCTRL_PIN(46, "MIO46"),
+ PINCTRL_PIN(47, "MIO47"),
+ PINCTRL_PIN(48, "MIO48"),
+ PINCTRL_PIN(49, "MIO49"),
+ PINCTRL_PIN(50, "MIO50"),
+ PINCTRL_PIN(51, "MIO51"),
+ PINCTRL_PIN(52, "MIO52"),
+ PINCTRL_PIN(53, "MIO53"),
+ PINCTRL_PIN(54, "MIO54"),
+ PINCTRL_PIN(55, "MIO55"),
+ PINCTRL_PIN(56, "MIO56"),
+ PINCTRL_PIN(57, "MIO57"),
+ PINCTRL_PIN(58, "MIO58"),
+ PINCTRL_PIN(59, "MIO59"),
+ PINCTRL_PIN(60, "MIO60"),
+ PINCTRL_PIN(61, "MIO61"),
+ PINCTRL_PIN(62, "MIO62"),
+ PINCTRL_PIN(63, "MIO63"),
+ PINCTRL_PIN(64, "MIO64"),
+ PINCTRL_PIN(65, "MIO65"),
+ PINCTRL_PIN(66, "MIO66"),
+ PINCTRL_PIN(67, "MIO67"),
+ PINCTRL_PIN(68, "MIO68"),
+ PINCTRL_PIN(69, "MIO69"),
+ PINCTRL_PIN(70, "MIO70"),
+ PINCTRL_PIN(71, "MIO71"),
+ PINCTRL_PIN(72, "MIO72"),
+ PINCTRL_PIN(73, "MIO73"),
+ PINCTRL_PIN(74, "MIO74"),
+ PINCTRL_PIN(75, "MIO75"),
+ PINCTRL_PIN(76, "MIO76"),
+ PINCTRL_PIN(77, "MIO77"),
+ PINCTRL_PIN(78, "MIO78"),
+ PINCTRL_PIN(79, "MIO79"),
+ PINCTRL_PIN(80, "MIO80"),
+ PINCTRL_PIN(81, "MIO81"),
+ PINCTRL_PIN(82, "MIO82"),
+ PINCTRL_PIN(83, "MIO83"),
+ PINCTRL_PIN(84, "MIO84"),
+ PINCTRL_PIN(85, "MIO85"),
+ PINCTRL_PIN(86, "MIO86"),
+ PINCTRL_PIN(87, "MIO87"),
+ PINCTRL_PIN(88, "MIO88"),
+ PINCTRL_PIN(89, "MIO89"),
+ PINCTRL_PIN(90, "MIO90"),
+ PINCTRL_PIN(91, "MIO91"),
+ PINCTRL_PIN(92, "MIO92"),
+ PINCTRL_PIN(93, "MIO93"),
+ PINCTRL_PIN(94, "MIO94"),
+ PINCTRL_PIN(95, "MIO95"),
+ PINCTRL_PIN(96, "MIO96"),
+ PINCTRL_PIN(97, "MIO97"),
+ PINCTRL_PIN(98, "MIO98"),
+ PINCTRL_PIN(99, "MIO99"),
+ PINCTRL_PIN(100, "MIO100"),
+ PINCTRL_PIN(101, "MIO101"),
+ PINCTRL_PIN(102, "MIO102"),
+ PINCTRL_PIN(103, "MIO103"),
+ PINCTRL_PIN(104, "MIO104"),
+ PINCTRL_PIN(105, "MIO105"),
+ PINCTRL_PIN(106, "MIO106"),
+ PINCTRL_PIN(107, "MIO107"),
+ PINCTRL_PIN(108, "MIO108"),
+ PINCTRL_PIN(109, "MIO109"),
+ PINCTRL_PIN(110, "MIO110"),
+ PINCTRL_PIN(111, "MIO111"),
+};
+
+enum bm1880_pinmux_functions {
+ F_nand, F_spi, F_emmc, F_sdio, F_eth0, F_pwm0, F_pwm1, F_pwm2,
+ F_pwm3, F_pwm4, F_pwm5, F_pwm6, F_pwm7, F_pwm8, F_pwm9, F_pwm10,
+ F_pwm11, F_pwm12, F_pwm13, F_pwm14, F_pwm15, F_pwm16, F_pwm17,
+ F_pwm18, F_pwm19, F_pwm20, F_pwm21, F_pwm22, F_pwm23, F_pwm24,
+ F_pwm25, F_pwm26, F_pwm27, F_pwm28, F_pwm29, F_pwm30, F_pwm31,
+ F_pwm32, F_pwm33, F_pwm34, F_pwm35, F_pwm36, F_pwm37, F_i2c0, F_i2c1,
+ F_i2c2, F_i2c3, F_i2c4, F_uart0, F_uart1, F_uart2, F_uart3, F_uart4,
+ F_uart5, F_uart6, F_uart7, F_uart8, F_uart9, F_uart10, F_uart11,
+ F_uart12, F_uart13, F_uart14, F_uart15, F_gpio0, F_gpio1, F_gpio2,
+ F_gpio3, F_gpio4, F_gpio5, F_gpio6, F_gpio7, F_gpio8, F_gpio9, F_gpio10,
+ F_gpio11, F_gpio12, F_gpio13, F_gpio14, F_gpio15, F_gpio16, F_gpio17,
+ F_gpio18, F_gpio19, F_gpio20, F_gpio21, F_gpio22, F_gpio23, F_gpio24,
+ F_gpio25, F_gpio26, F_gpio27, F_gpio28, F_gpio29, F_gpio30, F_gpio31,
+ F_gpio32, F_gpio33, F_gpio34, F_gpio35, F_gpio36, F_gpio37, F_gpio38,
+ F_gpio39, F_gpio40, F_gpio41, F_gpio42, F_gpio43, F_gpio44, F_gpio45,
+ F_gpio46, F_gpio47, F_gpio48, F_gpio49, F_gpio50, F_gpio51, F_gpio52,
+ F_gpio53, F_gpio54, F_gpio55, F_gpio56, F_gpio57, F_gpio58, F_gpio59,
+ F_gpio60, F_gpio61, F_gpio62, F_gpio63, F_gpio64, F_gpio65, F_gpio66,
+ F_gpio67, F_eth1, F_i2s0, F_i2s0_mclkin, F_i2s1, F_i2s1_mclkin, F_spi0,
+ F_max
+};
+
+static const unsigned int nand_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16 };
+static const unsigned int spi_pins[] = { 0, 1, 8, 10, 11, 12, 13 };
+static const unsigned int emmc_pins[] = { 2, 3, 4, 5, 6, 7, 9, 14, 15, 16 };
+static const unsigned int sdio_pins[] = { 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26 };
+static const unsigned int eth0_pins[] = { 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42 };
+static const unsigned int pwm0_pins[] = { 29 };
+static const unsigned int pwm1_pins[] = { 30 };
+static const unsigned int pwm2_pins[] = { 34 };
+static const unsigned int pwm3_pins[] = { 35 };
+static const unsigned int pwm4_pins[] = { 43 };
+static const unsigned int pwm5_pins[] = { 44 };
+static const unsigned int pwm6_pins[] = { 45 };
+static const unsigned int pwm7_pins[] = { 46 };
+static const unsigned int pwm8_pins[] = { 47 };
+static const unsigned int pwm9_pins[] = { 48 };
+static const unsigned int pwm10_pins[] = { 49 };
+static const unsigned int pwm11_pins[] = { 50 };
+static const unsigned int pwm12_pins[] = { 51 };
+static const unsigned int pwm13_pins[] = { 52 };
+static const unsigned int pwm14_pins[] = { 53 };
+static const unsigned int pwm15_pins[] = { 54 };
+static const unsigned int pwm16_pins[] = { 55 };
+static const unsigned int pwm17_pins[] = { 56 };
+static const unsigned int pwm18_pins[] = { 57 };
+static const unsigned int pwm19_pins[] = { 58 };
+static const unsigned int pwm20_pins[] = { 59 };
+static const unsigned int pwm21_pins[] = { 60 };
+static const unsigned int pwm22_pins[] = { 61 };
+static const unsigned int pwm23_pins[] = { 62 };
+static const unsigned int pwm24_pins[] = { 97 };
+static const unsigned int pwm25_pins[] = { 98 };
+static const unsigned int pwm26_pins[] = { 99 };
+static const unsigned int pwm27_pins[] = { 100 };
+static const unsigned int pwm28_pins[] = { 101 };
+static const unsigned int pwm29_pins[] = { 102 };
+static const unsigned int pwm30_pins[] = { 103 };
+static const unsigned int pwm31_pins[] = { 104 };
+static const unsigned int pwm32_pins[] = { 105 };
+static const unsigned int pwm33_pins[] = { 106 };
+static const unsigned int pwm34_pins[] = { 107 };
+static const unsigned int pwm35_pins[] = { 108 };
+static const unsigned int pwm36_pins[] = { 109 };
+static const unsigned int pwm37_pins[] = { 110 };
+static const unsigned int i2c0_pins[] = { 63, 64 };
+static const unsigned int i2c1_pins[] = { 65, 66 };
+static const unsigned int i2c2_pins[] = { 67, 68 };
+static const unsigned int i2c3_pins[] = { 69, 70 };
+static const unsigned int i2c4_pins[] = { 71, 72 };
+static const unsigned int uart0_pins[] = { 73, 74 };
+static const unsigned int uart1_pins[] = { 75, 76 };
+static const unsigned int uart2_pins[] = { 77, 78 };
+static const unsigned int uart3_pins[] = { 79, 80 };
+static const unsigned int uart4_pins[] = { 81, 82 };
+static const unsigned int uart5_pins[] = { 83, 84 };
+static const unsigned int uart6_pins[] = { 85, 86 };
+static const unsigned int uart7_pins[] = { 87, 88 };
+static const unsigned int uart8_pins[] = { 89, 90 };
+static const unsigned int uart9_pins[] = { 91, 92 };
+static const unsigned int uart10_pins[] = { 93, 94 };
+static const unsigned int uart11_pins[] = { 95, 96 };
+static const unsigned int uart12_pins[] = { 73, 74, 75, 76 };
+static const unsigned int uart13_pins[] = { 77, 78, 83, 84 };
+static const unsigned int uart14_pins[] = { 79, 80, 85, 86 };
+static const unsigned int uart15_pins[] = { 81, 82, 87, 88 };
+static const unsigned int gpio0_pins[] = { 97 };
+static const unsigned int gpio1_pins[] = { 98 };
+static const unsigned int gpio2_pins[] = { 99 };
+static const unsigned int gpio3_pins[] = { 100 };
+static const unsigned int gpio4_pins[] = { 101 };
+static const unsigned int gpio5_pins[] = { 102 };
+static const unsigned int gpio6_pins[] = { 103 };
+static const unsigned int gpio7_pins[] = { 104 };
+static const unsigned int gpio8_pins[] = { 105 };
+static const unsigned int gpio9_pins[] = { 106 };
+static const unsigned int gpio10_pins[] = { 107 };
+static const unsigned int gpio11_pins[] = { 108 };
+static const unsigned int gpio12_pins[] = { 109 };
+static const unsigned int gpio13_pins[] = { 110 };
+static const unsigned int gpio14_pins[] = { 43 };
+static const unsigned int gpio15_pins[] = { 44 };
+static const unsigned int gpio16_pins[] = { 45 };
+static const unsigned int gpio17_pins[] = { 46 };
+static const unsigned int gpio18_pins[] = { 47 };
+static const unsigned int gpio19_pins[] = { 48 };
+static const unsigned int gpio20_pins[] = { 49 };
+static const unsigned int gpio21_pins[] = { 50 };
+static const unsigned int gpio22_pins[] = { 51 };
+static const unsigned int gpio23_pins[] = { 52 };
+static const unsigned int gpio24_pins[] = { 53 };
+static const unsigned int gpio25_pins[] = { 54 };
+static const unsigned int gpio26_pins[] = { 55 };
+static const unsigned int gpio27_pins[] = { 56 };
+static const unsigned int gpio28_pins[] = { 57 };
+static const unsigned int gpio29_pins[] = { 58 };
+static const unsigned int gpio30_pins[] = { 59 };
+static const unsigned int gpio31_pins[] = { 60 };
+static const unsigned int gpio32_pins[] = { 61 };
+static const unsigned int gpio33_pins[] = { 62 };
+static const unsigned int gpio34_pins[] = { 63 };
+static const unsigned int gpio35_pins[] = { 64 };
+static const unsigned int gpio36_pins[] = { 65 };
+static const unsigned int gpio37_pins[] = { 66 };
+static const unsigned int gpio38_pins[] = { 67 };
+static const unsigned int gpio39_pins[] = { 68 };
+static const unsigned int gpio40_pins[] = { 69 };
+static const unsigned int gpio41_pins[] = { 70 };
+static const unsigned int gpio42_pins[] = { 71 };
+static const unsigned int gpio43_pins[] = { 72 };
+static const unsigned int gpio44_pins[] = { 73 };
+static const unsigned int gpio45_pins[] = { 74 };
+static const unsigned int gpio46_pins[] = { 75 };
+static const unsigned int gpio47_pins[] = { 76 };
+static const unsigned int gpio48_pins[] = { 77 };
+static const unsigned int gpio49_pins[] = { 78 };
+static const unsigned int gpio50_pins[] = { 79 };
+static const unsigned int gpio51_pins[] = { 80 };
+static const unsigned int gpio52_pins[] = { 81 };
+static const unsigned int gpio53_pins[] = { 82 };
+static const unsigned int gpio54_pins[] = { 83 };
+static const unsigned int gpio55_pins[] = { 84 };
+static const unsigned int gpio56_pins[] = { 85 };
+static const unsigned int gpio57_pins[] = { 86 };
+static const unsigned int gpio58_pins[] = { 87 };
+static const unsigned int gpio59_pins[] = { 88 };
+static const unsigned int gpio60_pins[] = { 89 };
+static const unsigned int gpio61_pins[] = { 90 };
+static const unsigned int gpio62_pins[] = { 91 };
+static const unsigned int gpio63_pins[] = { 92 };
+static const unsigned int gpio64_pins[] = { 93 };
+static const unsigned int gpio65_pins[] = { 94 };
+static const unsigned int gpio66_pins[] = { 95 };
+static const unsigned int gpio67_pins[] = { 96 };
+static const unsigned int eth1_pins[] = { 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58 };
+static const unsigned int i2s0_pins[] = { 87, 88, 89, 90, 91 };
+static const unsigned int i2s0_mclkin_pins[] = { 97 };
+static const unsigned int i2s1_pins[] = { 92, 93, 94, 95, 96 };
+static const unsigned int i2s1_mclkin_pins[] = { 98 };
+static const unsigned int spi0_pins[] = { 59, 60, 61, 62 };
+
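+/* Build a "<name>_grp" group descriptor from the matching <name>_pins array */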
+#define BM1880_PINCTRL_GRP(nm) \
+ { \
+ .name = #nm "_grp", \
+ .pins = nm ## _pins, \
+ .npins = ARRAY_SIZE(nm ## _pins), \
+ }
+
+static const struct bm1880_pctrl_group bm1880_pctrl_groups[] = {
+ BM1880_PINCTRL_GRP(nand),
+ BM1880_PINCTRL_GRP(spi),
+ BM1880_PINCTRL_GRP(emmc),
+ BM1880_PINCTRL_GRP(sdio),
+ BM1880_PINCTRL_GRP(eth0),
+ BM1880_PINCTRL_GRP(pwm0),
+ BM1880_PINCTRL_GRP(pwm1),
+ BM1880_PINCTRL_GRP(pwm2),
+ BM1880_PINCTRL_GRP(pwm3),
+ BM1880_PINCTRL_GRP(pwm4),
+ BM1880_PINCTRL_GRP(pwm5),
+ BM1880_PINCTRL_GRP(pwm6),
+ BM1880_PINCTRL_GRP(pwm7),
+ BM1880_PINCTRL_GRP(pwm8),
+ BM1880_PINCTRL_GRP(pwm9),
+ BM1880_PINCTRL_GRP(pwm10),
+ BM1880_PINCTRL_GRP(pwm11),
+ BM1880_PINCTRL_GRP(pwm12),
+ BM1880_PINCTRL_GRP(pwm13),
+ BM1880_PINCTRL_GRP(pwm14),
+ BM1880_PINCTRL_GRP(pwm15),
+ BM1880_PINCTRL_GRP(pwm16),
+ BM1880_PINCTRL_GRP(pwm17),
+ BM1880_PINCTRL_GRP(pwm18),
+ BM1880_PINCTRL_GRP(pwm19),
+ BM1880_PINCTRL_GRP(pwm20),
+ BM1880_PINCTRL_GRP(pwm21),
+ BM1880_PINCTRL_GRP(pwm22),
+ BM1880_PINCTRL_GRP(pwm23),
+ BM1880_PINCTRL_GRP(pwm24),
+ BM1880_PINCTRL_GRP(pwm25),
+ BM1880_PINCTRL_GRP(pwm26),
+ BM1880_PINCTRL_GRP(pwm27),
+ BM1880_PINCTRL_GRP(pwm28),
+ BM1880_PINCTRL_GRP(pwm29),
+ BM1880_PINCTRL_GRP(pwm30),
+ BM1880_PINCTRL_GRP(pwm31),
+ BM1880_PINCTRL_GRP(pwm32),
+ BM1880_PINCTRL_GRP(pwm33),
+ BM1880_PINCTRL_GRP(pwm34),
+ BM1880_PINCTRL_GRP(pwm35),
+ BM1880_PINCTRL_GRP(pwm36),
+ BM1880_PINCTRL_GRP(pwm37),
+ BM1880_PINCTRL_GRP(i2c0),
+ BM1880_PINCTRL_GRP(i2c1),
+ BM1880_PINCTRL_GRP(i2c2),
+ BM1880_PINCTRL_GRP(i2c3),
+ BM1880_PINCTRL_GRP(i2c4),
+ BM1880_PINCTRL_GRP(uart0),
+ BM1880_PINCTRL_GRP(uart1),
+ BM1880_PINCTRL_GRP(uart2),
+ BM1880_PINCTRL_GRP(uart3),
+ BM1880_PINCTRL_GRP(uart4),
+ BM1880_PINCTRL_GRP(uart5),
+ BM1880_PINCTRL_GRP(uart6),
+ BM1880_PINCTRL_GRP(uart7),
+ BM1880_PINCTRL_GRP(uart8),
+ BM1880_PINCTRL_GRP(uart9),
+ BM1880_PINCTRL_GRP(uart10),
+ BM1880_PINCTRL_GRP(uart11),
+ BM1880_PINCTRL_GRP(uart12),
+ BM1880_PINCTRL_GRP(uart13),
+ BM1880_PINCTRL_GRP(uart14),
+ BM1880_PINCTRL_GRP(uart15),
+ BM1880_PINCTRL_GRP(gpio0),
+ BM1880_PINCTRL_GRP(gpio1),
+ BM1880_PINCTRL_GRP(gpio2),
+ BM1880_PINCTRL_GRP(gpio3),
+ BM1880_PINCTRL_GRP(gpio4),
+ BM1880_PINCTRL_GRP(gpio5),
+ BM1880_PINCTRL_GRP(gpio6),
+ BM1880_PINCTRL_GRP(gpio7),
+ BM1880_PINCTRL_GRP(gpio8),
+ BM1880_PINCTRL_GRP(gpio9),
+ BM1880_PINCTRL_GRP(gpio10),
+ BM1880_PINCTRL_GRP(gpio11),
+ BM1880_PINCTRL_GRP(gpio12),
+ BM1880_PINCTRL_GRP(gpio13),
+ BM1880_PINCTRL_GRP(gpio14),
+ BM1880_PINCTRL_GRP(gpio15),
+ BM1880_PINCTRL_GRP(gpio16),
+ BM1880_PINCTRL_GRP(gpio17),
+ BM1880_PINCTRL_GRP(gpio18),
+ BM1880_PINCTRL_GRP(gpio19),
+ BM1880_PINCTRL_GRP(gpio20),
+ BM1880_PINCTRL_GRP(gpio21),
+ BM1880_PINCTRL_GRP(gpio22),
+ BM1880_PINCTRL_GRP(gpio23),
+ BM1880_PINCTRL_GRP(gpio24),
+ BM1880_PINCTRL_GRP(gpio25),
+ BM1880_PINCTRL_GRP(gpio26),
+ BM1880_PINCTRL_GRP(gpio27),
+ BM1880_PINCTRL_GRP(gpio28),
+ BM1880_PINCTRL_GRP(gpio29),
+ BM1880_PINCTRL_GRP(gpio30),
+ BM1880_PINCTRL_GRP(gpio31),
+ BM1880_PINCTRL_GRP(gpio32),
+ BM1880_PINCTRL_GRP(gpio33),
+ BM1880_PINCTRL_GRP(gpio34),
+ BM1880_PINCTRL_GRP(gpio35),
+ BM1880_PINCTRL_GRP(gpio36),
+ BM1880_PINCTRL_GRP(gpio37),
+ BM1880_PINCTRL_GRP(gpio38),
+ BM1880_PINCTRL_GRP(gpio39),
+ BM1880_PINCTRL_GRP(gpio40),
+ BM1880_PINCTRL_GRP(gpio41),
+ BM1880_PINCTRL_GRP(gpio42),
+ BM1880_PINCTRL_GRP(gpio43),
+ BM1880_PINCTRL_GRP(gpio44),
+ BM1880_PINCTRL_GRP(gpio45),
+ BM1880_PINCTRL_GRP(gpio46),
+ BM1880_PINCTRL_GRP(gpio47),
+ BM1880_PINCTRL_GRP(gpio48),
+ BM1880_PINCTRL_GRP(gpio49),
+ BM1880_PINCTRL_GRP(gpio50),
+ BM1880_PINCTRL_GRP(gpio51),
+ BM1880_PINCTRL_GRP(gpio52),
+ BM1880_PINCTRL_GRP(gpio53),
+ BM1880_PINCTRL_GRP(gpio54),
+ BM1880_PINCTRL_GRP(gpio55),
+ BM1880_PINCTRL_GRP(gpio56),
+ BM1880_PINCTRL_GRP(gpio57),
+ BM1880_PINCTRL_GRP(gpio58),
+ BM1880_PINCTRL_GRP(gpio59),
+ BM1880_PINCTRL_GRP(gpio60),
+ BM1880_PINCTRL_GRP(gpio61),
+ BM1880_PINCTRL_GRP(gpio62),
+ BM1880_PINCTRL_GRP(gpio63),
+ BM1880_PINCTRL_GRP(gpio64),
+ BM1880_PINCTRL_GRP(gpio65),
+ BM1880_PINCTRL_GRP(gpio66),
+ BM1880_PINCTRL_GRP(gpio67),
+ BM1880_PINCTRL_GRP(eth1),
+ BM1880_PINCTRL_GRP(i2s0),
+ BM1880_PINCTRL_GRP(i2s0_mclkin),
+ BM1880_PINCTRL_GRP(i2s1),
+ BM1880_PINCTRL_GRP(i2s1_mclkin),
+ BM1880_PINCTRL_GRP(spi0),
+};
+
+static const char * const nand_group[] = { "nand_grp" };
+static const char * const spi_group[] = { "spi_grp" };
+static const char * const emmc_group[] = { "emmc_grp" };
+static const char * const sdio_group[] = { "sdio_grp" };
+static const char * const eth0_group[] = { "eth0_grp" };
+static const char * const pwm0_group[] = { "pwm0_grp" };
+static const char * const pwm1_group[] = { "pwm1_grp" };
+static const char * const pwm2_group[] = { "pwm2_grp" };
+static const char * const pwm3_group[] = { "pwm3_grp" };
+static const char * const pwm4_group[] = { "pwm4_grp" };
+static const char * const pwm5_group[] = { "pwm5_grp" };
+static const char * const pwm6_group[] = { "pwm6_grp" };
+static const char * const pwm7_group[] = { "pwm7_grp" };
+static const char * const pwm8_group[] = { "pwm8_grp" };
+static const char * const pwm9_group[] = { "pwm9_grp" };
+static const char * const pwm10_group[] = { "pwm10_grp" };
+static const char * const pwm11_group[] = { "pwm11_grp" };
+static const char * const pwm12_group[] = { "pwm12_grp" };
+static const char * const pwm13_group[] = { "pwm13_grp" };
+static const char * const pwm14_group[] = { "pwm14_grp" };
+static const char * const pwm15_group[] = { "pwm15_grp" };
+static const char * const pwm16_group[] = { "pwm16_grp" };
+static const char * const pwm17_group[] = { "pwm17_grp" };
+static const char * const pwm18_group[] = { "pwm18_grp" };
+static const char * const pwm19_group[] = { "pwm19_grp" };
+static const char * const pwm20_group[] = { "pwm20_grp" };
+static const char * const pwm21_group[] = { "pwm21_grp" };
+static const char * const pwm22_group[] = { "pwm22_grp" };
+static const char * const pwm23_group[] = { "pwm23_grp" };
+static const char * const pwm24_group[] = { "pwm24_grp" };
+static const char * const pwm25_group[] = { "pwm25_grp" };
+static const char * const pwm26_group[] = { "pwm26_grp" };
+static const char * const pwm27_group[] = { "pwm27_grp" };
+static const char * const pwm28_group[] = { "pwm28_grp" };
+static const char * const pwm29_group[] = { "pwm29_grp" };
+static const char * const pwm30_group[] = { "pwm30_grp" };
+static const char * const pwm31_group[] = { "pwm31_grp" };
+static const char * const pwm32_group[] = { "pwm32_grp" };
+static const char * const pwm33_group[] = { "pwm33_grp" };
+static const char * const pwm34_group[] = { "pwm34_grp" };
+static const char * const pwm35_group[] = { "pwm35_grp" };
+static const char * const pwm36_group[] = { "pwm36_grp" };
+static const char * const pwm37_group[] = { "pwm37_grp" };
+static const char * const i2c0_group[] = { "i2c0_grp" };
+static const char * const i2c1_group[] = { "i2c1_grp" };
+static const char * const i2c2_group[] = { "i2c2_grp" };
+static const char * const i2c3_group[] = { "i2c3_grp" };
+static const char * const i2c4_group[] = { "i2c4_grp" };
+static const char * const uart0_group[] = { "uart0_grp" };
+static const char * const uart1_group[] = { "uart1_grp" };
+static const char * const uart2_group[] = { "uart2_grp" };
+static const char * const uart3_group[] = { "uart3_grp" };
+static const char * const uart4_group[] = { "uart4_grp" };
+static const char * const uart5_group[] = { "uart5_grp" };
+static const char * const uart6_group[] = { "uart6_grp" };
+static const char * const uart7_group[] = { "uart7_grp" };
+static const char * const uart8_group[] = { "uart8_grp" };
+static const char * const uart9_group[] = { "uart9_grp" };
+static const char * const uart10_group[] = { "uart10_grp" };
+static const char * const uart11_group[] = { "uart11_grp" };
+static const char * const uart12_group[] = { "uart12_grp" };
+static const char * const uart13_group[] = { "uart13_grp" };
+static const char * const uart14_group[] = { "uart14_grp" };
+static const char * const uart15_group[] = { "uart15_grp" };
+static const char * const gpio0_group[] = { "gpio0_grp" };
+static const char * const gpio1_group[] = { "gpio1_grp" };
+static const char * const gpio2_group[] = { "gpio2_grp" };
+static const char * const gpio3_group[] = { "gpio3_grp" };
+static const char * const gpio4_group[] = { "gpio4_grp" };
+static const char * const gpio5_group[] = { "gpio5_grp" };
+static const char * const gpio6_group[] = { "gpio6_grp" };
+static const char * const gpio7_group[] = { "gpio7_grp" };
+static const char * const gpio8_group[] = { "gpio8_grp" };
+static const char * const gpio9_group[] = { "gpio9_grp" };
+static const char * const gpio10_group[] = { "gpio10_grp" };
+static const char * const gpio11_group[] = { "gpio11_grp" };
+static const char * const gpio12_group[] = { "gpio12_grp" };
+static const char * const gpio13_group[] = { "gpio13_grp" };
+static const char * const gpio14_group[] = { "gpio14_grp" };
+static const char * const gpio15_group[] = { "gpio15_grp" };
+static const char * const gpio16_group[] = { "gpio16_grp" };
+static const char * const gpio17_group[] = { "gpio17_grp" };
+static const char * const gpio18_group[] = { "gpio18_grp" };
+static const char * const gpio19_group[] = { "gpio19_grp" };
+static const char * const gpio20_group[] = { "gpio20_grp" };
+static const char * const gpio21_group[] = { "gpio21_grp" };
+static const char * const gpio22_group[] = { "gpio22_grp" };
+static const char * const gpio23_group[] = { "gpio23_grp" };
+static const char * const gpio24_group[] = { "gpio24_grp" };
+static const char * const gpio25_group[] = { "gpio25_grp" };
+static const char * const gpio26_group[] = { "gpio26_grp" };
+static const char * const gpio27_group[] = { "gpio27_grp" };
+static const char * const gpio28_group[] = { "gpio28_grp" };
+static const char * const gpio29_group[] = { "gpio29_grp" };
+static const char * const gpio30_group[] = { "gpio30_grp" };
+static const char * const gpio31_group[] = { "gpio31_grp" };
+static const char * const gpio32_group[] = { "gpio32_grp" };
+static const char * const gpio33_group[] = { "gpio33_grp" };
+static const char * const gpio34_group[] = { "gpio34_grp" };
+static const char * const gpio35_group[] = { "gpio35_grp" };
+static const char * const gpio36_group[] = { "gpio36_grp" };
+static const char * const gpio37_group[] = { "gpio37_grp" };
+static const char * const gpio38_group[] = { "gpio38_grp" };
+static const char * const gpio39_group[] = { "gpio39_grp" };
+static const char * const gpio40_group[] = { "gpio40_grp" };
+static const char * const gpio41_group[] = { "gpio41_grp" };
+static const char * const gpio42_group[] = { "gpio42_grp" };
+static const char * const gpio43_group[] = { "gpio43_grp" };
+static const char * const gpio44_group[] = { "gpio44_grp" };
+static const char * const gpio45_group[] = { "gpio45_grp" };
+static const char * const gpio46_group[] = { "gpio46_grp" };
+static const char * const gpio47_group[] = { "gpio47_grp" };
+static const char * const gpio48_group[] = { "gpio48_grp" };
+static const char * const gpio49_group[] = { "gpio49_grp" };
+static const char * const gpio50_group[] = { "gpio50_grp" };
+static const char * const gpio51_group[] = { "gpio51_grp" };
+static const char * const gpio52_group[] = { "gpio52_grp" };
+static const char * const gpio53_group[] = { "gpio53_grp" };
+static const char * const gpio54_group[] = { "gpio54_grp" };
+static const char * const gpio55_group[] = { "gpio55_grp" };
+static const char * const gpio56_group[] = { "gpio56_grp" };
+static const char * const gpio57_group[] = { "gpio57_grp" };
+static const char * const gpio58_group[] = { "gpio58_grp" };
+static const char * const gpio59_group[] = { "gpio59_grp" };
+static const char * const gpio60_group[] = { "gpio60_grp" };
+static const char * const gpio61_group[] = { "gpio61_grp" };
+static const char * const gpio62_group[] = { "gpio62_grp" };
+static const char * const gpio63_group[] = { "gpio63_grp" };
+static const char * const gpio64_group[] = { "gpio64_grp" };
+static const char * const gpio65_group[] = { "gpio65_grp" };
+static const char * const gpio66_group[] = { "gpio66_grp" };
+static const char * const gpio67_group[] = { "gpio67_grp" };
+static const char * const eth1_group[] = { "eth1_grp" };
+static const char * const i2s0_group[] = { "i2s0_grp" };
+static const char * const i2s0_mclkin_group[] = { "i2s0_mclkin_grp" };
+static const char * const i2s1_group[] = { "i2s1_grp" };
+static const char * const i2s1_mclkin_group[] = { "i2s1_mclkin_grp" };
+static const char * const spi0_group[] = { "spi0_grp" };
+
+#define BM1880_PINMUX_FUNCTION(fname, mval, mask) \
+ [F_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_group, \
+ .ngroups = ARRAY_SIZE(fname##_group), \
+ .mux_val = mval, \
+ .mux_mask = mask, \
+ }
+
+#define BM1880_PINMUX_FUNCTION_MUX(fname, mval, mask, offset, shift)\
+ [F_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_group, \
+ .ngroups = ARRAY_SIZE(fname##_group), \
+ .mux_val = mval, \
+ .mux_mask = mask, \
+ .mux = offset, \
+ .mux_shift = shift, \
+ }
+
+static const struct bm1880_pinmux_function bm1880_pmux_functions[] = {
+ BM1880_PINMUX_FUNCTION(nand, 2, 0x03),
+ BM1880_PINMUX_FUNCTION(spi, 0, 0x03),
+ BM1880_PINMUX_FUNCTION(emmc, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(sdio, 0, 0x03),
+ BM1880_PINMUX_FUNCTION(eth0, 0, 0x03),
+ BM1880_PINMUX_FUNCTION_MUX(pwm0, 2, 0x0F, 0x50, 0x00),
+ BM1880_PINMUX_FUNCTION_MUX(pwm1, 2, 0x0F, 0x50, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(pwm2, 2, 0x0F, 0x50, 0x08),
+ BM1880_PINMUX_FUNCTION_MUX(pwm3, 2, 0x0F, 0x50, 0x0C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm4, 2, 0x0F, 0x50, 0x10),
+ BM1880_PINMUX_FUNCTION_MUX(pwm5, 2, 0x0F, 0x50, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(pwm6, 2, 0x0F, 0x50, 0x18),
+ BM1880_PINMUX_FUNCTION_MUX(pwm7, 2, 0x0F, 0x50, 0x1C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm8, 2, 0x0F, 0x54, 0x00),
+ BM1880_PINMUX_FUNCTION_MUX(pwm9, 2, 0x0F, 0x54, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(pwm10, 2, 0x0F, 0x54, 0x08),
+ BM1880_PINMUX_FUNCTION_MUX(pwm11, 2, 0x0F, 0x54, 0x0C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm12, 2, 0x0F, 0x54, 0x10),
+ BM1880_PINMUX_FUNCTION_MUX(pwm13, 2, 0x0F, 0x54, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(pwm14, 2, 0x0F, 0x54, 0x18),
+ BM1880_PINMUX_FUNCTION_MUX(pwm15, 2, 0x0F, 0x54, 0x1C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm16, 2, 0x0F, 0x58, 0x00),
+ BM1880_PINMUX_FUNCTION_MUX(pwm17, 2, 0x0F, 0x58, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(pwm18, 2, 0x0F, 0x58, 0x08),
+ BM1880_PINMUX_FUNCTION_MUX(pwm19, 2, 0x0F, 0x58, 0x0C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm20, 2, 0x0F, 0x58, 0x10),
+ BM1880_PINMUX_FUNCTION_MUX(pwm21, 2, 0x0F, 0x58, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(pwm22, 2, 0x0F, 0x58, 0x18),
+ BM1880_PINMUX_FUNCTION_MUX(pwm23, 2, 0x0F, 0x58, 0x1C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm24, 2, 0x0F, 0x5C, 0x00),
+ BM1880_PINMUX_FUNCTION_MUX(pwm25, 2, 0x0F, 0x5C, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(pwm26, 2, 0x0F, 0x5C, 0x08),
+ BM1880_PINMUX_FUNCTION_MUX(pwm27, 2, 0x0F, 0x5C, 0x0C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm28, 2, 0x0F, 0x5C, 0x10),
+ BM1880_PINMUX_FUNCTION_MUX(pwm29, 2, 0x0F, 0x5C, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(pwm30, 2, 0x0F, 0x5C, 0x18),
+ BM1880_PINMUX_FUNCTION_MUX(pwm31, 2, 0x0F, 0x5C, 0x1C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm32, 2, 0x0F, 0x60, 0x00),
+ BM1880_PINMUX_FUNCTION_MUX(pwm33, 2, 0x0F, 0x60, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(pwm34, 2, 0x0F, 0x60, 0x08),
+ BM1880_PINMUX_FUNCTION_MUX(pwm35, 2, 0x0F, 0x60, 0x0C),
+ BM1880_PINMUX_FUNCTION_MUX(pwm36, 2, 0x0F, 0x60, 0x10),
+ BM1880_PINMUX_FUNCTION_MUX(pwm37, 2, 0x0F, 0x60, 0x1C),
+ BM1880_PINMUX_FUNCTION(i2c0, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(i2c1, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(i2c2, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(i2c3, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(i2c4, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart0, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart1, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart2, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart3, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart4, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart5, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart6, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart7, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart8, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart9, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart10, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart11, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(uart12, 3, 0x03),
+ BM1880_PINMUX_FUNCTION(uart13, 3, 0x03),
+ BM1880_PINMUX_FUNCTION(uart14, 3, 0x03),
+ BM1880_PINMUX_FUNCTION(uart15, 3, 0x03),
+ BM1880_PINMUX_FUNCTION_MUX(gpio0, 0, 0x03, 0x4E0, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio1, 0, 0x03, 0x4E4, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio2, 0, 0x03, 0x4E4, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio3, 0, 0x03, 0x4E8, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio4, 0, 0x03, 0x4E8, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio5, 0, 0x03, 0x4EC, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio6, 0, 0x03, 0x4EC, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio7, 0, 0x03, 0x4F0, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio8, 0, 0x03, 0x4F0, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio9, 0, 0x03, 0x4F4, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio10, 0, 0x03, 0x4F4, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio11, 0, 0x03, 0x4F8, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio12, 1, 0x03, 0x4F8, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio13, 1, 0x03, 0x4FC, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio14, 0, 0x03, 0x474, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio15, 0, 0x03, 0x478, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio16, 0, 0x03, 0x478, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio17, 0, 0x03, 0x47C, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio18, 0, 0x03, 0x47C, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio19, 0, 0x03, 0x480, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio20, 0, 0x03, 0x480, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio21, 0, 0x03, 0x484, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio22, 0, 0x03, 0x484, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio23, 0, 0x03, 0x488, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio24, 0, 0x03, 0x488, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio25, 0, 0x03, 0x48C, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio26, 0, 0x03, 0x48C, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio27, 0, 0x03, 0x490, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio28, 0, 0x03, 0x490, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio29, 0, 0x03, 0x494, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio30, 0, 0x03, 0x494, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio31, 0, 0x03, 0x498, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio32, 0, 0x03, 0x498, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio33, 0, 0x03, 0x49C, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio34, 0, 0x03, 0x49C, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio35, 0, 0x03, 0x4A0, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio36, 0, 0x03, 0x4A0, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio37, 0, 0x03, 0x4A4, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio38, 0, 0x03, 0x4A4, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio39, 0, 0x03, 0x4A8, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio40, 0, 0x03, 0x4A8, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio41, 0, 0x03, 0x4AC, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio42, 0, 0x03, 0x4AC, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio43, 0, 0x03, 0x4B0, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio44, 0, 0x03, 0x4B0, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio45, 0, 0x03, 0x4B4, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio46, 0, 0x03, 0x4B4, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio47, 0, 0x03, 0x4B8, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio48, 0, 0x03, 0x4B8, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio49, 0, 0x03, 0x4BC, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio50, 0, 0x03, 0x4BC, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio51, 0, 0x03, 0x4C0, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio52, 0, 0x03, 0x4C0, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio53, 0, 0x03, 0x4C4, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio54, 0, 0x03, 0x4C4, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio55, 0, 0x03, 0x4C8, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio56, 0, 0x03, 0x4C8, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio57, 0, 0x03, 0x4CC, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio58, 0, 0x03, 0x4CC, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio59, 0, 0x03, 0x4D0, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio60, 0, 0x03, 0x4D0, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio61, 0, 0x03, 0x4D4, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio62, 0, 0x03, 0x4D4, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio63, 0, 0x03, 0x4D8, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio64, 0, 0x03, 0x4D8, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio65, 0, 0x03, 0x4DC, 0x04),
+ BM1880_PINMUX_FUNCTION_MUX(gpio66, 0, 0x03, 0x4DC, 0x14),
+ BM1880_PINMUX_FUNCTION_MUX(gpio67, 0, 0x03, 0x4E0, 0x04),
+ BM1880_PINMUX_FUNCTION(eth1, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(i2s0, 2, 0x03),
+ BM1880_PINMUX_FUNCTION(i2s0_mclkin, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(i2s1, 2, 0x03),
+ BM1880_PINMUX_FUNCTION(i2s1_mclkin, 1, 0x03),
+ BM1880_PINMUX_FUNCTION(spi0, 1, 0x03),
+};
+
+static int bm1880_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct bm1880_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->ngroups;
+}
+
+static const char *bm1880_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct bm1880_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->groups[selector].name;
+}
+
+static int bm1880_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct bm1880_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->groups[selector].pins;
+ *num_pins = pctrl->groups[selector].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops bm1880_pctrl_ops = {
+ .get_groups_count = bm1880_pctrl_get_groups_count,
+ .get_group_name = bm1880_pctrl_get_group_name,
+ .get_group_pins = bm1880_pctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+/* pinmux */
+static int bm1880_pmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct bm1880_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->nfuncs;
+}
+
+static const char *bm1880_pmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct bm1880_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->funcs[selector].name;
+}
+
+static int bm1880_pmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct bm1880_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->funcs[selector].groups;
+ *num_groups = pctrl->funcs[selector].ngroups;
+ return 0;
+}
+
+static int bm1880_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct bm1880_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct bm1880_pctrl_group *pgrp = &pctrl->groups[group];
+ const struct bm1880_pinmux_function *func = &pctrl->funcs[function];
+ int i;
+
+ if (func->mux) {
+ u32 regval = readl_relaxed(pctrl->base + BM1880_REG_MUX +
+ func->mux);
+
+ regval &= ~(func->mux_mask << func->mux_shift);
+ regval |= func->mux_val << func->mux_shift;
+ writel_relaxed(regval, pctrl->base + BM1880_REG_MUX +
+ func->mux);
+ } else {
+ for (i = 0; i < pgrp->npins; i++) {
+ unsigned int pin = pgrp->pins[i];
+ u32 offset = (pin >> 1) << 2;
+ u32 mux_offset = ((!((pin + 1) & 1) << 4) + 4);
+ u32 regval = readl_relaxed(pctrl->base +
+ BM1880_REG_MUX + offset);
+
+ regval &= ~(func->mux_mask << mux_offset);
+ regval |= func->mux_val << mux_offset;
+
+ writel_relaxed(regval, pctrl->base +
+ BM1880_REG_MUX + offset);
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops bm1880_pinmux_ops = {
+ .get_functions_count = bm1880_pmux_get_functions_count,
+ .get_function_name = bm1880_pmux_get_function_name,
+ .get_function_groups = bm1880_pmux_get_function_groups,
+ .set_mux = bm1880_pinmux_set_mux,
+};
+
+static struct pinctrl_desc bm1880_desc = {
+ .name = "bm1880_pinctrl",
+ .pins = bm1880_pins,
+ .npins = ARRAY_SIZE(bm1880_pins),
+ .pctlops = &bm1880_pctrl_ops,
+ .pmxops = &bm1880_pinmux_ops,
+ .owner = THIS_MODULE,
+};
+
+static int bm1880_pinctrl_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct bm1880_pinctrl *pctrl;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->base))
+ return PTR_ERR(pctrl->base);
+
+ pctrl->groups = bm1880_pctrl_groups;
+ pctrl->ngroups = ARRAY_SIZE(bm1880_pctrl_groups);
+ pctrl->funcs = bm1880_pmux_functions;
+ pctrl->nfuncs = ARRAY_SIZE(bm1880_pmux_functions);
+
+ pctrl->pctrldev = devm_pinctrl_register(&pdev->dev, &bm1880_desc,
+ pctrl);
+ if (IS_ERR(pctrl->pctrldev))
+ return PTR_ERR(pctrl->pctrldev);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ dev_info(&pdev->dev, "BM1880 pinctrl driver initialized\n");
+
+ return 0;
+}
+
+static const struct of_device_id bm1880_pinctrl_of_match[] = {
+ { .compatible = "bitmain,bm1880-pinctrl" },
+ { }
+};
+
+static struct platform_driver bm1880_pinctrl_driver = {
+ .driver = {
+ .name = "pinctrl-bm1880",
+ .of_match_table = of_match_ptr(bm1880_pinctrl_of_match),
+ },
+ .probe = bm1880_pinctrl_probe,
+};
+
+static int __init bm1880_pinctrl_init(void)
+{
+ return platform_driver_register(&bm1880_pinctrl_driver);
+}
+arch_initcall(bm1880_pinctrl_init);
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 5d7a8514def9..f0cdb5234e49 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -266,7 +266,6 @@ static int mcp_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
status = (data & BIT(pin)) ? 1 : 0;
break;
default:
- dev_err(mcp->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
}
@@ -293,7 +292,7 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
break;
default:
- dev_err(mcp->dev, "Invalid config param %04x\n", param);
+ dev_dbg(mcp->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
}
}
@@ -656,115 +655,6 @@ static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/seq_file.h>
-
-/*
- * This compares the chip's registers with the register
- * cache and corrects any incorrectly set register. This
- * can be used to fix state for MCP23xxx, that temporary
- * lost its power supply.
- */
-#define MCP23S08_CONFIG_REGS 7
-static int __check_mcp23s08_reg_cache(struct mcp23s08 *mcp)
-{
- int cached[MCP23S08_CONFIG_REGS];
- int err = 0, i;
-
- /* read cached config registers */
- for (i = 0; i < MCP23S08_CONFIG_REGS; i++) {
- err = mcp_read(mcp, i, &cached[i]);
- if (err)
- goto out;
- }
-
- regcache_cache_bypass(mcp->regmap, true);
-
- for (i = 0; i < MCP23S08_CONFIG_REGS; i++) {
- int uncached;
- err = mcp_read(mcp, i, &uncached);
- if (err)
- goto out;
-
- if (uncached != cached[i]) {
- dev_err(mcp->dev, "restoring reg 0x%02x from 0x%04x to 0x%04x (power-loss?)\n",
- i, uncached, cached[i]);
- mcp_write(mcp, i, cached[i]);
- }
- }
-
-out:
- if (err)
- dev_err(mcp->dev, "read error: reg=%02x, err=%d", i, err);
- regcache_cache_bypass(mcp->regmap, false);
- return err;
-}
-
-/*
- * This shows more info than the generic gpio dump code:
- * pullups, deglitching, open drain drive.
- */
-static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
-{
- struct mcp23s08 *mcp;
- char bank;
- int t;
- unsigned mask;
- int iodir, gpio, gppu;
-
- mcp = gpiochip_get_data(chip);
-
- /* NOTE: we only handle one bank for now ... */
- bank = '0' + ((mcp->addr >> 1) & 0x7);
-
- mutex_lock(&mcp->lock);
-
- t = __check_mcp23s08_reg_cache(mcp);
- if (t) {
- seq_printf(s, " I/O Error\n");
- goto done;
- }
- t = mcp_read(mcp, MCP_IODIR, &iodir);
- if (t) {
- seq_printf(s, " I/O Error\n");
- goto done;
- }
- t = mcp_read(mcp, MCP_GPIO, &gpio);
- if (t) {
- seq_printf(s, " I/O Error\n");
- goto done;
- }
- t = mcp_read(mcp, MCP_GPPU, &gppu);
- if (t) {
- seq_printf(s, " I/O Error\n");
- goto done;
- }
-
- for (t = 0, mask = BIT(0); t < chip->ngpio; t++, mask <<= 1) {
- const char *label;
-
- label = gpiochip_is_requested(chip, t);
- if (!label)
- continue;
-
- seq_printf(s, " gpio-%-3d P%c.%d (%-12s) %s %s %s\n",
- chip->base + t, bank, t, label,
- (iodir & mask) ? "in " : "out",
- (gpio & mask) ? "hi" : "lo",
- (gppu & mask) ? "up" : " ");
- /* NOTE: ignoring the irq-related registers */
- }
-done:
- mutex_unlock(&mcp->lock);
-}
-
-#else
-#define mcp23s08_dbg_show NULL
-#endif
-
-/*----------------------------------------------------------------------*/
-
static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
void *data, unsigned addr, unsigned type,
unsigned int base, int cs)
@@ -785,7 +675,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.get = mcp23s08_get;
mcp->chip.direction_output = mcp23s08_direction_output;
mcp->chip.set = mcp23s08_set;
- mcp->chip.dbg_show = mcp23s08_dbg_show;
#ifdef CONFIG_OF_GPIO
mcp->chip.of_gpio_n_cells = 2;
mcp->chip.of_node = dev->of_node;
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index aa5f949ef219..5b0678f310e5 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1367,6 +1367,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
if (!of_find_property(child, "gpio-controller", NULL)) {
dev_err(pctl->dev,
"No gpio-controller property for bank %u\n", i);
+ of_node_put(child);
ret = -ENODEV;
goto err;
}
@@ -1374,6 +1375,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
irq = irq_of_parse_and_map(child, 0);
if (irq < 0) {
dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
+ of_node_put(child);
ret = irq;
goto err;
}
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 9cfe9d0520ac..021e37b7689e 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -620,14 +620,7 @@ static void rza1_pin_reset(struct rza1_port *port, unsigned int pin)
static inline int rza1_pin_get_direction(struct rza1_port *port,
unsigned int pin)
{
- unsigned long irqflags;
- int input;
-
- spin_lock_irqsave(&port->lock, irqflags);
- input = rza1_get_bit(port, RZA1_PM_REG, pin);
- spin_unlock_irqrestore(&port->lock, irqflags);
-
- return !!input;
+ return !!rza1_get_bit(port, RZA1_PM_REG, pin);
}
/**
@@ -671,14 +664,7 @@ static inline void rza1_pin_set(struct rza1_port *port, unsigned int pin,
static inline int rza1_pin_get(struct rza1_port *port, unsigned int pin)
{
- unsigned long irqflags;
- int val;
-
- spin_lock_irqsave(&port->lock, irqflags);
- val = rza1_get_bit(port, RZA1_PPR_REG, pin);
- spin_unlock_irqrestore(&port->lock, irqflags);
-
- return val;
+ return rza1_get_bit(port, RZA1_PPR_REG, pin);
}
/**
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index e66af93f2cbf..195b442a2343 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1170,7 +1170,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
struct property *pp;
struct st_pinconf *conf;
struct device_node *pins;
- int i = 0, npins = 0, nr_props;
+ int i = 0, npins = 0, nr_props, ret = 0;
pins = of_get_child_by_name(np, "st,pins");
if (!pins)
@@ -1185,7 +1185,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
npins++;
} else {
pr_warn("Invalid st,pins in %pOFn node\n", np);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_node;
}
}
@@ -1195,8 +1196,10 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
grp->pin_conf = devm_kcalloc(info->dev,
npins, sizeof(*conf), GFP_KERNEL);
- if (!grp->pins || !grp->pin_conf)
- return -ENOMEM;
+ if (!grp->pins || !grp->pin_conf) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
/* <bank offset mux direction rt_type rt_delay rt_clk> */
for_each_property_of_node(pins, pp) {
@@ -1229,9 +1232,11 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
}
i++;
}
+
+out_put_node:
of_node_put(pins);
- return 0;
+ return ret;
}
static int st_pctl_parse_functions(struct device_node *np,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
new file mode 100644
index 000000000000..eba872ce4a7c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STMicroelectronics Multi-Function eXpander (STMFX) GPIO expander
+ *
+ * Copyright (C) 2019 STMicroelectronics
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>.
+ */
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/stmfx.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+
+/* GPIOs expander */
+/* GPIO_STATE1 0x10, GPIO_STATE2 0x11, GPIO_STATE3 0x12 */
+#define STMFX_REG_GPIO_STATE STMFX_REG_GPIO_STATE1 /* R */
+/* GPIO_DIR1 0x60, GPIO_DIR2 0x61, GPIO_DIR3 0x63 */
+#define STMFX_REG_GPIO_DIR STMFX_REG_GPIO_DIR1 /* RW */
+/* GPIO_TYPE1 0x64, GPIO_TYPE2 0x65, GPIO_TYPE3 0x66 */
+#define STMFX_REG_GPIO_TYPE STMFX_REG_GPIO_TYPE1 /* RW */
+/* GPIO_PUPD1 0x68, GPIO_PUPD2 0x69, GPIO_PUPD3 0x6A */
+#define STMFX_REG_GPIO_PUPD STMFX_REG_GPIO_PUPD1 /* RW */
+/* GPO_SET1 0x6C, GPO_SET2 0x6D, GPO_SET3 0x6E */
+#define STMFX_REG_GPO_SET STMFX_REG_GPO_SET1 /* RW */
+/* GPO_CLR1 0x70, GPO_CLR2 0x71, GPO_CLR3 0x72 */
+#define STMFX_REG_GPO_CLR STMFX_REG_GPO_CLR1 /* RW */
+/* IRQ_GPI_SRC1 0x48, IRQ_GPI_SRC2 0x49, IRQ_GPI_SRC3 0x4A */
+#define STMFX_REG_IRQ_GPI_SRC STMFX_REG_IRQ_GPI_SRC1 /* RW */
+/* IRQ_GPI_EVT1 0x4C, IRQ_GPI_EVT2 0x4D, IRQ_GPI_EVT3 0x4E */
+#define STMFX_REG_IRQ_GPI_EVT STMFX_REG_IRQ_GPI_EVT1 /* RW */
+/* IRQ_GPI_TYPE1 0x50, IRQ_GPI_TYPE2 0x51, IRQ_GPI_TYPE3 0x52 */
+#define STMFX_REG_IRQ_GPI_TYPE STMFX_REG_IRQ_GPI_TYPE1 /* RW */
+/* IRQ_GPI_PENDING1 0x0C, IRQ_GPI_PENDING2 0x0D, IRQ_GPI_PENDING3 0x0E*/
+#define STMFX_REG_IRQ_GPI_PENDING STMFX_REG_IRQ_GPI_PENDING1 /* R */
+/* IRQ_GPI_ACK1 0x54, IRQ_GPI_ACK2 0x55, IRQ_GPI_ACK3 0x56 */
+#define STMFX_REG_IRQ_GPI_ACK STMFX_REG_IRQ_GPI_ACK1 /* RW */
+
+#define NR_GPIO_REGS 3
+#define NR_GPIOS_PER_REG 8
+#define get_reg(offset) ((offset) / NR_GPIOS_PER_REG)
+#define get_shift(offset) ((offset) % NR_GPIOS_PER_REG)
+#define get_mask(offset) (BIT(get_shift(offset)))
+
+/*
+ * STMFX pinctrl can have up to 24 pins if the other STMFX functions are
+ * not used. Pin availability is managed through the gpio-ranges property.
+ */
+static const struct pinctrl_pin_desc stmfx_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "agpio0"),
+ PINCTRL_PIN(17, "agpio1"),
+ PINCTRL_PIN(18, "agpio2"),
+ PINCTRL_PIN(19, "agpio3"),
+ PINCTRL_PIN(20, "agpio4"),
+ PINCTRL_PIN(21, "agpio5"),
+ PINCTRL_PIN(22, "agpio6"),
+ PINCTRL_PIN(23, "agpio7"),
+};
+
+struct stmfx_pinctrl {
+ struct device *dev;
+ struct stmfx *stmfx;
+ struct pinctrl_dev *pctl_dev;
+ struct pinctrl_desc pctl_desc;
+ struct gpio_chip gpio_chip;
+ struct irq_chip irq_chip;
+ struct mutex lock; /* IRQ bus lock */
+ unsigned long gpio_valid_mask;
+ /* Cache of IRQ_GPI_* registers for bus_lock */
+ u8 irq_gpi_src[NR_GPIO_REGS];
+ u8 irq_gpi_type[NR_GPIO_REGS];
+ u8 irq_gpi_evt[NR_GPIO_REGS];
+ u8 irq_toggle_edge[NR_GPIO_REGS];
+#ifdef CONFIG_PM
+ /* Backup of GPIO_* registers for suspend/resume */
+ u8 bkp_gpio_state[NR_GPIO_REGS];
+ u8 bkp_gpio_dir[NR_GPIO_REGS];
+ u8 bkp_gpio_type[NR_GPIO_REGS];
+ u8 bkp_gpio_pupd[NR_GPIO_REGS];
+#endif
+};
+
+static int stmfx_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = STMFX_REG_GPIO_STATE + get_reg(offset);
+ u32 mask = get_mask(offset);
+ u32 value;
+ int ret;
+
+ ret = regmap_read(pctl->stmfx->map, reg, &value);
+
+ return ret ? ret : !!(value & mask);
+}
+
+static void stmfx_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = value ? STMFX_REG_GPO_SET : STMFX_REG_GPO_CLR;
+ u32 mask = get_mask(offset);
+
+ regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset),
+ mask, mask);
+}
+
+static int stmfx_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset);
+ u32 mask = get_mask(offset);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(pctl->stmfx->map, reg, &val);
+ /*
+	 * On the STMFX, the GPIO direction bit is 0 for input, 1 for output,
+	 * whereas .get_direction must return 0 for output and 1 for input.
+ */
+
+ return ret ? ret : !(val & mask);
+}
+
+static int stmfx_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset);
+ u32 mask = get_mask(offset);
+
+ return regmap_write_bits(pctl->stmfx->map, reg, mask, 0);
+}
+
+static int stmfx_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset);
+ u32 mask = get_mask(offset);
+
+ stmfx_gpio_set(gc, offset, value);
+
+ return regmap_write_bits(pctl->stmfx->map, reg, mask, mask);
+}
+
+static int stmfx_pinconf_get_pupd(struct stmfx_pinctrl *pctl,
+ unsigned int offset)
+{
+ u32 reg = STMFX_REG_GPIO_PUPD + get_reg(offset);
+ u32 pupd, mask = get_mask(offset);
+ int ret;
+
+ ret = regmap_read(pctl->stmfx->map, reg, &pupd);
+ if (ret)
+ return ret;
+
+ return !!(pupd & mask);
+}
+
+static int stmfx_pinconf_set_pupd(struct stmfx_pinctrl *pctl,
+ unsigned int offset, u32 pupd)
+{
+ u32 reg = STMFX_REG_GPIO_PUPD + get_reg(offset);
+ u32 mask = get_mask(offset);
+
+ return regmap_write_bits(pctl->stmfx->map, reg, mask, pupd ? mask : 0);
+}
+
+static int stmfx_pinconf_get_type(struct stmfx_pinctrl *pctl,
+ unsigned int offset)
+{
+ u32 reg = STMFX_REG_GPIO_TYPE + get_reg(offset);
+ u32 type, mask = get_mask(offset);
+ int ret;
+
+ ret = regmap_read(pctl->stmfx->map, reg, &type);
+ if (ret)
+ return ret;
+
+ return !!(type & mask);
+}
+
+static int stmfx_pinconf_set_type(struct stmfx_pinctrl *pctl,
+ unsigned int offset, u32 type)
+{
+ u32 reg = STMFX_REG_GPIO_TYPE + get_reg(offset);
+ u32 mask = get_mask(offset);
+
+ return regmap_write_bits(pctl->stmfx->map, reg, mask, type ? mask : 0);
+}
+
+static int stmfx_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct stmfx_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ u32 param = pinconf_to_config_param(*config);
+ struct pinctrl_gpio_range *range;
+ u32 arg = 0;
+ int ret, dir, type, pupd;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+ if (!range)
+ return -EINVAL;
+
+ dir = stmfx_gpio_get_direction(&pctl->gpio_chip, pin);
+ if (dir < 0)
+ return dir;
+ type = stmfx_pinconf_get_type(pctl, pin);
+ if (type < 0)
+ return type;
+ pupd = stmfx_pinconf_get_pupd(pctl, pin);
+ if (pupd < 0)
+ return pupd;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if ((!dir && (!type || !pupd)) || (dir && !type))
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (dir && type && !pupd)
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (type && pupd)
+ arg = 1;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if ((!dir && type) || (dir && !type))
+ arg = 1;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if ((!dir && !type) || (dir && type))
+ arg = 1;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ if (dir)
+ return -EINVAL;
+
+ ret = stmfx_gpio_get(&pctl->gpio_chip, pin);
+ if (ret < 0)
+ return ret;
+
+ arg = ret;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct stmfx_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_gpio_range *range;
+ enum pin_config_param param;
+ u32 arg;
+ int dir, i, ret;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+ if (!range) {
+ dev_err(pctldev->dev, "pin %d is not available\n", pin);
+ return -EINVAL;
+ }
+
+ dir = stmfx_gpio_get_direction(&pctl->gpio_chip, pin);
+ if (dir < 0)
+ return dir;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = stmfx_pinconf_set_pupd(pctl, pin, 0);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = stmfx_pinconf_set_pupd(pctl, pin, 1);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (!dir)
+ ret = stmfx_pinconf_set_type(pctl, pin, 1);
+ else
+ ret = stmfx_pinconf_set_type(pctl, pin, 0);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (!dir)
+ ret = stmfx_pinconf_set_type(pctl, pin, 0);
+ else
+ ret = stmfx_pinconf_set_type(pctl, pin, 1);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ ret = stmfx_gpio_direction_output(&pctl->gpio_chip,
+ pin, arg);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int offset)
+{
+ struct stmfx_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_gpio_range *range;
+ int dir, type, pupd, val;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, offset);
+ if (!range)
+ return;
+
+ dir = stmfx_gpio_get_direction(&pctl->gpio_chip, offset);
+ if (dir < 0)
+ return;
+ type = stmfx_pinconf_get_type(pctl, offset);
+ if (type < 0)
+ return;
+ pupd = stmfx_pinconf_get_pupd(pctl, offset);
+ if (pupd < 0)
+ return;
+ val = stmfx_gpio_get(&pctl->gpio_chip, offset);
+ if (val < 0)
+ return;
+
+ if (!dir) {
+ seq_printf(s, "output %s ", val ? "high" : "low");
+ if (type)
+ seq_printf(s, "open drain %s internal pull-up ",
+ pupd ? "with" : "without");
+ else
+ seq_puts(s, "push pull no pull ");
+ } else {
+ seq_printf(s, "input %s ", val ? "high" : "low");
+ if (type)
+ seq_printf(s, "with internal pull-%s ",
+ pupd ? "up" : "down");
+ else
+ seq_printf(s, "%s ", pupd ? "floating" : "analog");
+ }
+}
+
+static const struct pinconf_ops stmfx_pinconf_ops = {
+ .pin_config_get = stmfx_pinconf_get,
+ .pin_config_set = stmfx_pinconf_set,
+ .pin_config_dbg_show = stmfx_pinconf_dbg_show,
+};
+
+static int stmfx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static const char *stmfx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return NULL;
+}
+
+static int stmfx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ return -ENOTSUPP;
+}
+
+static const struct pinctrl_ops stmfx_pinctrl_ops = {
+ .get_groups_count = stmfx_pinctrl_get_groups_count,
+ .get_group_name = stmfx_pinctrl_get_group_name,
+ .get_group_pins = stmfx_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static void stmfx_pinctrl_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+ u32 reg = get_reg(data->hwirq);
+ u32 mask = get_mask(data->hwirq);
+
+ pctl->irq_gpi_src[reg] &= ~mask;
+}
+
+static void stmfx_pinctrl_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+ u32 reg = get_reg(data->hwirq);
+ u32 mask = get_mask(data->hwirq);
+
+ pctl->irq_gpi_src[reg] |= mask;
+}
+
+static int stmfx_pinctrl_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+ u32 reg = get_reg(data->hwirq);
+ u32 mask = get_mask(data->hwirq);
+
+ if (type == IRQ_TYPE_NONE)
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_BOTH) {
+ pctl->irq_gpi_evt[reg] |= mask;
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ pctl->irq_gpi_evt[reg] &= ~mask;
+ irq_set_handler_locked(data, handle_level_irq);
+ }
+
+ if ((type & IRQ_TYPE_EDGE_RISING) || (type & IRQ_TYPE_LEVEL_HIGH))
+ pctl->irq_gpi_type[reg] |= mask;
+ else
+ pctl->irq_gpi_type[reg] &= ~mask;
+
+ /*
+	 * In case of (type & IRQ_TYPE_EDGE_BOTH), we need to know the current
+ * GPIO value to set the right edge trigger. But in atomic context
+ * here we can't access registers over I2C. That's why (type &
+ * IRQ_TYPE_EDGE_BOTH) will be managed in .irq_sync_unlock.
+ */
+
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ pctl->irq_toggle_edge[reg] |= mask;
+ else
+ pctl->irq_toggle_edge[reg] &= mask;
+
+ return 0;
+}
+
+static void stmfx_pinctrl_irq_bus_lock(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+
+ mutex_lock(&pctl->lock);
+}
+
+static void stmfx_pinctrl_irq_bus_sync_unlock(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+ u32 reg = get_reg(data->hwirq);
+ u32 mask = get_mask(data->hwirq);
+
+ /*
+	 * In case of IRQ_TYPE_EDGE_BOTH, read the current GPIO value
+ * (this couldn't be done in .irq_set_type because of atomic context)
+ * to set the right irq trigger type.
+ */
+ if (pctl->irq_toggle_edge[reg] & mask) {
+ if (stmfx_gpio_get(gpio_chip, data->hwirq))
+ pctl->irq_gpi_type[reg] &= ~mask;
+ else
+ pctl->irq_gpi_type[reg] |= mask;
+ }
+
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_EVT,
+ pctl->irq_gpi_evt, NR_GPIO_REGS);
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_TYPE,
+ pctl->irq_gpi_type, NR_GPIO_REGS);
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
+ pctl->irq_gpi_src, NR_GPIO_REGS);
+
+ mutex_unlock(&pctl->lock);
+}
+
+static void stmfx_pinctrl_irq_toggle_trigger(struct stmfx_pinctrl *pctl,
+ unsigned int offset)
+{
+ u32 reg = get_reg(offset);
+ u32 mask = get_mask(offset);
+ int val;
+
+ if (!(pctl->irq_toggle_edge[reg] & mask))
+ return;
+
+ val = stmfx_gpio_get(&pctl->gpio_chip, offset);
+ if (val < 0)
+ return;
+
+ if (val) {
+ pctl->irq_gpi_type[reg] &= mask;
+ regmap_write_bits(pctl->stmfx->map,
+ STMFX_REG_IRQ_GPI_TYPE + reg,
+ mask, 0);
+
+ } else {
+ pctl->irq_gpi_type[reg] |= mask;
+ regmap_write_bits(pctl->stmfx->map,
+ STMFX_REG_IRQ_GPI_TYPE + reg,
+ mask, mask);
+ }
+}
+
+static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
+{
+ struct stmfx_pinctrl *pctl = (struct stmfx_pinctrl *)dev_id;
+ struct gpio_chip *gc = &pctl->gpio_chip;
+ u8 pending[NR_GPIO_REGS];
+ u8 src[NR_GPIO_REGS] = {0, 0, 0};
+ unsigned long n, status;
+ int ret;
+
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING,
+ &pending, NR_GPIO_REGS);
+ if (ret)
+ return IRQ_NONE;
+
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
+ src, NR_GPIO_REGS);
+
+ status = *(unsigned long *)pending;
+ for_each_set_bit(n, &status, gc->ngpio) {
+ handle_nested_irq(irq_find_mapping(gc->irq.domain, n));
+ stmfx_pinctrl_irq_toggle_trigger(pctl, n);
+ }
+
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
+ pctl->irq_gpi_src, NR_GPIO_REGS);
+
+ return IRQ_HANDLED;
+}
+
+static int stmfx_pinctrl_gpio_function_enable(struct stmfx_pinctrl *pctl)
+{
+ struct pinctrl_gpio_range *gpio_range;
+ struct pinctrl_dev *pctl_dev = pctl->pctl_dev;
+ u32 func = STMFX_FUNC_GPIO;
+
+ pctl->gpio_valid_mask = GENMASK(15, 0);
+
+ gpio_range = pinctrl_find_gpio_range_from_pin(pctl_dev, 16);
+ if (gpio_range) {
+ func |= STMFX_FUNC_ALTGPIO_LOW;
+ pctl->gpio_valid_mask |= GENMASK(19, 16);
+ }
+
+ gpio_range = pinctrl_find_gpio_range_from_pin(pctl_dev, 20);
+ if (gpio_range) {
+ func |= STMFX_FUNC_ALTGPIO_HIGH;
+ pctl->gpio_valid_mask |= GENMASK(23, 20);
+ }
+
+ return stmfx_function_enable(pctl->stmfx, func);
+}
+
+static int stmfx_pinctrl_probe(struct platform_device *pdev)
+{
+ struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
+ struct device_node *np = pdev->dev.of_node;
+ struct stmfx_pinctrl *pctl;
+ u32 n;
+ int irq, ret;
+
+ pctl = devm_kzalloc(stmfx->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pctl);
+
+ pctl->dev = &pdev->dev;
+ pctl->stmfx = stmfx;
+
+ if (!of_find_property(np, "gpio-ranges", NULL)) {
+ dev_err(pctl->dev, "missing required gpio-ranges property\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(pctl->dev, "failed to get irq\n");
+ return -ENXIO;
+ }
+
+ mutex_init(&pctl->lock);
+
+ /* Register pin controller */
+ pctl->pctl_desc.name = "stmfx-pinctrl";
+ pctl->pctl_desc.pctlops = &stmfx_pinctrl_ops;
+ pctl->pctl_desc.confops = &stmfx_pinconf_ops;
+ pctl->pctl_desc.pins = stmfx_pins;
+ pctl->pctl_desc.npins = ARRAY_SIZE(stmfx_pins);
+ pctl->pctl_desc.owner = THIS_MODULE;
+
+ ret = devm_pinctrl_register_and_init(pctl->dev, &pctl->pctl_desc,
+ pctl, &pctl->pctl_dev);
+ if (ret) {
+ dev_err(pctl->dev, "pinctrl registration failed\n");
+ return ret;
+ }
+
+ ret = pinctrl_enable(pctl->pctl_dev);
+ if (ret) {
+ dev_err(pctl->dev, "pinctrl enable failed\n");
+ return ret;
+ }
+
+ /* Register gpio controller */
+ pctl->gpio_chip.label = "stmfx-gpio";
+ pctl->gpio_chip.parent = pctl->dev;
+ pctl->gpio_chip.get_direction = stmfx_gpio_get_direction;
+ pctl->gpio_chip.direction_input = stmfx_gpio_direction_input;
+ pctl->gpio_chip.direction_output = stmfx_gpio_direction_output;
+ pctl->gpio_chip.get = stmfx_gpio_get;
+ pctl->gpio_chip.set = stmfx_gpio_set;
+ pctl->gpio_chip.set_config = gpiochip_generic_config;
+ pctl->gpio_chip.base = -1;
+ pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
+ pctl->gpio_chip.can_sleep = true;
+ pctl->gpio_chip.of_node = np;
+ pctl->gpio_chip.need_valid_mask = true;
+
+ ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
+ if (ret) {
+ dev_err(pctl->dev, "gpio_chip registration failed\n");
+ return ret;
+ }
+
+ ret = stmfx_pinctrl_gpio_function_enable(pctl);
+ if (ret)
+ return ret;
+
+ pctl->irq_chip.name = dev_name(pctl->dev);
+ pctl->irq_chip.irq_mask = stmfx_pinctrl_irq_mask;
+ pctl->irq_chip.irq_unmask = stmfx_pinctrl_irq_unmask;
+ pctl->irq_chip.irq_set_type = stmfx_pinctrl_irq_set_type;
+ pctl->irq_chip.irq_bus_lock = stmfx_pinctrl_irq_bus_lock;
+ pctl->irq_chip.irq_bus_sync_unlock = stmfx_pinctrl_irq_bus_sync_unlock;
+ for_each_clear_bit(n, &pctl->gpio_valid_mask, pctl->gpio_chip.ngpio)
+ clear_bit(n, pctl->gpio_chip.valid_mask);
+
+ ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
+ 0, handle_bad_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(pctl->dev, "cannot add irqchip to gpiochip\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(pctl->dev, irq, NULL,
+ stmfx_pinctrl_irq_thread_fn,
+ IRQF_ONESHOT,
+ pctl->irq_chip.name, pctl);
+ if (ret) {
+ dev_err(pctl->dev, "cannot request irq%d\n", irq);
+ return ret;
+ }
+
+ gpiochip_set_nested_irqchip(&pctl->gpio_chip, &pctl->irq_chip, irq);
+
+ dev_info(pctl->dev,
+ "%ld GPIOs available\n", hweight_long(pctl->gpio_valid_mask));
+
+ return 0;
+}
+
+static int stmfx_pinctrl_remove(struct platform_device *pdev)
+{
+ struct stmfx *stmfx = dev_get_platdata(&pdev->dev);
+
+ return stmfx_function_disable(stmfx,
+ STMFX_FUNC_GPIO |
+ STMFX_FUNC_ALTGPIO_LOW |
+ STMFX_FUNC_ALTGPIO_HIGH);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stmfx_pinctrl_backup_regs(struct stmfx_pinctrl *pctl)
+{
+ int ret;
+
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_STATE,
+ &pctl->bkp_gpio_state, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_DIR,
+ &pctl->bkp_gpio_dir, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_TYPE,
+ &pctl->bkp_gpio_type, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_PUPD,
+ &pctl->bkp_gpio_pupd, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int stmfx_pinctrl_restore_regs(struct stmfx_pinctrl *pctl)
+{
+ int ret;
+
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_DIR,
+ pctl->bkp_gpio_dir, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_TYPE,
+ pctl->bkp_gpio_type, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_PUPD,
+ pctl->bkp_gpio_pupd, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPO_SET,
+ pctl->bkp_gpio_state, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_EVT,
+ pctl->irq_gpi_evt, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_TYPE,
+ pctl->irq_gpi_type, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
+ pctl->irq_gpi_src, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int stmfx_pinctrl_suspend(struct device *dev)
+{
+ struct stmfx_pinctrl *pctl = dev_get_drvdata(dev);
+ int ret;
+
+ ret = stmfx_pinctrl_backup_regs(pctl);
+ if (ret) {
+ dev_err(pctl->dev, "registers backup failure\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stmfx_pinctrl_resume(struct device *dev)
+{
+ struct stmfx_pinctrl *pctl = dev_get_drvdata(dev);
+ int ret;
+
+ ret = stmfx_pinctrl_restore_regs(pctl);
+ if (ret) {
+ dev_err(pctl->dev, "registers restoration failure\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stmfx_pinctrl_dev_pm_ops,
+ stmfx_pinctrl_suspend, stmfx_pinctrl_resume);
+
+static const struct of_device_id stmfx_pinctrl_of_match[] = {
+ { .compatible = "st,stmfx-0300-pinctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stmfx_pinctrl_of_match);
+
+static struct platform_driver stmfx_pinctrl_driver = {
+ .driver = {
+ .name = "stmfx-pinctrl",
+ .of_match_table = stmfx_pinctrl_of_match,
+ .pm = &stmfx_pinctrl_dev_pm_ops,
+ },
+ .probe = stmfx_pinctrl_probe,
+ .remove = stmfx_pinctrl_remove,
+};
+module_platform_driver(stmfx_pinctrl_driver);
+
+MODULE_DESCRIPTION("STMFX pinctrl/GPIO driver");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index 44c6b753f692..85ddf49a5188 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -71,6 +71,7 @@ s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
}
clk_base = of_iomap(np, 0);
+ of_node_put(np);
if (!clk_base) {
pr_err("%s: failed to map clock registers\n", __func__);
return ERR_PTR(-EINVAL);
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index e941ba60d4b7..2dd716b016a3 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -3,201 +3,183 @@
# Renesas SH and SH Mobile PINCTRL drivers
#
-if ARCH_RENESAS || SUPERH
-
config PINCTRL_SH_PFC
+ bool "Renesas SoC pin control support" if COMPILE_TEST && !(ARCH_RENESAS || SUPERH)
+ default y if ARCH_RENESAS || SUPERH
select PINMUX
select PINCONF
select GENERIC_PINCONF
- def_bool y
+ select PINCTRL_PFC_EMEV2 if ARCH_EMEV2
+ select PINCTRL_PFC_R8A73A4 if ARCH_R8A73A4
+ select PINCTRL_PFC_R8A7740 if ARCH_R8A7740
+ select PINCTRL_PFC_R8A7743 if ARCH_R8A7743
+ select PINCTRL_PFC_R8A7744 if ARCH_R8A7744
+ select PINCTRL_PFC_R8A7745 if ARCH_R8A7745
+ select PINCTRL_PFC_R8A77470 if ARCH_R8A77470
+ select PINCTRL_PFC_R8A774A1 if ARCH_R8A774A1
+ select PINCTRL_PFC_R8A774C0 if ARCH_R8A774C0
+ select PINCTRL_PFC_R8A7778 if ARCH_R8A7778
+ select PINCTRL_PFC_R8A7779 if ARCH_R8A7779
+ select PINCTRL_PFC_R8A7790 if ARCH_R8A7790
+ select PINCTRL_PFC_R8A7791 if ARCH_R8A7791
+ select PINCTRL_PFC_R8A7792 if ARCH_R8A7792
+ select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
+ select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
+ select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
+ select PINCTRL_PFC_R8A7796 if ARCH_R8A7796
+ select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
+ select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
+ select PINCTRL_PFC_R8A77980 if ARCH_R8A77980
+ select PINCTRL_PFC_R8A77990 if ARCH_R8A77990
+ select PINCTRL_PFC_R8A77995 if ARCH_R8A77995
+ select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
+ select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
+ select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
+ select PINCTRL_PFC_SH73A0 if ARCH_SH73A0
+ select PINCTRL_PFC_SH7720 if CPU_SUBTYPE_SH7720
+ select PINCTRL_PFC_SH7722 if CPU_SUBTYPE_SH7722
+ select PINCTRL_PFC_SH7723 if CPU_SUBTYPE_SH7723
+ select PINCTRL_PFC_SH7724 if CPU_SUBTYPE_SH7724
+ select PINCTRL_PFC_SH7734 if CPU_SUBTYPE_SH7734
+ select PINCTRL_PFC_SH7757 if CPU_SUBTYPE_SH7757
+ select PINCTRL_PFC_SH7785 if CPU_SUBTYPE_SH7785
+ select PINCTRL_PFC_SH7786 if CPU_SUBTYPE_SH7786
+ select PINCTRL_PFC_SHX3 if CPU_SUBTYPE_SHX3
help
- This enables pin control drivers for SH and SH Mobile platforms
+ This enables pin control drivers for Renesas SuperH and ARM platforms
config PINCTRL_SH_PFC_GPIO
select GPIOLIB
- select PINCTRL_SH_PFC
bool
help
This enables pin control and GPIO drivers for SH/SH Mobile platforms
+config PINCTRL_SH_FUNC_GPIO
+ select PINCTRL_SH_PFC_GPIO
+ bool
+ help
+ This enables legacy function GPIOs for SH platforms
+
config PINCTRL_PFC_EMEV2
- def_bool y
- depends on ARCH_EMEV2
- select PINCTRL_SH_PFC
+ bool "Emma Mobile AV2 pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A73A4
- def_bool y
- depends on ARCH_R8A73A4
+ bool "R-Mobile APE6 pin control support" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_R8A7740
- def_bool y
- depends on ARCH_R8A7740
+ bool "R-Mobile A1 pin control support" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_R8A7743
- def_bool y
- depends on ARCH_R8A7743
- select PINCTRL_SH_PFC
+ bool "RZ/G1M pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7744
- def_bool y
- depends on ARCH_R8A7744
- select PINCTRL_SH_PFC
+ bool "RZ/G1N pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7745
- def_bool y
- depends on ARCH_R8A7745
- select PINCTRL_SH_PFC
+ bool "RZ/G1E pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77470
- def_bool y
- depends on ARCH_R8A77470
- select PINCTRL_SH_PFC
+ bool "RZ/G1C pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A774A1
- def_bool y
- depends on ARCH_R8A774A1
- select PINCTRL_SH_PFC
+ bool "RZ/G2M pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A774C0
- def_bool y
- depends on ARCH_R8A774C0
- select PINCTRL_SH_PFC
+ bool "RZ/G2E pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7778
- def_bool y
- depends on ARCH_R8A7778
- select PINCTRL_SH_PFC
+ bool "R-Car M1A pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7779
- def_bool y
- depends on ARCH_R8A7779
- select PINCTRL_SH_PFC
+ bool "R-Car H1 pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7790
- def_bool y
- depends on ARCH_R8A7790
- select PINCTRL_SH_PFC
+ bool "R-Car H2 pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7791
- def_bool y
- depends on ARCH_R8A7791
- select PINCTRL_SH_PFC
+ bool "R-Car M2-W pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7792
- def_bool y
- depends on ARCH_R8A7792
- select PINCTRL_SH_PFC
+ bool "R-Car V2H pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7793
- def_bool y
- depends on ARCH_R8A7793
- select PINCTRL_SH_PFC
+ bool "R-Car M2-N pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7794
- def_bool y
- depends on ARCH_R8A7794
- select PINCTRL_SH_PFC
+ bool "R-Car E2 pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7795
- def_bool y
- depends on ARCH_R8A7795
- select PINCTRL_SH_PFC
+ bool "R-Car H3 pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A7796
- def_bool y
- depends on ARCH_R8A7796
- select PINCTRL_SH_PFC
+ bool "R-Car M3-W pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77965
- def_bool y
- depends on ARCH_R8A77965
- select PINCTRL_SH_PFC
+ bool "R-Car M3-N pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77970
- def_bool y
- depends on ARCH_R8A77970
- select PINCTRL_SH_PFC
+ bool "R-Car V3M pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77980
- def_bool y
- depends on ARCH_R8A77980
- select PINCTRL_SH_PFC
+ bool "R-Car V3H pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77990
- def_bool y
- depends on ARCH_R8A77990
- select PINCTRL_SH_PFC
+ bool "R-Car E3 pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77995
- def_bool y
- depends on ARCH_R8A77995
- select PINCTRL_SH_PFC
+ bool "R-Car D3 pin control support" if COMPILE_TEST
config PINCTRL_PFC_SH7203
- def_bool y
- depends on CPU_SUBTYPE_SH7203
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7203 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7264
- def_bool y
- depends on CPU_SUBTYPE_SH7264
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7264 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7269
- def_bool y
- depends on CPU_SUBTYPE_SH7269
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7269 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH73A0
- def_bool y
- depends on ARCH_SH73A0
+ bool "SH-Mobile AG5 pin control support" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
select REGULATOR
config PINCTRL_PFC_SH7720
- def_bool y
- depends on CPU_SUBTYPE_SH7720
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7720 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7722
- def_bool y
- depends on CPU_SUBTYPE_SH7722
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7722 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7723
- def_bool y
- depends on CPU_SUBTYPE_SH7723
- select PINCTRL_SH_PFC_GPIO
+ bool "SH-Mobile R2 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7724
- def_bool y
- depends on CPU_SUBTYPE_SH7724
- select PINCTRL_SH_PFC_GPIO
+ bool "SH-Mobile R2R pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7734
- def_bool y
- depends on CPU_SUBTYPE_SH7734
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7734 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7757
- def_bool y
- depends on CPU_SUBTYPE_SH7757
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7757 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7785
- def_bool y
- depends on CPU_SUBTYPE_SH7785
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7785 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7786
- def_bool y
- depends on CPU_SUBTYPE_SH7786
- select PINCTRL_SH_PFC_GPIO
+ bool "SH7786 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SHX3
- def_bool y
- depends on CPU_SUBTYPE_SHX3
- select PINCTRL_SH_PFC_GPIO
-endif
+ bool "SH-X3 pin control support" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 82ebb2a91ee0..8c95abcfcc00 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -38,3 +38,18 @@ obj-$(CONFIG_PINCTRL_PFC_SH7757) += pfc-sh7757.o
obj-$(CONFIG_PINCTRL_PFC_SH7785) += pfc-sh7785.o
obj-$(CONFIG_PINCTRL_PFC_SH7786) += pfc-sh7786.o
obj-$(CONFIG_PINCTRL_PFC_SHX3) += pfc-shx3.o
+
+ifeq ($(CONFIG_COMPILE_TEST),y)
+CFLAGS_pfc-sh7203.o += -I$(srctree)/arch/sh/include/cpu-sh2a
+CFLAGS_pfc-sh7264.o += -I$(srctree)/arch/sh/include/cpu-sh2a
+CFLAGS_pfc-sh7269.o += -I$(srctree)/arch/sh/include/cpu-sh2a
+CFLAGS_pfc-sh7720.o += -I$(srctree)/arch/sh/include/cpu-sh3
+CFLAGS_pfc-sh7722.o += -I$(srctree)/arch/sh/include/cpu-sh4
+CFLAGS_pfc-sh7723.o += -I$(srctree)/arch/sh/include/cpu-sh4
+CFLAGS_pfc-sh7724.o += -I$(srctree)/arch/sh/include/cpu-sh4
+CFLAGS_pfc-sh7734.o += -I$(srctree)/arch/sh/include/cpu-sh4
+CFLAGS_pfc-sh7757.o += -I$(srctree)/arch/sh/include/cpu-sh4
+CFLAGS_pfc-sh7785.o += -I$(srctree)/arch/sh/include/cpu-sh4
+CFLAGS_pfc-sh7786.o += -I$(srctree)/arch/sh/include/cpu-sh4
+CFLAGS_pfc-shx3.o += -I$(srctree)/arch/sh/include/cpu-sh4
+endif
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index f1cfcc8c6544..3f989f5cb021 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -571,6 +571,13 @@ static const struct of_device_id sh_pfc_of_table[] = {
.compatible = "renesas,pfc-r8a7795",
.data = &r8a7795_pinmux_info,
},
+#ifdef DEBUG
+ {
+ /* For sanity checks only (nothing matches against this) */
+ .compatible = "renesas,pfc-r8a77950", /* R-Car H3 ES1.0 */
+ .data = &r8a7795es1_pinmux_info,
+ },
+#endif /* DEBUG */
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7796
{
@@ -709,6 +716,128 @@ static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
+#ifdef DEBUG
+static bool is0s(const u16 *enum_ids, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ if (enum_ids[i])
+ return false;
+
+ return true;
+}
+
+static unsigned int sh_pfc_errors;
+static unsigned int sh_pfc_warnings;
+
+static void sh_pfc_check_cfg_reg(const char *drvname,
+ const struct pinmux_cfg_reg *cfg_reg)
+{
+ unsigned int i, n, rw, fw;
+
+ if (cfg_reg->field_width) {
+ /* Checked at build time */
+ return;
+ }
+
+ for (i = 0, n = 0, rw = 0; (fw = cfg_reg->var_field_width[i]); i++) {
+ if (fw > 3 && is0s(&cfg_reg->enum_ids[n], 1 << fw)) {
+ pr_warn("%s: reg 0x%x: reserved field [%u:%u] can be split to reduce table size\n",
+ drvname, cfg_reg->reg, rw, rw + fw - 1);
+ sh_pfc_warnings++;
+ }
+ n += 1 << fw;
+ rw += fw;
+ }
+
+ if (rw != cfg_reg->reg_width) {
+ pr_err("%s: reg 0x%x: var_field_width declares %u instead of %u bits\n",
+ drvname, cfg_reg->reg, rw, cfg_reg->reg_width);
+ sh_pfc_errors++;
+ }
+
+ if (n != cfg_reg->nr_enum_ids) {
+ pr_err("%s: reg 0x%x: enum_ids[] has %u instead of %u values\n",
+ drvname, cfg_reg->reg, cfg_reg->nr_enum_ids, n);
+ sh_pfc_errors++;
+ }
+}
+
+static void sh_pfc_check_info(const struct sh_pfc_soc_info *info)
+{
+ const struct sh_pfc_function *func;
+ const char *drvname = info->name;
+ unsigned int *refcnts;
+ unsigned int i, j, k;
+
+ pr_info("Checking %s\n", drvname);
+
+ /* Check groups and functions */
+ refcnts = kcalloc(info->nr_groups, sizeof(*refcnts), GFP_KERNEL);
+ if (!refcnts)
+ return;
+
+ for (i = 0; i < info->nr_functions; i++) {
+ func = &info->functions[i];
+ for (j = 0; j < func->nr_groups; j++) {
+ for (k = 0; k < info->nr_groups; k++) {
+ if (!strcmp(func->groups[j],
+ info->groups[k].name)) {
+ refcnts[k]++;
+ break;
+ }
+ }
+
+ if (k == info->nr_groups) {
+ pr_err("%s: function %s: group %s not found\n",
+ drvname, func->name, func->groups[j]);
+ sh_pfc_errors++;
+ }
+ }
+ }
+
+ for (i = 0; i < info->nr_groups; i++) {
+ if (!refcnts[i]) {
+ pr_err("%s: orphan group %s\n", drvname,
+ info->groups[i].name);
+ sh_pfc_errors++;
+ } else if (refcnts[i] > 1) {
+ pr_err("%s: group %s referred by %u functions\n",
+ drvname, info->groups[i].name, refcnts[i]);
+ sh_pfc_warnings++;
+ }
+ }
+
+ kfree(refcnts);
+
+ /* Check config register descriptions */
+ for (i = 0; info->cfg_regs && info->cfg_regs[i].reg; i++)
+ sh_pfc_check_cfg_reg(drvname, &info->cfg_regs[i]);
+}
+
+static void sh_pfc_check_driver(const struct platform_driver *pdrv)
+{
+ unsigned int i;
+
+ pr_warn("Checking builtin pinmux tables\n");
+
+ for (i = 0; pdrv->id_table[i].name[0]; i++)
+ sh_pfc_check_info((void *)pdrv->id_table[i].driver_data);
+
+#ifdef CONFIG_OF
+ for (i = 0; pdrv->driver.of_match_table[i].compatible[0]; i++)
+ sh_pfc_check_info(pdrv->driver.of_match_table[i].data);
+#endif
+
+ pr_warn("Detected %u errors and %u warnings\n", sh_pfc_errors,
+ sh_pfc_warnings);
+}
+
+#else /* !DEBUG */
+static inline void sh_pfc_check_driver(struct platform_driver *pdrv) {}
+#endif /* !DEBUG */
+
static int sh_pfc_probe(struct platform_device *pdev)
{
#ifdef CONFIG_OF
@@ -840,6 +969,7 @@ static struct platform_driver sh_pfc_driver = {
static int __init sh_pfc_init(void)
{
+ sh_pfc_check_driver(&sh_pfc_driver);
return platform_driver_register(&sh_pfc_driver);
}
postcore_initcall(sh_pfc_init);
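
For readers following the new DEBUG-only checks above: sh_pfc_check_cfg_reg() essentially verifies two arithmetic invariants of each variable-width register description. A minimal standalone sketch of those invariants, assuming a widths[] array equivalent to the GROUP(...) of field widths (this is not kernel code, just the arithmetic):

/*
 * Standalone sketch: every field of width w consumes 1 << w entries of
 * enum_ids[], and the field widths together must cover the register.
 */
static bool cfg_reg_is_consistent(const unsigned int *widths,
				  unsigned int n_fields,
				  unsigned int reg_width,
				  unsigned int nr_enum_ids)
{
	unsigned int i, bits = 0, ids = 0;

	for (i = 0; i < n_fields; i++) {
		bits += widths[i];	/* bits described so far */
		ids += 1U << widths[i];	/* enum values consumed so far */
	}

	return bits == reg_width && ids == nr_enum_ids;
}

When either comparison fails, the driver prints the corresponding "var_field_width declares ... bits" or "enum_ids[] has ... values" error shown in the hunk above.
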
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 4f3a34ee1cd4..97c1332c1045 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -252,7 +252,7 @@ static int gpio_pin_setup(struct sh_pfc_chip *chip)
* Function GPIOs
*/
-#ifdef CONFIG_SUPERH
+#ifdef CONFIG_PINCTRL_SH_FUNC_GPIO
static int gpio_function_request(struct gpio_chip *gc, unsigned offset)
{
static bool __print_once;
@@ -292,7 +292,7 @@ static int gpio_function_setup(struct sh_pfc_chip *chip)
return 0;
}
-#endif
+#endif /* CONFIG_PINCTRL_SH_FUNC_GPIO */
/* -----------------------------------------------------------------------------
* Register/unregister
@@ -369,7 +369,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
if (IS_ENABLED(CONFIG_OF) && pfc->dev->of_node)
return 0;
-#ifdef CONFIG_SUPERH
+#ifdef CONFIG_PINCTRL_SH_FUNC_GPIO
/*
* Register the GPIO to pin mappings. As pins with GPIO ports
* must come first in the ranges, skip the pins without GPIO
@@ -397,7 +397,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
if (IS_ERR(chip))
return PTR_ERR(chip);
-#endif /* CONFIG_SUPERH */
+#endif /* CONFIG_PINCTRL_SH_FUNC_GPIO */
return 0;
}
diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c
index 310c6f3ee7cc..0af1ef82a1a8 100644
--- a/drivers/pinctrl/sh-pfc/pfc-emev2.c
+++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c
@@ -1433,7 +1433,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xe0140200, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe0140200, 32, 1, GROUP(
0, PORT31_FN, /* PIN: J18 */
0, PORT30_FN, /* PIN: H18 */
0, PORT29_FN, /* PIN: G18 */
@@ -1466,9 +1466,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_JT_SEL, PORT2_FN, /* PIN: V9 */
0, PORT1_FN, /* PIN: U10 */
0, PORT0_FN, /* PIN: V10 */
- }
+ ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe0140204, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe0140204, 32, 1, GROUP(
FN_SDI1_CMD, PORT63_FN, /* PIN: AC21 */
FN_SDI1_CKI, PORT62_FN, /* PIN: AA23 */
FN_SDI1_CKO, PORT61_FN, /* PIN: AB22 */
@@ -1501,9 +1501,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_LCD3_R2, PORT34_FN, /* PIN: A19 */
FN_LCD3_R1, PORT33_FN, /* PIN: B20 */
FN_LCD3_R0, PORT32_FN, /* PIN: A20 */
- }
+ ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe0140208, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe0140208, 32, 1, GROUP(
FN_AB_1_0_PORT95, PORT95_FN, /* PIN: L21 */
FN_AB_1_0_PORT94, PORT94_FN, /* PIN: K21 */
FN_AB_1_0_PORT93, PORT93_FN, /* PIN: J21 */
@@ -1536,9 +1536,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SDI1_DATA2, PORT66_FN, /* PIN: AB19 */
FN_SDI1_DATA1, PORT65_FN, /* PIN: AB20 */
FN_SDI1_DATA0, PORT64_FN, /* PIN: AB21 */
- }
+ ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe014020c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe014020c, 32, 1, GROUP(
FN_NTSC_DATA4, PORT127_FN, /* PIN: T20 */
FN_NTSC_DATA3, PORT126_FN, /* PIN: R18 */
FN_NTSC_DATA2, PORT125_FN, /* PIN: R20 */
@@ -1571,9 +1571,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_AB_9_8_PORT98, PORT98_FN, /* PIN: M20 */
FN_AB_9_8_PORT97, PORT97_FN, /* PIN: N21 */
FN_AB_A20, PORT96_FN, /* PIN: M21 */
- }
+ ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe0140210, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe0140210, 32, 1, GROUP(
0, 0,
FN_UART_1_0_PORT158, PORT158_FN, /* PIN: AB10 */
FN_UART_1_0_PORT157, PORT157_FN, /* PIN: AA10 */
@@ -1606,11 +1606,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_NTSC_DATA7, PORT130_FN, /* PIN: U18 */
FN_NTSC_DATA6, PORT129_FN, /* PIN: U20 */
FN_NTSC_DATA5, PORT128_FN, /* PIN: T18 */
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_LCD3", 0xe0140284, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 2, 2, 2, 2, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2),
+ GROUP(
/* 31 - 12 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1624,11 +1626,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 1 - 0 */
FN_SEL_LCD3_1_0_00, FN_SEL_LCD3_1_0_01, 0, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_UART", 0xe0140288, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2),
+ GROUP(
/* 31 - 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1636,11 +1640,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 1 - 0 */
FN_SEL_UART_1_0_00, FN_SEL_UART_1_0_01, 0, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_IIC", 0xe014028c, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2),
+ GROUP(
/* 31 - 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1648,11 +1654,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 1 - 0 */
FN_SEL_IIC_1_0_00, FN_SEL_IIC_1_0_01, 0, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_AB", 0xe0140294, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2),
+ GROUP(
/* 31 - 14 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1673,11 +1680,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_AB_3_2_10, FN_SEL_AB_3_2_11,
/* 1 - 0 */
FN_SEL_AB_1_0_00, 0, FN_SEL_AB_1_0_10, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_USI", 0xe0140298, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2),
+ GROUP(
/* 31 - 10 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1692,11 +1701,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_USI_3_2_00, FN_SEL_USI_3_2_01, 0, 0,
/* 1 - 0 */
FN_SEL_USI_1_0_00, FN_SEL_USI_1_0_01, 0, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_HSI", 0xe01402a8, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2),
+ GROUP(
/* 31 - 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1704,7 +1715,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 1 - 0 */
FN_SEL_HSI_1_0_00, FN_SEL_HSI_1_0_01, 0, 0,
- }
+ ))
},
{ },
};
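
The GROUP() churn in this and the following pfc-*.c files is mechanical: the field-width list and the enum-ID list are each passed as a single macro argument, so the register macros can see the whole list at once and count its entries, which the build-time and runtime size checks introduced in this series rely on. A simplified, self-contained sketch of the idea (the struct and the EXAMPLE_CFG_REG macro are illustrative, not the actual sh_pfc.h definitions):

#include <linux/build_bug.h>
#include <linux/types.h>

struct example_cfg_reg {
	u32 reg;
	u8 reg_width;
	u8 field_width;
	const u16 *enum_ids;
};

#define GROUP(...)	__VA_ARGS__

/*
 * ids must be a GROUP(...) list.  Because it arrives as one macro
 * argument, sizeof() on a compound literal can count it, and
 * BUILD_BUG_ON_ZERO() rejects a wrong number of enum IDs at build time.
 */
#define EXAMPLE_CFG_REG(r, r_width, f_width, ids)			\
	{								\
		.reg = (r), .reg_width = (r_width),			\
		.field_width = (f_width) +				\
			BUILD_BUG_ON_ZERO(sizeof((const u16 []){ ids }) / \
					  sizeof(u16) !=		\
					  ((r_width) / (f_width)) *	\
					  (1U << (f_width))),		\
		.enum_ids = (const u16 []){ ids },			\
	}

With this shape, an entry such as EXAMPLE_CFG_REG(0xe0140200, 32, 1, GROUP(...)) fails to compile unless the GROUP() supplies exactly (32 / 1) * 2 = 64 values, which is why the conversions above only add the GROUP() wrappers without touching the values themselves.
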
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
index 5acbacb3727f..bf12849defdb 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -2284,7 +2284,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(328, 0xe6053148),
PORTCR(329, 0xe6053149),
- { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1) {
+ { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1, GROUP(
MSEL1CR_31_0, MSEL1CR_31_1,
0, 0,
0, 0,
@@ -2317,9 +2317,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL1CR_02_0, MSEL1CR_02_1,
MSEL1CR_01_0, MSEL1CR_01_1,
MSEL1CR_00_0, MSEL1CR_00_1,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) {
+ { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1, GROUP(
MSEL3CR_31_0, MSEL3CR_31_1,
0, 0,
0, 0,
@@ -2352,9 +2352,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
MSEL3CR_01_0, MSEL3CR_01_1,
MSEL3CR_00_0, MSEL3CR_00_1,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) {
+ { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1, GROUP(
0, 0,
MSEL4CR_30_0, MSEL4CR_30_1,
MSEL4CR_29_0, MSEL4CR_29_1,
@@ -2387,9 +2387,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
MSEL4CR_01_0, MSEL4CR_01_1,
0, 0,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL5CR", 0xe6058028, 32, 1) {
+ { PINMUX_CFG_REG("MSEL5CR", 0xe6058028, 32, 1, GROUP(
MSEL5CR_31_0, MSEL5CR_31_1,
MSEL5CR_30_0, MSEL5CR_30_1,
MSEL5CR_29_0, MSEL5CR_29_1,
@@ -2422,9 +2422,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL8CR", 0xe6058034, 32, 1) {
+ { PINMUX_CFG_REG("MSEL8CR", 0xe6058034, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2457,14 +2457,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
MSEL8CR_01_0, MSEL8CR_01_1,
MSEL8CR_00_0, MSEL8CR_00_1,
- }
+ ))
},
{ },
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) {
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32, GROUP(
0, PORT30_DATA, PORT29_DATA, PORT28_DATA,
PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
@@ -2473,9 +2473,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) {
+ { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -2484,9 +2484,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
0, 0, 0, PORT40_DATA,
PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054004, 32) {
+ { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054004, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, PORT85_DATA, PORT84_DATA,
@@ -2495,9 +2495,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTD127_096DR", 0xe6055004, 32) {
+ { PINMUX_DATA_REG("PORTD127_096DR", 0xe6055004, 32, GROUP(
0, PORT126_DATA, PORT125_DATA, PORT124_DATA,
PORT123_DATA, PORT122_DATA, PORT121_DATA, PORT120_DATA,
PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
@@ -2506,9 +2506,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTD159_128DR", 0xe6055008, 32) {
+ { PINMUX_DATA_REG("PORTD159_128DR", 0xe6055008, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -2517,9 +2517,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
0, 0, 0, 0,
0, PORT134_DATA, PORT133_DATA, PORT132_DATA,
PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056000, 32) {
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056000, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -2528,9 +2528,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
PORT167_DATA, PORT166_DATA, PORT165_DATA, PORT164_DATA,
PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056004, 32) {
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056004, 32, GROUP(
0, PORT222_DATA, PORT221_DATA, PORT220_DATA,
PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA,
PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA,
@@ -2539,9 +2539,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTR255_224DR", 0xe6056008, 32) {
+ { PINMUX_DATA_REG("PORTR255_224DR", 0xe6056008, 32, GROUP(
0, 0, 0, 0,
0, PORT250_DATA, PORT249_DATA, PORT248_DATA,
PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA,
@@ -2550,9 +2550,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA,
PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA,
PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTR287_256DR", 0xe605600C, 32) {
+ { PINMUX_DATA_REG("PORTR287_256DR", 0xe605600C, 32, GROUP(
0, 0, 0, 0,
PORT283_DATA, PORT282_DATA, PORT281_DATA, PORT280_DATA,
PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA,
@@ -2561,9 +2561,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA,
PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA,
PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTU319_288DR", 0xe6057000, 32) {
+ { PINMUX_DATA_REG("PORTU319_288DR", 0xe6057000, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, PORT308_DATA,
@@ -2572,9 +2572,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA,
PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA,
PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA,
- }
+ ))
},
- { PINMUX_DATA_REG("PORTU351_320DR", 0xe6057004, 32) {
+ { PINMUX_DATA_REG("PORTU351_320DR", 0xe6057004, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -2583,7 +2583,7 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
0, 0, PORT329_DATA, PORT328_DATA,
PORT327_DATA, PORT326_DATA, PORT325_DATA, PORT324_DATA,
PORT323_DATA, PORT322_DATA, PORT321_DATA, PORT320_DATA,
- }
+ ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 6d761e62c6c8..696a0f6fc1da 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -3436,7 +3436,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(210, 0xe60530d2), /* PORT210CR */
PORTCR(211, 0xe60530d3), /* PORT211CR */
- { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1) {
+ { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1, GROUP(
MSEL1CR_31_0, MSEL1CR_31_1,
MSEL1CR_30_0, MSEL1CR_30_1,
MSEL1CR_29_0, MSEL1CR_29_1,
@@ -3461,9 +3461,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL1CR_2_0, MSEL1CR_2_1,
0, 0,
MSEL1CR_0_0, MSEL1CR_0_1,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1) {
+ { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -3474,9 +3474,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL3CR_6_0, MSEL3CR_6_1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1) {
+ { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -3493,9 +3493,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
MSEL4CR_1_0, MSEL4CR_1_1,
0, 0,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL5CR", 0xE6058028, 32, 1) {
+ { PINMUX_CFG_REG("MSEL5CR", 0xE6058028, 32, 1, GROUP(
MSEL5CR_31_0, MSEL5CR_31_1,
MSEL5CR_30_0, MSEL5CR_30_1,
MSEL5CR_29_0, MSEL5CR_29_1,
@@ -3528,13 +3528,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL5CR_2_0, MSEL5CR_2_1,
0, 0,
MSEL5CR_0_0, MSEL5CR_0_1,
- }
+ ))
},
{ },
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054800, 32) {
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054800, 32, GROUP(
PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
@@ -3542,9 +3542,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
- PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA ))
},
- { PINMUX_DATA_REG("PORTL063_032DR", 0xe6054804, 32) {
+ { PINMUX_DATA_REG("PORTL063_032DR", 0xe6054804, 32, GROUP(
PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
@@ -3552,9 +3552,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
- PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA ))
},
- { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054808, 32) {
+ { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054808, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -3562,9 +3562,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
- PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA ))
},
- { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055808, 32) {
+ { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055808, 32, GROUP(
PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
@@ -3572,9 +3572,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
- { PINMUX_DATA_REG("PORTD127_096DR", 0xe605580c, 32) {
+ { PINMUX_DATA_REG("PORTD127_096DR", 0xe605580c, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -3582,9 +3582,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
- PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA ))
},
- { PINMUX_DATA_REG("PORTR127_096DR", 0xe605680C, 32) {
+ { PINMUX_DATA_REG("PORTR127_096DR", 0xe605680C, 32, GROUP(
PORT127_DATA, PORT126_DATA, PORT125_DATA, PORT124_DATA,
PORT123_DATA, PORT122_DATA, PORT121_DATA, PORT120_DATA,
PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
@@ -3592,9 +3592,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
- { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056810, 32) {
+ { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056810, 32, GROUP(
PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
@@ -3602,9 +3602,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
- PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA ))
},
- { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056814, 32) {
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056814, 32, GROUP(
PORT191_DATA, PORT190_DATA, PORT189_DATA, PORT188_DATA,
PORT187_DATA, PORT186_DATA, PORT185_DATA, PORT184_DATA,
PORT183_DATA, PORT182_DATA, PORT181_DATA, PORT180_DATA,
@@ -3612,9 +3612,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA,
PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
PORT167_DATA, PORT166_DATA, PORT165_DATA, PORT164_DATA,
- PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA ))
},
- { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056818, 32) {
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056818, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -3622,9 +3622,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
- PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
+ PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA ))
},
- { PINMUX_DATA_REG("PORTU223_192DR", 0xe6057818, 32) {
+ { PINMUX_DATA_REG("PORTU223_192DR", 0xe6057818, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -3632,7 +3632,7 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77470.c b/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
index 4359aeb35dbd..c05dc1490486 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
@@ -5,6 +5,7 @@
* Copyright (C) 2018 Renesas Electronics Corp.
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include "sh_pfc.h"
@@ -2540,7 +2541,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2572,9 +2573,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, FN_USB1_OVC,
GP_0_2_FN, FN_USB1_PWEN,
GP_0_1_FN, FN_USB0_OVC,
- GP_0_0_FN, FN_USB0_PWEN, }
+ GP_0_0_FN, FN_USB0_PWEN, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2606,9 +2607,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, FN_IP1_23_20,
GP_1_2_FN, FN_IP1_19_16,
GP_1_1_FN, FN_IP1_15_12,
- GP_1_0_FN, FN_IP1_11_8, }
+ GP_1_0_FN, FN_IP1_11_8, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1, GROUP(
GP_2_31_FN, FN_IP8_3_0,
GP_2_30_FN, FN_IP7_31_28,
GP_2_29_FN, FN_IP7_27_24,
@@ -2640,9 +2641,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, FN_IP4_19_16,
GP_2_2_FN, FN_IP4_15_12,
GP_2_1_FN, FN_IP4_11_8,
- GP_2_0_FN, FN_IP4_7_4, }
+ GP_2_0_FN, FN_IP4_7_4, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1, GROUP(
0, 0,
0, 0,
GP_3_29_FN, FN_IP10_19_16,
@@ -2674,9 +2675,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, FN_IP8_19_16,
GP_3_2_FN, FN_IP8_15_12,
GP_3_1_FN, FN_IP8_11_8,
- GP_3_0_FN, FN_IP8_7_4, }
+ GP_3_0_FN, FN_IP8_7_4, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2708,9 +2709,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, FN_IP11_3_0,
GP_4_2_FN, FN_IP10_31_28,
GP_4_1_FN, FN_IP10_27_24,
- GP_4_0_FN, FN_IP10_23_20, }
+ GP_4_0_FN, FN_IP10_23_20, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1, GROUP(
GP_5_31_FN, FN_IP17_27_24,
GP_5_30_FN, FN_IP17_23_20,
GP_5_29_FN, FN_IP17_19_16,
@@ -2742,10 +2743,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, FN_IP14_11_8,
GP_5_2_FN, FN_IP14_7_4,
GP_5_1_FN, FN_IP14_3_0,
- GP_5_0_FN, FN_IP13_31_28, }
+ GP_5_0_FN, FN_IP13_31_28, ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP0_31_28 [4] */
FN_SD0_WP, FN_IRQ7, FN_CAN0_TX_A, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2769,10 +2771,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* IP0_3_0 [4] */
FN_SD0_CLK, 0, 0, FN_SSI_SCK1_C, FN_RX3_C, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP1_31_28 [4] */
FN_D5, FN_HRX2, FN_SCL1_B, FN_PWM2_C, FN_TCLK2_B, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2796,10 +2799,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* IP1_3_0 [4] */
FN_MMC0_D4, FN_SD1_CD, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP2_31_28 [4] */
FN_D13, FN_MSIOF2_SYNC_A, 0, FN_RX4_C, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
@@ -2823,10 +2827,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP2_3_0 [4] */
FN_D6, FN_HTX2, FN_SDA1_B, FN_PWM4_C, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP3_31_28 [4] */
FN_QSPI0_SSL, FN_WE1_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
@@ -2851,10 +2856,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_3_0 [4] */
FN_D14, FN_MSIOF2_SS1, 0, FN_TX4_C, FN_CAN1_RX_B,
0, FN_AVB_AVTP_CAPTURE_A,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP4_31_28 [4] */
FN_DU0_DR6, 0, FN_RX2_C, 0, 0, 0, FN_A6, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2878,10 +2884,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* IP4_3_0 [4] */
FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK_A, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP5_31_28 [4] */
FN_DU0_DG6, 0, FN_HRX1_C, 0, 0, 0, FN_A14, 0, 0, 0,
0, 0, 0, 0, 0, 0,
@@ -2905,10 +2912,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* IP5_3_0 [4] */
FN_DU0_DR7, 0, FN_TX2_C, 0, FN_PWM2_B, 0, FN_A7, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP6_31_28 [4] */
FN_DU0_DB6, 0, 0, 0, 0, 0, FN_A22, 0, 0,
0, 0, 0, 0, 0, 0, 0,
@@ -2932,10 +2940,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP6_3_0 [4] */
FN_DU0_DG7, 0, FN_HTX1_C, 0, FN_PWM6_B, 0, FN_A15,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP7_31_28 [4] */
FN_DU0_DISP, 0, 0, 0, FN_CAN1_RX_C, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
@@ -2959,10 +2968,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0,
/* IP7_3_0 [4] */
FN_DU0_DB7, 0, 0, 0, 0, 0, FN_A23, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xE6060060, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP8_31_28 [4] */
FN_VI1_DATA5, 0, 0, 0, FN_AVB_RXD4, FN_ETH_LINK, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
@@ -2986,10 +2996,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0,
/* IP8_3_0 [4] */
FN_DU0_CDE, 0, 0, 0, FN_CAN1_TX_C, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, }
+ 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xE6060064, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP9_31_28 [4] */
FN_VI1_DATA9, 0, 0, FN_SDA2_B, FN_AVB_TXD0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
@@ -3013,10 +3024,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0,
/* IP9_3_0 [4] */
FN_VI1_DATA6, 0, 0, 0, FN_AVB_RXD5, FN_ETH_TXD1, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xE6060068, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP10_31_28 [4] */
FN_SCL1_A, FN_RX4_A, FN_PWM5_D, FN_DU1_DR0, 0, 0,
FN_SSI_SCK6_B, FN_VI0_G0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3041,10 +3053,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0,
/* IP10_3_0 [4] */
FN_VI1_DATA10, 0, 0, FN_CAN0_RX_B, FN_AVB_TXD1, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xE606006C, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP11_31_28 [4] */
FN_HRX1_A, FN_SCL4_A, FN_PWM6_A, FN_DU1_DG0, FN_RX0_A, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3072,10 +3085,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* IP11_3_0 [4] */
FN_SDA1_A, FN_TX4_A, 0, FN_DU1_DR1, 0, 0, FN_SSI_WS6_B,
- FN_VI0_G1, 0, 0, 0, 0, 0, 0, 0, 0, }
+ FN_VI0_G1, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060070, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP12_31_28 [4] */
FN_SD2_DAT2, FN_RX2_A, 0, FN_DU1_DB0, FN_SSI_SDATA2_B, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3099,10 +3113,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP12_3_0 [4] */
FN_HTX1_A, FN_SDA4_A, 0, FN_DU1_DG1, FN_TX0_A, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060074, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP13_31_28 [4] */
FN_SSI_SCK5_A, 0, 0, FN_DU1_DOTCLKOUT1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
@@ -3127,10 +3142,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP13_3_0 [4] */
FN_SD2_DAT3, FN_TX2_A, 0, FN_DU1_DB1, FN_SSI_WS9_B, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR14", 0xE6060078, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP14_31_28 [4] */
FN_SSI_SDATA7_A, 0, 0, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D,
FN_VI0_G5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3154,10 +3170,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP14_3_0 [4] */
FN_SSI_WS5_A, 0, FN_SCL3_C, FN_DU1_DOTCLKIN, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR15", 0xE606007C, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP15_31_28 [4] */
FN_SSI_WS4_A, 0, FN_AVB_PHY_INT, 0, 0, 0, FN_VI0_R5, 0, 0, 0,
0, 0, 0, 0, 0, 0,
@@ -3181,10 +3198,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI0_G7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP15_3_0 [4] */
FN_SSI_SCK0129_A, FN_MSIOF1_RXD_A, FN_RX5_D, 0, 0, 0,
- FN_VI0_G6, 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ FN_VI0_G6, 0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR16", 0xE6060080, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP16_31_28 [4] */
FN_SSI_SDATA2_A, FN_HRTS1_N_B, 0, 0, 0, 0,
FN_VI0_DATA4_VI0_B4, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3209,10 +3227,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DACK2, FN_VI0_CLK, FN_AVB_COL, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP16_3_0 [4] */
FN_SSI_SDATA4_A, 0, FN_AVB_CRS, 0, 0, 0, FN_VI0_R6, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR17", 0xE6060084, 32,
- 4, 4, 4, 4, 4, 4, 4, 4) {
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
/* IP17_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP17_27_24 [4] */
@@ -3235,11 +3254,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI0_DATA6_VI0_B6, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP17_3_0 [4] */
FN_SSI_SCK9_A, FN_RX2_B, FN_SCL3_E, 0, 0, FN_EX_WAIT1,
- FN_VI0_DATA5_VI0_B5, 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ FN_VI0_DATA5_VI0_B5, 0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xE60600C0, 32,
- 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, 3, 3,
- 1, 2, 3, 3, 1) {
+ GROUP(1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1,
+ 3, 3, 1, 2, 3, 3, 1),
+ GROUP(
/* RESERVED [1] */
0, 0,
/* RESERVED [1] */
@@ -3282,11 +3302,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3,
FN_SEL_I2C00_4, 0, 0, 0,
/* SEL_AVB [1] */
- FN_SEL_AVB_0, FN_SEL_AVB_1, }
+ FN_SEL_AVB_0, FN_SEL_AVB_1, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xE60600C4, 32,
- 1, 3, 3, 2, 2, 1, 2, 2,
- 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 1) {
+ GROUP(1, 3, 3, 2, 2, 1, 2, 2, 2, 1, 1, 1,
+ 1, 1, 2, 1, 1, 2, 2, 1),
+ GROUP(
/* SEL_SCIFCLK [1] */
FN_SEL_SCIFCLK_0, FN_SEL_SCIFCLK_1,
/* SEL_SCIF5 [3] */
@@ -3328,11 +3349,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_HSCIF1 [2] */
FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2, 0,
/* SEL_HSCIF0 [1] */
- FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,}
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE60600C8, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2),
+ GROUP(
/* RESERVED [1] */
0, 0,
/* RESERVED [1] */
@@ -3374,7 +3396,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SSI1 [2] */
FN_SEL_SSI1_0, FN_SEL_SSI1_1, FN_SEL_SSI1_2, FN_SEL_SSI1_3,
/* SEL_SSI0 [2] */
- FN_SEL_SSI0_0, FN_SEL_SSI0_1, 0, 0, }
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1, 0, 0, ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 068b5e6334d1..49fe52d35f30 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2104,7 +2104,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xfffc0004, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xfffc0004, 32, 1, GROUP(
GP_0_31_FN, FN_IP1_14_11,
GP_0_30_FN, FN_IP1_10_8,
GP_0_29_FN, FN_IP1_7_5,
@@ -2136,9 +2136,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, FN_IP0_4_2,
GP_0_2_FN, FN_PENC1,
GP_0_1_FN, FN_PENC0,
- GP_0_0_FN, FN_IP0_1_0 }
+ GP_0_0_FN, FN_IP0_1_0 ))
},
- { PINMUX_CFG_REG("GPSR1", 0xfffc0008, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xfffc0008, 32, 1, GROUP(
GP_1_31_FN, FN_IP4_6_4,
GP_1_30_FN, FN_IP4_3_1,
GP_1_29_FN, FN_IP4_0,
@@ -2170,9 +2170,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, FN_IP1_27_25,
GP_1_2_FN, FN_IP1_24,
GP_1_1_FN, FN_WE0,
- GP_1_0_FN, FN_IP1_23_21 }
+ GP_1_0_FN, FN_IP1_23_21 ))
},
- { PINMUX_CFG_REG("GPSR2", 0xfffc000c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xfffc000c, 32, 1, GROUP(
GP_2_31_FN, FN_IP6_7,
GP_2_30_FN, FN_IP6_6_5,
GP_2_29_FN, FN_IP6_4_2,
@@ -2204,9 +2204,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, FN_IP4_12_11,
GP_2_2_FN, FN_IP4_10_9,
GP_2_1_FN, FN_IP4_8,
- GP_2_0_FN, FN_IP4_7 }
+ GP_2_0_FN, FN_IP4_7 ))
},
- { PINMUX_CFG_REG("GPSR3", 0xfffc0010, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xfffc0010, 32, 1, GROUP(
GP_3_31_FN, FN_IP8_10_9,
GP_3_30_FN, FN_IP8_8_6,
GP_3_29_FN, FN_IP8_5_3,
@@ -2238,9 +2238,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, FN_IP6_10,
GP_3_2_FN, FN_SSI_SCK34,
GP_3_1_FN, FN_IP6_9,
- GP_3_0_FN, FN_IP6_8 }
+ GP_3_0_FN, FN_IP6_8 ))
},
- { PINMUX_CFG_REG("GPSR4", 0xfffc0014, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xfffc0014, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2272,12 +2272,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, FN_IP8_21_19,
GP_4_2_FN, FN_IP8_18_16,
GP_4_1_FN, FN_IP8_15_14,
- GP_4_0_FN, FN_IP8_13_11 }
+ GP_4_0_FN, FN_IP8_13_11 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xfffc0020, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 3, 4, 3, 3, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 3, 4, 3, 3, 2),
+ GROUP(
/* IP0_31 [1] */
0, 0,
/* IP0_30 [1] */
@@ -2328,10 +2329,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TX2_E, FN_SDA2_B, 0, 0,
/* IP0_1_0 [2] */
FN_PRESETOUT, 0, FN_PWM1, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xfffc0024, 32,
- 1, 1, 2, 3, 1, 3, 3, 1, 2, 4, 3, 3, 3, 1, 1) {
+ GROUP(1, 1, 2, 3, 1, 3, 3, 1, 2, 4, 3, 3,
+ 3, 1, 1),
+ GROUP(
/* IP1_31 [1] */
0, 0,
/* IP1_30 [1] */
@@ -2371,11 +2374,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A21, FN_HSPI_CLK1_B,
/* IP1_0 [1] */
FN_A20, FN_HSPI_CS1_B,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xfffc0028, 32,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 3, 2, 3, 3, 3, 3) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 3, 2, 3, 3, 3, 3),
+ GROUP(
/* IP2_31 [1] */
FN_MLB_CLK, FN_IRQ1_A,
/* IP2_30 [1] */
@@ -2423,11 +2427,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP2_2_0 [3] */
FN_SD1_CLK_A, FN_MMC_CLK, 0, FN_ATACS00,
FN_EX_CS2, 0, 0, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xfffc002c, 32,
- 1, 1, 1, 1, 1, 3, 3, 2,
- 3, 3, 3, 2, 3, 3, 2) {
+ GROUP(1, 1, 1, 1, 1, 3, 3, 2, 3, 3, 3, 2,
+ 3, 3, 2),
+ GROUP(
/* IP3_31 [1] */
FN_DU0_DR6, FN_LCDOUT6,
/* IP3_30 [1] */
@@ -2465,10 +2470,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SDSELF_B, 0, 0, 0,
/* IP3_1_0 [2] */
FN_MLB_SIG, FN_RX5_B, FN_SDA3_A, FN_IRQ2_A,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xfffc0030, 32,
- 1, 2, 2, 2, 4, 4, 2, 2, 2, 2, 1, 1, 3, 3, 1) {
+ GROUP(1, 2, 2, 2, 4, 4, 2, 2, 2, 2, 1, 1,
+ 3, 3, 1),
+ GROUP(
/* IP4_31 [1] */
0, 0,
/* IP4_30_29 [2] */
@@ -2507,10 +2514,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TX1_D, FN_CAN0_TX_A, FN_ADICHS0, 0,
/* IP4_0 [1] */
FN_DU0_DR7, FN_LCDOUT7,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xfffc0034, 32,
- 1, 2, 3, 3, 2, 3, 3, 2, 1, 2, 2, 1, 1, 2, 2, 2) {
+ GROUP(1, 2, 3, 3, 2, 3, 3, 2, 1, 2, 2, 1,
+ 1, 2, 2, 2),
+ GROUP(
/* IP5_31 [1] */
0, 0,
@@ -2551,11 +2560,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_DATA10_B, FN_DU0_DB6, FN_LCDOUT22, 0,
/* IP5_1_0 [2] */
FN_VI0_R5_B, FN_DU0_DB5, FN_LCDOUT21, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xfffc0038, 32,
- 2, 2, 2, 2, 2, 1, 2, 2, 1, 2,
- 1, 2, 1, 1, 1, 1, 2, 3, 2) {
+ GROUP(2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 1, 2,
+ 1, 1, 1, 1, 2, 3, 2),
+ GROUP(
/* IP6_31_30 [2] */
FN_SD0_DAT2, 0, FN_SUB_TDI, 0,
/* IP6_29_28 [2] */
@@ -2602,10 +2612,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP6_1_0 [2] */
FN_SSI_SCK6, FN_HSPI_RX2_A,
FN_FMCLK_B, FN_CAN1_TX_B,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xfffc003c, 32,
- 3, 4, 3, 1, 3, 3, 3, 3, 3, 2, 2, 2) {
+ GROUP(3, 4, 3, 1, 3, 3, 3, 3, 3, 2, 2, 2),
+ GROUP(
/* IP7_31_29 [3] */
FN_VI0_HSYNC, FN_SD2_CD_B, FN_VI1_DATA2, FN_DU1_DR2,
@@ -2641,10 +2652,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SD0_CD, 0, FN_TX5_A, 0,
/* IP7_1_0 [2] */
FN_SD0_DAT3, 0, FN_IRQ1_B, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xfffc0040, 32,
- 1, 1, 3, 3, 2, 3, 3, 2, 3, 2, 3, 3, 3) {
+ GROUP(1, 1, 3, 3, 2, 3, 3, 2, 3, 2, 3, 3, 3),
+ GROUP(
/* IP8_31 [1] */
0, 0,
/* IP8_30 [1] */
@@ -2681,10 +2693,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP8_2_0 [3] */
FN_VI0_VSYNC, FN_SD2_WP_B, FN_VI1_DATA3, FN_DU1_DR3,
0, FN_HSPI_TX1_A, FN_TX3_B, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xfffc0044, 32,
- 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP9_31 [1] */
0, 0,
/* IP9_30 [1] */
@@ -2723,10 +2736,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP9_2_0 [3] */
FN_VI0_G4, FN_SD2_DAT0_B, FN_VI1_DATA6, FN_DU1_DR6,
0, FN_HRTS1_B, 0, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xfffc0048, 32,
- 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 4, 3, 3, 3) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 4,
+ 3, 3, 3),
+ GROUP(
/* IP10_31 [1] */
0, 0,
@@ -2772,11 +2787,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ATARD1, FN_ETH_MDC,
FN_SDA1_B, 0,
0, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xfffc0050, 32,
- 1, 1, 2, 2, 3, 2, 2, 1, 1, 1, 1, 2,
- 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(1, 1, 2, 2, 3, 2, 2, 1, 1, 1, 1, 2,
+ 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* SEL 31 [1] */
0, 0,
@@ -2835,11 +2851,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_WAIT2_A, FN_SEL_WAIT2_B,
/* SEL_0 (WAIT1) [1] */
FN_SEL_WAIT1_A, FN_SEL_WAIT1_B,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xfffc0054, 32,
- 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1) {
+ GROUP(1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1),
+ GROUP(
/* SEL_31 [1] */
0, 0,
@@ -2899,7 +2916,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_I2C2_C, 0,
/* SEL_0 (I2C1) [1] */
FN_SEL_I2C1_A, FN_SEL_I2C1_B,
- }
+ ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index 64bace100316..0c121b28ec3f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -1595,6 +1595,92 @@ static const unsigned int ether_magic_pins[] = {
static const unsigned int ether_magic_mux[] = {
ETH_MAGIC_MARK,
};
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 21)
+};
+static const unsigned int hscif0_data_mux[] = {
+ HTX0_MARK, HRX0_MARK
+};
+static const unsigned int hscif0_data_b_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13)
+};
+static const unsigned int hscif0_data_b_mux[] = {
+ HTX0_B_MARK, HRX0_B_MARK
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* CTS, RTS */
+ RCAR_GP_PIN(4, 18), RCAR_GP_PIN(4, 19)
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HCTS0_MARK, HRTS0_MARK
+};
+static const unsigned int hscif0_ctrl_b_pins[] = {
+ /* CTS, RTS */
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 10)
+};
+static const unsigned int hscif0_ctrl_b_mux[] = {
+ HCTS0_B_MARK, HRTS0_B_MARK
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 17)
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK
+};
+static const unsigned int hscif0_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 11)
+};
+static const unsigned int hscif0_clk_b_mux[] = {
+ HSCK0_B_MARK
+};
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 20)
+};
+static const unsigned int hscif1_data_mux[] = {
+ HTX1_MARK, HRX1_MARK
+};
+static const unsigned int hscif1_data_b_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3)
+};
+static const unsigned int hscif1_data_b_mux[] = {
+ HTX1_B_MARK, HRX1_B_MARK
+};
+static const unsigned int hscif1_ctrl_pins[] = {
+ /* CTS, RTS */
+ RCAR_GP_PIN(0, 21), RCAR_GP_PIN(0, 22)
+};
+static const unsigned int hscif1_ctrl_mux[] = {
+ HCTS1_MARK, HRTS1_MARK
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* CTS, RTS */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 6)
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HCTS1_B_MARK, HRTS1_B_MARK
+};
+static const unsigned int hscif1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 18)
+};
+static const unsigned int hscif1_clk_mux[] = {
+ HSCK1_MARK
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 4)
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK
+};
/* - HSPI0 ------------------------------------------------------------------ */
static const unsigned int hspi0_pins[] = {
/* CLK, CS, RX, TX */
@@ -2618,6 +2704,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(ether_rmii),
SH_PFC_PIN_GROUP(ether_link),
SH_PFC_PIN_GROUP(ether_magic),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_data_b),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_data),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif1_clk),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
SH_PFC_PIN_GROUP(hspi0),
SH_PFC_PIN_GROUP(hspi1),
SH_PFC_PIN_GROUP(hspi1_b),
@@ -2783,6 +2881,24 @@ static const char * const ether_groups[] = {
"ether_magic",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_data_b",
+ "hscif0_ctrl",
+ "hscif0_ctrl_b",
+ "hscif0_clk",
+ "hscif0_clk_b",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data",
+ "hscif1_data_b",
+ "hscif1_ctrl",
+ "hscif1_ctrl_b",
+ "hscif1_clk",
+ "hscif1_clk_b",
+};
+
static const char * const hspi0_groups[] = {
"hspi0",
};
@@ -3005,6 +3121,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(du0),
SH_PFC_FUNCTION(du1),
SH_PFC_FUNCTION(ether),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
SH_PFC_FUNCTION(hspi0),
SH_PFC_FUNCTION(hspi1),
SH_PFC_FUNCTION(hspi2),
@@ -3036,7 +3154,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xfffc0004, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xfffc0004, 32, 1, GROUP(
GP_0_31_FN, FN_IP3_31_29,
GP_0_30_FN, FN_IP3_26_24,
GP_0_29_FN, FN_IP3_22_21,
@@ -3068,9 +3186,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, FN_A17,
GP_0_2_FN, FN_IP0_7_6,
GP_0_1_FN, FN_AVS2,
- GP_0_0_FN, FN_AVS1 }
+ GP_0_0_FN, FN_AVS1 ))
},
- { PINMUX_CFG_REG("GPSR1", 0xfffc0008, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xfffc0008, 32, 1, GROUP(
GP_1_31_FN, FN_IP5_23_21,
GP_1_30_FN, FN_IP5_20_17,
GP_1_29_FN, FN_IP5_16_15,
@@ -3102,9 +3220,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, FN_IP4_10_8,
GP_1_2_FN, FN_IP4_7_5,
GP_1_1_FN, FN_IP4_4_2,
- GP_1_0_FN, FN_IP4_1_0 }
+ GP_1_0_FN, FN_IP4_1_0 ))
},
- { PINMUX_CFG_REG("GPSR2", 0xfffc000c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xfffc000c, 32, 1, GROUP(
GP_2_31_FN, FN_IP10_28_26,
GP_2_30_FN, FN_IP10_25_24,
GP_2_29_FN, FN_IP10_23_21,
@@ -3136,9 +3254,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, FN_IP8_24_23,
GP_2_2_FN, FN_IP8_22_21,
GP_2_1_FN, FN_IP8_20,
- GP_2_0_FN, FN_IP5_27_24 }
+ GP_2_0_FN, FN_IP5_27_24 ))
},
- { PINMUX_CFG_REG("GPSR3", 0xfffc0010, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xfffc0010, 32, 1, GROUP(
GP_3_31_FN, FN_IP6_3_2,
GP_3_30_FN, FN_IP6_1_0,
GP_3_29_FN, FN_IP5_30_29,
@@ -3170,9 +3288,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, FN_IP11_8_6,
GP_3_2_FN, FN_IP11_5_3,
GP_3_1_FN, FN_IP11_2_0,
- GP_3_0_FN, FN_IP10_31_29 }
+ GP_3_0_FN, FN_IP10_31_29 ))
},
- { PINMUX_CFG_REG("GPSR4", 0xfffc0014, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xfffc0014, 32, 1, GROUP(
GP_4_31_FN, FN_IP8_19,
GP_4_30_FN, FN_IP8_18,
GP_4_29_FN, FN_IP8_17_16,
@@ -3204,9 +3322,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, FN_IP6_11_9,
GP_4_2_FN, FN_IP6_8,
GP_4_1_FN, FN_IP6_7_6,
- GP_4_0_FN, FN_IP6_5_4 }
+ GP_4_0_FN, FN_IP6_5_4 ))
},
- { PINMUX_CFG_REG("GPSR5", 0xfffc0018, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xfffc0018, 32, 1, GROUP(
GP_5_31_FN, FN_IP3_5,
GP_5_30_FN, FN_IP3_4,
GP_5_29_FN, FN_IP3_3,
@@ -3238,9 +3356,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, FN_A4,
GP_5_2_FN, FN_A3,
GP_5_1_FN, FN_A2,
- GP_5_0_FN, FN_A1 }
+ GP_5_0_FN, FN_A1 ))
},
- { PINMUX_CFG_REG("GPSR6", 0xfffc001c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xfffc001c, 32, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -3255,11 +3373,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, FN_IP3_15,
GP_6_2_FN, FN_IP3_8,
GP_6_1_FN, FN_IP3_7,
- GP_6_0_FN, FN_IP3_6 }
+ GP_6_0_FN, FN_IP3_6 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xfffc0020, 32,
- 1, 3, 2, 1, 2, 4, 3, 2, 2, 2, 2, 2, 3, 3) {
+ GROUP(1, 3, 2, 1, 2, 4, 3, 2, 2, 2, 2, 2, 3, 3),
+ GROUP(
/* IP0_31 [1] */
0, 0,
/* IP0_30_28 [3] */
@@ -3294,10 +3413,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ATADIR0, FN_SDSELF, FN_HCTS1, FN_TX4_C,
/* IP0_2_0 [3] */
FN_USB_PENC2, FN_SCK0, FN_PWM1, FN_PWMFSW0,
- FN_SCIF_CLK, FN_TCLK0_C, 0, 0 }
+ FN_SCIF_CLK, FN_TCLK0_C, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xfffc0024, 32,
- 3, 4, 2, 2, 2, 4, 4, 4, 3, 2, 2) {
+ GROUP(3, 4, 2, 2, 2, 4, 4, 4, 3, 2, 2),
+ GROUP(
/* IP1_31_29 [3] */
0, 0, 0, 0, 0, 0, 0, 0,
/* IP1_28_25 [4] */
@@ -3332,10 +3452,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP1_3_2 [2] */
FN_EX_CS1, FN_MMC0_D7, FN_FD7, 0,
/* IP1_1_0 [2] */
- FN_EX_CS0, FN_RX3_C_IRDA_RX_C, FN_MMC0_D6, FN_FD6 }
+ FN_EX_CS0, FN_RX3_C_IRDA_RX_C, FN_MMC0_D6, FN_FD6 ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xfffc0028, 32,
- 1, 3, 1, 1, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4) {
+ GROUP(1, 3, 1, 1, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4),
+ GROUP(
/* IP2_31 [1] */
0, 0,
/* IP2_30_28 [3] */
@@ -3378,11 +3499,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_HRX0, FN_RX1, FN_SCKZ, FN_RTS0_C_TANS_C,
FN_SUB_TDI, FN_CC5_STATE3, FN_CC5_STATE11, FN_CC5_STATE19,
FN_CC5_STATE27, FN_CC5_STATE35, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xfffc002c, 32,
- 3, 1, 1, 3, 1, 2, 1, 1, 1, 1, 1,
- 1, 3, 3, 1, 1, 1, 1, 1, 1, 3) {
+ GROUP(3, 1, 1, 3, 1, 2, 1, 1, 1, 1, 1, 1,
+ 3, 3, 1, 1, 1, 1, 1, 1, 3),
+ GROUP(
/* IP3_31_29 [3] */
FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CAN1_TX, FN_TX2_C,
FN_SCL2_C, FN_REMOCON, 0, 0,
@@ -3429,11 +3551,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DG2, FN_LCDOUT10,
/* IP3_2_0 [3] */
FN_DU0_DG1, FN_LCDOUT9, FN_DACK1, FN_SDA2,
- FN_AUDATA3, 0, 0, 0 }
+ FN_AUDATA3, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xfffc0030, 32,
- 3, 1, 1, 1, 1, 1, 1, 3, 3,
- 1, 1, 1, 1, 1, 1, 3, 3, 3, 2) {
+ GROUP(3, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
+ 1, 1, 1, 3, 3, 3, 2),
+ GROUP(
/* IP4_31_29 [3] */
FN_DU1_DB0, FN_VI2_DATA4_VI2_B4, FN_SCL2_B, FN_SD3_DAT0,
FN_TX5, FN_SCK0_D, 0, 0,
@@ -3477,11 +3600,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_CDE, FN_QPOLB, FN_CAN1_RX, FN_RX2_C,
FN_DREQ0_B, FN_SSI_SCK78_B, FN_SCK0_B, 0,
/* IP4_1_0 [2] */
- FN_DU0_DISP, FN_QPOLA, FN_CAN_CLK_C, FN_SCK2_C }
+ FN_DU0_DISP, FN_QPOLA, FN_CAN_CLK_C, FN_SCK2_C ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xfffc0034, 32,
- 1, 2, 1, 4, 3, 4, 2, 2,
- 2, 2, 1, 1, 1, 1, 1, 1, 3) {
+ GROUP(1, 2, 1, 4, 3, 4, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 3),
+ GROUP(
/* IP5_31 [1] */
0, 0,
/* IP5_30_29 [2] */
@@ -3523,10 +3647,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU1_DB2, FN_VI2_R4,
/* IP5_2_0 [3] */
FN_DU1_DB1, FN_VI2_DATA5_VI2_B5, FN_SDA2_B, FN_SD3_DAT1,
- FN_RX5, FN_RTS0_D_TANS_D, 0, 0 }
+ FN_RX5, FN_RTS0_D_TANS_D, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xfffc0038, 32,
- 1, 2, 2, 2, 2, 3, 2, 3, 3, 3, 1, 2, 2, 2, 2) {
+ GROUP(1, 2, 2, 2, 2, 3, 2, 3, 3, 3, 1, 2,
+ 2, 2, 2),
+ GROUP(
/* IP6_31 [1] */
0, 0,
/* IP6_30_29 [2] */
@@ -3560,10 +3686,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP6_3_2 [2] */
FN_SSI_WS0129, FN_CAN_DEBUGOUT2, FN_MOUT2, 0,
/* IP6_1_0 [2] */
- FN_SSI_SCK0129, FN_CAN_DEBUGOUT1, FN_MOUT1, 0 }
+ FN_SSI_SCK0129, FN_CAN_DEBUGOUT1, FN_MOUT1, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xfffc003c, 32,
- 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2) {
+ GROUP(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 2, 2),
+ GROUP(
/* IP7_31 [1] */
0, 0,
/* IP7_30_29 [2] */
@@ -3596,10 +3724,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP7_3_2 [2] */
FN_SSI_SDATA6, FN_ADICHS2, FN_CAN_CLK, FN_IECLK_B,
/* IP7_1_0 [2] */
- FN_SSI_WS6, FN_ADICHS1, FN_CAN0_RX, FN_IETX_B }
+ FN_SSI_WS6, FN_ADICHS1, FN_CAN0_RX, FN_IETX_B ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xfffc0040, 32,
- 1, 3, 3, 2, 2, 1, 1, 1, 2, 4, 4, 4, 4) {
+ GROUP(1, 3, 3, 2, 2, 1, 1, 1, 2, 4, 4, 4, 4),
+ GROUP(
/* IP8_31 [1] */
0, 0,
/* IP8_30_28 [3] */
@@ -3639,11 +3768,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_HSPI_CLK0, FN_CTS0, FN_USB_OVC0, FN_AD_CLK,
FN_CC5_STATE4, FN_CC5_STATE12, FN_CC5_STATE20, FN_CC5_STATE28,
FN_CC5_STATE36, 0, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xfffc0044, 32,
- 2, 2, 2, 2, 2, 3, 3, 2, 2,
- 2, 2, 1, 1, 1, 1, 2, 2) {
+ GROUP(2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 1,
+ 1, 1, 1, 2, 2),
+ GROUP(
/* IP9_31_30 [2] */
0, 0, 0, 0,
/* IP9_29_28 [2] */
@@ -3679,10 +3809,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP9_3_2 [2] */
FN_VI0_DATA1_VI0_B1, FN_HCTS1_B, FN_MT1_PWM, 0,
/* IP9_1_0 [2] */
- FN_VI0_DATA0_VI0_B0, FN_HRTS1_B, FN_MT1_VCXO, 0 }
+ FN_VI0_DATA0_VI0_B0, FN_HRTS1_B, FN_MT1_VCXO, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xfffc0048, 32,
- 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP10_31_29 [3] */
FN_VI1_VSYNC, FN_AUDIO_CLKOUT_C, FN_SSI_WS4, FN_SIM_CLK,
FN_GPS_MAG_C, FN_SPV_TRST, FN_SCL3, 0,
@@ -3714,10 +3845,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DACK0_C, FN_DRACK0_C, 0, 0,
/* IP10_2_0 [3] */
FN_VI0_R0, FN_SSI_SDATA7_C, FN_SCK1_C, FN_DREQ1_B,
- FN_ARM_TRACEDATA_10, FN_DREQ0_C, 0, 0 }
+ FN_ARM_TRACEDATA_10, FN_DREQ0_C, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xfffc004c, 32,
- 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP11_31_30 [2] */
0, 0, 0, 0,
/* IP11_29_27 [3] */
@@ -3749,10 +3881,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ADICS_B_SAMP_B, 0, 0, 0,
/* IP11_2_0 [3] */
FN_VI1_DATA0_VI1_B0, FN_SD2_DAT0, FN_SIM_RST, FN_SPV_TCK,
- FN_ADICLK_B, 0, 0, 0 }
+ FN_ADICLK_B, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xfffc0050, 32,
- 4, 4, 4, 2, 3, 3, 3, 3, 3, 3) {
+ GROUP(4, 4, 4, 2, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP12_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -3781,11 +3914,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SCL1_C, FN_HTX0_B, 0, 0,
/* IP12_2_0 [3] */
FN_VI1_G2, FN_VI3_DATA2, FN_SSI_WS1, FN_TS_SPSYNC1,
- FN_SCK2, FN_HSCK0_B, 0, 0 }
+ FN_SCK2, FN_HSCK0_B, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL", 0xfffc0090, 32,
- 2, 2, 3, 3, 2, 2, 2, 2, 2,
- 1, 1, 1, 1, 1, 1, 1, 2, 1, 2) {
+ GROUP(2, 2, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1,
+ 1, 1, 1, 1, 2, 1, 2),
+ GROUP(
/* SEL_SCIF5 [2] */
FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
/* SEL_SCIF4 [2] */
@@ -3825,11 +3959,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_EXBUS1 [1] */
FN_SEL_EXBUS1_0, FN_SEL_EXBUS1_1,
/* SEL_EXBUS0 [2] */
- FN_SEL_EXBUS0_0, FN_SEL_EXBUS0_1, FN_SEL_EXBUS0_2, 0 }
+ FN_SEL_EXBUS0_0, FN_SEL_EXBUS0_1, FN_SEL_EXBUS0_2, 0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xfffc0094, 32,
- 2, 2, 2, 2, 1, 1, 1, 3, 1,
- 2, 2, 2, 2, 1, 1, 2, 1, 2, 2) {
+ GROUP(2, 2, 2, 2, 1, 1, 1, 3, 1, 2, 2, 2,
+ 2, 1, 1, 2, 1, 2, 2),
+ GROUP(
/* SEL_TMU1 [2] */
FN_SEL_TMU1_0, FN_SEL_TMU1_1, FN_SEL_TMU1_2, 0,
/* SEL_TMU0 [2] */
@@ -3868,7 +4003,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_I2C2 [2] */
FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
/* SEL_I2C1 [2] */
- FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3 }
+ FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3 ))
},
{ },
};
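Every hunk in these pfc-* tables makes the same mechanical change: the brace-enclosed lists of field widths and enum IDs handed to PINMUX_CFG_REG() / PINMUX_CFG_REG_VAR() are wrapped in GROUP(...). The standalone C sketch below illustrates the idea, assuming GROUP() is the usual varargs pass-through macro; the N_ELEMS() helper, the field widths and the ID values are illustrative only and are not the definitions from sh_pfc.h. What it shows is that a comma-separated list wrapped this way travels through an outer macro as a single argument, so the number of entries can be counted with sizeof() on a compound literal and cross-checked against the register layout.

/* Illustrative sketch only -- not the kernel's macros from sh_pfc.h. */
#include <stdio.h>

/* Assumed definition: a plain varargs pass-through. */
#define GROUP(...)	__VA_ARGS__

/* Hypothetical helper: count the entries of a GROUP(...) list. */
#define N_ELEMS(type, grp)	(sizeof((const type []){ grp }) / sizeof(type))

int main(void)
{
	/* Shaped like one IPSRx row: four fields of 1 + 3 + 2 + 2 = 8 bits. */
	static const unsigned char widths[] = { GROUP(1, 3, 2, 2) };
	static const unsigned short ids[] = {
		GROUP(/* field 0: 2^1 IDs */ 0, 0,
		      /* field 1: 2^3 IDs */ 1, 2, 3, 4, 5, 6, 7, 0,
		      /* field 2: 2^2 IDs */ 8, 9, 10, 0,
		      /* field 3: 2^2 IDs */ 11, 12, 13, 0)
	};
	unsigned int i, bits = 0, want_ids = 0;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		bits += widths[i];		/* must add up to the register width */
		want_ids += 1u << widths[i];	/* each field needs 2^width enum IDs */
	}

	printf("fields: %zu, register bits: %u\n",
	       N_ELEMS(unsigned char, GROUP(1, 3, 2, 2)), bits);
	printf("enum IDs: %zu present, %u required\n",
	       sizeof(ids) / sizeof(ids[0]), want_ids);
	return 0;
}

Compiled with any C99/C11 compiler (e.g. cc group_sketch.c && ./a.out, filename arbitrary), this prints 4 fields, 8 register bits, and 18 enum IDs both present and required, matching the 1+3+2+2 split; a mismatch in either count is the kind of inconsistency a build-time check over GROUP()-wrapped lists can reject.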
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index a84229cb8cd4..c41a6761cf9d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -8,6 +8,7 @@
* Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*/
+#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -4744,7 +4745,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1, GROUP(
GP_0_31_FN, FN_IP3_17_15,
GP_0_30_FN, FN_IP3_14_12,
GP_0_29_FN, FN_IP3_11_8,
@@ -4776,9 +4777,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, FN_IP0_11_9,
GP_0_2_FN, FN_IP0_8_6,
GP_0_1_FN, FN_IP0_5_3,
- GP_0_0_FN, FN_IP0_2_0 }
+ GP_0_0_FN, FN_IP0_2_0 ))
},
- { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP(
0, 0,
0, 0,
GP_1_29_FN, FN_IP6_13_11,
@@ -4810,9 +4811,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, FN_IP3_28_26,
GP_1_2_FN, FN_IP3_25_23,
GP_1_1_FN, FN_IP3_22_20,
- GP_1_0_FN, FN_IP3_19_18, }
+ GP_1_0_FN, FN_IP3_19_18, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1, GROUP(
0, 0,
0, 0,
GP_2_29_FN, FN_IP7_15_13,
@@ -4844,9 +4845,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, FN_IP8_3_2,
GP_2_2_FN, FN_IP8_1_0,
GP_2_1_FN, FN_IP7_30_29,
- GP_2_0_FN, FN_IP7_28_27 }
+ GP_2_0_FN, FN_IP7_28_27 ))
},
- { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1, GROUP(
GP_3_31_FN, FN_IP11_21_18,
GP_3_30_FN, FN_IP11_17_15,
GP_3_29_FN, FN_IP11_14_13,
@@ -4878,9 +4879,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, FN_IP9_3_2,
GP_3_2_FN, FN_IP9_1_0,
GP_3_1_FN, FN_IP8_30_29,
- GP_3_0_FN, FN_IP8_28 }
+ GP_3_0_FN, FN_IP8_28 ))
},
- { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1, GROUP(
GP_4_31_FN, FN_IP14_18_16,
GP_4_30_FN, FN_IP14_15_12,
GP_4_29_FN, FN_IP14_11_9,
@@ -4912,9 +4913,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, FN_IP11_31_30,
GP_4_2_FN, FN_IP11_29_27,
GP_4_1_FN, FN_IP11_26_24,
- GP_4_0_FN, FN_IP11_23_22 }
+ GP_4_0_FN, FN_IP11_23_22 ))
},
- { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1, GROUP(
GP_5_31_FN, FN_IP7_24_22,
GP_5_30_FN, FN_IP7_21_19,
GP_5_29_FN, FN_IP7_18_16,
@@ -4946,10 +4947,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, FN_IP14_30_28,
GP_5_2_FN, FN_IP14_27_25,
GP_5_1_FN, FN_IP14_24_22,
- GP_5_0_FN, FN_IP14_21_19 }
+ GP_5_0_FN, FN_IP14_21_19 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32,
- 1, 4, 4, 3, 4, 4, 3, 3, 3, 3) {
+ GROUP(1, 4, 4, 3, 4, 4, 3, 3, 3, 3),
+ GROUP(
/* IP0_31 [1] */
0, 0,
/* IP0_30_27 [4] */
@@ -4982,10 +4984,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0,
/* IP0_2_0 [3] */
FN_D0, FN_MSIOF3_SCK_B, FN_VI3_DATA0, FN_VI0_G4, FN_VI0_G4_B,
- 0, 0, 0, }
+ 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060024, 32,
- 2, 2, 2, 4, 4, 3, 3, 4, 4, 4) {
+ GROUP(2, 2, 2, 4, 4, 3, 3, 4, 4, 4),
+ GROUP(
/* IP1_31_30 [2] */
0, 0, 0, 0,
/* IP1_29_28 [2] */
@@ -5019,10 +5022,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP1_3_0 [4] */
FN_D9, FN_SCIFA1_RXD_C, FN_AVB_TXD1, 0,
FN_VI0_G1, FN_VI0_G1_B, FN_VI2_DATA1_VI2_B1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32,
- 3, 3, 4, 4, 3, 3, 3, 3, 3, 3) {
+ GROUP(3, 3, 4, 4, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP2_31_29 [3] */
0, 0, 0, 0, 0, 0, 0, 0,
/* IP2_28_26 [3] */
@@ -5048,10 +5052,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP2_5_3 [3] */
FN_A3, FN_PWM6, FN_MSIOF1_SS2_B, 0, 0, 0, 0, 0,
/* IP2_2_0 [3] */
- FN_A2, FN_PWM5, FN_MSIOF1_SS1_B, 0, 0, 0, 0, 0, }
+ FN_A2, FN_PWM5, FN_MSIOF1_SS1_B, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xE606002C, 32,
- 3, 3, 3, 3, 2, 3, 3, 4, 4, 4) {
+ GROUP(3, 3, 3, 3, 2, 3, 3, 4, 4, 4),
+ GROUP(
/* IP3_31_29 [3] */
FN_A20, FN_SPCLK, FN_VI1_R3, FN_VI1_R3_B, FN_VI2_G4,
0, 0, 0,
@@ -5081,10 +5086,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_3_0 [4] */
FN_A11, FN_SCIFB2_CTS_N_B, FN_MSIOF2_SCK, FN_VI1_R0,
FN_VI1_R0_B, FN_VI2_G0, FN_VI2_DATA3_VI2_B3_B, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32,
- 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP4_31_30 [2] */
0, 0, 0, 0,
/* IP4_29_27 [3] */
@@ -5114,10 +5120,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A22, FN_MISO_IO1, FN_VI1_R5, FN_VI1_R5_B, FN_VI2_G6, 0, 0, 0,
/* IP4_2_0 [3] */
FN_A21, FN_MOSI_IO0, FN_VI1_R4, FN_VI1_R4_B, FN_VI2_G5, 0, 0, 0,
- }
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060034, 32,
- 2, 3, 3, 3, 3, 3, 2, 3, 4, 3, 3) {
+ GROUP(2, 3, 3, 3, 3, 3, 2, 3, 4, 3, 3),
+ GROUP(
/* IP5_31_30 [2] */
0, 0, 0, 0,
/* IP5_29_27 [3] */
@@ -5151,10 +5158,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_INTC_EN0_N, FN_I2C1_SCL,
/* IP5_2_0 [3] */
FN_EX_CS3_N, FN_GPS_MAG, FN_VI3_FIELD, FN_VI1_G1, FN_VI1_G1_B,
- FN_VI2_R3, 0, 0, }
+ FN_VI2_R3, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060038, 32,
- 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3) {
+ GROUP(3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3),
+ GROUP(
/* IP6_31_29 [3] */
FN_ETH_REF_CLK, 0, FN_HCTS0_N_E,
FN_STP_IVCXO27_1_B, FN_HRX0_F, 0, 0, 0,
@@ -5187,10 +5195,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SSI_SDATA7_C, FN_SSI_SCK78_B, 0, 0, 0,
/* IP6_2_0 [3] */
FN_DACK0, FN_IRQ0, FN_INTC_IRQ0_N, FN_SSI_SCK6_B,
- FN_VI1_VSYNC_N, FN_VI1_VSYNC_N_B, FN_SSI_WS78_C, 0, }
+ FN_VI1_VSYNC_N, FN_VI1_VSYNC_N_B, FN_SSI_WS78_C, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
- 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 3, 3) {
+ GROUP(1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 3, 3),
+ GROUP(
/* IP7_31 [1] */
0, 0,
/* IP7_30_29 [2] */
@@ -5222,11 +5231,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ETH_TXD1, 0, FN_HTX0_F, FN_BPFCLK_G, 0, 0, 0, 0,
/* IP7_2_0 [3] */
FN_ETH_MDIO, 0, FN_HRTS0_N_E,
- FN_SIM0_D_C, FN_HCTS0_N_F, 0, 0, 0, }
+ FN_SIM0_D_C, FN_HCTS0_N_F, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32,
- 1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2) {
+ GROUP(1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2),
+ GROUP(
/* IP8_31 [1] */
0, 0,
/* IP8_30_29 [2] */
@@ -5263,10 +5273,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP8_3_2 [2] */
FN_VI0_DATA2_VI0_B2, FN_ATAWR0_N, FN_AVB_RXD4, 0,
/* IP8_1_0 [2] */
- FN_VI0_DATA1_VI0_B1, FN_ATARD0_N, FN_AVB_RXD3, 0, }
+ FN_VI0_DATA1_VI0_B1, FN_ATARD0_N, FN_AVB_RXD3, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
- 4, 2, 2, 2, 2, 2, 2, 4, 4, 2, 2, 2, 2) {
+ GROUP(4, 2, 2, 2, 2, 2, 2, 4, 4, 2, 2, 2, 2),
+ GROUP(
/* IP9_31_28 [4] */
FN_SD1_CD, FN_MMC1_D6, FN_TS_SDEN1, FN_USB1_EXTP,
FN_GLO_SS, FN_VI0_CLK_B, FN_IIC2_SCL_D, FN_I2C2_SCL_D,
@@ -5298,10 +5309,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP9_3_2 [2] */
FN_SD0_DAT1, FN_SCIFB1_TXD_B, FN_VI1_DATA3_VI1_B3_B, 0,
/* IP9_1_0 [2] */
- FN_SD0_DAT0, FN_SCIFB1_RXD_B, FN_VI1_DATA2_VI1_B2_B, 0, }
+ FN_SD0_DAT0, FN_SCIFB1_RXD_B, FN_VI1_DATA2_VI1_B2_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xE6060048, 32,
- 2, 4, 3, 4, 4, 4, 4, 3, 4) {
+ GROUP(2, 4, 3, 4, 4, 4, 4, 3, 4),
+ GROUP(
/* IP10_31_30 [2] */
0, 0, 0, 0,
/* IP10_29_26 [4] */
@@ -5337,10 +5349,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP10_3_0 [4] */
FN_SD1_WP, FN_MMC1_D7, FN_TS_SPSYNC1, FN_USB1_IDIN,
FN_GLO_RFON, FN_VI1_CLK_B, FN_IIC2_SDA_D, FN_I2C2_SDA_D,
- FN_SIM0_D_B, 0, 0, 0, 0, 0, 0, 0, }
+ FN_SIM0_D_B, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32,
- 2, 3, 3, 2, 4, 3, 2, 2, 2, 2, 2, 1, 4) {
+ GROUP(2, 3, 3, 2, 4, 3, 2, 2, 2, 2, 2, 1, 4),
+ GROUP(
/* IP11_31_30 [2] */
FN_SSI_SCK0129, FN_CAN_CLK_B, FN_MOUT0, 0,
/* IP11_29_27 [3] */
@@ -5372,10 +5385,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP11_3_0 [4] */
FN_SD2_WP, FN_MMC0_D5, FN_TS_SCK0_B, FN_USB2_IDIN,
FN_GLO_I1, FN_VI0_DATA7_VI0_B7_B, FN_HRTS0_N_D,
- FN_TS_SCK1_B, FN_GLO_I1_B, FN_VI3_DATA7_B, 0, 0, 0, 0, 0, 0, }
+ FN_TS_SCK1_B, FN_GLO_I1_B, FN_VI3_DATA7_B, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
- 1, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 2, 2) {
+ GROUP(1, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 2, 2),
+ GROUP(
/* IP12_31 [1] */
0, 0,
/* IP12_30_28 [3] */
@@ -5411,10 +5425,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP12_3_2 [2] */
FN_SSI_SDATA0, FN_CAN0_RX_B, FN_MOUT2, 0,
/* IP12_1_0 [2] */
- FN_SSI_WS0129, FN_CAN0_TX_B, FN_MOUT1, 0, }
+ FN_SSI_WS0129, FN_CAN0_TX_B, FN_MOUT1, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
- 1, 2, 3, 3, 4, 3, 3, 3, 3, 4, 3) {
+ GROUP(1, 2, 3, 3, 4, 3, 3, 3, 3, 4, 3),
+ GROUP(
/* IP13_31 [1] */
0, 0,
/* IP13_30_29 [2] */
@@ -5447,10 +5462,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_BPFCLK_F, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP13_2_0 [3] */
FN_SSI_SDATA5, FN_SCIFB1_TXD, FN_IETX_B, FN_DU2_DR2,
- FN_LCDOUT2, FN_CAN_DEBUGOUT5, 0, 0, }
+ FN_LCDOUT2, FN_CAN_DEBUGOUT5, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR14", 0xE6060058, 32,
- 1, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3) {
+ GROUP(1, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3),
+ GROUP(
/* IP14_30 [1] */
0, 0,
/* IP14_30_28 [3] */
@@ -5485,10 +5501,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP14_2_0 [3] */
FN_AUDIO_CLKB, FN_SCIF_CLK, FN_CAN0_RX_D,
FN_DVC_MUTE, FN_CAN0_RX_C, FN_CAN_DEBUGOUT15,
- FN_REMOCON, 0, }
+ FN_REMOCON, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR15", 0xE606005C, 32,
- 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3) {
+ GROUP(2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3),
+ GROUP(
/* IP15_31_30 [2] */
0, 0, 0, 0,
/* IP15_29_28 [2] */
@@ -5520,10 +5537,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_IIC2_SCL, FN_I2C2_SCL, 0,
/* IP15_2_0 [3] */
FN_SCIFA2_SCK, FN_FMCLK, FN_SCK2, FN_MSIOF3_SCK, FN_DU2_DG7,
- FN_LCDOUT15, FN_SCIF_CLK_B, 0, }
+ FN_LCDOUT15, FN_SCIF_CLK_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR16", 0xE6060160, 32,
- 4, 4, 4, 4, 4, 4, 1, 1, 3, 3) {
+ GROUP(4, 4, 4, 4, 4, 4, 1, 1, 3, 3),
+ GROUP(
/* IP16_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -5551,11 +5569,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ADICS_SAMP, FN_DU2_CDE, FN_QPOLB, FN_SCIFA2_RXD_B, 0,
/* IP16_2_0 [3] */
FN_MSIOF0_SS2, FN_AUDIO_CLKOUT, FN_ADICHS2,
- FN_DU2_DISP, FN_QPOLA, FN_HTX0_C, FN_SCIFA2_TXD_B, 0, }
+ FN_DU2_DISP, FN_QPOLA, FN_HTX0_C, FN_SCIFA2_TXD_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32,
- 3, 2, 2, 3, 2, 1, 1, 1, 2, 1,
- 2, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1) {
+ GROUP(3, 2, 2, 3, 2, 1, 1, 1, 2, 1, 2, 1,
+ 1, 1, 1, 2, 1, 1, 2, 1, 1),
+ GROUP(
/* SEL_SCIF1 [3] */
FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
FN_SEL_SCIF1_4, 0, 0, 0,
@@ -5601,11 +5620,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SOF3 [1] */
FN_SEL_SOF3_0, FN_SEL_SOF3_1,
/* SEL_SOF0 [1] */
- FN_SEL_SOF0_0, FN_SEL_SOF0_1, }
+ FN_SEL_SOF0_0, FN_SEL_SOF0_1, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32,
- 3, 1, 1, 1, 2, 1, 2, 1, 2,
- 1, 1, 1, 3, 3, 2, 3, 2, 2) {
+ GROUP(3, 1, 1, 1, 2, 1, 2, 1, 2, 1, 1, 1,
+ 3, 3, 2, 3, 2, 2),
+ GROUP(
/* RESERVED [3] */
0, 0, 0, 0, 0, 0, 0, 0,
/* SEL_TMU1 [1] */
@@ -5643,11 +5663,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SIM [2] */
FN_SEL_SIM_0, FN_SEL_SIM_1, FN_SEL_SIM_2, 0,
/* SEL_SSI8 [2] */
- FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2, 0, }
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32,
- 1, 1, 2, 4, 4, 2, 2,
- 4, 2, 3, 2, 3, 2) {
+ GROUP(1, 1, 2, 4, 4, 2, 2, 4, 2, 3, 2, 3, 2),
+ GROUP(
/* SEL_IICDVFS [1] */
FN_SEL_IICDVFS_0, FN_SEL_IICDVFS_1,
/* SEL_IIC0 [1] */
@@ -5678,7 +5698,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
FN_SEL_I2C2_4, 0, 0, 0,
/* SEL_I2C1 [2] */
- FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, 0, }
+ FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, 0, ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index d8b13d4e9bbf..1292ec8d268f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -6,6 +6,7 @@
* Copyright (C) 2014-2017 Cogent Embedded, Inc.
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include "sh_pfc.h"
@@ -5427,7 +5428,7 @@ static const struct {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1, GROUP(
GP_0_31_FN, FN_IP1_22_20,
GP_0_30_FN, FN_IP1_19_17,
GP_0_29_FN, FN_IP1_16_14,
@@ -5459,9 +5460,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, FN_IP0_3,
GP_0_2_FN, FN_IP0_2,
GP_0_1_FN, FN_IP0_1,
- GP_0_0_FN, FN_IP0_0, }
+ GP_0_0_FN, FN_IP0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5493,9 +5494,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, FN_IP2_2_0,
GP_1_2_FN, FN_IP1_31_29,
GP_1_1_FN, FN_IP1_28_26,
- GP_1_0_FN, FN_IP1_25_23, }
+ GP_1_0_FN, FN_IP1_25_23, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1, GROUP(
GP_2_31_FN, FN_IP6_7_6,
GP_2_30_FN, FN_IP6_5_3,
GP_2_29_FN, FN_IP6_2_0,
@@ -5527,9 +5528,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, FN_IP4_4_2,
GP_2_2_FN, FN_IP4_1_0,
GP_2_1_FN, FN_IP3_30_28,
- GP_2_0_FN, FN_IP3_27_25 }
+ GP_2_0_FN, FN_IP3_27_25 ))
},
- { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1, GROUP(
GP_3_31_FN, FN_IP9_18_17,
GP_3_30_FN, FN_IP9_16,
GP_3_29_FN, FN_IP9_15_13,
@@ -5561,9 +5562,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, FN_IP7_12_11,
GP_3_2_FN, FN_IP7_10_9,
GP_3_1_FN, FN_IP7_8_6,
- GP_3_0_FN, FN_IP7_5_3 }
+ GP_3_0_FN, FN_IP7_5_3 ))
},
- { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1, GROUP(
GP_4_31_FN, FN_IP15_5_4,
GP_4_30_FN, FN_IP15_3_2,
GP_4_29_FN, FN_IP15_1_0,
@@ -5595,9 +5596,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, FN_IP9_24_23,
GP_4_2_FN, FN_IP9_22_21,
GP_4_1_FN, FN_IP9_20_19,
- GP_4_0_FN, FN_VI0_CLK }
+ GP_4_0_FN, FN_VI0_CLK ))
},
- { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1, GROUP(
GP_5_31_FN, FN_IP3_24_22,
GP_5_30_FN, FN_IP13_9_7,
GP_5_29_FN, FN_IP13_6_5,
@@ -5629,9 +5630,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, FN_IP11_18_17,
GP_5_2_FN, FN_IP11_16_15,
GP_5_1_FN, FN_IP11_14_12,
- GP_5_0_FN, FN_IP11_11_9 }
+ GP_5_0_FN, FN_IP11_11_9 ))
},
- { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1, GROUP(
GP_6_31_FN, FN_DU0_DOTCLKIN,
GP_6_30_FN, FN_USB1_OVC,
GP_6_29_FN, FN_IP14_31_29,
@@ -5663,9 +5664,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, FN_IP13_13,
GP_6_2_FN, FN_IP13_12,
GP_6_1_FN, FN_IP13_11,
- GP_6_0_FN, FN_IP13_10 }
+ GP_6_0_FN, FN_IP13_10 ))
},
- { PINMUX_CFG_REG("GPSR7", 0xE6060074, 32, 1) {
+ { PINMUX_CFG_REG("GPSR7", 0xE6060074, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5697,11 +5698,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_3_FN, FN_IP15_26_24,
GP_7_2_FN, FN_IP15_23_21,
GP_7_1_FN, FN_IP15_20_18,
- GP_7_0_FN, FN_IP15_17_15 }
+ GP_7_0_FN, FN_IP15_17_15 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32,
- 1, 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(1, 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* IP0_31 [1] */
0, 0,
/* IP0_30_29 [2] */
@@ -5756,10 +5758,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP0_1 [1] */
FN_D1, 0,
/* IP0_0 [1] */
- FN_D0, 0, }
+ FN_D0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060024, 32,
- 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2) {
+ GROUP(3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2),
+ GROUP(
/* IP1_31_29 [3] */
FN_A18, FN_DREQ1, FN_SCIFA1_RXD_C, 0, FN_SCIFB1_RXD_C,
0, 0, 0,
@@ -5792,10 +5795,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A8, FN_MSIOF1_SS1, FN_I2C0_SCL, 0,
/* IP1_1_0 [2] */
FN_A7, FN_MSIOF1_SYNC,
- 0, 0, }
+ 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32,
- 2, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3) {
+ GROUP(2, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3),
+ GROUP(
/* IP2_31_30 [2] */
0, 0, 0, 0,
/* IP2_29_27 [3] */
@@ -5828,10 +5832,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A20, FN_SPCLK, 0, 0,
/* IP2_2_0 [3] */
FN_A19, FN_DACK1, FN_SCIFA1_TXD_C, 0,
- FN_SCIFB1_TXD_C, 0, FN_SCIFB1_SCK_B, 0, }
+ FN_SCIFB1_TXD_C, 0, FN_SCIFB1_SCK_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xE606002C, 32,
- 1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3) {
+ GROUP(1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3),
+ GROUP(
/* IP3_31 [1] */
0, 0,
/* IP3_30_28 [3] */
@@ -5866,10 +5871,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SCIFB1_RXD_B, FN_PWM1, FN_TPU_TO1, 0,
/* IP3_2_0 [3] */
FN_EX_CS4_N, FN_ATARD0_N, FN_MSIOF2_RXD, 0, FN_EX_WAIT2,
- 0, 0, 0, }
+ 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32,
- 1, 3, 2, 2, 2, 1, 1, 1, 3, 3, 3, 2, 3, 3, 2) {
+ GROUP(1, 3, 2, 2, 2, 1, 1, 1, 3, 3, 3, 2,
+ 3, 3, 2),
+ GROUP(
/* IP4_31 [1] */
0, 0,
/* IP4_30_28 [3] */
@@ -5908,10 +5915,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_MSIOF2_SYNC_C, FN_GLO_I0_D,
0, 0, 0,
/* IP4_1_0 [2] */
- FN_SSI_SDATA0, FN_I2C0_SCL_B, FN_IIC0_SCL_B, FN_MSIOF2_SCK_C, }
+ FN_SSI_SDATA0, FN_I2C0_SCL_B, FN_IIC0_SCL_B, FN_MSIOF2_SCK_C,
+ ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060034, 32,
- 3, 3, 2, 2, 2, 3, 2, 3, 3, 3, 3, 3) {
+ GROUP(3, 3, 2, 2, 2, 3, 2, 3, 3, 3, 3, 3),
+ GROUP(
/* IP5_31_29 [3] */
FN_SSI_SDATA9, FN_RX3_D, FN_CAN0_RX_D,
0, 0, 0, 0, 0,
@@ -5946,10 +5955,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP5_2_0 [3] */
FN_SSI_WS5, FN_MSIOF1_SYNC_C, FN_TS_SCK0, FN_GLO_I1,
FN_MSIOF2_TXD_D, FN_VI1_R3_B,
- 0, 0, }
+ 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060038, 32,
- 2, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 3, 3) {
+ GROUP(2, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 3, 3),
+ GROUP(
/* IP6_31_30 [2] */
0, 0, 0, 0,
/* IP6_29_27 [3] */
@@ -5986,10 +5996,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP6_2_0 [3] */
FN_AUDIO_CLKB, FN_STP_OPWM_0_B, FN_MSIOF1_SCK_B,
FN_SCIF_CLK, FN_DVC_MUTE, FN_BPFCLK_E,
- 0, 0, }
+ 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
- 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3) {
+ GROUP(2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3),
+ GROUP(
/* IP7_31_30 [2] */
0, 0, 0, 0,
/* IP7_29_27 [3] */
@@ -6027,10 +6038,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP7_2_0 [3] */
FN_IRQ9, FN_DU1_DOTCLKIN_B, FN_CAN_CLK_D, FN_GPS_MAG_C,
FN_SCIF_CLK_B, FN_GPS_MAG_D,
- 0, 0, }
+ 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32,
- 1, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(1, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP8_31 [1] */
0, 0,
/* IP8_30_28 [3] */
@@ -6070,10 +6082,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
/* IP8_2_0 [3] */
FN_DU1_DG3, FN_LCDOUT11, FN_VI1_DATA5_B, 0, FN_SSI_WS78_B,
- 0, 0, 0, }
+ 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
- 3, 2, 2, 2, 2, 2, 2, 1, 3, 1, 1, 3, 1, 1, 3, 3) {
+ GROUP(3, 2, 2, 2, 2, 2, 2, 1, 3, 1, 1, 3,
+ 1, 1, 3, 3),
+ GROUP(
/* IP9_31_29 [3] */
FN_VI0_G0, FN_IIC1_SCL, FN_STP_IVCXO27_0_C, FN_I2C4_SCL,
FN_HCTS2_N, FN_SCIFB2_CTS_N, FN_ATAWR1_N, 0,
@@ -6113,10 +6127,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0,
/* IP9_2_0 [3] */
FN_DU1_DB6, FN_LCDOUT22, FN_I2C3_SCL_C, FN_RX3, FN_SCIFA3_RXD,
- 0, 0, 0, }
+ 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xE6060048, 32,
- 3, 2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 3) {
+ GROUP(3, 2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 3),
+ GROUP(
/* IP10_31_29 [3] */
FN_VI0_R4, FN_VI2_DATA5, FN_GLO_SCLK_B, FN_TX0_C, FN_I2C1_SCL_D,
0, 0, 0,
@@ -6150,11 +6165,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_HSCK2, FN_SCIFB2_SCK, FN_ATARD1_N, 0,
/* IP10_2_0 [3] */
FN_VI0_G1, FN_IIC1_SDA, FN_STP_ISCLK_0_C, FN_I2C4_SDA,
- FN_HRTS2_N, FN_SCIFB2_RTS_N, FN_ATADIR1_N, 0, }
+ FN_HRTS2_N, FN_SCIFB2_RTS_N, FN_ATADIR1_N, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32,
- 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
- 3, 3, 3, 3, 3) {
+ GROUP(2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 3, 3, 3, 3, 3),
+ GROUP(
/* IP11_31_30 [2] */
FN_ETH_CRS_DV, FN_AVB_LINK, FN_I2C2_SDA_C, 0,
/* IP11_29_28 [2] */
@@ -6197,10 +6213,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0,
/* IP11_2_0 [3] */
FN_VI0_R5, FN_VI2_DATA6, FN_GLO_SDATA_B, FN_RX0_C,
- FN_I2C1_SDA_D, 0, 0, 0, }
+ FN_I2C1_SDA_D, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
- 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2) {
+ GROUP(2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2),
+ GROUP(
/* IP12_31_30 [2] */
0, 0, 0, 0,
/* IP12_29_27 [3] */
@@ -6238,11 +6255,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP12_3_2 [2] */
FN_ETH_RXD0, FN_AVB_PHY_INT, FN_I2C3_SDA, FN_IIC0_SDA,
/* IP12_1_0 [2] */
- FN_ETH_RX_ER, FN_AVB_CRS, FN_I2C3_SCL, FN_IIC0_SCL, }
+ FN_ETH_RX_ER, FN_AVB_CRS, FN_I2C3_SCL, FN_IIC0_SCL, ))
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
- 1, 3, 1, 1, 1, 2, 1, 3, 3, 1, 1, 1, 1, 1, 1,
- 3, 2, 2, 3) {
+ GROUP(1, 3, 1, 1, 1, 2, 1, 3, 3, 1, 1, 1,
+ 1, 1, 1, 3, 2, 2, 3),
+ GROUP(
/* IP13_31 [1] */
0, 0,
/* IP13_30_28 [3] */
@@ -6289,10 +6307,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP13_2_0 [3] */
FN_STP_ISD_0, FN_AVB_TX_ER, FN_SCIFB2_SCK_C,
FN_ADICLK_B, FN_MSIOF0_SS1_C,
- 0, 0, 0, }
+ 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR14", 0xE6060058, 32,
- 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2) {
+ GROUP(3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1,
+ 1, 1, 2),
+ GROUP(
/* IP14_31_29 [3] */
FN_MSIOF0_SS2, FN_MMC_D7, FN_ADICHS2, FN_RX0_E,
FN_VI1_VSYNC_N_C, FN_IIC0_SDA_C, FN_VI1_G5_B, 0,
@@ -6332,10 +6352,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP14_2 [1] */
FN_SD2_CLK, FN_MMC_CLK,
/* IP14_1_0 [2] */
- FN_SD1_WP, FN_PWM1_B, FN_I2C1_SDA_C, 0, }
+ FN_SD1_WP, FN_PWM1_B, FN_I2C1_SDA_C, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR15", 0xE606005C, 32,
- 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2) {
+ GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2),
+ GROUP(
/* IP15_31_30 [2] */
0, 0, 0, 0,
/* IP15_29_27 [3] */
@@ -6373,10 +6394,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP15_3_2 [2] */
FN_SIM0_CLK, FN_IECLK, FN_CAN_CLK_C, 0,
/* IP15_1_0 [2] */
- FN_SIM0_RST, FN_IETX, FN_CAN1_TX_D, 0, }
+ FN_SIM0_RST, FN_IETX, FN_CAN1_TX_D, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR16", 0xE6060160, 32,
- 4, 4, 4, 4, 4, 2, 2, 2, 3, 3) {
+ GROUP(4, 4, 4, 4, 4, 2, 2, 2, 3, 3),
+ GROUP(
/* IP16_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -6405,11 +6427,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP16_2_0 [3] */
FN_HRX1, FN_SCIFB1_RXD, FN_VI1_R0_B,
FN_GLO_SDATA_C, FN_VI1_DATA6_C,
- 0, 0, 0, }
+ 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32,
- 1, 2, 2, 2, 3, 2, 1, 1, 1, 1,
- 3, 2, 2, 2, 1, 2, 2, 2) {
+ GROUP(1, 2, 2, 2, 3, 2, 1, 1, 1, 1, 3, 2,
+ 2, 2, 1, 2, 2, 2),
+ GROUP(
/* RESERVED [1] */
0, 0,
/* SEL_SCIF1 [2] */
@@ -6450,11 +6473,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_TSIF0 [2] */
FN_SEL_TSIF0_0, FN_SEL_TSIF0_1, FN_SEL_TSIF0_2, FN_SEL_TSIF0_3,
/* SEL_SOF0 [2] */
- FN_SEL_SOF0_0, FN_SEL_SOF0_1, FN_SEL_SOF0_2, 0, }
+ FN_SEL_SOF0_0, FN_SEL_SOF0_1, FN_SEL_SOF0_2, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32,
- 3, 1, 1, 3, 2, 1, 1, 2, 2,
- 1, 3, 2, 1, 2, 2, 2, 1, 1, 1) {
+ GROUP(3, 1, 1, 3, 2, 1, 1, 2, 2, 1, 3, 2,
+ 1, 2, 2, 2, 1, 1, 1),
+ GROUP(
/* SEL_SCIF0 [3] */
FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2,
FN_SEL_SCIF0_3, FN_SEL_SCIF0_4,
@@ -6498,11 +6522,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* RESERVED [1] */
0, 0,
/* SEL_SSI8 [1] */
- FN_SEL_SSI8_0, FN_SEL_SSI8_1, }
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 1, 1, 2, 2, 3, 2, 2, 2, 1) {
+ GROUP(2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2,
+ 3, 2, 2, 2, 1),
+ GROUP(
/* SEL_HSCIF2 [2] */
FN_SEL_HSCIF2_0, FN_SEL_HSCIF2_1,
FN_SEL_HSCIF2_2, FN_SEL_HSCIF2_3,
@@ -6540,11 +6565,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* RESERVED [2] */
0, 0, 0, 0,
/* RESERVED [1] */
- 0, 0, }
+ 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE606009C, 32,
- 3, 2, 2, 1, 1, 1, 1, 3, 2,
- 2, 3, 1, 1, 1, 2, 2, 2, 2) {
+ GROUP(3, 2, 2, 1, 1, 1, 1, 3, 2, 2, 3, 1,
+ 1, 1, 2, 2, 2, 2),
+ GROUP(
/* SEL_SOF1 [3] */
FN_SEL_SOF1_0, FN_SEL_SOF1_1, FN_SEL_SOF1_2, FN_SEL_SOF1_3,
FN_SEL_SOF1_4,
@@ -6586,7 +6612,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* RESERVED [2] */
0, 0, 0, 0,
/* RESERVED [2] */
- 0, 0, 0, 0, }
+ 0, 0, 0, 0, ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
index d36da5652de6..bbace1478613 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
@@ -1988,7 +1988,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2020,9 +2020,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, FN_IP0_3,
GP_0_2_FN, FN_IP0_2,
GP_0_1_FN, FN_IP0_1,
- GP_0_0_FN, FN_IP0_0 }
+ GP_0_0_FN, FN_IP0_0 ))
},
- { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2054,9 +2054,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, FN_IP1_8,
GP_1_2_FN, FN_IP1_7,
GP_1_1_FN, FN_IP1_6,
- GP_1_0_FN, FN_IP1_5, }
+ GP_1_0_FN, FN_IP1_5, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1, GROUP(
GP_2_31_FN, FN_A15,
GP_2_30_FN, FN_A14,
GP_2_29_FN, FN_A13,
@@ -2088,9 +2088,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, FN_D3,
GP_2_2_FN, FN_D2,
GP_2_1_FN, FN_D1,
- GP_2_0_FN, FN_D0 }
+ GP_2_0_FN, FN_D0 ))
},
- { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2122,9 +2122,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, FN_A19,
GP_3_2_FN, FN_A18,
GP_3_1_FN, FN_A17,
- GP_3_0_FN, FN_A16 }
+ GP_3_0_FN, FN_A16 ))
},
- { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2156,9 +2156,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, FN_VI0_VSYNC_N,
GP_4_2_FN, FN_VI0_HSYNC_N,
GP_4_1_FN, FN_VI0_CLKENB,
- GP_4_0_FN, FN_VI0_CLK }
+ GP_4_0_FN, FN_VI0_CLK ))
},
- { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2190,9 +2190,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, FN_VI1_VSYNC_N,
GP_5_2_FN, FN_VI1_HSYNC_N,
GP_5_1_FN, FN_VI1_CLKENB,
- GP_5_0_FN, FN_VI1_CLK }
+ GP_5_0_FN, FN_VI1_CLK ))
},
- { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2224,9 +2224,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, FN_IP2_3,
GP_6_2_FN, FN_IP2_2,
GP_6_1_FN, FN_IP2_1,
- GP_6_0_FN, FN_IP2_0 }
+ GP_6_0_FN, FN_IP2_0 ))
},
- { PINMUX_CFG_REG("GPSR7", 0xE6060020, 32, 1) {
+ { PINMUX_CFG_REG("GPSR7", 0xE6060020, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2258,9 +2258,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_3_FN, FN_IP3_3,
GP_7_2_FN, FN_IP3_2,
GP_7_1_FN, FN_IP3_1,
- GP_7_0_FN, FN_IP3_0 }
+ GP_7_0_FN, FN_IP3_0 ))
},
- { PINMUX_CFG_REG("GPSR8", 0xE6060024, 32, 1) {
+ { PINMUX_CFG_REG("GPSR8", 0xE6060024, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2292,9 +2292,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_8_3_FN, FN_IP4_3_2,
GP_8_2_FN, FN_IP4_1,
GP_8_1_FN, FN_IP4_0,
- GP_8_0_FN, FN_VI4_CLK }
+ GP_8_0_FN, FN_VI4_CLK ))
},
- { PINMUX_CFG_REG("GPSR9", 0xE6060028, 32, 1) {
+ { PINMUX_CFG_REG("GPSR9", 0xE6060028, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2326,9 +2326,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_9_3_FN, FN_IP5_2,
GP_9_2_FN, FN_IP5_1,
GP_9_1_FN, FN_IP5_0,
- GP_9_0_FN, FN_VI5_CLK }
+ GP_9_0_FN, FN_VI5_CLK ))
},
- { PINMUX_CFG_REG("GPSR10", 0xE606002C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR10", 0xE606002C, 32, 1, GROUP(
GP_10_31_FN, FN_CAN1_RX,
GP_10_30_FN, FN_CAN1_TX,
GP_10_29_FN, FN_CAN_CLK,
@@ -2360,9 +2360,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_10_3_FN, FN_IP6_2,
GP_10_2_FN, FN_HRTS0_N,
GP_10_1_FN, FN_IP6_1,
- GP_10_0_FN, FN_IP6_0 }
+ GP_10_0_FN, FN_IP6_0 ))
},
- { PINMUX_CFG_REG("GPSR11", 0xE6060030, 32, 1) {
+ { PINMUX_CFG_REG("GPSR11", 0xE6060030, 32, 1, GROUP(
0, 0,
0, 0,
GP_11_29_FN, FN_AVS2,
@@ -2394,13 +2394,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_11_3_FN, FN_IP7_6,
GP_11_2_FN, FN_IP7_5_4,
GP_11_1_FN, FN_IP7_3_2,
- GP_11_0_FN, FN_IP7_1_0 }
+ GP_11_0_FN, FN_IP7_1_0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32,
- 4, 4,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* IP0_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP0_27_24 [4] */
@@ -2452,13 +2453,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP0_1 [1] */
FN_DU0_DR1_DATA1, 0,
/* IP0_0 [1] */
- FN_DU0_DR0_DATA0, 0 }
+ FN_DU0_DR0_DATA0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32,
- 4, 4,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* IP1_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP1_27_24 [4] */
@@ -2510,13 +2512,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP1_1 [1] */
FN_DU0_EXVSYNC_DU0_VSYNC, 0,
/* IP1_0 [1] */
- FN_DU0_EXHSYNC_DU0_HSYNC, 0 }
+ FN_DU0_EXHSYNC_DU0_HSYNC, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32,
- 4, 4,
- 4, 3, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(4, 4,
+ 4, 3, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* IP2_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP2_27_24 [4] */
@@ -2558,13 +2561,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP2_1 [1] */
FN_VI2_CLKENB, FN_AVB_RX_DV,
/* IP2_0 [1] */
- FN_VI2_CLK, FN_AVB_RX_CLK }
+ FN_VI2_CLK, FN_AVB_RX_CLK ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32,
- 4, 4,
- 4, 4,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(4, 4,
+ 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* IP3_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP3_27_24 [4] */
@@ -2604,12 +2608,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_1 [1] */
FN_VI3_CLKENB, FN_AVB_TXD4,
/* IP3_0 [1] */
- FN_VI3_CLK, FN_AVB_TX_CLK }
+ FN_VI3_CLK, FN_AVB_TX_CLK ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32,
- 4, 3, 1,
- 1, 1, 1, 2, 2, 2,
- 2, 2, 2, 2, 2, 1, 2, 1, 1) {
+ GROUP(4, 3, 1,
+ 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 2, 1, 1),
+ GROUP(
/* IP4_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP4_27_25 [3] */
@@ -2645,13 +2650,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP4_1 [1] */
FN_VI4_HSYNC_N, FN_VI0_D13_G5_Y5,
/* IP4_0 [1] */
- FN_VI4_CLKENB, FN_VI0_D12_G4_Y4 }
+ FN_VI4_CLKENB, FN_VI0_D12_G4_Y4 ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32,
- 4, 4,
- 4, 4,
- 4, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(4, 4,
+ 4, 4,
+ 4, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* IP5_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP5_27_24 [4] */
@@ -2685,13 +2691,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP5_1 [1] */
FN_VI5_HSYNC_N, FN_VI1_D13_G5_Y5_B,
/* IP5_0 [1] */
- FN_VI5_CLKENB, FN_VI1_D12_G4_Y4_B }
+ FN_VI5_CLKENB, FN_VI1_D12_G4_Y4_B ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32,
- 4, 4,
- 4, 1, 2, 1,
- 2, 2, 2, 2,
- 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(4, 4,
+ 4, 1, 2, 1,
+ 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* IP6_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP6_27_24 [4] */
@@ -2727,13 +2734,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP6_1 [1] */
FN_MSIOF0_SYNC, FN_HCTS0_N,
/* IP6_0 [1] */
- FN_MSIOF0_SCK, FN_HSCK0 }
+ FN_MSIOF0_SCK, FN_HSCK0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32,
- 4, 4,
- 3, 1, 1, 1, 1, 1,
- 2, 2, 2, 2,
- 1, 1, 2, 2, 2) {
+ GROUP(4, 4,
+ 3, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2,
+ 1, 1, 2, 2, 2),
+ GROUP(
/* IP7_31_28 [4] */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP7_27_24 [4] */
@@ -2767,7 +2775,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP7_3_2 [2] */
FN_PWM1, FN_TCLK2, FN_FSO_CFE_1, 0,
/* IP7_1_0 [2] */
- FN_PWM0, FN_TCLK1, FN_FSO_CFE_0, 0 }
+ FN_PWM0, FN_TCLK1, FN_FSO_CFE_0, 0 ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index 958a5f714c93..1ff4969d8381 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -7,6 +7,7 @@
* Copyright (C) 2015-2017 Cogent Embedded, Inc. <source@cogentembedded.com>
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -4617,7 +4618,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1, GROUP(
GP_0_31_FN, FN_IP2_17_16,
GP_0_30_FN, FN_IP2_15_14,
GP_0_29_FN, FN_IP2_13_12,
@@ -4649,9 +4650,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, FN_IP0_27_26,
GP_0_2_FN, FN_IP0_25,
GP_0_1_FN, FN_IP0_24,
- GP_0_0_FN, FN_IP0_23_22, }
+ GP_0_0_FN, FN_IP0_23_22, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4683,9 +4684,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, FN_IP2_29_27,
GP_1_2_FN, FN_IP2_26_24,
GP_1_1_FN, FN_IP2_23_21,
- GP_1_0_FN, FN_IP2_20_18, }
+ GP_1_0_FN, FN_IP2_20_18, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1, GROUP(
GP_2_31_FN, FN_IP6_7_6,
GP_2_30_FN, FN_IP6_5_4,
GP_2_29_FN, FN_IP6_3_2,
@@ -4717,9 +4718,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, FN_IP4_11_10,
GP_2_2_FN, FN_IP4_9_8,
GP_2_1_FN, FN_IP4_7_5,
- GP_2_0_FN, FN_IP4_4_2 }
+ GP_2_0_FN, FN_IP4_4_2 ))
},
- { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1, GROUP(
GP_3_31_FN, FN_IP8_22_20,
GP_3_30_FN, FN_IP8_19_17,
GP_3_29_FN, FN_IP8_16_15,
@@ -4751,9 +4752,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, FN_IP6_11,
GP_3_2_FN, FN_IP6_10,
GP_3_1_FN, FN_IP6_9,
- GP_3_0_FN, FN_IP6_8 }
+ GP_3_0_FN, FN_IP6_8 ))
},
- { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1, GROUP(
GP_4_31_FN, FN_IP11_17_16,
GP_4_30_FN, FN_IP11_15_14,
GP_4_29_FN, FN_IP11_13_11,
@@ -4785,9 +4786,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, FN_IP9_2_0,
GP_4_2_FN, FN_IP8_31_29,
GP_4_1_FN, FN_IP8_28_26,
- GP_4_0_FN, FN_IP8_25_23 }
+ GP_4_0_FN, FN_IP8_25_23 ))
},
- { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4819,9 +4820,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, FN_IP11_29_27,
GP_5_2_FN, FN_IP11_26_24,
GP_5_1_FN, FN_IP11_23_21,
- GP_5_0_FN, FN_IP11_20_18 }
+ GP_5_0_FN, FN_IP11_20_18 ))
},
- { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4853,11 +4854,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, FN_SD0_DATA1,
GP_6_2_FN, FN_SD0_DATA0,
GP_6_1_FN, FN_SD0_CMD,
- GP_6_0_FN, FN_SD0_CLK }
+ GP_6_0_FN, FN_SD0_CLK ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32,
- 2, 2, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(2, 2, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* IP0_31_30 [2] */
FN_D5, FN_SCIF4_RXD_B, FN_I2C0_SCL_D, 0,
/* IP0_29_28 [2] */
@@ -4907,11 +4909,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP0_1 [1] */
0, 0,
/* IP0_0 [1] */
- FN_SD1_CD, FN_CAN0_RX, }
+ FN_SD1_CD, FN_CAN0_RX, ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060024, 32,
- 2, 2, 1, 1, 1, 1, 2, 2, 2, 3, 2, 2, 3, 2, 2,
- 2, 2) {
+ GROUP(2, 2, 1, 1, 1, 1, 2, 2, 2, 3, 2, 2,
+ 3, 2, 2, 2, 2),
+ GROUP(
/* IP1_31_30 [2] */
FN_A6, FN_SCIFB0_CTS_N, FN_SCIFA4_RXD_B, FN_TPUTO2_C,
/* IP1_29_28 [2] */
@@ -4947,10 +4950,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP1_3_2 [2] */
FN_D7, FN_IRQ3, FN_TCLK1, FN_PWM6_B,
/* IP1_1_0 [2] */
- FN_D6, FN_SCIF4_TXD_B, FN_I2C0_SDA_D, 0, }
+ FN_D6, FN_SCIF4_TXD_B, FN_I2C0_SDA_D, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32,
- 2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ GROUP(2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2),
+ GROUP(
/* IP2_31_30 [2] */
FN_A20, FN_SPCLK, 0, 0,
/* IP2_29_27 [3] */
@@ -4982,10 +4986,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP2_3_2 [2] */
FN_A8, FN_MSIOF1_RXD, FN_SCIFA0_RXD_B, 0,
/* IP2_1_0 [2] */
- FN_A7, FN_SCIFB0_RTS_N, FN_SCIFA4_TXD_B, 0, }
+ FN_A7, FN_SCIFB0_RTS_N, FN_SCIFA4_TXD_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xE606002C, 32,
- 1, 1, 3, 3, 3, 3, 3, 2, 1, 1, 1, 2, 2, 2, 2, 2) {
+ GROUP(1, 1, 3, 3, 3, 3, 3, 2, 1, 1, 1, 2,
+ 2, 2, 2, 2),
+ GROUP(
/* IP3_31 [1] */
FN_RD_WR_N, FN_ATAG1_N,
/* IP3_30 [1] */
@@ -5022,10 +5028,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_3_2 [2] */
FN_A22, FN_MISO_IO1, 0, FN_ATADIR1_N,
/* IP3_1_0 [2] */
- FN_A21, FN_MOSI_IO0, 0, 0, }
+ FN_A21, FN_MOSI_IO0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32,
- 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2) {
+ GROUP(2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2),
+ GROUP(
/* IP4_31_30 [2] */
FN_DU0_DG4, FN_LCDOUT12, 0, 0,
/* IP4_29_28 [2] */
@@ -5057,10 +5064,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DR0, FN_LCDOUT16, FN_SCIF5_RXD_C, FN_I2C2_SCL_D,
0, 0, 0, 0,
/* IP4_1_0 [2] */
- FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK, 0, }
+ FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060034, 32,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2) {
+ GROUP(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
+ 2, 2, 2),
+ GROUP(
/* IP5_31_30 [2] */
FN_DU0_EXHSYNC_DU0_HSYNC, FN_QSTH_QHS, 0, 0,
/* IP5_29_28 [2] */
@@ -5092,11 +5101,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP5_3_2 [2] */
FN_DU0_DG6, FN_LCDOUT14, 0, 0,
/* IP5_1_0 [2] */
- FN_DU0_DG5, FN_LCDOUT13, 0, 0, }
+ FN_DU0_DG5, FN_LCDOUT13, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060038, 32,
- 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
- 2, 2) {
+ GROUP(3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2),
+ GROUP(
/* IP6_31_29 [3] */
FN_ETH_MDIO, FN_VI0_G0, FN_MSIOF2_RXD_B, FN_I2C5_SCL_D,
FN_AVB_TX_CLK, FN_ADIDATA, 0, 0,
@@ -5138,10 +5148,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, 0,
0,
/* IP6_1_0 [2] */
- FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, 0, 0, }
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
- 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP7_31 [1] */
FN_DREQ0_N, FN_SCIFB1_RXD,
/* IP7_30 [1] */
@@ -5175,10 +5186,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_AVB_TXD1, FN_ADICLK, 0, 0,
/* IP7_2_0 [3] */
FN_ETH_CRS_DV, FN_VI0_G1, FN_MSIOF2_TXD_B, FN_I2C5_SDA_D,
- FN_AVB_TXD0, FN_ADICS_SAMP, 0, 0, }
+ FN_AVB_TXD0, FN_ADICS_SAMP, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32,
- 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3) {
+ GROUP(3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3),
+ GROUP(
/* IP8_31_29 [3] */
FN_MSIOF0_RXD, FN_SCIF5_RXD, FN_I2C2_SCL_C, FN_DU1_DR2,
0, FN_TS_SDEN_D, FN_FMCLK_C, 0,
@@ -5210,10 +5222,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_AVB_MDIO, FN_SSI_SCK78_B, 0, 0,
/* IP8_2_0 [3] */
FN_ETH_MDC, FN_VI0_R3, FN_SCIF3_TXD_B, FN_I2C4_SDA_E,
- FN_AVB_MDC, FN_SSI_SDATA6_B, 0, 0, }
+ FN_AVB_MDC, FN_SSI_SDATA6_B, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
- 1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3) {
+ GROUP(1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3),
+ GROUP(
/* IP9_31 [1] */
0, 0,
/* IP9_30_28 [3] */
@@ -5246,10 +5259,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, FN_TPUTO1_C, 0, 0,
/* IP9_2_0 [3] */
FN_MSIOF0_TXD, FN_SCIF5_TXD, FN_I2C2_SDA_C, FN_DU1_DR3,
- 0, FN_TS_SPSYNC_D, FN_FMIN_C, 0, }
+ 0, FN_TS_SPSYNC_D, FN_FMIN_C, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xE6060048, 32,
- 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP10_31_30 [2] */
FN_SSI_SCK5, FN_SCIFA3_SCK, FN_DU1_DOTCLKIN, 0,
/* IP10_29_27 [3] */
@@ -5281,10 +5295,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
/* IP10_2_0 [3] */
FN_SCIF1_RXD, FN_I2C5_SCL, FN_DU1_DG6, FN_SSI_SCK2_B,
- 0, 0, 0, 0, }
+ 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32,
- 2, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3) {
+ GROUP(2, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3),
+ GROUP(
/* IP11_31_30 [2] */
0, 0, 0, 0,
/* IP11_29_27 [3] */
@@ -5316,10 +5331,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
/* IP11_2_0 [3] */
FN_SSI_WS5, FN_SCIFA3_RXD, FN_I2C3_SCL_C, FN_DU1_DOTCLKOUT0,
- 0, 0, 0, 0, }
+ 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
- 2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3) {
+ GROUP(2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3),
+ GROUP(
/* IP12_31_30 [2] */
0, 0, 0, 0,
/* IP12_29_27 [3] */
@@ -5351,10 +5367,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_CAN1_RX_C, FN_DACK1_B, 0, 0,
/* IP12_2_0 [3] */
FN_SSI_SCK34, FN_MSIOF1_SYNC_B, FN_SCIFA1_SCK_C, FN_ADICHS0_B,
- 0, FN_DREQ1_N_B, 0, 0, }
+ 0, FN_DREQ1_N_B, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
- 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP13_31 [1] */
0, 0,
/* IP13_30 [1] */
@@ -5391,11 +5408,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_DATA4, 0, FN_ATACS10_N, FN_ETH_REFCLK_B, 0,
/* IP13_2_0 [3] */
FN_SSI_WS2, FN_HSCIF1_HCTS_N_B, FN_SCIFA0_RXD_D, FN_VI1_DATA3,
- 0, FN_ATACS00_N, FN_ETH_LINK_B, 0, }
+ 0, FN_ATACS00_N, FN_ETH_LINK_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32,
- 2, 1, 2, 3, 4, 1, 1, 3, 3, 3, 3, 3,
- 2, 1) {
+ GROUP(2, 1, 2, 3, 4, 1, 1, 3, 3, 3, 3, 3, 2, 1),
+ GROUP(
/* SEL_ADG [2] */
FN_SEL_ADG_0, FN_SEL_ADG_1, FN_SEL_ADG_2, FN_SEL_ADG_3,
/* RESERVED [1] */
@@ -5429,11 +5446,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_I2C05 [2] */
FN_SEL_I2C05_0, FN_SEL_I2C05_1, FN_SEL_I2C05_2, FN_SEL_I2C05_3,
/* RESERVED [1] */
- 0, 0, }
+ 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32,
- 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1,
- 2, 2, 2, 1, 1, 2) {
+ GROUP(2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1,
+ 2, 2, 1, 1, 2, 2, 2, 1, 1, 2),
+ GROUP(
/* SEL_IEB [2] */
FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
/* SEL_IIC0 [2] */
@@ -5480,11 +5498,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_HSCIF1 [1] */
FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1,
/* RESERVED [2] */
- 0, 0, 0, 0, }
+ 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32,
- 2, 2, 2, 1, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(2, 2, 2, 1, 3, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* SEL_SCIF0 [2] */
FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
/* SEL_SCIF1 [2] */
@@ -5537,7 +5556,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* RESERVED [1] */
0, 0,
/* RESERVED [1] */
- 0, 0, }
+ 0, 0, ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 287cfbb7e992..f16dfbad3f17 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -5,6 +5,7 @@
* Copyright (C) 2015-2017 Renesas Electronics Corporation
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include "core.h"
@@ -153,11 +154,11 @@
#define GPSR5_11 F_(RX2_A, IP12_7_4)
#define GPSR5_10 F_(TX2_A, IP12_3_0)
#define GPSR5_9 F_(SCK2, IP11_31_28)
-#define GPSR5_8 F_(RTS1_N_TANS, IP11_27_24)
+#define GPSR5_8 F_(RTS1_N, IP11_27_24)
#define GPSR5_7 F_(CTS1_N, IP11_23_20)
#define GPSR5_6 F_(TX1_A, IP11_19_16)
#define GPSR5_5 F_(RX1_A, IP11_15_12)
-#define GPSR5_4 F_(RTS0_N_TANS, IP11_11_8)
+#define GPSR5_4 F_(RTS0_N, IP11_11_8)
#define GPSR5_3 F_(CTS0_N, IP11_7_4)
#define GPSR5_2 F_(TX0, IP11_3_0)
#define GPSR5_1 F_(RX0, IP10_31_28)
@@ -198,8 +199,8 @@
#define GPSR6_0 F_(SSI_SCK01239, IP13_23_20)
/* GPSR7 */
-#define GPSR7_3 FM(HDMI1_CEC)
-#define GPSR7_2 FM(HDMI0_CEC)
+#define GPSR7_3 FM(GP7_03)
+#define GPSR7_2 FM(GP7_02)
#define GPSR7_1 FM(AVS2)
#define GPSR7_0 FM(AVS1)
@@ -210,7 +211,7 @@
#define IP0_11_8 FM(AVB_PHY_INT) F_(0, 0) FM(MSIOF2_SYNC_C) FM(RX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_15_12 FM(AVB_LINK) F_(0, 0) FM(MSIOF2_SCK_C) FM(TX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_19_16 FM(AVB_AVTP_MATCH_A) F_(0, 0) FM(MSIOF2_RXD_C) FM(CTS4_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_23_20 FM(AVB_AVTP_CAPTURE_A) F_(0, 0) FM(MSIOF2_TXD_C) FM(RTS4_N_TANS_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_23_20 FM(AVB_AVTP_CAPTURE_A) F_(0, 0) FM(MSIOF2_TXD_C) FM(RTS4_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_27_24 FM(IRQ0) FM(QPOLB) F_(0, 0) FM(DU_CDE) FM(VI4_DATA0_B) FM(CAN0_TX_B) FM(CANFD0_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_31_28 FM(IRQ1) FM(QPOLA) F_(0, 0) FM(DU_DISP) FM(VI4_DATA1_B) FM(CAN0_RX_B) FM(CANFD0_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1_3_0 FM(IRQ2) FM(QCPV_QDE) F_(0, 0) FM(DU_EXODDF_DU_ODDF_DISP_CDE) FM(VI4_DATA2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -232,7 +233,7 @@
#define IP2_27_24 FM(A7) FM(LCDOUT23) FM(MSIOF2_SS2_A) FM(TX4_B) FM(VI4_DATA15) FM(VI5_DATA15) FM(DU_DB7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_31_28 FM(A8) FM(RX3_B) FM(MSIOF2_SYNC_A) FM(HRX4_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(SDA6_A) FM(AVB_AVTP_MATCH_B) FM(PWM1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_3_0 FM(A9) F_(0, 0) FM(MSIOF2_SCK_A) FM(CTS4_N_B) F_(0, 0) FM(VI5_VSYNC_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_7_4 FM(A10) F_(0, 0) FM(MSIOF2_RXD_A) FM(RTS4_N_TANS_B) F_(0, 0) FM(VI5_HSYNC_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_7_4 FM(A10) F_(0, 0) FM(MSIOF2_RXD_A) FM(RTS4_N_B) F_(0, 0) FM(VI5_HSYNC_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_11_8 FM(A11) FM(TX3_B) FM(MSIOF2_TXD_A) FM(HTX4_B) FM(HSCK4) FM(VI5_FIELD) F_(0, 0) FM(SCL6_A) FM(AVB_AVTP_CAPTURE_B) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_15_12 FM(A12) FM(LCDOUT12) FM(MSIOF3_SCK_C) F_(0, 0) FM(HRX4_A) FM(VI5_DATA8) FM(DU_DG4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_19_16 FM(A13) FM(LCDOUT13) FM(MSIOF3_SYNC_C) F_(0, 0) FM(HTX4_A) FM(VI5_DATA9) FM(DU_DG5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -248,7 +249,7 @@
#define IP4_27_24 FM(RD_N) F_(0, 0) FM(MSIOF3_SYNC_D) FM(RX3_A) FM(HRX3_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(CAN0_TX_A) FM(CANFD0_TX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_31_28 FM(RD_WR_N) F_(0, 0) FM(MSIOF3_RXD_D) FM(TX3_A) FM(HTX3_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(CAN0_RX_A) FM(CANFD0_RX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP5_3_0 FM(WE0_N) F_(0, 0) FM(MSIOF3_TXD_D) FM(CTS3_N) FM(HCTS3_N) F_(0, 0) F_(0, 0) FM(SCL6_B) FM(CAN_CLK) F_(0, 0) FM(IECLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_7_4 FM(WE1_N) F_(0, 0) FM(MSIOF3_SS1_D) FM(RTS3_N_TANS) FM(HRTS3_N) F_(0, 0) F_(0, 0) FM(SDA6_B) FM(CAN1_RX) FM(CANFD1_RX) FM(IERX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_7_4 FM(WE1_N) F_(0, 0) FM(MSIOF3_SS1_D) FM(RTS3_N) FM(HRTS3_N) F_(0, 0) F_(0, 0) FM(SDA6_B) FM(CAN1_RX) FM(CANFD1_RX) FM(IERX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP5_11_8 FM(EX_WAIT0_A) FM(QCLK) F_(0, 0) F_(0, 0) FM(VI4_CLK) F_(0, 0) FM(DU_DOTCLKOUT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP5_15_12 FM(D0) FM(MSIOF2_SS1_B)FM(MSIOF3_SCK_A) F_(0, 0) FM(VI4_DATA16) FM(VI5_DATA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP5_19_16 FM(D1) FM(MSIOF2_SS2_B)FM(MSIOF3_SYNC_A) F_(0, 0) FM(VI4_DATA17) FM(VI5_DATA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -261,7 +262,7 @@
#define IP6_15_12 FM(D8) FM(LCDOUT0) FM(MSIOF2_SCK_D) FM(SCK4_C) FM(VI4_DATA0_A) F_(0, 0) FM(DU_DR0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_19_16 FM(D9) FM(LCDOUT1) FM(MSIOF2_SYNC_D) F_(0, 0) FM(VI4_DATA1_A) F_(0, 0) FM(DU_DR1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_23_20 FM(D10) FM(LCDOUT2) FM(MSIOF2_RXD_D) FM(HRX3_B) FM(VI4_DATA2_A) FM(CTS4_N_C) FM(DU_DR2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_27_24 FM(D11) FM(LCDOUT3) FM(MSIOF2_TXD_D) FM(HTX3_B) FM(VI4_DATA3_A) FM(RTS4_N_TANS_C)FM(DU_DR3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_27_24 FM(D11) FM(LCDOUT3) FM(MSIOF2_TXD_D) FM(HTX3_B) FM(VI4_DATA3_A) FM(RTS4_N_C) FM(DU_DR3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_31_28 FM(D12) FM(LCDOUT4) FM(MSIOF2_SS1_D) FM(RX4_C) FM(VI4_DATA4_A) F_(0, 0) FM(DU_DR4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_3_0 FM(D13) FM(LCDOUT5) FM(MSIOF2_SS2_D) FM(TX4_C) FM(VI4_DATA5_A) F_(0, 0) FM(DU_DR5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_7_4 FM(D14) FM(LCDOUT6) FM(MSIOF3_SS1_A) FM(HRX3_C) FM(VI4_DATA6_A) F_(0, 0) FM(DU_DR6) FM(SCL6_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -299,11 +300,11 @@
#define IP10_31_28 FM(RX0) FM(HRX1_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SCK0_C) FM(STP_ISCLK_0_C) FM(RIF0_D0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_3_0 FM(TX0) FM(HTX1_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0_C)FM(STP_ISSYNC_0_C) FM(RIF0_D1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_7_4 FM(CTS0_N) FM(HCTS1_N_B) FM(MSIOF1_SYNC_B) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_C)FM(STP_ISSYNC_1_C) FM(RIF1_SYNC_B) FM(AUDIO_CLKOUT_C) FM(ADICS_SAMP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_11_8 FM(RTS0_N_TANS) FM(HRTS1_N_B) FM(MSIOF1_SS1_B) FM(AUDIO_CLKA_B) FM(SCL2_A) F_(0, 0) FM(STP_IVCXO27_1_C) FM(RIF0_SYNC_B) F_(0, 0) FM(ADICHS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_11_8 FM(RTS0_N) FM(HRTS1_N_B) FM(MSIOF1_SS1_B) FM(AUDIO_CLKA_B) FM(SCL2_A) F_(0, 0) FM(STP_IVCXO27_1_C) FM(RIF0_SYNC_B) F_(0, 0) FM(ADICHS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_15_12 FM(RX1_A) FM(HRX1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDAT0_C) FM(STP_ISD_0_C) FM(RIF1_CLK_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_19_16 FM(TX1_A) FM(HTX1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDEN0_C) FM(STP_ISEN_0_C) FM(RIF1_D0_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_23_20 FM(CTS1_N) FM(HCTS1_N_A) FM(MSIOF1_RXD_B) F_(0, 0) F_(0, 0) FM(TS_SDEN1_C) FM(STP_ISEN_1_C) FM(RIF1_D0_B) F_(0, 0) FM(ADIDATA) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_27_24 FM(RTS1_N_TANS) FM(HRTS1_N_A) FM(MSIOF1_TXD_B) F_(0, 0) F_(0, 0) FM(TS_SDAT1_C) FM(STP_ISD_1_C) FM(RIF1_D1_B) F_(0, 0) FM(ADICHS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24 FM(RTS1_N) FM(HRTS1_N_A) FM(MSIOF1_TXD_B) F_(0, 0) F_(0, 0) FM(TS_SDAT1_C) FM(STP_ISD_1_C) FM(RIF1_D1_B) F_(0, 0) FM(ADICHS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_31_28 FM(SCK2) FM(SCIF_CLK_B) FM(MSIOF1_SCK_B) F_(0, 0) F_(0, 0) FM(TS_SCK1_C) FM(STP_ISCLK_1_C) FM(RIF1_CLK_B) F_(0, 0) FM(ADICLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP12_3_0 FM(TX2_A) F_(0, 0) F_(0, 0) FM(SD2_CD_B) FM(SCL1_A) F_(0, 0) FM(FMCLK_A) FM(RIF1_D1_C) F_(0, 0) FM(FSO_CFE_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP12_7_4 FM(RX2_A) F_(0, 0) F_(0, 0) FM(SD2_WP_B) FM(SDA1_A) F_(0, 0) FM(FMIN_A) FM(RIF1_SYNC_C) F_(0, 0) FM(FSO_CFE_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -339,7 +340,7 @@
#define IP15_23_20 FM(SSI_SDATA7) FM(HCTS2_N_B) FM(MSIOF1_RXD_C) F_(0, 0) F_(0, 0) FM(TS_SDEN1_A) FM(STP_ISEN_1_A) FM(RIF1_D0_A) FM(RIF3_D0_A) F_(0, 0) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_27_24 FM(SSI_SDATA8) FM(HRTS2_N_B) FM(MSIOF1_TXD_C) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A) FM(RIF1_D1_A) FM(RIF3_D1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_31_28 FM(SSI_SDATA9_A) FM(HSCK2_B) FM(MSIOF1_SS1_C) FM(HSCK1_A) FM(SSI_WS1_B) FM(SCK1) FM(STP_IVCXO27_1_A) FM(SCK5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_7_4 FM(AUDIO_CLKB_B) FM(SCIF_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_1_D) FM(REMOCON_A) F_(0, 0) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_11_8 FM(USB0_PWEN) F_(0, 0) F_(0, 0) FM(SIM0_RST_C) F_(0, 0) FM(TS_SCK1_D) FM(STP_ISCLK_1_D) FM(BPFCLK_B) FM(RIF3_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_15_12 FM(USB0_OVC) F_(0, 0) F_(0, 0) FM(SIM0_D_C) F_(0, 0) FM(TS_SDAT1_D) FM(STP_ISD_1_D) F_(0, 0) FM(RIF3_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -576,8 +577,8 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(AVS1),
PINMUX_SINGLE(AVS2),
- PINMUX_SINGLE(HDMI0_CEC),
- PINMUX_SINGLE(HDMI1_CEC),
+ PINMUX_SINGLE(GP7_02),
+ PINMUX_SINGLE(GP7_03),
PINMUX_SINGLE(MSIOF0_RXD),
PINMUX_SINGLE(MSIOF0_SCK),
PINMUX_SINGLE(MSIOF0_TXD),
@@ -616,7 +617,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A, I2C_SEL_5_0, SEL_ETHERAVB_0),
PINMUX_IPSR_PHYS_MSEL(IP0_23_20, MSIOF2_TXD_C, I2C_SEL_5_0, SEL_MSIOF2_2),
- PINMUX_IPSR_PHYS_MSEL(IP0_23_20, RTS4_N_TANS_A, I2C_SEL_5_0, SEL_SCIF4_0),
+ PINMUX_IPSR_PHYS_MSEL(IP0_23_20, RTS4_N_A, I2C_SEL_5_0, SEL_SCIF4_0),
PINMUX_IPSR_PHYS(IP0_23_20, SDA5, I2C_SEL_5_1),
PINMUX_IPSR_GPSR(IP0_27_24, IRQ0),
@@ -756,7 +757,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP3_7_4, A10),
PINMUX_IPSR_MSEL(IP3_7_4, MSIOF2_RXD_A, SEL_MSIOF2_0),
- PINMUX_IPSR_MSEL(IP3_7_4, RTS4_N_TANS_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MSEL(IP3_7_4, RTS4_N_B, SEL_SCIF4_1),
PINMUX_IPSR_GPSR(IP3_7_4, VI5_HSYNC_N),
PINMUX_IPSR_GPSR(IP3_11_8, A11),
@@ -859,7 +860,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP5_7_4, WE1_N),
PINMUX_IPSR_MSEL(IP5_7_4, MSIOF3_SS1_D, SEL_MSIOF3_3),
- PINMUX_IPSR_GPSR(IP5_7_4, RTS3_N_TANS),
+ PINMUX_IPSR_GPSR(IP5_7_4, RTS3_N),
PINMUX_IPSR_GPSR(IP5_7_4, HRTS3_N),
PINMUX_IPSR_MSEL(IP5_7_4, SDA6_B, SEL_I2C6_1),
PINMUX_IPSR_GPSR(IP5_7_4, CAN1_RX),
@@ -940,7 +941,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP6_27_24, MSIOF2_TXD_D, SEL_MSIOF2_3),
PINMUX_IPSR_MSEL(IP6_27_24, HTX3_B, SEL_HSCIF3_1),
PINMUX_IPSR_MSEL(IP6_27_24, VI4_DATA3_A, SEL_VIN4_0),
- PINMUX_IPSR_MSEL(IP6_27_24, RTS4_N_TANS_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MSEL(IP6_27_24, RTS4_N_C, SEL_SCIF4_2),
PINMUX_IPSR_GPSR(IP6_27_24, DU_DR3),
PINMUX_IPSR_GPSR(IP6_31_28, D12),
@@ -1112,7 +1113,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_7_4, AUDIO_CLKOUT_C, SEL_ADG_2),
PINMUX_IPSR_GPSR(IP11_7_4, ADICS_SAMP),
- PINMUX_IPSR_GPSR(IP11_11_8, RTS0_N_TANS),
+ PINMUX_IPSR_GPSR(IP11_11_8, RTS0_N),
PINMUX_IPSR_MSEL(IP11_11_8, HRTS1_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP11_11_8, MSIOF1_SS1_B, SEL_MSIOF1_1),
PINMUX_IPSR_MSEL(IP11_11_8, AUDIO_CLKA_B, SEL_ADG_1),
@@ -1141,7 +1142,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_23_20, RIF1_D0_B, SEL_DRIF1_1),
PINMUX_IPSR_GPSR(IP11_23_20, ADIDATA),
- PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N_TANS),
+ PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N),
PINMUX_IPSR_MSEL(IP11_27_24, HRTS1_N_A, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP11_27_24, MSIOF1_TXD_B, SEL_MSIOF1_1),
PINMUX_IPSR_MSEL(IP11_27_24, TS_SDAT1_C, SEL_TSIF1_2),
@@ -1358,7 +1359,6 @@ static const u16 pinmux_data[] = {
/* IPSR16 */
PINMUX_IPSR_MSEL(IP16_3_0, AUDIO_CLKA_A, SEL_ADG_0),
- PINMUX_IPSR_GPSR(IP16_3_0, CC5_OSCOUT),
PINMUX_IPSR_MSEL(IP16_7_4, AUDIO_CLKB_B, SEL_ADG_1),
PINMUX_IPSR_MSEL(IP16_7_4, SCIF_CLK_A, SEL_SCIF1_0),
@@ -2071,22 +2071,6 @@ static const unsigned int du_disp_pins[] = {
static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
-/* - HDMI ------------------------------------------------------------------- */
-static const unsigned int hdmi0_cec_pins[] = {
- /* HDMI0_CEC */
- RCAR_GP_PIN(7, 2),
-};
-static const unsigned int hdmi0_cec_mux[] = {
- HDMI0_CEC_MARK,
-};
-static const unsigned int hdmi1_cec_pins[] = {
- /* HDMI1_CEC */
- RCAR_GP_PIN(7, 3),
-};
-static const unsigned int hdmi1_cec_mux[] = {
- HDMI1_CEC_MARK,
-};
-
/* - HSCIF0 ----------------------------------------------------------------- */
static const unsigned int hscif0_data_pins[] = {
/* RX, TX */
@@ -3235,7 +3219,7 @@ static const unsigned int scif0_ctrl_pins[] = {
RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
};
static const unsigned int scif0_ctrl_mux[] = {
- RTS0_N_TANS_MARK, CTS0_N_MARK,
+ RTS0_N_MARK, CTS0_N_MARK,
};
/* - SCIF1 ------------------------------------------------------------------ */
static const unsigned int scif1_data_a_pins[] = {
@@ -3257,7 +3241,7 @@ static const unsigned int scif1_ctrl_pins[] = {
RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
};
static const unsigned int scif1_ctrl_mux[] = {
- RTS1_N_TANS_MARK, CTS1_N_MARK,
+ RTS1_N_MARK, CTS1_N_MARK,
};
static const unsigned int scif1_data_b_pins[] = {
@@ -3309,7 +3293,7 @@ static const unsigned int scif3_ctrl_pins[] = {
RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
};
static const unsigned int scif3_ctrl_mux[] = {
- RTS3_N_TANS_MARK, CTS3_N_MARK,
+ RTS3_N_MARK, CTS3_N_MARK,
};
static const unsigned int scif3_data_b_pins[] = {
/* RX, TX */
@@ -3338,7 +3322,7 @@ static const unsigned int scif4_ctrl_a_pins[] = {
RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
};
static const unsigned int scif4_ctrl_a_mux[] = {
- RTS4_N_TANS_A_MARK, CTS4_N_A_MARK,
+ RTS4_N_A_MARK, CTS4_N_A_MARK,
};
static const unsigned int scif4_data_b_pins[] = {
/* RX, TX */
@@ -3359,7 +3343,7 @@ static const unsigned int scif4_ctrl_b_pins[] = {
RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
};
static const unsigned int scif4_ctrl_b_mux[] = {
- RTS4_N_TANS_B_MARK, CTS4_N_B_MARK,
+ RTS4_N_B_MARK, CTS4_N_B_MARK,
};
static const unsigned int scif4_data_c_pins[] = {
/* RX, TX */
@@ -3380,7 +3364,7 @@ static const unsigned int scif4_ctrl_c_pins[] = {
RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
};
static const unsigned int scif4_ctrl_c_mux[] = {
- RTS4_N_TANS_C_MARK, CTS4_N_C_MARK,
+ RTS4_N_C_MARK, CTS4_N_C_MARK,
};
/* - SCIF5 ------------------------------------------------------------------ */
static const unsigned int scif5_data_pins[] = {
@@ -3944,8 +3928,6 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(hdmi0_cec),
- SH_PFC_PIN_GROUP(hdmi1_cec),
SH_PFC_PIN_GROUP(hscif0_data),
SH_PFC_PIN_GROUP(hscif0_clk),
SH_PFC_PIN_GROUP(hscif0_ctrl),
@@ -4299,14 +4281,6 @@ static const char * const du_groups[] = {
"du_disp",
};
-static const char * const hdmi0_groups[] = {
- "hdmi0_cec",
-};
-
-static const char * const hdmi1_groups[] = {
- "hdmi1_cec",
-};
-
static const char * const hscif0_groups[] = {
"hscif0_data",
"hscif0_clk",
@@ -4694,8 +4668,6 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(drif2),
SH_PFC_FUNCTION(drif3),
SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(hdmi0),
- SH_PFC_FUNCTION(hdmi1),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
SH_PFC_FUNCTION(hscif2),
@@ -4745,7 +4717,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4777,9 +4749,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, GPSR0_3,
GP_0_2_FN, GPSR0_2,
GP_0_1_FN, GPSR0_1,
- GP_0_0_FN, GPSR0_0, }
+ GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4811,9 +4783,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, GPSR1_3,
GP_1_2_FN, GPSR1_2,
GP_1_1_FN, GPSR1_1,
- GP_1_0_FN, GPSR1_0, }
+ GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4845,9 +4817,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, GPSR2_3,
GP_2_2_FN, GPSR2_2,
GP_2_1_FN, GPSR2_1,
- GP_2_0_FN, GPSR2_0, }
+ GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4879,9 +4851,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, GPSR3_3,
GP_3_2_FN, GPSR3_2,
GP_3_1_FN, GPSR3_1,
- GP_3_0_FN, GPSR3_0, }
+ GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4913,9 +4885,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, GPSR4_3,
GP_4_2_FN, GPSR4_2,
GP_4_1_FN, GPSR4_1,
- GP_4_0_FN, GPSR4_0, }
+ GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4947,9 +4919,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, GPSR5_3,
GP_5_2_FN, GPSR5_2,
GP_5_1_FN, GPSR5_1,
- GP_5_0_FN, GPSR5_0, }
+ GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
GP_6_31_FN, GPSR6_31,
GP_6_30_FN, GPSR6_30,
GP_6_29_FN, GPSR6_29,
@@ -4981,9 +4953,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, GPSR6_3,
GP_6_2_FN, GPSR6_2,
GP_6_1_FN, GPSR6_1,
- GP_6_0_FN, GPSR6_0, }
+ GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5015,14 +4987,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_3_FN, GPSR7_3,
GP_7_2_FN, GPSR7_2,
GP_7_1_FN, GPSR7_1,
- GP_7_0_FN, GPSR7_0, }
+ GP_7_0_FN, GPSR7_0, ))
},
#undef F_
#undef FM
#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
IP0_31_28
IP0_27_24
IP0_23_20
@@ -5030,9 +5002,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP0_15_12
IP0_11_8
IP0_7_4
- IP0_3_0 }
+ IP0_3_0 ))
},
- { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
IP1_31_28
IP1_27_24
IP1_23_20
@@ -5040,9 +5012,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1_15_12
IP1_11_8
IP1_7_4
- IP1_3_0 }
+ IP1_3_0 ))
},
- { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
IP2_31_28
IP2_27_24
IP2_23_20
@@ -5050,9 +5022,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2_15_12
IP2_11_8
IP2_7_4
- IP2_3_0 }
+ IP2_3_0 ))
},
- { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
IP3_31_28
IP3_27_24
IP3_23_20
@@ -5060,9 +5032,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3_15_12
IP3_11_8
IP3_7_4
- IP3_3_0 }
+ IP3_3_0 ))
},
- { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
IP4_31_28
IP4_27_24
IP4_23_20
@@ -5070,9 +5042,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP4_15_12
IP4_11_8
IP4_7_4
- IP4_3_0 }
+ IP4_3_0 ))
},
- { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
IP5_31_28
IP5_27_24
IP5_23_20
@@ -5080,9 +5052,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP5_15_12
IP5_11_8
IP5_7_4
- IP5_3_0 }
+ IP5_3_0 ))
},
- { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
IP6_31_28
IP6_27_24
IP6_23_20
@@ -5090,9 +5062,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_15_12
IP6_11_8
IP6_7_4
- IP6_3_0 }
+ IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
@@ -5100,9 +5072,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP7_15_12
IP7_11_8
IP7_7_4
- IP7_3_0 }
+ IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
IP8_31_28
IP8_27_24
IP8_23_20
@@ -5110,9 +5082,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP8_15_12
IP8_11_8
IP8_7_4
- IP8_3_0 }
+ IP8_3_0 ))
},
- { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
IP9_31_28
IP9_27_24
IP9_23_20
@@ -5120,9 +5092,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP9_15_12
IP9_11_8
IP9_7_4
- IP9_3_0 }
+ IP9_3_0 ))
},
- { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
IP10_31_28
IP10_27_24
IP10_23_20
@@ -5130,9 +5102,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP10_15_12
IP10_11_8
IP10_7_4
- IP10_3_0 }
+ IP10_3_0 ))
},
- { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
IP11_31_28
IP11_27_24
IP11_23_20
@@ -5140,9 +5112,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP11_15_12
IP11_11_8
IP11_7_4
- IP11_3_0 }
+ IP11_3_0 ))
},
- { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
IP12_31_28
IP12_27_24
IP12_23_20
@@ -5150,9 +5122,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP12_15_12
IP12_11_8
IP12_7_4
- IP12_3_0 }
+ IP12_3_0 ))
},
- { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
IP13_31_28
IP13_27_24
IP13_23_20
@@ -5160,9 +5132,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP13_15_12
IP13_11_8
IP13_7_4
- IP13_3_0 }
+ IP13_3_0 ))
},
- { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+ { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
IP14_31_28
IP14_27_24
IP14_23_20
@@ -5170,9 +5142,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP14_15_12
IP14_11_8
IP14_7_4
- IP14_3_0 }
+ IP14_3_0 ))
},
- { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
IP15_31_28
IP15_27_24
IP15_23_20
@@ -5180,9 +5152,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP15_15_12
IP15_11_8
IP15_7_4
- IP15_3_0 }
+ IP15_3_0 ))
},
- { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4) {
+ { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4, GROUP(
IP16_31_28
IP16_27_24
IP16_23_20
@@ -5190,9 +5162,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP16_15_12
IP16_11_8
IP16_7_4
- IP16_3_0 }
+ IP16_3_0 ))
},
- { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4) {
+ { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
/* IP17_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP17_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP17_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -5200,7 +5172,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP17_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP17_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IP17_7_4
- IP17_3_0 }
+ IP17_3_0 ))
},
#undef F_
#undef FM
@@ -5208,8 +5180,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- 1, 2, 2, 3, 1, 1, 2, 1, 1, 1,
- 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1) {
+ GROUP(1, 2, 2, 3, 1, 1, 2, 1, 1, 1, 2, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1),
+ GROUP(
0, 0, /* RESERVED 31 */
MOD_SEL0_30_29
MOD_SEL0_28_27
@@ -5232,11 +5205,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_5_4
MOD_SEL0_3
MOD_SEL0_2_1
- 0, 0, /* RESERVED 0 */ }
+ 0, 0, /* RESERVED 0 */ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- 2, 3, 1, 2, 3, 1, 1, 2, 1,
- 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
+ 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
MOD_SEL1_31_30
MOD_SEL1_29_28_27
MOD_SEL1_26
@@ -5259,11 +5233,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_3
MOD_SEL1_2
MOD_SEL1_1
- MOD_SEL1_0 }
+ MOD_SEL1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
- 1, 1, 1, 1, 4, 4, 4,
- 4, 4, 4, 1, 2, 1) {
+ GROUP(1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 1, 2, 1),
+ GROUP(
MOD_SEL2_31
MOD_SEL2_30
MOD_SEL2_29
@@ -5291,7 +5265,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
/* RESERVED 2, 1 */
0, 0, 0, 0,
- MOD_SEL2_0 }
+ MOD_SEL2_0 ))
},
{ },
};
@@ -5412,8 +5386,8 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */
{ RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */
{ RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */
- { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 3), 8, 3 }, /* HDMI1_CEC */
+ { RCAR_GP_PIN(7, 2), 12, 3 }, /* GP7_02 */
+ { RCAR_GP_PIN(7, 3), 8, 3 }, /* GP7_03 */
{ PIN_A_NUMBER('P', 7), 4, 2 }, /* DU_DOTCLKIN0 */
{ PIN_A_NUMBER('P', 8), 0, 2 }, /* DU_DOTCLKIN1 */
} },
@@ -5474,11 +5448,11 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(5, 3), 0, 3 }, /* CTS0 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL18", 0xe6060348) {
- { RCAR_GP_PIN(5, 4), 28, 3 }, /* RTS0_TANS */
+ { RCAR_GP_PIN(5, 4), 28, 3 }, /* RTS0 */
{ RCAR_GP_PIN(5, 5), 24, 3 }, /* RX1 */
{ RCAR_GP_PIN(5, 6), 20, 3 }, /* TX1 */
{ RCAR_GP_PIN(5, 7), 16, 3 }, /* CTS1 */
- { RCAR_GP_PIN(5, 8), 12, 3 }, /* RTS1_TANS */
+ { RCAR_GP_PIN(5, 8), 12, 3 }, /* RTS1 */
{ RCAR_GP_PIN(5, 9), 8, 3 }, /* SCK2 */
{ RCAR_GP_PIN(5, 10), 4, 3 }, /* TX2 */
{ RCAR_GP_PIN(5, 11), 0, 3 }, /* RX2 */
@@ -5547,10 +5521,12 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
enum ioctrl_regs {
POCCTRL,
+ TDSELCTRL,
};
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
[POCCTRL] = { 0xe6060380, },
+ [TDSELCTRL] = { 0xe60603c0, },
{ /* sentinel */ },
};
@@ -5668,8 +5644,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[25] = RCAR_GP_PIN(0, 15), /* D15 */
[26] = RCAR_GP_PIN(7, 0), /* AVS1 */
[27] = RCAR_GP_PIN(7, 1), /* AVS2 */
- [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
- [29] = RCAR_GP_PIN(7, 3), /* HDMI1_CEC */
+ [28] = RCAR_GP_PIN(7, 2), /* GP7_02 */
+ [29] = RCAR_GP_PIN(7, 3), /* GP7_03 */
[30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
[31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
} },
@@ -5724,11 +5700,11 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[13] = RCAR_GP_PIN(5, 1), /* RX0 */
[14] = RCAR_GP_PIN(5, 2), /* TX0 */
[15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
- [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N */
[17] = RCAR_GP_PIN(5, 5), /* RX1_A */
[18] = RCAR_GP_PIN(5, 6), /* TX1_A */
[19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
- [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N */
[21] = RCAR_GP_PIN(5, 9), /* SCK2 */
[22] = RCAR_GP_PIN(5, 10), /* TX2_A */
[23] = RCAR_GP_PIN(5, 11), /* RX2_A */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index db9add1405c5..68bcb8980b16 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -2,9 +2,10 @@
/*
* R8A7795 ES2.0+ processor support - PFC hardware block.
*
- * Copyright (C) 2015-2017 Renesas Electronics Corporation
+ * Copyright (C) 2015-2019 Renesas Electronics Corporation
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -200,8 +201,8 @@
#define GPSR6_0 F_(SSI_SCK01239, IP14_23_20)
/* GPSR7 */
-#define GPSR7_3 FM(HDMI1_CEC)
-#define GPSR7_2 FM(HDMI0_CEC)
+#define GPSR7_3 FM(GP7_03)
+#define GPSR7_2 FM(GP7_02)
#define GPSR7_1 FM(AVS2)
#define GPSR7_0 FM(AVS1)
@@ -350,7 +351,7 @@
#define IP16_23_20 FM(SSI_SDATA7) FM(HCTS2_N_B) FM(MSIOF1_RXD_C) F_(0, 0) F_(0, 0) FM(TS_SDEN1_A) FM(STP_ISEN_1_A) FM(RIF1_D0_A) FM(RIF3_D0_A) F_(0, 0) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_27_24 FM(SSI_SDATA8) FM(HRTS2_N_B) FM(MSIOF1_TXD_C) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A) FM(RIF1_D1_A) FM(RIF3_D1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_31_28 FM(SSI_SDATA9_A) FM(HSCK2_B) FM(MSIOF1_SS1_C) FM(HSCK1_A) FM(SSI_WS1_B) FM(SCK1) FM(STP_IVCXO27_1_A) FM(SCK5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP17_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP17_7_4 FM(AUDIO_CLKB_B) FM(SCIF_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_1_D) FM(REMOCON_A) F_(0, 0) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP17_11_8 FM(USB0_PWEN) F_(0, 0) F_(0, 0) FM(SIM0_RST_C) F_(0, 0) FM(TS_SCK1_D) FM(STP_ISCLK_1_D) FM(BPFCLK_B) FM(RIF3_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(HSCK2_C) F_(0, 0) F_(0, 0)
#define IP17_15_12 FM(USB0_OVC) F_(0, 0) F_(0, 0) FM(SIM0_D_C) F_(0, 0) FM(TS_SDAT1_D) FM(STP_ISD_1_D) F_(0, 0) FM(RIF3_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(HRX2_C) F_(0, 0) F_(0, 0)
@@ -461,7 +462,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL0_9_8 FM(SEL_DRIF1_0) FM(SEL_DRIF1_1) FM(SEL_DRIF1_2) F_(0, 0)
#define MOD_SEL0_7_6 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1) FM(SEL_DRIF0_2) F_(0, 0)
#define MOD_SEL0_5 FM(SEL_CANFD0_0) FM(SEL_CANFD0_1)
-#define MOD_SEL0_4_3 FM(SEL_ADG_A_0) FM(SEL_ADG_A_1) FM(SEL_ADG_A_2) FM(SEL_ADG_A_3)
+#define MOD_SEL0_4_3 FM(SEL_ADGA_0) FM(SEL_ADGA_1) FM(SEL_ADGA_2) FM(SEL_ADGA_3)
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
#define MOD_SEL1_31_30 FM(SEL_TSIF1_0) FM(SEL_TSIF1_1) FM(SEL_TSIF1_2) FM(SEL_TSIF1_3)
@@ -497,8 +498,8 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL2_21 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
#define MOD_SEL2_20 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
#define MOD_SEL2_19 FM(SEL_TIMER_TMU2_0) FM(SEL_TIMER_TMU2_1)
-#define MOD_SEL2_18 FM(SEL_ADG_B_0) FM(SEL_ADG_B_1)
-#define MOD_SEL2_17 FM(SEL_ADG_C_0) FM(SEL_ADG_C_1)
+#define MOD_SEL2_18 FM(SEL_ADGB_0) FM(SEL_ADGB_1)
+#define MOD_SEL2_17 FM(SEL_ADGC_0) FM(SEL_ADGC_1)
#define MOD_SEL2_0 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
#define PINMUX_MOD_SELS \
@@ -590,8 +591,8 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(AVS1),
PINMUX_SINGLE(AVS2),
PINMUX_SINGLE(CLKOUT),
- PINMUX_SINGLE(HDMI0_CEC),
- PINMUX_SINGLE(HDMI1_CEC),
+ PINMUX_SINGLE(GP7_02),
+ PINMUX_SINGLE(GP7_03),
PINMUX_SINGLE(MSIOF0_RXD),
PINMUX_SINGLE(MSIOF0_SCK),
PINMUX_SINGLE(MSIOF0_TXD),
@@ -1129,7 +1130,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP11_27_24, SCK0),
PINMUX_IPSR_MSEL(IP11_27_24, HSCK1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP11_27_24, MSIOF1_SS2_B, SEL_MSIOF1_1),
- PINMUX_IPSR_MSEL(IP11_27_24, AUDIO_CLKC_B, SEL_ADG_C_1),
+ PINMUX_IPSR_MSEL(IP11_27_24, AUDIO_CLKC_B, SEL_ADGC_1),
PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP11_27_24, SIM0_RST_B, SEL_SIMCARD_1),
PINMUX_IPSR_MSEL(IP11_27_24, STP_OPWM_0_C, SEL_SSP1_0_2),
@@ -1162,7 +1163,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP12_11_8, RTS0_N),
PINMUX_IPSR_MSEL(IP12_11_8, HRTS1_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP12_11_8, MSIOF1_SS1_B, SEL_MSIOF1_1),
- PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKA_B, SEL_ADG_A_1),
+ PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKA_B, SEL_ADGA_1),
PINMUX_IPSR_MSEL(IP12_11_8, SCL2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP12_11_8, STP_IVCXO27_1_C, SEL_SSP1_1_2),
PINMUX_IPSR_MSEL(IP12_11_8, RIF0_SYNC_B, SEL_DRIF0_1),
@@ -1221,7 +1222,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP13_11_8, HSCK0),
PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
- PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0),
+ PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADGB_0),
PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1),
PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
@@ -1268,7 +1269,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP14_3_0, MSIOF0_SS1),
PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0),
PINMUX_IPSR_GPSR(IP14_3_0, NFWP_N_A),
- PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2),
+ PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADGA_2),
PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A),
@@ -1277,7 +1278,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP14_7_4, MSIOF0_SS2),
PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
- PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0),
+ PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADGC_0),
PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3),
PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D),
@@ -1408,10 +1409,9 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0),
/* IPSR17 */
- PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKA_A, SEL_ADG_A_0),
- PINMUX_IPSR_GPSR(IP17_3_0, CC5_OSCOUT),
+ PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKA_A, SEL_ADGA_0),
- PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKB_B, SEL_ADG_B_1),
+ PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKB_B, SEL_ADGB_1),
PINMUX_IPSR_MSEL(IP17_7_4, SCIF_CLK_A, SEL_SCIF_0),
PINMUX_IPSR_MSEL(IP17_7_4, STP_IVCXO27_1_D, SEL_SSP1_1_3),
PINMUX_IPSR_MSEL(IP17_7_4, REMOCON_A, SEL_REMOCON_0),
@@ -2131,22 +2131,6 @@ static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
-/* - HDMI ------------------------------------------------------------------- */
-static const unsigned int hdmi0_cec_pins[] = {
- /* HDMI0_CEC */
- RCAR_GP_PIN(7, 2),
-};
-static const unsigned int hdmi0_cec_mux[] = {
- HDMI0_CEC_MARK,
-};
-static const unsigned int hdmi1_cec_pins[] = {
- /* HDMI1_CEC */
- RCAR_GP_PIN(7, 3),
-};
-static const unsigned int hdmi1_cec_mux[] = {
- HDMI1_CEC_MARK,
-};
-
/* - HSCIF0 ----------------------------------------------------------------- */
static const unsigned int hscif0_data_pins[] = {
/* RX, TX */
@@ -4225,8 +4209,6 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(hdmi0_cec),
- SH_PFC_PIN_GROUP(hdmi1_cec),
SH_PFC_PIN_GROUP(hscif0_data),
SH_PFC_PIN_GROUP(hscif0_clk),
SH_PFC_PIN_GROUP(hscif0_ctrl),
@@ -4611,14 +4593,6 @@ static const char * const du_groups[] = {
"du_disp",
};
-static const char * const hdmi0_groups[] = {
- "hdmi0_cec",
-};
-
-static const char * const hdmi1_groups[] = {
- "hdmi1_cec",
-};
-
static const char * const hscif0_groups[] = {
"hscif0_data",
"hscif0_clk",
@@ -5037,8 +5011,6 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(drif2),
SH_PFC_FUNCTION(drif3),
SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(hdmi0),
- SH_PFC_FUNCTION(hdmi1),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
SH_PFC_FUNCTION(hscif2),
@@ -5088,7 +5060,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5120,9 +5092,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, GPSR0_3,
GP_0_2_FN, GPSR0_2,
GP_0_1_FN, GPSR0_1,
- GP_0_0_FN, GPSR0_0, }
+ GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5154,9 +5126,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, GPSR1_3,
GP_1_2_FN, GPSR1_2,
GP_1_1_FN, GPSR1_1,
- GP_1_0_FN, GPSR1_0, }
+ GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5188,9 +5160,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, GPSR2_3,
GP_2_2_FN, GPSR2_2,
GP_2_1_FN, GPSR2_1,
- GP_2_0_FN, GPSR2_0, }
+ GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5222,9 +5194,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, GPSR3_3,
GP_3_2_FN, GPSR3_2,
GP_3_1_FN, GPSR3_1,
- GP_3_0_FN, GPSR3_0, }
+ GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5256,9 +5228,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, GPSR4_3,
GP_4_2_FN, GPSR4_2,
GP_4_1_FN, GPSR4_1,
- GP_4_0_FN, GPSR4_0, }
+ GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5290,9 +5262,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, GPSR5_3,
GP_5_2_FN, GPSR5_2,
GP_5_1_FN, GPSR5_1,
- GP_5_0_FN, GPSR5_0, }
+ GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
GP_6_31_FN, GPSR6_31,
GP_6_30_FN, GPSR6_30,
GP_6_29_FN, GPSR6_29,
@@ -5324,9 +5296,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, GPSR6_3,
GP_6_2_FN, GPSR6_2,
GP_6_1_FN, GPSR6_1,
- GP_6_0_FN, GPSR6_0, }
+ GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5358,14 +5330,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_3_FN, GPSR7_3,
GP_7_2_FN, GPSR7_2,
GP_7_1_FN, GPSR7_1,
- GP_7_0_FN, GPSR7_0, }
+ GP_7_0_FN, GPSR7_0, ))
},
#undef F_
#undef FM
#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
IP0_31_28
IP0_27_24
IP0_23_20
@@ -5373,9 +5345,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP0_15_12
IP0_11_8
IP0_7_4
- IP0_3_0 }
+ IP0_3_0 ))
},
- { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
IP1_31_28
IP1_27_24
IP1_23_20
@@ -5383,9 +5355,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1_15_12
IP1_11_8
IP1_7_4
- IP1_3_0 }
+ IP1_3_0 ))
},
- { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
IP2_31_28
IP2_27_24
IP2_23_20
@@ -5393,9 +5365,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2_15_12
IP2_11_8
IP2_7_4
- IP2_3_0 }
+ IP2_3_0 ))
},
- { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
IP3_31_28
IP3_27_24
IP3_23_20
@@ -5403,9 +5375,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3_15_12
IP3_11_8
IP3_7_4
- IP3_3_0 }
+ IP3_3_0 ))
},
- { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
IP4_31_28
IP4_27_24
IP4_23_20
@@ -5413,9 +5385,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP4_15_12
IP4_11_8
IP4_7_4
- IP4_3_0 }
+ IP4_3_0 ))
},
- { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
IP5_31_28
IP5_27_24
IP5_23_20
@@ -5423,9 +5395,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP5_15_12
IP5_11_8
IP5_7_4
- IP5_3_0 }
+ IP5_3_0 ))
},
- { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
IP6_31_28
IP6_27_24
IP6_23_20
@@ -5433,9 +5405,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_15_12
IP6_11_8
IP6_7_4
- IP6_3_0 }
+ IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
@@ -5443,9 +5415,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IP7_11_8
IP7_7_4
- IP7_3_0 }
+ IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
IP8_31_28
IP8_27_24
IP8_23_20
@@ -5453,9 +5425,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP8_15_12
IP8_11_8
IP8_7_4
- IP8_3_0 }
+ IP8_3_0 ))
},
- { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
IP9_31_28
IP9_27_24
IP9_23_20
@@ -5463,9 +5435,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP9_15_12
IP9_11_8
IP9_7_4
- IP9_3_0 }
+ IP9_3_0 ))
},
- { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
IP10_31_28
IP10_27_24
IP10_23_20
@@ -5473,9 +5445,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP10_15_12
IP10_11_8
IP10_7_4
- IP10_3_0 }
+ IP10_3_0 ))
},
- { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
IP11_31_28
IP11_27_24
IP11_23_20
@@ -5483,9 +5455,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP11_15_12
IP11_11_8
IP11_7_4
- IP11_3_0 }
+ IP11_3_0 ))
},
- { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
IP12_31_28
IP12_27_24
IP12_23_20
@@ -5493,9 +5465,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP12_15_12
IP12_11_8
IP12_7_4
- IP12_3_0 }
+ IP12_3_0 ))
},
- { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
IP13_31_28
IP13_27_24
IP13_23_20
@@ -5503,9 +5475,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP13_15_12
IP13_11_8
IP13_7_4
- IP13_3_0 }
+ IP13_3_0 ))
},
- { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+ { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
IP14_31_28
IP14_27_24
IP14_23_20
@@ -5513,9 +5485,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP14_15_12
IP14_11_8
IP14_7_4
- IP14_3_0 }
+ IP14_3_0 ))
},
- { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
IP15_31_28
IP15_27_24
IP15_23_20
@@ -5523,9 +5495,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP15_15_12
IP15_11_8
IP15_7_4
- IP15_3_0 }
+ IP15_3_0 ))
},
- { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4) {
+ { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4, GROUP(
IP16_31_28
IP16_27_24
IP16_23_20
@@ -5533,9 +5505,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP16_15_12
IP16_11_8
IP16_7_4
- IP16_3_0 }
+ IP16_3_0 ))
},
- { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4) {
+ { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
IP17_31_28
IP17_27_24
IP17_23_20
@@ -5543,9 +5515,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP17_15_12
IP17_11_8
IP17_7_4
- IP17_3_0 }
+ IP17_3_0 ))
},
- { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4) {
+ { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP(
/* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -5553,7 +5525,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IP18_7_4
- IP18_3_0 }
+ IP18_3_0 ))
},
#undef F_
#undef FM
@@ -5561,8 +5533,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- 3, 2, 3, 1, 1, 1, 1, 1, 2, 1,
- 1, 2, 1, 1, 1, 2, 2, 1, 2, 3) {
+ GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2,
+ 1, 1, 1, 2, 2, 1, 2, 3),
+ GROUP(
MOD_SEL0_31_30_29
MOD_SEL0_28_27
MOD_SEL0_26_25_24
@@ -5583,11 +5556,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_5
MOD_SEL0_4_3
/* RESERVED 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- 2, 3, 1, 2, 3, 1, 1, 2, 1,
- 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
+ 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
MOD_SEL1_31_30
MOD_SEL1_29_28_27
MOD_SEL1_26
@@ -5610,11 +5584,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_3
MOD_SEL1_2
MOD_SEL1_1
- MOD_SEL1_0 }
+ MOD_SEL1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
- 1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1, 1,
- 4, 4, 4, 3, 1) {
+ GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1,
+ 1, 4, 4, 4, 3, 1),
+ GROUP(
MOD_SEL2_31
MOD_SEL2_30
MOD_SEL2_29
@@ -5641,7 +5616,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* RESERVED 3, 2, 1 */
0, 0, 0, 0, 0, 0, 0, 0,
- MOD_SEL2_0 }
+ MOD_SEL2_0 ))
},
{ },
};
@@ -5762,8 +5737,8 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */
{ RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */
{ RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */
- { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 3), 8, 3 }, /* HDMI1_CEC */
+ { RCAR_GP_PIN(7, 2), 12, 3 }, /* GP7_02 */
+ { RCAR_GP_PIN(7, 3), 8, 3 }, /* GP7_03 */
{ PIN_A_NUMBER('P', 7), 4, 2 }, /* DU_DOTCLKIN0 */
{ PIN_A_NUMBER('P', 8), 0, 2 }, /* DU_DOTCLKIN1 */
} },
@@ -5897,10 +5872,12 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
enum ioctrl_regs {
POCCTRL,
+ TDSELCTRL,
};
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
[POCCTRL] = { 0xe6060380, },
+ [TDSELCTRL] = { 0xe60603c0, },
{ /* sentinel */ },
};
@@ -6017,8 +5994,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[25] = RCAR_GP_PIN(0, 15), /* D15 */
[26] = RCAR_GP_PIN(7, 0), /* AVS1 */
[27] = RCAR_GP_PIN(7, 1), /* AVS2 */
- [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
- [29] = RCAR_GP_PIN(7, 3), /* HDMI1_CEC */
+ [28] = RCAR_GP_PIN(7, 2), /* GP7_02 */
+ [29] = RCAR_GP_PIN(7, 3), /* GP7_03 */
[30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
[31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
} },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 72348a4f2ece..38cce690db70 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -2,7 +2,7 @@
/*
* R8A7796 processor support - PFC hardware block.
*
- * Copyright (C) 2016-2017 Renesas Electronics Corp.
+ * Copyright (C) 2016-2019 Renesas Electronics Corp.
*
* This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
*
@@ -11,6 +11,7 @@
* Copyright (C) 2015 Renesas Electronics Corporation
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include "core.h"
@@ -206,7 +207,7 @@
/* GPSR7 */
#define GPSR7_3 FM(GP7_03)
-#define GPSR7_2 FM(HDMI0_CEC)
+#define GPSR7_2 FM(GP7_02)
#define GPSR7_1 FM(AVS2)
#define GPSR7_0 FM(AVS1)
@@ -355,7 +356,7 @@
#define IP16_23_20 FM(SSI_SDATA7) FM(HCTS2_N_B) FM(MSIOF1_RXD_C) F_(0, 0) F_(0, 0) FM(TS_SDEN1_A) FM(STP_ISEN_1_A) FM(RIF1_D0_A) FM(RIF3_D0_A) F_(0, 0) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_27_24 FM(SSI_SDATA8) FM(HRTS2_N_B) FM(MSIOF1_TXD_C) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A) FM(RIF1_D1_A) FM(RIF3_D1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_31_28 FM(SSI_SDATA9_A) FM(HSCK2_B) FM(MSIOF1_SS1_C) FM(HSCK1_A) FM(SSI_WS1_B) FM(SCK1) FM(STP_IVCXO27_1_A) FM(SCK5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP17_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP17_7_4 FM(AUDIO_CLKB_B) FM(SCIF_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_1_D) FM(REMOCON_A) F_(0, 0) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP17_11_8 FM(USB0_PWEN) F_(0, 0) F_(0, 0) FM(SIM0_RST_C) F_(0, 0) FM(TS_SCK1_D) FM(STP_ISCLK_1_D) FM(BPFCLK_B) FM(RIF3_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(HSCK2_C) F_(0, 0) F_(0, 0)
#define IP17_15_12 FM(USB0_OVC) F_(0, 0) F_(0, 0) FM(SIM0_D_C) F_(0, 0) FM(TS_SDAT1_D) FM(STP_ISD_1_D) F_(0, 0) FM(RIF3_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(HRX2_C) F_(0, 0) F_(0, 0)
@@ -466,7 +467,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL0_9_8 FM(SEL_DRIF1_0) FM(SEL_DRIF1_1) FM(SEL_DRIF1_2) F_(0, 0)
#define MOD_SEL0_7_6 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1) FM(SEL_DRIF0_2) F_(0, 0)
#define MOD_SEL0_5 FM(SEL_CANFD0_0) FM(SEL_CANFD0_1)
-#define MOD_SEL0_4_3 FM(SEL_ADG_A_0) FM(SEL_ADG_A_1) FM(SEL_ADG_A_2) FM(SEL_ADG_A_3)
+#define MOD_SEL0_4_3 FM(SEL_ADGA_0) FM(SEL_ADGA_1) FM(SEL_ADGA_2) FM(SEL_ADGA_3)
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
#define MOD_SEL1_31_30 FM(SEL_TSIF1_0) FM(SEL_TSIF1_1) FM(SEL_TSIF1_2) FM(SEL_TSIF1_3)
@@ -499,12 +500,12 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL2_28_27 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) FM(SEL_FM_3)
#define MOD_SEL2_26 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1)
#define MOD_SEL2_25_24_23 FM(SEL_I2C6_0) FM(SEL_I2C6_1) FM(SEL_I2C6_2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define MOD_SEL2_22 FM(SEL_NDFC_0) FM(SEL_NDFC_1)
+#define MOD_SEL2_22 FM(SEL_NDF_0) FM(SEL_NDF_1)
#define MOD_SEL2_21 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
#define MOD_SEL2_20 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
#define MOD_SEL2_19 FM(SEL_TIMER_TMU2_0) FM(SEL_TIMER_TMU2_1)
-#define MOD_SEL2_18 FM(SEL_ADG_B_0) FM(SEL_ADG_B_1)
-#define MOD_SEL2_17 FM(SEL_ADG_C_0) FM(SEL_ADG_C_1)
+#define MOD_SEL2_18 FM(SEL_ADGB_0) FM(SEL_ADGB_1)
+#define MOD_SEL2_17 FM(SEL_ADGC_0) FM(SEL_ADGC_1)
#define MOD_SEL2_0 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
#define PINMUX_MOD_SELS \
@@ -597,7 +598,7 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(AVS2),
PINMUX_SINGLE(CLKOUT),
PINMUX_SINGLE(GP7_03),
- PINMUX_SINGLE(HDMI0_CEC),
+ PINMUX_SINGLE(GP7_02),
PINMUX_SINGLE(MSIOF0_RXD),
PINMUX_SINGLE(MSIOF0_SCK),
PINMUX_SINGLE(MSIOF0_TXD),
@@ -1021,35 +1022,35 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP8_15_12, SD1_CMD),
PINMUX_IPSR_MSEL(IP8_15_12, MSIOF1_SYNC_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_15_12, NFCE_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_15_12, NFCE_N_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_15_12, SIM0_D_A, SEL_SIMCARD_0),
PINMUX_IPSR_MSEL(IP8_15_12, STP_IVCXO27_1_B, SEL_SSP1_1_1),
PINMUX_IPSR_GPSR(IP8_19_16, SD1_DAT0),
PINMUX_IPSR_GPSR(IP8_19_16, SD2_DAT4),
PINMUX_IPSR_MSEL(IP8_19_16, MSIOF1_RXD_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_19_16, NFWP_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_19_16, NFWP_N_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_19_16, TS_SCK1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_19_16, STP_ISCLK_1_B, SEL_SSP1_1_1),
PINMUX_IPSR_GPSR(IP8_23_20, SD1_DAT1),
PINMUX_IPSR_GPSR(IP8_23_20, SD2_DAT5),
PINMUX_IPSR_MSEL(IP8_23_20, MSIOF1_TXD_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_23_20, TS_SPSYNC1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_23_20, STP_ISSYNC_1_B, SEL_SSP1_1_1),
PINMUX_IPSR_GPSR(IP8_27_24, SD1_DAT2),
PINMUX_IPSR_GPSR(IP8_27_24, SD2_DAT6),
PINMUX_IPSR_MSEL(IP8_27_24, MSIOF1_SS1_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_27_24, TS_SDAT1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_27_24, STP_ISD_1_B, SEL_SSP1_1_1),
PINMUX_IPSR_GPSR(IP8_31_28, SD1_DAT3),
PINMUX_IPSR_GPSR(IP8_31_28, SD2_DAT7),
PINMUX_IPSR_MSEL(IP8_31_28, MSIOF1_SS2_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_31_28, NFRB_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_31_28, NFRB_N_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_31_28, TS_SDEN1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_31_28, STP_ISEN_1_B, SEL_SSP1_1_1),
@@ -1115,28 +1116,28 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP11_7_4, NFCLE),
PINMUX_IPSR_GPSR(IP11_11_8, SD0_CD),
- PINMUX_IPSR_MSEL(IP11_11_8, NFDATA14_A, SEL_NDFC_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, NFDATA14_A, SEL_NDF_0),
PINMUX_IPSR_MSEL(IP11_11_8, SCL2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP11_11_8, SIM0_RST_A, SEL_SIMCARD_0),
PINMUX_IPSR_GPSR(IP11_15_12, SD0_WP),
- PINMUX_IPSR_MSEL(IP11_15_12, NFDATA15_A, SEL_NDFC_0),
+ PINMUX_IPSR_MSEL(IP11_15_12, NFDATA15_A, SEL_NDF_0),
PINMUX_IPSR_MSEL(IP11_15_12, SDA2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP11_19_16, SD1_CD, I2C_SEL_0_0),
- PINMUX_IPSR_PHYS_MSEL(IP11_19_16, NFRB_N_A, I2C_SEL_0_0, SEL_NDFC_0),
+ PINMUX_IPSR_PHYS_MSEL(IP11_19_16, NFRB_N_A, I2C_SEL_0_0, SEL_NDF_0),
PINMUX_IPSR_PHYS_MSEL(IP11_19_16, SIM0_CLK_B, I2C_SEL_0_0, SEL_SIMCARD_1),
PINMUX_IPSR_PHYS(IP11_19_16, SCL0, I2C_SEL_0_1),
PINMUX_IPSR_MSEL(IP11_23_20, SD1_WP, I2C_SEL_0_0),
- PINMUX_IPSR_PHYS_MSEL(IP11_23_20, NFCE_N_A, I2C_SEL_0_0, SEL_NDFC_0),
+ PINMUX_IPSR_PHYS_MSEL(IP11_23_20, NFCE_N_A, I2C_SEL_0_0, SEL_NDF_0),
PINMUX_IPSR_PHYS_MSEL(IP11_23_20, SIM0_D_B, I2C_SEL_0_0, SEL_SIMCARD_1),
PINMUX_IPSR_PHYS(IP11_23_20, SDA0, I2C_SEL_0_1),
PINMUX_IPSR_GPSR(IP11_27_24, SCK0),
PINMUX_IPSR_MSEL(IP11_27_24, HSCK1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP11_27_24, MSIOF1_SS2_B, SEL_MSIOF1_1),
- PINMUX_IPSR_MSEL(IP11_27_24, AUDIO_CLKC_B, SEL_ADG_C_1),
+ PINMUX_IPSR_MSEL(IP11_27_24, AUDIO_CLKC_B, SEL_ADGC_1),
PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP11_27_24, SIM0_RST_B, SEL_SIMCARD_1),
PINMUX_IPSR_MSEL(IP11_27_24, STP_OPWM_0_C, SEL_SSP1_0_2),
@@ -1169,7 +1170,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP12_11_8, RTS0_N),
PINMUX_IPSR_MSEL(IP12_11_8, HRTS1_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP12_11_8, MSIOF1_SS1_B, SEL_MSIOF1_1),
- PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKA_B, SEL_ADG_A_1),
+ PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKA_B, SEL_ADGA_1),
PINMUX_IPSR_MSEL(IP12_11_8, SCL2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP12_11_8, STP_IVCXO27_1_C, SEL_SSP1_1_2),
PINMUX_IPSR_MSEL(IP12_11_8, RIF0_SYNC_B, SEL_DRIF0_1),
@@ -1228,7 +1229,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP13_11_8, HSCK0),
PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
- PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0),
+ PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADGB_0),
PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1),
PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
@@ -1274,8 +1275,8 @@ static const u16 pinmux_data[] = {
/* IPSR14 */
PINMUX_IPSR_GPSR(IP14_3_0, MSIOF0_SS1),
PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0),
- PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDFC_0),
- PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2),
+ PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0),
+ PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADGA_2),
PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A),
@@ -1284,7 +1285,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP14_7_4, MSIOF0_SS2),
PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
- PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0),
+ PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADGC_0),
PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3),
PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D),
@@ -1412,10 +1413,9 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0),
/* IPSR17 */
- PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKA_A, SEL_ADG_A_0),
- PINMUX_IPSR_GPSR(IP17_3_0, CC5_OSCOUT),
+ PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKA_A, SEL_ADGA_0),
- PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKB_B, SEL_ADG_B_1),
+ PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKB_B, SEL_ADGB_1),
PINMUX_IPSR_MSEL(IP17_7_4, SCIF_CLK_A, SEL_SCIF_0),
PINMUX_IPSR_MSEL(IP17_7_4, STP_IVCXO27_1_D, SEL_SSP1_1_3),
PINMUX_IPSR_MSEL(IP17_7_4, REMOCON_A, SEL_REMOCON_0),
@@ -1499,11 +1499,6 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP18_7_4, FMIN_C, SEL_FM_2),
PINMUX_IPSR_MSEL(IP18_7_4, FMIN_D, SEL_FM_3),
- /* I2C */
- PINMUX_IPSR_NOGP(0, I2C_SEL_0_1),
- PINMUX_IPSR_NOGP(0, I2C_SEL_3_1),
- PINMUX_IPSR_NOGP(0, I2C_SEL_5_1),
-
/*
* Static pins can not be muxed between different functions but
* still need mark entries in the pinmux list. Add each static
@@ -2140,15 +2135,6 @@ static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
-/* - HDMI ------------------------------------------------------------------- */
-static const unsigned int hdmi0_cec_pins[] = {
- /* HDMI0_CEC */
- RCAR_GP_PIN(7, 2),
-};
-static const unsigned int hdmi0_cec_mux[] = {
- HDMI0_CEC_MARK,
-};
-
/* - HSCIF0 ----------------------------------------------------------------- */
static const unsigned int hscif0_data_pins[] = {
/* RX, TX */
@@ -4124,8 +4110,8 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[310];
- struct sh_pfc_pin_group automotive[33];
+ struct sh_pfc_pin_group common[312];
+ struct sh_pfc_pin_group automotive[30];
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
@@ -4160,6 +4146,9 @@ static const struct {
SH_PFC_PIN_GROUP(can0_data_b),
SH_PFC_PIN_GROUP(can1_data),
SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(canfd0_data_a),
+ SH_PFC_PIN_GROUP(canfd0_data_b),
+ SH_PFC_PIN_GROUP(canfd1_data),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -4168,7 +4157,6 @@ static const struct {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(hdmi0_cec),
SH_PFC_PIN_GROUP(hscif0_data),
SH_PFC_PIN_GROUP(hscif0_clk),
SH_PFC_PIN_GROUP(hscif0_ctrl),
@@ -4440,9 +4428,6 @@ static const struct {
SH_PFC_PIN_GROUP(vin5_clk),
},
.automotive = {
- SH_PFC_PIN_GROUP(canfd0_data_a),
- SH_PFC_PIN_GROUP(canfd0_data_b),
- SH_PFC_PIN_GROUP(canfd1_data),
SH_PFC_PIN_GROUP(drif0_ctrl_a),
SH_PFC_PIN_GROUP(drif0_data0_a),
SH_PFC_PIN_GROUP(drif0_data1_a),
@@ -4585,10 +4570,6 @@ static const char * const du_groups[] = {
"du_disp",
};
-static const char * const hdmi0_groups[] = {
- "hdmi0_cec",
-};
-
static const char * const hscif0_groups[] = {
"hscif0_data",
"hscif0_clk",
@@ -4982,8 +4963,8 @@ static const char * const vin5_groups[] = {
};
static const struct {
- struct sh_pfc_function common[48];
- struct sh_pfc_function automotive[6];
+ struct sh_pfc_function common[49];
+ struct sh_pfc_function automotive[4];
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -4991,8 +4972,9 @@ static const struct {
SH_PFC_FUNCTION(can0),
SH_PFC_FUNCTION(can1),
SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(hdmi0),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
SH_PFC_FUNCTION(hscif2),
@@ -5036,8 +5018,6 @@ static const struct {
SH_PFC_FUNCTION(vin5),
},
.automotive = {
- SH_PFC_FUNCTION(canfd0),
- SH_PFC_FUNCTION(canfd1),
SH_PFC_FUNCTION(drif0),
SH_PFC_FUNCTION(drif1),
SH_PFC_FUNCTION(drif2),
@@ -5048,7 +5028,7 @@ static const struct {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5080,9 +5060,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, GPSR0_3,
GP_0_2_FN, GPSR0_2,
GP_0_1_FN, GPSR0_1,
- GP_0_0_FN, GPSR0_0, }
+ GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5114,9 +5094,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, GPSR1_3,
GP_1_2_FN, GPSR1_2,
GP_1_1_FN, GPSR1_1,
- GP_1_0_FN, GPSR1_0, }
+ GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5148,9 +5128,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, GPSR2_3,
GP_2_2_FN, GPSR2_2,
GP_2_1_FN, GPSR2_1,
- GP_2_0_FN, GPSR2_0, }
+ GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5182,9 +5162,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, GPSR3_3,
GP_3_2_FN, GPSR3_2,
GP_3_1_FN, GPSR3_1,
- GP_3_0_FN, GPSR3_0, }
+ GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5216,9 +5196,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, GPSR4_3,
GP_4_2_FN, GPSR4_2,
GP_4_1_FN, GPSR4_1,
- GP_4_0_FN, GPSR4_0, }
+ GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5250,9 +5230,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, GPSR5_3,
GP_5_2_FN, GPSR5_2,
GP_5_1_FN, GPSR5_1,
- GP_5_0_FN, GPSR5_0, }
+ GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
GP_6_31_FN, GPSR6_31,
GP_6_30_FN, GPSR6_30,
GP_6_29_FN, GPSR6_29,
@@ -5284,9 +5264,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, GPSR6_3,
GP_6_2_FN, GPSR6_2,
GP_6_1_FN, GPSR6_1,
- GP_6_0_FN, GPSR6_0, }
+ GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5318,14 +5298,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_3_FN, GPSR7_3,
GP_7_2_FN, GPSR7_2,
GP_7_1_FN, GPSR7_1,
- GP_7_0_FN, GPSR7_0, }
+ GP_7_0_FN, GPSR7_0, ))
},
#undef F_
#undef FM
#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
IP0_31_28
IP0_27_24
IP0_23_20
@@ -5333,9 +5313,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP0_15_12
IP0_11_8
IP0_7_4
- IP0_3_0 }
+ IP0_3_0 ))
},
- { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
IP1_31_28
IP1_27_24
IP1_23_20
@@ -5343,9 +5323,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1_15_12
IP1_11_8
IP1_7_4
- IP1_3_0 }
+ IP1_3_0 ))
},
- { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
IP2_31_28
IP2_27_24
IP2_23_20
@@ -5353,9 +5333,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2_15_12
IP2_11_8
IP2_7_4
- IP2_3_0 }
+ IP2_3_0 ))
},
- { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
IP3_31_28
IP3_27_24
IP3_23_20
@@ -5363,9 +5343,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3_15_12
IP3_11_8
IP3_7_4
- IP3_3_0 }
+ IP3_3_0 ))
},
- { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
IP4_31_28
IP4_27_24
IP4_23_20
@@ -5373,9 +5353,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP4_15_12
IP4_11_8
IP4_7_4
- IP4_3_0 }
+ IP4_3_0 ))
},
- { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
IP5_31_28
IP5_27_24
IP5_23_20
@@ -5383,9 +5363,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP5_15_12
IP5_11_8
IP5_7_4
- IP5_3_0 }
+ IP5_3_0 ))
},
- { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
IP6_31_28
IP6_27_24
IP6_23_20
@@ -5393,9 +5373,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_15_12
IP6_11_8
IP6_7_4
- IP6_3_0 }
+ IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
@@ -5403,9 +5383,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IP7_11_8
IP7_7_4
- IP7_3_0 }
+ IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
IP8_31_28
IP8_27_24
IP8_23_20
@@ -5413,9 +5393,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP8_15_12
IP8_11_8
IP8_7_4
- IP8_3_0 }
+ IP8_3_0 ))
},
- { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
IP9_31_28
IP9_27_24
IP9_23_20
@@ -5423,9 +5403,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP9_15_12
IP9_11_8
IP9_7_4
- IP9_3_0 }
+ IP9_3_0 ))
},
- { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
IP10_31_28
IP10_27_24
IP10_23_20
@@ -5433,9 +5413,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP10_15_12
IP10_11_8
IP10_7_4
- IP10_3_0 }
+ IP10_3_0 ))
},
- { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
IP11_31_28
IP11_27_24
IP11_23_20
@@ -5443,9 +5423,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP11_15_12
IP11_11_8
IP11_7_4
- IP11_3_0 }
+ IP11_3_0 ))
},
- { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
IP12_31_28
IP12_27_24
IP12_23_20
@@ -5453,9 +5433,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP12_15_12
IP12_11_8
IP12_7_4
- IP12_3_0 }
+ IP12_3_0 ))
},
- { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
IP13_31_28
IP13_27_24
IP13_23_20
@@ -5463,9 +5443,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP13_15_12
IP13_11_8
IP13_7_4
- IP13_3_0 }
+ IP13_3_0 ))
},
- { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+ { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
IP14_31_28
IP14_27_24
IP14_23_20
@@ -5473,9 +5453,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP14_15_12
IP14_11_8
IP14_7_4
- IP14_3_0 }
+ IP14_3_0 ))
},
- { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
IP15_31_28
IP15_27_24
IP15_23_20
@@ -5483,9 +5463,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP15_15_12
IP15_11_8
IP15_7_4
- IP15_3_0 }
+ IP15_3_0 ))
},
- { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4) {
+ { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4, GROUP(
IP16_31_28
IP16_27_24
IP16_23_20
@@ -5493,9 +5473,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP16_15_12
IP16_11_8
IP16_7_4
- IP16_3_0 }
+ IP16_3_0 ))
},
- { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4) {
+ { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
IP17_31_28
IP17_27_24
IP17_23_20
@@ -5503,9 +5483,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP17_15_12
IP17_11_8
IP17_7_4
- IP17_3_0 }
+ IP17_3_0 ))
},
- { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4) {
+ { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP(
/* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -5513,7 +5493,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IP18_7_4
- IP18_3_0 }
+ IP18_3_0 ))
},
#undef F_
#undef FM
@@ -5521,8 +5501,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- 3, 2, 3, 1, 1, 1, 1, 1, 2, 1,
- 1, 2, 1, 1, 1, 2, 2, 1, 2, 3) {
+ GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2,
+ 1, 1, 1, 2, 2, 1, 2, 3),
+ GROUP(
MOD_SEL0_31_30_29
MOD_SEL0_28_27
MOD_SEL0_26_25_24
@@ -5543,11 +5524,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_5
MOD_SEL0_4_3
/* RESERVED 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- 2, 3, 1, 2, 3, 1, 1, 2, 1,
- 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
+ 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
MOD_SEL1_31_30
MOD_SEL1_29_28_27
MOD_SEL1_26
@@ -5570,11 +5552,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_3
MOD_SEL1_2
MOD_SEL1_1
- MOD_SEL1_0 }
+ MOD_SEL1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
- 1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1, 1,
- 4, 4, 4, 3, 1) {
+ GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1,
+ 1, 4, 4, 4, 3, 1),
+ GROUP(
MOD_SEL2_31
MOD_SEL2_30
MOD_SEL2_29
@@ -5600,7 +5583,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* RESERVED 3, 2, 1 */
0, 0, 0, 0, 0, 0, 0, 0,
- MOD_SEL2_0 }
+ MOD_SEL2_0 ))
},
{ },
};
@@ -5721,7 +5704,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */
{ RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */
{ RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */
- { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */
+ { RCAR_GP_PIN(7, 2), 12, 3 }, /* GP7_02 */
{ RCAR_GP_PIN(7, 3), 8, 3 }, /* GP7_03 */
{ PIN_A_NUMBER('P', 7), 4, 2 }, /* DU_DOTCLKIN0 */
{ PIN_A_NUMBER('P', 8), 0, 2 }, /* DU_DOTCLKIN1 */
@@ -5855,10 +5838,12 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
enum ioctrl_regs {
POCCTRL,
+ TDSELCTRL,
};
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
[POCCTRL] = { 0xe6060380, },
+ [TDSELCTRL] = { 0xe60603c0, },
{ /* sentinel */ },
};
@@ -5975,7 +5960,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[25] = RCAR_GP_PIN(0, 15), /* D15 */
[26] = RCAR_GP_PIN(7, 0), /* AVS1 */
[27] = RCAR_GP_PIN(7, 1), /* AVS2 */
- [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [28] = RCAR_GP_PIN(7, 2), /* GP7_02 */
[29] = RCAR_GP_PIN(7, 3), /* GP7_03 */
[30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
[31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
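
The hunks above and below mechanically convert PINMUX_CFG_REG() entries from a trailing brace initializer (") { ... }") to a GROUP()-wrapped macro argument. A minimal standalone sketch of the underlying preprocessor trick follows; the struct and the CFG_REG macro here are invented for illustration and are not the kernel's actual sh_pfc.h definitions. Wrapping the comma-separated list lets it pass through the macro as a single argument, so the receiving macro can both expand it into a compound literal and derive its length, which the old brace-initializer form could not expose.

	/*
	 * Minimal sketch, assuming hypothetical cfg_reg/CFG_REG names:
	 * GROUP() lets a comma-separated id list travel through a macro
	 * as one argument; the receiver expands it into a compound
	 * literal and counts its entries with sizeof.
	 */
	#include <stdio.h>

	#define GROUP(...) __VA_ARGS__

	#define CFG_REG(name, ids)						\
		{ name, (const unsigned short []){ ids },			\
		  sizeof((const unsigned short []){ ids }) / sizeof(unsigned short) }

	struct cfg_reg {
		const char *name;
		const unsigned short *enum_ids;	/* expanded GROUP() list */
		unsigned int nr_ids;		/* derived at compile time */
	};

	static const struct cfg_reg regs[] = {
		CFG_REG("GPSR_EXAMPLE", GROUP(1, 2, 3, 4)),
	};

	int main(void)
	{
		printf("%s carries %u enum ids\n", regs[0].name, regs[0].nr_ids);
		return 0;
	}

With the older ") { ... }" form the id list was only a brace initializer of a fixed-size member, so its length was invisible to the macro; passing it through GROUP() as shown above is presumably what enables the length bookkeeping these hunks introduce.
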
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 14c4b671cddf..090024355eba 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -3,7 +3,7 @@
* R8A77965 processor support - PFC hardware block.
*
* Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
- * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016-2019 Renesas Electronics Corp.
*
* This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7796.c
*
@@ -12,6 +12,7 @@
* Copyright (C) 2015 Renesas Electronics Corporation
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include "core.h"
@@ -207,7 +208,7 @@
/* GPSR7 */
#define GPSR7_3 FM(GP7_03)
-#define GPSR7_2 FM(HDMI0_CEC)
+#define GPSR7_2 FM(GP7_02)
#define GPSR7_1 FM(AVS2)
#define GPSR7_0 FM(AVS1)
@@ -356,7 +357,7 @@
#define IP16_23_20 FM(SSI_SDATA7) FM(HCTS2_N_B) FM(MSIOF1_RXD_C) F_(0, 0) F_(0, 0) FM(TS_SDEN1_A) FM(STP_ISEN_1_A) FM(RIF1_D0_A) FM(RIF3_D0_A) F_(0, 0) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_27_24 FM(SSI_SDATA8) FM(HRTS2_N_B) FM(MSIOF1_TXD_C) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A) FM(RIF1_D1_A) FM(RIF3_D1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP16_31_28 FM(SSI_SDATA9_A) FM(HSCK2_B) FM(MSIOF1_SS1_C) FM(HSCK1_A) FM(SSI_WS1_B) FM(SCK1) FM(STP_IVCXO27_1_A) FM(SCK5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP17_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP17_7_4 FM(AUDIO_CLKB_B) FM(SCIF_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_1_D) FM(REMOCON_A) F_(0, 0) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP17_11_8 FM(USB0_PWEN) F_(0, 0) F_(0, 0) FM(SIM0_RST_C) F_(0, 0) FM(TS_SCK1_D) FM(STP_ISCLK_1_D) FM(BPFCLK_B) FM(RIF3_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(HSCK2_C) F_(0, 0) F_(0, 0)
#define IP17_15_12 FM(USB0_OVC) F_(0, 0) F_(0, 0) FM(SIM0_D_C) F_(0, 0) FM(TS_SDAT1_D) FM(STP_ISD_1_D) F_(0, 0) FM(RIF3_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(HRX2_C) F_(0, 0) F_(0, 0)
@@ -467,7 +468,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL0_9_8 FM(SEL_DRIF1_0) FM(SEL_DRIF1_1) FM(SEL_DRIF1_2) F_(0, 0)
#define MOD_SEL0_7_6 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1) FM(SEL_DRIF0_2) F_(0, 0)
#define MOD_SEL0_5 FM(SEL_CANFD0_0) FM(SEL_CANFD0_1)
-#define MOD_SEL0_4_3 FM(SEL_ADG_A_0) FM(SEL_ADG_A_1) FM(SEL_ADG_A_2) FM(SEL_ADG_A_3)
+#define MOD_SEL0_4_3 FM(SEL_ADGA_0) FM(SEL_ADGA_1) FM(SEL_ADGA_2) FM(SEL_ADGA_3)
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
#define MOD_SEL1_31_30 FM(SEL_TSIF1_0) FM(SEL_TSIF1_1) FM(SEL_TSIF1_2) FM(SEL_TSIF1_3)
@@ -500,12 +501,12 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL2_28_27 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) FM(SEL_FM_3)
#define MOD_SEL2_26 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1)
#define MOD_SEL2_25_24_23 FM(SEL_I2C6_0) FM(SEL_I2C6_1) FM(SEL_I2C6_2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define MOD_SEL2_22 FM(SEL_NDFC_0) FM(SEL_NDFC_1)
+#define MOD_SEL2_22 FM(SEL_NDF_0) FM(SEL_NDF_1)
#define MOD_SEL2_21 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
#define MOD_SEL2_20 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
#define MOD_SEL2_19 FM(SEL_TIMER_TMU2_0) FM(SEL_TIMER_TMU2_1)
-#define MOD_SEL2_18 FM(SEL_ADG_B_0) FM(SEL_ADG_B_1)
-#define MOD_SEL2_17 FM(SEL_ADG_C_0) FM(SEL_ADG_C_1)
+#define MOD_SEL2_18 FM(SEL_ADGB_0) FM(SEL_ADGB_1)
+#define MOD_SEL2_17 FM(SEL_ADGC_0) FM(SEL_ADGC_1)
#define MOD_SEL2_0 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
#define PINMUX_MOD_SELS \
@@ -557,6 +558,9 @@ MOD_SEL0_4_3 MOD_SEL1_4 \
FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN3) \
FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR)
+#define PINMUX_PHYS \
+ FM(SCL0) FM(SDA0) FM(SCL3) FM(SDA3) FM(SCL5) FM(SDA5)
+
enum {
PINMUX_RESERVED = 0,
@@ -582,6 +586,7 @@ enum {
PINMUX_IPSR
PINMUX_MOD_SELS
PINMUX_STATIC
+ PINMUX_PHYS
PINMUX_MARK_END,
#undef F_
#undef FM
@@ -594,7 +599,7 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(AVS2),
PINMUX_SINGLE(CLKOUT),
PINMUX_SINGLE(GP7_03),
- PINMUX_SINGLE(HDMI0_CEC),
+ PINMUX_SINGLE(GP7_02),
PINMUX_SINGLE(MSIOF0_RXD),
PINMUX_SINGLE(MSIOF0_SCK),
PINMUX_SINGLE(MSIOF0_TXD),
@@ -619,13 +624,15 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP0_15_12, TX4_A, SEL_SCIF4_0),
PINMUX_IPSR_GPSR(IP0_19_16, FSCLKST2_N_A),
- PINMUX_IPSR_MSEL(IP0_19_16, AVB_AVTP_MATCH_A, SEL_ETHERAVB_0),
- PINMUX_IPSR_MSEL(IP0_19_16, MSIOF2_RXD_C, SEL_MSIOF2_2),
- PINMUX_IPSR_MSEL(IP0_19_16, CTS4_N_A, SEL_SCIF4_0),
+ PINMUX_IPSR_PHYS_MSEL(IP0_19_16, AVB_AVTP_MATCH_A, I2C_SEL_5_0, SEL_ETHERAVB_0),
+ PINMUX_IPSR_PHYS_MSEL(IP0_19_16, MSIOF2_RXD_C, I2C_SEL_5_0, SEL_MSIOF2_2),
+ PINMUX_IPSR_PHYS_MSEL(IP0_19_16, CTS4_N_A, I2C_SEL_5_0, SEL_SCIF4_0),
+ PINMUX_IPSR_PHYS(IP0_19_16, SCL5, I2C_SEL_5_1),
- PINMUX_IPSR_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A, SEL_ETHERAVB_0),
- PINMUX_IPSR_MSEL(IP0_23_20, MSIOF2_TXD_C, SEL_MSIOF2_2),
- PINMUX_IPSR_MSEL(IP0_23_20, RTS4_N_A, SEL_SCIF4_0),
+ PINMUX_IPSR_PHYS_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A, I2C_SEL_5_0, SEL_ETHERAVB_0),
+ PINMUX_IPSR_PHYS_MSEL(IP0_23_20, MSIOF2_TXD_C, I2C_SEL_5_0, SEL_MSIOF2_2),
+ PINMUX_IPSR_PHYS_MSEL(IP0_23_20, RTS4_N_A, I2C_SEL_5_0, SEL_SCIF4_0),
+ PINMUX_IPSR_PHYS(IP0_23_20, SDA5, I2C_SEL_5_1),
PINMUX_IPSR_GPSR(IP0_27_24, IRQ0),
PINMUX_IPSR_GPSR(IP0_27_24, QPOLB),
@@ -678,14 +685,16 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP1_19_16, VI4_DATA6_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP1_19_16, IECLK_B, SEL_IEBUS_1),
- PINMUX_IPSR_MSEL(IP1_23_20, PWM1_A, SEL_PWM1_0),
- PINMUX_IPSR_MSEL(IP1_23_20, HRX3_D, SEL_HSCIF3_3),
- PINMUX_IPSR_MSEL(IP1_23_20, VI4_DATA7_B, SEL_VIN4_1),
- PINMUX_IPSR_MSEL(IP1_23_20, IERX_B, SEL_IEBUS_1),
+ PINMUX_IPSR_PHYS_MSEL(IP1_23_20, PWM1_A, I2C_SEL_3_0, SEL_PWM1_0),
+ PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
+ PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
+ PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
+ PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
- PINMUX_IPSR_MSEL(IP1_27_24, PWM2_A, SEL_PWM2_0),
- PINMUX_IPSR_MSEL(IP1_27_24, HTX3_D, SEL_HSCIF3_3),
- PINMUX_IPSR_MSEL(IP1_27_24, IETX_B, SEL_IEBUS_1),
+ PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
+ PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
+ PINMUX_IPSR_PHYS_MSEL(IP1_27_24, IETX_B, I2C_SEL_3_0, SEL_IEBUS_1),
+ PINMUX_IPSR_PHYS(IP1_27_24, SDA3, I2C_SEL_3_1),
PINMUX_IPSR_GPSR(IP1_31_28, A0),
PINMUX_IPSR_GPSR(IP1_31_28, LCDOUT16),
@@ -1016,35 +1025,35 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP8_15_12, SD1_CMD),
PINMUX_IPSR_MSEL(IP8_15_12, MSIOF1_SYNC_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_15_12, NFCE_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_15_12, NFCE_N_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_15_12, SIM0_D_A, SEL_SIMCARD_0),
PINMUX_IPSR_MSEL(IP8_15_12, STP_IVCXO27_1_B, SEL_SSP1_1_1),
PINMUX_IPSR_GPSR(IP8_19_16, SD1_DAT0),
PINMUX_IPSR_GPSR(IP8_19_16, SD2_DAT4),
PINMUX_IPSR_MSEL(IP8_19_16, MSIOF1_RXD_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_19_16, NFWP_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_19_16, NFWP_N_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_19_16, TS_SCK1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_19_16, STP_ISCLK_1_B, SEL_SSP1_1_1),
PINMUX_IPSR_GPSR(IP8_23_20, SD1_DAT1),
PINMUX_IPSR_GPSR(IP8_23_20, SD2_DAT5),
PINMUX_IPSR_MSEL(IP8_23_20, MSIOF1_TXD_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_23_20, TS_SPSYNC1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_23_20, STP_ISSYNC_1_B, SEL_SSP1_1_1),
PINMUX_IPSR_GPSR(IP8_27_24, SD1_DAT2),
PINMUX_IPSR_GPSR(IP8_27_24, SD2_DAT6),
PINMUX_IPSR_MSEL(IP8_27_24, MSIOF1_SS1_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_27_24, TS_SDAT1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_27_24, STP_ISD_1_B, SEL_SSP1_1_1),
PINMUX_IPSR_GPSR(IP8_31_28, SD1_DAT3),
PINMUX_IPSR_GPSR(IP8_31_28, SD2_DAT7),
PINMUX_IPSR_MSEL(IP8_31_28, MSIOF1_SS2_G, SEL_MSIOF1_6),
- PINMUX_IPSR_MSEL(IP8_31_28, NFRB_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_31_28, NFRB_N_B, SEL_NDF_1),
PINMUX_IPSR_MSEL(IP8_31_28, TS_SDEN1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_31_28, STP_ISEN_1_B, SEL_SSP1_1_1),
@@ -1111,26 +1120,28 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP11_7_4, NFCLE),
PINMUX_IPSR_GPSR(IP11_11_8, SD0_CD),
- PINMUX_IPSR_MSEL(IP11_11_8, NFDATA14_A, SEL_NDFC_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, NFDATA14_A, SEL_NDF_0),
PINMUX_IPSR_MSEL(IP11_11_8, SCL2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP11_11_8, SIM0_RST_A, SEL_SIMCARD_0),
PINMUX_IPSR_GPSR(IP11_15_12, SD0_WP),
- PINMUX_IPSR_MSEL(IP11_15_12, NFDATA15_A, SEL_NDFC_0),
+ PINMUX_IPSR_MSEL(IP11_15_12, NFDATA15_A, SEL_NDF_0),
PINMUX_IPSR_MSEL(IP11_15_12, SDA2_B, SEL_I2C2_1),
- PINMUX_IPSR_GPSR(IP11_19_16, SD1_CD),
- PINMUX_IPSR_MSEL(IP11_19_16, NFRB_N_A, SEL_NDFC_0),
- PINMUX_IPSR_MSEL(IP11_19_16, SIM0_CLK_B, SEL_SIMCARD_1),
+ PINMUX_IPSR_MSEL(IP11_19_16, SD1_CD, I2C_SEL_0_0),
+ PINMUX_IPSR_PHYS_MSEL(IP11_19_16, NFRB_N_A, I2C_SEL_0_0, SEL_NDF_0),
+ PINMUX_IPSR_PHYS_MSEL(IP11_19_16, SIM0_CLK_B, I2C_SEL_0_0, SEL_SIMCARD_1),
+ PINMUX_IPSR_PHYS(IP11_19_16, SCL0, I2C_SEL_0_1),
- PINMUX_IPSR_GPSR(IP11_23_20, SD1_WP),
- PINMUX_IPSR_MSEL(IP11_23_20, NFCE_N_A, SEL_NDFC_0),
- PINMUX_IPSR_MSEL(IP11_23_20, SIM0_D_B, SEL_SIMCARD_1),
+ PINMUX_IPSR_MSEL(IP11_23_20, SD1_WP, I2C_SEL_0_0),
+ PINMUX_IPSR_PHYS_MSEL(IP11_23_20, NFCE_N_A, I2C_SEL_0_0, SEL_NDF_0),
+ PINMUX_IPSR_PHYS_MSEL(IP11_23_20, SIM0_D_B, I2C_SEL_0_0, SEL_SIMCARD_1),
+ PINMUX_IPSR_PHYS(IP11_23_20, SDA0, I2C_SEL_0_1),
PINMUX_IPSR_GPSR(IP11_27_24, SCK0),
PINMUX_IPSR_MSEL(IP11_27_24, HSCK1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP11_27_24, MSIOF1_SS2_B, SEL_MSIOF1_1),
- PINMUX_IPSR_MSEL(IP11_27_24, AUDIO_CLKC_B, SEL_ADG_C_1),
+ PINMUX_IPSR_MSEL(IP11_27_24, AUDIO_CLKC_B, SEL_ADGC_1),
PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP11_27_24, SIM0_RST_B, SEL_SIMCARD_1),
PINMUX_IPSR_MSEL(IP11_27_24, STP_OPWM_0_C, SEL_SSP1_0_2),
@@ -1163,7 +1174,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP12_11_8, RTS0_N),
PINMUX_IPSR_MSEL(IP12_11_8, HRTS1_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP12_11_8, MSIOF1_SS1_B, SEL_MSIOF1_1),
- PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKA_B, SEL_ADG_A_1),
+ PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKA_B, SEL_ADGA_1),
PINMUX_IPSR_MSEL(IP12_11_8, SCL2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP12_11_8, STP_IVCXO27_1_C, SEL_SSP1_1_2),
PINMUX_IPSR_MSEL(IP12_11_8, RIF0_SYNC_B, SEL_DRIF0_1),
@@ -1222,7 +1233,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP13_11_8, HSCK0),
PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
- PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0),
+ PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADGB_0),
PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1),
PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
@@ -1268,8 +1279,8 @@ static const u16 pinmux_data[] = {
/* IPSR14 */
PINMUX_IPSR_GPSR(IP14_3_0, MSIOF0_SS1),
PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0),
- PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDFC_0),
- PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2),
+ PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0),
+ PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADGA_2),
PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A),
@@ -1278,7 +1289,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP14_7_4, MSIOF0_SS2),
PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
- PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0),
+ PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADGC_0),
PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3),
PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D),
@@ -1407,10 +1418,9 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0),
/* IPSR17 */
- PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKA_A, SEL_ADG_A_0),
- PINMUX_IPSR_GPSR(IP17_3_0, CC5_OSCOUT),
+ PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKA_A, SEL_ADGA_0),
- PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKB_B, SEL_ADG_B_1),
+ PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKB_B, SEL_ADGB_1),
PINMUX_IPSR_MSEL(IP17_7_4, SCIF_CLK_A, SEL_SCIF_0),
PINMUX_IPSR_MSEL(IP17_7_4, STP_IVCXO27_1_D, SEL_SSP1_1_3),
PINMUX_IPSR_MSEL(IP17_7_4, REMOCON_A, SEL_REMOCON_0),
@@ -1494,11 +1504,6 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP18_7_4, FMIN_C, SEL_FM_2),
PINMUX_IPSR_MSEL(IP18_7_4, FMIN_D, SEL_FM_3),
- /* I2C */
- PINMUX_IPSR_NOGP(0, I2C_SEL_0_1),
- PINMUX_IPSR_NOGP(0, I2C_SEL_3_1),
- PINMUX_IPSR_NOGP(0, I2C_SEL_5_1),
-
/*
* Static pins can not be muxed between different functions but
* still need mark entries in the pinmux list. Add each static
@@ -2478,52 +2483,92 @@ static const unsigned int hscif4_data_b_mux[] = {
};
/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+};
+
+static const unsigned int i2c0_mux[] = {
+ SCL0_MARK, SDA0_MARK,
+};
+
static const unsigned int i2c1_a_pins[] = {
/* SDA, SCL */
RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
};
+
static const unsigned int i2c1_a_mux[] = {
SDA1_A_MARK, SCL1_A_MARK,
};
+
static const unsigned int i2c1_b_pins[] = {
/* SDA, SCL */
RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
};
+
static const unsigned int i2c1_b_mux[] = {
SDA1_B_MARK, SCL1_B_MARK,
};
+
static const unsigned int i2c2_a_pins[] = {
/* SDA, SCL */
RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
};
+
static const unsigned int i2c2_a_mux[] = {
SDA2_A_MARK, SCL2_A_MARK,
};
+
static const unsigned int i2c2_b_pins[] = {
/* SDA, SCL */
RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
};
+
static const unsigned int i2c2_b_mux[] = {
SDA2_B_MARK, SCL2_B_MARK,
};
+
+static const unsigned int i2c3_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+
+static const unsigned int i2c3_mux[] = {
+ SCL3_MARK, SDA3_MARK,
+};
+
+static const unsigned int i2c5_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 14),
+};
+
+static const unsigned int i2c5_mux[] = {
+ SCL5_MARK, SDA5_MARK,
+};
+
static const unsigned int i2c6_a_pins[] = {
/* SDA, SCL */
RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
};
+
static const unsigned int i2c6_a_mux[] = {
SDA6_A_MARK, SCL6_A_MARK,
};
+
static const unsigned int i2c6_b_pins[] = {
/* SDA, SCL */
RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
};
+
static const unsigned int i2c6_b_mux[] = {
SDA6_B_MARK, SCL6_B_MARK,
};
+
static const unsigned int i2c6_c_pins[] = {
/* SDA, SCL */
RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
};
+
static const unsigned int i2c6_c_mux[] = {
SDA6_C_MARK, SCL6_C_MARK,
};
@@ -4413,10 +4458,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(hscif4_clk),
SH_PFC_PIN_GROUP(hscif4_ctrl),
SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c0),
SH_PFC_PIN_GROUP(i2c1_a),
SH_PFC_PIN_GROUP(i2c1_b),
SH_PFC_PIN_GROUP(i2c2_a),
SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c5),
SH_PFC_PIN_GROUP(i2c6_a),
SH_PFC_PIN_GROUP(i2c6_b),
SH_PFC_PIN_GROUP(i2c6_c),
@@ -4807,6 +4855,10 @@ static const char * const hscif4_groups[] = {
"hscif4_data_b",
};
+static const char * const i2c0_groups[] = {
+ "i2c0",
+};
+
static const char * const i2c1_groups[] = {
"i2c1_a",
"i2c1_b",
@@ -4817,6 +4869,14 @@ static const char * const i2c2_groups[] = {
"i2c2_b",
};
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5",
+};
+
static const char * const i2c6_groups[] = {
"i2c6_a",
"i2c6_b",
@@ -5166,8 +5226,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(hscif2),
SH_PFC_FUNCTION(hscif3),
SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c0),
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c5),
SH_PFC_FUNCTION(i2c6),
SH_PFC_FUNCTION(intc_ex),
SH_PFC_FUNCTION(msiof0),
@@ -5205,7 +5268,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5237,9 +5300,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, GPSR0_3,
GP_0_2_FN, GPSR0_2,
GP_0_1_FN, GPSR0_1,
- GP_0_0_FN, GPSR0_0, }
+ GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5271,9 +5334,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, GPSR1_3,
GP_1_2_FN, GPSR1_2,
GP_1_1_FN, GPSR1_1,
- GP_1_0_FN, GPSR1_0, }
+ GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5305,9 +5368,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, GPSR2_3,
GP_2_2_FN, GPSR2_2,
GP_2_1_FN, GPSR2_1,
- GP_2_0_FN, GPSR2_0, }
+ GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5339,9 +5402,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, GPSR3_3,
GP_3_2_FN, GPSR3_2,
GP_3_1_FN, GPSR3_1,
- GP_3_0_FN, GPSR3_0, }
+ GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5373,9 +5436,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, GPSR4_3,
GP_4_2_FN, GPSR4_2,
GP_4_1_FN, GPSR4_1,
- GP_4_0_FN, GPSR4_0, }
+ GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5407,9 +5470,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, GPSR5_3,
GP_5_2_FN, GPSR5_2,
GP_5_1_FN, GPSR5_1,
- GP_5_0_FN, GPSR5_0, }
+ GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
GP_6_31_FN, GPSR6_31,
GP_6_30_FN, GPSR6_30,
GP_6_29_FN, GPSR6_29,
@@ -5441,9 +5504,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, GPSR6_3,
GP_6_2_FN, GPSR6_2,
GP_6_1_FN, GPSR6_1,
- GP_6_0_FN, GPSR6_0, }
+ GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -5475,14 +5538,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_3_FN, GPSR7_3,
GP_7_2_FN, GPSR7_2,
GP_7_1_FN, GPSR7_1,
- GP_7_0_FN, GPSR7_0, }
+ GP_7_0_FN, GPSR7_0, ))
},
#undef F_
#undef FM
#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
IP0_31_28
IP0_27_24
IP0_23_20
@@ -5490,9 +5553,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP0_15_12
IP0_11_8
IP0_7_4
- IP0_3_0 }
+ IP0_3_0 ))
},
- { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
IP1_31_28
IP1_27_24
IP1_23_20
@@ -5500,9 +5563,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1_15_12
IP1_11_8
IP1_7_4
- IP1_3_0 }
+ IP1_3_0 ))
},
- { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
IP2_31_28
IP2_27_24
IP2_23_20
@@ -5510,9 +5573,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2_15_12
IP2_11_8
IP2_7_4
- IP2_3_0 }
+ IP2_3_0 ))
},
- { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
IP3_31_28
IP3_27_24
IP3_23_20
@@ -5520,9 +5583,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3_15_12
IP3_11_8
IP3_7_4
- IP3_3_0 }
+ IP3_3_0 ))
},
- { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
IP4_31_28
IP4_27_24
IP4_23_20
@@ -5530,9 +5593,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP4_15_12
IP4_11_8
IP4_7_4
- IP4_3_0 }
+ IP4_3_0 ))
},
- { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
IP5_31_28
IP5_27_24
IP5_23_20
@@ -5540,9 +5603,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP5_15_12
IP5_11_8
IP5_7_4
- IP5_3_0 }
+ IP5_3_0 ))
},
- { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
IP6_31_28
IP6_27_24
IP6_23_20
@@ -5550,9 +5613,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_15_12
IP6_11_8
IP6_7_4
- IP6_3_0 }
+ IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
@@ -5560,9 +5623,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IP7_11_8
IP7_7_4
- IP7_3_0 }
+ IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
IP8_31_28
IP8_27_24
IP8_23_20
@@ -5570,9 +5633,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP8_15_12
IP8_11_8
IP8_7_4
- IP8_3_0 }
+ IP8_3_0 ))
},
- { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
IP9_31_28
IP9_27_24
IP9_23_20
@@ -5580,9 +5643,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP9_15_12
IP9_11_8
IP9_7_4
- IP9_3_0 }
+ IP9_3_0 ))
},
- { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
IP10_31_28
IP10_27_24
IP10_23_20
@@ -5590,9 +5653,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP10_15_12
IP10_11_8
IP10_7_4
- IP10_3_0 }
+ IP10_3_0 ))
},
- { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
IP11_31_28
IP11_27_24
IP11_23_20
@@ -5600,9 +5663,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP11_15_12
IP11_11_8
IP11_7_4
- IP11_3_0 }
+ IP11_3_0 ))
},
- { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
IP12_31_28
IP12_27_24
IP12_23_20
@@ -5610,9 +5673,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP12_15_12
IP12_11_8
IP12_7_4
- IP12_3_0 }
+ IP12_3_0 ))
},
- { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
IP13_31_28
IP13_27_24
IP13_23_20
@@ -5620,9 +5683,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP13_15_12
IP13_11_8
IP13_7_4
- IP13_3_0 }
+ IP13_3_0 ))
},
- { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+ { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
IP14_31_28
IP14_27_24
IP14_23_20
@@ -5630,9 +5693,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP14_15_12
IP14_11_8
IP14_7_4
- IP14_3_0 }
+ IP14_3_0 ))
},
- { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
IP15_31_28
IP15_27_24
IP15_23_20
@@ -5640,9 +5703,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP15_15_12
IP15_11_8
IP15_7_4
- IP15_3_0 }
+ IP15_3_0 ))
},
- { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4) {
+ { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4, GROUP(
IP16_31_28
IP16_27_24
IP16_23_20
@@ -5650,9 +5713,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP16_15_12
IP16_11_8
IP16_7_4
- IP16_3_0 }
+ IP16_3_0 ))
},
- { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4) {
+ { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
IP17_31_28
IP17_27_24
IP17_23_20
@@ -5660,9 +5723,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP17_15_12
IP17_11_8
IP17_7_4
- IP17_3_0 }
+ IP17_3_0 ))
},
- { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4) {
+ { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP(
/* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -5670,7 +5733,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IP18_7_4
- IP18_3_0 }
+ IP18_3_0 ))
},
#undef F_
#undef FM
@@ -5678,8 +5741,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- 3, 2, 3, 1, 1, 1, 1, 1, 2, 1,
- 1, 2, 1, 1, 1, 2, 2, 1, 2, 3) {
+ GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2,
+ 1, 1, 1, 2, 2, 1, 2, 3),
+ GROUP(
MOD_SEL0_31_30_29
MOD_SEL0_28_27
MOD_SEL0_26_25_24
@@ -5700,11 +5764,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_5
MOD_SEL0_4_3
/* RESERVED 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- 2, 3, 1, 2, 3, 1, 1, 2, 1,
- 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
+ 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
MOD_SEL1_31_30
MOD_SEL1_29_28_27
MOD_SEL1_26
@@ -5727,11 +5792,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_3
MOD_SEL1_2
MOD_SEL1_1
- MOD_SEL1_0 }
+ MOD_SEL1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
- 1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1, 1,
- 4, 4, 4, 3, 1) {
+ GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1,
+ 1, 4, 4, 4, 3, 1),
+ GROUP(
MOD_SEL2_31
MOD_SEL2_30
MOD_SEL2_29
@@ -5757,7 +5823,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* RESERVED 3, 2, 1 */
0, 0, 0, 0, 0, 0, 0, 0,
- MOD_SEL2_0 }
+ MOD_SEL2_0 ))
},
{ },
};
@@ -5878,7 +5944,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */
{ RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */
{ RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */
- { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */
+ { RCAR_GP_PIN(7, 2), 12, 3 }, /* GP7_02 */
{ RCAR_GP_PIN(7, 3), 8, 3 }, /* GP7_03 */
{ PIN_A_NUMBER('P', 7), 4, 2 }, /* DU_DOTCLKIN0 */
{ PIN_A_NUMBER('P', 8), 0, 2 }, /* DU_DOTCLKIN1 */
@@ -6012,10 +6078,12 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
enum ioctrl_regs {
POCCTRL,
+ TDSELCTRL,
};
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
[POCCTRL] = { 0xe6060380, },
+ [TDSELCTRL] = { 0xe60603c0, },
{ /* sentinel */ },
};
@@ -6132,7 +6200,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[25] = RCAR_GP_PIN(0, 15), /* D15 */
[26] = RCAR_GP_PIN(7, 0), /* AVS1 */
[27] = RCAR_GP_PIN(7, 1), /* AVS2 */
- [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [28] = RCAR_GP_PIN(7, 2), /* GP7_02 */
[29] = RCAR_GP_PIN(7, 3), /* GP7_03 */
[30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
[31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
index c5e67ba29f7c..2d76b548b942 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
@@ -12,6 +12,7 @@
* Copyright (C) 2015 Renesas Electronics Corporation
*/
+#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -171,19 +172,19 @@
#define IP2_15_12 FM(DU_EXHSYNC_DU_HSYNC) FM(HRX0) F_(0, 0) FM(A19) FM(IRQ3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_19_16 FM(DU_EXVSYNC_DU_VSYNC) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_23_20 FM(DU_EXODDF_DU_ODDF_DISP_CDE) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_27_24 FM(IRQ0) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(IRQ0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_31_28 FM(VI0_CLK) FM(MSIOF2_SCK) FM(SCK3) F_(0, 0) FM(HSCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_3_0 FM(VI0_CLKENB) FM(MSIOF2_RXD) FM(RX3) FM(RD_WR_N) FM(HCTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_7_4 FM(VI0_HSYNC_N) FM(MSIOF2_TXD) FM(TX3) F_(0, 0) FM(HRTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_11_8 FM(VI0_VSYNC_N) FM(MSIOF2_SYNC) FM(CTS3_N) F_(0, 0) FM(HTX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_15_12 FM(VI0_DATA0) FM(MSIOF2_SS1) FM(RTS3_N_TANS) F_(0, 0) FM(HRX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_15_12 FM(VI0_DATA0) FM(MSIOF2_SS1) FM(RTS3_N) F_(0, 0) FM(HRX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_19_16 FM(VI0_DATA1) FM(MSIOF2_SS2) FM(SCK1) F_(0, 0) FM(SPEEDIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_23_20 FM(VI0_DATA2) FM(AVB0_AVTP_PPS) FM(SDA3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_27_24 FM(VI0_DATA3) FM(HSCK1) FM(SCL3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_31_28 FM(VI0_DATA4) FM(HRTS1_N) FM(RX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_3_0 FM(VI0_DATA5) FM(HCTS1_N) FM(TX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_7_4 FM(VI0_DATA6) FM(HTX1) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_11_8 FM(VI0_DATA7) FM(HRX1) FM(RTS1_N_TANS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_11_8 FM(VI0_DATA7) FM(HRX1) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_15_12 FM(VI0_DATA8) FM(HSCK2) FM(PWM0_A) FM(A22) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_19_16 FM(VI0_DATA9) FM(HCTS2_N) FM(PWM1_A) FM(A23) FM(FSO_CFE_0_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_23_20 FM(VI0_DATA10) FM(HRTS2_N) FM(PWM2_A) FM(A24) FM(FSO_CFE_1_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -198,18 +199,18 @@
#define IP5_27_24 FM(VI1_DATA2) FM(CANFD0_TX_B) F_(0, 0) FM(D5) FM(MMC_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP5_31_28 FM(VI1_DATA3) FM(CANFD0_RX_B) F_(0, 0) FM(D6) FM(MMC_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_3_0 FM(VI1_DATA4) FM(CANFD_CLK_B) F_(0, 0) FM(D7) FM(MMC_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_7_4 FM(VI1_DATA5) F_(0,0) FM(SCK4) FM(D8) FM(MMC_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_11_8 FM(VI1_DATA6) F_(0,0) FM(RX4) FM(D9) FM(MMC_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_15_12 FM(VI1_DATA7) F_(0,0) FM(TX4) FM(D10) FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_19_16 FM(VI1_DATA8) F_(0,0) FM(CTS4_N) FM(D11) FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_23_20 FM(VI1_DATA9) F_(0,0) FM(RTS4_N_TANS) FM(D12) FM(MMC_D6) FM(SCL3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_27_24 FM(VI1_DATA10) F_(0,0) F_(0, 0) FM(D13) FM(MMC_D7) FM(SDA3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_7_4 FM(VI1_DATA5) F_(0, 0) FM(SCK4) FM(D8) FM(MMC_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_11_8 FM(VI1_DATA6) F_(0, 0) FM(RX4) FM(D9) FM(MMC_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_15_12 FM(VI1_DATA7) F_(0, 0) FM(TX4) FM(D10) FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_19_16 FM(VI1_DATA8) F_(0, 0) FM(CTS4_N) FM(D11) FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_23_20 FM(VI1_DATA9) F_(0, 0) FM(RTS4_N) FM(D12) FM(MMC_D6) FM(SCL3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_27_24 FM(VI1_DATA10) F_(0, 0) F_(0, 0) FM(D13) FM(MMC_D7) FM(SDA3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_31_28 FM(VI1_DATA11) FM(SCL4) FM(IRQ4) FM(D14) FM(MMC_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_3_0 FM(VI1_FIELD) FM(SDA4) FM(IRQ5) FM(D15) FM(MMC_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_7_4 FM(SCL0) FM(DU_DR0) FM(TPU0TO0) FM(CLKOUT) F_(0, 0) FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_11_8 FM(SDA0) FM(DU_DR1) FM(TPU0TO1) FM(BS_N) FM(SCK0) FM(MSIOF0_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_15_12 FM(SCL1) FM(DU_DG0) FM(TPU0TO2) FM(RD_N) FM(CTS0_N) FM(MSIOF0_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_19_16 FM(SDA1) FM(DU_DG1) FM(TPU0TO3) FM(WE0_N) FM(RTS0_N_TANS) FM(MSIOF0_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_19_16 FM(SDA1) FM(DU_DG1) FM(TPU0TO3) FM(WE0_N) FM(RTS0_N) FM(MSIOF0_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_23_20 FM(SCL2) FM(DU_DB0) FM(TCLK1_A) FM(WE1_N) FM(RX0) FM(MSIOF0_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_27_24 FM(SDA2) FM(DU_DB1) FM(TCLK2_A) FM(EX_WAIT0) FM(TX0) FM(MSIOF0_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_31_28 FM(AVB0_AVTP_CAPTURE) F_(0, 0) F_(0, 0) F_(0, 0) FM(FSCLKST2_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -470,7 +471,6 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_23_20, MSIOF3_SYNC),
PINMUX_IPSR_GPSR(IP2_27_24, IRQ0),
- PINMUX_IPSR_GPSR(IP2_27_24, CC5_OSCOUT),
PINMUX_IPSR_GPSR(IP2_31_28, VI0_CLK),
PINMUX_IPSR_GPSR(IP2_31_28, MSIOF2_SCK),
@@ -496,7 +496,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP3_15_12, VI0_DATA0),
PINMUX_IPSR_GPSR(IP3_15_12, MSIOF2_SS1),
- PINMUX_IPSR_GPSR(IP3_15_12, RTS3_N_TANS),
+ PINMUX_IPSR_GPSR(IP3_15_12, RTS3_N),
PINMUX_IPSR_GPSR(IP3_15_12, HRX3),
PINMUX_IPSR_GPSR(IP3_19_16, VI0_DATA1),
@@ -527,7 +527,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP4_11_8, VI0_DATA7),
PINMUX_IPSR_GPSR(IP4_11_8, HRX1),
- PINMUX_IPSR_GPSR(IP4_11_8, RTS1_N_TANS),
+ PINMUX_IPSR_GPSR(IP4_11_8, RTS1_N),
PINMUX_IPSR_GPSR(IP4_15_12, VI0_DATA8),
PINMUX_IPSR_GPSR(IP4_15_12, HSCK2),
@@ -617,7 +617,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP6_19_16, MMC_D5),
PINMUX_IPSR_GPSR(IP6_23_20, VI1_DATA9),
- PINMUX_IPSR_GPSR(IP6_23_20, RTS4_N_TANS),
+ PINMUX_IPSR_GPSR(IP6_23_20, RTS4_N),
PINMUX_IPSR_GPSR(IP6_23_20, D12),
PINMUX_IPSR_GPSR(IP6_23_20, MMC_D6),
PINMUX_IPSR_MSEL(IP6_23_20, SCL3_B, SEL_I2C3_1),
@@ -664,7 +664,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP7_19_16, DU_DG1),
PINMUX_IPSR_GPSR(IP7_19_16, TPU0TO3),
PINMUX_IPSR_GPSR(IP7_19_16, WE0_N),
- PINMUX_IPSR_GPSR(IP7_19_16, RTS0_N_TANS),
+ PINMUX_IPSR_GPSR(IP7_19_16, RTS0_N),
PINMUX_IPSR_GPSR(IP7_19_16, MSIOF0_SYNC),
PINMUX_IPSR_GPSR(IP7_23_20, SCL2),
@@ -1468,7 +1468,7 @@ static const unsigned int scif0_ctrl_pins[] = {
RCAR_GP_PIN(4, 3), RCAR_GP_PIN(4, 2),
};
static const unsigned int scif0_ctrl_mux[] = {
- RTS0_N_TANS_MARK, CTS0_N_MARK,
+ RTS0_N_MARK, CTS0_N_MARK,
};
/* - SCIF1 ------------------------------------------------------------------ */
@@ -1491,7 +1491,7 @@ static const unsigned int scif1_ctrl_pins[] = {
RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
};
static const unsigned int scif1_ctrl_mux[] = {
- RTS1_N_TANS_MARK, CTS1_N_MARK,
+ RTS1_N_MARK, CTS1_N_MARK,
};
static const unsigned int scif1_data_b_pins[] = {
/* RX, TX */
@@ -1521,7 +1521,7 @@ static const unsigned int scif3_ctrl_pins[] = {
RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 3),
};
static const unsigned int scif3_ctrl_mux[] = {
- RTS3_N_TANS_MARK, CTS3_N_MARK,
+ RTS3_N_MARK, CTS3_N_MARK,
};
/* - SCIF4 ------------------------------------------------------------------ */
@@ -1544,7 +1544,7 @@ static const unsigned int scif4_ctrl_pins[] = {
RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
};
static const unsigned int scif4_ctrl_mux[] = {
- RTS4_N_TANS_MARK, CTS4_N_MARK,
+ RTS4_N_MARK, CTS4_N_MARK,
};
/* - TMU -------------------------------------------------------------------- */
@@ -2072,7 +2072,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2104,9 +2104,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, GPSR0_3,
GP_0_2_FN, GPSR0_2,
GP_0_1_FN, GPSR0_1,
- GP_0_0_FN, GPSR0_0, }
+ GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2138,9 +2138,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, GPSR1_3,
GP_1_2_FN, GPSR1_2,
GP_1_1_FN, GPSR1_1,
- GP_1_0_FN, GPSR1_0, }
+ GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2172,9 +2172,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, GPSR2_3,
GP_2_2_FN, GPSR2_2,
GP_2_1_FN, GPSR2_1,
- GP_2_0_FN, GPSR2_0, }
+ GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2206,9 +2206,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, GPSR3_3,
GP_3_2_FN, GPSR3_2,
GP_3_1_FN, GPSR3_1,
- GP_3_0_FN, GPSR3_0, }
+ GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2240,9 +2240,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, GPSR4_3,
GP_4_2_FN, GPSR4_2,
GP_4_1_FN, GPSR4_1,
- GP_4_0_FN, GPSR4_0, }
+ GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2274,14 +2274,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, GPSR5_3,
GP_5_2_FN, GPSR5_2,
GP_5_1_FN, GPSR5_1,
- GP_5_0_FN, GPSR5_0, }
+ GP_5_0_FN, GPSR5_0, ))
},
#undef F_
#undef FM
#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
IP0_31_28
IP0_27_24
IP0_23_20
@@ -2289,9 +2289,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP0_15_12
IP0_11_8
IP0_7_4
- IP0_3_0 }
+ IP0_3_0 ))
},
- { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
IP1_31_28
IP1_27_24
IP1_23_20
@@ -2299,9 +2299,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1_15_12
IP1_11_8
IP1_7_4
- IP1_3_0 }
+ IP1_3_0 ))
},
- { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
IP2_31_28
IP2_27_24
IP2_23_20
@@ -2309,9 +2309,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2_15_12
IP2_11_8
IP2_7_4
- IP2_3_0 }
+ IP2_3_0 ))
},
- { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
IP3_31_28
IP3_27_24
IP3_23_20
@@ -2319,9 +2319,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3_15_12
IP3_11_8
IP3_7_4
- IP3_3_0 }
+ IP3_3_0 ))
},
- { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
IP4_31_28
IP4_27_24
IP4_23_20
@@ -2329,9 +2329,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP4_15_12
IP4_11_8
IP4_7_4
- IP4_3_0 }
+ IP4_3_0 ))
},
- { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
IP5_31_28
IP5_27_24
IP5_23_20
@@ -2339,9 +2339,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP5_15_12
IP5_11_8
IP5_7_4
- IP5_3_0 }
+ IP5_3_0 ))
},
- { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
IP6_31_28
IP6_27_24
IP6_23_20
@@ -2349,9 +2349,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_15_12
IP6_11_8
IP6_7_4
- IP6_3_0 }
+ IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
@@ -2359,9 +2359,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP7_15_12
IP7_11_8
IP7_7_4
- IP7_3_0 }
+ IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
IP8_31_28
IP8_27_24
IP8_23_20
@@ -2369,7 +2369,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP8_15_12
IP8_11_8
IP8_7_4
- IP8_3_0 }
+ IP8_3_0 ))
},
#undef F_
#undef FM
@@ -2377,8 +2377,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- 4, 4, 4, 4, 4,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
/* RESERVED 31, 30, 29, 28 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* RESERVED 27, 26, 25, 24 */
@@ -2400,21 +2401,23 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_3
MOD_SEL0_2
MOD_SEL0_1
- MOD_SEL0_0 }
+ MOD_SEL0_0 ))
},
{ },
};
enum ioctrl_regs {
- IOCTRL30,
- IOCTRL31,
- IOCTRL32,
+ POCCTRL0,
+ POCCTRL1,
+ POCCTRL2,
+ TDSELCTRL,
};
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
- [IOCTRL30] = { 0xe6060380 },
- [IOCTRL31] = { 0xe6060384 },
- [IOCTRL32] = { 0xe6060388 },
+ [POCCTRL0] = { 0xe6060380 },
+ [POCCTRL1] = { 0xe6060384 },
+ [POCCTRL2] = { 0xe6060388 },
+ [TDSELCTRL] = { 0xe60603c0, },
{ /* sentinel */ },
};
@@ -2423,13 +2426,13 @@ static int r8a77970_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
{
int bit = pin & 0x1f;
- *pocctrl = pinmux_ioctrl_regs[IOCTRL30].reg;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL0].reg;
if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 21))
return bit;
if (pin >= RCAR_GP_PIN(2, 0) && pin <= RCAR_GP_PIN(2, 9))
return bit + 22;
- *pocctrl = pinmux_ioctrl_regs[IOCTRL31].reg;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL1].reg;
if (pin >= RCAR_GP_PIN(2, 10) && pin <= RCAR_GP_PIN(2, 16))
return bit - 10;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 16))
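
The MOD_SEL hunks in the diffs above pass two GROUP() lists to PINMUX_CFG_REG_VAR(): one of per-field bit widths and one of enum IDs. The sketch below shows only that two-list shape with invented struct and macro names (the real sh-pfc definitions differ); each GROUP() arrives as a single macro argument, so widths and ids can be materialized and counted independently.

	/* Sketch with hypothetical names, not the kernel's macros. */
	#include <stdio.h>

	#define GROUP(...) __VA_ARGS__

	#define CFG_REG_VAR(name, widths, ids)					\
		{ name,								\
		  (const unsigned char []){ widths },				\
		  sizeof((const unsigned char []){ widths }),			\
		  (const unsigned short []){ ids },				\
		  sizeof((const unsigned short []){ ids }) / sizeof(unsigned short) }

	struct cfg_reg_var {
		const char *name;
		const unsigned char *field_widths;	/* first GROUP() list */
		unsigned int nr_fields;
		const unsigned short *enum_ids;		/* second GROUP() list */
		unsigned int nr_ids;
	};

	static const struct cfg_reg_var mod_sel_example =
		CFG_REG_VAR("MOD_SEL_EXAMPLE", GROUP(3, 2, 1), GROUP(10, 11, 12));

	int main(void)
	{
		printf("%s: %u fields, %u enum ids\n", mod_sel_example.name,
		       mod_sel_example.nr_fields, mod_sel_example.nr_ids);
		return 0;
	}
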
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
index b807b67ae143..473da65890a7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
@@ -12,6 +12,7 @@
* Copyright (C) 2015 Renesas Electronics Corporation
*/
+#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -186,7 +187,7 @@
#define IP0_7_4 FM(DU_DR3) FM(RX4) FM(GETHER_RMII_RX_ER) FM(A1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_11_8 FM(DU_DR4) FM(TX4) FM(GETHER_RMII_RXD0) FM(A2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_15_12 FM(DU_DR5) FM(CTS4_N) FM(GETHER_RMII_RXD1) FM(A3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_19_16 FM(DU_DR6) FM(RTS4_N_TANS) FM(GETHER_RMII_TXD_EN) FM(A4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_19_16 FM(DU_DR6) FM(RTS4_N) FM(GETHER_RMII_TXD_EN) FM(A4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_23_20 FM(DU_DR7) F_(0, 0) FM(GETHER_RMII_TXD0) FM(A5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_27_24 FM(DU_DG2) F_(0, 0) FM(GETHER_RMII_TXD1) FM(A6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_31_28 FM(DU_DG3) FM(CPG_CPCKOUT) FM(GETHER_RMII_REFCLK) FM(A7) FM(PWMFSW0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -204,19 +205,19 @@
#define IP2_15_12 FM(DU_EXHSYNC_DU_HSYNC) FM(MSIOF3_SS2) FM(GETHER_PHY_INT_B) FM(A19) FM(FXR_TXENA_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_19_16 FM(DU_EXVSYNC_DU_VSYNC) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_23_20 FM(DU_EXODDF_DU_ODDF_DISP_CDE) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_27_24 FM(IRQ0) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(IRQ0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_31_28 FM(VI0_CLK) FM(MSIOF2_SCK) FM(SCK3) F_(0, 0) FM(HSCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_3_0 FM(VI0_CLKENB) FM(MSIOF2_RXD) FM(RX3) FM(RD_WR_N) FM(HCTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_7_4 FM(VI0_HSYNC_N) FM(MSIOF2_TXD) FM(TX3) F_(0, 0) FM(HRTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_11_8 FM(VI0_VSYNC_N) FM(MSIOF2_SYNC) FM(CTS3_N) F_(0, 0) FM(HTX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_15_12 FM(VI0_DATA0) FM(MSIOF2_SS1) FM(RTS3_N_TANS) F_(0, 0) FM(HRX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_15_12 FM(VI0_DATA0) FM(MSIOF2_SS1) FM(RTS3_N) F_(0, 0) FM(HRX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_19_16 FM(VI0_DATA1) FM(MSIOF2_SS2) FM(SCK1) F_(0, 0) FM(SPEEDIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_23_20 FM(VI0_DATA2) FM(AVB_AVTP_PPS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_27_24 FM(VI0_DATA3) FM(HSCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_31_28 FM(VI0_DATA4) FM(HRTS1_N) FM(RX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_3_0 FM(VI0_DATA5) FM(HCTS1_N) FM(TX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_7_4 FM(VI0_DATA6) FM(HTX1) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_11_8 FM(VI0_DATA7) FM(HRX1) FM(RTS1_N_TANS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_11_8 FM(VI0_DATA7) FM(HRX1) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_15_12 FM(VI0_DATA8) FM(HSCK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_19_16 FM(VI0_DATA9) FM(HCTS2_N) FM(PWM1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP4_23_20 FM(VI0_DATA10) FM(HRTS2_N) FM(PWM2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -242,7 +243,7 @@
#define IP7_7_4 FM(SCL0) F_(0, 0) F_(0, 0) FM(CLKOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_11_8 FM(SDA0) F_(0, 0) F_(0, 0) FM(BS_N) FM(SCK0) FM(HSCK0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_15_12 FM(SCL1) F_(0, 0) FM(TPU0TO2) FM(RD_N) FM(CTS0_N) FM(HCTS0_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_19_16 FM(SDA1) F_(0, 0) FM(TPU0TO3) FM(WE0_N) FM(RTS0_N_TANS) FM(HRTS0_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_19_16 FM(SDA1) F_(0, 0) FM(TPU0TO3) FM(WE0_N) FM(RTS0_N) FM(HRTS0_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_23_20 FM(SCL2) F_(0, 0) F_(0, 0) FM(WE1_N) FM(RX0) FM(HRX0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_27_24 FM(SDA2) F_(0, 0) F_(0, 0) FM(EX_WAIT0) FM(TX0) FM(HTX0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP7_31_28 FM(AVB_AVTP_MATCH) FM(TPU0TO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -469,7 +470,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP0_15_12, A3),
PINMUX_IPSR_GPSR(IP0_19_16, DU_DR6),
- PINMUX_IPSR_GPSR(IP0_19_16, RTS4_N_TANS),
+ PINMUX_IPSR_GPSR(IP0_19_16, RTS4_N),
PINMUX_IPSR_GPSR(IP0_19_16, GETHER_RMII_TXD_EN),
PINMUX_IPSR_GPSR(IP0_19_16, A4),
@@ -554,7 +555,6 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_23_20, MSIOF3_SYNC),
PINMUX_IPSR_GPSR(IP2_27_24, IRQ0),
- PINMUX_IPSR_GPSR(IP2_27_24, CC5_OSCOUT),
PINMUX_IPSR_GPSR(IP2_31_28, VI0_CLK),
PINMUX_IPSR_GPSR(IP2_31_28, MSIOF2_SCK),
@@ -580,7 +580,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP3_15_12, VI0_DATA0),
PINMUX_IPSR_GPSR(IP3_15_12, MSIOF2_SS1),
- PINMUX_IPSR_GPSR(IP3_15_12, RTS3_N_TANS),
+ PINMUX_IPSR_GPSR(IP3_15_12, RTS3_N),
PINMUX_IPSR_GPSR(IP3_15_12, HRX3),
PINMUX_IPSR_GPSR(IP3_19_16, VI0_DATA1),
@@ -609,7 +609,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP4_11_8, VI0_DATA7),
PINMUX_IPSR_GPSR(IP4_11_8, HRX1),
- PINMUX_IPSR_GPSR(IP4_11_8, RTS1_N_TANS),
+ PINMUX_IPSR_GPSR(IP4_11_8, RTS1_N),
PINMUX_IPSR_GPSR(IP4_15_12, VI0_DATA8),
PINMUX_IPSR_GPSR(IP4_15_12, HSCK2),
@@ -728,7 +728,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP7_19_16, SDA1),
PINMUX_IPSR_GPSR(IP7_19_16, TPU0TO3),
PINMUX_IPSR_GPSR(IP7_19_16, WE0_N),
- PINMUX_IPSR_GPSR(IP7_19_16, RTS0_N_TANS),
+ PINMUX_IPSR_GPSR(IP7_19_16, RTS0_N),
PINMUX_IPSR_MSEL(IP1_23_20, HRTS0_N_B, SEL_HSCIF0_1),
PINMUX_IPSR_GPSR(IP7_23_20, SCL2),
@@ -1726,11 +1726,11 @@ static const unsigned int scif0_clk_mux[] = {
SCK0_MARK,
};
static const unsigned int scif0_ctrl_pins[] = {
- /* RTS0#/TANS, CTS0# */
+ /* RTS0#, CTS0# */
RCAR_GP_PIN(4, 3), RCAR_GP_PIN(4, 2),
};
static const unsigned int scif0_ctrl_mux[] = {
- RTS0_N_TANS_MARK, CTS0_N_MARK,
+ RTS0_N_MARK, CTS0_N_MARK,
};
/* - SCIF1 ------------------------------------------------------------------ */
@@ -1749,11 +1749,11 @@ static const unsigned int scif1_clk_mux[] = {
SCK1_MARK,
};
static const unsigned int scif1_ctrl_pins[] = {
- /* RTS1#/TANS, CTS1# */
+ /* RTS1#, CTS1# */
RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
};
static const unsigned int scif1_ctrl_mux[] = {
- RTS1_N_TANS_MARK, CTS1_N_MARK,
+ RTS1_N_MARK, CTS1_N_MARK,
};
static const unsigned int scif1_data_b_pins[] = {
/* RX1, TX1 */
@@ -1779,11 +1779,11 @@ static const unsigned int scif3_clk_mux[] = {
SCK3_MARK,
};
static const unsigned int scif3_ctrl_pins[] = {
- /* RTS3#/TANS, CTS3# */
+ /* RTS3#, CTS3# */
RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 3),
};
static const unsigned int scif3_ctrl_mux[] = {
- RTS3_N_TANS_MARK, CTS3_N_MARK,
+ RTS3_N_MARK, CTS3_N_MARK,
};
/* - SCIF4 ------------------------------------------------------------------ */
@@ -1802,11 +1802,11 @@ static const unsigned int scif4_clk_mux[] = {
SCK4_MARK,
};
static const unsigned int scif4_ctrl_pins[] = {
- /* RTS4#/TANS, CTS4# */
+ /* RTS4#, CTS4# */
RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 3),
};
static const unsigned int scif4_ctrl_mux[] = {
- RTS4_N_TANS_MARK, CTS4_N_MARK,
+ RTS4_N_MARK, CTS4_N_MARK,
};
/* - SCIF Clock ------------------------------------------------------------- */
@@ -2474,7 +2474,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2506,9 +2506,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, GPSR0_3,
GP_0_2_FN, GPSR0_2,
GP_0_1_FN, GPSR0_1,
- GP_0_0_FN, GPSR0_0, }
+ GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2540,9 +2540,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, GPSR1_3,
GP_1_2_FN, GPSR1_2,
GP_1_1_FN, GPSR1_1,
- GP_1_0_FN, GPSR1_0, }
+ GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
0, 0,
0, 0,
GP_2_29_FN, GPSR2_29,
@@ -2574,9 +2574,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, GPSR2_3,
GP_2_2_FN, GPSR2_2,
GP_2_1_FN, GPSR2_1,
- GP_2_0_FN, GPSR2_0, }
+ GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2608,9 +2608,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, GPSR3_3,
GP_3_2_FN, GPSR3_2,
GP_3_1_FN, GPSR3_1,
- GP_3_0_FN, GPSR3_0, }
+ GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2642,9 +2642,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, GPSR4_3,
GP_4_2_FN, GPSR4_2,
GP_4_1_FN, GPSR4_1,
- GP_4_0_FN, GPSR4_0, }
+ GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2676,14 +2676,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, GPSR5_3,
GP_5_2_FN, GPSR5_2,
GP_5_1_FN, GPSR5_1,
- GP_5_0_FN, GPSR5_0, }
+ GP_5_0_FN, GPSR5_0, ))
},
#undef F_
#undef FM
#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
IP0_31_28
IP0_27_24
IP0_23_20
@@ -2691,9 +2691,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP0_15_12
IP0_11_8
IP0_7_4
- IP0_3_0 }
+ IP0_3_0 ))
},
- { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
IP1_31_28
IP1_27_24
IP1_23_20
@@ -2701,9 +2701,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1_15_12
IP1_11_8
IP1_7_4
- IP1_3_0 }
+ IP1_3_0 ))
},
- { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
IP2_31_28
IP2_27_24
IP2_23_20
@@ -2711,9 +2711,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2_15_12
IP2_11_8
IP2_7_4
- IP2_3_0 }
+ IP2_3_0 ))
},
- { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
IP3_31_28
IP3_27_24
IP3_23_20
@@ -2721,9 +2721,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3_15_12
IP3_11_8
IP3_7_4
- IP3_3_0 }
+ IP3_3_0 ))
},
- { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
IP4_31_28
IP4_27_24
IP4_23_20
@@ -2731,9 +2731,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP4_15_12
IP4_11_8
IP4_7_4
- IP4_3_0 }
+ IP4_3_0 ))
},
- { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
IP5_31_28
IP5_27_24
IP5_23_20
@@ -2741,9 +2741,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP5_15_12
IP5_11_8
IP5_7_4
- IP5_3_0 }
+ IP5_3_0 ))
},
- { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
IP6_31_28
IP6_27_24
IP6_23_20
@@ -2751,9 +2751,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_15_12
IP6_11_8
IP6_7_4
- IP6_3_0 }
+ IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
@@ -2761,9 +2761,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP7_15_12
IP7_11_8
IP7_7_4
- IP7_3_0 }
+ IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
IP8_31_28
IP8_27_24
IP8_23_20
@@ -2771,9 +2771,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP8_15_12
IP8_11_8
IP8_7_4
- IP8_3_0 }
+ IP8_3_0 ))
},
- { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
IP9_31_28
IP9_27_24
IP9_23_20
@@ -2781,9 +2781,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP9_15_12
IP9_11_8
IP9_7_4
- IP9_3_0 }
+ IP9_3_0 ))
},
- { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
IP10_31_28
IP10_27_24
IP10_23_20
@@ -2791,7 +2791,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP10_15_12
IP10_11_8
IP10_7_4
- IP10_3_0 }
+ IP10_3_0 ))
},
#undef F_
#undef FM
@@ -2799,8 +2799,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- 4, 4, 4, 4, 4,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
/* RESERVED 31, 30, 29, 28 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* RESERVED 27, 26, 25, 24 */
@@ -2822,23 +2823,25 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
MOD_SEL0_2
MOD_SEL0_1
- MOD_SEL0_0 }
+ MOD_SEL0_0 ))
},
{ },
};
enum ioctrl_regs {
- IOCTRL30,
- IOCTRL31,
- IOCTRL32,
- IOCTRL33,
+ POCCTRL0,
+ POCCTRL1,
+ POCCTRL2,
+ POCCTRL3,
+ TDSELCTRL,
};
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
- [IOCTRL30] = { 0xe6060380, },
- [IOCTRL31] = { 0xe6060384, },
- [IOCTRL32] = { 0xe6060388, },
- [IOCTRL33] = { 0xe606038c, },
+ [POCCTRL0] = { 0xe6060380, },
+ [POCCTRL1] = { 0xe6060384, },
+ [POCCTRL2] = { 0xe6060388, },
+ [POCCTRL3] = { 0xe606038c, },
+ [TDSELCTRL] = { 0xe60603c0, },
{ /* sentinel */ },
};
@@ -2847,20 +2850,20 @@ static int r8a77980_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
{
int bit = pin & 0x1f;
- *pocctrl = pinmux_ioctrl_regs[IOCTRL30].reg;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL0].reg;
if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 21))
return bit;
else if (pin >= RCAR_GP_PIN(2, 0) && pin <= RCAR_GP_PIN(2, 9))
return bit + 22;
- *pocctrl = pinmux_ioctrl_regs[IOCTRL31].reg;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL1].reg;
if (pin >= RCAR_GP_PIN(2, 10) && pin <= RCAR_GP_PIN(2, 16))
return bit - 10;
if ((pin >= RCAR_GP_PIN(2, 17) && pin <= RCAR_GP_PIN(2, 24)) ||
(pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 16)))
return bit + 7;
- *pocctrl = pinmux_ioctrl_regs[IOCTRL32].reg;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL2].reg;
if (pin >= RCAR_GP_PIN(2, 25) && pin <= RCAR_GP_PIN(2, 29))
return pin - 25;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index 151640c30e9d..91a837b02a36 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -2,7 +2,7 @@
/*
* R8A77990 processor support - PFC hardware block.
*
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*
* This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7796.c
*
@@ -11,6 +11,7 @@
* Copyright (C) 2016-2017 Renesas Electronics Corp.
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include "core.h"
@@ -165,7 +166,7 @@
#define GPSR5_7 F_(SCK2_A, IP12_7_4)
#define GPSR5_6 F_(TX1, IP12_3_0)
#define GPSR5_5 F_(RX1, IP11_31_28)
-#define GPSR5_4 F_(RTS0_N_TANS_A, IP11_23_20)
+#define GPSR5_4 F_(RTS0_N_A, IP11_23_20)
#define GPSR5_3 F_(CTS0_N_A, IP11_19_16)
#define GPSR5_2 F_(TX0_A, IP11_15_12)
#define GPSR5_1 F_(RX0_A, IP11_11_8)
@@ -219,7 +220,7 @@
#define IP3_3_0 FM(A1) FM(IRQ1) FM(PWM3_A) FM(DU_DOTCLKIN1) FM(VI5_DATA0_A) FM(DU_DISP_CDE) FM(SDA6_B) FM(IETX) FM(QCPV_QDE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_7_4 FM(A2) FM(IRQ2) FM(AVB_AVTP_PPS) FM(VI4_CLKENB) FM(VI5_DATA1_A) FM(DU_DISP) FM(SCL6_B) F_(0, 0) FM(QSTVB_QVE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_11_8 FM(A3) FM(CTS4_N_A) FM(PWM4_A) FM(VI4_DATA12) F_(0, 0) FM(DU_DOTCLKOUT0) FM(HTX3_D) FM(IECLK) FM(LCDOUT12) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_15_12 FM(A4) FM(RTS4_N_TANS_A) FM(MSIOF3_SYNC_B) FM(VI4_DATA8) FM(PWM2_B) FM(DU_DG4) FM(RIF2_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_15_12 FM(A4) FM(RTS4_N_A) FM(MSIOF3_SYNC_B) FM(VI4_DATA8) FM(PWM2_B) FM(DU_DG4) FM(RIF2_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_19_16 FM(A5) FM(SCK4_A) FM(MSIOF3_SCK_B) FM(VI4_DATA9) FM(PWM3_B) F_(0, 0) FM(RIF2_SYNC_B) F_(0, 0) FM(QPOLA) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_23_20 FM(A6) FM(RX4_A) FM(MSIOF3_RXD_B) FM(VI4_DATA10) F_(0, 0) F_(0, 0) FM(RIF2_D0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_27_24 FM(A7) FM(TX4_A) FM(MSIOF3_TXD_B) FM(VI4_DATA11) F_(0, 0) F_(0, 0) FM(RIF2_D1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -240,10 +241,10 @@
#define IP5_15_12 FM(CS0_N) FM(SCL5) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR0) FM(VI4_DATA2_B) F_(0, 0) FM(LCDOUT16) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP5_19_16 FM(WE0_N) FM(SDA5) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR1) FM(VI4_DATA3_B) F_(0, 0) FM(LCDOUT17) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP5_23_20 FM(D0) FM(MSIOF3_SCK_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR2) FM(CTS4_N_C) F_(0, 0) FM(LCDOUT18) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_27_24 FM(D1) FM(MSIOF3_SYNC_A) FM(SCK3_A) FM(VI4_DATA23) FM(VI5_CLKENB_A) FM(DU_DB7) FM(RTS4_N_TANS_C) F_(0, 0) FM(LCDOUT7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_27_24 FM(D1) FM(MSIOF3_SYNC_A) FM(SCK3_A) FM(VI4_DATA23) FM(VI5_CLKENB_A) FM(DU_DB7) FM(RTS4_N_C) F_(0, 0) FM(LCDOUT7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP5_31_28 FM(D2) FM(MSIOF3_RXD_A) FM(RX5_C) F_(0, 0) FM(VI5_DATA14_A) FM(DU_DR3) FM(RX4_C) F_(0, 0) FM(LCDOUT19) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_3_0 FM(D3) FM(MSIOF3_TXD_A) FM(TX5_C) F_(0, 0) FM(VI5_DATA15_A) FM(DU_DR4) FM(TX4_C) F_(0, 0) FM(LCDOUT20) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_7_4 FM(D4) FM(CANFD1_TX) FM(HSCK3_B) FM(CAN1_TX) FM(RTS3_N_TANS_A) FM(MSIOF3_SS2_A) F_(0, 0) FM(VI5_DATA1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_7_4 FM(D4) FM(CANFD1_TX) FM(HSCK3_B) FM(CAN1_TX) FM(RTS3_N_A) FM(MSIOF3_SS2_A) F_(0, 0) FM(VI5_DATA1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_11_8 FM(D5) FM(RX3_A) FM(HRX3_B) F_(0, 0) F_(0, 0) FM(DU_DR5) FM(VI4_DATA4_B) F_(0, 0) FM(LCDOUT21) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_15_12 FM(D6) FM(TX3_A) FM(HTX3_B) F_(0, 0) F_(0, 0) FM(DU_DR6) FM(VI4_DATA5_B) F_(0, 0) FM(LCDOUT22) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP6_19_16 FM(D7) FM(CANFD1_RX) FM(IRQ5) FM(CAN1_RX) FM(CTS3_N_A) F_(0, 0) F_(0, 0) FM(VI5_DATA2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -289,8 +290,8 @@
#define IP11_11_8 FM(RX0_A) FM(HRX1_A) FM(SSI_SCK2_A) FM(RIF1_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_15_12 FM(TX0_A) FM(HTX1_A) FM(SSI_WS2_A) FM(RIF1_D0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDAT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_19_16 FM(CTS0_N_A) FM(NFDATA14_A) FM(AUDIO_CLKOUT_A) FM(RIF1_D1) FM(SCIF_CLK_A) FM(FMCLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_23_20 FM(RTS0_N_TANS_A) FM(NFDATA15_A) FM(AUDIO_CLKOUT1_A) FM(RIF1_CLK) FM(SCL2_A) FM(FMIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_27_24 FM(SCK0_A) FM(HSCK1_A) FM(USB3HS0_ID) FM(RTS1_N_TANS) FM(SDA2_A) FM(FMCLK_C) F_(0, 0) F_(0, 0) FM(USB0_ID) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_23_20 FM(RTS0_N_A) FM(NFDATA15_A) FM(AUDIO_CLKOUT1_A) FM(RIF1_CLK) FM(SCL2_A) FM(FMIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24 FM(SCK0_A) FM(HSCK1_A) FM(USB3HS0_ID) FM(RTS1_N) FM(SDA2_A) FM(FMCLK_C) F_(0, 0) F_(0, 0) FM(USB0_ID) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_31_28 FM(RX1) FM(HRX2_B) FM(SSI_SCK9_B) FM(AUDIO_CLKOUT1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
@@ -414,7 +415,7 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define MOD_SEL0_22 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1)
#define MOD_SEL0_21_20 REV4(FM(SEL_I2C1_0), FM(SEL_I2C1_1), FM(SEL_I2C1_2), FM(SEL_I2C1_3))
#define MOD_SEL0_19_18_17 REV8(FM(SEL_I2C2_0), FM(SEL_I2C2_1), FM(SEL_I2C2_2), FM(SEL_I2C2_3), FM(SEL_I2C2_4), F_(0, 0), F_(0, 0), F_(0, 0))
-#define MOD_SEL0_16 FM(SEL_NDFC_0) FM(SEL_NDFC_1)
+#define MOD_SEL0_16 FM(SEL_NDF_0) FM(SEL_NDF_1)
#define MOD_SEL0_15 FM(SEL_PWM0_0) FM(SEL_PWM0_1)
#define MOD_SEL0_14 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
#define MOD_SEL0_13_12 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0))
@@ -429,8 +430,6 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
-#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
-#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
#define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
#define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
#define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
@@ -451,8 +450,7 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define PINMUX_MOD_SELS \
\
- MOD_SEL1_31 \
-MOD_SEL0_30_29 MOD_SEL1_30 \
+MOD_SEL0_30_29 \
MOD_SEL1_29 \
MOD_SEL0_28 MOD_SEL1_28 \
MOD_SEL0_27_26 \
@@ -671,7 +669,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP3_11_8, LCDOUT12),
PINMUX_IPSR_GPSR(IP3_15_12, A4),
- PINMUX_IPSR_MSEL(IP3_15_12, RTS4_N_TANS_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_15_12, RTS4_N_A, SEL_SCIF4_0),
PINMUX_IPSR_MSEL(IP3_15_12, MSIOF3_SYNC_B, SEL_MSIOF3_1),
PINMUX_IPSR_GPSR(IP3_15_12, VI4_DATA8),
PINMUX_IPSR_MSEL(IP3_15_12, PWM2_B, SEL_PWM2_1),
@@ -821,7 +819,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP5_27_24, VI4_DATA23),
PINMUX_IPSR_MSEL(IP5_27_24, VI5_CLKENB_A, SEL_VIN5_0),
PINMUX_IPSR_GPSR(IP5_27_24, DU_DB7),
- PINMUX_IPSR_MSEL(IP5_27_24, RTS4_N_TANS_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MSEL(IP5_27_24, RTS4_N_C, SEL_SCIF4_2),
PINMUX_IPSR_GPSR(IP5_27_24, LCDOUT7),
PINMUX_IPSR_GPSR(IP5_31_28, D2),
@@ -845,7 +843,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP6_7_4, CANFD1_TX),
PINMUX_IPSR_MSEL(IP6_7_4, HSCK3_B, SEL_HSCIF3_1),
PINMUX_IPSR_GPSR(IP6_7_4, CAN1_TX),
- PINMUX_IPSR_MSEL(IP6_7_4, RTS3_N_TANS_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP6_7_4, RTS3_N_A, SEL_SCIF3_0),
PINMUX_IPSR_GPSR(IP6_7_4, MSIOF3_SS2_A),
PINMUX_IPSR_MSEL(IP6_7_4, VI5_DATA1_B, SEL_VIN5_1),
@@ -984,23 +982,23 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP8_19_16, REMOCON_C, SEL_REMOCON_2),
PINMUX_IPSR_GPSR(IP8_23_20, SD1_CLK),
- PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDF_1),
PINMUX_IPSR_GPSR(IP8_27_24, SD1_CMD),
- PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDF_1),
PINMUX_IPSR_GPSR(IP8_31_28, SD1_DAT0),
- PINMUX_IPSR_MSEL(IP8_31_28, NFWP_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP8_31_28, NFWP_N_B, SEL_NDF_1),
/* IPSR9 */
PINMUX_IPSR_GPSR(IP9_3_0, SD1_DAT1),
- PINMUX_IPSR_MSEL(IP9_3_0, NFCE_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP9_3_0, NFCE_N_B, SEL_NDF_1),
PINMUX_IPSR_GPSR(IP9_7_4, SD1_DAT2),
- PINMUX_IPSR_MSEL(IP9_7_4, NFALE_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP9_7_4, NFALE_B, SEL_NDF_1),
PINMUX_IPSR_GPSR(IP9_11_8, SD1_DAT3),
- PINMUX_IPSR_MSEL(IP9_11_8, NFRB_N_B, SEL_NDFC_1),
+ PINMUX_IPSR_MSEL(IP9_11_8, NFRB_N_B, SEL_NDF_1),
PINMUX_IPSR_GPSR(IP9_15_12, SD3_CLK),
PINMUX_IPSR_GPSR(IP9_15_12, NFWE_N),
@@ -1037,57 +1035,57 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP10_23_20, NFCLE),
PINMUX_IPSR_GPSR(IP10_27_24, SD0_CD),
- PINMUX_IPSR_GPSR(IP10_27_24, NFALE_A),
+ PINMUX_IPSR_MSEL(IP10_27_24, NFALE_A, SEL_NDF_0),
PINMUX_IPSR_GPSR(IP10_27_24, SD3_CD),
PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP10_27_24, SSI_SCK2_B),
PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0),
PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP),
- PINMUX_IPSR_GPSR(IP10_31_28, NFRB_N_A),
+ PINMUX_IPSR_MSEL(IP10_31_28, NFRB_N_A, SEL_NDF_0),
PINMUX_IPSR_GPSR(IP10_31_28, SD3_WP),
PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP10_31_28, SSI_WS2_B),
PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0),
/* IPSR11 */
PINMUX_IPSR_GPSR(IP11_3_0, SD1_CD),
- PINMUX_IPSR_MSEL(IP11_3_0, NFCE_N_A, SEL_NDFC_0),
+ PINMUX_IPSR_MSEL(IP11_3_0, NFCE_N_A, SEL_NDF_0),
PINMUX_IPSR_GPSR(IP11_3_0, SSI_SCK1),
PINMUX_IPSR_MSEL(IP11_3_0, RIF0_D1_B, SEL_DRIF0_1),
PINMUX_IPSR_GPSR(IP11_3_0, TS_SDEN0),
PINMUX_IPSR_GPSR(IP11_7_4, SD1_WP),
- PINMUX_IPSR_MSEL(IP11_7_4, NFWP_N_A, SEL_NDFC_0),
+ PINMUX_IPSR_MSEL(IP11_7_4, NFWP_N_A, SEL_NDF_0),
PINMUX_IPSR_GPSR(IP11_7_4, SSI_WS1),
PINMUX_IPSR_MSEL(IP11_7_4, RIF0_SYNC_B, SEL_DRIF0_1),
PINMUX_IPSR_GPSR(IP11_7_4, TS_SPSYNC0),
PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0),
- PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0),
+ PINMUX_IPSR_GPSR(IP11_11_8, SSI_SCK2_A),
PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0),
PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
- PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
+ PINMUX_IPSR_GPSR(IP11_15_12, SSI_WS2_A),
PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1),
PINMUX_IPSR_MSEL(IP11_19_16, CTS0_N_A, SEL_SCIF0_0),
- PINMUX_IPSR_MSEL(IP11_19_16, NFDATA14_A, SEL_NDFC_0),
+ PINMUX_IPSR_MSEL(IP11_19_16, NFDATA14_A, SEL_NDF_0),
PINMUX_IPSR_GPSR(IP11_19_16, AUDIO_CLKOUT_A),
PINMUX_IPSR_GPSR(IP11_19_16, RIF1_D1),
PINMUX_IPSR_MSEL(IP11_19_16, SCIF_CLK_A, SEL_SCIF_0),
PINMUX_IPSR_MSEL(IP11_19_16, FMCLK_A, SEL_FM_0),
- PINMUX_IPSR_MSEL(IP11_23_20, RTS0_N_TANS_A, SEL_SCIF0_0),
- PINMUX_IPSR_MSEL(IP11_23_20, NFDATA15_A, SEL_NDFC_0),
+ PINMUX_IPSR_MSEL(IP11_23_20, RTS0_N_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_23_20, NFDATA15_A, SEL_NDF_0),
PINMUX_IPSR_GPSR(IP11_23_20, AUDIO_CLKOUT1_A),
PINMUX_IPSR_GPSR(IP11_23_20, RIF1_CLK),
PINMUX_IPSR_MSEL(IP11_23_20, SCL2_A, SEL_I2C2_0),
@@ -1096,7 +1094,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_27_24, SCK0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_27_24, HSCK1_A, SEL_HSCIF1_0),
PINMUX_IPSR_GPSR(IP11_27_24, USB3HS0_ID),
- PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N_TANS),
+ PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N),
PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP11_27_24, FMCLK_C, SEL_FM_2),
PINMUX_IPSR_GPSR(IP11_27_24, USB0_ID),
@@ -1180,7 +1178,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0),
PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1),
PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1),
- PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
+ PINMUX_IPSR_GPSR(IP13_19_16, SIM0_D_A),
PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1),
@@ -1248,7 +1246,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2),
PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3),
PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1),
- PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1),
+ PINMUX_IPSR_GPSR(IP15_15_12, SIM0_D_B),
PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6),
PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
@@ -2839,7 +2837,7 @@ static const unsigned int scif0_ctrl_a_pins[] = {
};
static const unsigned int scif0_ctrl_a_mux[] = {
- RTS0_N_TANS_A_MARK, CTS0_N_A_MARK,
+ RTS0_N_A_MARK, CTS0_N_A_MARK,
};
static const unsigned int scif0_data_b_pins[] = {
@@ -2885,7 +2883,7 @@ static const unsigned int scif1_ctrl_pins[] = {
};
static const unsigned int scif1_ctrl_mux[] = {
- RTS1_N_TANS_MARK, CTS1_N_MARK,
+ RTS1_N_MARK, CTS1_N_MARK,
};
/* - SCIF2 ------------------------------------------------------------------ */
@@ -2941,7 +2939,7 @@ static const unsigned int scif3_ctrl_a_pins[] = {
};
static const unsigned int scif3_ctrl_a_mux[] = {
- RTS3_N_TANS_A_MARK, CTS3_N_A_MARK,
+ RTS3_N_A_MARK, CTS3_N_A_MARK,
};
static const unsigned int scif3_data_b_pins[] = {
@@ -2996,7 +2994,7 @@ static const unsigned int scif4_ctrl_a_pins[] = {
};
static const unsigned int scif4_ctrl_a_mux[] = {
- RTS4_N_TANS_A_MARK, CTS4_N_A_MARK,
+ RTS4_N_A_MARK, CTS4_N_A_MARK,
};
static const unsigned int scif4_data_b_pins[] = {
@@ -3032,7 +3030,7 @@ static const unsigned int scif4_ctrl_c_pins[] = {
};
static const unsigned int scif4_ctrl_c_mux[] = {
- RTS4_N_TANS_C_MARK, CTS4_N_C_MARK,
+ RTS4_N_C_MARK, CTS4_N_C_MARK,
};
/* - SCIF5 ------------------------------------------------------------------ */
@@ -3766,8 +3764,8 @@ static const unsigned int vin5_clk_b_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[245];
- struct sh_pfc_pin_group automotive[23];
+ struct sh_pfc_pin_group common[247];
+ struct sh_pfc_pin_group automotive[21];
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a),
@@ -3798,6 +3796,8 @@ static const struct {
SH_PFC_PIN_GROUP(can0_data),
SH_PFC_PIN_GROUP(can1_data),
SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(canfd0_data),
+ SH_PFC_PIN_GROUP(canfd1_data),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_in_0),
@@ -4017,8 +4017,6 @@ static const struct {
SH_PFC_PIN_GROUP(vin5_clk_b),
},
.automotive = {
- SH_PFC_PIN_GROUP(canfd0_data),
- SH_PFC_PIN_GROUP(canfd1_data),
SH_PFC_PIN_GROUP(drif0_ctrl_a),
SH_PFC_PIN_GROUP(drif0_data0_a),
SH_PFC_PIN_GROUP(drif0_data1_a),
@@ -4465,8 +4463,8 @@ static const char * const vin5_groups[] = {
};
static const struct {
- struct sh_pfc_function common[45];
- struct sh_pfc_function automotive[6];
+ struct sh_pfc_function common[47];
+ struct sh_pfc_function automotive[4];
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -4474,6 +4472,8 @@ static const struct {
SH_PFC_FUNCTION(can0),
SH_PFC_FUNCTION(can1),
SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
SH_PFC_FUNCTION(du),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
@@ -4516,8 +4516,6 @@ static const struct {
SH_PFC_FUNCTION(vin5),
},
.automotive = {
- SH_PFC_FUNCTION(canfd0),
- SH_PFC_FUNCTION(canfd1),
SH_PFC_FUNCTION(drif0),
SH_PFC_FUNCTION(drif1),
SH_PFC_FUNCTION(drif2),
@@ -4528,7 +4526,7 @@ static const struct {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4560,9 +4558,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, GPSR0_3,
GP_0_2_FN, GPSR0_2,
GP_0_1_FN, GPSR0_1,
- GP_0_0_FN, GPSR0_0, }
+ GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4594,9 +4592,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, GPSR1_3,
GP_1_2_FN, GPSR1_2,
GP_1_1_FN, GPSR1_1,
- GP_1_0_FN, GPSR1_0, }
+ GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4628,9 +4626,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, GPSR2_3,
GP_2_2_FN, GPSR2_2,
GP_2_1_FN, GPSR2_1,
- GP_2_0_FN, GPSR2_0, }
+ GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4662,9 +4660,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, GPSR3_3,
GP_3_2_FN, GPSR3_2,
GP_3_1_FN, GPSR3_1,
- GP_3_0_FN, GPSR3_0, }
+ GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4696,9 +4694,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, GPSR4_3,
GP_4_2_FN, GPSR4_2,
GP_4_1_FN, GPSR4_1,
- GP_4_0_FN, GPSR4_0, }
+ GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4730,9 +4728,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, GPSR5_3,
GP_5_2_FN, GPSR5_2,
GP_5_1_FN, GPSR5_1,
- GP_5_0_FN, GPSR5_0, }
+ GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4764,14 +4762,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, GPSR6_3,
GP_6_2_FN, GPSR6_2,
GP_6_1_FN, GPSR6_1,
- GP_6_0_FN, GPSR6_0, }
+ GP_6_0_FN, GPSR6_0, ))
},
#undef F_
#undef FM
#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
IP0_31_28
IP0_27_24
IP0_23_20
@@ -4779,9 +4777,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP0_15_12
IP0_11_8
IP0_7_4
- IP0_3_0 }
+ IP0_3_0 ))
},
- { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
IP1_31_28
IP1_27_24
IP1_23_20
@@ -4789,9 +4787,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1_15_12
IP1_11_8
IP1_7_4
- IP1_3_0 }
+ IP1_3_0 ))
},
- { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
IP2_31_28
IP2_27_24
IP2_23_20
@@ -4799,9 +4797,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2_15_12
IP2_11_8
IP2_7_4
- IP2_3_0 }
+ IP2_3_0 ))
},
- { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
IP3_31_28
IP3_27_24
IP3_23_20
@@ -4809,9 +4807,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3_15_12
IP3_11_8
IP3_7_4
- IP3_3_0 }
+ IP3_3_0 ))
},
- { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
IP4_31_28
IP4_27_24
IP4_23_20
@@ -4819,9 +4817,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP4_15_12
IP4_11_8
IP4_7_4
- IP4_3_0 }
+ IP4_3_0 ))
},
- { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
IP5_31_28
IP5_27_24
IP5_23_20
@@ -4829,9 +4827,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP5_15_12
IP5_11_8
IP5_7_4
- IP5_3_0 }
+ IP5_3_0 ))
},
- { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
IP6_31_28
IP6_27_24
IP6_23_20
@@ -4839,9 +4837,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_15_12
IP6_11_8
IP6_7_4
- IP6_3_0 }
+ IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
@@ -4849,9 +4847,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP7_15_12
IP7_11_8
IP7_7_4
- IP7_3_0 }
+ IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
IP8_31_28
IP8_27_24
IP8_23_20
@@ -4859,9 +4857,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP8_15_12
IP8_11_8
IP8_7_4
- IP8_3_0 }
+ IP8_3_0 ))
},
- { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
IP9_31_28
IP9_27_24
IP9_23_20
@@ -4869,9 +4867,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP9_15_12
IP9_11_8
IP9_7_4
- IP9_3_0 }
+ IP9_3_0 ))
},
- { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
IP10_31_28
IP10_27_24
IP10_23_20
@@ -4879,9 +4877,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP10_15_12
IP10_11_8
IP10_7_4
- IP10_3_0 }
+ IP10_3_0 ))
},
- { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
IP11_31_28
IP11_27_24
IP11_23_20
@@ -4889,9 +4887,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP11_15_12
IP11_11_8
IP11_7_4
- IP11_3_0 }
+ IP11_3_0 ))
},
- { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
IP12_31_28
IP12_27_24
IP12_23_20
@@ -4899,9 +4897,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP12_15_12
IP12_11_8
IP12_7_4
- IP12_3_0 }
+ IP12_3_0 ))
},
- { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
IP13_31_28
IP13_27_24
IP13_23_20
@@ -4909,9 +4907,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP13_15_12
IP13_11_8
IP13_7_4
- IP13_3_0 }
+ IP13_3_0 ))
},
- { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+ { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
IP14_31_28
IP14_27_24
IP14_23_20
@@ -4919,9 +4917,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP14_15_12
IP14_11_8
IP14_7_4
- IP14_3_0 }
+ IP14_3_0 ))
},
- { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
IP15_31_28
IP15_27_24
IP15_23_20
@@ -4929,7 +4927,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP15_15_12
IP15_11_8
IP15_7_4
- IP15_3_0 }
+ IP15_3_0 ))
},
#undef F_
#undef FM
@@ -4937,8 +4935,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- 1, 2, 1, 2, 1, 1, 1, 1, 2, 3, 1,
- 1, 1, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2) {
+ GROUP(1, 2, 1, 2, 1, 1, 1, 1, 2, 3, 1, 1,
+ 1, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2),
+ GROUP(
/* RESERVED 31 */
0, 0,
MOD_SEL0_30_29
@@ -4962,13 +4961,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_4
MOD_SEL0_3
MOD_SEL0_2
- MOD_SEL0_1_0 }
+ MOD_SEL0_1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
- 1, 2, 2, 2, 1, 1, 2, 1, 4) {
- MOD_SEL1_31
- MOD_SEL1_30
+ GROUP(2, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1,
+ 2, 2, 2, 1, 1, 2, 1, 4),
+ GROUP(
+ /* RESERVED 31, 30 */
+ 0, 0, 0, 0,
MOD_SEL1_29
MOD_SEL1_28
/* RESERVED 27 */
@@ -4989,17 +4989,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_6_5
MOD_SEL1_4
/* RESERVED 3, 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
{ },
};
enum ioctrl_regs {
- IOCTRL30,
+ POCCTRL0,
+ TDSELCTRL,
};
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
- [IOCTRL30] = { 0xe6060380, },
+ [POCCTRL0] = { 0xe6060380, },
+ [TDSELCTRL] = { 0xe60603c0, },
{ /* sentinel */ },
};
@@ -5008,7 +5010,7 @@ static int r8a77990_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
{
int bit = -EINVAL;
- *pocctrl = pinmux_ioctrl_regs[IOCTRL30].reg;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL0].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -5124,7 +5126,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
} },
{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
[0] = RCAR_GP_PIN(5, 0), /* SCK0_A */
- [1] = RCAR_GP_PIN(5, 4), /* RTS0#/TANS_A */
+ [1] = RCAR_GP_PIN(5, 4), /* RTS0#_A */
[2] = RCAR_GP_PIN(5, 3), /* CTS0#_A */
[3] = RCAR_GP_PIN(5, 2), /* TX0_A */
[4] = RCAR_GP_PIN(5, 1), /* RX0_A */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
index 9e377e3b9cb3..dd87085d48cb 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
@@ -11,6 +11,7 @@
* Copyright (C) 2015 Renesas Electronics Corporation
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include "core.h"
@@ -287,7 +288,7 @@
#define IP10_23_20 FM(SSI_SDATA4_A) FM(HTX0) FM(SCL2_A) FM(CAN1_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP10_27_24 FM(SSI_WS4_A) FM(HRX0) FM(SDA2_A) FM(CAN1_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP10_31_28 FM(SCL1) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_3_0 FM(SDA1) FM(RTS1_N_TANS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_3_0 FM(SDA1) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_7_4 FM(MSIOF1_SCK) FM(AVB0_AVTP_PPS_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_11_8 FM(MSIOF1_TXD) FM(AVB0_AVTP_CAPTURE_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_15_12 FM(MSIOF1_RXD) FM(AVB0_AVTP_MATCH_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -298,7 +299,7 @@
/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
#define IP12_3_0 FM(RX1_A) FM(CTS0_N) FM(TPU0TO0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_7_4 FM(TX1_A) FM(RTS0_N_TANS) FM(TPU0TO1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_7_4 FM(TX1_A) FM(RTS0_N) FM(TPU0TO1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP12_11_8 FM(SCK2) FM(MSIOF1_SS1) FM(TPU0TO3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP12_15_12 FM(TPU0TO0_A) FM(AVB0_AVTP_CAPTURE_A) FM(HCTS0_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP12_19_16 FM(TPU0TO1_A) FM(AVB0_AVTP_MATCH_A) FM(HRTS0_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -857,7 +858,7 @@ static const u16 pinmux_data[] = {
/* IPSR11 */
PINMUX_IPSR_GPSR(IP11_3_0, SDA1),
- PINMUX_IPSR_GPSR(IP11_3_0, RTS1_N_TANS),
+ PINMUX_IPSR_GPSR(IP11_3_0, RTS1_N),
PINMUX_IPSR_GPSR(IP11_7_4, MSIOF1_SCK),
PINMUX_IPSR_MSEL(IP11_7_4, AVB0_AVTP_PPS_B, SEL_ETHERAVB_1),
@@ -892,7 +893,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP12_3_0, TPU0TO0_B),
PINMUX_IPSR_MSEL(IP12_7_4, TX1_A, SEL_SCIF1_0),
- PINMUX_IPSR_GPSR(IP12_7_4, RTS0_N_TANS),
+ PINMUX_IPSR_GPSR(IP12_7_4, RTS0_N),
PINMUX_IPSR_GPSR(IP12_7_4, TPU0TO1_B),
PINMUX_IPSR_GPSR(IP12_11_8, SCK2),
@@ -1704,7 +1705,7 @@ static const unsigned int scif0_ctrl_pins[] = {
RCAR_GP_PIN(4, 24), RCAR_GP_PIN(4, 23),
};
static const unsigned int scif0_ctrl_mux[] = {
- RTS0_N_TANS_MARK, CTS0_N_MARK,
+ RTS0_N_MARK, CTS0_N_MARK,
};
/* - SCIF1 ------------------------------------------------------------------ */
static const unsigned int scif1_data_a_pins[] = {
@@ -1740,7 +1741,7 @@ static const unsigned int scif1_ctrl_pins[] = {
RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 10),
};
static const unsigned int scif1_ctrl_mux[] = {
- RTS1_N_TANS_MARK, CTS1_N_MARK,
+ RTS1_N_MARK, CTS1_N_MARK,
};
/* - SCIF2 ------------------------------------------------------------------ */
@@ -2374,7 +2375,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2406,9 +2407,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, GPSR0_3,
GP_0_2_FN, GPSR0_2,
GP_0_1_FN, GPSR0_1,
- GP_0_0_FN, GPSR0_0, }
+ GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
GP_1_31_FN, GPSR1_31,
GP_1_30_FN, GPSR1_30,
GP_1_29_FN, GPSR1_29,
@@ -2440,9 +2441,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, GPSR1_3,
GP_1_2_FN, GPSR1_2,
GP_1_1_FN, GPSR1_1,
- GP_1_0_FN, GPSR1_0, }
+ GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
GP_2_31_FN, GPSR2_31,
GP_2_30_FN, GPSR2_30,
GP_2_29_FN, GPSR2_29,
@@ -2474,9 +2475,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, GPSR2_3,
GP_2_2_FN, GPSR2_2,
GP_2_1_FN, GPSR2_1,
- GP_2_0_FN, GPSR2_0, }
+ GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2508,9 +2509,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, GPSR3_3,
GP_3_2_FN, GPSR3_2,
GP_3_1_FN, GPSR3_1,
- GP_3_0_FN, GPSR3_0, }
+ GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
GP_4_31_FN, GPSR4_31,
GP_4_30_FN, GPSR4_30,
GP_4_29_FN, GPSR4_29,
@@ -2542,9 +2543,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, GPSR4_3,
GP_4_2_FN, GPSR4_2,
GP_4_1_FN, GPSR4_1,
- GP_4_0_FN, GPSR4_0, }
+ GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2576,9 +2577,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, GPSR5_3,
GP_5_2_FN, GPSR5_2,
GP_5_1_FN, GPSR5_1,
- GP_5_0_FN, GPSR5_0, }
+ GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2610,14 +2611,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_3_FN, GPSR6_3,
GP_6_2_FN, GPSR6_2,
GP_6_1_FN, GPSR6_1,
- GP_6_0_FN, GPSR6_0, }
+ GP_6_0_FN, GPSR6_0, ))
},
#undef F_
#undef FM
#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
IP0_31_28
IP0_27_24
IP0_23_20
@@ -2625,9 +2626,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP0_15_12
IP0_11_8
IP0_7_4
- IP0_3_0 }
+ IP0_3_0 ))
},
- { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
IP1_31_28
IP1_27_24
IP1_23_20
@@ -2635,9 +2636,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1_15_12
IP1_11_8
IP1_7_4
- IP1_3_0 }
+ IP1_3_0 ))
},
- { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
IP2_31_28
IP2_27_24
IP2_23_20
@@ -2645,9 +2646,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2_15_12
IP2_11_8
IP2_7_4
- IP2_3_0 }
+ IP2_3_0 ))
},
- { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
IP3_31_28
IP3_27_24
IP3_23_20
@@ -2655,9 +2656,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3_15_12
IP3_11_8
IP3_7_4
- IP3_3_0 }
+ IP3_3_0 ))
},
- { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
IP4_31_28
IP4_27_24
IP4_23_20
@@ -2665,9 +2666,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP4_15_12
IP4_11_8
IP4_7_4
- IP4_3_0 }
+ IP4_3_0 ))
},
- { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
IP5_31_28
IP5_27_24
IP5_23_20
@@ -2675,9 +2676,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP5_15_12
IP5_11_8
IP5_7_4
- IP5_3_0 }
+ IP5_3_0 ))
},
- { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
IP6_31_28
IP6_27_24
IP6_23_20
@@ -2685,9 +2686,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_15_12
IP6_11_8
IP6_7_4
- IP6_3_0 }
+ IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
@@ -2695,9 +2696,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP7_15_12
IP7_11_8
IP7_7_4
- IP7_3_0 }
+ IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
IP8_31_28
IP8_27_24
IP8_23_20
@@ -2705,9 +2706,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP8_15_12
IP8_11_8
IP8_7_4
- IP8_3_0 }
+ IP8_3_0 ))
},
- { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
IP9_31_28
IP9_27_24
IP9_23_20
@@ -2715,9 +2716,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP9_15_12
IP9_11_8
IP9_7_4
- IP9_3_0 }
+ IP9_3_0 ))
},
- { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
IP10_31_28
IP10_27_24
IP10_23_20
@@ -2725,9 +2726,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP10_15_12
IP10_11_8
IP10_7_4
- IP10_3_0 }
+ IP10_3_0 ))
},
- { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
IP11_31_28
IP11_27_24
IP11_23_20
@@ -2735,9 +2736,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP11_15_12
IP11_11_8
IP11_7_4
- IP11_3_0 }
+ IP11_3_0 ))
},
- { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
IP12_31_28
IP12_27_24
IP12_23_20
@@ -2745,9 +2746,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP12_15_12
IP12_11_8
IP12_7_4
- IP12_3_0 }
+ IP12_3_0 ))
},
- { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
/* IP13_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP13_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP13_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2755,7 +2756,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP13_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP13_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IP13_7_4
- IP13_3_0 }
+ IP13_3_0 ))
},
#undef F_
#undef FM
@@ -2763,8 +2764,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1,
- 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1,
+ 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* RESERVED 31 */
0, 0,
MOD_SEL0_30
@@ -2792,11 +2794,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_3
MOD_SEL0_2
MOD_SEL0_1
- MOD_SEL0_0 }
+ MOD_SEL0_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- 1, 1, 1, 1, 1, 1, 2, 4, 4,
- 4, 4, 4, 4) {
+ GROUP(1, 1, 1, 1, 1, 1, 2, 4, 4, 4, 4, 4, 4),
+ GROUP(
MOD_SEL1_31
MOD_SEL1_30
MOD_SEL1_29
@@ -2816,7 +2818,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* RESERVED 7, 6, 5, 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* RESERVED 3, 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
{ },
};
@@ -2833,6 +2835,15 @@ static int r8a77995_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *po
return bit;
}
+enum ioctrl_regs {
+ TDSELCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [TDSELCTRL] = { 0xe60603c0, },
+ { /* sentinel */ },
+};
+
static const struct sh_pfc_soc_operations r8a77995_pinmux_ops = {
.pin_to_pocctrl = r8a77995_pin_to_pocctrl,
};
@@ -2852,6 +2863,7 @@ const struct sh_pfc_soc_info r8a77995_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
index 9ee468a9bd0e..811a6f2cb1fc 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7203.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
@@ -1073,7 +1073,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) {
+ { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -1089,9 +1089,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0 }
+ 0, 0 ))
},
- { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4) {
+ { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1099,9 +1099,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4) {
+ { PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4, GROUP(
PB11MD_0, PB11MD_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1112,9 +1112,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PB8MD_00, PB8MD_01, PB8MD_10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4) {
+ { PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4, GROUP(
PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1125,9 +1125,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4) {
+ { PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4, GROUP(
PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1138,9 +1138,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4) {
+ { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1148,9 +1148,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1) {
+ { PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1, GROUP(
0, 0,
PC14_IN, PC14_OUT,
PC13_IN, PC13_OUT,
@@ -1166,9 +1166,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC3_IN, PC3_OUT,
PC2_IN, PC2_OUT,
PC1_IN, PC1_OUT,
- PC0_IN, PC0_OUT }
+ PC0_IN, PC0_OUT ))
},
- { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4) {
+ { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PC14MD_0, PC14MD_1,
@@ -1178,9 +1178,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PC12MD_0, PC12MD_1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4) {
+ { PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4, GROUP(
PC11MD_00, PC11MD_01, PC11MD_10, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1191,9 +1191,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PC8MD_0, PC8MD_1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4) {
+ { PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4, GROUP(
PC7MD_0, PC7MD_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1204,9 +1204,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PC4MD_0, PC4MD_1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4) {
+ { PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4, GROUP(
PC3MD_0, PC3MD_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1217,9 +1217,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PC0MD_00, PC0MD_01, PC0MD_10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1) {
+ { PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1, GROUP(
PD15_IN, PD15_OUT,
PD14_IN, PD14_OUT,
PD13_IN, PD13_OUT,
@@ -1235,9 +1235,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD3_IN, PD3_OUT,
PD2_IN, PD2_OUT,
PD1_IN, PD1_OUT,
- PD0_IN, PD0_OUT }
+ PD0_IN, PD0_OUT ))
},
- { PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4) {
+ { PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4, GROUP(
PD15MD_000, PD15MD_001, PD15MD_010, 0,
PD15MD_100, PD15MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1252,9 +1252,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD12MD_000, PD12MD_001, PD12MD_010, 0,
PD12MD_100, PD12MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4) {
+ { PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4, GROUP(
PD11MD_000, PD11MD_001, PD11MD_010, 0,
PD11MD_100, PD11MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1269,9 +1269,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD8MD_000, PD8MD_001, PD8MD_010, 0,
PD8MD_100, PD8MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4) {
+ { PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4, GROUP(
PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011,
PD7MD_100, PD7MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1286,9 +1286,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011,
PD4MD_100, PD4MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4) {
+ { PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4, GROUP(
PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011,
PD3MD_100, PD3MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1303,9 +1303,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011,
PD0MD_100, PD0MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1) {
+ { PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1, GROUP(
PE15_IN, PE15_OUT,
PE14_IN, PE14_OUT,
PE13_IN, PE13_OUT,
@@ -1321,9 +1321,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE3_IN, PE3_OUT,
PE2_IN, PE2_OUT,
PE1_IN, PE1_OUT,
- PE0_IN, PE0_OUT }
+ PE0_IN, PE0_OUT ))
},
- { PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4) {
+ { PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4, GROUP(
PE15MD_00, PE15MD_01, 0, PE15MD_11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1334,9 +1334,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PE12MD_00, 0, 0, PE12MD_11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4) {
+ { PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4, GROUP(
PE11MD_000, PE11MD_001, PE11MD_010, 0,
PE11MD_100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1349,9 +1349,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4) {
+ { PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4, GROUP(
PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011,
PE7MD_100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1366,9 +1366,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011,
PE4MD_100, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4) {
+ { PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4, GROUP(
PE3MD_00, PE3MD_01, 0, PE3MD_11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1380,9 +1380,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE0MD_000, PE0MD_001, 0, PE0MD_011,
PE0MD_100, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1) {
+ { PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1, GROUP(
0, 0,
PF30_IN, PF30_OUT,
PF29_IN, PF29_OUT,
@@ -1398,9 +1398,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF19_IN, PF19_OUT,
PF18_IN, PF18_OUT,
PF17_IN, PF17_OUT,
- PF16_IN, PF16_OUT }
+ PF16_IN, PF16_OUT ))
},
- { PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1) {
+ { PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1, GROUP(
PF15_IN, PF15_OUT,
PF14_IN, PF14_OUT,
PF13_IN, PF13_OUT,
@@ -1416,9 +1416,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF3_IN, PF3_OUT,
PF2_IN, PF2_OUT,
PF1_IN, PF1_OUT,
- PF0_IN, PF0_OUT }
+ PF0_IN, PF0_OUT ))
},
- { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4) {
+ { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF30MD_0, PF30MD_1,
@@ -1428,9 +1428,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF28MD_0, PF28MD_1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4) {
+ { PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4, GROUP(
PF27MD_0, PF27MD_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1441,9 +1441,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF24MD_0, PF24MD_1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4) {
+ { PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4, GROUP(
PF23MD_00, PF23MD_01, PF23MD_10, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1454,9 +1454,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF20MD_00, PF20MD_01, PF20MD_10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4) {
+ { PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4, GROUP(
PF19MD_00, PF19MD_01, PF19MD_10, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1467,9 +1467,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF16MD_00, PF16MD_01, PF16MD_10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4) {
+ { PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4, GROUP(
PF15MD_00, PF15MD_01, PF15MD_10, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1480,9 +1480,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF12MD_00, PF12MD_01, PF12MD_10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4) {
+ { PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4, GROUP(
PF11MD_00, PF11MD_01, PF11MD_10, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1493,9 +1493,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF8MD_00, PF8MD_01, PF8MD_10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4) {
+ { PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4, GROUP(
PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1506,9 +1506,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4) {
+ { PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4, GROUP(
PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1519,53 +1519,53 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
{}
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) {
+ { PINMUX_DATA_REG("PADRL", 0xfffe3802, 16, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
- PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA ))
},
- { PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16) {
+ { PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16, GROUP(
0, 0, 0, PB12_DATA,
PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
- PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA ))
},
- { PINMUX_DATA_REG("PCDRL", 0xfffe3902, 16) {
+ { PINMUX_DATA_REG("PCDRL", 0xfffe3902, 16, GROUP(
0, PC14_DATA, PC13_DATA, PC12_DATA,
PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
- PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA ))
},
- { PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16) {
+ { PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16, GROUP(
PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
- PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA ))
},
- { PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16) {
+ { PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16, GROUP(
PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
- PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA ))
},
- { PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16) {
+ { PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16, GROUP(
0, PF30_DATA, PF29_DATA, PF28_DATA,
PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
- PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA }
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA ))
},
- { PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16) {
+ { PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16, GROUP(
PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
- PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA ))
},
{ },
};
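Every hunk in these files applies the same mechanical change: the enum IDs that used to trail the PINMUX_CFG_REG()/PINMUX_DATA_REG() invocation as a bare brace initializer are now passed in through a variadic GROUP() argument. A minimal standalone sketch of why that helps, assuming GROUP() merely forwards its arguments (the real macros live in sh_pfc.h and are not shown in this diff; cfg_reg_sketch, CFG_REG_SKETCH and the numeric IDs below are made-up stand-ins, not driver code):

/*
 * Sketch only: once the IDs are a macro argument, the register macro can
 * both emit the array and derive its length, instead of relying on a
 * detached { ... } initializer it never sees.
 */
#include <stdio.h>

#define GROUP(...)	__VA_ARGS__

struct cfg_reg_sketch {
	unsigned long reg;
	unsigned int reg_width;
	unsigned int field_width;
	unsigned int nr_enum_ids;
	const unsigned short *enum_ids;
};

#define CFG_REG_SKETCH(r, r_width, f_width, ids)			\
{									\
	.reg = (r), .reg_width = (r_width), .field_width = (f_width),	\
	.nr_enum_ids = sizeof((const unsigned short []){ ids })	\
		       / sizeof(unsigned short),			\
	.enum_ids = (const unsigned short []){ ids },			\
}

/* old style:  CFG_REG_SKETCH(0xfffe3812, 16, 1) { 1, 2, 3, 4 }
 * new style:  the IDs ride along inside GROUP()                */
static const struct cfg_reg_sketch demo =
	CFG_REG_SKETCH(0xfffe3812, 16, 1, GROUP(1, 2, 3, 4));

int main(void)
{
	printf("%u enum IDs for register 0x%lx\n",
	       demo.nr_enum_ids, demo.reg);
	return 0;
}

With the IDs inside the argument list, the macro can size the array itself and, in the real driver, presumably sanity-check the count at build time, which the old detached brace initializer could not offer.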
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 501de63e6c5f..4a95867deb8a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -1466,17 +1466,17 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1) {
+ { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PA3_IN, PA3_OUT,
PA2_IN, PA2_OUT,
PA1_IN, PA1_OUT,
- PA0_IN, PA0_OUT }
+ PA0_IN, PA0_OUT ))
},
- { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4) {
+ { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PB22MD_00, PB22MD_01, PB22MD_10, 0, 0, 0, 0, 0,
@@ -1484,10 +1484,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB21MD_0, PB21MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB20MD_1, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR4", 0xfffe3826, 16, 4) {
+ { PINMUX_CFG_REG("PBCR4", 0xfffe3826, 16, 4, GROUP(
0, PB19MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB18MD_01, 0, 0, 0, 0, 0, 0,
@@ -1495,9 +1495,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PB17MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB16MD_01, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR3", 0xfffe3828, 16, 4) {
+ { PINMUX_CFG_REG("PBCR3", 0xfffe3828, 16, 4, GROUP(
0, PB15MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB14MD_01, 0, 0, 0, 0, 0, 0,
@@ -1505,9 +1505,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PB13MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB12MD_01, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR2", 0xfffe382a, 16, 4) {
+ { PINMUX_CFG_REG("PBCR2", 0xfffe382a, 16, 4, GROUP(
0, PB11MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB10MD_01, 0, 0, 0, 0, 0, 0,
@@ -1515,9 +1515,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PB9MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB8MD_01, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR1", 0xfffe382c, 16, 4) {
+ { PINMUX_CFG_REG("PBCR1", 0xfffe382c, 16, 4, GROUP(
0, PB7MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB6MD_01, 0, 0, 0, 0, 0, 0,
@@ -1525,9 +1525,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PB5MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB4MD_01, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4) {
+ { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4, GROUP(
0, PB3MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB2MD_1, 0, 0, 0, 0, 0, 0,
@@ -1535,10 +1535,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PB1MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1) {
+ { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
@@ -1548,10 +1548,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB19_IN, PB19_OUT,
PB18_IN, PB18_OUT,
PB17_IN, PB17_OUT,
- PB16_IN, PB16_OUT }
+ PB16_IN, PB16_OUT ))
},
- { PINMUX_CFG_REG("PBIOR0", 0xfffe3832, 16, 1) {
+ { PINMUX_CFG_REG("PBIOR0", 0xfffe3832, 16, 1, GROUP(
PB15_IN, PB15_OUT,
PB14_IN, PB14_OUT,
PB13_IN, PB13_OUT,
@@ -1567,10 +1567,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB3_IN, PB3_OUT,
PB2_IN, PB2_OUT,
PB1_IN, PB1_OUT,
- 0, 0 }
+ 0, 0 ))
},
- { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4) {
+ { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PC10MD_0, PC10MD_1, 0, 0, 0, 0, 0, 0,
@@ -1578,9 +1578,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC9MD_0, PC9MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PC8MD_00, PC8MD_01, PC8MD_10, PC8MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCCR1", 0xfffe384c, 16, 4) {
+ { PINMUX_CFG_REG("PCCR1", 0xfffe384c, 16, 4, GROUP(
PC7MD_00, PC7MD_01, PC7MD_10, PC7MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PC6MD_00, PC6MD_01, PC6MD_10, PC6MD_11, 0, 0, 0, 0,
@@ -1588,9 +1588,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC5MD_00, PC5MD_01, PC5MD_10, PC5MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PC4MD_0, PC4MD_1, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCCR0", 0xfffe384e, 16, 4) {
+ { PINMUX_CFG_REG("PCCR0", 0xfffe384e, 16, 4, GROUP(
PC3MD_0, PC3MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PC2MD_0, PC2MD_1, 0, 0, 0, 0, 0, 0,
@@ -1598,10 +1598,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC1MD_0, PC1MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PC0MD_0, PC0MD_1, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1) {
+ { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PC10_IN, PC10_OUT,
PC9_IN, PC9_OUT,
@@ -1614,10 +1614,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC2_IN, PC2_OUT,
PC1_IN, PC1_OUT,
PC0_IN, PC0_OUT
- }
+ ))
},
- { PINMUX_CFG_REG("PDCR3", 0xfffe3868, 16, 4) {
+ { PINMUX_CFG_REG("PDCR3", 0xfffe3868, 16, 4, GROUP(
0, PD15MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PD14MD_01, 0, 0, 0, 0, 0, 0,
@@ -1625,9 +1625,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PD13MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PD12MD_01, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCR2", 0xfffe386a, 16, 4) {
+ { PINMUX_CFG_REG("PDCR2", 0xfffe386a, 16, 4, GROUP(
0, PD11MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PD10MD_01, 0, 0, 0, 0, 0, 0,
@@ -1635,9 +1635,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PD9MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PD8MD_01, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCR1", 0xfffe386c, 16, 4) {
+ { PINMUX_CFG_REG("PDCR1", 0xfffe386c, 16, 4, GROUP(
0, PD7MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PD6MD_01, 0, 0, 0, 0, 0, 0,
@@ -1645,9 +1645,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PD5MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PD4MD_01, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCR0", 0xfffe386e, 16, 4) {
+ { PINMUX_CFG_REG("PDCR0", 0xfffe386e, 16, 4, GROUP(
0, PD3MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PD2MD_01, 0, 0, 0, 0, 0, 0,
@@ -1655,10 +1655,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PD1MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PD0MD_01, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDIOR0", 0xfffe3872, 16, 1) {
+ { PINMUX_CFG_REG("PDIOR0", 0xfffe3872, 16, 1, GROUP(
PD15_IN, PD15_OUT,
PD14_IN, PD14_OUT,
PD13_IN, PD13_OUT,
@@ -1674,10 +1674,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD3_IN, PD3_OUT,
PD2_IN, PD2_OUT,
PD1_IN, PD1_OUT,
- PD0_IN, PD0_OUT }
+ PD0_IN, PD0_OUT ))
},
- { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4) {
+ { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1685,10 +1685,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE5MD_00, PE5MD_01, 0, PE5MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PE4MD_00, PE4MD_01, 0, PE4MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PECR0", 0xfffe388e, 16, 4) {
+ { PINMUX_CFG_REG("PECR0", 0xfffe388e, 16, 4, GROUP(
PE3MD_00, PE3MD_01, 0, PE3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PE2MD_00, PE2MD_01, 0, PE2MD_11, 0, 0, 0, 0,
@@ -1697,10 +1697,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE1MD_100, PE1MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1) {
+ { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
@@ -1709,19 +1709,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE3_IN, PE3_OUT,
PE2_IN, PE2_OUT,
PE1_IN, PE1_OUT,
- PE0_IN, PE0_OUT }
+ PE0_IN, PE0_OUT ))
},
- { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) {
+ { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF12MD_000, PF12MD_001, 0, PF12MD_011,
PF12MD_100, PF12MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR2", 0xfffe38aa, 16, 4) {
+ { PINMUX_CFG_REG("PFCR2", 0xfffe38aa, 16, 4, GROUP(
PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
PF11MD_100, PF11MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1732,10 +1732,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF9MD_100, PF9MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PF8MD_00, PF8MD_01, PF8MD_10, PF8MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR1", 0xfffe38ac, 16, 4) {
+ { PINMUX_CFG_REG("PFCR1", 0xfffe38ac, 16, 4, GROUP(
PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
PF7MD_100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1747,10 +1747,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
PF4MD_100, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR0", 0xfffe38ae, 16, 4) {
+ { PINMUX_CFG_REG("PFCR0", 0xfffe38ae, 16, 4, GROUP(
PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
PF3MD_100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1762,10 +1762,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
PF0MD_100, PF0MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) {
+ { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1, GROUP(
0, 0, 0, 0, 0, 0,
PF12_IN, PF12_OUT,
PF11_IN, PF11_OUT,
@@ -1779,10 +1779,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF3_IN, PF3_OUT,
PF2_IN, PF2_OUT,
PF1_IN, PF1_OUT,
- PF0_IN, PF0_OUT }
+ PF0_IN, PF0_OUT ))
},
- { PINMUX_CFG_REG("PGCR7", 0xfffe38c0, 16, 4) {
+ { PINMUX_CFG_REG("PGCR7", 0xfffe38c0, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1791,10 +1791,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
PG0MD_100, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4) {
+ { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1802,10 +1802,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR5", 0xfffe38c4, 16, 4) {
+ { PINMUX_CFG_REG("PGCR5", 0xfffe38c4, 16, 4, GROUP(
PG23MD_00, PG23MD_01, PG23MD_10, PG23MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PG22MD_00, PG22MD_01, PG22MD_10, PG22MD_11, 0, 0, 0, 0,
@@ -1814,10 +1814,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
PG20MD_100, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4) {
+ { PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4, GROUP(
PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
PG19MD_100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1829,10 +1829,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PG16MD_000, PG16MD_001, PG16MD_010, PG16MD_011,
PG16MD_100, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR3", 0xfffe38c8, 16, 4) {
+ { PINMUX_CFG_REG("PGCR3", 0xfffe38c8, 16, 4, GROUP(
PG15MD_000, PG15MD_001, PG15MD_010, PG15MD_011,
PG15MD_100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1844,9 +1844,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PG12MD_000, PG12MD_001, PG12MD_010, 0,
PG12MD_100, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR2", 0xfffe38ca, 16, 4) {
+ { PINMUX_CFG_REG("PGCR2", 0xfffe38ca, 16, 4, GROUP(
PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
PG11MD_100, PG11MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1858,10 +1858,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
PG8MD_100, PG8MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR1", 0xfffe38cc, 16, 4) {
+ { PINMUX_CFG_REG("PGCR1", 0xfffe38cc, 16, 4, GROUP(
PG7MD_00, PG7MD_01, PG7MD_10, PG7MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PG6MD_00, PG6MD_01, PG6MD_10, PG6MD_11, 0, 0, 0, 0,
@@ -1869,9 +1869,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG5MD_00, PG5MD_01, PG5MD_10, PG5MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PG4MD_00, PG4MD_01, PG4MD_10, PG4MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4) {
+ { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4, GROUP(
PG3MD_00, PG3MD_01, PG3MD_10, PG3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PG2MD_00, PG2MD_01, PG2MD_10, PG2MD_11, 0, 0, 0, 0,
@@ -1879,9 +1879,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG1MD_00, PG1MD_01, PG1MD_10, PG1MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1) {
+ { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
PG24_IN, PG24_OUT,
@@ -1892,10 +1892,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG19_IN, PG19_OUT,
PG18_IN, PG18_OUT,
PG17_IN, PG17_OUT,
- PG16_IN, PG16_OUT }
+ PG16_IN, PG16_OUT ))
},
- { PINMUX_CFG_REG("PGIOR0", 0xfffe38d2, 16, 1) {
+ { PINMUX_CFG_REG("PGIOR0", 0xfffe38d2, 16, 1, GROUP(
PG15_IN, PG15_OUT,
PG14_IN, PG14_OUT,
PG13_IN, PG13_OUT,
@@ -1912,10 +1912,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG2_IN, PG2_OUT,
PG1_IN, PG1_OUT,
PG0_IN, PG0_OUT
- }
+ ))
},
- { PINMUX_CFG_REG("PHCR1", 0xfffe38ec, 16, 4) {
+ { PINMUX_CFG_REG("PHCR1", 0xfffe38ec, 16, 4, GROUP(
PH7MD_0, PH7MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PH6MD_0, PH6MD_1, 0, 0, 0, 0, 0, 0,
@@ -1923,10 +1923,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PH5MD_0, PH5MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PH4MD_0, PH4MD_1, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PHCR0", 0xfffe38ee, 16, 4) {
+ { PINMUX_CFG_REG("PHCR0", 0xfffe38ee, 16, 4, GROUP(
PH3MD_0, PH3MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PH2MD_0, PH2MD_1, 0, 0, 0, 0, 0, 0,
@@ -1934,10 +1934,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PH1MD_0, PH1MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PH0MD_0, PH0MD_1, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR2", 0xfffe390a, 16, 4) {
+ { PINMUX_CFG_REG("PJCR2", 0xfffe390a, 16, 4, GROUP(
PJ11MD_00, PJ11MD_01, PJ11MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PJ10MD_00, PJ10MD_01, PJ10MD_10, 0, 0, 0, 0, 0,
@@ -1945,9 +1945,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ9MD_00, PJ9MD_01, PJ9MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PJ8MD_00, PJ8MD_01, PJ8MD_10, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR1", 0xfffe390c, 16, 4) {
+ { PINMUX_CFG_REG("PJCR1", 0xfffe390c, 16, 4, GROUP(
PJ7MD_00, PJ7MD_01, PJ7MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PJ6MD_00, PJ6MD_01, PJ6MD_10, 0, 0, 0, 0, 0,
@@ -1955,9 +1955,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ5MD_00, PJ5MD_01, PJ5MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PJ4MD_00, PJ4MD_01, PJ4MD_10, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR0", 0xfffe390e, 16, 4) {
+ { PINMUX_CFG_REG("PJCR0", 0xfffe390e, 16, 4, GROUP(
PJ3MD_00, PJ3MD_01, PJ3MD_10, PJ3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
@@ -1968,9 +1968,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
PJ0MD_100, PJ0MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, }
+ 0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG("PJIOR0", 0xfffe3912, 16, 1) {
+ { PINMUX_CFG_REG("PJIOR0", 0xfffe3912, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PJ11_IN, PJ11_OUT,
PJ10_IN, PJ10_OUT,
@@ -1983,10 +1983,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ3_IN, PJ3_OUT,
PJ2_IN, PJ2_OUT,
PJ1_IN, PJ1_OUT,
- PJ0_IN, PJ0_OUT }
+ PJ0_IN, PJ0_OUT ))
},
- { PINMUX_CFG_REG("PKCR2", 0xfffe392a, 16, 4) {
+ { PINMUX_CFG_REG("PKCR2", 0xfffe392a, 16, 4, GROUP(
PK11MD_00, PK11MD_01, PK11MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PK10MD_00, PK10MD_01, PK10MD_10, 0, 0, 0, 0, 0,
@@ -1994,10 +1994,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PK9MD_00, PK9MD_01, PK9MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PK8MD_00, PK8MD_01, PK8MD_10, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PKCR1", 0xfffe392c, 16, 4) {
+ { PINMUX_CFG_REG("PKCR1", 0xfffe392c, 16, 4, GROUP(
PK7MD_00, PK7MD_01, PK7MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PK6MD_00, PK6MD_01, PK6MD_10, 0, 0, 0, 0, 0,
@@ -2005,9 +2005,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PK5MD_00, PK5MD_01, PK5MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PK4MD_00, PK4MD_01, PK4MD_10, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PKCR0", 0xfffe392e, 16, 4) {
+ { PINMUX_CFG_REG("PKCR0", 0xfffe392e, 16, 4, GROUP(
PK3MD_00, PK3MD_01, PK3MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PK2MD_00, PK2MD_01, PK2MD_10, 0, 0, 0, 0, 0,
@@ -2015,10 +2015,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PK1MD_00, PK1MD_01, PK1MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PK0MD_00, PK0MD_01, PK0MD_10, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PKIOR0", 0xfffe3932, 16, 1) {
+ { PINMUX_CFG_REG("PKIOR0", 0xfffe3932, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PJ11_IN, PJ11_OUT,
PJ10_IN, PJ10_OUT,
@@ -2031,85 +2031,85 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ3_IN, PJ3_OUT,
PJ2_IN, PJ2_OUT,
PJ1_IN, PJ1_OUT,
- PJ0_IN, PJ0_OUT }
+ PJ0_IN, PJ0_OUT ))
},
{}
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR1", 0xfffe3814, 16) {
+ { PINMUX_DATA_REG("PADR1", 0xfffe3814, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, PA3_DATA,
- 0, 0, 0, 0, 0, 0, 0, PA2_DATA }
+ 0, 0, 0, 0, 0, 0, 0, PA2_DATA ))
},
- { PINMUX_DATA_REG("PADR0", 0xfffe3816, 16) {
+ { PINMUX_DATA_REG("PADR0", 0xfffe3816, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, PA1_DATA,
- 0, 0, 0, 0, 0, 0, 0, PA0_DATA }
+ 0, 0, 0, 0, 0, 0, 0, PA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR1", 0xfffe3834, 16) {
+ { PINMUX_DATA_REG("PBDR1", 0xfffe3834, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, PB22_DATA, PB21_DATA, PB20_DATA,
- PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA }
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA ))
},
- { PINMUX_DATA_REG("PBDR0", 0xfffe3836, 16) {
+ { PINMUX_DATA_REG("PBDR0", 0xfffe3836, 16, GROUP(
PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
- PB3_DATA, PB2_DATA, PB1_DATA, 0 }
+ PB3_DATA, PB2_DATA, PB1_DATA, 0 ))
},
- { PINMUX_DATA_REG("PCDR0", 0xfffe3856, 16) {
+ { PINMUX_DATA_REG("PCDR0", 0xfffe3856, 16, GROUP(
0, 0, 0, 0,
0, PC10_DATA, PC9_DATA, PC8_DATA,
PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
- PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR0", 0xfffe3876, 16) {
+ { PINMUX_DATA_REG("PDDR0", 0xfffe3876, 16, GROUP(
PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
- PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR0", 0xfffe3896, 16) {
+ { PINMUX_DATA_REG("PEDR0", 0xfffe3896, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, PE5_DATA, PE4_DATA,
- PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA ))
},
- { PINMUX_DATA_REG("PFDR0", 0xfffe38b6, 16) {
+ { PINMUX_DATA_REG("PFDR0", 0xfffe38b6, 16, GROUP(
0, 0, 0, PF12_DATA,
PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
- PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR1", 0xfffe38d4, 16) {
+ { PINMUX_DATA_REG("PGDR1", 0xfffe38d4, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, PG24_DATA,
PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
- PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA }
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA ))
},
- { PINMUX_DATA_REG("PGDR0", 0xfffe38d6, 16) {
+ { PINMUX_DATA_REG("PGDR0", 0xfffe38d6, 16, GROUP(
PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
- PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA ))
},
- { PINMUX_DATA_REG("PJDR0", 0xfffe3916, 16) {
+ { PINMUX_DATA_REG("PJDR0", 0xfffe3916, 16, GROUP(
0, 0, 0, PJ12_DATA,
PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
- PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA ))
},
- { PINMUX_DATA_REG("PKDR0", 0xfffe3936, 16) {
+ { PINMUX_DATA_REG("PKDR0", 0xfffe3936, 16, GROUP(
0, 0, 0, PK12_DATA,
PK11_DATA, PK10_DATA, PK9_DATA, PK8_DATA,
PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
- PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA }
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA ))
},
{ }
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index a95997a389a4..6cbb18ef77dc 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -1951,13 +1951,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* where Field_Width is 1 for single mode registers or 4 for up to 16
mode registers and modes are described in ascending order [0..16] */
- { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1) {
+ { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, PA1_IN, PA1_OUT,
0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, PA0_IN, PA0_OUT }
+ 0, 0, 0, 0, 0, 0, PA0_IN, PA0_OUT ))
},
- { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4) {
+ { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PB22MD_000, PB22MD_001, PB22MD_010, PB22MD_011,
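A quick check of the arithmetic behind that comment, as a sketch with made-up values rather than driver code: a 16-bit register split into 1-bit fields needs 16 * 2 = 32 enum IDs, while 4-bit fields need 4 * 16 = 64, which is why the 1-bit GROUP() lists above carry 32 entries and the 4-bit ones carry 64 (mostly zero-padded).

/* Sketch only: enum-ID counts for one 16-bit config register. */
#include <stdio.h>

int main(void)
{
	const unsigned int reg_width = 16;
	const unsigned int field_widths[] = { 1, 4 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int fw = field_widths[i];
		unsigned int fields = reg_width / fw;	/* fields per register */
		unsigned int modes = 1u << fw;		/* values per field */

		printf("field_width %u: %u fields x %u modes = %u enum IDs\n",
		       fw, fields, modes, fields * modes);
	}
	return 0;
}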
@@ -1969,9 +1969,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB20MD_000, PB20MD_001, PB20MD_010, PB20MD_011,
PB20MD_100, PB20MD_101, PB20MD_110, PB20MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR4", 0xfffe3826, 16, 4) {
+ { PINMUX_CFG_REG("PBCR4", 0xfffe3826, 16, 4, GROUP(
PB19MD_000, PB19MD_001, PB19MD_010, PB19MD_011,
PB19MD_100, PB19MD_101, PB19MD_110, PB19MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -1986,9 +1986,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB16MD_000, PB16MD_001, PB16MD_010, PB16MD_011,
PB16MD_100, PB16MD_101, PB16MD_110, PB16MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR3", 0xfffe3828, 16, 4) {
+ { PINMUX_CFG_REG("PBCR3", 0xfffe3828, 16, 4, GROUP(
PB15MD_000, PB15MD_001, PB15MD_010, PB15MD_011,
PB15MD_100, PB15MD_101, PB15MD_110, PB15MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2002,9 +2002,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR2", 0xfffe382a, 16, 4) {
+ { PINMUX_CFG_REG("PBCR2", 0xfffe382a, 16, 4, GROUP(
PB11MD_00, PB11MD_01, PB11MD_10, PB11MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2015,9 +2015,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PB8MD_00, PB8MD_01, PB8MD_10, PB8MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR1", 0xfffe382c, 16, 4) {
+ { PINMUX_CFG_REG("PBCR1", 0xfffe382c, 16, 4, GROUP(
PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2028,9 +2028,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4) {
+ { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4, GROUP(
PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2040,10 +2040,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1) {
+ { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
@@ -2053,9 +2053,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB19_IN, PB19_OUT,
PB18_IN, PB18_OUT,
PB17_IN, PB17_OUT,
- PB16_IN, PB16_OUT }
+ PB16_IN, PB16_OUT ))
},
- { PINMUX_CFG_REG("PBIOR0", 0xfffe3832, 16, 1) {
+ { PINMUX_CFG_REG("PBIOR0", 0xfffe3832, 16, 1, GROUP(
PB15_IN, PB15_OUT,
PB14_IN, PB14_OUT,
PB13_IN, PB13_OUT,
@@ -2071,10 +2071,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB3_IN, PB3_OUT,
PB2_IN, PB2_OUT,
PB1_IN, PB1_OUT,
- 0, 0 }
+ 0, 0 ))
},
- { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4) {
+ { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2083,9 +2083,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC8MD_000, PC8MD_001, PC8MD_010, PC8MD_011,
PC8MD_100, PC8MD_101, PC8MD_110, PC8MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCCR1", 0xfffe384c, 16, 4) {
+ { PINMUX_CFG_REG("PCCR1", 0xfffe384c, 16, 4, GROUP(
PC7MD_000, PC7MD_001, PC7MD_010, PC7MD_011,
PC7MD_100, PC7MD_101, PC7MD_110, PC7MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2099,9 +2099,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PC4MD_00, PC4MD_01, PC4MD_10, PC4MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCCR0", 0xfffe384e, 16, 4) {
+ { PINMUX_CFG_REG("PCCR0", 0xfffe384e, 16, 4, GROUP(
PC3MD_00, PC3MD_01, PC3MD_10, PC3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2112,10 +2112,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PC0MD_0, PC0MD_1, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1) {
+ { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PC8_IN, PC8_OUT,
PC7_IN, PC7_OUT,
@@ -2125,10 +2125,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC3_IN, PC3_OUT,
PC2_IN, PC2_OUT,
PC1_IN, PC1_OUT,
- PC0_IN, PC0_OUT }
+ PC0_IN, PC0_OUT ))
},
- { PINMUX_CFG_REG("PDCR3", 0xfffe3868, 16, 4) {
+ { PINMUX_CFG_REG("PDCR3", 0xfffe3868, 16, 4, GROUP(
PD15MD_00, PD15MD_01, PD15MD_10, PD15MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2139,9 +2139,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PD12MD_00, PD12MD_01, PD12MD_10, PD12MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCR2", 0xfffe386a, 16, 4) {
+ { PINMUX_CFG_REG("PDCR2", 0xfffe386a, 16, 4, GROUP(
PD11MD_00, PD11MD_01, PD11MD_10, PD11MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2152,9 +2152,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PD8MD_00, PD8MD_01, PD8MD_10, PD8MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCR1", 0xfffe386c, 16, 4) {
+ { PINMUX_CFG_REG("PDCR1", 0xfffe386c, 16, 4, GROUP(
PD7MD_00, PD7MD_01, PD7MD_10, PD7MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2165,9 +2165,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PD4MD_00, PD4MD_01, PD4MD_10, PD4MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDCR0", 0xfffe386e, 16, 4) {
+ { PINMUX_CFG_REG("PDCR0", 0xfffe386e, 16, 4, GROUP(
PD3MD_00, PD3MD_01, PD3MD_10, PD3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2178,10 +2178,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PD0MD_00, PD0MD_01, PD0MD_10, PD0MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PDIOR0", 0xfffe3872, 16, 1) {
+ { PINMUX_CFG_REG("PDIOR0", 0xfffe3872, 16, 1, GROUP(
PD15_IN, PD15_OUT,
PD14_IN, PD14_OUT,
PD13_IN, PD13_OUT,
@@ -2197,10 +2197,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD3_IN, PD3_OUT,
PD2_IN, PD2_OUT,
PD1_IN, PD1_OUT,
- PD0_IN, PD0_OUT }
+ PD0_IN, PD0_OUT ))
},
- { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4) {
+ { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4, GROUP(
PE7MD_00, PE7MD_01, PE7MD_10, PE7MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2211,9 +2211,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PE4MD_00, PE4MD_01, PE4MD_10, PE4MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PECR0", 0xfffe388e, 16, 4) {
+ { PINMUX_CFG_REG("PECR0", 0xfffe388e, 16, 4, GROUP(
PE3MD_000, PE3MD_001, PE3MD_010, PE3MD_011,
PE3MD_100, PE3MD_101, PE3MD_110, PE3MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2227,9 +2227,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1) {
+ { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PE7_IN, PE7_OUT,
@@ -2239,10 +2239,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE3_IN, PE3_OUT,
PE2_IN, PE2_OUT,
PE1_IN, PE1_OUT,
- PE0_IN, PE0_OUT }
+ PE0_IN, PE0_OUT ))
},
- { PINMUX_CFG_REG("PFCR6", 0xfffe38a2, 16, 4) {
+ { PINMUX_CFG_REG("PFCR6", 0xfffe38a2, 16, 4, GROUP(
PF23MD_000, PF23MD_001, PF23MD_010, PF23MD_011,
PF23MD_100, PF23MD_101, PF23MD_110, PF23MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2257,9 +2257,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF20MD_000, PF20MD_001, PF20MD_010, PF20MD_011,
PF20MD_100, PF20MD_101, PF20MD_110, PF20MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR5", 0xfffe38a4, 16, 4) {
+ { PINMUX_CFG_REG("PFCR5", 0xfffe38a4, 16, 4, GROUP(
PF19MD_000, PF19MD_001, PF19MD_010, PF19MD_011,
PF19MD_100, PF19MD_101, PF19MD_110, PF19MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2274,9 +2274,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF16MD_000, PF16MD_001, PF16MD_010, PF16MD_011,
PF16MD_100, PF16MD_101, PF16MD_110, PF16MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR4", 0xfffe38a6, 16, 4) {
+ { PINMUX_CFG_REG("PFCR4", 0xfffe38a6, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2285,9 +2285,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF15MD_000, PF15MD_001, PF15MD_010, PF15MD_011,
PF15MD_100, PF15MD_101, PF15MD_110, PF15MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) {
+ { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF14MD_000, PF14MD_001, PF14MD_010, PF14MD_011,
@@ -2300,9 +2300,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF12MD_000, PF12MD_001, PF12MD_010, PF12MD_011,
PF12MD_100, PF12MD_101, PF12MD_110, PF12MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR2", 0xfffe38aa, 16, 4) {
+ { PINMUX_CFG_REG("PFCR2", 0xfffe38aa, 16, 4, GROUP(
PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
PF11MD_100, PF11MD_101, PF11MD_110, PF11MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2317,9 +2317,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF8MD_000, PF8MD_001, PF8MD_010, PF8MD_011,
PF8MD_100, PF8MD_101, PF8MD_110, PF8MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR1", 0xfffe38ac, 16, 4) {
+ { PINMUX_CFG_REG("PFCR1", 0xfffe38ac, 16, 4, GROUP(
PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
PF7MD_100, PF7MD_101, PF7MD_110, PF7MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2334,9 +2334,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
PF4MD_100, PF4MD_101, PF4MD_110, PF4MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR0", 0xfffe38ae, 16, 4) {
+ { PINMUX_CFG_REG("PFCR0", 0xfffe38ae, 16, 4, GROUP(
PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
PF3MD_100, PF3MD_101, PF3MD_110, PF3MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2351,10 +2351,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
PF0MD_100, PF0MD_101, PF0MD_110, PF0MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFIOR1", 0xfffe38b0, 16, 1) {
+ { PINMUX_CFG_REG("PFIOR1", 0xfffe38b0, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PF23_IN, PF23_OUT,
@@ -2364,9 +2364,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF19_IN, PF19_OUT,
PF18_IN, PF18_OUT,
PF17_IN, PF17_OUT,
- PF16_IN, PF16_OUT }
+ PF16_IN, PF16_OUT ))
},
- { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) {
+ { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1, GROUP(
PF15_IN, PF15_OUT,
PF14_IN, PF14_OUT,
PF13_IN, PF13_OUT,
@@ -2382,10 +2382,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF3_IN, PF3_OUT,
PF2_IN, PF2_OUT,
PF1_IN, PF1_OUT,
- PF0_IN, PF0_OUT }
+ PF0_IN, PF0_OUT ))
},
- { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4) {
+ { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4, GROUP(
PG27MD_00, PG27MD_01, PG27MD_10, PG27MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2396,9 +2396,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR5", 0xfffe38c4, 16, 4) {
+ { PINMUX_CFG_REG("PGCR5", 0xfffe38c4, 16, 4, GROUP(
PG23MD_000, PG23MD_001, PG23MD_010, PG23MD_011,
PG23MD_100, PG23MD_101, PG23MD_110, PG23MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2413,9 +2413,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
PG20MD_100, PG20MD_101, PG20MD_110, PG20MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4) {
+ { PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4, GROUP(
PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
PG19MD_100, PG19MD_101, PG19MD_110, PG19MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2428,9 +2428,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PG16MD_00, PG16MD_01, PG16MD_10, PG16MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR3", 0xfffe38c8, 16, 4) {
+ { PINMUX_CFG_REG("PGCR3", 0xfffe38c8, 16, 4, GROUP(
PG15MD_00, PG15MD_01, PG15MD_10, PG15MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2441,9 +2441,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PG12MD_00, PG12MD_01, PG12MD_10, PG12MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR2", 0xfffe38ca, 16, 4) {
+ { PINMUX_CFG_REG("PGCR2", 0xfffe38ca, 16, 4, GROUP(
PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
PG11MD_100, PG11MD_101, PG11MD_110, PG11MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2458,10 +2458,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
PG8MD_100, PG8MD_101, PG8MD_110, PG8MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR1", 0xfffe38cc, 16, 4) {
+ { PINMUX_CFG_REG("PGCR1", 0xfffe38cc, 16, 4, GROUP(
PG7MD_000, PG7MD_001, PG7MD_010, PG7MD_011,
PG7MD_100, PG7MD_101, PG7MD_110, PG7MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2476,9 +2476,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG4MD_000, PG4MD_001, PG4MD_010, PG4MD_011,
PG4MD_100, PG4MD_101, PG4MD_110, PG4MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4) {
+ { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4, GROUP(
PG3MD_000, PG3MD_001, PG3MD_010, PG3MD_011,
PG3MD_100, PG3MD_101, PG3MD_110, PG3MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2493,10 +2493,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
PG0MD_100, PG0MD_101, PG0MD_110, PG0MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1) {
+ { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PG27_IN, PG27_OUT,
PG26_IN, PG26_OUT,
@@ -2509,9 +2509,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG19_IN, PG19_OUT,
PG18_IN, PG18_OUT,
PG17_IN, PG17_OUT,
- PG16_IN, PG16_OUT }
+ PG16_IN, PG16_OUT ))
},
- { PINMUX_CFG_REG("PGIOR0", 0xfffe38d2, 16, 1) {
+ { PINMUX_CFG_REG("PGIOR0", 0xfffe38d2, 16, 1, GROUP(
PG15_IN, PG15_OUT,
PG14_IN, PG14_OUT,
PG13_IN, PG13_OUT,
@@ -2527,10 +2527,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG3_IN, PG3_OUT,
PG2_IN, PG2_OUT,
PG1_IN, PG1_OUT,
- PG0_IN, PG0_OUT }
+ PG0_IN, PG0_OUT ))
},
- { PINMUX_CFG_REG("PHCR1", 0xfffe38ec, 16, 4) {
+ { PINMUX_CFG_REG("PHCR1", 0xfffe38ec, 16, 4, GROUP(
PH7MD_00, PH7MD_01, PH7MD_10, PH7MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2541,10 +2541,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PH4MD_00, PH4MD_01, PH4MD_10, PH4MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PHCR0", 0xfffe38ee, 16, 4) {
+ { PINMUX_CFG_REG("PHCR0", 0xfffe38ee, 16, 4, GROUP(
PH3MD_00, PH3MD_01, PH3MD_10, PH3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2555,10 +2555,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PH0MD_00, PH0MD_01, PH0MD_10, PH0MD_11, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR7", 0xfffe3900, 16, 4) {
+ { PINMUX_CFG_REG("PJCR7", 0xfffe3900, 16, 4, GROUP(
PJ31MD_0, PJ31MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2572,9 +2572,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ28MD_000, PJ28MD_001, PJ28MD_010, PJ28MD_011,
PJ28MD_100, PJ28MD_101, PJ28MD_110, PJ28MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR6", 0xfffe3902, 16, 4) {
+ { PINMUX_CFG_REG("PJCR6", 0xfffe3902, 16, 4, GROUP(
PJ27MD_000, PJ27MD_001, PJ27MD_010, PJ27MD_011,
PJ27MD_100, PJ27MD_101, PJ27MD_110, PJ27MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2589,9 +2589,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ24MD_000, PJ24MD_001, PJ24MD_010, PJ24MD_011,
PJ24MD_100, PJ24MD_101, PJ24MD_110, PJ24MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR5", 0xfffe3904, 16, 4) {
+ { PINMUX_CFG_REG("PJCR5", 0xfffe3904, 16, 4, GROUP(
PJ23MD_000, PJ23MD_001, PJ23MD_010, PJ23MD_011,
PJ23MD_100, PJ23MD_101, PJ23MD_110, PJ23MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2606,9 +2606,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ20MD_000, PJ20MD_001, PJ20MD_010, PJ20MD_011,
PJ20MD_100, PJ20MD_101, PJ20MD_110, PJ20MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR4", 0xfffe3906, 16, 4) {
+ { PINMUX_CFG_REG("PJCR4", 0xfffe3906, 16, 4, GROUP(
PJ19MD_000, PJ19MD_001, PJ19MD_010, PJ19MD_011,
PJ19MD_100, PJ19MD_101, PJ19MD_110, PJ19MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2623,9 +2623,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ16MD_000, PJ16MD_001, PJ16MD_010, PJ16MD_011,
PJ16MD_100, PJ16MD_101, PJ16MD_110, PJ16MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR3", 0xfffe3908, 16, 4) {
+ { PINMUX_CFG_REG("PJCR3", 0xfffe3908, 16, 4, GROUP(
PJ15MD_000, PJ15MD_001, PJ15MD_010, PJ15MD_011,
PJ15MD_100, PJ15MD_101, PJ15MD_110, PJ15MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2640,9 +2640,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ12MD_000, PJ12MD_001, PJ12MD_010, PJ12MD_011,
PJ12MD_100, PJ12MD_101, PJ12MD_110, PJ12MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR2", 0xfffe390a, 16, 4) {
+ { PINMUX_CFG_REG("PJCR2", 0xfffe390a, 16, 4, GROUP(
PJ11MD_000, PJ11MD_001, PJ11MD_010, PJ11MD_011,
PJ11MD_100, PJ11MD_101, PJ11MD_110, PJ11MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2657,9 +2657,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ8MD_000, PJ8MD_001, PJ8MD_010, PJ8MD_011,
PJ8MD_100, PJ8MD_101, PJ8MD_110, PJ8MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR1", 0xfffe390c, 16, 4) {
+ { PINMUX_CFG_REG("PJCR1", 0xfffe390c, 16, 4, GROUP(
PJ7MD_000, PJ7MD_001, PJ7MD_010, PJ7MD_011,
PJ7MD_100, PJ7MD_101, PJ7MD_110, PJ7MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2674,9 +2674,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ4MD_000, PJ4MD_001, PJ4MD_010, PJ4MD_011,
PJ4MD_100, PJ4MD_101, PJ4MD_110, PJ4MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJCR0", 0xfffe390e, 16, 4) {
+ { PINMUX_CFG_REG("PJCR0", 0xfffe390e, 16, 4, GROUP(
PJ3MD_000, PJ3MD_001, PJ3MD_010, PJ3MD_011,
PJ3MD_100, PJ3MD_101, PJ3MD_110, PJ3MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2691,10 +2691,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
PJ0MD_100, PJ0MD_101, PJ0MD_110, PJ0MD_111,
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PJIOR1", 0xfffe3910, 16, 1) {
+ { PINMUX_CFG_REG("PJIOR1", 0xfffe3910, 16, 1, GROUP(
PJ31_IN, PJ31_OUT,
PJ30_IN, PJ30_OUT,
PJ29_IN, PJ29_OUT,
@@ -2710,9 +2710,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ19_IN, PJ19_OUT,
PJ18_IN, PJ18_OUT,
PJ17_IN, PJ17_OUT,
- PJ16_IN, PJ16_OUT }
+ PJ16_IN, PJ16_OUT ))
},
- { PINMUX_CFG_REG("PJIOR0", 0xfffe3912, 16, 1) {
+ { PINMUX_CFG_REG("PJIOR0", 0xfffe3912, 16, 1, GROUP(
PJ15_IN, PJ15_OUT,
PJ14_IN, PJ14_OUT,
PJ13_IN, PJ13_OUT,
@@ -2728,86 +2728,86 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ3_IN, PJ3_OUT,
PJ2_IN, PJ2_OUT,
PJ1_IN, PJ1_OUT,
- PJ0_IN, PJ0_OUT }
+ PJ0_IN, PJ0_OUT ))
},
{}
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR0", 0xfffe3816, 16) {
+ { PINMUX_DATA_REG("PADR0", 0xfffe3816, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, PA1_DATA,
- 0, 0, 0, 0, 0, 0, 0, PA0_DATA }
+ 0, 0, 0, 0, 0, 0, 0, PA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR1", 0xfffe3834, 16) {
+ { PINMUX_DATA_REG("PBDR1", 0xfffe3834, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
0, PB22_DATA, PB21_DATA, PB20_DATA,
- PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA }
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA ))
},
- { PINMUX_DATA_REG("PBDR0", 0xfffe3836, 16) {
+ { PINMUX_DATA_REG("PBDR0", 0xfffe3836, 16, GROUP(
PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
- PB3_DATA, PB2_DATA, PB1_DATA, 0 }
+ PB3_DATA, PB2_DATA, PB1_DATA, 0 ))
},
- { PINMUX_DATA_REG("PCDR0", 0xfffe3856, 16) {
+ { PINMUX_DATA_REG("PCDR0", 0xfffe3856, 16, GROUP(
0, 0, 0, 0,
0, 0, 0, PC8_DATA,
PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
- PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR0", 0xfffe3876, 16) {
+ { PINMUX_DATA_REG("PDDR0", 0xfffe3876, 16, GROUP(
PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
- PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR0", 0xfffe3896, 16) {
+ { PINMUX_DATA_REG("PEDR0", 0xfffe3896, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
- PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA ))
},
- { PINMUX_DATA_REG("PFDR1", 0xfffe38b4, 16) {
+ { PINMUX_DATA_REG("PFDR1", 0xfffe38b4, 16, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
- PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA }
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA ))
},
- { PINMUX_DATA_REG("PFDR0", 0xfffe38b6, 16) {
+ { PINMUX_DATA_REG("PFDR0", 0xfffe38b6, 16, GROUP(
PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
- PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR1", 0xfffe38d4, 16) {
+ { PINMUX_DATA_REG("PGDR1", 0xfffe38d4, 16, GROUP(
0, 0, 0, 0,
PG27_DATA, PG26_DATA, PG25_DATA, PG24_DATA,
PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
- PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA }
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA ))
},
- { PINMUX_DATA_REG("PGDR0", 0xfffe38d6, 16) {
+ { PINMUX_DATA_REG("PGDR0", 0xfffe38d6, 16, GROUP(
PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
- PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA ))
},
- { PINMUX_DATA_REG("PJDR1", 0xfffe3914, 16) {
+ { PINMUX_DATA_REG("PJDR1", 0xfffe3914, 16, GROUP(
PJ31_DATA, PJ30_DATA, PJ29_DATA, PJ28_DATA,
PJ27_DATA, PJ26_DATA, PJ25_DATA, PJ24_DATA,
PJ23_DATA, PJ22_DATA, PJ21_DATA, PJ20_DATA,
- PJ19_DATA, PJ18_DATA, PJ17_DATA, PJ16_DATA }
+ PJ19_DATA, PJ18_DATA, PJ17_DATA, PJ16_DATA ))
},
- { PINMUX_DATA_REG("PJDR0", 0xfffe3916, 16) {
+ { PINMUX_DATA_REG("PJDR0", 0xfffe3916, 16, GROUP(
PJ15_DATA, PJ14_DATA, PJ13_DATA, PJ12_DATA,
PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
- PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA ))
},
{ }
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index ef3da8bf1d87..e1276d143117 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3971,7 +3971,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(308, 0xe6052134), /* PORT308CR */
PORTCR(309, 0xe6052135), /* PORT309CR */
- { PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1) {
+ { PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4004,9 +4004,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) {
+ { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -4039,9 +4039,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
0, 0,
0, 0,
- }
+ ))
},
- { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) {
+ { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1, GROUP(
0, 0,
0, 0,
MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
@@ -4074,13 +4074,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
0, 0,
- }
+ ))
},
{ },
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) {
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32, GROUP(
PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
@@ -4088,9 +4088,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
- PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA ))
},
- { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) {
+ { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32, GROUP(
PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
@@ -4098,9 +4098,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
- PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA ))
},
- { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055004, 32) {
+ { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055004, 32, GROUP(
PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
@@ -4108,9 +4108,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
- PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA ))
},
- { PINMUX_DATA_REG("PORTR127_096DR", 0xe6056000, 32) {
+ { PINMUX_DATA_REG("PORTR127_096DR", 0xe6056000, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, PORT118_DATA, PORT117_DATA, PORT116_DATA,
@@ -4118,9 +4118,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
- PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA ))
},
- { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056004, 32) {
+ { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056004, 32, GROUP(
PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
@@ -4128,9 +4128,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
- PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA ))
},
- { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056008, 32) {
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056008, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -4138,9 +4138,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, PORT164_DATA,
- PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA ))
},
- { PINMUX_DATA_REG("PORTR223_192DR", 0xe605600C, 32) {
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe605600C, 32, GROUP(
PORT223_DATA, PORT222_DATA, PORT221_DATA, PORT220_DATA,
PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA,
PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA,
@@ -4148,9 +4148,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
- PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
+ PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA ))
},
- { PINMUX_DATA_REG("PORTU255_224DR", 0xe6057000, 32) {
+ { PINMUX_DATA_REG("PORTU255_224DR", 0xe6057000, 32, GROUP(
PORT255_DATA, PORT254_DATA, PORT253_DATA, PORT252_DATA,
PORT251_DATA, PORT250_DATA, PORT249_DATA, PORT248_DATA,
PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA,
@@ -4158,9 +4158,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA,
PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA,
PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA,
- PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA }
+ PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA ))
},
- { PINMUX_DATA_REG("PORTU287_256DR", 0xe6057004, 32) {
+ { PINMUX_DATA_REG("PORTU287_256DR", 0xe6057004, 32, GROUP(
0, 0, 0, 0,
0, PORT282_DATA, PORT281_DATA, PORT280_DATA,
PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA,
@@ -4168,9 +4168,9 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT271_DATA, PORT270_DATA, PORT269_DATA, PORT268_DATA,
PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA,
PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA,
- PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA }
+ PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA ))
},
- { PINMUX_DATA_REG("PORTR319_288DR", 0xe6056010, 32) {
+ { PINMUX_DATA_REG("PORTR319_288DR", 0xe6056010, 32, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, PORT309_DATA, PORT308_DATA,
@@ -4178,7 +4178,7 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
PORT303_DATA, PORT302_DATA, PORT301_DATA, PORT300_DATA,
PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA,
PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA,
- PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA }
+ PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
index 65694bfaa08d..37bcae6b3208 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7720.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
@@ -925,7 +925,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2, GROUP(
PTA7_FN, PTA7_OUT, 0, PTA7_IN,
PTA6_FN, PTA6_OUT, 0, PTA6_IN,
PTA5_FN, PTA5_OUT, 0, PTA5_IN,
@@ -933,9 +933,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTA3_FN, PTA3_OUT, 0, PTA3_IN,
PTA2_FN, PTA2_OUT, 0, PTA2_IN,
PTA1_FN, PTA1_OUT, 0, PTA1_IN,
- PTA0_FN, PTA0_OUT, 0, PTA0_IN }
+ PTA0_FN, PTA0_OUT, 0, PTA0_IN ))
},
- { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2, GROUP(
PTB7_FN, PTB7_OUT, 0, PTB7_IN,
PTB6_FN, PTB6_OUT, 0, PTB6_IN,
PTB5_FN, PTB5_OUT, 0, PTB5_IN,
@@ -943,9 +943,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTB3_FN, PTB3_OUT, 0, PTB3_IN,
PTB2_FN, PTB2_OUT, 0, PTB2_IN,
PTB1_FN, PTB1_OUT, 0, PTB1_IN,
- PTB0_FN, PTB0_OUT, 0, PTB0_IN }
+ PTB0_FN, PTB0_OUT, 0, PTB0_IN ))
},
- { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2, GROUP(
PTC7_FN, PTC7_OUT, 0, PTC7_IN,
PTC6_FN, PTC6_OUT, 0, PTC6_IN,
PTC5_FN, PTC5_OUT, 0, PTC5_IN,
@@ -953,9 +953,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTC3_FN, PTC3_OUT, 0, PTC3_IN,
PTC2_FN, PTC2_OUT, 0, PTC2_IN,
PTC1_FN, PTC1_OUT, 0, PTC1_IN,
- PTC0_FN, PTC0_OUT, 0, PTC0_IN }
+ PTC0_FN, PTC0_OUT, 0, PTC0_IN ))
},
- { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2, GROUP(
PTD7_FN, PTD7_OUT, 0, PTD7_IN,
PTD6_FN, PTD6_OUT, 0, PTD6_IN,
PTD5_FN, PTD5_OUT, 0, PTD5_IN,
@@ -963,9 +963,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTD3_FN, PTD3_OUT, 0, PTD3_IN,
PTD2_FN, PTD2_OUT, 0, PTD2_IN,
PTD1_FN, PTD1_OUT, 0, PTD1_IN,
- PTD0_FN, PTD0_OUT, 0, PTD0_IN }
+ PTD0_FN, PTD0_OUT, 0, PTD0_IN ))
},
- { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2, GROUP(
0, 0, 0, 0,
PTE6_FN, 0, 0, PTE6_IN,
PTE5_FN, 0, 0, PTE5_IN,
@@ -973,9 +973,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTE3_FN, PTE3_OUT, 0, PTE3_IN,
PTE2_FN, PTE2_OUT, 0, PTE2_IN,
PTE1_FN, PTE1_OUT, 0, PTE1_IN,
- PTE0_FN, PTE0_OUT, 0, PTE0_IN }
+ PTE0_FN, PTE0_OUT, 0, PTE0_IN ))
},
- { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2, GROUP(
0, 0, 0, 0,
PTF6_FN, 0, 0, PTF6_IN,
PTF5_FN, 0, 0, PTF5_IN,
@@ -983,9 +983,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTF3_FN, 0, 0, PTF3_IN,
PTF2_FN, 0, 0, PTF2_IN,
PTF1_FN, 0, 0, PTF1_IN,
- PTF0_FN, 0, 0, PTF0_IN }
+ PTF0_FN, 0, 0, PTF0_IN ))
},
- { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP(
0, 0, 0, 0,
PTG6_FN, PTG6_OUT, 0, PTG6_IN,
PTG5_FN, PTG5_OUT, 0, PTG5_IN,
@@ -993,9 +993,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTG3_FN, PTG3_OUT, 0, PTG3_IN,
PTG2_FN, PTG2_OUT, 0, PTG2_IN,
PTG1_FN, PTG1_OUT, 0, PTG1_IN,
- PTG0_FN, PTG0_OUT, 0, PTG0_IN }
+ PTG0_FN, PTG0_OUT, 0, PTG0_IN ))
},
- { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2, GROUP(
0, 0, 0, 0,
PTH6_FN, PTH6_OUT, 0, PTH6_IN,
PTH5_FN, PTH5_OUT, 0, PTH5_IN,
@@ -1003,9 +1003,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTH3_FN, PTH3_OUT, 0, PTH3_IN,
PTH2_FN, PTH2_OUT, 0, PTH2_IN,
PTH1_FN, PTH1_OUT, 0, PTH1_IN,
- PTH0_FN, PTH0_OUT, 0, PTH0_IN }
+ PTH0_FN, PTH0_OUT, 0, PTH0_IN ))
},
- { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2, GROUP(
0, 0, 0, 0,
PTJ6_FN, PTJ6_OUT, 0, PTJ6_IN,
PTJ5_FN, PTJ5_OUT, 0, PTJ5_IN,
@@ -1013,9 +1013,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
- PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
+ PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN ))
},
- { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1023,9 +1023,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTK3_FN, PTK3_OUT, 0, PTK3_IN,
PTK2_FN, PTK2_OUT, 0, PTK2_IN,
PTK1_FN, PTK1_OUT, 0, PTK1_IN,
- PTK0_FN, PTK0_OUT, 0, PTK0_IN }
+ PTK0_FN, PTK0_OUT, 0, PTK0_IN ))
},
- { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2, GROUP(
PTL7_FN, PTL7_OUT, 0, PTL7_IN,
PTL6_FN, PTL6_OUT, 0, PTL6_IN,
PTL5_FN, PTL5_OUT, 0, PTL5_IN,
@@ -1033,9 +1033,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTL3_FN, PTL3_OUT, 0, PTL3_IN,
0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2, GROUP(
PTM7_FN, PTM7_OUT, 0, PTM7_IN,
PTM6_FN, PTM6_OUT, 0, PTM6_IN,
PTM5_FN, PTM5_OUT, 0, PTM5_IN,
@@ -1043,9 +1043,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTM3_FN, PTM3_OUT, 0, PTM3_IN,
PTM2_FN, PTM2_OUT, 0, PTM2_IN,
PTM1_FN, PTM1_OUT, 0, PTM1_IN,
- PTM0_FN, PTM0_OUT, 0, PTM0_IN }
+ PTM0_FN, PTM0_OUT, 0, PTM0_IN ))
},
- { PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2) {
+ { PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1053,9 +1053,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTP3_FN, PTP3_OUT, 0, PTP3_IN,
PTP2_FN, PTP2_OUT, 0, PTP2_IN,
PTP1_FN, PTP1_OUT, 0, PTP1_IN,
- PTP0_FN, PTP0_OUT, 0, PTP0_IN }
+ PTP0_FN, PTP0_OUT, 0, PTP0_IN ))
},
- { PINMUX_CFG_REG("PRCR", 0xa405011a, 16, 2) {
+ { PINMUX_CFG_REG("PRCR", 0xa405011a, 16, 2, GROUP(
PTR7_FN, PTR7_OUT, 0, PTR7_IN,
PTR6_FN, PTR6_OUT, 0, PTR6_IN,
PTR5_FN, PTR5_OUT, 0, PTR5_IN,
@@ -1063,9 +1063,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTR3_FN, PTR3_OUT, 0, PTR3_IN,
PTR2_FN, PTR2_OUT, 0, PTR2_IN,
PTR1_FN, PTR1_OUT, 0, PTR1_IN,
- PTR0_FN, PTR0_OUT, 0, PTR0_IN }
+ PTR0_FN, PTR0_OUT, 0, PTR0_IN ))
},
- { PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2) {
+ { PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1073,9 +1073,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTS3_FN, PTS3_OUT, 0, PTS3_IN,
PTS2_FN, PTS2_OUT, 0, PTS2_IN,
PTS1_FN, PTS1_OUT, 0, PTS1_IN,
- PTS0_FN, PTS0_OUT, 0, PTS0_IN }
+ PTS0_FN, PTS0_OUT, 0, PTS0_IN ))
},
- { PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2) {
+ { PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1083,9 +1083,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTT3_FN, PTT3_OUT, 0, PTT3_IN,
PTT2_FN, PTT2_OUT, 0, PTT2_IN,
PTT1_FN, PTT1_OUT, 0, PTT1_IN,
- PTT0_FN, PTT0_OUT, 0, PTT0_IN }
+ PTT0_FN, PTT0_OUT, 0, PTT0_IN ))
},
- { PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2) {
+ { PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1093,9 +1093,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTU3_FN, PTU3_OUT, 0, PTU3_IN,
PTU2_FN, PTU2_OUT, 0, PTU2_IN,
PTU1_FN, PTU1_OUT, 0, PTU1_IN,
- PTU0_FN, PTU0_OUT, 0, PTU0_IN }
+ PTU0_FN, PTU0_OUT, 0, PTU0_IN ))
},
- { PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2) {
+ { PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1103,83 +1103,83 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTV3_FN, PTV3_OUT, 0, PTV3_IN,
PTV2_FN, PTV2_OUT, 0, PTV2_IN,
PTV1_FN, PTV1_OUT, 0, PTV1_IN,
- PTV0_FN, PTV0_OUT, 0, PTV0_IN }
+ PTV0_FN, PTV0_OUT, 0, PTV0_IN ))
},
{}
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR", 0xa4050140, 8) {
+ { PINMUX_DATA_REG("PADR", 0xa4050140, 8, GROUP(
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
- PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR", 0xa4050142, 8) {
+ { PINMUX_DATA_REG("PBDR", 0xa4050142, 8, GROUP(
PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
- PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA ))
},
- { PINMUX_DATA_REG("PCDR", 0xa4050144, 8) {
+ { PINMUX_DATA_REG("PCDR", 0xa4050144, 8, GROUP(
PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
- PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8, GROUP(
PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
- PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR", 0xa4050148, 8) {
+ { PINMUX_DATA_REG("PEDR", 0xa4050148, 8, GROUP(
0, PTE6_DATA, PTE5_DATA, PTE4_DATA,
- PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA ))
},
- { PINMUX_DATA_REG("PFDR", 0xa405014a, 8) {
+ { PINMUX_DATA_REG("PFDR", 0xa405014a, 8, GROUP(
0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
- PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR", 0xa405014c, 8) {
+ { PINMUX_DATA_REG("PGDR", 0xa405014c, 8, GROUP(
0, PTG6_DATA, PTG5_DATA, PTG4_DATA,
- PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA ))
},
- { PINMUX_DATA_REG("PHDR", 0xa405014e, 8) {
+ { PINMUX_DATA_REG("PHDR", 0xa405014e, 8, GROUP(
0, PTH6_DATA, PTH5_DATA, PTH4_DATA,
- PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA ))
},
- { PINMUX_DATA_REG("PJDR", 0xa4050150, 8) {
+ { PINMUX_DATA_REG("PJDR", 0xa4050150, 8, GROUP(
0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
- PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA ))
},
- { PINMUX_DATA_REG("PKDR", 0xa4050152, 8) {
+ { PINMUX_DATA_REG("PKDR", 0xa4050152, 8, GROUP(
0, 0, 0, 0,
- PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA ))
},
- { PINMUX_DATA_REG("PLDR", 0xa4050154, 8) {
+ { PINMUX_DATA_REG("PLDR", 0xa4050154, 8, GROUP(
PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
- PTL3_DATA, 0, 0, 0 }
+ PTL3_DATA, 0, 0, 0 ))
},
- { PINMUX_DATA_REG("PMDR", 0xa4050156, 8) {
+ { PINMUX_DATA_REG("PMDR", 0xa4050156, 8, GROUP(
PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
- PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA ))
},
- { PINMUX_DATA_REG("PPDR", 0xa4050158, 8) {
+ { PINMUX_DATA_REG("PPDR", 0xa4050158, 8, GROUP(
0, 0, 0, PTP4_DATA,
- PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA ))
},
- { PINMUX_DATA_REG("PRDR", 0xa405015a, 8) {
+ { PINMUX_DATA_REG("PRDR", 0xa405015a, 8, GROUP(
PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
- PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA ))
},
- { PINMUX_DATA_REG("PSDR", 0xa405015c, 8) {
+ { PINMUX_DATA_REG("PSDR", 0xa405015c, 8, GROUP(
0, 0, 0, PTS4_DATA,
- PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA ))
},
- { PINMUX_DATA_REG("PTDR", 0xa405015e, 8) {
+ { PINMUX_DATA_REG("PTDR", 0xa405015e, 8, GROUP(
0, 0, 0, PTT4_DATA,
- PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA ))
},
- { PINMUX_DATA_REG("PUDR", 0xa4050160, 8) {
+ { PINMUX_DATA_REG("PUDR", 0xa4050160, 8, GROUP(
0, 0, 0, PTU4_DATA,
- PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA ))
},
- { PINMUX_DATA_REG("PVDR", 0xa4050162, 8) {
+ { PINMUX_DATA_REG("PVDR", 0xa4050162, 8, GROUP(
0, 0, 0, PTV4_DATA,
- PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
index 0e733bffdb38..95295be4e703 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7722.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
@@ -1237,7 +1237,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2, GROUP(
VIO_D7_SCIF1_SCK, PTA7_OUT, 0, PTA7_IN,
VIO_D6_SCIF1_RXD, 0, 0, PTA6_IN,
VIO_D5_SCIF1_TXD, PTA5_OUT, 0, PTA5_IN,
@@ -1245,9 +1245,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
VIO_D3, 0, 0, PTA3_IN,
VIO_D2, 0, 0, PTA2_IN,
VIO_D1, 0, 0, PTA1_IN,
- VIO_D0_LCDLCLK, 0, 0, PTA0_IN }
+ VIO_D0_LCDLCLK, 0, 0, PTA0_IN ))
},
- { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2, GROUP(
HPD55, PTB7_OUT, 0, PTB7_IN,
HPD54, PTB6_OUT, 0, PTB6_IN,
HPD53, PTB5_OUT, 0, PTB5_IN,
@@ -1255,9 +1255,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
HPD51, PTB3_OUT, 0, PTB3_IN,
HPD50, PTB2_OUT, 0, PTB2_IN,
HPD49, PTB1_OUT, 0, PTB1_IN,
- HPD48, PTB0_OUT, 0, PTB0_IN }
+ HPD48, PTB0_OUT, 0, PTB0_IN ))
},
- { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2, GROUP(
0, 0, 0, PTC7_IN,
0, 0, 0, 0,
IOIS16, 0, 0, PTC5_IN,
@@ -1265,9 +1265,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
HPDQM6, PTC3_OUT, 0, PTC3_IN,
HPDQM5, PTC2_OUT, 0, PTC2_IN,
0, 0, 0, 0,
- HPDQM4, PTC0_OUT, 0, PTC0_IN }
+ HPDQM4, PTC0_OUT, 0, PTC0_IN ))
},
- { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2, GROUP(
SDHICD, 0, 0, PTD7_IN,
SDHIWP, PTD6_OUT, 0, PTD6_IN,
SDHID3, PTD5_OUT, 0, PTD5_IN,
@@ -1275,9 +1275,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
SDHID1, PTD3_OUT, 0, PTD3_IN,
SDHID0, PTD2_OUT, 0, PTD2_IN,
SDHICMD, PTD1_OUT, 0, PTD1_IN,
- SDHICLK, PTD0_OUT, 0, 0 }
+ SDHICLK, PTD0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2, GROUP(
A25, PTE7_OUT, 0, PTE7_IN,
A24, PTE6_OUT, 0, PTE6_IN,
A23, PTE5_OUT, 0, PTE5_IN,
@@ -1285,9 +1285,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
IRQ5, PTE1_OUT, 0, PTE1_IN,
- IRQ4_BS, PTE0_OUT, 0, PTE0_IN }
+ IRQ4_BS, PTE0_OUT, 0, PTE0_IN ))
},
- { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2, GROUP(
0, 0, 0, 0,
PTF6, PTF6_OUT, 0, PTF6_IN,
SIOSCK_SIUBOBT, PTF5_OUT, 0, PTF5_IN,
@@ -1295,9 +1295,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
SIOSTRB0_SIUBIBT, PTF3_OUT, 0, PTF3_IN,
SIOD_SIUBILR, PTF2_OUT, 0, PTF2_IN,
SIORXD_SIUBISLD, 0, 0, PTF1_IN,
- SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 }
+ SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1305,9 +1305,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
AUDATA3, PTG3_OUT, 0, 0,
AUDATA2, PTG2_OUT, 0, 0,
AUDATA1, PTG1_OUT, 0, 0,
- AUDATA0, PTG0_OUT, 0, 0 }
+ AUDATA0, PTG0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2, GROUP(
LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0,
LCDVSYN2_DACK, PTH6_OUT, 0, PTH6_IN,
LCDVSYN, PTH5_OUT, 0, PTH5_IN,
@@ -1315,9 +1315,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
LCDHSYN_LCDCS, PTH3_OUT, 0, 0,
LCDDON_LCDDON2, PTH2_OUT, 0, 0,
LCDD17_DV_HSYNC, PTH1_OUT, 0, PTH1_IN,
- LCDD16_DV_VSYNC, PTH0_OUT, 0, PTH0_IN }
+ LCDD16_DV_VSYNC, PTH0_OUT, 0, PTH0_IN ))
},
- { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2, GROUP(
STATUS0, PTJ7_OUT, 0, 0,
0, PTJ6_OUT, 0, 0,
PDSTATUS, PTJ5_OUT, 0, 0,
@@ -1325,9 +1325,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
IRQ1, PTJ1_OUT, 0, PTJ1_IN,
- IRQ0, PTJ0_OUT, 0, PTJ0_IN }
+ IRQ0, PTJ0_OUT, 0, PTJ0_IN ))
},
- { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2, GROUP(
0, 0, 0, 0,
SIUAILR_SIOF1_SS2, PTK6_OUT, 0, PTK6_IN,
SIUAIBT_SIOF1_SS1, PTK5_OUT, 0, PTK5_IN,
@@ -1335,9 +1335,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
SIUAOBT_SIOF1_SCK, PTK3_OUT, 0, PTK3_IN,
SIUAISLD_SIOF1_RXD, 0, 0, PTK2_IN,
SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0,
- PTK0, PTK0_OUT, 0, PTK0_IN }
+ PTK0, PTK0_OUT, 0, PTK0_IN ))
},
- { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2, GROUP(
LCDD15_DV_D15, PTL7_OUT, 0, PTL7_IN,
LCDD14_DV_D14, PTL6_OUT, 0, PTL6_IN,
LCDD13_DV_D13, PTL5_OUT, 0, PTL5_IN,
@@ -1345,9 +1345,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
LCDD11_DV_D11, PTL3_OUT, 0, PTL3_IN,
LCDD10_DV_D10, PTL2_OUT, 0, PTL2_IN,
LCDD9_DV_D9, PTL1_OUT, 0, PTL1_IN,
- LCDD8_DV_D8, PTL0_OUT, 0, PTL0_IN }
+ LCDD8_DV_D8, PTL0_OUT, 0, PTL0_IN ))
},
- { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2, GROUP(
LCDD7_DV_D7, PTM7_OUT, 0, PTM7_IN,
LCDD6_DV_D6, PTM6_OUT, 0, PTM6_IN,
LCDD5_DV_D5, PTM5_OUT, 0, PTM5_IN,
@@ -1355,9 +1355,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
LCDD3_DV_D3, PTM3_OUT, 0, PTM3_IN,
LCDD2_DV_D2, PTM2_OUT, 0, PTM2_IN,
LCDD1_DV_D1, PTM1_OUT, 0, PTM1_IN,
- LCDD0_DV_D0, PTM0_OUT, 0, PTM0_IN }
+ LCDD0_DV_D0, PTM0_OUT, 0, PTM0_IN ))
},
- { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2, GROUP(
HPD63, PTN7_OUT, 0, PTN7_IN,
HPD62, PTN6_OUT, 0, PTN6_IN,
HPD61, PTN5_OUT, 0, PTN5_IN,
@@ -1365,9 +1365,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
HPD59, PTN3_OUT, 0, PTN3_IN,
HPD58, PTN2_OUT, 0, PTN2_IN,
HPD57, PTN1_OUT, 0, PTN1_IN,
- HPD56, PTN0_OUT, 0, PTN0_IN }
+ HPD56, PTN0_OUT, 0, PTN0_IN ))
},
- { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2, GROUP(
0, 0, 0, 0,
SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0,
SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, 0, PTQ5_IN,
@@ -1375,9 +1375,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
SIOF0_SCK_TS_SCK, PTQ3_OUT, 0, PTQ3_IN,
PTQ2, 0, 0, PTQ2_IN,
PTQ1, PTQ1_OUT, 0, 0,
- PTQ0, PTQ0_OUT, 0, PTQ0_IN }
+ PTQ0, PTQ0_OUT, 0, PTQ0_IN ))
},
- { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1385,9 +1385,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0,
WAIT, 0, 0, PTR2_IN,
LCDDCK_LCDWR, PTR1_OUT, 0, 0,
- LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 }
+ LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1395,9 +1395,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0,
SCIF0_SCK_TPUTO, PTS2_OUT, 0, PTS2_IN,
SCIF0_RXD, 0, 0, PTS1_IN,
- SCIF0_TXD, PTS0_OUT, 0, 0 }
+ SCIF0_TXD, PTS0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1405,9 +1405,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FWE, PTT3_OUT, 0, PTT3_IN,
FSC, PTT2_OUT, 0, PTT2_IN,
DREQ0, 0, 0, PTT1_IN,
- FCDE, PTT0_OUT, 0, 0 }
+ FCDE, PTT0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1415,9 +1415,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
NAF1_VIO_D9, PTU3_OUT, 0, PTU3_IN,
NAF0_VIO_D8, PTU2_OUT, 0, PTU2_IN,
FRB_VIO_CLK2, 0, 0, PTU1_IN,
- FCE_VIO_HD2, PTU0_OUT, 0, PTU0_IN }
+ FCE_VIO_HD2, PTU0_OUT, 0, PTU0_IN ))
},
- { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1425,9 +1425,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
NAF6_VIO_D14, PTV3_OUT, 0, PTV3_IN,
NAF5_VIO_D13, PTV2_OUT, 0, PTV2_IN,
NAF4_VIO_D12, PTV1_OUT, 0, PTV1_IN,
- NAF3_VIO_D11, PTV0_OUT, 0, PTV0_IN }
+ NAF3_VIO_D11, PTV0_OUT, 0, PTV0_IN ))
},
- { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2, GROUP(
0, 0, 0, 0,
VIO_FLD_SCIF2_CTS, 0, 0, PTW6_IN,
VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0,
@@ -1435,9 +1435,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
VIO_STEM_SCIF2_TXD, PTW3_OUT, 0, PTW3_IN,
VIO_HD_SCIF2_RXD, PTW2_OUT, 0, PTW2_IN,
VIO_VD_SCIF1_CTS, PTW1_OUT, 0, PTW1_IN,
- VIO_CLK_SCIF1_RTS, PTW0_OUT, 0, PTW0_IN }
+ VIO_CLK_SCIF1_RTS, PTW0_OUT, 0, PTW0_IN ))
},
- { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2, GROUP(
0, 0, 0, 0,
CS6A_CE2B, PTX6_OUT, 0, PTX6_IN,
LCDD23, PTX5_OUT, 0, PTX5_IN,
@@ -1445,9 +1445,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
LCDD21, PTX3_OUT, 0, PTX3_IN,
LCDD20, PTX2_OUT, 0, PTX2_IN,
LCDD19_DV_CLKI, PTX1_OUT, 0, PTX1_IN,
- LCDD18_DV_CLK, PTX0_OUT, 0, PTX0_IN }
+ LCDD18_DV_CLK, PTX0_OUT, 0, PTX0_IN ))
},
- { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
KEYOUT5_IN5, PTY5_OUT, 0, PTY5_IN,
@@ -1455,9 +1455,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
KEYOUT3, PTY3_OUT, 0, PTY3_IN,
KEYOUT2, PTY2_OUT, 0, PTY2_IN,
KEYOUT1, PTY1_OUT, 0, 0,
- KEYOUT0, PTY0_OUT, 0, PTY0_IN }
+ KEYOUT0, PTY0_OUT, 0, PTY0_IN ))
},
- { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
KEYIN4_IRQ7, 0, 0, PTZ5_IN,
@@ -1465,9 +1465,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
KEYIN2, 0, 0, PTZ3_IN,
KEYIN1, 0, 0, PTZ2_IN,
KEYIN0_IRQ6, 0, 0, PTZ1_IN,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1, GROUP(
PSA15_KEYIN0, PSA15_IRQ6,
PSA14_KEYIN4, PSA14_IRQ7,
0, 0,
@@ -1483,9 +1483,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0 }
+ 0, 0 ))
},
- { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1, GROUP(
PSB15_SIOTXD, PSB15_SIUBOSLD,
PSB14_SIORXD, PSB14_SIUBISLD,
PSB13_SIOD, PSB13_SIUBILR,
@@ -1501,9 +1501,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSB3_SIOF0_SS1, PSB3_TS_SPSYNC,
PSB2_SIOF0_SS2, PSB2_SIM_RST,
PSB1_SIUMCKA, PSB1_SIOF1_MCK,
- PSB0_SIUAOSLD, PSB0_SIOF1_TXD }
+ PSB0_SIUAOSLD, PSB0_SIOF1_TXD ))
},
- { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1, GROUP(
PSC15_SIUAISLD, PSC15_SIOF1_RXD,
PSC14_SIUAOBT, PSC14_SIOF1_SCK,
PSC13_SIUAOLR, PSC13_SIOF1_SYNC,
@@ -1519,9 +1519,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- PSC0_NAF, PSC0_VIO }
+ PSC0_NAF, PSC0_VIO ))
},
- { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1, GROUP(
0, 0,
0, 0,
PSD13_VIO, PSD13_SCIF2,
@@ -1537,9 +1537,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
PSD2_LCDDON, PSD2_LCDDON2,
0, 0,
- PSD0_LCDD19_LCDD0, PSD0_DV }
+ PSD0_LCDD19_LCDD0, PSD0_DV ))
},
- { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+ { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1, GROUP(
PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT,
@@ -1555,9 +1555,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSE3_FLCTL, PSE3_VIO,
PSE2_NAF2, PSE2_VIO_D10,
PSE1_NAF1, PSE1_VIO_D9,
- PSE0_NAF0, PSE0_VIO_D8 }
+ PSE0_NAF0, PSE0_VIO_D8 ))
},
- { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1) {
+ { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1, GROUP(
0, 0,
HIZA14_KEYSC, HIZA14_HIZ,
0, 0,
@@ -1573,9 +1573,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0 }
+ 0, 0 ))
},
- { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1) {
+ { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -1591,9 +1591,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
HIZB1_VIO, HIZB1_HIZ,
- HIZB0_VIO, HIZB0_HIZ }
+ HIZB0_VIO, HIZB0_HIZ ))
},
- { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1) {
+ { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1, GROUP(
HIZC15_IRQ7, HIZC15_HIZ,
HIZC14_IRQ6, HIZC14_HIZ,
HIZC13_IRQ5, HIZC13_HIZ,
@@ -1609,9 +1609,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0 }
+ 0, 0 ))
},
- { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1) {
+ { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -1627,103 +1627,103 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0 }
+ 0, 0 ))
},
{}
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8, GROUP(
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
- PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8, GROUP(
PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
- PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA ))
},
- { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8, GROUP(
PTC7_DATA, 0, PTC5_DATA, PTC4_DATA,
- PTC3_DATA, PTC2_DATA, 0, PTC0_DATA }
+ PTC3_DATA, PTC2_DATA, 0, PTC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8, GROUP(
PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
- PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8, GROUP(
PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
- 0, 0, PTE1_DATA, PTE0_DATA }
+ 0, 0, PTE1_DATA, PTE0_DATA ))
},
- { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8, GROUP(
0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
- PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8, GROUP(
0, 0, 0, PTG4_DATA,
- PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA ))
},
- { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8, GROUP(
PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
- PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA ))
},
- { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8, GROUP(
PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
- 0, 0, PTJ1_DATA, PTJ0_DATA }
+ 0, 0, PTJ1_DATA, PTJ0_DATA ))
},
- { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8, GROUP(
0, PTK6_DATA, PTK5_DATA, PTK4_DATA,
- PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA ))
},
- { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8, GROUP(
PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
- PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA ))
},
- { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8, GROUP(
PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
- PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA ))
},
- { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8, GROUP(
PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
- PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA ))
},
- { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8, GROUP(
0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
- PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA ))
},
- { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8, GROUP(
0, 0, 0, PTR4_DATA,
- PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA ))
},
- { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8, GROUP(
0, 0, 0, PTS4_DATA,
- PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA ))
},
- { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8, GROUP(
0, 0, 0, PTT4_DATA,
- PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA ))
},
- { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8, GROUP(
0, 0, 0, PTU4_DATA,
- PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA ))
},
- { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8, GROUP(
0, 0, 0, PTV4_DATA,
- PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA ))
},
- { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8, GROUP(
0, PTW6_DATA, PTW5_DATA, PTW4_DATA,
- PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA ))
},
- { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8, GROUP(
0, PTX6_DATA, PTX5_DATA, PTX4_DATA,
- PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA ))
},
- { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8, GROUP(
0, PTY6_DATA, PTY5_DATA, PTY4_DATA,
- PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA ))
},
- { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8, GROUP(
0, 0, PTZ5_DATA, PTZ4_DATA,
- PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
index 86f9a88726b7..6f08f527c010 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7723.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
@@ -1507,7 +1507,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2, GROUP(
PTA7_FN, PTA7_OUT, 0, PTA7_IN,
PTA6_FN, PTA6_OUT, 0, PTA6_IN,
PTA5_FN, PTA5_OUT, 0, PTA5_IN,
@@ -1515,9 +1515,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTA3_FN, PTA3_OUT, 0, PTA3_IN,
PTA2_FN, PTA2_OUT, 0, PTA2_IN,
PTA1_FN, PTA1_OUT, 0, PTA1_IN,
- PTA0_FN, PTA0_OUT, 0, PTA0_IN }
+ PTA0_FN, PTA0_OUT, 0, PTA0_IN ))
},
- { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2, GROUP(
PTB7_FN, PTB7_OUT, 0, PTB7_IN,
PTB6_FN, PTB6_OUT, 0, PTB6_IN,
PTB5_FN, PTB5_OUT, 0, PTB5_IN,
@@ -1525,9 +1525,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTB3_FN, PTB3_OUT, 0, PTB3_IN,
PTB2_FN, PTB2_OUT, 0, PTB2_IN,
PTB1_FN, PTB1_OUT, 0, PTB1_IN,
- PTB0_FN, PTB0_OUT, 0, PTB0_IN }
+ PTB0_FN, PTB0_OUT, 0, PTB0_IN ))
},
- { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2, GROUP(
PTC7_FN, PTC7_OUT, 0, PTC7_IN,
PTC6_FN, PTC6_OUT, 0, PTC6_IN,
PTC5_FN, PTC5_OUT, 0, PTC5_IN,
@@ -1535,9 +1535,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTC3_FN, PTC3_OUT, 0, PTC3_IN,
PTC2_FN, PTC2_OUT, 0, PTC2_IN,
PTC1_FN, PTC1_OUT, 0, PTC1_IN,
- PTC0_FN, PTC0_OUT, 0, PTC0_IN }
+ PTC0_FN, PTC0_OUT, 0, PTC0_IN ))
},
- { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2, GROUP(
PTD7_FN, PTD7_OUT, 0, PTD7_IN,
PTD6_FN, PTD6_OUT, 0, PTD6_IN,
PTD5_FN, PTD5_OUT, 0, PTD5_IN,
@@ -1545,9 +1545,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTD3_FN, PTD3_OUT, 0, PTD3_IN,
PTD2_FN, PTD2_OUT, 0, PTD2_IN,
PTD1_FN, PTD1_OUT, 0, PTD1_IN,
- PTD0_FN, PTD0_OUT, 0, PTD0_IN }
+ PTD0_FN, PTD0_OUT, 0, PTD0_IN ))
},
- { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
PTE5_FN, PTE5_OUT, 0, PTE5_IN,
@@ -1555,9 +1555,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTE3_FN, PTE3_OUT, 0, PTE3_IN,
PTE2_FN, PTE2_OUT, 0, PTE2_IN,
PTE1_FN, PTE1_OUT, 0, PTE1_IN,
- PTE0_FN, PTE0_OUT, 0, PTE0_IN }
+ PTE0_FN, PTE0_OUT, 0, PTE0_IN ))
},
- { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2, GROUP(
PTF7_FN, PTF7_OUT, 0, PTF7_IN,
PTF6_FN, PTF6_OUT, 0, PTF6_IN,
PTF5_FN, PTF5_OUT, 0, PTF5_IN,
@@ -1565,9 +1565,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTF3_FN, PTF3_OUT, 0, PTF3_IN,
PTF2_FN, PTF2_OUT, 0, PTF2_IN,
PTF1_FN, PTF1_OUT, 0, PTF1_IN,
- PTF0_FN, PTF0_OUT, 0, PTF0_IN }
+ PTF0_FN, PTF0_OUT, 0, PTF0_IN ))
},
- { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
PTG5_FN, PTG5_OUT, 0, 0,
@@ -1575,9 +1575,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTG3_FN, PTG3_OUT, 0, 0,
PTG2_FN, PTG2_OUT, 0, 0,
PTG1_FN, PTG1_OUT, 0, 0,
- PTG0_FN, PTG0_OUT, 0, 0 }
+ PTG0_FN, PTG0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2, GROUP(
PTH7_FN, PTH7_OUT, 0, PTH7_IN,
PTH6_FN, PTH6_OUT, 0, PTH6_IN,
PTH5_FN, PTH5_OUT, 0, PTH5_IN,
@@ -1585,9 +1585,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTH3_FN, PTH3_OUT, 0, PTH3_IN,
PTH2_FN, PTH2_OUT, 0, PTH2_IN,
PTH1_FN, PTH1_OUT, 0, PTH1_IN,
- PTH0_FN, PTH0_OUT, 0, PTH0_IN }
+ PTH0_FN, PTH0_OUT, 0, PTH0_IN ))
},
- { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2, GROUP(
PTJ7_FN, PTJ7_OUT, 0, 0,
0, 0, 0, 0,
PTJ5_FN, PTJ5_OUT, 0, 0,
@@ -1595,9 +1595,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
- PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
+ PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN ))
},
- { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2, GROUP(
PTK7_FN, PTK7_OUT, 0, PTK7_IN,
PTK6_FN, PTK6_OUT, 0, PTK6_IN,
PTK5_FN, PTK5_OUT, 0, PTK5_IN,
@@ -1605,9 +1605,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTK3_FN, PTK3_OUT, 0, PTK3_IN,
PTK2_FN, PTK2_OUT, 0, PTK2_IN,
PTK1_FN, PTK1_OUT, 0, PTK1_IN,
- PTK0_FN, PTK0_OUT, 0, PTK0_IN }
+ PTK0_FN, PTK0_OUT, 0, PTK0_IN ))
},
- { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2, GROUP(
PTL7_FN, PTL7_OUT, 0, PTL7_IN,
PTL6_FN, PTL6_OUT, 0, PTL6_IN,
PTL5_FN, PTL5_OUT, 0, PTL5_IN,
@@ -1615,9 +1615,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTL3_FN, PTL3_OUT, 0, PTL3_IN,
PTL2_FN, PTL2_OUT, 0, PTL2_IN,
PTL1_FN, PTL1_OUT, 0, PTL1_IN,
- PTL0_FN, PTL0_OUT, 0, PTL0_IN }
+ PTL0_FN, PTL0_OUT, 0, PTL0_IN ))
},
- { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2, GROUP(
PTM7_FN, PTM7_OUT, 0, PTM7_IN,
PTM6_FN, PTM6_OUT, 0, PTM6_IN,
PTM5_FN, PTM5_OUT, 0, PTM5_IN,
@@ -1625,9 +1625,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTM3_FN, PTM3_OUT, 0, PTM3_IN,
PTM2_FN, PTM2_OUT, 0, PTM2_IN,
PTM1_FN, PTM1_OUT, 0, PTM1_IN,
- PTM0_FN, PTM0_OUT, 0, PTM0_IN }
+ PTM0_FN, PTM0_OUT, 0, PTM0_IN ))
},
- { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2, GROUP(
PTN7_FN, PTN7_OUT, 0, PTN7_IN,
PTN6_FN, PTN6_OUT, 0, PTN6_IN,
PTN5_FN, PTN5_OUT, 0, PTN5_IN,
@@ -1635,9 +1635,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTN3_FN, PTN3_OUT, 0, PTN3_IN,
PTN2_FN, PTN2_OUT, 0, PTN2_IN,
PTN1_FN, PTN1_OUT, 0, PTN1_IN,
- PTN0_FN, PTN0_OUT, 0, PTN0_IN }
+ PTN0_FN, PTN0_OUT, 0, PTN0_IN ))
},
- { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1645,9 +1645,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTQ3_FN, 0, 0, PTQ3_IN,
PTQ2_FN, 0, 0, PTQ2_IN,
PTQ1_FN, 0, 0, PTQ1_IN,
- PTQ0_FN, 0, 0, PTQ0_IN }
+ PTQ0_FN, 0, 0, PTQ0_IN ))
},
- { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2, GROUP(
PTR7_FN, PTR7_OUT, 0, PTR7_IN,
PTR6_FN, PTR6_OUT, 0, PTR6_IN,
PTR5_FN, PTR5_OUT, 0, PTR5_IN,
@@ -1655,9 +1655,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTR3_FN, 0, 0, PTR3_IN,
PTR2_FN, 0, 0, PTR2_IN,
PTR1_FN, PTR1_OUT, 0, PTR1_IN,
- PTR0_FN, PTR0_OUT, 0, PTR0_IN }
+ PTR0_FN, PTR0_OUT, 0, PTR0_IN ))
},
- { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2, GROUP(
PTS7_FN, PTS7_OUT, 0, PTS7_IN,
PTS6_FN, PTS6_OUT, 0, PTS6_IN,
PTS5_FN, PTS5_OUT, 0, PTS5_IN,
@@ -1665,9 +1665,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTS3_FN, PTS3_OUT, 0, PTS3_IN,
PTS2_FN, PTS2_OUT, 0, PTS2_IN,
PTS1_FN, PTS1_OUT, 0, PTS1_IN,
- PTS0_FN, PTS0_OUT, 0, PTS0_IN }
+ PTS0_FN, PTS0_OUT, 0, PTS0_IN ))
},
- { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
PTT5_FN, PTT5_OUT, 0, PTT5_IN,
@@ -1675,9 +1675,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTT3_FN, PTT3_OUT, 0, PTT3_IN,
PTT2_FN, PTT2_OUT, 0, PTT2_IN,
PTT1_FN, PTT1_OUT, 0, PTT1_IN,
- PTT0_FN, PTT0_OUT, 0, PTT0_IN }
+ PTT0_FN, PTT0_OUT, 0, PTT0_IN ))
},
- { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
PTU5_FN, PTU5_OUT, 0, PTU5_IN,
@@ -1685,9 +1685,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTU3_FN, PTU3_OUT, 0, PTU3_IN,
PTU2_FN, PTU2_OUT, 0, PTU2_IN,
PTU1_FN, PTU1_OUT, 0, PTU1_IN,
- PTU0_FN, PTU0_OUT, 0, PTU0_IN }
+ PTU0_FN, PTU0_OUT, 0, PTU0_IN ))
},
- { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2, GROUP(
PTV7_FN, PTV7_OUT, 0, PTV7_IN,
PTV6_FN, PTV6_OUT, 0, PTV6_IN,
PTV5_FN, PTV5_OUT, 0, PTV5_IN,
@@ -1695,9 +1695,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTV3_FN, PTV3_OUT, 0, PTV3_IN,
PTV2_FN, PTV2_OUT, 0, PTV2_IN,
PTV1_FN, PTV1_OUT, 0, PTV1_IN,
- PTV0_FN, PTV0_OUT, 0, PTV0_IN }
+ PTV0_FN, PTV0_OUT, 0, PTV0_IN ))
},
- { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2, GROUP(
PTW7_FN, PTW7_OUT, 0, PTW7_IN,
PTW6_FN, PTW6_OUT, 0, PTW6_IN,
PTW5_FN, PTW5_OUT, 0, PTW5_IN,
@@ -1705,9 +1705,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTW3_FN, PTW3_OUT, 0, PTW3_IN,
PTW2_FN, PTW2_OUT, 0, PTW2_IN,
PTW1_FN, PTW1_OUT, 0, PTW1_IN,
- PTW0_FN, PTW0_OUT, 0, PTW0_IN }
+ PTW0_FN, PTW0_OUT, 0, PTW0_IN ))
},
- { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2, GROUP(
PTX7_FN, PTX7_OUT, 0, PTX7_IN,
PTX6_FN, PTX6_OUT, 0, PTX6_IN,
PTX5_FN, PTX5_OUT, 0, PTX5_IN,
@@ -1715,9 +1715,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTX3_FN, PTX3_OUT, 0, PTX3_IN,
PTX2_FN, PTX2_OUT, 0, PTX2_IN,
PTX1_FN, PTX1_OUT, 0, PTX1_IN,
- PTX0_FN, PTX0_OUT, 0, PTX0_IN }
+ PTX0_FN, PTX0_OUT, 0, PTX0_IN ))
},
- { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2, GROUP(
PTY7_FN, PTY7_OUT, 0, PTY7_IN,
PTY6_FN, PTY6_OUT, 0, PTY6_IN,
PTY5_FN, PTY5_OUT, 0, PTY5_IN,
@@ -1725,9 +1725,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTY3_FN, PTY3_OUT, 0, PTY3_IN,
PTY2_FN, PTY2_OUT, 0, PTY2_IN,
PTY1_FN, PTY1_OUT, 0, PTY1_IN,
- PTY0_FN, PTY0_OUT, 0, PTY0_IN }
+ PTY0_FN, PTY0_OUT, 0, PTY0_IN ))
},
- { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2, GROUP(
PTZ7_FN, PTZ7_OUT, 0, PTZ7_IN,
PTZ6_FN, PTZ6_OUT, 0, PTZ6_IN,
PTZ5_FN, PTZ5_OUT, 0, PTZ5_IN,
@@ -1735,9 +1735,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTZ3_FN, PTZ3_OUT, 0, PTZ3_IN,
PTZ2_FN, PTZ2_OUT, 0, PTZ2_IN,
PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
- PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN }
+ PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN ))
},
- { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2) {
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2, GROUP(
PSA15_PSA14_FN1, PSA15_PSA14_FN2, 0, 0,
PSA13_PSA12_FN1, PSA13_PSA12_FN2, 0, 0,
PSA11_PSA10_FN1, PSA11_PSA10_FN2, 0, 0,
@@ -1745,9 +1745,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3, 0,
PSA3_PSA2_FN1, PSA3_PSA2_FN2, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2) {
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2, GROUP(
PSB15_PSB14_FN1, PSB15_PSB14_FN2, 0, 0,
PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS, 0, 0,
0, 0, 0, 0,
@@ -1755,9 +1755,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSB7_PSB6_FN1, PSB7_PSB6_FN2, 0, 0,
PSB5_PSB4_FN1, PSB5_PSB4_FN2, 0, 0,
PSB3_PSB2_FN1, PSB3_PSB2_FN2, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2) {
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2, GROUP(
PSC15_PSC14_FN1, PSC15_PSC14_FN2, 0, 0,
PSC13_PSC12_FN1, PSC13_PSC12_FN2, 0, 0,
PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3, 0,
@@ -1765,9 +1765,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2) {
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2, GROUP(
PSD15_PSD14_FN1, PSD15_PSD14_FN2, 0, 0,
PSD13_PSD12_FN1, PSD13_PSD12_FN2, 0, 0,
PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3, 0,
@@ -1775,103 +1775,103 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSD7_PSD6_FN1, PSD7_PSD6_FN2, 0, 0,
PSD5_PSD4_FN1, PSD5_PSD4_FN2, 0, 0,
PSD3_PSD2_FN1, PSD3_PSD2_FN2, 0, 0,
- PSD1_PSD0_FN1, PSD1_PSD0_FN2, 0, 0 }
+ PSD1_PSD0_FN1, PSD1_PSD0_FN2, 0, 0 ))
},
{}
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8, GROUP(
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
- PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8, GROUP(
PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
- PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA ))
},
- { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8, GROUP(
PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
- PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8, GROUP(
PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
- PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8, GROUP(
0, 0, PTE5_DATA, PTE4_DATA,
- PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA ))
},
- { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8, GROUP(
PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
- PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8, GROUP(
0, 0, PTG5_DATA, PTG4_DATA,
- PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA ))
},
- { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8, GROUP(
PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
- PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA ))
},
- { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8, GROUP(
PTJ7_DATA, 0, PTJ5_DATA, 0,
- PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA ))
},
- { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8, GROUP(
PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
- PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA ))
},
- { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8, GROUP(
PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
- PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA ))
},
- { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8, GROUP(
PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
- PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA ))
},
- { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8, GROUP(
PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
- PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA ))
},
- { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8, GROUP(
0, 0, 0, 0,
- PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA ))
},
- { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8, GROUP(
PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
- PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA ))
},
- { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8, GROUP(
PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
- PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA ))
},
- { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8, GROUP(
0, 0, PTT5_DATA, PTT4_DATA,
- PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA ))
},
- { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8, GROUP(
0, 0, PTU5_DATA, PTU4_DATA,
- PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA ))
},
- { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8, GROUP(
PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
- PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA ))
},
- { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8, GROUP(
PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
- PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA ))
},
- { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8, GROUP(
PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
- PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA ))
},
- { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8, GROUP(
PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
- PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA ))
},
- { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8, GROUP(
PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
- PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
index 2cc4aa7df613..7a18afecda2c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7724.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
@@ -1739,7 +1739,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2, GROUP(
PTA7_FN, PTA7_OUT, 0, PTA7_IN,
PTA6_FN, PTA6_OUT, 0, PTA6_IN,
PTA5_FN, PTA5_OUT, 0, PTA5_IN,
@@ -1747,9 +1747,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTA3_FN, PTA3_OUT, 0, PTA3_IN,
PTA2_FN, PTA2_OUT, 0, PTA2_IN,
PTA1_FN, PTA1_OUT, 0, PTA1_IN,
- PTA0_FN, PTA0_OUT, 0, PTA0_IN }
+ PTA0_FN, PTA0_OUT, 0, PTA0_IN ))
},
- { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2, GROUP(
PTB7_FN, PTB7_OUT, 0, PTB7_IN,
PTB6_FN, PTB6_OUT, 0, PTB6_IN,
PTB5_FN, PTB5_OUT, 0, PTB5_IN,
@@ -1757,9 +1757,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTB3_FN, PTB3_OUT, 0, PTB3_IN,
PTB2_FN, PTB2_OUT, 0, PTB2_IN,
PTB1_FN, PTB1_OUT, 0, PTB1_IN,
- PTB0_FN, PTB0_OUT, 0, PTB0_IN }
+ PTB0_FN, PTB0_OUT, 0, PTB0_IN ))
},
- { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2, GROUP(
PTC7_FN, PTC7_OUT, 0, PTC7_IN,
PTC6_FN, PTC6_OUT, 0, PTC6_IN,
PTC5_FN, PTC5_OUT, 0, PTC5_IN,
@@ -1767,9 +1767,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTC3_FN, PTC3_OUT, 0, PTC3_IN,
PTC2_FN, PTC2_OUT, 0, PTC2_IN,
PTC1_FN, PTC1_OUT, 0, PTC1_IN,
- PTC0_FN, PTC0_OUT, 0, PTC0_IN }
+ PTC0_FN, PTC0_OUT, 0, PTC0_IN ))
},
- { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2, GROUP(
PTD7_FN, PTD7_OUT, 0, PTD7_IN,
PTD6_FN, PTD6_OUT, 0, PTD6_IN,
PTD5_FN, PTD5_OUT, 0, PTD5_IN,
@@ -1777,9 +1777,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTD3_FN, PTD3_OUT, 0, PTD3_IN,
PTD2_FN, PTD2_OUT, 0, PTD2_IN,
PTD1_FN, PTD1_OUT, 0, PTD1_IN,
- PTD0_FN, PTD0_OUT, 0, PTD0_IN }
+ PTD0_FN, PTD0_OUT, 0, PTD0_IN ))
},
- { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2, GROUP(
PTE7_FN, PTE7_OUT, 0, PTE7_IN,
PTE6_FN, PTE6_OUT, 0, PTE6_IN,
PTE5_FN, PTE5_OUT, 0, PTE5_IN,
@@ -1787,9 +1787,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTE3_FN, PTE3_OUT, 0, PTE3_IN,
PTE2_FN, PTE2_OUT, 0, PTE2_IN,
PTE1_FN, PTE1_OUT, 0, PTE1_IN,
- PTE0_FN, PTE0_OUT, 0, PTE0_IN }
+ PTE0_FN, PTE0_OUT, 0, PTE0_IN ))
},
- { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2, GROUP(
PTF7_FN, PTF7_OUT, 0, PTF7_IN,
PTF6_FN, PTF6_OUT, 0, PTF6_IN,
PTF5_FN, PTF5_OUT, 0, PTF5_IN,
@@ -1797,9 +1797,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTF3_FN, PTF3_OUT, 0, PTF3_IN,
PTF2_FN, PTF2_OUT, 0, PTF2_IN,
PTF1_FN, PTF1_OUT, 0, PTF1_IN,
- PTF0_FN, PTF0_OUT, 0, PTF0_IN }
+ PTF0_FN, PTF0_OUT, 0, PTF0_IN ))
},
- { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
PTG5_FN, PTG5_OUT, 0, 0,
@@ -1807,9 +1807,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTG3_FN, PTG3_OUT, 0, 0,
PTG2_FN, PTG2_OUT, 0, 0,
PTG1_FN, PTG1_OUT, 0, 0,
- PTG0_FN, PTG0_OUT, 0, 0 }
+ PTG0_FN, PTG0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2, GROUP(
PTH7_FN, PTH7_OUT, 0, PTH7_IN,
PTH6_FN, PTH6_OUT, 0, PTH6_IN,
PTH5_FN, PTH5_OUT, 0, PTH5_IN,
@@ -1817,9 +1817,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTH3_FN, PTH3_OUT, 0, PTH3_IN,
PTH2_FN, PTH2_OUT, 0, PTH2_IN,
PTH1_FN, PTH1_OUT, 0, PTH1_IN,
- PTH0_FN, PTH0_OUT, 0, PTH0_IN }
+ PTH0_FN, PTH0_OUT, 0, PTH0_IN ))
},
- { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2, GROUP(
PTJ7_FN, PTJ7_OUT, 0, 0,
PTJ6_FN, PTJ6_OUT, 0, 0,
PTJ5_FN, PTJ5_OUT, 0, 0,
@@ -1827,9 +1827,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
- PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
+ PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN ))
},
- { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2, GROUP(
PTK7_FN, PTK7_OUT, 0, PTK7_IN,
PTK6_FN, PTK6_OUT, 0, PTK6_IN,
PTK5_FN, PTK5_OUT, 0, PTK5_IN,
@@ -1837,9 +1837,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTK3_FN, PTK3_OUT, 0, PTK3_IN,
PTK2_FN, PTK2_OUT, 0, PTK2_IN,
PTK1_FN, PTK1_OUT, 0, PTK1_IN,
- PTK0_FN, PTK0_OUT, 0, PTK0_IN }
+ PTK0_FN, PTK0_OUT, 0, PTK0_IN ))
},
- { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2, GROUP(
PTL7_FN, PTL7_OUT, 0, PTL7_IN,
PTL6_FN, PTL6_OUT, 0, PTL6_IN,
PTL5_FN, PTL5_OUT, 0, PTL5_IN,
@@ -1847,9 +1847,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTL3_FN, PTL3_OUT, 0, PTL3_IN,
PTL2_FN, PTL2_OUT, 0, PTL2_IN,
PTL1_FN, PTL1_OUT, 0, PTL1_IN,
- PTL0_FN, PTL0_OUT, 0, PTL0_IN }
+ PTL0_FN, PTL0_OUT, 0, PTL0_IN ))
},
- { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2, GROUP(
PTM7_FN, PTM7_OUT, 0, PTM7_IN,
PTM6_FN, PTM6_OUT, 0, PTM6_IN,
PTM5_FN, PTM5_OUT, 0, PTM5_IN,
@@ -1857,9 +1857,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTM3_FN, PTM3_OUT, 0, PTM3_IN,
PTM2_FN, PTM2_OUT, 0, PTM2_IN,
PTM1_FN, PTM1_OUT, 0, PTM1_IN,
- PTM0_FN, PTM0_OUT, 0, PTM0_IN }
+ PTM0_FN, PTM0_OUT, 0, PTM0_IN ))
},
- { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2, GROUP(
PTN7_FN, PTN7_OUT, 0, PTN7_IN,
PTN6_FN, PTN6_OUT, 0, PTN6_IN,
PTN5_FN, PTN5_OUT, 0, PTN5_IN,
@@ -1867,9 +1867,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTN3_FN, PTN3_OUT, 0, PTN3_IN,
PTN2_FN, PTN2_OUT, 0, PTN2_IN,
PTN1_FN, PTN1_OUT, 0, PTN1_IN,
- PTN0_FN, PTN0_OUT, 0, PTN0_IN }
+ PTN0_FN, PTN0_OUT, 0, PTN0_IN ))
},
- { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2, GROUP(
PTQ7_FN, PTQ7_OUT, 0, PTQ7_IN,
PTQ6_FN, PTQ6_OUT, 0, PTQ6_IN,
PTQ5_FN, PTQ5_OUT, 0, PTQ5_IN,
@@ -1877,9 +1877,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTQ3_FN, PTQ3_OUT, 0, PTQ3_IN,
PTQ2_FN, PTQ2_OUT, 0, PTQ2_IN,
PTQ1_FN, PTQ1_OUT, 0, PTQ1_IN,
- PTQ0_FN, PTQ0_OUT, 0, PTQ0_IN }
+ PTQ0_FN, PTQ0_OUT, 0, PTQ0_IN ))
},
- { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2, GROUP(
PTR7_FN, PTR7_OUT, 0, PTR7_IN,
PTR6_FN, PTR6_OUT, 0, PTR6_IN,
PTR5_FN, PTR5_OUT, 0, PTR5_IN,
@@ -1887,9 +1887,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTR3_FN, 0, 0, PTR3_IN,
PTR2_FN, 0, 0, PTR2_IN,
PTR1_FN, PTR1_OUT, 0, PTR1_IN,
- PTR0_FN, PTR0_OUT, 0, PTR0_IN }
+ PTR0_FN, PTR0_OUT, 0, PTR0_IN ))
},
- { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2, GROUP(
0, 0, 0, 0,
PTS6_FN, PTS6_OUT, 0, PTS6_IN,
PTS5_FN, PTS5_OUT, 0, PTS5_IN,
@@ -1897,9 +1897,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTS3_FN, PTS3_OUT, 0, PTS3_IN,
PTS2_FN, PTS2_OUT, 0, PTS2_IN,
PTS1_FN, PTS1_OUT, 0, PTS1_IN,
- PTS0_FN, PTS0_OUT, 0, PTS0_IN }
+ PTS0_FN, PTS0_OUT, 0, PTS0_IN ))
},
- { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2, GROUP(
PTT7_FN, PTT7_OUT, 0, PTT7_IN,
PTT6_FN, PTT6_OUT, 0, PTT6_IN,
PTT5_FN, PTT5_OUT, 0, PTT5_IN,
@@ -1907,9 +1907,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTT3_FN, PTT3_OUT, 0, PTT3_IN,
PTT2_FN, PTT2_OUT, 0, PTT2_IN,
PTT1_FN, PTT1_OUT, 0, PTT1_IN,
- PTT0_FN, PTT0_OUT, 0, PTT0_IN }
+ PTT0_FN, PTT0_OUT, 0, PTT0_IN ))
},
- { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2, GROUP(
PTU7_FN, PTU7_OUT, 0, PTU7_IN,
PTU6_FN, PTU6_OUT, 0, PTU6_IN,
PTU5_FN, PTU5_OUT, 0, PTU5_IN,
@@ -1917,9 +1917,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTU3_FN, PTU3_OUT, 0, PTU3_IN,
PTU2_FN, PTU2_OUT, 0, PTU2_IN,
PTU1_FN, PTU1_OUT, 0, PTU1_IN,
- PTU0_FN, PTU0_OUT, 0, PTU0_IN }
+ PTU0_FN, PTU0_OUT, 0, PTU0_IN ))
},
- { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2, GROUP(
PTV7_FN, PTV7_OUT, 0, PTV7_IN,
PTV6_FN, PTV6_OUT, 0, PTV6_IN,
PTV5_FN, PTV5_OUT, 0, PTV5_IN,
@@ -1927,9 +1927,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTV3_FN, PTV3_OUT, 0, PTV3_IN,
PTV2_FN, PTV2_OUT, 0, PTV2_IN,
PTV1_FN, PTV1_OUT, 0, PTV1_IN,
- PTV0_FN, PTV0_OUT, 0, PTV0_IN }
+ PTV0_FN, PTV0_OUT, 0, PTV0_IN ))
},
- { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2, GROUP(
PTW7_FN, PTW7_OUT, 0, PTW7_IN,
PTW6_FN, PTW6_OUT, 0, PTW6_IN,
PTW5_FN, PTW5_OUT, 0, PTW5_IN,
@@ -1937,9 +1937,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTW3_FN, PTW3_OUT, 0, PTW3_IN,
PTW2_FN, PTW2_OUT, 0, PTW2_IN,
PTW1_FN, PTW1_OUT, 0, PTW1_IN,
- PTW0_FN, PTW0_OUT, 0, PTW0_IN }
+ PTW0_FN, PTW0_OUT, 0, PTW0_IN ))
},
- { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2, GROUP(
PTX7_FN, PTX7_OUT, 0, PTX7_IN,
PTX6_FN, PTX6_OUT, 0, PTX6_IN,
PTX5_FN, PTX5_OUT, 0, PTX5_IN,
@@ -1947,9 +1947,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTX3_FN, PTX3_OUT, 0, PTX3_IN,
PTX2_FN, PTX2_OUT, 0, PTX2_IN,
PTX1_FN, PTX1_OUT, 0, PTX1_IN,
- PTX0_FN, PTX0_OUT, 0, PTX0_IN }
+ PTX0_FN, PTX0_OUT, 0, PTX0_IN ))
},
- { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2, GROUP(
PTY7_FN, PTY7_OUT, 0, PTY7_IN,
PTY6_FN, PTY6_OUT, 0, PTY6_IN,
PTY5_FN, PTY5_OUT, 0, PTY5_IN,
@@ -1957,9 +1957,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTY3_FN, PTY3_OUT, 0, PTY3_IN,
PTY2_FN, PTY2_OUT, 0, PTY2_IN,
PTY1_FN, PTY1_OUT, 0, PTY1_IN,
- PTY0_FN, PTY0_OUT, 0, PTY0_IN }
+ PTY0_FN, PTY0_OUT, 0, PTY0_IN ))
},
- { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2, GROUP(
PTZ7_FN, PTZ7_OUT, 0, PTZ7_IN,
PTZ6_FN, PTZ6_OUT, 0, PTZ6_IN,
PTZ5_FN, PTZ5_OUT, 0, PTZ5_IN,
@@ -1967,9 +1967,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTZ3_FN, PTZ3_OUT, 0, PTZ3_IN,
PTZ2_FN, PTZ2_OUT, 0, PTZ2_IN,
PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
- PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN }
+ PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN ))
},
- { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1, GROUP(
PSA15_0, PSA15_1,
PSA14_0, PSA14_1,
PSA13_0, PSA13_1,
@@ -1985,9 +1985,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSA3_0, PSA3_1,
PSA2_0, PSA2_1,
PSA1_0, PSA1_1,
- PSA0_0, PSA0_1}
+ PSA0_0, PSA0_1))
},
- { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1, GROUP(
0, 0,
PSB14_0, PSB14_1,
PSB13_0, PSB13_1,
@@ -2003,9 +2003,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSB3_0, PSB3_1,
PSB2_0, PSB2_1,
PSB1_0, PSB1_1,
- PSB0_0, PSB0_1}
+ PSB0_0, PSB0_1))
},
- { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1, GROUP(
PSC15_0, PSC15_1,
PSC14_0, PSC14_1,
PSC13_0, PSC13_1,
@@ -2021,9 +2021,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
PSC2_0, PSC2_1,
PSC1_0, PSC1_1,
- PSC0_0, PSC0_1}
+ PSC0_0, PSC0_1))
},
- { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1, GROUP(
PSD15_0, PSD15_1,
PSD14_0, PSD14_1,
PSD13_0, PSD13_1,
@@ -2039,9 +2039,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSD3_0, PSD3_1,
PSD2_0, PSD2_1,
PSD1_0, PSD1_1,
- PSD0_0, PSD0_1}
+ PSD0_0, PSD0_1))
},
- { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+ { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1, GROUP(
PSE15_0, PSE15_1,
PSE14_0, PSE14_1,
PSE13_0, PSE13_1,
@@ -2057,103 +2057,103 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSE3_0, PSE3_1,
PSE2_0, PSE2_1,
PSE1_0, PSE1_1,
- PSE0_0, PSE0_1}
+ PSE0_0, PSE0_1))
},
{}
};

static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8, GROUP(
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
- PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8, GROUP(
PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
- PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA ))
},
- { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8, GROUP(
PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
- PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8, GROUP(
PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
- PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8, GROUP(
PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
- PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA ))
},
- { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8, GROUP(
PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
- PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8, GROUP(
0, 0, PTG5_DATA, PTG4_DATA,
- PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA ))
},
- { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8, GROUP(
PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
- PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA ))
},
- { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8, GROUP(
PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
- PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA ))
},
- { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8, GROUP(
PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
- PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA ))
},
- { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8, GROUP(
PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
- PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA ))
},
- { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8, GROUP(
PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
- PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA ))
},
- { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8, GROUP(
PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
- PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA ))
},
- { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8, GROUP(
PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
- PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA ))
},
- { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8, GROUP(
PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
- PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA ))
},
- { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8, GROUP(
0, PTS6_DATA, PTS5_DATA, PTS4_DATA,
- PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA ))
},
- { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8, GROUP(
PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
- PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA ))
},
- { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8, GROUP(
PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
- PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA ))
},
- { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8, GROUP(
PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
- PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA ))
},
- { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8, GROUP(
PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
- PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA ))
},
- { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8, GROUP(
PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
- PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA ))
},
- { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8, GROUP(
PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
- PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA ))
},
- { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8, GROUP(
PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
- PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 748a32a3af82..fac7b4699121 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1635,7 +1635,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};

static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xFFFC0004, 32, 1) {
+ { PINMUX_CFG_REG("GPSR0", 0xFFFC0004, 32, 1, GROUP(
GP_0_31_FN, FN_IP2_2_0,
GP_0_30_FN, FN_IP1_31_29,
GP_0_29_FN, FN_IP1_28_26,
@@ -1667,9 +1667,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_3_FN, FN_IP1_15_14,
GP_0_2_FN, FN_IP1_13_12,
GP_0_1_FN, FN_IP1_11_10,
- GP_0_0_FN, FN_IP1_9_8 }
+ GP_0_0_FN, FN_IP1_9_8 ))
},
- { PINMUX_CFG_REG("GPSR1", 0xFFFC0008, 32, 1) {
+ { PINMUX_CFG_REG("GPSR1", 0xFFFC0008, 32, 1, GROUP(
GP_1_31_FN, FN_IP11_25_23,
GP_1_30_FN, FN_IP2_13_11,
GP_1_29_FN, FN_IP2_10_8,
@@ -1701,9 +1701,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_3_FN, FN_IP11_22_21,
GP_1_2_FN, FN_IP11_20_19,
GP_1_1_FN, FN_IP3_29_27,
- GP_1_0_FN, FN_IP3_20 }
+ GP_1_0_FN, FN_IP3_20 ))
},
- { PINMUX_CFG_REG("GPSR2", 0xFFFC000C, 32, 1) {
+ { PINMUX_CFG_REG("GPSR2", 0xFFFC000C, 32, 1, GROUP(
GP_2_31_FN, FN_IP4_31_30,
GP_2_30_FN, FN_IP5_2_0,
GP_2_29_FN, FN_IP5_5_3,
@@ -1735,9 +1735,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_3_FN, FN_IP4_2_0,
GP_2_2_FN, FN_IP11_11_10,
GP_2_1_FN, FN_IP11_9_7,
- GP_2_0_FN, FN_IP11_6_4 }
+ GP_2_0_FN, FN_IP11_6_4 ))
},
- { PINMUX_CFG_REG("GPSR3", 0xFFFC0010, 32, 1) {
+ { PINMUX_CFG_REG("GPSR3", 0xFFFC0010, 32, 1, GROUP(
GP_3_31_FN, FN_IP9_1_0,
GP_3_30_FN, FN_IP8_19_18,
GP_3_29_FN, FN_IP8_17_16,
@@ -1769,10 +1769,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_3_FN, FN_IP6_9_8,
GP_3_2_FN, FN_IP6_7_6,
GP_3_1_FN, FN_IP6_5_3,
- GP_3_0_FN, FN_IP6_2_0 }
+ GP_3_0_FN, FN_IP6_2_0 ))
},
- { PINMUX_CFG_REG("GPSR4", 0xFFFC0014, 32, 1) {
+ { PINMUX_CFG_REG("GPSR4", 0xFFFC0014, 32, 1, GROUP(
GP_4_31_FN, FN_IP10_24_23,
GP_4_30_FN, FN_IP10_22,
GP_4_29_FN, FN_IP11_18_16,
@@ -1804,9 +1804,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_3_FN, FN_IP9_25_24,
GP_4_2_FN, FN_IP9_23_22,
GP_4_1_FN, FN_IP9_21_20,
- GP_4_0_FN, FN_IP9_19_18 }
+ GP_4_0_FN, FN_IP9_19_18 ))
},
- { PINMUX_CFG_REG("GPSR5", 0xFFFC0018, 32, 1) {
+ { PINMUX_CFG_REG("GPSR5", 0xFFFC0018, 32, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 28 */
0, 0, 0, 0, 0, 0, 0, 0, /* 27 - 24 */
0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 20 */
@@ -1819,12 +1819,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_FN, FN_IRQ3_B,
GP_5_2_FN, FN_IRQ2_B,
GP_5_1_FN, FN_IP11_3,
- GP_5_0_FN, FN_IP10_25 }
+ GP_5_0_FN, FN_IP10_25 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xFFFC001C, 32,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2) {
+ GROUP(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
+ GROUP(
/* IP0_31_30 [2] */
FN_A15, FN_ST0_VCO_CLKIN, FN_LCD_DATA15_A,
FN_TIOC3D_C,
@@ -1857,10 +1857,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP0_3_2 [2] */
FN_A1, FN_ST0_REQ, FN_LCD_DATA1_A, FN_TCLKB_C,
/* IP0_1_0 [2] */
- FN_A0, FN_ST0_CLKIN, FN_LCD_DATA0_A, FN_TCLKA_C }
+ FN_A0, FN_ST0_CLKIN, FN_LCD_DATA0_A, FN_TCLKA_C ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xFFFC0020, 32,
- 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ GROUP(3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
+ GROUP(
/* IP1_31_29 [3] */
FN_D3, FN_SD0_DAT3_A, FN_MMC_D3_A, FN_ST1_D6,
FN_FD3_A, 0, 0, 0,
@@ -1892,10 +1893,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP1_3_2 [2] */
FN_A17, FN_ST1_VCO_CLKIN, FN_LCD_CL1_A, FN_TIOC4B_C,
/* IP1_1_0 [2] */
- FN_A16, FN_ST0_PWM, FN_LCD_DON_A, FN_TIOC4A_C }
+ FN_A16, FN_ST0_PWM, FN_LCD_DON_A, FN_TIOC4A_C ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xFFFC0024, 32,
- 1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3) {
+ GROUP(1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3),
+ GROUP(
/* IP2_31 [1] */
0, 0,
/* IP2_30_28 [3] */
@@ -1928,10 +1930,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_D5, FN_SD0_WP_A, FN_MMC_D5_A, FN_FD5_A,
/* IP2_2_0 [3] */
FN_D4, FN_SD0_CD_A, FN_MMC_D4_A, FN_ST1_D7,
- FN_FD4_A, 0, 0, 0 }
+ FN_FD4_A, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xFFFC0028, 32,
- 2, 3, 3, 3, 1, 2, 3, 3, 3, 3, 3, 1, 2) {
+ GROUP(2, 3, 3, 3, 1, 2, 3, 3, 3, 3, 3, 1, 2),
+ GROUP(
/* IP3_31_30 [2] */
0, 0, 0, 0,
/* IP3_29_27 [3] */
@@ -1965,10 +1968,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_2 [1] */
FN_CS1_A26, FN_QIO3_B,
/* IP3_1_0 [2] */
- FN_D15, FN_SCK2_B, 0, 0 }
+ FN_D15, FN_SCK2_B, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xFFFC002C, 32,
- 2, 2, 2, 2, 2, 2 , 2, 3, 3, 3, 3, 3, 3) {
+ GROUP(2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP4_31_30 [2] */
0, FN_SCK2_A, FN_VI0_G3, 0,
/* IP4_29_28 [2] */
@@ -2000,10 +2004,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ET0_RX_DV, 0, 0, 0,
/* IP4_2_0 [3] */
FN_HCTS0_A, FN_CTS1_A, FN_VI0_FIELD, FN_RMII0_RXD1_A,
- FN_ET0_ERXD7, 0, 0, 0 }
+ FN_ET0_ERXD7, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xFFFC0030, 32,
- 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3),
+ GROUP(
/* IP5_31 [1] */
0, 0,
/* IP5_30 [1] */
@@ -2040,11 +2046,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, FN_ET0_ERXD2_B,
/* IP5_2_0 [3] */
FN_SD2_CLK_A, FN_RX2_A, FN_VI0_G4, 0,
- FN_ET0_RX_CLK_B, 0, 0, 0 }
+ FN_ET0_RX_CLK_B, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xFFFC0034, 32,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 3, 3, 2, 2, 2, 2, 2, 2, 3, 3) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 2, 2,
+ 2, 2, 2, 2, 3, 3),
+ GROUP(
/* IP5_31 [1] */
0, 0,
/* IP6_30 [1] */
@@ -2084,10 +2091,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TCLKB_A, FN_HIFD01, 0, 0,
/* IP6_2_0 [3] */
FN_DU0_DR0, FN_SCIF_CLK_B, FN_HRX0_D, FN_IETX_A,
- FN_TCLKA_A, FN_HIFD00, 0, 0 }
+ FN_TCLKA_A, FN_HIFD00, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xFFFC0038, 32,
- 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ GROUP(1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(
/* IP7_31 [1] */
0, 0,
/* IP7_30_29 [2] */
@@ -2120,10 +2128,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_HIFD11, 0, 0, 0,
/* IP7_2_0 [3] */
FN_DU0_DG2, FN_RTS1_C, FN_RMII0_MDC_B, FN_TIOC2A_A,
- FN_HIFD10, 0, 0, 0 }
+ FN_HIFD10, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xFFFC003C, 32,
- 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ GROUP(2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2),
+ GROUP(
/* IP9_31_30 [2] */
0, 0, 0, 0,
/* IP8_29_28 [2] */
@@ -2156,11 +2166,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP8_3_2 [2] */
FN_DU0_DB6, 0, FN_HIFRDY, 0,
/* IP8_1_0 [2] */
- FN_DU0_DB5, 0, FN_HIFDREQ, 0 }
+ FN_DU0_DB5, 0, FN_HIFDREQ, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xFFFC0040, 32,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2) {
+ GROUP(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2),
+ GROUP(
/* IP9_31_30 [2] */
0, 0, 0, 0,
/* IP9_29_28 [2] */
@@ -2192,11 +2203,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP9_3_2 [2] */
FN_VI1_0_A, 0, FN_FD1_B, FN_LCD_DATA1_B,
/* IP9_1_0 [2] */
- FN_VI1_CLK_A, 0, FN_FD0_B, FN_LCD_DATA0_B }
+ FN_VI1_CLK_A, 0, FN_FD0_B, FN_LCD_DATA0_B ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xFFFC0044, 32,
- 2, 2, 2, 1, 2, 1, 3,
- 3, 1, 3, 3, 3, 3, 3) {
+ GROUP(2, 2, 2, 1, 2, 1, 3, 3, 1, 3, 3, 3, 3, 3),
+ GROUP(
/* IP9_31_30 [2] */
0, 0, 0, 0,
/* IP10_29_28 [2] */
@@ -2231,10 +2242,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_LCD_DON_B, 0, 0,
/* IP10_2_0 [3] */
FN_SSI_SCK23, FN_VI1_4_B, FN_RX1_D, FN_FCLE_B,
- FN_LCD_DATA15_B, 0, 0, 0 }
+ FN_LCD_DATA15_B, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xFFFC0048, 32,
- 3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3, 1, 1, 1, 1) {
+ GROUP(3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3,
+ 1, 1, 1, 1),
+ GROUP(
/* IP11_31_29 [3] */
0, 0, 0, 0, 0, 0, 0, 0,
/* IP11_28 [1] */
@@ -2271,11 +2284,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP11_1 [1] */
FN_SDA1, FN_RX1_E,
/* IP11_0 [1] */
- FN_SCL1, FN_SCIF_CLK_C }
+ FN_SCL1, FN_SCIF_CLK_C ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xFFFC004C, 32,
- 3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2,
- 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) {
+ GROUP(3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2,
+ 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
/* SEL1_31_29 [3] */
0, 0, 0, 0, 0, 0, 0, 0,
/* SEL1_28 [1] */
@@ -2327,11 +2341,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL1_1 [1] */
FN_SEL_MMC_0, FN_SEL_MMC_1,
/* SEL1_0 [1] */
- FN_SEL_INTC_0, FN_SEL_INTC_1 }
+ FN_SEL_INTC_0, FN_SEL_INTC_1 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xFFFC0050, 32,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 2, 2, 1, 2, 2, 3, 2, 3, 2, 2) {
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 1, 2, 2, 3, 2, 3, 2, 2),
+ GROUP(
/* SEL2_31 [1] */
0, 0,
/* SEL2_30 [1] */
@@ -2375,15 +2390,20 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL2_3_2 [2] */
FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, 0,
/* SEL2_1_0 [2] */
- FN_SEL_SCIF_CLK_0, FN_SEL_SCIF_CLK_1, FN_SEL_SCIF_CLK_2, 0 }
+ FN_SEL_SCIF_CLK_0, FN_SEL_SCIF_CLK_1, FN_SEL_SCIF_CLK_2, 0 ))
},
/* GPIO 0 - 5*/
- { PINMUX_CFG_REG("INOUTSEL0", 0xFFC40004, 32, 1) { GP_INOUTSEL(0) } },
- { PINMUX_CFG_REG("INOUTSEL1", 0xFFC41004, 32, 1) { GP_INOUTSEL(1) } },
- { PINMUX_CFG_REG("INOUTSEL2", 0xFFC42004, 32, 1) { GP_INOUTSEL(2) } },
- { PINMUX_CFG_REG("INOUTSEL3", 0xFFC43004, 32, 1) { GP_INOUTSEL(3) } },
- { PINMUX_CFG_REG("INOUTSEL4", 0xFFC44004, 32, 1) { GP_INOUTSEL(4) } },
- { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1) {
+ { PINMUX_CFG_REG("INOUTSEL0", 0xFFC40004, 32, 1, GROUP(GP_INOUTSEL(0)))
+ },
+ { PINMUX_CFG_REG("INOUTSEL1", 0xFFC41004, 32, 1, GROUP(GP_INOUTSEL(1)))
+ },
+ { PINMUX_CFG_REG("INOUTSEL2", 0xFFC42004, 32, 1, GROUP(GP_INOUTSEL(2)))
+ },
+ { PINMUX_CFG_REG("INOUTSEL3", 0xFFC43004, 32, 1, GROUP(GP_INOUTSEL(3)))
+ },
+ { PINMUX_CFG_REG("INOUTSEL4", 0xFFC44004, 32, 1, GROUP(GP_INOUTSEL(4)))
+ },
+ { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 24 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 16 */
0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */
@@ -2398,24 +2418,24 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_3_IN, GP_5_3_OUT,
GP_5_2_IN, GP_5_2_OUT,
GP_5_1_IN, GP_5_1_OUT,
- GP_5_0_IN, GP_5_0_OUT }
+ GP_5_0_IN, GP_5_0_OUT ))
},
{ },
};

static const struct pinmux_data_reg pinmux_data_regs[] = {
/* GPIO 0 - 5*/
- { PINMUX_DATA_REG("INDT0", 0xFFC4000C, 32) { GP_INDT(0) } },
- { PINMUX_DATA_REG("INDT1", 0xFFC4100C, 32) { GP_INDT(1) } },
- { PINMUX_DATA_REG("INDT2", 0xFFC4200C, 32) { GP_INDT(2) } },
- { PINMUX_DATA_REG("INDT3", 0xFFC4300C, 32) { GP_INDT(3) } },
- { PINMUX_DATA_REG("INDT4", 0xFFC4400C, 32) { GP_INDT(4) } },
- { PINMUX_DATA_REG("INDT5", 0xFFC4500C, 32) {
+ { PINMUX_DATA_REG("INDT0", 0xFFC4000C, 32, GROUP(GP_INDT(0))) },
+ { PINMUX_DATA_REG("INDT1", 0xFFC4100C, 32, GROUP(GP_INDT(1))) },
+ { PINMUX_DATA_REG("INDT2", 0xFFC4200C, 32, GROUP(GP_INDT(2))) },
+ { PINMUX_DATA_REG("INDT3", 0xFFC4300C, 32, GROUP(GP_INDT(3))) },
+ { PINMUX_DATA_REG("INDT4", 0xFFC4400C, 32, GROUP(GP_INDT(4))) },
+ { PINMUX_DATA_REG("INDT5", 0xFFC4500C, 32, GROUP(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
GP_5_11_DATA, GP_5_10_DATA, GP_5_9_DATA, GP_5_8_DATA,
GP_5_7_DATA, GP_5_6_DATA, GP_5_5_DATA, GP_5_4_DATA,
- GP_5_3_DATA, GP_5_2_DATA, GP_5_1_DATA, GP_5_0_DATA }
+ GP_5_3_DATA, GP_5_2_DATA, GP_5_1_DATA, GP_5_0_DATA ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
index b16090690ee3..064e987b09cb 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -1683,7 +1683,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};

static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
+ { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2, GROUP(
PTA7_FN, PTA7_OUT, PTA7_IN, 0,
PTA6_FN, PTA6_OUT, PTA6_IN, 0,
PTA5_FN, PTA5_OUT, PTA5_IN, 0,
@@ -1691,9 +1691,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTA3_FN, PTA3_OUT, PTA3_IN, 0,
PTA2_FN, PTA2_OUT, PTA2_IN, 0,
PTA1_FN, PTA1_OUT, PTA1_IN, 0,
- PTA0_FN, PTA0_OUT, PTA0_IN, 0 }
+ PTA0_FN, PTA0_OUT, PTA0_IN, 0 ))
},
- { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) {
+ { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2, GROUP(
PTB7_FN, PTB7_OUT, PTB7_IN, 0,
PTB6_FN, PTB6_OUT, PTB6_IN, 0,
PTB5_FN, PTB5_OUT, PTB5_IN, 0,
@@ -1701,9 +1701,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTB3_FN, PTB3_OUT, PTB3_IN, 0,
PTB2_FN, PTB2_OUT, PTB2_IN, 0,
PTB1_FN, PTB1_OUT, PTB1_IN, 0,
- PTB0_FN, PTB0_OUT, PTB0_IN, 0 }
+ PTB0_FN, PTB0_OUT, PTB0_IN, 0 ))
},
- { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) {
+ { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2, GROUP(
PTC7_FN, PTC7_OUT, PTC7_IN, 0,
PTC6_FN, PTC6_OUT, PTC6_IN, 0,
PTC5_FN, PTC5_OUT, PTC5_IN, 0,
@@ -1711,9 +1711,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTC3_FN, PTC3_OUT, PTC3_IN, 0,
PTC2_FN, PTC2_OUT, PTC2_IN, 0,
PTC1_FN, PTC1_OUT, PTC1_IN, 0,
- PTC0_FN, PTC0_OUT, PTC0_IN, 0 }
+ PTC0_FN, PTC0_OUT, PTC0_IN, 0 ))
},
- { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) {
+ { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2, GROUP(
PTD7_FN, PTD7_OUT, PTD7_IN, 0,
PTD6_FN, PTD6_OUT, PTD6_IN, 0,
PTD5_FN, PTD5_OUT, PTD5_IN, 0,
@@ -1721,9 +1721,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTD3_FN, PTD3_OUT, PTD3_IN, 0,
PTD2_FN, PTD2_OUT, PTD2_IN, 0,
PTD1_FN, PTD1_OUT, PTD1_IN, 0,
- PTD0_FN, PTD0_OUT, PTD0_IN, 0 }
+ PTD0_FN, PTD0_OUT, PTD0_IN, 0 ))
},
- { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) {
+ { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2, GROUP(
PTE7_FN, PTE7_OUT, PTE7_IN, 0,
PTE6_FN, PTE6_OUT, PTE6_IN, 0,
PTE5_FN, PTE5_OUT, PTE5_IN, 0,
@@ -1731,9 +1731,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTE3_FN, PTE3_OUT, PTE3_IN, 0,
PTE2_FN, PTE2_OUT, PTE2_IN, 0,
PTE1_FN, PTE1_OUT, PTE1_IN, 0,
- PTE0_FN, PTE0_OUT, PTE0_IN, 0 }
+ PTE0_FN, PTE0_OUT, PTE0_IN, 0 ))
},
- { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) {
+ { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2, GROUP(
PTF7_FN, PTF7_OUT, PTF7_IN, 0,
PTF6_FN, PTF6_OUT, PTF6_IN, 0,
PTF5_FN, PTF5_OUT, PTF5_IN, 0,
@@ -1741,9 +1741,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTF3_FN, PTF3_OUT, PTF3_IN, 0,
PTF2_FN, PTF2_OUT, PTF2_IN, 0,
PTF1_FN, PTF1_OUT, PTF1_IN, 0,
- PTF0_FN, PTF0_OUT, PTF0_IN, 0 }
+ PTF0_FN, PTF0_OUT, PTF0_IN, 0 ))
},
- { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) {
+ { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2, GROUP(
PTG7_FN, PTG7_OUT, PTG7_IN, 0,
PTG6_FN, PTG6_OUT, PTG6_IN, 0,
PTG5_FN, PTG5_OUT, PTG5_IN, 0,
@@ -1751,9 +1751,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTG3_FN, PTG3_OUT, PTG3_IN, 0,
PTG2_FN, PTG2_OUT, PTG2_IN, 0,
PTG1_FN, PTG1_OUT, PTG1_IN, 0,
- PTG0_FN, PTG0_OUT, PTG0_IN, 0 }
+ PTG0_FN, PTG0_OUT, PTG0_IN, 0 ))
},
- { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) {
+ { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2, GROUP(
PTH7_FN, PTH7_OUT, PTH7_IN, 0,
PTH6_FN, PTH6_OUT, PTH6_IN, 0,
PTH5_FN, PTH5_OUT, PTH5_IN, 0,
@@ -1761,9 +1761,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTH3_FN, PTH3_OUT, PTH3_IN, 0,
PTH2_FN, PTH2_OUT, PTH2_IN, 0,
PTH1_FN, PTH1_OUT, PTH1_IN, 0,
- PTH0_FN, PTH0_OUT, PTH0_IN, 0 }
+ PTH0_FN, PTH0_OUT, PTH0_IN, 0 ))
},
- { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) {
+ { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2, GROUP(
PTI7_FN, PTI7_OUT, PTI7_IN, 0,
PTI6_FN, PTI6_OUT, PTI6_IN, 0,
PTI5_FN, PTI5_OUT, PTI5_IN, 0,
@@ -1771,9 +1771,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTI3_FN, PTI3_OUT, PTI3_IN, 0,
PTI2_FN, PTI2_OUT, PTI2_IN, 0,
PTI1_FN, PTI1_OUT, PTI1_IN, 0,
- PTI0_FN, PTI0_OUT, PTI0_IN, 0 }
+ PTI0_FN, PTI0_OUT, PTI0_IN, 0 ))
},
- { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) {
+ { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2, GROUP(
0, 0, 0, 0, /* reserved: always set 1 */
PTJ6_FN, PTJ6_OUT, PTJ6_IN, 0,
PTJ5_FN, PTJ5_OUT, PTJ5_IN, 0,
@@ -1781,9 +1781,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTJ3_FN, PTJ3_OUT, PTJ3_IN, 0,
PTJ2_FN, PTJ2_OUT, PTJ2_IN, 0,
PTJ1_FN, PTJ1_OUT, PTJ1_IN, 0,
- PTJ0_FN, PTJ0_OUT, PTJ0_IN, 0 }
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN, 0 ))
},
- { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) {
+ { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2, GROUP(
PTK7_FN, PTK7_OUT, PTK7_IN, 0,
PTK6_FN, PTK6_OUT, PTK6_IN, 0,
PTK5_FN, PTK5_OUT, PTK5_IN, 0,
@@ -1791,9 +1791,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTK3_FN, PTK3_OUT, PTK3_IN, 0,
PTK2_FN, PTK2_OUT, PTK2_IN, 0,
PTK1_FN, PTK1_OUT, PTK1_IN, 0,
- PTK0_FN, PTK0_OUT, PTK0_IN, 0 }
+ PTK0_FN, PTK0_OUT, PTK0_IN, 0 ))
},
- { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) {
+ { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2, GROUP(
0, 0, 0, 0, /* reserved: always set 1 */
PTL6_FN, PTL6_OUT, PTL6_IN, 0,
PTL5_FN, PTL5_OUT, PTL5_IN, 0,
@@ -1801,9 +1801,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTL3_FN, PTL3_OUT, PTL3_IN, 0,
PTL2_FN, PTL2_OUT, PTL2_IN, 0,
PTL1_FN, PTL1_OUT, PTL1_IN, 0,
- PTL0_FN, PTL0_OUT, PTL0_IN, 0 }
+ PTL0_FN, PTL0_OUT, PTL0_IN, 0 ))
},
- { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) {
+ { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2, GROUP(
PTM7_FN, PTM7_OUT, PTM7_IN, 0,
PTM6_FN, PTM6_OUT, PTM6_IN, 0,
PTM5_FN, PTM5_OUT, PTM5_IN, 0,
@@ -1811,9 +1811,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTM3_FN, PTM3_OUT, PTM3_IN, 0,
PTM2_FN, PTM2_OUT, PTM2_IN, 0,
PTM1_FN, PTM1_OUT, PTM1_IN, 0,
- PTM0_FN, PTM0_OUT, PTM0_IN, 0 }
+ PTM0_FN, PTM0_OUT, PTM0_IN, 0 ))
},
- { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) {
+ { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2, GROUP(
0, 0, 0, 0, /* reserved: always set 1 */
PTN6_FN, PTN6_OUT, PTN6_IN, 0,
PTN5_FN, PTN5_OUT, PTN5_IN, 0,
@@ -1821,9 +1821,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTN3_FN, PTN3_OUT, PTN3_IN, 0,
PTN2_FN, PTN2_OUT, PTN2_IN, 0,
PTN1_FN, PTN1_OUT, PTN1_IN, 0,
- PTN0_FN, PTN0_OUT, PTN0_IN, 0 }
+ PTN0_FN, PTN0_OUT, PTN0_IN, 0 ))
},
- { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) {
+ { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2, GROUP(
PTO7_FN, PTO7_OUT, PTO7_IN, 0,
PTO6_FN, PTO6_OUT, PTO6_IN, 0,
PTO5_FN, PTO5_OUT, PTO5_IN, 0,
@@ -1831,10 +1831,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTO3_FN, PTO3_OUT, PTO3_IN, 0,
PTO2_FN, PTO2_OUT, PTO2_IN, 0,
PTO1_FN, PTO1_OUT, PTO1_IN, 0,
- PTO0_FN, PTO0_OUT, PTO0_IN, 0 }
+ PTO0_FN, PTO0_OUT, PTO0_IN, 0 ))
},
#if 0 /* FIXME: Remove it? */
- { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) {
+ { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2, GROUP(
0, 0, 0, 0, /* reserved: always set 1 */
PTP6_FN, PTP6_OUT, PTP6_IN, 0,
PTP5_FN, PTP5_OUT, PTP5_IN, 0,
@@ -1842,10 +1842,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTP3_FN, PTP3_OUT, PTP3_IN, 0,
PTP2_FN, PTP2_OUT, PTP2_IN, 0,
PTP1_FN, PTP1_OUT, PTP1_IN, 0,
- PTP0_FN, PTP0_OUT, PTP0_IN, 0 }
+ PTP0_FN, PTP0_OUT, PTP0_IN, 0 ))
},
#endif
- { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) {
+ { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2, GROUP(
0, 0, 0, 0, /* reserved: always set 1 */
PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0,
PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0,
@@ -1853,9 +1853,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0,
PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0,
PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0,
- PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 }
+ PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 ))
},
- { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) {
+ { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2, GROUP(
PTR7_FN, PTR7_OUT, PTR7_IN, 0,
PTR6_FN, PTR6_OUT, PTR6_IN, 0,
PTR5_FN, PTR5_OUT, PTR5_IN, 0,
@@ -1863,9 +1863,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTR3_FN, PTR3_OUT, PTR3_IN, 0,
PTR2_FN, PTR2_OUT, PTR2_IN, 0,
PTR1_FN, PTR1_OUT, PTR1_IN, 0,
- PTR0_FN, PTR0_OUT, PTR0_IN, 0 }
+ PTR0_FN, PTR0_OUT, PTR0_IN, 0 ))
},
- { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) {
+ { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2, GROUP(
PTS7_FN, PTS7_OUT, PTS7_IN, 0,
PTS6_FN, PTS6_OUT, PTS6_IN, 0,
PTS5_FN, PTS5_OUT, PTS5_IN, 0,
@@ -1873,9 +1873,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTS3_FN, PTS3_OUT, PTS3_IN, 0,
PTS2_FN, PTS2_OUT, PTS2_IN, 0,
PTS1_FN, PTS1_OUT, PTS1_IN, 0,
- PTS0_FN, PTS0_OUT, PTS0_IN, 0 }
+ PTS0_FN, PTS0_OUT, PTS0_IN, 0 ))
},
- { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) {
+ { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2, GROUP(
PTT7_FN, PTT7_OUT, PTT7_IN, 0,
PTT6_FN, PTT6_OUT, PTT6_IN, 0,
PTT5_FN, PTT5_OUT, PTT5_IN, 0,
@@ -1883,9 +1883,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTT3_FN, PTT3_OUT, PTT3_IN, 0,
PTT2_FN, PTT2_OUT, PTT2_IN, 0,
PTT1_FN, PTT1_OUT, PTT1_IN, 0,
- PTT0_FN, PTT0_OUT, PTT0_IN, 0 }
+ PTT0_FN, PTT0_OUT, PTT0_IN, 0 ))
},
- { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) {
+ { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2, GROUP(
PTU7_FN, PTU7_OUT, PTU7_IN, 0,
PTU6_FN, PTU6_OUT, PTU6_IN, 0,
PTU5_FN, PTU5_OUT, PTU5_IN, 0,
@@ -1893,9 +1893,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTU3_FN, PTU3_OUT, PTU3_IN, 0,
PTU2_FN, PTU2_OUT, PTU2_IN, 0,
PTU1_FN, PTU1_OUT, PTU1_IN, 0,
- PTU0_FN, PTU0_OUT, PTU0_IN, 0 }
+ PTU0_FN, PTU0_OUT, PTU0_IN, 0 ))
},
- { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) {
+ { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2, GROUP(
PTV7_FN, PTV7_OUT, PTV7_IN, 0,
PTV6_FN, PTV6_OUT, PTV6_IN, 0,
PTV5_FN, PTV5_OUT, PTV5_IN, 0,
@@ -1903,9 +1903,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTV3_FN, PTV3_OUT, PTV3_IN, 0,
PTV2_FN, PTV2_OUT, PTV2_IN, 0,
PTV1_FN, PTV1_OUT, PTV1_IN, 0,
- PTV0_FN, PTV0_OUT, PTV0_IN, 0 }
+ PTV0_FN, PTV0_OUT, PTV0_IN, 0 ))
},
- { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) {
+ { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2, GROUP(
PTW7_FN, PTW7_OUT, PTW7_IN, 0,
PTW6_FN, PTW6_OUT, PTW6_IN, 0,
PTW5_FN, PTW5_OUT, PTW5_IN, 0,
@@ -1913,9 +1913,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTW3_FN, PTW3_OUT, PTW3_IN, 0,
PTW2_FN, PTW2_OUT, PTW2_IN, 0,
PTW1_FN, PTW1_OUT, PTW1_IN, 0,
- PTW0_FN, PTW0_OUT, PTW0_IN, 0 }
+ PTW0_FN, PTW0_OUT, PTW0_IN, 0 ))
},
- { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) {
+ { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2, GROUP(
PTX7_FN, PTX7_OUT, PTX7_IN, 0,
PTX6_FN, PTX6_OUT, PTX6_IN, 0,
PTX5_FN, PTX5_OUT, PTX5_IN, 0,
@@ -1923,9 +1923,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTX3_FN, PTX3_OUT, PTX3_IN, 0,
PTX2_FN, PTX2_OUT, PTX2_IN, 0,
PTX1_FN, PTX1_OUT, PTX1_IN, 0,
- PTX0_FN, PTX0_OUT, PTX0_IN, 0 }
+ PTX0_FN, PTX0_OUT, PTX0_IN, 0 ))
},
- { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) {
+ { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2, GROUP(
PTY7_FN, PTY7_OUT, PTY7_IN, 0,
PTY6_FN, PTY6_OUT, PTY6_IN, 0,
PTY5_FN, PTY5_OUT, PTY5_IN, 0,
@@ -1933,9 +1933,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTY3_FN, PTY3_OUT, PTY3_IN, 0,
PTY2_FN, PTY2_OUT, PTY2_IN, 0,
PTY1_FN, PTY1_OUT, PTY1_IN, 0,
- PTY0_FN, PTY0_OUT, PTY0_IN, 0 }
+ PTY0_FN, PTY0_OUT, PTY0_IN, 0 ))
},
- { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) {
+ { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2, GROUP(
PTZ7_FN, PTZ7_OUT, PTZ7_IN, 0,
PTZ6_FN, PTZ6_OUT, PTZ6_IN, 0,
PTZ5_FN, PTZ5_OUT, PTZ5_IN, 0,
@@ -1943,10 +1943,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTZ3_FN, PTZ3_OUT, PTZ3_IN, 0,
PTZ2_FN, PTZ2_OUT, PTZ2_IN, 0,
PTZ1_FN, PTZ1_OUT, PTZ1_IN, 0,
- PTZ0_FN, PTZ0_OUT, PTZ0_IN, 0 }
+ PTZ0_FN, PTZ0_OUT, PTZ0_IN, 0 ))
},
- { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) {
+ { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1, GROUP(
PS0_15_FN1, PS0_15_FN2,
PS0_14_FN1, PS0_14_FN2,
PS0_13_FN1, PS0_13_FN2,
@@ -1962,9 +1962,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS0_3_FN1, PS0_3_FN2,
PS0_2_FN1, PS0_2_FN2,
0, 0,
- 0, 0, }
+ 0, 0, ))
},
- { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) {
+ { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -1980,9 +1980,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
PS1_2_FN1, PS1_2_FN2,
0, 0,
- 0, 0, }
+ 0, 0, ))
},
- { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) {
+ { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1, GROUP(
0, 0,
0, 0,
PS2_13_FN1, PS2_13_FN2,
@@ -1998,9 +1998,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
PS2_2_FN1, PS2_2_FN2,
0, 0,
- 0, 0, }
+ 0, 0, ))
},
- { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1) {
+ { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1, GROUP(
PS3_15_FN1, PS3_15_FN2,
PS3_14_FN1, PS3_14_FN2,
PS3_13_FN1, PS3_13_FN2,
@@ -2016,10 +2016,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
PS3_2_FN1, PS3_2_FN2,
PS3_1_FN1, PS3_1_FN2,
- 0, 0, }
+ 0, 0, ))
},
- { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) {
+ { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1, GROUP(
0, 0,
PS4_14_FN1, PS4_14_FN2,
PS4_13_FN1, PS4_13_FN2,
@@ -2035,9 +2035,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS4_3_FN1, PS4_3_FN2,
PS4_2_FN1, PS4_2_FN2,
PS4_1_FN1, PS4_1_FN2,
- PS4_0_FN1, PS4_0_FN2, }
+ PS4_0_FN1, PS4_0_FN2, ))
},
- { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) {
+ { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -2053,9 +2053,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS5_3_FN1, PS5_3_FN2,
PS5_2_FN1, PS5_2_FN2,
0, 0,
- 0, 0, }
+ 0, 0, ))
},
- { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) {
+ { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1, GROUP(
PS6_15_FN1, PS6_15_FN2,
PS6_14_FN1, PS6_14_FN2,
PS6_13_FN1, PS6_13_FN2,
@@ -2071,9 +2071,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS6_3_FN1, PS6_3_FN2,
PS6_2_FN1, PS6_2_FN2,
PS6_1_FN1, PS6_1_FN2,
- PS6_0_FN1, PS6_0_FN2, }
+ PS6_0_FN1, PS6_0_FN2, ))
},
- { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1) {
+ { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1, GROUP(
PS7_15_FN1, PS7_15_FN2,
PS7_14_FN1, PS7_14_FN2,
PS7_13_FN1, PS7_13_FN2,
@@ -2089,9 +2089,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0, }
+ 0, 0, ))
},
- { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1) {
+ { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1, GROUP(
PS8_15_FN1, PS8_15_FN2,
PS8_14_FN1, PS8_14_FN2,
PS8_13_FN1, PS8_13_FN2,
@@ -2107,115 +2107,115 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0, }
+ 0, 0, ))
},
{}
};

static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR", 0xffec0034, 8) {
+ { PINMUX_DATA_REG("PADR", 0xffec0034, 8, GROUP(
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
- PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) {
+ { PINMUX_DATA_REG("PBDR", 0xffec0036, 8, GROUP(
PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
- PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA ))
},
- { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) {
+ { PINMUX_DATA_REG("PCDR", 0xffec0038, 8, GROUP(
PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
- PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) {
+ { PINMUX_DATA_REG("PDDR", 0xffec003a, 8, GROUP(
PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
- PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) {
+ { PINMUX_DATA_REG("PEDR", 0xffec003c, 8, GROUP(
PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
- PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA ))
},
- { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) {
+ { PINMUX_DATA_REG("PFDR", 0xffec003e, 8, GROUP(
PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
- PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) {
+ { PINMUX_DATA_REG("PGDR", 0xffec0040, 8, GROUP(
PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
- PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA ))
},
- { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) {
+ { PINMUX_DATA_REG("PHDR", 0xffec0042, 8, GROUP(
PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
- PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA ))
},
- { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) {
+ { PINMUX_DATA_REG("PIDR", 0xffec0044, 8, GROUP(
PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
- PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA }
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA ))
},
- { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) {
+ { PINMUX_DATA_REG("PJDR", 0xffec0046, 8, GROUP(
0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
- PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA ))
},
- { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) {
+ { PINMUX_DATA_REG("PKDR", 0xffec0048, 8, GROUP(
PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
- PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA ))
},
- { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) {
+ { PINMUX_DATA_REG("PLDR", 0xffec004a, 8, GROUP(
0, PTL6_DATA, PTL5_DATA, PTL4_DATA,
- PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA ))
},
- { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) {
+ { PINMUX_DATA_REG("PMDR", 0xffec004c, 8, GROUP(
PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
- PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA ))
},
- { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) {
+ { PINMUX_DATA_REG("PNDR", 0xffec004e, 8, GROUP(
0, PTN6_DATA, PTN5_DATA, PTN4_DATA,
- PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA ))
},
- { PINMUX_DATA_REG("PODR", 0xffec0050, 8) {
+ { PINMUX_DATA_REG("PODR", 0xffec0050, 8, GROUP(
PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
- PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA }
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA ))
},
- { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) {
+ { PINMUX_DATA_REG("PPDR", 0xffec0052, 8, GROUP(
PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
- PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA ))
},
- { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) {
+ { PINMUX_DATA_REG("PQDR", 0xffec0054, 8, GROUP(
0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
- PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA ))
},
- { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) {
+ { PINMUX_DATA_REG("PRDR", 0xffec0056, 8, GROUP(
PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
- PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA ))
},
- { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) {
+ { PINMUX_DATA_REG("PSDR", 0xffec0058, 8, GROUP(
PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
- PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA ))
},
- { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) {
+ { PINMUX_DATA_REG("PTDR", 0xffec005a, 8, GROUP(
PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
- PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA ))
},
- { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) {
+ { PINMUX_DATA_REG("PUDR", 0xffec005c, 8, GROUP(
PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
- PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA ))
},
- { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) {
+ { PINMUX_DATA_REG("PVDR", 0xffec005e, 8, GROUP(
PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
- PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA ))
},
- { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) {
+ { PINMUX_DATA_REG("PWDR", 0xffec0060, 8, GROUP(
PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
- PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA ))
},
- { PINMUX_DATA_REG("PXDR", 0xffec0062, 8) {
+ { PINMUX_DATA_REG("PXDR", 0xffec0062, 8, GROUP(
PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
- PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA ))
},
- { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) {
+ { PINMUX_DATA_REG("PYDR", 0xffec0064, 8, GROUP(
PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
- PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA ))
},
- { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) {
+ { PINMUX_DATA_REG("PZDR", 0xffec0066, 8, GROUP(
PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
- PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
index 193179f7fdd9..c4c1e288c53e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7785.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
@@ -985,7 +985,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};

static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) {
+ { PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2, GROUP(
PA7_FN, PA7_OUT, PA7_IN, 0,
PA6_FN, PA6_OUT, PA6_IN, 0,
PA5_FN, PA5_OUT, PA5_IN, 0,
@@ -993,9 +993,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PA3_FN, PA3_OUT, PA3_IN, 0,
PA2_FN, PA2_OUT, PA2_IN, 0,
PA1_FN, PA1_OUT, PA1_IN, 0,
- PA0_FN, PA0_OUT, PA0_IN, 0 }
+ PA0_FN, PA0_OUT, PA0_IN, 0 ))
},
- { PINMUX_CFG_REG("PBCR", 0xffe70002, 16, 2) {
+ { PINMUX_CFG_REG("PBCR", 0xffe70002, 16, 2, GROUP(
PB7_FN, PB7_OUT, PB7_IN, 0,
PB6_FN, PB6_OUT, PB6_IN, 0,
PB5_FN, PB5_OUT, PB5_IN, 0,
@@ -1003,9 +1003,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB3_FN, PB3_OUT, PB3_IN, 0,
PB2_FN, PB2_OUT, PB2_IN, 0,
PB1_FN, PB1_OUT, PB1_IN, 0,
- PB0_FN, PB0_OUT, PB0_IN, 0 }
+ PB0_FN, PB0_OUT, PB0_IN, 0 ))
},
- { PINMUX_CFG_REG("PCCR", 0xffe70004, 16, 2) {
+ { PINMUX_CFG_REG("PCCR", 0xffe70004, 16, 2, GROUP(
PC7_FN, PC7_OUT, PC7_IN, 0,
PC6_FN, PC6_OUT, PC6_IN, 0,
PC5_FN, PC5_OUT, PC5_IN, 0,
@@ -1013,9 +1013,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC3_FN, PC3_OUT, PC3_IN, 0,
PC2_FN, PC2_OUT, PC2_IN, 0,
PC1_FN, PC1_OUT, PC1_IN, 0,
- PC0_FN, PC0_OUT, PC0_IN, 0 }
+ PC0_FN, PC0_OUT, PC0_IN, 0 ))
},
- { PINMUX_CFG_REG("PDCR", 0xffe70006, 16, 2) {
+ { PINMUX_CFG_REG("PDCR", 0xffe70006, 16, 2, GROUP(
PD7_FN, PD7_OUT, PD7_IN, 0,
PD6_FN, PD6_OUT, PD6_IN, 0,
PD5_FN, PD5_OUT, PD5_IN, 0,
@@ -1023,9 +1023,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD3_FN, PD3_OUT, PD3_IN, 0,
PD2_FN, PD2_OUT, PD2_IN, 0,
PD1_FN, PD1_OUT, PD1_IN, 0,
- PD0_FN, PD0_OUT, PD0_IN, 0 }
+ PD0_FN, PD0_OUT, PD0_IN, 0 ))
},
- { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2) {
+ { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
PE5_FN, PE5_OUT, PE5_IN, 0,
@@ -1033,9 +1033,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE3_FN, PE3_OUT, PE3_IN, 0,
PE2_FN, PE2_OUT, PE2_IN, 0,
PE1_FN, PE1_OUT, PE1_IN, 0,
- PE0_FN, PE0_OUT, PE0_IN, 0 }
+ PE0_FN, PE0_OUT, PE0_IN, 0 ))
},
- { PINMUX_CFG_REG("PFCR", 0xffe7000a, 16, 2) {
+ { PINMUX_CFG_REG("PFCR", 0xffe7000a, 16, 2, GROUP(
PF7_FN, PF7_OUT, PF7_IN, 0,
PF6_FN, PF6_OUT, PF6_IN, 0,
PF5_FN, PF5_OUT, PF5_IN, 0,
@@ -1043,9 +1043,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF3_FN, PF3_OUT, PF3_IN, 0,
PF2_FN, PF2_OUT, PF2_IN, 0,
PF1_FN, PF1_OUT, PF1_IN, 0,
- PF0_FN, PF0_OUT, PF0_IN, 0 }
+ PF0_FN, PF0_OUT, PF0_IN, 0 ))
},
- { PINMUX_CFG_REG("PGCR", 0xffe7000c, 16, 2) {
+ { PINMUX_CFG_REG("PGCR", 0xffe7000c, 16, 2, GROUP(
PG7_FN, PG7_OUT, PG7_IN, 0,
PG6_FN, PG6_OUT, PG6_IN, 0,
PG5_FN, PG5_OUT, PG5_IN, 0,
@@ -1053,9 +1053,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG3_FN, PG3_OUT, PG3_IN, 0,
PG2_FN, PG2_OUT, PG2_IN, 0,
PG1_FN, PG1_OUT, PG1_IN, 0,
- PG0_FN, PG0_OUT, PG0_IN, 0 }
+ PG0_FN, PG0_OUT, PG0_IN, 0 ))
},
- { PINMUX_CFG_REG("PHCR", 0xffe7000e, 16, 2) {
+ { PINMUX_CFG_REG("PHCR", 0xffe7000e, 16, 2, GROUP(
PH7_FN, PH7_OUT, PH7_IN, 0,
PH6_FN, PH6_OUT, PH6_IN, 0,
PH5_FN, PH5_OUT, PH5_IN, 0,
@@ -1063,9 +1063,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PH3_FN, PH3_OUT, PH3_IN, 0,
PH2_FN, PH2_OUT, PH2_IN, 0,
PH1_FN, PH1_OUT, PH1_IN, 0,
- PH0_FN, PH0_OUT, PH0_IN, 0 }
+ PH0_FN, PH0_OUT, PH0_IN, 0 ))
},
- { PINMUX_CFG_REG("PJCR", 0xffe70010, 16, 2) {
+ { PINMUX_CFG_REG("PJCR", 0xffe70010, 16, 2, GROUP(
PJ7_FN, PJ7_OUT, PJ7_IN, 0,
PJ6_FN, PJ6_OUT, PJ6_IN, 0,
PJ5_FN, PJ5_OUT, PJ5_IN, 0,
@@ -1073,9 +1073,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ3_FN, PJ3_OUT, PJ3_IN, 0,
PJ2_FN, PJ2_OUT, PJ2_IN, 0,
PJ1_FN, PJ1_OUT, PJ1_IN, 0,
- PJ0_FN, PJ0_OUT, PJ0_IN, 0 }
+ PJ0_FN, PJ0_OUT, PJ0_IN, 0 ))
},
- { PINMUX_CFG_REG("PKCR", 0xffe70012, 16, 2) {
+ { PINMUX_CFG_REG("PKCR", 0xffe70012, 16, 2, GROUP(
PK7_FN, PK7_OUT, PK7_IN, 0,
PK6_FN, PK6_OUT, PK6_IN, 0,
PK5_FN, PK5_OUT, PK5_IN, 0,
@@ -1083,9 +1083,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PK3_FN, PK3_OUT, PK3_IN, 0,
PK2_FN, PK2_OUT, PK2_IN, 0,
PK1_FN, PK1_OUT, PK1_IN, 0,
- PK0_FN, PK0_OUT, PK0_IN, 0 }
+ PK0_FN, PK0_OUT, PK0_IN, 0 ))
},
- { PINMUX_CFG_REG("PLCR", 0xffe70014, 16, 2) {
+ { PINMUX_CFG_REG("PLCR", 0xffe70014, 16, 2, GROUP(
PL7_FN, PL7_OUT, PL7_IN, 0,
PL6_FN, PL6_OUT, PL6_IN, 0,
PL5_FN, PL5_OUT, PL5_IN, 0,
@@ -1093,9 +1093,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PL3_FN, PL3_OUT, PL3_IN, 0,
PL2_FN, PL2_OUT, PL2_IN, 0,
PL1_FN, PL1_OUT, PL1_IN, 0,
- PL0_FN, PL0_OUT, PL0_IN, 0 }
+ PL0_FN, PL0_OUT, PL0_IN, 0 ))
},
- { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2) {
+ { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1103,9 +1103,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
PM1_FN, PM1_OUT, PM1_IN, 0,
- PM0_FN, PM0_OUT, PM0_IN, 0 }
+ PM0_FN, PM0_OUT, PM0_IN, 0 ))
},
- { PINMUX_CFG_REG("PNCR", 0xffe70018, 16, 2) {
+ { PINMUX_CFG_REG("PNCR", 0xffe70018, 16, 2, GROUP(
PN7_FN, PN7_OUT, PN7_IN, 0,
PN6_FN, PN6_OUT, PN6_IN, 0,
PN5_FN, PN5_OUT, PN5_IN, 0,
@@ -1113,9 +1113,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PN3_FN, PN3_OUT, PN3_IN, 0,
PN2_FN, PN2_OUT, PN2_IN, 0,
PN1_FN, PN1_OUT, PN1_IN, 0,
- PN0_FN, PN0_OUT, PN0_IN, 0 }
+ PN0_FN, PN0_OUT, PN0_IN, 0 ))
},
- { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2) {
+ { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
PP5_FN, PP5_OUT, PP5_IN, 0,
@@ -1123,9 +1123,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PP3_FN, PP3_OUT, PP3_IN, 0,
PP2_FN, PP2_OUT, PP2_IN, 0,
PP1_FN, PP1_OUT, PP1_IN, 0,
- PP0_FN, PP0_OUT, PP0_IN, 0 }
+ PP0_FN, PP0_OUT, PP0_IN, 0 ))
},
- { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2) {
+ { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1133,9 +1133,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PQ3_FN, PQ3_OUT, PQ3_IN, 0,
PQ2_FN, PQ2_OUT, PQ2_IN, 0,
PQ1_FN, PQ1_OUT, PQ1_IN, 0,
- PQ0_FN, PQ0_OUT, PQ0_IN, 0 }
+ PQ0_FN, PQ0_OUT, PQ0_IN, 0 ))
},
- { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2) {
+ { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2, GROUP(
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1143,9 +1143,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PR3_FN, PR3_OUT, PR3_IN, 0,
PR2_FN, PR2_OUT, PR2_IN, 0,
PR1_FN, PR1_OUT, PR1_IN, 0,
- PR0_FN, PR0_OUT, PR0_IN, 0 }
+ PR0_FN, PR0_OUT, PR0_IN, 0 ))
},
- { PINMUX_CFG_REG("P1MSELR", 0xffe70080, 16, 1) {
+ { PINMUX_CFG_REG("P1MSELR", 0xffe70080, 16, 1, GROUP(
P1MSEL15_0, P1MSEL15_1,
P1MSEL14_0, P1MSEL14_1,
P1MSEL13_0, P1MSEL13_1,
@@ -1161,9 +1161,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
P1MSEL3_0, P1MSEL3_1,
P1MSEL2_0, P1MSEL2_1,
P1MSEL1_0, P1MSEL1_1,
- P1MSEL0_0, P1MSEL0_1 }
+ P1MSEL0_0, P1MSEL0_1 ))
},
- { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1) {
+ { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1, GROUP(
0, 0,
0, 0,
0, 0,
@@ -1179,75 +1179,75 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
P2MSEL2_0, P2MSEL2_1,
P2MSEL1_0, P2MSEL1_1,
- P2MSEL0_0, P2MSEL0_1 }
+ P2MSEL0_0, P2MSEL0_1 ))
},
{}
};

static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR", 0xffe70020, 8) {
+ { PINMUX_DATA_REG("PADR", 0xffe70020, 8, GROUP(
PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
- PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR", 0xffe70022, 8) {
+ { PINMUX_DATA_REG("PBDR", 0xffe70022, 8, GROUP(
PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
- PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA ))
},
- { PINMUX_DATA_REG("PCDR", 0xffe70024, 8) {
+ { PINMUX_DATA_REG("PCDR", 0xffe70024, 8, GROUP(
PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
- PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR", 0xffe70026, 8) {
+ { PINMUX_DATA_REG("PDDR", 0xffe70026, 8, GROUP(
PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
- PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR", 0xffe70028, 8) {
+ { PINMUX_DATA_REG("PEDR", 0xffe70028, 8, GROUP(
0, 0, PE5_DATA, PE4_DATA,
- PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA ))
},
- { PINMUX_DATA_REG("PFDR", 0xffe7002a, 8) {
+ { PINMUX_DATA_REG("PFDR", 0xffe7002a, 8, GROUP(
PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
- PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR", 0xffe7002c, 8) {
+ { PINMUX_DATA_REG("PGDR", 0xffe7002c, 8, GROUP(
PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
- PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA ))
},
- { PINMUX_DATA_REG("PHDR", 0xffe7002e, 8) {
+ { PINMUX_DATA_REG("PHDR", 0xffe7002e, 8, GROUP(
PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
- PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA }
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA ))
},
- { PINMUX_DATA_REG("PJDR", 0xffe70030, 8) {
+ { PINMUX_DATA_REG("PJDR", 0xffe70030, 8, GROUP(
PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
- PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA ))
},
- { PINMUX_DATA_REG("PKDR", 0xffe70032, 8) {
+ { PINMUX_DATA_REG("PKDR", 0xffe70032, 8, GROUP(
PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
- PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA }
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA ))
},
- { PINMUX_DATA_REG("PLDR", 0xffe70034, 8) {
+ { PINMUX_DATA_REG("PLDR", 0xffe70034, 8, GROUP(
PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA,
- PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA }
+ PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA ))
},
- { PINMUX_DATA_REG("PMDR", 0xffe70036, 8) {
+ { PINMUX_DATA_REG("PMDR", 0xffe70036, 8, GROUP(
0, 0, 0, 0,
- 0, 0, PM1_DATA, PM0_DATA }
+ 0, 0, PM1_DATA, PM0_DATA ))
},
- { PINMUX_DATA_REG("PNDR", 0xffe70038, 8) {
+ { PINMUX_DATA_REG("PNDR", 0xffe70038, 8, GROUP(
PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA,
- PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA }
+ PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA ))
},
- { PINMUX_DATA_REG("PPDR", 0xffe7003a, 8) {
+ { PINMUX_DATA_REG("PPDR", 0xffe7003a, 8, GROUP(
0, 0, PP5_DATA, PP4_DATA,
- PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA }
+ PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA ))
},
- { PINMUX_DATA_REG("PQDR", 0xffe7003c, 8) {
+ { PINMUX_DATA_REG("PQDR", 0xffe7003c, 8, GROUP(
0, 0, 0, PQ4_DATA,
- PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA }
+ PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA ))
},
- { PINMUX_DATA_REG("PRDR", 0xffe7003e, 8) {
+ { PINMUX_DATA_REG("PRDR", 0xffe7003e, 8, GROUP(
0, 0, 0, 0,
- PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA }
+ PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
index cc2657c4f85c..b8a098cd7721 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7786.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
@@ -627,7 +627,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};

static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PACR", 0xffcc0000, 16, 2) {
+ { PINMUX_CFG_REG("PACR", 0xffcc0000, 16, 2, GROUP(
PA7_FN, PA7_OUT, PA7_IN, 0,
PA6_FN, PA6_OUT, PA6_IN, 0,
PA5_FN, PA5_OUT, PA5_IN, 0,
@@ -635,9 +635,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PA3_FN, PA3_OUT, PA3_IN, 0,
PA2_FN, PA2_OUT, PA2_IN, 0,
PA1_FN, PA1_OUT, PA1_IN, 0,
- PA0_FN, PA0_OUT, PA0_IN, 0 }
+ PA0_FN, PA0_OUT, PA0_IN, 0 ))
},
- { PINMUX_CFG_REG("PBCR", 0xffcc0002, 16, 2) {
+ { PINMUX_CFG_REG("PBCR", 0xffcc0002, 16, 2, GROUP(
PB7_FN, PB7_OUT, PB7_IN, 0,
PB6_FN, PB6_OUT, PB6_IN, 0,
PB5_FN, PB5_OUT, PB5_IN, 0,
@@ -645,9 +645,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB3_FN, PB3_OUT, PB3_IN, 0,
PB2_FN, PB2_OUT, PB2_IN, 0,
PB1_FN, PB1_OUT, PB1_IN, 0,
- PB0_FN, PB0_OUT, PB0_IN, 0 }
+ PB0_FN, PB0_OUT, PB0_IN, 0 ))
},
- { PINMUX_CFG_REG("PCCR", 0xffcc0004, 16, 2) {
+ { PINMUX_CFG_REG("PCCR", 0xffcc0004, 16, 2, GROUP(
PC7_FN, PC7_OUT, PC7_IN, 0,
PC6_FN, PC6_OUT, PC6_IN, 0,
PC5_FN, PC5_OUT, PC5_IN, 0,
@@ -655,9 +655,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC3_FN, PC3_OUT, PC3_IN, 0,
PC2_FN, PC2_OUT, PC2_IN, 0,
PC1_FN, PC1_OUT, PC1_IN, 0,
- PC0_FN, PC0_OUT, PC0_IN, 0 }
+ PC0_FN, PC0_OUT, PC0_IN, 0 ))
},
- { PINMUX_CFG_REG("PDCR", 0xffcc0006, 16, 2) {
+ { PINMUX_CFG_REG("PDCR", 0xffcc0006, 16, 2, GROUP(
PD7_FN, PD7_OUT, PD7_IN, 0,
PD6_FN, PD6_OUT, PD6_IN, 0,
PD5_FN, PD5_OUT, PD5_IN, 0,
@@ -665,9 +665,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD3_FN, PD3_OUT, PD3_IN, 0,
PD2_FN, PD2_OUT, PD2_IN, 0,
PD1_FN, PD1_OUT, PD1_IN, 0,
- PD0_FN, PD0_OUT, PD0_IN, 0 }
+ PD0_FN, PD0_OUT, PD0_IN, 0 ))
},
- { PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2) {
+ { PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2, GROUP(
PE7_FN, PE7_OUT, PE7_IN, 0,
PE6_FN, PE6_OUT, PE6_IN, 0,
0, 0, 0, 0,
@@ -675,9 +675,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, 0, }
+ 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2) {
+ { PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2, GROUP(
PF7_FN, PF7_OUT, PF7_IN, 0,
PF6_FN, PF6_OUT, PF6_IN, 0,
PF5_FN, PF5_OUT, PF5_IN, 0,
@@ -685,9 +685,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF3_FN, PF3_OUT, PF3_IN, 0,
PF2_FN, PF2_OUT, PF2_IN, 0,
PF1_FN, PF1_OUT, PF1_IN, 0,
- PF0_FN, PF0_OUT, PF0_IN, 0 }
+ PF0_FN, PF0_OUT, PF0_IN, 0 ))
},
- { PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2) {
+ { PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2, GROUP(
PG7_FN, PG7_OUT, PG7_IN, 0,
PG6_FN, PG6_OUT, PG6_IN, 0,
PG5_FN, PG5_OUT, PG5_IN, 0,
@@ -695,9 +695,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, 0, }
+ 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2) {
+ { PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2, GROUP(
PH7_FN, PH7_OUT, PH7_IN, 0,
PH6_FN, PH6_OUT, PH6_IN, 0,
PH5_FN, PH5_OUT, PH5_IN, 0,
@@ -705,9 +705,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PH3_FN, PH3_OUT, PH3_IN, 0,
PH2_FN, PH2_OUT, PH2_IN, 0,
PH1_FN, PH1_OUT, PH1_IN, 0,
- PH0_FN, PH0_OUT, PH0_IN, 0 }
+ PH0_FN, PH0_OUT, PH0_IN, 0 ))
},
- { PINMUX_CFG_REG("PJCR", 0xffcc0010, 16, 2) {
+ { PINMUX_CFG_REG("PJCR", 0xffcc0010, 16, 2, GROUP(
PJ7_FN, PJ7_OUT, PJ7_IN, 0,
PJ6_FN, PJ6_OUT, PJ6_IN, 0,
PJ5_FN, PJ5_OUT, PJ5_IN, 0,
@@ -715,9 +715,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PJ3_FN, PJ3_OUT, PJ3_IN, 0,
PJ2_FN, PJ2_OUT, PJ2_IN, 0,
PJ1_FN, PJ1_OUT, PJ1_IN, 0,
- 0, 0, 0, 0, }
+ 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG("P1MSELR", 0xffcc0080, 16, 1) {
+ { PINMUX_CFG_REG("P1MSELR", 0xffcc0080, 16, 1, GROUP(
0, 0,
P1MSEL14_0, P1MSEL14_1,
P1MSEL13_0, P1MSEL13_1,
@@ -733,9 +733,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
P1MSEL3_0, P1MSEL3_1,
P1MSEL2_0, P1MSEL2_1,
P1MSEL1_0, P1MSEL1_1,
- P1MSEL0_0, P1MSEL0_1 }
+ P1MSEL0_0, P1MSEL0_1 ))
},
- { PINMUX_CFG_REG("P2MSELR", 0xffcc0082, 16, 1) {
+ { PINMUX_CFG_REG("P2MSELR", 0xffcc0082, 16, 1, GROUP(
P2MSEL15_0, P2MSEL15_1,
P2MSEL14_0, P2MSEL14_1,
P2MSEL13_0, P2MSEL13_1,
@@ -751,47 +751,47 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
P2MSEL3_0, P2MSEL3_1,
P2MSEL2_0, P2MSEL2_1,
P2MSEL1_0, P2MSEL1_1,
- P2MSEL0_0, P2MSEL0_1 }
+ P2MSEL0_0, P2MSEL0_1 ))
},
{}
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PADR", 0xffcc0020, 8) {
+ { PINMUX_DATA_REG("PADR", 0xffcc0020, 8, GROUP(
PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
- PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA ))
},
- { PINMUX_DATA_REG("PBDR", 0xffcc0022, 8) {
+ { PINMUX_DATA_REG("PBDR", 0xffcc0022, 8, GROUP(
PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
- PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA ))
},
- { PINMUX_DATA_REG("PCDR", 0xffcc0024, 8) {
+ { PINMUX_DATA_REG("PCDR", 0xffcc0024, 8, GROUP(
PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
- PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA ))
},
- { PINMUX_DATA_REG("PDDR", 0xffcc0026, 8) {
+ { PINMUX_DATA_REG("PDDR", 0xffcc0026, 8, GROUP(
PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
- PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA ))
},
- { PINMUX_DATA_REG("PEDR", 0xffcc0028, 8) {
+ { PINMUX_DATA_REG("PEDR", 0xffcc0028, 8, GROUP(
PE7_DATA, PE6_DATA,
- 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_DATA_REG("PFDR", 0xffcc002a, 8) {
+ { PINMUX_DATA_REG("PFDR", 0xffcc002a, 8, GROUP(
PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
- PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA ))
},
- { PINMUX_DATA_REG("PGDR", 0xffcc002c, 8) {
+ { PINMUX_DATA_REG("PGDR", 0xffcc002c, 8, GROUP(
PG7_DATA, PG6_DATA, PG5_DATA, 0,
- 0, 0, 0, 0 }
+ 0, 0, 0, 0 ))
},
- { PINMUX_DATA_REG("PHDR", 0xffcc002e, 8) {
+ { PINMUX_DATA_REG("PHDR", 0xffcc002e, 8, GROUP(
PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
- PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA }
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA ))
},
- { PINMUX_DATA_REG("PJDR", 0xffcc0030, 8) {
+ { PINMUX_DATA_REG("PJDR", 0xffcc0030, 8, GROUP(
PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
- PJ3_DATA, PJ2_DATA, PJ1_DATA, 0 }
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, 0 ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/sh-pfc/pfc-shx3.c
index 905ae00cc6f1..22e812850964 100644
--- a/drivers/pinctrl/sh-pfc/pfc-shx3.c
+++ b/drivers/pinctrl/sh-pfc/pfc-shx3.c
@@ -431,7 +431,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) {
+ { PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2, GROUP(
PA7_FN, PA7_OUT, PA7_IN, 0,
PA6_FN, PA6_OUT, PA6_IN, 0,
PA5_FN, PA5_OUT, PA5_IN, 0,
@@ -447,9 +447,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB3_FN, PB3_OUT, PB3_IN, 0,
PB2_FN, PB2_OUT, PB2_IN, 0,
PB1_FN, PB1_OUT, PB1_IN, 0,
- PB0_FN, PB0_OUT, PB0_IN, 0, },
+ PB0_FN, PB0_OUT, PB0_IN, 0, ))
},
- { PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2) {
+ { PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2, GROUP(
PC7_FN, PC7_OUT, PC7_IN, 0,
PC6_FN, PC6_OUT, PC6_IN, 0,
PC5_FN, PC5_OUT, PC5_IN, 0,
@@ -465,9 +465,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD3_FN, PD3_OUT, PD3_IN, 0,
PD2_FN, PD2_OUT, PD2_IN, 0,
PD1_FN, PD1_OUT, PD1_IN, 0,
- PD0_FN, PD0_OUT, PD0_IN, 0, },
+ PD0_FN, PD0_OUT, PD0_IN, 0, ))
},
- { PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2) {
+ { PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2, GROUP(
PE7_FN, PE7_OUT, PE7_IN, 0,
PE6_FN, PE6_OUT, PE6_IN, 0,
PE5_FN, PE5_OUT, PE5_IN, 0,
@@ -483,9 +483,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF3_FN, PF3_OUT, PF3_IN, 0,
PF2_FN, PF2_OUT, PF2_IN, 0,
PF1_FN, PF1_OUT, PF1_IN, 0,
- PF0_FN, PF0_OUT, PF0_IN, 0, },
+ PF0_FN, PF0_OUT, PF0_IN, 0, ))
},
- { PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2) {
+ { PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2, GROUP(
PG7_FN, PG7_OUT, PG7_IN, 0,
PG6_FN, PG6_OUT, PG6_IN, 0,
PG5_FN, PG5_OUT, PG5_IN, 0,
@@ -501,43 +501,43 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PH3_FN, PH3_OUT, PH3_IN, 0,
PH2_FN, PH2_OUT, PH2_IN, 0,
PH1_FN, PH1_OUT, PH1_IN, 0,
- PH0_FN, PH0_OUT, PH0_IN, 0, },
+ PH0_FN, PH0_OUT, PH0_IN, 0, ))
},
{ },
};
static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PABDR", 0xffc70010, 32) {
+ { PINMUX_DATA_REG("PABDR", 0xffc70010, 32, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
0, 0, 0, 0, 0, 0, 0, 0,
PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
- PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, },
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, ))
},
- { PINMUX_DATA_REG("PCDDR", 0xffc70014, 32) {
+ { PINMUX_DATA_REG("PCDDR", 0xffc70014, 32, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
0, 0, 0, 0, 0, 0, 0, 0,
PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
- PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, },
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, ))
},
- { PINMUX_DATA_REG("PEFDR", 0xffc70018, 32) {
+ { PINMUX_DATA_REG("PEFDR", 0xffc70018, 32, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
0, 0, 0, 0, 0, 0, 0, 0,
PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
- PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, },
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, ))
},
- { PINMUX_DATA_REG("PGHDR", 0xffc7001c, 32) {
+ { PINMUX_DATA_REG("PGHDR", 0xffc7001c, 32, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, PH5_DATA, PH4_DATA,
- PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, },
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, ))
},
{ },
};
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 56016cb76769..7db5819eea7e 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -111,40 +111,54 @@ struct pinmux_func {
struct pinmux_cfg_reg {
u32 reg;
u8 reg_width, field_width;
+#ifdef DEBUG
+ u16 nr_enum_ids; /* for variable width regs only */
+#define SET_NR_ENUM_IDS(n) .nr_enum_ids = n,
+#else
+#define SET_NR_ENUM_IDS(n)
+#endif
const u16 *enum_ids;
const u8 *var_field_width;
};
+#define GROUP(...) __VA_ARGS__
+
/*
* Describe a config register consisting of several fields of the same width
* - name: Register name (unused, for documentation purposes only)
* - r: Physical register address
* - r_width: Width of the register (in bits)
* - f_width: Width of the fixed-width register fields (in bits)
- * This macro must be followed by initialization data: For each register field
- * (from left to right, i.e. MSB to LSB), 2^f_width enum IDs must be specified,
- * one for each possible combination of the register field bit values.
+ * - ids: For each register field (from left to right, i.e. MSB to LSB),
+ * 2^f_width enum IDs must be specified, one for each possible
+ * combination of the register field bit values, all wrapped using
+ * the GROUP() macro.
*/
-#define PINMUX_CFG_REG(name, r, r_width, f_width) \
+#define PINMUX_CFG_REG(name, r, r_width, f_width, ids) \
.reg = r, .reg_width = r_width, \
- .field_width = f_width + BUILD_BUG_ON_ZERO(r_width % f_width), \
- .enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)])
+ .field_width = f_width + BUILD_BUG_ON_ZERO(r_width % f_width) + \
+ BUILD_BUG_ON_ZERO(sizeof((const u16 []) { ids }) / sizeof(u16) != \
+ (r_width / f_width) * (1 << f_width)), \
+ .enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)]) \
+ { ids }
/*
* Describe a config register consisting of several fields of different widths
* - name: Register name (unused, for documentation purposes only)
* - r: Physical register address
* - r_width: Width of the register (in bits)
- * - var_fw0, var_fwn...: List of widths of the register fields (in bits),
- * From left to right (i.e. MSB to LSB)
- * This macro must be followed by initialization data: For each register field
- * (from left to right, i.e. MSB to LSB), 2^var_fwi enum IDs must be specified,
- * one for each possible combination of the register field bit values.
+ * - f_widths: List of widths of the register fields (in bits), from left
+ * to right (i.e. MSB to LSB), wrapped using the GROUP() macro.
+ * - ids: For each register field (from left to right, i.e. MSB to LSB),
+ * 2^f_widths[i] enum IDs must be specified, one for each possible
+ * combination of the register field bit values, all wrapped using
+ * the GROUP() macro.
*/
-#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
- .reg = r, .reg_width = r_width, \
- .var_field_width = (const u8 []) { var_fw0, var_fwn, 0 }, \
- .enum_ids = (const u16 [])
+#define PINMUX_CFG_REG_VAR(name, r, r_width, f_widths, ids) \
+ .reg = r, .reg_width = r_width, \
+ .var_field_width = (const u8 []) { f_widths, 0 }, \
+ SET_NR_ENUM_IDS(sizeof((const u16 []) { ids }) / sizeof(u16)) \
+ .enum_ids = (const u16 []) { ids }
struct pinmux_drive_reg_field {
u16 pin;
@@ -187,12 +201,14 @@ struct pinmux_data_reg {
* - name: Register name (unused, for documentation purposes only)
* - r: Physical register address
* - r_width: Width of the register (in bits)
- * This macro must be followed by initialization data: For each register bit
- * (from left to right, i.e. MSB to LSB), one enum ID must be specified.
+ * - ids: For each register bit (from left to right, i.e. MSB to LSB), one
+ * enum ID must be specified, all wrapped using the GROUP() macro.
*/
-#define PINMUX_DATA_REG(name, r, r_width) \
- .reg = r, .reg_width = r_width, \
- .enum_ids = (const u16 [r_width]) \
+#define PINMUX_DATA_REG(name, r, r_width, ids) \
+ .reg = r, .reg_width = r_width + \
+ BUILD_BUG_ON_ZERO(sizeof((const u16 []) { ids }) / sizeof(u16) != \
+ r_width), \
+ .enum_ids = (const u16 [r_width]) { ids }
struct pinmux_irq {
const short *gpios;
@@ -261,7 +277,7 @@ struct sh_pfc_soc_info {
const struct sh_pfc_function *functions;
unsigned int nr_functions;
-#ifdef CONFIG_SUPERH
+#ifdef CONFIG_PINCTRL_SH_FUNC_GPIO
const struct pinmux_func *func_gpios;
unsigned int nr_func_gpios;
#endif
@@ -402,8 +418,8 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
/*
* Describe a pinmux configuration in which a pin is physically multiplexed
* with other pins.
- * - ipsr: IPSR field
- * - fn: Function name, also referring to the IPSR field
+ * - ipsr: IPSR field (unused, for documentation purposes only)
+ * - fn: Function name
* - psel: Physical multiplexing selector
*/
#define PINMUX_IPSR_PHYS(ipsr, fn, psel) \
@@ -663,7 +679,9 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
*/
#define PORTCR(nr, reg) \
{ \
- PINMUX_CFG_REG_VAR("PORT" nr "CR", reg, 8, 2, 2, 1, 3) {\
+ PINMUX_CFG_REG_VAR("PORT" nr "CR", reg, 8, \
+ GROUP(2, 2, 1, 3), \
+ GROUP( \
/* PULMD[1:0], handled by .set_bias() */ \
0, 0, 0, 0, \
/* IE and OE */ \
@@ -675,7 +693,7 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT##nr##_FN2, PORT##nr##_FN3, \
PORT##nr##_FN4, PORT##nr##_FN5, \
PORT##nr##_FN6, PORT##nr##_FN7 \
- } \
+ )) \
}
/*
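The sh_pfc.h hunks above are the heart of this conversion: by routing the enum-ID initializers through the new variadic GROUP() macro, PINMUX_CFG_REG() and PINMUX_DATA_REG() receive the whole list as a macro argument and can compare its length against the register layout at compile time via BUILD_BUG_ON_ZERO(). As a rough illustration of that bookkeeping, here is a minimal user-space sketch (not kernel code; GROUP() and NR_IDS() are local stand-ins for the patched macros, and the 16-bit register with 2-bit fields is hypothetical):

/*
 * Stand-alone sketch of the enum-ID counting behind the reworked
 * PINMUX_CFG_REG(): once the initializer travels through GROUP() as a
 * macro argument, its length can be measured with sizeof and compared
 * against (r_width / f_width) * (1 << f_width).
 */
#include <stdio.h>

typedef unsigned short u16;

#define GROUP(...)  __VA_ARGS__
#define NR_IDS(ids) (sizeof((const u16 []){ ids }) / sizeof(u16))

int main(void)
{
	/* PACR-style register: 16 bits wide, fixed 2-bit fields */
	unsigned int r_width = 16, f_width = 2;
	unsigned int expected = (r_width / f_width) * (1u << f_width);

	/* 8 fields * 2^2 values per field = 32 enum IDs */
	size_t given = NR_IDS(GROUP(
		1, 2, 3, 0,  1, 2, 3, 0,  1, 2, 3, 0,  1, 2, 3, 0,
		1, 2, 3, 0,  1, 2, 3, 0,  1, 2, 3, 0,  1, 2, 3, 0));

	printf("expected %u enum IDs, initializer provides %zu\n",
	       expected, given);
	return 0;
}

In the patch the same comparison sits inside BUILD_BUG_ON_ZERO(), so a pin table whose initializer does not match the declared register width fails to build instead of misbehaving at run time; for variable-width registers, where the expected count cannot be derived from a single field width, SET_NR_ENUM_IDS() stores the measured count for a DEBUG-only run-time check.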
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 0b9ff5aa6bb5..2317ccf63321 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -95,6 +95,9 @@ struct stm32_pinctrl {
struct regmap *regmap;
struct regmap_field *irqmux[STM32_GPIO_PINS_PER_BANK];
struct hwspinlock *hwlock;
+ struct stm32_desc_pin *pins;
+ u32 npins;
+ u32 pkg;
};
static inline int stm32_gpio_pin(int gpio)
@@ -358,8 +361,8 @@ static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl,
{
int i;
- for (i = 0; i < pctl->match_data->npins; i++) {
- const struct stm32_desc_pin *pin = pctl->match_data->pins + i;
+ for (i = 0; i < pctl->npins; i++) {
+ const struct stm32_desc_pin *pin = pctl->pins + i;
const struct stm32_desc_function *func = pin->functions;
if (pin->pin.number != pin_num)
@@ -1119,23 +1122,35 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
return 0;
}
+static struct irq_domain *stm32_pctrl_get_irq_domain(struct device_node *np)
+{
+ struct device_node *parent;
+ struct irq_domain *domain;
+
+ if (!of_find_property(np, "interrupt-parent", NULL))
+ return NULL;
+
+ parent = of_irq_find_parent(np);
+ if (!parent)
+ return ERR_PTR(-ENXIO);
+
+ domain = irq_find_host(parent);
+ if (!domain)
+ /* domain not registered yet */
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return domain;
+}
+
static int stm32_pctrl_dt_setup_irq(struct platform_device *pdev,
struct stm32_pinctrl *pctl)
{
- struct device_node *np = pdev->dev.of_node, *parent;
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct regmap *rm;
int offset, ret, i;
int mask, mask_width;
- parent = of_irq_find_parent(np);
- if (!parent)
- return -ENXIO;
-
- pctl->domain = irq_find_host(parent);
- if (!pctl->domain)
- return -ENXIO;
-
pctl->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(pctl->regmap))
return PTR_ERR(pctl->regmap);
@@ -1175,7 +1190,7 @@ static int stm32_pctrl_build_state(struct platform_device *pdev)
struct stm32_pinctrl *pctl = platform_get_drvdata(pdev);
int i;
- pctl->ngroups = pctl->match_data->npins;
+ pctl->ngroups = pctl->npins;
/* Allocate groups */
pctl->groups = devm_kcalloc(&pdev->dev, pctl->ngroups,
@@ -1189,19 +1204,50 @@ static int stm32_pctrl_build_state(struct platform_device *pdev)
if (!pctl->grp_names)
return -ENOMEM;
- for (i = 0; i < pctl->match_data->npins; i++) {
- const struct stm32_desc_pin *pin = pctl->match_data->pins + i;
+ for (i = 0; i < pctl->npins; i++) {
+ const struct stm32_desc_pin *pin = pctl->pins + i;
struct stm32_pinctrl_group *group = pctl->groups + i;
group->name = pin->pin.name;
group->pin = pin->pin.number;
-
pctl->grp_names[i] = pin->pin.name;
}
return 0;
}
+static int stm32_pctrl_create_pins_tab(struct stm32_pinctrl *pctl,
+ struct stm32_desc_pin *pins)
+{
+ const struct stm32_desc_pin *p;
+ int i, nb_pins_available = 0;
+
+ for (i = 0; i < pctl->match_data->npins; i++) {
+ p = pctl->match_data->pins + i;
+ if (pctl->pkg && !(pctl->pkg & p->pkg))
+ continue;
+ pins->pin = p->pin;
+ pins->functions = p->functions;
+ pins++;
+ nb_pins_available++;
+ }
+
+ pctl->npins = nb_pins_available;
+
+ return 0;
+}
+
+static void stm32_pctl_get_package(struct device_node *np,
+ struct stm32_pinctrl *pctl)
+{
+ if (of_property_read_u32(np, "st,package", &pctl->pkg)) {
+ pctl->pkg = 0;
+ dev_warn(pctl->dev, "No package detected, use default one\n");
+ } else {
+ dev_dbg(pctl->dev, "package detected: %x\n", pctl->pkg);
+ }
+}
+
int stm32_pctl_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1230,6 +1276,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pctl);
+ /* check for IRQ controller (may require deferred probe) */
+ pctl->domain = stm32_pctrl_get_irq_domain(np);
+ if (IS_ERR(pctl->domain))
+ return PTR_ERR(pctl->domain);
+
/* hwspinlock is optional */
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
if (hwlock_id < 0) {
@@ -1241,30 +1292,43 @@ int stm32_pctl_probe(struct platform_device *pdev)
pctl->dev = dev;
pctl->match_data = match->data;
+
+ /* get package information */
+ stm32_pctl_get_package(np, pctl);
+
+ pctl->pins = devm_kcalloc(pctl->dev, pctl->match_data->npins,
+ sizeof(*pctl->pins), GFP_KERNEL);
+ if (!pctl->pins)
+ return -ENOMEM;
+
+ ret = stm32_pctrl_create_pins_tab(pctl, pctl->pins);
+ if (ret)
+ return ret;
+
ret = stm32_pctrl_build_state(pdev);
if (ret) {
dev_err(dev, "build state failed: %d\n", ret);
return -EINVAL;
}
- if (of_find_property(np, "interrupt-parent", NULL)) {
+ if (pctl->domain) {
ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
if (ret)
return ret;
}
- pins = devm_kcalloc(&pdev->dev, pctl->match_data->npins, sizeof(*pins),
+ pins = devm_kcalloc(&pdev->dev, pctl->npins, sizeof(*pins),
GFP_KERNEL);
if (!pins)
return -ENOMEM;
- for (i = 0; i < pctl->match_data->npins; i++)
- pins[i] = pctl->match_data->pins[i].pin;
+ for (i = 0; i < pctl->npins; i++)
+ pins[i] = pctl->pins[i].pin;
pctl->pctl_desc.name = dev_name(&pdev->dev);
pctl->pctl_desc.owner = THIS_MODULE;
pctl->pctl_desc.pins = pins;
- pctl->pctl_desc.npins = pctl->match_data->npins;
+ pctl->pctl_desc.npins = pctl->npins;
pctl->pctl_desc.confops = &stm32_pconf_ops;
pctl->pctl_desc.pctlops = &stm32_pctrl_ops;
pctl->pctl_desc.pmxops = &stm32_pmx_ops;
@@ -1305,4 +1369,3 @@ int stm32_pctl_probe(struct platform_device *pdev)
return 0;
}
-
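The pinctrl-stm32.c changes above split probing into three steps: stm32_pctrl_get_irq_domain() defers the probe until the interrupt parent's domain is registered, stm32_pctl_get_package() reads the optional st,package property, and stm32_pctrl_create_pins_tab() keeps only the pins whose pkg mask matches the detected package (or every pin when no package is given, pkg == 0). A stand-alone sketch of that filter, using the STM32MP_PKG_* masks from pinctrl-stm32.h but made-up pin data, might look like:

/*
 * Illustration of the package filter in stm32_pctrl_create_pins_tab():
 * a pin is kept when no package was detected (pkg == 0) or when the
 * pin's mask advertises the detected package.  Pin names and masks
 * below are illustrative only, not taken from the mp157 table.
 */
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define STM32MP_PKG_AA	BIT(0)
#define STM32MP_PKG_AB	BIT(1)
#define STM32MP_PKG_AC	BIT(2)
#define STM32MP_PKG_AD	BIT(3)

struct demo_pin {
	const char  *name;
	unsigned int pkg;	/* packages on which the pin is bonded out */
};

static const struct demo_pin all_pins[] = {
	{ "PA0", STM32MP_PKG_AA | STM32MP_PKG_AB | STM32MP_PKG_AC | STM32MP_PKG_AD },
	{ "PZ7", STM32MP_PKG_AA },	/* hypothetical pin only on the AA package */
};

int main(void)
{
	unsigned int pkg = STM32MP_PKG_AC;	/* as if read from st,package */
	unsigned int i, navail = 0;

	for (i = 0; i < sizeof(all_pins) / sizeof(all_pins[0]); i++) {
		if (pkg && !(pkg & all_pins[i].pkg))
			continue;	/* not available on this package */
		printf("keeping %s\n", all_pins[i].name);
		navail++;
	}
	printf("%u of %zu pins available\n",
	       navail, sizeof(all_pins) / sizeof(all_pins[0]));
	return 0;
}

With this filtering done before the pinctrl descriptor is built, a single stm32mp157 pin table can serve the smaller packages, since pins that are not bonded out never reach the registered pin list.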
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
index 473a6238a27b..de5e7012ca03 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.h
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -18,6 +18,12 @@
#define STM32_PIN_AF(x) ((x) + 1)
#define STM32_PIN_ANALOG (STM32_PIN_AF(15) + 1)
+/* package information */
+#define STM32MP_PKG_AA BIT(0)
+#define STM32MP_PKG_AB BIT(1)
+#define STM32MP_PKG_AC BIT(2)
+#define STM32MP_PKG_AD BIT(3)
+
struct stm32_desc_function {
const char *name;
const unsigned char num;
@@ -26,6 +32,7 @@ struct stm32_desc_function {
struct stm32_desc_pin {
struct pinctrl_pin_desc pin;
const struct stm32_desc_function *functions;
+ const unsigned int pkg;
};
#define STM32_PIN(_pin, ...) \
@@ -35,6 +42,13 @@ struct stm32_desc_pin {
__VA_ARGS__, { } }, \
}
+#define STM32_PIN_PKG(_pin, _pkg, ...) \
+ { \
+ .pin = _pin, \
+ .pkg = _pkg, \
+ .functions = (struct stm32_desc_function[]){ \
+ __VA_ARGS__, { } }, \
+ }
#define STM32_FUNCTION(_num, _name) \
{ \
.num = _num, \
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp157.c b/drivers/pinctrl/stm32/pinctrl-stm32mp157.c
index 7c7d6284b23c..320544f69e57 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32mp157.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32mp157.c
@@ -10,77 +10,82 @@
#include "pinctrl-stm32.h"
static const struct stm32_desc_pin stm32mp157_pins[] = {
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(0, "PA0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA0"),
STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
STM32_FUNCTION(3, "TIM5_CH1"),
STM32_FUNCTION(4, "TIM8_ETR"),
STM32_FUNCTION(5, "TIM15_BKIN"),
- STM32_FUNCTION(8, "USART2_CTS_NSS USART_BOOT2_CTS_NSS"),
+ STM32_FUNCTION(8, "USART2_CTS USART2_NSS"),
STM32_FUNCTION(9, "UART4_TX"),
STM32_FUNCTION(10, "SDMMC2_CMD"),
STM32_FUNCTION(11, "SAI2_SD_B"),
- STM32_FUNCTION(12, "ETH_GMII_CRS ETH_MII_CRS"),
+ STM32_FUNCTION(12, "ETH1_GMII_CRS ETH1_MII_CRS"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(1, "PA1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA1"),
STM32_FUNCTION(1, "ETH_CLK"),
STM32_FUNCTION(2, "TIM2_CH2"),
STM32_FUNCTION(3, "TIM5_CH2"),
STM32_FUNCTION(4, "LPTIM3_OUT"),
STM32_FUNCTION(5, "TIM15_CH1N"),
- STM32_FUNCTION(8, "USART2_RTS USART_BOOT2_RTS"),
+ STM32_FUNCTION(8, "USART2_RTS USART2_DE"),
STM32_FUNCTION(9, "UART4_RX"),
- STM32_FUNCTION(10, "QUADSPI_BK1_IO3 QUADSPI_BOOTBK1_IO3"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
STM32_FUNCTION(11, "SAI2_MCLK_B"),
- STM32_FUNCTION(12, "ETH_GMII_RX_CLK ETH_MII_RX_CLK ETH_RGMII_RX_CLK ETH_RMII_REF_CLK"),
+ STM32_FUNCTION(12, "ETH1_GMII_RX_CLK ETH1_MII_RX_CLK ETH1_RGMII_RX_CLK ETH1_RMII_REF_CLK"),
STM32_FUNCTION(15, "LCD_R2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(2, "PA2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA2"),
STM32_FUNCTION(2, "TIM2_CH3"),
STM32_FUNCTION(3, "TIM5_CH3"),
STM32_FUNCTION(4, "LPTIM4_OUT"),
STM32_FUNCTION(5, "TIM15_CH1"),
- STM32_FUNCTION(8, "USART2_TX USART_BOOT2_TX"),
+ STM32_FUNCTION(8, "USART2_TX"),
STM32_FUNCTION(9, "SAI2_SCK_B"),
- STM32_FUNCTION(11, "SDMMC2_D0DIR SDMMC_BOOT2_D0DIR"),
- STM32_FUNCTION(12, "ETH_MDIO"),
+ STM32_FUNCTION(11, "SDMMC2_D0DIR"),
+ STM32_FUNCTION(12, "ETH1_MDIO"),
STM32_FUNCTION(13, "MDIOS_MDIO"),
STM32_FUNCTION(15, "LCD_R1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(3, "PA3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA3"),
STM32_FUNCTION(2, "TIM2_CH4"),
STM32_FUNCTION(3, "TIM5_CH4"),
STM32_FUNCTION(4, "LPTIM5_OUT"),
STM32_FUNCTION(5, "TIM15_CH2"),
- STM32_FUNCTION(8, "USART2_RX USART_BOOT2_RX"),
+ STM32_FUNCTION(8, "USART2_RX"),
STM32_FUNCTION(10, "LCD_B2"),
- STM32_FUNCTION(12, "ETH_GMII_COL ETH_MII_COL"),
+ STM32_FUNCTION(12, "ETH1_GMII_COL ETH1_MII_COL"),
STM32_FUNCTION(15, "LCD_B5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(4, "PA4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA4"),
STM32_FUNCTION(1, "HDP0"),
STM32_FUNCTION(3, "TIM5_ETR"),
STM32_FUNCTION(5, "SAI4_D2"),
STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
- STM32_FUNCTION(8, "USART2_CK USART_BOOT2_CK"),
+ STM32_FUNCTION(8, "USART2_CK"),
STM32_FUNCTION(9, "SPI6_NSS"),
STM32_FUNCTION(13, "SAI4_FS_A"),
STM32_FUNCTION(14, "DCMI_HSYNC"),
@@ -88,8 +93,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(5, "PA5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA5"),
STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
STM32_FUNCTION(4, "TIM8_CH1N"),
@@ -101,8 +107,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(6, "PA6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA6"),
STM32_FUNCTION(2, "TIM1_BKIN"),
STM32_FUNCTION(3, "TIM3_CH1"),
@@ -118,8 +125,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(7, "PA7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA7"),
STM32_FUNCTION(2, "TIM1_CH1N"),
STM32_FUNCTION(3, "TIM3_CH2"),
@@ -129,13 +137,14 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(9, "SPI6_MOSI"),
STM32_FUNCTION(10, "TIM14_CH1"),
STM32_FUNCTION(11, "QUADSPI_CLK"),
- STM32_FUNCTION(12, "ETH_GMII_RX_DV ETH_MII_RX_DV ETH_RGMII_RX_CTL ETH_RMII_CRS_DV"),
+ STM32_FUNCTION(12, "ETH1_GMII_RX_DV ETH1_MII_RX_DV ETH1_RGMII_RX_CTL ETH1_RMII_CRS_DV"),
STM32_FUNCTION(13, "SAI4_SD_A"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(8, "PA8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA8"),
STM32_FUNCTION(1, "MCO1"),
STM32_FUNCTION(2, "TIM1_CH1"),
@@ -143,37 +152,37 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(5, "I2C3_SCL"),
STM32_FUNCTION(6, "SPI3_MOSI I2S3_SDO"),
STM32_FUNCTION(8, "USART1_CK"),
- STM32_FUNCTION(9, "SDMMC2_CKIN SDMMC_BOOT2_CKIN"),
- STM32_FUNCTION(10, "SDMMC2_D4 SDMMC_BOOT2_D4"),
- STM32_FUNCTION(11, "USBO_SOF"),
+ STM32_FUNCTION(9, "SDMMC2_CKIN"),
+ STM32_FUNCTION(10, "SDMMC2_D4"),
+ STM32_FUNCTION(11, "OTG_FS_SOF OTG_HS_SOF"),
STM32_FUNCTION(13, "SAI4_SD_B"),
STM32_FUNCTION(14, "UART7_RX"),
STM32_FUNCTION(15, "LCD_R6"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(9, "PA9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA9"),
STM32_FUNCTION(2, "TIM1_CH2"),
STM32_FUNCTION(5, "I2C3_SMBA"),
STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
STM32_FUNCTION(8, "USART1_TX"),
- STM32_FUNCTION(9, "SDMMC2_CDIR SDMMC_BOOT2_CDIR"),
- STM32_FUNCTION(10, "CAN1_RXFD"),
- STM32_FUNCTION(11, "SDMMC2_D5 SDMMC_BOOT2_D5"),
+ STM32_FUNCTION(9, "SDMMC2_CDIR"),
+ STM32_FUNCTION(11, "SDMMC2_D5"),
STM32_FUNCTION(14, "DCMI_D0"),
STM32_FUNCTION(15, "LCD_R5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(10, "PA10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA10"),
STM32_FUNCTION(2, "TIM1_CH3"),
STM32_FUNCTION(6, "SPI3_NSS I2S3_WS"),
STM32_FUNCTION(8, "USART1_RX"),
- STM32_FUNCTION(10, "CAN1_TXFD"),
STM32_FUNCTION(12, "MDIOS_MDIO"),
STM32_FUNCTION(13, "SAI4_FS_B"),
STM32_FUNCTION(14, "DCMI_D1"),
@@ -181,37 +190,39 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(11, "PA11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA11"),
STM32_FUNCTION(2, "TIM1_CH4"),
STM32_FUNCTION(3, "I2C6_SCL"),
STM32_FUNCTION(5, "I2C5_SCL"),
STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
STM32_FUNCTION(7, "UART4_RX"),
- STM32_FUNCTION(8, "USART1_CTS_NSS"),
- STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(8, "USART1_CTS USART1_NSS"),
+ STM32_FUNCTION(10, "FDCAN1_RX"),
STM32_FUNCTION(15, "LCD_R4"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(12, "PA12"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA12"),
STM32_FUNCTION(2, "TIM1_ETR"),
STM32_FUNCTION(3, "I2C6_SDA"),
STM32_FUNCTION(5, "I2C5_SDA"),
- STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
STM32_FUNCTION(7, "UART4_TX"),
- STM32_FUNCTION(8, "USART1_RTS"),
+ STM32_FUNCTION(8, "USART1_RTS USART1_DE"),
STM32_FUNCTION(9, "SAI2_FS_B"),
- STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(10, "FDCAN1_TX"),
STM32_FUNCTION(15, "LCD_R5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(13, "PA13"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA13"),
STM32_FUNCTION(1, "DBTRGO"),
STM32_FUNCTION(2, "DBTRGI"),
@@ -220,8 +231,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(14, "PA14"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA14"),
STM32_FUNCTION(1, "DBTRGO"),
STM32_FUNCTION(2, "DBTRGI"),
@@ -229,73 +241,79 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(15, "PA15"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOA15"),
STM32_FUNCTION(1, "DBTRGI"),
STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
STM32_FUNCTION(3, "SAI4_D2"),
STM32_FUNCTION(4, "SDMMC1_CDIR"),
- STM32_FUNCTION(5, "HDMI_CEC"),
+ STM32_FUNCTION(5, "CEC"),
STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
STM32_FUNCTION(8, "SPI6_NSS"),
- STM32_FUNCTION(9, "UART4_RTS UART_BOOT4_RTS"),
- STM32_FUNCTION(10, "SDMMC2_D5 SDMMC_BOOT2_D5"),
- STM32_FUNCTION(11, "SDMMC2_CDIR SDMMC_BOOT2_CDIR"),
- STM32_FUNCTION(12, "SDMMC1_D5 SDMMC_BOOT1_D5"),
+ STM32_FUNCTION(9, "UART4_RTS UART4_DE"),
+ STM32_FUNCTION(10, "SDMMC2_D5"),
+ STM32_FUNCTION(11, "SDMMC2_CDIR"),
+ STM32_FUNCTION(12, "SDMMC1_D5"),
STM32_FUNCTION(13, "SAI4_FS_A"),
STM32_FUNCTION(14, "UART7_TX"),
+ STM32_FUNCTION(15, "LCD_R1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(16, "PB0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB0"),
STM32_FUNCTION(2, "TIM1_CH2N"),
STM32_FUNCTION(3, "TIM3_CH3"),
STM32_FUNCTION(4, "TIM8_CH2N"),
- STM32_FUNCTION(7, "DFSDM_CKOUT"),
- STM32_FUNCTION(9, "UART4_CTS UART_BOOT4_CTS"),
+ STM32_FUNCTION(7, "DFSDM1_CKOUT"),
+ STM32_FUNCTION(9, "UART4_CTS"),
STM32_FUNCTION(10, "LCD_R3"),
- STM32_FUNCTION(12, "ETH_GMII_RXD2 ETH_MII_RXD2 ETH_RGMII_RXD2"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD2 ETH1_MII_RXD2 ETH1_RGMII_RXD2"),
STM32_FUNCTION(13, "MDIOS_MDIO"),
STM32_FUNCTION(15, "LCD_G1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(17, "PB1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB1"),
STM32_FUNCTION(2, "TIM1_CH3N"),
STM32_FUNCTION(3, "TIM3_CH4"),
STM32_FUNCTION(4, "TIM8_CH3N"),
- STM32_FUNCTION(7, "DFSDM_DATA1"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN1"),
STM32_FUNCTION(10, "LCD_R6"),
- STM32_FUNCTION(12, "ETH_GMII_RXD3 ETH_MII_RXD3 ETH_RGMII_RXD3"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD3 ETH1_MII_RXD3 ETH1_RGMII_RXD3"),
STM32_FUNCTION(13, "MDIOS_MDC"),
STM32_FUNCTION(15, "LCD_G0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(18, "PB2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB2"),
STM32_FUNCTION(1, "TRACED4"),
STM32_FUNCTION(2, "RTC_OUT2"),
STM32_FUNCTION(3, "SAI1_D1"),
- STM32_FUNCTION(4, "DFSDM_CK1"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN1"),
STM32_FUNCTION(5, "USART1_RX"),
STM32_FUNCTION(6, "I2S_CKIN"),
STM32_FUNCTION(7, "SAI1_SD_A"),
STM32_FUNCTION(8, "SPI3_MOSI I2S3_SDO"),
- STM32_FUNCTION(9, "UART4_RX UART_BOOT4_RX"),
+ STM32_FUNCTION(9, "UART4_RX"),
STM32_FUNCTION(10, "QUADSPI_CLK"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(19, "PB3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB3"),
STM32_FUNCTION(1, "TRACED9"),
STM32_FUNCTION(2, "TIM2_CH2"),
@@ -303,14 +321,15 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"),
STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
STM32_FUNCTION(9, "SPI6_SCK"),
- STM32_FUNCTION(10, "SDMMC2_D2 SDMMC_BOOT2_D2"),
+ STM32_FUNCTION(10, "SDMMC2_D2"),
STM32_FUNCTION(13, "SAI4_MCLK_A"),
STM32_FUNCTION(14, "UART7_RX"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(20, "PB4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB4"),
STM32_FUNCTION(1, "TRACED8"),
STM32_FUNCTION(2, "TIM16_BKIN"),
@@ -320,14 +339,15 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(7, "SPI3_MISO I2S3_SDI"),
STM32_FUNCTION(8, "SPI2_NSS I2S2_WS"),
STM32_FUNCTION(9, "SPI6_MISO"),
- STM32_FUNCTION(10, "SDMMC2_D3 SDMMC_BOOT2_D3"),
+ STM32_FUNCTION(10, "SDMMC2_D3"),
STM32_FUNCTION(13, "SAI4_SCK_A"),
STM32_FUNCTION(14, "UART7_TX"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(21, "PB5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB5"),
STM32_FUNCTION(1, "ETH_CLK"),
STM32_FUNCTION(2, "TIM17_BKIN"),
@@ -338,166 +358,175 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(7, "I2C4_SMBA"),
STM32_FUNCTION(8, "SPI3_MOSI I2S3_SDO"),
STM32_FUNCTION(9, "SPI6_MOSI"),
- STM32_FUNCTION(10, "CAN2_RX"),
+ STM32_FUNCTION(10, "FDCAN2_RX"),
STM32_FUNCTION(11, "SAI4_SD_A"),
- STM32_FUNCTION(12, "ETH_PPS_OUT"),
- STM32_FUNCTION(13, "UART5_RX UART_BOOT5_RX"),
+ STM32_FUNCTION(12, "ETH1_PPS_OUT"),
+ STM32_FUNCTION(13, "UART5_RX"),
STM32_FUNCTION(14, "DCMI_D10"),
STM32_FUNCTION(15, "LCD_G7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(22, "PB6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB6"),
STM32_FUNCTION(2, "TIM16_CH1N"),
STM32_FUNCTION(3, "TIM4_CH1"),
STM32_FUNCTION(5, "I2C1_SCL"),
- STM32_FUNCTION(6, "HDMI_CEC"),
+ STM32_FUNCTION(6, "CEC"),
STM32_FUNCTION(7, "I2C4_SCL"),
STM32_FUNCTION(8, "USART1_TX"),
- STM32_FUNCTION(10, "CAN2_TX"),
- STM32_FUNCTION(11, "QUADSPI_BK1_NCS QUADSPI_BOOTBK1_NCS"),
- STM32_FUNCTION(12, "DFSDM_DATA5"),
+ STM32_FUNCTION(10, "FDCAN2_TX"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(12, "DFSDM1_DATIN5"),
STM32_FUNCTION(13, "UART5_TX"),
STM32_FUNCTION(14, "DCMI_D5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(23, "PB7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB7"),
STM32_FUNCTION(2, "TIM17_CH1N"),
STM32_FUNCTION(3, "TIM4_CH2"),
STM32_FUNCTION(5, "I2C1_SDA"),
STM32_FUNCTION(7, "I2C4_SDA"),
STM32_FUNCTION(8, "USART1_RX"),
- STM32_FUNCTION(10, "CAN2_TXFD"),
- STM32_FUNCTION(11, "SDMMC2_D1 SDMMC_BOOT2_D1"),
- STM32_FUNCTION(12, "DFSDM_CK5"),
+ STM32_FUNCTION(11, "SDMMC2_D1"),
+ STM32_FUNCTION(12, "DFSDM1_CKIN5"),
STM32_FUNCTION(13, "FMC_NL"),
STM32_FUNCTION(14, "DCMI_VSYNC"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(24, "PB8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB8"),
STM32_FUNCTION(1, "HDP6"),
STM32_FUNCTION(2, "TIM16_CH1"),
STM32_FUNCTION(3, "TIM4_CH3"),
- STM32_FUNCTION(4, "DFSDM_CK7"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN7"),
STM32_FUNCTION(5, "I2C1_SCL"),
- STM32_FUNCTION(6, "SDMMC1_CKIN SDMMC_BOOT1_CKIN"),
+ STM32_FUNCTION(6, "SDMMC1_CKIN"),
STM32_FUNCTION(7, "I2C4_SCL"),
- STM32_FUNCTION(8, "SDMMC2_CKIN SDMMC_BOOT2_CKIN"),
+ STM32_FUNCTION(8, "SDMMC2_CKIN"),
STM32_FUNCTION(9, "UART4_RX"),
- STM32_FUNCTION(10, "CAN1_RX"),
- STM32_FUNCTION(11, "SDMMC2_D4 SDMMC_BOOT2_D4"),
- STM32_FUNCTION(12, "ETH_GMII_TXD3 ETH_MII_TXD3 ETH_RGMII_TXD3"),
- STM32_FUNCTION(13, "SDMMC1_D4 SDMMC_BOOT1_D4"),
+ STM32_FUNCTION(10, "FDCAN1_RX"),
+ STM32_FUNCTION(11, "SDMMC2_D4"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD3 ETH1_MII_TXD3 ETH1_RGMII_TXD3"),
+ STM32_FUNCTION(13, "SDMMC1_D4"),
STM32_FUNCTION(14, "DCMI_D6"),
STM32_FUNCTION(15, "LCD_B6"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(25, "PB9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB9"),
STM32_FUNCTION(1, "HDP7"),
STM32_FUNCTION(2, "TIM17_CH1"),
STM32_FUNCTION(3, "TIM4_CH4"),
- STM32_FUNCTION(4, "DFSDM_DATA7"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN7"),
STM32_FUNCTION(5, "I2C1_SDA"),
STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
STM32_FUNCTION(7, "I2C4_SDA"),
- STM32_FUNCTION(8, "SDMMC2_CDIR SDMMC_BOOT2_CDIR"),
+ STM32_FUNCTION(8, "SDMMC2_CDIR"),
STM32_FUNCTION(9, "UART4_TX"),
- STM32_FUNCTION(10, "CAN1_TX"),
- STM32_FUNCTION(11, "SDMMC2_D5 SDMMC_BOOT2_D5"),
- STM32_FUNCTION(12, "SDMMC1_CDIR SDMMC_BOOT1_CDIR"),
- STM32_FUNCTION(13, "SDMMC1_D5 SDMMC_BOOT1_D5"),
+ STM32_FUNCTION(10, "FDCAN1_TX"),
+ STM32_FUNCTION(11, "SDMMC2_D5"),
+ STM32_FUNCTION(12, "SDMMC1_CDIR"),
+ STM32_FUNCTION(13, "SDMMC1_D5"),
STM32_FUNCTION(14, "DCMI_D7"),
STM32_FUNCTION(15, "LCD_B7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(26, "PB10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB10"),
STM32_FUNCTION(2, "TIM2_CH3"),
STM32_FUNCTION(4, "LPTIM2_IN1"),
STM32_FUNCTION(5, "I2C2_SCL"),
STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
- STM32_FUNCTION(7, "DFSDM_DATA7"),
- STM32_FUNCTION(8, "USART3_TX USART_BOOT3_TX"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN7"),
+ STM32_FUNCTION(8, "USART3_TX"),
STM32_FUNCTION(10, "QUADSPI_BK1_NCS"),
- STM32_FUNCTION(12, "ETH_GMII_RX_ER ETH_MII_RX_ER"),
+ STM32_FUNCTION(12, "ETH1_GMII_RX_ER ETH1_MII_RX_ER"),
STM32_FUNCTION(15, "LCD_G4"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(27, "PB11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB11"),
STM32_FUNCTION(2, "TIM2_CH4"),
STM32_FUNCTION(4, "LPTIM2_ETR"),
STM32_FUNCTION(5, "I2C2_SDA"),
- STM32_FUNCTION(7, "DFSDM_CK7"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN7"),
STM32_FUNCTION(8, "USART3_RX"),
- STM32_FUNCTION(12, "ETH_GMII_TX_EN ETH_MII_TX_EN ETH_RGMII_TX_CTL ETH_RMII_TX_EN"),
+ STM32_FUNCTION(12, "ETH1_GMII_TX_EN ETH1_MII_TX_EN ETH1_RGMII_TX_CTL ETH1_RMII_TX_EN"),
STM32_FUNCTION(14, "DSI_TE"),
STM32_FUNCTION(15, "LCD_G5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(28, "PB12"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB12"),
STM32_FUNCTION(2, "TIM1_BKIN"),
STM32_FUNCTION(3, "I2C6_SMBA"),
STM32_FUNCTION(5, "I2C2_SMBA"),
STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
- STM32_FUNCTION(7, "DFSDM_DATA1"),
- STM32_FUNCTION(8, "USART3_CK USART_BOOT3_CK"),
- STM32_FUNCTION(9, "USART3_RX USART_BOOT3_RX"),
- STM32_FUNCTION(10, "CAN2_RX"),
- STM32_FUNCTION(12, "ETH_GMII_TXD0 ETH_MII_TXD0 ETH_RGMII_TXD0 ETH_RMII_TXD0"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN1"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(9, "USART3_RX"),
+ STM32_FUNCTION(10, "FDCAN2_RX"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD0 ETH1_MII_TXD0 ETH1_RGMII_TXD0 ETH1_RMII_TXD0"),
STM32_FUNCTION(15, "UART5_RX"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(29, "PB13"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB13"),
STM32_FUNCTION(2, "TIM1_CH1N"),
- STM32_FUNCTION(4, "DFSDM_CKOUT"),
+ STM32_FUNCTION(4, "DFSDM1_CKOUT"),
STM32_FUNCTION(5, "LPTIM2_OUT"),
STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
- STM32_FUNCTION(7, "DFSDM_CK1"),
- STM32_FUNCTION(8, "USART3_CTS_NSS USART_BOOT3_CTS_NSS"),
- STM32_FUNCTION(10, "CAN2_TX"),
- STM32_FUNCTION(12, "ETH_GMII_TXD1 ETH_MII_TXD1 ETH_RGMII_TXD1 ETH_RMII_TXD1"),
- STM32_FUNCTION(15, "UART5_TX UART_BOOT5_TX"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN1"),
+ STM32_FUNCTION(8, "USART3_CTS USART3_NSS"),
+ STM32_FUNCTION(10, "FDCAN2_TX"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD1 ETH1_MII_TXD1 ETH1_RGMII_TXD1 ETH1_RMII_TXD1"),
+ STM32_FUNCTION(15, "UART5_TX"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(30, "PB14"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB14"),
STM32_FUNCTION(2, "TIM1_CH2N"),
STM32_FUNCTION(3, "TIM12_CH1"),
STM32_FUNCTION(4, "TIM8_CH2N"),
STM32_FUNCTION(5, "USART1_TX"),
STM32_FUNCTION(6, "SPI2_MISO I2S2_SDI"),
- STM32_FUNCTION(7, "DFSDM_DATA2"),
- STM32_FUNCTION(8, "USART3_RTS USART_BOOT3_RTS"),
- STM32_FUNCTION(10, "SDMMC2_D0 SDMMC_BOOT2_D0"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN2"),
+ STM32_FUNCTION(8, "USART3_RTS USART3_DE"),
+ STM32_FUNCTION(10, "SDMMC2_D0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(31, "PB15"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOB15"),
STM32_FUNCTION(1, "RTC_REFIN"),
STM32_FUNCTION(2, "TIM1_CH3N"),
@@ -505,523 +534,557 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(4, "TIM8_CH3N"),
STM32_FUNCTION(5, "USART1_RX"),
STM32_FUNCTION(6, "SPI2_MOSI I2S2_SDO"),
- STM32_FUNCTION(7, "DFSDM_CK2"),
- STM32_FUNCTION(10, "SDMMC2_D1 SDMMC_BOOT2_D1"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN2"),
+ STM32_FUNCTION(10, "SDMMC2_D1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(32, "PC0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC0"),
- STM32_FUNCTION(4, "DFSDM_CK0"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN0"),
STM32_FUNCTION(5, "LPTIM2_IN2"),
- STM32_FUNCTION(7, "DFSDM_DATA4"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN4"),
STM32_FUNCTION(9, "SAI2_FS_B"),
- STM32_FUNCTION(11, "QUADSPI_BK2_NCS QUADSPI_BOOTBK2_NCS"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_NCS"),
STM32_FUNCTION(15, "LCD_R5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(33, "PC1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC1"),
STM32_FUNCTION(1, "TRACED0"),
STM32_FUNCTION(3, "SAI1_D1"),
- STM32_FUNCTION(4, "DFSDM_DATA0"),
- STM32_FUNCTION(5, "DFSDM_CK4"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN0"),
+ STM32_FUNCTION(5, "DFSDM1_CKIN4"),
STM32_FUNCTION(6, "SPI2_MOSI I2S2_SDO"),
STM32_FUNCTION(7, "SAI1_SD_A"),
STM32_FUNCTION(10, "SDMMC2_CK"),
- STM32_FUNCTION(12, "ETH_MDC"),
+ STM32_FUNCTION(12, "ETH1_MDC"),
STM32_FUNCTION(13, "MDIOS_MDC"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(34, "PC2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC2"),
- STM32_FUNCTION(4, "DFSDM_CK1"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN1"),
STM32_FUNCTION(6, "SPI2_MISO I2S2_SDI"),
- STM32_FUNCTION(7, "DFSDM_CKOUT"),
- STM32_FUNCTION(12, "ETH_GMII_TXD2 ETH_MII_TXD2 ETH_RGMII_TXD2"),
+ STM32_FUNCTION(7, "DFSDM1_CKOUT"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD2 ETH1_MII_TXD2 ETH1_RGMII_TXD2"),
+ STM32_FUNCTION(14, "DCMI_PIXCLK"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(35, "PC3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC3"),
STM32_FUNCTION(1, "TRACECLK"),
- STM32_FUNCTION(4, "DFSDM_DATA1"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN1"),
STM32_FUNCTION(6, "SPI2_MOSI I2S2_SDO"),
- STM32_FUNCTION(12, "ETH_GMII_TX_CLK ETH_MII_TX_CLK"),
+ STM32_FUNCTION(12, "ETH1_GMII_TX_CLK ETH1_MII_TX_CLK"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(36, "PC4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC4"),
- STM32_FUNCTION(4, "DFSDM_CK2"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN2"),
STM32_FUNCTION(6, "I2S1_MCK"),
- STM32_FUNCTION(10, "SPDIF_IN2"),
- STM32_FUNCTION(12, "ETH_GMII_RXD0 ETH_MII_RXD0 ETH_RGMII_RXD0 ETH_RMII_RXD0"),
+ STM32_FUNCTION(10, "SPDIFRX_IN2"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD0 ETH1_MII_RXD0 ETH1_RGMII_RXD0 ETH1_RMII_RXD0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(37, "PC5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC5"),
STM32_FUNCTION(3, "SAI1_D3"),
- STM32_FUNCTION(4, "DFSDM_DATA2"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN2"),
STM32_FUNCTION(5, "SAI4_D4"),
STM32_FUNCTION(7, "SAI1_D4"),
- STM32_FUNCTION(10, "SPDIF_IN3"),
- STM32_FUNCTION(12, "ETH_GMII_RXD1 ETH_MII_RXD1 ETH_RGMII_RXD1 ETH_RMII_RXD1"),
+ STM32_FUNCTION(10, "SPDIFRX_IN3"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD1 ETH1_MII_RXD1 ETH1_RGMII_RXD1 ETH1_RMII_RXD1"),
STM32_FUNCTION(13, "SAI4_D3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(38, "PC6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC6"),
STM32_FUNCTION(1, "HDP1"),
STM32_FUNCTION(3, "TIM3_CH1"),
STM32_FUNCTION(4, "TIM8_CH1"),
- STM32_FUNCTION(5, "DFSDM_CK3"),
+ STM32_FUNCTION(5, "DFSDM1_CKIN3"),
STM32_FUNCTION(6, "I2S2_MCK"),
- STM32_FUNCTION(8, "USART6_TX USART_BOOT6_TX"),
- STM32_FUNCTION(9, "SDMMC1_D0DIR SDMMC_BOOT1_D0DIR"),
- STM32_FUNCTION(10, "SDMMC2_D0DIR SDMMC_BOOT2_D0DIR"),
- STM32_FUNCTION(11, "SDMMC2_D6 SDMMC_BOOT2_D6"),
+ STM32_FUNCTION(8, "USART6_TX"),
+ STM32_FUNCTION(9, "SDMMC1_D0DIR"),
+ STM32_FUNCTION(10, "SDMMC2_D0DIR"),
+ STM32_FUNCTION(11, "SDMMC2_D6"),
STM32_FUNCTION(12, "DSI_TE"),
- STM32_FUNCTION(13, "SDMMC1_D6 SDMMC_BOOT1_D6"),
+ STM32_FUNCTION(13, "SDMMC1_D6"),
STM32_FUNCTION(14, "DCMI_D0"),
STM32_FUNCTION(15, "LCD_HSYNC"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(39, "PC7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC7"),
STM32_FUNCTION(1, "HDP4"),
STM32_FUNCTION(3, "TIM3_CH2"),
STM32_FUNCTION(4, "TIM8_CH2"),
- STM32_FUNCTION(5, "DFSDM_DATA3"),
+ STM32_FUNCTION(5, "DFSDM1_DATIN3"),
STM32_FUNCTION(7, "I2S3_MCK"),
- STM32_FUNCTION(8, "USART6_RX USART_BOOT6_RX"),
- STM32_FUNCTION(9, "SDMMC1_D123DIR SDMMC_BOOT1_D123DIR"),
- STM32_FUNCTION(10, "SDMMC2_D123DIR SDMMC_BOOT2_D123DIR"),
- STM32_FUNCTION(11, "SDMMC2_D7 SDMMC_BOOT2_D7"),
- STM32_FUNCTION(13, "SDMMC1_D7 SDMMC_BOOT1_D7"),
+ STM32_FUNCTION(8, "USART6_RX"),
+ STM32_FUNCTION(9, "SDMMC1_D123DIR"),
+ STM32_FUNCTION(10, "SDMMC2_D123DIR"),
+ STM32_FUNCTION(11, "SDMMC2_D7"),
+ STM32_FUNCTION(13, "SDMMC1_D7"),
STM32_FUNCTION(14, "DCMI_D1"),
STM32_FUNCTION(15, "LCD_G6"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(40, "PC8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC8"),
STM32_FUNCTION(1, "TRACED0"),
STM32_FUNCTION(3, "TIM3_CH3"),
STM32_FUNCTION(4, "TIM8_CH3"),
STM32_FUNCTION(7, "UART4_TX"),
- STM32_FUNCTION(8, "USART6_CK USART_BOOT6_CK"),
- STM32_FUNCTION(9, "UART5_RTS UART_BOOT5_RTS"),
- STM32_FUNCTION(13, "SDMMC1_D0 SDMMC_BOOT1_D0"),
+ STM32_FUNCTION(8, "USART6_CK"),
+ STM32_FUNCTION(9, "UART5_RTS UART5_DE"),
+ STM32_FUNCTION(13, "SDMMC1_D0"),
STM32_FUNCTION(14, "DCMI_D2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(41, "PC9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC9"),
STM32_FUNCTION(1, "TRACED1"),
STM32_FUNCTION(3, "TIM3_CH4"),
STM32_FUNCTION(4, "TIM8_CH4"),
STM32_FUNCTION(5, "I2C3_SDA"),
STM32_FUNCTION(6, "I2S_CKIN"),
- STM32_FUNCTION(9, "UART5_CTS UART_BOOT5_CTS"),
+ STM32_FUNCTION(9, "UART5_CTS"),
STM32_FUNCTION(10, "QUADSPI_BK1_IO0"),
- STM32_FUNCTION(13, "SDMMC1_D1 SDMMC_BOOT1_D1"),
+ STM32_FUNCTION(13, "SDMMC1_D1"),
STM32_FUNCTION(14, "DCMI_D3"),
STM32_FUNCTION(15, "LCD_B2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(42, "PC10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC10"),
STM32_FUNCTION(1, "TRACED2"),
- STM32_FUNCTION(4, "DFSDM_CK5"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN5"),
STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
STM32_FUNCTION(8, "USART3_TX"),
STM32_FUNCTION(9, "UART4_TX"),
STM32_FUNCTION(10, "QUADSPI_BK1_IO1"),
STM32_FUNCTION(11, "SAI4_MCLK_B"),
- STM32_FUNCTION(13, "SDMMC1_D2 SDMMC_BOOT1_D2"),
+ STM32_FUNCTION(13, "SDMMC1_D2"),
STM32_FUNCTION(14, "DCMI_D8"),
STM32_FUNCTION(15, "LCD_R2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(43, "PC11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC11"),
STM32_FUNCTION(1, "TRACED3"),
- STM32_FUNCTION(4, "DFSDM_DATA5"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN5"),
STM32_FUNCTION(7, "SPI3_MISO I2S3_SDI"),
STM32_FUNCTION(8, "USART3_RX"),
STM32_FUNCTION(9, "UART4_RX"),
- STM32_FUNCTION(10, "QUADSPI_BK2_NCS QUADSPI_BOOTBK2_NCS"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_NCS"),
STM32_FUNCTION(11, "SAI4_SCK_B"),
- STM32_FUNCTION(13, "SDMMC1_D3 SDMMC_BOOT1_D3"),
+ STM32_FUNCTION(13, "SDMMC1_D3"),
STM32_FUNCTION(14, "DCMI_D4"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(44, "PC12"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC12"),
STM32_FUNCTION(1, "TRACECLK"),
STM32_FUNCTION(2, "MCO2"),
STM32_FUNCTION(3, "SAI4_D3"),
STM32_FUNCTION(7, "SPI3_MOSI I2S3_SDO"),
- STM32_FUNCTION(8, "USART3_CK USART_BOOT3_CK"),
+ STM32_FUNCTION(8, "USART3_CK"),
STM32_FUNCTION(9, "UART5_TX"),
STM32_FUNCTION(11, "SAI4_SD_B"),
- STM32_FUNCTION(13, "SDMMC1_CK SDMMC_BOOT1_CK"),
+ STM32_FUNCTION(13, "SDMMC1_CK"),
STM32_FUNCTION(14, "DCMI_D9"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(45, "PC13"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC13"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(46, "PC14"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC14"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(47, "PC15"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOC15"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(48, "PD0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD0"),
STM32_FUNCTION(3, "I2C6_SDA"),
- STM32_FUNCTION(4, "DFSDM_CK6"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN6"),
STM32_FUNCTION(5, "I2C5_SDA"),
STM32_FUNCTION(7, "SAI3_SCK_A"),
STM32_FUNCTION(9, "UART4_RX"),
- STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(10, "FDCAN1_RX"),
STM32_FUNCTION(11, "SDMMC3_CMD"),
- STM32_FUNCTION(12, "DFSDM_DATA7"),
- STM32_FUNCTION(13, "FMC_D2"),
+ STM32_FUNCTION(12, "DFSDM1_DATIN7"),
+ STM32_FUNCTION(13, "FMC_D2 FMC_DA2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(49, "PD1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD1"),
STM32_FUNCTION(3, "I2C6_SCL"),
- STM32_FUNCTION(4, "DFSDM_DATA6"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN6"),
STM32_FUNCTION(5, "I2C5_SCL"),
STM32_FUNCTION(7, "SAI3_SD_A"),
STM32_FUNCTION(9, "UART4_TX"),
- STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(10, "FDCAN1_TX"),
STM32_FUNCTION(11, "SDMMC3_D0"),
- STM32_FUNCTION(12, "DFSDM_CK7"),
- STM32_FUNCTION(13, "FMC_D3"),
+ STM32_FUNCTION(12, "DFSDM1_CKIN7"),
+ STM32_FUNCTION(13, "FMC_D3 FMC_DA3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(50, "PD2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD2"),
STM32_FUNCTION(3, "TIM3_ETR"),
STM32_FUNCTION(5, "I2C5_SMBA"),
STM32_FUNCTION(7, "UART4_RX"),
STM32_FUNCTION(9, "UART5_RX"),
- STM32_FUNCTION(13, "SDMMC1_CMD SDMMC_BOOT1_CMD"),
+ STM32_FUNCTION(13, "SDMMC1_CMD"),
STM32_FUNCTION(14, "DCMI_D11"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(51, "PD3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD3"),
STM32_FUNCTION(1, "HDP5"),
- STM32_FUNCTION(4, "DFSDM_CKOUT"),
+ STM32_FUNCTION(4, "DFSDM1_CKOUT"),
STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
- STM32_FUNCTION(7, "DFSDM_DATA0"),
- STM32_FUNCTION(8, "USART2_CTS_NSS USART_BOOT2_CTS_NSS"),
- STM32_FUNCTION(9, "SDMMC1_D123DIR SDMMC_BOOT1_D123DIR"),
- STM32_FUNCTION(10, "SDMMC2_D7 SDMMC_BOOT2_D7"),
- STM32_FUNCTION(11, "SDMMC2_D123DIR SDMMC_BOOT2_D123DIR"),
- STM32_FUNCTION(12, "SDMMC1_D7 SDMMC_BOOT1_D7"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN0"),
+ STM32_FUNCTION(8, "USART2_CTS USART2_NSS"),
+ STM32_FUNCTION(9, "SDMMC1_D123DIR"),
+ STM32_FUNCTION(10, "SDMMC2_D7"),
+ STM32_FUNCTION(11, "SDMMC2_D123DIR"),
+ STM32_FUNCTION(12, "SDMMC1_D7"),
STM32_FUNCTION(13, "FMC_CLK"),
STM32_FUNCTION(14, "DCMI_D5"),
STM32_FUNCTION(15, "LCD_G7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(52, "PD4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD4"),
STM32_FUNCTION(7, "SAI3_FS_A"),
- STM32_FUNCTION(8, "USART2_RTS USART_BOOT2_RTS"),
- STM32_FUNCTION(10, "CAN1_RXFD"),
+ STM32_FUNCTION(8, "USART2_RTS USART2_DE"),
STM32_FUNCTION(11, "SDMMC3_D1"),
- STM32_FUNCTION(12, "DFSDM_CK0"),
+ STM32_FUNCTION(12, "DFSDM1_CKIN0"),
STM32_FUNCTION(13, "FMC_NOE"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(53, "PD5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD5"),
STM32_FUNCTION(8, "USART2_TX"),
- STM32_FUNCTION(10, "CAN1_TXFD"),
STM32_FUNCTION(11, "SDMMC3_D2"),
STM32_FUNCTION(13, "FMC_NWE"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(54, "PD6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD6"),
STM32_FUNCTION(2, "TIM16_CH1N"),
STM32_FUNCTION(3, "SAI1_D1"),
- STM32_FUNCTION(4, "DFSDM_CK4"),
- STM32_FUNCTION(5, "DFSDM_DATA1"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN4"),
+ STM32_FUNCTION(5, "DFSDM1_DATIN1"),
STM32_FUNCTION(6, "SPI3_MOSI I2S3_SDO"),
STM32_FUNCTION(7, "SAI1_SD_A"),
STM32_FUNCTION(8, "USART2_RX"),
- STM32_FUNCTION(10, "CAN2_RXFD"),
- STM32_FUNCTION(11, "FMC_INT"),
STM32_FUNCTION(13, "FMC_NWAIT"),
STM32_FUNCTION(14, "DCMI_D10"),
STM32_FUNCTION(15, "LCD_B2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(55, "PD7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD7"),
STM32_FUNCTION(1, "TRACED6"),
- STM32_FUNCTION(4, "DFSDM_DATA4"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN4"),
STM32_FUNCTION(5, "I2C2_SCL"),
- STM32_FUNCTION(7, "DFSDM_CK1"),
- STM32_FUNCTION(8, "USART2_CK USART_BOOT2_CK"),
- STM32_FUNCTION(10, "SPDIF_IN0"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN1"),
+ STM32_FUNCTION(8, "USART2_CK"),
+ STM32_FUNCTION(10, "SPDIFRX_IN0"),
STM32_FUNCTION(11, "SDMMC3_D3"),
STM32_FUNCTION(13, "FMC_NE1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(56, "PD8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD8"),
- STM32_FUNCTION(4, "DFSDM_CK3"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN3"),
STM32_FUNCTION(7, "SAI3_SCK_B"),
STM32_FUNCTION(8, "USART3_TX"),
- STM32_FUNCTION(10, "SPDIF_IN1"),
- STM32_FUNCTION(13, "FMC_D13"),
+ STM32_FUNCTION(10, "SPDIFRX_IN1"),
+ STM32_FUNCTION(13, "FMC_D13 FMC_DA13"),
STM32_FUNCTION(15, "LCD_B7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(57, "PD9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD9"),
- STM32_FUNCTION(4, "DFSDM_DATA3"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN3"),
STM32_FUNCTION(7, "SAI3_SD_B"),
STM32_FUNCTION(8, "USART3_RX"),
- STM32_FUNCTION(10, "CAN2_RXFD"),
- STM32_FUNCTION(13, "FMC_D14"),
+ STM32_FUNCTION(13, "FMC_D14 FMC_DA14"),
+ STM32_FUNCTION(14, "DCMI_HSYNC"),
STM32_FUNCTION(15, "LCD_B0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(58, "PD10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD10"),
STM32_FUNCTION(1, "RTC_REFIN"),
STM32_FUNCTION(2, "TIM16_BKIN"),
- STM32_FUNCTION(4, "DFSDM_CKOUT"),
+ STM32_FUNCTION(4, "DFSDM1_CKOUT"),
STM32_FUNCTION(5, "I2C5_SMBA"),
STM32_FUNCTION(6, "SPI3_MISO I2S3_SDI"),
STM32_FUNCTION(7, "SAI3_FS_B"),
- STM32_FUNCTION(8, "USART3_CK USART_BOOT3_CK"),
- STM32_FUNCTION(10, "CAN2_TXFD"),
- STM32_FUNCTION(13, "FMC_D15"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(13, "FMC_D15 FMC_DA15"),
STM32_FUNCTION(15, "LCD_B3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(59, "PD11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD11"),
STM32_FUNCTION(4, "LPTIM2_IN2"),
STM32_FUNCTION(5, "I2C4_SMBA"),
STM32_FUNCTION(6, "I2C1_SMBA"),
- STM32_FUNCTION(8, "USART3_CTS_NSS USART_BOOT3_CTS_NSS"),
+ STM32_FUNCTION(8, "USART3_CTS USART3_NSS"),
STM32_FUNCTION(10, "QUADSPI_BK1_IO0"),
STM32_FUNCTION(11, "SAI2_SD_A"),
- STM32_FUNCTION(13, "FMC_A16 FMC_CLE"),
+ STM32_FUNCTION(13, "FMC_CLE FMC_A16"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(60, "PD12"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD12"),
STM32_FUNCTION(2, "LPTIM1_IN1"),
STM32_FUNCTION(3, "TIM4_CH1"),
STM32_FUNCTION(4, "LPTIM2_IN1"),
STM32_FUNCTION(5, "I2C4_SCL"),
STM32_FUNCTION(6, "I2C1_SCL"),
- STM32_FUNCTION(8, "USART3_RTS USART_BOOT3_RTS"),
+ STM32_FUNCTION(8, "USART3_RTS USART3_DE"),
STM32_FUNCTION(10, "QUADSPI_BK1_IO1"),
STM32_FUNCTION(11, "SAI2_FS_A"),
- STM32_FUNCTION(13, "FMC_A17 FMC_ALE"),
+ STM32_FUNCTION(13, "FMC_ALE FMC_A17"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(61, "PD13"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD13"),
STM32_FUNCTION(2, "LPTIM1_OUT"),
STM32_FUNCTION(3, "TIM4_CH2"),
STM32_FUNCTION(5, "I2C4_SDA"),
STM32_FUNCTION(6, "I2C1_SDA"),
STM32_FUNCTION(7, "I2S3_MCK"),
- STM32_FUNCTION(10, "QUADSPI_BK1_IO3 QUADSPI_BOOTBK1_IO3"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
STM32_FUNCTION(11, "SAI2_SCK_A"),
STM32_FUNCTION(13, "FMC_A18"),
STM32_FUNCTION(14, "DSI_TE"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(62, "PD14"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD14"),
STM32_FUNCTION(3, "TIM4_CH3"),
STM32_FUNCTION(7, "SAI3_MCLK_B"),
- STM32_FUNCTION(9, "UART8_CTS UART_BOOT8_CTS"),
- STM32_FUNCTION(13, "FMC_D0"),
+ STM32_FUNCTION(9, "UART8_CTS"),
+ STM32_FUNCTION(13, "FMC_D0 FMC_DA0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(63, "PD15"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOD15"),
STM32_FUNCTION(3, "TIM4_CH4"),
STM32_FUNCTION(7, "SAI3_MCLK_A"),
- STM32_FUNCTION(9, "UART8_CTS UART_BOOT8_CTS"),
- STM32_FUNCTION(13, "FMC_D1"),
+ STM32_FUNCTION(9, "UART8_CTS"),
+ STM32_FUNCTION(13, "FMC_D1 FMC_DA1"),
+ STM32_FUNCTION(15, "LCD_R1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(64, "PE0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE0"),
STM32_FUNCTION(2, "LPTIM1_ETR"),
STM32_FUNCTION(3, "TIM4_ETR"),
STM32_FUNCTION(5, "LPTIM2_ETR"),
STM32_FUNCTION(6, "SPI3_SCK I2S3_CK"),
STM32_FUNCTION(7, "SAI4_MCLK_B"),
- STM32_FUNCTION(9, "UART8_RX UART_BOOT8_RX"),
- STM32_FUNCTION(10, "CAN1_RXFD"),
+ STM32_FUNCTION(9, "UART8_RX"),
STM32_FUNCTION(11, "SAI2_MCLK_A"),
STM32_FUNCTION(13, "FMC_NBL0"),
STM32_FUNCTION(14, "DCMI_D2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(65, "PE1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE1"),
STM32_FUNCTION(2, "LPTIM1_IN2"),
STM32_FUNCTION(6, "I2S2_MCK"),
STM32_FUNCTION(7, "SAI3_SD_B"),
- STM32_FUNCTION(9, "UART8_TX UART_BOOT8_TX"),
- STM32_FUNCTION(10, "CAN1_TXFD"),
+ STM32_FUNCTION(9, "UART8_TX"),
STM32_FUNCTION(13, "FMC_NBL1"),
STM32_FUNCTION(14, "DCMI_D3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(66, "PE2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE2"),
STM32_FUNCTION(1, "TRACECLK"),
STM32_FUNCTION(3, "SAI1_CK1"),
STM32_FUNCTION(5, "I2C4_SCL"),
STM32_FUNCTION(6, "SPI4_SCK"),
STM32_FUNCTION(7, "SAI1_MCLK_A"),
- STM32_FUNCTION(10, "QUADSPI_BK1_IO2 QUADSPI_BOOTBK1_IO2"),
- STM32_FUNCTION(12, "ETH_GMII_TXD3 ETH_MII_TXD3 ETH_RGMII_TXD3"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD3 ETH1_MII_TXD3 ETH1_RGMII_TXD3"),
STM32_FUNCTION(13, "FMC_A23"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(67, "PE3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE3"),
STM32_FUNCTION(1, "TRACED0"),
STM32_FUNCTION(5, "TIM15_BKIN"),
STM32_FUNCTION(7, "SAI1_SD_B"),
- STM32_FUNCTION(10, "SDMMC2_CK SDMMC_BOOT2_CK"),
+ STM32_FUNCTION(10, "SDMMC2_CK"),
STM32_FUNCTION(13, "FMC_A19"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(68, "PE4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE4"),
STM32_FUNCTION(1, "TRACED1"),
STM32_FUNCTION(3, "SAI1_D2"),
- STM32_FUNCTION(4, "DFSDM_DATA3"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN3"),
STM32_FUNCTION(5, "TIM15_CH1N"),
STM32_FUNCTION(6, "SPI4_NSS"),
STM32_FUNCTION(7, "SAI1_FS_A"),
- STM32_FUNCTION(8, "SDMMC2_CKIN SDMMC_BOOT2_CKIN"),
- STM32_FUNCTION(9, "SDMMC1_CKIN SDMMC_BOOT1_CKIN"),
- STM32_FUNCTION(10, "SDMMC2_D4 SDMMC_BOOT2_D4"),
- STM32_FUNCTION(12, "SDMMC1_D4 SDMMC_BOOT1_D4"),
+ STM32_FUNCTION(8, "SDMMC2_CKIN"),
+ STM32_FUNCTION(9, "SDMMC1_CKIN"),
+ STM32_FUNCTION(10, "SDMMC2_D4"),
+ STM32_FUNCTION(12, "SDMMC1_D4"),
STM32_FUNCTION(13, "FMC_A20"),
STM32_FUNCTION(14, "DCMI_D4"),
STM32_FUNCTION(15, "LCD_B0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(69, "PE5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE5"),
STM32_FUNCTION(1, "TRACED3"),
STM32_FUNCTION(3, "SAI1_CK2"),
- STM32_FUNCTION(4, "DFSDM_CK3"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN3"),
STM32_FUNCTION(5, "TIM15_CH1"),
STM32_FUNCTION(6, "SPI4_MISO"),
STM32_FUNCTION(7, "SAI1_SCK_A"),
- STM32_FUNCTION(8, "SDMMC2_D0DIR SDMMC_BOOT2_D0DIR"),
- STM32_FUNCTION(9, "SDMMC1_D0DIR SDMMC_BOOT1_D0DIR"),
- STM32_FUNCTION(10, "SDMMC2_D6 SDMMC_BOOT2_D6"),
- STM32_FUNCTION(12, "SDMMC1_D6 SDMMC_BOOT1_D6"),
+ STM32_FUNCTION(8, "SDMMC2_D0DIR"),
+ STM32_FUNCTION(9, "SDMMC1_D0DIR"),
+ STM32_FUNCTION(10, "SDMMC2_D6"),
+ STM32_FUNCTION(12, "SDMMC1_D6"),
STM32_FUNCTION(13, "FMC_A21"),
STM32_FUNCTION(14, "DCMI_D6"),
STM32_FUNCTION(15, "LCD_G0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(70, "PE6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE6"),
STM32_FUNCTION(1, "TRACED2"),
STM32_FUNCTION(2, "TIM1_BKIN2"),
@@ -1030,7 +1093,7 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(6, "SPI4_MOSI"),
STM32_FUNCTION(7, "SAI1_SD_A"),
STM32_FUNCTION(8, "SDMMC2_D0"),
- STM32_FUNCTION(9, "SDMMC1_D2 SDMMC_BOOT1_D2"),
+ STM32_FUNCTION(9, "SDMMC1_D2"),
STM32_FUNCTION(11, "SAI2_MCLK_B"),
STM32_FUNCTION(13, "FMC_A22"),
STM32_FUNCTION(14, "DCMI_D7"),
@@ -1038,119 +1101,132 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(71, "PE7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE7"),
STM32_FUNCTION(2, "TIM1_ETR"),
STM32_FUNCTION(3, "TIM3_ETR"),
- STM32_FUNCTION(4, "DFSDM_DATA2"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN2"),
STM32_FUNCTION(8, "UART7_RX"),
- STM32_FUNCTION(11, "QUADSPI_BK2_IO0 QUADSPI_BOOTBK2_IO0"),
- STM32_FUNCTION(13, "FMC_D4"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO0"),
+ STM32_FUNCTION(13, "FMC_D4 FMC_DA4"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(72, "PE8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE8"),
STM32_FUNCTION(2, "TIM1_CH1N"),
- STM32_FUNCTION(4, "DFSDM_CK2"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN2"),
STM32_FUNCTION(8, "UART7_TX"),
- STM32_FUNCTION(11, "QUADSPI_BK2_IO1 QUADSPI_BOOTBK2_IO1"),
- STM32_FUNCTION(13, "FMC_D5"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO1"),
+ STM32_FUNCTION(13, "FMC_D5 FMC_DA5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(73, "PE9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE9"),
STM32_FUNCTION(2, "TIM1_CH1"),
- STM32_FUNCTION(4, "DFSDM_CKOUT"),
- STM32_FUNCTION(8, "UART7_RTS UART_BOOT7_RTS"),
- STM32_FUNCTION(11, "QUADSPI_BK2_IO2 QUADSPI_BOOTBK2_IO2"),
- STM32_FUNCTION(13, "FMC_D6"),
+ STM32_FUNCTION(4, "DFSDM1_CKOUT"),
+ STM32_FUNCTION(8, "UART7_RTS UART7_DE"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(13, "FMC_D6 FMC_DA6"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(74, "PE10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE10"),
STM32_FUNCTION(2, "TIM1_CH2N"),
- STM32_FUNCTION(4, "DFSDM_DATA4"),
- STM32_FUNCTION(8, "UART7_CTS UART_BOOT7_CTS"),
- STM32_FUNCTION(11, "QUADSPI_BK2_IO3 QUADSPI_BOOTBK2_IO3"),
- STM32_FUNCTION(13, "FMC_D7"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN4"),
+ STM32_FUNCTION(8, "UART7_CTS"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO3"),
+ STM32_FUNCTION(13, "FMC_D7 FMC_DA7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(75, "PE11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE11"),
STM32_FUNCTION(2, "TIM1_CH2"),
- STM32_FUNCTION(4, "DFSDM_CK4"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN4"),
STM32_FUNCTION(6, "SPI4_NSS"),
- STM32_FUNCTION(8, "USART6_CK USART_BOOT6_CK"),
+ STM32_FUNCTION(8, "USART6_CK"),
STM32_FUNCTION(11, "SAI2_SD_B"),
- STM32_FUNCTION(13, "FMC_D8"),
+ STM32_FUNCTION(13, "FMC_D8 FMC_DA8"),
+ STM32_FUNCTION(14, "DCMI_D4"),
STM32_FUNCTION(15, "LCD_G3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(76, "PE12"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE12"),
STM32_FUNCTION(2, "TIM1_CH3N"),
- STM32_FUNCTION(4, "DFSDM_DATA5"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN5"),
STM32_FUNCTION(6, "SPI4_SCK"),
- STM32_FUNCTION(9, "SDMMC1_D0DIR SDMMC_BOOT1_D0DIR"),
+ STM32_FUNCTION(9, "SDMMC1_D0DIR"),
STM32_FUNCTION(11, "SAI2_SCK_B"),
- STM32_FUNCTION(13, "FMC_D9"),
+ STM32_FUNCTION(13, "FMC_D9 FMC_DA9"),
STM32_FUNCTION(15, "LCD_B4"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(77, "PE13"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE13"),
STM32_FUNCTION(1, "HDP2"),
STM32_FUNCTION(2, "TIM1_CH3"),
- STM32_FUNCTION(4, "DFSDM_CK5"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN5"),
STM32_FUNCTION(6, "SPI4_MISO"),
STM32_FUNCTION(11, "SAI2_FS_B"),
- STM32_FUNCTION(13, "FMC_D10"),
+ STM32_FUNCTION(13, "FMC_D10 FMC_DA10"),
+ STM32_FUNCTION(14, "DCMI_D6"),
STM32_FUNCTION(15, "LCD_DE"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(78, "PE14"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE14"),
STM32_FUNCTION(2, "TIM1_CH4"),
STM32_FUNCTION(6, "SPI4_MOSI"),
- STM32_FUNCTION(9, "UART8_RTS UART_BOOT8_RTS"),
+ STM32_FUNCTION(9, "UART8_RTS UART8_DE"),
STM32_FUNCTION(11, "SAI2_MCLK_B"),
- STM32_FUNCTION(12, "SDMMC1_D123DIR SDMMC_BOOT1_D123DIR"),
- STM32_FUNCTION(13, "FMC_D11"),
+ STM32_FUNCTION(12, "SDMMC1_D123DIR"),
+ STM32_FUNCTION(13, "FMC_D11 FMC_DA11"),
STM32_FUNCTION(14, "LCD_G0"),
STM32_FUNCTION(15, "LCD_CLK"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(79, "PE15"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOE15"),
STM32_FUNCTION(1, "HDP3"),
STM32_FUNCTION(2, "TIM1_BKIN"),
STM32_FUNCTION(5, "TIM15_BKIN"),
- STM32_FUNCTION(8, "USART2_CTS_NSS USART_BOOT2_CTS_NSS"),
- STM32_FUNCTION(9, "UART8_CTS UART_BOOT8_CTS"),
- STM32_FUNCTION(13, "FMC_D12"),
+ STM32_FUNCTION(8, "USART2_CTS USART2_NSS"),
+ STM32_FUNCTION(9, "UART8_CTS"),
+ STM32_FUNCTION(11, "FMC_NCE2"),
+ STM32_FUNCTION(13, "FMC_D12 FMC_DA12"),
STM32_FUNCTION(15, "LCD_R7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(80, "PF0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF0"),
STM32_FUNCTION(5, "I2C2_SDA"),
STM32_FUNCTION(10, "SDMMC3_D0"),
@@ -1159,8 +1235,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(81, "PF1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF1"),
STM32_FUNCTION(5, "I2C2_SCL"),
STM32_FUNCTION(10, "SDMMC3_CMD"),
@@ -1169,27 +1246,30 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(82, "PF2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF2"),
STM32_FUNCTION(5, "I2C2_SMBA"),
- STM32_FUNCTION(10, "SDMMC2_D0DIR SDMMC_BOOT2_D0DIR"),
+ STM32_FUNCTION(10, "SDMMC2_D0DIR"),
STM32_FUNCTION(11, "SDMMC3_D0DIR"),
- STM32_FUNCTION(12, "SDMMC1_D0DIR SDMMC_BOOT1_D0DIR"),
+ STM32_FUNCTION(12, "SDMMC1_D0DIR"),
STM32_FUNCTION(13, "FMC_A2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(83, "PF3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF3"),
- STM32_FUNCTION(12, "ETH_GMII_TX_ER ETH_MII_TX_ER"),
+ STM32_FUNCTION(12, "ETH1_GMII_TX_ER ETH1_MII_TX_ER"),
STM32_FUNCTION(13, "FMC_A3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(84, "PF4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF4"),
STM32_FUNCTION(8, "USART2_RX"),
STM32_FUNCTION(10, "SDMMC3_D1"),
@@ -1198,8 +1278,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(85, "PF5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF5"),
STM32_FUNCTION(8, "USART2_TX"),
STM32_FUNCTION(10, "SDMMC3_D2"),
@@ -1207,71 +1288,77 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(86, "PF6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOF6"),
STM32_FUNCTION(2, "TIM16_CH1"),
STM32_FUNCTION(6, "SPI5_NSS"),
STM32_FUNCTION(7, "SAI1_SD_B"),
- STM32_FUNCTION(8, "UART7_RX UART_BOOT7_RX"),
- STM32_FUNCTION(10, "QUADSPI_BK1_IO3 QUADSPI_BOOTBK1_IO3"),
+ STM32_FUNCTION(8, "UART7_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
STM32_FUNCTION(13, "SAI4_SCK_B"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(87, "PF7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOF7"),
STM32_FUNCTION(2, "TIM17_CH1"),
STM32_FUNCTION(6, "SPI5_SCK"),
STM32_FUNCTION(7, "SAI1_MCLK_B"),
- STM32_FUNCTION(8, "UART7_TX UART_BOOT7_TX"),
- STM32_FUNCTION(10, "QUADSPI_BK1_IO2 QUADSPI_BOOTBK1_IO2"),
+ STM32_FUNCTION(8, "UART7_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(88, "PF8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOF8"),
STM32_FUNCTION(1, "TRACED12"),
STM32_FUNCTION(2, "TIM16_CH1N"),
STM32_FUNCTION(6, "SPI5_MISO"),
STM32_FUNCTION(7, "SAI1_SCK_B"),
- STM32_FUNCTION(8, "UART7_RTS UART_BOOT7_RTS"),
+ STM32_FUNCTION(8, "UART7_RTS UART7_DE"),
STM32_FUNCTION(10, "TIM13_CH1"),
- STM32_FUNCTION(11, "QUADSPI_BK1_IO0 QUADSPI_BOOTBK1_IO0"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_IO0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(89, "PF9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOF9"),
STM32_FUNCTION(1, "TRACED13"),
STM32_FUNCTION(2, "TIM17_CH1N"),
STM32_FUNCTION(6, "SPI5_MOSI"),
STM32_FUNCTION(7, "SAI1_FS_B"),
- STM32_FUNCTION(8, "UART7_CTS UART_BOOT7_CTS"),
+ STM32_FUNCTION(8, "UART7_CTS"),
STM32_FUNCTION(10, "TIM14_CH1"),
- STM32_FUNCTION(11, "QUADSPI_BK1_IO1 QUADSPI_BOOTBK1_IO1"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_IO1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(90, "PF10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOF10"),
STM32_FUNCTION(2, "TIM16_BKIN"),
STM32_FUNCTION(3, "SAI1_D3"),
STM32_FUNCTION(4, "SAI4_D4"),
STM32_FUNCTION(7, "SAI1_D4"),
- STM32_FUNCTION(10, "QUADSPI_CLK QUADSPI_BOOTCLK"),
+ STM32_FUNCTION(10, "QUADSPI_CLK"),
STM32_FUNCTION(13, "SAI4_D3"),
STM32_FUNCTION(14, "DCMI_D11"),
STM32_FUNCTION(15, "LCD_DE"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(91, "PF11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOF11"),
STM32_FUNCTION(6, "SPI5_MOSI"),
STM32_FUNCTION(11, "SAI2_SD_B"),
@@ -1280,138 +1367,151 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(92, "PF12"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF12"),
STM32_FUNCTION(1, "TRACED4"),
- STM32_FUNCTION(12, "ETH_GMII_RXD4"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD4"),
STM32_FUNCTION(13, "FMC_A6"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(93, "PF13"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF13"),
STM32_FUNCTION(1, "TRACED5"),
- STM32_FUNCTION(4, "DFSDM_DATA6"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN6"),
STM32_FUNCTION(5, "I2C4_SMBA"),
STM32_FUNCTION(6, "I2C1_SMBA"),
- STM32_FUNCTION(7, "DFSDM_DATA3"),
- STM32_FUNCTION(12, "ETH_GMII_RXD5"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN3"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD5"),
STM32_FUNCTION(13, "FMC_A7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(94, "PF14"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF14"),
STM32_FUNCTION(1, "TRACED6"),
- STM32_FUNCTION(4, "DFSDM_CK6"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN6"),
STM32_FUNCTION(5, "I2C4_SCL"),
STM32_FUNCTION(6, "I2C1_SCL"),
- STM32_FUNCTION(12, "ETH_GMII_RXD6"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD6"),
STM32_FUNCTION(13, "FMC_A8"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(95, "PF15"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOF15"),
STM32_FUNCTION(1, "TRACED7"),
STM32_FUNCTION(5, "I2C4_SDA"),
STM32_FUNCTION(6, "I2C1_SDA"),
- STM32_FUNCTION(12, "ETH_GMII_RXD7"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD7"),
STM32_FUNCTION(13, "FMC_A9"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(96, "PG0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOG0"),
STM32_FUNCTION(1, "TRACED0"),
- STM32_FUNCTION(4, "DFSDM_DATA0"),
- STM32_FUNCTION(12, "ETH_GMII_TXD4"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN0"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD4"),
STM32_FUNCTION(13, "FMC_A10"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(97, "PG1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOG1"),
STM32_FUNCTION(1, "TRACED1"),
- STM32_FUNCTION(12, "ETH_GMII_TXD5"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD5"),
STM32_FUNCTION(13, "FMC_A11"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(98, "PG2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOG2"),
STM32_FUNCTION(1, "TRACED2"),
STM32_FUNCTION(2, "MCO2"),
STM32_FUNCTION(4, "TIM8_BKIN"),
- STM32_FUNCTION(12, "ETH_GMII_TXD6"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD6"),
STM32_FUNCTION(13, "FMC_A12"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(99, "PG3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOG3"),
STM32_FUNCTION(1, "TRACED3"),
STM32_FUNCTION(4, "TIM8_BKIN2"),
- STM32_FUNCTION(5, "DFSDM_CK1"),
- STM32_FUNCTION(12, "ETH_GMII_TXD7"),
+ STM32_FUNCTION(5, "DFSDM1_CKIN1"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD7"),
STM32_FUNCTION(13, "FMC_A13"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(100, "PG4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOG4"),
STM32_FUNCTION(2, "TIM1_BKIN2"),
- STM32_FUNCTION(12, "ETH_GMII_GTX_CLK ETH_RGMII_GTX_CLK"),
+ STM32_FUNCTION(12, "ETH1_GMII_GTX_CLK ETH1_RGMII_GTX_CLK"),
STM32_FUNCTION(13, "FMC_A14"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(101, "PG5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOG5"),
STM32_FUNCTION(2, "TIM1_ETR"),
- STM32_FUNCTION(12, "ETH_GMII_CLK125 ETH_RGMII_CLK125"),
+ STM32_FUNCTION(12, "ETH1_GMII_CLK125 ETH1_RGMII_CLK125"),
STM32_FUNCTION(13, "FMC_A15"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(102, "PG6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG6"),
STM32_FUNCTION(1, "TRACED14"),
STM32_FUNCTION(2, "TIM17_BKIN"),
- STM32_FUNCTION(11, "SDMMC2_CMD SDMMC_BOOT2_CMD"),
+ STM32_FUNCTION(11, "SDMMC2_CMD"),
STM32_FUNCTION(14, "DCMI_D12"),
STM32_FUNCTION(15, "LCD_R7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(103, "PG7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG7"),
STM32_FUNCTION(1, "TRACED5"),
STM32_FUNCTION(7, "SAI1_MCLK_A"),
- STM32_FUNCTION(8, "USART6_CK USART_BOOT6_CK"),
- STM32_FUNCTION(9, "UART8_RTS UART_BOOT8_RTS"),
+ STM32_FUNCTION(8, "USART6_CK"),
+ STM32_FUNCTION(9, "UART8_RTS UART8_DE"),
STM32_FUNCTION(10, "QUADSPI_CLK"),
- STM32_FUNCTION(12, "QUADSPI_BK2_IO3 QUADSPI_BOOTBK2_IO3"),
+ STM32_FUNCTION(12, "QUADSPI_BK2_IO3"),
STM32_FUNCTION(13, "FMC_INT"),
STM32_FUNCTION(14, "DCMI_D13"),
STM32_FUNCTION(15, "LCD_CLK"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(104, "PG8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG8"),
STM32_FUNCTION(1, "TRACED15"),
STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
@@ -1419,73 +1519,79 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(4, "TIM8_ETR"),
STM32_FUNCTION(6, "SPI6_NSS"),
STM32_FUNCTION(7, "SAI4_D2"),
- STM32_FUNCTION(8, "USART6_RTS USART_BOOT6_RTS"),
- STM32_FUNCTION(9, "USART3_RTS"),
- STM32_FUNCTION(10, "SPDIF_IN2"),
+ STM32_FUNCTION(8, "USART6_RTS USART6_DE"),
+ STM32_FUNCTION(9, "USART3_RTS USART3_DE"),
+ STM32_FUNCTION(10, "SPDIFRX_IN2"),
STM32_FUNCTION(11, "SAI4_FS_A"),
- STM32_FUNCTION(12, "ETH_PPS_OUT"),
+ STM32_FUNCTION(12, "ETH1_PPS_OUT"),
STM32_FUNCTION(15, "LCD_G7"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(105, "PG9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG9"),
STM32_FUNCTION(1, "DBTRGO"),
STM32_FUNCTION(8, "USART6_RX"),
- STM32_FUNCTION(9, "SPDIF_IN3"),
- STM32_FUNCTION(10, "QUADSPI_BK2_IO2 QUADSPI_BOOTBK2_IO2"),
+ STM32_FUNCTION(9, "SPDIFRX_IN3"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO2"),
STM32_FUNCTION(11, "SAI2_FS_B"),
- STM32_FUNCTION(13, "FMC_NE2 FMC_NCE"),
+ STM32_FUNCTION(13, "FMC_NCE FMC_NE2"),
STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(15, "LCD_R1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(106, "PG10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG10"),
STM32_FUNCTION(1, "TRACED10"),
- STM32_FUNCTION(9, "UART8_CTS UART_BOOT8_CTS"),
+ STM32_FUNCTION(9, "UART8_CTS"),
STM32_FUNCTION(10, "LCD_G3"),
STM32_FUNCTION(11, "SAI2_SD_B"),
- STM32_FUNCTION(12, "QUADSPI_BK2_IO2 QUADSPI_BOOTBK2_IO2"),
+ STM32_FUNCTION(12, "QUADSPI_BK2_IO2"),
STM32_FUNCTION(13, "FMC_NE3"),
STM32_FUNCTION(14, "DCMI_D2"),
STM32_FUNCTION(15, "LCD_B2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(107, "PG11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG11"),
STM32_FUNCTION(1, "TRACED11"),
STM32_FUNCTION(5, "USART1_TX"),
- STM32_FUNCTION(7, "UART4_TX UART_BOOT4_TX"),
- STM32_FUNCTION(9, "SPDIF_IN0"),
- STM32_FUNCTION(12, "ETH_GMII_TX_EN ETH_MII_TX_EN ETH_RGMII_TX_CTL ETH_RMII_TX_EN"),
+ STM32_FUNCTION(7, "UART4_TX"),
+ STM32_FUNCTION(9, "SPDIFRX_IN0"),
+ STM32_FUNCTION(12, "ETH1_GMII_TX_EN ETH1_MII_TX_EN ETH1_RGMII_TX_CTL ETH1_RMII_TX_EN"),
STM32_FUNCTION(14, "DCMI_D3"),
STM32_FUNCTION(15, "LCD_B3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(108, "PG12"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG12"),
STM32_FUNCTION(2, "LPTIM1_IN1"),
STM32_FUNCTION(6, "SPI6_MISO"),
STM32_FUNCTION(7, "SAI4_CK2"),
- STM32_FUNCTION(8, "USART6_RTS USART_BOOT6_RTS"),
- STM32_FUNCTION(9, "SPDIF_IN1"),
+ STM32_FUNCTION(8, "USART6_RTS USART6_DE"),
+ STM32_FUNCTION(9, "SPDIFRX_IN1"),
STM32_FUNCTION(10, "LCD_B4"),
STM32_FUNCTION(11, "SAI4_SCK_A"),
- STM32_FUNCTION(12, "ETH_PHY_INTN"),
+ STM32_FUNCTION(12, "ETH1_PHY_INTN"),
STM32_FUNCTION(13, "FMC_NE4"),
STM32_FUNCTION(15, "LCD_B1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(109, "PG13"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG13"),
STM32_FUNCTION(1, "TRACED0"),
STM32_FUNCTION(2, "LPTIM1_OUT"),
@@ -1493,79 +1599,86 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(5, "SAI4_CK1"),
STM32_FUNCTION(6, "SPI6_SCK"),
STM32_FUNCTION(7, "SAI1_SCK_A"),
- STM32_FUNCTION(8, "USART6_CTS_NSS USART_BOOT6_CTS_NSS"),
+ STM32_FUNCTION(8, "USART6_CTS USART6_NSS"),
STM32_FUNCTION(11, "SAI4_MCLK_A"),
- STM32_FUNCTION(12, "ETH_GMII_TXD0 ETH_MII_TXD0 ETH_RGMII_TXD0 ETH_RMII_TXD0"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD0 ETH1_MII_TXD0 ETH1_RGMII_TXD0 ETH1_RMII_TXD0"),
STM32_FUNCTION(13, "FMC_A24"),
STM32_FUNCTION(15, "LCD_R0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(110, "PG14"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG14"),
STM32_FUNCTION(1, "TRACED1"),
STM32_FUNCTION(2, "LPTIM1_ETR"),
STM32_FUNCTION(6, "SPI6_MOSI"),
STM32_FUNCTION(7, "SAI4_D1"),
STM32_FUNCTION(8, "USART6_TX"),
- STM32_FUNCTION(10, "QUADSPI_BK2_IO3 QUADSPI_BOOTBK2_IO3"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO3"),
STM32_FUNCTION(11, "SAI4_SD_A"),
- STM32_FUNCTION(12, "ETH_GMII_TXD1 ETH_MII_TXD1 ETH_RGMII_TXD1 ETH_RMII_TXD1"),
+ STM32_FUNCTION(12, "ETH1_GMII_TXD1 ETH1_MII_TXD1 ETH1_RGMII_TXD1 ETH1_RMII_TXD1"),
STM32_FUNCTION(13, "FMC_A25"),
STM32_FUNCTION(15, "LCD_B0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(111, "PG15"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOG15"),
STM32_FUNCTION(1, "TRACED7"),
STM32_FUNCTION(3, "SAI1_D2"),
STM32_FUNCTION(5, "I2C2_SDA"),
STM32_FUNCTION(7, "SAI1_FS_A"),
- STM32_FUNCTION(8, "USART6_CTS_NSS USART_BOOT6_CTS_NSS"),
+ STM32_FUNCTION(8, "USART6_CTS USART6_NSS"),
STM32_FUNCTION(11, "SDMMC3_CK"),
STM32_FUNCTION(14, "DCMI_D13"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(112, "PH0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOH0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(113, "PH1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC | STM32MP_PKG_AB | STM32MP_PKG_AD,
STM32_FUNCTION(0, "GPIOH1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(114, "PH2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH2"),
STM32_FUNCTION(2, "LPTIM1_IN2"),
- STM32_FUNCTION(10, "QUADSPI_BK2_IO0 QUADSPI_BOOTBK2_IO0"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO0"),
STM32_FUNCTION(11, "SAI2_SCK_B"),
- STM32_FUNCTION(12, "ETH_GMII_CRS ETH_MII_CRS"),
+ STM32_FUNCTION(12, "ETH1_GMII_CRS ETH1_MII_CRS"),
STM32_FUNCTION(15, "LCD_R0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(115, "PH3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH3"),
- STM32_FUNCTION(4, "DFSDM_CK4"),
- STM32_FUNCTION(10, "QUADSPI_BK2_IO1 QUADSPI_BOOTBK2_IO1"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN4"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO1"),
STM32_FUNCTION(11, "SAI2_MCLK_B"),
- STM32_FUNCTION(12, "ETH_GMII_COL ETH_MII_COL"),
+ STM32_FUNCTION(12, "ETH1_GMII_COL ETH1_MII_COL"),
STM32_FUNCTION(15, "LCD_R1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(116, "PH4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH4"),
STM32_FUNCTION(5, "I2C2_SCL"),
STM32_FUNCTION(10, "LCD_G5"),
@@ -1573,8 +1686,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(117, "PH5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH5"),
STM32_FUNCTION(5, "I2C2_SDA"),
STM32_FUNCTION(6, "SPI5_NSS"),
@@ -1582,31 +1696,34 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(118, "PH6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH6"),
STM32_FUNCTION(3, "TIM12_CH1"),
STM32_FUNCTION(5, "I2C2_SMBA"),
STM32_FUNCTION(6, "SPI5_SCK"),
- STM32_FUNCTION(12, "ETH_GMII_RXD2 ETH_MII_RXD2 ETH_RGMII_RXD2"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD2 ETH1_MII_RXD2 ETH1_RGMII_RXD2"),
STM32_FUNCTION(13, "MDIOS_MDIO"),
STM32_FUNCTION(14, "DCMI_D8"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(119, "PH7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH7"),
STM32_FUNCTION(5, "I2C3_SCL"),
STM32_FUNCTION(6, "SPI5_MISO"),
- STM32_FUNCTION(12, "ETH_GMII_RXD3 ETH_MII_RXD3 ETH_RGMII_RXD3"),
+ STM32_FUNCTION(12, "ETH1_GMII_RXD3 ETH1_MII_RXD3 ETH1_RGMII_RXD3"),
STM32_FUNCTION(13, "MDIOS_MDC"),
STM32_FUNCTION(14, "DCMI_D9"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(120, "PH8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH8"),
STM32_FUNCTION(3, "TIM5_ETR"),
STM32_FUNCTION(5, "I2C3_SDA"),
@@ -1615,8 +1732,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(121, "PH9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH9"),
STM32_FUNCTION(3, "TIM12_CH2"),
STM32_FUNCTION(5, "I2C3_SMBA"),
@@ -1625,8 +1743,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(122, "PH10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH10"),
STM32_FUNCTION(3, "TIM5_CH1"),
STM32_FUNCTION(5, "I2C4_SMBA"),
@@ -1636,8 +1755,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(123, "PH11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH11"),
STM32_FUNCTION(3, "TIM5_CH2"),
STM32_FUNCTION(5, "I2C4_SCL"),
@@ -1647,8 +1767,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(124, "PH12"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH12"),
STM32_FUNCTION(1, "HDP2"),
STM32_FUNCTION(3, "TIM5_CH3"),
@@ -1659,50 +1780,53 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(125, "PH13"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH13"),
STM32_FUNCTION(4, "TIM8_CH1N"),
STM32_FUNCTION(9, "UART4_TX"),
- STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(10, "FDCAN1_TX"),
STM32_FUNCTION(15, "LCD_G2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(126, "PH14"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH14"),
STM32_FUNCTION(4, "TIM8_CH2N"),
STM32_FUNCTION(9, "UART4_RX"),
- STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(10, "FDCAN1_RX"),
STM32_FUNCTION(14, "DCMI_D4"),
STM32_FUNCTION(15, "LCD_G3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(127, "PH15"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOH15"),
STM32_FUNCTION(4, "TIM8_CH3N"),
- STM32_FUNCTION(10, "CAN1_TXFD"),
STM32_FUNCTION(14, "DCMI_D11"),
STM32_FUNCTION(15, "LCD_G4"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(128, "PI0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI0"),
STM32_FUNCTION(3, "TIM5_CH4"),
STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
- STM32_FUNCTION(10, "CAN1_RXFD"),
STM32_FUNCTION(14, "DCMI_D13"),
STM32_FUNCTION(15, "LCD_G5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(129, "PI1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI1"),
STM32_FUNCTION(4, "TIM8_BKIN2"),
STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
@@ -1711,8 +1835,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(130, "PI2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI2"),
STM32_FUNCTION(4, "TIM8_CH4"),
STM32_FUNCTION(6, "SPI2_MISO I2S2_SDI"),
@@ -1721,8 +1846,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(131, "PI3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI3"),
STM32_FUNCTION(4, "TIM8_ETR"),
STM32_FUNCTION(6, "SPI2_MOSI I2S2_SDO"),
@@ -1730,8 +1856,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(132, "PI4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI4"),
STM32_FUNCTION(4, "TIM8_BKIN"),
STM32_FUNCTION(11, "SAI2_MCLK_A"),
@@ -1740,8 +1867,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(133, "PI5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI5"),
STM32_FUNCTION(4, "TIM8_CH1"),
STM32_FUNCTION(11, "SAI2_SCK_A"),
@@ -1750,8 +1878,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(134, "PI6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI6"),
STM32_FUNCTION(4, "TIM8_CH2"),
STM32_FUNCTION(11, "SAI2_SD_A"),
@@ -1760,8 +1889,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(135, "PI7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI7"),
STM32_FUNCTION(4, "TIM8_CH3"),
STM32_FUNCTION(11, "SAI2_FS_A"),
@@ -1770,35 +1900,38 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(136, "PI8"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI8"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(137, "PI9"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI9"),
STM32_FUNCTION(1, "HDP1"),
STM32_FUNCTION(9, "UART4_RX"),
- STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(10, "FDCAN1_RX"),
STM32_FUNCTION(15, "LCD_VSYNC"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(138, "PI10"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI10"),
STM32_FUNCTION(1, "HDP0"),
- STM32_FUNCTION(9, "USART3_CTS_NSS USART_BOOT3_CTS_NSS"),
- STM32_FUNCTION(10, "CAN1_RXFD"),
- STM32_FUNCTION(12, "ETH_GMII_RX_ER ETH_MII_RX_ER"),
+ STM32_FUNCTION(9, "USART3_CTS USART3_NSS"),
+ STM32_FUNCTION(12, "ETH1_GMII_RX_ER ETH1_MII_RX_ER"),
STM32_FUNCTION(15, "LCD_HSYNC"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(139, "PI11"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOI11"),
STM32_FUNCTION(1, "MCO1"),
STM32_FUNCTION(6, "I2S_CKIN"),
@@ -1806,8 +1939,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(140, "PI12"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOI12"),
STM32_FUNCTION(1, "TRACED0"),
STM32_FUNCTION(3, "HDP0"),
@@ -1815,8 +1949,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(141, "PI13"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOI13"),
STM32_FUNCTION(1, "TRACED1"),
STM32_FUNCTION(3, "HDP1"),
@@ -1824,24 +1959,27 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(142, "PI14"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOI14"),
STM32_FUNCTION(1, "TRACECLK"),
STM32_FUNCTION(15, "LCD_CLK"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(143, "PI15"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOI15"),
STM32_FUNCTION(10, "LCD_G2"),
STM32_FUNCTION(15, "LCD_R0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(144, "PJ0"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ0"),
STM32_FUNCTION(1, "TRACED8"),
STM32_FUNCTION(10, "LCD_R7"),
@@ -1849,16 +1987,18 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(145, "PJ1"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ1"),
STM32_FUNCTION(1, "TRACED9"),
STM32_FUNCTION(15, "LCD_R2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(146, "PJ2"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ2"),
STM32_FUNCTION(1, "TRACED10"),
STM32_FUNCTION(14, "DSI_TE"),
@@ -1866,24 +2006,27 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(147, "PJ3"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ3"),
STM32_FUNCTION(1, "TRACED11"),
STM32_FUNCTION(15, "LCD_R4"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(148, "PJ4"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ4"),
STM32_FUNCTION(1, "TRACED12"),
STM32_FUNCTION(15, "LCD_R5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(149, "PJ5"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ5"),
STM32_FUNCTION(1, "TRACED2"),
STM32_FUNCTION(3, "HDP2"),
@@ -1891,8 +2034,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(150, "PJ6"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ6"),
STM32_FUNCTION(1, "TRACED3"),
STM32_FUNCTION(3, "HDP3"),
@@ -1901,8 +2045,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(151, "PJ7"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ7"),
STM32_FUNCTION(1, "TRACED13"),
STM32_FUNCTION(4, "TIM8_CH2N"),
@@ -1910,8 +2055,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(152, "PJ8"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ8"),
STM32_FUNCTION(1, "TRACED14"),
STM32_FUNCTION(2, "TIM1_CH3N"),
@@ -1921,8 +2067,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(153, "PJ9"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ9"),
STM32_FUNCTION(1, "TRACED15"),
STM32_FUNCTION(2, "TIM1_CH3"),
@@ -1932,8 +2079,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(154, "PJ10"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ10"),
STM32_FUNCTION(2, "TIM1_CH2N"),
STM32_FUNCTION(4, "TIM8_CH2"),
@@ -1942,8 +2090,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(155, "PJ11"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ11"),
STM32_FUNCTION(2, "TIM1_CH2"),
STM32_FUNCTION(4, "TIM8_CH2N"),
@@ -1952,38 +2101,43 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(156, "PJ12"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ12"),
STM32_FUNCTION(10, "LCD_G3"),
STM32_FUNCTION(15, "LCD_B0"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(157, "PJ13"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ13"),
STM32_FUNCTION(10, "LCD_G4"),
STM32_FUNCTION(15, "LCD_B1"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(158, "PJ14"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ14"),
STM32_FUNCTION(15, "LCD_B2"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(159, "PJ15"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOJ15"),
STM32_FUNCTION(15, "LCD_B3"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(160, "PK0"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOK0"),
STM32_FUNCTION(2, "TIM1_CH1N"),
STM32_FUNCTION(4, "TIM8_CH3"),
@@ -1992,8 +2146,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(161, "PK1"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOK1"),
STM32_FUNCTION(1, "TRACED4"),
STM32_FUNCTION(2, "TIM1_CH1"),
@@ -2004,8 +2159,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(162, "PK2"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOK2"),
STM32_FUNCTION(1, "TRACED5"),
STM32_FUNCTION(2, "TIM1_BKIN"),
@@ -2015,22 +2171,25 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(163, "PK3"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOK3"),
STM32_FUNCTION(15, "LCD_B4"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(164, "PK4"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOK4"),
STM32_FUNCTION(15, "LCD_B5"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(165, "PK5"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOK5"),
STM32_FUNCTION(1, "TRACED6"),
STM32_FUNCTION(3, "HDP6"),
@@ -2038,8 +2197,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(166, "PK6"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOK6"),
STM32_FUNCTION(1, "TRACED7"),
STM32_FUNCTION(3, "HDP7"),
@@ -2047,8 +2207,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(167, "PK7"),
+ STM32MP_PKG_AA,
STM32_FUNCTION(0, "GPIOK7"),
STM32_FUNCTION(15, "LCD_DE"),
STM32_FUNCTION(16, "EVENTOUT"),
@@ -2057,8 +2218,9 @@ static const struct stm32_desc_pin stm32mp157_pins[] = {
};
static const struct stm32_desc_pin stm32mp157_z_pins[] = {
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(400, "PZ0"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOZ0"),
STM32_FUNCTION(3, "I2C6_SCL"),
STM32_FUNCTION(4, "I2C2_SCL"),
@@ -2068,8 +2230,9 @@ static const struct stm32_desc_pin stm32mp157_z_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(401, "PZ1"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOZ1"),
STM32_FUNCTION(3, "I2C6_SDA"),
STM32_FUNCTION(4, "I2C2_SDA"),
@@ -2081,8 +2244,9 @@ static const struct stm32_desc_pin stm32mp157_z_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(402, "PZ2"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOZ2"),
STM32_FUNCTION(3, "I2C6_SCL"),
STM32_FUNCTION(4, "I2C2_SCL"),
@@ -2094,21 +2258,23 @@ static const struct stm32_desc_pin stm32mp157_z_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(403, "PZ3"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOZ3"),
STM32_FUNCTION(3, "I2C6_SDA"),
STM32_FUNCTION(4, "I2C2_SDA"),
STM32_FUNCTION(5, "I2C5_SDA"),
STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
STM32_FUNCTION(7, "I2C4_SDA"),
- STM32_FUNCTION(8, "USART1_CTS_NSS"),
+ STM32_FUNCTION(8, "USART1_CTS USART1_NSS"),
STM32_FUNCTION(9, "SPI6_NSS"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(404, "PZ4"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOZ4"),
STM32_FUNCTION(3, "I2C6_SCL"),
STM32_FUNCTION(4, "I2C2_SCL"),
@@ -2117,19 +2283,21 @@ static const struct stm32_desc_pin stm32mp157_z_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(405, "PZ5"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOZ5"),
STM32_FUNCTION(3, "I2C6_SDA"),
STM32_FUNCTION(4, "I2C2_SDA"),
STM32_FUNCTION(5, "I2C5_SDA"),
STM32_FUNCTION(7, "I2C4_SDA"),
- STM32_FUNCTION(8, "USART1_RTS"),
+ STM32_FUNCTION(8, "USART1_RTS USART1_DE"),
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(406, "PZ6"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOZ6"),
STM32_FUNCTION(3, "I2C6_SCL"),
STM32_FUNCTION(4, "I2C2_SCL"),
@@ -2140,8 +2308,9 @@ static const struct stm32_desc_pin stm32mp157_z_pins[] = {
STM32_FUNCTION(16, "EVENTOUT"),
STM32_FUNCTION(17, "ANALOG")
),
- STM32_PIN(
+ STM32_PIN_PKG(
PINCTRL_PIN(407, "PZ7"),
+ STM32MP_PKG_AA | STM32MP_PKG_AC,
STM32_FUNCTION(0, "GPIOZ7"),
STM32_FUNCTION(3, "I2C6_SDA"),
STM32_FUNCTION(4, "I2C2_SDA"),
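Editorial note on the conversion above: each STM32_PIN entry becomes STM32_PIN_PKG and gains a package bitmask (some pins, e.g. PF0, carry only STM32MP_PKG_AA | STM32MP_PKG_AC), presumably so the driver can skip pins that are not bonded out on the detected package. A minimal standalone C sketch of that membership test follows; the bit values and variable names are made up for illustration and are not taken from this patch.

#include <stdio.h>

/* Made-up mask values purely for illustration; the real macros are defined
 * elsewhere in the driver. */
#define PKG_AA (1 << 0)
#define PKG_AB (1 << 1)
#define PKG_AC (1 << 2)
#define PKG_AD (1 << 3)

int main(void)
{
	unsigned int pin_pkgs = PKG_AA | PKG_AC;  /* a PF0-like pin */
	unsigned int detected = PKG_AD;           /* hypothetical running package */

	if (pin_pkgs & detected)
		puts("pin is available on this package");
	else
		puts("pin is skipped on this package");
	return 0;
}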
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index a731fc966b63..9093a420d310 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -11,82 +11,101 @@ config PINCTRL_SUNIV_F1C100S
select PINCTRL_SUNXI
config PINCTRL_SUN4I_A10
- def_bool MACH_SUN4I || MACH_SUN7I || MACH_SUN8I
+ bool "Support for the Allwinner A10, A20 and R40 PIO"
+ default MACH_SUN4I || MACH_SUN7I || MACH_SUN8I
select PINCTRL_SUNXI
config PINCTRL_SUN5I
- def_bool MACH_SUN5I
+ bool "Support for the Allwinner A10s, A13, R8 and NextThing GR8 PIO"
+ default MACH_SUN5I
select PINCTRL_SUNXI
config PINCTRL_SUN6I_A31
- def_bool MACH_SUN6I
+ bool "Support for the Allwinner A31 PIO"
+ default MACH_SUN6I
select PINCTRL_SUNXI
config PINCTRL_SUN6I_A31_R
- def_bool MACH_SUN6I
+ bool "Support for the Allwinner A31 R-PIO"
+ default MACH_SUN6I
depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A23
- def_bool MACH_SUN8I
+ bool "Support for the Allwinner A23 PIO"
+ default MACH_SUN8I
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A33
- def_bool MACH_SUN8I
+ bool "Support for the Allwinner A33 PIO"
+ default MACH_SUN8I
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A83T
- def_bool MACH_SUN8I
+ bool "Support for the Allwinner A83T PIO"
+ default MACH_SUN8I
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A83T_R
- def_bool MACH_SUN8I
+ bool "Support for the Allwinner A83T R-PIO"
+ default MACH_SUN8I
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A23_R
- def_bool MACH_SUN8I
+ bool "Support for the Allwinner A23 and A33 R-PIO"
+ default MACH_SUN8I
depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_H3
- def_bool MACH_SUN8I
+ bool "Support for the Allwinner H3 PIO"
+ default MACH_SUN8I
select PINCTRL_SUNXI
config PINCTRL_SUN8I_H3_R
- def_bool MACH_SUN8I || (ARM64 && ARCH_SUNXI)
+ bool "Support for the Allwinner H3 and H5 R-PIO"
+ default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
select PINCTRL_SUNXI
config PINCTRL_SUN8I_V3S
- def_bool MACH_SUN8I
+ bool "Support for the Allwinner V3s PIO"
+ default MACH_SUN8I
select PINCTRL_SUNXI
config PINCTRL_SUN9I_A80
- def_bool MACH_SUN9I
+ bool "Support for the Allwinner A80 PIO"
+ default MACH_SUN9I
select PINCTRL_SUNXI
config PINCTRL_SUN9I_A80_R
- def_bool MACH_SUN9I
+ bool "Support for the Allwinner A80 R-PIO"
+ default MACH_SUN9I
depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN50I_A64
- def_bool ARM64 && ARCH_SUNXI
+ bool "Support for the Allwinner A64 PIO"
+ default ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI
config PINCTRL_SUN50I_A64_R
- def_bool ARM64 && ARCH_SUNXI
+ bool "Support for the Allwinner A64 R-PIO"
+ default ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI
config PINCTRL_SUN50I_H5
- def_bool ARM64 && ARCH_SUNXI
+ bool "Support for the Allwinner H5 PIO"
+ default ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI
config PINCTRL_SUN50I_H6
- def_bool ARM64 && ARCH_SUNXI
+ bool "Support for the Allwinner H6 PIO"
+ default ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI
config PINCTRL_SUN50I_H6_R
- def_bool ARM64 && ARCH_SUNXI
+ bool "Support for the Allwinner H6 R-PIO"
+ default ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI
endif
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
index ef4268cc6227..3cc1121589c9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
@@ -591,6 +591,7 @@ static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
.irq_banks = 4,
.irq_bank_map = h6_irq_bank_map,
.irq_read_needs_mux = true,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
};
static int h6_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index e05dd9a5551d..a191a65217ac 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -153,7 +153,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_r_pinctrl_data = {
.pin_base = PL_BASE,
.irq_banks = 2,
.disable_strict_mode = true,
- .has_io_bias_cfg = true,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_GRP_CONFIG,
};
static int sun9i_a80_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index da37d594a13d..0633a03d5e13 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -722,7 +722,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_pinctrl_data = {
.npins = ARRAY_SIZE(sun9i_a80_pins),
.irq_banks = 5,
.disable_strict_mode = true,
- .has_io_bias_cfg = true,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_GRP_CONFIG,
};
static int sun9i_a80_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 8dd25caea2cf..0cbca30b75dc 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -530,14 +530,10 @@ static int sunxi_pconf_group_get(struct pinctrl_dev *pctldev,
return sunxi_pconf_get(pctldev, g->pin, config);
}
-static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group,
- unsigned long *configs,
- unsigned num_configs)
+static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, unsigned num_configs)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct sunxi_pinctrl_group *g = &pctl->groups[group];
- unsigned pin = g->pin - pctl->desc->pin_base;
int i;
for (i = 0; i < num_configs; i++) {
@@ -596,9 +592,20 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
return 0;
}
+static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
+ unsigned long *configs, unsigned num_configs)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct sunxi_pinctrl_group *g = &pctl->groups[group];
+
+ /* We only support 1 pin per group. Chain it to the pin callback */
+ return sunxi_pconf_set(pctldev, g->pin, configs, num_configs);
+}
+
static const struct pinconf_ops sunxi_pconf_ops = {
.is_generic = true,
.pin_config_get = sunxi_pconf_get,
+ .pin_config_set = sunxi_pconf_set,
.pin_config_group_get = sunxi_pconf_group_get,
.pin_config_group_set = sunxi_pconf_group_set,
};
@@ -607,10 +614,12 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
unsigned pin,
struct regulator *supply)
{
+ unsigned short bank = pin / PINS_PER_BANK;
+ unsigned long flags;
u32 val, reg;
int uV;
- if (!pctl->desc->has_io_bias_cfg)
+ if (!pctl->desc->io_bias_cfg_variant)
return 0;
uV = regulator_get_voltage(supply);
@@ -621,25 +630,41 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
if (uV == 0)
return 0;
- /* Configured value must be equal or greater to actual voltage */
- if (uV <= 1800000)
- val = 0x0; /* 1.8V */
- else if (uV <= 2500000)
- val = 0x6; /* 2.5V */
- else if (uV <= 2800000)
- val = 0x9; /* 2.8V */
- else if (uV <= 3000000)
- val = 0xA; /* 3.0V */
- else
- val = 0xD; /* 3.3V */
-
- pin -= pctl->desc->pin_base;
-
- reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
- reg &= ~IO_BIAS_MASK;
- writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
+ switch (pctl->desc->io_bias_cfg_variant) {
+ case BIAS_VOLTAGE_GRP_CONFIG:
+ /*
+ * Configured value must be equal to or greater than the
+ * actual voltage.
+ */
+ if (uV <= 1800000)
+ val = 0x0; /* 1.8V */
+ else if (uV <= 2500000)
+ val = 0x6; /* 2.5V */
+ else if (uV <= 2800000)
+ val = 0x9; /* 2.8V */
+ else if (uV <= 3000000)
+ val = 0xA; /* 3.0V */
+ else
+ val = 0xD; /* 3.3V */
+
+ pin -= pctl->desc->pin_base;
+
+ reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
+ reg &= ~IO_BIAS_MASK;
+ writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
+ return 0;
+ case BIAS_VOLTAGE_PIO_POW_MODE_SEL:
+ val = uV <= 1800000 ? 1 : 0;
- return 0;
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ reg = readl(pctl->membase + PIO_POW_MOD_SEL_REG);
+ reg &= ~(1 << bank);
+ writel(reg | val << bank, pctl->membase + PIO_POW_MOD_SEL_REG);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int sunxi_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
@@ -1443,16 +1468,17 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number;
pctl->chip->owner = THIS_MODULE;
- pctl->chip->request = gpiochip_generic_request,
- pctl->chip->free = gpiochip_generic_free,
- pctl->chip->direction_input = sunxi_pinctrl_gpio_direction_input,
- pctl->chip->direction_output = sunxi_pinctrl_gpio_direction_output,
- pctl->chip->get = sunxi_pinctrl_gpio_get,
- pctl->chip->set = sunxi_pinctrl_gpio_set,
- pctl->chip->of_xlate = sunxi_pinctrl_gpio_of_xlate,
- pctl->chip->to_irq = sunxi_pinctrl_gpio_to_irq,
- pctl->chip->of_gpio_n_cells = 3,
- pctl->chip->can_sleep = false,
+ pctl->chip->request = gpiochip_generic_request;
+ pctl->chip->free = gpiochip_generic_free;
+ pctl->chip->set_config = gpiochip_generic_config;
+ pctl->chip->direction_input = sunxi_pinctrl_gpio_direction_input;
+ pctl->chip->direction_output = sunxi_pinctrl_gpio_direction_output;
+ pctl->chip->get = sunxi_pinctrl_gpio_get;
+ pctl->chip->set = sunxi_pinctrl_gpio_set;
+ pctl->chip->of_xlate = sunxi_pinctrl_gpio_of_xlate;
+ pctl->chip->to_irq = sunxi_pinctrl_gpio_to_irq;
+ pctl->chip->of_gpio_n_cells = 3;
+ pctl->chip->can_sleep = false;
pctl->chip->ngpio = round_up(last_pin, PINS_PER_BANK) -
pctl->desc->pin_base;
pctl->chip->label = dev_name(&pdev->dev);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index ee15ab067b5f..44e30deeee38 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -95,6 +95,22 @@
#define PINCTRL_SUN7I_A20 BIT(7)
#define PINCTRL_SUN8I_R40 BIT(8)
+#define PIO_POW_MOD_SEL_REG 0x340
+
+enum sunxi_desc_bias_voltage {
+ BIAS_VOLTAGE_NONE,
+ /*
+	 * Bias voltage configuration is done through the Pn_GRP_CONFIG
+	 * registers, as seen on the A80 SoC.
+ */
+ BIAS_VOLTAGE_GRP_CONFIG,
+ /*
+	 * Bias voltage is set through the PIO_POW_MOD_SEL_REG register,
+	 * as seen on the H6 SoC, for example.
+ */
+ BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+};
+
struct sunxi_desc_function {
unsigned long variant;
const char *name;
@@ -117,7 +133,7 @@ struct sunxi_pinctrl_desc {
const unsigned int *irq_bank_map;
bool irq_read_needs_mux;
bool disable_strict_mode;
- bool has_io_bias_cfg;
+ enum sunxi_desc_bias_voltage io_bias_cfg_variant;
};
struct sunxi_pinctrl_function {
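For context, a SoC descriptor opts into one of these bias-voltage schemes simply by setting the new field. A minimal sketch, with a hypothetical pin table and counts (only the io_bias_cfg_variant assignment mirrors this patch):

static const struct sunxi_pinctrl_desc example_pinctrl_data = {
	.pins = example_pins,			/* hypothetical pin table */
	.npins = ARRAY_SIZE(example_pins),
	.irq_banks = 4,				/* hypothetical */
	.io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
};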
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index caa44dd2880a..3cb69309912b 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -411,6 +411,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
}
zpctl->aux_base = of_iomap(np, 0);
+ of_node_put(np);
if (!zpctl->aux_base)
return -ENOMEM;
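The added of_node_put() balances the reference taken when the node was obtained earlier in zx_pinctrl_init() (e.g. via of_parse_phandle()); of_iomap() itself does not consume that reference. A minimal sketch of the general pattern, using a hypothetical property name:

struct device_node *np;
void __iomem *base;

np = of_parse_phandle(dev->of_node, "example-aux-syscon", 0);	/* takes a ref */
if (!np)
	return -ENODEV;

base = of_iomap(np, 0);
of_node_put(np);	/* the mapping stays valid; only the node ref is dropped */
if (!base)
	return -ENOMEM;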
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 9186d81a51cc..997317d2f2b9 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -59,6 +59,18 @@ config CROS_EC_I2C
a checksum. Failing accesses will be retried three times to
improve reliability.
+config CROS_EC_RPMSG
+ tristate "ChromeOS Embedded Controller (rpmsg)"
+ depends on MFD_CROS_EC && RPMSG && OF
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ through rpmsg. This uses a simple byte-level protocol with a
+	  checksum. Also, since there is no additional EC-to-host interrupt, it
+	  uses a byte in the message to distinguish host events from host commands.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_rpmsg.
+
config CROS_EC_SPI
tristate "ChromeOS Embedded Controller (SPI)"
depends on MFD_CROS_EC && SPI
@@ -152,6 +164,18 @@ config CROS_EC_SYSFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_sysfs.
+config CROS_USBPD_LOGGER
+ tristate "Logging driver for USB PD charger"
+ depends on CHARGER_CROS_USBPD
+ default y
+ select RTC_LIB
+ help
+ This option enables support for logging event data for the USB PD charger
+ available in the Embedded Controller on ChromeOS systems.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_usbpd_logger.
+
source "drivers/platform/chrome/wilco_ec/Kconfig"
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 1e2f0029b597..1b2f1dcfcd5c 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,18 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
+# tell define_trace.h where to find the cros ec trace header
+CFLAGS_cros_ec_trace.o:= -I$(src)
+
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o
obj-$(CONFIG_CROS_EC_I2C) += cros_ec_i2c.o
+obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o
obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o
cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
-obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o
+obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
+obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
obj-$(CONFIG_WILCO_EC) += wilco_ec/
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 24326eecd787..7abbb6167766 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -125,7 +125,7 @@ static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid)
return false;
pdev = to_pci_dev(dev);
- return devid == PCI_DEVID(pdev->bus->number, pdev->devfn);
+ return devid == pci_dev_id(pdev);
}
static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 71308766e891..4c2a27f6a6d0 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -72,15 +72,9 @@ static void cros_ec_console_log_work(struct work_struct *__work)
int buf_space;
int ret;
- ret = cros_ec_cmd_xfer(ec->ec_dev, &snapshot_msg);
- if (ret < 0) {
- dev_err(ec->dev, "EC communication failed\n");
- goto resched;
- }
- if (snapshot_msg.result != EC_RES_SUCCESS) {
- dev_err(ec->dev, "EC failed to snapshot the console log\n");
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, &snapshot_msg);
+ if (ret < 0)
goto resched;
- }
/* Loop until we have read everything, or there's an error. */
mutex_lock(&debug_info->log_mutex);
@@ -95,16 +89,10 @@ static void cros_ec_console_log_work(struct work_struct *__work)
memset(read_params, '\0', sizeof(*read_params));
read_params->subcmd = CONSOLE_READ_RECENT;
- ret = cros_ec_cmd_xfer(ec->ec_dev, debug_info->read_msg);
- if (ret < 0) {
- dev_err(ec->dev, "EC communication failed\n");
- break;
- }
- if (debug_info->read_msg->result != EC_RES_SUCCESS) {
- dev_err(ec->dev,
- "EC failed to read the console log\n");
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev,
+ debug_info->read_msg);
+ if (ret < 0)
break;
- }
/* If the buffer is empty, we're done here. */
if (ret == 0 || ec_buffer[0] == '\0')
@@ -132,7 +120,7 @@ static int cros_ec_console_log_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
@@ -290,9 +278,8 @@ static int ec_read_version_supported(struct cros_ec_dev *ec)
params->cmd = EC_CMD_CONSOLE_READ;
response = (struct ec_response_get_cmd_versions *)msg->data;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg) >= 0 &&
- msg->result == EC_RES_SUCCESS &&
- (response->version_mask & EC_VER_MASK(1));
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg) >= 0 &&
+ response->version_mask & EC_VER_MASK(1);
kfree(msg);
@@ -306,11 +293,12 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
int read_params_size;
int read_response_size;
- if (!ec_read_version_supported(ec)) {
- dev_warn(ec->dev,
- "device does not support reading the console log\n");
+ /*
+	 * If the console log feature is not supported, return silently and
+ * don't create the console_log entry.
+ */
+ if (!ec_read_version_supported(ec))
return 0;
- }
buf = devm_kzalloc(ec->dev, LOG_SIZE, GFP_KERNEL);
if (!buf)
@@ -336,12 +324,8 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
mutex_init(&debug_info->log_mutex);
init_waitqueue_head(&debug_info->log_wq);
- if (!debugfs_create_file("console_log",
- S_IFREG | 0444,
- debug_info->dir,
- debug_info,
- &cros_ec_console_log_fops))
- return -ENOMEM;
+ debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
+ debug_info, &cros_ec_console_log_fops);
INIT_DELAYED_WORK(&debug_info->log_poll_work,
cros_ec_console_log_work);
@@ -375,9 +359,8 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
msg->command = EC_CMD_GET_PANIC_INFO;
msg->insize = insize;
- ret = cros_ec_cmd_xfer(ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
if (ret < 0) {
- dev_warn(debug_info->ec->dev, "Cannot read panicinfo.\n");
ret = 0;
goto free;
}
@@ -389,13 +372,8 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
debug_info->panicinfo_blob.data = msg->data;
debug_info->panicinfo_blob.size = ret;
- if (!debugfs_create_blob("panicinfo",
- S_IFREG | 0444,
- debug_info->dir,
- &debug_info->panicinfo_blob)) {
- ret = -ENOMEM;
- goto free;
- }
+ debugfs_create_blob("panicinfo", S_IFREG | 0444, debug_info->dir,
+ &debug_info->panicinfo_blob);
return 0;
@@ -404,15 +382,6 @@ free:
return ret;
}
-static int cros_ec_create_pdinfo(struct cros_ec_debugfs *debug_info)
-{
- if (!debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
- &cros_ec_pdinfo_fops))
- return -ENOMEM;
-
- return 0;
-}
-
static int cros_ec_debugfs_probe(struct platform_device *pd)
{
struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);
@@ -427,8 +396,6 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
debug_info->ec = ec;
debug_info->dir = debugfs_create_dir(name, NULL);
- if (!debug_info->dir)
- return -ENOMEM;
ret = cros_ec_create_panicinfo(debug_info);
if (ret)
@@ -438,9 +405,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
if (ret)
goto remove_debugfs;
- ret = cros_ec_create_pdinfo(debug_info);
- if (ret)
- goto remove_log;
+ debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
+ &cros_ec_pdinfo_fops);
ec->debug_info = debug_info;
@@ -448,8 +414,6 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
return 0;
-remove_log:
- cros_ec_cleanup_console_log(debug_info);
remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 97a068dff192..3d2325197a68 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
+#include "cros_ec_trace.h"
+
#define EC_COMMAND_RETRIES 50
static int prepare_packet(struct cros_ec_device *ec_dev,
@@ -51,11 +53,24 @@ static int send_command(struct cros_ec_device *ec_dev,
int ret;
int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
+ trace_cros_ec_cmd(msg);
+
if (ec_dev->proto_version > 2)
xfer_fxn = ec_dev->pkt_xfer;
else
xfer_fxn = ec_dev->cmd_xfer;
+ if (!xfer_fxn) {
+ /*
+ * This error can happen if a communication error happened and
+ * the EC is trying to use protocol v2, on an underlying
+ * communication mechanism that does not support v2.
+ */
+ dev_err_once(ec_dev->dev,
+ "missing EC transfer API, cannot send command\n");
+ return -EIO;
+ }
+
ret = (*xfer_fxn)(ec_dev, msg);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
@@ -414,6 +429,12 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
else
ec_dev->mkbp_event_supported = 1;
+ /* Probe if host sleep v1 is supported for S0ix failure detection. */
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_HOST_SLEEP_EVENT,
+ &ver_mask);
+ ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1)));
+
/*
* Get host event wake mask, assume all events are wake events
* if unavailable.
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
new file mode 100644
index 000000000000..5d3fb2abad1d
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2018 Google LLC.
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+
+#define EC_MSG_TIMEOUT_MS 200
+#define HOST_COMMAND_MARK 1
+#define HOST_EVENT_MARK 2
+
+/**
+ * struct cros_ec_rpmsg_response - rpmsg message format from the EC.
+ *
+ * @type: The type of message, should be either HOST_COMMAND_MARK or
+ *        HOST_EVENT_MARK, indicating whether the message is a response to a
+ *        host command or a host event.
+ * @data: ec_host_response for host command.
+ */
+struct cros_ec_rpmsg_response {
+ u8 type;
+ u8 data[] __aligned(4);
+};
+
+/**
+ * struct cros_ec_rpmsg - information about an EC over rpmsg.
+ *
+ * @rpdev: rpmsg device we are connected to
+ * @xfer_ack: completion for host command transfer.
+ * @host_event_work: Work struct for pending host event.
+ */
+struct cros_ec_rpmsg {
+ struct rpmsg_device *rpdev;
+ struct completion xfer_ack;
+ struct work_struct host_event_work;
+};
+
+/**
+ * cros_ec_cmd_xfer_rpmsg - Transfer a message over rpmsg and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ *
+ * This is only used for the old EC protocol version, which is not supported
+ * by this driver.
+ *
+ * Return: -EINVAL
+ */
+static int cros_ec_cmd_xfer_rpmsg(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return -EINVAL;
+}
+
+/**
+ * cros_ec_pkt_xfer_rpmsg - Transfer a packet over rpmsg and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ *
+ * Return: number of bytes of the reply on success or negative error code.
+ */
+static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+ struct rpmsg_device *rpdev = ec_rpmsg->rpdev;
+ struct ec_host_response *response;
+ unsigned long timeout;
+ int len;
+ int ret;
+ u8 sum;
+ int i;
+
+ ec_msg->result = 0;
+ len = cros_ec_prepare_tx(ec_dev, ec_msg);
+ dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
+
+ reinit_completion(&ec_rpmsg->xfer_ack);
+ ret = rpmsg_send(rpdev->ept, ec_dev->dout, len);
+ if (ret) {
+ dev_err(ec_dev->dev, "rpmsg send failed\n");
+ return ret;
+ }
+
+ timeout = msecs_to_jiffies(EC_MSG_TIMEOUT_MS);
+ ret = wait_for_completion_timeout(&ec_rpmsg->xfer_ack, timeout);
+ if (!ret) {
+ dev_err(ec_dev->dev, "rpmsg send timeout\n");
+ return -EIO;
+ }
+
+ /* check response error code */
+ response = (struct ec_host_response *)ec_dev->din;
+ ec_msg->result = response->result;
+
+ ret = cros_ec_check_result(ec_dev, ec_msg);
+ if (ret)
+ goto exit;
+
+ if (response->data_len > ec_msg->insize) {
+ dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
+ response->data_len, ec_msg->insize);
+ ret = -EMSGSIZE;
+ goto exit;
+ }
+
+ /* copy response packet payload and compute checksum */
+ memcpy(ec_msg->data, ec_dev->din + sizeof(*response),
+ response->data_len);
+
+ sum = 0;
+ for (i = 0; i < sizeof(*response) + response->data_len; i++)
+ sum += ec_dev->din[i];
+
+ if (sum) {
+ dev_err(ec_dev->dev, "bad packet checksum, calculated %x\n",
+ sum);
+ ret = -EBADMSG;
+ goto exit;
+ }
+
+ ret = response->data_len;
+exit:
+ if (ec_msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ return ret;
+}
+
+static void
+cros_ec_rpmsg_host_event_function(struct work_struct *host_event_work)
+{
+ struct cros_ec_rpmsg *ec_rpmsg = container_of(host_event_work,
+ struct cros_ec_rpmsg,
+ host_event_work);
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&ec_rpmsg->rpdev->dev);
+ bool wake_event = true;
+ int ret;
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event);
+
+ /*
+	 * Signal a wakeup only for host wake events, or for any event if
+	 * cros_ec_get_next_event() returned an error (wake_event defaults
+	 * to true in that case).
+ */
+ if (wake_event && device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+
+ if (ret > 0)
+ blocking_notifier_call_chain(&ec_dev->event_notifier,
+ 0, ec_dev);
+}
+
+static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+ struct cros_ec_rpmsg_response *resp;
+
+ if (!len) {
+ dev_warn(ec_dev->dev, "rpmsg received empty response");
+ return -EINVAL;
+ }
+
+ resp = data;
+ len -= offsetof(struct cros_ec_rpmsg_response, data);
+ if (resp->type == HOST_COMMAND_MARK) {
+ if (len > ec_dev->din_size) {
+ dev_warn(ec_dev->dev,
+ "received length %d > din_size %d, truncating",
+ len, ec_dev->din_size);
+ len = ec_dev->din_size;
+ }
+
+ memcpy(ec_dev->din, resp->data, len);
+ complete(&ec_rpmsg->xfer_ack);
+ } else if (resp->type == HOST_EVENT_MARK) {
+ schedule_work(&ec_rpmsg->host_event_work);
+ } else {
+ dev_warn(ec_dev->dev, "rpmsg received invalid type = %d",
+ resp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct cros_ec_rpmsg *ec_rpmsg;
+ struct cros_ec_device *ec_dev;
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ ec_rpmsg = devm_kzalloc(dev, sizeof(*ec_rpmsg), GFP_KERNEL);
+ if (!ec_rpmsg)
+ return -ENOMEM;
+
+ ec_dev->dev = dev;
+ ec_dev->priv = ec_rpmsg;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_rpmsg;
+ ec_dev->pkt_xfer = cros_ec_pkt_xfer_rpmsg;
+ ec_dev->phys_name = dev_name(&rpdev->dev);
+ ec_dev->din_size = sizeof(struct ec_host_response) +
+ sizeof(struct ec_response_get_protocol_info);
+ ec_dev->dout_size = sizeof(struct ec_host_request);
+ dev_set_drvdata(dev, ec_dev);
+
+ ec_rpmsg->rpdev = rpdev;
+ init_completion(&ec_rpmsg->xfer_ack);
+ INIT_WORK(&ec_rpmsg->host_event_work,
+ cros_ec_rpmsg_host_event_function);
+
+ return cros_ec_register(ec_dev);
+}
+
+static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+
+ cancel_work_sync(&ec_rpmsg->host_event_work);
+}
+
+static const struct of_device_id cros_ec_rpmsg_of_match[] = {
+ { .compatible = "google,cros-ec-rpmsg", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cros_ec_rpmsg_of_match);
+
+static struct rpmsg_driver cros_ec_driver_rpmsg = {
+ .drv = {
+ .name = "cros-ec-rpmsg",
+ .of_match_table = cros_ec_rpmsg_of_match,
+ },
+ .probe = cros_ec_rpmsg_probe,
+ .remove = cros_ec_rpmsg_remove,
+ .callback = cros_ec_rpmsg_callback,
+};
+
+module_rpmsg_driver(cros_ec_driver_rpmsg);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS EC multi function device (rpmsg)");
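The checksum loop in cros_ec_pkt_xfer_rpmsg() relies on the usual cros_ec convention: the sender fills the header checksum field so that all bytes of header plus payload sum to zero modulo 256, and the receiver simply re-adds everything and expects 0. A minimal sketch of the sender side (illustrative helper, not part of this driver):

/* Compute the value to store in the checksum byte (assumed pre-zeroed). */
static u8 cros_ec_fill_csum(const u8 *buf, size_t len)
{
	u8 sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];

	return (u8)-sum;	/* total including this byte now sums to 0 */
}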
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index ffc38f9d4829..8e9451720e73 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -75,6 +75,27 @@ struct cros_ec_spi {
unsigned int end_of_msg_delay;
};
+typedef int (*cros_ec_xfer_fn_t) (struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg);
+
+/**
+ * struct cros_ec_xfer_work_params - params for our high priority workers
+ *
+ * @work: The work_struct needed to queue work
+ * @fn: The function to use to transfer
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ * @ret: The return value of the function
+ */
+
+struct cros_ec_xfer_work_params {
+ struct work_struct work;
+ cros_ec_xfer_fn_t fn;
+ struct cros_ec_device *ec_dev;
+ struct cros_ec_command *ec_msg;
+ int ret;
+};
+
static void debug_packet(struct device *dev, const char *name, u8 *ptr,
int len)
{
@@ -350,13 +371,13 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
}
/**
- * cros_ec_pkt_xfer_spi - Transfer a packet over SPI and receive the reply
+ * do_cros_ec_pkt_xfer_spi - Transfer a packet over SPI and receive the reply
*
* @ec_dev: ChromeOS EC device
* @ec_msg: Message to transfer
*/
-static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
- struct cros_ec_command *ec_msg)
+static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
{
struct ec_host_response *response;
struct cros_ec_spi *ec_spi = ec_dev->priv;
@@ -493,13 +514,13 @@ exit:
}
/**
- * cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply
+ * do_cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply
*
* @ec_dev: ChromeOS EC device
* @ec_msg: Message to transfer
*/
-static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
- struct cros_ec_command *ec_msg)
+static int do_cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
{
struct cros_ec_spi *ec_spi = ec_dev->priv;
struct spi_transfer trans;
@@ -611,6 +632,53 @@ exit:
return ret;
}
+static void cros_ec_xfer_high_pri_work(struct work_struct *work)
+{
+ struct cros_ec_xfer_work_params *params;
+
+ params = container_of(work, struct cros_ec_xfer_work_params, work);
+ params->ret = params->fn(params->ec_dev, params->ec_msg);
+}
+
+static int cros_ec_xfer_high_pri(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg,
+ cros_ec_xfer_fn_t fn)
+{
+ struct cros_ec_xfer_work_params params;
+
+ INIT_WORK_ONSTACK(&params.work, cros_ec_xfer_high_pri_work);
+ params.ec_dev = ec_dev;
+ params.ec_msg = ec_msg;
+ params.fn = fn;
+
+ /*
+ * This looks a bit ridiculous. Why do the work on a
+ * different thread if we're just going to block waiting for
+ * the thread to finish? The key here is that the thread is
+ * running at high priority but the calling context might not
+ * be. We need to be at high priority to avoid getting
+ * context switched out for too long and the EC giving up on
+ * the transfer.
+ */
+ queue_work(system_highpri_wq, &params.work);
+ flush_work(&params.work);
+ destroy_work_on_stack(&params.work);
+
+ return params.ret;
+}
+
+static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return cros_ec_xfer_high_pri(ec_dev, ec_msg, do_cros_ec_pkt_xfer_spi);
+}
+
+static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return cros_ec_xfer_high_pri(ec_dev, ec_msg, do_cros_ec_cmd_xfer_spi);
+}
+
static void cros_ec_spi_dt_probe(struct cros_ec_spi *ec_spi, struct device *dev)
{
struct device_node *np = dev->of_node;
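The wrappers above bounce every transfer onto the system high-priority workqueue through an on-stack work item and block until it completes, so the caller's scheduling priority cannot stall the tightly timed SPI exchange. A stripped-down sketch of the same pattern with illustrative names (assumes <linux/workqueue.h>):

struct hipri_call {
	struct work_struct work;
	int (*fn)(void *arg);
	void *arg;
	int ret;
};

static void hipri_call_fn(struct work_struct *work)
{
	struct hipri_call *c = container_of(work, struct hipri_call, work);

	c->ret = c->fn(c->arg);
}

static int run_on_highpri(int (*fn)(void *arg), void *arg)
{
	struct hipri_call c = { .fn = fn, .arg = arg };

	INIT_WORK_ONSTACK(&c.work, hipri_call_fn);
	queue_work(system_highpri_wq, &c.work);
	flush_work(&c.work);		/* block until the worker has run */
	destroy_work_on_stack(&c.work);

	return c.ret;
}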
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
new file mode 100644
index 000000000000..0a76412095a9
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+// Trace events for the ChromeOS Embedded Controller
+//
+// Copyright 2019 Google LLC.
+
+#define TRACE_SYMBOL(a) {a, #a}
+
+// Generate the list using the following script:
+// sed -n 's/^#define \(EC_CMD_[[:alnum:]_]*\)\s.*/\tTRACE_SYMBOL(\1), \\/p' include/linux/mfd/cros_ec_commands.h
+#define EC_CMDS \
+ TRACE_SYMBOL(EC_CMD_PROTO_VERSION), \
+ TRACE_SYMBOL(EC_CMD_HELLO), \
+ TRACE_SYMBOL(EC_CMD_GET_VERSION), \
+ TRACE_SYMBOL(EC_CMD_READ_TEST), \
+ TRACE_SYMBOL(EC_CMD_GET_BUILD_INFO), \
+ TRACE_SYMBOL(EC_CMD_GET_CHIP_INFO), \
+ TRACE_SYMBOL(EC_CMD_GET_BOARD_VERSION), \
+ TRACE_SYMBOL(EC_CMD_READ_MEMMAP), \
+ TRACE_SYMBOL(EC_CMD_GET_CMD_VERSIONS), \
+ TRACE_SYMBOL(EC_CMD_GET_COMMS_STATUS), \
+ TRACE_SYMBOL(EC_CMD_TEST_PROTOCOL), \
+ TRACE_SYMBOL(EC_CMD_GET_PROTOCOL_INFO), \
+ TRACE_SYMBOL(EC_CMD_GSV_PAUSE_IN_S5), \
+ TRACE_SYMBOL(EC_CMD_GET_FEATURES), \
+ TRACE_SYMBOL(EC_CMD_FLASH_INFO), \
+ TRACE_SYMBOL(EC_CMD_FLASH_READ), \
+ TRACE_SYMBOL(EC_CMD_FLASH_WRITE), \
+ TRACE_SYMBOL(EC_CMD_FLASH_ERASE), \
+ TRACE_SYMBOL(EC_CMD_FLASH_PROTECT), \
+ TRACE_SYMBOL(EC_CMD_FLASH_REGION_INFO), \
+ TRACE_SYMBOL(EC_CMD_VBNV_CONTEXT), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_FAN_TARGET_RPM), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_TARGET_RPM), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_DUTY), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_DUTY), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_DUTY), \
+ TRACE_SYMBOL(EC_CMD_LIGHTBAR_CMD), \
+ TRACE_SYMBOL(EC_CMD_LED_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_VBOOT_HASH), \
+ TRACE_SYMBOL(EC_CMD_MOTION_SENSE_CMD), \
+ TRACE_SYMBOL(EC_CMD_USB_CHARGE_SET_MODE), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_INFO), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_READ), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_WRITE), \
+ TRACE_SYMBOL(EC_CMD_RTC_GET_VALUE), \
+ TRACE_SYMBOL(EC_CMD_RTC_GET_ALARM), \
+ TRACE_SYMBOL(EC_CMD_RTC_SET_VALUE), \
+ TRACE_SYMBOL(EC_CMD_RTC_SET_ALARM), \
+ TRACE_SYMBOL(EC_CMD_PORT80_LAST_BOOT), \
+ TRACE_SYMBOL(EC_CMD_PORT80_READ), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_SET_THRESHOLD), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_GET_THRESHOLD), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_AUTO_FAN_CTRL), \
+ TRACE_SYMBOL(EC_CMD_TMP006_GET_CALIBRATION), \
+ TRACE_SYMBOL(EC_CMD_TMP006_SET_CALIBRATION), \
+ TRACE_SYMBOL(EC_CMD_TMP006_GET_RAW), \
+ TRACE_SYMBOL(EC_CMD_MKBP_STATE), \
+ TRACE_SYMBOL(EC_CMD_MKBP_INFO), \
+ TRACE_SYMBOL(EC_CMD_MKBP_SIMULATE_KEY), \
+ TRACE_SYMBOL(EC_CMD_MKBP_SET_CONFIG), \
+ TRACE_SYMBOL(EC_CMD_MKBP_GET_CONFIG), \
+ TRACE_SYMBOL(EC_CMD_KEYSCAN_SEQ_CTRL), \
+ TRACE_SYMBOL(EC_CMD_GET_NEXT_EVENT), \
+ TRACE_SYMBOL(EC_CMD_TEMP_SENSOR_GET_INFO), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_B), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SMI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SCI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_WAKE_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_SMI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_SCI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_WAKE_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR_B), \
+ TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_BKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_WIRELESS), \
+ TRACE_SYMBOL(EC_CMD_GPIO_SET), \
+ TRACE_SYMBOL(EC_CMD_GPIO_GET), \
+ TRACE_SYMBOL(EC_CMD_I2C_READ), \
+ TRACE_SYMBOL(EC_CMD_I2C_WRITE), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_CONSOLE_SNAPSHOT), \
+ TRACE_SYMBOL(EC_CMD_CONSOLE_READ), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_CUT_OFF), \
+ TRACE_SYMBOL(EC_CMD_USB_MUX), \
+ TRACE_SYMBOL(EC_CMD_LDO_SET), \
+ TRACE_SYMBOL(EC_CMD_LDO_GET), \
+ TRACE_SYMBOL(EC_CMD_POWER_INFO), \
+ TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU), \
+ TRACE_SYMBOL(EC_CMD_HANG_DETECT), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_STATE), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_CURRENT_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_EXTERNAL_POWER_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_HOST_SLEEP_EVENT), \
+ TRACE_SYMBOL(EC_CMD_SB_READ_WORD), \
+ TRACE_SYMBOL(EC_CMD_SB_WRITE_WORD), \
+ TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
+ TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
+ TRACE_SYMBOL(EC_CMD_CODEC_I2S), \
+ TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
+ TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
+ TRACE_SYMBOL(EC_CMD_ACPI_READ), \
+ TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
+ TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
+ TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
+ TRACE_SYMBOL(EC_CMD_CEC_SET), \
+ TRACE_SYMBOL(EC_CMD_CEC_GET), \
+ TRACE_SYMBOL(EC_CMD_REBOOT), \
+ TRACE_SYMBOL(EC_CMD_RESEND_RESPONSE), \
+ TRACE_SYMBOL(EC_CMD_VERSION0), \
+ TRACE_SYMBOL(EC_CMD_PD_EXCHANGE_STATUS), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_PORTS), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_POWER_INFO), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_PORT_COUNT), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_DISCOVERY), \
+ TRACE_SYMBOL(EC_CMD_PD_CHARGE_PORT_OVERRIDE), \
+ TRACE_SYMBOL(EC_CMD_PD_GET_LOG_ENTRY), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO)
+
+#define CREATE_TRACE_POINTS
+#include "cros_ec_trace.h"
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
new file mode 100644
index 000000000000..7ae3b89c78b9
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Embedded Controller
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/mfd/cros_ec.h>
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(cros_ec_cmd_class,
+ TP_PROTO(struct cros_ec_command *cmd),
+ TP_ARGS(cmd),
+ TP_STRUCT__entry(
+ __field(uint32_t, version)
+ __field(uint32_t, command)
+ ),
+ TP_fast_assign(
+ __entry->version = cmd->version;
+ __entry->command = cmd->command;
+ ),
+ TP_printk("version: %u, command: %s", __entry->version,
+ __print_symbolic(__entry->command, EC_CMDS))
+);
+
+
+DEFINE_EVENT(cros_ec_cmd_class, cros_ec_cmd,
+ TP_PROTO(struct cros_ec_command *cmd),
+ TP_ARGS(cmd)
+);
+
+
+#endif /* _CROS_EC_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
new file mode 100644
index 000000000000..7c7b267626a0
--- /dev/null
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Logging driver for ChromeOS EC based USBPD Charger.
+ *
+ * Copyright 2018 Google LLC.
+ */
+
+#include <linux/ktime.h>
+#include <linux/math64.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define DRV_NAME "cros-usbpd-logger"
+
+#define CROS_USBPD_MAX_LOG_ENTRIES 30
+#define CROS_USBPD_LOG_UPDATE_DELAY msecs_to_jiffies(60000)
+#define CROS_USBPD_DATA_SIZE 16
+#define CROS_USBPD_LOG_RESP_SIZE (sizeof(struct ec_response_pd_log) + \
+ CROS_USBPD_DATA_SIZE)
+#define CROS_USBPD_BUFFER_SIZE (sizeof(struct cros_ec_command) + \
+ CROS_USBPD_LOG_RESP_SIZE)
+/* Buffer for building the PDLOG string */
+#define BUF_SIZE 80
+
+struct logger_data {
+ struct device *dev;
+ struct cros_ec_dev *ec_dev;
+ u8 ec_buffer[CROS_USBPD_BUFFER_SIZE];
+ struct delayed_work log_work;
+ struct workqueue_struct *log_workqueue;
+};
+
+static const char * const chg_type_names[] = {
+ "None", "PD", "Type-C", "Proprietary", "DCP", "CDP", "SDP",
+ "Other", "VBUS"
+};
+
+static const char * const role_names[] = {
+ "Disconnected", "SRC", "SNK", "SNK (not charging)"
+};
+
+static const char * const fault_names[] = {
+ "---", "OCP", "fast OCP", "OVP", "Discharge"
+};
+
+static int append_str(char *buf, int pos, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf + pos, BUF_SIZE - pos, fmt, args);
+ va_end(args);
+
+ return i;
+}
+
+static struct ec_response_pd_log *ec_get_log_entry(struct logger_data *logger)
+{
+ struct cros_ec_dev *ec_dev = logger->ec_dev;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = (struct cros_ec_command *)logger->ec_buffer;
+
+ msg->command = ec_dev->cmd_offset + EC_CMD_PD_GET_LOG_ENTRY;
+ msg->insize = CROS_USBPD_LOG_RESP_SIZE;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev->ec_dev, msg);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return (struct ec_response_pd_log *)msg->data;
+}
+
+static void cros_usbpd_print_log_entry(struct ec_response_pd_log *r,
+ ktime_t tstamp)
+{
+ const char *fault, *role, *chg_type;
+ struct usb_chg_measures *meas;
+ struct mcdp_info *minfo;
+ int role_idx, type_idx;
+ char buf[BUF_SIZE + 1];
+ struct rtc_time rt;
+ int len = 0;
+ s32 rem;
+ int i;
+
+	/* The timestamp is the number of 1/1024ths of a second in the past */
+ tstamp = ktime_sub_us(tstamp, r->timestamp << PD_LOG_TIMESTAMP_SHIFT);
+ rt = rtc_ktime_to_tm(tstamp);
+
+ switch (r->type) {
+ case PD_EVENT_MCU_CHARGE:
+ if (r->data & CHARGE_FLAGS_OVERRIDE)
+ len += append_str(buf, len, "override ");
+
+ if (r->data & CHARGE_FLAGS_DELAYED_OVERRIDE)
+ len += append_str(buf, len, "pending_override ");
+
+ role_idx = r->data & CHARGE_FLAGS_ROLE_MASK;
+ role = role_idx < ARRAY_SIZE(role_names) ?
+ role_names[role_idx] : "Unknown";
+
+ type_idx = (r->data & CHARGE_FLAGS_TYPE_MASK)
+ >> CHARGE_FLAGS_TYPE_SHIFT;
+
+ chg_type = type_idx < ARRAY_SIZE(chg_type_names) ?
+ chg_type_names[type_idx] : "???";
+
+ if (role_idx == USB_PD_PORT_POWER_DISCONNECTED ||
+ role_idx == USB_PD_PORT_POWER_SOURCE) {
+ len += append_str(buf, len, "%s", role);
+ break;
+ }
+
+ meas = (struct usb_chg_measures *)r->payload;
+ len += append_str(buf, len, "%s %s %s %dmV max %dmV / %dmA",
+ role, r->data & CHARGE_FLAGS_DUAL_ROLE ?
+ "DRP" : "Charger",
+ chg_type, meas->voltage_now,
+ meas->voltage_max, meas->current_max);
+ break;
+ case PD_EVENT_ACC_RW_FAIL:
+ len += append_str(buf, len, "RW signature check failed");
+ break;
+ case PD_EVENT_PS_FAULT:
+ fault = r->data < ARRAY_SIZE(fault_names) ? fault_names[r->data]
+ : "???";
+ len += append_str(buf, len, "Power supply fault: %s", fault);
+ break;
+ case PD_EVENT_VIDEO_DP_MODE:
+ len += append_str(buf, len, "DP mode %sabled", r->data == 1 ?
+ "en" : "dis");
+ break;
+ case PD_EVENT_VIDEO_CODEC:
+ minfo = (struct mcdp_info *)r->payload;
+ len += append_str(buf, len, "HDMI info: family:%04x chipid:%04x ",
+ MCDP_FAMILY(minfo->family),
+ MCDP_CHIPID(minfo->chipid));
+ len += append_str(buf, len, "irom:%d.%d.%d fw:%d.%d.%d",
+ minfo->irom.major, minfo->irom.minor,
+ minfo->irom.build, minfo->fw.major,
+ minfo->fw.minor, minfo->fw.build);
+ break;
+ default:
+ len += append_str(buf, len, "Event %02x (%04x) [", r->type,
+ r->data);
+
+ for (i = 0; i < PD_LOG_SIZE(r->size_port); i++)
+ len += append_str(buf, len, "%02x ", r->payload[i]);
+
+ len += append_str(buf, len, "]");
+ break;
+ }
+
+ div_s64_rem(ktime_to_ms(tstamp), MSEC_PER_SEC, &rem);
+ pr_info("PDLOG %d/%02d/%02d %02d:%02d:%02d.%03d P%d %s\n",
+ rt.tm_year + 1900, rt.tm_mon + 1, rt.tm_mday,
+ rt.tm_hour, rt.tm_min, rt.tm_sec, rem,
+ PD_LOG_PORT(r->size_port), buf);
+}
+
+static void cros_usbpd_log_check(struct work_struct *work)
+{
+ struct logger_data *logger = container_of(to_delayed_work(work),
+ struct logger_data,
+ log_work);
+ struct device *dev = logger->dev;
+ struct ec_response_pd_log *r;
+ int entries = 0;
+ ktime_t now;
+
+ while (entries++ < CROS_USBPD_MAX_LOG_ENTRIES) {
+ r = ec_get_log_entry(logger);
+ now = ktime_get_real();
+ if (IS_ERR(r)) {
+ dev_dbg(dev, "Cannot get PD log %ld\n", PTR_ERR(r));
+ break;
+ }
+ if (r->type == PD_EVENT_NO_ENTRY)
+ break;
+
+ cros_usbpd_print_log_entry(r, now);
+ }
+
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+}
+
+static int cros_usbpd_logger_probe(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+ struct device *dev = &pd->dev;
+ struct logger_data *logger;
+
+ logger = devm_kzalloc(dev, sizeof(*logger), GFP_KERNEL);
+ if (!logger)
+ return -ENOMEM;
+
+ logger->dev = dev;
+ logger->ec_dev = ec_dev;
+
+ platform_set_drvdata(pd, logger);
+
+ /* Retrieve PD event logs periodically */
+ INIT_DELAYED_WORK(&logger->log_work, cros_usbpd_log_check);
+ logger->log_workqueue = create_singlethread_workqueue("cros_usbpd_log");
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+
+ return 0;
+}
+
+static int cros_usbpd_logger_remove(struct platform_device *pd)
+{
+ struct logger_data *logger = platform_get_drvdata(pd);
+
+ cancel_delayed_work_sync(&logger->log_work);
+
+ return 0;
+}
+
+static int __maybe_unused cros_usbpd_logger_resume(struct device *dev)
+{
+ struct logger_data *logger = dev_get_drvdata(dev);
+
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+
+ return 0;
+}
+
+static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev)
+{
+ struct logger_data *logger = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&logger->log_work);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend,
+ cros_usbpd_logger_resume);
+
+static struct platform_driver cros_usbpd_logger_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_usbpd_logger_pm_ops,
+ },
+ .probe = cros_usbpd_logger_probe,
+ .remove = cros_usbpd_logger_remove,
+};
+
+module_platform_driver(cros_usbpd_logger_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger.");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index c090db2cd5be..f163476d080d 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -4,31 +4,7 @@
*
* Copyright 2019 Google LLC
*
- * There is only one attribute used for debugging, called raw.
- * You can write a hexadecimal sentence to raw, and that series of bytes
- * will be sent to the EC. Then, you can read the bytes of response
- * by reading from raw.
- *
- * For writing:
- * Bytes 0-1 indicate the message type:
- * 00 F0 = Execute Legacy Command
- * 00 F2 = Read/Write NVRAM Property
- * Byte 2 provides the command code
- * Bytes 3+ consist of the data passed in the request
- *
- * When referencing the EC interface spec, byte 2 corresponds to MBOX[0],
- * byte 3 corresponds to MBOX[1], etc.
- *
- * At least three bytes are required, for the msg type and command,
- * with additional bytes optional for additional data.
- *
- * Example:
- * // Request EC info type 3 (EC firmware build date)
- * $ echo 00 f0 38 00 03 00 > raw
- * // View the result. The decoded ASCII result "12/21/18" is
- * // included after the raw hex.
- * $ cat raw
- * 00 31 32 2f 32 31 2f 31 38 00 38 00 01 00 2f 00 .12/21/18.8...
+ * See Documentation/ABI/testing/debugfs-wilco-ec for usage.
*/
#include <linux/ctype.h>
@@ -136,18 +112,15 @@ static ssize_t raw_write(struct file *file, const char __user *user_buf,
ret = parse_hex_sentence(buf, kcount, request_data, TYPE_AND_DATA_SIZE);
if (ret < 0)
return ret;
- /* Need at least two bytes for message type and one for command */
+ /* Need at least two bytes for message type and one byte of data */
if (ret < 3)
return -EINVAL;
- /* Clear response data buffer */
- memset(debug_info->raw_data, '\0', EC_MAILBOX_DATA_SIZE_EXTENDED);
-
msg.type = request_data[0] << 8 | request_data[1];
- msg.flags = WILCO_EC_FLAG_RAW;
- msg.command = request_data[2];
- msg.request_data = ret > 3 ? request_data + 3 : 0;
- msg.request_size = ret - 3;
+ msg.flags = 0;
+ msg.request_data = request_data + 2;
+ msg.request_size = ret - 2;
+ memset(debug_info->raw_data, 0, sizeof(debug_info->raw_data));
msg.response_data = debug_info->raw_data;
msg.response_size = EC_MAILBOX_DATA_SIZE;
@@ -174,7 +147,8 @@ static ssize_t raw_read(struct file *file, char __user *user_buf, size_t count,
fmt_len = hex_dump_to_buffer(debug_info->raw_data,
debug_info->response_size,
16, 1, debug_info->formatted_data,
- FORMATTED_BUFFER_SIZE, true);
+ sizeof(debug_info->formatted_data),
+ true);
/* Only return response the first time it is read */
debug_info->response_size = 0;
}
@@ -190,6 +164,51 @@ static const struct file_operations fops_raw = {
.llseek = no_llseek,
};
+#define CMD_KB_CHROME 0x88
+#define SUB_CMD_H1_GPIO 0x0A
+
+struct h1_gpio_status_request {
+ u8 cmd; /* Always CMD_KB_CHROME */
+ u8 reserved;
+ u8 sub_cmd; /* Always SUB_CMD_H1_GPIO */
+} __packed;
+
+struct hi_gpio_status_response {
+ u8 status; /* 0 if allowed */
+ u8 val; /* BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL */
+} __packed;
+
+static int h1_gpio_get(void *arg, u64 *val)
+{
+ struct wilco_ec_device *ec = arg;
+ struct h1_gpio_status_request rq;
+ struct hi_gpio_status_response rs;
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_KB_CHROME;
+ rq.sub_cmd = SUB_CMD_H1_GPIO;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = &rq;
+ msg.request_size = sizeof(rq);
+ msg.response_data = &rs;
+ msg.response_size = sizeof(rs);
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (rs.status)
+ return -EIO;
+
+ *val = rs.val;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
+
/**
* wilco_ec_debugfs_probe() - Create the debugfs node
* @pdev: The platform device, probably created in core.c
@@ -211,6 +230,8 @@ static int wilco_ec_debugfs_probe(struct platform_device *pdev)
if (!debug_info->dir)
return 0;
debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw);
+ debugfs_create_file("h1_gpio", 0444, debug_info->dir, ec,
+ &fops_h1_gpio);
return 0;
}
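Userspace can then poll the H1 GPIO state through the new node. A hypothetical example, assuming debugfs is mounted at /sys/kernel/debug and the parent directory created in wilco_ec_debugfs_probe() is named wilco_ec:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16] = "";
	int fd = open("/sys/kernel/debug/wilco_ec/h1_gpio", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "0x02\n", per the 0x%02llx format */
	if (n > 0)
		printf("%.*s", (int)n, buf);
	close(fd);
	return 0;
}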
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
index 14355668ddfa..7fb58b487963 100644
--- a/drivers/platform/chrome/wilco_ec/mailbox.c
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -92,21 +92,10 @@ static void wilco_ec_prepare(struct wilco_ec_message *msg,
struct wilco_ec_request *rq)
{
memset(rq, 0, sizeof(*rq));
-
- /* Handle messages without trimming bytes from the request */
- if (msg->request_size && msg->flags & WILCO_EC_FLAG_RAW_REQUEST) {
- rq->reserved_raw = *(u8 *)msg->request_data;
- msg->request_size--;
- memmove(msg->request_data, msg->request_data + 1,
- msg->request_size);
- }
-
- /* Fill in request packet */
rq->struct_version = EC_MAILBOX_PROTO_VERSION;
rq->mailbox_id = msg->type;
rq->mailbox_version = EC_MAILBOX_VERSION;
- rq->data_size = msg->request_size + EC_MAILBOX_DATA_EXTRA;
- rq->command = msg->command;
+ rq->data_size = msg->request_size;
/* Checksum header and data */
rq->checksum = wilco_ec_checksum(rq, sizeof(*rq));
@@ -159,6 +148,12 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
return -EIO;
}
+ /*
+ * The EC always returns either EC_MAILBOX_DATA_SIZE or
+ * EC_MAILBOX_DATA_SIZE_EXTENDED bytes of data, so we need to
+ * calculate the checksum on **all** of this data, even if we
+ * won't use all of it.
+ */
if (msg->flags & WILCO_EC_FLAG_EXTENDED_DATA)
size = EC_MAILBOX_DATA_SIZE_EXTENDED;
else
@@ -173,33 +168,26 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
return -EBADMSG;
}
- /* Check that the EC reported success */
- msg->result = rs->result;
- if (msg->result) {
- dev_dbg(ec->dev, "bad response: 0x%02x\n", msg->result);
+ if (rs->result) {
+ dev_dbg(ec->dev, "EC reported failure: 0x%02x\n", rs->result);
return -EBADMSG;
}
- /* Check the returned data size, skipping the header */
if (rs->data_size != size) {
dev_dbg(ec->dev, "unexpected packet size (%u != %zu)",
rs->data_size, size);
return -EMSGSIZE;
}
- /* Skip 1 response data byte unless specified */
- size = (msg->flags & WILCO_EC_FLAG_RAW_RESPONSE) ? 0 : 1;
- if ((ssize_t) rs->data_size - size < msg->response_size) {
- dev_dbg(ec->dev, "response data too short (%zd < %zu)",
- (ssize_t) rs->data_size - size, msg->response_size);
+ if (rs->data_size < msg->response_size) {
+ dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)",
+ rs->data_size, msg->response_size);
return -EMSGSIZE;
}
- /* Ignore response data bytes as requested */
- memcpy(msg->response_data, rs->data + size, msg->response_size);
+ memcpy(msg->response_data, rs->data, msg->response_size);
- /* Return actual amount of data received */
- return msg->response_size;
+ return rs->data_size;
}
/**
@@ -207,10 +195,12 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
* @ec: EC device.
* @msg: EC message data for request and response.
*
- * On entry msg->type, msg->flags, msg->command, msg->request_size,
- * msg->response_size, and msg->request_data should all be filled in.
+ * On entry msg->type, msg->request_size, and msg->request_data should all be
+ * filled in. If desired, msg->flags can be set.
*
- * On exit msg->result and msg->response_data will be filled.
+ * If a response is expected, msg->response_size should be set, and
+ * msg->response_data should point to a buffer with enough space. On exit
+ * msg->response_data will be filled.
*
* Return: number of bytes received or negative error code on failure.
*/
@@ -219,9 +209,8 @@ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
struct wilco_ec_request *rq;
int ret;
- dev_dbg(ec->dev, "cmd=%02x type=%04x flags=%02x rslen=%zu rqlen=%zu\n",
- msg->command, msg->type, msg->flags, msg->response_size,
- msg->request_size);
+ dev_dbg(ec->dev, "type=%04x flags=%02x rslen=%zu rqlen=%zu\n",
+ msg->type, msg->flags, msg->response_size, msg->request_size);
mutex_lock(&ec->mailbox_lock);
/* Prepare request packet */
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 321bc673c417..cef0133aa47a 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -274,7 +274,8 @@ static int pin_user_pages(unsigned long first_page,
*iter_last_page_size = last_page_size;
}
- ret = get_user_pages_fast(first_page, requested_pages, !is_write,
+ ret = get_user_pages_fast(first_page, requested_pages,
+ !is_write ? FOLL_WRITE : 0,
pages);
if (ret <= 0)
return -EFAULT;
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index cd8a90846063..530fe7e31397 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -5,7 +5,7 @@
menuconfig MELLANOX_PLATFORM
bool "Platform support for Mellanox hardware"
- depends on X86 || ARM || COMPILE_TEST
+ depends on X86 || ARM || ARM64 || COMPILE_TEST
---help---
Say Y here to get to see options for platform support for
Mellanox systems. This option alone does not add any kernel code.
@@ -34,4 +34,14 @@ config MLXREG_IO
to system resets operation, system reset causes monitoring and some
kinds of mux selection.
+config MLXBF_TMFIFO
+ tristate "Mellanox BlueField SoC TmFifo platform driver"
+ depends on ARM64
+ depends on ACPI
+ depends on VIRTIO_CONSOLE && VIRTIO_NET
+ help
+	  Say y here to enable TmFifo support. The TmFifo driver provides
+	  a platform driver for the BlueField TmFifo, which carries console
+	  and network traffic based on the virtio framework.
+
endif # MELLANOX_PLATFORM
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index 57074d9c722c..a229bda18fd9 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -3,5 +3,6 @@
# Makefile for linux/drivers/platform/mellanox
# Mellanox Platform-Specific Drivers
#
+obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo-regs.h b/drivers/platform/mellanox/mlxbf-tmfifo-regs.h
new file mode 100644
index 000000000000..e4f0d2eda714
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-tmfifo-regs.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLXBF_TMFIFO_REGS_H__
+#define __MLXBF_TMFIFO_REGS_H__
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+#define MLXBF_TMFIFO_TX_DATA 0x00
+#define MLXBF_TMFIFO_TX_STS 0x08
+#define MLXBF_TMFIFO_TX_STS__LENGTH 0x0001
+#define MLXBF_TMFIFO_TX_STS__COUNT_SHIFT 0
+#define MLXBF_TMFIFO_TX_STS__COUNT_WIDTH 9
+#define MLXBF_TMFIFO_TX_STS__COUNT_RESET_VAL 0
+#define MLXBF_TMFIFO_TX_STS__COUNT_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_STS__COUNT_MASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_CTL 0x10
+#define MLXBF_TMFIFO_TX_CTL__LENGTH 0x0001
+#define MLXBF_TMFIFO_TX_CTL__LWM_SHIFT 0
+#define MLXBF_TMFIFO_TX_CTL__LWM_WIDTH 8
+#define MLXBF_TMFIFO_TX_CTL__LWM_RESET_VAL 128
+#define MLXBF_TMFIFO_TX_CTL__LWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__LWM_MASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__HWM_SHIFT 8
+#define MLXBF_TMFIFO_TX_CTL__HWM_WIDTH 8
+#define MLXBF_TMFIFO_TX_CTL__HWM_RESET_VAL 128
+#define MLXBF_TMFIFO_TX_CTL__HWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__HWM_MASK GENMASK_ULL(15, 8)
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_SHIFT 32
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_WIDTH 9
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RESET_VAL 256
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32)
+#define MLXBF_TMFIFO_RX_DATA 0x00
+#define MLXBF_TMFIFO_RX_STS 0x08
+#define MLXBF_TMFIFO_RX_STS__LENGTH 0x0001
+#define MLXBF_TMFIFO_RX_STS__COUNT_SHIFT 0
+#define MLXBF_TMFIFO_RX_STS__COUNT_WIDTH 9
+#define MLXBF_TMFIFO_RX_STS__COUNT_RESET_VAL 0
+#define MLXBF_TMFIFO_RX_STS__COUNT_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_STS__COUNT_MASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_CTL 0x10
+#define MLXBF_TMFIFO_RX_CTL__LENGTH 0x0001
+#define MLXBF_TMFIFO_RX_CTL__LWM_SHIFT 0
+#define MLXBF_TMFIFO_RX_CTL__LWM_WIDTH 8
+#define MLXBF_TMFIFO_RX_CTL__LWM_RESET_VAL 128
+#define MLXBF_TMFIFO_RX_CTL__LWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__LWM_MASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__HWM_SHIFT 8
+#define MLXBF_TMFIFO_RX_CTL__HWM_WIDTH 8
+#define MLXBF_TMFIFO_RX_CTL__HWM_RESET_VAL 128
+#define MLXBF_TMFIFO_RX_CTL__HWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__HWM_MASK GENMASK_ULL(15, 8)
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_SHIFT 32
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_WIDTH 9
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RESET_VAL 256
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32)
+
+#endif /* !defined(__MLXBF_TMFIFO_REGS_H__) */
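These shift/mask definitions are meant to be paired with the bitfield helpers; for example, the driver can read how many entries currently sit in the Tx FIFO roughly like this (sketch with illustrative variable names, assuming <linux/bitfield.h> and the tx_base mapping from struct mlxbf_tmfifo):

u64 sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS);
int count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
int max_entries = FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK,
			    readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL));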
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
new file mode 100644
index 000000000000..9a5c9fd2dbc6
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -0,0 +1,1281 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox BlueField SoC TmFifo driver
+ *
+ * Copyright (C) 2019 Mellanox Technologies
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/circ_buf.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/virtio_config.h>
+#include <linux/virtio_console.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_ring.h>
+
+#include "mlxbf-tmfifo-regs.h"
+
+/* Vring size. */
+#define MLXBF_TMFIFO_VRING_SIZE SZ_1K
+
+/* Console Tx buffer size. */
+#define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K
+
+/* Console Tx buffer reserved space. */
+#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE 8
+
+/* House-keeping timer interval. */
+#define MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10)
+
+/* Virtual devices sharing the TM FIFO. */
+#define MLXBF_TMFIFO_VDEV_MAX (VIRTIO_ID_CONSOLE + 1)
+
+/*
+ * Reserve 1/16 of TmFifo space, so console messages are not starved by
+ * the networking traffic.
+ */
+#define MLXBF_TMFIFO_RESERVE_RATIO 16
+
+/* Message with data needs at least two words (for header & data). */
+#define MLXBF_TMFIFO_DATA_MIN_WORDS 2
+
+struct mlxbf_tmfifo;
+
+/**
+ * mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
+ * @va: virtual address of the ring
+ * @dma: dma address of the ring
+ * @vq: pointer to the virtio virtqueue
+ * @desc: current descriptor of the pending packet
+ * @desc_head: head descriptor of the pending packet
+ * @cur_len: processed length of the current descriptor
+ * @rem_len: remaining length of the pending packet
+ * @pkt_len: total length of the pending packet
+ * @next_avail: next avail descriptor id
+ * @num: vring size (number of descriptors)
+ * @align: vring alignment size
+ * @index: vring index
+ * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
+ * @fifo: pointer to the tmfifo structure
+ */
+struct mlxbf_tmfifo_vring {
+ void *va;
+ dma_addr_t dma;
+ struct virtqueue *vq;
+ struct vring_desc *desc;
+ struct vring_desc *desc_head;
+ int cur_len;
+ int rem_len;
+ u32 pkt_len;
+ u16 next_avail;
+ int num;
+ int align;
+ int index;
+ int vdev_id;
+ struct mlxbf_tmfifo *fifo;
+};
+
+/* Interrupt types. */
+enum {
+ MLXBF_TM_RX_LWM_IRQ,
+ MLXBF_TM_RX_HWM_IRQ,
+ MLXBF_TM_TX_LWM_IRQ,
+ MLXBF_TM_TX_HWM_IRQ,
+ MLXBF_TM_MAX_IRQ
+};
+
+/* Ring types (Rx & Tx). */
+enum {
+ MLXBF_TMFIFO_VRING_RX,
+ MLXBF_TMFIFO_VRING_TX,
+ MLXBF_TMFIFO_VRING_MAX
+};
+
+/**
+ * mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
+ * @vdev: virtio device, in which the vdev.id.device field has the
+ * VIRTIO_ID_xxx id to distinguish the virtual device.
+ * @status: status of the device
+ * @features: supported features of the device
+ * @vrings: array of tmfifo vrings of this device
+ * @config.cons: virtual console config -
+ * select if vdev.id.device is VIRTIO_ID_CONSOLE
+ * @config.net: virtual network config -
+ * select if vdev.id.device is VIRTIO_ID_NET
+ * @tx_buf: tx buffer used to buffer data before writing into the FIFO
+ */
+struct mlxbf_tmfifo_vdev {
+ struct virtio_device vdev;
+ u8 status;
+ u64 features;
+ struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
+ union {
+ struct virtio_console_config cons;
+ struct virtio_net_config net;
+ } config;
+ struct circ_buf tx_buf;
+};
+
+/**
+ * mlxbf_tmfifo_irq_info - Structure of the interrupt information
+ * @fifo: pointer to the tmfifo structure
+ * @irq: interrupt number
+ * @index: index into the interrupt array
+ */
+struct mlxbf_tmfifo_irq_info {
+ struct mlxbf_tmfifo *fifo;
+ int irq;
+ int index;
+};
+
+/**
+ * mlxbf_tmfifo - Structure of the TmFifo
+ * @vdev: array of the virtual devices running over the TmFifo
+ * @lock: lock to protect the TmFifo access
+ * @rx_base: mapped register base address for the Rx FIFO
+ * @tx_base: mapped register base address for the Tx FIFO
+ * @rx_fifo_size: number of entries of the Rx FIFO
+ * @tx_fifo_size: number of entries of the Tx FIFO
+ * @pend_events: pending bits for deferred events
+ * @irq_info: interrupt information
+ * @work: work struct for deferred process
+ * @timer: background timer
+ * @vring: Tx/Rx ring
+ * @spin_lock: spin lock
+ * @is_ready: ready flag
+ */
+struct mlxbf_tmfifo {
+ struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
+ struct mutex lock; /* TmFifo lock */
+ void __iomem *rx_base;
+ void __iomem *tx_base;
+ int rx_fifo_size;
+ int tx_fifo_size;
+ unsigned long pend_events;
+ struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
+ struct work_struct work;
+ struct timer_list timer;
+ struct mlxbf_tmfifo_vring *vring[2];
+ spinlock_t spin_lock; /* spin lock */
+ bool is_ready;
+};
+
+/**
+ * mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
+ * @type: message type
+ * @len: payload length in network byte order. Messages sent into the FIFO
+ *       will be read by the other side as a byte stream in the same order.
+ *       The length is encoded in network byte order so that both sides
+ *       interpret it consistently.
+ */
+struct mlxbf_tmfifo_msg_hdr {
+ u8 type;
+ __be16 len;
+ u8 unused[5];
+} __packed __aligned(sizeof(u64));
+
+/*
+ * Default MAC.
+ * This MAC address will be read from EFI persistent variable if configured.
+ * It can also be reconfigured with standard Linux tools.
+ */
+static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
+ 0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
+};
+
+/* EFI variable name of the MAC address. */
+static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
+
+/* Maximum L2 header length. */
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+
+/* Supported virtio-net features. */
+#define MLXBF_TMFIFO_NET_FEATURES \
+ (BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
+ BIT_ULL(VIRTIO_NET_F_MAC))
+
+#define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
+
+/* Free vrings of the FIFO device. */
+static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
+ struct mlxbf_tmfifo_vdev *tm_vdev)
+{
+ struct mlxbf_tmfifo_vring *vring;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+ if (vring->va) {
+ size = vring_size(vring->num, vring->align);
+ dma_free_coherent(tm_vdev->vdev.dev.parent, size,
+ vring->va, vring->dma);
+ vring->va = NULL;
+ if (vring->vq) {
+ vring_del_virtqueue(vring->vq);
+ vring->vq = NULL;
+ }
+ }
+ }
+}
+
+/* Allocate vrings for the FIFO. */
+static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
+ struct mlxbf_tmfifo_vdev *tm_vdev)
+{
+ struct mlxbf_tmfifo_vring *vring;
+ struct device *dev;
+ dma_addr_t dma;
+ int i, size;
+ void *va;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+ vring->fifo = fifo;
+ vring->num = MLXBF_TMFIFO_VRING_SIZE;
+ vring->align = SMP_CACHE_BYTES;
+ vring->index = i;
+ vring->vdev_id = tm_vdev->vdev.id.device;
+ dev = &tm_vdev->vdev.dev;
+
+ size = vring_size(vring->num, vring->align);
+ va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
+ if (!va) {
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ dev_err(dev->parent, "dma_alloc_coherent failed\n");
+ return -ENOMEM;
+ }
+
+ vring->va = va;
+ vring->dma = dma;
+ }
+
+ return 0;
+}
+
+/* Disable interrupts of the FIFO device. */
+static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
+{
+ int i, irq;
+
+ for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
+ irq = fifo->irq_info[i].irq;
+ fifo->irq_info[i].irq = 0;
+ disable_irq(irq);
+ }
+}
+
+/* Interrupt handler. */
+static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
+{
+ struct mlxbf_tmfifo_irq_info *irq_info = arg;
+
+ if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
+ schedule_work(&irq_info->fifo->work);
+
+ return IRQ_HANDLED;
+}
+
+/* Get the next packet descriptor from the vring. */
+static struct vring_desc *
+mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ unsigned int idx, head;
+
+ if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
+ return NULL;
+
+ idx = vring->next_avail % vr->num;
+ head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
+ if (WARN_ON(head >= vr->num))
+ return NULL;
+
+ vring->next_avail++;
+
+ return &vr->desc[head];
+}
+
+/* Release virtio descriptor. */
+static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc, u32 len)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ u16 idx, vr_idx;
+
+ vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
+ idx = vr_idx % vr->num;
+ vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
+ vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);
+
+ /*
+ * Virtio could poll and check the 'idx' to decide whether the desc is
+ * done or not. Add a memory barrier here to make sure the update above
+ * completes before updating the idx.
+ */
+ mb();
+ vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
+}
+
+/* Get the total length of the descriptor chain. */
+static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ u32 len = 0, idx;
+
+ while (desc) {
+ len += virtio32_to_cpu(vdev, desc->len);
+ if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
+ break;
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ }
+
+ return len;
+}
+
+static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+{
+ struct vring_desc *desc_head;
+ u32 len = 0;
+
+ if (vring->desc_head) {
+ desc_head = vring->desc_head;
+ len = vring->pkt_len;
+ } else {
+ desc_head = mlxbf_tmfifo_get_next_desc(vring);
+ len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
+ }
+
+ if (desc_head)
+ mlxbf_tmfifo_release_desc(vring, desc_head, len);
+
+ vring->pkt_len = 0;
+ vring->desc = NULL;
+ vring->desc_head = NULL;
+}
+
+static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc, bool is_rx)
+{
+ struct virtio_device *vdev = vring->vq->vdev;
+ struct virtio_net_hdr *net_hdr;
+
+ net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+ memset(net_hdr, 0, sizeof(*net_hdr));
+}
+
+/* Get and initialize the next packet. */
+static struct vring_desc *
+mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+{
+ struct vring_desc *desc;
+
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
+ mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);
+
+ vring->desc_head = desc;
+ vring->desc = desc;
+
+ return desc;
+}
+
+/* House-keeping timer. */
+static void mlxbf_tmfifo_timer(struct timer_list *t)
+{
+ struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
+ int rx, tx;
+
+ rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
+ tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
+
+ if (rx || tx)
+ schedule_work(&fifo->work);
+
+ mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+}
+
+/* Copy one console packet into the output buffer. */
+static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
+ struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = &cons->vdev;
+ u32 len, idx, seg;
+ void *addr;
+
+ while (desc) {
+ addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+ len = virtio32_to_cpu(vdev, desc->len);
+
+ seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (len <= seg) {
+ memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
+ } else {
+ memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
+ addr += seg;
+ memcpy(cons->tx_buf.buf, addr, len - seg);
+ }
+ cons->tx_buf.head = (cons->tx_buf.head + len) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+
+ if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
+ break;
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ }
+}
+
+/* Copy console data into the output buffer. */
+static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
+ struct mlxbf_tmfifo_vring *vring)
+{
+ struct vring_desc *desc;
+ u32 len, avail;
+
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ while (desc) {
+ /* Release the packet if not enough space. */
+ len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
+ mlxbf_tmfifo_release_desc(vring, desc, len);
+ break;
+ }
+
+ mlxbf_tmfifo_console_output_one(cons, vring, desc);
+ mlxbf_tmfifo_release_desc(vring, desc, len);
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ }
+}
+
+/* Get the number of available words in Rx FIFO for receiving. */
+static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
+{
+ u64 sts;
+
+ sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS);
+ return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
+}
+
+/* Get the number of available words in the TmFifo for sending. */
+static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
+{
+ int tx_reserve;
+ u32 count;
+ u64 sts;
+
+ /* Reserve some room in FIFO for console messages. */
+ if (vdev_id == VIRTIO_ID_NET)
+ tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
+ else
+ tx_reserve = 1;
+
+ sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS);
+ count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
+ return fifo->tx_fifo_size - tx_reserve - count;
+}
+
+/* Console Tx (move data from the output buffer into the TmFifo). */
+static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
+{
+ struct mlxbf_tmfifo_msg_hdr hdr;
+ struct mlxbf_tmfifo_vdev *cons;
+ unsigned long flags;
+ int size, seg;
+ void *addr;
+ u64 data;
+
+ /* Return if not enough space available. */
+ if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
+ return;
+
+ cons = fifo->vdev[VIRTIO_ID_CONSOLE];
+ if (!cons || !cons->tx_buf.buf)
+ return;
+
+ /* Return if no data to send. */
+ size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (size == 0)
+ return;
+
+ /* Adjust the size to available space. */
+ if (size + sizeof(hdr) > avail * sizeof(u64))
+ size = avail * sizeof(u64) - sizeof(hdr);
+
+ /* Write header. */
+ hdr.type = VIRTIO_ID_CONSOLE;
+ hdr.len = htons(size);
+ writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+
+ /* Use spin-lock to protect the 'cons->tx_buf'. */
+ spin_lock_irqsave(&fifo->spin_lock, flags);
+
+ while (size > 0) {
+ addr = cons->tx_buf.buf + cons->tx_buf.tail;
+
+ seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (seg >= sizeof(u64)) {
+ memcpy(&data, addr, sizeof(u64));
+ } else {
+ memcpy(&data, addr, seg);
+ memcpy((u8 *)&data + seg, cons->tx_buf.buf,
+ sizeof(u64) - seg);
+ }
+ writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+
+ if (size >= sizeof(u64)) {
+ cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+ size -= sizeof(u64);
+ } else {
+ cons->tx_buf.tail = (cons->tx_buf.tail + size) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+ size = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&fifo->spin_lock, flags);
+}
+
+/* Rx/Tx one word in the descriptor buffer. */
+static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc,
+ bool is_rx, int len)
+{
+ struct virtio_device *vdev = vring->vq->vdev;
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ void *addr;
+ u64 data;
+
+ /* Get the buffer address of this desc. */
+ addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+
+ /* Read a word from FIFO for Rx. */
+ if (is_rx)
+ data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
+
+ if (vring->cur_len + sizeof(u64) <= len) {
+ /* The whole word. */
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data, sizeof(u64));
+ else
+ memcpy(&data, addr + vring->cur_len, sizeof(u64));
+ vring->cur_len += sizeof(u64);
+ } else {
+ /* Leftover bytes. */
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ else
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
+ vring->cur_len = len;
+ }
+
+ /* Write the word into FIFO for Tx. */
+ if (!is_rx)
+ writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+}
+
+/*
+ * Rx/Tx packet header.
+ *
+ * In the Rx case, the packet might turn out to belong to a different vring
+ * since the TmFifo is shared by different services. In that case, the
+ * 'vring_change' flag is set.
+ */
+static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc,
+ bool is_rx, bool *vring_change)
+{
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ struct virtio_net_config *config;
+ struct mlxbf_tmfifo_msg_hdr hdr;
+ int vdev_id, hdr_len;
+
+ /* Read/Write packet header. */
+ if (is_rx) {
+ /* Drain one word from the FIFO. */
+ *(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
+
+ /* Skip the length 0 packets (keepalive). */
+ if (hdr.len == 0)
+ return;
+
+ /* Check packet type. */
+ if (hdr.type == VIRTIO_ID_NET) {
+ vdev_id = VIRTIO_ID_NET;
+ hdr_len = sizeof(struct virtio_net_hdr);
+ config = &fifo->vdev[vdev_id]->config.net;
+ if (ntohs(hdr.len) > config->mtu +
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ return;
+ } else {
+ vdev_id = VIRTIO_ID_CONSOLE;
+ hdr_len = 0;
+ }
+
+ /*
+ * Check whether the new packet still belongs to this vring.
+ * If not, update the pkt_len of the new vring.
+ */
+ if (vdev_id != vring->vdev_id) {
+ struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];
+
+ if (!tm_dev2)
+ return;
+ vring->desc = desc;
+ vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
+ *vring_change = true;
+ }
+ vring->pkt_len = ntohs(hdr.len) + hdr_len;
+ } else {
+ /* Network virtio has an extra header. */
+ hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
+ sizeof(struct virtio_net_hdr) : 0;
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
+ VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
+ hdr.len = htons(vring->pkt_len - hdr_len);
+ writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+ }
+
+ vring->cur_len = hdr_len;
+ vring->rem_len = vring->pkt_len;
+ fifo->vring[is_rx] = vring;
+}
+
+/*
+ * Rx/Tx one descriptor.
+ *
+ * Return true to indicate more data available.
+ */
+static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ bool is_rx, int *avail)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ struct virtio_device *vdev;
+ bool vring_change = false;
+ struct vring_desc *desc;
+ unsigned long flags;
+ u32 len, idx;
+
+ vdev = &fifo->vdev[vring->vdev_id]->vdev;
+
+ /* Get the descriptor of the next packet. */
+ if (!vring->desc) {
+ desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
+ if (!desc)
+ return false;
+ } else {
+ desc = vring->desc;
+ }
+
+ /* Beginning of a packet. Start to Rx/Tx packet header. */
+ if (vring->pkt_len == 0) {
+ mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+ (*avail)--;
+
+ /* Return if new packet is for another ring. */
+ if (vring_change)
+ return false;
+ goto mlxbf_tmfifo_desc_done;
+ }
+
+ /* Get the length of this desc. */
+ len = virtio32_to_cpu(vdev, desc->len);
+ if (len > vring->rem_len)
+ len = vring->rem_len;
+
+ /* Rx/Tx one word (8 bytes) if not done. */
+ if (vring->cur_len < len) {
+ mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
+ (*avail)--;
+ }
+
+ /* Check again whether it's done. */
+ if (vring->cur_len == len) {
+ vring->cur_len = 0;
+ vring->rem_len -= len;
+
+ /* Get the next desc on the chain. */
+ if (vring->rem_len > 0 &&
+ (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ goto mlxbf_tmfifo_desc_done;
+ }
+
+ /* Done and release the pending packet. */
+ mlxbf_tmfifo_release_pending_pkt(vring);
+ desc = NULL;
+ fifo->vring[is_rx] = NULL;
+
+ /* Notify upper layer that packet is done. */
+ spin_lock_irqsave(&fifo->spin_lock, flags);
+ vring_interrupt(0, vring->vq);
+ spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ }
+
+mlxbf_tmfifo_desc_done:
+ /* Save the current desc. */
+ vring->desc = desc;
+
+ return true;
+}
+
+/* Rx & Tx processing of a queue. */
+static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+{
+ int avail = 0, devid = vring->vdev_id;
+ struct mlxbf_tmfifo *fifo;
+ bool more;
+
+ fifo = vring->fifo;
+
+ /* Return if vdev is not ready. */
+ if (!fifo->vdev[devid])
+ return;
+
+ /* Return if another vring is running. */
+ if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
+ return;
+
+ /* Only handle console and network for now. */
+ if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
+ return;
+
+ do {
+ /* Get available FIFO space. */
+ if (avail == 0) {
+ if (is_rx)
+ avail = mlxbf_tmfifo_get_rx_avail(fifo);
+ else
+ avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
+ if (avail <= 0)
+ break;
+ }
+
+ /* Console output always comes from the Tx buffer. */
+ if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
+ mlxbf_tmfifo_console_tx(fifo, avail);
+ break;
+ }
+
+ /* Handle one descriptor. */
+ more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
+ } while (more);
+}
+
+/* Handle Rx or Tx queues. */
+static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
+ int irq_id, bool is_rx)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+ struct mlxbf_tmfifo_vring *vring;
+ int i;
+
+ if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
+ !fifo->irq_info[irq_id].irq)
+ return;
+
+ for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
+ tm_vdev = fifo->vdev[i];
+ if (tm_vdev) {
+ vring = &tm_vdev->vrings[queue_id];
+ if (vring->vq)
+ mlxbf_tmfifo_rxtx(vring, is_rx);
+ }
+ }
+}
+
+/* Work handler for Rx and Tx case. */
+static void mlxbf_tmfifo_work_handler(struct work_struct *work)
+{
+ struct mlxbf_tmfifo *fifo;
+
+ fifo = container_of(work, struct mlxbf_tmfifo, work);
+ if (!fifo->is_ready)
+ return;
+
+ mutex_lock(&fifo->lock);
+
+ /* Tx (Send data to the TmFifo). */
+ mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
+ MLXBF_TM_TX_LWM_IRQ, false);
+
+ /* Rx (Receive data from the TmFifo). */
+ mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
+ MLXBF_TM_RX_HWM_IRQ, true);
+
+ mutex_unlock(&fifo->lock);
+}
+
+/* The notify function is called when new buffers are posted. */
+static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
+{
+ struct mlxbf_tmfifo_vring *vring = vq->priv;
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+ struct mlxbf_tmfifo *fifo;
+ unsigned long flags;
+
+ fifo = vring->fifo;
+
+ /*
+ * Virtio maintains vrings in pairs: even-numbered rings for Rx
+ * and odd-numbered rings for Tx.
+ */
+ if (vring->index & BIT(0)) {
+ /*
+ * The console can make a blocking call with interrupts disabled.
+ * In that case, the vring needs to be served right away; otherwise,
+ * just set the TX LWM bit to start Tx in the worker handler.
+ */
+ if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
+ spin_lock_irqsave(&fifo->spin_lock, flags);
+ tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
+ mlxbf_tmfifo_console_output(tm_vdev, vring);
+ spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
+ &fifo->pend_events)) {
+ return true;
+ }
+ } else {
+ if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
+ return true;
+ }
+
+ schedule_work(&fifo->work);
+
+ return true;
+}
+
+/* Get the array of feature bits for this device. */
+static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ return tm_vdev->features;
+}
+
+/* Confirm device features to use. */
+static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->features = vdev->features;
+
+ return 0;
+}
+
+/* Free virtqueues found by find_vqs(). */
+static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+ struct mlxbf_tmfifo_vring *vring;
+ struct virtqueue *vq;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+
+ /* Release the pending packet. */
+ if (vring->desc)
+ mlxbf_tmfifo_release_pending_pkt(vring);
+ vq = vring->vq;
+ if (vq) {
+ vring->vq = NULL;
+ vring_del_virtqueue(vq);
+ }
+ }
+}
+
+/* Create and initialize the virtual queues. */
+static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
+ unsigned int nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[],
+ const bool *ctx,
+ struct irq_affinity *desc)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+ struct mlxbf_tmfifo_vring *vring;
+ struct virtqueue *vq;
+ int i, ret, size;
+
+ if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
+ return -EINVAL;
+
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ ret = -EINVAL;
+ goto error;
+ }
+ vring = &tm_vdev->vrings[i];
+
+ /* zero vring */
+ size = vring_size(vring->num, vring->align);
+ memset(vring->va, 0, size);
+ vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
+ false, false, vring->va,
+ mlxbf_tmfifo_virtio_notify,
+ callbacks[i], names[i]);
+ if (!vq) {
+ dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ vqs[i] = vq;
+ vring->vq = vq;
+ vq->priv = vring;
+ }
+
+ return 0;
+
+error:
+ mlxbf_tmfifo_virtio_del_vqs(vdev);
+ return ret;
+}
+
+/* Read the status byte. */
+static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ return tm_vdev->status;
+}
+
+/* Write the status byte. */
+static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
+ u8 status)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->status = status;
+}
+
+/* Reset the device. Not much here for now. */
+static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->status = 0;
+}
+
+/* Read the value of a configuration field. */
+static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf,
+ unsigned int len)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ if ((u64)offset + len > sizeof(tm_vdev->config))
+ return;
+
+ memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
+}
+
+/* Write the value of a configuration field. */
+static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
+ unsigned int offset,
+ const void *buf,
+ unsigned int len)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ if ((u64)offset + len > sizeof(tm_vdev->config))
+ return;
+
+ memcpy((u8 *)&tm_vdev->config + offset, buf, len);
+}
+
+static void tmfifo_virtio_dev_release(struct device *device)
+{
+ struct virtio_device *vdev =
+ container_of(device, struct virtio_device, dev);
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ kfree(tm_vdev);
+}
+
+/* Virtio config operations. */
+static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
+ .get_features = mlxbf_tmfifo_virtio_get_features,
+ .finalize_features = mlxbf_tmfifo_virtio_finalize_features,
+ .find_vqs = mlxbf_tmfifo_virtio_find_vqs,
+ .del_vqs = mlxbf_tmfifo_virtio_del_vqs,
+ .reset = mlxbf_tmfifo_virtio_reset,
+ .set_status = mlxbf_tmfifo_virtio_set_status,
+ .get_status = mlxbf_tmfifo_virtio_get_status,
+ .get = mlxbf_tmfifo_virtio_get,
+ .set = mlxbf_tmfifo_virtio_set,
+};
+
+/* Create vdev for the FIFO. */
+static int mlxbf_tmfifo_create_vdev(struct device *dev,
+ struct mlxbf_tmfifo *fifo,
+ int vdev_id, u64 features,
+ void *config, u32 size)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
+ int ret;
+
+ mutex_lock(&fifo->lock);
+
+ tm_vdev = fifo->vdev[vdev_id];
+ if (tm_vdev) {
+ dev_err(dev, "vdev %d already exists\n", vdev_id);
+ ret = -EEXIST;
+ goto fail;
+ }
+
+ tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
+ if (!tm_vdev) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ tm_vdev->vdev.id.device = vdev_id;
+ tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
+ tm_vdev->vdev.dev.parent = dev;
+ tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
+ tm_vdev->features = features;
+ if (config)
+ memcpy(&tm_vdev->config, config, size);
+
+ if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
+ dev_err(dev, "unable to allocate vring\n");
+ ret = -ENOMEM;
+ goto vdev_fail;
+ }
+
+ /* Allocate an output buffer for the console device. */
+ if (vdev_id == VIRTIO_ID_CONSOLE)
+ tm_vdev->tx_buf.buf = devm_kmalloc(dev,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE,
+ GFP_KERNEL);
+ fifo->vdev[vdev_id] = tm_vdev;
+
+ /* Register the virtio device. */
+ ret = register_virtio_device(&tm_vdev->vdev);
+ reg_dev = tm_vdev;
+ if (ret) {
+ dev_err(dev, "register_virtio_device failed\n");
+ goto vdev_fail;
+ }
+
+ mutex_unlock(&fifo->lock);
+ return 0;
+
+vdev_fail:
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ fifo->vdev[vdev_id] = NULL;
+ if (reg_dev)
+ put_device(&tm_vdev->vdev.dev);
+ else
+ kfree(tm_vdev);
+fail:
+ mutex_unlock(&fifo->lock);
+ return ret;
+}
+
+/* Delete vdev for the FIFO. */
+static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+
+ mutex_lock(&fifo->lock);
+
+ /* Unregister vdev. */
+ tm_vdev = fifo->vdev[vdev_id];
+ if (tm_vdev) {
+ unregister_virtio_device(&tm_vdev->vdev);
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ fifo->vdev[vdev_id] = NULL;
+ }
+
+ mutex_unlock(&fifo->lock);
+
+ return 0;
+}
+
+/* Read the configured network MAC address from the EFI variable. */
+static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
+{
+ efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
+ unsigned long size = ETH_ALEN;
+ u8 buf[ETH_ALEN];
+ efi_status_t rc;
+
+ rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
+ if (rc == EFI_SUCCESS && size == ETH_ALEN)
+ ether_addr_copy(mac, buf);
+ else
+ ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
+}
+
+/* Set the TmFifo thresholds used to trigger interrupts. */
+static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
+{
+ u64 ctl;
+
+ /* Get Tx FIFO size and set the low/high watermark. */
+ ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
+ fifo->tx_fifo_size =
+ FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
+ ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
+ fifo->tx_fifo_size / 2);
+ ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
+ fifo->tx_fifo_size - 1);
+ writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
+
+ /* Get Rx FIFO size and set the low/high watermark. */
+ ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
+ fifo->rx_fifo_size =
+ FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
+ ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
+ ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
+ writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
+}
+
+static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
+{
+ int i;
+
+ fifo->is_ready = false;
+ del_timer_sync(&fifo->timer);
+ mlxbf_tmfifo_disable_irqs(fifo);
+ cancel_work_sync(&fifo->work);
+ for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
+ mlxbf_tmfifo_delete_vdev(fifo, i);
+}
+
+/* Probe the TmFifo. */
+static int mlxbf_tmfifo_probe(struct platform_device *pdev)
+{
+ struct virtio_net_config net_config;
+ struct device *dev = &pdev->dev;
+ struct mlxbf_tmfifo *fifo;
+ int i, rc;
+
+ fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return -ENOMEM;
+
+ spin_lock_init(&fifo->spin_lock);
+ INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
+ mutex_init(&fifo->lock);
+
+ /* Get the resource of the Rx FIFO. */
+ fifo->rx_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fifo->rx_base))
+ return PTR_ERR(fifo->rx_base);
+
+ /* Get the resource of the Tx FIFO. */
+ fifo->tx_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(fifo->tx_base))
+ return PTR_ERR(fifo->tx_base);
+
+ platform_set_drvdata(pdev, fifo);
+
+ timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);
+
+ for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
+ fifo->irq_info[i].index = i;
+ fifo->irq_info[i].fifo = fifo;
+ fifo->irq_info[i].irq = platform_get_irq(pdev, i);
+ rc = devm_request_irq(dev, fifo->irq_info[i].irq,
+ mlxbf_tmfifo_irq_handler, 0,
+ "tmfifo", &fifo->irq_info[i]);
+ if (rc) {
+ dev_err(dev, "devm_request_irq failed\n");
+ fifo->irq_info[i].irq = 0;
+ return rc;
+ }
+ }
+
+ mlxbf_tmfifo_set_threshold(fifo);
+
+ /* Create the console vdev. */
+ rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
+ if (rc)
+ goto fail;
+
+ /* Create the network vdev. */
+ memset(&net_config, 0, sizeof(net_config));
+ net_config.mtu = ETH_DATA_LEN;
+ net_config.status = VIRTIO_NET_S_LINK_UP;
+ mlxbf_tmfifo_get_cfg_mac(net_config.mac);
+ rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
+ MLXBF_TMFIFO_NET_FEATURES, &net_config,
+ sizeof(net_config));
+ if (rc)
+ goto fail;
+
+ mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+
+ fifo->is_ready = true;
+ return 0;
+
+fail:
+ mlxbf_tmfifo_cleanup(fifo);
+ return rc;
+}
+
+/* Device remove function. */
+static int mlxbf_tmfifo_remove(struct platform_device *pdev)
+{
+ struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);
+
+ mlxbf_tmfifo_cleanup(fifo);
+
+ return 0;
+}
+
+static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
+ { "MLNXBF01", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);
+
+static struct platform_driver mlxbf_tmfifo_driver = {
+ .probe = mlxbf_tmfifo_probe,
+ .remove = mlxbf_tmfifo_remove,
+ .driver = {
+ .name = "bf-tmfifo",
+ .acpi_match_table = mlxbf_tmfifo_acpi_match,
+ },
+};
+
+module_platform_driver(mlxbf_tmfifo_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mellanox Technologies");
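
For reference, the TmFifo framing implemented by mlxbf_tmfifo_console_tx() and struct mlxbf_tmfifo_msg_hdr above boils down to one 8-byte header word: a service type byte plus a big-endian payload length. A minimal stand-alone sketch of that packing (plain C; the struct and function names here are invented for illustration, not part of this patch):

#include <arpa/inet.h>	/* htons() */
#include <stdint.h>
#include <string.h>

/* Mirrors struct mlxbf_tmfifo_msg_hdr: type, big-endian length, padding. */
struct tmfifo_msg_hdr_example {
	uint8_t type;		/* e.g. 3 for VIRTIO_ID_CONSOLE */
	uint16_t len;		/* payload length, network byte order */
	uint8_t unused[5];
} __attribute__((packed, aligned(8)));

/* Pack one header word the way the driver writes it into the Tx FIFO. */
static uint64_t tmfifo_pack_hdr_example(uint8_t type, uint16_t payload_len)
{
	struct tmfifo_msg_hdr_example hdr = {
		.type = type,
		.len = htons(payload_len),
	};
	uint64_t word;

	memcpy(&word, &hdr, sizeof(word));
	return word;
}

A matching receiver reads the same words back and applies ntohs() to the length field, as mlxbf_tmfifo_rxtx_header() does on the Rx path.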
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index a1ed13183559..85b92a95e4c8 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1263,6 +1263,17 @@ config INTEL_CHTDC_TI_PWRBTN
To compile this driver as a module, choose M here: the module
will be called intel_chtdc_ti_pwrbtn.
+config INTEL_MRFLD_PWRBTN
+ tristate "Intel Merrifield Basin Cove power button driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ depends on INPUT
+ ---help---
+ This option adds a power button driver for the Basin Cove PMIC
+ on Intel Merrifield devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_mrfld_pwrbtn.
+
config I2C_MULTI_INSTANTIATE
tristate "I2C multi instantiate pseudo device driver"
depends on I2C && ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 86cb76677bc8..87b0069bd781 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index f10af5c383c5..83fd7677af24 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -522,23 +522,22 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
input.length = (acpi_size) sizeof(*in_args);
input.pointer = in_args;
- if (out_data != NULL) {
+ if (out_data) {
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL;
status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
command, &input, &output);
- } else
+ if (ACPI_SUCCESS(status)) {
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *out_data = (u32)obj->integer.value;
+ }
+ kfree(output.pointer);
+ } else {
status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
command, &input, NULL);
-
- if (ACPI_SUCCESS(status) && out_data != NULL) {
- obj = (union acpi_object *)output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- *out_data = (u32) obj->integer.value;
}
- kfree(output.pointer);
return status;
-
}
/*
@@ -588,7 +587,7 @@ static ssize_t show_hdmi_source(struct device *dev,
return scnprintf(buf, PAGE_SIZE,
"input [gpu] unknown\n");
}
- pr_err("alienware-wmi: unknown HDMI source status: %d\n", out_data);
+ pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ee1fa93708ec..f94691615881 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -66,10 +66,13 @@ MODULE_LICENSE("GPL");
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
+#define NOTIFY_FNLOCK_TOGGLE 0x4e
#define NOTIFY_KBD_BRTUP 0xc4
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
+#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
+
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
#define ASUS_FAN_SFUN_READ 0x06
@@ -177,6 +180,8 @@ struct asus_wmi {
struct workqueue_struct *hotplug_workqueue;
struct work_struct hotplug_work;
+ bool fnlock_locked;
+
struct asus_wmi_debug debug;
struct asus_wmi_driver *driver;
@@ -1619,6 +1624,23 @@ static int is_display_toggle(int code)
return 0;
}
+static bool asus_wmi_has_fnlock_key(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FNLOCK, &result);
+
+ return (result & ASUS_WMI_DSTS_PRESENCE_BIT) &&
+ !(result & ASUS_WMI_FNLOCK_BIOS_DISABLED);
+}
+
+static void asus_wmi_fnlock_update(struct asus_wmi *asus)
+{
+ int mode = asus->fnlock_locked;
+
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_FNLOCK, mode, NULL);
+}
+
static void asus_wmi_notify(u32 value, void *context)
{
struct asus_wmi *asus = context;
@@ -1680,6 +1702,12 @@ static void asus_wmi_notify(u32 value, void *context)
goto exit;
}
+ if (code == NOTIFY_FNLOCK_TOGGLE) {
+ asus->fnlock_locked = !asus->fnlock_locked;
+ asus_wmi_fnlock_update(asus);
+ goto exit;
+ }
+
if (is_display_toggle(code) &&
asus->driver->quirks->no_display_toggle)
goto exit;
@@ -2134,6 +2162,11 @@ static int asus_wmi_add(struct platform_device *pdev)
} else
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
+ if (asus_wmi_has_fnlock_key(asus)) {
+ asus->fnlock_locked = true;
+ asus_wmi_fnlock_update(asus);
+ }
+
status = wmi_install_notify_handler(asus->driver->event_guid,
asus_wmi_notify, asus);
if (ACPI_FAILURE(status)) {
@@ -2213,6 +2246,8 @@ static int asus_hotk_resume(struct device *device)
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
kbd_led_update(asus);
+ if (asus_wmi_has_fnlock_key(asus))
+ asus_wmi_fnlock_update(asus);
return 0;
}
@@ -2249,6 +2284,8 @@ static int asus_hotk_restore(struct device *device)
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
kbd_led_update(asus);
+ if (asus_wmi_has_fnlock_key(asus))
+ asus_wmi_fnlock_update(asus);
return 0;
}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 95e6ca116e00..a561f653cf13 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -531,7 +531,7 @@ static void dell_rfkill_query(struct rfkill *rfkill, void *data)
return;
}
- dell_fill_request(&buffer, 0, 0x2, 0, 0);
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
hwswitch = buffer.output[1];
@@ -562,7 +562,7 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
return ret;
status = buffer.output[1];
- dell_fill_request(&buffer, 0, 0x2, 0, 0);
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
if (hwswitch_ret)
return hwswitch_ret;
@@ -647,7 +647,7 @@ static void dell_update_rfkill(struct work_struct *ignored)
if (ret != 0)
return;
- dell_fill_request(&buffer, 0, 0x2, 0, 0);
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
if (ret == 0 && (status & BIT(0)))
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index f3afe778001e..56535d7222dd 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -18,6 +18,8 @@
#include <linux/rfkill.h>
#include <linux/input.h>
+#include "dell-rbtn.h"
+
enum rbtn_type {
RBTN_UNKNOWN,
RBTN_TOGGLE,
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index c53ae86b59c7..2d94536dea88 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -980,312 +980,21 @@ static void ideapad_wmi_notify(u32 value, void *context)
#endif
/*
- * Some ideapads don't have a hardware rfkill switch, reading VPCCMD_R_RF
- * always results in 0 on these models, causing ideapad_laptop to wrongly
- * report all radios as hardware-blocked.
+ * Some ideapads have a hardware rfkill switch, but most do not have one.
+ * Reading VPCCMD_R_RF always results in 0 on models without a hardware
+ * rfkill switch, causing ideapad_laptop to wrongly report all radios as
+ * hw-blocked.
+ * There used to be a long list of DMI ids here for models without a hw
+ * rfkill switch, but maintaining it was a game of whack-a-mole.
+ * More importantly, wrongly reporting the wifi radio as hw-blocked results
+ * in non-working wifi, whereas not reporting it hw-blocked when it actually
+ * is results in an empty SSID list, which is a much more benign failure
+ * mode.
+ * So the default is now the much safer option of assuming there is no
+ * hardware rfkill switch. This also matches most hardware, since a hw
+ * rfkill switch is quite rare on modern machines, which keeps the list short.
*/
-static const struct dmi_system_id no_hw_rfkill_list[] = {
- {
- .ident = "Lenovo RESCUER R720-15IKBN",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"),
- },
- },
- {
- .ident = "Lenovo G40-30",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
- },
- },
- {
- .ident = "Lenovo G50-30",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"),
- },
- },
- {
- .ident = "Lenovo V310-14IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14IKB"),
- },
- },
- {
- .ident = "Lenovo V310-14ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14ISK"),
- },
- },
- {
- .ident = "Lenovo V310-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15IKB"),
- },
- },
- {
- .ident = "Lenovo V310-15ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"),
- },
- },
- {
- .ident = "Lenovo V510-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V510-15IKB"),
- },
- },
- {
- .ident = "Lenovo ideapad 300-15IBR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IBR"),
- },
- },
- {
- .ident = "Lenovo ideapad 300-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IKB"),
- },
- },
- {
- .ident = "Lenovo ideapad 300S-11IBR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300S-11BR"),
- },
- },
- {
- .ident = "Lenovo ideapad 310-15ABR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ABR"),
- },
- },
- {
- .ident = "Lenovo ideapad 310-15IAP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IAP"),
- },
- },
- {
- .ident = "Lenovo ideapad 310-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"),
- },
- },
- {
- .ident = "Lenovo ideapad 310-15ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad 330-15ICH",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 330-15ICH"),
- },
- },
- {
- .ident = "Lenovo ideapad 530S-14ARR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 530S-14ARR"),
- },
- },
- {
- .ident = "Lenovo ideapad S130-14IGM",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad S130-14IGM"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700-14ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-14ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700-15ACZ",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ACZ"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700-15ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700 Touch-15ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700-17ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad MIIX 720-12IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 720-12IKB"),
- },
- },
- {
- .ident = "Lenovo Legion Y520-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
- },
- },
- {
- .ident = "Lenovo Y520-15IKBM",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBM"),
- },
- },
- {
- .ident = "Lenovo Legion Y530-15ICH",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH"),
- },
- },
- {
- .ident = "Lenovo Legion Y530-15ICH-1060",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH-1060"),
- },
- },
- {
- .ident = "Lenovo Legion Y720-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKB"),
- },
- },
- {
- .ident = "Lenovo Legion Y720-15IKBN",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBN"),
- },
- },
- {
- .ident = "Lenovo Y720-15IKBM",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBM"),
- },
- },
- {
- .ident = "Lenovo Yoga 2 11 / 13 / Pro",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"),
- },
- },
- {
- .ident = "Lenovo Yoga 2 11 / 13 / Pro",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_BOARD_NAME, "Yoga2"),
- },
- },
- {
- .ident = "Lenovo Yoga 2 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Yoga 2 13"),
- },
- },
- {
- .ident = "Lenovo Yoga 3 1170 / 1470",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3"),
- },
- },
- {
- .ident = "Lenovo Yoga 3 Pro 1370",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"),
- },
- },
- {
- .ident = "Lenovo Yoga 700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
- },
- },
- {
- .ident = "Lenovo Yoga 900",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
- },
- },
- {
- .ident = "Lenovo Yoga 900",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_BOARD_NAME, "VIUU4"),
- },
- },
- {
- .ident = "Lenovo YOGA 910-13IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
- },
- },
- {
- .ident = "Lenovo YOGA 920-13IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"),
- },
- },
- {
- .ident = "Lenovo YOGA C930-13IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA C930-13IKB"),
- },
- },
- {
- .ident = "Lenovo Zhaoyang E42-80",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ZHAOYANG E42-80"),
- },
- },
+static const struct dmi_system_id hw_rfkill_list[] = {
{}
};
@@ -1311,7 +1020,7 @@ static int ideapad_acpi_add(struct platform_device *pdev)
priv->cfg = cfg;
priv->adev = adev;
priv->platform_device = pdev;
- priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list);
+ priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
ret = ideapad_sysfs_init(priv);
if (ret)
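
The hw_rfkill_list above is left empty on purpose. Should an ideapad with a physical rfkill switch ever need it, an entry would presumably mirror the DMI entries removed in this hunk; the vendor/version strings below are placeholders, not a real quirk:

static const struct dmi_system_id hw_rfkill_list[] = {
	{
		.ident = "Some Lenovo model with a hardware rfkill switch",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo <model>"),
		},
	},
	{}
};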
diff --git a/drivers/platform/x86/intel_mrfld_pwrbtn.c b/drivers/platform/x86/intel_mrfld_pwrbtn.c
new file mode 100644
index 000000000000..d58fea51747e
--- /dev/null
+++ b/drivers/platform/x86/intel_mrfld_pwrbtn.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power-button driver for Basin Cove PMIC
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+
+#define BCOVE_PBSTATUS 0x27
+#define BCOVE_PBSTATUS_PBLVL BIT(4) /* 1 - release, 0 - press */
+
+static irqreturn_t mrfld_pwrbtn_interrupt(int irq, void *dev_id)
+{
+ struct input_dev *input = dev_id;
+ struct device *dev = input->dev.parent;
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int state;
+ int ret;
+
+ ret = regmap_read(regmap, BCOVE_PBSTATUS, &state);
+ if (ret)
+ return IRQ_NONE;
+
+ dev_dbg(dev, "PBSTATUS=0x%x\n", state);
+ input_report_key(input, KEY_POWER, !(state & BCOVE_PBSTATUS_PBLVL));
+ input_sync(input);
+
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
+ return IRQ_HANDLED;
+}
+
+static int mrfld_pwrbtn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct input_dev *input;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+ input->name = pdev->name;
+ input->phys = "power-button/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = dev;
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ ret = input_register_device(input);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, pmic->regmap);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_pwrbtn_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ input);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pmic->regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
+ regmap_update_bits(pmic->regmap, BCOVE_MPBIRQ, BCOVE_PBIRQ_PBTN, 0);
+
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ return 0;
+}
+
+static int mrfld_pwrbtn_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ return 0;
+}
+
+static const struct platform_device_id mrfld_pwrbtn_id_table[] = {
+ { .name = "mrfld_bcove_pwrbtn" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_pwrbtn_id_table);
+
+static struct platform_driver mrfld_pwrbtn_driver = {
+ .driver = {
+ .name = "mrfld_bcove_pwrbtn",
+ },
+ .probe = mrfld_pwrbtn_probe,
+ .remove = mrfld_pwrbtn_remove,
+ .id_table = mrfld_pwrbtn_id_table,
+};
+module_platform_driver(mrfld_pwrbtn_driver);
+
+MODULE_DESCRIPTION("Power-button driver for Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
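
For context, the interrupt handler above surfaces the PBSTATUS level bit as ordinary EV_KEY/KEY_POWER events, so user space sees a normal power button. A minimal evdev reader sketch (the /dev/input/event0 path is an assumption; the real node can be looked up in /proc/bus/input/devices):

#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);	/* assumed node */

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_KEY && ev.code == KEY_POWER)
			printf("power button %s\n",
			       ev.value ? "pressed" : "released");
	}

	close(fd);
	return 0;
}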
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index f2c621b55f49..1d902230ba61 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -19,6 +19,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <asm/cpu_device_id.h>
@@ -828,7 +830,7 @@ static const struct pci_device_id pmc_pci_ids[] = {
* the platform BIOS enforces the 24MHz crystal to shut down
* before the PMC can assert SLP_S0#.
*/
-int quirk_xtal_ignore(const struct dmi_system_id *id)
+static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
struct pmc_dev *pmcdev = &pmc;
u32 value;
@@ -854,13 +856,17 @@ static const struct dmi_system_id pmc_core_dmi_table[] = {
{}
};
-static int __init pmc_core_probe(void)
+static int pmc_core_probe(struct platform_device *pdev)
{
+ static bool device_initialized;
struct pmc_dev *pmcdev = &pmc;
const struct x86_cpu_id *cpu_id;
u64 slp_s0_addr;
int err;
+ if (device_initialized)
+ return -ENODEV;
+
cpu_id = x86_match_cpu(intel_pmc_core_ids);
if (!cpu_id)
return -ENODEV;
@@ -886,30 +892,178 @@ static int __init pmc_core_probe(void)
return -ENOMEM;
mutex_init(&pmcdev->lock);
+ platform_set_drvdata(pdev, pmcdev);
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+ dmi_check_system(pmc_core_dmi_table);
err = pmc_core_dbgfs_register(pmcdev);
if (err < 0) {
- pr_warn(" debugfs register failed.\n");
+ dev_warn(&pdev->dev, "debugfs register failed.\n");
iounmap(pmcdev->regbase);
return err;
}
- dmi_check_system(pmc_core_dmi_table);
- pr_info(" initialized\n");
+ device_initialized = true;
+ dev_info(&pdev->dev, "initialized\n");
+
return 0;
}
-module_init(pmc_core_probe)
-static void __exit pmc_core_remove(void)
+static int pmc_core_remove(struct platform_device *pdev)
{
- struct pmc_dev *pmcdev = &pmc;
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
pmc_core_dbgfs_unregister(pmcdev);
+ platform_set_drvdata(pdev, NULL);
mutex_destroy(&pmcdev->lock);
iounmap(pmcdev->regbase);
+ return 0;
}
-module_exit(pmc_core_remove)
+
+#ifdef CONFIG_PM_SLEEP
+
+static bool warn_on_s0ix_failures;
+module_param(warn_on_s0ix_failures, bool, 0644);
+MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
+
+static int pmc_core_suspend(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+
+ pmcdev->check_counters = false;
+
+ /* No warnings on S0ix failures */
+ if (!warn_on_s0ix_failures)
+ return 0;
+
+ /* Check if the suspend will actually use S0ix */
+ if (pm_suspend_via_firmware())
+ return 0;
+
+ /* Save PC10 residency for checking later */
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
+ return -EIO;
+
+ /* Save S0ix residency for checking later */
+ if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
+ return -EIO;
+
+ pmcdev->check_counters = true;
+ return 0;
+}
+
+static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
+{
+ u64 pc10_counter;
+
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
+ return false;
+
+ if (pc10_counter == pmcdev->pc10_counter)
+ return true;
+
+ return false;
+}
+
+static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
+{
+ u64 s0ix_counter;
+
+ if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
+ return false;
+
+ if (s0ix_counter == pmcdev->s0ix_counter)
+ return true;
+
+ return false;
+}
+
+static int pmc_core_resume(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
+ int offset = pmcdev->map->slps0_dbg_offset;
+ const struct pmc_bit_map *map;
+ u32 data;
+
+ if (!pmcdev->check_counters)
+ return 0;
+
+ if (!pmc_core_is_s0ix_failed(pmcdev))
+ return 0;
+
+ if (pmc_core_is_pc10_failed(pmcdev)) {
+ /* S0ix failed because of PC10 entry failure */
+ dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
+ pmcdev->pc10_counter);
+ return 0;
+ }
+
+ /* The really interesting case - S0ix failed - let's ask the PMC why. */
+ dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
+ pmcdev->s0ix_counter);
+ while (*maps) {
+ map = *maps;
+ data = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ while (map->name) {
+ dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ map++;
+ }
+ maps++;
+ }
+ return 0;
+}
+
+#endif
+
+static const struct dev_pm_ops pmc_core_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
+};
+
+static struct platform_driver pmc_core_driver = {
+ .driver = {
+ .name = "intel_pmc_core",
+ .pm = &pmc_core_pm_ops,
+ },
+ .probe = pmc_core_probe,
+ .remove = pmc_core_remove,
+};
+
+static struct platform_device pmc_core_device = {
+ .name = "intel_pmc_core",
+};
+
+static int __init pmc_core_init(void)
+{
+ int ret;
+
+ if (!x86_match_cpu(intel_pmc_core_ids))
+ return -ENODEV;
+
+ ret = platform_driver_register(&pmc_core_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_device_register(&pmc_core_device);
+ if (ret) {
+ platform_driver_unregister(&pmc_core_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit pmc_core_exit(void)
+{
+ platform_device_unregister(&pmc_core_device);
+ platform_driver_unregister(&pmc_core_driver);
+}
+
+module_init(pmc_core_init)
+module_exit(pmc_core_exit)
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index 88d9c0653a5f..fdee5772e532 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -241,6 +241,9 @@ struct pmc_reg_map {
* @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
* used to read MPHY PG and PLL status are available
* @mutex_lock: mutex to complete one transaction
+ * @check_counters: On resume, check if counters are getting incremented
+ * @pc10_counter: PC10 residency counter
+ * @s0ix_counter: S0ix residency (step adjusted)
*
* pmc_dev contains info about power management controller device.
*/
@@ -253,6 +256,10 @@ struct pmc_dev {
#endif /* CONFIG_DEBUG_FS */
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
+
+ bool check_counters; /* Check for counter increments on resume */
+ u64 pc10_counter;
+ u64 s0ix_counter;
};
#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 7964ba22ef8d..55037ff258f8 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -40,14 +40,14 @@
* The ARC handles the interrupt and services it, writing optional data to
* the IPC1 registers, updates the IPC_STS response register with the status.
*/
-#define IPC_CMD 0x0
-#define IPC_CMD_MSI 0x100
+#define IPC_CMD 0x00
+#define IPC_CMD_MSI BIT(8)
#define IPC_CMD_SIZE 16
#define IPC_CMD_SUBCMD 12
#define IPC_STATUS 0x04
-#define IPC_STATUS_IRQ 0x4
-#define IPC_STATUS_ERR 0x2
-#define IPC_STATUS_BUSY 0x1
+#define IPC_STATUS_IRQ BIT(2)
+#define IPC_STATUS_ERR BIT(1)
+#define IPC_STATUS_BUSY BIT(0)
#define IPC_SPTR 0x08
#define IPC_DPTR 0x0C
#define IPC_WRITE_BUFFER 0x80
@@ -101,13 +101,13 @@
#define TELEM_SSRAM_SIZE 240
#define TELEM_PMC_SSRAM_OFFSET 0x1B00
#define TELEM_PUNIT_SSRAM_OFFSET 0x1A00
-#define TCO_PMC_OFFSET 0x8
-#define TCO_PMC_SIZE 0x4
+#define TCO_PMC_OFFSET 0x08
+#define TCO_PMC_SIZE 0x04
/* PMC register bit definitions */
/* PMC_CFG_REG bit masks */
-#define PMC_CFG_NO_REBOOT_MASK (1 << 4)
+#define PMC_CFG_NO_REBOOT_MASK BIT_MASK(4)
#define PMC_CFG_NO_REBOOT_EN (1 << 4)
#define PMC_CFG_NO_REBOOT_DIS (0 << 4)
@@ -131,6 +131,7 @@ static struct intel_pmc_ipc_dev {
/* punit */
struct platform_device *punit_dev;
+ unsigned int punit_res_count;
/* Telemetry */
resource_size_t telem_pmc_ssram_base;
@@ -682,7 +683,7 @@ static int ipc_create_punit_device(void)
.name = PUNIT_DEVICE_NAME,
.id = -1,
.res = punit_res_array,
- .num_res = ARRAY_SIZE(punit_res_array),
+ .num_res = ipcdev.punit_res_count,
};
pdev = platform_device_register_full(&pdevinfo);
@@ -771,13 +772,17 @@ static int ipc_create_pmc_devices(void)
if (ret) {
dev_err(ipcdev.dev, "Failed to add punit platform device\n");
platform_device_unregister(ipcdev.tco_dev);
+ return ret;
}
if (!ipcdev.telem_res_inval) {
ret = ipc_create_telemetry_device();
- if (ret)
+ if (ret) {
dev_warn(ipcdev.dev,
"Failed to add telemetry platform device\n");
+ platform_device_unregister(ipcdev.punit_dev);
+ platform_device_unregister(ipcdev.tco_dev);
+ }
}
return ret;
@@ -785,7 +790,7 @@ static int ipc_create_pmc_devices(void)
static int ipc_plat_get_res(struct platform_device *pdev)
{
- struct resource *res, *punit_res;
+ struct resource *res, *punit_res = punit_res_array;
void __iomem *addr;
int size;
@@ -800,7 +805,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %pR\n", res);
- punit_res = punit_res_array;
+ ipcdev.punit_res_count = 0;
+
/* This is index 0 to cover BIOS data register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_DATA_INDEX);
@@ -808,7 +814,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get res of punit BIOS data\n");
return -ENXIO;
}
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
/* This is index 1 to cover BIOS interface register */
@@ -818,42 +824,38 @@ static int ipc_plat_get_res(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
return -ENXIO;
}
- *++punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
/* This is index 2 to cover ISP data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_DATA_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
}
/* This is index 3 to cover ISP interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_IFACE_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
}
/* This is index 4 to cover GTD data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_DATA_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
}
/* This is index 5 to cover GTD interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_IFACE_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
}
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index 79671927f4ef..ab7ae1950867 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - GTDRIVER_IPC BASE_IFACE
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 48fa7573e29b..cee039f57499 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -56,6 +56,16 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
+#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
+#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8
+#define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9
+#define MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET 0xcb
+#define MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET 0xcd
+#define MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET 0xce
+#define MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET 0xcf
+#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
+#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
+#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
@@ -72,6 +82,7 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
+#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
@@ -128,6 +139,18 @@
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14
+/* Masks and default values for watchdogs */
+#define MLXPLAT_CPLD_WD1_CLEAR_MASK GENMASK(7, 1)
+#define MLXPLAT_CPLD_WD2_CLEAR_MASK (GENMASK(7, 0) & ~BIT(1))
+
+#define MLXPLAT_CPLD_WD_TYPE1_TO_MASK GENMASK(7, 4)
+#define MLXPLAT_CPLD_WD_TYPE2_TO_MASK 0
+#define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1)
+#define MLXPLAT_CPLD_WD_FAN_ACT_MASK (GENMASK(7, 0) & ~BIT(4))
+#define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7))
+#define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30
+#define MLXPLAT_CPLD_WD_MAX_DEVS 2
+
/* mlxplat_priv - platform private data
* @pdev_i2c - i2c controller platform device
* @pdev_mux - array of mux platform devices
@@ -135,6 +158,7 @@
* @pdev_led - led platform devices
* @pdev_io_regs - register access platform devices
* @pdev_fan - FAN platform devices
+ * @pdev_wd - array of watchdog platform devices
*/
struct mlxplat_priv {
struct platform_device *pdev_i2c;
@@ -143,6 +167,7 @@ struct mlxplat_priv {
struct platform_device *pdev_led;
struct platform_device *pdev_io_regs;
struct platform_device *pdev_fan;
+ struct platform_device *pdev_wd[MLXPLAT_CPLD_WD_MAX_DEVS];
};
/* Regions for LPC I2C controller and LPC base register space */
@@ -1339,6 +1364,10 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(3),
},
+ {
+ .label = "conf",
+ .capability = MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET,
+ },
};
static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
@@ -1346,6 +1375,148 @@ static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_data),
};
+/* Watchdog type1: hardware implementation version1
+ * (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140 systems).
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type1[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type1[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
+ .bit = 1,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type1[] = {
+ {
+ .data = mlxplat_mlxcpld_wd_main_regs_type1,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type1),
+ .version = MLX_WDT_TYPE1,
+ .identity = "mlx-wdt-main",
+ },
+ {
+ .data = mlxplat_mlxcpld_wd_aux_regs_type1,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type1),
+ .version = MLX_WDT_TYPE1,
+ .identity = "mlx-wdt-aux",
+ },
+};
+
+/* Watchdog type2: hardware implementation version 2
+ * (all systems except MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140).
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type2[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type2[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = {
+ {
+ .data = mlxplat_mlxcpld_wd_main_regs_type2,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type2),
+ .version = MLX_WDT_TYPE2,
+ .identity = "mlx-wdt-main",
+ },
+ {
+ .data = mlxplat_mlxcpld_wd_aux_regs_type2,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type2),
+ .version = MLX_WDT_TYPE2,
+ .identity = "mlx-wdt-aux",
+ },
+};
+
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -1368,6 +1539,14 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
return true;
@@ -1411,6 +1590,16 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
@@ -1428,6 +1617,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
return true;
}
return false;
@@ -1467,6 +1657,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
@@ -1484,6 +1678,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
return true;
}
return false;
@@ -1493,6 +1688,7 @@ static const struct reg_default mlxplat_mlxcpld_regmap_default[] = {
{ MLXPLAT_CPLD_LPC_REG_WP1_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WP2_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
struct mlxplat_mlxcpld_regmap_context {
@@ -1542,6 +1738,8 @@ static struct mlxreg_core_hotplug_platform_data *mlxplat_hotplug;
static struct mlxreg_core_platform_data *mlxplat_led;
static struct mlxreg_core_platform_data *mlxplat_regs_io;
static struct mlxreg_core_platform_data *mlxplat_fan;
+static struct mlxreg_core_platform_data
+ *mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS];
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
@@ -1557,6 +1755,7 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_default_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1575,6 +1774,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1593,6 +1793,7 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1611,6 +1812,7 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1630,6 +1832,8 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_led = &mlxplat_default_ng_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
return 1;
};
@@ -1912,15 +2116,33 @@ static int __init mlxplat_init(void)
}
}
+ /* Add WD drivers. */
+ for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
+ if (mlxplat_wd_data[j]) {
+ mlxplat_wd_data[j]->regmap = mlxplat_hotplug->regmap;
+ priv->pdev_wd[j] = platform_device_register_resndata(
+ &mlxplat_dev->dev, "mlx-wdt",
+ j, NULL, 0,
+ mlxplat_wd_data[j],
+ sizeof(*mlxplat_wd_data[j]));
+ if (IS_ERR(priv->pdev_wd[j])) {
+ err = PTR_ERR(priv->pdev_wd[j]);
+ goto fail_platform_wd_register;
+ }
+ }
+ }
+
/* Sync registers with hardware. */
regcache_mark_dirty(mlxplat_hotplug->regmap);
err = regcache_sync(mlxplat_hotplug->regmap);
if (err)
- goto fail_platform_fan_register;
+ goto fail_platform_wd_register;
return 0;
-fail_platform_fan_register:
+fail_platform_wd_register:
+ while (--j >= 0)
+ platform_device_unregister(priv->pdev_wd[j]);
if (mlxplat_fan)
platform_device_unregister(priv->pdev_fan);
fail_platform_io_regs_register:
@@ -1946,6 +2168,8 @@ static void __exit mlxplat_exit(void)
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
int i;
+ for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0 ; i--)
+ platform_device_unregister(priv->pdev_wd[i]);
if (priv->pdev_fan)
platform_device_unregister(priv->pdev_fan);
if (priv->pdev_io_regs)
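The watchdog hook-up added above follows the usual register-an-array / unwind-in-reverse idiom: each successfully registered child is recorded, any failure tears the earlier ones down in reverse order, and module exit mirrors the same loop. A generic, hedged sketch of that idiom (all names below are made up for illustration; platform_device_unregister() tolerates the NULL slots left by skipped entries, but the guard keeps the sketch obviously safe):

#include <linux/err.h>
#include <linux/platform_device.h>

#define MAX_CHILDREN	2

static struct platform_device *children[MAX_CHILDREN];

static int register_children(struct device *parent, void *data[], size_t size)
{
	int i, err;

	for (i = 0; i < MAX_CHILDREN; i++) {
		if (!data[i])
			continue;	/* this slot is not used on this system */
		children[i] = platform_device_register_resndata(parent,
				"example-child", i, NULL, 0, data[i], size);
		if (IS_ERR(children[i])) {
			err = PTR_ERR(children[i]);
			goto unwind;
		}
	}
	return 0;

unwind:
	while (--i >= 0) {
		if (children[i])
			platform_device_unregister(children[i]);
	}
	return err;
}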
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 8f018b3f3cd4..c7039f52ad51 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
@@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
}
#endif /* CONFIG_DEBUG_FS */
+/*
+ * Some systems need one or more of their pmc_plt_clks to be
+ * marked as critical.
+ */
+static const struct dmi_system_id critclk_systems[] = {
+ {
+ .ident = "MPL CEC1x",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
+ },
+ },
+ { /*sentinel*/ }
+};
+
static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
const struct pmc_data *pmc_data)
{
struct platform_device *clkdev;
struct pmc_clk_data *clk_data;
+ const struct dmi_system_id *d = dmi_first_match(critclk_systems);
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
@@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
clk_data->base = pmc_regmap; /* offset is added by client */
clk_data->clks = pmc_data->clks;
+ if (d) {
+ clk_data->critical = true;
+ pr_info("%s critclks quirk enabled\n", d->ident);
+ }
clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
PLATFORM_DEVID_NONE,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 4bfbfa3f78e6..2058445fc456 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4424,14 +4424,16 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
}
return AE_OK;
}
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ return AE_OK;
+
default:
dprintk("Resource %d isn't an IRQ nor an IO port\n",
resource->type);
+ return AE_CTRL_TERMINATE;
- case ACPI_RESOURCE_TYPE_END_TAG:
- return AE_OK;
}
- return AE_CTRL_TERMINATE;
}
static int sony_pic_possible_resources(struct acpi_device *device)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 726341f2b638..71cfaf26efd1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -79,7 +79,7 @@
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
-#include <linux/pci_ids.h>
+#include <linux/pci.h>
#include <linux/power_supply.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -4212,7 +4212,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
known_ev = true;
break;
}
- /* fallthrough to default */
+ /* fallthrough - to default */
default:
known_ev = false;
}
@@ -4501,6 +4501,74 @@ static void bluetooth_exit(void)
bluetooth_shutdown();
}
+static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ {
+ .ident = "ThinkPad E485",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KU"),
+ },
+ },
+ {
+ .ident = "ThinkPad E585",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KV"),
+ },
+ },
+ {
+ .ident = "ThinkPad A285 - 20MW",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MW"),
+ },
+ },
+ {
+ .ident = "ThinkPad A285 - 20MX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MX"),
+ },
+ },
+ {
+ .ident = "ThinkPad A485 - 20MU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MU"),
+ },
+ },
+ {
+ .ident = "ThinkPad A485 - 20MV",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MV"),
+ },
+ },
+ {}
+};
+
+static const struct pci_device_id fwbug_cards_ids[] __initconst = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2526) },
+ {}
+};
+
+
+static int __init have_bt_fwbug(void)
+{
+ /*
+	 * Some AMD-based ThinkPads have a firmware bug: calling "GBDC"
+	 * causes bluetooth on Intel wireless cards to be blocked.
+ */
+ if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ FW_BUG "disable bluetooth subdriver for Intel cards\n");
+ return 1;
+ } else
+ return 0;
+}
+
static int __init bluetooth_init(struct ibm_init_struct *iibm)
{
int res;
@@ -4513,7 +4581,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
/* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
- tp_features.bluetooth = hkey_handle &&
+ tp_features.bluetooth = !have_bt_fwbug() && hkey_handle &&
acpi_evalf(hkey_handle, &status, "GBDC", "qd");
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
@@ -5808,7 +5876,7 @@ static int led_set_status(const unsigned int led,
return -EPERM;
if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
(1 << led), led_sled_arg1[ledstatus]))
- rc = -EIO;
+ return -EIO;
break;
case TPACPI_LED_OLD:
/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
@@ -5832,10 +5900,10 @@ static int led_set_status(const unsigned int led,
return -EPERM;
if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
led, led_led_arg1[ledstatus]))
- rc = -EIO;
+ return -EIO;
break;
default:
- rc = -ENXIO;
+ return -ENXIO;
}
if (!rc)
@@ -6249,8 +6317,8 @@ static int thermal_get_sensor(int idx, s32 *value)
t = TP_EC_THERMAL_TMP8;
idx -= 8;
}
- /* fallthrough */
#endif
+ /* fallthrough */
case TPACPI_THERMAL_TPEC_8:
if (idx <= 7) {
if (!acpi_ec_read(t + idx, &tmp))
@@ -9890,6 +9958,37 @@ invalid:
return '\0';
}
+static void find_new_ec_fwstr(const struct dmi_header *dm, void *private)
+{
+ char *ec_fw_string = (char *) private;
+ const char *dmi_data = (const char *)dm;
+ /*
+ * ThinkPad Embedded Controller Program Table on newer models
+ *
+ * Offset | Name | Width | Description
+ * ----------------------------------------------------
+ * 0x00 | Type | BYTE | 0x8C
+ * 0x01 | Length | BYTE |
+ * 0x02 | Handle | WORD | Varies
+ * 0x04 | Signature | BYTEx6 | ASCII for "LENOVO"
+ * 0x0A | OEM struct offset | BYTE | 0x0B
+ * 0x0B | OEM struct number | BYTE | 0x07, for this structure
+ * 0x0C | OEM struct revision | BYTE | 0x01, for this format
+ * 0x0D | ECP version ID | STR ID |
+ * 0x0E | ECP release date | STR ID |
+ */
+
+	/* Return if the data structure does not match */
+ if (dm->type != 140 || dm->length < 0x0F ||
+ memcmp(dmi_data + 4, "LENOVO", 6) != 0 ||
+ dmi_data[0x0A] != 0x0B || dmi_data[0x0B] != 0x07 ||
+ dmi_data[0x0C] != 0x01)
+ return;
+
+	/* fwstr is the first 8-byte string */
+ strncpy(ec_fw_string, dmi_data + 0x0F, 8);
+}
+
/* returns 0 - probe ok, or < 0 - probe error.
* Probe ok doesn't mean thinkpad found.
* On error, kfree() cleanup on tp->* is not performed, caller must do it */
@@ -9897,7 +9996,7 @@ static int __must_check __init get_thinkpad_model_data(
struct thinkpad_id_data *tp)
{
const struct dmi_device *dev = NULL;
- char ec_fw_string[18];
+ char ec_fw_string[18] = {0};
char const *s;
char t;
@@ -9937,20 +10036,25 @@ static int __must_check __init get_thinkpad_model_data(
ec_fw_string) == 1) {
ec_fw_string[sizeof(ec_fw_string) - 1] = 0;
ec_fw_string[strcspn(ec_fw_string, " ]")] = 0;
+ break;
+ }
+ }
- tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
- if (!tp->ec_version_str)
- return -ENOMEM;
+ /* Newer ThinkPads have different EC program info table */
+ if (!ec_fw_string[0])
+ dmi_walk(find_new_ec_fwstr, &ec_fw_string);
- t = tpacpi_parse_fw_id(ec_fw_string,
- &tp->ec_model, &tp->ec_release);
- if (t != 'H') {
- pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
- ec_fw_string);
- pr_notice("please report this to %s\n",
- TPACPI_MAIL);
- }
- break;
+ if (ec_fw_string[0]) {
+ tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
+ if (!tp->ec_version_str)
+ return -ENOMEM;
+
+ t = tpacpi_parse_fw_id(ec_fw_string,
+ &tp->ec_model, &tp->ec_release);
+ if (t != 'H') {
+ pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
+ ec_fw_string);
+ pr_notice("please report this to %s\n", TPACPI_MAIL);
}
}
@@ -10165,7 +10269,7 @@ MODULE_PARM_DESC(volume_mode,
module_param_named(volume_capabilities, volume_capabilities, uint, 0444);
MODULE_PARM_DESC(volume_capabilities,
- "Selects the mixer capabilites: 0=auto, 1=volume and mute, 2=mute only");
+ "Selects the mixer capabilities: 0=auto, 1=volume and mute, 2=mute only");
module_param_named(volume_control, volume_control_allowed, bool, 0444);
MODULE_PARM_DESC(volume_control,
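find_new_ec_fwstr() above is driven by dmi_walk(), which simply calls the supplied decoder once for every raw SMBIOS/DMI record and passes the private pointer through. A hedged, minimal sketch of the same mechanism that only counts the vendor-specific type-140 records instead of decoding them (the function names are illustrative):

#include <linux/dmi.h>

static void count_type140(const struct dmi_header *dm, void *private)
{
	int *count = private;

	if (dm->type == 140)	/* OEM-specific structure used by Lenovo */
		(*count)++;
}

static int how_many_type140(void)
{
	int count = 0;

	/* dmi_walk() returns a negative errno when no DMI table is available. */
	if (dmi_walk(count_type140, &count) < 0)
		return 0;

	return count;
}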
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 2d56ff7c8230..bd0856d2e825 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -249,6 +249,21 @@ static const struct ts_dmi_data jumper_ezpad_6_pro_data = {
.properties = jumper_ezpad_6_pro_props,
};
+static const struct property_entry jumper_ezpad_6_pro_b_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_6_pro_b_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_6_pro_b_props,
+};
+
static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
@@ -265,6 +280,23 @@ static const struct ts_dmi_data jumper_ezpad_mini3_data = {
.properties = jumper_ezpad_mini3_props,
};
+static const struct property_entry myria_my8307_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data myria_my8307_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = myria_my8307_props,
+};
+
static const struct property_entry onda_obook_20_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
@@ -674,6 +706,17 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Jumper EZpad 6 Pro B */
+ .driver_data = (void *)&jumper_ezpad_6_pro_b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
+ },
+ },
+ {
/* Jumper EZpad mini3 */
.driver_data = (void *)&jumper_ezpad_mini3_data,
.matches = {
@@ -691,6 +734,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Myria MY8307 */
+ .driver_data = (void *)&myria_my8307_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Complet Electro Serv"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MY8307"),
+ },
+ },
+ {
/* Onda oBook 20 Plus */
.driver_data = (void *)&onda_obook_20_plus_data,
.matches = {
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 803666ae3635..de99f371d362 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -458,7 +458,7 @@ void pnp_fixup_device(struct pnp_dev *dev)
for (f = pnp_fixups; *f->id; f++) {
if (!compare_pnp_id(dev->id, f->id))
continue;
- pnp_dbg(&dev->dev, "%s: calling %pF\n", f->id,
+ pnp_dbg(&dev->dev, "%s: calling %pS\n", f->id,
f->quirk_function);
f->quirk_function(dev);
}
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 2b686c55b717..e341cc5c0ea6 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -57,15 +57,21 @@
#define SHDW_WK_PIN(reg, cfg) ((reg) & AT91_SHDW_WKUPIS((cfg)->wkup_pin_input))
#define SHDW_RTCWK(reg, cfg) (((reg) >> ((cfg)->sr_rtcwk_shift)) & 0x1)
+#define SHDW_RTTWK(reg, cfg) (((reg) >> ((cfg)->sr_rttwk_shift)) & 0x1)
#define SHDW_RTCWKEN(cfg) (1 << ((cfg)->mr_rtcwk_shift))
+#define SHDW_RTTWKEN(cfg) (1 << ((cfg)->mr_rttwk_shift))
#define DBC_PERIOD_US(x) DIV_ROUND_UP_ULL((1000000 * (x)), \
SLOW_CLOCK_FREQ)
+#define SHDW_CFG_NOT_USED (32)
+
struct shdwc_config {
u8 wkup_pin_input;
u8 mr_rtcwk_shift;
+ u8 mr_rttwk_shift;
u8 sr_rtcwk_shift;
+ u8 sr_rttwk_shift;
};
struct shdwc {
@@ -104,6 +110,8 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
reason = "WKUP pin";
else if (SHDW_RTCWK(reg, shdw->cfg))
reason = "RTC";
+ else if (SHDW_RTTWK(reg, shdw->cfg))
+ reason = "RTT";
pr_info("AT91: Wake-Up source: %s\n", reason);
}
@@ -221,6 +229,9 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
mode |= SHDW_RTCWKEN(shdw->cfg);
+ if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
+ mode |= SHDW_RTTWKEN(shdw->cfg);
+
dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
@@ -231,13 +242,27 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
static const struct shdwc_config sama5d2_shdwc_config = {
.wkup_pin_input = 0,
.mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = SHDW_CFG_NOT_USED,
.sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = SHDW_CFG_NOT_USED,
+};
+
+static const struct shdwc_config sam9x60_shdwc_config = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = 16,
+ .sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = 4,
};
static const struct of_device_id at91_shdwc_of_match[] = {
{
.compatible = "atmel,sama5d2-shdwc",
.data = &sama5d2_shdwc_config,
+ },
+ {
+ .compatible = "microchip,sam9x60-shdwc",
+ .data = &sam9x60_shdwc_config,
}, {
/*sentinel*/
}
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index 7d0d269a0837..5a6bb638c331 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -27,6 +27,7 @@
struct syscon_reboot_context {
struct regmap *map;
u32 offset;
+ u32 value;
u32 mask;
struct notifier_block restart_handler;
};
@@ -39,7 +40,7 @@ static int syscon_restart_handle(struct notifier_block *this,
restart_handler);
/* Issue the reboot */
- regmap_write(ctx->map, ctx->offset, ctx->mask);
+ regmap_update_bits(ctx->map, ctx->offset, ctx->mask, ctx->value);
mdelay(1000);
@@ -51,6 +52,7 @@ static int syscon_reboot_probe(struct platform_device *pdev)
{
struct syscon_reboot_context *ctx;
struct device *dev = &pdev->dev;
+ int mask_err, value_err;
int err;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -64,8 +66,21 @@ static int syscon_reboot_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset))
return -EINVAL;
- if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+ value_err = of_property_read_u32(pdev->dev.of_node, "value", &ctx->value);
+ mask_err = of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask);
+ if (value_err && mask_err) {
+ dev_err(dev, "unable to read 'value' and 'mask'");
return -EINVAL;
+ }
+
+ if (value_err) {
+ /* support old binding */
+ ctx->value = ctx->mask;
+ ctx->mask = 0xFFFFFFFF;
+ } else if (mask_err) {
+		/* support value without mask */
+ ctx->mask = 0xFFFFFFFF;
+ }
ctx->restart_handler.notifier_call = syscon_restart_handle;
ctx->restart_handler.priority = 192;
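The fallback above keeps the old devicetree binding working because regmap_update_bits() with an all-ones mask degenerates into a plain write of the value: every bit of the register is covered by the mask, so the read-modify-write ends up storing the value unchanged, which is what regmap_write() did before. A hedged sketch of what the restart handler now effectively executes for the two binding flavours (the names are placeholders):

#include <linux/regmap.h>

static int issue_reboot(struct regmap *map, u32 offset, u32 mask, u32 value)
{
	/*
	 * New binding: "mask" and "value" both given -- only the masked bits
	 * change.  Old binding: only "mask" given -- the probe code copies it
	 * into value and widens mask to 0xFFFFFFFF, so this call writes the
	 * whole register, just like the previous regmap_write().
	 */
	return regmap_update_bits(map, offset, mask, value);
}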
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index e901b9879e7e..26dacdab03cc 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -169,6 +169,17 @@ config BATTERY_COLLIE
Say Y to enable support for the battery on the Sharp Zaurus
SL-5500 (collie) models.
+config BATTERY_INGENIC
+ tristate "Ingenic JZ47xx SoCs battery driver"
+ depends on MIPS || COMPILE_TEST
+ depends on INGENIC_ADC
+ help
+ Choose this option if you want to monitor battery status on
+ Ingenic JZ47xx SoC based devices.
+
+ This driver can also be built as a module. If so, the module will be
+ called ingenic-battery.
+
config BATTERY_IPAQ_MICRO
tristate "iPAQ Atmel Micro ASIC battery driver"
depends on MFD_IPAQ_MICRO
@@ -475,12 +486,12 @@ config CHARGER_MANAGER
runtime and in suspend-to-RAM by waking up the system periodically
with help of suspend_again support.
-config CHARGER_LTC3651
- tristate "LTC3651 charger"
+config CHARGER_LT3651
+ tristate "Analog Devices LT3651 charger"
depends on GPIOLIB
help
- Say Y to include support for the LTC3651 battery charger which reports
- its status via GPIO lines.
+ Say Y to include support for the Analog Devices (Linear Technology)
+ LT3651 battery charger which reports its status via GPIO lines.
config CHARGER_MAX14577
tristate "Maxim MAX14577/77836 battery charger driver"
@@ -499,6 +510,13 @@ config CHARGER_DETECTOR_MAX14656
Revision 1.2 and can be found e.g. in Kindle 4/5th generation
readers and certain LG devices.
+config CHARGER_MAX77650
+ tristate "Maxim MAX77650 battery charger driver"
+ depends on MFD_MAX77650
+ help
+ Say Y to enable support for the battery charger control of MAX77650
+ PMICs.
+
config CHARGER_MAX77693
tristate "Maxim MAX77693 battery charger driver"
depends on MFD_MAX77693
@@ -660,4 +678,14 @@ config FUEL_GAUGE_SC27XX
Say Y here to enable support for fuel gauge with SC27XX
PMIC chips.
+config CHARGER_UCS1002
+ tristate "Microchip UCS1002 USB Port Power Controller"
+ depends on I2C
+ depends on OF
+ depends on REGULATOR
+ select REGMAP_I2C
+ help
+ Say Y to enable support for Microchip UCS1002 Programmable
+ USB Port Power Controller with Charger Emulation.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index b731c2a9b695..f208273f9686 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
+obj-$(CONFIG_BATTERY_INGENIC) += ingenic-battery.o
obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
@@ -67,9 +68,10 @@ obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
-obj-$(CONFIG_CHARGER_LTC3651) += ltc3651-charger.o
+obj-$(CONFIG_CHARGER_LT3651) += lt3651-charger.o
obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
obj-$(CONFIG_CHARGER_DETECTOR_MAX14656) += max14656_charger_detector.o
+obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
@@ -87,3 +89,4 @@ obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o
+obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o
diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c
index 7b2b69916f48..f6a66979cbb5 100644
--- a/drivers/power/supply/ab8500_bmdata.c
+++ b/drivers/power/supply/ab8500_bmdata.c
@@ -508,6 +508,7 @@ int ab8500_bm_of_probe(struct device *dev,
btech = of_get_property(battery_node, "stericsson,battery-type", NULL);
if (!btech) {
dev_warn(dev, "missing property battery-name/type\n");
+ of_node_put(battery_node);
return -EINVAL;
}
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index f52fe77edb6f..d2b1255ee1cc 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -24,6 +24,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/iio/consumer.h>
+#include <linux/workqueue.h>
#define DRVNAME "axp20x-usb-power-supply"
@@ -36,16 +37,27 @@
#define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3)
#define AXP20X_VBUS_VHOLD_OFFSET 3
#define AXP20X_VBUS_CLIMIT_MASK 3
-#define AXP20X_VBUC_CLIMIT_900mA 0
-#define AXP20X_VBUC_CLIMIT_500mA 1
-#define AXP20X_VBUC_CLIMIT_100mA 2
-#define AXP20X_VBUC_CLIMIT_NONE 3
+#define AXP20X_VBUS_CLIMIT_900mA 0
+#define AXP20X_VBUS_CLIMIT_500mA 1
+#define AXP20X_VBUS_CLIMIT_100mA 2
+#define AXP20X_VBUS_CLIMIT_NONE 3
+
+#define AXP813_VBUS_CLIMIT_900mA 0
+#define AXP813_VBUS_CLIMIT_1500mA 1
+#define AXP813_VBUS_CLIMIT_2000mA 2
+#define AXP813_VBUS_CLIMIT_2500mA 3
#define AXP20X_ADC_EN1_VBUS_CURR BIT(2)
#define AXP20X_ADC_EN1_VBUS_VOLT BIT(3)
#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
+/*
+ * Note: do not raise the debounce time; we must report Vusb high within
+ * 100 ms, otherwise we get Vbus errors in musb.
+ */
+#define DEBOUNCE_TIME msecs_to_jiffies(50)
+
struct axp20x_usb_power {
struct device_node *np;
struct regmap *regmap;
@@ -53,6 +65,8 @@ struct axp20x_usb_power {
enum axp20x_variants axp20x_id;
struct iio_channel *vbus_v;
struct iio_channel *vbus_i;
+ struct delayed_work vbus_detect;
+ unsigned int old_status;
};
static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
@@ -64,6 +78,89 @@ static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static void axp20x_usb_power_poll_vbus(struct work_struct *work)
+{
+ struct axp20x_usb_power *power =
+ container_of(work, struct axp20x_usb_power, vbus_detect.work);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &val);
+ if (ret)
+ goto out;
+
+ val &= (AXP20X_PWR_STATUS_VBUS_PRESENT | AXP20X_PWR_STATUS_VBUS_USED);
+ if (val != power->old_status)
+ power_supply_changed(power->supply);
+
+ power->old_status = val;
+
+out:
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+}
+
+static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
+{
+ if (power->axp20x_id >= AXP221_ID)
+ return true;
+
+ return false;
+}
+
+static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
+{
+ unsigned int v;
+ int ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
+
+ if (ret)
+ return ret;
+
+ switch (v & AXP20X_VBUS_CLIMIT_MASK) {
+ case AXP20X_VBUS_CLIMIT_100mA:
+ if (power->axp20x_id == AXP221_ID)
+ *val = -1; /* No 100mA limit */
+ else
+ *val = 100000;
+ break;
+ case AXP20X_VBUS_CLIMIT_500mA:
+ *val = 500000;
+ break;
+ case AXP20X_VBUS_CLIMIT_900mA:
+ *val = 900000;
+ break;
+ case AXP20X_VBUS_CLIMIT_NONE:
+ *val = -1;
+ break;
+ }
+
+ return 0;
+}
+
+static int axp813_get_current_max(struct axp20x_usb_power *power, int *val)
+{
+ unsigned int v;
+ int ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
+
+ if (ret)
+ return ret;
+
+ switch (v & AXP20X_VBUS_CLIMIT_MASK) {
+ case AXP813_VBUS_CLIMIT_900mA:
+ *val = 900000;
+ break;
+ case AXP813_VBUS_CLIMIT_1500mA:
+ *val = 1500000;
+ break;
+ case AXP813_VBUS_CLIMIT_2000mA:
+ *val = 2000000;
+ break;
+ case AXP813_VBUS_CLIMIT_2500mA:
+ *val = 2500000;
+ break;
+ }
+ return 0;
+}
+
static int axp20x_usb_power_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
@@ -102,28 +199,9 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = ret * 1700; /* 1 step = 1.7 mV */
return 0;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
- if (ret)
- return ret;
-
- switch (v & AXP20X_VBUS_CLIMIT_MASK) {
- case AXP20X_VBUC_CLIMIT_100mA:
- if (power->axp20x_id == AXP221_ID)
- val->intval = -1; /* No 100mA limit */
- else
- val->intval = 100000;
- break;
- case AXP20X_VBUC_CLIMIT_500mA:
- val->intval = 500000;
- break;
- case AXP20X_VBUC_CLIMIT_900mA:
- val->intval = 900000;
- break;
- case AXP20X_VBUC_CLIMIT_NONE:
- val->intval = -1;
- break;
- }
- return 0;
+ if (power->axp20x_id == AXP813_ID)
+ return axp813_get_current_max(power, &val->intval);
+ return axp20x_get_current_max(power, &val->intval);
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
ret = iio_read_channel_processed(power->vbus_i,
@@ -214,6 +292,31 @@ static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
return -EINVAL;
}
+static int axp813_usb_power_set_current_max(struct axp20x_usb_power *power,
+ int intval)
+{
+ int val;
+
+ switch (intval) {
+ case 900000:
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_CLIMIT_MASK,
+ AXP813_VBUS_CLIMIT_900mA);
+ case 1500000:
+ case 2000000:
+ case 2500000:
+ val = (intval - 1000000) / 500000;
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_CLIMIT_MASK, val);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
int intval)
{
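The divide in axp813_usb_power_set_current_max() above maps the three larger current limits straight onto the AXP813_VBUS_CLIMIT_* register values defined earlier in this patch; a quick user-space check of that arithmetic:

#include <assert.h>

int main(void)
{
	assert((1500000 - 1000000) / 500000 == 1);	/* AXP813_VBUS_CLIMIT_1500mA */
	assert((2000000 - 1000000) / 500000 == 2);	/* AXP813_VBUS_CLIMIT_2000mA */
	assert((2500000 - 1000000) / 500000 == 3);	/* AXP813_VBUS_CLIMIT_2500mA */
	return 0;
}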
@@ -248,6 +351,9 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
return axp20x_usb_power_set_voltage_min(power, val->intval);
case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (power->axp20x_id == AXP813_ID)
+ return axp813_usb_power_set_current_max(power,
+ val->intval);
return axp20x_usb_power_set_current_max(power, val->intval);
default:
@@ -357,6 +463,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
+ platform_set_drvdata(pdev, power);
power->axp20x_id = (enum axp20x_variants)of_device_get_match_data(
&pdev->dev);
@@ -382,7 +489,8 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
usb_power_desc = &axp20x_usb_power_desc;
irq_names = axp20x_irq_names;
} else if (power->axp20x_id == AXP221_ID ||
- power->axp20x_id == AXP223_ID) {
+ power->axp20x_id == AXP223_ID ||
+ power->axp20x_id == AXP813_ID) {
usb_power_desc = &axp22x_usb_power_desc;
irq_names = axp22x_irq_names;
} else {
@@ -415,6 +523,19 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
irq_names[i], ret);
}
+ INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
+ if (axp20x_usb_vbus_needs_polling(power))
+ queue_delayed_work(system_wq, &power->vbus_detect, 0);
+
+ return 0;
+}
+
+static int axp20x_usb_power_remove(struct platform_device *pdev)
+{
+ struct axp20x_usb_power *power = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&power->vbus_detect);
+
return 0;
}
@@ -428,12 +549,16 @@ static const struct of_device_id axp20x_usb_power_match[] = {
}, {
.compatible = "x-powers,axp223-usb-power-supply",
.data = (void *)AXP223_ID,
+ }, {
+ .compatible = "x-powers,axp813-usb-power-supply",
+ .data = (void *)AXP813_ID,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
static struct platform_driver axp20x_usb_power_driver = {
.probe = axp20x_usb_power_probe,
+ .remove = axp20x_usb_power_remove,
.driver = {
.name = DRVNAME,
.of_match_table = axp20x_usb_power_match,
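The VBUS detection added in this file is the standard self-rescheduling delayed-work pattern: the handler re-arms itself with mod_delayed_work() and the remove path stops it with cancel_delayed_work_sync(), which also copes with a work item that keeps re-queueing itself. A generic, hedged sketch of the pattern (struct and function names are illustrative):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct poller {
	struct delayed_work work;
};

static void poll_fn(struct work_struct *work)
{
	struct poller *p = container_of(work, struct poller, work.work);

	/* ... sample the hardware and report any change ... */

	/* Re-arm from within the handler. */
	mod_delayed_work(system_wq, &p->work, msecs_to_jiffies(50));
}

static void poller_start(struct poller *p)
{
	INIT_DELAYED_WORK(&p->work, poll_fn);
	queue_delayed_work(system_wq, &p->work, 0);
}

static void poller_stop(struct poller *p)
{
	/* Waits for a running instance and cancels any pending re-queue. */
	cancel_delayed_work_sync(&p->work);
}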
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index f8c6da9277b3..00b961890a38 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -833,6 +833,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
pirq = platform_get_irq(info->pdev, i);
+ if (pirq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
+ return pirq;
+ }
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_warn(&info->pdev->dev,
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 9ff2461820d8..368281bc0d2b 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -686,6 +686,26 @@ intr_failed:
*/
static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
{
+ /* ACEPC T8 Cherry Trail Z8350 mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ },
+ {
+ /* ACEPC T11 Cherry Trail Z8350 mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ },
+ {
/* Intel Cherry Trail Compute Stick, Windows version */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 29b3a4056865..195c18c2f426 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1612,7 +1612,8 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
di->charge_design_full = bq27xxx_battery_read_dcap(di);
}
- if (di->cache.capacity != cache.capacity)
+ if ((di->cache.capacity != cache.capacity) ||
+ (di->cache.flags != cache.flags))
power_supply_changed(di->bat);
if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 2e8db5e6de0b..a6900aa0d2ed 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -1987,6 +1987,9 @@ static struct platform_driver charger_manager_driver = {
static int __init charger_manager_init(void)
{
cm_wq = create_freezable_workqueue("charger_manager");
+ if (unlikely(!cm_wq))
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
return platform_driver_register(&charger_manager_driver);
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 08d5037fd052..61d6447d1966 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -82,9 +82,9 @@ struct cpcap_battery_config {
};
struct cpcap_coulomb_counter_data {
- s32 sample; /* 24-bits */
+ s32 sample; /* 24 or 32 bits */
s32 accumulator;
- s16 offset; /* 10-bits */
+ s16 offset; /* 9 bits */
};
enum cpcap_battery_state {
@@ -213,7 +213,7 @@ static int cpcap_battery_get_current(struct cpcap_battery_ddata *ddata)
* TI or ST coulomb counter in the PMIC.
*/
static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset, u32 divider)
{
s64 acc;
@@ -221,8 +221,8 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
int avg_current;
u32 cc_lsb;
- sample &= 0xffffff; /* 24-bits, unsigned */
- offset &= 0x7ff; /* 10-bits, signed */
+ if (!divider)
+ return 0;
switch (ddata->vendor) {
case CPCAP_VENDOR_ST:
@@ -256,7 +256,7 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
/* 3600000μAms = 1μAh */
static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset)
{
return cpcap_battery_cc_raw_div(ddata, sample,
@@ -265,7 +265,7 @@ static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata,
}
static int cpcap_battery_cc_to_ua(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset)
{
return cpcap_battery_cc_raw_div(ddata, sample,
@@ -309,17 +309,19 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
/* Sample value CPCAP_REG_CCS1 & 2 */
ccd->sample = (buf[1] & 0x0fff) << 16;
ccd->sample |= buf[0];
+ if (ddata->vendor == CPCAP_VENDOR_TI)
+		ccd->sample = sign_extend32(ccd->sample, 23);
/* Accumulator value CPCAP_REG_CCA1 & 2 */
ccd->accumulator = ((s16)buf[3]) << 16;
ccd->accumulator |= buf[2];
- /* Offset value CPCAP_REG_CCO */
- ccd->offset = buf[5];
-
- /* Adjust offset based on mode value CPCAP_REG_CCM? */
- if (buf[4] >= 0x200)
- ccd->offset |= 0xfc00;
+ /*
+ * Coulomb counter calibration offset is CPCAP_REG_CCM,
+ * REG_CCO seems unused
+ */
+ ccd->offset = buf[4];
+ ccd->offset = sign_extend32(ccd->offset, 9);
return cpcap_battery_cc_to_uah(ddata,
ccd->sample,
@@ -474,11 +476,11 @@ static int cpcap_battery_get_property(struct power_supply *psy,
val->intval = ddata->config.info.voltage_min_design;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
- if (cached) {
+ sample = latest->cc.sample - previous->cc.sample;
+ if (!sample) {
val->intval = cpcap_battery_cc_get_avg_current(ddata);
break;
}
- sample = latest->cc.sample - previous->cc.sample;
accumulator = latest->cc.accumulator - previous->cc.accumulator;
val->intval = cpcap_battery_cc_to_ua(ddata, sample,
accumulator,
@@ -495,13 +497,13 @@ static int cpcap_battery_get_property(struct power_supply *psy,
val->intval = div64_s64(tmp, 100);
break;
case POWER_SUPPLY_PROP_POWER_AVG:
- if (cached) {
+ sample = latest->cc.sample - previous->cc.sample;
+ if (!sample) {
tmp = cpcap_battery_cc_get_avg_current(ddata);
tmp *= (latest->voltage / 10000);
val->intval = div64_s64(tmp, 100);
break;
}
- sample = latest->cc.sample - previous->cc.sample;
accumulator = latest->cc.accumulator - previous->cc.accumulator;
tmp = cpcap_battery_cc_to_ua(ddata, sample, accumulator,
latest->cc.offset);
@@ -559,11 +561,11 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
switch (d->action) {
case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
- if (latest->counter_uah >= 0)
+ if (latest->current_ua >= 0)
dev_warn(ddata->dev, "Battery low at 3.3V!\n");
break;
case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
- if (latest->counter_uah >= 0) {
+ if (latest->current_ua >= 0) {
dev_emerg(ddata->dev,
"Battery empty at 3.1V, powering off\n");
orderly_poweroff(true);
@@ -667,8 +669,9 @@ static int cpcap_battery_init_iio(struct cpcap_battery_ddata *ddata)
return 0;
out_err:
- dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
- error);
+ if (error != -EPROBE_DEFER)
+ dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
+ error);
return error;
}
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index c3ed7b476676..b4781b5d1e10 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -574,8 +574,9 @@ static int cpcap_charger_init_iio(struct cpcap_charger_ddata *ddata)
return 0;
out_err:
- dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
- error);
+ if (error != -EPROBE_DEFER)
+ dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
+ error);
return error;
}
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index ad969d9fc981..c2644a9fe80f 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL
+// SPDX-License-Identifier: GPL-2.0
/*
* Power supply driver for the goldfish emulator
*
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 7e4f11d5a230..f99e8f1eef23 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -29,11 +29,13 @@
struct gpio_charger {
unsigned int irq;
+ unsigned int charge_status_irq;
bool wakeup_enabled;
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct gpio_desc *gpiod;
+ struct gpio_desc *charge_status;
};
static irqreturn_t gpio_charger_irq(int irq, void *devid)
@@ -59,6 +61,12 @@ static int gpio_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
val->intval = gpiod_get_value_cansleep(gpio_charger->gpiod);
break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (gpiod_get_value_cansleep(gpio_charger->charge_status))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
default:
return -EINVAL;
}
@@ -93,8 +101,29 @@ static enum power_supply_type gpio_charger_get_type(struct device *dev)
return POWER_SUPPLY_TYPE_UNKNOWN;
}
+static int gpio_charger_get_irq(struct device *dev, void *dev_id,
+ struct gpio_desc *gpio)
+{
+ int ret, irq = gpiod_to_irq(gpio);
+
+ if (irq > 0) {
+ ret = devm_request_any_context_irq(dev, irq, gpio_charger_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ dev_name(dev),
+ dev_id);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to request irq: %d\n", ret);
+ irq = 0;
+ }
+ }
+
+ return irq;
+}
+
static enum power_supply_property gpio_charger_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS /* Must always be last in the array. */
};
static int gpio_charger_probe(struct platform_device *pdev)
@@ -104,8 +133,10 @@ static int gpio_charger_probe(struct platform_device *pdev)
struct power_supply_config psy_cfg = {};
struct gpio_charger *gpio_charger;
struct power_supply_desc *charger_desc;
+ struct gpio_desc *charge_status;
+ int charge_status_irq;
unsigned long flags;
- int irq, ret;
+ int ret;
if (!pdata && !dev->of_node) {
dev_err(dev, "No platform data\n");
@@ -151,9 +182,17 @@ static int gpio_charger_probe(struct platform_device *pdev)
return PTR_ERR(gpio_charger->gpiod);
}
+ charge_status = devm_gpiod_get_optional(dev, "charge-status", GPIOD_IN);
+ gpio_charger->charge_status = charge_status;
+ if (IS_ERR(gpio_charger->charge_status))
+ return PTR_ERR(gpio_charger->charge_status);
+
charger_desc = &gpio_charger->charger_desc;
charger_desc->properties = gpio_charger_properties;
charger_desc->num_properties = ARRAY_SIZE(gpio_charger_properties);
+ /* Remove POWER_SUPPLY_PROP_STATUS from the supported properties. */
+ if (!gpio_charger->charge_status)
+ charger_desc->num_properties -= 1;
charger_desc->get_property = gpio_charger_get_property;
psy_cfg.of_node = dev->of_node;
@@ -180,16 +219,12 @@ static int gpio_charger_probe(struct platform_device *pdev)
return ret;
}
- irq = gpiod_to_irq(gpio_charger->gpiod);
- if (irq > 0) {
- ret = devm_request_any_context_irq(dev, irq, gpio_charger_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(dev), gpio_charger->charger);
- if (ret < 0)
- dev_warn(dev, "Failed to request irq: %d\n", ret);
- else
- gpio_charger->irq = irq;
- }
+ gpio_charger->irq = gpio_charger_get_irq(dev, gpio_charger->charger,
+ gpio_charger->gpiod);
+
+ charge_status_irq = gpio_charger_get_irq(dev, gpio_charger->charger,
+ gpio_charger->charge_status);
+ gpio_charger->charge_status_irq = charge_status_irq;
platform_set_drvdata(pdev, gpio_charger);
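Keeping POWER_SUPPLY_PROP_STATUS at the end of gpio_charger_properties[] is what makes the num_properties adjustment above safe: when the optional charge-status GPIO is absent, the driver only has to shorten its view of the array by one entry. The same pattern in isolation, as a hedged sketch (the array and helper are illustrative):

#include <linux/kernel.h>
#include <linux/power_supply.h>

static enum power_supply_property example_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_STATUS,	/* optional -- must stay last */
};

static void example_fill_desc(struct power_supply_desc *desc, bool has_status)
{
	desc->properties = example_props;
	desc->num_properties = ARRAY_SIZE(example_props);
	/* Hide the trailing optional property when its GPIO is missing. */
	if (!has_status)
		desc->num_properties -= 1;
}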
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
new file mode 100644
index 000000000000..35816d4b3012
--- /dev/null
+++ b/drivers/power/supply/ingenic-battery.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Battery driver for the Ingenic JZ47xx SoCs
+ * Copyright (c) 2019 Artur Rojek <contact@artur-rojek.eu>
+ *
+ * based on drivers/power/supply/jz4740-battery.c
+ */
+
+#include <linux/iio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+
+struct ingenic_battery {
+ struct device *dev;
+ struct iio_channel *channel;
+ struct power_supply_desc desc;
+ struct power_supply *battery;
+ struct power_supply_battery_info info;
+};
+
+static int ingenic_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ingenic_battery *bat = power_supply_get_drvdata(psy);
+ struct power_supply_battery_info *info = &bat->info;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = iio_read_channel_processed(bat->channel, &val->intval);
+ val->intval *= 1000;
+ if (val->intval < info->voltage_min_design_uv)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (val->intval > info->voltage_max_design_uv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ return ret;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = iio_read_channel_processed(bat->channel, &val->intval);
+ val->intval *= 1000;
+ return ret;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = info->voltage_min_design_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = info->voltage_max_design_uv;
+ return 0;
+ default:
+ return -EINVAL;
+ };
+}
+
+/* Set the most appropriate IIO channel voltage reference scale
+ * based on the battery's max voltage.
+ */
+static int ingenic_battery_set_scale(struct ingenic_battery *bat)
+{
+ const int *scale_raw;
+ int scale_len, scale_type, best_idx = -1, best_mV, max_raw, i, ret;
+ u64 max_mV;
+
+ ret = iio_read_max_channel_raw(bat->channel, &max_raw);
+ if (ret) {
+ dev_err(bat->dev, "Unable to read max raw channel value\n");
+ return ret;
+ }
+
+ ret = iio_read_avail_channel_attribute(bat->channel, &scale_raw,
+ &scale_type, &scale_len,
+ IIO_CHAN_INFO_SCALE);
+ if (ret < 0) {
+ dev_err(bat->dev, "Unable to read channel avail scale\n");
+ return ret;
+ }
+ if (ret != IIO_AVAIL_LIST || scale_type != IIO_VAL_FRACTIONAL_LOG2)
+ return -EINVAL;
+
+ max_mV = bat->info.voltage_max_design_uv / 1000;
+
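+ /*
+ * The available scales come as (numerator, log2 divider) pairs, so a
+ * candidate covers max_raw * scale_raw[i] >> scale_raw[i + 1]
+ * millivolts at full scale; pick the smallest scale that still covers
+ * the battery's maximum design voltage.
+ */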
+ for (i = 0; i < scale_len; i += 2) {
+ u64 scale_mV = (max_raw * scale_raw[i]) >> scale_raw[i + 1];
+
+ if (scale_mV < max_mV)
+ continue;
+
+ if (best_idx >= 0 && scale_mV > best_mV)
+ continue;
+
+ best_mV = scale_mV;
+ best_idx = i;
+ }
+
+ if (best_idx < 0) {
+ dev_err(bat->dev, "Unable to find matching voltage scale\n");
+ return -EINVAL;
+ }
+
+ return iio_write_channel_attribute(bat->channel,
+ scale_raw[best_idx],
+ scale_raw[best_idx + 1],
+ IIO_CHAN_INFO_SCALE);
+}
+
+static enum power_supply_property ingenic_battery_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+};
+
+static int ingenic_battery_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_battery *bat;
+ struct power_supply_config psy_cfg = {};
+ struct power_supply_desc *desc;
+ int ret;
+
+ bat = devm_kzalloc(dev, sizeof(*bat), GFP_KERNEL);
+ if (!bat)
+ return -ENOMEM;
+
+ bat->dev = dev;
+ bat->channel = devm_iio_channel_get(dev, "battery");
+ if (IS_ERR(bat->channel))
+ return PTR_ERR(bat->channel);
+
+ desc = &bat->desc;
+ desc->name = "jz-battery";
+ desc->type = POWER_SUPPLY_TYPE_BATTERY;
+ desc->properties = ingenic_battery_properties;
+ desc->num_properties = ARRAY_SIZE(ingenic_battery_properties);
+ desc->get_property = ingenic_battery_get_property;
+ psy_cfg.drv_data = bat;
+ psy_cfg.of_node = dev->of_node;
+
+ bat->battery = devm_power_supply_register(dev, desc, &psy_cfg);
+ if (IS_ERR(bat->battery)) {
+ dev_err(dev, "Unable to register battery\n");
+ return PTR_ERR(bat->battery);
+ }
+
+ ret = power_supply_get_battery_info(bat->battery, &bat->info);
+ if (ret) {
+ dev_err(dev, "Unable to get battery info: %d\n", ret);
+ return ret;
+ }
+ if (bat->info.voltage_min_design_uv < 0) {
+ dev_err(dev, "Unable to get voltage min design\n");
+ return bat->info.voltage_min_design_uv;
+ }
+ if (bat->info.voltage_max_design_uv < 0) {
+ dev_err(dev, "Unable to get voltage max design\n");
+ return bat->info.voltage_max_design_uv;
+ }
+
+ return ingenic_battery_set_scale(bat);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ingenic_battery_of_match[] = {
+ { .compatible = "ingenic,jz4740-battery", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ingenic_battery_of_match);
+#endif
+
+static struct platform_driver ingenic_battery_driver = {
+ .driver = {
+ .name = "ingenic-battery",
+ .of_match_table = of_match_ptr(ingenic_battery_of_match),
+ },
+ .probe = ingenic_battery_probe,
+};
+module_platform_driver(ingenic_battery_driver);
+
+MODULE_DESCRIPTION("Battery driver for Ingenic JZ47xx SoCs");
+MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/ltc3651-charger.c b/drivers/power/supply/lt3651-charger.c
index eea63ff211c4..8de500ffad95 100644
--- a/drivers/power/supply/ltc3651-charger.c
+++ b/drivers/power/supply/lt3651-charger.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
+ * Driver for Analog Devices (Linear Technology) LT3651 charger IC.
* Copyright (C) 2017, Topic Embedded Products
- * Driver for LTC3651 charger IC.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/device.h>
@@ -19,7 +15,7 @@
#include <linux/slab.h>
#include <linux/of.h>
-struct ltc3651_charger {
+struct lt3651_charger {
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct gpio_desc *acpr_gpio;
@@ -27,7 +23,7 @@ struct ltc3651_charger {
struct gpio_desc *chrg_gpio;
};
-static irqreturn_t ltc3651_charger_irq(int irq, void *devid)
+static irqreturn_t lt3651_charger_irq(int irq, void *devid)
{
struct power_supply *charger = devid;
@@ -36,37 +32,37 @@ static irqreturn_t ltc3651_charger_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static inline struct ltc3651_charger *psy_to_ltc3651_charger(
+static inline struct lt3651_charger *psy_to_lt3651_charger(
struct power_supply *psy)
{
return power_supply_get_drvdata(psy);
}
-static int ltc3651_charger_get_property(struct power_supply *psy,
+static int lt3651_charger_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
- struct ltc3651_charger *ltc3651_charger = psy_to_ltc3651_charger(psy);
+ struct lt3651_charger *lt3651_charger = psy_to_lt3651_charger(psy);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (!ltc3651_charger->chrg_gpio) {
+ if (!lt3651_charger->chrg_gpio) {
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
break;
}
- if (gpiod_get_value(ltc3651_charger->chrg_gpio))
+ if (gpiod_get_value(lt3651_charger->chrg_gpio))
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = gpiod_get_value(ltc3651_charger->acpr_gpio);
+ val->intval = gpiod_get_value(lt3651_charger->acpr_gpio);
break;
case POWER_SUPPLY_PROP_HEALTH:
- if (!ltc3651_charger->fault_gpio) {
+ if (!lt3651_charger->fault_gpio) {
val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
break;
}
- if (!gpiod_get_value(ltc3651_charger->fault_gpio)) {
+ if (!gpiod_get_value(lt3651_charger->fault_gpio)) {
val->intval = POWER_SUPPLY_HEALTH_GOOD;
break;
}
@@ -74,11 +70,11 @@ static int ltc3651_charger_get_property(struct power_supply *psy,
* If the fault pin is active, the chrg pin explains the type
* of failure.
*/
- if (!ltc3651_charger->chrg_gpio) {
+ if (!lt3651_charger->chrg_gpio) {
val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
break;
}
- val->intval = gpiod_get_value(ltc3651_charger->chrg_gpio) ?
+ val->intval = gpiod_get_value(lt3651_charger->chrg_gpio) ?
POWER_SUPPLY_HEALTH_OVERHEAT :
POWER_SUPPLY_HEALTH_DEAD;
break;
@@ -89,59 +85,59 @@ static int ltc3651_charger_get_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property ltc3651_charger_properties[] = {
+static enum power_supply_property lt3651_charger_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_HEALTH,
};
-static int ltc3651_charger_probe(struct platform_device *pdev)
+static int lt3651_charger_probe(struct platform_device *pdev)
{
struct power_supply_config psy_cfg = {};
- struct ltc3651_charger *ltc3651_charger;
+ struct lt3651_charger *lt3651_charger;
struct power_supply_desc *charger_desc;
int ret;
- ltc3651_charger = devm_kzalloc(&pdev->dev, sizeof(*ltc3651_charger),
+ lt3651_charger = devm_kzalloc(&pdev->dev, sizeof(*lt3651_charger),
GFP_KERNEL);
- if (!ltc3651_charger)
+ if (!lt3651_charger)
return -ENOMEM;
- ltc3651_charger->acpr_gpio = devm_gpiod_get(&pdev->dev,
+ lt3651_charger->acpr_gpio = devm_gpiod_get(&pdev->dev,
"lltc,acpr", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->acpr_gpio)) {
- ret = PTR_ERR(ltc3651_charger->acpr_gpio);
+ if (IS_ERR(lt3651_charger->acpr_gpio)) {
+ ret = PTR_ERR(lt3651_charger->acpr_gpio);
dev_err(&pdev->dev, "Failed to acquire acpr GPIO: %d\n", ret);
return ret;
}
- ltc3651_charger->fault_gpio = devm_gpiod_get_optional(&pdev->dev,
+ lt3651_charger->fault_gpio = devm_gpiod_get_optional(&pdev->dev,
"lltc,fault", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->fault_gpio)) {
- ret = PTR_ERR(ltc3651_charger->fault_gpio);
+ if (IS_ERR(lt3651_charger->fault_gpio)) {
+ ret = PTR_ERR(lt3651_charger->fault_gpio);
dev_err(&pdev->dev, "Failed to acquire fault GPIO: %d\n", ret);
return ret;
}
- ltc3651_charger->chrg_gpio = devm_gpiod_get_optional(&pdev->dev,
+ lt3651_charger->chrg_gpio = devm_gpiod_get_optional(&pdev->dev,
"lltc,chrg", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->chrg_gpio)) {
- ret = PTR_ERR(ltc3651_charger->chrg_gpio);
+ if (IS_ERR(lt3651_charger->chrg_gpio)) {
+ ret = PTR_ERR(lt3651_charger->chrg_gpio);
dev_err(&pdev->dev, "Failed to acquire chrg GPIO: %d\n", ret);
return ret;
}
- charger_desc = &ltc3651_charger->charger_desc;
+ charger_desc = &lt3651_charger->charger_desc;
charger_desc->name = pdev->dev.of_node->name;
charger_desc->type = POWER_SUPPLY_TYPE_MAINS;
- charger_desc->properties = ltc3651_charger_properties;
- charger_desc->num_properties = ARRAY_SIZE(ltc3651_charger_properties);
- charger_desc->get_property = ltc3651_charger_get_property;
+ charger_desc->properties = lt3651_charger_properties;
+ charger_desc->num_properties = ARRAY_SIZE(lt3651_charger_properties);
+ charger_desc->get_property = lt3651_charger_get_property;
psy_cfg.of_node = pdev->dev.of_node;
- psy_cfg.drv_data = ltc3651_charger;
+ psy_cfg.drv_data = lt3651_charger;
- ltc3651_charger->charger = devm_power_supply_register(&pdev->dev,
+ lt3651_charger->charger = devm_power_supply_register(&pdev->dev,
charger_desc, &psy_cfg);
- if (IS_ERR(ltc3651_charger->charger)) {
- ret = PTR_ERR(ltc3651_charger->charger);
+ if (IS_ERR(lt3651_charger->charger)) {
+ ret = PTR_ERR(lt3651_charger->charger);
dev_err(&pdev->dev, "Failed to register power supply: %d\n",
ret);
return ret;
@@ -152,59 +148,60 @@ static int ltc3651_charger_probe(struct platform_device *pdev)
* support IRQs on these pins, userspace will have to poll the sysfs
* files manually.
*/
- if (ltc3651_charger->acpr_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->acpr_gpio);
+ if (lt3651_charger->acpr_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->acpr_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request acpr irq\n");
}
- if (ltc3651_charger->fault_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->fault_gpio);
+ if (lt3651_charger->fault_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->fault_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request fault irq\n");
}
- if (ltc3651_charger->chrg_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->chrg_gpio);
+ if (lt3651_charger->chrg_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->chrg_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request chrg irq\n");
}
- platform_set_drvdata(pdev, ltc3651_charger);
+ platform_set_drvdata(pdev, lt3651_charger);
return 0;
}
-static const struct of_device_id ltc3651_charger_match[] = {
- { .compatible = "lltc,ltc3651-charger" },
+static const struct of_device_id lt3651_charger_match[] = {
+ { .compatible = "lltc,ltc3651-charger" }, /* DEPRECATED */
+ { .compatible = "lltc,lt3651-charger" },
{ }
};
-MODULE_DEVICE_TABLE(of, ltc3651_charger_match);
+MODULE_DEVICE_TABLE(of, lt3651_charger_match);
-static struct platform_driver ltc3651_charger_driver = {
- .probe = ltc3651_charger_probe,
+static struct platform_driver lt3651_charger_driver = {
+ .probe = lt3651_charger_probe,
.driver = {
- .name = "ltc3651-charger",
- .of_match_table = ltc3651_charger_match,
+ .name = "lt3651-charger",
+ .of_match_table = lt3651_charger_match,
},
};
-module_platform_driver(ltc3651_charger_driver);
+module_platform_driver(lt3651_charger_driver);
MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
-MODULE_DESCRIPTION("Driver for LTC3651 charger");
+MODULE_DESCRIPTION("Driver for LT3651 charger");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ltc3651-charger");
+MODULE_ALIAS("platform:lt3651-charger");
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index b91b1d2999dc..9e6472834e37 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -240,6 +240,14 @@ static enum power_supply_property max14656_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static void stop_irq_work(void *data)
+{
+ struct max14656_chip *chip = data;
+
+ cancel_delayed_work_sync(&chip->irq_work);
+}
+
static int max14656_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -278,7 +286,19 @@ static int max14656_probe(struct i2c_client *client,
if (ret)
return -ENODEV;
+ chip->detect_psy = devm_power_supply_register(dev,
+ &chip->psy_desc, &psy_cfg);
+ if (IS_ERR(chip->detect_psy)) {
+ dev_err(dev, "power_supply_register failed\n");
+ return -EINVAL;
+ }
+
INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
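+ /* Ensure the IRQ worker is cancelled when the driver is detached. */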
+ ret = devm_add_action(dev, stop_irq_work, chip);
+ if (ret) {
+ dev_err(dev, "devm_add_action %d failed\n", ret);
+ return ret;
+ }
ret = devm_request_irq(dev, chip->irq, max14656_irq,
IRQF_TRIGGER_FALLING,
@@ -289,13 +309,6 @@ static int max14656_probe(struct i2c_client *client,
}
enable_irq_wake(chip->irq);
- chip->detect_psy = devm_power_supply_register(dev,
- &chip->psy_desc, &psy_cfg);
- if (IS_ERR(chip->detect_psy)) {
- dev_err(dev, "power_supply_register failed\n");
- return -EINVAL;
- }
-
schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000));
return 0;
diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c
new file mode 100644
index 000000000000..e34714cb05ec
--- /dev/null
+++ b/drivers/power/supply/max77650-charger.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// Battery charger driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+#define MAX77650_CHARGER_ENABLED BIT(0)
+#define MAX77650_CHARGER_DISABLED 0x00
+#define MAX77650_CHARGER_CHG_EN_MASK BIT(0)
+
+#define MAX77650_CHG_DETAILS_MASK GENMASK(7, 4)
+#define MAX77650_CHG_DETAILS_BITS(_reg) \
+ (((_reg) & MAX77650_CHG_DETAILS_MASK) >> 4)
+
+/* Charger is OFF. */
+#define MAX77650_CHG_OFF 0x00
+/* Charger is in prequalification mode. */
+#define MAX77650_CHG_PREQ 0x01
+/* Charger is in fast-charge constant current mode. */
+#define MAX77650_CHG_ON_CURR 0x02
+/* Charger is in JEITA modified fast-charge constant-current mode. */
+#define MAX77650_CHG_ON_CURR_JEITA 0x03
+/* Charger is in fast-charge constant-voltage mode. */
+#define MAX77650_CHG_ON_VOLT 0x04
+/* Charger is in JEITA modified fast-charge constant-voltage mode. */
+#define MAX77650_CHG_ON_VOLT_JEITA 0x05
+/* Charger is in top-off mode. */
+#define MAX77650_CHG_ON_TOPOFF 0x06
+/* Charger is in JEITA modified top-off mode. */
+#define MAX77650_CHG_ON_TOPOFF_JEITA 0x07
+/* Charger is done. */
+#define MAX77650_CHG_DONE 0x08
+/* Charger is JEITA modified done. */
+#define MAX77650_CHG_DONE_JEITA 0x09
+/* Charger is suspended due to a prequalification timer fault. */
+#define MAX77650_CHG_SUSP_PREQ_TIM_FAULT 0x0a
+/* Charger is suspended due to a fast-charge timer fault. */
+#define MAX77650_CHG_SUSP_FAST_CHG_TIM_FAULT 0x0b
+/* Charger is suspended due to a battery temperature fault. */
+#define MAX77650_CHG_SUSP_BATT_TEMP_FAULT 0x0c
+
+#define MAX77650_CHGIN_DETAILS_MASK GENMASK(3, 2)
+#define MAX77650_CHGIN_DETAILS_BITS(_reg) \
+ (((_reg) & MAX77650_CHGIN_DETAILS_MASK) >> 2)
+
+#define MAX77650_CHGIN_UNDERVOLTAGE_LOCKOUT 0x00
+#define MAX77650_CHGIN_OVERVOLTAGE_LOCKOUT 0x01
+#define MAX77650_CHGIN_OKAY 0x11
+
+#define MAX77650_CHARGER_CHG_MASK BIT(1)
+#define MAX77650_CHARGER_CHG_CHARGING(_reg) \
+ (((_reg) & MAX77650_CHARGER_CHG_MASK) > 1)
+
+#define MAX77650_CHARGER_VCHGIN_MIN_MASK 0xc0
+#define MAX77650_CHARGER_VCHGIN_MIN_SHIFT(_val) ((_val) << 5)
+
+#define MAX77650_CHARGER_ICHGIN_LIM_MASK 0x1c
+#define MAX77650_CHARGER_ICHGIN_LIM_SHIFT(_val) ((_val) << 2)
+
+struct max77650_charger_data {
+ struct regmap *map;
+ struct device *dev;
+};
+
+static enum power_supply_property max77650_charger_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_TYPE
+};
+
+static const unsigned int max77650_charger_vchgin_min_table[] = {
+ 4000000, 4100000, 4200000, 4300000, 4400000, 4500000, 4600000, 4700000
+};
+
+static const unsigned int max77650_charger_ichgin_lim_table[] = {
+ 95000, 190000, 285000, 380000, 475000
+};
+
+static int max77650_charger_set_vchgin_min(struct max77650_charger_data *chg,
+ unsigned int val)
+{
+ int i, rv;
+
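+ /*
+ * Only values present in the lookup table are accepted; the table
+ * index doubles as the register field value.
+ */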
+ for (i = 0; i < ARRAY_SIZE(max77650_charger_vchgin_min_table); i++) {
+ if (val == max77650_charger_vchgin_min_table[i]) {
+ rv = regmap_update_bits(chg->map,
+ MAX77650_REG_CNFG_CHG_B,
+ MAX77650_CHARGER_VCHGIN_MIN_MASK,
+ MAX77650_CHARGER_VCHGIN_MIN_SHIFT(i));
+ if (rv)
+ return rv;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int max77650_charger_set_ichgin_lim(struct max77650_charger_data *chg,
+ unsigned int val)
+{
+ int i, rv;
+
+ for (i = 0; i < ARRAY_SIZE(max77650_charger_ichgin_lim_table); i++) {
+ if (val == max77650_charger_ichgin_lim_table[i]) {
+ rv = regmap_update_bits(chg->map,
+ MAX77650_REG_CNFG_CHG_B,
+ MAX77650_CHARGER_ICHGIN_LIM_MASK,
+ MAX77650_CHARGER_ICHGIN_LIM_SHIFT(i));
+ if (rv)
+ return rv;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int max77650_charger_enable(struct max77650_charger_data *chg)
+{
+ int rv;
+
+ rv = regmap_update_bits(chg->map,
+ MAX77650_REG_CNFG_CHG_B,
+ MAX77650_CHARGER_CHG_EN_MASK,
+ MAX77650_CHARGER_ENABLED);
+ if (rv)
+ dev_err(chg->dev, "unable to enable the charger: %d\n", rv);
+
+ return rv;
+}
+
+static int max77650_charger_disable(struct max77650_charger_data *chg)
+{
+ int rv;
+
+ rv = regmap_update_bits(chg->map,
+ MAX77650_REG_CNFG_CHG_B,
+ MAX77650_CHARGER_CHG_EN_MASK,
+ MAX77650_CHARGER_DISABLED);
+ if (rv)
+ dev_err(chg->dev, "unable to disable the charger: %d\n", rv);
+
+ return rv;
+}
+
+static irqreturn_t max77650_charger_check_status(int irq, void *data)
+{
+ struct max77650_charger_data *chg = data;
+ int rv, reg;
+
+ rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg);
+ if (rv) {
+ dev_err(chg->dev,
+ "unable to read the charger status: %d\n", rv);
+ return IRQ_HANDLED;
+ }
+
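+ /*
+ * Disable charging while the input is in under- or over-voltage
+ * lockout and re-enable it once the input is reported okay again.
+ */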
+ switch (MAX77650_CHGIN_DETAILS_BITS(reg)) {
+ case MAX77650_CHGIN_UNDERVOLTAGE_LOCKOUT:
+ dev_err(chg->dev, "undervoltage lockout detected, disabling charger\n");
+ max77650_charger_disable(chg);
+ break;
+ case MAX77650_CHGIN_OVERVOLTAGE_LOCKOUT:
+ dev_err(chg->dev, "overvoltage lockout detected, disabling charger\n");
+ max77650_charger_disable(chg);
+ break;
+ case MAX77650_CHGIN_OKAY:
+ max77650_charger_enable(chg);
+ break;
+ default:
+ /* May be 0x10 - debouncing */
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int max77650_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max77650_charger_data *chg = power_supply_get_drvdata(psy);
+ int rv, reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg);
+ if (rv)
+ return rv;
+
+ if (MAX77650_CHARGER_CHG_CHARGING(reg)) {
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ }
+
+ switch (MAX77650_CHG_DETAILS_BITS(reg)) {
+ case MAX77650_CHG_OFF:
+ case MAX77650_CHG_SUSP_PREQ_TIM_FAULT:
+ case MAX77650_CHG_SUSP_FAST_CHG_TIM_FAULT:
+ case MAX77650_CHG_SUSP_BATT_TEMP_FAULT:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case MAX77650_CHG_PREQ:
+ case MAX77650_CHG_ON_CURR:
+ case MAX77650_CHG_ON_CURR_JEITA:
+ case MAX77650_CHG_ON_VOLT:
+ case MAX77650_CHG_ON_VOLT_JEITA:
+ case MAX77650_CHG_ON_TOPOFF:
+ case MAX77650_CHG_ON_TOPOFF_JEITA:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case MAX77650_CHG_DONE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg);
+ if (rv)
+ return rv;
+
+ val->intval = MAX77650_CHARGER_CHG_CHARGING(reg);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg);
+ if (rv)
+ return rv;
+
+ if (!MAX77650_CHARGER_CHG_CHARGING(reg)) {
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ }
+
+ switch (MAX77650_CHG_DETAILS_BITS(reg)) {
+ case MAX77650_CHG_PREQ:
+ case MAX77650_CHG_ON_CURR:
+ case MAX77650_CHG_ON_CURR_JEITA:
+ case MAX77650_CHG_ON_VOLT:
+ case MAX77650_CHG_ON_VOLT_JEITA:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case MAX77650_CHG_ON_TOPOFF:
+ case MAX77650_CHG_ON_TOPOFF_JEITA:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct power_supply_desc max77650_battery_desc = {
+ .name = "max77650",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .get_property = max77650_charger_get_property,
+ .properties = max77650_charger_properties,
+ .num_properties = ARRAY_SIZE(max77650_charger_properties),
+};
+
+static int max77650_charger_probe(struct platform_device *pdev)
+{
+ struct power_supply_config pscfg = {};
+ struct max77650_charger_data *chg;
+ struct power_supply *battery;
+ struct device *dev, *parent;
+ int rv, chg_irq, chgin_irq;
+ unsigned int prop;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+
+ chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL);
+ if (!chg)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, chg);
+
+ chg->map = dev_get_regmap(parent, NULL);
+ if (!chg->map)
+ return -ENODEV;
+
+ chg->dev = dev;
+
+ pscfg.of_node = dev->of_node;
+ pscfg.drv_data = chg;
+
+ chg_irq = platform_get_irq_byname(pdev, "CHG");
+ if (chg_irq < 0)
+ return chg_irq;
+
+ chgin_irq = platform_get_irq_byname(pdev, "CHGIN");
+ if (chgin_irq < 0)
+ return chgin_irq;
+
+ rv = devm_request_any_context_irq(dev, chg_irq,
+ max77650_charger_check_status,
+ IRQF_ONESHOT, "chg", chg);
+ if (rv < 0)
+ return rv;
+
+ rv = devm_request_any_context_irq(dev, chgin_irq,
+ max77650_charger_check_status,
+ IRQF_ONESHOT, "chgin", chg);
+ if (rv < 0)
+ return rv;
+
+ battery = devm_power_supply_register(dev,
+ &max77650_battery_desc, &pscfg);
+ if (IS_ERR(battery))
+ return PTR_ERR(battery);
+
+ rv = of_property_read_u32(dev->of_node,
+ "input-voltage-min-microvolt", &prop);
+ if (rv == 0) {
+ rv = max77650_charger_set_vchgin_min(chg, prop);
+ if (rv)
+ return rv;
+ }
+
+ rv = of_property_read_u32(dev->of_node,
+ "input-current-limit-microamp", &prop);
+ if (rv == 0) {
+ rv = max77650_charger_set_ichgin_lim(chg, prop);
+ if (rv)
+ return rv;
+ }
+
+ return max77650_charger_enable(chg);
+}
+
+static int max77650_charger_remove(struct platform_device *pdev)
+{
+ struct max77650_charger_data *chg = platform_get_drvdata(pdev);
+
+ return max77650_charger_disable(chg);
+}
+
+static struct platform_driver max77650_charger_driver = {
+ .driver = {
+ .name = "max77650-charger",
+ },
+ .probe = max77650_charger_probe,
+ .remove = max77650_charger_remove,
+};
+module_platform_driver(max77650_charger_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 charger driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 5a97e42a3547..7720e4c2ac0b 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/jiffies.h>
@@ -52,6 +53,14 @@
#define BAT_ADDR_MFR_TYPE 0x5F
+struct olpc_battery_data {
+ struct power_supply *olpc_ac;
+ struct power_supply *olpc_bat;
+ char bat_serial[17];
+ bool new_proto;
+ bool little_endian;
+};
+
/*********************************************************************
* Power
*********************************************************************/
@@ -90,13 +99,10 @@ static const struct power_supply_desc olpc_ac_desc = {
.get_property = olpc_ac_get_prop,
};
-static struct power_supply *olpc_ac;
-
-static char bat_serial[17]; /* Ick */
-
-static int olpc_bat_get_status(union power_supply_propval *val, uint8_t ec_byte)
+static int olpc_bat_get_status(struct olpc_battery_data *data,
+ union power_supply_propval *val, uint8_t ec_byte)
{
- if (olpc_platform_info.ecver > 0x44) {
+ if (data->new_proto) {
if (ec_byte & (BAT_STAT_CHARGING | BAT_STAT_TRICKLE))
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (ec_byte & BAT_STAT_DISCHARGING)
@@ -318,6 +324,14 @@ static int olpc_bat_get_voltage_max_design(union power_supply_propval *val)
return ret;
}
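+/*
+ * The EC on the XO-1.75 reports 16-bit values little-endian; the ECs on
+ * the earlier models report them big-endian.
+ */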
+static u16 ecword_to_cpu(struct olpc_battery_data *data, u16 ec_word)
+{
+ if (data->little_endian)
+ return le16_to_cpu((__force __le16)ec_word);
+ else
+ return be16_to_cpu((__force __be16)ec_word);
+}
+
/*********************************************************************
* Battery properties
*********************************************************************/
@@ -325,8 +339,9 @@ static int olpc_bat_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct olpc_battery_data *data = power_supply_get_drvdata(psy);
int ret = 0;
- __be16 ec_word;
+ u16 ec_word;
uint8_t ec_byte;
__be64 ser_buf;
@@ -346,7 +361,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- ret = olpc_bat_get_status(val, ec_byte);
+ ret = olpc_bat_get_status(data, val, ec_byte);
if (ret)
return ret;
break;
@@ -389,7 +404,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
+ val->intval = ecword_to_cpu(data, ec_word) * 9760L / 32;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_NOW:
@@ -397,7 +412,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
+ val->intval = ecword_to_cpu(data, ec_word) * 15625L / 120;
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
@@ -428,29 +443,29 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
+ val->intval = ecword_to_cpu(data, ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
+ val->intval = (int)ecword_to_cpu(data, ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
+ val->intval = ecword_to_cpu(data, ec_word) * 6250 / 15;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
if (ret)
return ret;
- sprintf(bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf));
- val->strval = bat_serial;
+ sprintf(data->bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf));
+ val->strval = data->bat_serial;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
ret = olpc_bat_get_voltage_max_design(val);
@@ -536,7 +551,7 @@ static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
return count;
}
-static const struct bin_attribute olpc_bat_eeprom = {
+static struct bin_attribute olpc_bat_eeprom = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
@@ -560,7 +575,7 @@ static ssize_t olpc_bat_error_read(struct device *dev,
return sprintf(buf, "%d\n", ec_byte);
}
-static const struct device_attribute olpc_bat_error = {
+static struct device_attribute olpc_bat_error = {
.attr = {
.name = "error",
.mode = S_IRUGO,
@@ -568,6 +583,27 @@ static const struct device_attribute olpc_bat_error = {
.show = olpc_bat_error_read,
};
+static struct attribute *olpc_bat_sysfs_attrs[] = {
+ &olpc_bat_error.attr,
+ NULL
+};
+
+static struct bin_attribute *olpc_bat_sysfs_bin_attrs[] = {
+ &olpc_bat_eeprom,
+ NULL
+};
+
+static const struct attribute_group olpc_bat_sysfs_group = {
+ .attrs = olpc_bat_sysfs_attrs,
+ .bin_attrs = olpc_bat_sysfs_bin_attrs,
+};
+
+static const struct attribute_group *olpc_bat_sysfs_groups[] = {
+ &olpc_bat_sysfs_group,
+ NULL
+};
+
/*********************************************************************
* Initialisation
*********************************************************************/
@@ -578,17 +614,17 @@ static struct power_supply_desc olpc_bat_desc = {
.use_for_apm = 1,
};
-static struct power_supply *olpc_bat;
-
static int olpc_battery_suspend(struct platform_device *pdev,
pm_message_t state)
{
- if (device_may_wakeup(&olpc_ac->dev))
+ struct olpc_battery_data *data = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&data->olpc_ac->dev))
olpc_ec_wakeup_set(EC_SCI_SRC_ACPWR);
else
olpc_ec_wakeup_clear(EC_SCI_SRC_ACPWR);
- if (device_may_wakeup(&olpc_bat->dev))
+ if (device_may_wakeup(&data->olpc_bat->dev))
olpc_ec_wakeup_set(EC_SCI_SRC_BATTERY | EC_SCI_SRC_BATSOC
| EC_SCI_SRC_BATERR);
else
@@ -600,16 +636,37 @@ static int olpc_battery_suspend(struct platform_device *pdev,
static int olpc_battery_probe(struct platform_device *pdev)
{
- int ret;
+ struct power_supply_config bat_psy_cfg = {};
+ struct power_supply_config ac_psy_cfg = {};
+ struct olpc_battery_data *data;
uint8_t status;
+ uint8_t ecver;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, data);
- /*
- * We've seen a number of EC protocol changes; this driver requires
- * the latest EC protocol, supported by 0x44 and above.
- */
- if (olpc_platform_info.ecver < 0x44) {
+ /* See if the EC is already there and get the EC revision */
+ ret = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ecver, 1);
+ if (ret)
+ return ret;
+
+ if (of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec")) {
+ /* XO 1.75 */
+ data->new_proto = true;
+ data->little_endian = true;
+ } else if (ecver > 0x44) {
+ /* XO 1 or 1.5 with a new EC firmware. */
+ data->new_proto = true;
+ } else if (ecver < 0x44) {
+ /*
+ * We've seen a number of EC protocol changes; this driver
+ * requires the latest EC protocol, supported by 0x44 and above.
+ */
printk(KERN_NOTICE "OLPC EC version 0x%02x too old for "
- "battery driver.\n", olpc_platform_info.ecver);
+ "battery driver.\n", ecver);
return -ENXIO;
}
@@ -619,59 +676,44 @@ static int olpc_battery_probe(struct platform_device *pdev)
/* Ignore the status. It doesn't actually matter */
- olpc_ac = power_supply_register(&pdev->dev, &olpc_ac_desc, NULL);
- if (IS_ERR(olpc_ac))
- return PTR_ERR(olpc_ac);
+ ac_psy_cfg.of_node = pdev->dev.of_node;
+ ac_psy_cfg.drv_data = data;
+
+ data->olpc_ac = devm_power_supply_register(&pdev->dev, &olpc_ac_desc,
+ &ac_psy_cfg);
+ if (IS_ERR(data->olpc_ac))
+ return PTR_ERR(data->olpc_ac);
- if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
+ if (of_device_is_compatible(pdev->dev.of_node, "olpc,xo1.5-battery")) {
+ /* XO-1.5 */
olpc_bat_desc.properties = olpc_xo15_bat_props;
olpc_bat_desc.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
- } else { /* XO-1 */
+ } else {
+ /* XO-1 */
olpc_bat_desc.properties = olpc_xo1_bat_props;
olpc_bat_desc.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
}
- olpc_bat = power_supply_register(&pdev->dev, &olpc_bat_desc, NULL);
- if (IS_ERR(olpc_bat)) {
- ret = PTR_ERR(olpc_bat);
- goto battery_failed;
- }
-
- ret = device_create_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
- if (ret)
- goto eeprom_failed;
+ bat_psy_cfg.of_node = pdev->dev.of_node;
+ bat_psy_cfg.drv_data = data;
+ bat_psy_cfg.attr_grp = olpc_bat_sysfs_groups;
- ret = device_create_file(&olpc_bat->dev, &olpc_bat_error);
- if (ret)
- goto error_failed;
+ data->olpc_bat = devm_power_supply_register(&pdev->dev, &olpc_bat_desc,
+ &bat_psy_cfg);
+ if (IS_ERR(data->olpc_bat))
+ return PTR_ERR(data->olpc_bat);
if (olpc_ec_wakeup_available()) {
- device_set_wakeup_capable(&olpc_ac->dev, true);
- device_set_wakeup_capable(&olpc_bat->dev, true);
+ device_set_wakeup_capable(&data->olpc_ac->dev, true);
+ device_set_wakeup_capable(&data->olpc_bat->dev, true);
}
return 0;
-
-error_failed:
- device_remove_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
-eeprom_failed:
- power_supply_unregister(olpc_bat);
-battery_failed:
- power_supply_unregister(olpc_ac);
- return ret;
-}
-
-static int olpc_battery_remove(struct platform_device *pdev)
-{
- device_remove_file(&olpc_bat->dev, &olpc_bat_error);
- device_remove_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
- power_supply_unregister(olpc_bat);
- power_supply_unregister(olpc_ac);
- return 0;
}
static const struct of_device_id olpc_battery_ids[] = {
{ .compatible = "olpc,xo1-battery" },
+ { .compatible = "olpc,xo1.5-battery" },
{}
};
MODULE_DEVICE_TABLE(of, olpc_battery_ids);
@@ -682,7 +724,6 @@ static struct platform_driver olpc_battery_driver = {
.of_match_table = olpc_battery_ids,
},
.probe = olpc_battery_probe,
- .remove = olpc_battery_remove,
.suspend = olpc_battery_suspend,
};
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index c917a8b43b2b..f7033ecf6d0b 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -598,10 +598,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
err = of_property_read_string(battery_np, "compatible", &value);
if (err)
- return err;
+ goto out_put_node;
- if (strcmp("simple-battery", value))
- return -ENODEV;
+ if (strcmp("simple-battery", value)) {
+ err = -ENODEV;
+ goto out_put_node;
+ }
/* The property and field names below must correspond to elements
* in enum power_supply_property. For reasoning, see
@@ -620,19 +622,21 @@ int power_supply_get_battery_info(struct power_supply *psy,
&info->precharge_current_ua);
of_property_read_u32(battery_np, "charge-term-current-microamp",
&info->charge_term_current_ua);
- of_property_read_u32(battery_np, "constant_charge_current_max_microamp",
+ of_property_read_u32(battery_np, "constant-charge-current-max-microamp",
&info->constant_charge_current_max_ua);
- of_property_read_u32(battery_np, "constant_charge_voltage_max_microvolt",
+ of_property_read_u32(battery_np, "constant-charge-voltage-max-microvolt",
&info->constant_charge_voltage_max_uv);
of_property_read_u32(battery_np, "factory-internal-resistance-micro-ohms",
&info->factory_internal_resistance_uohm);
len = of_property_count_u32_elems(battery_np, "ocv-capacity-celsius");
if (len < 0 && len != -EINVAL) {
- return len;
+ err = len;
+ goto out_put_node;
} else if (len > POWER_SUPPLY_OCV_TEMP_MAX) {
dev_err(&psy->dev, "Too many temperature values\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
} else if (len > 0) {
of_property_read_u32_array(battery_np, "ocv-capacity-celsius",
info->ocv_temp, len);
@@ -650,7 +654,8 @@ int power_supply_get_battery_info(struct power_supply *psy,
dev_err(&psy->dev, "failed to get %s\n", propname);
kfree(propname);
power_supply_put_battery_info(psy, info);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
}
kfree(propname);
@@ -661,16 +666,21 @@ int power_supply_get_battery_info(struct power_supply *psy,
devm_kcalloc(&psy->dev, tab_len, sizeof(*table), GFP_KERNEL);
if (!info->ocv_table[index]) {
power_supply_put_battery_info(psy, info);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_put_node;
}
for (i = 0; i < tab_len; i++) {
- table[i].ocv = be32_to_cpu(*list++);
- table[i].capacity = be32_to_cpu(*list++);
+ table[i].ocv = be32_to_cpu(*list);
+ list++;
+ table[i].capacity = be32_to_cpu(*list);
+ list++;
}
}
- return 0;
+out_put_node:
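+ /* Drop the reference to the battery node on every exit path. */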
+ of_node_put(battery_np);
+ return err;
}
EXPORT_SYMBOL_GPL(power_supply_get_battery_info);
@@ -899,7 +909,7 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
return ret;
}
-static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
+static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
unsigned long *state)
{
struct power_supply *psy;
@@ -934,7 +944,7 @@ static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
static const struct thermal_cooling_device_ops psy_tcd_ops = {
.get_max_state = ps_get_max_charge_cntl_limit,
- .get_cur_state = ps_get_cur_chrage_cntl_limit,
+ .get_cur_state = ps_get_cur_charge_cntl_limit,
.set_cur_state = ps_set_cur_charge_cntl_limit,
};
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index dce24f596160..a704a76d7529 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -56,13 +56,13 @@ static const char * const power_supply_status_text[] = {
};
static const char * const power_supply_charge_type_text[] = {
- "Unknown", "N/A", "Trickle", "Fast"
+ "Unknown", "N/A", "Trickle", "Fast", "Standard", "Adaptive", "Custom"
};
static const char * const power_supply_health_text[] = {
"Unknown", "Good", "Overheat", "Dead", "Over voltage",
"Unspecified failure", "Cold", "Watchdog timer expire",
- "Safety timer expire"
+ "Safety timer expire", "Over current"
};
static const char * const power_supply_technology_text[] = {
@@ -274,6 +274,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(constant_charge_voltage_max),
POWER_SUPPLY_ATTR(charge_control_limit),
POWER_SUPPLY_ATTR(charge_control_limit_max),
+ POWER_SUPPLY_ATTR(charge_control_start_threshold),
+ POWER_SUPPLY_ATTR(charge_control_end_threshold),
POWER_SUPPLY_ATTR(input_current_limit),
POWER_SUPPLY_ATTR(energy_full_design),
POWER_SUPPLY_ATTR(energy_empty_design),
@@ -383,15 +385,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
char *prop_buf;
char *attrname;
- dev_dbg(dev, "uevent\n");
-
if (!psy || !psy->desc) {
dev_dbg(dev, "No power supply yet\n");
return ret;
}
- dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
-
ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
if (ret)
return ret;
@@ -427,8 +425,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
goto out;
}
- dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
-
ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
kfree(attrname);
if (ret)
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
new file mode 100644
index 000000000000..1c89d030c045
--- /dev/null
+++ b/drivers/power/supply/ucs1002_power.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for UCS1002 Programmable USB Port Power Controller
+ *
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ */
+#include <linux/bits.h>
+#include <linux/freezer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/* UCS1002 Registers */
+#define UCS1002_REG_CURRENT_MEASUREMENT 0x00
+
+/*
+ * The Total Accumulated Charge registers store the total accumulated
+ * charge delivered from the VS source to a portable device. The total
+ * value is calculated using four registers, from 01h to 04h. The bit
+ * weighting of the registers is given in mA/hrs.
+ */
+#define UCS1002_REG_TOTAL_ACC_CHARGE 0x01
+
+/* Other Status Register */
+#define UCS1002_REG_OTHER_STATUS 0x0f
+# define F_ADET_PIN BIT(4)
+# define F_CHG_ACT BIT(3)
+
+/* Interrupt Status */
+#define UCS1002_REG_INTERRUPT_STATUS 0x10
+# define F_DISCHARGE_ERR BIT(6)
+# define F_RESET BIT(5)
+# define F_MIN_KEEP_OUT BIT(4)
+# define F_TSD BIT(3)
+# define F_OVER_VOLT BIT(2)
+# define F_BACK_VOLT BIT(1)
+# define F_OVER_ILIM BIT(0)
+
+/* Pin Status Register */
+#define UCS1002_REG_PIN_STATUS 0x14
+# define UCS1002_PWR_STATE_MASK 0x03
+# define F_PWR_EN_PIN BIT(6)
+# define F_M2_PIN BIT(5)
+# define F_M1_PIN BIT(4)
+# define F_EM_EN_PIN BIT(3)
+# define F_SEL_PIN BIT(2)
+# define F_ACTIVE_MODE_MASK GENMASK(5, 3)
+# define F_ACTIVE_MODE_PASSTHROUGH F_M2_PIN
+# define F_ACTIVE_MODE_DEDICATED F_EM_EN_PIN
+# define F_ACTIVE_MODE_BC12_DCP (F_M2_PIN | F_EM_EN_PIN)
+# define F_ACTIVE_MODE_BC12_SDP F_M1_PIN
+# define F_ACTIVE_MODE_BC12_CDP (F_M1_PIN | F_M2_PIN | F_EM_EN_PIN)
+
+/* General Configuration Register */
+#define UCS1002_REG_GENERAL_CFG 0x15
+# define F_RATION_EN BIT(3)
+
+/* Emulation Configuration Register */
+#define UCS1002_REG_EMU_CFG 0x16
+
+/* Switch Configuration Register */
+#define UCS1002_REG_SWITCH_CFG 0x17
+# define F_PIN_IGNORE BIT(7)
+# define F_EM_EN_SET BIT(5)
+# define F_M2_SET BIT(4)
+# define F_M1_SET BIT(3)
+# define F_S0_SET BIT(2)
+# define F_PWR_EN_SET BIT(1)
+# define F_LATCH_SET BIT(0)
+# define V_SET_ACTIVE_MODE_MASK GENMASK(5, 3)
+# define V_SET_ACTIVE_MODE_PASSTHROUGH F_M2_SET
+# define V_SET_ACTIVE_MODE_DEDICATED F_EM_EN_SET
+# define V_SET_ACTIVE_MODE_BC12_DCP (F_M2_SET | F_EM_EN_SET)
+# define V_SET_ACTIVE_MODE_BC12_SDP F_M1_SET
+# define V_SET_ACTIVE_MODE_BC12_CDP (F_M1_SET | F_M2_SET | F_EM_EN_SET)
+
+/* Current Limit Register */
+#define UCS1002_REG_ILIMIT 0x19
+# define UCS1002_ILIM_SW_MASK GENMASK(3, 0)
+
+/* Product ID */
+#define UCS1002_REG_PRODUCT_ID 0xfd
+# define UCS1002_PRODUCT_ID 0x4e
+
+/* Manufacture name */
+#define UCS1002_MANUFACTURER "SMSC"
+
+struct ucs1002_info {
+ struct power_supply *charger;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct regulator_desc *regulator_descriptor;
+ bool present;
+};
+
+static enum power_supply_property ucs1002_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of PED */
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+static int ucs1002_get_online(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_OTHER_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = !!(reg & F_CHG_ACT);
+
+ return 0;
+}
+
+static int ucs1002_get_charge(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ /*
+ * To fit within 32 bits some values are rounded (uAh)
+ *
+ * For Total Accumulated Charge Middle Low Byte register, addr
+ * 03h, byte 2
+ *
+ * B0: 0.01084 mAh rounded to 11 uAh
+ * B1: 0.02169 mAh rounded to 22 uAh
+ * B2: 0.04340 mAh rounded to 43 uAh
+ * B3: 0.08676 mAh rounded to 87 uAh
+ * B4: 0.17350 mAh rounded to 173 uAh
+ *
+ * For Total Accumulated Charge Low Byte register, addr 04h,
+ * byte 3
+ *
+ * B6: 0.00271 mAh rounded to 3 uAh
+ * B7: 0.005422 mAh rounded to 5 uAh
+ */
+ static const int bit_weights_uAh[BITS_PER_TYPE(u32)] = {
+ /*
+ * Bit corresponding to low byte (offset 0x04)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 0, 0, 0, 0, 0, 0, 3, 5,
+ /*
+ * Bit corresponding to middle low byte (offset 0x03)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 11, 22, 43, 87, 173, 347, 694, 1388,
+ /*
+ * Bit corresponding to middle high byte (offset 0x02)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 2776, 5552, 11105, 22210, 44420, 88840, 177700, 355400,
+ /*
+ * Bit corresponding to high byte (offset 0x01)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 710700, 1421000, 2843000, 5685000, 11371000, 22742000,
+ 45484000, 90968000,
+ };
+ unsigned long total_acc_charger;
+ unsigned int reg;
+ int i, ret;
+
+ ret = regmap_bulk_read(info->regmap, UCS1002_REG_TOTAL_ACC_CHARGE,
+ &reg, sizeof(u32));
+ if (ret)
+ return ret;
+
+ total_acc_charger = be32_to_cpu(reg); /* BE as per offsets above */
+ val->intval = 0;
+
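+ /* The accumulated charge is the sum of the uAh weights of all set bits. */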
+ for_each_set_bit(i, &total_acc_charger, ARRAY_SIZE(bit_weights_uAh))
+ val->intval += bit_weights_uAh[i];
+
+ return 0;
+}
+
+static int ucs1002_get_current(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ /*
+ * The Current Measurement register stores the measured
+ * current value delivered to the portable device. The range
+ * is from 9.76 mA to 2.5 A.
+ */
+ static const int bit_weights_uA[BITS_PER_TYPE(u8)] = {
+ 9760, 19500, 39000, 78100, 156200, 312300, 624600, 1249300,
+ };
+ unsigned long current_measurement;
+ unsigned int reg;
+ int i, ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_CURRENT_MEASUREMENT, &reg);
+ if (ret)
+ return ret;
+
+ current_measurement = reg;
+ val->intval = 0;
+
+ for_each_set_bit(i, &current_measurement, ARRAY_SIZE(bit_weights_uA))
+ val->intval += bit_weights_uA[i];
+
+ return 0;
+}
+
+/*
+ * The Current Limit register stores the maximum current used by the
+ * port switch. The range is from 500mA to 2.5 A.
+ */
+static const u32 ucs1002_current_limit_uA[] = {
+ 500000, 900000, 1000000, 1200000, 1500000, 1800000, 2000000, 2500000,
+};
+
+static int ucs1002_get_max_current(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = ucs1002_current_limit_uA[reg & UCS1002_ILIM_SW_MASK];
+
+ return 0;
+}
+
+static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
+{
+ unsigned int reg;
+ int ret, idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ucs1002_current_limit_uA); idx++) {
+ if (val == ucs1002_current_limit_uA[idx])
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(ucs1002_current_limit_uA))
+ return -EINVAL;
+
+ ret = regmap_write(info->regmap, UCS1002_REG_ILIMIT, idx);
+ if (ret)
+ return ret;
+ /*
+ * Any current limit setting exceeding the one set via the ILIM
+ * pin will be rejected, so read back the freshly written limit
+ * to make sure it took effect.
+ */
+ ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
+ if (ret)
+ return ret;
+
+ if (reg != idx)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum power_supply_usb_type ucs1002_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_PD,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+};
+
+static int ucs1002_set_usb_type(struct ucs1002_info *info, int val)
+{
+ unsigned int mode;
+
+ if (val < 0 || val >= ARRAY_SIZE(ucs1002_usb_types))
+ return -EINVAL;
+
+ switch (ucs1002_usb_types[val]) {
+ case POWER_SUPPLY_USB_TYPE_PD:
+ mode = V_SET_ACTIVE_MODE_DEDICATED;
+ break;
+ case POWER_SUPPLY_USB_TYPE_SDP:
+ mode = V_SET_ACTIVE_MODE_BC12_SDP;
+ break;
+ case POWER_SUPPLY_USB_TYPE_DCP:
+ mode = V_SET_ACTIVE_MODE_BC12_DCP;
+ break;
+ case POWER_SUPPLY_USB_TYPE_CDP:
+ mode = V_SET_ACTIVE_MODE_BC12_CDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(info->regmap, UCS1002_REG_SWITCH_CFG,
+ V_SET_ACTIVE_MODE_MASK, mode);
+}
+
+static int ucs1002_get_usb_type(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ enum power_supply_usb_type type;
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PIN_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ switch (reg & F_ACTIVE_MODE_MASK) {
+ default:
+ type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ case F_ACTIVE_MODE_DEDICATED:
+ type = POWER_SUPPLY_USB_TYPE_PD;
+ break;
+ case F_ACTIVE_MODE_BC12_SDP:
+ type = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case F_ACTIVE_MODE_BC12_DCP:
+ type = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case F_ACTIVE_MODE_BC12_CDP:
+ type = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ };
+
+ val->intval = type;
+
+ return 0;
+}
+
+static int ucs1002_get_health(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret, health;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_INTERRUPT_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ if (reg & F_TSD)
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (reg & (F_OVER_VOLT | F_BACK_VOLT))
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (reg & F_OVER_ILIM)
+ health = POWER_SUPPLY_HEALTH_OVERCURRENT;
+ else if (reg & (F_DISCHARGE_ERR | F_MIN_KEEP_OUT))
+ health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else
+ health = POWER_SUPPLY_HEALTH_GOOD;
+
+ val->intval = health;
+
+ return 0;
+}
+
+static int ucs1002_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ucs1002_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return ucs1002_get_online(info, val);
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ return ucs1002_get_charge(info, val);
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return ucs1002_get_current(info, val);
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return ucs1002_get_max_current(info, val);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return ucs1002_get_usb_type(info, val);
+ case POWER_SUPPLY_PROP_HEALTH:
+ return ucs1002_get_health(info, val);
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = info->present;
+ return 0;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = UCS1002_MANUFACTURER;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ucs1002_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct ucs1002_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return ucs1002_set_max_current(info, val->intval);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return ucs1002_set_usb_type(info, val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ucs1002_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct power_supply_desc ucs1002_charger_desc = {
+ .name = "ucs1002",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = ucs1002_usb_types,
+ .num_usb_types = ARRAY_SIZE(ucs1002_usb_types),
+ .get_property = ucs1002_get_property,
+ .set_property = ucs1002_set_property,
+ .property_is_writeable = ucs1002_property_is_writeable,
+ .properties = ucs1002_props,
+ .num_properties = ARRAY_SIZE(ucs1002_props),
+};
+
+static irqreturn_t ucs1002_charger_irq(int irq, void *data)
+{
+ int ret, regval;
+ bool present;
+ struct ucs1002_info *info = data;
+
+ present = info->present;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_OTHER_STATUS, &regval);
+ if (ret)
+ return IRQ_HANDLED;
+
+ /* update attached status */
+ info->present = regval & F_ADET_PIN;
+
+ /* notify the change */
+ if (present != info->present)
+ power_supply_changed(info->charger);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ucs1002_alert_irq(int irq, void *data)
+{
+ struct ucs1002_info *info = data;
+
+ power_supply_changed(info->charger);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regulator_ops ucs1002_regulator_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_desc ucs1002_regulator_descriptor = {
+ .name = "ucs1002-vbus",
+ .ops = &ucs1002_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = UCS1002_REG_SWITCH_CFG,
+ .enable_mask = F_PWR_EN_SET,
+ .enable_val = F_PWR_EN_SET,
+ .fixed_uV = 5000000,
+ .n_voltages = 1,
+};
+
+static int ucs1002_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct power_supply_config charger_config = {};
+ const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+ struct regulator_config regulator_config = {};
+ int irq_a_det, irq_alert, ret;
+ struct regulator_dev *rdev;
+ struct ucs1002_info *info;
+ unsigned int regval;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ ret = PTR_ERR_OR_ZERO(info->regmap);
+ if (ret) {
+ dev_err(dev, "Regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ info->client = client;
+
+ irq_a_det = of_irq_get_byname(dev->of_node, "a_det");
+ irq_alert = of_irq_get_byname(dev->of_node, "alert");
+
+ charger_config.of_node = dev->of_node;
+ charger_config.drv_data = info;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PRODUCT_ID, &regval);
+ if (ret) {
+ dev_err(dev, "Failed to read product ID: %d\n", ret);
+ return ret;
+ }
+
+ if (regval != UCS1002_PRODUCT_ID) {
+ dev_err(dev,
+ "Product ID does not match (0x%02x != 0x%02x)\n",
+ regval, UCS1002_PRODUCT_ID);
+ return -ENODEV;
+ }
+
+ /* Enable charge rationing by default */
+ ret = regmap_update_bits(info->regmap, UCS1002_REG_GENERAL_CFG,
+ F_RATION_EN, F_RATION_EN);
+ if (ret) {
+ dev_err(dev, "Failed to read general config: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Ignore the M1, M2, PWR_EN, and EM_EN pin states. Set active
+ * mode selection to BC1.2 CDP.
+ */
+ ret = regmap_update_bits(info->regmap, UCS1002_REG_SWITCH_CFG,
+ V_SET_ACTIVE_MODE_MASK | F_PIN_IGNORE,
+ V_SET_ACTIVE_MODE_BC12_CDP | F_PIN_IGNORE);
+ if (ret) {
+ dev_err(dev, "Failed to configure default mode: %d\n", ret);
+ return ret;
+ }
+ /*
+ * Be safe and set initial current limit to 500mA
+ */
+ ret = ucs1002_set_max_current(info, 500000);
+ if (ret) {
+ dev_err(dev, "Failed to set max current default: %d\n", ret);
+ return ret;
+ }
+
+ info->charger = devm_power_supply_register(dev, &ucs1002_charger_desc,
+ &charger_config);
+ ret = PTR_ERR_OR_ZERO(info->charger);
+ if (ret) {
+ dev_err(dev, "Failed to register power supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PIN_STATUS, &regval);
+ if (ret) {
+ dev_err(dev, "Failed to read pin status: %d\n", ret);
+ return ret;
+ }
+
+ info->regulator_descriptor =
+ devm_kmemdup(dev, &ucs1002_regulator_descriptor,
+ sizeof(ucs1002_regulator_descriptor),
+ GFP_KERNEL);
+ if (!info->regulator_descriptor)
+ return -ENOMEM;
+
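+ /*
+ * The polarity of the PWR_EN control follows the state of the SEL
+ * pin: treat the enable bit as active-low when SEL is low.
+ */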
+ info->regulator_descriptor->enable_is_inverted = !(regval & F_SEL_PIN);
+
+ regulator_config.dev = dev;
+ regulator_config.of_node = dev->of_node;
+ regulator_config.regmap = info->regmap;
+
+ rdev = devm_regulator_register(dev, info->regulator_descriptor,
+ &regulator_config);
+ ret = PTR_ERR_OR_ZERO(rdev);
+ if (ret) {
+ dev_err(dev, "Failed to register VBUS regulator: %d\n", ret);
+ return ret;
+ }
+
+ if (irq_a_det > 0) {
+ ret = devm_request_threaded_irq(dev, irq_a_det, NULL,
+ ucs1002_charger_irq,
+ IRQF_ONESHOT,
+ "ucs1002-a_det", info);
+ if (ret) {
+ dev_err(dev, "Failed to request A_DET threaded irq: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (irq_alert > 0) {
+ ret = devm_request_threaded_irq(dev, irq_alert, NULL,
+ ucs1002_alert_irq,
+ IRQF_ONESHOT,
+ "ucs1002-alert", info);
+ if (ret) {
+ dev_err(dev, "Failed to request ALERT threaded irq: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ucs1002_of_match[] = {
+ { .compatible = "microchip,ucs1002", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ucs1002_of_match);
+
+static struct i2c_driver ucs1002_driver = {
+ .driver = {
+ .name = "ucs1002",
+ .of_match_table = ucs1002_of_match,
+ },
+ .probe = ucs1002_probe,
+};
+module_i2c_driver(ucs1002_driver);
+
+MODULE_DESCRIPTION("Microchip UCS1002 Programmable USB Port Power Controller");
+MODULE_AUTHOR("Enric Balletbo Serra <enric.balletbo@collabora.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index dd5d1103e02b..4b6418039387 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -31,19 +31,25 @@
#include <linux/slab.h>
#include <linux/pps_kernel.h>
#include <linux/pps-gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/list.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
/* Info for each registered platform device */
struct pps_gpio_device_data {
int irq; /* IRQ used as PPS source */
struct pps_device *pps; /* PPS source device */
struct pps_source_info info; /* PPS source information */
+ struct gpio_desc *gpio_pin; /* GPIO port descriptors */
+ struct gpio_desc *echo_pin;
+ struct timer_list echo_timer; /* timer to reset echo active state */
bool assert_falling_edge;
bool capture_clear;
- unsigned int gpio_pin;
+ unsigned int echo_active_ms; /* PPS echo active duration */
+ unsigned long echo_timeout; /* timer timeout value in jiffies */
};
/*
@@ -61,18 +67,101 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data)
info = data;
- rising_edge = gpio_get_value(info->gpio_pin);
+ rising_edge = gpiod_get_value(info->gpio_pin);
if ((rising_edge && !info->assert_falling_edge) ||
(!rising_edge && info->assert_falling_edge))
- pps_event(info->pps, &ts, PPS_CAPTUREASSERT, NULL);
+ pps_event(info->pps, &ts, PPS_CAPTUREASSERT, data);
else if (info->capture_clear &&
((rising_edge && info->assert_falling_edge) ||
- (!rising_edge && !info->assert_falling_edge)))
- pps_event(info->pps, &ts, PPS_CAPTURECLEAR, NULL);
+ (!rising_edge && !info->assert_falling_edge)))
+ pps_event(info->pps, &ts, PPS_CAPTURECLEAR, data);
return IRQ_HANDLED;
}
+/* This function will only be called when an ECHO GPIO is defined */
+static void pps_gpio_echo(struct pps_device *pps, int event, void *data)
+{
+ /* add_timer() needs to write into info->echo_timer */
+ struct pps_gpio_device_data *info = data;
+
+ switch (event) {
+ case PPS_CAPTUREASSERT:
+ if (pps->params.mode & PPS_ECHOASSERT)
+ gpiod_set_value(info->echo_pin, 1);
+ break;
+
+ case PPS_CAPTURECLEAR:
+ if (pps->params.mode & PPS_ECHOCLEAR)
+ gpiod_set_value(info->echo_pin, 1);
+ break;
+ }
+
+ /* fire the timer */
+ if (info->pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) {
+ info->echo_timer.expires = jiffies + info->echo_timeout;
+ add_timer(&info->echo_timer);
+ }
+}
+
+/* Timer callback to reset the echo pin to the inactive state */
+static void pps_gpio_echo_timer_callback(struct timer_list *t)
+{
+ const struct pps_gpio_device_data *info;
+
+ info = from_timer(info, t, echo_timer);
+
+ gpiod_set_value(info->echo_pin, 0);
+}
+
+static int pps_gpio_setup(struct platform_device *pdev)
+{
+ struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ u32 value;
+
+ data->gpio_pin = devm_gpiod_get(&pdev->dev,
+ NULL, /* request "gpios" */
+ GPIOD_IN);
+ if (IS_ERR(data->gpio_pin)) {
+ dev_err(&pdev->dev,
+ "failed to request PPS GPIO\n");
+ return PTR_ERR(data->gpio_pin);
+ }
+
+ data->echo_pin = devm_gpiod_get_optional(&pdev->dev,
+ "echo",
+ GPIOD_OUT_LOW);
+ if (data->echo_pin) {
+ if (IS_ERR(data->echo_pin)) {
+ dev_err(&pdev->dev, "failed to request ECHO GPIO\n");
+ return PTR_ERR(data->echo_pin);
+ }
+
+ ret = of_property_read_u32(np,
+ "echo-active-ms",
+ &value);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get echo-active-ms from OF\n");
+ return ret;
+ }
+ data->echo_active_ms = value;
+ /* sanity check on echo_active_ms */
+ if (!data->echo_active_ms || data->echo_active_ms > 999) {
+ dev_err(&pdev->dev,
+ "echo-active-ms: %u - bad value from OF\n",
+ data->echo_active_ms);
+ return -EINVAL;
+ }
+ }
+
+ if (of_property_read_bool(np, "assert-falling-edge"))
+ data->assert_falling_edge = true;
+ return 0;
+}
+
static unsigned long
get_irqf_trigger_flags(const struct pps_gpio_device_data *data)
{
@@ -90,53 +179,32 @@ get_irqf_trigger_flags(const struct pps_gpio_device_data *data)
static int pps_gpio_probe(struct platform_device *pdev)
{
struct pps_gpio_device_data *data;
- const char *gpio_label;
int ret;
int pps_default_params;
const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct device_node *np = pdev->dev.of_node;
/* allocate space for device info */
- data = devm_kzalloc(&pdev->dev, sizeof(struct pps_gpio_device_data),
- GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ platform_set_drvdata(pdev, data);
+ /* GPIO setup */
if (pdata) {
data->gpio_pin = pdata->gpio_pin;
- gpio_label = pdata->gpio_label;
+ data->echo_pin = pdata->echo_pin;
data->assert_falling_edge = pdata->assert_falling_edge;
data->capture_clear = pdata->capture_clear;
+ data->echo_active_ms = pdata->echo_active_ms;
} else {
- ret = of_get_gpio(np, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get GPIO from device tree\n");
- return ret;
- }
- data->gpio_pin = ret;
- gpio_label = PPS_GPIO_NAME;
-
- if (of_get_property(np, "assert-falling-edge", NULL))
- data->assert_falling_edge = true;
- }
-
- /* GPIO setup */
- ret = devm_gpio_request(&pdev->dev, data->gpio_pin, gpio_label);
- if (ret) {
- dev_err(&pdev->dev, "failed to request GPIO %u\n",
- data->gpio_pin);
- return ret;
- }
-
- ret = gpio_direction_input(data->gpio_pin);
- if (ret) {
- dev_err(&pdev->dev, "failed to set pin direction\n");
- return -EINVAL;
+ ret = pps_gpio_setup(pdev);
+ if (ret)
+ return -EINVAL;
}
/* IRQ setup */
- ret = gpio_to_irq(data->gpio_pin);
+ ret = gpiod_to_irq(data->gpio_pin);
if (ret < 0) {
dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret);
return -EINVAL;
@@ -152,6 +220,11 @@ static int pps_gpio_probe(struct platform_device *pdev)
data->info.owner = THIS_MODULE;
snprintf(data->info.name, PPS_MAX_NAME_LEN - 1, "%s.%d",
pdev->name, pdev->id);
+ if (data->echo_pin) {
+ data->info.echo = pps_gpio_echo;
+ data->echo_timeout = msecs_to_jiffies(data->echo_active_ms);
+ timer_setup(&data->echo_timer, pps_gpio_echo_timer_callback, 0);
+ }
/* register PPS source */
pps_default_params = PPS_CAPTUREASSERT | PPS_OFFSETASSERT;
@@ -173,7 +246,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- platform_set_drvdata(pdev, data);
dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n",
data->irq);
@@ -185,6 +257,11 @@ static int pps_gpio_remove(struct platform_device *pdev)
struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
pps_unregister_source(data->pps);
+ if (data->echo_pin) {
+ del_timer_sync(&data->echo_timer);
+ /* reset echo pin in any case */
+ gpiod_set_value(data->echo_pin, 0);
+ }
dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
return 0;
}
@@ -209,4 +286,4 @@ MODULE_AUTHOR("Ricardo Martins <rasm@fe.up.pt>");
MODULE_AUTHOR("James Nuss <jamesnuss@nanometrics.ca>");
MODULE_DESCRIPTION("Use GPIO pin as PPS source");
MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.0");
+MODULE_VERSION("1.2.0");
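
The echo support added above works in two halves: pps_gpio_echo() drives the echo GPIO active when a captured edge matches an enabled PPS_ECHOASSERT/PPS_ECHOCLEAR mode, and the echo_timer callback drives it back to inactive after echo-active-ms. As a rough illustration of how a consumer could turn the assert echo on, here is a minimal userspace sketch using the uapi ioctls from <linux/pps.h>; the /dev/pps0 path is an assumed example and error handling is abbreviated.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_kparams params;
	int fd = open("/dev/pps0", O_RDWR);	/* assumed PPS device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, PPS_GETPARAMS, &params) < 0) {
		perror("PPS_GETPARAMS");
		return 1;
	}

	/* mirror each captured assert edge on the echo GPIO */
	params.mode |= PPS_ECHOASSERT;

	if (ioctl(fd, PPS_SETPARAMS, &params) < 0) {
		perror("PPS_SETPARAMS");
		return 1;
	}

	return 0;
}

If the PPS source did not advertise echo capability, the PPS core rejects the new mode at PPS_SETPARAMS time.
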
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index 53775362aac6..e10642403b25 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -467,6 +467,9 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
unsigned long flags;
u32 tmr_ctrl;
+ if (!node)
+ return -ENODEV;
+
ptp_qoriq->base = base;
ptp_qoriq->caps = *caps;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 54f8238aac0d..1311b54089be 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -210,6 +210,17 @@ config PWM_IMX27
To compile this driver as a module, choose M here: the module
will be called pwm-imx27.
+config PWM_IMX_TPM
+ tristate "i.MX TPM PWM support"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAVE_CLK && HAS_IOMEM
+ help
+ Generic PWM framework driver for the i.MX7ULP TPM module. TPM is
+ short for Low Power Timer/Pulse Width Modulation Module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-imx-tpm.
+
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
depends on MACH_INGENIC
@@ -467,10 +478,9 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
- depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3
help
- PWM driver support for the EHRPWM controller found on AM33XX
- TI SOC
+ PWM driver support for the EHRPWM controller found on TI SoCs
To compile this driver as a module, choose M here: the module
will be called pwm-tiehrpwm.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 448825e892bc..c368599d36c0 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o
obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o
+obj-$(CONFIG_PWM_IMX_TPM) += pwm-imx-tpm.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 3149204567f3..3998ebd51db4 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -311,10 +311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
if (IS_ENABLED(CONFIG_OF))
of_pwmchip_add(chip);
- pwmchip_sysfs_export(chip);
-
out:
mutex_unlock(&pwm_lock);
+
+ if (!ret)
+ pwmchip_sysfs_export(chip);
+
return ret;
}
EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
@@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip)
unsigned int i;
int ret = 0;
- pwmchip_sysfs_unexport_children(chip);
+ pwmchip_sysfs_unexport(chip);
mutex_lock(&pwm_lock);
@@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip)
free_pwms(chip);
- pwmchip_sysfs_unexport(chip);
-
out:
mutex_unlock(&pwm_lock);
return ret;
@@ -877,6 +877,7 @@ void pwm_put(struct pwm_device *pwm)
if (pwm->chip->ops->free)
pwm->chip->ops->free(pwm->chip, pwm);
+ pwm_set_chip_data(pwm, NULL);
pwm->label = NULL;
module_put(pwm->chip->ops->owner);
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 7c8d6a168ceb..b91c477cc84b 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -84,7 +84,6 @@ static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
- pwm_set_chip_data(pwm, NULL);
kfree(channel);
}
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index bbf10ae02f0e..fa168581e6b8 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -35,7 +35,7 @@
#include <asm/div64.h>
-#include <mach/platform.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
+#include <linux/soc/cirrus/ep93xx.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
#define EP93XX_PWMx_TERM_COUNT 0x00
#define EP93XX_PWMx_DUTY_CYCLE 0x04
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 883378d055c6..f21ea1b97116 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -22,51 +22,9 @@
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/fsl/ftm.h>
-#define FTM_SC 0x00
-#define FTM_SC_CLK_MASK_SHIFT 3
-#define FTM_SC_CLK_MASK (3 << FTM_SC_CLK_MASK_SHIFT)
#define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_MASK_SHIFT)
-#define FTM_SC_PS_MASK 0x7
-
-#define FTM_CNT 0x04
-#define FTM_MOD 0x08
-
-#define FTM_CSC_BASE 0x0C
-#define FTM_CSC_MSB BIT(5)
-#define FTM_CSC_MSA BIT(4)
-#define FTM_CSC_ELSB BIT(3)
-#define FTM_CSC_ELSA BIT(2)
-#define FTM_CSC(_channel) (FTM_CSC_BASE + ((_channel) * 8))
-
-#define FTM_CV_BASE 0x10
-#define FTM_CV(_channel) (FTM_CV_BASE + ((_channel) * 8))
-
-#define FTM_CNTIN 0x4C
-#define FTM_STATUS 0x50
-
-#define FTM_MODE 0x54
-#define FTM_MODE_FTMEN BIT(0)
-#define FTM_MODE_INIT BIT(2)
-#define FTM_MODE_PWMSYNC BIT(3)
-
-#define FTM_SYNC 0x58
-#define FTM_OUTINIT 0x5C
-#define FTM_OUTMASK 0x60
-#define FTM_COMBINE 0x64
-#define FTM_DEADTIME 0x68
-#define FTM_EXTTRIG 0x6C
-#define FTM_POL 0x70
-#define FTM_FMS 0x74
-#define FTM_FILTER 0x78
-#define FTM_FLTCTRL 0x7C
-#define FTM_QDCTRL 0x80
-#define FTM_CONF 0x84
-#define FTM_FLTPOL 0x88
-#define FTM_SYNCONF 0x8C
-#define FTM_INVCTRL 0x90
-#define FTM_SWOCTRL 0x94
-#define FTM_PWMLOAD 0x98
enum fsl_pwm_clk {
FSL_PWM_CLK_SYS,
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 815f5333bb8f..1cc5fbe1e1d3 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -123,7 +123,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
} else if (mul <= max_timebase * 512) {
div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
timebase = DIV_ROUND_UP(mul, 512);
- } else if (mul > max_timebase * 512) {
+ } else {
dev_err(chip->dev,
"failed to configure timebase steps/divider value\n");
return -EINVAL;
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
new file mode 100644
index 000000000000..e8385c1cf342
--- /dev/null
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2019 NXP.
+ *
+ * Limitations:
+ * - The TPM counter and period counter are shared between
+ * multiple channels, so all channels should use same period
+ * settings.
+ * - Changes to polarity cannot be latched at the time of the
+ * next period start.
+ * - Changing period and duty cycle together isn't atomic; with
+ * unlucky timing a period may be produced with the old duty
+ * cycle but the new period settings.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define PWM_IMX_TPM_PARAM 0x4
+#define PWM_IMX_TPM_GLOBAL 0x8
+#define PWM_IMX_TPM_SC 0x10
+#define PWM_IMX_TPM_CNT 0x14
+#define PWM_IMX_TPM_MOD 0x18
+#define PWM_IMX_TPM_CnSC(n) (0x20 + (n) * 0x8)
+#define PWM_IMX_TPM_CnV(n) (0x24 + (n) * 0x8)
+
+#define PWM_IMX_TPM_PARAM_CHAN GENMASK(7, 0)
+
+#define PWM_IMX_TPM_SC_PS GENMASK(2, 0)
+#define PWM_IMX_TPM_SC_CMOD GENMASK(4, 3)
+#define PWM_IMX_TPM_SC_CMOD_INC_EVERY_CLK FIELD_PREP(PWM_IMX_TPM_SC_CMOD, 1)
+#define PWM_IMX_TPM_SC_CPWMS BIT(5)
+
+#define PWM_IMX_TPM_CnSC_CHF BIT(7)
+#define PWM_IMX_TPM_CnSC_MSB BIT(5)
+#define PWM_IMX_TPM_CnSC_MSA BIT(4)
+
+/*
+ * The reference manual describes this field as two separate bits. The
+ * semantic of the two bits isn't orthogonal though, so they are treated
+ * together as a 2-bit field here.
+ */
+#define PWM_IMX_TPM_CnSC_ELS GENMASK(3, 2)
+#define PWM_IMX_TPM_CnSC_ELS_INVERSED FIELD_PREP(PWM_IMX_TPM_CnSC_ELS, 1)
+#define PWM_IMX_TPM_CnSC_ELS_NORMAL FIELD_PREP(PWM_IMX_TPM_CnSC_ELS, 2)
+
+#define PWM_IMX_TPM_MOD_WIDTH 16
+#define PWM_IMX_TPM_MOD_MOD GENMASK(PWM_IMX_TPM_MOD_WIDTH - 1, 0)
+
+struct imx_tpm_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ void __iomem *base;
+ struct mutex lock;
+ u32 user_count;
+ u32 enable_count;
+ u32 real_period;
+};
+
+struct imx_tpm_pwm_param {
+ u8 prescale;
+ u32 mod;
+ u32 val;
+};
+
+static inline struct imx_tpm_pwm_chip *
+to_imx_tpm_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct imx_tpm_pwm_chip, chip);
+}
+
+/*
+ * For a given pwm_state *state that a consumer might request, this
+ * function determines the pwm_state *real_state that the hardware
+ * will eventually implement and the register values (in *p) needed
+ * to achieve it.
+ */
+static int pwm_imx_tpm_round_state(struct pwm_chip *chip,
+ struct imx_tpm_pwm_param *p,
+ struct pwm_state *real_state,
+ struct pwm_state *state)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ u32 rate, prescale, period_count, clock_unit;
+ u64 tmp;
+
+ rate = clk_get_rate(tpm->clk);
+ tmp = (u64)state->period * rate;
+ clock_unit = DIV_ROUND_CLOSEST_ULL(tmp, NSEC_PER_SEC);
+ if (clock_unit <= PWM_IMX_TPM_MOD_MOD)
+ prescale = 0;
+ else
+ prescale = ilog2(clock_unit) + 1 - PWM_IMX_TPM_MOD_WIDTH;
+
+ if ((!FIELD_FIT(PWM_IMX_TPM_SC_PS, prescale)))
+ return -ERANGE;
+ p->prescale = prescale;
+
+ period_count = (clock_unit + ((1 << prescale) >> 1)) >> prescale;
+ p->mod = period_count;
+
+ /* calculate real period HW can support */
+ tmp = (u64)period_count << prescale;
+ tmp *= NSEC_PER_SEC;
+ real_state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate);
+
+ /*
+ * If the PWM output ends up inactive (either the duty cycle
+ * is 0 or the channel is disabled), make sure the output pin
+ * is inactive.
+ */
+ if (!state->enabled)
+ real_state->duty_cycle = 0;
+ else
+ real_state->duty_cycle = state->duty_cycle;
+
+ tmp = (u64)p->mod * real_state->duty_cycle;
+ p->val = DIV_ROUND_CLOSEST_ULL(tmp, real_state->period);
+
+ real_state->polarity = state->polarity;
+ real_state->enabled = state->enabled;
+
+ return 0;
+}
+
+static void pwm_imx_tpm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ u32 rate, val, prescale;
+ u64 tmp;
+
+ /* get period */
+ state->period = tpm->real_period;
+
+ /* get duty cycle */
+ rate = clk_get_rate(tpm->clk);
+ val = readl(tpm->base + PWM_IMX_TPM_SC);
+ prescale = FIELD_GET(PWM_IMX_TPM_SC_PS, val);
+ tmp = readl(tpm->base + PWM_IMX_TPM_CnV(pwm->hwpwm));
+ tmp = (tmp << prescale) * NSEC_PER_SEC;
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate);
+
+ /* get polarity */
+ val = readl(tpm->base + PWM_IMX_TPM_CnSC(pwm->hwpwm));
+ if ((val & PWM_IMX_TPM_CnSC_ELS) == PWM_IMX_TPM_CnSC_ELS_INVERSED)
+ state->polarity = PWM_POLARITY_INVERSED;
+ else
+ /*
+ * Assume reserved values (2b00 and 2b11) to yield
+ * normal polarity.
+ */
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ /* get channel status */
+ state->enabled = FIELD_GET(PWM_IMX_TPM_CnSC_ELS, val) ? true : false;
+}
+
+/* this function is supposed to be called with the mutex held */
+static int pwm_imx_tpm_apply_hw(struct pwm_chip *chip,
+ struct imx_tpm_pwm_param *p,
+ struct pwm_state *state,
+ struct pwm_device *pwm)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ bool period_update = false;
+ bool duty_update = false;
+ u32 val, cmod, cur_prescale;
+ unsigned long timeout;
+ struct pwm_state c;
+
+ if (state->period != tpm->real_period) {
+ /*
+ * TPM counter is shared by multiple channels, so
+ * prescale and period can NOT be modified when
+ * there are multiple channels in use with different
+ * period settings.
+ */
+ if (tpm->user_count > 1)
+ return -EBUSY;
+
+ val = readl(tpm->base + PWM_IMX_TPM_SC);
+ cmod = FIELD_GET(PWM_IMX_TPM_SC_CMOD, val);
+ cur_prescale = FIELD_GET(PWM_IMX_TPM_SC_PS, val);
+ if (cmod && cur_prescale != p->prescale)
+ return -EBUSY;
+
+ /* set TPM counter prescale */
+ val &= ~PWM_IMX_TPM_SC_PS;
+ val |= FIELD_PREP(PWM_IMX_TPM_SC_PS, p->prescale);
+ writel(val, tpm->base + PWM_IMX_TPM_SC);
+
+ /*
+ * set period count:
+ * if the PWM is disabled (CMOD[1:0] = 2b00), then MOD register
+ * is updated when MOD register is written.
+ *
+ * if the PWM is enabled (CMOD[1:0] ≠ 2b00), the period length
+ * is latched into hardware when the next period starts.
+ */
+ writel(p->mod, tpm->base + PWM_IMX_TPM_MOD);
+ tpm->real_period = state->period;
+ period_update = true;
+ }
+
+ pwm_imx_tpm_get_state(chip, pwm, &c);
+
+ /* polarity is NOT allowed to be changed if PWM is active */
+ if (c.enabled && c.polarity != state->polarity)
+ return -EBUSY;
+
+ if (state->duty_cycle != c.duty_cycle) {
+ /*
+ * set channel value:
+ * if the PWM is disabled (CMOD[1:0] = 2b00), then CnV register
+ * is updated when CnV register is written.
+ *
+ * if the PWM is enabled (CMOD[1:0] ≠ 2b00), the duty length
+ * is latched into hardware when the next period starts.
+ */
+ writel(p->val, tpm->base + PWM_IMX_TPM_CnV(pwm->hwpwm));
+ duty_update = true;
+ }
+
+ /* make sure MOD & CnV registers are updated */
+ if (period_update || duty_update) {
+ timeout = jiffies + msecs_to_jiffies(tpm->real_period /
+ NSEC_PER_MSEC + 1);
+ while (readl(tpm->base + PWM_IMX_TPM_MOD) != p->mod
+ || readl(tpm->base + PWM_IMX_TPM_CnV(pwm->hwpwm))
+ != p->val) {
+ if (time_after(jiffies, timeout))
+ return -ETIME;
+ cpu_relax();
+ }
+ }
+
+ /*
+ * polarity settings enable/disable the output immediately,
+ * so if the channel is disabled, make sure MSA/MSB/ELS are
+ * set to 0, which means the channel is disabled.
+ */
+ val = readl(tpm->base + PWM_IMX_TPM_CnSC(pwm->hwpwm));
+ val &= ~(PWM_IMX_TPM_CnSC_ELS | PWM_IMX_TPM_CnSC_MSA |
+ PWM_IMX_TPM_CnSC_MSB);
+ if (state->enabled) {
+ /*
+ * set polarity (for edge-aligned PWM modes)
+ *
+ * ELS[1:0] = 2b10 yields normal polarity behaviour,
+ * ELS[1:0] = 2b01 yields inversed polarity.
+ * The other values are reserved.
+ */
+ val |= PWM_IMX_TPM_CnSC_MSB;
+ val |= (state->polarity == PWM_POLARITY_NORMAL) ?
+ PWM_IMX_TPM_CnSC_ELS_NORMAL :
+ PWM_IMX_TPM_CnSC_ELS_INVERSED;
+ }
+ writel(val, tpm->base + PWM_IMX_TPM_CnSC(pwm->hwpwm));
+
+ /* control the counter status */
+ if (state->enabled != c.enabled) {
+ val = readl(tpm->base + PWM_IMX_TPM_SC);
+ if (state->enabled) {
+ if (++tpm->enable_count == 1)
+ val |= PWM_IMX_TPM_SC_CMOD_INC_EVERY_CLK;
+ } else {
+ if (--tpm->enable_count == 0)
+ val &= ~PWM_IMX_TPM_SC_CMOD;
+ }
+ writel(val, tpm->base + PWM_IMX_TPM_SC);
+ }
+
+ return 0;
+}
+
+static int pwm_imx_tpm_apply(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ struct imx_tpm_pwm_param param;
+ struct pwm_state real_state;
+ int ret;
+
+ ret = pwm_imx_tpm_round_state(chip, &param, &real_state, state);
+ if (ret)
+ return ret;
+
+ mutex_lock(&tpm->lock);
+ ret = pwm_imx_tpm_apply_hw(chip, &param, &real_state, pwm);
+ mutex_unlock(&tpm->lock);
+
+ return ret;
+}
+
+static int pwm_imx_tpm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+
+ mutex_lock(&tpm->lock);
+ tpm->user_count++;
+ mutex_unlock(&tpm->lock);
+
+ return 0;
+}
+
+static void pwm_imx_tpm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+
+ mutex_lock(&tpm->lock);
+ tpm->user_count--;
+ mutex_unlock(&tpm->lock);
+}
+
+static const struct pwm_ops imx_tpm_pwm_ops = {
+ .request = pwm_imx_tpm_request,
+ .free = pwm_imx_tpm_free,
+ .get_state = pwm_imx_tpm_get_state,
+ .apply = pwm_imx_tpm_apply,
+ .owner = THIS_MODULE,
+};
+
+static int pwm_imx_tpm_probe(struct platform_device *pdev)
+{
+ struct imx_tpm_pwm_chip *tpm;
+ int ret;
+ u32 val;
+
+ tpm = devm_kzalloc(&pdev->dev, sizeof(*tpm), GFP_KERNEL);
+ if (!tpm)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, tpm);
+
+ tpm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tpm->base))
+ return PTR_ERR(tpm->base);
+
+ tpm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tpm->clk)) {
+ ret = PTR_ERR(tpm->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get PWM clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tpm->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to prepare or enable clock: %d\n", ret);
+ return ret;
+ }
+
+ tpm->chip.dev = &pdev->dev;
+ tpm->chip.ops = &imx_tpm_pwm_ops;
+ tpm->chip.base = -1;
+ tpm->chip.of_xlate = of_pwm_xlate_with_flags;
+ tpm->chip.of_pwm_n_cells = 3;
+
+ /* get number of channels */
+ val = readl(tpm->base + PWM_IMX_TPM_PARAM);
+ tpm->chip.npwm = FIELD_GET(PWM_IMX_TPM_PARAM_CHAN, val);
+
+ mutex_init(&tpm->lock);
+
+ ret = pwmchip_add(&tpm->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ clk_disable_unprepare(tpm->clk);
+ }
+
+ return ret;
+}
+
+static int pwm_imx_tpm_remove(struct platform_device *pdev)
+{
+ struct imx_tpm_pwm_chip *tpm = platform_get_drvdata(pdev);
+ int ret = pwmchip_remove(&tpm->chip);
+
+ clk_disable_unprepare(tpm->clk);
+
+ return ret;
+}
+
+static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
+{
+ struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
+
+ if (tpm->enable_count > 0)
+ return -EBUSY;
+
+ clk_disable_unprepare(tpm->clk);
+
+ return 0;
+}
+
+static int __maybe_unused pwm_imx_tpm_resume(struct device *dev)
+{
+ struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(tpm->clk);
+ if (ret)
+ dev_err(dev,
+ "failed to prepare or enable clock: %d\n",
+ ret);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(imx_tpm_pwm_pm,
+ pwm_imx_tpm_suspend, pwm_imx_tpm_resume);
+
+static const struct of_device_id imx_tpm_pwm_dt_ids[] = {
+ { .compatible = "fsl,imx7ulp-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_tpm_pwm_dt_ids);
+
+static struct platform_driver imx_tpm_pwm_driver = {
+ .driver = {
+ .name = "imx7ulp-tpm-pwm",
+ .of_match_table = imx_tpm_pwm_dt_ids,
+ .pm = &imx_tpm_pwm_pm,
+ },
+ .probe = pwm_imx_tpm_probe,
+ .remove = pwm_imx_tpm_remove,
+};
+module_platform_driver(imx_tpm_pwm_driver);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("i.MX TPM PWM Driver");
+MODULE_LICENSE("GPL v2");
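
To make the arithmetic in pwm_imx_tpm_round_state() easier to follow, here is a stand-alone sketch of the same period rounding. The 24 MHz TPM clock and the 10 ms request are assumed example inputs, not values taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define MOD_WIDTH 16u
#define MOD_MAX   ((1u << MOD_WIDTH) - 1)	/* PWM_IMX_TPM_MOD_MOD */

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	uint32_t rate = 24000000;		/* assumed TPM clock rate */
	uint64_t period_ns = 10 * 1000 * 1000;	/* requested period: 10 ms */
	uint32_t clock_unit, prescale, mod;
	uint64_t real_ns;

	/* clock_unit = round(period * rate / NSEC_PER_SEC) */
	clock_unit = (period_ns * rate + 500000000ull) / 1000000000ull;

	/* same prescale choice as pwm_imx_tpm_round_state() */
	prescale = clock_unit <= MOD_MAX ?
		   0 : ilog2_u32(clock_unit) + 1 - MOD_WIDTH;

	/* period count rounded to the nearest prescaled tick */
	mod = (clock_unit + ((1u << prescale) >> 1)) >> prescale;

	/* period the hardware will actually produce */
	real_ns = ((uint64_t)mod << prescale) * 1000000000ull / rate;

	printf("prescale=%u MOD=%u real period=%llu ns\n",
	       prescale, mod, (unsigned long long)real_ns);
	return 0;
}

With these inputs the sketch prints prescale=2, MOD=60000 and a real period of exactly 10000000 ns, i.e. the values the driver would program into PWM_IMX_TPM_SC and PWM_IMX_TPM_MOD.
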
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 806130654211..434a351fb626 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -291,7 +291,6 @@ MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids);
static int pwm_imx27_probe(struct platform_device *pdev)
{
struct pwm_imx27_chip *imx;
- struct resource *r;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (imx == NULL)
@@ -326,8 +325,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
imx->chip.of_xlate = of_pwm_xlate_with_flags;
imx->chip.of_pwm_n_cells = 3;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index c1ed641b3e26..fb5a369b1a8d 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -111,6 +111,10 @@ struct meson_pwm {
const struct meson_pwm_data *data;
void __iomem *base;
u8 inverter_mask;
+ /*
+ * Protects register (write) access to the REG_MISC_AB register
+ * that is shared between the two PWMs.
+ */
spinlock_t lock;
};
@@ -184,7 +188,7 @@ static int meson_pwm_calc(struct meson_pwm *meson,
do_div(fin_ps, fin_freq);
/* Calc pre_div with the period */
- for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) {
+ for (pre_div = 0; pre_div <= MISC_CLK_DIV_MASK; pre_div++) {
cnt = DIV_ROUND_CLOSEST_ULL((u64)period * 1000,
fin_ps * (pre_div + 1));
dev_dbg(meson->chip.dev, "fin_ps=%llu pre_div=%u cnt=%u\n",
@@ -193,7 +197,7 @@ static int meson_pwm_calc(struct meson_pwm *meson,
break;
}
- if (pre_div == MISC_CLK_DIV_MASK) {
+ if (pre_div > MISC_CLK_DIV_MASK) {
dev_err(meson->chip.dev, "unable to get period pre_div\n");
return -EINVAL;
}
@@ -235,6 +239,7 @@ static void meson_pwm_enable(struct meson_pwm *meson,
{
u32 value, clk_shift, clk_enable, enable;
unsigned int offset;
+ unsigned long flags;
switch (id) {
case 0:
@@ -255,6 +260,8 @@ static void meson_pwm_enable(struct meson_pwm *meson,
return;
}
+ spin_lock_irqsave(&meson->lock, flags);
+
value = readl(meson->base + REG_MISC_AB);
value &= ~(MISC_CLK_DIV_MASK << clk_shift);
value |= channel->pre_div << clk_shift;
@@ -267,11 +274,14 @@ static void meson_pwm_enable(struct meson_pwm *meson,
value = readl(meson->base + REG_MISC_AB);
value |= enable;
writel(value, meson->base + REG_MISC_AB);
+
+ spin_unlock_irqrestore(&meson->lock, flags);
}
static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
{
u32 value, enable;
+ unsigned long flags;
switch (id) {
case 0:
@@ -286,9 +296,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
return;
}
+ spin_lock_irqsave(&meson->lock, flags);
+
value = readl(meson->base + REG_MISC_AB);
value &= ~enable;
writel(value, meson->base + REG_MISC_AB);
+
+ spin_unlock_irqrestore(&meson->lock, flags);
}
static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -296,29 +310,21 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
struct meson_pwm *meson = to_meson_pwm(chip);
- unsigned long flags;
int err = 0;
if (!state)
return -EINVAL;
- spin_lock_irqsave(&meson->lock, flags);
-
if (!state->enabled) {
meson_pwm_disable(meson, pwm->hwpwm);
channel->state.enabled = false;
- goto unlock;
+ return 0;
}
if (state->period != channel->state.period ||
state->duty_cycle != channel->state.duty_cycle ||
state->polarity != channel->state.polarity) {
- if (channel->state.enabled) {
- meson_pwm_disable(meson, pwm->hwpwm);
- channel->state.enabled = false;
- }
-
if (state->polarity != channel->state.polarity) {
if (state->polarity == PWM_POLARITY_NORMAL)
meson->inverter_mask |= BIT(pwm->hwpwm);
@@ -329,7 +335,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
err = meson_pwm_calc(meson, channel, pwm->hwpwm,
state->duty_cycle, state->period);
if (err < 0)
- goto unlock;
+ return err;
channel->state.polarity = state->polarity;
channel->state.period = state->period;
@@ -341,9 +347,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
channel->state.enabled = true;
}
-unlock:
- spin_unlock_irqrestore(&meson->lock, flags);
- return err;
+ return 0;
}
static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -429,6 +433,24 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
.num_parents = ARRAY_SIZE(pwm_axg_ao_parent_names),
};
+static const char * const pwm_g12a_ao_cd_parent_names[] = {
+ "aoclk81", "xtal",
+};
+
+static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
+ .parent_names = pwm_g12a_ao_cd_parent_names,
+ .num_parents = ARRAY_SIZE(pwm_g12a_ao_cd_parent_names),
+};
+
+static const char * const pwm_g12a_ee_parent_names[] = {
+ "xtal", "hdmi_pll", "fclk_div4", "fclk_div3"
+};
+
+static const struct meson_pwm_data pwm_g12a_ee_data = {
+ .parent_names = pwm_g12a_ee_parent_names,
+ .num_parents = ARRAY_SIZE(pwm_g12a_ee_parent_names),
+};
+
static const struct of_device_id meson_pwm_matches[] = {
{
.compatible = "amlogic,meson8b-pwm",
@@ -450,6 +472,18 @@ static const struct of_device_id meson_pwm_matches[] = {
.compatible = "amlogic,meson-axg-ao-pwm",
.data = &pwm_axg_ao_data
},
+ {
+ .compatible = "amlogic,meson-g12a-ee-pwm",
+ .data = &pwm_g12a_ee_data
+ },
+ {
+ .compatible = "amlogic,meson-g12a-ao-pwm-ab",
+ .data = &pwm_axg_ao_data
+ },
+ {
+ .compatible = "amlogic,meson-g12a-ao-pwm-cd",
+ .data = &pwm_g12a_ao_cd_data
+ },
{},
};
MODULE_DEVICE_TABLE(of, meson_pwm_matches);
@@ -470,7 +504,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
init.name = name;
init.ops = &clk_mux_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
init.parent_names = meson->data->parent_names;
init.num_parents = meson->data->num_parents;
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index a7eaf962a95b..567f5e2771c4 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -176,7 +176,6 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
pm_runtime_put(pca->chip.dev);
mutex_lock(&pca->lock);
pwm = &pca->chip.pwms[offset];
- pwm_set_chip_data(pwm, NULL);
mutex_unlock(&pca->lock);
}
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 062f2cfc45ec..6674e1e80175 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -226,7 +226,7 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
return -EINVAL;
}
- our_chan = devm_kzalloc(chip->dev, sizeof(*our_chan), GFP_KERNEL);
+ our_chan = kzalloc(sizeof(*our_chan), GFP_KERNEL);
if (!our_chan)
return -ENOMEM;
@@ -237,8 +237,7 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- devm_kfree(chip->dev, pwm_get_chip_data(pwm));
- pwm_set_chip_data(pwm, NULL);
+ kfree(pwm_get_chip_data(pwm));
}
static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm)
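
The samsung, berlin and pca9685 hunks all drop their pwm_set_chip_data(pwm, NULL) calls because, with the pwm/core.c change above, pwm_put() clears the chip data itself once the .free callback has run; samsung additionally switches to kzalloc/kfree so the allocation follows the channel's request/free lifetime rather than the device lifetime. A minimal sketch of the resulting pattern, using a hypothetical driver name (foo) that is not part of the patch:

#include <linux/pwm.h>
#include <linux/slab.h>

struct foo_pwm_channel {
	unsigned int period_cycles;	/* whatever per-channel state is needed */
};

static int foo_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct foo_pwm_channel *chan;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	return pwm_set_chip_data(pwm, chan);
}

static void foo_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
	/* pwm_put() resets the chip data pointer after this callback */
	kfree(pwm_get_chip_data(pwm));
}
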
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index f7b8a86fa5c5..ad4a40c0f27c 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
}
/* Update shadow register first before modifying active register */
+ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
+ AQSFRC_RLDCSF_ZRO);
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/*
* Changes to immediate action on Action Qualifier. This puts
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index ceb233dd6048..719f8fada0a7 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -398,7 +398,7 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
/*
* If device_create() fails the pwm_chip is still usable by
- * the kernel its just not exported.
+ * the kernel, it's just not exported.
*/
parent = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
"pwmchip%d", chip->base);
@@ -411,19 +411,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
struct device *parent;
-
- parent = class_find_device(&pwm_class, NULL, chip,
- pwmchip_sysfs_match);
- if (parent) {
- /* for class_find_device() */
- put_device(parent);
- device_unregister(parent);
- }
-}
-
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
- struct device *parent;
unsigned int i;
parent = class_find_device(&pwm_class, NULL, chip,
@@ -439,6 +426,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
}
put_device(parent);
+ device_unregister(parent);
}
static int __init pwm_sysfs_init(void)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 1e1f42e210a0..4a4a75fa26d5 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -868,7 +868,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
pinned = get_user_pages_fast(
(unsigned long)xfer->loc_addr & PAGE_MASK,
- nr_pages, dir == DMA_FROM_DEVICE, page_list);
+ nr_pages,
+ dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
+ page_list);
if (pinned != nr_pages) {
if (pinned < 0) {
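
The rio_mport_cdev change above follows the get_user_pages_fast() interface switch from a write boolean to a FOLL_* flags argument; FOLL_WRITE must be set when the DMA direction means the device will write into the pinned pages. A minimal sketch of the new calling convention, with the helper name and its arguments being illustrative only:

#include <linux/mm.h>
#include <linux/dma-direction.h>

static int pin_xfer_pages(unsigned long uaddr, int nr_pages,
			  enum dma_data_direction dir,
			  struct page **page_list)
{
	/* pages the device will write into must be pinned writable */
	unsigned int gup_flags = dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0;

	return get_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
				   gup_flags, page_list);
}
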
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index cf45829585cb..b29fc258eeba 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -2147,6 +2147,14 @@ static int riocm_add_mport(struct device *dev,
mutex_init(&cm->rx_lock);
riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ if (!cm->rx_wq) {
+ riocm_error("failed to allocate IBMBOX_%d on %s",
+ cmbox, mport->name);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENOMEM;
+ }
+
INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
cm->tx_slot = 0;
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 2d9ec378a8bc..88e4f3ff0cb8 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -286,10 +286,10 @@ int cec_add_elem(u64 pfn)
if (!ce_arr.array || ce_arr.disabled)
return -ENODEV;
- ca->ces_entered++;
-
mutex_lock(&ce_mutex);
+ ca->ces_entered++;
+
if (ca->n == MAX_ELEMS)
WARN_ON(!del_lru_elem_unlocked(ca));
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 89bbd6e8bad1..9fd379732d18 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -77,11 +77,6 @@ struct pm800_regulator_info {
int max_ua;
};
-struct pm800_regulators {
- struct pm80x_chip *chip;
- struct regmap *map;
-};
-
/*
* vreg - the buck regs string.
* ereg - the string for the enable register.
@@ -235,7 +230,6 @@ static int pm800_regulator_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
- struct pm800_regulators *pm800_data;
struct regulator_config config = { };
struct regulator_init_data *init_data;
int i, ret;
@@ -252,18 +246,8 @@ static int pm800_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
- pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
- GFP_KERNEL);
- if (!pm800_data)
- return -ENOMEM;
-
- pm800_data->map = chip->subchip->regmap_power;
- pm800_data->chip = chip;
-
- platform_set_drvdata(pdev, pm800_data);
-
config.dev = chip->dev;
- config.regmap = pm800_data->map;
+ config.regmap = chip->subchip->regmap_power;
for (i = 0; i < PM800_ID_RG_MAX; i++) {
struct regulator_dev *regulator;
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 753a6a1b30c3..35d767aeeb57 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -235,6 +235,8 @@ static const struct regulator_ops pm8606_preg_ops = {
{ \
.desc = { \
.name = "PREG", \
+ .of_match = of_match_ptr("PREG"), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &pm8606_preg_ops, \
.type = REGULATOR_CURRENT, \
.id = PM8606_ID_PREG, \
@@ -249,6 +251,8 @@ static const struct regulator_ops pm8606_preg_ops = {
{ \
.desc = { \
.name = #vreg, \
+ .of_match = of_match_ptr(#vreg), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &pm8607_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_##vreg, \
@@ -270,6 +274,8 @@ static const struct regulator_ops pm8606_preg_ops = {
{ \
.desc = { \
.name = "LDO" #_id, \
+ .of_match = of_match_ptr("LDO" #_id), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &pm8607_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_LDO##_id, \
@@ -309,36 +315,6 @@ static struct pm8607_regulator_info pm8606_regulator_info[] = {
PM8606_PREG(PREREGULATORB, 5),
};
-#ifdef CONFIG_OF
-static int pm8607_regulator_dt_init(struct platform_device *pdev,
- struct pm8607_regulator_info *info,
- struct regulator_config *config)
-{
- struct device_node *nproot, *np;
- nproot = pdev->dev.parent->of_node;
- if (!nproot)
- return -ENODEV;
- nproot = of_get_child_by_name(nproot, "regulators");
- if (!nproot) {
- dev_err(&pdev->dev, "failed to find regulators node\n");
- return -ENODEV;
- }
- for_each_child_of_node(nproot, np) {
- if (of_node_name_eq(np, info->desc.name)) {
- config->init_data =
- of_get_regulator_init_data(&pdev->dev, np,
- &info->desc);
- config->of_node = np;
- break;
- }
- }
- of_node_put(nproot);
- return 0;
-}
-#else
-#define pm8607_regulator_dt_init(x, y, z) (-1)
-#endif
-
static int pm8607_regulator_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -373,12 +349,11 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
if ((i == PM8607_ID_BUCK3) && chip->buck3_double)
info->slope_double = 1;
- config.dev = &pdev->dev;
+ config.dev = chip->dev;
config.driver_data = info;
- if (pm8607_regulator_dt_init(pdev, info, &config))
- if (pdata)
- config.init_data = pdata;
+ if (pdata)
+ config.init_data = pdata;
if (chip->id == CHIP_PM8607)
config.regmap = chip->regmap;
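
The 88pm8607 hunks above (and the ab8500-ext and act8865 ones further down) replace hand-rolled of_regulator_match()/of_get_regulator_init_data() parsing with the regulator core's own DT lookup: filling .of_match and .regulators_node in the regulator_desc lets devm_regulator_register() locate the matching child of the "regulators" node and parse its init data. A minimal sketch of that pattern, with placeholder names and register offsets that are not taken from the patch:

#include <linux/bits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>

/* placeholder register layout, for illustration only */
#define FOO_LDO1_ENABLE_REG	0x10
#define FOO_LDO1_ENABLE_MASK	BIT(0)

static const struct regulator_ops foo_ldo_ops = {
	.enable		= regulator_enable_regmap,
	.disable	= regulator_disable_regmap,
	.is_enabled	= regulator_is_enabled_regmap,
};

static const struct regulator_desc foo_ldo_desc = {
	.name			= "LDO1",
	/* matched against a child called "LDO1" under "regulators" */
	.of_match		= of_match_ptr("LDO1"),
	.regulators_node	= of_match_ptr("regulators"),
	.id			= 0,
	.type			= REGULATOR_VOLTAGE,
	.owner			= THIS_MODULE,
	.ops			= &foo_ldo_ops,
	.enable_reg		= FOO_LDO1_ENABLE_REG,
	.enable_mask		= FOO_LDO1_ENABLE_MASK,
};

With config.dev and config.regmap set, devm_regulator_register(dev, &foo_ldo_desc, &config) should then need no per-regulator of_node or init_data from the driver.
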
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index b7f249ee5e68..6c37f0df9323 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -223,6 +223,7 @@ config REGULATOR_CPCAP
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
+ depends on !CC_IS_CLANG # https://bugs.llvm.org/show_bug.cgi?id=38789
help
Say y here to support the BUCKs and LDOs regulators found on
Dialog Semiconductor DA9030/DA9034 PMIC.
@@ -839,6 +840,13 @@ config REGULATOR_STM32_VREFBUF
This driver can also be built as a module. If so, the module
will be called stm32-vrefbuf.
+config REGULATOR_STM32_PWR
+ bool "STMicroelectronics STM32 PWR"
+ depends on ARCH_STM32 || COMPILE_TEST
+ help
+ This driver supports internal regulators (1V1, 1V8, 3V3) in the
+ STMicroelectronics STM32 chips.
+
config REGULATOR_STPMIC1
tristate "STMicroelectronics STPMIC1 PMIC Regulators"
depends on MFD_STPMIC1
@@ -1010,7 +1018,8 @@ config REGULATOR_TWL4030
config REGULATOR_UNIPHIER
tristate "UniPhier regulator driver"
depends on ARCH_UNIPHIER || COMPILE_TEST
- depends on OF && MFD_SYSCON
+ depends on OF
+ select REGMAP_MMIO
default ARCH_UNIPHIER
help
Support for regulators implemented on Socionext UniPhier SoCs.
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 1169f8a27d91..93f53840e8f1 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -105,6 +105,7 @@ obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
+obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index de2644490f0d..438509f55f05 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -48,7 +48,6 @@
* @regreg: regulator register number in the AB3100
*/
struct ab3100_regulator {
- struct regulator_dev *rdev;
struct device *dev;
struct ab3100_platform_data *plfdata;
u8 regreg;
@@ -354,14 +353,13 @@ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg)
return 0;
}
-static struct regulator_ops regulator_ops_fixed = {
- .list_voltage = regulator_list_voltage_linear,
+static const struct regulator_ops regulator_ops_fixed = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
};
-static struct regulator_ops regulator_ops_variable = {
+static const struct regulator_ops regulator_ops_variable = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
@@ -370,7 +368,7 @@ static struct regulator_ops regulator_ops_variable = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops regulator_ops_variable_sleepable = {
+static const struct regulator_ops regulator_ops_variable_sleepable = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
@@ -386,14 +384,14 @@ static struct regulator_ops regulator_ops_variable_sleepable = {
 * is an on/off switch plain and simple. The external
* voltage is defined in the board set-up if any.
*/
-static struct regulator_ops regulator_ops_external = {
+static const struct regulator_ops regulator_ops_external = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
.get_voltage = ab3100_get_voltage_regulator_external,
};
-static struct regulator_desc
+static const struct regulator_desc
ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
{
.name = "LDO_A",
@@ -402,7 +400,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
- .min_uV = LDO_A_VOLTAGE,
+ .fixed_uV = LDO_A_VOLTAGE,
.enable_time = 200,
},
{
@@ -412,7 +410,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
- .min_uV = LDO_C_VOLTAGE,
+ .fixed_uV = LDO_C_VOLTAGE,
.enable_time = 200,
},
{
@@ -422,7 +420,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
- .min_uV = LDO_D_VOLTAGE,
+ .fixed_uV = LDO_D_VOLTAGE,
.enable_time = 200,
},
{
@@ -500,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
struct device_node *np,
unsigned long id)
{
- struct regulator_desc *desc;
+ const struct regulator_desc *desc;
struct ab3100_regulator *reg;
struct regulator_dev *rdev;
struct regulator_config config = { };
@@ -545,8 +543,6 @@ static int ab3100_regulator_register(struct platform_device *pdev,
return err;
}
- /* Then set a pointer back to the registered regulator */
- reg->rdev = rdev;
return 0;
}
@@ -609,18 +605,6 @@ static const u8 ab3100_reg_initvals[] = {
LDO_D_SETTING,
};
-static int ab3100_regulators_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
- struct ab3100_regulator *reg = &ab3100_regulators[i];
-
- reg->rdev = NULL;
- }
- return 0;
-}
-
static int
ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
{
@@ -647,10 +631,8 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
pdev, NULL, ab3100_regulator_matches[i].init_data,
ab3100_regulator_matches[i].of_node,
(unsigned long)ab3100_regulator_matches[i].driver_data);
- if (err) {
- ab3100_regulators_remove(pdev);
+ if (err)
return err;
- }
}
return 0;
@@ -705,14 +687,12 @@ static int ab3100_regulators_probe(struct platform_device *pdev)
/* Register the regulators */
for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
- struct regulator_desc *desc = &ab3100_regulator_desc[i];
+ const struct regulator_desc *desc = &ab3100_regulator_desc[i];
err = ab3100_regulator_register(pdev, plfdata, NULL, NULL,
desc->id);
- if (err) {
- ab3100_regulators_remove(pdev);
+ if (err)
return err;
- }
}
return 0;
@@ -723,7 +703,6 @@ static struct platform_driver ab3100_regulators_driver = {
.name = "ab3100-regulators",
},
.probe = ab3100_regulators_probe,
- .remove = ab3100_regulators_remove,
};
static __init int ab3100_regulators_init(void)
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
index 2ca00045eb99..95704446d89e 100644
--- a/drivers/regulator/ab8500-ext.c
+++ b/drivers/regulator/ab8500-ext.c
@@ -479,7 +479,6 @@ static struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
* struct ab8500_ext_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
- * @rdev: regulator device
* @cfg: regulator configuration (extension of regulator FW configuration)
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
@@ -495,7 +494,6 @@ static struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
struct ab8500_ext_regulator_info {
struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *rdev;
struct ab8500_ext_regulator_cfg *cfg;
u8 update_bank;
u8 update_reg;
@@ -530,7 +528,7 @@ static int ab8500_ext_regulator_enable(struct regulator_dev *rdev)
info->update_bank, info->update_reg,
info->update_mask, regval);
if (ret < 0) {
- dev_err(rdev_get_dev(info->rdev),
+ dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
return ret;
}
@@ -566,7 +564,7 @@ static int ab8500_ext_regulator_disable(struct regulator_dev *rdev)
info->update_bank, info->update_reg,
info->update_mask, regval);
if (ret < 0) {
- dev_err(rdev_get_dev(info->rdev),
+ dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
return ret;
}
@@ -720,7 +718,7 @@ static int ab8500_ext_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static struct regulator_ops ab8500_ext_regulator_ops = {
+static const struct regulator_ops ab8500_ext_regulator_ops = {
.enable = ab8500_ext_regulator_enable,
.disable = ab8500_ext_regulator_disable,
.is_enabled = ab8500_ext_regulator_is_enabled,
@@ -735,6 +733,7 @@ static struct ab8500_ext_regulator_info
[AB8500_EXT_SUPPLY1] = {
.desc = {
.name = "VEXTSUPPLY1",
+ .of_match = of_match_ptr("ab8500_ext1"),
.ops = &ab8500_ext_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_EXT_SUPPLY1,
@@ -752,6 +751,7 @@ static struct ab8500_ext_regulator_info
[AB8500_EXT_SUPPLY2] = {
.desc = {
.name = "VEXTSUPPLY2",
+ .of_match = of_match_ptr("ab8500_ext2"),
.ops = &ab8500_ext_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_EXT_SUPPLY2,
@@ -769,6 +769,7 @@ static struct ab8500_ext_regulator_info
[AB8500_EXT_SUPPLY3] = {
.desc = {
.name = "VEXTSUPPLY3",
+ .of_match = of_match_ptr("ab8500_ext3"),
.ops = &ab8500_ext_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_EXT_SUPPLY3,
@@ -785,30 +786,13 @@ static struct ab8500_ext_regulator_info
},
};
-static struct of_regulator_match ab8500_ext_regulator_match[] = {
- { .name = "ab8500_ext1", .driver_data = (void *) AB8500_EXT_SUPPLY1, },
- { .name = "ab8500_ext2", .driver_data = (void *) AB8500_EXT_SUPPLY2, },
- { .name = "ab8500_ext3", .driver_data = (void *) AB8500_EXT_SUPPLY3, },
-};
-
static int ab8500_ext_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_regulator_platform_data *pdata = &ab8500_regulator_plat_data;
- struct device_node *np = pdev->dev.of_node;
struct regulator_config config = { };
- int i, err;
-
- if (np) {
- err = of_regulator_match(&pdev->dev, np,
- ab8500_ext_regulator_match,
- ARRAY_SIZE(ab8500_ext_regulator_match));
- if (err < 0) {
- dev_err(&pdev->dev,
- "Error parsing regulator init data: %d\n", err);
- return err;
- }
- }
+ struct regulator_dev *rdev;
+ int i;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
@@ -844,23 +828,18 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.driver_data = info;
- config.of_node = ab8500_ext_regulator_match[i].of_node;
- config.init_data = (np) ?
- ab8500_ext_regulator_match[i].init_data :
- &pdata->ext_regulator[i];
+ config.init_data = &pdata->ext_regulator[i];
/* register regulator with framework */
- info->rdev = devm_regulator_register(&pdev->dev, &info->desc,
- &config);
- if (IS_ERR(info->rdev)) {
- err = PTR_ERR(info->rdev);
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- return err;
+ return PTR_ERR(rdev);
}
- dev_dbg(rdev_get_dev(info->rdev),
- "%s-probed\n", info->desc.name);
+ dev_dbg(&pdev->dev, "%s-probed\n", info->desc.name);
}
return 0;
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 83dba3fbfe0c..3fcb4cbaab02 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -44,7 +44,6 @@ struct ab8500_shared_mode {
* struct ab8500_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
- * @regulator_dev: regulator device
* @shared_mode: used when mode is shared between two regulators
* @load_lp_uA: maximum load in idle (low power) mode
* @update_bank: bank to control on/off
@@ -65,7 +64,6 @@ struct ab8500_shared_mode {
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *regulator;
struct ab8500_shared_mode *shared_mode;
int load_lp_uA;
u8 update_bank;
@@ -510,7 +508,7 @@ static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
-static struct regulator_ops ab8500_regulator_volt_mode_ops = {
+static const struct regulator_ops ab8500_regulator_volt_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -522,7 +520,7 @@ static struct regulator_ops ab8500_regulator_volt_mode_ops = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8500_regulator_volt_ops = {
+static const struct regulator_ops ab8500_regulator_volt_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -531,7 +529,7 @@ static struct regulator_ops ab8500_regulator_volt_ops = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8500_regulator_mode_ops = {
+static const struct regulator_ops ab8500_regulator_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -541,14 +539,14 @@ static struct regulator_ops ab8500_regulator_mode_ops = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8500_regulator_ops = {
+static const struct regulator_ops ab8500_regulator_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8500_regulator_anamic_mode_ops = {
+static const struct regulator_ops ab8500_regulator_anamic_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -1600,6 +1598,7 @@ static int ab8500_regulator_register(struct platform_device *pdev,
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_regulator_info *info = NULL;
struct regulator_config config = { };
+ struct regulator_dev *rdev;
/* assign per-regulator data */
info = &abx500_regulator.info[id];
@@ -1621,12 +1620,11 @@ static int ab8500_regulator_register(struct platform_device *pdev,
}
/* register regulator with framework */
- info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
- &config);
- if (IS_ERR(info->regulator)) {
+ rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- return PTR_ERR(info->regulator);
+ return PTR_ERR(rdev);
}
return 0;
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index e0239cf3f56d..19d9ee2dac1f 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -226,7 +226,7 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
};
-static struct regulator_ops act8865_ops = {
+static const struct regulator_ops act8865_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -236,7 +236,7 @@ static struct regulator_ops act8865_ops = {
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops act8865_ldo_ops = {
+static const struct regulator_ops act8865_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -245,6 +245,8 @@ static struct regulator_ops act8865_ldo_ops = {
#define ACT88xx_REG(_name, _family, _id, _vsel_reg, _supply) \
[_family##_ID_##_id] = { \
.name = _name, \
+ .of_match = of_match_ptr(_name), \
+ .regulators_node = of_match_ptr("regulators"), \
.supply_name = _supply, \
.id = _family##_ID_##_id, \
.type = REGULATOR_VOLTAGE, \
@@ -265,6 +267,8 @@ static const struct regulator_desc act8600_regulators[] = {
ACT88xx_REG("DCDC3", ACT8600, DCDC3, VSET, "vp3"),
{
.name = "SUDCDC_REG4",
+ .of_match = of_match_ptr("SUDCDC_REG4"),
+ .regulators_node = of_match_ptr("regulators"),
.id = ACT8600_ID_SUDCDC4,
.ops = &act8865_ops,
.type = REGULATOR_VOLTAGE,
@@ -283,6 +287,8 @@ static const struct regulator_desc act8600_regulators[] = {
ACT88xx_REG("LDO8", ACT8600, LDO8, VSET, "inl"),
{
.name = "LDO_REG9",
+ .of_match = of_match_ptr("LDO_REG9"),
+ .regulators_node = of_match_ptr("regulators"),
.id = ACT8600_ID_LDO9,
.ops = &act8865_ldo_ops,
.type = REGULATOR_VOLTAGE,
@@ -294,6 +300,8 @@ static const struct regulator_desc act8600_regulators[] = {
},
{
.name = "LDO_REG10",
+ .of_match = of_match_ptr("LDO_REG10"),
+ .regulators_node = of_match_ptr("regulators"),
.id = ACT8600_ID_LDO10,
.ops = &act8865_ldo_ops,
.type = REGULATOR_VOLTAGE,
@@ -348,110 +356,6 @@ static const struct of_device_id act8865_dt_ids[] = {
{ }
};
MODULE_DEVICE_TABLE(of, act8865_dt_ids);
-
-static struct of_regulator_match act8846_matches[] = {
- [ACT8846_ID_REG1] = { .name = "REG1" },
- [ACT8846_ID_REG2] = { .name = "REG2" },
- [ACT8846_ID_REG3] = { .name = "REG3" },
- [ACT8846_ID_REG4] = { .name = "REG4" },
- [ACT8846_ID_REG5] = { .name = "REG5" },
- [ACT8846_ID_REG6] = { .name = "REG6" },
- [ACT8846_ID_REG7] = { .name = "REG7" },
- [ACT8846_ID_REG8] = { .name = "REG8" },
- [ACT8846_ID_REG9] = { .name = "REG9" },
- [ACT8846_ID_REG10] = { .name = "REG10" },
- [ACT8846_ID_REG11] = { .name = "REG11" },
- [ACT8846_ID_REG12] = { .name = "REG12" },
-};
-
-static struct of_regulator_match act8865_matches[] = {
- [ACT8865_ID_DCDC1] = { .name = "DCDC_REG1"},
- [ACT8865_ID_DCDC2] = { .name = "DCDC_REG2"},
- [ACT8865_ID_DCDC3] = { .name = "DCDC_REG3"},
- [ACT8865_ID_LDO1] = { .name = "LDO_REG1"},
- [ACT8865_ID_LDO2] = { .name = "LDO_REG2"},
- [ACT8865_ID_LDO3] = { .name = "LDO_REG3"},
- [ACT8865_ID_LDO4] = { .name = "LDO_REG4"},
-};
-
-static struct of_regulator_match act8600_matches[] = {
- [ACT8600_ID_DCDC1] = { .name = "DCDC_REG1"},
- [ACT8600_ID_DCDC2] = { .name = "DCDC_REG2"},
- [ACT8600_ID_DCDC3] = { .name = "DCDC_REG3"},
- [ACT8600_ID_SUDCDC4] = { .name = "SUDCDC_REG4"},
- [ACT8600_ID_LDO5] = { .name = "LDO_REG5"},
- [ACT8600_ID_LDO6] = { .name = "LDO_REG6"},
- [ACT8600_ID_LDO7] = { .name = "LDO_REG7"},
- [ACT8600_ID_LDO8] = { .name = "LDO_REG8"},
- [ACT8600_ID_LDO9] = { .name = "LDO_REG9"},
- [ACT8600_ID_LDO10] = { .name = "LDO_REG10"},
-};
-
-static int act8865_pdata_from_dt(struct device *dev,
- struct act8865_platform_data *pdata,
- unsigned long type)
-{
- int matched, i, num_matches;
- struct device_node *np;
- struct act8865_regulator_data *regulator;
- struct of_regulator_match *matches;
-
- switch (type) {
- case ACT8600:
- matches = act8600_matches;
- num_matches = ARRAY_SIZE(act8600_matches);
- break;
- case ACT8846:
- matches = act8846_matches;
- num_matches = ARRAY_SIZE(act8846_matches);
- break;
- case ACT8865:
- matches = act8865_matches;
- num_matches = ARRAY_SIZE(act8865_matches);
- break;
- default:
- dev_err(dev, "invalid device id %lu\n", type);
- return -EINVAL;
- }
-
- np = of_get_child_by_name(dev->of_node, "regulators");
- if (!np) {
- dev_err(dev, "missing 'regulators' subnode in DT\n");
- return -EINVAL;
- }
-
- matched = of_regulator_match(dev, np, matches, num_matches);
- of_node_put(np);
- if (matched <= 0)
- return matched;
-
- pdata->regulators = devm_kcalloc(dev,
- num_matches,
- sizeof(struct act8865_regulator_data),
- GFP_KERNEL);
- if (!pdata->regulators)
- return -ENOMEM;
-
- pdata->num_regulators = num_matches;
- regulator = pdata->regulators;
-
- for (i = 0; i < num_matches; i++) {
- regulator->id = i;
- regulator->name = matches[i].name;
- regulator->init_data = matches[i].init_data;
- regulator->of_node = matches[i].of_node;
- regulator++;
- }
-
- return 0;
-}
-#else
-static inline int act8865_pdata_from_dt(struct device *dev,
- struct act8865_platform_data *pdata,
- unsigned long type)
-{
- return 0;
-}
#endif
static struct act8865_regulator_data *act8865_get_regulator_data(
@@ -459,9 +363,6 @@ static struct act8865_regulator_data *act8865_get_regulator_data(
{
int i;
- if (!pdata)
- return NULL;
-
for (i = 0; i < pdata->num_regulators; i++) {
if (pdata->regulators[i].id == id)
return &pdata->regulators[i];
@@ -484,7 +385,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
const struct regulator_desc *regulators;
- struct act8865_platform_data pdata_of, *pdata;
+ struct act8865_platform_data *pdata = NULL;
struct device *dev = &client->dev;
int i, ret, num_regulators;
struct act8865 *act8865;
@@ -493,9 +394,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
int off_reg, off_mask;
int voltage_select = 0;
- pdata = dev_get_platdata(dev);
-
- if (dev->of_node && !pdata) {
+ if (dev->of_node) {
const struct of_device_id *id;
id = of_match_device(of_match_ptr(act8865_dt_ids), dev);
@@ -509,6 +408,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
NULL);
} else {
type = i2c_id->driver_data;
+ pdata = dev_get_platdata(dev);
}
switch (type) {
@@ -543,14 +443,6 @@ static int act8865_pmic_probe(struct i2c_client *client,
return -EINVAL;
}
- if (dev->of_node && !pdata) {
- ret = act8865_pdata_from_dt(dev, &pdata_of, type);
- if (ret < 0)
- return ret;
-
- pdata = &pdata_of;
- }
-
act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
if (!act8865)
return -ENOMEM;
@@ -577,17 +469,20 @@ static int act8865_pmic_probe(struct i2c_client *client,
for (i = 0; i < num_regulators; i++) {
const struct regulator_desc *desc = &regulators[i];
struct regulator_config config = { };
- struct act8865_regulator_data *rdata;
struct regulator_dev *rdev;
config.dev = dev;
config.driver_data = act8865;
config.regmap = act8865->regmap;
- rdata = act8865_get_regulator_data(desc->id, pdata);
- if (rdata) {
- config.init_data = rdata->init_data;
- config.of_node = rdata->of_node;
+ if (pdata) {
+ struct act8865_regulator_data *rdata;
+
+ rdata = act8865_get_regulator_data(desc->id, pdata);
+ if (rdata) {
+ config.init_data = rdata->init_data;
+ config.of_node = rdata->of_node;
+ }
}
rdev = devm_regulator_register(dev, desc, &config);
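
The act8865 hunks above drop the hand-rolled of_regulator_match() parsing in favour of the regulator core's DT handling: when a regulator_desc carries .of_match and .regulators_node, the core looks up the child node and its init_data itself at registration time. A minimal sketch of that pattern follows; the "EXAMPLE_*" names, register offsets and the parent-device arrangement are placeholders, not taken from this driver.

#include <linux/bits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops example_ldo_ops = {
	.enable		= regulator_enable_regmap,
	.disable	= regulator_disable_regmap,
	.is_enabled	= regulator_is_enabled_regmap,
};

static const struct regulator_desc example_ldo_desc = {
	.name		 = "EXAMPLE_LDO1",
	.of_match	 = of_match_ptr("EXAMPLE_LDO1"),	/* child node name */
	.regulators_node = of_match_ptr("regulators"),		/* parent subnode */
	.id		 = 0,
	.type		 = REGULATOR_VOLTAGE,
	.owner		 = THIS_MODULE,
	.ops		 = &example_ldo_ops,
	.enable_reg	 = 0x12,		/* hypothetical register */
	.enable_mask	 = BIT(0),		/* hypothetical bit */
};

static int example_regulator_probe(struct platform_device *pdev)
{
	struct regulator_config config = { };
	struct regulator_dev *rdev;

	/* config.dev must be the device whose DT node holds "regulators" */
	config.dev = pdev->dev.parent;
	config.regmap = dev_get_regmap(pdev->dev.parent, NULL);

	/* no init_data/of_node set here: the core parses them from DT */
	rdev = devm_regulator_register(&pdev->dev, &example_ldo_desc, &config);
	return PTR_ERR_OR_ZERO(rdev);
}
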
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index d9d8155ed8cb..754739d004e5 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -23,18 +23,10 @@
#define LDO_FET_FULL_ON 0x1f
struct anatop_regulator {
- u32 control_reg;
- struct regmap *anatop;
- int vol_bit_shift;
- int vol_bit_width;
u32 delay_reg;
int delay_bit_shift;
int delay_bit_width;
- int min_bit_val;
- int min_voltage;
- int max_voltage;
struct regulator_desc rdesc;
- struct regulator_init_data *initdata;
bool bypass;
int sel;
};
@@ -55,7 +47,7 @@ static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
* to calculate how many steps LDO need to
* ramp up, and how much delay needed. (us)
*/
- regmap_read(anatop_reg->anatop, anatop_reg->delay_reg, &val);
+ regmap_read(reg->regmap, anatop_reg->delay_reg, &val);
val = (val >> anatop_reg->delay_bit_shift) &
((1 << anatop_reg->delay_bit_width) - 1);
ret = (new_sel - old_sel) * (LDO_RAMP_UP_UNIT_IN_CYCLES <<
@@ -170,6 +162,13 @@ static int anatop_regulator_probe(struct platform_device *pdev)
struct anatop_regulator *sreg;
struct regulator_init_data *initdata;
struct regulator_config config = { };
+ struct regmap *regmap;
+ u32 control_reg;
+ u32 vol_bit_shift;
+ u32 vol_bit_width;
+ u32 min_bit_val;
+ u32 min_voltage;
+ u32 max_voltage;
int ret = 0;
u32 val;
@@ -192,48 +191,41 @@ static int anatop_regulator_probe(struct platform_device *pdev)
return -ENOMEM;
initdata->supply_regulator = "vin";
- sreg->initdata = initdata;
anatop_np = of_get_parent(np);
if (!anatop_np)
return -ENODEV;
- sreg->anatop = syscon_node_to_regmap(anatop_np);
+ regmap = syscon_node_to_regmap(anatop_np);
of_node_put(anatop_np);
- if (IS_ERR(sreg->anatop))
- return PTR_ERR(sreg->anatop);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- ret = of_property_read_u32(np, "anatop-reg-offset",
- &sreg->control_reg);
+ ret = of_property_read_u32(np, "anatop-reg-offset", &control_reg);
if (ret) {
dev_err(dev, "no anatop-reg-offset property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-vol-bit-width",
- &sreg->vol_bit_width);
+ ret = of_property_read_u32(np, "anatop-vol-bit-width", &vol_bit_width);
if (ret) {
dev_err(dev, "no anatop-vol-bit-width property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-vol-bit-shift",
- &sreg->vol_bit_shift);
+ ret = of_property_read_u32(np, "anatop-vol-bit-shift", &vol_bit_shift);
if (ret) {
dev_err(dev, "no anatop-vol-bit-shift property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-min-bit-val",
- &sreg->min_bit_val);
+ ret = of_property_read_u32(np, "anatop-min-bit-val", &min_bit_val);
if (ret) {
dev_err(dev, "no anatop-min-bit-val property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-min-voltage",
- &sreg->min_voltage);
+ ret = of_property_read_u32(np, "anatop-min-voltage", &min_voltage);
if (ret) {
dev_err(dev, "no anatop-min-voltage property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-max-voltage",
- &sreg->max_voltage);
+ ret = of_property_read_u32(np, "anatop-max-voltage", &max_voltage);
if (ret) {
dev_err(dev, "no anatop-max-voltage property set\n");
return ret;
@@ -247,24 +239,23 @@ static int anatop_regulator_probe(struct platform_device *pdev)
of_property_read_u32(np, "anatop-delay-bit-shift",
&sreg->delay_bit_shift);
- rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage) / 25000 + 1
- + sreg->min_bit_val;
- rdesc->min_uV = sreg->min_voltage;
+ rdesc->n_voltages = (max_voltage - min_voltage) / 25000 + 1
+ + min_bit_val;
+ rdesc->min_uV = min_voltage;
rdesc->uV_step = 25000;
- rdesc->linear_min_sel = sreg->min_bit_val;
- rdesc->vsel_reg = sreg->control_reg;
- rdesc->vsel_mask = ((1 << sreg->vol_bit_width) - 1) <<
- sreg->vol_bit_shift;
+ rdesc->linear_min_sel = min_bit_val;
+ rdesc->vsel_reg = control_reg;
+ rdesc->vsel_mask = ((1 << vol_bit_width) - 1) << vol_bit_shift;
rdesc->min_dropout_uV = 125000;
config.dev = &pdev->dev;
config.init_data = initdata;
config.driver_data = sreg;
config.of_node = pdev->dev.of_node;
- config.regmap = sreg->anatop;
+ config.regmap = regmap;
/* Only core regulators have the ramp up delay configuration. */
- if (sreg->control_reg && sreg->delay_bit_width) {
+ if (control_reg && sreg->delay_bit_width) {
rdesc->ops = &anatop_core_rops;
ret = regmap_read(config.regmap, rdesc->vsel_reg, &val);
@@ -273,7 +264,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
return ret;
}
- sreg->sel = (val & rdesc->vsel_mask) >> sreg->vol_bit_shift;
+ sreg->sel = (val & rdesc->vsel_mask) >> vol_bit_shift;
if (sreg->sel == LDO_FET_FULL_ON) {
sreg->sel = 0;
sreg->bypass = true;
@@ -306,7 +297,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
anatop_rops.disable = regulator_disable_regmap;
anatop_rops.is_enabled = regulator_is_enabled_regmap;
- rdesc->enable_reg = sreg->control_reg;
+ rdesc->enable_reg = control_reg;
rdesc->enable_mask = BIT(enable_bit);
}
}
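
The anatop changes stop caching the syscon regmap and register layout in the driver's private struct: the regmap is handed to the core once through config.regmap, and the ops read it back via rdev->regmap. A minimal sketch of an op written that way; the EXAMPLE_* register and scaling values are placeholders, not anatop definitions.

#include <linux/regmap.h>
#include <linux/regulator/driver.h>

#define EXAMPLE_DELAY_REG	0x170	/* hypothetical delay register */
#define EXAMPLE_STEP_DELAY_US	2	/* hypothetical: us per selector step */

static int example_set_voltage_time_sel(struct regulator_dev *rdev,
					unsigned int old_sel,
					unsigned int new_sel)
{
	u32 val;
	int ret;

	/* read through the regmap the core stored from config.regmap */
	ret = regmap_read(rdev->regmap, EXAMPLE_DELAY_REG, &val);
	if (ret)
		return ret;

	if (new_sel <= old_sel)
		return 0;

	/* return the ramp time in microseconds for the selector change */
	return (new_sel - old_sel) * EXAMPLE_STEP_DELAY_US * (val + 1);
}
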
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index bf3ab405eed1..e4bc7b1e5ccd 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -1,15 +1,10 @@
-/*
- * arizona-ldo1.c -- LDO1 supply for Arizona devices
- *
- * Copyright 2012 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// arizona-ldo1.c -- LDO1 supply for Arizona devices
+//
+// Copyright 2012 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index 120de94caf02..be0d46da51a1 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -1,15 +1,10 @@
-/*
- * arizona-micsupp.c -- Microphone supply for Arizona devices
- *
- * Copyright 2012 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// arizona-micsupp.c -- Microphone supply for Arizona devices
+//
+// Copyright 2012 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index f7fe218bb3e4..ece88103f2fd 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -17,14 +17,6 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
-struct as3711_regulator_info {
- struct regulator_desc desc;
-};
-
-struct as3711_regulator {
- struct as3711_regulator_info *reg_info;
-};
-
/*
* The regulator API supports 4 modes of operation: FAST, NORMAL, IDLE and
* STANDBY. We map them in the following way to AS3711 SD1-4 DCDC modes:
@@ -129,7 +121,6 @@ static const struct regulator_linear_range as3711_dldo_ranges[] = {
#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _sfx) \
[AS3711_REGULATOR_ ## _id] = { \
- .desc = { \
.name = "as3711-regulator-" # _id, \
.id = AS3711_REGULATOR_ ## _id, \
.n_voltages = (_vmask + 1), \
@@ -142,10 +133,9 @@ static const struct regulator_linear_range as3711_dldo_ranges[] = {
.enable_mask = BIT(_en_bit), \
.linear_ranges = as3711_ ## _sfx ## _ranges, \
.n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges), \
- }, \
}
-static struct as3711_regulator_info as3711_reg_info[] = {
+static const struct regulator_desc as3711_reg_desc[] = {
AS3711_REG(SD_1, SD_CONTROL, 0, 0x7f, sd),
AS3711_REG(SD_2, SD_CONTROL, 1, 0x7f, sd),
AS3711_REG(SD_3, SD_CONTROL, 2, 0x7f, sd),
@@ -161,7 +151,7 @@ static struct as3711_regulator_info as3711_reg_info[] = {
/* StepUp output voltage depends on supplying regulator */
};
-#define AS3711_REGULATOR_NUM ARRAY_SIZE(as3711_reg_info)
+#define AS3711_REGULATOR_NUM ARRAY_SIZE(as3711_reg_desc)
static struct of_regulator_match
as3711_regulator_matches[AS3711_REGULATOR_NUM] = {
@@ -215,11 +205,8 @@ static int as3711_regulator_probe(struct platform_device *pdev)
struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev);
struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = {.dev = &pdev->dev,};
- struct as3711_regulator *reg = NULL;
- struct as3711_regulator *regs;
struct device_node *of_node[AS3711_REGULATOR_NUM] = {};
struct regulator_dev *rdev;
- struct as3711_regulator_info *ri;
int ret;
int id;
@@ -236,30 +223,20 @@ static int as3711_regulator_probe(struct platform_device *pdev)
}
}
- regs = devm_kcalloc(&pdev->dev,
- AS3711_REGULATOR_NUM,
- sizeof(struct as3711_regulator),
- GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
-
- for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) {
- reg = &regs[id];
- reg->reg_info = ri;
-
+ for (id = 0; id < AS3711_REGULATOR_NUM; id++) {
config.init_data = pdata->init_data[id];
- config.driver_data = reg;
config.regmap = as3711->regmap;
config.of_node = of_node[id];
- rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &as3711_reg_desc[id],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
- ri->desc.name);
+ as3711_reg_desc[id].name);
return PTR_ERR(rdev);
}
}
- platform_set_drvdata(pdev, regs);
+
return 0;
}
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index e5fed289b52d..dc75c33bd950 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -81,7 +81,6 @@ struct as3722_regulator_config_data {
struct as3722_regulators {
struct device *dev;
struct as3722 *as3722;
- struct regulator_dev *rdevs[AS3722_REGULATOR_ID_MAX];
struct regulator_desc desc[AS3722_REGULATOR_ID_MAX];
struct as3722_regulator_config_data
reg_config_data[AS3722_REGULATOR_ID_MAX];
@@ -314,63 +313,10 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
},
};
-
-static const int as3722_ldo_current[] = { 150000, 300000 };
-static const int as3722_sd016_current[] = { 2500000, 3000000, 3500000 };
-
-static int as3722_current_to_index(int min_uA, int max_uA,
- const int *curr_table, int n_currents)
-{
- int i;
-
- for (i = n_currents - 1; i >= 0; i--) {
- if ((min_uA <= curr_table[i]) && (curr_table[i] <= max_uA))
- return i;
- }
- return -EINVAL;
-}
-
-static int as3722_ldo_get_current_limit(struct regulator_dev *rdev)
-{
- struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
- struct as3722 *as3722 = as3722_regs->as3722;
- int id = rdev_get_id(rdev);
- u32 val;
- int ret;
-
- ret = as3722_read(as3722, as3722_reg_lookup[id].vsel_reg, &val);
- if (ret < 0) {
- dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
- as3722_reg_lookup[id].vsel_reg, ret);
- return ret;
- }
- if (val & AS3722_LDO_ILIMIT_MASK)
- return 300000;
- return 150000;
-}
-
-static int as3722_ldo_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
- struct as3722 *as3722 = as3722_regs->as3722;
- int id = rdev_get_id(rdev);
- int ret;
- u32 reg = 0;
-
- ret = as3722_current_to_index(min_uA, max_uA, as3722_ldo_current,
- ARRAY_SIZE(as3722_ldo_current));
- if (ret < 0) {
- dev_err(as3722_regs->dev,
- "Current range min:max = %d:%d does not support\n",
- min_uA, max_uA);
- return ret;
- }
- if (ret)
- reg = AS3722_LDO_ILIMIT_BIT;
- return as3722_update_bits(as3722, as3722_reg_lookup[id].vsel_reg,
- AS3722_LDO_ILIMIT_MASK, reg);
-}
+static const unsigned int as3722_ldo_current[] = { 150000, 300000 };
+static const unsigned int as3722_sd016_current[] = {
+ 2500000, 3000000, 3500000
+};
static const struct regulator_ops as3722_ldo0_ops = {
.is_enabled = regulator_is_enabled_regmap,
@@ -379,16 +325,16 @@ static const struct regulator_ops as3722_ldo0_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static const struct regulator_ops as3722_ldo0_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static int as3722_ldo3_set_tracking_mode(struct as3722_regulators *as3722_reg,
@@ -440,8 +386,8 @@ static const struct regulator_ops as3722_ldo6_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
};
@@ -451,8 +397,8 @@ static const struct regulator_ops as3722_ldo6_extcntrl_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
};
@@ -471,8 +417,8 @@ static const struct regulator_ops as3722_ldo_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static const struct regulator_ops as3722_ldo_extcntrl_ops = {
@@ -480,8 +426,8 @@ static const struct regulator_ops as3722_ldo_extcntrl_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static unsigned int as3722_sd_get_mode(struct regulator_dev *rdev)
@@ -539,85 +485,6 @@ static int as3722_sd_set_mode(struct regulator_dev *rdev,
return ret;
}
-static int as3722_sd016_get_current_limit(struct regulator_dev *rdev)
-{
- struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
- struct as3722 *as3722 = as3722_regs->as3722;
- int id = rdev_get_id(rdev);
- u32 val, reg;
- int mask;
- int ret;
-
- switch (id) {
- case AS3722_REGULATOR_ID_SD0:
- reg = AS3722_OVCURRENT_REG;
- mask = AS3722_OVCURRENT_SD0_TRIP_MASK;
- break;
- case AS3722_REGULATOR_ID_SD1:
- reg = AS3722_OVCURRENT_REG;
- mask = AS3722_OVCURRENT_SD1_TRIP_MASK;
- break;
- case AS3722_REGULATOR_ID_SD6:
- reg = AS3722_OVCURRENT_DEB_REG;
- mask = AS3722_OVCURRENT_SD6_TRIP_MASK;
- break;
- default:
- return -EINVAL;
- }
- ret = as3722_read(as3722, reg, &val);
- if (ret < 0) {
- dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
- reg, ret);
- return ret;
- }
- val &= mask;
- val >>= ffs(mask) - 1;
- if (val == 3)
- return -EINVAL;
- return as3722_sd016_current[val];
-}
-
-static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
- struct as3722 *as3722 = as3722_regs->as3722;
- int id = rdev_get_id(rdev);
- int ret;
- int val;
- int mask;
- u32 reg;
-
- ret = as3722_current_to_index(min_uA, max_uA, as3722_sd016_current,
- ARRAY_SIZE(as3722_sd016_current));
- if (ret < 0) {
- dev_err(as3722_regs->dev,
- "Current range min:max = %d:%d does not support\n",
- min_uA, max_uA);
- return ret;
- }
-
- switch (id) {
- case AS3722_REGULATOR_ID_SD0:
- reg = AS3722_OVCURRENT_REG;
- mask = AS3722_OVCURRENT_SD0_TRIP_MASK;
- break;
- case AS3722_REGULATOR_ID_SD1:
- reg = AS3722_OVCURRENT_REG;
- mask = AS3722_OVCURRENT_SD1_TRIP_MASK;
- break;
- case AS3722_REGULATOR_ID_SD6:
- reg = AS3722_OVCURRENT_DEB_REG;
- mask = AS3722_OVCURRENT_SD6_TRIP_MASK;
- break;
- default:
- return -EINVAL;
- }
- ret <<= ffs(mask) - 1;
- val = ret & mask;
- return as3722_update_bits(as3722, reg, mask, val);
-}
-
static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
{
int err;
@@ -649,8 +516,8 @@ static const struct regulator_ops as3722_sd016_ops = {
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_current_limit = as3722_sd016_get_current_limit,
- .set_current_limit = as3722_sd016_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_mode = as3722_sd_get_mode,
.set_mode = as3722_sd_set_mode,
};
@@ -660,8 +527,8 @@ static const struct regulator_ops as3722_sd016_extcntrl_ops = {
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_current_limit = as3722_sd016_get_current_limit,
- .set_current_limit = as3722_sd016_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_mode = as3722_sd_get_mode,
.set_mode = as3722_sd_set_mode,
};
@@ -807,42 +674,45 @@ static int as3722_regulator_probe(struct platform_device *pdev)
config.regmap = as3722->regmap;
for (id = 0; id < AS3722_REGULATOR_ID_MAX; id++) {
+ struct regulator_desc *desc;
+
+ desc = &as3722_regs->desc[id];
reg_config = &as3722_regs->reg_config_data[id];
- as3722_regs->desc[id].name = as3722_reg_lookup[id].name;
- as3722_regs->desc[id].supply_name = as3722_reg_lookup[id].sname;
- as3722_regs->desc[id].id = as3722_reg_lookup[id].regulator_id;
- as3722_regs->desc[id].n_voltages =
- as3722_reg_lookup[id].n_voltages;
- as3722_regs->desc[id].type = REGULATOR_VOLTAGE;
- as3722_regs->desc[id].owner = THIS_MODULE;
- as3722_regs->desc[id].enable_reg =
- as3722_reg_lookup[id].enable_reg;
- as3722_regs->desc[id].enable_mask =
- as3722_reg_lookup[id].enable_mask;
- as3722_regs->desc[id].vsel_reg = as3722_reg_lookup[id].vsel_reg;
- as3722_regs->desc[id].vsel_mask =
- as3722_reg_lookup[id].vsel_mask;
+ desc->name = as3722_reg_lookup[id].name;
+ desc->supply_name = as3722_reg_lookup[id].sname;
+ desc->id = as3722_reg_lookup[id].regulator_id;
+ desc->n_voltages = as3722_reg_lookup[id].n_voltages;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->owner = THIS_MODULE;
+ desc->enable_reg = as3722_reg_lookup[id].enable_reg;
+ desc->enable_mask = as3722_reg_lookup[id].enable_mask;
+ desc->vsel_reg = as3722_reg_lookup[id].vsel_reg;
+ desc->vsel_mask = as3722_reg_lookup[id].vsel_mask;
switch (id) {
case AS3722_REGULATOR_ID_LDO0:
if (reg_config->ext_control)
ops = &as3722_ldo0_extcntrl_ops;
else
ops = &as3722_ldo0_ops;
- as3722_regs->desc[id].min_uV = 825000;
- as3722_regs->desc[id].uV_step = 25000;
- as3722_regs->desc[id].linear_min_sel = 1;
- as3722_regs->desc[id].enable_time = 500;
+ desc->min_uV = 825000;
+ desc->uV_step = 25000;
+ desc->linear_min_sel = 1;
+ desc->enable_time = 500;
+ desc->curr_table = as3722_ldo_current;
+ desc->n_current_limits = ARRAY_SIZE(as3722_ldo_current);
+ desc->csel_reg = as3722_reg_lookup[id].vsel_reg;
+ desc->csel_mask = AS3722_LDO_ILIMIT_MASK;
break;
case AS3722_REGULATOR_ID_LDO3:
if (reg_config->ext_control)
ops = &as3722_ldo3_extcntrl_ops;
else
ops = &as3722_ldo3_ops;
- as3722_regs->desc[id].min_uV = 620000;
- as3722_regs->desc[id].uV_step = 20000;
- as3722_regs->desc[id].linear_min_sel = 1;
- as3722_regs->desc[id].enable_time = 500;
+ desc->min_uV = 620000;
+ desc->uV_step = 20000;
+ desc->linear_min_sel = 1;
+ desc->enable_time = 500;
if (reg_config->enable_tracking) {
ret = as3722_ldo3_set_tracking_mode(as3722_regs,
id, AS3722_LDO3_MODE_PMOS_TRACKING);
@@ -859,18 +729,17 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_ldo6_extcntrl_ops;
else
ops = &as3722_ldo6_ops;
- as3722_regs->desc[id].enable_time = 500;
- as3722_regs->desc[id].bypass_reg =
- AS3722_LDO6_VOLTAGE_REG;
- as3722_regs->desc[id].bypass_mask =
- AS3722_LDO_VSEL_MASK;
- as3722_regs->desc[id].bypass_val_on =
- AS3722_LDO6_VSEL_BYPASS;
- as3722_regs->desc[id].bypass_val_off =
- AS3722_LDO6_VSEL_BYPASS;
- as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
- as3722_regs->desc[id].n_linear_ranges =
- ARRAY_SIZE(as3722_ldo_ranges);
+ desc->enable_time = 500;
+ desc->bypass_reg = AS3722_LDO6_VOLTAGE_REG;
+ desc->bypass_mask = AS3722_LDO_VSEL_MASK;
+ desc->bypass_val_on = AS3722_LDO6_VSEL_BYPASS;
+ desc->bypass_val_off = AS3722_LDO6_VSEL_BYPASS;
+ desc->linear_ranges = as3722_ldo_ranges;
+ desc->n_linear_ranges = ARRAY_SIZE(as3722_ldo_ranges);
+ desc->curr_table = as3722_ldo_current;
+ desc->n_current_limits = ARRAY_SIZE(as3722_ldo_current);
+ desc->csel_reg = as3722_reg_lookup[id].vsel_reg;
+ desc->csel_mask = AS3722_LDO_ILIMIT_MASK;
break;
case AS3722_REGULATOR_ID_SD0:
case AS3722_REGULATOR_ID_SD1:
@@ -889,9 +758,25 @@ static int as3722_regulator_probe(struct platform_device *pdev)
AS3722_SD0_VSEL_MAX + 1;
as3722_regs->desc[id].min_uV = 610000;
}
- as3722_regs->desc[id].uV_step = 10000;
- as3722_regs->desc[id].linear_min_sel = 1;
- as3722_regs->desc[id].enable_time = 600;
+ desc->uV_step = 10000;
+ desc->linear_min_sel = 1;
+ desc->enable_time = 600;
+ desc->curr_table = as3722_sd016_current;
+ desc->n_current_limits =
+ ARRAY_SIZE(as3722_sd016_current);
+ if (id == AS3722_REGULATOR_ID_SD0) {
+ desc->csel_reg = AS3722_OVCURRENT_REG;
+ desc->csel_mask =
+ AS3722_OVCURRENT_SD0_TRIP_MASK;
+ } else if (id == AS3722_REGULATOR_ID_SD1) {
+ desc->csel_reg = AS3722_OVCURRENT_REG;
+ desc->csel_mask =
+ AS3722_OVCURRENT_SD1_TRIP_MASK;
+ } else if (id == AS3722_REGULATOR_ID_SD6) {
+ desc->csel_reg = AS3722_OVCURRENT_DEB_REG;
+ desc->csel_mask =
+ AS3722_OVCURRENT_SD6_TRIP_MASK;
+ }
break;
case AS3722_REGULATOR_ID_SD2:
case AS3722_REGULATOR_ID_SD3:
@@ -901,9 +786,8 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_sd2345_extcntrl_ops;
else
ops = &as3722_sd2345_ops;
- as3722_regs->desc[id].linear_ranges =
- as3722_sd2345_ranges;
- as3722_regs->desc[id].n_linear_ranges =
+ desc->linear_ranges = as3722_sd2345_ranges;
+ desc->n_linear_ranges =
ARRAY_SIZE(as3722_sd2345_ranges);
break;
default:
@@ -911,17 +795,19 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_ldo_extcntrl_ops;
else
ops = &as3722_ldo_ops;
- as3722_regs->desc[id].enable_time = 500;
- as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
- as3722_regs->desc[id].n_linear_ranges =
- ARRAY_SIZE(as3722_ldo_ranges);
+ desc->enable_time = 500;
+ desc->linear_ranges = as3722_ldo_ranges;
+ desc->n_linear_ranges = ARRAY_SIZE(as3722_ldo_ranges);
+ desc->curr_table = as3722_ldo_current;
+ desc->n_current_limits = ARRAY_SIZE(as3722_ldo_current);
+ desc->csel_reg = as3722_reg_lookup[id].vsel_reg;
+ desc->csel_mask = AS3722_LDO_ILIMIT_MASK;
break;
}
- as3722_regs->desc[id].ops = ops;
+ desc->ops = ops;
config.init_data = reg_config->reg_init;
config.of_node = as3722_regulator_matches[id].of_node;
- rdev = devm_regulator_register(&pdev->dev,
- &as3722_regs->desc[id], &config);
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev, "regulator %d register failed %d\n",
@@ -929,7 +815,6 @@ static int as3722_regulator_probe(struct platform_device *pdev)
return ret;
}
- as3722_regs->rdevs[id] = rdev;
if (reg_config->ext_control) {
ret = regulator_enable_regmap(rdev);
if (ret < 0) {
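
The as3722 conversion above replaces the driver's own current-limit callbacks with the core regmap helpers: the selectable currents go into desc.curr_table (microamps, indexed by the value of the register field), and the field itself is described by csel_reg/csel_mask. A minimal sketch of a descriptor using those helpers; the register offset and mask are hypothetical.

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/regulator/driver.h>

/* index in this table == value written to the csel field, values in uA */
static const unsigned int example_curr_table[] = { 150000, 300000 };

static const struct regulator_ops example_ops = {
	.get_current_limit	= regulator_get_current_limit_regmap,
	.set_current_limit	= regulator_set_current_limit_regmap,
};

static const struct regulator_desc example_desc = {
	.name			= "EXAMPLE_LDO",
	.id			= 0,
	.type			= REGULATOR_VOLTAGE,
	.owner			= THIS_MODULE,
	.ops			= &example_ops,
	.curr_table		= example_curr_table,
	.n_current_limits	= ARRAY_SIZE(example_curr_table),
	.csel_reg		= 0x1a,		/* hypothetical */
	.csel_mask		= BIT(7),	/* hypothetical */
};
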
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index fba8f58ab769..152053361862 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -367,16 +367,14 @@ static const int axp209_dcdc2_ldo3_slew_rates[] = {
static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
- const struct regulator_desc *desc;
+ int id = rdev_get_id(rdev);
u8 reg, mask, enable, cfg = 0xff;
const int *slew_rates;
int rate_count = 0;
- desc = rdev->desc;
-
switch (axp20x->variant) {
case AXP209_ID:
- if (desc->id == AXP20X_DCDC2) {
+ if (id == AXP20X_DCDC2) {
slew_rates = axp209_dcdc2_ldo3_slew_rates;
rate_count = ARRAY_SIZE(axp209_dcdc2_ldo3_slew_rates);
reg = AXP20X_DCDC2_LDO3_V_RAMP;
@@ -388,7 +386,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
break;
}
- if (desc->id == AXP20X_LDO3) {
+ if (id == AXP20X_LDO3) {
slew_rates = axp209_dcdc2_ldo3_slew_rates;
rate_count = ARRAY_SIZE(axp209_dcdc2_ldo3_slew_rates);
reg = AXP20X_DCDC2_LDO3_V_RAMP;
@@ -435,16 +433,11 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
static int axp20x_regulator_enable_regmap(struct regulator_dev *rdev)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
- const struct regulator_desc *desc;
-
- if (!rdev)
- return -EINVAL;
-
- desc = rdev->desc;
+ int id = rdev_get_id(rdev);
switch (axp20x->variant) {
case AXP209_ID:
- if ((desc->id == AXP20X_LDO3) &&
+ if ((id == AXP20X_LDO3) &&
rdev->constraints && rdev->constraints->soft_start) {
int v_out;
int ret;
@@ -1028,7 +1021,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP803_DCDC_FREQ_CTRL;
- /* Fall through to the check below.*/
+ /* Fall through - to the check below.*/
case AXP806_ID:
/*
* AXP806 also have DCDC work frequency setting register at a
@@ -1119,12 +1112,12 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
break;
case AXP806_ID:
- reg = AXP806_DCDC_MODE_CTRL2;
/*
* AXP806 DCDC regulator IDs have the same range as AXP22X.
- * Fall through to the check below.
* (See include/linux/mfd/axp20x.h)
*/
+ reg = AXP806_DCDC_MODE_CTRL2;
+ /* Fall through - to the check below. */
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index e49c0a7d5dd5..85ccc93b2bb6 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -103,10 +103,6 @@
((n > BCM590XX_REG_VSR) && (n < BCM590XX_REG_VBUS))
#define BCM590XX_REG_IS_VBUS(n) (n == BCM590XX_REG_VBUS)
-struct bcm590xx_board {
- struct regulator_init_data *bcm590xx_pmu_init_data[BCM590XX_NUM_REGS];
-};
-
/* LDO group A: supported voltages in microvolts */
static const unsigned int ldo_a_table[] = {
1200000, 1800000, 2500000, 2700000, 2800000,
@@ -280,105 +276,15 @@ static const struct regulator_ops bcm590xx_ops_vbus = {
.disable = regulator_disable_regmap,
};
-#define BCM590XX_MATCH(_name, _id) \
- { \
- .name = #_name, \
- .driver_data = (void *)&bcm590xx_regs[BCM590XX_REG_##_id], \
- }
-
-static struct of_regulator_match bcm590xx_matches[] = {
- BCM590XX_MATCH(rfldo, RFLDO),
- BCM590XX_MATCH(camldo1, CAMLDO1),
- BCM590XX_MATCH(camldo2, CAMLDO2),
- BCM590XX_MATCH(simldo1, SIMLDO1),
- BCM590XX_MATCH(simldo2, SIMLDO2),
- BCM590XX_MATCH(sdldo, SDLDO),
- BCM590XX_MATCH(sdxldo, SDXLDO),
- BCM590XX_MATCH(mmcldo1, MMCLDO1),
- BCM590XX_MATCH(mmcldo2, MMCLDO2),
- BCM590XX_MATCH(audldo, AUDLDO),
- BCM590XX_MATCH(micldo, MICLDO),
- BCM590XX_MATCH(usbldo, USBLDO),
- BCM590XX_MATCH(vibldo, VIBLDO),
- BCM590XX_MATCH(csr, CSR),
- BCM590XX_MATCH(iosr1, IOSR1),
- BCM590XX_MATCH(iosr2, IOSR2),
- BCM590XX_MATCH(msr, MSR),
- BCM590XX_MATCH(sdsr1, SDSR1),
- BCM590XX_MATCH(sdsr2, SDSR2),
- BCM590XX_MATCH(vsr, VSR),
- BCM590XX_MATCH(gpldo1, GPLDO1),
- BCM590XX_MATCH(gpldo2, GPLDO2),
- BCM590XX_MATCH(gpldo3, GPLDO3),
- BCM590XX_MATCH(gpldo4, GPLDO4),
- BCM590XX_MATCH(gpldo5, GPLDO5),
- BCM590XX_MATCH(gpldo6, GPLDO6),
- BCM590XX_MATCH(vbus, VBUS),
-};
-
-static struct bcm590xx_board *bcm590xx_parse_dt_reg_data(
- struct platform_device *pdev,
- struct of_regulator_match **bcm590xx_reg_matches)
-{
- struct bcm590xx_board *data;
- struct device_node *np = pdev->dev.parent->of_node;
- struct device_node *regulators;
- struct of_regulator_match *matches = bcm590xx_matches;
- int count = ARRAY_SIZE(bcm590xx_matches);
- int idx = 0;
- int ret;
-
- if (!np) {
- dev_err(&pdev->dev, "of node not found\n");
- return NULL;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return NULL;
-
- np = of_node_get(np);
- regulators = of_get_child_by_name(np, "regulators");
- if (!regulators) {
- dev_warn(&pdev->dev, "regulator node not found\n");
- return NULL;
- }
-
- ret = of_regulator_match(&pdev->dev, regulators, matches, count);
- of_node_put(regulators);
- if (ret < 0) {
- dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
- ret);
- return NULL;
- }
-
- *bcm590xx_reg_matches = matches;
-
- for (idx = 0; idx < count; idx++) {
- if (!matches[idx].init_data || !matches[idx].of_node)
- continue;
-
- data->bcm590xx_pmu_init_data[idx] = matches[idx].init_data;
- }
-
- return data;
-}
-
static int bcm590xx_probe(struct platform_device *pdev)
{
struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
- struct bcm590xx_board *pmu_data = NULL;
struct bcm590xx_reg *pmu;
struct regulator_config config = { };
struct bcm590xx_info *info;
- struct regulator_init_data *reg_data;
struct regulator_dev *rdev;
- struct of_regulator_match *bcm590xx_reg_matches = NULL;
int i;
- pmu_data = bcm590xx_parse_dt_reg_data(pdev,
- &bcm590xx_reg_matches);
-
pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
@@ -397,13 +303,10 @@ static int bcm590xx_probe(struct platform_device *pdev)
info = bcm590xx_regs;
for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
- if (pmu_data)
- reg_data = pmu_data->bcm590xx_pmu_init_data[i];
- else
- reg_data = NULL;
-
/* Register the regulators */
pmu->desc[i].name = info->name;
+ pmu->desc[i].of_match = of_match_ptr(info->name);
+ pmu->desc[i].regulators_node = of_match_ptr("regulators");
pmu->desc[i].supply_name = info->vin_name;
pmu->desc[i].id = i;
pmu->desc[i].volt_table = info->volt_table;
@@ -433,16 +336,12 @@ static int bcm590xx_probe(struct platform_device *pdev)
pmu->desc[i].owner = THIS_MODULE;
config.dev = bcm590xx->dev;
- config.init_data = reg_data;
config.driver_data = pmu;
if (BCM590XX_REG_IS_GPLDO(i) || BCM590XX_REG_IS_VBUS(i))
config.regmap = bcm590xx->regmap_sec;
else
config.regmap = bcm590xx->regmap_pri;
- if (bcm590xx_reg_matches)
- config.of_node = bcm590xx_reg_matches[i].of_node;
-
rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
&config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index b2191be49670..fde4264da6ff 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -27,8 +27,8 @@
static int bd718xx_buck1234_set_ramp_delay(struct regulator_dev *rdev,
int ramp_delay)
{
- int id = rdev->desc->id;
- unsigned int ramp_value = BUCK_RAMPRATE_10P00MV;
+ int id = rdev_get_id(rdev);
+ unsigned int ramp_value;
dev_dbg(&rdev->dev, "Buck[%d] Set Ramp = %d\n", id + 1,
ramp_delay);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 68473d0cc57e..955a0a15b9cb 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1339,9 +1339,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
* We'll only apply the initial system load if an
* initial mode wasn't specified.
*/
- regulator_lock(rdev);
drms_uA_update(rdev);
- regulator_unlock(rdev);
}
if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
@@ -2256,6 +2254,7 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
if (pin->gpiod == rdev->ena_pin->gpiod) {
if (pin->request_count <= 1) {
pin->request_count = 0;
+ gpiod_put(pin->gpiod);
list_del(&pin->list);
kfree(pin);
rdev->ena_pin = NULL;
@@ -3004,7 +3003,7 @@ EXPORT_SYMBOL_GPL(regulator_get_linear_step);
* @min_uV: Minimum required voltage in uV.
* @max_uV: Maximum required voltage in uV.
*
- * Returns a boolean or a negative error code.
+ * Returns a boolean.
*/
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
@@ -3028,7 +3027,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
ret = regulator_count_voltages(regulator);
if (ret < 0)
- return ret;
+ return 0;
voltages = ret;
for (i = 0; i < voltages; i++) {
@@ -3322,15 +3321,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
/* for not coupled regulators this will just set the voltage */
ret = regulator_balance_voltage(rdev, state);
- if (ret < 0)
- goto out2;
+ if (ret < 0) {
+ voltage->min_uV = old_min_uV;
+ voltage->max_uV = old_max_uV;
+ }
out:
- return 0;
-out2:
- voltage->min_uV = old_min_uV;
- voltage->max_uV = old_max_uV;
-
return ret;
}
@@ -4347,8 +4343,6 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
ret = PTR_ERR(consumers[i].consumer);
- dev_err(dev, "Failed to get supply '%s': %d\n",
- consumers[i].supply, ret);
consumers[i].consumer = NULL;
goto err;
}
@@ -4357,6 +4351,13 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
return 0;
err:
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get supply '%s': %d\n",
+ consumers[i].supply, ret);
+ else
+ dev_dbg(dev, "Failed to get supply '%s', deferring\n",
+ consumers[i].supply);
+
while (--i >= 0)
regulator_put(consumers[i].consumer);
@@ -5064,10 +5065,11 @@ void regulator_unregister(struct regulator_dev *rdev)
regulator_put(rdev->supply);
}
+ flush_work(&rdev->disable_work.work);
+
mutex_lock(&regulator_list_mutex);
debugfs_remove_recursive(rdev->debugfs);
- flush_work(&rdev->disable_work.work);
WARN_ON(rdev->open_count);
regulator_remove_coupling(rdev);
unset_regulator_supplies(rdev);
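
Among the core changes above, regulator_bulk_get() now logs -EPROBE_DEFER at debug level only, so consumers that simply propagate the error no longer flood the log while waiting for their supplies to appear. A minimal consumer-side sketch; the supply names are hypothetical.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdda" },
	{ .supply = "vddio" },
};

static int example_consumer_probe(struct platform_device *pdev)
{
	int ret;

	/* on -EPROBE_DEFER just return it; the core only prints a dev_dbg() */
	ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(example_supplies),
				      example_supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				     example_supplies);
}
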
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index e7dab5c4d1d1..d3284361e594 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -505,17 +505,12 @@ MODULE_DEVICE_TABLE(of, cpcap_regulator_id_table);
static int cpcap_regulator_probe(struct platform_device *pdev)
{
struct cpcap_ddata *ddata;
- const struct of_device_id *match;
+ const struct cpcap_regulator *match_data;
struct regulator_config config;
- struct regulator_init_data init_data;
int i;
- match = of_match_device(of_match_ptr(cpcap_regulator_id_table),
- &pdev->dev);
- if (!match)
- return -EINVAL;
-
- if (!match->data) {
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data) {
dev_err(&pdev->dev, "no configuration data found\n");
return -ENODEV;
@@ -530,14 +525,12 @@ static int cpcap_regulator_probe(struct platform_device *pdev)
return -ENODEV;
ddata->dev = &pdev->dev;
- ddata->soc = match->data;
+ ddata->soc = match_data;
platform_set_drvdata(pdev, ddata);
memset(&config, 0, sizeof(config));
- memset(&init_data, 0, sizeof(init_data));
config.dev = &pdev->dev;
config.regmap = ddata->reg;
- config.init_data = &init_data;
for (i = 0; i < CPCAP_NR_REGULATORS; i++) {
const struct cpcap_regulator *regulator = &ddata->soc[i];
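
The cpcap change collapses the of_match_device() lookup plus match->data check into a single of_device_get_match_data() call. A minimal sketch of that idiom; the compatible string and data type are hypothetical.

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_soc_data {
	int nregulators;
};

static const struct example_soc_data example_soc = { .nregulators = 4 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-pmic", .data = &example_soc },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_pmic_probe(struct platform_device *pdev)
{
	const struct example_soc_data *soc;

	/* one call replaces of_match_device() plus the match->data check */
	soc = of_device_get_match_data(&pdev->dev);
	if (!soc)
		return -ENODEV;

	dev_info(&pdev->dev, "%d regulators\n", soc->nregulators);
	return 0;
}
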
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 33e8f3b8d2bd..5493c3a86426 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -1,13 +1,9 @@
-/*
- * Regulators driver for Dialog Semiconductor DA903x
- *
- * Copyright (C) 2006-2008 Marvell International Ltd.
- * Copyright (C) 2008 Compulab Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulators driver for Dialog Semiconductor DA903x
+//
+// Copyright (C) 2006-2008 Marvell International Ltd.
+// Copyright (C) 2008 Compulab Ltd.
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index cefa3558236d..e18d291c7f21 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -1,16 +1,10 @@
-/*
-* da9052-regulator.c: Regulator driver for DA9052
-*
-* Copyright(c) 2011 Dialog Semiconductor Ltd.
-*
-* Author: David Dajun Chen <dchen@diasemi.com>
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-*/
+// SPDX-License-Identifier: GPL-2.0+
+//
+// da9052-regulator.c: Regulator driver for DA9052
+//
+// Copyright(c) 2011 Dialog Semiconductor Ltd.
+//
+// Author: David Dajun Chen <dchen@diasemi.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -19,10 +13,8 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#ifdef CONFIG_OF
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
-#endif
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
@@ -294,6 +286,8 @@ static const struct regulator_ops da9052_ldo_ops = {
{\
.reg_desc = {\
.name = #_name,\
+ .of_match = of_match_ptr(#_name),\
+ .regulators_node = of_match_ptr("regulators"),\
.ops = &da9052_ldo_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9052_ID_##_id,\
@@ -314,6 +308,8 @@ static const struct regulator_ops da9052_ldo_ops = {
{\
.reg_desc = {\
.name = #_name,\
+ .of_match = of_match_ptr(#_name),\
+ .regulators_node = of_match_ptr("regulators"),\
.ops = &da9052_dcdc_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9052_ID_##_id,\
@@ -417,36 +413,11 @@ static int da9052_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
- config.dev = &pdev->dev;
+ config.dev = da9052->dev;
config.driver_data = regulator;
config.regmap = da9052->regmap;
- if (pdata) {
+ if (pdata)
config.init_data = pdata->regulators[cell->id];
- } else {
-#ifdef CONFIG_OF
- struct device_node *nproot = da9052->dev->of_node;
- struct device_node *np;
-
- if (!nproot)
- return -ENODEV;
-
- nproot = of_get_child_by_name(nproot, "regulators");
- if (!nproot)
- return -ENODEV;
-
- for_each_child_of_node(nproot, np) {
- if (of_node_name_eq(np,
- regulator->info->reg_desc.name)) {
- config.init_data = of_get_regulator_init_data(
- &pdev->dev, np,
- &regulator->info->reg_desc);
- config.of_node = np;
- break;
- }
- }
- of_node_put(nproot);
-#endif
- }
regulator->rdev = devm_regulator_register(&pdev->dev,
&regulator->info->reg_desc,
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 3c6fac793658..c025ccb1a30a 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -1,16 +1,10 @@
-/*
-* Regulator driver for DA9055 PMIC
-*
-* Copyright(c) 2012 Dialog Semiconductor Ltd.
-*
-* Author: David Dajun Chen <dchen@diasemi.com>
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-*/
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator driver for DA9055 PMIC
+//
+// Copyright(c) 2012 Dialog Semiconductor Ltd.
+//
+// Author: David Dajun Chen <dchen@diasemi.com>
#include <linux/module.h>
#include <linux/init.h>
@@ -338,6 +332,8 @@ static const struct regulator_ops da9055_ldo_ops = {
{\
.reg_desc = {\
.name = #_id,\
+ .of_match = of_match_ptr(#_id),\
+ .regulators_node = of_match_ptr("regulators"),\
.ops = &da9055_ldo_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9055_ID_##_id,\
@@ -366,6 +362,8 @@ static const struct regulator_ops da9055_ldo_ops = {
{\
.reg_desc = {\
.name = #_id,\
+ .of_match = of_match_ptr(#_id),\
+ .regulators_node = of_match_ptr("regulators"),\
.ops = &da9055_buck_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9055_ID_##_id,\
@@ -487,8 +485,10 @@ static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data)
{
struct da9055_regulator *regulator = data;
+ regulator_lock(regulator->rdev);
regulator_notifier_call_chain(regulator->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(regulator->rdev);
return IRQ_HANDLED;
}
@@ -507,59 +507,6 @@ static inline struct da9055_regulator_info *find_regulator_info(int id)
return NULL;
}
-#ifdef CONFIG_OF
-static struct of_regulator_match da9055_reg_matches[] = {
- { .name = "BUCK1", },
- { .name = "BUCK2", },
- { .name = "LDO1", },
- { .name = "LDO2", },
- { .name = "LDO3", },
- { .name = "LDO4", },
- { .name = "LDO5", },
- { .name = "LDO6", },
-};
-
-static int da9055_regulator_dt_init(struct platform_device *pdev,
- struct da9055_regulator *regulator,
- struct regulator_config *config,
- int regid)
-{
- struct device_node *nproot, *np;
- int ret;
-
- nproot = of_node_get(pdev->dev.parent->of_node);
- if (!nproot)
- return -ENODEV;
-
- np = of_get_child_by_name(nproot, "regulators");
- if (!np)
- return -ENODEV;
-
- ret = of_regulator_match(&pdev->dev, np, &da9055_reg_matches[regid], 1);
- of_node_put(nproot);
- if (ret < 0) {
- dev_err(&pdev->dev, "Error matching regulator: %d\n", ret);
- return ret;
- }
-
- config->init_data = da9055_reg_matches[regid].init_data;
- config->of_node = da9055_reg_matches[regid].of_node;
-
- if (!config->of_node)
- return -ENODEV;
-
- return 0;
-}
-#else
-static inline int da9055_regulator_dt_init(struct platform_device *pdev,
- struct da9055_regulator *regulator,
- struct regulator_config *config,
- int regid)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_OF */
-
static int da9055_regulator_probe(struct platform_device *pdev)
{
struct regulator_config config = { };
@@ -580,18 +527,12 @@ static int da9055_regulator_probe(struct platform_device *pdev)
}
regulator->da9055 = da9055;
- config.dev = &pdev->dev;
+ config.dev = da9055->dev;
config.driver_data = regulator;
config.regmap = da9055->regmap;
- if (pdata) {
+ if (pdata)
config.init_data = pdata->regulators[pdev->id];
- } else {
- ret = da9055_regulator_dt_init(pdev, regulator, &config,
- pdev->id);
- if (ret < 0)
- return ret;
- }
ret = da9055_gpio_init(regulator, &config, pdata, pdev->id);
if (ret < 0)
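
The da9055 hunk above (and the da9062 one below) takes the rdev lock around regulator_notifier_call_chain(), which the core expects its callers to hold. A minimal sketch of an over-current handler wired up that way; the IRQ name and helper are hypothetical.

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

static irqreturn_t example_oc_irq(int irq, void *data)
{
	struct regulator_dev *rdev = data;

	/* notifier chains must be run with the rdev lock held */
	regulator_lock(rdev);
	regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT, NULL);
	regulator_unlock(rdev);

	return IRQ_HANDLED;
}

/* called from probe, after devm_regulator_register() */
static int example_wire_oc_irq(struct platform_device *pdev, int irq,
			       struct regulator_dev *rdev)
{
	return devm_request_threaded_irq(&pdev->dev, irq, NULL, example_oc_irq,
					 IRQF_ONESHOT, "EXAMPLE-OC", rdev);
}
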
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index b064d8a19d4c..a02e0488410f 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -1,17 +1,8 @@
-/*
- * Regulator device driver for DA9061 and DA9062.
- * Copyright (C) 2015-2017 Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator device driver for DA9061 and DA9062.
+// Copyright (C) 2015-2017 Dialog Semiconductor
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -53,16 +44,12 @@ enum {
/* Regulator capabilities and registers description */
struct da9062_regulator_info {
struct regulator_desc desc;
- /* Current limiting */
- unsigned int n_current_limits;
- const int *current_limits;
/* Main register fields */
struct reg_field mode;
struct reg_field suspend;
struct reg_field sleep;
struct reg_field suspend_sleep;
unsigned int suspend_vsel_reg;
- struct reg_field ilimit;
/* Event detection bit */
struct reg_field oc_event;
};
@@ -78,7 +65,6 @@ struct da9062_regulator {
struct regmap_field *suspend;
struct regmap_field *sleep;
struct regmap_field *suspend_sleep;
- struct regmap_field *ilimit;
};
/* Encapsulates all information for the regulators driver */
@@ -104,7 +90,7 @@ enum {
* - DA9062_ID_[BUCK1|BUCK2|BUCK4]
* Entry indexes correspond to register values.
*/
-static const int da9062_buck_a_limits[] = {
+static const unsigned int da9062_buck_a_limits[] = {
500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000,
1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
};
@@ -114,44 +100,11 @@ static const int da9062_buck_a_limits[] = {
* - DA9062_ID_BUCK3
* Entry indexes correspond to register values.
*/
-static const int da9062_buck_b_limits[] = {
+static const unsigned int da9062_buck_b_limits[] = {
1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};
-static int da9062_set_current_limit(struct regulator_dev *rdev,
- int min_ua, int max_ua)
-{
- struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- const struct da9062_regulator_info *rinfo = regl->info;
- int n, tval;
-
- for (n = rinfo->n_current_limits - 1; n >= 0; n--) {
- tval = rinfo->current_limits[n];
- if (tval >= min_ua && tval <= max_ua)
- return regmap_field_write(regl->ilimit, n);
- }
-
- return -EINVAL;
-}
-
-static int da9062_get_current_limit(struct regulator_dev *rdev)
-{
- struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- const struct da9062_regulator_info *rinfo = regl->info;
- unsigned int sel;
- int ret;
-
- ret = regmap_field_read(regl->ilimit, &sel);
- if (ret < 0)
- return ret;
-
- if (sel >= rinfo->n_current_limits)
- sel = rinfo->n_current_limits - 1;
-
- return rinfo->current_limits[sel];
-}
-
static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
@@ -395,8 +348,8 @@ static const struct regulator_ops da9062_buck_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = da9062_set_current_limit,
- .get_current_limit = da9062_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
.set_mode = da9062_buck_set_mode,
.get_mode = da9062_buck_get_mode,
.get_status = da9062_buck_get_status,
@@ -433,8 +386,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.min_uV = (300) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1570) - (300))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_C,
+ .desc.csel_mask = DA9062AA_BUCK1_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK1_CONT,
.desc.enable_mask = DA9062AA_BUCK1_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK1_A,
@@ -457,10 +412,6 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_C,
- __builtin_ffs((int)DA9062AA_BUCK1_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK1_ILIM_MASK)) - 1),
},
{
.desc.id = DA9061_ID_BUCK2,
@@ -471,8 +422,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.min_uV = (800) * 1000,
.desc.uV_step = (20) * 1000,
.desc.n_voltages = ((3340) - (800))/(20) + 1,
- .current_limits = da9062_buck_b_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+ .desc.curr_table = da9062_buck_b_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_A,
+ .desc.csel_mask = DA9062AA_BUCK3_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK3_CONT,
.desc.enable_mask = DA9062AA_BUCK3_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK3_A,
@@ -495,10 +448,6 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_A,
- __builtin_ffs((int)DA9062AA_BUCK3_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK3_ILIM_MASK)) - 1),
},
{
.desc.id = DA9061_ID_BUCK3,
@@ -509,8 +458,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.min_uV = (530) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1800) - (530))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_B,
+ .desc.csel_mask = DA9062AA_BUCK4_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK4_CONT,
.desc.enable_mask = DA9062AA_BUCK4_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK4_A,
@@ -533,10 +484,6 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_B,
- __builtin_ffs((int)DA9062AA_BUCK4_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK4_ILIM_MASK)) - 1),
},
{
.desc.id = DA9061_ID_LDO1,
@@ -679,8 +626,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.min_uV = (300) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1570) - (300))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_C,
+ .desc.csel_mask = DA9062AA_BUCK1_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK1_CONT,
.desc.enable_mask = DA9062AA_BUCK1_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK1_A,
@@ -703,10 +652,6 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_C,
- __builtin_ffs((int)DA9062AA_BUCK1_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK1_ILIM_MASK)) - 1),
},
{
.desc.id = DA9062_ID_BUCK2,
@@ -717,8 +662,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.min_uV = (300) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1570) - (300))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_C,
+ .desc.csel_mask = DA9062AA_BUCK2_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK2_CONT,
.desc.enable_mask = DA9062AA_BUCK2_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK2_A,
@@ -741,10 +688,6 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_C,
- __builtin_ffs((int)DA9062AA_BUCK2_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK2_ILIM_MASK)) - 1),
},
{
.desc.id = DA9062_ID_BUCK3,
@@ -755,8 +698,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.min_uV = (800) * 1000,
.desc.uV_step = (20) * 1000,
.desc.n_voltages = ((3340) - (800))/(20) + 1,
- .current_limits = da9062_buck_b_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+ .desc.curr_table = da9062_buck_b_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_A,
+ .desc.csel_mask = DA9062AA_BUCK3_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK3_CONT,
.desc.enable_mask = DA9062AA_BUCK3_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK3_A,
@@ -779,10 +724,6 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_A,
- __builtin_ffs((int)DA9062AA_BUCK3_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK3_ILIM_MASK)) - 1),
},
{
.desc.id = DA9062_ID_BUCK4,
@@ -793,8 +734,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.min_uV = (530) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1800) - (530))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_B,
+ .desc.csel_mask = DA9062AA_BUCK4_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK4_CONT,
.desc.enable_mask = DA9062AA_BUCK4_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK4_A,
@@ -817,10 +760,6 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_B,
- __builtin_ffs((int)DA9062AA_BUCK4_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK4_ILIM_MASK)) - 1),
},
{
.desc.id = DA9062_ID_LDO1,
@@ -974,8 +913,10 @@ static irqreturn_t da9062_ldo_lim_event(int irq, void *data)
continue;
if (BIT(regl->info->oc_event.lsb) & bits) {
+ regulator_lock(regl->rdev);
regulator_notifier_call_chain(regl->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(regl->rdev);
handled = IRQ_HANDLED;
}
}
@@ -1063,15 +1004,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
return PTR_ERR(regl->suspend_sleep);
}
- if (regl->info->ilimit.reg) {
- regl->ilimit = devm_regmap_field_alloc(
- &pdev->dev,
- chip->regmap,
- regl->info->ilimit);
- if (IS_ERR(regl->ilimit))
- return PTR_ERR(regl->ilimit);
- }
-
/* Register regulator */
memset(&config, 0, sizeof(config));
config.dev = chip->dev;
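For context on the da9062 conversion above: once a driver fills in desc.curr_table, desc.n_current_limits, desc.csel_reg and desc.csel_mask, the generic regulator_set_current_limit_regmap()/regulator_get_current_limit_regmap() helpers can replace the per-driver REG_FIELD handling. A simplified sketch of what the get-side helper does (illustrative only, not the exact core implementation):

	#include <linux/bitops.h>
	#include <linux/regmap.h>
	#include <linux/regulator/driver.h>

	/* Simplified sketch: read the current-limit selector field and
	 * translate it through the descriptor's current table. */
	static int sketch_get_current_limit(struct regulator_dev *rdev)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
		if (ret)
			return ret;

		val &= rdev->desc->csel_mask;
		val >>= ffs(rdev->desc->csel_mask) - 1;

		if (val >= rdev->desc->n_current_limits)
			return -EINVAL;

		return rdev->desc->curr_table[val];	/* limit in uA */
	}

The set side walks desc->curr_table for an entry within [min_uA, max_uA] and writes the matching selector to csel_reg/csel_mask with regmap_update_bits(); since curr_table is a const unsigned int pointer, the drivers' limit arrays are also retyped to const unsigned int in this patch.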
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 2b0c7a85306a..6f9ce1a6e44d 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -1,18 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator driver for DA9063 PMIC series
+//
+// Copyright 2012 Dialog Semiconductors Ltd.
+// Copyright 2013 Philipp Zabel, Pengutronix
+//
+// Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
-/*
- * Regulator driver for DA9063 PMIC series
- *
- * Copyright 2012 Dialog Semiconductors Ltd.
- * Copyright 2013 Philipp Zabel, Pengutronix
- *
- * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -38,17 +32,12 @@
struct da9063_regulator_info {
struct regulator_desc desc;
- /* Current limiting */
- unsigned n_current_limits;
- const int *current_limits;
-
/* DA9063 main register fields */
struct reg_field mode; /* buck mode of operation */
struct reg_field suspend;
struct reg_field sleep;
struct reg_field suspend_sleep;
unsigned int suspend_vsel_reg;
- struct reg_field ilimit;
/* DA9063 event detection bit */
struct reg_field oc_event;
@@ -73,15 +62,18 @@ struct da9063_regulator_info {
.suspend_vsel_reg = DA9063_REG_V##regl_name##_B
/* Macros for voltage DC/DC converters (BUCKs) */
-#define DA9063_BUCK(chip, regl_name, min_mV, step_mV, max_mV, limits_array) \
+#define DA9063_BUCK(chip, regl_name, min_mV, step_mV, max_mV, limits_array, \
+ creg, cmask) \
.desc.id = chip##_ID_##regl_name, \
.desc.name = __stringify(chip##_##regl_name), \
.desc.ops = &da9063_buck_ops, \
.desc.min_uV = (min_mV) * 1000, \
.desc.uV_step = (step_mV) * 1000, \
.desc.n_voltages = ((max_mV) - (min_mV))/(step_mV) + 1, \
- .current_limits = limits_array, \
- .n_current_limits = ARRAY_SIZE(limits_array)
+ .desc.csel_reg = (creg), \
+ .desc.csel_mask = (cmask), \
+ .desc.curr_table = limits_array, \
+ .desc.n_current_limits = ARRAY_SIZE(limits_array)
#define DA9063_BUCK_COMMON_FIELDS(regl_name) \
.desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
@@ -112,7 +104,6 @@ struct da9063_regulator {
struct regmap_field *suspend;
struct regmap_field *sleep;
struct regmap_field *suspend_sleep;
- struct regmap_field *ilimit;
};
/* Encapsulates all information for the regulators driver */
@@ -134,65 +125,32 @@ enum {
/* Current limits array (in uA) for BCORE1, BCORE2, BPRO.
Entry indexes correspond to register values. */
-static const int da9063_buck_a_limits[] = {
+static const unsigned int da9063_buck_a_limits[] = {
500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000,
1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
};
/* Current limits array (in uA) for BMEM, BIO, BPERI.
Entry indexes correspond to register values. */
-static const int da9063_buck_b_limits[] = {
+static const unsigned int da9063_buck_b_limits[] = {
1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};
/* Current limits array (in uA) for merged BCORE1 and BCORE2.
Entry indexes correspond to register values. */
-static const int da9063_bcores_merged_limits[] = {
+static const unsigned int da9063_bcores_merged_limits[] = {
1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000,
2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000
};
/* Current limits array (in uA) for merged BMEM and BIO.
Entry indexes correspond to register values. */
-static const int da9063_bmem_bio_merged_limits[] = {
+static const unsigned int da9063_bmem_bio_merged_limits[] = {
3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
};
-static int da9063_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- const struct da9063_regulator_info *rinfo = regl->info;
- int n, tval;
-
- for (n = rinfo->n_current_limits - 1; n >= 0; n--) {
- tval = rinfo->current_limits[n];
- if (tval >= min_uA && tval <= max_uA)
- return regmap_field_write(regl->ilimit, n);
- }
-
- return -EINVAL;
-}
-
-static int da9063_get_current_limit(struct regulator_dev *rdev)
-{
- struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- const struct da9063_regulator_info *rinfo = regl->info;
- unsigned int sel;
- int ret;
-
- ret = regmap_field_read(regl->ilimit, &sel);
- if (ret < 0)
- return ret;
-
- if (sel >= rinfo->n_current_limits)
- sel = rinfo->n_current_limits - 1;
-
- return rinfo->current_limits[sel];
-}
-
static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
@@ -434,8 +392,8 @@ static const struct regulator_ops da9063_buck_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = da9063_set_current_limit,
- .get_current_limit = da9063_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
.set_mode = da9063_buck_set_mode,
.get_mode = da9063_buck_get_mode,
.get_status = da9063_buck_get_status,
@@ -465,69 +423,61 @@ static const struct regulator_ops da9063_ldo_ops = {
static const struct da9063_regulator_info da9063_regulator_info[] = {
{
DA9063_BUCK(DA9063, BCORE1, 300, 10, 1570,
- da9063_buck_a_limits),
+ da9063_buck_a_limits,
+ DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BCORE1),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
- DA9063_BCORE1_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BCORE2, 300, 10, 1570,
- da9063_buck_a_limits),
+ da9063_buck_a_limits,
+ DA9063_REG_BUCK_ILIM_C, DA9063_BCORE2_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BCORE2),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE2_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
- DA9063_BCORE2_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BPRO, 530, 10, 1800,
- da9063_buck_a_limits),
+ da9063_buck_a_limits,
+ DA9063_REG_BUCK_ILIM_B, DA9063_BPRO_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BPRO),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPRO_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B,
- DA9063_BPRO_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BMEM, 800, 20, 3340,
- da9063_buck_b_limits),
+ da9063_buck_b_limits,
+ DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BMEM),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
- DA9063_BMEM_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BIO, 800, 20, 3340,
- da9063_buck_b_limits),
+ da9063_buck_b_limits,
+ DA9063_REG_BUCK_ILIM_A, DA9063_BIO_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BIO),
.suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VBIO_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
- DA9063_BIO_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BPERI, 800, 20, 3340,
- da9063_buck_b_limits),
+ da9063_buck_b_limits,
+ DA9063_REG_BUCK_ILIM_B, DA9063_BPERI_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BPERI),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPERI_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B,
- DA9063_BPERI_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BCORES_MERGED, 300, 10, 1570,
- da9063_bcores_merged_limits),
+ da9063_bcores_merged_limits,
+ DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK),
/* BCORES_MERGED uses the same register fields as BCORE1 */
DA9063_BUCK_COMMON_FIELDS(BCORE1),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
- DA9063_BCORE1_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BMEM_BIO_MERGED, 800, 20, 3340,
- da9063_bmem_bio_merged_limits),
+ da9063_bmem_bio_merged_limits,
+ DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK),
/* BMEM_BIO_MERGED uses the same register fields as BMEM */
DA9063_BUCK_COMMON_FIELDS(BMEM),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
- DA9063_BMEM_ILIM_MASK),
},
{
DA9063_LDO(DA9063, LDO3, 900, 20, 3440),
@@ -615,9 +565,12 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
if (regl->info->oc_event.reg != DA9063_REG_STATUS_D)
continue;
- if (BIT(regl->info->oc_event.lsb) & bits)
+ if (BIT(regl->info->oc_event.lsb) & bits) {
+ regulator_lock(regl->rdev);
regulator_notifier_call_chain(regl->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(regl->rdev);
+ }
}
return IRQ_HANDLED;
@@ -861,13 +814,6 @@ static int da9063_regulator_probe(struct platform_device *pdev)
return PTR_ERR(regl->suspend_sleep);
}
- if (regl->info->ilimit.reg) {
- regl->ilimit = devm_regmap_field_alloc(&pdev->dev,
- da9063->regmap, regl->info->ilimit);
- if (IS_ERR(regl->ilimit))
- return PTR_ERR(regl->ilimit);
- }
-
/* Register regulator */
memset(&config, 0, sizeof(config));
config.dev = &pdev->dev;
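The regulator_lock()/regulator_unlock() pairs added around regulator_notifier_call_chain() in the interrupt handlers above follow the core's expectation that the rdev lock is held while notifiers run. A minimal, hypothetical sketch of the pattern (the foo_* names are illustrative only):

	#include <linux/interrupt.h>
	#include <linux/regulator/driver.h>

	struct foo_chip {
		struct regulator_dev *rdev;	/* hypothetical driver data */
	};

	static irqreturn_t foo_oc_irq(int irq, void *data)
	{
		struct foo_chip *chip = data;

		/* hold the rdev lock while the notifier chain runs */
		regulator_lock(chip->rdev);
		regulator_notifier_call_chain(chip->rdev,
					      REGULATOR_EVENT_OVER_CURRENT, NULL);
		regulator_unlock(chip->rdev);

		return IRQ_HANDLED;
	}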
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 528303771723..f9448ed50e05 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -1,22 +1,7 @@
-/*
- * da9210-regulator.c - Regulator device driver for DA9210
- * Copyright (C) 2013 Dialog Semiconductor Ltd.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License along with this library; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// da9210-regulator.c - Regulator device driver for DA9210
+// Copyright (C) 2013 Dialog Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/regulator/da9210-regulator.h b/drivers/regulator/da9210-regulator.h
index 749c550808b6..b1f1a607c208 100644
--- a/drivers/regulator/da9210-regulator.h
+++ b/drivers/regulator/da9210-regulator.h
@@ -1,22 +1,7 @@
-
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* da9210-regulator.h - Regulator definitions for DA9210
* Copyright (C) 2013 Dialog Semiconductor Ltd.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License along with this library; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
- * Boston, MA 02110-1301, USA.
*/
#ifndef __DA9210_REGISTERS_H__
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 109ee12d4362..da37b4ccd834 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,18 +1,8 @@
-/*
- * da9211-regulator.c - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
- * Copyright (C) 2015 Dialog Semiconductor Ltd.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// da9211-regulator.c - Regulator device driver for DA9211/DA9212
+// /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
+// Copyright (C) 2015 Dialog Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
@@ -322,8 +312,10 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
goto error_i2c;
if (reg_val & DA9211_E_OV_CURR_A) {
+ regulator_lock(chip->rdev[0]);
regulator_notifier_call_chain(chip->rdev[0],
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(chip->rdev[0]);
err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
DA9211_E_OV_CURR_A);
@@ -334,8 +326,10 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
}
if (reg_val & DA9211_E_OV_CURR_B) {
+ regulator_lock(chip->rdev[1]);
regulator_notifier_call_chain(chip->rdev[1],
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(chip->rdev[1]);
err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
DA9211_E_OV_CURR_B);
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index 2cb32aab4f82..1201e7cc056c 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* da9211-regulator.h - Regulator definitions for DA9211/DA9212
* /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DA9211_REGISTERS_H__
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 7cec535cf0bc..eb317663f875 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -75,7 +75,7 @@ static int db8500_regulator_is_enabled(struct regulator_dev *rdev)
}
/* db8500 regulator operations */
-static struct regulator_ops db8500_regulator_ops = {
+static const struct regulator_ops db8500_regulator_ops = {
.enable = db8500_regulator_enable,
.disable = db8500_regulator_disable,
.is_enabled = db8500_regulator_is_enabled,
@@ -200,7 +200,7 @@ static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev)
return info->is_enabled;
}
-static struct regulator_ops db8500_regulator_switch_ops = {
+static const struct regulator_ops db8500_regulator_switch_ops = {
.enable = db8500_regulator_switch_enable,
.disable = db8500_regulator_switch_disable,
.is_enabled = db8500_regulator_switch_is_enabled,
@@ -214,6 +214,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VAPE] = {
.desc = {
.name = "db8500-vape",
+ .of_match = of_match_ptr("db8500_vape"),
.id = DB8500_REGULATOR_VAPE,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -223,6 +224,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VARM] = {
.desc = {
.name = "db8500-varm",
+ .of_match = of_match_ptr("db8500_varm"),
.id = DB8500_REGULATOR_VARM,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -232,6 +234,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VMODEM] = {
.desc = {
.name = "db8500-vmodem",
+ .of_match = of_match_ptr("db8500_vmodem"),
.id = DB8500_REGULATOR_VMODEM,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -241,6 +244,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VPLL] = {
.desc = {
.name = "db8500-vpll",
+ .of_match = of_match_ptr("db8500_vpll"),
.id = DB8500_REGULATOR_VPLL,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -250,6 +254,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VSMPS1] = {
.desc = {
.name = "db8500-vsmps1",
+ .of_match = of_match_ptr("db8500_vsmps1"),
.id = DB8500_REGULATOR_VSMPS1,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -259,6 +264,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VSMPS2] = {
.desc = {
.name = "db8500-vsmps2",
+ .of_match = of_match_ptr("db8500_vsmps2"),
.id = DB8500_REGULATOR_VSMPS2,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -271,6 +277,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VSMPS3] = {
.desc = {
.name = "db8500-vsmps3",
+ .of_match = of_match_ptr("db8500_vsmps3"),
.id = DB8500_REGULATOR_VSMPS3,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -280,6 +287,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VRF1] = {
.desc = {
.name = "db8500-vrf1",
+ .of_match = of_match_ptr("db8500_vrf1"),
.id = DB8500_REGULATOR_VRF1,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -289,6 +297,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
.desc = {
.name = "db8500-sva-mmdsp",
+ .of_match = of_match_ptr("db8500_sva_mmdsp"),
.id = DB8500_REGULATOR_SWITCH_SVAMMDSP,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -299,6 +308,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
.desc = {
.name = "db8500-sva-mmdsp-ret",
+ .of_match = of_match_ptr("db8500_sva_mmdsp_ret"),
.id = DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -310,6 +320,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
.desc = {
.name = "db8500-sva-pipe",
+ .of_match = of_match_ptr("db8500_sva_pipe"),
.id = DB8500_REGULATOR_SWITCH_SVAPIPE,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -320,6 +331,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
.desc = {
.name = "db8500-sia-mmdsp",
+ .of_match = of_match_ptr("db8500_sia_mmdsp"),
.id = DB8500_REGULATOR_SWITCH_SIAMMDSP,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -330,6 +342,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
.desc = {
.name = "db8500-sia-mmdsp-ret",
+ .of_match = of_match_ptr("db8500_sia_mmdsp_ret"),
.id = DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -341,6 +354,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
.desc = {
.name = "db8500-sia-pipe",
+ .of_match = of_match_ptr("db8500_sia_pipe"),
.id = DB8500_REGULATOR_SWITCH_SIAPIPE,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -351,6 +365,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SGA] = {
.desc = {
.name = "db8500-sga",
+ .of_match = of_match_ptr("db8500_sga"),
.id = DB8500_REGULATOR_SWITCH_SGA,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -361,6 +376,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
.desc = {
.name = "db8500-b2r2-mcde",
+ .of_match = of_match_ptr("db8500_b2r2_mcde"),
.id = DB8500_REGULATOR_SWITCH_B2R2_MCDE,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -371,6 +387,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_ESRAM12] = {
.desc = {
.name = "db8500-esram12",
+ .of_match = of_match_ptr("db8500_esram12"),
.id = DB8500_REGULATOR_SWITCH_ESRAM12,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -382,6 +399,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
.desc = {
.name = "db8500-esram12-ret",
+ .of_match = of_match_ptr("db8500_esram12_ret"),
.id = DB8500_REGULATOR_SWITCH_ESRAM12RET,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -393,6 +411,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_ESRAM34] = {
.desc = {
.name = "db8500-esram34",
+ .of_match = of_match_ptr("db8500_esram34"),
.id = DB8500_REGULATOR_SWITCH_ESRAM34,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -404,6 +423,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
.desc = {
.name = "db8500-esram34-ret",
+ .of_match = of_match_ptr("db8500_esram34_ret"),
.id = DB8500_REGULATOR_SWITCH_ESRAM34RET,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -414,113 +434,38 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
},
};
-static int db8500_regulator_register(struct platform_device *pdev,
- struct regulator_init_data *init_data,
- int id,
- struct device_node *np)
+static int db8500_regulator_probe(struct platform_device *pdev)
{
+ struct regulator_init_data *db8500_init_data;
struct dbx500_regulator_info *info;
struct regulator_config config = { };
- int err;
-
- /* assign per-regulator data */
- info = &dbx500_regulator_info[id];
- info->dev = &pdev->dev;
-
- config.dev = &pdev->dev;
- config.init_data = init_data;
- config.driver_data = info;
- config.of_node = np;
-
- /* register with the regulator framework */
- info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
- if (IS_ERR(info->rdev)) {
- err = PTR_ERR(info->rdev);
- dev_err(&pdev->dev, "failed to register %s: err %i\n",
- info->desc.name, err);
- return err;
- }
-
- dev_dbg(rdev_get_dev(info->rdev),
- "regulator-%s-probed\n", info->desc.name);
+ struct regulator_dev *rdev;
+ int err, i;
- return 0;
-}
-
-static struct of_regulator_match db8500_regulator_matches[] = {
- { .name = "db8500_vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
- { .name = "db8500_varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
- { .name = "db8500_vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
- { .name = "db8500_vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
- { .name = "db8500_vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
- { .name = "db8500_vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
- { .name = "db8500_vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
- { .name = "db8500_vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
- { .name = "db8500_sva_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
- { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
- { .name = "db8500_sva_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
- { .name = "db8500_sia_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
- { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
- { .name = "db8500_sia_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
- { .name = "db8500_sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
- { .name = "db8500_b2r2_mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
- { .name = "db8500_esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
- { .name = "db8500_esram12_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
- { .name = "db8500_esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
- { .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
-};
-
-static int
-db8500_regulator_of_probe(struct platform_device *pdev,
- struct device_node *np)
-{
- int i, err;
+ db8500_init_data = dev_get_platdata(&pdev->dev);
for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
- err = db8500_regulator_register(
- pdev, db8500_regulator_matches[i].init_data,
- i, db8500_regulator_matches[i].of_node);
- if (err)
+ /* assign per-regulator data */
+ info = &dbx500_regulator_info[i];
+
+ config.driver_data = info;
+ config.dev = &pdev->dev;
+ if (db8500_init_data)
+ config.init_data = &db8500_init_data[i];
+
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
+ dev_err(&pdev->dev, "failed to register %s: err %i\n",
+ info->desc.name, err);
return err;
- }
-
- return 0;
-}
-
-static int db8500_regulator_probe(struct platform_device *pdev)
-{
- struct regulator_init_data *db8500_init_data =
- dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
- int i, err;
-
- /* register all regulators */
- if (np) {
- err = of_regulator_match(&pdev->dev, np,
- db8500_regulator_matches,
- ARRAY_SIZE(db8500_regulator_matches));
- if (err < 0) {
- dev_err(&pdev->dev,
- "Error parsing regulator init data: %d\n", err);
- return err;
- }
-
- err = db8500_regulator_of_probe(pdev, np);
- if (err)
- return err;
- } else {
- for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
- err = db8500_regulator_register(pdev,
- &db8500_init_data[i],
- i, NULL);
- if (err)
- return err;
}
+ dev_dbg(&pdev->dev, "regulator-%s-probed\n", info->desc.name);
}
- err = ux500_regulator_debug_init(pdev,
- dbx500_regulator_info,
- ARRAY_SIZE(dbx500_regulator_info));
+ ux500_regulator_debug_init(pdev, dbx500_regulator_info,
+ ARRAY_SIZE(dbx500_regulator_info));
return 0;
}
diff --git a/drivers/regulator/dbx500-prcmu.h b/drivers/regulator/dbx500-prcmu.h
index c8e51ace9f06..6e20dab611ac 100644
--- a/drivers/regulator/dbx500-prcmu.h
+++ b/drivers/regulator/dbx500-prcmu.h
@@ -15,18 +15,14 @@
/**
* struct dbx500_regulator_info - dbx500 regulator information
- * @dev: device pointer
* @desc: regulator description
- * @rdev: regulator device pointer
* @is_enabled: status of the regulator
* @epod_id: id for EPOD (power domain)
* @is_ramret: RAM retention switch for EPOD (power domain)
*
*/
struct dbx500_regulator_info {
- struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *rdev;
bool is_enabled;
u16 epod_id;
bool is_ramret;
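Setting .of_match in the regulator_desc, as done for the db8500 entries above, lets the regulator core locate the matching device-tree node and build the init_data itself, which is what makes the of_regulator_match table and the per-regulator registration helper removable. A minimal hypothetical descriptor using the same mechanism (foo_* names and register values are made up; .regulators_node names the subnode to search, as the hi6421 conversion further below also does):

	#include <linux/bits.h>
	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/regulator/driver.h>

	static const struct regulator_ops foo_ldo_ops = {
		.enable		= regulator_enable_regmap,
		.disable	= regulator_disable_regmap,
		.is_enabled	= regulator_is_enabled_regmap,
	};

	static const struct regulator_desc foo_ldo_desc = {
		.name = "foo-ldo1",
		/* core matches a "foo_ldo1" child of the "regulators" node
		 * and parses its constraints, so the driver needs no
		 * of_regulator_match() call of its own */
		.of_match = of_match_ptr("foo_ldo1"),
		.regulators_node = of_match_ptr("regulators"),
		.id = 0,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.ops = &foo_ldo_ops,
		.n_voltages = 1,
		.fixed_uV = 1800000,
		.enable_reg = 0x20,
		.enable_mask = BIT(4),
	};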
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 771a06d1900d..dbe477da4e55 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -1,17 +1,13 @@
-/*
- * FAN53555 Fairchild Digitally Programmable TinyBuck Regulator Driver.
- *
- * Supported Part Numbers:
- * FAN53555UC00X/01X/03X/04X/05X
- *
- * Copyright (c) 2012 Marvell Technology Ltd.
- * Yunfan Zhang <yfzhang@marvell.com>
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// FAN53555 Fairchild Digitally Programmable TinyBuck Regulator Driver.
+//
+// Supported Part Numbers:
+// FAN53555UC00X/01X/03X/04X/05X
+//
+// Copyright (c) 2012 Marvell Technology Ltd.
+// Yunfan Zhang <yfzhang@marvell.com>
+
#include <linux/module.h>
#include <linux/param.h>
#include <linux/err.h>
@@ -91,10 +87,8 @@ enum {
struct fan53555_device_info {
enum fan53555_vendor vendor;
- struct regmap *regmap;
struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *rdev;
struct regulator_init_data *regulator;
/* IC Type and Rev */
int chip_id;
@@ -106,8 +100,6 @@ struct fan53555_device_info {
unsigned int vsel_min;
unsigned int vsel_step;
unsigned int vsel_count;
- /* Voltage slew rate limiting */
- unsigned int slew_rate;
/* Mode */
unsigned int mode_reg;
unsigned int mode_mask;
@@ -125,7 +117,7 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
ret = regulator_map_voltage_linear(rdev, uV, uV);
if (ret < 0)
return ret;
- ret = regmap_update_bits(di->regmap, di->sleep_reg,
+ ret = regmap_update_bits(rdev->regmap, di->sleep_reg,
di->desc.vsel_mask, ret);
if (ret < 0)
return ret;
@@ -140,7 +132,7 @@ static int fan53555_set_suspend_enable(struct regulator_dev *rdev)
{
struct fan53555_device_info *di = rdev_get_drvdata(rdev);
- return regmap_update_bits(di->regmap, di->sleep_reg,
+ return regmap_update_bits(rdev->regmap, di->sleep_reg,
VSEL_BUCK_EN, VSEL_BUCK_EN);
}
@@ -148,7 +140,7 @@ static int fan53555_set_suspend_disable(struct regulator_dev *rdev)
{
struct fan53555_device_info *di = rdev_get_drvdata(rdev);
- return regmap_update_bits(di->regmap, di->sleep_reg,
+ return regmap_update_bits(rdev->regmap, di->sleep_reg,
VSEL_BUCK_EN, 0);
}
@@ -158,11 +150,11 @@ static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- regmap_update_bits(di->regmap, di->mode_reg,
+ regmap_update_bits(rdev->regmap, di->mode_reg,
di->mode_mask, di->mode_mask);
break;
case REGULATOR_MODE_NORMAL:
- regmap_update_bits(di->regmap, di->vol_reg, di->mode_mask, 0);
+ regmap_update_bits(rdev->regmap, di->vol_reg, di->mode_mask, 0);
break;
default:
return -EINVAL;
@@ -176,7 +168,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
unsigned int val;
int ret = 0;
- ret = regmap_read(di->regmap, di->mode_reg, &val);
+ ret = regmap_read(rdev->regmap, di->mode_reg, &val);
if (ret < 0)
return ret;
if (val & di->mode_mask)
@@ -213,7 +205,7 @@ static int fan53555_set_ramp(struct regulator_dev *rdev, int ramp)
return -EINVAL;
}
- return regmap_update_bits(di->regmap, FAN53555_CONTROL,
+ return regmap_update_bits(rdev->regmap, FAN53555_CONTROL,
CTL_SLEW_MASK, regval << CTL_SLEW_SHIFT);
}
@@ -396,6 +388,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
struct regulator_config *config)
{
struct regulator_desc *rdesc = &di->desc;
+ struct regulator_dev *rdev;
rdesc->name = "fan53555-reg";
rdesc->supply_name = "vin";
@@ -410,8 +403,8 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->vsel_mask = di->vsel_count - 1;
rdesc->owner = THIS_MODULE;
- di->rdev = devm_regulator_register(di->dev, &di->desc, config);
- return PTR_ERR_OR_ZERO(di->rdev);
+ rdev = devm_regulator_register(di->dev, &di->desc, config);
+ return PTR_ERR_OR_ZERO(rdev);
}
static const struct regmap_config fan53555_regmap_config = {
@@ -466,6 +459,7 @@ static int fan53555_regulator_probe(struct i2c_client *client,
struct fan53555_device_info *di;
struct fan53555_platform_data *pdata;
struct regulator_config config = { };
+ struct regmap *regmap;
unsigned int val;
int ret;
@@ -502,22 +496,22 @@ static int fan53555_regulator_probe(struct i2c_client *client,
di->vendor = id->driver_data;
}
- di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
- if (IS_ERR(di->regmap)) {
+ regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
+ if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to allocate regmap!\n");
- return PTR_ERR(di->regmap);
+ return PTR_ERR(regmap);
}
di->dev = &client->dev;
i2c_set_clientdata(client, di);
/* Get chip ID */
- ret = regmap_read(di->regmap, FAN53555_ID1, &val);
+ ret = regmap_read(regmap, FAN53555_ID1, &val);
if (ret < 0) {
dev_err(&client->dev, "Failed to get chip ID!\n");
return ret;
}
di->chip_id = val & DIE_ID;
/* Get chip revision */
- ret = regmap_read(di->regmap, FAN53555_ID2, &val);
+ ret = regmap_read(regmap, FAN53555_ID2, &val);
if (ret < 0) {
dev_err(&client->dev, "Failed to get chip Rev!\n");
return ret;
@@ -534,7 +528,7 @@ static int fan53555_regulator_probe(struct i2c_client *client,
/* Register regulator */
config.dev = di->dev;
config.init_data = di->regulator;
- config.regmap = di->regmap;
+ config.regmap = regmap;
config.driver_data = di;
config.of_node = np;
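The fan53555 changes above rely on the core storing the regmap passed in regulator_config, so the ops can reach it through rdev->regmap instead of a copy cached in driver data. A minimal hypothetical op showing the idiom (the FOO_* register and bit values are made up):

	#include <linux/bits.h>
	#include <linux/regmap.h>
	#include <linux/regulator/driver.h>

	#define FOO_SLEEP_REG	0x01		/* hypothetical register */
	#define FOO_BUCK_EN	BIT(7)		/* hypothetical enable bit */

	static int foo_set_suspend_enable(struct regulator_dev *rdev)
	{
		/* rdev->regmap was set from config.regmap at registration */
		return regmap_update_bits(rdev->regmap, FOO_SLEEP_REG,
					  FOO_BUCK_EN, FOO_BUCK_EN);
	}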
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 6157001df0a4..f50d86a66138 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -36,7 +36,6 @@
struct gpio_regulator_data {
struct regulator_desc desc;
- struct regulator_dev *dev;
struct gpio_desc **gpiods;
int nr_gpios;
@@ -125,7 +124,7 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
return 0;
}
-static struct regulator_ops gpio_regulator_voltage_ops = {
+static const struct regulator_ops gpio_regulator_voltage_ops = {
.get_voltage = gpio_regulator_get_value,
.set_voltage = gpio_regulator_set_voltage,
.list_voltage = gpio_regulator_list_voltage,
@@ -221,7 +220,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
return config;
}
-static struct regulator_ops gpio_regulator_current_ops = {
+static const struct regulator_ops gpio_regulator_current_ops = {
.get_current_limit = gpio_regulator_get_value,
.set_current_limit = gpio_regulator_set_current_limit,
};
@@ -233,6 +232,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct gpio_regulator_data *drvdata;
struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
enum gpiod_flags gflags;
int ptr, ret, state, i;
@@ -326,9 +326,9 @@ static int gpio_regulator_probe(struct platform_device *pdev)
if (IS_ERR(cfg.ena_gpiod))
return PTR_ERR(cfg.ena_gpiod);
- drvdata->dev = regulator_register(&drvdata->desc, &cfg);
- if (IS_ERR(drvdata->dev)) {
- ret = PTR_ERR(drvdata->dev);
+ rdev = devm_regulator_register(dev, &drvdata->desc, &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(dev, "Failed to register regulator: %d\n", ret);
return ret;
}
@@ -338,15 +338,6 @@ static int gpio_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int gpio_regulator_remove(struct platform_device *pdev)
-{
- struct gpio_regulator_data *drvdata = platform_get_drvdata(pdev);
-
- regulator_unregister(drvdata->dev);
-
- return 0;
-}
-
#if defined(CONFIG_OF)
static const struct of_device_id regulator_gpio_of_match[] = {
{ .compatible = "regulator-gpio", },
@@ -357,7 +348,6 @@ MODULE_DEVICE_TABLE(of, regulator_gpio_of_match);
static struct platform_driver gpio_regulator_driver = {
.probe = gpio_regulator_probe,
- .remove = gpio_regulator_remove,
.driver = {
.name = "gpio-regulator",
.of_match_table = of_match_ptr(regulator_gpio_of_match),
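With devm_regulator_register() the regulator is unregistered automatically when the driver detaches, which is why gpio_regulator_remove() and the .remove hook can be dropped above. A minimal hypothetical probe using the devm variant (foo_* names and the trivial descriptor are illustrative only):

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/regulator/driver.h>

	static const struct regulator_ops foo_noop_ops = { };	/* hypothetical */

	static const struct regulator_desc foo_desc = {
		.name = "foo-fixed",
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.ops = &foo_noop_ops,
		.n_voltages = 1,
		.fixed_uV = 3300000,
	};

	static int foo_regulator_probe(struct platform_device *pdev)
	{
		struct regulator_config cfg = { .dev = &pdev->dev };
		struct regulator_dev *rdev;

		/* devm_ variant: released automatically on driver detach,
		 * so no .remove callback is needed */
		rdev = devm_regulator_register(&pdev->dev, &foo_desc, &cfg);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "Failed to register regulator: %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}

		return 0;
	}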
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 259c3a865ac6..5ac3d7c29725 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -1,17 +1,13 @@
-/*
- * Device driver for regulators in Hi6421 IC
- *
- * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
- * http://www.hisilicon.com
- * Copyright (c) <2013-2014> Linaro Ltd.
- * http://www.linaro.org
- *
- * Author: Guodong Xu <guodong.xu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Device driver for regulators in Hi6421 IC
+//
+// Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
+// http://www.hisilicon.com
+// Copyright (c) <2013-2014> Linaro Ltd.
+// http://www.linaro.org
+//
+// Author: Guodong Xu <guodong.xu@linaro.org>
#include <linux/slab.h>
#include <linux/device.h>
@@ -78,43 +74,6 @@ enum hi6421_regulator_id {
HI6421_NUM_REGULATORS,
};
-#define HI6421_REGULATOR_OF_MATCH(_name, id) \
-{ \
- .name = #_name, \
- .driver_data = (void *) HI6421_##id, \
-}
-
-static struct of_regulator_match hi6421_regulator_match[] = {
- HI6421_REGULATOR_OF_MATCH(hi6421_vout0, LDO0),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout1, LDO1),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout2, LDO2),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout3, LDO3),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout4, LDO4),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout5, LDO5),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout6, LDO6),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout7, LDO7),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout8, LDO8),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout9, LDO9),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout10, LDO10),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout11, LDO11),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout12, LDO12),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout13, LDO13),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout14, LDO14),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout15, LDO15),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout16, LDO16),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout17, LDO17),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout18, LDO18),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout19, LDO19),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout20, LDO20),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout_audio, LDOAUDIO),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck0, BUCK0),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck1, BUCK1),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck2, BUCK2),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck3, BUCK3),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck4, BUCK4),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck5, BUCK5),
-};
-
/* LDO 0, 4~7, 9~14, 16~20 have same voltage table. */
static const unsigned int ldo_0_voltages[] = {
1500000, 1800000, 2400000, 2500000,
@@ -157,6 +116,7 @@ static const struct regulator_ops hi6421_buck345_ops;
#define HI6421_LDO_ENABLE_TIME (350)
/*
* _id - LDO id name string
+ * _match - of match name string
* v_table - voltage table
* vreg - voltage select register
* vmask - voltage select mask
@@ -166,11 +126,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* ecomask - eco mode mask
* ecoamp - eco mode load uppler limit in uA
*/
-#define HI6421_LDO(_id, v_table, vreg, vmask, ereg, emask, \
+#define HI6421_LDO(_id, _match, v_table, vreg, vmask, ereg, emask, \
odelay, ecomask, ecoamp) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -191,6 +153,7 @@ static const struct regulator_ops hi6421_buck345_ops;
/* HI6421 LDO1~3 are linear voltage regulators at fixed uV_step
*
* _id - LDO id name string
+ * _match - of match name string
* _min_uV - minimum voltage supported in uV
* n_volt - number of votages available
* vstep - voltage increase in each linear step in uV
@@ -202,11 +165,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* ecomask - eco mode mask
* ecoamp - eco mode load uppler limit in uA
*/
-#define HI6421_LDO_LINEAR(_id, _min_uV, n_volt, vstep, vreg, vmask, \
+#define HI6421_LDO_LINEAR(_id, _match, _min_uV, n_volt, vstep, vreg, vmask,\
ereg, emask, odelay, ecomask, ecoamp) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_ldo_linear_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -228,6 +193,7 @@ static const struct regulator_ops hi6421_buck345_ops;
/* HI6421 LDOAUDIO is a linear voltage regulator with two 4-step ranges
*
* _id - LDO id name string
+ * _match - of match name string
* n_volt - number of votages available
* volt_ranges - array of regulator_linear_range
* vstep - voltage increase in each linear step in uV
@@ -239,11 +205,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* ecomask - eco mode mask
* ecoamp - eco mode load uppler limit in uA
*/
-#define HI6421_LDO_LINEAR_RANGE(_id, n_volt, volt_ranges, vreg, vmask, \
+#define HI6421_LDO_LINEAR_RANGE(_id, _match, n_volt, volt_ranges, vreg, vmask,\
ereg, emask, odelay, ecomask, ecoamp) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_ldo_linear_range_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -265,6 +233,7 @@ static const struct regulator_ops hi6421_buck345_ops;
/* HI6421 BUCK0/1/2 are linear voltage regulators at fixed uV_step
*
* _id - BUCK0/1/2 id name string
+ * _match - of match name string
* vreg - voltage select register
* vmask - voltage select mask
* ereg - enable register
@@ -273,11 +242,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* etime - enable time
* odelay - off/on delay time in uS
*/
-#define HI6421_BUCK012(_id, vreg, vmask, ereg, emask, sleepmask, \
+#define HI6421_BUCK012(_id, _match, vreg, vmask, ereg, emask, sleepmask,\
etime, odelay) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_buck012_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -299,6 +270,7 @@ static const struct regulator_ops hi6421_buck345_ops;
* that it supports SLEEP mode, so has different .ops.
*
* _id - LDO id name string
+ * _match - of match name string
* v_table - voltage table
* vreg - voltage select register
* vmask - voltage select mask
@@ -307,11 +279,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* odelay - off/on delay time in uS
* sleepmask - mask of sleep mode
*/
-#define HI6421_BUCK345(_id, v_table, vreg, vmask, ereg, emask, \
+#define HI6421_BUCK345(_id, _match, v_table, vreg, vmask, ereg, emask, \
odelay, sleepmask) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_buck345_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -331,59 +305,63 @@ static const struct regulator_ops hi6421_buck345_ops;
/* HI6421 regulator information */
static struct hi6421_regulator_info
hi6421_regulator_info[HI6421_NUM_REGULATORS] = {
- HI6421_LDO(LDO0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10,
+ HI6421_LDO(LDO0, hi6421_vout0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10,
10000, 0x20, 8000),
- HI6421_LDO_LINEAR(LDO1, 1700000, 4, 100000, 0x21, 0x03, 0x21, 0x10,
- 10000, 0x20, 5000),
- HI6421_LDO_LINEAR(LDO2, 1050000, 8, 50000, 0x22, 0x07, 0x22, 0x10,
- 20000, 0x20, 8000),
- HI6421_LDO_LINEAR(LDO3, 1050000, 8, 50000, 0x23, 0x07, 0x23, 0x10,
- 20000, 0x20, 8000),
- HI6421_LDO(LDO4, ldo_0_voltages, 0x24, 0x07, 0x24, 0x10,
+ HI6421_LDO_LINEAR(LDO1, hi6421_vout1, 1700000, 4, 100000, 0x21, 0x03,
+ 0x21, 0x10, 10000, 0x20, 5000),
+ HI6421_LDO_LINEAR(LDO2, hi6421_vout2, 1050000, 8, 50000, 0x22, 0x07,
+ 0x22, 0x10, 20000, 0x20, 8000),
+ HI6421_LDO_LINEAR(LDO3, hi6421_vout3, 1050000, 8, 50000, 0x23, 0x07,
+ 0x23, 0x10, 20000, 0x20, 8000),
+ HI6421_LDO(LDO4, hi6421_vout4, ldo_0_voltages, 0x24, 0x07, 0x24, 0x10,
20000, 0x20, 8000),
- HI6421_LDO(LDO5, ldo_0_voltages, 0x25, 0x07, 0x25, 0x10,
+ HI6421_LDO(LDO5, hi6421_vout5, ldo_0_voltages, 0x25, 0x07, 0x25, 0x10,
20000, 0x20, 8000),
- HI6421_LDO(LDO6, ldo_0_voltages, 0x26, 0x07, 0x26, 0x10,
+ HI6421_LDO(LDO6, hi6421_vout6, ldo_0_voltages, 0x26, 0x07, 0x26, 0x10,
20000, 0x20, 8000),
- HI6421_LDO(LDO7, ldo_0_voltages, 0x27, 0x07, 0x27, 0x10,
+ HI6421_LDO(LDO7, hi6421_vout7, ldo_0_voltages, 0x27, 0x07, 0x27, 0x10,
20000, 0x20, 5000),
- HI6421_LDO(LDO8, ldo_8_voltages, 0x28, 0x07, 0x28, 0x10,
+ HI6421_LDO(LDO8, hi6421_vout8, ldo_8_voltages, 0x28, 0x07, 0x28, 0x10,
20000, 0x20, 8000),
- HI6421_LDO(LDO9, ldo_0_voltages, 0x29, 0x07, 0x29, 0x10,
+ HI6421_LDO(LDO9, hi6421_vout9, ldo_0_voltages, 0x29, 0x07, 0x29, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO10, ldo_0_voltages, 0x2a, 0x07, 0x2a, 0x10,
+ HI6421_LDO(LDO10, hi6421_vout10, ldo_0_voltages, 0x2a, 0x07, 0x2a, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO11, ldo_0_voltages, 0x2b, 0x07, 0x2b, 0x10,
+ HI6421_LDO(LDO11, hi6421_vout11, ldo_0_voltages, 0x2b, 0x07, 0x2b, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO12, ldo_0_voltages, 0x2c, 0x07, 0x2c, 0x10,
+ HI6421_LDO(LDO12, hi6421_vout12, ldo_0_voltages, 0x2c, 0x07, 0x2c, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO13, ldo_0_voltages, 0x2d, 0x07, 0x2d, 0x10,
+ HI6421_LDO(LDO13, hi6421_vout13, ldo_0_voltages, 0x2d, 0x07, 0x2d, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO14, ldo_0_voltages, 0x2e, 0x07, 0x2e, 0x10,
+ HI6421_LDO(LDO14, hi6421_vout14, ldo_0_voltages, 0x2e, 0x07, 0x2e, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO15, ldo_8_voltages, 0x2f, 0x07, 0x2f, 0x10,
+ HI6421_LDO(LDO15, hi6421_vout15, ldo_8_voltages, 0x2f, 0x07, 0x2f, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO16, ldo_0_voltages, 0x30, 0x07, 0x30, 0x10,
+ HI6421_LDO(LDO16, hi6421_vout16, ldo_0_voltages, 0x30, 0x07, 0x30, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO17, ldo_0_voltages, 0x31, 0x07, 0x31, 0x10,
+ HI6421_LDO(LDO17, hi6421_vout17, ldo_0_voltages, 0x31, 0x07, 0x31, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO18, ldo_0_voltages, 0x32, 0x07, 0x32, 0x10,
+ HI6421_LDO(LDO18, hi6421_vout18, ldo_0_voltages, 0x32, 0x07, 0x32, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO19, ldo_0_voltages, 0x33, 0x07, 0x33, 0x10,
+ HI6421_LDO(LDO19, hi6421_vout19, ldo_0_voltages, 0x33, 0x07, 0x33, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO20, ldo_0_voltages, 0x34, 0x07, 0x34, 0x10,
+ HI6421_LDO(LDO20, hi6421_vout20, ldo_0_voltages, 0x34, 0x07, 0x34, 0x10,
40000, 0x20, 8000),
- HI6421_LDO_LINEAR_RANGE(LDOAUDIO, 8, ldo_audio_volt_range, 0x36,
- 0x70, 0x36, 0x01, 40000, 0x02, 5000),
- HI6421_BUCK012(BUCK0, 0x0d, 0x7f, 0x0c, 0x01, 0x10, 400, 20000),
- HI6421_BUCK012(BUCK1, 0x0f, 0x7f, 0x0e, 0x01, 0x10, 400, 20000),
- HI6421_BUCK012(BUCK2, 0x11, 0x7f, 0x10, 0x01, 0x10, 350, 100),
- HI6421_BUCK345(BUCK3, buck_3_voltages, 0x13, 0x07, 0x12, 0x01,
- 20000, 0x10),
- HI6421_BUCK345(BUCK4, buck_4_voltages, 0x15, 0x07, 0x14, 0x01,
- 20000, 0x10),
- HI6421_BUCK345(BUCK5, buck_5_voltages, 0x17, 0x07, 0x16, 0x01,
- 20000, 0x10),
+ HI6421_LDO_LINEAR_RANGE(LDOAUDIO, hi6421_vout_audio, 8,
+ ldo_audio_volt_range, 0x36, 0x70, 0x36, 0x01,
+ 40000, 0x02, 5000),
+ HI6421_BUCK012(BUCK0, hi6421_buck0, 0x0d, 0x7f, 0x0c, 0x01, 0x10, 400,
+ 20000),
+ HI6421_BUCK012(BUCK1, hi6421_buck1, 0x0f, 0x7f, 0x0e, 0x01, 0x10, 400,
+ 20000),
+ HI6421_BUCK012(BUCK2, hi6421_buck2, 0x11, 0x7f, 0x10, 0x01, 0x10, 350,
+ 100),
+ HI6421_BUCK345(BUCK3, hi6421_buck3, buck_3_voltages, 0x13, 0x07, 0x12,
+ 0x01, 20000, 0x10),
+ HI6421_BUCK345(BUCK4, hi6421_buck4, buck_4_voltages, 0x15, 0x07, 0x14,
+ 0x01, 20000, 0x10),
+ HI6421_BUCK345(BUCK5, hi6421_buck5, buck_5_voltages, 0x17, 0x07, 0x16,
+ 0x01, 20000, 0x10),
};
static int hi6421_regulator_enable(struct regulator_dev *rdev)
@@ -552,42 +530,14 @@ static const struct regulator_ops hi6421_buck345_ops = {
.set_mode = hi6421_regulator_buck_set_mode,
};
-static int hi6421_regulator_register(struct platform_device *pdev,
- struct regmap *rmap,
- struct regulator_init_data *init_data,
- int id, struct device_node *np)
-{
- struct hi6421_regulator_info *info = NULL;
- struct regulator_config config = { };
- struct regulator_dev *rdev;
-
- /* assign per-regulator data */
- info = &hi6421_regulator_info[id];
-
- config.dev = &pdev->dev;
- config.init_data = init_data;
- config.driver_data = info;
- config.regmap = rmap;
- config.of_node = np;
-
- /* register regulator with framework */
- rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
- if (IS_ERR(rdev)) {
- dev_err(&pdev->dev, "failed to register regulator %s\n",
- info->desc.name);
- return PTR_ERR(rdev);
- }
-
- return 0;
-}
-
static int hi6421_regulator_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct device_node *np;
- struct hi6421_pmic *pmic;
+ struct hi6421_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct hi6421_regulator_pdata *pdata;
- int i, ret = 0;
+ struct hi6421_regulator_info *info;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ int i;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -595,27 +545,21 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
mutex_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
- np = of_get_child_by_name(dev->parent->of_node, "regulators");
- if (!np)
- return -ENODEV;
-
- ret = of_regulator_match(dev, np,
- hi6421_regulator_match,
- ARRAY_SIZE(hi6421_regulator_match));
- of_node_put(np);
- if (ret < 0) {
- dev_err(dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
- }
-
- pmic = dev_get_drvdata(dev->parent);
-
for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
- ret = hi6421_regulator_register(pdev, pmic->regmap,
- hi6421_regulator_match[i].init_data, i,
- hi6421_regulator_match[i].of_node);
- if (ret)
- return ret;
+ /* assign per-regulator data */
+ info = &hi6421_regulator_info[i];
+
+ config.dev = pdev->dev.parent;
+ config.driver_data = info;
+ config.regmap = pmic->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ return PTR_ERR(rdev);
+ }
}
return 0;
diff --git a/drivers/regulator/hi6421v530-regulator.c b/drivers/regulator/hi6421v530-regulator.c
index c09bc71538a5..06ae65199afd 100644
--- a/drivers/regulator/hi6421v530-regulator.c
+++ b/drivers/regulator/hi6421v530-regulator.c
@@ -1,18 +1,14 @@
-/*
- * Device driver for regulators in Hi6421V530 IC
- *
- * Copyright (c) <2017> HiSilicon Technologies Co., Ltd.
- * http://www.hisilicon.com
- * Copyright (c) <2017> Linaro Ltd.
- * http://www.linaro.org
- *
- * Author: Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>
- * Guodong Xu <guodong.xu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Device driver for regulators in Hi6421V530 IC
+//
+// Copyright (c) <2017> HiSilicon Technologies Co., Ltd.
+// http://www.hisilicon.com
+// Copyright (c) <2017> Linaro Ltd.
+// http://www.linaro.org
+//
+// Author: Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>
+// Guodong Xu <guodong.xu@linaro.org>
#include <linux/mfd/hi6421-pmic.h>
#include <linux/module.h>
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
index bba24a6fdb1e..ac2ee2030211 100644
--- a/drivers/regulator/hi655x-regulator.c
+++ b/drivers/regulator/hi655x-regulator.c
@@ -1,16 +1,12 @@
-/*
- * Device driver for regulators in Hi655x IC
- *
- * Copyright (c) 2016 Hisilicon.
- *
- * Authors:
- * Chen Feng <puck.chen@hisilicon.com>
- * Fei Wang <w.f@huawei.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Device driver for regulators in Hi655x IC
+//
+// Copyright (c) 2016 Hisilicon.
+//
+// Authors:
+// Chen Feng <puck.chen@hisilicon.com>
+// Fei Wang <w.f@huawei.com>
#include <linux/bitops.h>
#include <linux/device.h>
@@ -28,7 +24,6 @@
struct hi655x_regulator {
unsigned int disable_reg;
unsigned int status_reg;
- unsigned int ctrl_mask;
struct regulator_desc rdesc;
};
@@ -77,22 +72,18 @@ enum hi655x_regulator_id {
static int hi655x_is_enabled(struct regulator_dev *rdev)
{
unsigned int value = 0;
-
struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
regmap_read(rdev->regmap, regulator->status_reg, &value);
- return (value & BIT(regulator->ctrl_mask));
+ return (value & rdev->desc->enable_mask);
}
static int hi655x_disable(struct regulator_dev *rdev)
{
- int ret = 0;
-
struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
- ret = regmap_write(rdev->regmap, regulator->disable_reg,
- BIT(regulator->ctrl_mask));
- return ret;
+ return regmap_write(rdev->regmap, regulator->disable_reg,
+ rdev->desc->enable_mask);
}
static const struct regulator_ops hi655x_regulator_ops = {
@@ -132,7 +123,6 @@ static const struct regulator_ops hi655x_ldo_linear_ops = {
}, \
.disable_reg = HI655X_BUS_ADDR(dreg), \
.status_reg = HI655X_BUS_ADDR(sreg), \
- .ctrl_mask = cmask, \
}
#define HI655X_LDO_LINEAR(_ID, vreg, vmask, ereg, dreg, \
@@ -155,10 +145,9 @@ static const struct regulator_ops hi655x_ldo_linear_ops = {
}, \
.disable_reg = HI655X_BUS_ADDR(dreg), \
.status_reg = HI655X_BUS_ADDR(sreg), \
- .ctrl_mask = cmask, \
}
-static struct hi655x_regulator regulators[] = {
+static const struct hi655x_regulator regulators[] = {
HI655X_LDO_LINEAR(LDO2, 0x72, 0x07, 0x29, 0x2a, 0x2b, 0x01,
2500000, 8, 100000),
HI655X_LDO(LDO7, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x06, ldo7_voltages),
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index c876e161052a..e02fdd1dd092 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -48,7 +48,7 @@ static const int ldo_cont_enable_time[] = {
static int lm363x_regulator_enable_time(struct regulator_dev *rdev)
{
enum lm363x_regulator_id id = rdev_get_id(rdev);
- u8 val, addr, mask;
+ unsigned int val, addr, mask;
switch (id) {
case LM3631_LDO_CONT:
@@ -71,7 +71,7 @@ static int lm363x_regulator_enable_time(struct regulator_dev *rdev)
return 0;
}
- if (regmap_read(rdev->regmap, addr, (unsigned int *)&val))
+ if (regmap_read(rdev->regmap, addr, &val))
return -EINVAL;
val = (val & mask) >> LM3631_ENTIME_SHIFT;
@@ -82,13 +82,13 @@ static int lm363x_regulator_enable_time(struct regulator_dev *rdev)
return ENABLE_TIME_USEC * val;
}
-static struct regulator_ops lm363x_boost_voltage_table_ops = {
+static const struct regulator_ops lm363x_boost_voltage_table_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_ops lm363x_regulator_voltage_table_ops = {
+static const struct regulator_ops lm363x_regulator_voltage_table_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
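The lm363x change from u8 to unsigned int above matters because regmap_read() always writes a full unsigned int through the pointer it is given; casting the address of a u8 can let the read clobber neighbouring stack bytes. A minimal hypothetical helper showing the safe pattern:

	#include <linux/bitops.h>
	#include <linux/regmap.h>

	/* Read a register and extract a masked field; the destination of
	 * regmap_read() must be a real unsigned int, never a casted u8. */
	static int foo_read_field(struct regmap *map, unsigned int reg,
				  unsigned int mask, unsigned int *out)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(map, reg, &val);
		if (ret)
			return ret;

		*out = (val & mask) >> (ffs(mask) - 1);
		return 0;
	}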
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 14fd38807134..2e16a6ab491d 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -372,10 +372,13 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
if ((flag0 & (0x4 << icnt))
&& (pchip->irqmask & (0x04 << icnt))
- && (pchip->rdev[icnt] != NULL))
+ && (pchip->rdev[icnt] != NULL)) {
+ regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_PWR_FAULT,
NULL);
+ regulator_unlock(pchip->rdev[icnt]);
+ }
/* read flag1 register */
ret = lp8755_read(pchip, 0x0E, &flag1);
@@ -389,18 +392,24 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
/* send OCP event to all regulator devices */
if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
- if (pchip->rdev[icnt] != NULL)
+ if (pchip->rdev[icnt] != NULL) {
+ regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_OCP,
NULL);
+ regulator_unlock(pchip->rdev[icnt]);
+ }
/* send OVP event to all regulator devices */
if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
- if (pchip->rdev[icnt] != NULL)
+ if (pchip->rdev[icnt] != NULL) {
+ regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_OVP,
NULL);
+ regulator_unlock(pchip->rdev[icnt]);
+ }
return IRQ_HANDLED;
err_i2c:
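
The lp8755 hunks above wrap every regulator_notifier_call_chain() call in regulator_lock()/regulator_unlock(), because the events are raised from the driver's own IRQ handler and the core now expects the rdev lock to be held around the notifier. A minimal sketch of that pattern, assuming a hypothetical chip structure (my_chip, MY_NREGS and the event choice are illustrative, not from the patch):

#include <linux/interrupt.h>
#include <linux/regulator/driver.h>

#define MY_NREGS 8                      /* illustrative regulator count */

struct my_chip {
        struct regulator_dev *rdev[MY_NREGS];
};

static irqreturn_t my_chip_irq_handler(int irq, void *data)
{
        struct my_chip *chip = data;
        int i;

        for (i = 0; i < MY_NREGS; i++) {
                if (!chip->rdev[i])
                        continue;

                /* Hold the rdev lock around the notifier call, as the
                 * conversions above do. */
                regulator_lock(chip->rdev[i]);
                regulator_notifier_call_chain(chip->rdev[i],
                                              REGULATOR_EVENT_OVER_TEMP,
                                              NULL);
                regulator_unlock(chip->rdev[i]);
        }

        return IRQ_HANDLED;
}

The same locking shape recurs in the ltc3589, ltc3676 and pv8806x/pv88090 hunks further down.
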
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 4ed41731a5b1..81eb4b890c0c 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -34,6 +34,10 @@
.ramp_delay = _delay, \
.linear_ranges = _lr, \
.n_linear_ranges = ARRAY_SIZE(_lr), \
+ .curr_table = lp87565_buck_uA, \
+ .n_current_limits = ARRAY_SIZE(lp87565_buck_uA),\
+ .csel_reg = (_cr), \
+ .csel_mask = LP87565_BUCK_CTRL_2_ILIM, \
}, \
.ctrl2_reg = _cr, \
}
@@ -102,44 +106,7 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
return 0;
}
-static int lp87565_buck_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- int id = rdev_get_id(rdev);
- struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
- int i;
-
- for (i = ARRAY_SIZE(lp87565_buck_uA) - 1; i >= 0; i--) {
- if (lp87565_buck_uA[i] >= min_uA &&
- lp87565_buck_uA[i] <= max_uA)
- return regmap_update_bits(lp87565->regmap,
- regulators[id].ctrl2_reg,
- LP87565_BUCK_CTRL_2_ILIM,
- i << __ffs(LP87565_BUCK_CTRL_2_ILIM));
- }
-
- return -EINVAL;
-}
-
-static int lp87565_buck_get_current_limit(struct regulator_dev *rdev)
-{
- int id = rdev_get_id(rdev);
- struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
- int ret;
- unsigned int val;
-
- ret = regmap_read(lp87565->regmap, regulators[id].ctrl2_reg, &val);
- if (ret)
- return ret;
-
- val = (val & LP87565_BUCK_CTRL_2_ILIM) >>
- __ffs(LP87565_BUCK_CTRL_2_ILIM);
-
- return (val < ARRAY_SIZE(lp87565_buck_uA)) ?
- lp87565_buck_uA[val] : -EINVAL;
-}
-
-/* Operations permitted on BUCK0, BUCK1 */
+/* Operations permitted on BUCKs */
static const struct regulator_ops lp87565_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -150,8 +117,8 @@ static const struct regulator_ops lp87565_buck_ops = {
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = lp87565_buck_set_ramp_delay,
- .set_current_limit = lp87565_buck_set_current_limit,
- .get_current_limit = lp87565_buck_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static const struct lp87565_regulator regulators[] = {
@@ -193,7 +160,7 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
- int i, min_idx = LP87565_BUCK_1, max_idx = LP87565_BUCK_3;
+ int i, min_idx = LP87565_BUCK_0, max_idx = LP87565_BUCK_3;
platform_set_drvdata(pdev, lp87565);
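
The lp87565 change above drops the driver's open-coded current-limit callbacks in favour of the core regmap helpers; the descriptor only needs a current table plus the selector register and mask, and a regmap must reach rdev->regmap via the regulator_config. A minimal sketch with made-up mypmic_* names and register values:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

/* Illustrative current limits in uA, lowest to highest selector. */
static const unsigned int mypmic_buck_uA[] = {
        1500000, 2000000, 2500000, 3000000,
};

static const struct regulator_ops mypmic_buck_ops = {
        .set_current_limit = regulator_set_current_limit_regmap,
        .get_current_limit = regulator_get_current_limit_regmap,
};

static const struct regulator_desc mypmic_buck_desc = {
        .name = "buck0",
        .id = 0,
        .ops = &mypmic_buck_ops,
        .type = REGULATOR_VOLTAGE,
        .owner = THIS_MODULE,
        .curr_table = mypmic_buck_uA,
        .n_current_limits = ARRAY_SIZE(mypmic_buck_uA),
        .csel_reg = 0x07,               /* made-up current-select register */
        .csel_mask = 0x18,              /* made-up current-select field */
};

The helpers pick a table entry inside the requested min/max range and read or write the csel field through rdev->regmap, which is why the probe still has to supply a regmap in the config even though the custom callbacks are gone.
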
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 63f724f260ef..9a037fdc5fc5 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -1,21 +1,9 @@
-/*
- * Linear Technology LTC3589,LTC3589-1 regulator support
- *
- * Copyright (c) 2014 Philipp Zabel <p.zabel@pengutronix.de>, Pengutronix
- *
- * See file CREDITS for list of people who contributed to this
- * project.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Linear Technology LTC3589,LTC3589-1 regulator support
+//
+// Copyright (c) 2014 Philipp Zabel <p.zabel@pengutronix.de>, Pengutronix
+
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -84,19 +72,11 @@ enum ltc3589_reg {
LTC3589_NUM_REGULATORS,
};
-struct ltc3589_regulator {
- struct regulator_desc desc;
-
- /* External feedback voltage divider */
- unsigned int r1;
- unsigned int r2;
-};
-
struct ltc3589 {
struct regmap *regmap;
struct device *dev;
enum ltc3589_variant variant;
- struct ltc3589_regulator regulator_descs[LTC3589_NUM_REGULATORS];
+ struct regulator_desc regulator_descs[LTC3589_NUM_REGULATORS];
struct regulator_dev *regulators[LTC3589_NUM_REGULATORS];
};
@@ -196,131 +176,90 @@ static const struct regulator_ops ltc3589_table_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
+static inline unsigned int ltc3589_scale(unsigned int uV, u32 r1, u32 r2)
+{
+ uint64_t tmp;
-#define LTC3589_REG(_name, _ops, en_bit, dtv1_reg, dtv_mask, go_bit) \
- [LTC3589_ ## _name] = { \
- .desc = { \
- .name = #_name, \
- .n_voltages = (dtv_mask) + 1, \
- .min_uV = (go_bit) ? 362500 : 0, \
- .uV_step = (go_bit) ? 12500 : 0, \
- .ramp_delay = (go_bit) ? 1750 : 0, \
- .fixed_uV = (dtv_mask) ? 0 : 800000, \
- .ops = &ltc3589_ ## _ops ## _regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = LTC3589_ ## _name, \
- .owner = THIS_MODULE, \
- .vsel_reg = (dtv1_reg), \
- .vsel_mask = (dtv_mask), \
- .apply_reg = (go_bit) ? LTC3589_VCCR : 0, \
- .apply_bit = (go_bit), \
- .enable_reg = (en_bit) ? LTC3589_OVEN : 0, \
- .enable_mask = (en_bit), \
- }, \
- }
-
-#define LTC3589_LINEAR_REG(_name, _dtv1) \
- LTC3589_REG(_name, linear, LTC3589_OVEN_ ## _name, \
- LTC3589_ ## _dtv1, 0x1f, \
- LTC3589_VCCR_ ## _name ## _GO)
-
-#define LTC3589_FIXED_REG(_name) \
- LTC3589_REG(_name, fixed, LTC3589_OVEN_ ## _name, 0, 0, 0)
-
-static struct ltc3589_regulator ltc3589_regulators[LTC3589_NUM_REGULATORS] = {
- LTC3589_LINEAR_REG(SW1, B1DTV1),
- LTC3589_LINEAR_REG(SW2, B2DTV1),
- LTC3589_LINEAR_REG(SW3, B3DTV1),
- LTC3589_FIXED_REG(BB_OUT),
- LTC3589_REG(LDO1, fixed_standby, 0, 0, 0, 0),
- LTC3589_LINEAR_REG(LDO2, L2DTV1),
- LTC3589_FIXED_REG(LDO3),
- LTC3589_REG(LDO4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2, 0x60, 0),
-};
+ if (uV == 0)
+ return 0;
-#ifdef CONFIG_OF
-static struct of_regulator_match ltc3589_matches[LTC3589_NUM_REGULATORS] = {
- { .name = "sw1", },
- { .name = "sw2", },
- { .name = "sw3", },
- { .name = "bb-out", },
- { .name = "ldo1", }, /* standby */
- { .name = "ldo2", },
- { .name = "ldo3", },
- { .name = "ldo4", },
-};
+ tmp = (uint64_t)uV * r1;
+ do_div(tmp, r2);
+ return uV + (unsigned int)tmp;
+}
-static int ltc3589_parse_regulators_dt(struct ltc3589 *ltc3589)
+static int ltc3589_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
{
- struct device *dev = ltc3589->dev;
- struct device_node *node;
- int i, ret;
+ struct ltc3589 *ltc3589 = config->driver_data;
+ struct regulator_desc *rdesc = &ltc3589->regulator_descs[desc->id];
+ u32 r[2];
+ int ret;
- node = of_get_child_by_name(dev->of_node, "regulators");
- if (!node) {
- dev_err(dev, "regulators node not found\n");
- return -EINVAL;
- }
+ /* Parse feedback voltage dividers. LDO3 and LDO4 don't have them */
+ if (desc->id >= LTC3589_LDO3)
+ return 0;
- ret = of_regulator_match(dev, node, ltc3589_matches,
- ARRAY_SIZE(ltc3589_matches));
- of_node_put(node);
- if (ret < 0) {
- dev_err(dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
- }
- if (ret != LTC3589_NUM_REGULATORS) {
- dev_err(dev, "Only %d regulators described in device tree\n",
+ ret = of_property_read_u32_array(np, "lltc,fb-voltage-divider", r, 2);
+ if (ret) {
+ dev_err(ltc3589->dev, "Failed to parse voltage divider: %d\n",
ret);
- return -EINVAL;
+ return ret;
}
- /* Parse feedback voltage dividers. LDO3 and LDO4 don't have them */
- for (i = 0; i < LTC3589_LDO3; i++) {
- struct ltc3589_regulator *desc = &ltc3589->regulator_descs[i];
- struct device_node *np = ltc3589_matches[i].of_node;
- u32 vdiv[2];
-
- ret = of_property_read_u32_array(np, "lltc,fb-voltage-divider",
- vdiv, 2);
- if (ret) {
- dev_err(dev, "Failed to parse voltage divider: %d\n",
- ret);
- return ret;
- }
+ if (!r[0] || !r[1])
+ return 0;
- desc->r1 = vdiv[0];
- desc->r2 = vdiv[1];
- }
+ rdesc->min_uV = ltc3589_scale(desc->min_uV, r[0], r[1]);
+ rdesc->uV_step = ltc3589_scale(desc->uV_step, r[0], r[1]);
+ rdesc->fixed_uV = ltc3589_scale(desc->fixed_uV, r[0], r[1]);
return 0;
}
-static inline struct regulator_init_data *match_init_data(int index)
-{
- return ltc3589_matches[index].init_data;
-}
-
-static inline struct device_node *match_of_node(int index)
-{
- return ltc3589_matches[index].of_node;
-}
-#else
-static inline int ltc3589_parse_regulators_dt(struct ltc3589 *ltc3589)
-{
- return 0;
-}
+#define LTC3589_REG(_name, _of_name, _ops, en_bit, dtv1_reg, dtv_mask, go_bit)\
+ [LTC3589_ ## _name] = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(#_of_name), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .of_parse_cb = ltc3589_of_parse_cb, \
+ .n_voltages = (dtv_mask) + 1, \
+ .min_uV = (go_bit) ? 362500 : 0, \
+ .uV_step = (go_bit) ? 12500 : 0, \
+ .ramp_delay = (go_bit) ? 1750 : 0, \
+ .fixed_uV = (dtv_mask) ? 0 : 800000, \
+ .ops = &ltc3589_ ## _ops ## _regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = LTC3589_ ## _name, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = (dtv1_reg), \
+ .vsel_mask = (dtv_mask), \
+ .apply_reg = (go_bit) ? LTC3589_VCCR : 0, \
+ .apply_bit = (go_bit), \
+ .enable_reg = (en_bit) ? LTC3589_OVEN : 0, \
+ .enable_mask = (en_bit), \
+ }
-static inline struct regulator_init_data *match_init_data(int index)
-{
- return NULL;
-}
+#define LTC3589_LINEAR_REG(_name, _of_name, _dtv1) \
+ LTC3589_REG(_name, _of_name, linear, LTC3589_OVEN_ ## _name, \
+ LTC3589_ ## _dtv1, 0x1f, \
+ LTC3589_VCCR_ ## _name ## _GO)
-static inline struct device_node *match_of_node(int index)
-{
- return NULL;
-}
-#endif
+#define LTC3589_FIXED_REG(_name, _of_name) \
+ LTC3589_REG(_name, _of_name, fixed, LTC3589_OVEN_ ## _name, 0, 0, 0)
+
+static const struct regulator_desc ltc3589_regulators[] = {
+ LTC3589_LINEAR_REG(SW1, sw1, B1DTV1),
+ LTC3589_LINEAR_REG(SW2, sw2, B2DTV1),
+ LTC3589_LINEAR_REG(SW3, sw3, B3DTV1),
+ LTC3589_FIXED_REG(BB_OUT, bb-out),
+ LTC3589_REG(LDO1, ldo1, fixed_standby, 0, 0, 0, 0),
+ LTC3589_LINEAR_REG(LDO2, ldo2, L2DTV1),
+ LTC3589_FIXED_REG(LDO3, ldo3),
+ LTC3589_REG(LDO4, ldo4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2,
+ 0x60, 0),
+};
static bool ltc3589_writeable_reg(struct device *dev, unsigned int reg)
{
@@ -409,7 +348,6 @@ static const struct regmap_config ltc3589_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-
static irqreturn_t ltc3589_isr(int irq, void *dev_id)
{
struct ltc3589 *ltc3589 = dev_id;
@@ -419,16 +357,22 @@ static irqreturn_t ltc3589_isr(int irq, void *dev_id)
if (irqstat & LTC3589_IRQSTAT_THERMAL_WARN) {
event = REGULATOR_EVENT_OVER_TEMP;
- for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
+ for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
+ regulator_lock(ltc3589->regulators[i]);
regulator_notifier_call_chain(ltc3589->regulators[i],
event, NULL);
+ regulator_unlock(ltc3589->regulators[i]);
+ }
}
if (irqstat & LTC3589_IRQSTAT_UNDERVOLT_WARN) {
event = REGULATOR_EVENT_UNDER_VOLTAGE;
- for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
+ for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
+ regulator_lock(ltc3589->regulators[i]);
regulator_notifier_call_chain(ltc3589->regulators[i],
event, NULL);
+ regulator_unlock(ltc3589->regulators[i]);
+ }
}
/* Clear warning condition */
@@ -437,33 +381,11 @@ static irqreturn_t ltc3589_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static inline unsigned int ltc3589_scale(unsigned int uV, u32 r1, u32 r2)
-{
- uint64_t tmp;
- if (uV == 0)
- return 0;
- tmp = (uint64_t)uV * r1;
- do_div(tmp, r2);
- return uV + (unsigned int)tmp;
-}
-
-static void ltc3589_apply_fb_voltage_divider(struct ltc3589_regulator *rdesc)
-{
- struct regulator_desc *desc = &rdesc->desc;
-
- if (!rdesc->r1 || !rdesc->r2)
- return;
-
- desc->min_uV = ltc3589_scale(desc->min_uV, rdesc->r1, rdesc->r2);
- desc->uV_step = ltc3589_scale(desc->uV_step, rdesc->r1, rdesc->r2);
- desc->fixed_uV = ltc3589_scale(desc->fixed_uV, rdesc->r1, rdesc->r2);
-}
-
static int ltc3589_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- struct ltc3589_regulator *descs;
+ struct regulator_desc *descs;
struct ltc3589 *ltc3589;
int i, ret;
@@ -482,11 +404,11 @@ static int ltc3589_probe(struct i2c_client *client,
descs = ltc3589->regulator_descs;
memcpy(descs, ltc3589_regulators, sizeof(ltc3589_regulators));
if (ltc3589->variant == LTC3589) {
- descs[LTC3589_LDO3].desc.fixed_uV = 1800000;
- descs[LTC3589_LDO4].desc.volt_table = ltc3589_ldo4;
+ descs[LTC3589_LDO3].fixed_uV = 1800000;
+ descs[LTC3589_LDO4].volt_table = ltc3589_ldo4;
} else {
- descs[LTC3589_LDO3].desc.fixed_uV = 2800000;
- descs[LTC3589_LDO4].desc.volt_table = ltc3589_12_ldo4;
+ descs[LTC3589_LDO3].fixed_uV = 2800000;
+ descs[LTC3589_LDO4].volt_table = ltc3589_12_ldo4;
}
ltc3589->regmap = devm_regmap_init_i2c(client, &ltc3589_regmap_config);
@@ -496,25 +418,12 @@ static int ltc3589_probe(struct i2c_client *client,
return ret;
}
- ret = ltc3589_parse_regulators_dt(ltc3589);
- if (ret)
- return ret;
-
for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
- struct ltc3589_regulator *rdesc = &ltc3589->regulator_descs[i];
- struct regulator_desc *desc = &rdesc->desc;
- struct regulator_init_data *init_data;
+ struct regulator_desc *desc = &ltc3589->regulator_descs[i];
struct regulator_config config = { };
- init_data = match_init_data(i);
-
- if (i < LTC3589_LDO3)
- ltc3589_apply_fb_voltage_divider(rdesc);
-
config.dev = dev;
- config.init_data = init_data;
config.driver_data = ltc3589;
- config.of_node = match_of_node(i);
ltc3589->regulators[i] = devm_regulator_register(dev, desc,
&config);
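
The ltc3589 rework above removes the of_regulator_match() boilerplate and lets the core do the DT matching through .of_match, .regulators_node and an .of_parse_cb hook in each descriptor. The callback runs once per matched child node before registration, so per-regulator properties can still adjust the driver-owned, writable descriptor copy. A rough sketch of the callback shape, reusing the property name from the patch but with an invented driver struct and do_div() for the scaling, as ltc3589_scale() does:

#include <asm/div64.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>

struct mypmic {
        struct regulator_desc descs[4];   /* writable copies, ids 0..3 */
};

static int mypmic_of_parse_cb(struct device_node *np,
                              const struct regulator_desc *desc,
                              struct regulator_config *config)
{
        struct mypmic *mypmic = config->driver_data;
        struct regulator_desc *rdesc = &mypmic->descs[desc->id];
        u64 tmp;
        u32 div[2];
        int ret;

        ret = of_property_read_u32_array(np, "lltc,fb-voltage-divider",
                                         div, 2);
        if (ret)
                return ret;

        if (!div[0] || !div[1])
                return 0;

        /* Scale the advertised minimum voltage by the external feedback
         * divider ratio; uV_step/fixed_uV would be handled the same way. */
        tmp = (u64)rdesc->min_uV * div[0];
        do_div(tmp, div[1]);
        rdesc->min_uV += tmp;

        return 0;
}

With .of_match and .regulators_node set, the core locates the matching child of the "regulators" node itself, so the probe loop shrinks to devm_regulator_register() with only dev, driver_data and the regmap filled in, as in the new ltc3589_probe().
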
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index e6d66e492b85..4be90c78c720 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -285,17 +285,23 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
if (irqstat & LTC3676_IRQSTAT_THERMAL_WARN) {
dev_warn(dev, "Over-temperature Warning\n");
event = REGULATOR_EVENT_OVER_TEMP;
- for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
+ regulator_lock(ltc3676->regulators[i]);
regulator_notifier_call_chain(ltc3676->regulators[i],
event, NULL);
+ regulator_unlock(ltc3676->regulators[i]);
+ }
}
if (irqstat & LTC3676_IRQSTAT_UNDERVOLT_WARN) {
dev_info(dev, "Undervoltage Warning\n");
event = REGULATOR_EVENT_UNDER_VOLTAGE;
- for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
+ regulator_lock(ltc3676->regulators[i]);
regulator_notifier_call_chain(ltc3676->regulators[i],
event, NULL);
+ regulator_unlock(ltc3676->regulators[i]);
+ }
}
/* Clear warning condition */
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 85a88a9e4d42..07a150c9bbf2 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -155,58 +155,6 @@ static const struct regulator_desc max77836_supported_regulators[] = {
[MAX77836_LDO2] = MAX77836_LDO_REG(2),
};
-#ifdef CONFIG_OF
-static struct of_regulator_match max14577_regulator_matches[] = {
- { .name = "SAFEOUT", },
- { .name = "CHARGER", },
-};
-
-static struct of_regulator_match max77836_regulator_matches[] = {
- { .name = "SAFEOUT", },
- { .name = "CHARGER", },
- { .name = "LDO1", },
- { .name = "LDO2", },
-};
-
-static inline struct regulator_init_data *match_init_data(int index,
- enum maxim_device_type dev_type)
-{
- switch (dev_type) {
- case MAXIM_DEVICE_TYPE_MAX77836:
- return max77836_regulator_matches[index].init_data;
-
- case MAXIM_DEVICE_TYPE_MAX14577:
- default:
- return max14577_regulator_matches[index].init_data;
- }
-}
-
-static inline struct device_node *match_of_node(int index,
- enum maxim_device_type dev_type)
-{
- switch (dev_type) {
- case MAXIM_DEVICE_TYPE_MAX77836:
- return max77836_regulator_matches[index].of_node;
-
- case MAXIM_DEVICE_TYPE_MAX14577:
- default:
- return max14577_regulator_matches[index].of_node;
- }
-}
-#else /* CONFIG_OF */
-static inline struct regulator_init_data *match_init_data(int index,
- enum maxim_device_type dev_type)
-{
- return NULL;
-}
-
-static inline struct device_node *match_of_node(int index,
- enum maxim_device_type dev_type)
-{
- return NULL;
-}
-#endif /* CONFIG_OF */
-
/**
* Registers for regulators of max77836 use different I2C slave addresses so
* different regmaps must be used for them.
@@ -265,9 +213,6 @@ static int max14577_regulator_probe(struct platform_device *pdev)
if (pdata && pdata->regulators) {
config.init_data = pdata->regulators[i].initdata;
config.of_node = pdata->regulators[i].of_node;
- } else {
- config.init_data = match_init_data(i, dev_type);
- config.of_node = match_of_node(i, dev_type);
}
config.regmap = max14577_get_regmap(max14577,
supported_regulators[i].id);
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 1607ac673e44..0ad91a7f9cb9 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -803,7 +803,7 @@ static int max77620_regulator_probe(struct platform_device *pdev)
continue;
rdesc = &rinfo[id].desc;
- pmic->rinfo[id] = &max77620_regs_info[id];
+ pmic->rinfo[id] = &rinfo[id];
pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
pmic->reg_pdata[id].active_fps_src = -1;
pmic->reg_pdata[id].active_fps_pd_slot = -1;
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
index 31ebf34b01ec..5c4f86c98510 100644
--- a/drivers/regulator/max77650-regulator.c
+++ b/drivers/regulator/max77650-regulator.c
@@ -41,7 +41,7 @@ struct max77650_regulator_desc {
unsigned int regB;
};
-static const u32 max77651_sbb1_regulator_volt_table[] = {
+static const unsigned int max77651_sbb1_regulator_volt_table[] = {
2400000, 3200000, 4000000, 4800000,
2450000, 3250000, 4050000, 4850000,
2500000, 3300000, 4100000, 4900000,
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 39b63ddefeb2..aed6727982cd 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -159,6 +159,8 @@ static const struct regulator_ops max8925_regulator_ldo_ops = {
{ \
.desc = { \
.name = "SDV" #_id, \
+ .of_match = of_match_ptr("SDV" #_id), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &max8925_regulator_sdv_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MAX8925_ID_SD##_id, \
@@ -175,6 +177,8 @@ static const struct regulator_ops max8925_regulator_ldo_ops = {
{ \
.desc = { \
.name = "LDO" #_id, \
+ .of_match = of_match_ptr("LDO" #_id), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &max8925_regulator_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MAX8925_ID_LDO##_id, \
@@ -187,34 +191,6 @@ static const struct regulator_ops max8925_regulator_ldo_ops = {
.enable_reg = MAX8925_LDOCTL##_id, \
}
-#ifdef CONFIG_OF
-static struct of_regulator_match max8925_regulator_matches[] = {
- { .name = "SDV1",},
- { .name = "SDV2",},
- { .name = "SDV3",},
- { .name = "LDO1",},
- { .name = "LDO2",},
- { .name = "LDO3",},
- { .name = "LDO4",},
- { .name = "LDO5",},
- { .name = "LDO6",},
- { .name = "LDO7",},
- { .name = "LDO8",},
- { .name = "LDO9",},
- { .name = "LDO10",},
- { .name = "LDO11",},
- { .name = "LDO12",},
- { .name = "LDO13",},
- { .name = "LDO14",},
- { .name = "LDO15",},
- { .name = "LDO16",},
- { .name = "LDO17",},
- { .name = "LDO18",},
- { .name = "LDO19",},
- { .name = "LDO20",},
-};
-#endif
-
static struct max8925_regulator_info max8925_regulator_info[] = {
MAX8925_SDV(1, 637.5, 1425, 12.5),
MAX8925_SDV(2, 650, 2225, 25),
@@ -242,37 +218,6 @@ static struct max8925_regulator_info max8925_regulator_info[] = {
MAX8925_LDO(20, 750, 3900, 50),
};
-#ifdef CONFIG_OF
-static int max8925_regulator_dt_init(struct platform_device *pdev,
- struct regulator_config *config,
- int ridx)
-{
- struct device_node *nproot, *np;
- int rcount;
-
- nproot = pdev->dev.parent->of_node;
- if (!nproot)
- return -ENODEV;
- np = of_get_child_by_name(nproot, "regulators");
- if (!np) {
- dev_err(&pdev->dev, "failed to find regulators node\n");
- return -ENODEV;
- }
-
- rcount = of_regulator_match(&pdev->dev, np,
- &max8925_regulator_matches[ridx], 1);
- of_node_put(np);
- if (rcount < 0)
- return rcount;
- config->init_data = max8925_regulator_matches[ridx].init_data;
- config->of_node = max8925_regulator_matches[ridx].of_node;
-
- return 0;
-}
-#else
-#define max8925_regulator_dt_init(x, y, z) (-1)
-#endif
-
static int max8925_regulator_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -281,7 +226,7 @@ static int max8925_regulator_probe(struct platform_device *pdev)
struct max8925_regulator_info *ri;
struct resource *res;
struct regulator_dev *rdev;
- int i, regulator_idx;
+ int i;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (!res) {
@@ -290,10 +235,8 @@ static int max8925_regulator_probe(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(max8925_regulator_info); i++) {
ri = &max8925_regulator_info[i];
- if (ri->vol_reg == res->start) {
- regulator_idx = i;
+ if (ri->vol_reg == res->start)
break;
- }
}
if (i == ARRAY_SIZE(max8925_regulator_info)) {
@@ -303,12 +246,11 @@ static int max8925_regulator_probe(struct platform_device *pdev)
}
ri->i2c = chip->i2c;
- config.dev = &pdev->dev;
+ config.dev = chip->dev;
config.driver_data = ri;
- if (max8925_regulator_dt_init(pdev, &config, regulator_idx))
- if (pdata)
- config.init_data = pdata;
+ if (pdata)
+ config.init_data = pdata;
rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 271bb736f3f5..60599c3bb845 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -33,72 +33,6 @@ struct max8998_data {
unsigned int buck2_idx;
};
-struct voltage_map_desc {
- int min;
- int max;
- int step;
-};
-
-/* Voltage maps in uV*/
-static const struct voltage_map_desc ldo23_voltage_map_desc = {
- .min = 800000, .step = 50000, .max = 1300000,
-};
-static const struct voltage_map_desc ldo456711_voltage_map_desc = {
- .min = 1600000, .step = 100000, .max = 3600000,
-};
-static const struct voltage_map_desc ldo8_voltage_map_desc = {
- .min = 3000000, .step = 100000, .max = 3600000,
-};
-static const struct voltage_map_desc ldo9_voltage_map_desc = {
- .min = 2800000, .step = 100000, .max = 3100000,
-};
-static const struct voltage_map_desc ldo10_voltage_map_desc = {
- .min = 950000, .step = 50000, .max = 1300000,
-};
-static const struct voltage_map_desc ldo1213_voltage_map_desc = {
- .min = 800000, .step = 100000, .max = 3300000,
-};
-static const struct voltage_map_desc ldo1415_voltage_map_desc = {
- .min = 1200000, .step = 100000, .max = 3300000,
-};
-static const struct voltage_map_desc ldo1617_voltage_map_desc = {
- .min = 1600000, .step = 100000, .max = 3600000,
-};
-static const struct voltage_map_desc buck12_voltage_map_desc = {
- .min = 750000, .step = 25000, .max = 1525000,
-};
-static const struct voltage_map_desc buck3_voltage_map_desc = {
- .min = 1600000, .step = 100000, .max = 3600000,
-};
-static const struct voltage_map_desc buck4_voltage_map_desc = {
- .min = 800000, .step = 100000, .max = 2300000,
-};
-
-static const struct voltage_map_desc *ldo_voltage_map[] = {
- NULL,
- NULL,
- &ldo23_voltage_map_desc, /* LDO2 */
- &ldo23_voltage_map_desc, /* LDO3 */
- &ldo456711_voltage_map_desc, /* LDO4 */
- &ldo456711_voltage_map_desc, /* LDO5 */
- &ldo456711_voltage_map_desc, /* LDO6 */
- &ldo456711_voltage_map_desc, /* LDO7 */
- &ldo8_voltage_map_desc, /* LDO8 */
- &ldo9_voltage_map_desc, /* LDO9 */
- &ldo10_voltage_map_desc, /* LDO10 */
- &ldo456711_voltage_map_desc, /* LDO11 */
- &ldo1213_voltage_map_desc, /* LDO12 */
- &ldo1213_voltage_map_desc, /* LDO13 */
- &ldo1415_voltage_map_desc, /* LDO14 */
- &ldo1415_voltage_map_desc, /* LDO15 */
- &ldo1617_voltage_map_desc, /* LDO16 */
- &ldo1617_voltage_map_desc, /* LDO17 */
- &buck12_voltage_map_desc, /* BUCK1 */
- &buck12_voltage_map_desc, /* BUCK2 */
- &buck3_voltage_map_desc, /* BUCK3 */
- &buck4_voltage_map_desc, /* BUCK4 */
-};
-
static int max8998_get_enable_register(struct regulator_dev *rdev,
int *reg, int *shift)
{
@@ -400,7 +334,6 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8998->iodev->i2c;
- const struct voltage_map_desc *desc;
int buck = rdev_get_id(rdev);
u8 val = 0;
int difference, ret;
@@ -408,8 +341,6 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
if (buck < MAX8998_BUCK1 || buck > MAX8998_BUCK4)
return -EINVAL;
- desc = ldo_voltage_map[buck];
-
/* Voltage stabilization */
ret = max8998_read_reg(i2c, MAX8998_REG_ONOFF4, &val);
if (ret)
@@ -420,14 +351,14 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
return 0;
- difference = (new_selector - old_selector) * desc->step / 1000;
+ difference = (new_selector - old_selector) * rdev->desc->uV_step / 1000;
if (difference > 0)
return DIV_ROUND_UP(difference, (val & 0x0f) + 1);
return 0;
}
-static struct regulator_ops max8998_ldo_ops = {
+static const struct regulator_ops max8998_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = max8998_ldo_is_enabled,
@@ -437,7 +368,7 @@ static struct regulator_ops max8998_ldo_ops = {
.set_voltage_sel = max8998_set_voltage_ldo_sel,
};
-static struct regulator_ops max8998_buck_ops = {
+static const struct regulator_ops max8998_buck_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = max8998_ldo_is_enabled,
@@ -448,164 +379,59 @@ static struct regulator_ops max8998_buck_ops = {
.set_voltage_time_sel = max8998_set_voltage_buck_time_sel,
};
-static struct regulator_ops max8998_others_ops = {
+static const struct regulator_ops max8998_others_ops = {
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
};
-static struct regulator_desc regulators[] = {
- {
- .name = "LDO2",
- .id = MAX8998_LDO2,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO3",
- .id = MAX8998_LDO3,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO4",
- .id = MAX8998_LDO4,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO5",
- .id = MAX8998_LDO5,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO6",
- .id = MAX8998_LDO6,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO7",
- .id = MAX8998_LDO7,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO8",
- .id = MAX8998_LDO8,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO9",
- .id = MAX8998_LDO9,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO10",
- .id = MAX8998_LDO10,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO11",
- .id = MAX8998_LDO11,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO12",
- .id = MAX8998_LDO12,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO13",
- .id = MAX8998_LDO13,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO14",
- .id = MAX8998_LDO14,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO15",
- .id = MAX8998_LDO15,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO16",
- .id = MAX8998_LDO16,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO17",
- .id = MAX8998_LDO17,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "BUCK1",
- .id = MAX8998_BUCK1,
- .ops = &max8998_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "BUCK2",
- .id = MAX8998_BUCK2,
- .ops = &max8998_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "BUCK3",
- .id = MAX8998_BUCK3,
- .ops = &max8998_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "BUCK4",
- .id = MAX8998_BUCK4,
- .ops = &max8998_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "EN32KHz-AP",
- .id = MAX8998_EN32KHZ_AP,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "EN32KHz-CP",
- .id = MAX8998_EN32KHZ_CP,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "ENVICHG",
- .id = MAX8998_ENVICHG,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "ESAFEOUT1",
- .id = MAX8998_ESAFEOUT1,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "ESAFEOUT2",
- .id = MAX8998_ESAFEOUT2,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
+#define MAX8998_LINEAR_REG(_name, _ops, _min, _step, _max) \
+ { \
+ .name = #_name, \
+ .id = MAX8998_##_name, \
+ .ops = _ops, \
+ .min_uV = (_min), \
+ .uV_step = (_step), \
+ .n_voltages = ((_max) - (_min)) / (_step) + 1, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
}
+
+#define MAX8998_OTHERS_REG(_name, _id) \
+ { \
+ .name = #_name, \
+ .id = _id, \
+ .ops = &max8998_others_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+static const struct regulator_desc regulators[] = {
+ MAX8998_LINEAR_REG(LDO2, &max8998_ldo_ops, 800000, 50000, 1300000),
+ MAX8998_LINEAR_REG(LDO3, &max8998_ldo_ops, 800000, 50000, 1300000),
+ MAX8998_LINEAR_REG(LDO4, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO5, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO6, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO7, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO8, &max8998_ldo_ops, 3000000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO9, &max8998_ldo_ops, 2800000, 100000, 3100000),
+ MAX8998_LINEAR_REG(LDO10, &max8998_ldo_ops, 950000, 50000, 1300000),
+ MAX8998_LINEAR_REG(LDO11, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO12, &max8998_ldo_ops, 800000, 100000, 3300000),
+ MAX8998_LINEAR_REG(LDO13, &max8998_ldo_ops, 800000, 100000, 3300000),
+ MAX8998_LINEAR_REG(LDO14, &max8998_ldo_ops, 1200000, 100000, 3300000),
+ MAX8998_LINEAR_REG(LDO15, &max8998_ldo_ops, 1200000, 100000, 3300000),
+ MAX8998_LINEAR_REG(LDO16, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO17, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(BUCK1, &max8998_buck_ops, 750000, 25000, 1525000),
+ MAX8998_LINEAR_REG(BUCK2, &max8998_buck_ops, 750000, 25000, 1525000),
+ MAX8998_LINEAR_REG(BUCK3, &max8998_buck_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(BUCK4, &max8998_buck_ops, 800000, 100000, 2300000),
+ MAX8998_OTHERS_REG(EN32KHz-AP, MAX8998_EN32KHZ_AP),
+ MAX8998_OTHERS_REG(EN32KHz-CP, MAX8998_EN32KHZ_CP),
+ MAX8998_OTHERS_REG(ENVICHG, MAX8998_ENVICHG),
+ MAX8998_OTHERS_REG(ESAFEOUT1, MAX8998_ESAFEOUT1),
+ MAX8998_OTHERS_REG(ESAFEOUT2, MAX8998_ESAFEOUT2),
};
static int max8998_pmic_dt_parse_dvs_gpio(struct max8998_dev *iodev,
@@ -796,9 +622,11 @@ static int max8998_pmic_probe(struct platform_device *pdev)
/* Set predefined values for BUCK1 registers */
for (v = 0; v < ARRAY_SIZE(pdata->buck1_voltage); ++v) {
+ int index = MAX8998_BUCK1 - MAX8998_LDO2;
+
i = 0;
- while (buck12_voltage_map_desc.min +
- buck12_voltage_map_desc.step*i
+ while (regulators[index].min_uV +
+ regulators[index].uV_step * i
< pdata->buck1_voltage[v])
i++;
@@ -824,9 +652,11 @@ static int max8998_pmic_probe(struct platform_device *pdev)
/* Set predefined values for BUCK2 registers */
for (v = 0; v < ARRAY_SIZE(pdata->buck2_voltage); ++v) {
+ int index = MAX8998_BUCK2 - MAX8998_LDO2;
+
i = 0;
- while (buck12_voltage_map_desc.min +
- buck12_voltage_map_desc.step*i
+ while (regulators[index].min_uV +
+ regulators[index].uV_step * i
< pdata->buck2_voltage[v])
i++;
@@ -839,18 +669,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
}
for (i = 0; i < pdata->num_regulators; i++) {
- const struct voltage_map_desc *desc;
- int id = pdata->regulators[i].id;
- int index = id - MAX8998_LDO2;
-
- desc = ldo_voltage_map[id];
- if (desc && regulators[index].ops != &max8998_others_ops) {
- int count = (desc->max - desc->min) / desc->step + 1;
-
- regulators[index].n_voltages = count;
- regulators[index].min_uV = desc->min;
- regulators[index].uV_step = desc->step;
- }
+ int index = pdata->regulators[i].id - MAX8998_LDO2;
config.dev = max8998->dev;
config.of_node = pdata->regulators[i].reg_node;
@@ -867,7 +686,6 @@ static int max8998_pmic_probe(struct platform_device *pdev)
}
}
-
return 0;
}
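
For max8998 the separate voltage_map_desc tables are folded straight into the descriptors, so the selector count falls out of the range: the LDO2 entry above gives (1300000 - 800000) / 50000 + 1 = 11 voltages, and the generic linear helpers handle the selector/voltage mapping. Roughly what one MAX8998_LINEAR_REG() entry expands to (ops table and id simplified, not the driver's real ones):

#include <linux/module.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops mypmic_ldo_ops = {
        .list_voltage = regulator_list_voltage_linear,
        .map_voltage = regulator_map_voltage_linear,
};

/* Sketch of MAX8998_LINEAR_REG(LDO2, ..., 800000, 50000, 1300000):
 * voltage = 800000 + selector * 50000. */
static const struct regulator_desc mypmic_ldo2_desc = {
        .name = "LDO2",
        .id = 2,                        /* stand-in for MAX8998_LDO2 */
        .ops = &mypmic_ldo_ops,
        .min_uV = 800000,
        .uV_step = 50000,
        .n_voltages = (1300000 - 800000) / 50000 + 1,   /* 11 */
        .type = REGULATOR_VOLTAGE,
        .owner = THIS_MODULE,
};

This is also why the probe hunks above can read regulators[index].min_uV and uV_step directly instead of consulting buck12_voltage_map_desc.
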
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 3a8004abe044..e5a02711cb46 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -119,8 +119,6 @@ enum {
* @lpm: LPM GPIO descriptor
*/
struct mcp16502 {
- struct regulator_dev *rdev[NUM_REGULATORS];
- struct regmap *rmap;
struct gpio_desc *lpm;
};
@@ -179,13 +177,12 @@ static unsigned int mcp16502_get_mode(struct regulator_dev *rdev)
{
unsigned int val;
int ret, reg;
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
reg = mcp16502_get_reg(rdev, MCP16502_OPMODE_ACTIVE);
if (reg < 0)
return reg;
- ret = regmap_read(mcp->rmap, reg, &val);
+ ret = regmap_read(rdev->regmap, reg, &val);
if (ret)
return ret;
@@ -211,7 +208,6 @@ static int _mcp16502_set_mode(struct regulator_dev *rdev, unsigned int mode,
{
int val;
int reg;
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
reg = mcp16502_get_reg(rdev, op_mode);
if (reg < 0)
@@ -228,7 +224,7 @@ static int _mcp16502_set_mode(struct regulator_dev *rdev, unsigned int mode,
return -EINVAL;
}
- reg = regmap_update_bits(mcp->rmap, reg, MCP16502_MODE, val);
+ reg = regmap_update_bits(rdev->regmap, reg, MCP16502_MODE, val);
return reg;
}
@@ -247,9 +243,8 @@ static int mcp16502_get_status(struct regulator_dev *rdev)
{
int ret;
unsigned int val;
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
- ret = regmap_read(mcp->rmap, MCP16502_STAT_BASE(rdev_get_id(rdev)),
+ ret = regmap_read(rdev->regmap, MCP16502_STAT_BASE(rdev_get_id(rdev)),
&val);
if (ret)
return ret;
@@ -290,7 +285,6 @@ static int mcp16502_suspend_get_target_reg(struct regulator_dev *rdev)
*/
static int mcp16502_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
int sel = regulator_map_voltage_linear_range(rdev, uV, uV);
int reg = mcp16502_suspend_get_target_reg(rdev);
@@ -300,7 +294,7 @@ static int mcp16502_set_suspend_voltage(struct regulator_dev *rdev, int uV)
if (reg < 0)
return reg;
- return regmap_update_bits(mcp->rmap, reg, MCP16502_VSEL, sel);
+ return regmap_update_bits(rdev->regmap, reg, MCP16502_VSEL, sel);
}
/*
@@ -328,13 +322,12 @@ static int mcp16502_set_suspend_mode(struct regulator_dev *rdev,
*/
static int mcp16502_set_suspend_enable(struct regulator_dev *rdev)
{
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
int reg = mcp16502_suspend_get_target_reg(rdev);
if (reg < 0)
return reg;
- return regmap_update_bits(mcp->rmap, reg, MCP16502_EN, MCP16502_EN);
+ return regmap_update_bits(rdev->regmap, reg, MCP16502_EN, MCP16502_EN);
}
/*
@@ -342,13 +335,12 @@ static int mcp16502_set_suspend_enable(struct regulator_dev *rdev)
*/
static int mcp16502_set_suspend_disable(struct regulator_dev *rdev)
{
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
int reg = mcp16502_suspend_get_target_reg(rdev);
if (reg < 0)
return reg;
- return regmap_update_bits(mcp->rmap, reg, MCP16502_EN, 0);
+ return regmap_update_bits(rdev->regmap, reg, MCP16502_EN, 0);
}
#endif /* CONFIG_SUSPEND */
@@ -435,36 +427,15 @@ static const struct regmap_config mcp16502_regmap_config = {
.wr_table = &mcp16502_yes_reg_table,
};
-/*
- * set_up_regulators() - initialize all regulators
- */
-static int setup_regulators(struct mcp16502 *mcp, struct device *dev,
- struct regulator_config config)
-{
- int i;
-
- for (i = 0; i < NUM_REGULATORS; i++) {
- mcp->rdev[i] = devm_regulator_register(dev,
- &mcp16502_desc[i],
- &config);
- if (IS_ERR(mcp->rdev[i])) {
- dev_err(dev,
- "failed to register %s regulator %ld\n",
- mcp16502_desc[i].name, PTR_ERR(mcp->rdev[i]));
- return PTR_ERR(mcp->rdev[i]);
- }
- }
-
- return 0;
-}
-
static int mcp16502_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct regulator_config config = { };
+ struct regulator_dev *rdev;
struct device *dev;
struct mcp16502 *mcp;
- int ret = 0;
+ struct regmap *rmap;
+ int i, ret;
dev = &client->dev;
config.dev = dev;
@@ -473,15 +444,15 @@ static int mcp16502_probe(struct i2c_client *client,
if (!mcp)
return -ENOMEM;
- mcp->rmap = devm_regmap_init_i2c(client, &mcp16502_regmap_config);
- if (IS_ERR(mcp->rmap)) {
- ret = PTR_ERR(mcp->rmap);
+ rmap = devm_regmap_init_i2c(client, &mcp16502_regmap_config);
+ if (IS_ERR(rmap)) {
+ ret = PTR_ERR(rmap);
dev_err(dev, "regmap init failed: %d\n", ret);
return ret;
}
i2c_set_clientdata(client, mcp);
- config.regmap = mcp->rmap;
+ config.regmap = rmap;
config.driver_data = mcp;
mcp->lpm = devm_gpiod_get(dev, "lpm", GPIOD_OUT_LOW);
@@ -490,9 +461,15 @@ static int mcp16502_probe(struct i2c_client *client,
return PTR_ERR(mcp->lpm);
}
- ret = setup_regulators(mcp, dev, config);
- if (ret != 0)
- return ret;
+ for (i = 0; i < NUM_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev, &mcp16502_desc[i], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev,
+ "failed to register %s regulator %ld\n",
+ mcp16502_desc[i].name, PTR_ERR(rdev));
+ return PTR_ERR(rdev);
+ }
+ }
mcp16502_gpio_set_mode(mcp, MCP16502_OPMODE_ACTIVE);
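
The mcp16502 cleanup drops the private regmap and rdev array: once config.regmap is set before devm_regulator_register(), every callback can reach the register map through rdev->regmap, and the core tracks the returned rdev itself. A small sketch of a callback written that way (register address and status bit are invented):

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>

#define MYPMIC_STATUS_REG       0x02    /* invented status register */
#define MYPMIC_STATUS_ON        BIT(0)  /* invented "output on" bit */

static int mypmic_get_status(struct regulator_dev *rdev)
{
        unsigned int val;
        int ret;

        /* rdev->regmap comes from config.regmap passed at registration. */
        ret = regmap_read(rdev->regmap, MYPMIC_STATUS_REG, &val);
        if (ret)
                return ret;

        return (val & MYPMIC_STATUS_ON) ? REGULATOR_STATUS_ON :
                                          REGULATOR_STATUS_OFF;
}

The rc5t583 and palmas hunks below make the same kind of simplification, dropping per-driver rdev bookkeeping that the core already provides.
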
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index 01d69f43d2b0..af95449d3590 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -1,16 +1,7 @@
-/*
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Henry Chen <henryc.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2015 MediaTek Inc.
+// Author: Henry Chen <henryc.chen@mediatek.com>
#include <linux/err.h>
#include <linux/gpio.h>
diff --git a/drivers/regulator/mt6311-regulator.h b/drivers/regulator/mt6311-regulator.h
index 5218db46a798..4904d6751714 100644
--- a/drivers/regulator/mt6311-regulator.h
+++ b/drivers/regulator/mt6311-regulator.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: Henry Chen <henryc.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MT6311_REGULATOR_H__
diff --git a/drivers/regulator/mt6323-regulator.c b/drivers/regulator/mt6323-regulator.c
index b7b9670f0979..893ea190788a 100644
--- a/drivers/regulator/mt6323-regulator.c
+++ b/drivers/regulator/mt6323-regulator.c
@@ -1,11 +1,7 @@
-/*
- * Copyright (c) 2016 MediaTek Inc.
- * Author: Chen Zhong <chen.zhong@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2016 MediaTek Inc.
+// Author: Chen Zhong <chen.zhong@mediatek.com>
#include <linux/module.h>
#include <linux/of.h>
@@ -118,43 +114,43 @@ static const struct regulator_linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
};
-static const u32 ldo_volt_table1[] = {
+static const unsigned int ldo_volt_table1[] = {
3300000, 3400000, 3500000, 3600000,
};
-static const u32 ldo_volt_table2[] = {
+static const unsigned int ldo_volt_table2[] = {
1500000, 1800000, 2500000, 2800000,
};
-static const u32 ldo_volt_table3[] = {
+static const unsigned int ldo_volt_table3[] = {
1800000, 3300000,
};
-static const u32 ldo_volt_table4[] = {
+static const unsigned int ldo_volt_table4[] = {
3000000, 3300000,
};
-static const u32 ldo_volt_table5[] = {
+static const unsigned int ldo_volt_table5[] = {
1200000, 1300000, 1500000, 1800000, 2000000, 2800000, 3000000, 3300000,
};
-static const u32 ldo_volt_table6[] = {
+static const unsigned int ldo_volt_table6[] = {
1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
};
-static const u32 ldo_volt_table7[] = {
+static const unsigned int ldo_volt_table7[] = {
1200000, 1300000, 1500000, 1800000,
};
-static const u32 ldo_volt_table8[] = {
+static const unsigned int ldo_volt_table8[] = {
1800000, 3000000,
};
-static const u32 ldo_volt_table9[] = {
+static const unsigned int ldo_volt_table9[] = {
1200000, 1350000, 1500000, 1800000,
};
-static const u32 ldo_volt_table10[] = {
+static const unsigned int ldo_volt_table10[] = {
1200000, 1300000, 1500000, 1800000,
};
diff --git a/drivers/regulator/mt6380-regulator.c b/drivers/regulator/mt6380-regulator.c
index 127dd720cbcc..b6aed090b5e0 100644
--- a/drivers/regulator/mt6380-regulator.c
+++ b/drivers/regulator/mt6380-regulator.c
@@ -1,16 +1,7 @@
-/*
- * Copyright (c) 2017 MediaTek Inc.
- * Author: Chenglin Xu <chenglin.xu@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2017 MediaTek Inc.
+// Author: Chenglin Xu <chenglin.xu@mediatek.com>
#include <linux/module.h>
#include <linux/of.h>
@@ -173,19 +164,19 @@ static const struct regulator_linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(1200000, 0, 0x3c, 25000),
};
-static const u32 ldo_volt_table1[] = {
+static const unsigned int ldo_volt_table1[] = {
1400000, 1350000, 1300000, 1250000, 1200000, 1150000, 1100000, 1050000,
};
-static const u32 ldo_volt_table2[] = {
+static const unsigned int ldo_volt_table2[] = {
2200000, 3300000,
};
-static const u32 ldo_volt_table3[] = {
+static const unsigned int ldo_volt_table3[] = {
1240000, 1390000, 1540000, 1840000,
};
-static const u32 ldo_volt_table4[] = {
+static const unsigned int ldo_volt_table4[] = {
2200000, 3300000,
};
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
index c6c6aa85e4e8..fd9ed864a0c1 100644
--- a/drivers/regulator/mt6397-regulator.c
+++ b/drivers/regulator/mt6397-regulator.c
@@ -1,16 +1,7 @@
-/*
- * Copyright (c) 2014 MediaTek Inc.
- * Author: Flora Fu <flora.fu@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2014 MediaTek Inc.
+// Author: Flora Fu <flora.fu@mediatek.com>
#include <linux/module.h>
#include <linux/of.h>
@@ -123,35 +114,35 @@ static const struct regulator_linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
};
-static const u32 ldo_volt_table1[] = {
+static const unsigned int ldo_volt_table1[] = {
1500000, 1800000, 2500000, 2800000,
};
-static const u32 ldo_volt_table2[] = {
+static const unsigned int ldo_volt_table2[] = {
1800000, 3300000,
};
-static const u32 ldo_volt_table3[] = {
+static const unsigned int ldo_volt_table3[] = {
3000000, 3300000,
};
-static const u32 ldo_volt_table4[] = {
+static const unsigned int ldo_volt_table4[] = {
1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
};
-static const u32 ldo_volt_table5[] = {
+static const unsigned int ldo_volt_table5[] = {
1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
};
-static const u32 ldo_volt_table5_v2[] = {
+static const unsigned int ldo_volt_table5_v2[] = {
1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
};
-static const u32 ldo_volt_table6[] = {
+static const unsigned int ldo_volt_table6[] = {
1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
};
-static const u32 ldo_volt_table7[] = {
+static const unsigned int ldo_volt_table7[] = {
1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
};
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 7b6bf3536271..6dca0ba044d8 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -371,8 +371,9 @@ int of_regulator_match(struct device *dev, struct device_node *node,
}
EXPORT_SYMBOL_GPL(of_regulator_match);
-struct device_node *regulator_of_get_init_node(struct device *dev,
- const struct regulator_desc *desc)
+static struct
+device_node *regulator_of_get_init_node(struct device *dev,
+ const struct regulator_desc *desc)
{
struct device_node *search, *child;
const char *name;
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 7fb9e8dd834e..f13c7c8b1061 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -991,9 +991,6 @@ static int palmas_ldo_registration(struct palmas_pmic *pmic,
return PTR_ERR(rdev);
}
- /* Save regulator for cleanup */
- pmic->rdev[id] = rdev;
-
/* Initialise sleep/init values from platform data */
if (pdata) {
reg_init = pdata->reg_init[id];
@@ -1101,9 +1098,6 @@ static int tps65917_ldo_registration(struct palmas_pmic *pmic,
return PTR_ERR(rdev);
}
- /* Save regulator for cleanup */
- pmic->rdev[id] = rdev;
-
/* Initialise sleep/init values from platform data */
if (pdata) {
reg_init = pdata->reg_init[id];
@@ -1288,9 +1282,6 @@ static int palmas_smps_registration(struct palmas_pmic *pmic,
pdev_name);
return PTR_ERR(rdev);
}
-
- /* Save regulator for cleanup */
- pmic->rdev[id] = rdev;
}
return 0;
@@ -1395,9 +1386,6 @@ static int tps65917_smps_registration(struct palmas_pmic *pmic,
pdev_name);
return PTR_ERR(rdev);
}
-
- /* Save regulator for cleanup */
- pmic->rdev[id] = rdev;
}
return 0;
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 1600f9821891..3d3415839ba2 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -1,17 +1,7 @@
-/*
- * pv88060-regulator.c - Regulator device driver for PV88060
- * Copyright (C) 2015 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// pv88060-regulator.c - Regulator device driver for PV88060
+// Copyright (C) 2015 Powerventure Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
@@ -244,9 +234,11 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
if (reg_val & PV88060_E_VDD_FLT) {
for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
@@ -261,9 +253,11 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
if (reg_val & PV88060_E_OVER_TEMP) {
for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
diff --git a/drivers/regulator/pv88060-regulator.h b/drivers/regulator/pv88060-regulator.h
index 02ca9203a172..d333dbf3be94 100644
--- a/drivers/regulator/pv88060-regulator.h
+++ b/drivers/regulator/pv88060-regulator.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* pv88060-regulator.h - Regulator definitions for PV88060
* Copyright (C) 2015 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __PV88060_REGISTERS_H__
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index bdddacdbeb99..a444f68af1a8 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -1,17 +1,7 @@
-/*
- * pv88080-regulator.c - Regulator device driver for PV88080
- * Copyright (C) 2016 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// pv88080-regulator.c - Regulator device driver for PV88080
+// Copyright (C) 2016 Powerventure Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
@@ -345,9 +335,11 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
if (reg_val & PV88080_E_VDD_FLT) {
for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
@@ -362,9 +354,11 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
if (reg_val & PV88080_E_OVER_TEMP) {
for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
diff --git a/drivers/regulator/pv88080-regulator.h b/drivers/regulator/pv88080-regulator.h
index ae25ff360e3d..7d7f8f11a75a 100644
--- a/drivers/regulator/pv88080-regulator.h
+++ b/drivers/regulator/pv88080-regulator.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* pv88080-regulator.h - Regulator definitions for PV88080
* Copyright (C) 2016 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __PV88080_REGISTERS_H__
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 6e97cc6df2ee..b1d0d97ae935 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -1,17 +1,7 @@
-/*
- * pv88090-regulator.c - Regulator device driver for PV88090
- * Copyright (C) 2015 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// pv88090-regulator.c - Regulator device driver for PV88090
+// Copyright (C) 2015 Powerventure Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
@@ -237,9 +227,11 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
if (reg_val & PV88090_E_VDD_FLT) {
for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
@@ -254,9 +246,11 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
if (reg_val & PV88090_E_OVER_TEMP) {
for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
diff --git a/drivers/regulator/pv88090-regulator.h b/drivers/regulator/pv88090-regulator.h
index 62d9029277f4..f814ee52cff3 100644
--- a/drivers/regulator/pv88090-regulator.h
+++ b/drivers/regulator/pv88090-regulator.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* pv88090-regulator.h - Regulator definitions for PV88090
* Copyright (C) 2015 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __PV88090_REGISTERS_H__
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 2ec51af43673..9446653e7a89 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -47,18 +47,13 @@ struct rc5t583_regulator_info {
struct regulator_desc desc;
};
-struct rc5t583_regulator {
- struct rc5t583_regulator_info *reg_info;
- struct regulator_dev *rdev;
-};
-
static int rc5t583_regulator_enable_time(struct regulator_dev *rdev)
{
- struct rc5t583_regulator *reg = rdev_get_drvdata(rdev);
+ struct rc5t583_regulator_info *reg_info = rdev_get_drvdata(rdev);
int vsel = regulator_get_voltage_sel_regmap(rdev);
int curr_uV = regulator_list_voltage_linear(rdev, vsel);
- return DIV_ROUND_UP(curr_uV, reg->reg_info->enable_uv_per_us);
+ return DIV_ROUND_UP(curr_uV, reg_info->enable_uv_per_us);
}
static const struct regulator_ops rc5t583_ops = {
@@ -120,8 +115,6 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
struct regulator_config config = { };
- struct rc5t583_regulator *reg = NULL;
- struct rc5t583_regulator *regs;
struct regulator_dev *rdev;
struct rc5t583_regulator_info *ri;
int ret;
@@ -132,18 +125,8 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
return -ENODEV;
}
- regs = devm_kcalloc(&pdev->dev,
- RC5T583_REGULATOR_MAX,
- sizeof(struct rc5t583_regulator),
- GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
-
-
for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) {
- reg = &regs[id];
ri = &rc5t583_reg_info[id];
- reg->reg_info = ri;
if (ri->deepsleep_id == RC5T583_DS_NONE)
goto skip_ext_pwr_config;
@@ -163,7 +146,7 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
skip_ext_pwr_config:
config.dev = &pdev->dev;
config.init_data = pdata->reg_init_data[id];
- config.driver_data = reg;
+ config.driver_data = ri;
config.regmap = rc5t583->regmap;
rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
@@ -172,9 +155,7 @@ skip_ext_pwr_config:
ri->desc.name);
return PTR_ERR(rdev);
}
- reg->rdev = rdev;
}
- platform_set_drvdata(pdev, regs);
return 0;
}
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index 790a4a73ea2c..a79c0c43b9f8 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -46,7 +46,7 @@ static const struct regulator_ops rn5t618_reg_ops = {
.vsel_mask = (vmask), \
}
-static struct regulator_desc rn5t567_regulators[] = {
+static const struct regulator_desc rn5t567_regulators[] = {
/* DCDC */
REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500),
@@ -63,7 +63,7 @@ static struct regulator_desc rn5t567_regulators[] = {
REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
};
-static struct regulator_desc rn5t618_regulators[] = {
+static const struct regulator_desc rn5t618_regulators[] = {
/* DCDC */
REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500),
@@ -79,7 +79,7 @@ static struct regulator_desc rn5t618_regulators[] = {
REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
};
-static struct regulator_desc rc5t619_regulators[] = {
+static const struct regulator_desc rc5t619_regulators[] = {
/* DCDC */
REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500),
@@ -107,7 +107,7 @@ static int rn5t618_regulator_probe(struct platform_device *pdev)
struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
- struct regulator_desc *regulators;
+ const struct regulator_desc *regulators;
int i;
int num_regulators = 0;
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 58a1fe583a6c..51f7e8b74d8c 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -17,10 +17,7 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mpa01.h>
-#define S2MPA01_REGULATOR_CNT ARRAY_SIZE(regulators)
-
struct s2mpa01_info {
- struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX];
int ramp_delay24;
int ramp_delay3;
int ramp_delay5;
@@ -232,6 +229,8 @@ static const struct regulator_ops s2mpa01_buck_ops = {
#define regulator_desc_ldo(num, step) { \
.name = "LDO"#num, \
+ .of_match = of_match_ptr("LDO"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
.id = S2MPA01_LDO##num, \
.ops = &s2mpa01_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -247,6 +246,8 @@ static const struct regulator_ops s2mpa01_buck_ops = {
#define regulator_desc_buck1_4(num) { \
.name = "BUCK"#num, \
+ .of_match = of_match_ptr("BUCK"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
.id = S2MPA01_BUCK##num, \
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -263,6 +264,8 @@ static const struct regulator_ops s2mpa01_buck_ops = {
#define regulator_desc_buck5 { \
.name = "BUCK5", \
+ .of_match = of_match_ptr("BUCK5"), \
+ .regulators_node = of_match_ptr("regulators"), \
.id = S2MPA01_BUCK5, \
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -279,6 +282,8 @@ static const struct regulator_ops s2mpa01_buck_ops = {
#define regulator_desc_buck6_10(num, min, step) { \
.name = "BUCK"#num, \
+ .of_match = of_match_ptr("BUCK"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
.id = S2MPA01_BUCK##num, \
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -336,9 +341,7 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
- struct device_node *reg_np = NULL;
struct regulator_config config = { };
- struct of_regulator_match *rdata;
struct s2mpa01_info *s2mpa01;
int i;
@@ -346,39 +349,15 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
if (!s2mpa01)
return -ENOMEM;
- rdata = s2mpa01->rdata;
- for (i = 0; i < S2MPA01_REGULATOR_CNT; i++)
- rdata[i].name = regulators[i].name;
-
- if (iodev->dev->of_node) {
- reg_np = of_get_child_by_name(iodev->dev->of_node,
- "regulators");
- if (!reg_np) {
- dev_err(&pdev->dev,
- "could not find regulators sub-node\n");
- return -EINVAL;
- }
-
- of_regulator_match(&pdev->dev, reg_np, rdata,
- S2MPA01_REGULATOR_MAX);
- of_node_put(reg_np);
- }
-
- platform_set_drvdata(pdev, s2mpa01);
-
- config.dev = &pdev->dev;
+ config.dev = iodev->dev;
config.regmap = iodev->regmap_pmic;
config.driver_data = s2mpa01;
for (i = 0; i < S2MPA01_REGULATOR_MAX; i++) {
struct regulator_dev *rdev;
+
if (pdata)
config.init_data = pdata->regulators[i].initdata;
- else
- config.init_data = rdata[i].init_data;
-
- if (reg_np)
- config.of_node = rdata[i].of_node;
rdev = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
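
A minimal sketch of the descriptor-based DT matching that the s2mpa01 (and later tps6507x) conversion switches to, assuming only the core's of_match/regulators_node descriptor fields and the generic regmap ops; all foo_* names and register values are illustrative:

#include <linux/bits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops foo_ldo_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
};

/* The core walks the "regulators" child node and matches sub-nodes against
 * of_match, so probe() no longer needs of_regulator_match()/rdata arrays. */
static const struct regulator_desc foo_ldo_desc = {
	.name = "LDO1",
	.of_match = of_match_ptr("LDO1"),
	.regulators_node = of_match_ptr("regulators"),
	.id = 0,
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
	.ops = &foo_ldo_ops,
	.enable_reg = 0x10,		/* hypothetical */
	.enable_mask = BIT(0),
};
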
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
index eb2bdf060b7b..0f21f95c8981 100644
--- a/drivers/regulator/sc2731-regulator.c
+++ b/drivers/regulator/sc2731-regulator.c
@@ -146,7 +146,7 @@ static const struct regulator_ops sc2731_regu_linear_ops = {
.vsel_mask = vmask, \
}
-static struct regulator_desc regulators[] = {
+static const struct regulator_desc regulators[] = {
SC2731_REGU_LINEAR(BUCK_CPU0, SC2731_POWER_PD_SW,
SC2731_DCDC_CPU0_PD_MASK, SC2731_DCDC_CPU0_VOL,
SC2731_DCDC_CPU0_VOL_MASK, 3125, 400000, 1996875),
diff --git a/drivers/regulator/sky81452-regulator.c b/drivers/regulator/sky81452-regulator.c
index 647860611916..177dede82a61 100644
--- a/drivers/regulator/sky81452-regulator.c
+++ b/drivers/regulator/sky81452-regulator.c
@@ -1,21 +1,9 @@
-/*
- * sky81452-regulator.c SKY81452 regulator driver
- *
- * Copyright 2014 Skyworks Solutions Inc.
- * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// sky81452-regulator.c SKY81452 regulator driver
+//
+// Copyright 2014 Skyworks Solutions Inc.
+// Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -34,7 +22,7 @@
#define SKY81452_LEN 0x40
#define SKY81452_LOUT 0x1F
-static struct regulator_ops sky81452_reg_ops = {
+static const struct regulator_ops sky81452_reg_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
new file mode 100644
index 000000000000..e0e627b0106e
--- /dev/null
+++ b/drivers/regulator/stm32-pwr.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) STMicroelectronics 2019
+// Authors: Gabriel Fernandez <gabriel.fernandez@st.com>
+// Pascal Paillet <p.paillet@st.com>.
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * Registers description
+ */
+#define REG_PWR_CR3 0x0C
+
+#define USB_3_3_EN BIT(24)
+#define USB_3_3_RDY BIT(26)
+#define REG_1_8_EN BIT(28)
+#define REG_1_8_RDY BIT(29)
+#define REG_1_1_EN BIT(30)
+#define REG_1_1_RDY BIT(31)
+
+/* list of supported regulators */
+enum {
+ PWR_REG11,
+ PWR_REG18,
+ PWR_USB33,
+ STM32PWR_REG_NUM_REGS
+};
+
+static u32 ready_mask_table[STM32PWR_REG_NUM_REGS] = {
+ [PWR_REG11] = REG_1_1_RDY,
+ [PWR_REG18] = REG_1_8_RDY,
+ [PWR_USB33] = USB_3_3_RDY,
+};
+
+struct stm32_pwr_reg {
+ void __iomem *base;
+ u32 ready_mask;
+};
+
+static int stm32_pwr_reg_is_ready(struct regulator_dev *rdev)
+{
+ struct stm32_pwr_reg *priv = rdev_get_drvdata(rdev);
+ u32 val;
+
+ val = readl_relaxed(priv->base + REG_PWR_CR3);
+
+ return (val & priv->ready_mask);
+}
+
+static int stm32_pwr_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct stm32_pwr_reg *priv = rdev_get_drvdata(rdev);
+ u32 val;
+
+ val = readl_relaxed(priv->base + REG_PWR_CR3);
+
+ return (val & rdev->desc->enable_mask);
+}
+
+static int stm32_pwr_reg_enable(struct regulator_dev *rdev)
+{
+ struct stm32_pwr_reg *priv = rdev_get_drvdata(rdev);
+ int ret;
+ u32 val;
+
+ val = readl_relaxed(priv->base + REG_PWR_CR3);
+ val |= rdev->desc->enable_mask;
+ writel_relaxed(val, priv->base + REG_PWR_CR3);
+
+ /* use an arbitrary timeout of 20ms */
+ ret = readx_poll_timeout(stm32_pwr_reg_is_ready, rdev, val, val,
+ 100, 20 * 1000);
+ if (ret)
+ dev_err(&rdev->dev, "regulator enable timed out!\n");
+
+ return ret;
+}
+
+static int stm32_pwr_reg_disable(struct regulator_dev *rdev)
+{
+ struct stm32_pwr_reg *priv = rdev_get_drvdata(rdev);
+ int ret;
+ u32 val;
+
+ val = readl_relaxed(priv->base + REG_PWR_CR3);
+ val &= ~rdev->desc->enable_mask;
+ writel_relaxed(val, priv->base + REG_PWR_CR3);
+
+ /* use an arbitrary timeout of 20ms */
+ ret = readx_poll_timeout(stm32_pwr_reg_is_ready, rdev, val, !val,
+ 100, 20 * 1000);
+ if (ret)
+ dev_err(&rdev->dev, "regulator disable timed out!\n");
+
+ return ret;
+}
+
+static const struct regulator_ops stm32_pwr_reg_ops = {
+ .enable = stm32_pwr_reg_enable,
+ .disable = stm32_pwr_reg_disable,
+ .is_enabled = stm32_pwr_reg_is_enabled,
+};
+
+#define PWR_REG(_id, _name, _volt, _en, _supply) \
+ [_id] = { \
+ .id = _id, \
+ .name = _name, \
+ .of_match = of_match_ptr(_name), \
+ .n_voltages = 1, \
+ .type = REGULATOR_VOLTAGE, \
+ .fixed_uV = _volt, \
+ .ops = &stm32_pwr_reg_ops, \
+ .enable_mask = _en, \
+ .owner = THIS_MODULE, \
+ .supply_name = _supply, \
+ } \
+
+static const struct regulator_desc stm32_pwr_desc[] = {
+ PWR_REG(PWR_REG11, "reg11", 1100000, REG_1_1_EN, "vdd"),
+ PWR_REG(PWR_REG18, "reg18", 1800000, REG_1_8_EN, "vdd"),
+ PWR_REG(PWR_USB33, "usb33", 3300000, USB_3_3_EN, "vdd_3v3_usbfs"),
+};
+
+static int stm32_pwr_regulator_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct stm32_pwr_reg *priv;
+ void __iomem *base;
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ int i, ret = 0;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(&pdev->dev, "Unable to map IO memory\n");
+ return -ENOMEM;
+ }
+
+ config.dev = &pdev->dev;
+
+ for (i = 0; i < STM32PWR_REG_NUM_REGS; i++) {
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct stm32_pwr_reg),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->base = base;
+ priv->ready_mask = ready_mask_table[i];
+ config.driver_data = priv;
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &stm32_pwr_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&pdev->dev,
+ "Failed to register regulator: %d\n", ret);
+ break;
+ }
+ }
+ return ret;
+}
+
+static const struct of_device_id stm32_pwr_of_match[] = {
+ { .compatible = "st,stm32mp1,pwr-reg", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_pwr_of_match);
+
+static struct platform_driver stm32_pwr_driver = {
+ .probe = stm32_pwr_regulator_probe,
+ .driver = {
+ .name = "stm32-pwr-regulator",
+ .of_match_table = of_match_ptr(stm32_pwr_of_match),
+ },
+};
+module_platform_driver(stm32_pwr_driver);
+
+MODULE_DESCRIPTION("STM32MP1 PWR voltage regulator driver");
+MODULE_AUTHOR("Pascal Paillet <p.paillet@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
index 65fbd1f0b612..42e03b2c10a0 100644
--- a/drivers/regulator/sy8106a-regulator.c
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -22,12 +22,6 @@
*/
#define SY8106A_GO_BIT BIT(7)
-struct sy8106a {
- struct regulator_dev *rdev;
- struct regmap *regmap;
- u32 fixed_voltage;
-};
-
static const struct regmap_config sy8106a_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -70,36 +64,32 @@ static const struct regulator_desc sy8106a_reg = {
static int sy8106a_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct sy8106a *chip;
struct device *dev = &i2c->dev;
- struct regulator_dev *rdev = NULL;
+ struct regulator_dev *rdev;
struct regulator_config config = { };
+ struct regmap *regmap;
unsigned int reg, vsel;
+ u32 fixed_voltage;
int error;
- chip = devm_kzalloc(&i2c->dev, sizeof(struct sy8106a), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
error = of_property_read_u32(dev->of_node, "silergy,fixed-microvolt",
- &chip->fixed_voltage);
+ &fixed_voltage);
if (error)
return error;
- if (chip->fixed_voltage < SY8106A_MIN_MV * 1000 ||
- chip->fixed_voltage > SY8106A_MAX_MV * 1000)
+ if (fixed_voltage < SY8106A_MIN_MV * 1000 ||
+ fixed_voltage > SY8106A_MAX_MV * 1000)
return -EINVAL;
- chip->regmap = devm_regmap_init_i2c(i2c, &sy8106a_regmap_config);
- if (IS_ERR(chip->regmap)) {
- error = PTR_ERR(chip->regmap);
+ regmap = devm_regmap_init_i2c(i2c, &sy8106a_regmap_config);
+ if (IS_ERR(regmap)) {
+ error = PTR_ERR(regmap);
dev_err(dev, "Failed to allocate register map: %d\n", error);
return error;
}
config.dev = &i2c->dev;
- config.regmap = chip->regmap;
- config.driver_data = chip;
+ config.regmap = regmap;
config.of_node = dev->of_node;
config.init_data = of_get_regulator_init_data(dev, dev->of_node,
@@ -109,15 +99,15 @@ static int sy8106a_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
/* Ensure GO_BIT is enabled when probing */
- error = regmap_read(chip->regmap, SY8106A_REG_VOUT1_SEL, &reg);
+ error = regmap_read(regmap, SY8106A_REG_VOUT1_SEL, &reg);
if (error)
return error;
if (!(reg & SY8106A_GO_BIT)) {
- vsel = (chip->fixed_voltage / 1000 - SY8106A_MIN_MV) /
+ vsel = (fixed_voltage / 1000 - SY8106A_MIN_MV) /
SY8106A_STEP_MV;
- error = regmap_write(chip->regmap, SY8106A_REG_VOUT1_SEL,
+ error = regmap_write(regmap, SY8106A_REG_VOUT1_SEL,
vsel | SY8106A_GO_BIT);
if (error)
return error;
@@ -131,10 +121,6 @@ static int sy8106a_i2c_probe(struct i2c_client *i2c,
return error;
}
- chip->rdev = rdev;
-
- i2c_set_clientdata(i2c, chip);
-
return 0;
}
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index c179a3a221af..a1b7fab91dd4 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -115,7 +115,6 @@ static struct tps_info tps6507x_pmic_regs[] = {
struct tps6507x_pmic {
struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
struct tps6507x_dev *mfd;
- struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
struct tps_info *info[TPS6507X_NUM_REGULATOR];
struct mutex io_lock;
};
@@ -349,7 +348,7 @@ static int tps6507x_pmic_set_voltage_sel(struct regulator_dev *dev,
return tps6507x_pmic_reg_write(tps, reg, data);
}
-static struct regulator_ops tps6507x_pmic_ops = {
+static const struct regulator_ops tps6507x_pmic_ops = {
.is_enabled = tps6507x_pmic_is_enabled,
.enable = tps6507x_pmic_enable,
.disable = tps6507x_pmic_disable,
@@ -359,66 +358,20 @@ static struct regulator_ops tps6507x_pmic_ops = {
.map_voltage = regulator_map_voltage_ascend,
};
-static struct of_regulator_match tps6507x_matches[] = {
- { .name = "VDCDC1"},
- { .name = "VDCDC2"},
- { .name = "VDCDC3"},
- { .name = "LDO1"},
- { .name = "LDO2"},
-};
-
-static struct tps6507x_board *tps6507x_parse_dt_reg_data(
- struct platform_device *pdev,
- struct of_regulator_match **tps6507x_reg_matches)
+static int tps6507x_pmic_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
{
- struct tps6507x_board *tps_board;
- struct device_node *np = pdev->dev.parent->of_node;
- struct device_node *regulators;
- struct of_regulator_match *matches;
- struct regulator_init_data *reg_data;
- int idx = 0, count, ret;
-
- tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
- GFP_KERNEL);
- if (!tps_board)
- return NULL;
-
- regulators = of_get_child_by_name(np, "regulators");
- if (!regulators) {
- dev_err(&pdev->dev, "regulator node not found\n");
- return NULL;
- }
-
- count = ARRAY_SIZE(tps6507x_matches);
- matches = tps6507x_matches;
-
- ret = of_regulator_match(&pdev->dev, regulators, matches, count);
- of_node_put(regulators);
- if (ret < 0) {
- dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
- ret);
- return NULL;
- }
+ struct tps6507x_pmic *tps = config->driver_data;
+ struct tps_info *info = tps->info[desc->id];
+ u32 prop;
+ int ret;
- *tps6507x_reg_matches = matches;
-
- reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
- * TPS6507X_NUM_REGULATOR), GFP_KERNEL);
- if (!reg_data)
- return NULL;
-
- tps_board->tps6507x_pmic_init_data = reg_data;
-
- for (idx = 0; idx < count; idx++) {
- if (!matches[idx].init_data || !matches[idx].of_node)
- continue;
-
- memcpy(&reg_data[idx], matches[idx].init_data,
- sizeof(struct regulator_init_data));
-
- }
+ ret = of_property_read_u32(np, "ti,defdcdc_default", &prop);
+ if (!ret)
+ info->defdcdc_default = prop;
- return tps_board;
+ return 0;
}
static int tps6507x_pmic_probe(struct platform_device *pdev)
@@ -426,14 +379,11 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
struct tps_info *info = &tps6507x_pmic_regs[0];
struct regulator_config config = { };
- struct regulator_init_data *init_data;
+ struct regulator_init_data *init_data = NULL;
struct regulator_dev *rdev;
struct tps6507x_pmic *tps;
struct tps6507x_board *tps_board;
- struct of_regulator_match *tps6507x_reg_matches = NULL;
int i;
- int error;
- unsigned int prop;
/**
* tps_board points to pmic related constants
@@ -441,20 +391,8 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
*/
tps_board = dev_get_platdata(tps6507x_dev->dev);
- if (IS_ENABLED(CONFIG_OF) && !tps_board &&
- tps6507x_dev->dev->of_node)
- tps_board = tps6507x_parse_dt_reg_data(pdev,
- &tps6507x_reg_matches);
- if (!tps_board)
- return -EINVAL;
-
- /**
- * init_data points to array of regulator_init structures
- * coming from the board-evm file.
- */
- init_data = tps_board->tps6507x_pmic_init_data;
- if (!init_data)
- return -EINVAL;
+ if (tps_board)
+ init_data = tps_board->tps6507x_pmic_init_data;
tps = devm_kzalloc(&pdev->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -468,13 +406,16 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
/* Register the regulators */
tps->info[i] = info;
- if (init_data->driver_data) {
+ if (init_data && init_data->driver_data) {
struct tps6507x_reg_platform_data *data =
init_data->driver_data;
- tps->info[i]->defdcdc_default = data->defdcdc_default;
+ info->defdcdc_default = data->defdcdc_default;
}
tps->desc[i].name = info->name;
+ tps->desc[i].of_match = of_match_ptr(info->name);
+ tps->desc[i].regulators_node = of_match_ptr("regulators");
+ tps->desc[i].of_parse_cb = tps6507x_pmic_of_parse_cb;
tps->desc[i].id = i;
tps->desc[i].n_voltages = info->table_len;
tps->desc[i].volt_table = info->table;
@@ -486,17 +427,6 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
config.init_data = init_data;
config.driver_data = tps;
- if (tps6507x_reg_matches) {
- error = of_property_read_u32(
- tps6507x_reg_matches[i].of_node,
- "ti,defdcdc_default", &prop);
-
- if (!error)
- tps->info[i]->defdcdc_default = prop;
-
- config.of_node = tps6507x_reg_matches[i].of_node;
- }
-
rdev = devm_regulator_register(&pdev->dev, &tps->desc[i],
&config);
if (IS_ERR(rdev)) {
@@ -505,9 +435,6 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
pdev->name);
return PTR_ERR(rdev);
}
-
- /* Save regulator for cleanup */
- tps->rdev[i] = rdev;
}
tps6507x_dev->pmic = tps;
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 45e96e154690..5a5e9b5bf4be 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -90,8 +90,8 @@ static const struct regulator_linear_range tps65086_buck345_25mv_ranges[] = {
static const struct regulator_linear_range tps65086_ldoa1_ranges[] = {
REGULATOR_LINEAR_RANGE(1350000, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1500000, 0x1, 0x7, 100000),
- REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xA, 100000),
- REGULATOR_LINEAR_RANGE(2700000, 0xB, 0xD, 150000),
+ REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xB, 100000),
+ REGULATOR_LINEAR_RANGE(2850000, 0xC, 0xD, 150000),
REGULATOR_LINEAR_RANGE(3300000, 0xE, 0xE, 0),
};
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index 73978dd440f7..6e22f5ebba2e 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -55,10 +55,7 @@ struct tps65132_reg_pdata {
struct tps65132_regulator {
struct device *dev;
- struct regmap *rmap;
- struct regulator_desc *rdesc[TPS65132_MAX_REGULATORS];
struct tps65132_reg_pdata reg_pdata[TPS65132_MAX_REGULATORS];
- struct regulator_dev *rdev[TPS65132_MAX_REGULATORS];
};
static int tps65132_regulator_enable(struct regulator_dev *rdev)
@@ -120,7 +117,7 @@ static int tps65132_regulator_is_enabled(struct regulator_dev *rdev)
return 1;
}
-static struct regulator_ops tps65132_regulator_ops = {
+static const struct regulator_ops tps65132_regulator_ops = {
.enable = tps65132_regulator_enable,
.disable = tps65132_regulator_disable,
.is_enabled = tps65132_regulator_is_enabled,
@@ -196,7 +193,7 @@ static int tps65132_of_parse_cb(struct device_node *np,
.owner = THIS_MODULE, \
}
-static struct regulator_desc tps_regs_desc[TPS65132_MAX_REGULATORS] = {
+static const struct regulator_desc tps_regs_desc[TPS65132_MAX_REGULATORS] = {
TPS65132_REGULATOR_DESC(VPOS, outp),
TPS65132_REGULATOR_DESC(VNEG, outn),
};
@@ -225,6 +222,8 @@ static int tps65132_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct tps65132_regulator *tps;
+ struct regulator_dev *rdev;
+ struct regmap *rmap;
struct regulator_config config = { };
int id;
int ret;
@@ -233,9 +232,9 @@ static int tps65132_probe(struct i2c_client *client,
if (!tps)
return -ENOMEM;
- tps->rmap = devm_regmap_init_i2c(client, &tps65132_regmap_config);
- if (IS_ERR(tps->rmap)) {
- ret = PTR_ERR(tps->rmap);
+ rmap = devm_regmap_init_i2c(client, &tps65132_regmap_config);
+ if (IS_ERR(rmap)) {
+ ret = PTR_ERR(rmap);
dev_err(dev, "regmap init failed: %d\n", ret);
return ret;
}
@@ -244,18 +243,16 @@ static int tps65132_probe(struct i2c_client *client,
tps->dev = dev;
for (id = 0; id < TPS65132_MAX_REGULATORS; ++id) {
- tps->rdesc[id] = &tps_regs_desc[id];
-
- config.regmap = tps->rmap;
+ config.regmap = rmap;
config.dev = dev;
config.driver_data = tps;
- tps->rdev[id] = devm_regulator_register(dev,
- tps->rdesc[id], &config);
- if (IS_ERR(tps->rdev[id])) {
- ret = PTR_ERR(tps->rdev[id]);
+ rdev = devm_regulator_register(dev, &tps_regs_desc[id],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(dev, "regulator %s register failed: %d\n",
- tps->rdesc[id]->name, ret);
+ tps_regs_desc[id].name, ret);
return ret;
}
}
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index d84fab616abf..67ba78da77ec 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -58,10 +58,9 @@ static const unsigned int LDO1_VSEL_table[] = {
static const struct regulator_linear_range tps65217_uv1_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 24, 25000),
- REGULATOR_LINEAR_RANGE(1550000, 25, 30, 50000),
- REGULATOR_LINEAR_RANGE(1850000, 31, 52, 50000),
+ REGULATOR_LINEAR_RANGE(1550000, 25, 52, 50000),
REGULATOR_LINEAR_RANGE(3000000, 53, 55, 100000),
- REGULATOR_LINEAR_RANGE(3300000, 56, 62, 0),
+ REGULATOR_LINEAR_RANGE(3300000, 56, 63, 0),
};
static const struct regulator_linear_range tps65217_uv2_ranges[] = {
@@ -150,7 +149,7 @@ static int tps65217_pmic_set_suspend_disable(struct regulator_dev *dev)
}
/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */
-static struct regulator_ops tps65217_pmic_ops = {
+static const struct regulator_ops tps65217_pmic_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65217_pmic_enable,
.disable = tps65217_pmic_disable,
@@ -163,7 +162,7 @@ static struct regulator_ops tps65217_pmic_ops = {
};
/* Operations permitted on LDO1 */
-static struct regulator_ops tps65217_pmic_ldo1_ops = {
+static const struct regulator_ops tps65217_pmic_ldo1_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65217_pmic_enable,
.disable = tps65217_pmic_disable,
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 95708d34876b..b72035610013 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -29,7 +29,8 @@
#include <linux/mfd/tps65218.h>
#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
- _em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
+ _em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm, \
+ _ct, _ncl) \
{ \
.name = _name, \
.of_match = _of, \
@@ -42,6 +43,8 @@
.vsel_mask = _vm, \
.csel_reg = _cr, \
.csel_mask = _cm, \
+ .curr_table = _ct, \
+ .n_current_limits = _ncl, \
.enable_reg = _er, \
.enable_mask = _em, \
.volt_table = NULL, \
@@ -162,7 +165,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
}
/* Operations permitted on DCDC1, DCDC2 */
-static struct regulator_ops tps65218_dcdc12_ops = {
+static const struct regulator_ops tps65218_dcdc12_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
@@ -176,7 +179,7 @@ static struct regulator_ops tps65218_dcdc12_ops = {
};
/* Operations permitted on DCDC3, DCDC4 and LDO1 */
-static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
+static const struct regulator_ops tps65218_ldo1_dcdc34_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
@@ -188,8 +191,7 @@ static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
.set_suspend_disable = tps65218_pmic_set_suspend_disable,
};
-static const int ls3_currents[] = { 100000, 200000, 500000, 1000000 };
-
+static const unsigned int ls3_currents[] = { 100000, 200000, 500000, 1000000 };
static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev,
int lim_uA)
@@ -229,33 +231,17 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev,
TPS65218_PROTECT_L1);
}
-static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
-{
- int retval;
- unsigned int index;
- struct tps65218 *tps = rdev_get_drvdata(dev);
-
- retval = regmap_read(tps->regmap, dev->desc->csel_reg, &index);
- if (retval < 0)
- return retval;
-
- index = (index & dev->desc->csel_mask) >>
- __builtin_ctz(dev->desc->csel_mask);
-
- return ls3_currents[index];
-}
-
-static struct regulator_ops tps65218_ls23_ops = {
+static const struct regulator_ops tps65218_ls23_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
.set_input_current_limit = tps65218_pmic_set_input_current_lim,
.set_current_limit = tps65218_pmic_set_current_limit,
- .get_current_limit = tps65218_pmic_get_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
/* Operations permitted on DCDC5, DCDC6 */
-static struct regulator_ops tps65218_dcdc56_pmic_ops = {
+static const struct regulator_ops tps65218_dcdc56_pmic_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
@@ -270,53 +256,57 @@ static const struct regulator_desc regulators[] = {
TPS65218_CONTROL_DCDC1_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC1_EN, 0, 0, dcdc1_dcdc2_ranges,
2, 4000, 0, TPS65218_REG_SEQ3,
- TPS65218_SEQ3_DC1_SEQ_MASK),
+ TPS65218_SEQ3_DC1_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("DCDC2", "regulator-dcdc2", TPS65218_DCDC_2,
REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64,
TPS65218_REG_CONTROL_DCDC2,
TPS65218_CONTROL_DCDC2_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC2_EN, 0, 0, dcdc1_dcdc2_ranges,
2, 4000, 0, TPS65218_REG_SEQ3,
- TPS65218_SEQ3_DC2_SEQ_MASK),
+ TPS65218_SEQ3_DC2_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("DCDC3", "regulator-dcdc3", TPS65218_DCDC_3,
REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_DCDC3,
TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC3_EN, 0, 0, ldo1_dcdc3_ranges, 2,
- 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK),
+ 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK,
+ NULL, 0),
TPS65218_REGULATOR("DCDC4", "regulator-dcdc4", TPS65218_DCDC_4,
REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 53,
TPS65218_REG_CONTROL_DCDC4,
TPS65218_CONTROL_DCDC4_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC4_EN, 0, 0, dcdc4_ranges, 2,
- 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK),
+ 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK,
+ NULL, 0),
TPS65218_REGULATOR("DCDC5", "regulator-dcdc5", TPS65218_DCDC_5,
REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
-1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0,
0, NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
- TPS65218_SEQ5_DC5_SEQ_MASK),
+ TPS65218_SEQ5_DC5_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("DCDC6", "regulator-dcdc6", TPS65218_DCDC_6,
REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
-1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0,
0, NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
- TPS65218_SEQ5_DC6_SEQ_MASK),
+ TPS65218_SEQ5_DC6_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("LDO1", "regulator-ldo1", TPS65218_LDO_1,
REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_LDO1,
TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges,
2, 0, 0, TPS65218_REG_SEQ6,
- TPS65218_SEQ6_LDO1_SEQ_MASK),
+ TPS65218_SEQ6_LDO1_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("LS2", "regulator-ls2", TPS65218_LS_2,
REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0,
TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS2_EN,
TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS2ILIM_MASK,
- NULL, 0, 0, 0, 0, 0),
+ NULL, 0, 0, 0, 0, 0, ls3_currents,
+ ARRAY_SIZE(ls3_currents)),
TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3,
REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0,
TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN,
TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK,
- NULL, 0, 0, 0, 0, 0),
+ NULL, 0, 0, 0, 0, 0, ls3_currents,
+ ARRAY_SIZE(ls3_currents)),
};
static int tps65218_regulator_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 67cac2682f50..740aeccdfb1f 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -137,7 +137,6 @@ struct tps6524x {
struct spi_device *spi;
struct mutex lock;
struct regulator_desc desc[N_REGULATORS];
- struct regulator_dev *rdev[N_REGULATORS];
};
static int __read_reg(struct tps6524x *hw, int reg)
@@ -565,7 +564,7 @@ static int is_supply_enabled(struct regulator_dev *rdev)
return read_field(hw, &info->enable);
}
-static struct regulator_ops regulator_ops = {
+static const struct regulator_ops regulator_ops = {
.is_enabled = is_supply_enabled,
.enable = enable_supply,
.disable = disable_supply,
@@ -584,6 +583,7 @@ static int pmic_probe(struct spi_device *spi)
const struct supply_info *info = supply_info;
struct regulator_init_data *init_data;
struct regulator_config config = { };
+ struct regulator_dev *rdev;
int i;
init_data = dev_get_platdata(dev);
@@ -616,10 +616,9 @@ static int pmic_probe(struct spi_device *spi)
config.init_data = init_data;
config.driver_data = hw;
- hw->rdev[i] = devm_regulator_register(dev, &hw->desc[i],
- &config);
- if (IS_ERR(hw->rdev[i]))
- return PTR_ERR(hw->rdev[i]);
+ rdev = devm_regulator_register(dev, &hw->desc[i], &config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
}
return 0;
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 1001147404c3..85a6a8ca8c1b 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -1,27 +1,13 @@
-/*
- * tps80031-regulator.c -- TI TPS80031 regulator driver.
- *
- * Regulator driver for TI TPS80031/TPS80032 Fully Integrated Power
- * Management with Power Path and Battery Charger.
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// tps80031-regulator.c -- TI TPS80031 regulator driver.
+//
+// Regulator driver for TI TPS80031/TPS80032 Fully Integrated Power
+// Management with Power Path and Battery Charger.
+//
+// Copyright (c) 2012, NVIDIA Corporation.
+//
+// Author: Laxman Dewangan <ldewangan@nvidia.com>
#include <linux/delay.h>
#include <linux/err.h>
@@ -85,7 +71,6 @@ struct tps80031_regulator_info {
struct tps80031_regulator {
struct device *dev;
- struct regulator_dev *rdev;
struct tps80031_regulator_info *rinfo;
u8 device_flags;
@@ -155,7 +140,7 @@ static int tps80031_reg_disable(struct regulator_dev *rdev)
}
/* DCDC voltages for the selector of 58 to 63 */
-static int tps80031_dcdc_voltages[4][5] = {
+static const int tps80031_dcdc_voltages[4][5] = {
{ 1350, 1500, 1800, 1900, 2100},
{ 1350, 1500, 1800, 1900, 2100},
{ 2084, 2315, 2778, 2932, 3241},
@@ -378,7 +363,7 @@ static int tps80031_vbus_disable(struct regulator_dev *rdev)
return ret;
}
-static struct regulator_ops tps80031_dcdc_ops = {
+static const struct regulator_ops tps80031_dcdc_ops = {
.list_voltage = tps80031_dcdc_list_voltage,
.set_voltage_sel = tps80031_dcdc_set_voltage_sel,
.get_voltage_sel = tps80031_dcdc_get_voltage_sel,
@@ -387,7 +372,7 @@ static struct regulator_ops tps80031_dcdc_ops = {
.is_enabled = tps80031_reg_is_enabled,
};
-static struct regulator_ops tps80031_ldo_ops = {
+static const struct regulator_ops tps80031_ldo_ops = {
.list_voltage = tps80031_ldo_list_voltage,
.map_voltage = tps80031_ldo_map_voltage,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -397,18 +382,18 @@ static struct regulator_ops tps80031_ldo_ops = {
.is_enabled = tps80031_reg_is_enabled,
};
-static struct regulator_ops tps80031_vbus_sw_ops = {
+static const struct regulator_ops tps80031_vbus_sw_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = tps80031_vbus_enable,
.disable = tps80031_vbus_disable,
.is_enabled = tps80031_vbus_is_enabled,
};
-static struct regulator_ops tps80031_vbus_hw_ops = {
+static const struct regulator_ops tps80031_vbus_hw_ops = {
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops tps80031_ext_reg_ops = {
+static const struct regulator_ops tps80031_ext_reg_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = tps80031_reg_enable,
.disable = tps80031_reg_disable,
@@ -736,7 +721,6 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
ri->rinfo->desc.name);
return PTR_ERR(rdev);
}
- ri->rdev = rdev;
}
platform_set_drvdata(pdev, pmic);
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 402ea43c77d1..cdd81e1985ff 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -392,7 +392,7 @@ static int twl4030ldo_get_voltage_sel(struct regulator_dev *rdev)
return vsel;
}
-static struct regulator_ops twl4030ldo_ops = {
+static const struct regulator_ops twl4030ldo_ops = {
.list_voltage = twl4030ldo_list_voltage,
.set_voltage_sel = twl4030ldo_set_voltage_sel,
@@ -430,14 +430,14 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
return vsel * 12500 + 600000;
}
-static struct regulator_ops twl4030smps_ops = {
+static const struct regulator_ops twl4030smps_ops = {
.set_voltage = twl4030smps_set_voltage,
.get_voltage = twl4030smps_get_voltage,
};
/*----------------------------------------------------------------------*/
-static struct regulator_ops twl4030fixed_ops = {
+static const struct regulator_ops twl4030fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = twl4030reg_enable,
diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
index 78de002037c7..259864520a06 100644
--- a/drivers/regulator/vctrl-regulator.c
+++ b/drivers/regulator/vctrl-regulator.c
@@ -334,10 +334,8 @@ static int vctrl_init_vtable(struct platform_device *pdev)
ctrl_uV = regulator_list_voltage(ctrl_reg, i);
if (ctrl_uV < vrange_ctrl->min_uV ||
- ctrl_uV > vrange_ctrl->max_uV) {
+ ctrl_uV > vrange_ctrl->max_uV)
rdesc->n_voltages--;
- continue;
- }
}
if (rdesc->n_voltages == 0) {
diff --git a/drivers/regulator/vexpress-regulator.c b/drivers/regulator/vexpress-regulator.c
index c810cbbd463f..1235f46e633e 100644
--- a/drivers/regulator/vexpress-regulator.c
+++ b/drivers/regulator/vexpress-regulator.c
@@ -1,15 +1,6 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2012 ARM Limited
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2012 ARM Limited
#define DRVNAME "vexpress-regulator"
#define pr_fmt(fmt) DRVNAME ": " fmt
@@ -23,17 +14,10 @@
#include <linux/regulator/of_regulator.h>
#include <linux/vexpress.h>
-struct vexpress_regulator {
- struct regulator_desc desc;
- struct regulator_dev *regdev;
- struct regmap *regmap;
-};
-
static int vexpress_regulator_get_voltage(struct regulator_dev *regdev)
{
- struct vexpress_regulator *reg = rdev_get_drvdata(regdev);
- u32 uV;
- int err = regmap_read(reg->regmap, 0, &uV);
+ unsigned int uV;
+ int err = regmap_read(regdev->regmap, 0, &uV);
return err ? err : uV;
}
@@ -41,60 +25,58 @@ static int vexpress_regulator_get_voltage(struct regulator_dev *regdev)
static int vexpress_regulator_set_voltage(struct regulator_dev *regdev,
int min_uV, int max_uV, unsigned *selector)
{
- struct vexpress_regulator *reg = rdev_get_drvdata(regdev);
-
- return regmap_write(reg->regmap, 0, min_uV);
+ return regmap_write(regdev->regmap, 0, min_uV);
}
-static struct regulator_ops vexpress_regulator_ops_ro = {
+static const struct regulator_ops vexpress_regulator_ops_ro = {
.get_voltage = vexpress_regulator_get_voltage,
};
-static struct regulator_ops vexpress_regulator_ops = {
+static const struct regulator_ops vexpress_regulator_ops = {
.get_voltage = vexpress_regulator_get_voltage,
.set_voltage = vexpress_regulator_set_voltage,
};
static int vexpress_regulator_probe(struct platform_device *pdev)
{
- struct vexpress_regulator *reg;
+ struct regulator_desc *desc;
struct regulator_init_data *init_data;
struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
- reg = devm_kzalloc(&pdev->dev, sizeof(*reg), GFP_KERNEL);
- if (!reg)
+ desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
return -ENOMEM;
- reg->regmap = devm_regmap_init_vexpress_config(&pdev->dev);
- if (IS_ERR(reg->regmap))
- return PTR_ERR(reg->regmap);
+ regmap = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- reg->desc.name = dev_name(&pdev->dev);
- reg->desc.type = REGULATOR_VOLTAGE;
- reg->desc.owner = THIS_MODULE;
- reg->desc.continuous_voltage_range = true;
+ desc->name = dev_name(&pdev->dev);
+ desc->type = REGULATOR_VOLTAGE;
+ desc->owner = THIS_MODULE;
+ desc->continuous_voltage_range = true;
init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
- &reg->desc);
+ desc);
if (!init_data)
return -EINVAL;
init_data->constraints.apply_uV = 0;
if (init_data->constraints.min_uV && init_data->constraints.max_uV)
- reg->desc.ops = &vexpress_regulator_ops;
+ desc->ops = &vexpress_regulator_ops;
else
- reg->desc.ops = &vexpress_regulator_ops_ro;
+ desc->ops = &vexpress_regulator_ops_ro;
+ config.regmap = regmap;
config.dev = &pdev->dev;
config.init_data = init_data;
- config.driver_data = reg;
config.of_node = pdev->dev.of_node;
- reg->regdev = devm_regulator_register(&pdev->dev, &reg->desc, &config);
- if (IS_ERR(reg->regdev))
- return PTR_ERR(reg->regdev);
-
- platform_set_drvdata(pdev, reg);
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
return 0;
}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 12b422373580..b422eef97b77 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -1,15 +1,10 @@
-/*
- * wm831x-dcdc.c -- DC-DC buck convertor driver for the WM831x series
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm831x-dcdc.c -- DC-DC buck converter driver for the WM831x series
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -183,9 +178,11 @@ static irqreturn_t wm831x_dcdc_uv_irq(int irq, void *data)
{
struct wm831x_dcdc *dcdc = data;
+ regulator_lock(dcdc->regulator);
regulator_notifier_call_chain(dcdc->regulator,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(dcdc->regulator);
return IRQ_HANDLED;
}
@@ -194,9 +191,11 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data)
{
struct wm831x_dcdc *dcdc = data;
+ regulator_lock(dcdc->regulator);
regulator_notifier_call_chain(dcdc->regulator,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
+ regulator_unlock(dcdc->regulator);
return IRQ_HANDLED;
}
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 6dd891d7eee3..ff3d2bf50410 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -1,15 +1,10 @@
-/*
- * wm831x-isink.c -- Current sink driver for the WM831x series
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm831x-isink.c -- Current sink driver for the WM831x series
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -92,57 +87,23 @@ static int wm831x_isink_is_enabled(struct regulator_dev *rdev)
return 0;
}
-static int wm831x_isink_set_current(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct wm831x_isink *isink = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = isink->wm831x;
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(wm831x_isinkv_values); i++) {
- int val = wm831x_isinkv_values[i];
- if (min_uA <= val && val <= max_uA) {
- ret = wm831x_set_bits(wm831x, isink->reg,
- WM831X_CS1_ISEL_MASK, i);
- return ret;
- }
- }
-
- return -EINVAL;
-}
-
-static int wm831x_isink_get_current(struct regulator_dev *rdev)
-{
- struct wm831x_isink *isink = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = isink->wm831x;
- int ret;
-
- ret = wm831x_reg_read(wm831x, isink->reg);
- if (ret < 0)
- return ret;
-
- ret &= WM831X_CS1_ISEL_MASK;
- if (ret > WM831X_ISINK_MAX_ISEL)
- ret = WM831X_ISINK_MAX_ISEL;
-
- return wm831x_isinkv_values[ret];
-}
-
static const struct regulator_ops wm831x_isink_ops = {
.is_enabled = wm831x_isink_is_enabled,
.enable = wm831x_isink_enable,
.disable = wm831x_isink_disable,
- .set_current_limit = wm831x_isink_set_current,
- .get_current_limit = wm831x_isink_get_current,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static irqreturn_t wm831x_isink_irq(int irq, void *data)
{
struct wm831x_isink *isink = data;
+ regulator_lock(isink->regulator);
regulator_notifier_call_chain(isink->regulator,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
+ regulator_unlock(isink->regulator);
return IRQ_HANDLED;
}
@@ -187,10 +148,15 @@ static int wm831x_isink_probe(struct platform_device *pdev)
isink->desc.ops = &wm831x_isink_ops;
isink->desc.type = REGULATOR_CURRENT;
isink->desc.owner = THIS_MODULE;
+ isink->desc.curr_table = wm831x_isinkv_values,
+ isink->desc.n_current_limits = ARRAY_SIZE(wm831x_isinkv_values),
+ isink->desc.csel_reg = isink->reg,
+ isink->desc.csel_mask = WM831X_CS1_ISEL_MASK,
config.dev = pdev->dev.parent;
config.init_data = pdata->isink[id];
config.driver_data = isink;
+ config.regmap = wm831x->regmap;
isink->regulator = devm_regulator_register(&pdev->dev, &isink->desc,
&config);
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e4a6f888484e..56754686c982 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -1,15 +1,10 @@
-/*
- * wm831x-ldo.c -- LDO driver for the WM831x series
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm831x-ldo.c -- LDO driver for the WM831x series
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -51,9 +46,11 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
{
struct wm831x_ldo *ldo = data;
+ regulator_lock(ldo->regulator);
regulator_notifier_call_chain(ldo->regulator,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(ldo->regulator);
return IRQ_HANDLED;
}
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index a1c7dfee5c37..56d6168a888d 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1,16 +1,11 @@
-/*
- * wm8350.c -- Voltage and current regulation for the Wolfson WM8350 PMIC
- *
- * Copyright 2007, 2008 Wolfson Microelectronics PLC.
- *
- * Author: Liam Girdwood
- * linux@wolfsonmicro.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm8350.c -- Voltage and current regulation for the Wolfson WM8350 PMIC
+//
+// Copyright 2007, 2008 Wolfson Microelectronics PLC.
+//
+// Author: Liam Girdwood
+// linux@wolfsonmicro.com
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -28,7 +23,7 @@
#define WM8350_DCDC_MAX_VSEL 0x66
/* Microamps */
-static const int isink_cur[] = {
+static const unsigned int isink_cur[] = {
4,
5,
6,
@@ -95,73 +90,6 @@ static const int isink_cur[] = {
223191
};
-static int get_isink_val(int min_uA, int max_uA, u16 *setting)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(isink_cur); i++) {
- if (min_uA <= isink_cur[i] && max_uA >= isink_cur[i]) {
- *setting = i;
- return 0;
- }
- }
- return -EINVAL;
-}
-
-static int wm8350_isink_set_current(struct regulator_dev *rdev, int min_uA,
- int max_uA)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int isink = rdev_get_id(rdev);
- u16 val, setting;
- int ret;
-
- ret = get_isink_val(min_uA, max_uA, &setting);
- if (ret != 0)
- return ret;
-
- switch (isink) {
- case WM8350_ISINK_A:
- val = wm8350_reg_read(wm8350, WM8350_CURRENT_SINK_DRIVER_A) &
- ~WM8350_CS1_ISEL_MASK;
- wm8350_reg_write(wm8350, WM8350_CURRENT_SINK_DRIVER_A,
- val | setting);
- break;
- case WM8350_ISINK_B:
- val = wm8350_reg_read(wm8350, WM8350_CURRENT_SINK_DRIVER_B) &
- ~WM8350_CS1_ISEL_MASK;
- wm8350_reg_write(wm8350, WM8350_CURRENT_SINK_DRIVER_B,
- val | setting);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int wm8350_isink_get_current(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int isink = rdev_get_id(rdev);
- u16 val;
-
- switch (isink) {
- case WM8350_ISINK_A:
- val = wm8350_reg_read(wm8350, WM8350_CURRENT_SINK_DRIVER_A) &
- WM8350_CS1_ISEL_MASK;
- break;
- case WM8350_ISINK_B:
- val = wm8350_reg_read(wm8350, WM8350_CURRENT_SINK_DRIVER_B) &
- WM8350_CS1_ISEL_MASK;
- break;
- default:
- return 0;
- }
-
- return isink_cur[val];
-}
-
/* turn on ISINK followed by DCDC */
static int wm8350_isink_enable(struct regulator_dev *rdev)
{
@@ -982,8 +910,8 @@ static const struct regulator_ops wm8350_ldo_ops = {
};
static const struct regulator_ops wm8350_isink_ops = {
- .set_current_limit = wm8350_isink_set_current,
- .get_current_limit = wm8350_isink_get_current,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
.enable = wm8350_isink_enable,
.disable = wm8350_isink_disable,
.is_enabled = wm8350_isink_is_enabled,
@@ -1138,6 +1066,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_CS1,
.type = REGULATOR_CURRENT,
.owner = THIS_MODULE,
+ .curr_table = isink_cur,
+ .n_current_limits = ARRAY_SIZE(isink_cur),
+ .csel_reg = WM8350_CURRENT_SINK_DRIVER_A,
+ .csel_mask = WM8350_CS1_ISEL_MASK,
},
{
.name = "ISINKB",
@@ -1146,6 +1078,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_CS2,
.type = REGULATOR_CURRENT,
.owner = THIS_MODULE,
+ .curr_table = isink_cur,
+ .n_current_limits = ARRAY_SIZE(isink_cur),
+ .csel_reg = WM8350_CURRENT_SINK_DRIVER_B,
+ .csel_mask = WM8350_CS2_ISEL_MASK,
},
};
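
A minimal sketch of the table-driven current-limit setup that the wm831x-isink and wm8350 hunks above convert to, assuming the core's regulator_get_current_limit_regmap()/regulator_set_current_limit_regmap() helpers; the foo_* names, register, and mask are illustrative only:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

static const unsigned int foo_isink_currents[] = { 100000, 200000, 500000 };

static const struct regulator_ops foo_isink_ops = {
	.set_current_limit = regulator_set_current_limit_regmap,
	.get_current_limit = regulator_get_current_limit_regmap,
};

static const struct regulator_desc foo_isink_desc = {
	.name = "foo-isink",
	.type = REGULATOR_CURRENT,
	.owner = THIS_MODULE,
	.ops = &foo_isink_ops,
	/* The helpers read csel_reg via config.regmap, mask with csel_mask,
	 * and index curr_table with the result. */
	.curr_table = foo_isink_currents,
	.n_current_limits = ARRAY_SIZE(foo_isink_currents),
	.csel_reg = 0x20,		/* hypothetical */
	.csel_mask = 0x03,
};
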
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index fb1837657b64..6f331b51e479 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -1,16 +1,10 @@
-/*
- * Regulator support for WM8400
- *
- * Copyright 2008 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator support for WM8400
+//
+// Copyright 2008 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/bug.h>
#include <linux/err.h>
@@ -36,13 +30,12 @@ static const struct regulator_ops wm8400_ldo_ops = {
static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev)
{
- struct wm8400 *wm8400 = rdev_get_drvdata(dev);
+ struct regmap *rmap = rdev_get_regmap(dev);
int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
u16 data[2];
int ret;
- ret = wm8400_block_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset, 2,
- data);
+ ret = regmap_bulk_read(rmap, WM8400_DCDC1_CONTROL_1 + offset, data, 2);
if (ret != 0)
return 0;
@@ -63,36 +56,36 @@ static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev)
static int wm8400_dcdc_set_mode(struct regulator_dev *dev, unsigned int mode)
{
- struct wm8400 *wm8400 = rdev_get_drvdata(dev);
+ struct regmap *rmap = rdev_get_regmap(dev);
int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
int ret;
switch (mode) {
case REGULATOR_MODE_FAST:
/* Datasheet: active with force PWM */
- ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_2 + offset,
+ ret = regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_2 + offset,
WM8400_DC1_FRC_PWM, WM8400_DC1_FRC_PWM);
if (ret != 0)
return ret;
- return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
+ return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP,
WM8400_DC1_ACTIVE);
case REGULATOR_MODE_NORMAL:
/* Datasheet: active */
- ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_2 + offset,
+ ret = regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_2 + offset,
WM8400_DC1_FRC_PWM, 0);
if (ret != 0)
return ret;
- return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
+ return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP,
WM8400_DC1_ACTIVE);
case REGULATOR_MODE_IDLE:
/* Datasheet: standby */
- return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
+ return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP, 0);
default:
return -EINVAL;
@@ -195,7 +188,7 @@ static struct regulator_desc regulators[] = {
.id = WM8400_DCDC2,
.ops = &wm8400_dcdc_ops,
.enable_reg = WM8400_DCDC2_CONTROL_1,
- .enable_mask = WM8400_DC1_ENA_MASK,
+ .enable_mask = WM8400_DC2_ENA_MASK,
.n_voltages = WM8400_DC2_VSEL_MASK + 1,
.vsel_reg = WM8400_DCDC2_CONTROL_1,
.vsel_mask = WM8400_DC2_VSEL_MASK,
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 38928cdcb6e6..cadea0344486 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -1,15 +1,10 @@
-/*
- * wm8994-regulator.c -- Regulator driver for the WM8994
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm8994-regulator.c -- Regulator driver for the WM8994
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index 91751617b37a..c53a2185a039 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
arb->rstc.ops = &meson_audio_arb_rstc_ops;
arb->rstc.of_node = dev->of_node;
+ arb->rstc.owner = THIS_MODULE;
/*
* Enable general :
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a71734c41693..7b8e156dbf38 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -439,6 +439,7 @@ config RTC_DRV_PCF8523
config RTC_DRV_PCF85063
tristate "NXP PCF85063"
+ select REGMAP_I2C
help
If you say yes here you get support for the PCF85063 RTC chip
@@ -447,7 +448,6 @@ config RTC_DRV_PCF85063
config RTC_DRV_PCF85363
tristate "NXP PCF85363"
- depends on I2C
select REGMAP_I2C
help
If you say yes here you get support for the PCF85363 RTC chip.
@@ -602,7 +602,6 @@ config RTC_DRV_FM3130
config RTC_DRV_RX8010
tristate "Epson RX8010SJ"
- depends on I2C
help
If you say yes here you get support for the Epson RX8010SJ RTC
chip.
@@ -667,9 +666,9 @@ config RTC_DRV_S5M
will be called rtc-s5m.
config RTC_DRV_SD3078
- tristate "ZXW Crystal SD3078"
+ tristate "ZXW Shenzhen whwave SD3078"
help
- If you say yes here you get support for the ZXW Crystal
+ If you say yes here you get support for the ZXW Shenzhen whwave
SD3078 RTC chips.
This driver can also be built as a module. If so, the module
@@ -1432,7 +1431,7 @@ config RTC_DRV_AT91RM9200
config RTC_DRV_AT91SAM9
tristate "AT91SAM9 RTT as RTC"
depends on ARCH_AT91 || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on OF && HAS_IOMEM
select MFD_SYSCON
help
Some AT91SAM9 SoCs provide an RTT (Real Time Timer) block which
@@ -1841,6 +1840,17 @@ config RTC_DRV_RTD119X
If you say yes here, you get support for the RTD1295 SoC
Real Time Clock.
+config RTC_DRV_ASPEED
+ tristate "ASPEED RTC"
+ depends on OF
+ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+ If you say yes here you get support for the ASPEED BMC SoC real time
+ clocks.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-aspeed".
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
@@ -1857,7 +1867,8 @@ config RTC_DRV_HID_SENSOR_TIME
config RTC_DRV_GOLDFISH
tristate "Goldfish Real Time Clock"
- depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ depends on OF && HAS_IOMEM
+ depends on GOLDFISH || COMPILE_TEST
help
Say yes to enable RTC driver for the Goldfish based virtual platform.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index fe3962496685..9d997faa2c26 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_RTC_DRV_AC100) += rtc-ac100.o
obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o
obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o
obj-$(CONFIG_RTC_DRV_ASM9260) += rtc-asm9260.o
+obj-$(CONFIG_RTC_DRV_ASPEED) += rtc-aspeed.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index ac93b76f2b11..0f492b0940b3 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, base class
*
@@ -5,11 +6,7 @@
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* class skeleton from drivers/hwmon/hwmon.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -23,13 +20,13 @@
#include "rtc-core.h"
-
static DEFINE_IDA(rtc_ida);
struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
+
ida_simple_remove(&rtc_ida, rtc->id);
kfree(rtc);
}
@@ -47,7 +44,6 @@ int rtc_hctosys_ret = -ENODEV;
static struct timespec64 old_rtc, old_system, old_delta;
-
static int rtc_suspend(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
@@ -71,7 +67,6 @@ static int rtc_suspend(struct device *dev)
ktime_get_real_ts64(&old_system);
old_rtc.tv_sec = rtc_tm_to_time64(&tm);
-
/*
* To avoid drift caused by repeated suspend/resumes,
* which each can add ~1 second drift error,
@@ -83,7 +78,7 @@ static int rtc_suspend(struct device *dev)
if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* if delta_delta is too large, assume time correction
- * has occured and set old_delta to the current delta.
+ * has occurred and set old_delta to the current delta.
*/
old_delta = delta;
} else {
@@ -136,7 +131,7 @@ static int rtc_resume(struct device *dev)
* to keep things accurate.
*/
sleep_time = timespec64_sub(sleep_time,
- timespec64_sub(new_system, old_system));
+ timespec64_sub(new_system, old_system));
if (sleep_time.tv_sec >= 0)
timekeeping_inject_sleeptime64(&sleep_time);
@@ -397,9 +392,9 @@ EXPORT_SYMBOL_GPL(__rtc_register_device);
* rtc_register_device instead
*/
struct rtc_device *devm_rtc_device_register(struct device *dev,
- const char *name,
- const struct rtc_class_ops *ops,
- struct module *owner)
+ const char *name,
+ const struct rtc_class_ops *ops,
+ struct module *owner)
{
struct rtc_device *rtc;
int err;
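The class.c hunks above touch the suspend path that records the RTC/system time pair and compares the new offset against the one saved on the previous suspend. A simplified sketch of that bookkeeping follows; variable names mirror the driver, but this is an illustration of the arithmetic, not the exact kernel code.

#include <linux/time64.h>

static struct timespec64 old_rtc, old_system, old_delta;

static void example_track_suspend_drift(void)
{
	struct timespec64 delta, delta_delta;

	/* current offset between RTC time and system time */
	delta = timespec64_sub(old_rtc, old_system);
	/* how much that offset moved since the last suspend */
	delta_delta = timespec64_sub(delta, old_delta);

	if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
		/* a jump of >= 2s is treated as a deliberate time correction */
		old_delta = delta;
	} else {
		/* small drift: fold it back so the error does not accumulate */
		old_system = timespec64_sub(old_system, delta_delta);
	}
}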
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 1d006ef4bb57..84feb2565abd 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, dev interface
*
@@ -5,11 +6,7 @@
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* based on arch/arm/common/rtctime.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -60,7 +57,7 @@ static void rtc_uie_task(struct work_struct *work)
} else if (rtc->oldsecs != tm.tm_sec) {
num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
rtc->oldsecs = tm.tm_sec;
- rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
+ rtc->uie_timer.expires = jiffies + HZ - (HZ / 10);
rtc->uie_timer_active = 1;
rtc->uie_task_active = 0;
add_timer(&rtc->uie_timer);
@@ -71,6 +68,7 @@ static void rtc_uie_task(struct work_struct *work)
if (num)
rtc_handle_legacy_irq(rtc, num, RTC_UF);
}
+
static void rtc_uie_timer(struct timer_list *t)
{
struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
@@ -202,14 +200,14 @@ static __poll_t rtc_dev_poll(struct file *file, poll_table *wait)
}
static long rtc_dev_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
int err = 0;
struct rtc_device *rtc = file->private_data;
const struct rtc_class_ops *ops = rtc->ops;
struct rtc_time tm;
struct rtc_wkalrm alarm;
- void __user *uarg = (void __user *) arg;
+ void __user *uarg = (void __user *)arg;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
@@ -233,7 +231,7 @@ static long rtc_dev_ioctl(struct file *file,
case RTC_PIE_ON:
if (rtc->irq_freq > rtc->max_user_freq &&
- !capable(CAP_SYS_RESOURCE))
+ !capable(CAP_SYS_RESOURCE))
err = -EACCES;
break;
}
@@ -390,8 +388,9 @@ static long rtc_dev_ioctl(struct file *file,
err = ops->ioctl(rtc->dev.parent, cmd, arg);
if (err == -ENOIOCTLCMD)
err = -ENOTTY;
- } else
+ } else {
err = -ENOTTY;
+ }
break;
}
@@ -403,6 +402,7 @@ done:
static int rtc_dev_fasync(int fd, struct file *file, int on)
{
struct rtc_device *rtc = file->private_data;
+
return fasync_helper(fd, file, on, &rtc->async_queue);
}
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index ff2092a0d38c..a74d0d890600 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, initialize system time on startup
*
* Copyright (C) 2005 Tower Technologies
* Author: Alessandro Zummo <a.zummo@towertech.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -33,7 +30,7 @@ static int __init rtc_hctosys(void)
};
struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
- if (rtc == NULL) {
+ if (!rtc) {
pr_info("unable to open rtc device (%s)\n",
CONFIG_RTC_HCTOSYS_DEVICE);
goto err_open;
@@ -44,7 +41,6 @@ static int __init rtc_hctosys(void)
dev_err(rtc->dev.parent,
"hctosys: unable to read the hardware clock\n");
goto err_read;
-
}
tv64.tv_sec = rtc_tm_to_time64(&tm);
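The hctosys.c hunk above keeps the same flow while tidying the error paths: open the configured RTC, read it, convert with rtc_tm_to_time64() and push the result into the system clock. A condensed sketch of that flow, with error handling trimmed to the essentials, is shown below.

#include <linux/rtc.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>

static int example_hctosys(void)
{
	struct timespec64 tv64 = { .tv_nsec = NSEC_PER_SEC >> 1 };
	struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
	struct rtc_time tm;
	int err;

	if (!rtc)
		return -ENODEV;

	err = rtc_read_time(rtc, &tm);
	if (!err) {
		tv64.tv_sec = rtc_tm_to_time64(&tm);
		err = do_settimeofday64(&tv64);
	}

	rtc_class_close(rtc);
	return err;
}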
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 98d9c87b0d1b..4124f4dd376b 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, interface functions
*
@@ -5,11 +6,7 @@
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* based on arch/arm/common/rtctime.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#include <linux/rtc.h>
#include <linux/sched.h>
@@ -87,11 +84,12 @@ static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
int err;
- if (!rtc->ops)
+
+ if (!rtc->ops) {
err = -ENODEV;
- else if (!rtc->ops->read_time)
+ } else if (!rtc->ops->read_time) {
err = -EINVAL;
- else {
+ } else {
memset(tm, 0, sizeof(struct rtc_time));
err = rtc->ops->read_time(rtc->dev.parent, tm);
if (err < 0) {
@@ -147,14 +145,7 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
err = -ENODEV;
else if (rtc->ops->set_time)
err = rtc->ops->set_time(rtc->dev.parent, tm);
- else if (rtc->ops->set_mmss64) {
- time64_t secs64 = rtc_tm_to_time64(tm);
-
- err = rtc->ops->set_mmss64(rtc->dev.parent, secs64);
- } else if (rtc->ops->set_mmss) {
- time64_t secs64 = rtc_tm_to_time64(tm);
- err = rtc->ops->set_mmss(rtc->dev.parent, secs64);
- } else
+ else
err = -EINVAL;
pm_stay_awake(rtc->dev.parent);
@@ -167,7 +158,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
}
EXPORT_SYMBOL_GPL(rtc_set_time);
-static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+static int rtc_read_alarm_internal(struct rtc_device *rtc,
+ struct rtc_wkalrm *alarm)
{
int err;
@@ -175,11 +167,11 @@ static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *al
if (err)
return err;
- if (rtc->ops == NULL)
+ if (!rtc->ops) {
err = -ENODEV;
- else if (!rtc->ops->read_alarm)
+ } else if (!rtc->ops->read_alarm) {
err = -EINVAL;
- else {
+ } else {
alarm->enabled = 0;
alarm->pending = 0;
alarm->time.tm_sec = -1;
@@ -207,7 +199,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
int first_time = 1;
time64_t t_now, t_alm;
enum { none, day, month, year } missing = none;
- unsigned days;
+ unsigned int days;
/* The lower level RTC driver may return -1 in some fields,
* creating invalid alarm->time values, for reasons like:
@@ -276,10 +268,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return err;
/* note that tm_sec is a "don't care" value here: */
- } while ( before.tm_min != now.tm_min
- || before.tm_hour != now.tm_hour
- || before.tm_mon != now.tm_mon
- || before.tm_year != now.tm_year);
+ } while (before.tm_min != now.tm_min ||
+ before.tm_hour != now.tm_hour ||
+ before.tm_mon != now.tm_mon ||
+ before.tm_year != now.tm_year);
/* Fill in the missing alarm fields using the timestamp; we
* know there's at least one since alarm->time is invalid.
@@ -296,7 +288,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
alarm->time.tm_mday = now.tm_mday;
missing = day;
}
- if ((unsigned)alarm->time.tm_mon >= 12) {

+ if ((unsigned int)alarm->time.tm_mon >= 12) {
alarm->time.tm_mon = now.tm_mon;
if (missing == none)
missing = month;
@@ -321,7 +313,6 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
goto done;
switch (missing) {
-
/* 24 hour rollover ... if it's now 10am Monday, an alarm that
* will trigger at 5am will do so at 5am Tuesday, which
* could also be in the next month or year. This is a common
@@ -341,14 +332,14 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
case month:
dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
do {
- if (alarm->time.tm_mon < 11)
+ if (alarm->time.tm_mon < 11) {
alarm->time.tm_mon++;
- else {
+ } else {
alarm->time.tm_mon = 0;
alarm->time.tm_year++;
}
days = rtc_month_days(alarm->time.tm_mon,
- alarm->time.tm_year);
+ alarm->time.tm_year);
} while (days < alarm->time.tm_mday);
break;
@@ -357,8 +348,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
do {
alarm->time.tm_year++;
- } while (!is_leap_year(alarm->time.tm_year + 1900)
- && rtc_valid_tm(&alarm->time) != 0);
+ } while (!is_leap_year(alarm->time.tm_year + 1900) &&
+ rtc_valid_tm(&alarm->time) != 0);
break;
default:
@@ -369,7 +360,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
done:
if (err)
- dev_warn(&rtc->dev, "invalid alarm value: %ptR\n", &alarm->time);
+ dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
+ &alarm->time);
return err;
}
@@ -381,11 +373,11 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
- if (rtc->ops == NULL)
+ if (!rtc->ops) {
err = -ENODEV;
- else if (!rtc->ops->read_alarm)
+ } else if (!rtc->ops->read_alarm) {
err = -EINVAL;
- else {
+ } else {
memset(alarm, 0, sizeof(struct rtc_wkalrm));
alarm->enabled = rtc->aie_timer.enabled;
alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
@@ -494,7 +486,6 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
/* Alarm has to be enabled & in the future for us to enqueue it */
if (alarm->enabled && (rtc_tm_to_ktime(now) <
rtc->aie_timer.node.expires)) {
-
rtc->aie_timer.enabled = 1;
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
trace_rtc_timer_enqueue(&rtc->aie_timer);
@@ -506,7 +497,9 @@ EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
- int err = mutex_lock_interruptible(&rtc->ops_lock);
+ int err;
+
+ err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
@@ -535,7 +528,9 @@ EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
- int err = mutex_lock_interruptible(&rtc->ops_lock);
+ int err;
+
+ err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
@@ -564,27 +559,25 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
rtc->uie_rtctimer.period = ktime_set(1, 0);
err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
- } else
+ } else {
rtc_timer_remove(rtc, &rtc->uie_rtctimer);
+ }
out:
mutex_unlock(&rtc->ops_lock);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
- * Enable emulation if the driver did not provide
- * the update_irq_enable function pointer or if returned
- * -EINVAL to signal that it has been configured without
- * interrupts or that are not available at the moment.
+ * Enable emulation if the driver returned -EINVAL to signal that it has
+ * been configured without interrupts or they are not available at the
+ * moment.
*/
if (err == -EINVAL)
err = rtc_dev_update_irq_enable_emul(rtc, enabled);
#endif
return err;
-
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
-
/**
* rtc_handle_legacy_irq - AIE, UIE and PIE event hook
* @rtc: pointer to the rtc device
@@ -599,14 +592,13 @@ void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
/* mark one irq of the appropriate mode */
spin_lock_irqsave(&rtc->irq_lock, flags);
- rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
+ rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF | mode);
spin_unlock_irqrestore(&rtc->irq_lock, flags);
wake_up_interruptible(&rtc->irq_queue);
kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
-
/**
* rtc_aie_update_irq - AIE mode rtctimer hook
* @rtc: pointer to the rtc_device
@@ -618,7 +610,6 @@ void rtc_aie_update_irq(struct rtc_device *rtc)
rtc_handle_legacy_irq(rtc, 1, RTC_AF);
}
-
/**
* rtc_uie_update_irq - UIE mode rtctimer hook
* @rtc: pointer to the rtc_device
@@ -630,7 +621,6 @@ void rtc_uie_update_irq(struct rtc_device *rtc)
rtc_handle_legacy_irq(rtc, 1, RTC_UF);
}
-
/**
* rtc_pie_update_irq - PIE mode hrtimer hook
* @timer: pointer to the pie mode hrtimer
@@ -644,6 +634,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
struct rtc_device *rtc;
ktime_t period;
int count;
+
rtc = container_of(timer, struct rtc_device, pie_timer);
period = NSEC_PER_SEC / rtc->irq_freq;
@@ -662,7 +653,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
* Context: any
*/
void rtc_update_irq(struct rtc_device *rtc,
- unsigned long num, unsigned long events)
+ unsigned long num, unsigned long events)
{
if (IS_ERR_OR_NULL(rtc))
return;
@@ -811,6 +802,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
int err;
+
alarm.time = rtc_ktime_to_tm(timer->node.expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
@@ -851,12 +843,14 @@ static void rtc_alarm_disable(struct rtc_device *rtc)
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+
timerqueue_del(&rtc->timerqueue, &timer->node);
trace_rtc_timer_dequeue(timer);
timer->enabled = 0;
if (next == &timer->node) {
struct rtc_wkalrm alarm;
int err;
+
next = timerqueue_getnext(&rtc->timerqueue);
if (!next) {
rtc_alarm_disable(rtc);
@@ -929,9 +923,9 @@ again:
alarm.enabled = 1;
reprogram:
err = __rtc_set_alarm(rtc, &alarm);
- if (err == -ETIME)
+ if (err == -ETIME) {
goto again;
- else if (err) {
+ } else if (err) {
if (retry-- > 0)
goto reprogram;
@@ -942,14 +936,14 @@ reprogram:
dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
goto again;
}
- } else
+ } else {
rtc_alarm_disable(rtc);
+ }
pm_relax(rtc->dev.parent);
mutex_unlock(&rtc->ops_lock);
}
-
/* rtc_timer_init - Initializes an rtc_timer
* @timer: timer to be initialized
* @f: function pointer to be called when timer fires
@@ -975,9 +969,10 @@ void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r),
* Kernel interface to set an rtc_timer
*/
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
- ktime_t expires, ktime_t period)
+ ktime_t expires, ktime_t period)
{
int ret = 0;
+
mutex_lock(&rtc->ops_lock);
if (timer->enabled)
rtc_timer_remove(rtc, timer);
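The interface.c hunks above reflow the "month" rollover loop in __rtc_read_alarm(): when the alarm's day-of-month does not exist in the next month (say the alarm is for the 31st), months are stepped, wrapping the year on December, until the day fits. The sketch below mirrors that loop in isolation; it is an illustration, not a drop-in replacement for the hunk.

#include <linux/rtc.h>

static void example_roll_alarm_month(struct rtc_time *alarm)
{
	unsigned int days;

	do {
		if (alarm->tm_mon < 11) {
			alarm->tm_mon++;
		} else {
			/* December: wrap to January of the next year */
			alarm->tm_mon = 0;
			alarm->tm_year++;
		}
		days = rtc_month_days(alarm->tm_mon, alarm->tm_year);
	} while (days < alarm->tm_mday);
}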
diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c
index 9714cb3d1e29..23284580df97 100644
--- a/drivers/rtc/lib.c
+++ b/drivers/rtc/lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* rtc and date/time utility functions
*
@@ -5,11 +6,7 @@
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* based on arch/arm/common/rtctime.c and other bits
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#include <linux/export.h>
#include <linux/rtc.h>
@@ -25,7 +22,7 @@ static const unsigned short rtc_ydays[2][13] = {
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
-#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400)
+#define LEAPS_THRU_END_OF(y) ((y) / 4 - (y) / 100 + (y) / 400)
/*
* The number of days in the month.
@@ -41,11 +38,10 @@ EXPORT_SYMBOL(rtc_month_days);
*/
int rtc_year_days(unsigned int day, unsigned int month, unsigned int year)
{
- return rtc_ydays[is_leap_year(year)][month] + day-1;
+ return rtc_ydays[is_leap_year(year)][month] + day - 1;
}
EXPORT_SYMBOL(rtc_year_days);
-
/*
* rtc_time64_to_tm - Converts time64_t to rtc_time.
* Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
@@ -97,13 +93,15 @@ EXPORT_SYMBOL(rtc_time64_to_tm);
*/
int rtc_valid_tm(struct rtc_time *tm)
{
- if (tm->tm_year < 70
- || ((unsigned)tm->tm_mon) >= 12
- || tm->tm_mday < 1
- || tm->tm_mday > rtc_month_days(tm->tm_mon, ((unsigned)tm->tm_year + 1900))
- || ((unsigned)tm->tm_hour) >= 24
- || ((unsigned)tm->tm_min) >= 60
- || ((unsigned)tm->tm_sec) >= 60)
+ if (tm->tm_year < 70 ||
+ tm->tm_year > (INT_MAX - 1900) ||
+ ((unsigned int)tm->tm_mon) >= 12 ||
+ tm->tm_mday < 1 ||
+ tm->tm_mday > rtc_month_days(tm->tm_mon,
+ ((unsigned int)tm->tm_year + 1900)) ||
+ ((unsigned int)tm->tm_hour) >= 24 ||
+ ((unsigned int)tm->tm_min) >= 60 ||
+ ((unsigned int)tm->tm_sec) >= 60)
return -EINVAL;
return 0;
@@ -116,7 +114,7 @@ EXPORT_SYMBOL(rtc_valid_tm);
*/
time64_t rtc_tm_to_time64(struct rtc_time *tm)
{
- return mktime64(((unsigned)tm->tm_year + 1900), tm->tm_mon + 1,
+ return mktime64(((unsigned int)tm->tm_year + 1900), tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
}
EXPORT_SYMBOL(rtc_tm_to_time64);
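The lib.c hunks above adjust rtc_valid_tm() (which bounds-checks a broken-down time, with tm_year counted from 1900 and tm_mon from 0) and rtc_tm_to_time64() (which converts it to seconds since the Unix epoch). A short usage sketch follows; the example date is arbitrary.

#include <linux/rtc.h>

static int example_tm_to_secs(time64_t *out)
{
	struct rtc_time tm = {
		.tm_year = 119,	/* 2019 */
		.tm_mon  = 3,	/* April */
		.tm_mday = 1,
		.tm_hour = 12,
	};
	int err = rtc_valid_tm(&tm);

	if (err)
		return err;

	*out = rtc_tm_to_time64(&tm);
	return 0;
}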
diff --git a/drivers/rtc/nvmem.c b/drivers/rtc/nvmem.c
index dce518d5e50e..4312096c7738 100644
--- a/drivers/rtc/nvmem.c
+++ b/drivers/rtc/nvmem.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, nvmem interface
*
* Copyright (C) 2017 Alexandre Belloni
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
@@ -46,7 +43,7 @@ static int rtc_nvram_register(struct rtc_device *rtc,
{
int err;
- rtc->nvram = kzalloc(sizeof(struct bin_attribute), GFP_KERNEL);
+ rtc->nvram = kzalloc(sizeof(*rtc->nvram), GFP_KERNEL);
if (!rtc->nvram)
return -ENOMEM;
diff --git a/drivers/rtc/proc.c b/drivers/rtc/proc.c
index 4d74e4f4ff30..73344598fc1b 100644
--- a/drivers/rtc/proc.c
+++ b/drivers/rtc/proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, proc interface
*
@@ -5,11 +6,7 @@
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* based on arch/arm/common/rtctime.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#include <linux/module.h>
#include <linux/rtc.h>
@@ -60,17 +57,17 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
seq_printf(seq, "alrm_time\t: %ptRt\n", &alrm.time);
seq_printf(seq, "alrm_date\t: %ptRd\n", &alrm.time);
seq_printf(seq, "alarm_IRQ\t: %s\n",
- alrm.enabled ? "yes" : "no");
+ alrm.enabled ? "yes" : "no");
seq_printf(seq, "alrm_pending\t: %s\n",
- alrm.pending ? "yes" : "no");
+ alrm.pending ? "yes" : "no");
seq_printf(seq, "update IRQ enabled\t: %s\n",
- (rtc->uie_rtctimer.enabled) ? "yes" : "no");
+ (rtc->uie_rtctimer.enabled) ? "yes" : "no");
seq_printf(seq, "periodic IRQ enabled\t: %s\n",
- (rtc->pie_enabled) ? "yes" : "no");
+ (rtc->pie_enabled) ? "yes" : "no");
seq_printf(seq, "periodic IRQ frequency\t: %d\n",
- rtc->irq_freq);
+ rtc->irq_freq);
seq_printf(seq, "max user IRQ frequency\t: %d\n",
- rtc->max_user_freq);
+ rtc->max_user_freq);
}
seq_printf(seq, "24hr\t\t: yes\n");
@@ -85,7 +82,7 @@ void rtc_proc_add_device(struct rtc_device *rtc)
{
if (is_rtc_hctosys(rtc))
proc_create_single_data("driver/rtc", 0, NULL, rtc_proc_show,
- rtc);
+ rtc);
}
void rtc_proc_del_device(struct rtc_device *rtc)
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 1fc48ebd3cd0..e4d5a19fd1c9 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Real Time Clock driver for Marvell 88PM80x PMIC
*
* Copyright (c) 2012 Marvell International Ltd.
* Wenzeng Chen<wzch@marvell.com>
* Qiao Zhou <zhouqiao@marvell.com>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index d25282b4a7dd..73697e4b18a9 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -421,7 +421,7 @@ static int pm860x_rtc_remove(struct platform_device *pdev)
struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
#ifdef VRTC_CALIBRATION
- flush_scheduled_work();
+ cancel_delayed_work_sync(&info->calib_work);
/* disable measurement */
pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
#endif /* VRTC_CALIBRATION */
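The 88pm860x hunk above replaces flush_scheduled_work() with a targeted cancel of the driver's own delayed work, which both stops a pending run and waits for one already executing. A sketch of that teardown pattern follows; "example_info" and "calib_work" stand in for the driver's private data and are placeholders.

#include <linux/platform_device.h>
#include <linux/workqueue.h>

struct example_info {
	struct delayed_work calib_work;
};

static int example_remove(struct platform_device *pdev)
{
	struct example_info *info = platform_get_drvdata(pdev);

	/* cancel our own work item and wait for it, not the whole queue */
	cancel_delayed_work_sync(&info->calib_work);
	return 0;
}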
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 2233601761ac..cdad6f00debf 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rtc-ab-b5ze-s3 - Driver for Abracon AB-RTCMC-32.768Khz-B5ZE-S3
* I2C RTC / Alarm chip
@@ -10,19 +11,9 @@
*
* This work is based on ISL12057 driver (drivers/rtc/rtc-isl12057.c).
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/rtc.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
@@ -128,7 +119,6 @@
struct abb5zes3_rtc_data {
struct rtc_device *rtc;
struct regmap *regmap;
- struct mutex lock;
int irq;
@@ -138,8 +128,7 @@ struct abb5zes3_rtc_data {
/*
* Try and match register bits w/ fixed null values to see whether we
- * are dealing with an ABB5ZES3. Note: this function is called early
- * during init and hence does need mutex protection.
+ * are dealing with an ABB5ZES3.
*/
static int abb5zes3_i2c_validate_chip(struct regmap *regmap)
{
@@ -230,14 +219,12 @@ static int _abb5zes3_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (ret) {
dev_err(dev, "%s: reading RTC time failed (%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
/* If clock integrity is not guaranteed, do not return a time value */
- if (regs[ABB5ZES3_REG_RTC_SC] & ABB5ZES3_REG_RTC_SC_OSC) {
- ret = -ENODATA;
- goto err;
- }
+ if (regs[ABB5ZES3_REG_RTC_SC] & ABB5ZES3_REG_RTC_SC_OSC)
+ return -ENODATA;
tm->tm_sec = bcd2bin(regs[ABB5ZES3_REG_RTC_SC] & 0x7F);
tm->tm_min = bcd2bin(regs[ABB5ZES3_REG_RTC_MN]);
@@ -255,7 +242,6 @@ static int _abb5zes3_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_mon = bcd2bin(regs[ABB5ZES3_REG_RTC_MO]) - 1; /* starts at 1 */
tm->tm_year = bcd2bin(regs[ABB5ZES3_REG_RTC_YR]) + 100;
-err:
return ret;
}
@@ -273,12 +259,9 @@ static int abb5zes3_rtc_set_time(struct device *dev, struct rtc_time *tm)
regs[ABB5ZES3_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1);
regs[ABB5ZES3_REG_RTC_YR] = bin2bcd(tm->tm_year - 100);
- mutex_lock(&data->lock);
ret = regmap_bulk_write(data->regmap, ABB5ZES3_REG_RTC_SC,
regs + ABB5ZES3_REG_RTC_SC,
ABB5ZES3_RTC_SEC_LEN);
- mutex_unlock(&data->lock);
-
return ret;
}
@@ -332,38 +315,35 @@ static int _abb5zes3_rtc_read_timer(struct device *dev,
if (ret) {
dev_err(dev, "%s: reading Timer A section failed (%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
/* get current time ... */
ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
if (ret)
- goto err;
+ return ret;
/* ... convert to seconds ... */
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err;
+ rtc_secs = rtc_tm_to_time64(&rtc_tm);
/* ... add remaining timer A time ... */
ret = sec_from_timer_a(&timer_secs, regs[1], regs[2]);
if (ret)
- goto err;
+ return ret;
/* ... and convert back. */
- rtc_time_to_tm(rtc_secs + timer_secs, alarm_tm);
+ rtc_time64_to_tm(rtc_secs + timer_secs, alarm_tm);
ret = regmap_read(data->regmap, ABB5ZES3_REG_CTRL2, &reg);
if (ret) {
dev_err(dev, "%s: reading ctrl reg failed (%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
alarm->enabled = !!(reg & ABB5ZES3_REG_CTRL2_WTAIE);
-err:
- return ret;
+ return 0;
}
/* Read alarm currently configured via a RTC alarm registers. */
@@ -382,7 +362,7 @@ static int _abb5zes3_rtc_read_alarm(struct device *dev,
if (ret) {
dev_err(dev, "%s: reading alarm section failed (%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
alarm_tm->tm_sec = 0;
@@ -398,18 +378,13 @@ static int _abb5zes3_rtc_read_alarm(struct device *dev,
*/
ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
if (ret)
- goto err;
+ return ret;
alarm_tm->tm_year = rtc_tm.tm_year;
alarm_tm->tm_mon = rtc_tm.tm_mon;
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err;
-
- ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
- if (ret)
- goto err;
+ rtc_secs = rtc_tm_to_time64(&rtc_tm);
+ alarm_secs = rtc_tm_to_time64(alarm_tm);
if (alarm_secs < rtc_secs) {
if (alarm_tm->tm_mon == 11) {
@@ -424,13 +399,12 @@ static int _abb5zes3_rtc_read_alarm(struct device *dev,
if (ret) {
dev_err(dev, "%s: reading ctrl reg failed (%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
alarm->enabled = !!(reg & ABB5ZES3_REG_CTRL1_AIE);
-err:
- return ret;
+ return 0;
}
/*
@@ -447,12 +421,10 @@ static int abb5zes3_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
int ret;
- mutex_lock(&data->lock);
if (data->timer_alarm)
ret = _abb5zes3_rtc_read_timer(dev, alarm);
else
ret = _abb5zes3_rtc_read_alarm(dev, alarm);
- mutex_unlock(&data->lock);
return ret;
}
@@ -466,33 +438,25 @@ static int _abb5zes3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
struct rtc_time *alarm_tm = &alarm->time;
- unsigned long rtc_secs, alarm_secs;
u8 regs[ABB5ZES3_ALRM_SEC_LEN];
struct rtc_time rtc_tm;
int ret, enable = 1;
- ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
- if (ret)
- goto err;
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err;
-
- ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
- if (ret)
- goto err;
-
- /* If alarm time is before current time, disable the alarm */
- if (!alarm->enabled || alarm_secs <= rtc_secs) {
+ if (!alarm->enabled) {
enable = 0;
} else {
+ unsigned long rtc_secs, alarm_secs;
+
/*
* Chip only supports alarms up to one month in the future. Let's
* return an error if we get something after that limit.
* Comparison is done by incrementing rtc_tm month field by one
* and checking alarm value is still below.
*/
+ ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
+ if (ret)
+ return ret;
+
if (rtc_tm.tm_mon == 11) { /* handle year wrapping */
rtc_tm.tm_mon = 0;
rtc_tm.tm_year += 1;
@@ -500,15 +464,13 @@ static int _abb5zes3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
rtc_tm.tm_mon += 1;
}
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err;
+ rtc_secs = rtc_tm_to_time64(&rtc_tm);
+ alarm_secs = rtc_tm_to_time64(alarm_tm);
if (alarm_secs > rtc_secs) {
- dev_err(dev, "%s: alarm maximum is one month in the "
- "future (%d)\n", __func__, ret);
- ret = -EINVAL;
- goto err;
+ dev_err(dev, "%s: alarm maximum is one month in the future (%d)\n",
+ __func__, ret);
+ return -EINVAL;
}
}
@@ -526,17 +488,14 @@ static int _abb5zes3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (ret < 0) {
dev_err(dev, "%s: writing ALARM section failed (%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
/* Record currently configured alarm is not a timer */
data->timer_alarm = 0;
/* Enable or disable alarm interrupt generation */
- ret = _abb5zes3_rtc_update_alarm(dev, enable);
-
-err:
- return ret;
+ return _abb5zes3_rtc_update_alarm(dev, enable);
}
/*
@@ -557,7 +516,7 @@ static int _abb5zes3_rtc_set_timer(struct device *dev, struct rtc_wkalrm *alarm,
ABB5ZES3_TIMA_SEC_LEN);
if (ret < 0) {
dev_err(dev, "%s: writing timer section failed\n", __func__);
- goto err;
+ return ret;
}
/* Configure Timer A as a watchdog timer */
@@ -570,10 +529,7 @@ static int _abb5zes3_rtc_set_timer(struct device *dev, struct rtc_wkalrm *alarm,
data->timer_alarm = 1;
/* Enable or disable timer interrupt generation */
- ret = _abb5zes3_rtc_update_timer(dev, alarm->enabled);
-
-err:
- return ret;
+ return _abb5zes3_rtc_update_timer(dev, alarm->enabled);
}
/*
@@ -590,31 +546,25 @@ static int abb5zes3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct rtc_time rtc_tm;
int ret;
- mutex_lock(&data->lock);
ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
if (ret)
- goto err;
+ return ret;
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err;
-
- ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
- if (ret)
- goto err;
+ rtc_secs = rtc_tm_to_time64(&rtc_tm);
+ alarm_secs = rtc_tm_to_time64(alarm_tm);
/* Let's first disable both the alarm and the timer interrupts */
ret = _abb5zes3_rtc_update_alarm(dev, false);
if (ret < 0) {
dev_err(dev, "%s: unable to disable alarm (%d)\n", __func__,
ret);
- goto err;
+ return ret;
}
ret = _abb5zes3_rtc_update_timer(dev, false);
if (ret < 0) {
dev_err(dev, "%s: unable to disable timer (%d)\n", __func__,
ret);
- goto err;
+ return ret;
}
data->timer_alarm = 0;
@@ -629,9 +579,6 @@ static int abb5zes3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
else
ret = _abb5zes3_rtc_set_alarm(dev, alarm);
- err:
- mutex_unlock(&data->lock);
-
if (ret)
dev_err(dev, "%s: unable to configure alarm (%d)\n", __func__,
ret);
@@ -650,8 +597,7 @@ static inline int _abb5zes3_rtc_battery_low_irq_enable(struct regmap *regmap,
/*
* Check current RTC status and enable/disable what needs to be. Return 0 if
- * everything went ok and a negative value upon error. Note: this function
- * is called early during init and hence does need mutex protection.
+ * everything went ok and a negative value upon error.
*/
static int abb5zes3_rtc_check_setup(struct device *dev)
{
@@ -675,8 +621,9 @@ static int abb5zes3_rtc_check_setup(struct device *dev)
ABB5ZES3_REG_TIM_CLK_COF1 | ABB5ZES3_REG_TIM_CLK_COF2 |
ABB5ZES3_REG_TIM_CLK_TBM | ABB5ZES3_REG_TIM_CLK_TAM);
ret = regmap_update_bits(regmap, ABB5ZES3_REG_TIM_CLK, mask,
- ABB5ZES3_REG_TIM_CLK_COF0 | ABB5ZES3_REG_TIM_CLK_COF1 |
- ABB5ZES3_REG_TIM_CLK_COF2);
+ ABB5ZES3_REG_TIM_CLK_COF0 |
+ ABB5ZES3_REG_TIM_CLK_COF1 |
+ ABB5ZES3_REG_TIM_CLK_COF2);
if (ret < 0) {
dev_err(dev, "%s: unable to initialize clkout register (%d)\n",
__func__, ret);
@@ -729,9 +676,9 @@ static int abb5zes3_rtc_check_setup(struct device *dev)
* switchover flag but not battery low flag. The latter is checked
* later below.
*/
- mask = (ABB5ZES3_REG_CTRL3_PM0 | ABB5ZES3_REG_CTRL3_PM1 |
- ABB5ZES3_REG_CTRL3_PM2 | ABB5ZES3_REG_CTRL3_BLIE |
- ABB5ZES3_REG_CTRL3_BSIE| ABB5ZES3_REG_CTRL3_BSF);
+ mask = (ABB5ZES3_REG_CTRL3_PM0 | ABB5ZES3_REG_CTRL3_PM1 |
+ ABB5ZES3_REG_CTRL3_PM2 | ABB5ZES3_REG_CTRL3_BLIE |
+ ABB5ZES3_REG_CTRL3_BSIE | ABB5ZES3_REG_CTRL3_BSF);
ret = regmap_update_bits(regmap, ABB5ZES3_REG_CTRL3, mask, 0);
if (ret < 0) {
dev_err(dev, "%s: unable to initialize CTRL3 register (%d)\n",
@@ -748,10 +695,8 @@ static int abb5zes3_rtc_check_setup(struct device *dev)
}
if (reg & ABB5ZES3_REG_RTC_SC_OSC) {
- dev_err(dev, "clock integrity not guaranteed. Osc. has stopped "
- "or has been interrupted.\n");
- dev_err(dev, "change battery (if not already done) and "
- "then set time to reset osc. failure flag.\n");
+ dev_err(dev, "clock integrity not guaranteed. Osc. has stopped or has been interrupted.\n");
+ dev_err(dev, "change battery (if not already done) and then set time to reset osc. failure flag.\n");
}
/*
@@ -769,13 +714,12 @@ static int abb5zes3_rtc_check_setup(struct device *dev)
data->battery_low = reg & ABB5ZES3_REG_CTRL3_BLF;
if (data->battery_low) {
- dev_err(dev, "RTC battery is low; please, consider "
- "changing it!\n");
+ dev_err(dev, "RTC battery is low; please, consider changing it!\n");
ret = _abb5zes3_rtc_battery_low_irq_enable(regmap, false);
if (ret)
- dev_err(dev, "%s: disabling battery low interrupt "
- "generation failed (%d)\n", __func__, ret);
+ dev_err(dev, "%s: disabling battery low interrupt generation failed (%d)\n",
+ __func__, ret);
}
return ret;
@@ -788,12 +732,10 @@ static int abb5zes3_rtc_alarm_irq_enable(struct device *dev,
int ret = 0;
if (rtc_data->irq) {
- mutex_lock(&rtc_data->lock);
if (rtc_data->timer_alarm)
ret = _abb5zes3_rtc_update_timer(dev, enable);
else
ret = _abb5zes3_rtc_update_alarm(dev, enable);
- mutex_unlock(&rtc_data->lock);
}
return ret;
@@ -885,49 +827,44 @@ static int abb5zes3_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK)) {
- ret = -ENODEV;
- goto err;
- }
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENODEV;
regmap = devm_regmap_init_i2c(client, &abb5zes3_rtc_regmap_config);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(dev, "%s: regmap allocation failed: %d\n",
__func__, ret);
- goto err;
+ return ret;
}
ret = abb5zes3_i2c_validate_chip(regmap);
if (ret)
- goto err;
+ return ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!data)
+ return -ENOMEM;
- mutex_init(&data->lock);
data->regmap = regmap;
dev_set_drvdata(dev, data);
ret = abb5zes3_rtc_check_setup(dev);
if (ret)
- goto err;
+ return ret;
data->rtc = devm_rtc_allocate_device(dev);
ret = PTR_ERR_OR_ZERO(data->rtc);
if (ret) {
dev_err(dev, "%s: unable to allocate RTC device (%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
if (client->irq > 0) {
ret = devm_request_threaded_irq(dev, client->irq, NULL,
_abb5zes3_rtc_interrupt,
- IRQF_SHARED|IRQF_ONESHOT,
+ IRQF_SHARED | IRQF_ONESHOT,
DRV_NAME, client);
if (!ret) {
device_init_wakeup(dev, true);
@@ -949,8 +886,8 @@ static int abb5zes3_probe(struct i2c_client *client,
if (!data->battery_low && data->irq) {
ret = _abb5zes3_rtc_battery_low_irq_enable(regmap, true);
if (ret) {
- dev_err(dev, "%s: enabling battery low interrupt "
- "generation failed (%d)\n", __func__, ret);
+ dev_err(dev, "%s: enabling battery low interrupt generation failed (%d)\n",
+ __func__, ret);
goto err;
}
}
@@ -958,7 +895,7 @@ static int abb5zes3_probe(struct i2c_client *client,
ret = rtc_register_device(data->rtc);
err:
- if (ret && data && data->irq)
+ if (ret && data->irq)
device_init_wakeup(dev, false);
return ret;
}
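The ab-b5ze-s3 hunks above swap the old rtc_tm_to_time()/rtc_time_to_tm() helpers, which returned an error code, for the 64-bit variants, which cannot fail; that is what lets the "convert, check, goto err" sequences collapse into direct returns. A minimal sketch of the resulting straight-line conversion is shown below; example_add_offset() is a made-up helper name used only for illustration.

#include <linux/rtc.h>

static void example_add_offset(struct rtc_time *tm, time64_t offset)
{
	time64_t secs = rtc_tm_to_time64(tm);	/* cannot fail */

	rtc_time64_to_tm(secs + offset, tm);
}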
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index 821ff52a2222..2ed6def90975 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
* RTC clock driver for the AB3100 Analog Baseband Chip
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
@@ -43,12 +43,12 @@
/*
* RTC clock functions and device struct declaration
*/
-static int ab3100_rtc_set_mmss(struct device *dev, time64_t secs)
+static int ab3100_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2,
AB3100_TI3, AB3100_TI4, AB3100_TI5};
unsigned char buf[6];
- u64 hw_counter = secs * AB3100_RTC_CLOCK_RATE * 2;
+ u64 hw_counter = rtc_tm_to_time64(tm) * AB3100_RTC_CLOCK_RATE * 2;
int err = 0;
int i;
@@ -192,7 +192,7 @@ static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
static const struct rtc_class_ops ab3100_rtc_ops = {
.read_time = ab3100_rtc_read_time,
- .set_mmss64 = ab3100_rtc_set_mmss,
+ .set_time = ab3100_rtc_set_time,
.read_alarm = ab3100_rtc_read_alarm,
.set_alarm = ab3100_rtc_set_alarm,
.alarm_irq_enable = ab3100_rtc_irq_enable,
@@ -228,15 +228,17 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
/* Ignore any error on this write */
}
- rtc = devm_rtc_device_register(&pdev->dev, "ab3100-rtc",
- &ab3100_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- err = PTR_ERR(rtc);
- return err;
- }
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ rtc->ops = &ab3100_rtc_ops;
+ /* 48bit counter at (AB3100_RTC_CLOCK_RATE * 2) */
+ rtc->range_max = U32_MAX;
+
platform_set_drvdata(pdev, rtc);
- return 0;
+ return rtc_register_device(rtc);
}
static struct platform_driver ab3100_rtc_driver = {
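The ab3100 hunk above converts to the two-step registration scheme: allocate the device, fill in the ops and the hardware counter range, then register. A sketch of that probe pattern follows; example_rtc_ops is a stub standing in for the driver's real callbacks, and U32_MAX matches a 32-bit seconds counter.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static const struct rtc_class_ops example_rtc_ops = {
	/* .read_time, .set_time, ... filled in by the real driver */
};

static int example_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &example_rtc_ops;
	rtc->range_max = U32_MAX;	/* hardware keeps a 32-bit seconds count */

	platform_set_drvdata(pdev, rtc);

	return rtc_register_device(rtc);
}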
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 6ddcad642d1e..73830670a41f 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* A driver for the I2C members of the Abracon AB x8xx RTC family,
* and compatible: AB 1805 and AB 0805
@@ -7,10 +8,6 @@
* Author: Philippe De Muyter <phdm@macqel.be>
* Author: Alexandre Belloni <alexandre.belloni@bootlin.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/bcd.h>
@@ -404,7 +401,7 @@ static ssize_t autocalibration_store(struct device *dev,
return -EINVAL;
}
- retval = abx80x_rtc_set_autocalibration(dev, autocalibration);
+ retval = abx80x_rtc_set_autocalibration(dev->parent, autocalibration);
return retval ? retval : count;
}
@@ -414,7 +411,7 @@ static ssize_t autocalibration_show(struct device *dev,
{
int autocalibration = 0;
- autocalibration = abx80x_rtc_get_autocalibration(dev);
+ autocalibration = abx80x_rtc_get_autocalibration(dev->parent);
if (autocalibration < 0) {
dev_err(dev, "Failed to read RTC autocalibration\n");
sprintf(buf, "0\n");
@@ -430,7 +427,7 @@ static ssize_t oscillator_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
int retval, flags, rc_mode = 0;
if (strncmp(buf, "rc", 2) == 0) {
@@ -472,7 +469,7 @@ static ssize_t oscillator_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int rc_mode = 0;
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
rc_mode = abx80x_is_rc_mode(client);
@@ -592,13 +589,6 @@ static int abx80x_dt_trickle_cfg(struct device_node *np)
return (trickle_cfg | i);
}
-static void rtc_calib_remove_sysfs_group(void *_dev)
-{
- struct device *dev = _dev;
-
- sysfs_remove_group(&dev->kobj, &rtc_calib_attr_group);
-}
-
#ifdef CONFIG_WATCHDOG
static inline u8 timeout_bits(unsigned int timeout)
@@ -851,32 +841,14 @@ static int abx80x_probe(struct i2c_client *client,
}
}
- /* Export sysfs entries */
- err = sysfs_create_group(&(&client->dev)->kobj, &rtc_calib_attr_group);
+ err = rtc_add_group(priv->rtc, &rtc_calib_attr_group);
if (err) {
dev_err(&client->dev, "Failed to create sysfs group: %d\n",
err);
return err;
}
- err = devm_add_action_or_reset(&client->dev,
- rtc_calib_remove_sysfs_group,
- &client->dev);
- if (err) {
- dev_err(&client->dev,
- "Failed to add sysfs cleanup action: %d\n",
- err);
- return err;
- }
-
- err = rtc_register_device(priv->rtc);
-
- return err;
-}
-
-static int abx80x_remove(struct i2c_client *client)
-{
- return 0;
+ return rtc_register_device(priv->rtc);
}
static const struct i2c_device_id abx80x_id[] = {
@@ -899,7 +871,6 @@ static struct i2c_driver abx80x_driver = {
.name = "rtc-abx80x",
},
.probe = abx80x_probe,
- .remove = abx80x_remove,
.id_table = abx80x_id,
};
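The abx80x hunk above moves the calibration attributes from the i2c client to the rtc class device via rtc_add_group(), which also removes the manual sysfs cleanup; the show/store callbacks therefore receive the rtc device and hop to dev->parent to reach the i2c client. The sketch below illustrates that shape with a placeholder attribute.

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/rtc.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* dev is the rtc class device; its parent is the i2c client */
	struct i2c_client *client = to_i2c_client(dev->parent);

	return sprintf(buf, "%s\n", client->name);
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

/* in probe(), after devm_rtc_allocate_device():
 *	err = rtc_add_group(priv->rtc, &example_attr_group);
 */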
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
new file mode 100644
index 000000000000..af3eb676d7c3
--- /dev/null
+++ b/drivers/rtc/rtc-aspeed.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2015 IBM Corp.
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/io.h>
+
+struct aspeed_rtc {
+ struct rtc_device *rtc_dev;
+ void __iomem *base;
+};
+
+#define RTC_TIME 0x00
+#define RTC_YEAR 0x04
+#define RTC_CTRL 0x10
+
+#define RTC_UNLOCK BIT(1)
+#define RTC_ENABLE BIT(0)
+
+static int aspeed_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct aspeed_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int cent, year;
+ u32 reg1, reg2;
+
+ if (!(readl(rtc->base + RTC_CTRL) & RTC_ENABLE)) {
+ dev_dbg(dev, "%s failing as rtc disabled\n", __func__);
+ return -EINVAL;
+ }
+
+ do {
+ reg2 = readl(rtc->base + RTC_YEAR);
+ reg1 = readl(rtc->base + RTC_TIME);
+ } while (reg2 != readl(rtc->base + RTC_YEAR));
+
+ tm->tm_mday = (reg1 >> 24) & 0x1f;
+ tm->tm_hour = (reg1 >> 16) & 0x1f;
+ tm->tm_min = (reg1 >> 8) & 0x3f;
+ tm->tm_sec = (reg1 >> 0) & 0x3f;
+
+ cent = (reg2 >> 16) & 0x1f;
+ year = (reg2 >> 8) & 0x7f;
+ tm->tm_mon = ((reg2 >> 0) & 0x0f) - 1;
+ tm->tm_year = year + (cent * 100) - 1900;
+
+ dev_dbg(dev, "%s %ptR", __func__, tm);
+
+ return 0;
+}
+
+static int aspeed_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct aspeed_rtc *rtc = dev_get_drvdata(dev);
+ u32 reg1, reg2, ctrl;
+ int year, cent;
+
+ cent = (tm->tm_year + 1900) / 100;
+ year = tm->tm_year % 100;
+
+ reg1 = (tm->tm_mday << 24) | (tm->tm_hour << 16) | (tm->tm_min << 8) |
+ tm->tm_sec;
+
+ reg2 = ((cent & 0x1f) << 16) | ((year & 0x7f) << 8) |
+ ((tm->tm_mon + 1) & 0xf);
+
+ ctrl = readl(rtc->base + RTC_CTRL);
+ writel(ctrl | RTC_UNLOCK, rtc->base + RTC_CTRL);
+
+ writel(reg1, rtc->base + RTC_TIME);
+ writel(reg2, rtc->base + RTC_YEAR);
+
+ /* Re-lock and ensure enable is set now that a time is programmed */
+ writel(ctrl | RTC_ENABLE, rtc->base + RTC_CTRL);
+
+ return 0;
+}
+
+static const struct rtc_class_ops aspeed_rtc_ops = {
+ .read_time = aspeed_rtc_read_time,
+ .set_time = aspeed_rtc_set_time,
+};
+
+static int aspeed_rtc_probe(struct platform_device *pdev)
+{
+ struct aspeed_rtc *rtc;
+ struct resource *res;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtc->base))
+ return PTR_ERR(rtc->base);
+
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
+ platform_set_drvdata(pdev, rtc);
+
+ rtc->rtc_dev->ops = &aspeed_rtc_ops;
+ rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtc->rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
+
+ ret = rtc_register_device(rtc->rtc_dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_rtc_match[] = {
+ { .compatible = "aspeed,ast2400-rtc", },
+ { .compatible = "aspeed,ast2500-rtc", },
+ { .compatible = "aspeed,ast2600-rtc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, aspeed_rtc_match);
+
+static struct platform_driver aspeed_rtc_driver = {
+ .driver = {
+ .name = "aspeed-rtc",
+ .of_match_table = of_match_ptr(aspeed_rtc_match),
+ },
+};
+
+module_platform_driver_probe(aspeed_rtc_driver, aspeed_rtc_probe);
+
+MODULE_DESCRIPTION("ASPEED RTC driver");
+MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 1d31c0ae6334..4daf3789b978 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* "RTT as Real Time Clock" driver for AT91SAM9 SoC family
*
* (C) 2007 Michel Benoit
*
* Based on rtc-at91rm9200.c by Rick Bronson
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/clk.h>
@@ -47,21 +43,21 @@
* registers available, likewise usable for more than "RTC" support.
*/
-#define AT91_RTT_MR 0x00 /* Real-time Mode Register */
-#define AT91_RTT_RTPRES (0xffff << 0) /* Real-time Timer Prescaler Value */
-#define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */
-#define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */
-#define AT91_RTT_RTTRST (1 << 18) /* Real Time Timer Restart */
+#define AT91_RTT_MR 0x00 /* Real-time Mode Register */
+#define AT91_RTT_RTPRES (0xffff << 0) /* Timer Prescaler Value */
+#define AT91_RTT_ALMIEN BIT(16) /* Alarm Interrupt Enable */
+#define AT91_RTT_RTTINCIEN BIT(17) /* Increment Interrupt Enable */
+#define AT91_RTT_RTTRST BIT(18) /* Timer Restart */
-#define AT91_RTT_AR 0x04 /* Real-time Alarm Register */
-#define AT91_RTT_ALMV (0xffffffff) /* Alarm Value */
+#define AT91_RTT_AR 0x04 /* Real-time Alarm Register */
+#define AT91_RTT_ALMV (0xffffffff) /* Alarm Value */
-#define AT91_RTT_VR 0x08 /* Real-time Value Register */
-#define AT91_RTT_CRTV (0xffffffff) /* Current Real-time Value */
+#define AT91_RTT_VR 0x08 /* Real-time Value Register */
+#define AT91_RTT_CRTV (0xffffffff) /* Current Real-time Value */
-#define AT91_RTT_SR 0x0c /* Real-time Status Register */
-#define AT91_RTT_ALMS (1 << 0) /* Real-time Alarm Status */
-#define AT91_RTT_RTTINC (1 << 1) /* Real-time Timer Increment */
+#define AT91_RTT_SR 0x0c /* Real-time Status Register */
+#define AT91_RTT_ALMS BIT(0) /* Alarm Status */
+#define AT91_RTT_RTTINC BIT(1) /* Timer Increment */
/*
* We store ALARM_DISABLED in ALMV to record that no alarm is set.
@@ -69,14 +65,13 @@
*/
#define ALARM_DISABLED ((u32)~0)
-
struct sam9_rtc {
void __iomem *rtt;
struct rtc_device *rtcdev;
u32 imr;
struct regmap *gpbr;
unsigned int gpbr_offset;
- int irq;
+ int irq;
struct clk *sclk;
bool suspended;
unsigned long events;
@@ -122,7 +117,7 @@ static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
if (secs != secs2)
secs = rtt_readl(rtc, VR);
- rtc_time_to_tm(offset + secs, tm);
+ rtc_time64_to_tm(offset + secs, tm);
dev_dbg(dev, "%s: %ptR\n", __func__, tm);
@@ -135,15 +130,12 @@ static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
- int err;
u32 offset, alarm, mr;
unsigned long secs;
dev_dbg(dev, "%s: %ptR\n", __func__, tm);
- err = rtc_tm_to_time(tm, &secs);
- if (err != 0)
- return err;
+ secs = rtc_tm_to_time64(tm);
mr = rtt_readl(rtc, MR);
@@ -193,7 +185,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
memset(alrm, 0, sizeof(*alrm));
if (alarm != ALARM_DISABLED && offset != 0) {
- rtc_time_to_tm(offset + alarm, tm);
+ rtc_time64_to_tm(offset + alarm, tm);
dev_dbg(dev, "%s: %ptR\n", __func__, tm);
@@ -211,11 +203,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned long secs;
u32 offset;
u32 mr;
- int err;
- err = rtc_tm_to_time(tm, &secs);
- if (err != 0)
- return err;
+ secs = rtc_tm_to_time64(tm);
offset = gpbr_readl(rtc);
if (offset == 0) {
@@ -263,7 +252,7 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
u32 mr = rtt_readl(rtc, MR);
seq_printf(seq, "update_IRQ\t: %s\n",
- (mr & AT91_RTT_RTTINCIEN) ? "yes" : "no");
+ (mr & AT91_RTT_RTTINCIEN) ? "yes" : "no");
return 0;
}
@@ -299,7 +288,7 @@ static void at91_rtc_flush_events(struct sam9_rtc *rtc)
rtc->events = 0;
pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
- rtc->events >> 8, rtc->events & 0x000000FF);
+ rtc->events >> 8, rtc->events & 0x000000FF);
}
/*
@@ -340,13 +329,6 @@ static const struct rtc_class_ops at91_rtc_ops = {
.alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
-static const struct regmap_config gpbr_regmap_config = {
- .name = "gpbr",
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
/*
* Initialize and install RTC driver
*/
@@ -357,6 +339,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
int ret, irq;
u32 mr;
unsigned int sclk_rate;
+ struct of_phandle_args args;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -382,34 +365,14 @@ static int at91_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->rtt))
return PTR_ERR(rtc->rtt);
- if (!pdev->dev.of_node) {
- /*
- * TODO: Remove this code chunk when removing non DT board
- * support. Remember to remove the gpbr_regmap_config
- * variable too.
- */
- void __iomem *gpbr;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- gpbr = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(gpbr))
- return PTR_ERR(gpbr);
-
- rtc->gpbr = regmap_init_mmio(NULL, gpbr,
- &gpbr_regmap_config);
- } else {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
- "atmel,rtt-rtc-time-reg", 1, 0,
- &args);
- if (ret)
- return ret;
-
- rtc->gpbr = syscon_node_to_regmap(args.np);
- rtc->gpbr_offset = args.args[0];
- }
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "atmel,rtt-rtc-time-reg", 1, 0,
+ &args);
+ if (ret)
+ return ret;
+ rtc->gpbr = syscon_node_to_regmap(args.np);
+ rtc->gpbr_offset = args.args[0];
if (IS_ERR(rtc->gpbr)) {
dev_err(&pdev->dev, "failed to retrieve gpbr regmap, aborting.\n");
return -ENOMEM;
@@ -444,13 +407,15 @@ static int at91_rtc_probe(struct platform_device *pdev)
mr &= ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
rtt_writel(rtc, MR, mr);
- rtc->rtcdev = devm_rtc_device_register(&pdev->dev, pdev->name,
- &at91_rtc_ops, THIS_MODULE);
+ rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtcdev)) {
ret = PTR_ERR(rtc->rtcdev);
goto err_clk;
}
+ rtc->rtcdev->ops = &at91_rtc_ops;
+ rtc->rtcdev->range_max = U32_MAX;
+
/* register irq handler after we know what name we'll use */
ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
IRQF_SHARED | IRQF_COND_SUSPEND,
@@ -468,9 +433,9 @@ static int at91_rtc_probe(struct platform_device *pdev)
if (gpbr_readl(rtc) == 0)
dev_warn(&pdev->dev, "%s: SET TIME!\n",
- dev_name(&rtc->rtcdev->dev));
+ dev_name(&rtc->rtcdev->dev));
- return 0;
+ return rtc_register_device(rtc->rtcdev);
err_clk:
clk_disable_unprepare(rtc->sclk);
@@ -528,8 +493,9 @@ static int at91_rtc_suspend(struct device *dev)
/* don't let RTTINC cause wakeups */
if (mr & AT91_RTT_RTTINCIEN)
rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
- } else
+ } else {
rtt_writel(rtc, MR, mr & ~rtc->imr);
+ }
}
return 0;
@@ -561,13 +527,11 @@ static int at91_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
-#ifdef CONFIG_OF
static const struct of_device_id at91_rtc_dt_ids[] = {
{ .compatible = "atmel,at91sam9260-rtt" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
-#endif
static struct platform_driver at91_rtc_driver = {
.probe = at91_rtc_probe,
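The at91sam9 hunk above drops the non-DT fallback and resolves the backup register purely from the device tree: the "atmel,rtt-rtc-time-reg" property carries a syscon phandle plus one argument cell giving the register offset inside that syscon. The sketch below shows that lookup in isolation, with error handling pared down for illustration.

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_get_time_reg(struct device_node *np,
				struct regmap **map, unsigned int *offset)
{
	struct of_phandle_args args;
	int ret;

	/* one argument cell: the offset of the time register in the syscon */
	ret = of_parse_phandle_with_fixed_args(np, "atmel,rtt-rtc-time-reg",
					       1, 0, &args);
	if (ret)
		return ret;

	*map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(*map))
		return PTR_ERR(*map);

	*offset = args.args[0];
	return 0;
}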
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index f4010a75f2be..a193396a8140 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -132,7 +132,7 @@ static int brcmstb_waketmr_gettime(struct device *dev,
wktmr_read(timer, &now);
- rtc_time_to_tm(now.sec, tm);
+ rtc_time64_to_tm(now.sec, tm);
return 0;
}
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 0b232c84f674..4ac850837153 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
* Real Time Clock interface for ST-Ericsson AB COH 901 331 RTC.
* Author: Linus Walleij <linus.walleij@stericsson.com>
* Based on rtc-pl031.c by Deepak Saxena <dsaxena@plexity.net>
@@ -80,21 +80,22 @@ static int coh901331_read_time(struct device *dev, struct rtc_time *tm)
clk_enable(rtap->clk);
/* Check if the time is valid */
- if (readl(rtap->virtbase + COH901331_VALID)) {
- rtc_time_to_tm(readl(rtap->virtbase + COH901331_CUR_TIME), tm);
+ if (!readl(rtap->virtbase + COH901331_VALID)) {
clk_disable(rtap->clk);
- return 0;
+ return -EINVAL;
}
+
+ rtc_time64_to_tm(readl(rtap->virtbase + COH901331_CUR_TIME), tm);
clk_disable(rtap->clk);
- return -EINVAL;
+ return 0;
}
-static int coh901331_set_mmss(struct device *dev, unsigned long secs)
+static int coh901331_set_time(struct device *dev, struct rtc_time *tm)
{
struct coh901331_port *rtap = dev_get_drvdata(dev);
clk_enable(rtap->clk);
- writel(secs, rtap->virtbase + COH901331_SET_TIME);
+ writel(rtc_tm_to_time64(tm), rtap->virtbase + COH901331_SET_TIME);
clk_disable(rtap->clk);
return 0;
@@ -105,7 +106,7 @@ static int coh901331_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct coh901331_port *rtap = dev_get_drvdata(dev);
clk_enable(rtap->clk);
- rtc_time_to_tm(readl(rtap->virtbase + COH901331_ALARM), &alarm->time);
+ rtc_time64_to_tm(readl(rtap->virtbase + COH901331_ALARM), &alarm->time);
alarm->pending = readl(rtap->virtbase + COH901331_IRQ_EVENT) & 1U;
alarm->enabled = readl(rtap->virtbase + COH901331_IRQ_MASK) & 1U;
clk_disable(rtap->clk);
@@ -116,9 +117,8 @@ static int coh901331_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int coh901331_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct coh901331_port *rtap = dev_get_drvdata(dev);
- unsigned long time;
+ unsigned long time = rtc_tm_to_time64(&alarm->time);
- rtc_tm_to_time(&alarm->time, &time);
clk_enable(rtap->clk);
writel(time, rtap->virtbase + COH901331_ALARM);
writel(alarm->enabled, rtap->virtbase + COH901331_IRQ_MASK);
@@ -143,7 +143,7 @@ static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled)
static const struct rtc_class_ops coh901331_ops = {
.read_time = coh901331_read_time,
- .set_mmss = coh901331_set_mmss,
+ .set_time = coh901331_set_time,
.read_alarm = coh901331_read_alarm,
.set_alarm = coh901331_set_alarm,
.alarm_irq_enable = coh901331_alarm_irq_enable,
@@ -188,6 +188,13 @@ static int __init coh901331_probe(struct platform_device *pdev)
return ret;
}
+ rtap->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtap->rtc))
+ return PTR_ERR(rtap->rtc);
+
+ rtap->rtc->ops = &coh901331_ops;
+ rtap->rtc->range_max = U32_MAX;
+
/* We enable/disable the clock only to assure it works */
ret = clk_prepare_enable(rtap->clk);
if (ret) {
@@ -197,12 +204,10 @@ static int __init coh901331_probe(struct platform_device *pdev)
clk_disable(rtap->clk);
platform_set_drvdata(pdev, rtap);
- rtap->rtc = devm_rtc_device_register(&pdev->dev, "coh901331",
- &coh901331_ops, THIS_MODULE);
- if (IS_ERR(rtap->rtc)) {
- ret = PTR_ERR(rtap->rtc);
+
+ ret = rtc_register_device(rtap->rtc);
+ if (ret)
goto out_no_rtc;
- }
return 0;
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index e5444296075e..4d6bf9304ceb 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev))
- enable_irq_wake(cros_ec_rtc->cros_ec->irq);
+ return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
return 0;
}
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev))
- disable_irq_wake(cros_ec_rtc->cros_ec->irq);
+ return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
return 0;
}
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index b4e054c64bad..15908d51b1cb 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -1,15 +1,7 @@
-/* rtc-da9063.c - Real time clock device driver for DA9063
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Real time clock device driver for DA9063
* Copyright (C) 2013-2015 Dialog Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
@@ -247,8 +239,8 @@ static int da9063_rtc_read_time(struct device *dev, struct rtc_time *tm)
da9063_data_to_tm(data, tm, rtc);
- rtc_tm_to_time(tm, &tm_secs);
- rtc_tm_to_time(&rtc->alarm_time, &al_secs);
+ tm_secs = rtc_tm_to_time64(tm);
+ al_secs = rtc_tm_to_time64(&rtc->alarm_time);
/* handle the rtc synchronisation delay */
if (rtc->rtc_sync == true && al_secs - tm_secs == 1)
@@ -472,14 +464,24 @@ static int da9063_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
- &da9063_rtc_ops, THIS_MODULE);
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
+ rtc->rtc_dev->ops = &da9063_rtc_ops;
+ rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->rtc_dev->range_max = RTC_TIMESTAMP_END_2063;
+
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
+ /*
+ * TODO: some models have alarms on a minute boundary but still support
+ * real hardware interrupts. Add this once the core supports it.
+ */
+ if (config->rtc_data_start != RTC_SEC)
+ rtc->rtc_dev->uie_unsupported = 1;
+
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
@@ -489,7 +491,7 @@ static int da9063_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
- return ret;
+ return rtc_register_device(rtc->rtc_dev);
}
static struct platform_driver da9063_rtc_driver = {
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index b253bf1b3531..0aecc3f8e721 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Real Time Clock driver for Conexant Digicolor
*
* Copyright (C) 2015 Paradox Innovation Ltd.
*
* Author: Baruch Siach <baruch@tkos.co.il>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/io.h>
@@ -106,11 +102,11 @@ static int dc_rtc_read_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-static int dc_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int dc_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct dc_rtc *rtc = dev_get_drvdata(dev);
- return dc_rtc_write(rtc, secs);
+ return dc_rtc_write(rtc, rtc_tm_to_time64(tm));
}
static int dc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
@@ -161,7 +157,7 @@ static int dc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
static const struct rtc_class_ops dc_rtc_ops = {
.read_time = dc_rtc_read_time,
- .set_mmss = dc_rtc_set_mmss,
+ .set_time = dc_rtc_set_time,
.read_alarm = dc_rtc_read_alarm,
.set_alarm = dc_rtc_set_alarm,
.alarm_irq_enable = dc_rtc_alarm_irq_enable,
@@ -192,6 +188,10 @@ static int __init dc_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->regs))
return PTR_ERR(rtc->regs);
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -200,12 +200,11 @@ static int __init dc_rtc_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, pdev->name,
- &dc_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc_dev))
- return PTR_ERR(rtc->rtc_dev);
- return 0;
+ rtc->rtc_dev->ops = &dc_rtc_ops;
+ rtc->rtc_dev->range_max = U32_MAX;
+
+ return rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id dc_dt_ids[] = {
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index 97d8259b9494..cd947a20843b 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rtc-dm355evm.c - access battery-backed counter in MSP430 firmware
*
* Copyright (c) 2008 by David Brownell
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
@@ -78,7 +74,7 @@ static int dm355evm_rtc_read_time(struct device *dev, struct rtc_time *tm)
dev_dbg(dev, "read timestamp %08x\n", time.value);
- rtc_time_to_tm(le32_to_cpu(time.value), tm);
+ rtc_time64_to_tm(le32_to_cpu(time.value), tm);
return 0;
}
@@ -88,7 +84,7 @@ static int dm355evm_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned long value;
int status;
- rtc_tm_to_time(tm, &value);
+ value = rtc_tm_to_time64(tm);
time.value = cpu_to_le32(value);
dev_dbg(dev, "write timestamp %08x\n", time.value);
@@ -127,16 +123,16 @@ static int dm355evm_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &dm355evm_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(rtc));
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
return PTR_ERR(rtc);
- }
+
platform_set_drvdata(pdev, rtc);
- return 0;
+ rtc->ops = &dm355evm_rtc_ops;
+ rtc->range_max = U32_MAX;
+
+ return rtc_register_device(rtc);
}
/*
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 38a2e9e684df..225a8df1d4e9 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -467,7 +467,7 @@ static int ds1374_wdt_open(struct inode *inode, struct file *file)
*/
wdt_is_open = 1;
mutex_unlock(&ds1374->mutex);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
return -ENODEV;
}
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index b1ebca099b0d..e9e8d02743ee 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* An rtc/i2c driver for the Dallas DS1672
* Copyright 2005-06 Tower Technologies
*
* Author: Alessandro Zummo <a.zummo@towertech.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/i2c.h>
@@ -21,17 +18,16 @@
#define DS1672_REG_CONTROL_EOSC 0x80
-static struct i2c_driver ds1672_driver;
-
/*
* In the routines that deal directly with the ds1672 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch
- * Epoch is initialized as 2000. Time is set to UTC.
+ * Time is set to UTC.
*/
-static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+static int ds1672_read_time(struct device *dev, struct rtc_time *tm)
{
+ struct i2c_client *client = to_i2c_client(dev);
unsigned long time;
- unsigned char addr = DS1672_REG_CNT_BASE;
+ unsigned char addr = DS1672_REG_CONTROL;
unsigned char buf[4];
struct i2c_msg msgs[] = {
@@ -43,11 +39,25 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{/* read date */
.addr = client->addr,
.flags = I2C_M_RD,
- .len = 4,
+ .len = 1,
.buf = buf
},
};
+ /* read control register */
+ if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
+ dev_warn(&client->dev, "Unable to read the control register\n");
+ return -EIO;
+ }
+
+ if (buf[0] & DS1672_REG_CONTROL_EOSC) {
+ dev_warn(&client->dev, "Oscillator not enabled. Set time to enable.\n");
+ return -EINVAL;
+ }
+
+ addr = DS1672_REG_CNT_BASE;
+ msgs[1].len = 4;
+
/* read date registers */
if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
@@ -61,20 +71,19 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
time = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
- rtc_time_to_tm(time, tm);
+ rtc_time64_to_tm(time, tm);
- dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
- "mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__, tm->tm_sec, tm->tm_min, tm->tm_hour,
- tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+ dev_dbg(&client->dev, "%s: tm is %ptR\n", __func__, tm);
return 0;
}
-static int ds1672_set_mmss(struct i2c_client *client, unsigned long secs)
+static int ds1672_set_time(struct device *dev, struct rtc_time *tm)
{
+ struct i2c_client *client = to_i2c_client(dev);
int xfer;
unsigned char buf[6];
+ unsigned long secs = rtc_tm_to_time64(tm);
buf[0] = DS1672_REG_CNT_BASE;
buf[1] = secs & 0x000000FF;
@@ -92,71 +101,15 @@ static int ds1672_set_mmss(struct i2c_client *client, unsigned long secs)
return 0;
}
-static int ds1672_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- return ds1672_get_datetime(to_i2c_client(dev), tm);
-}
-
-static int ds1672_rtc_set_mmss(struct device *dev, unsigned long secs)
-{
- return ds1672_set_mmss(to_i2c_client(dev), secs);
-}
-
-static int ds1672_get_control(struct i2c_client *client, u8 *status)
-{
- unsigned char addr = DS1672_REG_CONTROL;
-
- struct i2c_msg msgs[] = {
- {/* setup read ptr */
- .addr = client->addr,
- .len = 1,
- .buf = &addr
- },
- {/* read control */
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = status
- },
- };
-
- /* read control register */
- if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-/* following are the sysfs callback functions */
-static ssize_t show_control(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct i2c_client *client = to_i2c_client(dev);
- u8 control;
- int err;
-
- err = ds1672_get_control(client, &control);
- if (err)
- return err;
-
- return sprintf(buf, "%s\n", (control & DS1672_REG_CONTROL_EOSC)
- ? "disabled" : "enabled");
-}
-
-static DEVICE_ATTR(control, S_IRUGO, show_control, NULL);
-
static const struct rtc_class_ops ds1672_rtc_ops = {
- .read_time = ds1672_rtc_read_time,
- .set_mmss = ds1672_rtc_set_mmss,
+ .read_time = ds1672_read_time,
+ .set_time = ds1672_set_time,
};
static int ds1672_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int err = 0;
- u8 control;
struct rtc_device *rtc;
dev_dbg(&client->dev, "%s\n", __func__);
@@ -164,29 +117,21 @@ static int ds1672_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- rtc = devm_rtc_device_register(&client->dev, ds1672_driver.driver.name,
- &ds1672_rtc_ops, THIS_MODULE);
-
+ rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- i2c_set_clientdata(client, rtc);
+ rtc->ops = &ds1672_rtc_ops;
+ rtc->range_max = U32_MAX;
- /* read control register */
- err = ds1672_get_control(client, &control);
- if (err) {
- dev_warn(&client->dev, "Unable to read the control register\n");
- }
+ err = rtc_register_device(rtc);
+ if (err)
+ return err;
- if (control & DS1672_REG_CONTROL_EOSC)
- dev_warn(&client->dev, "Oscillator not enabled. "
- "Set time to enable.\n");
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- /* Register sysfs hooks */
- err = device_create_file(&client->dev, &dev_attr_control);
- if (err)
- dev_err(&client->dev, "Unable to create sysfs entry: %s\n",
- dev_attr_control.attr.name);
+ i2c_set_clientdata(client, rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 2710f2594c42..5f4328524183 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -192,42 +192,6 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
}
/**
- * ds1685_rtc_begin_ctrl_access - prepare the rtc for ctrl access.
- * @rtc: pointer to the ds1685 rtc structure.
- * @flags: irq flags variable for spin_lock_irqsave.
- *
- * This takes several steps to prepare the rtc for access to read just the
- * control registers:
- * - Sets a spinlock on the rtc IRQ.
- * - Switches the rtc to bank 1. This allows access to the two extended
- * control registers.
- *
- * Only use this where you are certain another lock will not be held.
- */
-static inline void
-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
-{
- spin_lock_irqsave(&rtc->lock, *flags);
- ds1685_rtc_switch_to_bank1(rtc);
-}
-
-/**
- * ds1685_rtc_end_ctrl_access - end ctrl access on the rtc.
- * @rtc: pointer to the ds1685 rtc structure.
- * @flags: irq flags variable for spin_unlock_irqrestore.
- *
- * This ends what was started by ds1685_rtc_begin_ctrl_access:
- * - Switches the rtc back to bank 0.
- * - Unsets the spinlock on the rtc IRQ.
- */
-static inline void
-ds1685_rtc_end_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
-{
- ds1685_rtc_switch_to_bank0(rtc);
- spin_unlock_irqrestore(&rtc->lock, flags);
-}
-
-/**
* ds1685_rtc_get_ssn - retrieve the silicon serial number.
* @rtc: pointer to the ds1685 rtc structure.
* @ssn: u8 array to hold the bits of the silicon serial number.
@@ -546,10 +510,6 @@ static int
ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
- unsigned long flags = 0;
-
- /* Enable/disable the Alarm IRQ-Enable flag. */
- spin_lock_irqsave(&rtc->lock, flags);
/* Flip the requisite interrupt-enable bit. */
if (enabled)
@@ -561,7 +521,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
/* Read Control C to clear all the flag bits. */
rtc->read(rtc, RTC_CTRL_C);
- spin_unlock_irqrestore(&rtc->lock, flags);
return 0;
}
@@ -569,98 +528,18 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
/* ----------------------------------------------------------------------- */
-/* IRQ handler & workqueue. */
-
-/**
- * ds1685_rtc_irq_handler - IRQ handler.
- * @irq: IRQ number.
- * @dev_id: platform device pointer.
- */
-static irqreturn_t
-ds1685_rtc_irq_handler(int irq, void *dev_id)
-{
- struct platform_device *pdev = dev_id;
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
- u8 ctrlb, ctrlc;
- unsigned long events = 0;
- u8 num_irqs = 0;
-
- /* Abort early if the device isn't ready yet (i.e., DEBUG_SHIRQ). */
- if (unlikely(!rtc))
- return IRQ_HANDLED;
-
- /* Ctrlb holds the interrupt-enable bits and ctrlc the flag bits. */
- spin_lock(&rtc->lock);
- ctrlb = rtc->read(rtc, RTC_CTRL_B);
- ctrlc = rtc->read(rtc, RTC_CTRL_C);
-
- /* Is the IRQF bit set? */
- if (likely(ctrlc & RTC_CTRL_C_IRQF)) {
- /*
- * We need to determine if it was one of the standard
- * events: PF, AF, or UF. If so, we handle them and
- * update the RTC core.
- */
- if (likely(ctrlc & RTC_CTRL_B_PAU_MASK)) {
- events = RTC_IRQF;
-
- /* Check for a periodic interrupt. */
- if ((ctrlb & RTC_CTRL_B_PIE) &&
- (ctrlc & RTC_CTRL_C_PF)) {
- events |= RTC_PF;
- num_irqs++;
- }
-
- /* Check for an alarm interrupt. */
- if ((ctrlb & RTC_CTRL_B_AIE) &&
- (ctrlc & RTC_CTRL_C_AF)) {
- events |= RTC_AF;
- num_irqs++;
- }
-
- /* Check for an update interrupt. */
- if ((ctrlb & RTC_CTRL_B_UIE) &&
- (ctrlc & RTC_CTRL_C_UF)) {
- events |= RTC_UF;
- num_irqs++;
- }
-
- rtc_update_irq(rtc->dev, num_irqs, events);
- } else {
- /*
- * One of the "extended" interrupts was received that
- * is not recognized by the RTC core. These need to
- * be handled in task context as they can call other
- * functions and the time spent in irq context needs
- * to be minimized. Schedule them into a workqueue
- * and inform the RTC core that the IRQs were handled.
- */
- spin_unlock(&rtc->lock);
- schedule_work(&rtc->work);
- rtc_update_irq(rtc->dev, 0, 0);
- return IRQ_HANDLED;
- }
- }
- spin_unlock(&rtc->lock);
-
- return events ? IRQ_HANDLED : IRQ_NONE;
-}
+/* IRQ handler */
/**
- * ds1685_rtc_work_queue - work queue handler.
- * @work: work_struct containing data to work on in task context.
+ * ds1685_rtc_extended_irq - take care of extended interrupts
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @pdev: platform device pointer.
*/
static void
-ds1685_rtc_work_queue(struct work_struct *work)
+ds1685_rtc_extended_irq(struct ds1685_priv *rtc, struct platform_device *pdev)
{
- struct ds1685_priv *rtc = container_of(work,
- struct ds1685_priv, work);
- struct platform_device *pdev = to_platform_device(&rtc->dev->dev);
- struct mutex *rtc_mutex = &rtc->dev->ops_lock;
u8 ctrl4a, ctrl4b;
- mutex_lock(rtc_mutex);
-
ds1685_rtc_switch_to_bank1(rtc);
ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
ctrl4b = rtc->read(rtc, RTC_EXT_CTRL_4B);
@@ -739,8 +618,76 @@ ds1685_rtc_work_queue(struct work_struct *work)
"RAM-Clear IRQ just occurred!\n");
}
ds1685_rtc_switch_to_bank0(rtc);
+}
+
+/**
+ * ds1685_rtc_irq_handler - IRQ handler.
+ * @irq: IRQ number.
+ * @dev_id: platform device pointer.
+ */
+static irqreturn_t
+ds1685_rtc_irq_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct mutex *rtc_mutex;
+ u8 ctrlb, ctrlc;
+ unsigned long events = 0;
+ u8 num_irqs = 0;
+
+ /* Abort early if the device isn't ready yet (i.e., DEBUG_SHIRQ). */
+ if (unlikely(!rtc))
+ return IRQ_HANDLED;
+
+ rtc_mutex = &rtc->dev->ops_lock;
+ mutex_lock(rtc_mutex);
+ /* Ctrlb holds the interrupt-enable bits and ctrlc the flag bits. */
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ ctrlc = rtc->read(rtc, RTC_CTRL_C);
+
+ /* Is the IRQF bit set? */
+ if (likely(ctrlc & RTC_CTRL_C_IRQF)) {
+ /*
+ * We need to determine if it was one of the standard
+ * events: PF, AF, or UF. If so, we handle them and
+ * update the RTC core.
+ */
+ if (likely(ctrlc & RTC_CTRL_B_PAU_MASK)) {
+ events = RTC_IRQF;
+
+ /* Check for a periodic interrupt. */
+ if ((ctrlb & RTC_CTRL_B_PIE) &&
+ (ctrlc & RTC_CTRL_C_PF)) {
+ events |= RTC_PF;
+ num_irqs++;
+ }
+
+ /* Check for an alarm interrupt. */
+ if ((ctrlb & RTC_CTRL_B_AIE) &&
+ (ctrlc & RTC_CTRL_C_AF)) {
+ events |= RTC_AF;
+ num_irqs++;
+ }
+
+ /* Check for an update interrupt. */
+ if ((ctrlb & RTC_CTRL_B_UIE) &&
+ (ctrlc & RTC_CTRL_C_UF)) {
+ events |= RTC_UF;
+ num_irqs++;
+ }
+ } else {
+ /*
+ * One of the "extended" interrupts was received that
+ * is not recognized by the RTC core.
+ */
+ ds1685_rtc_extended_irq(rtc, pdev);
+ }
+ }
+ rtc_update_irq(rtc->dev, num_irqs, events);
mutex_unlock(rtc_mutex);
+
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
/* ----------------------------------------------------------------------- */
@@ -869,11 +816,15 @@ static int ds1685_nvram_read(void *priv, unsigned int pos, void *val,
size_t size)
{
struct ds1685_priv *rtc = priv;
+ struct mutex *rtc_mutex = &rtc->dev->ops_lock;
ssize_t count;
- unsigned long flags = 0;
u8 *buf = val;
+ int err;
+
+ err = mutex_lock_interruptible(rtc_mutex);
+ if (err)
+ return err;
- spin_lock_irqsave(&rtc->lock, flags);
ds1685_rtc_switch_to_bank0(rtc);
/* Read NVRAM in time and bank0 registers. */
@@ -923,7 +874,7 @@ static int ds1685_nvram_read(void *priv, unsigned int pos, void *val,
ds1685_rtc_switch_to_bank0(rtc);
}
#endif /* !CONFIG_RTC_DRV_DS1689 */
- spin_unlock_irqrestore(&rtc->lock, flags);
+ mutex_unlock(rtc_mutex);
return 0;
}
@@ -932,11 +883,15 @@ static int ds1685_nvram_write(void *priv, unsigned int pos, void *val,
size_t size)
{
struct ds1685_priv *rtc = priv;
+ struct mutex *rtc_mutex = &rtc->dev->ops_lock;
ssize_t count;
- unsigned long flags = 0;
u8 *buf = val;
+ int err;
+
+ err = mutex_lock_interruptible(rtc_mutex);
+ if (err)
+ return err;
- spin_lock_irqsave(&rtc->lock, flags);
ds1685_rtc_switch_to_bank0(rtc);
/* Write NVRAM in time and bank0 registers. */
@@ -986,7 +941,7 @@ static int ds1685_nvram_write(void *priv, unsigned int pos, void *val,
ds1685_rtc_switch_to_bank0(rtc);
}
#endif /* !CONFIG_RTC_DRV_DS1689 */
- spin_unlock_irqrestore(&rtc->lock, flags);
+ mutex_unlock(rtc_mutex);
return 0;
}
@@ -1004,7 +959,7 @@ static ssize_t
ds1685_rtc_sysfs_battery_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev->parent);
u8 ctrld;
ctrld = rtc->read(rtc, RTC_CTRL_D);
@@ -1024,7 +979,7 @@ static ssize_t
ds1685_rtc_sysfs_auxbatt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev->parent);
u8 ctrl4a;
ds1685_rtc_switch_to_bank1(rtc);
@@ -1046,7 +1001,7 @@ static ssize_t
ds1685_rtc_sysfs_serial_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev->parent);
u8 ssn[8];
ds1685_rtc_switch_to_bank1(rtc);
@@ -1177,9 +1132,7 @@ ds1685_rtc_probe(struct platform_device *pdev)
if (pdata->plat_post_ram_clear)
rtc->post_ram_clear = pdata->plat_post_ram_clear;
- /* Init the spinlock, workqueue, & set the driver data. */
- spin_lock_init(&rtc->lock);
- INIT_WORK(&rtc->work, ds1685_rtc_work_queue);
+ /* set the driver data. */
platform_set_drvdata(pdev, rtc);
/* Turn the oscillator on if is not already on (DV1 = 1). */
@@ -1335,22 +1288,23 @@ ds1685_rtc_probe(struct platform_device *pdev)
*/
if (!pdata->no_irq) {
ret = platform_get_irq(pdev, 0);
- if (ret > 0) {
- rtc->irq_num = ret;
-
- /* Request an IRQ. */
- ret = devm_request_irq(&pdev->dev, rtc->irq_num,
- ds1685_rtc_irq_handler,
- IRQF_SHARED, pdev->name, pdev);
-
- /* Check to see if something came back. */
- if (unlikely(ret)) {
- dev_warn(&pdev->dev,
- "RTC interrupt not available\n");
- rtc->irq_num = 0;
- }
- } else
+ if (ret <= 0)
return ret;
+
+ rtc->irq_num = ret;
+
+ /* Request an IRQ. */
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq_num,
+ NULL, ds1685_rtc_irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT,
+ pdev->name, pdev);
+
+ /* Check to see if something came back. */
+ if (unlikely(ret)) {
+ dev_warn(&pdev->dev,
+ "RTC interrupt not available\n");
+ rtc->irq_num = 0;
+ }
}
rtc->no_irq = pdata->no_irq;
@@ -1397,8 +1351,6 @@ ds1685_rtc_remove(struct platform_device *pdev)
(rtc->read(rtc, RTC_EXT_CTRL_4A) &
~(RTC_CTRL_4A_RWK_MASK)));
- cancel_work_sync(&rtc->work);
-
return 0;
}
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index b886b6a5c178..1e9f429ada64 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -29,14 +23,6 @@
#define DS2404_COPY_SCRATCHPAD_CMD 0x55
#define DS2404_READ_MEMORY_CMD 0xf0
-struct ds2404;
-
-struct ds2404_chip_ops {
- int (*map_io)(struct ds2404 *chip, struct platform_device *pdev,
- struct ds2404_platform_data *pdata);
- void (*unmap_io)(struct ds2404 *chip);
-};
-
#define DS2404_RST 0
#define DS2404_CLK 1
#define DS2404_DQ 2
@@ -48,7 +34,6 @@ struct ds2404_gpio {
struct ds2404 {
struct ds2404_gpio *gpio;
- const struct ds2404_chip_ops *ops;
struct rtc_device *rtc;
};
@@ -87,7 +72,7 @@ err_request:
return err;
}
-static void ds2404_gpio_unmap(struct ds2404 *chip)
+static void ds2404_gpio_unmap(void *data)
{
int i;
@@ -95,11 +80,6 @@ static void ds2404_gpio_unmap(struct ds2404 *chip)
gpio_free(ds2404_gpio[i].gpio);
}
-static const struct ds2404_chip_ops ds2404_gpio_ops = {
- .map_io = ds2404_gpio_map,
- .unmap_io = ds2404_gpio_unmap,
-};
-
static void ds2404_reset(struct device *dev)
{
gpio_set_value(ds2404_gpio[DS2404_RST].gpio, 0);
@@ -206,20 +186,20 @@ static int ds2404_read_time(struct device *dev, struct rtc_time *dt)
ds2404_read_memory(dev, 0x203, 4, (u8 *)&time);
time = le32_to_cpu(time);
- rtc_time_to_tm(time, dt);
+ rtc_time64_to_tm(time, dt);
return 0;
}
-static int ds2404_set_mmss(struct device *dev, unsigned long secs)
+static int ds2404_set_time(struct device *dev, struct rtc_time *dt)
{
- u32 time = cpu_to_le32(secs);
+ u32 time = cpu_to_le32(rtc_tm_to_time64(dt));
ds2404_write_memory(dev, 0x203, 4, (u8 *)&time);
return 0;
}
static const struct rtc_class_ops ds2404_rtc_ops = {
.read_time = ds2404_read_time,
- .set_mmss = ds2404_set_mmss,
+ .set_time = ds2404_set_time,
};
static int rtc_probe(struct platform_device *pdev)
@@ -232,11 +212,17 @@ static int rtc_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- chip->ops = &ds2404_gpio_ops;
+ chip->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(chip->rtc))
+ return PTR_ERR(chip->rtc);
- retval = chip->ops->map_io(chip, pdev, pdata);
+ retval = ds2404_gpio_map(chip, pdev, pdata);
if (retval)
- goto err_chip;
+ return retval;
+
+ retval = devm_add_action_or_reset(&pdev->dev, ds2404_gpio_unmap, chip);
+ if (retval)
+ return retval;
dev_info(&pdev->dev, "using GPIOs RST:%d, CLK:%d, DQ:%d\n",
chip->gpio[DS2404_RST].gpio, chip->gpio[DS2404_CLK].gpio,
@@ -244,34 +230,19 @@ static int rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
- chip->rtc = devm_rtc_device_register(&pdev->dev, "ds2404",
- &ds2404_rtc_ops, THIS_MODULE);
- if (IS_ERR(chip->rtc)) {
- retval = PTR_ERR(chip->rtc);
- goto err_io;
- }
+ chip->rtc->ops = &ds2404_rtc_ops;
+ chip->rtc->range_max = U32_MAX;
- ds2404_enable_osc(&pdev->dev);
- return 0;
-
-err_io:
- chip->ops->unmap_io(chip);
-err_chip:
- return retval;
-}
-
-static int rtc_remove(struct platform_device *dev)
-{
- struct ds2404 *chip = platform_get_drvdata(dev);
-
- chip->ops->unmap_io(chip);
+ retval = rtc_register_device(chip->rtc);
+ if (retval)
+ return retval;
+ ds2404_enable_osc(&pdev->dev);
return 0;
}
static struct platform_driver rtc_device_driver = {
.probe = rtc_probe,
- .remove = rtc_remove,
.driver = {
.name = "ds2404",
},
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 7184e5145f12..1e9312f96021 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -48,6 +48,10 @@
# define DS3232_REG_SR_A1F 0x01
#define DS3232_REG_TEMPERATURE 0x11
+#define DS3232_REG_SRAM_START 0x14
+#define DS3232_REG_SRAM_END 0xFF
+
+#define DS3232_REG_SRAM_SIZE 236
struct ds3232 {
struct device *dev;
@@ -461,11 +465,39 @@ static const struct rtc_class_ops ds3232_rtc_ops = {
.alarm_irq_enable = ds3232_alarm_irq_enable,
};
+static int ds3232_nvmem_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct regmap *ds3232_regmap = (struct regmap *)priv;
+
+ return regmap_bulk_read(ds3232_regmap, DS3232_REG_SRAM_START + offset,
+ val, bytes);
+}
+
+static int ds3232_nvmem_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct regmap *ds3232_regmap = (struct regmap *)priv;
+
+ return regmap_bulk_write(ds3232_regmap, DS3232_REG_SRAM_START + offset,
+ val, bytes);
+}
+
static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name)
{
struct ds3232 *ds3232;
int ret;
+ struct nvmem_config nvmem_cfg = {
+ .name = "ds3232_sram",
+ .stride = 1,
+ .size = DS3232_REG_SRAM_SIZE,
+ .word_size = 1,
+ .reg_read = ds3232_nvmem_read,
+ .reg_write = ds3232_nvmem_write,
+ .priv = regmap,
+ .type = NVMEM_TYPE_BATTERY_BACKED
+ };
ds3232 = devm_kzalloc(dev, sizeof(*ds3232), GFP_KERNEL);
if (!ds3232)
@@ -490,6 +522,10 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
if (IS_ERR(ds3232->rtc))
return PTR_ERR(ds3232->rtc);
+ ret = rtc_nvmem_register(ds3232->rtc, &nvmem_cfg);
+ if (ret)
+ return ret;
+
if (ds3232->irq > 0) {
ret = devm_request_threaded_irq(dev, ds3232->irq, NULL,
ds3232_irq,
@@ -542,7 +578,7 @@ static int ds3232_i2c_probe(struct i2c_client *client,
static const struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0x13,
+ .max_register = DS3232_REG_SRAM_END,
};
regmap = devm_regmap_init_i2c(client, &config);
@@ -609,7 +645,7 @@ static int ds3234_probe(struct spi_device *spi)
static const struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0x13,
+ .max_register = DS3232_REG_SRAM_END,
.write_flag_mask = 0x80,
};
struct regmap *regmap;
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 1932a4f861d1..1766496385fe 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* A driver for the RTC embedded in the Cirrus Logic EP93XX processors
* Copyright (c) 2006 Tower Technologies
*
* Author: Alessandro Zummo <a.zummo@towertech.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -18,27 +15,23 @@
#define EP93XX_RTC_DATA 0x000
#define EP93XX_RTC_MATCH 0x004
#define EP93XX_RTC_STATUS 0x008
-#define EP93XX_RTC_STATUS_INTR (1<<0)
+#define EP93XX_RTC_STATUS_INTR BIT(0)
#define EP93XX_RTC_LOAD 0x00C
#define EP93XX_RTC_CONTROL 0x010
-#define EP93XX_RTC_CONTROL_MIE (1<<0)
+#define EP93XX_RTC_CONTROL_MIE BIT(0)
#define EP93XX_RTC_SWCOMP 0x108
#define EP93XX_RTC_SWCOMP_DEL_MASK 0x001f0000
#define EP93XX_RTC_SWCOMP_DEL_SHIFT 16
#define EP93XX_RTC_SWCOMP_INT_MASK 0x0000ffff
#define EP93XX_RTC_SWCOMP_INT_SHIFT 0
-/*
- * struct device dev.platform_data is used to store our private data
- * because struct rtc_device does not have a variable to hold it.
- */
struct ep93xx_rtc {
void __iomem *mmio_base;
struct rtc_device *rtc;
};
static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
- unsigned short *delete)
+ unsigned short *delete)
{
struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
unsigned long comp;
@@ -63,13 +56,14 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
time = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA);
- rtc_time_to_tm(time, tm);
+ rtc_time64_to_tm(time, tm);
return 0;
}
-static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int ep93xx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
+ unsigned long secs = rtc_tm_to_time64(tm);
writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD);
return 0;
@@ -89,31 +83,31 @@ static int ep93xx_rtc_proc(struct device *dev, struct seq_file *seq)
static const struct rtc_class_ops ep93xx_rtc_ops = {
.read_time = ep93xx_rtc_read_time,
- .set_mmss = ep93xx_rtc_set_mmss,
+ .set_time = ep93xx_rtc_set_time,
.proc = ep93xx_rtc_proc,
};
-static ssize_t ep93xx_rtc_show_comp_preload(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t comp_preload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned short preload;
- ep93xx_rtc_get_swcomp(dev, &preload, NULL);
+ ep93xx_rtc_get_swcomp(dev->parent, &preload, NULL);
return sprintf(buf, "%d\n", preload);
}
-static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_rtc_show_comp_preload, NULL);
+static DEVICE_ATTR_RO(comp_preload);
-static ssize_t ep93xx_rtc_show_comp_delete(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t comp_delete_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned short delete;
- ep93xx_rtc_get_swcomp(dev, NULL, &delete);
+ ep93xx_rtc_get_swcomp(dev->parent, NULL, &delete);
return sprintf(buf, "%d\n", delete);
}
-static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_rtc_show_comp_delete, NULL);
+static DEVICE_ATTR_RO(comp_delete);
static struct attribute *ep93xx_rtc_attrs[] = {
&dev_attr_comp_preload.attr,
@@ -140,33 +134,20 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
if (IS_ERR(ep93xx_rtc->mmio_base))
return PTR_ERR(ep93xx_rtc->mmio_base);
- pdev->dev.platform_data = ep93xx_rtc;
platform_set_drvdata(pdev, ep93xx_rtc);
- ep93xx_rtc->rtc = devm_rtc_device_register(&pdev->dev,
- pdev->name, &ep93xx_rtc_ops, THIS_MODULE);
- if (IS_ERR(ep93xx_rtc->rtc)) {
- err = PTR_ERR(ep93xx_rtc->rtc);
- goto exit;
- }
+ ep93xx_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(ep93xx_rtc->rtc))
+ return PTR_ERR(ep93xx_rtc->rtc);
- err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
- if (err)
- goto exit;
-
- return 0;
+ ep93xx_rtc->rtc->ops = &ep93xx_rtc_ops;
+ ep93xx_rtc->rtc->range_max = U32_MAX;
-exit:
- pdev->dev.platform_data = NULL;
- return err;
-}
-
-static int ep93xx_rtc_remove(struct platform_device *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
- pdev->dev.platform_data = NULL;
+ err = rtc_add_group(ep93xx_rtc->rtc, &ep93xx_rtc_sysfs_files);
+ if (err)
+ return err;
- return 0;
+ return rtc_register_device(ep93xx_rtc->rtc);
}
static struct platform_driver ep93xx_rtc_driver = {
@@ -174,7 +155,6 @@ static struct platform_driver ep93xx_rtc_driver = {
.name = "ep93xx-rtc",
},
.probe = ep93xx_rtc_probe,
- .remove = ep93xx_rtc_remove,
};
module_platform_driver(ep93xx_rtc_driver);
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index a1c44d0c8557..1a3420ee6a4d 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -1,23 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/* drivers/rtc/rtc-goldfish.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2017 Imagination Technologies Ltd.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
-#include <linux/io.h>
#define TIMER_TIME_LOW 0x00 /* get low bits of current time */
/* and update TIMER_TIME_HIGH */
@@ -56,7 +48,7 @@ static int goldfish_rtc_read_alarm(struct device *dev,
do_div(rtc_alarm, NSEC_PER_SEC);
memset(alrm, 0, sizeof(struct rtc_wkalrm));
- rtc_time_to_tm(rtc_alarm, &alrm->time);
+ rtc_time64_to_tm(rtc_alarm, &alrm->time);
if (readl(base + TIMER_ALARM_STATUS))
alrm->enabled = 1;
@@ -70,21 +62,15 @@ static int goldfish_rtc_set_alarm(struct device *dev,
struct rtc_wkalrm *alrm)
{
struct goldfish_rtc *rtcdrv;
- unsigned long rtc_alarm;
u64 rtc_alarm64;
u64 rtc_status_reg;
void __iomem *base;
- int ret = 0;
rtcdrv = dev_get_drvdata(dev);
base = rtcdrv->base;
if (alrm->enabled) {
- ret = rtc_tm_to_time(&alrm->time, &rtc_alarm);
- if (ret != 0)
- return ret;
-
- rtc_alarm64 = rtc_alarm * NSEC_PER_SEC;
+ rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
writel(rtc_alarm64, base + TIMER_ALARM_LOW);
} else {
@@ -98,7 +84,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
writel(1, base + TIMER_CLEAR_ALARM);
}
- return ret;
+ return 0;
}
static int goldfish_rtc_alarm_irq_enable(struct device *dev,
@@ -147,7 +133,7 @@ static int goldfish_rtc_read_time(struct device *dev, struct rtc_time *tm)
do_div(time, NSEC_PER_SEC);
- rtc_time_to_tm(time, tm);
+ rtc_time64_to_tm(time, tm);
return 0;
}
@@ -156,21 +142,16 @@ static int goldfish_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct goldfish_rtc *rtcdrv;
void __iomem *base;
- unsigned long now;
u64 now64;
- int ret;
rtcdrv = dev_get_drvdata(dev);
base = rtcdrv->base;
- ret = rtc_tm_to_time(tm, &now);
- if (ret == 0) {
- now64 = now * NSEC_PER_SEC;
- writel((now64 >> 32), base + TIMER_TIME_HIGH);
- writel(now64, base + TIMER_TIME_LOW);
- }
+ now64 = rtc_tm_to_time64(tm) * NSEC_PER_SEC;
+ writel((now64 >> 32), base + TIMER_TIME_HIGH);
+ writel(now64, base + TIMER_TIME_LOW);
- return ret;
+ return 0;
}
static const struct rtc_class_ops goldfish_rtc_ops = {
@@ -205,19 +186,20 @@ static int goldfish_rtc_probe(struct platform_device *pdev)
if (rtcdrv->irq < 0)
return -ENODEV;
- rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &goldfish_rtc_ops,
- THIS_MODULE);
+ rtcdrv->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtcdrv->rtc))
return PTR_ERR(rtcdrv->rtc);
+ rtcdrv->rtc->ops = &goldfish_rtc_ops;
+ rtcdrv->rtc->range_max = U64_MAX / NSEC_PER_SEC;
+
err = devm_request_irq(&pdev->dev, rtcdrv->irq,
goldfish_rtc_interrupt,
0, pdev->name, rtcdrv);
if (err)
return err;
- return 0;
+ return rtc_register_device(rtcdrv->rtc);
}
static const struct of_device_id goldfish_rtc_of_match[] = {
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 3e1abb455472..f27c40e8331f 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -205,8 +205,7 @@ static int hid_time_parse_report(struct platform_device *pdev,
static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned long flags;
- struct hid_time_state *time_state =
- platform_get_drvdata(to_platform_device(dev));
+ struct hid_time_state *time_state = dev_get_drvdata(dev);
int ret;
reinit_completion(&time_state->comp_last_time);
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 80931114c899..3f3d652a0b0f 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2010 Orex Computed Radiography
*/
/*
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-/* based on rtc-mc13892.c */
-
-/*
* This driver uses the 47-bit 32 kHz counter in the Freescale DryIce block
* to implement a Linux RTC. Times and alarms are truncated to seconds.
* Since the RTC framework performs API locking via rtc->ops_lock the
@@ -552,7 +542,7 @@ static int dryice_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned long now;
now = readl(imxdi->ioaddr + DTCMR);
- rtc_time_to_tm(now, tm);
+ rtc_time64_to_tm(now, tm);
return 0;
}
@@ -561,7 +551,7 @@ static int dryice_rtc_read_time(struct device *dev, struct rtc_time *tm)
* set the seconds portion of dryice time counter and clear the
* fractional part.
*/
-static int dryice_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int dryice_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct imxdi_dev *imxdi = dev_get_drvdata(dev);
u32 dcr, dsr;
@@ -588,7 +578,7 @@ static int dryice_rtc_set_mmss(struct device *dev, unsigned long secs)
if (rc != 0)
return rc;
- rc = di_write_wait(imxdi, secs, DTCMR);
+ rc = di_write_wait(imxdi, rtc_tm_to_time64(tm), DTCMR);
if (rc != 0)
return rc;
@@ -618,7 +608,7 @@ static int dryice_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
u32 dcamr;
dcamr = readl(imxdi->ioaddr + DCAMR);
- rtc_time_to_tm(dcamr, &alarm->time);
+ rtc_time64_to_tm(dcamr, &alarm->time);
/* alarm is enabled if the interrupt is enabled */
alarm->enabled = (readl(imxdi->ioaddr + DIER) & DIER_CAIE) != 0;
@@ -640,21 +630,10 @@ static int dryice_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int dryice_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct imxdi_dev *imxdi = dev_get_drvdata(dev);
- unsigned long now;
- unsigned long alarm_time;
int rc;
- rc = rtc_tm_to_time(&alarm->time, &alarm_time);
- if (rc)
- return rc;
-
- /* don't allow setting alarm in the past */
- now = readl(imxdi->ioaddr + DTCMR);
- if (alarm_time < now)
- return -EINVAL;
-
/* write the new alarm time */
- rc = di_write_wait(imxdi, (u32)alarm_time, DCAMR);
+ rc = di_write_wait(imxdi, rtc_tm_to_time64(&alarm->time), DCAMR);
if (rc)
return rc;
@@ -668,7 +647,7 @@ static int dryice_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static const struct rtc_class_ops dryice_rtc_ops = {
.read_time = dryice_rtc_read_time,
- .set_mmss = dryice_rtc_set_mmss,
+ .set_time = dryice_rtc_set_time,
.alarm_irq_enable = dryice_rtc_alarm_irq_enable,
.read_alarm = dryice_rtc_read_alarm,
.set_alarm = dryice_rtc_set_alarm,
@@ -796,6 +775,10 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
mutex_init(&imxdi->write_mutex);
+ imxdi->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(imxdi->rtc))
+ return PTR_ERR(imxdi->rtc);
+
imxdi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(imxdi->clk))
return PTR_ERR(imxdi->clk);
@@ -829,12 +812,13 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, imxdi);
- imxdi->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &dryice_rtc_ops, THIS_MODULE);
- if (IS_ERR(imxdi->rtc)) {
- rc = PTR_ERR(imxdi->rtc);
+
+ imxdi->rtc->ops = &dryice_rtc_ops;
+ imxdi->rtc->range_max = U32_MAX;
+
+ rc = rtc_register_device(imxdi->rtc);
+ if (rc)
goto err;
- }
return 0;
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index d0a891777f44..9e7b3a04debc 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
* JZ4740 SoC RTC driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/clk.h>
@@ -20,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/reboot.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -156,6 +148,9 @@ static int jz4740_rtc_read_time(struct device *dev, struct rtc_time *time)
uint32_t secs, secs2;
int timeout = 5;
+ if (jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD) != 0x12345678)
+ return -EINVAL;
+
/* If the seconds register is read while it is updated, it can contain a
* bogus value. This can be avoided by making sure that two consecutive
* reads have the same value.
@@ -171,16 +166,21 @@ static int jz4740_rtc_read_time(struct device *dev, struct rtc_time *time)
if (timeout == 0)
return -EIO;
- rtc_time_to_tm(secs, time);
+ rtc_time64_to_tm(secs, time);
return 0;
}
-static int jz4740_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int jz4740_rtc_set_time(struct device *dev, struct rtc_time *time)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+ int ret;
- return jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, secs);
+ ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, rtc_tm_to_time64(time));
+ if (ret)
+ return ret;
+
+ return jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
}
static int jz4740_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -196,18 +196,16 @@ static int jz4740_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = !!(ctrl & JZ_RTC_CTRL_AE);
alrm->pending = !!(ctrl & JZ_RTC_CTRL_AF);
- rtc_time_to_tm(secs, &alrm->time);
+ rtc_time64_to_tm(secs, &alrm->time);
- return rtc_valid_tm(&alrm->time);
+ return 0;
}
static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
int ret;
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
- unsigned long secs;
-
- rtc_tm_to_time(&alrm->time, &secs);
+ uint32_t secs = lower_32_bits(rtc_tm_to_time64(&alrm->time));
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs);
if (!ret)
@@ -225,7 +223,7 @@ static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
static const struct rtc_class_ops jz4740_rtc_ops = {
.read_time = jz4740_rtc_read_time,
- .set_mmss = jz4740_rtc_set_mmss,
+ .set_time = jz4740_rtc_set_time,
.read_alarm = jz4740_rtc_read_alarm,
.set_alarm = jz4740_rtc_set_alarm,
.alarm_irq_enable = jz4740_rtc_alarm_irq_enable,
@@ -309,7 +307,6 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_rtc *rtc;
- uint32_t scratchpad;
struct resource *mem;
const struct platform_device_id *id = platform_get_device_id(pdev);
const struct of_device_id *of_id = of_match_device(
@@ -348,10 +345,24 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &jz4740_rtc_ops, THIS_MODULE);
+ ret = dev_pm_set_wake_irq(&pdev->dev, rtc->irq);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set wake irq: %d\n", ret);
+ return ret;
+ }
+
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
+ dev_err(&pdev->dev, "Failed to allocate rtc device: %d\n", ret);
+ return ret;
+ }
+
+ rtc->rtc->ops = &jz4740_rtc_ops;
+ rtc->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(rtc->rtc);
+ if (ret) {
dev_err(&pdev->dev, "Failed to register rtc device: %d\n", ret);
return ret;
}
@@ -363,16 +374,6 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
return ret;
}
- scratchpad = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD);
- if (scratchpad != 0x12345678) {
- ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
- ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0);
- if (ret) {
- dev_err(&pdev->dev, "Could not write to RTC registers\n");
- return ret;
- }
- }
-
if (np && of_device_is_system_power_controller(np)) {
if (!pm_power_off) {
/* Default: 60ms */
@@ -397,35 +398,6 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int jz4740_rtc_suspend(struct device *dev)
-{
- struct jz4740_rtc *rtc = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(rtc->irq);
- return 0;
-}
-
-static int jz4740_rtc_resume(struct device *dev)
-{
- struct jz4740_rtc *rtc = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(rtc->irq);
- return 0;
-}
-
-static const struct dev_pm_ops jz4740_pm_ops = {
- .suspend = jz4740_rtc_suspend,
- .resume = jz4740_rtc_resume,
-};
-#define JZ4740_RTC_PM_OPS (&jz4740_pm_ops)
-
-#else
-#define JZ4740_RTC_PM_OPS NULL
-#endif /* CONFIG_PM */
-
static const struct platform_device_id jz4740_rtc_ids[] = {
{ "jz4740-rtc", ID_JZ4740 },
{ "jz4780-rtc", ID_JZ4780 },
@@ -437,7 +409,6 @@ static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.driver = {
.name = "jz4740-rtc",
- .pm = JZ4740_RTC_PM_OPS,
.of_match_table = of_match_ptr(jz4740_rtc_of_match),
},
.id_table = jz4740_rtc_ids,
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 910e600275b9..ac393230e592 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
@@ -47,8 +39,6 @@
#define LPC32XX_RTC_KEY_ONSW_LOADVAL 0xB5C13F27
-#define RTC_NAME "rtc-lpc32xx"
-
#define rtc_readl(dev, reg) \
__raw_readl((dev)->rtc_base + (reg))
#define rtc_writel(dev, reg, val) \
@@ -68,14 +58,15 @@ static int lpc32xx_rtc_read_time(struct device *dev, struct rtc_time *time)
struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
elapsed_sec = rtc_readl(rtc, LPC32XX_RTC_UCOUNT);
- rtc_time_to_tm(elapsed_sec, time);
+ rtc_time64_to_tm(elapsed_sec, time);
return 0;
}
-static int lpc32xx_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int lpc32xx_rtc_set_time(struct device *dev, struct rtc_time *time)
{
struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+ u32 secs = rtc_tm_to_time64(time);
u32 tmp;
spin_lock_irq(&rtc->lock);
@@ -97,7 +88,7 @@ static int lpc32xx_rtc_read_alarm(struct device *dev,
{
struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
- rtc_time_to_tm(rtc_readl(rtc, LPC32XX_RTC_MATCH0), &wkalrm->time);
+ rtc_time64_to_tm(rtc_readl(rtc, LPC32XX_RTC_MATCH0), &wkalrm->time);
wkalrm->enabled = rtc->alarm_enabled;
wkalrm->pending = !!(rtc_readl(rtc, LPC32XX_RTC_INTSTAT) &
LPC32XX_RTC_INTSTAT_MATCH0);
@@ -111,13 +102,8 @@ static int lpc32xx_rtc_set_alarm(struct device *dev,
struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
unsigned long alarmsecs;
u32 tmp;
- int ret;
- ret = rtc_tm_to_time(&wkalrm->time, &alarmsecs);
- if (ret < 0) {
- dev_warn(dev, "Failed to convert time: %d\n", ret);
- return ret;
- }
+ alarmsecs = rtc_tm_to_time64(&wkalrm->time);
spin_lock_irq(&rtc->lock);
@@ -191,7 +177,7 @@ static irqreturn_t lpc32xx_rtc_alarm_interrupt(int irq, void *dev)
static const struct rtc_class_ops lpc32xx_rtc_ops = {
.read_time = lpc32xx_rtc_read_time,
- .set_mmss = lpc32xx_rtc_set_mmss,
+ .set_time = lpc32xx_rtc_set_time,
.read_alarm = lpc32xx_rtc_read_alarm,
.set_alarm = lpc32xx_rtc_set_alarm,
.alarm_irq_enable = lpc32xx_rtc_alarm_irq_enable,
@@ -201,21 +187,13 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct lpc32xx_rtc *rtc;
- int rtcirq;
+ int err;
u32 tmp;
- rtcirq = platform_get_irq(pdev, 0);
- if (rtcirq < 0) {
- dev_warn(&pdev->dev, "Can't get interrupt resource\n");
- rtcirq = -1;
- }
-
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (unlikely(!rtc))
return -ENOMEM;
- rtc->irq = rtcirq;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rtc->rtc_base))
@@ -256,18 +234,25 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, RTC_NAME,
- &lpc32xx_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc)) {
- dev_err(&pdev->dev, "Can't get RTC\n");
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc))
return PTR_ERR(rtc->rtc);
- }
+
+ rtc->rtc->ops = &lpc32xx_rtc_ops;
+ rtc->rtc->range_max = U32_MAX;
+
+ err = rtc_register_device(rtc->rtc);
+ if (err)
+ return err;
/*
* IRQ is enabled after device registration in case alarm IRQ
* is pending upon suspend exit.
*/
- if (rtc->irq >= 0) {
+ rtc->irq = platform_get_irq(pdev, 0);
+ if (rtc->irq < 0) {
+ dev_warn(&pdev->dev, "Can't get interrupt resource\n");
+ } else {
if (devm_request_irq(&pdev->dev, rtc->irq,
lpc32xx_rtc_alarm_interrupt,
0, pdev->name, rtc) < 0) {
@@ -374,7 +359,7 @@ static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
.remove = lpc32xx_rtc_remove,
.driver = {
- .name = RTC_NAME,
+ .name = "rtc-lpc32xx",
.pm = LPC32XX_RTC_PM_OPS,
.of_match_table = of_match_ptr(lpc32xx_rtc_match),
},
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index ebf50b1540f2..dd5a8991f75b 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -802,7 +802,7 @@ static int wdt_open(struct inode *inode, struct file *file)
*/
wdt_is_open = 1;
mutex_unlock(&m41t80_rtc_mutex);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
return -ENODEV;
}
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 0fa33708fc49..afce2c0b4bd6 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Real Time Clock driver for Freescale MC13XXX PMIC
*
* (C) 2009 Sascha Hauer, Pengutronix
* (C) 2009 Uwe Kleine-Koenig, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/mfd/mc13xxx.h>
@@ -89,14 +86,14 @@ static int mc13xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-static int mc13xxx_rtc_set_mmss(struct device *dev, time64_t secs)
+static int mc13xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
unsigned int seconds, days;
unsigned int alarmseconds;
int ret;
- days = div_s64_rem(secs, SEC_PER_DAY, &seconds);
+ days = div_s64_rem(rtc_tm_to_time64(tm), SEC_PER_DAY, &seconds);
mc13xxx_lock(priv->mc13xxx);
@@ -158,7 +155,7 @@ out:
static int mc13xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
- unsigned seconds, days;
+ unsigned int seconds, days;
time64_t s1970;
int enabled, pending;
int ret;
@@ -253,7 +250,7 @@ static irqreturn_t mc13xxx_rtc_alarm_handler(int irq, void *dev)
static const struct rtc_class_ops mc13xxx_rtc_ops = {
.read_time = mc13xxx_rtc_read_time,
- .set_mmss64 = mc13xxx_rtc_set_mmss,
+ .set_time = mc13xxx_rtc_set_time,
.read_alarm = mc13xxx_rtc_read_alarm,
.set_alarm = mc13xxx_rtc_set_alarm,
.alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable,
@@ -285,8 +282,15 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
priv->mc13xxx = mc13xxx;
priv->valid = 1;
+ priv->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(priv->rtc))
+ return PTR_ERR(priv->rtc);
platform_set_drvdata(pdev, priv);
+ priv->rtc->ops = &mc13xxx_rtc_ops;
+ /* 15bit days + hours, minutes, seconds */
+ priv->rtc->range_max = (timeu64_t)(1 << 15) * SEC_PER_DAY - 1;
+
mc13xxx_lock(mc13xxx);
mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_RTCRST);
@@ -303,8 +307,9 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
mc13xxx_unlock(mc13xxx);
- priv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &mc13xxx_rtc_ops, THIS_MODULE);
+ ret = rtc_register_device(priv->rtc);
+ if (ret)
+ goto err_irq_request;
return 0;
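The new range_max values are plain arithmetic on the width of the hardware day counter: an N-bit day register plus a time-of-day counter can hold at most 2^N * 86400 - 1 seconds from the chip's epoch. The same calculation recurs in rtc-mxc (9- and 16-bit day counters) and rtc-pcap (14-bit). A standalone userspace check of those numbers, written for this note rather than taken from the drivers:

#include <stdio.h>
#include <stdint.h>

#define SEC_PER_DAY 86400ULL

static uint64_t day_counter_range_max(unsigned int day_bits)
{
        /* last representable second: (2^day_bits days) minus one second */
        return (1ULL << day_bits) * SEC_PER_DAY - 1;
}

int main(void)
{
        printf("mc13xxx, 15-bit days: %llu\n",
               (unsigned long long)day_counter_range_max(15));
        printf("mxc imx1, 9-bit days: %llu\n",
               (unsigned long long)day_counter_range_max(9));
        printf("mxc, 16-bit days:     %llu\n",
               (unsigned long long)day_counter_range_max(16));
        printf("pcap, 14-bit days:    %llu\n",
               (unsigned long long)day_counter_range_max(14));
        return 0;
}

The 15-bit case prints 2831155199, matching (timeu64_t)(1 << 15) * SEC_PER_DAY - 1 above.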
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index e9a25ec4d434..c06cf5202e02 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -343,7 +343,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
rtc->irq, ret);
- goto out_dispose_irq;
+ return ret;
}
device_init_wakeup(&pdev->dev, 1);
@@ -359,9 +359,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
return 0;
out_free_irq:
- free_irq(rtc->irq, rtc->rtc_dev);
-out_dispose_irq:
- irq_dispose_mapping(rtc->irq);
+ free_irq(rtc->irq, rtc);
return ret;
}
@@ -369,8 +367,7 @@ static int mtk_rtc_remove(struct platform_device *pdev)
{
struct mt6397_rtc *rtc = platform_get_drvdata(pdev);
- free_irq(rtc->irq, rtc->rtc_dev);
- irq_dispose_mapping(rtc->irq);
+ free_irq(rtc->irq, rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index e7f14bd12fe3..ab9db57a6834 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the RTC in Marvell SoCs.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
@@ -60,7 +57,7 @@ static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_reg = (bin2bcd(tm->tm_mday) << RTC_MDAY_OFFS) |
(bin2bcd(tm->tm_mon + 1) << RTC_MONTH_OFFS) |
- (bin2bcd(tm->tm_year % 100) << RTC_YEAR_OFFS);
+ (bin2bcd(tm->tm_year - 100) << RTC_YEAR_OFFS);
writel(rtc_reg, ioaddr + RTC_DATE_REG_OFFS);
return 0;
@@ -159,7 +156,7 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
<< RTC_MONTH_OFFS;
if (alm->time.tm_year >= 0)
- rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_year % 100))
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_year - 100))
<< RTC_YEAR_OFFS;
writel(rtc_reg, ioaddr + RTC_ALARM_DATE_REG_OFFS);
@@ -257,15 +254,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
- if (pdata->irq >= 0) {
- device_init_wakeup(&pdev->dev, 1);
- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &mv_rtc_alarm_ops,
- THIS_MODULE);
- } else {
- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &mv_rtc_ops, THIS_MODULE);
- }
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(pdata->rtc)) {
ret = PTR_ERR(pdata->rtc);
goto out;
@@ -281,7 +270,19 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
}
}
- return 0;
+ if (pdata->irq >= 0) {
+ device_init_wakeup(&pdev->dev, 1);
+ pdata->rtc->ops = &mv_rtc_alarm_ops;
+ } else {
+ pdata->rtc->ops = &mv_rtc_ops;
+ }
+
+ pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ ret = rtc_register_device(pdata->rtc);
+ if (!ret)
+ return 0;
out:
if (!IS_ERR(pdata->clk))
clk_disable_unprepare(pdata->clk);
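Replacing tm_year % 100 with tm_year - 100 ties the register value to the 2000..2099 window that range_min/range_max now declare: struct rtc_time stores years as an offset from 1900, so 2019 arrives as 119 and the chip's two-digit year becomes 119 - 100 = 19, whereas the old modulo would have written the same value for 1919 and 2019. A throwaway userspace sketch of the conversion, with bin2bcd() reimplemented locally for illustration:

#include <stdio.h>
#include <stdint.h>

/* local stand-in for the kernel's bin2bcd() helper */
static uint8_t bin2bcd(unsigned int val)
{
        return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
        int tm_year = 119;                       /* rtc_time year for 2019 */
        uint8_t reg = bin2bcd(tm_year - 100);

        printf("year register = 0x%02x\n", reg); /* prints 0x19 */
        return 0;
}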
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 878c6ee82901..e697e96612bb 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -253,20 +254,9 @@ static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
/*
* This function sets the internal RTC time based on tm in Gregorian date.
*/
-static int mxc_rtc_set_mmss(struct device *dev, time64_t time)
+static int mxc_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- /*
- * TTC_DAYR register is 9-bit in MX1 SoC, save time and day of year only
- */
- if (is_imx1_rtc(pdata)) {
- struct rtc_time tm;
-
- rtc_time64_to_tm(time, &tm);
- tm.tm_year = 70;
- time = rtc_tm_to_time64(&tm);
- }
+ time64_t time = rtc_tm_to_time64(tm);
/* Avoid roll-over from reading the different registers */
do {
@@ -310,7 +300,7 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* RTC layer */
static const struct rtc_class_ops mxc_rtc_ops = {
.read_time = mxc_rtc_read_time,
- .set_mmss64 = mxc_rtc_set_mmss,
+ .set_time = mxc_rtc_set_time,
.read_alarm = mxc_rtc_read_alarm,
.set_alarm = mxc_rtc_set_alarm,
.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
@@ -318,7 +308,6 @@ static const struct rtc_class_ops mxc_rtc_ops = {
static int mxc_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct rtc_device *rtc;
struct rtc_plat_data *pdata = NULL;
u32 reg;
@@ -336,11 +325,34 @@ static int mxc_rtc_probe(struct platform_device *pdev)
else
pdata->devtype = pdev->id_entry->driver_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ pdata->rtc = rtc;
+ rtc->ops = &mxc_rtc_ops;
+ if (is_imx1_rtc(pdata)) {
+ struct rtc_time tm;
+
+ /* 9bit days + hours, minutes, seconds */
+ rtc->range_max = (1 << 9) * 86400 - 1;
+
+ /*
+ * Set the start date to the beginning of the current year. This can
+ * be overridden via the device tree.
+ */
+ rtc_time64_to_tm(ktime_get_real_seconds(), &tm);
+ rtc->start_secs = mktime64(tm.tm_year + 1900, 1, 1, 0, 0, 0);
+ rtc->set_start_time = true;
+ } else {
+ /* 16bit days + hours, minutes, seconds */
+ rtc->range_max = (1 << 16) * 86400ULL - 1;
+ }
+
pdata->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(pdata->clk_ipg)) {
dev_err(&pdev->dev, "unable to get ipg clock!\n");
@@ -396,17 +408,16 @@ static int mxc_rtc_probe(struct platform_device *pdev)
pdata->irq = -1;
}
- if (pdata->irq >= 0)
+ if (pdata->irq >= 0) {
device_init_wakeup(&pdev->dev, 1);
-
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &mxc_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto exit_put_clk_ref;
+ ret = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
}
- pdata->rtc = rtc;
+ ret = rtc_register_device(rtc);
+ if (ret)
+ goto exit_put_clk_ref;
return 0;
@@ -428,35 +439,10 @@ static int mxc_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mxc_rtc_suspend(struct device *dev)
-{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(pdata->irq);
-
- return 0;
-}
-
-static int mxc_rtc_resume(struct device *dev)
-{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(pdata->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(mxc_rtc_pm_ops, mxc_rtc_suspend, mxc_rtc_resume);
-
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
.of_match_table = of_match_ptr(imx_rtc_dt_ids),
- .pm = &mxc_rtc_pm_ops,
},
.id_table = imx_rtc_devtype,
.probe = mxc_rtc_probe,
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index 007879a5042d..5b970a816631 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/rtc.h>
#define SRTC_LPPDR_INIT 0x41736166 /* init for glitch detect */
@@ -305,6 +306,9 @@ static int mxc_rtc_probe(struct platform_device *pdev)
return pdata->irq;
device_init_wakeup(&pdev->dev, 1);
+ ret = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
ret = clk_prepare_enable(pdata->clk);
if (ret)
@@ -367,30 +371,6 @@ static int mxc_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mxc_rtc_suspend(struct device *dev)
-{
- struct mxc_rtc_data *pdata = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(pdata->irq);
-
- return 0;
-}
-
-static int mxc_rtc_resume(struct device *dev)
-{
- struct mxc_rtc_data *pdata = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(pdata->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(mxc_rtc_pm_ops, mxc_rtc_suspend, mxc_rtc_resume);
-
static const struct of_device_id mxc_ids[] = {
{ .compatible = "fsl,imx53-rtc", },
{}
@@ -400,7 +380,6 @@ static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc_v2",
.of_match_table = mxc_ids,
- .pm = &mxc_rtc_pm_ops,
},
.probe = mxc_rtc_probe,
.remove = mxc_rtc_remove,
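rtc-mxc and rtc-mxc_v2 (and rtc-snvs further down) delete their suspend/resume callbacks after this change: once the wake-capable IRQ is handed to the PM core with dev_pm_set_wake_irq(), the core calls enable_irq_wake()/disable_irq_wake() around system sleep whenever the device may wake the system, so the hand-rolled handlers only duplicated that. A minimal sketch of the replacement pattern; foo_wake_probe() is a placeholder and error handling is trimmed:

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>

static int foo_wake_probe(struct platform_device *pdev)
{
        int irq, ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        device_init_wakeup(&pdev->dev, true);

        /*
         * Register the IRQ with the PM core; it arms and disarms irq
         * wake across suspend/resume on its own, which is why the
         * drivers above no longer need dedicated PM callbacks for it.
         */
        ret = dev_pm_set_wake_irq(&pdev->dev, irq);
        if (ret)
                dev_err(&pdev->dev, "failed to enable irq wake\n");

        return 0;
}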
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 19b11b824d9c..a2941c875a06 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* TI OMAP Real Time Clock interface for Linux
*
@@ -6,11 +7,6 @@
*
* Copyright (C) 2006 David Brownell (new RTC framework)
* Copyright (C) 2014 Johan Hovold <johan@kernel.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <dt-bindings/gpio/gpio.h>
@@ -271,7 +267,7 @@ static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
}
/* this hardware doesn't support "don't care" alarm fields */
-static int tm2bcd(struct rtc_time *tm)
+static void tm2bcd(struct rtc_time *tm)
{
tm->tm_sec = bin2bcd(tm->tm_sec);
tm->tm_min = bin2bcd(tm->tm_min);
@@ -279,13 +275,7 @@ static int tm2bcd(struct rtc_time *tm)
tm->tm_mday = bin2bcd(tm->tm_mday);
tm->tm_mon = bin2bcd(tm->tm_mon + 1);
-
- /* epoch == 1900 */
- if (tm->tm_year < 100 || tm->tm_year > 199)
- return -EINVAL;
tm->tm_year = bin2bcd(tm->tm_year - 100);
-
- return 0;
}
static void bcd2tm(struct rtc_time *tm)
@@ -328,8 +318,7 @@ static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct omap_rtc *rtc = dev_get_drvdata(dev);
- if (tm2bcd(tm) < 0)
- return -EINVAL;
+ tm2bcd(tm);
local_irq_disable();
rtc_wait_not_busy(rtc);
@@ -378,8 +367,7 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
struct omap_rtc *rtc = dev_get_drvdata(dev);
u8 reg, irqwake_reg = 0;
- if (tm2bcd(&alm->time) < 0)
- return -EINVAL;
+ tm2bcd(&alm->time);
local_irq_disable();
rtc_wait_not_busy(rtc);
@@ -441,14 +429,10 @@ again:
omap_rtc_read_time_raw(rtc, &tm);
seconds = tm.tm_sec;
bcd2tm(&tm);
- rtc_tm_to_time(&tm, &now);
- rtc_time_to_tm(now + 1, &tm);
+ now = rtc_tm_to_time64(&tm);
+ rtc_time64_to_tm(now + 1, &tm);
- if (tm2bcd(&tm) < 0) {
- dev_err(&rtc->rtc->dev, "power off failed\n");
- rtc->type->lock(rtc);
- return -EINVAL;
- }
+ tm2bcd(&tm);
rtc_wait_not_busy(rtc);
@@ -878,6 +862,8 @@ static int omap_rtc_probe(struct platform_device *pdev)
}
rtc->rtc->ops = &omap_rtc_ops;
+ rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
omap_rtc_nvmem_config.priv = rtc;
/* handle periodic and alarm irqs */
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 60f2250fd96b..3dd9d266ce09 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -224,7 +224,7 @@ exit:
return rc;
}
-int opal_tpo_alarm_irq_enable(struct device *dev, unsigned int enabled)
+static int opal_tpo_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct rtc_wkalrm alarm = { .enabled = 0 };
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index f176cb9d0dbc..178bfb1dea21 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pcap rtc code for Motorola EZX phones
*
@@ -5,11 +6,6 @@
* Copyright (c) 2009 Daniel Ribeiro <drwyrm@gmail.com>
*
* Based on Motorola's rtc.c Copyright (c) 2003-2005 Motorola
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/kernel.h>
@@ -55,7 +51,7 @@ static int pcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAYA, &days);
secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;
- rtc_time_to_tm(secs, tm);
+ rtc_time64_to_tm(secs, tm);
return 0;
}
@@ -63,12 +59,9 @@ static int pcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int pcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
- struct rtc_time *tm = &alrm->time;
- unsigned long secs;
+ unsigned long secs = rtc_tm_to_time64(&alrm->time);
u32 tod, days;
- rtc_tm_to_time(tm, &secs);
-
tod = secs % SEC_PER_DAY;
ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_TODA, tod);
@@ -90,14 +83,15 @@ static int pcap_rtc_read_time(struct device *dev, struct rtc_time *tm)
ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAY, &days);
secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;
- rtc_time_to_tm(secs, tm);
+ rtc_time64_to_tm(secs, tm);
return 0;
}
-static int pcap_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int pcap_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
+ unsigned long secs = rtc_tm_to_time64(tm);
u32 tod, days;
tod = secs % SEC_PER_DAY;
@@ -128,9 +122,9 @@ static int pcap_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
static const struct rtc_class_ops pcap_rtc_ops = {
.read_time = pcap_rtc_read_time,
+ .set_time = pcap_rtc_set_time,
.read_alarm = pcap_rtc_read_alarm,
.set_alarm = pcap_rtc_set_alarm,
- .set_mmss = pcap_rtc_set_mmss,
.alarm_irq_enable = pcap_rtc_alarm_irq_enable,
};
@@ -149,11 +143,13 @@ static int __init pcap_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcap_rtc);
- pcap_rtc->rtc = devm_rtc_device_register(&pdev->dev, "pcap",
- &pcap_rtc_ops, THIS_MODULE);
+ pcap_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(pcap_rtc->rtc))
return PTR_ERR(pcap_rtc->rtc);
+ pcap_rtc->rtc->ops = &pcap_rtc_ops;
+ pcap_rtc->rtc->range_max = (1 << 14) * 86400ULL - 1;
+
timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ);
alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA);
@@ -167,7 +163,7 @@ static int __init pcap_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return 0;
+ return rtc_register_device(pcap_rtc->rtc);
}
static int __exit pcap_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index f6ce63c443a0..1afa6d9fa9fb 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* An I2C driver for the PCF85063 RTC
* Copyright 2014 Rose Technology
@@ -5,16 +6,16 @@
* Author: Søren Andersen <san@rosetechnology.dk>
* Maintainers: http://www.nslu2-linux.org/
*
- * based on the other drivers in this same directory.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2019 Micro Crystal AG
+ * Author: Alexandre Belloni <alexandre.belloni@bootlin.com>
*/
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
/*
* Information for this driver was pulled from the following datasheets.
@@ -24,61 +25,47 @@
*
* PCF85063A -- Rev. 6 — 18 November 2015
* PCF85063TP -- Rev. 4 — 6 May 2015
-*/
+ *
+ * https://www.microcrystal.com/fileadmin/Media/Products/RTC/App.Manual/RV-8263-C7_App-Manual.pdf
+ * RV8263 -- Rev. 1.0 — January 2019
+ */
#define PCF85063_REG_CTRL1 0x00 /* status */
#define PCF85063_REG_CTRL1_CAP_SEL BIT(0)
#define PCF85063_REG_CTRL1_STOP BIT(5)
-#define PCF85063_REG_SC 0x04 /* datetime */
-#define PCF85063_REG_SC_OS 0x80
-
-static struct i2c_driver pcf85063_driver;
-
-static int pcf85063_stop_clock(struct i2c_client *client, u8 *ctrl1)
-{
- int rc;
- u8 reg;
+#define PCF85063_REG_CTRL2 0x01
+#define PCF85063_CTRL2_AF BIT(6)
+#define PCF85063_CTRL2_AIE BIT(7)
- rc = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
- if (rc < 0) {
- dev_err(&client->dev, "Failing to stop the clock\n");
- return -EIO;
- }
+#define PCF85063_REG_OFFSET 0x02
+#define PCF85063_OFFSET_SIGN_BIT 6 /* 2's complement sign bit */
+#define PCF85063_OFFSET_MODE BIT(7)
+#define PCF85063_OFFSET_STEP0 4340
+#define PCF85063_OFFSET_STEP1 4069
- /* stop the clock */
- reg = rc | PCF85063_REG_CTRL1_STOP;
+#define PCF85063_REG_RAM 0x03
- rc = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, reg);
- if (rc < 0) {
- dev_err(&client->dev, "Failing to stop the clock\n");
- return -EIO;
- }
-
- *ctrl1 = reg;
-
- return 0;
-}
-
-static int pcf85063_start_clock(struct i2c_client *client, u8 ctrl1)
-{
- int rc;
+#define PCF85063_REG_SC 0x04 /* datetime */
+#define PCF85063_REG_SC_OS 0x80
- /* start the clock */
- ctrl1 &= ~PCF85063_REG_CTRL1_STOP;
+#define PCF85063_REG_ALM_S 0x0b
+#define PCF85063_AEN BIT(7)
- rc = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, ctrl1);
- if (rc < 0) {
- dev_err(&client->dev, "Failing to start the clock\n");
- return -EIO;
- }
+struct pcf85063_config {
+ struct regmap_config regmap;
+ unsigned has_alarms:1;
+ unsigned force_cap_7000:1;
+};
- return 0;
-}
+struct pcf85063 {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+};
static int pcf85063_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct pcf85063 *pcf85063 = dev_get_drvdata(dev);
int rc;
u8 regs[7];
@@ -88,16 +75,14 @@ static int pcf85063_rtc_read_time(struct device *dev, struct rtc_time *tm)
* event, the access must be finished within one second. So, read all
* time/date registers in one turn.
*/
- rc = i2c_smbus_read_i2c_block_data(client, PCF85063_REG_SC,
- sizeof(regs), regs);
- if (rc != sizeof(regs)) {
- dev_err(&client->dev, "date/time register read error\n");
- return -EIO;
- }
+ rc = regmap_bulk_read(pcf85063->regmap, PCF85063_REG_SC, regs,
+ sizeof(regs));
+ if (rc)
+ return rc;
/* if the clock has lost its power it makes no sense to use its time */
if (regs[0] & PCF85063_REG_SC_OS) {
- dev_warn(&client->dev, "Power loss detected, invalid time\n");
+ dev_warn(&pcf85063->rtc->dev, "Power loss detected, invalid time\n");
return -EINVAL;
}
@@ -115,20 +100,18 @@ static int pcf85063_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int pcf85063_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct pcf85063 *pcf85063 = dev_get_drvdata(dev);
int rc;
u8 regs[7];
- u8 ctrl1;
-
- if ((tm->tm_year < 100) || (tm->tm_year > 199))
- return -EINVAL;
/*
* to accurately set the time, reset the divider chain and keep it in
* reset state until all time/date registers are written
*/
- rc = pcf85063_stop_clock(client, &ctrl1);
- if (rc != 0)
+ rc = regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL1,
+ PCF85063_REG_CTRL1_STOP,
+ PCF85063_REG_CTRL1_STOP);
+ if (rc)
return rc;
/* hours, minutes and seconds */
@@ -150,101 +133,351 @@ static int pcf85063_rtc_set_time(struct device *dev, struct rtc_time *tm)
regs[6] = bin2bcd(tm->tm_year - 100);
/* write all registers at once */
- rc = i2c_smbus_write_i2c_block_data(client, PCF85063_REG_SC,
- sizeof(regs), regs);
- if (rc < 0) {
- dev_err(&client->dev, "date/time register write error\n");
+ rc = regmap_bulk_write(pcf85063->regmap, PCF85063_REG_SC,
+ regs, sizeof(regs));
+ if (rc)
return rc;
- }
/*
* Write the control register as a separate action since the size of
* the register space is different between the PCF85063TP and
* PCF85063A devices. The rollover point can not be used.
*/
- rc = pcf85063_start_clock(client, ctrl1);
- if (rc != 0)
- return rc;
+ return regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL1,
+ PCF85063_REG_CTRL1_STOP, 0);
+}
+
+static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct pcf85063 *pcf85063 = dev_get_drvdata(dev);
+ u8 buf[4];
+ unsigned int val;
+ int ret;
+
+ ret = regmap_bulk_read(pcf85063->regmap, PCF85063_REG_ALM_S,
+ buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ alrm->time.tm_sec = bcd2bin(buf[0]);
+ alrm->time.tm_min = bcd2bin(buf[1]);
+ alrm->time.tm_hour = bcd2bin(buf[2]);
+ alrm->time.tm_mday = bcd2bin(buf[3]);
+
+ ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & PCF85063_CTRL2_AIE);
return 0;
}
+static int pcf85063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct pcf85063 *pcf85063 = dev_get_drvdata(dev);
+ u8 buf[5];
+ int ret;
+
+ buf[0] = bin2bcd(alrm->time.tm_sec);
+ buf[1] = bin2bcd(alrm->time.tm_min);
+ buf[2] = bin2bcd(alrm->time.tm_hour);
+ buf[3] = bin2bcd(alrm->time.tm_mday);
+ buf[4] = PCF85063_AEN; /* Do not match on week day */
+
+ ret = regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL2,
+ PCF85063_CTRL2_AIE | PCF85063_CTRL2_AF, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(pcf85063->regmap, PCF85063_REG_ALM_S,
+ buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL2,
+ PCF85063_CTRL2_AIE | PCF85063_CTRL2_AF,
+ alrm->enabled ? PCF85063_CTRL2_AIE | PCF85063_CTRL2_AF : PCF85063_CTRL2_AF);
+}
+
+static int pcf85063_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct pcf85063 *pcf85063 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL2,
+ PCF85063_CTRL2_AIE,
+ enabled ? PCF85063_CTRL2_AIE : 0);
+}
+
+static irqreturn_t pcf85063_rtc_handle_irq(int irq, void *dev_id)
+{
+ struct pcf85063 *pcf85063 = dev_id;
+ unsigned int val;
+ int err;
+
+ err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val);
+ if (err)
+ return IRQ_NONE;
+
+ if (val & PCF85063_CTRL2_AF) {
+ rtc_update_irq(pcf85063->rtc, 1, RTC_IRQF | RTC_AF);
+ regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL2,
+ PCF85063_CTRL2_AIE | PCF85063_CTRL2_AF,
+ 0);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int pcf85063_read_offset(struct device *dev, long *offset)
+{
+ struct pcf85063 *pcf85063 = dev_get_drvdata(dev);
+ long val;
+ u32 reg;
+ int ret;
+
+ ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &reg);
+ if (ret < 0)
+ return ret;
+
+ val = sign_extend32(reg & ~PCF85063_OFFSET_MODE,
+ PCF85063_OFFSET_SIGN_BIT);
+
+ if (reg & PCF85063_OFFSET_MODE)
+ *offset = val * PCF85063_OFFSET_STEP1;
+ else
+ *offset = val * PCF85063_OFFSET_STEP0;
+
+ return 0;
+}
+
+static int pcf85063_set_offset(struct device *dev, long offset)
+{
+ struct pcf85063 *pcf85063 = dev_get_drvdata(dev);
+ s8 mode0, mode1, reg;
+ unsigned int error0, error1;
+
+ if (offset > PCF85063_OFFSET_STEP0 * 63)
+ return -ERANGE;
+ if (offset < PCF85063_OFFSET_STEP0 * -64)
+ return -ERANGE;
+
+ mode0 = DIV_ROUND_CLOSEST(offset, PCF85063_OFFSET_STEP0);
+ mode1 = DIV_ROUND_CLOSEST(offset, PCF85063_OFFSET_STEP1);
+
+ error0 = abs(offset - (mode0 * PCF85063_OFFSET_STEP0));
+ error1 = abs(offset - (mode1 * PCF85063_OFFSET_STEP1));
+ if (mode1 > 63 || mode1 < -64 || error0 < error1)
+ reg = mode0 & ~PCF85063_OFFSET_MODE;
+ else
+ reg = mode1 | PCF85063_OFFSET_MODE;
+
+ return regmap_write(pcf85063->regmap, PCF85063_REG_OFFSET, reg);
+}
+
+static int pcf85063_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct pcf85063 *pcf85063 = dev_get_drvdata(dev);
+ int status, ret = 0;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ ret = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &status);
+ if (ret < 0)
+ return ret;
+
+ if (status & PCF85063_REG_SC_OS)
+ dev_warn(&pcf85063->rtc->dev, "Voltage low, data loss detected.\n");
+
+ status &= PCF85063_REG_SC_OS;
+
+ if (copy_to_user((void __user *)arg, &status, sizeof(int)))
+ return -EFAULT;
+
+ return 0;
+
+ case RTC_VL_CLR:
+ ret = regmap_update_bits(pcf85063->regmap, PCF85063_REG_SC,
+ PCF85063_REG_SC_OS, 0);
+
+ return ret;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
static const struct rtc_class_ops pcf85063_rtc_ops = {
.read_time = pcf85063_rtc_read_time,
- .set_time = pcf85063_rtc_set_time
+ .set_time = pcf85063_rtc_set_time,
+ .read_offset = pcf85063_read_offset,
+ .set_offset = pcf85063_set_offset,
+ .ioctl = pcf85063_ioctl,
+};
+
+static const struct rtc_class_ops pcf85063_rtc_ops_alarm = {
+ .read_time = pcf85063_rtc_read_time,
+ .set_time = pcf85063_rtc_set_time,
+ .read_offset = pcf85063_read_offset,
+ .set_offset = pcf85063_set_offset,
+ .read_alarm = pcf85063_rtc_read_alarm,
+ .set_alarm = pcf85063_rtc_set_alarm,
+ .alarm_irq_enable = pcf85063_rtc_alarm_irq_enable,
+ .ioctl = pcf85063_ioctl,
};
-static int pcf85063_load_capacitance(struct i2c_client *client)
+static int pcf85063_nvmem_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
{
- u32 load;
- int rc;
- u8 reg;
+ return regmap_read(priv, PCF85063_REG_RAM, val);
+}
- rc = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
- if (rc < 0)
- return rc;
+static int pcf85063_nvmem_write(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ return regmap_write(priv, PCF85063_REG_RAM, *(u8 *)val);
+}
- reg = rc;
- load = 7000;
- of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads",
- &load);
+static int pcf85063_load_capacitance(struct pcf85063 *pcf85063,
+ const struct device_node *np,
+ unsigned int force_cap)
+{
+ u32 load = 7000;
+ u8 reg = 0;
+
+ if (force_cap)
+ load = force_cap;
+ else
+ of_property_read_u32(np, "quartz-load-femtofarads", &load);
switch (load) {
default:
- dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 7000",
+ dev_warn(&pcf85063->rtc->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 7000",
load);
/* fall through */
case 7000:
- reg &= ~PCF85063_REG_CTRL1_CAP_SEL;
break;
case 12500:
- reg |= PCF85063_REG_CTRL1_CAP_SEL;
+ reg = PCF85063_REG_CTRL1_CAP_SEL;
break;
}
- rc = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, reg);
-
- return rc;
+ return regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL1,
+ PCF85063_REG_CTRL1_CAP_SEL, reg);
}
-static int pcf85063_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct pcf85063_config pcf85063a_config = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+};
+
+static const struct pcf85063_config pcf85063tp_config = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
+};
+
+static const struct pcf85063_config rv8263_config = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ .force_cap_7000 = 1,
+};
+
+static int pcf85063_probe(struct i2c_client *client)
{
- struct rtc_device *rtc;
+ struct pcf85063 *pcf85063;
+ unsigned int tmp;
int err;
+ const struct pcf85063_config *config = &pcf85063tp_config;
+ const void *data = of_device_get_match_data(&client->dev);
+ struct nvmem_config nvmem_cfg = {
+ .name = "pcf85063_nvram",
+ .reg_read = pcf85063_nvmem_read,
+ .reg_write = pcf85063_nvmem_write,
+ .type = NVMEM_TYPE_BATTERY_BACKED,
+ .size = 1,
+ };
dev_dbg(&client->dev, "%s\n", __func__);
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
+ pcf85063 = devm_kzalloc(&client->dev, sizeof(struct pcf85063),
+ GFP_KERNEL);
+ if (!pcf85063)
+ return -ENOMEM;
+
+ if (data)
+ config = data;
+
+ pcf85063->regmap = devm_regmap_init_i2c(client, &config->regmap);
+ if (IS_ERR(pcf85063->regmap))
+ return PTR_ERR(pcf85063->regmap);
- err = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
- if (err < 0) {
+ i2c_set_clientdata(client, pcf85063);
+
+ err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp);
+ if (err) {
dev_err(&client->dev, "RTC chip is not present\n");
return err;
}
- err = pcf85063_load_capacitance(client);
+ pcf85063->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(pcf85063->rtc))
+ return PTR_ERR(pcf85063->rtc);
+
+ err = pcf85063_load_capacitance(pcf85063, client->dev.of_node,
+ config->force_cap_7000 ? 7000 : 0);
if (err < 0)
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
err);
- rtc = devm_rtc_device_register(&client->dev,
- pcf85063_driver.driver.name,
- &pcf85063_rtc_ops, THIS_MODULE);
+ pcf85063->rtc->ops = &pcf85063_rtc_ops;
+ pcf85063->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ pcf85063->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ pcf85063->rtc->uie_unsupported = 1;
+
+ if (config->has_alarms && client->irq > 0) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, pcf85063_rtc_handle_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "pcf85063", pcf85063);
+ if (err) {
+ dev_warn(&pcf85063->rtc->dev,
+ "unable to request IRQ, alarms disabled\n");
+ } else {
+ pcf85063->rtc->ops = &pcf85063_rtc_ops_alarm;
+ device_init_wakeup(&client->dev, true);
+ err = dev_pm_set_wake_irq(&client->dev, client->irq);
+ if (err)
+ dev_err(&pcf85063->rtc->dev,
+ "failed to enable irq wake\n");
+ }
+ }
+
+ nvmem_cfg.priv = pcf85063->regmap;
+ rtc_nvmem_register(pcf85063->rtc, &nvmem_cfg);
- return PTR_ERR_OR_ZERO(rtc);
+ return rtc_register_device(pcf85063->rtc);
}
-static const struct i2c_device_id pcf85063_id[] = {
- { "pcf85063", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, pcf85063_id);
-
#ifdef CONFIG_OF
static const struct of_device_id pcf85063_of_match[] = {
- { .compatible = "nxp,pcf85063" },
+ { .compatible = "nxp,pcf85063", .data = &pcf85063tp_config },
+ { .compatible = "nxp,pcf85063tp", .data = &pcf85063tp_config },
+ { .compatible = "nxp,pcf85063a", .data = &pcf85063a_config },
+ { .compatible = "microcrystal,rv8263", .data = &rv8263_config },
{}
};
MODULE_DEVICE_TABLE(of, pcf85063_of_match);
@@ -255,8 +488,7 @@ static struct i2c_driver pcf85063_driver = {
.name = "rtc-pcf85063",
.of_match_table = of_match_ptr(pcf85063_of_match),
},
- .probe = pcf85063_probe,
- .id_table = pcf85063_id,
+ .probe_new = pcf85063_probe,
};
module_i2c_driver(pcf85063_driver);
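pcf85063_set_offset() chooses between the two correction modes of the offset register, whose steps the new defines give as 4340 ppb and 4069 ppb per LSB over a signed 7-bit range (-64..63): the requested offset is rounded in both modes and whichever result lands closer wins, with mode 0 as the fallback when mode 1 would overflow. A standalone rerun of that selection for a requested 10000 ppb, plain userspace C with DIV_ROUND_CLOSEST() approximated locally:

#include <stdio.h>
#include <stdlib.h>

#define STEP0 4340        /* ppb per LSB in mode 0 */
#define STEP1 4069        /* ppb per LSB in mode 1 */

/* round-to-nearest division, like the kernel's DIV_ROUND_CLOSEST() */
static long div_closest(long a, long b)
{
        return (a + (a < 0 ? -b / 2 : b / 2)) / b;
}

int main(void)
{
        long offset = 10000;        /* requested correction in ppb */
        long mode0 = div_closest(offset, STEP0);
        long mode1 = div_closest(offset, STEP1);
        long error0 = labs(offset - mode0 * STEP0);
        long error1 = labs(offset - mode1 * STEP1);

        if (mode1 > 63 || mode1 < -64 || error0 < error1)
                printf("mode 0, register value %ld, error %ld ppb\n",
                       mode0, error0);
        else
                printf("mode 1, register value %ld, error %ld ppb\n",
                       mode1, error1);
        return 0;
}

For 10000 ppb this picks mode 0 with a register value of 2 (error 1320 ppb against 1862 ppb in mode 1), the same choice the driver makes.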
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index a3988079f60a..a075e77617dc 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/rtc/rtc-pcf85363.c
*
* Driver for NXP PCF85363 real-time clock.
*
* Copyright (C) 2017 Eric Nelson
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based loosely on rtc-8583 by Russell King, Wolfram Sang and Juergen Beisert
*/
#include <linux/module.h>
#include <linux/i2c.h>
@@ -112,10 +107,7 @@
#define NVRAM_SIZE 0x40
-static struct i2c_driver pcf85363_driver;
-
struct pcf85363 {
- struct device *dev;
struct rtc_device *rtc;
struct regmap *regmap;
};
@@ -386,9 +378,6 @@ static int pcf85363_probe(struct i2c_client *client,
if (data)
config = data;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
pcf85363 = devm_kzalloc(&client->dev, sizeof(struct pcf85363),
GFP_KERNEL);
if (!pcf85363)
@@ -400,20 +389,21 @@ static int pcf85363_probe(struct i2c_client *client,
return PTR_ERR(pcf85363->regmap);
}
- pcf85363->dev = &client->dev;
i2c_set_clientdata(client, pcf85363);
- pcf85363->rtc = devm_rtc_allocate_device(pcf85363->dev);
+ pcf85363->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(pcf85363->rtc))
return PTR_ERR(pcf85363->rtc);
pcf85363->rtc->ops = &rtc_ops;
+ pcf85363->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ pcf85363->rtc->range_max = RTC_TIMESTAMP_END_2099;
if (client->irq > 0) {
regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
PIN_IO_INTA_OUT, PIN_IO_INTAPM);
- ret = devm_request_threaded_irq(pcf85363->dev, client->irq,
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf85363_rtc_handle_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"pcf85363", client);
diff --git a/drivers/rtc/rtc-ps3.c b/drivers/rtc/rtc-ps3.c
index 347288bff438..f0336d691e6c 100644
--- a/drivers/rtc/rtc-ps3.c
+++ b/drivers/rtc/rtc-ps3.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PS3 RTC Driver
*
* Copyright 2009 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- * If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -40,16 +28,13 @@ static u64 read_rtc(void)
static int ps3_get_time(struct device *dev, struct rtc_time *tm)
{
- rtc_time_to_tm(read_rtc() + ps3_os_area_get_rtc_diff(), tm);
+ rtc_time64_to_tm(read_rtc() + ps3_os_area_get_rtc_diff(), tm);
return 0;
}
static int ps3_set_time(struct device *dev, struct rtc_time *tm)
{
- unsigned long now;
-
- rtc_tm_to_time(tm, &now);
- ps3_os_area_set_rtc_diff(now - read_rtc());
+ ps3_os_area_set_rtc_diff(rtc_tm_to_time64(tm) - read_rtc());
return 0;
}
@@ -62,13 +47,16 @@ static int __init ps3_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
- rtc = devm_rtc_device_register(&dev->dev, "rtc-ps3", &ps3_rtc_ops,
- THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&dev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->ops = &ps3_rtc_ops;
+ rtc->range_max = U64_MAX;
+
platform_set_drvdata(dev, rtc);
- return 0;
+
+ return rtc_register_device(rtc);
}
static struct platform_driver ps3_rtc_driver = {
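The recurring rtc_time_to_tm()/rtc_tm_to_time() to rtc_time64_to_tm()/rtc_tm_to_time64() swaps are the y2038/y2106 side of the series: the old helpers work on unsigned long, which on 32-bit machines overflows in 2106 (and a signed 32-bit time_t already wraps in 2038), while the 64-bit helpers plus an explicit range_max such as U64_MAX let the core validate requested times against what the hardware can actually store. A tiny userspace illustration of the truncation the 64-bit helpers avoid; the date and numbers are computed for this note, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 2200-01-01T00:00:00 UTC, well past both 32-bit limits */
        uint64_t secs64 = 7258118400ULL;
        uint32_t secs32 = (uint32_t)secs64;   /* a 32-bit unsigned long */

        printf("64-bit seconds:        %llu\n", (unsigned long long)secs64);
        printf("truncated to 32 bits:  %u (wraps back into 2063)\n", secs32);
        return 0;
}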
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index e1887b86fdc7..d4766734e40b 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -145,8 +145,7 @@ static void rtsr_set_bits(struct pxa_rtc *pxa_rtc, u32 mask)
static irqreturn_t pxa_rtc_irq(int irq, void *dev_id)
{
- struct platform_device *pdev = to_platform_device(dev_id);
- struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);
+ struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev_id);
u32 rtsr;
unsigned long events = 0;
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index 1fb864d4ef83..5c5d9f125669 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -336,8 +336,7 @@ static const struct rtc_class_ops rk808_rtc_ops = {
/* Turn off the alarm if it should not be a wake source. */
static int rk808_rtc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rk808_rtc *rk808_rtc = dev_get_drvdata(&pdev->dev);
+ struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(rk808_rtc->irq);
@@ -350,8 +349,7 @@ static int rk808_rtc_suspend(struct device *dev)
*/
static int rk808_rtc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rk808_rtc *rk808_rtc = dev_get_drvdata(&pdev->dev);
+ struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(rk808_rtc->irq);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 5899ca368d59..71e20a6bd387 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -21,6 +21,8 @@
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
/* RX-6110 Register definitions */
@@ -379,9 +381,16 @@ static const struct spi_device_id rx6110_id[] = {
};
MODULE_DEVICE_TABLE(spi, rx6110_id);
+static const struct of_device_id rx6110_spi_of_match[] = {
+ { .compatible = "epson,rx6110" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rx6110_spi_of_match);
+
static struct spi_driver rx6110_driver = {
.driver = {
.name = RX6110_DRIVER_NAME,
+ .of_match_table = of_match_ptr(rx6110_spi_of_match),
},
.probe = rx6110_probe,
.remove = rx6110_remove,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 41de38acc570..fddc996cb38d 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -311,7 +311,7 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
+ (ald[1] & 0x20 ? 12 : 0);
- dev_dbg(dev, "%s: date: %ptRr\n", __func__, t);
+ dev_dbg(dev, "%s: date: %ptRr\n", __func__, &t->time);
t->enabled = !!(rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE);
t->pending = (ctrl2 & RX8025_BIT_CTRL2_DAFG) && t->enabled;
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index d417b203cbc5..579b3ff5c644 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -276,6 +276,9 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int sec128, sec2, yr, yr100, cf_bit;
+ if (!(readb(rtc->regbase + RCR2) & RCR2_RTCEN))
+ return -EINVAL;
+
do {
unsigned int tmp;
@@ -374,7 +377,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
- int value = 0xff; /* return 0xff for ignored values */
+ int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {
@@ -466,7 +469,6 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
{
struct sh_rtc *rtc;
struct resource *res;
- struct rtc_time r;
char clk_name[6];
int clk_id, ret;
@@ -528,6 +530,10 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
rtc->clk = NULL;
}
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
clk_enable(rtc->clk);
rtc->capabilities = RTC_DEF_CAPABILITIES;
@@ -591,21 +597,21 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
sh_rtc_setaie(&pdev->dev, 0);
sh_rtc_setcie(&pdev->dev, 0);
- rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, "sh",
- &sh_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc_dev)) {
- ret = PTR_ERR(rtc->rtc_dev);
- goto err_unmap;
- }
-
+ rtc->rtc_dev->ops = &sh_rtc_ops;
rtc->rtc_dev->max_user_freq = 256;
- /* reset rtc to epoch 0 if time is invalid */
- if (rtc_read_time(rtc->rtc_dev, &r) < 0) {
- rtc_time_to_tm(0, &r);
- rtc_set_time(rtc->rtc_dev, &r);
+ if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
+ rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtc->rtc_dev->range_max = RTC_TIMESTAMP_END_9999;
+ } else {
+ rtc->rtc_dev->range_min = mktime64(1999, 1, 1, 0, 0, 0);
+ rtc->rtc_dev->range_max = mktime64(2098, 12, 31, 23, 59, 59);
}
+ ret = rtc_register_device(rtc->rtc_dev);
+ if (ret)
+ goto err_unmap;
+
device_init_wakeup(&pdev->dev, 1);
return 0;
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index 2a9e151cae99..9ba28d1ebd87 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -279,7 +279,7 @@ static const struct of_device_id sirfsoc_rtc_of_match[] = {
{},
};
-const struct regmap_config sysrtc_regmap_config = {
+static const struct regmap_config sysrtc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.fast_io = true,
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 0b9eff19149b..7ee673a25fd0 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -9,6 +9,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/rtc.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
@@ -271,7 +272,6 @@ static const struct regmap_config snvs_rtc_config = {
static int snvs_rtc_probe(struct platform_device *pdev)
{
struct snvs_rtc_data *data;
- struct resource *res;
int ret;
void __iomem *mmio;
@@ -283,9 +283,8 @@ static int snvs_rtc_probe(struct platform_device *pdev)
if (IS_ERR(data->regmap)) {
dev_warn(&pdev->dev, "snvs rtc: you use old dts file, please update it\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio = devm_ioremap_resource(&pdev->dev, res);
+ mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
@@ -332,6 +331,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
}
device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, data->irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
ret = devm_request_irq(&pdev->dev, data->irq, snvs_rtc_irq_handler,
IRQF_SHARED, "rtc alarm", &pdev->dev);
@@ -358,18 +360,7 @@ error_rtc_device_register:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
-static int snvs_rtc_suspend(struct device *dev)
-{
- struct snvs_rtc_data *data = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- return enable_irq_wake(data->irq);
-
- return 0;
-}
-
-static int snvs_rtc_suspend_noirq(struct device *dev)
+static int __maybe_unused snvs_rtc_suspend_noirq(struct device *dev)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
@@ -379,17 +370,7 @@ static int snvs_rtc_suspend_noirq(struct device *dev)
return 0;
}
-static int snvs_rtc_resume(struct device *dev)
-{
- struct snvs_rtc_data *data = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- return disable_irq_wake(data->irq);
-
- return 0;
-}
-
-static int snvs_rtc_resume_noirq(struct device *dev)
+static int __maybe_unused snvs_rtc_resume_noirq(struct device *dev)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
@@ -400,20 +381,9 @@ static int snvs_rtc_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops snvs_rtc_pm_ops = {
- .suspend = snvs_rtc_suspend,
- .suspend_noirq = snvs_rtc_suspend_noirq,
- .resume = snvs_rtc_resume,
- .resume_noirq = snvs_rtc_resume_noirq,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(snvs_rtc_suspend_noirq, snvs_rtc_resume_noirq)
};
-#define SNVS_RTC_PM_OPS (&snvs_rtc_pm_ops)
-
-#else
-
-#define SNVS_RTC_PM_OPS NULL
-
-#endif
-
static const struct of_device_id snvs_dt_ids[] = {
{ .compatible = "fsl,sec-v4.0-mon-rtc-lp", },
{ /* sentinel */ }
@@ -423,7 +393,7 @@ MODULE_DEVICE_TABLE(of, snvs_dt_ids);
static struct platform_driver snvs_rtc_driver = {
.driver = {
.name = "snvs_rtc",
- .pm = SNVS_RTC_PM_OPS,
+ .pm = &snvs_rtc_pm_ops,
.of_match_table = snvs_dt_ids,
},
.probe = snvs_rtc_probe,
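The snvs change is the companion cleanup to the wake-irq conversion: with the remaining noirq callbacks marked __maybe_unused and wrapped in SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(), the dev_pm_ops structure can be defined unconditionally and the old #ifdef CONFIG_PM_SLEEP / SNVS_RTC_PM_OPS indirection disappears; with CONFIG_PM_SLEEP disabled the macro expands to nothing and the compiler drops the unused functions. A minimal sketch of the pattern; the foo_* names are placeholders:

#include <linux/pm.h>

static int __maybe_unused foo_suspend_noirq(struct device *dev)
{
        /* quiesce hardware; runs after interrupts are disabled */
        return 0;
}

static int __maybe_unused foo_resume_noirq(struct device *dev)
{
        /* re-arm hardware */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
};

/* referenced unconditionally as .driver.pm = &foo_pm_ops */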
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index c5908cfea234..8e6c9b3bcc29 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -788,11 +788,14 @@ static int stm32_rtc_probe(struct platform_device *pdev)
ret = device_init_wakeup(&pdev->dev, true);
if (rtc->data->has_wakeirq) {
rtc->wakeirq_alarm = platform_get_irq(pdev, 1);
- if (rtc->wakeirq_alarm <= 0)
- ret = rtc->wakeirq_alarm;
- else
+ if (rtc->wakeirq_alarm > 0) {
ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
rtc->wakeirq_alarm);
+ } else {
+ ret = rtc->wakeirq_alarm;
+ if (rtc->wakeirq_alarm == -EPROBE_DEFER)
+ goto err;
+ }
}
if (ret)
dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret);
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index b76318fd5bb0..ff6488be385f 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale STMP37XX/STMP378X Real Time Clock driver
*
@@ -8,15 +9,6 @@
* Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
* Copyright 2011 Wolfram Sang, Pengutronix e.K.
*/
-
-/*
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -160,15 +152,15 @@ static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
if (ret)
return ret;
- rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
+ rtc_time64_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
return 0;
}
-static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
+static int stmp3xxx_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
{
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
+ writel(rtc_tm_to_time64(rtc_tm), rtc_data->io + STMP3XXX_RTC_SECONDS);
return stmp3xxx_wait_time(rtc_data);
}
@@ -214,17 +206,15 @@ static int stmp3xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_ALARM), &alm->time);
+ rtc_time64_to_tm(readl(rtc_data->io + STMP3XXX_RTC_ALARM), &alm->time);
return 0;
}
static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
- unsigned long t;
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- rtc_tm_to_time(&alm->time, &t);
- writel(t, rtc_data->io + STMP3XXX_RTC_ALARM);
+ writel(rtc_tm_to_time64(&alm->time), rtc_data->io + STMP3XXX_RTC_ALARM);
stmp3xxx_alarm_irq_enable(dev, alm->enabled);
@@ -235,7 +225,7 @@ static const struct rtc_class_ops stmp3xxx_rtc_ops = {
.alarm_irq_enable =
stmp3xxx_alarm_irq_enable,
.read_time = stmp3xxx_rtc_gettime,
- .set_mmss = stmp3xxx_rtc_set_mmss,
+ .set_time = stmp3xxx_rtc_settime,
.read_alarm = stmp3xxx_rtc_read_alarm,
.set_alarm = stmp3xxx_rtc_set_alarm,
};
@@ -361,8 +351,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
STMP3XXX_RTC_CTRL_ALARM_IRQ_EN,
rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR);
- rtc_data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &stmp3xxx_rtc_ops, THIS_MODULE);
+ rtc_data->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc_data->rtc))
return PTR_ERR(rtc_data->rtc);
@@ -374,6 +363,13 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
return err;
}
+ rtc_data->rtc->ops = &stmp3xxx_rtc_ops;
+ rtc_data->rtc->range_max = U32_MAX;
+
+ err = rtc_register_device(rtc_data->rtc);
+ if (err)
+ return err;
+
stmp3xxx_wdt_register(pdev);
return 0;
}
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index 11bc562eba5d..036463dfa103 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/* rtc-sun4v.c: Hypervisor based RTC for SUN4V systems.
*
* Author: David S. Miller
- * License: GPL
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
@@ -39,7 +39,7 @@ retry:
static int sun4v_read_time(struct device *dev, struct rtc_time *tm)
{
- rtc_time_to_tm(hypervisor_get_time(), tm);
+ rtc_time64_to_tm(hypervisor_get_time(), tm);
return 0;
}
@@ -66,14 +66,7 @@ retry:
static int sun4v_set_time(struct device *dev, struct rtc_time *tm)
{
- unsigned long secs;
- int err;
-
- err = rtc_tm_to_time(tm, &secs);
- if (err)
- return err;
-
- return hypervisor_set_time(secs);
+ return hypervisor_set_time(rtc_tm_to_time64(tm));
}
static const struct rtc_class_ops sun4v_rtc_ops = {
@@ -85,13 +78,15 @@ static int __init sun4v_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- rtc = devm_rtc_device_register(&pdev->dev, "sun4v",
- &sun4v_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->ops = &sun4v_rtc_ops;
+ rtc->range_max = U64_MAX;
platform_set_drvdata(pdev, rtc);
- return 0;
+
+ return rtc_register_device(rtc);
}
static struct platform_driver sun4v_rtc_driver = {
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index c6b0a99aa3a9..f0ce76865434 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* An RTC driver for the NVIDIA Tegra 200 series internal RTC.
*
* Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/clk.h>
@@ -123,7 +110,7 @@ static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
- rtc_time_to_tm(sec, tm);
+ rtc_time64_to_tm(sec, tm);
dev_vdbg(dev, "time read as %lu. %ptR\n", sec, tm);
@@ -137,7 +124,7 @@ static int tegra_rtc_set_time(struct device *dev, struct rtc_time *tm)
int ret;
/* convert tm to seconds. */
- rtc_tm_to_time(tm, &sec);
+ sec = rtc_tm_to_time64(tm);
dev_vdbg(dev, "time set to %lu. %ptR\n", sec, tm);
@@ -166,7 +153,7 @@ static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
} else {
/* alarm is enabled. */
alarm->enabled = 1;
- rtc_time_to_tm(sec, &alarm->time);
+ rtc_time64_to_tm(sec, &alarm->time);
}
tmp = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
@@ -204,7 +191,7 @@ static int tegra_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
unsigned long sec;
if (alarm->enabled)
- rtc_tm_to_time(&alarm->time, &sec);
+ sec = rtc_tm_to_time64(&alarm->time);
else
sec = 0;
@@ -306,6 +293,13 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
info->tegra_rtc_irq = ret;
+ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(info->rtc_dev))
+ return PTR_ERR(info->rtc_dev);
+
+ info->rtc_dev->ops = &tegra_rtc_ops;
+ info->rtc_dev->range_max = U32_MAX;
+
info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk))
return PTR_ERR(info->clk);
@@ -327,16 +321,6 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- info->rtc_dev = devm_rtc_device_register(&pdev->dev,
- dev_name(&pdev->dev), &tegra_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(info->rtc_dev)) {
- ret = PTR_ERR(info->rtc_dev);
- dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
- ret);
- goto disable_clk;
- }
-
ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
dev_name(&pdev->dev), &pdev->dev);
@@ -347,6 +331,13 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
goto disable_clk;
}
+ ret = rtc_register_device(info->rtc_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
+ ret);
+ goto disable_clk;
+ }
+
dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
return 0;
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 6c5f09c815e8..b298e9902f45 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -70,11 +70,11 @@ static int test_rtc_read_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-static int test_rtc_set_mmss64(struct device *dev, time64_t secs)
+static int test_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct rtc_test_data *rtd = dev_get_drvdata(dev);
- rtd->offset = secs - ktime_get_real_seconds();
+ rtd->offset = rtc_tm_to_time64(tm) - ktime_get_real_seconds();
return 0;
}
@@ -94,15 +94,15 @@ static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
static const struct rtc_class_ops test_rtc_ops_noalm = {
.read_time = test_rtc_read_time,
- .set_mmss64 = test_rtc_set_mmss64,
+ .set_time = test_rtc_set_time,
.alarm_irq_enable = test_rtc_alarm_irq_enable,
};
static const struct rtc_class_ops test_rtc_ops = {
.read_time = test_rtc_read_time,
+ .set_time = test_rtc_set_time,
.read_alarm = test_rtc_read_alarm,
.set_alarm = test_rtc_set_alarm,
- .set_mmss64 = test_rtc_set_mmss64,
.alarm_irq_enable = test_rtc_alarm_irq_enable,
};
@@ -152,7 +152,8 @@ static int __init test_init(void)
{
int i, err;
- if ((err = platform_driver_register(&test_driver)))
+ err = platform_driver_register(&test_driver);
+ if (err)
return err;
err = -ENOMEM;
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 2d24babc4057..5a29915a06ec 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -42,11 +42,6 @@ struct tx4939rtc_plat_data {
spinlock_t lock;
};
-static struct tx4939rtc_plat_data *get_tx4939rtc_plat_data(struct device *dev)
-{
- return platform_get_drvdata(to_platform_device(dev));
-}
-
static int tx4939_rtc_cmd(struct tx4939_rtc_reg __iomem *rtcreg, int cmd)
{
int i = 0;
@@ -64,7 +59,7 @@ static int tx4939_rtc_cmd(struct tx4939_rtc_reg __iomem *rtcreg, int cmd)
static int tx4939_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
+ struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
unsigned long secs = rtc_tm_to_time64(tm);
int i, ret;
@@ -89,7 +84,7 @@ static int tx4939_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
+ struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
int i, ret;
unsigned long sec;
@@ -115,7 +110,7 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int tx4939_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
+ struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
int i, ret;
unsigned long sec;
@@ -140,7 +135,7 @@ static int tx4939_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
+ struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
int i, ret;
unsigned long sec;
@@ -170,7 +165,7 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int tx4939_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
+ struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev);
spin_lock_irq(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg,
@@ -182,7 +177,7 @@ static int tx4939_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
static irqreturn_t tx4939_rtc_interrupt(int irq, void *dev_id)
{
- struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev_id);
+ struct tx4939rtc_plat_data *pdata = dev_get_drvdata(dev_id);
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
unsigned long events = RTC_IRQF;
diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c
index e62bda0cb53e..8ad4c4e6d557 100644
--- a/drivers/rtc/rtc-wilco-ec.c
+++ b/drivers/rtc/rtc-wilco-ec.c
@@ -21,8 +21,20 @@
#define EC_CMOS_TOD_WRITE 0x02
#define EC_CMOS_TOD_READ 0x08
+/* Message sent to the EC to request the current time. */
+struct ec_rtc_read_request {
+ u8 command;
+ u8 reserved;
+ u8 param;
+} __packed;
+static struct ec_rtc_read_request read_rq = {
+ .command = EC_COMMAND_CMOS,
+ .param = EC_CMOS_TOD_READ,
+};
+
/**
- * struct ec_rtc_read - Format of RTC returned by EC.
+ * struct ec_rtc_read_response - Format of RTC returned by EC.
+ * @reserved: Unused byte
* @second: Second value (0..59)
* @minute: Minute value (0..59)
* @hour: Hour value (0..23)
@@ -33,7 +45,8 @@
*
* All values are presented in binary (not BCD).
*/
-struct ec_rtc_read {
+struct ec_rtc_read_response {
+ u8 reserved;
u8 second;
u8 minute;
u8 hour;
@@ -44,8 +57,10 @@ struct ec_rtc_read {
} __packed;
/**
- * struct ec_rtc_write - Format of RTC sent to the EC.
- * @param: EC_CMOS_TOD_WRITE
+ * struct ec_rtc_write_request - Format of RTC sent to the EC.
+ * @command: Always EC_COMMAND_CMOS
+ * @reserved: Unused byte
+ * @param: Always EC_CMOS_TOD_WRITE
* @century: Century value (full year / 100)
* @year: Year value (full year % 100)
* @month: Month value (1..12)
@@ -57,7 +72,9 @@ struct ec_rtc_read {
*
* All values are presented in BCD.
*/
-struct ec_rtc_write {
+struct ec_rtc_write_request {
+ u8 command;
+ u8 reserved;
u8 param;
u8 century;
u8 year;
@@ -72,19 +89,17 @@ struct ec_rtc_write {
static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
{
struct wilco_ec_device *ec = dev_get_drvdata(dev->parent);
- u8 param = EC_CMOS_TOD_READ;
- struct ec_rtc_read rtc;
- struct wilco_ec_message msg = {
- .type = WILCO_EC_MSG_LEGACY,
- .flags = WILCO_EC_FLAG_RAW_RESPONSE,
- .command = EC_COMMAND_CMOS,
- .request_data = &param,
- .request_size = sizeof(param),
- .response_data = &rtc,
- .response_size = sizeof(rtc),
- };
+ struct ec_rtc_read_response rtc;
+ struct wilco_ec_message msg;
int ret;
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = &read_rq;
+ msg.request_size = sizeof(read_rq);
+ msg.response_data = &rtc;
+ msg.response_size = sizeof(rtc);
+
ret = wilco_ec_mailbox(ec, &msg);
if (ret < 0)
return ret;
@@ -106,14 +121,8 @@ static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm)
{
struct wilco_ec_device *ec = dev_get_drvdata(dev->parent);
- struct ec_rtc_write rtc;
- struct wilco_ec_message msg = {
- .type = WILCO_EC_MSG_LEGACY,
- .flags = WILCO_EC_FLAG_RAW_RESPONSE,
- .command = EC_COMMAND_CMOS,
- .request_data = &rtc,
- .request_size = sizeof(rtc),
- };
+ struct ec_rtc_write_request rtc;
+ struct wilco_ec_message msg;
int year = tm->tm_year + 1900;
/*
* Convert from 0=Sunday to 0=Saturday for the EC
@@ -123,6 +132,7 @@ static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm)
int wday = tm->tm_wday == 6 ? 0 : tm->tm_wday + 1;
int ret;
+ rtc.command = EC_COMMAND_CMOS;
rtc.param = EC_CMOS_TOD_WRITE;
rtc.century = bin2bcd(year / 100);
rtc.year = bin2bcd(year % 100);
@@ -133,6 +143,11 @@ static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm)
rtc.second = bin2bcd(tm->tm_sec);
rtc.weekday = bin2bcd(wday);
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = &rtc;
+ msg.request_size = sizeof(rtc);
+
ret = wilco_ec_mailbox(ec, &msg);
if (ret < 0)
return ret;
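
The wilco-ec hunks above fold the command byte and a reserved byte into explicit packed request structs, so the message no longer carries a separate .command field or the raw-response flag. A minimal sketch of a read built this way, assuming only the wilco_ec_message fields visible in the diff (type, request_data/size, response_data/size) and using a hypothetical helper name:

static int example_ec_rtc_read(struct wilco_ec_device *ec,
			       struct ec_rtc_read_response *rs)
{
	struct ec_rtc_read_request rq = {
		.command = EC_COMMAND_CMOS,
		.param   = EC_CMOS_TOD_READ,
	};
	struct wilco_ec_message msg = {
		.type          = WILCO_EC_MSG_LEGACY,
		.request_data  = &rq,
		.request_size  = sizeof(rq),
		.response_data = rs,
		.response_size = sizeof(*rs),
	};

	/* Designated initializers zero the remaining fields, which matches
	 * the memset() used in the hunks above. */
	return wilco_ec_mailbox(ec, &msg);
}
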
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 7b824dabf104..d2e8b21c90c4 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Real Time Clock driver for Wolfson Microelectronics WM831x
*
@@ -5,11 +6,6 @@
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
@@ -155,7 +151,7 @@ static int wm831x_rtc_readtime(struct device *dev, struct rtc_time *tm)
if (memcmp(time1, time2, sizeof(time1)) == 0) {
u32 time = (time1[0] << 16) | time1[1];
- rtc_time_to_tm(time, tm);
+ rtc_time64_to_tm(time, tm);
return 0;
}
@@ -169,15 +165,17 @@ static int wm831x_rtc_readtime(struct device *dev, struct rtc_time *tm)
/*
* Set current time and date in RTC
*/
-static int wm831x_rtc_set_mmss(struct device *dev, unsigned long time)
+static int wm831x_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
struct wm831x *wm831x = wm831x_rtc->wm831x;
struct rtc_time new_tm;
- unsigned long new_time;
+ unsigned long time, new_time;
int ret;
int count = 0;
+ time = rtc_tm_to_time64(tm);
+
ret = wm831x_reg_write(wm831x, WM831X_RTC_TIME_1,
(time >> 16) & 0xffff);
if (ret < 0) {
@@ -215,11 +213,7 @@ static int wm831x_rtc_set_mmss(struct device *dev, unsigned long time)
if (ret < 0)
return ret;
- ret = rtc_tm_to_time(&new_tm, &new_time);
- if (ret < 0) {
- dev_err(dev, "Failed to convert time: %d\n", ret);
- return ret;
- }
+ new_time = rtc_tm_to_time64(&new_tm);
/* Allow a second of change in case of tick */
if (new_time - time > 1) {
@@ -249,7 +243,7 @@ static int wm831x_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
time = (data[0] << 16) | data[1];
- rtc_time_to_tm(time, &alrm->time);
+ rtc_time64_to_tm(time, &alrm->time);
ret = wm831x_reg_read(wm831x_rtc->wm831x, WM831X_RTC_CONTROL);
if (ret < 0) {
@@ -288,11 +282,7 @@ static int wm831x_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
int ret;
unsigned long time;
- ret = rtc_tm_to_time(&alrm->time, &time);
- if (ret < 0) {
- dev_err(dev, "Failed to convert time: %d\n", ret);
- return ret;
- }
+ time = rtc_tm_to_time64(&alrm->time);
ret = wm831x_rtc_stop_alarm(wm831x_rtc);
if (ret < 0) {
@@ -346,7 +336,7 @@ static irqreturn_t wm831x_alm_irq(int irq, void *data)
static const struct rtc_class_ops wm831x_rtc_ops = {
.read_time = wm831x_rtc_readtime,
- .set_mmss = wm831x_rtc_set_mmss,
+ .set_time = wm831x_rtc_settime,
.read_alarm = wm831x_rtc_readalarm,
.set_alarm = wm831x_rtc_setalarm,
.alarm_irq_enable = wm831x_rtc_alarm_irq_enable,
@@ -356,11 +346,10 @@ static const struct rtc_class_ops wm831x_rtc_ops = {
/* Turn off the alarm if it should not be a wake source. */
static int wm831x_rtc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(&pdev->dev);
+ struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
int ret, enable;
- if (wm831x_rtc->alarm_enabled && device_may_wakeup(&pdev->dev))
+ if (wm831x_rtc->alarm_enabled && device_may_wakeup(dev))
enable = WM831X_RTC_ALM_ENA;
else
enable = 0;
@@ -368,7 +357,7 @@ static int wm831x_rtc_suspend(struct device *dev)
ret = wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
WM831X_RTC_ALM_ENA, enable);
if (ret != 0)
- dev_err(&pdev->dev, "Failed to update RTC alarm: %d\n", ret);
+ dev_err(dev, "Failed to update RTC alarm: %d\n", ret);
return 0;
}
@@ -378,15 +367,13 @@ static int wm831x_rtc_suspend(struct device *dev)
*/
static int wm831x_rtc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(&pdev->dev);
+ struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
int ret;
if (wm831x_rtc->alarm_enabled) {
ret = wm831x_rtc_start_alarm(wm831x_rtc);
if (ret != 0)
- dev_err(&pdev->dev,
- "Failed to restart RTC alarm: %d\n", ret);
+ dev_err(dev, "Failed to restart RTC alarm: %d\n", ret);
}
return 0;
@@ -395,14 +382,13 @@ static int wm831x_rtc_resume(struct device *dev)
/* Unconditionally disable the alarm */
static int wm831x_rtc_freeze(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(&pdev->dev);
+ struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
int ret;
ret = wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
WM831X_RTC_ALM_ENA, 0);
if (ret != 0)
- dev_err(&pdev->dev, "Failed to stop RTC alarm: %d\n", ret);
+ dev_err(dev, "Failed to stop RTC alarm: %d\n", ret);
return 0;
}
@@ -429,19 +415,23 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
ret = wm831x_reg_read(wm831x, WM831X_RTC_CONTROL);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read RTC control: %d\n", ret);
- goto err;
+ return ret;
}
if (ret & WM831X_RTC_ALM_ENA)
wm831x_rtc->alarm_enabled = 1;
device_init_wakeup(&pdev->dev, 1);
- wm831x_rtc->rtc = devm_rtc_device_register(&pdev->dev, "wm831x",
- &wm831x_rtc_ops, THIS_MODULE);
- if (IS_ERR(wm831x_rtc->rtc)) {
- ret = PTR_ERR(wm831x_rtc->rtc);
- goto err;
- }
+ wm831x_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(wm831x_rtc->rtc))
+ return PTR_ERR(wm831x_rtc->rtc);
+
+ wm831x_rtc->rtc->ops = &wm831x_rtc_ops;
+ wm831x_rtc->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(wm831x_rtc->rtc);
+ if (ret)
+ return ret;
ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL,
wm831x_alm_irq,
@@ -455,9 +445,6 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
wm831x_rtc_add_randomness(wm831x);
return 0;
-
-err:
- return ret;
}
static const struct dev_pm_ops wm831x_rtc_pm_ops = {
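
The wm831x hunks above follow the conversion applied across this series: the seconds-only .set_mmss callback becomes .set_time using the 64-bit time helpers, and devm_rtc_device_register() is split into devm_rtc_allocate_device() plus rtc_register_device() so range_max can describe the 32-bit hardware counter before the device becomes visible. A minimal sketch of that pattern, with hypothetical example_* names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

struct example_rtc {
	void __iomem *counter_reg;
};

static int example_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct example_rtc *pdata = dev_get_drvdata(dev);
	time64_t secs = rtc_tm_to_time64(tm);

	writel((u32)secs, pdata->counter_reg);	/* 32-bit seconds counter */
	return 0;
}

static const struct rtc_class_ops example_rtc_ops = {
	.set_time = example_rtc_set_time,
};

static int example_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc = devm_rtc_allocate_device(&pdev->dev);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &example_rtc_ops;
	rtc->range_max = U32_MAX;	/* matches the counter width */

	return rtc_register_device(rtc);
}
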
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 483c7993516b..f54fa12c4b4b 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -340,8 +340,7 @@ static const struct rtc_class_ops wm8350_rtc_ops = {
#ifdef CONFIG_PM_SLEEP
static int wm8350_rtc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
+ struct wm8350 *wm8350 = dev_get_drvdata(dev);
int ret = 0;
u16 reg;
@@ -351,8 +350,7 @@ static int wm8350_rtc_suspend(struct device *dev)
reg & WM8350_RTC_ALMSTS) {
ret = wm8350_rtc_stop_alarm(wm8350);
if (ret != 0)
- dev_err(&pdev->dev, "Failed to stop RTC alarm: %d\n",
- ret);
+ dev_err(dev, "Failed to stop RTC alarm: %d\n", ret);
}
return ret;
@@ -360,15 +358,13 @@ static int wm8350_rtc_suspend(struct device *dev)
static int wm8350_rtc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
+ struct wm8350 *wm8350 = dev_get_drvdata(dev);
int ret;
if (wm8350->rtc.alarm_enabled) {
ret = wm8350_rtc_start_alarm(wm8350);
if (ret != 0)
- dev_err(&pdev->dev,
- "Failed to restart RTC alarm: %d\n", ret);
+ dev_err(dev, "Failed to restart RTC alarm: %d\n", ret);
}
return 0;
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index f08f18e4fcdf..ad2ae2f0536e 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -673,9 +673,16 @@ static const struct i2c_device_id x1205_id[] = {
};
MODULE_DEVICE_TABLE(i2c, x1205_id);
+static const struct of_device_id x1205_dt_ids[] = {
+ { .compatible = "xircom,x1205", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, x1205_dt_ids);
+
static struct i2c_driver x1205_driver = {
.driver = {
.name = "rtc-x1205",
+ .of_match_table = x1205_dt_ids,
},
.probe = x1205_probe,
.remove = x1205_remove,
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 153820876a82..9888383f0088 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -1,34 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* APM X-Gene SoC Real Time Clock Driver
*
* Copyright (c) 2014, Applied Micro Circuits Corporation
* Author: Rameshwar Prasad Sahu <rsahu@apm.com>
* Loc Ho <lho@apm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/rtc.h>
+#include <linux/slab.h>
/* RTC CSR Registers */
#define RTC_CCVR 0x00
@@ -48,7 +35,6 @@
struct xgene_rtc_dev {
struct rtc_device *rtc;
struct device *dev;
- unsigned long alarm_time;
void __iomem *csr_base;
struct clk *clk;
unsigned int irq_wake;
@@ -59,11 +45,11 @@ static int xgene_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(pdata->csr_base + RTC_CCVR), tm);
+ rtc_time64_to_tm(readl(pdata->csr_base + RTC_CCVR), tm);
return 0;
}
-static int xgene_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int xgene_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
@@ -71,7 +57,7 @@ static int xgene_rtc_set_mmss(struct device *dev, unsigned long secs)
* NOTE: After the following write, the RTC_CCVR is only reflected
* after the update cycle of one second.
*/
- writel((u32) secs, pdata->csr_base + RTC_CLR);
+ writel((u32)rtc_tm_to_time64(tm), pdata->csr_base + RTC_CLR);
readl(pdata->csr_base + RTC_CLR); /* Force a barrier */
return 0;
@@ -81,7 +67,8 @@ static int xgene_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
- rtc_time_to_tm(pdata->alarm_time, &alrm->time);
+ /* If possible, CMR should be read here */
+ rtc_time64_to_tm(0, &alrm->time);
alrm->enabled = readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE;
return 0;
@@ -115,11 +102,8 @@ static int xgene_rtc_alarm_irq_enabled(struct device *dev)
static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
- unsigned long alarm_time;
- rtc_tm_to_time(&alrm->time, &alarm_time);
- pdata->alarm_time = alarm_time;
- writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR);
+ writel((u32)rtc_tm_to_time64(&alrm->time), pdata->csr_base + RTC_CMR);
xgene_rtc_alarm_irq_enable(dev, alrm->enabled);
@@ -128,7 +112,7 @@ static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static const struct rtc_class_ops xgene_rtc_ops = {
.read_time = xgene_rtc_read_time,
- .set_mmss = xgene_rtc_set_mmss,
+ .set_time = xgene_rtc_set_time,
.read_alarm = xgene_rtc_read_alarm,
.set_alarm = xgene_rtc_set_alarm,
.alarm_irq_enable = xgene_rtc_alarm_irq_enable,
@@ -136,7 +120,7 @@ static const struct rtc_class_ops xgene_rtc_ops = {
static irqreturn_t xgene_rtc_interrupt(int irq, void *id)
{
- struct xgene_rtc_dev *pdata = (struct xgene_rtc_dev *) id;
+ struct xgene_rtc_dev *pdata = id;
/* Check if interrupt asserted */
if (!(readl(pdata->csr_base + RTC_STAT) & RTC_STAT_BIT))
@@ -168,6 +152,10 @@ static int xgene_rtc_probe(struct platform_device *pdev)
if (IS_ERR(pdata->csr_base))
return PTR_ERR(pdata->csr_base);
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(pdata->rtc))
+ return PTR_ERR(pdata->rtc);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
@@ -198,15 +186,16 @@ static int xgene_rtc_probe(struct platform_device *pdev)
return ret;
}
- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &xgene_rtc_ops, THIS_MODULE);
- if (IS_ERR(pdata->rtc)) {
- clk_disable_unprepare(pdata->clk);
- return PTR_ERR(pdata->rtc);
- }
-
/* HW does not support updates faster than once per second */
pdata->rtc->uie_unsupported = 1;
+ pdata->rtc->ops = &xgene_rtc_ops;
+ pdata->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(pdata->rtc);
+ if (ret) {
+ clk_disable_unprepare(pdata->clk);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index bb950945ec7f..00639594de0c 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Zynq Ultrascale+ MPSoC Real Time Clock Driver
*
* Copyright (C) 2015 Xilinx, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
*/
#include <linux/delay.h>
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index a8f22ee726bb..be3531e7f868 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -1,20 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, sysfs interface
*
* Copyright (C) 2005 Tower Technologies
* Author: Alessandro Zummo <a.zummo@towertech.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#include <linux/module.h>
#include <linux/rtc.h>
#include "rtc-core.h"
-
/* device attributes */
/*
@@ -86,7 +82,7 @@ max_user_freq_show(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t
max_user_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t n)
+ const char *buf, size_t n)
{
struct rtc_device *rtc = to_rtc_device(dev);
unsigned long val;
@@ -116,12 +112,11 @@ hctosys_show(struct device *dev, struct device_attribute *attr, char *buf)
{
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
if (rtc_hctosys_ret == 0 &&
- strcmp(dev_name(&to_rtc_device(dev)->dev),
- CONFIG_RTC_HCTOSYS_DEVICE) == 0)
+ strcmp(dev_name(&to_rtc_device(dev)->dev),
+ CONFIG_RTC_HCTOSYS_DEVICE) == 0)
return sprintf(buf, "1\n");
- else
#endif
- return sprintf(buf, "0\n");
+ return sprintf(buf, "0\n");
}
static DEVICE_ATTR_RO(hctosys);
@@ -175,15 +170,15 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
if (*buf_ptr == '=') {
buf_ptr++;
push = 1;
- } else
+ } else {
adjust = 1;
+ }
}
retval = kstrtos64(buf_ptr, 0, &alarm);
if (retval)
return retval;
- if (adjust) {
+ if (adjust)
alarm += now;
- }
if (alarm > now || push) {
/* Avoid accidentally clobbering active alarms; we can't
* entirely prevent that here, without even the minimal
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c
index 718293d72426..8b70f0520e13 100644
--- a/drivers/rtc/systohc.c
+++ b/drivers/rtc/systohc.c
@@ -1,9 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
#include <linux/rtc.h>
#include <linux/time.h>
@@ -35,8 +30,7 @@ int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec)
if (!rtc)
goto out_err;
- if (!rtc->ops || (!rtc->ops->set_time && !rtc->ops->set_mmss64 &&
- !rtc->ops->set_mmss))
+ if (!rtc->ops || !rtc->ops->set_time)
goto out_close;
/* Compute the value of tv_nsec we require the caller to supply in
@@ -58,9 +52,6 @@ int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec)
rtc_time64_to_tm(to_set.tv_sec, &tm);
- /* rtc_hctosys exclusively uses UTC, so we call set_time here, not
- * set_mmss.
- */
err = rtc_set_time(rtc, &tm);
out_close:
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6e294b4d3635..f89f9d02e788 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
raw:
- block->blocks = (private->real_cyl *
+ block->blocks = ((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk);
dev_info(&device->cdev->dev,
- "DASD with %d KB/block, %d KB total size, %d KB/track, "
+ "DASD with %u KB/block, %lu KB total size, %u KB/track, "
"%s\n", (block->bp_block >> 10),
- ((private->real_cyl *
+ (((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk * (block->bp_block >> 9)) >> 1),
((blk_per_trk * block->bp_block) >> 10),
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index fd2146bcc0ad..e17364e13d2f 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -629,7 +629,7 @@ con3270_init(void)
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
- raw3270_add_view(&condev->view, &con3270_fn, 1);
+ raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 8f3a2eeb28dc..4c4683d8784a 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp)
init_waitqueue_head(&fp->wait);
fp->fs_pid = get_pid(task_pid(current));
- rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+ rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+ RAW3270_VIEW_LOCK_BH);
if (rc) {
fs3270_free_view(&fp->view);
goto out;
@@ -485,7 +486,7 @@ fs3270_open(struct inode *inode, struct file *filp)
raw3270_del_view(&fp->view);
goto out;
}
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
filp->private_data = fp;
out:
mutex_unlock(&fs3270_mutex);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index f8cd2935fbfd..63a41b168761 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
* Add view to device with minor "minor".
*/
int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
{
unsigned long flags;
struct raw3270 *rp;
@@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
view->cols = rp->cols;
view->ascebc = rp->ascebc;
spin_lock_init(&view->lock);
+ lockdep_set_subclass(&view->lock, subclass);
list_add(&view->list, &rp->view_list);
rc = 0;
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 114ca7cbf889..3afaa35f7351 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -150,6 +150,8 @@ struct raw3270_fn {
struct raw3270_view {
struct list_head list;
spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ 0
+#define RAW3270_VIEW_LOCK_BH 1
atomic_t ref_count;
struct raw3270 *dev;
struct raw3270_fn *fn;
@@ -158,7 +160,7 @@ struct raw3270_view {
unsigned char *ascebc; /* ascii -> ebcdic table */
};
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
int raw3270_activate_view(struct raw3270_view *);
void raw3270_del_view(struct raw3270_view *);
void raw3270_deactivate_view(struct raw3270_view *);
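
The raw3270 change above threads a lockdep subclass through raw3270_add_view(): the console view takes its lock from IRQ context while the fullscreen and tty views take it from BH context, and giving them distinct subclasses keeps lockdep from treating the two usages as one lock class. A minimal sketch of the mechanism, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

#define EXAMPLE_VIEW_LOCK_IRQ	0
#define EXAMPLE_VIEW_LOCK_BH	1

struct example_view {
	spinlock_t lock;
};

static void example_view_init(struct example_view *v, int subclass)
{
	spin_lock_init(&v->lock);
	/* Locks with different subclasses are tracked as different classes,
	 * so IRQ-context and BH-context users do not generate false
	 * dependency reports against each other. */
	lockdep_set_subclass(&v->lock, subclass);
}
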
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index e9aa71cdfc44..d2ab3f07c008 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -45,8 +45,8 @@ static struct list_head sclp_req_queue;
/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
-static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
-static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+static void *sclp_read_sccb;
+static struct init_sccb *sclp_init_sccb;
/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);
@@ -753,9 +753,8 @@ EXPORT_SYMBOL(sclp_remove_processed);
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
- struct init_sccb *sccb;
+ struct init_sccb *sccb = sclp_init_sccb;
- sccb = (struct init_sccb *) sclp_init_sccb;
clear_page(sccb);
memset(&sclp_init_req, 0, sizeof(struct sclp_req));
sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
@@ -782,7 +781,7 @@ static int
sclp_init_mask(int calculate)
{
unsigned long flags;
- struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
+ struct init_sccb *sccb = sclp_init_sccb;
sccb_mask_t receive_mask;
sccb_mask_t send_mask;
int retry;
@@ -1175,6 +1174,9 @@ sclp_init(void)
if (sclp_init_state != sclp_init_state_uninitialized)
goto fail_unlock;
sclp_init_state = sclp_init_state_initializing;
+ sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
+ sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
+ BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
/* Set up variables */
INIT_LIST_HEAD(&sclp_req_queue);
INIT_LIST_HEAD(&sclp_reg_list);
@@ -1207,6 +1209,8 @@ fail_unregister_reboot_notifier:
unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
sclp_init_state = sclp_init_state_uninitialized;
+ free_page((unsigned long) sclp_read_sccb);
+ free_page((unsigned long) sclp_init_sccb);
fail_unlock:
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
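
The sclp.c hunks above replace the static page-aligned SCCB arrays with pages from __get_free_page(GFP_ATOMIC | GFP_DMA), presumably so the buffers stay in storage the SCLP interface can address; the atomic allocation reflects that sclp_init() runs under its spinlock, and the patch uses BUG_ON() on allocation failure. A minimal sketch of the allocate/free pairing, with hypothetical names and GFP_KERNEL for a non-atomic caller:

#include <linux/errno.h>
#include <linux/gfp.h>

static void *example_sccb;

static int example_sccb_alloc(void)
{
	/* One DMA-capable page per SCCB; the caller clears it before use. */
	example_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!example_sccb)
		return -ENOMEM;
	return 0;
}

static void example_sccb_free(void)
{
	free_page((unsigned long) example_sccb);
	example_sccb = NULL;
}
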
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 367e9d384d85..196333013e54 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -197,7 +197,9 @@ struct read_info_sccb {
u32 hmfai; /* 124-127 */
u8 _pad_128[134 - 128]; /* 128-133 */
u8 byte_134; /* 134 */
- u8 _pad_135[4096 - 135]; /* 135-4095 */
+ u8 cpudirq; /* 135 */
+ u16 cbl; /* 136-137 */
+ u8 _pad_138[4096 - 138]; /* 138-4095 */
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
@@ -319,7 +321,7 @@ extern int sclp_console_drop;
extern unsigned long sclp_console_full;
extern bool sclp_mask_compat_mode;
-extern char sclp_early_sccb[PAGE_SIZE];
+extern char *sclp_early_sccb;
void sclp_early_wait_irq(void);
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
@@ -365,14 +367,14 @@ sclp_ascebc(unsigned char ch)
/* translate string from EBCDIC to ASCII */
static inline void
-sclp_ebcasc_str(unsigned char *str, int nr)
+sclp_ebcasc_str(char *str, int nr)
{
(MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
}
/* translate string from ASCII to EBCDIC */
static inline void
-sclp_ascebc_str(unsigned char *str, int nr)
+sclp_ascebc_str(char *str, int nr)
{
(MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
}
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 8332788681c4..6c90aa725f23 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -40,6 +40,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
+ sclp.has_sipl = !!(sccb->cbl & 0x02);
+ sclp.has_sipl_g2 = !!(sccb->cbl & 0x04);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
@@ -93,6 +95,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
sclp.hmfai = sccb->hmfai;
+ sclp.has_dirq = !!(sccb->cpudirq & 0x80);
}
/*
@@ -144,7 +147,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
void __init sclp_early_detect(void)
{
- void *sccb = &sclp_early_sccb;
+ void *sccb = sclp_early_sccb;
sclp_early_facilities_detect(sccb);
sclp_early_init_core_info(sccb);
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 387c114ded3f..7737470f8498 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -16,7 +16,7 @@
static struct read_info_sccb __bootdata(sclp_info_sccb);
static int __bootdata(sclp_info_sccb_valid);
-char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
+char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
/*
* Used to keep track of the size of the event masks. Qemu until version 2.11
@@ -91,8 +91,8 @@ static void sclp_early_print_lm(const char *str, unsigned int len)
struct mto *mto;
struct go *go;
- sccb = (struct write_sccb *) &sclp_early_sccb;
- end = (unsigned char *) sccb + sizeof(sclp_early_sccb) - 1;
+ sccb = (struct write_sccb *) sclp_early_sccb;
+ end = (unsigned char *) sccb + EARLY_SCCB_SIZE - 1;
memset(sccb, 0, sizeof(*sccb));
ptr = (unsigned char *) &sccb->msg.mdb.mto;
offset = 0;
@@ -139,9 +139,9 @@ static void sclp_early_print_vt220(const char *str, unsigned int len)
{
struct vt220_sccb *sccb;
- sccb = (struct vt220_sccb *) &sclp_early_sccb;
- if (sizeof(*sccb) + len >= sizeof(sclp_early_sccb))
- len = sizeof(sclp_early_sccb) - sizeof(*sccb);
+ sccb = (struct vt220_sccb *) sclp_early_sccb;
+ if (sizeof(*sccb) + len >= EARLY_SCCB_SIZE)
+ len = EARLY_SCCB_SIZE - sizeof(*sccb);
memset(sccb, 0, sizeof(*sccb));
memcpy(&sccb->msg.data, str, len);
sccb->header.length = sizeof(*sccb) + len;
@@ -199,7 +199,7 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE);
*have_linemode = *have_vt220 = 0;
- sccb = (struct init_sccb *) &sclp_early_sccb;
+ sccb = (struct init_sccb *) sclp_early_sccb;
receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK;
rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask);
@@ -304,7 +304,7 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
void __weak __init add_mem_detect_block(u64 start, u64 end) {}
int __init sclp_early_read_storage_info(void)
{
- struct read_storage_sccb *sccb = (struct read_storage_sccb *)&sclp_early_sccb;
+ struct read_storage_sccb *sccb = (struct read_storage_sccb *)sclp_early_sccb;
int rc, id, max_id = 0;
unsigned long rn, rzm;
sclp_cmdw_t command;
@@ -320,8 +320,8 @@ int __init sclp_early_read_storage_info(void)
rzm <<= 20;
for (id = 0; id <= max_id; id++) {
- memset(sclp_early_sccb, 0, sizeof(sclp_early_sccb));
- sccb->header.length = sizeof(sclp_early_sccb);
+ memset(sclp_early_sccb, 0, EARLY_SCCB_SIZE);
+ sccb->header.length = EARLY_SCCB_SIZE;
command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8);
rc = sclp_early_cmd(command, sccb);
if (rc)
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 8e0b69a2f11a..13f97fd73aca 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -29,7 +29,7 @@ static struct sclp_register sclp_sdias_register = {
.send_mask = EVTYP_SDIAS_MASK,
};
-static struct sdias_sccb sccb __attribute__((aligned(4096)));
+static struct sdias_sccb *sclp_sdias_sccb;
static struct sdias_evbuf sdias_evbuf;
static DECLARE_COMPLETION(evbuf_accepted);
@@ -58,6 +58,7 @@ static void sdias_callback(struct sclp_req *request, void *data)
static int sdias_sclp_send(struct sclp_req *req)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
int retries;
int rc;
@@ -78,16 +79,16 @@ static int sdias_sclp_send(struct sclp_req *req)
continue;
}
/* if not accepted, retry */
- if (!(sccb.evbuf.hdr.flags & 0x80)) {
+ if (!(sccb->evbuf.hdr.flags & 0x80)) {
TRACE("sclp request failed: flags=%x\n",
- sccb.evbuf.hdr.flags);
+ sccb->evbuf.hdr.flags);
continue;
}
/*
* for the sync interface the response is in the initial sccb
*/
if (!sclp_sdias_register.receiver_fn) {
- memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
+ memcpy(&sdias_evbuf, &sccb->evbuf, sizeof(sdias_evbuf));
TRACE("sync request done\n");
return 0;
}
@@ -104,23 +105,24 @@ static int sdias_sclp_send(struct sclp_req *req)
*/
int sclp_sdias_blk_count(void)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
- memset(&sccb, 0, sizeof(sccb));
+ memset(sccb, 0, sizeof(*sccb));
memset(&request, 0, sizeof(request));
- sccb.hdr.length = sizeof(sccb);
- sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
- sccb.evbuf.hdr.type = EVTYP_SDIAS;
- sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
- sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
- sccb.evbuf.event_id = 4712;
- sccb.evbuf.dbs = 1;
+ sccb->hdr.length = sizeof(*sccb);
+ sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb->evbuf.hdr.type = EVTYP_SDIAS;
+ sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
+ sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+ sccb->evbuf.event_id = 4712;
+ sccb->evbuf.dbs = 1;
- request.sccb = &sccb;
+ request.sccb = sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
@@ -130,8 +132,8 @@ int sclp_sdias_blk_count(void)
pr_err("sclp_send failed for get_nr_blocks\n");
goto out;
}
- if (sccb.hdr.response_code != 0x0020) {
- TRACE("send failed: %x\n", sccb.hdr.response_code);
+ if (sccb->hdr.response_code != 0x0020) {
+ TRACE("send failed: %x\n", sccb->hdr.response_code);
rc = -EIO;
goto out;
}
@@ -163,30 +165,31 @@ out:
*/
int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
- memset(&sccb, 0, sizeof(sccb));
+ memset(sccb, 0, sizeof(*sccb));
memset(&request, 0, sizeof(request));
- sccb.hdr.length = sizeof(sccb);
- sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
- sccb.evbuf.hdr.type = EVTYP_SDIAS;
- sccb.evbuf.hdr.flags = 0;
- sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
- sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
- sccb.evbuf.event_id = 4712;
- sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
- sccb.evbuf.event_status = 0;
- sccb.evbuf.blk_cnt = nr_blks;
- sccb.evbuf.asa = (unsigned long)dest;
- sccb.evbuf.fbn = start_blk;
- sccb.evbuf.lbn = 0;
- sccb.evbuf.dbs = 1;
-
- request.sccb = &sccb;
+ sccb->hdr.length = sizeof(*sccb);
+ sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb->evbuf.hdr.type = EVTYP_SDIAS;
+ sccb->evbuf.hdr.flags = 0;
+ sccb->evbuf.event_qual = SDIAS_EQ_STORE_DATA;
+ sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+ sccb->evbuf.event_id = 4712;
+ sccb->evbuf.asa_size = SDIAS_ASA_SIZE_64;
+ sccb->evbuf.event_status = 0;
+ sccb->evbuf.blk_cnt = nr_blks;
+ sccb->evbuf.asa = (unsigned long)dest;
+ sccb->evbuf.fbn = start_blk;
+ sccb->evbuf.lbn = 0;
+ sccb->evbuf.dbs = 1;
+
+ request.sccb = sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
@@ -196,8 +199,8 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
pr_err("sclp_send failed: %x\n", rc);
goto out;
}
- if (sccb.hdr.response_code != 0x0020) {
- TRACE("copy failed: %x\n", sccb.hdr.response_code);
+ if (sccb->hdr.response_code != 0x0020) {
+ TRACE("copy failed: %x\n", sccb->hdr.response_code);
rc = -EIO;
goto out;
}
@@ -256,6 +259,8 @@ int __init sclp_sdias_init(void)
{
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return 0;
+ sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+ BUG_ON(!sclp_sdias_sccb);
sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
debug_register_view(sdias_dbf, &debug_sprintf_view);
debug_set_level(sdias_dbf, 6);
@@ -264,6 +269,7 @@ int __init sclp_sdias_init(void)
if (sclp_sdias_init_async() == 0)
goto out;
TRACE("init failed\n");
+ free_page((unsigned long) sclp_sdias_sccb);
return -ENODEV;
out:
TRACE("init done\n");
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index fc206c9d1c56..ea4253939555 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -290,7 +290,7 @@ tapechar_open (struct inode *inode, struct file *filp)
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
} else
tape_put_device(device);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 2b0c36c2c568..98d7fc152e32 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return PTR_ERR(tp);
rc = raw3270_add_view(&tp->view, &tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ tty->index + RAW3270_FIRSTMINOR,
+ RAW3270_VIEW_LOCK_BH);
if (rc) {
tty3270_free_view(tp);
return rc;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 76d3c50bf078..405a60538630 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -51,7 +51,7 @@ static struct dentry *zcore_dir;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
-static struct ipl_parameter_block *ipl_block;
+static struct ipl_parameter_block *zcore_ipl_block;
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -182,8 +182,8 @@ static const struct file_operations zcore_memmap_fops = {
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (ipl_block) {
- diag308(DIAG308_SET, ipl_block);
+ if (zcore_ipl_block) {
+ diag308(DIAG308_SET, zcore_ipl_block);
diag308(DIAG308_LOAD_CLEAR, NULL);
}
return count;
@@ -191,7 +191,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
@@ -265,18 +265,20 @@ static int __init zcore_reipl_init(void)
return rc;
if (ipib_info.ipib == 0)
return 0;
- ipl_block = (void *) __get_free_page(GFP_KERNEL);
- if (!ipl_block)
+ zcore_ipl_block = (void *) __get_free_page(GFP_KERNEL);
+ if (!zcore_ipl_block)
return -ENOMEM;
if (ipib_info.ipib < sclp.hsa_size)
- rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
+ rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib,
+ PAGE_SIZE);
else
- rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
- if (rc || (__force u32)csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+ rc = memcpy_real(zcore_ipl_block, (void *) ipib_info.ipib,
+ PAGE_SIZE);
+ if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
- free_page((unsigned long) ipl_block);
- ipl_block = NULL;
+ free_page((unsigned long) zcore_ipl_block);
+ zcore_ipl_block = NULL;
}
return 0;
}
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index f230516abb96..f6a8db04177c 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -20,5 +20,6 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
-vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o
+vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
+ vfio_ccw_async.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index a45011e4529e..4534afc63591 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -27,6 +27,8 @@
static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];
+static struct kmem_cache *airq_iv_cache;
+
/**
* register_adapter_interrupt() - register adapter interrupt handler
* @airq: pointer to adapter interrupt descriptor
@@ -95,7 +97,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
- airq->handler(airq);
+ airq->handler(airq, !tpi_info->directed_irq);
rcu_read_unlock();
return IRQ_HANDLED;
@@ -129,10 +131,21 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
if (!iv)
goto out;
iv->bits = bits;
+ iv->flags = flags;
size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
- iv->vector = kzalloc(size, GFP_KERNEL);
- if (!iv->vector)
- goto out_free;
+
+ if (flags & AIRQ_IV_CACHELINE) {
+ if ((cache_line_size() * BITS_PER_BYTE) < bits)
+ goto out_free;
+
+ iv->vector = kmem_cache_zalloc(airq_iv_cache, GFP_KERNEL);
+ if (!iv->vector)
+ goto out_free;
+ } else {
+ iv->vector = kzalloc(size, GFP_KERNEL);
+ if (!iv->vector)
+ goto out_free;
+ }
if (flags & AIRQ_IV_ALLOC) {
iv->avail = kmalloc(size, GFP_KERNEL);
if (!iv->avail)
@@ -165,7 +178,10 @@ out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
- kfree(iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE)
+ kmem_cache_free(airq_iv_cache, iv->vector);
+ else
+ kfree(iv->vector);
kfree(iv);
out:
return NULL;
@@ -181,7 +197,10 @@ void airq_iv_release(struct airq_iv *iv)
kfree(iv->data);
kfree(iv->ptr);
kfree(iv->bitlock);
- kfree(iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE)
+ kmem_cache_free(airq_iv_cache, iv->vector);
+ else
+ kfree(iv->vector);
kfree(iv->avail);
kfree(iv);
}
@@ -275,3 +294,13 @@ unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
+
+static int __init airq_init(void)
+{
+ airq_iv_cache = kmem_cache_create("airq_iv_cache", cache_line_size(),
+ cache_line_size(), 0, NULL);
+ if (!airq_iv_cache)
+ return -ENOMEM;
+ return 0;
+}
+subsys_initcall(airq_init);
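
The airq hunks above add an AIRQ_IV_CACHELINE mode: instead of a kzalloc'ed bit vector, the vector comes from a kmem_cache whose objects are one cache line in size and alignment, so a small per-device vector never shares a line with unrelated data. A minimal sketch of such a cache, with hypothetical example_* names:

#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *example_iv_cache;

static int __init example_iv_init(void)
{
	/* size == align == L1 cache line, so objects never straddle one */
	example_iv_cache = kmem_cache_create("example_iv_cache",
					     cache_line_size(),
					     cache_line_size(), 0, NULL);
	return example_iv_cache ? 0 : -ENOMEM;
}
subsys_initcall(example_iv_init);

Allocation then uses kmem_cache_zalloc() and the matching kmem_cache_free(), as in the hunks above.
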
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index de744ca158fd..18f5458f90e8 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -564,7 +564,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
}
static struct irqaction io_interrupt = {
- .name = "IO",
+ .name = "I/O",
.handler = do_cio_interrupt,
};
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 9811fd8a0c73..06a91743335a 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -51,7 +51,7 @@ struct tpi_info {
struct subchannel_id schid;
u32 intparm;
u32 adapter_IO:1;
- u32 :1;
+ u32 directed_irq:1;
u32 isc:3;
u32 :27;
u32 type:3;
@@ -115,7 +115,7 @@ struct subchannel {
struct schib_config config;
} __attribute__ ((aligned(8)));
-DECLARE_PER_CPU(struct irb, cio_irb);
+DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 14d328338ce2..08eb10283b18 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -233,6 +233,7 @@ int hsch(struct subchannel_id schid)
return ccode;
}
+EXPORT_SYMBOL(hsch);
static inline int __xsch(struct subchannel_id schid)
{
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a6f7c2986b94..a06944399865 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -228,9 +228,6 @@ struct qdio_q {
*/
int first_to_check;
- /* first_to_check of the last time */
- int last_move;
-
/* beginning position for calling the program */
int first_to_kick;
@@ -341,8 +338,7 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
-#define pci_out_supported(q) \
- (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
+#define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index d2f98e5829d4..35410e6eda2e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -121,15 +121,14 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
q->timestamp, last_ai_time);
- seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n",
- atomic_read(&q->nr_buf_used),
- q->first_to_check, q->last_move);
+ seq_printf(m, "nr_used: %d ftc: %d\n",
+ atomic_read(&q->nr_buf_used), q->first_to_check);
if (q->is_input_q) {
seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
q->u.in.polling, q->u.in.ack_start,
q->u.in.ack_count);
- seq_printf(m, "DSCI: %d IRQs disabled: %u\n",
- *(u32 *)q->irq_ptr->dsci,
+ seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
+ *(u8 *)q->irq_ptr->dsci,
test_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state));
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9537e656e927..cfce255521ac 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -371,7 +371,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
static inline void qdio_sync_queues(struct qdio_q *q)
{
/* PCI capable outbound queues will also be scanned so sync them too */
- if (pci_out_supported(q))
+ if (pci_out_supported(q->irq_ptr))
qdio_siga_sync_all(q);
else
qdio_siga_sync_q(q);
@@ -415,7 +415,8 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count)
q->q_stats.nr_sbals[pos]++;
}
-static void process_buffer_error(struct qdio_q *q, int count)
+static void process_buffer_error(struct qdio_q *q, unsigned int start,
+ int count)
{
unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
SLSB_P_OUTPUT_NOT_INIT;
@@ -424,29 +425,29 @@ static void process_buffer_error(struct qdio_q *q, int count)
/* special handling for no target buffer empty */
if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
- q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
+ q->sbal[start]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
- q->first_to_check);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
goto set;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
- DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+ DBF_ERROR("FTC:%3d C:%3d", start, count);
DBF_ERROR("F14:%2x F15:%2x",
- q->sbal[q->first_to_check]->element[14].sflags,
- q->sbal[q->first_to_check]->element[15].sflags);
+ q->sbal[start]->element[14].sflags,
+ q->sbal[start]->element[15].sflags);
set:
/*
* Interrupts may be avoided as long as the error is present
* so change the buffer state immediately to avoid starvation.
*/
- set_buf_states(q, q->first_to_check, state, count);
+ set_buf_states(q, start, state, count);
}
-static inline void inbound_primed(struct qdio_q *q, int count)
+static inline void inbound_primed(struct qdio_q *q, unsigned int start,
+ int count)
{
int new;
@@ -457,7 +458,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
if (!q->u.in.polling) {
q->u.in.polling = 1;
q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
+ q->u.in.ack_start = start;
return;
}
@@ -465,7 +466,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.ack_count);
q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
+ q->u.in.ack_start = start;
return;
}
@@ -473,7 +474,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
* ACK the newest buffer. The ACK will be removed in qdio_stop_polling
* or by the next inbound run.
*/
- new = add_buf(q->first_to_check, count - 1);
+ new = add_buf(start, count - 1);
if (q->u.in.polling) {
/* reset the previous ACK but first set the new one */
set_buf_state(q, new, SLSB_P_INPUT_ACK);
@@ -488,10 +489,10 @@ static inline void inbound_primed(struct qdio_q *q, int count)
if (!count)
return;
/* need to change ALL buffers to get more interrupts */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+ set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
}
-static int get_inbound_buffer_frontier(struct qdio_q *q)
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -504,64 +505,58 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
if (!count)
- goto out;
+ return 0;
/*
* No siga sync here: either a PCI interrupt or the thin-interrupt
* handler already synced the queues.
*/
- count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+ count = get_buf_states(q, start, &state, count, 1, 0);
if (!count)
- goto out;
+ return 0;
switch (state) {
case SLSB_P_INPUT_PRIMED:
- inbound_primed(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ inbound_primed(q, start, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
- break;
+ return count;
case SLSB_P_INPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
- q->nr, q->first_to_check);
- break;
+ q->nr, start);
+ return 0;
default:
WARN_ON_ONCE(1);
+ return 0;
}
-out:
- return q->first_to_check;
}
-static int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int bufnr;
+ int count;
- bufnr = get_inbound_buffer_frontier(q);
+ count = get_inbound_buffer_frontier(q, start);
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
- if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_tod_clock();
- return 1;
- } else
- return 0;
+ if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+ q->u.in.timestamp = get_tod_clock();
+
+ return count;
}
-static inline int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
@@ -570,7 +565,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
+ get_buf_state(q, start, &state, 0);
if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
@@ -588,8 +583,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
- q->first_to_check);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
return 1;
} else
return 0;
@@ -637,17 +631,13 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
return phys_aob;
}
-static void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
{
int start = q->first_to_kick;
- int end = q->first_to_check;
- int count;
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
- count = sub_buf(end, start);
-
if (q->is_input_q) {
qperf_inc(q, inbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
@@ -663,7 +653,7 @@ static void qdio_kick_handler(struct qdio_q *q)
q->irq_ptr->int_parm);
/* for the next time */
- q->first_to_kick = end;
+ q->first_to_kick = add_buf(start, count);
q->qdio_error = 0;
}
@@ -678,14 +668,20 @@ static inline int qdio_tasklet_schedule(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
- qdio_kick_handler(q);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+ qdio_kick_handler(q, count);
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
@@ -697,7 +693,7 @@ static void __qdio_inbound_processing(struct qdio_q *q)
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -709,7 +705,7 @@ void qdio_inbound_processing(unsigned long data)
__qdio_inbound_processing(q);
}
-static int get_outbound_buffer_frontier(struct qdio_q *q)
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -718,7 +714,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
- !pci_out_supported(q)) ||
+ !pci_out_supported(q->irq_ptr)) ||
(queue_type(q) == QDIO_IQDIO_QFMT &&
multicast_outbound(q)))
qdio_siga_sync_q(q);
@@ -729,12 +725,11 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
if (!count)
- goto out;
+ return 0;
- count = get_buf_states(q, q->first_to_check, &state, count, 0,
- q->u.out.use_cq);
+ count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
if (!count)
- goto out;
+ return 0;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
@@ -743,34 +738,29 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
"out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
- q->first_to_check = add_buf(q->first_to_check, count);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
-
- break;
+ return count;
case SLSB_P_OUTPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr);
- break;
+ return 0;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
- break;
+ return 0;
default:
WARN_ON_ONCE(1);
+ return 0;
}
-
-out:
- return q->first_to_check;
}
/* all buffers processed? */
@@ -779,18 +769,16 @@ static inline int qdio_outbound_q_done(struct qdio_q *q)
return atomic_read(&q->nr_buf_used) == 0;
}
-static inline int qdio_outbound_q_moved(struct qdio_q *q)
+static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int bufnr;
+ int count;
- bufnr = get_outbound_buffer_frontier(q);
+ count = get_outbound_buffer_frontier(q, start);
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
+ if (count)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
- return 1;
- } else
- return 0;
+
+ return count;
}
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
@@ -837,15 +825,21 @@ retry:
static void __qdio_outbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_outbound);
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
- if (qdio_outbound_q_moved(q))
- qdio_kick_handler(q);
+ count = qdio_outbound_q_moved(q, start);
+ if (count) {
+ q->first_to_check = add_buf(start, count);
+ qdio_kick_handler(q, count);
+ }
- if (queue_type(q) == QDIO_ZFCP_QFMT)
- if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
- goto sched;
+ if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
+ !qdio_outbound_q_done(q))
+ goto sched;
if (q->u.out.pci_out_enabled)
return;
@@ -881,37 +875,40 @@ void qdio_outbound_timer(struct timer_list *t)
qdio_tasklet_schedule(q);
}
-static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
struct qdio_q *out;
int i;
- if (!pci_out_supported(q))
+ if (!pci_out_supported(irq))
return;
- for_each_output_queue(q->irq_ptr, out, i)
+ for_each_output_queue(irq, out, i)
if (!qdio_outbound_q_done(out))
qdio_tasklet_schedule(out);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
if (need_siga_sync(q) && need_siga_sync_after_ai(q))
qdio_sync_queues(q);
- /*
- * The interrupt could be caused by a PCI request. Check the
- * PCI capable outbound queues.
- */
- qdio_check_outbound_after_thinint(q);
+ /* The interrupt could be caused by a PCI request: */
+ qdio_check_outbound_pci_queues(q->irq_ptr);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
- qdio_kick_handler(q);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+ qdio_kick_handler(q, count);
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
return;
@@ -922,7 +919,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -976,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ if (!pci_out_supported(irq_ptr))
return;
for_each_output_queue(irq_ptr, q, i) {
@@ -1642,7 +1639,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q, q->first_to_check))
goto rescan;
return 0;
@@ -1672,12 +1669,14 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
int *error)
{
struct qdio_q *q;
- int start, end;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ unsigned int start;
+ int count;
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
+ start = q->first_to_check;
/*
* Cannot rely on automatic sync after interrupt since queues may
@@ -1686,25 +1685,27 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
if (need_siga_sync(q))
qdio_sync_queues(q);
- /* check the PCI capable outbound queues. */
- qdio_check_outbound_after_thinint(q);
+ qdio_check_outbound_pci_queues(irq_ptr);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return 0;
+ start = add_buf(start, count);
+ q->first_to_check = start;
+
/* Note: upper-layer MUST stop processing immediately here ... */
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return -EIO;
- start = q->first_to_kick;
- end = q->first_to_check;
- *bufnr = start;
+ *bufnr = q->first_to_kick;
*error = q->qdio_error;
/* for the next time */
- q->first_to_kick = end;
+ q->first_to_kick = add_buf(q->first_to_kick, count);
q->qdio_error = 0;
- return sub_buf(end, start);
+
+ return count;
}
EXPORT_SYMBOL(qdio_get_next_buffers);
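
The rework above drops the first_to_check/first_to_kick pair bookkeeping in favour of a plain buffer count and then advances the ring indices with add_buf(). A minimal user-space sketch of that wrap-around arithmetic, assuming add_buf()/sub_buf() are the usual power-of-two modular helpers over a 128-slot ring (the helper bodies here are an assumption, not copied from the qdio headers):

#include <assert.h>
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128      /* power of two, so "& (N - 1)" wraps */

/* advance a buffer index by count, wrapping at the end of the ring */
static unsigned int add_buf(unsigned int bufnr, unsigned int count)
{
        return (bufnr + count) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}

/* distance from start to end, also modulo the ring size */
static unsigned int sub_buf(unsigned int end, unsigned int start)
{
        return (end - start) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}

int main(void)
{
        unsigned int first_to_check = 120;
        unsigned int count = 10;                /* scanner found 10 buffers */
        unsigned int next = add_buf(first_to_check, count);

        assert(next == 2);                      /* wrapped past buffer 127 */
        assert(sub_buf(next, first_to_check) == count);
        printf("first_to_check advances from %u to %u\n", first_to_check, next);
        return 0;
}
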
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index a59887fad13e..99d7d2566a3a 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -523,7 +523,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
irq_ptr->schid.sch_no,
is_thinint_irq(irq_ptr),
(irq_ptr->sch_token) ? 1 : 0,
- (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
+ pci_out_supported(irq_ptr) ? 1 : 0,
css_general_characteristics.aif_tdd,
(irq_ptr->siga_flag.input) ? "R" : " ",
(irq_ptr->siga_flag.output) ? "W" : " ",
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 07dea602205b..28d59ac2204c 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -40,7 +40,7 @@ static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);
/* Adapter interrupt definitions */
-static void tiqdio_thinint_handler(struct airq_struct *airq);
+static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating);
static struct airq_struct tiqdio_airq = {
.handler = tiqdio_thinint_handler,
@@ -179,7 +179,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
*/
-static void tiqdio_thinint_handler(struct airq_struct *airq)
+static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
u32 si_used = clear_shared_ind();
struct qdio_q *q;
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
new file mode 100644
index 000000000000..8c1d2357ef5b
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Async I/O region for vfio_ccw
+ *
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Cornelia Huck <cohuck@redhat.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "vfio_ccw_private.h"
+
+static ssize_t vfio_ccw_async_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_cmd_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_async_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_cmd_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ if (!mutex_trylock(&private->io_mutex))
+ return -EAGAIN;
+
+ region = private->region[i].data;
+ if (copy_from_user((void *)region + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_ASYNC_REQ);
+
+ ret = region->ret_code ? region->ret_code : count;
+
+out_unlock:
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+ .read = vfio_ccw_async_region_read,
+ .write = vfio_ccw_async_region_write,
+ .release = vfio_ccw_async_region_release,
+};
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD,
+ &vfio_ccw_async_region_ops,
+ sizeof(struct ccw_cmd_region),
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE,
+ private->cmd_region);
+}
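
Both handlers in this new file split the file offset into a region index and an offset inside that region before bounds-checking against sizeof(*region) and taking io_mutex (the write path uses mutex_trylock() and returns -EAGAIN instead of sleeping). A small sketch of the offset encoding, using the shift/mask values that the VFIO_CCW_OFFSET_* macros in vfio_ccw_private.h define later in this series:

#include <stdint.h>
#include <stdio.h>

#define CCW_OFFSET_SHIFT   10
#define CCW_OFFSET_MASK    ((UINT64_C(1) << CCW_OFFSET_SHIFT) - 1)

/* region index -> starting file offset of that region */
static uint64_t index_to_offset(unsigned int index)
{
        return (uint64_t)index << CCW_OFFSET_SHIFT;
}

/* file offset -> (region index, byte offset inside the region) */
static void decode(uint64_t ppos, unsigned int *index, uint64_t *pos)
{
        *index = ppos >> CCW_OFFSET_SHIFT;
        *pos   = ppos & CCW_OFFSET_MASK;
}

int main(void)
{
        unsigned int index;
        uint64_t pos;

        /* byte 8 of region 1 (the first "additional" region) */
        decode(index_to_offset(1) + 8, &index, &pos);
        printf("region index %u, offset %llu\n", index, (unsigned long long)pos);
        return 0;
}
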
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 384b3987eeb4..0e79799e9a71 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -362,6 +362,7 @@ static void cp_unpin_free(struct channel_program *cp)
struct ccwchain *chain, *temp;
int i;
+ cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
pfn_array_table_unpin_free(chain->ch_pat + i,
@@ -732,6 +733,9 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
cp->orb.cmd.c64 = 1;
+ if (!ret)
+ cp->initialized = true;
+
return ret;
}
@@ -746,7 +750,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
void cp_free(struct channel_program *cp)
{
- cp_unpin_free(cp);
+ if (cp->initialized)
+ cp_unpin_free(cp);
}
/**
@@ -791,6 +796,10 @@ int cp_prefetch(struct channel_program *cp)
struct ccwchain *chain;
int len, idx, ret;
+ /* this is an error in the caller */
+ if (!cp->initialized)
+ return -EINVAL;
+
list_for_each_entry(chain, &cp->ccwchain_list, next) {
len = chain->ch_len;
for (idx = 0; idx < len; idx++) {
@@ -826,6 +835,10 @@ union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
struct ccwchain *chain;
struct ccw1 *cpa;
+ /* this is an error in the caller */
+ if (!cp->initialized)
+ return NULL;
+
orb = &cp->orb;
orb->cmd.intparm = intparm;
@@ -862,6 +875,9 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
u32 cpa = scsw->cmd.cpa;
u32 ccw_head;
+ if (!cp->initialized)
+ return;
+
/*
* LATER:
* For now, only update the cmd.cpa part. We may need to deal with
@@ -898,6 +914,9 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
struct ccwchain *chain;
int i;
+ if (!cp->initialized)
+ return false;
+
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
if (pfn_array_table_iova_pinned(chain->ch_pat + i,
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index a4b74fb1aa57..3c20cd208da5 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -21,6 +21,7 @@
* @ccwchain_list: list head of ccwchains
* @orb: orb for the currently processed ssch request
* @mdev: the mediated device to perform page pinning/unpinning
+ * @initialized: whether this instance is actually initialized
*
* @ccwchain_list is the head of a ccwchain list, which contains the
* translated result of the guest channel program pointed out by
@@ -30,6 +31,7 @@ struct channel_program {
struct list_head ccwchain_list;
union orb orb;
struct device *mdev;
+ bool initialized;
};
extern int cp_init(struct channel_program *cp, struct device *mdev,
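
The new initialized flag lets cp_free(), cp_prefetch(), cp_get_orb() and friends bail out cleanly when cp_init() never completed, instead of walking an empty or half-built chain list. A minimal sketch of the pattern, with a hypothetical struct prog standing in for struct channel_program:

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct channel_program. */
struct prog {
        bool initialized;
        int *buf;
};

static int prog_init(struct prog *p, size_t n)
{
        p->buf = calloc(n, sizeof(*p->buf));
        if (!p->buf)
                return -1;
        p->initialized = true;
        return 0;
}

static void prog_free(struct prog *p)
{
        if (!p->initialized)    /* never initialized, or already torn down */
                return;
        p->initialized = false; /* clear first, as cp_unpin_free() now does */
        free(p->buf);
        p->buf = NULL;
}

int main(void)
{
        struct prog p = { 0 };

        prog_free(&p);          /* safe: init never ran */
        if (prog_init(&p, 16) == 0) {
                prog_free(&p);
                prog_free(&p);  /* safe: second free is a no-op */
        }
        return 0;
}
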
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 0b3b9de45c60..ee8767f5845a 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -3,9 +3,11 @@
* VFIO based Physical Subchannel device driver
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/module.h>
@@ -23,6 +25,7 @@
struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
+static struct kmem_cache *vfio_ccw_cmd_region;
/*
* Helpers
@@ -40,26 +43,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
if (ret != -EBUSY)
goto out_unlock;
+ iretry = 255;
do {
- iretry = 255;
ret = cio_cancel_halt_clear(sch, &iretry);
- while (ret == -EBUSY) {
- /*
- * Flush all I/O and wait for
- * cancel/halt/clear completion.
- */
- private->completion = &completion;
- spin_unlock_irq(sch->lock);
- wait_for_completion_timeout(&completion, 3*HZ);
+ if (ret == -EIO) {
+ pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ break;
+ }
+
+ /*
+ * Flush all I/O and wait for
+ * cancel/halt/clear completion.
+ */
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
- spin_lock_irq(sch->lock);
- private->completion = NULL;
- flush_workqueue(vfio_ccw_work_q);
- ret = cio_cancel_halt_clear(sch, &iretry);
- };
+ if (ret == -EBUSY)
+ wait_for_completion_timeout(&completion, 3*HZ);
+ private->completion = NULL;
+ flush_workqueue(vfio_ccw_work_q);
+ spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
out_unlock:
@@ -84,7 +91,9 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
if (is_final)
cp_free(&private->cp);
}
+ mutex_lock(&private->io_mutex);
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
+ mutex_unlock(&private->io_mutex);
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
@@ -108,7 +117,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
struct vfio_ccw_private *private;
- int ret;
+ int ret = -ENOMEM;
if (pmcw->qf) {
dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
@@ -122,13 +131,17 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
- if (!private->io_region) {
- kfree(private);
- return -ENOMEM;
- }
+ if (!private->io_region)
+ goto out_free;
+
+ private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
+ GFP_KERNEL | GFP_DMA);
+ if (!private->cmd_region)
+ goto out_free;
private->sch = sch;
dev_set_drvdata(&sch->dev, private);
+ mutex_init(&private->io_mutex);
spin_lock_irq(sch->lock);
private->state = VFIO_CCW_STATE_NOT_OPER;
@@ -152,7 +165,10 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
- kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ if (private->cmd_region)
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+ if (private->io_region)
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
return ret;
}
@@ -167,6 +183,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
@@ -241,7 +258,7 @@ static struct css_driver vfio_ccw_sch_driver = {
static int __init vfio_ccw_sch_init(void)
{
- int ret;
+ int ret = -ENOMEM;
vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
if (!vfio_ccw_work_q)
@@ -251,20 +268,30 @@ static int __init vfio_ccw_sch_init(void)
sizeof(struct ccw_io_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_io_region), NULL);
- if (!vfio_ccw_io_region) {
- destroy_workqueue(vfio_ccw_work_q);
- return -ENOMEM;
- }
+ if (!vfio_ccw_io_region)
+ goto out_err;
+
+ vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
+ sizeof(struct ccw_cmd_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_cmd_region), NULL);
+ if (!vfio_ccw_cmd_region)
+ goto out_err;
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
isc_unregister(VFIO_CCW_ISC);
- kmem_cache_destroy(vfio_ccw_io_region);
- destroy_workqueue(vfio_ccw_work_q);
+ goto out_err;
}
return ret;
+
+out_err:
+ kmem_cache_destroy(vfio_ccw_cmd_region);
+ kmem_cache_destroy(vfio_ccw_io_region);
+ destroy_workqueue(vfio_ccw_work_q);
+ return ret;
}
static void __exit vfio_ccw_sch_exit(void)
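
The probe and module-init reworks above funnel every failure through a single unwind path (out_free / out_err) and check each pointer before releasing it, so partially set up state is torn down exactly once. A rough plain-C sketch of that shape, using free(), which tolerates NULL, in place of the kmem_cache calls:

#include <stdlib.h>

/* Hypothetical resources standing in for the io/cmd region allocations. */
struct ctx {
        void *io_region;
        void *cmd_region;
};

static int ctx_create(struct ctx **out)
{
        struct ctx *c;
        int ret = -1;                    /* "no memory", standing in for -ENOMEM */

        c = calloc(1, sizeof(*c));
        if (!c)
                return ret;

        c->io_region = malloc(64);
        if (!c->io_region)
                goto out_free;

        c->cmd_region = malloc(64);
        if (!c->cmd_region)
                goto out_free;

        *out = c;
        return 0;

out_free:
        /* free() accepts NULL, so one label can unwind every stage */
        free(c->cmd_region);
        free(c->io_region);
        free(c);
        return ret;
}

int main(void)
{
        struct ctx *c = NULL;

        if (ctx_create(&c) == 0) {
                free(c->cmd_region);
                free(c->io_region);
                free(c);
        }
        return 0;
}
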
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index cab17865aafe..49d9d3da0282 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -3,8 +3,10 @@
* Finite state machine for vfio-ccw device handling
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/vfio.h>
@@ -28,9 +30,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
sch = private->sch;
spin_lock_irqsave(sch->lock, flags);
- private->state = VFIO_CCW_STATE_BUSY;
orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+ if (!orb) {
+ ret = -EIO;
+ goto out;
+ }
/* Issue "Start Subchannel" */
ccode = ssch(sch->schid, orb);
@@ -42,6 +47,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
ret = 0;
+ private->state = VFIO_CCW_STATE_CP_PENDING;
break;
case 1: /* Status pending */
case 2: /* Busy */
@@ -64,6 +70,76 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
default:
ret = ccode;
}
+out:
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static int fsm_do_halt(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ unsigned long flags;
+ int ccode;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ /* Issue "Halt Subchannel" */
+ ccode = hsch(sch->schid);
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+ ret = 0;
+ break;
+ case 1: /* Status pending */
+ case 2: /* Busy */
+ ret = -EBUSY;
+ break;
+ case 3: /* Device not operational */
+ ret = -ENODEV;
+ break;
+ default:
+ ret = ccode;
+ }
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static int fsm_do_clear(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ unsigned long flags;
+ int ccode;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ /* Issue "Clear Subchannel" */
+ ccode = csch(sch->schid);
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
+ /* TODO: check what else we might need to clear */
+ ret = 0;
+ break;
+ case 3: /* Device not operational */
+ ret = -ENODEV;
+ break;
+ default:
+ ret = ccode;
+ }
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
@@ -102,6 +178,30 @@ static void fsm_io_busy(struct vfio_ccw_private *private,
private->io_region->ret_code = -EBUSY;
}
+static void fsm_io_retry(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->io_region->ret_code = -EAGAIN;
+}
+
+static void fsm_async_error(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+ pr_err("vfio-ccw: FSM: %s request from state:%d\n",
+ cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
+ cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
+ "<unknown>", private->state);
+ cmd_region->ret_code = -EIO;
+}
+
+static void fsm_async_retry(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->cmd_region->ret_code = -EAGAIN;
+}
+
static void fsm_disabled_irq(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
@@ -130,8 +230,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
struct mdev_device *mdev = private->mdev;
char *errstr = "request";
- private->state = VFIO_CCW_STATE_BUSY;
-
+ private->state = VFIO_CCW_STATE_CP_PROCESSING;
memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -166,22 +265,42 @@ static void fsm_io_request(struct vfio_ccw_private *private,
}
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
- /* XXX: Handle halt. */
+ /* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
- /* XXX: Handle clear. */
+ /* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
}
err_out:
- private->state = VFIO_CCW_STATE_IDLE;
trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
io_region->ret_code, errstr);
}
/*
+ * Deal with an async request from userspace.
+ */
+static void fsm_async_request(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+ switch (cmd_region->command) {
+ case VFIO_CCW_ASYNC_CMD_HSCH:
+ cmd_region->ret_code = fsm_do_halt(private);
+ break;
+ case VFIO_CCW_ASYNC_CMD_CSCH:
+ cmd_region->ret_code = fsm_do_clear(private);
+ break;
+ default:
+ /* should not happen? */
+ cmd_region->ret_code = -EINVAL;
+ }
+}
+
+/*
* Got an interrupt for a normal io (state busy).
*/
static void fsm_irq(struct vfio_ccw_private *private,
@@ -204,21 +323,31 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_STATE_NOT_OPER] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_CP_PROCESSING] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
- [VFIO_CCW_STATE_BUSY] = {
+ [VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
};
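
The jumptable above dispatches on (state, event) pairs, so adding VFIO_CCW_EVENT_ASYNC_REQ and the two CP_* states only means filling in new table slots. A self-contained sketch of the same dispatch pattern, with simplified, hypothetical state and handler names:

#include <stdio.h>

enum state { ST_IDLE, ST_PROCESSING, ST_PENDING, NR_STATES };
enum event { EV_IO_REQ, EV_ASYNC_REQ, EV_INTERRUPT, NR_EVENTS };

typedef void fsm_func_t(enum state st, enum event ev);

static void act_start(enum state st, enum event ev) { printf("start I/O\n"); }
static void act_retry(enum state st, enum event ev) { printf("-EAGAIN, retry later\n"); }
static void act_busy(enum state st, enum event ev)  { printf("-EBUSY\n"); }
static void act_irq(enum state st, enum event ev)   { printf("handle interrupt\n"); }

/* One handler per (state, event) pair, dispatched with a table lookup. */
static fsm_func_t *jumptable[NR_STATES][NR_EVENTS] = {
        [ST_IDLE] = {
                [EV_IO_REQ]    = act_start,
                [EV_ASYNC_REQ] = act_start,
                [EV_INTERRUPT] = act_irq,
        },
        [ST_PROCESSING] = {
                [EV_IO_REQ]    = act_retry,
                [EV_ASYNC_REQ] = act_retry,
                [EV_INTERRUPT] = act_irq,
        },
        [ST_PENDING] = {
                [EV_IO_REQ]    = act_busy,
                [EV_ASYNC_REQ] = act_start,
                [EV_INTERRUPT] = act_irq,
        },
};

static void fsm_event(enum state st, enum event ev)
{
        jumptable[st][ev](st, ev);
}

int main(void)
{
        fsm_event(ST_IDLE, EV_IO_REQ);          /* start I/O */
        fsm_event(ST_PROCESSING, EV_IO_REQ);    /* -EAGAIN */
        fsm_event(ST_PENDING, EV_ASYNC_REQ);    /* halt/clear allowed */
        return 0;
}
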
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index f673e106c041..5eb61116ca6f 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -3,13 +3,17 @@
* Physical device callbacks for vfio_ccw
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <linux/nospec.h>
+#include <linux/slab.h>
#include "vfio_ccw_private.h"
@@ -130,11 +134,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(mdev))
+ if (!vfio_ccw_sch_quiesce(private->sch))
private->state = VFIO_CCW_STATE_STANDBY;
/* The state will be NOT_OPER on error. */
}
+ cp_free(&private->cp);
private->mdev = NULL;
atomic_inc(&private->avail);
@@ -146,20 +151,66 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ int ret;
private->nb.notifier_call = vfio_ccw_mdev_notifier;
- return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &events, &private->nb);
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &private->nb);
+ if (ret)
+ return ret;
+
+ ret = vfio_ccw_register_async_dev_regions(private);
+ if (ret)
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &private->nb);
+ return ret;
}
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
+ int i;
+ if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
+ (private->state != VFIO_CCW_STATE_STANDBY)) {
+ if (!vfio_ccw_mdev_reset(mdev))
+ private->state = VFIO_CCW_STATE_STANDBY;
+ /* The state will be NOT_OPER on error. */
+ }
+
+ cp_free(&private->cp);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&private->nb);
+
+ for (i = 0; i < private->num_regions; i++)
+ private->region[i].ops->release(private, &private->region[i]);
+
+ private->num_regions = 0;
+ kfree(private->region);
+ private->region = NULL;
+}
+
+static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_io_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->io_region;
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+ mutex_unlock(&private->io_mutex);
+ return ret;
}
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
@@ -167,18 +218,54 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
size_t count,
loff_t *ppos)
{
+ unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ switch (index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
+ default:
+ index -= VFIO_CCW_NUM_REGIONS;
+ return private->region[index].ops->read(private, buf, count,
+ ppos);
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_io_region *region;
+ int ret;
- if (*ppos + count > sizeof(*region))
+ if (pos + count > sizeof(*region))
return -EINVAL;
- private = dev_get_drvdata(mdev_parent_dev(mdev));
+ if (!mutex_trylock(&private->io_mutex))
+ return -EAGAIN;
+
region = private->io_region;
- if (copy_to_user(buf, (void *)region + *ppos, count))
- return -EFAULT;
+ if (copy_from_user((void *)region + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
- return count;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+ if (region->ret_code != 0)
+ private->state = VFIO_CCW_STATE_IDLE;
+ ret = (region->ret_code != 0) ? region->ret_code : count;
+
+out_unlock:
+ mutex_unlock(&private->io_mutex);
+ return ret;
}
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
@@ -186,42 +273,47 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
size_t count,
loff_t *ppos)
{
+ unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
struct vfio_ccw_private *private;
- struct ccw_io_region *region;
-
- if (*ppos + count > sizeof(*region))
- return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev));
- if (private->state != VFIO_CCW_STATE_IDLE)
- return -EACCES;
- region = private->io_region;
- if (copy_from_user((void *)region + *ppos, buf, count))
- return -EFAULT;
+ if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
- if (region->ret_code != 0) {
- private->state = VFIO_CCW_STATE_IDLE;
- return region->ret_code;
+ switch (index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
+ default:
+ index -= VFIO_CCW_NUM_REGIONS;
+ return private->region[index].ops->write(private, buf, count,
+ ppos);
}
- return count;
+ return -EINVAL;
}
-static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
+static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
+ struct mdev_device *mdev)
{
+ struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
- info->num_regions = VFIO_CCW_NUM_REGIONS;
+ info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
info->num_irqs = VFIO_CCW_NUM_IRQS;
return 0;
}
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
- u16 *cap_type_id,
- void **cap_type)
+ struct mdev_device *mdev,
+ unsigned long arg)
{
+ struct vfio_ccw_private *private;
+ int i;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
switch (info->index) {
case VFIO_CCW_CONFIG_REGION_INDEX:
info->offset = 0;
@@ -229,9 +321,55 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
info->flags = VFIO_REGION_INFO_FLAG_READ
| VFIO_REGION_INFO_FLAG_WRITE;
return 0;
- default:
- return -EINVAL;
+ default: /* all other regions are handled via capability chain */
+ {
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1 };
+ int ret;
+
+ if (info->index >=
+ VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ info->index = array_index_nospec(info->index,
+ VFIO_CCW_NUM_REGIONS +
+ private->num_regions);
+
+ i = info->index - VFIO_CCW_NUM_REGIONS;
+
+ info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
+ info->size = private->region[i].size;
+ info->flags = private->region[i].flags;
+
+ cap_type.type = private->region[i].type;
+ cap_type.subtype = private->region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
+
+ info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info->argsz < sizeof(*info) + caps.size) {
+ info->argsz = sizeof(*info) + caps.size;
+ info->cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(*info));
+ if (copy_to_user((void __user *)arg + sizeof(*info),
+ caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info->cap_offset = sizeof(*info);
+ }
+
+ kfree(caps.buf);
+
}
+ }
+ return 0;
}
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
@@ -308,6 +446,32 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
}
}
+int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
+ unsigned int subtype,
+ const struct vfio_ccw_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_ccw_region *region;
+
+ region = krealloc(private->region,
+ (private->num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ private->region = region;
+ private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
+ private->region[private->num_regions].subtype = subtype;
+ private->region[private->num_regions].ops = ops;
+ private->region[private->num_regions].size = size;
+ private->region[private->num_regions].flags = flags;
+ private->region[private->num_regions].data = data;
+
+ private->num_regions++;
+
+ return 0;
+}
+
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
unsigned int cmd,
unsigned long arg)
@@ -328,7 +492,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz)
return -EINVAL;
- ret = vfio_ccw_mdev_get_device_info(&info);
+ ret = vfio_ccw_mdev_get_device_info(&info, mdev);
if (ret)
return ret;
@@ -337,8 +501,6 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
case VFIO_DEVICE_GET_REGION_INFO:
{
struct vfio_region_info info;
- u16 cap_type_id = 0;
- void *cap_type = NULL;
minsz = offsetofend(struct vfio_region_info, offset);
@@ -348,8 +510,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz)
return -EINVAL;
- ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
- &cap_type);
+ ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
if (ret)
return ret;
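
vfio_ccw_register_dev_region() grows the region array by one element with krealloc() and appends the new descriptor, which is what lets the async command region be added without a fixed-size table. A user-space sketch of that append pattern, with a hypothetical struct region standing in for struct vfio_ccw_region:

#include <stdlib.h>
#include <stdio.h>

struct region {
        unsigned int subtype;
        size_t size;
};

struct dev {
        struct region *region;
        int num_regions;
};

/* Append one region, growing the array by exactly one slot (krealloc-style). */
static int register_region(struct dev *d, unsigned int subtype, size_t size)
{
        struct region *r;

        r = realloc(d->region, (d->num_regions + 1) * sizeof(*r));
        if (!r)
                return -1;              /* old array is still valid on failure */

        d->region = r;
        d->region[d->num_regions].subtype = subtype;
        d->region[d->num_regions].size = size;
        d->num_regions++;
        return 0;
}

int main(void)
{
        struct dev d = { 0 };

        /* e.g. the async command region */
        if (register_region(&d, 1, 128) == 0)
                printf("%d region(s), first size %zu\n",
                       d.num_regions, d.region[0].size);
        free(d.region);
        return 0;
}
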
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 08e9a7dc9176..f1092c3dc1b1 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -3,9 +3,11 @@
* Private stuff for vfio_ccw driver
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#ifndef _VFIO_CCW_PRIVATE_H_
@@ -19,6 +21,40 @@
#include "css.h"
#include "vfio_ccw_cp.h"
+#define VFIO_CCW_OFFSET_SHIFT 10
+#define VFIO_CCW_OFFSET_TO_INDEX(off) (off >> VFIO_CCW_OFFSET_SHIFT)
+#define VFIO_CCW_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_CCW_OFFSET_SHIFT)
+#define VFIO_CCW_OFFSET_MASK (((u64)(1) << VFIO_CCW_OFFSET_SHIFT) - 1)
+
+/* capability chain handling similar to vfio-pci */
+struct vfio_ccw_private;
+struct vfio_ccw_region;
+
+struct vfio_ccw_regops {
+ ssize_t (*read)(struct vfio_ccw_private *private, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count, loff_t *ppos);
+ void (*release)(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region);
+};
+
+struct vfio_ccw_region {
+ u32 type;
+ u32 subtype;
+ const struct vfio_ccw_regops *ops;
+ void *data;
+ size_t size;
+ u32 flags;
+};
+
+int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
+ unsigned int subtype,
+ const struct vfio_ccw_regops *ops,
+ size_t size, u32 flags, void *data);
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
+
/**
* struct vfio_ccw_private
* @sch: pointer to the subchannel
@@ -28,6 +64,10 @@
* @mdev: pointer to the mediated device
* @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results
+ * @io_mutex: protect against concurrent update of I/O regions
+ * @region: additional regions for other subchannel operations
+ * @cmd_region: MMIO region for asynchronous I/O commands other than START
+ * @num_regions: number of additional regions
* @cp: channel program for the current I/O operation
* @irb: irb info received from interrupt
* @scsw: scsw info
@@ -42,6 +82,10 @@ struct vfio_ccw_private {
struct mdev_device *mdev;
struct notifier_block nb;
struct ccw_io_region *io_region;
+ struct mutex io_mutex;
+ struct vfio_ccw_region *region;
+ struct ccw_cmd_region *cmd_region;
+ int num_regions;
struct channel_program cp;
struct irb irb;
@@ -63,7 +107,8 @@ enum vfio_ccw_state {
VFIO_CCW_STATE_NOT_OPER,
VFIO_CCW_STATE_STANDBY,
VFIO_CCW_STATE_IDLE,
- VFIO_CCW_STATE_BUSY,
+ VFIO_CCW_STATE_CP_PROCESSING,
+ VFIO_CCW_STATE_CP_PENDING,
/* last element! */
NR_VFIO_CCW_STATES
};
@@ -75,6 +120,7 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_NOT_OPER,
VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT,
+ VFIO_CCW_EVENT_ASYNC_REQ,
/* last element! */
NR_VFIO_CCW_EVENTS
};
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1546389d71db..cc30e4f07fff 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -116,7 +116,7 @@ static int user_set_domain;
static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
-static void ap_interrupt_handler(struct airq_struct *airq);
+static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
static int ap_airq_flag;
@@ -393,7 +393,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
*/
-static void ap_interrupt_handler(struct airq_struct *airq)
+static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
inc_irq_stat(IRQIO_APB);
if (!ap_suspend_flag)
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 6a340f2c3556..5ea83dc4f1d7 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -751,8 +751,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq)
__ap_flush_queue(aq);
/* set REMOVE state to prevent new messages are queued in */
aq->state = AP_STATE_REMOVE;
- del_timer_sync(&aq->timeout);
spin_unlock_bh(&aq->lock);
+ del_timer_sync(&aq->timeout);
}
void ap_queue_remove(struct ap_queue *aq)
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 3e85d665c572..45eb0c14b880 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -51,7 +51,8 @@ static debug_info_t *debug_info;
static void __init pkey_debug_init(void)
{
- debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+ /* 5 arguments per dbf entry (including the format string ptr) */
+ debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
debug_register_view(debug_info, &debug_sprintf_view);
debug_set_level(debug_info, 3);
}
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 689c2af7026a..852b8c2299c1 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -525,7 +525,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
filp->private_data = (void *) perms;
atomic_inc(&zcrypt_open_count);
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
/**
@@ -659,6 +659,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
if (mex->outputdatalength < mex->inputdatalength) {
+ func_code = 0;
rc = -EINVAL;
goto out;
}
@@ -742,6 +743,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
if (crt->outputdatalength < crt->inputdatalength) {
+ func_code = 0;
rc = -EINVAL;
goto out;
}
@@ -951,6 +953,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
if (!targets) {
+ func_code = 0;
rc = -ENOMEM;
goto out;
}
@@ -958,6 +961,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
if (copy_from_user(targets, uptr,
target_num * sizeof(*targets))) {
+ func_code = 0;
rc = -EFAULT;
goto out_free;
}
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 7617d21cb296..f63c5c871d3d 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
if (priv->channel[direction] == NULL) {
if (direction == CTCM_WRITE)
channel_free(priv->channel[CTCM_READ]);
+ result = -ENODEV;
goto out_dev;
}
priv->channel[direction]->netdev = dev;
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 0aab90817326..66eac2b9704d 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <net/smc.h>
+#include <asm/pci_insn.h>
#define UTIL_STR_LEN 16
@@ -194,8 +195,6 @@ struct ism_dev {
struct pci_dev *pdev;
struct smcd_dev *smcd;
- void __iomem *ctl;
-
struct ism_sba *sba;
dma_addr_t sba_dma_addr;
DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
@@ -209,13 +208,37 @@ struct ism_dev {
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+static inline void __ism_read_cmd(struct ism_dev *ism, void *data,
+ unsigned long offset, unsigned long len)
+{
+ struct zpci_dev *zdev = to_zpci(ism->pdev);
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, 8);
+
+ while (len > 0) {
+ __zpci_load(data, req, offset);
+ offset += 8;
+ data += 8;
+ len -= 8;
+ }
+}
+
+static inline void __ism_write_cmd(struct ism_dev *ism, void *data,
+ unsigned long offset, unsigned long len)
+{
+ struct zpci_dev *zdev = to_zpci(ism->pdev);
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, len);
+
+ if (len)
+ __zpci_store_block(data, req, offset);
+}
+
static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
unsigned int size)
{
struct zpci_dev *zdev = to_zpci(ism->pdev);
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
- return zpci_write_block(req, data, dmb_req);
+ return __zpci_store_block(data, req, dmb_req);
}
#endif /* S390_ISM_H */
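
__ism_read_cmd() walks the command area in fixed 8-byte steps, issuing one PCI load per step, instead of the former memcpy_fromio() over a mapped BAR. A rough sketch of that chunked access pattern, with pci_load8() as a stand-in stub for the real PCI load instruction and assuming the length is a multiple of 8:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Stub for a PCI load instruction that returns 8 bytes at an offset. */
static uint64_t pci_load8(const unsigned char *bar, unsigned long offset)
{
        uint64_t v;

        memcpy(&v, bar + offset, sizeof(v));
        return v;
}

/* Read 'len' bytes starting at 'offset' in 8-byte chunks. */
static void read_cmd(const unsigned char *bar, void *data,
                     unsigned long offset, unsigned long len)
{
        unsigned char *dst = data;

        while (len > 0) {
                uint64_t v = pci_load8(bar, offset);

                memcpy(dst, &v, sizeof(v));
                offset += 8;
                dst += 8;
                len -= 8;
        }
}

int main(void)
{
        unsigned char bar[32] = "ism response hdr";
        char resp[16 + 1] = { 0 };

        read_cmd(bar, resp, 0, 16);
        printf("%s\n", resp);
        return 0;
}
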
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 3e132592c1fe..4fc2056bd227 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -38,19 +38,18 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
struct ism_req_hdr *req = cmd;
struct ism_resp_hdr *resp = cmd;
- memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
- memcpy_toio(ism->ctl, req, sizeof(*req));
+ __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
+ __ism_write_cmd(ism, req, 0, sizeof(*req));
WRITE_ONCE(resp->ret, ISM_ERROR);
- memcpy_fromio(resp, ism->ctl, sizeof(*resp));
+ __ism_read_cmd(ism, resp, 0, sizeof(*resp));
if (resp->ret) {
debug_text_event(ism_debug_info, 0, "cmd failure");
debug_event(ism_debug_info, 0, resp, sizeof(*resp));
goto out;
}
- memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
- resp->len - sizeof(*resp));
+ __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
return resp->ret;
}
@@ -512,13 +511,9 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_disable;
- ism->ctl = pci_iomap(pdev, 2, 0);
- if (!ism->ctl)
- goto err_resource;
-
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret)
- goto err_unmap;
+ goto err_resource;
dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
dma_set_max_seg_size(&pdev->dev, SZ_1M);
@@ -527,7 +522,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
ISM_NR_DMBS);
if (!ism->smcd)
- goto err_unmap;
+ goto err_resource;
ism->smcd->priv = ism;
ret = ism_dev_init(ism);
@@ -538,8 +533,6 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_free:
smcd_free_dev(ism->smcd);
-err_unmap:
- pci_iounmap(pdev, ism->ctl);
err_resource:
pci_release_mem_regions(pdev);
err_disable:
@@ -568,7 +561,6 @@ static void ism_remove(struct pci_dev *pdev)
ism_dev_exit(ism);
smcd_free_dev(ism->smcd);
- pci_iounmap(pdev, ism->ctl);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
dev_set_drvdata(&pdev->dev, NULL);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index c851cf6e01c4..784a2e76a1b0 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -10,6 +10,7 @@
#ifndef __QETH_CORE_H__
#define __QETH_CORE_H__
+#include <linux/completion.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
@@ -21,6 +22,7 @@
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
@@ -163,6 +165,12 @@ struct qeth_vnicc_info {
bool rx_bcast_enabled;
};
+static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
+ enum qeth_ipa_setadp_cmd func)
+{
+ return (ipa->supported_funcs & func);
+}
+
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
@@ -176,9 +184,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
}
#define qeth_adp_supported(c, f) \
- qeth_is_ipa_supported(&c->options.adp, f)
-#define qeth_adp_enabled(c, f) \
- qeth_is_ipa_enabled(&c->options.adp, f)
+ qeth_is_adp_supported(&c->options.adp, f)
#define qeth_is_supported(c, f) \
qeth_is_ipa_supported(&c->options.ipa4, f)
#define qeth_is_enabled(c, f) \
@@ -217,6 +223,9 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/* QDIO queue and buffer handling */
/*****************************************************************************/
#define QETH_MAX_QUEUES 4
+#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */
+#define QETH_IQD_MCAST_TXQ 0
+#define QETH_IQD_MIN_UCAST_TXQ 1
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
@@ -365,34 +374,6 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
-enum qeth_qdio_buffer_states {
- /*
- * inbound: read out by driver; owned by hardware in order to be filled
- * outbound: owned by driver in order to be filled
- */
- QETH_QDIO_BUF_EMPTY,
- /*
- * inbound: filled by hardware; owned by driver in order to be read out
- * outbound: filled by driver; owned by hardware in order to be sent
- */
- QETH_QDIO_BUF_PRIMED,
- /*
- * inbound: not applicable
- * outbound: identified to be pending in TPQ
- */
- QETH_QDIO_BUF_PENDING,
- /*
- * inbound: not applicable
- * outbound: found in completion queue
- */
- QETH_QDIO_BUF_IN_CQ,
- /*
- * inbound: not applicable
- * outbound: handled via transfer pending / completion queue
- */
- QETH_QDIO_BUF_HANDLED_DELAYED,
-};
-
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
@@ -424,6 +405,19 @@ struct qeth_qdio_q {
int next_buf_to_init;
};
+enum qeth_qdio_out_buffer_state {
+ /* Owned by driver, in order to be filled. */
+ QETH_QDIO_BUF_EMPTY,
+ /* Filled by driver; owned by hardware in order to be sent. */
+ QETH_QDIO_BUF_PRIMED,
+ /* Identified to be pending in TPQ. */
+ QETH_QDIO_BUF_PENDING,
+ /* Found in completion queue. */
+ QETH_QDIO_BUF_IN_CQ,
+ /* Handled via transfer pending / completion queue. */
+ QETH_QDIO_BUF_HANDLED_DELAYED,
+};
+
struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
atomic_t state;
@@ -462,7 +456,6 @@ struct qeth_card_stats {
u64 rx_errors;
u64 rx_dropped;
u64 rx_multicast;
- u64 tx_errors;
};
struct qeth_out_q_stats {
@@ -477,6 +470,7 @@ struct qeth_out_q_stats {
u64 skbs_linearized_fail;
u64 tso_bytes;
u64 packing_mode_switch;
+ u64 stopped;
/* rtnl_link_stats64 */
u64 tx_packets;
@@ -490,14 +484,12 @@ struct qeth_qdio_out_q {
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
struct qeth_out_q_stats stats;
- int queue_no;
+ u8 next_buf_to_fill;
+ u8 max_elements;
+ u8 queue_no;
+ u8 do_pack;
struct qeth_card *card;
atomic_t state;
- int do_pack;
- /*
- * index of buffer to be filled by driver; state EMPTY or PACKING
- */
- int next_buf_to_fill;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
@@ -507,6 +499,11 @@ struct qeth_qdio_out_q {
atomic_t set_pci_flags_count;
};
+static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
+{
+ return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
+}
+
struct qeth_qdio_info {
atomic_t state;
/* input */
@@ -538,7 +535,6 @@ struct qeth_qdio_info {
enum qeth_channel_states {
CH_STATE_UP,
CH_STATE_DOWN,
- CH_STATE_ACTIVATING,
CH_STATE_HALTED,
CH_STATE_STOPPED,
CH_STATE_RCD,
@@ -585,7 +581,10 @@ struct qeth_cmd_buffer {
enum qeth_cmd_buffer_state state;
struct qeth_channel *channel;
struct qeth_reply *reply;
+ long timeout;
unsigned char *data;
+ void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ unsigned int length);
void (*callback)(struct qeth_card *card, struct qeth_channel *channel,
struct qeth_cmd_buffer *iob);
};
@@ -610,6 +609,11 @@ struct qeth_channel {
int io_buf_no;
};
+static inline bool qeth_trylock_channel(struct qeth_channel *channel)
+{
+ return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0;
+}
+
/**
* OSA card related definitions
*/
@@ -631,17 +635,15 @@ struct qeth_seqno {
__u32 pdu_hdr;
__u32 pdu_hdr_ack;
__u16 ipa;
- __u32 pkt_seqno;
};
struct qeth_reply {
struct list_head list;
- wait_queue_head_t wait_q;
+ struct completion received;
int (*callback)(struct qeth_card *, struct qeth_reply *,
unsigned long);
u32 seqno;
unsigned long offset;
- atomic_t received;
int rc;
void *param;
refcount_t refcnt;
@@ -663,7 +665,7 @@ struct qeth_card_info {
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
u8 open_when_online:1;
- int guestlan;
+ u8 is_vm_nic:1;
int mac_bits;
enum qeth_card_types type;
enum qeth_link_types link_type;
@@ -774,18 +776,19 @@ struct qeth_card {
struct qeth_card_options options;
struct workqueue_struct *event_wq;
+ struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
- spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
+ struct mutex ip_lock;
DECLARE_HASHTABLE(ip_mc_htable, 4);
+ struct work_struct rx_mode_work;
struct work_struct kernel_thread_starter;
spinlock_t thread_mask_lock;
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- spinlock_t ip_lock;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
@@ -827,6 +830,15 @@ static inline bool qeth_netdev_is_registered(struct net_device *dev)
return dev->netdev_ops != NULL;
}
+static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
+{
+ if (txq == QETH_IQD_MCAST_TXQ)
+ return dev->num_tx_queues - 1;
+ if (txq == dev->num_tx_queues - 1)
+ return QETH_IQD_MCAST_TXQ;
+ return txq;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
@@ -869,6 +881,16 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
+static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
+{
+ u8 *addr = eth_hdr(skb)->h_dest;
+
+ if (is_multicast_ether_addr(addr))
+ return is_broadcast_ether_addr(addr) ? RTN_BROADCAST :
+ RTN_MULTICAST;
+ return RTN_UNICAST;
+}
+
static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
u8 flags)
{
@@ -922,18 +944,7 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
data, QETH_PROT_IPV6);
}
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
- int ipv);
-static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
- struct sk_buff *skb,
- int ipv, int cast_type)
-{
- if (IS_IQD(card) && cast_type != RTN_UNICAST)
- return card->qdio.out_qs[card->qdio.no_out_queues - 1];
- if (!card->qdio.do_prio_queueing)
- return card->qdio.out_qs[card->qdio.default_out_queue];
- return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
-}
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
@@ -979,12 +990,10 @@ void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_clear_cmd_buffers(struct qeth_channel *);
-void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
-void qeth_prepare_control_data(struct qeth_card *, int,
- struct qeth_cmd_buffer *);
void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length);
@@ -1016,6 +1025,8 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ u8 cast_type, struct net_device *sb_dev);
int qeth_open(struct net_device *dev);
int qeth_stop(struct net_device *dev);
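
The struct qeth_reply change above drops the open-coded atomic counter plus wait_queue_head_t and waits on a struct completion instead, so notifying a reply becomes a plain complete() call. A user-space analogue of that pattern, built on pthreads (the function names mirror the kernel API, but the bodies are this sketch's own):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* User-space analogue of struct completion: one-shot "reply has arrived". */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
};

static void init_completion(struct completion *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = false;
}

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = true;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

struct reply {
        struct completion received;
        int rc;
};

static void *irq_thread(void *arg)
{
        struct reply *r = arg;
        struct timespec ts = { 0, 10 * 1000 * 1000 };

        nanosleep(&ts, NULL);   /* pretend the card answers after a while */
        r->rc = 0;
        complete(&r->received); /* what notifying the reply now boils down to */
        return NULL;
}

int main(void)
{
        struct reply r = { .rc = -5 };  /* "-EIO" until the reply arrives */
        pthread_t t;

        init_completion(&r.received);
        pthread_create(&t, NULL, irq_thread, &r);
        wait_for_completion(&r.received);
        pthread_join(&t, NULL);
        printf("reply rc=%d\n", r.rc);
        return 0;
}
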
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 44bd6f04c145..009f2c0ec504 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -61,13 +61,13 @@ static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;
-static void qeth_send_control_data_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob);
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
-static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
@@ -85,7 +85,7 @@ static void qeth_close_dev_handler(struct work_struct *work)
static const char *qeth_get_cardname(struct qeth_card *card)
{
- if (card->info.guestlan) {
+ if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " Virtual NIC QDIO";
@@ -120,7 +120,7 @@ static const char *qeth_get_cardname(struct qeth_card *card)
/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
- if (card->info.guestlan) {
+ if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return "Virt.NIC QDIO";
@@ -511,7 +511,9 @@ static int __qeth_issue_next_read(struct qeth_card *card)
CARD_DEVID(card));
return -ENOMEM;
}
+
qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
+ iob->callback = qeth_issue_next_read_cb;
QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0);
@@ -542,11 +544,10 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
- reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+ reply = kzalloc(sizeof(*reply), GFP_KERNEL);
if (reply) {
refcount_set(&reply->refcnt, 1);
- atomic_set(&reply->received, 0);
- init_waitqueue_head(&reply->wait_q);
+ init_completion(&reply->received);
}
return reply;
}
@@ -576,10 +577,10 @@ static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
spin_unlock_irq(&card->lock);
}
-static void qeth_notify_reply(struct qeth_reply *reply)
+static void qeth_notify_reply(struct qeth_reply *reply, int reason)
{
- atomic_inc(&reply->received);
- wake_up(&reply->wait_q);
+ reply->rc = reason;
+ complete(&reply->received);
}
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
@@ -664,10 +665,8 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(reply, &card->cmd_waiter_list, list) {
- reply->rc = -EIO;
- qeth_notify_reply(reply);
- }
+ list_for_each_entry(reply, &card->cmd_waiter_list, list)
+ qeth_notify_reply(reply, -EIO);
spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -675,9 +674,6 @@ EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
{
- if (!buffer)
- return 0;
-
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & 0xc0) == 0xc0) {
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
@@ -704,6 +700,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
do {
if (channel->iob[index].state == BUF_STATE_FREE) {
channel->iob[index].state = BUF_STATE_LOCKED;
+ channel->iob[index].timeout = QETH_TIMEOUT;
channel->io_buf_no = (channel->io_buf_no + 1) %
QETH_CMD_BUFFER_NO;
memset(channel->iob[index].data, 0, QETH_BUFSIZE);
@@ -722,7 +719,7 @@ void qeth_release_buffer(struct qeth_channel *channel,
spin_lock_irqsave(&channel->iob_lock, flags);
iob->state = BUF_STATE_FREE;
- iob->callback = qeth_send_control_data_cb;
+ iob->callback = NULL;
if (iob->reply) {
qeth_put_reply(iob->reply);
iob->reply = NULL;
@@ -743,10 +740,8 @@ static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
struct qeth_reply *reply = iob->reply;
- if (reply) {
- reply->rc = rc;
- qeth_notify_reply(reply);
- }
+ if (reply)
+ qeth_notify_reply(reply, rc);
qeth_release_buffer(iob->channel, iob);
}
@@ -780,9 +775,9 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
-static void qeth_send_control_data_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
{
struct qeth_ipa_cmd *cmd = NULL;
struct qeth_reply *reply = NULL;
@@ -846,11 +841,8 @@ static void qeth_send_control_data_cb(struct qeth_card *card,
}
}
- if (rc <= 0) {
- reply->rc = rc;
- qeth_notify_reply(reply);
- }
-
+ if (rc <= 0)
+ qeth_notify_reply(reply, rc);
qeth_put_reply(reply);
out:
@@ -1173,20 +1165,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_release_skbs(buf);
- for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
+ for (i = 0; i < queue->max_elements; ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
buf->buffer->element[i].addr);
buf->is_header[i] = 0;
}
- qeth_scrub_qdio_buffer(buf->buffer,
- QETH_MAX_BUFFER_ELEMENTS(queue->card));
+ qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
-static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
int j;
@@ -1202,19 +1193,18 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
}
}
-void qeth_clear_qdio_buffers(struct qeth_card *card)
+void qeth_drain_output_queues(struct qeth_card *card)
{
int i;
QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- if (card->qdio.out_qs[i]) {
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
- }
+ if (card->qdio.out_qs[i])
+ qeth_drain_output_queue(card->qdio.out_qs[i], false);
}
}
-EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
+EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
static void qeth_free_buffer_pool(struct qeth_card *card)
{
@@ -1273,7 +1263,6 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
break;
channel->iob[cnt].state = BUF_STATE_FREE;
channel->iob[cnt].channel = channel;
- channel->iob[cnt].callback = qeth_send_control_data_cb;
}
if (cnt < QETH_CMD_BUFFER_NO) {
qeth_clean_channel(channel);
@@ -1285,30 +1274,28 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
return 0;
}
-static void qeth_set_single_write_queues(struct qeth_card *card)
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
- if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
- (card->qdio.no_out_queues == 4))
- qeth_free_qdio_buffers(card);
+ unsigned int count = single ? 1 : card->dev->num_tx_queues;
- card->qdio.no_out_queues = 1;
- if (card->qdio.default_out_queue != 0)
- dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+ rtnl_lock();
+ netif_set_real_num_tx_queues(card->dev, count);
+ rtnl_unlock();
- card->qdio.default_out_queue = 0;
-}
+ if (card->qdio.no_out_queues == count)
+ return;
-static void qeth_set_multiple_write_queues(struct qeth_card *card)
-{
- if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
- (card->qdio.no_out_queues == 1)) {
- qeth_free_qdio_buffers(card);
- card->qdio.default_out_queue = 2;
- }
- card->qdio.no_out_queues = 4;
+ if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
+ qeth_free_qdio_queues(card);
+
+ if (count == 1)
+ dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+
+ card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
+ card->qdio.no_out_queues = count;
}
-static void qeth_update_from_chp_desc(struct qeth_card *card)
+static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
@@ -1318,21 +1305,18 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
ccwdev = card->data.ccwdev;
chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
if (!chp_dsc)
- goto out;
+ return -ENOMEM;
card->info.func_level = 0x4100 + chp_dsc->desc;
- if (card->info.type == QETH_CARD_TYPE_IQD)
- goto out;
- /* CHPP field bit 6 == 1 -> single queue */
- if ((chp_dsc->chpp & 0x02) == 0x02)
- qeth_set_single_write_queues(card);
- else
- qeth_set_multiple_write_queues(card);
-out:
+ if (IS_OSD(card) || IS_OSX(card))
+ /* CHPP field bit 6 == 1 -> single queue */
+ qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+
kfree(chp_dsc);
QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+ return 0;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1341,12 +1325,11 @@ static void qeth_init_qdio_info(struct qeth_card *card)
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- card->qdio.no_out_queues = QETH_MAX_QUEUES;
/* inbound */
card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
else
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
@@ -1409,9 +1392,7 @@ static void qeth_setup_card(struct qeth_card *card)
card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
- spin_lock_init(&card->mclock);
spin_lock_init(&card->lock);
- spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
@@ -1451,7 +1432,8 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];
- card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
+ card->event_wq = alloc_ordered_workqueue("%s_event", 0,
+ dev_name(&gdev->dev));
if (!card->event_wq)
goto out_wq;
if (qeth_setup_channel(&card->read, true))
@@ -1571,7 +1553,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
@@ -1644,8 +1626,8 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
card->info.chpid = prcd[30];
card->info.unit_addr2 = prcd[31];
card->info.cula = prcd[63];
- card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
- (prcd[0x11] == _ascebc['M']));
+ card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) &&
+ (prcd[0x11] == _ascebc['M']));
}
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
@@ -1709,13 +1691,11 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
- if (card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSN)
+ if (IS_OSM(card) || IS_OSN(card))
disc = QETH_DISCIPLINE_LAYER2;
- else if (card->info.guestlan)
- disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
- QETH_DISCIPLINE_LAYER3 :
- qeth_vm_detect_layer(card);
+ else if (IS_VM_NIC(card))
+ disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
+ qeth_vm_detect_layer(card);
switch (disc) {
case QETH_DISCIPLINE_LAYER2:
@@ -1771,121 +1751,16 @@ static void qeth_init_func_level(struct qeth_card *card)
}
}
-static int qeth_idx_activate_get_answer(struct qeth_card *card,
- struct qeth_channel *channel,
- void (*reply_cb)(struct qeth_card *,
- struct qeth_channel *,
- struct qeth_cmd_buffer *))
-{
- struct qeth_cmd_buffer *iob;
- int rc;
-
- QETH_DBF_TEXT(SETUP, 2, "idxanswr");
- iob = qeth_get_buffer(channel);
- if (!iob)
- return -ENOMEM;
- iob->callback = reply_cb;
- qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
-
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
-
- if (rc) {
- QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
- wake_up(&card->wait_q);
- return rc;
- }
- rc = wait_event_interruptible_timeout(card->wait_q,
- channel->state == CH_STATE_UP, QETH_TIMEOUT);
- if (rc == -ERESTARTSYS)
- return rc;
- if (channel->state != CH_STATE_UP) {
- rc = -ETIME;
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- } else
- rc = 0;
- return rc;
-}
-
-static int qeth_idx_activate_channel(struct qeth_card *card,
- struct qeth_channel *channel,
- void (*reply_cb)(struct qeth_card *,
- struct qeth_channel *,
- struct qeth_cmd_buffer *))
+static void qeth_idx_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int length)
{
- struct qeth_cmd_buffer *iob;
- __u16 temp;
- __u8 tmp;
- int rc;
- struct ccw_dev_id temp_devid;
-
- QETH_DBF_TEXT(SETUP, 2, "idxactch");
+ qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, length, iob->data);
- iob = qeth_get_buffer(channel);
- if (!iob)
- return -ENOMEM;
- iob->callback = reply_cb;
- qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
- iob->data);
- if (channel == &card->write) {
- memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+ memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
+ QETH_SEQ_NO_LENGTH);
+ if (iob->channel == &card->write)
card->seqno.trans_hdr++;
- } else {
- memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
- }
- tmp = ((u8)card->dev->dev_port) | 0x80;
- memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
- memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
- &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
- memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
- &card->info.func_level, sizeof(__u16));
- ccw_device_get_id(CARD_DDEV(card), &temp_devid);
- memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
- temp = (card->info.cula << 8) + card->info.unit_addr2;
- memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
-
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
-
- if (rc) {
- QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
- rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
- wake_up(&card->wait_q);
- return rc;
- }
- rc = wait_event_interruptible_timeout(card->wait_q,
- channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
- if (rc == -ERESTARTSYS)
- return rc;
- if (channel->state != CH_STATE_ACTIVATING) {
- dev_warn(&channel->ccwdev->dev, "The qeth device driver"
- " failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
- CCW_DEVID(channel->ccwdev));
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
- return -ETIME;
- }
- return qeth_idx_activate_get_answer(card, channel, reply_cb);
}
static int qeth_peer_func_level(int level)
@@ -1897,112 +1772,21 @@ static int qeth_peer_func_level(int level)
return level;
}
-static void qeth_idx_write_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
-{
- __u16 temp;
-
- QETH_DBF_TEXT(SETUP , 2, "idxwrcb");
-
- if (channel->state == CH_STATE_DOWN) {
- channel->state = CH_STATE_ACTIVATING;
- goto out;
- }
-
- if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
- dev_err(&channel->ccwdev->dev,
- "The adapter is used exclusively by another "
- "host\n");
- else
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
- CCW_DEVID(channel->ccwdev));
- goto out;
- }
- memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
- if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
- CCW_DEVID(channel->ccwdev),
- card->info.func_level, temp);
- goto out;
- }
- channel->state = CH_STATE_UP;
-out:
- qeth_release_buffer(channel, iob);
-}
-
-static void qeth_idx_read_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
-{
- __u16 temp;
-
- QETH_DBF_TEXT(SETUP , 2, "idxrdcb");
- if (channel->state == CH_STATE_DOWN) {
- channel->state = CH_STATE_ACTIVATING;
- goto out;
- }
-
- if (qeth_check_idx_response(card, iob->data))
- goto out;
-
- if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
- case QETH_IDX_ACT_ERR_EXCL:
- dev_err(&channel->ccwdev->dev,
- "The adapter is used exclusively by another "
- "host\n");
- break;
- case QETH_IDX_ACT_ERR_AUTH:
- case QETH_IDX_ACT_ERR_AUTH_USER:
- dev_err(&channel->ccwdev->dev,
- "Setting the device online failed because of "
- "insufficient authorization\n");
- break;
- default:
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
- CCW_DEVID(channel->ccwdev));
- }
- QETH_CARD_TEXT_(card, 2, "idxread%c",
- QETH_IDX_ACT_CAUSE_CODE(iob->data));
- goto out;
- }
-
- memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
- if (temp != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
- CCW_DEVID(channel->ccwdev),
- card->info.func_level, temp);
- goto out;
- }
- memcpy(&card->token.issuer_rm_r,
- QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
- QETH_MPC_TOKEN_LENGTH);
- memcpy(&card->info.mcl_level[0],
- QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
- channel->state = CH_STATE_UP;
-out:
- qeth_release_buffer(channel, iob);
-}
-
-void qeth_prepare_control_data(struct qeth_card *card, int len,
- struct qeth_cmd_buffer *iob)
+static void qeth_mpc_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int length)
{
- qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
- iob->callback = qeth_release_buffer_cb;
+ qeth_idx_finalize_cmd(card, iob, length);
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
- card->seqno.trans_hdr++;
memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
&card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
card->seqno.pdu_hdr++;
memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
- QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
+
+ iob->reply->seqno = QETH_IDX_COMMAND_SEQNO;
+ iob->callback = qeth_release_buffer_cb;
}
-EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
/**
* qeth_send_control_data() - send control command to the card
@@ -2035,17 +1819,12 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
void *reply_param)
{
struct qeth_channel *channel = iob->channel;
+ long timeout = iob->timeout;
int rc;
struct qeth_reply *reply = NULL;
- unsigned long timeout, event_timeout;
- struct qeth_ipa_cmd *cmd = NULL;
QETH_CARD_TEXT(card, 2, "sendctl");
- if (card->read_or_write_problem) {
- qeth_release_buffer(channel, iob);
- return -EIO;
- }
reply = qeth_alloc_reply(card);
if (!reply) {
qeth_release_buffer(channel, iob);
@@ -2058,27 +1837,24 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
qeth_get_reply(reply);
iob->reply = reply;
- while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
-
- if (IS_IPA(iob->data)) {
- cmd = __ipa_cmd(iob);
- cmd->hdr.seqno = card->seqno.ipa++;
- reply->seqno = cmd->hdr.seqno;
- event_timeout = QETH_IPA_TIMEOUT;
- } else {
- reply->seqno = QETH_IDX_COMMAND_SEQNO;
- event_timeout = QETH_TIMEOUT;
+ timeout = wait_event_interruptible_timeout(card->wait_q,
+ qeth_trylock_channel(channel),
+ timeout);
+ if (timeout <= 0) {
+ qeth_put_reply(reply);
+ qeth_release_buffer(channel, iob);
+ return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
}
- qeth_prepare_control_data(card, len, iob);
- qeth_enqueue_reply(card, reply);
+ iob->finalize(card, iob, len);
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
- timeout = jiffies + event_timeout;
+ qeth_enqueue_reply(card, reply);
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, event_timeout);
+ (addr_t) iob, 0, 0, timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
@@ -2092,30 +1868,211 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
return rc;
}
- /* we have only one long running ipassist, since we can ensure
- process context of this command we can sleep */
- if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
- cmd->hdr.prot_version == QETH_PROT_IPV4) {
- if (!wait_event_timeout(reply->wait_q,
- atomic_read(&reply->received), event_timeout))
- goto time_err;
- } else {
- while (!atomic_read(&reply->received)) {
- if (time_after(jiffies, timeout))
- goto time_err;
- cpu_relax();
- }
- }
+ timeout = wait_for_completion_interruptible_timeout(&reply->received,
+ timeout);
+ if (timeout <= 0)
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_reply(card, reply);
- rc = reply->rc;
+ if (!rc)
+ rc = reply->rc;
qeth_put_reply(reply);
return rc;
+}
-time_err:
- qeth_dequeue_reply(card, reply);
- qeth_put_reply(reply);
- return -ETIME;
+static int qeth_idx_check_activate_response(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ int rc;
+
+ rc = qeth_check_idx_response(card, iob->data);
+ if (rc)
+ return rc;
+
+ if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
+ return 0;
+
+ /* negative reply: */
+ QETH_DBF_TEXT_(SETUP, 2, "idxneg%c",
+ QETH_IDX_ACT_CAUSE_CODE(iob->data));
+
+ switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
+ case QETH_IDX_ACT_ERR_EXCL:
+ dev_err(&channel->ccwdev->dev,
+ "The adapter is used exclusively by another host\n");
+ return -EBUSY;
+ case QETH_IDX_ACT_ERR_AUTH:
+ case QETH_IDX_ACT_ERR_AUTH_USER:
+ dev_err(&channel->ccwdev->dev,
+ "Setting the device online failed because of insufficient authorization\n");
+ return -EPERM;
+ default:
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
+ return -EIO;
+ }
+}
+
+static void qeth_idx_query_read_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ u16 peer_level;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
+
+ rc = qeth_idx_check_activate_response(card, channel, iob);
+ if (rc)
+ goto out;
+
+ memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+ if (peer_level != qeth_peer_func_level(card->info.func_level)) {
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, peer_level);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&card->token.issuer_rm_r,
+ QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+ QETH_MPC_TOKEN_LENGTH);
+ memcpy(&card->info.mcl_level[0],
+ QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
+
+out:
+ qeth_notify_reply(iob->reply, rc);
+ qeth_release_buffer(channel, iob);
+}
+
+static void qeth_idx_query_write_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ u16 peer_level;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
+
+ rc = qeth_idx_check_activate_response(card, channel, iob);
+ if (rc)
+ goto out;
+
+ memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+ if ((peer_level & ~0x0100) !=
+ qeth_peer_func_level(card->info.func_level)) {
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, peer_level);
+ rc = -EINVAL;
+ }
+
+out:
+ qeth_notify_reply(iob->reply, rc);
+ qeth_release_buffer(channel, iob);
+}
+
+static void qeth_idx_finalize_query_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int length)
+{
+ qeth_setup_ccw(iob->channel->ccw, CCW_CMD_READ, length, iob->data);
+}
+
+static void qeth_idx_activate_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ qeth_notify_reply(iob->reply, 0);
+ qeth_release_buffer(channel, iob);
+}
+
+static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
+ u8 port = ((u8)card->dev->dev_port) | 0x80;
+ struct ccw_dev_id dev_id;
+
+ ccw_device_get_id(CARD_DDEV(card), &dev_id);
+ iob->finalize = qeth_idx_finalize_cmd;
+ iob->callback = qeth_idx_activate_cb;
+
+ memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
+ memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+ &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
+ memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
+ &card->info.func_level, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
+}
+
+static int qeth_idx_activate_read_channel(struct qeth_card *card)
+{
+ struct qeth_channel *channel = &card->read;
+ struct qeth_cmd_buffer *iob;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "idxread");
+
+ iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
+
+ memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
+ qeth_idx_setup_activate_cmd(card, iob);
+
+ rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
+
+ iob->finalize = qeth_idx_finalize_query_cmd;
+ iob->callback = qeth_idx_query_read_cb;
+ rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ channel->state = CH_STATE_UP;
+ return 0;
+}
+
+static int qeth_idx_activate_write_channel(struct qeth_card *card)
+{
+ struct qeth_channel *channel = &card->write;
+ struct qeth_cmd_buffer *iob;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "idxwrite");
+
+ iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
+
+ memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
+ qeth_idx_setup_activate_cmd(card, iob);
+
+ rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
+
+ iob->finalize = qeth_idx_finalize_query_cmd;
+ iob->callback = qeth_idx_query_write_cb;
+ rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ channel->state = CH_STATE_UP;
+ return 0;
}
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -2140,7 +2097,9 @@ static int qeth_cm_enable(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "cmenable");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
+
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
@@ -2173,7 +2132,9 @@ static int qeth_cm_setup(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "cmsetup");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
+
memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
@@ -2206,7 +2167,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
/* adjust RX buffer size to new max MTU: */
card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
if (dev->max_mtu && dev->max_mtu != max_mtu)
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
} else {
if (dev->mtu)
new_mtu = dev->mtu;
@@ -2253,7 +2214,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.ulp_filter_r,
QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
- if (card->info.type == QETH_CARD_TYPE_IQD) {
+ if (IS_IQD(card)) {
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
} else {
@@ -2290,6 +2251,7 @@ static int qeth_ulp_enable(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
@@ -2336,6 +2298,7 @@ static int qeth_ulp_setup(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
@@ -2377,12 +2340,12 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
if (!q)
return;
- qeth_clear_outq_buffers(q, 1);
+ qeth_drain_output_queue(q, true);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
-static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
+static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
@@ -2396,7 +2359,7 @@ static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
return q;
}
-static int qeth_alloc_qdio_buffers(struct qeth_card *card)
+static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
int i, j;
@@ -2417,11 +2380,12 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
/* outbound */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
+ card->qdio.out_qs[i] = qeth_alloc_output_queue();
if (!card->qdio.out_qs[i])
goto out_freeoutq;
QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
+ card->qdio.out_qs[i]->card = card;
card->qdio.out_qs[i]->queue_no = i;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2458,7 +2422,7 @@ out_nomem:
return -ENOMEM;
}
-static void qeth_free_qdio_buffers(struct qeth_card *card)
+static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
@@ -2523,6 +2487,7 @@ static int qeth_dm_act(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "dmact");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
@@ -2564,7 +2529,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_qdio;
}
- rc = qeth_alloc_qdio_buffers(card);
+ rc = qeth_alloc_qdio_queues(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_qdio;
@@ -2572,7 +2537,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
rc = qeth_qdio_establish(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
goto out_qdio;
}
rc = qeth_qdio_activate(card);
@@ -2588,7 +2553,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
return 0;
out_qdio:
- qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ qeth_qdio_clear_card(card, !IS_IQD(card));
qdio_free(CARD_DDEV(card));
return rc;
}
@@ -2611,8 +2576,7 @@ void qeth_print_status_message(struct qeth_card *card)
}
/* fallthrough */
case QETH_CARD_TYPE_IQD:
- if ((card->info.guestlan) ||
- (card->info.mcl_level[0] & 0x80)) {
+ if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
card->info.mcl_level[0] = (char) _ebcasc[(__u8)
card->info.mcl_level[0]];
card->info.mcl_level[1] = (char) _ebcasc[(__u8)
@@ -2733,7 +2697,7 @@ static int qeth_init_input_buffer(struct qeth_card *card,
int qeth_init_qdio_queues(struct qeth_card *card)
{
- int i, j;
+ unsigned int i;
int rc;
QETH_DBF_TEXT(SETUP, 2, "initqdqs");
@@ -2762,19 +2726,15 @@ int qeth_init_qdio_queues(struct qeth_card *card)
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
- QDIO_MAX_BUFFERS_PER_Q);
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- qeth_clear_output_buffer(card->qdio.out_qs[i],
- card->qdio.out_qs[i]->bufs[j]);
- }
- card->qdio.out_qs[i]->card = card;
- card->qdio.out_qs[i]->next_buf_to_fill = 0;
- card->qdio.out_qs[i]->do_pack = 0;
- atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
- atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
- atomic_set(&card->qdio.out_qs[i]->state,
- QETH_OUT_Q_UNLOCKED);
+ struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
+
+ qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+ queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ queue->next_buf_to_fill = 0;
+ queue->do_pack = 0;
+ atomic_set(&queue->used_buffers, 0);
+ atomic_set(&queue->set_pci_flags_count, 0);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
}
return 0;
}
@@ -2805,12 +2765,26 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
cmd->hdr.prot_version = prot;
}
+static void qeth_ipa_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int length)
+{
+ qeth_mpc_finalize_cmd(card, iob, length);
+
+ /* override with IPA-specific values: */
+ __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa;
+ iob->reply->seqno = card->seqno.ipa++;
+}
+
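The finalize() hook introduced above replaces the old qeth_prepare_control_data() step: each command type now stamps its own header state right before transmission, and the generic send path only invokes the callback. A minimal user-space sketch of that callback pattern, not part of the patch and using entirely hypothetical names (cmd_buffer, idx_finalize, ipa_finalize, send_cmd), might look like this:

/*
 * Illustrative model only: one common finalize step, a second command type
 * layered on top of it, and a sender that is agnostic to the command type.
 */
#include <stdio.h>

struct cmd_buffer;
typedef void (*finalize_fn)(struct cmd_buffer *cmd, unsigned int len);

struct cmd_buffer {
	unsigned int seqno;
	finalize_fn finalize;
};

static void idx_finalize(struct cmd_buffer *cmd, unsigned int len)
{
	cmd->seqno = 1;	/* stamp transport-level state, analogous to qeth_idx_finalize_cmd() */
	printf("IDX finalize, len=%u seqno=%u\n", len, cmd->seqno);
}

static void ipa_finalize(struct cmd_buffer *cmd, unsigned int len)
{
	idx_finalize(cmd, len);	/* reuse the common step ... */
	cmd->seqno += 100;	/* ... then override with command-specific state */
	printf("IPA finalize, len=%u seqno=%u\n", len, cmd->seqno);
}

static void send_cmd(struct cmd_buffer *cmd, unsigned int len)
{
	/* the generic sender no longer needs to know which command it handles */
	cmd->finalize(cmd, len);
}

int main(void)
{
	struct cmd_buffer idx = { .seqno = 0, .finalize = idx_finalize };
	struct cmd_buffer ipa = { .seqno = 0, .finalize = ipa_finalize };

	send_cmd(&idx, 64);
	send_cmd(&ipa, 128);
	return 0;
}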
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length)
{
u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length;
u8 prot_type = qeth_mpc_select_prot_type(card);
+ iob->finalize = qeth_ipa_finalize_cmd;
+ iob->timeout = QETH_IPA_TIMEOUT;
+
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
@@ -2866,6 +2840,11 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
QETH_CARD_TEXT(card, 4, "sendipa");
+ if (card->read_or_write_problem) {
+ qeth_release_buffer(iob->channel, iob);
+ return -EIO;
+ }
+
if (reply_cb == NULL)
reply_cb = qeth_send_ipa_cmd_cb;
memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
@@ -3251,7 +3230,7 @@ static void qeth_handle_send_error(struct qeth_card *card,
int sbalf15 = buffer->buffer->element[15].sflags;
QETH_CARD_TEXT(card, 6, "hdsnderr");
- if (card->info.type == QETH_CARD_TYPE_IQD) {
+ if (IS_IQD(card)) {
if (sbalf15 == 0) {
qdio_err = 0;
} else {
@@ -3348,7 +3327,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (queue->bufstates)
queue->bufstates[bidx].user = buf;
- if (queue->card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(queue->card))
continue;
if (!queue->do_pack) {
@@ -3378,11 +3357,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
QETH_TXQ_STAT_ADD(queue, bufs, count);
- netif_trans_update(queue->card->dev);
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
- atomic_add(count, &queue->used_buffers);
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
if (rc) {
@@ -3422,7 +3399,6 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
* do_send_packet. So, we check if there is a
* packing buffer to be flushed here.
*/
- netif_stop_queue(queue->card->dev);
index = queue->next_buf_to_fill;
q_was_packing = queue->do_pack;
/* queue->do_pack may change */
@@ -3467,7 +3443,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
goto out;
}
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
card->options.cq = cq;
rc = 0;
}
@@ -3493,7 +3469,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
if (qdio_err) {
- netif_stop_queue(card->dev);
+ netif_tx_stop_all_queues(card->dev);
qeth_schedule_recovery(card);
return;
}
@@ -3549,12 +3525,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
struct qeth_qdio_out_buffer *buffer;
+ struct net_device *dev = card->dev;
+ struct netdev_queue *txq;
int i;
QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_FATAL) {
QETH_CARD_TEXT(card, 2, "achkcond");
- netif_stop_queue(card->dev);
+ netif_tx_stop_all_queues(dev);
qeth_schedule_recovery(card);
return;
}
@@ -3580,7 +3558,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
/* prepare the queue slot for re-use: */
qeth_scrub_qdio_buffer(buffer->buffer,
- QETH_MAX_BUFFER_ELEMENTS(card));
+ queue->max_elements);
if (qeth_init_qdio_out_buf(queue, bidx)) {
QETH_CARD_TEXT(card, 2, "outofbuf");
qeth_schedule_recovery(card);
@@ -3600,33 +3578,32 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
atomic_sub(count, &queue->used_buffers);
/* check if we need to do something on this outbound queue */
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
qeth_check_outbound_queue(queue);
- netif_wake_queue(queue->card->dev);
-}
-
-/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
-static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
-{
- if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
- return 2;
- return queue_num;
+ if (IS_IQD(card))
+ __queue = qeth_iqd_translate_txq(dev, __queue);
+ txq = netdev_get_tx_queue(dev, __queue);
+ /* xmit may have observed the full-condition, but not yet stopped the
+ * txq. In that case the code below won't trigger, so before returning
+ * xmit will re-check the txq's fill level and wake it up if needed.
+ */
+ if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
+ netif_tx_wake_queue(txq);
}
/**
* Note: Function assumes that we have 4 outbound queues.
*/
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
- int ipv)
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
- __be16 *tci;
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
u8 tos;
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
- switch (ipv) {
+ switch (qeth_get_ip_version(skb)) {
case 4:
tos = ipv4_get_dsfield(ip_hdr(skb));
break;
@@ -3637,9 +3614,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
return card->qdio.default_out_queue;
}
if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
- return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
+ return ~tos >> 6 & 3;
if (tos & IPTOS_MINCOST)
- return qeth_cut_iqd_prio(card, 3);
+ return 3;
if (tos & IPTOS_RELIABILITY)
return 2;
if (tos & IPTOS_THROUGHPUT)
@@ -3650,12 +3627,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
case QETH_PRIO_Q_ING_SKB:
if (skb->priority > 5)
return 0;
- return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
+ return ~skb->priority >> 1 & 3;
case QETH_PRIO_Q_ING_VLAN:
- tci = &((struct ethhdr *)skb->data)->h_proto;
- if (be16_to_cpu(*tci) == ETH_P_8021Q)
- return qeth_cut_iqd_prio(card,
- ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
+ if (veth->h_vlan_proto == htons(ETH_P_8021Q))
+ return ~ntohs(veth->h_vlan_TCI) >>
+ (VLAN_PRIO_SHIFT + 1) & 3;
break;
default:
break;
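For illustration, the queue-mapping arithmetic above can be checked in isolation: IP precedence occupies the top three bits of the TOS byte, the VLAN PCP the top three bits of the TCI, and inverting before shifting makes higher priorities land on lower queue numbers. A standalone sketch, not part of the patch, assuming VLAN_PRIO_SHIFT is 13 as in if_vlan.h:

#include <stdio.h>
#include <stdint.h>

static unsigned int queue_from_tos(uint8_t tos)
{
	/* mirrors "~tos >> 6 & 3": precedence 6-7 -> queue 0, 0-1 -> queue 3 */
	return (uint8_t)~tos >> 6 & 3;
}

static unsigned int queue_from_vlan_tci(uint16_t tci)
{
	/* mirrors "~tci >> (VLAN_PRIO_SHIFT + 1) & 3", i.e. shift by 14 */
	return (uint16_t)~tci >> 14 & 3;
}

int main(void)
{
	printf("TOS 0xe0 (prec 7) -> queue %u\n", queue_from_tos(0xe0));
	printf("TOS 0x00 (prec 0) -> queue %u\n", queue_from_tos(0x00));
	printf("TCI with PCP 7    -> queue %u\n", queue_from_vlan_tci(7 << 13));
	printf("TCI with PCP 0    -> queue %u\n", queue_from_vlan_tci(0));
	return 0;
}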
@@ -3729,8 +3705,8 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
unsigned int hdr_len, unsigned int proto_len,
unsigned int *elements)
{
- const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card);
const unsigned int contiguous = proto_len ? proto_len : 1;
+ const unsigned int max_elements = queue->max_elements;
unsigned int __elements;
addr_t start, end;
bool push_ok;
@@ -3867,11 +3843,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
* from qeth_core_header_cache.
* @offset: when mapping the skb, start at skb->data + offset
* @hd_len: if > 0, build a dedicated header element of this size
+ * @flush: prepare the buffer to be flushed, regardless of its fill level
*/
static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len)
+ unsigned int offset, unsigned int hd_len,
+ bool flush)
{
struct qdio_buffer *buffer = buf->buffer;
bool is_first_elem = true;
@@ -3900,8 +3878,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
QETH_TXQ_STAT_INC(queue, skbs_pack);
/* If the buffer still has free elements, keep using it. */
- if (buf->next_element_to_fill <
- QETH_MAX_BUFFER_ELEMENTS(queue->card))
+ if (!flush &&
+ buf->next_element_to_fill < queue->max_elements)
return 0;
}
@@ -3918,15 +3896,31 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
{
int index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+ struct netdev_queue *txq;
+ bool stopped = false;
- /*
- * check if buffer is empty to make sure that we do not 'overtake'
- * ourselves and try to fill a buffer that is already primed
+ /* Just a sanity check; the wake/stop logic should ensure that we always
+ * get a free buffer.
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+
+ txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
+
+ if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+ /* If a TX completion happens right _here_ and fails to wake
+ * the txq, then the re-check below will catch the race.
+ */
+ QETH_TXQ_STAT_INC(queue, stopped);
+ netif_tx_stop_queue(txq);
+ stopped = true;
+ }
+
+ qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
qeth_flush_buffers(queue, index, 1);
+
+ if (stopped && !qeth_out_queue_is_full(queue))
+ netif_tx_start_queue(txq);
return 0;
}
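The stop/re-check/wake ordering used here (and again in qeth_do_send_packet() below) is what closes the race with a concurrent TX completion: stop the txq before the buffer is handed to the hardware, then re-check the fill level afterwards and undo the stop if a completion drained the queue in the meantime. A single-threaded, user-space toy model of that ordering, with made-up names and a made-up QUEUE_MAX limit:

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_MAX 4

static int used_buffers;
static bool txq_stopped;

static bool queue_is_full(void) { return used_buffers >= QUEUE_MAX; }

static void xmit_one(void)
{
	if (++used_buffers >= QUEUE_MAX) {
		/* stop first; a completion may slip in right here ... */
		txq_stopped = true;
	}

	/* ... filling and flushing the buffer would happen here ... */

	/* ... so re-check and undo the stop if the queue drained meanwhile */
	if (txq_stopped && !queue_is_full())
		txq_stopped = false;
}

static void tx_completion(void)
{
	used_buffers--;
	if (txq_stopped && !queue_is_full())
		txq_stopped = false;
}

int main(void)
{
	for (int i = 0; i < QUEUE_MAX; i++)
		xmit_one();
	printf("stopped after filling up: %d\n", txq_stopped);
	tx_completion();
	printf("stopped after a completion: %d\n", txq_stopped);
	return 0;
}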
@@ -3936,6 +3930,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
+ struct netdev_queue *txq;
+ bool stopped = false;
int start_index;
int flush_count = 0;
int do_pack = 0;
@@ -3947,21 +3943,24 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
start_index = queue->next_buf_to_fill;
buffer = queue->bufs[queue->next_buf_to_fill];
- /*
- * check if buffer is empty to make sure that we do not 'overtake'
- * ourselves and try to fill a buffer that is already primed
+
+ /* Just a sanity check; the wake/stop logic should ensure that we always
+ * get a free buffer.
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
+
+ txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
+
/* check if we need to switch packing state of this queue */
qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack) {
do_pack = 1;
/* does packet fit in current buffer? */
- if ((QETH_MAX_BUFFER_ELEMENTS(card) -
- buffer->next_element_to_fill) < elements_needed) {
+ if (buffer->next_element_to_fill + elements_needed >
+ queue->max_elements) {
/* ... no -> set state PRIMED */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
flush_count++;
@@ -3969,8 +3968,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
(queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
buffer = queue->bufs[queue->next_buf_to_fill];
- /* we did a step forward, so check buffer state
- * again */
+
+ /* We stepped forward, so sanity-check again: */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY) {
qeth_flush_buffers(queue, start_index,
@@ -3983,8 +3982,18 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
- flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
- hd_len);
+ if (buffer->next_element_to_fill == 0 &&
+ atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+ /* If a TX completion happens right _here_ and fails to wake
+ * the txq, then the re-check below will catch the race.
+ */
+ QETH_TXQ_STAT_INC(queue, stopped);
+ netif_tx_stop_queue(txq);
+ stopped = true;
+ }
+
+ flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
+ stopped);
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
@@ -4015,6 +4024,8 @@ out:
if (do_pack)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
+ if (stopped && !qeth_out_queue_is_full(queue))
+ netif_tx_start_queue(txq);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
@@ -4101,9 +4112,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
} else {
if (!push_len)
kmem_cache_free(qeth_core_header_cache, hdr);
- if (rc == -EBUSY)
- /* roll back to ETH header */
- skb_pull(skb, push_len);
}
return rc;
}
@@ -4321,9 +4329,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
QETH_CARD_TEXT(card, 4, "setactlo");
- if ((card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSX) &&
- qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+ if ((IS_OSD(card) || IS_OSX(card)) &&
+ qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation, fallback);
if (rc) {
@@ -4348,7 +4355,6 @@ void qeth_tx_timeout(struct net_device *dev)
card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "txtimeo");
- QETH_CARD_STAT_INC(card, tx_errors);
qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
@@ -4489,7 +4495,7 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
QETH_CARD_TEXT(card, 3, "snmpcmd");
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
@@ -4732,14 +4738,6 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
-static int qeth_get_qdio_q_format(struct qeth_card *card)
-{
- if (card->info.type == QETH_CARD_TYPE_IQD)
- return QDIO_IQDIO_QFMT;
- else
- return QDIO_QETH_QFMT;
-}
-
static void qeth_determine_capabilities(struct qeth_card *card)
{
int rc;
@@ -4878,7 +4876,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
memset(&init_data, 0, sizeof(struct qdio_initialize));
init_data.cdev = CARD_DDEV(card);
- init_data.q_format = qeth_get_qdio_q_format(card);
+ init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
+ QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
init_data.qib_param_field = qib_param_field;
init_data.no_input_qs = card->qdio.no_in_queues;
@@ -4890,8 +4889,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
init_data.output_sbal_state_array = card->qdio.out_bufstates;
- init_data.scan_threshold =
- (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
+ init_data.scan_threshold = IS_IQD(card) ? 1 : 32;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -4937,7 +4935,7 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->write);
qeth_clean_channel(&card->data);
destroy_workqueue(card->event_wq);
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
@@ -4986,12 +4984,14 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
atomic_set(&card->force_alloc_skb, 0);
- qeth_update_from_chp_desc(card);
+ rc = qeth_update_from_chp_desc(card);
+ if (rc)
+ return rc;
retry:
if (retries < 3)
QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
CARD_DEVID(card));
- rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ rc = qeth_qdio_clear_card(card, !IS_IQD(card));
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
@@ -5019,8 +5019,9 @@ retriable:
qeth_determine_capabilities(card);
qeth_init_tokens(card);
qeth_init_func_level(card);
- rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb);
- if (rc == -ERESTARTSYS) {
+
+ rc = qeth_idx_activate_read_channel(card);
+ if (rc == -EINTR) {
QETH_DBF_TEXT(SETUP, 2, "break2");
return rc;
} else if (rc) {
@@ -5030,8 +5031,9 @@ retriable:
else
goto retry;
}
- rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb);
- if (rc == -ERESTARTSYS) {
+
+ rc = qeth_idx_activate_write_channel(card);
+ if (rc == -EINTR) {
QETH_DBF_TEXT(SETUP, 2, "break3");
return rc;
} else if (rc) {
@@ -5171,7 +5173,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
return NULL;
if (((skb_len >= card->options.rx_sg_cb) &&
- (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+ !IS_OSN(card) &&
(!atomic_read(&card->force_alloc_skb))) ||
(card->options.cq == QETH_CQ_ENABLED))
use_rx_sg = 1;
@@ -5562,13 +5564,17 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
- dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+ dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
+ ether_setup, QETH_MAX_QUEUES, 1);
+ break;
+ case QETH_CARD_TYPE_OSM:
+ dev = alloc_etherdev(0);
break;
case QETH_CARD_TYPE_OSN:
dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
break;
default:
- dev = alloc_etherdev(0);
+ dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
}
if (!dev)
@@ -5590,8 +5596,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
- if (IS_IQD(card))
+ if (IS_IQD(card)) {
+ netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
dev->features |= NETIF_F_SG;
+ }
}
return dev;
@@ -5641,14 +5649,16 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
}
qeth_setup_card(card);
- qeth_update_from_chp_desc(card);
-
card->dev = qeth_alloc_netdev(card);
if (!card->dev) {
rc = -ENOMEM;
goto err_card;
}
+ card->qdio.no_out_queues = card->dev->num_tx_queues;
+ rc = qeth_update_from_chp_desc(card);
+ if (rc)
+ goto err_chp_desc;
qeth_determine_capabilities(card);
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
@@ -5661,9 +5671,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_load;
- gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
- ? card->discipline->devtype
- : &qeth_osn_devtype;
+ gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
+ card->discipline->devtype;
rc = card->discipline->setup(card->gdev);
if (rc)
goto err_disc;
@@ -5675,6 +5684,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
err_disc:
qeth_core_free_discipline(card);
err_load:
+err_chp_desc:
free_netdev(card->dev);
err_card:
qeth_core_free_card(card);
@@ -5706,10 +5716,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
enum qeth_discipline_id def_discipline;
if (!card->discipline) {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- def_discipline = QETH_DISCIPLINE_LAYER3;
- else
- def_discipline = QETH_DISCIPLINE_LAYER2;
+ def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
+ QETH_DISCIPLINE_LAYER2;
rc = qeth_core_load_discipline(card, def_discipline);
if (rc)
goto err;
@@ -5737,7 +5745,7 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qdio_free(CARD_DDEV(card));
}
@@ -5837,13 +5845,10 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX) &&
- !card->info.guestlan)
+ if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
+ !IS_VM_NIC(card))
return 1;
- else
- return 0;
+ return 0;
case SIOCGMIIPHY:
mii_data = if_mii(rq);
mii_data->phy_id = 0;
@@ -6193,7 +6198,6 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = card->stats.rx_errors;
stats->rx_dropped = card->stats.rx_dropped;
stats->multicast = card->stats.rx_multicast;
- stats->tx_errors = card->stats.tx_errors;
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
@@ -6206,6 +6210,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ u8 cast_type, struct net_device *sb_dev)
+{
+ if (cast_type != RTN_UNICAST)
+ return QETH_IQD_MCAST_TXQ;
+ return QETH_IQD_MIN_UCAST_TXQ;
+}
+EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
+
int qeth_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -6216,7 +6229,7 @@ int qeth_open(struct net_device *dev)
return -EIO;
card->data.state = CH_STATE_UP;
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
napi_enable(&card->napi);
local_bh_disable();
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f8c5d4a9be13..f5237b7c14c4 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -82,7 +82,7 @@ enum qeth_card_types {
#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
-#define IS_VM_NIC(card) ((card)->info.guestlan)
+#define IS_VM_NIC(card) ((card)->info.is_vm_nic)
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 56deeb6f7bc0..9f392497d570 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -198,6 +198,9 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
if (!card)
return -EINVAL;
+ if (IS_IQD(card))
+ return -EOPNOTSUPP;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -239,10 +242,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 2;
} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- rc = -EPERM;
- goto out;
- }
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 3;
} else if (sysfs_streq(buf, "no_prio_queueing")) {
@@ -480,8 +479,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (card->info.type != QETH_CARD_TYPE_OSD &&
- card->info.type != QETH_CARD_TYPE_OSX) {
+ if (!IS_OSD(card) && !IS_OSX(card)) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
"support QDIO data connection isolation\n");
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 93a53fed4cf8..4166eb29f0bd 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -38,6 +38,7 @@ static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
QETH_TXQ_STAT("TSO bytes", tso_bytes),
QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+ QETH_TXQ_STAT("Queue stopped", stopped),
};
static const struct qeth_stats card_stats[] = {
@@ -154,6 +155,21 @@ static void qeth_get_drvinfo(struct net_device *dev,
CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
}
+static void qeth_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ channels->max_rx = dev->num_rx_queues;
+ channels->max_tx = card->qdio.no_out_queues;
+ channels->max_other = 0;
+ channels->max_combined = 0;
+ channels->rx_count = dev->real_num_rx_queues;
+ channels->tx_count = dev->real_num_tx_queues;
+ channels->other_count = 0;
+ channels->combined_count = 0;
+}
+
/* Helper function to fill 'advertising' and 'supported' which are the same. */
/* Autoneg and full-duplex are supported and advertised unconditionally. */
/* Always advertise and support all speeds up to specified, and only one */
@@ -359,6 +375,7 @@ const struct ethtool_ops qeth_ethtool_ops = {
.get_ethtool_stats = qeth_get_ethtool_stats,
.get_sset_count = qeth_get_sset_count,
.get_drvinfo = qeth_get_drvinfo,
+ .get_channels = qeth_get_channels,
.get_link_ksettings = qeth_get_link_ksettings,
};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c3067fd3bd9e..218801232ca2 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -149,29 +149,16 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
return rc;
}
-static void qeth_l2_del_all_macs(struct qeth_card *card)
+static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
{
struct qeth_mac *mac;
struct hlist_node *tmp;
int i;
- spin_lock_bh(&card->mclock);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
hash_del(&mac->hnode);
kfree(mac);
}
- spin_unlock_bh(&card->mclock);
-}
-
-static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
-{
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return RTN_UNICAST;
- if (is_broadcast_ether_addr(skb->data))
- return RTN_BROADCAST;
- if (is_multicast_ether_addr(skb->data))
- return RTN_MULTICAST;
- return RTN_UNICAST;
}
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
@@ -292,14 +279,16 @@ static void qeth_l2_stop_card(struct qeth_card *card)
qeth_set_allowed_threads(card, 0, 1);
+ cancel_work_sync(&card->rx_mode_work);
+ qeth_l2_drain_rx_mode_cache(card);
+
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l2_del_all_macs(card);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
if (card->state == CARD_STATE_HARDSETUP) {
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
@@ -334,13 +323,11 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
case QETH_HEADER_TYPE_LAYER2:
skb->protocol = eth_type_trans(skb, skb->dev);
qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
- if (skb->protocol == htons(ETH_P_802_2))
- *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
len = skb->len;
napi_gro_receive(&card->napi, skb);
break;
case QETH_HEADER_TYPE_OSN:
- if (card->info.type == QETH_CARD_TYPE_OSN) {
+ if (IS_OSN(card)) {
skb_push(skb, sizeof(struct qeth_hdr));
skb_copy_to_linear_data(skb, hdr,
sizeof(struct qeth_hdr));
@@ -391,8 +378,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
}
/* some devices don't support a custom MAC address: */
- if (card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX)
+ if (IS_OSM(card) || IS_OSX(card))
return (rc) ? rc : -EADDRNOTAVAIL;
eth_hw_addr_random(card->dev);
@@ -515,9 +501,11 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
hash_add(card->mac_htable, &mac->hnode, mac_hash);
}
-static void qeth_l2_set_rx_mode(struct net_device *dev)
+static void qeth_l2_rx_mode_work(struct work_struct *work)
{
- struct qeth_card *card = dev->ml_priv;
+ struct qeth_card *card = container_of(work, struct qeth_card,
+ rx_mode_work);
+ struct net_device *dev = card->dev;
struct netdev_hw_addr *ha;
struct qeth_mac *mac;
struct hlist_node *tmp;
@@ -526,12 +514,12 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
QETH_CARD_TEXT(card, 3, "setmulti");
- spin_lock_bh(&card->mclock);
-
+ netif_addr_lock_bh(dev);
netdev_for_each_mc_addr(ha, dev)
qeth_l2_add_mac(card, ha);
netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mac(card, ha);
+ netif_addr_unlock_bh(dev);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
switch (mac->disp_flag) {
@@ -554,8 +542,6 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
}
}
- spin_unlock_bh(&card->mclock);
-
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
qeth_setadp_promisc_mode(card);
else
@@ -586,7 +572,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
}
elements += qeth_count_elements(skb, hd_len);
- if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) {
+ if (elements > queue->max_elements) {
rc = -E2BIG;
goto out;
}
@@ -603,37 +589,45 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- int cast_type = qeth_l2_get_cast_type(card, skb);
- int ipv = qeth_get_ip_version(skb);
+ u16 txq = skb_get_queue_mapping(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int rc;
- queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
- netif_stop_queue(dev);
+ if (IS_IQD(card))
+ txq = qeth_iqd_translate_txq(dev, txq);
+ queue = card->qdio.out_qs[txq];
if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
else
- rc = qeth_xmit(card, skb, queue, ipv, cast_type,
+ rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
+ qeth_get_ether_cast_type(skb),
qeth_l2_fill_header);
if (!rc) {
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
- } else if (rc == -EBUSY) {
- return NETDEV_TX_BUSY;
- } /* else fall through */
+ }
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
}
+static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (IS_IQD(card))
+ return qeth_iqd_select_queue(dev, skb,
+ qeth_get_ether_cast_type(skb),
+ sb_dev);
+ return qeth_get_priority_queue(card, skb);
+}
+
static const struct device_type qeth_l2_devtype = {
.name = "qeth_layer2",
.groups = qeth_l2_attr_groups,
@@ -653,6 +647,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
}
hash_init(card->mac_htable);
+ INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
return 0;
}
@@ -673,12 +668,20 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
unregister_netdev(card->dev);
}
+static void qeth_l2_set_rx_mode(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ schedule_work(&card->rx_mode_work);
+}
+
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
+ .ndo_select_queue = qeth_l2_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -721,7 +724,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
- if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
+ if (IS_OSD(card) && !IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
@@ -831,8 +834,7 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- if ((card->info.type == QETH_CARD_TYPE_OSD) ||
- (card->info.type == QETH_CARD_TYPE_OSX)) {
+ if (IS_OSD(card) || IS_OSX(card)) {
rc = qeth_l2_start_ipassists(card);
if (rc)
goto out_remove;
@@ -1042,13 +1044,13 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT(card, 5, "osndctrd");
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- qeth_prepare_control_data(card, len, iob);
+ wait_event(card->wait_q, qeth_trylock_channel(channel));
+ iob->finalize(card, iob, len);
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
QETH_CARD_TEXT(card, 6, "osnoirqp");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
+ (addr_t) iob, 0, 0, iob->timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
@@ -1456,9 +1458,8 @@ static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
enum qeth_ipa_sbp_cmd sbp_cmd,
unsigned int cmd_length)
{
- enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ?
- IPA_CMD_SETBRIDGEPORT_IQD :
- IPA_CMD_SETBRIDGEPORT_OSA;
+ enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
+ IPA_CMD_SETBRIDGEPORT_OSA;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 53712cf26406..0271833da6a2 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -246,9 +246,9 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
*/
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
rc = qeth_l3_register_addr_entry(card, addr);
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
addr->in_progress = 0;
} else
rc = qeth_l3_register_addr_entry(card, addr);
@@ -268,6 +268,30 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
return rc;
}
+static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr,
+ bool add)
+{
+ int rc;
+
+ mutex_lock(&card->ip_lock);
+ rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr);
+ mutex_unlock(&card->ip_lock);
+
+ return rc;
+}
+
+static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+}
+
static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
struct qeth_ipaddr *addr;
@@ -276,7 +300,7 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
QETH_CARD_TEXT(card, 4, "clearip");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (!recover) {
@@ -287,19 +311,9 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
addr->disp_flag = QETH_DISP_ADDR_ADD;
}
- spin_unlock_bh(&card->ip_lock);
-
- spin_lock_bh(&card->mclock);
-
- hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
- hash_del(&addr->hnode);
- kfree(addr);
- }
-
- spin_unlock_bh(&card->mclock);
-
-
+ mutex_unlock(&card->ip_lock);
}
+
static void qeth_l3_recover_ip(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
@@ -309,15 +323,15 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "recovrip");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
rc = qeth_l3_register_addr_entry(card, addr);
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
addr->in_progress = 0;
} else
rc = qeth_l3_register_addr_entry(card, addr);
@@ -333,8 +347,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
}
}
- spin_unlock_bh(&card->ip_lock);
-
+ mutex_unlock(&card->ip_lock);
}
static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -461,7 +474,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
static int qeth_l3_correct_routing_type(struct qeth_card *card,
enum qeth_routing_types *type, enum qeth_prot_versions prot)
{
- if (card->info.type == QETH_CARD_TYPE_IQD) {
+ if (IS_IQD(card)) {
switch (*type) {
case NO_ROUTER:
case PRIMARY_CONNECTOR:
@@ -559,7 +572,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
list_del(&ipatoe->entry);
@@ -567,7 +580,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
}
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
int qeth_l3_add_ipato_entry(struct qeth_card *card,
@@ -578,7 +591,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
@@ -596,7 +609,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
qeth_l3_update_ipato(card);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
return rc;
}
@@ -610,7 +623,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "delipato");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
@@ -625,7 +638,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
return rc;
}
@@ -634,7 +647,6 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
- int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -642,16 +654,13 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- spin_lock_bh(&card->ip_lock);
- rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
- spin_unlock_bh(&card->ip_lock);
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
{
struct qeth_ipaddr addr;
- int rc, i;
+ unsigned int i;
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
addr.u.a6.addr.s6_addr[0] = 0xfe;
@@ -659,10 +668,7 @@ int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
for (i = 0; i < 8; i++)
addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
- spin_lock_bh(&card->ip_lock);
- rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
- spin_unlock_bh(&card->ip_lock);
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -848,7 +854,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "softipv6");
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
goto out;
rc = qeth_send_simple_setassparms(card, IPA_IPV6,
@@ -1374,8 +1380,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
switch (hdr->hdr.l3.id) {
case QETH_HEADER_TYPE_LAYER3:
magic = *(__u16 *)skb->data;
- if ((card->info.type == QETH_CARD_TYPE_IQD) &&
- (magic == ETH_P_AF_IUCV)) {
+ if (IS_IQD(card) && magic == ETH_P_AF_IUCV) {
len = skb->len;
dev_hard_header(skb, dev, ETH_P_AF_IUCV,
dev->dev_addr, "FAKELL", len);
@@ -1413,6 +1418,9 @@ static void qeth_l3_stop_card(struct qeth_card *card)
qeth_set_allowed_threads(card, 0, 1);
+ cancel_work_sync(&card->rx_mode_work);
+ qeth_l3_drain_rx_mode_cache(card);
+
if (card->options.sniffer &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON))
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
@@ -1424,7 +1432,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
}
if (card->state == CARD_STATE_HARDSETUP) {
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
@@ -1451,7 +1459,7 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
(card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
return;
- if (card->info.guestlan) { /* Guestlan trace */
+ if (IS_VM_NIC(card)) { /* Guestlan trace */
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
qeth_setadp_promisc_mode(card);
} else if (card->options.sniffer && /* HiperSockets trace */
@@ -1466,9 +1474,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
}
}
-static void qeth_l3_set_rx_mode(struct net_device *dev)
+static void qeth_l3_rx_mode_work(struct work_struct *work)
{
- struct qeth_card *card = dev->ml_priv;
+ struct qeth_card *card = container_of(work, struct qeth_card,
+ rx_mode_work);
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i, rc;
@@ -1476,8 +1485,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
QETH_CARD_TEXT(card, 3, "setmulti");
if (!card->options.sniffer) {
- spin_lock_bh(&card->mclock);
-
qeth_l3_add_multicast_ipv4(card);
qeth_l3_add_multicast_ipv6(card);
@@ -1505,8 +1512,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
}
}
- spin_unlock_bh(&card->mclock);
-
if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
return;
}
@@ -1551,7 +1556,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
@@ -1783,7 +1788,7 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card,
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
@@ -1816,7 +1821,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
+ if (IS_VM_NIC(card) || IS_IQD(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
@@ -1913,13 +1918,7 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
RTN_MULTICAST : RTN_UNICAST;
default:
/* ... and MAC address */
- if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
- skb->dev->broadcast))
- return RTN_BROADCAST;
- if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
- return RTN_MULTICAST;
- /* default to unicast */
- return RTN_UNICAST;
+ return qeth_get_ether_cast_type(skb);
}
}
@@ -1977,19 +1976,14 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
}
+ l3_hdr->flags = qeth_l3_cast_type_to_flag(cast_type);
+
/* OSA only: */
if (!ipv) {
- hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
- if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
- skb->dev->broadcast))
- hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
- else
- hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
- QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+ l3_hdr->flags |= QETH_HDR_PASSTHRU;
return;
}
- hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
rcu_read_lock();
if (ipv == 4) {
struct rtable *rt = skb_rtable(skb);
@@ -2007,7 +2001,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
}
rcu_read_unlock();
@@ -2030,7 +2024,6 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
- unsigned char eth_hdr[ETH_HLEN];
unsigned int hw_hdr_len;
int rc;
@@ -2040,45 +2033,44 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
if (rc)
return rc;
- skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
- rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
- if (rc == -EBUSY) {
- /* roll back to ETH header */
- skb_push(skb, ETH_HLEN);
- skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
- }
- return rc;
+ return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- int cast_type = qeth_l3_get_cast_type(skb);
struct qeth_card *card = dev->ml_priv;
+ u16 txq = skb_get_queue_mapping(skb);
int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
- int rc;
-
- queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+ int cast_type, rc;
if (IS_IQD(card)) {
+ queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
+
if (card->options.sniffer)
goto tx_drop;
if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
(card->options.cq == QETH_CQ_ENABLED &&
skb->protocol != htons(ETH_P_AF_IUCV)))
goto tx_drop;
+
+ if (txq == QETH_IQD_MCAST_TXQ)
+ cast_type = qeth_l3_get_cast_type(skb);
+ else
+ cast_type = RTN_UNICAST;
+ } else {
+ queue = card->qdio.out_qs[txq];
+ cast_type = qeth_l3_get_cast_type(skb);
}
if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
goto tx_drop;
- netif_stop_queue(dev);
-
if (ipv == 4 || IS_IQD(card))
rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
else
@@ -2088,19 +2080,22 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (!rc) {
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
- } else if (rc == -EBUSY) {
- return NETDEV_TX_BUSY;
- } /* else fall through */
+ }
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
}
+static void qeth_l3_set_rx_mode(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ schedule_work(&card->rx_mode_work);
+}
+
/*
* we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
* NOARP on the netdevice is no option because it also turns off neighbor
@@ -2134,11 +2129,27 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
return qeth_features_check(skb, dev, features);
}
+static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
+ sb_dev);
+}
+
+static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ return qeth_get_priority_queue(card, skb);
+}
+
static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
+ .ndo_select_queue = qeth_l3_iqd_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -2155,6 +2166,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
+ .ndo_select_queue = qeth_l3_osa_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -2171,8 +2183,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
unsigned int headroom;
int rc;
- if (card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSX) {
+ if (IS_OSD(card) || IS_OSX(card)) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
pr_info("qeth_l3: ignoring TR device\n");
@@ -2186,7 +2197,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
card->dev->dev_id = card->info.unique_id & 0xffff;
- if (!card->info.guestlan) {
+ if (!IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
card->dev->hw_features |= NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
@@ -2210,7 +2221,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
headroom = sizeof(struct qeth_hdr_tso);
else
headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
- } else if (card->info.type == QETH_CARD_TYPE_IQD) {
+ } else if (IS_IQD(card)) {
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
@@ -2253,14 +2264,22 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
int rc;
hash_init(card->ip_htable);
+ mutex_init(&card->ip_lock);
+ card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0,
+ dev_name(&gdev->dev));
+ if (!card->cmd_wq)
+ return -ENOMEM;
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l3_create_device_attributes(&gdev->dev);
- if (rc)
+ if (rc) {
+ destroy_workqueue(card->cmd_wq);
return rc;
+ }
}
hash_init(card->ip_mc_htable);
+ INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
return 0;
}
@@ -2280,6 +2299,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
+
+ flush_workqueue(card->cmd_wq);
+ destroy_workqueue(card->cmd_wq);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
@@ -2517,20 +2539,40 @@ static int qeth_l3_handle_ip_event(struct qeth_card *card,
{
switch (event) {
case NETDEV_UP:
- spin_lock_bh(&card->ip_lock);
- qeth_l3_add_ip(card, addr);
- spin_unlock_bh(&card->ip_lock);
+ qeth_l3_modify_ip(card, addr, true);
return NOTIFY_OK;
case NETDEV_DOWN:
- spin_lock_bh(&card->ip_lock);
- qeth_l3_delete_ip(card, addr);
- spin_unlock_bh(&card->ip_lock);
+ qeth_l3_modify_ip(card, addr, false);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
+struct qeth_l3_ip_event_work {
+ struct work_struct work;
+ struct qeth_card *card;
+ struct qeth_ipaddr addr;
+};
+
+#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)
+
+static void qeth_l3_add_ip_worker(struct work_struct *work)
+{
+ struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
+
+ qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true);
+ kfree(work);
+}
+
+static void qeth_l3_delete_ip_worker(struct work_struct *work)
+{
+ struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
+
+ qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false);
+ kfree(work);
+}
+
static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
{
if (is_vlan_dev(dev))
@@ -2575,9 +2617,12 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct net_device *dev = ifa->idev->dev;
- struct qeth_ipaddr addr;
+ struct qeth_l3_ip_event_work *ip_work;
struct qeth_card *card;
+ if (event != NETDEV_UP && event != NETDEV_DOWN)
+ return NOTIFY_DONE;
+
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
@@ -2585,11 +2630,23 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
if (!qeth_is_supported(card, IPA_IPV6))
return NOTIFY_DONE;
- qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
- addr.u.a6.addr = ifa->addr;
- addr.u.a6.pfxlen = ifa->prefix_len;
+ ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC);
+ if (!ip_work)
+ return NOTIFY_DONE;
- return qeth_l3_handle_ip_event(card, &addr, event);
+ if (event == NETDEV_UP)
+ INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
+ else
+ INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);
+
+ ip_work->card = card;
+ qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL,
+ QETH_PROT_IPV6);
+ ip_work->addr.u.a6.addr = ifa->addr;
+ ip_work->addr.u.a6.pfxlen = ifa->prefix_len;
+
+ queue_work(card->cmd_wq, &ip_work->work);
+ return NOTIFY_OK;
}
static struct notifier_block qeth_l3_ip6_notifier = {
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index cff518b0f904..2f73b33c9347 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -206,7 +206,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
if (card->options.cq == QETH_CQ_ENABLED)
return -EPERM;
@@ -258,7 +258,7 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
@@ -276,7 +276,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
if (card->state != CARD_STATE_DOWN)
return -EPERM;
@@ -367,9 +367,9 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
if (card->ipato.enabled != enable) {
card->ipato.enabled = enable;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -412,9 +412,9 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
if (card->ipato.invert4 != invert) {
card->ipato.invert4 = invert;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -436,7 +436,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
/* add strlen for "/<mask>\n" */
entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
@@ -449,7 +449,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
i += snprintf(buf + i, PAGE_SIZE - i,
"%s/%i\n", addr_str, ipatoe->mask_bits);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
@@ -598,9 +598,9 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
if (card->ipato.invert6 != invert) {
card->ipato.invert6 = invert;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -684,7 +684,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
@@ -698,7 +698,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
addr_str);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
return str_len;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 74c328321889..6a3076881321 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -66,6 +66,7 @@ struct virtio_ccw_device {
bool device_lost;
unsigned int config_ready;
void *airq_info;
+ u64 dma_mask;
};
struct vq_info_block_legacy {
@@ -108,7 +109,6 @@ struct virtio_rev_info {
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
- void *queue;
union {
struct vq_info_block s;
struct vq_info_block_legacy l;
@@ -182,7 +182,7 @@ static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
write_unlock_irqrestore(&info->lock, flags);
}
-static void virtio_airq_handler(struct airq_struct *airq)
+static void virtio_airq_handler(struct airq_struct *airq, bool floating)
{
struct airq_info *info = container_of(airq, struct airq_info, airq);
unsigned long ai;
@@ -423,7 +423,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
struct virtio_ccw_vq_info *info = vq->priv;
unsigned long flags;
- unsigned long size;
int ret;
unsigned int index = vq->index;
@@ -461,8 +460,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
ret, index);
vring_del_virtqueue(vq);
- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
- free_pages_exact(info->queue, size);
kfree(info->info_block);
kfree(info);
}
@@ -494,8 +491,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
int err;
struct virtqueue *vq = NULL;
struct virtio_ccw_vq_info *info;
- unsigned long size = 0; /* silence the compiler */
+ u64 queue;
unsigned long flags;
+ bool may_reduce;
/* Allocate queue. */
info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
@@ -516,37 +514,34 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = info->num;
goto out_err;
}
- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
- info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
- if (info->queue == NULL) {
- dev_warn(&vcdev->cdev->dev, "no queue\n");
- err = -ENOMEM;
- goto out_err;
- }
+ may_reduce = vcdev->revision > 0;
+ vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
+ vdev, true, may_reduce, ctx,
+ virtio_ccw_kvm_notify, callback, name);
- vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
- true, ctx, info->queue, virtio_ccw_kvm_notify,
- callback, name);
if (!vq) {
/* For now, we fail if we can't get the requested size. */
dev_warn(&vcdev->cdev->dev, "no vq\n");
err = -ENOMEM;
goto out_err;
}
+ /* it may have been reduced */
+ info->num = virtqueue_get_vring_size(vq);
/* Register it with the host. */
+ queue = virtqueue_get_desc_addr(vq);
if (vcdev->revision == 0) {
- info->info_block->l.queue = (__u64)info->queue;
+ info->info_block->l.queue = queue;
info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
info->info_block->l.index = i;
info->info_block->l.num = info->num;
ccw->count = sizeof(info->info_block->l);
} else {
- info->info_block->s.desc = (__u64)info->queue;
+ info->info_block->s.desc = queue;
info->info_block->s.index = i;
info->info_block->s.num = info->num;
- info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
- info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+ info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
+ info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
@@ -572,8 +567,6 @@ out_err:
if (vq)
vring_del_virtqueue(vq);
if (info) {
- if (info->queue)
- free_pages_exact(info->queue, size);
kfree(info->info_block);
}
kfree(info);
@@ -780,12 +773,8 @@ out_free:
static void ccw_transport_features(struct virtio_device *vdev)
{
/*
- * Packed ring isn't enabled on virtio_ccw for now,
- * because virtio_ccw uses some legacy accessors,
- * e.g. virtqueue_get_avail() and virtqueue_get_used()
- * which aren't available in packed ring currently.
+ * Currently nothing to do here.
*/
- __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
@@ -1266,6 +1255,16 @@ static int virtio_ccw_online(struct ccw_device *cdev)
ret = -ENOMEM;
goto out_free;
}
+
+ vcdev->vdev.dev.parent = &cdev->dev;
+ cdev->dev.dma_mask = &vcdev->dma_mask;
+ /* we are fine with common virtio infrastructure using 64 bit DMA */
+ ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
+ goto out_free;
+ }
+
vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
GFP_DMA | GFP_KERNEL);
if (!vcdev->config_block) {
@@ -1280,7 +1279,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
- vcdev->vdev.dev.parent = &cdev->dev;
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
vcdev->cdev = cdev;
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index 6516bc3cb58b..8090dc9a1514 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -30,7 +30,7 @@
* the recommended way for applications to use the coprocessor, and
* the driver interface is not intended for general use.
*
- * See Documentation/sparc/oradax/oracle-dax.txt for more details.
+ * See Documentation/sparc/oradax/oracle-dax.rst for more details.
*/
#include <linux/uaccess.h>
@@ -437,7 +437,7 @@ static int dax_lock_page(void *va, struct page **p)
dax_dbg("uva %p", va);
- ret = get_user_pages_fast((unsigned long)va, 1, 1, p);
+ ret = get_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
if (ret == 1) {
dax_dbg("locked page %p, for VA %p", *p, va);
return 0;
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 01c23d27f290..fe0535affc14 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -272,9 +272,8 @@ mrs[] = {
static void NCR5380_print(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char status, data, basr, mr, icr, i;
+ unsigned char status, basr, mr, icr, i;
- data = NCR5380_read(CURRENT_SCSI_DATA_REG);
status = NCR5380_read(STATUS_REG);
mr = NCR5380_read(MODE_REG);
icr = NCR5380_read(INITIATOR_COMMAND_REG);
@@ -1933,13 +1932,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
if (!hostdata->connected)
return;
- /* Fall through to reject message */
-
+ /* Reject message */
+ /* Fall through */
+ default:
/*
* If we get something weird that we aren't expecting,
- * reject it.
+ * log it.
*/
- default:
if (tmp == EXTENDED_MESSAGE)
scmd_printk(KERN_INFO, cmd,
"rejecting unknown extended message code %02x, length %d\n",
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 55ac55ee6068..40fe08a64535 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -3,7 +3,7 @@
# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic7xxx#7 $
#
config SCSI_AIC7XXX
- tristate "Adaptec AIC7xxx Fast -> U160 support (New Driver)"
+ tristate "Adaptec AIC7xxx Fast -> U160 support"
depends on (PCI || EISA) && SCSI
select SCSI_SPI_ATTRS
---help---
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 3d401d02c019..bdd177e3d762 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -91,6 +91,7 @@ aic7770_probe(struct device *dev)
ahc = ahc_alloc(&aic7xxx_driver_template, name);
if (ahc == NULL)
return (ENOMEM);
+ ahc->dev = dev;
error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
eisaBase);
if (error != 0) {
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 5614921b4041..88b90f9806c9 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -943,6 +943,7 @@ struct ahc_softc {
* Platform specific device information.
*/
ahc_dev_softc_t dev_softc;
+ struct device *dev;
/*
* Bus specific device information.
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index f3362f4ab16e..a9d40d3b90ef 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -1666,7 +1666,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
printk("\tCRC Value Mismatch\n");
if ((sstat2 & CRCENDERR) != 0)
printk("\tNo terminal CRC packet "
- "recevied\n");
+ "received\n");
if ((sstat2 & CRCREQERR) != 0)
printk("\tIllegal CRC packet "
"request\n");
@@ -4920,24 +4920,30 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
}
ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
}
+ /* fall through */
case 6:
ahc_dmamap_unload(ahc, scb_data->sense_dmat,
scb_data->sense_dmamap);
+ /* fall through */
case 5:
ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
scb_data->sense_dmamap);
ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
scb_data->sense_dmamap);
+ /* fall through */
case 4:
ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
+ /* fall through */
case 3:
ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
scb_data->hscb_dmamap);
+ /* fall through */
case 2:
ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
scb_data->hscb_dmamap);
ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
scb_data->hscb_dmamap);
+ /* fall through */
case 1:
ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
break;
@@ -6002,8 +6008,8 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in Waiting List\n");
ahc_done(ahc, scb);
- /* FALLTHROUGH */
}
+ /* fall through */
case SEARCH_REMOVE:
next = ahc_rem_wscb(ahc, next, prev);
break;
@@ -7008,8 +7014,8 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
}
address -= address_offset;
fmt3_ins->address = address;
- /* FALLTHROUGH */
}
+ /* fall through */
case AIC_OP_OR:
case AIC_OP_AND:
case AIC_OP_XOR:
@@ -7035,7 +7041,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
fmt1_ins->opcode = AIC_OP_AND;
fmt1_ins->immediate = 0xff;
}
- /* FALLTHROUGH */
+ /* fall through */
case AIC_OP_ROL:
if ((ahc->features & AHC_ULTRA2) != 0) {
int i, count;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 3c9c17450bb3..d5c4a0d23706 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -860,8 +860,8 @@ int
ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
int flags, bus_dmamap_t *mapp)
{
- *vaddr = pci_alloc_consistent(ahc->dev_softc,
- dmat->maxsize, mapp);
+ /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */
+ *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
if (*vaddr == NULL)
return ENOMEM;
return 0;
@@ -871,8 +871,7 @@ void
ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
void* vaddr, bus_dmamap_t map)
{
- pci_free_consistent(ahc->dev_softc, dmat->maxsize,
- vaddr, map);
+ dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
}
int
@@ -1123,8 +1122,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
host->transportt = ahc_linux_transport_template;
- retval = scsi_add_host(host,
- (ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
+ retval = scsi_add_host(host, ahc->dev);
if (retval) {
printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
scsi_host_put(host);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0fc14dac7070..717d8d1082ce 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
ahc->dev_softc = pci;
+ ahc->dev = &pci->dev;
error = ahc_pci_config(ahc, entry);
if (error != 0) {
ahc_free(ahc);
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 1267200380f8..446a789cdaf5 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -194,12 +194,11 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
- if (dev->id[c][target_id].last_len != adrcnt)
- {
- k = dev->id[c][target_id].last_len;
+ if (dev->id[c][target_id].last_len != adrcnt) {
+ k = dev->id[c][target_id].last_len;
k -= adrcnt;
dev->id[c][target_id].tran_len = k;
- dev->id[c][target_id].last_len = adrcnt;
+ dev->id[c][target_id].last_len = adrcnt;
}
#ifdef ED_DBGP
printk("dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len);
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 0a6972ee94d7..ea042c1fc70d 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -963,7 +963,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
* @ctrl: ptr to ctrl_info
* @cq: Completion Queue
* @dq: Default Queue
- * @lenght: ring size
+ * @length: ring size
* @entry_size: size of each entry in DEFQ
* @is_header: Header or Data DEFQ
* @ulp_num: Bind to which ULP
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 96b96e2ab91a..ed1bd369baa0 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -679,6 +679,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
if (conn->max_xmit_dlength > 65536)
conn->max_xmit_dlength = 65536;
+ /* fall through */
default:
return 0;
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 76e49d902609..0760d0bd8a10 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1532,6 +1532,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
error = 1;
+ /* fall through */
case UNSOL_DATA_NOTIFY:
pasync_handle = pasync_ctx->async_entry[ci].data;
break;
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 0e119d838e1b..762cb77253b9 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -62,8 +62,7 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
writel((__bfa)->iocfc.req_cq_pi[__reqq], \
(__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \
- mmiowb(); \
- } while (0)
+ } while (0)
#define bfa_rspq_pi(__bfa, __rspq) \
(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 2c85f5b1f9c1..7e996bcf026c 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -2586,6 +2586,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
case FCP_IODIR_RW:
bfa_stats(itnim, input_reqs);
bfa_stats(itnim, output_reqs);
+ /* fall through */
default:
bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
}
@@ -2820,6 +2821,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
case BFI_IOIM_STS_TIMEDOUT:
bfa_stats(ioim->itnim, iocomp_timedout);
+ /* fall through */
case BFI_IOIM_STS_ABORTED:
rsp->io_status = BFI_IOIM_STS_ABORTED;
bfa_stats(ioim->itnim, iocomp_aborted);
@@ -3215,9 +3217,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
switch (event) {
case BFA_TSKIM_SM_DONE:
bfa_reqq_wcancel(&tskim->reqq_wait);
- /*
- * Fall through !!!
- */
+ /* fall through */
case BFA_TSKIM_SM_QRESUME:
bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
bfa_tskim_send_abort(tskim);
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index c4a0c0eb88a5..4a0d881b2602 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -61,7 +61,6 @@ bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
- mmiowb();
}
void
@@ -72,7 +71,6 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
- mmiowb();
}
void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index b0ff378dece2..b7be5f4f02a5 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -81,7 +81,6 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
- mmiowb();
}
/*
@@ -94,7 +93,6 @@ bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
{
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
- mmiowb();
}
void
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 039328d9ef13..19734ec7f42e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -991,7 +991,6 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
FCOE_CQE_TOGGLE_BIT_SHIFT);
msg = *((u32 *)rx_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
- mmiowb();
}
@@ -1409,7 +1408,6 @@ void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
(tgt->sq_curr_toggle_bit << 15);
msg = *((u32 *)sq_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
- mmiowb();
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index bc9f2a2365f4..8def63c0755f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1083,7 +1083,6 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_rport *tgt = io_req->tgt;
- int rc = SUCCESS;
unsigned int time_left;
io_req->wait_for_comp = 1;
@@ -1110,7 +1109,7 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_lock_bh(&tgt->tgt_lock);
- return rc;
+ return SUCCESS;
}
/**
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index fae6f71e677d..12666313b937 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -253,7 +253,6 @@ void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
writew(ep->qp.rq_prod_idx,
ep->qp.ctx_base + CNIC_RECV_DOORBELL);
}
- mmiowb();
}
@@ -279,8 +278,6 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
bnx2i_ring_577xx_doorbell(bnx2i_conn);
} else
writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
-
- mmiowb(); /* flush posted PCI writes */
}
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
index 7c8814715711..b2540402fafc 100644
--- a/drivers/scsi/csiostor/csio_isr.c
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -474,13 +474,39 @@ csio_reduce_sqsets(struct csio_hw *hw, int cnt)
csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}
+static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
+{
+ struct csio_hw *hw = affd->priv;
+ u8 i;
+
+ if (!nvecs)
+ return;
+
+ if (nvecs < hw->num_pports) {
+ affd->nr_sets = 1;
+ affd->set_size[0] = nvecs;
+ return;
+ }
+
+ affd->nr_sets = hw->num_pports;
+ for (i = 0; i < hw->num_pports; i++)
+ affd->set_size[i] = nvecs / hw->num_pports;
+}
+
static int
csio_enable_msix(struct csio_hw *hw)
{
int i, j, k, n, min, cnt;
int extra = CSIO_EXTRA_VECS;
struct csio_scsi_cpu_info *info;
- struct irq_affinity desc = { .pre_vectors = 2 };
+ struct irq_affinity desc = {
+ .pre_vectors = CSIO_EXTRA_VECS,
+ .calc_sets = csio_calc_sets,
+ .priv = hw,
+ };
+
+ if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
+ return -ENOSPC;
min = hw->num_pports + extra;
cnt = hw->num_sqsets + extra;
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 462560b2855e..469d0bc9f5fe 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
}
out:
- if (req->nsge > 0)
+ if (req->nsge > 0) {
scsi_dma_unmap(cmnd);
+ if (req->dcopy && (host_status == DID_OK))
+ host_status = csio_scsi_copy_to_sgl(hw, req);
+ }
cmnd->result = (((host_status) << 16) | scsi_status);
cmnd->scsi_done(cmnd);
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 66bbd21819ae..03bd896cdbb9 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -808,6 +808,7 @@ csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
csio_q_eqid(hw, i) = CSIO_MAX_QID;
}
+ /* fall through */
case CSIO_INGRESS:
if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
csio_wr_cleanup_iq_ftr(hw, i);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 75e1273a44b3..b8dd9e648dd0 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -979,14 +979,17 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
if (csk->atid < 0) {
pr_err("NO atid available.\n");
- goto rel_resource;
+ return -EINVAL;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
- if (!skb)
- goto rel_resource;
+ if (!skb) {
+ cxgb3_free_atid(t3dev, csk->atid);
+ cxgbi_sock_put(csk);
+ return -ENOMEM;
+ }
skb->sk = (struct sock *)csk;
set_arp_failure_handler(skb, act_open_arp_failure);
csk->snd_win = cxgb3i_snd_win;
@@ -1007,11 +1010,6 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
return 0;
-
-rel_resource:
- if (skb)
- __kfree_skb(skb);
- return -EINVAL;
}
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index d44914e5e415..124f3345420f 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -60,7 +60,7 @@ MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
-MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes");
+MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 006372b3fba2..8b915d4ed98d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -282,7 +282,6 @@ struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
-#if IS_ENABLED(CONFIG_IPV6)
static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
int *port)
{
@@ -315,7 +314,6 @@ static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
ndev, ndev->name);
return NULL;
}
-#endif
void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
@@ -653,6 +651,8 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
}
cdev = cxgbi_device_find_by_netdev(ndev, &port);
+ if (!cdev)
+ cdev = cxgbi_device_find_by_mac(ndev, &port);
if (!cdev) {
pr_info("dst %pI4, %s, NOT cxgbi device.\n",
&daddr->sin_addr.s_addr, ndev->name);
@@ -2310,7 +2310,6 @@ int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
{
struct cxgbi_endpoint *cep = ep->dd_data;
struct cxgbi_sock *csk;
- int len;
log_debug(1 << CXGBI_DBG_ISCSI,
"cls_conn 0x%p, param %d.\n", ep, param);
@@ -2328,9 +2327,9 @@ int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
&csk->daddr, param, buf);
default:
- return -ENOSYS;
+ break;
}
- return len;
+ return -ENOSYS;
}
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
@@ -2563,13 +2562,9 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
pr_info("shost 0x%p, priv NULL.\n", shost);
goto err_out;
}
-
- rtnl_lock();
- if (!vlan_uses_dev(hba->ndev))
- ifindex = hba->ndev->ifindex;
- rtnl_unlock();
}
+check_route:
if (dst_addr->sa_family == AF_INET) {
csk = cxgbi_check_route(dst_addr, ifindex);
#if IS_ENABLED(CONFIG_IPV6)
@@ -2590,6 +2585,13 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
if (!hba)
hba = csk->cdev->hbas[csk->port_id];
else if (hba != csk->cdev->hbas[csk->port_id]) {
+ if (ifindex != hba->ndev->ifindex) {
+ cxgbi_sock_put(csk);
+ cxgbi_sock_closed(csk);
+ ifindex = hba->ndev->ifindex;
+ goto check_route;
+ }
+
pr_info("Could not connect through requested host %u"
"hba 0x%p != 0x%p (%u).\n",
shost->host_no, hba,
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index abdc34affdf6..a3afd1478ab6 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -835,8 +835,8 @@ static void adpt_i2o_sys_shutdown(void)
adpt_hba *pHba, *pNext;
struct adpt_i2o_post_wait_data *p1, *old;
- printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
- printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
+ printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
+ printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
/* Delete all IOPs from the controller chain */
/* They should have already been released by the
* scsi-core
@@ -859,7 +859,7 @@ static void adpt_i2o_sys_shutdown(void)
// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
adpt_post_wait_queue = NULL;
- printk(KERN_INFO "Adaptec I2O controllers down.\n");
+ printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
@@ -3390,7 +3390,7 @@ static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
}
- return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
+ return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}
@@ -3463,8 +3463,8 @@ static int adpt_i2o_enable_hba(adpt_hba* pHba)
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
- u32 msg[12];
- int ret;
+ u32 msg[12];
+ int ret;
msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 465df475f753..76fd02ccbf49 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1031,7 +1031,7 @@ static int esp_check_spur_intr(struct esp *esp)
static void esp_schedule_reset(struct esp *esp)
{
- esp_log_reset("esp_schedule_reset() from %pf\n",
+ esp_log_reset("esp_schedule_reset() from %ps\n",
__builtin_return_address(0));
esp->flags |= ESP_FLAG_RESETTING;
esp_event(esp, ESP_EVENT_RESET);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index e7f1dd4f3b66..0ca9b4393770 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -3697,8 +3697,9 @@ static int ioc_general(void __user *arg, char *cmnd)
rval = 0;
out_free_buf:
- dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len, buf,
- paddr);
+ if (buf)
+ dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len,
+ buf, paddr);
return rval;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 9bfa9f12d81e..fc87994b5d73 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -170,6 +170,7 @@ struct hisi_sas_phy {
u32 code_violation_err_count;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
+ int enable;
};
struct hisi_sas_port {
@@ -551,6 +552,8 @@ extern int hisi_sas_slave_configure(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
+extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
+ int enable);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 14bac4966c87..8a7feb8ed8d6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -10,7 +10,6 @@
*/
#include "hisi_sas.h"
-#include "../libsas/sas_internal.h"
#define DRV_NAME "hisi_sas"
#define DEV_IS_GONE(dev) \
@@ -171,7 +170,7 @@ void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
int phy_no;
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
- hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
@@ -684,7 +683,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
id->initiator_bits = SAS_PROTOCOL_ALL;
id->target_bits = phy->identify.target_port_protocols;
} else if (phy->phy_type & PORT_TYPE_SATA) {
- /*Nothing*/
+ /* Nothing */
}
sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
@@ -755,7 +754,8 @@ static int hisi_sas_init_device(struct domain_device *device)
* STP target port
*/
local_phy = sas_get_local_phy(device);
- if (!scsi_is_sas_phy_local(local_phy)) {
+ if (!scsi_is_sas_phy_local(local_phy) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
unsigned long deadline = ata_deadline(jiffies, 20000);
struct sata_device *sata_dev = &device->sata_dev;
struct ata_host *ata_host = sata_dev->ata_host;
@@ -770,8 +770,7 @@ static int hisi_sas_init_device(struct domain_device *device)
}
sas_put_local_phy(local_phy);
if (rc) {
- dev_warn(dev, "SATA disk hardreset fail: 0x%x\n",
- rc);
+ dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
return rc;
}
@@ -976,6 +975,30 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}
+/* Wrapper to ensure we track hisi_sas_phy.enable properly */
+void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *aphy = &phy->sas_phy;
+ struct sas_phy *sphy = aphy->phy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy->lock, flags);
+
+ if (enable) {
+ /* We may have been enabled already; if so, don't touch */
+ if (!phy->enable)
+ sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+ hisi_hba->hw->phy_start(hisi_hba, phy_no);
+ } else {
+ sphy->negotiated_linkrate = SAS_PHY_DISABLED;
+ hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+ }
+ phy->enable = enable;
+ spin_unlock_irqrestore(&phy->lock, flags);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
+
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
struct sas_ha_struct *sas_ha = sas_phy->ha;
@@ -1112,10 +1135,10 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
sas_phy->phy->maximum_linkrate = max;
sas_phy->phy->minimum_linkrate = min;
- hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
msleep(100);
hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
- hisi_hba->hw->phy_start(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
return 0;
}
@@ -1133,13 +1156,13 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_LINK_RESET:
- hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
msleep(100);
- hisi_hba->hw->phy_start(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
break;
case PHY_FUNC_DISABLE:
- hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
break;
case PHY_FUNC_SET_LINK_RATE:
@@ -1264,8 +1287,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
/* no error, but return the number of bytes of
* underrun
*/
- dev_warn(dev, "abort tmf: task to dev %016llx "
- "resp: 0x%x sts 0x%x underrun\n",
+ dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
SAS_ADDR(device->sas_addr),
task->task_status.resp,
task->task_status.stat);
@@ -1280,10 +1302,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
break;
}
- dev_warn(dev, "abort tmf: task to dev "
- "%016llx resp: 0x%x status 0x%x\n",
- SAS_ADDR(device->sas_addr), task->task_status.resp,
- task->task_status.stat);
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_OPEN_REJECT) {
+ dev_warn(dev, "abort tmf: open reject failed\n");
+ res = -EIO;
+ } else {
+ dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
+ SAS_ADDR(device->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ }
sas_free_task(task);
task = NULL;
}
@@ -1427,9 +1455,9 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
sas_ha->notify_port_event(sas_phy,
PORTE_BROADCAST_RCVD);
}
- } else if (old_state & (1 << phy_no))
- /* PHY down but was up before */
+ } else {
hisi_sas_phy_down(hisi_hba, phy_no, 0);
+ }
}
}
@@ -1711,7 +1739,7 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
struct hisi_sas_tmf_task tmf_task;
- int rc = TMF_RESP_FUNC_FAILED;
+ int rc;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
@@ -1803,7 +1831,7 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
if (dev_is_sata(device)) {
rc = hisi_sas_softreset_ata_disk(device);
- if (rc)
+ if (rc == TMF_RESP_FUNC_FAILED)
return TMF_RESP_FUNC_FAILED;
}
@@ -2100,10 +2128,8 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
}
exit:
- dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
- "resp: 0x%x sts 0x%x\n",
- SAS_ADDR(device->sas_addr),
- task,
+ dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
+ SAS_ADDR(device->sas_addr), task,
task->task_status.resp, /* 0 is complete, -1 is undelivered */
task->task_status.stat);
sas_free_task(task);
@@ -2172,16 +2198,18 @@ static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_phy *sphy = sas_phy->phy;
- struct sas_phy_data *d = sphy->hostdata;
+ unsigned long flags;
phy->phy_attached = 0;
phy->phy_type = 0;
phy->port = NULL;
- if (d->enable)
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->enable)
sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
else
sphy->negotiated_linkrate = SAS_PHY_DISABLED;
+ spin_unlock_irqrestore(&phy->lock, flags);
}
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
@@ -2234,6 +2262,19 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
+int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+
+ if (reset_type != SCSI_ADAPTER_RESET)
+ return -EOPNOTSUPP;
+
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
+
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
@@ -2491,22 +2532,19 @@ int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
if (device_property_read_u32(dev, "ctrl-reset-reg",
&hisi_hba->ctrl_reset_reg)) {
- dev_err(dev,
- "could not get property ctrl-reset-reg\n");
+ dev_err(dev, "could not get property ctrl-reset-reg\n");
return -ENOENT;
}
if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
&hisi_hba->ctrl_reset_sts_reg)) {
- dev_err(dev,
- "could not get property ctrl-reset-sts-reg\n");
+ dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
return -ENOENT;
}
if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
&hisi_hba->ctrl_clock_ena_reg)) {
- dev_err(dev,
- "could not get property ctrl-clock-ena-reg\n");
+ dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
return -ENOENT;
}
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 293807443480..78fe7d344848 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -798,16 +798,11 @@ static void start_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
enable_phy_v1_hw(hisi_hba, phy_no);
}
-static void stop_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
- disable_phy_v1_hw(hisi_hba, phy_no);
-}
-
static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
{
- stop_phy_v1_hw(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
msleep(100);
- start_phy_v1_hw(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
static void start_phys_v1_hw(struct timer_list *t)
@@ -817,7 +812,7 @@ static void start_phys_v1_hw(struct timer_list *t)
for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x12a);
- start_phy_v1_hw(hisi_hba, i);
+ hisi_sas_phy_enable(hisi_hba, i, 1);
}
}
@@ -1695,8 +1690,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
irq = platform_get_irq(pdev, idx);
if (!irq) {
- dev_err(dev,
- "irq init: fail map phy interrupt %d\n",
+ dev_err(dev, "irq init: fail map phy interrupt %d\n",
idx);
return -ENOENT;
}
@@ -1704,8 +1698,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
DRV_NAME " phy", phy);
if (rc) {
- dev_err(dev, "irq init: could not request "
- "phy interrupt %d, rc=%d\n",
+ dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
irq, rc);
return -ENOENT;
}
@@ -1742,8 +1735,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
- dev_err(dev,
- "irq init: could not request fatal interrupt %d, rc=%d\n",
+ dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
irq, rc);
return -ENOENT;
}
@@ -1823,6 +1815,7 @@ static struct scsi_host_template sht_v1_hw = {
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs_v1_hw,
+ .host_reset = hisi_sas_host_reset,
};
static const struct hisi_sas_hw hisi_sas_v1_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 89160ab3efb0..d4650bed8274 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1546,14 +1546,14 @@ static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
u32 txid_auto;
- disable_phy_v2_hw(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
if (phy->identify.device_type == SAS_END_DEVICE) {
txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
txid_auto | TX_HARDRST_MSK);
}
msleep(100);
- start_phy_v2_hw(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1586,7 +1586,7 @@ static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
if (!sas_phy->phy->enabled)
continue;
- start_phy_v2_hw(hisi_hba, i);
+ hisi_sas_phy_enable(hisi_hba, i, 1);
}
}
@@ -2423,14 +2423,12 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
slot_err_v2_hw(hisi_hba, task, slot, 2);
if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
- "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
- "Error info: 0x%x 0x%x 0x%x 0x%x\n",
- slot->idx, task, sas_dev->device_id,
- complete_hdr->dw0, complete_hdr->dw1,
- complete_hdr->act, complete_hdr->dw3,
- error_info[0], error_info[1],
- error_info[2], error_info[3]);
+ dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task, sas_dev->device_id,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
if (unlikely(slot->abort))
return ts->stat;
@@ -2502,7 +2500,7 @@ out:
spin_lock_irqsave(&device->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state)) {
spin_unlock_irqrestore(&device->done_lock, flags);
- dev_info(dev, "slot complete: task(%p) ignored\n ",
+ dev_info(dev, "slot complete: task(%p) ignored\n",
task);
return sts;
}
@@ -2935,7 +2933,7 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
dev_warn(dev, "phy%d identify timeout\n",
- phy_no);
+ phy_no);
hisi_sas_notify_phy_event(phy,
HISI_PHYE_LINK_RESET);
}
@@ -3036,7 +3034,7 @@ static const struct hisi_sas_hw_error axi_error[] = {
{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
- {},
+ {}
};
static const struct hisi_sas_hw_error fifo_error[] = {
@@ -3045,7 +3043,7 @@ static const struct hisi_sas_hw_error fifo_error[] = {
{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
{ .msk = BIT(11), .msg = "CMDP_FIFO" },
{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
- {},
+ {}
};
static const struct hisi_sas_hw_error fatal_axi_errors[] = {
@@ -3109,12 +3107,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
if (!(err_value & sub->msk))
continue;
dev_err(dev, "%s (0x%x) found!\n",
- sub->msg, irq_value);
+ sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
dev_err(dev, "%s (0x%x) found!\n",
- axi_error->msg, irq_value);
+ axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
@@ -3258,7 +3256,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
/* check ERR bit of Status Register */
if (fis->status & ATA_ERR) {
dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
- fis->status);
+ fis->status);
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
res = IRQ_NONE;
goto end;
@@ -3349,8 +3347,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
- dev_err(dev, "irq init: could not request "
- "phy interrupt %d, rc=%d\n",
+ dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
goto free_phy_int_irqs;
@@ -3364,8 +3361,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
DRV_NAME " sata", phy);
if (rc) {
- dev_err(dev, "irq init: could not request "
- "sata interrupt %d, rc=%d\n",
+ dev_err(dev, "irq init: could not request sata interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
goto free_sata_int_irqs;
@@ -3377,8 +3373,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
- dev_err(dev,
- "irq init: could not request fatal interrupt %d, rc=%d\n",
+ dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
goto free_fatal_int_irqs;
@@ -3393,8 +3388,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
DRV_NAME " cq", cq);
if (rc) {
- dev_err(dev,
- "irq init: could not request cq interrupt %d, rc=%d\n",
+ dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
goto free_cq_int_irqs;
@@ -3546,7 +3540,7 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
break;
default:
dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
- reg_type);
+ reg_type);
return -EINVAL;
}
@@ -3599,6 +3593,7 @@ static struct scsi_host_template sht_v2_hw = {
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs_v2_hw,
+ .host_reset = hisi_sas_host_reset,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 086695a4099f..49620c2411df 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -52,7 +52,36 @@
#define CFG_ABT_SET_IPTT_DONE 0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF 0
#define HGC_IOMB_PROC1_STATUS 0x104
+#define HGC_LM_DFX_STATUS2 0x128
+#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
+#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
+#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
+#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
+#define HGC_CQE_ECC_ADDR 0x13c
+#define HGC_CQE_ECC_1B_ADDR_OFF 0
+#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
+#define HGC_CQE_ECC_MB_ADDR_OFF 8
+#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
+#define HGC_IOST_ECC_ADDR 0x140
+#define HGC_IOST_ECC_1B_ADDR_OFF 0
+#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
+#define HGC_IOST_ECC_MB_ADDR_OFF 16
+#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
+#define HGC_DQE_ECC_ADDR 0x144
+#define HGC_DQE_ECC_1B_ADDR_OFF 0
+#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
+#define HGC_DQE_ECC_MB_ADDR_OFF 16
+#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define CHNL_INT_STATUS 0x148
+#define HGC_ITCT_ECC_ADDR 0x150
+#define HGC_ITCT_ECC_1B_ADDR_OFF 0
+#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_1B_ADDR_OFF)
+#define HGC_ITCT_ECC_MB_ADDR_OFF 16
+#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_MB_ADDR_OFF)
#define HGC_AXI_FIFO_ERR_INFO 0x154
#define AXI_ERR_INFO_OFF 0
#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
@@ -81,6 +110,10 @@
#define ENT_INT_SRC3_ITC_INT_OFF 15
#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
#define ENT_INT_SRC3_ABT_OFF 16
+#define ENT_INT_SRC3_DQE_POISON_OFF 18
+#define ENT_INT_SRC3_IOST_POISON_OFF 19
+#define ENT_INT_SRC3_ITCT_POISON_OFF 20
+#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF 21
#define ENT_INT_SRC_MSK1 0x1c4
#define ENT_INT_SRC_MSK2 0x1c8
#define ENT_INT_SRC_MSK3 0x1cc
@@ -90,6 +123,28 @@
#define HGC_COM_INT_MSK 0x1d8
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
#define SAS_ECC_INTR 0x1e8
+#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
+#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
+#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
+#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
+#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 4
+#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 5
+#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 6
+#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 7
+#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 8
+#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 9
+#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
+#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 12
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 13
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 14
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 15
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 16
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 17
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 18
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 19
+#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF 20
+#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF 21
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define CQE_SEND_CNT 0x248
@@ -105,6 +160,20 @@
#define COMPL_Q_0_DEPTH 0x4e8
#define COMPL_Q_0_WR_PTR 0x4ec
#define COMPL_Q_0_RD_PTR 0x4f0
+#define HGC_RXM_DFX_STATUS14 0xae8
+#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
+#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM0_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
+#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM1_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
+#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM2_OFF)
+#define HGC_RXM_DFX_STATUS15 0xaec
+#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
+#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS15_MEM3_OFF)
#define AWQOS_AWCACHE_CFG 0xc84
#define ARQOS_ARCACHE_CFG 0xc88
#define HILINK_ERR_DFX 0xe04
@@ -172,14 +241,18 @@
#define CHL_INT0_PHY_RDY_OFF 5
#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1 (PORT_BASE + 0x1b8)
-#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
-#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
-#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
-#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15
+#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16
+#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17
+#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18
#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
+#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF 23
+#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF 24
+#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF 26
+#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF 27
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT2_RX_DISP_ERR_OFF 28
@@ -227,10 +300,8 @@
#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
#define AXI_CFG (0x5100)
#define AM_ROB_ECC_ERR_ADDR (0x510c)
-#define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF 0
-#define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF)
-#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
-#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
+#define AM_ROB_ECC_ERR_ADDR_OFF 0
+#define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff
/* RAS registers need init */
#define RAS_BASE (0x6000)
@@ -408,6 +479,10 @@ struct hisi_sas_err_record_v3 {
#define BASE_VECTORS_V3_HW 16
#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
+enum {
+ DSM_FUNC_ERR_HANDLE_MSI = 0,
+};
+
static bool hisi_sas_intr_conv;
MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
@@ -474,7 +549,6 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
- struct pci_dev *pdev = hisi_hba->pci_dev;
int i;
/* Global registers init */
@@ -494,14 +568,11 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
- if (pdev->revision >= 0x21)
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7aff);
- else
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
- hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
for (i = 0; i < hisi_hba->queue_count; i++)
@@ -532,12 +603,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- if (pdev->revision >= 0x21)
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
- 0xffffffff);
- else
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
- 0xff87ffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -804,6 +870,8 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ union acpi_object *obj;
+ guid_t guid;
int rc;
rc = reset_hw_v3_hw(hisi_hba);
@@ -815,6 +883,19 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
msleep(100);
init_reg_v3_hw(hisi_hba);
+ if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
+ dev_err(dev, "Parse GUID failed\n");
+ return -EINVAL;
+ }
+
+ /* Switch over to MSI handling, from PCI AER default */
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
+ DSM_FUNC_ERR_HANDLE_MSI, NULL);
+ if (!obj)
+ dev_warn(dev, "Switch over to MSI handling failed\n");
+ else
+ ACPI_FREE(obj);
+
return 0;
}
@@ -856,14 +937,14 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
u32 txid_auto;
- disable_phy_v3_hw(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
if (phy->identify.device_type == SAS_END_DEVICE) {
txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
txid_auto | TX_HARDRST_MSK);
}
msleep(100);
- start_phy_v3_hw(hisi_hba, phy_no);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
@@ -882,7 +963,7 @@ static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
if (!sas_phy->phy->enabled)
continue;
- start_phy_v3_hw(hisi_hba, i);
+ hisi_sas_phy_enable(hisi_hba, i, 1);
}
}
@@ -929,7 +1010,7 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
dev_warn(dev, "full queue=%d r=%d w=%d\n",
- queue, r, w);
+ queue, r, w);
return -EAGAIN;
}
@@ -1380,6 +1461,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_initial_fis *initial_fis;
struct dev_to_host_fis *fis;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+ struct Scsi_Host *shost = hisi_hba->shost;
dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
@@ -1396,6 +1478,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
sas_phy->oob_mode = SATA_OOB_MODE;
attached_sas_addr[0] = 0x50;
+ attached_sas_addr[6] = shost->host_no;
attached_sas_addr[7] = phy_no;
memcpy(sas_phy->attached_sas_addr,
attached_sas_addr,
@@ -1540,6 +1623,14 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
static const struct hisi_sas_hw_error port_axi_error[] = {
{
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
+ .msg = "dmac_tx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
+ .msg = "dmac_rx_ecc_bad_err",
+ },
+ {
.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
.msg = "dma_tx_axi_wr_err",
},
@@ -1555,6 +1646,22 @@ static const struct hisi_sas_hw_error port_axi_error[] = {
.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
.msg = "dma_rx_axi_rd_err",
},
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
+ .msg = "dma_tx_fifo_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
+ .msg = "dma_rx_fifo_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
+ .msg = "dma_tx_axi_ruser_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
+ .msg = "dma_rx_axi_ruser_err",
+ },
};
static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1719,6 +1826,121 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
+static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
+ .msk = HGC_DQE_ECC_MB_ADDR_MSK,
+ .shift = HGC_DQE_ECC_MB_ADDR_OFF,
+ .msg = "hgc_dqe_eccbad_intr found: ram addr is 0x%08X\n",
+ .reg = HGC_DQE_ECC_ADDR,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
+ .msk = HGC_IOST_ECC_MB_ADDR_MSK,
+ .shift = HGC_IOST_ECC_MB_ADDR_OFF,
+ .msg = "hgc_iost_eccbad_intr found: ram addr is 0x%08X\n",
+ .reg = HGC_IOST_ECC_ADDR,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
+ .msk = HGC_ITCT_ECC_MB_ADDR_MSK,
+ .shift = HGC_ITCT_ECC_MB_ADDR_OFF,
+ .msg = "hgc_itct_eccbad_intr found: ram addr is 0x%08X\n",
+ .reg = HGC_ITCT_ECC_ADDR,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
+ .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
+ .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
+ .msg = "hgc_iostl_eccbad_intr found: mem addr is 0x%08X\n",
+ .reg = HGC_LM_DFX_STATUS2,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
+ .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
+ .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
+ .msg = "hgc_itctl_eccbad_intr found: mem addr is 0x%08X\n",
+ .reg = HGC_LM_DFX_STATUS2,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
+ .msk = HGC_CQE_ECC_MB_ADDR_MSK,
+ .shift = HGC_CQE_ECC_MB_ADDR_OFF,
+ .msg = "hgc_cqe_eccbad_intr found: ram address is 0x%08X\n",
+ .reg = HGC_CQE_ECC_ADDR,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
+ .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
+ .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
+ .msg = "rxm_mem0_eccbad_intr found: mem addr is 0x%08X\n",
+ .reg = HGC_RXM_DFX_STATUS14,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
+ .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
+ .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
+ .msg = "rxm_mem1_eccbad_intr found: mem addr is 0x%08X\n",
+ .reg = HGC_RXM_DFX_STATUS14,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
+ .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
+ .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
+ .msg = "rxm_mem2_eccbad_intr found: mem addr is 0x%08X\n",
+ .reg = HGC_RXM_DFX_STATUS14,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
+ .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
+ .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
+ .msg = "rxm_mem3_eccbad_intr found: mem addr is 0x%08X\n",
+ .reg = HGC_RXM_DFX_STATUS15,
+ },
+ {
+ .irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
+ .msk = AM_ROB_ECC_ERR_ADDR_MSK,
+ .shift = AM_ROB_ECC_ERR_ADDR_OFF,
+ .msg = "ooo_ram_eccbad_intr found: ROB_ECC_ERR_ADDR=0x%08X\n",
+ .reg = AM_ROB_ECC_ERR_ADDR,
+ },
+};
+
+static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
+ u32 irq_value)
+{
+ struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *ecc_error;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
+ ecc_error = &multi_bit_ecc_errors[i];
+ if (irq_value & ecc_error->irq_msk) {
+ val = hisi_sas_read32(hisi_hba, ecc_error->reg);
+ val &= ecc_error->msk;
+ val >>= ecc_error->shift;
+ dev_err(dev, ecc_error->msg, irq_value, val);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ }
+}
+
+static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
+{
+ u32 irq_value, irq_msk;
+
+ irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
+ if (irq_value)
+ multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);
+
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
+}
+
static const struct hisi_sas_hw_error axi_error[] = {
{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
@@ -1728,7 +1950,7 @@ static const struct hisi_sas_hw_error axi_error[] = {
{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
- {},
+ {}
};
static const struct hisi_sas_hw_error fifo_error[] = {
@@ -1737,7 +1959,7 @@ static const struct hisi_sas_hw_error fifo_error[] = {
{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
{ .msk = BIT(11), .msg = "CMDP_FIFO" },
{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
- {},
+ {}
};
static const struct hisi_sas_hw_error fatal_axi_error[] = {
@@ -1771,6 +1993,23 @@ static const struct hisi_sas_hw_error fatal_axi_error[] = {
.irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
.msg = "SAS_HGC_ABT fetch LM list",
},
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
+ .msg = "read dqe poison",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
+ .msg = "read iost poison",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
+ .msg = "read itct poison",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
+ .msg = "read itct ncq poison",
+ },
+
};
static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
@@ -1823,6 +2062,8 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
}
}
+ fatal_ecc_int_v3_hw(hisi_hba);
+
if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
u32 dev_id = reg_val & ITCT_DEV_MSK;
@@ -1966,13 +2207,11 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
slot_err_v3_hw(hisi_hba, task, slot);
if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
- "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
- "Error info: 0x%x 0x%x 0x%x 0x%x\n",
- slot->idx, task, sas_dev->device_id,
- dw0, dw1, complete_hdr->act, dw3,
- error_info[0], error_info[1],
- error_info[2], error_info[3]);
+ dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task, sas_dev->device_id,
+ dw0, dw1, complete_hdr->act, dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -2205,8 +2444,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
cq_interrupt_v3_hw, irqflags,
DRV_NAME " cq", cq);
if (rc) {
- dev_err(dev,
- "could not request cq%d interrupt, rc=%d\n",
+ dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
i, rc);
rc = -ENOENT;
goto free_cq_irqs;
@@ -2362,7 +2600,7 @@ static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
break;
default:
dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
- reg_type);
+ reg_type);
return -EINVAL;
}
@@ -2678,6 +2916,7 @@ static struct scsi_host_template sht_v3_hw = {
.ioctl = sas_ioctl,
.shost_attrs = host_attrs_v3_hw,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .host_reset = hisi_sas_host_reset,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -2800,7 +3039,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hisi_hba->regs = pcim_iomap(pdev, 5, 0);
if (!hisi_hba->regs) {
- dev_err(dev, "cannot map register.\n");
+ dev_err(dev, "cannot map register\n");
rc = -ENOMEM;
goto err_out_ha;
}
@@ -2921,161 +3160,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
scsi_host_put(shost);
}
-static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
- { .irq_msk = BIT(19), .msg = "HILINK_INT" },
- { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
- { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
- { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
- { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
- { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
- { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
- { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
- { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
- { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
- { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
- { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
- { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
-};
-
-static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
- { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
- { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
- { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
- { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
- { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
- { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
- { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
- { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
- { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
- { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
- { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
- { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
- { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
- { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
- { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
- { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
- { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
- { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
- { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
- { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
- { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
- { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
- { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
-};
-
-static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = {
- { .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" },
- { .irq_msk = BIT(1), .msg = "DMAC1_AXI_BUS_ERR" },
- { .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" },
- { .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" },
- { .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" },
- { .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" },
- { .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" },
- { .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" },
- { .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" },
- { .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" },
- { .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" },
- { .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" },
- { .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" },
- { .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" },
- { .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" },
- { .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" },
- { .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" },
- { .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" },
- { .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" },
- { .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" },
-};
-
-static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
-{
- struct device *dev = hisi_hba->dev;
- const struct hisi_sas_hw_error *ras_error;
- bool need_reset = false;
- u32 irq_value;
- int i;
-
- irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
- for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
- ras_error = &sas_ras_intr0_nfe[i];
- if (ras_error->irq_msk & irq_value) {
- dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
- ras_error->msg, irq_value);
- need_reset = true;
- }
- }
- hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
-
- irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
- for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
- ras_error = &sas_ras_intr1_nfe[i];
- if (ras_error->irq_msk & irq_value) {
- dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
- ras_error->msg, irq_value);
- need_reset = true;
- }
- }
- hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
-
- irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2);
- for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) {
- ras_error = &sas_ras_intr2_nfe[i];
- if (ras_error->irq_msk & irq_value) {
- dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n",
- ras_error->msg, irq_value);
- need_reset = true;
- }
- }
- hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value);
-
- return need_reset;
-}
-
-static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct sas_ha_struct *sha = pci_get_drvdata(pdev);
- struct hisi_hba *hisi_hba = sha->lldd_ha;
- struct device *dev = hisi_hba->dev;
-
- dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (process_non_fatal_error_v3_hw(hisi_hba))
- return PCI_ERS_RESULT_NEED_RESET;
-
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
-{
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
-{
- struct sas_ha_struct *sha = pci_get_drvdata(pdev);
- struct hisi_hba *hisi_hba = sha->lldd_ha;
- struct device *dev = hisi_hba->dev;
- HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
-
- dev_info(dev, "PCI error: slot reset callback!!\n");
- queue_work(hisi_hba->wq, &r.work);
- wait_for_completion(r.completion);
- if (r.done)
- return PCI_ERS_RESULT_RECOVERED;
-
- return PCI_ERS_RESULT_DISCONNECT;
-}
-
static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
@@ -3171,7 +3255,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
pci_power_t device_state = pdev->current_state;
dev_warn(dev, "resuming from operating state [D%d]\n",
- device_state);
+ device_state);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
@@ -3199,9 +3283,6 @@ static const struct pci_device_id sas_v3_pci_table[] = {
MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);
static const struct pci_error_handlers hisi_sas_err_handler = {
- .error_detected = hisi_sas_error_detected_v3_hw,
- .mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
- .slot_reset = hisi_sas_slot_reset_v3_hw,
.reset_prepare = hisi_sas_reset_prepare_v3_hw,
.reset_done = hisi_sas_reset_done_v3_hw,
};
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index f044e7d10d63..1bef1da273c2 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.20-125"
+#define HPSA_DRIVER_VERSION "3.4.20-160"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -2647,9 +2647,20 @@ static void complete_scsi_command(struct CommandList *cp)
decode_sense_data(ei->SenseInfo, sense_data_size,
&sense_key, &asc, &ascq);
if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
- if (sense_key == ABORTED_COMMAND) {
+ switch (sense_key) {
+ case ABORTED_COMMAND:
cmd->result |= DID_SOFT_ERROR << 16;
break;
+ case UNIT_ATTENTION:
+ if (asc == 0x3F && ascq == 0x0E)
+ h->drv_req_rescan = 1;
+ break;
+ case ILLEGAL_REQUEST:
+ if (asc == 0x25 && ascq == 0x00) {
+ dev->removed = 1;
+ cmd->result = DID_NO_CONNECT << 16;
+ }
+ break;
}
break;
}
@@ -3956,14 +3967,18 @@ static int hpsa_update_device_info(struct ctlr_info *h,
memset(this_device->device_id, 0,
sizeof(this_device->device_id));
if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
- sizeof(this_device->device_id)) < 0)
+ sizeof(this_device->device_id)) < 0) {
dev_err(&h->pdev->dev,
- "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
+ "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
h->ctlr, __func__,
h->scsi_host->host_no,
- this_device->target, this_device->lun,
+ this_device->bus, this_device->target,
+ this_device->lun,
scsi_device_type(this_device->devtype),
this_device->model);
+ rc = HPSA_LV_FAILED;
+ goto bail_out;
+ }
if ((this_device->devtype == TYPE_DISK ||
this_device->devtype == TYPE_ZBC) &&
@@ -5809,7 +5824,7 @@ static int hpsa_send_test_unit_ready(struct ctlr_info *h,
/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
(void) fill_cmd(c, TEST_UNIT_READY, h,
NULL, 0, 0, lunaddr, TYPE_CMD);
- rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
if (rc)
return rc;
/* no unmap needed here because no data xfer. */
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index cea7f502e8ca..64ae418d29f3 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -796,21 +796,21 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
return 1; /* wait until imm_wakeup claims parport */
- /* Phase 1 - Connected */
- case 1:
+
+ case 1: /* Phase 1 - Connected */
imm_connect(dev, CONNECT_EPP_MAYBE);
cmd->SCp.phase++;
+ /* fall through */
- /* Phase 2 - We are now talking to the scsi bus */
- case 2:
+ case 2: /* Phase 2 - We are now talking to the scsi bus */
if (!imm_select(dev, scmd_id(cmd))) {
imm_fail(dev, DID_NO_CONNECT);
return 0;
}
cmd->SCp.phase++;
+ /* fall through */
- /* Phase 3 - Ready to accept a command */
- case 3:
+ case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
if (!(r_str(ppb) & 0x80))
return 1;
@@ -818,9 +818,9 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (!imm_send_command(cmd))
return 0;
cmd->SCp.phase++;
+ /* fall through */
- /* Phase 4 - Setup scatter/gather buffers */
- case 4:
+ case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
cmd->SCp.buffer = scsi_sglist(cmd);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
@@ -834,8 +834,9 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
cmd->SCp.phase++;
if (cmd->SCp.this_residual & 0x01)
cmd->SCp.this_residual++;
- /* Phase 5 - Pre-Data transfer stage */
- case 5:
+ /* fall through */
+
+ case 5: /* Phase 5 - Pre-Data transfer stage */
/* Spin lock for BUSY */
w_ctr(ppb, 0x0c);
if (!(r_str(ppb) & 0x80))
@@ -850,9 +851,9 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (imm_negotiate(dev))
return 0;
cmd->SCp.phase++;
+ /* fall through */
- /* Phase 6 - Data transfer stage */
- case 6:
+ case 6: /* Phase 6 - Data transfer stage */
/* Spin lock for BUSY */
w_ctr(ppb, 0x0c);
if (!(r_str(ppb) & 0x80))
@@ -866,9 +867,9 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
return 1;
}
cmd->SCp.phase++;
+ /* fall through */
- /* Phase 7 - Post data transfer stage */
- case 7:
+ case 7: /* Phase 7 - Post data transfer stage */
if ((dev->dp) && (dev->rd)) {
if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) {
w_ctr(ppb, 0x4);
@@ -878,9 +879,9 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
}
}
cmd->SCp.phase++;
+ /* fall through */
- /* Phase 8 - Read status/message */
- case 8:
+ case 8: /* Phase 8 - Read status/message */
/* Check for data overrun */
if (imm_wait(dev) != (unsigned char) 0xb8) {
imm_fail(dev, DID_ERROR);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index dfba4921b265..5bf61431434b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
fc_rport_state(rdata));
- rdata->flags &= ~FC_RP_STARTED;
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 6f93fee2b21b..1ecca71df8b5 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -281,7 +281,7 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
&dev->sata_dev.rps_resp);
if (res) {
- pr_debug("report phy sata to %016llx:0x%x returned 0x%x\n",
+ pr_debug("report phy sata to %016llx:%02d returned 0x%x\n",
SAS_ADDR(dev->parent->sas_addr),
phy->phy_id, res);
return res;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 17b45a0c7bc3..83f2fd70ce76 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -826,9 +826,14 @@ static struct domain_device *sas_ex_discover_end_dev(
#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
if (child->linkrate > parent->min_linkrate) {
+ struct sas_phy *cphy = child->phy;
+ enum sas_linkrate min_prate = cphy->minimum_linkrate,
+ parent_min_lrate = parent->min_linkrate,
+ min_linkrate = (min_prate > parent_min_lrate) ?
+ parent_min_lrate : 0;
struct sas_phy_linkrates rates = {
.maximum_linkrate = parent->min_linkrate,
- .minimum_linkrate = parent->min_linkrate,
+ .minimum_linkrate = min_linkrate,
};
int ret;
@@ -865,7 +870,7 @@ static struct domain_device *sas_ex_discover_end_dev(
res = sas_discover_sata(child);
if (res) {
- pr_notice("sas_discover_sata() for device %16llx at %016llx:0x%x returned 0x%x\n",
+ pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
goto out_list_del;
@@ -890,7 +895,7 @@ static struct domain_device *sas_ex_discover_end_dev(
res = sas_discover_end_dev(child);
if (res) {
- pr_notice("sas_discover_end_dev() for device %16llx at %016llx:0x%x returned 0x%x\n",
+ pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
goto out_list_del;
@@ -955,7 +960,7 @@ static struct domain_device *sas_ex_discover_expander(
int res;
if (phy->routing_attr == DIRECT_ROUTING) {
- pr_warn("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not allowed\n",
+ pr_warn("ex %016llx:%02d:D <--> ex %016llx:0x%x is not allowed\n",
SAS_ADDR(parent->sas_addr), phy_id,
SAS_ADDR(phy->attached_sas_addr),
phy->attached_phy_id);
@@ -1065,7 +1070,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
ex_phy->attached_dev_type != SAS_SATA_PENDING) {
- pr_warn("unknown device type(0x%x) attached to ex %016llx phy 0x%x\n",
+ pr_warn("unknown device type(0x%x) attached to ex %016llx phy%02d\n",
ex_phy->attached_dev_type,
SAS_ADDR(dev->sas_addr),
phy_id);
@@ -1081,7 +1086,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
}
if (sas_ex_join_wide_port(dev, phy_id)) {
- pr_debug("Attaching ex phy%d to wide port %016llx\n",
+ pr_debug("Attaching ex phy%02d to wide port %016llx\n",
phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
return res;
}
@@ -1093,7 +1098,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
break;
case SAS_FANOUT_EXPANDER_DEVICE:
if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
- pr_debug("second fanout expander %016llx phy 0x%x attached to ex %016llx phy 0x%x\n",
+ pr_debug("second fanout expander %016llx phy%02d attached to ex %016llx phy%02d\n",
SAS_ADDR(ex_phy->attached_sas_addr),
ex_phy->attached_phy_id,
SAS_ADDR(dev->sas_addr),
@@ -1126,7 +1131,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
SAS_ADDR(child->sas_addr)) {
ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
if (sas_ex_join_wide_port(dev, i))
- pr_debug("Attaching ex phy%d to wide port %016llx\n",
+ pr_debug("Attaching ex phy%02d to wide port %016llx\n",
i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
}
}
@@ -1151,7 +1156,7 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
phy->routing_attr == SUBTRACTIVE_ROUTING) {
- memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
+ memcpy(sub_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
return 1;
}
@@ -1163,7 +1168,7 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
{
struct expander_device *ex = &dev->ex_dev;
struct domain_device *child;
- u8 sub_addr[8] = {0, };
+ u8 sub_addr[SAS_ADDR_SIZE] = {0, };
list_for_each_entry(child, &ex->children, siblings) {
if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
@@ -1173,7 +1178,7 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
sas_find_sub_addr(child, sub_addr);
continue;
} else {
- u8 s2[8];
+ u8 s2[SAS_ADDR_SIZE];
if (sas_find_sub_addr(child, s2) &&
(SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
@@ -1261,7 +1266,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
else if (SAS_ADDR(sub_sas_addr) !=
SAS_ADDR(phy->attached_sas_addr)) {
- pr_notice("ex %016llx phy 0x%x diverges(%016llx) on subtractive boundary(%016llx). Disabled\n",
+ pr_notice("ex %016llx phy%02d diverges(%016llx) on subtractive boundary(%016llx). Disabled\n",
SAS_ADDR(dev->sas_addr), i,
SAS_ADDR(phy->attached_sas_addr),
SAS_ADDR(sub_sas_addr));
@@ -1282,7 +1287,7 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
};
struct domain_device *parent = child->parent;
- pr_notice("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x has %c:%c routing link!\n",
+ pr_notice("%s ex %016llx phy%02d <--> %s ex %016llx phy%02d has %c:%c routing link!\n",
ex_type[parent->dev_type],
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
@@ -1304,7 +1309,7 @@ static int sas_check_eeds(struct domain_device *child,
if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
res = -ENODEV;
- pr_warn("edge ex %016llx phy S:0x%x <--> edge ex %016llx phy S:0x%x, while there is a fanout ex %016llx\n",
+ pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n",
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
SAS_ADDR(child->sas_addr),
@@ -1327,7 +1332,7 @@ static int sas_check_eeds(struct domain_device *child,
;
else {
res = -ENODEV;
- pr_warn("edge ex %016llx phy 0x%x <--> edge ex %016llx phy 0x%x link forms a third EEDS!\n",
+ pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n",
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
SAS_ADDR(child->sas_addr),
@@ -1445,11 +1450,11 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
goto out;
res = rri_resp[2];
if (res == SMP_RESP_NO_INDEX) {
- pr_warn("overflow of indexes: dev %016llx phy 0x%x index 0x%x\n",
+ pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n",
SAS_ADDR(dev->sas_addr), phy_id, i);
goto out;
} else if (res != SMP_RESP_FUNC_ACC) {
- pr_notice("%s: dev %016llx phy 0x%x index 0x%x result 0x%x\n",
+ pr_notice("%s: dev %016llx phy%02d index 0x%x result 0x%x\n",
__func__, SAS_ADDR(dev->sas_addr), phy_id,
i, res);
goto out;
@@ -1515,7 +1520,7 @@ static int sas_configure_set(struct domain_device *dev, int phy_id,
goto out;
res = cri_resp[2];
if (res == SMP_RESP_NO_INDEX) {
- pr_warn("overflow of indexes: dev %016llx phy 0x%x index 0x%x\n",
+ pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n",
SAS_ADDR(dev->sas_addr), phy_id, index);
}
out:
@@ -1760,10 +1765,11 @@ static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (res == 0) {
- memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8);
+ memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
+ SAS_ADDR_SIZE);
*type = to_dev_type(dr);
if (*type == 0)
- memset(sas_addr, 0, 8);
+ memset(sas_addr, 0, SAS_ADDR_SIZE);
}
kfree(disc_resp);
return res;
@@ -1870,10 +1876,12 @@ static int sas_find_bcast_dev(struct domain_device *dev,
if (phy_id != -1) {
*src_dev = dev;
ex->ex_change_count = ex_change_count;
- pr_info("Expander phy change count has changed\n");
+ pr_info("ex %016llx phy%02d change count has changed\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
return res;
} else
- pr_info("Expander phys DID NOT change\n");
+ pr_info("ex %016llx phys DID NOT change\n",
+ SAS_ADDR(dev->sas_addr));
}
list_for_each_entry(ch, &ex->children, siblings) {
if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
@@ -1983,7 +1991,7 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
struct domain_device *child;
int res;
- pr_debug("ex %016llx phy%d new device attached\n",
+ pr_debug("ex %016llx phy%02d new device attached\n",
SAS_ADDR(dev->sas_addr), phy_id);
res = sas_ex_phy_discover(dev, phy_id);
if (res)
@@ -2022,15 +2030,23 @@ static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
return false;
}
-static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
+static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ bool last, int sibling)
{
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
enum sas_device_type type = SAS_PHY_UNUSED;
- u8 sas_addr[8];
+ u8 sas_addr[SAS_ADDR_SIZE];
+ char msg[80] = "";
int res;
- memset(sas_addr, 0, 8);
+ if (!last)
+ sprintf(msg, ", part of a wide port with phy%02d", sibling);
+
+ pr_debug("ex %016llx rediscovering phy%02d%s\n",
+ SAS_ADDR(dev->sas_addr), phy_id, msg);
+
+ memset(sas_addr, 0, SAS_ADDR_SIZE);
res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
switch (res) {
case SMP_RESP_NO_PHY:
@@ -2052,6 +2068,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
+ /*
+ * Even though the PHY is empty, for convenience we discover
+ * the PHY to update the PHY info, such as the negotiated linkrate.
+ */
+ sas_ex_phy_discover(dev, phy_id);
return res;
} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
dev_type_flutter(type, phy->attached_dev_type)) {
@@ -2062,13 +2083,13 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
action = ", needs recovery";
- pr_debug("ex %016llx phy 0x%x broadcast flutter%s\n",
+ pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
SAS_ADDR(dev->sas_addr), phy_id, action);
return res;
}
/* we always have to delete the old device when we went here */
- pr_info("ex %016llx phy 0x%x replace %016llx\n",
+ pr_info("ex %016llx phy%02d replace %016llx\n",
SAS_ADDR(dev->sas_addr), phy_id,
SAS_ADDR(phy->attached_sas_addr));
sas_unregister_devs_sas_addr(dev, phy_id, last);
@@ -2098,7 +2119,7 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
int i;
bool last = true; /* is this the last phy of the port */
- pr_debug("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
+ pr_debug("ex %016llx phy%02d originated BROADCAST(CHANGE)\n",
SAS_ADDR(dev->sas_addr), phy_id);
if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
@@ -2109,13 +2130,11 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
continue;
if (SAS_ADDR(phy->attached_sas_addr) ==
SAS_ADDR(changed_phy->attached_sas_addr)) {
- pr_debug("phy%d part of wide port with phy%d\n",
- phy_id, i);
last = false;
break;
}
}
- res = sas_rediscover_dev(dev, phy_id, last);
+ res = sas_rediscover_dev(dev, phy_id, last, i);
} else
res = sas_discover_new(dev, phy_id);
return res;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 221340ee8651..28a460c36c0b 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -87,25 +87,27 @@ EXPORT_SYMBOL_GPL(sas_free_task);
/*------------ SAS addr hash -----------*/
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
{
- const u32 poly = 0x00DB2777;
- u32 r = 0;
- int i;
-
- for (i = 0; i < 8; i++) {
- int b;
- for (b = 7; b >= 0; b--) {
- r <<= 1;
- if ((1 << b) & sas_addr[i]) {
- if (!(r & 0x01000000))
- r ^= poly;
- } else if (r & 0x01000000)
- r ^= poly;
- }
- }
-
- hashed[0] = (r >> 16) & 0xFF;
- hashed[1] = (r >> 8) & 0xFF ;
- hashed[2] = r & 0xFF;
+ const u32 poly = 0x00DB2777;
+ u32 r = 0;
+ int i;
+
+ for (i = 0; i < SAS_ADDR_SIZE; i++) {
+ int b;
+
+ for (b = (SAS_ADDR_SIZE - 1); b >= 0; b--) {
+ r <<= 1;
+ if ((1 << b) & sas_addr[i]) {
+ if (!(r & 0x01000000))
+ r ^= poly;
+ } else if (r & 0x01000000) {
+ r ^= poly;
+ }
+ }
+ }
+
+ hashed[0] = (r >> 16) & 0xFF;
+ hashed[1] = (r >> 8) & 0xFF;
+ hashed[2] = r & 0xFF;
}
int sas_register_ha(struct sas_ha_struct *sas_ha)
@@ -623,7 +625,7 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
if (i->dft->lldd_control_phy) {
if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
- pr_notice("The phy%02d bursting events, shut it down.\n",
+ pr_notice("The phy%d bursting events, shut it down.\n",
phy->id);
sas_notify_phy_event(phy, PHYE_SHUTDOWN);
}
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 0374243c85d0..e030e1452136 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -122,11 +122,10 @@ static void sas_phye_shutdown(struct work_struct *work)
phy->enabled = 0;
ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
if (ret)
- pr_notice("lldd disable phy%02d returned %d\n",
- phy->id, ret);
+ pr_notice("lldd disable phy%d returned %d\n", phy->id,
+ ret);
} else
- pr_notice("phy%02d is not enabled, cannot shutdown\n",
- phy->id);
+ pr_notice("phy%d is not enabled, cannot shutdown\n", phy->id);
}
/* ---------- Phy class registration ---------- */
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 03fe479359b6..38a10478605c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -95,6 +95,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
int i;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
+ struct domain_device *port_dev;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
unsigned long flags;
@@ -153,8 +154,9 @@ static void sas_form_port(struct asd_sas_phy *phy)
}
/* add the phy to the port */
+ port_dev = port->port_dev;
list_add_tail(&phy->port_phy_el, &port->phy_list);
- sas_phy_set_target(phy, port->port_dev);
+ sas_phy_set_target(phy, port_dev);
phy->port = port;
port->num_phys++;
port->phy_mask |= (1U << phy->id);
@@ -184,14 +186,21 @@ static void sas_form_port(struct asd_sas_phy *phy)
port->phy_mask,
SAS_ADDR(port->attached_sas_addr));
- if (port->port_dev)
- port->port_dev->pathways = port->num_phys;
+ if (port_dev)
+ port_dev->pathways = port->num_phys;
/* Tell the LLDD about this port formation. */
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+ /* Only insert a revalidate event after initial discovery */
+ if (port_dev && sas_dev_type_is_expander(port_dev->dev_type)) {
+ struct expander_device *ex_dev = &port_dev->ex_dev;
+
+ ex_dev->ex_change_count = -1;
+ sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
+ }
flush_workqueue(sas_ha->disco_q);
}
@@ -254,6 +263,15 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
spin_unlock(&port->phy_list_lock);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
+ /* Only insert revalidate event if the port still has members */
+ if (port->port && dev && sas_dev_type_is_expander(dev->dev_type)) {
+ struct expander_device *ex_dev = &dev->ex_dev;
+
+ ex_dev->ex_change_count = -1;
+ sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
+ }
+ flush_workqueue(sas_ha->disco_q);
+
return;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 41d849f283f6..aafcffaa25f7 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -942,6 +942,7 @@ struct lpfc_hba {
int brd_no; /* FC board number */
char SerialNumber[32]; /* adapter Serial Number */
char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
+ char BIOSVersion[16]; /* Boot BIOS version */
char ModelDesc[256]; /* Model Description */
char ModelName[80]; /* Model Name */
char ProgramType[256]; /* Program Type */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ce3e541434dc..e9adb3f1961d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -71,6 +71,23 @@
#define LPFC_REG_WRITE_KEY_SIZE 4
#define LPFC_REG_WRITE_KEY "EMLX"
+const char *const trunk_errmsg[] = { /* map errcode */
+ "", /* There is no such error code at index 0*/
+ "link negotiated speed does not match existing"
+ " trunk - link was \"low\" speed",
+ "link negotiated speed does not match"
+ " existing trunk - link was \"middle\" speed",
+ "link negotiated speed does not match existing"
+ " trunk - link was \"high\" speed",
+ "Attached to non-trunking port - F_Port",
+ "Attached to non-trunking port - N_Port",
+ "FLOGI response timeout",
+ "non-FLOGI frame received",
+ "Invalid FLOGI response",
+ "Trunking initialization protocol",
+ "Trunk peer device mismatch",
+};
+
/**
* lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
* @incr: integer to convert.
@@ -114,7 +131,7 @@ static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+ return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}
/**
@@ -134,9 +151,9 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
if (phba->hba_flag & HBA_FIP_SUPPORT)
- return snprintf(buf, PAGE_SIZE, "1\n");
+ return scnprintf(buf, PAGE_SIZE, "1\n");
else
- return snprintf(buf, PAGE_SIZE, "0\n");
+ return scnprintf(buf, PAGE_SIZE, "0\n");
}
static ssize_t
@@ -564,14 +581,15 @@ lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- if (phba->cfg_enable_bg)
+ if (phba->cfg_enable_bg) {
if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
- return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
+ return scnprintf(buf, PAGE_SIZE,
+ "BlockGuard Enabled\n");
else
- return snprintf(buf, PAGE_SIZE,
+ return scnprintf(buf, PAGE_SIZE,
"BlockGuard Not Supported\n");
- else
- return snprintf(buf, PAGE_SIZE,
+ } else
+ return scnprintf(buf, PAGE_SIZE,
"BlockGuard Disabled\n");
}
@@ -583,7 +601,7 @@ lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_guard_err_cnt);
}
@@ -595,7 +613,7 @@ lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_apptag_err_cnt);
}
@@ -607,7 +625,7 @@ lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_reftag_err_cnt);
}
@@ -625,7 +643,7 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *host = class_to_shost(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
+ return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
}
/**
@@ -644,7 +662,7 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
}
/**
@@ -666,7 +684,7 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
}
/**
@@ -685,7 +703,7 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
}
/**
@@ -704,7 +722,7 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
}
/**
@@ -723,7 +741,7 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
}
/**
@@ -741,7 +759,7 @@ lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
(phba->sli.sli_flag & LPFC_MENLO_MAINT));
}
@@ -761,7 +779,7 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
}
/**
@@ -789,10 +807,10 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
if (phba->sli_rev < LPFC_SLI_REV4)
- len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+ len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
fwrev, phba->sli_rev);
else
- len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+ len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
fwrev, phba->sli_rev, if_type, sli_family);
return len;
@@ -816,7 +834,7 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
lpfc_vpd_t *vp = &phba->vpd;
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
- return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
}
/**
@@ -837,10 +855,11 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
char fwrev[FW_REV_STR_SIZE];
if (phba->sli_rev < LPFC_SLI_REV4)
- return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ phba->OptionROMVersion);
lpfc_decode_firmware_rev(phba, fwrev, 1);
- return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
}
/**
@@ -871,20 +890,20 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
if (phba->hba_flag & LINK_DISABLED)
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Link Down - User disabled\n");
else
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Link Down\n");
break;
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
- len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
switch (vport->port_state) {
case LPFC_LOCAL_CFG_LINK:
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Configuring Link\n");
break;
case LPFC_FDISC:
@@ -894,38 +913,40 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
case LPFC_NS_QRY:
case LPFC_BUILD_DISC_LIST:
case LPFC_DISC_AUTH:
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Discovery\n");
break;
case LPFC_VPORT_READY:
- len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Ready\n");
break;
case LPFC_VPORT_FAILED:
- len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Failed\n");
break;
case LPFC_VPORT_UNKNOWN:
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Unknown\n");
break;
}
if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
" Menlo Maint Mode\n");
else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
else
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
if (vport->fc_flag & FC_FABRIC)
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
" Fabric\n");
else
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
" Point-2-Point\n");
}
}
@@ -937,28 +958,28 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
struct lpfc_trunk_link link = phba->trunk_link;
if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Trunk port 0: Link %s %s\n",
(link.link0.state == LPFC_LINK_UP) ?
"Up" : "Down. ",
trunk_errmsg[link.link0.fault]);
if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Trunk port 1: Link %s %s\n",
(link.link1.state == LPFC_LINK_UP) ?
"Up" : "Down. ",
trunk_errmsg[link.link1.fault]);
if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Trunk port 2: Link %s %s\n",
(link.link2.state == LPFC_LINK_UP) ?
"Up" : "Down. ",
trunk_errmsg[link.link2.fault]);
if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Trunk port 3: Link %s %s\n",
(link.link3.state == LPFC_LINK_UP) ?
"Up" : "Down. ",
@@ -986,15 +1007,15 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
if (phba->sli_rev < LPFC_SLI_REV4)
- return snprintf(buf, PAGE_SIZE, "fc\n");
+ return scnprintf(buf, PAGE_SIZE, "fc\n");
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
- return snprintf(buf, PAGE_SIZE, "fcoe\n");
+ return scnprintf(buf, PAGE_SIZE, "fcoe\n");
if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
- return snprintf(buf, PAGE_SIZE, "fc\n");
+ return scnprintf(buf, PAGE_SIZE, "fc\n");
}
- return snprintf(buf, PAGE_SIZE, "unknown\n");
+ return scnprintf(buf, PAGE_SIZE, "unknown\n");
}
/**
@@ -1014,7 +1035,7 @@ lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
phba->sli4_hba.pc_sli4_params.oas_supported);
}
@@ -1072,7 +1093,7 @@ lpfc_num_discovered_ports_show(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
vport->fc_map_cnt + vport->fc_unmap_cnt);
}
@@ -1204,6 +1225,20 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
psli = &phba->sli;
+ /*
+ * If freeing of the queues has already started, don't access them.
+ * Otherwise set FREE_WAIT to indicate that the queues are still in
+ * use and hold off the freeing process until we finish.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
+ psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
+ } else {
+ spin_unlock_irq(&phba->hbalock);
+ goto skip_wait;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
/* Wait a little for things to settle down, but not
* long enough for dev loss timeout to expire.
*/
@@ -1225,6 +1260,11 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
}
}
out:
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
+ spin_unlock_irq(&phba->hbalock);
+
+skip_wait:
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
if (rc == 0)
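The LPFC_QUEUE_FREE_INIT / LPFC_QUEUE_FREE_WAIT logic added above is a two-flag handshake under phba->hbalock: the queue-free path is expected to set FREE_INIT before it starts tearing the queues down, and lpfc_do_offline() either sees that flag and jumps to skip_wait, or sets FREE_WAIT so the free path holds off until the wait loop finishes and the flag is cleared. A standalone sketch of the handshake, with hypothetical names and a pthread mutex standing in for hbalock; the sketch's free side simply declines to start while WAIT is set, where the real free path would wait it out:

/* Two-flag handshake sketch; hypothetical names, not lpfc code. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool free_init;	/* queue freeing has started */
static bool free_wait;	/* offline path still needs the queues */

static bool offline_begin_wait(void)
{
	bool ok;

	pthread_mutex_lock(&lock);
	ok = !free_init;		/* too late if freeing already started */
	if (ok)
		free_wait = true;	/* hold the free path off */
	pthread_mutex_unlock(&lock);
	return ok;
}

static void offline_end_wait(void)
{
	pthread_mutex_lock(&lock);
	free_wait = false;
	pthread_mutex_unlock(&lock);
}

static bool queue_free_try_start(void)
{
	bool ok;

	pthread_mutex_lock(&lock);
	ok = !free_wait;		/* must not free while offline is waiting */
	if (ok)
		free_init = true;
	pthread_mutex_unlock(&lock);
	return ok;
}

int main(void)
{
	if (offline_begin_wait()) {
		/* ... wait for outstanding I/O; the queues stay valid ... */
		offline_end_wait();
	}
	return queue_free_try_start() ? 0 : 1;
}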
@@ -1258,7 +1298,7 @@ out:
* -EBUSY, port is not in offline state
* 0, successful
*/
-int
+static int
lpfc_reset_pci_bus(struct lpfc_hba *phba)
{
struct pci_dev *pdev = phba->pcidev;
@@ -1586,10 +1626,10 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
-int
+static int
lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
{
LPFC_MBOXQ_t *mbox = NULL;
@@ -1675,7 +1715,7 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
else
state = "online";
- return snprintf(buf, PAGE_SIZE, "%s\n", state);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", state);
}
/**
@@ -1901,8 +1941,8 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt;
if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
- return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
- return snprintf(buf, PAGE_SIZE, "Unknown\n");
+ return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@@ -1929,8 +1969,8 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
- return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
- return snprintf(buf, PAGE_SIZE, "Unknown\n");
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@@ -1957,8 +1997,8 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt;
if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
- return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
- return snprintf(buf, PAGE_SIZE, "Unknown\n");
+ return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@@ -1985,8 +2025,8 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
- return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
- return snprintf(buf, PAGE_SIZE, "Unknown\n");
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@@ -2013,8 +2053,8 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt;
if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
- return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
- return snprintf(buf, PAGE_SIZE, "Unknown\n");
+ return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@@ -2041,8 +2081,8 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
- return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
- return snprintf(buf, PAGE_SIZE, "Unknown\n");
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@@ -2067,10 +2107,10 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
if (!(phba->max_vpi))
- return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+ return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
if (vport->port_type == LPFC_PHYSICAL_PORT)
- return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
- return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+ return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+ return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
}
/**
@@ -2092,7 +2132,7 @@ lpfc_poll_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
+ return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
}
/**
@@ -2196,7 +2236,7 @@ lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
}
/**
@@ -2215,7 +2255,7 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
}
/**
@@ -2234,7 +2274,7 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+ return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
"" : "Not ");
@@ -2263,7 +2303,7 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
uint16_t max_nr_virtfn;
max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
}
static inline bool lpfc_rangecheck(uint val, uint min, uint max)
@@ -2323,7 +2363,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
- return snprintf(buf, PAGE_SIZE, "%d\n",\
+ return scnprintf(buf, PAGE_SIZE, "%d\n",\
phba->cfg_##attr);\
}
@@ -2351,7 +2391,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct lpfc_hba *phba = vport->phba;\
uint val = 0;\
val = phba->cfg_##attr;\
- return snprintf(buf, PAGE_SIZE, "%#x\n",\
+ return scnprintf(buf, PAGE_SIZE, "%#x\n",\
phba->cfg_##attr);\
}
@@ -2487,7 +2527,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
- return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
/**
@@ -2512,7 +2552,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
- return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
+ return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
/**
@@ -2784,7 +2824,7 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwpn);
}
@@ -2881,7 +2921,7 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwnn);
}
@@ -2947,7 +2987,7 @@ lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
wwn_to_u64(phba->cfg_oas_tgt_wwpn));
}
@@ -3015,7 +3055,7 @@ lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
}
/**
@@ -3078,7 +3118,7 @@ lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
wwn_to_u64(phba->cfg_oas_vpt_wwpn));
}
@@ -3149,7 +3189,7 @@ lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
}
/**
@@ -3213,7 +3253,7 @@ lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
return -EFAULT;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
}
static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
lpfc_oas_lun_status_show, NULL);
@@ -3365,7 +3405,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
if (oas_lun != NOT_OAS_ENABLED_LUN)
phba->cfg_oas_flags |= OAS_LUN_VALID;
- len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+ len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
return len;
}
@@ -3499,7 +3539,7 @@ lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
}
static DEVICE_ATTR(iocb_hw, S_IRUGO,
@@ -3511,7 +3551,7 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
pring ? pring->txq_max : 0);
}
@@ -3525,7 +3565,7 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
pring ? pring->txcmplq_max : 0);
}
@@ -3561,7 +3601,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
}
/**
@@ -4050,9 +4090,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
}
if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
- val == 4) {
+ val != FLAGS_TOPOLOGY_MODE_PT_PT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "3114 Loop mode not supported\n");
+ "3114 Only non-FC-AL mode is supported\n");
return -EINVAL;
}
phba->cfg_topology = val;
@@ -5169,12 +5209,12 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
switch (phba->cfg_fcp_cpu_map) {
case 0:
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: No mapping (%d)\n",
phba->cfg_fcp_cpu_map);
return len;
case 1:
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: HBA centric mapping (%d): "
"%d of %d CPUs online from %d possible CPUs\n",
phba->cfg_fcp_cpu_map, num_online_cpus(),
@@ -5188,12 +5228,12 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %02d not present\n",
phba->sli4_hba.curr_disp_cpu);
else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d hdwq None "
"physid %d coreid %d ht %d\n",
@@ -5201,7 +5241,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->phys_id,
cpup->core_id, cpup->hyper);
else
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d EQ %04d hdwq %04d "
"physid %d coreid %d ht %d\n",
@@ -5210,7 +5250,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id, cpup->hyper);
} else {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d hdwq None "
"physid %d coreid %d ht %d IRQ %d\n",
@@ -5218,7 +5258,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->phys_id,
cpup->core_id, cpup->hyper, cpup->irq);
else
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d EQ %04d hdwq %04d "
"physid %d coreid %d ht %d IRQ %d\n",
@@ -5233,7 +5273,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
if (phba->sli4_hba.curr_disp_cpu <
phba->sli4_hba.num_possible_cpu &&
(len >= (PAGE_SIZE - 64))) {
- len += snprintf(buf + len,
+ len += scnprintf(buf + len,
PAGE_SIZE - len, "more...\n");
break;
}
@@ -5753,10 +5793,10 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
int len;
- len = snprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
+ len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
- len += snprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
+ len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
phba->cfg_nvme_seg_cnt);
return len;
@@ -6755,7 +6795,7 @@ lpfc_show_rport_##field (struct device *dev, \
{ \
struct fc_rport *rport = transport_class_to_rport(dev); \
struct lpfc_rport_data *rdata = rport->hostdata; \
- return snprintf(buf, sz, format_string, \
+ return scnprintf(buf, sz, format_string, \
(rdata->target) ? cast rdata->target->field : 0); \
}
@@ -7003,6 +7043,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
if (phba->sli_rev != LPFC_SLI_REV4) {
/* NVME only supported on SLI4 */
phba->nvmet_support = 0;
+ phba->cfg_nvmet_mrq = 0;
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
phba->cfg_enable_bbcr = 0;
phba->cfg_xri_rebalancing = 0;
@@ -7104,7 +7145,7 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
+ phba->cfg_nvmet_mrq = 0;
phba->cfg_nvmet_fb_size = 0;
}
}
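The snprintf() -> scnprintf() conversions throughout this file target the sysfs show handlers that accumulate output with len += ...: snprintf() returns the length that would have been written had the buffer been large enough, so once the PAGE_SIZE buffer fills, len can keep growing past the buffer size and later buf + len writes land outside the page. scnprintf() returns only what was actually stored, which keeps the accumulation bounded. A minimal userspace sketch of the difference; my_scnprintf() emulates the kernel helper and is not lpfc code:

#include <stdarg.h>
#include <stdio.h>

/* Emulates kernel scnprintf(): returns bytes stored, never the would-be length. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return (i >= (int)size) ? (int)size - 1 : i;
}

int main(void)
{
	char buf[16];
	int len = 0;

	/*
	 * With snprintf() the first call below would return 16 even though
	 * only 15 bytes fit, so len could grow past sizeof(buf). With the
	 * clamped return value, len never exceeds sizeof(buf) - 1.
	 */
	len += my_scnprintf(buf + len, sizeof(buf) - len, "%s", "0123456789abcdef");
	len += my_scnprintf(buf + len, sizeof(buf) - len, "%s", "ghij");
	printf("len=%d buf=\"%s\"\n", len, buf);
	return 0;
}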
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f2494d3b365c..b0202bc0aa62 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1968,14 +1968,17 @@ link_diag_state_set_out:
}
/**
- * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
* @phba: Pointer to HBA context object.
+ * @mode: loopback mode to set
+ * @link_no: link number for loopback mode to set
*
* This function is responsible for issuing a sli4 mailbox command for setting
- * up internal loopback diagnostic.
+ * up loopback diagnostic for a link.
*/
static int
-lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
+ uint32_t link_no)
{
LPFC_MBOXQ_t *pmboxq;
uint32_t req_len, alloc_len;
@@ -1996,11 +1999,19 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
}
link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
bf_set(lpfc_mbx_set_diag_state_link_num,
- &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
- bf_set(lpfc_mbx_set_diag_state_link_type,
- &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+ &link_diag_loopback->u.req, link_no);
+
+ if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
+ } else {
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req,
+ phba->sli4_hba.lnk_info.lnk_tp);
+ }
+
bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+ mode);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
@@ -2054,7 +2065,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
- uint32_t link_flags, timeout;
+ uint32_t link_flags, timeout, link_no;
int i, rc = 0;
/* no data to return just the return code */
@@ -2069,12 +2080,39 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
(int)(sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)));
rc = -EINVAL;
- goto job_error;
+ goto job_done;
+ }
+
+ loopback_mode = (struct diag_mode_set *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ if (loopback_mode->physical_link == -1)
+ link_no = phba->sli4_hba.lnk_info.lnk_no;
+ else
+ link_no = loopback_mode->physical_link;
+
+ if (link_flags == DISABLE_LOOP_BACK) {
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
+ link_no);
+ if (!rc) {
+ /* Unset the need disable bit */
+ phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
+ }
+ goto job_done;
+ } else {
+ /* Check if we need to disable the loopback state */
+ if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
+ rc = -EPERM;
+ goto job_done;
+ }
}
rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
- goto job_error;
+ goto job_done;
/* indicate we are in loopback diagnostic mode */
spin_lock_irq(&phba->hbalock);
@@ -2084,15 +2122,11 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
/* reset port to start from scratch */
rc = lpfc_selective_reset(phba);
if (rc)
- goto job_error;
+ goto job_done;
/* bring the link to diagnostic mode */
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3129 Bring link to diagnostic state.\n");
- loopback_mode = (struct diag_mode_set *)
- bsg_request->rqst_data.h_vendor.vendor_cmd;
- link_flags = loopback_mode->type;
- timeout = loopback_mode->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
if (rc) {
@@ -2120,13 +2154,54 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3132 Set up loopback mode:x%x\n", link_flags);
- if (link_flags == INTERNAL_LOOP_BACK)
- rc = lpfc_sli4_bsg_set_internal_loopback(phba);
- else if (link_flags == EXTERNAL_LOOP_BACK)
- rc = lpfc_hba_init_link_fc_topology(phba,
- FLAGS_TOPOLOGY_MODE_PT_PT,
- MBX_NOWAIT);
- else {
+ switch (link_flags) {
+ case INTERNAL_LOOP_BACK:
+ if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
+ link_no);
+ } else {
+ /* Trunk is configured, but link is not in this trunk */
+ if (phba->sli4_hba.conf_trunk) {
+ rc = -ELNRNG;
+ goto loopback_mode_exit;
+ }
+
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
+ link_no);
+ }
+
+ if (!rc) {
+ /* Set the need disable bit */
+ phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
+ }
+
+ break;
+ case EXTERNAL_LOOP_BACK:
+ if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
+ link_no);
+ } else {
+ /* Trunk is configured, but link is not in this trunk */
+ if (phba->sli4_hba.conf_trunk) {
+ rc = -ELNRNG;
+ goto loopback_mode_exit;
+ }
+
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_SERDES,
+ link_no);
+ }
+
+ if (!rc) {
+ /* Set the need disable bit */
+ phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
+ }
+
+ break;
+ default:
rc = -EINVAL;
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3141 Loopback mode:x%x not supported\n",
@@ -2185,7 +2260,7 @@ loopback_mode_exit:
}
lpfc_bsg_diag_mode_exit(phba);
-job_error:
+job_done:
/* make error code available to userspace */
bsg_reply->result = rc;
/* complete the job back to userspace if no error */
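In the loopback rework above, phba->sli4_hba.conf_trunk does double duty: the low bits mark which links belong to a configured trunk, and the same bit shifted left by four, (1 << link_no) << 4, records that loopback was enabled on that link and must be explicitly torn down with DISABLE_LOOP_BACK before another loopback request is accepted, which is what the -EPERM check enforces. A small standalone sketch of that bit layout, using illustrative macro names rather than lpfc symbols:

/* Sketch of the "trunk member" / "loopback pending disable" bit layout. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRUNK_MEMBER(link)		(1u << (link))		/* bits 0-3 */
#define LOOPBACK_NEEDS_DISABLE(link)	((1u << (link)) << 4)	/* bits 4-7 */

static bool loopback_allowed(uint32_t conf_trunk, unsigned int link)
{
	/* refuse a second request until the first loopback is disabled */
	return !(conf_trunk & LOOPBACK_NEEDS_DISABLE(link));
}

int main(void)
{
	uint32_t conf_trunk = TRUNK_MEMBER(0) | TRUNK_MEMBER(1);
	unsigned int link = 1;

	if (loopback_allowed(conf_trunk, link))
		conf_trunk |= LOOPBACK_NEEDS_DISABLE(link);	/* loopback set up */

	printf("allowed again? %d\n", loopback_allowed(conf_trunk, link));	/* 0 */

	conf_trunk &= ~LOOPBACK_NEEDS_DISABLE(link);	/* DISABLE_LOOP_BACK path */
	printf("after disable: %d\n", loopback_allowed(conf_trunk, link));	/* 1 */
	return 0;
}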
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 9151824beea4..d1708133fd54 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -68,6 +68,7 @@ struct send_mgmt_resp {
};
+#define DISABLE_LOOP_BACK 0x0 /* disables loop back */
#define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */
#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
@@ -75,6 +76,7 @@ struct diag_mode_set {
uint32_t command;
uint32_t type;
uint32_t timeout;
+ uint32_t physical_link;
};
struct sli4_link_diag {
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7290573110fe..4812bbbf43cc 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -886,7 +886,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
if (lpfc_error_lost_link(irsp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4101 NS query failed due to link event\n");
+ "4166 NS query failed due to link event\n");
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
goto out;
@@ -907,7 +907,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* Re-issue the NS cmd
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "4102 Process Deferred RSCN Data: x%x x%x\n",
+ "4167 Process Deferred RSCN Data: x%x x%x\n",
vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_els_handle_rscn(vport);
@@ -1430,7 +1430,7 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
* Name object. NPIV is not in play so this integer
* value is sufficient and unique per FC-ID.
*/
- n = snprintf(symbol, size, "%d", vport->phba->brd_no);
+ n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
return n;
}
@@ -1444,26 +1444,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
- n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+ n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
if (size < n)
return n;
- n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+ n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
if (size < n)
return n;
- n += snprintf(symbol + n, size - n, " DV%s.",
+ n += scnprintf(symbol + n, size - n, " DV%s.",
lpfc_release_version);
if (size < n)
return n;
- n += snprintf(symbol + n, size - n, " HN:%s.",
+ n += scnprintf(symbol + n, size - n, " HN:%s.",
init_utsname()->nodename);
if (size < n)
return n;
/* Note :- OS name is "Linux" */
- n += snprintf(symbol + n, size - n, " OS:%s\n",
+ n += scnprintf(symbol + n, size - n, " OS:%s",
init_utsname()->sysname);
return n;
}
@@ -2005,8 +2005,11 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
+ /* This string MUST be consistent with other FC platforms
+ * supported by Broadcom.
+ */
strncpy(ae->un.AttrString,
- "Broadcom Inc.",
+ "Emulex Corporation",
sizeof(ae->un.AttrString));
len = strnlen(ae->un.AttrString,
sizeof(ae->un.AttrString));
@@ -2301,7 +2304,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+ strlcat(ae->un.AttrString, phba->BIOSVersion,
+ sizeof(ae->un.AttrString));
len = strnlen(ae->un.AttrString,
sizeof(ae->un.AttrString));
len += (len & 3) ? (4 - (len & 3)) : 4;
@@ -2360,10 +2364,11 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 32);
- ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
- ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
- ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
- ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
+ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+ if (vport->nvmei_support || vport->phba->nvmet_support)
+ ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
size = FOURBYTES + 32;
ad->AttrLen = cpu_to_be16(size);
ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
@@ -2673,9 +2678,11 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 32);
- ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
- ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
+ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+ if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
size = FOURBYTES + 32;
ad->AttrLen = cpu_to_be16(size);
ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
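The AttrTypes[] updates above follow the FC-4 Types bitmap layout: 32 bytes treated as big-endian 32-bit words, so FC-4 type t lands in byte (t / 32) * 4 + (3 - ((t % 32) / 8)), bit (t % 8). That formula reproduces the four entries touched by this patch (ELS 0x1 -> byte 3 bit 1, FCP 0x8 -> byte 2 bit 0, CT 0x20 -> byte 7 bit 0, NVME 0x28 -> byte 6 bit 0). A short sketch, offered as an illustration of the encoding rather than a quote of the lpfc code:

#include <stdio.h>
#include <string.h>

/* Set the bit for FC-4 type 't' in a 32-byte bitmap of big-endian words. */
static void set_fc4_type(unsigned char types[32], unsigned int t)
{
	unsigned int byte = (t / 32) * 4 + (3 - ((t % 32) / 8));
	unsigned int bit = t % 8;

	types[byte] |= 1u << bit;
}

int main(void)
{
	unsigned char types[32];

	memset(types, 0, sizeof(types));
	set_fc4_type(types, 0x01);	/* ELS  -> types[3] = 0x02 */
	set_fc4_type(types, 0x08);	/* FCP  -> types[2] = 0x01 */
	set_fc4_type(types, 0x20);	/* CT   -> types[7] = 0x01 */
	set_fc4_type(types, 0x28);	/* NVME -> types[6] = 0x01 */

	printf("[2]=%#x [3]=%#x [6]=%#x [7]=%#x\n",
	       types[2], types[3], types[6], types[7]);
	return 0;
}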
@@ -3092,6 +3099,7 @@ port_out:
case SLI_MGMT_GHAT:
case SLI_MGMT_GRPL:
rsp_size = FC_MAX_NS_RSP;
+ /* fall through */
case SLI_MGMT_DHBA:
case SLI_MGMT_DHAT:
pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
@@ -3104,6 +3112,7 @@ port_out:
case SLI_MGMT_GPAT:
case SLI_MGMT_GPAS:
rsp_size = FC_MAX_NS_RSP;
+ /* fall through */
case SLI_MGMT_DPRT:
case SLI_MGMT_DPA:
pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 1215eaa530db..1ee857d9d165 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -170,7 +170,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
snprintf(buffer,
LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
dtp->seq_cnt, ms, dtp->fmt);
- len += snprintf(buf+len, size-len, buffer,
+ len += scnprintf(buf+len, size-len, buffer,
dtp->data1, dtp->data2, dtp->data3);
}
for (i = 0; i < index; i++) {
@@ -181,7 +181,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
snprintf(buffer,
LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
dtp->seq_cnt, ms, dtp->fmt);
- len += snprintf(buf+len, size-len, buffer,
+ len += scnprintf(buf+len, size-len, buffer,
dtp->data1, dtp->data2, dtp->data3);
}
@@ -236,7 +236,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
snprintf(buffer,
LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
dtp->seq_cnt, ms, dtp->fmt);
- len += snprintf(buf+len, size-len, buffer,
+ len += scnprintf(buf+len, size-len, buffer,
dtp->data1, dtp->data2, dtp->data3);
}
for (i = 0; i < index; i++) {
@@ -247,7 +247,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
snprintf(buffer,
LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
dtp->seq_cnt, ms, dtp->fmt);
- len += snprintf(buf+len, size-len, buffer,
+ len += scnprintf(buf+len, size-len, buffer,
dtp->data1, dtp->data2, dtp->data3);
}
@@ -307,7 +307,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
i = lpfc_debugfs_last_hbq;
- len += snprintf(buf+len, size-len, "HBQ %d Info\n", i);
+ len += scnprintf(buf+len, size-len, "HBQ %d Info\n", i);
hbqs = &phba->hbqs[i];
posted = 0;
@@ -315,21 +315,21 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
posted++;
hip = lpfc_hbq_defs[i];
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
hip->hbq_index, hip->profile, hip->rn,
hip->buffer_count, hip->init_count, hip->add_count, posted);
raw_index = phba->hbq_get[i];
getidx = le32_to_cpu(raw_index);
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
for (j=0; j<hbqs->entry_count; j++) {
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"%03d: %08x %04x %05x ", j,
le32_to_cpu(hbqe->bde.addrLow),
le32_to_cpu(hbqe->bde.tus.w),
@@ -341,14 +341,16 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
low = hbqs->hbqPutIdx - posted;
if (low >= 0) {
if ((j >= hbqs->hbqPutIdx) || (j < low)) {
- len += snprintf(buf+len, size-len, "Unused\n");
+ len += scnprintf(buf + len, size - len,
+ "Unused\n");
goto skipit;
}
}
else {
if ((j >= hbqs->hbqPutIdx) &&
(j < (hbqs->entry_count+low))) {
- len += snprintf(buf+len, size-len, "Unused\n");
+ len += scnprintf(buf + len, size - len,
+ "Unused\n");
goto skipit;
}
}
@@ -358,7 +360,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"Buf%d: %p %06x\n", i,
hbq_buf->dbuf.virt, hbq_buf->tag);
found = 1;
@@ -367,7 +369,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
i++;
}
if (!found) {
- len += snprintf(buf+len, size-len, "No DMAinfo?\n");
+ len += scnprintf(buf+len, size-len, "No DMAinfo?\n");
}
skipit:
hbqe++;
@@ -413,14 +415,14 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
break;
qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
- len += snprintf(buf + len, size - len, "HdwQ %d Info ", i);
+ len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
spin_lock(&qp->abts_nvme_buf_list_lock);
spin_lock(&qp->io_buf_list_get_lock);
spin_lock(&qp->io_buf_list_put_lock);
out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"tot:%d get:%d put:%d mt:%d "
"ABTS scsi:%d nvme:%d Out:%d\n",
qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
@@ -612,9 +614,9 @@ lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
break;
qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock];
- len += snprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
+ len += scnprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
if (phba->cfg_xri_rebalancing) {
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"get_pvt:%d mv_pvt:%d "
"mv2pub:%d mv2pvt:%d "
"put_pvt:%d put_pub:%d wq:%d\n",
@@ -626,7 +628,7 @@ lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
qp->lock_conflict.free_pub_pool,
qp->lock_conflict.wq_access);
} else {
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"get:%d put:%d free:%d wq:%d\n",
qp->lock_conflict.alloc_xri_get,
qp->lock_conflict.alloc_xri_put,
@@ -678,7 +680,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
off = 0;
spin_lock_irq(&phba->hbalock);
- len += snprintf(buf+len, size-len, "HBA SLIM\n");
+ len += scnprintf(buf+len, size-len, "HBA SLIM\n");
lpfc_memcpy_from_slim(buffer,
phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
@@ -692,7 +694,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
i = 1024;
while (i > 0) {
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
*(ptr+5), *(ptr+6), *(ptr+7));
@@ -736,11 +738,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
off = 0;
spin_lock_irq(&phba->hbalock);
- len += snprintf(buf+len, size-len, "SLIM Mailbox\n");
+ len += scnprintf(buf+len, size-len, "SLIM Mailbox\n");
ptr = (uint32_t *)phba->slim2p.virt;
i = sizeof(MAILBOX_t);
while (i > 0) {
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
*(ptr+5), *(ptr+6), *(ptr+7));
@@ -749,11 +751,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
off += (8 * sizeof(uint32_t));
}
- len += snprintf(buf+len, size-len, "SLIM PCB\n");
+ len += scnprintf(buf+len, size-len, "SLIM PCB\n");
ptr = (uint32_t *)phba->pcb;
i = sizeof(PCB_t);
while (i > 0) {
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
*(ptr+5), *(ptr+6), *(ptr+7));
@@ -766,7 +768,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
for (i = 0; i < 4; i++) {
pgpp = &phba->port_gp[i];
pring = &psli->sli3_ring[i];
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"Ring %d: CMD GetInx:%d "
"(Max:%d Next:%d "
"Local:%d flg:x%x) "
@@ -783,7 +785,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr);
word3 = readl(phba->HCregaddr);
- len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+ len += scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
"HC:%08x\n", word0, word1, word2, word3);
}
spin_unlock_irq(&phba->hbalock);
@@ -821,12 +823,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
outio = 0;
- len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
+ len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
iocnt = 0;
if (!cnt) {
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"Missing Nodelist Entries\n");
break;
}
@@ -864,63 +866,63 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
default:
statep = "UNKNOWN";
}
- len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+ len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
statep, ndlp->nlp_DID);
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"WWPN x%llx ",
wwn_to_u64(ndlp->nlp_portname.u.wwn));
- len += snprintf(buf+len, size-len,
+ len += scnprintf(buf+len, size-len,
"WWNN x%llx ",
wwn_to_u64(ndlp->nlp_nodename.u.wwn));
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
- len += snprintf(buf+len, size-len, "RPI:%03d ",
+ len += scnprintf(buf+len, size-len, "RPI:%03d ",
ndlp->nlp_rpi);
else
- len += snprintf(buf+len, size-len, "RPI:none ");
- len += snprintf(buf+len, size-len, "flag:x%08x ",
+ len += scnprintf(buf+len, size-len, "RPI:none ");
+ len += scnprintf(buf+len, size-len, "flag:x%08x ",
ndlp->nlp_flag);
if (!ndlp->nlp_type)
- len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+ len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
if (ndlp->nlp_type & NLP_FC_NODE)
- len += snprintf(buf+len, size-len, "FC_NODE ");
+ len += scnprintf(buf+len, size-len, "FC_NODE ");
if (ndlp->nlp_type & NLP_FABRIC) {
- len += snprintf(buf+len, size-len, "FABRIC ");
+ len += scnprintf(buf+len, size-len, "FABRIC ");
iocnt = 0;
}
if (ndlp->nlp_type & NLP_FCP_TARGET)
- len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+ len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ",
ndlp->nlp_sid);
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
- len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+ len += scnprintf(buf+len, size-len, "FCP_INITIATOR ");
if (ndlp->nlp_type & NLP_NVME_TARGET)
- len += snprintf(buf + len,
+ len += scnprintf(buf + len,
size - len, "NVME_TGT sid:%d ",
NLP_NO_SID);
if (ndlp->nlp_type & NLP_NVME_INITIATOR)
- len += snprintf(buf + len,
+ len += scnprintf(buf + len,
size - len, "NVME_INITIATOR ");
- len += snprintf(buf+len, size-len, "usgmap:%x ",
+ len += scnprintf(buf+len, size-len, "usgmap:%x ",
ndlp->nlp_usg_map);
- len += snprintf(buf+len, size-len, "refcnt:%x",
+ len += scnprintf(buf+len, size-len, "refcnt:%x",
kref_read(&ndlp->kref));
if (iocnt) {
i = atomic_read(&ndlp->cmd_pending);
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
" OutIO:x%x Qdepth x%x",
i, ndlp->cmd_qdepth);
outio += i;
}
- len += snprintf(buf + len, size - len, "defer:%x ",
+ len += scnprintf(buf + len, size - len, "defer:%x ",
ndlp->nlp_defer_did);
- len += snprintf(buf+len, size-len, "\n");
+ len += scnprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"\nOutstanding IO x%x\n", outio);
if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"\nNVME Targetport Entry ...\n");
/* Port state is only one of two values for now. */
@@ -928,18 +930,18 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
statep = "REGISTERED";
else
statep = "INIT";
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"TGT WWNN x%llx WWPN x%llx State %s\n",
wwn_to_u64(vport->fc_nodename.u.wwn),
wwn_to_u64(vport->fc_portname.u.wwn),
statep);
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
" Targetport DID x%06x\n",
phba->targetport->port_id);
goto out_exit;
}
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"\nNVME Lport/Rport Entries ...\n");
localport = vport->localport;
@@ -954,11 +956,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
else
statep = "UNKNOWN ";
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Lport DID x%06x PortState %s\n",
localport->port_id, statep);
- len += snprintf(buf + len, size - len, "\tRport List:\n");
+ len += scnprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
spin_lock(&phba->hbalock);
@@ -985,32 +987,32 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
}
/* Tab in to show lport ownership. */
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"\t%s Port ID:x%06x ",
statep, nrport->port_id);
- len += snprintf(buf + len, size - len, "WWPN x%llx ",
+ len += scnprintf(buf + len, size - len, "WWPN x%llx ",
nrport->port_name);
- len += snprintf(buf + len, size - len, "WWNN x%llx ",
+ len += scnprintf(buf + len, size - len, "WWNN x%llx ",
nrport->node_name);
/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"INITIATOR ");
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"TARGET ");
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"DISCSRVC ");
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
FC_PORT_ROLE_NVME_DISCOVERY))
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"UNKNOWN ROLE x%x",
nrport->port_role);
/* Terminate the string. */
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "\n");
}
spin_unlock_irq(shost->host_lock);
@@ -1049,35 +1051,35 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (!phba->targetport)
return len;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"\nNVME Targetport Statistics\n");
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"LS: Rcv %08x Drop %08x Abort %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_drop),
atomic_read(&tgtp->xmt_ls_abort));
if (atomic_read(&tgtp->rcv_ls_req_in) !=
atomic_read(&tgtp->rcv_ls_req_out)) {
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Rcv LS: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_out));
}
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
atomic_read(&tgtp->xmt_ls_rsp_cmpl));
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"LS: RSP Abort %08x xb %08x Err %08x\n",
atomic_read(&tgtp->xmt_ls_rsp_aborted),
atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"FCP: Rcv %08x Defer %08x Release %08x "
"Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
@@ -1087,13 +1089,13 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
atomic_read(&tgtp->rcv_fcp_cmd_out)) {
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Rcv FCP: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out));
}
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"FCP Rsp: read %08x readrsp %08x "
"write %08x rsp %08x\n",
atomic_read(&tgtp->xmt_fcp_read),
@@ -1101,31 +1103,31 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_fcp_write),
atomic_read(&tgtp->xmt_fcp_rsp));
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
atomic_read(&tgtp->xmt_fcp_rsp_error),
atomic_read(&tgtp->xmt_fcp_rsp_drop));
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_aborted),
atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
atomic_read(&tgtp->xmt_abort_sol),
atomic_read(&tgtp->xmt_abort_unsol),
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "\n");
cnt = 0;
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
@@ -1136,7 +1138,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
}
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
if (cnt) {
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"ABORT: %d ctx entries\n", cnt);
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
@@ -1144,7 +1146,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
list) {
if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
break;
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Entry: oxid %x state %x "
"flag %x\n",
ctxp->oxid, ctxp->state,
@@ -1158,7 +1160,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
tot += atomic_read(&tgtp->xmt_fcp_release);
tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
"CTX Outstanding %08llx\n",
phba->sli4_hba.nvmet_xri_cnt,
@@ -1176,10 +1178,10 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (!lport)
return len;
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"\nNVME HDWQ Statistics\n");
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"LS: Xmt %016x Cmpl %016x\n",
atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&lport->fc4NvmeLsCmpls));
@@ -1199,20 +1201,20 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (i >= 32)
continue;
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"HDWQ (%d): Rd %016llx Wr %016llx "
"IO %016llx ",
i, data1, data2, data3);
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Cmpl %016llx OutIO %016llx\n",
tot, ((data1 + data2 + data3) - tot));
}
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Total FCP Cmpl %016llx Issue %016llx "
"OutIO %016llx\n",
totin, totout, totout - totin);
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"LS Xmt Err: Abrt %08x Err %08x "
"Cmpl Err: xb %08x Err %08x\n",
atomic_read(&lport->xmt_ls_abort),
@@ -1220,7 +1222,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&lport->cmpl_ls_xb),
atomic_read(&lport->cmpl_ls_err));
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"FCP Xmt Err: noxri %06x nondlp %06x "
"qdepth %06x wqerr %06x err %06x Abrt %06x\n",
atomic_read(&lport->xmt_fcp_noxri),
@@ -1230,7 +1232,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&lport->xmt_fcp_err),
atomic_read(&lport->xmt_fcp_abort));
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"FCP Cmpl Err: xb %08x Err %08x\n",
atomic_read(&lport->cmpl_fcp_xb),
atomic_read(&lport->cmpl_fcp_err));
@@ -1322,58 +1324,58 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
if (phba->nvmet_support == 0) {
/* NVME Initiator */
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"ktime %s: Total Samples: %lld\n",
(phba->ktime_on ? "Enabled" : "Disabled"),
phba->ktime_data_samples);
if (phba->ktime_data_samples == 0)
return len;
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 1: Last NVME Cmd cmpl "
"done -to- Start of next NVME cnd (in driver)\n");
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg1_total,
phba->ktime_data_samples),
phba->ktime_seg1_min,
phba->ktime_seg1_max);
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 2: Driver start of NVME cmd "
"-to- Firmware WQ doorbell\n");
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg2_total,
phba->ktime_data_samples),
phba->ktime_seg2_min,
phba->ktime_seg2_max);
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 3: Firmware WQ doorbell -to- "
"MSI-X ISR cmpl\n");
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg3_total,
phba->ktime_data_samples),
phba->ktime_seg3_min,
phba->ktime_seg3_max);
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 4: MSI-X ISR cmpl -to- "
"NVME cmpl done\n");
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg4_total,
phba->ktime_data_samples),
phba->ktime_seg4_min,
phba->ktime_seg4_max);
- len += snprintf(
+ len += scnprintf(
buf + len, PAGE_SIZE - len,
"Total IO avg time: %08lld\n",
div_u64(phba->ktime_seg1_total +
@@ -1385,7 +1387,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
}
/* NVME Target */
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"ktime %s: Total Samples: %lld %lld\n",
(phba->ktime_on ? "Enabled" : "Disabled"),
phba->ktime_data_samples,
@@ -1393,46 +1395,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
if (phba->ktime_data_samples == 0)
return len;
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 1: MSI-X ISR Rcv cmd -to- "
"cmd pass to NVME Layer\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg1_total,
phba->ktime_data_samples),
phba->ktime_seg1_min,
phba->ktime_seg1_max);
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 2: cmd pass to NVME Layer- "
"-to- Driver rcv cmd OP (action)\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg2_total,
phba->ktime_data_samples),
phba->ktime_seg2_min,
phba->ktime_seg2_max);
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 3: Driver rcv cmd OP -to- "
"Firmware WQ doorbell: cmd\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg3_total,
phba->ktime_data_samples),
phba->ktime_seg3_min,
phba->ktime_seg3_max);
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 4: Firmware WQ doorbell: cmd "
"-to- MSI-X ISR for cmd cmpl\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg4_total,
phba->ktime_data_samples),
phba->ktime_seg4_min,
phba->ktime_seg4_max);
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 5: MSI-X ISR for cmd cmpl "
"-to- NVME layer passed cmd done\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg5_total,
phba->ktime_data_samples),
@@ -1440,10 +1442,10 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
phba->ktime_seg5_max);
if (phba->ktime_status_samples == 0) {
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Total: cmd received by MSI-X ISR "
"-to- cmd completed on wire\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld "
"max %08lld\n",
div_u64(phba->ktime_seg10_total,
@@ -1453,46 +1455,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
return len;
}
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 6: NVME layer passed cmd done "
"-to- Driver rcv rsp status OP\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg6_total,
phba->ktime_status_samples),
phba->ktime_seg6_min,
phba->ktime_seg6_max);
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 7: Driver rcv rsp status OP "
"-to- Firmware WQ doorbell: status\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg7_total,
phba->ktime_status_samples),
phba->ktime_seg7_min,
phba->ktime_seg7_max);
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 8: Firmware WQ doorbell: status"
" -to- MSI-X ISR for status cmpl\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg8_total,
phba->ktime_status_samples),
phba->ktime_seg8_min,
phba->ktime_seg8_max);
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Segment 9: MSI-X ISR for status cmpl "
"-to- NVME layer passed status done\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg9_total,
phba->ktime_status_samples),
phba->ktime_seg9_min,
phba->ktime_seg9_max);
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"Total: cmd received by MSI-X ISR -to- "
"cmd completed on wire\n");
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
div_u64(phba->ktime_seg10_total,
phba->ktime_status_samples),
@@ -1527,7 +1529,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
(phba->nvmeio_trc_size - 1);
skip = phba->nvmeio_trc_output_idx;
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"%s IO Trace %s: next_idx %d skip %d size %d\n",
(phba->nvmet_support ? "NVME" : "NVMET"),
(state ? "Enabled" : "Disabled"),
@@ -1549,18 +1551,18 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
if (!dtp->fmt)
continue;
- len += snprintf(buf + len, size - len, dtp->fmt,
+ len += scnprintf(buf + len, size - len, dtp->fmt,
dtp->data1, dtp->data2, dtp->data3);
if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
phba->nvmeio_trc_output_idx = 0;
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Trace Complete\n");
goto out;
}
if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Trace Continue (%d of %d)\n",
phba->nvmeio_trc_output_idx,
phba->nvmeio_trc_size);
@@ -1578,18 +1580,18 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
if (!dtp->fmt)
continue;
- len += snprintf(buf + len, size - len, dtp->fmt,
+ len += scnprintf(buf + len, size - len, dtp->fmt,
dtp->data1, dtp->data2, dtp->data3);
if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
phba->nvmeio_trc_output_idx = 0;
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Trace Complete\n");
goto out;
}
if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Trace Continue (%d of %d)\n",
phba->nvmeio_trc_output_idx,
phba->nvmeio_trc_size);
@@ -1597,7 +1599,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
}
}
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Trace Done\n");
out:
return len;
@@ -1627,17 +1629,17 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
uint32_t tot_rcv;
uint32_t tot_cmpl;
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"CPUcheck %s ",
(phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
"Enabled" : "Disabled"));
if (phba->nvmet_support) {
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"%s\n",
(phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
"Rcv Enabled\n" : "Rcv Disabled\n"));
} else {
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
}
max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
@@ -1658,7 +1660,7 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
if (!tot_xmt && !tot_cmpl && !tot_rcv)
continue;
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"HDWQ %03d: ", i);
for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
/* Only display non-zero counters */
@@ -1667,22 +1669,22 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
!qp->cpucheck_rcv_io[j])
continue;
if (phba->nvmet_support) {
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %03d: %x/%x/%x ", j,
qp->cpucheck_rcv_io[j],
qp->cpucheck_xmt_io[j],
qp->cpucheck_cmpl_io[j]);
} else {
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %03d: %x/%x ", j,
qp->cpucheck_xmt_io[j],
qp->cpucheck_cmpl_io[j]);
}
}
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Total: %x\n", tot_xmt);
if (len >= max_cnt) {
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"Truncated ...\n");
return len;
}
@@ -2258,28 +2260,29 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
int cnt = 0;
if (dent == phba->debug_writeGuard)
- cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+ cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
else if (dent == phba->debug_writeApp)
- cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
+ cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
else if (dent == phba->debug_writeRef)
- cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
+ cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
else if (dent == phba->debug_readGuard)
- cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+ cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
else if (dent == phba->debug_readApp)
- cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
+ cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
else if (dent == phba->debug_readRef)
- cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+ cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
else if (dent == phba->debug_InjErrNPortID)
- cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+ cnt = scnprintf(cbuf, 32, "0x%06x\n",
+ phba->lpfc_injerr_nportid);
else if (dent == phba->debug_InjErrWWPN) {
memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
tmp = cpu_to_be64(tmp);
- cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+ cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp);
} else if (dent == phba->debug_InjErrLBA) {
if (phba->lpfc_injerr_lba == (sector_t)(-1))
- cnt = snprintf(cbuf, 32, "off\n");
+ cnt = scnprintf(cbuf, 32, "off\n");
else
- cnt = snprintf(cbuf, 32, "0x%llx\n",
+ cnt = scnprintf(cbuf, 32, "0x%llx\n",
(uint64_t) phba->lpfc_injerr_lba);
} else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3224,17 +3227,17 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
switch (count) {
case SIZE_U8: /* byte (8 bits) */
pci_read_config_byte(pdev, where, &u8val);
- len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"%03x: %02x\n", where, u8val);
break;
case SIZE_U16: /* word (16 bits) */
pci_read_config_word(pdev, where, &u16val);
- len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"%03x: %04x\n", where, u16val);
break;
case SIZE_U32: /* double word (32 bits) */
pci_read_config_dword(pdev, where, &u32val);
- len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"%03x: %08x\n", where, u32val);
break;
case LPFC_PCI_CFG_BROWSE: /* browse all */
@@ -3254,25 +3257,25 @@ pcicfg_browse:
offset = offset_label;
/* Read PCI config space */
- len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"%03x: ", offset_label);
while (index > 0) {
pci_read_config_dword(pdev, offset, &u32val);
- len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"%08x ", u32val);
offset += sizeof(uint32_t);
if (offset >= LPFC_PCI_CFG_SIZE) {
- len += snprintf(pbuffer+len,
+ len += scnprintf(pbuffer+len,
LPFC_PCI_CFG_SIZE-len, "\n");
break;
}
index -= sizeof(uint32_t);
if (!index)
- len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"\n");
else if (!(index % (8 * sizeof(uint32_t)))) {
offset_label += (8 * sizeof(uint32_t));
- len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"\n%03x: ", offset_label);
}
}
@@ -3543,7 +3546,7 @@ lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
if (acc_range == SINGLE_WORD) {
offset_run = offset;
u32val = readl(mem_mapped_bar + offset_run);
- len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
"%05x: %08x\n", offset_run, u32val);
} else
goto baracc_browse;
@@ -3557,35 +3560,35 @@ baracc_browse:
offset_run = offset_label;
/* Read PCI bar memory mapped space */
- len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
"%05x: ", offset_label);
index = LPFC_PCI_BAR_RD_SIZE;
while (index > 0) {
u32val = readl(mem_mapped_bar + offset_run);
- len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
"%08x ", u32val);
offset_run += sizeof(uint32_t);
if (acc_range == LPFC_PCI_BAR_BROWSE) {
if (offset_run >= bar_size) {
- len += snprintf(pbuffer+len,
+ len += scnprintf(pbuffer+len,
LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
break;
}
} else {
if (offset_run >= offset +
(acc_range * sizeof(uint32_t))) {
- len += snprintf(pbuffer+len,
+ len += scnprintf(pbuffer+len,
LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
break;
}
}
index -= sizeof(uint32_t);
if (!index)
- len += snprintf(pbuffer+len,
+ len += scnprintf(pbuffer+len,
LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
else if (!(index % (8 * sizeof(uint32_t)))) {
offset_label += (8 * sizeof(uint32_t));
- len += snprintf(pbuffer+len,
+ len += scnprintf(pbuffer+len,
LPFC_PCI_BAR_RD_BUF_SIZE-len,
"\n%05x: ", offset_label);
}
@@ -3758,19 +3761,19 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
if (!qp)
return len;
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t\t%s WQ info: ", wqtype);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n",
qp->assoc_qid, qp->q_cnt_1,
(unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
"HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
qp->hba_index, qp->notify_interval);
- len += snprintf(pbuffer + len,
+ len += scnprintf(pbuffer + len,
LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
return len;
}
@@ -3810,21 +3813,22 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
if (!qp)
return len;
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t%s CQ info: ", cqtype);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x "
"xabt:x%x wq:x%llx]\n",
qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
"HST-IDX[%04d], NTFI[%03d], PLMT[%03d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
qp->notify_interval, qp->max_proc_limit);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\n");
return len;
}
@@ -3836,19 +3840,19 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
if (!qp || !datqp)
return len;
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t\t%s RQ info: ", rqtype);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
"posted:x%x rcv:x%llx]\n",
qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
"HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n",
qp->queue_id, qp->entry_count, qp->entry_size,
qp->host_index, qp->hba_index, qp->notify_interval);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
"HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n",
datqp->queue_id, datqp->entry_count,
@@ -3927,18 +3931,19 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
if (!qp)
return len;
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
"cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4, qp->q_mode);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
"HST-IDX[%04d], NTFI[%03d], PLMT[%03d], AFFIN[%03d]",
qp->queue_id, qp->entry_count, qp->entry_size,
qp->host_index, qp->notify_interval,
qp->max_proc_limit, qp->chann);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+ len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\n");
return len;
}
@@ -3991,9 +3996,10 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue)
phba->lpfc_idiag_last_eq = 0;
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
- "HDWQ %d out of %d HBA HDWQs\n",
- x, phba->cfg_hdw_queue);
+ len += scnprintf(pbuffer + len,
+ LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "HDWQ %d out of %d HBA HDWQs\n",
+ x, phba->cfg_hdw_queue);
/* Fast-path EQ */
qp = phba->sli4_hba.hdwq[x].hba_eq;
@@ -4075,7 +4081,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
too_big:
- len += snprintf(pbuffer + len,
+ len += scnprintf(pbuffer + len,
LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n");
out:
spin_unlock_irq(&phba->hbalock);
@@ -4131,22 +4137,22 @@ lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque,
return 0;
esize = pque->entry_size;
- len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
"QE-INDEX[%04d]:\n", index);
offset = 0;
- pentry = pque->qe[index].address;
+ pentry = lpfc_sli4_qe(pque, index);
while (esize > 0) {
- len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
"%08x ", *pentry);
pentry++;
offset += sizeof(uint32_t);
esize -= sizeof(uint32_t);
if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
- len += snprintf(pbuffer+len,
+ len += scnprintf(pbuffer+len,
LPFC_QUE_ACC_BUF_SIZE-len, "\n");
}
- len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+ len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
return len;
}
@@ -4485,7 +4491,7 @@ pass_check:
pque = (struct lpfc_queue *)idiag.ptr_private;
if (offset > pque->entry_size/sizeof(uint32_t) - 1)
goto error_out;
- pentry = pque->qe[index].address;
+ pentry = lpfc_sli4_qe(pque, index);
pentry += offset;
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
*pentry = value;
@@ -4506,7 +4512,7 @@ error_out:
* lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
* @phba: The pointer to hba structure.
* @pbuffer: The pointer to the buffer to copy the data to.
- * @len: The lenght of bytes to copied.
+ * @len: The length of bytes copied.
* @drbregid: The id to doorbell registers.
*
* Description:
@@ -4526,27 +4532,27 @@ lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
switch (drbregid) {
case LPFC_DRB_EQ:
- len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len,
"EQ-DRB-REG: 0x%08x\n",
readl(phba->sli4_hba.EQDBregaddr));
break;
case LPFC_DRB_CQ:
- len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len,
+ len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len,
"CQ-DRB-REG: 0x%08x\n",
readl(phba->sli4_hba.CQDBregaddr));
break;
case LPFC_DRB_MQ:
- len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
"MQ-DRB-REG: 0x%08x\n",
readl(phba->sli4_hba.MQDBregaddr));
break;
case LPFC_DRB_WQ:
- len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
"WQ-DRB-REG: 0x%08x\n",
readl(phba->sli4_hba.WQDBregaddr));
break;
case LPFC_DRB_RQ:
- len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
"RQ-DRB-REG: 0x%08x\n",
readl(phba->sli4_hba.RQDBregaddr));
break;
@@ -4716,7 +4722,7 @@ error_out:
* lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control register
* @phba: The pointer to hba structure.
* @pbuffer: The pointer to the buffer to copy the data to.
- * @len: The lenght of bytes to copied.
+ * @len: The length of bytes copied.
* @drbregid: The id to doorbell registers.
*
* Description:
@@ -4736,37 +4742,37 @@ lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
switch (ctlregid) {
case LPFC_CTL_PORT_SEM:
- len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
"Port SemReg: 0x%08x\n",
readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_SEM_OFFSET));
break;
case LPFC_CTL_PORT_STA:
- len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
"Port StaReg: 0x%08x\n",
readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_STA_OFFSET));
break;
case LPFC_CTL_PORT_CTL:
- len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
"Port CtlReg: 0x%08x\n",
readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_CTL_OFFSET));
break;
case LPFC_CTL_PORT_ER1:
- len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
"Port Er1Reg: 0x%08x\n",
readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_ER1_OFFSET));
break;
case LPFC_CTL_PORT_ER2:
- len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
"Port Er2Reg: 0x%08x\n",
readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_ER2_OFFSET));
break;
case LPFC_CTL_PDEV_CTL:
- len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
"PDev CtlReg: 0x%08x\n",
readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET));
@@ -4959,13 +4965,13 @@ lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
- len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
"mbx_dump_map: 0x%08x\n", mbx_dump_map);
- len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
"mbx_dump_cnt: %04d\n", mbx_dump_cnt);
- len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
"mbx_word_cnt: %04d\n", mbx_word_cnt);
- len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
"mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
return len;
@@ -5114,35 +5120,35 @@ lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
{
uint16_t ext_cnt, ext_size;
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\nAvailable Extents Information:\n");
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tPort Available VPI extents: ");
lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
&ext_cnt, &ext_size);
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"Count %3d, Size %3d\n", ext_cnt, ext_size);
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tPort Available VFI extents: ");
lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
&ext_cnt, &ext_size);
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"Count %3d, Size %3d\n", ext_cnt, ext_size);
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tPort Available RPI extents: ");
lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
&ext_cnt, &ext_size);
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"Count %3d, Size %3d\n", ext_cnt, ext_size);
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tPort Available XRI extents: ");
lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
&ext_cnt, &ext_size);
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"Count %3d, Size %3d\n", ext_cnt, ext_size);
return len;
@@ -5166,55 +5172,55 @@ lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
uint16_t ext_cnt, ext_size;
int rc;
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\nAllocated Extents Information:\n");
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tHost Allocated VPI extents: ");
rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
&ext_cnt, &ext_size);
if (!rc)
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"Port %d Extent %3d, Size %3d\n",
phba->brd_no, ext_cnt, ext_size);
else
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"N/A\n");
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tHost Allocated VFI extents: ");
rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
&ext_cnt, &ext_size);
if (!rc)
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"Port %d Extent %3d, Size %3d\n",
phba->brd_no, ext_cnt, ext_size);
else
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"N/A\n");
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tHost Allocated RPI extents: ");
rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
&ext_cnt, &ext_size);
if (!rc)
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"Port %d Extent %3d, Size %3d\n",
phba->brd_no, ext_cnt, ext_size);
else
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"N/A\n");
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tHost Allocated XRI extents: ");
rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
&ext_cnt, &ext_size);
if (!rc)
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"Port %d Extent %3d, Size %3d\n",
phba->brd_no, ext_cnt, ext_size);
else
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"N/A\n");
return len;
@@ -5238,49 +5244,49 @@ lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
struct lpfc_rsrc_blks *rsrc_blks;
int index;
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\nDriver Extents Information:\n");
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tVPI extents:\n");
index = 0;
list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\t\tBlock %3d: Start %4d, Count %4d\n",
index, rsrc_blks->rsrc_start,
rsrc_blks->rsrc_size);
index++;
}
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tVFI extents:\n");
index = 0;
list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
list) {
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\t\tBlock %3d: Start %4d, Count %4d\n",
index, rsrc_blks->rsrc_start,
rsrc_blks->rsrc_size);
index++;
}
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tRPI extents:\n");
index = 0;
list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
list) {
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\t\tBlock %3d: Start %4d, Count %4d\n",
index, rsrc_blks->rsrc_start,
rsrc_blks->rsrc_size);
index++;
}
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\tXRI extents:\n");
index = 0;
list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
list) {
- len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\t\tBlock %3d: Start %4d, Count %4d\n",
index, rsrc_blks->rsrc_start,
rsrc_blks->rsrc_size);
@@ -5706,11 +5712,11 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
if (i != 0)
pr_err("%s\n", line_buf);
len = 0;
- len += snprintf(line_buf+len,
+ len += scnprintf(line_buf+len,
LPFC_MBX_ACC_LBUF_SZ-len,
"%03d: ", i);
}
- len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+ len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
"%08x ", (uint32_t)*pword);
pword++;
}
@@ -5773,11 +5779,11 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
pr_err("%s\n", line_buf);
len = 0;
memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
- len += snprintf(line_buf+len,
+ len += scnprintf(line_buf+len,
LPFC_MBX_ACC_LBUF_SZ-len,
"%03d: ", i);
}
- len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+ len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
"%08x ",
((uint32_t)*pword) & 0xffffffff);
pword++;
@@ -5796,18 +5802,18 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
pr_err("%s\n", line_buf);
len = 0;
memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
- len += snprintf(line_buf+len,
+ len += scnprintf(line_buf+len,
LPFC_MBX_ACC_LBUF_SZ-len,
"%03d: ", i);
}
for (j = 0; j < 4; j++) {
- len += snprintf(line_buf+len,
+ len += scnprintf(line_buf+len,
LPFC_MBX_ACC_LBUF_SZ-len,
"%02x",
((uint8_t)*pbyte) & 0xff);
pbyte++;
}
- len += snprintf(line_buf+len,
+ len += scnprintf(line_buf+len,
LPFC_MBX_ACC_LBUF_SZ-len, " ");
}
if ((i - 1) % 8)
@@ -5891,7 +5897,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_lockstat);
if (!phba->debug_lockstat) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0913 Cant create debugfs lockstat\n");
+ "4610 Cant create debugfs lockstat\n");
goto debug_failed;
}
#endif
@@ -6134,7 +6140,7 @@ nvmeio_off:
vport, &lpfc_debugfs_op_scsistat);
if (!vport->debug_scsistat) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0914 Cannot create debugfs scsistat\n");
+ "4611 Cannot create debugfs scsistat\n");
goto debug_failed;
}
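
The dominant change in lpfc_debugfs.c above is the switch from snprintf() to scnprintf() in the "len += ..." accumulation pattern. The difference matters for these debugfs dumps: snprintf() returns the length the formatted string would have needed, so once the buffer fills, len can exceed size and the next "size - len" goes negative (a huge value when taken as a size_t). scnprintf() returns the number of bytes actually stored, excluding the terminating NUL, so len can never pass the buffer size. A minimal sketch of the safe pattern, standalone rather than lpfc code:

#include <linux/kernel.h>	/* scnprintf() */

static int fill_report(char *buf, int size)
{
	int len = 0;

	/*
	 * scnprintf() returns what was actually written, so len stays
	 * bounded by size and every later "size - len" remains sane.
	 * With snprintf(), a truncated line would instead push len past
	 * size and corrupt the remaining-space arithmetic.
	 */
	len += scnprintf(buf + len, size - len, "counter a: %08x\n", 0x1234u);
	len += scnprintf(buf + len, size - len, "counter b: %08x\n", 0x5678u);

	return len;	/* suitable for simple_read_from_buffer() */
}
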
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 93ab7dfb8ee0..2322ddb085c0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -345,10 +345,10 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
esize = q->entry_size;
qe_word_cnt = esize / sizeof(uint32_t);
- pword = q->qe[idx].address;
+ pword = lpfc_sli4_qe(q, idx);
len = 0;
- len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
+ len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
if (qe_word_cnt > 8)
printk(KERN_ERR "%s\n", line_buf);
@@ -359,11 +359,11 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
if (qe_word_cnt > 8) {
len = 0;
memset(line_buf, 0, LPFC_LBUF_SZ);
- len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
+ len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len,
"%03d: ", i);
}
}
- len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
+ len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
((uint32_t)*pword) & 0xffffffff);
pword++;
}
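
The other change running through these two files replaces direct queue-entry indexing (q->qe[idx].address) with the lpfc_sli4_qe() accessor, which looks the entry up through the queue's page array. The accessor itself is introduced elsewhere in the series, so the names below are assumptions for illustration only, not the driver's actual layout; the general shape of such a paged lookup is simply:

/*
 * Illustration only: parameter names are hypothetical, not lpfc's.
 * Entries live in fixed-size pages, so index idx maps to page
 * idx / entries_per_page at byte offset entry_size * (idx % entries_per_page).
 */
static inline void *paged_qe(void **pages, uint32_t entry_size,
			     uint32_t entries_per_page, uint16_t idx)
{
	return (char *)pages[idx / entries_per_page] +
	       (size_t)entry_size * (idx % entries_per_page);
}
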
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index fc077cb87900..c8fb0b455f2a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1961,7 +1961,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *prsp;
- int disc, rc;
+ int disc;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1990,7 +1990,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- rc = 0;
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -2029,18 +2028,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp))
- rc = NLP_STE_FREED_NODE;
- else
- rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_PLOGI);
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
} else {
/* Good status, call state machine */
prsp = list_entry(((struct lpfc_dmabuf *)
cmdiocb->context2)->list.next,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
- rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
}
@@ -6744,12 +6741,11 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t *lp;
RNID *rn;
struct ls_rjt stat;
- uint32_t cmd;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
- cmd = *lp++;
+ lp++;
rn = (RNID *) lp;
/* RNID received */
@@ -7508,14 +7504,14 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t *lp;
IOCB_t *icmd;
FARP *fp;
- uint32_t cmd, cnt, did;
+ uint32_t cnt, did;
icmd = &cmdiocb->iocb;
did = icmd->un.elsreq64.remoteID;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
- cmd = *lp++;
+ lp++;
fp = (FARP *) lp;
/* FARP-REQ received from DID <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -7580,14 +7576,14 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
IOCB_t *icmd;
- uint32_t cmd, did;
+ uint32_t did;
icmd = &cmdiocb->iocb;
did = icmd->un.elsreq64.remoteID;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
- cmd = *lp++;
+ lp++;
/* FARP-RSP received from DID <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0600 FARP-RSP received from DID x%x\n", did);
@@ -8454,6 +8450,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_INVALID_OX_RX;
break;
+ case ELS_CMD_FPIN:
+ /*
+ * Received FPIN from fabric - pass it to the
+ * transport FPIN handler.
+ */
+ fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len,
+ (char *)payload);
+ break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -8775,7 +8779,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_put(ndlp);
return;
}
-
+ /* fall through */
default:
/* Try to recover from this error */
if (phba->sli_rev == LPFC_SLI_REV4)
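
Several hunks here and in lpfc_hbadisc.c below add /* fall through */ comments to switch cases that deliberately continue into the next label (lpfc_els.c also gains an ELS_CMD_FPIN case that forwards fabric performance notifications to the transport via fc_host_fpin_rcv()). The fall-through comments are functional, not cosmetic: with GCC's -Wimplicit-fallthrough, which the kernel was enabling tree-wide around this period, a recognized comment suppresses the warning while an unannotated fall through does not. A minimal, driver-independent illustration:

static int classify(int cmd)
{
	int score = 0;

	switch (cmd) {
	case 2:
		score += 10;	/* case 2 intentionally also does case 1's work */
		/* fall through */
	case 1:
		score += 1;
		break;
	default:
		break;
	}
	return score;
}
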
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index aa4961a2caf8..c43852f97f25 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -885,15 +885,9 @@ lpfc_linkdown(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mb;
int i;
- if (phba->link_state == LPFC_LINK_DOWN) {
- if (phba->sli4_hba.conf_trunk) {
- phba->trunk_link.link0.state = 0;
- phba->trunk_link.link1.state = 0;
- phba->trunk_link.link2.state = 0;
- phba->trunk_link.link3.state = 0;
- }
+ if (phba->link_state == LPFC_LINK_DOWN)
return 0;
- }
+
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
@@ -932,7 +926,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
}
}
lpfc_destroy_vport_work_array(phba, vports);
- /* Clean up any firmware default rpi's */
+
+ /* Clean up any SLI3 firmware default rpi's */
+ if (phba->sli_rev > LPFC_SLI_REV3)
+ goto skip_unreg_did;
+
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
@@ -944,6 +942,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
}
}
+ skip_unreg_did:
/* Setup myDID for link up if we are in pt2pt mode */
if (phba->pport->fc_flag & FC_PT2PT) {
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4147,9 +4146,15 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rdata->pnode = lpfc_nlp_get(ndlp);
if (ndlp->nlp_type & NLP_FCP_TARGET)
- rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
- rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
+ if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
+ if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
@@ -4667,12 +4672,15 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
case CMD_GEN_REQUEST64_CR:
if (iocb->context_un.ndlp == ndlp)
return 1;
+ /* fall through */
case CMD_ELS_REQUEST64_CR:
if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
return 1;
+ /* fall through */
case CMD_XMIT_ELS_RSP64_CX:
if (iocb->context1 == (uint8_t *) ndlp)
return 1;
+ /* fall through */
}
} else if (pring->ringno == LPFC_FCP_RING) {
/* Skip match check if waiting to relogin to FCP target */
@@ -4868,6 +4876,10 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* accept PLOGIs after unreg_rpi_cmpl
*/
acc_plogi = 0;
+ } else if (vport->load_flag & FC_UNLOADING) {
+ mbox->ctx_ndlp = NULL;
+ mbox->mbox_cmpl =
+ lpfc_sli_def_mbox_cmpl;
} else {
mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl =
@@ -4979,6 +4991,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
LPFC_MBOXQ_t *mbox;
int rc;
+ /* Unreg DID is an SLI3 operation. */
+ if (phba->sli_rev > LPFC_SLI_REV3)
+ return;
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
@@ -5856,7 +5872,7 @@ restart_disc:
case LPFC_LINK_UP:
lpfc_issue_clear_la(phba, vport);
- /* Drop thru */
+ /* fall through */
case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ec1227018913..edd8f3982023 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -560,6 +560,8 @@ struct fc_vft_header {
#define fc_vft_hdr_hopct_WORD word1
};
+#include <uapi/scsi/fc/fc_els.h>
+
/*
* Extended Link Service LS_COMMAND codes (Payload Word 0)
*/
@@ -603,6 +605,7 @@ struct fc_vft_header {
#define ELS_CMD_RNID 0x78000000
#define ELS_CMD_LIRR 0x7A000000
#define ELS_CMD_LCB 0x81000000
+#define ELS_CMD_FPIN 0x16000000
#else /* __LITTLE_ENDIAN_BITFIELD */
#define ELS_CMD_MASK 0xffff
#define ELS_RSP_MASK 0xff
@@ -643,6 +646,7 @@ struct fc_vft_header {
#define ELS_CMD_RNID 0x78
#define ELS_CMD_LIRR 0x7A
#define ELS_CMD_LCB 0x81
+#define ELS_CMD_FPIN ELS_FPIN
#endif
/*
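
The lpfc_hw.h hunk above adds ELS_CMD_FPIN to the driver's two-flavoured command encoding: the driver compares payload word 0 as read from the wire (big-endian) against per-endianness constants rather than byte-swapping it, so the big-endian build uses 0x16000000 while the little-endian build reuses ELS_FPIN (0x16) from the newly included uapi header. Normalizing instead of using paired constants would look roughly like the sketch below; this is an illustration of the encoding, not the driver's actual dispatch path:

#include <linux/types.h>
#include <asm/byteorder.h>

static inline u8 els_opcode_from_word0(__be32 wire_word0)
{
	/*
	 * The ELS command is the first byte on the wire, i.e. bits 31..24
	 * of the value after be32_to_cpu().
	 */
	return be32_to_cpu(wire_word0) >> 24;
}
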
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index ff875b833192..77f9a55a3f54 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1894,18 +1894,19 @@ struct lpfc_mbx_set_link_diag_loopback {
union {
struct {
uint32_t word0;
-#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
-#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000003
-#define lpfc_mbx_set_diag_lpbk_type_WORD word0
-#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
-#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_SERDES 0x2
-#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
-#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
-#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
-#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
-#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
-#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_type_WORD word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES 0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED 0x3
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
} req;
struct {
uint32_t word0;
@@ -4083,22 +4084,7 @@ struct lpfc_acqe_grp5 {
uint32_t trailer;
};
-static char *const trunk_errmsg[] = { /* map errcode */
- "", /* There is no such error code at index 0*/
- "link negotiated speed does not match existing"
- " trunk - link was \"low\" speed",
- "link negotiated speed does not match"
- " existing trunk - link was \"middle\" speed",
- "link negotiated speed does not match existing"
- " trunk - link was \"high\" speed",
- "Attached to non-trunking port - F_Port",
- "Attached to non-trunking port - N_Port",
- "FLOGI response timeout",
- "non-FLOGI frame received",
- "Invalid FLOGI response",
- "Trunking initialization protocol",
- "Trunk peer device mismatch",
-};
+extern const char *const trunk_errmsg[];
struct lpfc_acqe_fc_la {
uint32_t word0;
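
The trunk_errmsg[] change above is the usual header clean-up: defining the string table in lpfc_hw4.h gave every translation unit that includes the header its own copy of the data (and potential unused-variable warnings), so the header now only declares the array extern and a single .c file in the driver carries the one definition (that file is outside this hunk). The pattern, with hypothetical names:

/* shared header: declaration only, one symbol for all includers */
extern const char *const example_errmsg[];

/* exactly one .c file: the single definition */
const char *const example_errmsg[] = {
	"",			/* index 0 is unused */
	"first error string",
	"second error string",
};
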
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7fcdaed3fa94..eaaef682de25 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1117,19 +1117,19 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
}
}
+ spin_unlock_irq(&phba->hbalock);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
&nvmet_aborts);
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
}
- spin_unlock_irq(&phba->hbalock);
lpfc_sli4_free_sp_events(phba);
return cnt;
}
@@ -1844,8 +1844,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
/* If the pci channel is offline, ignore possible errors, since
* we cannot communicate with the pci card anyway.
*/
- if (pci_channel_offline(phba->pcidev))
+ if (pci_channel_offline(phba->pcidev)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3166 pci channel is offline\n");
+ lpfc_sli4_offline_eratt(phba);
return;
+ }
memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
@@ -1922,6 +1926,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3151 PCI bus read access failure: x%x\n",
readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
+ lpfc_sli4_offline_eratt(phba);
return;
}
reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
@@ -3075,7 +3080,7 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
* This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
* to expedite pool. Mark them as expedite.
**/
-void lpfc_create_expedite_pool(struct lpfc_hba *phba)
+static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_io_buf *lpfc_ncmd;
@@ -3110,7 +3115,7 @@ void lpfc_create_expedite_pool(struct lpfc_hba *phba)
* This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
* of HWQ 0. Clear the mark.
**/
-void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
+static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_io_buf *lpfc_ncmd;
@@ -3230,7 +3235,7 @@ void lpfc_create_multixri_pools(struct lpfc_hba *phba)
*
* This routine returns XRIs from public/private to lpfc_io_buf_list_put.
**/
-void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
+static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
u32 i;
u32 hwq_count;
@@ -3245,6 +3250,13 @@ void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
lpfc_destroy_expedite_pool(phba);
+ if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ lpfc_sli_flush_fcp_rings(phba);
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
+ }
+
hwq_count = phba->cfg_hdw_queue;
for (i = 0; i < hwq_count; i++) {
@@ -3611,8 +3623,6 @@ lpfc_io_free(struct lpfc_hba *phba)
struct lpfc_sli4_hdw_queue *qp;
int idx;
- spin_lock_irq(&phba->hbalock);
-
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
/* Release all the lpfc_nvme_bufs maintained by this host. */
@@ -3642,8 +3652,6 @@ lpfc_io_free(struct lpfc_hba *phba)
}
spin_unlock(&qp->io_buf_list_get_lock);
}
-
- spin_unlock_irq(&phba->hbalock);
}
/**
@@ -4457,7 +4465,7 @@ finished:
return stat;
}
-void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
+static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -8603,9 +8611,9 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
if (phba->nvmet_support) {
if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
}
- if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
- phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
@@ -8626,10 +8634,12 @@ static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
+ int cpu;
+ cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
phba->sli4_hba.cq_esize,
- LPFC_CQE_EXP_COUNT);
+ LPFC_CQE_EXP_COUNT, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -8638,11 +8648,12 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
}
qdesc->qe_valid = 1;
qdesc->hdwq = wqidx;
- qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
+ qdesc->chann = cpu;
phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
- LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
+ LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
+ cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -8661,18 +8672,20 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
uint32_t wqesize;
+ int cpu;
+ cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
/* Create Fast Path FCP CQs */
if (phba->enab_exp_wqcq_pages)
/* Increase the CQ size when WQEs contain an embedded cdb */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
phba->sli4_hba.cq_esize,
- LPFC_CQE_EXP_COUNT);
+ LPFC_CQE_EXP_COUNT, cpu);
else
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8680,7 +8693,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
}
qdesc->qe_valid = 1;
qdesc->hdwq = wqidx;
- qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
+ qdesc->chann = cpu;
phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
/* Create Fast Path FCP WQs */
@@ -8690,11 +8703,11 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
wqesize,
- LPFC_WQE_EXP_COUNT);
+ LPFC_WQE_EXP_COUNT, cpu);
} else
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
+ phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8727,7 +8740,7 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- int idx, eqidx;
+ int idx, eqidx, cpu;
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_eq_intr_info *eqi;
@@ -8814,13 +8827,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create HBA Event Queues (EQs) */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ /* determine EQ affinity */
+ eqidx = lpfc_find_eq_handle(phba, idx);
+ cpu = lpfc_find_cpu_handle(phba, eqidx, LPFC_FIND_BY_EQ);
/*
* If there are more Hardware Queues than available
- * CQs, multiple Hardware Queues may share a common EQ.
+ * EQs, multiple Hardware Queues may share a common EQ.
*/
if (idx >= phba->cfg_irq_chann) {
/* Share an existing EQ */
- eqidx = lpfc_find_eq_handle(phba, idx);
phba->sli4_hba.hdwq[idx].hba_eq =
phba->sli4_hba.hdwq[eqidx].hba_eq;
continue;
@@ -8828,7 +8843,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create an EQ */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.eq_esize,
- phba->sli4_hba.eq_ecount);
+ phba->sli4_hba.eq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0497 Failed allocate EQ (%d)\n", idx);
@@ -8838,9 +8853,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qdesc->hdwq = idx;
/* Save the CPU this EQ is affinitised to */
- eqidx = lpfc_find_eq_handle(phba, idx);
- qdesc->chann = lpfc_find_cpu_handle(phba, eqidx,
- LPFC_FIND_BY_EQ);
+ qdesc->chann = cpu;
phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
qdesc->last_cpu = qdesc->chann;
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
@@ -8863,11 +8876,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ cpu = lpfc_find_cpu_handle(phba, idx,
+ LPFC_FIND_BY_HDWQ);
qdesc = lpfc_sli4_queue_alloc(
phba,
LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ phba->sli4_hba.cq_ecount,
+ cpu);
if (!qdesc) {
lpfc_printf_log(
phba, KERN_ERR, LOG_INIT,
@@ -8877,7 +8893,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
qdesc->qe_valid = 1;
qdesc->hdwq = idx;
- qdesc->chann = idx;
+ qdesc->chann = cpu;
phba->sli4_hba.nvmet_cqset[idx] = qdesc;
}
}
@@ -8887,10 +8903,11 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
* Create Slow Path Completion Queues (CQs)
*/
+ cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
/* Create slow-path Mailbox Command Complete Queue */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0500 Failed allocate slow-path mailbox CQ\n");
@@ -8902,7 +8919,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create slow-path ELS Complete Queue */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0501 Failed allocate slow-path ELS CQ\n");
@@ -8921,7 +8938,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.mq_esize,
- phba->sli4_hba.mq_ecount);
+ phba->sli4_hba.mq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0505 Failed allocate slow-path MQ\n");
@@ -8937,7 +8954,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create slow-path ELS Work Queue */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
+ phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0504 Failed allocate slow-path ELS WQ\n");
@@ -8951,7 +8968,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create NVME LS Complete Queue */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6079 Failed allocate NVME LS CQ\n");
@@ -8964,7 +8981,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create NVME LS Work Queue */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
+ phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6080 Failed allocate NVME LS WQ\n");
@@ -8982,7 +8999,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
- phba->sli4_hba.rq_ecount);
+ phba->sli4_hba.rq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0506 Failed allocate receive HRQ\n");
@@ -8993,7 +9010,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
- phba->sli4_hba.rq_ecount);
+ phba->sli4_hba.rq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0507 Failed allocate receive DRQ\n");
@@ -9004,11 +9021,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ cpu = lpfc_find_cpu_handle(phba, idx,
+ LPFC_FIND_BY_HDWQ);
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
- LPFC_NVMET_RQE_DEF_COUNT);
+ LPFC_NVMET_RQE_DEF_COUNT,
+ cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3146 Failed allocate "
@@ -9019,8 +9039,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
/* Only needed for header of RQ pair */
- qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
- GFP_KERNEL);
+ qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
+ GFP_KERNEL,
+ cpu_to_node(cpu));
if (qdesc->rqbp == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6131 Failed allocate "
@@ -9035,7 +9056,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qdesc = lpfc_sli4_queue_alloc(phba,
LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
- LPFC_NVMET_RQE_DEF_COUNT);
+ LPFC_NVMET_RQE_DEF_COUNT,
+ cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3156 Failed allocate "
@@ -9134,6 +9156,20 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
+ /*
+ * Set FREE_INIT before beginning to free the queues.
+ * Wait for any users of the queues to acknowledge, by clearing
+ * FREE_WAIT, that the queues can be released.
+ */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
+ while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
+ spin_unlock_irq(&phba->hbalock);
+ msleep(20);
+ spin_lock_irq(&phba->hbalock);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
/* Release HBA eqs */
if (phba->sli4_hba.hdwq)
lpfc_sli4_release_hdwq(phba);
@@ -9172,6 +9208,11 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Everything on this list has been freed */
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
+
+ /* Done with freeing the queues */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
+ spin_unlock_irq(&phba->hbalock);
}
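
The FREE_INIT/FREE_WAIT handshake added above is a two-flag protocol: the teardown path raises LPFC_QUEUE_FREE_INIT so that no new walker starts touching the queues, then polls (dropping hbalock around the msleep) until any in-flight walker clears LPFC_QUEUE_FREE_WAIT. The walker side lives elsewhere in the driver and is not part of this hunk; a minimal kernel-style sketch of what it is expected to look like, with hypothetical helper names, is:

static bool lpfc_queue_user_enter(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
		/* Teardown has already announced itself; stay away. */
		spin_unlock_irq(&phba->hbalock);
		return false;
	}
	/* Ask lpfc_sli4_queue_destroy() to wait for us. */
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);
	return true;
}

static void lpfc_queue_user_exit(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);
}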
int
@@ -9231,7 +9272,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
rc = lpfc_wq_create(phba, wq, cq, qtype);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
+ "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
/* no need to tear down cq - caller will do so */
return rc;
@@ -9271,7 +9312,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
* This routine will populate the cq_lookup table by all
* available CQ queue_id's.
**/
-void
+static void
lpfc_setup_cq_lookup(struct lpfc_hba *phba)
{
struct lpfc_queue *eq, *childq;
@@ -10740,7 +10781,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->cfg_irq_chann, vectors);
if (phba->cfg_irq_chann > vectors)
phba->cfg_irq_chann = vectors;
- if (phba->cfg_nvmet_mrq > vectors)
+ if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
phba->cfg_nvmet_mrq = vectors;
}
@@ -11297,7 +11338,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
+ phba->cfg_nvmet_mrq = 0;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
"6101 Disabling NVME support: "
"Not supported by firmware: %d %d\n",
@@ -13046,7 +13087,7 @@ lpfc_io_resume(struct pci_dev *pdev)
* is destroyed.
*
**/
-void
+static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
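
A recurring change in the lpfc_init.c hunks above is threading a cpu argument into every queue allocation so the backing structures can be placed on that CPU's NUMA node via kzalloc_node(..., cpu_to_node(cpu)) (and dev_to_node() for the DMA page bookkeeping). A minimal kernel-style sketch of the pattern in isolation, with hypothetical names:

#include <linux/slab.h>
#include <linux/topology.h>	/* cpu_to_node() */

/* Hypothetical per-queue context, allocated near the CPU that services it. */
struct demo_qctx {
	int cpu;
	void *ring;
	size_t ring_bytes;
};

static struct demo_qctx *demo_qctx_alloc(int cpu, size_t ring_bytes)
{
	struct demo_qctx *ctx;

	/* Place the control structure on the servicing CPU's home node. */
	ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, cpu_to_node(cpu));
	if (!ctx)
		return NULL;

	ctx->cpu = cpu;
	ctx->ring_bytes = ring_bytes;
	ctx->ring = kzalloc_node(ring_bytes, GFP_KERNEL, cpu_to_node(cpu));
	if (!ctx->ring) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}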
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6172682a24ba..59252bfca14e 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -360,6 +360,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
break;
+ /* fall through */
case NLP_STE_REG_LOGIN_ISSUE:
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
@@ -870,7 +871,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* This function will send a unreg_login mailbox command to the firmware
* to release a rpi.
**/
-void
+static void
lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, uint16_t rpi)
{
@@ -1732,7 +1733,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->u.mb;
uint32_t did = mb->un.varWords[1];
- int rc = 0;
if (mb->mbxStatus) {
/* RegLogin failed */
@@ -1805,8 +1805,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* GFT_ID to determine if remote port supports NVME.
*/
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
- rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
- 0, ndlp->nlp_DID);
+ lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0,
+ ndlp->nlp_DID);
return ndlp->nlp_state;
}
ndlp->nlp_fc4_type = NLP_FC4_FCP;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 1aa00d2c3f74..9d99cb915390 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -229,7 +229,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
if (qhandle == NULL)
return -ENOMEM;
- qhandle->cpu_id = smp_processor_id();
+ qhandle->cpu_id = raw_smp_processor_id();
qhandle->qidx = qidx;
/*
* NVME qidx == 0 is the admin queue, so both admin queue
@@ -312,7 +312,7 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
* Return value :
* None
*/
-void
+static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct lpfc_nvme_rport *rport = remoteport->private;
@@ -1106,13 +1106,16 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_ncmd, nCmd,
lpfc_ncmd->cur_iocbq.sli4_xritag,
bf_get(lpfc_wcqe_c_xb, wcqe));
+ /* fall through */
default:
out_err:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6072 NVME Completion Error: xri %x "
- "status x%x result x%x placed x%x\n",
+ "status x%x result x%x [x%x] "
+ "placed x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
+ wcqe->parameter,
wcqe->total_data_placed);
nCmd->transferred_length = 0;
nCmd->rcv_rsplen = 0;
@@ -1140,7 +1143,7 @@ out_err:
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
uint32_t cpu;
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
if (lpfc_ncmd->cpu != cpu)
lpfc_printf_vlog(vport,
@@ -1558,7 +1561,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
idx = lpfc_queue_info->index;
} else {
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
}
@@ -1638,7 +1641,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
lpfc_ncmd->cpu = cpu;
if (idx != cpu)
@@ -2080,15 +2083,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lpfc_nvme_template.max_hw_queues =
phba->sli4_hba.num_present_cpu;
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return ret;
+
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
*/
-#if (IS_ENABLED(CONFIG_NVME_FC))
+
ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
&vport->phba->pcidev->dev, &localport);
-#else
- ret = -ENOMEM;
-#endif
if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
"6005 Successfully registered local "
@@ -2123,6 +2126,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
return ret;
}
+#if (IS_ENABLED(CONFIG_NVME_FC))
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
*
* The driver has to wait for the host nvme transport to callback
@@ -2133,12 +2137,11 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
* An uninterruptible wait is used because of the risk of transport-to-
* driver state mismatch.
*/
-void
+static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
struct lpfc_nvme_lport *lport,
struct completion *lport_unreg_cmp)
{
-#if (IS_ENABLED(CONFIG_NVME_FC))
u32 wait_tmo;
int ret;
@@ -2161,8 +2164,8 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6177 Lport %p Localport %p Complete Success\n",
lport, vport->localport);
-#endif
}
+#endif
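
The restructuring above swaps an #if (IS_ENABLED(CONFIG_NVME_FC)) / #else around the registration call for an early return, and moves the remaining preprocessor guard so it wraps the whole lpfc_nvme_lport_unreg_wait() function rather than just its body. IS_ENABLED() expands to a compile-time constant, so the guarded code is still parsed and type-checked but compiled away when the option is off. A minimal sketch of the early-return form, hypothetical names:

#include <linux/kconfig.h>	/* IS_ENABLED() */

static int demo_do_register(void)
{
	return 0;	/* stand-in for the real transport registration */
}

static int demo_register_transport(void)
{
	/* Constant-folded: the call below is type-checked either way but
	 * dropped by the compiler when CONFIG_NVME_FC is not set.
	 */
	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	return demo_do_register();
}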
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 361e2b103648..d74bfd264495 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -220,7 +220,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 12, 13, 14, 15 - is zero */
}
-void
+static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
lockdep_assert_held(&ctxp->ctxlock);
@@ -325,7 +325,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
struct fc_frame_header *fc_hdr;
struct rqb_dmabuf *nvmebuf;
struct lpfc_nvmet_ctx_info *infop;
- uint32_t *payload;
uint32_t size, oxid, sid;
int cpu;
unsigned long iflag;
@@ -370,7 +369,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- payload = (uint32_t *)(nvmebuf->dbuf.virt);
size = nvmebuf->bytes_recv;
sid = sli4_sid_from_fc_hdr(fc_hdr);
@@ -435,7 +433,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
* Use the CPU context list, from the MRQ the IO was received on
* (ctxp->idx), to save context structure.
*/
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
@@ -765,7 +763,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- id = smp_processor_id();
+ id = raw_smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT) {
if (ctxp->cpu != id)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -906,7 +904,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- int id = smp_processor_id();
+ int id = raw_smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT) {
if (rsp->hwqid != id)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1120,7 +1118,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
- ctxp->oxid, ctxp->size, smp_processor_id());
+ ctxp->oxid, ctxp->size, raw_smp_processor_id());
if (!nvmebuf) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1596,7 +1594,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_nvmeio_data(phba,
"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
- xri, smp_processor_id(), 0);
+ xri, raw_smp_processor_id(), 0);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
@@ -1612,7 +1610,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
- xri, smp_processor_id(), 1);
+ xri, raw_smp_processor_id(), 1);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
@@ -1725,7 +1723,11 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
- wait_for_completion_timeout(&tport_unreg_cmp, 5);
+ if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+ msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6179 Unreg targetport %p timeout "
+ "reached.\n", phba->targetport);
lpfc_nvmet_cleanup_io_context(phba);
}
phba->targetport = NULL;
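
The lpfc_nvmet.c change above fixes a units bug: wait_for_completion_timeout() takes its timeout in jiffies, so the old literal 5 waited roughly five timer ticks rather than five seconds. The patch converts LPFC_NVMET_WAIT_TMO (defined further down as 5 * MSEC_PER_SEC) with msecs_to_jiffies() and logs when the unregister never completes, since wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise. A minimal sketch of the corrected idiom, hypothetical names:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define DEMO_WAIT_TMO_MS	5000	/* hypothetical: five seconds */

static int demo_wait_unreg(struct completion *done)
{
	/* Timeout is in jiffies; convert from milliseconds explicitly. */
	if (!wait_for_completion_timeout(done,
					 msecs_to_jiffies(DEMO_WAIT_TMO_MS)))
		return -ETIMEDOUT;	/* 0 return means the wait expired */

	return 0;
}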
@@ -1843,7 +1845,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
struct lpfc_hba *phba = ctxp->phba;
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
struct lpfc_nvmet_tgtport *tgtp;
- uint32_t *payload;
+ uint32_t *payload, qno;
uint32_t rc;
unsigned long iflags;
@@ -1876,6 +1878,15 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
/* Process FCP command */
if (rc == 0) {
atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ spin_lock_irqsave(&ctxp->ctxlock, iflags);
+ if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
+ (nvmebuf != ctxp->rqb_buffer)) {
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+ return;
+ }
+ ctxp->rqb_buffer = NULL;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
}
@@ -1886,6 +1897,20 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->oxid, ctxp->size, ctxp->sid);
atomic_inc(&tgtp->rcv_fcp_cmd_out);
atomic_inc(&tgtp->defer_fod);
+ spin_lock_irqsave(&ctxp->ctxlock, iflags);
+ if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+ return;
+ }
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+ /*
+ * Post a replacement DMA buffer to RQ and defer
+ * freeing rcv buffer till .defer_rcv callback
+ */
+ qno = nvmebuf->idx;
+ lpfc_post_rq_buffer(
+ phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+ phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
return;
}
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
@@ -1996,7 +2021,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_ctxbuf *ctx_buf;
struct lpfc_nvmet_ctx_info *current_infop;
- uint32_t *payload;
uint32_t size, oxid, sid, qno;
unsigned long iflag;
int current_cpu;
@@ -2020,7 +2044,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
* be empty, thus it would need to be replenished with the
* context list from another CPU for this MRQ.
*/
- current_cpu = smp_processor_id();
+ current_cpu = raw_smp_processor_id();
current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
if (current_infop->nvmet_ctx_list_cnt) {
@@ -2050,7 +2074,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
#endif
lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
- oxid, size, smp_processor_id());
+ oxid, size, raw_smp_processor_id());
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -2074,7 +2098,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
return;
}
- payload = (uint32_t *)(nvmebuf->dbuf.virt);
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
@@ -2690,12 +2713,11 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
{
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
- uint32_t status, result;
+ uint32_t result;
unsigned long flags;
bool released = false;
ctxp = cmdwqe->context2;
- status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -2761,11 +2783,10 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
unsigned long flags;
- uint32_t status, result;
+ uint32_t result;
bool released = false;
ctxp = cmdwqe->context2;
- status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
if (!ctxp) {
@@ -2842,10 +2863,9 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
{
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
- uint32_t status, result;
+ uint32_t result;
ctxp = cmdwqe->context2;
- status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3200,7 +3220,6 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
{
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_iocbq *abts_wqeq;
- union lpfc_wqe128 *wqe_abts;
unsigned long flags;
int rc;
@@ -3230,7 +3249,6 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
}
}
abts_wqeq = ctxp->wqeq;
- wqe_abts = &abts_wqeq->wqe;
if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
rc = WQE_BUSY;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 368deea2bcf8..2f3f603d94c4 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -27,10 +27,11 @@
#define LPFC_NVMET_RQE_DEF_COUNT 2048
#define LPFC_NVMET_SUCCESS_LEN 12
-#define LPFC_NVMET_MRQ_OFF 0xffff
#define LPFC_NVMET_MRQ_AUTO 0
#define LPFC_NVMET_MRQ_MAX 16
+#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC)
+
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c98f264f1d83..ba996fbde89b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -688,7 +688,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint32_t sgl_size, cpu, idx;
int tag;
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
tag = blk_mq_unique_tag(cmnd->request);
idx = blk_mq_unique_tag_to_hwq(tag);
@@ -1137,7 +1137,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
break;
}
- /* Drop thru */
+ /* fall through */
case SCSI_PROT_WRITE_INSERT:
/*
* For WRITE_INSERT, force the error
@@ -1256,7 +1256,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc = BG_ERR_TGT | BG_ERR_CHECK;
break;
}
- /* Drop thru */
+ /* fall through */
case SCSI_PROT_WRITE_INSERT:
/*
* For WRITE_INSERT, force the
@@ -1338,7 +1338,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (op) {
case SCSI_PROT_WRITE_PASS:
rc = BG_ERR_CHECK;
- /* Drop thru */
+ /* fall through */
case SCSI_PROT_WRITE_INSERT:
/*
@@ -3669,8 +3669,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
- cpu = smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT)
+ cpu = raw_smp_processor_id();
+ if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
}
#endif
@@ -3822,7 +3822,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->cur_iocbq.sli4_lxritag,
0, 0);
}
- /* else: fall through */
+ /* fall through */
default:
cmd->result = DID_ERROR << 16;
break;
@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) {
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
- if (lpfc_cmd->waitq)
- wake_up(lpfc_cmd->waitq);
+ lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (lpfc_cmd->waitq) {
+ wake_up(lpfc_cmd->waitq);
lpfc_cmd->waitq = NULL;
}
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4464,7 +4463,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
@@ -5049,7 +5048,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0798 Device Reset rport failure: rdata x%p\n",
+ "0798 Device Reset rdata failure: rdata x%p\n",
rdata);
return FAILED;
}
@@ -5118,9 +5117,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
int status;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
- if (!rdata) {
+ if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0799 Target Reset rport failure: rdata x%p\n", rdata);
+ "0799 Target Reset rdata failure: rdata x%p\n",
+ rdata);
return FAILED;
}
pnode = rdata->pnode;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 57b4a463b589..2acda188b0dc 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -87,9 +87,6 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
-static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *cmdiocb);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -151,7 +148,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
/* sanity check on queue memory */
if (unlikely(!q))
return -ENOMEM;
- temp_wqe = q->qe[q->host_index].wqe;
+ temp_wqe = lpfc_sli4_qe(q, q->host_index);
/* If the host has not yet processed the next entry then we are done */
idx = ((q->host_index + 1) % q->entry_count);
@@ -271,7 +268,7 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
/* sanity check on queue memory */
if (unlikely(!q))
return -ENOMEM;
- temp_mqe = q->qe[q->host_index].mqe;
+ temp_mqe = lpfc_sli4_qe(q, q->host_index);
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
@@ -331,7 +328,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
/* sanity check on queue memory */
if (unlikely(!q))
return NULL;
- eqe = q->qe[q->host_index].eqe;
+ eqe = lpfc_sli4_qe(q, q->host_index);
/* If the next EQE is not valid then we are done */
if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
@@ -355,7 +352,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
* @q: The Event Queue to disable interrupts
*
**/
-inline void
+void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
struct lpfc_register doorbell;
@@ -374,7 +371,7 @@ lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
* @q: The Event Queue to disable interrupts
*
**/
-inline void
+void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
struct lpfc_register doorbell;
@@ -545,7 +542,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
/* sanity check on queue memory */
if (unlikely(!q))
return NULL;
- cqe = q->qe[q->host_index].cqe;
+ cqe = lpfc_sli4_qe(q, q->host_index);
/* If the next CQE is not valid then we are done */
if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
@@ -667,8 +664,8 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
return -ENOMEM;
hq_put_index = hq->host_index;
dq_put_index = dq->host_index;
- temp_hrqe = hq->qe[hq_put_index].rqe;
- temp_drqe = dq->qe[dq_put_index].rqe;
+ temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
+ temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
@@ -907,10 +904,10 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
mod_timer(&phba->rrq_tmr, next_time);
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
list_del(&rrq->list);
- if (!rrq->send_rrq)
+ if (!rrq->send_rrq) {
/* this call will free the rrq */
- lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- else if (lpfc_send_rrq(phba, rrq)) {
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ } else if (lpfc_send_rrq(phba, rrq)) {
/* if we send the rrq then the completion handler
* will clear the bit in the xribitmap.
*/
@@ -2502,8 +2499,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
+ pmb->ctx_ndlp = NULL;
}
- pmb->ctx_ndlp = NULL;
}
/* Check security permission status on INIT_LINK mailbox command */
@@ -3922,33 +3919,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
/**
- * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues an abort iocb for all the iocb commands
- * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
- * the return of this function. The caller is not required to hold any locks.
- **/
-void
-lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
- LIST_HEAD(completions);
- struct lpfc_iocbq *iocb, *next_iocb;
-
- if (pring->ringno == LPFC_ELS_RING)
- lpfc_fabric_abort_hba(phba);
-
- spin_lock_irq(&phba->hbalock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli4_abort_nvme_io(phba, pring, iocb);
- spin_unlock_irq(&phba->hbalock);
-}
-
-
-/**
* lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -3978,33 +3948,6 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
- * @phba: Pointer to HBA context object.
- *
- * This function aborts all wqes in NVME rings. This function issues an
- * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
- * the txcmplq is not guaranteed to complete before the return of this
- * function. The caller is not required to hold any locks.
- **/
-void
-lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
-{
- struct lpfc_sli_ring *pring;
- uint32_t i;
-
- if ((phba->sli_rev < LPFC_SLI_REV4) ||
- !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return;
-
- /* Abort all IO on each NVME ring. */
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
- lpfc_sli_abort_wqe_ring(phba, pring);
- }
-}
-
-
-/**
* lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
* @phba: Pointer to HBA context object.
*
@@ -4487,7 +4430,9 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
}
/* Turn off parity checking and serr during the physical reset */
- pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+ if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
+ return -EIO;
+
pci_write_config_word(phba->pcidev, PCI_COMMAND,
(cfg_value &
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
@@ -4564,7 +4509,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
"0389 Performing PCI function reset!\n");
/* Turn off parity checking and serr during the physical reset */
- pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+ if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3205 PCI read Config failed\n");
+ return -EIO;
+ }
+
pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
@@ -5395,7 +5345,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
}
/**
- * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
* @phba: pointer to lpfc hba data structure.
*
* This routine retrieves SLI4 device physical port name this PCI function
@@ -5403,40 +5353,30 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
*
* Return codes
* 0 - successful
- * otherwise - failed to retrieve physical port name
+ * otherwise - failed to retrieve controller attributes
**/
static int
-lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *mboxq;
struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
struct lpfc_controller_attribute *cntl_attr;
- struct lpfc_mbx_get_port_name *get_port_name;
void *virtaddr = NULL;
uint32_t alloclen, reqlen;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
- char cport_name = 0;
int rc;
- /* We assume nothing at this point */
- phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
- phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
-
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
- /* obtain link type and link number via READ_CONFIG */
- phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
- lpfc_sli4_read_config(phba);
- if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
- goto retrieve_ppname;
- /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+ /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
LPFC_SLI4_MBX_NEMBED);
+
if (alloclen < reqlen) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3084 Allocated DMA memory size (%d) is "
@@ -5462,16 +5402,71 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
rc = -ENXIO;
goto out_free_mboxq;
}
+
cntl_attr = &mbx_cntl_attr->cntl_attr;
phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
phba->sli4_hba.lnk_info.lnk_tp =
bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
phba->sli4_hba.lnk_info.lnk_no =
bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+
+ memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
+ strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+ sizeof(phba->BIOSVersion));
+
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3086 lnk_type:%d, lnk_numb:%d\n",
+ "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
phba->sli4_hba.lnk_info.lnk_tp,
- phba->sli4_hba.lnk_info.lnk_no);
+ phba->sli4_hba.lnk_info.lnk_no,
+ phba->BIOSVersion);
+out_free_mboxq:
+ if (rc != MBX_TIMEOUT) {
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ else
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves SLI4 device physical port name this PCI function
+ * is attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_get_port_name *get_port_name;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ char cport_name = 0;
+ int rc;
+
+ /* We assume nothing at this point */
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+ /* obtain link type and link number via READ_CONFIG */
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ lpfc_sli4_read_config(phba);
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+ goto retrieve_ppname;
+
+ /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+ rc = lpfc_sli4_get_ctl_attr(phba);
+ if (rc)
+ goto out_free_mboxq;
retrieve_ppname:
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -7047,7 +7042,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
*
* Returns: 0 = success, non-zero failure.
**/
-int
+static int
lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
{
LIST_HEAD(post_nblist);
@@ -7067,7 +7062,7 @@ lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
return rc;
}
-void
+static void
lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
uint32_t len;
@@ -7250,6 +7245,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"3080 Successful retrieving SLI4 device "
"physical port name: %s.\n", phba->Port);
+ rc = lpfc_sli4_get_ctl_attr(phba);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "8351 Successful retrieving SLI4 device "
+ "CTL ATTR\n");
+
/*
* Evaluate the read rev and vpd data. Populate the driver
* state with the results. If this routine fails, the failure
@@ -7652,12 +7653,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->cfg_xri_rebalancing = 0;
}
- /* Arm the CQs and then EQs on device */
- lpfc_sli4_arm_cqeq_intr(phba);
-
- /* Indicate device interrupt mode */
- phba->sli4_hba.intr_enable = 1;
-
/* Allow asynchronous mailbox command to go through */
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
@@ -7726,6 +7721,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->trunk_link.link3.state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
+ /* Arm the CQs and then EQs on device */
+ lpfc_sli4_arm_cqeq_intr(phba);
+
+ /* Indicate device interrupt mode */
+ phba->sli4_hba.intr_enable = 1;
+
if (!(phba->hba_flag & HBA_FCOE_MODE) &&
(phba->hba_flag & LINK_DISABLED)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
@@ -7820,8 +7821,9 @@ lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
mcq = phba->sli4_hba.mbx_cq;
idx = mcq->hba_index;
qe_valid = mcq->qe_valid;
- while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
- mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+ while (bf_get_le32(lpfc_cqe_valid,
+ (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
+ mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
(!bf_get_le32(lpfc_trailer_async, mcqe))) {
pending_completions = true;
@@ -8500,7 +8502,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
if (!db_ready)
- msleep(2);
+ mdelay(2);
if (time_after(jiffies, timeout))
return MBXERR_ERROR;
@@ -11264,102 +11266,6 @@ abort_iotag_exit:
}
/**
- * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- * @cmdiocb: Pointer to driver command iocb object.
- *
- * This function issues an abort iocb for the provided command iocb down to
- * the port. Other than the case the outstanding command iocb is an abort
- * request, this function issues abort out unconditionally. This function is
- * called with hbalock held. The function returns 0 when it fails due to
- * memory allocation failure or when the command iocb is an abort request.
- **/
-static int
-lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *cmdiocb)
-{
- struct lpfc_vport *vport = cmdiocb->vport;
- struct lpfc_iocbq *abtsiocbp;
- union lpfc_wqe128 *abts_wqe;
- int retval;
- int idx = cmdiocb->hba_wqidx;
-
- /*
- * There are certain command types we don't want to abort. And we
- * don't want to abort commands that are already in the process of
- * being aborted.
- */
- if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
- (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
- return 0;
-
- /* issue ABTS for this io based on iotag */
- abtsiocbp = __lpfc_sli_get_iocbq(phba);
- if (abtsiocbp == NULL)
- return 0;
-
- /* This signals the response to set the correct status
- * before calling the completion handler
- */
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
-
- /* Complete prepping the abort wqe and issue to the FW. */
- abts_wqe = &abtsiocbp->wqe;
-
- /* Clear any stale WQE contents */
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
- cmdiocb->iocb.ulpClass);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abtsiocbp->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbp->iocb_flag |= LPFC_IO_NVME;
- abtsiocbp->vport = vport;
- abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
- retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx],
- abtsiocbp);
- if (retval) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
- "6147 Failed abts issue_wqe with status x%x "
- "for oxid x%x\n",
- retval, cmdiocb->sli4_xritag);
- lpfc_sli_release_iocbq(phba, abtsiocbp);
- return retval;
- }
-
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
- "6148 Drv Abort NVME Request Issued for "
- "ox_id x%x on reqtag x%x\n",
- cmdiocb->sli4_xritag,
- abtsiocbp->iotag);
-
- return retval;
-}
-
-/**
* lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
* @phba: pointer to lpfc HBA data structure.
*
@@ -13636,7 +13542,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0390 Cannot schedule soft IRQ "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id, smp_processor_id());
+ cqid, cq->queue_id, raw_smp_processor_id());
}
/**
@@ -14019,7 +13925,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
return false;
}
drop:
- lpfc_in_buf_free(phba, &dma_buf->dbuf);
+ lpfc_rq_buf_free(phba, &dma_buf->hbuf);
break;
case FC_STATUS_INSUFF_BUF_FRM_DISC:
if (phba->nvmet_support) {
@@ -14185,7 +14091,7 @@ work_cq:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0363 Cannot schedule soft IRQ "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id, smp_processor_id());
+ cqid, cq->queue_id, raw_smp_processor_id());
}
/**
@@ -14324,7 +14230,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
eqi = phba->sli4_hba.eq_info;
icnt = this_cpu_inc_return(eqi->icnt);
- fpeq->last_cpu = smp_processor_id();
+ fpeq->last_cpu = raw_smp_processor_id();
if (icnt > LPFC_EQD_ISR_TRIGGER &&
phba->cfg_irq_chann == 1 &&
@@ -14410,6 +14316,9 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
if (!queue)
return;
+ if (!list_empty(&queue->wq_list))
+ list_del(&queue->wq_list);
+
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
@@ -14425,9 +14334,6 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
if (!list_empty(&queue->cpu_list))
list_del(&queue->cpu_list);
- if (!list_empty(&queue->wq_list))
- list_del(&queue->wq_list);
-
kfree(queue);
return;
}
@@ -14438,6 +14344,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
* @page_size: The size of a queue page
* @entry_size: The size of each queue entry for this queue.
* @entry count: The number of entries that this queue will handle.
+ * @cpu: The cpu that will primarily utilize this queue.
*
* This function allocates a queue structure and the DMAable memory used for
* the host resident queue. This function must be called before creating the
@@ -14445,28 +14352,26 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
**/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
- uint32_t entry_size, uint32_t entry_count)
+ uint32_t entry_size, uint32_t entry_count, int cpu)
{
struct lpfc_queue *queue;
struct lpfc_dmabuf *dmabuf;
- int x, total_qe_count;
- void *dma_pointer;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ uint16_t x, pgcnt;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = page_size;
- queue = kzalloc(sizeof(struct lpfc_queue) +
- (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
- if (!queue)
- return NULL;
- queue->page_count = (ALIGN(entry_size * entry_count,
- hw_page_size))/hw_page_size;
+ pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
/* If needed, Adjust page count to match the max the adapter supports */
- if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
- (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
- queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
+ if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
+ pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
+
+ queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!queue)
+ return NULL;
INIT_LIST_HEAD(&queue->list);
INIT_LIST_HEAD(&queue->wq_list);
@@ -14478,13 +14383,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
/* Set queue parameters now. If the system cannot provide memory
* resources, the free routine needs to know what was allocated.
*/
+ queue->page_count = pgcnt;
+ queue->q_pgs = (void **)&queue[1];
+ queue->entry_cnt_per_pg = hw_page_size / entry_size;
queue->entry_size = entry_size;
queue->entry_count = entry_count;
queue->page_size = hw_page_size;
queue->phba = phba;
- for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ for (x = 0; x < queue->page_count; x++) {
+ dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
+ dev_to_node(&phba->pcidev->dev));
if (!dmabuf)
goto out_fail;
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
@@ -14496,13 +14405,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
}
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
- /* initialize queue's entry array */
- dma_pointer = dmabuf->virt;
- for (; total_qe_count < entry_count &&
- dma_pointer < (hw_page_size + dmabuf->virt);
- total_qe_count++, dma_pointer += entry_size) {
- queue->qe[total_qe_count].address = dma_pointer;
- }
+ /* use lpfc_sli4_qe to index a particular entry in this page */
+ queue->q_pgs[x] = dmabuf->virt;
}
INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7a1a761efdd6..467b8270f7fd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -327,6 +327,10 @@ struct lpfc_sli {
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
+#define LPFC_QUEUE_FREE_INIT 0x10000 /* Queue freeing is in progress */
+#define LPFC_QUEUE_FREE_WAIT 0x20000 /* Hold Queue free as it is being
+ * used outside worker thread
+ */
struct lpfc_sli_ring *sli3_ring;
@@ -427,14 +431,13 @@ struct lpfc_io_buf {
struct {
struct nvmefc_fcp_req *nvmeCmd;
uint16_t qidx;
-
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint64_t ts_cmd_start;
- uint64_t ts_last_cmd;
- uint64_t ts_cmd_wqput;
- uint64_t ts_isr_cmpl;
- uint64_t ts_data_nvme;
-#endif
};
};
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint64_t ts_cmd_start;
+ uint64_t ts_last_cmd;
+ uint64_t ts_cmd_wqput;
+ uint64_t ts_isr_cmpl;
+ uint64_t ts_data_nvme;
+#endif
};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 40c85091c805..8e4fd1a98023 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -117,21 +117,6 @@ enum lpfc_sli4_queue_subtype {
LPFC_USOL
};
-union sli4_qe {
- void *address;
- struct lpfc_eqe *eqe;
- struct lpfc_cqe *cqe;
- struct lpfc_mcqe *mcqe;
- struct lpfc_wcqe_complete *wcqe_complete;
- struct lpfc_wcqe_release *wcqe_release;
- struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
- struct lpfc_rcqe_complete *rcqe_complete;
- struct lpfc_mqe *mqe;
- union lpfc_wqe *wqe;
- union lpfc_wqe128 *wqe128;
- struct lpfc_rqe *rqe;
-};
-
/* RQ buffer list */
struct lpfc_rqb {
uint16_t entry_count; /* Current number of RQ slots */
@@ -157,6 +142,7 @@ struct lpfc_queue {
struct list_head cpu_list;
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
+ uint32_t entry_cnt_per_pg;
uint32_t notify_interval; /* Queue Notification Interval
* For chip->host queues (EQ, CQ, RQ):
* specifies the interval (number of
@@ -254,17 +240,17 @@ struct lpfc_queue {
uint16_t last_cpu; /* most recent cpu */
uint8_t qe_valid;
struct lpfc_queue *assoc_qp;
- union sli4_qe qe[1]; /* array to index entries (must be last) */
+ void **q_pgs; /* array to index entries per page */
};
struct lpfc_sli4_link {
- uint16_t speed;
+ uint32_t speed;
uint8_t duplex;
uint8_t status;
uint8_t type;
uint8_t number;
uint8_t fault;
- uint16_t logical_speed;
+ uint32_t logical_speed;
uint16_t topology;
};
@@ -543,8 +529,9 @@ struct lpfc_sli4_lnk_info {
#define LPFC_LNK_DAT_INVAL 0
#define LPFC_LNK_DAT_VAL 1
uint8_t lnk_tp;
-#define LPFC_LNK_GE 0x0 /* FCoE */
-#define LPFC_LNK_FC 0x1 /* FC */
+#define LPFC_LNK_GE 0x0 /* FCoE */
+#define LPFC_LNK_FC 0x1 /* FC */
+#define LPFC_LNK_FC_TRUNKED 0x2 /* FC_Trunked */
uint8_t lnk_no;
uint8_t optic_state;
};
@@ -907,6 +894,18 @@ struct lpfc_sli4_hba {
#define lpfc_conf_trunk_port3_WORD conf_trunk
#define lpfc_conf_trunk_port3_SHIFT 3
#define lpfc_conf_trunk_port3_MASK 0x1
+#define lpfc_conf_trunk_port0_nd_WORD conf_trunk
+#define lpfc_conf_trunk_port0_nd_SHIFT 4
+#define lpfc_conf_trunk_port0_nd_MASK 0x1
+#define lpfc_conf_trunk_port1_nd_WORD conf_trunk
+#define lpfc_conf_trunk_port1_nd_SHIFT 5
+#define lpfc_conf_trunk_port1_nd_MASK 0x1
+#define lpfc_conf_trunk_port2_nd_WORD conf_trunk
+#define lpfc_conf_trunk_port2_nd_SHIFT 6
+#define lpfc_conf_trunk_port2_nd_MASK 0x1
+#define lpfc_conf_trunk_port3_nd_WORD conf_trunk
+#define lpfc_conf_trunk_port3_nd_SHIFT 7
+#define lpfc_conf_trunk_port3_nd_MASK 0x1
};
enum lpfc_sge_type {
@@ -990,8 +989,10 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
uint16_t);
void lpfc_sli4_hba_reset(struct lpfc_hba *);
-struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
- uint32_t, uint32_t);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *phba,
+ uint32_t page_size,
+ uint32_t entry_size,
+ uint32_t entry_count, int cpu);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@@ -1057,12 +1058,12 @@ void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
int lpfc_sli4_init_vpi(struct lpfc_vport *);
-inline void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
+void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
uint32_t count, bool arm);
void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
uint32_t count, bool arm);
-inline void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
+void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
uint32_t count, bool arm);
void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
@@ -1079,3 +1080,8 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
+static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
+{
+ return q->q_pgs[idx / q->entry_cnt_per_pg] +
+ (q->entry_size * (idx % q->entry_cnt_per_pg));
+}
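
lpfc_sli4_qe() above replaces the old per-entry pointer array (the removed union sli4_qe qe[]) with one pointer per page plus arithmetic: entry idx lives on page idx / entry_cnt_per_pg at byte offset entry_size * (idx % entry_cnt_per_pg). The arithmetic can be exercised on its own outside the kernel; a standalone sketch in plain C, with hypothetical types and sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same page/offset arithmetic as lpfc_sli4_qe(), on hypothetical types. */
struct demo_queue {
	uint32_t entry_size;		/* bytes per entry */
	uint32_t entry_cnt_per_pg;	/* entries per page */
	void **q_pgs;			/* one pointer per page */
};

static void *demo_qe(struct demo_queue *q, uint16_t idx)
{
	return (char *)q->q_pgs[idx / q->entry_cnt_per_pg] +
	       (size_t)q->entry_size * (idx % q->entry_cnt_per_pg);
}

int main(void)
{
	enum { PAGE = 4096, ESIZE = 64 };
	struct demo_queue q = {
		.entry_size = ESIZE,
		.entry_cnt_per_pg = PAGE / ESIZE,	/* 64 entries/page */
	};
	void *pages[2] = { malloc(PAGE), malloc(PAGE) };

	if (!pages[0] || !pages[1])
		return 1;
	q.q_pgs = pages;

	/* Entry 70 lands on page 1, byte offset 6 * 64 = 384. */
	printf("entry 70: page %u, byte offset %u\n",
	       70 / q.entry_cnt_per_pg,
	       (70 % q.entry_cnt_per_pg) * q.entry_size);
	memset(demo_qe(&q, 70), 0, ESIZE);

	free(pages[0]);
	free(pages[1]);
	return 0;
}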
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 43fd693cf042..f7d9ef428417 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.2.0.0"
+#define LPFC_DRIVER_VERSION "12.2.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2019 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 293f5cf524d7..473a120eb75d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -815,7 +815,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
&(regs)->inbound_high_queue_port);
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
- mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -2725,7 +2724,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
do {
if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
dev_info(&instance->pdev->dev,
- "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
+ "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
if (i == 3)
goto kill_hba_and_failed;
@@ -4648,7 +4647,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
* Return: 0 if DCMD succeeded
* non-zero if failed
*/
-int
+static int
megasas_host_device_list_query(struct megasas_instance *instance,
bool is_probe)
{
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 1d17128030cd..6129399c1942 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -242,7 +242,6 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
&instance->reg_set->inbound_low_queue_port);
writel(le32_to_cpu(req_desc->u.high),
&instance->reg_set->inbound_high_queue_port);
- mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
@@ -4419,7 +4418,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
if (!smid) {
ret = SUCCESS;
scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
- " issued is not found in oustanding commands\n");
+ " issued is not found in outstanding commands\n");
mutex_unlock(&instance->reset_mutex);
goto out;
}
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index b736dbc80485..a072187875df 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -45,6 +45,7 @@ config SCSI_MPT3SAS
depends on PCI && SCSI
select SCSI_SAS_ATTRS
select RAID_ATTRS
+ select IRQ_POLL
---help---
This driver supports PCI-Express SAS 12Gb/s Host Adapters.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 1d8c584ec1e9..8aacbd1e7db2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -94,6 +94,11 @@ module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
" max msix vectors");
+static int irqpoll_weight = -1;
+module_param(irqpoll_weight, int, 0);
+MODULE_PARM_DESC(irqpoll_weight,
+ "irq poll weight (default= one fourth of HBA queue depth)");
+
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
" enable detection of firmware fault and halt firmware - (default=0)");
@@ -1382,20 +1387,30 @@ union reply_descriptor {
} u;
};
+static u32 base_mod64(u64 dividend, u32 divisor)
+{
+ u32 remainder;
+
+ if (!divisor)
+ pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
+ remainder = do_div(dividend, divisor);
+ return remainder;
+}
+
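
base_mod64() above exists because a plain % on a u64 dividend would pull 64-bit library division into 32-bit builds; the kernel's do_div(n, base) macro divides the 64-bit n by a 32-bit base in place (n becomes the quotient) and returns the 32-bit remainder. A minimal kernel-style sketch of that contract, hypothetical name; the divisor check here simply returns 0 rather than logging:

#include <linux/types.h>
#include <asm/div64.h>	/* do_div() */

static u32 demo_mod64(u64 dividend, u32 divisor)
{
	u32 rem;

	if (!divisor)
		return 0;	/* sketch: refuse to divide by zero */

	/* do_div() updates 'dividend' to the quotient and returns the
	 * remainder as a 32-bit value.
	 */
	rem = do_div(dividend, divisor);
	return rem;
}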
/**
- * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
- * @irq: irq number (not used)
- * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ * _base_process_reply_queue - Process reply descriptors from reply
+ * descriptor post queue.
+ * @reply_q: per IRQ's reply queue object.
*
- * Return: IRQ_HANDLED if processed, else IRQ_NONE.
+ * Return: number of reply descriptors processed from reply
+ * descriptor queue.
*/
-static irqreturn_t
-_base_interrupt(int irq, void *bus_id)
+static int
+_base_process_reply_queue(struct adapter_reply_queue *reply_q)
{
- struct adapter_reply_queue *reply_q = bus_id;
union reply_descriptor rd;
- u32 completed_cmds;
- u8 request_desript_type;
+ u64 completed_cmds;
+ u8 request_descript_type;
u16 smid;
u8 cb_idx;
u32 reply;
@@ -1404,21 +1419,18 @@ _base_interrupt(int irq, void *bus_id)
Mpi2ReplyDescriptorsUnion_t *rpf;
u8 rc;
- if (ioc->mask_interrupts)
- return IRQ_NONE;
-
+ completed_cmds = 0;
if (!atomic_add_unless(&reply_q->busy, 1, 1))
- return IRQ_NONE;
+ return completed_cmds;
rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
- request_desript_type = rpf->Default.ReplyFlags
+ request_descript_type = rpf->Default.ReplyFlags
& MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
- if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
atomic_dec(&reply_q->busy);
- return IRQ_NONE;
+ return completed_cmds;
}
- completed_cmds = 0;
cb_idx = 0xFF;
do {
rd.word = le64_to_cpu(rpf->Words);
@@ -1426,11 +1438,11 @@ _base_interrupt(int irq, void *bus_id)
goto out;
reply = 0;
smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
- if (request_desript_type ==
+ if (request_descript_type ==
MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
- request_desript_type ==
+ request_descript_type ==
MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
- request_desript_type ==
+ request_descript_type ==
MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
cb_idx = _base_get_cb_idx(ioc, smid);
if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
@@ -1440,7 +1452,7 @@ _base_interrupt(int irq, void *bus_id)
if (rc)
mpt3sas_base_free_smid(ioc, smid);
}
- } else if (request_desript_type ==
+ } else if (request_descript_type ==
MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
reply = le32_to_cpu(
rpf->AddressReply.ReplyFrameAddress);
@@ -1486,7 +1498,7 @@ _base_interrupt(int irq, void *bus_id)
(reply_q->reply_post_host_index ==
(ioc->reply_post_queue_depth - 1)) ? 0 :
reply_q->reply_post_host_index + 1;
- request_desript_type =
+ request_descript_type =
reply_q->reply_post_free[reply_q->reply_post_host_index].
Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
completed_cmds++;
@@ -1495,7 +1507,7 @@ _base_interrupt(int irq, void *bus_id)
* So that FW can find enough entries to post the Reply
* Descriptors in the reply descriptor post queue.
*/
- if (completed_cmds > ioc->hba_queue_depth/3) {
+ if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
if (ioc->combined_reply_queue) {
writel(reply_q->reply_post_host_index |
((msix_index & 7) <<
@@ -1507,9 +1519,14 @@ _base_interrupt(int irq, void *bus_id)
MPI2_RPHI_MSIX_INDEX_SHIFT),
&ioc->chip->ReplyPostHostIndex);
}
- completed_cmds = 1;
+ if (!reply_q->irq_poll_scheduled) {
+ reply_q->irq_poll_scheduled = true;
+ irq_poll_sched(&reply_q->irqpoll);
+ }
+ atomic_dec(&reply_q->busy);
+ return completed_cmds;
}
- if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
goto out;
if (!reply_q->reply_post_host_index)
rpf = reply_q->reply_post_free;
@@ -1521,14 +1538,14 @@ _base_interrupt(int irq, void *bus_id)
if (!completed_cmds) {
atomic_dec(&reply_q->busy);
- return IRQ_NONE;
+ return completed_cmds;
}
if (ioc->is_warpdrive) {
writel(reply_q->reply_post_host_index,
ioc->reply_post_host_index[msix_index]);
atomic_dec(&reply_q->busy);
- return IRQ_HANDLED;
+ return completed_cmds;
}
/* Update Reply Post Host Index.
@@ -1555,7 +1572,82 @@ _base_interrupt(int irq, void *bus_id)
MPI2_RPHI_MSIX_INDEX_SHIFT),
&ioc->chip->ReplyPostHostIndex);
atomic_dec(&reply_q->busy);
- return IRQ_HANDLED;
+ return completed_cmds;
+}
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ *
+ * Return: IRQ_HANDLED if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+ struct adapter_reply_queue *reply_q = bus_id;
+ struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
+
+ if (ioc->mask_interrupts)
+ return IRQ_NONE;
+ if (reply_q->irq_poll_scheduled)
+ return IRQ_HANDLED;
+ return ((_base_process_reply_queue(reply_q) > 0) ?
+ IRQ_HANDLED : IRQ_NONE);
+}
+
+/**
+ * _base_irqpoll - IRQ poll callback handler
+ * @irqpoll: irq_poll object
+ * @budget: irq poll weight
+ *
+ * Return: number of reply descriptors processed.
+ */
+static int
+_base_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+ struct adapter_reply_queue *reply_q;
+ int num_entries = 0;
+
+ reply_q = container_of(irqpoll, struct adapter_reply_queue,
+ irqpoll);
+ if (reply_q->irq_line_enable) {
+ disable_irq(reply_q->os_irq);
+ reply_q->irq_line_enable = false;
+ }
+ num_entries = _base_process_reply_queue(reply_q);
+ if (num_entries < budget) {
+ irq_poll_complete(irqpoll);
+ reply_q->irq_poll_scheduled = false;
+ reply_q->irq_line_enable = true;
+ enable_irq(reply_q->os_irq);
+ }
+
+ return num_entries;
+}
+
+/**
+ * _base_init_irqpolls - initialize IRQ polls
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+static void
+_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q, *next;
+
+ if (list_empty(&ioc->reply_queue_list))
+ return;
+
+ list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ irq_poll_init(&reply_q->irqpoll,
+ ioc->hba_queue_depth/4, _base_irqpoll);
+ reply_q->irq_poll_scheduled = false;
+ reply_q->irq_line_enable = true;
+ reply_q->os_irq = pci_irq_vector(ioc->pdev,
+ reply_q->msix_index);
+ }
}
/**
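For readers new to the irq_poll API this patch adopts, the shape of the mechanism is: the hard interrupt handler schedules an irq_poll instance and keeps its own line quiet, and the softirq poll callback then drains completions in budget-sized chunks, calling irq_poll_complete() and re-enabling the line once it finds less work than the budget. A schematic, driver-agnostic sketch of that flow (my_queue, my_drain and my_queue_setup are illustrative names, not mpt3sas code):

#include <linux/interrupt.h>
#include <linux/irq_poll.h>
#include <linux/kernel.h>

struct my_queue {
	struct irq_poll	iop;
	unsigned int	os_irq;
	bool		polling;	/* true while irq_poll owns the queue */
};

/* Hypothetical: pulls up to 'budget' completions off the hardware queue. */
static int my_drain(struct my_queue *q, int budget);

static int my_poll(struct irq_poll *iop, int budget)
{
	struct my_queue *q = container_of(iop, struct my_queue, iop);
	int done = my_drain(q, budget);

	if (done < budget) {		/* queue drained for now */
		irq_poll_complete(iop);
		q->polling = false;
		enable_irq(q->os_irq);	/* hand ownership back to the ISR */
	}
	return done;
}

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_queue *q = data;

	if (q->polling)			/* poll context already owns the queue */
		return IRQ_HANDLED;

	q->polling = true;
	disable_irq_nosync(irq);	/* keep the line masked while polling */
	irq_poll_sched(&q->iop);
	return IRQ_HANDLED;
}

static void my_queue_setup(struct my_queue *q, unsigned int irq, int weight)
{
	q->os_irq = irq;
	q->polling = false;
	irq_poll_init(&q->iop, weight, my_poll);
}

The driver's version differs in detail -- it processes up to thresh_hold descriptors in the hard IRQ before scheduling, and disables the line from the poll callback rather than the ISR -- but the ownership hand-off between interrupt and poll context is the same.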
@@ -1596,6 +1688,17 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
+ if (reply_q->irq_poll_scheduled) {
+ /* Calling irq_poll_disable will wait for any pending
+ * callbacks to have completed.
+ */
+ irq_poll_disable(&reply_q->irqpoll);
+ irq_poll_enable(&reply_q->irqpoll);
+ reply_q->irq_poll_scheduled = false;
+ reply_q->irq_line_enable = true;
+ enable_irq(reply_q->os_irq);
+ continue;
+ }
synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
}
}
@@ -2757,6 +2860,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
if (!_base_is_controller_msix_enabled(ioc))
return;
+ ioc->msix_load_balance = false;
+ if (ioc->reply_queue_count < num_online_cpus()) {
+ ioc->msix_load_balance = true;
+ return;
+ }
memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
@@ -3015,6 +3123,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_fail;
+ if (!ioc->is_driver_loading)
+ _base_init_irqpolls(ioc);
/* Use the Combined reply queue feature only for SAS3 C0 & higher
* revision HBAs and also only when reply queue count is greater than 8
*/
@@ -3158,6 +3268,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
static inline u8
_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
{
+ /* Enables reply_queue load balancing */
+ if (ioc->msix_load_balance)
+ return ioc->reply_queue_count ?
+ base_mod64(atomic64_add_return(1,
+ &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
+
return ioc->cpu_msix_table[raw_smp_processor_id()];
}
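When there are fewer reply queues than online CPUs, the per-CPU mapping table is skipped and the driver simply round-robins submissions across the reply queues by taking a running IO counter modulo the queue count. A small userspace sketch of that distribution (the driver uses an atomic64 counter and base_mod64() rather than the plain types here):

#include <stdio.h>
#include <stdint.h>

/* Round-robin queue selection: (++counter) % queue_count. */
static unsigned int pick_reply_queue(uint64_t *io_cnt, unsigned int queue_count)
{
	if (!queue_count)
		return 0;
	return (unsigned int)(++(*io_cnt) % queue_count);
}

int main(void)
{
	uint64_t io_cnt = 0;
	unsigned int queues = 6;	/* e.g. 6 MSI-X vectors, more CPUs online */
	int i;

	for (i = 0; i < 12; i++)
		printf("IO %2d -> reply queue %u\n", i,
		       pick_reply_queue(&io_cnt, queues));
	return 0;
}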
@@ -3333,7 +3449,6 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
spin_lock_irqsave(writeq_lock, flags);
__raw_writel((u32)(b), addr);
__raw_writel((u32)(b >> 32), (addr + 4));
- mmiowb();
spin_unlock_irqrestore(writeq_lock, flags);
}
@@ -6507,6 +6622,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_free_resources;
+ if (irqpoll_weight > 0)
+ ioc->thresh_hold = irqpoll_weight;
+ else
+ ioc->thresh_hold = ioc->hba_queue_depth/4;
+
+ _base_init_irqpolls(ioc);
init_waitqueue_head(&ioc->reset_wq);
/* allocate memory pd handle bitmask list */
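The thresh_hold value set here feeds the batching check in the completion loop above: every thresh_hold processed descriptors, the ISR updates the reply post host index and defers the remaining work to irq_poll. By default the batch size is one quarter of the HBA queue depth, and a positive irqpoll_weight module parameter overrides it. A tiny userspace illustration of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int hba_queue_depth = 1024;
	int irqpoll_weight = -1;		/* module parameter default */
	unsigned int thresh_hold;
	uint64_t completed;

	thresh_hold = (irqpoll_weight > 0) ? (unsigned int)irqpoll_weight
					   : hba_queue_depth / 4;

	/* The ISR hands off to irq_poll whenever completed % thresh_hold == 0. */
	for (completed = 1; completed <= hba_queue_depth; completed++)
		if (!(completed % thresh_hold))
			printf("after %llu completions: update host index, "
			       "schedule irq_poll\n",
			       (unsigned long long)completed);
	return 0;
}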
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 19158cb59101..480219f0efc5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -67,6 +67,7 @@
#include <scsi/scsi_eh.h>
#include <linux/pci.h>
#include <linux/poll.h>
+#include <linux/irq_poll.h>
#include "mpt3sas_debug.h"
#include "mpt3sas_trigger_diag.h"
@@ -75,9 +76,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "27.102.00.00"
-#define MPT3SAS_MAJOR_VERSION 27
-#define MPT3SAS_MINOR_VERSION 102
+#define MPT3SAS_DRIVER_VERSION "28.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 28
+#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -882,6 +883,9 @@ struct _event_ack_list {
* @reply_post_free: reply post base virt address
* @name: the name registered to request_irq()
* @busy: isr is actively processing replies on another cpu
+ * @os_irq: irq number
+ * @irqpoll: irq_poll object
+ * @irq_poll_scheduled: Tells whether irq poll is scheduled or not
* @list: this list
*/
struct adapter_reply_queue {
@@ -891,6 +895,10 @@ struct adapter_reply_queue {
Mpi2ReplyDescriptorsUnion_t *reply_post_free;
char name[MPT_NAME_LENGTH];
atomic_t busy;
+ u32 os_irq;
+ struct irq_poll irqpoll;
+ bool irq_poll_scheduled;
+ bool irq_line_enable;
struct list_head list;
};
@@ -1016,7 +1024,12 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @msix_vector_count: number msix vectors
* @cpu_msix_table: table for mapping cpus to msix index
* @cpu_msix_table_sz: table size
+ * @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @msix_load_balance: Enables load balancing of interrupts across
+ *				the available MSI-X vectors
* @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @thresh_hold: Max number of reply descriptors processed
+ * before updating Host Index
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -1192,6 +1205,9 @@ struct MPT3SAS_ADAPTER {
u32 ioc_reset_count;
MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
u32 non_operational_loop;
+ atomic64_t total_io_cnt;
+ bool msix_load_balance;
+ u16 thresh_hold;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index b757d389e32f..2c699bc785a8 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -678,7 +678,8 @@ static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
void __iomem *regs = mvi->regs_ex;
- iow32(SPI_DATA_REG_64XX, data);
+
+ iow32(SPI_DATA_REG_64XX, data);
}
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index eb5471bc7263..68b5b5f39a03 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -946,7 +946,8 @@ static u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
- mw32(SPI_RD_DATA_REG_94XX, data);
+
+ mw32(SPI_RD_DATA_REG_94XX, data);
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 311d23c727ce..e933c65d8e0b 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1422,7 +1422,7 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
{
unsigned long flags;
int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
if (mvi_dev->dev_status != MVS_DEV_EH)
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 3df02691a092..a5410615edac 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -752,7 +752,7 @@ static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
spin_lock_irqsave(mhba->shost->host_lock, flags);
atomic_dec(&cmd->sync_cmd);
if (mhba->tag_cmd[cmd->frame->tag]) {
- mhba->tag_cmd[cmd->frame->tag] = 0;
+ mhba->tag_cmd[cmd->frame->tag] = NULL;
dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
cmd->frame->tag);
tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
@@ -1794,7 +1794,7 @@ static void mvumi_handle_clob(struct mvumi_hba *mhba)
cmd = mhba->tag_cmd[ob_frame->tag];
atomic_dec(&mhba->fw_outstanding);
- mhba->tag_cmd[ob_frame->tag] = 0;
+ mhba->tag_cmd[ob_frame->tag] = NULL;
tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
if (cmd->scmd)
mvumi_complete_cmd(mhba, cmd, ob_frame);
@@ -2139,7 +2139,7 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
spin_lock_irqsave(mhba->shost->host_lock, flags);
if (mhba->tag_cmd[cmd->frame->tag]) {
- mhba->tag_cmd[cmd->frame->tag] = 0;
+ mhba->tag_cmd[cmd->frame->tag] = NULL;
tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
}
if (!list_empty(&cmd->queue_pointer))
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index be3c73ebbfde..4bad54463eb2 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -216,12 +216,14 @@ static void osst_analyze_sense(struct osst_request *SRpnt, struct st_cmdstatus *
switch (sense[0] & 0x7f) {
case 0x71:
s->deferred = 1;
+ /* fall through */
case 0x70:
s->fixed_format = 1;
s->flags = sense[2] & 0xe0;
break;
case 0x73:
s->deferred = 1;
+ /* fall through */
case 0x72:
s->fixed_format = 0;
ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
@@ -591,6 +593,7 @@ static void osst_init_aux(struct osst_tape * STp, int frame_type, int frame_seq_
dat->dat_list[0].flags = frame_type==OS_FRAME_TYPE_MARKER?
OS_DAT_FLAGS_MARK:OS_DAT_FLAGS_DATA;
dat->dat_list[0].reserved = 0;
+ /* fall through */
case OS_FRAME_TYPE_EOD:
aux->update_frame_cntr = htonl(0);
par->partition_num = OS_DATA_PARTITION;
@@ -4086,6 +4089,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct osst_request ** aSRpnt,
switch (cmd_in) {
case MTFSFM:
chg_eof = 0; /* Changed from the FSF after this */
+ /* fall through */
case MTFSF:
if (STp->raw)
return (-EIO);
@@ -4101,6 +4105,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct osst_request ** aSRpnt,
case MTBSF:
chg_eof = 0; /* Changed from the FSF after this */
+ /* fall through */
case MTBSFM:
if (STp->raw)
return (-EIO);
@@ -4312,6 +4317,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct osst_request ** aSRpnt,
name, STp->block_size);
return 0;
}
+ /* fall through */
case MTSETDENSITY: /* Set tape density */
case MTSETDRVBUFFER: /* Set drive buffering */
case SET_DENS_AND_BLK: /* Set density and block size */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index d0bb357034d8..109effd3557d 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -960,9 +960,9 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GPIO Output Control Register:"
- " = 0x%x\n", regVal));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GPIO Output Control Register:"
+ " = 0x%x\n", regVal));
/* set GPIO-0 output control to tri-state */
regVal &= 0xFFFFFFFC;
pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
@@ -1204,6 +1204,7 @@ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
}
}
+#ifndef PM8001_USE_MSIX
/**
* pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
@@ -1225,6 +1226,8 @@ pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
}
+#else
+
/**
* pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
@@ -1256,6 +1259,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
msi_index += MSIX_TABLE_BASE;
pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
}
+#endif
/**
* pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
@@ -1266,10 +1270,9 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
- return;
-#endif
+#else
pm8001_chip_intx_interrupt_enable(pm8001_ha);
-
+#endif
}
/**
@@ -1281,10 +1284,9 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
- return;
-#endif
+#else
pm8001_chip_intx_interrupt_disable(pm8001_ha);
-
+#endif
}
/**
@@ -2898,7 +2900,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
static void
mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
- u32 param;
struct sas_task *t;
struct pm8001_ccb_info *ccb;
unsigned long flags;
@@ -2913,7 +2914,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
tag = le32_to_cpu(psmpPayload->tag);
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psmpPayload->param);
t = ccb->task;
ts = &t->task_status;
pm8001_dev = ccb->device;
@@ -2928,7 +2928,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
- if (pm8001_dev)
+ if (pm8001_dev)
pm8001_dev->running_req--;
break;
case IO_ABORTED:
@@ -3244,11 +3244,9 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
{
struct pm8001_phy *phy = &pm8001_ha->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha;
if (!phy->phy_attached)
return;
- sas_ha = pm8001_ha->sas;
if (sas_phy->phy) {
struct sas_phy *sphy = sas_phy->phy;
sphy->negotiated_linkrate = sas_phy->linkrate;
@@ -4627,17 +4625,18 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
return ret;
}
-static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
{
- u32 value;
#ifdef PM8001_USE_MSIX
return 1;
-#endif
+#else
+ u32 value;
+
value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
if (value)
return 1;
return 0;
-
+#endif
}
/**
@@ -5123,7 +5122,7 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
.chip_rst = pm8001_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
.isr = pm8001_chip_isr,
- .is_our_interupt = pm8001_chip_is_our_interupt,
+ .is_our_interrupt = pm8001_chip_is_our_interrupt,
.isr_process_oq = process_oq,
.interrupt_enable = pm8001_chip_interrupt_enable,
.interrupt_disable = pm8001_chip_interrupt_disable,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index a36060c23b37..3374f553c617 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -201,7 +201,7 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
if (unlikely(!pm8001_ha))
return IRQ_NONE;
- if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+ if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
return IRQ_NONE;
#ifdef PM8001_USE_TASKLET
tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
@@ -224,7 +224,7 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
pm8001_ha = sha->lldd_ha;
if (unlikely(!pm8001_ha))
return IRQ_NONE;
- if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+ if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
return IRQ_NONE;
#ifdef PM8001_USE_TASKLET
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 084f2fcced0a..88eef3b18e41 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -740,8 +740,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
wait_for_completion(&task->slow_task->completion);
if (pm8001_ha->chip_id != chip_8001) {
pm8001_dev->setds_completion = &completion_setstate;
- PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x01);
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
wait_for_completion(&completion_setstate);
}
res = -TMF_RESP_FUNC_FAILED;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index f88b0d33c385..ac6d8e3f22de 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -197,7 +197,7 @@ struct pm8001_dispatch {
int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
- u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
+ u32 (*is_our_interrupt)(struct pm8001_hba_info *pm8001_ha);
int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 63e4f7d34d6c..301de40eb708 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -1316,7 +1316,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
- u32 i;
+ u32 i;
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("chip reset start\n"));
@@ -4381,27 +4381,27 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
}
- /* scsi cdb */
- sata_cmd.atapi_scsi_cdb[0] =
- cpu_to_le32(((task->ata_task.atapi_packet[0]) |
- (task->ata_task.atapi_packet[1] << 8) |
- (task->ata_task.atapi_packet[2] << 16) |
- (task->ata_task.atapi_packet[3] << 24)));
- sata_cmd.atapi_scsi_cdb[1] =
- cpu_to_le32(((task->ata_task.atapi_packet[4]) |
- (task->ata_task.atapi_packet[5] << 8) |
- (task->ata_task.atapi_packet[6] << 16) |
- (task->ata_task.atapi_packet[7] << 24)));
- sata_cmd.atapi_scsi_cdb[2] =
- cpu_to_le32(((task->ata_task.atapi_packet[8]) |
- (task->ata_task.atapi_packet[9] << 8) |
- (task->ata_task.atapi_packet[10] << 16) |
- (task->ata_task.atapi_packet[11] << 24)));
- sata_cmd.atapi_scsi_cdb[3] =
- cpu_to_le32(((task->ata_task.atapi_packet[12]) |
- (task->ata_task.atapi_packet[13] << 8) |
- (task->ata_task.atapi_packet[14] << 16) |
- (task->ata_task.atapi_packet[15] << 24)));
+ /* scsi cdb */
+ sata_cmd.atapi_scsi_cdb[0] =
+ cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+ (task->ata_task.atapi_packet[1] << 8) |
+ (task->ata_task.atapi_packet[2] << 16) |
+ (task->ata_task.atapi_packet[3] << 24)));
+ sata_cmd.atapi_scsi_cdb[1] =
+ cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+ (task->ata_task.atapi_packet[5] << 8) |
+ (task->ata_task.atapi_packet[6] << 16) |
+ (task->ata_task.atapi_packet[7] << 24)));
+ sata_cmd.atapi_scsi_cdb[2] =
+ cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+ (task->ata_task.atapi_packet[9] << 8) |
+ (task->ata_task.atapi_packet[10] << 16) |
+ (task->ata_task.atapi_packet[11] << 24)));
+ sata_cmd.atapi_scsi_cdb[3] =
+ cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+ (task->ata_task.atapi_packet[13] << 8) |
+ (task->ata_task.atapi_packet[14] << 16) |
+ (task->ata_task.atapi_packet[15] << 24)));
}
/* Check for read log for failed drive and return */
@@ -4617,17 +4617,18 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
}
-static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
{
- u32 value;
#ifdef PM8001_USE_MSIX
return 1;
-#endif
+#else
+ u32 value;
+
value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
if (value)
return 1;
return 0;
-
+#endif
}
/**
@@ -4724,7 +4725,7 @@ const struct pm8001_dispatch pm8001_80xx_dispatch = {
.chip_rst = pm80xx_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
.isr = pm80xx_chip_isr,
- .is_our_interupt = pm80xx_chip_is_our_interupt,
+ .is_our_interrupt = pm80xx_chip_is_our_interrupt,
.isr_process_oq = process_oq,
.interrupt_enable = pm80xx_chip_interrupt_enable,
.interrupt_disable = pm80xx_chip_interrupt_disable,
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index c182b5458f98..35213082e933 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -717,6 +717,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
}
cmd->SCp.phase++;
}
+ /* fall through */
case 2: /* Phase 2 - We are now talking to the scsi bus */
if (!ppa_select(dev, scmd_id(cmd))) {
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 2c78d8fb9122..2c08f6f4db42 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -35,9 +35,6 @@
#define QEDF_DESCR "QLogic FCoE Offload Driver"
#define QEDF_MODULE_NAME "qedf"
-#define QEDF_MIN_XID 0
-#define QEDF_MAX_SCSI_XID (NUM_TASKS_PER_CONNECTION - 1)
-#define QEDF_MAX_ELS_XID 4095
#define QEDF_FLOGI_RETRY_CNT 3
#define QEDF_RPORT_RETRY_CNT 255
#define QEDF_MAX_SESSIONS 1024
@@ -52,8 +49,8 @@
sizeof(struct fc_frame_header))
#define QEDF_MAX_NPIV 64
#define QEDF_TM_TIMEOUT 10
-#define QEDF_ABORT_TIMEOUT 10
-#define QEDF_CLEANUP_TIMEOUT 10
+#define QEDF_ABORT_TIMEOUT (10 * 1000)
+#define QEDF_CLEANUP_TIMEOUT 1
#define QEDF_MAX_CDB_LEN 16
#define UPSTREAM_REMOVE 1
@@ -85,6 +82,7 @@ struct qedf_els_cb_arg {
};
enum qedf_ioreq_event {
+ QEDF_IOREQ_EV_NONE,
QEDF_IOREQ_EV_ABORT_SUCCESS,
QEDF_IOREQ_EV_ABORT_FAILED,
QEDF_IOREQ_EV_SEND_RRQ,
@@ -105,7 +103,6 @@ struct qedf_ioreq {
struct list_head link;
uint16_t xid;
struct scsi_cmnd *sc_cmd;
- bool use_slowpath; /* Use slow SGL for this I/O */
#define QEDF_SCSI_CMD 1
#define QEDF_TASK_MGMT_CMD 2
#define QEDF_ABTS 3
@@ -117,22 +114,43 @@ struct qedf_ioreq {
#define QEDF_CMD_IN_ABORT 0x1
#define QEDF_CMD_IN_CLEANUP 0x2
#define QEDF_CMD_SRR_SENT 0x3
+#define QEDF_CMD_DIRTY 0x4
+#define QEDF_CMD_ERR_SCSI_DONE 0x5
u8 io_req_flags;
uint8_t tm_flags;
struct qedf_rport *fcport;
+#define QEDF_CMD_ST_INACTIVE 0
+#define QEDFC_CMD_ST_IO_ACTIVE 1
+#define QEDFC_CMD_ST_ABORT_ACTIVE 2
+#define QEDFC_CMD_ST_ABORT_ACTIVE_EH 3
+#define QEDFC_CMD_ST_CLEANUP_ACTIVE 4
+#define QEDFC_CMD_ST_CLEANUP_ACTIVE_EH 5
+#define QEDFC_CMD_ST_RRQ_ACTIVE 6
+#define QEDFC_CMD_ST_RRQ_WAIT 7
+#define QEDFC_CMD_ST_OXID_RETIRE_WAIT 8
+#define QEDFC_CMD_ST_TMF_ACTIVE 9
+#define QEDFC_CMD_ST_DRAIN_ACTIVE 10
+#define QEDFC_CMD_ST_CLEANED 11
+#define QEDFC_CMD_ST_ELS_ACTIVE 12
+ atomic_t state;
unsigned long flags;
enum qedf_ioreq_event event;
size_t data_xfer_len;
+ /* ID: 001: Alloc cmd (qedf_alloc_cmd) */
+ /* ID: 002: Initiate ABTS (qedf_initiate_abts) */
+ /* ID: 003: For RRQ (qedf_process_abts_compl) */
struct kref refcount;
struct qedf_cmd_mgr *cmd_mgr;
struct io_bdt *bd_tbl;
struct delayed_work timeout_work;
struct completion tm_done;
struct completion abts_done;
+ struct completion cleanup_done;
struct e4_fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
+ int lun;
/*
* Need to allocate enough room for both sense data and FCP response data
* which has a max length of 8 bytes according to spec.
@@ -155,9 +173,9 @@ struct qedf_ioreq {
int fp_idx;
unsigned int cpu;
unsigned int int_cpu;
-#define QEDF_IOREQ_SLOW_SGE 0
-#define QEDF_IOREQ_SINGLE_SGE 1
-#define QEDF_IOREQ_FAST_SGE 2
+#define QEDF_IOREQ_UNKNOWN_SGE 1
+#define QEDF_IOREQ_SLOW_SGE 2
+#define QEDF_IOREQ_FAST_SGE 3
u8 sge_type;
struct delayed_work rrq_work;
@@ -172,6 +190,8 @@ struct qedf_ioreq {
* during some form of error processing.
*/
bool return_scsi_cmd_on_abts;
+
+ unsigned int alloc;
};
extern struct workqueue_struct *qedf_io_wq;
@@ -181,7 +201,10 @@ struct qedf_rport {
#define QEDF_RPORT_SESSION_READY 1
#define QEDF_RPORT_UPLOADING_CONNECTION 2
#define QEDF_RPORT_IN_RESET 3
+#define QEDF_RPORT_IN_LUN_RESET 4
+#define QEDF_RPORT_IN_TARGET_RESET 5
unsigned long flags;
+ int lun_reset_lun;
unsigned long retry_delay_timestamp;
struct fc_rport *rport;
struct fc_rport_priv *rdata;
@@ -191,6 +214,7 @@ struct qedf_rport {
void __iomem *p_doorbell;
/* Send queue management */
atomic_t free_sqes;
+ atomic_t ios_to_queue;
atomic_t num_active_ios;
struct fcoe_wqe *sq;
dma_addr_t sq_dma;
@@ -295,8 +319,6 @@ struct qedf_ctx {
#define QEDF_DCBX_PENDING 0
#define QEDF_DCBX_DONE 1
atomic_t dcbx;
- uint16_t max_scsi_xid;
- uint16_t max_els_xid;
#define QEDF_NULL_VLAN_ID -1
#define QEDF_FALLBACK_VLAN 1002
#define QEDF_DEFAULT_PRIO 3
@@ -371,7 +393,6 @@ struct qedf_ctx {
u32 slow_sge_ios;
u32 fast_sge_ios;
- u32 single_sge_ios;
uint8_t *grcdump;
uint32_t grcdump_size;
@@ -396,6 +417,8 @@ struct qedf_ctx {
u8 target_resets;
u8 task_set_fulls;
u8 busy;
+ /* Used for flush routine */
+ struct mutex flush_mutex;
};
struct io_bdt {
@@ -435,6 +458,12 @@ static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
/*
* Externs
*/
+
+/*
+ * (QEDF_LOG_NPIV | QEDF_LOG_SESS | QEDF_LOG_LPORT | QEDF_LOG_ELS | QEDF_LOG_MQ
+ * | QEDF_LOG_IO | QEDF_LOG_UNSOL | QEDF_LOG_SCSI_TM | QEDF_LOG_MP_REQ |
+ * QEDF_LOG_EVT | QEDF_LOG_CONN | QEDF_LOG_DISC | QEDF_LOG_INFO)
+ */
#define QEDF_DEFAULT_LOG_MASK 0x3CFB6
extern const struct qed_fcoe_ops *qed_ops;
extern uint qedf_dump_frames;
@@ -494,7 +523,7 @@ extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
-extern void qedf_wait_for_upload(struct qedf_ctx *qedf);
+bool qedf_wait_for_upload(struct qedf_ctx *qedf);
extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct fcoe_cqe *cqe);
extern void qedf_restart_rport(struct qedf_rport *fcport);
@@ -508,6 +537,8 @@ extern void qedf_get_protocol_tlv_data(void *dev, void *data);
extern void qedf_fp_io_handler(struct work_struct *work);
extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
+void qedf_stag_change_work(struct work_struct *work);
+void qedf_ctx_soft_reset(struct fc_lport *lport);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
index f2397ee9ba69..f7d170bffc82 100644
--- a/drivers/scsi/qedf/qedf_dbg.c
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -15,10 +15,6 @@ qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
{
va_list va;
struct va_format vaf;
- char nfunc[32];
-
- memset(nfunc, 0, sizeof(nfunc));
- memcpy(nfunc, func, sizeof(nfunc) - 1);
va_start(va, fmt);
@@ -27,9 +23,9 @@ qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
if (likely(qedf) && likely(qedf->pdev))
pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
- nfunc, line, qedf->host_no, &vaf);
+ func, line, qedf->host_no, &vaf);
else
- pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
va_end(va);
}
@@ -40,10 +36,6 @@ qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
{
va_list va;
struct va_format vaf;
- char nfunc[32];
-
- memset(nfunc, 0, sizeof(nfunc));
- memcpy(nfunc, func, sizeof(nfunc) - 1);
va_start(va, fmt);
@@ -55,9 +47,9 @@ qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
if (likely(qedf) && likely(qedf->pdev))
pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
- nfunc, line, qedf->host_no, &vaf);
+ func, line, qedf->host_no, &vaf);
else
- pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
ret:
va_end(va);
@@ -69,10 +61,6 @@ qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
{
va_list va;
struct va_format vaf;
- char nfunc[32];
-
- memset(nfunc, 0, sizeof(nfunc));
- memcpy(nfunc, func, sizeof(nfunc) - 1);
va_start(va, fmt);
@@ -84,10 +72,10 @@ qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
if (likely(qedf) && likely(qedf->pdev))
pr_notice("[%s]:[%s:%d]:%d: %pV",
- dev_name(&(qedf->pdev->dev)), nfunc, line,
+ dev_name(&(qedf->pdev->dev)), func, line,
qedf->host_no, &vaf);
else
- pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
ret:
va_end(va);
@@ -99,10 +87,6 @@ qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
{
va_list va;
struct va_format vaf;
- char nfunc[32];
-
- memset(nfunc, 0, sizeof(nfunc));
- memcpy(nfunc, func, sizeof(nfunc) - 1);
va_start(va, fmt);
@@ -114,9 +98,9 @@ qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
if (likely(qedf) && likely(qedf->pdev))
pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
- nfunc, line, qedf->host_no, &vaf);
+ func, line, qedf->host_no, &vaf);
else
- pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
ret:
va_end(va);
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index a32d8ee4666e..235389209689 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -293,6 +293,33 @@ qedf_dbg_io_trace_open(struct inode *inode, struct file *file)
return single_open(file, qedf_io_trace_show, qedf);
}
+/* Based on fip_state enum from libfcoe.h */
+static char *fip_state_names[] = {
+ "FIP_ST_DISABLED",
+ "FIP_ST_LINK_WAIT",
+ "FIP_ST_AUTO",
+ "FIP_ST_NON_FIP",
+ "FIP_ST_ENABLED",
+ "FIP_ST_VNMP_START",
+ "FIP_ST_VNMP_PROBE1",
+ "FIP_ST_VNMP_PROBE2",
+ "FIP_ST_VNMP_CLAIM",
+ "FIP_ST_VNMP_UP",
+};
+
+/* Based on fc_rport_state enum from libfc.h */
+static char *fc_rport_state_names[] = {
+ "RPORT_ST_INIT",
+ "RPORT_ST_FLOGI",
+ "RPORT_ST_PLOGI_WAIT",
+ "RPORT_ST_PLOGI",
+ "RPORT_ST_PRLI",
+ "RPORT_ST_RTV",
+ "RPORT_ST_READY",
+ "RPORT_ST_ADISC",
+ "RPORT_ST_DELETE",
+};
+
static int
qedf_driver_stats_show(struct seq_file *s, void *unused)
{
@@ -300,10 +327,28 @@ qedf_driver_stats_show(struct seq_file *s, void *unused)
struct qedf_rport *fcport;
struct fc_rport_priv *rdata;
+ seq_printf(s, "Host WWNN/WWPN: %016llx/%016llx\n",
+ qedf->wwnn, qedf->wwpn);
+ seq_printf(s, "Host NPortID: %06x\n", qedf->lport->port_id);
+ seq_printf(s, "Link State: %s\n", atomic_read(&qedf->link_state) ?
+ "Up" : "Down");
+ seq_printf(s, "Logical Link State: %s\n", qedf->lport->link_up ?
+ "Up" : "Down");
+ seq_printf(s, "FIP state: %s\n", fip_state_names[qedf->ctlr.state]);
+ seq_printf(s, "FIP VLAN ID: %d\n", qedf->vlan_id & 0xfff);
+ seq_printf(s, "FIP 802.1Q Priority: %d\n", qedf->prio);
+ if (qedf->ctlr.sel_fcf) {
+ seq_printf(s, "FCF WWPN: %016llx\n",
+ qedf->ctlr.sel_fcf->switch_name);
+ seq_printf(s, "FCF MAC: %pM\n", qedf->ctlr.sel_fcf->fcf_mac);
+ } else {
+ seq_puts(s, "FCF not selected\n");
+ }
+
+ seq_puts(s, "\nSGE stats:\n\n");
seq_printf(s, "cmg_mgr free io_reqs: %d\n",
atomic_read(&qedf->cmd_mgr->free_list_cnt));
seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios);
- seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios);
seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios);
seq_puts(s, "Offloaded ports:\n\n");
@@ -313,9 +358,12 @@ qedf_driver_stats_show(struct seq_file *s, void *unused)
rdata = fcport->rdata;
if (rdata == NULL)
continue;
- seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n",
- rdata->ids.port_id, atomic_read(&fcport->free_sqes),
- atomic_read(&fcport->num_active_ios));
+ seq_printf(s, "%016llx/%016llx/%06x: state=%s, free_sqes=%d, num_active_ios=%d\n",
+ rdata->rport->node_name, rdata->rport->port_name,
+ rdata->ids.port_id,
+ fc_rport_state_names[rdata->rp_state],
+ atomic_read(&fcport->free_sqes),
+ atomic_read(&fcport->num_active_ios));
}
rcu_read_unlock();
@@ -361,7 +409,6 @@ qedf_dbg_clear_stats_cmd_write(struct file *filp,
/* Clear stat counters exposed by 'stats' node */
qedf->slow_sge_ios = 0;
- qedf->single_sge_ios = 0;
qedf->fast_sge_ios = 0;
return count;
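The stats node above indexes fip_state_names[] and fc_rport_state_names[] directly with enum values taken from libfcoe/libfc, so the tables have to stay in step with those headers. A defensive lookup along the following lines (a userspace sketch, not the driver's code) keeps a stale table from reading past its end:

#include <stdio.h>
#include <stddef.h>

static const char *name_or_unknown(const char * const names[], size_t n,
				   unsigned int val)
{
	return val < n ? names[val] : "UNKNOWN";
}

int main(void)
{
	static const char * const rport_states[] = {
		"RPORT_ST_INIT", "RPORT_ST_FLOGI", "RPORT_ST_PLOGI_WAIT",
		"RPORT_ST_PLOGI", "RPORT_ST_PRLI", "RPORT_ST_RTV",
		"RPORT_ST_READY", "RPORT_ST_ADISC", "RPORT_ST_DELETE",
	};
	size_t n = sizeof(rport_states) / sizeof(rport_states[0]);

	printf("%s\n", name_or_unknown(rport_states, n, 6));	/* RPORT_ST_READY */
	printf("%s\n", name_or_unknown(rport_states, n, 42));	/* UNKNOWN */
	return 0;
}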
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 04f0c4d2e256..d900c89e8049 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -23,8 +23,6 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
int rc = 0;
uint32_t did, sid;
uint16_t xid;
- uint32_t start_time = jiffies / HZ;
- uint32_t current_time;
struct fcoe_wqe *sqe;
unsigned long flags;
u16 sqe_idx;
@@ -59,18 +57,12 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
goto els_err;
}
-retry_els:
els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
if (!els_req) {
- current_time = jiffies / HZ;
- if ((current_time - start_time) > 10) {
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
- "els: Failed els 0x%x\n", op);
- rc = -ENOMEM;
- goto els_err;
- }
- mdelay(20 * USEC_PER_MSEC);
- goto retry_els;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
+ "Failed to alloc ELS request 0x%x\n", op);
+ rc = -ENOMEM;
+ goto els_err;
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
@@ -143,6 +135,8 @@ retry_els:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
"req\n");
qedf_ring_doorbell(fcport);
+ set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
return rc;
@@ -151,21 +145,16 @@ els_err:
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req)
{
- struct fcoe_task_context *task_ctx;
- struct scsi_cmnd *sc_cmd;
- uint16_t xid;
struct fcoe_cqe_midpath_info *mp_info;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
" cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
+ clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
/* Kill the ELS timer */
cancel_delayed_work(&els_req->timeout_work);
- xid = els_req->xid;
- task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
- sc_cmd = els_req->sc_cmd;
-
/* Get ELS response length from CQE */
mp_info = &cqe->cqe_info.midpath_info;
els_req->mp_req.resp_len = mp_info->data_placement_size;
@@ -205,8 +194,12 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
" orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);
- /* This should return the aborted io_req to the command pool */
- if (orig_io_req)
+ /*
+ * This should return the aborted io_req to the command pool. Note that
+	 * we need to check the refcount in case the original request was
+ * flushed but we get a completion on this xid.
+ */
+ if (orig_io_req && refcount > 0)
kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
@@ -233,6 +226,7 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
uint32_t sid;
uint32_t r_a_tov;
int rc;
+ int refcount;
if (!aborted_io_req) {
QEDF_ERR(NULL, "abort_io_req is NULL.\n");
@@ -241,6 +235,15 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
fcport = aborted_io_req->fcport;
+ if (!fcport) {
+ refcount = kref_read(&aborted_io_req->refcount);
+ QEDF_ERR(NULL,
+ "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
+ aborted_io_req->xid, refcount);
+ kref_put(&aborted_io_req->refcount, qedf_release_cmd);
+ return -EINVAL;
+ }
+
/* Check that fcport is still offloaded */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
@@ -253,6 +256,19 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
}
qedf = fcport->qedf;
+
+ /*
+	 * Sanity check that we can send an RRQ by making sure the refcount
+	 * isn't 0.
+ */
+ refcount = kref_read(&aborted_io_req->refcount);
+ if (refcount != 1) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
+ "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
+ aborted_io_req->xid, aborted_io_req, refcount);
+ return -EINVAL;
+ }
+
lport = qedf->lport;
sid = fcport->sid;
r_a_tov = lport->r_a_tov;
@@ -335,32 +351,49 @@ void qedf_restart_rport(struct qedf_rport *fcport)
struct fc_lport *lport;
struct fc_rport_priv *rdata;
u32 port_id;
+ unsigned long flags;
if (!fcport)
return;
+ spin_lock_irqsave(&fcport->rport_lock, flags);
if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
fcport);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
return;
}
/* Set that we are now in reset */
set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
rdata = fcport->rdata;
- if (rdata) {
+ if (rdata && !kref_get_unless_zero(&rdata->kref)) {
+ fcport->rdata = NULL;
+ rdata = NULL;
+ }
+
+ if (rdata && rdata->rp_state == RPORT_ST_READY) {
lport = fcport->qedf->lport;
port_id = rdata->ids.port_id;
QEDF_ERR(&(fcport->qedf->dbg_ctx),
"LOGO port_id=%x.\n", port_id);
fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ mutex_lock(&lport->disc.disc_mutex);
/* Recreate the rport and log back in */
rdata = fc_rport_create(lport, port_id);
- if (rdata)
+ if (rdata) {
+ mutex_unlock(&lport->disc.disc_mutex);
fc_rport_login(rdata);
+ fcport->rdata = rdata;
+ } else {
+ mutex_unlock(&lport->disc.disc_mutex);
+ fcport->rdata = NULL;
+ }
}
clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
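The kref_get_unless_zero() call introduced above is the usual way to take a reference on an object whose last reference may be dropped concurrently: it only succeeds while the refcount is still non-zero, so the caller never resurrects an object that is already being released. A generic sketch of the pattern (my_obj and my_release are illustrative names, not qedf code):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
	struct kref kref;
	/* ... payload ... */
};

static void my_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_obj, kref));
}

/* Returns the object with an extra reference held, or NULL if it is
 * already on its way to being freed (refcount has hit zero).
 */
static struct my_obj *my_obj_get(struct my_obj *obj)
{
	if (obj && kref_get_unless_zero(&obj->kref))
		return obj;
	return NULL;
}

static void my_obj_put(struct my_obj *obj)
{
	if (obj)
		kref_put(&obj->kref, my_release);
}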
@@ -569,7 +602,7 @@ static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
struct qedf_rport *fcport;
struct fc_lport *lport;
struct qedf_els_cb_arg *cb_arg = NULL;
- u32 sid, r_a_tov;
+ u32 r_a_tov;
int rc;
if (!orig_io_req) {
@@ -595,7 +628,6 @@ static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
qedf = fcport->qedf;
lport = qedf->lport;
- sid = fcport->sid;
r_a_tov = lport->r_a_tov;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 3fd3af799b3d..d4741f8dcb41 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -19,17 +19,16 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
{
struct sk_buff *skb;
char *eth_fr;
- int fr_len;
struct fip_vlan *vlan;
#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
unsigned long flags = 0;
+ int rc = -1;
skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb)
return;
- fr_len = sizeof(*vlan);
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
@@ -68,7 +67,13 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
}
set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags);
- qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
+ rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+ kfree_skb(skb);
+ return;
+ }
+
}
static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
@@ -95,6 +100,12 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
rlen -= dlen;
}
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Dropping VLAN response as link is down.\n");
+ return;
+ }
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, "
"vid=0x%x.\n", vid);
@@ -114,6 +125,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fip_header *fiph;
u16 op, vlan_tci = 0;
u8 sub;
+ int rc = -1;
if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
@@ -142,9 +154,16 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, false);
- qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+ rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+ kfree_skb(skb);
+ return;
+ }
}
+static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
+
/* Process incoming FIP frames. */
void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
{
@@ -157,20 +176,37 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
size_t rlen, dlen;
u16 op;
u8 sub;
- bool do_reset = false;
+ bool fcf_valid = false;
+ /* Default is to handle CVL regardless of fabric id descriptor */
+ bool fabric_id_valid = true;
+ bool fc_wwpn_valid = false;
+ u64 switch_name;
+ u16 vlan = 0;
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: "
- "skb=%p fiph=%p source=%pM op=%x sub=%x", skb, fiph,
- eth_hdr->h_source, op, sub);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+ "FIP frame received: skb=%p fiph=%p source=%pM destn=%pM op=%x sub=%x vlan=%04x",
+ skb, fiph, eth_hdr->h_source, eth_hdr->h_dest, op,
+ sub, vlan);
if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, false);
+ if (!ether_addr_equal(eth_hdr->h_dest, qedf->mac) &&
+ !ether_addr_equal(eth_hdr->h_dest, fcoe_all_enode) &&
+ !ether_addr_equal(eth_hdr->h_dest, qedf->data_src_addr)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+ "Dropping FIP type 0x%x pkt due to destination MAC mismatch dest_mac=%pM ctlr.dest_addr=%pM data_src_addr=%pM.\n",
+ op, eth_hdr->h_dest, qedf->mac,
+ qedf->data_src_addr);
+ kfree_skb(skb);
+ return;
+ }
+
/* Handle FIP VLAN resp in the driver */
if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
qedf_fcoe_process_vlan_resp(qedf, skb);
@@ -199,25 +235,36 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
switch (desc->fip_dtype) {
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "fd_mac=%pM\n", mp->fd_mac);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Switch fd_mac=%pM.\n", mp->fd_mac);
if (ether_addr_equal(mp->fd_mac,
qedf->ctlr.sel_fcf->fcf_mac))
- do_reset = true;
+ fcf_valid = true;
break;
case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "fc_wwpn=%016llx.\n",
- get_unaligned_be64(&wp->fd_wwn));
+ switch_name = get_unaligned_be64(&wp->fd_wwn);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Switch fd_wwn=%016llx fcf_switch_name=%016llx.\n",
+ switch_name,
+ qedf->ctlr.sel_fcf->switch_name);
+ if (switch_name ==
+ qedf->ctlr.sel_fcf->switch_name)
+ fc_wwpn_valid = true;
break;
case FIP_DT_VN_ID:
vp = (struct fip_vn_desc *)desc;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
- if (ntoh24(vp->fd_fc_id) ==
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "vx_port fd_fc_id=%x fd_mac=%pM.\n",
+ ntoh24(vp->fd_fc_id), vp->fd_mac);
+ /* Check vx_port fabric ID */
+ if (ntoh24(vp->fd_fc_id) !=
qedf->lport->port_id)
- do_reset = true;
+ fabric_id_valid = false;
+ /* Check vx_port MAC */
+ if (!ether_addr_equal(vp->fd_mac,
+ qedf->data_src_addr))
+ fabric_id_valid = false;
break;
default:
/* Ignore anything else */
@@ -227,13 +274,11 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
rlen -= dlen;
}
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "do_reset=%d.\n", do_reset);
- if (do_reset) {
- fcoe_ctlr_link_down(&qedf->ctlr);
- qedf_wait_for_upload(qedf);
- fcoe_ctlr_link_up(&qedf->ctlr);
- }
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "fcf_valid=%d fabric_id_valid=%d fc_wwpn_valid=%d.\n",
+ fcf_valid, fabric_id_valid, fc_wwpn_valid);
+ if (fcf_valid && fabric_id_valid && fc_wwpn_valid)
+ qedf_ctx_soft_reset(qedf->lport);
kfree_skb(skb);
} else {
/* Everything else is handled by libfcoe */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 6ca583bdde23..42f9f2a9d8ea 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -43,8 +43,9 @@ static void qedf_cmd_timeout(struct work_struct *work)
switch (io_req->cmd_type) {
case QEDF_ABTS:
if (qedf == NULL) {
- QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
- io_req->xid);
+ QEDF_INFO(NULL, QEDF_LOG_IO,
+ "qedf is NULL for ABTS xid=0x%x.\n",
+ io_req->xid);
return;
}
@@ -61,6 +62,9 @@ static void qedf_cmd_timeout(struct work_struct *work)
*/
kref_put(&io_req->refcount, qedf_release_cmd);
+ /* Clear in abort bit now that we're done with the command */
+ clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+
/*
* Now that the original I/O and the ABTS are complete see
* if we need to reconnect to the target.
@@ -68,6 +72,15 @@ static void qedf_cmd_timeout(struct work_struct *work)
qedf_restart_rport(fcport);
break;
case QEDF_ELS:
+ if (!qedf) {
+ QEDF_INFO(NULL, QEDF_LOG_IO,
+ "qedf is NULL for ELS xid=0x%x.\n",
+ io_req->xid);
+ return;
+ }
+ /* ELS request no longer outstanding since it timed out */
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
kref_get(&io_req->refcount);
/*
	 * Don't attempt to clean an ELS timeout as any subsequent
@@ -103,7 +116,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
struct io_bdt *bdt_info;
struct qedf_ctx *qedf = cmgr->qedf;
size_t bd_tbl_sz;
- u16 min_xid = QEDF_MIN_XID;
+ u16 min_xid = 0;
u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
int num_ios;
int i;
@@ -157,6 +170,7 @@ static void qedf_handle_rrq(struct work_struct *work)
struct qedf_ioreq *io_req =
container_of(work, struct qedf_ioreq, rrq_work.work);
+ atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
qedf_send_rrq(io_req);
}
@@ -169,7 +183,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
u16 xid;
int i;
int num_ios;
- u16 min_xid = QEDF_MIN_XID;
+ u16 min_xid = 0;
u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
/* Make sure num_queues is already set before calling this function */
@@ -201,7 +215,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
/*
* Initialize I/O request fields.
*/
- xid = QEDF_MIN_XID;
+ xid = 0;
for (i = 0; i < num_ios; i++) {
io_req = &cmgr->cmds[i];
@@ -329,7 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
cmd_mgr->idx = 0;
/* Check to make sure command was previously freed */
- if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
+ if (!io_req->alloc)
break;
}
@@ -338,7 +352,14 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
goto out_failed;
}
- set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+ if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
+ QEDF_ERR(&qedf->dbg_ctx,
+ "io_req found to be dirty ox_id = 0x%x.\n",
+ io_req->xid);
+
+ /* Clear any flags now that we've reallocated the xid */
+ io_req->flags = 0;
+ io_req->alloc = 1;
spin_unlock_irqrestore(&cmd_mgr->lock, flags);
atomic_inc(&fcport->num_active_ios);
@@ -349,8 +370,13 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
io_req->cmd_mgr = cmd_mgr;
io_req->fcport = fcport;
+ /* Clear any stale sc_cmd back pointer */
+ io_req->sc_cmd = NULL;
+ io_req->lun = -1;
+
/* Hold the io_req against deletion */
- kref_init(&io_req->refcount);
+ kref_init(&io_req->refcount); /* ID: 001 */
+ atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
/* Bind io_bdt for this io_req */
/* Have a static link between io_req and io_bdt_pool */
@@ -412,6 +438,10 @@ void qedf_release_cmd(struct kref *ref)
container_of(ref, struct qedf_ioreq, refcount);
struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
struct qedf_rport *fcport = io_req->fcport;
+ unsigned long flags;
+
+ if (io_req->cmd_type == QEDF_SCSI_CMD)
+ WARN_ON(io_req->sc_cmd);
if (io_req->cmd_type == QEDF_ELS ||
io_req->cmd_type == QEDF_TASK_MGMT_CMD)
@@ -419,36 +449,20 @@ void qedf_release_cmd(struct kref *ref)
atomic_inc(&cmd_mgr->free_list_cnt);
atomic_dec(&fcport->num_active_ios);
+ atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
if (atomic_read(&fcport->num_active_ios) < 0)
QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
/* Increment task retry identifier now that the request is released */
io_req->task_retry_identifier++;
+ io_req->fcport = NULL;
- clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
-}
-
-static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
- int bd_index)
-{
- struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
- int frag_size, sg_frags;
-
- sg_frags = 0;
- while (sg_len) {
- if (sg_len > QEDF_BD_SPLIT_SZ)
- frag_size = QEDF_BD_SPLIT_SZ;
- else
- frag_size = sg_len;
- bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
- bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
- bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
-
- addr += (u64)frag_size;
- sg_frags++;
- sg_len -= frag_size;
- }
- return sg_frags;
+ clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
+ io_req->cpu = 0;
+ spin_lock_irqsave(&cmd_mgr->lock, flags);
+ io_req->fcport = NULL;
+ io_req->alloc = 0;
+ spin_unlock_irqrestore(&cmd_mgr->lock, flags);
}
static int qedf_map_sg(struct qedf_ioreq *io_req)
@@ -462,75 +476,45 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
int byte_count = 0;
int sg_count = 0;
int bd_count = 0;
- int sg_frags;
- unsigned int sg_len;
+ u32 sg_len;
u64 addr, end_addr;
- int i;
+ int i = 0;
sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
scsi_sg_count(sc), sc->sc_data_direction);
-
sg = scsi_sglist(sc);
- /*
- * New condition to send single SGE as cached-SGL with length less
- * than 64k.
- */
- if ((sg_count == 1) && (sg_dma_len(sg) <=
- QEDF_MAX_SGLEN_FOR_CACHESGL)) {
- sg_len = sg_dma_len(sg);
- addr = (u64)sg_dma_address(sg);
-
- bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
- bd[bd_count].sge_addr.hi = (addr >> 32);
- bd[bd_count].sge_len = (u16)sg_len;
+ io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
- return ++bd_count;
- }
+ if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
scsi_for_each_sg(sc, sg, sg_count, i) {
- sg_len = sg_dma_len(sg);
+ sg_len = (u32)sg_dma_len(sg);
addr = (u64)sg_dma_address(sg);
end_addr = (u64)(addr + sg_len);
/*
- * First s/g element in the list so check if the end_addr
- * is paged aligned. Also check to make sure the length is
- * at least page size.
- */
- if ((i == 0) && (sg_count > 1) &&
- ((end_addr % QEDF_PAGE_SIZE) ||
- sg_len < QEDF_PAGE_SIZE))
- io_req->use_slowpath = true;
- /*
- * Last s/g element so check if the start address is paged
- * aligned.
- */
- else if ((i == (sg_count - 1)) && (sg_count > 1) &&
- (addr % QEDF_PAGE_SIZE))
- io_req->use_slowpath = true;
- /*
* Intermediate s/g element so check if start and end address
- * is page aligned.
+ * is page aligned. Only required for writes and only if the
+ * number of scatter/gather elements is 8 or more.
*/
- else if ((i != 0) && (i != (sg_count - 1)) &&
- ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
- io_req->use_slowpath = true;
+ if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
+ (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
+ io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
- if (sg_len > QEDF_MAX_BD_LEN) {
- sg_frags = qedf_split_bd(io_req, addr, sg_len,
- bd_count);
- } else {
- sg_frags = 1;
- bd[bd_count].sge_addr.lo = U64_LO(addr);
- bd[bd_count].sge_addr.hi = U64_HI(addr);
- bd[bd_count].sge_len = (uint16_t)sg_len;
- }
+ bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
+ bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
+ bd[bd_count].sge_len = cpu_to_le32(sg_len);
- bd_count += sg_frags;
+ bd_count++;
byte_count += sg_len;
}
+ /* To catch a case where FAST and SLOW nothing is set, set FAST */
+ if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
+
if (byte_count != scsi_bufflen(sc))
QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
"scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
@@ -655,8 +639,10 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
io_req->sgl_task_params->num_sges = bd_count;
io_req->sgl_task_params->total_buffer_size =
scsi_bufflen(io_req->sc_cmd);
- io_req->sgl_task_params->small_mid_sge =
- io_req->use_slowpath;
+ if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
+ io_req->sgl_task_params->small_mid_sge = 1;
+ else
+ io_req->sgl_task_params->small_mid_sge = 0;
}
/* Fill in physical address of sense buffer */
@@ -679,16 +665,10 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
io_req->task_retry_identifier, fcp_cmnd);
/* Increment SGL type counters */
- if (bd_count == 1) {
- qedf->single_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
- } else if (io_req->use_slowpath) {
+ if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
- } else {
+ else
qedf->fast_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_FAST_SGE;
- }
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
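The rewritten qedf_map_sg() replaces the old slow-path heuristics with a simpler rule: an I/O gets the fast SGE format if it is a read or has at most 8 scatter/gather elements, and falls back to the slow format only when a middle element of a longer write list is shorter than a page. A userspace sketch of that classification (constants and names here are illustrative, not the driver's):

#include <stdio.h>
#include <stdbool.h>

enum sge_type { SGE_FAST, SGE_SLOW };

/* Reads and short SG lists always take the fast path; longer write lists
 * take the slow path only when an element in the middle of the list is
 * shorter than a page.
 */
static enum sge_type classify(const unsigned int *sg_len, int sg_count,
			      bool is_read, unsigned int page_size)
{
	int i;

	if (sg_count <= 8 || is_read)
		return SGE_FAST;

	for (i = 1; i < sg_count - 1; i++)
		if (sg_len[i] < page_size)
			return SGE_SLOW;

	return SGE_FAST;
}

int main(void)
{
	unsigned int ok[10]   = { 4096, 4096, 4096, 4096, 4096,
				  4096, 4096, 4096, 4096, 512 };
	unsigned int hole[10] = { 4096, 4096, 512, 4096, 4096,
				  4096, 4096, 4096, 4096, 4096 };

	printf("aligned write: %s\n",
	       classify(ok, 10, false, 4096) == SGE_FAST ? "fast" : "slow");
	printf("short middle element: %s\n",
	       classify(hole, 10, false, 4096) == SGE_FAST ? "fast" : "slow");
	return 0;
}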
@@ -770,9 +750,6 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
&task_fc_hdr,
&tx_sgl_task_params,
&rx_sgl_task_params, 0);
-
- /* Midpath requests always consume 1 SGE */
- qedf->single_sge_ios++;
}
/* Presumed that fcport->rport_lock is held */
@@ -804,10 +781,18 @@ void qedf_ring_doorbell(struct qedf_rport *fcport)
FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
dbell.sq_prod = fcport->fw_sq_prod_idx;
+	/* wmb makes sure that the BD data is updated before updating the
+	 * producer; otherwise the FW may read stale data from the BDs.
+ */
+ wmb();
+ barrier();
writel(*(u32 *)&dbell, fcport->p_doorbell);
- /* Make sure SQ index is updated so f/w prcesses requests in order */
+ /*
+ * Fence required to flush the write combined buffer, since another
+ * CPU may write to the same doorbell address and data may be lost
+	 * due to the relaxed-ordering nature of the write-combined BAR.
+ */
wmb();
- mmiowb();
}
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
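The comments added to qedf_ring_doorbell() describe the canonical ordering rule for doorbells on a write-combined BAR: make the queue entries globally visible before the producer index is written, then fence again so the write-combining buffer is flushed. A generic sketch of that pattern (my_ring and db_reg are placeholders, not qedf names):

#include <linux/io.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct my_ring {
	u32		prod_idx;	/* next producer index */
	void __iomem	*db_reg;	/* doorbell register in a WC BAR */
};

static void my_ring_doorbell(struct my_ring *r)
{
	/*
	 * Order the descriptor writes before the doorbell so the device
	 * never reads a producer index that points at stale entries.
	 */
	wmb();
	writel(r->prod_idx, r->db_reg);
	/*
	 * Flush the write-combining buffer so the doorbell cannot linger
	 * and be merged or reordered with a later one.
	 */
	wmb();
}

The mmiowb() removals elsewhere in this series appear to reflect the same kernel release making that barrier implicit in spin_unlock() on the architectures that needed it, so the explicit call is no longer required.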
@@ -872,7 +857,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
/* Initialize rest of io_req fileds */
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
sc_cmd->SCp.ptr = (char *)io_req;
- io_req->use_slowpath = false; /* Assume fast SGL by default */
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
/* Record which cpu this request is associated with */
io_req->cpu = smp_processor_id();
@@ -895,15 +880,24 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
/* Build buffer descriptor list for firmware from sg list */
if (qedf_build_bd_list_from_sg(io_req)) {
QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
+ /* Release cmd will release io_req, but sc_cmd is assigned */
+ io_req->sc_cmd = NULL;
kref_put(&io_req->refcount, qedf_release_cmd);
return -EAGAIN;
}
- if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+ test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+ /* Release cmd will release io_req, but sc_cmd is assigned */
+ io_req->sc_cmd = NULL;
kref_put(&io_req->refcount, qedf_release_cmd);
+ return -EINVAL;
}
+	/* Record LUN number for later use if we need it */
+ io_req->lun = (int)sc_cmd->device->lun;
+
/* Obtain free SQE */
sqe_idx = qedf_get_sqe_idx(fcport);
sqe = &fcport->sq[sqe_idx];
@@ -914,6 +908,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
if (!task_ctx) {
QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
xid);
+ /* Release cmd will release io_req, but sc_cmd is assigned */
+ io_req->sc_cmd = NULL;
kref_put(&io_req->refcount, qedf_release_cmd);
return -EINVAL;
}
@@ -923,6 +919,9 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
/* Ring doorbell */
qedf_ring_doorbell(fcport);
+ /* Set that command is with the firmware now */
+ set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
if (qedf_io_tracing && io_req->sc_cmd)
qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
@@ -941,7 +940,17 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
int rc = 0;
int rval;
unsigned long flags = 0;
-
+ int num_sgs = 0;
+
+ num_sgs = scsi_sg_count(sc_cmd);
+ if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Number of SG elements %d exceeds what hardware limitation of %d.\n",
+ num_sgs, QEDF_MAX_BDS_PER_CMD);
+ sc_cmd->result = DID_ERROR;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
@@ -981,7 +990,8 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
/* rport and tgt are allocated together, so tgt should be non-NULL */
fcport = (struct qedf_rport *)&rp[1];
- if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+ test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
/*
* Session is not offloaded yet. Let SCSI-ml retry
* the command.
@@ -989,12 +999,16 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
rc = SCSI_MLQUEUE_TARGET_BUSY;
goto exit_qcmd;
}
+
+ atomic_inc(&fcport->ios_to_queue);
+
if (fcport->retry_delay_timestamp) {
if (time_after(jiffies, fcport->retry_delay_timestamp)) {
fcport->retry_delay_timestamp = 0;
} else {
/* If retry_delay timer is active, flow off the ML */
rc = SCSI_MLQUEUE_TARGET_BUSY;
+ atomic_dec(&fcport->ios_to_queue);
goto exit_qcmd;
}
}
@@ -1002,6 +1016,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
if (!io_req) {
rc = SCSI_MLQUEUE_HOST_BUSY;
+ atomic_dec(&fcport->ios_to_queue);
goto exit_qcmd;
}
@@ -1016,6 +1031,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
rc = SCSI_MLQUEUE_HOST_BUSY;
}
spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ atomic_dec(&fcport->ios_to_queue);
exit_qcmd:
return rc;
@@ -1092,7 +1108,7 @@ static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
- u16 xid, rval;
+ u16 xid;
struct e4_fcoe_task_context *task_ctx;
struct scsi_cmnd *sc_cmd;
struct fcoe_cqe_rsp_info *fcp_rsp;
@@ -1106,6 +1122,15 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
if (!cqe)
return;
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+ test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+ test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
+ io_req->xid);
+ return;
+ }
+
xid = io_req->xid;
task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
sc_cmd = io_req->sc_cmd;
@@ -1122,6 +1147,12 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
return;
}
+ if (!sc_cmd->device) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Device for sc_cmd %p is NULL.\n", sc_cmd);
+ return;
+ }
+
if (!sc_cmd->request) {
QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
"sc_cmd=%p.\n", sc_cmd);
@@ -1136,6 +1167,19 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
fcport = io_req->fcport;
+ /*
+ * When flush is active, let the cmds be completed from the cleanup
+ * context
+ */
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+ (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
+ sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Dropping good completion xid=0x%x as fcport is flushing",
+ io_req->xid);
+ return;
+ }
+
qedf_parse_fcp_rsp(io_req, fcp_rsp);
qedf_unmap_sg_list(qedf, io_req);
@@ -1153,25 +1197,18 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
FCOE_CQE_RSP_INFO_FW_UNDERRUN);
if (fw_residual_flag) {
- QEDF_ERR(&(qedf->dbg_ctx),
- "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
- "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
- fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
- cqe->cqe_info.rsp_info.fw_residual);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
+ io_req->xid, fcp_rsp->rsp_flags.flags,
+ io_req->fcp_resid,
+ cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
+ sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
if (io_req->cdb_status == 0)
sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
else
sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
- /* Abort the command since we did not get all the data */
- init_completion(&io_req->abts_done);
- rval = qedf_initiate_abts(io_req, true);
- if (rval) {
- QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
- sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
- }
-
/*
* Set resid to the whole buffer length so we won't try to reuse
* any previous data.
@@ -1243,6 +1280,12 @@ out:
if (qedf_io_tracing)
qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
+ /*
+ * We wait till the end of the function to clear the
+ * outstanding bit in case we need to send an abort
+ */
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
io_req->sc_cmd = NULL;
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
@@ -1260,6 +1303,19 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
if (!io_req)
return;
+ if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "io_req:%p scsi_done handling already done\n",
+ io_req);
+ return;
+ }
+
+ /*
+ * We will be done with this command after this call so clear the
+ * outstanding bit.
+ */
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
xid = io_req->xid;
sc_cmd = io_req->sc_cmd;
@@ -1268,12 +1324,50 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
return;
}
+ if (!virt_addr_valid(sc_cmd)) {
+ QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
+ goto bad_scsi_ptr;
+ }
+
if (!sc_cmd->SCp.ptr) {
QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
"another context.\n");
return;
}
+ if (!sc_cmd->device) {
+ QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
+ sc_cmd);
+ goto bad_scsi_ptr;
+ }
+
+ if (!virt_addr_valid(sc_cmd->device)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
+ goto bad_scsi_ptr;
+ }
+
+ if (!sc_cmd->sense_buffer) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
+ sc_cmd);
+ goto bad_scsi_ptr;
+ }
+
+ if (!virt_addr_valid(sc_cmd->sense_buffer)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
+ sc_cmd);
+ goto bad_scsi_ptr;
+ }
+
+ if (!sc_cmd->scsi_done) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
+ sc_cmd);
+ goto bad_scsi_ptr;
+ }
+
qedf_unmap_sg_list(qedf, io_req);
sc_cmd->result = result << 16;
@@ -1300,6 +1394,15 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
+ return;
+
+bad_scsi_ptr:
+ /*
+ * Clear the io_req->sc_cmd backpointer so we don't try to process
+ * this again
+ */
+ io_req->sc_cmd = NULL;
+ kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
}
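The QEDF_CMD_ERR_SCSI_DONE test_and_set_bit() check added above is an idempotence guard: whichever context gets there first finishes the command, and any later caller bails out. A hedged sketch of the same idiom with invented names (DONE_BIT, struct my_req), not the driver's actual flags:

#include <linux/bitops.h>
#include <linux/types.h>

#define DONE_BIT	0

struct my_req {
	unsigned long flags;
};

static bool my_req_complete_once(struct my_req *req)
{
	/* Atomic: only the first caller observes the bit as clear. */
	if (test_and_set_bit(DONE_BIT, &req->flags))
		return false;	/* already completed in another context */

	/* ... one-time completion work goes here ... */
	return true;
}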
/*
@@ -1438,6 +1541,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
struct qedf_ctx *qedf;
struct qedf_cmd_mgr *cmd_mgr;
int i, rc;
+ unsigned long flags;
+ int flush_cnt = 0;
+ int wait_cnt = 100;
+ int refcount = 0;
if (!fcport)
return;
@@ -1449,18 +1556,102 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
}
qedf = fcport->qedf;
+
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is NULL.\n");
+ return;
+ }
+
+ /* Only wait for all commands to be queued in the Upload context */
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
+ (lun == -1)) {
+ while (atomic_read(&fcport->ios_to_queue)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Waiting for %d I/Os to be queued\n",
+ atomic_read(&fcport->ios_to_queue));
+ if (wait_cnt == 0) {
+ QEDF_ERR(NULL,
+ "%d IOs request could not be queued\n",
+ atomic_read(&fcport->ios_to_queue));
+ }
+ msleep(20);
+ wait_cnt--;
+ }
+ }
+
cmd_mgr = qedf->cmd_mgr;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
+ atomic_read(&fcport->num_active_ios), fcport,
+ fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
+
+ mutex_lock(&qedf->flush_mutex);
+ if (lun == -1) {
+ set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
+ } else {
+ set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
+ fcport->lun_reset_lun = lun;
+ }
for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
io_req = &cmd_mgr->cmds[i];
if (!io_req)
continue;
+ if (!io_req->fcport)
+ continue;
+
+ spin_lock_irqsave(&cmd_mgr->lock, flags);
+
+ if (io_req->alloc) {
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
+ if (io_req->cmd_type == QEDF_SCSI_CMD)
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Allocated but not queued, xid=0x%x\n",
+ io_req->xid);
+ }
+ spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+ } else {
+ spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+ continue;
+ }
+
if (io_req->fcport != fcport)
continue;
- if (io_req->cmd_type == QEDF_ELS) {
+
+ /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
+ * but RRQ is still pending.
+ * Workaround: Within qedf_send_rrq, we check if the fcport is
+ * NULL, and we drop the ref on the io_req to clean it up.
+ */
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
+ refcount = kref_read(&io_req->refcount);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
+ io_req->xid, io_req->cmd_type, refcount);
+ /* If RRQ work has been queued, try to cancel it and
+ * free the io_req
+ */
+ if (atomic_read(&io_req->state) ==
+ QEDFC_CMD_ST_RRQ_WAIT) {
+ if (cancel_delayed_work_sync
+ (&io_req->rrq_work)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Putting reference for pending RRQ work xid=0x%x.\n",
+ io_req->xid);
+ /* ID: 003 */
+ kref_put(&io_req->refcount,
+ qedf_release_cmd);
+ }
+ }
+ continue;
+ }
+
+ /* Only consider flushing ELS during target reset */
+ if (io_req->cmd_type == QEDF_ELS &&
+ lun == -1) {
rc = kref_get_unless_zero(&io_req->refcount);
if (!rc) {
QEDF_ERR(&(qedf->dbg_ctx),
@@ -1468,6 +1659,7 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
io_req, io_req->xid);
continue;
}
+ flush_cnt++;
qedf_flush_els_req(qedf, io_req);
/*
* Release the kref and go back to the top of the
@@ -1477,6 +1669,7 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
}
if (io_req->cmd_type == QEDF_ABTS) {
+ /* ID: 004 */
rc = kref_get_unless_zero(&io_req->refcount);
if (!rc) {
QEDF_ERR(&(qedf->dbg_ctx),
@@ -1484,28 +1677,50 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
io_req, io_req->xid);
continue;
}
+ if (lun != -1 && io_req->lun != lun)
+ goto free_cmd;
+
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
"Flushing abort xid=0x%x.\n", io_req->xid);
- clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
-
- if (io_req->sc_cmd) {
- if (io_req->return_scsi_cmd_on_abts)
- qedf_scsi_done(qedf, io_req, DID_ERROR);
+ if (cancel_delayed_work_sync(&io_req->rrq_work)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Putting ref for cancelled RRQ work xid=0x%x.\n",
+ io_req->xid);
+ kref_put(&io_req->refcount, qedf_release_cmd);
}
- /* Notify eh_abort handler that ABTS is complete */
- complete(&io_req->abts_done);
- kref_put(&io_req->refcount, qedf_release_cmd);
-
+ if (cancel_delayed_work_sync(&io_req->timeout_work)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Putting ref for cancelled tmo work xid=0x%x.\n",
+ io_req->xid);
+ qedf_initiate_cleanup(io_req, true);
+ /* Notify eh_abort handler that ABTS is
+ * complete
+ */
+ complete(&io_req->abts_done);
+ clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+ /* ID: 002 */
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ }
+ flush_cnt++;
goto free_cmd;
}
if (!io_req->sc_cmd)
continue;
- if (lun > 0) {
- if (io_req->sc_cmd->device->lun !=
- (u64)lun)
+ if (!io_req->sc_cmd->device) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Device backpointer NULL for sc_cmd=%p.\n",
+ io_req->sc_cmd);
+ /* Put reference for non-existent scsi_cmnd */
+ io_req->sc_cmd = NULL;
+ qedf_initiate_cleanup(io_req, false);
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ continue;
+ }
+ if (lun > -1) {
+ if (io_req->lun != lun)
continue;
}
@@ -1519,15 +1734,65 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
"io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
continue;
}
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
"Cleanup xid=0x%x.\n", io_req->xid);
+ flush_cnt++;
/* Cleanup task and return I/O mid-layer */
qedf_initiate_cleanup(io_req, true);
free_cmd:
- kref_put(&io_req->refcount, qedf_release_cmd);
+ kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
+ }
+
+ wait_cnt = 60;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Flushed 0x%x I/Os, active=0x%x.\n",
+ flush_cnt, atomic_read(&fcport->num_active_ios));
+ /* Only wait for all commands to complete in the Upload context */
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
+ (lun == -1)) {
+ while (atomic_read(&fcport->num_active_ios)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
+ flush_cnt,
+ atomic_read(&fcport->num_active_ios),
+ wait_cnt);
+ if (wait_cnt == 0) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Flushed %d I/Os, active=%d.\n",
+ flush_cnt,
+ atomic_read(&fcport->num_active_ios));
+ for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
+ io_req = &cmd_mgr->cmds[i];
+ if (io_req->fcport &&
+ io_req->fcport == fcport) {
+ refcount =
+ kref_read(&io_req->refcount);
+ set_bit(QEDF_CMD_DIRTY,
+ &io_req->flags);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
+ io_req, io_req->xid,
+ io_req->flags,
+ io_req->sc_cmd,
+ refcount,
+ io_req->cmd_type);
+ }
+ }
+ WARN_ON(1);
+ break;
+ }
+ msleep(500);
+ wait_cnt--;
+ }
}
+
+ clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
+ clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
+ mutex_unlock(&qedf->flush_mutex);
}
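qedf_flush_active_ios() now brackets the flush with two bounded waits, first on ios_to_queue and then on num_active_ios, polling with msleep() and giving up after a fixed number of tries instead of spinning forever. A rough sketch of that bounded-wait idiom follows, with placeholder names (pending, WAIT_RETRIES) and timings that are not the driver's:

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/types.h>

#define WAIT_RETRIES	100

static bool wait_for_drain(atomic_t *pending)
{
	int tries = WAIT_RETRIES;

	while (atomic_read(pending)) {
		if (!tries--)
			return false;	/* give up; the caller logs or WARNs */
		msleep(20);
	}
	return true;
}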
/*
@@ -1546,52 +1811,60 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
unsigned long flags;
struct fcoe_wqe *sqe;
u16 sqe_idx;
+ int refcount = 0;
/* Sanity check qedf_rport before dereferencing any pointers */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "tgt not offloaded\n");
rc = 1;
- goto abts_err;
+ goto out;
}
+ qedf = fcport->qedf;
rdata = fcport->rdata;
+
+ if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+ QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
+ rc = 1;
+ goto out;
+ }
+
r_a_tov = rdata->r_a_tov;
- qedf = fcport->qedf;
lport = qedf->lport;
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
rc = 1;
- goto abts_err;
+ goto drop_rdata_kref;
}
if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
rc = 1;
- goto abts_err;
+ goto drop_rdata_kref;
}
/* Ensure room on SQ */
if (!atomic_read(&fcport->free_sqes)) {
QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
rc = 1;
- goto abts_err;
+ goto drop_rdata_kref;
}
if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
rc = 1;
- goto out;
+ goto drop_rdata_kref;
}
if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
- "cleanup or abort processing or already "
- "completed.\n", io_req->xid);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
+ io_req->xid, io_req->sc_cmd);
rc = 1;
- goto out;
+ goto drop_rdata_kref;
}
kref_get(&io_req->refcount);
@@ -1600,18 +1873,17 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
qedf->control_requests++;
qedf->packet_aborts++;
- /* Set the return CPU to be the same as the request one */
- io_req->cpu = smp_processor_id();
-
/* Set the command type to abort */
io_req->cmd_type = QEDF_ABTS;
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
- "0x%x\n", xid);
+ refcount = kref_read(&io_req->refcount);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+ "ABTS io_req xid = 0x%x refcount=%d\n",
+ xid, refcount);
- qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
+ qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
spin_lock_irqsave(&fcport->rport_lock, flags);
@@ -1625,13 +1897,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
spin_unlock_irqrestore(&fcport->rport_lock, flags);
- return rc;
-abts_err:
- /*
- * If the ABTS task fails to queue then we need to cleanup the
- * task at the firmware.
- */
- qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
+drop_rdata_kref:
+ kref_put(&rdata->kref, fc_rport_destroy);
out:
return rc;
}
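qedf_initiate_abts() now pins the rport data with kref_get_unless_zero() before touching it and drops that reference on every exit path via the drop_rdata_kref label, instead of trusting a possibly stale pointer. A hedged sketch of this take-only-if-still-live pattern, with illustrative names (struct obj, obj_release, use_obj) rather than the libfc types:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, kref));
}

static int use_obj(struct obj *o)
{
	if (!o || !kref_get_unless_zero(&o->kref))
		return -ENODEV;	/* object is already being torn down */

	/* ... safe to use the object here ... */

	kref_put(&o->kref, obj_release);	/* pair every get with a put */
	return 0;
}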
@@ -1641,27 +1908,62 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
{
uint32_t r_ctl;
uint16_t xid;
+ int rc;
+ struct qedf_rport *fcport = io_req->fcport;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
"0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
- cancel_delayed_work(&io_req->timeout_work);
-
xid = io_req->xid;
r_ctl = cqe->cqe_info.abts_info.r_ctl;
+ /* This was added at a point when we were scheduling abts_compl &
+ * cleanup_compl on different CPUs and there was a possibility of
+ * the io_req being freed from the other context before we got here.
+ */
+ if (!fcport) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Dropping ABTS completion xid=0x%x as fcport is NULL",
+ io_req->xid);
+ return;
+ }
+
+ /*
+ * When flush is active, let the cmds be completed from the cleanup
+ * context
+ */
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+ test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Dropping ABTS completion xid=0x%x as fcport is flushing",
+ io_req->xid);
+ return;
+ }
+
+ if (!cancel_delayed_work(&io_req->timeout_work)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Wasn't able to cancel abts timeout work.\n");
+ }
+
switch (r_ctl) {
case FC_RCTL_BA_ACC:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
"ABTS response - ACC Send RRQ after R_A_TOV\n");
io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
+ rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
+ if (!rc) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+ "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
+ io_req->xid);
+ return;
+ }
/*
* Don't release this cmd yet. It will be released
* after we get the RRQ response
*/
- kref_get(&io_req->refcount);
queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
msecs_to_jiffies(qedf->lport->r_a_tov));
+ atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
break;
/* For error cases let the cleanup return the command */
case FC_RCTL_BA_RJT:
@@ -1803,6 +2105,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
unsigned long flags;
struct fcoe_wqe *sqe;
u16 sqe_idx;
+ int refcount = 0;
fcport = io_req->fcport;
if (!fcport) {
@@ -1824,36 +2127,45 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
}
if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
- test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
+ test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
"cleanup processing or already completed.\n",
io_req->xid);
return SUCCESS;
}
+ set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
/* Ensure room on SQ */
if (!atomic_read(&fcport->free_sqes)) {
QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
+ /* Need to make sure we clear the flag since it was set */
+ clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
return FAILED;
}
+ if (io_req->cmd_type == QEDF_CLEANUP) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
+ io_req->xid, io_req->cmd_type);
+ clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+ return SUCCESS;
+ }
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
- io_req->xid);
+ refcount = kref_read(&io_req->refcount);
+
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
+ io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
+ refcount, fcport, fcport->rdata->ids.port_id);
/* Cleanup cmds re-use the same TID as the original I/O */
xid = io_req->xid;
io_req->cmd_type = QEDF_CLEANUP;
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
- /* Set the return CPU to be the same as the request one */
- io_req->cpu = smp_processor_id();
-
- set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
-
task = qedf_get_task_mem(&qedf->tasks, xid);
- init_completion(&io_req->tm_done);
+ init_completion(&io_req->cleanup_done);
spin_lock_irqsave(&fcport->rport_lock, flags);
@@ -1867,8 +2179,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
spin_unlock_irqrestore(&fcport->rport_lock, flags);
- tmo = wait_for_completion_timeout(&io_req->tm_done,
- QEDF_CLEANUP_TIMEOUT * HZ);
+ tmo = wait_for_completion_timeout(&io_req->cleanup_done,
+ QEDF_CLEANUP_TIMEOUT * HZ);
if (!tmo) {
rc = FAILED;
@@ -1881,6 +2193,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
qedf_drain_request(qedf);
}
+ /* If it is a TASK MGMT command, handle it; the reference will be
+ * decreased in qedf_execute_tmf
+ */
+ if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
+ io_req->tm_flags == FCP_TMF_TGT_RESET) {
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+ io_req->sc_cmd = NULL;
+ complete(&io_req->tm_done);
+ }
+
if (io_req->sc_cmd) {
if (io_req->return_scsi_cmd_on_abts)
qedf_scsi_done(qedf, io_req, DID_ERROR);
@@ -1903,7 +2225,7 @@ void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
/* Complete so we can finish cleaning up the I/O */
- complete(&io_req->tm_done);
+ complete(&io_req->cleanup_done);
}
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
@@ -1916,6 +2238,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
int rc = 0;
uint16_t xid;
int tmo = 0;
+ int lun = 0;
unsigned long flags;
struct fcoe_wqe *sqe;
u16 sqe_idx;
@@ -1925,20 +2248,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
return FAILED;
}
+ lun = (int)sc_cmd->device->lun;
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
rc = FAILED;
- return FAILED;
+ goto no_flush;
}
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
- "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
-
io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
if (!io_req) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
rc = -EAGAIN;
- goto reset_tmf_err;
+ goto no_flush;
}
if (tm_flags == FCP_TMF_LUN_RESET)
@@ -1951,7 +2272,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
io_req->fcport = fcport;
io_req->cmd_type = QEDF_TASK_MGMT_CMD;
- /* Set the return CPU to be the same as the request one */
+ /* Record which cpu this request is associated with */
io_req->cpu = smp_processor_id();
/* Set TM flags */
@@ -1960,7 +2281,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
io_req->tm_flags = tm_flags;
/* Default is to return a SCSI command when an error occurs */
- io_req->return_scsi_cmd_on_abts = true;
+ io_req->return_scsi_cmd_on_abts = false;
/* Obtain exchange id */
xid = io_req->xid;
@@ -1984,12 +2305,16 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
QEDF_TM_TIMEOUT * HZ);
if (!tmo) {
rc = FAILED;
QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
+ /* Clear outstanding bit since command timed out */
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+ io_req->sc_cmd = NULL;
} else {
/* Check TMF response code */
if (io_req->fcp_rsp_code == 0)
@@ -1997,14 +2322,25 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
else
rc = FAILED;
}
+ /*
+ * Double check that fcport has not gone into an uploading state before
+ * executing the command flush for the LUN/target.
+ */
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "fcport is uploading, not executing flush.\n");
+ goto no_flush;
+ }
+ /* We do not need this io_req any more */
+ kref_put(&io_req->refcount, qedf_release_cmd);
+
if (tm_flags == FCP_TMF_LUN_RESET)
- qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
+ qedf_flush_active_ios(fcport, lun);
else
qedf_flush_active_ios(fcport, -1);
- kref_put(&io_req->refcount, qedf_release_cmd);
-
+no_flush:
if (rc != SUCCESS) {
QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
rc = FAILED;
@@ -2012,7 +2348,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
rc = SUCCESS;
}
-reset_tmf_err:
return rc;
}
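qedf_execute_tmf() waits on a completion with a timeout and only treats the TMF as done if the firmware answered; on timeout it clears QEDF_CMD_OUTSTANDING and detaches sc_cmd. A minimal sketch of that handshake, with placeholder names (MY_TM_TIMEOUT, issue_and_wait) and no claim about the driver's real timeout values:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define MY_TM_TIMEOUT	10	/* seconds, illustrative only */

static int issue_and_wait(struct completion *done)
{
	unsigned long tmo;

	init_completion(done);

	/* ... post the task-management request to the hardware here ... */

	tmo = wait_for_completion_timeout(done, MY_TM_TIMEOUT * HZ);
	if (!tmo)
		return -ETIMEDOUT;	/* firmware never responded */
	return 0;
}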
@@ -2022,26 +2357,65 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
struct qedf_ctx *qedf;
- struct fc_lport *lport;
+ struct fc_lport *lport = shost_priv(sc_cmd->device->host);
int rc = SUCCESS;
int rval;
+ struct qedf_ioreq *io_req = NULL;
+ int ref_cnt = 0;
+ struct fc_rport_priv *rdata = fcport->rdata;
- rval = fc_remote_port_chkready(rport);
+ QEDF_ERR(NULL,
+ "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
+ tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
+ (int)sc_cmd->device->lun);
+
+ if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+ QEDF_ERR(NULL, "stale rport\n");
+ return FAILED;
+ }
+
+ QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
+ (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
+ "LUN RESET");
+
+ if (sc_cmd->SCp.ptr) {
+ io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ ref_cnt = kref_read(&io_req->refcount);
+ QEDF_ERR(NULL,
+ "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
+ io_req, io_req->xid, ref_cnt);
+ }
+ rval = fc_remote_port_chkready(rport);
if (rval) {
QEDF_ERR(NULL, "device_reset rport not ready\n");
rc = FAILED;
goto tmf_err;
}
- if (fcport == NULL) {
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ goto tmf_err;
+
+ if (!fcport) {
QEDF_ERR(NULL, "device_reset: rport is NULL\n");
rc = FAILED;
goto tmf_err;
}
qedf = fcport->qedf;
- lport = qedf->lport;
+
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is NULL.\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
+ rc = SUCCESS;
+ goto tmf_err;
+ }
if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
@@ -2055,9 +2429,22 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
goto tmf_err;
}
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+ if (!fcport->rdata)
+ QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
+ fcport);
+ else
+ QEDF_ERR(&qedf->dbg_ctx,
+ "fcport %p port_id=%06x is uploading.\n",
+ fcport, fcport->rdata->ids.port_id);
+ rc = FAILED;
+ goto tmf_err;
+ }
+
rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
tmf_err:
+ kref_put(&rdata->kref, fc_rport_destroy);
return rc;
}
@@ -2066,6 +2453,8 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
{
struct fcoe_cqe_rsp_info *fcp_rsp;
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
fcp_rsp = &cqe->cqe_info.rsp_info;
qedf_parse_fcp_rsp(io_req, fcp_rsp);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 9f9431a4cc0e..5b07235497c6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -124,21 +124,24 @@ static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
{
int rc;
- if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
- QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
- return false;
- }
-
while (qedf->fipvlan_retries--) {
+ /* This is to catch the link going down during fipvlan retries */
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+ QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
+ return false;
+ }
+
if (qedf->vlan_id > 0)
return true;
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Retry %d.\n", qedf->fipvlan_retries);
init_completion(&qedf->fipvlan_compl);
qedf_fcoe_send_vlan_req(qedf);
rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
1 * HZ);
- if (rc > 0) {
+ if (rc > 0 &&
+ (atomic_read(&qedf->link_state) == QEDF_LINK_UP)) {
fcoe_ctlr_link_up(&qedf->ctlr);
return true;
}
@@ -153,12 +156,19 @@ static void qedf_handle_link_update(struct work_struct *work)
container_of(work, struct qedf_ctx, link_update.work);
int rc;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
+ atomic_read(&qedf->link_state));
if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
rc = qedf_initiate_fipvlan_req(qedf);
if (rc)
return;
+
+ if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+ qedf->vlan_id = 0;
+ return;
+ }
+
/*
* If we get here then we never received a response to our
* fip vlan request so set the vlan_id to the default and
@@ -185,7 +195,9 @@ static void qedf_handle_link_update(struct work_struct *work)
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Calling fcoe_ctlr_link_down().\n");
fcoe_ctlr_link_down(&qedf->ctlr);
- qedf_wait_for_upload(qedf);
+ if (qedf_wait_for_upload(qedf) == false)
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Could not upload all sessions.\n");
/* Reset the number of FIP VLAN retries */
qedf->fipvlan_retries = qedf_fipvlan_retries;
}
@@ -615,50 +627,113 @@ static struct scsi_transport_template *qedf_fc_vport_transport_template;
static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
- struct qedf_rport *fcport;
struct fc_lport *lport;
struct qedf_ctx *qedf;
struct qedf_ioreq *io_req;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_rport_priv *rdata;
+ struct qedf_rport *fcport = NULL;
int rc = FAILED;
+ int wait_count = 100;
+ int refcount = 0;
int rval;
-
- if (fc_remote_port_chkready(rport)) {
- QEDF_ERR(NULL, "rport not ready\n");
- goto out;
- }
+ int got_ref = 0;
lport = shost_priv(sc_cmd->device->host);
qedf = (struct qedf_ctx *)lport_priv(lport);
- if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
- QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ fcport = (struct qedf_rport *)&rp[1];
+ rdata = fcport->rdata;
+ if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+ QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
+ rc = 1;
goto out;
}
- fcport = (struct qedf_rport *)&rp[1];
io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
if (!io_req) {
- QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
+ QEDF_ERR(&qedf->dbg_ctx,
+ "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
+ sc_cmd, sc_cmd->cmnd[0],
+ rdata->ids.port_id);
rc = SUCCESS;
- goto out;
+ goto drop_rdata_kref;
+ }
+
+ rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
+ if (rval)
+ got_ref = 1;
+
+ /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
+ if (!rval || io_req->sc_cmd != sc_cmd) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
+ io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
+
+ goto drop_rdata_kref;
+ }
+
+ if (fc_remote_port_chkready(rport)) {
+ refcount = kref_read(&io_req->refcount);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
+ io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
+ refcount, rdata->ids.port_id);
+
+ goto drop_rdata_kref;
+ }
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ goto drop_rdata_kref;
+
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Connection uploading, xid=0x%x., port_id=%06x\n",
+ io_req->xid, rdata->ids.port_id);
+ while (io_req->sc_cmd && (wait_count != 0)) {
+ msleep(100);
+ wait_count--;
+ }
+ if (wait_count) {
+ QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
+ rc = SUCCESS;
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
+ rc = FAILED;
+ }
+ goto drop_rdata_kref;
}
- QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
- "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
+ goto drop_rdata_kref;
+ }
+
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
+ io_req, sc_cmd, io_req->xid, io_req->fp_idx,
+ rdata->ids.port_id);
if (qedf->stop_io_on_error) {
qedf_stop_all_io(qedf);
rc = SUCCESS;
- goto out;
+ goto drop_rdata_kref;
}
init_completion(&io_req->abts_done);
rval = qedf_initiate_abts(io_req, true);
if (rval) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
- goto out;
+ /*
+ * If we fail to queue the ABTS then return this command to
+ * the SCSI layer as it will own and free the xid
+ */
+ rc = SUCCESS;
+ qedf_scsi_done(qedf, io_req, DID_ERROR);
+ goto drop_rdata_kref;
}
wait_for_completion(&io_req->abts_done);
@@ -684,38 +759,68 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
io_req->xid);
+drop_rdata_kref:
+ kref_put(&rdata->kref, fc_rport_destroy);
out:
+ if (got_ref)
+ kref_put(&io_req->refcount, qedf_release_cmd);
return rc;
}
static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
- QEDF_ERR(NULL, "TARGET RESET Issued...");
+ QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
+ sc_cmd->device->host->host_no, sc_cmd->device->id,
+ sc_cmd->device->lun);
return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}
static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
- QEDF_ERR(NULL, "LUN RESET Issued...\n");
+ QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
+ sc_cmd->device->host->host_no, sc_cmd->device->id,
+ sc_cmd->device->lun);
return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
-void qedf_wait_for_upload(struct qedf_ctx *qedf)
+bool qedf_wait_for_upload(struct qedf_ctx *qedf)
{
- while (1) {
+ struct qedf_rport *fcport = NULL;
+ int wait_cnt = 120;
+
+ while (wait_cnt--) {
if (atomic_read(&qedf->num_offloads))
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
- "Waiting for all uploads to complete.\n");
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Waiting for all uploads to complete num_offloads = 0x%x.\n",
+ atomic_read(&qedf->num_offloads));
else
- break;
+ return true;
msleep(500);
}
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+ if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
+ &fcport->flags)) {
+ if (fcport->rdata)
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Waiting for fcport %p portid=%06x.\n",
+ fcport, fcport->rdata->ids.port_id);
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Waiting for fcport %p.\n", fcport);
+ }
+ }
+ rcu_read_unlock();
+ return false;
+
}
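When the upload wait times out, the new code walks qedf->fcports under rcu_read_lock() to report which sessions are still present. Below is a sketch of that read-side RCU list traversal with invented types (struct item, report_items); the real driver iterates struct qedf_rport via its peers member:

#include <linux/printk.h>
#include <linux/rculist.h>

struct item {
	struct list_head peers;
	int id;
};

static void report_items(struct list_head *head)
{
	struct item *it;

	rcu_read_lock();
	/* Readers may run concurrently with list updates on other CPUs. */
	list_for_each_entry_rcu(it, head, peers)
		pr_info("item %d still present\n", it->id);
	rcu_read_unlock();
}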
/* Performs soft reset of qedf_ctx by simulating a link down/up */
-static void qedf_ctx_soft_reset(struct fc_lport *lport)
+void qedf_ctx_soft_reset(struct fc_lport *lport)
{
struct qedf_ctx *qedf;
+ struct qed_link_output if_link;
if (lport->vport) {
QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
@@ -726,11 +831,32 @@ static void qedf_ctx_soft_reset(struct fc_lport *lport)
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Queuing link down work.\n");
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
- qedf_wait_for_upload(qedf);
+
+ if (qedf_wait_for_upload(qedf) == false) {
+ QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
+ WARN_ON(atomic_read(&qedf->num_offloads));
+ }
+
+ /* Before setting link up query physical link state */
+ qed_ops->common->get_link(qedf->cdev, &if_link);
+ /* Bail if the physical link is not up */
+ if (!if_link.link_up) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Physical link is not up.\n");
+ return;
+ }
+ /* Flush and wait to make sure link down is processed */
+ flush_delayed_work(&qedf->link_update);
+ msleep(500);
+
atomic_set(&qedf->link_state, QEDF_LINK_UP);
qedf->vlan_id = 0;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Queue link up work.\n");
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
}
@@ -740,22 +866,6 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
struct fc_lport *lport;
struct qedf_ctx *qedf;
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
- struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
- int rval;
-
- rval = fc_remote_port_chkready(rport);
-
- if (rval) {
- QEDF_ERR(NULL, "device_reset rport not ready\n");
- return FAILED;
- }
-
- if (fcport == NULL) {
- QEDF_ERR(NULL, "device_reset: rport is NULL\n");
- return FAILED;
- }
lport = shost_priv(sc_cmd->device->host);
qedf = lport_priv(lport);
@@ -907,8 +1017,10 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
"Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
kfree_skb(skb);
rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
- if (rdata)
+ if (rdata) {
rdata->retries = lport->max_rport_retry_count;
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
return -EINVAL;
}
/* End NPIV filtering */
@@ -1031,7 +1143,12 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
1, skb->data, skb->len, false);
- qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+ rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+ kfree_skb(skb);
+ return rc;
+ }
return 0;
}
@@ -1224,6 +1341,8 @@ static void qedf_upload_connection(struct qedf_ctx *qedf,
static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
struct qedf_rport *fcport)
{
+ struct fc_rport_priv *rdata = fcport->rdata;
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
fcport->rdata->ids.port_id);
@@ -1235,6 +1354,7 @@ static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
qedf_free_sq(qedf, fcport);
fcport->rdata = NULL;
fcport->qedf = NULL;
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1310,6 +1430,8 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
break;
}
+ /* Initial reference held on entry, so this can't fail */
+ kref_get(&rdata->kref);
fcport->rdata = rdata;
fcport->rport = rport;
@@ -1369,11 +1491,15 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
*/
fcport = (struct qedf_rport *)&rp[1];
+ spin_lock_irqsave(&fcport->rport_lock, flags);
/* Only free this fcport if it is offloaded already */
- if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
+ if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
+ !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+ &fcport->flags)) {
+ set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+ &fcport->flags);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
qedf_cleanup_fcport(qedf, fcport);
-
/*
* Remove fcport to list of qedf_ctx list of offloaded
* ports
@@ -1385,8 +1511,9 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
&fcport->flags);
atomic_dec(&qedf->num_offloads);
+ } else {
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
-
break;
case RPORT_EV_NONE:
@@ -1498,11 +1625,15 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
fc_set_wwnn(lport, qedf->wwnn);
fc_set_wwpn(lport, qedf->wwpn);
- fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
+ if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "fcoe_libfc_config failed.\n");
+ return -ENOMEM;
+ }
/* Allocate the exchange manager */
- fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
- qedf->max_els_xid, NULL);
+ fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
+ 0xfffe, NULL);
if (fc_lport_init_stats(lport))
return -ENOMEM;
@@ -1625,14 +1756,15 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->wwpn = vn_port->wwpn;
vn_port->host->transportt = qedf_fc_vport_transport_template;
- vn_port->host->can_queue = QEDF_MAX_ELS_XID;
+ vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
vn_port->host->max_lun = qedf_max_lun;
vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
rc = scsi_add_host(vn_port->host, &vport->dev);
if (rc) {
- QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
+ QEDF_WARN(&base_qedf->dbg_ctx,
+ "Error adding Scsi_Host rc=0x%x.\n", rc);
goto err2;
}
@@ -2155,7 +2287,8 @@ static int qedf_setup_int(struct qedf_ctx *qedf)
QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
qedf->int_info.used_cnt = 1;
- QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Cannot load driver due to a lack of MSI-X vectors.\n");
return -EINVAL;
}
@@ -2356,6 +2489,13 @@ static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
struct qedf_skb_work *skb_work;
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+ "Dropping frame as link state is down.\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
if (!skb_work) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
@@ -2990,6 +3130,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
goto err0;
}
+ fc_disc_init(lport);
+
/* Initialize qedf_ctx */
qedf = lport_priv(lport);
qedf->lport = lport;
@@ -3005,6 +3147,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
pci_set_drvdata(pdev, qedf);
init_completion(&qedf->fipvlan_compl);
mutex_init(&qedf->stats_mutex);
+ mutex_init(&qedf->flush_mutex);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
@@ -3181,11 +3324,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
sprintf(host_buf, "host_%d", host->host_no);
qed_ops->common->set_name(qedf->cdev, host_buf);
-
- /* Set xid max values */
- qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
- qedf->max_els_xid = QEDF_MAX_ELS_XID;
-
/* Allocate cmd mgr */
qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
if (!qedf->cmd_mgr) {
@@ -3196,12 +3334,15 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
if (mode != QEDF_MODE_RECOVERY) {
host->transportt = qedf_fc_transport_template;
- host->can_queue = QEDF_MAX_ELS_XID;
host->max_lun = qedf_max_lun;
host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ host->can_queue = FCOE_PARAMS_NUM_TASKS;
rc = scsi_add_host(host, &pdev->dev);
- if (rc)
+ if (rc) {
+ QEDF_WARN(&qedf->dbg_ctx,
+ "Error adding Scsi_Host rc=0x%x.\n", rc);
goto err6;
+ }
}
memset(&params, 0, sizeof(params));
@@ -3377,7 +3518,9 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
fcoe_ctlr_link_down(&qedf->ctlr);
else
fc_fabric_logoff(qedf->lport);
- qedf_wait_for_upload(qedf);
+
+ if (qedf_wait_for_upload(qedf) == false)
+ QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
#ifdef CONFIG_DEBUG_FS
qedf_dbg_host_exit(&(qedf->dbg_ctx));
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 9455faacd5de..334a9cdf346a 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -7,9 +7,9 @@
* this source tree.
*/
-#define QEDF_VERSION "8.33.16.20"
+#define QEDF_VERSION "8.37.25.20"
#define QEDF_DRIVER_MAJOR_VER 8
-#define QEDF_DRIVER_MINOR_VER 33
-#define QEDF_DRIVER_REV_VER 16
+#define QEDF_DRIVER_MINOR_VER 37
+#define QEDF_DRIVER_REV_VER 25
#define QEDF_DRIVER_ENG_VER 20
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index e2a995a6e8e7..bd81bbee61e6 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -155,12 +155,10 @@ static void qedi_tmf_resp_work(struct work_struct *work)
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
struct iscsi_tm_rsp *resp_hdr_ptr;
- struct iscsi_cls_session *cls_sess;
int rval = 0;
set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
- cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
iscsi_block_session(session->cls_session);
rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
@@ -985,7 +983,6 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
* others they are two different assembly operations.
*/
wmb();
- mmiowb();
QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
"prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
@@ -1367,7 +1364,6 @@ static void qedi_tmf_work(struct work_struct *work)
struct qedi_conn *qedi_conn = qedi_cmd->conn;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
- struct iscsi_cls_session *cls_sess;
struct qedi_work_map *list_work = NULL;
struct iscsi_task *mtask;
struct qedi_cmd *cmd;
@@ -1378,7 +1374,6 @@ static void qedi_tmf_work(struct work_struct *work)
mtask = qedi_cmd->task;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 6d6d6013e35b..615cea4fad56 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -579,7 +579,7 @@ static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
rval = qedi_iscsi_update_conn(qedi, qedi_conn);
if (rval) {
iscsi_conn_printk(KERN_ALERT, conn,
- "conn_start: FW oflload conn failed.\n");
+ "conn_start: FW offload conn failed.\n");
rval = -EINVAL;
goto start_err;
}
@@ -590,7 +590,7 @@ static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
rval = iscsi_conn_start(cls_conn);
if (rval) {
iscsi_conn_printk(KERN_ALERT, conn,
- "iscsi_conn_start: FW oflload conn failed!!\n");
+ "iscsi_conn_start: FW offload conn failed!!\n");
}
start_err:
@@ -993,13 +993,17 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
struct iscsi_conn *conn = NULL;
struct qedi_ctx *qedi;
int ret = 0;
- int wait_delay = 20 * HZ;
+ int wait_delay;
int abrt_conn = 0;
int count = 10;
+ wait_delay = 60 * HZ + DEF_MAX_RT_TIME;
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
+ if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+ goto ep_exit_recover;
+
flush_work(&qedi_ep->offload_work);
if (qedi_ep->conn) {
@@ -1163,7 +1167,7 @@ static void qedi_offload_work(struct work_struct *work)
struct qedi_endpoint *qedi_ep =
container_of(work, struct qedi_endpoint, offload_work);
struct qedi_ctx *qedi;
- int wait_delay = 20 * HZ;
+ int wait_delay = 5 * HZ;
int ret;
qedi = qedi_ep->qedi;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e74a62448ba4..e5db9a9954dc 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
- struct qedi_nvm_iscsi_image nvm_image;
-
qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(nvm_image),
+ sizeof(struct qedi_nvm_iscsi_image),
&qedi->nvm_buf_dma, GFP_KERNEL);
if (!qedi->iscsi_image) {
QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
static int qedi_get_boot_info(struct qedi_ctx *qedi)
{
int ret = 1;
- struct qedi_nvm_iscsi_image nvm_image;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Get NVM iSCSI CFG image\n");
ret = qedi_ops->common->nvm_get_image(qedi->cdev,
QED_NVM_IMAGE_ISCSI_CFG,
(char *)qedi->iscsi_image,
- sizeof(nvm_image));
+ sizeof(struct qedi_nvm_iscsi_image));
if (ret)
QEDI_ERR(&qedi->dbg_ctx,
"Could not get NVM image. ret = %d\n", ret);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 6856dfdfa473..327eff67a1ee 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -3004,8 +3004,6 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
sp->flags |= SRB_SENT;
ha->actthreads++;
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
- mmiowb();
out:
if (status)
@@ -3254,8 +3252,6 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
sp->flags |= SRB_SENT;
ha->actthreads++;
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
- mmiowb();
out:
if (status)
@@ -3367,19 +3363,8 @@ qla1280_isp_cmd(struct scsi_qla_host *ha)
/*
* Update request index to mailbox4 (Request Queue In).
- * The mmiowb() ensures that this write is ordered with writes by other
- * CPUs. Without the mmiowb(), it is possible for the following:
- * CPUA posts write of index 5 to mailbox4
- * CPUA releases host lock
- * CPUB acquires host lock
- * CPUB posts write of index 6 to mailbox4
- * On PCI bus, order reverses and write of 6 posts, then index 5,
- * causing chip to issue full queue of stale commands
- * The mmiowb() prevents future writes from crossing the barrier.
- * See Documentation/driver-api/device-io.rst for more information.
*/
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- mmiowb();
LEAVE("qla1280_isp_cmd");
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f928c4d3a1ef..8d560c562e9c 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -29,24 +29,27 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
return 0;
+ mutex_lock(&ha->optrom_mutex);
if (IS_P3P_TYPE(ha)) {
if (off < ha->md_template_size) {
rval = memory_read_from_buffer(buf, count,
&off, ha->md_tmplt_hdr, ha->md_template_size);
- return rval;
+ } else {
+ off -= ha->md_template_size;
+ rval = memory_read_from_buffer(buf, count,
+ &off, ha->md_dump, ha->md_dump_size);
}
- off -= ha->md_template_size;
- rval = memory_read_from_buffer(buf, count,
- &off, ha->md_dump, ha->md_dump_size);
- return rval;
- } else if (ha->mctp_dumped && ha->mctp_dump_reading)
- return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
+ } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
+ rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
MCTP_DUMP_SIZE);
- else if (ha->fw_dump_reading)
- return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ } else if (ha->fw_dump_reading) {
+ rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
ha->fw_dump_len);
- else
- return 0;
+ } else {
+ rval = 0;
+ }
+ mutex_unlock(&ha->optrom_mutex);
+ return rval;
}
static ssize_t
@@ -154,6 +157,8 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ uint32_t faddr;
+ struct active_regions active_regions = { };
if (!capable(CAP_SYS_ADMIN))
return 0;
@@ -164,11 +169,21 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
return -EAGAIN;
}
- if (IS_NOCACHE_VPD_TYPE(ha))
- ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
- ha->nvram_size);
+ if (!IS_NOCACHE_VPD_TYPE(ha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ goto skip;
+ }
+
+ faddr = ha->flt_region_nvram;
+ if (IS_QLA28XX(ha)) {
+ if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_nvram_sec;
+ }
+ ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
+
mutex_unlock(&ha->optrom_mutex);
+skip:
return memory_read_from_buffer(buf, count, &off, ha->nvram,
ha->nvram_size);
}
@@ -223,9 +238,9 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
}
/* Write NVRAM. */
- ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
- ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
- count);
+ ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
+ ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
+ count);
mutex_unlock(&ha->optrom_mutex);
ql_dbg(ql_dbg_user, vha, 0x7060,
@@ -364,7 +379,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size;
+ ha->optrom_region_size = size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -418,6 +433,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
* 0x000000 -> 0x07ffff -- Boot code.
* 0x080000 -> 0x0fffff -- Firmware.
* 0x120000 -> 0x12ffff -- VPD and HBA parameters.
+ *
+ * > ISP25xx type boards:
+ *
+ * None -- should go through BSG.
*/
valid = 0;
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
@@ -425,9 +444,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
else if (start == (ha->flt_region_boot * 4) ||
start == (ha->flt_region_fw * 4))
valid = 1;
- else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
- || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
- || IS_QLA27XX(ha))
+ else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7065,
@@ -437,7 +454,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size;
+ ha->optrom_region_size = size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -504,6 +521,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
uint32_t faddr;
+ struct active_regions active_regions = { };
if (unlikely(pci_channel_offline(ha->pdev)))
return -EAGAIN;
@@ -511,22 +529,33 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EINVAL;
- if (IS_NOCACHE_VPD_TYPE(ha)) {
- faddr = ha->flt_region_vpd << 2;
+ if (IS_NOCACHE_VPD_TYPE(ha))
+ goto skip;
+
+ faddr = ha->flt_region_vpd << 2;
- if (IS_QLA27XX(ha) &&
- qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ if (IS_QLA28XX(ha)) {
+ qla28xx_get_aux_images(vha, &active_regions);
+ if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
faddr = ha->flt_region_vpd_sec << 2;
- mutex_lock(&ha->optrom_mutex);
- if (qla2x00_chip_is_down(vha)) {
- mutex_unlock(&ha->optrom_mutex);
- return -EAGAIN;
- }
- ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
- ha->vpd_size);
+ ql_dbg(ql_dbg_init, vha, 0x7070,
+ "Loading %s nvram image.\n",
+ active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+ "primary" : "secondary");
+ }
+
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
}
+
+ ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
+ mutex_unlock(&ha->optrom_mutex);
+
+skip:
return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
@@ -563,8 +592,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
}
/* Write NVRAM. */
- ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
- ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
+ ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
+ ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
/* Update flash version information for 4Gb & above. */
if (!IS_FWI2_CAPABLE(ha)) {
@@ -645,6 +674,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
int type;
uint32_t idc_control;
uint8_t *tmp_data = NULL;
+
if (off != 0)
return -EINVAL;
@@ -682,7 +712,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
uint32_t idc_control;
qla83xx_idc_lock(vha, 0);
@@ -858,7 +888,7 @@ do_read:
count = 0;
}
- count = actual_size > count ? count: actual_size;
+ count = actual_size > count ? count : actual_size;
memcpy(buf, ha->xgmac_data, count);
return count;
@@ -934,7 +964,7 @@ static struct bin_attribute sysfs_dcbx_tlv_attr = {
static struct sysfs_entry {
char *name;
struct bin_attribute *attr;
- int is4GBp_only;
+ int type;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr, },
{ "nvram", &sysfs_nvram_attr, },
@@ -957,11 +987,11 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
int ret;
for (iter = bin_file_entries; iter->name; iter++) {
- if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
+ if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
continue;
- if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
+ if (iter->type == 2 && !IS_QLA25XX(vha->hw))
continue;
- if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+ if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -985,13 +1015,14 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
struct qla_hw_data *ha = vha->hw;
for (iter = bin_file_entries; iter->name; iter++) {
- if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
+ if (iter->type && !IS_FWI2_CAPABLE(ha))
continue;
- if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
+ if (iter->type == 2 && !IS_QLA25XX(ha))
continue;
- if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+ if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
continue;
- if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+ if (iter->type == 0x27 &&
+ (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -1049,6 +1080,7 @@ qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}
@@ -1082,6 +1114,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
}
@@ -1294,6 +1327,7 @@ qla2x00_optrom_bios_version_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
ha->bios_revision[0]);
}
@@ -1304,6 +1338,7 @@ qla2x00_optrom_efi_version_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
ha->efi_revision[0]);
}
@@ -1314,6 +1349,7 @@ qla2x00_optrom_fcode_version_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
ha->fcode_revision[0]);
}
@@ -1324,6 +1360,7 @@ qla2x00_optrom_fw_version_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
ha->fw_revision[3]);
@@ -1336,7 +1373,8 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1349,6 +1387,7 @@ qla2x00_total_isp_aborts_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
return scnprintf(buf, PAGE_SIZE, "%d\n",
vha->qla_stats.total_isp_aborts);
}
@@ -1358,24 +1397,40 @@ qla24xx_84xx_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int rval = QLA_SUCCESS;
- uint16_t status[2] = {0, 0};
+ uint16_t status[2] = { 0 };
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA84XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
- if (ha->cs84xx->op_fw_version == 0)
+ if (!ha->cs84xx->op_fw_version) {
rval = qla84xx_verify_chip(vha, status);
- if ((rval == QLA_SUCCESS) && (status[0] == 0))
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- (uint32_t)ha->cs84xx->op_fw_version);
+ if (!rval && !status[0])
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (uint32_t)ha->cs84xx->op_fw_version);
+ }
return scnprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t
+qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+ ha->serdes_version[0], ha->serdes_version[1],
+ ha->serdes_version[2]);
+}
+
+static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1383,7 +1438,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
- !IS_QLA27XX(ha))
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1596,7 +1651,7 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA27XX(ha))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1604,35 +1659,38 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+qla2x00_min_supported_speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA27XX(ha))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
return scnprintf(buf, PAGE_SIZE, "%s\n",
- ha->min_link_speed == 5 ? "32Gps" :
- ha->min_link_speed == 4 ? "16Gps" :
- ha->min_link_speed == 3 ? "8Gps" :
- ha->min_link_speed == 2 ? "4Gps" :
- ha->min_link_speed != 0 ? "unknown" : "");
+ ha->min_supported_speed == 6 ? "64Gps" :
+ ha->min_supported_speed == 5 ? "32Gps" :
+ ha->min_supported_speed == 4 ? "16Gps" :
+ ha->min_supported_speed == 3 ? "8Gps" :
+ ha->min_supported_speed == 2 ? "4Gps" :
+ ha->min_supported_speed != 0 ? "unknown" : "");
}
static ssize_t
-qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+qla2x00_max_supported_speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA27XX(ha))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
return scnprintf(buf, PAGE_SIZE, "%s\n",
- ha->max_speed_sup ? "32Gps" : "16Gps");
+ ha->max_supported_speed == 2 ? "64Gps" :
+ ha->max_supported_speed == 1 ? "32Gps" :
+ ha->max_supported_speed == 0 ? "16Gps" : "unknown");
}
static ssize_t
@@ -1645,7 +1703,7 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
int mode = QLA_SET_DATA_RATE_LR;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA27XX(vha->hw)) {
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
ql_log(ql_log_warn, vha, 0x70d8,
"Speed setting not supported \n");
return -EINVAL;
@@ -2164,6 +2222,32 @@ qla2x00_dif_bundle_statistics_show(struct device *dev,
ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
}
+static ssize_t
+qla2x00_fw_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%llx\n",
+ (uint64_t)ha->fw_attributes_ext[1] << 48 |
+ (uint64_t)ha->fw_attributes_ext[0] << 32 |
+ (uint64_t)ha->fw_attributes_h << 16 |
+ (uint64_t)ha->fw_attributes);
+}
+
+static ssize_t
+qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -2192,6 +2276,7 @@ static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
NULL);
+static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
@@ -2209,8 +2294,10 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
qla2x00_allow_cna_fw_dump_show,
qla2x00_allow_cna_fw_dump_store);
static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
-static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
-static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
+static DEVICE_ATTR(min_supported_speed, 0444,
+ qla2x00_min_supported_speed_show, NULL);
+static DEVICE_ATTR(max_supported_speed, 0444,
+ qla2x00_max_supported_speed_show, NULL);
static DEVICE_ATTR(zio_threshold, 0644,
qla_zio_threshold_show,
qla_zio_threshold_store);
@@ -2221,6 +2308,8 @@ static DEVICE_ATTR(dif_bundle_statistics, 0444,
qla2x00_dif_bundle_statistics_show, NULL);
static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
qla2x00_port_speed_store);
+static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
+static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
struct device_attribute *qla2x00_host_attrs[] = {
@@ -2242,6 +2331,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_optrom_fw_version,
&dev_attr_84xx_fw_version,
&dev_attr_total_isp_aborts,
+ &dev_attr_serdes_version,
&dev_attr_mpi_version,
&dev_attr_phy_version,
&dev_attr_flash_block_size,
@@ -2256,11 +2346,13 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_fw_dump_size,
&dev_attr_allow_cna_fw_dump,
&dev_attr_pep_version,
- &dev_attr_min_link_speed,
- &dev_attr_max_speed_sup,
+ &dev_attr_min_supported_speed,
+ &dev_attr_max_supported_speed,
&dev_attr_zio_threshold,
&dev_attr_dif_bundle_statistics,
&dev_attr_port_speed,
+ &dev_attr_port_no,
+ &dev_attr_fw_attr,
NULL, /* reserve for qlini_mode */
NULL, /* reserve for ql2xiniexchg */
NULL, /* reserve for ql2xexchoffld */
@@ -2296,16 +2388,15 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
- struct qla_hw_data *ha = ((struct scsi_qla_host *)
- (shost_priv(shost)))->hw;
- u32 speed = FC_PORTSPEED_UNKNOWN;
+ scsi_qla_host_t *vha = shost_priv(shost);
+ u32 speed;
- if (IS_QLAFX00(ha)) {
+ if (IS_QLAFX00(vha->hw)) {
qlafx00_get_host_speed(shost);
return;
}
- switch (ha->link_data_rate) {
+ switch (vha->hw->link_data_rate) {
case PORT_SPEED_1GB:
speed = FC_PORTSPEED_1GBIT;
break;
@@ -2327,7 +2418,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_32GB:
speed = FC_PORTSPEED_32GBIT;
break;
+ case PORT_SPEED_64GB:
+ speed = FC_PORTSPEED_64GBIT;
+ break;
+ default:
+ speed = FC_PORTSPEED_UNKNOWN;
+ break;
}
+
fc_host_speed(shost) = speed;
}
@@ -2335,7 +2433,7 @@ static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
- uint32_t port_type = FC_PORTTYPE_UNKNOWN;
+ uint32_t port_type;
if (vha->vp_idx) {
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
@@ -2354,7 +2452,11 @@ qla2x00_get_host_port_type(struct Scsi_Host *shost)
case ISP_CFG_F:
port_type = FC_PORTTYPE_NPORT;
break;
+ default:
+ port_type = FC_PORTTYPE_UNKNOWN;
+ break;
}
+
fc_host_port_type(shost) = port_type;
}
@@ -2416,13 +2518,10 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
fc_starget_port_id(starget) = port_id;
}
-static void
+static inline void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
- if (timeout)
- rport->dev_loss_tmo = timeout;
- else
- rport->dev_loss_tmo = 1;
+ rport->dev_loss_tmo = timeout ? timeout : 1;
}
static void
@@ -2632,8 +2731,9 @@ static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
- uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
- 0xFF, 0xFF, 0xFF, 0xFF};
+ static const uint8_t node_name[WWN_SIZE] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ };
u64 fabric_name = wwn_to_u64(node_name);
if (vha->device_flags & SWITCH_FOUND)
@@ -2711,8 +2811,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
/* initialized vport states */
atomic_set(&vha->loop_state, LOOP_DOWN);
- vha->vp_err_state= VP_ERR_PORTDWN;
- vha->vp_prev_err_state= VP_ERR_UNKWN;
+ vha->vp_err_state = VP_ERR_PORTDWN;
+ vha->vp_prev_err_state = VP_ERR_UNKWN;
/* Check if physical ha port is Up */
if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
@@ -2727,6 +2827,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
int prot = 0, guard;
+
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
@@ -2977,7 +3078,7 @@ void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- u32 speed = FC_PORTSPEED_UNKNOWN;
+ u32 speeds = FC_PORTSPEED_UNKNOWN;
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
@@ -2988,25 +3089,45 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
if (IS_CNA_CAPABLE(ha))
- speed = FC_PORTSPEED_10GBIT;
- else if (IS_QLA2031(ha))
- speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
- FC_PORTSPEED_4GBIT;
- else if (IS_QLA25XX(ha))
- speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
- FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+ speeds = FC_PORTSPEED_10GBIT;
+ else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
+ if (ha->max_supported_speed == 2) {
+ if (ha->min_supported_speed <= 6)
+ speeds |= FC_PORTSPEED_64GBIT;
+ }
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1) {
+ if (ha->min_supported_speed <= 5)
+ speeds |= FC_PORTSPEED_32GBIT;
+ }
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 4)
+ speeds |= FC_PORTSPEED_16GBIT;
+ }
+ if (ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 3)
+ speeds |= FC_PORTSPEED_8GBIT;
+ }
+ if (ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 2)
+ speeds |= FC_PORTSPEED_4GBIT;
+ }
+ } else if (IS_QLA2031(ha))
+ speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
+ FC_PORTSPEED_4GBIT;
+ else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
+ speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
+ FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
else if (IS_QLA24XX_TYPE(ha))
- speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
- FC_PORTSPEED_1GBIT;
+ speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
+ FC_PORTSPEED_1GBIT;
else if (IS_QLA23XX(ha))
- speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
- else if (IS_QLAFX00(ha))
- speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
- FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
- else if (IS_QLA27XX(ha))
- speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
- FC_PORTSPEED_8GBIT;
+ speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
else
- speed = FC_PORTSPEED_1GBIT;
- fc_host_supported_speeds(vha->host) = speed;
+ speeds = FC_PORTSPEED_1GBIT;
+
+ fc_host_supported_speeds(vha->host) = speeds;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 17d42658ad9a..5441557b424b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,4 +1,4 @@
- /*
+/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
*
@@ -84,8 +84,7 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
return 0;
}
- if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
- bcode[3] != 'S') {
+ if (memcmp(bcode, "HQOS", 4)) {
/* Invalid FCP priority data header*/
ql_dbg(ql_dbg_user, vha, 0x7052,
"Invalid FCP Priority data header. bcode=0x%x.\n",
@@ -1044,7 +1043,7 @@ qla84xx_updatefw(struct bsg_job *bsg_job)
}
flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
- fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
+ fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);
mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
mn->entry_count = 1;
@@ -1057,9 +1056,8 @@ qla84xx_updatefw(struct bsg_job *bsg_job)
mn->fw_ver = cpu_to_le32(fw_ver);
mn->fw_size = cpu_to_le32(data_len);
mn->fw_seq_size = cpu_to_le32(data_len);
- mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
- mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
- mn->dseg_length = cpu_to_le32(data_len);
+ put_unaligned_le64(fw_dma, &mn->dsd.address);
+ mn->dsd.length = cpu_to_le32(data_len);
mn->data_seg_cnt = cpu_to_le16(1);
rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
@@ -1238,9 +1236,8 @@ qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
mn->dseg_count = cpu_to_le16(1);
- mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
- mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
- mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
+ put_unaligned_le64(mgmt_dma, &mn->dsd.address);
+ mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
}
rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
@@ -1354,7 +1351,7 @@ qla24xx_iidma(struct bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x704c,
- "iIDMA cmd failed for %8phN -- "
+ "iiDMA cmd failed for %8phN -- "
"%04x %x %04x %04x.\n", fcport->port_name,
rval, fcport->fp_speed, mb[0], mb[1]);
rval = (DID_ERROR << 16);
@@ -1412,7 +1409,8 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
- IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7058,
@@ -1534,6 +1532,7 @@ qla2x00_update_fru_versions(struct bsg_job *bsg_job)
uint32_t count;
dma_addr_t sfp_dma;
void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
@@ -1584,6 +1583,7 @@ qla2x00_read_fru_status(struct bsg_job *bsg_job)
struct qla_status_reg *sr = (void *)bsg;
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
@@ -1634,6 +1634,7 @@ qla2x00_write_fru_status(struct bsg_job *bsg_job)
struct qla_status_reg *sr = (void *)bsg;
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
@@ -1680,6 +1681,7 @@ qla2x00_write_i2c(struct bsg_job *bsg_job)
struct qla_i2c_access *i2c = (void *)bsg;
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
@@ -1725,6 +1727,7 @@ qla2x00_read_i2c(struct bsg_job *bsg_job)
struct qla_i2c_access *i2c = (void *)bsg;
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
@@ -1961,7 +1964,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
/* Dump the vendor information */
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
- (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+ piocb_rqst, sizeof(*piocb_rqst));
if (!vha->flags.online) {
ql_log(ql_log_warn, vha, 0x70d0,
@@ -2157,7 +2160,7 @@ qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
struct qla_flash_update_caps cap;
- if (!(IS_QLA27XX(ha)))
+ if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
return -EPERM;
memset(&cap, 0, sizeof(cap));
@@ -2190,7 +2193,7 @@ qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
uint64_t online_fw_attr = 0;
struct qla_flash_update_caps cap;
- if (!(IS_QLA27XX(ha)))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return -EPERM;
memset(&cap, 0, sizeof(cap));
@@ -2238,7 +2241,7 @@ qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
uint8_t domain, area, al_pa, state;
int rval;
- if (!(IS_QLA27XX(ha)))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return -EPERM;
memset(&bbcr, 0, sizeof(bbcr));
@@ -2323,8 +2326,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
if (rval == QLA_SUCCESS) {
- ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
- (uint8_t *)stats, sizeof(*stats));
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
+ stats, sizeof(*stats));
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
}
@@ -2353,7 +2356,8 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
int rval;
struct qla_dport_diag *dd;
- if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+ !IS_QLA28XX(vha->hw))
return -EPERM;
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
@@ -2388,6 +2392,45 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
}
static int
+qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_active_regions regions = { };
+ struct active_regions active_regions = { };
+
+ qla27xx_get_active_image(vha, &active_regions);
+ regions.global_image = active_regions.global;
+
+ if (IS_QLA28XX(ha)) {
+ qla28xx_get_aux_images(vha, &active_regions);
+ regions.board_config = active_regions.aux.board_config;
+ regions.vpd_nvram = active_regions.aux.vpd_nvram;
+ regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
+ regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x70e1,
+ "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
+ __func__, vha->host_no, regions.global_image,
+ regions.board_config, regions.vpd_nvram,
+ regions.npiv_config_0_1, regions.npiv_config_2_3);
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
+
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_reply->reply_payload_rcv_len = sizeof(regions);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return 0;
+}
+
+static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
struct fc_bsg_request *bsg_request = bsg_job->request;
@@ -2460,6 +2503,9 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
case QL_VND_DPORT_DIAGNOSTICS:
return qla2x00_do_dport_diagnostics(bsg_job);
+ case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
+ return qla2x00_get_flash_image_status(bsg_job);
+
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index d97dfd521356..7594fad7b5b5 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -31,6 +31,7 @@
#define QL_VND_GET_PRIV_STATS 0x18
#define QL_VND_DPORT_DIAGNOSTICS 0x19
#define QL_VND_GET_PRIV_STATS_EX 0x1A
+#define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -279,4 +280,14 @@ struct qla_dport_diag {
#define QLA_DPORT_RESULT 0x0
#define QLA_DPORT_START 0x2
+/* active images in flash */
+struct qla_active_regions {
+ uint8_t global_image;
+ uint8_t board_config;
+ uint8_t vpd_nvram;
+ uint8_t npiv_config_0_1;
+ uint8_t npiv_config_2_3;
+ uint8_t reserved[32];
+} __packed;
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c7533fa7f46e..9e80646722e2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -111,30 +111,25 @@ int
qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
uint32_t ram_dwords, void **nxt)
{
- int rval;
- uint32_t cnt, stat, timer, dwords, idx;
- uint16_t mb0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *dump = (uint32_t *)ha->gid_list;
-
- rval = QLA_SUCCESS;
- mb0 = 0;
+ uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
+ uint32_t stat;
+ ulong i, j, timer = 6000000;
+ int rval = QLA_FUNCTION_FAILED;
- WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
+ if (i + dwords > ram_dwords)
+ dwords = ram_dwords - i;
- dwords = qla2x00_gid_list_size(ha) / 4;
- for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
- cnt += dwords, addr += dwords) {
- if (cnt + dwords > ram_dwords)
- dwords = ram_dwords - cnt;
-
+ WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
WRT_REG_WORD(&reg->mailbox1, LSW(addr));
WRT_REG_WORD(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
- WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
+ WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
@@ -145,76 +140,76 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
- if (stat & HSRX_RISC_INT) {
- stat &= 0xff;
-
- if (stat == 0x1 || stat == 0x2 ||
- stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
+ while (timer--) {
+ udelay(5);
- mb0 = RD_REG_WORD(&reg->mailbox0);
- RD_REG_WORD(&reg->mailbox1);
+ stat = RD_REG_DWORD(&reg->host_status);
+ /* Check for pending interrupts. */
+ if (!(stat & HSRX_RISC_INT))
+ continue;
- WRT_REG_DWORD(&reg->hccr,
- HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- break;
- }
+ stat &= 0xff;
+ if (stat != 0x1 && stat != 0x2 &&
+ stat != 0x10 && stat != 0x11) {
/* Clear this intr; it wasn't a mailbox intr */
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD(&reg->hccr);
+ continue;
}
- udelay(5);
+
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
}
ha->flags.mbox_int = 1;
+ *nxt = ram + i;
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb0 & MBS_MASK;
- for (idx = 0; idx < dwords; idx++)
- ram[cnt + idx] = IS_QLA27XX(ha) ?
- le32_to_cpu(dump[idx]) : swab32(dump[idx]);
- } else {
- rval = QLA_FUNCTION_FAILED;
+ if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ /* no interrupt, timed out */
+ return rval;
+ }
+ if (rval) {
+ /* error completion status */
+ return rval;
+ }
+ for (j = 0; j < dwords; j++) {
+ ram[i + j] =
+ (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+ chunk[j] : swab32(chunk[j]);
}
}
- *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
- return rval;
+ *nxt = ram + i;
+ return QLA_SUCCESS;
}
int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
uint32_t ram_dwords, void **nxt)
{
- int rval;
- uint32_t cnt, stat, timer, dwords, idx;
- uint16_t mb0;
+ int rval = QLA_FUNCTION_FAILED;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *dump = (uint32_t *)ha->gid_list;
+ uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
+ uint32_t stat;
+ ulong i, j, timer = 6000000;
- rval = QLA_SUCCESS;
- mb0 = 0;
-
- WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- dwords = qla2x00_gid_list_size(ha) / 4;
- for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
- cnt += dwords, addr += dwords) {
- if (cnt + dwords > ram_dwords)
- dwords = ram_dwords - cnt;
+ for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
+ if (i + dwords > ram_dwords)
+ dwords = ram_dwords - i;
+ WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
WRT_REG_WORD(&reg->mailbox1, LSW(addr));
WRT_REG_WORD(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
- WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
+ WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
@@ -223,45 +218,48 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
+ while (timer--) {
+ udelay(5);
stat = RD_REG_DWORD(&reg->host_status);
- if (stat & HSRX_RISC_INT) {
- stat &= 0xff;
- if (stat == 0x1 || stat == 0x2 ||
- stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_REG_WORD(&reg->mailbox0);
-
- WRT_REG_DWORD(&reg->hccr,
- HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- break;
- }
+ /* Check for pending interrupts. */
+ if (!(stat & HSRX_RISC_INT))
+ continue;
- /* Clear this intr; it wasn't a mailbox intr */
+ stat &= 0xff;
+ if (stat != 0x1 && stat != 0x2 &&
+ stat != 0x10 && stat != 0x11) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD(&reg->hccr);
+ continue;
}
- udelay(5);
+
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
}
ha->flags.mbox_int = 1;
+ *nxt = ram + i;
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb0 & MBS_MASK;
- for (idx = 0; idx < dwords; idx++)
- ram[cnt + idx] = IS_QLA27XX(ha) ?
- le32_to_cpu(dump[idx]) : swab32(dump[idx]);
- } else {
- rval = QLA_FUNCTION_FAILED;
+ if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ /* no interrupt, timed out */
+ return rval;
+ }
+ if (rval) {
+ /* error completion status */
+ return rval;
+ }
+ for (j = 0; j < dwords; j++) {
+ ram[i + j] =
+ (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+ chunk[j] : swab32(chunk[j]);
}
}
- *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
- return rval;
+ *nxt = ram + i;
+ return QLA_SUCCESS;
}
static int
@@ -447,7 +445,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
}
}
- *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
return rval;
}
@@ -669,7 +667,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
struct qla2xxx_mq_chain *mq = ptr;
device_reg_t *reg;
- if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha))
return ptr;
mq = ptr;
@@ -2521,7 +2520,7 @@ qla83xx_fw_dump_failed:
/****************************************************************************/
static inline int
-ql_mask_match(uint32_t level)
+ql_mask_match(uint level)
{
return (level & ql2xextended_error_logging) == level;
}
@@ -2540,7 +2539,7 @@ ql_mask_match(uint32_t level)
* msg: The message to be displayed.
*/
void
-ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
{
va_list va;
struct va_format vaf;
@@ -2583,8 +2582,7 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
* msg: The message to be displayed.
*/
void
-ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
- const char *fmt, ...)
+ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
{
va_list va;
struct va_format vaf;
@@ -2620,7 +2618,7 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
* msg: The message to be displayed.
*/
void
-ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
{
va_list va;
struct va_format vaf;
@@ -2678,8 +2676,7 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
* msg: The message to be displayed.
*/
void
-ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
- const char *fmt, ...)
+ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
{
va_list va;
struct va_format vaf;
@@ -2719,7 +2716,7 @@ ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
}
void
-ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
+ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
{
int i;
struct qla_hw_data *ha = vha->hw;
@@ -2741,13 +2738,12 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
ql_dbg(level, vha, id, "Mailbox registers:\n");
for (i = 0; i < 6; i++, mbx_reg++)
ql_dbg(level, vha, id,
- "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg));
+ "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
}
void
-ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
- uint8_t *buf, uint size)
+ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, void *buf, uint size)
{
uint cnt;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 8877aa97d829..bb01b680ce9f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -318,20 +318,20 @@ struct qla2xxx_fw_dump {
* as compared to other log levels.
*/
-extern int ql_errlev;
+extern uint ql_errlev;
void __attribute__((format (printf, 4, 5)))
-ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+ql_dbg(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
-ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+ql_dbg_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
-ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
-ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3d46975a5e5c..1a4095c56eee 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,7 @@
#include <scsi/scsi_bsg_fc.h>
#include "qla_bsg.h"
+#include "qla_dsd.h"
#include "qla_nx.h"
#include "qla_nx2.h"
#include "qla_nvme.h"
@@ -545,7 +546,7 @@ typedef struct srb {
u32 gen2; /* scratch */
int rc;
int retry_count;
- struct completion comp;
+ struct completion *comp;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -1033,6 +1034,7 @@ struct mbx_cmd_32 {
#define MBC_GET_FIRMWARE_VERSION 8 /* Get firmware revision. */
#define MBC_LOAD_RISC_RAM 9 /* Load RAM command. */
#define MBC_DUMP_RISC_RAM 0xa /* Dump RAM command. */
+#define MBC_SECURE_FLASH_UPDATE 0xa /* Secure Flash Update(28xx) */
#define MBC_LOAD_RISC_RAM_EXTENDED 0xb /* Load RAM extended. */
#define MBC_DUMP_RISC_RAM_EXTENDED 0xc /* Dump RAM extended. */
#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */
@@ -1203,6 +1205,10 @@ struct mbx_cmd_32 {
#define QLA27XX_IMG_STATUS_VER_MAJOR 0x01
#define QLA27XX_IMG_STATUS_VER_MINOR 0x00
#define QLA27XX_IMG_STATUS_SIGN 0xFACEFADE
+#define QLA28XX_IMG_STATUS_SIGN 0xFACEFADF
+#define QLA28XX_AUX_IMG_STATUS_SIGN 0xFACEFAED
+#define QLA27XX_DEFAULT_IMAGE 0
#define QLA27XX_PRIMARY_IMAGE 1
#define QLA27XX_SECONDARY_IMAGE 2
@@ -1323,8 +1329,8 @@ typedef struct {
uint16_t response_q_inpointer;
uint16_t request_q_length;
uint16_t response_q_length;
- uint32_t request_q_address[2];
- uint32_t response_q_address[2];
+ __le64 request_q_address __packed;
+ __le64 response_q_address __packed;
uint16_t lun_enables;
uint8_t command_resource_count;
@@ -1749,12 +1755,10 @@ typedef struct {
uint16_t dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t byte_count; /* Total byte count. */
- uint32_t dseg_0_address; /* Data segment 0 address. */
- uint32_t dseg_0_length; /* Data segment 0 length. */
- uint32_t dseg_1_address; /* Data segment 1 address. */
- uint32_t dseg_1_length; /* Data segment 1 length. */
- uint32_t dseg_2_address; /* Data segment 2 address. */
- uint32_t dseg_2_length; /* Data segment 2 length. */
+ union {
+ struct dsd32 dsd32[3];
+ struct dsd64 dsd64[2];
+ };
} cmd_entry_t;
/*
@@ -1775,10 +1779,7 @@ typedef struct {
uint16_t dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t byte_count; /* Total byte count. */
- uint32_t dseg_0_address[2]; /* Data segment 0 address. */
- uint32_t dseg_0_length; /* Data segment 0 length. */
- uint32_t dseg_1_address[2]; /* Data segment 1 address. */
- uint32_t dseg_1_length; /* Data segment 1 length. */
+ struct dsd64 dsd[2];
} cmd_a64_entry_t, request_t;
/*
@@ -1791,20 +1792,7 @@ typedef struct {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t reserved;
- uint32_t dseg_0_address; /* Data segment 0 address. */
- uint32_t dseg_0_length; /* Data segment 0 length. */
- uint32_t dseg_1_address; /* Data segment 1 address. */
- uint32_t dseg_1_length; /* Data segment 1 length. */
- uint32_t dseg_2_address; /* Data segment 2 address. */
- uint32_t dseg_2_length; /* Data segment 2 length. */
- uint32_t dseg_3_address; /* Data segment 3 address. */
- uint32_t dseg_3_length; /* Data segment 3 length. */
- uint32_t dseg_4_address; /* Data segment 4 address. */
- uint32_t dseg_4_length; /* Data segment 4 length. */
- uint32_t dseg_5_address; /* Data segment 5 address. */
- uint32_t dseg_5_length; /* Data segment 5 length. */
- uint32_t dseg_6_address; /* Data segment 6 address. */
- uint32_t dseg_6_length; /* Data segment 6 length. */
+ struct dsd32 dsd[7];
} cont_entry_t;
/*
@@ -1816,16 +1804,7 @@ typedef struct {
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t dseg_0_address[2]; /* Data segment 0 address. */
- uint32_t dseg_0_length; /* Data segment 0 length. */
- uint32_t dseg_1_address[2]; /* Data segment 1 address. */
- uint32_t dseg_1_length; /* Data segment 1 length. */
- uint32_t dseg_2_address [2]; /* Data segment 2 address. */
- uint32_t dseg_2_length; /* Data segment 2 length. */
- uint32_t dseg_3_address[2]; /* Data segment 3 address. */
- uint32_t dseg_3_length; /* Data segment 3 length. */
- uint32_t dseg_4_address[2]; /* Data segment 4 address. */
- uint32_t dseg_4_length; /* Data segment 4 length. */
+ struct dsd64 dsd[5];
} cont_a64_entry_t;
#define PO_MODE_DIF_INSERT 0
@@ -1869,8 +1848,7 @@ struct crc_context {
uint16_t reserved_2;
uint16_t reserved_3;
uint32_t reserved_4;
- uint32_t data_address[2];
- uint32_t data_length;
+ struct dsd64 data_dsd;
uint32_t reserved_5[2];
uint32_t reserved_6;
} nobundling;
@@ -1880,11 +1858,8 @@ struct crc_context {
uint16_t reserved_1;
__le16 dseg_count; /* Data segment count */
uint32_t reserved_2;
- uint32_t data_address[2];
- uint32_t data_length;
- uint32_t dif_address[2];
- uint32_t dif_length; /* Data segment 0
- * length */
+ struct dsd64 data_dsd;
+ struct dsd64 dif_dsd;
} bundling;
} u;
@@ -2083,10 +2058,8 @@ typedef struct {
uint32_t handle2;
uint32_t rsp_bytecount;
uint32_t req_bytecount;
- uint32_t dseg_req_address[2]; /* Data segment 0 address. */
- uint32_t dseg_req_length; /* Data segment 0 length. */
- uint32_t dseg_rsp_address[2]; /* Data segment 1 address. */
- uint32_t dseg_rsp_length; /* Data segment 1 length. */
+ struct dsd64 req_dsd;
+ struct dsd64 rsp_dsd;
} ms_iocb_entry_t;
@@ -2258,7 +2231,10 @@ typedef enum {
FCT_BROADCAST,
FCT_INITIATOR,
FCT_TARGET,
- FCT_NVME
+ FCT_NVME_INITIATOR = 0x10,
+ FCT_NVME_TARGET = 0x20,
+ FCT_NVME_DISCOVERY = 0x40,
+ FCT_NVME = 0xf0,
} fc_port_type_t;
enum qla_sess_deletion {
@@ -2463,13 +2439,7 @@ struct event_arg {
#define FCS_DEVICE_LOST 3
#define FCS_ONLINE 4
-static const char * const port_state_str[] = {
- "Unknown",
- "UNCONFIGURED",
- "DEAD",
- "LOST",
- "ONLINE"
-};
+extern const char *const port_state_str[5];
/*
* FC port flags.
@@ -2672,6 +2642,7 @@ struct ct_fdmiv2_hba_attributes {
#define FDMI_PORT_SPEED_8GB 0x10
#define FDMI_PORT_SPEED_16GB 0x20
#define FDMI_PORT_SPEED_32GB 0x40
+#define FDMI_PORT_SPEED_64GB 0x80
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
#define FC_CLASS_2 0x04
@@ -3060,7 +3031,7 @@ struct sns_cmd_pkt {
struct {
uint16_t buffer_length;
uint16_t reserved_1;
- uint32_t buffer_address[2];
+ __le64 buffer_address __packed;
uint16_t subcommand_length;
uint16_t reserved_2;
uint16_t subcommand;
@@ -3130,10 +3101,10 @@ struct rsp_que;
struct isp_operations {
int (*pci_config) (struct scsi_qla_host *);
- void (*reset_chip) (struct scsi_qla_host *);
+ int (*reset_chip)(struct scsi_qla_host *);
int (*chip_diag) (struct scsi_qla_host *);
void (*config_rings) (struct scsi_qla_host *);
- void (*reset_adapter) (struct scsi_qla_host *);
+ int (*reset_adapter)(struct scsi_qla_host *);
int (*nvram_config) (struct scsi_qla_host *);
void (*update_fw_options) (struct scsi_qla_host *);
int (*load_risc) (struct scsi_qla_host *, uint32_t *);
@@ -3159,9 +3130,9 @@ struct isp_operations {
void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
uint32_t);
- uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *,
+ uint8_t *(*read_nvram)(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
- int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t,
+ int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t,
uint32_t);
void (*fw_dump) (struct scsi_qla_host *, int);
@@ -3170,16 +3141,16 @@ struct isp_operations {
int (*beacon_off) (struct scsi_qla_host *);
void (*beacon_blink) (struct scsi_qla_host *);
- uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
+ void *(*read_optrom)(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
- int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
+ int (*write_optrom)(struct scsi_qla_host *, void *, uint32_t,
uint32_t);
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
int (*start_scsi_mq) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
- int (*iospace_config)(struct qla_hw_data*);
+ int (*iospace_config)(struct qla_hw_data *);
int (*initialize_adapter)(struct scsi_qla_host *);
};
@@ -3368,7 +3339,8 @@ struct qla_tc_param {
#define QLA_MQ_SIZE 32
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
- ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
+ ((ha->mqenable || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? \
((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
((void __iomem *)ha->iobase))
#define QLA_REQ_QUE_ID(tag) \
@@ -3621,6 +3593,8 @@ struct qla_hw_data {
uint32_t rida_fmt2:1;
uint32_t purge_mbox:1;
uint32_t n2n_bigger:1;
+ uint32_t secure_adapter:1;
+ uint32_t secure_fw:1;
} flags;
uint16_t max_exchg;
@@ -3703,6 +3677,7 @@ struct qla_hw_data {
#define PORT_SPEED_8GB 0x04
#define PORT_SPEED_16GB 0x05
#define PORT_SPEED_32GB 0x06
+#define PORT_SPEED_64GB 0x07
#define PORT_SPEED_10GB 0x13
uint16_t link_data_rate; /* F/W operating speed */
uint16_t set_data_rate; /* Set by user */
@@ -3729,6 +3704,11 @@ struct qla_hw_data {
#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271
#define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261
+#define PCI_DEVICE_ID_QLOGIC_ISP2061 0x2061
+#define PCI_DEVICE_ID_QLOGIC_ISP2081 0x2081
+#define PCI_DEVICE_ID_QLOGIC_ISP2089 0x2089
+#define PCI_DEVICE_ID_QLOGIC_ISP2281 0x2281
+#define PCI_DEVICE_ID_QLOGIC_ISP2289 0x2289
uint32_t isp_type;
#define DT_ISP2100 BIT_0
@@ -3753,7 +3733,12 @@ struct qla_hw_data {
#define DT_ISP2071 BIT_19
#define DT_ISP2271 BIT_20
#define DT_ISP2261 BIT_21
-#define DT_ISP_LAST (DT_ISP2261 << 1)
+#define DT_ISP2061 BIT_22
+#define DT_ISP2081 BIT_23
+#define DT_ISP2089 BIT_24
+#define DT_ISP2281 BIT_25
+#define DT_ISP2289 BIT_26
+#define DT_ISP_LAST (DT_ISP2289 << 1)
uint32_t device_type;
#define DT_T10_PI BIT_25
@@ -3788,6 +3773,8 @@ struct qla_hw_data {
#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
#define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261)
+#define IS_QLA2081(ha) (DT_MASK(ha) & DT_ISP2081)
+#define IS_QLA2281(ha) (DT_MASK(ha) & DT_ISP2281)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -3797,6 +3784,7 @@ struct qla_hw_data {
#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
+#define IS_QLA28XX(ha) (IS_QLA2081(ha) || IS_QLA2281(ha))
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3805,14 +3793,15 @@ struct qla_hw_data {
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
- IS_QLA8044(ha) || IS_QLA27XX(ha))
+ IS_QLA8044(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha))
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha))
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha))
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
@@ -3823,28 +3812,34 @@ struct qla_hw_data {
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha))
-#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BIDI_CAPABLE(ha) \
+ (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
((ha)->fw_attributes_ext[0] & BIT_0))
#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
-#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
-#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
-#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
-#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
-#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
+#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_EXCHG_OFFLD_CAPABLE(ha) \
- (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
- (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
- IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3888,6 +3883,9 @@ struct qla_hw_data {
void *sfp_data;
dma_addr_t sfp_data_dma;
+ void *flt;
+ dma_addr_t flt_dma;
+
#define XGMAC_DATA_SIZE 4096
void *xgmac_data;
dma_addr_t xgmac_data_dma;
@@ -3999,18 +3997,23 @@ struct qla_hw_data {
uint8_t fw_seriallink_options[4];
uint16_t fw_seriallink_options24[4];
+ uint8_t serdes_version[3];
uint8_t mpi_version[3];
uint32_t mpi_capabilities;
uint8_t phy_version[3];
uint8_t pep_version[3];
/* Firmware dump template */
- void *fw_dump_template;
- uint32_t fw_dump_template_len;
- /* Firmware dump information. */
+ struct fwdt {
+ void *template;
+ ulong length;
+ ulong dump_size;
+ } fwdt[2];
struct qla2xxx_fw_dump *fw_dump;
uint32_t fw_dump_len;
- int fw_dumped;
+ u32 fw_dump_alloc_len;
+ bool fw_dumped;
+ bool fw_dump_mpi;
unsigned long fw_dump_cap_flags;
#define RISC_PAUSE_CMPL 0
#define DMA_SHUTDOWN_CMPL 1
@@ -4049,7 +4052,6 @@ struct qla_hw_data {
uint16_t product_id[4];
uint8_t model_number[16+1];
-#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
char model_desc[80];
uint8_t adapter_id[16+1];
@@ -4089,22 +4091,28 @@ struct qla_hw_data {
uint32_t fdt_protect_sec_cmd;
uint32_t fdt_wrt_sts_reg_cmd;
- uint32_t flt_region_flt;
- uint32_t flt_region_fdt;
- uint32_t flt_region_boot;
- uint32_t flt_region_boot_sec;
- uint32_t flt_region_fw;
- uint32_t flt_region_fw_sec;
- uint32_t flt_region_vpd_nvram;
- uint32_t flt_region_vpd;
- uint32_t flt_region_vpd_sec;
- uint32_t flt_region_nvram;
- uint32_t flt_region_npiv_conf;
- uint32_t flt_region_gold_fw;
- uint32_t flt_region_fcp_prio;
- uint32_t flt_region_bootload;
- uint32_t flt_region_img_status_pri;
- uint32_t flt_region_img_status_sec;
+ struct {
+ uint32_t flt_region_flt;
+ uint32_t flt_region_fdt;
+ uint32_t flt_region_boot;
+ uint32_t flt_region_boot_sec;
+ uint32_t flt_region_fw;
+ uint32_t flt_region_fw_sec;
+ uint32_t flt_region_vpd_nvram;
+ uint32_t flt_region_vpd_nvram_sec;
+ uint32_t flt_region_vpd;
+ uint32_t flt_region_vpd_sec;
+ uint32_t flt_region_nvram;
+ uint32_t flt_region_nvram_sec;
+ uint32_t flt_region_npiv_conf;
+ uint32_t flt_region_gold_fw;
+ uint32_t flt_region_fcp_prio;
+ uint32_t flt_region_bootload;
+ uint32_t flt_region_img_status_pri;
+ uint32_t flt_region_img_status_sec;
+ uint32_t flt_region_aux_img_status_pri;
+ uint32_t flt_region_aux_img_status_sec;
+ };
uint8_t active_image;
/* Needed for BEACON */
@@ -4197,8 +4205,8 @@ struct qla_hw_data {
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
uint32_t fw_ability_mask;
- uint16_t min_link_speed;
- uint16_t max_speed_sup;
+ uint16_t min_supported_speed;
+ uint16_t max_supported_speed;
/* DMA pool for the DIF bundling buffers */
struct dma_pool *dif_bundl_pool;
@@ -4225,9 +4233,20 @@ struct qla_hw_data {
atomic_t zio_threshold;
uint16_t last_zio_threshold;
+
#define DEFAULT_ZIO_THRESHOLD 5
};
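+
+/*
+ * Active flash image/region selections; see QLA27XX_DEFAULT_IMAGE,
+ * QLA27XX_PRIMARY_IMAGE and QLA27XX_SECONDARY_IMAGE.
+ */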
+struct active_regions {
+ uint8_t global;
+ struct {
+ uint8_t board_config;
+ uint8_t vpd_nvram;
+ uint8_t npiv_config_0_1;
+ uint8_t npiv_config_2_3;
+ } aux;
+};
+
#define FW_ABILITY_MAX_SPEED_MASK 0xFUL
#define FW_ABILITY_MAX_SPEED_16G 0x0
#define FW_ABILITY_MAX_SPEED_32G 0x1
@@ -4315,6 +4334,7 @@ typedef struct scsi_qla_host {
#define N2N_LOGIN_NEEDED 30
#define IOCB_WORK_ACTIVE 31
#define SET_ZIO_THRESHOLD_NEEDED 32
+#define ISP_ABORT_TO_ROM 33
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4429,7 +4449,7 @@ typedef struct scsi_qla_host {
int fcport_count;
wait_queue_head_t fcport_waitQ;
wait_queue_head_t vref_waitq;
- uint8_t min_link_speed_feat;
+ uint8_t min_supported_speed;
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
@@ -4441,14 +4461,21 @@ typedef struct scsi_qla_host {
struct qla27xx_image_status {
uint8_t image_status_mask;
- uint16_t generation_number;
- uint8_t reserved[3];
- uint8_t ver_minor;
+ uint16_t generation;
uint8_t ver_major;
+ uint8_t ver_minor;
+ uint8_t bitmap; /* 28xx only */
+ uint8_t reserved[2];
uint32_t checksum;
uint32_t signature;
} __packed;
+/* 28xx aux image status bitmap values */
+#define QLA28XX_AUX_IMG_BOARD_CONFIG BIT_0
+#define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1
+#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2
+#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3
+
#define SET_VP_IDX 1
#define SET_AL_PA 2
#define RESET_VP_IDX 3
@@ -4495,6 +4522,24 @@ struct qla2_sgx {
} \
}
+
+#define SFUB_CHECKSUM_SIZE 4
+
+struct secure_flash_update_block {
+ uint32_t block_info;
+ uint32_t signature_lo;
+ uint32_t signature_hi;
+ uint32_t signature_upper[0x3e];
+};
+
+struct secure_flash_update_block_pk {
+ uint32_t block_info;
+ uint32_t signature_lo;
+ uint32_t signature_hi;
+ uint32_t signature_upper[0x3e];
+ uint32_t public_key[0x41];
+};
+
/*
* Macros to help code, maintain, etc.
*/
@@ -4595,6 +4640,7 @@ struct qla2_sgx {
#define OPTROM_SIZE_81XX 0x400000
#define OPTROM_SIZE_82XX 0x800000
#define OPTROM_SIZE_83XX 0x1000000
+#define OPTROM_SIZE_28XX 0x2000000
#define OPTROM_BURST_SIZE 0x1000
#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
@@ -4691,10 +4737,13 @@ struct sff_8247_a0 {
#define AUTO_DETECT_SFP_SUPPORT(_vha)\
(ql2xautodetectsfp && !_vha->vp_idx && \
(IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
- IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw)))
+ IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw) || \
+ IS_QLA28XX(_vha->hw)))
+
+#define FLASH_SEMAPHORE_REGISTER_ADDR 0x00101016
#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
- (IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+ (IS_QLA27XX(_ha) || IS_QLA28XX(_ha) || IS_QLA83XX(_ha)))
#define SAVE_TOPO(_ha) { \
if (_ha->current_topology) \
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 5819a45ac5ef..a432caebefec 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -41,6 +41,7 @@ static int
qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
{
scsi_qla_host_t *vha = inode->i_private;
+
return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
}
@@ -161,6 +162,7 @@ static int
qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
{
struct scsi_qla_host *vha = inode->i_private;
+
return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
}
@@ -250,6 +252,7 @@ static int
qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
{
struct scsi_qla_host *vha = inode->i_private;
+
return single_open(file, qla_dfs_tgt_counters_show, vha);
}
@@ -386,7 +389,7 @@ qla_dfs_naqp_write(struct file *file, const char __user *buffer,
int rc = 0;
unsigned long num_act_qp;
- if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) {
+ if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
pr_err("host%ld: this adapter does not support Multi Q.",
vha->host_no);
return -EINVAL;
@@ -438,7 +441,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
- !IS_QLA27XX(ha))
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
goto out;
if (!ha->fce)
goto out;
@@ -474,7 +477,7 @@ create_nodes:
ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
0400, ha->dfs_dir, vha, &dfs_naqp_ops);
out:
diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h
new file mode 100644
index 000000000000..7479924ba422
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_dsd.h
@@ -0,0 +1,30 @@
+#ifndef _QLA_DSD_H_
+#define _QLA_DSD_H_
+
+/* 32-bit data segment descriptor (8 bytes) */
+struct dsd32 {
+ __le32 address;
+ __le32 length;
+};
+
+static inline void append_dsd32(struct dsd32 **dsd, struct scatterlist *sg)
+{
+ put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
+ put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length);
+ (*dsd)++;
+}
+
+/* 64-bit data segment descriptor (12 bytes) */
+struct dsd64 {
+ __le64 address;
+ __le32 length;
+} __packed;
+
+static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg)
+{
+ put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
+ put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length);
+ (*dsd)++;
+}
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 50c1e6c62e31..df079a8c2b33 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -10,6 +10,8 @@
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
+#include "qla_dsd.h"
+
#define MBS_CHECKSUM_ERROR 0x4010
#define MBS_INVALID_PRODUCT_KEY 0x4020
@@ -339,9 +341,9 @@ struct init_cb_24xx {
uint16_t prio_request_q_length;
- uint32_t request_q_address[2];
- uint32_t response_q_address[2];
- uint32_t prio_request_q_address[2];
+ __le64 request_q_address __packed;
+ __le64 response_q_address __packed;
+ __le64 prio_request_q_address __packed;
uint16_t msix;
uint16_t msix_atio;
@@ -349,7 +351,7 @@ struct init_cb_24xx {
uint16_t atio_q_inpointer;
uint16_t atio_q_length;
- uint32_t atio_q_address[2];
+ __le64 atio_q_address __packed;
uint16_t interrupt_delay_timer; /* 100us increments. */
uint16_t login_timeout;
@@ -453,7 +455,7 @@ struct cmd_bidir {
#define BD_WRITE_DATA BIT_0
uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
- uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
+ __le64 fcp_cmnd_dseg_address __packed;/* Data segment address. */
uint16_t reserved[2]; /* Reserved */
@@ -463,8 +465,7 @@ struct cmd_bidir {
uint8_t port_id[3]; /* PortID of destination port.*/
uint8_t vp_index;
- uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
- uint16_t fcp_data_dseg_len; /* Data segment length. */
+ struct dsd64 fcp_dsd;
};
#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */
@@ -491,18 +492,18 @@ struct cmd_type_6 {
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
- uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
-
- uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */
+ uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ /* Data segment address. */
+ __le64 fcp_cmnd_dseg_address __packed;
+ /* Data segment address. */
+ __le64 fcp_rsp_dseg_address __packed;
uint32_t byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
- uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
- uint32_t fcp_data_dseg_len; /* Data segment length. */
+ struct dsd64 fcp_dsd;
};
#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */
@@ -548,8 +549,7 @@ struct cmd_type_7 {
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
- uint32_t dseg_0_address[2]; /* Data segment 0 address. */
- uint32_t dseg_0_len; /* Data segment 0 length. */
+ struct dsd64 dsd;
};
#define COMMAND_TYPE_CRC_2 0x6A /* Command Type CRC_2 (Type 6)
@@ -573,17 +573,17 @@ struct cmd_type_crc_2 {
uint16_t control_flags; /* Control flags. */
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
- uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
-
- uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */
+ uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le64 fcp_cmnd_dseg_address __packed;
+ /* Data segment address. */
+ __le64 fcp_rsp_dseg_address __packed;
uint32_t byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
- uint32_t crc_context_address[2]; /* Data segment address. */
+ __le64 crc_context_address __packed; /* Data segment address. */
uint16_t crc_context_len; /* Data segment length. */
uint16_t reserved_1; /* MUST be set to 0. */
};
@@ -717,10 +717,7 @@ struct ct_entry_24xx {
uint32_t rsp_byte_count;
uint32_t cmd_byte_count;
- uint32_t dseg_0_address[2]; /* Data segment 0 address. */
- uint32_t dseg_0_len; /* Data segment 0 length. */
- uint32_t dseg_1_address[2]; /* Data segment 1 address. */
- uint32_t dseg_1_len; /* Data segment 1 length. */
+ struct dsd64 dsd[2];
};
/*
@@ -767,9 +764,9 @@ struct els_entry_24xx {
uint32_t rx_byte_count;
uint32_t tx_byte_count;
- uint32_t tx_address[2]; /* Data segment 0 address. */
+ __le64 tx_address __packed; /* Data segment 0 address. */
uint32_t tx_len; /* Data segment 0 length. */
- uint32_t rx_address[2]; /* Data segment 1 address. */
+ __le64 rx_address __packed; /* Data segment 1 address. */
uint32_t rx_len; /* Data segment 1 length. */
};
@@ -1422,9 +1419,9 @@ struct vf_evfp_entry_24xx {
uint16_t control_flags;
uint32_t io_parameter_0;
uint32_t io_parameter_1;
- uint32_t tx_address[2]; /* Data segment 0 address. */
+ __le64 tx_address __packed; /* Data segment 0 address. */
uint32_t tx_len; /* Data segment 0 length. */
- uint32_t rx_address[2]; /* Data segment 1 address. */
+ __le64 rx_address __packed; /* Data segment 1 address. */
uint32_t rx_len; /* Data segment 1 length. */
};
@@ -1515,13 +1512,31 @@ struct qla_flt_header {
#define FLT_REG_VPD_SEC_27XX_2 0xD8
#define FLT_REG_VPD_SEC_27XX_3 0xDA
+/* 28xx */
+#define FLT_REG_AUX_IMG_PRI_28XX 0x125
+#define FLT_REG_AUX_IMG_SEC_28XX 0x126
+#define FLT_REG_VPD_SEC_28XX_0 0x10C
+#define FLT_REG_VPD_SEC_28XX_1 0x10E
+#define FLT_REG_VPD_SEC_28XX_2 0x110
+#define FLT_REG_VPD_SEC_28XX_3 0x112
+#define FLT_REG_NVRAM_SEC_28XX_0 0x10D
+#define FLT_REG_NVRAM_SEC_28XX_1 0x10F
+#define FLT_REG_NVRAM_SEC_28XX_2 0x111
+#define FLT_REG_NVRAM_SEC_28XX_3 0x113
+
struct qla_flt_region {
- uint32_t code;
+ uint16_t code;
+ uint8_t attribute;
+ uint8_t reserved;
uint32_t size;
uint32_t start;
uint32_t end;
};
+#define FLT_REGION_SIZE 16
+#define FLT_MAX_REGIONS 0xFF
+#define FLT_REGIONS_SIZE (FLT_REGION_SIZE * FLT_MAX_REGIONS)
+
/* Flash NPIV Configuration Table ********************************************/
struct qla_npiv_header {
@@ -1588,8 +1603,7 @@ struct verify_chip_entry_84xx {
uint32_t fw_seq_size;
uint32_t relative_offset;
- uint32_t dseg_address[2];
- uint32_t dseg_length;
+ struct dsd64 dsd;
};
struct verify_chip_rsp_84xx {
@@ -1646,8 +1660,7 @@ struct access_chip_84xx {
uint32_t total_byte_cnt;
uint32_t reserved4;
- uint32_t dseg_address[2];
- uint32_t dseg_length;
+ struct dsd64 dsd;
};
struct access_chip_rsp_84xx {
@@ -1711,6 +1724,10 @@ struct access_chip_rsp_84xx {
#define LR_DIST_FW_SHIFT (LR_DIST_FW_POS - LR_DIST_NV_POS)
#define LR_DIST_FW_FIELD(x) ((x) << LR_DIST_FW_SHIFT & 0xf000)
+/* FAC semaphore defines */
+#define FAC_SEMAPHORE_UNLOCK 0
+#define FAC_SEMAPHORE_LOCK 1
+
struct nvram_81xx {
/* NVRAM header. */
uint8_t id[4];
@@ -1757,7 +1774,7 @@ struct nvram_81xx {
uint16_t reserved_6_3[14];
/* Offset 192. */
- uint8_t min_link_speed;
+ uint8_t min_supported_speed;
uint8_t reserved_7_0;
uint16_t reserved_7[31];
@@ -1911,15 +1928,15 @@ struct init_cb_81xx {
uint16_t prio_request_q_length;
- uint32_t request_q_address[2];
- uint32_t response_q_address[2];
- uint32_t prio_request_q_address[2];
+ __le64 request_q_address __packed;
+ __le64 response_q_address __packed;
+ __le64 prio_request_q_address __packed;
uint8_t reserved_4[8];
uint16_t atio_q_inpointer;
uint16_t atio_q_length;
- uint32_t atio_q_address[2];
+ __le64 atio_q_address __packed;
uint16_t interrupt_delay_timer; /* 100us increments. */
uint16_t login_timeout;
@@ -2005,6 +2022,8 @@ struct ex_init_cb_81xx {
#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
+#define FARX_ACCESS_FLASH_CONF_28XX 0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA_28XX 0x7F7D0000
/* FCP priority config defines *************************************/
/* operations */
@@ -2079,6 +2098,7 @@ struct qla_fcp_prio_cfg {
#define FA_NPIV_CONF1_ADDR_81 0xD2000
/* 83XX Flash locations -- occupies second 8MB region. */
-#define FA_FLASH_LAYOUT_ADDR_83 0xFC400
+#define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4)
+#define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4)
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 4eefe69ca807..bbe69ab5cf3f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -18,14 +18,14 @@ extern int qla2100_pci_config(struct scsi_qla_host *);
extern int qla2300_pci_config(struct scsi_qla_host *);
extern int qla24xx_pci_config(scsi_qla_host_t *);
extern int qla25xx_pci_config(scsi_qla_host_t *);
-extern void qla2x00_reset_chip(struct scsi_qla_host *);
-extern void qla24xx_reset_chip(struct scsi_qla_host *);
+extern int qla2x00_reset_chip(struct scsi_qla_host *);
+extern int qla24xx_reset_chip(struct scsi_qla_host *);
extern int qla2x00_chip_diag(struct scsi_qla_host *);
extern int qla24xx_chip_diag(struct scsi_qla_host *);
extern void qla2x00_config_rings(struct scsi_qla_host *);
extern void qla24xx_config_rings(struct scsi_qla_host *);
-extern void qla2x00_reset_adapter(struct scsi_qla_host *);
-extern void qla24xx_reset_adapter(struct scsi_qla_host *);
+extern int qla2x00_reset_adapter(struct scsi_qla_host *);
+extern int qla24xx_reset_adapter(struct scsi_qla_host *);
extern int qla2x00_nvram_config(struct scsi_qla_host *);
extern int qla24xx_nvram_config(struct scsi_qla_host *);
extern int qla81xx_nvram_config(struct scsi_qla_host *);
@@ -38,8 +38,7 @@ extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
extern int qla2x00_loop_resync(scsi_qla_host_t *);
-
-extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
+extern void qla2x00_clear_loop_id(fc_port_t *fcport);
extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
@@ -80,6 +79,7 @@ int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
+extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state);
extern fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
@@ -93,7 +93,6 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
-extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
int, int, bool);
extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
@@ -108,6 +107,11 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+extern void qla28xx_get_aux_images(struct scsi_qla_host *,
+ struct active_regions *);
+extern void qla27xx_get_active_image(struct scsi_qla_host *,
+ struct active_regions *);
+
void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -118,6 +122,7 @@ int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
void qla_rscn_replay(fc_port_t *fcport);
+extern bool qla24xx_risc_firmware_invalid(uint32_t *);
/*
* Global Data in qla_os.c source file.
@@ -215,7 +220,6 @@ extern void qla24xx_sched_upd_fcport(fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_async_abort_cmd(srb_t *, bool);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
@@ -238,7 +242,7 @@ extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
struct vp_rpt_id_entry_24xx *);
extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
-extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
+extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_free_dma(void *);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
@@ -276,21 +280,20 @@ extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern int qla2xxx_dif_start_scsi_mq(srb_t *);
+extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tc_param *);
+ struct dsd64 *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tc_param *);
+ struct dsd64 *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
-extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
- struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -466,6 +469,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
extern int
qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla81xx_fac_semaphore_access(scsi_qla_host_t *, int);
+
extern int
qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
@@ -511,6 +516,14 @@ extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
+extern int qla28xx_secure_flash_update(scsi_qla_host_t *, uint16_t, uint16_t,
+ uint32_t, dma_addr_t, uint32_t);
+
+extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t,
+ uint32_t *);
+extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t,
+ uint32_t);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -542,19 +555,20 @@ fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
*/
extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
- uint32_t, uint32_t);
-extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
-extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
-extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
-extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
-extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
-extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t, uint32_t);
+extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+ uint32_t);
+extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+ uint32_t);
+extern int qla2x00_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+ uint32_t);
+extern int qla24xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+ uint32_t);
+extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+ uint32_t);
+extern int qla25xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+ uint32_t);
+
extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
@@ -574,18 +588,18 @@ extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
uint32_t, uint16_t *);
-extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla2x00_read_optrom_data(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
-extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla2x00_write_optrom_data(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
-extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla24xx_read_optrom_data(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
-extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla24xx_write_optrom_data(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
-extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla25xx_read_optrom_data(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
-extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
- uint8_t *, uint32_t, uint32_t);
+extern void *qla8044_read_optrom_data(struct scsi_qla_host *,
+ void *, uint32_t, uint32_t);
extern void qla8044_watchdog(struct scsi_qla_host *vha);
extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
@@ -610,20 +624,13 @@ extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
extern void qla8044_fw_dump(scsi_qla_host_t *, int);
extern void qla27xx_fwdump(scsi_qla_host_t *, int);
-extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
extern int qla27xx_fwdt_template_valid(void *);
extern ulong qla27xx_fwdt_template_size(void *);
-extern const void *qla27xx_fwdt_template_default(void);
-extern ulong qla27xx_fwdt_template_default_size(void);
-
-extern void qla2x00_dump_regs(scsi_qla_host_t *);
-extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
-extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
-extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
-extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
- uint8_t *, uint32_t);
-extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
+extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
+extern void ql_dump_regs(uint, scsi_qla_host_t *, uint);
+extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, void *, uint);
/*
* Global Function Prototypes in qla_gs.c source file.
*/
@@ -722,7 +729,7 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
/* qlafx00 related functions */
extern int qlafx00_pci_config(struct scsi_qla_host *);
extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
-extern void qlafx00_soft_reset(scsi_qla_host_t *);
+extern int qlafx00_soft_reset(scsi_qla_host_t *);
extern int qlafx00_chip_diag(scsi_qla_host_t *);
extern void qlafx00_config_rings(struct scsi_qla_host *);
extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
@@ -765,16 +772,16 @@ extern int qla82xx_pci_region_offset(struct pci_dev *, int);
extern int qla82xx_iospace_config(struct qla_hw_data *);
/* Initialization related functions */
-extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern int qla82xx_reset_chip(struct scsi_qla_host *);
extern void qla82xx_config_rings(struct scsi_qla_host *);
extern void qla82xx_watchdog(scsi_qla_host_t *);
extern int qla82xx_start_firmware(scsi_qla_host_t *);
/* Firmware and flash related functions */
extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
-extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla82xx_read_optrom_data(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
-extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
/* Mailbox related functions */
@@ -870,7 +877,7 @@ extern void qla8044_clear_drv_active(struct qla_hw_data *);
void qla8044_get_minidump(struct scsi_qla_host *vha);
int qla8044_collect_md_data(struct scsi_qla_host *vha);
extern int qla8044_md_get_template(scsi_qla_host_t *);
-extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla8044_write_optrom_data(struct scsi_qla_host *, void *,
uint32_t, uint32_t);
extern irqreturn_t qla8044_intr_handler(int, void *);
extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index c6fdad12428e..9f58e591666d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -45,13 +45,11 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
- ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
- ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
- ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+ put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
+ ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
- ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
- ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
- ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+ put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
+ ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
vha->qla_stats.control_requests++;
@@ -83,13 +81,11 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
- ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
- ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
- ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+ put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
+ ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
- ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
- ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
- ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
+ ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
ct_pkt->vp_index = vha->vp_idx;
vha->qla_stats.control_requests++;
@@ -152,8 +148,8 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
vha->d_id.b.area, vha->d_id.b.al_pa,
comp_status, ct_rsp->header.response);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
- 0x2078, (uint8_t *)&ct_rsp->header,
- sizeof(struct ct_rsp_hdr));
+ 0x2078, ct_rsp,
+ offsetof(typeof(*ct_rsp), rsp));
rval = QLA_INVALID_COMMAND;
} else
rval = QLA_SUCCESS;
@@ -1000,8 +996,7 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
wc = data_size / 2; /* Size in 16bit words. */
sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
- sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
- sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
+ put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
wc = (data_size - 16) / 4; /* Size in 32bit words. */
@@ -1385,6 +1380,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
int ret, rval;
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
+
ret = QLA_SUCCESS;
if (vha->flags.management_server_logged_in)
return ret;
@@ -1423,6 +1419,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
{
ms_iocb_entry_t *ms_pkt;
struct qla_hw_data *ha = vha->hw;
+
ms_pkt = ha->ms_iocb;
memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
@@ -1436,13 +1433,11 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
ms_pkt->req_bytecount = cpu_to_le32(req_size);
- ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
- ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+ put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
+ ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
- ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
- ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+ put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
+ ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
return ms_pkt;
}
@@ -1474,13 +1469,11 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
- ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
- ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+ put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
+ ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
- ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
- ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
+ ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
ct_pkt->vp_index = vha->vp_idx;
return ct_pkt;
@@ -1495,10 +1488,10 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
if (IS_FWI2_CAPABLE(ha)) {
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
- ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+ ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
} else {
ms_pkt->req_bytecount = cpu_to_le32(req_size);
- ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+ ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
}
return ms_pkt;
@@ -1794,7 +1787,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
if (IS_CNA_CAPABLE(ha))
eiter->a.sup_speed = cpu_to_be32(
FDMI_PORT_SPEED_10GB);
- else if (IS_QLA27XX(ha))
+ else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
eiter->a.sup_speed = cpu_to_be32(
FDMI_PORT_SPEED_32GB|
FDMI_PORT_SPEED_16GB|
@@ -2373,7 +2366,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
if (IS_CNA_CAPABLE(ha))
eiter->a.sup_speed = cpu_to_be32(
FDMI_PORT_SPEED_10GB);
- else if (IS_QLA27XX(ha))
+ else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
eiter->a.sup_speed = cpu_to_be32(
FDMI_PORT_SPEED_32GB|
FDMI_PORT_SPEED_16GB|
@@ -2446,7 +2439,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
eiter->len = cpu_to_be16(4 + 4);
eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size):
+ le16_to_cpu(icb24->frame_payload_size) :
le16_to_cpu(ha->init_cb->frame_payload_size);
eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
size += 4 + 4;
@@ -2783,6 +2776,31 @@ qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
return &p->p.req;
}
+static uint16_t
+qla2x00_port_speed_capability(uint16_t speed)
+{
+ switch (speed) {
+ case BIT_15:
+ return PORT_SPEED_1GB;
+ case BIT_14:
+ return PORT_SPEED_2GB;
+ case BIT_13:
+ return PORT_SPEED_4GB;
+ case BIT_12:
+ return PORT_SPEED_10GB;
+ case BIT_11:
+ return PORT_SPEED_8GB;
+ case BIT_10:
+ return PORT_SPEED_16GB;
+ case BIT_8:
+ return PORT_SPEED_32GB;
+ case BIT_7:
+ return PORT_SPEED_64GB;
+ default:
+ return PORT_SPEED_UNKNOWN;
+ }
+}
+
/**
* qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
* @vha: HA context
@@ -2855,31 +2873,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
}
rval = QLA_FUNCTION_FAILED;
} else {
- /* Save port-speed */
- switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
- case BIT_15:
- list[i].fp_speed = PORT_SPEED_1GB;
- break;
- case BIT_14:
- list[i].fp_speed = PORT_SPEED_2GB;
- break;
- case BIT_13:
- list[i].fp_speed = PORT_SPEED_4GB;
- break;
- case BIT_12:
- list[i].fp_speed = PORT_SPEED_10GB;
- break;
- case BIT_11:
- list[i].fp_speed = PORT_SPEED_8GB;
- break;
- case BIT_10:
- list[i].fp_speed = PORT_SPEED_16GB;
- break;
- case BIT_8:
- list[i].fp_speed = PORT_SPEED_32GB;
- break;
- }
-
+ list->fp_speed = qla2x00_port_speed_capability(
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
ql_dbg(ql_dbg_disc, vha, 0x205b,
"GPSC ext entry - fpn "
"%8phN speeds=%04x speed=%04x.\n",
@@ -3031,6 +3026,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
if (res == QLA_FUNCTION_TIMEOUT)
return;
@@ -3048,29 +3045,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
goto done;
}
} else {
- switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
- case BIT_15:
- fcport->fp_speed = PORT_SPEED_1GB;
- break;
- case BIT_14:
- fcport->fp_speed = PORT_SPEED_2GB;
- break;
- case BIT_13:
- fcport->fp_speed = PORT_SPEED_4GB;
- break;
- case BIT_12:
- fcport->fp_speed = PORT_SPEED_10GB;
- break;
- case BIT_11:
- fcport->fp_speed = PORT_SPEED_8GB;
- break;
- case BIT_10:
- fcport->fp_speed = PORT_SPEED_16GB;
- break;
- case BIT_8:
- fcport->fp_speed = PORT_SPEED_32GB;
- break;
- }
+ fcport->fp_speed = qla2x00_port_speed_capability(
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
ql_dbg(ql_dbg_disc, vha, 0x2054,
"Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
@@ -4370,6 +4346,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0c700b140ce7..54772d4c377f 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -95,6 +95,79 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
return tmo;
}
+static void qla24xx_abort_iocb_timeout(void *data)
+{
+ srb_t *sp = data;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ abt->u.abt.comp_status = CS_TIMEOUT;
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+}
+
+static void qla24xx_abort_sp_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ if (del_timer(&sp->u.iocb_cmd.timer)) {
+ if (sp->flags & SRB_WAKEUP_ON_COMP)
+ complete(&abt->u.abt.comp);
+ else
+ sp->free(sp);
+ }
+}
+
+static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+{
+ scsi_qla_host_t *vha = cmd_sp->vha;
+ struct srb_iocb *abt_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+
+ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+ GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ abt_iocb = &sp->u.iocb_cmd;
+ sp->type = SRB_ABT_CMD;
+ sp->name = "abort";
+ sp->qpair = cmd_sp->qpair;
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
+
+ abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+ init_completion(&abt_iocb->u.abt.comp);
+ /* FW can send 2 x ABTS's timeout/20s */
+ qla2x00_init_timer(sp, 42);
+
+ abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
+
+ sp->done = qla24xx_abort_sp_done;
+
+ ql_dbg(ql_dbg_async, vha, 0x507c,
+ "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
+ cmd_sp->type);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ if (wait) {
+ wait_for_completion(&abt_iocb->u.abt.comp);
+ rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ } else {
+ goto done;
+ }
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
+
void
qla2x00_async_iocb_timeout(void *data)
{
@@ -514,6 +587,72 @@ done:
return rval;
}
+static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_FWI2_CAPABLE(ha))
+ return loop_id > NPH_LAST_HANDLE;
+
+ return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+ loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
+}
+
+/**
+ * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
+ * @vha: adapter state pointer.
+ * @dev: port structure pointer.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ rval = QLA_SUCCESS;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+
+ dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
+ if (dev->loop_id >= LOOPID_MAP_SIZE ||
+ qla2x00_is_reserved_id(vha, dev->loop_id)) {
+ dev->loop_id = FC_NO_LOOP_ID;
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ set_bit(dev->loop_id, ha->loop_id_map);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ if (rval == QLA_SUCCESS)
+ ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+ "Assigning new loopid=%x, portid=%x.\n",
+ dev->loop_id, dev->d_id.b24);
+ else
+ ql_log(ql_log_warn, dev->vha, 0x2087,
+ "No loop_id's available, portid=%x.\n",
+ dev->d_id.b24);
+
+ return rval;
+}
+
+void qla2x00_clear_loop_id(fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
+ return;
+
+ clear_bit(fcport->loop_id, ha->loop_id_map);
+ fcport->loop_id = FC_NO_LOOP_ID;
+}
+
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
struct event_arg *ea)
{
@@ -1482,6 +1621,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
struct qla_work_evt *e;
+
e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
if (!e)
return QLA_FUNCTION_FAILED;
@@ -1558,6 +1698,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
{
unsigned long flags;
+
fcport = qla2x00_find_fcport_by_nportid
(vha, &ea->id, 1);
if (fcport) {
@@ -1620,21 +1761,21 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
*/
void qla_rscn_replay(fc_port_t *fcport)
{
- struct event_arg ea;
+ struct event_arg ea;
- switch (fcport->disc_state) {
- case DSC_DELETE_PEND:
- return;
- default:
- break;
- }
+ switch (fcport->disc_state) {
+ case DSC_DELETE_PEND:
+ return;
+ default:
+ break;
+ }
- if (fcport->scan_needed) {
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_RSCN;
- ea.id = fcport->d_id;
- ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
- qla2x00_fcport_event_handler(fcport->vha, &ea);
+ if (fcport->scan_needed) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RSCN;
+ ea.id = fcport->d_id;
+ ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
+ qla2x00_fcport_event_handler(fcport->vha, &ea);
}
}
@@ -1717,82 +1858,6 @@ done:
return rval;
}
-static void
-qla24xx_abort_iocb_timeout(void *data)
-{
- srb_t *sp = data;
- struct srb_iocb *abt = &sp->u.iocb_cmd;
-
- abt->u.abt.comp_status = CS_TIMEOUT;
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
-}
-
-static void
-qla24xx_abort_sp_done(void *ptr, int res)
-{
- srb_t *sp = ptr;
- struct srb_iocb *abt = &sp->u.iocb_cmd;
-
- if (del_timer(&sp->u.iocb_cmd.timer)) {
- if (sp->flags & SRB_WAKEUP_ON_COMP)
- complete(&abt->u.abt.comp);
- else
- sp->free(sp);
- }
-}
-
-int
-qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
-{
- scsi_qla_host_t *vha = cmd_sp->vha;
- struct srb_iocb *abt_iocb;
- srb_t *sp;
- int rval = QLA_FUNCTION_FAILED;
-
- sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
- GFP_ATOMIC);
- if (!sp)
- goto done;
-
- abt_iocb = &sp->u.iocb_cmd;
- sp->type = SRB_ABT_CMD;
- sp->name = "abort";
- sp->qpair = cmd_sp->qpair;
- if (wait)
- sp->flags = SRB_WAKEUP_ON_COMP;
-
- abt_iocb->timeout = qla24xx_abort_iocb_timeout;
- init_completion(&abt_iocb->u.abt.comp);
- /* FW can send 2 x ABTS's timeout/20s */
- qla2x00_init_timer(sp, 42);
-
- abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
- abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
-
- sp->done = qla24xx_abort_sp_done;
-
- ql_dbg(ql_dbg_async, vha, 0x507c,
- "Abort command issued - hdl=%x, type=%x\n",
- cmd_sp->handle, cmd_sp->type);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- if (wait) {
- wait_for_completion(&abt_iocb->u.abt.comp);
- rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
- } else {
- goto done;
- }
-
-done_free_sp:
- sp->free(sp);
-done:
- return rval;
-}
-
int
qla24xx_async_abort_command(srb_t *sp)
{
@@ -2102,6 +2167,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
@@ -2136,6 +2202,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ha->isp_ops->reset_chip(vha);
+ /* Check for secure flash support */
+ if (IS_QLA28XX(ha)) {
+ if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
+ ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
+ ha->flags.secure_adapter = 1;
+ }
+ }
+
+
rval = qla2xxx_get_flash_info(vha);
if (rval) {
ql_log(ql_log_fatal, vha, 0x004f,
@@ -2452,7 +2527,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-void
+int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
@@ -2460,9 +2535,10 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t cnt;
uint16_t cmd;
+ int rval = QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(ha->pdev)))
- return;
+ return rval;
ha->isp_ops->disable_intrs(ha);
@@ -2588,6 +2664,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
}
/**
@@ -2828,14 +2906,15 @@ acquired:
*
* Returns 0 on success.
*/
-void
+int
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_FUNCTION_FAILED;
if (pci_channel_offline(ha->pdev) &&
ha->flags.pci_channel_io_perm_failure) {
- return;
+ return rval;
}
ha->isp_ops->disable_intrs(ha);
@@ -2843,7 +2922,9 @@ qla24xx_reset_chip(scsi_qla_host_t *vha)
qla25xx_manipulate_risc_semaphore(vha);
/* Perform RISC reset. */
- qla24xx_reset_risc(vha);
+ rval = qla24xx_reset_risc(vha);
+
+ return rval;
}
/**
@@ -3018,7 +3099,7 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
if (IS_FWI2_CAPABLE(ha)) {
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
- !IS_QLA27XX(ha))
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
goto try_eft;
if (ha->fce)
@@ -3089,12 +3170,15 @@ eft_err:
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
+ int rval;
uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
eft_size, fce_size, mq_size;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
struct qla2xxx_fw_dump *fw_dump;
+ dma_addr_t tc_dma;
+ void *tc;
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
req_q_size = rsp_q_size = 0;
@@ -3106,7 +3190,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -3118,40 +3202,72 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
+ !IS_QLA28XX(ha))
mq_size = sizeof(struct qla2xxx_mq_chain);
/*
- * Allocate maximum buffer size for all queues.
+ * Allocate maximum buffer size for all queues - Q0.
* Resizing must be done at end-of-dump processing.
*/
- mq_size += ha->max_req_queues *
+ mq_size += (ha->max_req_queues - 1) *
(req->length * sizeof(request_t));
- mq_size += ha->max_rsp_queues *
+ mq_size += (ha->max_rsp_queues - 1) *
(rsp->length * sizeof(response_t));
}
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
- !IS_QLA27XX(ha))
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
goto try_eft;
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
try_eft:
+ if (ha->eft)
+ dma_free_coherent(&ha->pdev->dev,
+ EFT_SIZE, ha->eft, ha->eft_dma);
+
+ /* Allocate memory for Extended Trace Buffer. */
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ ql_log(ql_log_warn, vha, 0x00c1,
+ "Unable to allocate (%d KB) for EFT.\n",
+ EFT_SIZE / 1024);
+ goto allocate;
+ }
+
+ rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x00c2,
+ "Unable to initialize EFT (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
+ tc_dma);
+ }
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
eft_size = EFT_SIZE;
}
- if (IS_QLA27XX(ha)) {
- if (!ha->fw_dump_template) {
- ql_log(ql_log_warn, vha, 0x00ba,
- "Failed missing fwdump template\n");
- return;
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ struct fwdt *fwdt = ha->fwdt;
+ uint j;
+
+ for (j = 0; j < 2; j++, fwdt++) {
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0x00ba,
+ "-> fwdt%u no template\n", j);
+ continue;
+ }
+ ql_dbg(ql_dbg_init, vha, 0x00fa,
+ "-> fwdt%u calculating fwdump size...\n", j);
+ fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
+ vha, fwdt->template);
+ ql_dbg(ql_dbg_init, vha, 0x00fa,
+ "-> fwdt%u calculated fwdump size = %#lx bytes\n",
+ j, fwdt->dump_size);
+ dump_size += fwdt->dump_size;
}
- dump_size = qla27xx_fwdt_calculate_dump_size(vha);
- ql_dbg(ql_dbg_init, vha, 0x00fa,
- "-> allocating fwdump (%x bytes)...\n", dump_size);
goto allocate;
}
@@ -3170,42 +3286,66 @@ try_eft:
ha->exlogin_size;
allocate:
- if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
+ if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
+
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
+ __func__, dump_size, ha->fw_dump_len,
+ ha->fw_dump_alloc_len);
+
fw_dump = vmalloc(dump_size);
if (!fw_dump) {
ql_log(ql_log_warn, vha, 0x00c4,
"Unable to allocate (%d KB) for firmware dump.\n",
dump_size / 1024);
} else {
- if (ha->fw_dump)
+ mutex_lock(&ha->optrom_mutex);
+ if (ha->fw_dumped) {
+ memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
vfree(ha->fw_dump);
- ha->fw_dump = fw_dump;
-
- ha->fw_dump_len = dump_size;
- ql_dbg(ql_dbg_init, vha, 0x00c5,
- "Allocated (%d KB) for firmware dump.\n",
- dump_size / 1024);
-
- if (IS_QLA27XX(ha))
- return;
-
- ha->fw_dump->signature[0] = 'Q';
- ha->fw_dump->signature[1] = 'L';
- ha->fw_dump->signature[2] = 'G';
- ha->fw_dump->signature[3] = 'C';
- ha->fw_dump->version = htonl(1);
-
- ha->fw_dump->fixed_size = htonl(fixed_size);
- ha->fw_dump->mem_size = htonl(mem_size);
- ha->fw_dump->req_q_size = htonl(req_q_size);
- ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
-
- ha->fw_dump->eft_size = htonl(eft_size);
- ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
- ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+ ha->fw_dump = fw_dump;
+ ha->fw_dump_alloc_len = dump_size;
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "Re-Allocated (%d KB) and save firmware dump.\n",
+ dump_size / 1024);
+ } else {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ ha->fw_dump = fw_dump;
+
+ ha->fw_dump_len = ha->fw_dump_alloc_len =
+ dump_size;
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return;
+ }
- ha->fw_dump->header_size =
- htonl(offsetof(struct qla2xxx_fw_dump, isp));
+ ha->fw_dump->signature[0] = 'Q';
+ ha->fw_dump->signature[1] = 'L';
+ ha->fw_dump->signature[2] = 'G';
+ ha->fw_dump->signature[3] = 'C';
+ ha->fw_dump->version = htonl(1);
+
+ ha->fw_dump->fixed_size = htonl(fixed_size);
+ ha->fw_dump->mem_size = htonl(mem_size);
+ ha->fw_dump->req_q_size = htonl(req_q_size);
+ ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+ ha->fw_dump->eft_size = htonl(eft_size);
+ ha->fw_dump->eft_addr_l =
+ htonl(LSD(ha->eft_dma));
+ ha->fw_dump->eft_addr_h =
+ htonl(MSD(ha->eft_dma));
+
+ ha->fw_dump->header_size =
+ htonl(offsetof
+ (struct qla2xxx_fw_dump, isp));
+ }
+ mutex_unlock(&ha->optrom_mutex);
}
}
}
@@ -3498,7 +3638,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS) {
qla24xx_detect_sfp(vha);
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) &&
(ha->zio_mode == QLA_ZIO_MODE_6))
qla27xx_set_zio_threshold(vha,
ha->last_zio_threshold);
@@ -3570,7 +3711,7 @@ enable_82xx_npiv:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
- if (IS_QLA27XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flags.fac_supported = 1;
else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
uint32_t size;
@@ -3585,7 +3726,8 @@ enable_82xx_npiv:
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
ha->flags.fac_supported = 0;
rval = QLA_SUCCESS;
}
@@ -3647,8 +3789,7 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
"Serial link options.\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
- (uint8_t *)&ha->fw_seriallink_options,
- sizeof(ha->fw_seriallink_options));
+ ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -3738,7 +3879,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
/* Move PUREX, ABTS RX & RIDA to ATIOQ */
if (ql2xmvasynctoatio &&
- (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha))
ha->fw_options[2] |= BIT_11;
@@ -3746,7 +3887,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] &= ~BIT_11;
}
- if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
/*
* Tell FW to track each exchange to prevent
* driver from using stale exchange.
@@ -3799,10 +3941,8 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
ha->init_cb->response_q_inpointer = cpu_to_le16(0);
ha->init_cb->request_q_length = cpu_to_le16(req->length);
ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
- ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
- ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
- ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
- ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+ put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
+ put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -3829,21 +3969,19 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_inpointer = cpu_to_le16(0);
icb->request_q_length = cpu_to_le16(req->length);
icb->response_q_length = cpu_to_le16(rsp->length);
- icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
- icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
- icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
- icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+ put_unaligned_le64(req->dma, &icb->request_q_address);
+ put_unaligned_le64(rsp->dma, &icb->response_q_address);
/* Setup ATIO queue dma pointers for target mode */
icb->atio_q_inpointer = cpu_to_le16(0);
icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
- icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
- icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+ put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
if (IS_SHADOW_REG_CAPABLE(ha))
icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
@@ -4266,11 +4404,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
{
char *st, *en;
uint16_t index;
+ uint64_t zero[2] = { 0 };
struct qla_hw_data *ha = vha->hw;
int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
- if (memcmp(model, BINZERO, len) != 0) {
+ if (len > sizeof(zero))
+ len = sizeof(zero);
+ if (memcmp(model, &zero, len) != 0) {
strncpy(ha->model_number, model, len);
st = en = ha->model_number;
en += len - 1;
@@ -4357,7 +4498,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
rval = QLA_SUCCESS;
/* Determine NVRAM starting address. */
- ha->nvram_size = sizeof(nvram_t);
+ ha->nvram_size = sizeof(*nv);
ha->nvram_base = 0;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
@@ -4371,16 +4512,15 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
"Contents of NVRAM.\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
- (uint8_t *)nv, ha->nvram_size);
+ nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
- if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
- nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
+ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+ nv->nvram_version < 1) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x0064,
- "Inconsistent NVRAM "
- "detected: checksum=0x%x id=%c version=0x%x.\n",
- chksum, nv->id[0], nv->nvram_version);
+ "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
+ chksum, nv->id, nv->nvram_version);
ql_log(ql_log_warn, vha, 0x0065,
"Falling back to "
"functioning (yet invalid -- WWPN) defaults.\n");
@@ -4629,7 +4769,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
ha->zio_mode = icb->add_firmware_options[0] &
(BIT_3 | BIT_2 | BIT_1 | BIT_0);
ha->zio_timer = icb->interrupt_delay_timer ?
- icb->interrupt_delay_timer: 2;
+ icb->interrupt_delay_timer : 2;
}
icb->add_firmware_options[0] &=
~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
@@ -4662,7 +4802,7 @@ qla2x00_rport_del(void *data)
unsigned long flags;
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
- rport = fcport->drport ? fcport->drport: fcport->rport;
+ rport = fcport->drport ? fcport->drport : fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
if (rport) {
@@ -4675,6 +4815,23 @@ qla2x00_rport_del(void *data)
}
}
+void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
+{
+ int old_state;
+
+ old_state = atomic_read(&fcport->state);
+ atomic_set(&fcport->state, state);
+
+ /* Don't print state transitions during initial allocation of fcport */
+ if (old_state && old_state != state) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
+ "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
+ fcport->port_name, port_state_str[old_state],
+ port_state_str[state], fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ }
+}
+
/**
* qla2x00_alloc_fcport() - Allocate a generic fcport.
* @vha: HA context
@@ -4741,6 +4898,8 @@ qla2x00_free_fcport(fc_port_t *fcport)
fcport->ct_desc.ct_sns = NULL;
}
+ list_del(&fcport->list);
+ qla2x00_clear_loop_id(fcport);
kfree(fcport);
}
@@ -4762,6 +4921,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
int rval;
unsigned long flags, save_flags;
struct qla_hw_data *ha = vha->hw;
+
rval = QLA_SUCCESS;
/* Get Initiator ID */
@@ -4943,8 +5103,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_disc, vha, 0x2011,
"Entries in ID list (%d).\n", entries);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
- (uint8_t *)ha->gid_list,
- entries * sizeof(struct gid_list_info));
+ ha->gid_list, entries * sizeof(*ha->gid_list));
if (entries == 0) {
spin_lock_irqsave(&vha->work_lock, flags);
@@ -5194,16 +5353,23 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
rport->supported_classes = fcport->supported_classes;
- rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
if (fcport->port_type == FCT_INITIATOR)
- rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
if (fcport->port_type == FCT_TARGET)
- rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
+ if (fcport->port_type & FCT_NVME_INITIATOR)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
+ if (fcport->port_type & FCT_NVME_TARGET)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
+ if (fcport->port_type & FCT_NVME_DISCOVERY)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
ql_dbg(ql_dbg_disc, vha, 0x20ee,
"%s %8phN. rport %p is %s mode\n",
__func__, fcport->port_name, rport,
- (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
+ (fcport->port_type == FCT_TARGET) ? "tgt" :
+ ((fcport->port_type & FCT_NVME) ? "nvme" :"ini"));
fc_remote_port_rolechg(rport, rport_ids.roles);
}
@@ -5778,55 +5944,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
return (rval);
}
-/*
- * qla2x00_find_new_loop_id
- * Scan through our port list and find a new usable loop ID.
- *
- * Input:
- * ha: adapter state pointer.
- * dev: port structure pointer.
- *
- * Returns:
- * qla2x00 local function return status code.
- *
- * Context:
- * Kernel context.
- */
-int
-qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
-{
- int rval;
- struct qla_hw_data *ha = vha->hw;
- unsigned long flags = 0;
-
- rval = QLA_SUCCESS;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
-
- dev->loop_id = find_first_zero_bit(ha->loop_id_map,
- LOOPID_MAP_SIZE);
- if (dev->loop_id >= LOOPID_MAP_SIZE ||
- qla2x00_is_reserved_id(vha, dev->loop_id)) {
- dev->loop_id = FC_NO_LOOP_ID;
- rval = QLA_FUNCTION_FAILED;
- } else
- set_bit(dev->loop_id, ha->loop_id_map);
-
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- if (rval == QLA_SUCCESS)
- ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
- "Assigning new loopid=%x, portid=%x.\n",
- dev->loop_id, dev->d_id.b24);
- else
- ql_log(ql_log_warn, dev->vha, 0x2087,
- "No loop_id's available, portid=%x.\n",
- dev->d_id.b24);
-
- return (rval);
-}
-
-
/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
@@ -6318,6 +6435,7 @@ qla83xx_initiating_reset(scsi_qla_host_t *vha)
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
} else {
const char *state = qla83xx_dev_state_to_string(dev_state);
+
ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
/* SV: XXX: Is timeout required here? */
@@ -6639,6 +6757,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
+ if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
+ ha->flags.chip_reset_done = 1;
+ vha->flags.online = 1;
+ status = 0;
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ return status;
+ }
+
if (IS_QLA8031(ha)) {
ql_dbg(ql_dbg_p3p, vha, 0xb05c,
"Clearing fcoe driver presence.\n");
@@ -6879,7 +7005,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
* Input:
* ha = adapter block pointer.
*/
-void
+int
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
@@ -6895,17 +7021,20 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha)
WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
RD_REG_WORD(&reg->hccr); /* PCI Posting. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
}
-void
+int
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ int rval = QLA_SUCCESS;
if (IS_P3P_TYPE(ha))
- return;
+ return rval;
vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
@@ -6919,6 +7048,8 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
if (IS_NOPOLLING_TYPE(ha))
ha->isp_ops->enable_intrs(ha);
+
+ return rval;
}
/* On sparc systems, obtain port and node WWN from firmware
@@ -6969,34 +7100,33 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->vpd_base = FA_NVRAM_VPD1_ADDR;
}
- ha->nvram_size = sizeof(struct nvram_24xx);
+ ha->nvram_size = sizeof(*nv);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
- ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
+ ha->isp_ops->read_nvram(vha, ha->vpd,
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
/* Get NVRAM data into cache and calculate checksum. */
dptr = (uint32_t *)nv;
- ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
- ha->nvram_size);
+ ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
"Contents of NVRAM\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
- (uint8_t *)nv, ha->nvram_size);
+ nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
- if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
- || nv->id[3] != ' ' ||
- nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
+ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+ le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x006b,
- "Inconsistent NVRAM detected: checksum=0x%x id=%c "
- "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
+ "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
+ chksum, nv->id, le16_to_cpu(nv->nvram_version));
+ ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
ql_log(ql_log_warn, vha, 0x006c,
"Falling back to functioning (yet invalid -- WWPN) "
"defaults.\n");
@@ -7104,11 +7234,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->flags.disable_risc_code_load = 0;
ha->flags.enable_lip_reset = 0;
ha->flags.enable_lip_full_login =
- le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+ le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
ha->flags.enable_target_reset =
- le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+ le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
ha->flags.enable_led_scheme = 0;
- ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+ ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
(BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -7182,7 +7312,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
(BIT_3 | BIT_2 | BIT_1 | BIT_0);
ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
- le16_to_cpu(icb->interrupt_delay_timer): 2;
+ le16_to_cpu(icb->interrupt_delay_timer) : 2;
}
icb->firmware_options_2 &= cpu_to_le32(
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
@@ -7205,128 +7335,311 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
return (rval);
}
-uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+static void
+qla27xx_print_image(struct scsi_qla_host *vha, char *name,
+ struct qla27xx_image_status *image_status)
+{
+ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
+ name, "status",
+ image_status->image_status_mask,
+ le16_to_cpu(image_status->generation),
+ image_status->ver_major,
+ image_status->ver_minor,
+ image_status->bitmap,
+ le32_to_cpu(image_status->checksum),
+ le32_to_cpu(image_status->signature));
+}
+
+static bool
+qla28xx_check_aux_image_status_signature(
+ struct qla27xx_image_status *image_status)
+{
+ ulong signature = le32_to_cpu(image_status->signature);
+
+ return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
+}
+
+static bool
+qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
+{
+ ulong signature = le32_to_cpu(image_status->signature);
+
+ return
+ signature != QLA27XX_IMG_STATUS_SIGN &&
+ signature != QLA28XX_IMG_STATUS_SIGN;
+}
+
+static ulong
+qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
+{
+ uint32_t *p = (void *)image_status;
+ uint n = sizeof(*image_status) / sizeof(*p);
+ uint32_t sum = 0;
+
+ for ( ; n--; p++)
+ sum += le32_to_cpup(p);
+
+ return sum;
+}
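
The checksum helper above simply sums the image-status block as little-endian 32-bit words; the code it replaces (removed further down in this hunk) uses the same convention, where a non-zero total means the block is corrupt. As a standalone illustration of that zero-sum scheme -- the buffer, the csum_idx slot and the little-endian host assumption are mine, not the driver's:

	#include <stdint.h>
	#include <stddef.h>

	/* Sum a block of 32-bit words; 0 means the block is self-consistent. */
	static uint32_t sum32(const uint32_t *p, size_t nwords)
	{
		uint32_t sum = 0;

		while (nwords--)
			sum += *p++;		/* sketch assumes a little-endian host */
		return sum;
	}

	/* Producer side: pick the checksum word so the whole block sums to 0. */
	static void seal_block(uint32_t *blk, size_t nwords, size_t csum_idx)
	{
		blk[csum_idx] = 0;
		blk[csum_idx] = 0U - sum32(blk, nwords);
	}

Under that convention, qla27xx_image_status_checksum() returning non-zero is by itself enough to reject an image; no separately stored expected value is needed.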
+
+static inline uint
+qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
+{
+ return aux->bitmap & bitmask ?
+ QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
+}
+
+static void
+qla28xx_component_status(
+ struct active_regions *active_regions, struct qla27xx_image_status *aux)
+{
+ active_regions->aux.board_config =
+ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
+
+ active_regions->aux.vpd_nvram =
+ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
+
+ active_regions->aux.npiv_config_0_1 =
+ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
+
+ active_regions->aux.npiv_config_2_3 =
+ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
+}
+
+static int
+qla27xx_compare_image_generation(
+ struct qla27xx_image_status *pri_image_status,
+ struct qla27xx_image_status *sec_image_status)
+{
+ /* calculate generation delta as a signed 16-bit value (this accounts for wrap) */
+ int16_t delta =
+ le16_to_cpu(pri_image_status->generation) -
+ le16_to_cpu(sec_image_status->generation);
+
+ ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
+
+ return delta;
+}
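
The int16_t delta above is the classic wrap-aware sequence comparison: subtract the two 16-bit generation counters and reinterpret the result as signed, which orders them correctly as long as they are less than 32768 apart. A tiny self-contained illustration with made-up counter values:

	#include <stdint.h>
	#include <stdio.h>

	/* Wrap-aware comparison of two 16-bit generation counters. */
	static int16_t gen_delta(uint16_t a, uint16_t b)
	{
		return (int16_t)(uint16_t)(a - b);	/* modulo-65536 difference, read as signed */
	}

	int main(void)
	{
		/* 0x0001 was written after 0xfffe even though it is numerically smaller. */
		printf("%d\n", gen_delta(0x0001, 0xfffe));	/* 3  -> first argument is newer */
		printf("%d\n", gen_delta(0xfffe, 0x0001));	/* -3 -> first argument is older */
		return 0;
	}

A delta >= 0 therefore means the primary image is at least as new as the secondary, which is how the check_valid_image blocks later in this hunk choose between the two.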
+
+void
+qla28xx_get_aux_images(
+ struct scsi_qla_host *vha, struct active_regions *active_regions)
{
- struct qla27xx_image_status pri_image_status, sec_image_status;
- uint8_t valid_pri_image, valid_sec_image;
- uint32_t *wptr;
- uint32_t cnt, chksum, size;
struct qla_hw_data *ha = vha->hw;
+ struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
+ bool valid_pri_image = false, valid_sec_image = false;
+ bool active_pri_image = false, active_sec_image = false;
+
+ if (!ha->flt_region_aux_img_status_pri) {
+ ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
+ goto check_sec_image;
+ }
- valid_pri_image = valid_sec_image = 1;
- ha->active_image = 0;
- size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
+ qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
+ ha->flt_region_aux_img_status_pri,
+ sizeof(pri_aux_image_status) >> 2);
+ qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
- if (!ha->flt_region_img_status_pri) {
- valid_pri_image = 0;
+ if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
+ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "Primary aux image signature (%#x) not valid\n",
+ le32_to_cpu(pri_aux_image_status.signature));
goto check_sec_image;
}
- qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
- ha->flt_region_img_status_pri, size);
+ if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
+ ql_dbg(ql_dbg_init, vha, 0x018c,
+ "Primary aux image checksum failed\n");
+ goto check_sec_image;
+ }
+
+ valid_pri_image = true;
+
+ if (pri_aux_image_status.image_status_mask & 1) {
+ ql_dbg(ql_dbg_init, vha, 0x018d,
+ "Primary aux image is active\n");
+ active_pri_image = true;
+ }
+
+check_sec_image:
+ if (!ha->flt_region_aux_img_status_sec) {
+ ql_dbg(ql_dbg_init, vha, 0x018a,
+ "Secondary aux image not addressed\n");
+ goto check_valid_image;
+ }
+
+ qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
+ ha->flt_region_aux_img_status_sec,
+ sizeof(sec_aux_image_status) >> 2);
+ qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
- if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+ if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
ql_dbg(ql_dbg_init, vha, 0x018b,
- "Primary image signature (0x%x) not valid\n",
- pri_image_status.signature);
- valid_pri_image = 0;
+ "Secondary aux image signature (%#x) not valid\n",
+ le32_to_cpu(sec_aux_image_status.signature));
+ goto check_valid_image;
+ }
+
+ if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
+ ql_dbg(ql_dbg_init, vha, 0x018c,
+ "Secondary aux image checksum failed\n");
+ goto check_valid_image;
+ }
+
+ valid_sec_image = true;
+
+ if (sec_aux_image_status.image_status_mask & 1) {
+ ql_dbg(ql_dbg_init, vha, 0x018d,
+ "Secondary aux image is active\n");
+ active_sec_image = true;
+ }
+
+check_valid_image:
+ if (valid_pri_image && active_pri_image &&
+ valid_sec_image && active_sec_image) {
+ if (qla27xx_compare_image_generation(&pri_aux_image_status,
+ &sec_aux_image_status) >= 0) {
+ qla28xx_component_status(active_regions,
+ &pri_aux_image_status);
+ } else {
+ qla28xx_component_status(active_regions,
+ &sec_aux_image_status);
+ }
+ } else if (valid_pri_image && active_pri_image) {
+ qla28xx_component_status(active_regions, &pri_aux_image_status);
+ } else if (valid_sec_image && active_sec_image) {
+ qla28xx_component_status(active_regions, &sec_aux_image_status);
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x018f,
+ "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
+ active_regions->aux.board_config,
+ active_regions->aux.vpd_nvram,
+ active_regions->aux.npiv_config_0_1,
+ active_regions->aux.npiv_config_2_3);
+}
+
+void
+qla27xx_get_active_image(struct scsi_qla_host *vha,
+ struct active_regions *active_regions)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla27xx_image_status pri_image_status, sec_image_status;
+ bool valid_pri_image = false, valid_sec_image = false;
+ bool active_pri_image = false, active_sec_image = false;
+
+ if (!ha->flt_region_img_status_pri) {
+ ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
goto check_sec_image;
}
- wptr = (uint32_t *)(&pri_image_status);
- cnt = size;
+ qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+ ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2);
+ qla27xx_print_image(vha, "Primary image", &pri_image_status);
- for (chksum = 0; cnt--; wptr++)
- chksum += le32_to_cpu(*wptr);
+ if (qla27xx_check_image_status_signature(&pri_image_status)) {
+ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "Primary image signature (%#x) not valid\n",
+ le32_to_cpu(pri_image_status.signature));
+ goto check_sec_image;
+ }
- if (chksum) {
+ if (qla27xx_image_status_checksum(&pri_image_status)) {
ql_dbg(ql_dbg_init, vha, 0x018c,
- "Checksum validation failed for primary image (0x%x)\n",
- chksum);
- valid_pri_image = 0;
+ "Primary image checksum failed\n");
+ goto check_sec_image;
+ }
+
+ valid_pri_image = true;
+
+ if (pri_image_status.image_status_mask & 1) {
+ ql_dbg(ql_dbg_init, vha, 0x018d,
+ "Primary image is active\n");
+ active_pri_image = true;
}
check_sec_image:
if (!ha->flt_region_img_status_sec) {
- valid_sec_image = 0;
+ ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
goto check_valid_image;
}
qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
- ha->flt_region_img_status_sec, size);
+ ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
+ qla27xx_print_image(vha, "Secondary image", &sec_image_status);
- if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
- ql_dbg(ql_dbg_init, vha, 0x018d,
- "Secondary image signature(0x%x) not valid\n",
- sec_image_status.signature);
- valid_sec_image = 0;
+ if (qla27xx_check_image_status_signature(&sec_image_status)) {
+ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "Secondary image signature (%#x) not valid\n",
+ le32_to_cpu(sec_image_status.signature));
goto check_valid_image;
}
- wptr = (uint32_t *)(&sec_image_status);
- cnt = size;
- for (chksum = 0; cnt--; wptr++)
- chksum += le32_to_cpu(*wptr);
- if (chksum) {
- ql_dbg(ql_dbg_init, vha, 0x018e,
- "Checksum validation failed for secondary image (0x%x)\n",
- chksum);
- valid_sec_image = 0;
+ if (qla27xx_image_status_checksum(&sec_image_status)) {
+ ql_dbg(ql_dbg_init, vha, 0x018c,
+ "Secondary image checksum failed\n");
+ goto check_valid_image;
+ }
+
+ valid_sec_image = true;
+
+ if (sec_image_status.image_status_mask & 1) {
+ ql_dbg(ql_dbg_init, vha, 0x018d,
+ "Secondary image is active\n");
+ active_sec_image = true;
}
check_valid_image:
- if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
- ha->active_image = QLA27XX_PRIMARY_IMAGE;
- if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
- if (!ha->active_image ||
- pri_image_status.generation_number <
- sec_image_status.generation_number)
- ha->active_image = QLA27XX_SECONDARY_IMAGE;
+ if (valid_pri_image && active_pri_image)
+ active_regions->global = QLA27XX_PRIMARY_IMAGE;
+
+ if (valid_sec_image && active_sec_image) {
+ if (!active_regions->global ||
+ qla27xx_compare_image_generation(
+ &pri_image_status, &sec_image_status) < 0) {
+ active_regions->global = QLA27XX_SECONDARY_IMAGE;
+ }
}
- ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
- ha->active_image == 0 ? "default bootld and fw" :
- ha->active_image == 1 ? "primary" :
- ha->active_image == 2 ? "secondary" :
- "Invalid");
+ ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
+ active_regions->global == QLA27XX_DEFAULT_IMAGE ?
+ "default (boot/fw)" :
+ active_regions->global == QLA27XX_PRIMARY_IMAGE ?
+ "primary" :
+ active_regions->global == QLA27XX_SECONDARY_IMAGE ?
+ "secondary" : "invalid",
+ active_regions->global);
+}
- return ha->active_image;
+bool qla24xx_risc_firmware_invalid(uint32_t *dword)
+{
+ return
+ !(dword[4] | dword[5] | dword[6] | dword[7]) ||
+ !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
}
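
qla24xx_risc_firmware_invalid() rejects a header whose dwords 4-7 are either all zero (blank region) or all 0xffffffff (erased flash). A throwaway host-side check of the same shape -- the sample buffers and the "plausible" version words are invented for the sketch:

	#include <stdint.h>
	#include <stdbool.h>

	static bool risc_fw_invalid(const uint32_t *dword)
	{
		return !(dword[4] | dword[5] | dword[6] | dword[7]) ||		/* all 0x00000000 */
		       !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);	/* all 0xffffffff */
	}

	int main(void)
	{
		uint32_t erased[8] = { [4] = ~0u, [5] = ~0u, [6] = ~0u, [7] = ~0u };
		uint32_t blank[8] = { 0 };
		uint32_t plausible[8] = { [4] = 0x00080300, [5] = 0x00000002 };	/* made-up version words */

		return risc_fw_invalid(erased) && risc_fw_invalid(blank) &&
		       !risc_fw_invalid(plausible) ? 0 : 1;	/* exit 0 when behaviour matches */
	}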
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
uint32_t faddr)
{
- int rval = QLA_SUCCESS;
- int segments, fragment;
- uint32_t *dcode, dlen;
- uint32_t risc_addr;
- uint32_t risc_size;
- uint32_t i;
+ int rval;
+ uint templates, segments, fragment;
+ ulong i;
+ uint j;
+ ulong dlen;
+ uint32_t *dcode;
+ uint32_t risc_addr, risc_size, risc_attr = 0;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
+ struct fwdt *fwdt = ha->fwdt;
ql_dbg(ql_dbg_init, vha, 0x008b,
"FW: Loading firmware from flash (%x).\n", faddr);
- rval = QLA_SUCCESS;
-
- segments = FA_RISC_CODE_SEGMENTS;
- dcode = (uint32_t *)req->ring;
- *srisc_addr = 0;
-
- if (IS_QLA27XX(ha) &&
- qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
- faddr = ha->flt_region_fw_sec;
-
- /* Validate firmware image by checking version. */
- qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
- for (i = 0; i < 4; i++)
- dcode[i] = be32_to_cpu(dcode[i]);
- if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
- dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
- (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
- dcode[3] == 0)) {
+ dcode = (void *)req->ring;
+ qla24xx_read_flash_data(vha, dcode, faddr, 8);
+ if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x008c,
"Unable to verify the integrity of flash firmware "
"image.\n");
@@ -7337,34 +7650,36 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return QLA_FUNCTION_FAILED;
}
- while (segments && rval == QLA_SUCCESS) {
- /* Read segment's load information. */
- qla24xx_read_flash_data(vha, dcode, faddr, 4);
-
+ dcode = (void *)req->ring;
+ *srisc_addr = 0;
+ segments = FA_RISC_CODE_SEGMENTS;
+ for (j = 0; j < segments; j++) {
+ ql_dbg(ql_dbg_init, vha, 0x008d,
+ "-> Loading segment %u...\n", j);
+ qla24xx_read_flash_data(vha, dcode, faddr, 10);
risc_addr = be32_to_cpu(dcode[2]);
- *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
risc_size = be32_to_cpu(dcode[3]);
+ if (!*srisc_addr) {
+ *srisc_addr = risc_addr;
+ risc_attr = be32_to_cpu(dcode[9]);
+ }
- fragment = 0;
- while (risc_size > 0 && rval == QLA_SUCCESS) {
- dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+ dlen = ha->fw_transfer_size >> 2;
+ for (fragment = 0; risc_size; fragment++) {
if (dlen > risc_size)
dlen = risc_size;
ql_dbg(ql_dbg_init, vha, 0x008e,
- "Loading risc segment@ risc addr %x "
- "number of dwords 0x%x offset 0x%x.\n",
- risc_addr, dlen, faddr);
-
+ "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
+ fragment, risc_addr, faddr, dlen);
qla24xx_read_flash_data(vha, dcode, faddr, dlen);
for (i = 0; i < dlen; i++)
dcode[i] = swab32(dcode[i]);
- rval = qla2x00_load_ram(vha, req->dma, risc_addr,
- dlen);
+ rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
if (rval) {
ql_log(ql_log_fatal, vha, 0x008f,
- "Failed to load segment %d of firmware.\n",
+ "-> Failed load firmware fragment %u.\n",
fragment);
return QLA_FUNCTION_FAILED;
}
@@ -7372,107 +7687,82 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
faddr += dlen;
risc_addr += dlen;
risc_size -= dlen;
- fragment++;
}
-
- /* Next segment. */
- segments--;
}
- if (!IS_QLA27XX(ha))
- return rval;
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_SUCCESS;
- if (ha->fw_dump_template)
- vfree(ha->fw_dump_template);
- ha->fw_dump_template = NULL;
- ha->fw_dump_template_len = 0;
-
- ql_dbg(ql_dbg_init, vha, 0x0161,
- "Loading fwdump template from %x\n", faddr);
- qla24xx_read_flash_data(vha, dcode, faddr, 7);
- risc_size = be32_to_cpu(dcode[2]);
- ql_dbg(ql_dbg_init, vha, 0x0162,
- "-> array size %x dwords\n", risc_size);
- if (risc_size == 0 || risc_size == ~0)
- goto default_template;
-
- dlen = (risc_size - 8) * sizeof(*dcode);
- ql_dbg(ql_dbg_init, vha, 0x0163,
- "-> template allocating %x bytes...\n", dlen);
- ha->fw_dump_template = vmalloc(dlen);
- if (!ha->fw_dump_template) {
- ql_log(ql_log_warn, vha, 0x0164,
- "Failed fwdump template allocate %x bytes.\n", risc_size);
- goto default_template;
- }
-
- faddr += 7;
- risc_size -= 8;
- dcode = ha->fw_dump_template;
- qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
- for (i = 0; i < risc_size; i++)
- dcode[i] = le32_to_cpu(dcode[i]);
-
- if (!qla27xx_fwdt_template_valid(dcode)) {
- ql_log(ql_log_warn, vha, 0x0165,
- "Failed fwdump template validate\n");
- goto default_template;
- }
-
- dlen = qla27xx_fwdt_template_size(dcode);
- ql_dbg(ql_dbg_init, vha, 0x0166,
- "-> template size %x bytes\n", dlen);
- if (dlen > risc_size * sizeof(*dcode)) {
- ql_log(ql_log_warn, vha, 0x0167,
- "Failed fwdump template exceeds array by %zx bytes\n",
- (size_t)(dlen - risc_size * sizeof(*dcode)));
- goto default_template;
- }
- ha->fw_dump_template_len = dlen;
- return rval;
+ templates = (risc_attr & BIT_9) ? 2 : 1;
+ ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
+ for (j = 0; j < templates; j++, fwdt++) {
+ if (fwdt->template)
+ vfree(fwdt->template);
+ fwdt->template = NULL;
+ fwdt->length = 0;
+
+ dcode = (void *)req->ring;
+ qla24xx_read_flash_data(vha, dcode, faddr, 7);
+ risc_size = be32_to_cpu(dcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x0161,
+ "-> fwdt%u template array at %#x (%#x dwords)\n",
+ j, faddr, risc_size);
+ if (!risc_size || !~risc_size) {
+ ql_dbg(ql_dbg_init, vha, 0x0162,
+ "-> fwdt%u failed to read array\n", j);
+ goto failed;
+ }
-default_template:
- ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
- if (ha->fw_dump_template)
- vfree(ha->fw_dump_template);
- ha->fw_dump_template = NULL;
- ha->fw_dump_template_len = 0;
-
- dlen = qla27xx_fwdt_template_default_size();
- ql_dbg(ql_dbg_init, vha, 0x0169,
- "-> template allocating %x bytes...\n", dlen);
- ha->fw_dump_template = vmalloc(dlen);
- if (!ha->fw_dump_template) {
- ql_log(ql_log_warn, vha, 0x016a,
- "Failed fwdump template allocate %x bytes.\n", risc_size);
- goto failed_template;
- }
-
- dcode = ha->fw_dump_template;
- risc_size = dlen / sizeof(*dcode);
- memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
- for (i = 0; i < risc_size; i++)
- dcode[i] = be32_to_cpu(dcode[i]);
-
- if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
- ql_log(ql_log_warn, vha, 0x016b,
- "Failed fwdump template validate\n");
- goto failed_template;
- }
-
- dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
- ql_dbg(ql_dbg_init, vha, 0x016c,
- "-> template size %x bytes\n", dlen);
- ha->fw_dump_template_len = dlen;
- return rval;
+ /* skip header and ignore checksum */
+ faddr += 7;
+ risc_size -= 8;
+
+ ql_dbg(ql_dbg_init, vha, 0x0163,
+ "-> fwdt%u template allocate template %#x words...\n",
+ j, risc_size);
+ fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0x0164,
+ "-> fwdt%u failed allocate template.\n", j);
+ goto failed;
+ }
-failed_template:
- ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
- if (ha->fw_dump_template)
- vfree(ha->fw_dump_template);
- ha->fw_dump_template = NULL;
- ha->fw_dump_template_len = 0;
- return rval;
+ dcode = fwdt->template;
+ qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0165,
+ "-> fwdt%u failed template validate\n", j);
+ goto failed;
+ }
+
+ dlen = qla27xx_fwdt_template_size(dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0166,
+ "-> fwdt%u template size %#lx bytes (%#lx words)\n",
+ j, dlen, dlen / sizeof(*dcode));
+ if (dlen > risc_size * sizeof(*dcode)) {
+ ql_log(ql_log_warn, vha, 0x0167,
+ "-> fwdt%u template exceeds array (%-lu bytes)\n",
+ j, dlen - risc_size * sizeof(*dcode));
+ goto failed;
+ }
+
+ fwdt->length = dlen;
+ ql_dbg(ql_dbg_init, vha, 0x0168,
+ "-> fwdt%u loaded template ok\n", j);
+
+ faddr += risc_size + 1;
+ }
+
+ return QLA_SUCCESS;
+
+failed:
+ if (fwdt->template)
+ vfree(fwdt->template);
+ fwdt->template = NULL;
+ fwdt->length = 0;
+
+ return QLA_SUCCESS;
}
#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
@@ -7580,94 +7870,73 @@ static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
- int segments, fragment;
- uint32_t *dcode, dlen;
- uint32_t risc_addr;
- uint32_t risc_size;
- uint32_t i;
+ uint templates, segments, fragment;
+ uint32_t *dcode;
+ ulong dlen;
+ uint32_t risc_addr, risc_size, risc_attr = 0;
+ ulong i;
+ uint j;
struct fw_blob *blob;
- const uint32_t *fwcode;
- uint32_t fwclen;
+ uint32_t *fwcode;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
+ struct fwdt *fwdt = ha->fwdt;
+
+ ql_dbg(ql_dbg_init, vha, 0x0090,
+ "-> FW: Loading via request-firmware.\n");
- /* Load firmware blob. */
blob = qla2x00_request_firmware(vha);
if (!blob) {
- ql_log(ql_log_warn, vha, 0x0090,
- "Firmware image unavailable.\n");
- ql_log(ql_log_warn, vha, 0x0091,
- "Firmware images can be retrieved from: "
- QLA_FW_URL ".\n");
+ ql_log(ql_log_warn, vha, 0x0092,
+ "-> Firmware file not found.\n");
return QLA_FUNCTION_FAILED;
}
- ql_dbg(ql_dbg_init, vha, 0x0092,
- "FW: Loading via request-firmware.\n");
-
- rval = QLA_SUCCESS;
-
- segments = FA_RISC_CODE_SEGMENTS;
- dcode = (uint32_t *)req->ring;
- *srisc_addr = 0;
- fwcode = (uint32_t *)blob->fw->data;
- fwclen = 0;
-
- /* Validate firmware image by checking version. */
- if (blob->fw->size < 8 * sizeof(uint32_t)) {
+ fwcode = (void *)blob->fw->data;
+ dcode = fwcode;
+ if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x0093,
"Unable to verify integrity of firmware image (%zd).\n",
blob->fw->size);
- return QLA_FUNCTION_FAILED;
- }
- for (i = 0; i < 4; i++)
- dcode[i] = be32_to_cpu(fwcode[i + 4]);
- if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
- dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
- (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
- dcode[3] == 0)) {
- ql_log(ql_log_fatal, vha, 0x0094,
- "Unable to verify integrity of firmware image (%zd).\n",
- blob->fw->size);
ql_log(ql_log_fatal, vha, 0x0095,
"Firmware data: %08x %08x %08x %08x.\n",
dcode[0], dcode[1], dcode[2], dcode[3]);
return QLA_FUNCTION_FAILED;
}
- while (segments && rval == QLA_SUCCESS) {
+ dcode = (void *)req->ring;
+ *srisc_addr = 0;
+ segments = FA_RISC_CODE_SEGMENTS;
+ for (j = 0; j < segments; j++) {
+ ql_dbg(ql_dbg_init, vha, 0x0096,
+ "-> Loading segment %u...\n", j);
risc_addr = be32_to_cpu(fwcode[2]);
- *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
risc_size = be32_to_cpu(fwcode[3]);
- /* Validate firmware image size. */
- fwclen += risc_size * sizeof(uint32_t);
- if (blob->fw->size < fwclen) {
- ql_log(ql_log_fatal, vha, 0x0096,
- "Unable to verify integrity of firmware image "
- "(%zd).\n", blob->fw->size);
- return QLA_FUNCTION_FAILED;
+ if (!*srisc_addr) {
+ *srisc_addr = risc_addr;
+ risc_attr = be32_to_cpu(fwcode[9]);
}
- fragment = 0;
- while (risc_size > 0 && rval == QLA_SUCCESS) {
- dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+ dlen = ha->fw_transfer_size >> 2;
+ for (fragment = 0; risc_size; fragment++) {
if (dlen > risc_size)
dlen = risc_size;
ql_dbg(ql_dbg_init, vha, 0x0097,
- "Loading risc segment@ risc addr %x "
- "number of dwords 0x%x.\n", risc_addr, dlen);
+ "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
+ fragment, risc_addr,
+ (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
+ dlen);
for (i = 0; i < dlen; i++)
dcode[i] = swab32(fwcode[i]);
- rval = qla2x00_load_ram(vha, req->dma, risc_addr,
- dlen);
+ rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
if (rval) {
ql_log(ql_log_fatal, vha, 0x0098,
- "Failed to load segment %d of firmware.\n",
+ "-> Failed load firmware fragment %u.\n",
fragment);
return QLA_FUNCTION_FAILED;
}
@@ -7675,106 +7944,82 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
fwcode += dlen;
risc_addr += dlen;
risc_size -= dlen;
- fragment++;
}
-
- /* Next segment. */
- segments--;
}
- if (!IS_QLA27XX(ha))
- return rval;
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_SUCCESS;
- if (ha->fw_dump_template)
- vfree(ha->fw_dump_template);
- ha->fw_dump_template = NULL;
- ha->fw_dump_template_len = 0;
-
- ql_dbg(ql_dbg_init, vha, 0x171,
- "Loading fwdump template from %x\n",
- (uint32_t)((void *)fwcode - (void *)blob->fw->data));
- risc_size = be32_to_cpu(fwcode[2]);
- ql_dbg(ql_dbg_init, vha, 0x172,
- "-> array size %x dwords\n", risc_size);
- if (risc_size == 0 || risc_size == ~0)
- goto default_template;
-
- dlen = (risc_size - 8) * sizeof(*fwcode);
- ql_dbg(ql_dbg_init, vha, 0x0173,
- "-> template allocating %x bytes...\n", dlen);
- ha->fw_dump_template = vmalloc(dlen);
- if (!ha->fw_dump_template) {
- ql_log(ql_log_warn, vha, 0x0174,
- "Failed fwdump template allocate %x bytes.\n", risc_size);
- goto default_template;
- }
-
- fwcode += 7;
- risc_size -= 8;
- dcode = ha->fw_dump_template;
- for (i = 0; i < risc_size; i++)
- dcode[i] = le32_to_cpu(fwcode[i]);
-
- if (!qla27xx_fwdt_template_valid(dcode)) {
- ql_log(ql_log_warn, vha, 0x0175,
- "Failed fwdump template validate\n");
- goto default_template;
- }
-
- dlen = qla27xx_fwdt_template_size(dcode);
- ql_dbg(ql_dbg_init, vha, 0x0176,
- "-> template size %x bytes\n", dlen);
- if (dlen > risc_size * sizeof(*fwcode)) {
- ql_log(ql_log_warn, vha, 0x0177,
- "Failed fwdump template exceeds array by %zx bytes\n",
- (size_t)(dlen - risc_size * sizeof(*fwcode)));
- goto default_template;
- }
- ha->fw_dump_template_len = dlen;
- return rval;
+ templates = (risc_attr & BIT_9) ? 2 : 1;
+ ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
+ for (j = 0; j < templates; j++, fwdt++) {
+ if (fwdt->template)
+ vfree(fwdt->template);
+ fwdt->template = NULL;
+ fwdt->length = 0;
+
+ risc_size = be32_to_cpu(fwcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x0171,
+ "-> fwdt%u template array at %#x (%#x dwords)\n",
+ j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
+ risc_size);
+ if (!risc_size || !~risc_size) {
+ ql_dbg(ql_dbg_init, vha, 0x0172,
+ "-> fwdt%u failed to read array\n", j);
+ goto failed;
+ }
-default_template:
- ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
- if (ha->fw_dump_template)
- vfree(ha->fw_dump_template);
- ha->fw_dump_template = NULL;
- ha->fw_dump_template_len = 0;
-
- dlen = qla27xx_fwdt_template_default_size();
- ql_dbg(ql_dbg_init, vha, 0x0179,
- "-> template allocating %x bytes...\n", dlen);
- ha->fw_dump_template = vmalloc(dlen);
- if (!ha->fw_dump_template) {
- ql_log(ql_log_warn, vha, 0x017a,
- "Failed fwdump template allocate %x bytes.\n", risc_size);
- goto failed_template;
- }
-
- dcode = ha->fw_dump_template;
- risc_size = dlen / sizeof(*fwcode);
- fwcode = qla27xx_fwdt_template_default();
- for (i = 0; i < risc_size; i++)
- dcode[i] = be32_to_cpu(fwcode[i]);
-
- if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
- ql_log(ql_log_warn, vha, 0x017b,
- "Failed fwdump template validate\n");
- goto failed_template;
- }
-
- dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
- ql_dbg(ql_dbg_init, vha, 0x017c,
- "-> template size %x bytes\n", dlen);
- ha->fw_dump_template_len = dlen;
- return rval;
+ /* skip header and ignore checksum */
+ fwcode += 7;
+ risc_size -= 8;
+
+ ql_dbg(ql_dbg_init, vha, 0x0173,
+ "-> fwdt%u template allocate template %#x words...\n",
+ j, risc_size);
+ fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0x0174,
+ "-> fwdt%u failed allocate template.\n", j);
+ goto failed;
+ }
-failed_template:
- ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
- if (ha->fw_dump_template)
- vfree(ha->fw_dump_template);
- ha->fw_dump_template = NULL;
- ha->fw_dump_template_len = 0;
- return rval;
+ dcode = fwdt->template;
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = fwcode[i];
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0175,
+ "-> fwdt%u failed template validate\n", j);
+ goto failed;
+ }
+
+ dlen = qla27xx_fwdt_template_size(dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0176,
+ "-> fwdt%u template size %#lx bytes (%#lx words)\n",
+ j, dlen, dlen / sizeof(*dcode));
+ if (dlen > risc_size * sizeof(*dcode)) {
+ ql_log(ql_log_warn, vha, 0x0177,
+ "-> fwdt%u template exceeds array (%-lu bytes)\n",
+ j, dlen - risc_size * sizeof(*dcode));
+ goto failed;
+ }
+
+ fwdt->length = dlen;
+ ql_dbg(ql_dbg_init, vha, 0x0178,
+ "-> fwdt%u loaded template ok\n", j);
+
+ fwcode += risc_size + 1;
+ }
+
+ return QLA_SUCCESS;
+
+failed:
+ if (fwdt->template)
+ vfree(fwdt->template);
+ fwdt->template = NULL;
+ fwdt->length = 0;
+
+ return QLA_SUCCESS;
}
int
@@ -7803,32 +8048,50 @@ qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
struct qla_hw_data *ha = vha->hw;
+ struct active_regions active_regions = { };
if (ql2xfwloadbin == 2)
goto try_blob_fw;
- /*
- * FW Load priority:
+ /* FW Load priority:
* 1) Firmware residing in flash.
* 2) Firmware via request-firmware interface (.bin file).
- * 3) Golden-Firmware residing in flash -- limited operation.
+ * 3) Golden-Firmware residing in flash -- (limited operation).
*/
+
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ goto try_primary_fw;
+
+ qla27xx_get_active_image(vha, &active_regions);
+
+ if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
+ goto try_primary_fw;
+
+ ql_dbg(ql_dbg_init, vha, 0x008b,
+ "Loading secondary firmware image.\n");
+ rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
+ if (!rval)
+ return rval;
+
+try_primary_fw:
+ ql_dbg(ql_dbg_init, vha, 0x008b,
+ "Loading primary firmware image.\n");
rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
- if (rval == QLA_SUCCESS)
+ if (!rval)
return rval;
try_blob_fw:
rval = qla24xx_load_risc_blob(vha, srisc_addr);
- if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
+ if (!rval || !ha->flt_region_gold_fw)
return rval;
ql_log(ql_log_info, vha, 0x0099,
"Attempting to fallback to golden firmware.\n");
rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
- if (rval != QLA_SUCCESS)
+ if (rval)
return rval;
- ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
+ ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
ha->flags.running_gold_fw = 1;
return rval;
}
@@ -7963,6 +8226,7 @@ void
qla84xx_put_chip(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+
if (ha->cs84xx)
kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}
@@ -7980,7 +8244,7 @@ qla84xx_init_chip(scsi_qla_host_t *vha)
mutex_unlock(&ha->cs84xx->fw_update_mutex);
- return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
+ return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
QLA_SUCCESS;
}
@@ -7997,25 +8261,48 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
uint32_t chksum;
uint16_t cnt;
struct qla_hw_data *ha = vha->hw;
+ uint32_t faddr;
+ struct active_regions active_regions = { };
rval = QLA_SUCCESS;
icb = (struct init_cb_81xx *)ha->init_cb;
nv = ha->nvram;
/* Determine NVRAM starting address. */
- ha->nvram_size = sizeof(struct nvram_81xx);
+ ha->nvram_size = sizeof(*nv);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
ha->vpd_size = FA_VPD_SIZE_82XX;
+ if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
+ qla28xx_get_aux_images(vha, &active_regions);
+
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
- ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
- ha->vpd_size);
+
+ faddr = ha->flt_region_vpd;
+ if (IS_QLA28XX(ha)) {
+ if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_vpd_sec;
+ ql_dbg(ql_dbg_init, vha, 0x0110,
+ "Loading %s nvram image.\n",
+ active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+ "primary" : "secondary");
+ }
+ qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2);
/* Get NVRAM data into cache and calculate checksum. */
- ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
- ha->nvram_size);
+ faddr = ha->flt_region_nvram;
+ if (IS_QLA28XX(ha)) {
+ if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_nvram_sec;
+ }
+ ql_dbg(ql_dbg_init, vha, 0x0110,
+ "Loading %s nvram image.\n",
+ active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+ "primary" : "secondary");
+ qla24xx_read_flash_data(vha, ha->nvram, faddr, ha->nvram_size >> 2);
+
dptr = (uint32_t *)nv;
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -8023,17 +8310,16 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
"Contents of NVRAM:\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
- (uint8_t *)nv, ha->nvram_size);
+ nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
- if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
- || nv->id[3] != ' ' ||
- nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
+ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+ le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
/* Reset NVRAM data. */
ql_log(ql_log_info, vha, 0x0073,
- "Inconsistent NVRAM detected: checksum=0x%x id=%c "
- "version=0x%x.\n", chksum, nv->id[0],
- le16_to_cpu(nv->nvram_version));
+ "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
+ chksum, nv->id, le16_to_cpu(nv->nvram_version));
+ ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
ql_log(ql_log_info, vha, 0x0074,
"Falling back to functioning (yet invalid -- WWPN) "
"defaults.\n");
@@ -8154,11 +8440,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ha->flags.disable_risc_code_load = 0;
ha->flags.enable_lip_reset = 0;
ha->flags.enable_lip_full_login =
- le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+ le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
ha->flags.enable_target_reset =
- le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+ le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
ha->flags.enable_led_scheme = 0;
- ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+ ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
(BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -8222,7 +8508,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ha->login_retry_count = ql2xloginretrycount;
/* if not running MSI-X we need handshaking on interrupts */
- if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
+ if (!vha->hw->flags.msix_enabled &&
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
icb->firmware_options_2 |= cpu_to_le32(BIT_22);
/* Enable ZIO. */
@@ -8230,7 +8517,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
(BIT_3 | BIT_2 | BIT_1 | BIT_0);
ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
- le16_to_cpu(icb->interrupt_delay_timer): 2;
+ le16_to_cpu(icb->interrupt_delay_timer) : 2;
}
icb->firmware_options_2 &= cpu_to_le32(
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
@@ -8255,12 +8542,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* N2N: driver will initiate Login instead of FW */
icb->firmware_options_3 |= BIT_8;
- if (IS_QLA27XX(ha)) {
- icb->firmware_options_3 |= BIT_8;
- ql_dbg(ql_log_info, vha, 0x0075,
- "Enabling direct connection.\n");
- }
-
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -8621,7 +8902,6 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
"Failed to allocate memory for queue pair.\n");
return NULL;
}
- memset(qpair, 0, sizeof(struct qla_qpair));
qpair->hw = vha->hw;
qpair->vha = vha;
@@ -8668,7 +8948,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->msix->in_use = 1;
list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
qpair->pdev = ha->pdev;
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
mutex_unlock(&ha->mq_lock);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 512c3c37b447..bf063c664352 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -91,43 +91,6 @@ host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
}
static inline void
-qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
-{
- int i;
-
- if (IS_FWI2_CAPABLE(ha))
- return;
-
- for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
- set_bit(i, ha->loop_id_map);
- set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
- set_bit(BROADCAST, ha->loop_id_map);
-}
-
-static inline int
-qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
-{
- struct qla_hw_data *ha = vha->hw;
- if (IS_FWI2_CAPABLE(ha))
- return (loop_id > NPH_LAST_HANDLE);
-
- return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
- loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
-}
-
-static inline void
-qla2x00_clear_loop_id(fc_port_t *fcport) {
- struct qla_hw_data *ha = fcport->vha->hw;
-
- if (fcport->loop_id == FC_NO_LOOP_ID ||
- qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
- return;
-
- clear_bit(fcport->loop_id, ha->loop_id_map);
- fcport->loop_id = FC_NO_LOOP_ID;
-}
-
-static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
struct dsd_dma *dsd, *tdsd;
@@ -142,25 +105,6 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
INIT_LIST_HEAD(&ctx->dsd_list);
}
-static inline void
-qla2x00_set_fcport_state(fc_port_t *fcport, int state)
-{
- int old_state;
-
- old_state = atomic_read(&fcport->state);
- atomic_set(&fcport->state, state);
-
- /* Don't print state transitions during initial allocation of fcport */
- if (old_state && old_state != state) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
- "FCPort %8phC state transitioned from %s to %s - "
- "portid=%02x%02x%02x.\n", fcport->port_name,
- port_state_str[old_state], port_state_str[state],
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- }
-}
-
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
@@ -240,6 +184,7 @@ done:
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
+ sp->qpair = NULL;
mempool_free(sp, qpair->srb_mempool);
QLA_QPAIR_MARK_NOT_BUSY(qpair);
}
@@ -274,18 +219,6 @@ qla2x00_rel_sp(srb_t *sp)
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
-static inline void
-qla2x00_init_timer(srb_t *sp, unsigned long tmo)
-{
- timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
- sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
- sp->free = qla2x00_sp_free;
- init_completion(&sp->comp);
- if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
- init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
- add_timer(&sp->u.iocb_cmd.timer);
-}
-
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 456a41d2e2c6..9312b19ed708 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -107,7 +107,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
cont_pkt = (cont_entry_t *)req->ring_ptr;
/* Load packet defaults. */
- *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
+ put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
return (cont_pkt);
}
@@ -136,9 +136,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
/* Load packet defaults. */
- *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
- cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
- cpu_to_le32(CONTINUE_A64_TYPE);
+ put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
+ CONTINUE_A64_TYPE, &cont_pkt->entry_type);
return (cont_pkt);
}
@@ -193,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
uint16_t tot_dsds)
{
uint16_t avail_dsds;
- uint32_t *cur_dsd;
+ struct dsd32 *cur_dsd;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
@@ -202,8 +201,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 2 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- cpu_to_le32(COMMAND_TYPE);
+ put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -215,8 +213,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Three DSDs are available in the Command Type 2 IOCB */
- avail_dsds = 3;
- cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+ avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
+ cur_dsd = cmd_pkt->dsd32;
/* Load data segments */
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
@@ -229,12 +227,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
* Type 0 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
- cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
- avail_dsds = 7;
+ cur_dsd = cont_pkt->dsd;
+ avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
}
- *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd32(&cur_dsd, sg);
avail_dsds--;
}
}
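
The many append_dsd64()/append_dsd32() substitutions in the qla_iocb.c hunks replace open-coded LSD/MSD stores with helpers that fill one data-segment descriptor and advance the cursor. The definitions are not part of this diff, so the following is only my reading of how such helpers would look, inferred from the call sites and the struct dsd32/dsd64 types used above:

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/scatterlist.h>
	#include <asm/unaligned.h>

	/* Assumed descriptor layouts and helper shapes -- see the driver headers for the real ones. */
	struct dsd32 {
		__le32 address;
		__le32 length;
	};

	struct dsd64 {
		__le64 address;
		__le32 length;
	} __packed;

	static inline void append_dsd32(struct dsd32 **dsd, struct scatterlist *sg)
	{
		put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
		(*dsd)->length = cpu_to_le32(sg_dma_len(sg));
		(*dsd)++;
	}

	static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg)
	{
		put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
		(*dsd)->length = cpu_to_le32(sg_dma_len(sg));
		(*dsd)++;
	}

The null-termination hunks follow the same shape: instead of writing three zero dwords, they zero the address and length of one trailing descriptor.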
@@ -251,7 +248,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
uint16_t tot_dsds)
{
uint16_t avail_dsds;
- uint32_t *cur_dsd;
+ struct dsd64 *cur_dsd;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
@@ -260,7 +257,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
+ put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -272,12 +269,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Two DSDs are available in the Command Type 3 IOCB */
- avail_dsds = 2;
- cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+ avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
+ cur_dsd = cmd_pkt->dsd64;
/* Load data segments */
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
@@ -287,14 +283,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
- avail_dsds = 5;
+ cur_dsd = cont_pkt->dsd;
+ avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
}
@@ -467,7 +460,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable || IS_QLA27XX(ha)) {
+ if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
} else if (IS_QLA83XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
@@ -580,13 +573,11 @@ static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint16_t tot_dsds)
{
- uint32_t *cur_dsd = NULL;
+ struct dsd64 *cur_dsd = NULL, *next_dsd;
scsi_qla_host_t *vha;
struct qla_hw_data *ha;
struct scsi_cmnd *cmd;
struct scatterlist *cur_seg;
- uint32_t *dsd_seg;
- void *next_dsd;
uint8_t avail_dsds;
uint8_t first_iocb = 1;
uint32_t dsd_list_len;
@@ -596,7 +587,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
+ put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -638,32 +629,27 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
if (first_iocb) {
first_iocb = 0;
- dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
- *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
+ &cmd_pkt->fcp_dsd.address);
+ cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
} else {
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(dsd_list_len);
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
+ &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
+ cur_dsd++;
}
- cur_dsd = (uint32_t *)next_dsd;
+ cur_dsd = next_dsd;
while (avail_dsds) {
- dma_addr_t sle_dma;
-
- sle_dma = sg_dma_address(cur_seg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+ append_dsd64(&cur_dsd, cur_seg);
cur_seg = sg_next(cur_seg);
avail_dsds--;
}
}
/* Null termination */
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
+ cur_dsd->address = 0;
+ cur_dsd->length = 0;
+ cur_dsd++;
cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
return 0;
}
@@ -702,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
uint16_t tot_dsds, struct req_que *req)
{
uint16_t avail_dsds;
- uint32_t *cur_dsd;
+ struct dsd64 *cur_dsd;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
@@ -711,7 +697,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
+ put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -734,12 +720,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
/* One DSD is available in the Command Type 3 IOCB */
avail_dsds = 1;
- cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+ cur_dsd = &cmd_pkt->dsd;
/* Load data segments */
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
@@ -749,14 +734,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
- avail_dsds = 5;
+ cur_dsd = cont_pkt->dsd;
+ avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
}
@@ -892,14 +874,14 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+ struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
struct scatterlist *sg_prot;
- uint32_t *cur_dsd = dsd;
+ struct dsd64 *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
uint32_t prot_int; /* protection interval */
uint32_t partial;
@@ -973,14 +955,14 @@ alloc_and_fill:
/* add new list to cmd iocb or last list */
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = dsd_list_len;
- cur_dsd = (uint32_t *)next_dsd;
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
+ &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
+ cur_dsd = next_dsd;
}
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sle_dma_len);
+ put_unaligned_le64(sle_dma, &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(sle_dma_len);
+ cur_dsd++;
avail_dsds--;
if (partial == 0) {
@@ -999,22 +981,22 @@ alloc_and_fill:
}
}
/* Null termination */
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
+ cur_dsd->address = 0;
+ cur_dsd->length = 0;
+ cur_dsd++;
return 0;
}
int
-qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
- uint16_t tot_dsds, struct qla_tc_param *tc)
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
+ struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
struct scatterlist *sg, *sgl;
- uint32_t *cur_dsd = dsd;
+ struct dsd64 *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
struct scsi_cmnd *cmd;
@@ -1031,8 +1013,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
for_each_sg(sgl, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1072,29 +1052,25 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
}
/* add new list to cmd iocb or last list */
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = dsd_list_len;
- cur_dsd = (uint32_t *)next_dsd;
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
+ &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
+ cur_dsd = next_dsd;
}
- sle_dma = sg_dma_address(sg);
-
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
/* Null termination */
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
+ cur_dsd->address = 0;
+ cur_dsd->length = 0;
+ cur_dsd++;
return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
struct scatterlist *sg, *sgl;
@@ -1109,6 +1085,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
if (sp) {
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
sgl = scsi_prot_sglist(cmd);
vha = sp->vha;
difctx = sp->u.scmd.ctx;
@@ -1314,16 +1291,15 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
}
/* add new list to cmd iocb or last list */
- *cur_dsd++ =
- cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ =
- cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = dsd_list_len;
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
+ &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
cur_dsd = dsd_ptr->dsd_addr;
}
- *cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(sglen);
+ put_unaligned_le64(dif_dsd->dsd_list_dma,
+ &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(sglen);
+ cur_dsd++;
avail_dsds--;
difctx->dif_bundl_len -= sglen;
track_difbundl_buf--;
@@ -1334,8 +1310,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
difctx->no_ldif_dsd, difctx->no_dif_bundl);
} else {
for_each_sg(sgl, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1375,24 +1349,19 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
}
/* add new list to cmd iocb or last list */
- *cur_dsd++ =
- cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ =
- cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = dsd_list_len;
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
+ &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
cur_dsd = dsd_ptr->dsd_addr;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
}
/* Null termination */
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
+ cur_dsd->address = 0;
+ cur_dsd->length = 0;
+ cur_dsd++;
return 0;
}
/**
@@ -1405,11 +1374,12 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
* @tot_prot_dsds: Total number of segments with protection information
* @fw_prot_opts: Protection options to be passed to firmware
*/
-inline int
+static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
- uint32_t *cur_dsd, *fcp_dl;
+ struct dsd64 *cur_dsd;
+ uint32_t *fcp_dl;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t total_bytes = 0;
@@ -1427,7 +1397,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type CRC_2 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
+ put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
vha = sp->vha;
ha = vha->hw;
@@ -1475,8 +1445,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
&crc_ctx_pkt->ref_tag, tot_prot_dsds);
- cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
- cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+ put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
/* Determine SCSI command length -- align to 4 byte boundary */
@@ -1503,10 +1472,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
- cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
- LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
- cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
- MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+ put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
+ &cmd_pkt->fcp_cmnd_dseg_address);
fcp_cmnd->task_management = 0;
fcp_cmnd->task_attribute = TSK_SIMPLE;
@@ -1520,18 +1487,18 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- total_bytes = data_bytes;
- data_bytes += dif_bytes;
- break;
+ total_bytes = data_bytes;
+ data_bytes += dif_bytes;
+ break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- total_bytes = data_bytes + dif_bytes;
- break;
+ total_bytes = data_bytes + dif_bytes;
+ break;
default:
- BUG();
+ BUG();
}
if (!qla2x00_hba_err_chk_enabled(sp))
@@ -1548,7 +1515,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
}
if (!bundling) {
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+ cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
} else {
/*
* Configure Bundling if we need to fetch interlaving
@@ -1558,7 +1525,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
tot_prot_dsds);
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+ cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
}
/* Finish the common fields of CRC pkt */
@@ -1591,7 +1558,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
if (bundling && tot_prot_dsds) {
/* Walks dif segments */
cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+ cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
tot_prot_dsds, NULL))
goto crc_queuing_error;
@@ -2325,7 +2292,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
if (req->cnt < req_cnt + 2) {
if (qpair->use_shadow_reg)
cnt = *req->out_ptr;
- else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2494,7 +2462,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
- cpu_to_le16(sp->fcport->loop_id):
+ cpu_to_le16(sp->fcport->loop_id) :
cpu_to_le16(sp->fcport->loop_id << 8);
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
@@ -2565,6 +2533,16 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
}
}
+void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+{
+ timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
+ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+ sp->free = qla2x00_sp_free;
+ if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
+ init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
+ add_timer(&sp->u.iocb_cmd.timer);
+}
+
static void
qla2x00_els_dcmd_sp_free(void *data)
{
@@ -2726,18 +2704,13 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
els_iocb->tx_byte_count = els_iocb->tx_len =
sizeof(struct els_plogi_payload);
- els_iocb->tx_address[0] =
- cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
- els_iocb->tx_address[1] =
- cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
-
+ put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
+ &els_iocb->tx_address);
els_iocb->rx_dsd_count = 1;
els_iocb->rx_byte_count = els_iocb->rx_len =
sizeof(struct els_plogi_payload);
- els_iocb->rx_address[0] =
- cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
- els_iocb->rx_address[1] =
- cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
+ put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
+ &els_iocb->rx_address);
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
"PLOGI ELS IOCB:\n");
@@ -2745,15 +2718,12 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(uint8_t *)els_iocb, 0x70);
} else {
els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
- els_iocb->tx_address[0] =
- cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
- els_iocb->tx_address[1] =
- cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+ put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
+ &els_iocb->tx_address);
els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
els_iocb->rx_byte_count = 0;
- els_iocb->rx_address[0] = 0;
- els_iocb->rx_address[1] = 0;
+ els_iocb->rx_address = 0;
els_iocb->rx_len = 0;
}
@@ -2976,17 +2946,13 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->tx_byte_count =
cpu_to_le32(bsg_job->request_payload.payload_len);
- els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
- (bsg_job->request_payload.sg_list)));
- els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
- (bsg_job->request_payload.sg_list)));
+ put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
+ &els_iocb->tx_address);
els_iocb->tx_len = cpu_to_le32(sg_dma_len
(bsg_job->request_payload.sg_list));
- els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
- (bsg_job->reply_payload.sg_list)));
- els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
- (bsg_job->reply_payload.sg_list)));
+ put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
+ &els_iocb->rx_address);
els_iocb->rx_len = cpu_to_le32(sg_dma_len
(bsg_job->reply_payload.sg_list));
@@ -2997,14 +2963,13 @@ static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
uint16_t avail_dsds;
- uint32_t *cur_dsd;
+ struct dsd64 *cur_dsd;
struct scatterlist *sg;
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct bsg_job *bsg_job = sp->u.bsg_job;
- int loop_iterartion = 0;
int entry_count = 1;
memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
@@ -3024,25 +2989,20 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
ct_iocb->rsp_bytecount =
cpu_to_le32(bsg_job->reply_payload.payload_len);
- ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
- (bsg_job->request_payload.sg_list)));
- ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
- (bsg_job->request_payload.sg_list)));
- ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
+ put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
+ &ct_iocb->req_dsd.address);
+ ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
- ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
- (bsg_job->reply_payload.sg_list)));
- ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
- (bsg_job->reply_payload.sg_list)));
- ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
+ put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
+ &ct_iocb->rsp_dsd.address);
+ ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
avail_dsds = 1;
- cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
+ cur_dsd = &ct_iocb->rsp_dsd;
index = 0;
tot_dsds = bsg_job->reply_payload.sg_cnt;
for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
- dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
@@ -3053,16 +3013,12 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
vha->hw->req_q_map[0]);
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ cur_dsd = cont_pkt->dsd;
avail_dsds = 5;
entry_count++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
- loop_iterartion++;
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
ct_iocb->entry_count = entry_count;
@@ -3074,7 +3030,7 @@ static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
uint16_t avail_dsds;
- uint32_t *cur_dsd;
+ struct dsd64 *cur_dsd;
struct scatterlist *sg;
int index;
uint16_t cmd_dsds, rsp_dsds;
@@ -3103,12 +3059,10 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
cpu_to_le32(bsg_job->request_payload.payload_len);
avail_dsds = 2;
- cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
+ cur_dsd = ct_iocb->dsd;
index = 0;
for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
@@ -3117,23 +3071,18 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(
vha, ha->req_q_map[0]);
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ cur_dsd = cont_pkt->dsd;
avail_dsds = 5;
entry_count++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
index = 0;
for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
@@ -3142,15 +3091,12 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
ha->req_q_map[0]);
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ cur_dsd = cont_pkt->dsd;
avail_dsds = 5;
entry_count++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
ct_iocb->entry_count = entry_count;
@@ -3371,10 +3317,8 @@ sufficient_dsds:
*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
- cmd_pkt->fcp_cmnd_dseg_address[0] =
- cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
- cmd_pkt->fcp_cmnd_dseg_address[1] =
- cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+ put_unaligned_le64(ctx->fcp_cmnd_dma,
+ &cmd_pkt->fcp_cmnd_dseg_address);
sp->flags |= SRB_FCP_CMND_DMA_VALID;
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
@@ -3386,6 +3330,7 @@ sufficient_dsds:
cmd_pkt->entry_status = (uint8_t) rsp->id;
} else {
struct cmd_type_7 *cmd_pkt;
+
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)RD_REG_DWORD_RELAXED(
@@ -3590,15 +3535,13 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
cmd_pkt->tx_dseg_count = 1;
cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
- cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
- cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
- cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
+ cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
+ put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
cmd_pkt->rx_dseg_count = 1;
cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
- cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
- cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
- cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
+ cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
return rval;
}
@@ -3737,7 +3680,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
uint16_t avail_dsds;
- uint32_t *cur_dsd;
+ struct dsd64 *cur_dsd;
uint32_t req_data_len = 0;
uint32_t rsp_data_len = 0;
struct scatterlist *sg;
@@ -3746,8 +3689,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
struct bsg_job *bsg_job = sp->u.bsg_job;
/* Update entry type to indicate bidir command */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- cpu_to_le32(COMMAND_BIDIRECTIONAL);
+ put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
/* Set the transfer direction; for bidirectional commands set both flags.
* Also set the BD_WRAP_BACK flag, firmware will take care
@@ -3773,13 +3715,12 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
* are bundled in continuation iocb
*/
avail_dsds = 1;
- cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ cur_dsd = &cmd_pkt->fcp_dsd;
index = 0;
for_each_sg(bsg_job->request_payload.sg_list, sg,
bsg_job->request_payload.sg_cnt, index) {
- dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets */
@@ -3788,14 +3729,11 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
* 5 DSDS
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ cur_dsd = cont_pkt->dsd;
avail_dsds = 5;
entry_count++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
/* For read requests the DSDs will always go to the continuation IOCB
@@ -3805,7 +3743,6 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
*/
for_each_sg(bsg_job->reply_payload.sg_list, sg,
bsg_job->reply_payload.sg_cnt, index) {
- dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets */
@@ -3814,14 +3751,11 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
* 5 DSDS
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ cur_dsd = cont_pkt->dsd;
avail_dsds = 5;
entry_count++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
/* This value should be same as number of IOCB required for this cmd */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 69bbea9239cc..78aec50abe0f 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -23,6 +23,14 @@ static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
+const char *const port_state_str[] = {
+ "Unknown",
+ "UNCONFIGURED",
+ "DEAD",
+ "LOST",
+ "ONLINE"
+};
+
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
* @irq: interrupt number
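Editorial aside: the port_state_str[] table introduced above gives the fcport state machine printable names. A hedged usage sketch follows; indexing by the driver's FCS_* state values is an assumption based on the table ordering, not something this hunk shows:

	/* Illustrative only: assumes FCS_UNCONFIGURED..FCS_ONLINE index the table. */
	ql_dbg(ql_dbg_disc, vha, 0xffff, "port %8phC state: %s\n",
	    fcport->port_name, port_state_str[atomic_read(&fcport->state)]);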
@@ -41,7 +49,7 @@ qla2100_intr_handler(int irq, void *dev_id)
int status;
unsigned long iter;
uint16_t hccr;
- uint16_t mb[4];
+ uint16_t mb[8];
struct rsp_que *rsp;
unsigned long flags;
@@ -160,7 +168,7 @@ qla2300_intr_handler(int irq, void *dev_id)
unsigned long iter;
uint32_t stat;
uint16_t hccr;
- uint16_t mb[4];
+ uint16_t mb[8];
struct rsp_que *rsp;
struct qla_hw_data *ha;
unsigned long flags;
@@ -366,7 +374,7 @@ qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
static const char *const link_speeds[] = {
"1", "2", "?", "4", "8", "16", "32", "10"
};
-#define QLA_LAST_SPEED 7
+#define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return link_speeds[0];
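Editorial aside: deriving QLA_LAST_SPEED from ARRAY_SIZE() keeps the bound in step with the table if entries are ever added. The rest of the lookup sits outside this hunk; a sketch of the bounded-index pattern the macro protects, with the fallback assumed to be the "?" slot:

	if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	return link_speeds[2];	/* "?" -- unrecognized speed code */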
@@ -708,12 +716,15 @@ skip_rio:
break;
case MBA_SYSTEM_ERR: /* System Error */
- mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
+ mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) ?
RD_REG_WORD(&reg24->mailbox7) : 0;
ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
-
+ ha->fw_dump_mpi =
+ (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ RD_REG_WORD(&reg24->mailbox7) & BIT_8;
ha->isp_ops->fw_dump(vha, 1);
ha->flags.fw_init_done = 0;
QLA_FW_STOPPED(ha);
@@ -837,6 +848,7 @@ skip_rio:
if (ha->flags.fawwpn_enabled &&
(ha->current_topology == ISP_CFG_F)) {
void *wwpn = ha->init_cb->port_name;
+
memcpy(vha->port_name, wwpn, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
@@ -1372,7 +1384,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->status_flags));
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
- (uint8_t *)mbx, sizeof(*mbx));
+ mbx, sizeof(*mbx));
goto logio_done;
}
@@ -1516,7 +1528,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_reply->reply_payload_rcv_len = 0;
}
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
- (uint8_t *)pkt, sizeof(*pkt));
+ pkt, sizeof(*pkt));
} else {
res = DID_OK << 16;
bsg_reply->reply_payload_rcv_len =
@@ -1591,8 +1603,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
- fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
- fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
+ fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+ fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd;
@@ -1613,7 +1625,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
res = DID_ERROR << 16;
}
}
- ql_log(ql_log_info, vha, 0x503f,
+ ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1656,7 +1668,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
fw_status, sizeof(fw_status));
ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
- (uint8_t *)pkt, sizeof(*pkt));
+ pkt, sizeof(*pkt));
}
else {
res = DID_OK << 16;
@@ -1700,7 +1712,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
logio->entry_status);
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
- (uint8_t *)logio, sizeof(*logio));
+ logio, sizeof(*logio));
goto logio_done;
}
@@ -1846,8 +1858,8 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
}
if (iocb->u.tmf.data != QLA_SUCCESS)
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
- (uint8_t *)sts, sizeof(*sts));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
+ sts, sizeof(*sts));
sp->done(sp, 0);
}
@@ -1969,6 +1981,52 @@ static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
sp->done(sp, rval);
}
+/* Process a single response queue entry. */
+static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
+ struct rsp_que *rsp,
+ sts_entry_t *pkt)
+{
+ sts21_entry_t *sts21_entry;
+ sts22_entry_t *sts22_entry;
+ uint16_t handle_cnt;
+ uint16_t cnt;
+
+ switch (pkt->entry_type) {
+ case STATUS_TYPE:
+ qla2x00_status_entry(vha, rsp, pkt);
+ break;
+ case STATUS_TYPE_21:
+ sts21_entry = (sts21_entry_t *)pkt;
+ handle_cnt = sts21_entry->handle_count;
+ for (cnt = 0; cnt < handle_cnt; cnt++)
+ qla2x00_process_completed_request(vha, rsp->req,
+ sts21_entry->handle[cnt]);
+ break;
+ case STATUS_TYPE_22:
+ sts22_entry = (sts22_entry_t *)pkt;
+ handle_cnt = sts22_entry->handle_count;
+ for (cnt = 0; cnt < handle_cnt; cnt++)
+ qla2x00_process_completed_request(vha, rsp->req,
+ sts22_entry->handle[cnt]);
+ break;
+ case STATUS_CONT_TYPE:
+ qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+ break;
+ case MBX_IOCB_TYPE:
+ qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
+ break;
+ case CT_IOCB_TYPE:
+ qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+ break;
+ default:
+ /* Type Not Supported. */
+ ql_log(ql_log_warn, vha, 0x504a,
+ "Received unknown response pkt type %x entry status=%x.\n",
+ pkt->entry_type, pkt->entry_status);
+ break;
+ }
+}
+
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @rsp: response queue
@@ -1980,8 +2038,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
struct qla_hw_data *ha = rsp->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
sts_entry_t *pkt;
- uint16_t handle_cnt;
- uint16_t cnt;
vha = pci_get_drvdata(ha->pdev);
@@ -2006,42 +2062,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
continue;
}
- switch (pkt->entry_type) {
- case STATUS_TYPE:
- qla2x00_status_entry(vha, rsp, pkt);
- break;
- case STATUS_TYPE_21:
- handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
- for (cnt = 0; cnt < handle_cnt; cnt++) {
- qla2x00_process_completed_request(vha, rsp->req,
- ((sts21_entry_t *)pkt)->handle[cnt]);
- }
- break;
- case STATUS_TYPE_22:
- handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
- for (cnt = 0; cnt < handle_cnt; cnt++) {
- qla2x00_process_completed_request(vha, rsp->req,
- ((sts22_entry_t *)pkt)->handle[cnt]);
- }
- break;
- case STATUS_CONT_TYPE:
- qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
- break;
- case MBX_IOCB_TYPE:
- qla2x00_mbx_iocb_entry(vha, rsp->req,
- (struct mbx_entry *)pkt);
- break;
- case CT_IOCB_TYPE:
- qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
- break;
- default:
- /* Type Not Supported. */
- ql_log(ql_log_warn, vha, 0x504a,
- "Received unknown response pkt type %x "
- "entry status=%x.\n",
- pkt->entry_type, pkt->entry_status);
- break;
- }
+ qla2x00_process_response_entry(vha, rsp, pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
}
@@ -2238,6 +2259,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
struct fc_bsg_reply *bsg_reply;
sts_entry_t *sts;
struct sts_entry_24xx *sts24;
+
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -3014,7 +3036,8 @@ process_err:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
/* ensure that the ATIO queue is empty */
qlt_handle_abts_recv(vha, rsp,
(response_t *)pkt);
@@ -3072,6 +3095,7 @@ process_err:
/* Adjust ring index */
if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
} else {
WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
@@ -3087,7 +3111,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
- !IS_QLA27XX(ha))
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return;
rval = QLA_SUCCESS;
@@ -3475,7 +3499,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
ql_log(ql_log_fatal, vha, 0x00c8,
"Failed to allocate memory for ha->msix_entries.\n");
ret = -ENOMEM;
- goto msix_out;
+ goto free_irqs;
}
ha->flags.msix_enabled = 1;
@@ -3539,7 +3563,7 @@ msix_register_fail:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
(ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
ql2xmqsupport))
@@ -3558,6 +3582,10 @@ msix_register_fail:
msix_out:
return ret;
+
+free_irqs:
+ pci_free_irq_vectors(ha->pdev);
+ goto msix_out;
}
int
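Editorial aside: the new free_irqs label sits after the normal msix_out return, so the allocation-failure path above releases the vectors obtained from pci_alloc_irq_vectors() before rejoining the common exit. A condensed sketch of the resulting shape (the allocation call shown is assumed from context, not quoted):

	ha->msix_entries = kcalloc(ha->msix_count,
	    sizeof(struct qla_msix_entry), GFP_KERNEL);
	if (!ha->msix_entries) {
		ret = -ENOMEM;
		goto free_irqs;		/* undo pci_alloc_irq_vectors() */
	}
	/* ... request and register the vectors ... */
msix_out:
	return ret;

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;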
@@ -3570,7 +3598,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
/* If possible, enable MSI-X. */
if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
!IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
- !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
+ !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
goto skip_msi;
if (ql2xenablemsix == 2)
@@ -3609,7 +3637,7 @@ skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
- !IS_QLA27XX(ha))
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
goto skip_msi;
ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 5400696e1f6b..133f5f6270ff 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -567,9 +567,9 @@ mbx_done:
mcp->mb[0]);
} else if (rval) {
if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
- pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
+ pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
dev_name(&ha->pdev->dev), 0x1020+0x800,
- vha->host_no);
+ vha->host_no, rval);
mboxes = mcp->in_mb;
cnt = 4;
for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
@@ -634,14 +634,15 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
mcp->out_mb |= MBX_4;
}
- mcp->in_mb = MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1023,
- "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
"Done %s.\n", __func__);
@@ -656,7 +657,7 @@ static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
{
uint16_t mb4 = BIT_0;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
return mb4;
@@ -666,7 +667,7 @@ static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
{
uint16_t mb4 = BIT_0;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
struct nvram_81xx *nv = ha->nvram;
mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
@@ -711,7 +712,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[4] = 0;
ha->flags.using_lr_setting = 0;
if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
- IS_QLA27XX(ha)) {
+ IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
if (ql2xautodetectsfp) {
if (ha->flags.detected_lr_sfp) {
mcp->mb[4] |=
@@ -730,19 +731,20 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
}
}
- if (ql2xnvmeenable && IS_QLA27XX(ha))
+ if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
mcp->mb[4] |= NVME_ENABLE_FLAG;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
struct nvram_81xx *nv = ha->nvram;
/* set minimum speed if specified in nvram */
- if (nv->min_link_speed >= 2 &&
- nv->min_link_speed <= 5) {
+ if (nv->min_supported_speed >= 2 &&
+ nv->min_supported_speed <= 5) {
mcp->mb[4] |= BIT_4;
- mcp->mb[11] = nv->min_link_speed;
+ mcp->mb[11] |= nv->min_supported_speed & 0xF;
mcp->out_mb |= MBX_11;
mcp->in_mb |= BIT_5;
- vha->min_link_speed_feat = nv->min_link_speed;
+ vha->min_supported_speed =
+ nv->min_supported_speed;
}
}
@@ -770,34 +772,39 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1026,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
- } else {
- if (IS_FWI2_CAPABLE(ha)) {
- ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
- ql_dbg(ql_dbg_mbx, vha, 0x119a,
- "fw_ability_mask=%x.\n", ha->fw_ability_mask);
- ql_dbg(ql_dbg_mbx, vha, 0x1027,
- "exchanges=%x.\n", mcp->mb[1]);
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
- ha->max_speed_sup = mcp->mb[2] & BIT_0;
- ql_dbg(ql_dbg_mbx, vha, 0x119b,
- "Maximum speed supported=%s.\n",
- ha->max_speed_sup ? "32Gps" : "16Gps");
- if (vha->min_link_speed_feat) {
- ha->min_link_speed = mcp->mb[5];
- ql_dbg(ql_dbg_mbx, vha, 0x119c,
- "Minimum speed set=%s.\n",
- mcp->mb[5] == 5 ? "32Gps" :
- mcp->mb[5] == 4 ? "16Gps" :
- mcp->mb[5] == 3 ? "8Gps" :
- mcp->mb[5] == 2 ? "4Gps" :
- "unknown");
- }
- }
+ return rval;
+ }
+
+ if (!IS_FWI2_CAPABLE(ha))
+ goto done;
+
+ ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
+ ql_dbg(ql_dbg_mbx, vha, 0x119a,
+ "fw_ability_mask=%x.\n", ha->fw_ability_mask);
+ ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
+ ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
+ ha->max_supported_speed == 0 ? "16Gps" :
+ ha->max_supported_speed == 1 ? "32Gps" :
+ ha->max_supported_speed == 2 ? "64Gps" : "unknown");
+ if (vha->min_supported_speed) {
+ ha->min_supported_speed = mcp->mb[5] &
+ (BIT_0 | BIT_1 | BIT_2);
+ ql_dbg(ql_dbg_mbx, vha, 0x119c,
+ "min_supported_speed=%s.\n",
+ ha->min_supported_speed == 6 ? "64Gps" :
+ ha->min_supported_speed == 5 ? "32Gps" :
+ ha->min_supported_speed == 4 ? "16Gps" :
+ ha->min_supported_speed == 3 ? "8Gps" :
+ ha->min_supported_speed == 2 ? "4Gps" : "unknown");
}
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
- "Done.\n");
}
+done:
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+ "Done %s.\n", __func__);
+
return rval;
}
@@ -1053,10 +1060,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
- if (IS_QLA27XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->in_mb |=
MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
- MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
+ MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
@@ -1122,7 +1129,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
}
}
- if (IS_QLA27XX(ha)) {
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->serdes_version[0] = mcp->mb[7] & 0xff;
+ ha->serdes_version[1] = mcp->mb[8] >> 8;
+ ha->serdes_version[2] = mcp->mb[8] & 0xff;
ha->mpi_version[0] = mcp->mb[10] & 0xff;
ha->mpi_version[1] = mcp->mb[11] >> 8;
ha->mpi_version[2] = mcp->mb[11] & 0xff;
@@ -1133,6 +1143,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
+ if (IS_QLA28XX(ha)) {
+ if (mcp->mb[16] & BIT_10) {
+ ql_log(ql_log_info, vha, 0xffff,
+ "FW support secure flash updates\n");
+ ha->flags.secure_fw = 1;
+ }
+ }
}
failed:
@@ -1638,7 +1655,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
if (IS_FWI2_CAPABLE(vha->hw))
mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
- if (IS_QLA27XX(vha->hw))
+ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
mcp->in_mb |= MBX_15;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -1692,7 +1709,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
}
}
- if (IS_QLA27XX(vha->hw))
+ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
vha->bbcr = mcp->mb[15];
}
@@ -1808,7 +1825,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
}
/* 1 and 2 should normally be captured. */
mcp->in_mb = MBX_2|MBX_1|MBX_0;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* mb3 is additional info about the installed SFP. */
mcp->in_mb |= MBX_3;
mcp->buf_size = size;
@@ -1819,10 +1836,20 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x104d,
- "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
+ "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
+ if (ha->init_cb) {
+ ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
+ 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
+ }
+ if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
+ ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
+ 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
+ }
} else {
- if (IS_QLA27XX(ha)) {
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
ql_dbg(ql_dbg_mbx, vha, 0x119d,
"Invalid SFP/Validation Failed\n");
@@ -2006,7 +2033,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
/* Passback COS information. */
fcport->supported_classes = (pd->options & BIT_4) ?
- FC_COS_CLASS2: FC_COS_CLASS3;
+ FC_COS_CLASS2 : FC_COS_CLASS3;
}
gpd_error_out:
@@ -2076,7 +2103,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
} else {
- if (IS_QLA27XX(ha)) {
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
ql_dbg(ql_dbg_mbx, vha, 0x119e,
"Invalid SFP/Validation Failed\n");
@@ -2859,7 +2886,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->in_mb |= MBX_12;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -2884,7 +2912,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
ha->orig_fw_iocb_count = mcp->mb[10];
if (ha->flags.npiv_supported)
ha->max_npiv_vports = mcp->mb[11];
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha))
ha->fw_max_fcf_count = mcp->mb[12];
}
@@ -3248,7 +3277,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
/* Issue marker IOCB. */
rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
- type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
+ type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1099,
"Failed to issue marker IOCB (%x).\n", rval2);
@@ -3323,7 +3352,7 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
mbx_cmd_t *mcp = &mc;
if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
- !IS_QLA27XX(vha->hw))
+ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@@ -3362,7 +3391,7 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
mbx_cmd_t *mcp = &mc;
if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
- !IS_QLA27XX(vha->hw))
+ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@@ -3631,7 +3660,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
"Entered %s.\n", __func__);
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
- !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+ !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+ !IS_QLA28XX(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -3744,7 +3774,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
rval = qla2x00_mailbox_command(vha, mcp);
/* Return mailbox statuses. */
- if (mb != NULL) {
+ if (mb) {
mb[0] = mcp->mb[0];
mb[1] = mcp->mb[1];
mb[3] = mcp->mb[3];
@@ -3779,7 +3809,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+ mcp->mb[3] = port_speed & 0x3F;
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_3|MBX_1|MBX_0;
@@ -3788,7 +3818,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
rval = qla2x00_mailbox_command(vha, mcp);
/* Return mailbox statuses. */
- if (mb != NULL) {
+ if (mb) {
mb[0] = mcp->mb[0];
mb[1] = mcp->mb[1];
mb[3] = mcp->mb[3];
@@ -4230,7 +4260,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
"Dump of Verify Request.\n");
ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
- (uint8_t *)mn, sizeof(*mn));
+ mn, sizeof(*mn));
rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
if (rval != QLA_SUCCESS) {
@@ -4242,7 +4272,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
"Dump of Verify Response.\n");
ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
- (uint8_t *)mn, sizeof(*mn));
+ mn, sizeof(*mn));
status[0] = le16_to_cpu(mn->p.rsp.comp_status);
status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
@@ -4318,7 +4348,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mcp->mb[12] = req->qos;
mcp->mb[11] = req->vp_idx;
mcp->mb[13] = req->rid;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->mb[15] = 0;
mcp->mb[4] = req->id;
@@ -4332,9 +4362,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mcp->flags = MBX_DMA_OUT;
mcp->tov = MBX_TOV_SECONDS * 2;
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
mcp->out_mb |= MBX_15;
/* debug q create issue in SR-IOV */
mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -4343,7 +4374,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
WRT_REG_DWORD(req->req_q_in, 0);
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
WRT_REG_DWORD(req->req_q_out, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4387,7 +4418,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mcp->mb[5] = rsp->length;
mcp->mb[14] = rsp->msix->entry;
mcp->mb[13] = rsp->rid;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->mb[15] = 0;
mcp->mb[4] = rsp->id;
@@ -4404,7 +4435,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
if (IS_QLA81XX(ha)) {
mcp->out_mb |= MBX_12|MBX_11|MBX_10;
mcp->in_mb |= MBX_1;
- } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
mcp->in_mb |= MBX_1;
/* debug q create issue in SR-IOV */
@@ -4414,7 +4445,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
WRT_REG_DWORD(rsp->rsp_q_out, 0);
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
WRT_REG_DWORD(rsp->rsp_q_in, 0);
}
@@ -4472,7 +4503,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
"Entered %s.\n", __func__);
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
- !IS_QLA27XX(vha->hw))
+ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -4504,7 +4535,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
mbx_cmd_t *mcp = &mc;
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
- !IS_QLA27XX(vha->hw))
+ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
@@ -4539,7 +4570,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
mbx_cmd_t *mcp = &mc;
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
- !IS_QLA27XX(vha->hw))
+ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
@@ -4570,6 +4601,42 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
}
int
+qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
+{
+ int rval = QLA_SUCCESS;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return rval;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+ mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
+ FAC_OPT_CMD_UNLOCK_SEMAPHORE);
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10e3,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
{
int rval = 0;
@@ -4818,10 +4885,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x10e9,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
- if (mcp->mb[0] == MBS_COMMAND_ERROR &&
- mcp->mb[1] == 0x22)
+ if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
/* sfp is not there */
rval = QLA_INTERFACE_ERROR;
+ }
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
"Done %s.\n", __func__);
@@ -5161,13 +5228,14 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mcp->mb[3] = MSW(data);
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1101,
- "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
"Done %s.\n", __func__);
@@ -5278,7 +5346,7 @@ qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
mcp->out_mb = MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->in_mb |= MBX_4|MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -5316,7 +5384,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->mb[1] = QLA_GET_DATA_RATE;
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -5346,7 +5414,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
"Entered %s.\n", __func__);
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
- !IS_QLA27XX(ha))
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_PORT_CONFIG;
mcp->out_mb = MBX_0;
@@ -5662,6 +5730,7 @@ qla8044_md_get_template(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
int rval = QLA_FUNCTION_FAILED;
int offset = 0, size = MINIDUMP_SIZE_36K;
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
"Entered %s.\n", __func__);
@@ -5842,7 +5911,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
@@ -5917,7 +5986,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
struct qla_hw_data *ha = vha->hw;
unsigned long retry_max_time = jiffies + (2 * HZ);
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
@@ -5967,7 +6036,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
@@ -6101,7 +6170,8 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
mbx_cmd_t *mcp = &mc;
dma_addr_t dd_dma;
- if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+ !IS_QLA28XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
@@ -6318,7 +6388,13 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.rsvd_1 = 0;
if (fcport->fc4f_nvme) {
- fcport->port_type = FCT_NVME;
+ fcport->port_type = 0;
+ if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
+ fcport->port_type |= FCT_NVME_INITIATOR;
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type |= FCT_NVME_TARGET;
+ if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
+ fcport->port_type |= FCT_NVME_DISCOVERY;
} else {
/* If not target must be initiator or unknown type. */
if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
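Editorial aside: splitting FCT_NVME into initiator/target/discovery bits lets later code act on the exact NVMe role advertised in the PRLI service parameters. A hypothetical consumer, for illustration only -- the real registration path may gate on different conditions:

	/* Hypothetical check, not taken from this patch. */
	if (fcport->fc4f_nvme && (fcport->port_type & FCT_NVME_TARGET))
		qla_nvme_register_remote(vha, fcport);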
@@ -6507,3 +6583,101 @@ int qla24xx_res_count_wait(struct scsi_qla_host *vha,
done:
return rval;
}
+
+int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
+ uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
+ uint32_t sfub_len)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
+ mcp->mb[1] = opts;
+ mcp->mb[2] = region;
+ mcp->mb[3] = MSW(len);
+ mcp->mb[4] = LSW(len);
+ mcp->mb[5] = MSW(sfub_dma_addr);
+ mcp->mb[6] = LSW(sfub_dma_addr);
+ mcp->mb[7] = MSW(MSD(sfub_dma_addr));
+ mcp->mb[8] = LSW(MSD(sfub_dma_addr));
+ mcp->mb[9] = sfub_len;
+ mcp->out_mb =
+ MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
+ __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
+ mcp->mb[2]);
+ }
+
+ return rval;
+}
+
+int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
+ uint32_t data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_WRITE_REMOTE_REG;
+ mcp->mb[1] = LSW(addr);
+ mcp->mb[2] = MSW(addr);
+ mcp->mb[3] = LSW(data);
+ mcp->mb[4] = MSW(data);
+ mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
+ uint32_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_READ_REMOTE_REG;
+ mcp->mb[1] = LSW(addr);
+ mcp->mb[2] = MSW(addr);
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
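Editorial aside: the three helpers added at the end of qla_mbx.c follow the driver's standard mailbox pattern -- fill mcp->mb[], declare outbound/inbound registers via the out_mb/in_mb bitmasks, then call qla2x00_mailbox_command(). A hedged caller-side sketch for the new remote-register helpers; the register address is made up purely for illustration:

	uint32_t data = 0;

	if (qla2xxx_write_remote_register(vha, 0x00600000, 0x1) == QLA_SUCCESS &&
	    qla2xxx_read_remote_register(vha, 0x00600000, &data) == QLA_SUCCESS)
		ql_log(ql_log_info, vha, 0xffff,
		    "remote reg readback: 0x%x\n", data);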
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 099d8e9851cb..b2977e49356b 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -905,7 +905,8 @@ static void qla_ctrlvp_sp_done(void *s, int res)
{
struct srb *sp = s;
- complete(&sp->comp);
+ if (sp->comp)
+ complete(sp->comp);
/* don't free sp here. Let the caller do the free */
}
@@ -922,6 +923,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
struct qla_hw_data *ha = vha->hw;
int vp_index = vha->vp_idx;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ DECLARE_COMPLETION_ONSTACK(comp);
srb_t *sp;
ql_dbg(ql_dbg_vport, vha, 0x10c1,
@@ -936,6 +938,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
+ sp->comp = &comp;
sp->done = qla_ctrlvp_sp_done;
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
@@ -953,7 +956,9 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
sp->name, sp->handle);
- wait_for_completion(&sp->comp);
+ wait_for_completion(&comp);
+ sp->comp = NULL;
+
rval = sp->rc;
switch (rval) {
case QLA_FUNCTION_TIMEOUT:
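Editorial aside: the qla_mid.c change moves the completion off the srb and onto the caller's stack. qla24xx_control_vp() points sp->comp at an on-stack completion, the done callback completes it only while the pointer is set, and the caller clears the pointer after waking, the idea being that a late callback no longer touches a stale completion. A condensed sketch of the pattern (driver details elided):

	DECLARE_COMPLETION_ONSTACK(comp);

	sp->comp = &comp;		/* done() will complete(sp->comp) if set */
	/* ... issue the IOCB ... */
	wait_for_completion(&comp);
	sp->comp = NULL;		/* late completions are now ignored */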
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 60f964c53c01..942ee13b96a4 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -273,9 +273,9 @@ premature_exit:
if (rval) {
ql_log(ql_log_warn, base_vha, 0x1163,
- "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
- "mb[3]=%x, cmd=%x ****.\n",
- mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+ "**** Failed=%x mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
+ command);
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
}
@@ -629,17 +629,20 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-void
+int
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(ha->pdev) &&
ha->flags.pci_channel_io_perm_failure))
- return;
+ return rval;
ha->isp_ops->disable_intrs(ha);
qlafx00_soc_cpu_reset(vha);
+
+ return QLA_SUCCESS;
}
/**
@@ -1138,8 +1141,8 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
"Listing Target bit map...\n");
- ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
- 0x2089, (uint8_t *)ha->gid_list, 32);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089,
+ ha->gid_list, 32);
/* Allocate temporary rmtport for any new rmtports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
@@ -1320,6 +1323,7 @@ qlafx00_configure_devices(scsi_qla_host_t *vha)
{
int rval;
unsigned long flags;
+
rval = QLA_SUCCESS;
flags = vha->dpc_flags;
@@ -1913,8 +1917,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
phost_info->domainname,
phost_info->hostdriver);
ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
- (uint8_t *)phost_info,
- sizeof(struct host_system_info));
+ phost_info, sizeof(*phost_info));
}
}
@@ -1968,7 +1971,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
vha->d_id.b.al_pa = pinfo->port_id[2];
qlafx00_update_host_attr(vha, pinfo);
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
- (uint8_t *)pinfo, 16);
+ pinfo, 16);
} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
struct qlafx00_tgt_node_info *pinfo =
(struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
@@ -1976,12 +1979,12 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
fcport->port_type = FCT_TARGET;
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
- (uint8_t *)pinfo, 16);
+ pinfo, 16);
} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
struct qlafx00_tgt_node_info *pinfo =
(struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
- (uint8_t *)pinfo, 16);
+ pinfo, 16);
memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
} else if (fx_type == FXDISC_ABORT_IOCTL)
fdisc->u.fxiocb.result =
@@ -2248,18 +2251,16 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
- sizeof(struct qla_mt_iocb_rsp_fx00));
+ memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus));
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x5080,
- (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+ sp->vha, 0x5080, pkt, sizeof(*pkt));
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x5074,
- (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+ sp->vha, 0x5074,
+ fw_sts_ptr, sizeof(fstatus));
res = bsg_reply->result = DID_OK << 16;
bsg_reply->reply_payload_rcv_len =
@@ -2597,7 +2598,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
/* Move sense data. */
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
- (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+ pkt, sizeof(*pkt));
memcpy(sense_ptr, pkt->data, sense_sz);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
sense_ptr, sense_sz);
@@ -2992,7 +2993,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
uint16_t avail_dsds;
- __le32 *cur_dsd;
+ struct dsd64 *cur_dsd;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
@@ -3028,12 +3029,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
/* One DSD is available in the Command Type 3 IOCB */
avail_dsds = 1;
- cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
+ cur_dsd = &lcmd_pkt->dsd;
/* Load data segments */
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
@@ -3043,26 +3042,23 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
cont_pkt =
qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
- cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
+ cur_dsd = lcont_pkt.dsd;
avail_dsds = 5;
cont = 1;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
if (avail_dsds == 0 && cont == 1) {
cont = 0;
memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
- REQUEST_ENTRY_SIZE);
+ sizeof(lcont_pkt));
}
}
if (avail_dsds != 0 && cont == 1) {
memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
- REQUEST_ENTRY_SIZE);
+ sizeof(lcont_pkt));
}
}
@@ -3172,9 +3168,9 @@ qlafx00_start_scsi(srb_t *sp)
lcmd_pkt.entry_status = (uint8_t) rsp->id;
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
- (uint8_t *)cmd->cmnd, cmd->cmd_len);
+ cmd->cmnd, cmd->cmd_len);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
- (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+ &lcmd_pkt, sizeof(lcmd_pkt));
memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
wmb();
@@ -3282,11 +3278,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.req_dsdcnt = cpu_to_le16(1);
fx_iocb.req_xfrcnt =
cpu_to_le16(fxio->u.fxiocb.req_len);
- fx_iocb.dseg_rq_address[0] =
- cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
- fx_iocb.dseg_rq_address[1] =
- cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
- fx_iocb.dseg_rq_len =
+ put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
+ &fx_iocb.dseg_rq.address);
+ fx_iocb.dseg_rq.length =
cpu_to_le32(fxio->u.fxiocb.req_len);
}
@@ -3294,11 +3288,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
fx_iocb.rsp_xfrcnt =
cpu_to_le16(fxio->u.fxiocb.rsp_len);
- fx_iocb.dseg_rsp_address[0] =
- cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
- fx_iocb.dseg_rsp_address[1] =
- cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
- fx_iocb.dseg_rsp_len =
+ put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
+ &fx_iocb.dseg_rsp.address);
+ fx_iocb.dseg_rsp.length =
cpu_to_le32(fxio->u.fxiocb.rsp_len);
}
@@ -3308,6 +3300,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.flags = fxio->u.fxiocb.flags;
} else {
struct scatterlist *sg;
+
bsg_job = sp->u.bsg_job;
bsg_request = bsg_job->request;
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
@@ -3327,19 +3320,17 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
int avail_dsds, tot_dsds;
cont_a64_entry_t lcont_pkt;
cont_a64_entry_t *cont_pkt = NULL;
- __le32 *cur_dsd;
+ struct dsd64 *cur_dsd;
int index = 0, cont = 0;
fx_iocb.req_dsdcnt =
cpu_to_le16(bsg_job->request_payload.sg_cnt);
tot_dsds =
bsg_job->request_payload.sg_cnt;
- cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
+ cur_dsd = &fx_iocb.dseg_rq;
avail_dsds = 1;
for_each_sg(bsg_job->request_payload.sg_list, sg,
tot_dsds, index) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
@@ -3351,17 +3342,13 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
cont_pkt =
qlafx00_prep_cont_type1_iocb(
sp->vha->req, &lcont_pkt);
- cur_dsd = (__le32 *)
- lcont_pkt.dseg_0_address;
+ cur_dsd = lcont_pkt.dsd;
avail_dsds = 5;
cont = 1;
entry_cnt++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
if (avail_dsds == 0 && cont == 1) {
@@ -3389,19 +3376,17 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
int avail_dsds, tot_dsds;
cont_a64_entry_t lcont_pkt;
cont_a64_entry_t *cont_pkt = NULL;
- __le32 *cur_dsd;
+ struct dsd64 *cur_dsd;
int index = 0, cont = 0;
fx_iocb.rsp_dsdcnt =
cpu_to_le16(bsg_job->reply_payload.sg_cnt);
tot_dsds = bsg_job->reply_payload.sg_cnt;
- cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
+ cur_dsd = &fx_iocb.dseg_rsp;
avail_dsds = 1;
for_each_sg(bsg_job->reply_payload.sg_list, sg,
tot_dsds, index) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
@@ -3413,17 +3398,13 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
cont_pkt =
qlafx00_prep_cont_type1_iocb(
sp->vha->req, &lcont_pkt);
- cur_dsd = (__le32 *)
- lcont_pkt.dseg_0_address;
+ cur_dsd = lcont_pkt.dsd;
avail_dsds = 5;
cont = 1;
entry_cnt++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
if (avail_dsds == 0 && cont == 1) {
@@ -3454,10 +3435,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
}
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->vha, 0x3047,
- (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+ sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb));
- memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
- sizeof(struct fxdisc_entry_fx00));
+ memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb));
wmb();
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index aeaa1b40b1fc..4567f0c42486 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -7,6 +7,8 @@
#ifndef __QLA_MR_H
#define __QLA_MR_H
+#include "qla_dsd.h"
+
/*
* The PCI VendorID and DeviceID for our board.
*/
@@ -46,8 +48,7 @@ struct cmd_type_7_fx00 {
uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
__le32 byte_count; /* Total byte count. */
- uint32_t dseg_0_address[2]; /* Data segment 0 address. */
- uint32_t dseg_0_len; /* Data segment 0 length. */
+ struct dsd64 dsd;
};
#define STATUS_TYPE_FX00 0x01 /* Status entry. */
@@ -176,10 +177,8 @@ struct fxdisc_entry_fx00 {
uint8_t flags;
uint8_t reserved_1;
- __le32 dseg_rq_address[2]; /* Data segment 0 address. */
- __le32 dseg_rq_len; /* Data segment 0 length. */
- __le32 dseg_rsp_address[2]; /* Data segment 1 address. */
- __le32 dseg_rsp_len; /* Data segment 1 length. */
+ struct dsd64 dseg_rq;
+ struct dsd64 dseg_rsp;
__le32 dataword;
__le32 adapid;
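Throughout these hunks, split 32-bit address pairs plus a separate length word are collapsed into a single data-segment descriptor filled via append_dsd64(). A minimal sketch of what the new qla_dsd.h header is assumed to provide (the header itself is not part of this diff):

struct dsd64 {
	__le64 address;
	__le32 length;
} __packed;

/* Append one scatterlist element to the current descriptor slot and
 * advance the cursor; put_unaligned_le64() is used because the address
 * field is not naturally aligned inside packed IOCB layouts. */
static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg)
{
	put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
	(*dsd)->length = cpu_to_le32(sg_dma_len(sg));
	(*dsd)++;
}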
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 41c85da3ab32..22e3fba28e51 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -131,14 +131,10 @@ static void qla_nvme_sp_ls_done(void *ptr, int res)
struct nvmefc_ls_req *fd;
struct nvme_private *priv;
- if (atomic_read(&sp->ref_count) == 0) {
- ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
- "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
+ if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
return;
- }
- if (!atomic_dec_and_test(&sp->ref_count))
- return;
+ atomic_dec(&sp->ref_count);
if (res)
res = -EINVAL;
@@ -161,15 +157,18 @@ static void qla_nvme_sp_done(void *ptr, int res)
nvme = &sp->u.iocb_cmd;
fd = nvme->u.nvme.desc;
- if (!atomic_dec_and_test(&sp->ref_count))
+ if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
return;
- if (res == QLA_SUCCESS)
- fd->status = 0;
- else
- fd->status = NVME_SC_INTERNAL;
+ atomic_dec(&sp->ref_count);
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ if (res == QLA_SUCCESS) {
+ fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ } else {
+ fd->rcv_rsplen = 0;
+ fd->transferred_length = 0;
+ }
+ fd->status = 0;
fd->done(fd);
qla2xxx_rel_qpair_sp(sp->qpair, sp);
@@ -185,14 +184,24 @@ static void qla_nvme_abort_work(struct work_struct *work)
struct qla_hw_data *ha = fcport->vha->hw;
int rval;
- if (fcport)
- ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
- "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
- __func__, sp, sp->handle, fcport, fcport->deleted);
+ ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
+ "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
+ __func__, sp, sp->handle, fcport, fcport->deleted);
if (!ha->flags.fw_started && (fcport && fcport->deleted))
return;
+ if (ha->flags.host_shutting_down) {
+ ql_log(ql_log_info, sp->fcport->vha, 0xffff,
+ "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
+ __func__, sp, sp->type, atomic_read(&sp->ref_count));
+ sp->done(sp, 0);
+ return;
+ }
+
+ if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
+ return;
+
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
@@ -291,7 +300,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
uint16_t avail_dsds;
- uint32_t *cur_dsd;
+ struct dsd64 *cur_dsd;
struct req_que *req = NULL;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
@@ -340,6 +349,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
if (unlikely(!fd->sqid)) {
struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
+
if (cmd->sqe.common.opcode == nvme_admin_async_event) {
nvme->u.nvme.aen_op = 1;
atomic_inc(&ha->nvme_active_aen_cnt);
@@ -395,25 +405,22 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* NVME RSP IU */
cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
- cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
- cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
+ put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
/* NVME CNMD IU */
cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
- cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
- cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
+ cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
/* One DSD is available in the Command Type NVME IOCB */
avail_dsds = 1;
- cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
+ cur_dsd = &cmd_pkt->nvme_dsd;
sgl = fd->first_sgl;
/* Load data segments */
for_each_sg(sgl, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
@@ -432,17 +439,14 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->ring_ptr++;
}
cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
- *((uint32_t *)(&cont_pkt->entry_type)) =
- cpu_to_le32(CONTINUE_A64_TYPE);
+ put_unaligned_le32(CONTINUE_A64_TYPE,
+ &cont_pkt->entry_type);
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
- avail_dsds = 5;
+ cur_dsd = cont_pkt->dsd;
+ avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
@@ -573,7 +577,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
.max_hw_queues = 8,
- .max_sgl_segments = 128,
+ .max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,
.dma_boundary = 0xFFFFFFFF,
.local_priv_sz = 8,
@@ -582,40 +586,11 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.fcprqst_priv_sz = sizeof(struct nvme_private),
};
-#define NVME_ABORT_POLLING_PERIOD 2
-static int qla_nvme_wait_on_command(srb_t *sp)
-{
- int ret = QLA_SUCCESS;
-
- wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
- NVME_ABORT_POLLING_PERIOD*HZ);
-
- if (atomic_read(&sp->ref_count) > 1)
- ret = QLA_FUNCTION_FAILED;
-
- return ret;
-}
-
-void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
-{
- int rval;
-
- if (ha->flags.fw_started) {
- rval = ha->isp_ops->abort_command(sp);
- if (!rval && !qla_nvme_wait_on_command(sp))
- ql_log(ql_log_warn, NULL, 0x2112,
- "timed out waiting on sp=%p\n", sp);
- } else {
- sp->done(sp, res);
- }
-}
-
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
struct fc_port *fcport = container_of(work, struct fc_port,
nvme_del_work);
struct qla_nvme_rport *qla_rport, *trport;
- scsi_qla_host_t *base_vha;
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
@@ -623,23 +598,19 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work)
ql_log(ql_log_warn, NULL, 0x2112,
"%s: unregister remoteport on %p\n",__func__, fcport);
- base_vha = pci_get_drvdata(fcport->vha->hw->pdev);
- if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2114,
- "%s: Notify FC-NVMe transport, set devloss=0\n",
- __func__);
-
- nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
- }
-
list_for_each_entry_safe(qla_rport, trport,
&fcport->vha->nvme_rport_list, list) {
if (qla_rport->fcport == fcport) {
ql_log(ql_log_info, fcport->vha, 0x2113,
"%s: fcport=%p\n", __func__, fcport);
+ nvme_fc_set_remoteport_devloss
+ (fcport->nvme_remote_port, 0);
init_completion(&fcport->nvme_del_done);
- nvme_fc_unregister_remoteport(
- fcport->nvme_remote_port);
+ if (nvme_fc_unregister_remoteport
+ (fcport->nvme_remote_port))
+ ql_log(ql_log_info, fcport->vha, 0x2114,
+ "%s: Failed to unregister nvme_remote_port\n",
+ __func__);
wait_for_completion(&fcport->nvme_del_done);
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index da8dad5ad693..d3b8a6440113 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -13,6 +13,7 @@
#include <linux/nvme-fc-driver.h>
#include "qla_def.h"
+#include "qla_dsd.h"
/* default dev loss time (seconds) before transport tears down ctrl */
#define NVME_FC_DEV_LOSS_TMO 30
@@ -64,16 +65,15 @@ struct cmd_nvme {
#define CF_WRITE_DATA BIT_0
uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
- uint32_t nvme_cmnd_dseg_address[2]; /* Data segment address. */
- uint32_t nvme_rsp_dseg_address[2]; /* Data segment address. */
+ __le64 nvme_cmnd_dseg_address __packed;/* Data segment address. */
+ __le64 nvme_rsp_dseg_address __packed; /* Data segment address. */
uint32_t byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
- uint32_t nvme_data_dseg_address[2]; /* Data segment address. */
- uint32_t nvme_data_dseg_len; /* Data segment length. */
+ struct dsd64 nvme_dsd;
};
#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
@@ -101,10 +101,7 @@ struct pt_ls4_request {
uint32_t rsvd3;
uint32_t rx_byte_count;
uint32_t tx_byte_count;
- uint32_t dseg0_address[2];
- uint32_t dseg0_len;
- uint32_t dseg1_address[2];
- uint32_t dseg1_len;
+ struct dsd64 dsd[2];
};
#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */
@@ -145,7 +142,6 @@ struct pt_ls4_rx_unsol {
int qla_nvme_register_hba(struct scsi_qla_host *);
int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
void qla_nvme_delete(struct scsi_qla_host *);
-void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res);
void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *,
struct req_que *);
void qla24xx_async_gffid_sp_done(void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index f2f54806f4da..c760ae354174 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -6,6 +6,7 @@
*/
#include "qla_def.h"
#include <linux/delay.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
@@ -608,6 +609,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
} else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
QLA82XX_ADDR_OCM0_MAX)) {
unsigned int temp1;
+
if ((addr & 0x00ff800) == 0xff800) {
ql_log(ql_log_warn, vha, 0xb004,
"%s: QM access not handled.\n", __func__);
@@ -990,6 +992,7 @@ static int
qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
qla82xx_wait_rom_busy(ha);
if (qla82xx_wait_rom_done(ha)) {
@@ -1030,6 +1033,7 @@ static int
qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
{
uint32_t val;
+
qla82xx_wait_rom_busy(ha);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
@@ -1047,6 +1051,7 @@ static int
qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
if (qla82xx_flash_set_write_enable(ha))
return -1;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
@@ -1063,6 +1068,7 @@ static int
qla82xx_write_disable_flash(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
if (qla82xx_wait_rom_done(ha)) {
ql_log(ql_log_warn, vha, 0xb00f,
@@ -1435,6 +1441,7 @@ qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
long memaddr = BOOTLD_START;
u64 data;
u32 high, low;
+
size = (IMAGE_START - BOOTLD_START) / 8;
for (i = 0; i < size; i++) {
@@ -1757,11 +1764,14 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-void
+int
qla82xx_reset_chip(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+
ha->isp_ops->disable_intrs(ha);
+
+ return QLA_SUCCESS;
}
void qla82xx_config_rings(struct scsi_qla_host *vha)
@@ -1778,10 +1788,8 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_inpointer = cpu_to_le16(0);
icb->request_q_length = cpu_to_le16(req->length);
icb->response_q_length = cpu_to_le16(rsp->length);
- icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
- icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
- icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
- icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+ put_unaligned_le64(req->dma, &icb->request_q_address);
+ put_unaligned_le64(rsp->dma, &icb->response_q_address);
WRT_REG_DWORD(&reg->req_q_out[0], 0);
WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
@@ -1992,6 +2000,7 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
uint16_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
/* Load return mailbox registers. */
@@ -2028,7 +2037,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
unsigned long flags;
unsigned long iter;
uint32_t stat = 0;
- uint16_t mb[4];
+ uint16_t mb[8];
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2112,7 +2121,7 @@ qla82xx_msix_default(int irq, void *dev_id)
unsigned long flags;
uint32_t stat = 0;
uint32_t host_int = 0;
- uint16_t mb[4];
+ uint16_t mb[8];
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2208,7 +2217,7 @@ qla82xx_poll(int irq, void *dev_id)
int status = 0;
uint32_t stat;
uint32_t host_int = 0;
- uint16_t mb[4];
+ uint16_t mb[8];
unsigned long flags;
rsp = (struct rsp_que *) dev_id;
@@ -2262,6 +2271,7 @@ void
qla82xx_enable_intrs(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_mbx_intr_enable(vha);
spin_lock_irq(&ha->hardware_lock);
if (IS_QLA8044(ha))
@@ -2276,6 +2286,7 @@ void
qla82xx_disable_intrs(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_mbx_intr_disable(vha);
spin_lock_irq(&ha->hardware_lock);
if (IS_QLA8044(ha))
@@ -2658,8 +2669,8 @@ done:
/*
* Address and length are byte address
*/
-uint8_t *
-qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
scsi_block_requests(vha->host);
@@ -2767,15 +2778,14 @@ write_done:
}
int
-qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla82xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
int rval;
/* Suspend HBA. */
scsi_block_requests(vha->host);
- rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
- length >> 2);
+ rval = qla82xx_write_flash_data(vha, buf, offset, length >> 2);
scsi_unblock_requests(vha->host);
/* Convert return ISP82xx to generic */
@@ -4464,6 +4474,7 @@ qla82xx_beacon_on(struct scsi_qla_host *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
+
qla82xx_idc_lock(ha);
rval = qla82xx_mbx_beacon_ctl(vha, 1);
@@ -4484,6 +4495,7 @@ qla82xx_beacon_off(struct scsi_qla_host *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
+
qla82xx_idc_lock(ha);
rval = qla82xx_mbx_beacon_ctl(vha, 0);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 71a41093530e..3c7beef92c35 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -7,7 +7,7 @@
#ifndef __QLA_NX_H
#define __QLA_NX_H
-#include <linux/io-64-nonatomic-lo-hi.h>
+#include <scsi/scsi.h>
/*
* Following are the states of the Phantom. Phantom will set them and
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index fe856b602e03..369ac04d0454 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -559,12 +559,12 @@ exit_lock_error:
/*
* Address and length are byte address
*/
-uint8_t *
-qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla8044_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
scsi_block_requests(vha->host);
- if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
+ if (qla8044_read_flash_data(vha, buf, offset, length / 4)
!= QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xb08d,
"%s: Failed to read from flash\n",
@@ -3007,10 +3007,9 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
uint16_t count;
uint32_t poll, mask, modify_mask;
uint32_t wait_count = 0;
-
uint32_t *data_ptr = *d_ptr;
-
struct qla8044_minidump_entry_rddfe *rddfe;
+
rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
addr1 = rddfe->addr_1;
@@ -3797,7 +3796,7 @@ qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
}
int
-qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla8044_write_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
@@ -3896,7 +3895,7 @@ qla8044_intr_handler(int irq, void *dev_id)
unsigned long flags;
unsigned long iter;
uint32_t stat;
- uint16_t mb[4];
+ uint16_t mb[8];
uint32_t leg_int_ptr = 0, pf_bit;
rsp = (struct rsp_que *) dev_id;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 91f576d743fe..e1c82a0a9745 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -42,7 +42,7 @@ static struct kmem_cache *ctx_cachep;
/*
* error level for logging
*/
-int ql_errlev = ql_log_all;
+uint ql_errlev = 0x8001;
static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd,
"Set to control shifting of command type processing "
"based on total number of SG elements.");
-int ql2xfdmienable=1;
+int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
@@ -154,7 +154,7 @@ MODULE_PARM_DESC(ql2xenablehba_err_chk,
" 1 -- Error isolation enabled only for DIX Type 0\n"
" 2 -- Error isolation enabled for all Types\n");
-int ql2xiidmaenable=1;
+int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
"Enables iIDMA settings "
@@ -285,14 +285,14 @@ MODULE_PARM_DESC(qla2xuseresexchforels,
"Reserve 1/2 of emergency exchanges for ELS.\n"
" 0 (default): disabled");
-int ql2xprotmask;
+static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
"Override DIF/DIX protection capabilities mask\n"
"Default is 0 which sets protection mask based on "
"capabilities reported by HBA firmware.\n");
-int ql2xprotguard;
+static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
" 0 -- Let HBA firmware decide\n"
@@ -306,58 +306,12 @@ MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
"0 (Default). Based on check.\n"
"1 Force using internal buffers\n");
-/*
- * SCSI host template entry points
- */
-static int qla2xxx_slave_configure(struct scsi_device * device);
-static int qla2xxx_slave_alloc(struct scsi_device *);
-static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
-static void qla2xxx_scan_start(struct Scsi_Host *);
-static void qla2xxx_slave_destroy(struct scsi_device *);
-static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
-static int qla2xxx_eh_abort(struct scsi_cmnd *);
-static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
-
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
-struct scsi_host_template qla2xxx_driver_template = {
- .module = THIS_MODULE,
- .name = QLA2XXX_DRIVER_NAME,
- .queuecommand = qla2xxx_queuecommand,
-
- .eh_timed_out = fc_eh_timed_out,
- .eh_abort_handler = qla2xxx_eh_abort,
- .eh_device_reset_handler = qla2xxx_eh_device_reset,
- .eh_target_reset_handler = qla2xxx_eh_target_reset,
- .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
- .eh_host_reset_handler = qla2xxx_eh_host_reset,
-
- .slave_configure = qla2xxx_slave_configure,
-
- .slave_alloc = qla2xxx_slave_alloc,
- .slave_destroy = qla2xxx_slave_destroy,
- .scan_finished = qla2xxx_scan_finished,
- .scan_start = qla2xxx_scan_start,
- .change_queue_depth = scsi_change_queue_depth,
- .map_queues = qla2xxx_map_queues,
- .this_id = -1,
- .cmd_per_lun = 3,
- .sg_tablesize = SG_ALL,
-
- .max_sectors = 0xFFFF,
- .shost_attrs = qla2x00_host_attrs,
-
- .supported_mode = MODE_INITIATOR,
- .track_queue_depth = 1,
-};
-
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
@@ -411,6 +365,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
struct rsp_que *rsp)
{
struct qla_hw_data *ha = vha->hw;
+
rsp->qpair = ha->base_qpair;
rsp->req = req;
ha->base_qpair->hw = ha;
@@ -427,7 +382,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
qla_cpu_update(rsp->qpair, raw_smp_processor_id());
ha->base_qpair->pdev = ha->pdev;
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}
@@ -435,6 +390,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
struct rsp_que *rsp)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
GFP_KERNEL);
if (!ha->req_q_map) {
@@ -726,7 +682,7 @@ qla2x00_sp_free_dma(void *ptr)
}
if (!ctx)
- goto end;
+ return;
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List assured to be having elements */
@@ -751,12 +707,6 @@ qla2x00_sp_free_dma(void *ptr)
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
mempool_free(ctx1, ha->ctx_mempool);
}
-
-end:
- if (sp->type != SRB_NVME_CMD && sp->type != SRB_NVME_LS) {
- CMD_SP(cmd) = NULL;
- qla2x00_rel_sp(sp);
- }
}
void
@@ -764,22 +714,20 @@ qla2x00_sp_compl(void *ptr, int res)
{
srb_t *sp = ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct completion *comp = sp->comp;
- cmd->result = res;
-
- if (atomic_read(&sp->ref_count) == 0) {
- ql_dbg(ql_dbg_io, sp->vha, 0x3015,
- "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
- sp, GET_CMD_SP(sp));
- if (ql2xextended_error_logging & ql_dbg_io)
- WARN_ON(atomic_read(&sp->ref_count) == 0);
- return;
- }
- if (!atomic_dec_and_test(&sp->ref_count))
+ if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
return;
+ atomic_dec(&sp->ref_count);
+
sp->free(sp);
+ cmd->result = res;
+ CMD_SP(cmd) = NULL;
cmd->scsi_done(cmd);
+ if (comp)
+ complete(comp);
+ qla2x00_rel_sp(sp);
}
void
@@ -802,7 +750,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
}
if (!ctx)
- goto end;
+ return;
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List assured to be having elements */
@@ -810,25 +758,8 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
- if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
- struct crc_context *ctx0 = ctx;
-
- dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
- sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
- }
-
- if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = ctx;
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
- ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
- mempool_free(ctx1, ha->ctx_mempool);
- sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
- }
if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
- struct crc_context *difctx = sp->u.scmd.ctx;
+ struct crc_context *difctx = ctx;
struct dsd_dma *dif_dsd, *nxt_dsd;
list_for_each_entry_safe(dif_dsd, nxt_dsd,
@@ -863,9 +794,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
}
-end:
- CMD_SP(cmd) = NULL;
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ struct crc_context *ctx0 = ctx;
+
+ dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
}
void
@@ -873,27 +819,22 @@ qla2xxx_qpair_sp_compl(void *ptr, int res)
{
srb_t *sp = ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct completion *comp = sp->comp;
- cmd->result = res;
-
- if (atomic_read(&sp->ref_count) == 0) {
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
- "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
- sp, GET_CMD_SP(sp));
- if (ql2xextended_error_logging & ql_dbg_io)
- WARN_ON(atomic_read(&sp->ref_count) == 0);
- return;
- }
- if (!atomic_dec_and_test(&sp->ref_count))
+ if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
return;
+ atomic_dec(&sp->ref_count);
+
sp->free(sp);
+ cmd->result = res;
+ CMD_SP(cmd) = NULL;
cmd->scsi_done(cmd);
+ if (comp)
+ complete(comp);
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
-/* If we are SP1 here, we need to still take and release the host_lock as SP1
- * does not have the changes necessary to avoid taking host->host_lock.
- */
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
@@ -908,7 +849,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
uint32_t tag;
uint16_t hwq;
- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
+ if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
+ WARN_ON_ONCE(!rport)) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
@@ -1031,7 +973,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
srb_t *sp;
int rval;
- rval = fc_remote_port_chkready(rport);
+ rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
if (rval) {
cmd->result = rval;
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
@@ -1272,7 +1214,7 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
static int
sp_get(struct srb *sp)
{
- if (!refcount_inc_not_zero((refcount_t*)&sp->ref_count))
+ if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
/* kref get fail */
return ENXIO;
else
@@ -1332,7 +1274,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned int id;
uint64_t lun;
unsigned long flags;
- int rval, wait = 0;
+ int rval;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair;
@@ -1345,7 +1287,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
ret = fc_block_scsi_eh(cmd);
if (ret != 0)
return ret;
- ret = SUCCESS;
sp = (srb_t *) CMD_SP(cmd);
if (!sp)
@@ -1356,7 +1297,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return SUCCESS;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- if (!CMD_SP(cmd)) {
+ if (sp->type != SRB_SCSI_CMD || GET_CMD_SP(sp) != cmd) {
/* there's a chance an interrupt could clear
the ptr as part of done & free */
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -1377,58 +1318,31 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
- /* Get a reference to the sp and drop the lock.*/
-
rval = ha->isp_ops->abort_command(sp);
- if (rval) {
- if (rval == QLA_FUNCTION_PARAMETER_ERROR)
- ret = SUCCESS;
- else
- ret = FAILED;
-
- ql_dbg(ql_dbg_taskm, vha, 0x8003,
- "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
- } else {
- ql_dbg(ql_dbg_taskm, vha, 0x8004,
- "Abort command mbx success cmd=%p.\n", cmd);
- wait = 1;
- }
-
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- /*
- * Clear the slot in the oustanding_cmds array if we can't find the
- * command to reclaim the resources.
- */
- if (rval == QLA_FUNCTION_PARAMETER_ERROR)
- vha->req->outstanding_cmds[sp->handle] = NULL;
+ ql_dbg(ql_dbg_taskm, vha, 0x8003,
+ "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
- /*
- * sp->done will do ref_count--
- * sp_get() took an extra count above
- */
- sp->done(sp, DID_RESET << 16);
-
- /* Did the command return during mailbox execution? */
- if (ret == FAILED && !CMD_SP(cmd))
+ switch (rval) {
+ case QLA_SUCCESS:
+ /*
+ * The command has been aborted. That means that the firmware
+ * won't report a completion.
+ */
+ sp->done(sp, DID_ABORT << 16);
ret = SUCCESS;
-
- if (!CMD_SP(cmd))
- wait = 0;
-
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- /* Wait for the command to be returned. */
- if (wait) {
- if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x8006,
- "Abort handler timed out cmd=%p.\n", cmd);
- ret = FAILED;
- }
+ break;
+ default:
+ /*
+ * Either abort failed or abort and completion raced. Let
+ * the SCSI core retry the abort in the former case.
+ */
+ ret = FAILED;
+ break;
}
ql_log(ql_log_info, vha, 0x801c,
- "Abort command issued nexus=%ld:%d:%llu -- %d %x.\n",
- vha->host_no, id, lun, wait, ret);
+ "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
+ vha->host_no, id, lun, ret);
return ret;
}
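The rewritten abort handler above no longer waits on the command: on QLA_SUCCESS it completes the sp itself (the firmware will not post a completion for an aborted command), and on any other return it reports FAILED so the SCSI midlayer can escalate. Condensed, the new contract looks like this (illustrative only):

rval = ha->isp_ops->abort_command(sp);
if (rval == QLA_SUCCESS) {
	sp->done(sp, DID_ABORT << 16);	/* no completion will arrive from firmware */
	return SUCCESS;
}
return FAILED;				/* abort failed or raced with completion */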
@@ -1804,42 +1718,34 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
__releases(qp->qp_lock_ptr)
__acquires(qp->qp_lock_ptr)
{
+ DECLARE_COMPLETION_ONSTACK(comp);
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
+ int rval;
- if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS) {
- if (!sp_get(sp)) {
- /* got sp */
- spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
- qla_nvme_abort(ha, sp, res);
- spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- }
- } else if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
- !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
- !qla2x00_isp_reg_stat(ha) && sp->type == SRB_SCSI_CMD) {
- /*
- * Don't abort commands in adapter during EEH recovery as it's
- * not accessible/responding.
- *
- * Get a reference to the sp and drop the lock. The reference
- * ensures this sp->done() call and not the call in
- * qla2xxx_eh_abort() ends the SCSI cmd (with result 'res').
- */
- if (!sp_get(sp)) {
- int status;
+ if (sp_get(sp))
+ return;
- spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
- status = qla2xxx_eh_abort(GET_CMD_SP(sp));
- spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- /*
- * Get rid of extra reference caused
- * by early exit from qla2xxx_eh_abort
- */
- if (status == FAST_IO_FAIL)
- atomic_dec(&sp->ref_count);
+ if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
+ (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !qla2x00_isp_reg_stat(ha))) {
+ sp->comp = &comp;
+ rval = ha->isp_ops->abort_command(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
+
+ switch (rval) {
+ case QLA_SUCCESS:
+ sp->done(sp, res);
+ break;
+ case QLA_FUNCTION_PARAMETER_ERROR:
+ wait_for_completion(&comp);
+ break;
}
+
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+ sp->comp = NULL;
}
- sp->done(sp, res);
}
static void
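The qla2x00_abort_srb() rework above ties into the completion paths rewritten earlier in this file: the abort side parks a completion on sp->comp, and when the abort races with normal completion (QLA_FUNCTION_PARAMETER_ERROR) it waits for qla2x00_sp_compl()/qla2xxx_qpair_sp_compl() to signal it. A condensed sketch of the handshake, using only code shown in these hunks:

/* abort side (qla2x00_abort_srb) */
DECLARE_COMPLETION_ONSTACK(comp);

sp->comp = &comp;
rval = ha->isp_ops->abort_command(sp);
if (rval == QLA_FUNCTION_PARAMETER_ERROR)
	wait_for_completion(&comp);	/* wait for the completion path below */
sp->comp = NULL;

/* completion side (qla2x00_sp_compl / qla2xxx_qpair_sp_compl) */
struct completion *comp = sp->comp;

sp->free(sp);
cmd->scsi_done(cmd);
if (comp)
	complete(comp);			/* wakes the waiting abort path */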
@@ -1875,15 +1781,10 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
continue;
}
cmd = (struct qla_tgt_cmd *)sp;
- qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ cmd->aborted = 1;
break;
case TYPE_TGT_TMCMD:
- /*
- * Currently, only ABTS response gets on the
- * outstanding_cmds[]
- */
- ha->tgt.tgt_ops->free_mcmd(
- (struct qla_tgt_mgmt_cmd *)sp);
+ /* Skip task management functions. */
break;
default:
break;
@@ -2753,6 +2654,24 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2081:
+ case PCI_DEVICE_ID_QLOGIC_ISP2089:
+ ha->isp_type |= DT_ISP2081;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2281:
+ case PCI_DEVICE_ID_QLOGIC_ISP2289:
+ ha->isp_type |= DT_ISP2281;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
}
if (IS_QLA82XX(ha))
@@ -2760,7 +2679,8 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
else {
/* Get adapter physical port no from interrupt pin register. */
pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
- if (IS_QLA27XX(ha))
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->port_no--;
else
ha->port_no = !(ha->port_no & 1);
@@ -2857,7 +2777,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2877,6 +2801,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* This may fail but that's ok */
pci_enable_pcie_error_reporting(pdev);
+ /* Turn off T10-DIF when FC-NVMe is enabled */
+ if (ql2xnvmeenable)
+ ql2xenabledif = 0;
+
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
@@ -2906,7 +2834,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set EEH reset type to fundamental if required by hba */
if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
- IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
pdev->needs_freset = 1;
ha->prev_topology = 0;
@@ -3085,6 +3013,23 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
+ } else if (IS_QLA28XX(ha)) {
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_28XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla27xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -3250,7 +3195,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
req->req_q_out = &ha->iobase->isp24.req_q_out;
rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -3395,6 +3341,7 @@ skip_dpc:
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
int prot = 0, guard;
+
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
@@ -3576,7 +3523,8 @@ qla2x00_shutdown(struct pci_dev *pdev)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
if (ha->flags.fw_started)
qla2x00_abort_isp_cleanup(vha);
} else {
@@ -3681,7 +3629,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
if (ha->mqiobase)
iounmap(ha->mqiobase);
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ ha->msixbase)
iounmap(ha->msixbase);
}
}
@@ -3732,7 +3681,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
- if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
if (ha->flags.fw_started)
qla2x00_abort_isp_cleanup(base_vha);
} else if (!IS_QLAFX00(ha)) {
@@ -3770,8 +3720,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_delete_all_vps(ha, base_vha);
- qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
-
qla2x00_dfs_remove(base_vha);
qla84xx_put_chip(base_vha);
@@ -3860,11 +3808,8 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
{
fc_port_t *fcport, *tfcport;
- list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
- list_del(&fcport->list);
- qla2x00_clear_loop_id(fcport);
- kfree(fcport);
- }
+ list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
+ qla2x00_free_fcport(fcport);
}
static inline void
@@ -3889,6 +3834,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2xxx_wake_dpc(base_vha);
} else {
int now;
+
if (rport) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
"%s %8phN. rport %p roles %x\n",
@@ -3980,6 +3926,19 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
}
}
+static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
+{
+ int i;
+
+ if (IS_FWI2_CAPABLE(ha))
+ return;
+
+ for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
+ set_bit(i, ha->loop_id_map);
+ set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
+ set_bit(BROADCAST, ha->loop_id_map);
+}
+
/*
* qla2x00_mem_alloc
* Allocates adapter memory.
@@ -4222,7 +4181,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
@@ -4265,8 +4225,20 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
goto fail_sfp_data;
}
+ ha->flt = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
+ GFP_KERNEL);
+ if (!ha->flt) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+ "Unable to allocate memory for FLT.\n");
+ goto fail_flt_buffer;
+ }
+
return 0;
+fail_flt_buffer:
+ dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ ha->sfp_data, ha->sfp_data_dma);
fail_sfp_data:
kfree(ha->loop_id_map);
fail_loop_id_map:
@@ -4602,6 +4574,9 @@ qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
static void
qla2x00_free_fw_dump(struct qla_hw_data *ha)
{
+ struct fwdt *fwdt = ha->fwdt;
+ uint j;
+
if (ha->fce)
dma_free_coherent(&ha->pdev->dev,
FCE_SIZE, ha->fce, ha->fce_dma);
@@ -4612,8 +4587,6 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
if (ha->fw_dump)
vfree(ha->fw_dump);
- if (ha->fw_dump_template)
- vfree(ha->fw_dump_template);
ha->fce = NULL;
ha->fce_dma = 0;
@@ -4624,8 +4597,13 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
ha->fw_dump_reading = 0;
ha->fw_dump = NULL;
ha->fw_dump_len = 0;
- ha->fw_dump_template = NULL;
- ha->fw_dump_template_len = 0;
+
+ for (j = 0; j < 2; j++, fwdt++) {
+ if (fwdt->template)
+ vfree(fwdt->template);
+ fwdt->template = NULL;
+ fwdt->length = 0;
+ }
}
/*
@@ -4643,44 +4621,68 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->mctp_dump)
dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
ha->mctp_dump_dma);
+ ha->mctp_dump = NULL;
mempool_destroy(ha->srb_mempool);
+ ha->srb_mempool = NULL;
if (ha->dcbx_tlv)
dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
ha->dcbx_tlv, ha->dcbx_tlv_dma);
+ ha->dcbx_tlv = NULL;
if (ha->xgmac_data)
dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
ha->xgmac_data, ha->xgmac_data_dma);
+ ha->xgmac_data = NULL;
if (ha->sns_cmd)
dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
ha->sns_cmd, ha->sns_cmd_dma);
+ ha->sns_cmd = NULL;
+ ha->sns_cmd_dma = 0;
if (ha->ct_sns)
dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
ha->ct_sns, ha->ct_sns_dma);
+ ha->ct_sns = NULL;
+ ha->ct_sns_dma = 0;
if (ha->sfp_data)
dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
ha->sfp_data_dma);
+ ha->sfp_data = NULL;
+
+ if (ha->flt)
+ dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ ha->flt, ha->flt_dma);
+ ha->flt = NULL;
+ ha->flt_dma = 0;
if (ha->ms_iocb)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+ ha->ms_iocb = NULL;
+ ha->ms_iocb_dma = 0;
if (ha->ex_init_cb)
dma_pool_free(ha->s_dma_pool,
ha->ex_init_cb, ha->ex_init_cb_dma);
+ ha->ex_init_cb = NULL;
+ ha->ex_init_cb_dma = 0;
if (ha->async_pd)
dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+ ha->async_pd = NULL;
+ ha->async_pd_dma = 0;
dma_pool_destroy(ha->s_dma_pool);
+ ha->s_dma_pool = NULL;
if (ha->gid_list)
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
ha->gid_list, ha->gid_list_dma);
+ ha->gid_list = NULL;
+ ha->gid_list_dma = 0;
if (IS_QLA82XX(ha)) {
if (!list_empty(&ha->gbl_dsd_list)) {
@@ -4698,10 +4700,13 @@ qla2x00_mem_free(struct qla_hw_data *ha)
}
dma_pool_destroy(ha->dl_dma_pool);
+ ha->dl_dma_pool = NULL;
dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+ ha->fcp_cmnd_dma_pool = NULL;
mempool_destroy(ha->ctx_mempool);
+ ha->ctx_mempool = NULL;
if (ql2xenabledif) {
struct dsd_dma *dsd, *nxt;
@@ -4728,53 +4733,26 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->dif_bundl_pool)
dma_pool_destroy(ha->dif_bundl_pool);
+ ha->dif_bundl_pool = NULL;
qlt_mem_free(ha);
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
+ ha->init_cb = NULL;
+ ha->init_cb_dma = 0;
vfree(ha->optrom_buffer);
+ ha->optrom_buffer = NULL;
kfree(ha->nvram);
+ ha->nvram = NULL;
kfree(ha->npiv_info);
+ ha->npiv_info = NULL;
kfree(ha->swl);
+ ha->swl = NULL;
kfree(ha->loop_id_map);
-
- ha->srb_mempool = NULL;
- ha->ctx_mempool = NULL;
- ha->sns_cmd = NULL;
- ha->sns_cmd_dma = 0;
- ha->ct_sns = NULL;
- ha->ct_sns_dma = 0;
- ha->ms_iocb = NULL;
- ha->ms_iocb_dma = 0;
- ha->init_cb = NULL;
- ha->init_cb_dma = 0;
- ha->ex_init_cb = NULL;
- ha->ex_init_cb_dma = 0;
- ha->async_pd = NULL;
- ha->async_pd_dma = 0;
ha->loop_id_map = NULL;
- ha->npiv_info = NULL;
- ha->optrom_buffer = NULL;
- ha->swl = NULL;
- ha->nvram = NULL;
- ha->mctp_dump = NULL;
- ha->dcbx_tlv = NULL;
- ha->xgmac_data = NULL;
- ha->sfp_data = NULL;
-
- ha->s_dma_pool = NULL;
- ha->dl_dma_pool = NULL;
- ha->fcp_cmnd_dma_pool = NULL;
-
- ha->gid_list = NULL;
- ha->gid_list_dma = 0;
-
- ha->tgt.atio_ring = NULL;
- ha->tgt.atio_dma = 0;
- ha->tgt.tgt_vp_map = NULL;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -5608,6 +5586,7 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
uint32_t idc_lck_rcvry_stage_mask = 0x3;
uint32_t idc_lck_rcvry_owner_mask = 0x3c;
struct qla_hw_data *ha = base_vha->hw;
+
ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
"Trying force recovery of the IDC lock.\n");
@@ -6677,8 +6656,10 @@ qla2x00_timer(struct timer_list *t)
* FC-NVME
* see if the active AEN count has changed from what was last reported.
*/
- if (!vha->vp_idx && (atomic_read(&ha->nvme_active_aen_cnt) !=
- ha->nvme_last_rptd_aen) && ha->zio_mode == QLA_ZIO_MODE_6) {
+ if (!vha->vp_idx &&
+ (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) &&
+ ha->zio_mode == QLA_ZIO_MODE_6 &&
+ !ha->flags.host_shutting_down) {
ql_log(ql_log_info, vha, 0x3002,
"nvme: Sched: Set ZIO exchange threshold to %d.\n",
ha->nvme_last_rptd_aen);
@@ -6690,7 +6671,7 @@ qla2x00_timer(struct timer_list *t)
if (!vha->vp_idx &&
(atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
(ha->zio_mode == QLA_ZIO_MODE_6) &&
- (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
ql_log(ql_log_info, vha, 0x3002,
"Sched: Set ZIO exchange threshold to %d.\n",
ha->last_zio_threshold);
@@ -6736,7 +6717,6 @@ qla2x00_timer(struct timer_list *t)
/* Firmware interface routines. */
-#define FW_BLOBS 11
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -6748,6 +6728,7 @@ qla2x00_timer(struct timer_list *t)
#define FW_ISP2031 8
#define FW_ISP8031 9
#define FW_ISP27XX 10
+#define FW_ISP28XX 11
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -6760,11 +6741,12 @@ qla2x00_timer(struct timer_list *t)
#define FW_FILE_ISP2031 "ql2600_fw.bin"
#define FW_FILE_ISP8031 "ql8300_fw.bin"
#define FW_FILE_ISP27XX "ql2700_fw.bin"
+#define FW_FILE_ISP28XX "ql2800_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
-static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
+static struct fw_blob qla_fw_blobs[] = {
{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
@@ -6776,6 +6758,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP2031, },
{ .name = FW_FILE_ISP8031, },
{ .name = FW_FILE_ISP27XX, },
+ { .name = FW_FILE_ISP28XX, },
+ { .name = NULL, },
};
struct fw_blob *
@@ -6806,10 +6790,15 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP8031];
} else if (IS_QLA27XX(ha)) {
blob = &qla_fw_blobs[FW_ISP27XX];
+ } else if (IS_QLA28XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP28XX];
} else {
return NULL;
}
+ if (!blob->name)
+ return NULL;
+
mutex_lock(&qla_fw_lock);
if (blob->fw)
goto out;
@@ -6819,7 +6808,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
"Failed to load firmware image (%s).\n", blob->name);
blob->fw = NULL;
blob = NULL;
- goto out;
}
out:
@@ -6830,11 +6818,11 @@ out:
static void
qla2x00_release_firmware(void)
{
- int idx;
+ struct fw_blob *blob;
mutex_lock(&qla_fw_lock);
- for (idx = 0; idx < FW_BLOBS; idx++)
- release_firmware(qla_fw_blobs[idx].fw);
+ for (blob = qla_fw_blobs; blob->name; blob++)
+ release_firmware(blob->fw);
mutex_unlock(&qla_fw_lock);
}
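With the blob table now terminated by a NULL name instead of sized by FW_BLOBS, the release path simply walks to the sentinel, and supporting a new controller only means appending an entry (plus its FW_ISPxxxx index) ahead of the terminator. Sketch:

static struct fw_blob qla_fw_blobs[] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	/* ...existing entries... */
	{ .name = FW_FILE_ISP28XX, },
	{ .name = NULL, },		/* terminator replaces the FW_BLOBS count */
};

for (blob = qla_fw_blobs; blob->name; blob++)
	release_firmware(blob->fw);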
@@ -7179,7 +7167,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
rc = blk_mq_map_queues(qmap);
@@ -7188,6 +7176,37 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
return rc;
}
+struct scsi_host_template qla2xxx_driver_template = {
+ .module = THIS_MODULE,
+ .name = QLA2XXX_DRIVER_NAME,
+ .queuecommand = qla2xxx_queuecommand,
+
+ .eh_timed_out = fc_eh_timed_out,
+ .eh_abort_handler = qla2xxx_eh_abort,
+ .eh_device_reset_handler = qla2xxx_eh_device_reset,
+ .eh_target_reset_handler = qla2xxx_eh_target_reset,
+ .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
+ .eh_host_reset_handler = qla2xxx_eh_host_reset,
+
+ .slave_configure = qla2xxx_slave_configure,
+
+ .slave_alloc = qla2xxx_slave_alloc,
+ .slave_destroy = qla2xxx_slave_destroy,
+ .scan_finished = qla2xxx_scan_finished,
+ .scan_start = qla2xxx_scan_start,
+ .change_queue_depth = scsi_change_queue_depth,
+ .map_queues = qla2xxx_map_queues,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .sg_tablesize = SG_ALL,
+
+ .max_sectors = 0xFFFF,
+ .shost_attrs = qla2x00_host_attrs,
+
+ .supported_mode = MODE_INITIATOR,
+ .track_queue_depth = 1,
+};
+
static const struct pci_error_handlers qla2xxx_err_handler = {
.error_detected = qla2xxx_pci_error_detected,
.mmio_enabled = qla2xxx_pci_mmio_enabled,
@@ -7220,6 +7239,11 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -7249,6 +7273,30 @@ qla2x00_module_init(void)
{
int ret = 0;
+ BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(init_cb_t) != 96);
+ BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(request_t) != 64);
+ BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
+ BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
+ BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
+ BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
+ BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
+ BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
+ BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
+ BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
+ BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
+ BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
+
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
SLAB_HWCACHE_ALIGN, NULL);
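The BUILD_BUG_ON() checks added at the top of qla2x00_module_init() are compile-time assertions: if an IOCB structure drifts from the size the firmware interface expects (64 bytes for most request-queue entries), the driver fails to build instead of corrupting the request ring at run time. A hypothetical example of the same technique (example_iocb is not a real driver structure):

struct example_iocb {
	uint8_t entry_type;
	uint8_t entry_count;
	uint8_t sys_define;
	uint8_t entry_status;
	uint8_t payload[60];
};

static int __init example_init(void)
{
	/* Evaluated by the compiler; a size mismatch is a build error. */
	BUILD_BUG_ON(sizeof(struct example_iocb) != 64);
	return 0;
}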
@@ -7261,8 +7309,7 @@ qla2x00_module_init(void)
/* Initialize target kmem_cache and mem_pools */
ret = qlt_init();
if (ret < 0) {
- kmem_cache_destroy(srb_cachep);
- return ret;
+ goto destroy_cache;
} else if (ret > 0) {
/*
* If initiator mode is explictly disabled by qlt_init(),
@@ -7286,11 +7333,10 @@ qla2x00_module_init(void)
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
- kmem_cache_destroy(srb_cachep);
ql_log(ql_log_fatal, NULL, 0x0002,
"fc_attach_transport failed...Failing load!.\n");
- qlt_exit();
- return -ENODEV;
+ ret = -ENODEV;
+ goto qlt_exit;
}
apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
@@ -7302,27 +7348,37 @@ qla2x00_module_init(void)
qla2xxx_transport_vport_template =
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
- kmem_cache_destroy(srb_cachep);
- qlt_exit();
- fc_release_transport(qla2xxx_transport_template);
ql_log(ql_log_fatal, NULL, 0x0004,
"fc_attach_transport vport failed...Failing load!.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto unreg_chrdev;
}
ql_log(ql_log_info, NULL, 0x0005,
"QLogic Fibre Channel HBA Driver: %s.\n",
qla2x00_version_str);
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
- kmem_cache_destroy(srb_cachep);
- qlt_exit();
- fc_release_transport(qla2xxx_transport_template);
- fc_release_transport(qla2xxx_transport_vport_template);
ql_log(ql_log_fatal, NULL, 0x0006,
"pci_register_driver failed...ret=%d Failing load!.\n",
ret);
+ goto release_vport_transport;
}
return ret;
+
+release_vport_transport:
+ fc_release_transport(qla2xxx_transport_vport_template);
+
+unreg_chrdev:
+ if (apidev_major >= 0)
+ unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+ fc_release_transport(qla2xxx_transport_template);
+
+qlt_exit:
+ qlt_exit();
+
+destroy_cache:
+ kmem_cache_destroy(srb_cachep);
+ return ret;
}
/**
@@ -7331,14 +7387,15 @@ qla2x00_module_init(void)
static void __exit
qla2x00_module_exit(void)
{
- unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
- kmem_cache_destroy(srb_cachep);
- qlt_exit();
kmem_cache_destroy(ctx_cachep);
- fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
+ if (apidev_major >= 0)
+ unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+ fc_release_transport(qla2xxx_transport_template);
+ qlt_exit();
+ kmem_cache_destroy(srb_cachep);
}
module_init(qla2x00_module_init);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 2a3055c799fb..1eb82384d933 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -429,66 +429,64 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
static inline uint32_t
flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
{
- return ha->flash_conf_off | faddr;
+ return ha->flash_conf_off + faddr;
}
static inline uint32_t
flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
{
- return ha->flash_data_off | faddr;
+ return ha->flash_data_off + faddr;
}
static inline uint32_t
nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
{
- return ha->nvram_conf_off | naddr;
+ return ha->nvram_conf_off + naddr;
}
static inline uint32_t
nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
{
- return ha->nvram_data_off | naddr;
+ return ha->nvram_data_off + naddr;
}
-static uint32_t
-qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
+static int
+qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
{
- int rval;
- uint32_t cnt, data;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ ulong cnt = 30000;
WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
- /* Wait for READ cycle to complete. */
- rval = QLA_SUCCESS;
- for (cnt = 3000;
- (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) == 0 &&
- rval == QLA_SUCCESS; cnt--) {
- if (cnt)
- udelay(10);
- else
- rval = QLA_FUNCTION_TIMEOUT;
+
+ while (cnt--) {
+ if (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) {
+ *data = RD_REG_DWORD(&reg->flash_data);
+ return QLA_SUCCESS;
+ }
+ udelay(10);
cond_resched();
}
- /* TODO: What happens if we time out? */
- data = 0xDEADDEAD;
- if (rval == QLA_SUCCESS)
- data = RD_REG_DWORD(&reg->flash_data);
-
- return data;
+ ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090,
+ "Flash read dword at %x timeout.\n", addr);
+ *data = 0xDEADDEAD;
+ return QLA_FUNCTION_TIMEOUT;
}
uint32_t *
qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords)
{
- uint32_t i;
+ ulong i;
struct qla_hw_data *ha = vha->hw;
/* Dword reads to flash. */
- for (i = 0; i < dwords; i++, faddr++)
- dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
- flash_data_addr(ha, faddr)));
+ faddr = flash_data_addr(ha, faddr);
+ for (i = 0; i < dwords; i++, faddr++, dwptr++) {
+ if (qla24xx_read_flash_dword(ha, faddr, dwptr))
+ break;
+ cpu_to_le32s(dwptr);
+ }
return dwptr;
}
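qla24xx_read_flash_dword() now reports success or timeout through its return value and hands the data back through a pointer, keeping the 0xDEADDEAD marker for the timeout case. A hedged usage sketch of the new signature, as exercised by qla24xx_read_flash_data() above:

uint32_t word;

if (qla24xx_read_flash_dword(ha, flash_data_addr(ha, faddr), &word))
	return QLA_FUNCTION_TIMEOUT;	/* word holds 0xDEADDEAD on timeout */

/* on success, word holds the raw flash dword read from the data register */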
@@ -496,35 +494,37 @@ qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
static int
qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
{
- int rval;
- uint32_t cnt;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ ulong cnt = 500000;
WRT_REG_DWORD(&reg->flash_data, data);
- RD_REG_DWORD(&reg->flash_data); /* PCI Posting. */
WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
- /* Wait for Write cycle to complete. */
- rval = QLA_SUCCESS;
- for (cnt = 500000; (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) &&
- rval == QLA_SUCCESS; cnt--) {
- if (cnt)
- udelay(10);
- else
- rval = QLA_FUNCTION_TIMEOUT;
+
+ while (cnt--) {
+ if (!(RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG))
+ return QLA_SUCCESS;
+ udelay(10);
cond_resched();
}
- return rval;
+
+ ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090,
+ "Flash write dword at %x timeout.\n", addr);
+ return QLA_FUNCTION_TIMEOUT;
}
static void
qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
uint8_t *flash_id)
{
- uint32_t ids;
+ uint32_t faddr, ids = 0;
- ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
- *man_id = LSB(ids);
- *flash_id = MSB(ids);
+ *man_id = *flash_id = 0;
+
+ faddr = flash_conf_addr(ha, 0x03ab);
+ if (!qla24xx_read_flash_dword(ha, faddr, &ids)) {
+ *man_id = LSB(ids);
+ *flash_id = MSB(ids);
+ }
/* Check if man_id and flash_id are valid. */
if (ids != 0xDEADDEAD && (*man_id == 0 || *flash_id == 0)) {
@@ -534,9 +534,11 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
* Example: ATMEL 0x00 01 45 1F
* Extract MFG and Dev ID from last two bytes.
*/
- ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
- *man_id = LSB(ids);
- *flash_id = MSB(ids);
+ faddr = flash_conf_addr(ha, 0x009f);
+ if (!qla24xx_read_flash_dword(ha, faddr, &ids)) {
+ *man_id = LSB(ids);
+ *flash_id = MSB(ids);
+ }
}
}
@@ -545,12 +547,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
{
const char *loc, *locations[] = { "DEF", "PCI" };
uint32_t pcihdr, pcids;
- uint32_t *dcode;
- uint8_t *buf, *bcode, last_image;
uint16_t cnt, chksum, *wptr;
- struct qla_flt_location *fltl;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
+ struct qla_flt_location *fltl = (void *)req->ring;
+ uint32_t *dcode = (void *)req->ring;
+ uint8_t *buf = (void *)req->ring, *bcode, last_image;
/*
* FLT-location structure resides after the last PCI region.
@@ -571,12 +573,13 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_83;
goto end;
+ } else if (IS_QLA28XX(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_28;
+ goto end;
}
+
/* Begin with first PCI expansion ROM header. */
- buf = (uint8_t *)req->ring;
- dcode = (uint32_t *)req->ring;
pcihdr = 0;
- last_image = 1;
do {
/* Verify PCI expansion ROM header. */
qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
@@ -601,22 +604,19 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
} while (!last_image);
/* Now verify FLT-location structure. */
- fltl = (struct qla_flt_location *)req->ring;
- qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
- sizeof(struct qla_flt_location) >> 2);
- if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
- fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
+ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
+ if (memcmp(fltl->sig, "QFLT", 4))
goto end;
- wptr = (uint16_t *)req->ring;
- cnt = sizeof(struct qla_flt_location) >> 1;
+ wptr = (void *)req->ring;
+ cnt = sizeof(*fltl) / sizeof(*wptr);
for (chksum = 0; cnt--; wptr++)
chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_log(ql_log_fatal, vha, 0x0045,
"Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
- buf, sizeof(struct qla_flt_location));
+ fltl, sizeof(*fltl));
return QLA_FUNCTION_FAILED;
}
@@ -634,7 +634,7 @@ end:
static void
qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
{
- const char *loc, *locations[] = { "DEF", "FLT" };
+ const char *locations[] = { "DEF", "FLT" }, *loc = locations[1];
const uint32_t def_fw[] =
{ FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
const uint32_t def_boot[] =
@@ -664,20 +664,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
const uint32_t fcp_prio_cfg1[] =
{ FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
0 };
- uint32_t def;
- uint16_t *wptr;
- uint16_t cnt, chksum;
- uint32_t start;
- struct qla_flt_header *flt;
- struct qla_flt_region *region;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- def = 0;
- if (IS_QLA25XX(ha))
- def = 1;
- else if (IS_QLA81XX(ha))
- def = 2;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
+ struct qla_flt_header *flt = (void *)ha->flt;
+ struct qla_flt_region *region = (void *)&flt[1];
+ uint16_t *wptr, cnt, chksum;
+ uint32_t start;
/* Assign FCP prio region since older adapters may not have FLT, or
FCP prio region in it's FLT.
@@ -686,12 +679,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
ha->flt_region_flt = flt_addr;
- wptr = (uint16_t *)req->ring;
- flt = (struct qla_flt_header *)req->ring;
- region = (struct qla_flt_region *)&flt[1];
- ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
- flt_addr << 2, OPTROM_BURST_SIZE);
- if (*wptr == cpu_to_le16(0xffff))
+ wptr = (uint16_t *)ha->flt;
+ qla24xx_read_flash_data(vha, (void *)flt, flt_addr,
+ (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE) >> 2);
+
+ if (le16_to_cpu(*wptr) == 0xffff)
goto no_flash_data;
if (flt->version != cpu_to_le16(1)) {
ql_log(ql_log_warn, vha, 0x0047,
@@ -701,7 +693,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
goto no_flash_data;
}
- cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+ cnt = (sizeof(*flt) + le16_to_cpu(flt->length)) / sizeof(*wptr);
for (chksum = 0; cnt--; wptr++)
chksum += le16_to_cpu(*wptr);
if (chksum) {
@@ -712,18 +704,20 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
goto no_flash_data;
}
- loc = locations[1];
- cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+ cnt = le16_to_cpu(flt->length) / sizeof(*region);
for ( ; cnt; cnt--, region++) {
/* Store addresses as DWORD offsets. */
start = le32_to_cpu(region->start) >> 2;
ql_dbg(ql_dbg_init, vha, 0x0049,
- "FLT[%02x]: start=0x%x "
- "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff,
- start, le32_to_cpu(region->end) >> 2,
- le32_to_cpu(region->size));
-
- switch (le32_to_cpu(region->code) & 0xff) {
+ "FLT[%#x]: start=%#x end=%#x size=%#x.\n",
+ le16_to_cpu(region->code), start,
+ le32_to_cpu(region->end) >> 2,
+ le32_to_cpu(region->size) >> 2);
+ if (region->attribute)
+ ql_log(ql_dbg_init, vha, 0xffff,
+ "Region %x is secure\n", region->code);
+
+ switch (le16_to_cpu(region->code)) {
case FLT_REG_FCOE_FW:
if (!IS_QLA8031(ha))
break;
@@ -753,13 +747,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_2:
- if (!IS_QLA27XX(ha))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
break;
if (ha->port_no == 2)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_3:
- if (!IS_QLA27XX(ha))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
break;
if (ha->port_no == 3)
ha->flt_region_vpd = start;
@@ -777,13 +771,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_nvram = start;
break;
case FLT_REG_NVRAM_2:
- if (!IS_QLA27XX(ha))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
break;
if (ha->port_no == 2)
ha->flt_region_nvram = start;
break;
case FLT_REG_NVRAM_3:
- if (!IS_QLA27XX(ha))
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
break;
if (ha->port_no == 3)
ha->flt_region_nvram = start;
@@ -847,36 +841,74 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_nvram = start;
break;
case FLT_REG_IMG_PRI_27XX:
- if (IS_QLA27XX(ha))
+ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
ha->flt_region_img_status_pri = start;
break;
case FLT_REG_IMG_SEC_27XX:
- if (IS_QLA27XX(ha))
+ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
ha->flt_region_img_status_sec = start;
break;
case FLT_REG_FW_SEC_27XX:
- if (IS_QLA27XX(ha))
+ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
ha->flt_region_fw_sec = start;
break;
case FLT_REG_BOOTLOAD_SEC_27XX:
- if (IS_QLA27XX(ha))
+ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
ha->flt_region_boot_sec = start;
break;
+ case FLT_REG_AUX_IMG_PRI_28XX:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ ha->flt_region_aux_img_status_pri = start;
+ break;
+ case FLT_REG_AUX_IMG_SEC_28XX:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ ha->flt_region_aux_img_status_sec = start;
+ break;
+ case FLT_REG_NVRAM_SEC_28XX_0:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (ha->port_no == 0)
+ ha->flt_region_nvram_sec = start;
+ break;
+ case FLT_REG_NVRAM_SEC_28XX_1:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (ha->port_no == 1)
+ ha->flt_region_nvram_sec = start;
+ break;
+ case FLT_REG_NVRAM_SEC_28XX_2:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (ha->port_no == 2)
+ ha->flt_region_nvram_sec = start;
+ break;
+ case FLT_REG_NVRAM_SEC_28XX_3:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (ha->port_no == 3)
+ ha->flt_region_nvram_sec = start;
+ break;
case FLT_REG_VPD_SEC_27XX_0:
- if (IS_QLA27XX(ha))
- ha->flt_region_vpd_sec = start;
+ case FLT_REG_VPD_SEC_28XX_0:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->flt_region_vpd_nvram_sec = start;
+ if (ha->port_no == 0)
+ ha->flt_region_vpd_sec = start;
+ }
break;
case FLT_REG_VPD_SEC_27XX_1:
- if (IS_QLA27XX(ha))
- ha->flt_region_vpd_sec = start;
+ case FLT_REG_VPD_SEC_28XX_1:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (ha->port_no == 1)
+ ha->flt_region_vpd_sec = start;
break;
case FLT_REG_VPD_SEC_27XX_2:
- if (IS_QLA27XX(ha))
- ha->flt_region_vpd_sec = start;
+ case FLT_REG_VPD_SEC_28XX_2:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (ha->port_no == 2)
+ ha->flt_region_vpd_sec = start;
break;
case FLT_REG_VPD_SEC_27XX_3:
- if (IS_QLA27XX(ha))
- ha->flt_region_vpd_sec = start;
+ case FLT_REG_VPD_SEC_28XX_3:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (ha->port_no == 3)
+ ha->flt_region_vpd_sec = start;
break;
}
}
@@ -912,22 +944,19 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
#define FLASH_BLK_SIZE_32K 0x8000
#define FLASH_BLK_SIZE_64K 0x10000
const char *loc, *locations[] = { "MID", "FDT" };
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
uint16_t cnt, chksum;
- uint16_t *wptr;
- struct qla_fdt_layout *fdt;
+ uint16_t *wptr = (void *)req->ring;
+ struct qla_fdt_layout *fdt = (void *)req->ring;
uint8_t man_id, flash_id;
uint16_t mid = 0, fid = 0;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- wptr = (uint16_t *)req->ring;
- fdt = (struct qla_fdt_layout *)req->ring;
- ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
- ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
- if (*wptr == cpu_to_le16(0xffff))
+ qla24xx_read_flash_data(vha, (void *)fdt, ha->flt_region_fdt,
+ OPTROM_BURST_DWORDS);
+ if (le16_to_cpu(*wptr) == 0xffff)
goto no_flash_data;
- if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
- fdt->sig[3] != 'D')
+ if (memcmp(fdt->sig, "QLID", 4))
goto no_flash_data;
for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++)
@@ -938,7 +967,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
" checksum=0x%x id=%c version0x%x.\n", chksum,
fdt->sig[0], le16_to_cpu(fdt->version));
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
- (uint8_t *)fdt, sizeof(*fdt));
+ fdt, sizeof(*fdt));
goto no_flash_data;
}
@@ -958,7 +987,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
fdt->unprotect_sec_cmd);
ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
- flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd):
+ flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd) :
flash_conf_addr(ha, 0x0336);
}
goto done;
@@ -1019,8 +1048,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
return;
wptr = (uint32_t *)req->ring;
- ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
- QLA82XX_IDC_PARAM_ADDR , 8);
+ ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
if (*wptr == cpu_to_le32(0xffffffff)) {
ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
@@ -1045,7 +1073,8 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -1081,8 +1110,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
if (IS_QLA8044(ha))
return;
- ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
- ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
+ ha->isp_ops->read_optrom(vha, &hdr, ha->flt_region_npiv_conf << 2,
+ sizeof(struct qla_npiv_header));
if (hdr.version == cpu_to_le16(0xffff))
return;
if (hdr.version != cpu_to_le16(1)) {
@@ -1101,8 +1130,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
return;
}
- ha->isp_ops->read_optrom(vha, (uint8_t *)data,
- ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
+ ha->isp_ops->read_optrom(vha, data, ha->flt_region_npiv_conf << 2,
+ NPIV_CONFIG_SIZE);
cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1;
for (wptr = data, chksum = 0; cnt--; wptr++)
@@ -1139,10 +1168,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
vid.node_name = wwn_to_u64(entry->node_name);
ql_dbg(ql_dbg_user, vha, 0x7093,
- "NPIV[%02x]: wwpn=%llx "
- "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
- (unsigned long long)vid.port_name,
- (unsigned long long)vid.node_name,
+ "NPIV[%02x]: wwpn=%llx wwnn=%llx vf_id=%#x Q_qos=%#x F_qos=%#x.\n",
+ cnt, vid.port_name, vid.node_name,
le16_to_cpu(entry->vf_id),
entry->q_qos, entry->f_qos);
@@ -1150,10 +1177,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
vport = fc_vport_create(vha->host, 0, &vid);
if (!vport)
ql_log(ql_log_warn, vha, 0x7094,
- "NPIV-Config Failed to create vport [%02x]: "
- "wwpn=%llx wwnn=%llx.\n", cnt,
- (unsigned long long)vid.port_name,
- (unsigned long long)vid.node_name);
+ "NPIV-Config Failed to create vport [%02x]: wwpn=%llx wwnn=%llx.\n",
+ cnt, vid.port_name, vid.node_name);
}
}
done:
@@ -1188,9 +1213,10 @@ done:
static int
qla24xx_protect_flash(scsi_qla_host_t *vha)
{
- uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ ulong cnt = 300;
+ uint32_t faddr, dword;
if (ha->flags.fac_supported)
return qla81xx_fac_do_write_enable(vha, 0);
@@ -1199,11 +1225,14 @@ qla24xx_protect_flash(scsi_qla_host_t *vha)
goto skip_wrt_protect;
/* Enable flash write-protection and wait for completion. */
- qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
- ha->fdt_wrt_disable);
- for (cnt = 300; cnt &&
- qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
- cnt--) {
+ faddr = flash_conf_addr(ha, 0x101);
+ qla24xx_write_flash_dword(ha, faddr, ha->fdt_wrt_disable);
+ faddr = flash_conf_addr(ha, 0x5);
+ while (cnt--) {
+ if (!qla24xx_read_flash_dword(ha, faddr, &dword)) {
+ if (!(dword & BIT_0))
+ break;
+ }
udelay(10);
}
@@ -1211,7 +1240,6 @@ skip_wrt_protect:
/* Disable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
return QLA_SUCCESS;
}
@@ -1239,107 +1267,103 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords)
{
int ret;
- uint32_t liter;
- uint32_t sec_mask, rest_addr;
- uint32_t fdata;
+ ulong liter;
+ ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */
+ uint32_t sec_mask, rest_addr, fdata;
dma_addr_t optrom_dma;
void *optrom = NULL;
struct qla_hw_data *ha = vha->hw;
- /* Prepare burst-capable write on supported ISPs. */
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
- IS_QLA27XX(ha)) &&
- !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
- optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
- &optrom_dma, GFP_KERNEL);
- if (!optrom) {
- ql_log(ql_log_warn, vha, 0x7095,
- "Unable to allocate "
- "memory for optrom burst write (%x KB).\n",
- OPTROM_BURST_SIZE / 1024);
- }
- }
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ goto next;
- rest_addr = (ha->fdt_block_size >> 2) - 1;
- sec_mask = ~rest_addr;
+ /* Allocate dma buffer for burst write */
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+ if (!optrom) {
+ ql_log(ql_log_warn, vha, 0x7095,
+		    "Failed to allocate burst buffer (%x bytes).\n", OPTROM_BURST_SIZE);
+ }
+next:
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+ "Unprotect flash...\n");
ret = qla24xx_unprotect_flash(vha);
- if (ret != QLA_SUCCESS) {
+ if (ret) {
ql_log(ql_log_warn, vha, 0x7096,
- "Unable to unprotect flash for update.\n");
+ "Failed to unprotect flash.\n");
goto done;
}
+ rest_addr = (ha->fdt_block_size >> 2) - 1;
+ sec_mask = ~rest_addr;
for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
fdata = (faddr & sec_mask) << 2;
/* Are we at the beginning of a sector? */
- if ((faddr & rest_addr) == 0) {
- /* Do sector unprotect. */
- if (ha->fdt_unprotect_sec_cmd)
- qla24xx_write_flash_dword(ha,
- ha->fdt_unprotect_sec_cmd,
- (fdata & 0xff00) | ((fdata << 16) &
- 0xff0000) | ((fdata >> 16) & 0xff));
+ if (!(faddr & rest_addr)) {
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+ "Erase sector %#x...\n", faddr);
+
ret = qla24xx_erase_sector(vha, fdata);
- if (ret != QLA_SUCCESS) {
+ if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7007,
- "Unable to erase erase sector: address=%x.\n",
- faddr);
+ "Failed to erase sector %x.\n", faddr);
break;
}
}
- /* Go with burst-write. */
- if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
- /* Copy data to DMA'ble buffer. */
- memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+ if (optrom) {
+			/* Clamp the final burst to the dwords remaining */
+ if (dwords - liter < dburst)
+ dburst = dwords - liter;
+ /* Copy to dma buffer */
+ memcpy(optrom, dwptr, dburst << 2);
+
+ /* Burst write */
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+ "Write burst (%#lx dwords)...\n", dburst);
ret = qla2x00_load_ram(vha, optrom_dma,
- flash_data_addr(ha, faddr),
- OPTROM_BURST_DWORDS);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x7097,
- "Unable to burst-write optrom segment "
- "(%x/%x/%llx).\n", ret,
- flash_data_addr(ha, faddr),
- (unsigned long long)optrom_dma);
- ql_log(ql_log_warn, vha, 0x7098,
- "Reverting to slow-write.\n");
-
- dma_free_coherent(&ha->pdev->dev,
- OPTROM_BURST_SIZE, optrom, optrom_dma);
- optrom = NULL;
- } else {
- liter += OPTROM_BURST_DWORDS - 1;
- faddr += OPTROM_BURST_DWORDS - 1;
- dwptr += OPTROM_BURST_DWORDS - 1;
+ flash_data_addr(ha, faddr), dburst);
+ if (!ret) {
+ liter += dburst - 1;
+ faddr += dburst - 1;
+ dwptr += dburst - 1;
continue;
}
+
+ ql_log(ql_log_warn, vha, 0x7097,
+			    "Failed burst-write at %x (%p/%#llx)...\n",
+ flash_data_addr(ha, faddr), optrom,
+ (u64)optrom_dma);
+
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ optrom = NULL;
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ break;
+ ql_log(ql_log_warn, vha, 0x7098,
+ "Reverting to slow write...\n");
}
+ /* Slow write */
ret = qla24xx_write_flash_dword(ha,
flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
- if (ret != QLA_SUCCESS) {
+ if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7006,
- "Unable to program flash address=%x data=%x.\n",
- faddr, *dwptr);
+			    "Failed slow write %x (%x)\n", faddr, *dwptr);
break;
}
-
- /* Do sector protect. */
- if (ha->fdt_unprotect_sec_cmd &&
- ((faddr & rest_addr) == rest_addr))
- qla24xx_write_flash_dword(ha,
- ha->fdt_protect_sec_cmd,
- (fdata & 0xff00) | ((fdata << 16) &
- 0xff0000) | ((fdata >> 16) & 0xff));
}
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+ "Protect flash...\n");
ret = qla24xx_protect_flash(vha);
- if (ret != QLA_SUCCESS)
+ if (ret)
ql_log(ql_log_warn, vha, 0x7099,
- "Unable to protect flash after update.\n");
+		    "Failed to protect flash.\n");
done:
if (optrom)
dma_free_coherent(&ha->pdev->dev,
@@ -1349,7 +1373,7 @@ done:
}
uint8_t *
-qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
@@ -1368,27 +1392,30 @@ qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
}
uint8_t *
-qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla24xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
- uint32_t i;
- uint32_t *dwptr;
struct qla_hw_data *ha = vha->hw;
+ uint32_t *dwptr = buf;
+ uint32_t i;
if (IS_P3P_TYPE(ha))
return buf;
/* Dword reads to flash. */
- dwptr = (uint32_t *)buf;
- for (i = 0; i < bytes >> 2; i++, naddr++)
- dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
- nvram_data_addr(ha, naddr)));
+ naddr = nvram_data_addr(ha, naddr);
+ bytes >>= 2;
+ for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+ if (qla24xx_read_flash_dword(ha, naddr, dwptr))
+ break;
+ cpu_to_le32s(dwptr);
+ }
return buf;
}
int
-qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla2x00_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
int ret, stat;
@@ -1422,14 +1449,14 @@ qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
}
int
-qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
- int ret;
- uint32_t i;
- uint32_t *dwptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t *dwptr = buf;
+ uint32_t i;
+ int ret;
ret = QLA_SUCCESS;
@@ -1446,11 +1473,10 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
/* Dword writes to flash. */
- dwptr = (uint32_t *)buf;
- for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
- ret = qla24xx_write_flash_dword(ha,
- nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
- if (ret != QLA_SUCCESS) {
+ naddr = nvram_data_addr(ha, naddr);
+ bytes >>= 2;
+ for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+ if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) {
ql_dbg(ql_dbg_user, vha, 0x709a,
"Unable to program nvram address=%x data=%x.\n",
naddr, *dwptr);
@@ -1470,31 +1496,34 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
}
uint8_t *
-qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla25xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
- uint32_t i;
- uint32_t *dwptr;
struct qla_hw_data *ha = vha->hw;
+ uint32_t *dwptr = buf;
+ uint32_t i;
/* Dword reads to flash. */
- dwptr = (uint32_t *)buf;
- for (i = 0; i < bytes >> 2; i++, naddr++)
- dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
- flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));
+ naddr = flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr);
+ bytes >>= 2;
+ for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+ if (qla24xx_read_flash_dword(ha, naddr, dwptr))
+ break;
+
+ cpu_to_le32s(dwptr);
+ }
return buf;
}
+#define RMW_BUFFER_SIZE (64 * 1024)
int
-qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla25xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
struct qla_hw_data *ha = vha->hw;
-#define RMW_BUFFER_SIZE (64 * 1024)
- uint8_t *dbuf;
+ uint8_t *dbuf = vmalloc(RMW_BUFFER_SIZE);
- dbuf = vmalloc(RMW_BUFFER_SIZE);
if (!dbuf)
return QLA_MEMORY_ALLOC_FAILED;
ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
@@ -1728,7 +1757,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
{
uint32_t led_select_value = 0;
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
goto out;
if (ha->port_no == 0)
@@ -1749,13 +1778,14 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
uint16_t orig_led_cfg[6];
uint32_t led_10_value, led_43_value;
- if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha) &&
+ !IS_QLA28XX(ha))
return;
if (!ha->beacon_blink_led)
return;
- if (IS_QLA27XX(ha)) {
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
} else if (IS_QLA2031(ha)) {
@@ -1845,7 +1875,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
return QLA_FUNCTION_FAILED;
}
- if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+ if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
goto skip_gpio;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1885,7 +1915,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
ha->beacon_blink_led = 0;
- if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+ if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
goto set_fw_options;
if (IS_QLA8031(ha) || IS_QLA81XX(ha))
@@ -2314,8 +2344,8 @@ qla2x00_resume_hba(struct scsi_qla_host *vha)
scsi_unblock_requests(vha->host);
}
-uint8_t *
-qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
uint32_t addr, midpoint;
@@ -2349,12 +2379,12 @@ qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
}
int
-qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
int rval;
- uint8_t man_id, flash_id, sec_number, data;
+ uint8_t man_id, flash_id, sec_number, *data;
uint16_t wd;
uint32_t addr, liter, sec_mask, rest_addr;
struct qla_hw_data *ha = vha->hw;
@@ -2483,7 +2513,7 @@ update_flash:
for (addr = offset, liter = 0; liter < length; liter++,
addr++) {
- data = buf[liter];
+ data = buf + liter;
/* Are we at the beginning of a sector? */
if ((addr & rest_addr) == 0) {
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
@@ -2551,7 +2581,7 @@ update_flash:
}
}
- if (qla2x00_program_flash_address(ha, addr, data,
+ if (qla2x00_program_flash_address(ha, addr, *data,
man_id, flash_id)) {
rval = QLA_FUNCTION_FAILED;
break;
@@ -2567,8 +2597,8 @@ update_flash:
return rval;
}
-uint8_t *
-qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
struct qla_hw_data *ha = vha->hw;
@@ -2578,7 +2608,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
- qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
+ qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2);
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@@ -2587,8 +2617,340 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
return buf;
}
+static int
+qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf,
+ uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf)
+{
+ uint32_t *p, check_sum = 0;
+ int i;
+
+ p = buf + buf_size_without_sfub;
+
+ /* Extract SFUB from end of file */
+ memcpy(sfub_buf, (uint8_t *)p,
+ sizeof(struct secure_flash_update_block));
+
+ for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++)
+ check_sum += p[i];
+
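+	/*
+	 * The two's-complement of the sum must match the checksum
+	 * dword stored immediately after the SFUB in the buffer.
+	 */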
+ check_sum = (~check_sum) + 1;
+
+ if (check_sum != p[i]) {
+ ql_log(ql_log_warn, vha, 0x7097,
+ "SFUB checksum failed, 0x%x, 0x%x\n",
+ check_sum, p[i]);
+ return QLA_COMMAND_ERROR;
+ }
+
+ return QLA_SUCCESS;
+}
+
+static int
+qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
+ struct qla_flt_region *region)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_flt_header *flt;
+ struct qla_flt_region *flt_reg;
+ uint16_t cnt;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!ha->flt)
+ return QLA_FUNCTION_FAILED;
+
+ flt = (struct qla_flt_header *)ha->flt;
+ flt_reg = (struct qla_flt_region *)&flt[1];
+ cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+
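+	/* Scan the FLT regions for one whose start address matches */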
+ for (; cnt; cnt--, flt_reg++) {
+ if (flt_reg->start == start) {
+ memcpy((uint8_t *)region, flt_reg,
+ sizeof(struct qla_flt_region));
+ rval = QLA_SUCCESS;
+ break;
+ }
+ }
+
+ return rval;
+}
+
+static int
+qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+ uint32_t dwords)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ulong liter;
+ ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */
+ uint32_t sec_mask, rest_addr, fdata;
+ void *optrom = NULL;
+ dma_addr_t optrom_dma;
+ int rval;
+ struct secure_flash_update_block *sfub;
+ dma_addr_t sfub_dma;
+ uint32_t offset = faddr << 2;
+ uint32_t buf_size_without_sfub = 0;
+ struct qla_flt_region region;
+ bool reset_to_rom = false;
+ uint32_t risc_size, risc_attr = 0;
+ uint32_t *fw_array = NULL;
+
+ /* Retrieve region info - must be a start address passed in */
+ rval = qla28xx_get_flash_region(vha, offset, &region);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Invalid address %x - not a region start address\n",
+ offset);
+ goto done;
+ }
+
+ /* Allocate dma buffer for burst write */
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+ if (!optrom) {
+ ql_log(ql_log_warn, vha, 0x7095,
+		    "Failed to allocate burst buffer (%x bytes).\n", OPTROM_BURST_SIZE);
+ rval = QLA_COMMAND_ERROR;
+ goto done;
+ }
+
+ /*
+ * If adapter supports secure flash and region is secure
+ * extract secure flash update block (SFUB) and verify
+ */
+ if (ha->flags.secure_adapter && region.attribute) {
+
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+ "Region %x is secure\n", region.code);
+
+ if (region.code == FLT_REG_FW ||
+ region.code == FLT_REG_FW_SEC_27XX) {
+ fw_array = dwptr;
+
+ /* 1st fw array */
+ risc_size = be32_to_cpu(fw_array[3]);
+ risc_attr = be32_to_cpu(fw_array[9]);
+
+ buf_size_without_sfub = risc_size;
+ fw_array += risc_size;
+
+ /* 2nd fw array */
+ risc_size = be32_to_cpu(fw_array[3]);
+
+ buf_size_without_sfub += risc_size;
+ fw_array += risc_size;
+
+ /* 1st dump template */
+ risc_size = be32_to_cpu(fw_array[2]);
+
+ /* skip header and ignore checksum */
+ buf_size_without_sfub += risc_size;
+ fw_array += risc_size;
+
+ if (risc_attr & BIT_9) {
+ /* 2nd dump template */
+ risc_size = be32_to_cpu(fw_array[2]);
+
+ /* skip header and ignore checksum */
+ buf_size_without_sfub += risc_size;
+ fw_array += risc_size;
+ }
+ } else {
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+ "Secure region %x not supported\n",
+ region.code);
+ rval = QLA_COMMAND_ERROR;
+ goto done;
+ }
+
+ sfub = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct secure_flash_update_block), &sfub_dma,
+ GFP_KERNEL);
+ if (!sfub) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to allocate memory for SFUB\n");
+ rval = QLA_COMMAND_ERROR;
+ goto done;
+ }
+
+ rval = qla28xx_extract_sfub_and_verify(vha, dwptr, dwords,
+ buf_size_without_sfub, (uint8_t *)sfub);
+
+ if (rval != QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+ "SFUB extract and verify successful\n");
+ }
+
+ rest_addr = (ha->fdt_block_size >> 2) - 1;
+ sec_mask = ~rest_addr;
+
+ /* Lock semaphore */
+ rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to lock flash semaphore.");
+ goto done;
+ }
+
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+ "Unprotect flash...\n");
+ rval = qla24xx_unprotect_flash(vha);
+ if (rval) {
+ qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
+		ql_log(ql_log_warn, vha, 0x7096, "Failed to unprotect flash.\n");
+ goto done;
+ }
+
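+	/* Erase every sector spanned by the region before programming */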
+ for (liter = 0; liter < dwords; liter++, faddr++) {
+ fdata = (faddr & sec_mask) << 2;
+
+ /* If start of sector */
+ if (!(faddr & rest_addr)) {
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+ "Erase sector %#x...\n", faddr);
+ rval = qla24xx_erase_sector(vha, fdata);
+ if (rval) {
+ ql_dbg(ql_dbg_user, vha, 0x7007,
+				    "Failed to erase sector %#x.\n", faddr);
+ goto write_protect;
+ }
+ }
+ }
+
+ if (ha->flags.secure_adapter) {
+ /*
+ * If adapter supports secure flash but FW doesn't,
+ * disable write protect, release semaphore and reset
+ * chip to execute ROM code in order to update region securely
+ */
+ if (!ha->flags.secure_fw) {
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+ "Disable Write and Release Semaphore.");
+ rval = qla24xx_protect_flash(vha);
+ if (rval != QLA_SUCCESS) {
+ qla81xx_fac_semaphore_access(vha,
+ FAC_SEMAPHORE_UNLOCK);
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to protect flash.");
+ goto done;
+ }
+
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+ "Reset chip to ROM.");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ set_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ rval = qla2x00_wait_for_chip_reset(vha);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to reset to ROM code.");
+ goto done;
+ }
+ reset_to_rom = true;
+ ha->flags.fac_supported = 0;
+
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+ "Lock Semaphore");
+ rval = qla2xxx_write_remote_register(vha,
+ FLASH_SEMAPHORE_REGISTER_ADDR, 0x00020002);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to lock flash semaphore.");
+ goto done;
+ }
+
+ /* Unprotect flash */
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+ "Enable Write.");
+ rval = qla2x00_write_ram_word(vha, 0x7ffd0101, 0);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x7096,
+				    "Failed to unprotect flash.\n");
+ goto done;
+ }
+ }
+
+ /* If region is secure, send Secure Flash MB Cmd */
+ if (region.attribute && buf_size_without_sfub) {
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+ "Sending Secure Flash MB Cmd\n");
+ rval = qla28xx_secure_flash_update(vha, 0, region.code,
+ buf_size_without_sfub, sfub_dma,
+ sizeof(struct secure_flash_update_block));
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Secure Flash MB Cmd failed %x.", rval);
+ goto write_protect;
+ }
+ }
+
+ }
+
+ /* re-init flash offset */
+ faddr = offset >> 2;
+
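+	/*
+	 * Program the region in bursts of up to OPTROM_BURST_DWORDS
+	 * dwords via qla2x00_load_ram().
+	 */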
+ for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
+ fdata = (faddr & sec_mask) << 2;
+
+		/* Clamp the final burst to the dwords remaining */
+ if (dwords - liter < dburst)
+ dburst = dwords - liter;
+
+ /* Copy to dma buffer */
+ memcpy(optrom, dwptr, dburst << 2);
+
+ /* Burst write */
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+ "Write burst (%#lx dwords)...\n", dburst);
+ rval = qla2x00_load_ram(vha, optrom_dma,
+ flash_data_addr(ha, faddr), dburst);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7097,
+ "Failed burst write at %x (%p/%#llx)...\n",
+ flash_data_addr(ha, faddr), optrom,
+ (u64)optrom_dma);
+ break;
+ }
+
+ liter += dburst - 1;
+ faddr += dburst - 1;
+ dwptr += dburst - 1;
+ continue;
+ }
+
+write_protect:
+ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+ "Protect flash...\n");
+ rval = qla24xx_protect_flash(vha);
+ if (rval) {
+ qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
+ ql_log(ql_log_warn, vha, 0x7099,
+		    "Failed to protect flash.\n");
+ }
+
+	if (reset_to_rom) {
+ /* Schedule DPC to restart the RISC */
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+
+ rval = qla2x00_wait_for_hba_online(vha);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Adapter did not come out of reset\n");
+ }
+
+done:
+ if (optrom)
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+
+ return rval;
+}
+
int
-qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
int rval;
@@ -2599,8 +2961,12 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with write. */
- rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
- length >> 2);
+ if (IS_QLA28XX(ha))
+ rval = qla28xx_write_flash_data(vha, (uint32_t *)buf,
+ offset >> 2, length >> 2);
+ else
+ rval = qla24xx_write_flash_data(vha, (uint32_t *)buf,
+ offset >> 2, length >> 2);
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
scsi_unblock_requests(vha->host);
@@ -2608,8 +2974,8 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
return rval;
}
-uint8_t *
-qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla25xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
int rval;
@@ -2620,7 +2986,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
struct qla_hw_data *ha = vha->hw;
if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
- IS_QLA27XX(ha))
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
goto try_fast;
if (offset & 0xfff)
goto slow_read;
@@ -2628,6 +2994,8 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
goto slow_read;
try_fast:
+ if (offset & 0xff)
+ goto slow_read;
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
@@ -2874,7 +3242,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
"Dumping fw "
"ver from flash:.\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
- (uint8_t *)dbyte, 8);
+ dbyte, 32);
if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
dcode[2] == 0xffff && dcode[3] == 0xffff) ||
@@ -2905,8 +3273,8 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
{
int ret = QLA_SUCCESS;
uint32_t pcihdr, pcids;
- uint32_t *dcode;
- uint8_t *bcode;
+ uint32_t *dcode = mbuf;
+ uint8_t *bcode = mbuf;
uint8_t code_type, last_image;
struct qla_hw_data *ha = vha->hw;
@@ -2918,17 +3286,14 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
- dcode = mbuf;
-
/* Begin with first PCI expansion ROM header. */
pcihdr = ha->flt_region_boot << 2;
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
- ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr,
- 0x20 * 4);
+ ha->isp_ops->read_optrom(vha, dcode, pcihdr, 0x20 * 4);
bcode = mbuf + (pcihdr % 4);
- if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+ if (memcmp(bcode, "\x55\xaa", 2)) {
/* No signature */
ql_log(ql_log_fatal, vha, 0x0154,
"No matching ROM signature.\n");
@@ -2939,13 +3304,11 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
/* Locate PCI data structure. */
pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
- ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids,
- 0x20 * 4);
+ ha->isp_ops->read_optrom(vha, dcode, pcids, 0x20 * 4);
bcode = mbuf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
- if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
- bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+ if (memcmp(bcode, "PCIR", 4)) {
/* Incorrect header. */
ql_log(ql_log_fatal, vha, 0x0155,
"PCI data struct not found pcir_adr=%x.\n", pcids);
@@ -2996,8 +3359,7 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
/* Read firmware image information. */
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
- ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2,
- 0x20);
+ ha->isp_ops->read_optrom(vha, dcode, ha->flt_region_fw << 2, 0x20);
bcode = mbuf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
@@ -3019,15 +3381,14 @@ int
qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
{
int ret = QLA_SUCCESS;
- uint32_t pcihdr, pcids;
- uint32_t *dcode;
- uint8_t *bcode;
+ uint32_t pcihdr = 0, pcids = 0;
+ uint32_t *dcode = mbuf;
+ uint8_t *bcode = mbuf;
uint8_t code_type, last_image;
int i;
struct qla_hw_data *ha = vha->hw;
uint32_t faddr = 0;
-
- pcihdr = pcids = 0;
+ struct active_regions active_regions = { };
if (IS_P3P_TYPE(ha))
return ret;
@@ -3040,18 +3401,19 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
- dcode = mbuf;
pcihdr = ha->flt_region_boot << 2;
- if (IS_QLA27XX(ha) &&
- qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
- pcihdr = ha->flt_region_boot_sec << 2;
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ qla27xx_get_active_image(vha, &active_regions);
+ if (active_regions.global == QLA27XX_SECONDARY_IMAGE) {
+ pcihdr = ha->flt_region_boot_sec << 2;
+ }
+ }
- last_image = 1;
do {
/* Verify PCI expansion ROM header. */
qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
bcode = mbuf + (pcihdr % 4);
- if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+ if (memcmp(bcode, "\x55\xaa", 2)) {
/* No signature */
ql_log(ql_log_fatal, vha, 0x0059,
"No matching ROM signature.\n");
@@ -3066,11 +3428,11 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
bcode = mbuf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
- if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
- bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+ if (memcmp(bcode, "PCIR", 4)) {
/* Incorrect header. */
ql_log(ql_log_fatal, vha, 0x005a,
"PCI data struct not found pcir_adr=%x.\n", pcids);
+ ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32);
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -3117,30 +3479,24 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
/* Read firmware image information. */
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
- dcode = mbuf;
faddr = ha->flt_region_fw;
- if (IS_QLA27XX(ha) &&
- qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
- faddr = ha->flt_region_fw_sec;
-
- qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
- for (i = 0; i < 4; i++)
- dcode[i] = be32_to_cpu(dcode[i]);
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ qla27xx_get_active_image(vha, &active_regions);
+ if (active_regions.global == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_fw_sec;
+ }
- if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
- dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
- (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
- dcode[3] == 0)) {
+ qla24xx_read_flash_data(vha, dcode, faddr, 8);
+ if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x005f,
"Unrecognized fw revision at %x.\n",
ha->flt_region_fw * 4);
+ ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
} else {
- ha->fw_revision[0] = dcode[0];
- ha->fw_revision[1] = dcode[1];
- ha->fw_revision[2] = dcode[2];
- ha->fw_revision[3] = dcode[3];
+ for (i = 0; i < 4; i++)
+ ha->fw_revision[i] = be32_to_cpu(dcode[4+i]);
ql_dbg(ql_dbg_init, vha, 0x0060,
- "Firmware revision %d.%d.%d (%x).\n",
+ "Firmware revision (flash) %u.%u.%u (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
ha->fw_revision[2], ha->fw_revision[3]);
}
@@ -3152,20 +3508,17 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
}
memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
- dcode = mbuf;
- ha->isp_ops->read_optrom(vha, (uint8_t *)dcode,
- ha->flt_region_gold_fw << 2, 32);
-
- if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
- dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
+ faddr = ha->flt_region_gold_fw;
+ qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8);
+ if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x0056,
- "Unrecognized golden fw at 0x%x.\n",
- ha->flt_region_gold_fw * 4);
+ "Unrecognized golden fw at %#x.\n", faddr);
+ ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
return ret;
}
- for (i = 4; i < 8; i++)
- ha->gold_fw_version[i-4] = be32_to_cpu(dcode[i]);
+ for (i = 0; i < 4; i++)
+ ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]);
return ret;
}
@@ -3237,7 +3590,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
fcp_prio_addr = ha->flt_region_fcp_prio;
/* first read the fcp priority data header from flash */
- ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
+ ha->isp_ops->read_optrom(vha, ha->fcp_prio_cfg,
fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
@@ -3248,7 +3601,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
- ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
+ ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0],
fcp_prio_addr << 2, (len < max_len ? len : max_len));
/* revalidate the entire FCP priority config data, including entries */
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 582d1663f971..3eeae72793bc 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -184,6 +184,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
/* Send marker if required */
if (unlikely(vha->marker_needed != 0)) {
int rc = qla2x00_issue_marker(vha, vha_locked);
+
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt, vha, 0xe03d,
"qla_target(%d): issue_marker() failed\n",
@@ -557,6 +558,7 @@ static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
struct imm_ntfy_from_isp *ntfy, int type)
{
struct qla_work_evt *e;
+
e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
if (!e)
return QLA_FUNCTION_FAILED;
@@ -680,7 +682,6 @@ done:
void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
fc_port_t *t;
- unsigned long flags;
switch (e->u.nack.type) {
case SRB_NACK_PRLI:
@@ -693,24 +694,19 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (t) {
ql_log(ql_log_info, vha, 0xd034,
"%s create sess success %p", __func__, t);
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* create sess has an extra kref */
vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
break;
}
qla24xx_async_notify_ack(vha, e->u.nack.fcport,
- (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
+ (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}
void qla24xx_delete_sess_fn(struct work_struct *work)
{
fc_port_t *fcport = container_of(work, struct fc_port, del_work);
struct qla_hw_data *ha = fcport->vha->hw;
- unsigned long flags;
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (fcport->se_sess) {
ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -718,7 +714,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
} else {
qlt_unreg_sess(fcport);
}
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
/*
@@ -787,8 +782,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->port_name, sess->loop_id);
sess->local = 0;
}
- ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
}
/*
@@ -980,6 +976,8 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
+ qla2x00_mark_device_lost(vha, sess, 0, 0);
+
if (sess->send_els_logo) {
qlt_port_logo_t logo;
@@ -1076,6 +1074,7 @@ void qlt_free_session_done(struct work_struct *work)
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
+
own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
if (con) {
@@ -1160,8 +1159,6 @@ void qlt_unreg_sess(struct fc_port *sess)
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- qla2x00_mark_device_lost(vha, sess, 0, 0);
-
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
sess->disc_state = DSC_DELETE_PEND;
sess->last_rscn_gen = sess->rscn_gen;
@@ -1329,6 +1326,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
res = -ENOENT;
for (i = 0; i < entries; i++) {
struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+
if ((gid->al_pa == s_id[2]) &&
(gid->area == s_id[1]) &&
(gid->domain == s_id[0])) {
@@ -2331,14 +2329,14 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
ctio->u.status1.scsi_status |=
cpu_to_le16(SS_RESIDUAL_UNDER);
- /* Response code and sense key */
- put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
- (&ctio->u.status1.sense_data)[0]);
+ /* Fixed format sense data. */
+ ctio->u.status1.sense_data[0] = 0x70;
+ ctio->u.status1.sense_data[2] = sense_key;
/* Additional sense length */
- put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+ ctio->u.status1.sense_data[7] = 0xa;
/* ASC and ASCQ */
- put_unaligned_le32(((asc << 24) | (ascq << 16)),
- (&ctio->u.status1.sense_data)[3]);
+ ctio->u.status1.sense_data[12] = asc;
+ ctio->u.status1.sense_data[13] = ascq;
/* Memory Barrier */
wmb();
@@ -2387,7 +2385,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
case ELS_PRLO:
case ELS_TPRLO:
ql_dbg(ql_dbg_disc, vha, 0x2106,
- "TM response logo %phC status %#x state %#x",
+ "TM response logo %8phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
qlt_schedule_sess_for_deletion(mcmd->sess);
@@ -2485,6 +2483,7 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
struct qla_hw_data *ha;
struct qla_qpair *qpair;
+
if (!cmd->sg_mapped)
return;
@@ -2635,7 +2634,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
int cnt;
- uint32_t *dword_ptr;
+ struct dsd64 *cur_dsd;
/* Build continuation packets */
while (prm->seg_cnt > 0) {
@@ -2656,19 +2655,13 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
cont_pkt64->sys_define = 0;
cont_pkt64->entry_type = CONTINUE_A64_TYPE;
- dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;
+ cur_dsd = cont_pkt64->dsd;
/* Load continuation entry data segments */
for (cnt = 0;
cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
- *dword_ptr++ =
- cpu_to_le32(lower_32_bits
- (sg_dma_address(prm->sg)));
- *dword_ptr++ = cpu_to_le32(upper_32_bits
- (sg_dma_address(prm->sg)));
- *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-
+ append_dsd64(&cur_dsd, prm->sg);
prm->sg = sg_next(prm->sg);
}
}
@@ -2681,13 +2674,13 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
int cnt;
- uint32_t *dword_ptr;
+ struct dsd64 *cur_dsd;
struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
/* Setup packet address segment pointer */
- dword_ptr = pkt24->u.status0.dseg_0_address;
+ cur_dsd = &pkt24->u.status0.dsd;
/* Set total data segment count */
if (prm->seg_cnt)
@@ -2695,8 +2688,8 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
if (prm->seg_cnt == 0) {
/* No data transfer */
- *dword_ptr++ = 0;
- *dword_ptr = 0;
+ cur_dsd->address = 0;
+ cur_dsd->length = 0;
return;
}
@@ -2706,14 +2699,7 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
for (cnt = 0;
(cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
- *dword_ptr++ =
- cpu_to_le32(lower_32_bits(sg_dma_address(prm->sg)));
-
- *dword_ptr++ = cpu_to_le32(upper_32_bits(
- sg_dma_address(prm->sg)));
-
- *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-
+ append_dsd64(&cur_dsd, prm->sg);
prm->sg = sg_next(prm->sg);
}
@@ -3037,7 +3023,7 @@ qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
- uint32_t *cur_dsd;
+ struct dsd64 *cur_dsd;
uint32_t transfer_length = 0;
uint32_t data_bytes;
uint32_t dif_bytes;
@@ -3183,12 +3169,11 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
- pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
- pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+ put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
if (!bundling) {
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+ cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
} else {
/*
* Configure Bundling if we need to fetch interlaving
@@ -3198,7 +3183,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
crc_ctx_pkt->u.bundling.dseg_count =
cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+ cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
}
/* Finish the common fields of CRC pkt */
@@ -3231,7 +3216,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
/* Walks dif segments */
pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+ cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
prm->prot_seg_cnt, cmd))
goto crc_queuing_error;
@@ -3263,7 +3248,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
cmd->state = QLA_TGT_STATE_PROCESSED;
- qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
return 0;
}
@@ -3292,7 +3276,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
* previous life, just abort the processing.
*/
cmd->state = QLA_TGT_STATE_PROCESSED;
- qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
@@ -3384,9 +3367,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
- spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
- spin_unlock(&cmd->cmd_lock);
cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
/* Memory Barrier */
@@ -3433,8 +3414,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
* Either the port is not online or this request was from
* previous life, just abort the processing.
*/
- cmd->state = QLA_TGT_STATE_NEED_DATA;
- qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ cmd->aborted = 1;
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ vha->hw->tgt.tgt_ops->handle_data(cmd);
ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
@@ -3465,9 +3448,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
qlt_load_data_segments(&prm);
cmd->state = QLA_TGT_STATE_NEED_DATA;
- spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
- spin_unlock(&cmd->cmd_lock);
cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
/* Memory Barrier */
@@ -3646,33 +3627,11 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *imm, int ha_locked)
{
- unsigned long flags = 0;
int rc;
- if (ha_locked) {
- rc = __qlt_send_term_imm_notif(vha, imm);
-
-#if 0 /* Todo */
- if (rc == -ENOMEM)
- qlt_alloc_qfull_cmd(vha, imm, 0, 0);
-#else
- if (rc) {
- }
-#endif
- goto done;
- }
-
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ WARN_ON_ONCE(!ha_locked);
rc = __qlt_send_term_imm_notif(vha, imm);
-
-#if 0 /* Todo */
- if (rc == -ENOMEM)
- qlt_alloc_qfull_cmd(vha, imm, 0, 0);
-#endif
-
-done:
- if (!ha_locked)
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ pr_debug("rc = %d\n", rc);
}
/*
@@ -3913,6 +3872,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
if (ctio != NULL) {
struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+
term = !(c->flags &
cpu_to_le16(OF_TERM_EXCH));
} else
@@ -3977,39 +3937,6 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return cmd;
}
-/* hardware_lock should be held by caller. */
-void
-qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
-{
- struct qla_hw_data *ha = vha->hw;
-
- if (cmd->sg_mapped)
- qlt_unmap_sg(vha, cmd);
-
- /* TODO: fix debug message type and ids. */
- if (cmd->state == QLA_TGT_STATE_PROCESSED) {
- ql_dbg(ql_dbg_io, vha, 0xff00,
- "HOST-ABORT: state=PROCESSED.\n");
- } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- cmd->write_data_transferred = 0;
- cmd->state = QLA_TGT_STATE_DATA_IN;
-
- ql_dbg(ql_dbg_io, vha, 0xff01,
- "HOST-ABORT: state=DATA_IN.\n");
-
- ha->tgt.tgt_ops->handle_data(cmd);
- return;
- } else {
- ql_dbg(ql_dbg_io, vha, 0xff03,
- "HOST-ABORT: state=BAD(%d).\n",
- cmd->state);
- dump_stack();
- }
-
- cmd->trc_flags |= TRC_FLUSH;
- ha->tgt.tgt_ops->free_cmd(cmd);
-}
-
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
@@ -4031,7 +3958,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
return;
}
- cmd = (struct qla_tgt_cmd *)qlt_ctio_to_cmd(vha, rsp, handle, ctio);
+ cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
if (cmd == NULL)
return;
@@ -4240,11 +4167,9 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
if (ret != 0)
goto out_term;
/*
- * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
+ * Drop extra session reference from qlt_handle_cmd_for_atio().
*/
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
out_term:
@@ -4261,9 +4186,7 @@ out_term:
target_free_tag(sess->se_sess, &cmd->se_cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static void qlt_do_work(struct work_struct *work)
@@ -4472,9 +4395,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
if (!cmd) {
ql_dbg(ql_dbg_io, vha, 0x3062,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return -EBUSY;
}
@@ -4773,6 +4694,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+
if (op_key == key) {
op->aborted = true;
count++;
@@ -4781,6 +4703,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+
if (cmd_key == key) {
cmd->aborted = 1;
count++;
@@ -5051,6 +4974,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (sess != NULL) {
bool delete = false;
int sec;
+
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
switch (sess->fw_login_state) {
case DSC_LS_PLOGI_PEND:
@@ -5203,6 +5127,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
case ELS_ADISC:
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(ha->base_qpair,
&tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
@@ -5266,6 +5191,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
case IMM_NTFY_LIP_LINK_REINIT:
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
"qla_target(%d): LINK REINIT (loop %#x, "
"subcode %x)\n", vha->vp_idx,
@@ -5492,11 +5418,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
se_sess = sess->se_sess;
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
- if (tag < 0)
- return;
-
- cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
- if (!cmd) {
+ if (tag < 0) {
ql_dbg(ql_dbg_io, vha, 0x3009,
"qla_target(%d): %s: Allocation of cmd failed\n",
vha->vp_idx, __func__);
@@ -5511,6 +5433,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
return;
}
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(struct qla_tgt_cmd));
qlt_incr_num_pend_cmds(vha);
@@ -5820,8 +5743,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_hw_data *ha = vha->hw;
- mcmd = (struct qla_tgt_mgmt_cmd *)qlt_ctio_to_cmd(vha, rsp,
- pkt->handle, pkt);
+ mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
ql_dbg(ql_dbg_async, vha, 0xe064,
"qla_target(%d): ABTS Comp without mcmd\n",
@@ -5883,6 +5805,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+
qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -5893,6 +5816,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
{
struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
int rc;
+
if (atio->u.isp2x.status !=
cpu_to_le16(ATIO_CDB_VALID)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05e,
@@ -5941,6 +5865,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
case CONTINUE_TGT_IO_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+
qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -5950,6 +5875,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
case CTIO_A64_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+
qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -5964,6 +5890,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
case NOTIFY_ACK_TYPE:
if (tgt->notify_ack_expected > 0) {
struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
ql_dbg(ql_dbg_tgt, vha, 0xe036,
"NOTIFY_ACK seq %08x status %x\n",
le16_to_cpu(entry->u.isp2x.seq_id),
@@ -6239,6 +6166,7 @@ retry:
if (rc == -ENOENT) {
qlt_port_logo_t logo;
+
sid_to_portid(s_id, &logo.id);
logo.cmd_count = 1;
qlt_send_first_logo(vha, &logo);
@@ -6318,17 +6246,19 @@ static void qlt_abort_work(struct qla_tgt *tgt,
}
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
- ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+ ha->tgt.tgt_ops->put_sess(sess);
+
if (rc != 0)
goto out_term;
return;
out_term2:
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -6386,9 +6316,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
- ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+
if (rc != 0)
goto out_term;
return;
@@ -6499,6 +6430,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
unsigned long flags;
struct qla_qpair *qpair = ha->queue_pair_map[i];
+
h = &tgt->qphints[i + 1];
INIT_LIST_HEAD(&h->hint_elem);
if (qpair) {
@@ -6937,7 +6869,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
if (ha->flags.msix_enabled) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
if (IS_QLA2071(ha)) {
/* 4 ports Baker: Enable Interrupt Handshake */
icb->msix_atio = 0;
@@ -6952,7 +6884,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
}
} else {
/* INTx|MSI */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
icb->msix_atio = 0;
icb->firmware_options_2 |= BIT_26;
ql_dbg(ql_dbg_init, vha, 0xf072,
@@ -7201,7 +7133,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
- if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
@@ -7329,7 +7262,10 @@ qlt_mem_free(struct qla_hw_data *ha)
sizeof(struct atio_from_isp), ha->tgt.atio_ring,
ha->tgt.atio_dma);
}
+ ha->tgt.atio_ring = NULL;
+ ha->tgt.atio_dma = 0;
kfree(ha->tgt.tgt_vp_map);
+ ha->tgt.tgt_vp_map = NULL;
}
/* vport_slock to be held by the caller */
@@ -7413,6 +7349,9 @@ int __init qlt_init(void)
{
int ret;
+ BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
+
if (!qlt_parse_ini_mode()) {
ql_log(ql_log_fatal, NULL, 0xe06b,
"qlt_parse_ini_mode() failed\n");
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f3de75000a08..89ceffa7d4fd 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -29,6 +29,7 @@
#define __QLA_TARGET_H
#include "qla_def.h"
+#include "qla_dsd.h"
/*
* Must be changed on any change in any initiator visible interfaces or
@@ -224,12 +225,7 @@ struct ctio_to_2xxx {
uint16_t reserved_1[3];
uint16_t scsi_status;
uint32_t transfer_length;
- uint32_t dseg_0_address; /* Data segment 0 address. */
- uint32_t dseg_0_length; /* Data segment 0 length. */
- uint32_t dseg_1_address; /* Data segment 1 address. */
- uint32_t dseg_1_length; /* Data segment 1 length. */
- uint32_t dseg_2_address; /* Data segment 2 address. */
- uint32_t dseg_2_length; /* Data segment 2 length. */
+ struct dsd32 dsd[3];
} __packed;
#define ATIO_PATH_INVALID 0x07
#define ATIO_CANT_PROV_CAP 0x16
@@ -429,10 +425,7 @@ struct ctio7_to_24xx {
uint32_t reserved2;
uint32_t transfer_length;
uint32_t reserved3;
- /* Data segment 0 address. */
- uint32_t dseg_0_address[2];
- /* Data segment 0 length. */
- uint32_t dseg_0_length;
+ struct dsd64 dsd;
} status0;
struct {
uint16_t sense_length;
@@ -526,10 +519,10 @@ struct ctio_crc2_to_fw {
uint32_t reserved5;
__le32 transfer_length; /* total fc transfer length */
uint32_t reserved6;
- __le32 crc_context_address[2];/* Data segment address. */
+ __le64 crc_context_address __packed; /* Data segment address. */
uint16_t crc_context_len; /* Data segment length. */
uint16_t reserved_1; /* MUST be set to 0. */
-} __packed;
+};
/* CTIO Type CRC_x Status IOCB */
struct ctio_crc_from_fw {
@@ -855,7 +848,7 @@ enum trace_flags {
TRC_CTIO_ERR = BIT_11,
TRC_CTIO_DONE = BIT_12,
TRC_CTIO_ABORTED = BIT_13,
- TRC_CTIO_STRANGE= BIT_14,
+ TRC_CTIO_STRANGE = BIT_14,
TRC_CMD_DONE = BIT_15,
TRC_CMD_CHK_STOP = BIT_16,
TRC_CMD_FREE = BIT_17,
@@ -889,10 +882,14 @@ struct qla_tgt_cmd {
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
- unsigned int aborted:1;
- unsigned int data_work:1;
- unsigned int data_work_free:1;
- unsigned int released:1;
+
+ /*
+ * This variable may be set from outside the LIO and I/O completion
+ * callback functions. Do not declare this member variable as a
+ * bitfield to avoid a read-modify-write operation when this variable
+ * is set.
+ */
+ unsigned int aborted;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
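A side note on the comment above: with adjacent C bitfields, an assignment such as cmd->aborted = 1 compiles to a read-modify-write of the whole storage word, so a concurrent store to a neighbouring flag (cmd_in_wq, cmd_sent_to_fw, ...) on another CPU can be lost. A minimal sketch of the difference, assuming the plain unsigned int member used here; the WRITE_ONCE() call and the some_other_flag name are illustrative only:

	/* bitfield flag: load the shared word, set one bit, store the whole
	 * word back -- this can clobber a racing update of the other flags
	 * packed into the same word */
	cmd->some_other_flag = 1;

	/* plain word-sized flag: one independent store, no RMW of neighbours */
	WRITE_ONCE(cmd->aborted, 1);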
@@ -1103,7 +1100,5 @@ extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t,
uint8_t, uint8_t, uint8_t);
-extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *,
- struct qla_tgt_cmd *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 9e52500caff0..de696a07532e 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -7,103 +7,9 @@
#include "qla_def.h"
#include "qla_tmpl.h"
-/* note default template is in big endian */
-static const uint32_t ql27xx_fwdt_default_template[] = {
- 0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
- 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x04010000, 0x14000000, 0x00000000,
- 0x02000000, 0x44000000, 0x09010000, 0x10000000,
- 0x00000000, 0x02000000, 0x01010000, 0x1c000000,
- 0x00000000, 0x02000000, 0x00600000, 0x00000000,
- 0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
- 0x02000000, 0x00600000, 0x00000000, 0xcc000000,
- 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
- 0x10600000, 0x00000000, 0xd4000000, 0x01010000,
- 0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
- 0x00000060, 0xf0000000, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x00700000, 0x041000c0,
- 0x00010000, 0x18000000, 0x00000000, 0x02000000,
- 0x10700000, 0x041000c0, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x40700000, 0x041000c0,
- 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
- 0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
- 0x18000000, 0x00000000, 0x02000000, 0x007c0000,
- 0x040300c4, 0x00010000, 0x18000000, 0x00000000,
- 0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
- 0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
- 0x00000000, 0xc0000000, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x007c0000, 0x04200000,
- 0x0b010000, 0x18000000, 0x00000000, 0x02000000,
- 0x0c000000, 0x00000000, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
- 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
- 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
- 0x00010000, 0x18000000, 0x00000000, 0x02000000,
- 0x0a000000, 0x04200080, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
- 0x00010000, 0x18000000, 0x00000000, 0x02000000,
- 0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
- 0x00010000, 0x18000000, 0x00000000, 0x02000000,
- 0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
- 0x00010000, 0x18000000, 0x00000000, 0x02000000,
- 0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
- 0x00010000, 0x18000000, 0x00000000, 0x02000000,
- 0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x00300000, 0x041000c0,
- 0x00010000, 0x18000000, 0x00000000, 0x02000000,
- 0x10300000, 0x041000c0, 0x00010000, 0x18000000,
- 0x00000000, 0x02000000, 0x20300000, 0x041000c0,
- 0x00010000, 0x18000000, 0x00000000, 0x02000000,
- 0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
- 0x00000000, 0x02000000, 0x06010000, 0x1c000000,
- 0x00000000, 0x02000000, 0x01000000, 0x00000200,
- 0xff230200, 0x06010000, 0x1c000000, 0x00000000,
- 0x02000000, 0x02000000, 0x00001000, 0x00000000,
- 0x07010000, 0x18000000, 0x00000000, 0x02000000,
- 0x00000000, 0x01000000, 0x07010000, 0x18000000,
- 0x00000000, 0x02000000, 0x00000000, 0x02000000,
- 0x07010000, 0x18000000, 0x00000000, 0x02000000,
- 0x00000000, 0x03000000, 0x0d010000, 0x14000000,
- 0x00000000, 0x02000000, 0x00000000, 0xff000000,
- 0x10000000, 0x00000000, 0x00000080,
-};
-
-static inline void __iomem *
-qla27xx_isp_reg(struct scsi_qla_host *vha)
-{
- return &vha->hw->iobase->isp24;
-}
+#define ISPREG(vha) (&(vha)->hw->iobase->isp24)
+#define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr)
+#define IOBASE(vha) IOBAR(ISPREG(vha))
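To make the new shorthands easier to follow, here is roughly how they expand; a sketch for the reader, assuming struct device_reg_24xx and its iobase_addr field from qla_def.h:

	/*
	 * ISPREG(vha) -> &vha->hw->iobase->isp24, the pointer the removed
	 *                qla27xx_isp_reg() helper used to return;
	 * IOBAR(reg)  -> offsetof(typeof(*(reg)), iobase_addr);
	 * IOBASE(vha) -> offsetof(struct device_reg_24xx, iobase_addr),
	 *                i.e. the same offset as the IOBASE_ADDR constant.
	 */

	/* For example, the converted t257 handler's */
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	/* behaves the same as the pre-patch */
	qla27xx_write_reg(qla27xx_isp_reg(vha), IOBASE_ADDR, addr, buf);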
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
@@ -128,7 +34,6 @@ qla27xx_insert32(uint32_t value, void *buf, ulong *len)
static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
-
if (buf && mem && size) {
buf += *len;
memcpy(buf, mem, size);
@@ -190,9 +95,9 @@ static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
uint offset, uint32_t data, void *buf)
{
- __iomem void *window = (void __iomem *)reg + offset;
-
if (buf) {
+ void __iomem *window = (void __iomem *)reg + offset;
+
WRT_REG_DWORD(window, data);
}
}
@@ -205,7 +110,7 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
void __iomem *window = (void __iomem *)reg + offset;
void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);
- qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
+ qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
while (count--) {
qla27xx_insert32(addr, buf, len);
readn(window, buf, len);
@@ -224,7 +129,7 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
- return (void *)ent + ent->hdr.size;
+ return (void *)ent + le32_to_cpu(ent->hdr.size);
}
static struct qla27xx_fwdt_entry *
@@ -254,12 +159,14 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong addr = le32_to_cpu(ent->t256.base_addr);
+ uint offset = ent->t256.pci_offset;
+ ulong count = le16_to_cpu(ent->t256.reg_count);
+ uint width = ent->t256.reg_width;
ql_dbg(ql_dbg_misc, vha, 0xd200,
"%s: rdio t1 [%lx]\n", __func__, *len);
- qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
- ent->t256.reg_count, ent->t256.reg_width, buf, len);
+ qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);
return qla27xx_next_entry(ent);
}
@@ -268,12 +175,14 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong addr = le32_to_cpu(ent->t257.base_addr);
+ uint offset = ent->t257.pci_offset;
+ ulong data = le32_to_cpu(ent->t257.write_data);
ql_dbg(ql_dbg_misc, vha, 0xd201,
"%s: wrio t1 [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
- qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
+ qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
+ qla27xx_write_reg(ISPREG(vha), offset, data, buf);
return qla27xx_next_entry(ent);
}
@@ -282,13 +191,17 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ uint banksel = ent->t258.banksel_offset;
+ ulong bank = le32_to_cpu(ent->t258.bank);
+ ulong addr = le32_to_cpu(ent->t258.base_addr);
+ uint offset = ent->t258.pci_offset;
+ uint count = le16_to_cpu(ent->t258.reg_count);
+ uint width = ent->t258.reg_width;
ql_dbg(ql_dbg_misc, vha, 0xd202,
"%s: rdio t2 [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
- qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
- ent->t258.reg_count, ent->t258.reg_width, buf, len);
+ qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
+ qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);
return qla27xx_next_entry(ent);
}
@@ -297,13 +210,17 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong addr = le32_to_cpu(ent->t259.base_addr);
+ uint banksel = ent->t259.banksel_offset;
+ ulong bank = le32_to_cpu(ent->t259.bank);
+ uint offset = ent->t259.pci_offset;
+ ulong data = le32_to_cpu(ent->t259.write_data);
ql_dbg(ql_dbg_misc, vha, 0xd203,
"%s: wrio t2 [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
- qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
- qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
+ qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
+ qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
+ qla27xx_write_reg(ISPREG(vha), offset, data, buf);
return qla27xx_next_entry(ent);
}
@@ -312,12 +229,12 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ uint offset = ent->t260.pci_offset;
ql_dbg(ql_dbg_misc, vha, 0xd204,
"%s: rdpci [%lx]\n", __func__, *len);
- qla27xx_insert32(ent->t260.pci_offset, buf, len);
- qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
+ qla27xx_insert32(offset, buf, len);
+ qla27xx_read_reg(ISPREG(vha), offset, buf, len);
return qla27xx_next_entry(ent);
}
@@ -326,11 +243,12 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ uint offset = ent->t261.pci_offset;
+ ulong data = le32_to_cpu(ent->t261.write_data);
ql_dbg(ql_dbg_misc, vha, 0xd205,
"%s: wrpci [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
+ qla27xx_write_reg(ISPREG(vha), offset, data, buf);
return qla27xx_next_entry(ent);
}
@@ -339,51 +257,50 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
+ uint area = ent->t262.ram_area;
+ ulong start = le32_to_cpu(ent->t262.start_addr);
+ ulong end = le32_to_cpu(ent->t262.end_addr);
ulong dwords;
- ulong start;
- ulong end;
ql_dbg(ql_dbg_misc, vha, 0xd206,
"%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
- start = ent->t262.start_addr;
- end = ent->t262.end_addr;
- if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
+ if (area == T262_RAM_AREA_CRITICAL_RAM) {
;
- } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
+ } else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
end = vha->hw->fw_memory_size;
if (buf)
- ent->t262.end_addr = end;
- } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
+ ent->t262.end_addr = cpu_to_le32(end);
+ } else if (area == T262_RAM_AREA_SHARED_RAM) {
start = vha->hw->fw_shared_ram_start;
end = vha->hw->fw_shared_ram_end;
if (buf) {
- ent->t262.start_addr = start;
- ent->t262.end_addr = end;
+ ent->t262.start_addr = cpu_to_le32(start);
+ ent->t262.end_addr = cpu_to_le32(end);
}
- } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
+ } else if (area == T262_RAM_AREA_DDR_RAM) {
start = vha->hw->fw_ddr_ram_start;
end = vha->hw->fw_ddr_ram_end;
if (buf) {
- ent->t262.start_addr = start;
- ent->t262.end_addr = end;
+ ent->t262.start_addr = cpu_to_le32(start);
+ ent->t262.end_addr = cpu_to_le32(end);
}
- } else if (ent->t262.ram_area == T262_RAM_AREA_MISC) {
+ } else if (area == T262_RAM_AREA_MISC) {
if (buf) {
- ent->t262.start_addr = start;
- ent->t262.end_addr = end;
+ ent->t262.start_addr = cpu_to_le32(start);
+ ent->t262.end_addr = cpu_to_le32(end);
}
} else {
ql_dbg(ql_dbg_misc, vha, 0xd022,
- "%s: unknown area %x\n", __func__, ent->t262.ram_area);
+ "%s: unknown area %x\n", __func__, area);
qla27xx_skip_entry(ent, buf);
goto done;
}
if (end < start || start == 0 || end == 0) {
ql_dbg(ql_dbg_misc, vha, 0xd023,
- "%s: unusable range (start=%x end=%x)\n", __func__,
- ent->t262.end_addr, ent->t262.start_addr);
+ "%s: unusable range (start=%lx end=%lx)\n",
+ __func__, start, end);
qla27xx_skip_entry(ent, buf);
goto done;
}
@@ -402,13 +319,14 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
+ uint type = ent->t263.queue_type;
uint count = 0;
uint i;
uint length;
- ql_dbg(ql_dbg_misc, vha, 0xd207,
- "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
- if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
+ "%s: getq(%x) [%lx]\n", __func__, type, *len);
+ if (type == T263_QUEUE_TYPE_REQ) {
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
@@ -422,7 +340,7 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
count++;
}
}
- } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+ } else if (type == T263_QUEUE_TYPE_RSP) {
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
@@ -450,7 +368,7 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
}
} else {
ql_dbg(ql_dbg_misc, vha, 0xd026,
- "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
+ "%s: unknown queue %x\n", __func__, type);
qla27xx_skip_entry(ent, buf);
}
@@ -496,12 +414,10 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-
- ql_dbg(ql_dbg_misc, vha, 0xd209,
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
"%s: pause risc [%lx]\n", __func__, *len);
if (buf)
- qla24xx_pause_risc(reg, vha->hw);
+ qla24xx_pause_risc(ISPREG(vha), vha->hw);
return qla27xx_next_entry(ent);
}
@@ -522,11 +438,12 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ uint offset = ent->t267.pci_offset;
+ ulong data = le32_to_cpu(ent->t267.data);
ql_dbg(ql_dbg_misc, vha, 0xd20b,
"%s: dis intr [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
+ qla27xx_write_reg(ISPREG(vha), offset, data, buf);
return qla27xx_next_entry(ent);
}
@@ -622,17 +539,16 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
- ulong dwords = ent->t270.count;
- ulong addr = ent->t270.addr;
+ ulong addr = le32_to_cpu(ent->t270.addr);
+ ulong dwords = le32_to_cpu(ent->t270.count);
ql_dbg(ql_dbg_misc, vha, 0xd20e,
"%s: rdremreg [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+ qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
while (dwords--) {
- qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
+ qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
qla27xx_insert32(addr, buf, len);
- qla27xx_read_reg(reg, 0xc4, buf, len);
+ qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
addr += sizeof(uint32_t);
}
@@ -643,15 +559,14 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
- ulong addr = ent->t271.addr;
- ulong data = ent->t271.data;
+ ulong addr = le32_to_cpu(ent->t271.addr);
+ ulong data = le32_to_cpu(ent->t271.data);
ql_dbg(ql_dbg_misc, vha, 0xd20f,
"%s: wrremreg [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
- qla27xx_write_reg(reg, 0xc4, data, buf);
- qla27xx_write_reg(reg, 0xc0, addr, buf);
+ qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
+ qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
+ qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);
return qla27xx_next_entry(ent);
}
@@ -660,8 +575,8 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- ulong dwords = ent->t272.count;
- ulong start = ent->t272.addr;
+ ulong dwords = le32_to_cpu(ent->t272.count);
+ ulong start = le32_to_cpu(ent->t272.addr);
ql_dbg(ql_dbg_misc, vha, 0xd210,
"%s: rdremram [%lx]\n", __func__, *len);
@@ -680,8 +595,8 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- ulong dwords = ent->t273.count;
- ulong addr = ent->t273.addr;
+ ulong dwords = le32_to_cpu(ent->t273.count);
+ ulong addr = le32_to_cpu(ent->t273.addr);
uint32_t value;
ql_dbg(ql_dbg_misc, vha, 0xd211,
@@ -703,12 +618,13 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
+ ulong type = ent->t274.queue_type;
uint count = 0;
uint i;
- ql_dbg(ql_dbg_misc, vha, 0xd212,
- "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
- if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
+ "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
+ if (type == T274_QUEUE_TYPE_REQ_SHAD) {
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
@@ -720,7 +636,7 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
count++;
}
}
- } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+ } else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
@@ -746,7 +662,7 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
}
} else {
ql_dbg(ql_dbg_misc, vha, 0xd02f,
- "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
+ "%s: unknown queue %lx\n", __func__, type);
qla27xx_skip_entry(ent, buf);
}
@@ -765,23 +681,26 @@ qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
ulong offset = offsetof(typeof(*ent), t275.buffer);
+ ulong length = le32_to_cpu(ent->t275.length);
+ ulong size = le32_to_cpu(ent->hdr.size);
+ void *buffer = ent->t275.buffer;
- ql_dbg(ql_dbg_misc, vha, 0xd213,
- "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
- if (!ent->t275.length) {
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
+ "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
+ if (!length) {
ql_dbg(ql_dbg_misc, vha, 0xd020,
"%s: buffer zero length\n", __func__);
qla27xx_skip_entry(ent, buf);
goto done;
}
- if (offset + ent->t275.length > ent->hdr.size) {
+ if (offset + length > size) {
+ length = size - offset;
ql_dbg(ql_dbg_misc, vha, 0xd030,
- "%s: buffer overflow\n", __func__);
- qla27xx_skip_entry(ent, buf);
- goto done;
+ "%s: buffer overflow, truncate [%lx]\n", __func__, length);
+ ent->t275.length = cpu_to_le32(length);
}
- qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
+ qla27xx_insertbuf(buffer, length, buf, len);
done:
return qla27xx_next_entry(ent);
}
@@ -790,15 +709,22 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- uint type = vha->hw->pdev->device >> 4 & 0xf;
- uint func = vha->hw->port_no & 0x3;
-
ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
"%s: cond [%lx]\n", __func__, *len);
- if (type != ent->t276.cond1 || func != ent->t276.cond2) {
- ent = qla27xx_next_entry(ent);
- qla27xx_skip_entry(ent, buf);
+ if (buf) {
+ ulong cond1 = le32_to_cpu(ent->t276.cond1);
+ ulong cond2 = le32_to_cpu(ent->t276.cond2);
+ uint type = vha->hw->pdev->device >> 4 & 0xf;
+ uint func = vha->hw->port_no & 0x3;
+
+ if (type != cond1 || func != cond2) {
+ struct qla27xx_fwdt_template *tmp = buf;
+
+ tmp->count--;
+ ent = qla27xx_next_entry(ent);
+ qla27xx_skip_entry(ent, buf);
+ }
}
return qla27xx_next_entry(ent);
@@ -808,13 +734,15 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
+ ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
+ ulong data_addr = le32_to_cpu(ent->t277.data_addr);
ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
"%s: rdpep [%lx]\n", __func__, *len);
- qla27xx_insert32(ent->t277.wr_cmd_data, buf, len);
- qla27xx_write_reg(reg, ent->t277.cmd_addr, ent->t277.wr_cmd_data, buf);
- qla27xx_read_reg(reg, ent->t277.data_addr, buf, len);
+ qla27xx_insert32(wr_cmd_data, buf, len);
+ qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
+ qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);
return qla27xx_next_entry(ent);
}
@@ -823,12 +751,15 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
- struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
+ ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
+ ulong data_addr = le32_to_cpu(ent->t278.data_addr);
+ ulong wr_data = le32_to_cpu(ent->t278.wr_data);
ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
"%s: wrpep [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, ent->t278.data_addr, ent->t278.wr_data, buf);
- qla27xx_write_reg(reg, ent->t278.cmd_addr, ent->t278.wr_cmd_data, buf);
+ qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
+ qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
return qla27xx_next_entry(ent);
}
@@ -837,8 +768,10 @@ static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
+ ulong type = le32_to_cpu(ent->hdr.type);
+
ql_dbg(ql_dbg_misc, vha, 0xd2ff,
- "%s: type %x [%lx]\n", __func__, ent->hdr.type, *len);
+ "%s: other %lx [%lx]\n", __func__, type, *len);
qla27xx_skip_entry(ent, buf);
return qla27xx_next_entry(ent);
@@ -893,36 +826,27 @@ static void
qla27xx_walk_template(struct scsi_qla_host *vha,
struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
- struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
- ulong count = tmp->entry_count;
+ struct qla27xx_fwdt_entry *ent = (void *)tmp +
+ le32_to_cpu(tmp->entry_offset);
+ ulong type;
+ tmp->count = le32_to_cpu(tmp->entry_count);
ql_dbg(ql_dbg_misc, vha, 0xd01a,
- "%s: entry count %lx\n", __func__, count);
- while (count--) {
- ent = qla27xx_find_entry(ent->hdr.type)(vha, ent, buf, len);
+ "%s: entry count %u\n", __func__, tmp->count);
+ while (ent && tmp->count--) {
+ type = le32_to_cpu(ent->hdr.type);
+ ent = qla27xx_find_entry(type)(vha, ent, buf, len);
if (!ent)
break;
}
- if (count)
+ if (tmp->count)
ql_dbg(ql_dbg_misc, vha, 0xd018,
- "%s: entry residual count (%lx)\n", __func__, count);
+ "%s: entry count residual=+%u\n", __func__, tmp->count);
if (ent)
ql_dbg(ql_dbg_misc, vha, 0xd019,
- "%s: missing end entry (%lx)\n", __func__, count);
-
- if (buf && *len != vha->hw->fw_dump_len)
- ql_dbg(ql_dbg_misc, vha, 0xd01b,
- "%s: length=%#lx residual=%+ld\n",
- __func__, *len, vha->hw->fw_dump_len - *len);
-
- if (buf) {
- ql_log(ql_log_warn, vha, 0xd015,
- "Firmware dump saved to temp buffer (%lu/%p)\n",
- vha->host_no, vha->hw->fw_dump);
- qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
- }
+ "%s: missing end entry\n", __func__);
}
static void
@@ -945,8 +869,8 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
}
static void
-qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
- struct scsi_qla_host *vha)
+qla27xx_firmware_info(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_template *tmp)
{
tmp->firmware_version[0] = vha->hw->fw_major_version;
tmp->firmware_version[1] = vha->hw->fw_minor_version;
@@ -963,19 +887,19 @@ ql27xx_edit_template(struct scsi_qla_host *vha,
{
qla27xx_time_stamp(tmp);
qla27xx_driver_info(tmp);
- qla27xx_firmware_info(tmp, vha);
+ qla27xx_firmware_info(vha, tmp);
}
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
- uint32_t *buf = p;
+ __le32 *buf = p;
uint64_t sum = 0;
size /= sizeof(*buf);
- while (size--)
- sum += *buf++;
+ for ( ; size--; buf++)
+ sum += le32_to_cpu(*buf);
sum = (sum & 0xffffffff) + (sum >> 32);
@@ -991,29 +915,29 @@ qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
- return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
+ return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
}
-static void
-qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+static ulong
+qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_template *tmp, void *buf)
{
- struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
- ulong len;
+ ulong len = 0;
if (qla27xx_fwdt_template_valid(tmp)) {
len = tmp->template_size;
- tmp = memcpy(vha->hw->fw_dump, tmp, len);
+ tmp = memcpy(buf, tmp, len);
ql27xx_edit_template(vha, tmp);
- qla27xx_walk_template(vha, tmp, tmp, &len);
- vha->hw->fw_dump_len = len;
- vha->hw->fw_dumped = 1;
+ qla27xx_walk_template(vha, tmp, buf, &len);
}
+
+ return len;
}
ulong
-qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
{
- struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+ struct qla27xx_fwdt_template *tmp = p;
ulong len = 0;
if (qla27xx_fwdt_template_valid(tmp)) {
@@ -1032,18 +956,6 @@ qla27xx_fwdt_template_size(void *p)
return tmp->template_size;
}
-ulong
-qla27xx_fwdt_template_default_size(void)
-{
- return sizeof(ql27xx_fwdt_default_template);
-}
-
-const void *
-qla27xx_fwdt_template_default(void)
-{
- return ql27xx_fwdt_default_template;
-}
-
int
qla27xx_fwdt_template_valid(void *p)
{
@@ -1051,7 +963,8 @@ qla27xx_fwdt_template_valid(void *p)
if (!qla27xx_verify_template_header(tmp)) {
ql_log(ql_log_warn, NULL, 0xd01c,
- "%s: template type %x\n", __func__, tmp->template_type);
+ "%s: template type %x\n", __func__,
+ le32_to_cpu(tmp->template_type));
return false;
}
@@ -1074,17 +987,41 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif
- if (!vha->hw->fw_dump)
- ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
- else if (!vha->hw->fw_dump_template)
- ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
- else if (vha->hw->fw_dumped)
- ql_log(ql_log_warn, vha, 0xd300,
- "Firmware has been previously dumped (%p),"
- " -- ignoring request\n", vha->hw->fw_dump);
- else {
- QLA_FW_STOPPED(vha->hw);
- qla27xx_execute_fwdt_template(vha);
+ if (!vha->hw->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
+ } else if (vha->hw->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd01f,
+ "-> Firmware already dumped (%p) -- ignoring request\n",
+ vha->hw->fw_dump);
+ } else {
+ struct fwdt *fwdt = vha->hw->fwdt;
+ uint j;
+ ulong len;
+ void *buf = vha->hw->fw_dump;
+
+ for (j = 0; j < 2; j++, fwdt++, buf += len) {
+ ql_log(ql_log_warn, vha, 0xd011,
+ "-> fwdt%u running...\n", j);
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0xd012,
+ "-> fwdt%u no template\n", j);
+ break;
+ }
+ len = qla27xx_execute_fwdt_template(vha,
+ fwdt->template, buf);
+ if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "-> fwdt%u fwdump residual=%+ld\n",
+ j, fwdt->dump_size - len);
+ }
+ }
+ vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
+ vha->hw->fw_dumped = 1;
+
+ ql_log(ql_log_warn, vha, 0xd015,
+ "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
+ vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
#ifndef __CHECKER__
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 5c2c2a8a19c4..d2a0014e8b21 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -11,12 +11,12 @@
#define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr)
struct __packed qla27xx_fwdt_template {
- uint32_t template_type;
- uint32_t entry_offset;
+ __le32 template_type;
+ __le32 entry_offset;
uint32_t template_size;
- uint32_t reserved_1;
+ uint32_t count; /* borrow field for running/residual count */
- uint32_t entry_count;
+ __le32 entry_count;
uint32_t template_version;
uint32_t capture_timestamp;
uint32_t template_checksum;
@@ -65,8 +65,8 @@ struct __packed qla27xx_fwdt_template {
struct __packed qla27xx_fwdt_entry {
struct __packed {
- uint32_t type;
- uint32_t size;
+ __le32 type;
+ __le32 size;
uint32_t reserved_1;
uint8_t capture_flags;
@@ -81,36 +81,36 @@ struct __packed qla27xx_fwdt_entry {
} t255;
struct __packed {
- uint32_t base_addr;
+ __le32 base_addr;
uint8_t reg_width;
- uint16_t reg_count;
+ __le16 reg_count;
uint8_t pci_offset;
} t256;
struct __packed {
- uint32_t base_addr;
- uint32_t write_data;
+ __le32 base_addr;
+ __le32 write_data;
uint8_t pci_offset;
uint8_t reserved[3];
} t257;
struct __packed {
- uint32_t base_addr;
+ __le32 base_addr;
uint8_t reg_width;
- uint16_t reg_count;
+ __le16 reg_count;
uint8_t pci_offset;
uint8_t banksel_offset;
uint8_t reserved[3];
- uint32_t bank;
+ __le32 bank;
} t258;
struct __packed {
- uint32_t base_addr;
- uint32_t write_data;
+ __le32 base_addr;
+ __le32 write_data;
uint8_t reserved[2];
uint8_t pci_offset;
uint8_t banksel_offset;
- uint32_t bank;
+ __le32 bank;
} t259;
struct __packed {
@@ -121,14 +121,14 @@ struct __packed qla27xx_fwdt_entry {
struct __packed {
uint8_t pci_offset;
uint8_t reserved[3];
- uint32_t write_data;
+ __le32 write_data;
} t261;
struct __packed {
uint8_t ram_area;
uint8_t reserved[3];
- uint32_t start_addr;
- uint32_t end_addr;
+ __le32 start_addr;
+ __le32 end_addr;
} t262;
struct __packed {
@@ -158,7 +158,7 @@ struct __packed qla27xx_fwdt_entry {
struct __packed {
uint8_t pci_offset;
uint8_t reserved[3];
- uint32_t data;
+ __le32 data;
} t267;
struct __packed {
@@ -173,23 +173,23 @@ struct __packed qla27xx_fwdt_entry {
} t269;
struct __packed {
- uint32_t addr;
- uint32_t count;
+ __le32 addr;
+ __le32 count;
} t270;
struct __packed {
- uint32_t addr;
- uint32_t data;
+ __le32 addr;
+ __le32 data;
} t271;
struct __packed {
- uint32_t addr;
- uint32_t count;
+ __le32 addr;
+ __le32 count;
} t272;
struct __packed {
- uint32_t addr;
- uint32_t count;
+ __le32 addr;
+ __le32 count;
} t273;
struct __packed {
@@ -199,26 +199,26 @@ struct __packed qla27xx_fwdt_entry {
} t274;
struct __packed {
- uint32_t length;
+ __le32 length;
uint8_t buffer[];
} t275;
struct __packed {
- uint32_t cond1;
- uint32_t cond2;
+ __le32 cond1;
+ __le32 cond2;
} t276;
struct __packed {
- uint32_t cmd_addr;
- uint32_t wr_cmd_data;
- uint32_t data_addr;
+ __le32 cmd_addr;
+ __le32 wr_cmd_data;
+ __le32 data_addr;
} t277;
struct __packed {
- uint32_t cmd_addr;
- uint32_t wr_cmd_data;
- uint32_t data_addr;
- uint32_t wr_data;
+ __le32 cmd_addr;
+ __le32 wr_cmd_data;
+ __le32 data_addr;
+ __le32 wr_data;
} t278;
};
};
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 0690dac24081..cd6bdf71e533 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.14-k"
+#define QLA2XXX_VERSION "10.01.00.16-k"
#define QLA_DRIVER_MAJOR_VER 10
-#define QLA_DRIVER_MINOR_VER 0
+#define QLA_DRIVER_MINOR_VER 1
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8a3075d17c63..ec9f1996b417 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -24,22 +24,16 @@
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -267,25 +261,17 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
- bool released = false;
- unsigned long flags;
cmd->cmd_in_wq = 0;
WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
- spin_lock_irqsave(&cmd->cmd_lock, flags);
+ /* To do: protect all tgt_counters manipulations with proper locking. */
cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
cmd->trc_flags |= TRC_CMD_FREE;
cmd->cmd_sent_to_fw = 0;
- if (cmd->released)
- released = true;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- if (released)
- qlt_free_cmd(cmd);
- else
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
}
/*
@@ -326,7 +312,6 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd;
- unsigned long flags;
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
@@ -336,14 +321,10 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
}
cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if (cmd->cmd_sent_to_fw) {
- cmd->released = 1;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- } else {
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_free_cmd(cmd);
- }
+ if (WARN_ON(cmd->cmd_sent_to_fw))
+ return;
+
+ qlt_free_cmd(cmd);
}
static void tcm_qla2xxx_release_session(struct kref *kref)
@@ -359,7 +340,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
if (!sess)
return;
- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
}
@@ -374,8 +354,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
target_sess_cmd_list_set_waiting(se_sess);
- tcm_qla2xxx_put_sess(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ tcm_qla2xxx_put_sess(sess);
}
static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
@@ -399,6 +380,8 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
cmd->se_cmd.transport_state,
cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
return 0;
}
cmd->trc_flags |= TRC_XFR_RDY;
@@ -488,32 +471,18 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
- unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
*/
cmd->cmd_in_wq = 0;
-
- spin_lock_irqsave(&cmd->cmd_lock, flags);
cmd->cmd_sent_to_fw = 0;
-
- if (cmd->released) {
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_free_cmd(cmd);
- return;
- }
-
- cmd->data_work = 1;
if (cmd->aborted) {
- cmd->data_work_free = 1;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
- tcm_qla2xxx_free_cmd(cmd);
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
return;
}
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
@@ -829,7 +798,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
{
- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
target_sess_cmd_list_set_waiting(sess->se_sess);
}
@@ -1489,7 +1457,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
*/
tpg = lport->tpg_1;
if (!tpg) {
- pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
+ pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
return -EINVAL;
}
/*
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 6e4f4931ae17..8c674eca09f1 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -5930,7 +5930,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
val = rd_nvram_byte(ha, sec_addr);
if (val & BIT_7)
ddb_index[1] = (val & 0x7f);
-
+ goto exit_boot_info;
} else if (is_qla80XX(ha)) {
buf = dma_alloc_coherent(&ha->pdev->dev, size,
&buf_dma, GFP_KERNEL);
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 8b471a925b43..136681ad18a5 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -139,7 +139,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
} else { /* out */
#if QL_TURBO_PDMA
rtrc(4)
- if (reqlen >= 128 && inb(qbase + 8) & 0x10) { /* empty */
+ if (reqlen >= 128 && inb(qbase + 8) & 0x10) { /* empty */
outsl(qbase + 4, request, 32);
reqlen -= 128;
request += 128;
@@ -240,7 +240,7 @@ static void ql_icmd(struct scsi_cmnd *cmd)
outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8);
outb(qlcfg7, qbase + 7);
outb(qlcfg6, qbase + 6);
- /**/ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg5, qbase + 5); /* select timer */
outb(qlcfg9 & 7, qbase + 9); /* prescaler */
/* outb(0x99, qbase + 5); */
outb(scmd_id(cmd), qbase + 4);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c4cbfd07b916..a08ff3bd6310 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -238,6 +238,7 @@ static struct {
{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5a58cbf3a75d..c14006ac98f9 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"NETAPP", "INF-01-00", "rdac", },
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
+ {"LENOVO", "DE_Series", "rdac", },
{NULL, NULL, NULL },
};
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1b8378f36139..8e9680572b9f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2393,7 +2393,6 @@ out_put_autopm_host:
scsi_autopm_put_host(shost);
return error;
}
-EXPORT_SYMBOL(scsi_ioctl_reset);
bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 601b9f1de267..0916bd6d22b0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -141,8 +141,6 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev = cmd->device;
-
if (cmd->request->rq_flags & RQF_DONTPREP) {
cmd->request->rq_flags &= ~RQF_DONTPREP;
scsi_mq_uninit_cmd(cmd);
@@ -150,7 +148,6 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
WARN_ON_ONCE(true);
}
blk_mq_requeue_request(cmd->request, true);
- put_device(&sdev->sdev_gendev);
}
/**
@@ -189,19 +186,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
*/
cmd->result = 0;
- /*
- * Before a SCSI command is dispatched,
- * get_device(&sdev->sdev_gendev) is called and the host,
- * target and device busy counters are increased. Since
- * requeuing a request causes these actions to be repeated and
- * since scsi_device_unbusy() has already been called,
- * put_device(&device->sdev_gendev) must still be called. Call
- * put_device() after blk_mq_requeue_request() to avoid that
- * removal of the SCSI device can start before requeueing has
- * happened.
- */
blk_mq_requeue_request(cmd->request, true);
- put_device(&device->sdev_gendev);
}
/*
@@ -619,7 +604,6 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
blk_mq_run_hw_queues(q, true);
percpu_ref_put(&q->q_usage_counter);
- put_device(&sdev->sdev_gendev);
return false;
}
@@ -1613,7 +1597,6 @@ static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
struct scsi_device *sdev = q->queuedata;
atomic_dec(&sdev->device_busy);
- put_device(&sdev->sdev_gendev);
}
static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
@@ -1621,16 +1604,9 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
- if (!get_device(&sdev->sdev_gendev))
- goto out;
- if (!scsi_dev_queue_ready(q, sdev))
- goto out_put_device;
-
- return true;
+ if (scsi_dev_queue_ready(q, sdev))
+ return true;
-out_put_device:
- put_device(&sdev->sdev_gendev);
-out:
if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
return false;
@@ -1706,8 +1682,12 @@ out_put_budget:
ret = BLK_STS_DEV_RESOURCE;
break;
default:
+ if (unlikely(!scsi_device_online(sdev)))
+ scsi_req(req)->result = DID_NO_CONNECT << 16;
+ else
+ scsi_req(req)->result = DID_ERROR << 16;
/*
- * Make sure to release all allocated ressources when
+ * Make sure to release all allocated resources when
* we hit an error, as we will never see this command
* again.
*/
@@ -1766,7 +1746,7 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
if (shost->hostt->map_queues)
return shost->hostt->map_queues(shost);
- return blk_mq_map_queues(&set->map[0]);
+ return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 53380e07b40e..058079f915f1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1129,7 +1129,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* that no LUN is present, so don't add sdev in these cases.
* Two specific examples are:
* 1) NetApp targets: return PQ=1, PDT=0x1f
- * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+ * 2) IBM/2145 targets: return PQ=1, PDT=0
+ * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
* in the UFI 1.0 spec (we cannot rely on reserved bits).
*
* References:
@@ -1143,8 +1144,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* PDT=00h Direct-access device (floppy)
* PDT=1Fh none (no FDD connected to the requested logical unit)
*/
- if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
- (result[0] & 0x1f) == 0x1f &&
+ if (((result[0] >> 5) == 1 ||
+ (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
!scsi_is_wlun(lun)) {
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: peripheral device type"
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index d7035270d274..d9e3cf3721f6 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -147,6 +147,7 @@ static const struct {
{ FCH_EVT_PORT_OFFLINE, "port_offline" },
{ FCH_EVT_PORT_FABRIC, "port_fabric" },
{ FCH_EVT_LINK_UNKNOWN, "link_unknown" },
+ { FCH_EVT_LINK_FPIN, "link_FPIN" },
{ FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
};
fc_enum_name_search(host_event_code, fc_host_event_code,
@@ -295,6 +296,9 @@ static const struct {
{ FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
{ FC_PORT_ROLE_IP_PORT, "IP Port" },
{ FC_PORT_ROLE_FCP_DUMMY_INITIATOR, "FCP Dummy Initiator" },
+ { FC_PORT_ROLE_NVME_INITIATOR, "NVMe Initiator" },
+ { FC_PORT_ROLE_NVME_TARGET, "NVMe Target" },
+ { FC_PORT_ROLE_NVME_DISCOVERY, "NVMe Discovery" },
};
fc_bitfield_name_search(port_roles, fc_port_role_names)
@@ -523,20 +527,23 @@ fc_get_event_number(void)
}
EXPORT_SYMBOL(fc_get_event_number);
-
/**
- * fc_host_post_event - called to post an even on an fc_host.
+ * fc_host_post_fc_event - routine to do the work of posting an event
+ * on an fc_host.
* @shost: host the event occurred on
* @event_number: fc event number obtained from get_fc_event_number()
* @event_code: fc_host event being posted
- * @event_data: 32bits of data for the event being posted
+ * @data_len: amount, in bytes, of event data
+ * @data_buf: pointer to event data
+ * @vendor_id: value for Vendor id
*
* Notes:
* This routine assumes no locks are held on entry.
*/
void
-fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
- enum fc_host_event_code event_code, u32 event_data)
+fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
+ enum fc_host_event_code event_code,
+ u32 data_len, char *data_buf, u64 vendor_id)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
@@ -545,12 +552,15 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
u32 len;
int err;
+ if (!data_buf || data_len < 4)
+ data_len = 0;
+
if (!scsi_nl_sock) {
err = -ENOENT;
goto send_fail;
}
- len = FC_NL_MSGALIGN(sizeof(*event));
+ len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
skb = nlmsg_new(len, GFP_KERNEL);
if (!skb) {
@@ -568,12 +578,13 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
event->seconds = ktime_get_real_seconds();
- event->vendor_id = 0;
+ event->vendor_id = vendor_id;
event->host_no = shost->host_no;
- event->event_datalen = sizeof(u32); /* bytes */
+ event->event_datalen = data_len; /* bytes */
event->event_num = event_number;
event->event_code = event_code;
- event->event_data = event_data;
+ if (data_len)
+ memcpy(&event->event_data, data_buf, data_len);
nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
GFP_KERNEL);
@@ -586,14 +597,35 @@ send_fail:
printk(KERN_WARNING
"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
__func__, shost->host_no,
- (name) ? name : "<unknown>", event_data, err);
+ (name) ? name : "<unknown>",
+ (data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err);
return;
}
+EXPORT_SYMBOL(fc_host_post_fc_event);
+
+/**
+ * fc_host_post_event - called to post an event on an fc_host.
+ * @shost: host the event occurred on
+ * @event_number: fc event number obtained from get_fc_event_number()
+ * @event_code: fc_host event being posted
+ * @event_data: 32 bits of data for the event being posted
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+void
+fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
+ enum fc_host_event_code event_code, u32 event_data)
+{
+ fc_host_post_fc_event(shost, event_number, event_code,
+ (u32)sizeof(u32), (char *)&event_data, 0);
+}
EXPORT_SYMBOL(fc_host_post_event);
/**
- * fc_host_post_vendor_event - called to post a vendor unique event on an fc_host
+ * fc_host_post_vendor_event - called to post a vendor unique event
+ * on an fc_host
* @shost: host the event occurred on
* @event_number: fc event number obtained from get_fc_event_number()
* @data_len: amount, in bytes, of vendor unique data
@@ -607,56 +639,27 @@ void
fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
u32 data_len, char * data_buf, u64 vendor_id)
{
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
- struct fc_nl_event *event;
- u32 len;
- int err;
-
- if (!scsi_nl_sock) {
- err = -ENOENT;
- goto send_vendor_fail;
- }
-
- len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
-
- skb = nlmsg_new(len, GFP_KERNEL);
- if (!skb) {
- err = -ENOBUFS;
- goto send_vendor_fail;
- }
-
- nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
- if (!nlh) {
- err = -ENOBUFS;
- goto send_vendor_fail_skb;
- }
- event = nlmsg_data(nlh);
-
- INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
- FC_NL_ASYNC_EVENT, len);
- event->seconds = ktime_get_real_seconds();
- event->vendor_id = vendor_id;
- event->host_no = shost->host_no;
- event->event_datalen = data_len; /* bytes */
- event->event_num = event_number;
- event->event_code = FCH_EVT_VENDOR_UNIQUE;
- memcpy(&event->event_data, data_buf, data_len);
-
- nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
- GFP_KERNEL);
- return;
-
-send_vendor_fail_skb:
- kfree_skb(skb);
-send_vendor_fail:
- printk(KERN_WARNING
- "%s: Dropped Event : host %d vendor_unique - err %d\n",
- __func__, shost->host_no, err);
- return;
+ fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE,
+ data_len, data_buf, vendor_id);
}
EXPORT_SYMBOL(fc_host_post_vendor_event);
+/**
+ * fc_host_fpin_rcv - routine to process a received FPIN.
+ * @shost: host the FPIN was received on
+ * @fpin_len: length of FPIN payload, in bytes
+ * @fpin_buf: pointer to FPIN payload
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+void
+fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
+{
+ fc_host_post_fc_event(shost, fc_get_event_number(),
+ FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
+}
+EXPORT_SYMBOL(fc_host_fpin_rcv);
static __init int fc_transport_init(void)
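The hunks above consolidate event posting into fc_host_post_fc_event() and add fc_host_fpin_rcv() as a thin wrapper around it. A minimal usage sketch from an LLDD's perspective, assuming a valid shost and a received FPIN of fpin_len bytes in fpin_buf (the local function and buffer names are illustrative only):

	/* Sketch: report a link event and a received FPIN via the new helpers. */
	static void example_report_fc_events(struct Scsi_Host *shost,
					     char *fpin_buf, u32 fpin_len)
	{
		/* Fixed 32-bit payload; routed through fc_host_post_fc_event(). */
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKUP, 0);

		/*
		 * Variable-length FPIN payload; fetches its own event number
		 * and posts FCH_EVT_LINK_FPIN internally.
		 */
		fc_host_fpin_rcv(shost, fpin_len, fpin_buf);
	}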
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2b2bc4b49d78..ebc80354714c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2256,22 +2256,6 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
-/*
- * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
- * and the reported logical block size is bigger than 512 bytes. Note
- * that last_sector is a u64 and therefore logical_to_sectors() is not
- * applicable.
- */
-static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
-{
- u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
-
- if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
- return false;
-
- return true;
-}
-
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -2337,14 +2321,6 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -ENODEV;
}
- if (!sd_addressable_capacity(lba, sector_size)) {
- sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
- "kernel compiled with support for large block "
- "devices.\n");
- sdkp->capacity = 0;
- return -EOVERFLOW;
- }
-
/* Logical blocks per physical block exponent */
sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
@@ -2426,14 +2402,6 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size;
}
- if (!sd_addressable_capacity(lba, sector_size)) {
- sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
- "kernel compiled with support for large block "
- "devices.\n");
- sdkp->capacity = 0;
- return -EOVERFLOW;
- }
-
sdkp->capacity = lba + 1;
sdkp->physical_block_size = sector_size;
return sector_size;
@@ -3325,6 +3293,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
gd->events |= DISK_EVENT_MEDIA_CHANGE;
+ gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
}
blk_pm_runtime_init(sdp->request_queue, dev);
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index a03a6edb0060..28985e508b5c 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index af962368818b..e8e768849c70 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,18 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016-2017 Microsemi Corporation
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ * Questions/Comments/Bugfixes to storagedev@microchip.com
*
*/
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 75ec43aa8df3..c26cac819f9e 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016-2017 Microsemi Corporation
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ * Questions/Comments/Bugfixes to storagedev@microchip.com
*
*/
@@ -40,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.4-070"
+#define DRIVER_VERSION "1.2.6-015"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 4
-#define DRIVER_REVISION 70
+#define DRIVER_RELEASE 6
+#define DRIVER_REVISION 15
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -5660,9 +5653,11 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
return rc;
}
+/* Performs a reset at the LUN level. */
+
#define PQI_LUN_RESET_RETRIES 3
#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
-/* Performs a reset at the LUN level. */
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
@@ -5673,12 +5668,12 @@ static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
for (retries = 0;;) {
rc = pqi_lun_reset(ctrl_info, device);
- if (rc != -EAGAIN ||
- ++retries > PQI_LUN_RESET_RETRIES)
+ if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
- timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT;
+
+ timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
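The retry loop above bounds LUN reset attempts and then waits for pending I/O with a timeout that depends on the reset outcome. A self-contained sketch of the same retry-on-EAGAIN shape, with illustrative constants and a plain sleep() standing in for msleep():

	#include <errno.h>
	#include <unistd.h>

	#define RESET_RETRIES			3
	#define RESET_RETRY_INTERVAL_SECS	10

	/* try_reset() is a stand-in for the device-specific reset primitive. */
	static int retry_reset(int (*try_reset)(void))
	{
		int rc, retries;

		for (retries = 0;;) {
			rc = try_reset();
			if (rc != -EAGAIN || ++retries > RESET_RETRIES)
				break;
			sleep(RESET_RETRY_INTERVAL_SECS);
		}
		return rc;
	}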
@@ -5707,6 +5702,7 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
pqi_device_reset_done(device);
mutex_unlock(&ctrl_info->lun_reset_mutex);
+
return rc;
}
@@ -5737,6 +5733,7 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
pqi_wait_until_ofa_finished(ctrl_info);
rc = pqi_device_reset(ctrl_info, device);
+
out:
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
@@ -5795,7 +5792,7 @@ static int pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- return blk_mq_pci_map_queues(&shost->tag_set.map[0],
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
ctrl_info->pci_dev, 0);
}
@@ -7948,6 +7945,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1104)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1105)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1106)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1107)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 0e4ef215115f..5cca1b9ef1f1 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016-2017 Microsemi Corporation
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ * Questions/Comments/Bugfixes to storagedev@microchip.com
*
*/
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index dcd11c6418cc..f0d6e88ba2c1 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016-2017 Microsemi Corporation
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ * Questions/Comments/Bugfixes to storagedev@microchip.com
*
*/
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index d018cb9c3f82..86b0e484d921 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,18 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016-2017 Microsemi Corporation
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ * Questions/Comments/Bugfixes to storagedev@microchip.com
*
*/
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 039c27c2d7b3..c3f443d5aea8 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -716,6 +716,7 @@ static int sr_probe(struct device *dev)
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
+ disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 19c022e66d63..3c6a18ad9a87 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,7 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
/* Try to fault in all of the necessary pages */
/* rw==READ means read from drive, write into memory area */
- res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
+ res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
+ pages);
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 84380bae20f1..8472de1007ff 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -385,7 +385,7 @@ enum storvsc_request_type {
* This is the end of Protocol specific defines.
*/
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
static u32 max_outstanding_req_per_channel;
static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
struct device *dev = &device->device;
struct storvsc_device *stor_device;
- int num_cpus = num_online_cpus();
int num_sc;
struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
- num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+ /*
+ * If the number of CPUs is artificially restricted, such as
+ * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+ * sub-channels >= the number of CPUs. These sub-channels
+ * should not be created. The primary channel is already created
+ * and assigned to one CPU, so check against # CPUs - 1.
+ */
+ num_sc = min((int)(num_online_cpus() - 1), max_chns);
+ if (!num_sc)
+ return;
+
stor_device = get_out_stor_device(device);
if (!stor_device)
return;
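The comment above explains why the sub-channel count is clamped to the number of online CPUs minus one (the primary channel already owns a CPU). A small standalone illustration of that arithmetic, using hypothetical values:

	#include <stdio.h>

	/* Illustration only: clamp offered sub-channels against online CPUs - 1. */
	static int storvsc_num_subchannels(int num_online_cpus, int max_chns)
	{
		int num_sc = num_online_cpus - 1;

		if (max_chns < num_sc)
			num_sc = max_chns;
		return num_sc > 0 ? num_sc : 0;
	}

	int main(void)
	{
		/* With maxcpus=1 no sub-channels are created; with 8 CPUs, at most 7. */
		printf("%d %d\n", storvsc_num_subchannels(1, 4),
		       storvsc_num_subchannels(8, 4));
		return 0;
	}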
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 0a2a54517b15..054fb0599263 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3072,6 +3072,7 @@ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb
sym_print_addr(cp->cmd, "%s\n",
s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n");
}
+ /* fall through */
default: /* S_INT, S_INT_COND_MET, S_CONFLICT */
sym_complete_error (np, cp);
break;
@@ -4632,6 +4633,7 @@ static void sym_int_sir(struct sym_hcb *np)
* Negotiation failed.
* Target does not want answer message.
*/
+ /* fall through */
case SIR_NEGO_PROTO:
sym_nego_default(np, tp, cp);
goto out;
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index 5662fbb3ff60..0d37b4f07b5e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -708,6 +708,7 @@ static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram)
data, len);
if (!x)
break;
+ /* fall through */
default:
x = sym_read_T93C46_nvram(np, nvram);
break;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 6db37cf306b0..0b845ab7c3bf 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -99,6 +99,7 @@ config SCSI_UFS_DWC_TC_PLATFORM
config SCSI_UFS_QCOM
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
+ select RESET_CONTROLLER
help
This selects the QCOM specific additions to UFSHCD platform driver.
UFS host on QCOM needs some vendor specific configuration before
@@ -108,6 +109,20 @@ config SCSI_UFS_QCOM
Select this if you have UFS controller on QCOM chipset.
If unsure, say N.
+config SCSI_UFS_MEDIATEK
+ tristate "Mediatek specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
+ select PHY_MTK_UFS
+ help
+ This selects the Mediatek specific additions to UFSHCD platform driver.
+ UFS host on Mediatek needs some vendor specific configuration before
+ accessing the hardware which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have UFS controller on Mediatek chipset.
+
+ If unsure, say N.
+
config SCSI_UFS_HISI
tristate "Hisilicon specific hooks to UFS controller platform driver"
depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index a3bd70c3652c..2a9097939bcb 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -10,3 +10,4 @@ ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
+obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 4a37b4f57164..86dbb723f3ac 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -17,7 +17,8 @@
#include "ufshcd-pltfrm.h"
-#define CDNS_UFS_REG_HCLKDIV 0xFC
+#define CDNS_UFS_REG_HCLKDIV 0xFC
+#define CDNS_UFS_REG_PHY_XCFGD1 0x113C
/**
* Sets HCLKDIV register value based on the core_clk
@@ -77,11 +78,66 @@ static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on,
return cdns_ufs_set_hclkdiv(hba);
}
-static struct ufs_hba_variant_ops cdns_pltfm_hba_vops = {
+/**
+ * cdns_ufs_init - performs additional ufs initialization
+ * @hba: host controller instance
+ *
+ * Returns status of initialization
+ */
+static int cdns_ufs_init(struct ufs_hba *hba)
+{
+ int status = 0;
+
+ if (hba->vops && hba->vops->phy_initialization)
+ status = hba->vops->phy_initialization(hba);
+
+ return status;
+}
+
+/**
+ * cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
+ * @hba: host controller instance
+ *
+ * Always returns 0
+ */
+static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
+{
+ u32 data;
+
+ /* Increase RX_Advanced_Min_ActivateTime_Capability */
+ data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1);
+ data |= BIT(24);
+ ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
+ .name = "cdns-ufs-pltfm",
+ .setup_clocks = cdns_ufs_setup_clocks,
+};
+
+static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
.name = "cdns-ufs-pltfm",
+ .init = cdns_ufs_init,
.setup_clocks = cdns_ufs_setup_clocks,
+ .phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+};
+
+static const struct of_device_id cdns_ufs_of_match[] = {
+ {
+ .compatible = "cdns,ufshc",
+ .data = &cdns_ufs_pltfm_hba_vops,
+ },
+ {
+ .compatible = "cdns,ufshc-m31-16nm",
+ .data = &cdns_ufs_m31_16nm_pltfm_hba_vops,
+ },
+ { },
};
+MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
+
/**
* cdns_ufs_pltfrm_probe - probe routine of the driver
* @pdev: pointer to platform device handle
@@ -91,10 +147,15 @@ static struct ufs_hba_variant_ops cdns_pltfm_hba_vops = {
static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
{
int err;
+ const struct of_device_id *of_id;
+ struct ufs_hba_variant_ops *vops;
struct device *dev = &pdev->dev;
+ of_id = of_match_node(cdns_ufs_of_match, dev->of_node);
+ vops = (struct ufs_hba_variant_ops *)of_id->data;
+
/* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, &cdns_pltfm_hba_vops);
+ err = ufshcd_pltfrm_init(pdev, vops);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
@@ -115,13 +176,6 @@ static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id cdns_ufs_of_match[] = {
- { .compatible = "cdns,ufshc" },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
-
static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
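The probe path above now picks the variant ops from the matched of_device_id's .data pointer rather than hard-coding a single ops table. A hedged, userspace-style sketch of the same per-compatible lookup pattern (the structure and table names here are illustrative; in the driver the table is cdns_ufs_of_match and the probe uses of_match_node() on dev->of_node):

	#include <string.h>

	/* Illustration of the per-compatible "ops in .data" pattern. */
	struct ops { const char *name; };

	struct match { const char *compatible; const void *data; };

	static const struct ops plain_ops = { "plain" };
	static const struct ops m31_ops   = { "m31-16nm" };

	static const struct match match_table[] = {
		{ .compatible = "cdns,ufshc",          .data = &plain_ops },
		{ .compatible = "cdns,ufshc-m31-16nm", .data = &m31_ops },
		{ }
	};

	static const struct ops *lookup_ops(const char *compatible)
	{
		const struct match *m;

		for (m = match_table; m->compatible; m++)
			if (!strcmp(m->compatible, compatible))
				return m->data;
		return NULL;
	}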
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 0e855b5afe82..7aed0a1a794e 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -293,108 +293,7 @@ static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
return err;
}
-struct ufs_hisi_dev_params {
- u32 pwm_rx_gear; /* pwm rx gear to work in */
- u32 pwm_tx_gear; /* pwm tx gear to work in */
- u32 hs_rx_gear; /* hs rx gear to work in */
- u32 hs_tx_gear; /* hs tx gear to work in */
- u32 rx_lanes; /* number of rx lanes */
- u32 tx_lanes; /* number of tx lanes */
- u32 rx_pwr_pwm; /* rx pwm working pwr */
- u32 tx_pwr_pwm; /* tx pwm working pwr */
- u32 rx_pwr_hs; /* rx hs working pwr */
- u32 tx_pwr_hs; /* tx hs working pwr */
- u32 hs_rate; /* rate A/B to work in HS */
- u32 desired_working_mode;
-};
-
-static int ufs_hisi_get_pwr_dev_param(
- struct ufs_hisi_dev_params *hisi_param,
- struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr)
-{
- int min_hisi_gear;
- int min_dev_gear;
- bool is_dev_sup_hs = false;
- bool is_hisi_max_hs = false;
-
- if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
- is_dev_sup_hs = true;
-
- if (hisi_param->desired_working_mode == FAST) {
- is_hisi_max_hs = true;
- min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear,
- hisi_param->hs_tx_gear);
- } else {
- min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear,
- hisi_param->pwm_tx_gear);
- }
-
- /*
- * device doesn't support HS but
- * hisi_param->desired_working_mode is HS,
- * thus device and hisi_param don't agree
- */
- if (!is_dev_sup_hs && is_hisi_max_hs) {
- pr_err("%s: device not support HS\n", __func__);
- return -ENOTSUPP;
- } else if (is_dev_sup_hs && is_hisi_max_hs) {
- /*
- * since device supports HS, it supports FAST_MODE.
- * since hisi_param->desired_working_mode is also HS
- * then final decision (FAST/FASTAUTO) is done according
- * to hisi_params as it is the restricting factor
- */
- agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
- hisi_param->rx_pwr_hs;
- } else {
- /*
- * here hisi_param->desired_working_mode is PWM.
- * it doesn't matter whether device supports HS or PWM,
- * in both cases hisi_param->desired_working_mode will
- * determine the mode
- */
- agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
- hisi_param->rx_pwr_pwm;
- }
-
- /*
- * we would like tx to work in the minimum number of lanes
- * between device capability and vendor preferences.
- * the same decision will be made for rx
- */
- agreed_pwr->lane_tx =
- min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes);
- agreed_pwr->lane_rx =
- min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes);
-
- /* device maximum gear is the minimum between device rx and tx gears */
- min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
- /*
- * if both device capabilities and vendor pre-defined preferences are
- * both HS or both PWM then set the minimum gear to be the chosen
- * working gear.
- * if one is PWM and one is HS then the one that is PWM get to decide
- * what is the gear, as it is the one that also decided previously what
- * pwr the device will be configured to.
- */
- if ((is_dev_sup_hs && is_hisi_max_hs) ||
- (!is_dev_sup_hs && !is_hisi_max_hs))
- agreed_pwr->gear_rx = agreed_pwr->gear_tx =
- min_t(u32, min_dev_gear, min_hisi_gear);
- else
- agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear;
-
- agreed_pwr->hs_rate = hisi_param->hs_rate;
-
- pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n",
- agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
- agreed_pwr->hs_rate);
- return 0;
-}
-
-static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
+static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
{
hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
@@ -477,7 +376,7 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
- struct ufs_hisi_dev_params ufs_hisi_cap;
+ struct ufs_dev_params ufs_hisi_cap;
int ret = 0;
if (!dev_req_params) {
@@ -490,8 +389,8 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
ufs_hisi_set_dev_cap(&ufs_hisi_cap);
- ret = ufs_hisi_get_pwr_dev_param(
- &ufs_hisi_cap, dev_max_params, dev_req_params);
+ ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
+ dev_max_params, dev_req_params);
if (ret) {
dev_err(hba->dev,
"%s: failed to determine capabilities\n", __func__);
@@ -587,6 +486,10 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
ufshcd_set_variant(hba, host);
host->rst = devm_reset_control_get(dev, "rst");
+ if (IS_ERR(host->rst)) {
+ dev_err(dev, "%s: failed to get reset control\n", __func__);
+ return PTR_ERR(host->rst);
+ }
ufs_hisi_set_pm_lvl(hba);
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
new file mode 100644
index 000000000000..0f6ff33ce52e
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Authors:
+ * Stanley Chu <stanley.chu@mediatek.com>
+ * Peter Wang <peter.wang@mediatek.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#include "ufshcd.h"
+#include "ufshcd-pltfrm.h"
+#include "unipro.h"
+#include "ufs-mediatek.h"
+
+static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
+{
+ u32 tmp;
+
+ if (enable) {
+ ufshcd_dme_get(hba,
+ UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+ tmp = tmp |
+ (1 << RX_SYMBOL_CLK_GATE_EN) |
+ (1 << SYS_CLK_GATE_EN) |
+ (1 << TX_CLK_GATE_EN);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+ ufshcd_dme_get(hba,
+ UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+ tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+ } else {
+ ufshcd_dme_get(hba,
+ UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+ tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
+ (1 << SYS_CLK_GATE_EN) |
+ (1 << TX_CLK_GATE_EN));
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+ ufshcd_dme_get(hba,
+ UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+ tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+ }
+}
+
+static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct device *dev = hba->dev;
+ struct device_node *np = dev->of_node;
+ int err = 0;
+
+ host->mphy = devm_of_phy_get_by_index(dev, np, 0);
+
+ if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
+ /*
+ * The UFS driver might be probed before the phy driver is.
+ * In that case, return -EPROBE_DEFER so the probe is retried later.
+ */
+ err = -EPROBE_DEFER;
+ dev_info(dev,
+ "%s: required phy hasn't probed yet. err = %d\n",
+ __func__, err);
+ } else if (IS_ERR(host->mphy)) {
+ err = PTR_ERR(host->mphy);
+ dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
+ }
+
+ if (err)
+ host->mphy = NULL;
+
+ return err;
+}
+
+/**
+ * ufs_mtk_setup_clocks - enables/disable clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int ret = -EINVAL;
+
+ /*
+ * If ufs_mtk_init() has not completed yet, simply ignore this call;
+ * ufs_mtk_setup_clocks() will be invoked again from
+ * ufs_mtk_init() once initialization is done.
+ */
+ if (!host)
+ return 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (!on)
+ ret = phy_power_off(host->mphy);
+ break;
+ case POST_CHANGE:
+ if (on)
+ ret = phy_power_on(host->mphy);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ufs_mtk_init - find other essential mmio bases
+ * @hba: host controller instance
+ *
+ * Binds PHY with controller and powers up PHY enabling clocks
+ * and regulators.
+ *
+ * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * on phy power up failure and returns zero on success.
+ */
+static int ufs_mtk_init(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host;
+ struct device *dev = hba->dev;
+ int err = 0;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ err = -ENOMEM;
+ dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
+ goto out;
+ }
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ err = ufs_mtk_bind_mphy(hba);
+ if (err)
+ goto out_variant_clear;
+
+ /*
+ * ufshcd_vops_init() is invoked after
+ * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
+ * phy clock setup is skipped.
+ *
+ * Enable phy clocks specifically here.
+ */
+ ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
+
+ goto out;
+
+out_variant_clear:
+ ufshcd_set_variant(hba, NULL);
+out:
+ return err;
+}
+
+static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_dev_params host_cap;
+ int ret;
+
+ host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
+ host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
+ host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
+ host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
+ host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
+ host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
+ host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
+ host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
+ host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
+ host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
+ host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
+ host_cap.desired_working_mode =
+ UFS_MTK_LIMIT_DESIRED_MODE;
+
+ ret = ufshcd_get_pwr_dev_param(&host_cap,
+ dev_max_params,
+ dev_req_params);
+ if (ret) {
+ pr_info("%s: failed to determine capabilities\n",
+ __func__);
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status stage,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ int ret = 0;
+
+ switch (stage) {
+ case PRE_CHANGE:
+ ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
+ dev_req_params);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_pre_link(struct ufs_hba *hba)
+{
+ int ret;
+ u32 tmp;
+
+ /* disable deep stall */
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+ if (ret)
+ return ret;
+
+ tmp &= ~(1 << 6);
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+ return ret;
+}
+
+static int ufs_mtk_post_link(struct ufs_hba *hba)
+{
+ /* disable device LCC */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+
+ /* enable unipro clock gating feature */
+ ufs_mtk_cfg_unipro_cg(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status stage)
+{
+ int ret = 0;
+
+ switch (stage) {
+ case PRE_CHANGE:
+ ret = ufs_mtk_pre_link(hba);
+ break;
+ case POST_CHANGE:
+ ret = ufs_mtk_post_link(hba);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_is_link_hibern8(hba))
+ phy_power_off(host->mphy);
+
+ return 0;
+}
+
+static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_is_link_hibern8(hba))
+ phy_power_on(host->mphy);
+
+ return 0;
+}
+
+/**
+ * ufs_hba_mtk_vops - UFS MediaTek specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */
+static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
+ .name = "mediatek.ufshci",
+ .init = ufs_mtk_init,
+ .setup_clocks = ufs_mtk_setup_clocks,
+ .link_startup_notify = ufs_mtk_link_startup_notify,
+ .pwr_change_notify = ufs_mtk_pwr_change_notify,
+ .suspend = ufs_mtk_suspend,
+ .resume = ufs_mtk_resume,
+};
+
+/**
+ * ufs_mtk_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int ufs_mtk_probe(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+
+ /* perform generic probe */
+ err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
+ if (err)
+ dev_info(dev, "probe failed %d\n", err);
+
+ return err;
+}
+
+/**
+ * ufs_mtk_remove - set driver_data of the device to NULL
+ * @pdev: pointer to platform device handle
+ *
+ * Always return 0
+ */
+static int ufs_mtk_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&(pdev)->dev);
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct of_device_id ufs_mtk_of_match[] = {
+ { .compatible = "mediatek,mt8183-ufshci"},
+ {},
+};
+
+static const struct dev_pm_ops ufs_mtk_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver ufs_mtk_pltform = {
+ .probe = ufs_mtk_probe,
+ .remove = ufs_mtk_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-mtk",
+ .pm = &ufs_mtk_pm_ops,
+ .of_match_table = ufs_mtk_of_match,
+ },
+};
+
+MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
+MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek UFS Host Driver");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(ufs_mtk_pltform);
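ufs_mtk_bind_mphy() above distinguishes a not-yet-probed PHY (-EPROBE_DEFER, which makes the driver core retry the probe later) from a hard failure. A small standalone sketch of that split; EPROBE_DEFER is defined locally here because it is a kernel-internal errno value, and the helper name is illustrative:

	#include <stdio.h>

	#define EPROBE_DEFER 517	/* kernel-internal errno: dependency not ready */

	/* Sketch of the -EPROBE_DEFER vs. hard-error split used above. */
	static int bind_optional_resource(int err)
	{
		if (err == -EPROBE_DEFER) {
			/* Dependency has not probed yet; retry the probe later. */
			printf("required phy hasn't probed yet, deferring\n");
			return err;
		}
		if (err) {
			printf("resource get failed: %d\n", err);
			return err;
		}
		return 0;	/* resource acquired */
	}

	int main(void)
	{
		bind_optional_resource(-EPROBE_DEFER);
		bind_optional_resource(0);
		return 0;
	}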
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
new file mode 100644
index 000000000000..19f8c42fe06f
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+
+#ifndef _UFS_MEDIATEK_H
+#define _UFS_MEDIATEK_H
+
+/*
+ * Vendor specific pre-defined parameters
+ */
+#define UFS_MTK_LIMIT_NUM_LANES_RX 1
+#define UFS_MTK_LIMIT_NUM_LANES_TX 1
+#define UFS_MTK_LIMIT_HSGEAR_RX UFS_HS_G3
+#define UFS_MTK_LIMIT_HSGEAR_TX UFS_HS_G3
+#define UFS_MTK_LIMIT_PWMGEAR_RX UFS_PWM_G4
+#define UFS_MTK_LIMIT_PWMGEAR_TX UFS_PWM_G4
+#define UFS_MTK_LIMIT_RX_PWR_PWM SLOW_MODE
+#define UFS_MTK_LIMIT_TX_PWR_PWM SLOW_MODE
+#define UFS_MTK_LIMIT_RX_PWR_HS FAST_MODE
+#define UFS_MTK_LIMIT_TX_PWR_HS FAST_MODE
+#define UFS_MTK_LIMIT_HS_RATE PA_HS_MODE_B
+#define UFS_MTK_LIMIT_DESIRED_MODE UFS_HS_MODE
+
+/*
+ * Other attributes
+ */
+#define VS_DEBUGCLOCKENABLE 0xD0A1
+#define VS_SAVEPOWERCONTROL 0xD0A6
+#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
+
+/*
+ * VS_DEBUGCLOCKENABLE
+ */
+enum {
+ TX_SYMBOL_CLK_REQ_FORCE = 5,
+};
+
+/*
+ * VS_SAVEPOWERCONTROL
+ */
+enum {
+ RX_SYMBOL_CLK_GATE_EN = 0,
+ SYS_CLK_GATE_EN = 2,
+ TX_CLK_GATE_EN = 3,
+};
+
+struct ufs_mtk_host {
+ struct ufs_hba *hba;
+ struct phy *mphy;
+};
+
+#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aeadb14aae1..ea7219407309 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/reset-controller.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
@@ -49,6 +50,11 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
+static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
+{
+ return container_of(rcd, struct ufs_qcom_host, rcdev);
+}
+
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
const char *prefix, void *priv)
{
@@ -255,11 +261,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
if (is_rate_B)
phy_set_mode(phy, PHY_MODE_UFS_HS_B);
- /* Assert PHY reset and apply PHY calibration values */
- ufs_qcom_assert_reset(hba);
- /* provide 1ms delay to let the reset pulse propagate */
- usleep_range(1000, 1100);
-
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
if (ret) {
@@ -268,15 +269,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
goto out;
}
- /* De-assert PHY reset and start serdes */
- ufs_qcom_deassert_reset(hba);
-
- /*
- * after reset deassertion, phy will need all ref clocks,
- * voltage, current to settle down before starting serdes.
- */
- usleep_range(1000, 1100);
-
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
if (ret) {
@@ -290,7 +282,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
return 0;
out_disable_phy:
- ufs_qcom_assert_reset(hba);
phy_exit(phy);
out:
return ret;
@@ -554,21 +545,10 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_disable_lane_clks(host);
phy_power_off(phy);
- /* Assert PHY soft reset */
- ufs_qcom_assert_reset(hba);
- goto out;
- }
-
- /*
- * If UniPro link is not active, PHY ref_clk, main PHY analog power
- * rail and low noise analog power rail for PLL can be switched off.
- */
- if (!ufs_qcom_is_link_active(hba)) {
+ } else if (!ufs_qcom_is_link_active(hba)) {
ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
}
-out:
return ret;
}
@@ -578,118 +558,25 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
struct phy *phy = host->generic_phy;
int err;
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- goto out;
-
- hba->is_sys_suspended = false;
-
-out:
- return err;
-}
-
-struct ufs_qcom_dev_params {
- u32 pwm_rx_gear; /* pwm rx gear to work in */
- u32 pwm_tx_gear; /* pwm tx gear to work in */
- u32 hs_rx_gear; /* hs rx gear to work in */
- u32 hs_tx_gear; /* hs tx gear to work in */
- u32 rx_lanes; /* number of rx lanes */
- u32 tx_lanes; /* number of tx lanes */
- u32 rx_pwr_pwm; /* rx pwm working pwr */
- u32 tx_pwr_pwm; /* tx pwm working pwr */
- u32 rx_pwr_hs; /* rx hs working pwr */
- u32 tx_pwr_hs; /* tx hs working pwr */
- u32 hs_rate; /* rate A/B to work in HS */
- u32 desired_working_mode;
-};
+ if (ufs_qcom_is_link_off(hba)) {
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "%s: failed PHY power on: %d\n",
+ __func__, err);
+ return err;
+ }
-static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
- struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr)
-{
- int min_qcom_gear;
- int min_dev_gear;
- bool is_dev_sup_hs = false;
- bool is_qcom_max_hs = false;
-
- if (dev_max->pwr_rx == FAST_MODE)
- is_dev_sup_hs = true;
-
- if (qcom_param->desired_working_mode == FAST) {
- is_qcom_max_hs = true;
- min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
- qcom_param->hs_tx_gear);
- } else {
- min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
- qcom_param->pwm_tx_gear);
- }
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
- /*
- * device doesn't support HS but qcom_param->desired_working_mode is
- * HS, thus device and qcom_param don't agree
- */
- if (!is_dev_sup_hs && is_qcom_max_hs) {
- pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
- __func__);
- return -ENOTSUPP;
- } else if (is_dev_sup_hs && is_qcom_max_hs) {
- /*
- * since device supports HS, it supports FAST_MODE.
- * since qcom_param->desired_working_mode is also HS
- * then final decision (FAST/FASTAUTO) is done according
- * to qcom_params as it is the restricting factor
- */
- agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
- qcom_param->rx_pwr_hs;
- } else {
- /*
- * here qcom_param->desired_working_mode is PWM.
- * it doesn't matter whether device supports HS or PWM,
- * in both cases qcom_param->desired_working_mode will
- * determine the mode
- */
- agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
- qcom_param->rx_pwr_pwm;
+ } else if (!ufs_qcom_is_link_active(hba)) {
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
}
- /*
- * we would like tx to work in the minimum number of lanes
- * between device capability and vendor preferences.
- * the same decision will be made for rx
- */
- agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
- qcom_param->tx_lanes);
- agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
- qcom_param->rx_lanes);
-
- /* device maximum gear is the minimum between device rx and tx gears */
- min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
- /*
- * if both device capabilities and vendor pre-defined preferences are
- * both HS or both PWM then set the minimum gear to be the chosen
- * working gear.
- * if one is PWM and one is HS then the one that is PWM get to decide
- * what is the gear, as it is the one that also decided previously what
- * pwr the device will be configured to.
- */
- if ((is_dev_sup_hs && is_qcom_max_hs) ||
- (!is_dev_sup_hs && !is_qcom_max_hs))
- agreed_pwr->gear_rx = agreed_pwr->gear_tx =
- min_t(u32, min_dev_gear, min_qcom_gear);
- else if (!is_dev_sup_hs)
- agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
- else
- agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
-
- agreed_pwr->hs_rate = qcom_param->hs_rate;
+ hba->is_sys_suspended = false;
return 0;
}
@@ -920,7 +807,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
{
u32 val;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_qcom_dev_params ufs_qcom_cap;
+ struct ufs_dev_params ufs_qcom_cap;
int ret = 0;
if (!dev_req_params) {
@@ -959,9 +846,9 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
}
- ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
- dev_max_params,
- dev_req_params);
+ ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
+ dev_max_params,
+ dev_req_params);
if (ret) {
pr_err("%s: failed to determine capabilities\n",
__func__);
@@ -1118,8 +1005,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
return 0;
if (on && (status == POST_CHANGE)) {
- phy_power_on(host->generic_phy);
-
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1131,9 +1016,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
-
- /* powering off PHY during aggressive clk gating */
- phy_power_off(host->generic_phy);
}
vote = host->bus_vote.min_bw_vote;
@@ -1147,6 +1029,41 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
return err;
}
+static int
+ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+ /* Currently this code only knows about a single reset. */
+ WARN_ON(id);
+ ufs_qcom_assert_reset(host->hba);
+ /* provide 1ms delay to let the reset pulse propagate. */
+ usleep_range(1000, 1100);
+ return 0;
+}
+
+static int
+ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+ /* Currently this code only knows about a single reset. */
+ WARN_ON(id);
+ ufs_qcom_deassert_reset(host->hba);
+
+ /*
+ * after reset deassertion, phy will need all ref clocks,
+ * voltage, current to settle down before starting serdes.
+ */
+ usleep_range(1000, 1100);
+ return 0;
+}
+
+static const struct reset_control_ops ufs_qcom_reset_ops = {
+ .assert = ufs_qcom_reset_assert,
+ .deassert = ufs_qcom_reset_deassert,
+};
+
#define ANDROID_BOOT_DEV_MAX 30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
@@ -1191,6 +1108,17 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
+ /* Fire up the reset controller. Failure here is non-fatal. */
+ host->rcdev.of_node = dev->of_node;
+ host->rcdev.ops = &ufs_qcom_reset_ops;
+ host->rcdev.owner = dev->driver->owner;
+ host->rcdev.nr_resets = 1;
+ err = devm_reset_controller_register(dev, &host->rcdev);
+ if (err) {
+ dev_warn(dev, "Failed to register reset controller\n");
+ err = 0;
+ }
+
/*
* voting/devoting device ref_clk source is time consuming hence
* skip devoting it during aggressive clock gating. This clock
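rcdev_to_ufs_host() above uses container_of() to step from the embedded reset_controller_dev back to the enclosing ufs_qcom_host. A standalone illustration of that recovery with hypothetical structure names (the macro is spelled out with offsetof() so the example compiles outside the kernel):

	#include <assert.h>
	#include <stddef.h>

	/* container_of(): recover the parent struct from a pointer to a member. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct reset_dev { int nr_resets; };

	struct host {
		int id;
		struct reset_dev rcdev;	/* embedded, as in struct ufs_qcom_host */
	};

	int main(void)
	{
		struct host h = { .id = 7 };
		struct reset_dev *rd = &h.rcdev;

		assert(container_of(rd, struct host, rcdev) == &h);
		return 0;
	}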
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index c114826316eb..68a880185752 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -14,6 +14,8 @@
#ifndef UFS_QCOM_H_
#define UFS_QCOM_H_
+#include <linux/reset-controller.h>
+
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_U32 (~(u32)0)
#define MPHY_TX_FSM_STATE 0x41
@@ -237,6 +239,8 @@ struct ufs_qcom_host {
/* Bitmask for enabling debug prints */
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
+
+ struct reset_controller_dev rcdev;
};
static inline u32
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 21e4ccb5ba6e..99a9c4d16f6b 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -516,7 +516,6 @@ struct ufs_vreg {
bool enabled;
int min_uV;
int max_uV;
- int min_uA;
int max_uA;
};
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 27213676329c..8a74ec30c3d2 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -39,6 +39,7 @@
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+#include "unipro.h"
#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
@@ -151,20 +152,12 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
vreg->name = kstrdup(name, GFP_KERNEL);
- /* if fixed regulator no need further initialization */
- snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name);
- if (of_property_read_bool(np, prop_name))
- goto out;
-
snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
- ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
- if (ret) {
- dev_err(dev, "%s: unable to find %s err %d\n",
- __func__, prop_name, ret);
- goto out;
+ if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
+ dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
+ vreg->max_uA = 0;
}
- vreg->min_uA = 0;
if (!strcmp(name, "vcc")) {
if (of_property_read_bool(np, "vcc-supply-1p8")) {
vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
@@ -290,6 +283,103 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
}
/**
+ * ufshcd_get_pwr_dev_param - get finally agreed attributes for
+ * power mode change
+ * @pltfrm_param: pointer to platform parameters
+ * @dev_max: pointer to device attributes
+ * @agreed_pwr: returned agreed attributes
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
+ struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
+{
+ int min_pltfrm_gear;
+ int min_dev_gear;
+ bool is_dev_sup_hs = false;
+ bool is_pltfrm_max_hs = false;
+
+ if (dev_max->pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
+ is_pltfrm_max_hs = true;
+ min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
+ pltfrm_param->hs_tx_gear);
+ } else {
+ min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
+ pltfrm_param->pwm_tx_gear);
+ }
+
+ /*
+ * device doesn't support HS but
+ * pltfrm_param->desired_working_mode is HS,
+ * thus device and pltfrm_param don't agree
+ */
+ if (!is_dev_sup_hs && is_pltfrm_max_hs) {
+ pr_info("%s: device doesn't support HS\n",
+ __func__);
+ return -ENOTSUPP;
+ } else if (is_dev_sup_hs && is_pltfrm_max_hs) {
+ /*
+ * since device supports HS, it supports FAST_MODE.
+ * since pltfrm_param->desired_working_mode is also HS
+ * then final decision (FAST/FASTAUTO) is done according
+ * to pltfrm_params as it is the restricting factor
+ */
+ agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
+ agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+ } else {
+ /*
+ * here pltfrm_param->desired_working_mode is PWM.
+ * it doesn't matter whether device supports HS or PWM,
+ * in both cases pltfrm_param->desired_working_mode will
+ * determine the mode
+ */
+ agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
+ agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+ }
+
+ /*
+ * we would like tx to work in the minimum number of lanes
+ * between device capability and vendor preferences.
+ * the same decision will be made for rx
+ */
+ agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
+ pltfrm_param->tx_lanes);
+ agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
+ pltfrm_param->rx_lanes);
+
+ /* device maximum gear is the minimum between device rx and tx gears */
+ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+ /*
+ * If both the device capabilities and the vendor pre-defined
+ * preferences are HS, or both are PWM, the chosen working gear is
+ * the minimum of the two.
+ * If one is PWM and one is HS, the PWM side gets to decide the gear,
+ * as it is the side that also decided above which power mode the
+ * device will be configured to.
+ */
+ if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
+ (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
+ agreed_pwr->gear_rx =
+ min_t(u32, min_dev_gear, min_pltfrm_gear);
+ } else if (!is_dev_sup_hs) {
+ agreed_pwr->gear_rx = min_dev_gear;
+ } else {
+ agreed_pwr->gear_rx = min_pltfrm_gear;
+ }
+ agreed_pwr->gear_tx = agreed_pwr->gear_rx;
+
+ agreed_pwr->hs_rate = pltfrm_param->hs_rate;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
+
+/**
* ufshcd_pltfrm_init - probe routine of the driver
* @pdev: pointer to Platform device handle
* @vops: pointer to variant ops
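ufshcd_get_pwr_dev_param() above agrees on a power mode by taking the minimum of the device's capabilities and the platform's limits, letting the PWM side pick the gear when only one side supports HS. A self-contained sketch of the gear/lane agreement for the common both-HS case, with hypothetical numbers:

	#include <stdio.h>

	static unsigned int min_u32(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		/* Hypothetical: device supports HS-G4 x2, platform caps at HS-G3 x1. */
		unsigned int dev_gear_rx = 4, dev_gear_tx = 4, dev_lanes = 2;
		unsigned int plt_hs_rx_gear = 3, plt_hs_tx_gear = 3, plt_lanes = 1;

		unsigned int min_dev_gear = min_u32(dev_gear_rx, dev_gear_tx);
		unsigned int min_plt_gear = min_u32(plt_hs_rx_gear, plt_hs_tx_gear);

		/* Both sides are HS, so the agreed gear is the overall minimum. */
		unsigned int agreed_gear  = min_u32(min_dev_gear, min_plt_gear);
		unsigned int agreed_lanes = min_u32(dev_lanes, plt_lanes);

		printf("agreed: HS-G%u, %u lane(s)\n", agreed_gear, agreed_lanes);
		return 0;
	}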
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index 1f29e1fd6d52..0919ebba182b 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -16,6 +16,27 @@
#include "ufshcd.h"
+#define UFS_PWM_MODE 1
+#define UFS_HS_MODE 2
+
+struct ufs_dev_params {
+ u32 pwm_rx_gear; /* pwm rx gear to work in */
+ u32 pwm_tx_gear; /* pwm tx gear to work in */
+ u32 hs_rx_gear; /* hs rx gear to work in */
+ u32 hs_tx_gear; /* hs tx gear to work in */
+ u32 rx_lanes; /* number of rx lanes */
+ u32 tx_lanes; /* number of tx lanes */
+ u32 rx_pwr_pwm; /* rx pwm working pwr */
+ u32 tx_pwr_pwm; /* tx pwm working pwr */
+ u32 rx_pwr_hs; /* rx hs working pwr */
+ u32 tx_pwr_hs; /* tx hs working pwr */
+ u32 hs_rate; /* rate A/B to work in HS */
+ u32 desired_working_mode;
+};
+
+int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
+ struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e040f9dd9ff3..8c1c551f2b42 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4704,10 +4704,10 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
"Reject UPIU not fully implemented\n");
break;
default:
- result = DID_ERROR << 16;
dev_err(hba->dev,
"Unexpected request response code = %x\n",
result);
+ result = DID_ERROR << 16;
break;
}
break;
@@ -6294,19 +6294,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
goto out;
}
- if (hba->vreg_info.vcc)
+ if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vcc->max_uA,
POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
- if (hba->vreg_info.vccq)
+ if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vccq->max_uA,
icc_level,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
- if (hba->vreg_info.vccq2)
+ if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vccq2->max_uA,
icc_level,
@@ -7004,6 +7004,15 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
if (!vreg)
return 0;
+ /*
+ * "set_load" operation shall be required on those regulators
+ * which specifically configured current limitation. Otherwise
+ * zero max_uA may cause unexpected behavior when regulator is
+ * enabled or set as high power mode.
+ */
+ if (!vreg->max_uA)
+ return 0;
+
ret = regulator_set_load(vreg->reg, ua);
if (ret < 0) {
dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
@@ -7039,12 +7048,15 @@ static int ufshcd_config_vreg(struct device *dev,
name = vreg->name;
if (regulator_count_voltages(reg) > 0) {
- min_uV = on ? vreg->min_uV : 0;
- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
- if (ret) {
- dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ if (vreg->min_uV && vreg->max_uV) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev,
+ "%s: %s set voltage failed, err=%d\n",
__func__, name, ret);
- goto out;
+ goto out;
+ }
}
uA_load = on ? vreg->max_uA : 0;
@@ -7103,9 +7115,6 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
struct device *dev = hba->dev;
struct ufs_vreg_info *info = &hba->vreg_info;
- if (!info)
- goto out;
-
ret = ufshcd_toggle_vreg(dev, info->vcc, on);
if (ret)
goto out;
@@ -7131,10 +7140,7 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
struct ufs_vreg_info *info = &hba->vreg_info;
- if (info)
- return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
-
- return 0;
+ return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@ -7160,9 +7166,6 @@ static int ufshcd_init_vreg(struct ufs_hba *hba)
struct device *dev = hba->dev;
struct ufs_vreg_info *info = &hba->vreg_info;
- if (!info)
- goto out;
-
ret = ufshcd_get_vreg(dev, info->vcc);
if (ret)
goto out;
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 23129d7b2678..c77e36526447 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -52,7 +52,7 @@
#define RX_HS_UNTERMINATED_ENABLE 0x00A6
#define RX_ENTER_HIBERN8 0x00A7
#define RX_BYPASS_8B10B_ENABLE 0x00A8
-#define RX_TERMINATION_FORCE_ENABLE 0x0089
+#define RX_TERMINATION_FORCE_ENABLE 0x00A9
#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
#define RX_HIBERN8TIME_CAPABILITY 0x0092
#define RX_REFCLKFREQ 0x00EB
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8af01777d09c..c47d38bca948 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -659,7 +659,7 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
static int virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
}
@@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
/* We need to know how many queues before we allocate. */
num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+ num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
num_targets = virtscsi_config_get(vdev, max_target) + 1;
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index e649ceaaa410..87d69e7471f9 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/stat.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "internals.h"
static void __iomem *uimask;
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 71f094c9ec68..f3585777324c 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1342,6 +1342,10 @@ static int of_qcom_slim_ngd_register(struct device *parent,
return -ENOMEM;
ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
+ if (!ngd->pdev) {
+ kfree(ngd);
+ return -ENOMEM;
+ }
ngd->id = id;
ngd->pdev->dev.parent = parent;
ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index b750a88547c7..75bdbb2c5140 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -7,6 +7,7 @@ source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
+source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 814128fe479f..524ecdc2a9bb 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
-obj-$(CONFIG_ARCH_ASPEED) += aspeed/
+obj-$(CONFIG_SOC_ASPEED) += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_DOVE) += dove/
@@ -12,6 +12,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-$(CONFIG_ARCH_MXC) += imx/
+obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
obj-y += amlogic/
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index 858b5e3f79c9..765d10191387 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -1,7 +1,11 @@
menu "Aspeed SoC drivers"
+config SOC_ASPEED
+ def_bool y
+ depends on ARCH_ASPEED || COMPILE_TEST
+
config ASPEED_LPC_CTRL
- depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
---help---
Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
@@ -10,11 +14,18 @@ config ASPEED_LPC_CTRL
config ASPEED_LPC_SNOOP
tristate "Aspeed ast2500 HOST LPC snoop support"
- depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
help
Provides a driver to control the LPC snoop interface which
allows the BMC to listen on and save the data written by
the host to an arbitrary LPC I/O port.
+config ASPEED_P2A_CTRL
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
+ help
+ Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
+ ioctl()s, the driver also provides an interface for userspace mappings to
+ a pre-defined region.
endmenu
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
index cfaa9adc67b5..2f7b6da7be79 100644
--- a/drivers/soc/aspeed/Makefile
+++ b/drivers/soc/aspeed/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
new file mode 100644
index 000000000000..b60fbeaffcbd
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides a simple driver to control the ASPEED P2A interface which allows
+ * the host to read and write to various regions of the BMC's memory.
+ */
+
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/aspeed-p2a-ctrl.h>
+
+#define DEVICE_NAME "aspeed-p2a-ctrl"
+
+/* SCU2C is a Misc. Control Register. */
+#define SCU2C 0x2c
+/* SCU180 is the PCIe Configuration Setting Control Register. */
+#define SCU180 0x180
+/* Bit 1 controls the P2A bridge, while bit 0 controls the entire VGA device
+ * on the PCI bus.
+ */
+#define SCU180_ENP2A BIT(1)
+
+/* The ast2400/2500 both have six ranges. */
+#define P2A_REGION_COUNT 6
+
+struct region {
+ u64 min;
+ u64 max;
+ u32 bit;
+};
+
+struct aspeed_p2a_model_data {
+ /* min, max, bit */
+ struct region regions[P2A_REGION_COUNT];
+};
+
+struct aspeed_p2a_ctrl {
+ struct miscdevice miscdev;
+ struct regmap *regmap;
+
+ const struct aspeed_p2a_model_data *config;
+
+ /* Access to these must be serialized; the mutex is held during probe,
+ * the mapping ioctl, release, and remove.
+ */
+ struct mutex tracking;
+ u32 readers;
+ u32 readerwriters[P2A_REGION_COUNT];
+
+ phys_addr_t mem_base;
+ resource_size_t mem_size;
+};
+
+struct aspeed_p2a_user {
+ struct file *file;
+ struct aspeed_p2a_ctrl *parent;
+
+ /* The entire memory space is opened for reading once the bridge is
+ * enabled, therefore this needs only to be tracked once per user.
+ * If any user has it open for read, the bridge must stay enabled.
+ */
+ u32 read;
+
+ /* Each entry of the array corresponds to a P2A Region. If the user
+ * opens for read or readwrite, the reference goes up here. On
+ * release, this array is walked and references adjusted accordingly.
+ */
+ u32 readwrite[P2A_REGION_COUNT];
+};
+
+static void aspeed_p2a_enable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+ regmap_update_bits(p2a_ctrl->regmap,
+ SCU180, SCU180_ENP2A, SCU180_ENP2A);
+}
+
+static void aspeed_p2a_disable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+ regmap_update_bits(p2a_ctrl->regmap, SCU180, SCU180_ENP2A, 0);
+}
+
+static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long vsize;
+ pgprot_t prot;
+ struct aspeed_p2a_user *priv = file->private_data;
+ struct aspeed_p2a_ctrl *ctrl = priv->parent;
+
+ if (ctrl->mem_base == 0 && ctrl->mem_size == 0)
+ return -EINVAL;
+
+ vsize = vma->vm_end - vma->vm_start;
+ prot = vma->vm_page_prot;
+
+ if (vma->vm_pgoff + vsize > ctrl->mem_base + ctrl->mem_size)
+ return -EINVAL;
+
+ /* ast2400/2500 AHB accesses are not cache coherent */
+ prot = pgprot_noncached(prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
+ vsize, prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static bool aspeed_p2a_region_acquire(struct aspeed_p2a_user *priv,
+ struct aspeed_p2a_ctrl *ctrl,
+ struct aspeed_p2a_ctrl_mapping *map)
+{
+ int i;
+ u64 base, end;
+ bool matched = false;
+
+ base = map->addr;
+ end = map->addr + (map->length - 1);
+
+ /* The regions cover the full 32-bit space, so any valid u32 range matches. */
+ for (i = 0; i < P2A_REGION_COUNT; i++) {
+ const struct region *curr = &ctrl->config->regions[i];
+
+ /* If the top of this region is lower than your base, skip it.
+ */
+ if (curr->max < base)
+ continue;
+
+ /* If the bottom of this region is higher than your end, bail.
+ */
+ if (curr->min > end)
+ break;
+
+ /* Lock this and update it, so that if someone else is
+ * closing their file out, this will preserve the increment.
+ */
+ mutex_lock(&ctrl->tracking);
+ ctrl->readerwriters[i] += 1;
+ mutex_unlock(&ctrl->tracking);
+
+ /* Track with the user, so when they close their file, we can
+ * decrement properly.
+ */
+ priv->readwrite[i] += 1;
+
+ /* Enable the region as read-write. */
+ regmap_update_bits(ctrl->regmap, SCU2C, curr->bit, 0);
+ matched = true;
+ }
+
+ return matched;
+}
+
+static long aspeed_p2a_ioctl(struct file *file, unsigned int cmd,
+ unsigned long data)
+{
+ struct aspeed_p2a_user *priv = file->private_data;
+ struct aspeed_p2a_ctrl *ctrl = priv->parent;
+ void __user *arg = (void __user *)data;
+ struct aspeed_p2a_ctrl_mapping map;
+
+ if (copy_from_user(&map, arg, sizeof(map)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ASPEED_P2A_CTRL_IOCTL_SET_WINDOW:
+ /* For a read-only mapping, the entire space is readable once the
+ * bridge is enabled, so just record that this user wants read
+ * access and enable the bridge if it isn't already.
+ */
+ if (map.flags == ASPEED_P2A_CTRL_READ_ONLY) {
+ mutex_lock(&ctrl->tracking);
+ ctrl->readers += 1;
+ mutex_unlock(&ctrl->tracking);
+
+ /* Track with the user, so when they close their file,
+ * we can decrement properly.
+ */
+ priv->read += 1;
+ } else if (map.flags == ASPEED_P2A_CTRL_READWRITE) {
+ /* If we don't acquire any region return error. */
+ if (!aspeed_p2a_region_acquire(priv, ctrl, &map)) {
+ return -EINVAL;
+ }
+ } else {
+ /* Invalid map flags. */
+ return -EINVAL;
+ }
+
+ aspeed_p2a_enable_bridge(ctrl);
+ return 0;
+ case ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG:
+ /* This is a request for the memory-region and corresponding
+ * length that is used by the driver for mmap.
+ */
+
+ map.flags = 0;
+ map.addr = ctrl->mem_base;
+ map.length = ctrl->mem_size;
+
+ return copy_to_user(arg, &map, sizeof(map)) ? -EFAULT : 0;
+ }
+
+ return -EINVAL;
+}
+
+
+/*
+ * When a user opens this file, we create a structure to track their mappings.
+ *
+ * A user can map a region as read-only (bridge enabled) or read-write (bit
+ * flipped, and bridge enabled). Either way, this tracking is used so that,
+ * when they release the device, the references are dropped properly.
+ *
+ * The bridge is not enabled until a user calls an ioctl to map a region,
+ * simply opening the device does not enable it.
+ */
+static int aspeed_p2a_open(struct inode *inode, struct file *file)
+{
+ struct aspeed_p2a_user *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->file = file;
+ priv->read = 0;
+ memset(priv->readwrite, 0, sizeof(priv->readwrite));
+
+ /* The file's private_data is initialized to the p2a_ctrl. */
+ priv->parent = file->private_data;
+
+ /* Set the file's private_data to the user's data. */
+ file->private_data = priv;
+
+ return 0;
+}
+
+/*
+ * This will close the user's mappings. It will go through what they had opened
+ * for readwrite, and decrement those counts. If at the end, this is the last
+ * user, it'll close the bridge.
+ */
+static int aspeed_p2a_release(struct inode *inode, struct file *file)
+{
+ int i;
+ u32 bits = 0;
+ bool open_regions = false;
+ struct aspeed_p2a_user *priv = file->private_data;
+
+ /* Lock others from changing these values until everything is updated
+ * in one pass.
+ */
+ mutex_lock(&priv->parent->tracking);
+
+ priv->parent->readers -= priv->read;
+
+ for (i = 0; i < P2A_REGION_COUNT; i++) {
+ priv->parent->readerwriters[i] -= priv->readwrite[i];
+
+ if (priv->parent->readerwriters[i] > 0)
+ open_regions = true;
+ else
+ bits |= priv->parent->config->regions[i].bit;
+ }
+
+ /* Setting a bit to 1 disables the region, so let's just OR with the
+ * above to disable any.
+ */
+
+ /* Note, if another user is trying to ioctl, they can't grab tracking,
+ * and therefore can't grab either register mutex.
+ * If another user is trying to close, they can't grab tracking either.
+ */
+ regmap_update_bits(priv->parent->regmap, SCU2C, bits, bits);
+
+ /* If parent->readers is zero and no regions remain open, disable
+ * the bridge.
+ */
+ if (!open_regions && priv->parent->readers == 0)
+ aspeed_p2a_disable_bridge(priv->parent);
+
+ mutex_unlock(&priv->parent->tracking);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct file_operations aspeed_p2a_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .mmap = aspeed_p2a_mmap,
+ .unlocked_ioctl = aspeed_p2a_ioctl,
+ .open = aspeed_p2a_open,
+ .release = aspeed_p2a_release,
+};
+
+/* The regions are controlled by SCU2C */
+static void aspeed_p2a_disable_all(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+ int i;
+ u32 value = 0;
+
+ for (i = 0; i < P2A_REGION_COUNT; i++)
+ value |= p2a_ctrl->config->regions[i].bit;
+
+ regmap_update_bits(p2a_ctrl->regmap, SCU2C, value, value);
+
+ /* Disable the bridge. */
+ aspeed_p2a_disable_bridge(p2a_ctrl);
+}
+
+static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
+{
+ struct aspeed_p2a_ctrl *misc_ctrl;
+ struct device *dev;
+ struct resource resm;
+ struct device_node *node;
+ int rc = 0;
+
+ dev = &pdev->dev;
+
+ misc_ctrl = devm_kzalloc(dev, sizeof(*misc_ctrl), GFP_KERNEL);
+ if (!misc_ctrl)
+ return -ENOMEM;
+
+ mutex_init(&misc_ctrl->tracking);
+
+ /* optional. */
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node) {
+ rc = of_address_to_resource(node, 0, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for reserved memory\n");
+ return -ENODEV;
+ }
+
+ misc_ctrl->mem_size = resource_size(&resm);
+ misc_ctrl->mem_base = resm.start;
+ }
+
+ misc_ctrl->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(misc_ctrl->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ misc_ctrl->config = of_device_get_match_data(dev);
+
+ dev_set_drvdata(&pdev->dev, misc_ctrl);
+
+ aspeed_p2a_disable_all(misc_ctrl);
+
+ misc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
+ misc_ctrl->miscdev.name = DEVICE_NAME;
+ misc_ctrl->miscdev.fops = &aspeed_p2a_ctrl_fops;
+ misc_ctrl->miscdev.parent = dev;
+
+ rc = misc_register(&misc_ctrl->miscdev);
+ if (rc)
+ dev_err(dev, "Unable to register device\n");
+
+ return rc;
+}
+
+static int aspeed_p2a_ctrl_remove(struct platform_device *pdev)
+{
+ struct aspeed_p2a_ctrl *p2a_ctrl = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&p2a_ctrl->miscdev);
+
+ return 0;
+}
+
+#define SCU2C_DRAM BIT(25)
+#define SCU2C_SPI BIT(24)
+#define SCU2C_SOC BIT(23)
+#define SCU2C_FLASH BIT(22)
+
+static const struct aspeed_p2a_model_data ast2400_model_data = {
+ .regions = {
+ {0x00000000, 0x17FFFFFF, SCU2C_FLASH},
+ {0x18000000, 0x1FFFFFFF, SCU2C_SOC},
+ {0x20000000, 0x2FFFFFFF, SCU2C_FLASH},
+ {0x30000000, 0x3FFFFFFF, SCU2C_SPI},
+ {0x40000000, 0x5FFFFFFF, SCU2C_DRAM},
+ {0x60000000, 0xFFFFFFFF, SCU2C_SOC},
+ }
+};
+
+static const struct aspeed_p2a_model_data ast2500_model_data = {
+ .regions = {
+ {0x00000000, 0x0FFFFFFF, SCU2C_FLASH},
+ {0x10000000, 0x1FFFFFFF, SCU2C_SOC},
+ {0x20000000, 0x3FFFFFFF, SCU2C_FLASH},
+ {0x40000000, 0x5FFFFFFF, SCU2C_SOC},
+ {0x60000000, 0x7FFFFFFF, SCU2C_SPI},
+ {0x80000000, 0xFFFFFFFF, SCU2C_DRAM},
+ }
+};
+
+static const struct of_device_id aspeed_p2a_ctrl_match[] = {
+ { .compatible = "aspeed,ast2400-p2a-ctrl",
+ .data = &ast2400_model_data },
+ { .compatible = "aspeed,ast2500-p2a-ctrl",
+ .data = &ast2500_model_data },
+ { },
+};
+
+static struct platform_driver aspeed_p2a_ctrl_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_p2a_ctrl_match,
+ },
+ .probe = aspeed_p2a_ctrl_probe,
+ .remove = aspeed_p2a_ctrl_remove,
+};
+
+module_platform_driver(aspeed_p2a_ctrl_driver);
+
+MODULE_DEVICE_TABLE(of, aspeed_p2a_ctrl_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick Venture <venture@google.com>");
+MODULE_DESCRIPTION("Control for aspeed 2400/2500 P2A VGA HOST to BMC mappings");
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index a8f1e47ce698..d9231bd3c691 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -429,10 +429,19 @@ static int imx_gpc_probe(struct platform_device *pdev)
return ret;
}
- /* Disable PU power down in normal operation if ERR009619 is present */
+ /*
+ * Disable PU power down by runtime PM if ERR009619 is present.
+ *
+ * The PRE clock will be paused for several cycles when turning on the
+ * PU domain LDO from power down state. If PRE is in use at that time,
+ * the IPU/PRG cannot get the correct display data from the PRE.
+ *
+ * This is not a concern when the whole system enters suspend state, so
+ * it's safe to power down PU in this case.
+ */
if (of_id_data->err009619_present)
imx_gpc_domains[GPC_PGC_DOMAIN_PU].base.flags |=
- GENPD_FLAG_ALWAYS_ON;
+ GENPD_FLAG_RPM_ALWAYS_ON;
/* Keep DISP always on if ERR006287 is present */
if (of_id_data->err006287_present)
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
new file mode 100644
index 000000000000..de6becdc78a2
--- /dev/null
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -0,0 +1,16 @@
+menu "IXP4xx SoC drivers"
+
+config IXP4XX_QMGR
+ tristate "IXP4xx Queue Manager support"
+ help
+ This driver supports IXP4xx built-in hardware queue manager
+ and is automatically selected by Ethernet and HSS drivers.
+
+config IXP4XX_NPE
+ tristate "IXP4xx Network Processor Engine support"
+ select FW_LOADER
+ help
+ This driver supports IXP4xx built-in network coprocessors
+ and is automatically selected by Ethernet and HSS drivers.
+
+endmenu
diff --git a/drivers/soc/ixp4xx/Makefile b/drivers/soc/ixp4xx/Makefile
new file mode 100644
index 000000000000..d20d99e6df65
--- /dev/null
+++ b/drivers/soc/ixp4xx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx-qmgr.o
+obj-$(CONFIG_IXP4XX_NPE) += ixp4xx-npe.o
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
new file mode 100644
index 000000000000..15979d4376ab
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -0,0 +1,762 @@
+/*
+ * Intel IXP4xx Network Processor Engine driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The code is based on publicly available information:
+ * - Intel IXP4xx Developer's Manual and other e-papers
+ * - Intel IXP400 Access Library Software (BSD license)
+ * - previous works by Christian Hohnstaedt <chohnstaedt@innominate.com>
+ * Thanks, Christian.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/npe.h>
+
+#define DEBUG_MSG 0
+#define DEBUG_FW 0
+
+#define NPE_COUNT 3
+#define MAX_RETRIES 1000 /* microseconds */
+#define NPE_42X_DATA_SIZE 0x800 /* in dwords */
+#define NPE_46X_DATA_SIZE 0x1000
+#define NPE_A_42X_INSTR_SIZE 0x1000
+#define NPE_B_AND_C_42X_INSTR_SIZE 0x800
+#define NPE_46X_INSTR_SIZE 0x1000
+#define REGS_SIZE 0x1000
+
+#define NPE_PHYS_REG 32
+
+#define FW_MAGIC 0xFEEDF00D
+#define FW_BLOCK_TYPE_INSTR 0x0
+#define FW_BLOCK_TYPE_DATA 0x1
+#define FW_BLOCK_TYPE_EOF 0xF
+
+/* NPE exec status (read) and command (write) */
+#define CMD_NPE_STEP 0x01
+#define CMD_NPE_START 0x02
+#define CMD_NPE_STOP 0x03
+#define CMD_NPE_CLR_PIPE 0x04
+#define CMD_CLR_PROFILE_CNT 0x0C
+#define CMD_RD_INS_MEM 0x10 /* instruction memory */
+#define CMD_WR_INS_MEM 0x11
+#define CMD_RD_DATA_MEM 0x12 /* data memory */
+#define CMD_WR_DATA_MEM 0x13
+#define CMD_RD_ECS_REG 0x14 /* exec access register */
+#define CMD_WR_ECS_REG 0x15
+
+#define STAT_RUN 0x80000000
+#define STAT_STOP 0x40000000
+#define STAT_CLEAR 0x20000000
+#define STAT_ECS_K 0x00800000 /* pipeline clean */
+
+#define NPE_STEVT 0x1B
+#define NPE_STARTPC 0x1C
+#define NPE_REGMAP 0x1E
+#define NPE_CINDEX 0x1F
+
+#define INSTR_WR_REG_SHORT 0x0000C000
+#define INSTR_WR_REG_BYTE 0x00004000
+#define INSTR_RD_FIFO 0x0F888220
+#define INSTR_RESET_MBOX 0x0FAC8210
+
+#define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */
+#define ECS_BG_CTXT_REG_1 0x01 /* Stack level */
+#define ECS_BG_CTXT_REG_2 0x02
+#define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */
+#define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */
+#define ECS_PRI_1_CTXT_REG_2 0x06
+#define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */
+#define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */
+#define ECS_PRI_2_CTXT_REG_2 0x0A
+#define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */
+#define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */
+#define ECS_DBG_CTXT_REG_2 0x0E
+#define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */
+
+#define ECS_REG_0_ACTIVE 0x80000000 /* all levels */
+#define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */
+#define ECS_REG_0_LDUR_BITS 8
+#define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */
+#define ECS_REG_1_CCTXT_BITS 16
+#define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */
+#define ECS_REG_1_SELCTXT_BITS 0
+#define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */
+#define ECS_DBG_REG_2_IF 0x00100000 /* debug level */
+#define ECS_DBG_REG_2_IE 0x00080000 /* debug level */
+
+/* NPE watchpoint_fifo register bit */
+#define WFIFO_VALID 0x80000000
+
+/* NPE messaging_status register bit definitions */
+#define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */
+#define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */
+#define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */
+#define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */
+#define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */
+#define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */
+#define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */
+#define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */
+
+/* NPE messaging_control register bit definitions */
+#define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */
+#define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */
+#define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */
+#define MSGCTL_IN_FIFO_WRITE 0x02000000
+
+/* NPE mailbox_status value for reset */
+#define RESET_MBOX_STAT 0x0000F0F0
+
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
+
+#define print_npe(pri, npe, fmt, ...) \
+ printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)
+
+#if DEBUG_MSG
+#define debug_msg(npe, fmt, ...) \
+ print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__)
+#else
+#define debug_msg(npe, fmt, ...)
+#endif
+
+static struct {
+ u32 reg, val;
+} ecs_reset[] = {
+ { ECS_BG_CTXT_REG_0, 0xA0000000 },
+ { ECS_BG_CTXT_REG_1, 0x01000000 },
+ { ECS_BG_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_1_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_1_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_1_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_2_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_2_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_2_CTXT_REG_2, 0x00008000 },
+ { ECS_DBG_CTXT_REG_0, 0x20000000 },
+ { ECS_DBG_CTXT_REG_1, 0x00000000 },
+ { ECS_DBG_CTXT_REG_2, 0x001E0000 },
+ { ECS_INSTRUCT_REG, 0x1003C00F },
+};
+
+static struct npe npe_tab[NPE_COUNT] = {
+ {
+ .id = 0,
+ }, {
+ .id = 1,
+ }, {
+ .id = 2,
+ }
+};
+
+int npe_running(struct npe *npe)
+{
+ return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0;
+}
+
+static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data)
+{
+ __raw_writel(data, &npe->regs->exec_data);
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+}
+
+static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd)
+{
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+ /* Introduce extra read cycles after issuing the read command to the NPE
+ so that we read the register after the NPE has updated it.
+ This is to overcome a race condition between the XScale and the NPE */
+ __raw_readl(&npe->regs->exec_data);
+ __raw_readl(&npe->regs->exec_data);
+ return __raw_readl(&npe->regs->exec_data);
+}
+
+static void npe_clear_active(struct npe *npe, u32 reg)
+{
+ u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE);
+}
+
+static void npe_start(struct npe *npe)
+{
+ /* ensure only Background Context Stack Level is active */
+ npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0);
+ npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0);
+ npe_clear_active(npe, ECS_DBG_CTXT_REG_0);
+
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd);
+}
+
+static void npe_stop(struct npe *npe)
+{
+ __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/
+}
+
+static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx,
+ u32 ldur)
+{
+ u32 wc;
+ int i;
+
+ /* set the Active bit, and the LDUR, in the debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG,
+ ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS));
+
+ /* set CCTXT at ECS DEBUG L3 to specify in which context to execute
+ the instruction, and set SELCTXT at ECS DEBUG Level to specify
+ which context store to access.
+ Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
+ */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG,
+ (ctx << ECS_REG_1_CCTXT_BITS) |
+ (ctx << ECS_REG_1_SELCTXT_BITS));
+
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+
+ /* load NPE instruction into the instruction register */
+ npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr);
+
+ /* we need this value later to wait for completion of NPE execution
+ step */
+ wc = __raw_readl(&npe->regs->watch_count);
+
+ /* issue a Step One command via the Execution Control register */
+ __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd);
+
+ /* Watch Count register increments when NPE completes an instruction */
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (wc != __raw_readl(&npe->regs->watch_count))
+ return 0;
+ udelay(1);
+ }
+
+ print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr,
+ u8 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov8 d0, #0 */
+ u32 instr = INSTR_WR_REG_BYTE | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr,
+ u16 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov16 d0, #0 */
+ u32 instr = INSTR_WR_REG_SHORT | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr,
+ u32 val, u32 ctx)
+{
+ /* write in 16 bit steps first the high and then the low value */
+ if (npe_logical_reg_write16(npe, addr, val >> 16, ctx))
+ return -ETIMEDOUT;
+ return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx);
+}
+
+static int npe_reset(struct npe *npe)
+{
+ u32 val, ctl, exec_count, ctx_reg2;
+ int i;
+
+ ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) &
+ 0x3F3FFFFF;
+
+ /* disable parity interrupt */
+ __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control);
+
+ /* pre exec - debug instruction */
+ /* turn off the halt bit by clearing Execution Count register. */
+ exec_count = __raw_readl(&npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->exec_count);
+ /* ensure that IF and IE are on (temporarily), so that we don't end up
+ stepping forever */
+ ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 |
+ ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE);
+
+ /* clear the FIFOs */
+ while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID)
+ ;
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE)
+ /* read from the outFIFO until empty */
+ print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n",
+ __raw_readl(&npe->regs->in_out_fifo));
+
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)
+ /* step execution of the NPE instruction to read inFIFO using
+ the Debug Executing Context stack */
+ if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0))
+ return -ETIMEDOUT;
+
+ /* reset the mailbox reg from the XScale side */
+ __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status);
+ /* from NPE side */
+ if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0))
+ return -ETIMEDOUT;
+
+ /* Reset the physical registers in the NPE register file */
+ for (val = 0; val < NPE_PHYS_REG; val++) {
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0))
+ return -ETIMEDOUT;
+ /* address is either 0 or 4 */
+ if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0))
+ return -ETIMEDOUT;
+ }
+
+ /* Reset the context store = each context's Context Store registers */
+
+ /* Context 0 has no STARTPC. Instead, this value is used to set NextPC
+ for Background ECS, to set where NPE starts executing code */
+ val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG);
+ val &= ~ECS_REG_0_NEXTPC_MASK;
+ val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK;
+ npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val);
+
+ for (i = 0; i < 16; i++) {
+ if (i) { /* Context 0 has no STEVT nor STARTPC */
+ /* STEVT = off, 0x80 */
+ if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i))
+ return -ETIMEDOUT;
+ }
+ /* REGMAP = d0->p0, d8->p2, d16->p4 */
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i))
+ return -ETIMEDOUT;
+ }
+
+ /* post exec */
+ /* clear active bit in debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0);
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ /* restore previous values */
+ __raw_writel(exec_count, &npe->regs->exec_count);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2);
+
+ /* write reset values to Execution Context Stack registers */
+ for (val = 0; val < ARRAY_SIZE(ecs_reset); val++)
+ npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG,
+ ecs_reset[val].val);
+
+ /* clear the profile counter */
+ __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd);
+
+ __raw_writel(0, &npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->action_points[0]);
+ __raw_writel(0, &npe->regs->action_points[1]);
+ __raw_writel(0, &npe->regs->action_points[2]);
+ __raw_writel(0, &npe->regs->action_points[3]);
+ __raw_writel(0, &npe->regs->watch_count);
+
+ val = ixp4xx_read_feature_bits();
+ /* reset the NPE */
+ ixp4xx_write_feature_bits(val &
+ ~(IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ /* deassert reset */
+ ixp4xx_write_feature_bits(val |
+ (IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << npe->id))
+ break; /* NPE is back alive */
+ udelay(1);
+ }
+ if (i == MAX_RETRIES)
+ return -ETIMEDOUT;
+
+ npe_stop(npe);
+
+ /* restore NPE configuration bus Control Register - parity settings */
+ __raw_writel(ctl, &npe->regs->messaging_control);
+ return 0;
+}
+
+
+int npe_send_message(struct npe *npe, const void *msg, const char *what)
+{
+ const u32 *send = msg;
+ int cycles = 0;
+
+ debug_msg(npe, "Trying to send message %s [%08X:%08X]\n",
+ what, send[0], send[1]);
+
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) {
+ debug_msg(npe, "NPE input FIFO not empty\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[0], &npe->regs->in_out_fifo);
+
+ if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) {
+ debug_msg(npe, "NPE input FIFO full\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[1], &npe->regs->in_out_fifo);
+
+ while ((cycles < MAX_RETRIES) &&
+ (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) {
+ udelay(1);
+ cycles++;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout sending message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Sending a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ u32 *recv = msg;
+ int cycles = 0, cnt = 0;
+
+ debug_msg(npe, "Trying to receive message %s\n", what);
+
+ while (cycles < MAX_RETRIES) {
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) {
+ recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo);
+ if (cnt == 2)
+ break;
+ } else {
+ udelay(1);
+ cycles++;
+ }
+ }
+
+ switch(cnt) {
+ case 1:
+ debug_msg(npe, "Received [%08X]\n", recv[0]);
+ break;
+ case 2:
+ debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]);
+ break;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout waiting for message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Receiving a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_send_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ int result;
+ u32 *send = msg, recv[2];
+
+ if ((result = npe_send_message(npe, msg, what)) != 0)
+ return result;
+ if ((result = npe_recv_message(npe, recv, what)) != 0)
+ return result;
+
+ if ((recv[0] != send[0]) || (recv[1] != send[1])) {
+ debug_msg(npe, "Message %s: unexpected message received\n",
+ what);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+int npe_load_firmware(struct npe *npe, const char *name, struct device *dev)
+{
+ const struct firmware *fw_entry;
+
+ struct dl_block {
+ u32 type;
+ u32 offset;
+ } *blk;
+
+ struct dl_image {
+ u32 magic;
+ u32 id;
+ u32 size;
+ union {
+ u32 data[0];
+ struct dl_block blocks[0];
+ };
+ } *image;
+
+ struct dl_codeblock {
+ u32 npe_addr;
+ u32 size;
+ u32 data[0];
+ } *cb;
+
+ int i, j, err, data_size, instr_size, blocks, table_end;
+ u32 cmd;
+
+ if ((err = request_firmware(&fw_entry, name, dev)) != 0)
+ return err;
+
+ err = -EINVAL;
+ if (fw_entry->size < sizeof(struct dl_image)) {
+ print_npe(KERN_ERR, npe, "incomplete firmware file\n");
+ goto err;
+ }
+ image = (struct dl_image*)fw_entry->data;
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n",
+ image->magic, image->id, image->size, image->size * 4);
+#endif
+
+ if (image->magic == swab32(FW_MAGIC)) { /* swapped file */
+ image->id = swab32(image->id);
+ image->size = swab32(image->size);
+ } else if (image->magic != FW_MAGIC) {
+ print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n",
+ image->magic);
+ goto err;
+ }
+ if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) {
+ print_npe(KERN_ERR, npe,
+ "inconsistent size of firmware file\n");
+ goto err;
+ }
+ if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) {
+ print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n");
+ goto err;
+ }
+ if (image->magic == swab32(FW_MAGIC))
+ for (i = 0; i < image->size; i++)
+ image->data[i] = swab32(image->data[i]);
+
+ if (cpu_is_ixp42x() && ((image->id >> 28) & 0xF /* device ID */)) {
+ print_npe(KERN_INFO, npe, "IXP43x/IXP46x firmware ignored on "
+ "IXP42x\n");
+ goto err;
+ }
+
+ if (npe_running(npe)) {
+ print_npe(KERN_INFO, npe, "unable to load firmware, NPE is "
+ "already running\n");
+ err = -EBUSY;
+ goto err;
+ }
+#if 0
+ npe_stop(npe);
+ npe_reset(npe);
+#endif
+
+ print_npe(KERN_INFO, npe, "firmware functionality 0x%X, "
+ "revision 0x%X:%X\n", (image->id >> 16) & 0xFF,
+ (image->id >> 8) & 0xFF, image->id & 0xFF);
+
+ if (cpu_is_ixp42x()) {
+ if (!npe->id)
+ instr_size = NPE_A_42X_INSTR_SIZE;
+ else
+ instr_size = NPE_B_AND_C_42X_INSTR_SIZE;
+ data_size = NPE_42X_DATA_SIZE;
+ } else {
+ instr_size = NPE_46X_INSTR_SIZE;
+ data_size = NPE_46X_DATA_SIZE;
+ }
+
+ for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size;
+ blocks++)
+ if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF)
+ break;
+ if (blocks * sizeof(struct dl_block) / 4 >= image->size) {
+ print_npe(KERN_INFO, npe, "firmware EOF block marker not "
+ "found\n");
+ goto err;
+ }
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks);
+#endif
+
+ table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */;
+ for (i = 0, blk = image->blocks; i < blocks; i++, blk++) {
+ if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4
+ || blk->offset < table_end) {
+ print_npe(KERN_INFO, npe, "invalid offset 0x%X of "
+ "firmware block #%i\n", blk->offset, i);
+ goto err;
+ }
+
+ cb = (struct dl_codeblock*)&image->data[blk->offset];
+ if (blk->type == FW_BLOCK_TYPE_INSTR) {
+ if (cb->npe_addr + cb->size > instr_size)
+ goto too_big;
+ cmd = CMD_WR_INS_MEM;
+ } else if (blk->type == FW_BLOCK_TYPE_DATA) {
+ if (cb->npe_addr + cb->size > data_size)
+ goto too_big;
+ cmd = CMD_WR_DATA_MEM;
+ } else {
+ print_npe(KERN_INFO, npe, "invalid firmware block #%i "
+ "type 0x%X\n", i, blk->type);
+ goto err;
+ }
+ if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) {
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't "
+ "fit in firmware image: type %c, start 0x%X,"
+ " length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+ goto err;
+ }
+
+ for (j = 0; j < cb->size; j++)
+ npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]);
+ }
+
+ npe_start(npe);
+ if (!npe_running(npe))
+ print_npe(KERN_ERR, npe, "unable to start\n");
+ release_firmware(fw_entry);
+ return 0;
+
+too_big:
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE "
+ "memory: type %c, start 0x%X, length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+err:
+ release_firmware(fw_entry);
+ return err;
+}
+
+
+struct npe *npe_request(unsigned id)
+{
+ if (id < NPE_COUNT)
+ if (npe_tab[id].valid)
+ if (try_module_get(THIS_MODULE))
+ return &npe_tab[id];
+ return NULL;
+}
+
+void npe_release(struct npe *npe)
+{
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_npe_probe(struct platform_device *pdev)
+{
+ int i, found = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ for (i = 0; i < NPE_COUNT; i++) {
+ struct npe *npe = &npe_tab[i];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ return -ENODEV;
+
+ if (!(ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << i))) {
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
+ i, res->start, res->end);
+ continue; /* NPE already disabled or not present */
+ }
+ npe->regs = devm_ioremap_resource(dev, res);
+ if (!npe->regs)
+ return -ENOMEM;
+
+ if (npe_reset(npe)) {
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
+ i, res->start, res->end);
+ continue;
+ }
+ npe->valid = 1;
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
+ i, res->start, res->end);
+ found++;
+ }
+
+ if (!found)
+ return -ENODEV;
+ return 0;
+}
+
+static int ixp4xx_npe_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < NPE_COUNT; i++)
+ if (npe_tab[i].regs) {
+ npe_reset(&npe_tab[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_npe_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-network-processing-engine",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_npe_driver = {
+ .driver = {
+ .name = "ixp4xx-npe",
+ .of_match_table = of_match_ptr(ixp4xx_npe_of_match),
+ },
+ .probe = ixp4xx_npe_probe,
+ .remove = ixp4xx_npe_remove,
+};
+module_platform_driver(ixp4xx_npe_driver);
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
+
+EXPORT_SYMBOL(npe_names);
+EXPORT_SYMBOL(npe_running);
+EXPORT_SYMBOL(npe_request);
+EXPORT_SYMBOL(npe_release);
+EXPORT_SYMBOL(npe_load_firmware);
+EXPORT_SYMBOL(npe_send_message);
+EXPORT_SYMBOL(npe_recv_message);
+EXPORT_SYMBOL(npe_send_recv_message);
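
As a rough sketch of how the exported NPE API is consumed (illustrative only, not part of the patch; the real users are the IXP4xx Ethernet and HSS drivers, the two-word message below is invented, and npe_name() is assumed to be provided by the npe.h header alongside npe_names[]):

/* Illustrative consumer sketch only -- not part of this patch. */
#include <linux/device.h>
#include <linux/soc/ixp4xx/npe.h>

static int example_bring_up_npe_b(struct device *dev)
{
	struct npe *npe;
	u32 msg[2] = { 0, 0 };	/* hypothetical command words */
	int err;

	npe = npe_request(1);		/* NPE-B */
	if (!npe)
		return -ENODEV;

	if (!npe_running(npe)) {
		/* Loads "NPE-B" via request_firmware() and starts the engine */
		err = npe_load_firmware(npe, npe_name(npe), dev);
		if (err)
			goto release;
	}

	/* Send a message and expect the firmware to echo it back */
	err = npe_send_recv_message(npe, msg, "example command");

release:
	npe_release(npe);
	return err;
}
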
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
new file mode 100644
index 000000000000..13a8a13c9b01
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -0,0 +1,488 @@
+/*
+ * Intel IXP4xx Queue Manager driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+
+static struct qmgr_regs __iomem *qmgr_regs;
+static int qmgr_irq_1;
+static int qmgr_irq_2;
+static spinlock_t qmgr_lock;
+static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
+static void (*irq_handlers[QUEUES])(void *pdev);
+static void *irq_pdevs[QUEUES];
+
+#if DEBUG_QMGR
+char qmgr_queue_descs[QUEUES][32];
+#endif
+
+void qmgr_put_entry(unsigned int queue, u32 val)
+{
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) put %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ __raw_writel(val, &qmgr_regs->acc[queue][0]);
+}
+
+u32 qmgr_get_entry(unsigned int queue)
+{
+ u32 val;
+ val = __raw_readl(&qmgr_regs->acc[queue][0]);
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) get %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ return val;
+}
+
+static int __qmgr_get_stat1(unsigned int queue)
+{
+ return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
+ >> ((queue & 7) << 2)) & 0xF;
+}
+
+static int __qmgr_get_stat2(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
+ >> ((queue & 0xF) << 1)) & 0x3;
+}
+
+/**
+ * qmgr_stat_empty() - checks if a hardware queue is empty
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is empty.
+ */
+int qmgr_stat_empty(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
+}
+
+/**
+ * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is below low watermark.
+ */
+int qmgr_stat_below_low_watermark(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statne_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
+}
+
+/**
+ * qmgr_stat_full() - checks if a hardware queue is full
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is full.
+ */
+int qmgr_stat_full(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statf_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
+}
+
+/**
+ * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue experienced overflow.
+ */
+int qmgr_stat_overflow(unsigned int queue)
+{
+ return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
+}
+
+void qmgr_set_irq(unsigned int queue, int src,
+ void (*handler)(void *pdev), void *pdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ if (queue < HALF_QUEUES) {
+ u32 __iomem *reg;
+ int bit;
+ BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
+ reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
+ bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
+ __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
+ reg);
+ } else
+ /* IRQ source for queues 32-63 is fixed */
+ BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
+
+ irq_handlers[queue] = handler;
+ irq_pdevs[queue] = pdev;
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+
+static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 en_bitmap, src, stat;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
+
+ en_bitmap = qmgr_regs->irqen[0];
+ while (en_bitmap) {
+ i = __fls(en_bitmap); /* number of the last "low" queue */
+ en_bitmap &= ~BIT(i);
+ src = qmgr_regs->irqsrc[i >> 3];
+ stat = qmgr_regs->stat1[i >> 3];
+ if (src & 4) /* the IRQ condition is inverted */
+ stat = ~stat;
+ if (stat & BIT(src & 3)) {
+ irq_handlers[i](irq_pdevs[i]);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 req_bitmap;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
+
+ req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last "high" queue */
+ req_bitmap &= ~BIT(i);
+ irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq(int irq, void *pdev)
+{
+ int i, half = (irq == qmgr_irq_1 ? 0 : 1);
+ u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);
+
+ if (!req_bitmap)
+ return 0;
+ __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */
+
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last queue */
+ req_bitmap &= ~BIT(i);
+ i += half * HALF_QUEUES;
+ irq_handlers[i](irq_pdevs[i]);
+ }
+ return IRQ_HANDLED;
+}
+
+
+void qmgr_enable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
+ &qmgr_regs->irqen[half]);
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+void qmgr_disable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
+ &qmgr_regs->irqen[half]);
+ __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+static inline void shift_mask(u32 *mask)
+{
+ mask[3] = mask[3] << 1 | mask[2] >> 31;
+ mask[2] = mask[2] << 1 | mask[1] >> 31;
+ mask[1] = mask[1] << 1 | mask[0] >> 31;
+ mask[0] <<= 1;
+}
+
+#if DEBUG_QMGR
+int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark,
+ const char *desc_format, const char* name)
+#else
+int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark)
+#endif
+{
+ u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
+ int err;
+
+ BUG_ON(queue >= QUEUES);
+
+ if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
+ return -EINVAL;
+
+ switch (len) {
+ case 16:
+ cfg = 0 << 24;
+ mask[0] = 0x1;
+ break;
+ case 32:
+ cfg = 1 << 24;
+ mask[0] = 0x3;
+ break;
+ case 64:
+ cfg = 2 << 24;
+ mask[0] = 0xF;
+ break;
+ case 128:
+ cfg = 3 << 24;
+ mask[0] = 0xFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cfg |= nearly_empty_watermark << 26;
+ cfg |= nearly_full_watermark << 29;
+ len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
+ mask[1] = mask[2] = mask[3] = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ spin_lock_irq(&qmgr_lock);
+ if (__raw_readl(&qmgr_regs->sram[queue])) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ while (1) {
+ if (!(used_sram_bitmap[0] & mask[0]) &&
+ !(used_sram_bitmap[1] & mask[1]) &&
+ !(used_sram_bitmap[2] & mask[2]) &&
+ !(used_sram_bitmap[3] & mask[3]))
+ break; /* found free space */
+
+ addr++;
+ shift_mask(mask);
+ if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
+ printk(KERN_ERR "qmgr: no free SRAM space for"
+ " queue %i\n", queue);
+ err = -ENOMEM;
+ goto err;
+ }
+ }
+
+ used_sram_bitmap[0] |= mask[0];
+ used_sram_bitmap[1] |= mask[1];
+ used_sram_bitmap[2] |= mask[2];
+ used_sram_bitmap[3] |= mask[3];
+ __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
+#if DEBUG_QMGR
+ snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
+ desc_format, name);
+ printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
+ qmgr_queue_descs[queue], queue, addr);
+#endif
+ spin_unlock_irq(&qmgr_lock);
+ return 0;
+
+err:
+ spin_unlock_irq(&qmgr_lock);
+ module_put(THIS_MODULE);
+ return err;
+}
+
+void qmgr_release_queue(unsigned int queue)
+{
+ u32 cfg, addr, mask[4];
+
+ BUG_ON(queue >= QUEUES); /* not in valid range */
+
+ spin_lock_irq(&qmgr_lock);
+ cfg = __raw_readl(&qmgr_regs->sram[queue]);
+ addr = (cfg >> 14) & 0xFF;
+
+ BUG_ON(!addr); /* not requested */
+
+ switch ((cfg >> 24) & 3) {
+ case 0: mask[0] = 0x1; break;
+ case 1: mask[0] = 0x3; break;
+ case 2: mask[0] = 0xF; break;
+ case 3: mask[0] = 0xFF; break;
+ }
+
+ mask[1] = mask[2] = mask[3] = 0;
+
+ while (addr--)
+ shift_mask(mask);
+
+#if DEBUG_QMGR
+ printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
+ qmgr_queue_descs[queue], queue);
+ qmgr_queue_descs[queue][0] = '\x0';
+#endif
+
+ while ((addr = qmgr_get_entry(queue)))
+ printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+ queue, addr);
+
+ __raw_writel(0, &qmgr_regs->sram[queue]);
+
+ used_sram_bitmap[0] &= ~mask[0];
+ used_sram_bitmap[1] &= ~mask[1];
+ used_sram_bitmap[2] &= ~mask[2];
+ used_sram_bitmap[3] &= ~mask[3];
+ irq_handlers[queue] = NULL; /* catch IRQ bugs */
+ spin_unlock_irq(&qmgr_lock);
+
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_qmgr_probe(struct platform_device *pdev)
+{
+ int i, err;
+ irq_handler_t handler1, handler2;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq1, irq2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ qmgr_regs = devm_ioremap_resource(dev, res);
+ if (!qmgr_regs)
+ return -ENOMEM;
+
+ irq1 = platform_get_irq(pdev, 0);
+ if (irq1 <= 0)
+ return irq1 ? irq1 : -EINVAL;
+ qmgr_irq_1 = irq1;
+ irq2 = platform_get_irq(pdev, 1);
+ if (irq2 <= 0)
+ return irq2 ? irq2 : -EINVAL;
+ qmgr_irq_2 = irq2;
+
+ /* reset qmgr registers */
+ for (i = 0; i < 4; i++) {
+ __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
+ __raw_writel(0, &qmgr_regs->irqsrc[i]);
+ }
+ for (i = 0; i < 2; i++) {
+ __raw_writel(0, &qmgr_regs->stat2[i]);
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
+ __raw_writel(0, &qmgr_regs->irqen[i]);
+ }
+
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
+ __raw_writel(0, &qmgr_regs->statf_h);
+
+ for (i = 0; i < QUEUES; i++)
+ __raw_writel(0, &qmgr_regs->sram[i]);
+
+ if (cpu_is_ixp42x_rev_a0()) {
+ handler1 = qmgr_irq1_a0;
+ handler2 = qmgr_irq2_a0;
+ } else
+ handler1 = handler2 = qmgr_irq;
+
+ err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq1, err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq2, err);
+ return err;
+ }
+
+ used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
+ spin_lock_init(&qmgr_lock);
+
+ dev_info(dev, "IXP4xx Queue Manager initialized.\n");
+ return 0;
+}
+
+static int ixp4xx_qmgr_remove(struct platform_device *pdev)
+{
+ synchronize_irq(qmgr_irq_1);
+ synchronize_irq(qmgr_irq_2);
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_qmgr_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-ahb-queue-manager",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_qmgr_driver = {
+ .driver = {
+ .name = "ixp4xx-qmgr",
+ .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
+ },
+ .probe = ixp4xx_qmgr_probe,
+ .remove = ixp4xx_qmgr_remove,
+};
+module_platform_driver(ixp4xx_qmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Krzysztof Halasa");
+
+EXPORT_SYMBOL(qmgr_put_entry);
+EXPORT_SYMBOL(qmgr_get_entry);
+EXPORT_SYMBOL(qmgr_stat_empty);
+EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
+EXPORT_SYMBOL(qmgr_stat_full);
+EXPORT_SYMBOL(qmgr_stat_overflow);
+EXPORT_SYMBOL(qmgr_set_irq);
+EXPORT_SYMBOL(qmgr_enable_irq);
+EXPORT_SYMBOL(qmgr_disable_irq);
+#if DEBUG_QMGR
+EXPORT_SYMBOL(qmgr_queue_descs);
+EXPORT_SYMBOL(qmgr_request_queue);
+#else
+EXPORT_SYMBOL(__qmgr_request_queue);
+#endif
+EXPORT_SYMBOL(qmgr_release_queue);
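
For context, a hedged sketch of the queue manager API from a consumer's point of view (illustrative only, not part of the patch; the queue number, length and watermarks are invented, the desc_format/name arguments of qmgr_request_queue() are only used when DEBUG_QMGR is set, and QUEUE_IRQ_SRC_NOT_EMPTY is assumed to come from the qmgr.h header alongside the sources referenced above):

/* Illustrative consumer sketch only -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/soc/ixp4xx/qmgr.h>

#define EXAMPLE_RXQ	3	/* hypothetical queue number */

static void example_rx_irq(void *pdev)
{
	u32 entry;

	/* Drain everything currently queued; 0 means the queue is empty */
	while ((entry = qmgr_get_entry(EXAMPLE_RXQ)))
		pr_debug("rx descriptor at 0x%08x\n", entry);
}

static int example_setup_queue(void *pdev)
{
	int err;

	/* 32-dword queue, nearly-empty/nearly-full watermarks of 0 */
	err = qmgr_request_queue(EXAMPLE_RXQ, 32, 0, 0,
				 "example RX queue %s", "eth0");
	if (err)
		return err;

	qmgr_set_irq(EXAMPLE_RXQ, QUEUE_IRQ_SRC_NOT_EMPTY,
		     example_rx_irq, pdev);
	qmgr_enable_irq(EXAMPLE_RXQ);
	return 0;
}

static void example_teardown_queue(void)
{
	qmgr_disable_irq(EXAMPLE_RXQ);
	qmgr_release_queue(EXAMPLE_RXQ);
}
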
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index 353b07e40176..e84eb4e59f58 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -4,6 +4,7 @@
config SUNXI_SRAM
bool
default ARCH_SUNXI
+ select REGMAP_MMIO
help
Say y here to enable the SRAM controller support. This
device is responsible for mapping the SRAM in the sunXi SoCs
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 19c8efb9a5ee..53b55b79c4af 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -4,7 +4,7 @@
menuconfig SOUNDWIRE
bool "SoundWire support"
- ---help---
+ help
SoundWire is a 2-Pin interface with data and clock line ratified
by the MIPI Alliance. SoundWire is used for transporting data
typically related to audio functions. SoundWire interface is
@@ -28,7 +28,7 @@ config SOUNDWIRE_INTEL
select SOUNDWIRE_CADENCE
select SOUNDWIRE_BUS
depends on X86 && ACPI && SND_SOC
- ---help---
+ help
SoundWire Intel Master driver.
If you have an Intel platform which has a SoundWire Master then
enable this config option to get the SoundWire support for that
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 1cbfedfc20ef..aac35fc3cf22 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -21,12 +21,12 @@ int sdw_add_bus_master(struct sdw_bus *bus)
int ret;
if (!bus->dev) {
- pr_err("SoundWire bus has no device");
+ pr_err("SoundWire bus has no device\n");
return -ENODEV;
}
if (!bus->ops) {
- dev_err(bus->dev, "SoundWire Bus ops are not set");
+ dev_err(bus->dev, "SoundWire Bus ops are not set\n");
return -EINVAL;
}
@@ -43,13 +43,14 @@ int sdw_add_bus_master(struct sdw_bus *bus)
if (bus->ops->read_prop) {
ret = bus->ops->read_prop(bus);
if (ret < 0) {
- dev_err(bus->dev, "Bus read properties failed:%d", ret);
+ dev_err(bus->dev,
+ "Bus read properties failed:%d\n", ret);
return ret;
}
}
/*
- * Device numbers in SoundWire are 0 thru 15. Enumeration device
+ * Device numbers in SoundWire are 0 through 15. Enumeration device
* number (0), Broadcast device number (15), Group numbers (12 and
* 13) and Master device number (14) are not used for assignment so
* mask these and other higher bits.
@@ -172,7 +173,8 @@ static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
}
static inline int do_transfer_defer(struct sdw_bus *bus,
- struct sdw_msg *msg, struct sdw_defer *defer)
+ struct sdw_msg *msg,
+ struct sdw_defer *defer)
{
int retry = bus->prop.err_threshold;
enum sdw_command_response resp;
@@ -224,7 +226,7 @@ int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
ret = do_transfer(bus, msg);
if (ret != 0 && ret != -ENODATA)
dev_err(bus->dev, "trf on Slave %d failed:%d\n",
- msg->dev_num, ret);
+ msg->dev_num, ret);
if (msg->page)
sdw_reset_page(bus, msg->dev_num);
@@ -243,7 +245,7 @@ int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
* Caller needs to hold the msg_lock lock while calling this
*/
int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
- struct sdw_defer *defer)
+ struct sdw_defer *defer)
{
int ret;
@@ -253,7 +255,7 @@ int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
ret = do_transfer_defer(bus, msg, defer);
if (ret != 0 && ret != -ENODATA)
dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
- msg->dev_num, ret);
+ msg->dev_num, ret);
if (msg->page)
sdw_reset_page(bus, msg->dev_num);
@@ -261,9 +263,8 @@ int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
return ret;
}
-
int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
- u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
+ u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
{
memset(msg, 0, sizeof(*msg));
msg->addr = addr; /* addr is 16 bit and truncated here */
@@ -271,8 +272,6 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
msg->dev_num = dev_num;
msg->flags = flags;
msg->buf = buf;
- msg->ssp_sync = false;
- msg->page = false;
if (addr < SDW_REG_NO_PAGE) { /* no paging area */
return 0;
@@ -284,7 +283,7 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
if (slave && !slave->prop.paging_support)
return 0;
- /* no need for else as that will fall thru to paging */
+ /* no need for else as that will fall-through to paging */
}
/* paging mandatory */
@@ -298,7 +297,7 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
return -EINVAL;
} else if (!slave->prop.paging_support) {
dev_err(&slave->dev,
- "address %x needs paging but no support", addr);
+ "address %x needs paging but no support\n", addr);
return -EINVAL;
}
@@ -323,7 +322,7 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
int ret;
ret = sdw_fill_msg(&msg, slave, addr, count,
- slave->dev_num, SDW_MSG_FLAG_READ, val);
+ slave->dev_num, SDW_MSG_FLAG_READ, val);
if (ret < 0)
return ret;
@@ -351,7 +350,7 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
int ret;
ret = sdw_fill_msg(&msg, slave, addr, count,
- slave->dev_num, SDW_MSG_FLAG_WRITE, val);
+ slave->dev_num, SDW_MSG_FLAG_WRITE, val);
if (ret < 0)
return ret;
@@ -393,7 +392,6 @@ EXPORT_SYMBOL(sdw_read);
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
{
return sdw_nwrite(slave, addr, 1, &value);
-
}
EXPORT_SYMBOL(sdw_write);
@@ -416,11 +414,10 @@ static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
-
- if ((slave->id.unique_id != id.unique_id) ||
- (slave->id.mfg_id != id.mfg_id) ||
- (slave->id.part_id != id.part_id) ||
- (slave->id.class_id != id.class_id))
+ if (slave->id.unique_id != id.unique_id ||
+ slave->id.mfg_id != id.mfg_id ||
+ slave->id.part_id != id.part_id ||
+ slave->id.class_id != id.class_id)
return -ENODEV;
return 0;
@@ -457,24 +454,23 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
dev_num = sdw_get_device_num(slave);
mutex_unlock(&slave->bus->bus_lock);
if (dev_num < 0) {
- dev_err(slave->bus->dev, "Get dev_num failed: %d",
- dev_num);
+ dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
+ dev_num);
return dev_num;
}
} else {
dev_info(slave->bus->dev,
- "Slave already registered dev_num:%d",
- slave->dev_num);
+ "Slave already registered dev_num:%d\n",
+ slave->dev_num);
/* Clear the slave->dev_num to transfer message on device 0 */
dev_num = slave->dev_num;
slave->dev_num = 0;
-
}
ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
if (ret < 0) {
- dev_err(&slave->dev, "Program device_num failed: %d", ret);
+ dev_err(&slave->dev, "Program device_num failed: %d\n", ret);
return ret;
}
@@ -485,9 +481,9 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
}
void sdw_extract_slave_id(struct sdw_bus *bus,
- u64 addr, struct sdw_slave_id *id)
+ u64 addr, struct sdw_slave_id *id)
{
- dev_dbg(bus->dev, "SDW Slave Addr: %llx", addr);
+ dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
/*
* Spec definition
@@ -507,10 +503,9 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
id->class_id = addr & GENMASK(7, 0);
dev_dbg(bus->dev,
- "SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x",
+ "SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x\n",
id->class_id, id->part_id, id->mfg_id,
id->unique_id, id->sdw_version);
-
}
static int sdw_program_device_num(struct sdw_bus *bus)
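As a quick illustration of the field extraction in the hunk above: GENMASK(h, l) builds a mask with bits h..l set, so ANDing the 48-bit enumeration address with GENMASK(7, 0) yields the class ID (the address value below is hypothetical):

u64 addr = 0x3012abcd4502ULL;		/* made-up 48-bit DevID address  */
u8 class_id = addr & GENMASK(7, 0);	/* GENMASK(7, 0) == 0xff -> 0x02 */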
@@ -525,7 +520,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
/* No Slave, so use raw xfer api */
ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
- SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
+ SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
if (ret < 0)
return ret;
@@ -564,7 +559,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
ret = sdw_assign_device_num(slave);
if (ret) {
dev_err(slave->bus->dev,
- "Assign dev_num failed:%d",
+ "Assign dev_num failed:%d\n",
ret);
return ret;
}
@@ -573,9 +568,9 @@ static int sdw_program_device_num(struct sdw_bus *bus)
}
}
- if (found == false) {
+ if (!found) {
/* TODO: Park this device in Group 13 */
- dev_err(bus->dev, "Slave Entry not found");
+ dev_err(bus->dev, "Slave Entry not found\n");
}
count++;
@@ -592,7 +587,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
}
static void sdw_modify_slave_status(struct sdw_slave *slave,
- enum sdw_slave_status status)
+ enum sdw_slave_status status)
{
mutex_lock(&slave->bus->bus_lock);
slave->status = status;
@@ -600,7 +595,7 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
}
int sdw_configure_dpn_intr(struct sdw_slave *slave,
- int port, bool enable, int mask)
+ int port, bool enable, int mask)
{
u32 addr;
int ret;
@@ -620,7 +615,7 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
ret = sdw_update(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
if (ret < 0)
dev_err(slave->bus->dev,
- "SDW_DPN_INTMASK write failed:%d", val);
+ "SDW_DPN_INTMASK write failed:%d\n", val);
return ret;
}
@@ -644,7 +639,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_SCP_INTMASK1 write failed:%d", ret);
+ "SDW_SCP_INTMASK1 write failed:%d\n", ret);
return ret;
}
@@ -659,7 +654,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_DP0_INTMASK read failed:%d", ret);
+ "SDW_DP0_INTMASK read failed:%d\n", ret);
return val;
}
@@ -674,14 +669,13 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
status = sdw_read(slave, SDW_DP0_INT);
if (status < 0) {
dev_err(slave->bus->dev,
- "SDW_DP0_INT read failed:%d", status);
+ "SDW_DP0_INT read failed:%d\n", status);
return status;
}
do {
-
if (status & SDW_DP0_INT_TEST_FAIL) {
- dev_err(&slave->dev, "Test fail for port 0");
+ dev_err(&slave->dev, "Test fail for port 0\n");
clear |= SDW_DP0_INT_TEST_FAIL;
}
@@ -696,7 +690,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
}
if (status & SDW_DP0_INT_BRA_FAILURE) {
- dev_err(&slave->dev, "BRA failed");
+ dev_err(&slave->dev, "BRA failed\n");
clear |= SDW_DP0_INT_BRA_FAILURE;
}
@@ -712,7 +706,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
ret = sdw_write(slave, SDW_DP0_INT, clear);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_DP0_INT write failed:%d", ret);
+ "SDW_DP0_INT write failed:%d\n", ret);
return ret;
}
@@ -720,7 +714,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
status2 = sdw_read(slave, SDW_DP0_INT);
if (status2 < 0) {
dev_err(slave->bus->dev,
- "SDW_DP0_INT read failed:%d", status2);
+ "SDW_DP0_INT read failed:%d\n", status2);
return status2;
}
status &= status2;
@@ -731,13 +725,13 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
} while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
if (count == SDW_READ_INTR_CLEAR_RETRY)
- dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read");
+ dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read\n");
return ret;
}
static int sdw_handle_port_interrupt(struct sdw_slave *slave,
- int port, u8 *slave_status)
+ int port, u8 *slave_status)
{
u8 clear = 0, impl_int_mask;
int status, status2, ret, count = 0;
@@ -750,15 +744,14 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
status = sdw_read(slave, addr);
if (status < 0) {
dev_err(slave->bus->dev,
- "SDW_DPN_INT read failed:%d", status);
+ "SDW_DPN_INT read failed:%d\n", status);
return status;
}
do {
-
if (status & SDW_DPN_INT_TEST_FAIL) {
- dev_err(&slave->dev, "Test fail for port:%d", port);
+ dev_err(&slave->dev, "Test fail for port:%d\n", port);
clear |= SDW_DPN_INT_TEST_FAIL;
}
@@ -774,7 +767,6 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
impl_int_mask = SDW_DPN_INT_IMPDEF1 |
SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
-
if (status & impl_int_mask) {
clear |= impl_int_mask;
*slave_status = clear;
@@ -784,7 +776,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
ret = sdw_write(slave, addr, clear);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_DPN_INT write failed:%d", ret);
+ "SDW_DPN_INT write failed:%d\n", ret);
return ret;
}
@@ -792,7 +784,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
status2 = sdw_read(slave, addr);
if (status2 < 0) {
dev_err(slave->bus->dev,
- "SDW_DPN_INT read failed:%d", status2);
+ "SDW_DPN_INT read failed:%d\n", status2);
return status2;
}
status &= status2;
@@ -820,17 +812,18 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
/* Read Instat 1, Instat 2 and Instat 3 registers */
- buf = ret = sdw_read(slave, SDW_SCP_INT1);
+ ret = sdw_read(slave, SDW_SCP_INT1);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_SCP_INT1 read failed:%d", ret);
+ "SDW_SCP_INT1 read failed:%d\n", ret);
return ret;
}
+ buf = ret;
ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, buf2);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_SCP_INT2/3 read failed:%d", ret);
+ "SDW_SCP_INT2/3 read failed:%d\n", ret);
return ret;
}
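Splitting the combined buf = ret = sdw_read(...) assignment keeps the full signed return value for the error check before it is narrowed to a data byte; a minimal sketch of the pattern (assuming a slave handle as in the function above):

int ret;
u8 val;

ret = sdw_read(slave, SDW_SCP_INT1);	/* negative errno or register value */
if (ret < 0)
	return ret;			/* test the signed result first     */
val = ret;				/* only then truncate to the byte   */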
@@ -840,12 +833,12 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
* interrupt
*/
if (buf & SDW_SCP_INT1_PARITY) {
- dev_err(&slave->dev, "Parity error detected");
+ dev_err(&slave->dev, "Parity error detected\n");
clear |= SDW_SCP_INT1_PARITY;
}
if (buf & SDW_SCP_INT1_BUS_CLASH) {
- dev_err(&slave->dev, "Bus clash error detected");
+ dev_err(&slave->dev, "Bus clash error detected\n");
clear |= SDW_SCP_INT1_BUS_CLASH;
}
@@ -869,8 +862,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
port = port >> SDW_REG_SHIFT(SDW_SCP_INT1_PORT0_3);
for_each_set_bit(bit, &port, 8) {
sdw_handle_port_interrupt(slave, bit,
- &port_status[bit]);
-
+ &port_status[bit]);
}
/* Check if cascade 2 interrupt is present */
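for_each_set_bit() in the loop above visits only the bits that are set in the port-status word, so each flagged port is handled exactly once; a small usage sketch (values made up):

unsigned long ports = 0x5;	/* ports 0 and 2 flagged an interrupt */
unsigned int bit;

for_each_set_bit(bit, &ports, 8)
	pr_info("handle port %u\n", bit);	/* runs for bit = 0, then bit = 2 */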
@@ -898,11 +890,11 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
}
/* Update the Slave driver */
- if (slave_notify && (slave->ops) &&
- (slave->ops->interrupt_callback)) {
+ if (slave_notify && slave->ops &&
+ slave->ops->interrupt_callback) {
slave_intr.control_port = clear;
memcpy(slave_intr.port, &port_status,
- sizeof(slave_intr.port));
+ sizeof(slave_intr.port));
slave->ops->interrupt_callback(slave, &slave_intr);
}
@@ -911,7 +903,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
ret = sdw_write(slave, SDW_SCP_INT1, clear);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_SCP_INT1 write failed:%d", ret);
+ "SDW_SCP_INT1 write failed:%d\n", ret);
return ret;
}
@@ -919,17 +911,18 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
* Read status again to ensure no new interrupts arrived
* while servicing interrupts.
*/
- _buf = ret = sdw_read(slave, SDW_SCP_INT1);
+ ret = sdw_read(slave, SDW_SCP_INT1);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_SCP_INT1 read failed:%d", ret);
+ "SDW_SCP_INT1 read failed:%d\n", ret);
return ret;
}
+ _buf = ret;
ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, _buf2);
if (ret < 0) {
dev_err(slave->bus->dev,
- "SDW_SCP_INT2/3 read failed:%d", ret);
+ "SDW_SCP_INT2/3 read failed:%d\n", ret);
return ret;
}
@@ -949,15 +942,15 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
if (count == SDW_READ_INTR_CLEAR_RETRY)
- dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read");
+ dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read\n");
return ret;
}
static int sdw_update_slave_status(struct sdw_slave *slave,
- enum sdw_slave_status status)
+ enum sdw_slave_status status)
{
- if ((slave->ops) && (slave->ops->update_status))
+ if (slave->ops && slave->ops->update_status)
return slave->ops->update_status(slave, status);
return 0;
@@ -969,7 +962,7 @@ static int sdw_update_slave_status(struct sdw_slave *slave,
* @status: Status for all Slave(s)
*/
int sdw_handle_slave_status(struct sdw_bus *bus,
- enum sdw_slave_status status[])
+ enum sdw_slave_status status[])
{
enum sdw_slave_status prev_status;
struct sdw_slave *slave;
@@ -978,7 +971,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
if (status[0] == SDW_SLAVE_ATTACHED) {
ret = sdw_program_device_num(bus);
if (ret)
- dev_err(bus->dev, "Slave attach failed: %d", ret);
+ dev_err(bus->dev, "Slave attach failed: %d\n", ret);
}
/* Continue to check other slave statuses */
@@ -1006,7 +999,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
ret = sdw_handle_slave_alerts(slave);
if (ret)
dev_err(bus->dev,
- "Slave %d alert handling failed: %d",
+ "Slave %d alert handling failed: %d\n",
i, ret);
break;
@@ -1023,22 +1016,21 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
ret = sdw_initialize_slave(slave);
if (ret)
dev_err(bus->dev,
- "Slave %d initialization failed: %d",
+ "Slave %d initialization failed: %d\n",
i, ret);
break;
default:
- dev_err(bus->dev, "Invalid slave %d status:%d",
- i, status[i]);
+ dev_err(bus->dev, "Invalid slave %d status:%d\n",
+ i, status[i]);
break;
}
ret = sdw_update_slave_status(slave, status[i]);
if (ret)
dev_err(slave->bus->dev,
- "Update Slave status failed:%d", ret);
-
+ "Update Slave status failed:%d\n", ret);
}
return ret;
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index c77de05b8100..3048ca153f22 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -1,5 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-// Copyright(c) 2015-17 Intel Corporation.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
#ifndef __SDW_BUS_H
#define __SDW_BUS_H
@@ -16,7 +16,7 @@ static inline int sdw_acpi_find_slaves(struct sdw_bus *bus)
#endif
void sdw_extract_slave_id(struct sdw_bus *bus,
- u64 addr, struct sdw_slave_id *id);
+ u64 addr, struct sdw_slave_id *id);
enum {
SDW_MSG_FLAG_READ = 0,
@@ -116,19 +116,19 @@ struct sdw_master_runtime {
};
struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
- enum sdw_data_direction direction,
- unsigned int port_num);
+ enum sdw_data_direction direction,
+ unsigned int port_num);
int sdw_configure_dpn_intr(struct sdw_slave *slave, int port,
- bool enable, int mask);
+ bool enable, int mask);
int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg);
int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
- struct sdw_defer *defer);
+ struct sdw_defer *defer);
#define SDW_READ_INTR_CLEAR_RETRY 10
int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
- u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf);
+ u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf);
/* Read-Modify-Write Slave register */
static inline int
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 283b2832728e..2655602f0cfb 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -107,7 +107,7 @@ static int sdw_drv_probe(struct device *dev)
slave->prop.clk_stop_timeout = 300;
slave->bus->clk_stop_timeout = max_t(u32, slave->bus->clk_stop_timeout,
- slave->prop.clk_stop_timeout);
+ slave->prop.clk_stop_timeout);
return 0;
}
@@ -148,7 +148,7 @@ int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
if (!drv->probe) {
pr_err("driver %s didn't provide SDW probe routine\n",
- drv->name);
+ drv->name);
return -EINVAL;
}
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index cb6a331f448a..682789bb8ab3 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -42,7 +42,6 @@
#define CDNS_MCP_CONTROL_CMD_ACCEPT BIT(1)
#define CDNS_MCP_CONTROL_BLOCK_WAKEUP BIT(0)
-
#define CDNS_MCP_CMDCTRL 0x8
#define CDNS_MCP_SSPSTAT 0xC
#define CDNS_MCP_FRAME_SHAPE 0x10
@@ -226,9 +225,9 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
/*
* IO Calls
*/
-static enum sdw_command_response cdns_fill_msg_resp(
- struct sdw_cdns *cdns,
- struct sdw_msg *msg, int count, int offset)
+static enum sdw_command_response
+cdns_fill_msg_resp(struct sdw_cdns *cdns,
+ struct sdw_msg *msg, int count, int offset)
{
int nack = 0, no_ack = 0;
int i;
@@ -263,7 +262,7 @@ static enum sdw_command_response cdns_fill_msg_resp(
static enum sdw_command_response
_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
- int offset, int count, bool defer)
+ int offset, int count, bool defer)
{
unsigned long time;
u32 base, i, data;
@@ -296,7 +295,7 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
/* wait for timeout or response */
time = wait_for_completion_timeout(&cdns->tx_complete,
- msecs_to_jiffies(CDNS_TX_TIMEOUT));
+ msecs_to_jiffies(CDNS_TX_TIMEOUT));
if (!time) {
dev_err(cdns->dev, "IO transfer timed out\n");
msg->len = 0;
@@ -306,8 +305,8 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
return cdns_fill_msg_resp(cdns, msg, count, offset);
}
-static enum sdw_command_response cdns_program_scp_addr(
- struct sdw_cdns *cdns, struct sdw_msg *msg)
+static enum sdw_command_response
+cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
{
int nack = 0, no_ack = 0;
unsigned long time;
@@ -336,7 +335,7 @@ static enum sdw_command_response cdns_program_scp_addr(
cdns_writel(cdns, base, data[1]);
time = wait_for_completion_timeout(&cdns->tx_complete,
- msecs_to_jiffies(CDNS_TX_TIMEOUT));
+ msecs_to_jiffies(CDNS_TX_TIMEOUT));
if (!time) {
dev_err(cdns->dev, "SCP Msg trf timed out\n");
msg->len = 0;
@@ -347,10 +346,10 @@ static enum sdw_command_response cdns_program_scp_addr(
for (i = 0; i < 2; i++) {
if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
no_ack = 1;
- dev_err(cdns->dev, "Program SCP Ack not received");
+ dev_err(cdns->dev, "Program SCP Ack not received\n");
if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
nack = 1;
- dev_err(cdns->dev, "Program SCP NACK received");
+ dev_err(cdns->dev, "Program SCP NACK received\n");
}
}
}
@@ -358,11 +357,11 @@ static enum sdw_command_response cdns_program_scp_addr(
/* For NACK, NO ack, don't return err if we are in Broadcast mode */
if (nack) {
dev_err(cdns->dev,
- "SCP_addrpage NACKed for Slave %d", msg->dev_num);
+ "SCP_addrpage NACKed for Slave %d\n", msg->dev_num);
return SDW_CMD_FAIL;
} else if (no_ack) {
dev_dbg(cdns->dev,
- "SCP_addrpage ignored for Slave %d", msg->dev_num);
+ "SCP_addrpage ignored for Slave %d\n", msg->dev_num);
return SDW_CMD_IGNORED;
}
@@ -410,7 +409,7 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
- CDNS_MCP_CMD_LEN, false);
+ CDNS_MCP_CMD_LEN, false);
if (ret < 0)
goto exit;
}
@@ -419,7 +418,7 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
goto exit;
ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
- msg->len % CDNS_MCP_CMD_LEN, false);
+ msg->len % CDNS_MCP_CMD_LEN, false);
exit:
return ret;
@@ -428,7 +427,7 @@ EXPORT_SYMBOL(cdns_xfer_msg);
enum sdw_command_response
cdns_xfer_msg_defer(struct sdw_bus *bus,
- struct sdw_msg *msg, struct sdw_defer *defer)
+ struct sdw_msg *msg, struct sdw_defer *defer)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
int cmd = 0, ret;
@@ -483,7 +482,7 @@ static void cdns_read_response(struct sdw_cdns *cdns)
}
static int cdns_update_slave_status(struct sdw_cdns *cdns,
- u32 slave0, u32 slave1)
+ u32 slave0, u32 slave1)
{
enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
bool is_slave = false;
@@ -526,8 +525,8 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
/* first check if Slave reported multiple status */
if (set_status > 1) {
dev_warn(cdns->dev,
- "Slave reported multiple Status: %d\n",
- status[i]);
+ "Slave reported multiple Status: %d\n",
+ status[i]);
/*
* TODO: we need to reread the status here by
* issuing a PING cmd
@@ -566,15 +565,15 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
if (cdns->defer) {
cdns_fill_msg_resp(cdns, cdns->defer->msg,
- cdns->defer->length, 0);
+ cdns->defer->length, 0);
complete(&cdns->defer->complete);
cdns->defer = NULL;
- } else
+ } else {
complete(&cdns->tx_complete);
+ }
}
if (int_status & CDNS_MCP_INT_CTRL_CLASH) {
-
/* Slave is driving bit slot during control word */
dev_err_ratelimited(cdns->dev, "Bus clash for control word\n");
int_status |= CDNS_MCP_INT_CTRL_CLASH;
@@ -592,7 +591,7 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
if (int_status & CDNS_MCP_INT_SLAVE_MASK) {
/* Mask the Slave interrupt and wake thread */
cdns_updatel(cdns, CDNS_MCP_INTMASK,
- CDNS_MCP_INT_SLAVE_MASK, 0);
+ CDNS_MCP_INT_SLAVE_MASK, 0);
int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
ret = IRQ_WAKE_THREAD;
@@ -625,7 +624,7 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
/* clear and unmask Slave interrupt now */
cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
cdns_updatel(cdns, CDNS_MCP_INTMASK,
- CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
+ CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
return IRQ_HANDLED;
}
@@ -639,9 +638,9 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
u32 mask;
cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0,
- CDNS_MCP_SLAVE_INTMASK0_MASK);
+ CDNS_MCP_SLAVE_INTMASK0_MASK);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1,
- CDNS_MCP_SLAVE_INTMASK1_MASK);
+ CDNS_MCP_SLAVE_INTMASK1_MASK);
mask = CDNS_MCP_INT_SLAVE_RSVD | CDNS_MCP_INT_SLAVE_ALERT |
CDNS_MCP_INT_SLAVE_ATTACH | CDNS_MCP_INT_SLAVE_NATTACH |
@@ -663,17 +662,17 @@ int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns)
_cdns_enable_interrupt(cdns);
ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
- CDNS_MCP_CONFIG_UPDATE_BIT);
+ CDNS_MCP_CONFIG_UPDATE_BIT);
if (ret < 0)
- dev_err(cdns->dev, "Config update timedout");
+ dev_err(cdns->dev, "Config update timedout\n");
return ret;
}
EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
static int cdns_allocate_pdi(struct sdw_cdns *cdns,
- struct sdw_cdns_pdi **stream,
- u32 num, u32 pdi_offset)
+ struct sdw_cdns_pdi **stream,
+ u32 num, u32 pdi_offset)
{
struct sdw_cdns_pdi *pdi;
int i;
@@ -701,7 +700,7 @@ static int cdns_allocate_pdi(struct sdw_cdns *cdns,
* @config: Stream configurations
*/
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
- struct sdw_cdns_stream_config config)
+ struct sdw_cdns_stream_config config)
{
struct sdw_cdns_streams *stream;
int offset, i, ret;
@@ -770,7 +769,7 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
cdns->num_ports += stream->num_pdi;
cdns->ports = devm_kcalloc(cdns->dev, cdns->num_ports,
- sizeof(*cdns->ports), GFP_KERNEL);
+ sizeof(*cdns->ports), GFP_KERNEL);
if (!cdns->ports) {
ret = -ENOMEM;
return ret;
@@ -796,7 +795,7 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
/* Exit clock stop */
ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CLK_STOP_CLR);
+ CDNS_MCP_CONTROL_CLK_STOP_CLR);
if (ret < 0) {
dev_err(cdns->dev, "Couldn't exit from clock stop\n");
return ret;
@@ -816,7 +815,7 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
/* Set cmd accept mode */
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
- CDNS_MCP_CONTROL_CMD_ACCEPT);
+ CDNS_MCP_CONTROL_CMD_ACCEPT);
/* Configure mcp config */
val = cdns_readl(cdns, CDNS_MCP_CONFIG);
@@ -853,7 +852,7 @@ int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params)
int divider;
if (!params->curr_dr_freq) {
- dev_err(cdns->dev, "NULL curr_dr_freq");
+ dev_err(cdns->dev, "NULL curr_dr_freq\n");
return -EINVAL;
}
@@ -873,7 +872,7 @@ int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params)
EXPORT_SYMBOL(cdns_bus_conf);
static int cdns_port_params(struct sdw_bus *bus,
- struct sdw_port_params *p_params, unsigned int bank)
+ struct sdw_port_params *p_params, unsigned int bank)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
int dpn_config = 0, dpn_config_off;
@@ -898,8 +897,8 @@ static int cdns_port_params(struct sdw_bus *bus,
}
static int cdns_transport_params(struct sdw_bus *bus,
- struct sdw_transport_params *t_params,
- enum sdw_reg_bank bank)
+ struct sdw_transport_params *t_params,
+ enum sdw_reg_bank bank)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
int dpn_offsetctrl = 0, dpn_offsetctrl_off;
@@ -952,7 +951,7 @@ static int cdns_transport_params(struct sdw_bus *bus,
}
static int cdns_port_enable(struct sdw_bus *bus,
- struct sdw_enable_ch *enable_ch, unsigned int bank)
+ struct sdw_enable_ch *enable_ch, unsigned int bank)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
int dpn_chnen_off, ch_mask;
@@ -988,7 +987,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
EXPORT_SYMBOL(sdw_cdns_probe);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, bool pcm, int direction)
+ void *stream, bool pcm, int direction)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_cdns_dma_data *dma;
@@ -1026,12 +1025,13 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
* Find and return a free PDI for a given PDI array
*/
static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
- unsigned int num, struct sdw_cdns_pdi *pdi)
+ unsigned int num,
+ struct sdw_cdns_pdi *pdi)
{
int i;
for (i = 0; i < num; i++) {
- if (pdi[i].assigned == true)
+ if (pdi[i].assigned)
continue;
pdi[i].assigned = true;
return &pdi[i];
@@ -1050,8 +1050,8 @@ static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
* @pdi: PDI to be used
*/
void sdw_cdns_config_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_port *port,
- u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
+ struct sdw_cdns_port *port,
+ u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
{
u32 offset, val = 0;
@@ -1076,13 +1076,13 @@ EXPORT_SYMBOL(sdw_cdns_config_stream);
* @ch_count: Channel count
*/
static int cdns_get_num_pdi(struct sdw_cdns *cdns,
- struct sdw_cdns_pdi *pdi,
- unsigned int num, u32 ch_count)
+ struct sdw_cdns_pdi *pdi,
+ unsigned int num, u32 ch_count)
{
int i, pdis = 0;
for (i = 0; i < num; i++) {
- if (pdi[i].assigned == true)
+ if (pdi[i].assigned)
continue;
if (pdi[i].ch_count < ch_count)
@@ -1139,8 +1139,8 @@ EXPORT_SYMBOL(sdw_cdns_get_stream);
* @dir: Data direction
*/
int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir)
+ struct sdw_cdns_streams *stream,
+ struct sdw_cdns_port *port, u32 ch, u32 dir)
{
struct sdw_cdns_pdi *pdi = NULL;
@@ -1167,7 +1167,7 @@ int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
EXPORT_SYMBOL(sdw_cdns_alloc_stream);
void sdw_cdns_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+ struct snd_soc_dai *dai)
{
struct sdw_cdns_dma_data *dma;
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index eb902b19c5a4..fe2af62958b1 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -1,5 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-// Copyright(c) 2015-17 Intel Corporation.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
#include <sound/soc.h>
#ifndef __SDW_CADENCE_H
@@ -160,24 +160,24 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
int sdw_cdns_init(struct sdw_cdns *cdns);
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
- struct sdw_cdns_stream_config config);
+ struct sdw_cdns_stream_config config);
int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
int sdw_cdns_get_stream(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
u32 ch, u32 dir);
int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir);
+ struct sdw_cdns_streams *stream,
+ struct sdw_cdns_port *port, u32 ch, u32 dir);
void sdw_cdns_config_stream(struct sdw_cdns *cdns, struct sdw_cdns_port *port,
- u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
+ u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
void sdw_cdns_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai);
+ struct snd_soc_dai *dai);
int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai,
- void *stream, int direction);
+ void *stream, int direction);
int sdw_cdns_pdm_set_stream(struct snd_soc_dai *dai,
- void *stream, int direction);
+ void *stream, int direction);
enum sdw_command_response
cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
@@ -187,7 +187,7 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg);
enum sdw_command_response
cdns_xfer_msg_defer(struct sdw_bus *bus,
- struct sdw_msg *msg, struct sdw_defer *defer);
+ struct sdw_msg *msg, struct sdw_defer *defer);
enum sdw_command_response
cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
@@ -195,5 +195,5 @@ cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, bool pcm, int direction);
+ void *stream, bool pcm, int direction);
#endif /* __SDW_CADENCE_H */
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index fd8d034cfec1..31336b0271b0 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
@@ -23,18 +24,18 @@
#define SDW_SHIM_IPPTR 0x8
#define SDW_SHIM_SYNC 0xC
-#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * x)
-#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * x)
-#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * x)
-#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * x)
-#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * x)
-#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * x)
+#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x))
+#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x))
+#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x))
+#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x))
+#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x))
+#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x))
-#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * x) + (0x2 * y))
-#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * x) + (0x2 * y))
-#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * x)
-#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * x)
-#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * x)
+#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y)))
+#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y)))
+#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * (x))
+#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x))
+#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x))
#define SDW_SHIM_WAKEEN 0x190
#define SDW_SHIM_WAKESTS 0x192
@@ -81,7 +82,7 @@
#define SDW_SHIM_WAKESTS_STATUS BIT(0)
/* Intel ALH Register definitions */
-#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * x))
+#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x)))
#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3
#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
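The added parentheses around the macro arguments matter as soon as the argument is an expression rather than a single identifier; a minimal sketch of the difference (macro names here are illustrative, not from the driver):

#define CTLSCAP_BAD(x)	(0x010 + 0x60 * x)	/* argument not parenthesized */
#define CTLSCAP_GOOD(x)	(0x010 + 0x60 * (x))	/* argument parenthesized     */

/* CTLSCAP_BAD(i + 1)  expands to 0x010 + 0x60 * i + 1   -- wrong offset    */
/* CTLSCAP_GOOD(i + 1) expands to 0x010 + 0x60 * (i + 1) -- intended offset */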
@@ -235,9 +236,9 @@ static int intel_shim_init(struct sdw_intel *sdw)
/* Set SyncCPU bit */
sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
- SDW_SHIM_SYNC_SYNCCPU);
+ SDW_SHIM_SYNC_SYNCCPU);
if (ret < 0)
- dev_err(sdw->cdns.dev, "Failed to set sync period: %d", ret);
+ dev_err(sdw->cdns.dev, "Failed to set sync period: %d\n", ret);
return ret;
}
@@ -246,7 +247,7 @@ static int intel_shim_init(struct sdw_intel *sdw)
* PDI routines
*/
static void intel_pdi_init(struct sdw_intel *sdw,
- struct sdw_cdns_stream_config *config)
+ struct sdw_cdns_stream_config *config)
{
void __iomem *shim = sdw->res->shim;
unsigned int link_id = sdw->instance;
@@ -295,9 +296,9 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
}
static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
- struct sdw_cdns_pdi *pdi,
- unsigned int num_pdi,
- unsigned int *num_ch, bool pcm)
+ struct sdw_cdns_pdi *pdi,
+ unsigned int num_pdi,
+ unsigned int *num_ch, bool pcm)
{
int i, ch_count = 0;
@@ -312,16 +313,16 @@ static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
}
static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
- struct sdw_cdns_streams *stream, bool pcm)
+ struct sdw_cdns_streams *stream, bool pcm)
{
intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
- &stream->num_ch_bd, pcm);
+ &stream->num_ch_bd, pcm);
intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
- &stream->num_ch_in, pcm);
+ &stream->num_ch_in, pcm);
intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
- &stream->num_ch_out, pcm);
+ &stream->num_ch_out, pcm);
return 0;
}
@@ -386,9 +387,9 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
}
static int intel_config_stream(struct sdw_intel *sdw,
- struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai,
- struct snd_pcm_hw_params *hw_params, int link_id)
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *hw_params, int link_id)
{
if (sdw->res->ops && sdw->res->ops->config_stream)
return sdw->res->ops->config_stream(sdw->res->arg,
@@ -453,9 +454,9 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
sync_reg |= SDW_SHIM_SYNC_SYNCGO;
ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
- SDW_SHIM_SYNC_SYNCGO);
+ SDW_SHIM_SYNC_SYNCGO);
if (ret < 0)
- dev_err(sdw->cdns.dev, "Post bank switch failed: %d", ret);
+ dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
return ret;
}
@@ -465,14 +466,14 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
*/
static struct sdw_cdns_port *intel_alloc_port(struct sdw_intel *sdw,
- u32 ch, u32 dir, bool pcm)
+ u32 ch, u32 dir, bool pcm)
{
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_cdns_port *port = NULL;
int i, ret = 0;
for (i = 0; i < cdns->num_ports; i++) {
- if (cdns->ports[i].assigned == true)
+ if (cdns->ports[i].assigned)
continue;
port = &cdns->ports[i];
@@ -525,8 +526,8 @@ static void intel_port_cleanup(struct sdw_cdns_dma_data *dma)
}
static int intel_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
@@ -555,7 +556,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
}
if (!dma->nr_ports) {
- dev_err(dai->dev, "ports/resources not available");
+ dev_err(dai->dev, "ports/resources not available\n");
return -EINVAL;
}
@@ -574,7 +575,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
/* Inform DSP about PDI stream number */
for (i = 0; i < dma->nr_ports; i++) {
ret = intel_config_stream(sdw, substream, dai, params,
- dma->port[i]->pdi->intel_alh_id);
+ dma->port[i]->pdi->intel_alh_id);
if (ret)
goto port_error;
}
@@ -604,9 +605,9 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
}
ret = sdw_stream_add_master(&cdns->bus, &sconfig,
- pconfig, dma->nr_ports, dma->stream);
+ pconfig, dma->nr_ports, dma->stream);
if (ret) {
- dev_err(cdns->dev, "add master to stream failed:%d", ret);
+ dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
goto stream_error;
}
@@ -634,8 +635,8 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
if (ret < 0)
- dev_err(dai->dev, "remove master from stream %s failed: %d",
- dma->stream->name, ret);
+ dev_err(dai->dev, "remove master from stream %s failed: %d\n",
+ dma->stream->name, ret);
intel_port_cleanup(dma);
kfree(dma->port);
@@ -643,13 +644,13 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
}
static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, int direction)
+ void *stream, int direction)
{
return cdns_set_sdw_stream(dai, stream, true, direction);
}
static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, int direction)
+ void *stream, int direction)
{
return cdns_set_sdw_stream(dai, stream, false, direction);
}
@@ -673,9 +674,9 @@ static const struct snd_soc_component_driver dai_component = {
};
static int intel_create_dai(struct sdw_cdns *cdns,
- struct snd_soc_dai_driver *dais,
- enum intel_pdi_type type,
- u32 num, u32 off, u32 max_ch, bool pcm)
+ struct snd_soc_dai_driver *dais,
+ enum intel_pdi_type type,
+ u32 num, u32 off, u32 max_ch, bool pcm)
{
int i;
@@ -685,14 +686,14 @@ static int intel_create_dai(struct sdw_cdns *cdns,
/* TODO: Read supported rates/formats from hardware */
for (i = off; i < (off + num); i++) {
dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d",
- cdns->instance, i);
+ cdns->instance, i);
if (!dais[i].name)
return -ENOMEM;
if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
- dais[i].playback.stream_name = kasprintf(GFP_KERNEL,
- "SDW%d Tx%d",
- cdns->instance, i);
+ dais[i].playback.stream_name =
+ kasprintf(GFP_KERNEL, "SDW%d Tx%d",
+ cdns->instance, i);
if (!dais[i].playback.stream_name) {
kfree(dais[i].name);
return -ENOMEM;
@@ -705,9 +706,9 @@ static int intel_create_dai(struct sdw_cdns *cdns,
}
if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
- dais[i].capture.stream_name = kasprintf(GFP_KERNEL,
- "SDW%d Rx%d",
- cdns->instance, i);
+ dais[i].capture.stream_name =
+ kasprintf(GFP_KERNEL, "SDW%d Rx%d",
+ cdns->instance, i);
if (!dais[i].capture.stream_name) {
kfree(dais[i].name);
kfree(dais[i].playback.stream_name);
@@ -748,45 +749,45 @@ static int intel_register_dai(struct sdw_intel *sdw)
/* Create PCM DAIs */
stream = &cdns->pcm;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_IN,
- stream->num_in, off, stream->num_ch_in, true);
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, stream->num_in,
+ off, stream->num_ch_in, true);
if (ret)
return ret;
off += cdns->pcm.num_in;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT,
- cdns->pcm.num_out, off, stream->num_ch_out, true);
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
+ off, stream->num_ch_out, true);
if (ret)
return ret;
off += cdns->pcm.num_out;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_BD,
- cdns->pcm.num_bd, off, stream->num_ch_bd, true);
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
+ off, stream->num_ch_bd, true);
if (ret)
return ret;
/* Create PDM DAIs */
stream = &cdns->pdm;
off += cdns->pcm.num_bd;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_IN,
- cdns->pdm.num_in, off, stream->num_ch_in, false);
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
+ off, stream->num_ch_in, false);
if (ret)
return ret;
off += cdns->pdm.num_in;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT,
- cdns->pdm.num_out, off, stream->num_ch_out, false);
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
+ off, stream->num_ch_out, false);
if (ret)
return ret;
off += cdns->pdm.num_bd;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_BD,
- cdns->pdm.num_bd, off, stream->num_ch_bd, false);
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
+ off, stream->num_ch_bd, false);
if (ret)
return ret;
return snd_soc_register_component(cdns->dev, &dai_component,
- dais, num_dai);
+ dais, num_dai);
}
static int intel_prop_read(struct sdw_bus *bus)
@@ -796,8 +797,8 @@ static int intel_prop_read(struct sdw_bus *bus)
/* BIOS is not giving some values correctly. So, lets override them */
bus->prop.num_freq = 1;
- bus->prop.freq = devm_kcalloc(bus->dev, sizeof(*bus->prop.freq),
- bus->prop.num_freq, GFP_KERNEL);
+ bus->prop.freq = devm_kcalloc(bus->dev, bus->prop.num_freq,
+ sizeof(*bus->prop.freq), GFP_KERNEL);
if (!bus->prop.freq)
return -ENOMEM;
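The reordered devm_kcalloc() call above follows the allocator's (dev, count, element size, flags) argument order, which is what lets the count-times-size overflow check see the right operands; the same shape with plain kcalloc() (num_freq is a stand-in for the count):

u32 *freq = kcalloc(num_freq, sizeof(*freq), GFP_KERNEL);	/* count first, then size */
if (!freq)
	return -ENOMEM;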
@@ -872,19 +873,18 @@ static int intel_probe(struct platform_device *pdev)
intel_pdi_ch_update(sdw);
/* Acquire IRQ */
- ret = request_threaded_irq(sdw->res->irq, sdw_cdns_irq,
- sdw_cdns_thread, IRQF_SHARED, KBUILD_MODNAME,
- &sdw->cdns);
+ ret = request_threaded_irq(sdw->res->irq, sdw_cdns_irq, sdw_cdns_thread,
+ IRQF_SHARED, KBUILD_MODNAME, &sdw->cdns);
if (ret < 0) {
dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n",
- sdw->res->irq);
+ sdw->res->irq);
goto err_init;
}
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
- dev_err(sdw->cdns.dev, "DAI registration failed: %d", ret);
+ dev_err(sdw->cdns.dev, "DAI registration failed: %d\n", ret);
snd_soc_unregister_component(sdw->cdns.dev);
goto err_dai;
}
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index c1a5bac6212e..71050e5f643d 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -1,5 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-// Copyright(c) 2015-17 Intel Corporation.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
#ifndef __SDW_INTEL_LOCAL_H
#define __SDW_INTEL_LOCAL_H
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 5c8a20d99878..d3d6b54c5791 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -8,6 +8,8 @@
*/
#include <linux/acpi.h>
+#include <linux/export.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soundwire/sdw_intel.h>
#include "intel.h"
@@ -67,7 +69,7 @@ static struct sdw_intel_ctx
/* Found controller, find links supported */
count = 0;
ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
- "mipi-sdw-master-count", &count, 1);
+ "mipi-sdw-master-count", &count, 1);
/* Don't fail on error, continue and use hw value */
if (ret) {
@@ -85,7 +87,7 @@ static struct sdw_intel_ctx
/* Check count is within bounds */
if (count > SDW_MAX_LINKS) {
dev_err(&adev->dev, "Link count %d exceeds max %d\n",
- count, SDW_MAX_LINKS);
+ count, SDW_MAX_LINKS);
return NULL;
}
@@ -104,7 +106,6 @@ static struct sdw_intel_ctx
/* Create SDW Master devices */
for (i = 0; i < count; i++) {
-
link->res.irq = res->irq;
link->res.registers = res->mmio_base + SDW_LINK_BASE
+ (SDW_LINK_SIZE * i);
@@ -145,7 +146,7 @@ link_err:
}
static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
- void *cdata, void **return_value)
+ void *cdata, void **return_value)
{
struct sdw_intel_res *res = cdata;
struct acpi_device *adev;
@@ -172,9 +173,9 @@ void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
- parent_handle, 1,
- sdw_intel_acpi_cb,
- NULL, res, NULL);
+ parent_handle, 1,
+ sdw_intel_acpi_cb,
+ NULL, res, NULL);
if (ACPI_FAILURE(status))
return NULL;
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index fdeba0c3b589..c1f51d6a23d2 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -35,11 +35,12 @@ int sdw_master_read_prop(struct sdw_bus *bus)
int nval, i;
device_property_read_u32(bus->dev,
- "mipi-sdw-sw-interface-revision", &prop->revision);
+ "mipi-sdw-sw-interface-revision",
+ &prop->revision);
/* Find master handle */
snprintf(name, sizeof(name),
- "mipi-sdw-master-%d-subproperties", bus->link_id);
+ "mipi-sdw-master-%d-subproperties", bus->link_id);
link = device_get_named_child_node(bus->dev, name);
if (!link) {
@@ -48,23 +49,23 @@ int sdw_master_read_prop(struct sdw_bus *bus)
}
if (fwnode_property_read_bool(link,
- "mipi-sdw-clock-stop-mode0-supported") == true)
+ "mipi-sdw-clock-stop-mode0-supported"))
prop->clk_stop_mode = SDW_CLK_STOP_MODE0;
if (fwnode_property_read_bool(link,
- "mipi-sdw-clock-stop-mode1-supported") == true)
+ "mipi-sdw-clock-stop-mode1-supported"))
prop->clk_stop_mode |= SDW_CLK_STOP_MODE1;
fwnode_property_read_u32(link,
- "mipi-sdw-max-clock-frequency", &prop->max_freq);
+ "mipi-sdw-max-clock-frequency",
+ &prop->max_freq);
nval = fwnode_property_read_u32_array(link,
"mipi-sdw-clock-frequencies-supported", NULL, 0);
if (nval > 0) {
-
prop->num_freq = nval;
prop->freq = devm_kcalloc(bus->dev, prop->num_freq,
- sizeof(*prop->freq), GFP_KERNEL);
+ sizeof(*prop->freq), GFP_KERNEL);
if (!prop->freq)
return -ENOMEM;
@@ -88,47 +89,49 @@ int sdw_master_read_prop(struct sdw_bus *bus)
nval = fwnode_property_read_u32_array(link,
"mipi-sdw-supported-clock-gears", NULL, 0);
if (nval > 0) {
-
prop->num_clk_gears = nval;
prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears,
- sizeof(*prop->clk_gears), GFP_KERNEL);
+ sizeof(*prop->clk_gears),
+ GFP_KERNEL);
if (!prop->clk_gears)
return -ENOMEM;
fwnode_property_read_u32_array(link,
- "mipi-sdw-supported-clock-gears",
- prop->clk_gears, prop->num_clk_gears);
+ "mipi-sdw-supported-clock-gears",
+ prop->clk_gears,
+ prop->num_clk_gears);
}
fwnode_property_read_u32(link, "mipi-sdw-default-frame-rate",
- &prop->default_frame_rate);
+ &prop->default_frame_rate);
fwnode_property_read_u32(link, "mipi-sdw-default-frame-row-size",
- &prop->default_row);
+ &prop->default_row);
fwnode_property_read_u32(link, "mipi-sdw-default-frame-col-size",
- &prop->default_col);
+ &prop->default_col);
prop->dynamic_frame = fwnode_property_read_bool(link,
"mipi-sdw-dynamic-frame-shape");
fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold",
- &prop->err_threshold);
+ &prop->err_threshold);
return 0;
}
EXPORT_SYMBOL(sdw_master_read_prop);
static int sdw_slave_read_dp0(struct sdw_slave *slave,
- struct fwnode_handle *port, struct sdw_dp0_prop *dp0)
+ struct fwnode_handle *port,
+ struct sdw_dp0_prop *dp0)
{
int nval;
fwnode_property_read_u32(port, "mipi-sdw-port-max-wordlength",
- &dp0->max_word);
+ &dp0->max_word);
fwnode_property_read_u32(port, "mipi-sdw-port-min-wordlength",
- &dp0->min_word);
+ &dp0->min_word);
nval = fwnode_property_read_u32_array(port,
"mipi-sdw-port-wordlength-configs", NULL, 0);
@@ -136,8 +139,8 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave,
dp0->num_words = nval;
dp0->words = devm_kcalloc(&slave->dev,
- dp0->num_words, sizeof(*dp0->words),
- GFP_KERNEL);
+ dp0->num_words, sizeof(*dp0->words),
+ GFP_KERNEL);
if (!dp0->words)
return -ENOMEM;
@@ -146,20 +149,21 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave,
dp0->words, dp0->num_words);
}
- dp0->flow_controlled = fwnode_property_read_bool(
- port, "mipi-sdw-bra-flow-controlled");
+ dp0->flow_controlled = fwnode_property_read_bool(port,
+ "mipi-sdw-bra-flow-controlled");
- dp0->simple_ch_prep_sm = fwnode_property_read_bool(
- port, "mipi-sdw-simplified-channel-prepare-sm");
+ dp0->simple_ch_prep_sm = fwnode_property_read_bool(port,
+ "mipi-sdw-simplified-channel-prepare-sm");
- dp0->device_interrupts = fwnode_property_read_bool(
- port, "mipi-sdw-imp-def-dp0-interrupts-supported");
+ dp0->device_interrupts = fwnode_property_read_bool(port,
+ "mipi-sdw-imp-def-dp0-interrupts-supported");
return 0;
}
static int sdw_slave_read_dpn(struct sdw_slave *slave,
- struct sdw_dpn_prop *dpn, int count, int ports, char *type)
+ struct sdw_dpn_prop *dpn, int count, int ports,
+ char *type)
{
struct fwnode_handle *node;
u32 bit, i = 0;
@@ -173,7 +177,7 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
for_each_set_bit(bit, &addr, 32) {
snprintf(name, sizeof(name),
- "mipi-sdw-dp-%d-%s-subproperties", bit, type);
+ "mipi-sdw-dp-%d-%s-subproperties", bit, type);
dpn[i].num = bit;
@@ -184,18 +188,18 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
}
fwnode_property_read_u32(node, "mipi-sdw-port-max-wordlength",
- &dpn[i].max_word);
+ &dpn[i].max_word);
fwnode_property_read_u32(node, "mipi-sdw-port-min-wordlength",
- &dpn[i].min_word);
+ &dpn[i].min_word);
nval = fwnode_property_read_u32_array(node,
"mipi-sdw-port-wordlength-configs", NULL, 0);
if (nval > 0) {
-
dpn[i].num_words = nval;
dpn[i].words = devm_kcalloc(&slave->dev,
- dpn[i].num_words,
- sizeof(*dpn[i].words), GFP_KERNEL);
+ dpn[i].num_words,
+ sizeof(*dpn[i].words),
+ GFP_KERNEL);
if (!dpn[i].words)
return -ENOMEM;
@@ -205,36 +209,36 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
}
fwnode_property_read_u32(node, "mipi-sdw-data-port-type",
- &dpn[i].type);
+ &dpn[i].type);
fwnode_property_read_u32(node,
- "mipi-sdw-max-grouping-supported",
- &dpn[i].max_grouping);
+ "mipi-sdw-max-grouping-supported",
+ &dpn[i].max_grouping);
dpn[i].simple_ch_prep_sm = fwnode_property_read_bool(node,
"mipi-sdw-simplified-channelprepare-sm");
fwnode_property_read_u32(node,
- "mipi-sdw-port-channelprepare-timeout",
- &dpn[i].ch_prep_timeout);
+ "mipi-sdw-port-channelprepare-timeout",
+ &dpn[i].ch_prep_timeout);
fwnode_property_read_u32(node,
"mipi-sdw-imp-def-dpn-interrupts-supported",
&dpn[i].device_interrupts);
fwnode_property_read_u32(node, "mipi-sdw-min-channel-number",
- &dpn[i].min_ch);
+ &dpn[i].min_ch);
fwnode_property_read_u32(node, "mipi-sdw-max-channel-number",
- &dpn[i].max_ch);
+ &dpn[i].max_ch);
nval = fwnode_property_read_u32_array(node,
"mipi-sdw-channel-number-list", NULL, 0);
if (nval > 0) {
-
dpn[i].num_ch = nval;
dpn[i].ch = devm_kcalloc(&slave->dev, dpn[i].num_ch,
- sizeof(*dpn[i].ch), GFP_KERNEL);
+ sizeof(*dpn[i].ch),
+ GFP_KERNEL);
if (!dpn[i].ch)
return -ENOMEM;
@@ -246,7 +250,6 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
nval = fwnode_property_read_u32_array(node,
"mipi-sdw-channel-combination-list", NULL, 0);
if (nval > 0) {
-
dpn[i].num_ch_combinations = nval;
dpn[i].ch_combinations = devm_kcalloc(&slave->dev,
dpn[i].num_ch_combinations,
@@ -265,13 +268,13 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
"mipi-sdw-modes-supported", &dpn[i].modes);
fwnode_property_read_u32(node, "mipi-sdw-max-async-buffer",
- &dpn[i].max_async_buffer);
+ &dpn[i].max_async_buffer);
dpn[i].block_pack_mode = fwnode_property_read_bool(node,
"mipi-sdw-block-packing-mode");
fwnode_property_read_u32(node, "mipi-sdw-port-encoding-type",
- &dpn[i].port_encoding);
+ &dpn[i].port_encoding);
/* TODO: Read audio mode */
@@ -293,7 +296,7 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
int num_of_ports, nval, i, dp0 = 0;
device_property_read_u32(dev, "mipi-sdw-sw-interface-revision",
- &prop->mipi_revision);
+ &prop->mipi_revision);
prop->wake_capable = device_property_read_bool(dev,
"mipi-sdw-wake-up-unavailable");
@@ -311,10 +314,10 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
"mipi-sdw-simplified-clockstopprepare-sm-supported");
device_property_read_u32(dev, "mipi-sdw-clockstopprepare-timeout",
- &prop->clk_stop_timeout);
+ &prop->clk_stop_timeout);
device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout",
- &prop->ch_prep_timeout);
+ &prop->ch_prep_timeout);
device_property_read_u32(dev,
"mipi-sdw-clockstopprepare-hard-reset-behavior",
@@ -333,22 +336,22 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
"mipi-sdw-port15-read-behavior", &prop->p15_behave);
device_property_read_u32(dev, "mipi-sdw-master-count",
- &prop->master_count);
+ &prop->master_count);
device_property_read_u32(dev, "mipi-sdw-source-port-list",
- &prop->source_ports);
+ &prop->source_ports);
device_property_read_u32(dev, "mipi-sdw-sink-port-list",
- &prop->sink_ports);
+ &prop->sink_ports);
/* Read dp0 properties */
port = device_get_named_child_node(dev, "mipi-sdw-dp-0-subproperties");
if (!port) {
dev_dbg(dev, "DP0 node not found!!\n");
} else {
-
prop->dp0_prop = devm_kzalloc(&slave->dev,
- sizeof(*prop->dp0_prop), GFP_KERNEL);
+ sizeof(*prop->dp0_prop),
+ GFP_KERNEL);
if (!prop->dp0_prop)
return -ENOMEM;
@@ -364,23 +367,25 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
/* Allocate memory for set bits in port lists */
nval = hweight32(prop->source_ports);
prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval,
- sizeof(*prop->src_dpn_prop), GFP_KERNEL);
+ sizeof(*prop->src_dpn_prop),
+ GFP_KERNEL);
if (!prop->src_dpn_prop)
return -ENOMEM;
/* Read dpn properties for source port(s) */
sdw_slave_read_dpn(slave, prop->src_dpn_prop, nval,
- prop->source_ports, "source");
+ prop->source_ports, "source");
nval = hweight32(prop->sink_ports);
prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval,
- sizeof(*prop->sink_dpn_prop), GFP_KERNEL);
+ sizeof(*prop->sink_dpn_prop),
+ GFP_KERNEL);
if (!prop->sink_dpn_prop)
return -ENOMEM;
/* Read dpn properties for sink port(s) */
sdw_slave_read_dpn(slave, prop->sink_dpn_prop, nval,
- prop->sink_ports, "sink");
+ prop->sink_ports, "sink");
/* some ports are bidirectional so check total ports by ORing */
nval = prop->source_ports | prop->sink_ports;
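hweight32() in the hunk above simply counts the set bits of the port bitmap, i.e. how many sdw_dpn_prop entries to allocate; for example (bitmap value made up):

u32 ports = 0x00000506;		/* ports 1, 2, 8 and 10 present */
int nports = hweight32(ports);	/* 4 entries to allocate        */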
@@ -388,7 +393,8 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
/* Allocate port_ready based on num_of_ports */
slave->port_ready = devm_kcalloc(&slave->dev, num_of_ports,
- sizeof(*slave->port_ready), GFP_KERNEL);
+ sizeof(*slave->port_ready),
+ GFP_KERNEL);
if (!slave->port_ready)
return -ENOMEM;
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index ac103bd0c176..f39a5815e25d 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -14,7 +14,7 @@ static void sdw_slave_release(struct device *dev)
}
static int sdw_slave_add(struct sdw_bus *bus,
- struct sdw_slave_id *id, struct fwnode_handle *fwnode)
+ struct sdw_slave_id *id, struct fwnode_handle *fwnode)
{
struct sdw_slave *slave;
int ret;
@@ -30,8 +30,8 @@ static int sdw_slave_add(struct sdw_bus *bus,
/* name shall be sdw:link:mfg:part:class:unique */
dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
- bus->link_id, id->mfg_id, id->part_id,
- id->class_id, id->unique_id);
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id, id->unique_id);
slave->dev.release = sdw_slave_release;
slave->dev.bus = &sdw_bus_type;
@@ -84,11 +84,11 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus)
acpi_status status;
status = acpi_evaluate_integer(adev->handle,
- METHOD_NAME__ADR, NULL, &addr);
+ METHOD_NAME__ADR, NULL, &addr);
if (ACPI_FAILURE(status)) {
dev_err(bus->dev, "_ADR resolution failed: %x\n",
- status);
+ status);
return status;
}
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index bd879b1a76c8..d01060dbee96 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -52,10 +52,11 @@ static int sdw_find_row_index(int row)
pr_warn("Requested row not found, selecting lowest row no: 48\n");
return 0;
}
+
static int _sdw_program_slave_port_params(struct sdw_bus *bus,
- struct sdw_slave *slave,
- struct sdw_transport_params *t_params,
- enum sdw_dpn_type type)
+ struct sdw_slave *slave,
+ struct sdw_transport_params *t_params,
+ enum sdw_dpn_type type)
{
u32 addr1, addr2, addr3, addr4;
int ret;
@@ -76,20 +77,20 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
/* Program DPN_OffsetCtrl2 registers */
ret = sdw_write(slave, addr1, t_params->offset2);
if (ret < 0) {
- dev_err(bus->dev, "DPN_OffsetCtrl2 register write failed");
+ dev_err(bus->dev, "DPN_OffsetCtrl2 register write failed\n");
return ret;
}
/* Program DPN_BlockCtrl3 register */
ret = sdw_write(slave, addr2, t_params->blk_pkg_mode);
if (ret < 0) {
- dev_err(bus->dev, "DPN_BlockCtrl3 register write failed");
+ dev_err(bus->dev, "DPN_BlockCtrl3 register write failed\n");
return ret;
}
/*
* Data ports are FULL, SIMPLE and REDUCED. This function handles
- * FULL and REDUCED only and and beyond this point only FULL is
+ * FULL and REDUCED only and beyond this point only FULL is
* handled, so bail out if we are not FULL data port type
*/
if (type != SDW_DPN_FULL)
@@ -102,7 +103,7 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
ret = sdw_write(slave, addr3, wbuf);
if (ret < 0) {
- dev_err(bus->dev, "DPN_SampleCtrl2 register write failed");
+ dev_err(bus->dev, "DPN_SampleCtrl2 register write failed\n");
return ret;
}
@@ -113,14 +114,14 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
ret = sdw_write(slave, addr4, wbuf);
if (ret < 0)
- dev_err(bus->dev, "DPN_HCtrl register write failed");
+ dev_err(bus->dev, "DPN_HCtrl register write failed\n");
return ret;
}
static int sdw_program_slave_port_params(struct sdw_bus *bus,
- struct sdw_slave_runtime *s_rt,
- struct sdw_port_runtime *p_rt)
+ struct sdw_slave_runtime *s_rt,
+ struct sdw_port_runtime *p_rt)
{
struct sdw_transport_params *t_params = &p_rt->transport_params;
struct sdw_port_params *p_params = &p_rt->port_params;
@@ -131,8 +132,8 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
u8 wbuf;
dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave,
- s_rt->direction,
- t_params->port_num);
+ s_rt->direction,
+ t_params->port_num);
if (!dpn_prop)
return -EINVAL;
@@ -159,7 +160,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
ret = sdw_update(s_rt->slave, addr1, 0xF, wbuf);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
- "DPN_PortCtrl register write failed for port %d",
+ "DPN_PortCtrl register write failed for port %d\n",
t_params->port_num);
return ret;
}
@@ -168,7 +169,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1));
if (ret < 0) {
dev_err(&s_rt->slave->dev,
- "DPN_BlockCtrl1 register write failed for port %d",
+ "DPN_BlockCtrl1 register write failed for port %d\n",
t_params->port_num);
return ret;
}
@@ -178,7 +179,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
ret = sdw_write(s_rt->slave, addr3, wbuf);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
- "DPN_SampleCtrl1 register write failed for port %d",
+ "DPN_SampleCtrl1 register write failed for port %d\n",
t_params->port_num);
return ret;
}
@@ -187,7 +188,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
ret = sdw_write(s_rt->slave, addr4, t_params->offset1);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
- "DPN_OffsetCtrl1 register write failed for port %d",
+ "DPN_OffsetCtrl1 register write failed for port %d\n",
t_params->port_num);
return ret;
}
@@ -197,7 +198,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
ret = sdw_write(s_rt->slave, addr5, t_params->blk_grp_ctrl);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
- "DPN_BlockCtrl2 reg write failed for port %d",
+ "DPN_BlockCtrl2 reg write failed for port %d\n",
t_params->port_num);
return ret;
}
@@ -208,7 +209,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
ret = sdw_write(s_rt->slave, addr6, t_params->lane_ctrl);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
- "DPN_LaneCtrl register write failed for port %d",
+ "DPN_LaneCtrl register write failed for port %d\n",
t_params->port_num);
return ret;
}
@@ -216,10 +217,10 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
if (dpn_prop->type != SDW_DPN_SIMPLE) {
ret = _sdw_program_slave_port_params(bus, s_rt->slave,
- t_params, dpn_prop->type);
+ t_params, dpn_prop->type);
if (ret < 0)
dev_err(&s_rt->slave->dev,
- "Transport reg write failed for port: %d",
+ "Transport reg write failed for port: %d\n",
t_params->port_num);
}
@@ -227,13 +228,13 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
}
static int sdw_program_master_port_params(struct sdw_bus *bus,
- struct sdw_port_runtime *p_rt)
+ struct sdw_port_runtime *p_rt)
{
int ret;
/*
* we need to set transport and port parameters for the port.
- * Transport parameters refers to the smaple interval, offsets and
+ * Transport parameters refers to the sample interval, offsets and
* hstart/stop etc of the data. Port parameters refers to word
* length, flow mode etc of the port
*/
@@ -244,8 +245,8 @@ static int sdw_program_master_port_params(struct sdw_bus *bus,
return ret;
return bus->port_ops->dpn_set_port_params(bus,
- &p_rt->port_params,
- bus->params.next_bank);
+ &p_rt->port_params,
+ bus->params.next_bank);
}
/**
@@ -292,8 +293,9 @@ static int sdw_program_port_params(struct sdw_master_runtime *m_rt)
* actual enable/disable is done with a bank switch
*/
static int sdw_enable_disable_slave_ports(struct sdw_bus *bus,
- struct sdw_slave_runtime *s_rt,
- struct sdw_port_runtime *p_rt, bool en)
+ struct sdw_slave_runtime *s_rt,
+ struct sdw_port_runtime *p_rt,
+ bool en)
{
struct sdw_transport_params *t_params = &p_rt->transport_params;
u32 addr;
@@ -315,19 +317,20 @@ static int sdw_enable_disable_slave_ports(struct sdw_bus *bus,
if (ret < 0)
dev_err(&s_rt->slave->dev,
- "Slave chn_en reg write failed:%d port:%d",
+ "Slave chn_en reg write failed:%d port:%d\n",
ret, t_params->port_num);
return ret;
}
static int sdw_enable_disable_master_ports(struct sdw_master_runtime *m_rt,
- struct sdw_port_runtime *p_rt, bool en)
+ struct sdw_port_runtime *p_rt,
+ bool en)
{
struct sdw_transport_params *t_params = &p_rt->transport_params;
struct sdw_bus *bus = m_rt->bus;
struct sdw_enable_ch enable_ch;
- int ret = 0;
+ int ret;
enable_ch.port_num = p_rt->num;
enable_ch.ch_mask = p_rt->ch_mask;
@@ -336,10 +339,11 @@ static int sdw_enable_disable_master_ports(struct sdw_master_runtime *m_rt,
/* Perform Master port channel(s) enable/disable */
if (bus->port_ops->dpn_port_enable_ch) {
ret = bus->port_ops->dpn_port_enable_ch(bus,
- &enable_ch, bus->params.next_bank);
+ &enable_ch,
+ bus->params.next_bank);
if (ret < 0) {
dev_err(bus->dev,
- "Master chn_en write failed:%d port:%d",
+ "Master chn_en write failed:%d port:%d\n",
ret, t_params->port_num);
return ret;
}
@@ -370,7 +374,7 @@ static int sdw_enable_disable_ports(struct sdw_master_runtime *m_rt, bool en)
list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
list_for_each_entry(s_port, &s_rt->port_list, port_node) {
ret = sdw_enable_disable_slave_ports(m_rt->bus, s_rt,
- s_port, en);
+ s_port, en);
if (ret < 0)
return ret;
}
@@ -387,7 +391,8 @@ static int sdw_enable_disable_ports(struct sdw_master_runtime *m_rt, bool en)
}
static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt,
- struct sdw_prepare_ch prep_ch, enum sdw_port_prep_ops cmd)
+ struct sdw_prepare_ch prep_ch,
+ enum sdw_port_prep_ops cmd)
{
const struct sdw_slave_ops *ops = s_rt->slave->ops;
int ret;
@@ -396,7 +401,8 @@ static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt,
ret = ops->port_prep(s_rt->slave, &prep_ch, cmd);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
- "Slave Port Prep cmd %d failed: %d", cmd, ret);
+ "Slave Port Prep cmd %d failed: %d\n",
+ cmd, ret);
return ret;
}
}
@@ -405,8 +411,9 @@ static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt,
}
static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
- struct sdw_slave_runtime *s_rt,
- struct sdw_port_runtime *p_rt, bool prep)
+ struct sdw_slave_runtime *s_rt,
+ struct sdw_port_runtime *p_rt,
+ bool prep)
{
struct completion *port_ready = NULL;
struct sdw_dpn_prop *dpn_prop;
@@ -420,11 +427,11 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
prep_ch.ch_mask = p_rt->ch_mask;
dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave,
- s_rt->direction,
- prep_ch.num);
+ s_rt->direction,
+ prep_ch.num);
if (!dpn_prop) {
dev_err(bus->dev,
- "Slave Port:%d properties not found", prep_ch.num);
+ "Slave Port:%d properties not found\n", prep_ch.num);
return -EINVAL;
}
@@ -442,7 +449,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
*/
if (prep && intr) {
ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep,
- dpn_prop->device_interrupts);
+ dpn_prop->device_interrupts);
if (ret < 0)
return ret;
}
@@ -456,13 +463,13 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
if (prep)
ret = sdw_update(s_rt->slave, addr,
- 0xFF, p_rt->ch_mask);
+ 0xFF, p_rt->ch_mask);
else
ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
- "Slave prep_ctrl reg write failed");
+ "Slave prep_ctrl reg write failed\n");
return ret;
}
@@ -475,7 +482,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
val &= p_rt->ch_mask;
if (!time_left || val) {
dev_err(&s_rt->slave->dev,
- "Chn prep failed for port:%d", prep_ch.num);
+ "Chn prep failed for port:%d\n", prep_ch.num);
return -ETIMEDOUT;
}
}
@@ -486,13 +493,14 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
/* Disable interrupt after Port de-prepare */
if (!prep && intr)
ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep,
- dpn_prop->device_interrupts);
+ dpn_prop->device_interrupts);
return ret;
}
static int sdw_prep_deprep_master_ports(struct sdw_master_runtime *m_rt,
- struct sdw_port_runtime *p_rt, bool prep)
+ struct sdw_port_runtime *p_rt,
+ bool prep)
{
struct sdw_transport_params *t_params = &p_rt->transport_params;
struct sdw_bus *bus = m_rt->bus;
@@ -509,8 +517,8 @@ static int sdw_prep_deprep_master_ports(struct sdw_master_runtime *m_rt,
if (ops->dpn_port_prep) {
ret = ops->dpn_port_prep(bus, &prep_ch);
if (ret < 0) {
- dev_err(bus->dev, "Port prepare failed for port:%d",
- t_params->port_num);
+ dev_err(bus->dev, "Port prepare failed for port:%d\n",
+ t_params->port_num);
return ret;
}
}
@@ -535,7 +543,7 @@ static int sdw_prep_deprep_ports(struct sdw_master_runtime *m_rt, bool prep)
list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
ret = sdw_prep_deprep_slave_ports(m_rt->bus, s_rt,
- p_rt, prep);
+ p_rt, prep);
if (ret < 0)
return ret;
}
@@ -578,8 +586,8 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
if (slave->ops->bus_config) {
ret = slave->ops->bus_config(slave, &bus->params);
if (ret < 0)
- dev_err(bus->dev, "Notify Slave: %d failed",
- slave->dev_num);
+ dev_err(bus->dev, "Notify Slave: %d failed\n",
+ slave->dev_num);
return ret;
}
}
@@ -602,13 +610,14 @@ static int sdw_program_params(struct sdw_bus *bus)
ret = sdw_program_port_params(m_rt);
if (ret < 0) {
dev_err(bus->dev,
- "Program transport params failed: %d", ret);
+ "Program transport params failed: %d\n", ret);
return ret;
}
ret = sdw_notify_config(m_rt);
if (ret < 0) {
- dev_err(bus->dev, "Notify bus config failed: %d", ret);
+ dev_err(bus->dev,
+ "Notify bus config failed: %d\n", ret);
return ret;
}
@@ -618,7 +627,7 @@ static int sdw_program_params(struct sdw_bus *bus)
ret = sdw_enable_disable_ports(m_rt, true);
if (ret < 0) {
- dev_err(bus->dev, "Enable channel failed: %d", ret);
+ dev_err(bus->dev, "Enable channel failed: %d\n", ret);
return ret;
}
}
@@ -658,7 +667,7 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
addr = SDW_SCP_FRAMECTRL_B0;
sdw_fill_msg(wr_msg, NULL, addr, 1, SDW_BROADCAST_DEV_NUM,
- SDW_MSG_FLAG_WRITE, wbuf);
+ SDW_MSG_FLAG_WRITE, wbuf);
wr_msg->ssp_sync = true;
/*
@@ -673,7 +682,7 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
ret = sdw_transfer(bus, wr_msg);
if (ret < 0) {
- dev_err(bus->dev, "Slave frame_ctrl reg write failed");
+ dev_err(bus->dev, "Slave frame_ctrl reg write failed\n");
goto error;
}
@@ -713,7 +722,7 @@ static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
bus->bank_switch_timeout);
if (!time_left) {
- dev_err(bus->dev, "Controller Timed out on bank switch");
+ dev_err(bus->dev, "Controller Timed out on bank switch\n");
return -ETIMEDOUT;
}
@@ -750,7 +759,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
ret = ops->pre_bank_switch(bus);
if (ret < 0) {
dev_err(bus->dev,
- "Pre bank switch op failed: %d", ret);
+ "Pre bank switch op failed: %d\n", ret);
goto msg_unlock;
}
}
@@ -763,9 +772,8 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
*/
ret = sdw_bank_switch(bus, stream->m_rt_count);
if (ret < 0) {
- dev_err(bus->dev, "Bank switch failed: %d", ret);
+ dev_err(bus->dev, "Bank switch failed: %d\n", ret);
goto error;
-
}
}
@@ -784,12 +792,13 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
ret = ops->post_bank_switch(bus);
if (ret < 0) {
dev_err(bus->dev,
- "Post bank switch op failed: %d", ret);
+ "Post bank switch op failed: %d\n",
+ ret);
goto error;
}
} else if (bus->multi_link && stream->m_rt_count > 1) {
dev_err(bus->dev,
- "Post bank switch ops not implemented");
+ "Post bank switch ops not implemented\n");
goto error;
}
@@ -801,7 +810,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
ret = sdw_ml_sync_bank_switch(bus);
if (ret < 0) {
dev_err(bus->dev,
- "multi link bank switch failed: %d", ret);
+ "multi link bank switch failed: %d\n", ret);
goto error;
}
@@ -812,7 +821,6 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
error:
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
-
bus = m_rt->bus;
kfree(bus->defer_msg.msg->buf);
@@ -873,7 +881,7 @@ EXPORT_SYMBOL(sdw_alloc_stream);
static struct sdw_master_runtime
*sdw_find_master_rt(struct sdw_bus *bus,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt = NULL;
@@ -897,8 +905,8 @@ static struct sdw_master_runtime
*/
static struct sdw_master_runtime
*sdw_alloc_master_rt(struct sdw_bus *bus,
- struct sdw_stream_config *stream_config,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_config *stream_config,
+ struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt;
@@ -941,8 +949,8 @@ stream_config:
*/
static struct sdw_slave_runtime
*sdw_alloc_slave_rt(struct sdw_slave *slave,
- struct sdw_stream_config *stream_config,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_config *stream_config,
+ struct sdw_stream_runtime *stream)
{
struct sdw_slave_runtime *s_rt = NULL;
@@ -959,20 +967,19 @@ static struct sdw_slave_runtime
}
static void sdw_master_port_release(struct sdw_bus *bus,
- struct sdw_master_runtime *m_rt)
+ struct sdw_master_runtime *m_rt)
{
struct sdw_port_runtime *p_rt, *_p_rt;
- list_for_each_entry_safe(p_rt, _p_rt,
- &m_rt->port_list, port_node) {
+ list_for_each_entry_safe(p_rt, _p_rt, &m_rt->port_list, port_node) {
list_del(&p_rt->port_node);
kfree(p_rt);
}
}
static void sdw_slave_port_release(struct sdw_bus *bus,
- struct sdw_slave *slave,
- struct sdw_stream_runtime *stream)
+ struct sdw_slave *slave,
+ struct sdw_stream_runtime *stream)
{
struct sdw_port_runtime *p_rt, *_p_rt;
struct sdw_master_runtime *m_rt;
@@ -980,13 +987,11 @@ static void sdw_slave_port_release(struct sdw_bus *bus,
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
-
if (s_rt->slave != slave)
continue;
list_for_each_entry_safe(p_rt, _p_rt,
- &s_rt->port_list, port_node) {
-
+ &s_rt->port_list, port_node) {
list_del(&p_rt->port_node);
kfree(p_rt);
}
@@ -1003,7 +1008,7 @@ static void sdw_slave_port_release(struct sdw_bus *bus,
* This function is to be called with bus_lock held.
*/
static void sdw_release_slave_stream(struct sdw_slave *slave,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_runtime *stream)
{
struct sdw_slave_runtime *s_rt, *_s_rt;
struct sdw_master_runtime *m_rt;
@@ -1011,8 +1016,7 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
/* Retrieve Slave runtime handle */
list_for_each_entry_safe(s_rt, _s_rt,
- &m_rt->slave_rt_list, m_rt_node) {
-
+ &m_rt->slave_rt_list, m_rt_node) {
if (s_rt->slave == slave) {
list_del(&s_rt->m_rt_node);
kfree(s_rt);
@@ -1034,7 +1038,7 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
* no effect as Slave(s) runtime handle would already be freed up.
*/
static void sdw_release_master_stream(struct sdw_master_runtime *m_rt,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_runtime *stream)
{
struct sdw_slave_runtime *s_rt, *_s_rt;
@@ -1057,15 +1061,14 @@ static void sdw_release_master_stream(struct sdw_master_runtime *m_rt,
* This removes and frees port_rt and master_rt from a stream
*/
int sdw_stream_remove_master(struct sdw_bus *bus,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt, *_m_rt;
mutex_lock(&bus->bus_lock);
list_for_each_entry_safe(m_rt, _m_rt,
- &stream->master_list, stream_node) {
-
+ &stream->master_list, stream_node) {
if (m_rt->bus != bus)
continue;
@@ -1092,7 +1095,7 @@ EXPORT_SYMBOL(sdw_stream_remove_master);
* This removes and frees port_rt and slave_rt from a stream
*/
int sdw_stream_remove_slave(struct sdw_slave *slave,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_runtime *stream)
{
mutex_lock(&slave->bus->bus_lock);
@@ -1116,8 +1119,9 @@ EXPORT_SYMBOL(sdw_stream_remove_slave);
* This function is to be called with bus_lock held.
*/
static int sdw_config_stream(struct device *dev,
- struct sdw_stream_runtime *stream,
- struct sdw_stream_config *stream_config, bool is_slave)
+ struct sdw_stream_runtime *stream,
+ struct sdw_stream_config *stream_config,
+ bool is_slave)
{
/*
* Update the stream rate, channel and bps based on data
@@ -1128,14 +1132,14 @@ static int sdw_config_stream(struct device *dev,
* comparison and allow the value to be set and stored in stream
*/
if (stream->params.rate &&
- stream->params.rate != stream_config->frame_rate) {
- dev_err(dev, "rate not matching, stream:%s", stream->name);
+ stream->params.rate != stream_config->frame_rate) {
+ dev_err(dev, "rate not matching, stream:%s\n", stream->name);
return -EINVAL;
}
if (stream->params.bps &&
- stream->params.bps != stream_config->bps) {
- dev_err(dev, "bps not matching, stream:%s", stream->name);
+ stream->params.bps != stream_config->bps) {
+ dev_err(dev, "bps not matching, stream:%s\n", stream->name);
return -EINVAL;
}
@@ -1151,20 +1155,21 @@ static int sdw_config_stream(struct device *dev,
}
static int sdw_is_valid_port_range(struct device *dev,
- struct sdw_port_runtime *p_rt)
+ struct sdw_port_runtime *p_rt)
{
if (!SDW_VALID_PORT_RANGE(p_rt->num)) {
dev_err(dev,
- "SoundWire: Invalid port number :%d", p_rt->num);
+ "SoundWire: Invalid port number :%d\n", p_rt->num);
return -EINVAL;
}
return 0;
}
-static struct sdw_port_runtime *sdw_port_alloc(struct device *dev,
- struct sdw_port_config *port_config,
- int port_index)
+static struct sdw_port_runtime
+*sdw_port_alloc(struct device *dev,
+ struct sdw_port_config *port_config,
+ int port_index)
{
struct sdw_port_runtime *p_rt;
@@ -1179,9 +1184,9 @@ static struct sdw_port_runtime *sdw_port_alloc(struct device *dev,
}
static int sdw_master_port_config(struct sdw_bus *bus,
- struct sdw_master_runtime *m_rt,
- struct sdw_port_config *port_config,
- unsigned int num_ports)
+ struct sdw_master_runtime *m_rt,
+ struct sdw_port_config *port_config,
+ unsigned int num_ports)
{
struct sdw_port_runtime *p_rt;
int i;
@@ -1204,9 +1209,9 @@ static int sdw_master_port_config(struct sdw_bus *bus,
}
static int sdw_slave_port_config(struct sdw_slave *slave,
- struct sdw_slave_runtime *s_rt,
- struct sdw_port_config *port_config,
- unsigned int num_config)
+ struct sdw_slave_runtime *s_rt,
+ struct sdw_port_config *port_config,
+ unsigned int num_config)
{
struct sdw_port_runtime *p_rt;
int i, ret;
@@ -1248,10 +1253,10 @@ static int sdw_slave_port_config(struct sdw_slave *slave,
* @stream: SoundWire stream
*/
int sdw_stream_add_master(struct sdw_bus *bus,
- struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
- unsigned int num_ports,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_config *stream_config,
+ struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt = NULL;
int ret;
@@ -1265,7 +1270,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
*/
if (!bus->multi_link && stream->m_rt_count > 0) {
dev_err(bus->dev,
- "Multilink not supported, link %d", bus->link_id);
+ "Multilink not supported, link %d\n", bus->link_id);
ret = -EINVAL;
goto unlock;
}
@@ -1273,8 +1278,8 @@ int sdw_stream_add_master(struct sdw_bus *bus,
m_rt = sdw_alloc_master_rt(bus, stream_config, stream);
if (!m_rt) {
dev_err(bus->dev,
- "Master runtime config failed for stream:%s",
- stream->name);
+ "Master runtime config failed for stream:%s\n",
+ stream->name);
ret = -ENOMEM;
goto unlock;
}
@@ -1313,10 +1318,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
*
*/
int sdw_stream_add_slave(struct sdw_slave *slave,
- struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
- unsigned int num_ports,
- struct sdw_stream_runtime *stream)
+ struct sdw_stream_config *stream_config,
+ struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream)
{
struct sdw_slave_runtime *s_rt;
struct sdw_master_runtime *m_rt;
@@ -1331,8 +1336,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
m_rt = sdw_alloc_master_rt(slave->bus, stream_config, stream);
if (!m_rt) {
dev_err(&slave->dev,
- "alloc master runtime failed for stream:%s",
- stream->name);
+ "alloc master runtime failed for stream:%s\n",
+ stream->name);
ret = -ENOMEM;
goto error;
}
@@ -1340,8 +1345,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
s_rt = sdw_alloc_slave_rt(slave, stream_config, stream);
if (!s_rt) {
dev_err(&slave->dev,
- "Slave runtime config failed for stream:%s",
- stream->name);
+ "Slave runtime config failed for stream:%s\n",
+ stream->name);
ret = -ENOMEM;
goto stream_error;
}
@@ -1385,8 +1390,8 @@ EXPORT_SYMBOL(sdw_stream_add_slave);
* @port_num: Port number
*/
struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
- enum sdw_data_direction direction,
- unsigned int port_num)
+ enum sdw_data_direction direction,
+ unsigned int port_num)
{
struct sdw_dpn_prop *dpn_prop;
u8 num_ports;
@@ -1470,7 +1475,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
/* TODO: Support Asynchronous mode */
if ((prop->max_freq % stream->params.rate) != 0) {
- dev_err(bus->dev, "Async mode not supported");
+ dev_err(bus->dev, "Async mode not supported\n");
return -EINVAL;
}
@@ -1482,15 +1487,14 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
/* Program params */
ret = sdw_program_params(bus);
if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
+ dev_err(bus->dev, "Program params failed: %d\n", ret);
goto restore_params;
}
-
}
ret = do_bank_switch(stream);
if (ret < 0) {
- dev_err(bus->dev, "Bank switch failed: %d", ret);
+ dev_err(bus->dev, "Bank switch failed: %d\n", ret);
goto restore_params;
}
@@ -1500,8 +1504,8 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
/* Prepare port(s) on the new clock configuration */
ret = sdw_prep_deprep_ports(m_rt, true);
if (ret < 0) {
- dev_err(bus->dev, "Prepare port(s) failed ret = %d",
- ret);
+ dev_err(bus->dev, "Prepare port(s) failed ret = %d\n",
+ ret);
return ret;
}
}
@@ -1527,7 +1531,7 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
int ret = 0;
if (!stream) {
- pr_err("SoundWire: Handle not found for stream");
+ pr_err("SoundWire: Handle not found for stream\n");
return -EINVAL;
}
@@ -1535,7 +1539,7 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
ret = _sdw_prepare_stream(stream);
if (ret < 0)
- pr_err("Prepare for stream:%s failed: %d", stream->name, ret);
+ pr_err("Prepare for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1555,21 +1559,22 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
/* Program params */
ret = sdw_program_params(bus);
if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
+ dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
}
/* Enable port(s) */
ret = sdw_enable_disable_ports(m_rt, true);
if (ret < 0) {
- dev_err(bus->dev, "Enable port(s) failed ret: %d", ret);
+ dev_err(bus->dev,
+ "Enable port(s) failed ret: %d\n", ret);
return ret;
}
}
ret = do_bank_switch(stream);
if (ret < 0) {
- dev_err(bus->dev, "Bank switch failed: %d", ret);
+ dev_err(bus->dev, "Bank switch failed: %d\n", ret);
return ret;
}
@@ -1589,7 +1594,7 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
int ret = 0;
if (!stream) {
- pr_err("SoundWire: Handle not found for stream");
+ pr_err("SoundWire: Handle not found for stream\n");
return -EINVAL;
}
@@ -1597,7 +1602,7 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
ret = _sdw_enable_stream(stream);
if (ret < 0)
- pr_err("Enable for stream:%s failed: %d", stream->name, ret);
+ pr_err("Enable for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1615,7 +1620,7 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
/* Disable port(s) */
ret = sdw_enable_disable_ports(m_rt, false);
if (ret < 0) {
- dev_err(bus->dev, "Disable port(s) failed: %d", ret);
+ dev_err(bus->dev, "Disable port(s) failed: %d\n", ret);
return ret;
}
}
@@ -1626,7 +1631,7 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
/* Program params */
ret = sdw_program_params(bus);
if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
+ dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
}
}
@@ -1646,7 +1651,7 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
int ret = 0;
if (!stream) {
- pr_err("SoundWire: Handle not found for stream");
+ pr_err("SoundWire: Handle not found for stream\n");
return -EINVAL;
}
@@ -1654,7 +1659,7 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
ret = _sdw_disable_stream(stream);
if (ret < 0)
- pr_err("Disable for stream:%s failed: %d", stream->name, ret);
+ pr_err("Disable for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1672,7 +1677,8 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
/* De-prepare port(s) */
ret = sdw_prep_deprep_ports(m_rt, false);
if (ret < 0) {
- dev_err(bus->dev, "De-prepare port(s) failed: %d", ret);
+ dev_err(bus->dev,
+ "De-prepare port(s) failed: %d\n", ret);
return ret;
}
@@ -1683,10 +1689,9 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
/* Program params */
ret = sdw_program_params(bus);
if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
+ dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
}
-
}
stream->state = SDW_STREAM_DEPREPARED;
@@ -1705,14 +1710,14 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
int ret = 0;
if (!stream) {
- pr_err("SoundWire: Handle not found for stream");
+ pr_err("SoundWire: Handle not found for stream\n");
return -EINVAL;
}
sdw_acquire_bus_lock(stream);
ret = _sdw_deprepare_stream(stream);
if (ret < 0)
- pr_err("De-prepare for stream:%d failed: %d", ret, ret);
+ pr_err("De-prepare for stream:%d failed: %d\n", ret, ret);
sdw_release_bus_lock(stream);
return ret;
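
The stream.c hunks above touch every stage of the SoundWire stream lifecycle. As a point of reference, a minimal sketch of the expected calling order from a client driver follows; the sdw_* calls are the exported APIs visible in this diff, while the surrounding function, variable names and error handling are illustrative only.

#include <linux/errno.h>
#include <linux/soundwire/sdw.h>

/* Illustrative lifecycle: alloc -> add master/slave -> prepare -> enable ->
 * disable -> deprepare -> remove -> release. Not taken from this patch.
 */
static int example_sdw_stream_run(struct sdw_bus *bus, struct sdw_slave *slave,
				  struct sdw_stream_config *scfg,
				  struct sdw_port_config *pcfg,
				  unsigned int num_ports)
{
	struct sdw_stream_runtime *stream;
	int ret;

	stream = sdw_alloc_stream("example-pcm");
	if (!stream)
		return -ENOMEM;

	ret = sdw_stream_add_master(bus, scfg, pcfg, num_ports, stream);
	if (ret < 0)
		goto release;

	ret = sdw_stream_add_slave(slave, scfg, pcfg, num_ports, stream);
	if (ret < 0)
		goto remove_master;

	ret = sdw_prepare_stream(stream);	/* programs params, bank switch */
	if (!ret)
		ret = sdw_enable_stream(stream);
	if (!ret) {
		sdw_disable_stream(stream);
		sdw_deprepare_stream(stream);
	}

	sdw_stream_remove_slave(slave, stream);
remove_master:
	sdw_stream_remove_master(bus, stream);
release:
	sdw_release_stream(stream);
	return ret;
}
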
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f761655e2a36..0fba8f400c59 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -426,6 +426,12 @@ config SPI_MT65XX
say Y or M here. If you are not sure, say N.
SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
+config SPI_MT7621
+ tristate "MediaTek MT7621 SPI Controller"
+ depends on RALINK || COMPILE_TEST
+ help
+ This selects a driver for the MediaTek MT7621 SPI Controller.
+
config SPI_NPCM_PSPI
tristate "Nuvoton NPCM PSPI Controller"
depends on ARCH_NPCM || COMPILE_TEST
@@ -842,9 +848,17 @@ config SPI_XTENSA_XTFPGA
16 bit words in SPI mode 0, automatically asserting CS on transfer
start and deasserting on end.
+config SPI_ZYNQ_QSPI
+ tristate "Xilinx Zynq QSPI controller"
+ depends on ARCH_ZYNQ || COMPILE_TEST
+ help
+ This enables support for the Zynq Quad SPI controller
+ in master mode.
+ This controller only supports SPI memory interface.
+
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
- depends on SPI_MASTER && HAS_DMA
+ depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index d8fc03c9faa2..f2f78d03dc28 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
+obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
@@ -118,6 +119,7 @@ obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
obj-$(CONFIG_SPI_XLP) += spi-xlp.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
+obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
# SPI slave protocol handlers
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index fffc21cd5f79..9f24d5f0b431 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -366,7 +366,7 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
return err;
}
-const char *atmel_qspi_get_name(struct spi_mem *spimem)
+static const char *atmel_qspi_get_name(struct spi_mem *spimem)
{
return dev_name(spimem->spi->dev.parent);
}
@@ -570,7 +570,8 @@ static int atmel_qspi_remove(struct platform_device *pdev)
static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
- struct atmel_qspi *aq = dev_get_drvdata(dev);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
@@ -580,7 +581,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
- struct atmel_qspi *aq = dev_get_drvdata(dev);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
clk_prepare_enable(aq->pclk);
clk_prepare_enable(aq->qspick);
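
The suspend/resume fix above works because, presumably, the driver's probe stores the spi_controller (not the atmel_qspi struct) as the device's drvdata, so the PM callbacks must unwrap it with spi_controller_get_devdata(). A minimal sketch of that pairing, under that assumption and with hypothetical names:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_priv { int dummy; };	/* stand-in for the driver's private data */

/* Sketch only, not the driver's actual probe: once drvdata points at the
 * controller, dev_get_drvdata() in suspend/resume returns the controller,
 * and the private data is reached via spi_controller_get_devdata().
 */
static int example_qspi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctrl;

	ctrl = spi_alloc_master(&pdev->dev, sizeof(struct example_priv));
	if (!ctrl)
		return -ENOMEM;

	platform_set_drvdata(pdev, ctrl);	/* dev_get_drvdata() == ctrl */

	/* ... clocks, ioremap, spi_mem ops ... */
	return devm_spi_register_controller(&pdev->dev, ctrl);
}
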
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index a694d702e574..f763e14bdf12 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -178,12 +178,6 @@ static int at91_usart_spi_setup(struct spi_device *spi)
struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
u32 *ausd = spi->controller_state;
unsigned int mr = at91_usart_spi_readl(aus, MR);
- u8 bits = spi->bits_per_word;
-
- if (bits != 8) {
- dev_dbg(&spi->dev, "Only 8 bits per word are supported\n");
- return -EINVAL;
- }
if (spi->mode & SPI_CPOL)
mr |= US_MR_CPOL;
@@ -212,7 +206,7 @@ static int at91_usart_spi_setup(struct spi_device *spi)
dev_dbg(&spi->dev,
"setup: bpw %u mode 0x%x -> mr %d %08x\n",
- bits, spi->mode, spi->chip_select, mr);
+ spi->bits_per_word, spi->mode, spi->chip_select, mr);
return 0;
}
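
The 8-bits-per-word check removed above is presumably redundant because the controller already advertises its supported word length through bits_per_word_mask, which the SPI core validates before calling into the driver. A one-function sketch of that declaration, assuming that is the rationale (the mask value mirrors the removed check, not necessarily this driver's exact probe code):

#include <linux/spi/spi.h>

/* Illustrative: declaring the supported word lengths at registration time lets
 * the SPI core reject unsupported bits_per_word, so per-setup checks like the
 * one removed above become unnecessary.
 */
static void example_declare_word_lengths(struct spi_controller *ctrl)
{
	ctrl->bits_per_word_mask = SPI_BPW_MASK(8);	/* 8-bit words only */
}
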
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 35aebdfd3b4e..8aa22713c483 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -335,20 +335,6 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
return 1;
}
-/*
- * DMA support
- *
- * this implementation has currently a few issues in so far as it does
- * not work arrount limitations of the HW.
- *
- * the main one being that DMA transfers are limited to 16 bit
- * (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
- *
- * there may be a few more border-cases we may need to address as well
- * but unfortunately this would mean splitting up the scatter-gather
- * list making it slightly unpractical...
- */
-
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
* @master: SPI master
@@ -630,19 +616,6 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
return false;
- /* BCM2835_SPI_DLEN has defined a max transfer size as
- * 16 bit, so max is 65535
- * we can revisit this by using an alternative transfer
- * method - ideally this would get done without any more
- * interaction...
- */
- if (tfr->len > 65535) {
- dev_warn_once(&spi->dev,
- "transfer size of %d too big for dma-transfer\n",
- tfr->len);
- return false;
- }
-
/* return OK */
return true;
}
@@ -707,7 +680,6 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
/* all went well, so set can_dma */
master->can_dma = bcm2835_spi_can_dma;
- master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
/* need to do TX AND RX DMA, so we need dummy buffers */
master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
@@ -844,6 +816,17 @@ static int bcm2835_spi_prepare_message(struct spi_master *master,
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ int ret;
+
+ /*
+ * DMA transfers are limited to 16 bit (0 to 65535 bytes) by the SPI HW
+ * due to DLEN. Split up transfers (32-bit FIFO aligned) if the limit is
+ * exceeded.
+ */
+ ret = spi_split_transfers_maxsize(master, msg, 65532,
+ GFP_KERNEL | GFP_DMA);
+ if (ret)
+ return ret;
cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
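
The 65532 passed to spi_split_transfers_maxsize() above appears to be the largest length that both fits the 16-bit DLEN limit (65535) and stays 32-bit FIFO aligned, as the new comment requires. A standalone check of that arithmetic, purely illustrative:

/* Illustrative arithmetic only, not part of the patch: round the DLEN ceiling
 * down to a multiple of four so every split transfer stays FIFO aligned.
 */
#define EXAMPLE_DLEN_MAX	65535u
#define EXAMPLE_SPLIT_MAX	(EXAMPLE_DLEN_MAX & ~3u)

_Static_assert(EXAMPLE_SPLIT_MAX == 65532u,
	       "largest 4-byte-aligned length under the DLEN limit");
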
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index f7e054848ca5..bbf87adb3ff8 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -21,6 +21,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -36,6 +37,12 @@
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
+/* define polling limits */
+static unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+ "time in us to run a transfer in polling mode - if zero no polling is used\n");
+
/*
* spi register defines
*
@@ -88,10 +95,6 @@
#define BCM2835_AUX_SPI_STAT_BUSY 0x00000040
#define BCM2835_AUX_SPI_STAT_BITCOUNT 0x0000003F
-/* timeout values */
-#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30
-#define BCM2835_AUX_SPI_POLLING_JIFFIES 2
-
struct bcm2835aux_spi {
void __iomem *regs;
struct clk *clk;
@@ -102,8 +105,53 @@ struct bcm2835aux_spi {
int tx_len;
int rx_len;
int pending;
+
+ u64 count_transfer_polling;
+ u64 count_transfer_irq;
+ u64 count_transfer_irq_after_poll;
+
+ struct dentry *debugfs_dir;
};
+#if defined(CONFIG_DEBUG_FS)
+static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
+ const char *dname)
+{
+ char name[64];
+ struct dentry *dir;
+
+ /* get full name */
+ snprintf(name, sizeof(name), "spi-bcm2835aux-%s", dname);
+
+ /* the base directory */
+ dir = debugfs_create_dir(name, NULL);
+ bs->debugfs_dir = dir;
+
+ /* the counters */
+ debugfs_create_u64("count_transfer_polling", 0444, dir,
+ &bs->count_transfer_polling);
+ debugfs_create_u64("count_transfer_irq", 0444, dir,
+ &bs->count_transfer_irq);
+ debugfs_create_u64("count_transfer_irq_after_poll", 0444, dir,
+ &bs->count_transfer_irq_after_poll);
+}
+
+static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
+{
+ debugfs_remove_recursive(bs->debugfs_dir);
+ bs->debugfs_dir = NULL;
+}
+#else
+static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
+ const char *dname)
+{
+}
+
+static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
{
return readl(bs->regs + reg);
@@ -123,9 +171,6 @@ static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
data = bcm2835aux_rd(bs, BCM2835_AUX_SPI_IO);
if (bs->rx_buf) {
switch (count) {
- case 4:
- *bs->rx_buf++ = (data >> 24) & 0xff;
- /* fallthrough */
case 3:
*bs->rx_buf++ = (data >> 16) & 0xff;
/* fallthrough */
@@ -178,24 +223,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
}
-static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
{
- struct spi_master *master = dev_id;
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
- irqreturn_t ret = IRQ_NONE;
-
- /* IRQ may be shared, so return if our interrupts are disabled */
- if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
- (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
- return ret;
+ u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
/* check if we have data to read */
- while (bs->rx_len &&
- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
- BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
+ for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
+ stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
bcm2835aux_rd_fifo(bs);
- ret = IRQ_HANDLED;
- }
/* check if we have data to write */
while (bs->tx_len &&
@@ -203,16 +238,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
BCM2835_AUX_SPI_STAT_TX_FULL))) {
bcm2835aux_wr_fifo(bs);
- ret = IRQ_HANDLED;
}
+}
- /* and check if we have reached "done" */
- while (bs->rx_len &&
- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
- BCM2835_AUX_SPI_STAT_BUSY))) {
- bcm2835aux_rd_fifo(bs);
- ret = IRQ_HANDLED;
- }
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* IRQ may be shared, so return if our interrupts are disabled */
+ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+ return IRQ_NONE;
+
+ /* do common fifo handling */
+ bcm2835aux_spi_transfer_helper(bs);
if (!bs->tx_len) {
/* disable tx fifo empty interrupt */
@@ -226,8 +266,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
complete(&master->xfer_completion);
}
- /* and return */
- return ret;
+ return IRQ_HANDLED;
}
static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
@@ -251,6 +290,9 @@ static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ /* update statistics */
+ bs->count_transfer_irq++;
+
/* fill in registers and fifos before enabling interrupts */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
@@ -273,35 +315,22 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
unsigned long timeout;
- u32 stat;
+
+ /* update statistics */
+ bs->count_transfer_polling++;
/* configure spi */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
- /* set the timeout */
- timeout = jiffies + BCM2835_AUX_SPI_POLLING_JIFFIES;
+ /* set the timeout to at least 2 jiffies */
+ timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
/* loop until finished the transfer */
while (bs->rx_len) {
- /* read status */
- stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
- /* fill in tx fifo with remaining data */
- if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
- bcm2835aux_wr_fifo(bs);
- continue;
- }
-
- /* read data from fifo for both cases */
- if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
- bcm2835aux_rd_fifo(bs);
- continue;
- }
- if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
- bcm2835aux_rd_fifo(bs);
- continue;
- }
+ /* do common fifo handling */
+ bcm2835aux_spi_transfer_helper(bs);
/* there is still data pending to read check the timeout */
if (bs->rx_len && time_after(jiffies, timeout)) {
@@ -310,6 +339,7 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
jiffies - timeout,
bs->tx_len, bs->rx_len);
/* forward to interrupt handler */
+ bs->count_transfer_irq_after_poll++;
return __bcm2835aux_spi_transfer_one_irq(master,
spi, tfr);
}
@@ -324,8 +354,8 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
struct spi_transfer *tfr)
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
- unsigned long spi_hz, clk_hz, speed;
- unsigned long spi_used_hz;
+ unsigned long spi_hz, clk_hz, speed, spi_used_hz;
+ unsigned long hz_per_byte, byte_limit;
/* calculate the registers to handle
*
@@ -369,14 +399,15 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
* of Hz per byte per polling limit. E.g., we can transfer 1 byte in
* 30 µs per 300,000 Hz of bus clock.
*/
-#define HZ_PER_BYTE ((9 * 1000000) / BCM2835_AUX_SPI_POLLING_LIMIT_US)
+ hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
+ byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
+
/* run in polling mode for short transfers */
- if (tfr->len < spi_used_hz / HZ_PER_BYTE)
+ if (tfr->len < byte_limit)
return bcm2835aux_spi_transfer_one_poll(master, spi, tfr);
/* run in interrupt mode for all others */
return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
-#undef HZ_PER_BYTE
}
static int bcm2835aux_spi_prepare_message(struct spi_master *master,
@@ -421,6 +452,50 @@ static void bcm2835aux_spi_handle_err(struct spi_master *master,
bcm2835aux_spi_reset_hw(bs);
}
+static int bcm2835aux_spi_setup(struct spi_device *spi)
+{
+ int ret;
+
+ /* sanity check for native cs */
+ if (spi->mode & SPI_NO_CS)
+ return 0;
+ if (gpio_is_valid(spi->cs_gpio)) {
+ /* with gpio-cs set the GPIO to the correct level
+ * and as output (in case the dt has the gpio not configured
+ * as output but native cs)
+ */
+ ret = gpio_direction_output(spi->cs_gpio,
+ (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+ if (ret)
+ dev_err(&spi->dev,
+ "could not set gpio %i as output: %i\n",
+ spi->cs_gpio, ret);
+
+ return ret;
+ }
+
+ /* for dt-backwards compatibility: only support native on CS0
+ * known things not supported with broken native CS:
+ * * multiple chip-selects: cs0-cs2 are all
+ * simultaneously asserted whenever there is a transfer
+ * this even includes SPI_NO_CS
+ * * SPI_CS_HIGH: cs are always asserted low
+ * * cs_change: cs is deasserted after each spi_transfer
+ * * cs_delay_usec: cs is always deasserted one SCK cycle
+ * after the last transfer
+ * probably more...
+ */
+ dev_warn(&spi->dev,
+ "Native CS is not supported - please configure cs-gpio in device-tree\n");
+
+ if (spi->chip_select == 0)
+ return 0;
+
+ dev_warn(&spi->dev, "Native CS is not working for cs > 0\n");
+
+ return -EINVAL;
+}
+
static int bcm2835aux_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -438,7 +513,19 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->num_chipselect = -1;
+ /* even though the driver never officially supported native CS
+ * allow a single native CS for legacy DT support purposes when
+ * no cs-gpio is configured.
+ * Known limitations for native cs are:
+ * * multiple chip-selects: cs0-cs2 are all simultaneously asserted
+ * whenever there is a transfer - this even includes SPI_NO_CS
+ * * SPI_CS_HIGH: is ignored - cs are always asserted low
+ * * cs_change: cs is deasserted after each spi_transfer
+ * * cs_delay_usec: cs is always deasserted one SCK cycle after
+ * a spi_transfer
+ */
+ master->num_chipselect = 1;
+ master->setup = bcm2835aux_spi_setup;
master->transfer_one = bcm2835aux_spi_transfer_one;
master->handle_err = bcm2835aux_spi_handle_err;
master->prepare_message = bcm2835aux_spi_prepare_message;
@@ -502,6 +589,8 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
+ bcm2835aux_debugfs_create(bs, dev_name(&pdev->dev));
+
return 0;
out_clk_disable:
@@ -516,6 +605,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ bcm2835aux_debugfs_remove(bs);
+
bcm2835aux_spi_reset_hw(bs);
/* disable the HW block by releasing the clock */
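
To make the new polling_limit_us heuristic concrete, here is a worked example using the driver's formula, with an assumed 5 MHz effective bus clock (the constant 9 follows the driver's existing per-byte estimate):

/* Worked example of the polling threshold (illustrative values, userspace):
 *   hz_per_byte = 9 * 1000000 / 30  = 300000  (matches the 30 us / 300 kHz comment)
 *   byte_limit  = 5000000 / 300000  = 16
 * so transfers shorter than 16 bytes poll; longer ones fall back to interrupts.
 */
#include <stdio.h>

int main(void)
{
	unsigned long polling_limit_us = 30;	/* module parameter default */
	unsigned long spi_used_hz = 5000000;	/* assumed bus clock */
	unsigned long hz_per_byte = polling_limit_us ?
				    (9 * 1000000) / polling_limit_us : 0;
	unsigned long byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;

	printf("poll transfers shorter than %lu bytes\n", byte_limit);
	return 0;
}
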
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index dd9a8c54a693..4243e53f9f7b 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -335,6 +335,42 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
/*----------------------------------------------------------------------*/
+int spi_bitbang_init(struct spi_bitbang *bitbang)
+{
+ struct spi_master *master = bitbang->master;
+
+ if (!master || !bitbang->chipselect)
+ return -EINVAL;
+
+ mutex_init(&bitbang->lock);
+
+ if (!master->mode_bits)
+ master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+
+ if (master->transfer || master->transfer_one_message)
+ return -EINVAL;
+
+ master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
+ master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
+ master->transfer_one = spi_bitbang_transfer_one;
+ master->set_cs = spi_bitbang_set_cs;
+
+ if (!bitbang->txrx_bufs) {
+ bitbang->use_dma = 0;
+ bitbang->txrx_bufs = spi_bitbang_bufs;
+ if (!master->setup) {
+ if (!bitbang->setup_transfer)
+ bitbang->setup_transfer =
+ spi_bitbang_setup_transfer;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_init);
+
/**
* spi_bitbang_start - start up a polled/bitbanging SPI master driver
* @bitbang: driver handle
@@ -368,33 +404,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
struct spi_master *master = bitbang->master;
int ret;
- if (!master || !bitbang->chipselect)
- return -EINVAL;
-
- mutex_init(&bitbang->lock);
-
- if (!master->mode_bits)
- master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
-
- if (master->transfer || master->transfer_one_message)
- return -EINVAL;
-
- master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
- master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
- master->transfer_one = spi_bitbang_transfer_one;
- master->set_cs = spi_bitbang_set_cs;
-
- if (!bitbang->txrx_bufs) {
- bitbang->use_dma = 0;
- bitbang->txrx_bufs = spi_bitbang_bufs;
- if (!master->setup) {
- if (!bitbang->setup_transfer)
- bitbang->setup_transfer =
- spi_bitbang_setup_transfer;
- master->setup = spi_bitbang_setup;
- master->cleanup = spi_bitbang_cleanup;
- }
- }
+ ret = spi_bitbang_init(bitbang);
+ if (ret)
+ return ret;
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
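
Splitting spi_bitbang_init() out of spi_bitbang_start() presumably exists so a driver can set up the bitbang hooks without the spi_register_master() call that spi_bitbang_start() performs, and register the controller with devm instead. A hedged sketch of such a caller (driver shape and names hypothetical):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

/* Sketch under the assumption above: initialize the hooks with the new helper,
 * then let devm own registration and unwinding.
 */
static int example_bitbang_probe(struct platform_device *pdev,
				 struct spi_bitbang *bitbang)
{
	int ret;

	ret = spi_bitbang_init(bitbang);	/* no registration, unlike _start() */
	if (ret)
		return ret;

	return devm_spi_register_master(&pdev->dev, bitbang->master);
}
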
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 4bd59a93d988..de952b17bc10 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -30,6 +30,7 @@
struct dw_spi_mmio {
struct dw_spi dws;
struct clk *clk;
+ struct clk *pclk;
void *priv;
};
@@ -172,6 +173,14 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Optional clock needed to access the registers */
+ dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(dwsmmio->pclk))
+ return PTR_ERR(dwsmmio->pclk);
+ ret = clk_prepare_enable(dwsmmio->pclk);
+ if (ret)
+ goto out_clk;
+
dws->bus_num = pdev->id;
dws->max_freq = clk_get_rate(dwsmmio->clk);
@@ -199,6 +208,8 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
return 0;
out:
+ clk_disable_unprepare(dwsmmio->pclk);
+out_clk:
clk_disable_unprepare(dwsmmio->clk);
return ret;
}
@@ -208,6 +219,7 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
dw_spi_remove_host(&dwsmmio->dws);
+ clk_disable_unprepare(dwsmmio->pclk);
clk_disable_unprepare(dwsmmio->clk);
return 0;
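
The new optional "pclk" above relies on two properties of the clock API: devm_clk_get_optional() returns NULL rather than an error when the clock is simply absent, and clk_prepare_enable(NULL) is a harmless no-op, so platforms without a separate register clock behave exactly as before. A minimal sketch of the pattern with hypothetical names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch of the optional-clock pattern used above: only real errors (such as
 * -EPROBE_DEFER) propagate; a missing clock silently degrades to a no-op.
 */
static int example_enable_optional_pclk(struct platform_device *pdev,
					struct clk **pclk)
{
	*pclk = devm_clk_get_optional(&pdev->dev, "pclk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	return clk_prepare_enable(*pclk);	/* returns 0 when *pclk is NULL */
}
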
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 79fc3940245a..81889389280b 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -28,7 +28,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/dma-ep93xx.h>
@@ -652,7 +651,6 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
struct resource *res;
int irq;
int error;
- int i;
info = dev_get_platdata(&pdev->dev);
if (!info) {
@@ -676,6 +674,7 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ master->use_gpio_descriptors = true;
master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
master->prepare_message = ep93xx_spi_prepare_message;
@@ -683,31 +682,11 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
-
- master->num_chipselect = info->num_chipselect;
- master->cs_gpios = devm_kcalloc(&master->dev,
- master->num_chipselect, sizeof(int),
- GFP_KERNEL);
- if (!master->cs_gpios) {
- error = -ENOMEM;
- goto fail_release_master;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- master->cs_gpios[i] = info->chipselect[i];
-
- if (!gpio_is_valid(master->cs_gpios[i]))
- continue;
-
- error = devm_gpio_request_one(&pdev->dev, master->cs_gpios[i],
- GPIOF_OUT_INIT_HIGH,
- "ep93xx-spi");
- if (error) {
- dev_err(&pdev->dev, "could not request cs gpio %d\n",
- master->cs_gpios[i]);
- goto fail_release_master;
- }
- }
+ /*
+ * The SPI core will count the number of GPIO descriptors to figure
+ * out the number of chip selects available on the platform.
+ */
+ master->num_chipselect = 0;
platform_set_drvdata(pdev, master);
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index f303f306b38e..483734bc1b1e 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -95,8 +95,10 @@ static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
struct mpc8xxx_spi_probe_info {
struct fsl_spi_platform_data pdata;
+ int ngpios;
int *gpios;
bool *alow_flags;
+ __be32 __iomem *immr_spi_cs;
};
extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi);
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 391863914043..d08e9324140e 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -8,7 +8,10 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -16,7 +19,12 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/dma-imx.h>
+#include <linux/platform_data/spi-imx.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -24,6 +32,11 @@
#define DRIVER_NAME "fsl_lpspi"
+#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */
+
+/* The maximum bytes that edma can transfer once.*/
+#define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1)
+
/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID 0x0
#define IMX7ULP_PARAM 0x4
@@ -57,12 +70,14 @@
#define IER_FCIE BIT(9)
#define IER_RDIE BIT(1)
#define IER_TDIE BIT(0)
+#define DER_RDDE BIT(1)
+#define DER_TDDE BIT(0)
#define CFGR1_PCSCFG BIT(27)
#define CFGR1_PINCFG (BIT(24)|BIT(25))
#define CFGR1_PCSPOL BIT(8)
#define CFGR1_NOSTALL BIT(3)
#define CFGR1_MASTER BIT(0)
-#define FSR_RXCOUNT (BIT(16)|BIT(17)|BIT(18))
+#define FSR_TXCOUNT (0xFF)
#define RSR_RXEMPTY BIT(1)
#define TCR_CPOL BIT(31)
#define TCR_CPHA BIT(30)
@@ -84,8 +99,11 @@ struct lpspi_config {
struct fsl_lpspi_data {
struct device *dev;
void __iomem *base;
- struct clk *clk;
+ unsigned long base_phys;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
bool is_slave;
+ bool is_first_byte;
void *rx_buf;
const void *tx_buf;
@@ -101,6 +119,13 @@ struct fsl_lpspi_data {
struct completion xfer_done;
bool slave_aborted;
+
+ /* DMA */
+ bool usedma;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
+
+ int chipselect[0];
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
@@ -147,12 +172,48 @@ static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}
+static int fsl_lpspi_bytes_per_word(const int bpw)
+{
+ return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
+}
+
+static bool fsl_lpspi_can_dma(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ unsigned int bytes_per_word;
+
+ if (!controller->dma_rx)
+ return false;
+
+ bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);
+
+ switch (bytes_per_word) {
+ case 1:
+ case 2:
+ case 4:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
+ int ret;
+
+ ret = pm_runtime_get_sync(fsl_lpspi->dev);
+ if (ret < 0) {
+ dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+ return ret;
+ }
- return clk_prepare_enable(fsl_lpspi->clk);
+ return 0;
}
static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
@@ -160,7 +221,22 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
- clk_disable_unprepare(fsl_lpspi->clk);
+ pm_runtime_mark_last_busy(fsl_lpspi->dev);
+ pm_runtime_put_autosuspend(fsl_lpspi->dev);
+
+ return 0;
+}
+
+static int fsl_lpspi_prepare_message(struct spi_controller *controller,
+ struct spi_message *msg)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+ struct spi_device *spi = msg->spi;
+ int gpio = fsl_lpspi->chipselect[spi->chip_select];
+
+ if (gpio_is_valid(gpio))
+ gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
return 0;
}
@@ -197,8 +273,7 @@ static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
fsl_lpspi->rx(fsl_lpspi);
}
-static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
- bool is_first_xfer)
+static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp = 0;
@@ -213,11 +288,13 @@ static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
* For the first transfer, clear TCR_CONTC to assert SS.
* For subsequent transfer, set TCR_CONTC to keep SS asserted.
*/
- temp |= TCR_CONT;
- if (is_first_xfer)
- temp &= ~TCR_CONTC;
- else
- temp |= TCR_CONTC;
+ if (!fsl_lpspi->usedma) {
+ temp |= TCR_CONT;
+ if (fsl_lpspi->is_first_byte)
+ temp &= ~TCR_CONTC;
+ else
+ temp |= TCR_CONTC;
+ }
}
writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
@@ -228,7 +305,11 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
- temp = fsl_lpspi->watermark >> 1 | (fsl_lpspi->watermark >> 1) << 16;
+ if (!fsl_lpspi->usedma)
+ temp = fsl_lpspi->watermark >> 1 |
+ (fsl_lpspi->watermark >> 1) << 16;
+ else
+ temp = fsl_lpspi->watermark >> 1;
writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
@@ -241,7 +322,14 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
unsigned int perclk_rate, scldiv;
u8 prescale;
- perclk_rate = clk_get_rate(fsl_lpspi->clk);
+ perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
+
+ if (config.speed_hz > perclk_rate / 2) {
+ dev_err(fsl_lpspi->dev,
+ "per-clk should be at least two times of transfer speed");
+ return -EINVAL;
+ }
+
for (prescale = 0; prescale < 8; prescale++) {
scldiv = perclk_rate /
(clkdivs[prescale] * config.speed_hz) - 2;
@@ -257,12 +345,59 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
fsl_lpspi->base + IMX7ULP_CCR);
- dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale =%d, scldiv=%d\n",
+ dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
perclk_rate, config.speed_hz, prescale, scldiv);
return 0;
}
+static int fsl_lpspi_dma_configure(struct spi_controller *controller)
+{
+ int ret;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config rx = {}, tx = {};
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
+ case 4:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ case 2:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 1:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tx.direction = DMA_MEM_TO_DEV;
+ tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
+ tx.dst_addr_width = buswidth;
+ tx.dst_maxburst = 1;
+ ret = dmaengine_slave_config(controller->dma_tx, &tx);
+ if (ret) {
+ dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ rx.direction = DMA_DEV_TO_MEM;
+ rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
+ rx.src_addr_width = buswidth;
+ rx.src_maxburst = 1;
+ ret = dmaengine_slave_config(controller->dma_rx, &rx);
+ if (ret) {
+ dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
@@ -288,18 +423,27 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
temp |= CR_RRF | CR_RTF | CR_MEN;
writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+ temp = 0;
+ if (fsl_lpspi->usedma)
+ temp = DER_TDDE | DER_RDDE;
+ writel(temp, fsl_lpspi->base + IMX7ULP_DER);
+
return 0;
}
-static void fsl_lpspi_setup_transfer(struct spi_device *spi,
+static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
+ struct spi_device *spi,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(spi->controller);
+ if (t == NULL)
+ return -EINVAL;
+
fsl_lpspi->config.mode = spi->mode;
- fsl_lpspi->config.bpw = t ? t->bits_per_word : spi->bits_per_word;
- fsl_lpspi->config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+ fsl_lpspi->config.bpw = t->bits_per_word;
+ fsl_lpspi->config.speed_hz = t->speed_hz;
fsl_lpspi->config.chip_select = spi->chip_select;
if (!fsl_lpspi->config.speed_hz)
@@ -324,7 +468,12 @@ static void fsl_lpspi_setup_transfer(struct spi_device *spi,
else
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
- fsl_lpspi_config(fsl_lpspi);
+ if (fsl_lpspi_can_dma(controller, spi, t))
+ fsl_lpspi->usedma = 1;
+ else
+ fsl_lpspi->usedma = 0;
+
+ return fsl_lpspi_config(fsl_lpspi);
}
static int fsl_lpspi_slave_abort(struct spi_controller *controller)
@@ -333,7 +482,13 @@ static int fsl_lpspi_slave_abort(struct spi_controller *controller)
spi_controller_get_devdata(controller);
fsl_lpspi->slave_aborted = true;
- complete(&fsl_lpspi->xfer_done);
+ if (!fsl_lpspi->usedma)
+ complete(&fsl_lpspi->xfer_done);
+ else {
+ complete(&fsl_lpspi->dma_tx_completion);
+ complete(&fsl_lpspi->dma_rx_completion);
+ }
+
return 0;
}
@@ -362,8 +517,10 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
- /* Disable all interrupt */
- fsl_lpspi_intctrl(fsl_lpspi, 0);
+ if (!fsl_lpspi->usedma) {
+ /* Disable all interrupt */
+ fsl_lpspi_intctrl(fsl_lpspi, 0);
+ }
/* W1C for all flags in SR */
temp = 0x3F << 8;
@@ -376,8 +533,177 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
return 0;
}
-static int fsl_lpspi_transfer_one(struct spi_controller *controller,
- struct spi_device *spi,
+static void fsl_lpspi_dma_rx_callback(void *cookie)
+{
+ struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
+
+ complete(&fsl_lpspi->dma_rx_completion);
+}
+
+static void fsl_lpspi_dma_tx_callback(void *cookie)
+{
+ struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
+
+ complete(&fsl_lpspi->dma_tx_completion);
+}
+
+static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
+ int size)
+{
+ unsigned long timeout = 0;
+
+ /* Time with actual data transfer and CS change delay related to HW */
+ timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;
+
+ /* Add extra second for scheduler related activities */
+ timeout += 1;
+
+ /* Double calculated timeout */
+ return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+}
+
+static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
+ struct fsl_lpspi_data *fsl_lpspi,
+ struct spi_transfer *transfer)
+{
+ struct dma_async_tx_descriptor *desc_tx, *desc_rx;
+ unsigned long transfer_timeout;
+ unsigned long timeout;
+ struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+ int ret;
+
+ ret = fsl_lpspi_dma_configure(controller);
+ if (ret)
+ return ret;
+
+ desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ return -EINVAL;
+
+ desc_rx->callback = fsl_lpspi_dma_rx_callback;
+ desc_rx->callback_param = (void *)fsl_lpspi;
+ dmaengine_submit(desc_rx);
+ reinit_completion(&fsl_lpspi->dma_rx_completion);
+ dma_async_issue_pending(controller->dma_rx);
+
+ desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dmaengine_terminate_all(controller->dma_tx);
+ return -EINVAL;
+ }
+
+ desc_tx->callback = fsl_lpspi_dma_tx_callback;
+ desc_tx->callback_param = (void *)fsl_lpspi;
+ dmaengine_submit(desc_tx);
+ reinit_completion(&fsl_lpspi->dma_tx_completion);
+ dma_async_issue_pending(controller->dma_tx);
+
+ fsl_lpspi->slave_aborted = false;
+
+ if (!fsl_lpspi->is_slave) {
+ transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
+ transfer->len);
+
+		/* Wait for eDMA to finish the data transfer. */
+ timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -ETIMEDOUT;
+ }
+
+ timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -ETIMEDOUT;
+ }
+ } else {
+ if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
+ fsl_lpspi->slave_aborted) {
+ dev_dbg(fsl_lpspi->dev,
+ "I/O Error in DMA TX interrupted\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -EINTR;
+ }
+
+ if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
+ fsl_lpspi->slave_aborted) {
+ dev_dbg(fsl_lpspi->dev,
+ "I/O Error in DMA RX interrupted\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -EINTR;
+ }
+ }
+
+ fsl_lpspi_reset(fsl_lpspi);
+
+ return 0;
+}
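For reference, the TX and RX legs above both follow the standard dmaengine descriptor lifecycle; a skeleton of one leg (hypothetical names, error paths and the second channel omitted):

    #include <linux/completion.h>
    #include <linux/dmaengine.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    static void example_dma_done(void *arg)
    {
            complete(arg);
    }

    /* prep -> set callback -> submit -> issue pending -> wait. */
    static int example_one_dma_leg(struct dma_chan *chan, struct sg_table *sgt,
                                   struct completion *done, unsigned long tmo)
    {
            struct dma_async_tx_descriptor *desc;

            desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
                                           DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EINVAL;

            desc->callback = example_dma_done;
            desc->callback_param = done;
            dmaengine_submit(desc);
            reinit_completion(done);
            dma_async_issue_pending(chan);

            return wait_for_completion_timeout(done, tmo) ? 0 : -ETIMEDOUT;
    }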
+
+static void fsl_lpspi_dma_exit(struct spi_controller *controller)
+{
+ if (controller->dma_rx) {
+ dma_release_channel(controller->dma_rx);
+ controller->dma_rx = NULL;
+ }
+
+ if (controller->dma_tx) {
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
+ }
+}
+
+static int fsl_lpspi_dma_init(struct device *dev,
+ struct fsl_lpspi_data *fsl_lpspi,
+ struct spi_controller *controller)
+{
+ int ret;
+
+ /* Prepare for TX DMA: */
+ controller->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(controller->dma_tx)) {
+ ret = PTR_ERR(controller->dma_tx);
+ dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+ controller->dma_tx = NULL;
+ goto err;
+ }
+
+ /* Prepare for RX DMA: */
+ controller->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(controller->dma_rx)) {
+ ret = PTR_ERR(controller->dma_rx);
+ dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+ controller->dma_rx = NULL;
+ goto err;
+ }
+
+ init_completion(&fsl_lpspi->dma_rx_completion);
+ init_completion(&fsl_lpspi->dma_tx_completion);
+ controller->can_dma = fsl_lpspi_can_dma;
+ controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;
+
+ return 0;
+err:
+ fsl_lpspi_dma_exit(controller);
+ return ret;
+}
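fsl_lpspi_can_dma() is assigned to controller->can_dma here but its body is not part of this hunk; purely as a hypothetical sketch, a typical predicate for this kind of setup would look like the following (the 16-byte PIO cut-off is an assumption, not the driver's value):

    #include <linux/spi/spi.h>

    static bool example_can_dma(struct spi_controller *controller,
                                struct spi_device *spi,
                                struct spi_transfer *transfer)
    {
            /* Both channels must have been acquired by the init routine above. */
            if (!controller->dma_tx || !controller->dma_rx)
                    return false;

            /* Respect the eDMA limit advertised via max_dma_len above. */
            if (transfer->len > FSL_LPSPI_MAX_EDMA_BYTES)
                    return false;

            /* Assumed threshold: short transfers stay on PIO. */
            return transfer->len > 16;
    }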
+
+static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
@@ -402,37 +728,30 @@ static int fsl_lpspi_transfer_one(struct spi_controller *controller,
return 0;
}
-static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller,
- struct spi_message *msg)
+static int fsl_lpspi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
- spi_controller_get_devdata(controller);
- struct spi_device *spi = msg->spi;
- struct spi_transfer *xfer;
- bool is_first_xfer = true;
- int ret = 0;
-
- msg->status = 0;
- msg->actual_length = 0;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- fsl_lpspi_setup_transfer(spi, xfer);
- fsl_lpspi_set_cmd(fsl_lpspi, is_first_xfer);
-
- is_first_xfer = false;
+ spi_controller_get_devdata(controller);
+ int ret;
- ret = fsl_lpspi_transfer_one(controller, spi, xfer);
- if (ret < 0)
- goto complete;
+ fsl_lpspi->is_first_byte = true;
+ ret = fsl_lpspi_setup_transfer(controller, spi, t);
+ if (ret < 0)
+ return ret;
- msg->actual_length += xfer->len;
- }
+ fsl_lpspi_set_cmd(fsl_lpspi);
+ fsl_lpspi->is_first_byte = false;
-complete:
- msg->status = ret;
- spi_finalize_current_message(controller);
+ if (fsl_lpspi->usedma)
+ ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
+ else
+ ret = fsl_lpspi_pio_transfer(controller, t);
+ if (ret < 0)
+ return ret;
- return ret;
+ return 0;
}
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
@@ -452,7 +771,7 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
}
if (temp_SR & SR_MBF ||
- readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_RXCOUNT) {
+ readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
return IRQ_HANDLED;
@@ -467,15 +786,67 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
return IRQ_NONE;
}
+#ifdef CONFIG_PM
+static int fsl_lpspi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+ struct fsl_lpspi_data *fsl_lpspi;
+ int ret;
+
+ fsl_lpspi = spi_controller_get_devdata(controller);
+
+ ret = clk_prepare_enable(fsl_lpspi->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
+ if (ret) {
+ clk_disable_unprepare(fsl_lpspi->clk_per);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_lpspi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+ struct fsl_lpspi_data *fsl_lpspi;
+
+ fsl_lpspi = spi_controller_get_devdata(controller);
+
+ clk_disable_unprepare(fsl_lpspi->clk_per);
+ clk_disable_unprepare(fsl_lpspi->clk_ipg);
+
+ return 0;
+}
+#endif
+
+static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
+{
+ struct device *dev = fsl_lpspi->dev;
+
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+
+ return 0;
+}
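The probe path below pairs this autosuspend setup with lpspi_prepare_xfer_hardware()/lpspi_unprepare_xfer_hardware(), which are not shown in this hunk; the generic runtime-PM bracket such callbacks normally use is (sketch only, not the driver's code):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int example_prepare_hw(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);    /* resumes, re-enabling clocks */

            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }
            return 0;
    }

    static void example_unprepare_hw(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);       /* clocks gate after the delay */
    }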
+
static int fsl_lpspi_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct fsl_lpspi_data *fsl_lpspi;
struct spi_controller *controller;
+ struct spi_imx_master *lpspi_platform_info =
+ dev_get_platdata(&pdev->dev);
struct resource *res;
- int ret, irq;
+ int i, ret, irq;
u32 temp;
+ bool is_slave;
- if (of_property_read_bool((&pdev->dev)->of_node, "spi-slave"))
+ is_slave = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
+ if (is_slave)
controller = spi_alloc_slave(&pdev->dev,
sizeof(struct fsl_lpspi_data));
else
@@ -487,15 +858,35 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, controller);
- controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- controller->bus_num = pdev->id;
-
fsl_lpspi = spi_controller_get_devdata(controller);
fsl_lpspi->dev = &pdev->dev;
- fsl_lpspi->is_slave = of_property_read_bool((&pdev->dev)->of_node,
- "spi-slave");
+ fsl_lpspi->is_slave = is_slave;
- controller->transfer_one_message = fsl_lpspi_transfer_one_msg;
+ if (!fsl_lpspi->is_slave) {
+ for (i = 0; i < controller->num_chipselect; i++) {
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+
+ if (!gpio_is_valid(cs_gpio) && lpspi_platform_info)
+ cs_gpio = lpspi_platform_info->chipselect[i];
+
+ fsl_lpspi->chipselect[i] = cs_gpio;
+ if (!gpio_is_valid(cs_gpio))
+ continue;
+
+ ret = devm_gpio_request(&pdev->dev,
+ fsl_lpspi->chipselect[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get cs gpios\n");
+ goto out_controller_put;
+ }
+ }
+ controller->cs_gpios = fsl_lpspi->chipselect;
+ controller->prepare_message = fsl_lpspi_prepare_message;
+ }
+
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->transfer_one = fsl_lpspi_transfer_one;
controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
@@ -512,6 +903,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = PTR_ERR(fsl_lpspi->base);
goto out_controller_put;
}
+ fsl_lpspi->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -526,23 +918,39 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
goto out_controller_put;
}
- fsl_lpspi->clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(fsl_lpspi->clk)) {
- ret = PTR_ERR(fsl_lpspi->clk);
+ fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(fsl_lpspi->clk_per)) {
+ ret = PTR_ERR(fsl_lpspi->clk_per);
goto out_controller_put;
}
- ret = clk_prepare_enable(fsl_lpspi->clk);
- if (ret) {
- dev_err(&pdev->dev, "can't enable lpspi clock, ret=%d\n", ret);
+ fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fsl_lpspi->clk_ipg)) {
+ ret = PTR_ERR(fsl_lpspi->clk_ipg);
+ goto out_controller_put;
+ }
+
+ /* enable the clock */
+ ret = fsl_lpspi_init_rpm(fsl_lpspi);
+ if (ret)
goto out_controller_put;
+
+ ret = pm_runtime_get_sync(fsl_lpspi->dev);
+ if (ret < 0) {
+ dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+ return ret;
}
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
- clk_disable_unprepare(fsl_lpspi->clk);
+ ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
+ if (ret == -EPROBE_DEFER)
+ goto out_controller_put;
+
+ if (ret < 0)
+ dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
@@ -564,15 +972,50 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
- clk_disable_unprepare(fsl_lpspi->clk);
+ pm_runtime_disable(fsl_lpspi->dev);
+
+ spi_master_put(controller);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int fsl_lpspi_suspend(struct device *dev)
+{
+ int ret;
+
+ pinctrl_pm_select_sleep_state(dev);
+ ret = pm_runtime_force_suspend(dev);
+ return ret;
+}
+
+static int fsl_lpspi_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(dev, "Error in resume: %d\n", ret);
+ return ret;
+ }
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops fsl_lpspi_pm_ops = {
+ SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
+ fsl_lpspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
+};
+
static struct platform_driver fsl_lpspi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fsl_lpspi_dt_ids,
+ .pm = &fsl_lpspi_pm_ops,
},
.probe = fsl_lpspi_probe,
.remove = fsl_lpspi_remove,
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 6a713f78a62e..41a49b93ca60 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -882,7 +882,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = np;
- ret = spi_register_controller(ctlr);
+ ret = devm_spi_register_controller(dev, ctlr);
if (ret)
goto err_destroy_mutex;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 8f2e97857e8b..b36ac6aa3b1f 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -39,6 +39,14 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
+#ifdef CONFIG_FSL_SOC
+#include <sysdev/fsl_soc.h>
+#endif
+
+/* Specific to the MPC8306/MPC8309 */
+#define IMMR_SPI_CS_OFFSET 0x14c
+#define SPI_BOOT_SEL_BIT 0x80000000
+
#include "spi-fsl-lib.h"
#include "spi-fsl-cpm.h"
#include "spi-fsl-spi.h"
@@ -355,33 +363,50 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static int fsl_spi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
struct spi_device *spi = m->spi;
struct spi_transfer *t, *first;
unsigned int cs_change;
const int nsecs = 50;
- int status;
+ int status, last_bpw;
+
+ /*
+ * In CPU mode, optimize large byte transfers to use larger
+ * bits_per_word values to reduce number of interrupts taken.
+ */
+ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->len < 256 || t->bits_per_word != 8)
+ continue;
+ if ((t->len & 3) == 0)
+ t->bits_per_word = 32;
+ else if ((t->len & 1) == 0)
+ t->bits_per_word = 16;
+ }
+ }
/* Don't allow changes if CS is active */
- first = list_first_entry(&m->transfers, struct spi_transfer,
- transfer_list);
+ cs_change = 1;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if ((first->bits_per_word != t->bits_per_word) ||
- (first->speed_hz != t->speed_hz)) {
+ if (cs_change)
+ first = t;
+ cs_change = t->cs_change;
+ if (first->speed_hz != t->speed_hz) {
dev_err(&spi->dev,
- "bits_per_word/speed_hz should be same for the same SPI transfer\n");
+ "speed_hz cannot change while CS is active\n");
return -EINVAL;
}
}
+ last_bpw = -1;
cs_change = 1;
status = -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- if (cs_change)
- status = fsl_spi_setup_transfer(spi, t);
- if (status < 0)
- break;
- }
+ if (cs_change || last_bpw != t->bits_per_word)
+ status = fsl_spi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ last_bpw = t->bits_per_word;
if (cs_change) {
fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
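The length/alignment rule added at the top of fsl_spi_do_one_msg() is easy to verify in isolation; a minimal standalone version of the same widening logic:

    #include <stdio.h>

    /* An 8-bit transfer of at least 256 bytes is promoted to 32- or 16-bit
     * words when its length is 4- or 2-byte aligned, cutting the interrupt
     * count in CPU mode. */
    static int widen_bits_per_word(int len, int bpw)
    {
            if (len < 256 || bpw != 8)
                    return bpw;
            if ((len & 3) == 0)
                    return 32;
            if ((len & 1) == 0)
                    return 16;
            return 8;
    }

    int main(void)
    {
            printf("%d\n", widen_bits_per_word(512, 8));    /* 32 */
            printf("%d\n", widen_bits_per_word(258, 8));    /* 16 */
            printf("%d\n", widen_bits_per_word(100, 8));    /* 8: too short */
            return 0;
    }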
@@ -701,10 +726,17 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
u16 cs = spi->chip_select;
- int gpio = pinfo->gpios[cs];
- bool alow = pinfo->alow_flags[cs];
- gpio_set_value(gpio, on ^ alow);
+ if (cs < pinfo->ngpios) {
+ int gpio = pinfo->gpios[cs];
+ bool alow = pinfo->alow_flags[cs];
+
+ gpio_set_value(gpio, on ^ alow);
+ } else {
+ if (WARN_ON_ONCE(cs > pinfo->ngpios || !pinfo->immr_spi_cs))
+ return;
+ iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
+ }
}
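The expression "on ^ alow" above folds chip-select polarity into one XOR; a trivial standalone table spells out the four cases:

    #include <stdio.h>

    int main(void)
    {
            /* on=1 asserts CS: the line goes low when active-low (alow=1),
             * high when active-high (alow=0). */
            for (int on = 0; on <= 1; on++)
                    for (int alow = 0; alow <= 1; alow++)
                            printf("on=%d alow=%d -> gpio=%d\n",
                                   on, alow, on ^ alow);
            return 0;
    }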
static int of_fsl_spi_get_chipselects(struct device *dev)
@@ -712,12 +744,15 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
struct device_node *np = dev->of_node;
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+ bool spisel_boot = IS_ENABLED(CONFIG_FSL_SOC) &&
+ of_property_read_bool(np, "fsl,spisel_boot");
int ngpios;
int i = 0;
int ret;
ngpios = of_gpio_count(np);
- if (ngpios <= 0) {
+ ngpios = max(ngpios, 0);
+ if (ngpios == 0 && !spisel_boot) {
/*
* SPI w/o chip-select line. One SPI device is still permitted
* though.
@@ -726,6 +761,7 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
return 0;
}
+ pinfo->ngpios = ngpios;
pinfo->gpios = kmalloc_array(ngpios, sizeof(*pinfo->gpios),
GFP_KERNEL);
if (!pinfo->gpios)
@@ -769,7 +805,18 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
}
}
- pdata->max_chipselect = ngpios;
+#if IS_ENABLED(CONFIG_FSL_SOC)
+ if (spisel_boot) {
+ pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
+ if (!pinfo->immr_spi_cs) {
+ ret = -ENOMEM;
+ i = ngpios - 1;
+ goto err_loop;
+ }
+ }
+#endif
+
+ pdata->max_chipselect = ngpios + spisel_boot;
pdata->cs_control = fsl_spi_cs_control;
return 0;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 53b35c56a557..487ee55d26f7 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -35,20 +35,16 @@
* platform_device->driver_data ... points to spi_gpio
*
* spi->controller_state ... reserved for bitbang framework code
- * spi->controller_data ... holds chipselect GPIO
*
* spi->master->dev.driver_data ... points to spi_gpio->bitbang
*/
struct spi_gpio {
struct spi_bitbang bitbang;
- struct spi_gpio_platform_data pdata;
- struct platform_device *pdev;
struct gpio_desc *sck;
struct gpio_desc *miso;
struct gpio_desc *mosi;
struct gpio_desc **cs_gpios;
- bool has_cs;
};
/*----------------------------------------------------------------------*/
@@ -96,12 +92,6 @@ spi_to_spi_gpio(const struct spi_device *spi)
return spi_gpio;
}
-static inline struct spi_gpio_platform_data *__pure
-spi_to_pdata(const struct spi_device *spi)
-{
- return &spi_to_spi_gpio(spi)->pdata;
-}
-
/* These helpers are in turn called by the bitbang inlines */
static inline void setsck(const struct spi_device *spi, int is_on)
{
@@ -224,7 +214,7 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
gpiod_set_value_cansleep(spi_gpio->sck, spi->mode & SPI_CPOL);
/* Drive chip select line, if we have one */
- if (spi_gpio->has_cs) {
+ if (spi_gpio->cs_gpios) {
struct gpio_desc *cs = spi_gpio->cs_gpios[spi->chip_select];
/* SPI chip selects are normally active-low */
@@ -242,10 +232,12 @@ static int spi_gpio_setup(struct spi_device *spi)
* The CS GPIOs have already been
* initialized from the descriptor lookup.
*/
- cs = spi_gpio->cs_gpios[spi->chip_select];
- if (!spi->controller_state && cs)
- status = gpiod_direction_output(cs,
- !(spi->mode & SPI_CS_HIGH));
+ if (spi_gpio->cs_gpios) {
+ cs = spi_gpio->cs_gpios[spi->chip_select];
+ if (!spi->controller_state && cs)
+ status = gpiod_direction_output(cs,
+ !(spi->mode & SPI_CS_HIGH));
+ }
if (!status)
status = spi_bitbang_setup(spi);
@@ -296,40 +288,20 @@ static void spi_gpio_cleanup(struct spi_device *spi)
* floating signals. (A weak pulldown would save power too, but many
* drivers expect to see all-ones data as the no slave "response".)
*/
-static int spi_gpio_request(struct device *dev,
- struct spi_gpio *spi_gpio,
- unsigned int num_chipselects,
- u16 *mflags)
+static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
{
- int i;
-
spi_gpio->mosi = devm_gpiod_get_optional(dev, "mosi", GPIOD_OUT_LOW);
if (IS_ERR(spi_gpio->mosi))
return PTR_ERR(spi_gpio->mosi);
- if (!spi_gpio->mosi)
- /* HW configuration without MOSI pin */
- *mflags |= SPI_MASTER_NO_TX;
spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN);
if (IS_ERR(spi_gpio->miso))
return PTR_ERR(spi_gpio->miso);
- /*
- * No setting SPI_MASTER_NO_RX here - if there is only a MOSI
- * pin connected the host can still do RX by changing the
- * direction of the line.
- */
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
if (IS_ERR(spi_gpio->sck))
return PTR_ERR(spi_gpio->sck);
- for (i = 0; i < num_chipselects; i++) {
- spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
- i, GPIOD_OUT_HIGH);
- if (IS_ERR(spi_gpio->cs_gpios[i]))
- return PTR_ERR(spi_gpio->cs_gpios[i]);
- }
-
return 0;
}
@@ -340,142 +312,134 @@ static const struct of_device_id spi_gpio_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
-static int spi_gpio_probe_dt(struct platform_device *pdev)
+static int spi_gpio_probe_dt(struct platform_device *pdev,
+ struct spi_master *master)
{
- int ret;
- u32 tmp;
- struct spi_gpio_platform_data *pdata;
- struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(spi_gpio_dt_ids, &pdev->dev);
-
- if (!of_id)
- return 0;
+ master->dev.of_node = pdev->dev.of_node;
+ master->use_gpio_descriptors = true;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ return 0;
+}
+#else
+static inline int spi_gpio_probe_dt(struct platform_device *pdev,
+ struct spi_master *master)
+{
+ return 0;
+}
+#endif
+static int spi_gpio_probe_pdata(struct platform_device *pdev,
+ struct spi_master *master)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_gpio_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_gpio *spi_gpio = spi_master_get_devdata(master);
+ int i;
- ret = of_property_read_u32(np, "num-chipselects", &tmp);
- if (ret < 0) {
- dev_err(&pdev->dev, "num-chipselects property not found\n");
- goto error_free;
- }
+#ifdef GENERIC_BITBANG
+ if (!pdata || !pdata->num_chipselect)
+ return -ENODEV;
+#endif
+ /*
+ * The master needs to think there is a chipselect even if not
+ * connected
+ */
+ master->num_chipselect = pdata->num_chipselect ?: 1;
- pdata->num_chipselect = tmp;
- pdev->dev.platform_data = pdata;
+ spi_gpio->cs_gpios = devm_kcalloc(dev, master->num_chipselect,
+ sizeof(*spi_gpio->cs_gpios),
+ GFP_KERNEL);
+ if (!spi_gpio->cs_gpios)
+ return -ENOMEM;
- return 1;
+ for (i = 0; i < master->num_chipselect; i++) {
+ spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(spi_gpio->cs_gpios[i]))
+ return PTR_ERR(spi_gpio->cs_gpios[i]);
+ }
-error_free:
- devm_kfree(&pdev->dev, pdata);
- return ret;
+ return 0;
}
-#else
-static inline int spi_gpio_probe_dt(struct platform_device *pdev)
+
+static void spi_gpio_put(void *data)
{
- return 0;
+ spi_master_put(data);
}
-#endif
static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
struct spi_master *master;
struct spi_gpio *spi_gpio;
- struct spi_gpio_platform_data *pdata;
- u16 master_flags = 0;
- bool use_of = 0;
+ struct device *dev = &pdev->dev;
+ struct spi_bitbang *bb;
+ const struct of_device_id *of_id;
- status = spi_gpio_probe_dt(pdev);
- if (status < 0)
- return status;
- if (status > 0)
- use_of = 1;
-
- pdata = dev_get_platdata(&pdev->dev);
-#ifdef GENERIC_BITBANG
- if (!pdata || (!use_of && !pdata->num_chipselect))
- return -ENODEV;
-#endif
+ of_id = of_match_device(spi_gpio_dt_ids, &pdev->dev);
- master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio));
+ master = spi_alloc_master(dev, sizeof(*spi_gpio));
if (!master)
return -ENOMEM;
- spi_gpio = spi_master_get_devdata(master);
-
- spi_gpio->cs_gpios = devm_kcalloc(&pdev->dev,
- pdata->num_chipselect,
- sizeof(*spi_gpio->cs_gpios),
- GFP_KERNEL);
- if (!spi_gpio->cs_gpios)
- return -ENOMEM;
+ status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master);
+ if (status)
+ return status;
- platform_set_drvdata(pdev, spi_gpio);
+ if (of_id)
+ status = spi_gpio_probe_dt(pdev, master);
+ else
+ status = spi_gpio_probe_pdata(pdev, master);
- /* Determine if we have chip selects connected */
- spi_gpio->has_cs = !!pdata->num_chipselect;
+ if (status)
+ return status;
- spi_gpio->pdev = pdev;
- if (pdata)
- spi_gpio->pdata = *pdata;
+ spi_gpio = spi_master_get_devdata(master);
- status = spi_gpio_request(&pdev->dev, spi_gpio,
- pdata->num_chipselect, &master_flags);
+ status = spi_gpio_request(dev, spi_gpio);
if (status)
return status;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
SPI_CS_HIGH;
- master->flags = master_flags;
+ if (!spi_gpio->mosi) {
+ /* HW configuration without MOSI pin
+ *
+ * No setting SPI_MASTER_NO_RX here - if there is only
+ * a MOSI pin connected the host can still do RX by
+ * changing the direction of the line.
+ */
+ master->flags = SPI_MASTER_NO_TX;
+ }
+
master->bus_num = pdev->id;
- /* The master needs to think there is a chipselect even if not connected */
- master->num_chipselect = spi_gpio->has_cs ? pdata->num_chipselect : 1;
master->setup = spi_gpio_setup;
master->cleanup = spi_gpio_cleanup;
-#ifdef CONFIG_OF
- master->dev.of_node = pdev->dev.of_node;
-#endif
- spi_gpio->bitbang.master = master;
- spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
- spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction;
+ bb = &spi_gpio->bitbang;
+ bb->master = master;
+ bb->chipselect = spi_gpio_chipselect;
+ bb->set_line_direction = spi_gpio_set_direction;
- if ((master_flags & SPI_MASTER_NO_TX) == 0) {
- spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
- spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
- spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
- spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
+ if (master->flags & SPI_MASTER_NO_TX) {
+ bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
+ bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
+ bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
+ bb->txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
} else {
- spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
- spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
- spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
- spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
+ bb->txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
+ bb->txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
+ bb->txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
+ bb->txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
}
- spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
+ bb->setup_transfer = spi_bitbang_setup_transfer;
- status = spi_bitbang_start(&spi_gpio->bitbang);
+ status = spi_bitbang_init(&spi_gpio->bitbang);
if (status)
- spi_master_put(master);
-
- return status;
-}
-
-static int spi_gpio_remove(struct platform_device *pdev)
-{
- struct spi_gpio *spi_gpio;
-
- spi_gpio = platform_get_drvdata(pdev);
-
- /* stop() unregisters child devices too */
- spi_bitbang_stop(&spi_gpio->bitbang);
-
- spi_master_put(spi_gpio->bitbang.master);
+ return status;
- return 0;
+ return devm_spi_register_master(&pdev->dev, spi_master_get(master));
}
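The reference counting in this managed probe path is subtle; a condensed sketch of the pattern, under the assumption that the devm action releases the allocation reference while the extra spi_master_get() is the one consumed when the devm-registered controller is unregistered on unbind:

    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    static void example_put_master(void *data)
    {
            spi_master_put(data);
    }

    static int example_probe(struct platform_device *pdev)
    {
            struct spi_master *master;
            int ret;

            master = spi_alloc_master(&pdev->dev, 0);       /* refcount 1 */
            if (!master)
                    return -ENOMEM;

            /* Guarantees the allocation reference is dropped on any exit. */
            ret = devm_add_action_or_reset(&pdev->dev, example_put_master, master);
            if (ret)
                    return ret;

            /* ... set up bitbang callbacks, GPIOs, mode bits ... */

            /* Extra reference balances the put done at unregister time. */
            return devm_spi_register_master(&pdev->dev, spi_master_get(master));
    }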
MODULE_ALIAS("platform:" DRIVER_NAME);
@@ -486,7 +450,6 @@ static struct platform_driver spi_gpio_driver = {
.of_match_table = of_match_ptr(spi_gpio_dt_ids),
},
.probe = spi_gpio_probe,
- .remove = spi_gpio_remove,
};
module_platform_driver(spi_gpio_driver);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6ec647bbba77..09c9a1edb2c6 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -28,6 +28,10 @@
#define DRIVER_NAME "spi_imx"
+static bool use_dma = true;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
+
#define MXC_CSPIRXDATA 0x00
#define MXC_CSPITXDATA 0x04
#define MXC_CSPICTRL 0x08
@@ -219,6 +223,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ if (!use_dma)
+ return false;
+
if (!master->dma_rx)
return false;
@@ -1494,7 +1501,7 @@ static int spi_imx_transfer(struct spi_device *spi,
/* flush rxfifo before transfer */
while (spi_imx->devtype_data->rx_available(spi_imx))
- spi_imx->rx(spi_imx);
+ readl(spi_imx->base + MXC_CSPIRXDATA);
if (spi_imx->slave_mode)
return spi_imx_pio_transfer_slave(spi, transfer);
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index a4d8d19ecff9..9f0fa9f3116d 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -135,8 +135,8 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
return -ENOTSUPP;
}
-static bool spi_mem_default_supports_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
{
if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
return false;
@@ -622,7 +622,7 @@ void devm_spi_mem_dirmap_destroy(struct device *dev,
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
/**
- * spi_mem_dirmap_dirmap_read() - Read data through a direct mapping
+ * spi_mem_dirmap_read() - Read data through a direct mapping
* @desc: direct mapping descriptor
* @offs: offset to start reading from. Note that this is not an absolute
* offset, but the offset within the direct mapping which already has
@@ -668,7 +668,7 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
/**
- * spi_mem_dirmap_dirmap_write() - Write data through a direct mapping
+ * spi_mem_dirmap_write() - Write data through a direct mapping
* @desc: direct mapping descriptor
* @offs: offset to start writing from. Note that this is not an absolute
* offset, but the offset within the direct mapping which already has
diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index b509f9fe3346..ff85982464d2 100644
--- a/drivers/staging/mt7621-spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -1,15 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * spi-mt7621.c -- MediaTek MT7621 SPI controller driver
- *
- * Copyright (C) 2011 Sergiy <piratfm@gmail.com>
- * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
- *
- * Some parts are based on spi-orion.c:
- * Author: Shadi Ammouri <shadi@marvell.com>
- * Copyright (C) 2007-2008 Marvell Ltd.
- */
+//
+// spi-mt7621.c -- MediaTek MT7621 SPI controller driver
+//
+// Copyright (C) 2011 Sergiy <piratfm@gmail.com>
+// Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+// Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
+//
+// Some parts are based on spi-orion.c:
+// Author: Shadi Ammouri <shadi@marvell.com>
+// Copyright (C) 2007-2008 Marvell Ltd.
#include <linux/clk.h>
#include <linux/delay.h>
@@ -52,19 +51,17 @@
#define MT7621_LSB_FIRST BIT(3)
struct mt7621_spi {
- struct spi_master *master;
+ struct spi_controller *master;
void __iomem *base;
unsigned int sys_freq;
unsigned int speed;
struct clk *clk;
int pending_write;
-
- struct mt7621_spi_ops *ops;
};
static inline struct mt7621_spi *spidev_to_mt7621_spi(struct spi_device *spi)
{
- return spi_master_get_devdata(spi->master);
+ return spi_controller_get_devdata(spi->master);
}
static inline u32 mt7621_spi_read(struct mt7621_spi *rs, u32 reg)
@@ -77,29 +74,25 @@ static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val)
iowrite32(val, rs->base + reg);
}
-static void mt7621_spi_reset(struct mt7621_spi *rs)
+static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
{
- u32 master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
+ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
+ int cs = spi->chip_select;
+ u32 polar = 0;
+ u32 master;
/*
* Select SPI device 7, enable "more buffer mode" and disable
* full-duplex (only half-duplex really works on this chip
* reliably)
*/
+ master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
master |= MASTER_RS_SLAVE_SEL | MASTER_MORE_BUFMODE;
master &= ~MASTER_FULL_DUPLEX;
-
mt7621_spi_write(rs, MT7621_SPI_MASTER, master);
- rs->pending_write = 0;
-}
-static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
-{
- struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
- int cs = spi->chip_select;
- u32 polar = 0;
+ rs->pending_write = 0;
- mt7621_spi_reset(rs);
if (enable)
polar = BIT(cs);
mt7621_spi_write(rs, MT7621_SPI_POLAR, polar);
@@ -163,13 +156,14 @@ static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
int rx_len, u8 *buf)
{
+ int tx_len;
+
/*
* Combine with any pending write, and perform one or more half-duplex
* transactions reading 'len' bytes. Data to be written is already in
* MT7621_SPI_DATA.
*/
- int tx_len = rs->pending_write;
-
+ tx_len = rs->pending_write;
rs->pending_write = 0;
while (rx_len || tx_len) {
@@ -209,8 +203,8 @@ static inline void mt7621_spi_flush(struct mt7621_spi *rs)
static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
int tx_len, const u8 *buf)
{
- int val = 0;
int len = rs->pending_write;
+ int val = 0;
if (len & 3) {
val = mt7621_spi_read(rs, MT7621_SPI_OPCODE + (len & ~3));
@@ -238,6 +232,7 @@ static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
}
tx_len -= 1;
}
+
if (len & 3) {
if (len < 4) {
val = swab32(val);
@@ -245,13 +240,14 @@ static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
}
mt7621_spi_write(rs, MT7621_SPI_OPCODE + (len & ~3), val);
}
+
rs->pending_write = len;
}
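In the full driver, this write path packs outgoing bytes into the 32-bit opcode/data registers by shifting them in MSB-first (the shift loop itself is context not shown in this hunk); a tiny standalone check of that packing step, leaving out the partial-word swab32 handling at the tail:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
            uint32_t val = 0;

            for (int i = 0; i < 4; i++)
                    val = (val << 8) | buf[i];      /* MSB-first packing */

            printf("0x%08x\n", val);                /* 0xdeadbeef */
            return 0;
    }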
-static int mt7621_spi_transfer_one_message(struct spi_master *master,
+static int mt7621_spi_transfer_one_message(struct spi_controller *master,
struct spi_message *m)
{
- struct mt7621_spi *rs = spi_master_get_devdata(master);
+ struct mt7621_spi *rs = spi_controller_get_devdata(master);
struct spi_device *spi = m->spi;
unsigned int speed = spi->max_speed_hz;
struct spi_transfer *t = NULL;
@@ -268,11 +264,14 @@ static int mt7621_spi_transfer_one_message(struct spi_master *master,
goto msg_done;
}
+ /* Assert CS */
mt7621_spi_set_cs(spi, 1);
+
m->actual_length = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if ((t->rx_buf) && (t->tx_buf)) {
- /* This controller will shift some extra data out
+ /*
+ * This controller will shift some extra data out
* of spi_opcode if (mosi_bit_cnt > 0) &&
* (cmd_bit_cnt == 0). So the claimed full-duplex
* support is broken since we have no way to read
@@ -287,8 +286,9 @@ static int mt7621_spi_transfer_one_message(struct spi_master *master,
}
m->actual_length += t->len;
}
- mt7621_spi_flush(rs);
+ /* Flush data and deassert CS */
+ mt7621_spi_flush(rs);
mt7621_spi_set_cs(spi, 0);
msg_done:
@@ -303,8 +303,8 @@ static int mt7621_spi_setup(struct spi_device *spi)
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
if ((spi->max_speed_hz == 0) ||
- (spi->max_speed_hz > (rs->sys_freq / 2)))
- spi->max_speed_hz = (rs->sys_freq / 2);
+ (spi->max_speed_hz > (rs->sys_freq / 2)))
+ spi->max_speed_hz = rs->sys_freq / 2;
if (spi->max_speed_hz < (rs->sys_freq / 4097)) {
dev_err(&spi->dev, "setup: requested speed is too low %d Hz\n",
@@ -324,19 +324,17 @@ MODULE_DEVICE_TABLE(of, mt7621_spi_match);
static int mt7621_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- struct spi_master *master;
+ struct spi_controller *master;
struct mt7621_spi *rs;
void __iomem *base;
struct resource *r;
int status = 0;
struct clk *clk;
- struct mt7621_spi_ops *ops;
int ret;
match = of_match_device(mt7621_spi_match, &pdev->dev);
if (!match)
return -EINVAL;
- ops = (struct mt7621_spi_ops *)match->data;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, r);
@@ -361,7 +359,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
}
master->mode_bits = SPI_LSB_FIRST;
-
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = mt7621_spi_setup;
master->transfer_one_message = mt7621_spi_transfer_one_message;
master->bits_per_word_mask = SPI_BPW_MASK(8);
@@ -370,12 +368,11 @@ static int mt7621_spi_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, master);
- rs = spi_master_get_devdata(master);
+ rs = spi_controller_get_devdata(master);
rs->base = base;
rs->clk = clk;
rs->master = master;
rs->sys_freq = clk_get_rate(rs->clk);
- rs->ops = ops;
rs->pending_write = 0;
dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
@@ -385,21 +382,18 @@ static int mt7621_spi_probe(struct platform_device *pdev)
return ret;
}
- mt7621_spi_reset(rs);
-
- return spi_register_master(master);
+ return devm_spi_register_controller(&pdev->dev, master);
}
static int mt7621_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *master;
struct mt7621_spi *rs;
master = dev_get_drvdata(&pdev->dev);
- rs = spi_master_get_devdata(master);
+ rs = spi_controller_get_devdata(master);
- clk_disable(rs->clk);
- spi_unregister_master(master);
+ clk_disable_unprepare(rs->clk);
return 0;
}
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index e41ae6ef0f8a..f48563c09b97 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -492,8 +492,7 @@ static int mxic_spi_transfer_one(struct spi_master *master,
static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
mxic_spi_clk_disable(mxic);
@@ -504,8 +503,7 @@ static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
static int __maybe_unused mxic_spi_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
int ret;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 7f280567093e..25ea4a9e0dbc 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -470,6 +470,8 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
goto out;
count--;
+ if (xfer->word_delay_usecs)
+ udelay(xfer->word_delay_usecs);
} while (count);
} else if (word_len == 16) {
const u16 *tx = xfer->tx_buf;
@@ -479,6 +481,8 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
goto out;
count -= 2;
+ if (xfer->word_delay_usecs)
+ udelay(xfer->word_delay_usecs);
} while (count);
}
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 131849adc570..d9f374c8b709 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -559,7 +559,7 @@ static int pic32_spi_one_transfer(struct spi_master *master,
dev_err(&spi->dev, "wait error/timedout\n");
if (dma_issued) {
dmaengine_terminate_all(master->dma_rx);
- dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(master->dma_tx);
}
ret = -ETIMEDOUT;
} else {
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 15592598273e..e5c26c1779ab 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -239,13 +239,15 @@ int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
u32 *threshold)
{
struct pxa2xx_spi_chip *chip_info = spi->controller_data;
+ struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
+ u32 dma_burst_size = drv_data->controller_info->dma_burst_size;
/*
* If the DMA burst size is given in chip_info we use that,
* otherwise we use the default. Also we use the default FIFO
* thresholds for now.
*/
- *burst_code = chip_info ? chip_info->dma_burst_size : 1;
+ *burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
| SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 1727fdfbac28..d456c5251b5d 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -5,7 +5,6 @@
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -35,6 +34,8 @@ struct pxa_spi_info {
void *tx_param;
void *rx_param;
+ int dma_burst_size;
+
int (*setup)(struct pci_dev *pdev, struct pxa_spi_info *c);
};
@@ -133,6 +134,7 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
rx->dma_dev = &dma_dev->dev;
c->dma_filter = lpss_dma_filter;
+ c->dma_burst_size = 8;
return 0;
}
@@ -223,6 +225,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
spi_pdata.tx_param = c->tx_param;
spi_pdata.rx_param = c->rx_param;
spi_pdata.enable_dma = c->rx_param && c->tx_param;
+ spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1;
ssp = &spi_pdata.ssp;
ssp->phys_base = pci_resource_start(dev, 0);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index b6ddba833d02..298a0bec29d1 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -884,10 +884,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
rate = min_t(int, ssp_clk, rate);
+ /*
+	 * Calculate the divisor for the SCR (Serial Clock Rate), so that
+	 * the SSP transmission rate is never greater than the device rate.
+ */
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
- return (ssp_clk / (2 * rate) - 1) & 0xff;
+ return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
else
- return (ssp_clk / rate - 1) & 0xfff;
+ return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
}
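The switch to DIV_ROUND_UP() matters whenever ssp_clk is not an exact multiple of the requested rate; a worked example, assuming the usual PXA25x relation bitrate = ssp_clk / (2 * (SCR + 1)):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int ssp_clk = 100000000, rate = 24000000;
            unsigned int old_scr = ssp_clk / (2 * rate) - 1;            /* 1 */
            unsigned int new_scr = DIV_ROUND_UP(ssp_clk, 2 * rate) - 1; /* 2 */

            /* old: 25000000 Hz, above the 24 MHz request */
            printf("old: SCR=%u -> %u Hz\n",
                   old_scr, ssp_clk / (2 * (old_scr + 1)));
            /* new: 16666666 Hz, never exceeds the request */
            printf("new: SCR=%u -> %u Hz\n",
                   new_scr, ssp_clk / (2 * (new_scr + 1)));
            return 0;
    }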
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
@@ -925,7 +929,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
struct spi_message *message = controller->cur_msg;
- struct chip_data *chip = spi_get_ctldata(message->spi);
+ struct chip_data *chip = spi_get_ctldata(spi);
u32 dma_thresh = chip->dma_threshold;
u32 dma_burst = chip->dma_burst_size;
u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
@@ -943,21 +947,21 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
- dev_err(&drv_data->pdev->dev,
+ dev_err(&spi->dev,
"Mapped transfer length of %u is greater than %d\n",
transfer->len, MAX_DMA_LEN);
return -EINVAL;
}
/* warn ... we force this to PIO mode */
- dev_warn_ratelimited(&message->spi->dev,
+ dev_warn_ratelimited(&spi->dev,
"DMA disabled for transfer length %ld greater than %d\n",
(long)transfer->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
if (pxa2xx_spi_flush(drv_data) == 0) {
- dev_err(&drv_data->pdev->dev, "Flush failed\n");
+ dev_err(&spi->dev, "Flush failed\n");
return -EIO;
}
drv_data->n_bytes = chip->n_bytes;
@@ -999,15 +1003,15 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
*/
if (chip->enable_dma) {
if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
- message->spi,
+ spi,
bits, &dma_burst,
&dma_thresh))
- dev_warn_ratelimited(&message->spi->dev,
+ dev_warn_ratelimited(&spi->dev,
"DMA burst size reduced to match bits_per_word\n");
}
dma_mapped = controller->can_dma &&
- controller->can_dma(controller, message->spi, transfer) &&
+ controller->can_dma(controller, spi, transfer) &&
controller->cur_msg_mapped;
if (dma_mapped) {
@@ -1035,12 +1039,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
if (!pxa25x_ssp_comp(drv_data))
- dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+ dev_dbg(&spi->dev, "%u Hz actual, %s\n",
controller->max_speed_hz
/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
else
- dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+ dev_dbg(&spi->dev, "%u Hz actual, %s\n",
controller->max_speed_hz / 2
/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
@@ -1333,6 +1337,9 @@ static int setup(struct spi_device *spi)
dev_warn(&spi->dev,
"in setup: DMA burst size reduced to match bits_per_word\n");
}
+ dev_dbg(&spi->dev,
+ "in setup: DMA burst size set to %u\n",
+ chip->dma_burst_size);
}
switch (drv_data->ssp_type) {
@@ -1451,6 +1458,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0xa32a), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0xa32b), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0xa37b), LPSS_CNL_SSP },
+ /* CML-LP */
+ { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
{ },
};
@@ -1487,12 +1498,7 @@ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
- struct device *dev = param;
-
- if (dev != chan->device->dev->parent)
- return false;
-
- return true;
+ return param == chan->device->dev;
}
#endif /* CONFIG_PCI */
@@ -1564,6 +1570,7 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
pdata->is_slave = of_property_read_bool(pdev->dev.of_node, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
+ pdata->dma_burst_size = 1;
return pdata;
}
@@ -1692,7 +1699,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (platform_info->enable_dma) {
status = pxa2xx_spi_dma_setup(drv_data);
if (status) {
- dev_dbg(dev, "no DMA channels available, using PIO\n");
+ dev_warn(dev, "no DMA channels available, using PIO\n");
platform_info->enable_dma = false;
} else {
controller->can_dma = pxa2xx_spi_can_dma;
@@ -1953,3 +1960,5 @@ static void __exit pxa2xx_spi_exit(void)
platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);
+
+MODULE_SOFTDEP("pre: dw_dmac");
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 3912526ead66..cdb613d38062 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 556870dcdf79..15f5723d9f95 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -271,7 +271,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets parity, interrupt mask */
rspi_write8(rspi, 0x00, RSPI_SPCR2);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
@@ -315,7 +316,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
rspi_write8(rspi, 0x00, RSPI_SSLND);
rspi_write8(rspi, 0x00, RSPI_SPND);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
@@ -366,7 +368,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets buffer to allow normal operation */
rspi_write8(rspi, 0x00, QSPI_SPBFCR);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
@@ -736,27 +739,22 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
while (len > 0) {
n = qspi_set_send_trigger(rspi, len);
qspi_set_receive_trigger(rspi, len);
- if (n == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_tx_empty(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "transmit timeout\n");
- return ret;
- }
- for (i = 0; i < n; i++)
- rspi_write_data(rspi, *tx++);
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return ret;
+ }
+ for (i = 0; i < n; i++)
+ rspi_write_data(rspi, *tx++);
- ret = rspi_wait_for_rx_full(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "receive timeout\n");
- return ret;
- }
- for (i = 0; i < n; i++)
- *rx++ = rspi_read_data(rspi);
- } else {
- ret = rspi_pio_transfer(rspi, tx, rx, n);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return ret;
}
+ for (i = 0; i < n; i++)
+ *rx++ = rspi_read_data(rspi);
+
len -= n;
}
@@ -793,19 +791,14 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
while (n > 0) {
len = qspi_set_send_trigger(rspi, n);
- if (len == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_tx_empty(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "transmit timeout\n");
- return ret;
- }
- for (i = 0; i < len; i++)
- rspi_write_data(rspi, *tx++);
- } else {
- ret = rspi_pio_transfer(rspi, tx, NULL, len);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return ret;
}
+ for (i = 0; i < len; i++)
+ rspi_write_data(rspi, *tx++);
+
n -= len;
}
@@ -830,19 +823,14 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
while (n > 0) {
len = qspi_set_receive_trigger(rspi, n);
- if (len == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_rx_full(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "receive timeout\n");
- return ret;
- }
- for (i = 0; i < len; i++)
- *rx++ = rspi_read_data(rspi);
- } else {
- ret = rspi_pio_transfer(rspi, NULL, rx, len);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return ret;
}
+ for (i = 0; i < len; i++)
+ *rx++ = rspi_read_data(rspi);
+
n -= len;
}
@@ -868,28 +856,6 @@ static int qspi_transfer_one(struct spi_controller *ctlr,
}
}
-static int rspi_setup(struct spi_device *spi)
-{
- struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
-
- rspi->max_speed_hz = spi->max_speed_hz;
-
- rspi->spcmd = SPCMD_SSLKP;
- if (spi->mode & SPI_CPOL)
- rspi->spcmd |= SPCMD_CPOL;
- if (spi->mode & SPI_CPHA)
- rspi->spcmd |= SPCMD_CPHA;
-
- /* CMOS output mode and MOSI signal from previous transfer */
- rspi->sppcr = 0;
- if (spi->mode & SPI_LOOP)
- rspi->sppcr |= SPPCR_SPLP;
-
- set_config_register(rspi, 8);
-
- return 0;
-}
-
static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
{
if (xfer->tx_buf)
@@ -959,8 +925,24 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = msg->spi;
int ret;
+ rspi->max_speed_hz = spi->max_speed_hz;
+
+ rspi->spcmd = SPCMD_SSLKP;
+ if (spi->mode & SPI_CPOL)
+ rspi->spcmd |= SPCMD_CPOL;
+ if (spi->mode & SPI_CPHA)
+ rspi->spcmd |= SPCMD_CPHA;
+
+ /* CMOS output mode and MOSI signal from previous transfer */
+ rspi->sppcr = 0;
+ if (spi->mode & SPI_LOOP)
+ rspi->sppcr |= SPPCR_SPLP;
+
+ set_config_register(rspi, 8);
+
if (msg->spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
/* Setup sequencer for messages with multiple transfer modes */
@@ -1267,7 +1249,6 @@ static int rspi_probe(struct platform_device *pdev)
init_waitqueue_head(&rspi->wait);
ctlr->bus_num = pdev->id;
- ctlr->setup = rspi_setup;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = ops->transfer_one;
ctlr->prepare_message = rspi_prepare_message;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index e2eb466db10a..6aab7b2136db 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -18,6 +18,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -82,111 +83,113 @@ struct sh_msiof_spi_priv {
#define RFDR 0x60 /* Receive FIFO Data Register */
/* TMDR1 and RMDR1 */
-#define MDR1_TRMD 0x80000000 /* Transfer Mode (1 = Master mode) */
-#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
-#define MDR1_SYNCMD_SPI 0x20000000 /* Level mode/SPI */
-#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
-#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
-#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
-#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
-#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
-#define MDR1_FLD_SHIFT 2
-#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
+#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
+#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */
+#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */
+#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_SHIFT 2
+#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
-#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
-#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
-#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
+#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
+#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
/* TSCR and RSCR */
-#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
+#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
#define SCR_BRPS(i) (((i) - 1) << 8)
-#define SCR_BRDV_MASK 0x0007 /* Baud Rate Generator's Division Ratio */
-#define SCR_BRDV_DIV_2 0x0000
-#define SCR_BRDV_DIV_4 0x0001
-#define SCR_BRDV_DIV_8 0x0002
-#define SCR_BRDV_DIV_16 0x0003
-#define SCR_BRDV_DIV_32 0x0004
-#define SCR_BRDV_DIV_1 0x0007
+#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+#define SCR_BRDV_DIV_2 0
+#define SCR_BRDV_DIV_4 1
+#define SCR_BRDV_DIV_8 2
+#define SCR_BRDV_DIV_16 3
+#define SCR_BRDV_DIV_32 4
+#define SCR_BRDV_DIV_1 7
/* CTR */
-#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
-#define CTR_TSCKIZ_SCK 0x80000000 /* Disable SCK when TX disabled */
-#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
-#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
-#define CTR_RSCKIZ_SCK 0x20000000 /* Must match CTR_TSCKIZ_SCK */
-#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
-#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
-#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
-#define CTR_TXDIZ_MASK 0x00c00000 /* Pin Output When TX is Disabled */
-#define CTR_TXDIZ_LOW 0x00000000 /* 0 */
-#define CTR_TXDIZ_HIGH 0x00400000 /* 1 */
-#define CTR_TXDIZ_HIZ 0x00800000 /* High-impedance */
-#define CTR_TSCKE 0x00008000 /* Transmit Serial Clock Output Enable */
-#define CTR_TFSE 0x00004000 /* Transmit Frame Sync Signal Output Enable */
-#define CTR_TXE 0x00000200 /* Transmit Enable */
-#define CTR_RXE 0x00000100 /* Receive Enable */
+#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
+#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define CTR_TXDIZ_LOW (0 << 22) /* 0 */
+#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */
+#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
+#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define CTR_TXE BIT(9) /* Transmit Enable */
+#define CTR_RXE BIT(8) /* Receive Enable */
+#define CTR_TXRST BIT(1) /* Transmit Reset */
+#define CTR_RXRST BIT(0) /* Receive Reset */
/* FCTR */
-#define FCTR_TFWM_MASK 0xe0000000 /* Transmit FIFO Watermark */
-#define FCTR_TFWM_64 0x00000000 /* Transfer Request when 64 empty stages */
-#define FCTR_TFWM_32 0x20000000 /* Transfer Request when 32 empty stages */
-#define FCTR_TFWM_24 0x40000000 /* Transfer Request when 24 empty stages */
-#define FCTR_TFWM_16 0x60000000 /* Transfer Request when 16 empty stages */
-#define FCTR_TFWM_12 0x80000000 /* Transfer Request when 12 empty stages */
-#define FCTR_TFWM_8 0xa0000000 /* Transfer Request when 8 empty stages */
-#define FCTR_TFWM_4 0xc0000000 /* Transfer Request when 4 empty stages */
-#define FCTR_TFWM_1 0xe0000000 /* Transfer Request when 1 empty stage */
-#define FCTR_TFUA_MASK 0x07f00000 /* Transmit FIFO Usable Area */
-#define FCTR_TFUA_SHIFT 20
+#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define FCTR_TFUA_SHIFT 20
#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
-#define FCTR_RFWM_MASK 0x0000e000 /* Receive FIFO Watermark */
-#define FCTR_RFWM_1 0x00000000 /* Transfer Request when 1 valid stages */
-#define FCTR_RFWM_4 0x00002000 /* Transfer Request when 4 valid stages */
-#define FCTR_RFWM_8 0x00004000 /* Transfer Request when 8 valid stages */
-#define FCTR_RFWM_16 0x00006000 /* Transfer Request when 16 valid stages */
-#define FCTR_RFWM_32 0x00008000 /* Transfer Request when 32 valid stages */
-#define FCTR_RFWM_64 0x0000a000 /* Transfer Request when 64 valid stages */
-#define FCTR_RFWM_128 0x0000c000 /* Transfer Request when 128 valid stages */
-#define FCTR_RFWM_256 0x0000e000 /* Transfer Request when 256 valid stages */
-#define FCTR_RFUA_MASK 0x00001ff0 /* Receive FIFO Usable Area (0x40 = full) */
-#define FCTR_RFUA_SHIFT 4
+#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
+#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stage */
+#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
+#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
+#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
+#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
+#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
+#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
+#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
+#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+#define FCTR_RFUA_SHIFT 4
#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
/* STR */
-#define STR_TFEMP 0x20000000 /* Transmit FIFO Empty */
-#define STR_TDREQ 0x10000000 /* Transmit Data Transfer Request */
-#define STR_TEOF 0x00800000 /* Frame Transmission End */
-#define STR_TFSERR 0x00200000 /* Transmit Frame Synchronization Error */
-#define STR_TFOVF 0x00100000 /* Transmit FIFO Overflow */
-#define STR_TFUDF 0x00080000 /* Transmit FIFO Underflow */
-#define STR_RFFUL 0x00002000 /* Receive FIFO Full */
-#define STR_RDREQ 0x00001000 /* Receive Data Transfer Request */
-#define STR_REOF 0x00000080 /* Frame Reception End */
-#define STR_RFSERR 0x00000020 /* Receive Frame Synchronization Error */
-#define STR_RFUDF 0x00000010 /* Receive FIFO Underflow */
-#define STR_RFOVF 0x00000008 /* Receive FIFO Overflow */
+#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define STR_TEOF BIT(23) /* Frame Transmission End */
+#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define STR_RFFUL BIT(13) /* Receive FIFO Full */
+#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define STR_REOF BIT(7) /* Frame Reception End */
+#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */
/* IER */
-#define IER_TDMAE 0x80000000 /* Transmit Data DMA Transfer Req. Enable */
-#define IER_TFEMPE 0x20000000 /* Transmit FIFO Empty Enable */
-#define IER_TDREQE 0x10000000 /* Transmit Data Transfer Request Enable */
-#define IER_TEOFE 0x00800000 /* Frame Transmission End Enable */
-#define IER_TFSERRE 0x00200000 /* Transmit Frame Sync Error Enable */
-#define IER_TFOVFE 0x00100000 /* Transmit FIFO Overflow Enable */
-#define IER_TFUDFE 0x00080000 /* Transmit FIFO Underflow Enable */
-#define IER_RDMAE 0x00008000 /* Receive Data DMA Transfer Req. Enable */
-#define IER_RFFULE 0x00002000 /* Receive FIFO Full Enable */
-#define IER_RDREQE 0x00001000 /* Receive Data Transfer Request Enable */
-#define IER_REOFE 0x00000080 /* Frame Reception End Enable */
-#define IER_RFSERRE 0x00000020 /* Receive Frame Sync Error Enable */
-#define IER_RFUDFE 0x00000010 /* Receive FIFO Underflow Enable */
-#define IER_RFOVFE 0x00000008 /* Receive FIFO Overflow Enable */
+#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define IER_REOFE BIT(7) /* Frame Reception End Enable */
+#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
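The register-definition hunks above only re-express the existing MSIOF bit layouts with BIT()/GENMASK(); apart from the newly added CTR_TXRST/CTR_RXRST reset bits, the numeric values do not change. As a quick sanity check outside the kernel tree, a small standalone sketch (using local stand-ins for the kernel's BIT()/GENMASK() macros and C11 static_assert) can spot-check a few of the equivalences:

#include <assert.h>

/* Local stand-ins mirroring the kernel's 32-bit BIT()/GENMASK() semantics. */
#define BIT(n)		(1U << (n))
#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

static_assert(GENMASK(31, 30) == 0xc0000000u, "CTR_TSCKIZ_MASK");
static_assert(BIT(31)         == 0x80000000u, "CTR_TSCKIZ_SCK");
static_assert(GENMASK(23, 22) == 0x00c00000u, "CTR_TXDIZ_MASK");
static_assert((2U << 22)      == 0x00800000u, "CTR_TXDIZ_HIZ");
static_assert(GENMASK(31, 29) == 0xe0000000u, "FCTR_TFWM_MASK");
static_assert((5U << 29)      == 0xa0000000u, "FCTR_TFWM_8");
static_assert(BIT(29)         == 0x20000000u, "STR_TFEMP");
static_assert(BIT(15)         == 0x00008000u, "IER_RDMAE");

int main(void) { return 0; }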
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
@@ -219,21 +222,14 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
{
u32 mask = clr | set;
u32 data;
- int k;
data = sh_msiof_read(p, CTR);
data &= ~clr;
data |= set;
sh_msiof_write(p, CTR, data);
- for (k = 100; k > 0; k--) {
- if ((sh_msiof_read(p, CTR) & mask) == set)
- break;
-
- udelay(10);
- }
-
- return k > 0 ? 0 : -ETIMEDOUT;
+ return readl_poll_timeout_atomic(p->mapbase + CTR, data,
+ (data & mask) == set, 10, 1000);
}
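The hunk above drops the hand-rolled 100 x udelay(10) loop in favour of the generic helper from <linux/iopoll.h>, which returns 0 as soon as the condition holds and -ETIMEDOUT once the time budget is spent. A minimal sketch of the same pattern, with a purely hypothetical STATUS register and READY bit standing in for CTR:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEV_STATUS		0x04	/* hypothetical register offset */
#define DEV_STATUS_READY	BIT(0)	/* hypothetical status bit */

static int dev_wait_ready(void __iomem *base)
{
	u32 val;

	/* Poll every 10 us, give up after 1000 us, like the call above. */
	return readl_poll_timeout_atomic(base + DEV_STATUS, val,
					 val & DEV_STATUS_READY, 10, 1000);
}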
static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
@@ -247,6 +243,19 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
+{
+ u32 mask = CTR_TXRST | CTR_RXRST;
+ u32 data;
+
+ data = sh_msiof_read(p, CTR);
+ data |= mask;
+ sh_msiof_write(p, CTR, data);
+
+ readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
+ 100);
+}
+
static const u32 sh_msiof_spi_div_array[] = {
SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
@@ -540,25 +549,11 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
static int sh_msiof_spi_setup(struct spi_device *spi)
{
- struct device_node *np = spi->controller->dev.of_node;
struct sh_msiof_spi_priv *p =
spi_controller_get_devdata(spi->controller);
u32 clr, set, tmp;
- if (!np) {
- /*
- * Use spi->controller_data for CS (same strategy as spi_gpio),
- * if any. otherwise let HW control CS
- */
- spi->cs_gpio = (uintptr_t)spi->controller_data;
- }
-
- if (gpio_is_valid(spi->cs_gpio)) {
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- return 0;
- }
-
- if (spi_controller_is_slave(p->ctlr))
+ if (spi->cs_gpiod || spi_controller_is_slave(p->ctlr))
return 0;
if (p->native_cs_inited &&
@@ -591,7 +586,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
u32 ss, cs_high;
/* Configure pins before asserting CS */
- if (gpio_is_valid(spi->cs_gpio)) {
+ if (spi->cs_gpiod) {
ss = p->unused_ss;
cs_high = p->native_cs_high;
} else {
@@ -926,6 +921,9 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
bool swab;
int ret;
+ /* reset registers */
+ sh_msiof_spi_reset_regs(p);
+
/* setup clocks (clock already enabled in chipselect()) */
if (!spi_controller_is_slave(p->ctlr))
sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
@@ -1144,6 +1142,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
if (!IS_ERR(gpiod)) {
+ devm_gpiod_put(dev, gpiod);
cs_gpios++;
continue;
}
@@ -1395,6 +1394,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
+ ctlr->use_gpio_descriptors = true;
ret = sh_msiof_request_dma(p);
if (ret < 0)
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 3b2a9a6b990d..42f8e3c6aa1f 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -5,6 +5,8 @@
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -13,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -76,7 +79,6 @@
#define QSPI_PSMAR 0x28
#define QSPI_PIR 0x2c
#define QSPI_LPTR 0x30
-#define LPTR_DFT_TIMEOUT 0x10
#define STM32_QSPI_MAX_MMAP_SZ SZ_256M
#define STM32_QSPI_MAX_NORCHIP 2
@@ -84,6 +86,7 @@
#define STM32_FIFO_TIMEOUT_US 30000
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
+#define STM32_COMP_TIMEOUT_MS 1000
struct stm32_qspi_flash {
struct stm32_qspi *qspi;
@@ -93,6 +96,8 @@ struct stm32_qspi_flash {
struct stm32_qspi {
struct device *dev;
+ struct spi_controller *ctrl;
+ phys_addr_t phys_base;
void __iomem *io_base;
void __iomem *mm_base;
resource_size_t mm_size;
@@ -102,6 +107,13 @@ struct stm32_qspi {
struct completion data_completion;
u32 fmode;
+ struct dma_chan *dma_chtx;
+ struct dma_chan *dma_chrx;
+ struct completion dma_completion;
+
+ u32 cr_reg;
+ u32 dcr_reg;
+
/*
* to protect device configuration, could be different between
* 2 flash access (bk1, bk2)
@@ -177,6 +189,81 @@ static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
return 0;
}
+static void stm32_qspi_dma_callback(void *arg)
+{
+ struct completion *dma_completion = arg;
+
+ complete(dma_completion);
+}
+
+static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction dma_dir;
+ struct dma_chan *dma_ch;
+ struct sg_table sgt;
+ dma_cookie_t cookie;
+ u32 cr, t_out;
+ int err;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_dir = DMA_DEV_TO_MEM;
+ dma_ch = qspi->dma_chrx;
+ } else {
+ dma_dir = DMA_MEM_TO_DEV;
+ dma_ch = qspi->dma_chtx;
+ }
+
+ /*
+	 * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
+ * (DMA-able: in vmalloc | kmap | virt_addr_valid)
+ */
+ err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
+ if (err)
+ return err;
+
+ desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
+ dma_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+
+ reinit_completion(&qspi->dma_completion);
+ desc->callback = stm32_qspi_dma_callback;
+ desc->callback_param = &qspi->dma_completion;
+ cookie = dmaengine_submit(desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ goto out;
+
+ dma_async_issue_pending(dma_ch);
+
+ writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);
+
+ t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
+ if (!wait_for_completion_interruptible_timeout(&qspi->dma_completion,
+ msecs_to_jiffies(t_out)))
+ err = -ETIMEDOUT;
+
+ if (dma_async_is_tx_complete(dma_ch, cookie,
+ NULL, NULL) != DMA_COMPLETE)
+ err = -ETIMEDOUT;
+
+ if (err)
+ dmaengine_terminate_all(dma_ch);
+
+out:
+ writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
+out_unmap:
+ spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);
+
+ return err;
+}
+
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
if (!op->data.nbytes)
@@ -184,6 +271,10 @@ static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
if (qspi->fmode == CCR_FMODE_MM)
return stm32_qspi_tx_mm(qspi, op);
+ else if ((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
+ (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx))
+ if (!stm32_qspi_tx_dma(qspi, op))
+ return 0;
return stm32_qspi_tx_poll(qspi, op);
}
@@ -214,7 +305,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
if (!wait_for_completion_interruptible_timeout(&qspi->data_completion,
- msecs_to_jiffies(1000))) {
+ msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
err = -ETIMEDOUT;
} else {
sr = readl_relaxed(qspi->io_base + QSPI_SR);
@@ -356,7 +447,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
struct spi_controller *ctrl = spi->master;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
- u32 cr, presc;
+ u32 presc;
if (ctrl->busy)
return -EBUSY;
@@ -372,17 +463,60 @@ static int stm32_qspi_setup(struct spi_device *spi)
flash->presc = presc;
mutex_lock(&qspi->lock);
- writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QSPI_LPTR);
- cr = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_TCEN | CR_SSHIFT | CR_EN;
- writel_relaxed(cr, qspi->io_base + QSPI_CR);
+ qspi->cr_reg = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_SSHIFT | CR_EN;
+ writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
- writel_relaxed(DCR_FSIZE_MASK, qspi->io_base + QSPI_DCR);
+ qspi->dcr_reg = DCR_FSIZE_MASK;
+ writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
mutex_unlock(&qspi->lock);
return 0;
}
+static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+{
+ struct dma_slave_config dma_cfg;
+ struct device *dev = qspi->dev;
+
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
+ dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
+ dma_cfg.src_maxburst = 4;
+ dma_cfg.dst_maxburst = 4;
+
+ qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
+ if (qspi->dma_chrx) {
+ if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
+ dev_err(dev, "dma rx config failed\n");
+ dma_release_channel(qspi->dma_chrx);
+ qspi->dma_chrx = NULL;
+ }
+ }
+
+ qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
+ if (qspi->dma_chtx) {
+ if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
+ dev_err(dev, "dma tx config failed\n");
+ dma_release_channel(qspi->dma_chtx);
+ qspi->dma_chtx = NULL;
+ }
+ }
+
+ init_completion(&qspi->dma_completion);
+}
+
+static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
+{
+ if (qspi->dma_chtx)
+ dma_release_channel(qspi->dma_chtx);
+ if (qspi->dma_chrx)
+ dma_release_channel(qspi->dma_chrx);
+}
+
/*
* no special host constraint, so use default spi_mem_default_supports_op
* to check supported mode.
@@ -395,8 +529,10 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
{
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
+ stm32_qspi_dma_free(qspi);
mutex_destroy(&qspi->lock);
clk_disable_unprepare(qspi->clk);
+ spi_master_put(qspi->ctrl);
}
static int stm32_qspi_probe(struct platform_device *pdev)
@@ -413,43 +549,62 @@ static int stm32_qspi_probe(struct platform_device *pdev)
return -ENOMEM;
qspi = spi_controller_get_devdata(ctrl);
+ qspi->ctrl = ctrl;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
qspi->io_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->io_base))
- return PTR_ERR(qspi->io_base);
+ if (IS_ERR(qspi->io_base)) {
+ ret = PTR_ERR(qspi->io_base);
+ goto err;
+ }
+
+ qspi->phys_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
qspi->mm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->mm_base))
- return PTR_ERR(qspi->mm_base);
+ if (IS_ERR(qspi->mm_base)) {
+ ret = PTR_ERR(qspi->mm_base);
+ goto err;
+ }
qspi->mm_size = resource_size(res);
- if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
- return -EINVAL;
+ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
+ ret = -EINVAL;
+ goto err;
+ }
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "IRQ error missing or invalid\n");
+ return irq;
+ }
+
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
- return ret;
+ goto err;
}
init_completion(&qspi->data_completion);
qspi->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(qspi->clk))
- return PTR_ERR(qspi->clk);
+ if (IS_ERR(qspi->clk)) {
+ ret = PTR_ERR(qspi->clk);
+ goto err;
+ }
qspi->clk_rate = clk_get_rate(qspi->clk);
- if (!qspi->clk_rate)
- return -EINVAL;
+ if (!qspi->clk_rate) {
+ ret = -EINVAL;
+ goto err;
+ }
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- return ret;
+ goto err;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
@@ -461,6 +616,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
+ stm32_qspi_dma_setup(qspi);
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
@@ -472,14 +628,11 @@ static int stm32_qspi_probe(struct platform_device *pdev)
ctrl->dev.of_node = dev->of_node;
ret = devm_spi_register_master(dev, ctrl);
- if (ret)
- goto err_spi_register;
-
- return 0;
+ if (!ret)
+ return 0;
-err_spi_register:
+err:
stm32_qspi_release(qspi);
-
return ret;
}
@@ -491,6 +644,31 @@ static int stm32_qspi_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused stm32_qspi_suspend(struct device *dev)
+{
+ struct stm32_qspi *qspi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(qspi->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_qspi_resume(struct device *dev)
+{
+ struct stm32_qspi *qspi = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+ clk_prepare_enable(qspi->clk);
+
+ writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
+ writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_qspi_pm_ops, stm32_qspi_suspend, stm32_qspi_resume);
+
static const struct of_device_id stm32_qspi_match[] = {
{.compatible = "st,stm32f469-qspi"},
{}
@@ -503,6 +681,7 @@ static struct platform_driver stm32_qspi_driver = {
.driver = {
.name = "stm32-qspi",
.of_match_table = stm32_qspi_match,
+ .pm = &stm32_qspi_pm_ops,
},
};
module_platform_driver(stm32_qspi_driver);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4186ed20d796..b222ce8d083e 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1839,8 +1839,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->irq = platform_get_irq(pdev, 0);
if (spi->irq <= 0) {
- dev_err(&pdev->dev, "no irq: %d\n", spi->irq);
- ret = -ENOENT;
+ ret = spi->irq;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
goto err_master_put;
}
ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index a76acedd7e2f..b1f31bb16659 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -149,6 +149,8 @@
#define SPI_TX_FIFO 0x108
#define SPI_RX_FIFO 0x188
+#define SPI_INTR_MASK 0x18c
+#define SPI_INTR_ALL_MASK (0x1fUL << 25)
#define MAX_CHIP_SELECT 4
#define SPI_FIFO_DEPTH 64
#define DATA_DIR_TX (1 << 0)
@@ -161,6 +163,10 @@
#define MAX_HOLD_CYCLES 16
#define SPI_DEFAULT_SPEED 25000000
+struct tegra_spi_soc_data {
+ bool has_intr_mask_reg;
+};
+
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
@@ -211,6 +217,7 @@ struct tegra_spi_data {
u32 *tx_dma_buf;
dma_addr_t tx_dma_phys;
struct dma_async_tx_descriptor *tx_dma_desc;
+ const struct tegra_spi_soc_data *soc_data;
};
static int tegra_spi_runtime_suspend(struct device *dev);
@@ -259,7 +266,8 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
- if (bits_per_word == 8 || bits_per_word == 16) {
+ if ((bits_per_word == 8 || bits_per_word == 16 ||
+ bits_per_word == 32) && t->len > 3) {
tspi->is_packed = 1;
tspi->words_per_32bit = 32/bits_per_word;
} else {
@@ -307,10 +315,16 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
+
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
} else {
+ unsigned int write_bytes;
max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
+ if (nbytes > t->len - tspi->cur_pos)
+ nbytes = t->len - tspi->cur_pos;
+ write_bytes = nbytes;
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
@@ -319,8 +333,10 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
+
+ tspi->cur_tx_pos += write_bytes;
}
- tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+
return written_words;
}
@@ -344,20 +360,27 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
- tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
read_words += tspi->curr_dma_words;
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ u8 bytes_per_word = tspi->bytes_per_word;
+ unsigned int read_bytes;
+ len = rx_full_count * bytes_per_word;
+ if (len > t->len - tspi->cur_pos)
+ len = t->len - tspi->cur_pos;
+ read_bytes = len;
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
- for (i = 0; (i < tspi->bytes_per_word); i++)
+ for (i = 0; len && (i < bytes_per_word); i++, len--)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
- tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
read_words += rx_full_count;
+ tspi->cur_rx_pos += read_bytes;
}
+
return read_words;
}
@@ -372,12 +395,17 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int write_bytes;
+ if (consume > t->len - tspi->cur_pos)
+ consume = t->len - tspi->cur_pos;
+ write_bytes = consume;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = 0;
@@ -386,8 +414,9 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
+
+ tspi->cur_tx_pos += write_bytes;
}
- tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
/* Make the dma buffer to read by dma */
dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
@@ -405,20 +434,28 @@ static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int read_bytes;
+ if (consume > t->len - tspi->cur_pos)
+ consume = t->len - tspi->cur_pos;
+ read_bytes = consume;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = tspi->rx_dma_buf[count] & rx_mask;
- for (i = 0; (i < tspi->bytes_per_word); i++)
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
+
+ tspi->cur_rx_pos += read_bytes;
}
- tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
/* Make the dma buffer to read by dma */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
@@ -470,22 +507,39 @@ static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
return 0;
}
-static int tegra_spi_start_dma_based_transfer(
- struct tegra_spi_data *tspi, struct spi_transfer *t)
+static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
{
- u32 val;
- unsigned int len;
- int ret = 0;
+ unsigned long timeout = jiffies + HZ;
u32 status;
- /* Make sure that Rx and Tx fifo are empty */
status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
- dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
- (unsigned)status);
- return -EIO;
+ status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
+ tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
+ while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
+ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (time_after(jiffies, timeout)) {
+ dev_err(tspi->dev,
+ "timeout waiting for fifo flush\n");
+ return -EIO;
+ }
+
+ udelay(1);
+ }
}
+ return 0;
+}
+
+static int tegra_spi_start_dma_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int len;
+ int ret = 0;
+ u8 dma_burst;
+ struct dma_slave_config dma_sconfig = {0};
+
val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
tegra_spi_writel(tspi, val, SPI_DMA_BLK);
@@ -496,23 +550,40 @@ static int tegra_spi_start_dma_based_transfer(
len = tspi->curr_dma_words * 4;
/* Set attention level based on length of transfer */
- if (len & 0xF)
+ if (len & 0xF) {
val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
- else if (((len) >> 4) & 0x1)
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
- else
+ dma_burst = 4;
+ } else {
val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
- if (tspi->cur_direction & DATA_DIR_TX)
- val |= SPI_IE_TX;
+ if (!tspi->soc_data->has_intr_mask_reg) {
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
- if (tspi->cur_direction & DATA_DIR_RX)
- val |= SPI_IE_RX;
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+ }
tegra_spi_writel(tspi, val, SPI_DMA_CTL);
tspi->dma_control_reg = val;
+ dma_sconfig.device_fc = true;
if (tspi->cur_direction & DATA_DIR_TX) {
+ dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "DMA slave config failed: %d\n", ret);
+ return ret;
+ }
+
tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
ret = tegra_spi_start_tx_dma(tspi, len);
if (ret < 0) {
@@ -523,6 +594,16 @@ static int tegra_spi_start_dma_based_transfer(
}
if (tspi->cur_direction & DATA_DIR_RX) {
+ dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "DMA slave config failed: %d\n", ret);
+ return ret;
+ }
+
/* Make the dma buffer to read by dma */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
@@ -570,8 +651,9 @@ static int tegra_spi_start_cpu_based_transfer(
tspi->is_curr_dma_xfer = false;
- val |= SPI_DMA_EN;
- tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ val = tspi->command1_reg;
+ val |= SPI_PIO;
+ tegra_spi_writel(tspi, val, SPI_COMMAND1);
return 0;
}
@@ -582,7 +664,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
u32 *dma_buf;
dma_addr_t dma_phys;
int ret;
- struct dma_slave_config dma_sconfig;
dma_chan = dma_request_slave_channel_reason(tspi->dev,
dma_to_memory ? "rx" : "tx");
@@ -603,19 +684,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
}
if (dma_to_memory) {
- dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
- dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.src_maxburst = 0;
- } else {
- dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
- dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.dst_maxburst = 0;
- }
-
- ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
- if (ret)
- goto scrub;
- if (dma_to_memory) {
tspi->rx_dma_chan = dma_chan;
tspi->rx_dma_buf = dma_buf;
tspi->rx_dma_phys = dma_phys;
@@ -625,11 +693,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
tspi->tx_dma_phys = dma_phys;
}
return 0;
-
-scrub:
- dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
- dma_release_channel(dma_chan);
- return ret;
}
static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
@@ -696,6 +759,16 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
else if (req_mode == SPI_MODE_3)
command1 |= SPI_CONTROL_MODE_3;
+ if (spi->mode & SPI_LSB_FIRST)
+ command1 |= SPI_LSBIT_FE;
+ else
+ command1 &= ~SPI_LSBIT_FE;
+
+ if (spi->mode & SPI_3WIRE)
+ command1 |= SPI_BIDIROE;
+ else
+ command1 &= ~SPI_BIDIROE;
+
if (tspi->cs_control) {
if (tspi->cs_control != spi)
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
@@ -728,8 +801,15 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
+ if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
+ command1 |= SPI_BOTH_EN_BIT;
+ else
+ command1 &= ~SPI_BOTH_EN_BIT;
+
if (tspi->is_packed)
command1 |= SPI_PACKED;
+ else
+ command1 &= ~SPI_PACKED;
command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
tspi->cur_direction = 0;
@@ -748,6 +828,9 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
tspi->def_command1_reg, (unsigned)command1);
+ ret = tegra_spi_flush_fifos(tspi);
+ if (ret < 0)
+ return ret;
if (total_fifo_words > SPI_FIFO_DEPTH)
ret = tegra_spi_start_dma_based_transfer(tspi, t);
else
@@ -774,6 +857,12 @@ static int tegra_spi_setup(struct spi_device *spi)
return ret;
}
+ if (tspi->soc_data->has_intr_mask_reg) {
+ val = tegra_spi_readl(tspi, SPI_INTR_MASK);
+ val &= ~SPI_INTR_ALL_MASK;
+ tegra_spi_writel(tspi, val, SPI_INTR_MASK);
+ }
+
spin_lock_irqsave(&tspi->lock, flags);
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
@@ -799,6 +888,33 @@ static void tegra_spi_transfer_delay(int delay)
udelay(delay % 1000);
}
+static void tegra_spi_transfer_end(struct spi_device *spi)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
+
+ if (cs_val)
+ tspi->command1_reg |= SPI_CS_SW_VAL;
+ else
+ tspi->command1_reg &= ~SPI_CS_SW_VAL;
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+}
+
+static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
+{
+ dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
+ dev_dbg(tspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_COMMAND1),
+ tegra_spi_readl(tspi, SPI_COMMAND2));
+ dev_dbg(tspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_DMA_CTL),
+ tegra_spi_readl(tspi, SPI_DMA_BLK));
+ dev_dbg(tspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_TRANS_STATUS),
+ tegra_spi_readl(tspi, SPI_FIFO_STATUS));
+}
+
static int tegra_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -838,21 +954,32 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
if (WARN_ON(ret == 0)) {
dev_err(tspi->dev,
"spi transfer timeout, err %d\n", ret);
+ if (tspi->is_curr_dma_xfer &&
+ (tspi->cur_direction & DATA_DIR_TX))
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ if (tspi->is_curr_dma_xfer &&
+ (tspi->cur_direction & DATA_DIR_RX))
+ dmaengine_terminate_all(tspi->rx_dma_chan);
ret = -EIO;
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
goto complete_xfer;
}
if (tspi->tx_status || tspi->rx_status) {
dev_err(tspi->dev, "Error in Transfer\n");
ret = -EIO;
+ tegra_spi_dump_regs(tspi);
goto complete_xfer;
}
msg->actual_length += xfer->len;
complete_xfer:
if (ret < 0 || skip) {
- tegra_spi_writel(tspi, tspi->def_command1_reg,
- SPI_COMMAND1);
+ tegra_spi_transfer_end(spi);
tegra_spi_transfer_delay(xfer->delay_usecs);
goto exit;
} else if (list_is_last(&xfer->transfer_list,
@@ -860,13 +987,11 @@ complete_xfer:
if (xfer->cs_change)
tspi->cs_control = spi;
else {
- tegra_spi_writel(tspi, tspi->def_command1_reg,
- SPI_COMMAND1);
+ tegra_spi_transfer_end(spi);
tegra_spi_transfer_delay(xfer->delay_usecs);
}
} else if (xfer->cs_change) {
- tegra_spi_writel(tspi, tspi->def_command1_reg,
- SPI_COMMAND1);
+ tegra_spi_transfer_end(spi);
tegra_spi_transfer_delay(xfer->delay_usecs);
}
@@ -889,11 +1014,14 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
- complete(&tspi->xfer_completion);
- goto exit;
+ return IRQ_HANDLED;
}
if (tspi->cur_direction & DATA_DIR_RX)
@@ -961,11 +1089,13 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
- complete(&tspi->xfer_completion);
- spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
}
@@ -1021,8 +1151,29 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
+static struct tegra_spi_soc_data tegra114_spi_soc_data = {
+ .has_intr_mask_reg = false,
+};
+
+static struct tegra_spi_soc_data tegra124_spi_soc_data = {
+ .has_intr_mask_reg = false,
+};
+
+static struct tegra_spi_soc_data tegra210_spi_soc_data = {
+ .has_intr_mask_reg = true,
+};
+
static const struct of_device_id tegra_spi_of_match[] = {
- { .compatible = "nvidia,tegra114-spi", },
+ {
+ .compatible = "nvidia,tegra114-spi",
+ .data = &tegra114_spi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra124-spi",
+ .data = &tegra124_spi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra210-spi",
+ .data = &tegra210_spi_soc_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
@@ -1033,6 +1184,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
struct tegra_spi_data *tspi;
struct resource *r;
int ret, spi_irq;
+ int bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
@@ -1047,16 +1199,28 @@ static int tegra_spi_probe(struct platform_device *pdev)
master->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = tegra_spi_setup;
master->transfer_one_message = tegra_spi_transfer_one_message;
master->num_chipselect = MAX_CHIP_SELECT;
master->auto_runtime_pm = true;
+ bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (bus_num >= 0)
+ master->bus_num = bus_num;
tspi->master = master;
tspi->dev = &pdev->dev;
spin_lock_init(&tspi->lock);
+ tspi->soc_data = of_device_get_match_data(&pdev->dev);
+ if (!tspi->soc_data) {
+ dev_err(&pdev->dev, "unsupported tegra\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(tspi->base)) {
@@ -1067,27 +1231,19 @@ static int tegra_spi_probe(struct platform_device *pdev)
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
- ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
- tegra_spi_isr_thread, IRQF_ONESHOT,
- dev_name(&pdev->dev), tspi);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
- tspi->irq);
- goto exit_free_master;
- }
tspi->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
- goto exit_free_irq;
+ goto exit_free_master;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
- goto exit_free_irq;
+ goto exit_free_master;
}
tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
@@ -1095,7 +1251,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
ret = tegra_spi_init_dma_param(tspi, true);
if (ret < 0)
- goto exit_free_irq;
+ goto exit_free_master;
ret = tegra_spi_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
@@ -1117,18 +1273,32 @@ static int tegra_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
goto exit_pm_disable;
}
+
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
tspi->def_command1_reg = SPI_M_S;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
pm_runtime_put(&pdev->dev);
+ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+ tegra_spi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_pm_disable;
+ }
master->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
- goto exit_pm_disable;
+ goto exit_free_irq;
}
return ret;
+exit_free_irq:
+ free_irq(spi_irq, tspi);
exit_pm_disable:
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
@@ -1136,8 +1306,6 @@ exit_pm_disable:
tegra_spi_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_spi_deinit_dma_param(tspi, true);
-exit_free_irq:
- free_irq(spi_irq, tspi);
exit_free_master:
spi_master_put(master);
return ret;
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 1427f343b39a..6d4679126213 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -717,9 +717,6 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
command2 = tspi->command2_reg;
command2 &= ~(SLINK_RXEN | SLINK_TXEN);
- tegra_slink_writel(tspi, command, SLINK_COMMAND);
- tspi->command_reg = command;
-
tspi->cur_direction = 0;
if (t->rx_buf) {
command2 |= SLINK_RXEN;
@@ -729,9 +726,18 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
command2 |= SLINK_TXEN;
tspi->cur_direction |= DATA_DIR_TX;
}
+
+ /*
+	 * Writing to the command2 register before the command register prevents
+	 * a spike on chip_select line 0: the chip_select line is selected
+	 * before its value is changed.
+ */
tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
tspi->command2_reg = command2;
+ tegra_slink_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
+
if (total_fifo_words > SLINK_FIFO_DEPTH)
ret = tegra_slink_start_dma_based_transfer(tspi, t);
else
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index fba3f180f233..8a5966963834 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1299,18 +1299,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
dma->rx_buf_virt, dma->rx_buf_dma);
}
-static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
+ int ret;
dma = &data->dma;
+ ret = 0;
/* Get Consistent memory for Tx DMA */
dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+ if (!dma->tx_buf_virt)
+ ret = -ENOMEM;
+
/* Get Consistent memory for Rx DMA */
dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+ if (!dma->rx_buf_virt)
+ ret = -ENOMEM;
+
+ return ret;
}
static int pch_spi_pd_probe(struct platform_device *plat_dev)
@@ -1387,7 +1396,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
if (use_dma) {
dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
- pch_alloc_dma_buf(board_dat, data);
+ ret = pch_alloc_dma_buf(board_dat, data);
+ if (ret)
+ goto err_spi_register_master;
}
ret = spi_register_master(master);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
new file mode 100644
index 000000000000..c6bee67decb5
--- /dev/null
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi-mem.h>
+
+/* Register offset definitions */
+#define ZYNQ_QSPI_CONFIG_OFFSET 0x00 /* Configuration Register, RW */
+#define ZYNQ_QSPI_STATUS_OFFSET 0x04 /* Interrupt Status Register, RO */
+#define ZYNQ_QSPI_IEN_OFFSET 0x08 /* Interrupt Enable Register, WO */
+#define ZYNQ_QSPI_IDIS_OFFSET 0x0C /* Interrupt Disable Reg, WO */
+#define ZYNQ_QSPI_IMASK_OFFSET 0x10 /* Interrupt Enabled Mask Reg,RO */
+#define ZYNQ_QSPI_ENABLE_OFFSET 0x14 /* Enable/Disable Register, RW */
+#define ZYNQ_QSPI_DELAY_OFFSET 0x18 /* Delay Register, RW */
+#define ZYNQ_QSPI_TXD_00_00_OFFSET 0x1C /* Transmit 4-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_01_OFFSET 0x80 /* Transmit 1-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_10_OFFSET 0x84 /* Transmit 2-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_11_OFFSET 0x88 /* Transmit 3-byte inst, WO */
+#define ZYNQ_QSPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
+#define ZYNQ_QSPI_SIC_OFFSET 0x24 /* Slave Idle Count Register, RW */
+#define ZYNQ_QSPI_TX_THRESH_OFFSET 0x28 /* TX FIFO Watermark Reg, RW */
+#define ZYNQ_QSPI_RX_THRESH_OFFSET 0x2C /* RX FIFO Watermark Reg, RW */
+#define ZYNQ_QSPI_GPIO_OFFSET 0x30 /* GPIO Register, RW */
+#define ZYNQ_QSPI_LINEAR_CFG_OFFSET 0xA0 /* Linear Adapter Config Ref, RW */
+#define ZYNQ_QSPI_MOD_ID_OFFSET 0xFC /* Module ID Register, RO */
+
+/*
+ * QSPI Configuration Register bit Masks
+ *
+ * This register contains various control bits that affect the operation
+ * of the QSPI controller
+ */
+#define ZYNQ_QSPI_CONFIG_IFMODE_MASK BIT(31) /* Flash Memory Interface */
+#define ZYNQ_QSPI_CONFIG_MANSRT_MASK BIT(16) /* Manual TX Start */
+#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK BIT(15) /* Enable Manual TX Mode */
+#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK BIT(14) /* Manual Chip Select */
+#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud Rate Mask */
+#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock Phase Control */
+#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock Polarity Control */
+#define ZYNQ_QSPI_CONFIG_SSCTRL_MASK BIT(10) /* Slave Select Mask */
+#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* FIFO width */
+#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
+
+/*
+ * QSPI Configuration Register - Baud rate and slave select
+ *
+ * These are the values used in the calculation of baud rate divisor and
+ * setting the slave select.
+ */
+#define ZYNQ_QSPI_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
+#define ZYNQ_QSPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
+#define ZYNQ_QSPI_SS_SHIFT 10 /* Slave Select field shift in CR */
+
+/*
+ * QSPI Interrupt Registers bit Masks
+ *
+ * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
+ * bit definitions.
+ */
+#define ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK BIT(0) /* QSPI RX FIFO Overflow */
+#define ZYNQ_QSPI_IXR_TXNFULL_MASK	BIT(2)	/* QSPI TX FIFO not full */
+#define ZYNQ_QSPI_IXR_TXFULL_MASK BIT(3) /* QSPI TX FIFO is full */
+#define ZYNQ_QSPI_IXR_RXNEMTY_MASK BIT(4) /* QSPI RX FIFO Not Empty */
+#define ZYNQ_QSPI_IXR_RXF_FULL_MASK BIT(5) /* QSPI RX FIFO is full */
+#define ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK BIT(6) /* QSPI TX FIFO Underflow */
+#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK | \
+ ZYNQ_QSPI_IXR_TXNFULL_MASK | \
+ ZYNQ_QSPI_IXR_TXFULL_MASK | \
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK | \
+ ZYNQ_QSPI_IXR_RXF_FULL_MASK | \
+ ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK)
+#define ZYNQ_QSPI_IXR_RXTX_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK)
+
+/*
+ * QSPI Enable Register bit Masks
+ *
+ * This register is used to enable or disable the QSPI controller
+ */
+#define ZYNQ_QSPI_ENABLE_ENABLE_MASK BIT(0) /* QSPI Enable Bit Mask */
+
+/*
+ * QSPI Linear Configuration Register
+ *
+ * It is named Linear Configuration, but it also controls the modes used
+ * when the controller is not in linear mode.
+ */
+#define ZYNQ_QSPI_LCFG_TWO_MEM_MASK BIT(30) /* LQSPI Two memories Mask */
+#define ZYNQ_QSPI_LCFG_SEP_BUS_MASK BIT(29) /* LQSPI Separate bus Mask */
+#define ZYNQ_QSPI_LCFG_U_PAGE_MASK BIT(28) /* LQSPI Upper Page Mask */
+
+#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8
+
+#define ZYNQ_QSPI_FAST_READ_QOUT_CODE 0x6B /* read instruction code */
+#define ZYNQ_QSPI_FIFO_DEPTH 63 /* FIFO depth in words */
+#define ZYNQ_QSPI_RX_THRESHOLD 32 /* Rx FIFO threshold level */
+#define ZYNQ_QSPI_TX_THRESHOLD 1 /* Tx FIFO threshold level */
+
+/*
+ * The modebits configurable by the driver to make the SPI support different
+ * data formats
+ */
+#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)
+
+/* Default number of chip selects */
+#define ZYNQ_QSPI_DEFAULT_NUM_CS 1
+
+/**
+ * struct zynq_qspi - Defines qspi driver instance
+ * @dev: Pointer to the device
+ * @regs: Virtual address of the QSPI controller registers
+ * @refclk: Pointer to the peripheral clock
+ * @pclk: Pointer to the APB clock
+ * @irq: IRQ number
+ * @txbuf: Pointer to the TX buffer
+ * @rxbuf: Pointer to the RX buffer
+ * @tx_bytes: Number of bytes left to transfer
+ * @rx_bytes: Number of bytes left to receive
+ * @data_completion: completion structure
+ */
+struct zynq_qspi {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *refclk;
+ struct clk *pclk;
+ int irq;
+ u8 *txbuf;
+ u8 *rxbuf;
+ int tx_bytes;
+ int rx_bytes;
+ struct completion data_completion;
+};
+
+/*
+ * Inline functions for the QSPI controller read/write
+ */
+static inline u32 zynq_qspi_read(struct zynq_qspi *xqspi, u32 offset)
+{
+ return readl_relaxed(xqspi->regs + offset);
+}
+
+static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, xqspi->regs + offset);
+}
+
+/**
+ * zynq_qspi_init_hw - Initialize the hardware
+ * @xqspi: Pointer to the zynq_qspi structure
+ *
+ * The default settings of the QSPI controller's configurable parameters on
+ * reset are
+ * - Master mode
+ * - Baud rate divisor is set to 2
+ * - Tx threshold set to 1 and Rx threshold set to 32
+ * - Flash memory interface mode enabled
+ * - Size of the word to be transferred as 8 bit
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable manual start
+ * - Deselect all the chip select lines
+ * - Set the size of the word to be transferred as 32 bit
+ * - Set the little endian mode of TX FIFO and
+ * - Enable the QSPI controller
+ */
+static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
+{
+ u32 config_reg;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
+
+ /* Disable linear mode as the boot loader may have used it */
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, 0);
+
+ /* Clear the RX FIFO */
+ while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK)
+ zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+ config_reg &= ~(ZYNQ_QSPI_CONFIG_MSTREN_MASK |
+ ZYNQ_QSPI_CONFIG_CPOL_MASK |
+ ZYNQ_QSPI_CONFIG_CPHA_MASK |
+ ZYNQ_QSPI_CONFIG_BDRATE_MASK |
+ ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
+ ZYNQ_QSPI_CONFIG_MANSRTEN_MASK |
+ ZYNQ_QSPI_CONFIG_MANSRT_MASK);
+ config_reg |= (ZYNQ_QSPI_CONFIG_MSTREN_MASK |
+ ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
+ ZYNQ_QSPI_CONFIG_FWIDTH_MASK |
+ ZYNQ_QSPI_CONFIG_IFMODE_MASK);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_RX_THRESH_OFFSET,
+ ZYNQ_QSPI_RX_THRESHOLD);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
+ ZYNQ_QSPI_TX_THRESHOLD);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+}
+
+static bool zynq_qspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ /*
+	 * The number of address bytes must not exceed 3.
+ */
+ if (op->addr.nbytes > 3)
+ return false;
+
+ return true;
+}
+
+/**
+ * zynq_qspi_rxfifo_op - Read 1..4 bytes from RxFIFO to RX buffer
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @size: Number of bytes to be read (1..4)
+ */
+static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
+{
+ u32 data;
+
+ data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
+
+ if (xqspi->rxbuf) {
+ memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
+ xqspi->rxbuf += size;
+ }
+
+ xqspi->rx_bytes -= size;
+ if (xqspi->rx_bytes < 0)
+ xqspi->rx_bytes = 0;
+}
+
+/**
+ * zynq_qspi_txfifo_op - Write 1..4 bytes from TX buffer to TxFIFO
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @size: Number of bytes to be written (1..4)
+ */
+static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
+{
+ static const unsigned int offset[4] = {
+ ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
+ ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
+ u32 data;
+
+ if (xqspi->txbuf) {
+ data = 0xffffffff;
+ memcpy(&data, xqspi->txbuf, size);
+ xqspi->txbuf += size;
+ } else {
+ data = 0;
+ }
+
+ xqspi->tx_bytes -= size;
+ zynq_qspi_write(xqspi, offset[size - 1], data);
+}
+
+/**
+ * zynq_qspi_chipselect - Select or deselect the chip select line
+ * @spi: Pointer to the spi_device structure
+ * @assert: 1 for select or 0 for deselect the chip select line
+ */
+static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(ctrl);
+ u32 config_reg;
+
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+ if (assert) {
+ /* Select the slave */
+ config_reg &= ~ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
+ config_reg |= (((~(BIT(spi->chip_select))) <<
+ ZYNQ_QSPI_SS_SHIFT) &
+ ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
+ } else {
+ config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
+ }
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+}
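With the single-bit ZYNQ_QSPI_CONFIG_SSCTRL_MASK (bit 10), asserting chip select 0 clears that bit, which asserts the (active-low) select line, and deasserting sets it again. A standalone spot-check of the bit arithmetic used above, with illustrative values only:

#include <stdio.h>

int main(void)
{
	unsigned int ss_shift = 10;			/* ZYNQ_QSPI_SS_SHIFT */
	unsigned int ssctrl_mask = 1U << ss_shift;	/* ZYNQ_QSPI_CONFIG_SSCTRL_MASK */
	unsigned int cs = 0;				/* spi->chip_select */
	unsigned int field = ((~(1U << cs)) << ss_shift) & ssctrl_mask;

	/* Prints 0x00000000: bit 10 ends up cleared, i.e. the select is asserted. */
	printf("0x%08x\n", field);
	return 0;
}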
+
+/**
+ * zynq_qspi_config_op - Configure QSPI controller for specified transfer
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI transfer and
+ * sets the requested clock frequency.
+ *
+ * Return: 0 on success and -EINVAL on invalid input parameter
+ *
+ * Note: If the requested frequency is not an exact match for what can be
+ * obtained using the prescaler value, the driver sets the highest clock
+ * frequency below the requested frequency for the transfer. If the requested
+ * frequency is above or below the range supported by the QSPI controller,
+ * the driver sets the highest or lowest frequency the controller supports.
+ */
+static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
+{
+ u32 config_reg, baud_rate_val = 0;
+
+ /*
+ * Set the clock frequency
+ * The baud rate divisor is not a direct mapping to the value written
+ * into the configuration register (config_reg[5:3])
+ * i.e. 000 - divide by 2
+ * 001 - divide by 4
+ * ----------------
+ * 111 - divide by 256
+ */
+ while ((baud_rate_val < ZYNQ_QSPI_BAUD_DIV_MAX) &&
+ (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
+ spi->max_speed_hz)
+ baud_rate_val++;
+
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= (~ZYNQ_QSPI_CONFIG_CPHA_MASK) &
+ (~ZYNQ_QSPI_CONFIG_CPOL_MASK);
+ if (spi->mode & SPI_CPHA)
+ config_reg |= ZYNQ_QSPI_CONFIG_CPHA_MASK;
+ if (spi->mode & SPI_CPOL)
+ config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
+
+ config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
+ config_reg |= (baud_rate_val << ZYNQ_QSPI_BAUD_DIV_SHIFT);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+
+ return 0;
+}
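The divisor search above stops at the first setting whose output clock no longer exceeds the device's maximum speed. A standalone mock of the same loop with illustrative numbers (a 200 MHz reference clock and a 50 MHz device limit, neither taken from the patch):

#include <stdio.h>

/* Mirrors the search in zynq_qspi_config_op(); 7 is ZYNQ_QSPI_BAUD_DIV_MAX. */
static unsigned int zynq_baud_div(unsigned long refclk_hz, unsigned long max_hz)
{
	unsigned int baud_rate_val = 0;

	while (baud_rate_val < 7 &&
	       refclk_hz / (2UL << baud_rate_val) > max_hz)
		baud_rate_val++;

	return baud_rate_val;	/* value placed in config_reg[5:3] */
}

int main(void)
{
	/* 200 MHz / (2 << 1) = 50 MHz, so this prints 1, i.e. divide-by-4. */
	printf("%u\n", zynq_baud_div(200000000UL, 50000000UL));
	return 0;
}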
+
+/**
+ * zynq_qspi_setup_op - Configure the QSPI controller
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI transfer, baud
+ * rate and divisor value to setup the requested qspi clock.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_setup_op(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct zynq_qspi *qspi = spi_controller_get_devdata(ctrl);
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ clk_enable(qspi->refclk);
+ clk_enable(qspi->pclk);
+ zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * zynq_qspi_write_op - Fills the TX FIFO with as many bytes as possible
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @txcount: Maximum number of words to write
+ * @txempty: Indicates that TxFIFO is empty
+ */
+static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
+ bool txempty)
+{
+ int count, len, k;
+
+ len = xqspi->tx_bytes;
+ if (len && len < 4) {
+ /*
+ * We must empty the TxFIFO between accesses to TXD0,
+ * TXD1, TXD2, TXD3.
+ */
+ if (txempty)
+ zynq_qspi_txfifo_op(xqspi, len);
+
+ return;
+ }
+
+ count = len / 4;
+ if (count > txcount)
+ count = txcount;
+
+ if (xqspi->txbuf) {
+ iowrite32_rep(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
+ xqspi->txbuf, count);
+ xqspi->txbuf += count * 4;
+ } else {
+ for (k = 0; k < count; k++)
+ writel_relaxed(0, xqspi->regs +
+ ZYNQ_QSPI_TXD_00_00_OFFSET);
+ }
+
+ xqspi->tx_bytes -= count * 4;
+}
+
+/**
+ * zynq_qspi_read_op - Drains the RX FIFO by as many bytes as possible
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @rxcount: Maximum number of words to read
+ */
+static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
+{
+ int count, len, k;
+
+ len = xqspi->rx_bytes - xqspi->tx_bytes;
+ count = len / 4;
+ if (count > rxcount)
+ count = rxcount;
+ if (xqspi->rxbuf) {
+ ioread32_rep(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
+ xqspi->rxbuf, count);
+ xqspi->rxbuf += count * 4;
+ } else {
+ for (k = 0; k < count; k++)
+ readl_relaxed(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET);
+ }
+ xqspi->rx_bytes -= count * 4;
+ len -= count * 4;
+
+ if (len && len < 4 && count < rxcount)
+ zynq_qspi_rxfifo_op(xqspi, len);
+}
+
+/**
+ * zynq_qspi_irq - Interrupt service routine of the QSPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the xqspi structure
+ *
+ * This function handles TX empty only.
+ * On TX empty interrupt this function reads the received data from RX FIFO and
+ * fills the TX FIFO if there is any data remaining to be transferred.
+ *
+ * Return: IRQ_HANDLED when interrupt is handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
+{
+ u32 intr_status;
+ bool txempty;
+ struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;
+
+ intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);
+
+ if ((intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) ||
+ (intr_status & ZYNQ_QSPI_IXR_RXNEMTY_MASK)) {
+ /*
+ * This bit is set when Tx FIFO has < THRESHOLD entries.
+ * We have the THRESHOLD value set to 1,
+ * so this bit indicates Tx FIFO is empty.
+ */
+ txempty = !!(intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK);
+ /* Read out the data from the RX FIFO */
+ zynq_qspi_read_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
+ if (xqspi->tx_bytes) {
+ /* There is more data to send */
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
+ txempty);
+ } else {
+ /*
+ * If transfer and receive is completed then only send
+ * complete signal.
+ */
+ if (!xqspi->rx_bytes) {
+ zynq_qspi_write(xqspi,
+ ZYNQ_QSPI_IDIS_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ complete(&xqspi->data_completion);
+ }
+ }
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/**
+ * zynq_qspi_exec_mem_op() - Initiates the QSPI transfer
+ * @mem: the SPI memory
+ * @op: the memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function first selects the chip and starts the memory operation.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
+ int err = 0, i;
+ u8 *tmpbuf;
+
+ dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth);
+
+ zynq_qspi_chipselect(mem->spi, true);
+ zynq_qspi_config_op(xqspi, mem->spi);
+
+ if (op->cmd.opcode) {
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = (u8 *)&op->cmd.opcode;
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = sizeof(op->cmd.opcode);
+ xqspi->rx_bytes = sizeof(op->cmd.opcode);
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++) {
+ xqspi->txbuf[i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+ }
+
+ reinit_completion(&xqspi->data_completion);
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = op->addr.nbytes;
+ xqspi->rx_bytes = op->addr.nbytes;
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+
+ if (op->dummy.nbytes) {
+ tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
+ if (!tmpbuf) {
+ zynq_qspi_chipselect(mem->spi, false);
+ return -ENOMEM;
+ }
+ memset(tmpbuf, 0xff, op->dummy.nbytes);
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = tmpbuf;
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = op->dummy.nbytes;
+ xqspi->rx_bytes = op->dummy.nbytes;
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+ kfree(tmpbuf);
+ }
+
+ if (op->data.nbytes) {
+ reinit_completion(&xqspi->data_completion);
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ xqspi->txbuf = (u8 *)op->data.buf.out;
+ xqspi->tx_bytes = op->data.nbytes;
+ xqspi->rxbuf = NULL;
+ xqspi->rx_bytes = op->data.nbytes;
+ } else {
+ xqspi->txbuf = NULL;
+ xqspi->rxbuf = (u8 *)op->data.buf.in;
+ xqspi->rx_bytes = op->data.nbytes;
+ xqspi->tx_bytes = op->data.nbytes;
+ }
+
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+ zynq_qspi_chipselect(mem->spi, false);
+
+ return err;
+}
+
+static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
+ .supports_op = zynq_qspi_supports_op,
+ .exec_op = zynq_qspi_exec_mem_op,
+};
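For context, a minimal sketch of how a flash driver ends up in the ->exec_op() hook registered above, via the spi-mem layer; the helper name example_read_jedec_id and the single-bit bus widths are illustrative assumptions, not part of this patch:

#include <linux/spi/spi-mem.h>

/* Issue a JEDEC READ ID (0x9f) command. spi_mem_exec_op() first checks the
 * operation against ->supports_op() and then dispatches it to the
 * controller's ->exec_op(), i.e. zynq_qspi_exec_mem_op() here.
 */
static int example_read_jedec_id(struct spi_mem *mem, u8 *id, size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(len, id, 1));

	return spi_mem_exec_op(mem, &op);
}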
+
+/**
+ * zynq_qspi_probe - Probe method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct zynq_qspi *xqspi;
+ struct resource *res;
+ u32 num_cs;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ xqspi = spi_controller_get_devdata(ctlr);
+ xqspi->dev = dev;
+ platform_set_drvdata(pdev, xqspi);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xqspi->regs)) {
+ ret = PTR_ERR(xqspi->regs);
+ goto remove_master;
+ }
+
+ xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(xqspi->pclk)) {
+ dev_err(&pdev->dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xqspi->pclk);
+ goto remove_master;
+ }
+
+ init_completion(&xqspi->data_completion);
+
+ xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(xqspi->refclk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xqspi->refclk);
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto clk_dis_pclk;
+ }
+
+ /* QSPI controller initializations */
+ zynq_qspi_init_hw(xqspi);
+
+ xqspi->irq = platform_get_irq(pdev, 0);
+ if (xqspi->irq <= 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "irq resource not found\n");
+ goto clk_dis_all;
+ }
+ ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
+ 0, pdev->name, xqspi);
+ if (ret != 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto clk_dis_all;
+ }
+
+ ret = of_property_read_u32(np, "num-cs",
+ &num_cs);
+ if (ret < 0)
+ ctlr->num_chipselect = ZYNQ_QSPI_DEFAULT_NUM_CS;
+ else
+ ctlr->num_chipselect = num_cs;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->mem_ops = &zynq_qspi_mem_ops;
+ ctlr->setup = zynq_qspi_setup_op;
+ ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
+ ctlr->dev.of_node = np;
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto clk_dis_all;
+ }
+
+ return ret;
+
+clk_dis_all:
+ clk_disable_unprepare(xqspi->refclk);
+clk_dis_pclk:
+ clk_disable_unprepare(xqspi->pclk);
+remove_master:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+/**
+ * zynq_qspi_remove - Remove method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_remove(struct platform_device *pdev)
+{
+ struct zynq_qspi *xqspi = platform_get_drvdata(pdev);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
+
+ clk_disable_unprepare(xqspi->refclk);
+ clk_disable_unprepare(xqspi->pclk);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_qspi_of_match[] = {
+ { .compatible = "xlnx,zynq-qspi-1.0", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynq_qspi_of_match);
+
+/*
+ * zynq_qspi_driver - This structure defines the QSPI platform driver
+ */
+static struct platform_driver zynq_qspi_driver = {
+ .probe = zynq_qspi_probe,
+ .remove = zynq_qspi_remove,
+ .driver = {
+ .name = "zynq-qspi",
+ .of_match_table = zynq_qspi_of_match,
+ },
+};
+
+module_platform_driver(zynq_qspi_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Zynq QSPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 93986f879b09..5e75944ad5d1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -36,6 +36,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
@@ -1039,6 +1041,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (max_tx || max_rx) {
list_for_each_entry(xfer, &msg->transfers,
transfer_list) {
+ if (!xfer->len)
+ continue;
if (!xfer->tx_buf)
xfer->tx_buf = ctlr->dummy_tx;
if (!xfer->rx_buf)
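The new length check matters because zero-length transfers are legal and are sometimes queued only to insert a delay or a chip-select change between real transfers; with the check they no longer get dummy buffers attached. A hedged sketch of such a message (names and the 10 us value are illustrative):

static int example_cmd_with_gap(struct spi_device *spi, const u8 *cmd, size_t len)
{
	struct spi_transfer xfers[] = {
		{ .tx_buf = cmd, .len = len },
		/* zero-length transfer used purely as a 10 us inter-transfer gap */
		{ .len = 0, .delay_usecs = 10 },
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	return spi_sync(spi, &msg);
}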
@@ -1177,10 +1181,10 @@ out:
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
- spi_res_release(ctlr, msg);
-
spi_finalize_current_message(ctlr);
+ spi_res_release(ctlr, msg);
+
return ret;
}
@@ -2195,6 +2199,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
*/
cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
GPIOD_OUT_LOW);
+ if (IS_ERR(cs[i]))
+ return PTR_ERR(cs[i]);
if (cs[i]) {
/*
@@ -2261,7 +2267,7 @@ int spi_register_controller(struct spi_controller *ctlr)
{
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
- int status = -ENODEV;
+ int status;
int id, first_dynamic;
if (!dev)
@@ -2275,24 +2281,6 @@ int spi_register_controller(struct spi_controller *ctlr)
if (status)
return status;
- if (!spi_controller_is_slave(ctlr)) {
- if (ctlr->use_gpio_descriptors) {
- status = spi_get_gpio_descs(ctlr);
- if (status)
- return status;
- /*
- * A controller using GPIO descriptors always
- * supports SPI_CS_HIGH if need be.
- */
- ctlr->mode_bits |= SPI_CS_HIGH;
- } else {
- /* Legacy code path for GPIOs from DT */
- status = of_spi_register_master(ctlr);
- if (status)
- return status;
- }
- }
-
/* even if it's just one always-selected device, there must
* be at least one chipselect
*/
@@ -2349,6 +2337,25 @@ int spi_register_controller(struct spi_controller *ctlr)
* registration fails if the bus ID is in use.
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
+
+ if (!spi_controller_is_slave(ctlr)) {
+ if (ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ return status;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ } else {
+ /* Legacy code path for GPIOs from DT */
+ status = of_spi_register_master(ctlr);
+ if (status)
+ return status;
+ }
+ }
+
status = device_add(&ctlr->dev);
if (status < 0) {
/* free bus id */
@@ -2781,11 +2788,6 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
size_t offset;
size_t count, i;
- /* warn once about this fact that we are splitting a transfer */
- dev_warn_once(&msg->spi->dev,
- "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
- xfer->len, maxsize);
-
/* calculate how many we have to replace */
count = DIV_ROUND_UP(xfer->len, maxsize);
@@ -2943,6 +2945,11 @@ int spi_setup(struct spi_device *spi)
* so it is ignored here.
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
+ /* Nothing prevents us from working with an active-high CS when it is
+ * driven by a GPIO.
+ */
+ if (gpio_is_valid(spi->cs_gpio))
+ bad_bits &= ~SPI_CS_HIGH;
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
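In practice this lets a client on a board with a GPIO-driven, active-high chip select request the mode even though the controller does not advertise SPI_CS_HIGH; a minimal sketch (the function name is illustrative):

static int example_enable_active_high_cs(struct spi_device *spi)
{
	/* legal when the CS line is a GPIO, even if the controller
	 * does not set SPI_CS_HIGH in its mode_bits
	 */
	spi->mode |= SPI_CS_HIGH;
	return spi_setup(spi);
}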
@@ -2988,6 +2995,21 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
+/**
+ * spi_set_cs_timing - configure CS setup, hold, and inactive delays
+ * @spi: the device that requires specific CS timing configuration
+ * @setup: CS setup time, in clock cycles
+ * @hold: CS hold time, in clock cycles
+ * @inactive_dly: CS inactive delay between transfers, in clock cycles
+ */
+void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
+ u8 inactive_dly)
+{
+ if (spi->controller->set_cs_timing)
+ spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
+}
+EXPORT_SYMBOL_GPL(spi_set_cs_timing);
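A hedged sketch of a client using the new helper; the cycle counts are arbitrary, and the call has no effect unless the controller implements ->set_cs_timing():

static void example_tune_cs(struct spi_device *spi)
{
	/* 2 cycles setup, 4 cycles hold, 8 cycles inactive between transfers */
	spi_set_cs_timing(spi, 2, 4, 8);
}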
+
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
@@ -3062,8 +3084,6 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
- if (!xfer->speed_hz)
- xfer->speed_hz = ctlr->max_speed_hz;
if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
xfer->speed_hz = ctlr->max_speed_hz;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index b0c76e2626ce..ce9142d87f41 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -276,17 +276,19 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
+ k_tmp->word_delay_usecs = u_tmp->word_delay_usecs;
if (!k_tmp->speed_hz)
k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
dev_dbg(&spidev->spi->dev,
- " xfer len %u %s%s%s%dbits %u usec %uHz\n",
+ " xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
u_tmp->len,
u_tmp->rx_buf ? "rx " : "",
u_tmp->tx_buf ? "tx " : "",
u_tmp->cs_change ? "cs " : "",
u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
u_tmp->delay_usecs,
+ u_tmp->word_delay_usecs,
u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
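From userspace, the newly forwarded field is set through the usual spidev ioctl; a hedged sketch, assuming UAPI headers whose struct spi_ioc_transfer already carries word_delay_usecs:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int example_xfer_with_word_delay(int fd, const void *tx, void *rx,
					unsigned int len)
{
	struct spi_ioc_transfer tr;

	memset(&tr, 0, sizeof(tr));
	tr.tx_buf = (unsigned long)tx;
	tr.rx_buf = (unsigned long)rx;
	tr.len = len;
	tr.word_delay_usecs = 5;	/* 5 us pause after each word */

	return ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
}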
@@ -591,7 +593,7 @@ static int spidev_open(struct inode *inode, struct file *filp)
spidev->users++;
filp->private_data = spidev;
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
mutex_unlock(&device_list_lock);
return 0;
diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
index f51f150307df..ffa379efff83 100644
--- a/drivers/ssb/bridge_pcmcia_80211.c
+++ b/drivers/ssb/bridge_pcmcia_80211.c
@@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = {
.resume = ssb_host_pcmcia_resume,
};
+static int pcmcia_init_failed;
+
/*
* These are not module init/exit functions!
* The module_pcmcia_driver() helper cannot be used here.
*/
int ssb_host_pcmcia_init(void)
{
- return pcmcia_register_driver(&ssb_host_pcmcia_driver);
+ pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver);
+
+ return pcmcia_init_failed;
}
void ssb_host_pcmcia_exit(void)
{
- pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
+ if (!pcmcia_init_failed)
+ pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
}
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 84807a9b4b13..da2d2ab8104d 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -305,7 +305,6 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
else if (i % 2)
pr_cont(".");
writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
- mmiowb();
msleep(20);
}
err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl);
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index 567013f8a8be..d7d730c245c5 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -338,7 +338,6 @@ static void ssb_pcmcia_write8(struct ssb_device *dev, u16 offset, u8 value)
err = select_core_and_segment(dev, &offset);
if (likely(!err))
writeb(value, bus->mmio + offset);
- mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
@@ -352,7 +351,6 @@ static void ssb_pcmcia_write16(struct ssb_device *dev, u16 offset, u16 value)
err = select_core_and_segment(dev, &offset);
if (likely(!err))
writew(value, bus->mmio + offset);
- mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
@@ -368,7 +366,6 @@ static void ssb_pcmcia_write32(struct ssb_device *dev, u16 offset, u32 value)
writew((value & 0x0000FFFF), bus->mmio + offset);
writew(((value & 0xFFFF0000) >> 16), bus->mmio + offset + 2);
}
- mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
@@ -424,7 +421,6 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
WARN_ON(1);
}
unlock:
- mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
#endif /* CONFIG_SSB_BLOCKIO */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 62951e836cbc..d5f771fafc21 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -1,6 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig STAGING
bool "Staging drivers"
- default n
---help---
This option allows you to select a number of drivers that are
not of the "normal" Linux kernel quality level. These drivers
@@ -16,8 +16,8 @@ menuconfig STAGING
If you wish to work on these drivers, to help improve them, or
to report problems you have with them, please see the
- driver_name.README file in the drivers/staging/ directory to
- see what needs to be worked on, and who to contact.
+ drivers/staging/<driver_name>/TODO file to see what needs to be
+ worked on, and who to contact.
If in doubt, say N here.
@@ -40,8 +40,6 @@ source "drivers/staging/rtl8712/Kconfig"
source "drivers/staging/rtl8188eu/Kconfig"
-source "drivers/staging/rtlwifi/Kconfig"
-
source "drivers/staging/rts5208/Kconfig"
source "drivers/staging/octeon/Kconfig"
@@ -96,8 +94,6 @@ source "drivers/staging/greybus/Kconfig"
source "drivers/staging/vc04_services/Kconfig"
-source "drivers/staging/vboxvideo/Kconfig"
-
source "drivers/staging/pi433/Kconfig"
source "drivers/staging/mt7621-pci/Kconfig"
@@ -106,14 +102,10 @@ source "drivers/staging/mt7621-pci-phy/Kconfig"
source "drivers/staging/mt7621-pinctrl/Kconfig"
-source "drivers/staging/mt7621-spi/Kconfig"
-
source "drivers/staging/mt7621-dma/Kconfig"
source "drivers/staging/ralink-gdma/Kconfig"
-source "drivers/staging/mt7621-mmc/Kconfig"
-
source "drivers/staging/mt7621-dts/Kconfig"
source "drivers/staging/gasket/Kconfig"
@@ -122,4 +114,8 @@ source "drivers/staging/axis-fifo/Kconfig"
source "drivers/staging/erofs/Kconfig"
+source "drivers/staging/fieldbus/Kconfig"
+
+source "drivers/staging/kpc2000/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index d1b17ddcd354..0da0d3f0b5e4 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_RTL8723BS) += rtl8723bs/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
-obj-$(CONFIG_R8822BE) += rtlwifi/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
@@ -38,16 +37,15 @@ obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
-obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_PI433) += pi433/
obj-$(CONFIG_PCI_MT7621) += mt7621-pci/
obj-$(CONFIG_PCI_MT7621_PHY) += mt7621-pci-phy/
obj-$(CONFIG_PINCTRL_RT2880) += mt7621-pinctrl/
-obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
-obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_EROFS_FS) += erofs/
+obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
+obj-$(CONFIG_KPC2000) += kpc2000/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 17c5587805f5..d6d605d5cbde 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -1,10 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Android"
if ANDROID
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
- default n
depends on SHMEM
help
The ashmem subsystem is a new shared memory allocator, similar to
@@ -16,7 +16,6 @@ config ASHMEM
config ANDROID_VSOC
tristate "Android Virtual SoC support"
- default n
depends on PCI_MSI
help
This option adds support for the Virtual SoC driver needed to boot
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 90e6154f11a4..14bd9c6ce10d 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 0fdda6f62953..178df581a8fc 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig ION
bool "Ion Memory Manager"
depends on HAS_DMA && MMU
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 8a75bd27c413..00a1ec7b9154 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -259,7 +259,8 @@ do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
atomic_t *owner_ptr = NULL;
struct vsoc_device_region *managed_region_p;
- if (copy_from_user(&np->permission, &arg->perm, sizeof(*np)) ||
+ if (copy_from_user(&np->permission,
+ &arg->perm, sizeof(np->permission)) ||
copy_from_user(&managed_fd,
&arg->managed_region_fd, sizeof(managed_fd))) {
return -EFAULT;
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index d9725888af6f..3fffe4d6f327 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -1,10 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
#
# "Xilinx AXI-Stream FIFO IP core driver"
#
config XIL_AXIS_FIFO
tristate "Xilinx AXI-Stream FIFO IP core driver"
depends on OF
- default n
help
- This adds support for the Xilinx AXI-Stream
- FIFO IP core driver.
+ This adds support for the Xilinx AXI-Stream FIFO IP core driver.
+ The AXI Streaming FIFO allows memory-mapped access to an AXI Streaming
+ interface. The Xilinx AXI-Stream FIFO IP core can be used to interface
+ to the AXI Ethernet without the need to use DMA.
diff --git a/drivers/staging/axis-fifo/Makefile b/drivers/staging/axis-fifo/Makefile
index fe62cd1ac5de..c626005c99db 100644
--- a/drivers/staging/axis-fifo/Makefile
+++ b/drivers/staging/axis-fifo/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo.o
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
index 3f287c48e082..d0c6e42eadda 100644
--- a/drivers/staging/board/Kconfig
+++ b/drivers/staging/board/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config STAGING_BOARD
bool "Staging Board Support"
depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP
diff --git a/drivers/staging/board/Makefile b/drivers/staging/board/Makefile
index 6842745feb94..ed7839752e12 100644
--- a/drivers/staging/board/Makefile
+++ b/drivers/staging/board/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y := board.o
obj-$(CONFIG_ARCH_EMEV2) += kzm9d.o
obj-$(CONFIG_ARCH_R8A7740) += armadillo800eva.o
diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
index aa57a5865556..04be22dca9b6 100644
--- a/drivers/staging/clocking-wizard/Kconfig
+++ b/drivers/staging/clocking-wizard/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Xilinx Clocking Wizard Driver
#
diff --git a/drivers/staging/clocking-wizard/Makefile b/drivers/staging/clocking-wizard/Makefile
index 5ad352f521fe..b1f915224d96 100644
--- a/drivers/staging/clocking-wizard/Makefile
+++ b/drivers/staging/clocking-wizard/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 9ab1ee7d36bf..049b659fa6ad 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -1,6 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
config COMEDI
tristate "Data acquisition support (comedi)"
- ---help---
+ help
Enable support for a wide range of data acquisition devices
for Linux.
@@ -8,14 +9,14 @@ if COMEDI
config COMEDI_DEBUG
bool "Comedi debugging"
- ---help---
+ help
This is an option for use by developers; most people should
say N here. This enables comedi core and driver debugging.
config COMEDI_DEFAULT_BUF_SIZE_KB
int "Comedi default initial asynchronous buffer size in KiB"
default "2048"
- ---help---
+ help
This is the default asynchronous buffer size which is used for
commands running in the background in kernel space. This
defaults to 2048 KiB of memory so that a 16 channel card
@@ -24,7 +25,7 @@ config COMEDI_DEFAULT_BUF_SIZE_KB
config COMEDI_DEFAULT_BUF_MAXSIZE_KB
int "Comedi default maximum asynchronous buffer size in KiB"
default "20480"
- ---help---
+ help
This is the default maximum asynchronous buffer size which can
be requested by a userspace program without root privileges.
This is set to 20480 KiB so that a fast I/O card with 16
@@ -32,7 +33,7 @@ config COMEDI_DEFAULT_BUF_MAXSIZE_KB
menuconfig COMEDI_MISC_DRIVERS
bool "Comedi misc drivers"
- ---help---
+ help
Enable comedi misc drivers to be built
Note that the answer to this question won't directly affect the
@@ -44,7 +45,7 @@ if COMEDI_MISC_DRIVERS
config COMEDI_BOND
tristate "Comedi device bonding support"
select COMEDI_KCOMEDILIB
- ---help---
+ help
Enable support for a driver to 'bond' (merge) multiple subdevices
from multiple devices together as one.
@@ -55,7 +56,7 @@ config COMEDI_BOND
config COMEDI_TEST
tristate "Fake waveform generator support"
- ---help---
+ help
Enable support for the fake waveform generator.
This driver is mainly for testing purposes, but can also be used to
generate sample waveforms on systems that don't have data acquisition
@@ -66,7 +67,7 @@ config COMEDI_TEST
config COMEDI_PARPORT
tristate "Parallel port support"
- ---help---
+ help
Enable support for the standard parallel port.
A cheap and easy way to get a few more digital I/O lines. Steal
additional parallel ports from old computers or your neighbors'
@@ -78,7 +79,7 @@ config COMEDI_PARPORT
config COMEDI_SSV_DNP
tristate "SSV Embedded Systems DIL/Net-PC support"
depends on X86_32 || COMPILE_TEST
- ---help---
+ help
Enable support for SSV Embedded Systems DIL/Net-PC
To compile this driver as a module, choose M here: the module will be
@@ -88,7 +89,7 @@ endif # COMEDI_MISC_DRIVERS
menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
- ---help---
+ help
Enable comedi ISA and PC/104 drivers to be built
Note that the answer to this question won't directly affect the
@@ -100,7 +101,7 @@ if COMEDI_ISA_DRIVERS
config COMEDI_PCL711
tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
select COMEDI_8254
- ---help---
+ help
Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
To compile this driver as a module, choose M here: the module will be
@@ -109,7 +110,7 @@ config COMEDI_PCL711
config COMEDI_PCL724
tristate "Advantech PCL-722/724/731 and ADlink ACL-7122/7124/PET-48DIO"
select COMEDI_8255
- ---help---
+ help
Enable support for ISA and PC/104 based 8255 digital i/o boards. This
driver provides a legacy comedi driver wrapper for the generic 8255
support driver.
@@ -129,7 +130,7 @@ config COMEDI_PCL724
config COMEDI_PCL726
tristate "Advantech PCL-726 and compatible ISA card support"
- ---help---
+ help
Enable support for Advantech PCL-726 and compatible ISA cards.
To compile this driver as a module, choose M here: the module will be
@@ -137,7 +138,7 @@ config COMEDI_PCL726
config COMEDI_PCL730
tristate "Simple Digital I/O board support (8-bit ports)"
- ---help---
+ help
Enable support for various simple ISA or PC/104 Digital I/O boards.
These boards all use 8-bit I/O ports.
@@ -162,7 +163,7 @@ config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
select COMEDI_ISADMA if ISA_DMA_API
select COMEDI_8254
- ---help---
+ help
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
A-822PGH/PGL, A-823PGH/PGL, A-826PG and ICP DAS ISO-813 ISA cards
@@ -174,7 +175,7 @@ config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
select COMEDI_ISADMA if ISA_DMA_API
select COMEDI_8254
- ---help---
+ help
Enable support for Advantech PCL-814 and PCL-816 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -184,7 +185,7 @@ config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
select COMEDI_ISADMA if ISA_DMA_API
select COMEDI_8254
- ---help---
+ help
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@@ -194,7 +195,7 @@ config COMEDI_PCL818
config COMEDI_PCM3724
tristate "Advantech PCM-3724 PC/104 card support"
select COMEDI_8255
- ---help---
+ help
Enable support for Advantech PCM-3724 PC/104 cards.
To compile this driver as a module, choose M here: the module will be
@@ -203,7 +204,7 @@ config COMEDI_PCM3724
config COMEDI_AMPLC_DIO200_ISA
tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
select COMEDI_AMPLC_DIO200
- ---help---
+ help
Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
PC272E ISA DIO boards
@@ -213,7 +214,7 @@ config COMEDI_AMPLC_DIO200_ISA
config COMEDI_AMPLC_PC236_ISA
tristate "Amplicon PC36AT DIO board support"
select COMEDI_AMPLC_PC236
- ---help---
+ help
Enable support for Amplicon PC36AT ISA DIO board.
To compile this driver as a module, choose M here: the module will be
@@ -221,7 +222,7 @@ config COMEDI_AMPLC_PC236_ISA
config COMEDI_AMPLC_PC263_ISA
tristate "Amplicon PC263 relay board support"
- ---help---
+ help
Enable support for Amplicon PC263 ISA relay board. This board has
16 reed relay output channels.
@@ -230,7 +231,7 @@ config COMEDI_AMPLC_PC263_ISA
config COMEDI_RTI800
tristate "Analog Devices RTI-800/815 ISA card support"
- ---help---
+ help
Enable support for Analog Devices RTI-800/815 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -238,7 +239,7 @@ config COMEDI_RTI800
config COMEDI_RTI802
tristate "Analog Devices RTI-802 ISA card support"
- ---help---
+ help
Enable support for Analog Devices RTI-802 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -246,7 +247,7 @@ config COMEDI_RTI802
config COMEDI_DAC02
tristate "Keithley Metrabyte DAC02 compatible ISA card support"
- ---help---
+ help
Enable support for Keithley Metrabyte DAC02 compatible ISA cards.
To compile this driver as a module, choose M here: the module will be
@@ -256,7 +257,7 @@ config COMEDI_DAS16M1
tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
select COMEDI_8254
select COMEDI_8255
- ---help---
+ help
Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
To compile this driver as a module, choose M here: the module will be
@@ -265,7 +266,7 @@ config COMEDI_DAS16M1
config COMEDI_DAS08_ISA
tristate "DAS-08 compatible ISA and PC/104 card support"
select COMEDI_DAS08
- ---help---
+ help
Enable support for Keithley Metrabyte/ComputerBoards DAS08
and compatible ISA and PC/104 cards:
Keithley Metrabyte/ComputerBoards DAS08, DAS08-PGM, DAS08-PGH,
@@ -280,7 +281,7 @@ config COMEDI_DAS16
select COMEDI_ISADMA if ISA_DMA_API
select COMEDI_8254
select COMEDI_8255
- ---help---
+ help
Enable support for Keithley Metrabyte/ComputerBoards DAS16
and compatible ISA and PC/104 cards:
Keithley Metrabyte DAS-16, DAS-16G, DAS-16F, DAS-1201, DAS-1202,
@@ -296,7 +297,7 @@ config COMEDI_DAS16
config COMEDI_DAS800
tristate "DAS800 and compatible ISA card support"
select COMEDI_8254
- ---help---
+ help
Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
Keithley Metrabyte DAS-800, DAS-801, DAS-802
Measurement Computing CIO-DAS800, CIO-DAS801, CIO-DAS802 and
@@ -309,7 +310,7 @@ config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
select COMEDI_ISADMA if ISA_DMA_API
select COMEDI_8254
- ---help---
+ help
Enable support for DAS1800 and compatible ISA cards
Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
DAS-1702ST, DAS-1702ST-DA, DAS-1702HR, DAS-1702HR-DA, DAS-1702/AO,
@@ -323,7 +324,7 @@ config COMEDI_DAS1800
config COMEDI_DAS6402
tristate "DAS6402 and compatible ISA card support"
select COMEDI_8254
- ---help---
+ help
Enable support for DAS6402 and compatible ISA cards
Computerboards, Keithley Metrabyte DAS6402 and compatibles
@@ -332,7 +333,7 @@ config COMEDI_DAS6402
config COMEDI_DT2801
tristate "Data Translation DT2801 ISA card support"
- ---help---
+ help
Enable support for Data Translation DT2801 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -340,7 +341,7 @@ config COMEDI_DT2801
config COMEDI_DT2811
tristate "Data Translation DT2811 ISA card support"
- ---help---
+ help
Enable support for Data Translation DT2811 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -348,7 +349,7 @@ config COMEDI_DT2811
config COMEDI_DT2814
tristate "Data Translation DT2814 ISA card support"
- ---help---
+ help
Enable support for Data Translation DT2814 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -356,7 +357,7 @@ config COMEDI_DT2814
config COMEDI_DT2815
tristate "Data Translation DT2815 ISA card support"
- ---help---
+ help
Enable support for Data Translation DT2815 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -364,7 +365,7 @@ config COMEDI_DT2815
config COMEDI_DT2817
tristate "Data Translation DT2817 ISA card support"
- ---help---
+ help
Enable support for Data Translation DT2817 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -373,7 +374,7 @@ config COMEDI_DT2817
config COMEDI_DT282X
tristate "Data Translation DT2821 series and DT-EZ ISA card support"
select COMEDI_ISADMA if ISA_DMA_API
- ---help---
+ help
Enable support for Data Translation DT2821 series including DT-EZ
DT2821, DT2821-F-16SE, DT2821-F-8DI, DT2821-G-16SE, DT2821-G-8DI,
DT2823 (dt2823), DT2824-PGH, DT2824-PGL, DT2825, DT2827, DT2828,
@@ -385,7 +386,7 @@ config COMEDI_DT282X
config COMEDI_DMM32AT
tristate "Diamond Systems MM-32-AT PC/104 board support"
select COMEDI_8255
- ---help---
+ help
Enable support for Diamond Systems MM-32-AT PC/104 boards
To compile this driver as a module, choose M here: the module will be
@@ -393,7 +394,7 @@ config COMEDI_DMM32AT
config COMEDI_FL512
tristate "FL512 ISA card support"
- ---help---
+ help
Enable support for FL512 ISA card
To compile this driver as a module, choose M here: the module will be
@@ -403,7 +404,7 @@ config COMEDI_AIO_AIO12_8
tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
select COMEDI_8254
select COMEDI_8255
- ---help---
+ help
Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
To compile this driver as a module, choose M here: the module will be
@@ -411,7 +412,7 @@ config COMEDI_AIO_AIO12_8
config COMEDI_AIO_IIRO_16
tristate "I/O Products PC/104 IIRO16 Board support"
- ---help---
+ help
Enable support for I/O Products PC/104 IIRO16 Relay And Isolated
Input Board
@@ -421,7 +422,7 @@ config COMEDI_AIO_IIRO_16
config COMEDI_II_PCI20KC
tristate "Intelligent Instruments PCI-20001C carrier support"
depends on HAS_IOMEM
- ---help---
+ help
Enable support for Intelligent Instruments PCI-20001C carrier
PCI-20001, PCI-20006 and PCI-20341
@@ -430,7 +431,7 @@ config COMEDI_II_PCI20KC
config COMEDI_C6XDIGIO
tristate "Mechatronic Systems Inc. C6x_DIGIO DSP daughter card support"
- ---help---
+ help
Enable support for Mechatronic Systems Inc. C6x_DIGIO DSP daughter
card
@@ -439,7 +440,7 @@ config COMEDI_C6XDIGIO
config COMEDI_MPC624
tristate "Micro/sys MPC-624 PC/104 board support"
- ---help---
+ help
Enable support for Micro/sys MPC-624 PC/104 board
To compile this driver as a module, choose M here: the module will be
@@ -447,7 +448,7 @@ config COMEDI_MPC624
config COMEDI_ADQ12B
tristate "MicroAxial ADQ12-B data acquisition and control card support"
- ---help---
+ help
Enable MicroAxial ADQ12-B daq and control card support.
To compile this driver as a module, choose M here: the module will be
@@ -457,7 +458,7 @@ config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
select COMEDI_ISADMA if ISA_DMA_API
select COMEDI_8254
- ---help---
+ help
Enable support for National Instruments AT-A2150 cards
To compile this driver as a module, choose M here: the module will be
@@ -466,7 +467,7 @@ config COMEDI_NI_AT_A2150
config COMEDI_NI_AT_AO
tristate "NI AT-AO-6/10 EISA card support"
select COMEDI_8254
- ---help---
+ help
Enable support for National Instruments AT-AO-6/10 cards
To compile this driver as a module, choose M here: the module will be
@@ -476,7 +477,7 @@ config COMEDI_NI_ATMIO
tristate "NI AT-MIO E series ISA-PNP card support"
select COMEDI_8255
select COMEDI_NI_TIO
- ---help---
+ help
Enable support for National Instruments AT-MIO E series cards
National Instruments AT-MIO-16E-1 (ni_atmio),
AT-MIO-16E-2, AT-MIO-16E-10, AT-MIO-16DE-10, AT-MIO-64E-3,
@@ -488,7 +489,7 @@ config COMEDI_NI_ATMIO
config COMEDI_NI_ATMIO16D
tristate "NI AT-MIO-16/AT-MIO-16D series ISA card support"
select COMEDI_8255
- ---help---
+ help
Enable support for National Instruments AT-MIO-16/AT-MIO-16D cards.
To compile this driver as a module, choose M here: the module will be
@@ -497,7 +498,7 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
select COMEDI_NI_LABPC
- ---help---
+ help
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
Kernel-level ISA plug-and-play support for the lab-pc-1200 boards has
@@ -508,7 +509,7 @@ config COMEDI_NI_LABPC_ISA
config COMEDI_PCMAD
tristate "Winsystems PCM-A/D12 and PCM-A/D16 PC/104 board support"
- ---help---
+ help
Enable support for Winsystems PCM-A/D12 and PCM-A/D16 PC/104 boards.
To compile this driver as a module, choose M here: the module will be
@@ -516,7 +517,7 @@ config COMEDI_PCMAD
config COMEDI_PCMDA12
tristate "Winsystems PCM-D/A-12 8-channel AO PC/104 board support"
- ---help---
+ help
Enable support for Winsystems PCM-D/A-12 8-channel AO PC/104 boards.
Note that the board is not ISA-PNP capable and thus needs the I/O
port comedi_config parameter.
@@ -526,7 +527,7 @@ config COMEDI_PCMDA12
config COMEDI_PCMMIO
tristate "Winsystems PCM-MIO PC/104 board support"
- ---help---
+ help
Enable support for Winsystems PCM-MIO multifunction PC/104 boards.
To compile this driver as a module, choose M here: the module will be
@@ -534,7 +535,7 @@ config COMEDI_PCMMIO
config COMEDI_PCMUIO
tristate "Winsystems PCM-UIO48A and PCM-UIO96A PC/104 board support"
- ---help---
+ help
Enable support for PCM-UIO48A and PCM-UIO96A PC/104 boards.
To compile this driver as a module, choose M here: the module will be
@@ -542,7 +543,7 @@ config COMEDI_PCMUIO
config COMEDI_MULTIQ3
tristate "Quanser Consulting MultiQ-3 ISA card support"
- ---help---
+ help
Enable support for Quanser Consulting MultiQ-3 ISA cards
To compile this driver as a module, choose M here: the module will be
@@ -550,7 +551,7 @@ config COMEDI_MULTIQ3
config COMEDI_S526
tristate "Sensoray s526 support"
- ---help---
+ help
Enable support for Sensoray s526
To compile this driver as a module, choose M here: the module will be
@@ -561,7 +562,7 @@ endif # COMEDI_ISA_DRIVERS
menuconfig COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers"
depends on PCI
- ---help---
+ help
Enable support for comedi PCI drivers.
To compile this support as a module, choose M here: the module will
@@ -572,7 +573,7 @@ if COMEDI_PCI_DRIVERS
config COMEDI_8255_PCI
tristate "Generic PCI based 8255 digital i/o board support"
select COMEDI_8255
- ---help---
+ help
Enable support for PCI based 8255 digital i/o boards. This driver
provides a PCI wrapper around the generic 8255 driver.
@@ -588,14 +589,14 @@ config COMEDI_8255_PCI
config COMEDI_ADDI_WATCHDOG
tristate
- ---help---
+ help
Provides support for the watchdog subdevice found on many ADDI-DATA
boards. This module will be automatically selected when needed. The
module will be called addi_watchdog.
config COMEDI_ADDI_APCI_1032
tristate "ADDI-DATA APCI_1032 support"
- ---help---
+ help
Enable support for ADDI-DATA APCI_1032 cards
To compile this driver as a module, choose M here: the module will be
@@ -603,7 +604,7 @@ config COMEDI_ADDI_APCI_1032
config COMEDI_ADDI_APCI_1500
tristate "ADDI-DATA APCI_1500 support"
- ---help---
+ help
Enable support for ADDI-DATA APCI_1500 cards
To compile this driver as a module, choose M here: the module will be
@@ -612,7 +613,7 @@ config COMEDI_ADDI_APCI_1500
config COMEDI_ADDI_APCI_1516
tristate "ADDI-DATA APCI-1016/1516/2016 support"
select COMEDI_ADDI_WATCHDOG
- ---help---
+ help
Enable support for ADDI-DATA APCI-1016, APCI-1516 and APCI-2016 boards.
These are 16 channel, optically isolated, digital I/O boards. The 1516
and 2016 boards also have a watchdog for resetting the outputs to "0".
@@ -623,7 +624,7 @@ config COMEDI_ADDI_APCI_1516
config COMEDI_ADDI_APCI_1564
tristate "ADDI-DATA APCI_1564 support"
select COMEDI_ADDI_WATCHDOG
- ---help---
+ help
Enable support for ADDI-DATA APCI_1564 cards
To compile this driver as a module, choose M here: the module will be
@@ -631,7 +632,7 @@ config COMEDI_ADDI_APCI_1564
config COMEDI_ADDI_APCI_16XX
tristate "ADDI-DATA APCI_16xx support"
- ---help---
+ help
Enable support for ADDI-DATA APCI_16xx cards
To compile this driver as a module, choose M here: the module will be
@@ -640,7 +641,7 @@ config COMEDI_ADDI_APCI_16XX
config COMEDI_ADDI_APCI_2032
tristate "ADDI-DATA APCI_2032 support"
select COMEDI_ADDI_WATCHDOG
- ---help---
+ help
Enable support for ADDI-DATA APCI_2032 cards
To compile this driver as a module, choose M here: the module will be
@@ -649,7 +650,7 @@ config COMEDI_ADDI_APCI_2032
config COMEDI_ADDI_APCI_2200
tristate "ADDI-DATA APCI_2200 support"
select COMEDI_ADDI_WATCHDOG
- ---help---
+ help
Enable support for ADDI-DATA APCI_2200 cards
To compile this driver as a module, choose M here: the module will be
@@ -658,7 +659,7 @@ config COMEDI_ADDI_APCI_2200
config COMEDI_ADDI_APCI_3120
tristate "ADDI-DATA APCI_3120/3001 support"
depends on HAS_DMA
- ---help---
+ help
Enable support for ADDI-DATA APCI_3120/3001 cards
To compile this driver as a module, choose M here: the module will be
@@ -666,7 +667,7 @@ config COMEDI_ADDI_APCI_3120
config COMEDI_ADDI_APCI_3501
tristate "ADDI-DATA APCI_3501 support"
- ---help---
+ help
Enable support for ADDI-DATA APCI_3501 cards
To compile this driver as a module, choose M here: the module will be
@@ -674,7 +675,7 @@ config COMEDI_ADDI_APCI_3501
config COMEDI_ADDI_APCI_3XXX
tristate "ADDI-DATA APCI_3xxx support"
- ---help---
+ help
Enable support for ADDI-DATA APCI_3xxx cards
To compile this driver as a module, choose M here: the module will be
@@ -682,7 +683,7 @@ config COMEDI_ADDI_APCI_3XXX
config COMEDI_ADL_PCI6208
tristate "ADLink PCI-6208A support"
- ---help---
+ help
Enable support for ADLink PCI-6208A cards
To compile this driver as a module, choose M here: the module will be
@@ -690,7 +691,7 @@ config COMEDI_ADL_PCI6208
config COMEDI_ADL_PCI7X3X
tristate "ADLink PCI-723X/743X isolated digital i/o board support"
- ---help---
+ help
Enable support for ADlink PCI-723X/743X isolated digital i/o boards.
Supported boards include the 32-channel PCI-7230 (16 in/16 out),
PCI-7233 (32 in), and PCI-7234 (32 out) as well as the 64-channel
@@ -701,7 +702,7 @@ config COMEDI_ADL_PCI7X3X
config COMEDI_ADL_PCI8164
tristate "ADLink PCI-8164 4 Axes Motion Control board support"
- ---help---
+ help
Enable support for ADlink PCI-8164 4 Axes Motion Control board
To compile this driver as a module, choose M here: the module will be
@@ -710,7 +711,7 @@ config COMEDI_ADL_PCI8164
config COMEDI_ADL_PCI9111
tristate "ADLink PCI-9111HR support"
select COMEDI_8254
- ---help---
+ help
Enable support for ADlink PCI9111 cards
To compile this driver as a module, choose M here: the module will be
@@ -720,7 +721,7 @@ config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
depends on HAS_DMA
select COMEDI_8254
- ---help---
+ help
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
To compile this driver as a module, choose M here: the module will be
@@ -729,7 +730,7 @@ config COMEDI_ADL_PCI9118
config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x and PCI-1731 support"
select COMEDI_8254
- ---help---
+ help
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713 and PCI-1731
@@ -738,7 +739,7 @@ config COMEDI_ADV_PCI1710
config COMEDI_ADV_PCI1720
tristate "Advantech PCI-1720 support"
- ---help---
+ help
Enable support for Advantech PCI-1720 Analog Output board.
To compile this driver as a module, choose M here: the module will be
@@ -746,7 +747,7 @@ config COMEDI_ADV_PCI1720
config COMEDI_ADV_PCI1723
tristate "Advantech PCI-1723 support"
- ---help---
+ help
Enable support for Advantech PCI-1723 cards
To compile this driver as a module, choose M here: the module will be
@@ -754,7 +755,7 @@ config COMEDI_ADV_PCI1723
config COMEDI_ADV_PCI1724
tristate "Advantech PCI-1724U support"
- ---help---
+ help
Enable support for Advantech PCI-1724U cards. These are 32-channel
analog output cards with voltage and current loop output ranges and
14-bit resolution.
@@ -764,7 +765,7 @@ config COMEDI_ADV_PCI1724
config COMEDI_ADV_PCI1760
tristate "Advantech PCI-1760 support"
- ---help---
+ help
Enable support for Advantech PCI-1760 board.
To compile this driver as a module, choose M here: the module will be
@@ -774,7 +775,7 @@ config COMEDI_ADV_PCI_DIO
tristate "Advantech PCI DIO card support"
select COMEDI_8254
select COMEDI_8255
- ---help---
+ help
Enable support for Advantech PCI DIO cards
PCI-1730, PCI-1733, PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U,
PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756,
@@ -786,7 +787,7 @@ config COMEDI_ADV_PCI_DIO
config COMEDI_AMPLC_DIO200_PCI
tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
select COMEDI_AMPLC_DIO200
- ---help---
+ help
Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
and PCIe296 DIO boards.
@@ -796,7 +797,7 @@ config COMEDI_AMPLC_DIO200_PCI
config COMEDI_AMPLC_PC236_PCI
tristate "Amplicon PCI236 DIO board support"
select COMEDI_AMPLC_PC236
- ---help---
+ help
Enable support for Amplicon PCI236 DIO board.
To compile this driver as a module, choose M here: the module will be
@@ -804,7 +805,7 @@ config COMEDI_AMPLC_PC236_PCI
config COMEDI_AMPLC_PC263_PCI
tristate "Amplicon PCI263 relay board support"
- ---help---
+ help
Enable support for Amplicon PCI263 relay board. This is a PCI board
with 16 reed relay output channels.
@@ -814,7 +815,7 @@ config COMEDI_AMPLC_PC263_PCI
config COMEDI_AMPLC_PCI224
tristate "Amplicon PCI224 and PCI234 support"
select COMEDI_8254
- ---help---
+ help
Enable support for Amplicon PCI224 and PCI234 AO boards
To compile this driver as a module, choose M here: the module will be
@@ -824,7 +825,7 @@ config COMEDI_AMPLC_PCI230
tristate "Amplicon PCI230 and PCI260 support"
select COMEDI_8254
select COMEDI_8255
- ---help---
+ help
Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
boards
@@ -833,7 +834,7 @@ config COMEDI_AMPLC_PCI230
config COMEDI_CONTEC_PCI_DIO
tristate "Contec PIO1616L digital I/O board support"
- ---help---
+ help
Enable support for the Contec PIO1616L digital I/O board
To compile this driver as a module, choose M here: the module will be
@@ -842,7 +843,7 @@ config COMEDI_CONTEC_PCI_DIO
config COMEDI_DAS08_PCI
tristate "DAS-08 PCI support"
select COMEDI_DAS08
- ---help---
+ help
Enable support for PCI DAS-08 cards.
To compile this driver as a module, choose M here: the module will be
@@ -850,7 +851,7 @@ config COMEDI_DAS08_PCI
config COMEDI_DT3000
tristate "Data Translation DT3000 series support"
- ---help---
+ help
Enable support for Data Translation DT3000 series
DT3001, DT3001-PGL, DT3002, DT3003, DT3003-PGL, DT3004, DT3005 and
DT3004-200
@@ -860,7 +861,7 @@ config COMEDI_DT3000
config COMEDI_DYNA_PCI10XX
tristate "Dynalog PCI DAQ series support"
- ---help---
+ help
Enable support for Dynalog PCI DAQ series
PCI-1050
@@ -869,7 +870,7 @@ config COMEDI_DYNA_PCI10XX
config COMEDI_GSC_HPDI
tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
- ---help---
+ help
Enable support for General Standards Corporation high speed parallel
digital interface rs485 boards PCI-HPDI32 and PMC-HPDI32.
Only receive mode works, transmit not supported.
@@ -879,13 +880,13 @@ config COMEDI_GSC_HPDI
config COMEDI_MF6X4
tristate "Humusoft MF634 and MF624 DAQ Card support"
- ---help---
+ help
This driver supports both Humusoft MF634 and MF624 Data acquisition
cards. The legacy Humusoft MF614 card is not supported.
config COMEDI_ICP_MULTI
tristate "Inova ICP_MULTI support"
- ---help---
+ help
Enable support for Inova ICP_MULTI card
To compile this driver as a module, choose M here: the module will be
@@ -894,7 +895,7 @@ config COMEDI_ICP_MULTI
config COMEDI_DAQBOARD2000
tristate "IOtech DAQboard/2000 support"
select COMEDI_8255
- ---help---
+ help
Enable support for the IOtech DAQboard/2000
To compile this driver as a module, choose M here: the module will be
@@ -902,7 +903,7 @@ config COMEDI_DAQBOARD2000
config COMEDI_JR3_PCI
tristate "JR3/PCI force sensor board support"
- ---help---
+ help
Enable support for JR3/PCI force sensor boards
To compile this driver as a module, choose M here: the module will be
@@ -910,7 +911,7 @@ config COMEDI_JR3_PCI
config COMEDI_KE_COUNTER
tristate "Kolter-Electronic PCI Counter 1 card support"
- ---help---
+ help
Enable support for Kolter-Electronic PCI Counter 1 cards
To compile this driver as a module, choose M here: the module will be
@@ -919,7 +920,7 @@ config COMEDI_KE_COUNTER
config COMEDI_CB_PCIDAS64
tristate "MeasurementComputing PCI-DAS 64xx, 60xx, and 4020 support"
select COMEDI_8255
- ---help---
+ help
Enable support for ComputerBoards/MeasurementComputing PCI-DAS 64xx,
60xx, and 4020 series with the PLX 9080 PCI controller
@@ -930,7 +931,7 @@ config COMEDI_CB_PCIDAS
tristate "MeasurementComputing PCI-DAS support"
select COMEDI_8254
select COMEDI_8255
- ---help---
+ help
Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
AMCC S5933 PCIcontroller: PCI-DAS1602/16, PCI-DAS1602/16jr,
PCI-DAS1602/12, PCI-DAS1200, PCI-DAS1200jr, PCI-DAS1000, PCI-DAS1001
@@ -942,7 +943,7 @@ config COMEDI_CB_PCIDAS
config COMEDI_CB_PCIDDA
tristate "MeasurementComputing PCI-DDA series support"
select COMEDI_8255
- ---help---
+ help
Enable support for ComputerBoards/MeasurementComputing PCI-DDA
series: PCI-DDA08/12, PCI-DDA04/12, PCI-DDA02/12, PCI-DDA08/16,
PCI-DDA04/16 and PCI-DDA02/16
@@ -954,7 +955,7 @@ config COMEDI_CB_PCIMDAS
tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
select COMEDI_8254
select COMEDI_8255
- ---help---
+ help
Enable support for ComputerBoards/MeasurementComputing PCI Migration
series PCIM-DAS1602/16 and PCIe-DAS1602/16.
@@ -964,7 +965,7 @@ config COMEDI_CB_PCIMDAS
config COMEDI_CB_PCIMDDA
tristate "MeasurementComputing PCIM-DDA06-16 support"
select COMEDI_8255
- ---help---
+ help
Enable support for ComputerBoards/MeasurementComputing PCIM-DDA06-16
To compile this driver as a module, choose M here: the module will be
@@ -973,7 +974,7 @@ config COMEDI_CB_PCIMDDA
config COMEDI_ME4000
tristate "Meilhaus ME-4000 support"
select COMEDI_8254
- ---help---
+ help
Enable support for Meilhaus PCI data acquisition cards
ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
@@ -982,7 +983,7 @@ config COMEDI_ME4000
config COMEDI_ME_DAQ
tristate "Meilhaus ME-2000i, ME-2600i, ME-3000vm1 support"
- ---help---
+ help
Enable support for Meilhaus PCI data acquisition cards
ME-2000i, ME-2600i and ME-3000vm1
@@ -991,7 +992,7 @@ config COMEDI_ME_DAQ
config COMEDI_NI_6527
tristate "NI 6527 support"
- ---help---
+ help
Enable support for the National Instruments 6527 PCI card
To compile this driver as a module, choose M here: the module will be
@@ -999,7 +1000,7 @@ config COMEDI_NI_6527
config COMEDI_NI_65XX
tristate "NI 65xx static dio PCI card support"
- ---help---
+ help
Enable support for National Instruments 65xx static dio boards.
Supported devices: National Instruments PCI-6509 (ni_65xx),
PXI-6509, PCI-6510, PCI-6511, PXI-6511, PCI-6512, PXI-6512, PCI-6513,
@@ -1013,7 +1014,7 @@ config COMEDI_NI_660X
tristate "NI 660x counter/timer PCI card support"
depends on HAS_DMA
select COMEDI_NI_TIOCMD
- ---help---
+ help
Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602,
PXI-6602, PXI-6608, PCI-6624, and PXI-6624.
@@ -1022,7 +1023,7 @@ config COMEDI_NI_660X
config COMEDI_NI_670X
tristate "NI 670x PCI card support"
- ---help---
+ help
Enable support for National Instruments PCI-6703 and PCI-6704
To compile this driver as a module, choose M here: the module will be
@@ -1031,7 +1032,7 @@ config COMEDI_NI_670X
config COMEDI_NI_LABPC_PCI
tristate "NI Lab-PC PCI-1200 support"
select COMEDI_NI_LABPC
- ---help---
+ help
Enable support for National Instruments Lab-PC PCI-1200.
To compile this driver as a module, choose M here: the module will be
@@ -1042,7 +1043,7 @@ config COMEDI_NI_PCIDIO
depends on HAS_DMA
select COMEDI_MITE
select COMEDI_8255
- ---help---
+ help
Enable support for National Instruments PCI-DIO-32HS, PXI-6533,
PCI-6533 and PCI-6534
@@ -1054,7 +1055,7 @@ config COMEDI_NI_PCIMIO
depends on HAS_DMA
select COMEDI_NI_TIOCMD
select COMEDI_8255
- ---help---
+ help
Enable support for National Instruments PCI-MIO-E series and M series
(all boards): PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1,
PCI-MIO-16E-4, PCI-6014, PCI-6040E, PXI-6040E, PCI-6030E, PCI-6031E,
@@ -1074,7 +1075,7 @@ config COMEDI_NI_PCIMIO
config COMEDI_RTD520
tristate "Real Time Devices PCI4520/DM7520 support"
select COMEDI_8254
- ---help---
+ help
Enable support for Real Time Devices PCI4520/DM7520
To compile this driver as a module, choose M here: the module will be
@@ -1082,7 +1083,7 @@ config COMEDI_RTD520
config COMEDI_S626
tristate "Sensoray 626 support"
- ---help---
+ help
Enable support for Sensoray 626
To compile this driver as a module, choose M here: the module will be
@@ -1103,7 +1104,7 @@ endif # COMEDI_PCI_DRIVERS
menuconfig COMEDI_PCMCIA_DRIVERS
tristate "Comedi PCMCIA drivers"
depends on PCMCIA
- ---help---
+ help
Enable support for comedi PCMCIA drivers.
To compile this support as a module, choose M here: the module will
@@ -1114,7 +1115,7 @@ if COMEDI_PCMCIA_DRIVERS
config COMEDI_CB_DAS16_CS
tristate "CB DAS16 series PCMCIA support"
select COMEDI_8254
- ---help---
+ help
Enable support for the ComputerBoards/MeasurementComputing PCMCIA
cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
@@ -1124,7 +1125,7 @@ config COMEDI_CB_DAS16_CS
config COMEDI_DAS08_CS
tristate "CB DAS08 PCMCIA support"
select COMEDI_DAS08
- ---help---
+ help
Enable support for the ComputerBoards/MeasurementComputing DAS-08
PCMCIA card
@@ -1133,7 +1134,7 @@ config COMEDI_DAS08_CS
config COMEDI_NI_DAQ_700_CS
tristate "NI DAQCard-700 PCMCIA support"
- ---help---
+ help
Enable support for the National Instruments PCMCIA DAQCard-700 DIO
To compile this driver as a module, choose M here: the module will be
@@ -1142,7 +1143,7 @@ config COMEDI_NI_DAQ_700_CS
config COMEDI_NI_DAQ_DIO24_CS
tristate "NI DAQ-Card DIO-24 PCMCIA support"
select COMEDI_8255
- ---help---
+ help
Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
To compile this driver as a module, choose M here: the module will be
@@ -1151,7 +1152,7 @@ config COMEDI_NI_DAQ_DIO24_CS
config COMEDI_NI_LABPC_CS
tristate "NI DAQCard-1200 PCMCIA support"
select COMEDI_NI_LABPC
- ---help---
+ help
Enable support for the National Instruments PCMCIA DAQCard-1200
To compile this driver as a module, choose M here: the module will be
@@ -1161,7 +1162,7 @@ config COMEDI_NI_MIO_CS
tristate "NI DAQCard E series PCMCIA support"
select COMEDI_NI_TIO
select COMEDI_8255
- ---help---
+ help
Enable support for the National Instruments PCMCIA DAQCard E series
DAQCard-ai-16xe-50, DAQCard-ai-16e-4, DAQCard-6062E, DAQCard-6024E
and DAQCard-6036E
@@ -1171,7 +1172,7 @@ config COMEDI_NI_MIO_CS
config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support"
- ---help---
+ help
Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308
@@ -1183,7 +1184,7 @@ endif # COMEDI_PCMCIA_DRIVERS
menuconfig COMEDI_USB_DRIVERS
tristate "Comedi USB drivers"
depends on USB
- ---help---
+ help
Enable support for comedi USB drivers.
To compile this support as a module, choose M here: the module will
@@ -1193,7 +1194,7 @@ if COMEDI_USB_DRIVERS
config COMEDI_DT9812
tristate "DataTranslation DT9812 USB module support"
- ---help---
+ help
Enable support for the Data Translation DT9812 USB module
To compile this driver as a module, choose M here: the module will be
@@ -1201,7 +1202,7 @@ config COMEDI_DT9812
config COMEDI_NI_USB6501
tristate "NI USB-6501 support"
- ---help---
+ help
Enable support for the National Instruments USB-6501 module.
The NI USB-6501 is a Full-Speed USB 2.0 (12 Mbit/s) device that
@@ -1212,7 +1213,7 @@ config COMEDI_NI_USB6501
config COMEDI_USBDUX
tristate "ITL USB-DUX-D support"
- ---help---
+ help
Enable support for the Incite Technology Ltd USB-DUX-D Board
To compile this driver as a module, choose M here: the module will be
@@ -1220,7 +1221,7 @@ config COMEDI_USBDUX
config COMEDI_USBDUXFAST
tristate "ITL USB-DUXfast support"
- ---help---
+ help
Enable support for the Incite Technology Ltd USB-DUXfast Board
To compile this driver as a module, choose M here: the module will be
@@ -1228,7 +1229,7 @@ config COMEDI_USBDUXFAST
config COMEDI_USBDUXSIGMA
tristate "ITL USB-DUXsigma support"
- ---help---
+ help
Enable support for the Incite Technology Ltd USB-DUXsigma Board
To compile this driver as a module, choose M here: the module will be
@@ -1236,7 +1237,7 @@ config COMEDI_USBDUXSIGMA
config COMEDI_VMK80XX
tristate "Velleman VM110/VM140 USB Board support"
- ---help---
+ help
Build the Velleman USB Board Low-Level Driver supporting the
K8055/K8061 aka VM110/VM140 devices
@@ -1254,7 +1255,7 @@ config COMEDI_8255
config COMEDI_8255_SA
tristate "Standalone 8255 support"
select COMEDI_8255
- ---help---
+ help
Enable support for 8255 digital I/O as a standalone driver.
You should enable compilation this driver if you plan to use a board
@@ -1271,7 +1272,7 @@ config COMEDI_8255_SA
config COMEDI_KCOMEDILIB
tristate "Comedi kcomedilib"
- ---help---
+ help
Build the kcomedilib.
This is a kernel module used to open and manipulate Comedi devices
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index f693c2c0bec3..d2c8cc72a99d 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -211,6 +211,8 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
{
struct comedi_async *async = s->async;
+ lockdep_assert_held(&dev->mutex);
+
/* Round up new_size to multiple of PAGE_SIZE */
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 0caae4a5c471..f6d1287c7b83 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -164,6 +164,7 @@ static bool comedi_clear_board_dev(struct comedi_device *dev)
unsigned int i = dev->minor;
bool cleared = false;
+ lockdep_assert_held(&dev->mutex);
mutex_lock(&comedi_board_minor_table_lock);
if (dev == comedi_board_minor_table[i]) {
comedi_board_minor_table[i] = NULL;
@@ -260,6 +261,7 @@ comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor)
{
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->mutex);
if (minor >= COMEDI_NUM_BOARD_MINORS) {
s = comedi_subdevice_from_minor(dev, minor);
if (!s || (s->subdev_flags & SDF_CMD_READ))
@@ -273,6 +275,7 @@ comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor)
{
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->mutex);
if (minor >= COMEDI_NUM_BOARD_MINORS) {
s = comedi_subdevice_from_minor(dev, minor);
if (!s || (s->subdev_flags & SDF_CMD_WRITE))
@@ -336,6 +339,8 @@ static int resize_async_buffer(struct comedi_device *dev,
struct comedi_async *async = s->async;
int retval;
+ lockdep_assert_held(&dev->mutex);
+
if (new_size > async->max_bufsize)
return -EPERM;
@@ -726,6 +731,7 @@ static void do_become_nonbusy(struct comedi_device *dev,
{
struct comedi_async *async = s->async;
+ lockdep_assert_held(&dev->mutex);
comedi_update_subdevice_runflags(s, COMEDI_SRF_RUNNING, 0);
if (async) {
comedi_buf_reset(s);
@@ -745,6 +751,7 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
int ret = 0;
+ lockdep_assert_held(&dev->mutex);
if (comedi_is_subdevice_running(s) && s->cancel)
ret = s->cancel(dev, s);
@@ -758,6 +765,7 @@ void comedi_device_cancel_all(struct comedi_device *dev)
struct comedi_subdevice *s;
int i;
+ lockdep_assert_held(&dev->mutex);
if (!dev->attached)
return;
@@ -773,6 +781,7 @@ static int is_device_busy(struct comedi_device *dev)
struct comedi_subdevice *s;
int i;
+ lockdep_assert_held(&dev->mutex);
if (!dev->attached)
return 0;
@@ -805,6 +814,7 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
{
struct comedi_devconfig it;
+ lockdep_assert_held(&dev->mutex);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -860,6 +870,7 @@ static int do_bufconfig_ioctl(struct comedi_device *dev,
struct comedi_subdevice *s;
int retval = 0;
+ lockdep_assert_held(&dev->mutex);
if (copy_from_user(&bc, arg, sizeof(bc)))
return -EFAULT;
@@ -920,6 +931,7 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
struct comedi_subdevice *s;
struct comedi_devinfo devinfo;
+ lockdep_assert_held(&dev->mutex);
memset(&devinfo, 0, sizeof(devinfo));
/* fill devinfo structure */
@@ -966,6 +978,7 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
struct comedi_subdinfo *tmp, *us;
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->mutex);
tmp = kcalloc(dev->n_subdevices, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
@@ -1039,6 +1052,7 @@ static int do_chaninfo_ioctl(struct comedi_device *dev,
struct comedi_subdevice *s;
struct comedi_chaninfo it;
+ lockdep_assert_held(&dev->mutex);
if (copy_from_user(&it, arg, sizeof(it)))
return -EFAULT;
@@ -1098,6 +1112,7 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
int retval = 0;
bool become_nonbusy = false;
+ lockdep_assert_held(&dev->mutex);
if (copy_from_user(&bi, arg, sizeof(bi)))
return -EFAULT;
@@ -1282,6 +1297,7 @@ static int check_insn_device_config_length(struct comedi_insn *insn,
*/
static int get_valid_routes(struct comedi_device *dev, unsigned int *data)
{
+ lockdep_assert_held(&dev->mutex);
data[1] = dev->get_valid_routes(dev, data[1], data + 2);
return 0;
}
@@ -1293,6 +1309,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
int ret = 0;
int i;
+ lockdep_assert_held(&dev->mutex);
if (insn->insn & INSN_MASK_SPECIAL) {
/* a non-subdevice instruction */
@@ -1513,6 +1530,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
int i = 0;
int ret = 0;
+ lockdep_assert_held(&dev->mutex);
if (copy_from_user(&insnlist, arg, sizeof(insnlist)))
return -EFAULT;
@@ -1605,6 +1623,7 @@ static int do_insn_ioctl(struct comedi_device *dev,
unsigned int n_data = MIN_SAMPLES;
int ret = 0;
+ lockdep_assert_held(&dev->mutex);
if (copy_from_user(&insn, arg, sizeof(insn)))
return -EFAULT;
@@ -1655,6 +1674,7 @@ static int __comedi_get_user_cmd(struct comedi_device *dev,
{
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->mutex);
if (copy_from_user(cmd, arg, sizeof(*cmd))) {
dev_dbg(dev->class_dev, "bad cmd address\n");
return -EFAULT;
@@ -1713,6 +1733,7 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev,
unsigned int *chanlist;
int ret;
+ lockdep_assert_held(&dev->mutex);
cmd->chanlist = NULL;
chanlist = memdup_user(user_chanlist,
cmd->chanlist_len * sizeof(unsigned int));
@@ -1754,6 +1775,8 @@ static int do_cmd_ioctl(struct comedi_device *dev,
unsigned int __user *user_chanlist;
int ret;
+ lockdep_assert_held(&dev->mutex);
+
/* get the user's cmd and do some simple validation */
ret = __comedi_get_user_cmd(dev, arg, &cmd);
if (ret)
@@ -1861,6 +1884,8 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
unsigned int __user *user_chanlist;
int ret;
+ lockdep_assert_held(&dev->mutex);
+
/* get the user's cmd and do some simple validation */
ret = __comedi_get_user_cmd(dev, arg, &cmd);
if (ret)
@@ -1914,6 +1939,7 @@ static int do_lock_ioctl(struct comedi_device *dev, unsigned long arg,
unsigned long flags;
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->mutex);
if (arg >= dev->n_subdevices)
return -EINVAL;
s = &dev->subdevices[arg];
@@ -1946,6 +1972,7 @@ static int do_unlock_ioctl(struct comedi_device *dev, unsigned long arg,
{
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->mutex);
if (arg >= dev->n_subdevices)
return -EINVAL;
s = &dev->subdevices[arg];
@@ -1980,6 +2007,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
{
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->mutex);
if (arg >= dev->n_subdevices)
return -EINVAL;
s = &dev->subdevices[arg];
@@ -2013,6 +2041,7 @@ static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg,
{
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->mutex);
if (arg >= dev->n_subdevices)
return -EINVAL;
s = &dev->subdevices[arg];
@@ -2048,6 +2077,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
struct comedi_file *cfp = file->private_data;
struct comedi_subdevice *s_old, *s_new;
+ lockdep_assert_held(&dev->mutex);
if (arg >= dev->n_subdevices)
return -EINVAL;
@@ -2090,6 +2120,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
struct comedi_file *cfp = file->private_data;
struct comedi_subdevice *s_old, *s_new;
+ lockdep_assert_held(&dev->mutex);
if (arg >= dev->n_subdevices)
return -EINVAL;
@@ -2995,6 +3026,7 @@ static int __init comedi_init(void)
goto out_cleanup_board_minors;
}
/* comedi_alloc_board_minor() locked the mutex */
+ lockdep_assert_held(&dev->mutex);
mutex_unlock(&dev->mutex);
}
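The comedi_buf.c and comedi_fops.c hunks above (and the drivers.c ones that follow) add lockdep_assert_held(&dev->mutex) at the top of every helper that must only be called with the device mutex held; with CONFIG_PROVE_LOCKING enabled this turns a violated locking contract into an immediate warning, and it compiles to nothing otherwise. A minimal sketch of the pattern, with illustrative names not taken from the patch:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct foo_device {
	struct mutex lock;
	int state;
};

/* Must be called with foo->lock held; lockdep checks that at runtime. */
static void foo_update_state(struct foo_device *foo, int state)
{
	lockdep_assert_held(&foo->lock);
	foo->state = state;
}

static void foo_do_work(struct foo_device *foo)
{
	mutex_lock(&foo->lock);
	foo_update_state(foo, 1);	/* assertion satisfied */
	mutex_unlock(&foo->lock);
}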
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 5a32b8fc000e..750a6ff3c03c 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -159,6 +159,8 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
int i;
struct comedi_subdevice *s;
+ lockdep_assert_held(&dev->attach_lock);
+ lockdep_assert_held(&dev->mutex);
if (dev->subdevices) {
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
@@ -196,6 +198,7 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
void comedi_device_detach(struct comedi_device *dev)
{
+ lockdep_assert_held(&dev->mutex);
comedi_device_cancel_all(dev);
down_write(&dev->attach_lock);
dev->attached = false;
@@ -643,6 +646,7 @@ static int __comedi_device_postconfig_async(struct comedi_device *dev,
unsigned int buf_size;
int ret;
+ lockdep_assert_held(&dev->mutex);
if ((s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) == 0) {
dev_warn(dev->class_dev,
"async subdevices must support SDF_CMD_READ or SDF_CMD_WRITE\n");
@@ -690,6 +694,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev)
int ret;
int i;
+ lockdep_assert_held(&dev->mutex);
if (!dev->insn_device_config)
dev->insn_device_config = insn_device_inval;
@@ -747,6 +752,7 @@ static int comedi_device_postconfig(struct comedi_device *dev)
{
int ret;
+ lockdep_assert_held(&dev->mutex);
ret = __comedi_device_postconfig(dev);
if (ret < 0)
return ret;
@@ -946,6 +952,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct comedi_driver *driv;
int ret;
+ lockdep_assert_held(&dev->mutex);
if (dev->attached)
return -EBUSY;
@@ -1053,18 +1060,19 @@ int comedi_auto_config(struct device *hardware_device,
return PTR_ERR(dev);
}
/* Note: comedi_alloc_board_minor() locked dev->mutex. */
+ lockdep_assert_held(&dev->mutex);
dev->driver = driver;
dev->board_name = dev->driver->driver_name;
ret = driver->auto_attach(dev, context);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
- mutex_unlock(&dev->mutex);
if (ret < 0) {
dev_warn(hardware_device,
"driver '%s' failed to auto-configure device.\n",
driver->driver_name);
+ mutex_unlock(&dev->mutex);
comedi_release_hardware_device(hardware_device);
} else {
/*
@@ -1074,6 +1082,7 @@ int comedi_auto_config(struct device *hardware_device,
dev_info(dev->class_dev,
"driver '%s' has successfully auto-configured '%s'.\n",
driver->driver_name, dev->board_name);
+ mutex_unlock(&dev->mutex);
}
return ret;
}
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 6a93b04f1fdf..dbff0f7e7cf5 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -317,7 +317,7 @@ static int pci1710_ai_read_sample(struct comedi_device *dev,
chan = sample >> 12;
if (chan != devpriv->act_chanlist[cur_chan]) {
dev_err(dev->class_dev,
- "A/D data droput: received from channel %d, expected %d\n",
+ "A/D data dropout: received from channel %d, expected %d\n",
chan, devpriv->act_chanlist[cur_chan]);
return -ENODATA;
}
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 08ffe26c5d43..65f60c2b702a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -2464,7 +2464,7 @@ static int pci230_auto_attach(struct comedi_device *dev,
devpriv->adcg = 0;
devpriv->adccon = PCI230_ADC_TRIG_NONE | PCI230_ADC_IM_SE |
PCI230_ADC_IR_BIP;
- outw(1 << 0, devpriv->daqio + PCI230_ADCEN);
+ outw(BIT(0), devpriv->daqio + PCI230_ADCEN);
outw(devpriv->adcg, devpriv->daqio + PCI230_ADCG);
outw(devpriv->adccon | PCI230_ADC_FIFO_RESET,
devpriv->daqio + PCI230_ADCCON);
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
index b77dc8d5d3ff..c729094298c2 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.c
+++ b/drivers/staging/comedi/drivers/comedi_isadma.c
@@ -172,6 +172,19 @@ struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
goto no_dma;
dma->desc = desc;
dma->n_desc = n_desc;
+ if (dev->hw_dev) {
+ dma->dev = dev->hw_dev;
+ } else {
+ /* Fall back to using the "class" device. */
+ if (!dev->class_dev)
+ goto no_dma;
+ /* Need 24-bit mask for ISA DMA. */
+ if (dma_coerce_mask_and_coherent(dev->class_dev,
+ DMA_BIT_MASK(24))) {
+ goto no_dma;
+ }
+ dma->dev = dev->class_dev;
+ }
dma_chans[0] = dma_chan1;
if (dma_chan2 == 0 || dma_chan2 == dma_chan1)
@@ -192,7 +205,7 @@ struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
desc = &dma->desc[i];
desc->chan = dma_chans[i];
desc->maxsize = maxsize;
- desc->virt_addr = dma_alloc_coherent(NULL, desc->maxsize,
+ desc->virt_addr = dma_alloc_coherent(dma->dev, desc->maxsize,
&desc->hw_addr,
GFP_KERNEL);
if (!desc->virt_addr)
@@ -224,7 +237,7 @@ void comedi_isadma_free(struct comedi_isadma *dma)
for (i = 0; i < dma->n_desc; i++) {
desc = &dma->desc[i];
if (desc->virt_addr)
- dma_free_coherent(NULL, desc->maxsize,
+ dma_free_coherent(dma->dev, desc->maxsize,
desc->virt_addr,
desc->hw_addr);
}
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.h b/drivers/staging/comedi/drivers/comedi_isadma.h
index 2bd1329d727f..9d2b12db7e6e 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.h
+++ b/drivers/staging/comedi/drivers/comedi_isadma.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
struct comedi_device;
+struct device;
/*
* These are used to avoid issues when <asm/dma.h> and the DMA_MODE_
@@ -38,6 +39,7 @@ struct comedi_isadma_desc {
/**
* struct comedi_isadma - ISA DMA data
+ * @dev: device to allocate non-coherent memory for
* @desc: cookie for each DMA buffer
* @n_desc: the number of cookies
* @cur_dma: the current cookie in use
@@ -45,6 +47,7 @@ struct comedi_isadma_desc {
* @chan2: the second DMA channel requested
*/
struct comedi_isadma {
+ struct device *dev;
struct comedi_isadma_desc *desc;
int n_desc;
int cur_dma;
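dma_alloc_coherent() and dma_free_coherent() no longer accept a NULL device, so comedi_isadma now records a real struct device in the new @dev member: the hardware device when one exists, otherwise the comedi class device coerced to the 24-bit DMA mask that ISA DMA requires. A hedged sketch of that selection and allocation path (the helper name and trimmed error handling are illustrative):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *isadma_alloc_buf(struct device *hw_dev, struct device *class_dev,
			      size_t size, dma_addr_t *hw_addr,
			      struct device **dma_dev)
{
	struct device *dev = hw_dev;

	if (!dev) {
		/* Fall back to the class device; ISA DMA needs a 24-bit mask. */
		if (!class_dev ||
		    dma_coerce_mask_and_coherent(class_dev, DMA_BIT_MASK(24)))
			return NULL;
		dev = class_dev;
	}
	*dma_dev = dev;	/* remembered so the free path can use the same device */
	return dma_alloc_coherent(dev, size, hw_addr, GFP_KERNEL);
}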
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 327ecf9aea30..65e5f2e6c122 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -236,9 +236,9 @@ static int das08_ai_insn_read(struct comedi_device *dev,
* COMEDI 16-bit bipolar data value for 0V is 0x8000.
*/
if (msb & 0x80)
- data[n] = (1 << 15) + magnitude;
+ data[n] = BIT(15) + magnitude;
else
- data[n] = (1 << 15) - magnitude;
+ data[n] = BIT(15) - magnitude;
} else {
dev_err(dev->class_dev, "bug! unknown ai encoding\n");
return -1;
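Several hunks in this series (amplc_pci230 and das08 above, ni_at_ao, ni_daq_700, ni_mio_common, dyna_pci10xx and emxx_udc below) replace open-coded 1 << n shifts with the kernel's BIT() macro, which documents intent and avoids signed-shift surprises. A tiny example with a made-up status bit:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_STATUS_BUSY	BIT(15)		/* was (1 << 15) */

static bool my_status_busy(u16 status)
{
	return status & MY_STATUS_BUSY;
}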
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 05207a519755..8a1f9efe7d4e 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -52,7 +52,7 @@
#define DT2811_ADCSR_ADBUSY BIT(5) /* r 1=A/D busy */
#define DT2811_ADCSR_CLRERROR BIT(4)
#define DT2811_ADCSR_DMAENB BIT(3) /* r/w 1=dma ena */
-#define DT2811_ADCSR_INTENB BIT(2) /* r/w 1=interupts ena */
+#define DT2811_ADCSR_INTENB BIT(2) /* r/w 1=interrupts ena */
#define DT2811_ADCSR_ADMODE(x) (((x) & 0x3) << 0)
#define DT2811_ADGCR_REG 0x01 /* r/w A/D Gain/Channel */
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 9f165f1cefa5..634f57730c1e 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -835,11 +835,8 @@ static void dt9812_detach(struct comedi_device *dev)
if (!devpriv)
return;
- mutex_lock(&devpriv->mut);
-
+ mutex_destroy(&devpriv->mut);
usb_set_intfdata(intf, NULL);
-
- mutex_unlock(&devpriv->mut);
}
static struct comedi_driver dt9812_driver = {
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index e50536731d11..c224422bb126 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -54,7 +54,7 @@ static int dyna_pci10xx_ai_eoc(struct comedi_device *dev,
unsigned int status;
status = inw_p(dev->iobase);
- if (status & (1 << 15))
+ if (status & BIT(15))
return 0;
return -EBUSY;
}
@@ -106,10 +106,6 @@ static int dyna_pci10xx_insn_write_ao(struct comedi_device *dev,
{
struct dyna_pci10xx_private *devpriv = dev->private;
int n;
- unsigned int chan, range;
-
- chan = CR_CHAN(insn->chanspec);
- range = range_codes_pci1050_ai[CR_RANGE((insn->chanspec))];
mutex_lock(&devpriv->mutex);
for (n = 0; n < insn->n; n++) {
@@ -194,17 +190,15 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
s->n_chan = 16;
s->maxdata = 0x0FFF;
s->range_table = &range_pci1050_ai;
- s->len_chanlist = 16;
s->insn_read = dyna_pci10xx_insn_read_ai;
/* analog output */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
+ s->n_chan = 1;
s->maxdata = 0x0FFF;
s->range_table = &range_unipolar10;
- s->len_chanlist = 16;
s->insn_write = dyna_pci10xx_insn_write_ao;
/* digital input */
@@ -214,7 +208,6 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
- s->len_chanlist = 16;
s->insn_bits = dyna_pci10xx_di_insn_bits;
/* digital output */
@@ -224,7 +217,6 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
- s->len_chanlist = 16;
s->state = 0;
s->insn_bits = dyna_pci10xx_do_insn_bits;
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 61e03ad84123..639ec1586976 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -371,7 +371,6 @@ static unsigned int mite_get_status(struct mite_channel *mite_chan)
writel(CHOR_CLRDONE,
mite->mmio + MITE_CHOR(mite_chan->channel));
}
- mmiowb();
spin_unlock_irqrestore(&mite->lock, flags);
return status;
}
@@ -451,7 +450,6 @@ void mite_dma_arm(struct mite_channel *mite_chan)
mite_chan->done = 0;
/* arm */
writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
- mmiowb();
spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_dma_arm);
@@ -638,7 +636,6 @@ void mite_release_channel(struct mite_channel *mite_chan)
CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
mite->mmio + MITE_CHCR(mite_chan->channel));
mite_chan->ring = NULL;
- mmiowb();
}
spin_unlock_irqrestore(&mite->lock, flags);
}
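The mmiowb() calls dropped from mite.c here (and from ni_660x, ni_mio_common, ni_pcidio, ni_tio and s626 below) are no longer needed: since the mmiowb() rework in 5.1, spin_unlock() already provides the MMIO write ordering on the architectures that used to require an explicit barrier. The resulting pattern, with illustrative names:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_chip {
	void __iomem *mmio;
	spinlock_t lock;
};

static void my_chip_kick(struct my_chip *chip, u32 val, unsigned int reg)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	writel(val, chip->mmio + reg);
	/* no mmiowb() here: the unlock orders the MMIO write */
	spin_unlock_irqrestore(&chip->lock, flags);
}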
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 405573e927cf..4ee9b260eab0 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -320,7 +320,6 @@ static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
NI660X_DMA_CFG_RESET(mite_channel),
NI660X_DMA_CFG);
- mmiowb();
}
static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
@@ -333,7 +332,6 @@ static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel);
ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG);
- mmiowb();
}
static int ni_660x_request_mite_channel(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index aad0b295ee2b..814895d01ffa 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -244,7 +244,7 @@ static int atao_calib_insn_write(struct comedi_device *dev,
/* write the channel and last data value to the caldac */
/* clock the bitstring to the caldac; MSB -> LSB */
- for (bit = 1 << 10; bit; bit >>= 1) {
+ for (bit = BIT(10); bit; bit >>= 1) {
bits = (bit & bitstring) ? ATAO_CFG2_SDATA : 0;
outw(bits, dev->iobase + ATAO_CFG2_REG);
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index b9e525b9beb9..81fd4c26a16f 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -239,7 +239,7 @@ static int daq700_auto_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = 16;
- s->maxdata = (1 << 12) - 1;
+ s->maxdata = BIT(12) - 1;
s->range_table = &range_daq700_ai;
s->insn_read = daq700_ai_rinsn;
daq700_ai_config(dev, s);
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index b04dad8c7092..c175227009f1 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -547,7 +547,6 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
reg);
break;
}
- mmiowb();
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
@@ -2110,6 +2109,7 @@ static int ni_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->scan_begin_src == TRIG_TIMER) {
unsigned int tmp = cmd->scan_begin_arg;
+
cmd->scan_begin_arg =
ni_timer_to_ns(dev, ni_ns_to_timer(dev,
cmd->scan_begin_arg,
@@ -2120,6 +2120,7 @@ static int ni_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->convert_src == TRIG_TIMER) {
if (!devpriv->is_611x && !devpriv->is_6143) {
unsigned int tmp = cmd->convert_arg;
+
cmd->convert_arg =
ni_timer_to_ns(dev, ni_ns_to_timer(dev,
cmd->convert_arg,
@@ -4398,9 +4399,13 @@ static int ni_calib_insn_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- ni_write_caldac(dev, CR_CHAN(insn->chanspec), data[0]);
+ if (insn->n) {
+ /* only bother writing the last sample to the channel */
+ ni_write_caldac(dev, CR_CHAN(insn->chanspec),
+ data[insn->n - 1]);
+ }
- return 1;
+ return insn->n;
}
static int ni_calib_insn_read(struct comedi_device *dev,
@@ -4409,10 +4414,12 @@ static int ni_calib_insn_read(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
+ unsigned int i;
- data[0] = devpriv->caldacs[CR_CHAN(insn->chanspec)];
+ for (i = 0; i < insn->n; i++)
+ data[0] = devpriv->caldacs[CR_CHAN(insn->chanspec)];
- return 1;
+ return insn->n;
}
static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -4505,9 +4512,15 @@ static int ni_eeprom_insn_read(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- data[0] = ni_read_eeprom(dev, CR_CHAN(insn->chanspec));
+ unsigned int val;
+ unsigned int i;
- return 1;
+ if (insn->n) {
+ val = ni_read_eeprom(dev, CR_CHAN(insn->chanspec));
+ for (i = 0; i < insn->n; i++)
+ data[i] = val;
+ }
+ return insn->n;
}
static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
@@ -4516,10 +4529,12 @@ static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
+ unsigned int i;
- data[0] = devpriv->eeprom_buffer[CR_CHAN(insn->chanspec)];
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->eeprom_buffer[CR_CHAN(insn->chanspec)];
- return 1;
+ return insn->n;
}
static unsigned int ni_old_get_pfi_routing(struct comedi_device *dev,
@@ -4784,7 +4799,7 @@ static int cs5529_do_conversion(struct comedi_device *dev,
if (data) {
*data = ni_ao_win_inw(dev, NI67XX_CAL_DATA_REG);
/* cs5529 returns 16 bit signed data in bipolar mode */
- *data ^= (1 << 15);
+ *data ^= BIT(15);
}
return 0;
}
@@ -6193,7 +6208,7 @@ static int ni_E_init(struct comedi_device *dev,
s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_INTERNAL;
/* one channel for each analog output channel */
s->n_chan = board->n_aochan;
- s->maxdata = (1 << 16) - 1;
+ s->maxdata = BIT(16) - 1;
s->range_table = &range_unknown; /* XXX */
s->insn_read = cs5529_ai_insn_read;
s->insn_config = NULL;
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 4bdef87d5dd7..8f3864799c19 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -310,7 +310,6 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
dev->mmio + DMA_LINE_CONTROL_GROUP1);
- mmiowb();
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
@@ -327,7 +326,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
writeb(primary_DMAChannel_bits(0) |
secondary_DMAChannel_bits(0),
dev->mmio + DMA_LINE_CONTROL_GROUP1);
- mmiowb();
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
diff --git a/drivers/staging/comedi/drivers/ni_routing/tools/Makefile b/drivers/staging/comedi/drivers/ni_routing/tools/Makefile
index 1966850584d2..6e92a06a44cb 100644
--- a/drivers/staging/comedi/drivers/ni_routing/tools/Makefile
+++ b/drivers/staging/comedi/drivers/ni_routing/tools/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# this make file is simply to help autogenerate these files:
# ni_route_values.h
# ni_device_routes.h
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 048cb35723ad..b264db32a411 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -234,7 +234,6 @@ static void ni_tio_set_bits_transient(struct ni_gpct *counter,
regs[reg] &= ~mask;
regs[reg] |= (value & mask);
ni_tio_write(counter, regs[reg] | transient, reg);
- mmiowb();
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
}
}
@@ -1682,9 +1681,11 @@ int ni_tio_insn_write(struct comedi_device *dev,
unsigned int cidx = counter->counter_index;
unsigned int chip = counter->chip_index;
unsigned int load_reg;
+ unsigned int load_val;
if (insn->n < 1)
return 0;
+ load_val = data[insn->n - 1];
switch (channel) {
case 0:
/*
@@ -1697,7 +1698,7 @@ int ni_tio_insn_write(struct comedi_device *dev,
* load register is already selected.
*/
load_reg = ni_tio_next_load_register(counter);
- ni_tio_write(counter, data[0], load_reg);
+ ni_tio_write(counter, load_val, load_reg);
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, GI_LOAD);
/* restore load reg */
@@ -1705,17 +1706,17 @@ int ni_tio_insn_write(struct comedi_device *dev,
load_reg);
break;
case 1:
- counter_dev->regs[chip][NITIO_LOADA_REG(cidx)] = data[0];
- ni_tio_write(counter, data[0], NITIO_LOADA_REG(cidx));
+ counter_dev->regs[chip][NITIO_LOADA_REG(cidx)] = load_val;
+ ni_tio_write(counter, load_val, NITIO_LOADA_REG(cidx));
break;
case 2:
- counter_dev->regs[chip][NITIO_LOADB_REG(cidx)] = data[0];
- ni_tio_write(counter, data[0], NITIO_LOADB_REG(cidx));
+ counter_dev->regs[chip][NITIO_LOADB_REG(cidx)] = load_val;
+ ni_tio_write(counter, load_val, NITIO_LOADB_REG(cidx));
break;
default:
return -EINVAL;
}
- return 0;
+ return insn->n;
}
EXPORT_SYMBOL_GPL(ni_tio_insn_write);
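The ni_mio_common and ni_tio changes above align the instruction handlers with the comedi core's convention: data[] carries insn->n samples, a write handler acts on the last sample, a read handler fills every entry, and both return insn->n on success instead of a hard-coded 0 or 1. A condensed sketch of that convention (my_hw_write() and my_hw_read() are placeholders; the comedi types come from the in-tree "../comedidev.h"):

static int my_insn_write(struct comedi_device *dev, struct comedi_subdevice *s,
			 struct comedi_insn *insn, unsigned int *data)
{
	/* Only the most recent sample matters for a single register. */
	if (insn->n)
		my_hw_write(dev, CR_CHAN(insn->chanspec), data[insn->n - 1]);
	return insn->n;
}

static int my_insn_read(struct comedi_device *dev, struct comedi_subdevice *s,
			struct comedi_insn *insn, unsigned int *data)
{
	unsigned int i;

	/* Every requested sample gets a value. */
	for (i = 0; i < insn->n; i++)
		data[i] = my_hw_read(dev, CR_CHAN(insn->chanspec));
	return insn->n;
}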
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 808ed92ed66f..360e86a19fe3 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
size = usb_endpoint_maxp(devpriv->ep_tx);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_tx_buf) {
- kfree(devpriv->usb_rx_buf);
+ if (!devpriv->usb_tx_buf)
return -ENOMEM;
- }
return 0;
}
@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
if (!devpriv)
return -ENOMEM;
+ mutex_init(&devpriv->mut);
+ usb_set_intfdata(intf, devpriv);
+
ret = ni6501_find_endpoints(dev);
if (ret)
return ret;
@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- mutex_init(&devpriv->mut);
- usb_set_intfdata(intf, devpriv);
-
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
@@ -564,14 +562,12 @@ static void ni6501_detach(struct comedi_device *dev)
if (!devpriv)
return;
- mutex_lock(&devpriv->mut);
+ mutex_destroy(&devpriv->mut);
usb_set_intfdata(intf, NULL);
kfree(devpriv->usb_rx_buf);
kfree(devpriv->usb_tx_buf);
-
- mutex_unlock(&devpriv->mut);
}
static struct comedi_driver ni6501_driver = {
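ni_usb6501 (like dt9812 above and the usbdux*/vmk80xx drivers below) now initializes its private mutex and sets the USB interface data before any early-return path in auto_attach, so detach can always rely on them, and detach destroys the mutex instead of taking it, since nothing can race with detach at that point; the attach error path also stops freeing the RX buffer because detach frees both buffers unconditionally. A minimal sketch of that shape (struct my_private and my_find_endpoints() are illustrative):

static int my_usb_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct usb_interface *intf = comedi_to_usb_interface(dev);
	struct my_private *devpriv;
	int ret;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	/* Set up everything detach depends on before any error return. */
	mutex_init(&devpriv->mut);
	usb_set_intfdata(intf, devpriv);

	ret = my_find_endpoints(dev);
	if (ret)
		return ret;
	/* ... buffer allocation, subdevice setup ... */
	return 0;
}

static void my_usb_detach(struct comedi_device *dev)
{
	struct usb_interface *intf = comedi_to_usb_interface(dev);
	struct my_private *devpriv = dev->private;

	if (!devpriv)
		return;

	mutex_destroy(&devpriv->mut);	/* no other users remain here */
	usb_set_intfdata(intf, NULL);
	kfree(devpriv->usb_rx_buf);
	kfree(devpriv->usb_tx_buf);
}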
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 257b0daff01f..6daaacf7a26a 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* quatech_daqp_cs.c
* Quatech DAQP PCMCIA data capture cards COMEDI client driver
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index f5af6f4069dc..39049d3c56d7 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -108,7 +108,6 @@ static void s626_mc_enable(struct comedi_device *dev,
{
unsigned int val = (cmd << 16) | cmd;
- mmiowb();
writel(val, dev->mmio + reg);
}
@@ -116,7 +115,6 @@ static void s626_mc_disable(struct comedi_device *dev,
unsigned int cmd, unsigned int reg)
{
writel(cmd << 16, dev->mmio + reg);
- mmiowb();
}
static bool s626_mc_test(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/tests/ni_routes_test.c b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
index c6dc18f346e8..f809051820ac 100644
--- a/drivers/staging/comedi/drivers/tests/ni_routes_test.c
+++ b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
@@ -282,7 +282,7 @@ void test_ni_sort_device_routes(void)
void test_ni_find_route_set(void)
{
- unittest(ni_find_route_set(bad_dest, &DR) == NULL,
+ unittest(!ni_find_route_set(bad_dest, &DR),
"check for nonexistent route_set\n");
unittest(ni_find_route_set(dest0, &DR) == &DR.routes[0],
"find first route_set\n");
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index de177418190f..b8f54b7fb34a 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -1691,6 +1691,8 @@ static void usbdux_detach(struct comedi_device *dev)
usbdux_free_usb_buffers(dev);
mutex_unlock(&devpriv->mut);
+
+ mutex_destroy(&devpriv->mut);
}
static struct comedi_driver usbdux_driver = {
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 0d54f394dbd2..04bc488385e6 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -993,6 +993,8 @@ static void usbduxfast_detach(struct comedi_device *dev)
kfree(devpriv->duxbuf);
mutex_unlock(&devpriv->mut);
+
+ mutex_destroy(&devpriv->mut);
}
static struct comedi_driver usbduxfast_driver = {
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index af5605a875e2..3cc40d2544be 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1577,6 +1577,8 @@ static void usbduxsigma_detach(struct comedi_device *dev)
usbduxsigma_free_usb_buffers(dev);
mutex_unlock(&devpriv->mut);
+
+ mutex_destroy(&devpriv->mut);
}
static struct comedi_driver usbduxsigma_driver = {
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 6234b649d887..65dc6c51037e 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
size = usb_endpoint_maxp(devpriv->ep_tx);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_tx_buf) {
- kfree(devpriv->usb_rx_buf);
+ if (!devpriv->usb_tx_buf)
return -ENOMEM;
- }
return 0;
}
@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
devpriv->model = board->model;
+ sema_init(&devpriv->limit_sem, 8);
+
ret = vmk80xx_find_usb_endpoints(dev);
if (ret)
return ret;
@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- sema_init(&devpriv->limit_sem, 8);
-
usb_set_intfdata(intf, devpriv);
if (devpriv->model == VMK8055_MODEL)
diff --git a/drivers/staging/comedi/kcomedilib/Makefile b/drivers/staging/comedi/kcomedilib/Makefile
index 3aff8ed08e2d..8031142a105f 100644
--- a/drivers/staging/comedi/kcomedilib/Makefile
+++ b/drivers/staging/comedi/kcomedilib/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
obj-$(CONFIG_COMEDI_KCOMEDILIB) += kcomedilib.o
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
index e50e72218364..ad1478c53e9e 100644
--- a/drivers/staging/emxx_udc/Kconfig
+++ b/drivers/staging/emxx_udc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config USB_EMXX
tristate "EMXX USB Function Device Controller"
depends on USB_GADGET && (ARCH_RENESAS || (ARM && COMPILE_TEST))
diff --git a/drivers/staging/emxx_udc/Makefile b/drivers/staging/emxx_udc/Makefile
index 6352724c0b57..569c5e9a9bae 100644
--- a/drivers/staging/emxx_udc/Makefile
+++ b/drivers/staging/emxx_udc/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_EMXX) := emxx_udc.o
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index a913d40f0801..4f3c2c13a225 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -136,7 +136,7 @@ static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
struct usb_ctrlrequest *p_ctrl;
struct nbu2ss_udc *udc;
- if ((!_ep) || (!_req))
+ if (!_ep || !_req)
return;
udc = (struct nbu2ss_udc *)_req->context;
@@ -459,22 +459,22 @@ static void _nbu2ss_dma_map_single(struct nbu2ss_udc *udc,
if (req->unaligned) {
req->req.dma = ep->phys_buf;
} else {
- req->req.dma = dma_map_single(
- udc->gadget.dev.parent,
- req->req.buf,
- req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->req.dma = dma_map_single(udc->gadget.dev.parent,
+ req->req.buf,
+ req->req.length,
+ (direct == USB_DIR_IN)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
}
req->mapped = 1;
} else {
if (!req->unaligned)
- dma_sync_single_for_device(
- udc->gadget.dev.parent,
- req->req.dma,
- req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ dma_sync_single_for_device(udc->gadget.dev.parent,
+ req->req.dma,
+ req->req.length,
+ (direct == USB_DIR_IN)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
req->mapped = 0;
}
@@ -940,8 +940,8 @@ static int _nbu2ss_epn_out_transfer(struct nbu2ss_udc *udc,
/*-------------------------------------------------------------*/
/* Receive Length */
- i_recv_length
- = _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT) & EPN_LDATA;
+ i_recv_length =
+ _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT) & EPN_LDATA;
if (i_recv_length != 0) {
result = _nbu2ss_epn_out_data(udc, ep, req, i_recv_length);
@@ -1414,12 +1414,11 @@ static inline int _nbu2ss_req_feature(struct nbu2ss_udc *udc, bool bset)
if (selector == USB_ENDPOINT_HALT) {
ep_adrs = wIndex & 0xFF;
if (!bset) {
- _nbu2ss_endpoint_toggle_reset(
- udc, ep_adrs);
+ _nbu2ss_endpoint_toggle_reset(udc,
+ ep_adrs);
}
- _nbu2ss_set_endpoint_stall(
- udc, ep_adrs, bset);
+ _nbu2ss_set_endpoint_stall(udc, ep_adrs, bset);
result = 0;
}
@@ -1496,10 +1495,10 @@ static int std_req_get_status(struct nbu2ss_udc *udc)
case USB_RECIP_DEVICE:
if (udc->ctrl.wIndex == 0x0000) {
if (udc->gadget.is_selfpowered)
- status_data |= (1 << USB_DEVICE_SELF_POWERED);
+ status_data |= BIT(USB_DEVICE_SELF_POWERED);
if (udc->remote_wakeup)
- status_data |= (1 << USB_DEVICE_REMOTE_WAKEUP);
+ status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
result = 0;
}
@@ -1511,7 +1510,7 @@ static int std_req_get_status(struct nbu2ss_udc *udc)
result = _nbu2ss_get_ep_stall(udc, ep_adrs);
if (result > 0)
- status_data |= (1 << USB_ENDPOINT_HALT);
+ status_data |= BIT(USB_ENDPOINT_HALT);
}
break;
@@ -2423,13 +2422,13 @@ static int nbu2ss_ep_enable(struct usb_ep *_ep,
struct nbu2ss_ep *ep;
struct nbu2ss_udc *udc;
- if ((!_ep) || (!desc)) {
+ if (!_ep || !desc) {
pr_err(" *** %s, bad param\n", __func__);
return -EINVAL;
}
ep = container_of(_ep, struct nbu2ss_ep, ep);
- if ((!ep->udc)) {
+ if (!ep->udc) {
pr_err(" *** %s, ep == NULL !!\n", __func__);
return -EINVAL;
}
@@ -2545,7 +2544,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
int result = -EINVAL;
/* catch various bogus parameters */
- if ((!_ep) || (!_req)) {
+ if (!_ep || !_req) {
if (!_ep)
pr_err("udc: %s --- _ep == NULL\n", __func__);
@@ -2595,9 +2594,9 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
if (req->unaligned) {
if (!ep->virt_buf)
- ep->virt_buf = dma_alloc_coherent(
- NULL, PAGE_SIZE,
- &ep->phys_buf, GFP_ATOMIC | GFP_DMA);
+ ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &ep->phys_buf,
+ GFP_ATOMIC | GFP_DMA);
if (ep->epnum > 0) {
if (ep->direct == USB_DIR_IN)
memcpy(ep->virt_buf, req->req.buf,
@@ -2647,7 +2646,7 @@ static int nbu2ss_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
unsigned long flags;
/* catch various bogus parameters */
- if ((!_ep) || (!_req)) {
+ if (!_ep || !_req) {
/* pr_err("%s, bad param(1)\n", __func__); */
return -EINVAL;
}
diff --git a/drivers/staging/erofs/Documentation/filesystems/erofs.txt b/drivers/staging/erofs/Documentation/filesystems/erofs.txt
index 961ec4da7705..74cf84ac48a3 100644
--- a/drivers/staging/erofs/Documentation/filesystems/erofs.txt
+++ b/drivers/staging/erofs/Documentation/filesystems/erofs.txt
@@ -60,6 +60,7 @@ fault_injection=%d Enable fault injection in all supported types with
specified injection rate. Supported injection type:
Type_Name Type_Value
FAULT_KMALLOC 0x000000001
+ FAULT_READ_IO 0x000000002
(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled
by default if CONFIG_EROFS_FS_XATTR is selected.
(no)acl Setup POSIX Access Control List. Note: acl is enabled
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index 526e0dbea5b5..746685f90564 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -17,12 +17,17 @@
static inline void read_endio(struct bio *bio)
{
- int i;
+ struct super_block *const sb = bio->bi_private;
struct bio_vec *bvec;
- const blk_status_t err = bio->bi_status;
+ blk_status_t err = bio->bi_status;
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
+ erofs_show_injection_info(FAULT_READ_IO);
+ err = BLK_STS_IOERR;
+ }
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
/* page is already locked */
@@ -63,7 +68,7 @@ repeat:
if (!PageUptodate(page)) {
struct bio *bio;
- bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
+ bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
if (IS_ERR(bio)) {
DBG_BUGON(nofail);
err = PTR_ERR(bio);
@@ -188,7 +193,8 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
unsigned int nblocks,
bool ra)
{
- struct inode *inode = mapping->host;
+ struct inode *const inode = mapping->host;
+ struct super_block *const sb = inode->i_sb;
erofs_off_t current_block = (erofs_off_t)page->index;
int err;
@@ -280,9 +286,8 @@ submit_bio_retry:
if (nblocks > BIO_MAX_PAGES)
nblocks = BIO_MAX_PAGES;
- bio = erofs_grab_bio(inode->i_sb,
- blknr, nblocks, read_endio, false);
-
+ bio = erofs_grab_bio(sb, blknr, nblocks, sb,
+ read_endio, false);
if (IS_ERR(bio)) {
err = PTR_ERR(bio);
bio = NULL;
@@ -298,7 +303,7 @@ submit_bio_retry:
*last_block = current_block;
/* shift in advance in case of it followed by too many gaps */
- if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
+ if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
/* err should reassign to 0 after submitting */
err = 0;
goto submit_bio_out;
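The data.c hunk passes the super_block through the new bi_private argument of erofs_grab_bio() so that read_endio() can reach the erofs_sb_info and, via the new FAULT_READ_IO type, force a simulated I/O error; it also switches to the three-argument bio_for_each_segment_all() iterator. An approximate reconstruction of the resulting completion handler, with the unchanged per-page tail paraphrased (time_to_inject(), erofs_show_injection_info() and EROFS_SB() are the erofs helpers used elsewhere in this patch):

static void my_read_endio(struct bio *bio)
{
	struct super_block *sb = bio->bi_private;
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/* Optionally turn a successful read into an error for testing. */
	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);
		unlock_page(page);
	}
	bio_put(bio);
}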
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index 924b8dfc7a8f..c7d3b815a798 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -25,7 +25,7 @@ static int read_inode(struct inode *inode, void *data)
if (unlikely(vi->data_mapping_mode >= EROFS_INODE_LAYOUT_MAX)) {
errln("unknown data mapping mode %u of nid %llu",
- vi->data_mapping_mode, vi->nid);
+ vi->data_mapping_mode, vi->nid);
DBG_BUGON(1);
return -EIO;
}
@@ -38,7 +38,7 @@ static int read_inode(struct inode *inode, void *data)
inode->i_mode = le16_to_cpu(v2->i_mode);
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)) {
+ S_ISLNK(inode->i_mode)) {
vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr);
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
inode->i_rdev =
@@ -68,7 +68,7 @@ static int read_inode(struct inode *inode, void *data)
inode->i_mode = le16_to_cpu(v1->i_mode);
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)) {
+ S_ISLNK(inode->i_mode)) {
vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr);
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
inode->i_rdev =
@@ -92,7 +92,7 @@ static int read_inode(struct inode *inode, void *data)
inode->i_size = le32_to_cpu(v1->i_size);
} else {
errln("unsupported on-disk inode version %u of nid %llu",
- __inode_version(advise), vi->nid);
+ __inode_version(advise), vi->nid);
DBG_BUGON(1);
return -EIO;
}
@@ -129,7 +129,7 @@ static int fill_inline_data(struct inode *inode, void *data,
if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) {
char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL);
- if (unlikely(lnk == NULL))
+ if (unlikely(!lnk))
return -ENOMEM;
m_pofs += vi->inode_isize + vi->xattr_isize;
@@ -173,7 +173,7 @@ static int fill_inode(struct inode *inode, int isdir)
if (IS_ERR(page)) {
errln("failed to get inode (nid: %llu) page, err %ld",
- vi->nid, PTR_ERR(page));
+ vi->nid, PTR_ERR(page));
return PTR_ERR(page);
}
@@ -260,16 +260,18 @@ static inline struct inode *erofs_iget_locked(struct super_block *sb,
}
struct inode *erofs_iget(struct super_block *sb,
- erofs_nid_t nid, bool isdir)
+ erofs_nid_t nid,
+ bool isdir)
{
struct inode *inode = erofs_iget_locked(sb, nid);
- if (unlikely(inode == NULL))
+ if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
int err;
struct erofs_vnode *vi = EROFS_V(inode);
+
vi->nid = nid;
err = fill_inode(inode, isdir);
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index e3bfde00c7d2..c47778b3fabd 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -44,11 +44,12 @@
enum {
FAULT_KMALLOC,
+ FAULT_READ_IO,
FAULT_MAX,
};
#ifdef CONFIG_EROFS_FAULT_INJECTION
-extern char *erofs_fault_name[FAULT_MAX];
+extern const char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
struct erofs_fault_info {
@@ -268,8 +269,15 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
struct page *page);
#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+ struct page *page)
+{
+ return page->mapping == MNGD_MAPPING(sbi);
+}
#else
#define MNGD_MAPPING(sbi) (NULL)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+ struct page *page) { return false; }
#endif
#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES 3
@@ -460,7 +468,7 @@ static inline int z_erofs_map_blocks_iter(struct inode *inode,
/* data.c */
static inline struct bio *
erofs_grab_bio(struct super_block *sb,
- erofs_blk_t blkaddr, unsigned int nr_pages,
+ erofs_blk_t blkaddr, unsigned int nr_pages, void *bi_private,
bio_end_io_t endio, bool nofail)
{
const gfp_t gfp = GFP_NOIO;
@@ -469,7 +477,7 @@ erofs_grab_bio(struct super_block *sb,
do {
if (nr_pages == 1) {
bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
- if (unlikely(bio == NULL)) {
+ if (unlikely(!bio)) {
DBG_BUGON(nofail);
return ERR_PTR(-ENOMEM);
}
@@ -477,11 +485,12 @@ erofs_grab_bio(struct super_block *sb,
}
bio = bio_alloc(gfp, nr_pages);
nr_pages /= 2;
- } while (unlikely(bio == NULL));
+ } while (unlikely(!bio));
bio->bi_end_io = endio;
bio_set_dev(bio, sb->s_bdev);
bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
+ bio->bi_private = bi_private;
return bio;
}
@@ -565,7 +574,7 @@ static inline void *erofs_vmap(struct page **pages, unsigned int count)
while (1) {
void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
/* retry two more times (totally 3 times) */
- if (addr != NULL || ++i >= 3)
+ if (addr || ++i >= 3)
return addr;
vm_unmap_aliases();
}
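internal.h adds erofs_page_is_managed() together with a static inline stub for the !EROFS_FS_HAS_MANAGED_CACHE case, which lets unzip_vle.c drop its #ifdef blocks: the stub returns false and the compiler discards the dead branch. The same idiom in general form (CONFIG_MY_FEATURE, struct my_ctx and the helpers are placeholders):

#ifdef CONFIG_MY_FEATURE
bool my_page_is_managed(struct my_ctx *ctx, struct page *page);
#else
static inline bool my_page_is_managed(struct my_ctx *ctx, struct page *page)
{
	return false;	/* feature compiled out */
}
#endif

/* Callers stay #ifdef-free; the branch folds away when the stub is used. */
static void my_release_page(struct my_ctx *ctx, struct page *page)
{
	if (my_page_is_managed(ctx, page))
		return;		/* owned by the managed cache, not freed here */
	put_page(page);
}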
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 3f4fa52c10fa..d8d9dc9dab43 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -211,7 +211,8 @@ int erofs_namei(struct inode *dir,
/* NOTE: i_mutex is already held by vfs */
static struct dentry *erofs_lookup(struct inode *dir,
- struct dentry *dentry, unsigned int flags)
+ struct dentry *dentry,
+ unsigned int flags)
{
int err;
erofs_nid_t nid;
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 15c784fba879..399847d21146 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -33,10 +33,11 @@ static void init_once(void *ptr)
static int __init erofs_init_inode_cache(void)
{
erofs_inode_cachep = kmem_cache_create("erofs_inode",
- sizeof(struct erofs_vnode), 0,
- SLAB_RECLAIM_ACCOUNT, init_once);
+ sizeof(struct erofs_vnode), 0,
+ SLAB_RECLAIM_ACCOUNT,
+ init_once);
- return erofs_inode_cachep != NULL ? 0 : -ENOMEM;
+ return erofs_inode_cachep ? 0 : -ENOMEM;
}
static void erofs_exit_inode_cache(void)
@@ -49,7 +50,7 @@ static struct inode *alloc_inode(struct super_block *sb)
struct erofs_vnode *vi =
kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
- if (vi == NULL)
+ if (!vi)
return NULL;
/* zero out everything except vfs_inode */
@@ -57,9 +58,8 @@ static struct inode *alloc_inode(struct super_block *sb)
return &vi->vfs_inode;
}
-static void i_callback(struct rcu_head *head)
+static void free_inode(struct inode *inode)
{
- struct inode *inode = container_of(head, struct inode, i_rcu);
struct erofs_vnode *vi = EROFS_V(inode);
/* be careful RCU symlink path (see ext4_inode_info->i_data)! */
@@ -71,11 +71,6 @@ static void i_callback(struct rcu_head *head)
kmem_cache_free(erofs_inode_cachep, vi);
}
-static void destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, i_callback);
-}
-
static int superblock_read(struct super_block *sb)
{
struct erofs_sb_info *sbi;
@@ -86,7 +81,7 @@ static int superblock_read(struct super_block *sb)
bh = sb_bread(sb, 0);
- if (bh == NULL) {
+ if (!bh) {
errln("cannot read erofs superblock");
return -EIO;
}
@@ -105,7 +100,7 @@ static int superblock_read(struct super_block *sb)
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
if (unlikely(blkszbits != LOG_BLOCK_SIZE)) {
errln("blksize %u isn't supported on this platform",
- 1 << blkszbits);
+ 1 << blkszbits);
goto out;
}
@@ -121,7 +116,7 @@ static int superblock_read(struct super_block *sb)
if (1 << (sbi->clusterbits - PAGE_SHIFT) > Z_EROFS_CLUSTER_MAX_PAGES)
errln("clusterbits %u is not supported on this kernel",
- sbi->clusterbits);
+ sbi->clusterbits);
#endif
sbi->root_nid = le16_to_cpu(layout->root_nid);
@@ -132,7 +127,7 @@ static int superblock_read(struct super_block *sb)
memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid));
memcpy(sbi->volume_name, layout->volume_name,
- sizeof(layout->volume_name));
+ sizeof(layout->volume_name));
ret = 0;
out:
@@ -141,8 +136,9 @@ out:
}
#ifdef CONFIG_EROFS_FAULT_INJECTION
-char *erofs_fault_name[FAULT_MAX] = {
+const char *erofs_fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
+ [FAULT_READ_IO] = "read IO error",
};
static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
@@ -239,7 +235,7 @@ static int parse_options(struct super_block *sb, char *options)
if (!options)
return 0;
- while ((p = strsep(&options, ",")) != NULL) {
+ while ((p = strsep(&options, ","))) {
int token;
if (!*p)
@@ -313,7 +309,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
}
static void managed_cache_invalidatepage(struct page *page,
- unsigned int offset, unsigned int length)
+ unsigned int offset,
+ unsigned int length)
{
const unsigned int stop = length + offset;
@@ -336,7 +333,7 @@ static struct inode *erofs_init_managed_cache(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
- if (unlikely(inode == NULL))
+ if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
set_nlink(inode, 1);
@@ -352,7 +349,8 @@ static struct inode *erofs_init_managed_cache(struct super_block *sb)
#endif
static int erofs_read_super(struct super_block *sb,
- const char *dev_name, void *data, int silent)
+ const char *dev_name,
+ void *data, int silent)
{
struct inode *inode;
struct erofs_sb_info *sbi;
@@ -367,7 +365,7 @@ static int erofs_read_super(struct super_block *sb,
}
sbi = kzalloc(sizeof(struct erofs_sb_info), GFP_KERNEL);
- if (unlikely(sbi == NULL)) {
+ if (unlikely(!sbi)) {
err = -ENOMEM;
goto err;
}
@@ -424,21 +422,21 @@ static int erofs_read_super(struct super_block *sb,
if (!S_ISDIR(inode->i_mode)) {
errln("rootino(nid %llu) is not a directory(i_mode %o)",
- ROOT_NID(sbi), inode->i_mode);
+ ROOT_NID(sbi), inode->i_mode);
err = -EINVAL;
iput(inode);
goto err_iget;
}
sb->s_root = d_make_root(inode);
- if (sb->s_root == NULL) {
+ if (!sb->s_root) {
err = -ENOMEM;
goto err_iget;
}
/* save the device name to sbi */
sbi->dev_name = __getname();
- if (sbi->dev_name == NULL) {
+ if (!sbi->dev_name) {
err = -ENOMEM;
goto err_devname;
}
@@ -450,7 +448,7 @@ static int erofs_read_super(struct super_block *sb,
if (!silent)
infoln("mounted on %s with opts: %s.", dev_name,
- (char *)data);
+ (char *)data);
return 0;
/*
* please add a label for each exit point and use
@@ -481,7 +479,7 @@ static void erofs_put_super(struct super_block *sb)
struct erofs_sb_info *sbi = EROFS_SB(sb);
/* for cases which are failed in "read_super" */
- if (sbi == NULL)
+ if (!sbi)
return;
WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
@@ -515,7 +513,7 @@ struct erofs_mount_private {
/* support mount_bdev() with options */
static int erofs_fill_super(struct super_block *sb,
- void *_priv, int silent)
+ void *_priv, int silent)
{
struct erofs_mount_private *priv = _priv;
@@ -635,7 +633,7 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
#endif
if (test_opt(sbi, FAULT_INJECTION))
seq_printf(seq, ",fault_injection=%u",
- erofs_get_fault_rate(sbi));
+ erofs_get_fault_rate(sbi));
return 0;
}
@@ -668,7 +666,7 @@ out:
const struct super_operations erofs_sops = {
.put_super = erofs_put_super,
.alloc_inode = alloc_inode,
- .destroy_inode = destroy_inode,
+ .free_inode = free_inode,
.statfs = erofs_statfs,
.show_options = erofs_show_options,
.remount_fs = erofs_remount,
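super.c moves from .destroy_inode plus a private call_rcu() callback to the newer .free_inode super operation: the VFS itself waits out the RCU grace period and then calls ->free_inode, so the filesystem only has to release its per-inode object. The shape of the pattern (my_inode_cachep, struct my_inode_info and MY_I() stand in for the filesystem's cache, inode container and container_of helper):

static struct kmem_cache *my_inode_cachep;

static struct inode *my_alloc_inode(struct super_block *sb)
{
	struct my_inode_info *vi = kmem_cache_alloc(my_inode_cachep, GFP_KERNEL);

	return vi ? &vi->vfs_inode : NULL;
}

/* Called by the VFS after an RCU grace period; no call_rcu() needed here. */
static void my_free_inode(struct inode *inode)
{
	kmem_cache_free(my_inode_cachep, MY_I(inode));
}

static const struct super_operations my_sops = {
	.alloc_inode	= my_alloc_inode,
	.free_inode	= my_free_inode,
};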
diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
index 23856ba2742d..f37d8fd14771 100644
--- a/drivers/staging/erofs/unzip_pagevec.h
+++ b/drivers/staging/erofs/unzip_pagevec.h
@@ -43,7 +43,7 @@ struct z_erofs_pagevec_ctor {
static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
bool atomic)
{
- if (ctor->curr == NULL)
+ if (!ctor->curr)
return;
if (atomic)
@@ -59,7 +59,7 @@ z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
unsigned index;
/* keep away from occupied pages */
- if (ctor->next != NULL)
+ if (ctor->next)
return ctor->next;
for (index = 0; index < nr; ++index) {
@@ -121,7 +121,7 @@ z_erofs_pagevec_ctor_enqueue(struct z_erofs_pagevec_ctor *ctor,
bool *occupied)
{
*occupied = false;
- if (unlikely(ctor->next == NULL && type))
+ if (unlikely(!ctor->next && type))
if (ctor->index + 1 == ctor->nr)
return false;
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 31eef8395774..9ecaa872bae8 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -313,12 +313,12 @@ static int z_erofs_vle_work_add_page(
/* give priority for the compressed data storage */
if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
- type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
- try_to_reuse_as_compressed_page(builder, page))
+ type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
+ try_to_reuse_as_compressed_page(builder, page))
return 0;
ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
- page, type, &occupied);
+ page, type, &occupied);
builder->work->vcnt += (unsigned int)ret;
return ret ? 0 : -EAGAIN;
@@ -464,10 +464,9 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
grp->obj.index = f->idx;
grp->llen = map->m_llen;
- z_erofs_vle_set_workgrp_fmt(grp,
- (map->m_flags & EROFS_MAP_ZIPPED) ?
- Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
- Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
+ z_erofs_vle_set_workgrp_fmt(grp, (map->m_flags & EROFS_MAP_ZIPPED) ?
+ Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
+ Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
/* new workgrps have been claimed as type 1 */
WRITE_ONCE(grp->next, *f->owned_head);
@@ -554,7 +553,8 @@ repeat:
return PTR_ERR(work);
got_it:
z_erofs_pagevec_ctor_init(&builder->vector,
- Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
+ Z_EROFS_VLE_INLINE_PAGEVECS,
+ work->pagevec, work->vcnt);
if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
/* enable possibly in-place decompression */
@@ -594,8 +594,9 @@ void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
call_rcu(&work->rcu, z_erofs_rcu_callback);
}
-static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
- struct z_erofs_vle_work *work __maybe_unused)
+static void
+__z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
+ struct z_erofs_vle_work *work __maybe_unused)
{
erofs_workgroup_put(&grp->obj);
}
@@ -715,7 +716,7 @@ repeat:
/* lucky, within the range of the current map_blocks */
if (offset + cur >= map->m_la &&
- offset + cur < map->m_la + map->m_llen) {
+ offset + cur < map->m_la + map->m_llen) {
/* didn't get a valid unzip work previously (very rare) */
if (!builder->work)
goto restart_now;
@@ -781,8 +782,8 @@ retry:
struct page *const newpage =
__stagingpage_alloc(page_pool, GFP_NOFS);
- err = z_erofs_vle_work_add_page(builder,
- newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+ err = z_erofs_vle_work_add_page(builder, newpage,
+ Z_EROFS_PAGE_TYPE_EXCLUSIVE);
if (likely(!err))
goto retry;
}
@@ -843,35 +844,30 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
static inline void z_erofs_vle_read_endio(struct bio *bio)
{
- const blk_status_t err = bio->bi_status;
- unsigned int i;
+ struct erofs_sb_info *sbi = NULL;
+ blk_status_t err = bio->bi_status;
struct bio_vec *bvec;
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- struct address_space *mc = NULL;
-#endif
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
bool cachemngd = false;
DBG_BUGON(PageUptodate(page));
DBG_BUGON(!page->mapping);
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
- struct inode *const inode = page->mapping->host;
- struct super_block *const sb = inode->i_sb;
+ if (unlikely(!sbi && !z_erofs_is_stagingpage(page))) {
+ sbi = EROFS_SB(page->mapping->host->i_sb);
- mc = MNGD_MAPPING(EROFS_SB(sb));
+ if (time_to_inject(sbi, FAULT_READ_IO)) {
+ erofs_show_injection_info(FAULT_READ_IO);
+ err = BLK_STS_IOERR;
+ }
}
- /*
- * If mc has not gotten, it equals NULL,
- * however, page->mapping never be NULL if working properly.
- */
- cachemngd = (page->mapping == mc);
-#endif
+ /* sbi should already be gotten if the page is managed */
+ if (sbi)
+ cachemngd = erofs_page_is_managed(sbi, page);
if (unlikely(err))
SetPageError(page);
@@ -890,8 +886,8 @@ static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);
static int z_erofs_vle_unzip(struct super_block *sb,
- struct z_erofs_vle_workgroup *grp,
- struct list_head *page_pool)
+ struct z_erofs_vle_workgroup *grp,
+ struct list_head *page_pool)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
const unsigned int clusterpages = erofs_clusterpages(sbi);
@@ -919,12 +915,12 @@ static int z_erofs_vle_unzip(struct super_block *sb,
if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
pages = pages_onstack;
else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
- mutex_trylock(&z_pagemap_global_lock))
+ mutex_trylock(&z_pagemap_global_lock))
pages = z_pagemap_global;
else {
repeat:
- pages = kvmalloc_array(nr_pages,
- sizeof(struct page *), GFP_KERNEL);
+ pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
/* fallback to global pagemap for the lowmem scenario */
if (unlikely(!pages)) {
@@ -940,8 +936,8 @@ repeat:
for (i = 0; i < nr_pages; ++i)
pages[i] = NULL;
- z_erofs_pagevec_ctor_init(&ctor,
- Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
+ z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_VLE_INLINE_PAGEVECS,
+ work->pagevec, 0);
for (i = 0; i < work->vcnt; ++i) {
unsigned int pagenr;
@@ -983,13 +979,11 @@ repeat:
DBG_BUGON(!page->mapping);
if (!z_erofs_is_stagingpage(page)) {
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (page->mapping == MNGD_MAPPING(sbi)) {
+ if (erofs_page_is_managed(sbi, page)) {
if (unlikely(!PageUptodate(page)))
err = -EIO;
continue;
}
-#endif
/*
* only if non-head page can be selected
@@ -1019,7 +1013,7 @@ repeat:
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
- pages, nr_pages, work->pageofs);
+ pages, nr_pages, work->pageofs);
goto out;
}
@@ -1048,8 +1042,8 @@ skip_allocpage:
goto out;
}
- err = z_erofs_vle_unzip_vmap(compressed_pages,
- clusterpages, vout, llen, work->pageofs, overlapped);
+ err = z_erofs_vle_unzip_vmap(compressed_pages, clusterpages, vout,
+ llen, work->pageofs, overlapped);
erofs_vunmap(vout, nr_pages);
@@ -1058,10 +1052,9 @@ out:
for (i = 0; i < clusterpages; ++i) {
page = compressed_pages[i];
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (page->mapping == MNGD_MAPPING(sbi))
+ if (erofs_page_is_managed(sbi, page))
continue;
-#endif
+
/* recycle all individual staging pages */
(void)z_erofs_gather_if_stagingpage(page_pool, page);
@@ -1446,10 +1439,8 @@ submit_bio_retry:
if (!bio) {
bio = erofs_grab_bio(sb, first_index + i,
- BIO_MAX_PAGES,
+ BIO_MAX_PAGES, bi_private,
z_erofs_vle_read_endio, true);
- bio->bi_private = bi_private;
-
++nr_bios;
}
@@ -1586,7 +1577,7 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
struct erofs_vnode *vi = EROFS_V(inode);
errln("%s, readahead error at page %lu of nid %llu",
- __func__, page->index, vi->nid);
+ __func__, page->index, vi->nid);
}
put_page(page);
@@ -1741,8 +1732,8 @@ vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
}
int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
+ struct erofs_map_blocks *map,
+ int flags)
{
void *kaddr;
const struct vle_map_blocks_iter_ctx ctx = {
@@ -1849,7 +1840,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* logical cluster number should be >= 1 */
if (unlikely(!lcn)) {
errln("invalid logical cluster 0 at nid %llu",
- EROFS_V(inode)->nid);
+ EROFS_V(inode)->nid);
err = -EIO;
goto unmap_out;
}
@@ -1869,7 +1860,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
break;
default:
errln("unknown cluster type %u at offset %llu of nid %llu",
- cluster_type, ofs, EROFS_V(inode)->nid);
+ cluster_type, ofs, EROFS_V(inode)->nid);
err = -EIO;
goto unmap_out;
}
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index 5f61f99f4c10..3e7d30b6de1d 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -61,7 +61,7 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
repeat:
rcu_read_lock();
grp = radix_tree_lookup(&sbi->workstn_tree, index);
- if (grp != NULL) {
+ if (grp) {
*tag = xa_pointer_tag(grp);
grp = xa_untag_pointer(grp);
@@ -221,7 +221,7 @@ repeat:
erofs_workstn_lock(sbi);
found = radix_tree_gang_lookup(&sbi->workstn_tree,
- batch, first_index, PAGEVEC_SIZE);
+ batch, first_index, PAGEVEC_SIZE);
for (i = 0; i < found; ++i) {
struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index f716ab0446e5..df40654b9fbb 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -36,7 +36,7 @@ static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
static inline void xattr_iter_end_final(struct xattr_iter *it)
{
- if (it->page == NULL)
+ if (!it->page)
return;
xattr_iter_end(it, true);
@@ -107,7 +107,7 @@ static int init_inode_xattrs(struct inode *inode)
vi->xattr_shared_count = ih->h_shared_count;
vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
sizeof(uint), GFP_KERNEL);
- if (vi->xattr_shared_xattrs == NULL) {
+ if (!vi->xattr_shared_xattrs) {
xattr_iter_end(&it, atomic_map);
ret = -ENOMEM;
goto out_unlock;
@@ -122,8 +122,8 @@ static int init_inode_xattrs(struct inode *inode)
BUG_ON(it.ofs != EROFS_BLKSIZ);
xattr_iter_end(&it, atomic_map);
- it.page = erofs_get_meta_page(sb,
- ++it.blkaddr, S_ISDIR(inode->i_mode));
+ it.page = erofs_get_meta_page(sb, ++it.blkaddr,
+ S_ISDIR(inode->i_mode));
if (IS_ERR(it.page)) {
kfree(vi->xattr_shared_xattrs);
vi->xattr_shared_xattrs = NULL;
@@ -187,7 +187,7 @@ static inline int xattr_iter_fixup(struct xattr_iter *it)
}
static int inline_xattr_iter_begin(struct xattr_iter *it,
- struct inode *inode)
+ struct inode *inode)
{
struct erofs_vnode *const vi = EROFS_V(inode);
struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
@@ -217,7 +217,8 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
* `ofs' pointing to the next xattr item rather than an arbitrary position.
*/
static int xattr_foreach(struct xattr_iter *it,
- const struct xattr_iter_handlers *op, unsigned int *tlimit)
+ const struct xattr_iter_handlers *op,
+ unsigned int *tlimit)
{
struct erofs_xattr_entry entry;
unsigned int value_sz, processed, slice;
@@ -234,7 +235,7 @@ static int xattr_foreach(struct xattr_iter *it,
* therefore entry should be in the page
*/
entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
- if (tlimit != NULL) {
+ if (tlimit) {
unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);
BUG_ON(*tlimit < entry_sz);
@@ -281,7 +282,7 @@ static int xattr_foreach(struct xattr_iter *it,
/* 3. handle xattr value */
processed = 0;
- if (op->alloc_buffer != NULL) {
+ if (op->alloc_buffer) {
err = op->alloc_buffer(it, value_sz);
if (err) {
it->ofs += value_sz;
@@ -321,7 +322,7 @@ struct getxattr_iter {
};
static int xattr_entrymatch(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
+ struct erofs_xattr_entry *entry)
{
struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
@@ -330,7 +331,7 @@ static int xattr_entrymatch(struct xattr_iter *_it,
}
static int xattr_namematch(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
+ unsigned int processed, char *buf, unsigned int len)
{
struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
@@ -338,17 +339,18 @@ static int xattr_namematch(struct xattr_iter *_it,
}
static int xattr_checkbuffer(struct xattr_iter *_it,
- unsigned int value_sz)
+ unsigned int value_sz)
{
struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
int err = it->buffer_size < value_sz ? -ERANGE : 0;
it->buffer_size = value_sz;
- return it->buffer == NULL ? 1 : err;
+ return !it->buffer ? 1 : err;
}
static void xattr_copyvalue(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
+ unsigned int processed,
+ char *buf, unsigned int len)
{
struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
@@ -429,13 +431,13 @@ static bool erofs_xattr_trusted_list(struct dentry *dentry)
}
int erofs_getxattr(struct inode *inode, int index,
- const char *name,
- void *buffer, size_t buffer_size)
+ const char *name,
+ void *buffer, size_t buffer_size)
{
int ret;
struct getxattr_iter it;
- if (unlikely(name == NULL))
+ if (unlikely(!name))
return -EINVAL;
ret = init_inode_xattrs(inode);
@@ -460,8 +462,8 @@ int erofs_getxattr(struct inode *inode, int index,
}
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *name, void *buffer, size_t size)
+ struct dentry *unused, struct inode *inode,
+ const char *name, void *buffer, size_t size)
{
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@@ -527,7 +529,7 @@ struct listxattr_iter {
};
static int xattr_entrylist(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
+ struct erofs_xattr_entry *entry)
{
struct listxattr_iter *it =
container_of(_it, struct listxattr_iter, it);
@@ -537,13 +539,13 @@ static int xattr_entrylist(struct xattr_iter *_it,
const struct xattr_handler *h =
erofs_xattr_handler(entry->e_name_index);
- if (h == NULL || (h->list != NULL && !h->list(it->dentry)))
+ if (!h || (h->list && !h->list(it->dentry)))
return 1;
prefix = xattr_prefix(h);
prefix_len = strlen(prefix);
- if (it->buffer == NULL) {
+ if (!it->buffer) {
it->buffer_ofs += prefix_len + entry->e_name_len + 1;
return 1;
}
@@ -558,7 +560,7 @@ static int xattr_entrylist(struct xattr_iter *_it,
}
static int xattr_namelist(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
+ unsigned int processed, char *buf, unsigned int len)
{
struct listxattr_iter *it =
container_of(_it, struct listxattr_iter, it);
@@ -569,7 +571,7 @@ static int xattr_namelist(struct xattr_iter *_it,
}
static int xattr_skipvalue(struct xattr_iter *_it,
- unsigned int value_sz)
+ unsigned int value_sz)
{
struct listxattr_iter *it =
container_of(_it, struct listxattr_iter, it);
@@ -641,7 +643,7 @@ static int shared_listxattr(struct listxattr_iter *it)
}
ssize_t erofs_listxattr(struct dentry *dentry,
- char *buffer, size_t buffer_size)
+ char *buffer, size_t buffer_size)
{
int ret;
struct listxattr_iter it;
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index 84b2e7ebc024..8ec524a95ec8 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
depends on FB && SPI
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 8f27bd8da17d..eeeeec97ad27 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -383,7 +383,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
/* select right side (sc1)
* set addr
*/
- write_reg(par, 0x01, 1 << 6);
+ write_reg(par, 0x01, BIT(6));
write_reg(par, 0x01, (0x17 << 3) | (u8)y);
/* write bitmap */
@@ -406,7 +406,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
static int write(struct fbtft_par *par, void *buf, size_t len)
{
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
gpiod_set_value(par->RW, 0); /* set write mode */
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index 70b37fc7fb66..398bdbf53c9a 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -24,7 +24,7 @@ static int write_spi(struct fbtft_par *par, void *buf, size_t len)
struct spi_message m;
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
if (!par->spi) {
dev_err(par->info->device,
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
index d7c5e2e0eee9..6cf9df579e88 100644
--- a/drivers/staging/fbtft/fb_ssd1306.c
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -184,7 +184,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
for (y = 0; y < yres / 8; y++) {
*buf = 0x00;
for (i = 0; i < 8; i++)
- *buf |= (vmem16[(y * 8 + i) * xres + x] ? 1 : 0) << i;
+ if (vmem16[(y * 8 + i) * xres + x])
+ *buf |= BIT(i);
buf++;
}
}
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 9f54fe28d511..4cfe9f8535d0 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -74,7 +74,8 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
for (i = 0; i < len; i++)
buf[i] = (u8)va_arg(args, unsigned int);
va_end(args);
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, u8, buf, len, "%s: ", __func__);
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device,
+ u8, buf, len, "%s: ", __func__);
}
va_start(args, len);
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index 9ac78ce30619..900b28d826b2 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -81,10 +81,10 @@ static int set_var(struct fbtft_par *par)
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0xA0, remap | 0x00 | 1 << 4);
+ write_reg(par, 0xA0, remap | 0x00 | BIT(4));
break;
case 270:
- write_reg(par, 0xA0, remap | 0x03 | 1 << 4);
+ write_reg(par, 0xA0, remap | 0x03 | BIT(4));
break;
case 180:
write_reg(par, 0xA0, remap | 0x02);
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 0a5206d28da4..27cc8eabcbe9 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -90,15 +90,10 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
return 0;
}
-#define RGB565toRGB323(c) ((((c) & 0xE000) >> 8) |\
- (((c) & 000600) >> 6) |\
- (((c) & 0x001C) >> 2))
-#define RGB565toRGB332(c) ((((c) & 0xE000) >> 8) |\
- (((c) & 000700) >> 6) |\
- (((c) & 0x0018) >> 3))
-#define RGB565toRGB233(c) ((((c) & 0xC000) >> 8) |\
- (((c) & 000700) >> 5) |\
- (((c) & 0x001C) >> 2))
+static inline int rgb565_to_rgb332(u16 c)
+{
+ return ((c & 0xE000) >> 8) | ((c & 000700) >> 6) | ((c & 0x0018) >> 3);
+}
static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
{
@@ -122,7 +117,7 @@ static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
for (i = start_line; i <= end_line; i++) {
pos[1] = cpu_to_be16(i);
for (j = 0; j < par->info->var.xres; j++) {
- buf8[j] = RGB565toRGB332(*vmem16);
+ buf8[j] = rgb565_to_rgb332(*vmem16);
vmem16++;
}
ret = par->fbtftops.write(par,
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index 38cdad6203ea..0863d257d762 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -14,7 +14,7 @@ int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len)
struct spi_message m;
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
if (!par->spi) {
dev_err(par->info->device,
@@ -47,7 +47,7 @@ int fbtft_write_spi_emulate_9(struct fbtft_par *par, void *buf, size_t len)
u64 val, dc, tmp;
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
if (!par->extra) {
dev_err(par->info->device, "%s: error: par->extra is NULL\n",
@@ -109,7 +109,7 @@ int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len)
txbuf[0] = par->startbyte | 0x3;
t.tx_buf = txbuf;
fbtft_par_dbg_hex(DEBUG_READ, par, par->info->device, u8,
- txbuf, len, "%s(len=%d) txbuf => ",
+ txbuf, len, "%s(len=%zu) txbuf => ",
__func__, len);
}
@@ -117,7 +117,7 @@ int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len)
spi_message_add_tail(&t, &m);
ret = spi_sync(par->spi, &m);
fbtft_par_dbg_hex(DEBUG_READ, par, par->info->device, u8, buf, len,
- "%s(len=%d) buf <= ", __func__, len);
+ "%s(len=%zu) buf <= ", __func__, len);
return ret;
}
@@ -136,7 +136,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
#endif
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
while (len--) {
data = *(u8 *)buf;
@@ -186,7 +186,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
#endif
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
while (len) {
data = *(u16 *)buf;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 7fdd3b0851ef..9b6bdb62093d 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -238,6 +238,7 @@ struct fbtft_par {
/* fbtft-core.c */
int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc);
+__printf(5, 6)
void fbtft_dbg_hex(const struct device *dev, int groupsize,
void *buf, size_t len, const char *fmt, ...);
struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 5f6cd0816d58..44e1410eb3fe 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -965,7 +965,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
#endif
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
while (len) {
data = *(u16 *)buf;
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index c5fa59105a43..3747321011fa 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -671,7 +671,8 @@ static int flexfb_probe_common(struct spi_device *sdev,
break;
case 9:
if (regwidth == 16) {
- dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n", regwidth, buswidth);
+ dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n",
+ regwidth, buswidth);
return -EINVAL;
}
par->fbtftops.write_register = fbtft_write_reg8_bus9;
@@ -718,7 +719,9 @@ static int flexfb_probe_common(struct spi_device *sdev,
par->fbtftops.write_vmem = fbtft_write_vmem16_bus16;
break;
default:
- dev_err(dev, "argument 'buswidth': %d is not supported with parallel.\n", buswidth);
+ dev_err(dev,
+ "argument 'buswidth': %d is not supported with parallel.\n",
+ buswidth);
return -EINVAL;
}
}
diff --git a/drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev b/drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev
new file mode 100644
index 000000000000..45f631ea32a6
--- /dev/null
+++ b/drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev
@@ -0,0 +1,31 @@
+What: /dev/fieldbus_devX
+Date: December 2018
+KernelVersion: 5.1 (staging)
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
+Description:
+ The cdev interface to drivers for Fieldbus Device Memory
+ (aka. Process Memory).
+
+ The following file operations are supported:
+
+ open(2)
+ Create an I/O context associated with the file descriptor.
+
+ read(2)
+ Read from Process Memory's "read area".
+ Clears POLLERR | POLLPRI from the file descriptor.
+
+ write(2)
+ Write to Process Memory's "write area".
+
+ poll(2), select(2), epoll_wait(2) etc.
+ When a "Process Memory Read Area Changed" event occurs,
+ POLLERR | POLLPRI will be set on the file descriptor.
+ Note that POLLIN | POLLOUT events are always set, because the
+ process memory area is always readable and writable.
+
+ close(2)
+ Free up the I/O context that was associated
+ with the file descriptor.
+
+Users: TBD
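The semantics above translate into a small poll-then-read loop in userspace. Below is a minimal illustrative sketch (not part of this patch); the device node name /dev/fieldbus_dev0 and the 512-byte buffer are placeholders, and a real client would size the buffer from the read_area_size sysfs attribute:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[512];  /* placeholder: use read_area_size from sysfs */
        struct pollfd pfd;
        ssize_t n;

        pfd.fd = open("/dev/fieldbus_dev0", O_RDWR);
        if (pfd.fd < 0)
                return 1;
        pfd.events = POLLPRI;
        /* block until a "Process Memory Read Area Changed" event occurs */
        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & (POLLERR | POLLPRI))) {
                /* this read clears POLLERR | POLLPRI on the descriptor */
                n = read(pfd.fd, buf, sizeof(buf));
                if (n >= 0)
                        printf("read area changed, got %zd bytes\n", n);
        }
        close(pfd.fd);
        return 0;
}

Writes to the Process Memory "write area" go through plain write(2) on the same descriptor.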
diff --git a/drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev b/drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev
new file mode 100644
index 000000000000..439f14d33c3b
--- /dev/null
+++ b/drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev
@@ -0,0 +1,62 @@
+What: /sys/class/fieldbus_dev/fieldbus_devX/card_name
+KernelVersion: 5.1 (staging)
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
+Description:
+ Human-readable name of the Fieldbus Device.
+
+What: /sys/class/fieldbus_dev/fieldbus_devX/fieldbus_type
+KernelVersion: 5.1 (staging)
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
+Description:
+ The type of fieldbus implemented by this device.
+ Possible values:
+ 'unknown'
+ 'profinet'
+
+What: /sys/class/fieldbus_dev/fieldbus_devX/fieldbus_id
+KernelVersion: 5.1 (staging)
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
+Description:
+ The unique fieldbus id associated with this device.
+ The exact format of this id is fieldbus type dependent, e.g.
+ a mac address for profinet.
+
+What: /sys/class/fieldbus_dev/fieldbus_devX/read_area_size
+KernelVersion: 5.1 (staging)
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
+Description:
+ The size, in bytes, of the Process Memory read area.
+ Note: this area is accessible by reading from the associated
+ character device (/dev/fieldbus_devX).
+
+What: /sys/class/fieldbus_dev/fieldbus_devX/write_area_size
+KernelVersion: 5.1 (staging)
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
+Description:
+ The size, in bytes, of the Process Memory write area.
+ Note: this area is accessible by writing to the associated
+ character device (/dev/fieldbus_devX).
+
+What: /sys/class/fieldbus_dev/fieldbus_devX/online
+KernelVersion: 5.1 (staging)
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
+Description:
+ Whether the fieldbus is online or offline.
+ Possible values:
+ '1' meaning 'online'
+ '0' meaning 'offline'
+ Note: an uevent is generated when this property changes.
+
+What: /sys/class/fieldbus_dev/fieldbus_devX/enabled
+KernelVersion: 5.1 (staging)
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
+Description:
+ Whether the device is enabled (power on) or
+ disabled (power off).
+ Possible values:
+ '1' meaning enabled
+ '0' meaning disabled
+ Normally a r/o property, but optionally r/w:
+ Writing '1' enables the device (power on) with default
+ settings.
+ Writing '0' disables the card (power off).
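As a usage note, the attributes above are plain text files. The following hypothetical helper (the attribute path is built from an assumed device index, which is not part of the ABI text itself) shows how a monitoring tool might read the r/w 'enabled' attribute:

#include <stdio.h>

/* returns 1 if enabled, 0 if disabled, -1 on error */
static int fieldbus_dev_is_enabled(int idx)
{
        char path[64];
        int val = -1;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/class/fieldbus_dev/fieldbus_dev%d/enabled", idx);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%d", &val) != 1)
                val = -1;
        fclose(f);
        return val;
}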
diff --git a/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt b/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt
new file mode 100644
index 000000000000..56af3f650fa3
--- /dev/null
+++ b/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt
@@ -0,0 +1,66 @@
+ Fieldbus-Device Subsystem
+ ============================================
+
+Part 0 - What is a Fieldbus Device?
+------------------------------------
+
+Fieldbus is the name of a family of industrial computer network protocols used
+for real-time distributed control, standardized as IEC 61158.
+
+A complex automated industrial system -- such as a manufacturing assembly line --
+usually needs a distributed control system -- an organized hierarchy of
+controller systems -- to function. In this hierarchy, there is usually a
+Human Machine Interface (HMI) at the top, where an operator can monitor or
+operate the system. This is typically linked to a middle layer of programmable
+logic controllers (PLC) via a non-time-critical communications system
+(e.g. Ethernet). At the bottom of the control chain is the fieldbus that links
+the PLCs to the components that actually do the work, such as sensors,
+actuators, electric motors, console lights, switches, valves and contactors.
+
+(Source: Wikipedia)
+
+A "Fieldbus Device" is such an actuator, motor, console light, switch, ...
+controlled via the Fieldbus by a PLC aka "Fieldbus Controller".
+
+Communication between PLC and device typically happens via process data memory,
+separated into input and output areas. The Fieldbus then cyclically transfers
+the PLC's output area to the device's input area, and vice versa.
+
+Part I - Why do we need this subsystem?
+---------------------------------------
+
+Fieldbus device (client) adapters are commercially available. They allow data
+exchange with a PLC aka "Fieldbus Controller" via process data memory.
+
+They are typically used when a Linux device wants to expose itself as an
+actuator, motor, console light, switch, etc. over the fieldbus.
+
+The purpose of this subsystem is to:
+a) present a general, standardized, extensible API/ABI to userspace; and
+b) present a convenient interface to drivers.
+
+Part II - How can drivers use the subsystem?
+--------------------------------------------
+
+Any driver that wants to register as a Fieldbus Device should allocate and
+populate a 'struct fieldbus_dev' (from include/linux/fieldbus_dev.h).
+Registration then happens by calling fieldbus_dev_register().
+
+Part III - How can userspace use the subsystem?
+-----------------------------------------------
+
+Fieldbus protocols and adapters are diverse and varied. However, they share
+a small set of common behaviours and properties. This allows us to define
+a simple interface consisting of a character device and a set of sysfs files:
+
+See:
+Documentation/ABI/testing/sysfs-class-fieldbus-dev
+Documentation/ABI/testing/fieldbus-dev-cdev
+
+Note that this simple interface does not provide a way to modify adapter
+configuration settings. It is therefore useful only for adapters that get their
+configuration settings some other way, e.g. non-volatile memory on the adapter,
+through the network, ...
+
+At a later phase, this simple interface can easily co-exist with a future
+(netlink-based?) configuration settings interface.
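To make Part II concrete, a registration sequence roughly looks like the sketch below. This is illustrative only and not part of this patch; the field and callback names follow the hms-profinet client driver added later in this series, the sizes and strings are placeholders, and most callbacks are omitted:

#include <linux/device.h>
#include <linux/slab.h>
#include "fieldbus_dev.h"       /* include/linux/fieldbus_dev.h once out of staging */

struct demo_priv {
        struct fieldbus_dev fbdev;
        /* adapter-specific state goes here */
};

static ssize_t demo_read_area(struct fieldbus_dev *fbdev, char __user *buf,
                              size_t size, loff_t *offset)
{
        /* copy from the adapter's process memory "read area" into buf */
        return 0;
}

static int demo_register(struct device *parent)
{
        struct demo_priv *priv;

        priv = devm_kzalloc(parent, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        priv->fbdev.fieldbus_type = FIELDBUS_DEV_TYPE_PROFINET;
        priv->fbdev.card_name = "demo fieldbus adapter";
        priv->fbdev.read_area_sz = 512;         /* placeholder sizes */
        priv->fbdev.write_area_sz = 512;
        priv->fbdev.read_area = demo_read_area;
        priv->fbdev.parent = parent;
        return fieldbus_dev_register(&priv->fbdev);
}

Unregistration mirrors this via fieldbus_dev_unregister(), and the driver reports events with fieldbus_dev_area_updated() and fieldbus_dev_online_changed(), as the hms-profinet driver does further down in this patch.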
diff --git a/drivers/staging/fieldbus/Kconfig b/drivers/staging/fieldbus/Kconfig
new file mode 100644
index 000000000000..e5e28e52c59b
--- /dev/null
+++ b/drivers/staging/fieldbus/Kconfig
@@ -0,0 +1,18 @@
+menuconfig FIELDBUS_DEV
+ tristate "Fieldbus Device Support"
+ help
+ Support for Fieldbus Device Adapters.
+
+ Fieldbus device (client) adapters allow data exchange with a PLC aka.
+ "Fieldbus Controller" over a fieldbus (Profinet, FLNet, etc.)
+
+ They are typically used when a Linux device wants to expose itself
+ as an actuator, motor, console light, switch, etc. over the fieldbus.
+
+ This framework is designed to provide a generic interface to Fieldbus
+ Devices from both the Linux Kernel and userspace.
+
+ If unsure, say N.
+
+source "drivers/staging/fieldbus/anybuss/Kconfig"
+
diff --git a/drivers/staging/fieldbus/Makefile b/drivers/staging/fieldbus/Makefile
new file mode 100644
index 000000000000..bdf645d41344
--- /dev/null
+++ b/drivers/staging/fieldbus/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for fieldbus_dev drivers.
+#
+
+obj-$(CONFIG_FIELDBUS_DEV) += fieldbus_dev.o anybuss/
+fieldbus_dev-y := dev_core.o
diff --git a/drivers/staging/fieldbus/TODO b/drivers/staging/fieldbus/TODO
new file mode 100644
index 000000000000..6d6626af4ec7
--- /dev/null
+++ b/drivers/staging/fieldbus/TODO
@@ -0,0 +1,5 @@
+TODO:
+-Get more people/drivers to use the Fieldbus userspace ABI. It requires
+ verification/sign-off by multiple users.
+
+Contact: Sven Van Asbroeck <TheSven73@gmail.com>
diff --git a/drivers/staging/fieldbus/anybuss/Kconfig b/drivers/staging/fieldbus/anybuss/Kconfig
new file mode 100644
index 000000000000..41f241c73826
--- /dev/null
+++ b/drivers/staging/fieldbus/anybuss/Kconfig
@@ -0,0 +1,39 @@
+config HMS_ANYBUSS_BUS
+ tristate "HMS Anybus-S Bus Support"
+ select REGMAP
+ depends on OF && FIELDBUS_DEV
+ help
+ Driver for the HMS Industrial Networks Anybus-S bus.
+ You can attach a single Anybus-S compatible card to it, which
+ typically provides fieldbus and industrial ethernet
+ functionality.
+
+if HMS_ANYBUSS_BUS
+
+config ARCX_ANYBUS_CONTROLLER
+ tristate "Arcx Anybus-S Controller"
+ depends on OF && GPIOLIB && HAS_IOMEM && REGULATOR
+ help
+ Select this to get support for the Arcx Anybus controller.
+ It connects to the SoC via a parallel memory bus, and
+ embeds up to two Anybus-S buses (slots).
+ There is also a CAN power readout, unrelated to the Anybus,
+ modelled as a regulator.
+
+config HMS_PROFINET
+ tristate "HMS Profinet IRT Controller (Anybus-S)"
+ depends on FIELDBUS_DEV && HMS_ANYBUSS_BUS
+ help
+ If you say yes here you get support for the HMS Industrial
+ Networks Profinet IRT Controller.
+
+ It will be registered with the kernel as a fieldbus_dev,
+ so userspace can interact with it via the fieldbus_dev userspace
+ interface(s).
+
+ This driver can also be built as a module. If so, the module
+ will be called hms-profinet.
+
+ If unsure, say N.
+
+endif
diff --git a/drivers/staging/fieldbus/anybuss/Makefile b/drivers/staging/fieldbus/anybuss/Makefile
new file mode 100644
index 000000000000..3ad3dcc6be56
--- /dev/null
+++ b/drivers/staging/fieldbus/anybuss/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for anybuss drivers.
+#
+
+obj-$(CONFIG_HMS_ANYBUSS_BUS) += anybuss_core.o
+anybuss_core-y += host.o
+
+obj-$(CONFIG_ARCX_ANYBUS_CONTROLLER) += arcx-anybus.o
+obj-$(CONFIG_HMS_PROFINET) += hms-profinet.o
diff --git a/drivers/staging/fieldbus/anybuss/anybuss-client.h b/drivers/staging/fieldbus/anybuss/anybuss-client.h
new file mode 100644
index 000000000000..0c4b6a1ffe10
--- /dev/null
+++ b/drivers/staging/fieldbus/anybuss/anybuss-client.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Anybus-S client adapter definitions
+ *
+ * Copyright 2018 Arcx Inc
+ */
+
+#ifndef __LINUX_ANYBUSS_CLIENT_H__
+#define __LINUX_ANYBUSS_CLIENT_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/poll.h>
+
+struct anybuss_host;
+
+struct anybuss_client {
+ struct device dev;
+ struct anybuss_host *host;
+ __be16 anybus_id;
+ /*
+ * these can be optionally set by the client to receive event
+ * notifications from the host.
+ */
+ void (*on_area_updated)(struct anybuss_client *client);
+ void (*on_online_changed)(struct anybuss_client *client, bool online);
+};
+
+struct anybuss_client_driver {
+ struct device_driver driver;
+ int (*probe)(struct anybuss_client *adev);
+ int (*remove)(struct anybuss_client *adev);
+ u16 anybus_id;
+};
+
+int anybuss_client_driver_register(struct anybuss_client_driver *drv);
+void anybuss_client_driver_unregister(struct anybuss_client_driver *drv);
+
+static inline struct anybuss_client *to_anybuss_client(struct device *dev)
+{
+ return container_of(dev, struct anybuss_client, dev);
+}
+
+static inline struct anybuss_client_driver *
+to_anybuss_client_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct anybuss_client_driver, driver);
+}
+
+static inline void *
+anybuss_get_drvdata(const struct anybuss_client *client)
+{
+ return dev_get_drvdata(&client->dev);
+}
+
+static inline void
+anybuss_set_drvdata(struct anybuss_client *client, void *data)
+{
+ dev_set_drvdata(&client->dev, data);
+}
+
+int anybuss_set_power(struct anybuss_client *client, bool power_on);
+
+enum anybuss_offl_mode {
+ AB_OFFL_MODE_CLEAR = 0,
+ AB_OFFL_MODE_FREEZE,
+ AB_OFFL_MODE_SET
+};
+
+struct anybuss_memcfg {
+ u16 input_io;
+ u16 input_dpram;
+ u16 input_total;
+
+ u16 output_io;
+ u16 output_dpram;
+ u16 output_total;
+
+ enum anybuss_offl_mode offl_mode;
+};
+
+int anybuss_start_init(struct anybuss_client *client,
+ const struct anybuss_memcfg *cfg);
+int anybuss_finish_init(struct anybuss_client *client);
+int anybuss_read_fbctrl(struct anybuss_client *client, u16 addr,
+ void *buf, size_t count);
+int anybuss_send_msg(struct anybuss_client *client, u16 cmd_num,
+ const void *buf, size_t count);
+int anybuss_send_ext(struct anybuss_client *client, u16 cmd_num,
+ const void *buf, size_t count);
+int anybuss_recv_msg(struct anybuss_client *client, u16 cmd_num,
+ void *buf, size_t count);
+
+/* these help clients make a struct file_operations */
+int anybuss_write_input(struct anybuss_client *client,
+ const char __user *buf, size_t size,
+ loff_t *offset);
+int anybuss_read_output(struct anybuss_client *client,
+ char __user *buf, size_t size,
+ loff_t *offset);
+
+#endif /* __LINUX_ANYBUSS_CLIENT_H__ */
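Taken together, the declarations above are the whole client-facing API: a client driver binds to an Anybus module id, optionally hooks the two event callbacks, and drives the card with the power/init/mailbox helpers. A bare-bones hypothetical client is sketched below (the module id 0x0000 and all names are placeholders); hms-profinet.c, added later in this patch, is the real in-tree example:

#include <linux/module.h>
#include "anybuss-client.h"

static int demo_probe(struct anybuss_client *client)
{
        /*
         * a real client would follow this with anybuss_start_init() /
         * anybuss_finish_init() to configure the card
         */
        return anybuss_set_power(client, true);
}

static int demo_remove(struct anybuss_client *client)
{
        return anybuss_set_power(client, false);
}

static struct anybuss_client_driver demo_driver = {
        .probe = demo_probe,
        .remove = demo_remove,
        .driver = {
                .name = "demo-anybus-client",
                .owner = THIS_MODULE,
        },
        .anybus_id = 0x0000,    /* placeholder Anybus module id */
};

static int __init demo_init(void)
{
        return anybuss_client_driver_register(&demo_driver);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
        anybuss_client_driver_unregister(&demo_driver);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");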
diff --git a/drivers/staging/fieldbus/anybuss/anybuss-controller.h b/drivers/staging/fieldbus/anybuss/anybuss-controller.h
new file mode 100644
index 000000000000..02fa0749043b
--- /dev/null
+++ b/drivers/staging/fieldbus/anybuss/anybuss-controller.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Anybus-S controller definitions
+ *
+ * Copyright 2018 Arcx Inc
+ */
+
+#ifndef __LINUX_ANYBUSS_CONTROLLER_H__
+#define __LINUX_ANYBUSS_CONTROLLER_H__
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/*
+ * To instantiate an Anybus-S host, a controller should provide the following:
+ * - a reset function which resets the attached card;
+ * - a regmap which provides access to the attached card's dpram;
+ * - the irq of the attached card
+ */
+/**
+ * struct anybuss_ops - Controller resources to instantiate an Anybus-S host
+ *
+ * @reset: asserts/deasserts the anybus card's reset line.
+ * @regmap: provides access to the card's dual-port RAM area.
+ * @irq: number of the interrupt connected to the card's interrupt line.
+ * @host_idx: for multi-host controllers, the host index:
+ * 0 for the first host on the controller, 1 for the second, etc.
+ */
+struct anybuss_ops {
+ void (*reset)(struct device *dev, bool assert);
+ struct regmap *regmap;
+ int irq;
+ int host_idx;
+};
+
+struct anybuss_host;
+
+struct anybuss_host * __must_check
+anybuss_host_common_probe(struct device *dev,
+ const struct anybuss_ops *ops);
+void anybuss_host_common_remove(struct anybuss_host *host);
+
+struct anybuss_host * __must_check
+devm_anybuss_host_common_probe(struct device *dev,
+ const struct anybuss_ops *ops);
+
+#endif /* __LINUX_ANYBUSS_CONTROLLER_H__ */
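In code, the controller contract described above reduces to filling in a struct anybuss_ops and handing it to the (devm_) probe helper. The sketch below is illustrative only; demo_dpram_regmap() is a hypothetical stand-in for whatever bus-specific regmap the controller builds, and the arcx-anybus.c driver that follows is the real in-tree example:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "anybuss-controller.h"

static struct regmap *demo_dpram_regmap(struct platform_device *pdev)
{
        /*
         * a real controller returns a regmap covering the card's
         * dual-port RAM, see create_parallel_regmap() in arcx-anybus.c
         */
        return ERR_PTR(-ENODEV);
}

static void demo_reset(struct device *dev, bool assert)
{
        /* assert/deassert the card's reset line (GPIO, CPLD bit, ...) */
}

/* called from a struct platform_driver's .probe in a real controller */
static int demo_controller_probe(struct platform_device *pdev)
{
        struct anybuss_ops ops = {
                .reset = demo_reset,
                .host_idx = 0,
        };
        struct anybuss_host *host;

        ops.regmap = demo_dpram_regmap(pdev);
        if (IS_ERR(ops.regmap))
                return PTR_ERR(ops.regmap);
        ops.irq = platform_get_irq(pdev, 0);
        if (ops.irq <= 0)
                return -EINVAL;
        host = devm_anybuss_host_common_probe(&pdev->dev, &ops);
        return PTR_ERR_OR_ZERO(host);
}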
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
new file mode 100644
index 000000000000..a167fb68e355
--- /dev/null
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arcx Anybus-S Controller driver
+ *
+ * Copyright (C) 2018 Arcx Inc
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/mutex.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+/* move to <linux/anybuss-controller.h> when taking this out of staging */
+#include "anybuss-controller.h"
+
+#define CPLD_STATUS1 0x80
+#define CPLD_CONTROL 0x80
+#define CPLD_CONTROL_CRST 0x40
+#define CPLD_CONTROL_RST1 0x04
+#define CPLD_CONTROL_RST2 0x80
+#define CPLD_STATUS1_AB 0x02
+#define CPLD_STATUS1_CAN_POWER 0x01
+#define CPLD_DESIGN_LO 0x81
+#define CPLD_DESIGN_HI 0x82
+#define CPLD_CAP 0x83
+#define CPLD_CAP_COMPAT 0x01
+#define CPLD_CAP_SEP_RESETS 0x02
+
+struct controller_priv {
+ struct device *class_dev;
+ bool common_reset;
+ struct gpio_desc *reset_gpiod;
+ void __iomem *cpld_base;
+ struct mutex ctrl_lock; /* protects CONTROL register */
+ u8 control_reg;
+ char version[3];
+ u16 design_no;
+};
+
+static void do_reset(struct controller_priv *cd, u8 rst_bit, bool reset)
+{
+ mutex_lock(&cd->ctrl_lock);
+ /*
+ * CPLD_CONTROL is write-only, so cache its value in
+ * cd->control_reg
+ */
+ if (reset)
+ cd->control_reg &= ~rst_bit;
+ else
+ cd->control_reg |= rst_bit;
+ writeb(cd->control_reg, cd->cpld_base + CPLD_CONTROL);
+ /*
+ * h/w work-around:
+ * the hardware is 'too fast', so a reset followed by an immediate
+ * not-reset will _not_ change the anybus reset line in any way,
+ * losing the reset. to prevent this from happening, introduce
+ * a minimum reset duration.
+ * Verified minimum safe duration required using a scope
+ * on 14-June-2018: 100 us.
+ */
+ if (reset)
+ usleep_range(100, 200);
+ mutex_unlock(&cd->ctrl_lock);
+}
+
+static int anybuss_reset(struct controller_priv *cd,
+ unsigned long id, bool reset)
+{
+ if (id >= 2)
+ return -EINVAL;
+ if (cd->common_reset)
+ do_reset(cd, CPLD_CONTROL_CRST, reset);
+ else
+ do_reset(cd, id ? CPLD_CONTROL_RST2 : CPLD_CONTROL_RST1, reset);
+ return 0;
+}
+
+static void export_reset_0(struct device *dev, bool assert)
+{
+ struct controller_priv *cd = dev_get_drvdata(dev);
+
+ anybuss_reset(cd, 0, assert);
+}
+
+static void export_reset_1(struct device *dev, bool assert)
+{
+ struct controller_priv *cd = dev_get_drvdata(dev);
+
+ anybuss_reset(cd, 1, assert);
+}
+
+/*
+ * parallel bus limitation:
+ *
+ * the anybus is 8-bit wide. we can't assume that the hardware will translate
+ * word accesses on the parallel bus to multiple byte-accesses on the anybus.
+ *
+ * the imx WEIM bus does not provide this type of translation.
+ *
+ * to be safe, we will limit parallel bus accesses to a single byte
+ * at a time for now.
+ */
+
+static int read_reg_bus(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ void __iomem *base = context;
+
+ *val = readb(base + reg);
+ return 0;
+}
+
+static int write_reg_bus(void *context, unsigned int reg,
+ unsigned int val)
+{
+ void __iomem *base = context;
+
+ writeb(val, base + reg);
+ return 0;
+}
+
+static struct regmap *create_parallel_regmap(struct platform_device *pdev,
+ int idx)
+{
+ struct regmap_config regmap_cfg = {
+ .reg_bits = 11,
+ .val_bits = 8,
+ /*
+ * single-byte parallel bus accesses are atomic, so don't
+ * require any synchronization.
+ */
+ .disable_locking = true,
+ .reg_read = read_reg_bus,
+ .reg_write = write_reg_bus,
+ };
+ struct resource *res;
+ void __iomem *base;
+ struct device *dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1);
+ if (resource_size(res) < (1 << regmap_cfg.reg_bits))
+ return ERR_PTR(-EINVAL);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+ return devm_regmap_init(dev, NULL, base, &regmap_cfg);
+}
+
+static struct anybuss_host *
+create_anybus_host(struct platform_device *pdev, int idx)
+{
+ struct anybuss_ops ops = {};
+
+ switch (idx) {
+ case 0:
+ ops.reset = export_reset_0;
+ break;
+ case 1:
+ ops.reset = export_reset_1;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ ops.host_idx = idx;
+ ops.regmap = create_parallel_regmap(pdev, idx);
+ if (IS_ERR(ops.regmap))
+ return ERR_CAST(ops.regmap);
+ ops.irq = platform_get_irq(pdev, idx);
+ if (ops.irq <= 0)
+ return ERR_PTR(-EINVAL);
+ return devm_anybuss_host_common_probe(&pdev->dev, &ops);
+}
+
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct controller_priv *cd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", cd->version);
+}
+static DEVICE_ATTR_RO(version);
+
+static ssize_t design_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct controller_priv *cd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cd->design_no);
+}
+static DEVICE_ATTR_RO(design_number);
+
+static struct attribute *controller_attributes[] = {
+ &dev_attr_version.attr,
+ &dev_attr_design_number.attr,
+ NULL,
+};
+
+static struct attribute_group controller_attribute_group = {
+ .attrs = controller_attributes,
+};
+
+static const struct attribute_group *controller_attribute_groups[] = {
+ &controller_attribute_group,
+ NULL,
+};
+
+static void controller_device_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static int can_power_is_enabled(struct regulator_dev *rdev)
+{
+ struct controller_priv *cd = rdev_get_drvdata(rdev);
+
+ return !(readb(cd->cpld_base + CPLD_STATUS1) & CPLD_STATUS1_CAN_POWER);
+}
+
+static struct regulator_ops can_power_ops = {
+ .is_enabled = can_power_is_enabled,
+};
+
+static const struct regulator_desc can_power_desc = {
+ .name = "regulator-can-power",
+ .id = -1,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &can_power_ops,
+};
+
+static struct class *controller_class;
+static DEFINE_IDA(controller_index_ida);
+
+static int controller_probe(struct platform_device *pdev)
+{
+ struct controller_priv *cd;
+ struct device *dev = &pdev->dev;
+ struct regulator_config config = { };
+ struct regulator_dev *regulator;
+ int err, id;
+ struct resource *res;
+ struct anybuss_host *host;
+ u8 status1, cap;
+
+ cd = devm_kzalloc(dev, sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+ dev_set_drvdata(dev, cd);
+ mutex_init(&cd->ctrl_lock);
+ cd->reset_gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(cd->reset_gpiod))
+ return PTR_ERR(cd->reset_gpiod);
+
+ /* CPLD control memory, sits at index 0 */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cd->cpld_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cd->cpld_base)) {
+ dev_err(dev,
+ "failed to map cpld base address\n");
+ err = PTR_ERR(cd->cpld_base);
+ goto out_reset;
+ }
+
+ /* identify cpld */
+ status1 = readb(cd->cpld_base + CPLD_STATUS1);
+ cd->design_no = (readb(cd->cpld_base + CPLD_DESIGN_HI) << 8) |
+ readb(cd->cpld_base + CPLD_DESIGN_LO);
+ snprintf(cd->version, sizeof(cd->version), "%c%d",
+ 'A' + ((status1 >> 5) & 0x7),
+ (status1 >> 2) & 0x7);
+ dev_info(dev, "design number %d, revision %s\n",
+ cd->design_no,
+ cd->version);
+ cap = readb(cd->cpld_base + CPLD_CAP);
+ if (!(cap & CPLD_CAP_COMPAT)) {
+ dev_err(dev, "unsupported controller [cap=0x%02X]", cap);
+ err = -ENODEV;
+ goto out_reset;
+ }
+
+ if (status1 & CPLD_STATUS1_AB) {
+ dev_info(dev, "has anybus-S slot(s)");
+ cd->common_reset = !(cap & CPLD_CAP_SEP_RESETS);
+ dev_info(dev, "supports %s", cd->common_reset ?
+ "a common reset" : "separate resets");
+ for (id = 0; id < 2; id++) {
+ host = create_anybus_host(pdev, id);
+ if (!IS_ERR(host))
+ continue;
+ err = PTR_ERR(host);
+ /* -ENODEV is fine, it just means no card detected */
+ if (err != -ENODEV)
+ goto out_reset;
+ }
+ }
+
+ id = ida_simple_get(&controller_index_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ err = id;
+ goto out_reset;
+ }
+ /* export can power readout as a regulator */
+ config.dev = dev;
+ config.driver_data = cd;
+ regulator = devm_regulator_register(dev, &can_power_desc, &config);
+ if (IS_ERR(regulator)) {
+ err = PTR_ERR(regulator);
+ goto out_reset;
+ }
+ /* make controller info visible to userspace */
+ cd->class_dev = kzalloc(sizeof(*cd->class_dev), GFP_KERNEL);
+ if (!cd->class_dev) {
+ err = -ENOMEM;
+ goto out_ida;
+ }
+ cd->class_dev->class = controller_class;
+ cd->class_dev->groups = controller_attribute_groups;
+ cd->class_dev->parent = dev;
+ cd->class_dev->id = id;
+ cd->class_dev->release = controller_device_release;
+ dev_set_name(cd->class_dev, "%d", cd->class_dev->id);
+ dev_set_drvdata(cd->class_dev, cd);
+ err = device_register(cd->class_dev);
+ if (err)
+ goto out_dev;
+ return 0;
+out_dev:
+ put_device(cd->class_dev);
+out_ida:
+ ida_simple_remove(&controller_index_ida, id);
+out_reset:
+ gpiod_set_value_cansleep(cd->reset_gpiod, 1);
+ return err;
+}
+
+static int controller_remove(struct platform_device *pdev)
+{
+ struct controller_priv *cd = platform_get_drvdata(pdev);
+ int id = cd->class_dev->id;
+
+ device_unregister(cd->class_dev);
+ ida_simple_remove(&controller_index_ida, id);
+ gpiod_set_value_cansleep(cd->reset_gpiod, 1);
+ return 0;
+}
+
+static const struct of_device_id controller_of_match[] = {
+ { .compatible = "arcx,anybus-controller" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, controller_of_match);
+
+static struct platform_driver controller_driver = {
+ .probe = controller_probe,
+ .remove = controller_remove,
+ .driver = {
+ .name = "arcx-anybus-controller",
+ .of_match_table = of_match_ptr(controller_of_match),
+ },
+};
+
+static int __init controller_init(void)
+{
+ int err;
+
+ controller_class = class_create(THIS_MODULE, "arcx_anybus_controller");
+ if (IS_ERR(controller_class))
+ return PTR_ERR(controller_class);
+ err = platform_driver_register(&controller_driver);
+ if (err)
+ class_destroy(controller_class);
+
+ return err;
+}
+
+static void __exit controller_exit(void)
+{
+ platform_driver_unregister(&controller_driver);
+ class_destroy(controller_class);
+ ida_destroy(&controller_index_ida);
+}
+
+module_init(controller_init);
+module_exit(controller_exit);
+
+MODULE_DESCRIPTION("Arcx Anybus-S Controller driver");
+MODULE_AUTHOR("Sven Van Asbroeck <TheSven73@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/fieldbus/anybuss/hms-profinet.c b/drivers/staging/fieldbus/anybuss/hms-profinet.c
new file mode 100644
index 000000000000..5446843e35f4
--- /dev/null
+++ b/drivers/staging/fieldbus/anybuss/hms-profinet.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HMS Profinet Client Driver
+ *
+ * Copyright (C) 2018 Arcx Inc
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+/* move to <linux/fieldbus_dev.h> when taking this out of staging */
+#include "../fieldbus_dev.h"
+
+/* move to <linux/anybuss-client.h> when taking this out of staging */
+#include "anybuss-client.h"
+
+#define PROFI_DPRAM_SIZE 512
+
+/*
+ * ---------------------------------------------------------------
+ * Anybus Profinet mailbox messages - definitions
+ * ---------------------------------------------------------------
+ * note that we're depending on the layout of these structures being
+ * exactly as advertised.
+ */
+
+struct msg_mac_addr {
+ u8 addr[6];
+};
+
+struct profi_priv {
+ struct fieldbus_dev fbdev;
+ struct anybuss_client *client;
+ struct mutex enable_lock; /* serializes card enable */
+ bool power_on;
+};
+
+static ssize_t
+profi_read_area(struct fieldbus_dev *fbdev, char __user *buf, size_t size,
+ loff_t *offset)
+{
+ struct profi_priv *priv = container_of(fbdev, struct profi_priv, fbdev);
+
+ return anybuss_read_output(priv->client, buf, size, offset);
+}
+
+static ssize_t
+profi_write_area(struct fieldbus_dev *fbdev, const char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct profi_priv *priv = container_of(fbdev, struct profi_priv, fbdev);
+
+ return anybuss_write_input(priv->client, buf, size, offset);
+}
+
+static int profi_id_get(struct fieldbus_dev *fbdev, char *buf,
+ size_t max_size)
+{
+ struct profi_priv *priv = container_of(fbdev, struct profi_priv, fbdev);
+ struct msg_mac_addr response;
+ int ret;
+
+ ret = anybuss_recv_msg(priv->client, 0x0010, &response,
+ sizeof(response));
+ if (ret < 0)
+ return ret;
+ return snprintf(buf, max_size, "%02X:%02X:%02X:%02X:%02X:%02X\n",
+ response.addr[0], response.addr[1],
+ response.addr[2], response.addr[3],
+ response.addr[4], response.addr[5]);
+}
+
+static bool profi_enable_get(struct fieldbus_dev *fbdev)
+{
+ struct profi_priv *priv = container_of(fbdev, struct profi_priv, fbdev);
+ bool power_on;
+
+ mutex_lock(&priv->enable_lock);
+ power_on = priv->power_on;
+ mutex_unlock(&priv->enable_lock);
+
+ return power_on;
+}
+
+static int __profi_enable(struct profi_priv *priv)
+{
+ int ret;
+ struct anybuss_client *client = priv->client;
+ /* Initialization Sequence, Generic Anybus Mode */
+ const struct anybuss_memcfg mem_cfg = {
+ .input_io = 220,
+ .input_dpram = PROFI_DPRAM_SIZE,
+ .input_total = PROFI_DPRAM_SIZE,
+ .output_io = 220,
+ .output_dpram = PROFI_DPRAM_SIZE,
+ .output_total = PROFI_DPRAM_SIZE,
+ .offl_mode = AB_OFFL_MODE_CLEAR,
+ };
+
+ /*
+ * switch anybus off then on, this ensures we can do a complete
+ * configuration cycle in case anybus was already on.
+ */
+ anybuss_set_power(client, false);
+ ret = anybuss_set_power(client, true);
+ if (ret)
+ goto err;
+ ret = anybuss_start_init(client, &mem_cfg);
+ if (ret)
+ goto err;
+ ret = anybuss_finish_init(client);
+ if (ret)
+ goto err;
+ priv->power_on = true;
+ return 0;
+
+err:
+ anybuss_set_power(client, false);
+ priv->power_on = false;
+ return ret;
+}
+
+static int __profi_disable(struct profi_priv *priv)
+{
+ struct anybuss_client *client = priv->client;
+
+ anybuss_set_power(client, false);
+ priv->power_on = false;
+ return 0;
+}
+
+static int profi_simple_enable(struct fieldbus_dev *fbdev, bool enable)
+{
+ int ret;
+ struct profi_priv *priv = container_of(fbdev, struct profi_priv, fbdev);
+
+ mutex_lock(&priv->enable_lock);
+ if (enable)
+ ret = __profi_enable(priv);
+ else
+ ret = __profi_disable(priv);
+ mutex_unlock(&priv->enable_lock);
+
+ return ret;
+}
+
+static void profi_on_area_updated(struct anybuss_client *client)
+{
+ struct profi_priv *priv = anybuss_get_drvdata(client);
+
+ fieldbus_dev_area_updated(&priv->fbdev);
+}
+
+static void profi_on_online_changed(struct anybuss_client *client, bool online)
+{
+ struct profi_priv *priv = anybuss_get_drvdata(client);
+
+ fieldbus_dev_online_changed(&priv->fbdev, online);
+}
+
+static int profinet_probe(struct anybuss_client *client)
+{
+ struct profi_priv *priv;
+ struct device *dev = &client->dev;
+ int err;
+
+ client->on_area_updated = profi_on_area_updated;
+ client->on_online_changed = profi_on_online_changed;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ mutex_init(&priv->enable_lock);
+ priv->client = client;
+ priv->fbdev.read_area_sz = PROFI_DPRAM_SIZE;
+ priv->fbdev.write_area_sz = PROFI_DPRAM_SIZE;
+ priv->fbdev.card_name = "HMS Profinet IRT (Anybus-S)";
+ priv->fbdev.fieldbus_type = FIELDBUS_DEV_TYPE_PROFINET;
+ priv->fbdev.read_area = profi_read_area;
+ priv->fbdev.write_area = profi_write_area;
+ priv->fbdev.fieldbus_id_get = profi_id_get;
+ priv->fbdev.enable_get = profi_enable_get;
+ priv->fbdev.simple_enable_set = profi_simple_enable;
+ priv->fbdev.parent = dev;
+ err = fieldbus_dev_register(&priv->fbdev);
+ if (err < 0)
+ return err;
+ dev_info(dev, "card detected, registered as %s",
+ dev_name(priv->fbdev.dev));
+ anybuss_set_drvdata(client, priv);
+
+ return 0;
+}
+
+static int profinet_remove(struct anybuss_client *client)
+{
+ struct profi_priv *priv = anybuss_get_drvdata(client);
+
+ fieldbus_dev_unregister(&priv->fbdev);
+ return 0;
+}
+
+static struct anybuss_client_driver profinet_driver = {
+ .probe = profinet_probe,
+ .remove = profinet_remove,
+ .driver = {
+ .name = "hms-profinet",
+ .owner = THIS_MODULE,
+ },
+ .anybus_id = 0x0089,
+};
+
+static int __init profinet_init(void)
+{
+ return anybuss_client_driver_register(&profinet_driver);
+}
+module_init(profinet_init);
+
+static void __exit profinet_exit(void)
+{
+ return anybuss_client_driver_unregister(&profinet_driver);
+}
+module_exit(profinet_exit);
+
+MODULE_AUTHOR("Sven Van Asbroeck <TheSven73@gmail.com>");
+MODULE_DESCRIPTION("HMS Profinet IRT Driver (Anybus-S)");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
new file mode 100644
index 000000000000..f69dc4930457
--- /dev/null
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -0,0 +1,1458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HMS Anybus-S Host Driver
+ *
+ * Copyright (C) 2018 Arcx Inc
+ */
+
+/*
+ * Architecture Overview
+ * =====================
+ * This driver (running on the CPU/SoC) and the Anybus-S card communicate
+ * by reading and writing data to/from the Anybus-S Dual-Port RAM (dpram).
+ * This is memory connected to both the SoC and Anybus-S card, which both sides
+ * can access freely and concurrently.
+ *
+ * Synchronization happens by means of two registers located in the dpram:
+ * IND_AB: written exclusively by the Anybus card; and
+ * IND_AP: written exclusively by this driver.
+ *
+ * Communication happens using one of the following mechanisms:
+ * 1. reserve, read/write, release dpram memory areas:
+ * using an IND_AB/IND_AP protocol, the driver is able to reserve certain
+ * memory areas. no dpram memory can be read or written except if reserved.
+ * (with a few limited exceptions)
+ * 2. send and receive data structures via a shared mailbox:
+ * using an IND_AB/IND_AP protocol, the driver and Anybus card are able to
+ * exchange commands and responses using a shared mailbox.
+ * 3. receive software interrupts:
+ * using an IND_AB/IND_AP protocol, the Anybus card is able to notify the
+ * driver of certain events such as: bus online/offline, data available.
+ * note that software interrupt event bits are located in a memory area
+ * which must be reserved before it can be accessed.
+ *
+ * The manual[1] is silent on whether these mechanisms can happen concurrently,
+ * or how they should be synchronized. However, section 13 (Driver Example)
+ * provides the following suggestion for developing a driver:
+ * a) an interrupt handler which updates global variables;
+ * b) a continuously-running task handling area requests (1 above)
+ * c) a continuously-running task handling mailbox requests (2 above)
+ * The example conspicuously leaves out software interrupts (3 above), which
+ * is the thorniest issue to get right (see below).
+ *
+ * The naive, straightforward way to implement this would be:
+ * - create an isr which updates shared variables;
+ * - create a work_struct which handles software interrupts on a queue;
+ * - create a function which does reserve/update/unlock in a loop;
+ * - create a function which does mailbox send/receive in a loop;
+ * - call the above functions from the driver's read/write/ioctl;
+ * - synchronize using mutexes/spinlocks:
+ * + only one area request at a time
+ * + only one mailbox request at a time
+ * + protect IND_AB, IND_AP against data hazards (e.g. read-after-write)
+ *
+ * Unfortunately, the presence of the software interrupt causes subtle yet
+ * considerable synchronization issues; especially problematic is the
+ * requirement to reserve/release the area which contains the status bits.
+ *
+ * The driver architecture presented here sidesteps these synchronization issues
+ * by accessing the dpram from a single kernel thread only. User-space throws
+ * "tasks" (i.e. 1, 2 above) into a task queue, waits for their completion,
+ * and the kernel thread runs them to completion.
+ *
+ * Each task has a task_function, which is called/run by the queue thread.
+ * That function communicates with the Anybus card, and returns either
+ * 0 (OK), a negative error code (error), or -EINPROGRESS (waiting).
+ * On OK or error, the queue thread completes and dequeues the task,
+ * which also releases the user space thread which may still be waiting for it.
+ * On -EINPROGRESS (waiting), the queue thread will leave the task on the queue,
+ * and revisit (call again) whenever an interrupt event comes in.
+ *
+ * Each task has a state machine, which is run by calling its task_function.
+ * It ensures that the task will go through its various stages over time,
+ * returning -EINPROGRESS if it wants to wait for an event to happen.
+ *
+ * Note that according to the manual's driver example, the following operations
+ * may run independent of each other:
+ * - area reserve/read/write/release (point 1 above)
+ * - mailbox operations (point 2 above)
+ * - switching power on/off
+ *
+ * To allow them to run independently, each operation class gets its own queue.
+ *
+ * Userspace processes A, B, C, D post tasks to the appropriate queue,
+ * and wait for task completion:
+ *
+ * process A B C D
+ * | | | |
+ * v v v v
+ * |<----- ========================================
+ * | | | |
+ * | v v v-------<-------+
+ * | +--------------------------------------+ |
+ * | | power q | mbox q | area q | |
+ * | |------------|------------|------------| |
+ * | | task | task | task | |
+ * | | task | task | task | |
+ * | | task wait | task wait | task wait | |
+ * | +--------------------------------------+ |
+ * | ^ ^ ^ |
+ * | | | | ^
+ * | +--------------------------------------+ |
+ * | | queue thread | |
+ * | |--------------------------------------| |
+ * | | single-threaded: | |
+ * | | loop: | |
+ * v | for each queue: | |
+ * | | run task state machine | |
+ * | | if task waiting: | |
+ * | | leave on queue | |
+ * | | if task done: | |
+ * | | complete task, remove from q | |
+ * | | if software irq event bits set: | |
+ * | | notify userspace | |
+ * | | post clear event bits task------>|>-------+
+ * | | wait for IND_AB changed event OR |
+ * | | task added event OR |
+ * | | timeout |
+ * | | end loop |
+ * | +--------------------------------------+
+ * | + wake up +
+ * | +--------------------------------------+
+ * | ^ ^
+ * | | |
+ * +-------->------- |
+ * |
+ * +--------------------------------------+
+ * | interrupt service routine |
+ * |--------------------------------------|
+ * | wake up queue thread on IND_AB change|
+ * +--------------------------------------+
+ *
+ * Note that the Anybus interrupt is dual-purpose:
+ * - after a reset, triggered when the card becomes ready;
+ * - during normal operation, triggered when IND_AB changes.
+ * This is why the interrupt service routine doesn't just wake up the
+ * queue thread, but also completes the card_boot completion.
+ *
+ * [1] https://www.anybus.com/docs/librariesprovider7/default-document-library/
+ * manuals-design-guides/hms-hmsi-27-275.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/kthread.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/random.h>
+#include <linux/kref.h>
+#include <linux/of_address.h>
+
+/* move to <linux/anybuss-*.h> when taking this out of staging */
+#include "anybuss-client.h"
+#include "anybuss-controller.h"
+
+#define DPRAM_SIZE 0x800
+#define MAX_MBOX_MSG_SZ 0x0FF
+#define TIMEOUT (HZ * 2)
+#define MAX_DATA_AREA_SZ 0x200
+#define MAX_FBCTRL_AREA_SZ 0x1BE
+
+#define REG_BOOTLOADER_V 0x7C0
+#define REG_API_V 0x7C2
+#define REG_FIELDBUS_V 0x7C4
+#define REG_SERIAL_NO 0x7C6
+#define REG_FIELDBUS_TYPE 0x7CC
+#define REG_MODULE_SW_V 0x7CE
+#define REG_IND_AB 0x7FF
+#define REG_IND_AP 0x7FE
+#define REG_EVENT_CAUSE 0x7ED
+#define MBOX_IN_AREA 0x400
+#define MBOX_OUT_AREA 0x520
+#define DATA_IN_AREA 0x000
+#define DATA_OUT_AREA 0x200
+#define FBCTRL_AREA 0x640
+
+#define EVENT_CAUSE_DC 0x01
+#define EVENT_CAUSE_FBOF 0x02
+#define EVENT_CAUSE_FBON 0x04
+
+#define IND_AB_UPDATED 0x08
+#define IND_AX_MIN 0x80
+#define IND_AX_MOUT 0x40
+#define IND_AX_IN 0x04
+#define IND_AX_OUT 0x02
+#define IND_AX_FBCTRL 0x01
+#define IND_AP_LOCK 0x08
+#define IND_AP_ACTION 0x10
+#define IND_AX_EVNT 0x20
+#define IND_AP_ABITS (IND_AX_IN | IND_AX_OUT | \
+ IND_AX_FBCTRL | \
+ IND_AP_ACTION | IND_AP_LOCK)
+
+#define INFO_TYPE_FB 0x0002
+#define INFO_TYPE_APP 0x0001
+#define INFO_COMMAND 0x4000
+
+#define OP_MODE_FBFC 0x0002
+#define OP_MODE_FBS 0x0004
+#define OP_MODE_CD 0x0200
+
+#define CMD_START_INIT 0x0001
+#define CMD_ANYBUS_INIT 0x0002
+#define CMD_END_INIT 0x0003
+
+/*
+ * ---------------------------------------------------------------
+ * Anybus mailbox messages - definitions
+ * ---------------------------------------------------------------
+ * note that we depend on the layout of these structures being exactly
+ * as declared: they are copied byte-for-byte to and from the card's
+ * mailbox areas in DPRAM.
+ */
+
+struct anybus_mbox_hdr {
+ __be16 id;
+ __be16 info;
+ __be16 cmd_num;
+ __be16 data_size;
+ __be16 frame_count;
+ __be16 frame_num;
+ __be16 offset_high;
+ __be16 offset_low;
+ __be16 extended[8];
+};
+
+struct msg_anybus_init {
+ __be16 input_io_len;
+ __be16 input_dpram_len;
+ __be16 input_total_len;
+ __be16 output_io_len;
+ __be16 output_dpram_len;
+ __be16 output_total_len;
+ __be16 op_mode;
+ __be16 notif_config;
+ __be16 wd_val;
+};
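+
+/*
+ * Illustrative compile-time layout check (sizes derived from the field
+ * lists above: 8 + 8 __be16s = 32 bytes for the header, 9 __be16s = 18
+ * bytes for the init message); something along these lines could guard
+ * the layout assumption:
+ *
+ *    static_assert(sizeof(struct anybus_mbox_hdr) == 32);
+ *    static_assert(sizeof(struct msg_anybus_init) == 18);
+ */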
+
+/* ------------- ref counted tasks ------------- */
+
+struct ab_task;
+typedef int (*ab_task_fn_t)(struct anybuss_host *cd,
+ struct ab_task *t);
+typedef void (*ab_done_fn_t)(struct anybuss_host *cd);
+
+struct area_priv {
+ bool is_write;
+ u16 flags;
+ u16 addr;
+ size_t count;
+ u8 buf[MAX_DATA_AREA_SZ];
+};
+
+struct mbox_priv {
+ struct anybus_mbox_hdr hdr;
+ size_t msg_out_sz;
+ size_t msg_in_sz;
+ u8 msg[MAX_MBOX_MSG_SZ];
+};
+
+struct ab_task {
+ struct kmem_cache *cache;
+ struct kref refcount;
+ ab_task_fn_t task_fn;
+ ab_done_fn_t done_fn;
+ int result;
+ struct completion done;
+ unsigned long start_jiffies;
+ union {
+ struct area_priv area_pd;
+ struct mbox_priv mbox_pd;
+ };
+};
+
+static struct ab_task *ab_task_create_get(struct kmem_cache *cache,
+ ab_task_fn_t task_fn)
+{
+ struct ab_task *t;
+
+ t = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!t)
+ return NULL;
+ t->cache = cache;
+ kref_init(&t->refcount);
+ t->task_fn = task_fn;
+ t->done_fn = NULL;
+ t->result = 0;
+ init_completion(&t->done);
+ return t;
+}
+
+static void __ab_task_destroy(struct kref *refcount)
+{
+ struct ab_task *t = container_of(refcount, struct ab_task, refcount);
+ struct kmem_cache *cache = t->cache;
+
+ kmem_cache_free(cache, t);
+}
+
+static void ab_task_put(struct ab_task *t)
+{
+ kref_put(&t->refcount, __ab_task_destroy);
+}
+
+static struct ab_task *__ab_task_get(struct ab_task *t)
+{
+ kref_get(&t->refcount);
+ return t;
+}
+
+static void __ab_task_finish(struct ab_task *t, struct anybuss_host *cd)
+{
+ if (t->done_fn)
+ t->done_fn(cd);
+ complete(&t->done);
+}
+
+static void
+ab_task_dequeue_finish_put(struct kfifo *q, struct anybuss_host *cd)
+{
+ int ret;
+ struct ab_task *t;
+
+ ret = kfifo_out(q, &t, sizeof(t));
+ WARN_ON(!ret);
+ __ab_task_finish(t, cd);
+ ab_task_put(t);
+}
+
+static int
+ab_task_enqueue(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
+ wait_queue_head_t *wq)
+{
+ int ret;
+
+ t->start_jiffies = jiffies;
+ __ab_task_get(t);
+ ret = kfifo_in_spinlocked(q, &t, sizeof(t), slock);
+ if (!ret) {
+ ab_task_put(t);
+ return -ENOMEM;
+ }
+ wake_up(wq);
+ return 0;
+}
+
+static int
+ab_task_enqueue_wait(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
+ wait_queue_head_t *wq)
+{
+ int ret;
+
+ ret = ab_task_enqueue(t, q, slock, wq);
+ if (ret)
+ return ret;
+ ret = wait_for_completion_interruptible(&t->done);
+ if (ret)
+ return ret;
+ return t->result;
+}
+
+/* ------------------------ anybus hardware ------------------------ */
+
+struct anybuss_host {
+ struct device *dev;
+ struct anybuss_client *client;
+ void (*reset)(struct device *dev, bool assert);
+ struct regmap *regmap;
+ int irq;
+ int host_idx;
+ struct task_struct *qthread;
+ wait_queue_head_t wq;
+ struct completion card_boot;
+ atomic_t ind_ab;
+ spinlock_t qlock; /* protects IN side of powerq, mboxq, areaq */
+ struct kmem_cache *qcache;
+ struct kfifo qs[3];
+ struct kfifo *powerq;
+ struct kfifo *mboxq;
+ struct kfifo *areaq;
+ bool power_on;
+ bool softint_pending;
+};
+
+static void reset_assert(struct anybuss_host *cd)
+{
+ cd->reset(cd->dev, true);
+}
+
+static void reset_deassert(struct anybuss_host *cd)
+{
+ cd->reset(cd->dev, false);
+}
+
+static int test_dpram(struct regmap *regmap)
+{
+ int i;
+ unsigned int val;
+
+ for (i = 0; i < DPRAM_SIZE; i++)
+ regmap_write(regmap, i, (u8)i);
+ for (i = 0; i < DPRAM_SIZE; i++) {
+ regmap_read(regmap, i, &val);
+ if ((u8)val != (u8)i)
+ return -EIO;
+ }
+ return 0;
+}
+
+static int read_ind_ab(struct regmap *regmap)
+{
+ unsigned long timeout = jiffies + HZ / 2;
+ unsigned int a, b, i = 0;
+
+ while (time_before_eq(jiffies, timeout)) {
+ regmap_read(regmap, REG_IND_AB, &a);
+ regmap_read(regmap, REG_IND_AB, &b);
+ if (likely(a == b))
+ return (int)a;
+ if (i < 10) {
+ cpu_relax();
+ i++;
+ } else {
+ usleep_range(500, 1000);
+ }
+ }
+ WARN(1, "IND_AB register not stable");
+ return -ETIMEDOUT;
+}
+
+static int write_ind_ap(struct regmap *regmap, unsigned int ind_ap)
+{
+ unsigned long timeout = jiffies + HZ / 2;
+ unsigned int v, i = 0;
+
+ while (time_before_eq(jiffies, timeout)) {
+ regmap_write(regmap, REG_IND_AP, ind_ap);
+ regmap_read(regmap, REG_IND_AP, &v);
+ if (likely(ind_ap == v))
+ return 0;
+ if (i < 10) {
+ cpu_relax();
+ i++;
+ } else {
+ usleep_range(500, 1000);
+ }
+ }
+ WARN(1, "IND_AP register not stable");
+ return -ETIMEDOUT;
+}
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct anybuss_host *cd = data;
+ int ind_ab;
+
+ /*
+ * irq handler needs exclusive access to the IND_AB register,
+ * because the act of reading the register acks the interrupt.
+ *
+ * store the register value in cd->ind_ab (an atomic_t), so that the
+ * queue thread is able to read it without causing an interrupt ack
+ * side-effect (and without spuriously acking an interrupt).
+ */
+ ind_ab = read_ind_ab(cd->regmap);
+ if (ind_ab < 0)
+ return IRQ_NONE;
+ atomic_set(&cd->ind_ab, ind_ab);
+ complete(&cd->card_boot);
+ wake_up(&cd->wq);
+ return IRQ_HANDLED;
+}
+
+/* ------------------------ power on/off tasks --------------------- */
+
+static int task_fn_power_off(struct anybuss_host *cd,
+ struct ab_task *t)
+{
+ struct anybuss_client *client = cd->client;
+
+ if (!cd->power_on)
+ return 0;
+ disable_irq(cd->irq);
+ reset_assert(cd);
+ atomic_set(&cd->ind_ab, IND_AB_UPDATED);
+ if (client->on_online_changed)
+ client->on_online_changed(client, false);
+ cd->power_on = false;
+ return 0;
+}
+
+static int task_fn_power_on_2(struct anybuss_host *cd,
+ struct ab_task *t)
+{
+ if (completion_done(&cd->card_boot)) {
+ cd->power_on = true;
+ return 0;
+ }
+ if (time_after(jiffies, t->start_jiffies + TIMEOUT)) {
+ disable_irq(cd->irq);
+ reset_assert(cd);
+ dev_err(cd->dev, "power on timed out");
+ return -ETIMEDOUT;
+ }
+ return -EINPROGRESS;
+}
+
+static int task_fn_power_on(struct anybuss_host *cd,
+ struct ab_task *t)
+{
+ unsigned int dummy;
+
+ if (cd->power_on)
+ return 0;
+ /*
+ * anybus docs: prevent false 'init done' interrupt by
+ * doing a dummy read of IND_AB register while in reset.
+ */
+ regmap_read(cd->regmap, REG_IND_AB, &dummy);
+ reinit_completion(&cd->card_boot);
+ enable_irq(cd->irq);
+ reset_deassert(cd);
+ t->task_fn = task_fn_power_on_2;
+ return -EINPROGRESS;
+}
+
+int anybuss_set_power(struct anybuss_client *client, bool power_on)
+{
+ struct anybuss_host *cd = client->host;
+ struct ab_task *t;
+ int err;
+
+ t = ab_task_create_get(cd->qcache, power_on ?
+ task_fn_power_on : task_fn_power_off);
+ if (!t)
+ return -ENOMEM;
+ err = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq);
+ ab_task_put(t);
+ return err;
+}
+EXPORT_SYMBOL_GPL(anybuss_set_power);
+
+/* ---------------------------- area tasks ------------------------ */
+
+static int task_fn_area_3(struct anybuss_host *cd, struct ab_task *t)
+{
+ struct area_priv *pd = &t->area_pd;
+
+ if (!cd->power_on)
+ return -EIO;
+ if (atomic_read(&cd->ind_ab) & pd->flags) {
+ /* area not released yet */
+ if (time_after(jiffies, t->start_jiffies + TIMEOUT))
+ return -ETIMEDOUT;
+ return -EINPROGRESS;
+ }
+ return 0;
+}
+
+static int task_fn_area_2(struct anybuss_host *cd, struct ab_task *t)
+{
+ struct area_priv *pd = &t->area_pd;
+ unsigned int ind_ap;
+ int ret;
+
+ if (!cd->power_on)
+ return -EIO;
+ regmap_read(cd->regmap, REG_IND_AP, &ind_ap);
+ if (!(atomic_read(&cd->ind_ab) & pd->flags)) {
+ /* we don't own the area yet */
+ if (time_after(jiffies, t->start_jiffies + TIMEOUT)) {
+ dev_warn(cd->dev, "timeout waiting for area");
+ dump_stack();
+ return -ETIMEDOUT;
+ }
+ return -EINPROGRESS;
+ }
+ /* we own the area, do what we're here to do */
+ if (pd->is_write)
+ regmap_bulk_write(cd->regmap, pd->addr, pd->buf,
+ pd->count);
+ else
+ regmap_bulk_read(cd->regmap, pd->addr, pd->buf,
+ pd->count);
+ /* ask to release the area, must use unlocked release */
+ ind_ap &= ~IND_AP_ABITS;
+ ind_ap |= pd->flags;
+ ret = write_ind_ap(cd->regmap, ind_ap);
+ if (ret)
+ return ret;
+ t->task_fn = task_fn_area_3;
+ return -EINPROGRESS;
+}
+
+static int task_fn_area(struct anybuss_host *cd, struct ab_task *t)
+{
+ struct area_priv *pd = &t->area_pd;
+ unsigned int ind_ap;
+ int ret;
+
+ if (!cd->power_on)
+ return -EIO;
+ regmap_read(cd->regmap, REG_IND_AP, &ind_ap);
+ /* ask to take the area */
+ ind_ap &= ~IND_AP_ABITS;
+ ind_ap |= pd->flags | IND_AP_ACTION | IND_AP_LOCK;
+ ret = write_ind_ap(cd->regmap, ind_ap);
+ if (ret)
+ return ret;
+ t->task_fn = task_fn_area_2;
+ return -EINPROGRESS;
+}
+
+static struct ab_task *
+create_area_reader(struct kmem_cache *qcache, u16 flags, u16 addr,
+ size_t count)
+{
+ struct ab_task *t;
+ struct area_priv *ap;
+
+ t = ab_task_create_get(qcache, task_fn_area);
+ if (!t)
+ return NULL;
+ ap = &t->area_pd;
+ ap->flags = flags;
+ ap->addr = addr;
+ ap->is_write = false;
+ ap->count = count;
+ return t;
+}
+
+static struct ab_task *
+create_area_writer(struct kmem_cache *qcache, u16 flags, u16 addr,
+ const void *buf, size_t count)
+{
+ struct ab_task *t;
+ struct area_priv *ap;
+
+ t = ab_task_create_get(qcache, task_fn_area);
+ if (!t)
+ return NULL;
+ ap = &t->area_pd;
+ ap->flags = flags;
+ ap->addr = addr;
+ ap->is_write = true;
+ ap->count = count;
+ memcpy(ap->buf, buf, count);
+ return t;
+}
+
+static struct ab_task *
+create_area_user_writer(struct kmem_cache *qcache, u16 flags, u16 addr,
+ const void __user *buf, size_t count)
+{
+ struct ab_task *t;
+ struct area_priv *ap;
+
+ t = ab_task_create_get(qcache, task_fn_area);
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+ ap = &t->area_pd;
+ ap->flags = flags;
+ ap->addr = addr;
+ ap->is_write = true;
+ ap->count = count;
+ if (copy_from_user(ap->buf, buf, count)) {
+ ab_task_put(t);
+ return ERR_PTR(-EFAULT);
+ }
+ return t;
+}
+
+static bool area_range_ok(u16 addr, size_t count, u16 area_start,
+ size_t area_sz)
+{
+ u16 area_end_ex = area_start + area_sz;
+ u16 addr_end_ex;
+
+ if (addr < area_start)
+ return false;
+ if (addr >= area_end_ex)
+ return false;
+ addr_end_ex = addr + count;
+ if (addr_end_ex > area_end_ex)
+ return false;
+ return true;
+}
+
+/* -------------------------- mailbox tasks ----------------------- */
+
+static int task_fn_mbox_2(struct anybuss_host *cd, struct ab_task *t)
+{
+ struct mbox_priv *pd = &t->mbox_pd;
+ unsigned int ind_ap;
+
+ if (!cd->power_on)
+ return -EIO;
+ regmap_read(cd->regmap, REG_IND_AP, &ind_ap);
+ if (((atomic_read(&cd->ind_ab) ^ ind_ap) & IND_AX_MOUT) == 0) {
+ /* output message not here */
+ if (time_after(jiffies, t->start_jiffies + TIMEOUT))
+ return -ETIMEDOUT;
+ return -EINPROGRESS;
+ }
+ /* grab the returned header and msg */
+ regmap_bulk_read(cd->regmap, MBOX_OUT_AREA, &pd->hdr,
+ sizeof(pd->hdr));
+ regmap_bulk_read(cd->regmap, MBOX_OUT_AREA + sizeof(pd->hdr),
+ pd->msg, pd->msg_in_sz);
+ /* tell anybus we've consumed the message */
+ ind_ap ^= IND_AX_MOUT;
+ return write_ind_ap(cd->regmap, ind_ap);
+}
+
+static int task_fn_mbox(struct anybuss_host *cd, struct ab_task *t)
+{
+ struct mbox_priv *pd = &t->mbox_pd;
+ unsigned int ind_ap;
+ int ret;
+
+ if (!cd->power_on)
+ return -EIO;
+ regmap_read(cd->regmap, REG_IND_AP, &ind_ap);
+ if ((atomic_read(&cd->ind_ab) ^ ind_ap) & IND_AX_MIN) {
+ /* mbox input area busy */
+ if (time_after(jiffies, t->start_jiffies + TIMEOUT))
+ return -ETIMEDOUT;
+ return -EINPROGRESS;
+ }
+ /* write the header and msg to input area */
+ regmap_bulk_write(cd->regmap, MBOX_IN_AREA, &pd->hdr,
+ sizeof(pd->hdr));
+ regmap_bulk_write(cd->regmap, MBOX_IN_AREA + sizeof(pd->hdr),
+ pd->msg, pd->msg_out_sz);
+ /* tell anybus we gave it a message */
+ ind_ap ^= IND_AX_MIN;
+ ret = write_ind_ap(cd->regmap, ind_ap);
+ if (ret)
+ return ret;
+ t->start_jiffies = jiffies;
+ t->task_fn = task_fn_mbox_2;
+ return -EINPROGRESS;
+}
+
+static void log_invalid_other(struct device *dev,
+ struct anybus_mbox_hdr *hdr)
+{
+ size_t ext_offs = ARRAY_SIZE(hdr->extended) - 1;
+ u16 code = be16_to_cpu(hdr->extended[ext_offs]);
+
+ dev_err(dev, " Invalid other: [0x%02X]", code);
+}
+
+static const char * const EMSGS[] = {
+ "Invalid Message ID",
+ "Invalid Message Type",
+ "Invalid Command",
+ "Invalid Data Size",
+ "Message Header Malformed (offset 008h)",
+ "Message Header Malformed (offset 00Ah)",
+ "Message Header Malformed (offset 00Ch - 00Dh)",
+ "Invalid Address",
+ "Invalid Response",
+ "Flash Config Error",
+};
+
+static int mbox_cmd_err(struct device *dev, struct mbox_priv *mpriv)
+{
+ int i;
+ u8 ecode;
+ struct anybus_mbox_hdr *hdr = &mpriv->hdr;
+ u16 info = be16_to_cpu(hdr->info);
+ u8 *phdr = (u8 *)hdr;
+ u8 *pmsg = mpriv->msg;
+
+ if (!(info & 0x8000))
+ return 0;
+ ecode = (info >> 8) & 0x0F;
+ dev_err(dev, "mailbox command failed:");
+ if (ecode == 0x0F)
+ log_invalid_other(dev, hdr);
+ else if (ecode < ARRAY_SIZE(EMSGS))
+ dev_err(dev, " Error code: %s (0x%02X)",
+ EMSGS[ecode], ecode);
+ else
+ dev_err(dev, " Error code: 0x%02X\n", ecode);
+ dev_err(dev, "Failed command:");
+ dev_err(dev, "Message Header:");
+ for (i = 0; i < sizeof(mpriv->hdr); i += 2)
+ dev_err(dev, "%02X%02X", phdr[i], phdr[i + 1]);
+ dev_err(dev, "Message Data:");
+ for (i = 0; i < mpriv->msg_in_sz; i += 2)
+ dev_err(dev, "%02X%02X", pmsg[i], pmsg[i + 1]);
+ dev_err(dev, "Stack dump:");
+ dump_stack();
+ return -EIO;
+}
+
+static int _anybus_mbox_cmd(struct anybuss_host *cd,
+ u16 cmd_num, bool is_fb_cmd,
+ const void *msg_out, size_t msg_out_sz,
+ void *msg_in, size_t msg_in_sz,
+ const void *ext, size_t ext_sz)
+{
+ struct ab_task *t;
+ struct mbox_priv *pd;
+ struct anybus_mbox_hdr *h;
+ size_t msg_sz = max(msg_in_sz, msg_out_sz);
+ u16 info;
+ int err;
+
+ if (msg_sz > MAX_MBOX_MSG_SZ)
+ return -EINVAL;
+ if (ext && ext_sz > sizeof(h->extended))
+ return -EINVAL;
+ t = ab_task_create_get(cd->qcache, task_fn_mbox);
+ if (!t)
+ return -ENOMEM;
+ pd = &t->mbox_pd;
+ h = &pd->hdr;
+ info = is_fb_cmd ? INFO_TYPE_FB : INFO_TYPE_APP;
+ /*
+ * prevent uninitialized memory in the header from being sent
+ * across the anybus
+ */
+ memset(h, 0, sizeof(*h));
+ h->info = cpu_to_be16(info | INFO_COMMAND);
+ h->cmd_num = cpu_to_be16(cmd_num);
+ h->data_size = cpu_to_be16(msg_out_sz);
+ h->frame_count = cpu_to_be16(1);
+ h->frame_num = cpu_to_be16(1);
+ h->offset_high = cpu_to_be16(0);
+ h->offset_low = cpu_to_be16(0);
+ if (ext)
+ memcpy(h->extended, ext, ext_sz);
+ memcpy(pd->msg, msg_out, msg_out_sz);
+ pd->msg_out_sz = msg_out_sz;
+ pd->msg_in_sz = msg_in_sz;
+ err = ab_task_enqueue_wait(t, cd->mboxq, &cd->qlock, &cd->wq);
+ if (err)
+ goto out;
+ /*
+ * the mailbox mechanism itself worked, but the mbox response
+ * may still carry an error code
+ */
+ err = mbox_cmd_err(cd->dev, pd);
+ if (err)
+ goto out;
+ memcpy(msg_in, pd->msg, msg_in_sz);
+out:
+ ab_task_put(t);
+ return err;
+}
+
+/* ------------------------ anybus queues ------------------------ */
+
+static void process_q(struct anybuss_host *cd, struct kfifo *q)
+{
+ struct ab_task *t;
+ int ret;
+
+ ret = kfifo_out_peek(q, &t, sizeof(t));
+ if (!ret)
+ return;
+ t->result = t->task_fn(cd, t);
+ if (t->result != -EINPROGRESS)
+ ab_task_dequeue_finish_put(q, cd);
+}
+
+static bool qs_have_work(struct kfifo *qs, size_t num)
+{
+ size_t i;
+ struct ab_task *t;
+ int ret;
+
+ for (i = 0; i < num; i++, qs++) {
+ ret = kfifo_out_peek(qs, &t, sizeof(t));
+ if (ret && (t->result != -EINPROGRESS))
+ return true;
+ }
+ return false;
+}
+
+static void process_qs(struct anybuss_host *cd)
+{
+ size_t i;
+ struct kfifo *qs = cd->qs;
+ size_t nqs = ARRAY_SIZE(cd->qs);
+
+ for (i = 0; i < nqs; i++, qs++)
+ process_q(cd, qs);
+}
+
+static void softint_ack(struct anybuss_host *cd)
+{
+ unsigned int ind_ap;
+
+ cd->softint_pending = false;
+ if (!cd->power_on)
+ return;
+ regmap_read(cd->regmap, REG_IND_AP, &ind_ap);
+ ind_ap &= ~IND_AX_EVNT;
+ ind_ap |= atomic_read(&cd->ind_ab) & IND_AX_EVNT;
+ write_ind_ap(cd->regmap, ind_ap);
+}
+
+static void process_softint(struct anybuss_host *cd)
+{
+ struct anybuss_client *client = cd->client;
+ static const u8 zero;
+ int ret;
+ unsigned int ind_ap, ev;
+ struct ab_task *t;
+
+ if (!cd->power_on)
+ return;
+ if (cd->softint_pending)
+ return;
+ regmap_read(cd->regmap, REG_IND_AP, &ind_ap);
+ if (!((atomic_read(&cd->ind_ab) ^ ind_ap) & IND_AX_EVNT))
+ return;
+ /* process software interrupt */
+ regmap_read(cd->regmap, REG_EVENT_CAUSE, &ev);
+ if (ev & EVENT_CAUSE_FBON) {
+ if (client->on_online_changed)
+ client->on_online_changed(client, true);
+ dev_dbg(cd->dev, "Fieldbus ON");
+ }
+ if (ev & EVENT_CAUSE_FBOF) {
+ if (client->on_online_changed)
+ client->on_online_changed(client, false);
+ dev_dbg(cd->dev, "Fieldbus OFF");
+ }
+ if (ev & EVENT_CAUSE_DC) {
+ if (client->on_area_updated)
+ client->on_area_updated(client);
+ dev_dbg(cd->dev, "Fieldbus data changed");
+ }
+ /*
+ * reset the event cause bits.
+ * this must be done while owning the fbctrl area, so we'll
+ * enqueue a task to do that.
+ */
+ t = create_area_writer(cd->qcache, IND_AX_FBCTRL,
+ REG_EVENT_CAUSE, &zero, sizeof(zero));
+ if (!t) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ t->done_fn = softint_ack;
+ ret = ab_task_enqueue(t, cd->areaq, &cd->qlock, &cd->wq);
+ ab_task_put(t);
+ cd->softint_pending = true;
+out:
+ WARN_ON(ret);
+ if (ret)
+ softint_ack(cd);
+}
+
+static int qthread_fn(void *data)
+{
+ struct anybuss_host *cd = data;
+ struct kfifo *qs = cd->qs;
+ size_t nqs = ARRAY_SIZE(cd->qs);
+ unsigned int ind_ab;
+
+ /*
+ * this kernel thread has exclusive access to the anybus's memory.
+ * only exception: the IND_AB register, which is accessed exclusively
+ * by the interrupt service routine (ISR). This thread must not touch
+ * the IND_AB register, but it does require access to its value.
+ *
+ * the interrupt service routine stores the register's value in
+ * cd->ind_ab (an atomic_t), where we may safely access it, with the
+ * understanding that it can be modified by the ISR at any time.
+ */
+
+ while (!kthread_should_stop()) {
+ /*
+ * make a local copy of IND_AB, so we can go around the loop
+ * again in case it changed while processing queues and softint.
+ */
+ ind_ab = atomic_read(&cd->ind_ab);
+ process_qs(cd);
+ process_softint(cd);
+ wait_event_timeout(cd->wq,
+ (atomic_read(&cd->ind_ab) != ind_ab) ||
+ qs_have_work(qs, nqs) ||
+ kthread_should_stop(),
+ HZ);
+ /*
+ * time out so even 'stuck' tasks will run eventually,
+ * and can time out.
+ */
+ }
+
+ return 0;
+}
+
+/* ------------------------ anybus exports ------------------------ */
+
+int anybuss_start_init(struct anybuss_client *client,
+ const struct anybuss_memcfg *cfg)
+{
+ int ret;
+ u16 op_mode;
+ struct anybuss_host *cd = client->host;
+ struct msg_anybus_init msg = {
+ .input_io_len = cpu_to_be16(cfg->input_io),
+ .input_dpram_len = cpu_to_be16(cfg->input_dpram),
+ .input_total_len = cpu_to_be16(cfg->input_total),
+ .output_io_len = cpu_to_be16(cfg->output_io),
+ .output_dpram_len = cpu_to_be16(cfg->output_dpram),
+ .output_total_len = cpu_to_be16(cfg->output_total),
+ .notif_config = cpu_to_be16(0x000F),
+ .wd_val = cpu_to_be16(0),
+ };
+
+ switch (cfg->offl_mode) {
+ case AB_OFFL_MODE_CLEAR:
+ op_mode = 0;
+ break;
+ case AB_OFFL_MODE_FREEZE:
+ op_mode = OP_MODE_FBFC;
+ break;
+ case AB_OFFL_MODE_SET:
+ op_mode = OP_MODE_FBS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ msg.op_mode = cpu_to_be16(op_mode | OP_MODE_CD);
+ ret = _anybus_mbox_cmd(cd, CMD_START_INIT, false, NULL, 0,
+ NULL, 0, NULL, 0);
+ if (ret)
+ return ret;
+ return _anybus_mbox_cmd(cd, CMD_ANYBUS_INIT, false,
+ &msg, sizeof(msg), NULL, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(anybuss_start_init);
+
+int anybuss_finish_init(struct anybuss_client *client)
+{
+ struct anybuss_host *cd = client->host;
+
+ return _anybus_mbox_cmd(cd, CMD_END_INIT, false, NULL, 0,
+ NULL, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(anybuss_finish_init);
+
+int anybuss_read_fbctrl(struct anybuss_client *client, u16 addr,
+ void *buf, size_t count)
+{
+ struct anybuss_host *cd = client->host;
+ struct ab_task *t;
+ int ret;
+
+ if (count == 0)
+ return 0;
+ if (!area_range_ok(addr, count, FBCTRL_AREA,
+ MAX_FBCTRL_AREA_SZ))
+ return -EFAULT;
+ t = create_area_reader(cd->qcache, IND_AX_FBCTRL, addr, count);
+ if (!t)
+ return -ENOMEM;
+ ret = ab_task_enqueue_wait(t, cd->areaq, &cd->qlock, &cd->wq);
+ if (ret)
+ goto out;
+ memcpy(buf, t->area_pd.buf, count);
+out:
+ ab_task_put(t);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(anybuss_read_fbctrl);
+
+int anybuss_write_input(struct anybuss_client *client,
+ const char __user *buf, size_t size,
+ loff_t *offset)
+{
+ ssize_t len = min_t(loff_t, MAX_DATA_AREA_SZ - *offset, size);
+ struct anybuss_host *cd = client->host;
+ struct ab_task *t;
+ int ret;
+
+ if (len <= 0)
+ return 0;
+ t = create_area_user_writer(cd->qcache, IND_AX_IN,
+ DATA_IN_AREA + *offset, buf, len);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ ret = ab_task_enqueue_wait(t, cd->areaq, &cd->qlock, &cd->wq);
+ ab_task_put(t);
+ if (ret)
+ return ret;
+ /* success */
+ *offset += len;
+ return len;
+}
+EXPORT_SYMBOL_GPL(anybuss_write_input);
+
+int anybuss_read_output(struct anybuss_client *client,
+ char __user *buf, size_t size,
+ loff_t *offset)
+{
+ ssize_t len = min_t(loff_t, MAX_DATA_AREA_SZ - *offset, size);
+ struct anybuss_host *cd = client->host;
+ struct ab_task *t;
+ int ret;
+
+ if (len <= 0)
+ return 0;
+ t = create_area_reader(cd->qcache, IND_AX_OUT,
+ DATA_OUT_AREA + *offset, len);
+ if (!t)
+ return -ENOMEM;
+ ret = ab_task_enqueue_wait(t, cd->areaq, &cd->qlock, &cd->wq);
+ if (ret)
+ goto out;
+ if (copy_to_user(buf, t->area_pd.buf, len))
+ ret = -EFAULT;
+out:
+ ab_task_put(t);
+ if (ret)
+ return ret;
+ /* success */
+ *offset += len;
+ return len;
+}
+EXPORT_SYMBOL_GPL(anybuss_read_output);
+
+int anybuss_send_msg(struct anybuss_client *client, u16 cmd_num,
+ const void *buf, size_t count)
+{
+ struct anybuss_host *cd = client->host;
+
+ return _anybus_mbox_cmd(cd, cmd_num, true, buf, count, NULL, 0,
+ NULL, 0);
+}
+EXPORT_SYMBOL_GPL(anybuss_send_msg);
+
+int anybuss_send_ext(struct anybuss_client *client, u16 cmd_num,
+ const void *buf, size_t count)
+{
+ struct anybuss_host *cd = client->host;
+
+ return _anybus_mbox_cmd(cd, cmd_num, true, NULL, 0, NULL, 0,
+ buf, count);
+}
+EXPORT_SYMBOL_GPL(anybuss_send_ext);
+
+int anybuss_recv_msg(struct anybuss_client *client, u16 cmd_num,
+ void *buf, size_t count)
+{
+ struct anybuss_host *cd = client->host;
+
+ return _anybus_mbox_cmd(cd, cmd_num, true, NULL, 0, buf, count,
+ NULL, 0);
+}
+EXPORT_SYMBOL_GPL(anybuss_recv_msg);
+
+/* ------------------------ bus functions ------------------------ */
+
+static int anybus_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ struct anybuss_client_driver *adrv =
+ to_anybuss_client_driver(drv);
+ struct anybuss_client *adev =
+ to_anybuss_client(dev);
+
+ return adrv->anybus_id == be16_to_cpu(adev->anybus_id);
+}
+
+static int anybus_bus_probe(struct device *dev)
+{
+ struct anybuss_client_driver *adrv =
+ to_anybuss_client_driver(dev->driver);
+ struct anybuss_client *adev =
+ to_anybuss_client(dev);
+
+ if (!adrv->probe)
+ return -ENODEV;
+ return adrv->probe(adev);
+}
+
+static int anybus_bus_remove(struct device *dev)
+{
+ struct anybuss_client_driver *adrv =
+ to_anybuss_client_driver(dev->driver);
+
+ if (adrv->remove)
+ return adrv->remove(to_anybuss_client(dev));
+ return 0;
+}
+
+static struct bus_type anybus_bus = {
+ .name = "anybuss",
+ .match = anybus_bus_match,
+ .probe = anybus_bus_probe,
+ .remove = anybus_bus_remove,
+};
+
+int anybuss_client_driver_register(struct anybuss_client_driver *drv)
+{
+ drv->driver.bus = &anybus_bus;
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(anybuss_client_driver_register);
+
+void anybuss_client_driver_unregister(struct anybuss_client_driver *drv)
+{
+ return driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(anybuss_client_driver_unregister);
+
+static void client_device_release(struct device *dev)
+{
+ kfree(to_anybuss_client(dev));
+}
+
+static int taskq_alloc(struct device *dev, struct kfifo *q)
+{
+ void *buf;
+ size_t size = 64 * sizeof(struct ab_task *);
+
+ buf = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ return kfifo_init(q, buf, size);
+}
+
+static int anybus_of_get_host_idx(struct device_node *np)
+{
+ const __be32 *host_idx;
+
+ host_idx = of_get_address(np, 0, NULL, NULL);
+ if (!host_idx)
+ return -ENOENT;
+ return __be32_to_cpu(*host_idx);
+}
+
+static struct device_node *
+anybus_of_find_child_device(struct device *dev, int host_idx)
+{
+ struct device_node *node;
+
+ if (!dev || !dev->of_node)
+ return NULL;
+ for_each_child_of_node(dev->of_node, node) {
+ if (anybus_of_get_host_idx(node) == host_idx)
+ return node;
+ }
+ return NULL;
+}
+
+struct anybuss_host * __must_check
+anybuss_host_common_probe(struct device *dev,
+ const struct anybuss_ops *ops)
+{
+ int ret, i;
+ u8 val[4];
+ __be16 fieldbus_type;
+ struct anybuss_host *cd;
+
+ cd = devm_kzalloc(dev, sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return ERR_PTR(-ENOMEM);
+ cd->dev = dev;
+ cd->host_idx = ops->host_idx;
+ init_completion(&cd->card_boot);
+ init_waitqueue_head(&cd->wq);
+ for (i = 0; i < ARRAY_SIZE(cd->qs); i++) {
+ ret = taskq_alloc(dev, &cd->qs[i]);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ if (WARN_ON(ARRAY_SIZE(cd->qs) < 3))
+ return ERR_PTR(-EINVAL);
+ cd->powerq = &cd->qs[0];
+ cd->mboxq = &cd->qs[1];
+ cd->areaq = &cd->qs[2];
+ cd->reset = ops->reset;
+ if (!cd->reset)
+ return ERR_PTR(-EINVAL);
+ cd->regmap = ops->regmap;
+ if (!cd->regmap)
+ return ERR_PTR(-EINVAL);
+ spin_lock_init(&cd->qlock);
+ cd->qcache = kmem_cache_create(dev_name(dev),
+ sizeof(struct ab_task), 0, 0, NULL);
+ if (!cd->qcache)
+ return ERR_PTR(-ENOMEM);
+ cd->irq = ops->irq;
+ if (cd->irq <= 0) {
+ ret = -EINVAL;
+ goto err_qcache;
+ }
+ /*
+ * use a dpram test to check if a card is present, this is only
+ * possible while in reset.
+ */
+ reset_assert(cd);
+ if (test_dpram(cd->regmap)) {
+ dev_err(dev, "no Anybus-S card in slot");
+ ret = -ENODEV;
+ goto err_qcache;
+ }
+ ret = devm_request_threaded_irq(dev, cd->irq, NULL, irq_handler,
+ IRQF_ONESHOT, dev_name(dev), cd);
+ if (ret) {
+ dev_err(dev, "could not request irq");
+ goto err_qcache;
+ }
+ /*
+ * startup sequence:
+ * perform dummy IND_AB read to prevent false 'init done' irq
+ * (already done by test_dpram() above)
+ * release reset
+ * wait for first interrupt
+ * interrupt came in: ready to go!
+ */
+ reset_deassert(cd);
+ if (!wait_for_completion_timeout(&cd->card_boot, TIMEOUT)) {
+ ret = -ETIMEDOUT;
+ goto err_reset;
+ }
+ /*
+ * according to the anybus docs, we're allowed to read these
+ * without handshaking / reserving the area
+ */
+ dev_info(dev, "Anybus-S card detected");
+ regmap_bulk_read(cd->regmap, REG_BOOTLOADER_V, val, 2);
+ dev_info(dev, "Bootloader version: %02X%02X",
+ val[0], val[1]);
+ regmap_bulk_read(cd->regmap, REG_API_V, val, 2);
+ dev_info(dev, "API version: %02X%02X", val[0], val[1]);
+ regmap_bulk_read(cd->regmap, REG_FIELDBUS_V, val, 2);
+ dev_info(dev, "Fieldbus version: %02X%02X", val[0], val[1]);
+ regmap_bulk_read(cd->regmap, REG_SERIAL_NO, val, 4);
+ dev_info(dev, "Serial number: %02X%02X%02X%02X",
+ val[0], val[1], val[2], val[3]);
+ add_device_randomness(&val, 4);
+ regmap_bulk_read(cd->regmap, REG_FIELDBUS_TYPE, &fieldbus_type,
+ sizeof(fieldbus_type));
+ dev_info(dev, "Fieldbus type: %04X", be16_to_cpu(fieldbus_type));
+ regmap_bulk_read(cd->regmap, REG_MODULE_SW_V, val, 2);
+ dev_info(dev, "Module SW version: %02X%02X",
+ val[0], val[1]);
+ /* put card back in reset until a client driver releases it */
+ disable_irq(cd->irq);
+ reset_assert(cd);
+ atomic_set(&cd->ind_ab, IND_AB_UPDATED);
+ /* fire up the queue thread */
+ cd->qthread = kthread_run(qthread_fn, cd, dev_name(dev));
+ if (IS_ERR(cd->qthread)) {
+ dev_err(dev, "could not create kthread");
+ ret = PTR_ERR(cd->qthread);
+ goto err_reset;
+ }
+ /*
+ * now advertise that we've detected a client device (card).
+ * the bus infrastructure will match it to a client driver.
+ */
+ cd->client = kzalloc(sizeof(*cd->client), GFP_KERNEL);
+ if (!cd->client) {
+ ret = -ENOMEM;
+ goto err_kthread;
+ }
+ cd->client->anybus_id = fieldbus_type;
+ cd->client->host = cd;
+ cd->client->dev.bus = &anybus_bus;
+ cd->client->dev.parent = dev;
+ cd->client->dev.release = client_device_release;
+ cd->client->dev.of_node =
+ anybus_of_find_child_device(dev, cd->host_idx);
+ dev_set_name(&cd->client->dev, "anybuss.card%d", cd->host_idx);
+ ret = device_register(&cd->client->dev);
+ if (ret)
+ goto err_device;
+ return cd;
+err_device:
+ put_device(&cd->client->dev);
+err_kthread:
+ kthread_stop(cd->qthread);
+err_reset:
+ reset_assert(cd);
+err_qcache:
+ kmem_cache_destroy(cd->qcache);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(anybuss_host_common_probe);
+
+void anybuss_host_common_remove(struct anybuss_host *host)
+{
+ struct anybuss_host *cd = host;
+
+ device_unregister(&cd->client->dev);
+ kthread_stop(cd->qthread);
+ reset_assert(cd);
+ kmem_cache_destroy(cd->qcache);
+}
+EXPORT_SYMBOL_GPL(anybuss_host_common_remove);
+
+static void host_release(struct device *dev, void *res)
+{
+ struct anybuss_host **dr = res;
+
+ anybuss_host_common_remove(*dr);
+}
+
+struct anybuss_host * __must_check
+devm_anybuss_host_common_probe(struct device *dev,
+ const struct anybuss_ops *ops)
+{
+ struct anybuss_host **dr;
+ struct anybuss_host *host;
+
+ dr = devres_alloc(host_release, sizeof(struct anybuss_host *),
+ GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ host = anybuss_host_common_probe(dev, ops);
+ if (IS_ERR(host)) {
+ devres_free(dr);
+ return host;
+ }
+ *dr = host;
+ devres_add(dev, dr);
+ return host;
+}
+EXPORT_SYMBOL_GPL(devm_anybuss_host_common_probe);
+
+static int __init anybus_init(void)
+{
+ int ret;
+
+ ret = bus_register(&anybus_bus);
+ if (ret)
+ pr_err("could not register Anybus-S bus: %d\n", ret);
+ return ret;
+}
+module_init(anybus_init);
+
+static void __exit anybus_exit(void)
+{
+ bus_unregister(&anybus_bus);
+}
+module_exit(anybus_exit);
+
+MODULE_DESCRIPTION("HMS Anybus-S Host Driver");
+MODULE_AUTHOR("Sven Van Asbroeck <TheSven73@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/fieldbus/dev_core.c b/drivers/staging/fieldbus/dev_core.c
new file mode 100644
index 000000000000..60b85140675a
--- /dev/null
+++ b/drivers/staging/fieldbus/dev_core.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Fieldbus Device Driver Core
+ *
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+
+/* move to <linux/fieldbus_dev.h> when taking this out of staging */
+#include "fieldbus_dev.h"
+
+/* Maximum number of fieldbus devices */
+#define MAX_FIELDBUSES 32
+
+/* base dev_t of the dynamically allocated fieldbus char device region */
+static dev_t fieldbus_devt;
+static DEFINE_IDA(fieldbus_ida);
+static DEFINE_MUTEX(fieldbus_mtx);
+
+static const char ctrl_enabled[] = "enabled";
+static const char ctrl_disabled[] = "disabled";
+
+static ssize_t online_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", !!fb->online);
+}
+static DEVICE_ATTR_RO(online);
+
+static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+
+ if (!fb->enable_get)
+ return -EINVAL;
+ return sprintf(buf, "%d\n", !!fb->enable_get(fb));
+}
+
+static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+ bool value;
+ int ret;
+
+ if (!fb->simple_enable_set)
+ return -ENOTSUPP;
+ ret = kstrtobool(buf, &value);
+ if (ret)
+ return ret;
+ ret = fb->simple_enable_set(fb, value);
+ if (ret < 0)
+ return ret;
+ return n;
+}
+static DEVICE_ATTR_RW(enabled);
+
+static ssize_t card_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+
+ /*
+ * card_name is provided by the child driver and could potentially be
+ * long; cap the output at PAGE_SIZE to protect against buffer overrun.
+ */
+ return snprintf(buf, PAGE_SIZE, "%s\n", fb->card_name);
+}
+static DEVICE_ATTR_RO(card_name);
+
+static ssize_t read_area_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%zu\n", fb->read_area_sz);
+}
+static DEVICE_ATTR_RO(read_area_size);
+
+static ssize_t write_area_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%zu\n", fb->write_area_sz);
+}
+static DEVICE_ATTR_RO(write_area_size);
+
+static ssize_t fieldbus_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+
+ return fb->fieldbus_id_get(fb, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR_RO(fieldbus_id);
+
+static ssize_t fieldbus_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+ const char *t;
+
+ switch (fb->fieldbus_type) {
+ case FIELDBUS_DEV_TYPE_PROFINET:
+ t = "profinet";
+ break;
+ default:
+ t = "unknown";
+ break;
+ }
+
+ return sprintf(buf, "%s\n", t);
+}
+static DEVICE_ATTR_RO(fieldbus_type);
+
+static struct attribute *fieldbus_attrs[] = {
+ &dev_attr_enabled.attr,
+ &dev_attr_card_name.attr,
+ &dev_attr_fieldbus_id.attr,
+ &dev_attr_read_area_size.attr,
+ &dev_attr_write_area_size.attr,
+ &dev_attr_online.attr,
+ &dev_attr_fieldbus_type.attr,
+ NULL,
+};
+
+static umode_t fieldbus_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fieldbus_dev *fb = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_enabled.attr) {
+ mode = 0;
+ if (fb->enable_get)
+ mode |= 0444;
+ if (fb->simple_enable_set)
+ mode |= 0200;
+ }
+
+ return mode;
+}
+
+static const struct attribute_group fieldbus_group = {
+ .attrs = fieldbus_attrs,
+ .is_visible = fieldbus_is_visible,
+};
+__ATTRIBUTE_GROUPS(fieldbus);
+
+static struct class fieldbus_class = {
+ .name = "fieldbus_dev",
+ .owner = THIS_MODULE,
+ .dev_groups = fieldbus_groups,
+};
+
+struct fb_open_file {
+ struct fieldbus_dev *fbdev;
+ int dc_event;
+};
+
+static int fieldbus_open(struct inode *inode, struct file *filp)
+{
+ struct fb_open_file *of;
+ struct fieldbus_dev *fbdev = container_of(inode->i_cdev,
+ struct fieldbus_dev,
+ cdev);
+
+ of = kzalloc(sizeof(*of), GFP_KERNEL);
+ if (!of)
+ return -ENOMEM;
+ of->fbdev = fbdev;
+ filp->private_data = of;
+ return 0;
+}
+
+static int fieldbus_release(struct inode *node, struct file *filp)
+{
+ struct fb_open_file *of = filp->private_data;
+
+ kfree(of);
+ return 0;
+}
+
+static ssize_t fieldbus_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *offset)
+{
+ struct fb_open_file *of = filp->private_data;
+ struct fieldbus_dev *fbdev = of->fbdev;
+
+ of->dc_event = fbdev->dc_event;
+ return fbdev->read_area(fbdev, buf, size, offset);
+}
+
+static ssize_t fieldbus_write(struct file *filp, const char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct fb_open_file *of = filp->private_data;
+ struct fieldbus_dev *fbdev = of->fbdev;
+
+ return fbdev->write_area(fbdev, buf, size, offset);
+}
+
+static __poll_t fieldbus_poll(struct file *filp, poll_table *wait)
+{
+ struct fb_open_file *of = filp->private_data;
+ struct fieldbus_dev *fbdev = of->fbdev;
+ __poll_t mask = EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM;
+
+ poll_wait(filp, &fbdev->dc_wq, wait);
+ /* data changed? */
+ if (fbdev->dc_event != of->dc_event)
+ mask |= EPOLLPRI | EPOLLERR;
+ return mask;
+}
+
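+/*
+ * Sketch of the intended userspace interaction (the device node path is
+ * whatever udev creates for the "fieldbus_dev%d" device registered below;
+ * "/dev/fieldbus_dev0" is assumed here):
+ *
+ *    char buf[64];
+ *    int fd = open("/dev/fieldbus_dev0", O_RDWR);
+ *    struct pollfd pfd = { .fd = fd, .events = POLLPRI };
+ *
+ *    for (;;) {
+ *        poll(&pfd, 1, -1);              // block until the data area changes
+ *        pread(fd, buf, sizeof(buf), 0); // reading latches dc_event
+ *    }
+ */
+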
+static const struct file_operations fieldbus_fops = {
+ .open = fieldbus_open,
+ .release = fieldbus_release,
+ .read = fieldbus_read,
+ .write = fieldbus_write,
+ .poll = fieldbus_poll,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+void fieldbus_dev_area_updated(struct fieldbus_dev *fb)
+{
+ fb->dc_event++;
+ wake_up_all(&fb->dc_wq);
+}
+EXPORT_SYMBOL_GPL(fieldbus_dev_area_updated);
+
+void fieldbus_dev_online_changed(struct fieldbus_dev *fb, bool online)
+{
+ fb->online = online;
+ kobject_uevent(&fb->dev->kobj, KOBJ_CHANGE);
+}
+EXPORT_SYMBOL_GPL(fieldbus_dev_online_changed);
+
+static void __fieldbus_dev_unregister(struct fieldbus_dev *fb)
+{
+ if (!fb)
+ return;
+ device_destroy(&fieldbus_class, fb->cdev.dev);
+ cdev_del(&fb->cdev);
+ ida_simple_remove(&fieldbus_ida, fb->id);
+}
+
+void fieldbus_dev_unregister(struct fieldbus_dev *fb)
+{
+ mutex_lock(&fieldbus_mtx);
+ __fieldbus_dev_unregister(fb);
+ mutex_unlock(&fieldbus_mtx);
+}
+EXPORT_SYMBOL_GPL(fieldbus_dev_unregister);
+
+static int __fieldbus_dev_register(struct fieldbus_dev *fb)
+{
+ dev_t devno;
+ int err;
+
+ if (!fb)
+ return -EINVAL;
+ if (!fb->read_area || !fb->write_area || !fb->fieldbus_id_get)
+ return -EINVAL;
+ fb->id = ida_simple_get(&fieldbus_ida, 0, MAX_FIELDBUSES, GFP_KERNEL);
+ if (fb->id < 0)
+ return fb->id;
+ devno = MKDEV(MAJOR(fieldbus_devt), fb->id);
+ init_waitqueue_head(&fb->dc_wq);
+ cdev_init(&fb->cdev, &fieldbus_fops);
+ err = cdev_add(&fb->cdev, devno, 1);
+ if (err) {
+ pr_err("fieldbus_dev%d unable to add device %d:%d\n",
+ fb->id, MAJOR(fieldbus_devt), fb->id);
+ goto err_cdev;
+ }
+ fb->dev = device_create(&fieldbus_class, fb->parent, devno, fb,
+ "fieldbus_dev%d", fb->id);
+ if (IS_ERR(fb->dev)) {
+ err = PTR_ERR(fb->dev);
+ goto err_dev_create;
+ }
+ return 0;
+
+err_dev_create:
+ cdev_del(&fb->cdev);
+err_cdev:
+ ida_simple_remove(&fieldbus_ida, fb->id);
+ return err;
+}
+
+int fieldbus_dev_register(struct fieldbus_dev *fb)
+{
+ int err;
+
+ mutex_lock(&fieldbus_mtx);
+ err = __fieldbus_dev_register(fb);
+ mutex_unlock(&fieldbus_mtx);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(fieldbus_dev_register);
+
+static int __init fieldbus_init(void)
+{
+ int err;
+
+ err = class_register(&fieldbus_class);
+ if (err < 0) {
+ pr_err("fieldbus_dev: could not register class\n");
+ return err;
+ }
+ err = alloc_chrdev_region(&fieldbus_devt, 0,
+ MAX_FIELDBUSES, "fieldbus_dev");
+ if (err < 0) {
+ pr_err("fieldbus_dev: unable to allocate char dev region\n");
+ goto err_alloc;
+ }
+ return 0;
+
+err_alloc:
+ class_unregister(&fieldbus_class);
+ return err;
+}
+
+static void __exit fieldbus_exit(void)
+{
+ unregister_chrdev_region(fieldbus_devt, MAX_FIELDBUSES);
+ class_unregister(&fieldbus_class);
+ ida_destroy(&fieldbus_ida);
+}
+
+subsys_initcall(fieldbus_init);
+module_exit(fieldbus_exit);
+
+MODULE_AUTHOR("Sven Van Asbroeck <TheSven73@gmail.com>");
+MODULE_AUTHOR("Jonathan Stiles <jonathans@arcx.com>");
+MODULE_DESCRIPTION("Fieldbus Device Driver Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/fieldbus/fieldbus_dev.h b/drivers/staging/fieldbus/fieldbus_dev.h
new file mode 100644
index 000000000000..a10fc3b446dc
--- /dev/null
+++ b/drivers/staging/fieldbus/fieldbus_dev.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Fieldbus Device Driver Core
+ *
+ */
+
+#ifndef __FIELDBUS_DEV_H
+#define __FIELDBUS_DEV_H
+
+#include <linux/cdev.h>
+#include <linux/wait.h>
+
+enum fieldbus_dev_type {
+ FIELDBUS_DEV_TYPE_UNKNOWN = 0,
+ FIELDBUS_DEV_TYPE_PROFINET,
+};
+
+/**
+ * struct fieldbus_dev - Fieldbus device
+ * @read_area: [DRIVER] function to read the process data area of the
+ * device. same parameters/return values as
+ * the read function in struct file_operations
+ * @write_area: [DRIVER] function to write to the process data area of
+ * the device. same parameters/return values as
+ * the write function in struct file_operations
+ * @write_area_sz: [DRIVER] size of the writable process data area
+ * @read_area_sz: [DRIVER] size of the readable process data area
+ * @card_name: [DRIVER] name of the card, e.g. "ACME Inc. profinet"
+ * @fieldbus_type: [DRIVER] fieldbus type of this device, e.g.
+ * FIELDBUS_DEV_TYPE_PROFINET
+ * @enable_get: [DRIVER] function which returns true if the card
+ * is enabled, false otherwise
+ * @fieldbus_id_get: [DRIVER] function to retrieve the unique fieldbus id
+ * by which this device can be identified;
+ * return value follows the snprintf convention
+ * @simple_enable_set: [DRIVER] (optional) function to enable the device
+ * according to its default settings
+ * @parent: [DRIVER] (optional) the device's parent device
+ */
+struct fieldbus_dev {
+ ssize_t (*read_area)(struct fieldbus_dev *fbdev, char __user *buf,
+ size_t size, loff_t *offset);
+ ssize_t (*write_area)(struct fieldbus_dev *fbdev,
+ const char __user *buf, size_t size,
+ loff_t *offset);
+ size_t write_area_sz, read_area_sz;
+ const char *card_name;
+ enum fieldbus_dev_type fieldbus_type;
+ bool (*enable_get)(struct fieldbus_dev *fbdev);
+ int (*fieldbus_id_get)(struct fieldbus_dev *fbdev, char *buf,
+ size_t max_size);
+ int (*simple_enable_set)(struct fieldbus_dev *fbdev, bool enable);
+ struct device *parent;
+
+ /* private: internal state, managed by the fieldbus core */
+ int id;
+ struct cdev cdev;
+ struct device *dev;
+ int dc_event;
+ wait_queue_head_t dc_wq;
+ bool online;
+};
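+
+/*
+ * Registration sketch (hypothetical device driver; the my_*() callbacks
+ * and the area sizes are placeholders the driver must supply):
+ *
+ *    static struct fieldbus_dev fbdev = {
+ *        .read_area       = my_read_area,
+ *        .write_area      = my_write_area,
+ *        .fieldbus_id_get = my_fieldbus_id_get,
+ *        .card_name       = "ACME Inc. profinet",
+ *        .fieldbus_type   = FIELDBUS_DEV_TYPE_PROFINET,
+ *        .read_area_sz    = 512,
+ *        .write_area_sz   = 512,
+ *    };
+ *
+ *    err = fieldbus_dev_register(&fbdev);    // -EINVAL if a mandatory
+ *                                            // callback is missing
+ *    ...
+ *    fieldbus_dev_unregister(&fbdev);
+ */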
+
+#if IS_ENABLED(CONFIG_FIELDBUS_DEV)
+
+/**
+ * fieldbus_dev_unregister()
+ * - unregister a previously registered fieldbus device
+ * @fb: Device structure previously registered
+ **/
+void fieldbus_dev_unregister(struct fieldbus_dev *fb);
+
+/**
+ * fieldbus_dev_register()
+ * - register a device with the fieldbus device subsystem
+ * @fb: Device structure filled by the device driver
+ **/
+int __must_check fieldbus_dev_register(struct fieldbus_dev *fb);
+
+/**
+ * fieldbus_dev_area_updated()
+ * - notify the subsystem that an external fieldbus controller updated
+ * the process data area
+ * @fb: Device structure
+ **/
+void fieldbus_dev_area_updated(struct fieldbus_dev *fb);
+
+/**
+ * fieldbus_dev_online_changed()
+ * - notify the subsystem that the fieldbus online status changed
+ * @fb: Device structure
+ **/
+void fieldbus_dev_online_changed(struct fieldbus_dev *fb, bool online);
+
+#else /* IS_ENABLED(CONFIG_FIELDBUS_DEV) */
+
+static inline void fieldbus_dev_unregister(struct fieldbus_dev *fb) {}
+static inline int __must_check fieldbus_dev_register(struct fieldbus_dev *fb)
+{
+ return -ENOTSUPP;
+}
+
+static inline void fieldbus_dev_area_updated(struct fieldbus_dev *fb) {}
+static inline void fieldbus_dev_online_changed(struct fieldbus_dev *fb,
+ bool online) {}
+
+#endif /* IS_ENABLED(CONFIG_FIELDBUS_DEV) */
+#endif /* __FIELDBUS_DEV_H */
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
index 991e154c0eca..368837cdf281 100644
--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
#
diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
index c92ab98c27d9..9645db7689c9 100644
--- a/drivers/staging/fsl-dpaa2/Makefile
+++ b/drivers/staging/fsl-dpaa2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
#
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index ad577beeb052..e3c3e427309a 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -1350,9 +1350,7 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
- err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
-
- return err;
+ return ethsw_port_fdb_add_mc(port_priv, def_mcast);
}
static void ethsw_unregister_notifier(struct device *dev)
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index 9c7c9267d52c..9543f8454af9 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config FIREWIRE_SERIAL
tristate "TTY over Firewire"
depends on FIREWIRE && TTY
diff --git a/drivers/staging/fwserial/Makefile b/drivers/staging/fwserial/Makefile
index 2170869a19b1..1cd5c5c7e805 100644
--- a/drivers/staging/fwserial/Makefile
+++ b/drivers/staging/fwserial/Makefile
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FIREWIRE_SERIAL) += firewire-serial.o
firewire-serial-objs := fwserial.o dma_fifo.o
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index a1b90ea7fcb8..aec0f19597a9 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -19,7 +19,10 @@
#include "fwserial.h"
-#define be32_to_u64(hi, lo) ((u64)be32_to_cpu(hi) << 32 | be32_to_cpu(lo))
+static inline u64 be32_to_u64(__be32 hi, __be32 lo)
+{
+ return ((u64)be32_to_cpu(hi) << 32 | be32_to_cpu(lo));
+}
#define LINUX_VENDOR_ID 0xd00d1eU /* same id used in card root directory */
#define FWSERIAL_VERSION 0x00e81cU /* must be unique within LINUX_VENDOR_ID */
diff --git a/drivers/staging/gasket/Kconfig b/drivers/staging/gasket/Kconfig
index e82b85541f7e..d9bef8ca41ef 100644
--- a/drivers/staging/gasket/Kconfig
+++ b/drivers/staging/gasket/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Gasket devices"
config STAGING_GASKET_FRAMEWORK
diff --git a/drivers/staging/gasket/Makefile b/drivers/staging/gasket/Makefile
index cec813ece678..ce03e256f501 100644
--- a/drivers/staging/gasket/Makefile
+++ b/drivers/staging/gasket/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Gasket framework and dependent drivers.
#
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index 0578bf1ba1e9..2be45ee9d061 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -294,7 +294,7 @@ static int apex_enter_reset(struct gasket_dev *gasket_dev)
/* - Wait for RAM shutdown. */
if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
- APEX_BAR2_REG_SCU_3, 1 << 6, 1 << 6,
+ APEX_BAR2_REG_SCU_3, BIT(6), BIT(6),
APEX_RESET_DELAY, APEX_RESET_RETRY)) {
dev_err(gasket_dev->dev,
"RAM did not shut down within timeout (%d ms)\n",
@@ -340,7 +340,7 @@ static int apex_quit_reset(struct gasket_dev *gasket_dev)
/* - Wait for RAM enable. */
if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
- APEX_BAR2_REG_SCU_3, 1 << 6, 0,
+ APEX_BAR2_REG_SCU_3, BIT(6), 0,
APEX_RESET_DELAY, APEX_RESET_RETRY)) {
dev_err(gasket_dev->dev,
"RAM did not enable within timeout (%d ms)\n",
@@ -439,9 +439,7 @@ static int apex_reset(struct gasket_dev *gasket_dev)
if (ret)
return ret;
}
- ret = apex_quit_reset(gasket_dev);
-
- return ret;
+ return apex_quit_reset(gasket_dev);
}
/*
diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c
index ff61b782df30..2d6195f7300e 100644
--- a/drivers/staging/gasket/gasket_interrupt.c
+++ b/drivers/staging/gasket/gasket_interrupt.c
@@ -97,8 +97,7 @@ static void gasket_interrupt_setup(struct gasket_dev *gasket_dev)
* modify-write and shift based on the packing index.
*/
dev_dbg(gasket_dev->dev,
- "Setting up interrupt index %d with index 0x%llx and "
- "packing %d\n",
+ "Setting up interrupt index %d with index 0x%llx and packing %d\n",
interrupt_data->interrupts[i].index,
interrupt_data->interrupts[i].reg,
interrupt_data->interrupts[i].packing);
@@ -120,8 +119,7 @@ static void gasket_interrupt_setup(struct gasket_dev *gasket_dev)
break;
default:
dev_dbg(gasket_dev->dev,
- "Found interrupt description with "
- "unknown enum %d\n",
+ "Found interrupt description with unknown enum %d\n",
interrupt_data->interrupts[i].packing);
return;
}
diff --git a/drivers/staging/gasket/gasket_page_table.c b/drivers/staging/gasket/gasket_page_table.c
index 26755d9ca41d..d35c4fb19e28 100644
--- a/drivers/staging/gasket/gasket_page_table.c
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -486,8 +486,8 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
off + i * PAGE_SIZE;
} else {
- ret = get_user_pages_fast(page_addr - offset, 1, 1,
- &page);
+ ret = get_user_pages_fast(page_addr - offset, 1,
+ FOLL_WRITE, &page);
if (ret <= 0) {
dev_err(pg_tbl->device,
@@ -768,8 +768,7 @@ static bool gasket_is_extended_dev_addr_bad(struct gasket_page_table *pg_tbl,
page_lvl0_idx = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
/* Get the count of affected level 0 pages. */
- num_lvl0_pages = (num_pages + GASKET_PAGES_PER_SUBTABLE - 1) /
- GASKET_PAGES_PER_SUBTABLE;
+ num_lvl0_pages = DIV_ROUND_UP(num_pages, GASKET_PAGES_PER_SUBTABLE);
if (gasket_components_to_dev_address(pg_tbl, 0, page_global_idx,
page_offset) != dev_addr) {
@@ -1258,7 +1257,7 @@ int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
dma_addr_t handle;
void *mem;
int j;
- unsigned int num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ unsigned int num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
const struct gasket_driver_desc *driver_desc =
gasket_get_driver_desc(gasket_dev);
diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c
index fc45f0d13e87..a2d67c28f530 100644
--- a/drivers/staging/gasket/gasket_sysfs.c
+++ b/drivers/staging/gasket/gasket_sysfs.c
@@ -223,8 +223,7 @@ int gasket_sysfs_create_entries(struct device *device,
if (!mapping) {
dev_dbg(device,
- "Creating entries for device without first "
- "initializing mapping\n");
+ "Creating entries for device without first initializing mapping\n");
return -EINVAL;
}
@@ -233,8 +232,7 @@ int gasket_sysfs_create_entries(struct device *device,
i++) {
if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) {
dev_err(device,
- "Maximum number of sysfs nodes reached for "
- "device\n");
+ "Maximum number of sysfs nodes reached for device\n");
mutex_unlock(&mapping->mutex);
put_mapping(mapping);
return -ENOMEM;
@@ -264,8 +262,7 @@ void gasket_sysfs_remove_mapping(struct device *device)
if (!mapping) {
dev_err(device,
- "Attempted to remove non-existent sysfs mapping to "
- "device\n");
+ "Attempted to remove non-existent sysfs mapping to device\n");
return;
}
diff --git a/drivers/staging/gasket/gasket_sysfs.h b/drivers/staging/gasket/gasket_sysfs.h
index 151e8edd28ea..1d0eed66a7f4 100644
--- a/drivers/staging/gasket/gasket_sysfs.h
+++ b/drivers/staging/gasket/gasket_sysfs.h
@@ -40,8 +40,8 @@
*/
#define GASKET_END_OF_ATTR_ARRAY \
{ \
- .attr = __ATTR(GASKET_ARRAY_END_TOKEN, S_IRUGO, NULL, NULL), \
- .data.attr_type = 0, \
+ .attr = __ATTR_NULL, \
+ .data.attr_type = 0, \
}
/*
diff --git a/drivers/staging/gdm724x/Kconfig b/drivers/staging/gdm724x/Kconfig
index 0a1f090bbf38..1f403ecd9608 100644
--- a/drivers/staging/gdm724x/Kconfig
+++ b/drivers/staging/gdm724x/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# GCT GDM724x LTE driver configuration
#
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 3c2aab7a921e..db11498f6fc7 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -871,7 +871,6 @@ int register_lte_device(struct phy_dev *phy_dev,
net = alloc_netdev(sizeof(struct nic), pdn_dev_name,
NET_NAME_UNKNOWN, ether_setup);
if (!net) {
- pr_err("alloc_netdev failed\n");
ret = -ENOMEM;
goto err;
}
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index e2a050ba6fbb..0678f344fafb 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -164,8 +164,7 @@ static int up_to_host(struct mux_rx *r)
total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
- if (len - packet_size_sum <
- total_len) {
+ if (len - packet_size_sum < total_len) {
pr_err("invalid payload : %d %d %04x\n",
payload_size, len, packet_type);
break;
@@ -376,8 +375,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
memcpy(t->buf + MUX_HEADER_SIZE, data, len);
- memset(t->buf + MUX_HEADER_SIZE + len, 0, total_len - MUX_HEADER_SIZE -
- len);
+ memset(t->buf + MUX_HEADER_SIZE + len, 0,
+ total_len - MUX_HEADER_SIZE - len);
t->len = total_len;
t->callback = cb;
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
index 83fbd2515467..6dea3694afdd 100644
--- a/drivers/staging/gdm724x/hci_packet.h
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -40,7 +40,7 @@ struct tlv {
struct sdu_header {
__dev16 cmd_evt;
__dev16 len;
- __dev32 dftEpsId;
+ __dev32 dft_eps_id;
__dev32 bearer_ID;
__dev32 nic_type;
} __packed;
diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig
index 9165385df9de..728f4700b98d 100644
--- a/drivers/staging/goldfish/Kconfig
+++ b/drivers/staging/goldfish/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config GOLDFISH_AUDIO
tristate "Goldfish AVD Audio Device"
depends on GOLDFISH
diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile
index 054eeb82151e..f7cee15529c3 100644
--- a/drivers/staging/goldfish/Makefile
+++ b/drivers/staging/goldfish/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Goldfish audio driver
#
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
index b571e4e8060b..4894c3514955 100644
--- a/drivers/staging/greybus/Kconfig
+++ b/drivers/staging/greybus/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig GREYBUS
tristate "Greybus support"
depends on SYSFS
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index d36f8cd35e06..9ba09ea9c2fc 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -173,66 +173,66 @@ int gbaudio_register_module(struct gbaudio_module_info *module);
void gbaudio_unregister_module(struct gbaudio_module_info *module);
/* protocol related */
-extern int gb_audio_gb_get_topology(struct gb_connection *connection,
- struct gb_audio_topology **topology);
-extern int gb_audio_gb_get_control(struct gb_connection *connection,
- u8 control_id, u8 index,
- struct gb_audio_ctl_elem_value *value);
-extern int gb_audio_gb_set_control(struct gb_connection *connection,
- u8 control_id, u8 index,
- struct gb_audio_ctl_elem_value *value);
-extern int gb_audio_gb_enable_widget(struct gb_connection *connection,
- u8 widget_id);
-extern int gb_audio_gb_disable_widget(struct gb_connection *connection,
- u8 widget_id);
-extern int gb_audio_gb_get_pcm(struct gb_connection *connection,
- u16 data_cport, u32 *format,
- u32 *rate, u8 *channels,
- u8 *sig_bits);
-extern int gb_audio_gb_set_pcm(struct gb_connection *connection,
- u16 data_cport, u32 format,
- u32 rate, u8 channels,
- u8 sig_bits);
-extern int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
- u16 data_cport, u16 size);
-extern int gb_audio_gb_activate_tx(struct gb_connection *connection,
- u16 data_cport);
-extern int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
- u16 data_cport);
-extern int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
- u16 data_cport, u16 size);
-extern int gb_audio_gb_activate_rx(struct gb_connection *connection,
- u16 data_cport);
-extern int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
- u16 data_cport);
-extern int gb_audio_apbridgea_set_config(struct gb_connection *connection,
- __u16 i2s_port, __u32 format,
- __u32 rate, __u32 mclk_freq);
-extern int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
- __u16 i2s_port, __u16 cportid,
- __u8 direction);
-extern int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
- __u16 i2s_port, __u16 cportid,
- __u8 direction);
-extern int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
- __u16 i2s_port, __u16 size);
-extern int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
- __u16 i2s_port);
-extern int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
- __u16 i2s_port, __u64 timestamp);
-extern int gb_audio_apbridgea_stop_tx(struct gb_connection *connection,
- __u16 i2s_port);
-extern int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
- __u16 i2s_port);
-extern int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
- __u16 i2s_port, __u16 size);
-extern int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
- __u16 i2s_port);
-extern int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
- __u16 i2s_port);
-extern int gb_audio_apbridgea_stop_rx(struct gb_connection *connection,
- __u16 i2s_port);
-extern int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
- __u16 i2s_port);
+int gb_audio_gb_get_topology(struct gb_connection *connection,
+ struct gb_audio_topology **topology);
+int gb_audio_gb_get_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value);
+int gb_audio_gb_set_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value);
+int gb_audio_gb_enable_widget(struct gb_connection *connection,
+ u8 widget_id);
+int gb_audio_gb_disable_widget(struct gb_connection *connection,
+ u8 widget_id);
+int gb_audio_gb_get_pcm(struct gb_connection *connection,
+ u16 data_cport, u32 *format,
+ u32 *rate, u8 *channels,
+ u8 *sig_bits);
+int gb_audio_gb_set_pcm(struct gb_connection *connection,
+ u16 data_cport, u32 format,
+ u32 rate, u8 channels,
+ u8 sig_bits);
+int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size);
+int gb_audio_gb_activate_tx(struct gb_connection *connection,
+ u16 data_cport);
+int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
+ u16 data_cport);
+int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size);
+int gb_audio_gb_activate_rx(struct gb_connection *connection,
+ u16 data_cport);
+int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
+ u16 data_cport);
+int gb_audio_apbridgea_set_config(struct gb_connection *connection,
+ __u16 i2s_port, __u32 format,
+ __u32 rate, __u32 mclk_freq);
+int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction);
+int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction);
+int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size);
+int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
+ __u16 i2s_port, __u64 timestamp);
+int gb_audio_apbridgea_stop_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size);
+int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_stop_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
+ __u16 i2s_port);
#endif /* __LINUX_GBAUDIO_CODEC_H */
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
index d44b070d8862..c2a4af4c1d06 100644
--- a/drivers/staging/greybus/audio_manager.c
+++ b/drivers/staging/greybus/audio_manager.c
@@ -45,6 +45,9 @@ int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
int err;
id = ida_simple_get(&module_id, 0, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
err = gb_audio_manager_module_create(&module, manager_kset,
id, desc);
if (err) {
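For context on the check added above: ida_simple_get() returns a non-negative ID on success and a negative errno on failure, so the result has to be tested before it is used as an identifier. A minimal sketch of the usual allocate/check/release pattern, assuming a made-up IDA and helper (illustration only, not part of this patch):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);		/* hypothetical IDA, for illustration */

static int do_work_with_id(int id)
{
	return 0;			/* placeholder for the real setup work */
}

static int example_add(void)
{
	int id, err;

	id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;		/* propagate -ENOMEM, -ENOSPC, ... */

	err = do_work_with_id(id);
	if (err) {
		/* release the ID again if the rest of the setup fails */
		ida_simple_remove(&example_ida, id);
		return err;
	}

	return 0;
}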
diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c
index e97b2b87ba47..3f702db9e098 100644
--- a/drivers/staging/greybus/bundle.c
+++ b/drivers/staging/greybus/bundle.c
@@ -32,7 +32,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
{
struct gb_bundle *bundle = to_gb_bundle(dev);
- if (bundle->state == NULL)
+ if (!bundle->state)
return sprintf(buf, "\n");
return sprintf(buf, "%s\n", bundle->state);
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index 0b72e1b9d325..8ab810bf5716 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -292,7 +292,6 @@ static int gb_hid_parse(struct hid_device *hid)
rdesc = kzalloc(rsize, GFP_KERNEL);
if (!rdesc) {
- dbg_hid("couldn't allocate rdesc memory\n");
return -ENOMEM;
}
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index 0529e5628c24..34b40a409ea3 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -520,8 +520,8 @@ static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)
op = gb_operation_create(connection,
GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
- sizeof(req), sizeof(*resp) + props_count *
- sizeof(struct gb_power_supply_props_desc),
+ sizeof(*req),
+ struct_size(resp, props, props_count),
GFP_KERNEL);
if (!op)
return -ENOMEM;
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 38e85033fc4b..a097a8916b3b 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -275,7 +275,7 @@ static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
return -ENOMEM;
request = operation->request->payload;
- request->data_flags = (data->flags >> 8);
+ request->data_flags = data->flags >> 8;
request->data_blocks = cpu_to_le16(nblocks);
request->data_blksz = cpu_to_le16(data->blksz);
@@ -329,7 +329,7 @@ static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
return -ENOMEM;
request = operation->request->payload;
- request->data_flags = (data->flags >> 8);
+ request->data_flags = data->flags >> 8;
request->data_blocks = cpu_to_le16(nblocks);
request->data_blksz = cpu_to_le16(data->blksz);
@@ -602,9 +602,9 @@ static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
request.vdd = cpu_to_le32(vdd);
- request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
+ request.bus_mode = ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
GB_SDIO_BUSMODE_OPENDRAIN :
- GB_SDIO_BUSMODE_PUSHPULL);
+ GB_SDIO_BUSMODE_PUSHPULL;
switch (ios->power_mode) {
case MMC_POWER_OFF:
diff --git a/drivers/staging/gs_fpgaboot/Kconfig b/drivers/staging/gs_fpgaboot/Kconfig
index 550645291fab..968a153c4ab6 100644
--- a/drivers/staging/gs_fpgaboot/Kconfig
+++ b/drivers/staging/gs_fpgaboot/Kconfig
@@ -1,8 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
#
# "xilinx FPGA firmware download, fpgaboot"
#
config GS_FPGABOOT
tristate "Xilinx FPGA firmware download module"
- default n
help
Xilinx FPGA firmware download module
diff --git a/drivers/staging/gs_fpgaboot/Makefile b/drivers/staging/gs_fpgaboot/Makefile
index d2f0211ba540..33e238be63d6 100644
--- a/drivers/staging/gs_fpgaboot/Makefile
+++ b/drivers/staging/gs_fpgaboot/Makefile
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
gs_fpga-y += gs_fpgaboot.o io.o
obj-$(CONFIG_GS_FPGABOOT) += gs_fpga.o
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933 b/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933
deleted file mode 100644
index 79c7e88c64cd..000000000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933
+++ /dev/null
@@ -1,30 +0,0 @@
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_start
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Frequency sweep start frequency in Hz.
-
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_increment
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Frequency increment in Hz (step size) between consecutive
- frequency points along the sweep.
-
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_points
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Number of frequency points (steps) in the frequency sweep.
- This value, in conjunction with the outY_freq_start and the
- outY_freq_increment, determines the frequency sweep range
- for the sweep operation.
-
-What: /sys/bus/iio/devices/iio:deviceX/outY_settling_cycles
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Number of output excitation cycles (settling time cycles)
- that are allowed to pass through the unknown impedance,
- after each frequency increment, and before the ADC is triggered
- to perform a conversion sequence of the response signal.
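The three attributes removed above describe a linear sweep: the device steps from outY_freq_start in outY_freq_increment-sized steps across outY_freq_points points. A small stand-alone sketch of that relationship with arbitrary example values (illustration only; it assumes the sweep visits exactly `points` frequencies):

#include <stdio.h>

int main(void)
{
	unsigned long start_hz = 30000;		/* outY_freq_start */
	unsigned long increment_hz = 100;	/* outY_freq_increment */
	unsigned long points = 51;		/* outY_freq_points */

	/* Assuming the sweep visits `points` frequencies spaced increment_hz apart. */
	unsigned long last_hz = start_hz + (points - 1) * increment_hz;

	printf("sweep covers %lu Hz .. %lu Hz (%lu points, %lu Hz apart)\n",
	       start_hz, last_hz, points, increment_hz);
	return 0;
}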
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index e86ac9e47867..a8e970db179d 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Industrial I/O subsystem configuration
#
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index befbbfe911c2..3318997a7009 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Accelerometer drivers
#
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 773212e0c859..094cc9be35bd 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O accelerometer drivers
#
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index 5cc96c8086b5..70381756a64a 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ADIS16203 Programmable 360 Degrees Inclinometer
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/delay.h>
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 24e525f1ef25..b80e0d248b0f 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ADIS16240 Programmable Impact Sensor and Recorder driver
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 7a93d3a5c113..23d9a655a520 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -1,21 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
#
# ADC drivers
#
menu "Analog to digital converters"
-config AD7780
- tristate "Analog Devices AD7780 and similar ADCs driver"
- depends on SPI
- depends on GPIOLIB || COMPILE_TEST
- select AD_SIGMA_DELTA
- help
- Say yes here to build support for Analog Devices AD7170, AD7171,
- AD7780 and AD7781 SPI analog to digital converters (ADC).
- If unsure, say N (but it's safe to say "Y").
-
- To compile this driver as a module, choose M here: the
- module will be called ad7780.
-
config AD7816
tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver"
depends on SPI
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 7a421088ff82..4b76769b32bc 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -3,7 +3,6 @@
# Makefile for industrial I/O ADC drivers
#
-obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7816) += ad7816.o
obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7280) += ad7280a.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index acdbc07fd259..b6d12fe7c12a 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -1,12 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD7190 AD7192 AD7193 AD7195 SPI ADC driver
*
* Copyright 2011-2015 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#include <linux/interrupt.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -109,10 +109,10 @@
#define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */
#define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */
-#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */
#define AD7193_CH_TEMP 0x100 /* Temp sensor */
#define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */
#define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */
@@ -156,14 +156,16 @@
struct ad7192_state {
struct regulator *avdd;
struct regulator *dvdd;
+ struct clk *mclk;
u16 int_vref_mv;
- u32 mclk;
+ u32 fclk;
u32 f_order;
u32 mode;
u32 conf;
u32 scale_avail[8][2];
u8 gpocon;
u8 devid;
+ u8 clock_sel;
struct mutex lock; /* protect sensor state */
struct ad_sigma_delta sd;
@@ -216,8 +218,8 @@ static const struct ad_sd_calib_data ad7192_calib_arr[8] = {
static int ad7192_calibrate_all(struct ad7192_state *st)
{
- return ad_sd_calibrate_all(&st->sd, ad7192_calib_arr,
- ARRAY_SIZE(ad7192_calib_arr));
+ return ad_sd_calibrate_all(&st->sd, ad7192_calib_arr,
+ ARRAY_SIZE(ad7192_calib_arr));
}
static inline bool ad7192_valid_external_frequency(u32 freq)
@@ -226,23 +228,45 @@ static inline bool ad7192_valid_external_frequency(u32 freq)
freq <= AD7192_EXT_FREQ_MHZ_MAX);
}
-static int ad7192_setup(struct ad7192_state *st,
- const struct ad7192_platform_data *pdata)
+static int ad7192_of_clock_select(struct ad7192_state *st)
+{
+ struct device_node *np = st->sd.spi->dev.of_node;
+ unsigned int clock_sel;
+
+ clock_sel = AD7192_CLK_INT;
+
+ /* use internal clock */
+ if (PTR_ERR(st->mclk) == -ENOENT) {
+ if (of_property_read_bool(np, "adi,int-clock-output-enable"))
+ clock_sel = AD7192_CLK_INT_CO;
+ } else {
+ if (of_property_read_bool(np, "adi,clock-xtal"))
+ clock_sel = AD7192_CLK_EXT_MCLK1_2;
+ else
+ clock_sel = AD7192_CLK_EXT_MCLK2;
+ }
+
+ return clock_sel;
+}
+
+static int ad7192_setup(struct ad7192_state *st, struct device_node *np)
{
struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
+ bool rej60_en, sinc3_en, refin2_en, chop_en;
+ bool buf_en, bipolar, burnout_curr_en;
unsigned long long scale_uv;
int i, ret, id;
/* reset the serial interface */
ret = ad_sd_reset(&st->sd, 48);
if (ret < 0)
- goto out;
+ return ret;
usleep_range(500, 1000); /* Wait for at least 500us */
/* write/read test for device presence */
ret = ad_sd_read_reg(&st->sd, AD7192_REG_ID, 1, &id);
if (ret)
- goto out;
+ return ret;
id &= AD7192_ID_MASK;
@@ -250,44 +274,28 @@ static int ad7192_setup(struct ad7192_state *st,
dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n",
id);
- switch (pdata->clock_source_sel) {
- case AD7192_CLK_INT:
- case AD7192_CLK_INT_CO:
- st->mclk = AD7192_INT_FREQ_MHZ;
- break;
- case AD7192_CLK_EXT_MCLK1_2:
- case AD7192_CLK_EXT_MCLK2:
- if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
- st->mclk = pdata->ext_clk_hz;
- break;
- }
- dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
- pdata->ext_clk_hz);
- ret = -EINVAL;
- goto out;
- default:
- ret = -EINVAL;
- goto out;
- }
-
st->mode = AD7192_MODE_SEL(AD7192_MODE_IDLE) |
- AD7192_MODE_CLKSRC(pdata->clock_source_sel) |
+ AD7192_MODE_CLKSRC(st->clock_sel) |
AD7192_MODE_RATE(480);
st->conf = AD7192_CONF_GAIN(0);
- if (pdata->rej60_en)
+ rej60_en = of_property_read_bool(np, "adi,rejection-60-Hz-enable");
+ if (rej60_en)
st->mode |= AD7192_MODE_REJ60;
- if (pdata->sinc3_en)
+ sinc3_en = of_property_read_bool(np, "adi,sinc3-filter-enable");
+ if (sinc3_en)
st->mode |= AD7192_MODE_SINC3;
- if (pdata->refin2_en && st->devid != ID_AD7195)
+ refin2_en = of_property_read_bool(np, "adi,refin2-pins-enable");
+ if (refin2_en && st->devid != ID_AD7195)
st->conf |= AD7192_CONF_REFSEL;
- if (pdata->chop_en) {
+ chop_en = of_property_read_bool(np, "adi,chop-enable");
+ if (chop_en) {
st->conf |= AD7192_CONF_CHOP;
- if (pdata->sinc3_en)
+ if (sinc3_en)
st->f_order = 3; /* SINC 3rd order */
else
st->f_order = 4; /* SINC 4th order */
@@ -295,30 +303,34 @@ static int ad7192_setup(struct ad7192_state *st,
st->f_order = 1;
}
- if (pdata->buf_en)
+ buf_en = of_property_read_bool(np, "adi,buffer-enable");
+ if (buf_en)
st->conf |= AD7192_CONF_BUF;
- if (pdata->unipolar_en)
+ bipolar = of_property_read_bool(np, "bipolar");
+ if (!bipolar)
st->conf |= AD7192_CONF_UNIPOLAR;
- if (pdata->burnout_curr_en && pdata->buf_en && !pdata->chop_en) {
+ burnout_curr_en = of_property_read_bool(np,
+ "adi,burnout-currents-enable");
+ if (burnout_curr_en && buf_en && !chop_en) {
st->conf |= AD7192_CONF_BURN;
- } else if (pdata->burnout_curr_en) {
+ } else if (burnout_curr_en) {
dev_warn(&st->sd.spi->dev,
"Can't enable burnout currents: see CHOP or buffer\n");
}
ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
if (ret)
- goto out;
+ return ret;
ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
if (ret)
- goto out;
+ return ret;
ret = ad7192_calibrate_all(st);
if (ret)
- goto out;
+ return ret;
/* Populate available ADC input ranges */
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
@@ -332,35 +344,8 @@ static int ad7192_setup(struct ad7192_state *st,
}
return 0;
-out:
- dev_err(&st->sd.spi->dev, "setup failed\n");
- return ret;
}
-static ssize_t
-ad7192_show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
- int i, len = 0;
-
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
- st->scale_avail[i][1]);
-
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-static IIO_DEVICE_ATTR_NAMED(in_v_m_v_scale_available,
- in_voltage-voltage_scale_available,
- 0444, ad7192_show_scale_available, NULL, 0);
-
-static IIO_DEVICE_ATTR(in_voltage_scale_available, 0444,
- ad7192_show_scale_available, NULL, 0);
-
static ssize_t ad7192_show_ac_excitation(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -435,8 +420,6 @@ static IIO_DEVICE_ATTR(ac_excitation_en, 0644,
AD7192_REG_MODE);
static struct attribute *ad7192_attributes[] = {
- &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
&iio_dev_attr_ac_excitation_en.dev_attr.attr,
NULL
@@ -447,8 +430,6 @@ static const struct attribute_group ad7192_attribute_group = {
};
static struct attribute *ad7195_attributes[] = {
- &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
NULL
};
@@ -499,7 +480,7 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
*val -= 273 * ad7192_get_temp_scale(unipolar);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = st->mclk /
+ *val = st->fclk /
(st->f_order * 1024 * AD7192_MODE_RATE(st->mode));
return IIO_VAL_INT;
}
@@ -546,7 +527,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
break;
}
- div = st->mclk / (val * st->f_order * 1024);
+ div = st->fclk / (val * st->f_order * 1024);
if (div < 1 || div > 1023) {
ret = -EINVAL;
break;
@@ -579,10 +560,31 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
+static int ad7192_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)st->scale_avail;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(st->scale_avail) * 2;
+
+ return IIO_AVAIL_LIST;
+ }
+
+ return -EINVAL;
+}
+
static const struct iio_info ad7192_info = {
.read_raw = ad7192_read_raw,
.write_raw = ad7192_write_raw,
.write_raw_get_fmt = ad7192_write_raw_get_fmt,
+ .read_avail = ad7192_read_avail,
.attrs = &ad7192_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
@@ -591,6 +593,7 @@ static const struct iio_info ad7195_info = {
.read_raw = ad7192_read_raw,
.write_raw = ad7192_write_raw,
.write_raw_get_fmt = ad7192_write_raw_get_fmt,
+ .read_avail = ad7192_read_avail,
.attrs = &ad7195_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
@@ -625,6 +628,42 @@ static const struct iio_chan_spec ad7193_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(14),
};
+static int ad7192_channels_config(struct iio_dev *indio_dev)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ const struct iio_chan_spec *channels;
+ struct iio_chan_spec *chan;
+ int i;
+
+ switch (st->devid) {
+ case ID_AD7193:
+ channels = ad7193_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7193_channels);
+ break;
+ default:
+ channels = ad7192_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
+ break;
+ }
+
+ chan = devm_kcalloc(indio_dev->dev.parent, indio_dev->num_channels,
+ sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ indio_dev->channels = chan;
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ *chan = channels[i];
+ if (chan->type != IIO_TEMP)
+ chan->info_mask_shared_by_type_available |=
+ BIT(IIO_CHAN_INFO_SCALE);
+ chan++;
+ }
+
+ return 0;
+}
+
static int ad7192_probe(struct spi_device *spi)
{
const struct ad7192_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -687,16 +726,9 @@ static int ad7192_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
- switch (st->devid) {
- case ID_AD7193:
- indio_dev->channels = ad7193_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7193_channels);
- break;
- default:
- indio_dev->channels = ad7192_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
- break;
- }
+ ret = ad7192_channels_config(indio_dev);
+ if (ret < 0)
+ goto error_disable_dvdd;
if (st->devid == ID_AD7195)
indio_dev->info = &ad7195_info;
@@ -709,15 +741,42 @@ static int ad7192_probe(struct spi_device *spi)
if (ret)
goto error_disable_dvdd;
- ret = ad7192_setup(st, pdata);
- if (ret)
+ st->fclk = AD7192_INT_FREQ_MHZ;
+
+ st->mclk = devm_clk_get(&st->sd.spi->dev, "mclk");
+ if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT) {
+ ret = PTR_ERR(st->mclk);
goto error_remove_trigger;
+ }
+
+ st->clock_sel = ad7192_of_clock_select(st);
+
+ if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+ st->clock_sel == AD7192_CLK_EXT_MCLK2) {
+ ret = clk_prepare_enable(st->mclk);
+ if (ret < 0)
+ goto error_remove_trigger;
+
+ st->fclk = clk_get_rate(st->mclk);
+ if (!ad7192_valid_external_frequency(st->fclk)) {
+ ret = -EINVAL;
+ dev_err(&spi->dev,
+ "External clock frequency out of bounds\n");
+ goto error_disable_clk;
+ }
+ }
+
+ ret = ad7192_setup(st, spi->dev.of_node);
+ if (ret)
+ goto error_disable_clk;
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto error_remove_trigger;
+ goto error_disable_clk;
return 0;
+error_disable_clk:
+ clk_disable_unprepare(st->mclk);
error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_disable_dvdd:
@@ -734,6 +793,7 @@ static int ad7192_remove(struct spi_device *spi)
struct ad7192_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ clk_disable_unprepare(st->mclk);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
regulator_disable(st->dvdd);
diff --git a/drivers/staging/iio/adc/ad7192.h b/drivers/staging/iio/adc/ad7192.h
index 7433a43c2611..f3669e1df084 100644
--- a/drivers/staging/iio/adc/ad7192.h
+++ b/drivers/staging/iio/adc/ad7192.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* AD7190 AD7192 AD7195 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_ADC_AD7192_H_
#define IIO_ADC_AD7192_H_
@@ -33,15 +32,6 @@
struct ad7192_platform_data {
u16 vref_mv;
- u8 clock_source_sel;
- u32 ext_clk_hz;
- bool refin2_en;
- bool rej60_en;
- bool sinc3_en;
- bool chop_en;
- bool buf_en;
- bool unipolar_en;
- bool burnout_curr_en;
};
#endif /* IIO_ADC_AD7192_H_ */
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index d9df12665176..19a5f244dcae 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD7280A Lithium Ion Battery Monitoring System
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#include <linux/crc8.h>
@@ -97,9 +96,10 @@
#define AD7280A_NUM_CH (AD7280A_AUX_ADC_6 - \
AD7280A_CELL_VOLTAGE_1 + 1)
-#define AD7280A_CALC_VOLTAGE_CHAN_NUM(d, c) ((d * AD7280A_CELLS_PER_DEV) + c)
-#define AD7280A_CALC_TEMP_CHAN_NUM(d, c) ((d * AD7280A_CELLS_PER_DEV) + \
- c - AD7280A_CELLS_PER_DEV)
+#define AD7280A_CALC_VOLTAGE_CHAN_NUM(d, c) (((d) * AD7280A_CELLS_PER_DEV) + \
+ (c))
+#define AD7280A_CALC_TEMP_CHAN_NUM(d, c) (((d) * AD7280A_CELLS_PER_DEV) + \
+ (c) - AD7280A_CELLS_PER_DEV)
#define AD7280A_DEVADDR_MASTER 0
#define AD7280A_DEVADDR_ALL 0x1F
@@ -783,43 +783,38 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
for (i = 0; i < st->scan_cnt; i++) {
if (((channels[i] >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6) {
if (((channels[i] >> 11) & 0xFFF) >=
- st->cell_threshhigh)
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_VOLTAGE,
- 1,
- 0,
- IIO_EV_DIR_RISING,
- IIO_EV_TYPE_THRESH,
- 0, 0, 0),
+ st->cell_threshhigh) {
+ u64 tmp = IIO_EVENT_CODE(IIO_VOLTAGE, 1, 0,
+ IIO_EV_DIR_RISING,
+ IIO_EV_TYPE_THRESH,
+ 0, 0, 0);
+ iio_push_event(indio_dev, tmp,
iio_get_time_ns(indio_dev));
- else if (((channels[i] >> 11) & 0xFFF) <=
- st->cell_threshlow)
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_VOLTAGE,
- 1,
- 0,
- IIO_EV_DIR_FALLING,
- IIO_EV_TYPE_THRESH,
- 0, 0, 0),
+ } else if (((channels[i] >> 11) & 0xFFF) <=
+ st->cell_threshlow) {
+ u64 tmp = IIO_EVENT_CODE(IIO_VOLTAGE, 1, 0,
+ IIO_EV_DIR_FALLING,
+ IIO_EV_TYPE_THRESH,
+ 0, 0, 0);
+ iio_push_event(indio_dev, tmp,
iio_get_time_ns(indio_dev));
+ }
} else {
- if (((channels[i] >> 11) & 0xFFF) >= st->aux_threshhigh)
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(
- IIO_TEMP,
- 0,
+ if (((channels[i] >> 11) & 0xFFF) >=
+ st->aux_threshhigh) {
+ u64 tmp = IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
+ IIO_EV_DIR_RISING);
+ iio_push_event(indio_dev, tmp,
iio_get_time_ns(indio_dev));
- else if (((channels[i] >> 11) & 0xFFF) <=
- st->aux_threshlow)
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(
- IIO_TEMP,
- 0,
+ } else if (((channels[i] >> 11) & 0xFFF) <=
+ st->aux_threshlow) {
+ u64 tmp = IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
+ IIO_EV_DIR_FALLING);
+ iio_push_event(indio_dev, tmp,
iio_get_time_ns(indio_dev));
+ }
}
}
@@ -830,30 +825,30 @@ out:
}
static IIO_DEVICE_ATTR_NAMED(in_thresh_low_value,
- in_voltage-voltage_thresh_low_value,
- 0644,
- ad7280_read_channel_config,
- ad7280_write_channel_config,
- AD7280A_CELL_UNDERVOLTAGE);
+ in_voltage-voltage_thresh_low_value,
+ 0644,
+ ad7280_read_channel_config,
+ ad7280_write_channel_config,
+ AD7280A_CELL_UNDERVOLTAGE);
static IIO_DEVICE_ATTR_NAMED(in_thresh_high_value,
- in_voltage-voltage_thresh_high_value,
- 0644,
- ad7280_read_channel_config,
- ad7280_write_channel_config,
- AD7280A_CELL_OVERVOLTAGE);
+ in_voltage-voltage_thresh_high_value,
+ 0644,
+ ad7280_read_channel_config,
+ ad7280_write_channel_config,
+ AD7280A_CELL_OVERVOLTAGE);
static IIO_DEVICE_ATTR(in_temp_thresh_low_value,
- 0644,
- ad7280_read_channel_config,
- ad7280_write_channel_config,
- AD7280A_AUX_ADC_UNDERVOLTAGE);
+ 0644,
+ ad7280_read_channel_config,
+ ad7280_write_channel_config,
+ AD7280A_AUX_ADC_UNDERVOLTAGE);
static IIO_DEVICE_ATTR(in_temp_thresh_high_value,
- 0644,
- ad7280_read_channel_config,
- ad7280_write_channel_config,
- AD7280A_AUX_ADC_OVERVOLTAGE);
+ 0644,
+ ad7280_read_channel_config,
+ ad7280_write_channel_config,
+ AD7280A_AUX_ADC_OVERVOLTAGE);
static struct attribute *ad7280_event_attributes[] = {
&iio_dev_attr_in_thresh_low_value.dev_attr.attr,
@@ -921,8 +916,8 @@ static int ad7280_probe(struct spi_device *spi)
const struct ad7280_platform_data *pdata = dev_get_platdata(&spi->dev);
struct ad7280_state *st;
int ret;
- const unsigned short tACQ_ns[4] = {465, 1010, 1460, 1890};
- const unsigned short nAVG[4] = {1, 2, 4, 8};
+ const unsigned short t_acq_ns[4] = {465, 1010, 1460, 1890};
+ const unsigned short n_avg[4] = {1, 2, 4, 8};
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -970,10 +965,9 @@ static int ad7280_probe(struct spi_device *spi)
*/
st->readback_delay_us =
- ((tACQ_ns[pdata->acquisition_time & 0x3] + 695) *
- (AD7280A_NUM_CH * nAVG[pdata->conversion_averaging & 0x3]))
- - tACQ_ns[pdata->acquisition_time & 0x3] +
- st->slave_num * 250;
+ ((t_acq_ns[pdata->acquisition_time & 0x3] + 695) *
+ (AD7280A_NUM_CH * n_avg[pdata->conversion_averaging & 0x3])) -
+ t_acq_ns[pdata->acquisition_time & 0x3] + st->slave_num * 250;
/* Convert to usecs */
st->readback_delay_us = DIV_ROUND_UP(st->readback_delay_us, 1000);
diff --git a/drivers/staging/iio/adc/ad7280a.h b/drivers/staging/iio/adc/ad7280a.h
index ccfb90d20e71..23f18bb9e279 100644
--- a/drivers/staging/iio/adc/ad7280a.h
+++ b/drivers/staging/iio/adc/ad7280a.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* AD7280A Lithium Ion Battery Monitoring System
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_ADC_AD7280_H_
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index ee50e7296795..a9985a7f8199 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* AD7816 digital temperature sensor driver supporting AD7816/7/8
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
@@ -231,7 +230,7 @@ static ssize_t ad7816_show_value(struct device *dev,
value = (s8)((data >> AD7816_TEMP_FLOAT_OFFSET) - 103);
data &= AD7816_TEMP_FLOAT_MASK;
if (value < 0)
- data = (1 << AD7816_TEMP_FLOAT_OFFSET) - data;
+ data = BIT(AD7816_TEMP_FLOAT_OFFSET) - data;
return sprintf(buf, "%d.%.2d\n", value, data * 25);
}
return sprintf(buf, "%u\n", data);
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
index ba18b8432d9c..b7c3c4a7dfe4 100644
--- a/drivers/staging/iio/addac/Kconfig
+++ b/drivers/staging/iio/addac/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# ADDAC drivers
#
diff --git a/drivers/staging/iio/addac/Makefile b/drivers/staging/iio/addac/Makefile
index 4c7686133692..8fdbd8cab21f 100644
--- a/drivers/staging/iio/addac/Makefile
+++ b/drivers/staging/iio/addac/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O ADDAC drivers
#
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 0f26bc38edc6..5543cc909707 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* I2C bus driver for ADT7316/7/8 ADT7516/7/9 digital temperature
* sensor, ADC and DAC
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 8294b9c1e3c2..2066241b15b1 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* SPI bus driver for ADT7316/7/8 ADT7516/7/9 digital temperature
* sensor, ADC and DAC
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 6f7891b567b9..b6a65ee8d558 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9
*
- *
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
@@ -2155,7 +2153,8 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
else
chip->dac_bits = 8;
- chip->ldac_pin = devm_gpiod_get_optional(dev, "adi,ldac", GPIOD_OUT_LOW);
+ chip->ldac_pin = devm_gpiod_get_optional(dev, "adi,ldac",
+ GPIOD_OUT_LOW);
if (IS_ERR(chip->ldac_pin)) {
ret = PTR_ERR(chip->ldac_pin);
dev_err(dev, "Failed to request ldac GPIO: %d\n", ret);
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index 84ca4f6c88f5..8c2a92ae7157 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef _ADT7316_H_
diff --git a/drivers/staging/iio/cdc/Kconfig b/drivers/staging/iio/cdc/Kconfig
index b97478e7cbd0..e0a5ce66a984 100644
--- a/drivers/staging/iio/cdc/Kconfig
+++ b/drivers/staging/iio/cdc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# CDC drivers
#
diff --git a/drivers/staging/iio/cdc/Makefile b/drivers/staging/iio/cdc/Makefile
index 1466bc31f244..ab8222579e7e 100644
--- a/drivers/staging/iio/cdc/Makefile
+++ b/drivers/staging/iio/cdc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O DAC drivers
#
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 24f74ce60f80..dd7fcab8e19e 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* AD7150 capacitive sensor driver supporting AD7150/1/6
*
* Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 0eb28fea876e..47610d863908 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#include <linux/delay.h>
diff --git a/drivers/staging/iio/cdc/ad7746.h b/drivers/staging/iio/cdc/ad7746.h
index ea8572d1df02..8bdbd732dbbd 100644
--- a/drivers/staging/iio/cdc/ad7746.h
+++ b/drivers/staging/iio/cdc/ad7746.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_CDC_AD7746_H_
diff --git a/drivers/staging/iio/frequency/Kconfig b/drivers/staging/iio/frequency/Kconfig
index fc726d3c64a6..72d899cbef8e 100644
--- a/drivers/staging/iio/frequency/Kconfig
+++ b/drivers/staging/iio/frequency/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Direct Digital Synthesis drivers
#
diff --git a/drivers/staging/iio/frequency/Makefile b/drivers/staging/iio/frequency/Makefile
index e5dbcfce44f9..b8c5cf98aa5e 100644
--- a/drivers/staging/iio/frequency/Makefile
+++ b/drivers/staging/iio/frequency/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Direct Digital Synthesis drivers
#
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index a3ce50427724..74308a2e72db 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -1,27 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD9832 SPI DDS driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
+#include <asm/div64.h>
+
+#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/spi/spi.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <asm/div64.h>
+#include <linux/sysfs.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include "dds.h"
#include "ad9832.h"
+#include "dds.h"
+
/* Registers */
#define AD9832_FREQ0LL 0x0
@@ -94,7 +96,7 @@ struct ad9832_state {
struct spi_device *spi;
struct regulator *avdd;
struct regulator *dvdd;
- unsigned long mclk;
+ struct clk *mclk;
unsigned short ctrl_fp;
unsigned short ctrl_ss;
unsigned short ctrl_src;
@@ -129,10 +131,10 @@ static int ad9832_write_frequency(struct ad9832_state *st,
{
unsigned long regval;
- if (fout > (st->mclk / 2))
+ if (fout > (clk_get_rate(st->mclk) / 2))
return -EINVAL;
- regval = ad9832_calc_freqreg(st->mclk, fout);
+ regval = ad9832_calc_freqreg(clk_get_rate(st->mclk), fout);
st->freq_data[0] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) |
(addr << ADD_SHIFT) |
@@ -333,7 +335,16 @@ static int ad9832_probe(struct spi_device *spi)
goto error_disable_avdd;
}
- st->mclk = pdata->mclk;
+ st->mclk = devm_clk_get(&spi->dev, "mclk");
+ if (IS_ERR(st->mclk)) {
+ ret = PTR_ERR(st->mclk);
+ goto error_disable_dvdd;
+ }
+
+ ret = clk_prepare_enable(st->mclk);
+ if (ret < 0)
+ goto error_disable_dvdd;
+
st->spi = spi;
mutex_init(&st->lock);
@@ -384,39 +395,41 @@ static int ad9832_probe(struct spi_device *spi)
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
- goto error_disable_dvdd;
+ goto error_unprepare_mclk;
}
ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0);
if (ret)
- goto error_disable_dvdd;
+ goto error_unprepare_mclk;
ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1);
if (ret)
- goto error_disable_dvdd;
+ goto error_unprepare_mclk;
ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0);
if (ret)
- goto error_disable_dvdd;
+ goto error_unprepare_mclk;
ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1);
if (ret)
- goto error_disable_dvdd;
+ goto error_unprepare_mclk;
ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2);
if (ret)
- goto error_disable_dvdd;
+ goto error_unprepare_mclk;
ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3);
if (ret)
- goto error_disable_dvdd;
+ goto error_unprepare_mclk;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_disable_dvdd;
+ goto error_unprepare_mclk;
return 0;
+error_unprepare_mclk:
+ clk_disable_unprepare(st->mclk);
error_disable_dvdd:
regulator_disable(st->dvdd);
error_disable_avdd:
@@ -431,6 +444,7 @@ static int ad9832_remove(struct spi_device *spi)
struct ad9832_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ clk_disable_unprepare(st->mclk);
regulator_disable(st->dvdd);
regulator_disable(st->avdd);
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index 39d326cc1af9..98dfbd9289ab 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* AD9832 SPI DDS driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_DDS_AD9832_H_
#define IIO_DDS_AD9832_H_
@@ -24,7 +23,6 @@
*/
struct ad9832_platform_data {
- unsigned long mclk;
unsigned long freq0;
unsigned long freq1;
unsigned short phase0;
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 0b0287503fb4..6de3cd7363d7 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD9833/AD9834/AD9837/AD9838 SPI DDS driver
*
* Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#include <linux/clk.h>
@@ -286,7 +285,7 @@ ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
struct ad9834_state *st = iio_priv(indio_dev);
char *str;
- if ((st->devid == ID_AD9833) || (st->devid == ID_AD9837))
+ if (st->devid == ID_AD9833 || st->devid == ID_AD9837)
str = "sine triangle square";
else if (st->control & AD9834_OPBITEN)
str = "sine";
diff --git a/drivers/staging/iio/frequency/ad9834.h b/drivers/staging/iio/frequency/ad9834.h
index da7e83ceedad..521943aa0e61 100644
--- a/drivers/staging/iio/frequency/ad9834.h
+++ b/drivers/staging/iio/frequency/ad9834.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* AD9833/AD9834/AD9837/AD9838 SPI DDS driver
*
* Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_DDS_AD9834_H_
#define IIO_DDS_AD9834_H_
diff --git a/drivers/staging/iio/frequency/dds.h b/drivers/staging/iio/frequency/dds.h
index d6ccd99c14d7..2ebe68eb7398 100644
--- a/drivers/staging/iio/frequency/dds.h
+++ b/drivers/staging/iio/frequency/dds.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* dds.h - sysfs attributes associated with DDS devices
*
* Copyright (c) 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_DDS_H_
#define IIO_DDS_H_
diff --git a/drivers/staging/iio/impedance-analyzer/Kconfig b/drivers/staging/iio/impedance-analyzer/Kconfig
index dd97b6bb3fd0..841648847edf 100644
--- a/drivers/staging/iio/impedance-analyzer/Kconfig
+++ b/drivers/staging/iio/impedance-analyzer/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Impedance Converter, Network Analyzer drivers
#
@@ -10,7 +11,7 @@ config AD5933
select IIO_KFIFO_BUF
help
Say yes here to build support for Analog Devices Impedance Converter,
- Network Analyzer, AD5933/4, provides direct access via sysfs.
+ Network Analyzer, AD5933/4.
To compile this driver as a module, choose M here: the
module will be called ad5933.
diff --git a/drivers/staging/iio/impedance-analyzer/Makefile b/drivers/staging/iio/impedance-analyzer/Makefile
index 7604d786583e..b4e657a1ac18 100644
--- a/drivers/staging/iio/impedance-analyzer/Makefile
+++ b/drivers/staging/iio/impedance-analyzer/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Impedance Converter, Network Analyzer drivers
#
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 3134295f014f..af0bcf95ee8a 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -1,27 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD5933 AD5934 Impedance Converter, Network Analyzer
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
-#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/sysfs.h>
+#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/sysfs.h>
#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/sysfs.h>
/* AD5933/AD5934 Registers */
#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 1 byte */
@@ -284,7 +283,7 @@ static ssize_t ad5933_show_frequency(struct device *dev,
freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
freqreg = (u64)freqreg * (u64)(st->mclk_hz / 4);
- do_div(freqreg, 1 << 27);
+ do_div(freqreg, BIT(27));
return sprintf(buf, "%d\n", (int)freqreg);
}
@@ -316,12 +315,12 @@ static ssize_t ad5933_store_frequency(struct device *dev,
return ret ? ret : len;
}
-static IIO_DEVICE_ATTR(out_voltage0_freq_start, 0644,
+static IIO_DEVICE_ATTR(out_altvoltage0_frequency_start, 0644,
ad5933_show_frequency,
ad5933_store_frequency,
AD5933_REG_FREQ_START);
-static IIO_DEVICE_ATTR(out_voltage0_freq_increment, 0644,
+static IIO_DEVICE_ATTR(out_altvoltage0_frequency_increment, 0644,
ad5933_show_frequency,
ad5933_store_frequency,
AD5933_REG_FREQ_INC);
@@ -420,7 +419,7 @@ static ssize_t ad5933_store(struct device *dev,
if (val > 1022)
val = (val >> 2) | (3 << 9);
else if (val > 511)
- val = (val >> 1) | (1 << 9);
+ val = (val >> 1) | BIT(9);
dat = cpu_to_be16(val);
ret = ad5933_i2c_write(st->client,
@@ -444,12 +443,12 @@ static ssize_t ad5933_store(struct device *dev,
return ret ? ret : len;
}
-static IIO_DEVICE_ATTR(out_voltage0_scale, 0644,
+static IIO_DEVICE_ATTR(out_altvoltage0_raw, 0644,
ad5933_show,
ad5933_store,
AD5933_OUT_RANGE);
-static IIO_DEVICE_ATTR(out_voltage0_scale_available, 0444,
+static IIO_DEVICE_ATTR(out_altvoltage0_scale_available, 0444,
ad5933_show,
NULL,
AD5933_OUT_RANGE_AVAIL);
@@ -464,28 +463,29 @@ static IIO_DEVICE_ATTR(in_voltage0_scale_available, 0444,
NULL,
AD5933_IN_PGA_GAIN_AVAIL);
-static IIO_DEVICE_ATTR(out_voltage0_freq_points, 0644,
+static IIO_DEVICE_ATTR(out_altvoltage0_frequency_points, 0644,
ad5933_show,
ad5933_store,
AD5933_FREQ_POINTS);
-static IIO_DEVICE_ATTR(out_voltage0_settling_cycles, 0644,
+static IIO_DEVICE_ATTR(out_altvoltage0_settling_cycles, 0644,
ad5933_show,
ad5933_store,
AD5933_OUT_SETTLING_CYCLES);
-/* note:
+/*
+ * note:
* ideally we would handle the scale attributes via the iio_info
* (read|write)_raw methods, however this part is atypical since we
* don't create dedicated sysfs channel attributes for out0 and in0.
*/
static struct attribute *ad5933_attributes[] = {
- &iio_dev_attr_out_voltage0_scale.dev_attr.attr,
- &iio_dev_attr_out_voltage0_scale_available.dev_attr.attr,
- &iio_dev_attr_out_voltage0_freq_start.dev_attr.attr,
- &iio_dev_attr_out_voltage0_freq_increment.dev_attr.attr,
- &iio_dev_attr_out_voltage0_freq_points.dev_attr.attr,
- &iio_dev_attr_out_voltage0_settling_cycles.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_raw.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_scale_available.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequency_start.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequency_increment.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequency_points.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_settling_cycles.dev_attr.attr,
&iio_dev_attr_in_voltage0_scale.dev_attr.attr,
&iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
NULL
@@ -572,7 +572,8 @@ static int ad5933_ring_postenable(struct iio_dev *indio_dev)
{
struct ad5933_state *st = iio_priv(indio_dev);
- /* AD5933_CTRL_INIT_START_FREQ:
+ /*
+ * AD5933_CTRL_INIT_START_FREQ:
* High Q complex circuits require a long time to reach steady state.
* To facilitate the measurement of such impedances, this mode allows
* the user full control of the settling time requirement before
@@ -663,7 +664,8 @@ static void ad5933_work(struct work_struct *work)
}
if (status & AD5933_STAT_SWEEP_DONE) {
- /* last sample received - power down do
+ /*
+ * last sample received - power down do
* nothing until the ring enable is toggled
*/
ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
index e01eb8abcdce..aa6a3e7f6cdb 100644
--- a/drivers/staging/iio/meter/Kconfig
+++ b/drivers/staging/iio/meter/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# IIO meter drivers configuration
#
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index c3aa6ea9d036..a9a06e8dda51 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (I2C Bus)
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index fc9146757283..f12a6c8b3e88 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (SPI Bus)
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 029c3bf42d4d..68da6ecde6a3 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
@@ -269,7 +268,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
static IIO_DEV_ATTR_IPEAK(0644,
ade7854_read_32bit,
ade7854_write_32bit,
- ADE7854_VPEAK);
+ ADE7854_IPEAK);
static IIO_DEV_ATTR_APHCAL(0644,
ade7854_read_16bit,
ade7854_write_16bit,
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
index 4a727c17bb8f..6d1e2622e0b0 100644
--- a/drivers/staging/iio/resolver/Kconfig
+++ b/drivers/staging/iio/resolver/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Resolver/Synchro drivers
#
diff --git a/drivers/staging/iio/resolver/Makefile b/drivers/staging/iio/resolver/Makefile
index b2049f2ce36e..398631f7e79b 100644
--- a/drivers/staging/iio/resolver/Makefile
+++ b/drivers/staging/iio/resolver/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Resolver/Synchro drivers
#
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index cec9d995b3df..b6be0bc202f5 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210
*
* Copyright (c) 2010-2010 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/types.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/kpc2000/Kconfig b/drivers/staging/kpc2000/Kconfig
new file mode 100644
index 000000000000..fb5922928f47
--- /dev/null
+++ b/drivers/staging/kpc2000/Kconfig
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config KPC2000
+ bool "Daktronics KPC Device support"
+ depends on PCI
+ help
+ Select this if you wish to use the Daktronics KPC PCI devices
+
+ If unsure, say N.
+
+config KPC2000_CORE
+ tristate "Daktronics KPC PCI UIO device"
+ depends on KPC2000
+ help
+ Say Y here if you wish to support the Daktronics KPC PCI
+ device in UIO mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called kpc2000
+
+ If unsure, say N.
+
+config KPC2000_SPI
+ tristate "Kaktronics KPC SPI device"
+ depends on KPC2000 && SPI
+ help
+ Say Y here if you wish to support the Daktronics KPC PCI
+ device in SPI mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called kpc2000_spi
+
+ If unsure, say N.
+
+config KPC2000_I2C
+ tristate "Kaktronics KPC I2C device"
+ depends on KPC2000 && I2C
+ help
+ Say Y here if you wish to support the Daktronics KPC PCI
+ device in I2C mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called kpc2000_i2c
+
+ If unsure, say N.
+
+config KPC2000_DMA
+ tristate "Daktronics KPC DMA controller"
+ depends on KPC2000
+ help
+ Say Y here if you wish to support the Daktronics DMA controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called kpc2000_dma
+
+ If unsure, say N.
+
diff --git a/drivers/staging/kpc2000/Makefile b/drivers/staging/kpc2000/Makefile
new file mode 100644
index 000000000000..1e48e9df1329
--- /dev/null
+++ b/drivers/staging/kpc2000/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_KPC2000) += kpc2000/
+obj-$(CONFIG_KPC2000_I2C) += kpc_i2c/
+obj-$(CONFIG_KPC2000_SPI) += kpc_spi/
+obj-$(CONFIG_KPC2000_DMA) += kpc_dma/
diff --git a/drivers/staging/kpc2000/TODO b/drivers/staging/kpc2000/TODO
new file mode 100644
index 000000000000..8c7af29fefae
--- /dev/null
+++ b/drivers/staging/kpc2000/TODO
@@ -0,0 +1,8 @@
+- the kpc_spi driver doesn't seem to let multiple transactions (to different instances of the core) happen in parallel...
+- The kpc_i2c driver is a hot mess; it should probably be cleaned up a ton. It functions against current hardware though.
+- pcard->card_num in kp2000_pcie_probe() is a global variable and needs atomic / locking / something better.
+- probe_core_uio() probably needs error handling
+- the loop in kp2000_probe_cores() that uses probe_core_uio() also probably needs error handling
+- would be nice if the AIO fileops in kpc_dma could be made to work
+ - probably want to add a CONFIG_ option to control compilation of the AIO functions
+- if the AIO fileops in kpc_dma start working, next would be making iov_count > 1 work too
diff --git a/drivers/staging/kpc2000/kpc.h b/drivers/staging/kpc2000/kpc.h
new file mode 100644
index 000000000000..a3fc9c9221aa
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef KPC_H_
+#define KPC_H_
+
+/* ***** Driver Names ***** */
+#define KP_DRIVER_NAME_KP2000 "kp2000"
+#define KP_DRIVER_NAME_INVALID "kpc_invalid"
+#define KP_DRIVER_NAME_DMA_CONTROLLER "kpc_nwl_dma"
+#define KP_DRIVER_NAME_UIO "uio_pdrv_genirq"
+#define KP_DRIVER_NAME_I2C "kpc_i2c"
+#define KP_DRIVER_NAME_SPI "kpc_spi"
+
+struct kpc_core_device_platdata {
+ u32 card_id;
+ u32 build_version;
+ u32 hardware_revision;
+ u64 ssid;
+ u64 ddna;
+};
+
+#define PCI_DEVICE_ID_DAKTRONICS_KADOKA_P2KR0 0x4b03
+
+#endif /* KPC_H_ */
diff --git a/drivers/staging/kpc2000/kpc2000/Makefile b/drivers/staging/kpc2000/kpc2000/Makefile
new file mode 100644
index 000000000000..28ab1e185f9f
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc2000/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-m := kpc2000.o
+kpc2000-objs += kp2000_module.o core.o cell_probe.o fileops.o
diff --git a/drivers/staging/kpc2000/kpc2000/cell_probe.c b/drivers/staging/kpc2000/kpc2000/cell_probe.c
new file mode 100644
index 000000000000..e0dba91e7fa8
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc2000/cell_probe.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/uio_driver.h>
+#include "pcie.h"
+
+/* Core (Resource) Table Layout:
+ * one Resource per record (8 bytes), fields packed into a 64-bit word:
+ *   bits 63..52  Core Type                          [up to 4095 types]
+ *   bit  51      S2C DMA Present
+ *   bits 50..48  S2C DMA Channel Number             [up to 8 channels]
+ *   bits 47..32  Register Count (64-bit registers)  [up to 65535 registers]
+ *   bits 31..16  Core Offset (in 4kB blocks)        [up to 65535 cores]
+ *   bit  15      C2S DMA Present
+ *   bits 14..12  C2S DMA Channel Number             [up to 8 channels]
+ *   bits 11..10  IRQ Count                          [0 to 3 IRQs per core]
+ *   bits  9..3   IRQ Base Number                    [up to 128 IRQs per card]
+ *   bits  2..0   Spare
+ *
+ */
+
+#define KPC_OLD_DMA_CH_NUM(present, channel) ((present) ? (0x8 | ((channel) & 0x7)) : 0)
+#define KPC_OLD_S2C_DMA_CH_NUM(cte) KPC_OLD_DMA_CH_NUM(cte.s2c_dma_present, cte.s2c_dma_channel_num)
+#define KPC_OLD_C2S_DMA_CH_NUM(cte) KPC_OLD_DMA_CH_NUM(cte.c2s_dma_present, cte.c2s_dma_channel_num)
+
+#define KP_CORE_ID_INVALID 0
+#define KP_CORE_ID_I2C 3
+#define KP_CORE_ID_SPI 5
+
+struct core_table_entry {
+ u16 type;
+ u32 offset;
+ u32 length;
+ bool s2c_dma_present;
+ u8 s2c_dma_channel_num;
+ bool c2s_dma_present;
+ u8 c2s_dma_channel_num;
+ u8 irq_count;
+ u8 irq_base_num;
+};
+
+static
+void parse_core_table_entry_v0(struct core_table_entry *cte, const u64 read_val)
+{
+ cte->type = ((read_val & 0xFFF0000000000000) >> 52);
+ cte->offset = ((read_val & 0x00000000FFFF0000) >> 16) * 4096;
+ cte->length = ((read_val & 0x0000FFFF00000000) >> 32) * 8;
+ cte->s2c_dma_present = ((read_val & 0x0008000000000000) >> 51);
+ cte->s2c_dma_channel_num = ((read_val & 0x0007000000000000) >> 48);
+ cte->c2s_dma_present = ((read_val & 0x0000000000008000) >> 15);
+ cte->c2s_dma_channel_num = ((read_val & 0x0000000000007000) >> 12);
+ cte->irq_count = ((read_val & 0x0000000000000C00) >> 10);
+ cte->irq_base_num = ((read_val & 0x00000000000003F8) >> 3);
+}
+
+static
+void dbg_cte(struct kp2000_device *pcard, struct core_table_entry *cte)
+{
+ dev_dbg(&pcard->pdev->dev, "CTE: type:%3d offset:%3d (%3d) length:%3d (%3d) s2c:%d c2s:%d irq_count:%d base_irq:%d\n",
+ cte->type,
+ cte->offset,
+ cte->offset / 4096,
+ cte->length,
+ cte->length / 8,
+ (cte->s2c_dma_present ? cte->s2c_dma_channel_num : -1),
+ (cte->c2s_dma_present ? cte->c2s_dma_channel_num : -1),
+ cte->irq_count,
+ cte->irq_base_num
+ );
+}
+
+static
+void parse_core_table_entry(struct core_table_entry *cte, const u64 read_val, const u8 entry_rev)
+{
+ switch (entry_rev) {
+ case 0: parse_core_table_entry_v0(cte, read_val); break;
+ default: cte->type = 0; break;
+ }
+}
+
+
+int probe_core_basic(unsigned int core_num, struct kp2000_device *pcard, char *name, const struct core_table_entry cte)
+{
+ struct mfd_cell cell = {0};
+ struct resource resources[2];
+
+ struct kpc_core_device_platdata core_pdata = {
+ .card_id = pcard->card_id,
+ .build_version = pcard->build_version,
+ .hardware_revision = pcard->hardware_revision,
+ .ssid = pcard->ssid,
+ .ddna = pcard->ddna,
+ };
+
+ dev_dbg(&pcard->pdev->dev, "Found Basic core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n", cte.type, KPC_OLD_S2C_DMA_CH_NUM(cte), KPC_OLD_C2S_DMA_CH_NUM(cte), cte.offset, cte.length, cte.length / 8);
+
+
+ cell.platform_data = &core_pdata;
+ cell.pdata_size = sizeof(struct kpc_core_device_platdata);
+ cell.name = name;
+ cell.id = core_num;
+ cell.num_resources = 2;
+
+ memset(&resources, 0, sizeof(resources));
+
+ resources[0].start = cte.offset;
+ resources[0].end = cte.offset + (cte.length - 1);
+ resources[0].flags = IORESOURCE_MEM;
+
+ resources[1].start = pcard->pdev->irq;
+ resources[1].end = pcard->pdev->irq;
+ resources[1].flags = IORESOURCE_IRQ;
+
+ cell.resources = resources;
+
+ return mfd_add_devices(
+ PCARD_TO_DEV(pcard), // parent
+ pcard->card_num * 100, // id
+ &cell, // struct mfd_cell *
+ 1, // ndevs
+ &pcard->regs_base_resource,
+ 0, // irq_base
+ NULL // struct irq_domain *
+ );
+}
+
+
+struct kpc_uio_device {
+ struct list_head list;
+ struct kp2000_device *pcard;
+ struct device *dev;
+ struct uio_info uioinfo;
+ struct core_table_entry cte;
+ u16 core_num;
+};
+
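+/* Generic show routine for the per-core sysfs attributes declared below;
+ * the attribute name is matched against the fields cached in the
+ * struct kpc_uio_device so one callback can serve all of them.  Note that
+ * the s2c_dma_ch and c2s_dma_ch names are not matched here, so those two
+ * attributes currently read back empty.
+ */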
+static ssize_t show_attr(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct kpc_uio_device *kudev = dev_get_drvdata(dev);
+
+ #define ATTR_NAME_CMP(v) (strcmp(v, attr->attr.name) == 0)
+ if (ATTR_NAME_CMP("offset")) {
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kudev->cte.offset);
+ } else if (ATTR_NAME_CMP("size")) {
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kudev->cte.length);
+ } else if (ATTR_NAME_CMP("type")) {
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kudev->cte.type);
+ } else if (ATTR_NAME_CMP("s2c_dma")) {
+ if (kudev->cte.s2c_dma_present)
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kudev->cte.s2c_dma_channel_num);
+ return scnprintf(buf, PAGE_SIZE, "not present\n");
+ } else if (ATTR_NAME_CMP("c2s_dma")) {
+ if (kudev->cte.c2s_dma_present)
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kudev->cte.c2s_dma_channel_num);
+ return scnprintf(buf, PAGE_SIZE, "not present\n");
+ } else if (ATTR_NAME_CMP("irq_count")) {
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kudev->cte.irq_count);
+ } else if (ATTR_NAME_CMP("irq_base_num")) {
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kudev->cte.irq_base_num);
+ } else if (ATTR_NAME_CMP("core_num")) {
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kudev->core_num);
+ } else {
+ return 0;
+ }
+ #undef ATTR_NAME_CMP
+}
+
+
+DEVICE_ATTR(offset, 0444, show_attr, NULL);
+DEVICE_ATTR(size, 0444, show_attr, NULL);
+DEVICE_ATTR(type, 0444, show_attr, NULL);
+DEVICE_ATTR(s2c_dma_ch, 0444, show_attr, NULL);
+DEVICE_ATTR(c2s_dma_ch, 0444, show_attr, NULL);
+DEVICE_ATTR(s2c_dma, 0444, show_attr, NULL);
+DEVICE_ATTR(c2s_dma, 0444, show_attr, NULL);
+DEVICE_ATTR(irq_count, 0444, show_attr, NULL);
+DEVICE_ATTR(irq_base_num, 0444, show_attr, NULL);
+DEVICE_ATTR(core_num, 0444, show_attr, NULL);
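+
+/* Referenced from kp2000_module.c, where ATTRIBUTE_GROUPS() wraps this
+ * array and installs it as the dev_groups of the kpc_uio class, so every
+ * device created by probe_core_uio() carries these attributes.
+ */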
+struct attribute * kpc_uio_class_attrs[] = {
+ &dev_attr_offset.attr,
+ &dev_attr_size.attr,
+ &dev_attr_type.attr,
+ &dev_attr_s2c_dma_ch.attr,
+ &dev_attr_c2s_dma_ch.attr,
+ &dev_attr_s2c_dma.attr,
+ &dev_attr_c2s_dma.attr,
+ &dev_attr_irq_count.attr,
+ &dev_attr_irq_base_num.attr,
+ &dev_attr_core_num.attr,
+ NULL,
+};
+
+
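+/* All UIO cores share the card's single PCI interrupt.  A core's IRQ is
+ * treated as pending only if its bit is set in REG_INTERRUPT_ACTIVE and
+ * clear in REG_INTERRUPT_MASK.
+ */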
+static
+int kp2000_check_uio_irq(struct kp2000_device *pcard, u32 irq_num)
+{
+ u64 interrupt_active = readq(pcard->sysinfo_regs_base + REG_INTERRUPT_ACTIVE);
+ u64 interrupt_mask_inv = ~readq(pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
+ u64 irq_check_mask = (1ULL << irq_num);
+ if (interrupt_active & irq_check_mask){ // if it's active (interrupt pending)
+ if (interrupt_mask_inv & irq_check_mask){ // and if it's not masked off
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static
+irqreturn_t kuio_handler(int irq, struct uio_info *uioinfo)
+{
+ struct kpc_uio_device *kudev = uioinfo->priv;
+ if (irq != kudev->pcard->pdev->irq)
+ return IRQ_NONE;
+
+ if (kp2000_check_uio_irq(kudev->pcard, kudev->cte.irq_base_num)){
+ writeq((1ULL << kudev->cte.irq_base_num), kudev->pcard->sysinfo_regs_base + REG_INTERRUPT_ACTIVE); // Clear the active flag
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static
+int kuio_irqcontrol(struct uio_info *uioinfo, s32 irq_on)
+{
+ struct kpc_uio_device *kudev = uioinfo->priv;
+ struct kp2000_device *pcard = kudev->pcard;
+ u64 mask;
+
+ lock_card(pcard);
+ mask = readq(pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
+ if (irq_on){
+ mask &= ~(1ULL << (kudev->cte.irq_base_num));
+ } else {
+ mask |= (1ULL << (kudev->cte.irq_base_num));
+ }
+ writeq(mask, pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
+ unlock_card(pcard);
+
+ return 0;
+}
+
+int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard, char *name, const struct core_table_entry cte)
+{
+ struct kpc_uio_device *kudev;
+ int rv;
+
+ dev_dbg(&pcard->pdev->dev, "Found UIO core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n", cte.type, KPC_OLD_S2C_DMA_CH_NUM(cte), KPC_OLD_C2S_DMA_CH_NUM(cte), cte.offset, cte.length, cte.length / 8);
+
+ kudev = kzalloc(sizeof(struct kpc_uio_device), GFP_KERNEL);
+ if (!kudev){
+ dev_err(&pcard->pdev->dev, "probe_core_uio: failed to kzalloc kpc_uio_device\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&kudev->list);
+ kudev->pcard = pcard;
+ kudev->cte = cte;
+ kudev->core_num = core_num;
+
+ kudev->uioinfo.priv = kudev;
+ kudev->uioinfo.name = name;
+ kudev->uioinfo.version = "0.0";
+ if (cte.irq_count > 0){
+ kudev->uioinfo.irq_flags = IRQF_SHARED;
+ kudev->uioinfo.irq = pcard->pdev->irq;
+ kudev->uioinfo.handler = kuio_handler;
+ kudev->uioinfo.irqcontrol = kuio_irqcontrol;
+ } else {
+ kudev->uioinfo.irq = 0;
+ }
+
+ kudev->uioinfo.mem[0].name = "uiomap";
+ kudev->uioinfo.mem[0].addr = pci_resource_start(pcard->pdev, REG_BAR) + cte.offset;
+ kudev->uioinfo.mem[0].size = (cte.length + PAGE_SIZE-1) & ~(PAGE_SIZE-1); // Round up to nearest PAGE_SIZE boundary
+ kudev->uioinfo.mem[0].memtype = UIO_MEM_PHYS;
+
+ kudev->dev = device_create(kpc_uio_class, &pcard->pdev->dev, MKDEV(0,0), kudev, "%s.%d.%d.%d", kudev->uioinfo.name, pcard->card_num, cte.type, kudev->core_num);
+ if (IS_ERR(kudev->dev)) {
+ dev_err(&pcard->pdev->dev, "probe_core_uio device_create failed!\n");
+ kfree(kudev);
+ return -ENODEV;
+ }
+ dev_set_drvdata(kudev->dev, kudev);
+
+ rv = uio_register_device(kudev->dev, &kudev->uioinfo);
+ if (rv){
+ dev_err(&pcard->pdev->dev, "probe_core_uio failed uio_register_device: %d\n", rv);
+ device_unregister(kudev->dev);
+ kfree(kudev);
+ return rv;
+ }
+
+ list_add_tail(&kudev->list, &pcard->uio_devices_list);
+
+ return 0;
+}
+
+
+static int create_dma_engine_core(struct kp2000_device *pcard, size_t engine_regs_offset, int engine_num, int irq_num)
+{
+ struct mfd_cell cell = {0};
+ struct resource resources[2];
+
+ dev_dbg(&pcard->pdev->dev, "create_dma_core(pcard = [%p], engine_regs_offset = %zx, engine_num = %d)\n", pcard, engine_regs_offset, engine_num);
+
+ cell.platform_data = NULL;
+ cell.pdata_size = 0;
+ cell.id = engine_num;
+ cell.name = KP_DRIVER_NAME_DMA_CONTROLLER;
+ cell.num_resources = 2;
+
+ memset(&resources, 0, sizeof(resources));
+
+ resources[0].start = engine_regs_offset;
+ resources[0].end = engine_regs_offset + (KPC_DMA_ENGINE_SIZE - 1);
+ resources[0].flags = IORESOURCE_MEM;
+
+ resources[1].start = irq_num;
+ resources[1].end = irq_num;
+ resources[1].flags = IORESOURCE_IRQ;
+
+ cell.resources = resources;
+
+ return mfd_add_devices(
+ PCARD_TO_DEV(pcard), // parent
+ pcard->card_num * 100, // id
+ &cell, // struct mfd_cell *
+ 1, // ndevs
+ &pcard->dma_base_resource,
+ 0, // irq_base
+ NULL // struct irq_domain *
+ );
+}
+
+static int kp2000_setup_dma_controller(struct kp2000_device *pcard)
+{
+ int err;
+ unsigned int i;
+ u64 capabilities_reg;
+
+ // S2C Engines
+ for (i = 0 ; i < 32 ; i++){
+ capabilities_reg = readq( pcard->dma_bar_base + KPC_DMA_S2C_BASE_OFFSET + (KPC_DMA_ENGINE_SIZE * i) );
+ if (capabilities_reg & ENGINE_CAP_PRESENT_MASK){
+ err = create_dma_engine_core(pcard, (KPC_DMA_S2C_BASE_OFFSET + (KPC_DMA_ENGINE_SIZE * i)), i, pcard->pdev->irq);
+ if (err) goto err_out;
+ }
+ }
+ // C2S Engines
+ for (i = 0 ; i < 32 ; i++){
+ capabilities_reg = readq( pcard->dma_bar_base + KPC_DMA_C2S_BASE_OFFSET + (KPC_DMA_ENGINE_SIZE * i) );
+ if (capabilities_reg & ENGINE_CAP_PRESENT_MASK){
+ err = create_dma_engine_core(pcard, (KPC_DMA_C2S_BASE_OFFSET + (KPC_DMA_ENGINE_SIZE * i)), 32+i, pcard->pdev->irq);
+ if (err) goto err_out;
+ }
+ }
+
+ return 0;
+
+err_out:
+ dev_err(&pcard->pdev->dev, "kp2000_setup_dma_controller: failed to add a DMA Engine: %d\n", err);
+ return err;
+}
+
+int kp2000_probe_cores(struct kp2000_device *pcard)
+{
+ int err = 0;
+ int i;
+ int current_type_id;
+ u64 read_val;
+ unsigned int highest_core_id = 0;
+ struct core_table_entry cte;
+
+ dev_dbg(&pcard->pdev->dev, "kp2000_probe_cores(pcard = %p / %d)\n", pcard, pcard->card_num);
+
+ err = kp2000_setup_dma_controller(pcard);
+ if (err) return err;
+
+ INIT_LIST_HEAD(&pcard->uio_devices_list);
+
+ // First, iterate the core table looking for the highest CORE_ID
+ for (i = 0 ; i < pcard->core_table_length ; i++){
+ read_val = readq(pcard->sysinfo_regs_base + ((pcard->core_table_offset + i) * 8));
+ parse_core_table_entry(&cte, read_val, pcard->core_table_rev);
+ dbg_cte(pcard, &cte);
+ if (cte.type > highest_core_id){
+ highest_core_id = cte.type;
+ }
+ if (cte.type == KP_CORE_ID_INVALID){
+ dev_info(&pcard->pdev->dev, "Found Invalid core: %016llx\n", read_val);
+ }
+ }
+ // Then, iterate over the possible core types.
+ for (current_type_id = 1 ; current_type_id <= highest_core_id ; current_type_id++){
+ unsigned int core_num = 0;
+ // Foreach core type, iterate the whole table and instantiate subdevices for each core.
+ // Yes, this is O(n*m) but the actual runtime is small enough that it's an acceptable tradeoff.
+ for (i = 0 ; i < pcard->core_table_length ; i++){
+ read_val = readq(pcard->sysinfo_regs_base + ((pcard->core_table_offset + i) * 8));
+ parse_core_table_entry(&cte, read_val, pcard->core_table_rev);
+
+ if (cte.type == current_type_id){
+ switch (cte.type){
+ case KP_CORE_ID_I2C:
+ err = probe_core_basic(core_num, pcard, KP_DRIVER_NAME_I2C, cte);
+ break;
+
+ case KP_CORE_ID_SPI:
+ err = probe_core_basic(core_num, pcard, KP_DRIVER_NAME_SPI, cte);
+ break;
+
+ default:
+ err = probe_core_uio(core_num, pcard, "kpc_uio", cte);
+ break;
+ }
+ if (err){
+ dev_err(&pcard->pdev->dev, "kp2000_probe_cores: failed to add core %d: %d\n", i, err);
+ return err;
+ }
+ core_num++;
+ }
+ }
+ }
+
+ // Finally, instantiate a UIO device for the core_table.
+ cte.type = 0; // CORE_ID_BOARD_INFO
+ cte.offset = 0; // board info is always at the beginning
+ cte.length = 512*8;
+ cte.s2c_dma_present = false;
+ cte.s2c_dma_channel_num = 0;
+ cte.c2s_dma_present = false;
+ cte.c2s_dma_channel_num = 0;
+ cte.irq_count = 0;
+ cte.irq_base_num = 0;
+ err = probe_core_uio(0, pcard, "kpc_uio", cte);
+ if (err){
+ dev_err(&pcard->pdev->dev, "kp2000_probe_cores: failed to add board_info core: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void kp2000_remove_cores(struct kp2000_device *pcard)
+{
+ struct list_head *ptr;
+ struct list_head *next;
+ list_for_each_safe(ptr, next, &pcard->uio_devices_list){
+ struct kpc_uio_device *kudev = list_entry(ptr, struct kpc_uio_device, list);
+ uio_unregister_device(&kudev->uioinfo);
+ device_unregister(kudev->dev);
+ list_del(&kudev->list);
+ kfree(kudev);
+ }
+}
+
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
new file mode 100644
index 000000000000..40390cdd3c8d
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include "pcie.h"
+
+
+/*******************************************************
+ * SysFS Attributes
+ ******************************************************/
+static ssize_t show_attr(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct kp2000_device *pcard;
+
+ if (!pdev) return -ENXIO;
+ pcard = pci_get_drvdata(pdev);
+ if (!pcard) return -ENXIO;
+
+ if (strcmp("ssid", attr->attr.name) == 0){ return scnprintf(buf, PAGE_SIZE, "%016llx\n", pcard->ssid); } else
+ if (strcmp("ddna", attr->attr.name) == 0){ return scnprintf(buf, PAGE_SIZE, "%016llx\n", pcard->ddna); } else
+ if (strcmp("card_id", attr->attr.name) == 0){ return scnprintf(buf, PAGE_SIZE, "%08x\n", pcard->card_id); } else
+ if (strcmp("hw_rev", attr->attr.name) == 0){ return scnprintf(buf, PAGE_SIZE, "%08x\n", pcard->hardware_revision); } else
+ if (strcmp("build", attr->attr.name) == 0){ return scnprintf(buf, PAGE_SIZE, "%08x\n", pcard->build_version); } else
+ if (strcmp("build_date", attr->attr.name) == 0){ return scnprintf(buf, PAGE_SIZE, "%08x\n", pcard->build_datestamp); } else
+ if (strcmp("build_time", attr->attr.name) == 0){ return scnprintf(buf, PAGE_SIZE, "%08x\n", pcard->build_timestamp); } else
+ { return -ENXIO; }
+}
+
+static ssize_t show_cpld_config_reg(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct kp2000_device *pcard;
+ u64 val;
+
+ if (!pdev)
+ return -ENXIO;
+
+ pcard = pci_get_drvdata(pdev);
+ if (!pcard)
+ return -ENXIO;
+
+ val = readq(pcard->sysinfo_regs_base + REG_CPLD_CONFIG);
+ return scnprintf(buf, PAGE_SIZE, "%016llx\n", val);
+}
+static ssize_t cpld_reconfigure(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ long wr_val;
+ struct kp2000_device *pcard;
+ int rv;
+
+ if (!pdev) return -ENXIO;
+ pcard = pci_get_drvdata(pdev);
+ if (!pcard) return -ENXIO;
+
+ rv = kstrtol(buf, 0, &wr_val);
+ if (rv < 0) return rv;
+ if (wr_val < 0 || wr_val > 7) return -EINVAL;
+
+ wr_val = wr_val << 8;
+ wr_val |= 0x1; // Set the "Configure Go" bit
+ writeq(wr_val, pcard->sysinfo_regs_base + REG_CPLD_CONFIG);
+ return count;
+}
+
+
+DEVICE_ATTR(ssid, 0444, show_attr, NULL);
+DEVICE_ATTR(ddna, 0444, show_attr, NULL);
+DEVICE_ATTR(card_id, 0444, show_attr, NULL);
+DEVICE_ATTR(hw_rev, 0444, show_attr, NULL);
+DEVICE_ATTR(build, 0444, show_attr, NULL);
+DEVICE_ATTR(build_date, 0444, show_attr, NULL);
+DEVICE_ATTR(build_time, 0444, show_attr, NULL);
+DEVICE_ATTR(cpld_reg, 0444, show_cpld_config_reg, NULL);
+DEVICE_ATTR(cpld_reconfigure, 0220, NULL, cpld_reconfigure);
+
+static const struct attribute * kp_attr_list[] = {
+ &dev_attr_ssid.attr,
+ &dev_attr_ddna.attr,
+ &dev_attr_card_id.attr,
+ &dev_attr_hw_rev.attr,
+ &dev_attr_build.attr,
+ &dev_attr_build_date.attr,
+ &dev_attr_build_time.attr,
+ &dev_attr_cpld_reg.attr,
+ &dev_attr_cpld_reconfigure.attr,
+ NULL,
+};
+
+
+/*******************************************************
+ * Functions
+ ******************************************************/
+
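+/* The FPGA latches the SSID some time after configuration; bit 63 of the
+ * register acts as a "valid" flag.  Poll for up to two seconds, and fall
+ * back to an SSID of zero if it never becomes valid.
+ */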
+static void wait_and_read_ssid(struct kp2000_device *pcard)
+{
+ u64 read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_SSID);
+ unsigned long timeout;
+
+ if (read_val & 0x8000000000000000){
+ pcard->ssid = read_val;
+ return;
+ }
+
+ timeout = jiffies + (HZ * 2);
+ do {
+ read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_SSID);
+ if (read_val & 0x8000000000000000){
+ pcard->ssid = read_val;
+ return;
+ }
+ cpu_relax();
+ //schedule();
+ } while (time_before(jiffies, timeout));
+
+ dev_notice(&pcard->pdev->dev, "SSID didn't show up!\n");
+
+ #if 0
+ // Timed out waiting for the SSID to show up, just use the DDNA instead?
+ read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_DDNA);
+ pcard->ssid = read_val;
+ #else
+ // Timed out waiting for the SSID to show up, stick all zeros in the value
+ pcard->ssid = 0;
+ #endif
+}
+
+static int read_system_regs(struct kp2000_device *pcard)
+{
+ u64 read_val;
+
+ read_val = readq(pcard->sysinfo_regs_base + REG_MAGIC_NUMBER);
+ if (read_val != KP2000_MAGIC_VALUE){
+ dev_err(&pcard->pdev->dev, "Invalid magic! Got: 0x%016llx Want: 0x%016lx\n", read_val, KP2000_MAGIC_VALUE);
+ return -EILSEQ;
+ }
+
+ read_val = readq(pcard->sysinfo_regs_base + REG_CARD_ID_AND_BUILD);
+ pcard->card_id = (read_val & 0xFFFFFFFF00000000) >> 32;
+ pcard->build_version = (read_val & 0x00000000FFFFFFFF) >> 0;
+
+ read_val = readq(pcard->sysinfo_regs_base + REG_DATE_AND_TIME_STAMPS);
+ pcard->build_datestamp = (read_val & 0xFFFFFFFF00000000) >> 32;
+ pcard->build_timestamp = (read_val & 0x00000000FFFFFFFF) >> 0;
+
+ read_val = readq(pcard->sysinfo_regs_base + REG_CORE_TABLE_OFFSET);
+ pcard->core_table_length = (read_val & 0xFFFFFFFF00000000) >> 32;
+ pcard->core_table_offset = (read_val & 0x00000000FFFFFFFF) >> 0;
+
+ wait_and_read_ssid(pcard);
+
+ read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_HW_ID);
+ pcard->core_table_rev = (read_val & 0x0000000000000F00) >> 8;
+ pcard->hardware_revision = (read_val & 0x000000000000001F);
+
+ read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_DDNA);
+ pcard->ddna = read_val;
+
+ dev_info(&pcard->pdev->dev, "system_regs: %08x %08x %08x %08x %02x %d %d %016llx %016llx\n",
+ pcard->card_id,
+ pcard->build_version,
+ pcard->build_datestamp,
+ pcard->build_timestamp,
+ pcard->hardware_revision,
+ pcard->core_table_rev,
+ pcard->core_table_length,
+ pcard->ssid,
+ pcard->ddna
+ );
+
+ if (pcard->core_table_rev > 1){
+ dev_err(&pcard->pdev->dev, "core table entry revision is higher than we can deal with, cannot continue with this card!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+irqreturn_t kp2000_irq_handler(int irq, void *dev_id)
+{
+ struct kp2000_device *pcard = (struct kp2000_device*)dev_id;
+ SetBackEndControl(pcard->dma_common_regs, KPC_DMA_CARD_IRQ_ENABLE | KPC_DMA_CARD_USER_INTERRUPT_MODE | KPC_DMA_CARD_USER_INTERRUPT_ACTIVE);
+ return IRQ_HANDLED;
+}
+
+int kp2000_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int err = 0;
+ struct kp2000_device *pcard;
+ static int card_count = 1;
+ int rv;
+ unsigned long reg_bar_phys_addr;
+ unsigned long reg_bar_phys_len;
+ unsigned long dma_bar_phys_addr;
+ unsigned long dma_bar_phys_len;
+ u16 regval;
+
+ dev_dbg(&pdev->dev, "kp2000_pcie_probe(pdev = [%p], id = [%p])\n", pdev, id);
+
+ //{ Step 1: Allocate a struct for the pcard
+ pcard = kzalloc(sizeof(struct kp2000_device), GFP_KERNEL);
+ if (NULL == pcard){
+ dev_err(&pdev->dev, "probe: failed to allocate private card data\n");
+ return -ENOMEM;
+ }
+ dev_dbg(&pdev->dev, "probe: allocated struct kp2000_device @ %p\n", pcard);
+ //}
+
+ //{ Step 2: Initialize trivial pcard elements
+ pcard->card_num = card_count;
+ card_count++;
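+ // NOTE: card_count is a bare static, so concurrent probes could race on
+ // the card_num assignment (see the TODO about making this atomic).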
+ scnprintf(pcard->name, 16, "kpcard%d", pcard->card_num);
+
+ mutex_init(&pcard->sem);
+ lock_card(pcard);
+
+ pcard->pdev = pdev;
+ pci_set_drvdata(pdev, pcard);
+ //}
+
+ //{ Step 3: Enable PCI device
+ err = pci_enable_device(pcard->pdev);
+ if (err){
+ dev_err(&pcard->pdev->dev, "probe: failed to enable PCIE2000 PCIe device (%d)\n", err);
+ goto out3;
+ }
+ //}
+
+ //{ Step 4: Setup the Register BAR
+ reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR);
+ reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR);
+
+ pcard->regs_bar_base = ioremap_nocache(reg_bar_phys_addr, PAGE_SIZE);
+ if (NULL == pcard->regs_bar_base){
+ dev_err(&pcard->pdev->dev, "probe: REG_BAR could not remap memory to virtual space\n");
+ err = -ENODEV;
+ goto out4;
+ }
+ dev_dbg(&pcard->pdev->dev, "probe: REG_BAR virt hardware address start [%p]\n", pcard->regs_bar_base);
+
+ err = pci_request_region(pcard->pdev, REG_BAR, KP_DRIVER_NAME_KP2000);
+ if (err){
+ iounmap(pcard->regs_bar_base);
+ dev_err(&pcard->pdev->dev, "probe: failed to acquire PCI region (%d)\n", err);
+ err = -ENODEV;
+ goto out4;
+ }
+
+ pcard->regs_base_resource.start = reg_bar_phys_addr;
+ pcard->regs_base_resource.end = reg_bar_phys_addr + reg_bar_phys_len - 1;
+ pcard->regs_base_resource.flags = IORESOURCE_MEM;
+ //}
+
+ //{ Step 5: Setup the DMA BAR
+ dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR);
+ dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR);
+
+ pcard->dma_bar_base = ioremap_nocache(dma_bar_phys_addr, dma_bar_phys_len);
+ if (NULL == pcard->dma_bar_base){
+ dev_err(&pcard->pdev->dev, "probe: DMA_BAR could not remap memory to virtual space\n");
+ err = -ENODEV;
+ goto out5;
+ }
+ dev_dbg(&pcard->pdev->dev, "probe: DMA_BAR virt hardware address start [%p]\n", pcard->dma_bar_base);
+
+ pcard->dma_common_regs = pcard->dma_bar_base + KPC_DMA_COMMON_OFFSET;
+
+ err = pci_request_region(pcard->pdev, DMA_BAR, "kp2000_pcie");
+ if (err){
+ iounmap(pcard->dma_bar_base);
+ dev_err(&pcard->pdev->dev, "probe: failed to acquire PCI region (%d)\n", err);
+ err = -ENODEV;
+ goto out5;
+ }
+
+ pcard->dma_base_resource.start = dma_bar_phys_addr;
+ pcard->dma_base_resource.end = dma_bar_phys_addr + dma_bar_phys_len - 1;
+ pcard->dma_base_resource.flags = IORESOURCE_MEM;
+ //}
+
+ //{ Step 6: System Regs
+ pcard->sysinfo_regs_base = pcard->regs_bar_base;
+ err = read_system_regs(pcard);
+ if (err)
+ goto out6;
+
+ // Disable all "user" interrupts because they're not used yet.
+ writeq(0xFFFFFFFFFFFFFFFF, pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
+ //}
+
+ //{ Step 7: Configure PCI thingies
+ // let the card master PCIe
+ pci_set_master(pcard->pdev);
+ // enable IO and mem if not already done
+ pci_read_config_word(pcard->pdev, PCI_COMMAND, &regval);
+ regval |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pcard->pdev, PCI_COMMAND, regval);
+
+ // Clear relaxed ordering bit
+ pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN, 0);
+
+ // Set Max_Payload_Size and Max_Read_Request_Size
+ regval = (0x0) << 5; // Max_Payload_Size = 128 B
+ pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_PAYLOAD, regval);
+ regval = (0x0) << 12; // Max_Read_Request_Size = 128 B
+ pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, regval);
+
+ // Enable error reporting for: Correctable Errors, Non-Fatal Errors, Fatal Errors, Unsupported Requests
+ pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL, 0, PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
+
+ err = dma_set_mask(PCARD_TO_DEV(pcard), DMA_BIT_MASK(64));
+ if (err){
+ dev_err(&pcard->pdev->dev, "CANNOT use DMA mask %0llx\n", DMA_BIT_MASK(64));
+ goto out7;
+ }
+ dev_dbg(&pcard->pdev->dev, "Using DMA mask %0llx\n", dma_get_mask(PCARD_TO_DEV(pcard)));
+ //}
+
+ //{ Step 8: Configure IRQs
+ err = pci_enable_msi(pcard->pdev);
+ if (err < 0)
+ goto out8a;
+
+ rv = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED, pcard->name, pcard);
+ if (rv){
+ dev_err(&pcard->pdev->dev, "kp2000_pcie_probe: failed to request_irq: %d\n", rv);
+ goto out8b;
+ }
+ //}
+
+ //{ Step 9: Setup sysfs attributes
+ err = sysfs_create_files(&(pdev->dev.kobj), kp_attr_list);
+ if (err){
+ dev_err(&pdev->dev, "Failed to add sysfs files: %d\n", err);
+ goto out9;
+ }
+ //}
+
+ //{ Step 10: Setup misc device
+ pcard->miscdev.minor = MISC_DYNAMIC_MINOR;
+ pcard->miscdev.fops = &kp2000_fops;
+ pcard->miscdev.parent = &pcard->pdev->dev;
+ pcard->miscdev.name = pcard->name;
+
+ err = misc_register(&pcard->miscdev);
+ if (err){
+ dev_err(&pcard->pdev->dev, "kp2000_pcie_probe: misc_register failed: %d\n", err);
+ goto out10;
+ }
+ //}
+
+ //{ Step 11: Probe cores
+ err = kp2000_probe_cores(pcard);
+ if (err)
+ goto out11;
+ //}
+
+ //{ Step 12: Enable IRQs in HW
+ SetBackEndControl(pcard->dma_common_regs, KPC_DMA_CARD_IRQ_ENABLE | KPC_DMA_CARD_USER_INTERRUPT_MODE);
+ //}
+
+ dev_dbg(&pcard->pdev->dev, "kp2000_pcie_probe() complete!\n");
+ unlock_card(pcard);
+ return 0;
+
+ out11:
+ misc_deregister(&pcard->miscdev);
+ out10:
+ sysfs_remove_files(&(pdev->dev.kobj), kp_attr_list);
+ out9:
+ free_irq(pcard->pdev->irq, pcard);
+ out8b:
+ pci_disable_msi(pcard->pdev);
+ out8a:
+ out7:
+ out6:
+ iounmap(pcard->dma_bar_base);
+ pci_release_region(pdev, DMA_BAR);
+ pcard->dma_bar_base = NULL;
+ out5:
+ iounmap(pcard->regs_bar_base);
+ pci_release_region(pdev, REG_BAR);
+ pcard->regs_bar_base = NULL;
+ out4:
+ pci_disable_device(pcard->pdev);
+ out3:
+ unlock_card(pcard);
+ kfree(pcard);
+ return err;
+}
+
+
+void kp2000_pcie_remove(struct pci_dev *pdev)
+{
+ struct kp2000_device *pcard = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "kp2000_pcie_remove(pdev=%p)\n", pdev);
+
+ if (pcard == NULL) return;
+
+ lock_card(pcard);
+ kp2000_remove_cores(pcard);
+ mfd_remove_devices(PCARD_TO_DEV(pcard));
+ misc_deregister(&pcard->miscdev);
+ sysfs_remove_files(&(pdev->dev.kobj), kp_attr_list);
+ free_irq(pcard->pdev->irq, pcard);
+ pci_disable_msi(pcard->pdev);
+ if (pcard->dma_bar_base != NULL){
+ iounmap(pcard->dma_bar_base);
+ pci_release_region(pdev, DMA_BAR);
+ pcard->dma_bar_base = NULL;
+ }
+ if (pcard->regs_bar_base != NULL){
+ iounmap(pcard->regs_bar_base);
+ pci_release_region(pdev, REG_BAR);
+ pcard->regs_bar_base = NULL;
+ }
+ pci_disable_device(pcard->pdev);
+ pci_set_drvdata(pdev, NULL);
+ unlock_card(pcard);
+ kfree(pcard);
+}
diff --git a/drivers/staging/kpc2000/kpc2000/dma_common_defs.h b/drivers/staging/kpc2000/kpc2000/dma_common_defs.h
new file mode 100644
index 000000000000..f35e636b1fb7
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc2000/dma_common_defs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef KPC_DMA_COMMON_DEFS_H_
+#define KPC_DMA_COMMON_DEFS_H_
+
+#define KPC_DMA_COMMON_OFFSET 0x4000
+#define KPC_DMA_S2C_BASE_OFFSET 0x0000
+#define KPC_DMA_C2S_BASE_OFFSET 0x2000
+#define KPC_DMA_ENGINE_SIZE 0x0100
+#define ENGINE_CAP_PRESENT_MASK 0x1
+
+
+#define KPC_DMA_CARD_IRQ_ENABLE (1 << 0)
+#define KPC_DMA_CARD_IRQ_ACTIVE (1 << 1)
+#define KPC_DMA_CARD_IRQ_PENDING (1 << 2)
+#define KPC_DMA_CARD_IRQ_MSI (1 << 3)
+#define KPC_DMA_CARD_USER_INTERRUPT_MODE (1 << 4)
+#define KPC_DMA_CARD_USER_INTERRUPT_ACTIVE (1 << 5)
+#define KPC_DMA_CARD_IRQ_MSIX_MODE (1 << 6)
+#define KPC_DMA_CARD_MAX_PAYLOAD_SIZE_MASK 0x0700
+#define KPC_DMA_CARD_MAX_READ_REQUEST_SIZE_MASK 0x7000
+#define KPC_DMA_CARD_S2C_INTERRUPT_STATUS_MASK 0x00FF0000
+#define KPC_DMA_CARD_C2S_INTERRUPT_STATUS_MASK 0xFF000000
+
+static inline void SetBackEndControl(void __iomem *regs, u32 value)
+{
+ writel(value, regs + 0);
+}
+static inline u32 GetBackEndStatus(void __iomem *regs)
+{
+ return readl(regs + 0);
+}
+
+static inline u32 BackEndControlSetClear(void __iomem *regs, u32 set_bits, u32 clear_bits)
+{
+ u32 start_val = GetBackEndStatus(regs);
+ u32 new_val = start_val;
+ new_val &= ~clear_bits;
+ new_val |= set_bits;
+ SetBackEndControl(regs, new_val);
+ return start_val;
+}
+
+#endif /* KPC_DMA_COMMON_DEFS_H_ */
diff --git a/drivers/staging/kpc2000/kpc2000/fileops.c b/drivers/staging/kpc2000/kpc2000/fileops.c
new file mode 100644
index 000000000000..b3b0b763fa1e
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc2000/fileops.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/cdev.h>
+#include <linux/uaccess.h> /* copy_*_user */
+#include <linux/rwsem.h>
+#include <linux/idr.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include "pcie.h"
+#include "uapi.h"
+
+int kp2000_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct kp2000_device *pcard = container_of(filp->private_data, struct kp2000_device, miscdev);
+
+ dev_dbg(&pcard->pdev->dev, "kp2000_cdev_open(filp = [%p], pcard = [%p])\n", filp, pcard);
+
+ filp->private_data = pcard; /* so other methods can access it */
+
+ return 0;
+}
+
+int kp2000_cdev_close(struct inode *inode, struct file *filp)
+{
+ struct kp2000_device *pcard = filp->private_data;
+
+ dev_dbg(&pcard->pdev->dev, "kp2000_cdev_close(filp = [%p], pcard = [%p])\n", filp, pcard);
+ return 0;
+}
+
+
+ssize_t kp2000_cdev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct kp2000_device *pcard = filp->private_data;
+ int cnt = 0;
+ int ret;
+#define BUFF_CNT 1024
+ char buff[BUFF_CNT] = {0}; //NOTE: keep this at least as large as the total of the scnprintfs below, and don't print unbounded "%s" strings into it.
+ //NOTE: also, this is an inefficient way to implement the read() call, but it works for any size 'count'.
+
+ if (WARN(NULL == buf, "kp2000_cdev_read: buf is a NULL pointer!\n"))
+ return -EINVAL;
+
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "Card ID : 0x%08x\n", pcard->card_id);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "Build Version : 0x%08x\n", pcard->build_version);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "Build Date : 0x%08x\n", pcard->build_datestamp);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "Build Time : 0x%08x\n", pcard->build_timestamp);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "Core Table Offset : 0x%08x\n", pcard->core_table_offset);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "Core Table Length : 0x%08x\n", pcard->core_table_length);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "Hardware Revision : 0x%08x\n", pcard->hardware_revision);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "SSID : 0x%016llx\n", pcard->ssid);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "DDNA : 0x%016llx\n", pcard->ddna);
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "IRQ Mask : 0x%016llx\n", readq(pcard->sysinfo_regs_base + REG_INTERRUPT_MASK));
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "IRQ Active : 0x%016llx\n", readq(pcard->sysinfo_regs_base + REG_INTERRUPT_ACTIVE));
+ cnt += scnprintf(buff+cnt, BUFF_CNT-cnt, "CPLD : 0x%016llx\n", readq(pcard->sysinfo_regs_base + REG_CPLD_CONFIG));
+
+ if (*f_pos >= cnt)
+ return 0;
+
+ if (count > cnt - *f_pos)
+ count = cnt - *f_pos;
+
+ ret = copy_to_user(buf, buff + *f_pos, count);
+ if (ret)
+ return -EFAULT;
+ *f_pos += count;
+ return count;
+}
+
+ssize_t kp2000_cdev_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
+{
+ return -EINVAL;
+}
+
+long kp2000_cdev_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param)
+{
+ struct kp2000_device *pcard = filp->private_data;
+
+ dev_dbg(&pcard->pdev->dev, "kp2000_cdev_ioctl(filp = [%p], ioctl_num = 0x%08x, ioctl_param = 0x%016lx) pcard = [%p]\n", filp, ioctl_num, ioctl_param, pcard);
+
+ switch (ioctl_num){
+ case KP2000_IOCTL_GET_CPLD_REG: return readq(pcard->sysinfo_regs_base + REG_CPLD_CONFIG);
+ case KP2000_IOCTL_GET_PCIE_ERROR_REG: return readq(pcard->sysinfo_regs_base + REG_PCIE_ERROR_COUNT);
+
+ case KP2000_IOCTL_GET_EVERYTHING: {
+ struct kp2000_regs temp;
+ int ret;
+
+ memset(&temp, 0, sizeof(temp));
+ temp.card_id = pcard->card_id;
+ temp.build_version = pcard->build_version;
+ temp.build_datestamp = pcard->build_datestamp;
+ temp.build_timestamp = pcard->build_timestamp;
+ temp.hw_rev = pcard->hardware_revision;
+ temp.ssid = pcard->ssid;
+ temp.ddna = pcard->ddna;
+ temp.cpld_reg = readq(pcard->sysinfo_regs_base + REG_CPLD_CONFIG);
+
+ ret = copy_to_user((void __user *)ioctl_param, (void *)&temp, sizeof(temp));
+ if (ret)
+ return -EFAULT;
+
+ return sizeof(temp);
+ }
+
+ default:
+ return -ENOTTY;
+ }
+ return -ENOTTY;
+}
+
+
+struct file_operations kp2000_fops = {
+ .owner = THIS_MODULE,
+ .open = kp2000_cdev_open,
+ .release = kp2000_cdev_close,
+ .read = kp2000_cdev_read,
+ //.write = kp2000_cdev_write,
+ //.poll = kp2000_cdev_poll,
+ //.fasync = kp2000_cdev_fasync,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = kp2000_cdev_ioctl,
+};
+
diff --git a/drivers/staging/kpc2000/kpc2000/kp2000_module.c b/drivers/staging/kpc2000/kpc2000/kp2000_module.c
new file mode 100644
index 000000000000..fa3bd266ba54
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc2000/kp2000_module.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include "pcie.h"
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lee.Brooke@Daktronics.com, Matt.Sickler@Daktronics.com");
+MODULE_SOFTDEP("pre: uio post: kpc_nwl_dma kpc_i2c kpc_spi");
+
+struct class *kpc_uio_class;
+ATTRIBUTE_GROUPS(kpc_uio_class);
+
+static const struct pci_device_id kp2000_pci_device_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_DAKTRONICS, PCI_DEVICE_ID_DAKTRONICS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_DAKTRONICS, PCI_DEVICE_ID_DAKTRONICS_KADOKA_P2KR0) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, kp2000_pci_device_ids);
+
+static struct pci_driver kp2000_driver_inst = {
+ .name = "kp2000_pcie",
+ .id_table = kp2000_pci_device_ids,
+ .probe = kp2000_pcie_probe,
+ .remove = kp2000_pcie_remove
+};
+
+
+static int __init kp2000_pcie_init(void)
+{
+ kpc_uio_class = class_create(THIS_MODULE, "kpc_uio");
+ if (IS_ERR(kpc_uio_class))
+ return PTR_ERR(kpc_uio_class);
+
+ kpc_uio_class->dev_groups = kpc_uio_class_groups;
+ return pci_register_driver(&kp2000_driver_inst);
+}
+module_init(kp2000_pcie_init);
+
+static void __exit kp2000_pcie_exit(void)
+{
+ pci_unregister_driver(&kp2000_driver_inst);
+ class_destroy(kpc_uio_class);
+}
+module_exit(kp2000_pcie_exit);
diff --git a/drivers/staging/kpc2000/kpc2000/pcie.h b/drivers/staging/kpc2000/kpc2000/pcie.h
new file mode 100644
index 000000000000..893aebfd1449
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc2000/pcie.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef KP2000_PCIE_H
+#define KP2000_PCIE_H
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include "../kpc.h"
+#include "dma_common_defs.h"
+
+
+/* System Register Map (BAR 1, Start Addr 0)
+ *
+ * BAR Size:
+ * 1048576 (0x100000) bytes = 131072 (0x20000) registers = 256 pages (4K)
+ *
+ * 6 5 4 3 2 1 0
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * 0 <--------------------------- MAGIC ---------------------------->
+ * 1 <----------- Card ID ---------><----------- Revision ---------->
+ * 2 <--------- Date Stamp --------><--------- Time Stamp ---------->
+ * 3 <-------- Core Tbl Len -------><-------- Core Tbl Offset ------>
+ * 4 <---------------------------- SSID ---------------------------->
+ * 5 < HWID >
+ * 6 <------------------------- FPGA DDNA -------------------------->
+ * 7 <------------------------ CPLD Config ------------------------->
+ * 8 <----------------------- IRQ Mask Flags ----------------------->
+ * 9 <---------------------- IRQ Active Flags ---------------------->
+ */
+
+#define REG_WIDTH 8
+#define REG_MAGIC_NUMBER (0 * REG_WIDTH)
+#define REG_CARD_ID_AND_BUILD (1 * REG_WIDTH)
+#define REG_DATE_AND_TIME_STAMPS (2 * REG_WIDTH)
+#define REG_CORE_TABLE_OFFSET (3 * REG_WIDTH)
+#define REG_FPGA_SSID (4 * REG_WIDTH)
+#define REG_FPGA_HW_ID (5 * REG_WIDTH)
+#define REG_FPGA_DDNA (6 * REG_WIDTH)
+#define REG_CPLD_CONFIG (7 * REG_WIDTH)
+#define REG_INTERRUPT_MASK (8 * REG_WIDTH)
+#define REG_INTERRUPT_ACTIVE (9 * REG_WIDTH)
+#define REG_PCIE_ERROR_COUNT (10 * REG_WIDTH)
+
+#define KP2000_MAGIC_VALUE 0x196C61482231894DULL
+
+#define PCI_VENDOR_ID_DAKTRONICS 0x1c33
+#define PCI_DEVICE_ID_DAKTRONICS 0x6021
+
+#define DMA_BAR 0
+#define REG_BAR 1
+
+struct kp2000_device {
+ struct pci_dev *pdev;
+ struct miscdevice miscdev;
+ char name[16];
+
+ unsigned int card_num;
+ struct mutex sem;
+
+ void __iomem *sysinfo_regs_base;
+ void __iomem *regs_bar_base;
+ struct resource regs_base_resource;
+ void __iomem *dma_bar_base;
+ void __iomem *dma_common_regs;
+ struct resource dma_base_resource;
+
+ // "System Registers"
+ u32 card_id;
+ u32 build_version;
+ u32 build_datestamp;
+ u32 build_timestamp;
+ u32 core_table_offset;
+ u32 core_table_length;
+ u8 core_table_rev;
+ u8 hardware_revision;
+ u64 ssid;
+ u64 ddna;
+
+ // IRQ stuff
+ unsigned int irq;
+
+ struct list_head uio_devices_list;
+};
+
+extern struct class *kpc_uio_class;
+extern struct attribute *kpc_uio_class_attrs[];
+
+int kp2000_pcie_probe(struct pci_dev *dev, const struct pci_device_id *id);
+void kp2000_pcie_remove(struct pci_dev *pdev);
+int kp2000_probe_cores(struct kp2000_device *pcard);
+void kp2000_remove_cores(struct kp2000_device *pcard);
+
+extern struct file_operations kp2000_fops;
+
+
+// Define this quick little macro because the expression is used frequently
+#define PCARD_TO_DEV(pcard) (&(pcard->pdev->dev))
+
+static inline void
+lock_card(struct kp2000_device *pcard)
+{
+ BUG_ON(pcard == NULL);
+ mutex_lock(&pcard->sem);
+}
+static inline void
+unlock_card(struct kp2000_device *pcard)
+{
+ BUG_ON(pcard == NULL);
+ mutex_unlock(&pcard->sem);
+}
+
+
+#endif /* KP2000_PCIE_H */
diff --git a/drivers/staging/kpc2000/kpc2000/uapi.h b/drivers/staging/kpc2000/kpc2000/uapi.h
new file mode 100644
index 000000000000..ef8008bcd33d
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc2000/uapi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef KP2000_CDEV_UAPI_H_
+#define KP2000_CDEV_UAPI_H_
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct kp2000_regs {
+ __u32 card_id;
+ __u32 build_version;
+ __u32 build_datestamp;
+ __u32 build_timestamp;
+ __u32 hw_rev;
+ __u64 ssid;
+ __u64 ddna;
+ __u64 cpld_reg;
+};
+
+#define KP2000_IOCTL_GET_CPLD_REG _IOR('k', 9, __u32)
+#define KP2000_IOCTL_GET_PCIE_ERROR_REG _IOR('k', 11, __u32)
+#define KP2000_IOCTL_GET_EVERYTHING _IOR('k', 8, struct kp2000_regs*)
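+
+/*
+ * Illustrative userspace usage (the device node name depends on the card
+ * number assigned at probe time, e.g. /dev/kpcard1 for the first card):
+ *
+ *	struct kp2000_regs regs;
+ *	int fd = open("/dev/kpcard1", O_RDONLY);
+ *
+ *	if (fd >= 0 && ioctl(fd, KP2000_IOCTL_GET_EVERYTHING, &regs) > 0)
+ *		printf("card_id=%08x ssid=%016llx\n", regs.card_id,
+ *		       (unsigned long long)regs.ssid);
+ */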
+
+#endif /* KP2000_CDEV_UAPI_H_ */
diff --git a/drivers/staging/kpc2000/kpc_dma/Makefile b/drivers/staging/kpc2000/kpc_dma/Makefile
new file mode 100644
index 000000000000..fe5db532c8c8
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-m := kpc_dma.o
+kpc_dma-objs += dma.o
+kpc_dma-objs += fileops.o
+kpc_dma-objs += kpc_dma_driver.o
diff --git a/drivers/staging/kpc2000/kpc_dma/dma.c b/drivers/staging/kpc2000/kpc_dma/dma.c
new file mode 100644
index 000000000000..6959bac11388
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/dma.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/rwsem.h>
+#include "kpc_dma_driver.h"
+
+/********** IRQ Handlers **********/
+static
+irqreturn_t ndd_irq_handler(int irq, void *dev_id)
+{
+ struct kpc_dma_device *ldev = (struct kpc_dma_device*)dev_id;
+
+ if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) || (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
+ schedule_work(&ldev->irq_work);
+
+ return IRQ_HANDLED;
+}
+
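+/* Bottom half for the engine interrupt: walk the descriptor ring from the
+ * last completed descriptor up to the engine's completed pointer,
+ * accumulating byte counts and status flags, and invoke
+ * transfer_complete_cb() for each EOP descriptor that carries an acd.
+ */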
+static
+void ndd_irq_worker(struct work_struct *ws)
+{
+ struct kpc_dma_descriptor *cur;
+ struct kpc_dma_device *eng = container_of(ws, struct kpc_dma_device, irq_work);
+ lock_engine(eng);
+
+ if (GetEngineCompletePtr(eng) == 0)
+ goto out;
+
+ if (eng->desc_completed->MyDMAAddr == GetEngineCompletePtr(eng))
+ goto out;
+
+ cur = eng->desc_completed;
+ do {
+ cur = cur->Next;
+ dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n", cur, cur->acd);
+ BUG_ON(cur == eng->desc_next); // Ordering failure.
+
+ if (cur->DescControlFlags & DMA_DESC_CTL_SOP){
+ eng->accumulated_bytes = 0;
+ eng->accumulated_flags = 0;
+ }
+
+ eng->accumulated_bytes += cur->DescByteCount;
+ if (cur->DescStatusFlags & DMA_DESC_STS_ERROR)
+ eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_ERROR;
+
+ if (cur->DescStatusFlags & DMA_DESC_STS_SHORT)
+ eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_SHORT;
+
+ if (cur->DescControlFlags & DMA_DESC_CTL_EOP){
+ if (cur->acd)
+ transfer_complete_cb(cur->acd, eng->accumulated_bytes, eng->accumulated_flags | ACD_FLAG_DONE);
+ }
+
+ eng->desc_completed = cur;
+ } while (cur->MyDMAAddr != GetEngineCompletePtr(eng));
+
+ out:
+ SetClearEngineControl(eng, ENG_CTL_IRQ_ACTIVE, 0);
+
+ unlock_engine(eng);
+}
+
+
+/********** DMA Engine Init/Teardown **********/
+void start_dma_engine(struct kpc_dma_device *eng)
+{
+ eng->desc_next = eng->desc_pool_first;
+ eng->desc_completed = eng->desc_pool_last;
+
+ // Setup the engine pointer registers
+ SetEngineNextPtr(eng, eng->desc_pool_first);
+ SetEngineSWPtr(eng, eng->desc_pool_first);
+ ClearEngineCompletePtr(eng);
+
+ WriteEngineControl(eng, ENG_CTL_DMA_ENABLE | ENG_CTL_IRQ_ENABLE);
+}
+
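+/* Allocate up to desc_cnt descriptors from a DMA pool and link them into a
+ * ring (both the driver's Next pointers and the hardware DescNextDescPtr
+ * addresses), then request the shared IRQ and start the engine.
+ */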
+int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
+{
+ u32 caps;
+ struct kpc_dma_descriptor * cur;
+ struct kpc_dma_descriptor * next;
+ dma_addr_t next_handle;
+ dma_addr_t head_handle;
+ unsigned int i;
+ int rv;
+ dev_dbg(&eng->pldev->dev, "Setting up DMA engine [%p]\n", eng);
+
+ caps = GetEngineCapabilities(eng);
+
+ if (WARN(!(caps & ENG_CAP_PRESENT), "setup_dma_engine() called for DMA Engine at %p which isn't present in hardware!\n", eng))
+ return -ENXIO;
+
+ if (caps & ENG_CAP_DIRECTION){
+ eng->dir = DMA_FROM_DEVICE;
+ } else {
+ eng->dir = DMA_TO_DEVICE;
+ }
+
+ eng->desc_pool_cnt = desc_cnt;
+ eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev, sizeof(struct kpc_dma_descriptor), DMA_DESC_ALIGNMENT, 4096);
+ if (!eng->desc_pool)
+ return -ENOMEM;
+
+ eng->desc_pool_first = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &head_handle);
+ if (!eng->desc_pool_first){
+ dev_err(&eng->pldev->dev, "setup_dma_engine: couldn't allocate desc_pool_first!\n");
+ dma_pool_destroy(eng->desc_pool);
+ return -ENOMEM;
+ }
+
+ eng->desc_pool_first->MyDMAAddr = head_handle;
+ clear_desc(eng->desc_pool_first);
+
+ cur = eng->desc_pool_first;
+ for (i = 1 ; i < eng->desc_pool_cnt ; i++){
+ next = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &next_handle);
+ if (next == NULL)
+ goto done_alloc;
+
+ clear_desc(next);
+ next->MyDMAAddr = next_handle;
+
+ cur->DescNextDescPtr = next_handle;
+ cur->Next = next;
+ cur = next;
+ }
+
+ done_alloc:
+ // Link the last descriptor back to the first, so it's a circular linked list
+ cur->Next = eng->desc_pool_first;
+ cur->DescNextDescPtr = eng->desc_pool_first->MyDMAAddr;
+
+ eng->desc_pool_last = cur;
+ eng->desc_completed = eng->desc_pool_last;
+
+ // Setup work queue
+ INIT_WORK(&eng->irq_work, ndd_irq_worker);
+
+ // Grab IRQ line
+ rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED, KP_DRIVER_NAME_DMA_CONTROLLER, eng);
+ if (rv){
+ dev_err(&eng->pldev->dev, "setup_dma_engine: failed to request_irq: %d\n", rv);
+ return rv;
+ }
+
+ // Turn on the engine!
+ start_dma_engine(eng);
+ unlock_engine(eng);
+
+ return 0;
+}
+
+void stop_dma_engine(struct kpc_dma_device *eng)
+{
+ unsigned long timeout;
+ dev_dbg(&eng->pldev->dev, "Destroying DMA engine [%p]\n", eng);
+
+ // Disable the descriptor engine
+ WriteEngineControl(eng, 0);
+
+ // Wait for descriptor engine to finish current operaion
+ timeout = jiffies + (HZ / 2);
+ while (GetEngineControl(eng) & ENG_CTL_DMA_RUNNING){
+ if (time_after(jiffies, timeout)){
+ dev_crit(&eng->pldev->dev, "DMA_RUNNING still asserted!\n");
+ break;
+ }
+ }
+
+ // Request a reset
+ WriteEngineControl(eng, ENG_CTL_DMA_RESET_REQUEST);
+
+ // Wait for reset request to be processed
+ timeout = jiffies + (HZ / 2);
+ while (GetEngineControl(eng) & (ENG_CTL_DMA_RUNNING | ENG_CTL_DMA_RESET_REQUEST)){
+ if (time_after(jiffies, timeout)){
+ dev_crit(&eng->pldev->dev, "ENG_CTL_DMA_RESET_REQUEST still asserted!\n");
+ break;
+ }
+ }
+
+ // Request a reset
+ WriteEngineControl(eng, ENG_CTL_DMA_RESET);
+
+ // And wait for reset to complete
+ timeout = jiffies + (HZ / 2);
+ while (GetEngineControl(eng) & ENG_CTL_DMA_RESET){
+ if (time_after(jiffies, timeout)){
+ dev_crit(&eng->pldev->dev, "DMA_RESET still asserted!\n");
+ break;
+ }
+ }
+
+ // Clear any persistent bits just to make sure there is no residue from the reset
+ SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE | ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR | ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END | ENG_CTL_DMA_WAITING_PERSIST), 0);
+
+ // Reset performance counters
+
+ // Completely disable the engine
+ WriteEngineControl(eng, 0);
+}
+
+void destroy_dma_engine(struct kpc_dma_device *eng)
+{
+ struct kpc_dma_descriptor * cur;
+ dma_addr_t cur_handle;
+ unsigned int i;
+
+ stop_dma_engine(eng);
+
+ cur = eng->desc_pool_first;
+ cur_handle = eng->desc_pool_first->MyDMAAddr;
+
+ for (i = 0 ; i < eng->desc_pool_cnt ; i++){
+ struct kpc_dma_descriptor *next = cur->Next;
+ dma_addr_t next_handle = cur->DescNextDescPtr;
+ dma_pool_free(eng->desc_pool, cur, cur_handle);
+ cur_handle = next_handle;
+ cur = next;
+ }
+
+ dma_pool_destroy(eng->desc_pool);
+
+ free_irq(eng->irq, eng);
+}
+
+
+
+/********** Helper Functions **********/
+int count_descriptors_available(struct kpc_dma_device *eng)
+{
+ u32 count = 0;
+ struct kpc_dma_descriptor *cur = eng->desc_next;
+ while (cur != eng->desc_completed){
+ BUG_ON(cur == NULL);
+ count++;
+ cur = cur->Next;
+ }
+ return count;
+}
+
+void clear_desc(struct kpc_dma_descriptor *desc)
+{
+ if (desc == NULL)
+ return;
+ desc->DescByteCount = 0;
+ desc->DescStatusErrorFlags = 0;
+ desc->DescStatusFlags = 0;
+ desc->DescUserControlLS = 0;
+ desc->DescUserControlMS = 0;
+ desc->DescCardAddrLS = 0;
+ desc->DescBufferByteCount = 0;
+ desc->DescCardAddrMS = 0;
+ desc->DescControlFlags = 0;
+ desc->DescSystemAddrLS = 0;
+ desc->DescSystemAddrMS = 0;
+ desc->acd = NULL;
+}
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
new file mode 100644
index 000000000000..5741d2b49a7d
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/cdev.h>
+#include <linux/uaccess.h> /* copy_*_user */
+#include <linux/aio.h> /* aio stuff */
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include "kpc_dma_driver.h"
+#include "uapi.h"
+
+/********** Helper Functions **********/
+static inline
+unsigned int count_pages(unsigned long iov_base, size_t iov_len)
+{
+ unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT;
+ unsigned long last = ((iov_base+iov_len-1) & PAGE_MASK) >> PAGE_SHIFT;
+ return last - first + 1;
+}
+
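+/* The DMA engine moves at most 0x80000 bytes per descriptor, so a single
+ * scatterlist entry may have to be split across several descriptors.
+ */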
+static inline
+unsigned int count_parts_for_sge(struct scatterlist *sg)
+{
+ unsigned int sg_length = sg_dma_len(sg);
+ sg_length += (0x80000-1);
+ return (sg_length / 0x80000);
+}
+
+/********** Transfer Helpers **********/
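+/* Pin the user buffer, build and DMA-map a scatterlist for it, fill out one
+ * descriptor per 0x80000-byte chunk of each mapping, then hand the chain to
+ * the engine.  Synchronous callers sleep on a completion that
+ * transfer_complete_cb() signals when the final descriptor finishes.
+ */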
+static
+int kpc_dma_transfer(struct dev_private_data *priv, struct kiocb *kcb, unsigned long iov_base, size_t iov_len)
+{
+ unsigned int i = 0;
+ long rv = 0;
+ struct kpc_dma_device *ldev;
+ struct aio_cb_data *acd;
+ DECLARE_COMPLETION_ONSTACK(done);
+ u32 desc_needed = 0;
+ struct scatterlist *sg;
+ u32 num_descrs_avail;
+ struct kpc_dma_descriptor *desc;
+ unsigned int pcnt;
+ unsigned int p;
+ u64 card_addr;
+ u64 dma_addr;
+ u64 user_ctl;
+
+ BUG_ON(priv == NULL);
+ ldev = priv->ldev;
+ BUG_ON(ldev == NULL);
+
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_transfer(priv = [%p], kcb = [%p], iov_base = [%p], iov_len = %ld) ldev = [%p]\n", priv, kcb, (void*)iov_base, iov_len, ldev);
+
+ acd = kzalloc(sizeof(struct aio_cb_data), GFP_KERNEL);
+ if (!acd){
+ dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the aio data\n");
+ return -ENOMEM;
+ }
+ memset(acd, 0x66, sizeof(struct aio_cb_data));
+
+ acd->priv = priv;
+ acd->ldev = priv->ldev;
+ acd->cpl = &done;
+ acd->flags = 0;
+ acd->kcb = kcb;
+ acd->len = iov_len;
+ acd->page_count = count_pages(iov_base, iov_len);
+
+ // Allocate an array of page pointers
+ acd->user_pages = kcalloc(acd->page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!acd->user_pages){
+ dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the page pointers\n");
+ rv = -ENOMEM;
+ goto err_alloc_userpages;
+ }
+
+ // Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
+ down_read(&current->mm->mmap_sem); /* get memory map semaphore */
+ rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
+ up_read(&current->mm->mmap_sem); /* release the semaphore */
+ if (rv != acd->page_count){
+ dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
+ // Put back any pages that were pinned before the failure so they don't leak
+ for (i = 0 ; i < rv ; i++)
+ put_page(acd->user_pages[i]);
+ rv = -EFAULT;
+ goto err_get_user_pages;
+ }
+
+ // Allocate and setup the sg_table (scatterlist entries)
+ rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE-1), iov_len, GFP_KERNEL);
+ if (rv){
+ dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%ld)\n", rv);
+ goto err_alloc_sg_table;
+ }
+
+ // Setup the DMA mapping for all the sg entries
+ acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
+ if (acd->mapped_entry_count <= 0){
+ dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count);
+ rv = -ENOMEM;
+ goto err_dma_map_sg;
+ }
+
+ // Calculate how many descriptors are actually needed for this transfer.
+ for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i){
+ desc_needed += count_parts_for_sge(sg);
+ }
+
+ lock_engine(ldev);
+
+ // Figure out how many descriptors are available and return an error if there aren't enough
+ num_descrs_avail = count_descriptors_available(ldev);
+ dev_dbg(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+ if (desc_needed >= ldev->desc_pool_cnt){
+ dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+ rv = -EAGAIN;
+ goto err_descr_too_many;
+ }
+ if (desc_needed > num_descrs_avail){
+ dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+ rv = -EMSGSIZE;
+ goto err_descr_too_many;
+ }
+
+ // Loop through all the sg table entries and fill out a descriptor for each one.
+ desc = ldev->desc_next;
+ card_addr = acd->priv->card_addr;
+ for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i){
+ pcnt = count_parts_for_sge(sg);
+ for (p = 0 ; p < pcnt ; p++){
+ // Fill out the descriptor
+ BUG_ON(desc == NULL);
+ clear_desc(desc);
+ if (p != pcnt-1){
+ desc->DescByteCount = 0x80000;
+ } else {
+ desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000);
+ }
+ desc->DescBufferByteCount = desc->DescByteCount;
+
+ desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR;
+ if (i == 0 && p == 0)
+ desc->DescControlFlags |= DMA_DESC_CTL_SOP;
+ if (i == acd->mapped_entry_count-1 && p == pcnt-1)
+ desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE;
+
+ desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF);
+ desc->DescCardAddrMS = (card_addr >> 32) & 0xF;
+ card_addr += desc->DescByteCount;
+
+ dma_addr = sg_dma_address(sg) + (p * 0x80000);
+ desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFF) >> 0;
+ desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000) >> 32;
+
+ user_ctl = acd->priv->user_ctl;
+ if (i == acd->mapped_entry_count-1 && p == pcnt-1){
+ user_ctl = acd->priv->user_ctl_last;
+ }
+ desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFF) >> 0;
+ desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000) >> 32;
+
+ if (i == acd->mapped_entry_count-1 && p == pcnt-1)
+ desc->acd = acd;
+
+ dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n", desc, desc->acd);
+
+ ldev->desc_next = desc->Next;
+ desc = desc->Next;
+ }
+ }
+
+ // Send the filled descriptors off to the hardware to process!
+ SetEngineSWPtr(ldev, ldev->desc_next);
+
+ unlock_engine(ldev);
+
+ // If this is a synchronous kiocb, we need to put the calling process to sleep until the transfer is complete
+ if (kcb == NULL || is_sync_kiocb(kcb)){
+ rv = wait_for_completion_interruptible(&done);
+ // If the user aborted (rv == -ERESTARTSYS), we're no longer responsible for cleaning up the acd
+ if (rv == -ERESTARTSYS){
+ acd->cpl = NULL;
+ }
+ if (rv == 0){
+ rv = acd->len;
+ kfree(acd);
+ }
+ return rv;
+ }
+
+ return -EIOCBQUEUED;
+
+ err_descr_too_many:
+ unlock_engine(ldev);
+ dma_unmap_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
+ sg_free_table(&acd->sgt);
+ err_dma_map_sg:
+ err_alloc_sg_table:
+ for (i = 0 ; i < acd->page_count ; i++){
+ put_page(acd->user_pages[i]);
+ }
+ err_get_user_pages:
+ kfree(acd->user_pages);
+ err_alloc_userpages:
+ kfree(acd);
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_transfer returning with error %ld\n", rv);
+ return rv;
+}
+
+void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags)
+{
+ unsigned int i;
+
+ BUG_ON(acd == NULL);
+ BUG_ON(acd->user_pages == NULL);
+ BUG_ON(acd->sgt.sgl == NULL);
+ BUG_ON(acd->ldev == NULL);
+ BUG_ON(acd->ldev->pldev == NULL);
+
+ dev_dbg(&acd->ldev->pldev->dev, "transfer_complete_cb(acd = [%p])\n", acd);
+
+ for (i = 0 ; i < acd->page_count ; i++){
+ if (!PageReserved(acd->user_pages[i])){
+ set_page_dirty(acd->user_pages[i]);
+ }
+ }
+
+ dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir);
+
+ for (i = 0 ; i < acd->page_count ; i++){
+ put_page(acd->user_pages[i]);
+ }
+
+ sg_free_table(&acd->sgt);
+
+ kfree(acd->user_pages);
+
+ acd->flags = flags;
+
+ if (acd->kcb == NULL || is_sync_kiocb(acd->kcb)){
+ if (acd->cpl){
+ complete(acd->cpl);
+ } else {
+ // There's no completion, so we're responsible for cleaning up the acd
+ kfree(acd);
+ }
+ } else {
+#ifdef CONFIG_KPC_DMA_AIO
+ aio_complete(acd->kcb, acd->len, acd->flags);
+#endif
+ kfree(acd);
+ }
+}
+
+/********** Fileops **********/
+static
+int kpc_dma_open(struct inode *inode, struct file *filp)
+{
+ struct dev_private_data *priv;
+ struct kpc_dma_device *ldev = kpc_dma_lookup_device(iminor(inode));
+ if (ldev == NULL)
+ return -ENODEV;
+
+ if (! atomic_dec_and_test(&ldev->open_count)){
+ atomic_inc(&ldev->open_count);
+ return -EBUSY; /* already open */
+ }
+
+ priv = kzalloc(sizeof(struct dev_private_data), GFP_KERNEL);
+ if (!priv){
+ atomic_inc(&ldev->open_count); /* allocation failed, release the device again */
+ return -ENOMEM;
+ }
+
+ priv->ldev = ldev;
+ filp->private_data = priv;
+
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_open(inode = [%p], filp = [%p]) priv = [%p] ldev = [%p]\n", inode, filp, priv, priv->ldev);
+ return 0;
+}
+
+static
+int kpc_dma_close(struct inode *inode, struct file *filp)
+{
+ struct kpc_dma_descriptor *cur;
+ struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
+ struct kpc_dma_device *eng = priv->ldev;
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_close(inode = [%p], filp = [%p]) priv = [%p], ldev = [%p]\n", inode, filp, priv, priv->ldev);
+
+ lock_engine(eng);
+
+ stop_dma_engine(eng);
+
+ cur = eng->desc_completed->Next;
+ while (cur != eng->desc_next){
+ dev_dbg(&eng->pldev->dev, "Aborting descriptor %p (acd = %p)\n", cur, cur->acd);
+ if (cur->DescControlFlags & DMA_DESC_CTL_EOP){
+ if (cur->acd)
+ transfer_complete_cb(cur->acd, 0, ACD_FLAG_ABORT);
+ }
+
+ clear_desc(cur);
+ eng->desc_completed = cur;
+
+ cur = cur->Next;
+ }
+
+ start_dma_engine(eng);
+
+ unlock_engine(eng);
+
+ atomic_inc(&priv->ldev->open_count); /* release the device */
+ kfree(priv);
+ return 0;
+}
+
+#ifdef CONFIG_KPC_DMA_AIO
+static
+int kpc_dma_aio_cancel(struct kiocb *kcb)
+{
+ struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_cancel(kcb = [%p]) priv = [%p], ldev = [%p]\n", kcb, priv, priv->ldev);
+ return 0;
+}
+
+static
+ssize_t kpc_dma_aio_read(struct kiocb *kcb, const struct iovec *iov, unsigned long iov_count, loff_t pos)
+{
+ struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_read(kcb = [%p], iov = [%p], iov_count = %ld, pos = %lld) priv = [%p], ldev = [%p]\n", kcb, iov, iov_count, pos, priv, priv->ldev);
+
+ if (priv->ldev->dir != DMA_FROM_DEVICE)
+ return -EMEDIUMTYPE;
+
+	if (iov_count != 1){
+		dev_err(&priv->ldev->pldev->dev, "kpc_dma_aio_read() called with iov_count != 1!\n");
+		return -EFAULT;
+	}
+
+ if (!is_sync_kiocb(kcb))
+ kiocb_set_cancel_fn(kcb, kpc_dma_aio_cancel);
+ return kpc_dma_transfer(priv, kcb, (unsigned long)iov->iov_base, iov->iov_len);
+}
+
+static
+ssize_t kpc_dma_aio_write(struct kiocb *kcb, const struct iovec *iov, unsigned long iov_count, loff_t pos)
+{
+ struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_write(kcb = [%p], iov = [%p], iov_count = %ld, pos = %lld) priv = [%p], ldev = [%p]\n", kcb, iov, iov_count, pos, priv, priv->ldev);
+
+ if (priv->ldev->dir != DMA_TO_DEVICE)
+ return -EMEDIUMTYPE;
+
+	if (iov_count != 1){
+		dev_err(&priv->ldev->pldev->dev, "kpc_dma_aio_write() called with iov_count != 1!\n");
+		return -EFAULT;
+	}
+
+ if (!is_sync_kiocb(kcb))
+ kiocb_set_cancel_fn(kcb, kpc_dma_aio_cancel);
+ return kpc_dma_transfer(priv, kcb, (unsigned long)iov->iov_base, iov->iov_len);
+}
+#endif
+
+static
+ssize_t kpc_dma_read( struct file *filp, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_read(filp = [%p], user_buf = [%p], count = %zu, ppos = [%p]) priv = [%p], ldev = [%p]\n", filp, user_buf, count, ppos, priv, priv->ldev);
+
+ if (priv->ldev->dir != DMA_FROM_DEVICE)
+ return -EMEDIUMTYPE;
+
+ return kpc_dma_transfer(priv, (struct kiocb *)NULL, (unsigned long)user_buf, count);
+}
+
+static
+ssize_t kpc_dma_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_write(filp = [%p], user_buf = [%p], count = %zu, ppos = [%p]) priv = [%p], ldev = [%p]\n", filp, user_buf, count, ppos, priv, priv->ldev);
+
+ if (priv->ldev->dir != DMA_TO_DEVICE)
+ return -EMEDIUMTYPE;
+
+ return kpc_dma_transfer(priv, (struct kiocb *)NULL, (unsigned long)user_buf, count);
+}
+
+static
+long kpc_dma_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param)
+{
+ struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
+ dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_ioctl(filp = [%p], ioctl_num = 0x%x, ioctl_param = 0x%lx) priv = [%p], ldev = [%p]\n", filp, ioctl_num, ioctl_param, priv, priv->ldev);
+
+ switch (ioctl_num){
+ case KND_IOCTL_SET_CARD_ADDR: priv->card_addr = ioctl_param; return priv->card_addr;
+ case KND_IOCTL_SET_USER_CTL: priv->user_ctl = ioctl_param; return priv->user_ctl;
+ case KND_IOCTL_SET_USER_CTL_LAST: priv->user_ctl_last = ioctl_param; return priv->user_ctl_last;
+ case KND_IOCTL_GET_USER_STS: return priv->user_sts;
+ }
+
+ return -ENOTTY;
+}
+
+
+struct file_operations kpc_dma_fops = {
+ .owner = THIS_MODULE,
+ .open = kpc_dma_open,
+ .release = kpc_dma_close,
+ .read = kpc_dma_read,
+ .write = kpc_dma_write,
+#ifdef CONFIG_KPC_DMA_AIO
+ .aio_read = kpc_dma_aio_read,
+ .aio_write = kpc_dma_aio_write,
+#endif
+ .unlocked_ioctl = kpc_dma_ioctl,
+};
+
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
new file mode 100644
index 000000000000..aeae58d9bc18
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/rwsem.h>
+#include "kpc_dma_driver.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matt.Sickler@daktronics.com");
+
+#define KPC_DMA_CHAR_MAJOR UNNAMED_MAJOR
+#define KPC_DMA_NUM_MINORS (1 << MINORBITS)
+static DEFINE_MUTEX(kpc_dma_mtx);
+static int assigned_major_num;
+static LIST_HEAD(kpc_dma_list);
+
+
+/********** kpc_dma_list list management **********/
+struct kpc_dma_device *kpc_dma_lookup_device(int minor)
+{
+	struct kpc_dma_device *c;
+
+	mutex_lock(&kpc_dma_mtx);
+ list_for_each_entry(c, &kpc_dma_list, list) {
+ if (c->pldev->id == minor) {
+ goto out;
+ }
+ }
+ c = NULL; // not-found case
+ out:
+ mutex_unlock(&kpc_dma_mtx);
+ return c;
+}
+
+void kpc_dma_add_device(struct kpc_dma_device *ldev)
+{
+ mutex_lock(&kpc_dma_mtx);
+ list_add(&ldev->list, &kpc_dma_list);
+ mutex_unlock(&kpc_dma_mtx);
+}
+
+void kpc_dma_del_device(struct kpc_dma_device *ldev)
+{
+ mutex_lock(&kpc_dma_mtx);
+ list_del(&ldev->list);
+ mutex_unlock(&kpc_dma_mtx);
+}
+
+/********** SysFS Attributes **********/
+static ssize_t show_engine_regs(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct kpc_dma_device *ldev;
+	struct platform_device *pldev = to_platform_device(dev);
+
+	if (!pldev)
+		return 0;
+	ldev = platform_get_drvdata(pldev);
+	if (!ldev)
+		return 0;
+
+ return scnprintf(buf, PAGE_SIZE,
+ "EngineControlStatus = 0x%08x\n"
+ "RegNextDescPtr = 0x%08x\n"
+ "RegSWDescPtr = 0x%08x\n"
+ "RegCompletedDescPtr = 0x%08x\n"
+ "desc_pool_first = %p\n"
+ "desc_pool_last = %p\n"
+ "desc_next = %p\n"
+ "desc_completed = %p\n",
+ readl(ldev->eng_regs + 1),
+ readl(ldev->eng_regs + 2),
+ readl(ldev->eng_regs + 3),
+ readl(ldev->eng_regs + 4),
+ ldev->desc_pool_first,
+ ldev->desc_pool_last,
+ ldev->desc_next,
+ ldev->desc_completed
+ );
+}
+DEVICE_ATTR(engine_regs, 0444, show_engine_regs, NULL);
+
+static const struct attribute * ndd_attr_list[] = {
+ &dev_attr_engine_regs.attr,
+ NULL,
+};
+
+struct class *kpc_dma_class;
+
+
+/********** Platform Driver Functions **********/
+static
+int kpc_dma_probe(struct platform_device *pldev)
+{
+ struct resource *r = NULL;
+ int rv = 0;
+ dev_t dev;
+
+ struct kpc_dma_device *ldev = kzalloc(sizeof(struct kpc_dma_device), GFP_KERNEL);
+ if (!ldev){
+ dev_err(&pldev->dev, "kpc_dma_probe: unable to kzalloc space for kpc_dma_device\n");
+ rv = -ENOMEM;
+ goto err_rv;
+ }
+
+ dev_dbg(&pldev->dev, "kpc_dma_probe(pldev = [%p]) ldev = [%p]\n", pldev, ldev);
+
+ INIT_LIST_HEAD(&ldev->list);
+
+ ldev->pldev = pldev;
+ platform_set_drvdata(pldev, ldev);
+ atomic_set(&ldev->open_count, 1);
+
+ mutex_init(&ldev->sem);
+ lock_engine(ldev);
+
+ // Get Engine regs resource
+ r = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+ if (!r){
+ dev_err(&ldev->pldev->dev, "kpc_dma_probe: didn't get the engine regs resource!\n");
+ rv = -ENXIO;
+ goto err_kfree;
+ }
+ ldev->eng_regs = ioremap_nocache(r->start, resource_size(r));
+ if (!ldev->eng_regs){
+ dev_err(&ldev->pldev->dev, "kpc_dma_probe: failed to ioremap engine regs!\n");
+ rv = -ENXIO;
+ goto err_kfree;
+ }
+
+ r = platform_get_resource(pldev, IORESOURCE_IRQ, 0);
+ if (!r){
+ dev_err(&ldev->pldev->dev, "kpc_dma_probe: didn't get the IRQ resource!\n");
+ rv = -ENXIO;
+ goto err_kfree;
+ }
+ ldev->irq = r->start;
+
+ // Setup miscdev struct
+ dev = MKDEV(assigned_major_num, pldev->id);
+ ldev->kpc_dma_dev = device_create(kpc_dma_class, &pldev->dev, dev, ldev, "kpc_dma%d", pldev->id);
+	if (IS_ERR(ldev->kpc_dma_dev)){
+		rv = PTR_ERR(ldev->kpc_dma_dev);
+		dev_err(&ldev->pldev->dev, "kpc_dma_probe: device_create failed: %d\n", rv);
+		goto err_kfree;
+	}
+
+ // Setup the DMA engine
+ rv = setup_dma_engine(ldev, 30);
+ if (rv){
+ dev_err(&ldev->pldev->dev, "kpc_dma_probe: failed to setup_dma_engine: %d\n", rv);
+ goto err_misc_dereg;
+ }
+
+ // Setup the sysfs files
+ rv = sysfs_create_files(&(ldev->pldev->dev.kobj), ndd_attr_list);
+ if (rv){
+ dev_err(&ldev->pldev->dev, "kpc_dma_probe: Failed to add sysfs files: %d\n", rv);
+ goto err_destroy_eng;
+ }
+
+ kpc_dma_add_device(ldev);
+
+ return 0;
+
+ err_destroy_eng:
+ destroy_dma_engine(ldev);
+ err_misc_dereg:
+ device_destroy(kpc_dma_class, dev);
+ err_kfree:
+ kfree(ldev);
+ err_rv:
+ return rv;
+}
+
+static
+int kpc_dma_remove(struct platform_device *pldev)
+{
+	struct kpc_dma_device *ldev = platform_get_drvdata(pldev);
+
+	if (!ldev)
+		return -ENXIO;
+
+ dev_dbg(&ldev->pldev->dev, "kpc_dma_remove(pldev = [%p]) ldev = [%p]\n", pldev, ldev);
+
+ lock_engine(ldev);
+ sysfs_remove_files(&(ldev->pldev->dev.kobj), ndd_attr_list);
+ destroy_dma_engine(ldev);
+ kpc_dma_del_device(ldev);
+ device_destroy(kpc_dma_class, MKDEV(assigned_major_num, ldev->pldev->id));
+ kfree(ldev);
+
+ return 0;
+}
+
+
+/********** Driver Functions **********/
+struct platform_driver kpc_dma_plat_driver_i = {
+ .probe = kpc_dma_probe,
+ .remove = kpc_dma_remove,
+ .driver = {
+ .name = KP_DRIVER_NAME_DMA_CONTROLLER,
+ .owner = THIS_MODULE,
+ },
+};
+
+static
+int __init kpc_dma_driver_init(void)
+{
+ int err;
+
+ err = __register_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma", &kpc_dma_fops);
+ if (err < 0){
+ pr_err("Can't allocate a major number (%d) for kpc_dma (err = %d)\n", KPC_DMA_CHAR_MAJOR, err);
+ goto fail_chrdev_register;
+ }
+ assigned_major_num = err;
+
+	kpc_dma_class = class_create(THIS_MODULE, "kpc_dma");
+	if (IS_ERR(kpc_dma_class)){
+		err = PTR_ERR(kpc_dma_class);
+		pr_err("Can't create class kpc_dma (err = %d)\n", err);
+		goto fail_class_create;
+	}
+
+ err = platform_driver_register(&kpc_dma_plat_driver_i);
+ if (err){
+ pr_err("Can't register platform driver for kpc_dma (err = %d)\n", err);
+ goto fail_platdriver_register;
+ }
+
+ return err;
+
+ fail_platdriver_register:
+ class_destroy(kpc_dma_class);
+ fail_class_create:
+ __unregister_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma");
+ fail_chrdev_register:
+ return err;
+}
+module_init(kpc_dma_driver_init);
+
+static
+void __exit kpc_dma_driver_exit(void)
+{
+ platform_driver_unregister(&kpc_dma_plat_driver_i);
+ class_destroy(kpc_dma_class);
+ __unregister_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma");
+}
+module_exit(kpc_dma_driver_exit);
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
new file mode 100644
index 000000000000..ef913b7496e6
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef KPC_DMA_DRIVER_H
+#define KPC_DMA_DRIVER_H
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include <linux/rwsem.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/aio.h>
+#include <linux/bitops.h>
+#include "../kpc.h"
+
+
+struct kp2000_device;
+struct kpc_dma_device {
+ struct list_head list;
+ struct platform_device *pldev;
+ u32 __iomem *eng_regs;
+ struct device *kpc_dma_dev;
+ struct kobject kobj;
+ char name[16];
+
+ int dir; // DMA_FROM_DEVICE || DMA_TO_DEVICE
+ struct mutex sem;
+ unsigned int irq;
+ struct work_struct irq_work;
+
+ atomic_t open_count;
+
+ size_t accumulated_bytes;
+ u32 accumulated_flags;
+
+ // Descriptor "Pool" housekeeping
+ u32 desc_pool_cnt;
+ struct dma_pool *desc_pool;
+ struct kpc_dma_descriptor *desc_pool_first;
+ struct kpc_dma_descriptor *desc_pool_last;
+
+ struct kpc_dma_descriptor *desc_next;
+ struct kpc_dma_descriptor *desc_completed;
+};
+
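+/*
+ * Per-open-file state: the card-side address and user control words configured
+ * through the KND_IOCTL_* calls (the control words are copied into the
+ * descriptors built by kpc_dma_transfer()), plus the user status value
+ * returned by KND_IOCTL_GET_USER_STS.
+ */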
+struct dev_private_data {
+ struct kpc_dma_device *ldev;
+ u64 card_addr;
+ u64 user_ctl;
+ u64 user_ctl_last;
+ u64 user_sts;
+};
+
+struct kpc_dma_device *kpc_dma_lookup_device(int minor);
+
+extern struct file_operations kpc_dma_fops;
+
+#define ENG_CAP_PRESENT 0x00000001
+#define ENG_CAP_DIRECTION 0x00000002
+#define ENG_CAP_TYPE_MASK 0x000000F0
+#define ENG_CAP_NUMBER_MASK 0x0000FF00
+#define ENG_CAP_CARD_ADDR_SIZE_MASK 0x007F0000
+#define ENG_CAP_DESC_MAX_BYTE_CNT_MASK 0x3F000000
+#define ENG_CAP_PERF_SCALE_MASK 0xC0000000
+
+#define ENG_CTL_IRQ_ENABLE BIT(0)
+#define ENG_CTL_IRQ_ACTIVE BIT(1)
+#define ENG_CTL_DESC_COMPLETE BIT(2)
+#define ENG_CTL_DESC_ALIGN_ERR BIT(3)
+#define ENG_CTL_DESC_FETCH_ERR BIT(4)
+#define ENG_CTL_SW_ABORT_ERR BIT(5)
+#define ENG_CTL_DESC_CHAIN_END BIT(7)
+#define ENG_CTL_DMA_ENABLE BIT(8)
+#define ENG_CTL_DMA_RUNNING BIT(10)
+#define ENG_CTL_DMA_WAITING BIT(11)
+#define ENG_CTL_DMA_WAITING_PERSIST BIT(12)
+#define ENG_CTL_DMA_RESET_REQUEST BIT(14)
+#define ENG_CTL_DMA_RESET BIT(15)
+#define ENG_CTL_DESC_FETCH_ERR_CLASS_MASK 0x700000
+
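+/*
+ * Bookkeeping for one in-flight transfer: the pinned user pages and their
+ * scatter/gather mapping, the (optional) completion a synchronous caller
+ * sleeps on, and the flags handed back through transfer_complete_cb().
+ */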
+struct aio_cb_data {
+ struct dev_private_data *priv;
+ struct kpc_dma_device *ldev;
+ struct completion *cpl;
+ unsigned char flags;
+ struct kiocb *kcb;
+ size_t len;
+
+ unsigned int page_count;
+ struct page **user_pages;
+ struct sg_table sgt;
+ int mapped_entry_count;
+};
+
+#define ACD_FLAG_DONE 0
+#define ACD_FLAG_ABORT 1
+#define ACD_FLAG_ENG_ACCUM_ERROR 4
+#define ACD_FLAG_ENG_ACCUM_SHORT 5
+
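+/*
+ * Hardware DMA descriptor. The first eight 32-bit words are the layout the
+ * engine parses (hence the bitfields and packing); MyDMAAddr, Next and acd are
+ * driver-side bookkeeping that trails the hardware portion. The 64-bit user
+ * control value is split across DescUserControlLS/MS, and descriptors are
+ * chained through DescNextDescPtr (bus address) / Next (kernel pointer).
+ */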
+struct kpc_dma_descriptor {
+ struct {
+ volatile u32 DescByteCount :20;
+ volatile u32 DescStatusErrorFlags :4;
+ volatile u32 DescStatusFlags :8;
+ };
+ volatile u32 DescUserControlLS;
+ volatile u32 DescUserControlMS;
+ volatile u32 DescCardAddrLS;
+ struct {
+ volatile u32 DescBufferByteCount :20;
+ volatile u32 DescCardAddrMS :4;
+ volatile u32 DescControlFlags :8;
+ };
+ volatile u32 DescSystemAddrLS;
+ volatile u32 DescSystemAddrMS;
+ volatile u32 DescNextDescPtr;
+
+ dma_addr_t MyDMAAddr;
+ struct kpc_dma_descriptor *Next;
+
+ struct aio_cb_data *acd;
+} __attribute__((packed));
+// DescControlFlags:
+#define DMA_DESC_CTL_SOP BIT(7)
+#define DMA_DESC_CTL_EOP BIT(6)
+#define DMA_DESC_CTL_AFIFO BIT(2)
+#define DMA_DESC_CTL_IRQONERR BIT(1)
+#define DMA_DESC_CTL_IRQONDONE BIT(0)
+// DescStatusFlags:
+#define DMA_DESC_STS_SOP BIT(7)
+#define DMA_DESC_STS_EOP BIT(6)
+#define DMA_DESC_STS_ERROR BIT(4)
+#define DMA_DESC_STS_USMSZ BIT(3)
+#define DMA_DESC_STS_USLSZ BIT(2)
+#define DMA_DESC_STS_SHORT BIT(1)
+#define DMA_DESC_STS_COMPLETE BIT(0)
+// DescStatusErrorFlags:
+#define DMA_DESC_ESTS_ECRC BIT(2)
+#define DMA_DESC_ESTS_POISON BIT(1)
+#define DMA_DESC_ESTS_UNSUCCESSFUL BIT(0)
+
+#define DMA_DESC_ALIGNMENT 0x20
+
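+/*
+ * Register accessors. As used below and in show_engine_regs(), the engine's
+ * 32-bit registers sit at consecutive offsets from eng_regs: +0 capabilities,
+ * +1 control/status, +2 next-descriptor pointer, +3 software pointer,
+ * +4 completed-descriptor pointer.
+ */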
+static inline
+u32 GetEngineCapabilities(struct kpc_dma_device *eng)
+{
+ return readl(eng->eng_regs + 0);
+}
+
+static inline
+void WriteEngineControl(struct kpc_dma_device *eng, u32 value)
+{
+ writel(value, eng->eng_regs + 1);
+}
+static inline
+u32 GetEngineControl(struct kpc_dma_device *eng)
+{
+ return readl(eng->eng_regs + 1);
+}
+static inline
+void SetClearEngineControl(struct kpc_dma_device *eng, u32 set_bits, u32 clear_bits)
+{
+ u32 val = GetEngineControl(eng);
+ val |= set_bits;
+ val &= ~clear_bits;
+ WriteEngineControl(eng, val);
+}
+
+static inline
+void SetEngineNextPtr(struct kpc_dma_device *eng, struct kpc_dma_descriptor * desc)
+{
+ writel(desc->MyDMAAddr, eng->eng_regs + 2);
+}
+static inline
+void SetEngineSWPtr(struct kpc_dma_device *eng, struct kpc_dma_descriptor * desc)
+{
+ writel(desc->MyDMAAddr, eng->eng_regs + 3);
+}
+static inline
+void ClearEngineCompletePtr(struct kpc_dma_device *eng)
+{
+ writel(0, eng->eng_regs + 4);
+}
+static inline
+u32 GetEngineCompletePtr(struct kpc_dma_device *eng)
+{
+ return readl(eng->eng_regs + 4);
+}
+
+static inline
+void lock_engine(struct kpc_dma_device *eng)
+{
+ BUG_ON(eng == NULL);
+ mutex_lock(&eng->sem);
+}
+
+static inline
+void unlock_engine(struct kpc_dma_device *eng)
+{
+ BUG_ON(eng == NULL);
+ mutex_unlock(&eng->sem);
+}
+
+
+/// Shared Functions
+void start_dma_engine(struct kpc_dma_device *eng);
+int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt);
+void stop_dma_engine(struct kpc_dma_device *eng);
+void destroy_dma_engine(struct kpc_dma_device *eng);
+void clear_desc(struct kpc_dma_descriptor *desc);
+int count_descriptors_available(struct kpc_dma_device *eng);
+void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags);
+
+#endif /* KPC_DMA_DRIVER_H */
+
diff --git a/drivers/staging/kpc2000/kpc_dma/uapi.h b/drivers/staging/kpc2000/kpc_dma/uapi.h
new file mode 100644
index 000000000000..5ff6a1a36ff9
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/uapi.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef KPC_DMA_DRIVER_UAPI_H_
+#define KPC_DMA_DRIVER_UAPI_H_
+#include <linux/ioctl.h>
+
+#define KND_IOCTL_SET_CARD_ADDR _IOW('k', 1, __u32)
+#define KND_IOCTL_SET_USER_CTL _IOW('k', 2, __u64)
+#define KND_IOCTL_SET_USER_CTL_LAST _IOW('k', 4, __u64)
+#define KND_IOCTL_GET_USER_STS _IOR('k', 3, __u64)
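+
+/*
+ * Illustrative user-space sequence (a sketch, not an ABI guarantee; the device
+ * node name below assumes the default "kpc_dma%d" naming):
+ *
+ *   int fd = open("/dev/kpc_dma0", O_RDWR);
+ *   ioctl(fd, KND_IOCTL_SET_CARD_ADDR, card_addr);   // value passed directly, not by pointer
+ *   ioctl(fd, KND_IOCTL_SET_USER_CTL, user_ctl);
+ *   ioctl(fd, KND_IOCTL_SET_USER_CTL_LAST, user_ctl_last);
+ *   read(fd, buf, len);   // or write(fd, ...), depending on the engine's direction
+ */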
+
+#endif /* KPC_DMA_DRIVER_UAPI_H_ */
diff --git a/drivers/staging/kpc2000/kpc_i2c/Makefile b/drivers/staging/kpc2000/kpc_i2c/Makefile
new file mode 100644
index 000000000000..73ec07ac7d39
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_i2c/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-m := kpc2000_i2c.o
+kpc2000_i2c-objs := i2c_driver.o fileops.o
diff --git a/drivers/staging/kpc2000/kpc_i2c/fileops.c b/drivers/staging/kpc2000/kpc_i2c/fileops.c
new file mode 100644
index 000000000000..e749c0994491
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_i2c/fileops.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+#if 0
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/cdev.h>
+#include <asm/uaccess.h> /* copy_*_user */
+
+#include "i2c_driver.h"
+
+int i2c_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct i2c_device *lddev;
+
+ if(NULL == inode) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_open: inode is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_open: inode is a NULL pointer\n");
+ return -EINVAL;
+ }
+ if(NULL == filp) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_open: filp is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_open: filp is a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ lddev = container_of(inode->i_cdev, struct i2c_device, cdev);
+ //printk(KERN_DEBUG "<pl_i2c> i2c_cdev_open(filp = [%p], lddev = [%p])\n", filp, lddev);
+ DBG_PRINT(KERN_DEBUG, "i2c_cdev_open(filp = [%p], lddev = [%p])\n", filp, lddev);
+
+ filp->private_data = lddev; /* so other methods can access it */
+
+ return 0; /* success */
+}
+
+int i2c_cdev_close(struct inode *inode, struct file *filp)
+{
+ struct i2c_device *lddev;
+
+ if(NULL == inode) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_close: inode is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_close: inode is a NULL pointer\n");
+ return -EINVAL;
+ }
+ if(NULL == filp) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_close: filp is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_close: filp is a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ lddev = filp->private_data;
+ //printk(KERN_DEBUG "<pl_i2c> i2c_cdev_close(filp = [%p], lddev = [%p])\n", filp, lddev);
+ DBG_PRINT(KERN_DEBUG, "i2c_cdev_close(filp = [%p], lddev = [%p])\n", filp, lddev);
+
+ return 0;
+}
+
+ssize_t i2c_cdev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+ size_t copy;
+ ssize_t ret = 0;
+ int err = 0;
+ u64 read_val;
+ char tmp_buf[48] = { 0 };
+ struct i2c_device *lddev = filp->private_data;
+
+ if(NULL == filp) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_read: filp is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_read: filp is a NULL pointer\n");
+ return -EINVAL;
+ }
+ if(NULL == buf) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_read: buf is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_read: buf is a NULL pointer\n");
+ return -EINVAL;
+ }
+ if(NULL == f_pos) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_read: f_pos is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_read: f_pos is a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ if(count < sizeof(tmp_buf)) {
+ //printk(KERN_INFO "<pl_i2c> i2c_cdev_read: buffer is too small (count = %d, should be at least %d bytes)\n", (int)count, (int)sizeof(tmp_buf));
+ DBG_PRINT(KERN_INFO, "i2c_cdev_read: buffer is too small (count = %d, should be at least %d bytes)\n", (int)count, (int)sizeof(tmp_buf));
+ return -EINVAL;
+ }
+ if(((*f_pos * 8) + lddev->pldev->resource[0].start) > lddev->pldev->resource[0].end) {
+ //printk(KERN_INFO "<pl_i2c> i2c_cdev_read: bad read addr %016llx\n", (*f_pos * 8) + lddev->pldev->resource[0].start);
+ DBG_PRINT(KERN_INFO, "i2c_cdev_read: bad read addr %016llx\n", (*f_pos * 8) + lddev->pldev->resource[0].start);
+ //printk(KERN_INFO "<pl_i2c> i2c_cdev_read: addr end %016llx\n", lddev->pldev->resource[0].end);
+ DBG_PRINT(KERN_INFO, "i2c_cdev_read: addr end %016llx\n", lddev->pldev->resource[0].end);
+ //printk(KERN_INFO "<pl_i2c> i2c_cdev_read: EOF reached\n");
+ DBG_PRINT(KERN_INFO, "i2c_cdev_read: EOF reached\n");
+ return 0;
+ }
+
+ down_read(&lddev->rw_sem);
+
+ read_val = *(lddev->regs + *f_pos);
+ copy = clamp_t(size_t, count, 1, sizeof(tmp_buf));
+ copy = scnprintf(tmp_buf, copy, "reg: 0x%x val: 0x%llx\n", (unsigned int)*f_pos, read_val);
+ err = copy_to_user(buf, tmp_buf, copy);
+	if (err) {
+		//printk(KERN_INFO "<pl_i2c> i2c_cdev_read: could not copy to user (err = %d)\n", err);
+		DBG_PRINT(KERN_INFO, "i2c_cdev_read: could not copy to user (err = %d)\n", err);
+		up_read(&lddev->rw_sem);
+		return -EINVAL;
+	}
+
+ ret = (ssize_t)copy;
+ (*f_pos)++;
+
+ up_read(&lddev->rw_sem);
+
+ return ret;
+}
+
+ssize_t i2c_cdev_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
+{
+ u8 reg;
+ u8 val;
+ char tmp[8] = { 0 };
+ struct i2c_device *lddev = filp->private_data;
+
+ if(NULL == filp) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_write: filp is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_write: filp is a NULL pointer\n");
+ return -EINVAL;
+ }
+ if(NULL == buf) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_write: buf is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_write: buf is a NULL pointer\n");
+ return -EINVAL;
+ }
+ if(NULL == f_pos) {
+ //printk(KERN_WARNING "<pl_i2c> i2c_cdev_write: f_pos is a NULL pointer\n");
+ DBG_PRINT(KERN_WARNING, "i2c_cdev_write: f_pos is a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ //printk(KERN_DEBUG "<pl_i2c> i2c_cdev_write(filp = [%p], lddev = [%p])\n", filp, lddev);
+ DBG_PRINT(KERN_DEBUG, "i2c_cdev_write(filp = [%p], lddev = [%p])\n", filp, lddev);
+
+ down_write(&lddev->rw_sem);
+
+	if (count >= 2) {
+		if (copy_from_user(tmp, buf, 2)) {
+			up_write(&lddev->rw_sem);
+			return -EFAULT;
+		}
+
+ reg = tmp[0] - '0';
+ val = tmp[1] - '0';
+
+ //printk(KERN_DEBUG " reg = %d val = %d\n", reg, val);
+ DBG_PRINT(KERN_DEBUG, " reg = %d val = %d\n", reg, val);
+
+		if (reg < 16) {
+ //printk(KERN_DEBUG " Writing 0x%x to %p\n", val, lddev->regs + reg);
+ DBG_PRINT(KERN_DEBUG, " Writing 0x%x to %p\n", val, lddev->regs + reg);
+ *(lddev->regs + reg) = val;
+ }
+ }
+
+ (*f_pos)++;
+
+ up_write(&lddev->rw_sem);
+
+ return count;
+}
+
+struct file_operations i2c_fops = {
+ .owner = THIS_MODULE,
+ .open = i2c_cdev_open,
+ .release = i2c_cdev_close,
+ .read = i2c_cdev_read,
+ .write = i2c_cdev_write,
+};
+#endif
diff --git a/drivers/staging/kpc2000/kpc_i2c/i2c_driver.c b/drivers/staging/kpc2000/kpc_i2c/i2c_driver.c
new file mode 100644
index 000000000000..0fb068b2408d
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_i2c/i2c_driver.c
@@ -0,0 +1,699 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2014-2018 Daktronics,
+ Matt Sickler <matt.sickler@daktronics.com>,
+ Jordon Hofer <jordon.hofer@daktronics.com>
+ Adapted i2c-i801.c for use with Kadoka hardware.
+ Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>,
+ Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
+ <mdsxyz123@yahoo.com>
+ Copyright (C) 2007 - 2012 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2010 Intel Corporation,
+ David Woodhouse <dwmw2@infradead.org>
+*/
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/rwsem.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include "../kpc.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matt.Sickler@Daktronics.com");
+MODULE_SOFTDEP("pre: i2c-dev");
+
+struct i2c_device {
+ unsigned long smba;
+ struct i2c_adapter adapter;
+ struct platform_device *pldev;
+ struct rw_semaphore rw_sem;
+ unsigned int features;
+};
+
+/*****************************
+ *** Part 1 - i2c Handlers ***
+ *****************************/
+
+#define REG_SIZE 8
+
+/* I801 SMBus address offsets */
+#define SMBHSTSTS(p) ((0 * REG_SIZE) + (p)->smba)
+#define SMBHSTCNT(p) ((2 * REG_SIZE) + (p)->smba)
+#define SMBHSTCMD(p) ((3 * REG_SIZE) + (p)->smba)
+#define SMBHSTADD(p) ((4 * REG_SIZE) + (p)->smba)
+#define SMBHSTDAT0(p) ((5 * REG_SIZE) + (p)->smba)
+#define SMBHSTDAT1(p) ((6 * REG_SIZE) + (p)->smba)
+#define SMBBLKDAT(p) ((7 * REG_SIZE) + (p)->smba)
+#define SMBPEC(p) ((8 * REG_SIZE) + (p)->smba) /* ICH3 and later */
+#define SMBAUXSTS(p) ((12 * REG_SIZE) + (p)->smba) /* ICH4 and later */
+#define SMBAUXCTL(p) ((13 * REG_SIZE) + (p)->smba) /* ICH4 and later */
+
+/* PCI Address Constants */
+#define SMBBAR 4
+#define SMBHSTCFG 0x040
+
+/* Host configuration bits for SMBHSTCFG */
+#define SMBHSTCFG_HST_EN 1
+#define SMBHSTCFG_SMB_SMI_EN 2
+#define SMBHSTCFG_I2C_EN 4
+
+/* Auxiliary control register bits, ICH4+ only */
+#define SMBAUXCTL_CRC 1
+#define SMBAUXCTL_E32B 2
+
+/* kill bit for SMBHSTCNT */
+#define SMBHSTCNT_KILL 2
+
+/* Other settings */
+#define MAX_RETRIES 400
+#define ENABLE_INT9 0 /* set to 0x01 to enable - untested */
+
+/* I801 command constants */
+#define I801_QUICK 0x00
+#define I801_BYTE 0x04
+#define I801_BYTE_DATA 0x08
+#define I801_WORD_DATA 0x0C
+#define I801_PROC_CALL 0x10 /* unimplemented */
+#define I801_BLOCK_DATA 0x14
+#define I801_I2C_BLOCK_DATA 0x18 /* ICH5 and later */
+#define I801_BLOCK_LAST 0x34
+#define I801_I2C_BLOCK_LAST 0x38 /* ICH5 and later */
+#define I801_START 0x40
+#define I801_PEC_EN 0x80 /* ICH3 and later */
+
+/* I801 Hosts Status register bits */
+#define SMBHSTSTS_BYTE_DONE 0x80
+#define SMBHSTSTS_INUSE_STS 0x40
+#define SMBHSTSTS_SMBALERT_STS 0x20
+#define SMBHSTSTS_FAILED 0x10
+#define SMBHSTSTS_BUS_ERR 0x08
+#define SMBHSTSTS_DEV_ERR 0x04
+#define SMBHSTSTS_INTR 0x02
+#define SMBHSTSTS_HOST_BUSY 0x01
+
+#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | SMBHSTSTS_INTR)
+
+/* Older devices have their ID defined in <linux/pci_ids.h> */
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
+/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
+#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
+
+
+#define FEATURE_SMBUS_PEC (1 << 0)
+#define FEATURE_BLOCK_BUFFER (1 << 1)
+#define FEATURE_BLOCK_PROC (1 << 2)
+#define FEATURE_I2C_BLOCK_READ (1 << 3)
+/* Not really a feature, but it's convenient to handle it as such */
+#define FEATURE_IDF (1 << 15)
+
+static unsigned int disable_features;
+module_param(disable_features, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_features, "Disable selected driver features");
+
+// FIXME!
+#undef inb_p
+#define inb_p(a) readq((void *)(a))
+#undef outb_p
+#define outb_p(d, a) writeq((d), (void *)(a))
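+// The stock i801 code talks to its registers with port I/O; on this hardware the
+// same registers are memory-mapped as 64-bit words at 8-byte strides (see
+// REG_SIZE above), so inb_p/outb_p are redirected to readq/writeq on the
+// remapped BAR.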
+
+/* Make sure the SMBus host is ready to start transmitting.
+ Return 0 if it is, -EBUSY if it is not. */
+static int i801_check_pre(struct i2c_device *priv)
+{
+ int status;
+
+ dev_dbg(&priv->adapter.dev, "i801_check_pre\n");
+
+ status = inb_p(SMBHSTSTS(priv));
+ if (status & SMBHSTSTS_HOST_BUSY) {
+ dev_err(&priv->adapter.dev, "SMBus is busy, can't use it! (status=%x)\n", status);
+ return -EBUSY;
+ }
+
+ status &= STATUS_FLAGS;
+ if (status) {
+ //dev_dbg(&priv->adapter.dev, "Clearing status flags (%02x)\n", status);
+ outb_p(status, SMBHSTSTS(priv));
+ status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
+ if (status) {
+ dev_err(&priv->adapter.dev, "Failed clearing status flags (%02x)\n", status);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/* Convert the status register to an error code, and clear it. */
+static int i801_check_post(struct i2c_device *priv, int status, int timeout)
+{
+ int result = 0;
+
+ dev_dbg(&priv->adapter.dev, "i801_check_post\n");
+
+ /* If the SMBus is still busy, we give up */
+ if (timeout) {
+ dev_err(&priv->adapter.dev, "Transaction timeout\n");
+ /* try to stop the current command */
+ dev_dbg(&priv->adapter.dev, "Terminating the current operation\n");
+ outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, SMBHSTCNT(priv));
+ usleep_range(1000, 2000);
+ outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), SMBHSTCNT(priv));
+
+ /* Check if it worked */
+ status = inb_p(SMBHSTSTS(priv));
+ if ((status & SMBHSTSTS_HOST_BUSY) || !(status & SMBHSTSTS_FAILED)) {
+ dev_err(&priv->adapter.dev, "Failed terminating the transaction\n");
+ }
+ outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
+ return -ETIMEDOUT;
+ }
+
+ if (status & SMBHSTSTS_FAILED) {
+ result = -EIO;
+ dev_err(&priv->adapter.dev, "Transaction failed\n");
+ }
+ if (status & SMBHSTSTS_DEV_ERR) {
+ result = -ENXIO;
+ dev_dbg(&priv->adapter.dev, "No response\n");
+ }
+ if (status & SMBHSTSTS_BUS_ERR) {
+ result = -EAGAIN;
+ dev_dbg(&priv->adapter.dev, "Lost arbitration\n");
+ }
+
+ if (result) {
+ /* Clear error flags */
+ outb_p(status & STATUS_FLAGS, SMBHSTSTS(priv));
+ status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
+ if (status) {
+ dev_warn(&priv->adapter.dev, "Failed clearing status flags at end of transaction (%02x)\n", status);
+ }
+ }
+
+ return result;
+}
+
+static int i801_transaction(struct i2c_device *priv, int xact)
+{
+ int status;
+ int result;
+ int timeout = 0;
+
+ dev_dbg(&priv->adapter.dev, "i801_transaction\n");
+
+ result = i801_check_pre(priv);
+ if (result < 0) {
+ return result;
+ }
+ /* the current contents of SMBHSTCNT can be overwritten, since PEC,
+ * INTREN, SMBSCMD are passed in xact */
+ outb_p(xact | I801_START, SMBHSTCNT(priv));
+
+ /* We will always wait for a fraction of a second! */
+ do {
+ usleep_range(250, 500);
+ status = inb_p(SMBHSTSTS(priv));
+ } while ((status & SMBHSTSTS_HOST_BUSY) && (timeout++ < MAX_RETRIES));
+
+ result = i801_check_post(priv, status, timeout > MAX_RETRIES);
+ if (result < 0) {
+ return result;
+ }
+
+ outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
+ return 0;
+}
+
+/* wait for INTR bit as advised by Intel */
+static void i801_wait_hwpec(struct i2c_device *priv)
+{
+ int timeout = 0;
+ int status;
+
+ dev_dbg(&priv->adapter.dev, "i801_wait_hwpec\n");
+
+ do {
+ usleep_range(250, 500);
+ status = inb_p(SMBHSTSTS(priv));
+ } while ((!(status & SMBHSTSTS_INTR)) && (timeout++ < MAX_RETRIES));
+
+ if (timeout > MAX_RETRIES) {
+ dev_dbg(&priv->adapter.dev, "PEC Timeout!\n");
+ }
+
+ outb_p(status, SMBHSTSTS(priv));
+}
+
+static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int hwpec)
+{
+ int i, len;
+ int status;
+
+ dev_dbg(&priv->adapter.dev, "i801_block_transaction_by_block\n");
+
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
+
+ /* Use 32-byte buffer to process this transaction */
+ if (read_write == I2C_SMBUS_WRITE) {
+ len = data->block[0];
+ outb_p(len, SMBHSTDAT0(priv));
+ for (i = 0; i < len; i++) {
+ outb_p(data->block[i+1], SMBBLKDAT(priv));
+ }
+ }
+
+	status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 | (hwpec ? I801_PEC_EN : 0));
+ if (status) {
+ return status;
+ }
+
+ if (read_write == I2C_SMBUS_READ) {
+ len = inb_p(SMBHSTDAT0(priv));
+ if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
+ return -EPROTO;
+ }
+
+ data->block[0] = len;
+ for (i = 0; i < len; i++) {
+ data->block[i + 1] = inb_p(SMBBLKDAT(priv));
+ }
+ }
+ return 0;
+}
+
+static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int command, int hwpec)
+{
+ int i, len;
+ int smbcmd;
+ int status;
+ int result;
+ int timeout;
+
+ dev_dbg(&priv->adapter.dev, "i801_block_transaction_byte_by_byte\n");
+
+ result = i801_check_pre(priv);
+ if (result < 0) {
+ return result;
+ }
+
+ len = data->block[0];
+
+ if (read_write == I2C_SMBUS_WRITE) {
+ outb_p(len, SMBHSTDAT0(priv));
+ outb_p(data->block[1], SMBBLKDAT(priv));
+ }
+
+ for (i = 1; i <= len; i++) {
+ if (i == len && read_write == I2C_SMBUS_READ) {
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ smbcmd = I801_I2C_BLOCK_LAST;
+ } else {
+ smbcmd = I801_BLOCK_LAST;
+ }
+ } else {
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_READ) {
+ smbcmd = I801_I2C_BLOCK_DATA;
+ } else {
+ smbcmd = I801_BLOCK_DATA;
+ }
+ }
+ outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
+
+ if (i == 1) {
+ outb_p(inb(SMBHSTCNT(priv)) | I801_START, SMBHSTCNT(priv));
+ }
+ /* We will always wait for a fraction of a second! */
+ timeout = 0;
+ do {
+ usleep_range(250, 500);
+ status = inb_p(SMBHSTSTS(priv));
+ } while ((!(status & SMBHSTSTS_BYTE_DONE)) && (timeout++ < MAX_RETRIES));
+
+ result = i801_check_post(priv, status, timeout > MAX_RETRIES);
+ if (result < 0) {
+ return result;
+ }
+ if (i == 1 && read_write == I2C_SMBUS_READ && command != I2C_SMBUS_I2C_BLOCK_DATA) {
+ len = inb_p(SMBHSTDAT0(priv));
+ if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&priv->adapter.dev, "Illegal SMBus block read size %d\n", len);
+ /* Recover */
+ while (inb_p(SMBHSTSTS(priv)) & SMBHSTSTS_HOST_BUSY) {
+ outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
+ }
+ outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
+ return -EPROTO;
+ }
+ data->block[0] = len;
+ }
+
+ /* Retrieve/store value in SMBBLKDAT */
+ if (read_write == I2C_SMBUS_READ) {
+ data->block[i] = inb_p(SMBBLKDAT(priv));
+ }
+ if (read_write == I2C_SMBUS_WRITE && i+1 <= len) {
+ outb_p(data->block[i+1], SMBBLKDAT(priv));
+ }
+ /* signals SMBBLKDAT ready */
+ outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS(priv));
+ }
+
+ return 0;
+}
+
+static int i801_set_block_buffer_mode(struct i2c_device *priv)
+{
+ dev_dbg(&priv->adapter.dev, "i801_set_block_buffer_mode\n");
+
+ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+ if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0) {
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Block transaction function */
+static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int command, int hwpec)
+{
+ int result = 0;
+ //unsigned char hostc;
+
+ dev_dbg(&priv->adapter.dev, "i801_block_transaction\n");
+
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* set I2C_EN bit in configuration register */
+ //TODO: Figure out the right thing to do here...
+ //pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
+ //pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc | SMBHSTCFG_I2C_EN);
+ } else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
+ dev_err(&priv->adapter.dev, "I2C block read is unsupported!\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (read_write == I2C_SMBUS_WRITE || command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (data->block[0] < 1) {
+ data->block[0] = 1;
+ }
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+ data->block[0] = I2C_SMBUS_BLOCK_MAX;
+ }
+ } else {
+ data->block[0] = 32; /* max for SMBus block reads */
+ }
+
+ /* Experience has shown that the block buffer can only be used for
+ SMBus (not I2C) block transactions, even though the datasheet
+ doesn't mention this limitation. */
+ if ((priv->features & FEATURE_BLOCK_BUFFER) && command != I2C_SMBUS_I2C_BLOCK_DATA && i801_set_block_buffer_mode(priv) == 0) {
+ result = i801_block_transaction_by_block(priv, data, read_write, hwpec);
+ } else {
+ result = i801_block_transaction_byte_by_byte(priv, data, read_write, command, hwpec);
+ }
+ if (result == 0 && hwpec) {
+ i801_wait_hwpec(priv);
+ }
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_WRITE) {
+ /* restore saved configuration register value */
+ //TODO: Figure out the right thing to do here...
+ //pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
+ }
+ return result;
+}
+
+/* Return negative errno on error. */
+static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data)
+{
+ int hwpec;
+ int block = 0;
+ int ret, xact = 0;
+ struct i2c_device *priv = i2c_get_adapdata(adap);
+
+	dev_dbg(&priv->adapter.dev, "i801_access (addr=%0d) flags=%x read_write=%x command=%x size=%x\n",
+		addr, flags, read_write, command, size);
+
+ hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ dev_dbg(&priv->adapter.dev, " [acc] SMBUS_QUICK\n");
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ xact = I801_QUICK;
+ break;
+ case I2C_SMBUS_BYTE:
+ dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE\n");
+
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ if (read_write == I2C_SMBUS_WRITE) {
+ outb_p(command, SMBHSTCMD(priv));
+ }
+ xact = I801_BYTE;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE_DATA\n");
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(command, SMBHSTCMD(priv));
+ if (read_write == I2C_SMBUS_WRITE) {
+ outb_p(data->byte, SMBHSTDAT0(priv));
+ }
+ xact = I801_BYTE_DATA;
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ dev_dbg(&priv->adapter.dev, " [acc] SMBUS_WORD_DATA\n");
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(command, SMBHSTCMD(priv));
+ if (read_write == I2C_SMBUS_WRITE) {
+ outb_p(data->word & 0xff, SMBHSTDAT0(priv));
+ outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
+ }
+ xact = I801_WORD_DATA;
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BLOCK_DATA\n");
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(command, SMBHSTCMD(priv));
+ block = 1;
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ dev_dbg(&priv->adapter.dev, " [acc] SMBUS_I2C_BLOCK_DATA\n");
+ /* NB: page 240 of ICH5 datasheet shows that the R/#W
+ * bit should be cleared here, even when reading */
+ outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
+ if (read_write == I2C_SMBUS_READ) {
+ /* NB: page 240 of ICH5 datasheet also shows
+ * that DATA1 is the cmd field when reading */
+ outb_p(command, SMBHSTDAT1(priv));
+ } else {
+ outb_p(command, SMBHSTCMD(priv));
+ }
+ block = 1;
+ break;
+ default:
+ dev_dbg(&priv->adapter.dev, " [acc] Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
+ }
+
+ if (hwpec) { /* enable/disable hardware PEC */
+ dev_dbg(&priv->adapter.dev, " [acc] hwpec: yes\n");
+ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
+ } else {
+ dev_dbg(&priv->adapter.dev, " [acc] hwpec: no\n");
+ outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC), SMBAUXCTL(priv));
+ }
+
+ if (block) {
+ //ret = 0;
+ dev_dbg(&priv->adapter.dev, " [acc] block: yes\n");
+ ret = i801_block_transaction(priv, data, read_write, size, hwpec);
+ } else {
+ dev_dbg(&priv->adapter.dev, " [acc] block: no\n");
+ ret = i801_transaction(priv, xact | ENABLE_INT9);
+ }
+
+ /* Some BIOSes don't like it when PEC is enabled at reboot or resume
+ time, so we forcibly disable it after every transaction. Turn off
+ E32B for the same reason. */
+ if (hwpec || block) {
+ dev_dbg(&priv->adapter.dev, " [acc] hwpec || block\n");
+ outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ }
+ if (block) {
+ dev_dbg(&priv->adapter.dev, " [acc] block\n");
+ return ret;
+ }
+ if (ret) {
+ dev_dbg(&priv->adapter.dev, " [acc] ret %d\n", ret);
+ return ret;
+ }
+ if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK)) {
+ dev_dbg(&priv->adapter.dev, " [acc] I2C_SMBUS_WRITE || I801_QUICK -> ret 0\n");
+ return 0;
+ }
+
+ switch (xact & 0x7f) {
+ case I801_BYTE: /* Result put in SMBHSTDAT0 */
+ case I801_BYTE_DATA:
+ dev_dbg(&priv->adapter.dev, " [acc] I801_BYTE or I801_BYTE_DATA\n");
+ data->byte = inb_p(SMBHSTDAT0(priv));
+ break;
+ case I801_WORD_DATA:
+ dev_dbg(&priv->adapter.dev, " [acc] I801_WORD_DATA\n");
+ data->word = inb_p(SMBHSTDAT0(priv)) + (inb_p(SMBHSTDAT1(priv)) << 8);
+ break;
+ }
+ return 0;
+}
+
+
+
+static u32 i801_func(struct i2c_adapter *adapter)
+{
+ struct i2c_device *priv = i2c_get_adapdata(adapter);
+
+ /* original settings
+ u32 f = I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK |
+ ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
+ ((priv->features & FEATURE_I2C_BLOCK_READ) ?
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0);
+ */
+
+ // http://lxr.free-electrons.com/source/include/uapi/linux/i2c.h#L85
+
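+	/*
+	 * Note: the !I2C_FUNC_* terms below evaluate to 0, so they contribute
+	 * nothing to the mask; they only document which capabilities are
+	 * deliberately not advertised.
+	 */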
+ u32 f =
+ I2C_FUNC_I2C | /* 0x00000001 (I enabled this one) */
+ !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
+ !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
+ ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
+ !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
+ I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
+ !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
+ !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
+ !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
+ !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
+ !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
+ !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
+ !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
+ !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
+ !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
+ ((priv->features & FEATURE_I2C_BLOCK_READ) ? I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
+
+ I2C_FUNC_SMBUS_BYTE | /* _READ_BYTE _WRITE_BYTE */
+ I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA _WRITE_BYTE_DATA */
+ I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA _WRITE_WORD_DATA */
+ I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA _WRITE_BLOCK_DATA */
+ !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK _WRITE_I2C_BLOCK */
+ !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE _BYTE_DATA _WORD_DATA _PROC_CALL _WRITE_BLOCK_DATA _I2C_BLOCK _PEC */
+ return f;
+}
+
+static const struct i2c_algorithm smbus_algorithm = {
+ .smbus_xfer = i801_access,
+ .functionality = i801_func,
+};
+
+
+
+/********************************
+ *** Part 2 - Driver Handlers ***
+ ********************************/
+int pi2c_probe(struct platform_device *pldev)
+{
+ int err;
+ struct i2c_device *priv;
+ struct resource *res;
+
+ dev_dbg(&pldev->dev, "pi2c_probe(pldev = %p '%s')\n", pldev, pldev->name);
+
+ priv = kzalloc(sizeof(struct i2c_device), GFP_KERNEL);
+ if (!priv) {
+ return -ENOMEM;
+ }
+
+ i2c_set_adapdata(&priv->adapter, priv);
+ priv->adapter.owner = THIS_MODULE;
+ priv->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ priv->adapter.algo = &smbus_algorithm;
+
+ res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+ priv->smba = (unsigned long)ioremap_nocache(res->start, resource_size(res));
+
+ priv->pldev = pldev;
+ pldev->dev.platform_data = priv;
+
+ priv->features |= FEATURE_IDF;
+ priv->features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_SMBUS_PEC;
+ priv->features |= FEATURE_BLOCK_BUFFER;
+
+ //init_MUTEX(&lddata->sem);
+ init_rwsem(&priv->rw_sem);
+
+ /* set up the sysfs linkage to our parent device */
+ priv->adapter.dev.parent = &pldev->dev;
+
+ /* Retry up to 3 times on lost arbitration */
+ priv->adapter.retries = 3;
+
+ //snprintf(priv->adapter.name, sizeof(priv->adapter.name), "Fake SMBus I801 adapter at %04lx", priv->smba);
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name), "Fake SMBus I801 adapter");
+
+ err = i2c_add_adapter(&priv->adapter);
+ if (err) {
+ dev_err(&priv->adapter.dev, "Failed to add SMBus adapter\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int pi2c_remove(struct platform_device *pldev)
+{
+ struct i2c_device *lddev;
+ dev_dbg(&pldev->dev, "pi2c_remove(pldev = %p '%s')\n", pldev, pldev->name);
+
+ lddev = (struct i2c_device *)pldev->dev.platform_data;
+
+ i2c_del_adapter(&lddev->adapter);
+
+ //TODO: Figure out the right thing to do here...
+ //pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+ //pci_release_region(dev, SMBBAR);
+ //pci_set_drvdata(dev, NULL);
+
+ //cdev_del(&lddev->cdev);
+	if (lddev) {
+		kfree(lddev);
+		pldev->dev.platform_data = NULL;
+	}
+
+ return 0;
+}
+
+struct platform_driver i2c_plat_driver_i = {
+ .probe = pi2c_probe,
+ .remove = pi2c_remove,
+ .driver = {
+ .name = KP_DRIVER_NAME_I2C,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(i2c_plat_driver_i);
diff --git a/drivers/staging/kpc2000/kpc_spi/Makefile b/drivers/staging/kpc2000/kpc_spi/Makefile
new file mode 100644
index 000000000000..3018d200484f
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_spi/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-m += kpc2000_spi.o
+kpc2000_spi-objs := spi_driver.o
diff --git a/drivers/staging/kpc2000/kpc_spi/spi_driver.c b/drivers/staging/kpc2000/kpc_spi/spi_driver.c
new file mode 100644
index 000000000000..86df16547a92
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_spi/spi_driver.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * KP2000 SPI controller driver
+ *
+ * Copyright (C) 2014-2018 Daktronics
+ * Author: Matt Sickler <matt.sickler@daktronics.com>
+ * Very loosely based on spi-omap2-mcspi.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gcd.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/partitions.h>
+
+#include "../kpc.h"
+#include "spi_parts.h"
+
+
+/***************
+ * SPI Defines *
+ ***************/
+#define KP_SPI_REG_CONFIG 0x0 /* 0x00 */
+#define KP_SPI_REG_STATUS 0x1 /* 0x08 */
+#define KP_SPI_REG_FFCTRL 0x2 /* 0x10 */
+#define KP_SPI_REG_TXDATA 0x3 /* 0x18 */
+#define KP_SPI_REG_RXDATA 0x4 /* 0x20 */
+
+#define KP_SPI_CLK 48000000
+#define KP_SPI_MAX_FIFODEPTH 64
+#define KP_SPI_MAX_FIFOWCNT 0xFFFF
+
+#define KP_SPI_REG_CONFIG_TRM_TXRX 0
+#define KP_SPI_REG_CONFIG_TRM_RX 1
+#define KP_SPI_REG_CONFIG_TRM_TX 2
+
+#define KP_SPI_REG_STATUS_RXS 0x01
+#define KP_SPI_REG_STATUS_TXS 0x02
+#define KP_SPI_REG_STATUS_EOT 0x04
+#define KP_SPI_REG_STATUS_TXFFE 0x10
+#define KP_SPI_REG_STATUS_TXFFF 0x20
+#define KP_SPI_REG_STATUS_RXFFE 0x40
+#define KP_SPI_REG_STATUS_RXFFF 0x80
+
+
+
+/******************
+ * SPI Structures *
+ ******************/
+struct kp_spi {
+ struct spi_master *master;
+ u64 __iomem *base;
+ unsigned long phys;
+ struct device *dev;
+ int fifo_depth;
+ unsigned int pin_dir:1;
+};
+
+
+struct kp_spi_controller_state {
+ void __iomem *base;
+ unsigned long phys;
+ unsigned char chip_select;
+ int word_len;
+ s64 conf_cache;
+};
+
+
+union kp_spi_config {
+ /* use this to access individual elements */
+ struct __attribute__((packed)) spi_config_bitfield {
+ unsigned int pha : 1; /* spim_clk Phase */
+ unsigned int pol : 1; /* spim_clk Polarity */
+ unsigned int epol : 1; /* spim_csx Polarity */
+ unsigned int dpe : 1; /* Transmission Enable */
+ unsigned int wl : 5; /* Word Length */
+ unsigned int : 3;
+ unsigned int trm : 2; /* TxRx Mode */
+ unsigned int cs : 4; /* Chip Select */
+ unsigned int wcnt : 7; /* Word Count */
+ unsigned int ffen : 1; /* FIFO Enable */
+ unsigned int spi_en : 1; /* SPI Enable */
+ unsigned int : 5;
+ } bitfield;
+ /* use this to grab the whole register */
+ u32 reg;
+};
+
+
+
+union kp_spi_status {
+ struct __attribute__((packed)) spi_status_bitfield {
+ unsigned int rx : 1; /* Rx Status */
+ unsigned int tx : 1; /* Tx Status */
+ unsigned int eo : 1; /* End of Transfer */
+ unsigned int : 1;
+ unsigned int txffe : 1; /* Tx FIFO Empty */
+ unsigned int txfff : 1; /* Tx FIFO Full */
+ unsigned int rxffe : 1; /* Rx FIFO Empty */
+ unsigned int rxfff : 1; /* Rx FIFO Full */
+ unsigned int : 24;
+ } bitfield;
+ u32 reg;
+};
+
+
+
+union kp_spi_ffctrl {
+ struct __attribute__((packed)) spi_ffctrl_bitfield {
+ unsigned int ffstart : 1; /* FIFO Start */
+ unsigned int : 31;
+ } bitfield;
+ u32 reg;
+};
+
+
+
+/***************
+ * SPI Helpers *
+ ***************/
+static inline int
+kp_spi_bytes_per_word(int word_len)
+{
+ if (word_len <= 8){
+ return 1;
+ }
+ else if (word_len <= 16) {
+ return 2;
+ }
+ else { /* word_len <= 32 */
+ return 4;
+ }
+}
+
+static inline u64
+kp_spi_read_reg(struct kp_spi_controller_state *cs, int idx)
+{
+ u64 __iomem *addr = cs->base;
+ u64 val;
+
+ addr += idx;
+ if ((idx == KP_SPI_REG_CONFIG) && (cs->conf_cache >= 0)){
+ return cs->conf_cache;
+ }
+ val = readq((void*)addr);
+ return val;
+}
+
+static inline void
+kp_spi_write_reg(struct kp_spi_controller_state *cs, int idx, u64 val)
+{
+ u64 __iomem *addr = cs->base;
+ addr += idx;
+ writeq(val, (void*)addr);
+ if (idx == KP_SPI_REG_CONFIG)
+ cs->conf_cache = val;
+}
+
+static int
+kp_spi_wait_for_reg_bit(struct kp_spi_controller_state *cs, int idx, unsigned long bit)
+{
+ unsigned long timeout;
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(kp_spi_read_reg(cs, idx) & bit)) {
+ if (time_after(jiffies, timeout)) {
+ if (!(kp_spi_read_reg(cs, idx) & bit)) {
+ return -ETIMEDOUT;
+ } else {
+ return 0;
+ }
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
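+/*
+ * PIO transfer loop. Each spi_transfer is half-duplex: with a tx_buf the bytes
+ * are pushed through TXDATA as TXS becomes ready; with an rx_buf a dummy 0x00
+ * write clocks each byte in, which is then pulled from RXDATA once RXS is set.
+ * Returns the number of words actually processed.
+ */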
+static unsigned
+kp_spi_txrx_pio(struct spi_device *spidev, struct spi_transfer *transfer)
+{
+ struct kp_spi_controller_state *cs = spidev->controller_state;
+ unsigned int count = transfer->len;
+ unsigned int c = count;
+
+ int i;
+ u8 *rx = transfer->rx_buf;
+ const u8 *tx = transfer->tx_buf;
+ int processed = 0;
+
+ if (tx) {
+ for (i = 0 ; i < c ; i++) {
+ char val = *tx++;
+
+ if (kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS, KP_SPI_REG_STATUS_TXS) < 0) {
+ goto out;
+ }
+
+ kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, val);
+ processed++;
+ }
+ }
+ else if(rx) {
+ for (i = 0 ; i < c ; i++) {
+ char test=0;
+
+ kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, 0x00);
+
+ if (kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS, KP_SPI_REG_STATUS_RXS) < 0) {
+ goto out;
+ }
+
+ test = kp_spi_read_reg(cs, KP_SPI_REG_RXDATA);
+ *rx++ = test;
+ processed++;
+ }
+ }
+
+ if (kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS, KP_SPI_REG_STATUS_EOT) < 0) {
+ //TODO: Figure out how to abort transaction?? This has never happened in practice though...
+ }
+
+ out:
+ return processed;
+}
+
+/*****************
+ * SPI Functions *
+ *****************/
+static int
+kp_spi_setup(struct spi_device *spidev)
+{
+ union kp_spi_config sc;
+ struct kp_spi *kpspi = spi_master_get_devdata(spidev->master);
+ struct kp_spi_controller_state *cs;
+
+ /* setup controller state */
+ cs = spidev->controller_state;
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if(!cs) {
+ return -ENOMEM;
+ }
+ cs->base = kpspi->base;
+ cs->phys = kpspi->phys;
+ cs->chip_select = spidev->chip_select;
+ cs->word_len = spidev->bits_per_word;
+ cs->conf_cache = -1;
+ spidev->controller_state = cs;
+ }
+
+ /* set config register */
+ sc.bitfield.wl = spidev->bits_per_word - 1;
+ sc.bitfield.cs = spidev->chip_select;
+ sc.bitfield.spi_en = 0;
+ sc.bitfield.trm = 0;
+ sc.bitfield.ffen = 0;
+ kp_spi_write_reg(spidev->controller_state, KP_SPI_REG_CONFIG, sc.reg);
+ return 0;
+}
+
+static int
+kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
+{
+ struct kp_spi_controller_state *cs;
+ struct spi_device *spidev;
+ struct kp_spi *kpspi;
+ struct spi_transfer *transfer;
+ union kp_spi_config sc;
+ int status = 0;
+
+ spidev = m->spi;
+ kpspi = spi_master_get_devdata(master);
+ m->actual_length = 0;
+ m->status = 0;
+
+ cs = spidev->controller_state;
+
+ /* reject invalid messages and transfers */
+ if (list_empty(&m->transfers)) {
+ return -EINVAL;
+ }
+
+ /* validate input */
+ list_for_each_entry(transfer, &m->transfers, transfer_list) {
+ const void *tx_buf = transfer->tx_buf;
+ void *rx_buf = transfer->rx_buf;
+ unsigned len = transfer->len;
+
+ if (transfer->speed_hz > KP_SPI_CLK || (len && !(rx_buf || tx_buf))) {
+ dev_dbg(kpspi->dev, " transfer: %d Hz, %d %s%s, %d bpw\n",
+ transfer->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ transfer->bits_per_word);
+ dev_dbg(kpspi->dev, " transfer -EINVAL\n");
+ return -EINVAL;
+ }
+ if (transfer->speed_hz && (transfer->speed_hz < (KP_SPI_CLK >> 15))) {
+ dev_dbg(kpspi->dev, "speed_hz %d below minimum %d Hz\n",
+ transfer->speed_hz,
+ KP_SPI_CLK >> 15);
+ dev_dbg(kpspi->dev, " speed_hz -EINVAL\n");
+ return -EINVAL;
+ }
+ }
+
+ /* assert chip select to start the sequence*/
+ sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG);
+ sc.bitfield.spi_en = 1;
+ kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg);
+
+ /* work */
+ if (kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS, KP_SPI_REG_STATUS_EOT) < 0) {
+ dev_info(kpspi->dev, "EOT timed out\n");
+ goto out;
+ }
+
+ /* do the transfers for this message */
+ list_for_each_entry(transfer, &m->transfers, transfer_list) {
+ if (transfer->tx_buf == NULL && transfer->rx_buf == NULL && transfer->len) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* transfer */
+ if (transfer->len) {
+ unsigned int word_len = spidev->bits_per_word;
+ unsigned count;
+
+ /* set up the transfer... */
+ sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG);
+
+ /* ...direction */
+ if (transfer->tx_buf) {
+ sc.bitfield.trm = KP_SPI_REG_CONFIG_TRM_TX;
+ }
+ else if (transfer->rx_buf) {
+ sc.bitfield.trm = KP_SPI_REG_CONFIG_TRM_RX;
+ }
+
+ /* ...word length */
+ if (transfer->bits_per_word) {
+ word_len = transfer->bits_per_word;
+ }
+ cs->word_len = word_len;
+ sc.bitfield.wl = word_len-1;
+
+ /* ...chip select */
+ sc.bitfield.cs = spidev->chip_select;
+
+ /* ...and write the new settings */
+ kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg);
+
+ /* do the transfer */
+ count = kp_spi_txrx_pio(spidev, transfer);
+ m->actual_length += count;
+
+ if (count != transfer->len) {
+ status = -EIO;
+ break;
+ }
+ }
+
+ if (transfer->delay_usecs) {
+ udelay(transfer->delay_usecs);
+ }
+ }
+
+ /* de-assert chip select to end the sequence */
+ sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG);
+ sc.bitfield.spi_en = 0;
+ kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg);
+
+ out:
+ /* done work */
+ spi_finalize_current_message(master);
+ return 0;
+}
+
+static void
+kp_spi_cleanup(struct spi_device *spidev)
+{
+ struct kp_spi_controller_state *cs = spidev->controller_state;
+ if (cs) {
+ kfree(cs);
+ }
+}
+
+
+
+/******************
+ * Probe / Remove *
+ ******************/
+static int
+kp_spi_probe(struct platform_device *pldev)
+{
+ struct kpc_core_device_platdata *drvdata;
+ struct spi_master *master;
+ struct kp_spi *kpspi;
+ struct resource *r;
+ int status = 0;
+ int i;
+
+ drvdata = pldev->dev.platform_data;
+ if (!drvdata){
+ dev_err(&pldev->dev, "kp_spi_probe: platform_data is NULL!\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pldev->dev, sizeof(struct kp_spi));
+ if (master == NULL) {
+ dev_err(&pldev->dev, "kp_spi_probe: master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* set up the spi functions */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = (unsigned int)SPI_BPW_RANGE_MASK(4, 32);
+ master->setup = kp_spi_setup;
+ master->transfer_one_message = kp_spi_transfer_one_message;
+ master->cleanup = kp_spi_cleanup;
+
+ platform_set_drvdata(pldev, master);
+
+ kpspi = spi_master_get_devdata(master);
+ kpspi->master = master;
+ kpspi->dev = &pldev->dev;
+
+ master->num_chipselect = 4;
+ if (pldev->id != -1) {
+ master->bus_num = pldev->id;
+ }
+ kpspi->pin_dir = 0;
+
+ r = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pldev->dev, "kp_spi_probe: Unable to get platform resources\n");
+ status = -ENODEV;
+ goto free_master;
+ }
+
+ kpspi->phys = (unsigned long)ioremap_nocache(r->start, resource_size(r));
+ kpspi->base = (u64 __iomem *)kpspi->phys;
+
+ status = spi_register_master(master);
+ if (status < 0) {
+ dev_err(&pldev->dev, "Unable to register SPI device\n");
+ goto free_master;
+ }
+
+ /* register the slave boards */
+ #define NEW_SPI_DEVICE_FROM_BOARD_INFO_TABLE(table) \
+ for (i = 0 ; i < ARRAY_SIZE(table) ; i++) { \
+ spi_new_device(master, &(table[i])); \
+ }
+
+ switch ((drvdata->card_id & 0xFFFF0000) >> 16) {
+ case PCI_DEVICE_ID_DAKTRONICS_KADOKA_P2KR0:
+ NEW_SPI_DEVICE_FROM_BOARD_INFO_TABLE(p2kr0_board_info);
+ break;
+ default:
+ dev_err(&pldev->dev, "Unknown hardware, cant know what partition table to use!\n");
+ goto free_master;
+ break;
+ }
+
+ return status;
+
+ free_master:
+ spi_master_put(master);
+ return status;
+}
+
+static int
+kp_spi_remove(struct platform_device *pldev)
+{
+ struct spi_master * master = platform_get_drvdata(pldev);
+ spi_unregister_master(master);
+ return 0;
+}
+
+
+static struct platform_driver kp_spi_driver = {
+ .driver = {
+ .name = KP_DRIVER_NAME_SPI,
+ },
+ .probe = kp_spi_probe,
+ .remove = kp_spi_remove,
+};
+
+module_platform_driver(kp_spi_driver);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:kp_spi");
diff --git a/drivers/staging/kpc2000/kpc_spi/spi_parts.h b/drivers/staging/kpc2000/kpc_spi/spi_parts.h
new file mode 100644
index 000000000000..33e62acc5e08
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_spi/spi_parts.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __KPC_SPI_SPI_PARTS_H__
+#define __KPC_SPI_SPI_PARTS_H__
+
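+/*
+ * Partition layout for the two flash chips on the P2KR0 card: four
+ * equally sized SLOT partitions per chip, plus a catch-all partition
+ * covering the remaining space.
+ */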
+static struct mtd_partition p2kr0_spi0_parts[] = {
+ { .name = "SLOT_0", .size = 7798784, .offset = 0, },
+ { .name = "SLOT_1", .size = 7798784, .offset = MTDPART_OFS_NXTBLK },
+ { .name = "SLOT_2", .size = 7798784, .offset = MTDPART_OFS_NXTBLK },
+ { .name = "SLOT_3", .size = 7798784, .offset = MTDPART_OFS_NXTBLK },
+ { .name = "CS0_EXTRA", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_NXTBLK }
+};
+static struct mtd_partition p2kr0_spi1_parts[] = {
+ { .name = "SLOT_4", .size = 7798784, .offset = 0, },
+ { .name = "SLOT_5", .size = 7798784, .offset = MTDPART_OFS_NXTBLK },
+ { .name = "SLOT_6", .size = 7798784, .offset = MTDPART_OFS_NXTBLK },
+ { .name = "SLOT_7", .size = 7798784, .offset = MTDPART_OFS_NXTBLK },
+ { .name = "CS1_EXTRA", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_NXTBLK }
+};
+
+static struct flash_platform_data p2kr0_spi0_pdata = {
+ .name = "SPI0",
+ .nr_parts = ARRAY_SIZE(p2kr0_spi0_parts),
+ .parts = p2kr0_spi0_parts,
+};
+static struct flash_platform_data p2kr0_spi1_pdata = {
+ .name = "SPI1",
+ .nr_parts = ARRAY_SIZE(p2kr0_spi1_parts),
+ .parts = p2kr0_spi1_parts,
+};
+
+static struct spi_board_info p2kr0_board_info[] = {
+ {
+ .modalias = "n25q256a11",
+ .bus_num = 1,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ .platform_data = &p2kr0_spi0_pdata
+ },
+ {
+ .modalias = "n25q256a11",
+ .bus_num = 1,
+ .chip_select = 1,
+ .mode = SPI_MODE_0,
+ .platform_data = &p2kr0_spi1_pdata
+ },
+};
+
+#endif
diff --git a/drivers/staging/ks7010/Kconfig b/drivers/staging/ks7010/Kconfig
index 0b9217674d5b..0987fdc2f70d 100644
--- a/drivers/staging/ks7010/Kconfig
+++ b/drivers/staging/ks7010/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config KS7010
tristate "KeyStream KS7010 SDIO support"
depends on MMC && WIRELESS
diff --git a/drivers/staging/ks7010/Makefile b/drivers/staging/ks7010/Makefile
index 412e2105a3a5..009851a32310 100644
--- a/drivers/staging/ks7010/Makefile
+++ b/drivers/staging/ks7010/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_KS7010) += ks7010.o
ks7010-y := ks_hostif.o ks_wlan_net.o ks7010_sdio.o
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 06ebea0be118..e089366ed02a 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -219,7 +219,6 @@ michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result)
}
desc->tfm = tfm;
- desc->flags = 0;
ret = crypto_shash_init(desc);
if (ret < 0)
@@ -362,6 +361,8 @@ int hostif_data_indication_wpa(struct ks_wlan_private *priv,
(auth_type == TYPE_GMK2 &&
priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP)) &&
key->key_len) {
+ int ret;
+
netdev_dbg(priv->net_dev, "TKIP: protocol=%04X: size=%u\n",
eth_proto, priv->rx_size);
/* MIC save */
@@ -369,15 +370,11 @@ int hostif_data_indication_wpa(struct ks_wlan_private *priv,
(priv->rxp) + ((priv->rx_size) - sizeof(recv_mic)),
sizeof(recv_mic));
priv->rx_size = priv->rx_size - sizeof(recv_mic);
- if (auth_type > 0 && auth_type < 4) { /* auth_type check */
- int ret;
-
- ret = michael_mic(key->rx_mic_key,
- priv->rxp, priv->rx_size,
- 0, mic);
- if (ret < 0)
- return ret;
- }
+
+ ret = michael_mic(key->rx_mic_key, priv->rxp, priv->rx_size,
+ 0, mic);
+ if (ret < 0)
+ return ret;
if (memcmp(mic, recv_mic, sizeof(mic)) != 0) {
now = jiffies;
mic_failure = &priv->wpa.mic_failure;
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 1da5c20d65c0..f77f5eee7fc2 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -1,7 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig STAGING_MEDIA
bool "Media staging drivers"
default n
- ---help---
+ help
This option allows you to select a number of media drivers that
don't have the "normal" Linux kernel quality level.
Most of them don't follow properly the V4L, DVB and/or RC API's,
@@ -33,8 +34,6 @@ source "drivers/staging/media/sunxi/Kconfig"
source "drivers/staging/media/tegra-vde/Kconfig"
-source "drivers/staging/media/zoran/Kconfig"
-
source "drivers/staging/media/ipu3/Kconfig"
source "drivers/staging/media/soc_camera/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 0355e3030504..99218bfc997f 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
-obj-$(CONFIG_VIDEO_ZORAN) += zoran/
obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip/vpu/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
diff --git a/drivers/staging/media/bcm2048/Kconfig b/drivers/staging/media/bcm2048/Kconfig
index a9fc6e186494..ab2d50cac140 100644
--- a/drivers/staging/media/bcm2048/Kconfig
+++ b/drivers/staging/media/bcm2048/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Multimedia Video device configuration
#
@@ -5,7 +6,7 @@
config I2C_BCM2048
tristate "Broadcom BCM2048 FM Radio Receiver support"
depends on I2C && VIDEO_V4L2 && RADIO_ADAPTERS
- ---help---
+ help
Say Y here if you want support to BCM2048 FM Radio Receiver.
This device driver supports only i2c bus.
diff --git a/drivers/staging/media/bcm2048/Makefile b/drivers/staging/media/bcm2048/Makefile
index b4f5663d1408..f42056848dc6 100644
--- a/drivers/staging/media/bcm2048/Makefile
+++ b/drivers/staging/media/bcm2048/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_I2C_BCM2048) += radio-bcm2048.o
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index d9b02ff66259..09903ffb13ba 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2404,7 +2404,7 @@ static int bcm2048_vidioc_g_audio(struct file *file, void *priv,
if (audio->index > 1)
return -EINVAL;
- strncpy(audio->name, "Radio", 32);
+ strscpy(audio->name, "Radio", sizeof(audio->name));
audio->capability = V4L2_AUDCAP_STEREO;
return 0;
@@ -2432,7 +2432,7 @@ static int bcm2048_vidioc_g_tuner(struct file *file, void *priv,
if (tuner->index > 0)
return -EINVAL;
- strncpy(tuner->name, "FM Receiver", 32);
+ strscpy(tuner->name, "FM Receiver", sizeof(tuner->name));
tuner->type = V4L2_TUNER_RADIO;
tuner->rangelow =
dev_to_v4l2(bcm2048_get_region_bottom_frequency(bdev));
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
index aea449a8dbf8..94bf6746c03f 100644
--- a/drivers/staging/media/davinci_vpfe/Kconfig
+++ b/drivers/staging/media/davinci_vpfe/Kconfig
@@ -1,7 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_DM365_VPFE
tristate "DM365 VPFE Media Controller Capture Driver"
depends on VIDEO_V4L2
- depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || COMPILE_TEST
+ depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || (COMPILE_TEST && !ARCH_OMAP1)
depends on VIDEO_V4L2_SUBDEV_API
depends on VIDEO_DAVINCI_VPBE_DISPLAY
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
index 9268e507f791..0ae8c5014f74 100644
--- a/drivers/staging/media/davinci_vpfe/Makefile
+++ b/drivers/staging/media/davinci_vpfe/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci-vfpe.o
davinci-vfpe-objs := \
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index 3d910b85905c..30e2edc0cec5 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1264,8 +1264,7 @@ static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
module_if = &ipipe_modules[i];
from = *(void **)((void *)cfg + module_if->config_offset);
- params = kmalloc(sizeof(struct ipipe_module_params),
- GFP_KERNEL);
+ params = kmalloc(sizeof(*params), GFP_KERNEL);
to = (void *)params + module_if->param_offset;
size = module_if->param_size;
@@ -1306,8 +1305,7 @@ static int ipipe_g_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
module_if = &ipipe_modules[i];
to = *(void **)((void *)cfg + module_if->config_offset);
- params = kmalloc(sizeof(struct ipipe_module_params),
- GFP_KERNEL);
+ params = kmalloc(sizeof(*params), GFP_KERNEL);
from = (void *)params + module_if->param_offset;
size = module_if->param_size;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
index 174334b53f96..866ae12aeb07 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
@@ -27,8 +27,6 @@
#include "davinci_vpfe_user.h"
#include "vpfe_video.h"
-#define CEIL(a, b) (((a) + (b-1)) / (b))
-
enum ipipe_noise_filter {
IPIPE_D2F_1ST = 0,
IPIPE_D2F_2ND = 1,
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index 565a3dc5bed1..110473c30577 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -825,8 +825,10 @@ ipipe_set_lum_adj_regs(void __iomem *base_addr, struct ipipe_lum_adj *lum_adj)
regw_ip(base_addr, val, YUV_ADJ);
}
-#define IPIPE_S12Q8(decimal, integer) \
- (((decimal & 0xff) | ((integer & 0xf) << 8)))
+static inline u32 ipipe_s12q8(unsigned short decimal, short integer)
+{
+ return (decimal & 0xff) | ((integer & 0xf) << 8);
+}
void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
struct vpfe_ipipe_rgb2yuv *yuv)
@@ -835,23 +837,23 @@ void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
/* S10Q8 */
ipipe_clock_enable(base_addr);
- val = IPIPE_S12Q8(yuv->coef_ry.decimal, yuv->coef_ry.integer);
+ val = ipipe_s12q8(yuv->coef_ry.decimal, yuv->coef_ry.integer);
regw_ip(base_addr, val, YUV_MUL_RY);
- val = IPIPE_S12Q8(yuv->coef_gy.decimal, yuv->coef_gy.integer);
+ val = ipipe_s12q8(yuv->coef_gy.decimal, yuv->coef_gy.integer);
regw_ip(base_addr, val, YUV_MUL_GY);
- val = IPIPE_S12Q8(yuv->coef_by.decimal, yuv->coef_by.integer);
+ val = ipipe_s12q8(yuv->coef_by.decimal, yuv->coef_by.integer);
regw_ip(base_addr, val, YUV_MUL_BY);
- val = IPIPE_S12Q8(yuv->coef_rcb.decimal, yuv->coef_rcb.integer);
+ val = ipipe_s12q8(yuv->coef_rcb.decimal, yuv->coef_rcb.integer);
regw_ip(base_addr, val, YUV_MUL_RCB);
- val = IPIPE_S12Q8(yuv->coef_gcb.decimal, yuv->coef_gcb.integer);
+ val = ipipe_s12q8(yuv->coef_gcb.decimal, yuv->coef_gcb.integer);
regw_ip(base_addr, val, YUV_MUL_GCB);
- val = IPIPE_S12Q8(yuv->coef_bcb.decimal, yuv->coef_bcb.integer);
+ val = ipipe_s12q8(yuv->coef_bcb.decimal, yuv->coef_bcb.integer);
regw_ip(base_addr, val, YUV_MUL_BCB);
- val = IPIPE_S12Q8(yuv->coef_rcr.decimal, yuv->coef_rcr.integer);
+ val = ipipe_s12q8(yuv->coef_rcr.decimal, yuv->coef_rcr.integer);
regw_ip(base_addr, val, YUV_MUL_RCR);
- val = IPIPE_S12Q8(yuv->coef_gcr.decimal, yuv->coef_gcr.integer);
+ val = ipipe_s12q8(yuv->coef_gcr.decimal, yuv->coef_gcr.integer);
regw_ip(base_addr, val, YUV_MUL_GCR);
- val = IPIPE_S12Q8(yuv->coef_bcr.decimal, yuv->coef_bcr.integer);
+ val = ipipe_s12q8(yuv->coef_bcr.decimal, yuv->coef_bcr.integer);
regw_ip(base_addr, val, YUV_MUL_BCR);
regw_ip(base_addr, yuv->out_ofst_y & RGB2YCBCR_OFST_MASK, YUV_OFT_Y);
regw_ip(base_addr, yuv->out_ofst_cb & RGB2YCBCR_OFST_MASK, YUV_OFT_CB);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index 22fcdbcde96b..51d4cd1bdb97 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -104,7 +104,7 @@ ipipeif_get_cfg_src1(struct vpfe_ipipeif_device *ipipeif)
informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
if (ipipeif->input == IPIPEIF_INPUT_MEMORY &&
- (informat->code == MEDIA_BUS_FMT_Y8_1X8 ||
+ (informat->code == MEDIA_BUS_FMT_Y8_1X8 ||
informat->code == MEDIA_BUS_FMT_UV8_1X8))
return IPIPEIF_CCDC;
@@ -189,7 +189,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
enum ipipeif_input_source ipipeif_source;
u32 isif_port_if;
void __iomem *ipipeif_base_addr;
- unsigned int val;
+ unsigned long val;
int data_shift;
int pack_mode;
int source1;
@@ -296,14 +296,14 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
case MEDIA_BUS_FMT_YUYV8_1X16:
case MEDIA_BUS_FMT_UYVY8_2X8:
case MEDIA_BUS_FMT_Y8_1X8:
- RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
- SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ clear_bit(IPIPEIF_CFG2_YUV8_SHIFT, &val);
+ set_bit(IPIPEIF_CFG2_YUV16_SHIFT, &val);
ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
break;
default:
- RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
- RESETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ clear_bit(IPIPEIF_CFG2_YUV8_SHIFT, &val);
+ clear_bit(IPIPEIF_CFG2_YUV16_SHIFT, &val);
ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
break;
}
@@ -344,23 +344,23 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
switch (isif_port_if) {
case MEDIA_BUS_FMT_YUYV8_1X16:
case MEDIA_BUS_FMT_YUYV10_1X20:
- RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
- SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ clear_bit(IPIPEIF_CFG2_YUV8_SHIFT, &val);
+ set_bit(IPIPEIF_CFG2_YUV16_SHIFT, &val);
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
case MEDIA_BUS_FMT_Y8_1X8:
case MEDIA_BUS_FMT_YUYV10_2X10:
- SETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
- SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ set_bit(IPIPEIF_CFG2_YUV8_SHIFT, &val);
+ set_bit(IPIPEIF_CFG2_YUV16_SHIFT, &val);
val |= IPIPEIF_CBCR_Y << IPIPEIF_CFG2_YUV8P_SHIFT;
break;
default:
/* Bayer */
ipipeif_write(params.if_5_1.clip, ipipeif_base_addr,
- IPIPEIF_OCLIP);
+ IPIPEIF_OCLIP);
}
ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
break;
@@ -389,7 +389,7 @@ ipipeif_set_config(struct v4l2_subdev *sd, struct ipipeif_params *config)
ipipeif->config.rsz = config->rsz;
ipipeif->config.decimation = config->decimation;
if (ipipeif->config.decimation &&
- (ipipeif->config.rsz < IPIPEIF_RSZ_MIN ||
+ (ipipeif->config.rsz < IPIPEIF_RSZ_MIN ||
ipipeif->config.rsz > IPIPEIF_RSZ_MAX)) {
dev_err(dev, "rsz range is %d to %d\n",
IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
@@ -580,7 +580,7 @@ static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
*/
static int
ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_format *fmt)
{
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -679,8 +679,8 @@ ipipeif_enum_frame_size(struct v4l2_subdev *sd,
*/
static struct v4l2_mbus_framefmt *
__ipipeif_get_format(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- enum v4l2_subdev_format_whence which)
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&ipipeif->subdev, cfg, pad);
@@ -697,13 +697,13 @@ __ipipeif_get_format(struct vpfe_ipipeif_device *ipipeif,
*/
static int
ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_format *fmt)
{
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
- if (format == NULL)
+ if (!format)
return -EINVAL;
ipipeif_try_format(ipipeif, cfg, fmt->pad, &fmt->format, fmt->which);
@@ -879,7 +879,7 @@ static const struct vpfe_video_operations video_in_ops = {
static int
ipipeif_link_setup(struct media_entity *entity, const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
+ const struct media_pad *remote, u32 flags)
{
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -920,8 +920,7 @@ ipipeif_link_setup(struct media_entity *entity, const struct media_pad *local,
if (remote->entity == &vpfe->vpfe_ipipe.subdev.entity)
/* connencted to ipipe */
ipipeif->output = IPIPEIF_OUTPUT_IPIPE;
- else if (remote->entity == &vpfe->vpfe_resizer.
- crop_resizer.subdev.entity)
+ else if (remote->entity == &vpfe->vpfe_resizer.crop_resizer.subdev.entity)
/* connected to resizer */
ipipeif->output = IPIPEIF_OUTPUT_RESIZER;
else
@@ -976,7 +975,7 @@ vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
flags = 0;
ret = media_create_pad_link(&ipipeif->video_in.video_dev.entity, 0,
- &ipipeif->subdev.entity, 0, flags);
+ &ipipeif->subdev.entity, 0, flags);
if (ret < 0)
goto fail;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
index 4685d64016de..4d126fc871f3 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
@@ -166,8 +166,6 @@ struct vpfe_ipipeif_device {
#define IPIPEIF_RSZ_MIN 16
#define IPIPEIF_RSZ_MAX 112
#define IPIPEIF_RSZ_CONST 16
-#define SETBIT(reg, bit) (reg = ((reg) | ((0x00000001)<<(bit))))
-#define RESETBIT(reg, bit) (reg = ((reg) & (~(0x00000001<<(bit)))))
#define IPIPEIF_ADOFS_LSB_MASK 0x1ff
#define IPIPEIF_ADOFS_LSB_SHIFT 5
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 0a6d038fcec9..46fd8184fc77 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -433,9 +433,9 @@ static int isif_get_params(struct v4l2_subdev *sd, void *params)
return 0;
}
-static int isif_validate_df_csc_params(struct vpfe_isif_df_csc *df_csc)
+static int isif_validate_df_csc_params(const struct vpfe_isif_df_csc *df_csc)
{
- struct vpfe_isif_color_space_conv *csc;
+ const struct vpfe_isif_color_space_conv *csc;
int err = -EINVAL;
int i;
@@ -481,7 +481,7 @@ static int isif_validate_df_csc_params(struct vpfe_isif_df_csc *df_csc)
#define DM365_ISIF_MAX_DFCMEM0 0x1fff
#define DM365_ISIF_MAX_DFCMEM1 0x1fff
-static int isif_validate_dfc_params(struct vpfe_isif_dfc *dfc)
+static int isif_validate_dfc_params(const struct vpfe_isif_dfc *dfc)
{
int err = -EINVAL;
int i;
@@ -532,7 +532,7 @@ static int isif_validate_dfc_params(struct vpfe_isif_dfc *dfc)
#define DM365_ISIF_MAX_CLVSV 0x1fff
#define DM365_ISIF_MAX_HEIGHT_BLACK_REGION 0x1fff
-static int isif_validate_bclamp_params(struct vpfe_isif_black_clamp *bclamp)
+static int isif_validate_bclamp_params(const struct vpfe_isif_black_clamp *bclamp)
{
int err = -EINVAL;
@@ -580,7 +580,7 @@ static int isif_validate_bclamp_params(struct vpfe_isif_black_clamp *bclamp)
}
static int
-isif_validate_raw_params(struct vpfe_isif_raw_config *params)
+isif_validate_raw_params(const struct vpfe_isif_raw_config *params)
{
int ret;
@@ -593,20 +593,18 @@ isif_validate_raw_params(struct vpfe_isif_raw_config *params)
return isif_validate_bclamp_params(&params->bclamp);
}
-static int isif_set_params(struct v4l2_subdev *sd, void *params)
+static int isif_set_params(struct v4l2_subdev *sd, const struct vpfe_isif_raw_config *params)
{
struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct vpfe_isif_raw_config isif_raw_params;
int ret = -EINVAL;
/* only raw module parameters can be set through the IOCTL */
if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
return ret;
- memcpy(&isif_raw_params, params, sizeof(isif_raw_params));
- if (!isif_validate_raw_params(&isif_raw_params)) {
- memcpy(&isif->isif_cfg.bayer.config_params, &isif_raw_params,
- sizeof(isif_raw_params));
+ if (!isif_validate_raw_params(params)) {
+ memcpy(&isif->isif_cfg.bayer.config_params, params,
+ sizeof(*params));
ret = 0;
}
return ret;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 9d726298b406..7adf1fae43f6 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -386,7 +386,7 @@ resizer_calculate_down_scale_f_div_param(struct device *dev,
}
o = 10 + (two_power << 2);
if (((input_width << 7) / rsz) % 2)
- o += (((CEIL(rsz, 1024)) << 1) << n);
+ o += ((DIV_ROUND_UP(rsz, 1024) << 1) << n);
h2 = output_width - h1;
/* phi */
val = (h1 * rsz) - (((upper_h1 - (o - 10)) / two_power) << 8);
@@ -630,7 +630,7 @@ resizer_calculate_normal_f_div_param(struct device *dev, int input_width,
val /= rsz << 1;
val <<= 1;
val += 2;
- o += ((CEIL(rsz, 1024)) << 1);
+ o += (DIV_ROUND_UP(rsz, 1024) << 1);
h1 = val;
}
h2 = output_width - h1;
@@ -1881,7 +1881,7 @@ int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
struct v4l2_subdev *sd = &vpfe_rsz->crop_resizer.subdev;
struct media_pad *pads = &vpfe_rsz->crop_resizer.pads[0];
struct media_entity *me = &sd->entity;
- static resource_size_t res_len;
+ resource_size_t res_len;
struct resource *res;
int ret;
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index 36b276ea2ecc..4c726345dc25 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_IMX_MEDIA
tristate "i.MX5/6 V4L2 media core driver"
depends on ARCH_MXC || COMPILE_TEST
@@ -6,7 +7,7 @@ config VIDEO_IMX_MEDIA
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
- ---help---
+ help
Say yes here to enable support for video4linux media controller
driver for the i.MX5/6 SOC.
@@ -17,7 +18,7 @@ config VIDEO_IMX_CSI
tristate "i.MX5/6 Camera Sensor Interface driver"
depends on VIDEO_IMX_MEDIA && VIDEO_DEV && I2C
default y
- ---help---
+ help
A video4linux camera sensor interface driver for i.MX5/6.
config VIDEO_IMX7_CSI
diff --git a/drivers/staging/media/imx/imx-ic-common.c b/drivers/staging/media/imx/imx-ic-common.c
index 765919487a73..18cd4cb92431 100644
--- a/drivers/staging/media/imx/imx-ic-common.c
+++ b/drivers/staging/media/imx/imx-ic-common.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2014-2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,7 +22,7 @@ static struct imx_ic_ops *ic_ops[IC_NUM_OPS] = {
static int imx_ic_probe(struct platform_device *pdev)
{
- struct imx_media_internal_sd_platformdata *pdata;
+ struct imx_media_ipu_internal_sd_pdata *pdata;
struct imx_ic_priv *priv;
int ret;
@@ -63,7 +59,7 @@ static int imx_ic_probe(struct platform_device *pdev)
priv->sd.owner = THIS_MODULE;
priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
priv->sd.grp_id = pdata->grp_id;
- strncpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
+ strscpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
ret = ic_ops[priv->task_id]->init(priv);
if (ret)
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 3d43cdcb4bb9..10ffe00f1a54 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
*
@@ -6,11 +7,6 @@
* for resizing, colorspace conversion, and rotation.
*
* Copyright (c) 2012-2017 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 5c8e6ad8c025..1ba4a5154fb5 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
*
@@ -6,11 +7,6 @@
* for resizing, colorspace conversion, and rotation.
*
* Copyright (c) 2012-2017 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/staging/media/imx/imx-ic.h b/drivers/staging/media/imx/imx-ic.h
index 6b2267bda8ab..0dbcf2a7ab5f 100644
--- a/drivers/staging/media/imx/imx-ic.h
+++ b/drivers/staging/media/imx/imx-ic.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _IMX_IC_H
#define _IMX_IC_H
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 9703c85b19c4..b7ce9d439279 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Video Capture Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2012-2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/fs.h>
@@ -556,6 +552,7 @@ static void capture_stop_streaming(struct vb2_queue *vq)
{
struct capture_priv *priv = vb2_get_drv_priv(vq);
struct imx_media_buffer *frame;
+ struct imx_media_buffer *tmp;
unsigned long flags;
int ret;
@@ -570,9 +567,7 @@ static void capture_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
spin_lock_irqsave(&priv->q_lock, flags);
- while (!list_empty(&priv->ready_q)) {
- frame = list_entry(priv->ready_q.next,
- struct imx_media_buffer, list);
+ list_for_each_entry_safe(frame, tmp, &priv->ready_q, list) {
list_del(&frame->list);
vb2_buffer_done(&frame->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
}
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 3b7517348666..28fe66052cc7 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Capture CSI Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2014-2017 Mentor Graphics Inc.
* Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/gcd.h>
@@ -154,9 +150,10 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
/*
* Parses the fwnode endpoint from the source pad of the entity
* connected to this CSI. This will either be the entity directly
- * upstream from the CSI-2 receiver, or directly upstream from the
- * video mux. The endpoint is needed to determine the bus type and
- * bus config coming into the CSI.
+ * upstream from the CSI-2 receiver, directly upstream from the
+ * video mux, or directly upstream from the CSI itself. The endpoint
+ * is needed to determine the bus type and bus config coming into
+ * the CSI.
*/
static int csi_get_upstream_endpoint(struct csi_priv *priv,
struct v4l2_fwnode_endpoint *ep)
@@ -172,7 +169,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
if (!priv->src_sd)
return -EPIPE;
- src = &priv->src_sd->entity;
+ sd = priv->src_sd;
+ src = &sd->entity;
if (src->function == MEDIA_ENT_F_VID_MUX) {
/*
@@ -186,6 +184,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
src = &sd->entity;
}
+ /*
+ * If the source is neither the video mux nor the CSI-2 receiver,
+ * get the source pad directly upstream from CSI itself.
+ */
+ if (src->function != MEDIA_ENT_F_VID_MUX &&
+ sd->grp_id != IMX_MEDIA_GRP_ID_CSI2)
+ src = &priv->sd.entity;
+
/* get source pad of entity directly upstream from src */
pad = imx_media_find_upstream_pad(priv->md, src, 0);
if (IS_ERR(pad))
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
index 910594125889..6cd93419b81d 100644
--- a/drivers/staging/media/imx/imx-media-dev-common.c
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -30,7 +30,7 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev)
dev_set_drvdata(dev, imxmd);
- strlcpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
+ strscpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
imxmd->md.ops = &imx_media_md_ops;
imxmd->md.dev = dev;
@@ -38,7 +38,7 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev)
imxmd->v4l2_dev.mdev = &imxmd->md;
imxmd->v4l2_dev.notify = imx_media_notify;
- strlcpy(imxmd->v4l2_dev.name, "imx-media",
+ strscpy(imxmd->v4l2_dev.name, "imx-media",
sizeof(imxmd->v4l2_dev.name));
media_device_init(&imxmd->md);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 28a3d23aad5b..6be95584006d 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
*
* Copyright (c) 2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/fs.h>
@@ -50,12 +46,14 @@ int imx_media_add_async_subdev(struct imx_media_dev *imxmd,
int ret;
if (fwnode) {
- asd = v4l2_async_notifier_add_fwnode_subdev(
- &imxmd->notifier, fwnode, sizeof(*imxasd));
+ asd = v4l2_async_notifier_add_fwnode_subdev(&imxmd->notifier,
+ fwnode,
+ sizeof(*imxasd));
} else {
devname = dev_name(&pdev->dev);
- asd = v4l2_async_notifier_add_devname_subdev(
- &imxmd->notifier, devname, sizeof(*imxasd));
+ asd = v4l2_async_notifier_add_devname_subdev(&imxmd->notifier,
+ devname,
+ sizeof(*imxasd));
}
if (IS_ERR(asd)) {
@@ -266,10 +264,9 @@ static int imx_media_alloc_pad_vdev_lists(struct imx_media_dev *imxmd)
list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
entity = &sd->entity;
- vdev_lists = devm_kcalloc(
- imxmd->md.dev,
- entity->num_pads, sizeof(*vdev_lists),
- GFP_KERNEL);
+ vdev_lists = devm_kcalloc(imxmd->md.dev,
+ entity->num_pads, sizeof(*vdev_lists),
+ GFP_KERNEL);
if (!vdev_lists)
return -ENOMEM;
@@ -477,13 +474,6 @@ static int imx_media_probe(struct platform_device *pdev)
goto cleanup;
}
- ret = imx_media_add_internal_subdevs(imxmd);
- if (ret) {
- v4l2_err(&imxmd->v4l2_dev,
- "add_internal_subdevs failed with %d\n", ret);
- goto cleanup;
- }
-
ret = imx_media_dev_notifier_register(imxmd);
if (ret)
goto del_int;
@@ -491,7 +481,7 @@ static int imx_media_probe(struct platform_device *pdev)
return 0;
del_int:
- imx_media_remove_internal_subdevs(imxmd);
+ imx_media_remove_ipu_internal_subdevs(imxmd);
cleanup:
v4l2_async_notifier_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
@@ -508,7 +498,7 @@ static int imx_media_remove(struct platform_device *pdev)
v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
v4l2_async_notifier_unregister(&imxmd->notifier);
- imx_media_remove_internal_subdevs(imxmd);
+ imx_media_remove_ipu_internal_subdevs(imxmd);
v4l2_async_notifier_cleanup(&imxmd->notifier);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
index 8cf773eef9da..2ab64bc30f5c 100644
--- a/drivers/staging/media/imx/imx-media-fim.c
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Frame Interval Monitor.
*
* Copyright (c) 2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/irq.h>
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
index 5e10d95e5529..df49ebfbe98a 100644
--- a/drivers/staging/media/imx/imx-media-internal-sd.c
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Media driver for Freescale i.MX5/6 SOC
*
- * Adds the internal subdevices and the media links between them.
+ * Adds the IPU internal subdevices and the media links between them.
*
* Copyright (c) 2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/platform_device.h>
#include "imx-media.h"
@@ -192,7 +188,7 @@ static struct v4l2_subdev *find_sink(struct imx_media_dev *imxmd,
/*
* retrieve IPU id from subdev name, note: can't get this from
- * struct imx_media_internal_sd_platformdata because if src is
+ * struct imx_media_ipu_internal_sd_pdata because if src is
* a CSI, it has different struct ipu_client_platformdata which
* does not contain IPU id.
*/
@@ -270,7 +266,7 @@ static int add_internal_subdev(struct imx_media_dev *imxmd,
const struct internal_subdev *isd,
int ipu_id)
{
- struct imx_media_internal_sd_platformdata pdata;
+ struct imx_media_ipu_internal_sd_pdata pdata;
struct platform_device_info pdevinfo = {};
struct platform_device *pdev;
@@ -298,13 +294,14 @@ static int add_internal_subdev(struct imx_media_dev *imxmd,
}
/* adds the internal subdevs in one ipu */
-static int add_ipu_internal_subdevs(struct imx_media_dev *imxmd, int ipu_id)
+int imx_media_add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ int ipu_id)
{
enum isd_enum i;
+ int ret;
for (i = 0; i < num_isd; i++) {
const struct internal_subdev *isd = &int_subdev[i];
- int ret;
/*
* the CSIs are represented in the device-tree, so those
@@ -322,32 +319,17 @@ static int add_ipu_internal_subdevs(struct imx_media_dev *imxmd, int ipu_id)
}
if (ret)
- return ret;
+ goto remove;
}
return 0;
-}
-
-int imx_media_add_internal_subdevs(struct imx_media_dev *imxmd)
-{
- int ret;
-
- ret = add_ipu_internal_subdevs(imxmd, 0);
- if (ret)
- goto remove;
-
- ret = add_ipu_internal_subdevs(imxmd, 1);
- if (ret)
- goto remove;
-
- return 0;
remove:
- imx_media_remove_internal_subdevs(imxmd);
+ imx_media_remove_ipu_internal_subdevs(imxmd);
return ret;
}
-void imx_media_remove_internal_subdevs(struct imx_media_dev *imxmd)
+void imx_media_remove_ipu_internal_subdevs(struct imx_media_dev *imxmd)
{
struct imx_media_async_subdev *imxasd;
struct v4l2_async_subdev *asd;
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index 03446335ac03..990e82aa8e42 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Media driver for Freescale i.MX5/6 SOC
*
* Open Firmware parsing.
*
* Copyright (c) 2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/of_platform.h>
#include <media/v4l2-ctrls.h>
@@ -23,36 +19,25 @@
int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct device_node *csi_np)
{
- int ret;
-
if (!of_device_is_available(csi_np)) {
dev_dbg(imxmd->md.dev, "%s: %pOFn not enabled\n", __func__,
csi_np);
- /* unavailable is not an error */
- return 0;
+ return -ENODEV;
}
/* add CSI fwnode to async notifier */
- ret = imx_media_add_async_subdev(imxmd, of_fwnode_handle(csi_np), NULL);
- if (ret) {
- if (ret == -EEXIST) {
- /* already added, everything is fine */
- return 0;
- }
-
- /* other error, can't continue */
- return ret;
- }
-
- return 0;
+ return imx_media_add_async_subdev(imxmd, of_fwnode_handle(csi_np),
+ NULL);
}
EXPORT_SYMBOL_GPL(imx_media_of_add_csi);
int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
struct device_node *np)
{
+ bool ipu_found[2] = {false, false};
struct device_node *csi_np;
int i, ret;
+ u32 ipu_id;
for (i = 0; ; i++) {
csi_np = of_parse_phandle(np, "ports", i);
@@ -60,12 +45,43 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
break;
ret = imx_media_of_add_csi(imxmd, csi_np);
- of_node_put(csi_np);
- if (ret)
- return ret;
+ if (ret) {
+ /* unavailable or already added is not an error */
+ if (ret == -ENODEV || ret == -EEXIST) {
+ of_node_put(csi_np);
+ continue;
+ }
+
+ /* other error, can't continue */
+ goto err_out;
+ }
+
+ ret = of_alias_get_id(csi_np->parent, "ipu");
+ if (ret < 0)
+ goto err_out;
+ if (ret > 1) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ipu_id = ret;
+
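+ /* add the IPU internal subdevs only once per IPU */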
+ if (!ipu_found[ipu_id]) {
+ ret = imx_media_add_ipu_internal_subdevs(imxmd,
+ ipu_id);
+ if (ret)
+ goto err_out;
+ }
+
+ ipu_found[ipu_id] = true;
}
return 0;
+
+err_out:
+ imx_media_remove_ipu_internal_subdevs(imxmd);
+ of_node_put(csi_np);
+ return ret;
}
/*
@@ -145,15 +161,18 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
struct v4l2_subdev *csi)
{
struct device_node *csi_np = csi->dev->of_node;
- struct fwnode_handle *fwnode, *csi_ep;
- struct v4l2_fwnode_link link;
struct device_node *ep;
- int ret;
-
- link.local_node = of_fwnode_handle(csi_np);
- link.local_port = CSI_SINK_PAD;
for_each_child_of_node(csi_np, ep) {
+ struct fwnode_handle *fwnode, *csi_ep;
+ struct v4l2_fwnode_link link;
+ int ret;
+
+ memset(&link, 0, sizeof(link));
+
+ link.local_node = of_fwnode_handle(csi_np);
+ link.local_port = CSI_SINK_PAD;
+
csi_ep = of_fwnode_handle(ep);
fwnode = fwnode_graph_get_remote_endpoint(csi_ep);
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 1c63a2765a81..b41842dba5ec 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
*
* Copyright (c) 2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
#include "imx-media.h"
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 2808662e2597..4487374c9435 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Deinterlacer Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2017 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -231,6 +227,12 @@ static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
break;
+ default:
+ /*
+ * can't get here, priv->fieldtype can only be one of
+ * the above. This is to quiet smatch errors.
+ */
+ return;
}
ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
@@ -744,7 +746,7 @@ static int vdic_link_setup(struct media_entity *entity,
remote_sd = media_entity_to_v4l2_subdev(remote->entity);
/* direct pad must connect to a CSI */
- if (!(remote_sd->grp_id & IMX_MEDIA_GRP_ID_CSI) ||
+ if (!(remote_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) ||
remote->index != CSI_SRC_PAD_DIRECT) {
ret = -EINVAL;
goto out;
@@ -934,7 +936,7 @@ static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
static int imx_vdic_probe(struct platform_device *pdev)
{
- struct imx_media_internal_sd_platformdata *pdata;
+ struct imx_media_ipu_internal_sd_pdata *pdata;
struct vdic_priv *priv;
int ret;
@@ -958,7 +960,7 @@ static int imx_vdic_probe(struct platform_device *pdev)
priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
/* get our group id */
priv->sd.grp_id = pdata->grp_id;
- strncpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
+ strscpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
mutex_init(&priv->lock);
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index ae964c8d5be1..eb59ba0c3b62 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
*
* Copyright (c) 2016 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _IMX_MEDIA_H
#define _IMX_MEDIA_H
@@ -115,7 +111,7 @@ struct imx_media_pad_vdev {
struct list_head list;
};
-struct imx_media_internal_sd_platformdata {
+struct imx_media_ipu_internal_sd_pdata {
char sd_name[V4L2_SUBDEV_NAME_SIZE];
u32 grp_id;
int ipu_id;
@@ -252,10 +248,11 @@ struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd);
void imx_media_fim_free(struct imx_media_fim *fim);
/* imx-media-internal-sd.c */
-int imx_media_add_internal_subdevs(struct imx_media_dev *imxmd);
+int imx_media_add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ int ipu_id);
int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
struct v4l2_subdev *sd);
-void imx_media_remove_internal_subdevs(struct imx_media_dev *imxmd);
+void imx_media_remove_ipu_internal_subdevs(struct imx_media_dev *imxmd);
/* imx-media-of.c */
int imx_media_add_of_subdevs(struct imx_media_dev *dev,
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index 6a1cee55a49b..f29e28df36ed 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* MIPI CSI-2 Receiver Subdev for Freescale i.MX6 SOC.
*
* Copyright (c) 2012-2017 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
@@ -628,10 +624,8 @@ static int csi2_probe(struct platform_device *pdev)
}
csi2->base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
- if (!csi2->base) {
- v4l2_err(&csi2->sd, "failed to map CSI-2 registers\n");
+ if (!csi2->base)
return -ENOMEM;
- }
mutex_init(&csi2->lock);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 3fba7c27c0ec..18eb5d3ecf10 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1051,7 +1051,9 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
goto out_unlock;
}
- imx7_csi_try_fmt(csi, cfg, sdformat, &cc);
+ ret = imx7_csi_try_fmt(csi, cfg, sdformat, &cc);
+ if (ret < 0)
+ goto out_unlock;
fmt = imx7_csi_get_format(csi, cfg, sdformat->pad, sdformat->which);
if (!fmt) {
@@ -1271,7 +1273,7 @@ static int imx7_csi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &csi->sd);
ret = imx_media_of_add_csi(imxmd, node);
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV && ret != -EEXIST)
goto cleanup;
ret = imx_media_dev_notifier_register(imxmd);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 2ddcc42ab8ff..19455f425416 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -491,7 +492,7 @@ static int mipi_csis_clk_get(struct csi_state *state)
state->wrap_clk = devm_clk_get(dev, "wrap");
if (IS_ERR(state->wrap_clk))
- return IS_ERR(state->wrap_clk);
+ return PTR_ERR(state->wrap_clk);
/* Set clock rate */
ret = clk_set_rate(state->wrap_clk, state->clk_frequency);
@@ -706,7 +707,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
mutex_lock(&state->lock);
- if (fmt && sdformat->pad == CSIS_PAD_SOURCE) {
+ if (sdformat->pad == CSIS_PAD_SOURCE) {
sdformat->format = *fmt;
goto unlock;
}
@@ -889,8 +890,6 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
return ret;
}
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
static int mipi_csis_dump_regs_show(struct seq_file *m, void *private)
{
@@ -900,7 +899,7 @@ static int mipi_csis_dump_regs_show(struct seq_file *m, void *private)
}
DEFINE_SHOW_ATTRIBUTE(mipi_csis_dump_regs);
-static int __init_or_module mipi_csis_debugfs_init(struct csi_state *state)
+static int mipi_csis_debugfs_init(struct csi_state *state)
{
struct dentry *d;
@@ -934,17 +933,6 @@ static void mipi_csis_debugfs_exit(struct csi_state *state)
debugfs_remove_recursive(state->debugfs_root);
}
-#else
-static int mipi_csis_debugfs_init(struct csi_state *state __maybe_unused)
-{
- return 0;
-}
-
-static void mipi_csis_debugfs_exit(struct csi_state *state __maybe_unused)
-{
-}
-#endif
-
static int mipi_csis_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1039,8 +1027,7 @@ disable_clock:
static int mipi_csis_pm_suspend(struct device *dev, bool runtime)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct v4l2_subdev *mipi_sd = platform_get_drvdata(pdev);
+ struct v4l2_subdev *mipi_sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
int ret = 0;
@@ -1064,8 +1051,7 @@ unlock:
static int mipi_csis_pm_resume(struct device *dev, bool runtime)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct v4l2_subdev *mipi_sd = platform_get_drvdata(pdev);
+ struct v4l2_subdev *mipi_sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
int ret = 0;
diff --git a/drivers/staging/media/ipu3/Kconfig b/drivers/staging/media/ipu3/Kconfig
index 75cd889f18f7..4b51c67eac88 100644
--- a/drivers/staging/media/ipu3/Kconfig
+++ b/drivers/staging/media/ipu3/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_IPU3_IMGU
tristate "Intel ipu3-imgu driver"
depends on PCI && VIDEO_V4L2
@@ -5,7 +6,7 @@ config VIDEO_IPU3_IMGU
depends on X86
select IOMMU_IOVA
select VIDEOBUF2_DMA_SG
- ---help---
+ help
This is the Video4Linux2 driver for Intel IPU3 image processing unit,
found in Intel Skylake and Kaby Lake SoCs and used for processing
images and video.
diff --git a/drivers/staging/media/ipu3/Makefile b/drivers/staging/media/ipu3/Makefile
index fa7fa3372bcb..cc288ae6d5f2 100644
--- a/drivers/staging/media/ipu3/Makefile
+++ b/drivers/staging/media/ipu3/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the IPU3 ImgU drivers
#
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 15ab77e4b766..23cf5b2cfe8b 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/iopoll.h>
+#include <linux/slab.h>
#include "ipu3-css.h"
#include "ipu3-css-fw.h"
@@ -1744,15 +1745,18 @@ int imgu_css_fmt_try(struct imgu_css *css,
struct v4l2_rect *const bds = &r[IPU3_CSS_RECT_BDS];
struct v4l2_rect *const env = &r[IPU3_CSS_RECT_ENVELOPE];
struct v4l2_rect *const gdc = &r[IPU3_CSS_RECT_GDC];
- struct imgu_css_queue q[IPU3_CSS_QUEUES];
- struct v4l2_pix_format_mplane *const in =
- &q[IPU3_CSS_QUEUE_IN].fmt.mpix;
- struct v4l2_pix_format_mplane *const out =
- &q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
- struct v4l2_pix_format_mplane *const vf =
- &q[IPU3_CSS_QUEUE_VF].fmt.mpix;
+ struct imgu_css_queue *q;
+ struct v4l2_pix_format_mplane *in, *out, *vf;
int i, s, ret;
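+	/* scratch queue state is built on the heap and freed at the out label */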
+ q = kcalloc(IPU3_CSS_QUEUES, sizeof(struct imgu_css_queue), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ in = &q[IPU3_CSS_QUEUE_IN].fmt.mpix;
+ out = &q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
+ vf = &q[IPU3_CSS_QUEUE_VF].fmt.mpix;
+
/* Adjust all formats, get statistics buffer sizes and formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
if (fmts[i])
@@ -1766,7 +1770,8 @@ int imgu_css_fmt_try(struct imgu_css *css,
IPU3_CSS_QUEUE_TO_FLAGS(i))) {
dev_notice(css->dev, "can not initialize queue %s\n",
qnames[i]);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
}
for (i = 0; i < IPU3_CSS_RECTS; i++) {
@@ -1788,7 +1793,8 @@ int imgu_css_fmt_try(struct imgu_css *css,
if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
dev_warn(css->dev, "required queues are disabled\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
@@ -1829,7 +1835,8 @@ int imgu_css_fmt_try(struct imgu_css *css,
ret = imgu_css_find_binary(css, pipe, q, r);
if (ret < 0) {
dev_err(css->dev, "failed to find suitable binary\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
css->pipes[pipe].bindex = ret;
@@ -1843,7 +1850,8 @@ int imgu_css_fmt_try(struct imgu_css *css,
IPU3_CSS_QUEUE_TO_FLAGS(i))) {
dev_err(css->dev,
"final resolution adjustment failed\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
*fmts[i] = q[i].fmt.mpix;
}
@@ -1859,7 +1867,10 @@ int imgu_css_fmt_try(struct imgu_css *css,
bds->width, bds->height, gdc->width, gdc->height,
out->width, out->height, vf->width, vf->height);
- return 0;
+ ret = 0;
+out:
+ kfree(q);
+ return ret;
}
int imgu_css_fmt_set(struct imgu_css *css,
@@ -2160,11 +2171,6 @@ int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
obgrid_size = imgu_css_fw_obgrid_size(bi);
stripes = bi->info.isp.sp.iterator.num_stripes ? : 1;
- /*
- * TODO(b/118782861): If userspace queues more than 4 buffers, the
- * parameters from previous buffers will be overwritten. Fix the driver
- * not to allow this.
- */
imgu_css_pool_get(&css_pipe->pool.parameter_set_info);
param_set = imgu_css_pool_last(&css_pipe->pool.parameter_set_info,
0)->vaddr;
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 9c0352b193a7..a7bc22040ed8 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -66,7 +66,7 @@ static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
- dev_dbg(dev, "%s %d for pipe %d", __func__, enable, pipe);
+ dev_dbg(dev, "%s %d for pipe %u", __func__, enable, pipe);
/* grab ctrl after streamon and return after off */
v4l2_ctrl_grab(imgu_sd->ctrl, enable);
@@ -101,7 +101,7 @@ static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
else
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
- dev_dbg(dev, "IPU3 pipe %d pipe_id %d", pipe, css_pipe->pipe_id);
+ dev_dbg(dev, "IPU3 pipe %u pipe_id %u", pipe, css_pipe->pipe_id);
rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
@@ -109,7 +109,7 @@ static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
r = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
if (r) {
- dev_err(dev, "failed to set initial formats pipe %d with (%d)",
+ dev_err(dev, "failed to set initial formats pipe %u with (%d)",
pipe, r);
return r;
}
@@ -157,7 +157,7 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
u32 pad = fmt->pad;
unsigned int pipe = imgu_sd->pipe;
- dev_dbg(&imgu->pci_dev->dev, "set subdev %d pad %d fmt to [%dx%d]",
+ dev_dbg(&imgu->pci_dev->dev, "set subdev %u pad %u fmt to [%ux%u]",
pipe, pad, fmt->format.width, fmt->format.height);
imgu_pipe = &imgu->imgu_pipe[pipe];
@@ -233,7 +233,7 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_rect *rect, *try_sel;
dev_dbg(&imgu->pci_dev->dev,
- "set subdev %d sel which %d target 0x%4x rect [%dx%d]",
+ "set subdev %u sel which %u target 0x%4x rect [%ux%u]",
imgu_sd->pipe, sel->which, sel->target,
sel->r.width, sel->r.height);
@@ -279,7 +279,7 @@ static int imgu_link_setup(struct media_entity *entity,
WARN_ON(pad >= IMGU_NODE_NUM);
- dev_dbg(&imgu->pci_dev->dev, "pipe %d pad %d is %s", pipe, pad,
+ dev_dbg(&imgu->pci_dev->dev, "pipe %u pad %u is %s", pipe, pad,
flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
imgu_pipe = &imgu->imgu_pipe[pipe];
@@ -294,7 +294,7 @@ static int imgu_link_setup(struct media_entity *entity,
else
__clear_bit(pipe, imgu->css.enabled_pipes);
- dev_dbg(&imgu->pci_dev->dev, "pipe %d is %s", pipe,
+ dev_dbg(&imgu->pci_dev->dev, "pipe %u is %s", pipe,
flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
return 0;
@@ -341,8 +341,10 @@ static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
struct imgu_video_device *node =
container_of(vb->vb2_queue, struct imgu_video_device, vbq);
unsigned int queue = imgu_node_to_queue(node->id);
+ struct imgu_buffer *buf = container_of(vb, struct imgu_buffer,
+ vid_buf.vbb.vb2_buf);
unsigned long need_bytes;
- unsigned int pipe = node->pipe;
+ unsigned long payload = vb2_get_plane_payload(vb, 0);
if (vb->vb2_queue->type == V4L2_BUF_TYPE_META_CAPTURE ||
vb->vb2_queue->type == V4L2_BUF_TYPE_META_OUTPUT)
@@ -350,42 +352,26 @@ static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
else
need_bytes = node->vdev_fmt.fmt.pix_mp.plane_fmt[0].sizeimage;
- if (queue == IPU3_CSS_QUEUE_PARAMS) {
- unsigned long payload = vb2_get_plane_payload(vb, 0);
- struct vb2_v4l2_buffer *buf =
- container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
- int r = -EINVAL;
-
- if (payload == 0) {
- payload = need_bytes;
- vb2_set_plane_payload(vb, 0, payload);
- }
- if (payload >= need_bytes)
- r = imgu_css_set_parameters(&imgu->css, pipe,
- vb2_plane_vaddr(vb, 0));
- buf->flags = V4L2_BUF_FLAG_DONE;
- vb2_buffer_done(vb, r == 0 ? VB2_BUF_STATE_DONE
- : VB2_BUF_STATE_ERROR);
-
- } else {
- struct imgu_buffer *buf = container_of(vb, struct imgu_buffer,
- vid_buf.vbb.vb2_buf);
+ if (queue == IPU3_CSS_QUEUE_PARAMS && payload && payload < need_bytes) {
+ dev_err(&imgu->pci_dev->dev, "invalid data size for params.");
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
- mutex_lock(&imgu->lock);
+ mutex_lock(&imgu->lock);
+ if (queue != IPU3_CSS_QUEUE_PARAMS)
imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
- list_add_tail(&buf->vid_buf.list,
- &node->buffers);
- mutex_unlock(&imgu->lock);
- vb2_set_plane_payload(&buf->vid_buf.vbb.vb2_buf, 0, need_bytes);
+ list_add_tail(&buf->vid_buf.list, &node->buffers);
+ mutex_unlock(&imgu->lock);
- if (imgu->streaming)
- imgu_queue_buffers(imgu, false, pipe);
- }
+ vb2_set_plane_payload(vb, 0, need_bytes);
- dev_dbg(&imgu->pci_dev->dev, "%s for pipe %d node %d", __func__,
- node->pipe, node->id);
+ if (imgu->streaming)
+ imgu_queue_buffers(imgu, false, node->pipe);
+ dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__,
+ node->pipe, node->id);
}
static int imgu_vb2_queue_setup(struct vb2_queue *vq,
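The rewritten imgu_vb2_buf_queue() above rejects a PARAMS buffer whose payload is set but smaller than the expected size, instead of quietly fixing it up and completing the buffer from the queue handler. A reduced sketch of just that validity check (the helper name is illustrative; need_bytes comes from the node format exactly as in the hunk):

#include <linux/types.h>
#include <media/videobuf2-core.h>

/* Illustrative check modelled on the hunk above, not the driver's exact code. */
static bool params_payload_ok(struct vb2_buffer *vb, unsigned long need_bytes)
{
        unsigned long payload = vb2_get_plane_payload(vb, 0);

        /* Zero payload means "use the full parameter size"; short payloads are invalid. */
        return payload == 0 || payload >= need_bytes;
}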
@@ -435,7 +421,7 @@ static bool imgu_all_nodes_streaming(struct imgu_device *imgu,
pipe = except->pipe;
if (!test_bit(pipe, imgu->css.enabled_pipes)) {
dev_warn(&imgu->pci_dev->dev,
- "pipe %d link is not ready yet", pipe);
+ "pipe %u link is not ready yet", pipe);
return false;
}
@@ -479,7 +465,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
int r;
unsigned int pipe;
- dev_dbg(dev, "%s node name %s pipe %d id %u", __func__,
+ dev_dbg(dev, "%s node name %s pipe %u id %u", __func__,
node->name, node->pipe, node->id);
if (imgu->streaming) {
@@ -539,7 +525,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
WARN_ON(!node->enabled);
pipe = node->pipe;
- dev_dbg(dev, "Try to stream off node [%d][%d]", pipe, node->id);
+ dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id);
imgu_pipe = &imgu->imgu_pipe[pipe];
r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, video, s_stream, 0);
if (r)
@@ -664,20 +650,19 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
struct v4l2_format *f, bool try)
{
struct device *dev = &imgu->pci_dev->dev;
- struct v4l2_pix_format_mplane try_fmts[IPU3_CSS_QUEUES];
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
struct v4l2_mbus_framefmt pad_fmt;
unsigned int i, css_q;
- int r;
+ int ret;
struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
- dev_dbg(dev, "set fmt node [%u][%u](try = %d)", pipe, node, try);
+ dev_dbg(dev, "set fmt node [%u][%u](try = %u)", pipe, node, try);
for (i = 0; i < IMGU_NODE_NUM; i++)
- dev_dbg(dev, "IMGU pipe %d node %d enabled = %d",
+ dev_dbg(dev, "IMGU pipe %u node %u enabled = %u",
pipe, i, imgu_pipe->nodes[i].enabled);
if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
@@ -688,7 +673,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
else
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
- dev_dbg(dev, "IPU3 pipe %d pipe_id = %d", pipe, css_pipe->pipe_id);
+ dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
unsigned int inode = imgu_map_node(imgu, i);
@@ -698,9 +683,13 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
continue;
if (try) {
- try_fmts[i] =
- imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
- fmts[i] = &try_fmts[i];
+ fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
+ sizeof(struct v4l2_pix_format_mplane),
+ GFP_KERNEL);
+ if (!fmts[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
} else {
fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
}
@@ -730,26 +719,33 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
* before we return success from this function, so set it here.
*/
css_q = imgu_node_to_queue(node);
- if (fmts[css_q])
- *fmts[css_q] = f->fmt.pix_mp;
- else
- return -EINVAL;
+ if (!fmts[css_q]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *fmts[css_q] = f->fmt.pix_mp;
if (try)
- r = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
+ ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
else
- r = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
+ ret = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
- /* r is the binary number in the firmware blob */
- if (r < 0)
- return r;
+ /* ret is the binary number in the firmware blob */
+ if (ret < 0)
+ goto out;
if (try)
f->fmt.pix_mp = *fmts[css_q];
else
f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
- return 0;
+out:
+ if (try) {
+ for (i = 0; i < IPU3_CSS_QUEUES; i++)
+ kfree(fmts[i]);
+ }
+
+ return ret;
}
static int imgu_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
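In imgu_fmt() the on-stack try_fmts[] array is replaced by per-queue kmemdup() copies, and every exit path funnels through one label that frees them; kfree(NULL) is a no-op, so the driver's cleanup loop can walk the whole array. A self-contained sketch of the allocate-or-unwind pattern (dup_formats() and NQUEUES are stand-ins, not IPU3 names; every src[i] is assumed non-NULL):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#define NQUEUES 4                       /* stand-in for IPU3_CSS_QUEUES */

static int dup_formats(struct v4l2_pix_format_mplane *src[NQUEUES],
                       struct v4l2_pix_format_mplane *dst[NQUEUES])
{
        unsigned int i;

        for (i = 0; i < NQUEUES; i++) {
                dst[i] = kmemdup(src[i], sizeof(*dst[i]), GFP_KERNEL);
                if (!dst[i])
                        goto err;
        }
        return 0;

err:
        while (i--)
                kfree(dst[i]);          /* release only what was duplicated */
        return -ENOMEM;
}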
@@ -781,7 +777,7 @@ static int imgu_vidioc_try_fmt(struct file *file, void *fh,
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
int r;
- dev_dbg(dev, "%s [%ux%u] for node %d\n", __func__,
+ dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
pix_mp->width, pix_mp->height, node->id);
r = imgu_try_fmt(file, fh, f);
@@ -799,7 +795,7 @@ static int imgu_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
int r;
- dev_dbg(dev, "%s [%ux%u] for node %d\n", __func__,
+ dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
pix_mp->width, pix_mp->height, node->id);
r = imgu_try_fmt(file, fh, f);
@@ -1022,7 +1018,7 @@ static int imgu_sd_s_ctrl(struct v4l2_ctrl *ctrl)
struct imgu_device *imgu = v4l2_get_subdevdata(&imgu_sd->subdev);
struct device *dev = &imgu->pci_dev->dev;
- dev_dbg(dev, "set val %d to ctrl 0x%8x for subdev %d",
+ dev_dbg(dev, "set val %d to ctrl 0x%8x for subdev %u",
ctrl->val, ctrl->id, imgu_sd->pipe);
switch (ctrl->id) {
@@ -1122,7 +1118,7 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
imgu_sd->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
snprintf(imgu_sd->subdev.name, sizeof(imgu_sd->subdev.name),
- "%s %d", IMGU_NAME, pipe);
+ "%s %u", IMGU_NAME, pipe);
v4l2_set_subdevdata(&imgu_sd->subdev, imgu);
atomic_set(&imgu_sd->running_mode, IPU3_RUNNING_MODE_VIDEO);
v4l2_ctrl_handler_init(hdl, 1);
@@ -1240,7 +1236,7 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
}
/* Initialize vdev */
- snprintf(vdev->name, sizeof(vdev->name), "%s %d %s",
+ snprintf(vdev->name, sizeof(vdev->name), "%s %u %s",
IMGU_NAME, pipe, node->name);
vdev->release = video_device_release_empty;
vdev->fops = &imgu_v4l2_fops;
@@ -1335,7 +1331,7 @@ static int imgu_v4l2_register_pipes(struct imgu_device *imgu)
r = imgu_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i);
if (r) {
dev_err(&imgu->pci_dev->dev,
- "failed to register subdev%d ret (%d)\n", i, r);
+ "failed to register subdev%u ret (%d)\n", i, r);
goto pipes_cleanup;
}
r = imgu_v4l2_nodes_setup_pipe(imgu, i);
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
index d575ac78c8f0..a7372395a101 100644
--- a/drivers/staging/media/ipu3/ipu3.c
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -236,16 +236,45 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe
dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
mutex_lock(&imgu->lock);
+ if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
+ mutex_unlock(&imgu->lock);
+ return 0;
+ }
+
/* Buffer set is queued to FW only when input buffer is ready */
for (node = IMGU_NODE_NUM - 1;
imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
node = node ? node - 1 : IMGU_NODE_NUM - 1) {
-
if (node == IMGU_NODE_VF &&
!imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
dev_warn(&imgu->pci_dev->dev,
"Vf not enabled, ignore queue");
continue;
+ } else if (node == IMGU_NODE_PARAMS &&
+ imgu_pipe->nodes[node].enabled) {
+ struct vb2_buffer *vb;
+ struct imgu_vb2_buffer *ivb;
+
+ /* No parameters for this frame */
+ if (list_empty(&imgu_pipe->nodes[node].buffers))
+ continue;
+
+ ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
+ struct imgu_vb2_buffer, list);
+ vb = &ivb->vbb.vb2_buf;
+ r = imgu_css_set_parameters(&imgu->css, pipe,
+ vb2_plane_vaddr(vb, 0));
+ if (r) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ dev_warn(&imgu->pci_dev->dev,
+ "set parameters failed.");
+ continue;
+ }
+
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ dev_dbg(&imgu->pci_dev->dev,
+ "queue user parameters %d to css.", vb->index);
+ list_del(&ivb->list);
} else if (imgu_pipe->queue_enabled[node]) {
struct imgu_css_buffer *buf =
imgu_queue_getbuf(imgu, node, pipe);
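The new IMGU_NODE_PARAMS branch above consumes at most one queued parameter buffer per frame: it takes the oldest entry off the node's list, hands its plane vaddr to imgu_css_set_parameters(), and completes the buffer right away, which is presumably why the "more than 4 buffers" TODO could be dropped from ipu3-css.c earlier in this patch. A generic sketch of the take-one-from-the-list step (struct my_buf is a placeholder, not the driver's imgu_vb2_buffer):

#include <linux/list.h>

struct my_buf {                         /* placeholder type */
        struct list_head list;
        void *vaddr;
};

/* Pop the oldest queued buffer, or NULL when nothing was queued for this frame. */
static struct my_buf *my_buf_pop(struct list_head *queue)
{
        struct my_buf *b;

        if (list_empty(queue))
                return NULL;

        b = list_first_entry(queue, struct my_buf, list);
        list_del(&b->list);             /* the caller now owns and completes it */
        return b;
}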
@@ -791,7 +820,7 @@ out:
* PCI rpm framework checks the existence of driver rpm callbacks.
* Place a dummy callback here to avoid rpm going into error state.
*/
-static int imgu_rpm_dummy_cb(struct device *dev)
+static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
return 0;
}
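Marking the dummy callback __maybe_unused avoids a "defined but not used" warning in configurations where the runtime-PM wiring compiles away (with CONFIG_PM=n, SET_RUNTIME_PM_OPS() expands to nothing). A hedged sketch of the usual pattern; the foo_* names are illustrative and the ipu3 driver's own dev_pm_ops are not part of this hunk:

#include <linux/device.h>
#include <linux/pm.h>

static __maybe_unused int foo_rpm_dummy_cb(struct device *dev)
{
        return 0;       /* exists only so the PM core sees a runtime callback */
}

static const struct dev_pm_ops foo_pm_ops = {
        /* This reference disappears when CONFIG_PM=n, hence __maybe_unused above. */
        SET_RUNTIME_PM_OPS(foo_rpm_dummy_cb, foo_rpm_dummy_cb, NULL)
};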
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig
deleted file mode 100644
index 9a58aaf72edd..000000000000
--- a/drivers/staging/media/mt9t031/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config SOC_CAMERA_MT9T031
- tristate "mt9t031 support (DEPRECATED)"
- depends on SOC_CAMERA && I2C
- help
- This driver supports MT9T031 cameras from Micron.
diff --git a/drivers/staging/media/mt9t031/Makefile b/drivers/staging/media/mt9t031/Makefile
deleted file mode 100644
index bfd24c442b33..000000000000
--- a/drivers/staging/media/mt9t031/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
diff --git a/drivers/staging/media/mt9t031/TODO b/drivers/staging/media/mt9t031/TODO
deleted file mode 100644
index 15580a4f950c..000000000000
--- a/drivers/staging/media/mt9t031/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-This sensor driver needs to be converted to a regular
-v4l2 subdev driver. The soc_camera framework is deprecated and
-will be removed in the future. Unless someone does this work this
-sensor driver will be deleted when the soc_camera framework is
-deleted.
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 841cc0b3ce13..4dcbc5065821 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -6,5 +6,5 @@ config VIDEO_OMAP4
depends on ARCH_OMAP4 || COMPILE_TEST
select MFD_SYSCON
select VIDEOBUF2_DMA_CONTIG
- ---help---
+ help
Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/media/rockchip/vpu/Kconfig b/drivers/staging/media/rockchip/vpu/Kconfig
index 9a6fc1378242..fc54bbf6753d 100644
--- a/drivers/staging/media/rockchip/vpu/Kconfig
+++ b/drivers/staging/media/rockchip/vpu/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_ROCKCHIP_VPU
tristate "Rockchip VPU driver"
depends on ARCH_ROCKCHIP || COMPILE_TEST
@@ -5,7 +6,6 @@ config VIDEO_ROCKCHIP_VPU
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
- default n
help
Support for the Video Processing Unit present on Rockchip SoC,
which accelerates video and image encoding and decoding.
diff --git a/drivers/staging/media/rockchip/vpu/Makefile b/drivers/staging/media/rockchip/vpu/Makefile
index e9d733bb7632..ae5d143a0bfa 100644
--- a/drivers/staging/media/rockchip/vpu/Makefile
+++ b/drivers/staging/media/rockchip/vpu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip-vpu.o
rockchip-vpu-y += \
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
index 962412c79b91..58721c46fba4 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
@@ -22,7 +22,6 @@
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
-#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include "rockchip_vpu_common.h"
@@ -463,7 +462,7 @@ static int rockchip_vpu_probe(struct platform_device *pdev)
}
vpu->mdev.dev = vpu->dev;
- strlcpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+ strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
media_device_init(&vpu->mdev);
vpu->v4l2_dev.mdev = &vpu->mdev;
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
index ab0fb2053620..fb5e36aedd8c 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
@@ -41,7 +41,7 @@
* @is_compressed: Is it a compressed format?
* @multiplanar: Is it a multiplanar variant format? (e.g. NV12M)
*/
-struct v4l2_format_info {
+struct rockchip_vpu_v4l2_format_info {
u32 format;
u32 header_size;
u8 num_planes;
@@ -52,10 +52,10 @@ struct v4l2_format_info {
u8 multiplanar;
};
-static const struct v4l2_format_info *
-v4l2_format_info(u32 format)
+static const struct rockchip_vpu_v4l2_format_info *
+rockchip_vpu_v4l2_format_info(u32 format)
{
- static const struct v4l2_format_info formats[] = {
+ static const struct rockchip_vpu_v4l2_format_info formats[] = {
{ .format = V4L2_PIX_FMT_YUV420M, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .multiplanar = 1 },
{ .format = V4L2_PIX_FMT_NV12M, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .multiplanar = 1 },
{ .format = V4L2_PIX_FMT_YUYV, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
@@ -76,11 +76,11 @@ static void
fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
int pixelformat, int width, int height)
{
- const struct v4l2_format_info *info;
+ const struct rockchip_vpu_v4l2_format_info *info;
struct v4l2_plane_pix_format *plane;
int i;
- info = v4l2_format_info(pixelformat);
+ info = rockchip_vpu_v4l2_format_info(pixelformat);
if (!info)
return;
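The rename above keeps the driver's private lookup table from clashing with a same-named v4l2_format_info() helper in the V4L2 core; callers only need to switch to the prefixed name. A small illustrative caller, assuming it lives in the same file as the static lookup shown in the hunk:

#include <linux/types.h>
#include <linux/videodev2.h>

/* Illustrative only: relies on the static lookup defined above in this file. */
static u8 rockchip_vpu_nv12m_planes(void)
{
        const struct rockchip_vpu_v4l2_format_info *info;

        info = rockchip_vpu_v4l2_format_info(V4L2_PIX_FMT_NV12M);
        return info ? info->num_planes : 0;     /* NULL means unknown fourcc */
}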
diff --git a/drivers/staging/media/soc_camera/Kconfig b/drivers/staging/media/soc_camera/Kconfig
index bacd30f0348d..4a54db121574 100644
--- a/drivers/staging/media/soc_camera/Kconfig
+++ b/drivers/staging/media/soc_camera/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config SOC_CAMERA
tristate "SoC camera support"
depends on VIDEO_V4L2 && HAS_DMA && I2C && BROKEN
diff --git a/drivers/staging/media/soc_camera/TODO b/drivers/staging/media/soc_camera/TODO
new file mode 100644
index 000000000000..932af6443b67
--- /dev/null
+++ b/drivers/staging/media/soc_camera/TODO
@@ -0,0 +1,4 @@
+The SoC camera framework is obsolete and scheduled for removal in the near
+future. Developers are encouraged to convert the drivers to use the
+regular V4L2 API if these drivers are still needed (and if someone has the
+hardware).
diff --git a/drivers/staging/media/soc_camera/imx074.c b/drivers/staging/media/soc_camera/imx074.c
index 1676c166dc83..d907aa62f898 100644
--- a/drivers/staging/media/soc_camera/imx074.c
+++ b/drivers/staging/media/soc_camera/imx074.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for IMX074 CMOS Image Sensor from Sony
*
* Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* Partially inspired by the IMX074 driver from the Android / MSM tree
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/v4l2-mediabus.h>
diff --git a/drivers/staging/media/soc_camera/mt9t031.c b/drivers/staging/media/soc_camera/mt9t031.c
index 4ff179302b4f..615ae9df2c57 100644
--- a/drivers/staging/media/soc_camera/mt9t031.c
+++ b/drivers/staging/media/soc_camera/mt9t031.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for MT9T031 CMOS Image Sensor from Micron
*
* Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering <lg@denx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/log2.h>
diff --git a/drivers/staging/media/soc_camera/soc_camera.c b/drivers/staging/media/soc_camera/soc_camera.c
index 1ab86a7499b9..a6232dcd59bc 100644
--- a/drivers/staging/media/soc_camera/soc_camera.c
+++ b/drivers/staging/media/soc_camera/soc_camera.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camera image capture (abstract) bus driver
*
@@ -10,12 +11,7 @@
* SoCs. Later it should also be used for i.MX31 SoCs from Freescale.
* It can handle multiple cameras and / or multiple buses, which can
* be used, e.g., in stereo-vision applications.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/staging/media/soc_camera/soc_mediabus.c b/drivers/staging/media/soc_camera/soc_mediabus.c
index be74008ec0ca..2aa646c89c1f 100644
--- a/drivers/staging/media/soc_camera/soc_mediabus.c
+++ b/drivers/staging/media/soc_camera/soc_mediabus.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* soc-camera media bus helper routines
*
* Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/media/soc_camera/soc_mt9v022.c b/drivers/staging/media/soc_camera/soc_mt9v022.c
index 6d922b17ea94..e7e0d3d29499 100644
--- a/drivers/staging/media/soc_camera/soc_mt9v022.c
+++ b/drivers/staging/media/soc_camera/soc_mt9v022.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for MT9V022 CMOS Image Sensor from Micron
*
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/i2c.h>
diff --git a/drivers/staging/media/soc_camera/soc_ov5642.c b/drivers/staging/media/soc_camera/soc_ov5642.c
index 0931898c79dd..94696d7baf83 100644
--- a/drivers/staging/media/soc_camera/soc_ov5642.c
+++ b/drivers/staging/media/soc_camera/soc_ov5642.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for OV5642 CMOS Image Sensor from Omnivision
*
@@ -8,12 +9,7 @@
*
* Based on Omnivision OV7670 Camera Driver
* Copyright (C) 2006-7 Jonathan Corbet <corbet@lwn.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/staging/media/soc_camera/soc_ov9740.c b/drivers/staging/media/soc_camera/soc_ov9740.c
index a07d3145d1b4..7c765595d85f 100644
--- a/drivers/staging/media/soc_camera/soc_ov9740.c
+++ b/drivers/staging/media/soc_camera/soc_ov9740.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OmniVision OV9740 Camera Driver
*
* Copyright (C) 2011 NVIDIA Corporation
*
* Based on ov9640 camera driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
diff --git a/drivers/staging/media/sunxi/Kconfig b/drivers/staging/media/sunxi/Kconfig
index c78d92240ceb..4549a135741f 100644
--- a/drivers/staging/media/sunxi/Kconfig
+++ b/drivers/staging/media/sunxi/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_SUNXI
bool "Allwinner sunXi family Video Devices"
depends on ARCH_SUNXI || COMPILE_TEST
diff --git a/drivers/staging/media/sunxi/Makefile b/drivers/staging/media/sunxi/Makefile
index cee2846c3ecf..b87140b0e15f 100644
--- a/drivers/staging/media/sunxi/Makefile
+++ b/drivers/staging/media/sunxi/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += cedrus/
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
index 3252efa422f9..17733e9a088f 100644
--- a/drivers/staging/media/sunxi/cedrus/Kconfig
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_SUNXI_CEDRUS
tristate "Allwinner Cedrus VPU driver"
depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
index e9dc68b7bcb6..808842f0119e 100644
--- a/drivers/staging/media/sunxi/cedrus/Makefile
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o cedrus_mpeg2.o
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index ff11cbeba205..d0429c0e6b6b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -300,7 +300,7 @@ static int cedrus_probe(struct platform_device *pdev)
"Failed to initialize V4L2 M2M device\n");
ret = PTR_ERR(dev->m2m_dev);
- goto err_video;
+ goto err_v4l2;
}
dev->mdev.dev = &pdev->dev;
@@ -310,23 +310,23 @@ static int cedrus_probe(struct platform_device *pdev)
dev->mdev.ops = &cedrus_m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
- ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
- MEDIA_ENT_F_PROC_VIDEO_DECODER);
- if (ret) {
- v4l2_err(&dev->v4l2_dev,
- "Failed to initialize V4L2 M2M media controller\n");
- goto err_m2m;
- }
-
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto err_v4l2;
+ goto err_m2m;
}
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M media controller\n");
+ goto err_video;
+ }
+
ret = media_device_register(&dev->mdev);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register media device\n");
@@ -339,10 +339,10 @@ static int cedrus_probe(struct platform_device *pdev)
err_m2m_mc:
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
-err_m2m:
- v4l2_m2m_release(dev->m2m_dev);
err_video:
video_unregister_device(&dev->vfd);
+err_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
err_v4l2:
v4l2_device_unregister(&dev->v4l2_dev);
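The cedrus_probe() reordering registers the video device before the media-controller entity and reorders the error labels so that teardown mirrors the new setup order: whatever was registered last is undone first. A self-contained sketch of that unwind discipline with stand-in resources:

/* Stubs standing in for video_register_device(), the media controller, etc. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int foo_probe(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;

        ret = acquire_b();
        if (ret)
                goto err_a;

        ret = acquire_c();
        if (ret)
                goto err_b;             /* B came after A, so it is undone first */

        return 0;

err_b:
        release_b();
err_a:
        release_a();
        return ret;
}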
@@ -396,6 +396,11 @@ static const struct cedrus_variant sun50i_h5_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
};
+static const struct cedrus_variant sun50i_h6_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
+};
+
static const struct of_device_id cedrus_dt_match[] = {
{
.compatible = "allwinner,sun4i-a10-video-engine",
@@ -425,6 +430,10 @@ static const struct of_device_id cedrus_dt_match[] = {
.compatible = "allwinner,sun50i-h5-video-engine",
.data = &sun50i_h5_cedrus_variant,
},
+ {
+ .compatible = "allwinner,sun50i-h6-video-engine",
+ .data = &sun50i_h6_cedrus_variant,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, cedrus_dt_match);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 4aedd24a9848..c57c04b41d2e 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -28,6 +28,8 @@
#define CEDRUS_CAPABILITY_UNTILED BIT(0)
+#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
+
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
@@ -91,6 +93,7 @@ struct cedrus_dec_ops {
struct cedrus_variant {
unsigned int capabilities;
+ unsigned int quirks;
};
struct cedrus_dev {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 0acf219a8c91..fbfff7c1c771 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -177,7 +177,8 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
*/
#ifdef PHYS_PFN_OFFSET
- dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+ if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET))
+ dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
#endif
ret = of_reserved_mem_device_init(dev->dev);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index b47854b3bce4..9673874ece10 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -536,6 +536,7 @@ int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->lock = &ctx->dev->dev_mutex;
src_vq->dev = ctx->dev->dev;
src_vq->supports_requests = true;
+ src_vq->requires_requests = true;
ret = vb2_queue_init(src_vq);
if (ret)
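Setting requires_requests on the cedrus OUTPUT queue makes videobuf2 refuse buffers that are not part of a media request, which matches the stateless-decoder model where each bitstream buffer must carry its controls in a request. A minimal sketch showing only the two request-related vb2_queue fields (everything else in the queue setup is as in the driver):

#include <media/videobuf2-core.h>

/* Hedged sketch: just the request flags on the bitstream (OUTPUT) queue. */
static void foo_src_queue_request_flags(struct vb2_queue *q)
{
        q->supports_requests = true;    /* buffers may be submitted in a request */
        q->requires_requests = true;    /* ...and, for stateless decoding, must be */
}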
diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig
index 5c4914674468..ff8e846cd15d 100644
--- a/drivers/staging/media/tegra-vde/Kconfig
+++ b/drivers/staging/media/tegra-vde/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config TEGRA_VDE
tristate "NVIDIA Tegra Video Decoder Engine driver"
depends on ARCH_TEGRA || COMPILE_TEST
diff --git a/drivers/staging/media/tegra-vde/Makefile b/drivers/staging/media/tegra-vde/Makefile
index 444c1d62daa1..7f9020e634f3 100644
--- a/drivers/staging/media/tegra-vde/Makefile
+++ b/drivers/staging/media/tegra-vde/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEGRA_VDE) += tegra-vde.o
diff --git a/drivers/staging/media/tegra-vde/tegra-vde.c b/drivers/staging/media/tegra-vde/tegra-vde.c
index aa6c6bba961e..a5020dbf6eef 100644
--- a/drivers/staging/media/tegra-vde/tegra-vde.c
+++ b/drivers/staging/media/tegra-vde/tegra-vde.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NVIDIA Tegra Video decoder driver
*
* Copyright (C) 2016-2017 Dmitry Osipenko <digetx@gmail.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/clk.h>
diff --git a/drivers/staging/media/tegra-vde/uapi.h b/drivers/staging/media/tegra-vde/uapi.h
index 4bce08a7a54c..a0dad1ed94ef 100644
--- a/drivers/staging/media/tegra-vde/uapi.h
+++ b/drivers/staging/media/tegra-vde/uapi.h
@@ -1,12 +1,5 @@
-/*
- * Copyright (C) 2016-2017 Dmitry Osipenko <digetx@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2016-2017 Dmitry Osipenko <digetx@gmail.com> */
#ifndef _UAPI_TEGRA_VDE_H_
#define _UAPI_TEGRA_VDE_H_
diff --git a/drivers/staging/media/zoran/Kconfig b/drivers/staging/media/zoran/Kconfig
deleted file mode 100644
index 34a18135ede0..000000000000
--- a/drivers/staging/media/zoran/Kconfig
+++ /dev/null
@@ -1,75 +0,0 @@
-config VIDEO_ZORAN
- tristate "Zoran ZR36057/36067 Video For Linux (Deprecated)"
- depends on PCI && I2C_ALGOBIT && VIDEO_V4L2 && VIRT_TO_BUS
- depends on !ALPHA
- help
- Say Y for support for MJPEG capture cards based on the Zoran
- 36057/36067 PCI controller chipset. This includes the Iomega
- Buz, Pinnacle DC10+ and the Linux Media Labs LML33. There is
- a driver homepage at <http://mjpeg.sf.net/driver-zoran/>. For
- more information, check <file:Documentation/media/v4l-drivers/zoran.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called zr36067.
-
-config VIDEO_ZORAN_DC30
- tristate "Pinnacle/Miro DC30(+) support"
- depends on VIDEO_ZORAN
- select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_VPX3220 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the Pinnacle/Miro DC30(+) MJPEG capture/playback
- card. This also supports really old DC10 cards based on the
- zr36050 MJPEG codec and zr36016 VFE.
-
-config VIDEO_ZORAN_ZR36060
- tristate "Zoran ZR36060"
- depends on VIDEO_ZORAN
- help
- Say Y to support Zoran boards based on 36060 chips.
- This includes Iomega Buz, Pinnacle DC10, Linux media Labs 33
- and 33 R10 and AverMedia 6 boards.
-
-config VIDEO_ZORAN_BUZ
- tristate "Iomega Buz support"
- depends on VIDEO_ZORAN_ZR36060
- select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_SAA7185 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the Iomega Buz MJPEG capture/playback card.
-
-config VIDEO_ZORAN_DC10
- tristate "Pinnacle/Miro DC10(+) support"
- depends on VIDEO_ZORAN_ZR36060
- select VIDEO_SAA7110 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the Pinnacle/Miro DC10(+) MJPEG capture/playback
- card.
-
-config VIDEO_ZORAN_LML33
- tristate "Linux Media Labs LML33 support"
- depends on VIDEO_ZORAN_ZR36060
- select VIDEO_BT819 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the Linux Media Labs LML33 MJPEG capture/playback
- card.
-
-config VIDEO_ZORAN_LML33R10
- tristate "Linux Media Labs LML33R10 support"
- depends on VIDEO_ZORAN_ZR36060
- select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_ADV7170 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the Linux Media Labs LML33R10 MJPEG capture/playback
- card.
-
-config VIDEO_ZORAN_AVS6EYES
- tristate "AverMedia 6 Eyes support"
- depends on VIDEO_ZORAN_ZR36060
- select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_BT866 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_KS0127 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the AverMedia 6 Eyes video surveillance card.
diff --git a/drivers/staging/media/zoran/Makefile b/drivers/staging/media/zoran/Makefile
deleted file mode 100644
index 21ac29a71458..000000000000
--- a/drivers/staging/media/zoran/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-zr36067-objs := zoran_procfs.o zoran_device.o \
- zoran_driver.o zoran_card.o
-
-obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o
-obj-$(CONFIG_VIDEO_ZORAN_DC30) += zr36050.o zr36016.o
-obj-$(CONFIG_VIDEO_ZORAN_ZR36060) += zr36060.o
diff --git a/drivers/staging/media/zoran/TODO b/drivers/staging/media/zoran/TODO
deleted file mode 100644
index 54464095d0d7..000000000000
--- a/drivers/staging/media/zoran/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-The zoran driver is marked deprecated. It will be removed
-around May 2019 unless someone is willing to update this
-driver to the latest V4L2 frameworks (especially the vb2
-framework).
diff --git a/drivers/staging/media/zoran/videocodec.c b/drivers/staging/media/zoran/videocodec.c
deleted file mode 100644
index 4427ae7126e2..000000000000
--- a/drivers/staging/media/zoran/videocodec.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * VIDEO MOTION CODECs internal API for video devices
- *
- * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's
- * bound to a master device.
- *
- * (c) 2002 Wolfgang Scherr <scherr@net4you.at>
- *
- * $Id: videocodec.c,v 1.1.2.8 2003/03/29 07:16:04 rbultje Exp $
- *
- * ------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ------------------------------------------------------------------------
- */
-
-#define VIDEOCODEC_VERSION "v0.2"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-
-// kernel config is here (procfs flag)
-
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#endif
-
-#include "videocodec.h"
-
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0-4)");
-
-#define dprintk(num, format, args...) \
- do { \
- if (debug >= num) \
- printk(format, ##args); \
- } while (0)
-
-struct attached_list {
- struct videocodec *codec;
- struct attached_list *next;
-};
-
-struct codec_list {
- const struct videocodec *codec;
- int attached;
- struct attached_list *list;
- struct codec_list *next;
-};
-
-static struct codec_list *codeclist_top = NULL;
-
-/* ================================================= */
-/* function prototypes of the master/slave interface */
-/* ================================================= */
-
-struct videocodec *
-videocodec_attach (struct videocodec_master *master)
-{
- struct codec_list *h = codeclist_top;
- struct attached_list *a, *ptr;
- struct videocodec *codec;
- int res;
-
- if (!master) {
- dprintk(1, KERN_ERR "videocodec_attach: no data\n");
- return NULL;
- }
-
- dprintk(2,
- "videocodec_attach: '%s', flags %lx, magic %lx\n",
- master->name, master->flags, master->magic);
-
- if (!h) {
- dprintk(1,
- KERN_ERR
- "videocodec_attach: no device available\n");
- return NULL;
- }
-
- while (h) {
- // attach only if the slave has at least the flags
- // expected by the master
- if ((master->flags & h->codec->flags) == master->flags) {
- dprintk(4, "videocodec_attach: try '%s'\n",
- h->codec->name);
-
- if (!try_module_get(h->codec->owner))
- return NULL;
-
- codec = kmemdup(h->codec, sizeof(struct videocodec),
- GFP_KERNEL);
- if (!codec) {
- dprintk(1,
- KERN_ERR
- "videocodec_attach: no mem\n");
- goto out_module_put;
- }
-
- res = strlen(codec->name);
- snprintf(codec->name + res, sizeof(codec->name) - res,
- "[%d]", h->attached);
- codec->master_data = master;
- res = codec->setup(codec);
- if (res == 0) {
- dprintk(3, "videocodec_attach '%s'\n",
- codec->name);
- ptr = kzalloc(sizeof(struct attached_list), GFP_KERNEL);
- if (!ptr) {
- dprintk(1,
- KERN_ERR
- "videocodec_attach: no memory\n");
- goto out_kfree;
- }
- ptr->codec = codec;
-
- a = h->list;
- if (!a) {
- h->list = ptr;
- dprintk(4,
- "videocodec: first element\n");
- } else {
- while (a->next)
- a = a->next; // find end
- a->next = ptr;
- dprintk(4,
- "videocodec: in after '%s'\n",
- h->codec->name);
- }
-
- h->attached += 1;
- return codec;
- } else {
- kfree(codec);
- }
- }
- h = h->next;
- }
-
- dprintk(1, KERN_ERR "videocodec_attach: no codec found!\n");
- return NULL;
-
- out_module_put:
- module_put(h->codec->owner);
- out_kfree:
- kfree(codec);
- return NULL;
-}
-
-int
-videocodec_detach (struct videocodec *codec)
-{
- struct codec_list *h = codeclist_top;
- struct attached_list *a, *prev;
- int res;
-
- if (!codec) {
- dprintk(1, KERN_ERR "videocodec_detach: no data\n");
- return -EINVAL;
- }
-
- dprintk(2,
- "videocodec_detach: '%s', type: %x, flags %lx, magic %lx\n",
- codec->name, codec->type, codec->flags, codec->magic);
-
- if (!h) {
- dprintk(1,
- KERN_ERR "videocodec_detach: no device left...\n");
- return -ENXIO;
- }
-
- while (h) {
- a = h->list;
- prev = NULL;
- while (a) {
- if (codec == a->codec) {
- res = a->codec->unset(a->codec);
- if (res >= 0) {
- dprintk(3,
- "videocodec_detach: '%s'\n",
- a->codec->name);
- a->codec->master_data = NULL;
- } else {
- dprintk(1,
- KERN_ERR
- "videocodec_detach: '%s'\n",
- a->codec->name);
- a->codec->master_data = NULL;
- }
- if (prev == NULL) {
- h->list = a->next;
- dprintk(4,
- "videocodec: delete first\n");
- } else {
- prev->next = a->next;
- dprintk(4,
- "videocodec: delete middle\n");
- }
- module_put(a->codec->owner);
- kfree(a->codec);
- kfree(a);
- h->attached -= 1;
- return 0;
- }
- prev = a;
- a = a->next;
- }
- h = h->next;
- }
-
- dprintk(1, KERN_ERR "videocodec_detach: given codec not found!\n");
- return -EINVAL;
-}
-
-int
-videocodec_register (const struct videocodec *codec)
-{
- struct codec_list *ptr, *h = codeclist_top;
-
- if (!codec) {
- dprintk(1, KERN_ERR "videocodec_register: no data!\n");
- return -EINVAL;
- }
-
- dprintk(2,
- "videocodec: register '%s', type: %x, flags %lx, magic %lx\n",
- codec->name, codec->type, codec->flags, codec->magic);
-
- ptr = kzalloc(sizeof(struct codec_list), GFP_KERNEL);
- if (!ptr) {
- dprintk(1, KERN_ERR "videocodec_register: no memory\n");
- return -ENOMEM;
- }
- ptr->codec = codec;
-
- if (!h) {
- codeclist_top = ptr;
- dprintk(4, "videocodec: hooked in as first element\n");
- } else {
- while (h->next)
- h = h->next; // find the end
- h->next = ptr;
- dprintk(4, "videocodec: hooked in after '%s'\n",
- h->codec->name);
- }
-
- return 0;
-}
-
-int
-videocodec_unregister (const struct videocodec *codec)
-{
- struct codec_list *prev = NULL, *h = codeclist_top;
-
- if (!codec) {
- dprintk(1, KERN_ERR "videocodec_unregister: no data!\n");
- return -EINVAL;
- }
-
- dprintk(2,
- "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n",
- codec->name, codec->type, codec->flags, codec->magic);
-
- if (!h) {
- dprintk(1,
- KERN_ERR
- "videocodec_unregister: no device left...\n");
- return -ENXIO;
- }
-
- while (h) {
- if (codec == h->codec) {
- if (h->attached) {
- dprintk(1,
- KERN_ERR
- "videocodec: '%s' is used\n",
- h->codec->name);
- return -EBUSY;
- }
- dprintk(3, "videocodec: unregister '%s' is ok.\n",
- h->codec->name);
- if (prev == NULL) {
- codeclist_top = h->next;
- dprintk(4,
- "videocodec: delete first element\n");
- } else {
- prev->next = h->next;
- dprintk(4,
- "videocodec: delete middle element\n");
- }
- kfree(h);
- return 0;
- }
- prev = h;
- h = h->next;
- }
-
- dprintk(1,
- KERN_ERR
- "videocodec_unregister: given codec not found!\n");
- return -EINVAL;
-}
-
-#ifdef CONFIG_PROC_FS
-static int proc_videocodecs_show(struct seq_file *m, void *v)
-{
- struct codec_list *h = codeclist_top;
- struct attached_list *a;
-
- seq_printf(m, "<S>lave or attached <M>aster name type flags magic ");
- seq_printf(m, "(connected as)\n");
-
- while (h) {
- seq_printf(m, "S %32s %04x %08lx %08lx (TEMPLATE)\n",
- h->codec->name, h->codec->type,
- h->codec->flags, h->codec->magic);
- a = h->list;
- while (a) {
- seq_printf(m, "M %32s %04x %08lx %08lx (%s)\n",
- a->codec->master_data->name,
- a->codec->master_data->type,
- a->codec->master_data->flags,
- a->codec->master_data->magic,
- a->codec->name);
- a = a->next;
- }
- h = h->next;
- }
-
- return 0;
-}
-#endif
-
-/* ===================== */
-/* hook in driver module */
-/* ===================== */
-static int __init
-videocodec_init (void)
-{
-#ifdef CONFIG_PROC_FS
- static struct proc_dir_entry *videocodec_proc_entry;
-#endif
-
- printk(KERN_INFO "Linux video codec intermediate layer: %s\n",
- VIDEOCODEC_VERSION);
-
-#ifdef CONFIG_PROC_FS
- videocodec_proc_entry = proc_create_single("videocodecs", 0, NULL,
- proc_videocodecs_show);
- if (!videocodec_proc_entry) {
- dprintk(1, KERN_ERR "videocodec: can't init procfs.\n");
- }
-#endif
- return 0;
-}
-
-static void __exit
-videocodec_exit (void)
-{
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("videocodecs", NULL);
-#endif
-}
-
-EXPORT_SYMBOL(videocodec_attach);
-EXPORT_SYMBOL(videocodec_detach);
-EXPORT_SYMBOL(videocodec_register);
-EXPORT_SYMBOL(videocodec_unregister);
-
-module_init(videocodec_init);
-module_exit(videocodec_exit);
-
-MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
-MODULE_DESCRIPTION("Intermediate API module for video codecs "
- VIDEOCODEC_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/videocodec.h b/drivers/staging/media/zoran/videocodec.h
deleted file mode 100644
index 8ed5a0f7ac01..000000000000
--- a/drivers/staging/media/zoran/videocodec.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * VIDEO MOTION CODECs internal API for video devices
- *
- * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's
- * bound to a master device.
- *
- * (c) 2002 Wolfgang Scherr <scherr@net4you.at>
- *
- * $Id: videocodec.h,v 1.1.2.4 2003/01/14 21:15:03 rbultje Exp $
- *
- * ------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ------------------------------------------------------------------------
- */
-
-/* =================== */
-/* general description */
-/* =================== */
-
-/* Should ease the (re-)usage of drivers supporting cards with (different)
- video codecs. The codecs register their functionality with this module,
- and the processors (masters) can attach to them if they fit.
-
- The codecs typically have a "strong" binding to their master - so I
- don't think it makes sense to have a full-blown interface as with e.g.
- i2c. If you have another opinion, let's discuss & implement it :-)))
-
- Usage:
-
- The slave has just to setup the videocodec structure and use two functions:
- videocodec_register(codecdata);
- videocodec_unregister(codecdata);
- The best is just calling them at module (de-)initialisation.
-
- The master sets up the structure videocodec_master and calls:
- codecdata=videocodec_attach(master_codecdata);
- videocodec_detach(codecdata);
-
- The slave is called during attach/detach via the functions set up previously
- during register. At that time, the master_data pointer is set up
- and the slave can access any io registers of the master device (in the case
- the slave is bound to it). Otherwise it doesn't need these functions and
- therefore they need not be initialized.
-
- The other functions are just for convenience, as they are for sure used by
- most/all of the codecs. The last ones may be omitted, too.
-
- See the structure declaration below for more information and which data has
- to be set up for the master and the slave.
-
- ----------------------------------------------------------------------------
- The master should have "knowledge" of the slave and vice versa. So the data
- structures sent to/from slave via set_data/get_data set_image/get_image are
- device dependent and vary between MJPEG/MPEG/WAVELET/... devices. (!!!!)
- ----------------------------------------------------------------------------
-*/
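The usage described in the comment above boils down to: a slave registers a struct videocodec at module init and unregisters it at exit, while a master fills a videocodec_master and attaches/detaches at runtime. A hedged sketch of the slave side of the interface being deleted here, with placeholder names only:

#include <linux/module.h>
#include "videocodec.h"

static int my_setup(struct videocodec *codec)   /* invoked from videocodec_attach() */
{
        return 0;
}

static int my_unset(struct videocodec *codec)   /* invoked from videocodec_detach() */
{
        return 0;
}

static struct videocodec my_codec = {
        .owner = THIS_MODULE,
        .name  = "example-codec",               /* placeholder, not a real chip */
        .magic = 0,                             /* no CODEC_FLAG_MAGIC matching */
        .flags = CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE |
                 CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER,
        .type  = CODEC_TYPE_NONE,
        .setup = my_setup,
        .unset = my_unset,
};

static int __init my_codec_init(void)
{
        return videocodec_register(&my_codec);  /* slave side, at module init */
}

static void __exit my_codec_exit(void)
{
        videocodec_unregister(&my_codec);
}

module_init(my_codec_init);
module_exit(my_codec_exit);
MODULE_LICENSE("GPL");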
-
-
-/* ========================================== */
-/* description of the videocodec_io structure */
-/* ========================================== */
-
-/*
- ==== master setup ====
- name -> name of the device structure for reference and debugging
- master_data -> data ref. for the master (e.g. the zr36055,57,67)
- readreg -> ref. to read-fn from register (setup by master, used by slave)
- writereg -> ref. to write-fn to register (setup by master, used by slave)
- these two functions do the low-level I/O job
-
- ==== slave functionality setup ====
- slave_data -> data ref. for the slave (e.g. the zr36050,60)
- check -> fn-ref. checks availability of a device, returns -EIO on failure or
- the type on success
- this makes sense especially if a driver module supports more than
- one codec which may be quite similar to access; nevertheless it
- is good for a first functionality check
-
- -- main functions you always need for compression/decompression --
-
- set_mode -> this fn-ref. resets the entire codec, and sets up the mode
- with the last defined norm/size (or device default if not
- available) - it returns 0 if the mode is possible
- set_size -> this fn-ref. sets the norm and image size for
- compression/decompression (returns 0 on success)
- the norm param is defined in videodev2.h (V4L2_STD_*)
-
- additional setup may be available, too - but the codec should work with
- some default values even without this
-
- set_data -> sets device-specific data (tables, quality etc.)
- get_data -> query device-specific data (tables, quality etc.)
-
- if the device delivers interrupts, they may be setup/handled here
- setup_interrupt -> codec irq setup (not needed for 36050/60)
- handle_interrupt -> codec irq handling (not needed for 36050/60)
-
- if the device delivers pictures, they may be handled here
- put_image -> puts image data to the codec (not needed for 36050/60)
- get_image -> gets image data from the codec (not needed for 36050/60)
- the calls include frame numbers and flags (even/odd/...)
- if needed and a flag which allows blocking until it's ready
-*/
-
-/* ============== */
-/* user interface */
-/* ============== */
-
-/*
- Currently there is only an information display planned, as the layer
- is not visible to user space at all.
-
- Information is available via procfs. The current entry is "/proc/videocodecs"
- but it makes sense to "hide" it in the /proc/video tree of v4l(2) --TODO--.
-
-An example of such an output is:
-
-<S>lave or attached <M>aster name type flags magic (connected as)
-S zr36050 0002 0000d001 00000000 (TEMPLATE)
-M zr36055[0] 0001 0000c001 00000000 (zr36050[0])
-M zr36055[1] 0001 0000c001 00000000 (zr36050[1])
-
-*/
-
-
-/* =============================================== */
-/* special defines for the videocodec_io structure */
-/* =============================================== */
-
-#ifndef __LINUX_VIDEOCODEC_H
-#define __LINUX_VIDEOCODEC_H
-
-#include <linux/videodev2.h>
-
-#define CODEC_DO_COMPRESSION 0
-#define CODEC_DO_EXPANSION 1
-
-/* these are the current codec flags I think are needed */
-/* -> type value in structure */
-#define CODEC_FLAG_JPEG 0x00000001L // JPEG codec
-#define CODEC_FLAG_MPEG 0x00000002L // MPEG1/2/4 codec
-#define CODEC_FLAG_DIVX 0x00000004L // DIVX codec
-#define CODEC_FLAG_WAVELET 0x00000008L // WAVELET codec
- // room for other types
-
-#define CODEC_FLAG_MAGIC 0x00000800L // magic key must match
-#define CODEC_FLAG_HARDWARE 0x00001000L // is a hardware codec
-#define CODEC_FLAG_VFE 0x00002000L // has direct video frontend
-#define CODEC_FLAG_ENCODER 0x00004000L // compression capability
-#define CODEC_FLAG_DECODER 0x00008000L // decompression capability
-#define CODEC_FLAG_NEEDIRQ 0x00010000L // needs irq handling
-#define CODEC_FLAG_RDWRPIC 0x00020000L // handles picture I/O
-
-/* a list of modes, some are just examples (is there any HW?) */
-#define CODEC_MODE_BJPG 0x0001 // Baseline JPEG
-#define CODEC_MODE_LJPG 0x0002 // Lossless JPEG
-#define CODEC_MODE_MPEG1 0x0003 // MPEG 1
-#define CODEC_MODE_MPEG2 0x0004 // MPEG 2
-#define CODEC_MODE_MPEG4 0x0005 // MPEG 4
-#define CODEC_MODE_MSDIVX 0x0006 // MS DivX
-#define CODEC_MODE_ODIVX 0x0007 // Open DivX
-#define CODEC_MODE_WAVELET 0x0008 // Wavelet
-
-/* these are the current codec types I want to implement */
-/* -> type value in structure */
-#define CODEC_TYPE_NONE 0
-#define CODEC_TYPE_L64702 1
-#define CODEC_TYPE_ZR36050 2
-#define CODEC_TYPE_ZR36016 3
-#define CODEC_TYPE_ZR36060 4
-
-/* the type of data may be enhanced by future implementations (data-fn.'s) */
-/* -> used in command */
-#define CODEC_G_STATUS 0x0000 /* codec status (query only) */
-#define CODEC_S_CODEC_MODE 0x0001 /* codec mode (baseline JPEG, MPEG1,... */
-#define CODEC_G_CODEC_MODE 0x8001
-#define CODEC_S_VFE 0x0002 /* additional video frontend setup */
-#define CODEC_G_VFE 0x8002
-#define CODEC_S_MMAP 0x0003 /* MMAP setup (if available) */
-
-#define CODEC_S_JPEG_TDS_BYTE 0x0010 /* target data size in bytes */
-#define CODEC_G_JPEG_TDS_BYTE 0x8010
-#define CODEC_S_JPEG_SCALE 0x0011 /* scaling factor for quant. tables */
-#define CODEC_G_JPEG_SCALE 0x8011
-#define CODEC_S_JPEG_HDT_DATA 0x0018 /* huffman-tables */
-#define CODEC_G_JPEG_HDT_DATA 0x8018
-#define CODEC_S_JPEG_QDT_DATA 0x0019 /* quantizing-tables */
-#define CODEC_G_JPEG_QDT_DATA 0x8019
-#define CODEC_S_JPEG_APP_DATA 0x001A /* APP marker */
-#define CODEC_G_JPEG_APP_DATA 0x801A
-#define CODEC_S_JPEG_COM_DATA 0x001B /* COM marker */
-#define CODEC_G_JPEG_COM_DATA 0x801B
-
-#define CODEC_S_PRIVATE 0x1000 /* "private" commands start here */
-#define CODEC_G_PRIVATE 0x9000
-
-#define CODEC_G_FLAG 0x8000 /* this is how 'get' is detected */
-
-/* types of transfer, directly user space or a kernel buffer (image-fn.'s) */
-/* -> used in get_image, put_image */
-#define CODEC_TRANSFER_KERNEL 0 /* use "memcopy" */
-#define CODEC_TRANSFER_USER 1 /* use "to/from_user" */
-
-
-/* ========================= */
-/* the structures itself ... */
-/* ========================= */
-
-struct vfe_polarity {
- unsigned int vsync_pol:1;
- unsigned int hsync_pol:1;
- unsigned int field_pol:1;
- unsigned int blank_pol:1;
- unsigned int subimg_pol:1;
- unsigned int poe_pol:1;
- unsigned int pvalid_pol:1;
- unsigned int vclk_pol:1;
-};
-
-struct vfe_settings {
- __u32 x, y; /* Offsets into image */
- __u32 width, height; /* Area to capture */
- __u16 decimation; /* Decimation divider */
- __u16 flags; /* Flags for capture */
- __u16 quality; /* quality of the video */
-};
-
-struct tvnorm {
- u16 Wt, Wa, HStart, HSyncStart, Ht, Ha, VStart;
-};
-
-struct jpeg_com_marker {
- int len; /* number of usable bytes in data */
- char data[60];
-};
-
-struct jpeg_app_marker {
- int appn; /* number app segment */
- int len; /* number of usable bytes in data */
- char data[60];
-};
-
-struct videocodec {
- struct module *owner;
- /* -- filled in by slave device during register -- */
- char name[32];
- unsigned long magic; /* may be used for client<->master attaching */
- unsigned long flags; /* functionality flags */
- unsigned int type; /* codec type */
-
- /* -- this is filled in later during master device attach -- */
-
- struct videocodec_master *master_data;
-
- /* -- these are filled in by the slave device during register -- */
-
- void *data; /* private slave data */
-
- /* attach/detach client functions (indirect call) */
- int (*setup) (struct videocodec * codec);
- int (*unset) (struct videocodec * codec);
-
- /* main functions, every client needs them for sure! */
- // set compression or decompression (or freeze, stop, standby, etc)
- int (*set_mode) (struct videocodec * codec,
- int mode);
- // setup picture size and norm (for the codec's video frontend)
- int (*set_video) (struct videocodec * codec,
- struct tvnorm * norm,
- struct vfe_settings * cap,
- struct vfe_polarity * pol);
- // other control commands, also mmap setup etc.
- int (*control) (struct videocodec * codec,
- int type,
- int size,
- void *data);
-
- /* additional setup/query/processing (may be NULL pointer) */
- // interrupt setup / handling (for irq's delivered by master)
- int (*setup_interrupt) (struct videocodec * codec,
- long mode);
- int (*handle_interrupt) (struct videocodec * codec,
- int source,
- long flag);
- // picture interface (if any)
- long (*put_image) (struct videocodec * codec,
- int tr_type,
- int block,
- long *fr_num,
- long *flag,
- long size,
- void *buf);
- long (*get_image) (struct videocodec * codec,
- int tr_type,
- int block,
- long *fr_num,
- long *flag,
- long size,
- void *buf);
-};
-
-struct videocodec_master {
- /* -- filled in by master device for registration -- */
- char name[32];
- unsigned long magic; /* may be used for client<->master attaching */
- unsigned long flags; /* functionality flags */
- unsigned int type; /* master type */
-
- void *data; /* private master data */
-
- __u32(*readreg) (struct videocodec * codec,
- __u16 reg);
- void (*writereg) (struct videocodec * codec,
- __u16 reg,
- __u32 value);
-};
-
-
-/* ================================================= */
-/* function prototypes of the master/slave interface */
-/* ================================================= */
-
-/* attach and detach commands for the master */
-// * master structure needs to be kmalloc'ed before calling attach
-// and free'd after calling detach
-// * returns pointer on success, NULL on failure
-extern struct videocodec *videocodec_attach(struct videocodec_master *);
-// * 0 on success, <0 (errno) on failure
-extern int videocodec_detach(struct videocodec *);
-
-/* register and unregister commands for the slaves */
-// * 0 on success, <0 (errno) on failure
-extern int videocodec_register(const struct videocodec *);
-// * 0 on success, <0 (errno) on failure
-extern int videocodec_unregister(const struct videocodec *);
-
-/* the other calls are directly done via the videocodec structure! */
-
-#endif /*ifndef __LINUX_VIDEOCODEC_H */
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
deleted file mode 100644
index e84fb604a689..000000000000
--- a/drivers/staging/media/zoran/zoran.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * zoran - Iomega Buz driver
- *
- * Copyright (C) 1999 Rainer Johanni <Rainer@Johanni.de>
- *
- * based on
- *
- * zoran.0.0.3 Copyright (C) 1998 Dave Perks <dperks@ibm.net>
- *
- * and
- *
- * bttv - Bt848 frame grabber driver
- * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
- * & Marcus Metzler (mocm@thp.uni-koeln.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _BUZ_H_
-#define _BUZ_H_
-
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-fh.h>
-
-struct zoran_sync {
- unsigned long frame; /* number of buffer that has been free'd */
- unsigned long length; /* number of code bytes in buffer (capture only) */
- unsigned long seq; /* frame sequence number */
- u64 ts; /* timestamp */
-};
-
-
-#define ZORAN_NAME "ZORAN" /* name of the device */
-
-#define ZR_DEVNAME(zr) ((zr)->name)
-
-#define BUZ_MAX_WIDTH (zr->timing->Wa)
-#define BUZ_MAX_HEIGHT (zr->timing->Ha)
-#define BUZ_MIN_WIDTH 32 /* never display less than 32 pixels */
-#define BUZ_MIN_HEIGHT 24 /* never display less than 24 rows */
-
-#define BUZ_NUM_STAT_COM 4
-#define BUZ_MASK_STAT_COM 3
-
-#define BUZ_MAX_FRAME 256 /* Must be a power of 2 */
-#define BUZ_MASK_FRAME 255 /* Must be BUZ_MAX_FRAME-1 */
-
-#define BUZ_MAX_INPUT 16
-
-#if VIDEO_MAX_FRAME <= 32
-# define V4L_MAX_FRAME 32
-#elif VIDEO_MAX_FRAME <= 64
-# define V4L_MAX_FRAME 64
-#else
-# error "Too many video frame buffers to handle"
-#endif
-#define V4L_MASK_FRAME (V4L_MAX_FRAME - 1)
-
-#define MAX_FRAME (BUZ_MAX_FRAME > VIDEO_MAX_FRAME ? BUZ_MAX_FRAME : VIDEO_MAX_FRAME)
-
-#include "zr36057.h"
-
-enum card_type {
- UNKNOWN = -1,
-
- /* Pinnacle/Miro */
- DC10_old, /* DC30 like */
- DC10_new, /* DC10plus like */
- DC10plus,
- DC30,
- DC30plus,
-
- /* Linux Media Labs */
- LML33,
- LML33R10,
-
- /* Iomega */
- BUZ,
-
- /* AverMedia */
- AVS6EYES,
-
- /* total number of cards */
- NUM_CARDS
-};
-
-enum zoran_codec_mode {
- BUZ_MODE_IDLE, /* nothing going on */
- BUZ_MODE_MOTION_COMPRESS, /* grabbing frames */
- BUZ_MODE_MOTION_DECOMPRESS, /* playing frames */
- BUZ_MODE_STILL_COMPRESS, /* still frame conversion */
- BUZ_MODE_STILL_DECOMPRESS /* still frame conversion */
-};
-
-enum zoran_buffer_state {
- BUZ_STATE_USER, /* buffer is owned by application */
- BUZ_STATE_PEND, /* buffer is queued in pend[] ready to feed to I/O */
- BUZ_STATE_DMA, /* buffer is queued in dma[] for I/O */
- BUZ_STATE_DONE /* buffer is ready to return to application */
-};
-
-enum zoran_map_mode {
- ZORAN_MAP_MODE_RAW,
- ZORAN_MAP_MODE_JPG_REC,
-#define ZORAN_MAP_MODE_JPG ZORAN_MAP_MODE_JPG_REC
- ZORAN_MAP_MODE_JPG_PLAY,
-};
-
-enum gpio_type {
- ZR_GPIO_JPEG_SLEEP = 0,
- ZR_GPIO_JPEG_RESET,
- ZR_GPIO_JPEG_FRAME,
- ZR_GPIO_VID_DIR,
- ZR_GPIO_VID_EN,
- ZR_GPIO_VID_RESET,
- ZR_GPIO_CLK_SEL1,
- ZR_GPIO_CLK_SEL2,
- ZR_GPIO_MAX,
-};
-
-enum gpcs_type {
- GPCS_JPEG_RESET = 0,
- GPCS_JPEG_START,
- GPCS_MAX,
-};
-
-struct zoran_format {
- char *name;
- __u32 fourcc;
- int colorspace;
- int depth;
- __u32 flags;
- __u32 vfespfr;
-};
-/* flags */
-#define ZORAN_FORMAT_COMPRESSED (1 << 0)
-#define ZORAN_FORMAT_OVERLAY (1 << 1)
-#define ZORAN_FORMAT_CAPTURE (1 << 2)
-#define ZORAN_FORMAT_PLAYBACK (1 << 3)
-
-/* overlay-settings */
-struct zoran_overlay_settings {
- int is_set;
- int x, y, width, height; /* position */
- int clipcount; /* position and number of clips */
- const struct zoran_format *format; /* overlay format */
-};
-
-/* v4l-capture settings */
-struct zoran_v4l_settings {
- int width, height, bytesperline; /* capture size */
- const struct zoran_format *format; /* capture format */
-};
-
-/* jpg-capture/-playback settings */
-struct zoran_jpg_settings {
-	int decimation;		/* decimation preset (1, 2 or 4); 0 means the explicit settings below are used */
- int HorDcm, VerDcm, TmpDcm; /* capture decimation settings (TmpDcm=1 means both fields) */
- int field_per_buff, odd_even; /* field-settings (odd_even=1 (+TmpDcm=1) means top-field-first) */
- int img_x, img_y, img_width, img_height; /* crop settings (subframe capture) */
- struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */
-};
-
-struct zoran_fh;
-
-struct zoran_mapping {
- struct zoran_fh *fh;
- atomic_t count;
-};
-
-struct zoran_buffer {
- struct zoran_mapping *map;
- enum zoran_buffer_state state; /* state: unused/pending/dma/done */
- struct zoran_sync bs; /* DONE: info to return to application */
- union {
- struct {
- __le32 *frag_tab; /* addresses of frag table */
- u32 frag_tab_bus; /* same value cached to save time in ISR */
- } jpg;
- struct {
- char *fbuffer; /* virtual address of frame buffer */
- unsigned long fbuffer_phys;/* physical address of frame buffer */
- unsigned long fbuffer_bus;/* bus address of frame buffer */
- } v4l;
- };
-};
-
-enum zoran_lock_activity {
- ZORAN_FREE, /* free for use */
- ZORAN_ACTIVE, /* active but unlocked */
- ZORAN_LOCKED, /* locked */
-};
-
-/* buffer collections */
-struct zoran_buffer_col {
- enum zoran_lock_activity active; /* feature currently in use? */
- unsigned int num_buffers, buffer_size;
- struct zoran_buffer buffer[MAX_FRAME]; /* buffers */
- u8 allocated; /* Flag if buffers are allocated */
- u8 need_contiguous; /* Flag if contiguous buffers are needed */
- /* only applies to jpg buffers, raw buffers are always contiguous */
-};
-
-struct zoran;
-
-/* zoran_fh contains per-open() settings */
-struct zoran_fh {
- struct v4l2_fh fh;
- struct zoran *zr;
-
-	enum zoran_map_mode map_mode;		/* Flag: which buffer set the next mmap() will map */
-
- struct zoran_overlay_settings overlay_settings;
- u32 *overlay_mask; /* overlay mask */
- enum zoran_lock_activity overlay_active;/* feature currently in use? */
-
- struct zoran_buffer_col buffers; /* buffers' info */
-
- struct zoran_v4l_settings v4l_settings; /* structure with a lot of things to play with */
- struct zoran_jpg_settings jpg_settings; /* structure with a lot of things to play with */
-};
-
-struct card_info {
- enum card_type type;
- char name[32];
- const char *i2c_decoder; /* i2c decoder device */
- const unsigned short *addrs_decoder;
- const char *i2c_encoder; /* i2c encoder device */
- const unsigned short *addrs_encoder;
- u16 video_vfe, video_codec; /* videocodec types */
- u16 audio_chip; /* audio type */
-
- int inputs; /* number of video inputs */
- struct input {
- int muxsel;
- char name[32];
- } input[BUZ_MAX_INPUT];
-
- v4l2_std_id norms;
- struct tvnorm *tvn[3]; /* supported TV norms */
-
- u32 jpeg_int; /* JPEG interrupt */
- u32 vsync_int; /* VSYNC interrupt */
- s8 gpio[ZR_GPIO_MAX];
- u8 gpcs[GPCS_MAX];
-
- struct vfe_polarity vfe_pol;
- u8 gpio_pol[ZR_GPIO_MAX];
-
- /* is the /GWS line connected? */
- u8 gws_not_connected;
-
- /* avs6eyes mux setting */
- u8 input_mux;
-
- void (*init) (struct zoran * zr);
-};
-
-struct zoran {
- struct v4l2_device v4l2_dev;
- struct v4l2_ctrl_handler hdl;
- struct video_device *video_dev;
-
- struct i2c_adapter i2c_adapter; /* */
- struct i2c_algo_bit_data i2c_algo; /* */
- u32 i2cbr;
-
- struct v4l2_subdev *decoder; /* video decoder sub-device */
- struct v4l2_subdev *encoder; /* video encoder sub-device */
-
- struct videocodec *codec; /* video codec */
- struct videocodec *vfe; /* video front end */
-
- struct mutex lock; /* file ops serialize lock */
-
- u8 initialized; /* flag if zoran has been correctly initialized */
- int user; /* number of current users */
- struct card_info card;
- struct tvnorm *timing;
-
- unsigned short id; /* number of this device */
- char name[32]; /* name of this device */
- struct pci_dev *pci_dev; /* PCI device */
- unsigned char revision; /* revision of zr36057 */
- unsigned char __iomem *zr36057_mem;/* pointer to mapped IO memory */
-
- spinlock_t spinlock; /* Spinlock */
-
- /* Video for Linux parameters */
- int input; /* card's norm and input */
- v4l2_std_id norm;
-
- /* Current buffer params */
- void *vbuf_base;
- int vbuf_height, vbuf_width;
- int vbuf_depth;
- int vbuf_bytesperline;
-
- struct zoran_overlay_settings overlay_settings;
- u32 *overlay_mask; /* overlay mask */
- enum zoran_lock_activity overlay_active; /* feature currently in use? */
-
- wait_queue_head_t v4l_capq;
-
- int v4l_overlay_active; /* Overlay grab is activated */
- int v4l_memgrab_active; /* Memory grab is activated */
-
- int v4l_grab_frame; /* Frame number being currently grabbed */
-#define NO_GRAB_ACTIVE (-1)
- unsigned long v4l_grab_seq; /* Number of frames grabbed */
- struct zoran_v4l_settings v4l_settings; /* structure with a lot of things to play with */
-
- /* V4L grab queue of frames pending */
- unsigned long v4l_pend_head;
- unsigned long v4l_pend_tail;
- unsigned long v4l_sync_tail;
- int v4l_pend[V4L_MAX_FRAME];
- struct zoran_buffer_col v4l_buffers; /* V4L buffers' info */
-
- /* Buz MJPEG parameters */
- enum zoran_codec_mode codec_mode; /* status of codec */
- struct zoran_jpg_settings jpg_settings; /* structure with a lot of things to play with */
-
- wait_queue_head_t jpg_capq; /* wait here for grab to finish */
-
- /* grab queue counts/indices, mask with BUZ_MASK_STAT_COM before using as index */
- /* (dma_head - dma_tail) is number active in DMA, must be <= BUZ_NUM_STAT_COM */
- /* (value & BUZ_MASK_STAT_COM) corresponds to index in stat_com table */
- unsigned long jpg_que_head; /* Index where to put next buffer which is queued */
- unsigned long jpg_dma_head; /* Index of next buffer which goes into stat_com */
- unsigned long jpg_dma_tail; /* Index of last buffer in stat_com */
- unsigned long jpg_que_tail; /* Index of last buffer in queue */
- unsigned long jpg_seq_num; /* count of frames since grab/play started */
- unsigned long jpg_err_seq; /* last seq_num before error */
- unsigned long jpg_err_shift;
- unsigned long jpg_queued_num; /* count of frames queued since grab/play started */
-
- /* zr36057's code buffer table */
- __le32 *stat_com; /* stat_com[i] is indexed by dma_head/tail & BUZ_MASK_STAT_COM */
-
- /* (value & BUZ_MASK_FRAME) corresponds to index in pend[] queue */
- int jpg_pend[BUZ_MAX_FRAME];
-
- /* array indexed by frame number */
- struct zoran_buffer_col jpg_buffers; /* MJPEG buffers' info */
-
- /* Additional stuff for testing */
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *zoran_proc;
-#else
- void *zoran_proc;
-#endif
- int testing;
- int jpeg_error;
- int intr_counter_GIRQ1;
- int intr_counter_GIRQ0;
- int intr_counter_CodRepIRQ;
- int intr_counter_JPEGRepIRQ;
- int field_counter;
- int IRQ1_in;
- int IRQ1_out;
- int JPEG_in;
- int JPEG_out;
- int JPEG_0;
- int JPEG_1;
- int END_event_missed;
- int JPEG_missed;
- int JPEG_error;
- int num_errors;
- int JPEG_max_missed;
- int JPEG_min_missed;
-
- u32 last_isr;
- unsigned long frame_num;
-
- wait_queue_head_t test_q;
-};
-
-static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev)
-{
- return container_of(v4l2_dev, struct zoran, v4l2_dev);
-}
-
-/* There was something called _ALPHA_BUZ that used the PCI address instead of
- * the kernel iomapped address for btread/btwrite. */
-#define btwrite(dat,adr) writel((dat), zr->zr36057_mem+(adr))
-#define btread(adr) readl(zr->zr36057_mem+(adr))
-
-#define btand(dat,adr) btwrite((dat) & btread(adr), adr)
-#define btor(dat,adr) btwrite((dat) | btread(adr), adr)
-#define btaor(dat,mask,adr) btwrite((dat) | ((mask) & btread(adr)), adr)
-
-#endif
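
The jpg_que_head/jpg_dma_head/jpg_dma_tail counters in struct zoran above are free-running; only their low bits, masked with BUZ_MASK_STAT_COM, select an entry of the 4-slot stat_com table. The following minimal user-space sketch illustrates that indexing; it reuses the two constants from this header, while the loop and the completion pattern are invented purely for the example.

#include <stdio.h>

#define BUZ_NUM_STAT_COM  4
#define BUZ_MASK_STAT_COM 3	/* BUZ_NUM_STAT_COM - 1 */

int main(void)
{
	unsigned long dma_head = 0, dma_tail = 0;
	unsigned long frame;

	for (frame = 0; frame < 6; frame++) {
		/* at most BUZ_NUM_STAT_COM buffers may be in flight */
		if (dma_head - dma_tail < BUZ_NUM_STAT_COM) {
			printf("frame %lu -> stat_com[%lu]\n",
			       frame, dma_head & BUZ_MASK_STAT_COM);
			dma_head++;
		}
		/* pretend every second iteration completes the oldest DMA */
		if (frame & 1)
			dma_tail++;
	}
	return 0;
}
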
diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c
deleted file mode 100644
index ea10523194e8..000000000000
--- a/drivers/staging/media/zoran/zoran_card.c
+++ /dev/null
@@ -1,1524 +0,0 @@
-/*
- * Zoran zr36057/zr36067 PCI controller driver, for the
- * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
- * Media Labs LML33/LML33R10.
- *
- * This part handles card-specific data and detection
- *
- * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
- *
- * Currently maintained by:
- * Ronald Bultje <rbultje@ronald.bitfreak.net>
- * Laurent Pinchart <laurent.pinchart@skynet.be>
- * Mailinglist <mjpeg-users@lists.sf.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <linux/proc_fs.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/videodev2.h>
-#include <linux/spinlock.h>
-#include <linux/sem.h>
-#include <linux/kmod.h>
-#include <linux/wait.h>
-
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-#include <media/v4l2-common.h>
-#include <media/i2c/bt819.h>
-
-#include "videocodec.h"
-#include "zoran.h"
-#include "zoran_card.h"
-#include "zoran_device.h"
-#include "zoran_procfs.h"
-
-extern const struct zoran_format zoran_formats[];
-
-static int card[BUZ_MAX] = { [0 ... (BUZ_MAX-1)] = -1 };
-module_param_array(card, int, NULL, 0444);
-MODULE_PARM_DESC(card, "Card type");
-
-/*
-   The video memory base address of the video card.
-   The driver has a small database of known video cards
-   from which it can determine the address. If your card is not
-   in there, you have to either pass the address to the driver
-   as a module parameter or set it via a VIDIOCSFBUF ioctl.
- */
-
-static unsigned long vidmem; /* default = 0 - Video memory base address */
-module_param_hw(vidmem, ulong, iomem, 0444);
-MODULE_PARM_DESC(vidmem, "Default video memory base address");
-
-/*
- Default input and video norm at startup of the driver.
-*/
-
-static unsigned int default_input; /* default 0 = Composite, 1 = S-Video */
-module_param(default_input, uint, 0444);
-MODULE_PARM_DESC(default_input,
- "Default input (0=Composite, 1=S-Video, 2=Internal)");
-
-static int default_mux = 1; /* 6 Eyes input selection */
-module_param(default_mux, int, 0644);
-MODULE_PARM_DESC(default_mux,
- "Default 6 Eyes mux setting (Input selection)");
-
-static int default_norm;	/* default 0 = PAL, 1 = NTSC, 2 = SECAM */
-module_param(default_norm, int, 0444);
-MODULE_PARM_DESC(default_norm, "Default norm (0=PAL, 1=NTSC, 2=SECAM)");
-
-/* /dev/videoN, -1 for autodetect */
-static int video_nr[BUZ_MAX] = { [0 ... (BUZ_MAX-1)] = -1 };
-module_param_array(video_nr, int, NULL, 0444);
-MODULE_PARM_DESC(video_nr, "Video device number (-1=Auto)");
-
-int v4l_nbufs = 4;
-int v4l_bufsize = 864; /* Everybody should be able to work with this setting */
-module_param(v4l_nbufs, int, 0644);
-MODULE_PARM_DESC(v4l_nbufs, "Maximum number of V4L buffers to use");
-module_param(v4l_bufsize, int, 0644);
-MODULE_PARM_DESC(v4l_bufsize, "Maximum size per V4L buffer (in kB)");
-
-int jpg_nbufs = 32;
-int jpg_bufsize = 512; /* max size for 100% quality full-PAL frame */
-module_param(jpg_nbufs, int, 0644);
-MODULE_PARM_DESC(jpg_nbufs, "Maximum number of JPG buffers to use");
-module_param(jpg_bufsize, int, 0644);
-MODULE_PARM_DESC(jpg_bufsize, "Maximum size per JPG buffer (in kB)");
-
-int pass_through = 0; /* 1=Pass through TV signal when device is not used */
- /* 0=Show color bar when device is not used (LML33: only if lml33dpath=1) */
-module_param(pass_through, int, 0644);
-MODULE_PARM_DESC(pass_through,
- "Pass TV signal through to TV-out when idling");
-
-int zr36067_debug = 1;
-module_param_named(debug, zr36067_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level (0-5)");
-
-#define ZORAN_VERSION "0.10.1"
-
-MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
-MODULE_AUTHOR("Serguei Miridonov");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(ZORAN_VERSION);
-
-#define ZR_DEVICE(subven, subdev, data) { \
- .vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \
- .subvendor = (subven), .subdevice = (subdev), .driver_data = (data) }
-
-static const struct pci_device_id zr36067_pci_tbl[] = {
- ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC10PLUS, DC10plus),
- ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC30PLUS, DC30plus),
- ZR_DEVICE(PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, PCI_DEVICE_ID_LML_33R10, LML33R10),
- ZR_DEVICE(PCI_VENDOR_ID_IOMEGA, PCI_DEVICE_ID_IOMEGA_BUZ, BUZ),
- ZR_DEVICE(PCI_ANY_ID, PCI_ANY_ID, NUM_CARDS),
- {0}
-};
-MODULE_DEVICE_TABLE(pci, zr36067_pci_tbl);
-
-static unsigned int zoran_num; /* number of cards found */
-
-/* videocodec bus functions ZR36060 */
-static u32
-zr36060_read (struct videocodec *codec,
- u16 reg)
-{
- struct zoran *zr = (struct zoran *) codec->master_data->data;
- __u32 data;
-
- if (post_office_wait(zr)
- || post_office_write(zr, 0, 1, reg >> 8)
- || post_office_write(zr, 0, 2, reg & 0xff)) {
- return -1;
- }
-
- data = post_office_read(zr, 0, 3) & 0xff;
- return data;
-}
-
-static void
-zr36060_write (struct videocodec *codec,
- u16 reg,
- u32 val)
-{
- struct zoran *zr = (struct zoran *) codec->master_data->data;
-
- if (post_office_wait(zr)
- || post_office_write(zr, 0, 1, reg >> 8)
- || post_office_write(zr, 0, 2, reg & 0xff)) {
- return;
- }
-
- post_office_write(zr, 0, 3, val & 0xff);
-}
-
-/* videocodec bus functions ZR36050 */
-static u32
-zr36050_read (struct videocodec *codec,
- u16 reg)
-{
- struct zoran *zr = (struct zoran *) codec->master_data->data;
- __u32 data;
-
- if (post_office_wait(zr)
- || post_office_write(zr, 1, 0, reg >> 2)) { // reg. HIGHBYTES
- return -1;
- }
-
- data = post_office_read(zr, 0, reg & 0x03) & 0xff; // reg. LOWBYTES + read
- return data;
-}
-
-static void
-zr36050_write (struct videocodec *codec,
- u16 reg,
- u32 val)
-{
- struct zoran *zr = (struct zoran *) codec->master_data->data;
-
- if (post_office_wait(zr)
- || post_office_write(zr, 1, 0, reg >> 2)) { // reg. HIGHBYTES
- return;
- }
-
- post_office_write(zr, 0, reg & 0x03, val & 0xff); // reg. LOWBYTES + wr. data
-}
-
-/* videocodec bus functions ZR36016 */
-static u32
-zr36016_read (struct videocodec *codec,
- u16 reg)
-{
- struct zoran *zr = (struct zoran *) codec->master_data->data;
- __u32 data;
-
- if (post_office_wait(zr)) {
- return -1;
- }
-
- data = post_office_read(zr, 2, reg & 0x03) & 0xff; // read
- return data;
-}
-
-/* hack for use in zoran_device.c */
-void
-zr36016_write (struct videocodec *codec,
- u16 reg,
- u32 val)
-{
- struct zoran *zr = (struct zoran *) codec->master_data->data;
-
- if (post_office_wait(zr)) {
- return;
- }
-
- post_office_write(zr, 2, reg & 0x03, val & 0x0ff); // wr. data
-}
-
-/*
- * Board specific information
- */
-
-static void
-dc10_init (struct zoran *zr)
-{
- dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__);
-
- /* Pixel clock selection */
- GPIO(zr, 4, 0);
- GPIO(zr, 5, 1);
- /* Enable the video bus sync signals */
- GPIO(zr, 7, 0);
-}
-
-static void
-dc10plus_init (struct zoran *zr)
-{
- dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__);
-}
-
-static void
-buz_init (struct zoran *zr)
-{
- dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__);
-
- /* some stuff from Iomega */
- pci_write_config_dword(zr->pci_dev, 0xfc, 0x90680f15);
- pci_write_config_dword(zr->pci_dev, 0x0c, 0x00012020);
- pci_write_config_dword(zr->pci_dev, 0xe8, 0xc0200000);
-}
-
-static void
-lml33_init (struct zoran *zr)
-{
- dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__);
-
- GPIO(zr, 2, 1); // Set Composite input/output
-}
-
-static void
-avs6eyes_init (struct zoran *zr)
-{
- // AverMedia 6-Eyes original driver by Christer Weinigel
-
- // Lifted straight from Christer's old driver and
- // modified slightly by Martin Samuelsson.
-
- int mux = default_mux; /* 1 = BT866, 7 = VID1 */
-
- GPIO(zr, 4, 1); /* Bt866 SLEEP on */
- udelay(2);
-
- GPIO(zr, 0, 1); /* ZR36060 /RESET on */
- GPIO(zr, 1, 0); /* ZR36060 /SLEEP on */
- GPIO(zr, 2, mux & 1); /* MUX S0 */
- GPIO(zr, 3, 0); /* /FRAME on */
- GPIO(zr, 4, 0); /* Bt866 SLEEP off */
- GPIO(zr, 5, mux & 2); /* MUX S1 */
- GPIO(zr, 6, 0); /* ? */
- GPIO(zr, 7, mux & 4); /* MUX S2 */
-
-}
-
-static char *
-codecid_to_modulename (u16 codecid)
-{
- char *name = NULL;
-
- switch (codecid) {
- case CODEC_TYPE_ZR36060:
- name = "zr36060";
- break;
- case CODEC_TYPE_ZR36050:
- name = "zr36050";
- break;
- case CODEC_TYPE_ZR36016:
- name = "zr36016";
- break;
- }
-
- return name;
-}
-
-// struct tvnorm {
-// u16 Wt, Wa, HStart, HSyncStart, Ht, Ha, VStart;
-// };
-
-static struct tvnorm f50sqpixel = { 944, 768, 83, 880, 625, 576, 16 };
-static struct tvnorm f60sqpixel = { 780, 640, 51, 716, 525, 480, 12 };
-static struct tvnorm f50ccir601 = { 864, 720, 75, 804, 625, 576, 18 };
-static struct tvnorm f60ccir601 = { 858, 720, 57, 788, 525, 480, 16 };
-
-static struct tvnorm f50ccir601_lml33 = { 864, 720, 75+34, 804, 625, 576, 18 };
-static struct tvnorm f60ccir601_lml33 = { 858, 720, 57+34, 788, 525, 480, 16 };
-
-/* The DC10 (57/16/50) uses VActive as HSync, so HStart must be 0 */
-static struct tvnorm f50sqpixel_dc10 = { 944, 768, 0, 880, 625, 576, 0 };
-static struct tvnorm f60sqpixel_dc10 = { 780, 640, 0, 716, 525, 480, 12 };
-
-/* FIXME: I cannot swap U and V in saa7114, so I do one
- * pixel left shift in zoran (75 -> 74)
- * (Maxim Yevtyushkin <max@linuxmedialabs.com>) */
-static struct tvnorm f50ccir601_lm33r10 = { 864, 720, 74+54, 804, 625, 576, 18 };
-static struct tvnorm f60ccir601_lm33r10 = { 858, 720, 56+54, 788, 525, 480, 16 };
-
-/* FIXME: The ks0127 seem incapable of swapping U and V, too, which is why I
- * copy Maxim's left shift hack for the 6 Eyes.
- *
- * Christer's driver used the unshifted norms, though...
- * /Sam */
-static struct tvnorm f50ccir601_avs6eyes = { 864, 720, 74, 804, 625, 576, 18 };
-static struct tvnorm f60ccir601_avs6eyes = { 858, 720, 56, 788, 525, 480, 16 };
-
-static const unsigned short vpx3220_addrs[] = { 0x43, 0x47, I2C_CLIENT_END };
-static const unsigned short saa7110_addrs[] = { 0x4e, 0x4f, I2C_CLIENT_END };
-static const unsigned short saa7111_addrs[] = { 0x25, 0x24, I2C_CLIENT_END };
-static const unsigned short saa7114_addrs[] = { 0x21, 0x20, I2C_CLIENT_END };
-static const unsigned short adv717x_addrs[] = { 0x6a, 0x6b, 0x2a, 0x2b, I2C_CLIENT_END };
-static const unsigned short ks0127_addrs[] = { 0x6c, 0x6d, I2C_CLIENT_END };
-static const unsigned short saa7185_addrs[] = { 0x44, I2C_CLIENT_END };
-static const unsigned short bt819_addrs[] = { 0x45, I2C_CLIENT_END };
-static const unsigned short bt856_addrs[] = { 0x44, I2C_CLIENT_END };
-static const unsigned short bt866_addrs[] = { 0x44, I2C_CLIENT_END };
-
-static struct card_info zoran_cards[NUM_CARDS] = {
- {
- .type = DC10_old,
- .name = "DC10(old)",
- .i2c_decoder = "vpx3220a",
- .addrs_decoder = vpx3220_addrs,
- .video_codec = CODEC_TYPE_ZR36050,
- .video_vfe = CODEC_TYPE_ZR36016,
-
- .inputs = 3,
- .input = {
- { 1, "Composite" },
- { 2, "S-Video" },
- { 0, "Internal/comp" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM,
- .tvn = {
- &f50sqpixel_dc10,
- &f60sqpixel_dc10,
- &f50sqpixel_dc10
- },
- .jpeg_int = 0,
- .vsync_int = ZR36057_ISR_GIRQ1,
- .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 },
- .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 },
- .gpcs = { -1, 0 },
- .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
- .gws_not_connected = 0,
- .input_mux = 0,
- .init = &dc10_init,
- }, {
- .type = DC10_new,
- .name = "DC10(new)",
- .i2c_decoder = "saa7110",
- .addrs_decoder = saa7110_addrs,
- .i2c_encoder = "adv7175",
- .addrs_encoder = adv717x_addrs,
- .video_codec = CODEC_TYPE_ZR36060,
-
- .inputs = 3,
- .input = {
- { 0, "Composite" },
- { 7, "S-Video" },
- { 5, "Internal/comp" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM,
- .tvn = {
- &f50sqpixel,
- &f60sqpixel,
- &f50sqpixel},
- .jpeg_int = ZR36057_ISR_GIRQ0,
- .vsync_int = ZR36057_ISR_GIRQ1,
- .gpio = { 3, 0, 6, 1, 2, -1, 4, 5 },
- .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
- .gpcs = { -1, 1},
- .vfe_pol = { 1, 1, 1, 1, 0, 0, 0, 0 },
- .gws_not_connected = 0,
- .input_mux = 0,
- .init = &dc10plus_init,
- }, {
- .type = DC10plus,
- .name = "DC10plus",
- .i2c_decoder = "saa7110",
- .addrs_decoder = saa7110_addrs,
- .i2c_encoder = "adv7175",
- .addrs_encoder = adv717x_addrs,
- .video_codec = CODEC_TYPE_ZR36060,
-
- .inputs = 3,
- .input = {
- { 0, "Composite" },
- { 7, "S-Video" },
- { 5, "Internal/comp" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM,
- .tvn = {
- &f50sqpixel,
- &f60sqpixel,
- &f50sqpixel
- },
- .jpeg_int = ZR36057_ISR_GIRQ0,
- .vsync_int = ZR36057_ISR_GIRQ1,
- .gpio = { 3, 0, 6, 1, 2, -1, 4, 5 },
- .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
- .gpcs = { -1, 1 },
- .vfe_pol = { 1, 1, 1, 1, 0, 0, 0, 0 },
- .gws_not_connected = 0,
- .input_mux = 0,
- .init = &dc10plus_init,
- }, {
- .type = DC30,
- .name = "DC30",
- .i2c_decoder = "vpx3220a",
- .addrs_decoder = vpx3220_addrs,
- .i2c_encoder = "adv7175",
- .addrs_encoder = adv717x_addrs,
- .video_codec = CODEC_TYPE_ZR36050,
- .video_vfe = CODEC_TYPE_ZR36016,
-
- .inputs = 3,
- .input = {
- { 1, "Composite" },
- { 2, "S-Video" },
- { 0, "Internal/comp" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM,
- .tvn = {
- &f50sqpixel_dc10,
- &f60sqpixel_dc10,
- &f50sqpixel_dc10
- },
- .jpeg_int = 0,
- .vsync_int = ZR36057_ISR_GIRQ1,
- .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 },
- .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 },
- .gpcs = { -1, 0 },
- .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
- .gws_not_connected = 0,
- .input_mux = 0,
- .init = &dc10_init,
- }, {
- .type = DC30plus,
- .name = "DC30plus",
- .i2c_decoder = "vpx3220a",
- .addrs_decoder = vpx3220_addrs,
- .i2c_encoder = "adv7175",
- .addrs_encoder = adv717x_addrs,
- .video_codec = CODEC_TYPE_ZR36050,
- .video_vfe = CODEC_TYPE_ZR36016,
-
- .inputs = 3,
- .input = {
- { 1, "Composite" },
- { 2, "S-Video" },
- { 0, "Internal/comp" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM,
- .tvn = {
- &f50sqpixel_dc10,
- &f60sqpixel_dc10,
- &f50sqpixel_dc10
- },
- .jpeg_int = 0,
- .vsync_int = ZR36057_ISR_GIRQ1,
- .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 },
- .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 },
- .gpcs = { -1, 0 },
- .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
- .gws_not_connected = 0,
- .input_mux = 0,
- .init = &dc10_init,
- }, {
- .type = LML33,
- .name = "LML33",
- .i2c_decoder = "bt819a",
- .addrs_decoder = bt819_addrs,
- .i2c_encoder = "bt856",
- .addrs_encoder = bt856_addrs,
- .video_codec = CODEC_TYPE_ZR36060,
-
- .inputs = 2,
- .input = {
- { 0, "Composite" },
- { 7, "S-Video" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL,
- .tvn = {
- &f50ccir601_lml33,
- &f60ccir601_lml33,
- NULL
- },
- .jpeg_int = ZR36057_ISR_GIRQ1,
- .vsync_int = ZR36057_ISR_GIRQ0,
- .gpio = { 1, -1, 3, 5, 7, -1, -1, -1 },
- .gpio_pol = { 0, 0, 0, 0, 1, 0, 0, 0 },
- .gpcs = { 3, 1 },
- .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 },
- .gws_not_connected = 1,
- .input_mux = 0,
- .init = &lml33_init,
- }, {
- .type = LML33R10,
- .name = "LML33R10",
- .i2c_decoder = "saa7114",
- .addrs_decoder = saa7114_addrs,
- .i2c_encoder = "adv7170",
- .addrs_encoder = adv717x_addrs,
- .video_codec = CODEC_TYPE_ZR36060,
-
- .inputs = 2,
- .input = {
- { 0, "Composite" },
- { 7, "S-Video" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL,
- .tvn = {
- &f50ccir601_lm33r10,
- &f60ccir601_lm33r10,
- NULL
- },
- .jpeg_int = ZR36057_ISR_GIRQ1,
- .vsync_int = ZR36057_ISR_GIRQ0,
- .gpio = { 1, -1, 3, 5, 7, -1, -1, -1 },
- .gpio_pol = { 0, 0, 0, 0, 1, 0, 0, 0 },
- .gpcs = { 3, 1 },
- .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 },
- .gws_not_connected = 1,
- .input_mux = 0,
- .init = &lml33_init,
- }, {
- .type = BUZ,
- .name = "Buz",
- .i2c_decoder = "saa7111",
- .addrs_decoder = saa7111_addrs,
- .i2c_encoder = "saa7185",
- .addrs_encoder = saa7185_addrs,
- .video_codec = CODEC_TYPE_ZR36060,
-
- .inputs = 2,
- .input = {
- { 3, "Composite" },
- { 7, "S-Video" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM,
- .tvn = {
- &f50ccir601,
- &f60ccir601,
- &f50ccir601
- },
- .jpeg_int = ZR36057_ISR_GIRQ1,
- .vsync_int = ZR36057_ISR_GIRQ0,
- .gpio = { 1, -1, 3, -1, -1, -1, -1, -1 },
- .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
- .gpcs = { 3, 1 },
- .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 },
- .gws_not_connected = 1,
- .input_mux = 0,
- .init = &buz_init,
- }, {
- .type = AVS6EYES,
- .name = "6-Eyes",
- /* AverMedia chose not to brand the 6-Eyes. Thus it
- can't be autodetected, and requires card=x. */
- .i2c_decoder = "ks0127",
- .addrs_decoder = ks0127_addrs,
- .i2c_encoder = "bt866",
- .addrs_encoder = bt866_addrs,
- .video_codec = CODEC_TYPE_ZR36060,
-
- .inputs = 10,
- .input = {
- { 0, "Composite 1" },
- { 1, "Composite 2" },
- { 2, "Composite 3" },
- { 4, "Composite 4" },
- { 5, "Composite 5" },
- { 6, "Composite 6" },
- { 8, "S-Video 1" },
- { 9, "S-Video 2" },
- {10, "S-Video 3" },
- {15, "YCbCr" }
- },
- .norms = V4L2_STD_NTSC|V4L2_STD_PAL,
- .tvn = {
- &f50ccir601_avs6eyes,
- &f60ccir601_avs6eyes,
- NULL
- },
- .jpeg_int = ZR36057_ISR_GIRQ1,
- .vsync_int = ZR36057_ISR_GIRQ0,
- .gpio = { 1, 0, 3, -1, -1, -1, -1, -1 },// Validity unknown /Sam
- .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, // Validity unknown /Sam
- .gpcs = { 3, 1 }, // Validity unknown /Sam
- .vfe_pol = { 1, 0, 0, 0, 0, 1, 0, 0 }, // Validity unknown /Sam
- .gws_not_connected = 1,
- .input_mux = 1,
- .init = &avs6eyes_init,
- }
-
-};
-
-/*
- * I2C functions
- */
-/* software I2C functions */
-static int
-zoran_i2c_getsda (void *data)
-{
- struct zoran *zr = (struct zoran *) data;
-
- return (btread(ZR36057_I2CBR) >> 1) & 1;
-}
-
-static int
-zoran_i2c_getscl (void *data)
-{
- struct zoran *zr = (struct zoran *) data;
-
- return btread(ZR36057_I2CBR) & 1;
-}
-
-static void
-zoran_i2c_setsda (void *data,
- int state)
-{
- struct zoran *zr = (struct zoran *) data;
-
- if (state)
- zr->i2cbr |= 2;
- else
- zr->i2cbr &= ~2;
- btwrite(zr->i2cbr, ZR36057_I2CBR);
-}
-
-static void
-zoran_i2c_setscl (void *data,
- int state)
-{
- struct zoran *zr = (struct zoran *) data;
-
- if (state)
- zr->i2cbr |= 1;
- else
- zr->i2cbr &= ~1;
- btwrite(zr->i2cbr, ZR36057_I2CBR);
-}
-
-static const struct i2c_algo_bit_data zoran_i2c_bit_data_template = {
- .setsda = zoran_i2c_setsda,
- .setscl = zoran_i2c_setscl,
- .getsda = zoran_i2c_getsda,
- .getscl = zoran_i2c_getscl,
- .udelay = 10,
- .timeout = 100,
-};
-
-static int
-zoran_register_i2c (struct zoran *zr)
-{
- zr->i2c_algo = zoran_i2c_bit_data_template;
- zr->i2c_algo.data = zr;
- strscpy(zr->i2c_adapter.name, ZR_DEVNAME(zr),
- sizeof(zr->i2c_adapter.name));
- i2c_set_adapdata(&zr->i2c_adapter, &zr->v4l2_dev);
- zr->i2c_adapter.algo_data = &zr->i2c_algo;
- zr->i2c_adapter.dev.parent = &zr->pci_dev->dev;
- return i2c_bit_add_bus(&zr->i2c_adapter);
-}
-
-static void
-zoran_unregister_i2c (struct zoran *zr)
-{
- i2c_del_adapter(&zr->i2c_adapter);
-}
-
-/* Check a zoran_jpg_settings struct for correctness, insert default params */
-
-int
-zoran_check_jpg_settings (struct zoran *zr,
- struct zoran_jpg_settings *settings,
- int try)
-{
- int err = 0, err0 = 0;
-
- dprintk(4,
- KERN_DEBUG
- "%s: %s - dec: %d, Hdcm: %d, Vdcm: %d, Tdcm: %d\n",
- ZR_DEVNAME(zr), __func__, settings->decimation, settings->HorDcm,
- settings->VerDcm, settings->TmpDcm);
- dprintk(4,
- KERN_DEBUG
- "%s: %s - x: %d, y: %d, w: %d, y: %d\n",
- ZR_DEVNAME(zr), __func__, settings->img_x, settings->img_y,
- settings->img_width, settings->img_height);
- /* Check decimation, set default values for decimation = 1, 2, 4 */
- switch (settings->decimation) {
- case 1:
-
- settings->HorDcm = 1;
- settings->VerDcm = 1;
- settings->TmpDcm = 1;
- settings->field_per_buff = 2;
- settings->img_x = 0;
- settings->img_y = 0;
- settings->img_width = BUZ_MAX_WIDTH;
- settings->img_height = BUZ_MAX_HEIGHT / 2;
- break;
- case 2:
-
- settings->HorDcm = 2;
- settings->VerDcm = 1;
- settings->TmpDcm = 2;
- settings->field_per_buff = 1;
- settings->img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
- settings->img_y = 0;
- settings->img_width =
- (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
- settings->img_height = BUZ_MAX_HEIGHT / 2;
- break;
- case 4:
-
- if (zr->card.type == DC10_new) {
- dprintk(1,
- KERN_DEBUG
- "%s: %s - HDec by 4 is not supported on the DC10\n",
- ZR_DEVNAME(zr), __func__);
- err0++;
- break;
- }
-
- settings->HorDcm = 4;
- settings->VerDcm = 2;
- settings->TmpDcm = 2;
- settings->field_per_buff = 1;
- settings->img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
- settings->img_y = 0;
- settings->img_width =
- (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
- settings->img_height = BUZ_MAX_HEIGHT / 2;
- break;
- case 0:
-
- /* We have to check the data the user has set */
-
- if (settings->HorDcm != 1 && settings->HorDcm != 2 &&
- (zr->card.type == DC10_new || settings->HorDcm != 4)) {
- settings->HorDcm = clamp(settings->HorDcm, 1, 2);
- err0++;
- }
- if (settings->VerDcm != 1 && settings->VerDcm != 2) {
- settings->VerDcm = clamp(settings->VerDcm, 1, 2);
- err0++;
- }
- if (settings->TmpDcm != 1 && settings->TmpDcm != 2) {
- settings->TmpDcm = clamp(settings->TmpDcm, 1, 2);
- err0++;
- }
- if (settings->field_per_buff != 1 &&
- settings->field_per_buff != 2) {
- settings->field_per_buff = clamp(settings->field_per_buff, 1, 2);
- err0++;
- }
- if (settings->img_x < 0) {
- settings->img_x = 0;
- err0++;
- }
- if (settings->img_y < 0) {
- settings->img_y = 0;
- err0++;
- }
- if (settings->img_width < 0 || settings->img_width > BUZ_MAX_WIDTH) {
- settings->img_width = clamp(settings->img_width, 0, (int)BUZ_MAX_WIDTH);
- err0++;
- }
- if (settings->img_height < 0 || settings->img_height > BUZ_MAX_HEIGHT / 2) {
- settings->img_height = clamp(settings->img_height, 0, BUZ_MAX_HEIGHT / 2);
- err0++;
- }
- if (settings->img_x + settings->img_width > BUZ_MAX_WIDTH) {
- settings->img_x = BUZ_MAX_WIDTH - settings->img_width;
- err0++;
- }
- if (settings->img_y + settings->img_height > BUZ_MAX_HEIGHT / 2) {
- settings->img_y = BUZ_MAX_HEIGHT / 2 - settings->img_height;
- err0++;
- }
- if (settings->img_width % (16 * settings->HorDcm) != 0) {
- settings->img_width -= settings->img_width % (16 * settings->HorDcm);
- if (settings->img_width == 0)
- settings->img_width = 16 * settings->HorDcm;
- err0++;
- }
- if (settings->img_height % (8 * settings->VerDcm) != 0) {
- settings->img_height -= settings->img_height % (8 * settings->VerDcm);
- if (settings->img_height == 0)
- settings->img_height = 8 * settings->VerDcm;
- err0++;
- }
-
- if (!try && err0) {
- dprintk(1,
- KERN_ERR
- "%s: %s - error in params for decimation = 0\n",
- ZR_DEVNAME(zr), __func__);
- err++;
- }
- break;
- default:
- dprintk(1,
- KERN_ERR
- "%s: %s - decimation = %d, must be 0, 1, 2 or 4\n",
- ZR_DEVNAME(zr), __func__, settings->decimation);
- err++;
- break;
- }
-
- if (settings->jpg_comp.quality > 100)
- settings->jpg_comp.quality = 100;
- if (settings->jpg_comp.quality < 5)
- settings->jpg_comp.quality = 5;
- if (settings->jpg_comp.APPn < 0)
- settings->jpg_comp.APPn = 0;
- if (settings->jpg_comp.APPn > 15)
- settings->jpg_comp.APPn = 15;
- if (settings->jpg_comp.APP_len < 0)
- settings->jpg_comp.APP_len = 0;
- if (settings->jpg_comp.APP_len > 60)
- settings->jpg_comp.APP_len = 60;
- if (settings->jpg_comp.COM_len < 0)
- settings->jpg_comp.COM_len = 0;
- if (settings->jpg_comp.COM_len > 60)
- settings->jpg_comp.COM_len = 60;
- if (err)
- return -EINVAL;
- return 0;
-}
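
As a worked example of the presets in zoran_check_jpg_settings() above (illustrative only, computed from the code for a CCIR-601 PAL source, i.e. BUZ_MAX_WIDTH = 720 and BUZ_MAX_HEIGHT = 576): decimation=1 captures 720x288 per field with two fields per buffer, decimation=2 crops to 704x288 and codes 352x288 per buffer, and decimation=4 codes 176x144 per buffer. The short stand-alone program below just prints that table; the struct and its values are written out here for illustration and are not taken from the driver.

#include <stdio.h>

int main(void)
{
	static const struct {
		int decimation, hor_dcm, ver_dcm, img_w, img_h, fields;
	} preset[] = {
		{ 1, 1, 1, 720, 288, 2 },
		{ 2, 2, 1, 704, 288, 1 },
		{ 4, 4, 2, 704, 288, 1 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(preset) / sizeof(preset[0]); i++)
		printf("decimation=%d: %dx%d coded per field, %d field(s) per buffer\n",
		       preset[i].decimation,
		       preset[i].img_w / preset[i].hor_dcm,
		       preset[i].img_h / preset[i].ver_dcm,
		       preset[i].fields);
	return 0;
}
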
-
-void
-zoran_open_init_params (struct zoran *zr)
-{
- int i;
-
- /* User must explicitly set a window */
- zr->overlay_settings.is_set = 0;
- zr->overlay_mask = NULL;
- zr->overlay_active = ZORAN_FREE;
-
- zr->v4l_memgrab_active = 0;
- zr->v4l_overlay_active = 0;
- zr->v4l_grab_frame = NO_GRAB_ACTIVE;
- zr->v4l_grab_seq = 0;
- zr->v4l_settings.width = 192;
- zr->v4l_settings.height = 144;
- zr->v4l_settings.format = &zoran_formats[7]; /* YUY2 - YUV-4:2:2 packed */
- zr->v4l_settings.bytesperline =
- zr->v4l_settings.width *
- ((zr->v4l_settings.format->depth + 7) / 8);
-
- /* DMA ring stuff for V4L */
- zr->v4l_pend_tail = 0;
- zr->v4l_pend_head = 0;
- zr->v4l_sync_tail = 0;
- zr->v4l_buffers.active = ZORAN_FREE;
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- zr->v4l_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */
- }
- zr->v4l_buffers.allocated = 0;
-
- for (i = 0; i < BUZ_MAX_FRAME; i++) {
- zr->jpg_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */
- }
- zr->jpg_buffers.active = ZORAN_FREE;
- zr->jpg_buffers.allocated = 0;
- /* Set necessary params and call zoran_check_jpg_settings to set the defaults */
- zr->jpg_settings.decimation = 1;
- zr->jpg_settings.jpg_comp.quality = 50; /* default compression factor 8 */
- if (zr->card.type != BUZ)
- zr->jpg_settings.odd_even = 1;
- else
- zr->jpg_settings.odd_even = 0;
- zr->jpg_settings.jpg_comp.APPn = 0;
- zr->jpg_settings.jpg_comp.APP_len = 0; /* No APPn marker */
- memset(zr->jpg_settings.jpg_comp.APP_data, 0,
- sizeof(zr->jpg_settings.jpg_comp.APP_data));
- zr->jpg_settings.jpg_comp.COM_len = 0; /* No COM marker */
- memset(zr->jpg_settings.jpg_comp.COM_data, 0,
- sizeof(zr->jpg_settings.jpg_comp.COM_data));
- zr->jpg_settings.jpg_comp.jpeg_markers =
- V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT;
- i = zoran_check_jpg_settings(zr, &zr->jpg_settings, 0);
- if (i)
- dprintk(1, KERN_ERR "%s: %s internal error\n",
- ZR_DEVNAME(zr), __func__);
-
- clear_interrupt_counters(zr);
- zr->testing = 0;
-}
-
-static void test_interrupts (struct zoran *zr)
-{
- DEFINE_WAIT(wait);
- int timeout, icr;
-
- clear_interrupt_counters(zr);
-
- zr->testing = 1;
- icr = btread(ZR36057_ICR);
- btwrite(0x78000000 | ZR36057_ICR_IntPinEn, ZR36057_ICR);
- prepare_to_wait(&zr->test_q, &wait, TASK_INTERRUPTIBLE);
- timeout = schedule_timeout(HZ);
- finish_wait(&zr->test_q, &wait);
- btwrite(0, ZR36057_ICR);
- btwrite(0x78000000, ZR36057_ISR);
- zr->testing = 0;
- dprintk(5, KERN_INFO "%s: Testing interrupts...\n", ZR_DEVNAME(zr));
- if (timeout) {
- dprintk(1, ": time spent: %d\n", 1 * HZ - timeout);
- }
- if (zr36067_debug > 1)
- print_interrupts(zr);
- btwrite(icr, ZR36057_ICR);
-}
-
-static int zr36057_init (struct zoran *zr)
-{
- int j, err;
-
- dprintk(1,
- KERN_INFO
- "%s: %s - initializing card[%d], zr=%p\n",
- ZR_DEVNAME(zr), __func__, zr->id, zr);
-
- /* default setup of all parameters which will persist between opens */
- zr->user = 0;
-
- init_waitqueue_head(&zr->v4l_capq);
- init_waitqueue_head(&zr->jpg_capq);
- init_waitqueue_head(&zr->test_q);
- zr->jpg_buffers.allocated = 0;
- zr->v4l_buffers.allocated = 0;
-
- zr->vbuf_base = (void *) vidmem;
- zr->vbuf_width = 0;
- zr->vbuf_height = 0;
- zr->vbuf_depth = 0;
- zr->vbuf_bytesperline = 0;
-
- /* Avoid nonsense settings from user for default input/norm */
- if (default_norm < 0 || default_norm > 2)
- default_norm = 0;
- if (default_norm == 0) {
- zr->norm = V4L2_STD_PAL;
- zr->timing = zr->card.tvn[0];
- } else if (default_norm == 1) {
- zr->norm = V4L2_STD_NTSC;
- zr->timing = zr->card.tvn[1];
- } else {
- zr->norm = V4L2_STD_SECAM;
- zr->timing = zr->card.tvn[2];
- }
- if (zr->timing == NULL) {
- dprintk(1,
- KERN_WARNING
- "%s: %s - default TV standard not supported by hardware. PAL will be used.\n",
- ZR_DEVNAME(zr), __func__);
- zr->norm = V4L2_STD_PAL;
- zr->timing = zr->card.tvn[0];
- }
-
- if (default_input > zr->card.inputs-1) {
- dprintk(1,
- KERN_WARNING
- "%s: default_input value %d out of range (0-%d)\n",
- ZR_DEVNAME(zr), default_input, zr->card.inputs-1);
- default_input = 0;
- }
- zr->input = default_input;
-
- /* default setup (will be repeated at every open) */
- zoran_open_init_params(zr);
-
- /* allocate memory *before* doing anything to the hardware
- * in case allocation fails */
- zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL);
- zr->video_dev = video_device_alloc();
- if (!zr->stat_com || !zr->video_dev) {
- dprintk(1,
- KERN_ERR
- "%s: %s - kmalloc (STAT_COM) failed\n",
- ZR_DEVNAME(zr), __func__);
- err = -ENOMEM;
- goto exit_free;
- }
- for (j = 0; j < BUZ_NUM_STAT_COM; j++) {
- zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */
- }
-
- /*
- * Now add the template and register the device unit.
- */
- *zr->video_dev = zoran_template;
- zr->video_dev->v4l2_dev = &zr->v4l2_dev;
- zr->video_dev->lock = &zr->lock;
- strscpy(zr->video_dev->name, ZR_DEVNAME(zr), sizeof(zr->video_dev->name));
- /* It's not a mem2mem device, but you can both capture and output from
- one and the same device. This should really be split up into two
- device nodes, but that's a job for another day. */
- zr->video_dev->vfl_dir = VFL_DIR_M2M;
- err = video_register_device(zr->video_dev, VFL_TYPE_GRABBER, video_nr[zr->id]);
- if (err < 0)
- goto exit_free;
- video_set_drvdata(zr->video_dev, zr);
-
- zoran_init_hardware(zr);
- if (zr36067_debug > 2)
- detect_guest_activity(zr);
- test_interrupts(zr);
- if (!pass_through) {
- decoder_call(zr, video, s_stream, 0);
- encoder_call(zr, video, s_routing, 2, 0, 0);
- }
-
- zr->zoran_proc = NULL;
- zr->initialized = 1;
- return 0;
-
-exit_free:
- kfree(zr->stat_com);
- kfree(zr->video_dev);
- return err;
-}
-
-static void zoran_remove(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct zoran *zr = to_zoran(v4l2_dev);
-
- if (!zr->initialized)
- goto exit_free;
-
- /* unregister videocodec bus */
- if (zr->codec) {
- struct videocodec_master *master = zr->codec->master_data;
-
- videocodec_detach(zr->codec);
- kfree(master);
- }
- if (zr->vfe) {
- struct videocodec_master *master = zr->vfe->master_data;
-
- videocodec_detach(zr->vfe);
- kfree(master);
- }
-
- /* unregister i2c bus */
- zoran_unregister_i2c(zr);
- /* disable PCI bus-mastering */
- zoran_set_pci_master(zr, 0);
- /* put chip into reset */
- btwrite(0, ZR36057_SPGPPCR);
- free_irq(zr->pci_dev->irq, zr);
- /* unmap and free memory */
- kfree(zr->stat_com);
- zoran_proc_cleanup(zr);
- iounmap(zr->zr36057_mem);
- pci_disable_device(zr->pci_dev);
- video_unregister_device(zr->video_dev);
-exit_free:
- v4l2_ctrl_handler_free(&zr->hdl);
- v4l2_device_unregister(&zr->v4l2_dev);
- kfree(zr);
-}
-
-void
-zoran_vdev_release (struct video_device *vdev)
-{
- kfree(vdev);
-}
-
-static struct videocodec_master *zoran_setup_videocodec(struct zoran *zr,
- int type)
-{
- struct videocodec_master *m = NULL;
-
- m = kmalloc(sizeof(struct videocodec_master), GFP_KERNEL);
- if (!m) {
- dprintk(1, KERN_ERR "%s: %s - no memory\n",
- ZR_DEVNAME(zr), __func__);
- return m;
- }
-
-	/* magic and type are unused for the master struct; they only make
-	   sense for codec structs.
-	   In the past, .type was initialized to the old V4L1 .hardware
-	   value, VID_HARDWARE_ZR36067.
-	 */
- m->magic = 0L;
- m->type = 0;
-
- m->flags = CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER;
- strscpy(m->name, ZR_DEVNAME(zr), sizeof(m->name));
- m->data = zr;
-
- switch (type)
- {
- case CODEC_TYPE_ZR36060:
- m->readreg = zr36060_read;
- m->writereg = zr36060_write;
- m->flags |= CODEC_FLAG_JPEG | CODEC_FLAG_VFE;
- break;
- case CODEC_TYPE_ZR36050:
- m->readreg = zr36050_read;
- m->writereg = zr36050_write;
- m->flags |= CODEC_FLAG_JPEG;
- break;
- case CODEC_TYPE_ZR36016:
- m->readreg = zr36016_read;
- m->writereg = zr36016_write;
- m->flags |= CODEC_FLAG_VFE;
- break;
- }
-
- return m;
-}
-
-static void zoran_subdev_notify(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- struct zoran *zr = to_zoran(sd->v4l2_dev);
-
- /* Bt819 needs to reset its FIFO buffer using #FRST pin and
- LML33 card uses GPIO(7) for that. */
- if (cmd == BT819_FIFO_RESET_LOW)
- GPIO(zr, 7, 0);
- else if (cmd == BT819_FIFO_RESET_HIGH)
- GPIO(zr, 7, 1);
-}
-
-/*
- * Scan for a Buz card (actually for the PCI controller ZR36057),
- * request the irq and map the io memory
- */
-static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- unsigned char latency, need_latency;
- struct zoran *zr;
- int result;
- struct videocodec_master *master_vfe = NULL;
- struct videocodec_master *master_codec = NULL;
- int card_num;
- char *codec_name, *vfe_name;
- unsigned int nr;
-
-
- nr = zoran_num++;
- if (nr >= BUZ_MAX) {
- dprintk(1, KERN_ERR "%s: driver limited to %d card(s) maximum\n",
- ZORAN_NAME, BUZ_MAX);
- return -ENOENT;
- }
-
- zr = kzalloc(sizeof(struct zoran), GFP_KERNEL);
- if (!zr) {
- dprintk(1, KERN_ERR "%s: %s - kzalloc failed\n",
- ZORAN_NAME, __func__);
- return -ENOMEM;
- }
- zr->v4l2_dev.notify = zoran_subdev_notify;
- if (v4l2_device_register(&pdev->dev, &zr->v4l2_dev))
- goto zr_free_mem;
- zr->pci_dev = pdev;
- zr->id = nr;
- snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id);
- if (v4l2_ctrl_handler_init(&zr->hdl, 10))
- goto zr_unreg;
- zr->v4l2_dev.ctrl_handler = &zr->hdl;
- spin_lock_init(&zr->spinlock);
- mutex_init(&zr->lock);
- if (pci_enable_device(pdev))
- goto zr_unreg;
- zr->revision = zr->pci_dev->revision;
-
- dprintk(1,
- KERN_INFO
- "%s: Zoran ZR360%c7 (rev %d), irq: %d, memory: 0x%08llx\n",
- ZR_DEVNAME(zr), zr->revision < 2 ? '5' : '6', zr->revision,
- zr->pci_dev->irq, (uint64_t)pci_resource_start(zr->pci_dev, 0));
- if (zr->revision >= 2) {
- dprintk(1,
- KERN_INFO
- "%s: Subsystem vendor=0x%04x id=0x%04x\n",
- ZR_DEVNAME(zr), zr->pci_dev->subsystem_vendor,
- zr->pci_dev->subsystem_device);
- }
-
- /* Use auto-detected card type? */
- if (card[nr] == -1) {
- if (zr->revision < 2) {
- dprintk(1,
- KERN_ERR
- "%s: No card type specified, please use the card=X module parameter\n",
- ZR_DEVNAME(zr));
- dprintk(1,
- KERN_ERR
- "%s: It is not possible to auto-detect ZR36057 based cards\n",
- ZR_DEVNAME(zr));
- goto zr_unreg;
- }
-
- card_num = ent->driver_data;
- if (card_num >= NUM_CARDS) {
- dprintk(1,
- KERN_ERR
- "%s: Unknown card, try specifying card=X module parameter\n",
- ZR_DEVNAME(zr));
- goto zr_unreg;
- }
- dprintk(3,
- KERN_DEBUG
- "%s: %s() - card %s detected\n",
- ZR_DEVNAME(zr), __func__, zoran_cards[card_num].name);
- } else {
- card_num = card[nr];
- if (card_num >= NUM_CARDS || card_num < 0) {
- dprintk(1,
- KERN_ERR
- "%s: User specified card type %d out of range (0 .. %d)\n",
- ZR_DEVNAME(zr), card_num, NUM_CARDS - 1);
- goto zr_unreg;
- }
- }
-
-	/* even though we make this a non-pointer and thus
-	 * theoretically allow for making changes to this struct
-	 * on a per-card basis at runtime, this is strongly
-	 * discouraged. This structure is intended to keep
-	 * general card information, not settings or state */
- zr->card = zoran_cards[card_num];
- snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)),
- "%s[%u]", zr->card.name, zr->id);
-
- zr->zr36057_mem = pci_ioremap_bar(zr->pci_dev, 0);
- if (!zr->zr36057_mem) {
- dprintk(1, KERN_ERR "%s: %s() - ioremap failed\n",
- ZR_DEVNAME(zr), __func__);
- goto zr_unreg;
- }
-
- result = request_irq(zr->pci_dev->irq, zoran_irq,
- IRQF_SHARED, ZR_DEVNAME(zr), zr);
- if (result < 0) {
- if (result == -EINVAL) {
- dprintk(1,
- KERN_ERR
- "%s: %s - bad irq number or handler\n",
- ZR_DEVNAME(zr), __func__);
- } else if (result == -EBUSY) {
- dprintk(1,
- KERN_ERR
- "%s: %s - IRQ %d busy, change your PnP config in BIOS\n",
- ZR_DEVNAME(zr), __func__, zr->pci_dev->irq);
- } else {
- dprintk(1,
- KERN_ERR
- "%s: %s - can't assign irq, error code %d\n",
- ZR_DEVNAME(zr), __func__, result);
- }
- goto zr_unmap;
- }
-
- /* set PCI latency timer */
- pci_read_config_byte(zr->pci_dev, PCI_LATENCY_TIMER,
- &latency);
- need_latency = zr->revision > 1 ? 32 : 48;
- if (latency != need_latency) {
- dprintk(2, KERN_INFO "%s: Changing PCI latency from %d to %d\n",
- ZR_DEVNAME(zr), latency, need_latency);
- pci_write_config_byte(zr->pci_dev, PCI_LATENCY_TIMER,
- need_latency);
- }
-
- zr36057_restart(zr);
- /* i2c */
- dprintk(2, KERN_INFO "%s: Initializing i2c bus...\n",
- ZR_DEVNAME(zr));
-
- if (zoran_register_i2c(zr) < 0) {
- dprintk(1, KERN_ERR "%s: %s - can't initialize i2c bus\n",
- ZR_DEVNAME(zr), __func__);
- goto zr_free_irq;
- }
-
- zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev,
- &zr->i2c_adapter, zr->card.i2c_decoder,
- 0, zr->card.addrs_decoder);
-
- if (zr->card.i2c_encoder)
- zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev,
- &zr->i2c_adapter, zr->card.i2c_encoder,
- 0, zr->card.addrs_encoder);
-
- dprintk(2,
- KERN_INFO "%s: Initializing videocodec bus...\n",
- ZR_DEVNAME(zr));
-
- if (zr->card.video_codec) {
- codec_name = codecid_to_modulename(zr->card.video_codec);
- if (codec_name) {
- result = request_module(codec_name);
- if (result) {
- dprintk(1,
- KERN_ERR
- "%s: failed to load modules %s: %d\n",
- ZR_DEVNAME(zr), codec_name, result);
- }
- }
- }
- if (zr->card.video_vfe) {
- vfe_name = codecid_to_modulename(zr->card.video_vfe);
- if (vfe_name) {
- result = request_module(vfe_name);
- if (result < 0) {
- dprintk(1,
- KERN_ERR
- "%s: failed to load modules %s: %d\n",
- ZR_DEVNAME(zr), vfe_name, result);
- }
- }
- }
-
- /* reset JPEG codec */
- jpeg_codec_sleep(zr, 1);
- jpeg_codec_reset(zr);
- /* video bus enabled */
- /* display codec revision */
- if (zr->card.video_codec != 0) {
- master_codec = zoran_setup_videocodec(zr, zr->card.video_codec);
- if (!master_codec)
- goto zr_unreg_i2c;
- zr->codec = videocodec_attach(master_codec);
- if (!zr->codec) {
- dprintk(1, KERN_ERR "%s: %s - no codec found\n",
- ZR_DEVNAME(zr), __func__);
- goto zr_free_codec;
- }
- if (zr->codec->type != zr->card.video_codec) {
- dprintk(1, KERN_ERR "%s: %s - wrong codec\n",
- ZR_DEVNAME(zr), __func__);
- goto zr_detach_codec;
- }
- }
- if (zr->card.video_vfe != 0) {
- master_vfe = zoran_setup_videocodec(zr, zr->card.video_vfe);
- if (!master_vfe)
- goto zr_detach_codec;
- zr->vfe = videocodec_attach(master_vfe);
- if (!zr->vfe) {
- dprintk(1, KERN_ERR "%s: %s - no VFE found\n",
- ZR_DEVNAME(zr), __func__);
- goto zr_free_vfe;
- }
- if (zr->vfe->type != zr->card.video_vfe) {
-			dprintk(1, KERN_ERR "%s: %s - wrong VFE\n",
- ZR_DEVNAME(zr), __func__);
- goto zr_detach_vfe;
- }
- }
-
- /* take care of Natoma chipset and a revision 1 zr36057 */
- if ((pci_pci_problems & PCIPCI_NATOMA) && zr->revision <= 1) {
- zr->jpg_buffers.need_contiguous = 1;
- dprintk(1, KERN_INFO
- "%s: ZR36057/Natoma bug, max. buffer size is 128K\n",
- ZR_DEVNAME(zr));
- }
-
- if (zr36057_init(zr) < 0)
- goto zr_detach_vfe;
-
- zoran_proc_init(zr);
-
- return 0;
-
-zr_detach_vfe:
- videocodec_detach(zr->vfe);
-zr_free_vfe:
- kfree(master_vfe);
-zr_detach_codec:
- videocodec_detach(zr->codec);
-zr_free_codec:
- kfree(master_codec);
-zr_unreg_i2c:
- zoran_unregister_i2c(zr);
-zr_free_irq:
- btwrite(0, ZR36057_SPGPPCR);
- free_irq(zr->pci_dev->irq, zr);
-zr_unmap:
- iounmap(zr->zr36057_mem);
-zr_unreg:
- v4l2_ctrl_handler_free(&zr->hdl);
- v4l2_device_unregister(&zr->v4l2_dev);
-zr_free_mem:
- kfree(zr);
-
- return -ENODEV;
-}
-
-static struct pci_driver zoran_driver = {
- .name = "zr36067",
- .id_table = zr36067_pci_tbl,
- .probe = zoran_probe,
- .remove = zoran_remove,
-};
-
-static int __init zoran_init(void)
-{
- int res;
-
- printk(KERN_INFO "Zoran MJPEG board driver version %s\n",
- ZORAN_VERSION);
-
- /* check the parameters we have been given, adjust if necessary */
- if (v4l_nbufs < 2)
- v4l_nbufs = 2;
- if (v4l_nbufs > VIDEO_MAX_FRAME)
- v4l_nbufs = VIDEO_MAX_FRAME;
-	/* The user specifies these in KB; we want them in bytes
-	 * (and page-aligned) */
- v4l_bufsize = PAGE_ALIGN(v4l_bufsize * 1024);
- if (v4l_bufsize < 32768)
- v4l_bufsize = 32768;
- /* 2 MB is arbitrary but sufficient for the maximum possible images */
- if (v4l_bufsize > 2048 * 1024)
- v4l_bufsize = 2048 * 1024;
- if (jpg_nbufs < 4)
- jpg_nbufs = 4;
- if (jpg_nbufs > BUZ_MAX_FRAME)
- jpg_nbufs = BUZ_MAX_FRAME;
- jpg_bufsize = PAGE_ALIGN(jpg_bufsize * 1024);
- if (jpg_bufsize < 8192)
- jpg_bufsize = 8192;
- if (jpg_bufsize > (512 * 1024))
- jpg_bufsize = 512 * 1024;
- /* Use parameter for vidmem or try to find a video card */
- if (vidmem) {
- dprintk(1,
- KERN_INFO
- "%s: Using supplied video memory base address @ 0x%lx\n",
- ZORAN_NAME, vidmem);
- }
-
- /* some mainboards might not do PCI-PCI data transfer well */
- if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL|PCIPCI_ALIMAGIK)) {
- dprintk(1,
- KERN_WARNING
- "%s: chipset does not support reliable PCI-PCI DMA\n",
- ZORAN_NAME);
- }
-
- res = pci_register_driver(&zoran_driver);
- if (res) {
- dprintk(1,
- KERN_ERR
- "%s: Unable to register ZR36057 driver\n",
- ZORAN_NAME);
- return res;
- }
-
- return 0;
-}
-
-static void __exit zoran_exit(void)
-{
- pci_unregister_driver(&zoran_driver);
-}
-
-module_init(zoran_init);
-module_exit(zoran_exit);
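
The parameter checking in zoran_init() above converts the KB-sized v4l_bufsize and jpg_bufsize module parameters to page-aligned byte counts and clamps them. The following stand-alone sketch reproduces that arithmetic for the V4L buffer size only; PAGE_SIZE is assumed to be 4096 here purely for the example, and the EX_* names and helper are invented for illustration.

#include <stdio.h>

#define EX_PAGE_SIZE     4096UL
#define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

static unsigned long normalize_v4l_bufsize(unsigned long kb)
{
	unsigned long bytes = EX_PAGE_ALIGN(kb * 1024);

	if (bytes < 32768)		/* lower bound used by the driver */
		bytes = 32768;
	if (bytes > 2048 * 1024)	/* 2 MB upper bound */
		bytes = 2048 * 1024;
	return bytes;
}

int main(void)
{
	printf("864 KB  -> %lu bytes\n", normalize_v4l_bufsize(864));
	printf("10 KB   -> %lu bytes\n", normalize_v4l_bufsize(10));
	printf("9000 KB -> %lu bytes\n", normalize_v4l_bufsize(9000));
	return 0;
}
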
diff --git a/drivers/staging/media/zoran/zoran_card.h b/drivers/staging/media/zoran/zoran_card.h
deleted file mode 100644
index 0cdb7d34926d..000000000000
--- a/drivers/staging/media/zoran/zoran_card.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Zoran zr36057/zr36067 PCI controller driver, for the
- * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
- * Media Labs LML33/LML33R10.
- *
- * This part handles card-specific data and detection
- *
- * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
- *
- * Currently maintained by:
- * Ronald Bultje <rbultje@ronald.bitfreak.net>
- * Laurent Pinchart <laurent.pinchart@skynet.be>
- * Mailinglist <mjpeg-users@lists.sf.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ZORAN_CARD_H__
-#define __ZORAN_CARD_H__
-
-extern int zr36067_debug;
-
-#define dprintk(num, format, args...) \
- do { \
- if (zr36067_debug >= num) \
- printk(format, ##args); \
- } while (0)
-
-/* Anybody who uses more than four? */
-#define BUZ_MAX 4
-
-extern const struct video_device zoran_template;
-
-extern int zoran_check_jpg_settings(struct zoran *zr,
- struct zoran_jpg_settings *settings,
- int try);
-extern void zoran_open_init_params(struct zoran *zr);
-extern void zoran_vdev_release(struct video_device *vdev);
-
-void zr36016_write(struct videocodec *codec, u16 reg, u32 val);
-
-#endif /* __ZORAN_CARD_H__ */
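
The dprintk() macro above gates its output on the zr36067_debug level set by the "debug" module parameter. Below is a small user-space model of that gating, with printf standing in for printk; everything in it is invented for illustration and, like the kernel macro's ##args, it relies on the GNU ## variadic extension.

#include <stdio.h>

static int zr36067_debug = 1;		/* mirrors the "debug" module parameter */

#define dprintk(num, fmt, ...)				\
	do {						\
		if (zr36067_debug >= (num))		\
			printf(fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	dprintk(1, "level 1: printed at the default debug=1\n");
	dprintk(5, "level 5: only printed when debug is raised to 5 or more\n");
	return 0;
}
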
diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c
deleted file mode 100644
index 22b27632762d..000000000000
--- a/drivers/staging/media/zoran/zoran_device.c
+++ /dev/null
@@ -1,1619 +0,0 @@
-/*
- * Zoran zr36057/zr36067 PCI controller driver, for the
- * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
- * Media Labs LML33/LML33R10.
- *
- * This part handles device access (PCI/I2C/codec/...)
- *
- * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
- *
- * Currently maintained by:
- * Ronald Bultje <rbultje@ronald.bitfreak.net>
- * Laurent Pinchart <laurent.pinchart@skynet.be>
- * Mailinglist <mjpeg-users@lists.sf.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/ktime.h>
-#include <linux/sched/signal.h>
-
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
-#include <linux/spinlock.h>
-#include <linux/sem.h>
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-
-#include <asm/byteorder.h>
-#include <asm/io.h>
-
-#include "videocodec.h"
-#include "zoran.h"
-#include "zoran_device.h"
-#include "zoran_card.h"
-
-#define IRQ_MASK ( ZR36057_ISR_GIRQ0 | \
- ZR36057_ISR_GIRQ1 | \
- ZR36057_ISR_JPEGRepIRQ )
-
-static bool lml33dpath;	/* default = 0
-				 * 1 will use the digital path in capture
-				 * mode instead of the analog one. It can
-				 * be used for picture adjustments with a
-				 * tool like xawtv while watching the image
-				 * on a TV monitor connected to the output.
-				 * However, due to the absence of a 75 Ohm
-				 * load on the Bt819 input, there will be
-				 * some image imperfections */
-
-module_param(lml33dpath, bool, 0644);
-MODULE_PARM_DESC(lml33dpath,
- "Use digital path capture mode (on LML33 cards)");
-
-static void
-zr36057_init_vfe (struct zoran *zr);
-
-/*
- * General Purpose I/O and Guest bus access
- */
-
-/*
- * This is a bit tricky. When a board lacks a GPIO function, the corresponding
- * GPIO bit number in the card_info structure is set to -1.
- */
-
-void
-GPIO (struct zoran *zr,
- int bit,
- unsigned int value)
-{
- u32 reg;
- u32 mask;
-
- /* Make sure the bit number is legal
- * A bit number of -1 (lacking) gives a mask of 0,
- * making it harmless */
- mask = (1 << (24 + bit)) & 0xff000000;
- reg = btread(ZR36057_GPPGCR1) & ~mask;
- if (value) {
- reg |= mask;
- }
- btwrite(reg, ZR36057_GPPGCR1);
- udelay(1);
-}
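
A worked example of the mask arithmetic in GPIO() above (user-space, illustrative only): bit 2 selects GPIO line 26 of ZR36057_GPPGCR1, while the -1 used for missing GPIOs in the card tables shifts below bit 24 and is masked away entirely, making the call a no-op. The chosen bit values below are arbitrary.

#include <stdio.h>

int main(void)
{
	int bits[] = { 2, 7, -1 };
	unsigned int i;

	for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
		unsigned int mask = (1u << (24 + bits[i])) & 0xff000000u;

		printf("bit %2d -> mask 0x%08x%s\n", bits[i], mask,
		       mask ? "" : " (harmless no-op)");
	}
	return 0;
}
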
-
-/*
- * Wait until the post office is no longer busy
- */
-
-int
-post_office_wait (struct zoran *zr)
-{
- u32 por;
-
-// while (((por = btread(ZR36057_POR)) & (ZR36057_POR_POPen | ZR36057_POR_POTime)) == ZR36057_POR_POPen) {
- while ((por = btread(ZR36057_POR)) & ZR36057_POR_POPen) {
- /* wait for something to happen */
- }
- if ((por & ZR36057_POR_POTime) && !zr->card.gws_not_connected) {
-		/* On the LML33/Buz the /GWS line is not connected, so the timeout bit is always set */
- dprintk(1, KERN_INFO "%s: pop timeout %08x\n", ZR_DEVNAME(zr),
- por);
- return -1;
- }
-
- return 0;
-}
-
-int
-post_office_write (struct zoran *zr,
- unsigned int guest,
- unsigned int reg,
- unsigned int value)
-{
- u32 por;
-
- por =
- ZR36057_POR_PODir | ZR36057_POR_POTime | ((guest & 7) << 20) |
- ((reg & 7) << 16) | (value & 0xFF);
- btwrite(por, ZR36057_POR);
-
- return post_office_wait(zr);
-}
-
-int
-post_office_read (struct zoran *zr,
- unsigned int guest,
- unsigned int reg)
-{
- u32 por;
-
- por = ZR36057_POR_POTime | ((guest & 7) << 20) | ((reg & 7) << 16);
- btwrite(por, ZR36057_POR);
- if (post_office_wait(zr) < 0) {
- return -1;
- }
-
- return btread(ZR36057_POR) & 0xFF;
-}
-
-/*
- * detect guests
- */
-
-static void
-dump_guests (struct zoran *zr)
-{
- if (zr36067_debug > 2) {
- int i, guest[8];
-
- for (i = 1; i < 8; i++) { // Don't read jpeg codec here
- guest[i] = post_office_read(zr, i, 0);
- }
-
- printk(KERN_INFO "%s: Guests: %*ph\n",
- ZR_DEVNAME(zr), 8, guest);
- }
-}
-
-void
-detect_guest_activity (struct zoran *zr)
-{
- int timeout, i, j, res, guest[8], guest0[8], change[8][3];
- ktime_t t0, t1;
-
- dump_guests(zr);
- printk(KERN_INFO "%s: Detecting guests activity, please wait...\n",
- ZR_DEVNAME(zr));
- for (i = 1; i < 8; i++) { // Don't read jpeg codec here
- guest0[i] = guest[i] = post_office_read(zr, i, 0);
- }
-
- timeout = 0;
- j = 0;
- t0 = ktime_get();
- while (timeout < 10000) {
- udelay(10);
- timeout++;
- for (i = 1; (i < 8) && (j < 8); i++) {
- res = post_office_read(zr, i, 0);
- if (res != guest[i]) {
- t1 = ktime_get();
- change[j][0] = ktime_to_us(ktime_sub(t1, t0));
- t0 = t1;
- change[j][1] = i;
- change[j][2] = res;
- j++;
- guest[i] = res;
- }
- }
- if (j >= 8)
- break;
- }
-
- printk(KERN_INFO "%s: Guests: %*ph\n", ZR_DEVNAME(zr), 8, guest0);
-
- if (j == 0) {
- printk(KERN_INFO "%s: No activity detected.\n", ZR_DEVNAME(zr));
- return;
- }
- for (i = 0; i < j; i++) {
- printk(KERN_INFO "%s: %6d: %d => 0x%02x\n", ZR_DEVNAME(zr),
- change[i][0], change[i][1], change[i][2]);
- }
-}
-
-/*
- * JPEG Codec access
- */
-
-void
-jpeg_codec_sleep (struct zoran *zr,
- int sleep)
-{
- GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_SLEEP], !sleep);
- if (!sleep) {
- dprintk(3,
- KERN_DEBUG
- "%s: jpeg_codec_sleep() - wake GPIO=0x%08x\n",
- ZR_DEVNAME(zr), btread(ZR36057_GPPGCR1));
- udelay(500);
- } else {
- dprintk(3,
- KERN_DEBUG
- "%s: jpeg_codec_sleep() - sleep GPIO=0x%08x\n",
- ZR_DEVNAME(zr), btread(ZR36057_GPPGCR1));
- udelay(2);
- }
-}
-
-int
-jpeg_codec_reset (struct zoran *zr)
-{
- /* Take the codec out of sleep */
- jpeg_codec_sleep(zr, 0);
-
- if (zr->card.gpcs[GPCS_JPEG_RESET] != 0xff) {
- post_office_write(zr, zr->card.gpcs[GPCS_JPEG_RESET], 0,
- 0);
- udelay(2);
- } else {
- GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 0);
- udelay(2);
- GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 1);
- udelay(2);
- }
-
- return 0;
-}
-
-/*
- * Set the registers for the size we have specified. Don't bother
- * trying to understand this without the ZR36057 manual in front of
- * you [AC].
- *
- * PS: The manual is free for download in .pdf format from
- * www.zoran.com - nicely done those folks.
- */
-
-static void
-zr36057_adjust_vfe (struct zoran *zr,
- enum zoran_codec_mode mode)
-{
- u32 reg;
-
- switch (mode) {
- case BUZ_MODE_MOTION_DECOMPRESS:
- btand(~ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR);
- reg = btread(ZR36057_VFEHCR);
- if ((reg & (1 << 10)) && zr->card.type != LML33R10) {
- reg += ((1 << 10) | 1);
- }
- btwrite(reg, ZR36057_VFEHCR);
- break;
- case BUZ_MODE_MOTION_COMPRESS:
- case BUZ_MODE_IDLE:
- default:
- if ((zr->norm & V4L2_STD_NTSC) ||
- (zr->card.type == LML33R10 &&
- (zr->norm & V4L2_STD_PAL)))
- btand(~ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR);
- else
- btor(ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR);
- reg = btread(ZR36057_VFEHCR);
- if (!(reg & (1 << 10)) && zr->card.type != LML33R10) {
- reg -= ((1 << 10) | 1);
- }
- btwrite(reg, ZR36057_VFEHCR);
- break;
- }
-}
-
-/*
- * set geometry
- */
-
-static void
-zr36057_set_vfe (struct zoran *zr,
- int video_width,
- int video_height,
- const struct zoran_format *format)
-{
- struct tvnorm *tvn;
- unsigned HStart, HEnd, VStart, VEnd;
- unsigned DispMode;
- unsigned VidWinWid, VidWinHt;
- unsigned hcrop1, hcrop2, vcrop1, vcrop2;
- unsigned Wa, We, Ha, He;
- unsigned X, Y, HorDcm, VerDcm;
- u32 reg;
- unsigned mask_line_size;
-
- tvn = zr->timing;
-
- Wa = tvn->Wa;
- Ha = tvn->Ha;
-
- dprintk(2, KERN_INFO "%s: set_vfe() - width = %d, height = %d\n",
- ZR_DEVNAME(zr), video_width, video_height);
-
- if (video_width < BUZ_MIN_WIDTH ||
- video_height < BUZ_MIN_HEIGHT ||
- video_width > Wa || video_height > Ha) {
- dprintk(1, KERN_ERR "%s: set_vfe: w=%d h=%d not valid\n",
- ZR_DEVNAME(zr), video_width, video_height);
- return;
- }
-
- /**** zr36057 ****/
-
- /* horizontal */
- VidWinWid = video_width;
- X = DIV_ROUND_UP(VidWinWid * 64, tvn->Wa);
- We = (VidWinWid * 64) / X;
- HorDcm = 64 - X;
- hcrop1 = 2 * ((tvn->Wa - We) / 4);
- hcrop2 = tvn->Wa - We - hcrop1;
- HStart = tvn->HStart ? tvn->HStart : 1;
-	/* (Ronald) Original comment:
-	 * "| 1 Doesn't have any effect, tested on both a DC10 and a DC10+"
-	 * That is false: it inverts the chroma values on the LML33R10 (Cr is
-	 * suddenly shown as Cb and vice versa, a really cool effect if you
-	 * want to see blue faces, not useful otherwise). So don't use |1.
-	 * However, the DC10 has '0' as HStart but does need the |1, so we
-	 * use a dirty check...
-	 */
- HEnd = HStart + tvn->Wa - 1;
- HStart += hcrop1;
- HEnd -= hcrop2;
- reg = ((HStart & ZR36057_VFEHCR_Hmask) << ZR36057_VFEHCR_HStart)
- | ((HEnd & ZR36057_VFEHCR_Hmask) << ZR36057_VFEHCR_HEnd);
- if (zr->card.vfe_pol.hsync_pol)
- reg |= ZR36057_VFEHCR_HSPol;
- btwrite(reg, ZR36057_VFEHCR);
-
- /* Vertical */
- DispMode = !(video_height > BUZ_MAX_HEIGHT / 2);
- VidWinHt = DispMode ? video_height : video_height / 2;
- Y = DIV_ROUND_UP(VidWinHt * 64 * 2, tvn->Ha);
- He = (VidWinHt * 64) / Y;
- VerDcm = 64 - Y;
- vcrop1 = (tvn->Ha / 2 - He) / 2;
- vcrop2 = tvn->Ha / 2 - He - vcrop1;
- VStart = tvn->VStart;
- VEnd = VStart + tvn->Ha / 2; // - 1; FIXME SnapShot times out with -1 in 768*576 on the DC10 - LP
- VStart += vcrop1;
- VEnd -= vcrop2;
- reg = ((VStart & ZR36057_VFEVCR_Vmask) << ZR36057_VFEVCR_VStart)
- | ((VEnd & ZR36057_VFEVCR_Vmask) << ZR36057_VFEVCR_VEnd);
- if (zr->card.vfe_pol.vsync_pol)
- reg |= ZR36057_VFEVCR_VSPol;
- btwrite(reg, ZR36057_VFEVCR);
-
- /* scaler and pixel format */
- reg = 0;
- reg |= (HorDcm << ZR36057_VFESPFR_HorDcm);
- reg |= (VerDcm << ZR36057_VFESPFR_VerDcm);
- reg |= (DispMode << ZR36057_VFESPFR_DispMode);
-	/* RJ: I don't know why the following has to be the opposite
-	 * of the corresponding ZR36060 setting, but only this way
-	 * do we get the correct colors when decompressing to the screen */
- //reg |= ZR36057_VFESPFR_VCLKPol; /**/
- /* RJ: Don't know if that is needed for NTSC also */
- if (!(zr->norm & V4L2_STD_NTSC))
- reg |= ZR36057_VFESPFR_ExtFl; // NEEDED!!!!!!! Wolfgang
- reg |= ZR36057_VFESPFR_TopField;
- if (HorDcm >= 48) {
- reg |= 3 << ZR36057_VFESPFR_HFilter; /* 5 tap filter */
- } else if (HorDcm >= 32) {
- reg |= 2 << ZR36057_VFESPFR_HFilter; /* 4 tap filter */
- } else if (HorDcm >= 16) {
- reg |= 1 << ZR36057_VFESPFR_HFilter; /* 3 tap filter */
- }
- reg |= format->vfespfr;
- btwrite(reg, ZR36057_VFESPFR);
-
- /* display configuration */
- reg = (16 << ZR36057_VDCR_MinPix)
- | (VidWinHt << ZR36057_VDCR_VidWinHt)
- | (VidWinWid << ZR36057_VDCR_VidWinWid);
- if (pci_pci_problems & PCIPCI_TRITON)
- // || zr->revision < 1) // Revision 1 has also Triton support
- reg &= ~ZR36057_VDCR_Triton;
- else
- reg |= ZR36057_VDCR_Triton;
- btwrite(reg, ZR36057_VDCR);
-
- /* (Ronald) don't write this if overlay_mask = NULL */
- if (zr->overlay_mask) {
- /* Write overlay clipping mask data, but don't enable overlay clipping */
-		/* RJ: since this only makes sense on the screen, we use
-		 * zr->overlay_settings.width instead of video_width */
-
- mask_line_size = (BUZ_MAX_WIDTH + 31) / 32;
- reg = virt_to_bus(zr->overlay_mask);
- btwrite(reg, ZR36057_MMTR);
- reg = virt_to_bus(zr->overlay_mask + mask_line_size);
- btwrite(reg, ZR36057_MMBR);
- reg =
- mask_line_size - (zr->overlay_settings.width +
- 31) / 32;
- if (DispMode == 0)
- reg += mask_line_size;
- reg <<= ZR36057_OCR_MaskStride;
- btwrite(reg, ZR36057_OCR);
- }
-
- zr36057_adjust_vfe(zr, zr->codec_mode);
-}
-
-/*
- * Switch overlay on or off
- */
-
-void
-zr36057_overlay (struct zoran *zr,
- int on)
-{
- u32 reg;
-
- if (on) {
- /* do the necessary settings ... */
- btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR); /* switch it off first */
-
- zr36057_set_vfe(zr,
- zr->overlay_settings.width,
- zr->overlay_settings.height,
- zr->overlay_settings.format);
-
- /* Start and length of each line MUST be 4-byte aligned.
-		 * This should already have been checked before the call to this routine.
-		 * All error messages are for internal driver checking only! */
-
- /* video display top and bottom registers */
- reg = (long) zr->vbuf_base +
- zr->overlay_settings.x *
- ((zr->overlay_settings.format->depth + 7) / 8) +
- zr->overlay_settings.y *
- zr->vbuf_bytesperline;
- btwrite(reg, ZR36057_VDTR);
- if (reg & 3)
- dprintk(1,
- KERN_ERR
- "%s: zr36057_overlay() - video_address not aligned\n",
- ZR_DEVNAME(zr));
- if (zr->overlay_settings.height > BUZ_MAX_HEIGHT / 2)
- reg += zr->vbuf_bytesperline;
- btwrite(reg, ZR36057_VDBR);
-
- /* video stride, status, and frame grab register */
- reg = zr->vbuf_bytesperline -
- zr->overlay_settings.width *
- ((zr->overlay_settings.format->depth + 7) / 8);
- if (zr->overlay_settings.height > BUZ_MAX_HEIGHT / 2)
- reg += zr->vbuf_bytesperline;
- if (reg & 3)
- dprintk(1,
- KERN_ERR
- "%s: zr36057_overlay() - video_stride not aligned\n",
- ZR_DEVNAME(zr));
- reg = (reg << ZR36057_VSSFGR_DispStride);
- reg |= ZR36057_VSSFGR_VidOvf; /* clear overflow status */
- btwrite(reg, ZR36057_VSSFGR);
-
- /* Set overlay clipping */
- if (zr->overlay_settings.clipcount > 0)
- btor(ZR36057_OCR_OvlEnable, ZR36057_OCR);
-
- /* ... and switch it on */
- btor(ZR36057_VDCR_VidEn, ZR36057_VDCR);
- } else {
- /* Switch it off */
- btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR);
- }
-}
-
-/*
- * The overlay mask has one bit for each pixel on a scan line,
- * and the maximum window size is BUZ_MAX_WIDTH * BUZ_MAX_HEIGHT pixels.
- */
-
-void write_overlay_mask(struct zoran_fh *fh, struct v4l2_clip *vp, int count)
-{
- struct zoran *zr = fh->zr;
- unsigned mask_line_size = (BUZ_MAX_WIDTH + 31) / 32;
- u32 *mask;
- int x, y, width, height;
- unsigned i, j, k;
-
- /* fill mask with one bits */
- memset(fh->overlay_mask, ~0, mask_line_size * 4 * BUZ_MAX_HEIGHT);
-
- for (i = 0; i < count; ++i) {
- /* pick up local copy of clip */
- x = vp[i].c.left;
- y = vp[i].c.top;
- width = vp[i].c.width;
- height = vp[i].c.height;
-
- /* trim clips that extend beyond the window */
- if (x < 0) {
- width += x;
- x = 0;
- }
- if (y < 0) {
- height += y;
- y = 0;
- }
- if (x + width > fh->overlay_settings.width) {
- width = fh->overlay_settings.width - x;
- }
- if (y + height > fh->overlay_settings.height) {
- height = fh->overlay_settings.height - y;
- }
-
- /* ignore degenerate clips */
- if (height <= 0) {
- continue;
- }
- if (width <= 0) {
- continue;
- }
-
- /* apply clip for each scan line */
- for (j = 0; j < height; ++j) {
- /* reset bit for each pixel */
- /* this can be optimized later if need be */
- mask = fh->overlay_mask + (y + j) * mask_line_size;
- for (k = 0; k < width; ++k) {
- mask[(x + k) / 32] &=
- ~((u32) 1 << (x + k) % 32);
- }
- }
- }
-}
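-
-/*
- * Mask layout example: a clip at x = 40 with width = 8 clears bits 8..15 of
- * the second 32-bit word on every scan line it covers ((40 + k) / 32 == 1,
- * (40 + k) % 32 == 8..15); the cleared bits mark the clipped pixels.
- */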
-
-/* Enable/Disable uncompressed memory grabbing of the 36057 */
-
-void
-zr36057_set_memgrab (struct zoran *zr,
- int mode)
-{
- if (mode) {
- /* We only check SnapShot and not FrameGrab here. SnapShot==1
- * means a capture is already in progress, but FrameGrab==1
-		 * doesn't necessarily mean that. It's more correct to say that a 1
-		 * to 0 transition indicates a completed capture. If a
-		 * capture is pending when capturing is turned off, FrameGrab
- * will be stuck at 1 until capturing is turned back on.
- */
- if (btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SnapShot)
- dprintk(1,
- KERN_WARNING
- "%s: zr36057_set_memgrab(1) with SnapShot on!?\n",
- ZR_DEVNAME(zr));
-
- /* switch on VSync interrupts */
- btwrite(IRQ_MASK, ZR36057_ISR); // Clear Interrupts
- btor(zr->card.vsync_int, ZR36057_ICR); // SW
-
- /* enable SnapShot */
- btor(ZR36057_VSSFGR_SnapShot, ZR36057_VSSFGR);
-
- /* Set zr36057 video front end and enable video */
- zr36057_set_vfe(zr, zr->v4l_settings.width,
- zr->v4l_settings.height,
- zr->v4l_settings.format);
-
- zr->v4l_memgrab_active = 1;
- } else {
- /* switch off VSync interrupts */
- btand(~zr->card.vsync_int, ZR36057_ICR); // SW
-
- zr->v4l_memgrab_active = 0;
- zr->v4l_grab_frame = NO_GRAB_ACTIVE;
-
- /* re-enable grabbing to screen if it was running */
- if (zr->v4l_overlay_active) {
- zr36057_overlay(zr, 1);
- } else {
- btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR);
- btand(~ZR36057_VSSFGR_SnapShot, ZR36057_VSSFGR);
- }
- }
-}
-
-int
-wait_grab_pending (struct zoran *zr)
-{
- unsigned long flags;
-
- /* wait until all pending grabs are finished */
-
- if (!zr->v4l_memgrab_active)
- return 0;
-
- wait_event_interruptible(zr->v4l_capq,
- (zr->v4l_pend_tail == zr->v4l_pend_head));
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- spin_lock_irqsave(&zr->spinlock, flags);
- zr36057_set_memgrab(zr, 0);
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- return 0;
-}
-
-/*****************************************************************************
- * *
- * Set up the Buz-specific MJPEG part *
- * *
- *****************************************************************************/
-
-static inline void
-set_frame (struct zoran *zr,
- int val)
-{
- GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_FRAME], val);
-}
-
-static void
-set_videobus_dir (struct zoran *zr,
- int val)
-{
- switch (zr->card.type) {
- case LML33:
- case LML33R10:
- if (!lml33dpath)
- GPIO(zr, 5, val);
- else
- GPIO(zr, 5, 1);
- break;
- default:
- GPIO(zr, zr->card.gpio[ZR_GPIO_VID_DIR],
- zr->card.gpio_pol[ZR_GPIO_VID_DIR] ? !val : val);
- break;
- }
-}
-
-static void
-init_jpeg_queue (struct zoran *zr)
-{
- int i;
-
- /* re-initialize DMA ring stuff */
- zr->jpg_que_head = 0;
- zr->jpg_dma_head = 0;
- zr->jpg_dma_tail = 0;
- zr->jpg_que_tail = 0;
- zr->jpg_seq_num = 0;
- zr->JPEG_error = 0;
- zr->num_errors = 0;
- zr->jpg_err_seq = 0;
- zr->jpg_err_shift = 0;
- zr->jpg_queued_num = 0;
- for (i = 0; i < zr->jpg_buffers.num_buffers; i++) {
- zr->jpg_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */
- }
- for (i = 0; i < BUZ_NUM_STAT_COM; i++) {
- zr->stat_com[i] = cpu_to_le32(1); /* mark as unavailable to zr36057 */
- }
-}
-
-static void
-zr36057_set_jpg (struct zoran *zr,
- enum zoran_codec_mode mode)
-{
- struct tvnorm *tvn;
- u32 reg;
-
- tvn = zr->timing;
-
- /* assert P_Reset, disable code transfer, deassert Active */
- btwrite(0, ZR36057_JPC);
-
- /* MJPEG compression mode */
- switch (mode) {
-
- case BUZ_MODE_MOTION_COMPRESS:
- default:
- reg = ZR36057_JMC_MJPGCmpMode;
- break;
-
- case BUZ_MODE_MOTION_DECOMPRESS:
- reg = ZR36057_JMC_MJPGExpMode;
- reg |= ZR36057_JMC_SyncMstr;
- /* RJ: The following is experimental - improves the output to screen */
- //if(zr->jpg_settings.VFIFO_FB) reg |= ZR36057_JMC_VFIFO_FB; // No, it doesn't. SM
- break;
-
- case BUZ_MODE_STILL_COMPRESS:
- reg = ZR36057_JMC_JPGCmpMode;
- break;
-
- case BUZ_MODE_STILL_DECOMPRESS:
- reg = ZR36057_JMC_JPGExpMode;
- break;
-
- }
- reg |= ZR36057_JMC_JPG;
- if (zr->jpg_settings.field_per_buff == 1)
- reg |= ZR36057_JMC_Fld_per_buff;
- btwrite(reg, ZR36057_JMC);
-
- /* vertical */
- btor(ZR36057_VFEVCR_VSPol, ZR36057_VFEVCR);
- reg = (6 << ZR36057_VSP_VsyncSize) |
- (tvn->Ht << ZR36057_VSP_FrmTot);
- btwrite(reg, ZR36057_VSP);
- reg = ((zr->jpg_settings.img_y + tvn->VStart) << ZR36057_FVAP_NAY) |
- (zr->jpg_settings.img_height << ZR36057_FVAP_PAY);
- btwrite(reg, ZR36057_FVAP);
-
- /* horizontal */
- if (zr->card.vfe_pol.hsync_pol)
- btor(ZR36057_VFEHCR_HSPol, ZR36057_VFEHCR);
- else
- btand(~ZR36057_VFEHCR_HSPol, ZR36057_VFEHCR);
- reg = ((tvn->HSyncStart) << ZR36057_HSP_HsyncStart) |
- (tvn->Wt << ZR36057_HSP_LineTot);
- btwrite(reg, ZR36057_HSP);
- reg = ((zr->jpg_settings.img_x +
- tvn->HStart + 4) << ZR36057_FHAP_NAX) |
- (zr->jpg_settings.img_width << ZR36057_FHAP_PAX);
- btwrite(reg, ZR36057_FHAP);
-
- /* field process parameters */
- if (zr->jpg_settings.odd_even)
- reg = ZR36057_FPP_Odd_Even;
- else
- reg = 0;
-
- btwrite(reg, ZR36057_FPP);
-
- /* Set proper VCLK Polarity, else colors will be wrong during playback */
- //btor(ZR36057_VFESPFR_VCLKPol, ZR36057_VFESPFR);
-
- /* code base address */
- reg = virt_to_bus(zr->stat_com);
- btwrite(reg, ZR36057_JCBA);
-
-	/* FIFO threshold (the FIFO is 160 double words) */
- /* NOTE: decimal values here */
- switch (mode) {
-
- case BUZ_MODE_STILL_COMPRESS:
- case BUZ_MODE_MOTION_COMPRESS:
- if (zr->card.type != BUZ)
- reg = 140;
- else
- reg = 60;
- break;
-
- case BUZ_MODE_STILL_DECOMPRESS:
- case BUZ_MODE_MOTION_DECOMPRESS:
- reg = 20;
- break;
-
- default:
- reg = 80;
- break;
-
- }
- btwrite(reg, ZR36057_JCFT);
- zr36057_adjust_vfe(zr, mode);
-
-}
-
-void
-print_interrupts (struct zoran *zr)
-{
- int res, noerr = 0;
-
- printk(KERN_INFO "%s: interrupts received:", ZR_DEVNAME(zr));
- if ((res = zr->field_counter) < -1 || res > 1) {
- printk(KERN_CONT " FD:%d", res);
- }
- if ((res = zr->intr_counter_GIRQ1) != 0) {
- printk(KERN_CONT " GIRQ1:%d", res);
- noerr++;
- }
- if ((res = zr->intr_counter_GIRQ0) != 0) {
- printk(KERN_CONT " GIRQ0:%d", res);
- noerr++;
- }
- if ((res = zr->intr_counter_CodRepIRQ) != 0) {
- printk(KERN_CONT " CodRepIRQ:%d", res);
- noerr++;
- }
- if ((res = zr->intr_counter_JPEGRepIRQ) != 0) {
- printk(KERN_CONT " JPEGRepIRQ:%d", res);
- noerr++;
- }
- if (zr->JPEG_max_missed) {
- printk(KERN_CONT " JPEG delays: max=%d min=%d", zr->JPEG_max_missed,
- zr->JPEG_min_missed);
- }
- if (zr->END_event_missed) {
- printk(KERN_CONT " ENDs missed: %d", zr->END_event_missed);
- }
- //if (zr->jpg_queued_num) {
- printk(KERN_CONT " queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail,
- zr->jpg_dma_tail, zr->jpg_dma_head, zr->jpg_que_head);
- //}
- if (!noerr) {
- printk(KERN_CONT ": no interrupts detected.");
- }
- printk(KERN_CONT "\n");
-}
-
-void
-clear_interrupt_counters (struct zoran *zr)
-{
- zr->intr_counter_GIRQ1 = 0;
- zr->intr_counter_GIRQ0 = 0;
- zr->intr_counter_CodRepIRQ = 0;
- zr->intr_counter_JPEGRepIRQ = 0;
- zr->field_counter = 0;
- zr->IRQ1_in = 0;
- zr->IRQ1_out = 0;
- zr->JPEG_in = 0;
- zr->JPEG_out = 0;
- zr->JPEG_0 = 0;
- zr->JPEG_1 = 0;
- zr->END_event_missed = 0;
- zr->JPEG_missed = 0;
- zr->JPEG_max_missed = 0;
- zr->JPEG_min_missed = 0x7fffffff;
-}
-
-static u32
-count_reset_interrupt (struct zoran *zr)
-{
- u32 isr;
-
- if ((isr = btread(ZR36057_ISR) & 0x78000000)) {
- if (isr & ZR36057_ISR_GIRQ1) {
- btwrite(ZR36057_ISR_GIRQ1, ZR36057_ISR);
- zr->intr_counter_GIRQ1++;
- }
- if (isr & ZR36057_ISR_GIRQ0) {
- btwrite(ZR36057_ISR_GIRQ0, ZR36057_ISR);
- zr->intr_counter_GIRQ0++;
- }
- if (isr & ZR36057_ISR_CodRepIRQ) {
- btwrite(ZR36057_ISR_CodRepIRQ, ZR36057_ISR);
- zr->intr_counter_CodRepIRQ++;
- }
- if (isr & ZR36057_ISR_JPEGRepIRQ) {
- btwrite(ZR36057_ISR_JPEGRepIRQ, ZR36057_ISR);
- zr->intr_counter_JPEGRepIRQ++;
- }
- }
- return isr;
-}
-
-void
-jpeg_start (struct zoran *zr)
-{
- int reg;
-
- zr->frame_num = 0;
-
- /* deassert P_reset, disable code transfer, deassert Active */
- btwrite(ZR36057_JPC_P_Reset, ZR36057_JPC);
- /* stop flushing the internal code buffer */
- btand(~ZR36057_MCTCR_CFlush, ZR36057_MCTCR);
- /* enable code transfer */
- btor(ZR36057_JPC_CodTrnsEn, ZR36057_JPC);
-
- /* clear IRQs */
- btwrite(IRQ_MASK, ZR36057_ISR);
- /* enable the JPEG IRQs */
- btwrite(zr->card.jpeg_int |
- ZR36057_ICR_JPEGRepIRQ |
- ZR36057_ICR_IntPinEn,
- ZR36057_ICR);
-
- set_frame(zr, 0); // \FRAME
-
- /* set the JPEG codec guest ID */
- reg = (zr->card.gpcs[1] << ZR36057_JCGI_JPEGuestID) |
- (0 << ZR36057_JCGI_JPEGuestReg);
- btwrite(reg, ZR36057_JCGI);
-
- if (zr->card.video_vfe == CODEC_TYPE_ZR36016 &&
- zr->card.video_codec == CODEC_TYPE_ZR36050) {
- /* Enable processing on the ZR36016 */
- if (zr->vfe)
- zr36016_write(zr->vfe, 0, 1);
-
- /* load the address of the GO register in the ZR36050 latch */
- post_office_write(zr, 0, 0, 0);
- }
-
- /* assert Active */
- btor(ZR36057_JPC_Active, ZR36057_JPC);
-
- /* enable the Go generation */
- btor(ZR36057_JMC_Go_en, ZR36057_JMC);
- udelay(30);
-
- set_frame(zr, 1); // /FRAME
-
- dprintk(3, KERN_DEBUG "%s: jpeg_start\n", ZR_DEVNAME(zr));
-}
-
-void
-zr36057_enable_jpg (struct zoran *zr,
- enum zoran_codec_mode mode)
-{
- struct vfe_settings cap;
- int field_size =
- zr->jpg_buffers.buffer_size / zr->jpg_settings.field_per_buff;
-
- zr->codec_mode = mode;
-
- cap.x = zr->jpg_settings.img_x;
- cap.y = zr->jpg_settings.img_y;
- cap.width = zr->jpg_settings.img_width;
- cap.height = zr->jpg_settings.img_height;
- cap.decimation =
- zr->jpg_settings.HorDcm | (zr->jpg_settings.VerDcm << 8);
- cap.quality = zr->jpg_settings.jpg_comp.quality;
-
- switch (mode) {
-
- case BUZ_MODE_MOTION_COMPRESS: {
- struct jpeg_app_marker app;
- struct jpeg_com_marker com;
-
- /* In motion compress mode, the decoder output must be enabled, and
- * the video bus direction set to input.
- */
- set_videobus_dir(zr, 0);
- decoder_call(zr, video, s_stream, 1);
- encoder_call(zr, video, s_routing, 0, 0, 0);
-
- /* Take the JPEG codec and the VFE out of sleep */
- jpeg_codec_sleep(zr, 0);
-
- /* set JPEG app/com marker */
- app.appn = zr->jpg_settings.jpg_comp.APPn;
- app.len = zr->jpg_settings.jpg_comp.APP_len;
- memcpy(app.data, zr->jpg_settings.jpg_comp.APP_data, 60);
- zr->codec->control(zr->codec, CODEC_S_JPEG_APP_DATA,
- sizeof(struct jpeg_app_marker), &app);
-
- com.len = zr->jpg_settings.jpg_comp.COM_len;
- memcpy(com.data, zr->jpg_settings.jpg_comp.COM_data, 60);
- zr->codec->control(zr->codec, CODEC_S_JPEG_COM_DATA,
- sizeof(struct jpeg_com_marker), &com);
-
- /* Setup the JPEG codec */
- zr->codec->control(zr->codec, CODEC_S_JPEG_TDS_BYTE,
- sizeof(int), &field_size);
- zr->codec->set_video(zr->codec, zr->timing, &cap,
- &zr->card.vfe_pol);
- zr->codec->set_mode(zr->codec, CODEC_DO_COMPRESSION);
-
- /* Setup the VFE */
- if (zr->vfe) {
- zr->vfe->control(zr->vfe, CODEC_S_JPEG_TDS_BYTE,
- sizeof(int), &field_size);
- zr->vfe->set_video(zr->vfe, zr->timing, &cap,
- &zr->card.vfe_pol);
- zr->vfe->set_mode(zr->vfe, CODEC_DO_COMPRESSION);
- }
-
- init_jpeg_queue(zr);
- zr36057_set_jpg(zr, mode); // \P_Reset, ... Video param, FIFO
-
- clear_interrupt_counters(zr);
- dprintk(2, KERN_INFO "%s: enable_jpg(MOTION_COMPRESS)\n",
- ZR_DEVNAME(zr));
- break;
- }
-
- case BUZ_MODE_MOTION_DECOMPRESS:
- /* In motion decompression mode, the decoder output must be disabled, and
- * the video bus direction set to output.
- */
- decoder_call(zr, video, s_stream, 0);
- set_videobus_dir(zr, 1);
- encoder_call(zr, video, s_routing, 1, 0, 0);
-
- /* Take the JPEG codec and the VFE out of sleep */
- jpeg_codec_sleep(zr, 0);
- /* Setup the VFE */
- if (zr->vfe) {
- zr->vfe->set_video(zr->vfe, zr->timing, &cap,
- &zr->card.vfe_pol);
- zr->vfe->set_mode(zr->vfe, CODEC_DO_EXPANSION);
- }
- /* Setup the JPEG codec */
- zr->codec->set_video(zr->codec, zr->timing, &cap,
- &zr->card.vfe_pol);
- zr->codec->set_mode(zr->codec, CODEC_DO_EXPANSION);
-
- init_jpeg_queue(zr);
- zr36057_set_jpg(zr, mode); // \P_Reset, ... Video param, FIFO
-
- clear_interrupt_counters(zr);
- dprintk(2, KERN_INFO "%s: enable_jpg(MOTION_DECOMPRESS)\n",
- ZR_DEVNAME(zr));
- break;
-
- case BUZ_MODE_IDLE:
- default:
- /* shut down processing */
- btand(~(zr->card.jpeg_int | ZR36057_ICR_JPEGRepIRQ),
- ZR36057_ICR);
- btwrite(zr->card.jpeg_int | ZR36057_ICR_JPEGRepIRQ,
- ZR36057_ISR);
- btand(~ZR36057_JMC_Go_en, ZR36057_JMC); // \Go_en
-
- msleep(50);
-
- set_videobus_dir(zr, 0);
- set_frame(zr, 1); // /FRAME
- btor(ZR36057_MCTCR_CFlush, ZR36057_MCTCR); // /CFlush
- btwrite(0, ZR36057_JPC); // \P_Reset,\CodTrnsEn,\Active
- btand(~ZR36057_JMC_VFIFO_FB, ZR36057_JMC);
- btand(~ZR36057_JMC_SyncMstr, ZR36057_JMC);
- jpeg_codec_reset(zr);
- jpeg_codec_sleep(zr, 1);
- zr36057_adjust_vfe(zr, mode);
-
- decoder_call(zr, video, s_stream, 1);
- encoder_call(zr, video, s_routing, 0, 0, 0);
-
- dprintk(2, KERN_INFO "%s: enable_jpg(IDLE)\n", ZR_DEVNAME(zr));
- break;
-
- }
-}
-
-/* when this is called the spinlock must be held */
-void
-zoran_feed_stat_com (struct zoran *zr)
-{
- /* move frames from pending queue to DMA */
-
- int frame, i, max_stat_com;
-
- max_stat_com =
- (zr->jpg_settings.TmpDcm ==
- 1) ? BUZ_NUM_STAT_COM : (BUZ_NUM_STAT_COM >> 1);
-
- while ((zr->jpg_dma_head - zr->jpg_dma_tail) < max_stat_com &&
- zr->jpg_dma_head < zr->jpg_que_head) {
-
- frame = zr->jpg_pend[zr->jpg_dma_head & BUZ_MASK_FRAME];
- if (zr->jpg_settings.TmpDcm == 1) {
- /* fill 1 stat_com entry */
- i = (zr->jpg_dma_head -
- zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
- if (!(zr->stat_com[i] & cpu_to_le32(1)))
- break;
- zr->stat_com[i] =
- cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus);
- } else {
- /* fill 2 stat_com entries */
- i = ((zr->jpg_dma_head -
- zr->jpg_err_shift) & 1) * 2;
- if (!(zr->stat_com[i] & cpu_to_le32(1)))
- break;
- zr->stat_com[i] =
- cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus);
- zr->stat_com[i + 1] =
- cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus);
- }
- zr->jpg_buffers.buffer[frame].state = BUZ_STATE_DMA;
- zr->jpg_dma_head++;
-
- }
- if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS)
- zr->jpg_queued_num++;
-}
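-
-/*
- * Buffer flow between this function and zoran_reap_stat_com() below: frames
- * are queued at jpg_que_head, handed to the stat_com ring at jpg_dma_head and
- * completed at jpg_dma_tail; the counters only ever grow and are reduced with
- * BUZ_MASK_FRAME / BUZ_MASK_STAT_COM, while jpg_err_shift compensates for the
- * ring rotation done in error_handler().
- */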
-
-/* when this is called the spinlock must be held */
-static void
-zoran_reap_stat_com (struct zoran *zr)
-{
- /* move frames from DMA queue to done queue */
-
- int i;
- u32 stat_com;
- unsigned int seq;
- unsigned int dif;
- struct zoran_buffer *buffer;
- int frame;
-
-	/* In motion decompression mode we don't have a hardware frame counter;
-	 * we just count the interrupts here */
-
- if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) {
- zr->jpg_seq_num++;
- }
- while (zr->jpg_dma_tail < zr->jpg_dma_head) {
- if (zr->jpg_settings.TmpDcm == 1)
- i = (zr->jpg_dma_tail -
- zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
- else
- i = ((zr->jpg_dma_tail -
- zr->jpg_err_shift) & 1) * 2 + 1;
-
- stat_com = le32_to_cpu(zr->stat_com[i]);
-
- if ((stat_com & 1) == 0) {
- return;
- }
- frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME];
- buffer = &zr->jpg_buffers.buffer[frame];
- buffer->bs.ts = ktime_get_ns();
-
- if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
- buffer->bs.length = (stat_com & 0x7fffff) >> 1;
-
- /* update sequence number with the help of the counter in stat_com */
-
- seq = ((stat_com >> 24) + zr->jpg_err_seq) & 0xff;
- dif = (seq - zr->jpg_seq_num) & 0xff;
- zr->jpg_seq_num += dif;
- } else {
- buffer->bs.length = 0;
- }
- buffer->bs.seq =
- zr->jpg_settings.TmpDcm ==
- 2 ? (zr->jpg_seq_num >> 1) : zr->jpg_seq_num;
- buffer->state = BUZ_STATE_DONE;
-
- zr->jpg_dma_tail++;
- }
-}
-
-static void zoran_restart(struct zoran *zr)
-{
-	/* Now the stat_com buffer is ready for restart */
- unsigned int status = 0;
- int mode;
-
- if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
- decoder_call(zr, video, g_input_status, &status);
- mode = CODEC_DO_COMPRESSION;
- } else {
- status = V4L2_IN_ST_NO_SIGNAL;
- mode = CODEC_DO_EXPANSION;
- }
- if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS ||
- !(status & V4L2_IN_ST_NO_SIGNAL)) {
- /********** RESTART code *************/
- jpeg_codec_reset(zr);
- zr->codec->set_mode(zr->codec, mode);
- zr36057_set_jpg(zr, zr->codec_mode);
- jpeg_start(zr);
-
- if (zr->num_errors <= 8)
- dprintk(2, KERN_INFO "%s: Restart\n",
- ZR_DEVNAME(zr));
-
- zr->JPEG_missed = 0;
- zr->JPEG_error = 2;
- /********** End RESTART code ***********/
- }
-}
-
-static void
-error_handler (struct zoran *zr,
- u32 astat,
- u32 stat)
-{
- int i;
-
-	/* This is the JPEG error handling part */
- if (zr->codec_mode != BUZ_MODE_MOTION_COMPRESS &&
- zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS) {
- return;
- }
-
- if ((stat & 1) == 0 &&
- zr->codec_mode == BUZ_MODE_MOTION_COMPRESS &&
- zr->jpg_dma_tail - zr->jpg_que_tail >= zr->jpg_buffers.num_buffers) {
- /* No free buffers... */
- zoran_reap_stat_com(zr);
- zoran_feed_stat_com(zr);
- wake_up_interruptible(&zr->jpg_capq);
- zr->JPEG_missed = 0;
- return;
- }
-
- if (zr->JPEG_error == 1) {
- zoran_restart(zr);
- return;
- }
-
- /*
- * First entry: error just happened during normal operation
- *
- * In BUZ_MODE_MOTION_COMPRESS:
- *
-	 * Possibly a glitch in the TV signal. In this case we should
-	 * stop the codec and wait for a good quality signal before
-	 * restarting it to avoid further problems.
-	 *
-	 * In BUZ_MODE_MOTION_DECOMPRESS:
-	 *
-	 * Bad JPEG frame: we have to mark it as processed (the codec crashed
-	 * and was not able to do it itself) and remove it from the queue.
- */
- btand(~ZR36057_JMC_Go_en, ZR36057_JMC);
- udelay(1);
- stat = stat | (post_office_read(zr, 7, 0) & 3) << 8;
- btwrite(0, ZR36057_JPC);
- btor(ZR36057_MCTCR_CFlush, ZR36057_MCTCR);
- jpeg_codec_reset(zr);
- jpeg_codec_sleep(zr, 1);
- zr->JPEG_error = 1;
- zr->num_errors++;
-
- /* Report error */
- if (zr36067_debug > 1 && zr->num_errors <= 8) {
- long frame;
- int j;
-
- frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME];
- printk(KERN_ERR
- "%s: JPEG error stat=0x%08x(0x%08x) queue_state=%ld/%ld/%ld/%ld seq=%ld frame=%ld. Codec stopped. ",
- ZR_DEVNAME(zr), stat, zr->last_isr,
- zr->jpg_que_tail, zr->jpg_dma_tail,
- zr->jpg_dma_head, zr->jpg_que_head,
- zr->jpg_seq_num, frame);
- printk(KERN_INFO "stat_com frames:");
- for (j = 0; j < BUZ_NUM_STAT_COM; j++) {
- for (i = 0; i < zr->jpg_buffers.num_buffers; i++) {
- if (le32_to_cpu(zr->stat_com[j]) == zr->jpg_buffers.buffer[i].jpg.frag_tab_bus)
- printk(KERN_CONT "% d->%d", j, i);
- }
- }
- printk(KERN_CONT "\n");
- }
- /* Find an entry in stat_com and rotate contents */
- if (zr->jpg_settings.TmpDcm == 1)
- i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
- else
- i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2;
- if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) {
- /* Mimic zr36067 operation */
- zr->stat_com[i] |= cpu_to_le32(1);
- if (zr->jpg_settings.TmpDcm != 1)
- zr->stat_com[i + 1] |= cpu_to_le32(1);
- /* Refill */
- zoran_reap_stat_com(zr);
- zoran_feed_stat_com(zr);
- wake_up_interruptible(&zr->jpg_capq);
- /* Find an entry in stat_com again after refill */
- if (zr->jpg_settings.TmpDcm == 1)
- i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
- else
- i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2;
- }
- if (i) {
-		/* Rotate stat_com entries to make the current entry first */
- int j;
- __le32 bus_addr[BUZ_NUM_STAT_COM];
-
- /* Here we are copying the stat_com array, which
- * is already in little endian format, so
- * no endian conversions here
- */
- memcpy(bus_addr, zr->stat_com, sizeof(bus_addr));
-
- for (j = 0; j < BUZ_NUM_STAT_COM; j++)
- zr->stat_com[j] = bus_addr[(i + j) & BUZ_MASK_STAT_COM];
-
- zr->jpg_err_shift += i;
- zr->jpg_err_shift &= BUZ_MASK_STAT_COM;
- }
- if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS)
- zr->jpg_err_seq = zr->jpg_seq_num; /* + 1; */
- zoran_restart(zr);
-}
-
-irqreturn_t
-zoran_irq (int irq,
- void *dev_id)
-{
- u32 stat, astat;
- int count;
- struct zoran *zr;
- unsigned long flags;
-
- zr = dev_id;
- count = 0;
-
- if (zr->testing) {
- /* Testing interrupts */
- spin_lock_irqsave(&zr->spinlock, flags);
- while ((stat = count_reset_interrupt(zr))) {
- if (count++ > 100) {
- btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR);
- dprintk(1,
- KERN_ERR
- "%s: IRQ lockup while testing, isr=0x%08x, cleared int mask\n",
- ZR_DEVNAME(zr), stat);
- wake_up_interruptible(&zr->test_q);
- }
- }
- zr->last_isr = stat;
- spin_unlock_irqrestore(&zr->spinlock, flags);
- return IRQ_HANDLED;
- }
-
- spin_lock_irqsave(&zr->spinlock, flags);
- while (1) {
- /* get/clear interrupt status bits */
- stat = count_reset_interrupt(zr);
- astat = stat & IRQ_MASK;
- if (!astat) {
- break;
- }
- dprintk(4,
- KERN_DEBUG
- "zoran_irq: astat: 0x%08x, mask: 0x%08x\n",
- astat, btread(ZR36057_ICR));
- if (astat & zr->card.vsync_int) { // SW
-
- if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS ||
- zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
- /* count missed interrupts */
- zr->JPEG_missed++;
- }
- //post_office_read(zr,1,0);
- /* Interrupts may still happen when
- * zr->v4l_memgrab_active is switched off.
- * We simply ignore them */
-
- if (zr->v4l_memgrab_active) {
- /* A lot more checks should be here ... */
- if ((btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SnapShot) == 0)
- dprintk(1,
- KERN_WARNING
- "%s: BuzIRQ with SnapShot off ???\n",
- ZR_DEVNAME(zr));
-
- if (zr->v4l_grab_frame != NO_GRAB_ACTIVE) {
- /* There is a grab on a frame going on, check if it has finished */
- if ((btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_FrameGrab) == 0) {
- /* it is finished, notify the user */
-
- zr->v4l_buffers.buffer[zr->v4l_grab_frame].state = BUZ_STATE_DONE;
- zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.seq = zr->v4l_grab_seq;
- zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.ts = ktime_get_ns();
- zr->v4l_grab_frame = NO_GRAB_ACTIVE;
- zr->v4l_pend_tail++;
- }
- }
-
- if (zr->v4l_grab_frame == NO_GRAB_ACTIVE)
- wake_up_interruptible(&zr->v4l_capq);
-
- /* Check if there is another grab queued */
-
- if (zr->v4l_grab_frame == NO_GRAB_ACTIVE &&
- zr->v4l_pend_tail != zr->v4l_pend_head) {
- int frame = zr->v4l_pend[zr->v4l_pend_tail & V4L_MASK_FRAME];
- u32 reg;
-
- zr->v4l_grab_frame = frame;
-
- /* Set zr36057 video front end and enable video */
-
- /* Buffer address */
-
- reg = zr->v4l_buffers.buffer[frame].v4l.fbuffer_bus;
- btwrite(reg, ZR36057_VDTR);
- if (zr->v4l_settings.height > BUZ_MAX_HEIGHT / 2)
- reg += zr->v4l_settings.bytesperline;
- btwrite(reg, ZR36057_VDBR);
-
- /* video stride, status, and frame grab register */
- reg = 0;
- if (zr->v4l_settings.height > BUZ_MAX_HEIGHT / 2)
- reg += zr->v4l_settings.bytesperline;
- reg = (reg << ZR36057_VSSFGR_DispStride);
- reg |= ZR36057_VSSFGR_VidOvf;
- reg |= ZR36057_VSSFGR_SnapShot;
- reg |= ZR36057_VSSFGR_FrameGrab;
- btwrite(reg, ZR36057_VSSFGR);
-
- btor(ZR36057_VDCR_VidEn,
- ZR36057_VDCR);
- }
- }
-
- /* even if we don't grab, we do want to increment
- * the sequence counter to see lost frames */
- zr->v4l_grab_seq++;
- }
-#if (IRQ_MASK & ZR36057_ISR_CodRepIRQ)
- if (astat & ZR36057_ISR_CodRepIRQ) {
- zr->intr_counter_CodRepIRQ++;
- IDEBUG(printk(KERN_DEBUG "%s: ZR36057_ISR_CodRepIRQ\n",
- ZR_DEVNAME(zr)));
- btand(~ZR36057_ICR_CodRepIRQ, ZR36057_ICR);
- }
-#endif /* (IRQ_MASK & ZR36057_ISR_CodRepIRQ) */
-
-#if (IRQ_MASK & ZR36057_ISR_JPEGRepIRQ)
- if ((astat & ZR36057_ISR_JPEGRepIRQ) &&
- (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS ||
- zr->codec_mode == BUZ_MODE_MOTION_COMPRESS)) {
- if (zr36067_debug > 1 && (!zr->frame_num || zr->JPEG_error)) {
- char sv[BUZ_NUM_STAT_COM + 1];
- int i;
-
- printk(KERN_INFO
- "%s: first frame ready: state=0x%08x odd_even=%d field_per_buff=%d delay=%d\n",
- ZR_DEVNAME(zr), stat,
- zr->jpg_settings.odd_even,
- zr->jpg_settings.field_per_buff,
- zr->JPEG_missed);
-
- for (i = 0; i < BUZ_NUM_STAT_COM; i++)
- sv[i] = le32_to_cpu(zr->stat_com[i]) & 1 ? '1' : '0';
- sv[BUZ_NUM_STAT_COM] = 0;
- printk(KERN_INFO
- "%s: stat_com=%s queue_state=%ld/%ld/%ld/%ld\n",
- ZR_DEVNAME(zr), sv,
- zr->jpg_que_tail,
- zr->jpg_dma_tail,
- zr->jpg_dma_head,
- zr->jpg_que_head);
- } else {
- /* Get statistics */
- if (zr->JPEG_missed > zr->JPEG_max_missed)
- zr->JPEG_max_missed = zr->JPEG_missed;
- if (zr->JPEG_missed < zr->JPEG_min_missed)
- zr->JPEG_min_missed = zr->JPEG_missed;
- }
-
- if (zr36067_debug > 2 && zr->frame_num < 6) {
- int i;
-
- printk(KERN_INFO "%s: seq=%ld stat_com:",
- ZR_DEVNAME(zr), zr->jpg_seq_num);
- for (i = 0; i < 4; i++) {
- printk(KERN_CONT " %08x",
- le32_to_cpu(zr->stat_com[i]));
- }
- printk(KERN_CONT "\n");
- }
- zr->frame_num++;
- zr->JPEG_missed = 0;
- zr->JPEG_error = 0;
- zoran_reap_stat_com(zr);
- zoran_feed_stat_com(zr);
- wake_up_interruptible(&zr->jpg_capq);
- }
-#endif /* (IRQ_MASK & ZR36057_ISR_JPEGRepIRQ) */
-
- /* DATERR, too many fields missed, error processing */
- if ((astat & zr->card.jpeg_int) ||
- zr->JPEG_missed > 25 ||
- zr->JPEG_error == 1 ||
- ((zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) &&
- (zr->frame_num && (zr->JPEG_missed > zr->jpg_settings.field_per_buff)))) {
- error_handler(zr, astat, stat);
- }
-
- count++;
- if (count > 10) {
- dprintk(2, KERN_WARNING "%s: irq loop %d\n",
- ZR_DEVNAME(zr), count);
- if (count > 20) {
- btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR);
- dprintk(2,
- KERN_ERR
- "%s: IRQ lockup, cleared int mask\n",
- ZR_DEVNAME(zr));
- break;
- }
- }
- zr->last_isr = stat;
- }
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- return IRQ_HANDLED;
-}
-
-void
-zoran_set_pci_master (struct zoran *zr,
- int set_master)
-{
- if (set_master) {
- pci_set_master(zr->pci_dev);
- } else {
- u16 command;
-
- pci_read_config_word(zr->pci_dev, PCI_COMMAND, &command);
- command &= ~PCI_COMMAND_MASTER;
- pci_write_config_word(zr->pci_dev, PCI_COMMAND, command);
- }
-}
-
-void
-zoran_init_hardware (struct zoran *zr)
-{
- /* Enable bus-mastering */
- zoran_set_pci_master(zr, 1);
-
- /* Initialize the board */
- if (zr->card.init) {
- zr->card.init(zr);
- }
-
- decoder_call(zr, core, init, 0);
- decoder_call(zr, video, s_std, zr->norm);
- decoder_call(zr, video, s_routing,
- zr->card.input[zr->input].muxsel, 0, 0);
-
- encoder_call(zr, core, init, 0);
- encoder_call(zr, video, s_std_output, zr->norm);
- encoder_call(zr, video, s_routing, 0, 0, 0);
-
- /* toggle JPEG codec sleep to sync PLL */
- jpeg_codec_sleep(zr, 1);
- jpeg_codec_sleep(zr, 0);
-
- /*
- * set individual interrupt enables (without GIRQ1)
-	 * but don't enable them globally until zoran_open()
- */
- zr36057_init_vfe(zr);
-
- zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
-
- btwrite(IRQ_MASK, ZR36057_ISR); // Clears interrupts
-}
-
-void
-zr36057_restart (struct zoran *zr)
-{
- btwrite(0, ZR36057_SPGPPCR);
- mdelay(1);
- btor(ZR36057_SPGPPCR_SoftReset, ZR36057_SPGPPCR);
- mdelay(1);
-
- /* assert P_Reset */
- btwrite(0, ZR36057_JPC);
- /* set up GPIO direction - all output */
- btwrite(ZR36057_SPGPPCR_SoftReset | 0, ZR36057_SPGPPCR);
-
- /* set up GPIO pins and guest bus timing */
- btwrite((0x81 << 24) | 0x8888, ZR36057_GPPGCR1);
-}
-
-/*
- * initialize video front end
- */
-
-static void
-zr36057_init_vfe (struct zoran *zr)
-{
- u32 reg;
-
- reg = btread(ZR36057_VFESPFR);
- reg |= ZR36057_VFESPFR_LittleEndian;
- reg &= ~ZR36057_VFESPFR_VCLKPol;
- reg |= ZR36057_VFESPFR_ExtFl;
- reg |= ZR36057_VFESPFR_TopField;
- btwrite(reg, ZR36057_VFESPFR);
- reg = btread(ZR36057_VDCR);
- if (pci_pci_problems & PCIPCI_TRITON)
- // || zr->revision < 1) // Revision 1 has also Triton support
- reg &= ~ZR36057_VDCR_Triton;
- else
- reg |= ZR36057_VDCR_Triton;
- btwrite(reg, ZR36057_VDCR);
-}
diff --git a/drivers/staging/media/zoran/zoran_device.h b/drivers/staging/media/zoran/zoran_device.h
deleted file mode 100644
index a507aaad4ebb..000000000000
--- a/drivers/staging/media/zoran/zoran_device.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Zoran zr36057/zr36067 PCI controller driver, for the
- * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
- * Media Labs LML33/LML33R10.
- *
- * This part handles card-specific data and detection
- *
- * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
- *
- * Currently maintained by:
- * Ronald Bultje <rbultje@ronald.bitfreak.net>
- * Laurent Pinchart <laurent.pinchart@skynet.be>
- * Mailinglist <mjpeg-users@lists.sf.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ZORAN_DEVICE_H__
-#define __ZORAN_DEVICE_H__
-
-/* general purpose I/O */
-extern void GPIO(struct zoran *zr,
- int bit,
- unsigned int value);
-
-/* codec (or actually: guest bus) access */
-extern int post_office_wait(struct zoran *zr);
-extern int post_office_write(struct zoran *zr,
- unsigned guest,
- unsigned reg,
- unsigned value);
-extern int post_office_read(struct zoran *zr,
- unsigned guest,
- unsigned reg);
-
-extern void detect_guest_activity(struct zoran *zr);
-
-extern void jpeg_codec_sleep(struct zoran *zr,
- int sleep);
-extern int jpeg_codec_reset(struct zoran *zr);
-
-/* zr360x7 access to raw capture */
-extern void zr36057_overlay(struct zoran *zr,
- int on);
-extern void write_overlay_mask(struct zoran_fh *fh,
- struct v4l2_clip *vp,
- int count);
-extern void zr36057_set_memgrab(struct zoran *zr,
- int mode);
-extern int wait_grab_pending(struct zoran *zr);
-
-/* interrupts */
-extern void print_interrupts(struct zoran *zr);
-extern void clear_interrupt_counters(struct zoran *zr);
-extern irqreturn_t zoran_irq(int irq, void *dev_id);
-
-/* JPEG codec access */
-extern void jpeg_start(struct zoran *zr);
-extern void zr36057_enable_jpg(struct zoran *zr,
- enum zoran_codec_mode mode);
-extern void zoran_feed_stat_com(struct zoran *zr);
-
-/* general */
-extern void zoran_set_pci_master(struct zoran *zr,
- int set_master);
-extern void zoran_init_hardware(struct zoran *zr);
-extern void zr36057_restart(struct zoran *zr);
-
-extern const struct zoran_format zoran_formats[];
-
-extern int v4l_nbufs;
-extern int v4l_bufsize;
-extern int jpg_nbufs;
-extern int jpg_bufsize;
-extern int pass_through;
-
-/* i2c */
-#define decoder_call(zr, o, f, args...) \
- v4l2_subdev_call(zr->decoder, o, f, ##args)
-#define encoder_call(zr, o, f, args...) \
- v4l2_subdev_call(zr->encoder, o, f, ##args)
-
-#endif /* __ZORAN_DEVICE_H__ */
diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c
deleted file mode 100644
index 04f88f9d6bb4..000000000000
--- a/drivers/staging/media/zoran/zoran_driver.c
+++ /dev/null
@@ -1,2850 +0,0 @@
-/*
- * Zoran zr36057/zr36067 PCI controller driver, for the
- * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
- * Media Labs LML33/LML33R10.
- *
- * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
- *
- * Changes for BUZ by Wolfgang Scherr <scherr@net4you.net>
- *
- * Changes for DC10/DC30 by Laurent Pinchart <laurent.pinchart@skynet.be>
- *
- * Changes for LML33R10 by Maxim Yevtyushkin <max@linuxmedialabs.com>
- *
- * Changes for videodev2/v4l2 by Ronald Bultje <rbultje@ronald.bitfreak.net>
- *
- * Based on
- *
- * Miro DC10 driver
- * Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net>
- *
- * Iomega Buz driver version 1.0
- * Copyright (C) 1999 Rainer Johanni <Rainer@Johanni.de>
- *
- * buz.0.0.3
- * Copyright (C) 1998 Dave Perks <dperks@ibm.net>
- *
- * bttv - Bt848 frame grabber driver
- * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
- * & Marcus Metzler (mocm@thp.uni-koeln.de)
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-
-#include <linux/spinlock.h>
-
-#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-event.h>
-#include "videocodec.h"
-
-#include <asm/byteorder.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <linux/proc_fs.h>
-
-#include <linux/mutex.h>
-#include "zoran.h"
-#include "zoran_device.h"
-#include "zoran_card.h"
-
-
-const struct zoran_format zoran_formats[] = {
- {
- .name = "15-bit RGB LE",
- .fourcc = V4L2_PIX_FMT_RGB555,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .depth = 15,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif|
- ZR36057_VFESPFR_LittleEndian,
- }, {
- .name = "15-bit RGB BE",
- .fourcc = V4L2_PIX_FMT_RGB555X,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .depth = 15,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif,
- }, {
- .name = "16-bit RGB LE",
- .fourcc = V4L2_PIX_FMT_RGB565,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .depth = 16,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif|
- ZR36057_VFESPFR_LittleEndian,
- }, {
- .name = "16-bit RGB BE",
- .fourcc = V4L2_PIX_FMT_RGB565X,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .depth = 16,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif,
- }, {
- .name = "24-bit RGB",
- .fourcc = V4L2_PIX_FMT_BGR24,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .depth = 24,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_Pack24,
- }, {
- .name = "32-bit RGB LE",
- .fourcc = V4L2_PIX_FMT_BGR32,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .depth = 32,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_LittleEndian,
- }, {
- .name = "32-bit RGB BE",
- .fourcc = V4L2_PIX_FMT_RGB32,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .depth = 32,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_RGB888,
- }, {
- .name = "4:2:2, packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- .depth = 16,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_YUV422,
- }, {
- .name = "4:2:2, packed, UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- .depth = 16,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_OVERLAY,
- .vfespfr = ZR36057_VFESPFR_YUV422|ZR36057_VFESPFR_LittleEndian,
- }, {
- .name = "Hardware-encoded Motion-JPEG",
- .fourcc = V4L2_PIX_FMT_MJPEG,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- .depth = 0,
- .flags = ZORAN_FORMAT_CAPTURE |
- ZORAN_FORMAT_PLAYBACK |
- ZORAN_FORMAT_COMPRESSED,
- }
-};
-#define NUM_FORMATS ARRAY_SIZE(zoran_formats)
-
-	/* small helper function for calculating buffer sizes for v4l2:
-	 * we calculate the nearest higher power of two, which
-	 * will be the recommended buffer size */
-static __u32
-zoran_v4l2_calc_bufsize (struct zoran_jpg_settings *settings)
-{
- __u8 div = settings->VerDcm * settings->HorDcm * settings->TmpDcm;
- __u32 num = (1024 * 512) / (div);
- __u32 result = 2;
-
- num--;
- while (num) {
- num >>= 1;
- result <<= 1;
- }
-
- if (result > jpg_bufsize)
- return jpg_bufsize;
- if (result < 8192)
- return 8192;
- return result;
-}
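-
-/* For illustration: with all decimations set to 1, div = 1 and num = 524288,
- * so the loop doubles result 19 times to 1 MB, which is then capped at
- * jpg_bufsize (and never allowed below 8 kB). */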
-
-/* forward references */
-static void v4l_fbuffer_free(struct zoran_fh *fh);
-static void jpg_fbuffer_free(struct zoran_fh *fh);
-
-/* Set mapping mode */
-static void map_mode_raw(struct zoran_fh *fh)
-{
- fh->map_mode = ZORAN_MAP_MODE_RAW;
- fh->buffers.buffer_size = v4l_bufsize;
- fh->buffers.num_buffers = v4l_nbufs;
-}
-static void map_mode_jpg(struct zoran_fh *fh, int play)
-{
- fh->map_mode = play ? ZORAN_MAP_MODE_JPG_PLAY : ZORAN_MAP_MODE_JPG_REC;
- fh->buffers.buffer_size = jpg_bufsize;
- fh->buffers.num_buffers = jpg_nbufs;
-}
-static inline const char *mode_name(enum zoran_map_mode mode)
-{
- return mode == ZORAN_MAP_MODE_RAW ? "V4L" : "JPG";
-}
-
-/*
- * Allocate the V4L grab buffers
- *
- * These have to be physically contiguous.
- */
-
-static int v4l_fbuffer_alloc(struct zoran_fh *fh)
-{
- struct zoran *zr = fh->zr;
- int i, off;
- unsigned char *mem;
-
- for (i = 0; i < fh->buffers.num_buffers; i++) {
- if (fh->buffers.buffer[i].v4l.fbuffer)
- dprintk(2,
- KERN_WARNING
- "%s: %s - buffer %d already allocated!?\n",
- ZR_DEVNAME(zr), __func__, i);
-
- //udelay(20);
- mem = kmalloc(fh->buffers.buffer_size,
- GFP_KERNEL | __GFP_NOWARN);
- if (!mem) {
- dprintk(1,
- KERN_ERR
- "%s: %s - kmalloc for V4L buf %d failed\n",
- ZR_DEVNAME(zr), __func__, i);
- v4l_fbuffer_free(fh);
- return -ENOBUFS;
- }
- fh->buffers.buffer[i].v4l.fbuffer = mem;
- fh->buffers.buffer[i].v4l.fbuffer_phys = virt_to_phys(mem);
- fh->buffers.buffer[i].v4l.fbuffer_bus = virt_to_bus(mem);
- for (off = 0; off < fh->buffers.buffer_size;
- off += PAGE_SIZE)
- SetPageReserved(virt_to_page(mem + off));
- dprintk(4,
- KERN_INFO
- "%s: %s - V4L frame %d mem %p (bus: 0x%llx)\n",
- ZR_DEVNAME(zr), __func__, i, mem,
- (unsigned long long)virt_to_bus(mem));
- }
-
- fh->buffers.allocated = 1;
-
- return 0;
-}
-
-/* free the V4L grab buffers */
-static void v4l_fbuffer_free(struct zoran_fh *fh)
-{
- struct zoran *zr = fh->zr;
- int i, off;
- unsigned char *mem;
-
- dprintk(4, KERN_INFO "%s: %s\n", ZR_DEVNAME(zr), __func__);
-
- for (i = 0; i < fh->buffers.num_buffers; i++) {
- if (!fh->buffers.buffer[i].v4l.fbuffer)
- continue;
-
- mem = fh->buffers.buffer[i].v4l.fbuffer;
- for (off = 0; off < fh->buffers.buffer_size;
- off += PAGE_SIZE)
- ClearPageReserved(virt_to_page(mem + off));
- kfree(fh->buffers.buffer[i].v4l.fbuffer);
- fh->buffers.buffer[i].v4l.fbuffer = NULL;
- }
-
- fh->buffers.allocated = 0;
-}
-
-/*
- * Allocate the MJPEG grab buffers.
- *
- * If a Natoma chipset is present and this is a revision 1 zr36057,
- * each MJPEG buffer needs to be physically contiguous.
- * (RJ: This statement is from Dave Perks' original driver,
- * I could never check it because I have a zr36067)
- *
- * RJ: The contents of the grab buffers never need to be accessed in the driver.
- * Therefore there is no need to allocate them with vmalloc in order
- * to get a contiguous virtual memory space.
- * I don't understand why many other drivers first allocate them with
- * vmalloc (which internally also uses get_zeroed_page, but gives you
- * virtual addresses) and then have to go to a lot of effort
- * to get the physical addresses.
- *
- * Ben Capper:
- * On big-endian architectures (such as ppc) some extra steps
- * are needed. When reading and writing to the stat_com array
- * and fragment buffers, the device expects to see little-
- * endian values. The use of cpu_to_le32() and le32_to_cpu()
- * in this function (and one or two others in zoran_device.c)
- * ensures that these values are always stored in little-endian
- * form, regardless of architecture. The zr36057 does Very Bad
- * Things on big endian architectures if the stat_com array
- * and fragment buffers are not little-endian.
- */
-
-static int jpg_fbuffer_alloc(struct zoran_fh *fh)
-{
- struct zoran *zr = fh->zr;
- int i, j, off;
- u8 *mem;
-
- for (i = 0; i < fh->buffers.num_buffers; i++) {
- if (fh->buffers.buffer[i].jpg.frag_tab)
- dprintk(2,
- KERN_WARNING
- "%s: %s - buffer %d already allocated!?\n",
- ZR_DEVNAME(zr), __func__, i);
-
- /* Allocate fragment table for this buffer */
-
- mem = (void *)get_zeroed_page(GFP_KERNEL);
- if (!mem) {
- dprintk(1,
- KERN_ERR
- "%s: %s - get_zeroed_page (frag_tab) failed for buffer %d\n",
- ZR_DEVNAME(zr), __func__, i);
- jpg_fbuffer_free(fh);
- return -ENOBUFS;
- }
- fh->buffers.buffer[i].jpg.frag_tab = (__le32 *)mem;
- fh->buffers.buffer[i].jpg.frag_tab_bus = virt_to_bus(mem);
-
- if (fh->buffers.need_contiguous) {
- mem = kmalloc(fh->buffers.buffer_size, GFP_KERNEL);
- if (mem == NULL) {
- dprintk(1,
- KERN_ERR
- "%s: %s - kmalloc failed for buffer %d\n",
- ZR_DEVNAME(zr), __func__, i);
- jpg_fbuffer_free(fh);
- return -ENOBUFS;
- }
- fh->buffers.buffer[i].jpg.frag_tab[0] =
- cpu_to_le32(virt_to_bus(mem));
- fh->buffers.buffer[i].jpg.frag_tab[1] =
- cpu_to_le32((fh->buffers.buffer_size >> 1) | 1);
- for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE)
- SetPageReserved(virt_to_page(mem + off));
- } else {
- /* jpg_bufsize is already page aligned */
- for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) {
- mem = (void *)get_zeroed_page(GFP_KERNEL);
- if (mem == NULL) {
- dprintk(1,
- KERN_ERR
- "%s: %s - get_zeroed_page failed for buffer %d\n",
- ZR_DEVNAME(zr), __func__, i);
- jpg_fbuffer_free(fh);
- return -ENOBUFS;
- }
-
- fh->buffers.buffer[i].jpg.frag_tab[2 * j] =
- cpu_to_le32(virt_to_bus(mem));
- fh->buffers.buffer[i].jpg.frag_tab[2 * j + 1] =
- cpu_to_le32((PAGE_SIZE >> 2) << 1);
- SetPageReserved(virt_to_page(mem));
- }
-
- fh->buffers.buffer[i].jpg.frag_tab[2 * j - 1] |= cpu_to_le32(1);
- }
- }
-
- dprintk(4,
- KERN_DEBUG "%s: %s - %d KB allocated\n",
- ZR_DEVNAME(zr), __func__,
- (fh->buffers.num_buffers * fh->buffers.buffer_size) >> 10);
-
- fh->buffers.allocated = 1;
-
- return 0;
-}
-
-/* free the MJPEG grab buffers */
-static void jpg_fbuffer_free(struct zoran_fh *fh)
-{
- struct zoran *zr = fh->zr;
- int i, j, off;
- unsigned char *mem;
- __le32 frag_tab;
- struct zoran_buffer *buffer;
-
- dprintk(4, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__);
-
- for (i = 0, buffer = &fh->buffers.buffer[0];
- i < fh->buffers.num_buffers; i++, buffer++) {
- if (!buffer->jpg.frag_tab)
- continue;
-
- if (fh->buffers.need_contiguous) {
- frag_tab = buffer->jpg.frag_tab[0];
-
- if (frag_tab) {
- mem = bus_to_virt(le32_to_cpu(frag_tab));
- for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE)
- ClearPageReserved(virt_to_page(mem + off));
- kfree(mem);
- buffer->jpg.frag_tab[0] = 0;
- buffer->jpg.frag_tab[1] = 0;
- }
- } else {
- for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) {
- frag_tab = buffer->jpg.frag_tab[2 * j];
-
- if (!frag_tab)
- break;
- ClearPageReserved(virt_to_page(bus_to_virt(le32_to_cpu(frag_tab))));
- free_page((unsigned long)bus_to_virt(le32_to_cpu(frag_tab)));
- buffer->jpg.frag_tab[2 * j] = 0;
- buffer->jpg.frag_tab[2 * j + 1] = 0;
- }
- }
-
- free_page((unsigned long)buffer->jpg.frag_tab);
- buffer->jpg.frag_tab = NULL;
- }
-
- fh->buffers.allocated = 0;
-}
-
-/*
- * V4L Buffer grabbing
- */
-
-static int
-zoran_v4l_set_format (struct zoran_fh *fh,
- int width,
- int height,
- const struct zoran_format *format)
-{
- struct zoran *zr = fh->zr;
- int bpp;
-
- /* Check size and format of the grab wanted */
-
- if (height < BUZ_MIN_HEIGHT || width < BUZ_MIN_WIDTH ||
- height > BUZ_MAX_HEIGHT || width > BUZ_MAX_WIDTH) {
- dprintk(1,
- KERN_ERR
- "%s: %s - wrong frame size (%dx%d)\n",
- ZR_DEVNAME(zr), __func__, width, height);
- return -EINVAL;
- }
-
- bpp = (format->depth + 7) / 8;
-
- /* Check against available buffer size */
- if (height * width * bpp > fh->buffers.buffer_size) {
- dprintk(1,
- KERN_ERR
- "%s: %s - video buffer size (%d kB) is too small\n",
- ZR_DEVNAME(zr), __func__, fh->buffers.buffer_size >> 10);
- return -EINVAL;
- }
-
-	/* The video front end needs 4-byte aligned line sizes */
-
- if ((bpp == 2 && (width & 1)) || (bpp == 3 && (width & 3))) {
- dprintk(1,
- KERN_ERR
- "%s: %s - wrong frame alignment\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- fh->v4l_settings.width = width;
- fh->v4l_settings.height = height;
- fh->v4l_settings.format = format;
- fh->v4l_settings.bytesperline = bpp * fh->v4l_settings.width;
-
- return 0;
-}
-
-static int zoran_v4l_queue_frame(struct zoran_fh *fh, int num)
-{
- struct zoran *zr = fh->zr;
- unsigned long flags;
- int res = 0;
-
- if (!fh->buffers.allocated) {
- dprintk(1,
- KERN_ERR
- "%s: %s - buffers not yet allocated\n",
- ZR_DEVNAME(zr), __func__);
- res = -ENOMEM;
- }
-
- /* No grabbing outside the buffer range! */
- if (num >= fh->buffers.num_buffers || num < 0) {
- dprintk(1,
- KERN_ERR
- "%s: %s - buffer %d is out of range\n",
- ZR_DEVNAME(zr), __func__, num);
- res = -EINVAL;
- }
-
- spin_lock_irqsave(&zr->spinlock, flags);
-
- if (fh->buffers.active == ZORAN_FREE) {
- if (zr->v4l_buffers.active == ZORAN_FREE) {
- zr->v4l_buffers = fh->buffers;
- fh->buffers.active = ZORAN_ACTIVE;
- } else {
- dprintk(1,
- KERN_ERR
- "%s: %s - another session is already capturing\n",
- ZR_DEVNAME(zr), __func__);
- res = -EBUSY;
- }
- }
-
- /* make sure a grab isn't going on currently with this buffer */
- if (!res) {
- switch (zr->v4l_buffers.buffer[num].state) {
- default:
- case BUZ_STATE_PEND:
- if (zr->v4l_buffers.active == ZORAN_FREE) {
- fh->buffers.active = ZORAN_FREE;
- zr->v4l_buffers.allocated = 0;
- }
- res = -EBUSY; /* what are you doing? */
- break;
- case BUZ_STATE_DONE:
- dprintk(2,
- KERN_WARNING
- "%s: %s - queueing buffer %d in state DONE!?\n",
- ZR_DEVNAME(zr), __func__, num);
- /* fall through */
- case BUZ_STATE_USER:
- /* since there is at least one unused buffer there's room for at least
- * one more pend[] entry */
- zr->v4l_pend[zr->v4l_pend_head++ & V4L_MASK_FRAME] = num;
- zr->v4l_buffers.buffer[num].state = BUZ_STATE_PEND;
- zr->v4l_buffers.buffer[num].bs.length =
- fh->v4l_settings.bytesperline *
- zr->v4l_settings.height;
- fh->buffers.buffer[num] = zr->v4l_buffers.buffer[num];
- break;
- }
- }
-
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- if (!res && zr->v4l_buffers.active == ZORAN_FREE)
- zr->v4l_buffers.active = fh->buffers.active;
-
- return res;
-}
-
-/*
- * Sync on a V4L buffer
- */
-
-static int v4l_sync(struct zoran_fh *fh, int frame)
-{
- struct zoran *zr = fh->zr;
- unsigned long flags;
-
- if (fh->buffers.active == ZORAN_FREE) {
- dprintk(1,
- KERN_ERR
- "%s: %s - no grab active for this session\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- /* check passed-in frame number */
- if (frame >= fh->buffers.num_buffers || frame < 0) {
- dprintk(1,
- KERN_ERR "%s: %s - frame %d is invalid\n",
- ZR_DEVNAME(zr), __func__, frame);
- return -EINVAL;
- }
-
- /* Check if the buffer was queued at all */
- if (zr->v4l_buffers.buffer[frame].state == BUZ_STATE_USER) {
- dprintk(1,
- KERN_ERR
- "%s: %s - attempt to sync on a buffer which was not queued?\n",
- ZR_DEVNAME(zr), __func__);
- return -EPROTO;
- }
-
- mutex_unlock(&zr->lock);
- /* wait on this buffer to get ready */
- if (!wait_event_interruptible_timeout(zr->v4l_capq,
- (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ)) {
- mutex_lock(&zr->lock);
- return -ETIME;
- }
- mutex_lock(&zr->lock);
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- /* buffer should now be in BUZ_STATE_DONE */
- if (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_DONE)
- dprintk(2,
- KERN_ERR "%s: %s - internal state error\n",
- ZR_DEVNAME(zr), __func__);
-
- zr->v4l_buffers.buffer[frame].state = BUZ_STATE_USER;
- fh->buffers.buffer[frame] = zr->v4l_buffers.buffer[frame];
-
- spin_lock_irqsave(&zr->spinlock, flags);
-
- /* Check if streaming capture has finished */
- if (zr->v4l_pend_tail == zr->v4l_pend_head) {
- zr36057_set_memgrab(zr, 0);
- if (zr->v4l_buffers.active == ZORAN_ACTIVE) {
- fh->buffers.active = zr->v4l_buffers.active = ZORAN_FREE;
- zr->v4l_buffers.allocated = 0;
- }
- }
-
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- return 0;
-}
-
-/*
- * Queue an MJPEG buffer for capture/playback
- */
-
-static int zoran_jpg_queue_frame(struct zoran_fh *fh, int num,
- enum zoran_codec_mode mode)
-{
- struct zoran *zr = fh->zr;
- unsigned long flags;
- int res = 0;
-
- /* Check if buffers are allocated */
- if (!fh->buffers.allocated) {
- dprintk(1,
- KERN_ERR
- "%s: %s - buffers not yet allocated\n",
- ZR_DEVNAME(zr), __func__);
- return -ENOMEM;
- }
-
- /* No grabbing outside the buffer range! */
- if (num >= fh->buffers.num_buffers || num < 0) {
- dprintk(1,
- KERN_ERR
- "%s: %s - buffer %d out of range\n",
- ZR_DEVNAME(zr), __func__, num);
- return -EINVAL;
- }
-
- /* what is the codec mode right now? */
- if (zr->codec_mode == BUZ_MODE_IDLE) {
- zr->jpg_settings = fh->jpg_settings;
- } else if (zr->codec_mode != mode) {
- /* wrong codec mode active - invalid */
- dprintk(1,
- KERN_ERR
- "%s: %s - codec in wrong mode\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- if (fh->buffers.active == ZORAN_FREE) {
- if (zr->jpg_buffers.active == ZORAN_FREE) {
- zr->jpg_buffers = fh->buffers;
- fh->buffers.active = ZORAN_ACTIVE;
- } else {
- dprintk(1,
- KERN_ERR
- "%s: %s - another session is already capturing\n",
- ZR_DEVNAME(zr), __func__);
- res = -EBUSY;
- }
- }
-
- if (!res && zr->codec_mode == BUZ_MODE_IDLE) {
- /* Ok load up the jpeg codec */
- zr36057_enable_jpg(zr, mode);
- }
-
- spin_lock_irqsave(&zr->spinlock, flags);
-
- if (!res) {
- switch (zr->jpg_buffers.buffer[num].state) {
- case BUZ_STATE_DONE:
- dprintk(2,
- KERN_WARNING
- "%s: %s - queuing frame in BUZ_STATE_DONE state!?\n",
- ZR_DEVNAME(zr), __func__);
- /* fall through */
- case BUZ_STATE_USER:
- /* since there is at least one unused buffer there's room for at
- * least one more pend[] entry */
- zr->jpg_pend[zr->jpg_que_head++ & BUZ_MASK_FRAME] = num;
- zr->jpg_buffers.buffer[num].state = BUZ_STATE_PEND;
- fh->buffers.buffer[num] = zr->jpg_buffers.buffer[num];
- zoran_feed_stat_com(zr);
- break;
- default:
- case BUZ_STATE_DMA:
- case BUZ_STATE_PEND:
- if (zr->jpg_buffers.active == ZORAN_FREE) {
- fh->buffers.active = ZORAN_FREE;
- zr->jpg_buffers.allocated = 0;
- }
- res = -EBUSY; /* what are you doing? */
- break;
- }
- }
-
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- if (!res && zr->jpg_buffers.active == ZORAN_FREE)
- zr->jpg_buffers.active = fh->buffers.active;
-
- return res;
-}
-
-static int jpg_qbuf(struct zoran_fh *fh, int frame, enum zoran_codec_mode mode)
-{
- struct zoran *zr = fh->zr;
- int res = 0;
-
- /* Does the user want to stop streaming? */
- if (frame < 0) {
- if (zr->codec_mode == mode) {
- if (fh->buffers.active == ZORAN_FREE) {
- dprintk(1,
- KERN_ERR
- "%s: %s(-1) - session not active\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
- fh->buffers.active = zr->jpg_buffers.active = ZORAN_FREE;
- zr->jpg_buffers.allocated = 0;
- zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
- return 0;
- } else {
- dprintk(1,
- KERN_ERR
- "%s: %s - stop streaming but not in streaming mode\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
- }
-
- if ((res = zoran_jpg_queue_frame(fh, frame, mode)))
- return res;
-
- /* Start the jpeg codec when the first frame is queued */
- if (!res && zr->jpg_que_head == 1)
- jpeg_start(zr);
-
- return res;
-}
-
-/*
- * Sync on an MJPEG buffer
- */
-
-static int jpg_sync(struct zoran_fh *fh, struct zoran_sync *bs)
-{
- struct zoran *zr = fh->zr;
- unsigned long flags;
- int frame;
-
- if (fh->buffers.active == ZORAN_FREE) {
- dprintk(1,
- KERN_ERR
- "%s: %s - capture is not currently active\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
- if (zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS &&
- zr->codec_mode != BUZ_MODE_MOTION_COMPRESS) {
- dprintk(1,
- KERN_ERR
- "%s: %s - codec not in streaming mode\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
- mutex_unlock(&zr->lock);
- if (!wait_event_interruptible_timeout(zr->jpg_capq,
- (zr->jpg_que_tail != zr->jpg_dma_tail ||
- zr->jpg_dma_tail == zr->jpg_dma_head),
- 10*HZ)) {
- int isr;
-
- btand(~ZR36057_JMC_Go_en, ZR36057_JMC);
- udelay(1);
- zr->codec->control(zr->codec, CODEC_G_STATUS,
- sizeof(isr), &isr);
- mutex_lock(&zr->lock);
- dprintk(1,
- KERN_ERR
- "%s: %s - timeout: codec isr=0x%02x\n",
- ZR_DEVNAME(zr), __func__, isr);
-
- return -ETIME;
-
- }
- mutex_lock(&zr->lock);
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- spin_lock_irqsave(&zr->spinlock, flags);
-
- if (zr->jpg_dma_tail != zr->jpg_dma_head)
- frame = zr->jpg_pend[zr->jpg_que_tail++ & BUZ_MASK_FRAME];
- else
- frame = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME];
-
- /* buffer should now be in BUZ_STATE_DONE */
- if (zr->jpg_buffers.buffer[frame].state != BUZ_STATE_DONE)
- dprintk(2,
- KERN_ERR "%s: %s - internal state error\n",
- ZR_DEVNAME(zr), __func__);
-
- *bs = zr->jpg_buffers.buffer[frame].bs;
- bs->frame = frame;
- zr->jpg_buffers.buffer[frame].state = BUZ_STATE_USER;
- fh->buffers.buffer[frame] = zr->jpg_buffers.buffer[frame];
-
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- return 0;
-}
-
-static void zoran_open_init_session(struct zoran_fh *fh)
-{
- int i;
- struct zoran *zr = fh->zr;
-
- /* By default, map the V4L buffers */
- map_mode_raw(fh);
-
- /* take over the card's current settings */
- fh->overlay_settings = zr->overlay_settings;
- fh->overlay_settings.is_set = 0;
- fh->overlay_settings.format = zr->overlay_settings.format;
- fh->overlay_active = ZORAN_FREE;
-
- /* v4l settings */
- fh->v4l_settings = zr->v4l_settings;
- /* jpg settings */
- fh->jpg_settings = zr->jpg_settings;
-
- /* buffers */
- memset(&fh->buffers, 0, sizeof(fh->buffers));
- for (i = 0; i < MAX_FRAME; i++) {
- fh->buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */
- fh->buffers.buffer[i].bs.frame = i;
- }
- fh->buffers.allocated = 0;
- fh->buffers.active = ZORAN_FREE;
-}
-
-static void zoran_close_end_session(struct zoran_fh *fh)
-{
- struct zoran *zr = fh->zr;
-
- /* overlay */
- if (fh->overlay_active != ZORAN_FREE) {
- fh->overlay_active = zr->overlay_active = ZORAN_FREE;
- zr->v4l_overlay_active = 0;
- if (!zr->v4l_memgrab_active)
- zr36057_overlay(zr, 0);
- zr->overlay_mask = NULL;
- }
-
- if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
- /* v4l capture */
- if (fh->buffers.active != ZORAN_FREE) {
- unsigned long flags;
-
- spin_lock_irqsave(&zr->spinlock, flags);
- zr36057_set_memgrab(zr, 0);
- zr->v4l_buffers.allocated = 0;
- zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE;
- spin_unlock_irqrestore(&zr->spinlock, flags);
- }
-
- /* v4l buffers */
- if (fh->buffers.allocated)
- v4l_fbuffer_free(fh);
- } else {
- /* jpg capture */
- if (fh->buffers.active != ZORAN_FREE) {
- zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
- zr->jpg_buffers.allocated = 0;
- zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE;
- }
-
- /* jpg buffers */
- if (fh->buffers.allocated)
- jpg_fbuffer_free(fh);
- }
-}
-
-/*
- * Open a zoran card. Right now the open flags are not used
- */
-
-static int zoran_open(struct file *file)
-{
- struct zoran *zr = video_drvdata(file);
- struct zoran_fh *fh;
- int res, first_open = 0;
-
- dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n",
- ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1);
-
- mutex_lock(&zr->lock);
-
- if (zr->user >= 2048) {
- dprintk(1, KERN_ERR "%s: too many users (%d) on device\n",
- ZR_DEVNAME(zr), zr->user);
- res = -EBUSY;
- goto fail_unlock;
- }
-
- /* now, create the open()-specific file_ops struct */
- fh = kzalloc(sizeof(struct zoran_fh), GFP_KERNEL);
- if (!fh) {
- dprintk(1,
- KERN_ERR
- "%s: %s - allocation of zoran_fh failed\n",
- ZR_DEVNAME(zr), __func__);
- res = -ENOMEM;
- goto fail_unlock;
- }
- v4l2_fh_init(&fh->fh, video_devdata(file));
-
- /* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows
- * on norm-change! */
- fh->overlay_mask =
- kmalloc(array3_size((768 + 31) / 32, 576, 4), GFP_KERNEL);
- if (!fh->overlay_mask) {
- dprintk(1,
- KERN_ERR
- "%s: %s - allocation of overlay_mask failed\n",
- ZR_DEVNAME(zr), __func__);
- res = -ENOMEM;
- goto fail_fh;
- }
-
- if (zr->user++ == 0)
- first_open = 1;
-
- /* default setup - TODO: look at flags */
- if (first_open) { /* First device open */
- zr36057_restart(zr);
- zoran_open_init_params(zr);
- zoran_init_hardware(zr);
-
- btor(ZR36057_ICR_IntPinEn, ZR36057_ICR);
- }
-
- /* set file_ops stuff */
- file->private_data = fh;
- fh->zr = zr;
- zoran_open_init_session(fh);
- v4l2_fh_add(&fh->fh);
- mutex_unlock(&zr->lock);
-
- return 0;
-
-fail_fh:
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
-fail_unlock:
- mutex_unlock(&zr->lock);
-
- dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n",
- ZR_DEVNAME(zr), res, zr->user);
-
- return res;
-}
-
-static int
-zoran_close(struct file *file)
-{
- struct zoran_fh *fh = file->private_data;
- struct zoran *zr = fh->zr;
-
- dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(+)=%d\n",
- ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user - 1);
-
- /* kernel locks (fs/device.c), so don't do that ourselves
- * (prevents deadlocks) */
- mutex_lock(&zr->lock);
-
- zoran_close_end_session(fh);
-
- if (zr->user-- == 1) { /* Last process */
- /* Clean up JPEG process */
- wake_up_interruptible(&zr->jpg_capq);
- zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
- zr->jpg_buffers.allocated = 0;
- zr->jpg_buffers.active = ZORAN_FREE;
-
- /* disable interrupts */
- btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR);
-
- if (zr36067_debug > 1)
- print_interrupts(zr);
-
- /* Overlay off */
- zr->v4l_overlay_active = 0;
- zr36057_overlay(zr, 0);
- zr->overlay_mask = NULL;
-
- /* capture off */
- wake_up_interruptible(&zr->v4l_capq);
- zr36057_set_memgrab(zr, 0);
- zr->v4l_buffers.allocated = 0;
- zr->v4l_buffers.active = ZORAN_FREE;
- zoran_set_pci_master(zr, 0);
-
- if (!pass_through) { /* Switch to color bar */
- decoder_call(zr, video, s_stream, 0);
- encoder_call(zr, video, s_routing, 2, 0, 0);
- }
- }
- mutex_unlock(&zr->lock);
-
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh->overlay_mask);
- kfree(fh);
-
- dprintk(4, KERN_INFO "%s: %s done\n", ZR_DEVNAME(zr), __func__);
-
- return 0;
-}
-
-static int setup_fbuffer(struct zoran_fh *fh,
- void *base,
- const struct zoran_format *fmt,
- int width,
- int height,
- int bytesperline)
-{
- struct zoran *zr = fh->zr;
-
- /* (Ronald) v4l/v4l2 guidelines */
- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- /* Don't allow frame buffer overlay if PCI or AGP is buggy, or on
- ALi Magik (that needs very low latency while the card needs a
- higher value always) */
-
- if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL | PCIPCI_ALIMAGIK))
- return -ENXIO;
-
- /* we need a bytesperline value, even if not given */
- if (!bytesperline)
- bytesperline = width * ((fmt->depth + 7) & ~7) / 8;
-
-#if 0
- if (zr->overlay_active) {
- /* dzjee... stupid users... don't even bother to turn off
- * overlay before changing the memory location...
- * normally, we would return errors here. However, one of
- * the tools that does this is... xawtv! and since xawtv
- * is used by +/- 99% of the users, we'd rather be user-
- * friendly and silently act as if nothing went wrong */
- dprintk(3,
- KERN_ERR
- "%s: %s - forced overlay turnoff because framebuffer changed\n",
- ZR_DEVNAME(zr), __func__);
- zr36057_overlay(zr, 0);
- }
-#endif
-
- if (!(fmt->flags & ZORAN_FORMAT_OVERLAY)) {
- dprintk(1,
- KERN_ERR
- "%s: %s - no valid overlay format given\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
- if (height <= 0 || width <= 0 || bytesperline <= 0) {
- dprintk(1,
- KERN_ERR
- "%s: %s - invalid height/width/bpl value (%d|%d|%d)\n",
- ZR_DEVNAME(zr), __func__, width, height, bytesperline);
- return -EINVAL;
- }
- if (bytesperline & 3) {
- dprintk(1,
- KERN_ERR
- "%s: %s - bytesperline (%d) must be 4-byte aligned\n",
- ZR_DEVNAME(zr), __func__, bytesperline);
- return -EINVAL;
- }
-
- zr->vbuf_base = (void *) ((unsigned long) base & ~3);
- zr->vbuf_height = height;
- zr->vbuf_width = width;
- zr->vbuf_depth = fmt->depth;
- zr->overlay_settings.format = fmt;
- zr->vbuf_bytesperline = bytesperline;
-
- /* The user should set new window parameters */
- zr->overlay_settings.is_set = 0;
-
- return 0;
-}
-
-
-static int setup_window(struct zoran_fh *fh,
- int x,
- int y,
- int width,
- int height,
- struct v4l2_clip __user *clips,
- unsigned int clipcount,
- void __user *bitmap)
-{
- struct zoran *zr = fh->zr;
- struct v4l2_clip *vcp = NULL;
- int on, end;
-
-
- if (!zr->vbuf_base) {
- dprintk(1,
- KERN_ERR
- "%s: %s - frame buffer has to be set first\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- if (!fh->overlay_settings.format) {
- dprintk(1,
- KERN_ERR
- "%s: %s - no overlay format set\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- if (clipcount > 2048) {
- dprintk(1,
- KERN_ERR
- "%s: %s - invalid clipcount\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- /*
- * The video front end needs 4-byte aligned line sizes; we correct that
- * silently here if necessary
- */
- if (zr->vbuf_depth == 15 || zr->vbuf_depth == 16) {
- end = (x + width) & ~1; /* round down */
- x = (x + 1) & ~1; /* round up */
- width = end - x;
- }
-
- if (zr->vbuf_depth == 24) {
- end = (x + width) & ~3; /* round down */
- x = (x + 3) & ~3; /* round up */
- width = end - x;
- }
-
- if (width > BUZ_MAX_WIDTH)
- width = BUZ_MAX_WIDTH;
- if (height > BUZ_MAX_HEIGHT)
- height = BUZ_MAX_HEIGHT;
-
- /* Check for invalid parameters */
- if (width < BUZ_MIN_WIDTH || height < BUZ_MIN_HEIGHT ||
- width > BUZ_MAX_WIDTH || height > BUZ_MAX_HEIGHT) {
- dprintk(1,
- KERN_ERR
- "%s: %s - width = %d or height = %d invalid\n",
- ZR_DEVNAME(zr), __func__, width, height);
- return -EINVAL;
- }
-
- fh->overlay_settings.x = x;
- fh->overlay_settings.y = y;
- fh->overlay_settings.width = width;
- fh->overlay_settings.height = height;
- fh->overlay_settings.clipcount = clipcount;
-
- /*
- * If an overlay is running, we have to switch it off
- * and switch it on again in order to get the new settings in effect.
- *
- * We also want to avoid that the overlay mask is written
- * when an overlay is running.
- */
-
- on = zr->v4l_overlay_active && !zr->v4l_memgrab_active &&
- zr->overlay_active != ZORAN_FREE &&
- fh->overlay_active != ZORAN_FREE;
- if (on)
- zr36057_overlay(zr, 0);
-
- /*
- * Write the overlay mask if clips are wanted.
- * We prefer a bitmap.
- */
- if (bitmap) {
- /* fake value - it just means we want clips */
- fh->overlay_settings.clipcount = 1;
-
- if (copy_from_user(fh->overlay_mask, bitmap,
- (width * height + 7) / 8)) {
- return -EFAULT;
- }
- } else if (clipcount) {
- /* write our own bitmap from the clips */
- vcp = vmalloc(array_size(sizeof(struct v4l2_clip),
- clipcount + 4));
- if (vcp == NULL) {
- dprintk(1,
- KERN_ERR
- "%s: %s - Alloc of clip mask failed\n",
- ZR_DEVNAME(zr), __func__);
- return -ENOMEM;
- }
- if (copy_from_user
- (vcp, clips, sizeof(struct v4l2_clip) * clipcount)) {
- vfree(vcp);
- return -EFAULT;
- }
- write_overlay_mask(fh, vcp, clipcount);
- vfree(vcp);
- }
-
- fh->overlay_settings.is_set = 1;
- if (fh->overlay_active != ZORAN_FREE &&
- zr->overlay_active != ZORAN_FREE)
- zr->overlay_settings = fh->overlay_settings;
-
- if (on)
- zr36057_overlay(zr, 1);
-
- /* Make sure the changes come into effect */
- return wait_grab_pending(zr);
-}
-
-static int setup_overlay(struct zoran_fh *fh, int on)
-{
- struct zoran *zr = fh->zr;
-
- /* If there is nothing to do, return immediately */
- if ((on && fh->overlay_active != ZORAN_FREE) ||
- (!on && fh->overlay_active == ZORAN_FREE))
- return 0;
-
- /* check whether we're touching someone else's overlay */
- if (on && zr->overlay_active != ZORAN_FREE &&
- fh->overlay_active == ZORAN_FREE) {
- dprintk(1,
- KERN_ERR
- "%s: %s - overlay is already active for another session\n",
- ZR_DEVNAME(zr), __func__);
- return -EBUSY;
- }
- if (!on && zr->overlay_active != ZORAN_FREE &&
- fh->overlay_active == ZORAN_FREE) {
- dprintk(1,
- KERN_ERR
- "%s: %s - you cannot cancel someone else's session\n",
- ZR_DEVNAME(zr), __func__);
- return -EPERM;
- }
-
- if (on == 0) {
- zr->overlay_active = fh->overlay_active = ZORAN_FREE;
- zr->v4l_overlay_active = 0;
- /* When a grab is running, the video simply
- * won't be switched on any more */
- if (!zr->v4l_memgrab_active)
- zr36057_overlay(zr, 0);
- zr->overlay_mask = NULL;
- } else {
- if (!zr->vbuf_base || !fh->overlay_settings.is_set) {
- dprintk(1,
- KERN_ERR
- "%s: %s - buffer or window not set\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
- if (!fh->overlay_settings.format) {
- dprintk(1,
- KERN_ERR
- "%s: %s - no overlay format set\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
- zr->overlay_active = fh->overlay_active = ZORAN_LOCKED;
- zr->v4l_overlay_active = 1;
- zr->overlay_mask = fh->overlay_mask;
- zr->overlay_settings = fh->overlay_settings;
- if (!zr->v4l_memgrab_active)
- zr36057_overlay(zr, 1);
- /* When a grab is running, the video will be
- * switched on when grab is finished */
- }
-
- /* Make sure the changes come into effect */
- return wait_grab_pending(zr);
-}
-
-/* get the status of a buffer in the client's buffer queue */
-static int zoran_v4l2_buffer_status(struct zoran_fh *fh,
- struct v4l2_buffer *buf, int num)
-{
- struct zoran *zr = fh->zr;
- unsigned long flags;
-
- buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-
- switch (fh->map_mode) {
- case ZORAN_MAP_MODE_RAW:
- /* check range */
- if (num < 0 || num >= fh->buffers.num_buffers ||
- !fh->buffers.allocated) {
- dprintk(1,
- KERN_ERR
- "%s: %s - wrong number or buffers not allocated\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&zr->spinlock, flags);
- dprintk(3,
- KERN_DEBUG
- "%s: %s() - raw active=%c, buffer %d: state=%c, map=%c\n",
- ZR_DEVNAME(zr), __func__,
- "FAL"[fh->buffers.active], num,
- "UPMD"[zr->v4l_buffers.buffer[num].state],
- fh->buffers.buffer[num].map ? 'Y' : 'N');
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf->length = fh->buffers.buffer_size;
-
- /* get buffer */
- buf->bytesused = fh->buffers.buffer[num].bs.length;
- if (fh->buffers.buffer[num].state == BUZ_STATE_DONE ||
- fh->buffers.buffer[num].state == BUZ_STATE_USER) {
- buf->sequence = fh->buffers.buffer[num].bs.seq;
- buf->flags |= V4L2_BUF_FLAG_DONE;
- buf->timestamp = ns_to_timeval(fh->buffers.buffer[num].bs.ts);
- } else {
- buf->flags |= V4L2_BUF_FLAG_QUEUED;
- }
-
- if (fh->v4l_settings.height <= BUZ_MAX_HEIGHT / 2)
- buf->field = V4L2_FIELD_TOP;
- else
- buf->field = V4L2_FIELD_INTERLACED;
-
- break;
-
- case ZORAN_MAP_MODE_JPG_REC:
- case ZORAN_MAP_MODE_JPG_PLAY:
-
- /* check range */
- if (num < 0 || num >= fh->buffers.num_buffers ||
- !fh->buffers.allocated) {
- dprintk(1,
- KERN_ERR
- "%s: %s - wrong number or buffers not allocated\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- buf->type = (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ?
- V4L2_BUF_TYPE_VIDEO_CAPTURE :
- V4L2_BUF_TYPE_VIDEO_OUTPUT;
- buf->length = fh->buffers.buffer_size;
-
- /* these variables are only written after the frame has been captured */
- if (fh->buffers.buffer[num].state == BUZ_STATE_DONE ||
- fh->buffers.buffer[num].state == BUZ_STATE_USER) {
- buf->sequence = fh->buffers.buffer[num].bs.seq;
- buf->timestamp = ns_to_timeval(fh->buffers.buffer[num].bs.ts);
- buf->bytesused = fh->buffers.buffer[num].bs.length;
- buf->flags |= V4L2_BUF_FLAG_DONE;
- } else {
- buf->flags |= V4L2_BUF_FLAG_QUEUED;
- }
-
- /* which fields are these? */
- if (fh->jpg_settings.TmpDcm != 1)
- buf->field = fh->jpg_settings.odd_even ?
- V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
- else
- buf->field = fh->jpg_settings.odd_even ?
- V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT;
-
- break;
-
- default:
-
- dprintk(5,
- KERN_ERR
- "%s: %s - invalid buffer type|map_mode (%d|%d)\n",
- ZR_DEVNAME(zr), __func__, buf->type, fh->map_mode);
- return -EINVAL;
- }
-
- buf->memory = V4L2_MEMORY_MMAP;
- buf->index = num;
- buf->m.offset = buf->length * num;
-
- return 0;
-}
-
-static int
-zoran_set_norm (struct zoran *zr,
- v4l2_std_id norm)
-{
- int on;
-
- if (zr->v4l_buffers.active != ZORAN_FREE ||
- zr->jpg_buffers.active != ZORAN_FREE) {
- dprintk(1,
- KERN_WARNING
- "%s: %s called while in playback/capture mode\n",
- ZR_DEVNAME(zr), __func__);
- return -EBUSY;
- }
-
- if (!(norm & zr->card.norms)) {
- dprintk(1,
- KERN_ERR "%s: %s - unsupported norm %llx\n",
- ZR_DEVNAME(zr), __func__, norm);
- return -EINVAL;
- }
-
- if (norm & V4L2_STD_SECAM)
- zr->timing = zr->card.tvn[2];
- else if (norm & V4L2_STD_NTSC)
- zr->timing = zr->card.tvn[1];
- else
- zr->timing = zr->card.tvn[0];
-
- /* We switch overlay off and on since a change in the
- * norm needs different VFE settings */
- on = zr->overlay_active && !zr->v4l_memgrab_active;
- if (on)
- zr36057_overlay(zr, 0);
-
- decoder_call(zr, video, s_std, norm);
- encoder_call(zr, video, s_std_output, norm);
-
- if (on)
- zr36057_overlay(zr, 1);
-
- /* Make sure the changes come into effect */
- zr->norm = norm;
-
- return 0;
-}
-
-static int
-zoran_set_input (struct zoran *zr,
- int input)
-{
- if (input == zr->input) {
- return 0;
- }
-
- if (zr->v4l_buffers.active != ZORAN_FREE ||
- zr->jpg_buffers.active != ZORAN_FREE) {
- dprintk(1,
- KERN_WARNING
- "%s: %s called while in playback/capture mode\n",
- ZR_DEVNAME(zr), __func__);
- return -EBUSY;
- }
-
- if (input < 0 || input >= zr->card.inputs) {
- dprintk(1,
- KERN_ERR
- "%s: %s - unsupported input %d\n",
- ZR_DEVNAME(zr), __func__, input);
- return -EINVAL;
- }
-
- zr->input = input;
-
- decoder_call(zr, video, s_routing,
- zr->card.input[input].muxsel, 0, 0);
-
- return 0;
-}
-
-/*
- * ioctl routine
- */
-
-static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- strscpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card));
- strscpy(cap->driver, "zoran", sizeof(cap->driver));
- snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
- pci_name(zr->pci_dev));
- cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
- return 0;
-}
-
-static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
-{
- unsigned int num, i;
-
- for (num = i = 0; i < NUM_FORMATS; i++) {
- if (zoran_formats[i].flags & flag && num++ == fmt->index) {
- strncpy(fmt->description, zoran_formats[i].name,
- sizeof(fmt->description) - 1);
- /* fmt struct pre-zeroed, so adding '\0' not needed */
- fmt->pixelformat = zoran_formats[i].fourcc;
- if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED)
- fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
- return 0;
- }
- }
- return -EINVAL;
-}
-
-static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
- struct v4l2_fmtdesc *f)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE);
-}
-
-static int zoran_enum_fmt_vid_out(struct file *file, void *__fh,
- struct v4l2_fmtdesc *f)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- return zoran_enum_fmt(zr, f, ZORAN_FORMAT_PLAYBACK);
-}
-
-static int zoran_enum_fmt_vid_overlay(struct file *file, void *__fh,
- struct v4l2_fmtdesc *f)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- return zoran_enum_fmt(zr, f, ZORAN_FORMAT_OVERLAY);
-}
-
-static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
-
- fmt->fmt.pix.width = fh->jpg_settings.img_width / fh->jpg_settings.HorDcm;
- fmt->fmt.pix.height = fh->jpg_settings.img_height * 2 /
- (fh->jpg_settings.VerDcm * fh->jpg_settings.TmpDcm);
- fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&fh->jpg_settings);
- fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
- if (fh->jpg_settings.TmpDcm == 1)
- fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
- V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
- else
- fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
- V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
- fmt->fmt.pix.bytesperline = 0;
- fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-
- return 0;
-}
-
-static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- if (fh->map_mode != ZORAN_MAP_MODE_RAW)
- return zoran_g_fmt_vid_out(file, fh, fmt);
-
- fmt->fmt.pix.width = fh->v4l_settings.width;
- fmt->fmt.pix.height = fh->v4l_settings.height;
- fmt->fmt.pix.sizeimage = fh->v4l_settings.bytesperline *
- fh->v4l_settings.height;
- fmt->fmt.pix.pixelformat = fh->v4l_settings.format->fourcc;
- fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace;
- fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline;
- if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2))
- fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
- else
- fmt->fmt.pix.field = V4L2_FIELD_TOP;
- return 0;
-}
-
-static int zoran_g_fmt_vid_overlay(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- fmt->fmt.win.w.left = fh->overlay_settings.x;
- fmt->fmt.win.w.top = fh->overlay_settings.y;
- fmt->fmt.win.w.width = fh->overlay_settings.width;
- fmt->fmt.win.w.height = fh->overlay_settings.height;
- if (fh->overlay_settings.width * 2 > BUZ_MAX_HEIGHT)
- fmt->fmt.win.field = V4L2_FIELD_INTERLACED;
- else
- fmt->fmt.win.field = V4L2_FIELD_TOP;
-
- return 0;
-}
-
-static int zoran_try_fmt_vid_overlay(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- if (fmt->fmt.win.w.width > BUZ_MAX_WIDTH)
- fmt->fmt.win.w.width = BUZ_MAX_WIDTH;
- if (fmt->fmt.win.w.width < BUZ_MIN_WIDTH)
- fmt->fmt.win.w.width = BUZ_MIN_WIDTH;
- if (fmt->fmt.win.w.height > BUZ_MAX_HEIGHT)
- fmt->fmt.win.w.height = BUZ_MAX_HEIGHT;
- if (fmt->fmt.win.w.height < BUZ_MIN_HEIGHT)
- fmt->fmt.win.w.height = BUZ_MIN_HEIGHT;
-
- return 0;
-}
-
-static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- struct zoran_jpg_settings settings;
- int res = 0;
-
- if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
- return -EINVAL;
-
- settings = fh->jpg_settings;
-
- /* we actually need to set 'real' parameters now */
- if ((fmt->fmt.pix.height * 2) > BUZ_MAX_HEIGHT)
- settings.TmpDcm = 1;
- else
- settings.TmpDcm = 2;
- settings.decimation = 0;
- if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2)
- settings.VerDcm = 2;
- else
- settings.VerDcm = 1;
- if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4)
- settings.HorDcm = 4;
- else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2)
- settings.HorDcm = 2;
- else
- settings.HorDcm = 1;
- if (settings.TmpDcm == 1)
- settings.field_per_buff = 2;
- else
- settings.field_per_buff = 1;
-
- if (settings.HorDcm > 1) {
- settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
- settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
- } else {
- settings.img_x = 0;
- settings.img_width = BUZ_MAX_WIDTH;
- }
-
- /* check */
- res = zoran_check_jpg_settings(zr, &settings, 1);
- if (res)
- return res;
-
- /* tell the user what we actually did */
- fmt->fmt.pix.width = settings.img_width / settings.HorDcm;
- fmt->fmt.pix.height = settings.img_height * 2 /
- (settings.TmpDcm * settings.VerDcm);
- if (settings.TmpDcm == 1)
- fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
- V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
- else
- fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
- V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
-
- fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings);
- fmt->fmt.pix.bytesperline = 0;
- fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- return res;
-}
-
-static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int bpp;
- int i;
-
- if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
- return zoran_try_fmt_vid_out(file, fh, fmt);
-
- for (i = 0; i < NUM_FORMATS; i++)
- if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat)
- break;
-
- if (i == NUM_FORMATS)
- return -EINVAL;
-
- bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
- v4l_bound_align_image(
- &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
- &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
- return 0;
-}
-
-static int zoran_s_fmt_vid_overlay(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
- int res;
-
- dprintk(3, "x=%d, y=%d, w=%d, h=%d, cnt=%d, map=0x%p\n",
- fmt->fmt.win.w.left, fmt->fmt.win.w.top,
- fmt->fmt.win.w.width,
- fmt->fmt.win.w.height,
- fmt->fmt.win.clipcount,
- fmt->fmt.win.bitmap);
- res = setup_window(fh, fmt->fmt.win.w.left, fmt->fmt.win.w.top,
- fmt->fmt.win.w.width, fmt->fmt.win.w.height,
- (struct v4l2_clip __user *)fmt->fmt.win.clips,
- fmt->fmt.win.clipcount, fmt->fmt.win.bitmap);
- return res;
-}
-
-static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- __le32 printformat = __cpu_to_le32(fmt->fmt.pix.pixelformat);
- struct zoran_jpg_settings settings;
- int res = 0;
-
- dprintk(3, "size=%dx%d, fmt=0x%x (%4.4s)\n",
- fmt->fmt.pix.width, fmt->fmt.pix.height,
- fmt->fmt.pix.pixelformat,
- (char *) &printformat);
- if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
- return -EINVAL;
-
- if (fh->buffers.allocated) {
- dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n",
- ZR_DEVNAME(zr));
- res = -EBUSY;
- return res;
- }
-
- settings = fh->jpg_settings;
-
- /* we actually need to set 'real' parameters now */
- if (fmt->fmt.pix.height * 2 > BUZ_MAX_HEIGHT)
- settings.TmpDcm = 1;
- else
- settings.TmpDcm = 2;
- settings.decimation = 0;
- if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2)
- settings.VerDcm = 2;
- else
- settings.VerDcm = 1;
- if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4)
- settings.HorDcm = 4;
- else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2)
- settings.HorDcm = 2;
- else
- settings.HorDcm = 1;
- if (settings.TmpDcm == 1)
- settings.field_per_buff = 2;
- else
- settings.field_per_buff = 1;
-
- if (settings.HorDcm > 1) {
- settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
- settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
- } else {
- settings.img_x = 0;
- settings.img_width = BUZ_MAX_WIDTH;
- }
-
- /* check */
- res = zoran_check_jpg_settings(zr, &settings, 0);
- if (res)
- return res;
-
- /* it's ok, so set them */
- fh->jpg_settings = settings;
-
- map_mode_jpg(fh, fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT);
- fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings);
-
- /* tell the user what we actually did */
- fmt->fmt.pix.width = settings.img_width / settings.HorDcm;
- fmt->fmt.pix.height = settings.img_height * 2 /
- (settings.TmpDcm * settings.VerDcm);
- if (settings.TmpDcm == 1)
- fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
- V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
- else
- fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
- V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
- fmt->fmt.pix.bytesperline = 0;
- fmt->fmt.pix.sizeimage = fh->buffers.buffer_size;
- fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- return res;
-}
-
-static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
- struct v4l2_format *fmt)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int i;
- int res = 0;
-
- if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
- return zoran_s_fmt_vid_out(file, fh, fmt);
-
- for (i = 0; i < NUM_FORMATS; i++)
- if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc)
- break;
- if (i == NUM_FORMATS) {
- dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - unknown/unsupported format 0x%x\n",
- ZR_DEVNAME(zr), fmt->fmt.pix.pixelformat);
- return -EINVAL;
- }
-
- if ((fh->map_mode != ZORAN_MAP_MODE_RAW && fh->buffers.allocated) ||
- fh->buffers.active != ZORAN_FREE) {
- dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n",
- ZR_DEVNAME(zr));
- res = -EBUSY;
- return res;
- }
- if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
- fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
- if (fmt->fmt.pix.width > BUZ_MAX_WIDTH)
- fmt->fmt.pix.width = BUZ_MAX_WIDTH;
-
- map_mode_raw(fh);
-
- res = zoran_v4l_set_format(fh, fmt->fmt.pix.width, fmt->fmt.pix.height,
- &zoran_formats[i]);
- if (res)
- return res;
-
- /* tell the user the results/missing stuff */
- fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline;
- fmt->fmt.pix.sizeimage = fh->v4l_settings.height * fh->v4l_settings.bytesperline;
- fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace;
- if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2))
- fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
- else
- fmt->fmt.pix.field = V4L2_FIELD_TOP;
- return res;
-}
-
-static int zoran_g_fbuf(struct file *file, void *__fh,
- struct v4l2_framebuffer *fb)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- memset(fb, 0, sizeof(*fb));
- fb->base = zr->vbuf_base;
- fb->fmt.width = zr->vbuf_width;
- fb->fmt.height = zr->vbuf_height;
- if (zr->overlay_settings.format)
- fb->fmt.pixelformat = fh->overlay_settings.format->fourcc;
- fb->fmt.bytesperline = zr->vbuf_bytesperline;
- fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
- fb->fmt.field = V4L2_FIELD_INTERLACED;
- fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
-
- return 0;
-}
-
-static int zoran_s_fbuf(struct file *file, void *__fh,
- const struct v4l2_framebuffer *fb)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int i, res = 0;
- __le32 printformat = __cpu_to_le32(fb->fmt.pixelformat);
-
- for (i = 0; i < NUM_FORMATS; i++)
- if (zoran_formats[i].fourcc == fb->fmt.pixelformat)
- break;
- if (i == NUM_FORMATS) {
- dprintk(1, KERN_ERR "%s: VIDIOC_S_FBUF - format=0x%x (%4.4s) not allowed\n",
- ZR_DEVNAME(zr), fb->fmt.pixelformat,
- (char *)&printformat);
- return -EINVAL;
- }
-
- res = setup_fbuffer(fh, fb->base, &zoran_formats[i], fb->fmt.width,
- fb->fmt.height, fb->fmt.bytesperline);
-
- return res;
-}
-
-static int zoran_overlay(struct file *file, void *__fh, unsigned int on)
-{
- struct zoran_fh *fh = __fh;
- int res;
-
- res = setup_overlay(fh, on);
-
- return res;
-}
-
-static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type);
-
-static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffers *req)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int res = 0;
-
- if (req->memory != V4L2_MEMORY_MMAP) {
- dprintk(2,
- KERN_ERR
- "%s: only MEMORY_MMAP capture is supported, not %d\n",
- ZR_DEVNAME(zr), req->memory);
- return -EINVAL;
- }
-
- if (req->count == 0)
- return zoran_streamoff(file, fh, req->type);
-
- if (fh->buffers.allocated) {
- dprintk(2,
- KERN_ERR
- "%s: VIDIOC_REQBUFS - buffers already allocated\n",
- ZR_DEVNAME(zr));
- res = -EBUSY;
- return res;
- }
-
- if (fh->map_mode == ZORAN_MAP_MODE_RAW &&
- req->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- /* clamp the user-requested buffer count */
- if (req->count < 2)
- req->count = 2;
- if (req->count > v4l_nbufs)
- req->count = v4l_nbufs;
-
- /* The next mmap will map the V4L buffers */
- map_mode_raw(fh);
- fh->buffers.num_buffers = req->count;
-
- if (v4l_fbuffer_alloc(fh)) {
- res = -ENOMEM;
- return res;
- }
- } else if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC ||
- fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) {
- /* we need to calculate size ourselves now */
- if (req->count < 4)
- req->count = 4;
- if (req->count > jpg_nbufs)
- req->count = jpg_nbufs;
-
- /* The next mmap will map the MJPEG buffers */
- map_mode_jpg(fh, req->type == V4L2_BUF_TYPE_VIDEO_OUTPUT);
- fh->buffers.num_buffers = req->count;
- fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings);
-
- if (jpg_fbuffer_alloc(fh)) {
- res = -ENOMEM;
- return res;
- }
- } else {
- dprintk(1,
- KERN_ERR
- "%s: VIDIOC_REQBUFS - unknown type %d\n",
- ZR_DEVNAME(zr), req->type);
- res = -EINVAL;
- return res;
- }
- return res;
-}
-
-static int zoran_querybuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
-{
- struct zoran_fh *fh = __fh;
- int res;
-
- res = zoran_v4l2_buffer_status(fh, buf, buf->index);
-
- return res;
-}
-
-static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int res = 0, codec_mode, buf_type;
-
- switch (fh->map_mode) {
- case ZORAN_MAP_MODE_RAW:
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dprintk(1, KERN_ERR
- "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
- ZR_DEVNAME(zr), buf->type, fh->map_mode);
- res = -EINVAL;
- return res;
- }
-
- res = zoran_v4l_queue_frame(fh, buf->index);
- if (res)
- return res;
- if (!zr->v4l_memgrab_active && fh->buffers.active == ZORAN_LOCKED)
- zr36057_set_memgrab(zr, 1);
- break;
-
- case ZORAN_MAP_MODE_JPG_REC:
- case ZORAN_MAP_MODE_JPG_PLAY:
- if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) {
- buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- codec_mode = BUZ_MODE_MOTION_DECOMPRESS;
- } else {
- buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- codec_mode = BUZ_MODE_MOTION_COMPRESS;
- }
-
- if (buf->type != buf_type) {
- dprintk(1, KERN_ERR
- "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
- ZR_DEVNAME(zr), buf->type, fh->map_mode);
- res = -EINVAL;
- return res;
- }
-
- res = zoran_jpg_queue_frame(fh, buf->index, codec_mode);
- if (res != 0)
- return res;
- if (zr->codec_mode == BUZ_MODE_IDLE &&
- fh->buffers.active == ZORAN_LOCKED)
- zr36057_enable_jpg(zr, codec_mode);
-
- break;
-
- default:
- dprintk(1, KERN_ERR
- "%s: VIDIOC_QBUF - unsupported type %d\n",
- ZR_DEVNAME(zr), buf->type);
- res = -EINVAL;
- break;
- }
- return res;
-}
-
-static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int res = 0, buf_type, num = -1; /* init num to silence a compiler warning */
-
- switch (fh->map_mode) {
- case ZORAN_MAP_MODE_RAW:
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dprintk(1, KERN_ERR
- "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
- ZR_DEVNAME(zr), buf->type, fh->map_mode);
- res = -EINVAL;
- return res;
- }
-
- num = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME];
- if (file->f_flags & O_NONBLOCK &&
- zr->v4l_buffers.buffer[num].state != BUZ_STATE_DONE) {
- res = -EAGAIN;
- return res;
- }
- res = v4l_sync(fh, num);
- if (res)
- return res;
- zr->v4l_sync_tail++;
- res = zoran_v4l2_buffer_status(fh, buf, num);
- break;
-
- case ZORAN_MAP_MODE_JPG_REC:
- case ZORAN_MAP_MODE_JPG_PLAY:
- {
- struct zoran_sync bs;
-
- if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY)
- buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- else
- buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- if (buf->type != buf_type) {
- dprintk(1, KERN_ERR
- "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
- ZR_DEVNAME(zr), buf->type, fh->map_mode);
- res = -EINVAL;
- return res;
- }
-
- num = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME];
-
- if (file->f_flags & O_NONBLOCK &&
- zr->jpg_buffers.buffer[num].state != BUZ_STATE_DONE) {
- res = -EAGAIN;
- return res;
- }
- bs.frame = 0; /* suppress compiler warning */
- res = jpg_sync(fh, &bs);
- if (res)
- return res;
- res = zoran_v4l2_buffer_status(fh, buf, bs.frame);
- break;
- }
-
- default:
- dprintk(1, KERN_ERR
- "%s: VIDIOC_DQBUF - unsupported type %d\n",
- ZR_DEVNAME(zr), buf->type);
- res = -EINVAL;
- break;
- }
- return res;
-}
-
-static int zoran_streamon(struct file *file, void *__fh, enum v4l2_buf_type type)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int res = 0;
-
- switch (fh->map_mode) {
- case ZORAN_MAP_MODE_RAW: /* raw capture */
- if (zr->v4l_buffers.active != ZORAN_ACTIVE ||
- fh->buffers.active != ZORAN_ACTIVE) {
- res = -EBUSY;
- return res;
- }
-
- zr->v4l_buffers.active = fh->buffers.active = ZORAN_LOCKED;
- zr->v4l_settings = fh->v4l_settings;
-
- zr->v4l_sync_tail = zr->v4l_pend_tail;
- if (!zr->v4l_memgrab_active &&
- zr->v4l_pend_head != zr->v4l_pend_tail) {
- zr36057_set_memgrab(zr, 1);
- }
- break;
-
- case ZORAN_MAP_MODE_JPG_REC:
- case ZORAN_MAP_MODE_JPG_PLAY:
- /* what is the codec mode right now? */
- if (zr->jpg_buffers.active != ZORAN_ACTIVE ||
- fh->buffers.active != ZORAN_ACTIVE) {
- res = -EBUSY;
- return res;
- }
-
- zr->jpg_buffers.active = fh->buffers.active = ZORAN_LOCKED;
-
- if (zr->jpg_que_head != zr->jpg_que_tail) {
- /* Start the jpeg codec when the first frame is queued */
- jpeg_start(zr);
- }
- break;
-
- default:
- dprintk(1,
- KERN_ERR
- "%s: VIDIOC_STREAMON - invalid map mode %d\n",
- ZR_DEVNAME(zr), fh->map_mode);
- res = -EINVAL;
- break;
- }
- return res;
-}
-
-static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int i, res = 0;
- unsigned long flags;
-
- switch (fh->map_mode) {
- case ZORAN_MAP_MODE_RAW: /* raw capture */
- if (fh->buffers.active == ZORAN_FREE &&
- zr->v4l_buffers.active != ZORAN_FREE) {
- res = -EPERM; /* stay off other's settings! */
- return res;
- }
- if (zr->v4l_buffers.active == ZORAN_FREE)
- return res;
-
- spin_lock_irqsave(&zr->spinlock, flags);
- /* unload capture */
- if (zr->v4l_memgrab_active) {
-
- zr36057_set_memgrab(zr, 0);
- }
-
- for (i = 0; i < fh->buffers.num_buffers; i++)
- zr->v4l_buffers.buffer[i].state = BUZ_STATE_USER;
- fh->buffers = zr->v4l_buffers;
-
- zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE;
-
- zr->v4l_grab_seq = 0;
- zr->v4l_pend_head = zr->v4l_pend_tail = 0;
- zr->v4l_sync_tail = 0;
-
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- break;
-
- case ZORAN_MAP_MODE_JPG_REC:
- case ZORAN_MAP_MODE_JPG_PLAY:
- if (fh->buffers.active == ZORAN_FREE &&
- zr->jpg_buffers.active != ZORAN_FREE) {
- res = -EPERM; /* stay off other's settings! */
- return res;
- }
- if (zr->jpg_buffers.active == ZORAN_FREE)
- return res;
-
- res = jpg_qbuf(fh, -1,
- (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ?
- BUZ_MODE_MOTION_COMPRESS :
- BUZ_MODE_MOTION_DECOMPRESS);
- if (res)
- return res;
- break;
- default:
- dprintk(1, KERN_ERR
- "%s: VIDIOC_STREAMOFF - invalid map mode %d\n",
- ZR_DEVNAME(zr), fh->map_mode);
- res = -EINVAL;
- break;
- }
- return res;
-}
-static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- *std = zr->norm;
- return 0;
-}
-
-static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int res = 0;
-
- res = zoran_set_norm(zr, std);
- if (res)
- return res;
-
- res = wait_grab_pending(zr);
- return res;
-}
-
-static int zoran_enum_input(struct file *file, void *__fh,
- struct v4l2_input *inp)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- if (inp->index >= zr->card.inputs)
- return -EINVAL;
-
- strncpy(inp->name, zr->card.input[inp->index].name,
- sizeof(inp->name) - 1);
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- inp->std = V4L2_STD_ALL;
-
- /* Get status of video decoder */
- decoder_call(zr, video, g_input_status, &inp->status);
- return 0;
-}
-
-static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- *input = zr->input;
-
- return 0;
-}
-
-static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int res;
-
- res = zoran_set_input(zr, input);
- if (res)
- return res;
-
- /* Make sure the changes come into effect */
- res = wait_grab_pending(zr);
- return res;
-}
-
-static int zoran_enum_output(struct file *file, void *__fh,
- struct v4l2_output *outp)
-{
- if (outp->index != 0)
- return -EINVAL;
-
- outp->index = 0;
- outp->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY;
- strncpy(outp->name, "Autodetect", sizeof(outp->name)-1);
-
- return 0;
-}
-
-static int zoran_g_output(struct file *file, void *__fh, unsigned int *output)
-{
- *output = 0;
-
- return 0;
-}
-
-static int zoran_s_output(struct file *file, void *__fh, unsigned int output)
-{
- if (output != 0)
- return -EINVAL;
-
- return 0;
-}
-
-/* cropping (sub-frame capture) */
-static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
- dprintk(1, KERN_ERR
- "%s: VIDIOC_G_SELECTION - subcapture only supported for compressed capture\n",
- ZR_DEVNAME(zr));
- return -EINVAL;
- }
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP:
- sel->r.top = fh->jpg_settings.img_y;
- sel->r.left = fh->jpg_settings.img_x;
- sel->r.width = fh->jpg_settings.img_width;
- sel->r.height = fh->jpg_settings.img_height;
- break;
- case V4L2_SEL_TGT_CROP_DEFAULT:
- sel->r.top = sel->r.left = 0;
- sel->r.width = BUZ_MIN_WIDTH;
- sel->r.height = BUZ_MIN_HEIGHT;
- break;
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.top = sel->r.left = 0;
- sel->r.width = BUZ_MAX_WIDTH;
- sel->r.height = BUZ_MAX_HEIGHT;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- struct zoran_jpg_settings settings;
- int res;
-
- if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
- dprintk(1, KERN_ERR
- "%s: VIDIOC_S_SELECTION - subcapture only supported for compressed capture\n",
- ZR_DEVNAME(zr));
- return -EINVAL;
- }
-
- settings = fh->jpg_settings;
-
- if (fh->buffers.allocated) {
- dprintk(1, KERN_ERR
- "%s: VIDIOC_S_SELECTION - cannot change settings while active\n",
- ZR_DEVNAME(zr));
- return -EBUSY;
- }
-
- /* move into a form that we understand */
- settings.img_x = sel->r.left;
- settings.img_y = sel->r.top;
- settings.img_width = sel->r.width;
- settings.img_height = sel->r.height;
-
- /* check validity */
- res = zoran_check_jpg_settings(zr, &settings, 0);
- if (res)
- return res;
-
- /* accept */
- fh->jpg_settings = settings;
- return res;
-}
-
-static int zoran_g_jpegcomp(struct file *file, void *__fh,
- struct v4l2_jpegcompression *params)
-{
- struct zoran_fh *fh = __fh;
- memset(params, 0, sizeof(*params));
-
- params->quality = fh->jpg_settings.jpg_comp.quality;
- params->APPn = fh->jpg_settings.jpg_comp.APPn;
- memcpy(params->APP_data,
- fh->jpg_settings.jpg_comp.APP_data,
- fh->jpg_settings.jpg_comp.APP_len);
- params->APP_len = fh->jpg_settings.jpg_comp.APP_len;
- memcpy(params->COM_data,
- fh->jpg_settings.jpg_comp.COM_data,
- fh->jpg_settings.jpg_comp.COM_len);
- params->COM_len = fh->jpg_settings.jpg_comp.COM_len;
- params->jpeg_markers =
- fh->jpg_settings.jpg_comp.jpeg_markers;
-
- return 0;
-}
-
-static int zoran_s_jpegcomp(struct file *file, void *__fh,
- const struct v4l2_jpegcompression *params)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int res = 0;
- struct zoran_jpg_settings settings;
-
- settings = fh->jpg_settings;
-
- settings.jpg_comp = *params;
-
- if (fh->buffers.active != ZORAN_FREE) {
- dprintk(1, KERN_WARNING
- "%s: VIDIOC_S_JPEGCOMP called while in playback/capture mode\n",
- ZR_DEVNAME(zr));
- res = -EBUSY;
- return res;
- }
-
- res = zoran_check_jpg_settings(zr, &settings, 0);
- if (res)
- return res;
- if (!fh->buffers.allocated)
- fh->buffers.buffer_size =
- zoran_v4l2_calc_bufsize(&fh->jpg_settings);
- fh->jpg_settings.jpg_comp = settings.jpg_comp;
- return res;
-}
-
-static __poll_t
-zoran_poll (struct file *file,
- poll_table *wait)
-{
- struct zoran_fh *fh = file->private_data;
- struct zoran *zr = fh->zr;
- __poll_t res = v4l2_ctrl_poll(file, wait);
- int frame;
- unsigned long flags;
-
- /* we should check whether buffers are ready to be synced on
- * (w/o waits - O_NONBLOCK) here
- * if ready for read (sync), return EPOLLIN|EPOLLRDNORM,
- * if ready for write (sync), return EPOLLOUT|EPOLLWRNORM,
- * if error, return EPOLLERR,
- * if no buffers queued or so, return EPOLLNVAL
- */
-
- switch (fh->map_mode) {
- case ZORAN_MAP_MODE_RAW:
- poll_wait(file, &zr->v4l_capq, wait);
- frame = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME];
-
- spin_lock_irqsave(&zr->spinlock, flags);
- dprintk(3,
- KERN_DEBUG
- "%s: %s() raw - active=%c, sync_tail=%lu/%c, pend_tail=%lu, pend_head=%lu\n",
- ZR_DEVNAME(zr), __func__,
- "FAL"[fh->buffers.active], zr->v4l_sync_tail,
- "UPMD"[zr->v4l_buffers.buffer[frame].state],
- zr->v4l_pend_tail, zr->v4l_pend_head);
- /* Process is the one capturing? */
- if (fh->buffers.active != ZORAN_FREE &&
- /* Buffer ready to DQBUF? */
- zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE)
- res |= EPOLLIN | EPOLLRDNORM;
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- break;
-
- case ZORAN_MAP_MODE_JPG_REC:
- case ZORAN_MAP_MODE_JPG_PLAY:
- poll_wait(file, &zr->jpg_capq, wait);
- frame = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME];
-
- spin_lock_irqsave(&zr->spinlock, flags);
- dprintk(3,
- KERN_DEBUG
- "%s: %s() jpg - active=%c, que_tail=%lu/%c, que_head=%lu, dma=%lu/%lu\n",
- ZR_DEVNAME(zr), __func__,
- "FAL"[fh->buffers.active], zr->jpg_que_tail,
- "UPMD"[zr->jpg_buffers.buffer[frame].state],
- zr->jpg_que_head, zr->jpg_dma_tail, zr->jpg_dma_head);
- if (fh->buffers.active != ZORAN_FREE &&
- zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) {
- if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC)
- res |= EPOLLIN | EPOLLRDNORM;
- else
- res |= EPOLLOUT | EPOLLWRNORM;
- }
- spin_unlock_irqrestore(&zr->spinlock, flags);
-
- break;
-
- default:
- dprintk(1,
- KERN_ERR
- "%s: %s - internal error, unknown map_mode=%d\n",
- ZR_DEVNAME(zr), __func__, fh->map_mode);
- res |= EPOLLERR;
- }
-
- return res;
-}
-
-
-/*
- * This maps the buffers to user space.
- *
- * Depending on the state of fh->map_mode
- * the V4L or the MJPEG buffers are mapped
- * per buffer or all together
- *
- * Note that we need to connect to the unmap event
- * in order to unmap and de-allocate the buffers
- * accordingly (zoran_vm_close())
- */
-
-static void
-zoran_vm_open (struct vm_area_struct *vma)
-{
- struct zoran_mapping *map = vma->vm_private_data;
- atomic_inc(&map->count);
-}
-
-static void
-zoran_vm_close (struct vm_area_struct *vma)
-{
- struct zoran_mapping *map = vma->vm_private_data;
- struct zoran_fh *fh = map->fh;
- struct zoran *zr = fh->zr;
- int i;
-
- dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr),
- __func__, mode_name(fh->map_mode));
-
- for (i = 0; i < fh->buffers.num_buffers; i++) {
- if (fh->buffers.buffer[i].map == map)
- fh->buffers.buffer[i].map = NULL;
- }
- kfree(map);
-
- /* Any buffers still mapped? */
- for (i = 0; i < fh->buffers.num_buffers; i++) {
- if (fh->buffers.buffer[i].map) {
- return;
- }
- }
-
- dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr),
- __func__, mode_name(fh->map_mode));
-
- if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
- if (fh->buffers.active != ZORAN_FREE) {
- unsigned long flags;
-
- spin_lock_irqsave(&zr->spinlock, flags);
- zr36057_set_memgrab(zr, 0);
- zr->v4l_buffers.allocated = 0;
- zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE;
- spin_unlock_irqrestore(&zr->spinlock, flags);
- }
- v4l_fbuffer_free(fh);
- } else {
- if (fh->buffers.active != ZORAN_FREE) {
- jpg_qbuf(fh, -1, zr->codec_mode);
- zr->jpg_buffers.allocated = 0;
- zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE;
- }
- jpg_fbuffer_free(fh);
- }
-}
-
-static const struct vm_operations_struct zoran_vm_ops = {
- .open = zoran_vm_open,
- .close = zoran_vm_close,
-};
-
-static int
-zoran_mmap (struct file *file,
- struct vm_area_struct *vma)
-{
- struct zoran_fh *fh = file->private_data;
- struct zoran *zr = fh->zr;
- unsigned long size = (vma->vm_end - vma->vm_start);
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- int i, j;
- unsigned long page, start = vma->vm_start, todo, pos, fraglen;
- int first, last;
- struct zoran_mapping *map;
- int res = 0;
-
- dprintk(3,
- KERN_INFO "%s: %s(%s) of 0x%08lx-0x%08lx (size=%lu)\n",
- ZR_DEVNAME(zr), __func__,
- mode_name(fh->map_mode), vma->vm_start, vma->vm_end, size);
-
- if (!(vma->vm_flags & VM_SHARED) || !(vma->vm_flags & VM_READ) ||
- !(vma->vm_flags & VM_WRITE)) {
- dprintk(1,
- KERN_ERR
- "%s: %s - no MAP_SHARED/PROT_{READ,WRITE} given\n",
- ZR_DEVNAME(zr), __func__);
- return -EINVAL;
- }
-
- if (!fh->buffers.allocated) {
- dprintk(1,
- KERN_ERR
- "%s: %s(%s) - buffers not yet allocated\n",
- ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode));
- res = -ENOMEM;
- return res;
- }
-
- first = offset / fh->buffers.buffer_size;
- last = first - 1 + size / fh->buffers.buffer_size;
- if (offset % fh->buffers.buffer_size != 0 ||
- size % fh->buffers.buffer_size != 0 || first < 0 ||
- last < 0 || first >= fh->buffers.num_buffers ||
- last >= fh->buffers.buffer_size) {
- dprintk(1,
- KERN_ERR
- "%s: %s(%s) - offset=%lu or size=%lu invalid for bufsize=%d and numbufs=%d\n",
- ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), offset, size,
- fh->buffers.buffer_size,
- fh->buffers.num_buffers);
- res = -EINVAL;
- return res;
- }
-
- /* Check if any buffers are already mapped */
- for (i = first; i <= last; i++) {
- if (fh->buffers.buffer[i].map) {
- dprintk(1,
- KERN_ERR
- "%s: %s(%s) - buffer %d already mapped\n",
- ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), i);
- res = -EBUSY;
- return res;
- }
- }
-
- /* map these buffers */
- map = kmalloc(sizeof(struct zoran_mapping), GFP_KERNEL);
- if (!map) {
- res = -ENOMEM;
- return res;
- }
- map->fh = fh;
- atomic_set(&map->count, 1);
-
- vma->vm_ops = &zoran_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND;
- vma->vm_private_data = map;
-
- if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
- for (i = first; i <= last; i++) {
- todo = size;
- if (todo > fh->buffers.buffer_size)
- todo = fh->buffers.buffer_size;
- page = fh->buffers.buffer[i].v4l.fbuffer_phys;
- if (remap_pfn_range(vma, start, page >> PAGE_SHIFT,
- todo, PAGE_SHARED)) {
- dprintk(1,
- KERN_ERR
- "%s: %s(V4L) - remap_pfn_range failed\n",
- ZR_DEVNAME(zr), __func__);
- res = -EAGAIN;
- return res;
- }
- size -= todo;
- start += todo;
- fh->buffers.buffer[i].map = map;
- if (size == 0)
- break;
- }
- } else {
- for (i = first; i <= last; i++) {
- for (j = 0;
- j < fh->buffers.buffer_size / PAGE_SIZE;
- j++) {
- fraglen =
- (le32_to_cpu(fh->buffers.buffer[i].jpg.
- frag_tab[2 * j + 1]) & ~1) << 1;
- todo = size;
- if (todo > fraglen)
- todo = fraglen;
- pos =
- le32_to_cpu(fh->buffers.
- buffer[i].jpg.frag_tab[2 * j]);
- /* should just be pos on i386 */
- page = virt_to_phys(bus_to_virt(pos))
- >> PAGE_SHIFT;
- if (remap_pfn_range(vma, start, page,
- todo, PAGE_SHARED)) {
- dprintk(1,
- KERN_ERR
- "%s: %s(V4L) - remap_pfn_range failed\n",
- ZR_DEVNAME(zr), __func__);
- res = -EAGAIN;
- return res;
- }
- size -= todo;
- start += todo;
- if (size == 0)
- break;
- if (le32_to_cpu(fh->buffers.buffer[i].jpg.
- frag_tab[2 * j + 1]) & 1)
- break; /* was last fragment */
- }
- fh->buffers.buffer[i].map = map;
- if (size == 0)
- break;
-
- }
- }
- return res;
-}
-
-static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
- .vidioc_querycap = zoran_querycap,
- .vidioc_s_selection = zoran_s_selection,
- .vidioc_g_selection = zoran_g_selection,
- .vidioc_enum_input = zoran_enum_input,
- .vidioc_g_input = zoran_g_input,
- .vidioc_s_input = zoran_s_input,
- .vidioc_enum_output = zoran_enum_output,
- .vidioc_g_output = zoran_g_output,
- .vidioc_s_output = zoran_s_output,
- .vidioc_g_fbuf = zoran_g_fbuf,
- .vidioc_s_fbuf = zoran_s_fbuf,
- .vidioc_g_std = zoran_g_std,
- .vidioc_s_std = zoran_s_std,
- .vidioc_g_jpegcomp = zoran_g_jpegcomp,
- .vidioc_s_jpegcomp = zoran_s_jpegcomp,
- .vidioc_overlay = zoran_overlay,
- .vidioc_reqbufs = zoran_reqbufs,
- .vidioc_querybuf = zoran_querybuf,
- .vidioc_qbuf = zoran_qbuf,
- .vidioc_dqbuf = zoran_dqbuf,
- .vidioc_streamon = zoran_streamon,
- .vidioc_streamoff = zoran_streamoff,
- .vidioc_enum_fmt_vid_cap = zoran_enum_fmt_vid_cap,
- .vidioc_enum_fmt_vid_out = zoran_enum_fmt_vid_out,
- .vidioc_enum_fmt_vid_overlay = zoran_enum_fmt_vid_overlay,
- .vidioc_g_fmt_vid_cap = zoran_g_fmt_vid_cap,
- .vidioc_g_fmt_vid_out = zoran_g_fmt_vid_out,
- .vidioc_g_fmt_vid_overlay = zoran_g_fmt_vid_overlay,
- .vidioc_s_fmt_vid_cap = zoran_s_fmt_vid_cap,
- .vidioc_s_fmt_vid_out = zoran_s_fmt_vid_out,
- .vidioc_s_fmt_vid_overlay = zoran_s_fmt_vid_overlay,
- .vidioc_try_fmt_vid_cap = zoran_try_fmt_vid_cap,
- .vidioc_try_fmt_vid_out = zoran_try_fmt_vid_out,
- .vidioc_try_fmt_vid_overlay = zoran_try_fmt_vid_overlay,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct v4l2_file_operations zoran_fops = {
- .owner = THIS_MODULE,
- .open = zoran_open,
- .release = zoran_close,
- .unlocked_ioctl = video_ioctl2,
- .mmap = zoran_mmap,
- .poll = zoran_poll,
-};
-
-const struct video_device zoran_template = {
- .name = ZORAN_NAME,
- .fops = &zoran_fops,
- .ioctl_ops = &zoran_ioctl_ops,
- .release = &zoran_vdev_release,
- .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
-};
-
diff --git a/drivers/staging/media/zoran/zoran_procfs.c b/drivers/staging/media/zoran/zoran_procfs.c
deleted file mode 100644
index 78ac8f853748..000000000000
--- a/drivers/staging/media/zoran/zoran_procfs.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Zoran zr36057/zr36067 PCI controller driver, for the
- * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
- * Media Labs LML33/LML33R10.
- *
- * This part handles the procFS entries (/proc/ZORAN[%d])
- *
- * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
- *
- * Currently maintained by:
- * Ronald Bultje <rbultje@ronald.bitfreak.net>
- * Laurent Pinchart <laurent.pinchart@skynet.be>
- * Mailinglist <mjpeg-users@lists.sf.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-
-#include <linux/proc_fs.h>
-#include <linux/pci.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/videodev2.h>
-#include <linux/spinlock.h>
-#include <linux/sem.h>
-#include <linux/seq_file.h>
-
-#include <linux/ctype.h>
-#include <linux/poll.h>
-#include <asm/io.h>
-
-#include "videocodec.h"
-#include "zoran.h"
-#include "zoran_procfs.h"
-#include "zoran_card.h"
-
-#ifdef CONFIG_PROC_FS
-struct procfs_params_zr36067 {
- char *name;
- short reg;
- u32 mask;
- short bit;
-};
-
-static const struct procfs_params_zr36067 zr67[] = {
- {"HSPol", 0x000, 1, 30},
- {"HStart", 0x000, 0x3ff, 10},
- {"HEnd", 0x000, 0x3ff, 0},
-
- {"VSPol", 0x004, 1, 30},
- {"VStart", 0x004, 0x3ff, 10},
- {"VEnd", 0x004, 0x3ff, 0},
-
- {"ExtFl", 0x008, 1, 26},
- {"TopField", 0x008, 1, 25},
- {"VCLKPol", 0x008, 1, 24},
- {"DupFld", 0x008, 1, 20},
- {"LittleEndian", 0x008, 1, 0},
-
- {"HsyncStart", 0x10c, 0xffff, 16},
- {"LineTot", 0x10c, 0xffff, 0},
-
- {"NAX", 0x110, 0xffff, 16},
- {"PAX", 0x110, 0xffff, 0},
-
- {"NAY", 0x114, 0xffff, 16},
- {"PAY", 0x114, 0xffff, 0},
-
- /* {"",,,}, */
-
- {NULL, 0, 0, 0},
-};
-
-static void
-setparam (struct zoran *zr,
- char *name,
- char *sval)
-{
- int i = 0, reg0, reg, val;
-
- while (zr67[i].name != NULL) {
- if (!strncmp(name, zr67[i].name, strlen(zr67[i].name))) {
- reg = reg0 = btread(zr67[i].reg);
- reg &= ~(zr67[i].mask << zr67[i].bit);
- if (!isdigit(sval[0]))
- break;
- val = simple_strtoul(sval, NULL, 0);
- if ((val & ~zr67[i].mask))
- break;
- reg |= (val & zr67[i].mask) << zr67[i].bit;
- dprintk(4,
- KERN_INFO
- "%s: setparam: setting ZR36067 register 0x%03x: 0x%08x=>0x%08x %s=%d\n",
- ZR_DEVNAME(zr), zr67[i].reg, reg0, reg,
- zr67[i].name, val);
- btwrite(reg, zr67[i].reg);
- break;
- }
- i++;
- }
-}
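
setparam() above does a masked read-modify-write of a 32-bit ZR36067 register: clear the field described by (mask, bit) from the zr67[] table, then or-in the new value shifted into place (e.g. "HStart" uses mask 0x3ff at bit 10). A sketch of just that step, with a hypothetical helper independent of btread()/btwrite():

        /* Hypothetical helper: update one bit-field inside a 32-bit register value. */
        static u32 zr67_update_field(u32 reg, u32 mask, unsigned int bit, u32 val)
        {
                reg &= ~(mask << bit);          /* clear the old field */
                reg |= (val & mask) << bit;     /* insert the new value */
                return reg;
        }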
-
-static int zoran_show(struct seq_file *p, void *v)
-{
- struct zoran *zr = p->private;
- int i;
-
- seq_printf(p, "ZR36067 registers:\n");
- for (i = 0; i < 0x130; i += 16)
- seq_printf(p, "%03X %08X %08X %08X %08X \n", i,
- btread(i), btread(i+4), btread(i+8), btread(i+12));
- return 0;
-}
-
-static int zoran_open(struct inode *inode, struct file *file)
-{
- struct zoran *data = PDE_DATA(inode);
- return single_open(file, zoran_show, data);
-}
-
-static ssize_t zoran_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct zoran *zr = PDE_DATA(file_inode(file));
- char *string, *sp;
- char *line, *ldelim, *varname, *svar, *tdelim;
-
- if (count > 32768) /* Stupidity filter */
- return -EINVAL;
-
- string = sp = vmalloc(count + 1);
- if (!string) {
- dprintk(1,
- KERN_ERR
- "%s: write_proc: cannot allocate memory\n",
- ZR_DEVNAME(zr));
- return -ENOMEM;
- }
- if (copy_from_user(string, buffer, count)) {
- vfree(string);
- return -EFAULT;
- }
- string[count] = 0;
- dprintk(4, KERN_INFO "%s: write_proc: name=%pD count=%zu zr=%p\n",
- ZR_DEVNAME(zr), file, count, zr);
- ldelim = " \t\n";
- tdelim = "=";
- line = strpbrk(sp, ldelim);
- while (line) {
- *line = 0;
- svar = strpbrk(sp, tdelim);
- if (svar) {
- *svar = 0;
- varname = sp;
- svar++;
- setparam(zr, varname, svar);
- }
- sp = line + 1;
- line = strpbrk(sp, ldelim);
- }
- vfree(string);
-
- return count;
-}
-
-static const struct file_operations zoran_operations = {
- .owner = THIS_MODULE,
- .open = zoran_open,
- .read = seq_read,
- .write = zoran_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
-int
-zoran_proc_init (struct zoran *zr)
-{
-#ifdef CONFIG_PROC_FS
- char name[8];
-
- snprintf(name, 7, "zoran%d", zr->id);
- zr->zoran_proc = proc_create_data(name, 0, NULL, &zoran_operations, zr);
- if (zr->zoran_proc != NULL) {
- dprintk(2,
- KERN_INFO
- "%s: procfs entry /proc/%s allocated. data=%p\n",
- ZR_DEVNAME(zr), name, zr);
- } else {
- dprintk(1, KERN_ERR "%s: Unable to initialise /proc/%s\n",
- ZR_DEVNAME(zr), name);
- return 1;
- }
-#endif
- return 0;
-}
-
-void
-zoran_proc_cleanup (struct zoran *zr)
-{
-#ifdef CONFIG_PROC_FS
- char name[8];
-
- snprintf(name, 7, "zoran%d", zr->id);
- if (zr->zoran_proc)
- remove_proc_entry(name, NULL);
- zr->zoran_proc = NULL;
-#endif
-}
diff --git a/drivers/staging/media/zoran/zoran_procfs.h b/drivers/staging/media/zoran/zoran_procfs.h
deleted file mode 100644
index 0ac7cb0011f2..000000000000
--- a/drivers/staging/media/zoran/zoran_procfs.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Zoran zr36057/zr36067 PCI controller driver, for the
- * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
- * Media Labs LML33/LML33R10.
- *
- * This part handles the procFS entries (/proc/ZORAN[%d])
- *
- * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
- *
- * Currently maintained by:
- * Ronald Bultje <rbultje@ronald.bitfreak.net>
- * Laurent Pinchart <laurent.pinchart@skynet.be>
- * Mailinglist <mjpeg-users@lists.sf.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ZORAN_PROCFS_H__
-#define __ZORAN_PROCFS_H__
-
-extern int zoran_proc_init(struct zoran *zr);
-extern void zoran_proc_cleanup(struct zoran *zr);
-
-#endif /* __ZORAN_PROCFS_H__ */
diff --git a/drivers/staging/media/zoran/zr36016.c b/drivers/staging/media/zoran/zr36016.c
deleted file mode 100644
index 8736b9d8d97e..000000000000
--- a/drivers/staging/media/zoran/zr36016.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
- * Zoran ZR36016 basic configuration functions
- *
- * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
- *
- * $Id: zr36016.c,v 1.1.2.14 2003/08/20 19:46:55 rbultje Exp $
- *
- * ------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ------------------------------------------------------------------------
- */
-
-#define ZR016_VERSION "v0.7"
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-
-#include <linux/types.h>
-#include <linux/wait.h>
-
-/* I/O commands, error codes */
-#include <asm/io.h>
-
-/* v4l API */
-
-/* headerfile of this module */
-#include "zr36016.h"
-
-/* codec io API */
-#include "videocodec.h"
-
-/* there is no sense in having more than 20 or so codecs;
- this limit just prevents unwanted loops */
-#define MAX_CODECS 20
-
-/* amount of chips attached via this driver */
-static int zr36016_codecs;
-
-/* debugging is available via module parameter */
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0-4)");
-
-#define dprintk(num, format, args...) \
- do { \
- if (debug >= num) \
- printk(format, ##args); \
- } while (0)
-
-/* =========================================================================
- Local hardware I/O functions:
-
- read/write via codec layer (registers are located in the master device)
- ========================================================================= */
-
-/* read and write functions */
-static u8
-zr36016_read (struct zr36016 *ptr,
- u16 reg)
-{
- u8 value = 0;
-
- // just in case something is wrong...
- if (ptr->codec->master_data->readreg)
- value =
- (ptr->codec->master_data->
- readreg(ptr->codec, reg)) & 0xFF;
- else
- dprintk(1,
- KERN_ERR "%s: invalid I/O setup, nothing read!\n",
- ptr->name);
-
- dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg,
- value);
-
- return value;
-}
-
-static void
-zr36016_write (struct zr36016 *ptr,
- u16 reg,
- u8 value)
-{
- dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value,
- reg);
-
- // just in case something is wrong...
- if (ptr->codec->master_data->writereg) {
- ptr->codec->master_data->writereg(ptr->codec, reg, value);
- } else
- dprintk(1,
- KERN_ERR
- "%s: invalid I/O setup, nothing written!\n",
- ptr->name);
-}
-
-/* indirect read and write functions */
-/* the 016 supports auto-addr-increment, but
- * writing the address each time costs little and is safer... */
-static u8
-zr36016_readi (struct zr36016 *ptr,
- u16 reg)
-{
- u8 value = 0;
-
- // just in case something is wrong...
- if ((ptr->codec->master_data->writereg) &&
- (ptr->codec->master_data->readreg)) {
- ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR
- value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA
- } else
- dprintk(1,
- KERN_ERR
- "%s: invalid I/O setup, nothing read (i)!\n",
- ptr->name);
-
- dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name,
- reg, value);
- return value;
-}
-
-static void
-zr36016_writei (struct zr36016 *ptr,
- u16 reg,
- u8 value)
-{
- dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name,
- value, reg);
-
- // just in case something is wrong...
- if (ptr->codec->master_data->writereg) {
- ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR
- ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA
- } else
- dprintk(1,
- KERN_ERR
- "%s: invalid I/O setup, nothing written (i)!\n",
- ptr->name);
-}
-
-/* =========================================================================
- Local helper function:
-
- version read
- ========================================================================= */
-
-/* version kept in datastructure */
-static u8
-zr36016_read_version (struct zr36016 *ptr)
-{
- ptr->version = zr36016_read(ptr, 0) >> 4;
- return ptr->version;
-}
-
-/* =========================================================================
- Local helper function:
-
- basic test of "connectivity", writes/reads to/from PAX-Lo register
- ========================================================================= */
-
-static int
-zr36016_basic_test (struct zr36016 *ptr)
-{
- if (debug) {
- int i;
- zr36016_writei(ptr, ZR016I_PAX_LO, 0x55);
- dprintk(1, KERN_INFO "%s: registers: ", ptr->name);
- for (i = 0; i <= 0x0b; i++)
- dprintk(1, "%02x ", zr36016_readi(ptr, i));
- dprintk(1, "\n");
- }
- // for testing just write 0, then the default value to a register and read
- // it back in both cases
- zr36016_writei(ptr, ZR016I_PAX_LO, 0x00);
- if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, can't connect to vfe processor!\n",
- ptr->name);
- return -ENXIO;
- }
- zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0);
- if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, can't connect to vfe processor!\n",
- ptr->name);
- return -ENXIO;
- }
- // we allow version numbers from 0-3, should be enough, though
- zr36016_read_version(ptr);
- if (ptr->version & 0x0c) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, suspicious version %d found...\n",
- ptr->name, ptr->version);
- return -ENXIO;
- }
-
- return 0; /* looks good! */
-}
-
-/* =========================================================================
- Local helper function:
-
- simple loop for pushing the init datasets -- NOT USED --
- ========================================================================= */
-
-#if 0
-static int zr36016_pushit (struct zr36016 *ptr,
- u16 startreg,
- u16 len,
- const char *data)
-{
- int i=0;
-
- dprintk(4, "%s: write data block to 0x%04x (len=%d)\n",
- ptr->name, startreg,len);
- while (i<len) {
- zr36016_writei(ptr, startreg++, data[i++]);
- }
-
- return i;
-}
-#endif
-
-/* =========================================================================
- Basic datasets & init:
-
- //TODO//
- ========================================================================= */
-
-static void
-zr36016_init (struct zr36016 *ptr)
-{
- // stop any processing
- zr36016_write(ptr, ZR016_GOSTOP, 0);
-
- // mode setup (yuv422 in and out, compression/expansion according to mode)
- zr36016_write(ptr, ZR016_MODE,
- ZR016_YUV422 | ZR016_YUV422_YUV422 |
- (ptr->mode == CODEC_DO_COMPRESSION ?
- ZR016_COMPRESSION : ZR016_EXPANSION));
-
- // misc setup
- zr36016_writei(ptr, ZR016I_SETUP1,
- (ptr->xdec ? (ZR016_HRFL | ZR016_HORZ) : 0) |
- (ptr->ydec ? ZR016_VERT : 0) | ZR016_CNTI);
- zr36016_writei(ptr, ZR016I_SETUP2, ZR016_CCIR);
-
- // Window setup
- // (no extra offset for now, norm defines offset, default width height)
- zr36016_writei(ptr, ZR016I_PAX_HI, ptr->width >> 8);
- zr36016_writei(ptr, ZR016I_PAX_LO, ptr->width & 0xFF);
- zr36016_writei(ptr, ZR016I_PAY_HI, ptr->height >> 8);
- zr36016_writei(ptr, ZR016I_PAY_LO, ptr->height & 0xFF);
- zr36016_writei(ptr, ZR016I_NAX_HI, ptr->xoff >> 8);
- zr36016_writei(ptr, ZR016I_NAX_LO, ptr->xoff & 0xFF);
- zr36016_writei(ptr, ZR016I_NAY_HI, ptr->yoff >> 8);
- zr36016_writei(ptr, ZR016I_NAY_LO, ptr->yoff & 0xFF);
-
- /* shall we continue now, please? */
- zr36016_write(ptr, ZR016_GOSTOP, 1);
-}
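
zr36016_init() above programs each 16-bit window parameter as a HI/LO pair of indirect registers. A sketch of that pattern with a hypothetical helper (register pairs as defined in zr36016.h further down):

        /* Hypothetical helper: write a 16-bit value to a HI/LO indirect register pair. */
        static void zr36016_writei16(struct zr36016 *ptr, u16 reg_hi, u16 reg_lo, u16 val)
        {
                zr36016_writei(ptr, reg_hi, val >> 8);          /* high byte */
                zr36016_writei(ptr, reg_lo, val & 0xFF);        /* low byte */
        }

        /* e.g. zr36016_writei16(ptr, ZR016I_PAX_HI, ZR016I_PAX_LO, ptr->width); */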
-
-/* =========================================================================
- CODEC API FUNCTIONS
-
- these functions are accessed by the master via the API structure
- ========================================================================= */
-
-/* sets the compression/expansion mode and launches the codec -
- this should be the last call from the master before starting processing */
-static int
-zr36016_set_mode (struct videocodec *codec,
- int mode)
-{
- struct zr36016 *ptr = (struct zr36016 *) codec->data;
-
- dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
-
- if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION))
- return -EINVAL;
-
- ptr->mode = mode;
- zr36016_init(ptr);
-
- return 0;
-}
-
-/* set picture size */
-static int
-zr36016_set_video (struct videocodec *codec,
- struct tvnorm *norm,
- struct vfe_settings *cap,
- struct vfe_polarity *pol)
-{
- struct zr36016 *ptr = (struct zr36016 *) codec->data;
-
- dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n",
- ptr->name, norm->HStart, norm->VStart,
- cap->x, cap->y, cap->width, cap->height,
- cap->decimation);
-
- /* if () return -EINVAL;
- * trust the master driver that it knows what it does - so
- * we allow invalid startx/y for now ... */
- ptr->width = cap->width;
- ptr->height = cap->height;
- /* (Ronald) This is ugly. zoran_device.c, line 387
- * already mentions what happens if HStart is even
- * (blue faces, etc., Cr/Cb inverted). There's probably
- * some good reason why HStart is 0 instead of 1, so I'm
- * leaving it like this for now, but really... This could be
- * done a lot more simply */
- ptr->xoff = (norm->HStart ? norm->HStart : 1) + cap->x;
- /* Something to note here (I don't understand it): setting
- * VStart too high will cause the codec to 'not work'. I
- * really don't get it. Values of 16 (VStart) already break
- * it here. Just '0' seems to work. More testing needed! */
- ptr->yoff = norm->VStart + cap->y;
- /* (Ronald) dzjeeh, can't this thing do hor_decimation = 4? */
- ptr->xdec = ((cap->decimation & 0xff) == 1) ? 0 : 1;
- ptr->ydec = (((cap->decimation >> 8) & 0xff) == 1) ? 0 : 1;
-
- return 0;
-}
-
-/* additional control functions */
-static int
-zr36016_control (struct videocodec *codec,
- int type,
- int size,
- void *data)
-{
- struct zr36016 *ptr = (struct zr36016 *) codec->data;
- int *ival = (int *) data;
-
- dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type,
- size);
-
- switch (type) {
- case CODEC_G_STATUS: /* get last status - we don't know it ... */
- if (size != sizeof(int))
- return -EFAULT;
- *ival = 0;
- break;
-
- case CODEC_G_CODEC_MODE:
- if (size != sizeof(int))
- return -EFAULT;
- *ival = 0;
- break;
-
- case CODEC_S_CODEC_MODE:
- if (size != sizeof(int))
- return -EFAULT;
- if (*ival != 0)
- return -EINVAL;
- /* not needed, do nothing */
- return 0;
-
- case CODEC_G_VFE:
- case CODEC_S_VFE:
- return 0;
-
- case CODEC_S_MMAP:
- /* not available, give an error */
- return -ENXIO;
-
- default:
- return -EINVAL;
- }
-
- return size;
-}
-
-/* =========================================================================
- Exit and unregister function:
-
- Deinitializes Zoran's JPEG processor
- ========================================================================= */
-
-static int
-zr36016_unset (struct videocodec *codec)
-{
- struct zr36016 *ptr = codec->data;
-
- if (ptr) {
- /* do we need some codec deinit here, too? */
-
- dprintk(1, "%s: finished codec #%d\n", ptr->name,
- ptr->num);
- kfree(ptr);
- codec->data = NULL;
-
- zr36016_codecs--;
- return 0;
- }
-
- return -EFAULT;
-}
-
-/* =========================================================================
- Setup and registry function:
-
- Initializes Zoran's JPEG processor
-
- Also sets pixel size, average code size, mode (compr./decompr.)
- (the given size is determined by the processor with the video interface)
- ========================================================================= */
-
-static int
-zr36016_setup (struct videocodec *codec)
-{
- struct zr36016 *ptr;
- int res;
-
- dprintk(2, "zr36016: initializing VFE subsystem #%d.\n",
- zr36016_codecs);
-
- if (zr36016_codecs == MAX_CODECS) {
- dprintk(1,
- KERN_ERR "zr36016: Can't attach more codecs!\n");
- return -ENOSPC;
- }
- //mem structure init
- codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL);
- if (NULL == ptr) {
- dprintk(1, KERN_ERR "zr36016: Can't get enough memory!\n");
- return -ENOMEM;
- }
-
- snprintf(ptr->name, sizeof(ptr->name), "zr36016[%d]",
- zr36016_codecs);
- ptr->num = zr36016_codecs++;
- ptr->codec = codec;
-
- //testing
- res = zr36016_basic_test(ptr);
- if (res < 0) {
- zr36016_unset(codec);
- return res;
- }
- //final setup
- ptr->mode = CODEC_DO_COMPRESSION;
- ptr->width = 768;
- ptr->height = 288;
- ptr->xdec = 1;
- ptr->ydec = 0;
- zr36016_init(ptr);
-
- dprintk(1, KERN_INFO "%s: codec v%d attached and running\n",
- ptr->name, ptr->version);
-
- return 0;
-}
-
-static const struct videocodec zr36016_codec = {
- .owner = THIS_MODULE,
- .name = "zr36016",
- .magic = 0L, // magic not used
- .flags =
- CODEC_FLAG_HARDWARE | CODEC_FLAG_VFE | CODEC_FLAG_ENCODER |
- CODEC_FLAG_DECODER,
- .type = CODEC_TYPE_ZR36016,
- .setup = zr36016_setup, // functionality
- .unset = zr36016_unset,
- .set_mode = zr36016_set_mode,
- .set_video = zr36016_set_video,
- .control = zr36016_control,
- // others are not used
-};
-
-/* =========================================================================
- HOOK IN DRIVER AS KERNEL MODULE
- ========================================================================= */
-
-static int __init
-zr36016_init_module (void)
-{
- //dprintk(1, "ZR36016 driver %s\n",ZR016_VERSION);
- zr36016_codecs = 0;
- return videocodec_register(&zr36016_codec);
-}
-
-static void __exit
-zr36016_cleanup_module (void)
-{
- if (zr36016_codecs) {
- dprintk(1,
- "zr36016: something's wrong - %d codecs left somehow.\n",
- zr36016_codecs);
- }
- videocodec_unregister(&zr36016_codec);
-}
-
-module_init(zr36016_init_module);
-module_exit(zr36016_cleanup_module);
-
-MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
-MODULE_DESCRIPTION("Driver module for ZR36016 video frontends "
- ZR016_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36016.h b/drivers/staging/media/zoran/zr36016.h
deleted file mode 100644
index 784bcf5727b8..000000000000
--- a/drivers/staging/media/zoran/zr36016.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Zoran ZR36016 basic configuration functions - header file
- *
- * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
- *
- * $Id: zr36016.h,v 1.1.2.3 2003/01/14 21:18:07 rbultje Exp $
- *
- * ------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ------------------------------------------------------------------------
- */
-
-#ifndef ZR36016_H
-#define ZR36016_H
-
-/* data stored for each zoran jpeg codec chip */
-struct zr36016 {
- char name[32];
- int num;
- /* io datastructure */
- struct videocodec *codec;
- // coder status
- __u8 version;
- // actual coder setup
- int mode;
-
- __u16 xoff;
- __u16 yoff;
- __u16 width;
- __u16 height;
- __u16 xdec;
- __u16 ydec;
-};
-
-/* direct register addresses */
-#define ZR016_GOSTOP 0x00
-#define ZR016_MODE 0x01
-#define ZR016_IADDR 0x02
-#define ZR016_IDATA 0x03
-
-/* indirect register addresses */
-#define ZR016I_SETUP1 0x00
-#define ZR016I_SETUP2 0x01
-#define ZR016I_NAX_LO 0x02
-#define ZR016I_NAX_HI 0x03
-#define ZR016I_PAX_LO 0x04
-#define ZR016I_PAX_HI 0x05
-#define ZR016I_NAY_LO 0x06
-#define ZR016I_NAY_HI 0x07
-#define ZR016I_PAY_LO 0x08
-#define ZR016I_PAY_HI 0x09
-#define ZR016I_NOL_LO 0x0a
-#define ZR016I_NOL_HI 0x0b
-
-/* possible values for mode register */
-#define ZR016_RGB444_YUV444 0x00
-#define ZR016_RGB444_YUV422 0x01
-#define ZR016_RGB444_YUV411 0x02
-#define ZR016_RGB444_Y400 0x03
-#define ZR016_RGB444_RGB444 0x04
-#define ZR016_YUV444_YUV444 0x08
-#define ZR016_YUV444_YUV422 0x09
-#define ZR016_YUV444_YUV411 0x0a
-#define ZR016_YUV444_Y400 0x0b
-#define ZR016_YUV444_RGB444 0x0c
-#define ZR016_YUV422_YUV422 0x11
-#define ZR016_YUV422_YUV411 0x12
-#define ZR016_YUV422_Y400 0x13
-#define ZR016_YUV411_YUV411 0x16
-#define ZR016_YUV411_Y400 0x17
-#define ZR016_4444_4444 0x19
-#define ZR016_100_100 0x1b
-
-#define ZR016_RGB444 0x00
-#define ZR016_YUV444 0x20
-#define ZR016_YUV422 0x40
-
-#define ZR016_COMPRESSION 0x80
-#define ZR016_EXPANSION 0x80
-
-/* possible values for setup 1 register */
-#define ZR016_CKRT 0x80
-#define ZR016_VERT 0x40
-#define ZR016_HORZ 0x20
-#define ZR016_HRFL 0x10
-#define ZR016_DSFL 0x08
-#define ZR016_SBFL 0x04
-#define ZR016_RSTR 0x02
-#define ZR016_CNTI 0x01
-
-/* possible values for setup 2 register */
-#define ZR016_SYEN 0x40
-#define ZR016_CCIR 0x04
-#define ZR016_SIGN 0x02
-#define ZR016_YMCS 0x01
-
-#endif /* ifndef ZR36016_H */
diff --git a/drivers/staging/media/zoran/zr36050.c b/drivers/staging/media/zoran/zr36050.c
deleted file mode 100644
index 5ebfc16672f3..000000000000
--- a/drivers/staging/media/zoran/zr36050.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/*
- * Zoran ZR36050 basic configuration functions
- *
- * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
- *
- * $Id: zr36050.c,v 1.1.2.11 2003/08/03 14:54:53 rbultje Exp $
- *
- * ------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ------------------------------------------------------------------------
- */
-
-#define ZR050_VERSION "v0.7.1"
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-
-#include <linux/types.h>
-#include <linux/wait.h>
-
-/* I/O commands, error codes */
-#include <asm/io.h>
-
-/* headerfile of this module */
-#include "zr36050.h"
-
-/* codec io API */
-#include "videocodec.h"
-
-/* there is no sense in having more than 20 or so codecs;
- this limit just prevents unwanted loops */
-#define MAX_CODECS 20
-
-/* amount of chips attached via this driver */
-static int zr36050_codecs;
-
-/* debugging is available via module parameter */
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0-4)");
-
-#define dprintk(num, format, args...) \
- do { \
- if (debug >= num) \
- printk(format, ##args); \
- } while (0)
-
-/* =========================================================================
- Local hardware I/O functions:
-
- read/write via codec layer (registers are located in the master device)
- ========================================================================= */
-
-/* read and write functions */
-static u8
-zr36050_read (struct zr36050 *ptr,
- u16 reg)
-{
- u8 value = 0;
-
- // just in case something is wrong...
- if (ptr->codec->master_data->readreg)
- value = (ptr->codec->master_data->readreg(ptr->codec,
- reg)) & 0xFF;
- else
- dprintk(1,
- KERN_ERR "%s: invalid I/O setup, nothing read!\n",
- ptr->name);
-
- dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg,
- value);
-
- return value;
-}
-
-static void
-zr36050_write (struct zr36050 *ptr,
- u16 reg,
- u8 value)
-{
- dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value,
- reg);
-
- // just in case something is wrong...
- if (ptr->codec->master_data->writereg)
- ptr->codec->master_data->writereg(ptr->codec, reg, value);
- else
- dprintk(1,
- KERN_ERR
- "%s: invalid I/O setup, nothing written!\n",
- ptr->name);
-}
-
-/* =========================================================================
- Local helper function:
-
- status read
- ========================================================================= */
-
-/* status is kept in datastructure */
-static u8
-zr36050_read_status1 (struct zr36050 *ptr)
-{
- ptr->status1 = zr36050_read(ptr, ZR050_STATUS_1);
-
- zr36050_read(ptr, 0);
- return ptr->status1;
-}
-
-/* =========================================================================
- Local helper function:
-
- scale factor read
- ========================================================================= */
-
-/* scale factor is kept in datastructure */
-static u16
-zr36050_read_scalefactor (struct zr36050 *ptr)
-{
- ptr->scalefact = (zr36050_read(ptr, ZR050_SF_HI) << 8) |
- (zr36050_read(ptr, ZR050_SF_LO) & 0xFF);
-
- /* leave 0 selected for an eventual GO from the master */
- zr36050_read(ptr, 0);
- return ptr->scalefact;
-}
-
-/* =========================================================================
- Local helper function:
-
- wait if codec is ready to proceed (end of processing) or time is over
- ========================================================================= */
-
-static void
-zr36050_wait_end (struct zr36050 *ptr)
-{
- int i = 0;
-
- while (!(zr36050_read_status1(ptr) & 0x4)) {
- udelay(1);
- if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
- dprintk(1,
- "%s: timeout at wait_end (last status: 0x%02x)\n",
- ptr->name, ptr->status1);
- break;
- }
- }
-}
-
-/* =========================================================================
- Local helper function:
-
- basic test of "connectivity", writes/reads to/from memory the SOF marker
- ========================================================================= */
-
-static int
-zr36050_basic_test (struct zr36050 *ptr)
-{
- zr36050_write(ptr, ZR050_SOF_IDX, 0x00);
- zr36050_write(ptr, ZR050_SOF_IDX + 1, 0x00);
- if ((zr36050_read(ptr, ZR050_SOF_IDX) |
- zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0x0000) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, can't connect to jpeg processor!\n",
- ptr->name);
- return -ENXIO;
- }
- zr36050_write(ptr, ZR050_SOF_IDX, 0xff);
- zr36050_write(ptr, ZR050_SOF_IDX + 1, 0xc0);
- if (((zr36050_read(ptr, ZR050_SOF_IDX) << 8) |
- zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0xffc0) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, can't connect to jpeg processor!\n",
- ptr->name);
- return -ENXIO;
- }
-
- zr36050_wait_end(ptr);
- if ((ptr->status1 & 0x4) == 0) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, jpeg processor failed (end flag)!\n",
- ptr->name);
- return -EBUSY;
- }
-
- return 0; /* looks good! */
-}
-
-/* =========================================================================
- Local helper function:
-
- simple loop for pushing the init datasets
- ========================================================================= */
-
-static int
-zr36050_pushit (struct zr36050 *ptr,
- u16 startreg,
- u16 len,
- const char *data)
-{
- int i = 0;
-
- dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name,
- startreg, len);
- while (i < len) {
- zr36050_write(ptr, startreg++, data[i++]);
- }
-
- return i;
-}
-
-/* =========================================================================
- Basic datasets:
-
- jpeg baseline setup data (you can find it in lots of places on the internet,
- or just extract it from any regular .jpg image...)
-
- These could be made variable, but until that is needed they are kept fixed to
- save memory. Otherwise expand the zr36050 structure with arrays, push the
- values to it and initialize from there, as e.g. the linux zr36057/60 driver does.
- ========================================================================= */
-
-static const char zr36050_dqt[0x86] = {
- 0xff, 0xdb, //Marker: DQT
- 0x00, 0x84, //Length: 2*65+2
- 0x00, //Pq,Tq first table
- 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e,
- 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28,
- 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25,
- 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33,
- 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44,
- 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57,
- 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71,
- 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63,
- 0x01, //Pq,Tq second table
- 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
- 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
-};
-
-static const char zr36050_dht[0x1a4] = {
- 0xff, 0xc4, //Marker: DHT
- 0x01, 0xa2, //Length: 2*AC, 2*DC
- 0x00, //DC first table
- 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
- 0x01, //DC second table
- 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
- 0x10, //AC first table
- 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
- 0x05, 0x05, 0x04, 0x04, 0x00, 0x00,
- 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11,
- 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61,
- 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1,
- 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24,
- 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17,
- 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34,
- 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
- 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
- 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
- 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
- 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
- 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99,
- 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
- 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9,
- 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8,
- 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
- 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
- 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
- 0xF8, 0xF9, 0xFA,
- 0x11, //AC second table
- 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
- 0x07, 0x05, 0x04, 0x04, 0x00, 0x01,
- 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04,
- 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
- 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
- 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62,
- 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25,
- 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A,
- 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
- 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
- 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
- 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
- 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
- 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
- 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
- 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8,
- 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
- 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8,
- 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
- 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
- 0xF9, 0xFA
-};
-
-/* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */
-#define NO_OF_COMPONENTS 0x3 //Y,U,V
-#define BASELINE_PRECISION 0x8 //MCU size (?)
-static const char zr36050_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT
-static const char zr36050_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC
-static const char zr36050_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC
-
-/* horizontal 422 decimation setup (maybe we support 411 or so later, too) */
-static const char zr36050_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 };
-static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 };
-
-/* =========================================================================
- Local helper functions:
-
- calculation and setup of parameter-dependent JPEG baseline segments
- (needed for compression only)
- ========================================================================= */
-
-/* ------------------------------------------------------------------------- */
-
-/* SOF (start of frame) segment depends on width, height and sampling ratio
- of each color component */
-
-static int
-zr36050_set_sof (struct zr36050 *ptr)
-{
- char sof_data[34]; // max. size of register set
- int i;
-
- dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name,
- ptr->width, ptr->height, NO_OF_COMPONENTS);
- sof_data[0] = 0xff;
- sof_data[1] = 0xc0;
- sof_data[2] = 0x00;
- sof_data[3] = (3 * NO_OF_COMPONENTS) + 8;
- sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36050
- sof_data[5] = (ptr->height) >> 8;
- sof_data[6] = (ptr->height) & 0xff;
- sof_data[7] = (ptr->width) >> 8;
- sof_data[8] = (ptr->width) & 0xff;
- sof_data[9] = NO_OF_COMPONENTS;
- for (i = 0; i < NO_OF_COMPONENTS; i++) {
- sof_data[10 + (i * 3)] = i; // index identifier
- sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios
- sof_data[12 + (i * 3)] = zr36050_tq[i]; // Q table selection
- }
- return zr36050_pushit(ptr, ZR050_SOF_IDX,
- (3 * NO_OF_COMPONENTS) + 10, sof_data);
-}
-
-/* ------------------------------------------------------------------------- */
-
-/* SOS (start of scan) segment depends on the used scan components
- of each color component */
-
-static int
-zr36050_set_sos (struct zr36050 *ptr)
-{
- char sos_data[16]; // max. size of register set
- int i;
-
- dprintk(3, "%s: write SOS\n", ptr->name);
- sos_data[0] = 0xff;
- sos_data[1] = 0xda;
- sos_data[2] = 0x00;
- sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3;
- sos_data[4] = NO_OF_COMPONENTS;
- for (i = 0; i < NO_OF_COMPONENTS; i++) {
- sos_data[5 + (i * 2)] = i; // index
- sos_data[6 + (i * 2)] = (zr36050_td[i] << 4) | zr36050_ta[i]; // AC/DC tbl.sel.
- }
- sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start
- sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3F;
- sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00;
- return zr36050_pushit(ptr, ZR050_SOS1_IDX,
- 4 + 1 + (2 * NO_OF_COMPONENTS) + 3,
- sos_data);
-}
-
-/* ------------------------------------------------------------------------- */
-
-/* DRI (define restart interval) */
-
-static int
-zr36050_set_dri (struct zr36050 *ptr)
-{
- char dri_data[6]; // max. size of register set
-
- dprintk(3, "%s: write DRI\n", ptr->name);
- dri_data[0] = 0xff;
- dri_data[1] = 0xdd;
- dri_data[2] = 0x00;
- dri_data[3] = 0x04;
- dri_data[4] = ptr->dri >> 8;
- dri_data[5] = ptr->dri & 0xff;
- return zr36050_pushit(ptr, ZR050_DRI_IDX, 6, dri_data);
-}
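
The three builders above (SOF, SOS, DRI) share the same shape: a JPEG marker (0xff plus a code byte), a 16-bit big-endian segment length that includes the two length bytes, then the payload. A hypothetical helper illustrating just that common header, not part of the driver:

        /* Hypothetical helper: emit a JPEG marker and its big-endian segment length. */
        static int put_marker_header(char *buf, unsigned char code, unsigned int seg_len)
        {
                buf[0] = 0xff;                  /* marker prefix */
                buf[1] = code;                  /* e.g. 0xc0 SOF0, 0xda SOS, 0xdd DRI */
                buf[2] = seg_len >> 8;          /* length, high byte */
                buf[3] = seg_len & 0xff;        /* length, low byte (length counts itself) */
                return 4;
        }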
-
-/* =========================================================================
- Setup function:
-
- Setup compression/decompression of Zoran's JPEG processor
- (see also the Zoran ZR36050 manual)
-
- ... sorry for the spaghetti code ...
- ========================================================================= */
-static void
-zr36050_init (struct zr36050 *ptr)
-{
- int sum = 0;
- long bitcnt, tmp;
-
- if (ptr->mode == CODEC_DO_COMPRESSION) {
- dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name);
-
- /* 050 communicates with 057 in master mode */
- zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR);
-
- /* encoding table preload for compression */
- zr36050_write(ptr, ZR050_MODE,
- ZR050_MO_COMP | ZR050_MO_TLM);
- zr36050_write(ptr, ZR050_OPTIONS, 0);
-
- /* disable all IRQs */
- zr36050_write(ptr, ZR050_INT_REQ_0, 0);
- zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1
-
- /* volume control settings */
- /*zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);*/
- zr36050_write(ptr, ZR050_SF_HI, ptr->scalefact >> 8);
- zr36050_write(ptr, ZR050_SF_LO, ptr->scalefact & 0xff);
-
- zr36050_write(ptr, ZR050_AF_HI, 0xff);
- zr36050_write(ptr, ZR050_AF_M, 0xff);
- zr36050_write(ptr, ZR050_AF_LO, 0xff);
-
- /* setup the variable jpeg tables */
- sum += zr36050_set_sof(ptr);
- sum += zr36050_set_sos(ptr);
- sum += zr36050_set_dri(ptr);
-
- /* setup the fixed jpeg tables - maybe variable, though -
- * (see table init section above) */
- dprintk(3, "%s: write DQT, DHT, APP\n", ptr->name);
- sum += zr36050_pushit(ptr, ZR050_DQT_IDX,
- sizeof(zr36050_dqt), zr36050_dqt);
- sum += zr36050_pushit(ptr, ZR050_DHT_IDX,
- sizeof(zr36050_dht), zr36050_dht);
- zr36050_write(ptr, ZR050_APP_IDX, 0xff);
- zr36050_write(ptr, ZR050_APP_IDX + 1, 0xe0 + ptr->app.appn);
- zr36050_write(ptr, ZR050_APP_IDX + 2, 0x00);
- zr36050_write(ptr, ZR050_APP_IDX + 3, ptr->app.len + 2);
- sum += zr36050_pushit(ptr, ZR050_APP_IDX + 4, 60,
- ptr->app.data) + 4;
- zr36050_write(ptr, ZR050_COM_IDX, 0xff);
- zr36050_write(ptr, ZR050_COM_IDX + 1, 0xfe);
- zr36050_write(ptr, ZR050_COM_IDX + 2, 0x00);
- zr36050_write(ptr, ZR050_COM_IDX + 3, ptr->com.len + 2);
- sum += zr36050_pushit(ptr, ZR050_COM_IDX + 4, 60,
- ptr->com.data) + 4;
-
- /* do the internal huffman table preload */
- zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI);
-
- zr36050_write(ptr, ZR050_GO, 1); // launch codec
- zr36050_wait_end(ptr);
- dprintk(2, "%s: Status after table preload: 0x%02x\n",
- ptr->name, ptr->status1);
-
- if ((ptr->status1 & 0x4) == 0) {
- dprintk(1, KERN_ERR "%s: init aborted!\n",
- ptr->name);
- return; // something is wrong, it's timed out!!!!
- }
-
- /* setup misc. data for compression (target code sizes) */
-
- /* size of compressed code to reach without header data */
- sum = ptr->real_code_vol - sum;
- bitcnt = sum << 3; /* need the size in bits */
-
- tmp = bitcnt >> 16;
- dprintk(3,
- "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n",
- ptr->name, sum, ptr->real_code_vol, bitcnt, tmp);
- zr36050_write(ptr, ZR050_TCV_NET_HI, tmp >> 8);
- zr36050_write(ptr, ZR050_TCV_NET_MH, tmp & 0xff);
- tmp = bitcnt & 0xffff;
- zr36050_write(ptr, ZR050_TCV_NET_ML, tmp >> 8);
- zr36050_write(ptr, ZR050_TCV_NET_LO, tmp & 0xff);
-
- bitcnt -= bitcnt >> 7; // bits without stuffing
- bitcnt -= ((bitcnt * 5) >> 6); // bits without eob
-
- tmp = bitcnt >> 16;
- dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n",
- ptr->name, bitcnt, tmp);
- zr36050_write(ptr, ZR050_TCV_DATA_HI, tmp >> 8);
- zr36050_write(ptr, ZR050_TCV_DATA_MH, tmp & 0xff);
- tmp = bitcnt & 0xffff;
- zr36050_write(ptr, ZR050_TCV_DATA_ML, tmp >> 8);
- zr36050_write(ptr, ZR050_TCV_DATA_LO, tmp & 0xff);
-
- /* compression setup with or without bitrate control */
- zr36050_write(ptr, ZR050_MODE,
- ZR050_MO_COMP | ZR050_MO_PASS2 |
- (ptr->bitrate_ctrl ? ZR050_MO_BRC : 0));
-
- /* these headers seem to deliver "valid AVI" jpeg frames */
- zr36050_write(ptr, ZR050_MARKERS_EN,
- ZR050_ME_DQT | ZR050_ME_DHT |
- ((ptr->app.len > 0) ? ZR050_ME_APP : 0) |
- ((ptr->com.len > 0) ? ZR050_ME_COM : 0));
- } else {
- dprintk(2, "%s: EXPANSION SETUP\n", ptr->name);
-
- /* 050 communicates with 055 in master mode */
- zr36050_write(ptr, ZR050_HARDWARE,
- ZR050_HW_MSTR | ZR050_HW_CFIS_2_CLK);
-
- /* encoding table preload */
- zr36050_write(ptr, ZR050_MODE, ZR050_MO_TLM);
-
- /* disable all IRQs */
- zr36050_write(ptr, ZR050_INT_REQ_0, 0);
- zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1
-
- dprintk(3, "%s: write DHT\n", ptr->name);
- zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht),
- zr36050_dht);
-
- /* do the internal huffman table preload */
- zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI);
-
- zr36050_write(ptr, ZR050_GO, 1); // launch codec
- zr36050_wait_end(ptr);
- dprintk(2, "%s: Status after table preload: 0x%02x\n",
- ptr->name, ptr->status1);
-
- if ((ptr->status1 & 0x4) == 0) {
- dprintk(1, KERN_ERR "%s: init aborted!\n",
- ptr->name);
- return; // something is wrong, it's timed out!!!!
- }
-
- /* setup misc. data for expansion */
- zr36050_write(ptr, ZR050_MODE, 0);
- zr36050_write(ptr, ZR050_MARKERS_EN, 0);
- }
-
- /* adr on selected, to allow GO from master */
- zr36050_read(ptr, 0);
-}
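
The compression branch above splits each target code volume (in bits) across four consecutive byte-wide registers, high word first. Since ZR050_TCV_NET_HI..LO and ZR050_TCV_DATA_HI..LO occupy consecutive addresses (see zr36050.h below), the split can be sketched as a single hypothetical helper:

        /* Hypothetical helper: write a bit count to a HI/MH/ML/LO register quartet. */
        static void zr36050_write_tcv(struct zr36050 *ptr, u16 base, long bitcnt)
        {
                long tmp = bitcnt >> 16;                        /* high 16 bits */

                zr36050_write(ptr, base, tmp >> 8);             /* HI */
                zr36050_write(ptr, base + 1, tmp & 0xff);       /* MH */
                tmp = bitcnt & 0xffff;                          /* low 16 bits */
                zr36050_write(ptr, base + 2, tmp >> 8);         /* ML */
                zr36050_write(ptr, base + 3, tmp & 0xff);       /* LO */
        }

        /* e.g. zr36050_write_tcv(ptr, ZR050_TCV_NET_HI, bitcnt); */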
-
-/* =========================================================================
- CODEC API FUNCTIONS
-
- these functions are accessed by the master via the API structure
- ========================================================================= */
-
-/* sets the compression/expansion mode and launches the codec -
- this should be the last call from the master before starting processing */
-static int
-zr36050_set_mode (struct videocodec *codec,
- int mode)
-{
- struct zr36050 *ptr = (struct zr36050 *) codec->data;
-
- dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
-
- if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION))
- return -EINVAL;
-
- ptr->mode = mode;
- zr36050_init(ptr);
-
- return 0;
-}
-
-/* set picture size (norm is ignored as the codec doesn't know about it) */
-static int
-zr36050_set_video (struct videocodec *codec,
- struct tvnorm *norm,
- struct vfe_settings *cap,
- struct vfe_polarity *pol)
-{
- struct zr36050 *ptr = (struct zr36050 *) codec->data;
- int size;
-
- dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n",
- ptr->name, norm->HStart, norm->VStart,
- cap->x, cap->y, cap->width, cap->height,
- cap->decimation, cap->quality);
- /* if () return -EINVAL;
- * trust the master driver that it knows what it does - so
- * we allow invalid startx/y and norm for now ... */
- ptr->width = cap->width / (cap->decimation & 0xff);
- ptr->height = cap->height / ((cap->decimation >> 8) & 0xff);
-
- /* (KM) JPEG quality */
- size = ptr->width * ptr->height;
- size *= 16; /* size in bits */
- /* apply quality setting */
- size = size * cap->quality / 200;
-
- /* Minimum: 1kb */
- if (size < 8192)
- size = 8192;
- /* Maximum: 7/8 of code buffer */
- if (size > ptr->total_code_vol * 7)
- size = ptr->total_code_vol * 7;
-
- ptr->real_code_vol = size >> 3; /* in bytes */
-
- /* Set max_block_vol here (previously in zr36050_init, moved
- * here for consistency with the zr36060 code) */
- zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);
-
- return 0;
-}
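
The sizing logic above reads as: 16 bits per pixel, scaled by quality/200, clamped between 1 KB and 7/8 of the code buffer (total_code_vol is in bytes, so 7/8 of it in bits is total_code_vol * 7). A self-contained sketch with a worked example; the helper name and the sample numbers are assumptions, not driver code:

        /* Hypothetical helper: target JPEG code size in bytes for one field. */
        static unsigned int jpeg_target_bytes(unsigned int width, unsigned int height,
                                              unsigned int quality,
                                              unsigned int total_code_vol)
        {
                unsigned int size = width * height * 16;        /* bits before scaling */

                size = size * quality / 200;                    /* apply quality setting */
                if (size < 8192)                                /* minimum: 1 KB */
                        size = 8192;
                if (size > total_code_vol * 7)                  /* maximum: 7/8 of code buffer */
                        size = total_code_vol * 7;
                return size >> 3;                               /* bytes */
        }

        /* e.g. 720x288 at quality 50: 720 * 288 * 16 * 50 / 200 = 829440 bits = 103680 bytes */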
-
-/* additional control functions */
-static int
-zr36050_control (struct videocodec *codec,
- int type,
- int size,
- void *data)
-{
- struct zr36050 *ptr = (struct zr36050 *) codec->data;
- int *ival = (int *) data;
-
- dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type,
- size);
-
- switch (type) {
- case CODEC_G_STATUS: /* get last status */
- if (size != sizeof(int))
- return -EFAULT;
- zr36050_read_status1(ptr);
- *ival = ptr->status1;
- break;
-
- case CODEC_G_CODEC_MODE:
- if (size != sizeof(int))
- return -EFAULT;
- *ival = CODEC_MODE_BJPG;
- break;
-
- case CODEC_S_CODEC_MODE:
- if (size != sizeof(int))
- return -EFAULT;
- if (*ival != CODEC_MODE_BJPG)
- return -EINVAL;
- /* not needed, do nothing */
- return 0;
-
- case CODEC_G_VFE:
- case CODEC_S_VFE:
- /* not needed, do nothing */
- return 0;
-
- case CODEC_S_MMAP:
- /* not available, give an error */
- return -ENXIO;
-
- case CODEC_G_JPEG_TDS_BYTE: /* get target volume in bytes */
- if (size != sizeof(int))
- return -EFAULT;
- *ival = ptr->total_code_vol;
- break;
-
- case CODEC_S_JPEG_TDS_BYTE: /* set target volume in bytes */
- if (size != sizeof(int))
- return -EFAULT;
- ptr->total_code_vol = *ival;
- /* (Kieran Morrissey)
- * code copied from zr36060.c to ensure proper bitrate */
- ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3;
- break;
-
- case CODEC_G_JPEG_SCALE: /* get scaling factor */
- if (size != sizeof(int))
- return -EFAULT;
- *ival = zr36050_read_scalefactor(ptr);
- break;
-
- case CODEC_S_JPEG_SCALE: /* set scaling factor */
- if (size != sizeof(int))
- return -EFAULT;
- ptr->scalefact = *ival;
- break;
-
- case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */
- struct jpeg_app_marker *app = data;
-
- if (size != sizeof(struct jpeg_app_marker))
- return -EFAULT;
-
- *app = ptr->app;
- break;
- }
-
- case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */
- struct jpeg_app_marker *app = data;
-
- if (size != sizeof(struct jpeg_app_marker))
- return -EFAULT;
-
- ptr->app = *app;
- break;
- }
-
- case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */
- struct jpeg_com_marker *com = data;
-
- if (size != sizeof(struct jpeg_com_marker))
- return -EFAULT;
-
- *com = ptr->com;
- break;
- }
-
- case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */
- struct jpeg_com_marker *com = data;
-
- if (size != sizeof(struct jpeg_com_marker))
- return -EFAULT;
-
- ptr->com = *com;
- break;
- }
-
- default:
- return -EINVAL;
- }
-
- return size;
-}
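
From the master driver's side these entry points are reached through the videocodec structure initialized below. A usage sketch, assuming the master holds a struct videocodec pointer and invokes the control field with the same arguments zr36050_control() expects; example_tune_codec() is a hypothetical name:

        /* Usage sketch: set the JPEG scale factor, then read back the codec status. */
        static int example_tune_codec(struct videocodec *codec)
        {
                int sf = 0x100;         /* default scale factor, as in zr36050_setup() */
                int status = 0;
                int ret;

                ret = codec->control(codec, CODEC_S_JPEG_SCALE, sizeof(int), &sf);
                if (ret < 0)
                        return ret;
                return codec->control(codec, CODEC_G_STATUS, sizeof(int), &status);
        }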
-
-/* =========================================================================
- Exit and unregister function:
-
- Deinitializes Zoran's JPEG processor
- ========================================================================= */
-
-static int
-zr36050_unset (struct videocodec *codec)
-{
- struct zr36050 *ptr = codec->data;
-
- if (ptr) {
- /* do we need some codec deinit here, too? */
-
- dprintk(1, "%s: finished codec #%d\n", ptr->name,
- ptr->num);
- kfree(ptr);
- codec->data = NULL;
-
- zr36050_codecs--;
- return 0;
- }
-
- return -EFAULT;
-}
-
-/* =========================================================================
- Setup and registry function:
-
- Initializes Zoran's JPEG processor
-
- Also sets pixel size, average code size, mode (compr./decompr.)
- (the given size is determined by the processor with the video interface)
- ========================================================================= */
-
-static int
-zr36050_setup (struct videocodec *codec)
-{
- struct zr36050 *ptr;
- int res;
-
- dprintk(2, "zr36050: initializing MJPEG subsystem #%d.\n",
- zr36050_codecs);
-
- if (zr36050_codecs == MAX_CODECS) {
- dprintk(1,
- KERN_ERR "zr36050: Can't attach more codecs!\n");
- return -ENOSPC;
- }
- //mem structure init
- codec->data = ptr = kzalloc(sizeof(struct zr36050), GFP_KERNEL);
- if (NULL == ptr) {
- dprintk(1, KERN_ERR "zr36050: Can't get enough memory!\n");
- return -ENOMEM;
- }
-
- snprintf(ptr->name, sizeof(ptr->name), "zr36050[%d]",
- zr36050_codecs);
- ptr->num = zr36050_codecs++;
- ptr->codec = codec;
-
- //testing
- res = zr36050_basic_test(ptr);
- if (res < 0) {
- zr36050_unset(codec);
- return res;
- }
- //final setup
- memcpy(ptr->h_samp_ratio, zr36050_decimation_h, 8);
- memcpy(ptr->v_samp_ratio, zr36050_decimation_v, 8);
-
- ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag
- * (what is the difference?) */
- ptr->mode = CODEC_DO_COMPRESSION;
- ptr->width = 384;
- ptr->height = 288;
- ptr->total_code_vol = 16000;
- ptr->max_block_vol = 240;
- ptr->scalefact = 0x100;
- ptr->dri = 1;
-
- /* no app/com marker by default */
- ptr->app.appn = 0;
- ptr->app.len = 0;
- ptr->com.len = 0;
-
- zr36050_init(ptr);
-
- dprintk(1, KERN_INFO "%s: codec attached and running\n",
- ptr->name);
-
- return 0;
-}
-
-static const struct videocodec zr36050_codec = {
- .owner = THIS_MODULE,
- .name = "zr36050",
- .magic = 0L, // magic not used
- .flags =
- CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER |
- CODEC_FLAG_DECODER,
- .type = CODEC_TYPE_ZR36050,
- .setup = zr36050_setup, // functionality
- .unset = zr36050_unset,
- .set_mode = zr36050_set_mode,
- .set_video = zr36050_set_video,
- .control = zr36050_control,
- // others are not used
-};
-
-/* =========================================================================
- HOOK IN DRIVER AS KERNEL MODULE
- ========================================================================= */
-
-static int __init
-zr36050_init_module (void)
-{
- //dprintk(1, "ZR36050 driver %s\n",ZR050_VERSION);
- zr36050_codecs = 0;
- return videocodec_register(&zr36050_codec);
-}
-
-static void __exit
-zr36050_cleanup_module (void)
-{
- if (zr36050_codecs) {
- dprintk(1,
- "zr36050: something's wrong - %d codecs left somehow.\n",
- zr36050_codecs);
- }
- videocodec_unregister(&zr36050_codec);
-}
-
-module_init(zr36050_init_module);
-module_exit(zr36050_cleanup_module);
-
-MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
-MODULE_DESCRIPTION("Driver module for ZR36050 jpeg processors "
- ZR050_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36050.h b/drivers/staging/media/zoran/zr36050.h
deleted file mode 100644
index 9236486d3c2b..000000000000
--- a/drivers/staging/media/zoran/zr36050.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Zoran ZR36050 basic configuration functions - header file
- *
- * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
- *
- * $Id: zr36050.h,v 1.1.2.2 2003/01/14 21:18:22 rbultje Exp $
- *
- * ------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ------------------------------------------------------------------------
- */
-
-#ifndef ZR36050_H
-#define ZR36050_H
-
-#include "videocodec.h"
-
-/* data stored for each zoran jpeg codec chip */
-struct zr36050 {
- char name[32];
- int num;
- /* io datastructure */
- struct videocodec *codec;
- // last coder status
- __u8 status1;
- // actual coder setup
- int mode;
-
- __u16 width;
- __u16 height;
-
- __u16 bitrate_ctrl;
-
- __u32 total_code_vol;
- __u32 real_code_vol;
- __u16 max_block_vol;
-
- __u8 h_samp_ratio[8];
- __u8 v_samp_ratio[8];
- __u16 scalefact;
- __u16 dri;
-
- /* com/app marker */
- struct jpeg_com_marker com;
- struct jpeg_app_marker app;
-};
-
-/* zr36050 register addresses */
-#define ZR050_GO 0x000
-#define ZR050_HARDWARE 0x002
-#define ZR050_MODE 0x003
-#define ZR050_OPTIONS 0x004
-#define ZR050_MBCV 0x005
-#define ZR050_MARKERS_EN 0x006
-#define ZR050_INT_REQ_0 0x007
-#define ZR050_INT_REQ_1 0x008
-#define ZR050_TCV_NET_HI 0x009
-#define ZR050_TCV_NET_MH 0x00a
-#define ZR050_TCV_NET_ML 0x00b
-#define ZR050_TCV_NET_LO 0x00c
-#define ZR050_TCV_DATA_HI 0x00d
-#define ZR050_TCV_DATA_MH 0x00e
-#define ZR050_TCV_DATA_ML 0x00f
-#define ZR050_TCV_DATA_LO 0x010
-#define ZR050_SF_HI 0x011
-#define ZR050_SF_LO 0x012
-#define ZR050_AF_HI 0x013
-#define ZR050_AF_M 0x014
-#define ZR050_AF_LO 0x015
-#define ZR050_ACV_HI 0x016
-#define ZR050_ACV_MH 0x017
-#define ZR050_ACV_ML 0x018
-#define ZR050_ACV_LO 0x019
-#define ZR050_ACT_HI 0x01a
-#define ZR050_ACT_MH 0x01b
-#define ZR050_ACT_ML 0x01c
-#define ZR050_ACT_LO 0x01d
-#define ZR050_ACV_TRUN_HI 0x01e
-#define ZR050_ACV_TRUN_MH 0x01f
-#define ZR050_ACV_TRUN_ML 0x020
-#define ZR050_ACV_TRUN_LO 0x021
-#define ZR050_STATUS_0 0x02e
-#define ZR050_STATUS_1 0x02f
-
-#define ZR050_SOF_IDX 0x040
-#define ZR050_SOS1_IDX 0x07a
-#define ZR050_SOS2_IDX 0x08a
-#define ZR050_SOS3_IDX 0x09a
-#define ZR050_SOS4_IDX 0x0aa
-#define ZR050_DRI_IDX 0x0c0
-#define ZR050_DNL_IDX 0x0c6
-#define ZR050_DQT_IDX 0x0cc
-#define ZR050_DHT_IDX 0x1d4
-#define ZR050_APP_IDX 0x380
-#define ZR050_COM_IDX 0x3c0
-
-/* zr36050 hardware register bits */
-
-#define ZR050_HW_BSWD 0x80
-#define ZR050_HW_MSTR 0x40
-#define ZR050_HW_DMA 0x20
-#define ZR050_HW_CFIS_1_CLK 0x00
-#define ZR050_HW_CFIS_2_CLK 0x04
-#define ZR050_HW_CFIS_3_CLK 0x08
-#define ZR050_HW_CFIS_4_CLK 0x0C
-#define ZR050_HW_CFIS_5_CLK 0x10
-#define ZR050_HW_CFIS_6_CLK 0x14
-#define ZR050_HW_CFIS_7_CLK 0x18
-#define ZR050_HW_CFIS_8_CLK 0x1C
-#define ZR050_HW_BELE 0x01
-
-/* zr36050 mode register bits */
-
-#define ZR050_MO_COMP 0x80
-#define ZR050_MO_ATP 0x40
-#define ZR050_MO_PASS2 0x20
-#define ZR050_MO_TLM 0x10
-#define ZR050_MO_DCONLY 0x08
-#define ZR050_MO_BRC 0x04
-
-#define ZR050_MO_ATP 0x40
-#define ZR050_MO_PASS2 0x20
-#define ZR050_MO_TLM 0x10
-#define ZR050_MO_DCONLY 0x08
-
-/* zr36050 option register bits */
-
-#define ZR050_OP_NSCN_1 0x00
-#define ZR050_OP_NSCN_2 0x20
-#define ZR050_OP_NSCN_3 0x40
-#define ZR050_OP_NSCN_4 0x60
-#define ZR050_OP_NSCN_5 0x80
-#define ZR050_OP_NSCN_6 0xA0
-#define ZR050_OP_NSCN_7 0xC0
-#define ZR050_OP_NSCN_8 0xE0
-#define ZR050_OP_OVF 0x10
-
-
-/* zr36050 markers-enable register bits */
-
-#define ZR050_ME_APP 0x80
-#define ZR050_ME_COM 0x40
-#define ZR050_ME_DRI 0x20
-#define ZR050_ME_DQT 0x10
-#define ZR050_ME_DHT 0x08
-#define ZR050_ME_DNL 0x04
-#define ZR050_ME_DQTI 0x02
-#define ZR050_ME_DHTI 0x01
-
-/* zr36050 status0/1 register bit masks */
-
-#define ZR050_ST_RST_MASK 0x20
-#define ZR050_ST_SOF_MASK 0x02
-#define ZR050_ST_SOS_MASK 0x02
-#define ZR050_ST_DATRDY_MASK 0x80
-#define ZR050_ST_MRKDET_MASK 0x40
-#define ZR050_ST_RFM_MASK 0x10
-#define ZR050_ST_RFD_MASK 0x08
-#define ZR050_ST_END_MASK 0x04
-#define ZR050_ST_TCVOVF_MASK 0x02
-#define ZR050_ST_DATOVF_MASK 0x01
-
-/* pixel component idx */
-
-#define ZR050_Y_COMPONENT 0
-#define ZR050_U_COMPONENT 1
-#define ZR050_V_COMPONENT 2
-
-#endif /*fndef ZR36050_H */
diff --git a/drivers/staging/media/zoran/zr36057.h b/drivers/staging/media/zoran/zr36057.h
deleted file mode 100644
index c8acb21dcb5c..000000000000
--- a/drivers/staging/media/zoran/zr36057.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * zr36057.h - zr36057 register offsets
- *
- * Copyright (C) 1998 Dave Perks <dperks@ibm.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ZR36057_H_
-#define _ZR36057_H_
-
-
-/* Zoran ZR36057 registers */
-
-#define ZR36057_VFEHCR 0x000 /* Video Front End, Horizontal Configuration Register */
-#define ZR36057_VFEHCR_HSPol (1<<30)
-#define ZR36057_VFEHCR_HStart 10
-#define ZR36057_VFEHCR_HEnd 0
-#define ZR36057_VFEHCR_Hmask 0x3ff
-
-#define ZR36057_VFEVCR 0x004 /* Video Front End, Vertical Configuration Register */
-#define ZR36057_VFEVCR_VSPol (1<<30)
-#define ZR36057_VFEVCR_VStart 10
-#define ZR36057_VFEVCR_VEnd 0
-#define ZR36057_VFEVCR_Vmask 0x3ff
-
-#define ZR36057_VFESPFR 0x008 /* Video Front End, Scaler and Pixel Format Register */
-#define ZR36057_VFESPFR_ExtFl (1<<26)
-#define ZR36057_VFESPFR_TopField (1<<25)
-#define ZR36057_VFESPFR_VCLKPol (1<<24)
-#define ZR36057_VFESPFR_HFilter 21
-#define ZR36057_VFESPFR_HorDcm 14
-#define ZR36057_VFESPFR_VerDcm 8
-#define ZR36057_VFESPFR_DispMode 6
-#define ZR36057_VFESPFR_YUV422 (0<<3)
-#define ZR36057_VFESPFR_RGB888 (1<<3)
-#define ZR36057_VFESPFR_RGB565 (2<<3)
-#define ZR36057_VFESPFR_RGB555 (3<<3)
-#define ZR36057_VFESPFR_ErrDif (1<<2)
-#define ZR36057_VFESPFR_Pack24 (1<<1)
-#define ZR36057_VFESPFR_LittleEndian (1<<0)
-
-#define ZR36057_VDTR 0x00c /* Video Display "Top" Register */
-
-#define ZR36057_VDBR 0x010 /* Video Display "Bottom" Register */
-
-#define ZR36057_VSSFGR 0x014 /* Video Stride, Status, and Frame Grab Register */
-#define ZR36057_VSSFGR_DispStride 16
-#define ZR36057_VSSFGR_VidOvf (1<<8)
-#define ZR36057_VSSFGR_SnapShot (1<<1)
-#define ZR36057_VSSFGR_FrameGrab (1<<0)
-
-#define ZR36057_VDCR 0x018 /* Video Display Configuration Register */
-#define ZR36057_VDCR_VidEn (1<<31)
-#define ZR36057_VDCR_MinPix 24
-#define ZR36057_VDCR_Triton (1<<24)
-#define ZR36057_VDCR_VidWinHt 12
-#define ZR36057_VDCR_VidWinWid 0
-
-#define ZR36057_MMTR 0x01c /* Masking Map "Top" Register */
-
-#define ZR36057_MMBR 0x020 /* Masking Map "Bottom" Register */
-
-#define ZR36057_OCR 0x024 /* Overlay Control Register */
-#define ZR36057_OCR_OvlEnable (1 << 15)
-#define ZR36057_OCR_MaskStride 0
-
-#define ZR36057_SPGPPCR 0x028 /* System, PCI, and General Purpose Pins Control Register */
-#define ZR36057_SPGPPCR_SoftReset (1<<24)
-
-#define ZR36057_GPPGCR1 0x02c /* General Purpose Pins and GuestBus Control Register (1) */
-
-#define ZR36057_MCSAR 0x030 /* MPEG Code Source Address Register */
-
-#define ZR36057_MCTCR 0x034 /* MPEG Code Transfer Control Register */
-#define ZR36057_MCTCR_CodTime (1 << 30)
-#define ZR36057_MCTCR_CEmpty (1 << 29)
-#define ZR36057_MCTCR_CFlush (1 << 28)
-#define ZR36057_MCTCR_CodGuestID 20
-#define ZR36057_MCTCR_CodGuestReg 16
-
-#define ZR36057_MCMPR 0x038 /* MPEG Code Memory Pointer Register */
-
-#define ZR36057_ISR 0x03c /* Interrupt Status Register */
-#define ZR36057_ISR_GIRQ1 (1<<30)
-#define ZR36057_ISR_GIRQ0 (1<<29)
-#define ZR36057_ISR_CodRepIRQ (1<<28)
-#define ZR36057_ISR_JPEGRepIRQ (1<<27)
-
-#define ZR36057_ICR 0x040 /* Interrupt Control Register */
-#define ZR36057_ICR_GIRQ1 (1<<30)
-#define ZR36057_ICR_GIRQ0 (1<<29)
-#define ZR36057_ICR_CodRepIRQ (1<<28)
-#define ZR36057_ICR_JPEGRepIRQ (1<<27)
-#define ZR36057_ICR_IntPinEn (1<<24)
-
-#define ZR36057_I2CBR 0x044 /* I2C Bus Register */
-#define ZR36057_I2CBR_SDA (1<<1)
-#define ZR36057_I2CBR_SCL (1<<0)
-
-#define ZR36057_JMC 0x100 /* JPEG Mode and Control */
-#define ZR36057_JMC_JPG (1 << 31)
-#define ZR36057_JMC_JPGExpMode (0 << 29)
-#define ZR36057_JMC_JPGCmpMode (1 << 29)
-#define ZR36057_JMC_MJPGExpMode (2 << 29)
-#define ZR36057_JMC_MJPGCmpMode (3 << 29)
-#define ZR36057_JMC_RTBUSY_FB (1 << 6)
-#define ZR36057_JMC_Go_en (1 << 5)
-#define ZR36057_JMC_SyncMstr (1 << 4)
-#define ZR36057_JMC_Fld_per_buff (1 << 3)
-#define ZR36057_JMC_VFIFO_FB (1 << 2)
-#define ZR36057_JMC_CFIFO_FB (1 << 1)
-#define ZR36057_JMC_Stll_LitEndian (1 << 0)
-
-#define ZR36057_JPC 0x104 /* JPEG Process Control */
-#define ZR36057_JPC_P_Reset (1 << 7)
-#define ZR36057_JPC_CodTrnsEn (1 << 5)
-#define ZR36057_JPC_Active (1 << 0)
-
-#define ZR36057_VSP 0x108 /* Vertical Sync Parameters */
-#define ZR36057_VSP_VsyncSize 16
-#define ZR36057_VSP_FrmTot 0
-
-#define ZR36057_HSP 0x10c /* Horizontal Sync Parameters */
-#define ZR36057_HSP_HsyncStart 16
-#define ZR36057_HSP_LineTot 0
-
-#define ZR36057_FHAP 0x110 /* Field Horizontal Active Portion */
-#define ZR36057_FHAP_NAX 16
-#define ZR36057_FHAP_PAX 0
-
-#define ZR36057_FVAP 0x114 /* Field Vertical Active Portion */
-#define ZR36057_FVAP_NAY 16
-#define ZR36057_FVAP_PAY 0
-
-#define ZR36057_FPP 0x118 /* Field Process Parameters */
-#define ZR36057_FPP_Odd_Even (1 << 0)
-
-#define ZR36057_JCBA 0x11c /* JPEG Code Base Address */
-
-#define ZR36057_JCFT 0x120 /* JPEG Code FIFO Threshold */
-
-#define ZR36057_JCGI 0x124 /* JPEG Codec Guest ID */
-#define ZR36057_JCGI_JPEGuestID 4
-#define ZR36057_JCGI_JPEGuestReg 0
-
-#define ZR36057_GCR2 0x12c /* GuestBus Control Register (2) */
-
-#define ZR36057_POR 0x200 /* Post Office Register */
-#define ZR36057_POR_POPen (1<<25)
-#define ZR36057_POR_POTime (1<<24)
-#define ZR36057_POR_PODir (1<<23)
-
-#define ZR36057_STR 0x300 /* "Still" Transfer Register */
-
-#endif
diff --git a/drivers/staging/media/zoran/zr36060.c b/drivers/staging/media/zoran/zr36060.c
deleted file mode 100644
index 2c2e8130fc96..000000000000
--- a/drivers/staging/media/zoran/zr36060.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-/*
- * Zoran ZR36060 basic configuration functions
- *
- * Copyright (C) 2002 Laurent Pinchart <laurent.pinchart@skynet.be>
- *
- * $Id: zr36060.c,v 1.1.2.22 2003/05/06 09:35:36 rbultje Exp $
- *
- * ------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ------------------------------------------------------------------------
- */
-
-#define ZR060_VERSION "v0.7"
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-
-#include <linux/types.h>
-#include <linux/wait.h>
-
-/* I/O commands, error codes */
-#include <asm/io.h>
-
-/* headerfile of this module */
-#include "zr36060.h"
-
-/* codec io API */
-#include "videocodec.h"
-
-/* it doesn't make sense to have more than 20 or so,
- just to prevent some unwanted loops */
-#define MAX_CODECS 20
-
-/* amount of chips attached via this driver */
-static int zr36060_codecs;
-
-static bool low_bitrate;
-module_param(low_bitrate, bool, 0);
-MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate");
-
-/* debugging is available via module parameter */
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0-4)");
-
-#define dprintk(num, format, args...) \
- do { \
- if (debug >= num) \
- printk(format, ##args); \
- } while (0)
-
-/* =========================================================================
- Local hardware I/O functions:
-
- read/write via codec layer (registers are located in the master device)
- ========================================================================= */
-
-/* read and write functions */
-static u8
-zr36060_read (struct zr36060 *ptr,
- u16 reg)
-{
- u8 value = 0;
-
- // just in case something is wrong...
- if (ptr->codec->master_data->readreg)
- value = (ptr->codec->master_data->readreg(ptr->codec,
- reg)) & 0xff;
- else
- dprintk(1,
- KERN_ERR "%s: invalid I/O setup, nothing read!\n",
- ptr->name);
-
- //dprintk(4, "%s: reading from 0x%04x: %02x\n",ptr->name,reg,value);
-
- return value;
-}
-
-static void
-zr36060_write(struct zr36060 *ptr,
- u16 reg,
- u8 value)
-{
- //dprintk(4, "%s: writing 0x%02x to 0x%04x\n",ptr->name,value,reg);
- dprintk(4, "0x%02x @0x%04x\n", value, reg);
-
- // just in case something is wrong...
- if (ptr->codec->master_data->writereg)
- ptr->codec->master_data->writereg(ptr->codec, reg, value);
- else
- dprintk(1,
- KERN_ERR
- "%s: invalid I/O setup, nothing written!\n",
- ptr->name);
-}
-
-/* =========================================================================
- Local helper function:
-
- status read
- ========================================================================= */
-
-/* status is kept in datastructure */
-static u8
-zr36060_read_status (struct zr36060 *ptr)
-{
- ptr->status = zr36060_read(ptr, ZR060_CFSR);
-
- zr36060_read(ptr, 0);
- return ptr->status;
-}
-
-/* =========================================================================
- Local helper function:
-
- scale factor read
- ========================================================================= */
-
-/* scale factor is kept in datastructure */
-static u16
-zr36060_read_scalefactor (struct zr36060 *ptr)
-{
- ptr->scalefact = (zr36060_read(ptr, ZR060_SF_HI) << 8) |
- (zr36060_read(ptr, ZR060_SF_LO) & 0xFF);
-
- /* leave 0 selected for an eventually GO from master */
- zr36060_read(ptr, 0);
- return ptr->scalefact;
-}
-
-/* =========================================================================
- Local helper function:
-
- wait if codec is ready to proceed (end of processing) or time is over
- ========================================================================= */
-
-static void
-zr36060_wait_end (struct zr36060 *ptr)
-{
- int i = 0;
-
- while (zr36060_read_status(ptr) & ZR060_CFSR_Busy) {
- udelay(1);
- if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
- dprintk(1,
- "%s: timeout at wait_end (last status: 0x%02x)\n",
- ptr->name, ptr->status);
- break;
- }
- }
-}
-
-/* =========================================================================
- Local helper function:
-
- basic test of "connectivity", writes/reads to/from memory the SOF marker
- ========================================================================= */
-
-static int
-zr36060_basic_test (struct zr36060 *ptr)
-{
- if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) &&
- (zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, can't connect to jpeg processor!\n",
- ptr->name);
- return -ENXIO;
- }
-
- zr36060_wait_end(ptr);
- if (ptr->status & ZR060_CFSR_Busy) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, jpeg processor failed (end flag)!\n",
- ptr->name);
- return -EBUSY;
- }
-
- return 0; /* looks good! */
-}
-
-/* =========================================================================
- Local helper function:
-
- simple loop for pushing the init datasets
- ========================================================================= */
-
-static int
-zr36060_pushit (struct zr36060 *ptr,
- u16 startreg,
- u16 len,
- const char *data)
-{
- int i = 0;
-
- dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name,
- startreg, len);
- while (i < len) {
- zr36060_write(ptr, startreg++, data[i++]);
- }
-
- return i;
-}
-
-/* =========================================================================
- Basic datasets:
-
- jpeg baseline setup data (you find it on lots places in internet, or just
- extract it from any regular .jpg image...)
-
- Could be variable, but until it's not needed it they are just fixed to save
- memory. Otherwise expand zr36060 structure with arrays, push the values to
- it and initialize from there, as e.g. the linux zr36057/60 driver does it.
- ========================================================================= */
-
-static const char zr36060_dqt[0x86] = {
- 0xff, 0xdb, //Marker: DQT
- 0x00, 0x84, //Length: 2*65+2
- 0x00, //Pq,Tq first table
- 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e,
- 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28,
- 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25,
- 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33,
- 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44,
- 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57,
- 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71,
- 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63,
- 0x01, //Pq,Tq second table
- 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
- 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
-};
-
-static const char zr36060_dht[0x1a4] = {
- 0xff, 0xc4, //Marker: DHT
- 0x01, 0xa2, //Length: 2*AC, 2*DC
- 0x00, //DC first table
- 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
- 0x01, //DC second table
- 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
- 0x10, //AC first table
- 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
- 0x05, 0x05, 0x04, 0x04, 0x00, 0x00,
- 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11,
- 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61,
- 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1,
- 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24,
- 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17,
- 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34,
- 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
- 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
- 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
- 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
- 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
- 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99,
- 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
- 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9,
- 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8,
- 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
- 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
- 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
- 0xF8, 0xF9, 0xFA,
- 0x11, //AC second table
- 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
- 0x07, 0x05, 0x04, 0x04, 0x00, 0x01,
- 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04,
- 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
- 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
- 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62,
- 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25,
- 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A,
- 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
- 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
- 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
- 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
- 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
- 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
- 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
- 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8,
- 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
- 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8,
- 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
- 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
- 0xF9, 0xFA
-};
-
-/* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */
-#define NO_OF_COMPONENTS 0x3 //Y,U,V
-#define BASELINE_PRECISION 0x8 //MCU size (?)
-static const char zr36060_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT
-static const char zr36060_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC
-static const char zr36060_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC
-
-/* horizontal 422 decimation setup (maybe we support 411 or so later, too) */
-static const char zr36060_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 };
-static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 };
-
-/* =========================================================================
- Local helper functions:
-
- calculation and setup of parameter-dependent JPEG baseline segments
- (needed for compression only)
- ========================================================================= */
-
-/* ------------------------------------------------------------------------- */
-
-/* SOF (start of frame) segment depends on width, height and sampling ratio
- of each color component */
-
-static int
-zr36060_set_sof (struct zr36060 *ptr)
-{
- char sof_data[34]; // max. size of register set
- int i;
-
- dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name,
- ptr->width, ptr->height, NO_OF_COMPONENTS);
- sof_data[0] = 0xff;
- sof_data[1] = 0xc0;
- sof_data[2] = 0x00;
- sof_data[3] = (3 * NO_OF_COMPONENTS) + 8;
- sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36060
- sof_data[5] = (ptr->height) >> 8;
- sof_data[6] = (ptr->height) & 0xff;
- sof_data[7] = (ptr->width) >> 8;
- sof_data[8] = (ptr->width) & 0xff;
- sof_data[9] = NO_OF_COMPONENTS;
- for (i = 0; i < NO_OF_COMPONENTS; i++) {
- sof_data[10 + (i * 3)] = i; // index identifier
- sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) |
- (ptr->v_samp_ratio[i]); // sampling ratios
- sof_data[12 + (i * 3)] = zr36060_tq[i]; // Q table selection
- }
- return zr36060_pushit(ptr, ZR060_SOF_IDX,
- (3 * NO_OF_COMPONENTS) + 10, sof_data);
-}
-
-/* ------------------------------------------------------------------------- */
-
-/* SOS (start of scan) segment depends on the used scan components
- of each color component */
-
-static int
-zr36060_set_sos (struct zr36060 *ptr)
-{
- char sos_data[16]; // max. size of register set
- int i;
-
- dprintk(3, "%s: write SOS\n", ptr->name);
- sos_data[0] = 0xff;
- sos_data[1] = 0xda;
- sos_data[2] = 0x00;
- sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3;
- sos_data[4] = NO_OF_COMPONENTS;
- for (i = 0; i < NO_OF_COMPONENTS; i++) {
- sos_data[5 + (i * 2)] = i; // index
- sos_data[6 + (i * 2)] = (zr36060_td[i] << 4) |
- zr36060_ta[i]; // AC/DC tbl.sel.
- }
- sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start
- sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3f;
- sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00;
- return zr36060_pushit(ptr, ZR060_SOS_IDX,
- 4 + 1 + (2 * NO_OF_COMPONENTS) + 3,
- sos_data);
-}
-
-/* ------------------------------------------------------------------------- */
-
-/* DRI (define restart interval) */
-
-static int
-zr36060_set_dri (struct zr36060 *ptr)
-{
- char dri_data[6]; // max. size of register set
-
- dprintk(3, "%s: write DRI\n", ptr->name);
- dri_data[0] = 0xff;
- dri_data[1] = 0xdd;
- dri_data[2] = 0x00;
- dri_data[3] = 0x04;
- dri_data[4] = (ptr->dri) >> 8;
- dri_data[5] = (ptr->dri) & 0xff;
- return zr36060_pushit(ptr, ZR060_DRI_IDX, 6, dri_data);
-}
-
-/* =========================================================================
- Setup function:
-
- Setup compression/decompression of Zoran's JPEG processor
- ( see also zoran 36060 manual )
-
- ... sorry for the spaghetti code ...
- ========================================================================= */
-static void
-zr36060_init (struct zr36060 *ptr)
-{
- int sum = 0;
- long bitcnt, tmp;
-
- if (ptr->mode == CODEC_DO_COMPRESSION) {
- dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name);
-
- zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst);
-
- /* 060 communicates with 067 in master mode */
- zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr);
-
- /* Compression with or without variable scale factor */
- /*FIXME: What about ptr->bitrate_ctrl? */
- zr36060_write(ptr, ZR060_CMR,
- ZR060_CMR_Comp | ZR060_CMR_Pass2 |
- ZR060_CMR_BRB);
-
- /* Must be zero */
- zr36060_write(ptr, ZR060_MBZ, 0x00);
- zr36060_write(ptr, ZR060_TCR_HI, 0x00);
- zr36060_write(ptr, ZR060_TCR_LO, 0x00);
-
- /* Disable all IRQs - no DataErr means autoreset */
- zr36060_write(ptr, ZR060_IMR, 0);
-
- /* volume control settings */
- zr36060_write(ptr, ZR060_SF_HI, ptr->scalefact >> 8);
- zr36060_write(ptr, ZR060_SF_LO, ptr->scalefact & 0xff);
-
- zr36060_write(ptr, ZR060_AF_HI, 0xff);
- zr36060_write(ptr, ZR060_AF_M, 0xff);
- zr36060_write(ptr, ZR060_AF_LO, 0xff);
-
- /* setup the variable jpeg tables */
- sum += zr36060_set_sof(ptr);
- sum += zr36060_set_sos(ptr);
- sum += zr36060_set_dri(ptr);
-
- /* setup the fixed jpeg tables - maybe variable, though -
- * (see table init section above) */
- sum +=
- zr36060_pushit(ptr, ZR060_DQT_IDX, sizeof(zr36060_dqt),
- zr36060_dqt);
- sum +=
- zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht),
- zr36060_dht);
- zr36060_write(ptr, ZR060_APP_IDX, 0xff);
- zr36060_write(ptr, ZR060_APP_IDX + 1, 0xe0 + ptr->app.appn);
- zr36060_write(ptr, ZR060_APP_IDX + 2, 0x00);
- zr36060_write(ptr, ZR060_APP_IDX + 3, ptr->app.len + 2);
- sum += zr36060_pushit(ptr, ZR060_APP_IDX + 4, 60,
- ptr->app.data) + 4;
- zr36060_write(ptr, ZR060_COM_IDX, 0xff);
- zr36060_write(ptr, ZR060_COM_IDX + 1, 0xfe);
- zr36060_write(ptr, ZR060_COM_IDX + 2, 0x00);
- zr36060_write(ptr, ZR060_COM_IDX + 3, ptr->com.len + 2);
- sum += zr36060_pushit(ptr, ZR060_COM_IDX + 4, 60,
- ptr->com.data) + 4;
-
- /* setup misc. data for compression (target code sizes) */
-
- /* size of compressed code to reach without header data */
- sum = ptr->real_code_vol - sum;
- bitcnt = sum << 3; /* need the size in bits */
-
- tmp = bitcnt >> 16;
- dprintk(3,
- "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n",
- ptr->name, sum, ptr->real_code_vol, bitcnt, tmp);
- zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8);
- zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff);
- tmp = bitcnt & 0xffff;
- zr36060_write(ptr, ZR060_TCV_NET_ML, tmp >> 8);
- zr36060_write(ptr, ZR060_TCV_NET_LO, tmp & 0xff);
-
- bitcnt -= bitcnt >> 7; // bits without stuffing
- bitcnt -= ((bitcnt * 5) >> 6); // bits without eob
-
- tmp = bitcnt >> 16;
- dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n",
- ptr->name, bitcnt, tmp);
- zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8);
- zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff);
- tmp = bitcnt & 0xffff;
- zr36060_write(ptr, ZR060_TCV_DATA_ML, tmp >> 8);
- zr36060_write(ptr, ZR060_TCV_DATA_LO, tmp & 0xff);
-
- /* JPEG markers to be included in the compressed stream */
- zr36060_write(ptr, ZR060_MER,
- ZR060_MER_DQT | ZR060_MER_DHT |
- ((ptr->com.len > 0) ? ZR060_MER_Com : 0) |
- ((ptr->app.len > 0) ? ZR060_MER_App : 0));
-
- /* Setup the Video Frontend */
- /* Limit pixel range to 16..235 as per CCIR-601 */
- zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range);
-
- } else {
- dprintk(2, "%s: EXPANSION SETUP\n", ptr->name);
-
- zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst);
-
- /* 060 communicates with 067 in master mode */
- zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr);
-
- /* Decompression */
- zr36060_write(ptr, ZR060_CMR, 0);
-
- /* Must be zero */
- zr36060_write(ptr, ZR060_MBZ, 0x00);
- zr36060_write(ptr, ZR060_TCR_HI, 0x00);
- zr36060_write(ptr, ZR060_TCR_LO, 0x00);
-
- /* Disable all IRQs - no DataErr means autoreset */
- zr36060_write(ptr, ZR060_IMR, 0);
-
- /* setup misc. data for expansion */
- zr36060_write(ptr, ZR060_MER, 0);
-
- /* setup the fixed jpeg tables - maybe variable, though -
- * (see table init section above) */
- zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht),
- zr36060_dht);
-
- /* Setup the Video Frontend */
- //zr36060_write(ptr, ZR060_VCR, ZR060_VCR_FIExt);
- //this doesn't seem right and doesn't work...
- zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range);
- }
-
- /* Load the tables */
- zr36060_write(ptr, ZR060_LOAD,
- ZR060_LOAD_SyncRst | ZR060_LOAD_Load);
- zr36060_wait_end(ptr);
- dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name,
- ptr->status);
-
- if (ptr->status & ZR060_CFSR_Busy) {
- dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name);
- return; // something is wrong, its timed out!!!!
- }
-}
-
-/* =========================================================================
- CODEC API FUNCTIONS
-
- this functions are accessed by the master via the API structure
- ========================================================================= */
-
-/* set compression/expansion mode and launches codec -
- this should be the last call from the master before starting processing */
-static int
-zr36060_set_mode (struct videocodec *codec,
- int mode)
-{
- struct zr36060 *ptr = (struct zr36060 *) codec->data;
-
- dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
-
- if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION))
- return -EINVAL;
-
- ptr->mode = mode;
- zr36060_init(ptr);
-
- return 0;
-}
-
-/* set picture size (norm is ignored as the codec doesn't know about it) */
-static int
-zr36060_set_video (struct videocodec *codec,
- struct tvnorm *norm,
- struct vfe_settings *cap,
- struct vfe_polarity *pol)
-{
- struct zr36060 *ptr = (struct zr36060 *) codec->data;
- u32 reg;
- int size;
-
- dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name,
- cap->x, cap->y, cap->width, cap->height, cap->decimation);
-
- /* if () return -EINVAL;
- * trust the master driver that it knows what it does - so
- * we allow invalid startx/y and norm for now ... */
- ptr->width = cap->width / (cap->decimation & 0xff);
- ptr->height = cap->height / (cap->decimation >> 8);
-
- zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst);
-
- /* Note that VSPol/HSPol bits in zr36060 have the opposite
- * meaning of their zr360x7 counterparts with the same names
- * N.b. for VSPol this is only true if FIVEdge = 0 (default,
- * left unchanged here - in accordance with datasheet).
- */
- reg = (!pol->vsync_pol ? ZR060_VPR_VSPol : 0)
- | (!pol->hsync_pol ? ZR060_VPR_HSPol : 0)
- | (pol->field_pol ? ZR060_VPR_FIPol : 0)
- | (pol->blank_pol ? ZR060_VPR_BLPol : 0)
- | (pol->subimg_pol ? ZR060_VPR_SImgPol : 0)
- | (pol->poe_pol ? ZR060_VPR_PoePol : 0)
- | (pol->pvalid_pol ? ZR060_VPR_PValPol : 0)
- | (pol->vclk_pol ? ZR060_VPR_VCLKPol : 0);
- zr36060_write(ptr, ZR060_VPR, reg);
-
- reg = 0;
- switch (cap->decimation & 0xff) {
- default:
- case 1:
- break;
-
- case 2:
- reg |= ZR060_SR_HScale2;
- break;
-
- case 4:
- reg |= ZR060_SR_HScale4;
- break;
- }
-
- switch (cap->decimation >> 8) {
- default:
- case 1:
- break;
-
- case 2:
- reg |= ZR060_SR_VScale;
- break;
- }
- zr36060_write(ptr, ZR060_SR, reg);
-
- zr36060_write(ptr, ZR060_BCR_Y, 0x00);
- zr36060_write(ptr, ZR060_BCR_U, 0x80);
- zr36060_write(ptr, ZR060_BCR_V, 0x80);
-
- /* sync generator */
-
- reg = norm->Ht - 1; /* Vtotal */
- zr36060_write(ptr, ZR060_SGR_VTOTAL_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_SGR_VTOTAL_LO, (reg >> 0) & 0xff);
-
- reg = norm->Wt - 1; /* Htotal */
- zr36060_write(ptr, ZR060_SGR_HTOTAL_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_SGR_HTOTAL_LO, (reg >> 0) & 0xff);
-
- reg = 6 - 1; /* VsyncSize */
- zr36060_write(ptr, ZR060_SGR_VSYNC, reg);
-
- //reg = 30 - 1; /* HsyncSize */
-///*CP*/ reg = (zr->params.norm == 1 ? 57 : 68);
- reg = 68;
- zr36060_write(ptr, ZR060_SGR_HSYNC, reg);
-
- reg = norm->VStart - 1; /* BVstart */
- zr36060_write(ptr, ZR060_SGR_BVSTART, reg);
-
- reg += norm->Ha / 2; /* BVend */
- zr36060_write(ptr, ZR060_SGR_BVEND_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_SGR_BVEND_LO, (reg >> 0) & 0xff);
-
- reg = norm->HStart - 1; /* BHstart */
- zr36060_write(ptr, ZR060_SGR_BHSTART, reg);
-
- reg += norm->Wa; /* BHend */
- zr36060_write(ptr, ZR060_SGR_BHEND_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_SGR_BHEND_LO, (reg >> 0) & 0xff);
-
- /* active area */
- reg = cap->y + norm->VStart; /* Vstart */
- zr36060_write(ptr, ZR060_AAR_VSTART_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_AAR_VSTART_LO, (reg >> 0) & 0xff);
-
- reg += cap->height; /* Vend */
- zr36060_write(ptr, ZR060_AAR_VEND_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_AAR_VEND_LO, (reg >> 0) & 0xff);
-
- reg = cap->x + norm->HStart; /* Hstart */
- zr36060_write(ptr, ZR060_AAR_HSTART_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_AAR_HSTART_LO, (reg >> 0) & 0xff);
-
- reg += cap->width; /* Hend */
- zr36060_write(ptr, ZR060_AAR_HEND_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_AAR_HEND_LO, (reg >> 0) & 0xff);
-
- /* subimage area */
- reg = norm->VStart - 4; /* SVstart */
- zr36060_write(ptr, ZR060_SWR_VSTART_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_SWR_VSTART_LO, (reg >> 0) & 0xff);
-
- reg += norm->Ha / 2 + 8; /* SVend */
- zr36060_write(ptr, ZR060_SWR_VEND_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_SWR_VEND_LO, (reg >> 0) & 0xff);
-
- reg = norm->HStart /*+ 64 */ - 4; /* SHstart */
- zr36060_write(ptr, ZR060_SWR_HSTART_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_SWR_HSTART_LO, (reg >> 0) & 0xff);
-
- reg += norm->Wa + 8; /* SHend */
- zr36060_write(ptr, ZR060_SWR_HEND_HI, (reg >> 8) & 0xff);
- zr36060_write(ptr, ZR060_SWR_HEND_LO, (reg >> 0) & 0xff);
-
- size = ptr->width * ptr->height;
- /* Target compressed field size in bits: */
- size = size * 16; /* uncompressed size in bits */
- /* (Ronald) by default, quality = 100 is a compression
- * ratio 1:2. Setting low_bitrate (insmod option) sets
- * it to 1:4 (instead of 1:2, zr36060 max) as limit because the
- * buz can't handle more at decimation=1... Use low_bitrate if
- * you have a Buz, unless you know what you're doing */
- size = size * cap->quality / (low_bitrate ? 400 : 200);
- /* Lower limit (arbitrary, 1 KB) */
- if (size < 8192)
- size = 8192;
- /* Upper limit: 7/8 of the code buffers */
- if (size > ptr->total_code_vol * 7)
- size = ptr->total_code_vol * 7;
-
- ptr->real_code_vol = size >> 3; /* in bytes */
-
- /* the MBCVR is the *maximum* block volume, according to the
- * JPEG ISO specs, this shouldn't be used, since that allows
- * for the best encoding quality. So set it to it's max value */
- reg = ptr->max_block_vol;
- zr36060_write(ptr, ZR060_MBCVR, reg);
-
- return 0;
-}
-
-/* additional control functions */
-static int
-zr36060_control (struct videocodec *codec,
- int type,
- int size,
- void *data)
-{
- struct zr36060 *ptr = (struct zr36060 *) codec->data;
- int *ival = (int *) data;
-
- dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type,
- size);
-
- switch (type) {
- case CODEC_G_STATUS: /* get last status */
- if (size != sizeof(int))
- return -EFAULT;
- zr36060_read_status(ptr);
- *ival = ptr->status;
- break;
-
- case CODEC_G_CODEC_MODE:
- if (size != sizeof(int))
- return -EFAULT;
- *ival = CODEC_MODE_BJPG;
- break;
-
- case CODEC_S_CODEC_MODE:
- if (size != sizeof(int))
- return -EFAULT;
- if (*ival != CODEC_MODE_BJPG)
- return -EINVAL;
- /* not needed, do nothing */
- return 0;
-
- case CODEC_G_VFE:
- case CODEC_S_VFE:
- /* not needed, do nothing */
- return 0;
-
- case CODEC_S_MMAP:
- /* not available, give an error */
- return -ENXIO;
-
- case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */
- if (size != sizeof(int))
- return -EFAULT;
- *ival = ptr->total_code_vol;
- break;
-
- case CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */
- if (size != sizeof(int))
- return -EFAULT;
- ptr->total_code_vol = *ival;
- ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3;
- break;
-
- case CODEC_G_JPEG_SCALE: /* get scaling factor */
- if (size != sizeof(int))
- return -EFAULT;
- *ival = zr36060_read_scalefactor(ptr);
- break;
-
- case CODEC_S_JPEG_SCALE: /* set scaling factor */
- if (size != sizeof(int))
- return -EFAULT;
- ptr->scalefact = *ival;
- break;
-
- case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */
- struct jpeg_app_marker *app = data;
-
- if (size != sizeof(struct jpeg_app_marker))
- return -EFAULT;
-
- *app = ptr->app;
- break;
- }
-
- case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */
- struct jpeg_app_marker *app = data;
-
- if (size != sizeof(struct jpeg_app_marker))
- return -EFAULT;
-
- ptr->app = *app;
- break;
- }
-
- case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */
- struct jpeg_com_marker *com = data;
-
- if (size != sizeof(struct jpeg_com_marker))
- return -EFAULT;
-
- *com = ptr->com;
- break;
- }
-
- case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */
- struct jpeg_com_marker *com = data;
-
- if (size != sizeof(struct jpeg_com_marker))
- return -EFAULT;
-
- ptr->com = *com;
- break;
- }
-
- default:
- return -EINVAL;
- }
-
- return size;
-}
-
-/* =========================================================================
- Exit and unregister function:
-
- Deinitializes Zoran's JPEG processor
- ========================================================================= */
-
-static int
-zr36060_unset (struct videocodec *codec)
-{
- struct zr36060 *ptr = codec->data;
-
- if (ptr) {
- /* do wee need some codec deinit here, too ???? */
-
- dprintk(1, "%s: finished codec #%d\n", ptr->name,
- ptr->num);
- kfree(ptr);
- codec->data = NULL;
-
- zr36060_codecs--;
- return 0;
- }
-
- return -EFAULT;
-}
-
-/* =========================================================================
- Setup and registry function:
-
- Initializes Zoran's JPEG processor
-
- Also sets pixel size, average code size, mode (compr./decompr.)
- (the given size is determined by the processor with the video interface)
- ========================================================================= */
-
-static int
-zr36060_setup (struct videocodec *codec)
-{
- struct zr36060 *ptr;
- int res;
-
- dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n",
- zr36060_codecs);
-
- if (zr36060_codecs == MAX_CODECS) {
- dprintk(1,
- KERN_ERR "zr36060: Can't attach more codecs!\n");
- return -ENOSPC;
- }
- //mem structure init
- codec->data = ptr = kzalloc(sizeof(struct zr36060), GFP_KERNEL);
- if (NULL == ptr) {
- dprintk(1, KERN_ERR "zr36060: Can't get enough memory!\n");
- return -ENOMEM;
- }
-
- snprintf(ptr->name, sizeof(ptr->name), "zr36060[%d]",
- zr36060_codecs);
- ptr->num = zr36060_codecs++;
- ptr->codec = codec;
-
- //testing
- res = zr36060_basic_test(ptr);
- if (res < 0) {
- zr36060_unset(codec);
- return res;
- }
- //final setup
- memcpy(ptr->h_samp_ratio, zr36060_decimation_h, 8);
- memcpy(ptr->v_samp_ratio, zr36060_decimation_v, 8);
-
- ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag
- * (what is the difference?) */
- ptr->mode = CODEC_DO_COMPRESSION;
- ptr->width = 384;
- ptr->height = 288;
- ptr->total_code_vol = 16000; /* CHECKME */
- ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3;
- ptr->max_block_vol = 240; /* CHECKME, was 120 is 240 */
- ptr->scalefact = 0x100;
- ptr->dri = 1; /* CHECKME, was 8 is 1 */
-
- /* by default, no COM or APP markers - app should set those */
- ptr->com.len = 0;
- ptr->app.appn = 0;
- ptr->app.len = 0;
-
- zr36060_init(ptr);
-
- dprintk(1, KERN_INFO "%s: codec attached and running\n",
- ptr->name);
-
- return 0;
-}
-
-static const struct videocodec zr36060_codec = {
- .owner = THIS_MODULE,
- .name = "zr36060",
- .magic = 0L, // magic not used
- .flags =
- CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER |
- CODEC_FLAG_DECODER | CODEC_FLAG_VFE,
- .type = CODEC_TYPE_ZR36060,
- .setup = zr36060_setup, // functionality
- .unset = zr36060_unset,
- .set_mode = zr36060_set_mode,
- .set_video = zr36060_set_video,
- .control = zr36060_control,
- // others are not used
-};
-
-/* =========================================================================
- HOOK IN DRIVER AS KERNEL MODULE
- ========================================================================= */
-
-static int __init
-zr36060_init_module (void)
-{
- //dprintk(1, "zr36060 driver %s\n",ZR060_VERSION);
- zr36060_codecs = 0;
- return videocodec_register(&zr36060_codec);
-}
-
-static void __exit
-zr36060_cleanup_module (void)
-{
- if (zr36060_codecs) {
- dprintk(1,
- "zr36060: something's wrong - %d codecs left somehow.\n",
- zr36060_codecs);
- }
-
- /* however, we can't just stay alive */
- videocodec_unregister(&zr36060_codec);
-}
-
-module_init(zr36060_init_module);
-module_exit(zr36060_cleanup_module);
-
-MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@skynet.be>");
-MODULE_DESCRIPTION("Driver module for ZR36060 jpeg processors "
- ZR060_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36060.h b/drivers/staging/media/zoran/zr36060.h
deleted file mode 100644
index 82911757ba78..000000000000
--- a/drivers/staging/media/zoran/zr36060.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Zoran ZR36060 basic configuration functions - header file
- *
- * Copyright (C) 2002 Laurent Pinchart <laurent.pinchart@skynet.be>
- *
- * $Id: zr36060.h,v 1.1.1.1.2.3 2003/01/14 21:18:47 rbultje Exp $
- *
- * ------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ------------------------------------------------------------------------
- */
-
-#ifndef ZR36060_H
-#define ZR36060_H
-
-#include "videocodec.h"
-
-/* data stored for each zoran jpeg codec chip */
-struct zr36060 {
- char name[32];
- int num;
- /* io datastructure */
- struct videocodec *codec;
- // last coder status
- __u8 status;
- // actual coder setup
- int mode;
-
- __u16 width;
- __u16 height;
-
- __u16 bitrate_ctrl;
-
- __u32 total_code_vol;
- __u32 real_code_vol;
- __u16 max_block_vol;
-
- __u8 h_samp_ratio[8];
- __u8 v_samp_ratio[8];
- __u16 scalefact;
- __u16 dri;
-
- /* app/com marker data */
- struct jpeg_app_marker app;
- struct jpeg_com_marker com;
-};
-
-/* ZR36060 register addresses */
-#define ZR060_LOAD 0x000
-#define ZR060_CFSR 0x001
-#define ZR060_CIR 0x002
-#define ZR060_CMR 0x003
-#define ZR060_MBZ 0x004
-#define ZR060_MBCVR 0x005
-#define ZR060_MER 0x006
-#define ZR060_IMR 0x007
-#define ZR060_ISR 0x008
-#define ZR060_TCV_NET_HI 0x009
-#define ZR060_TCV_NET_MH 0x00a
-#define ZR060_TCV_NET_ML 0x00b
-#define ZR060_TCV_NET_LO 0x00c
-#define ZR060_TCV_DATA_HI 0x00d
-#define ZR060_TCV_DATA_MH 0x00e
-#define ZR060_TCV_DATA_ML 0x00f
-#define ZR060_TCV_DATA_LO 0x010
-#define ZR060_SF_HI 0x011
-#define ZR060_SF_LO 0x012
-#define ZR060_AF_HI 0x013
-#define ZR060_AF_M 0x014
-#define ZR060_AF_LO 0x015
-#define ZR060_ACV_HI 0x016
-#define ZR060_ACV_MH 0x017
-#define ZR060_ACV_ML 0x018
-#define ZR060_ACV_LO 0x019
-#define ZR060_ACT_HI 0x01a
-#define ZR060_ACT_MH 0x01b
-#define ZR060_ACT_ML 0x01c
-#define ZR060_ACT_LO 0x01d
-#define ZR060_ACV_TRUN_HI 0x01e
-#define ZR060_ACV_TRUN_MH 0x01f
-#define ZR060_ACV_TRUN_ML 0x020
-#define ZR060_ACV_TRUN_LO 0x021
-#define ZR060_IDR_DEV 0x022
-#define ZR060_IDR_REV 0x023
-#define ZR060_TCR_HI 0x024
-#define ZR060_TCR_LO 0x025
-#define ZR060_VCR 0x030
-#define ZR060_VPR 0x031
-#define ZR060_SR 0x032
-#define ZR060_BCR_Y 0x033
-#define ZR060_BCR_U 0x034
-#define ZR060_BCR_V 0x035
-#define ZR060_SGR_VTOTAL_HI 0x036
-#define ZR060_SGR_VTOTAL_LO 0x037
-#define ZR060_SGR_HTOTAL_HI 0x038
-#define ZR060_SGR_HTOTAL_LO 0x039
-#define ZR060_SGR_VSYNC 0x03a
-#define ZR060_SGR_HSYNC 0x03b
-#define ZR060_SGR_BVSTART 0x03c
-#define ZR060_SGR_BHSTART 0x03d
-#define ZR060_SGR_BVEND_HI 0x03e
-#define ZR060_SGR_BVEND_LO 0x03f
-#define ZR060_SGR_BHEND_HI 0x040
-#define ZR060_SGR_BHEND_LO 0x041
-#define ZR060_AAR_VSTART_HI 0x042
-#define ZR060_AAR_VSTART_LO 0x043
-#define ZR060_AAR_VEND_HI 0x044
-#define ZR060_AAR_VEND_LO 0x045
-#define ZR060_AAR_HSTART_HI 0x046
-#define ZR060_AAR_HSTART_LO 0x047
-#define ZR060_AAR_HEND_HI 0x048
-#define ZR060_AAR_HEND_LO 0x049
-#define ZR060_SWR_VSTART_HI 0x04a
-#define ZR060_SWR_VSTART_LO 0x04b
-#define ZR060_SWR_VEND_HI 0x04c
-#define ZR060_SWR_VEND_LO 0x04d
-#define ZR060_SWR_HSTART_HI 0x04e
-#define ZR060_SWR_HSTART_LO 0x04f
-#define ZR060_SWR_HEND_HI 0x050
-#define ZR060_SWR_HEND_LO 0x051
-
-#define ZR060_SOF_IDX 0x060
-#define ZR060_SOS_IDX 0x07a
-#define ZR060_DRI_IDX 0x0c0
-#define ZR060_DQT_IDX 0x0cc
-#define ZR060_DHT_IDX 0x1d4
-#define ZR060_APP_IDX 0x380
-#define ZR060_COM_IDX 0x3c0
-
-/* ZR36060 LOAD register bits */
-
-#define ZR060_LOAD_Load (1 << 7)
-#define ZR060_LOAD_SyncRst (1 << 0)
-
-/* ZR36060 Code FIFO Status register bits */
-
-#define ZR060_CFSR_Busy (1 << 7)
-#define ZR060_CFSR_CBusy (1 << 2)
-#define ZR060_CFSR_CFIFO (3 << 0)
-
-/* ZR36060 Code Interface register */
-
-#define ZR060_CIR_Code16 (1 << 7)
-#define ZR060_CIR_Endian (1 << 6)
-#define ZR060_CIR_CFIS (1 << 2)
-#define ZR060_CIR_CodeMstr (1 << 0)
-
-/* ZR36060 Codec Mode register */
-
-#define ZR060_CMR_Comp (1 << 7)
-#define ZR060_CMR_ATP (1 << 6)
-#define ZR060_CMR_Pass2 (1 << 5)
-#define ZR060_CMR_TLM (1 << 4)
-#define ZR060_CMR_BRB (1 << 2)
-#define ZR060_CMR_FSF (1 << 1)
-
-/* ZR36060 Markers Enable register */
-
-#define ZR060_MER_App (1 << 7)
-#define ZR060_MER_Com (1 << 6)
-#define ZR060_MER_DRI (1 << 5)
-#define ZR060_MER_DQT (1 << 4)
-#define ZR060_MER_DHT (1 << 3)
-
-/* ZR36060 Interrupt Mask register */
-
-#define ZR060_IMR_EOAV (1 << 3)
-#define ZR060_IMR_EOI (1 << 2)
-#define ZR060_IMR_End (1 << 1)
-#define ZR060_IMR_DataErr (1 << 0)
-
-/* ZR36060 Interrupt Status register */
-
-#define ZR060_ISR_ProCnt (3 << 6)
-#define ZR060_ISR_EOAV (1 << 3)
-#define ZR060_ISR_EOI (1 << 2)
-#define ZR060_ISR_End (1 << 1)
-#define ZR060_ISR_DataErr (1 << 0)
-
-/* ZR36060 Video Control register */
-
-#define ZR060_VCR_Video8 (1 << 7)
-#define ZR060_VCR_Range (1 << 6)
-#define ZR060_VCR_FIDet (1 << 3)
-#define ZR060_VCR_FIVedge (1 << 2)
-#define ZR060_VCR_FIExt (1 << 1)
-#define ZR060_VCR_SyncMstr (1 << 0)
-
-/* ZR36060 Video Polarity register */
-
-#define ZR060_VPR_VCLKPol (1 << 7)
-#define ZR060_VPR_PValPol (1 << 6)
-#define ZR060_VPR_PoePol (1 << 5)
-#define ZR060_VPR_SImgPol (1 << 4)
-#define ZR060_VPR_BLPol (1 << 3)
-#define ZR060_VPR_FIPol (1 << 2)
-#define ZR060_VPR_HSPol (1 << 1)
-#define ZR060_VPR_VSPol (1 << 0)
-
-/* ZR36060 Scaling register */
-
-#define ZR060_SR_VScale (1 << 2)
-#define ZR060_SR_HScale2 (1 << 0)
-#define ZR060_SR_HScale4 (2 << 0)
-
-#endif /*fndef ZR36060_H */
diff --git a/drivers/staging/most/Documentation/ABI/configfs-most.txt b/drivers/staging/most/Documentation/ABI/configfs-most.txt
new file mode 100644
index 000000000000..25b3e18c4d91
--- /dev/null
+++ b/drivers/staging/most/Documentation/ABI/configfs-most.txt
@@ -0,0 +1,204 @@
+What: /sys/kernel/config/most_<component>
+Date: March 8, 2019
+KernelVersion: 5.2
+Description: Interface is used to configure and connect device channels
+ to component drivers.
+
+ Attributes are visible only when configfs is mounted. To mount
+		configfs in the /sys/kernel/config directory, use:
+ # mount -t configfs none /sys/kernel/config/
+
+
+What: /sys/kernel/config/most_cdev/<link>
+Date: March 8, 2019
+KernelVersion: 5.2
+Description:
+ The attributes:
+
+ buffer_size configure the buffer size for this channel
+
+ subbuffer_size configure the sub-buffer size for this channel
+			(needed for synchronous and isochronous data)
+
+
+ num_buffers configure number of buffers used for this
+ channel
+
+ datatype configure type of data that will travel over
+ this channel
+
+ direction configure whether this link will be an input
+ or output
+
+ dbr_size configure DBR data buffer size (this is used
+			for MediaLB communication only)
+
+ packets_per_xact
+ configure the number of packets that will be
+ collected from the network before being
+ transmitted via USB (this is used for USB
+			communication only)
+
+ device name of the device the link is to be attached to
+
+ channel name of the channel the link is to be attached to
+
+ comp_params pass parameters needed by some components
+
+ create_link write '1' to this attribute to trigger the
+ creation of the link. In case of speculative
+			configuration, the creation is postponed until
+			a physical device is attached to the bus.
+
+ destroy_link write '1' to this attribute to destroy an
+ active link
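+
+		For example (the names and values are illustrative; the
+		remaining attributes are set the same way before the link
+		is created):
+		# mkdir /sys/kernel/config/most_cdev/my_rx_channel
+		# cd /sys/kernel/config/most_cdev/my_rx_channel
+		# echo mdev0 >device
+		# echo ep_81 >channel
+		# echo 1 >create_link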
+
+What: /sys/kernel/config/most_video/<link>
+Date: March 8, 2019
+KernelVersion: 5.2
+Description:
+ The attributes:
+
+ buffer_size configure the buffer size for this channel
+
+ subbuffer_size configure the sub-buffer size for this channel
+			(needed for synchronous and isochronous data)
+
+
+ num_buffers configure number of buffers used for this
+ channel
+
+ datatype configure type of data that will travel over
+ this channel
+
+ direction configure whether this link will be an input
+ or output
+
+ dbr_size configure DBR data buffer size (this is used
+			for MediaLB communication only)
+
+ packets_per_xact
+ configure the number of packets that will be
+ collected from the network before being
+ transmitted via USB (this is used for USB
+			communication only)
+
+ device name of the device the link is to be attached to
+
+ channel name of the channel the link is to be attached to
+
+ comp_params pass parameters needed by some components
+
+ create_link write '1' to this attribute to trigger the
+ creation of the link. In case of speculative
+			configuration, the creation is postponed until
+			a physical device is attached to the bus.
+
+ destroy_link write '1' to this attribute to destroy an
+ active link
+
+What: /sys/kernel/config/most_net/<link>
+Date: March 8, 2019
+KernelVersion: 5.2
+Description:
+ The attributes:
+
+ buffer_size configure the buffer size for this channel
+
+ subbuffer_size configure the sub-buffer size for this channel
+			(needed for synchronous and isochronous data)
+
+
+ num_buffers configure number of buffers used for this
+ channel
+
+ datatype configure type of data that will travel over
+ this channel
+
+ direction configure whether this link will be an input
+ or output
+
+ dbr_size configure DBR data buffer size (this is used
+			for MediaLB communication only)
+
+ packets_per_xact
+ configure the number of packets that will be
+ collected from the network before being
+ transmitted via USB (this is used for USB
+			communication only)
+
+ device name of the device the link is to be attached to
+
+ channel name of the channel the link is to be attached to
+
+ comp_params pass parameters needed by some components
+
+ create_link write '1' to this attribute to trigger the
+ creation of the link. In case of speculative
+			configuration, the creation is postponed until
+			a physical device is attached to the bus.
+
+ destroy_link write '1' to this attribute to destroy an
+ active link
+
+What: /sys/kernel/config/most_sound/<card>
+Date: March 8, 2019
+KernelVersion: 5.2
+Description:
+ The attributes:
+
+ create_card write '1' to this attribute to trigger the
+ registration of the sound card with the ALSA
+ subsystem.
+
+What: /sys/kernel/config/most_sound/<card>/<link>
+Date: March 8, 2019
+KernelVersion: 5.2
+Description:
+ The attributes:
+
+ buffer_size configure the buffer size for this channel
+
+ subbuffer_size configure the sub-buffer size for this channel
+			(needed for synchronous and isochronous data)
+
+
+ num_buffers configure number of buffers used for this
+ channel
+
+ datatype configure type of data that will travel over
+ this channel
+
+ direction configure whether this link will be an input
+ or output
+
+ dbr_size configure DBR data buffer size (this is used
+			for MediaLB communication only)
+
+ packets_per_xact
+ configure the number of packets that will be
+ collected from the network before being
+ transmitted via USB (this is used for USB
+			communication only)
+
+ device name of the device the link is to be attached to
+
+ channel name of the channel the link is to be attached to
+
+ comp_params pass parameters needed by some components
+
+ create_link write '1' to this attribute to trigger the
+ creation of the link. In case of speculative
+			configuration, the creation is postponed until
+			a physical device is attached to the bus.
+
+ destroy_link write '1' to this attribute to destroy an
+ active link
+
+What: /sys/kernel/config/rdma_cm/<hca>/ports/<port-num>/default_roce_tos
+Date: March 8, 2019
+KernelVersion: 5.2
+Description: RDMA-CM QPs from HCA <hca> at port <port-num>
+ will be created with this TOS as default.
+ This can be overridden by using the rdma_set_option API.
+ The possible RoCE TOS values are 0-255.
diff --git a/drivers/staging/most/Documentation/driver_usage.txt b/drivers/staging/most/Documentation/driver_usage.txt
index da7a8f405b9b..56d79195bb3c 100644
--- a/drivers/staging/most/Documentation/driver_usage.txt
+++ b/drivers/staging/most/Documentation/driver_usage.txt
@@ -115,36 +115,75 @@ following components are available
Section 2 Usage of the MOST Driver
- Section 2.1 Configuration
-
-See ABI/sysfs-bus-most.txt
-
-
- Section 2.2 Routing Channels
-
-To connect a configured channel to a certain core component and make it
-accessible for user space applications, the driver attribute 'add_link' is
-used. The configuration string passed to it has the following format:
-
- "device_name:channel_name:component_name:link_name[.param]"
-
-It is the concatenation of up to four substrings separated by a colon. The
-substrings contain the names of the MOST interface, the channel, the
-component driver and a custom name with which the link is going to be
-referenced with. Since some components need additional information, the
-link name can be extended with a component-specific parameter (separated by
-a dot). In case the character device component is loaded, the handle would
-also appear as a device node in the /dev directory.
-
-Cdev component example:
- $ echo "mdev0:ep_81:cdev:my_rx_channel" >$(DRV_DIR)/add_link
-
-
-Sound component example:
-
-The sound component needs additional parameters to determine the audio
-resolution that is going to be used and to trigger the registration of a
-sound card with ALSA. The following audio formats are available:
+ Section 2.1 Configuration and Data Link
+
+The driver is to be configured via configfs. Each loaded component kernel
+object (see section 1.3) registers a subsystem with configfs, which is used to
+configure and establish communication pathways (links) to attached devices on
+the bus. To do so, the user has to descend into the component's configuration
+directory and create a new directory (a child config item). The name of this
+directory will be used as a reference for the link and it will contain the
+following attributes (a usage example follows the list):
+
+ - buffer_size
+ configure the buffer size for this channel
+ - subbuffer_size
+ configure the sub-buffer size for this channel (needed for
+	synchronous and isochronous data)
+ - num_buffers
+ configure number of buffers used for this channel
+ - datatype
+ configure type of data that will travel over this channel
+ - direction
+ configure whether this link will be an input or output
+ - dbr_size
+	configure DBR data buffer size (this is used for MediaLB communication
+ only)
+ - packets_per_xact
+ configure the number of packets that will be collected from the
+ network before being transmitted via USB (this is used for USB
+	communication only)
+ - device
+ name of the device the link is to be attached to
+ - channel
+ name of the channel the link is to be attached to
+ - comp_params
+ pass parameters needed by some components
+ - create_link
+ write '1' to this attribute to trigger the creation of the link. In
+	case of speculative configuration, the creation is postponed until
+	a physical device is attached to the bus.
+ - destroy_link
+ write '1' to this attribute to destroy an already established link
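+
+A minimal usage example for a character device link (the device and channel
+names mdev0 and ep_81 as well as the attribute values are only illustrative
+and depend on the attached hardware and its channel capabilities):
+
+	$ mkdir /sys/kernel/config/most_cdev/my_rx_channel
+	$ cd /sys/kernel/config/most_cdev/my_rx_channel
+	$ echo 1024 >buffer_size
+	$ echo 16 >num_buffers
+	$ echo control >datatype
+	$ echo rx >direction
+	$ echo mdev0 >device
+	$ echo ep_81 >channel
+	$ echo 1 >create_link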
+
+
+See ABI/sysfs-bus-most.txt and ABI/configfs-most.txt
+
+
+ Section 2.2 Configure a Sound Card
+
+Setting up synchronous channels to be mapped as an ALSA sound adapter is a
+two-step process. Firstly, a directory (child config group) has to be created
+inside the most_sound component's configuration directory. This adapter dir will
+represent the sound adapter. The name of the directory is for user reference
+only and has no further influence, as all sound adapters will be given a static
+name in ALSA. The sound adapter will have the following attribute:
+
+ - create_card
+ write '1' to this attribute to trigger the registration of the card
+ with the ALSA subsystem.
+	In case of speculative configuration, the creation is postponed
+	until a physical device is attached to the bus.
+
+Secondly, links will have to be created inside the adapter dir as described in
+section 2.1. These links will become the PCM devices of the sound card. The
+name of a PCM device will be inherited from the directory name. When all
+channels have been configured and created, the sound card itself can be created
+by writing '1' to the create_card attribute.
+
+The sound component needs an additional parameter to determine the audio
+resolution that is going to be used.
+The following audio formats are available:
- "1x8" (Mono)
- "2x16" (16-bit stereo)
@@ -152,18 +191,8 @@ sound card with ALSA. The following audio formats are available:
- "2x32" (32-bit stereo)
- "6x16" (16-bit surround 5.1)
-To make the sound module create a sound card and register it with ALSA the
-string "create" needs to be attached to the module parameter section of the
-configuration string. To create a sound card with with two playback devices
-(linked to channel ep01 and ep02) and one capture device (linked to channel
-ep83) the following is written to the add_link file:
-
- $ echo "mdev0:ep01:sound:most51_playback.6x16" >$(DRV_DIR)/add_link
- $ echo "mdev0:ep02:sound:most_playback.2x16" >$(DRV_DIR)/add_link
- $ echo "mdev0:ep83:sound:most_capture.2x16.create" >$(DRV_DIR)/add_link
-
-The link names (most51_playback, most_playback and most_capture) will
-become the names of the PCM devices of the sound card.
+The resolution string has to be written to the link directory's comp_params
+attribute.
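+
+A sketch of the complete sequence for a single 16-bit stereo playback PCM
+(again with illustrative names and values; mdev0 and ep01 are placeholders
+for whatever the attached hardware provides):
+
+	$ mkdir /sys/kernel/config/most_sound/adapter0
+	$ mkdir /sys/kernel/config/most_sound/adapter0/most_playback
+	$ cd /sys/kernel/config/most_sound/adapter0/most_playback
+	$ echo 512 >buffer_size
+	$ echo 4 >subbuffer_size
+	$ echo 16 >num_buffers
+	$ echo sync >datatype
+	$ echo tx >direction
+	$ echo mdev0 >device
+	$ echo ep01 >channel
+	$ echo 2x16 >comp_params
+	$ echo 1 >create_link
+	$ echo 1 >../create_card
+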
Section 2.3 USB Padding
@@ -174,13 +203,13 @@ hardware, which is for performance optimization purposes of the USB
transmission.
When transmitting synchronous data the allocated channel width needs to be
-written to 'set_subbuffer_size'. Additionally, the number of MOST frames
-that should travel to the host within one USB transaction need to be
-written to 'packets_per_xact'.
+written to 'subbuffer_size'. Additionally, the number of MOST frames that
+should travel to the host within one USB transaction needs to be written to
+'packets_per_xact'.
The driver, then, calculates the synchronous threshold as follows:
- frame_size = set_subbuffer_size * packets_per_xact
+ frame_size = subbuffer_size * packets_per_xact
In case 'packets_per_xact' is set to 0xFF the maximum number of packets,
allocated within one MOST frame, is calculated that fit into _one_ 512 byte
@@ -192,15 +221,15 @@ This frame_size is the number of synchronous data within an USB
transaction, which renders MTU_USB - frame_size bytes for padding.
When transmitting isochronous AVP data the desired packet size needs to be
-written to 'set_subbuffer_size' and hardware will always expect two
-isochronous packets within one USB transaction. This renders
+written to 'subbuffer_size' and hardware will always expect two isochronous
+packets within one USB transaction. This renders
- MTU_USB - (2 * set_subbuffer_size)
+ MTU_USB - (2 * subbuffer_size)
bytes for padding.
-Note that at least (2 * set_subbuffer_size) bytes for isochronous data or
-(set_subbuffer_size * packts_per_xact) bytes for synchronous data need to
+Note that at least (2 * subbuffer_size) bytes for isochronous data or
+(subbuffer_size * packets_per_xact) bytes for synchronous data need to
be put in the transmission buffer and passed to the driver.
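+
+For example, assuming an MTU_USB of 512 bytes (all numbers are purely
+illustrative): a subbuffer_size of 12 and a packets_per_xact of 32 give
+
+	frame_size = 12 * 32 = 384
+
+which leaves 512 - 384 = 128 bytes of padding per synchronous transaction,
+while an isochronous subbuffer_size of 188 leads to 2 * 188 = 376 bytes of
+data and 512 - 376 = 136 bytes of padding.
+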
Since adapter drivers are allowed to change a chosen configuration to best
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index 20047abbe560..db32ea7d1743 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,6 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig MOST
tristate "MOST support"
- depends on HAS_DMA
+ depends on HAS_DMA && CONFIGFS_FS
default n
---help---
Say Y here if you want to enable MOST support.
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index c7662f65f6db..85ea5a434ced 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST) += most_core.o
most_core-y := core.o
+most_core-y += configfs.o
ccflags-y += -I $(srctree)/drivers/staging/
obj-$(CONFIG_MOST_CDEV) += cdev/
diff --git a/drivers/staging/most/cdev/Kconfig b/drivers/staging/most/cdev/Kconfig
index 2b04e26bcbea..330c95fe6550 100644
--- a/drivers/staging/most/cdev/Kconfig
+++ b/drivers/staging/most/cdev/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# MOST Cdev configuration
#
diff --git a/drivers/staging/most/cdev/Makefile b/drivers/staging/most/cdev/Makefile
index 21b0bd72c01d..9f4a8b8c9c27 100644
--- a/drivers/staging/most/cdev/Makefile
+++ b/drivers/staging/most/cdev/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST_CDEV) += most_cdev.o
most_cdev-objs := cdev.o
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index f2b347cda8b7..d0cc0b746107 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -425,7 +425,7 @@ static int comp_tx_completion(struct most_interface *iface, int channel_id)
* Returns 0 on success or error code otherwise.
*/
static int comp_probe(struct most_interface *iface, int channel_id,
- struct most_channel_config *cfg, char *name)
+ struct most_channel_config *cfg, char *name, char *args)
{
struct comp_channel *c;
unsigned long cl_flags;
@@ -527,8 +527,13 @@ static int __init mod_init(void)
err = most_register_component(&comp.cc);
if (err)
goto free_cdev;
+ err = most_register_configfs_subsys(&comp.cc);
+ if (err)
+ goto deregister_comp;
return 0;
+deregister_comp:
+ most_deregister_component(&comp.cc);
free_cdev:
unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
@@ -543,13 +548,14 @@ static void __exit mod_exit(void)
pr_info("exit module\n");
+ most_deregister_configfs_subsys(&comp.cc);
most_deregister_component(&comp.cc);
list_for_each_entry_safe(c, tmp, &channel_list, list) {
destroy_cdev(c);
destroy_channel(c);
}
- unregister_chrdev_region(comp.devno, 1);
+ unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
ida_destroy(&comp.minor_id);
class_destroy(comp.class);
}
diff --git a/drivers/staging/most/configfs.c b/drivers/staging/most/configfs.c
new file mode 100644
index 000000000000..1d8bf29e0ffb
--- /dev/null
+++ b/drivers/staging/most/configfs.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * configfs.c - Implementation of configfs interface to the driver stack
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/configfs.h>
+#include <most/core.h>
+
+struct mdev_link {
+ struct config_item item;
+ struct list_head list;
+ bool create_link;
+ bool destroy_link;
+ u16 num_buffers;
+ u16 buffer_size;
+ u16 subbuffer_size;
+ u16 packets_per_xact;
+ u16 dbr_size;
+ char datatype[PAGE_SIZE];
+ char direction[PAGE_SIZE];
+ char name[PAGE_SIZE];
+ char device[PAGE_SIZE];
+ char channel[PAGE_SIZE];
+ char comp[PAGE_SIZE];
+ char comp_params[PAGE_SIZE];
+};
+
+static struct list_head mdev_link_list;
+
+static int set_cfg_buffer_size(struct mdev_link *link)
+{
+ if (!link->buffer_size)
+ return -ENODATA;
+ return most_set_cfg_buffer_size(link->device, link->channel,
+ link->buffer_size);
+}
+
+static int set_cfg_subbuffer_size(struct mdev_link *link)
+{
+ if (!link->subbuffer_size)
+ return -ENODATA;
+ return most_set_cfg_subbuffer_size(link->device, link->channel,
+ link->subbuffer_size);
+}
+
+static int set_cfg_dbr_size(struct mdev_link *link)
+{
+ if (!link->dbr_size)
+ return -ENODATA;
+ return most_set_cfg_dbr_size(link->device, link->channel,
+ link->dbr_size);
+}
+
+static int set_cfg_num_buffers(struct mdev_link *link)
+{
+ if (!link->num_buffers)
+ return -ENODATA;
+ return most_set_cfg_num_buffers(link->device, link->channel,
+ link->num_buffers);
+}
+
+static int set_cfg_packets_xact(struct mdev_link *link)
+{
+ if (!link->packets_per_xact)
+ return -ENODATA;
+ return most_set_cfg_packets_xact(link->device, link->channel,
+ link->packets_per_xact);
+}
+
+static int set_cfg_direction(struct mdev_link *link)
+{
+ if (!strlen(link->direction))
+ return -ENODATA;
+ return most_set_cfg_direction(link->device, link->channel,
+ link->direction);
+}
+
+static int set_cfg_datatype(struct mdev_link *link)
+{
+ if (!strlen(link->datatype))
+ return -ENODATA;
+ return most_set_cfg_datatype(link->device, link->channel,
+ link->datatype);
+}
+
+static int (*set_config_val[])(struct mdev_link *link) = {
+ set_cfg_buffer_size,
+ set_cfg_subbuffer_size,
+ set_cfg_dbr_size,
+ set_cfg_num_buffers,
+ set_cfg_packets_xact,
+ set_cfg_direction,
+ set_cfg_datatype,
+};
+
+static struct mdev_link *to_mdev_link(struct config_item *item)
+{
+ return container_of(item, struct mdev_link, item);
+}
+
+static int set_config_and_add_link(struct mdev_link *mdev_link)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(set_config_val); i++) {
+ ret = set_config_val[i](mdev_link);
+ if (ret < 0 && ret != -ENODEV) {
+ pr_err("Config failed\n");
+ return ret;
+ }
+ }
+
+ return most_add_link(mdev_link->device, mdev_link->channel,
+ mdev_link->comp, mdev_link->name,
+ mdev_link->comp_params);
+}
+
+static ssize_t mdev_link_create_link_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+ bool tmp;
+ int ret;
+
+ ret = kstrtobool(page, &tmp);
+ if (ret)
+ return ret;
+ if (!tmp)
+ return count;
+ ret = set_config_and_add_link(mdev_link);
+ if (ret && ret != -ENODEV)
+ return ret;
+ list_add_tail(&mdev_link->list, &mdev_link_list);
+ mdev_link->create_link = tmp;
+ return count;
+}
+
+static ssize_t mdev_link_destroy_link_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+ bool tmp;
+ int ret;
+
+ ret = kstrtobool(page, &tmp);
+ if (ret)
+ return ret;
+ if (!tmp)
+ return count;
+ mdev_link->destroy_link = tmp;
+ ret = most_remove_link(mdev_link->device, mdev_link->channel,
+ mdev_link->comp);
+ if (ret)
+ return ret;
+ if (!list_empty(&mdev_link_list))
+ list_del(&mdev_link->list);
+ return count;
+}
+
+static ssize_t mdev_link_direction_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->direction);
+}
+
+static ssize_t mdev_link_direction_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+
+ if (!sysfs_streq(page, "dir_rx") && !sysfs_streq(page, "rx") &&
+ !sysfs_streq(page, "dir_tx") && !sysfs_streq(page, "tx"))
+ return -EINVAL;
+ strcpy(mdev_link->direction, page);
+ return count;
+}
+
+static ssize_t mdev_link_datatype_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->datatype);
+}
+
+static ssize_t mdev_link_datatype_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+
+ if (!sysfs_streq(page, "control") && !sysfs_streq(page, "async") &&
+ !sysfs_streq(page, "sync") && !sysfs_streq(page, "isoc") &&
+ !sysfs_streq(page, "isoc_avp"))
+ return -EINVAL;
+ strcpy(mdev_link->datatype, page);
+ return count;
+}
+
+static ssize_t mdev_link_device_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->device);
+}
+
+static ssize_t mdev_link_device_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+
+ strcpy(mdev_link->device, page);
+ return count;
+}
+
+static ssize_t mdev_link_channel_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->channel);
+}
+
+static ssize_t mdev_link_channel_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+
+ strcpy(mdev_link->channel, page);
+ return count;
+}
+
+static ssize_t mdev_link_comp_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->comp);
+}
+
+static ssize_t mdev_link_comp_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+
+ strcpy(mdev_link->comp, page);
+ return count;
+}
+
+static ssize_t mdev_link_comp_params_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ to_mdev_link(item)->comp_params);
+}
+
+static ssize_t mdev_link_comp_params_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+
+ strcpy(mdev_link->comp_params, page);
+ return count;
+}
+
+static ssize_t mdev_link_num_buffers_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ to_mdev_link(item)->num_buffers);
+}
+
+static ssize_t mdev_link_num_buffers_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+ int ret;
+
+ ret = kstrtou16(page, 0, &mdev_link->num_buffers);
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t mdev_link_buffer_size_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ to_mdev_link(item)->buffer_size);
+}
+
+static ssize_t mdev_link_buffer_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+ int ret;
+
+ ret = kstrtou16(page, 0, &mdev_link->buffer_size);
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t mdev_link_subbuffer_size_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ to_mdev_link(item)->subbuffer_size);
+}
+
+static ssize_t mdev_link_subbuffer_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+ int ret;
+
+ ret = kstrtou16(page, 0, &mdev_link->subbuffer_size);
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t mdev_link_packets_per_xact_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ to_mdev_link(item)->packets_per_xact);
+}
+
+static ssize_t mdev_link_packets_per_xact_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+ int ret;
+
+ ret = kstrtou16(page, 0, &mdev_link->packets_per_xact);
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t mdev_link_dbr_size_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", to_mdev_link(item)->dbr_size);
+}
+
+static ssize_t mdev_link_dbr_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+ int ret;
+
+ ret = kstrtou16(page, 0, &mdev_link->dbr_size);
+ if (ret)
+ return ret;
+ return count;
+}
+
+CONFIGFS_ATTR_WO(mdev_link_, create_link);
+CONFIGFS_ATTR_WO(mdev_link_, destroy_link);
+CONFIGFS_ATTR(mdev_link_, device);
+CONFIGFS_ATTR(mdev_link_, channel);
+CONFIGFS_ATTR(mdev_link_, comp);
+CONFIGFS_ATTR(mdev_link_, comp_params);
+CONFIGFS_ATTR(mdev_link_, num_buffers);
+CONFIGFS_ATTR(mdev_link_, buffer_size);
+CONFIGFS_ATTR(mdev_link_, subbuffer_size);
+CONFIGFS_ATTR(mdev_link_, packets_per_xact);
+CONFIGFS_ATTR(mdev_link_, datatype);
+CONFIGFS_ATTR(mdev_link_, direction);
+CONFIGFS_ATTR(mdev_link_, dbr_size);
+
+static struct configfs_attribute *mdev_link_attrs[] = {
+ &mdev_link_attr_create_link,
+ &mdev_link_attr_destroy_link,
+ &mdev_link_attr_device,
+ &mdev_link_attr_channel,
+ &mdev_link_attr_comp,
+ &mdev_link_attr_comp_params,
+ &mdev_link_attr_num_buffers,
+ &mdev_link_attr_buffer_size,
+ &mdev_link_attr_subbuffer_size,
+ &mdev_link_attr_packets_per_xact,
+ &mdev_link_attr_datatype,
+ &mdev_link_attr_direction,
+ &mdev_link_attr_dbr_size,
+ NULL,
+};
+
+static void mdev_link_release(struct config_item *item)
+{
+ struct mdev_link *mdev_link = to_mdev_link(item);
+ int ret;
+
+ if (!list_empty(&mdev_link_list)) {
+ ret = most_remove_link(mdev_link->device, mdev_link->channel,
+ mdev_link->comp);
+ if (ret && (ret != -ENODEV))
+ pr_err("Removing link failed.\n");
+ list_del(&mdev_link->list);
+ }
+ kfree(to_mdev_link(item));
+}
+
+static struct configfs_item_operations mdev_link_item_ops = {
+ .release = mdev_link_release,
+};
+
+static const struct config_item_type mdev_link_type = {
+ .ct_item_ops = &mdev_link_item_ops,
+ .ct_attrs = mdev_link_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+struct most_common {
+ struct config_group group;
+};
+
+static struct most_common *to_most_common(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct most_common, group);
+}
+
+static struct config_item *most_common_make_item(struct config_group *group,
+ const char *name)
+{
+ struct mdev_link *mdev_link;
+
+ mdev_link = kzalloc(sizeof(*mdev_link), GFP_KERNEL);
+ if (!mdev_link)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&mdev_link->item, name,
+ &mdev_link_type);
+
+ if (!strcmp(group->cg_item.ci_namebuf, "most_cdev"))
+ strcpy(mdev_link->comp, "cdev");
+ else if (!strcmp(group->cg_item.ci_namebuf, "most_net"))
+ strcpy(mdev_link->comp, "net");
+ else if (!strcmp(group->cg_item.ci_namebuf, "most_video"))
+ strcpy(mdev_link->comp, "video");
+ strcpy(mdev_link->name, name);
+ return &mdev_link->item;
+}
+
+static void most_common_release(struct config_item *item)
+{
+ kfree(to_most_common(item));
+}
+
+static struct configfs_item_operations most_common_item_ops = {
+ .release = most_common_release,
+};
+
+static struct configfs_group_operations most_common_group_ops = {
+ .make_item = most_common_make_item,
+};
+
+static const struct config_item_type most_common_type = {
+ .ct_item_ops = &most_common_item_ops,
+ .ct_group_ops = &most_common_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem most_cdev_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_cdev",
+ .ci_type = &most_common_type,
+ },
+ },
+};
+
+static struct configfs_subsystem most_net_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_net",
+ .ci_type = &most_common_type,
+ },
+ },
+};
+
+static struct configfs_subsystem most_video_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_video",
+ .ci_type = &most_common_type,
+ },
+ },
+};
+
+struct most_snd_grp {
+ struct config_group group;
+ bool create_card;
+ struct list_head list;
+};
+
+static struct most_snd_grp *to_most_snd_grp(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct most_snd_grp, group);
+}
+
+static struct config_item *most_snd_grp_make_item(struct config_group *group,
+ const char *name)
+{
+ struct mdev_link *mdev_link;
+
+ mdev_link = kzalloc(sizeof(*mdev_link), GFP_KERNEL);
+ if (!mdev_link)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&mdev_link->item, name, &mdev_link_type);
+ mdev_link->create_link = 0;
+ strcpy(mdev_link->name, name);
+ strcpy(mdev_link->comp, "sound");
+ return &mdev_link->item;
+}
+
+static ssize_t most_snd_grp_create_card_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct most_snd_grp *snd_grp = to_most_snd_grp(item);
+ int ret;
+ bool tmp;
+
+ ret = kstrtobool(page, &tmp);
+ if (ret)
+ return ret;
+ if (tmp) {
+ ret = most_cfg_complete("sound");
+ if (ret)
+ return ret;
+ }
+ snd_grp->create_card = tmp;
+ return count;
+}
+
+CONFIGFS_ATTR_WO(most_snd_grp_, create_card);
+
+static struct configfs_attribute *most_snd_grp_attrs[] = {
+ &most_snd_grp_attr_create_card,
+ NULL,
+};
+
+static void most_snd_grp_release(struct config_item *item)
+{
+ struct most_snd_grp *group = to_most_snd_grp(item);
+
+ list_del(&group->list);
+ kfree(group);
+}
+
+static struct configfs_item_operations most_snd_grp_item_ops = {
+ .release = most_snd_grp_release,
+};
+
+static struct configfs_group_operations most_snd_grp_group_ops = {
+ .make_item = most_snd_grp_make_item,
+};
+
+static const struct config_item_type most_snd_grp_type = {
+ .ct_item_ops = &most_snd_grp_item_ops,
+ .ct_group_ops = &most_snd_grp_group_ops,
+ .ct_attrs = most_snd_grp_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+struct most_sound {
+ struct configfs_subsystem subsys;
+ struct list_head soundcard_list;
+};
+
+static struct config_group *most_sound_make_group(struct config_group *group,
+ const char *name)
+{
+ struct most_snd_grp *most;
+ struct most_sound *ms = container_of(to_configfs_subsystem(group),
+ struct most_sound, subsys);
+
+ list_for_each_entry(most, &ms->soundcard_list, list) {
+ if (!most->create_card) {
+ pr_info("adapter configuration still in progress.\n");
+ return ERR_PTR(-EPROTO);
+ }
+ }
+ most = kzalloc(sizeof(*most), GFP_KERNEL);
+ if (!most)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&most->group, name, &most_snd_grp_type);
+ list_add_tail(&most->list, &ms->soundcard_list);
+ return &most->group;
+}
+
+static struct configfs_group_operations most_sound_group_ops = {
+ .make_group = most_sound_make_group,
+};
+
+static const struct config_item_type most_sound_type = {
+ .ct_group_ops = &most_sound_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct most_sound most_sound_subsys = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_sound",
+ .ci_type = &most_sound_type,
+ },
+ },
+ },
+};
+
+int most_register_configfs_subsys(struct core_component *c)
+{
+ int ret;
+
+ if (!strcmp(c->name, "cdev"))
+ ret = configfs_register_subsystem(&most_cdev_subsys);
+ else if (!strcmp(c->name, "net"))
+ ret = configfs_register_subsystem(&most_net_subsys);
+ else if (!strcmp(c->name, "video"))
+ ret = configfs_register_subsystem(&most_video_subsys);
+ else if (!strcmp(c->name, "sound"))
+ ret = configfs_register_subsystem(&most_sound_subsys.subsys);
+ else
+ return -ENODEV;
+
+ if (ret) {
+ pr_err("Error %d while registering subsystem %s\n",
+ ret, c->name);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(most_register_configfs_subsys);
+
+void most_interface_register_notify(const char *mdev)
+{
+ bool register_snd_card = false;
+ struct mdev_link *mdev_link;
+
+ list_for_each_entry(mdev_link, &mdev_link_list, list) {
+ if (!strcmp(mdev_link->device, mdev)) {
+ set_config_and_add_link(mdev_link);
+ if (!strcmp(mdev_link->comp, "sound"))
+ register_snd_card = true;
+ }
+ }
+ if (register_snd_card)
+ most_cfg_complete("sound");
+}
+
+void most_deregister_configfs_subsys(struct core_component *c)
+{
+ if (!strcmp(c->name, "cdev"))
+ configfs_unregister_subsystem(&most_cdev_subsys);
+ else if (!strcmp(c->name, "net"))
+ configfs_unregister_subsystem(&most_net_subsys);
+ else if (!strcmp(c->name, "video"))
+ configfs_unregister_subsystem(&most_video_subsys);
+ else if (!strcmp(c->name, "sound"))
+ configfs_unregister_subsystem(&most_sound_subsys.subsys);
+}
+EXPORT_SYMBOL_GPL(most_deregister_configfs_subsys);
+
+int __init configfs_init(void)
+{
+ config_group_init(&most_cdev_subsys.su_group);
+ mutex_init(&most_cdev_subsys.su_mutex);
+
+ config_group_init(&most_net_subsys.su_group);
+ mutex_init(&most_net_subsys.su_mutex);
+
+ config_group_init(&most_video_subsys.su_group);
+ mutex_init(&most_video_subsys.su_mutex);
+
+ config_group_init(&most_sound_subsys.subsys.su_group);
+ mutex_init(&most_sound_subsys.subsys.su_mutex);
+
+ INIT_LIST_HEAD(&most_sound_subsys.soundcard_list);
+ INIT_LIST_HEAD(&mdev_link_list);
+
+ return 0;
+}
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 18936cdb1083..86a8545c8d97 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -272,19 +272,6 @@ static ssize_t set_number_of_buffers_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}
-static ssize_t set_number_of_buffers_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct most_channel *c = to_channel(dev);
- int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
-
- if (ret)
- return ret;
- return count;
-}
-
static ssize_t set_buffer_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -294,19 +281,6 @@ static ssize_t set_buffer_size_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}
-static ssize_t set_buffer_size_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct most_channel *c = to_channel(dev);
- int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
-
- if (ret)
- return ret;
- return count;
-}
-
static ssize_t set_direction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -320,28 +294,6 @@ static ssize_t set_direction_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
-static ssize_t set_direction_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct most_channel *c = to_channel(dev);
-
- if (!strcmp(buf, "dir_rx\n")) {
- c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "rx\n")) {
- c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "dir_tx\n")) {
- c->cfg.direction = MOST_CH_TX;
- } else if (!strcmp(buf, "tx\n")) {
- c->cfg.direction = MOST_CH_TX;
- } else {
- pr_info("WARN: invalid attribute settings\n");
- return -EINVAL;
- }
- return count;
-}
-
static ssize_t set_datatype_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -356,28 +308,6 @@ static ssize_t set_datatype_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
-static ssize_t set_datatype_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- int i;
- struct most_channel *c = to_channel(dev);
-
- for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
- if (!strcmp(buf, ch_data_type[i].name)) {
- c->cfg.data_type = ch_data_type[i].most_ch_data_type;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(ch_data_type)) {
- pr_info("WARN: invalid attribute settings\n");
- return -EINVAL;
- }
- return count;
-}
-
static ssize_t set_subbuffer_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -387,19 +317,6 @@ static ssize_t set_subbuffer_size_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}
-static ssize_t set_subbuffer_size_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct most_channel *c = to_channel(dev);
- int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
-
- if (ret)
- return ret;
- return count;
-}
-
static ssize_t set_packets_per_xact_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -409,19 +326,6 @@ static ssize_t set_packets_per_xact_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}
-static ssize_t set_packets_per_xact_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct most_channel *c = to_channel(dev);
- int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
-
- if (ret)
- return ret;
- return count;
-}
-
static ssize_t set_dbr_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -430,18 +334,6 @@ static ssize_t set_dbr_size_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
}
-static ssize_t set_dbr_size_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct most_channel *c = to_channel(dev);
- int ret = kstrtou16(buf, 0, &c->cfg.dbr_size);
-
- if (ret)
- return ret;
- return count;
-}
-
#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
static umode_t channel_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
@@ -469,13 +361,13 @@ static DEVICE_ATTR_RO(number_of_stream_buffers);
static DEVICE_ATTR_RO(size_of_stream_buffer);
static DEVICE_ATTR_RO(size_of_packet_buffer);
static DEVICE_ATTR_RO(channel_starving);
-static DEVICE_ATTR_RW(set_buffer_size);
-static DEVICE_ATTR_RW(set_number_of_buffers);
-static DEVICE_ATTR_RW(set_direction);
-static DEVICE_ATTR_RW(set_datatype);
-static DEVICE_ATTR_RW(set_subbuffer_size);
-static DEVICE_ATTR_RW(set_packets_per_xact);
-static DEVICE_ATTR_RW(set_dbr_size);
+static DEVICE_ATTR_RO(set_buffer_size);
+static DEVICE_ATTR_RO(set_number_of_buffers);
+static DEVICE_ATTR_RO(set_direction);
+static DEVICE_ATTR_RO(set_datatype);
+static DEVICE_ATTR_RO(set_subbuffer_size);
+static DEVICE_ATTR_RO(set_packets_per_xact);
+static DEVICE_ATTR_RO(set_dbr_size);
static struct attribute *channel_attrs[] = {
DEV_ATTR(available_directions),
@@ -701,6 +593,7 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
static
inline int link_channel_to_component(struct most_channel *c,
struct core_component *comp,
+ char *name,
char *comp_param)
{
int ret;
@@ -714,7 +607,8 @@ inline int link_channel_to_component(struct most_channel *c,
return -ENOSPC;
*comp_ptr = comp;
- ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, comp_param);
+ ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
+ comp_param);
if (ret) {
*comp_ptr = NULL;
return ret;
@@ -722,65 +616,118 @@ inline int link_channel_to_component(struct most_channel *c,
return 0;
}
-/**
- * add_link_store - store function for add_link attribute
- * @drv: device driver
- * @buf: buffer
- * @len: buffer length
- *
- * This parses the string given by buf and splits it into
- * four substrings. Note: last substring is optional. In case a cdev
- * component is loaded the optional 4th substring will make up the name of
- * device node in the /dev directory. If omitted, the device node will
- * inherit the channel's name within sysfs.
- *
- * Searches for (device, channel) pair and probes the component
- *
- * Example:
- * (1) echo "mdev0:ch6:cdev:my_rxchannel" >add_link
- * (2) echo "mdev1:ep81:cdev" >add_link
- *
- * (1) would create the device node /dev/my_rxchannel
- * (2) would create the device node /dev/mdev1-ep81
- */
-static ssize_t add_link_store(struct device_driver *drv,
- const char *buf,
- size_t len)
+int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
{
- struct most_channel *c;
- struct core_component *comp;
- char buffer[STRING_SIZE];
- char *mdev;
- char *mdev_ch;
- char *comp_name;
- char *comp_param;
- char devnod_buf[STRING_SIZE];
- int ret;
- size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+ struct most_channel *c = get_channel(mdev, mdev_ch);
- strlcpy(buffer, buf, max_len);
- ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, &comp_param);
- if (ret)
- return ret;
- comp = match_component(comp_name);
- if (!comp)
+ if (!c)
return -ENODEV;
- if (!comp_param || *comp_param == 0) {
- snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
- mdev_ch);
- comp_param = devnod_buf;
+ c->cfg.buffer_size = val;
+ return 0;
+}
+
+int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
+{
+ struct most_channel *c = get_channel(mdev, mdev_ch);
+
+ if (!c)
+ return -ENODEV;
+ c->cfg.subbuffer_size = val;
+ return 0;
+}
+
+int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
+{
+ struct most_channel *c = get_channel(mdev, mdev_ch);
+
+ if (!c)
+ return -ENODEV;
+ c->cfg.dbr_size = val;
+ return 0;
+}
+
+int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
+{
+ struct most_channel *c = get_channel(mdev, mdev_ch);
+
+ if (!c)
+ return -ENODEV;
+ c->cfg.num_buffers = val;
+ return 0;
+}
+
+int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
+{
+ int i;
+ struct most_channel *c = get_channel(mdev, mdev_ch);
+
+ if (!c)
+ return -ENODEV;
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (!strcmp(buf, ch_data_type[i].name)) {
+ c->cfg.data_type = ch_data_type[i].most_ch_data_type;
+ break;
+ }
}
- c = get_channel(mdev, mdev_ch);
+ if (i == ARRAY_SIZE(ch_data_type))
+ pr_info("WARN: invalid attribute settings\n");
+ return 0;
+}
+
+int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
+{
+ struct most_channel *c = get_channel(mdev, mdev_ch);
+
+ if (!c)
+ return -ENODEV;
+ if (!strcmp(buf, "dir_rx\n")) {
+ c->cfg.direction = MOST_CH_RX;
+ } else if (!strcmp(buf, "rx\n")) {
+ c->cfg.direction = MOST_CH_RX;
+ } else if (!strcmp(buf, "dir_tx\n")) {
+ c->cfg.direction = MOST_CH_TX;
+ } else if (!strcmp(buf, "tx\n")) {
+ c->cfg.direction = MOST_CH_TX;
+ } else {
+ pr_info("Invalid direction\n");
+ return -ENODATA;
+ }
+ return 0;
+}
+
+int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
+{
+ struct most_channel *c = get_channel(mdev, mdev_ch);
+
if (!c)
return -ENODEV;
+ c->cfg.packets_per_xact = val;
+ return 0;
+}
+
+int most_cfg_complete(char *comp_name)
+{
+ struct core_component *comp;
+
+ comp = match_component(comp_name);
+ if (!comp)
+ return -ENODEV;
- ret = link_channel_to_component(c, comp, comp_param);
- if (ret)
- return ret;
- return len;
+ return comp->cfg_complete();
}
+int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
+ char *comp_param)
+{
+ struct most_channel *c = get_channel(mdev, mdev_ch);
+ struct core_component *comp = match_component(comp_name);
+
+ if (!c || !comp)
+ return -ENODEV;
+
+ return link_channel_to_component(c, comp, link_name, comp_param);
+}
/**
* remove_link_store - store function for remove_link attribute
* @drv: device driver
@@ -823,17 +770,36 @@ static ssize_t remove_link_store(struct device_driver *drv,
return len;
}
+int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
+{
+ struct most_channel *c;
+ struct core_component *comp;
+
+ comp = match_component(comp_name);
+ if (!comp)
+ return -ENODEV;
+ c = get_channel(mdev, mdev_ch);
+ if (!c)
+ return -ENODEV;
+
+ if (comp->disconnect_channel(c->iface, c->channel_id))
+ return -EIO;
+ if (c->pipe0.comp == comp)
+ c->pipe0.comp = NULL;
+ if (c->pipe1.comp == comp)
+ c->pipe1.comp = NULL;
+ return 0;
+}
+
#define DRV_ATTR(_name) (&driver_attr_##_name.attr)
static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);
-static DRIVER_ATTR_WO(add_link);
static DRIVER_ATTR_WO(remove_link);
static struct attribute *mc_attrs[] = {
DRV_ATTR(links),
DRV_ATTR(components),
- DRV_ATTR(add_link),
DRV_ATTR(remove_link),
NULL,
};
@@ -1431,7 +1397,7 @@ int most_register_interface(struct most_interface *iface)
INIT_LIST_HEAD(&iface->p->channel_list);
iface->p->dev_id = id;
- snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
+ strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
iface->dev.init_name = iface->p->name;
iface->dev.bus = &mc.bus;
iface->dev.parent = &mc.dev;
@@ -1487,6 +1453,7 @@ int most_register_interface(struct most_interface *iface)
}
pr_info("registered new device mdev%d (%s)\n",
id, iface->description);
+ most_interface_register_notify(iface->description);
return 0;
err_free_most_channel:
@@ -1621,7 +1588,7 @@ static int __init most_init(void)
err = -ENOMEM;
goto err_unregister_driver;
}
-
+ configfs_init();
return 0;
err_unregister_driver:
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/core.h
index 64cc02f161e7..652aaa771029 100644
--- a/drivers/staging/most/core.h
+++ b/drivers/staging/most/core.h
@@ -266,11 +266,13 @@ struct core_component {
struct list_head list;
const char *name;
int (*probe_channel)(struct most_interface *iface, int channel_idx,
- struct most_channel_config *cfg, char *name);
+ struct most_channel_config *cfg, char *name,
+ char *param);
int (*disconnect_channel)(struct most_interface *iface,
int channel_idx);
int (*rx_completion)(struct mbo *mbo);
int (*tx_completion)(struct most_interface *iface, int channel_idx);
+ int (*cfg_complete)(void);
};
/**
@@ -318,5 +320,19 @@ int most_start_channel(struct most_interface *iface, int channel_idx,
struct core_component *comp);
int most_stop_channel(struct most_interface *iface, int channel_idx,
struct core_component *comp);
-
+int __init configfs_init(void);
+int most_register_configfs_subsys(struct core_component *comp);
+void most_deregister_configfs_subsys(struct core_component *comp);
+int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
+ char *comp_param);
+int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
+int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf);
+int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf);
+int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val);
+int most_cfg_complete(char *comp_name);
+void most_interface_register_notify(const char *mdev_name);
#endif /* MOST_CORE_H_ */
diff --git a/drivers/staging/most/dim2/Kconfig b/drivers/staging/most/dim2/Kconfig
index 5aeef22c3cba..22f6900187b5 100644
--- a/drivers/staging/most/dim2/Kconfig
+++ b/drivers/staging/most/dim2/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# MediaLB configuration
#
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
index 6d15f045a767..116f04d69244 100644
--- a/drivers/staging/most/dim2/Makefile
+++ b/drivers/staging/most/dim2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST_DIM2) += most_dim2.o
most_dim2-objs := dim2.o hal.o sysfs.o
diff --git a/drivers/staging/most/dim2/errors.h b/drivers/staging/most/dim2/errors.h
index 3487510fbd2f..268332e5735e 100644
--- a/drivers/staging/most/dim2/errors.h
+++ b/drivers/staging/most/dim2/errors.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* errors.h - Definitions of errors for DIM2 HAL API
* (MediaLB, Device Interface Macro IP, OS62420)
diff --git a/drivers/staging/most/dim2/hal.h b/drivers/staging/most/dim2/hal.h
index e04a5350f134..fca6c22de8a6 100644
--- a/drivers/staging/most/dim2/hal.h
+++ b/drivers/staging/most/dim2/hal.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* hal.h - DIM2 HAL interface
* (MediaLB, Device Interface Macro IP, OS62420)
diff --git a/drivers/staging/most/dim2/reg.h b/drivers/staging/most/dim2/reg.h
index 4343a483017e..b0f36c208a57 100644
--- a/drivers/staging/most/dim2/reg.h
+++ b/drivers/staging/most/dim2/reg.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* reg.h - Definitions for registers of DIM2
* (MediaLB, Device Interface Macro IP, OS62420)
diff --git a/drivers/staging/most/dim2/sysfs.h b/drivers/staging/most/dim2/sysfs.h
index 33756a3bffe2..24277a17cff3 100644
--- a/drivers/staging/most/dim2/sysfs.h
+++ b/drivers/staging/most/dim2/sysfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* sysfs.h - MediaLB sysfs information
*
diff --git a/drivers/staging/most/i2c/Kconfig b/drivers/staging/most/i2c/Kconfig
index 79d0ff27f56d..19a094b5bee0 100644
--- a/drivers/staging/most/i2c/Kconfig
+++ b/drivers/staging/most/i2c/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# MOST I2C configuration
#
@@ -5,7 +6,7 @@
config MOST_I2C
tristate "I2C"
depends on I2C
- ---help---
+ help
Say Y here if you want to connect via I2C to network tranceiver.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/most/i2c/Makefile b/drivers/staging/most/i2c/Makefile
index c032fea979b3..2b3769dc19e7 100644
--- a/drivers/staging/most/i2c/Makefile
+++ b/drivers/staging/most/i2c/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST_I2C) += most_i2c.o
most_i2c-objs := i2c.o
diff --git a/drivers/staging/most/net/Kconfig b/drivers/staging/most/net/Kconfig
index 795330ba94ef..ed8ac7e076d1 100644
--- a/drivers/staging/most/net/Kconfig
+++ b/drivers/staging/most/net/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# MOST Networking configuration
#
@@ -6,7 +7,7 @@ config MOST_NET
tristate "Net"
depends on NET
- ---help---
+ help
Say Y here if you want to commumicate via a networking device.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/most/net/Makefile b/drivers/staging/most/net/Makefile
index 820faec6b296..f0ac64dee71b 100644
--- a/drivers/staging/most/net/Makefile
+++ b/drivers/staging/most/net/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST_NET) += most_net.o
most_net-objs := net.o
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index e20584b1b112..c8a64e209027 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -293,7 +293,8 @@ static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
}
static int comp_probe_channel(struct most_interface *iface, int channel_idx,
- struct most_channel_config *ccfg, char *name)
+ struct most_channel_config *ccfg, char *name,
+ char *args)
{
struct net_dev_context *nd;
struct net_dev_channel *ch;
diff --git a/drivers/staging/most/sound/Kconfig b/drivers/staging/most/sound/Kconfig
index 115262a58a42..ad9f7821af7d 100644
--- a/drivers/staging/most/sound/Kconfig
+++ b/drivers/staging/most/sound/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# MOST ALSA configuration
#
@@ -6,7 +7,7 @@ config MOST_SOUND
tristate "Sound"
depends on SND
select SND_PCM
- ---help---
+ help
Say Y here if you want to commumicate via ALSA/sound devices.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/most/sound/Makefile b/drivers/staging/most/sound/Makefile
index 5bb55bb108fb..a3d086c6ca70 100644
--- a/drivers/staging/most/sound/Makefile
+++ b/drivers/staging/most/sound/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST_SOUND) += most_sound.o
most_sound-objs := sound.o
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 79ab3a78c5ec..342f390d68b3 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -20,6 +20,7 @@
#include <most/core.h>
#define DRIVER_NAME "sound"
+#define STRING_SIZE 80
static struct core_component comp;
@@ -471,17 +472,11 @@ static const struct snd_pcm_ops pcm_ops = {
.page = snd_pcm_lib_get_vmalloc_page,
};
-static int split_arg_list(char *buf, char **device_name, u16 *ch_num,
- char **sample_res, u8 *create)
+static int split_arg_list(char *buf, u16 *ch_num, char **sample_res)
{
char *num;
int ret;
- *device_name = strsep(&buf, ".");
- if (!*device_name) {
- pr_err("Missing sound card name\n");
- return -EIO;
- }
num = strsep(&buf, "x");
if (!num)
goto err;
@@ -492,8 +487,6 @@ static int split_arg_list(char *buf, char **device_name, u16 *ch_num,
if (!*sample_res)
goto err;
- if (buf && !strcmp(buf, "create"))
- *create = 1;
return 0;
err:
@@ -579,7 +572,7 @@ static void release_adapter(struct sound_adapter *adpt)
*/
static int audio_probe_channel(struct most_interface *iface, int channel_id,
struct most_channel_config *cfg,
- char *arg_list)
+ char *device_name, char *arg_list)
{
struct channel *channel;
struct sound_adapter *adpt;
@@ -588,10 +581,9 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
int capture_count = 0;
int ret;
int direction;
- char *device_name;
u16 ch_num;
- u8 create = 0;
char *sample_res;
+ char arg_list_cpy[STRING_SIZE];
if (!iface)
return -EINVAL;
@@ -600,9 +592,8 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
pr_err("Incompatible channel type\n");
return -EINVAL;
}
-
- ret = split_arg_list(arg_list, &device_name, &ch_num, &sample_res,
- &create);
+ strlcpy(arg_list_cpy, arg_list, STRING_SIZE);
+ ret = split_arg_list(arg_list_cpy, &ch_num, &sample_res);
if (ret < 0)
return ret;
@@ -622,7 +613,7 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
INIT_LIST_HEAD(&adpt->dev_list);
iface->priv = adpt;
list_add_tail(&adpt->list, &adpt_list);
- ret = snd_card_new(&iface->dev, -1, "INIC", THIS_MODULE,
+ ret = snd_card_new(iface->driver_dev, -1, "INIC", THIS_MODULE,
sizeof(*channel), &adpt->card);
if (ret < 0)
goto err_free_adpt;
@@ -673,12 +664,6 @@ skip_adpt_alloc:
strscpy(pcm->name, device_name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, direction, &pcm_ops);
- if (create) {
- ret = snd_card_register(adpt->card);
- if (ret < 0)
- goto err_free_adpt;
- adpt->registered = true;
- }
return 0;
err_free_adpt:
@@ -686,6 +671,26 @@ err_free_adpt:
return ret;
}
+static int audio_create_sound_card(void)
+{
+ int ret;
+ struct sound_adapter *adpt;
+
+ list_for_each_entry(adpt, &adpt_list, list) {
+ if (!adpt->registered)
+ goto adpt_alloc;
+ }
+ return -ENODEV;
+adpt_alloc:
+ ret = snd_card_register(adpt->card);
+ if (ret < 0) {
+ release_adapter(adpt);
+ return ret;
+ }
+ adpt->registered = true;
+ return 0;
+}
+
/**
* audio_disconnect_channel - function to disconnect a channel
* @iface: pointer to interface instance
@@ -782,20 +787,30 @@ static struct core_component comp = {
.disconnect_channel = audio_disconnect_channel,
.rx_completion = audio_rx_completion,
.tx_completion = audio_tx_completion,
+ .cfg_complete = audio_create_sound_card,
};
static int __init audio_init(void)
{
+ int ret;
+
pr_info("init()\n");
INIT_LIST_HEAD(&adpt_list);
- return most_register_component(&comp);
+ ret = most_register_component(&comp);
+ if (ret)
+ pr_err("Failed to register %s\n", comp.name);
+ ret = most_register_configfs_subsys(&comp);
+ if (ret)
+ pr_err("Failed to register %s configfs subsys\n", comp.name);
+ return ret;
}
static void __exit audio_exit(void)
{
pr_info("exit()\n");
+ most_deregister_configfs_subsys(&comp);
most_deregister_component(&comp);
}
diff --git a/drivers/staging/most/usb/Kconfig b/drivers/staging/most/usb/Kconfig
index ebbdb573a9a6..a86f1f63def4 100644
--- a/drivers/staging/most/usb/Kconfig
+++ b/drivers/staging/most/usb/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# MOST USB configuration
#
@@ -5,7 +6,7 @@
config MOST_USB
tristate "USB"
depends on USB && NET
- ---help---
+ help
Say Y here if you want to connect via USB to network tranceiver.
This device driver depends on the networking AIM.
diff --git a/drivers/staging/most/usb/Makefile b/drivers/staging/most/usb/Makefile
index 910cd08bad7c..83cf2ead7122 100644
--- a/drivers/staging/most/usb/Makefile
+++ b/drivers/staging/most/usb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST_USB) += most_usb.o
most_usb-objs := usb.o
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
index c0293d8d5934..360cb5b7a10b 100644
--- a/drivers/staging/most/usb/usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -1072,7 +1072,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
mdev->iface.num_channels = num_endpoints;
snprintf(mdev->description, sizeof(mdev->description),
- "usb_device %d-%s:%d.%d",
+ "%d-%s:%d.%d",
usb_dev->bus->busnum,
usb_dev->devpath,
usb_dev->config->desc.bConfigurationValue,
diff --git a/drivers/staging/most/video/Kconfig b/drivers/staging/most/video/Kconfig
index ce6af4f951a6..e0964ca5e7b3 100644
--- a/drivers/staging/most/video/Kconfig
+++ b/drivers/staging/most/video/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# MOST V4L2 configuration
#
@@ -5,7 +6,7 @@
config MOST_VIDEO
tristate "Video"
depends on VIDEO_V4L2
- ---help---
+ help
Say Y here if you want to commumicate via Video 4 Linux.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/most/video/Makefile b/drivers/staging/most/video/Makefile
index c6e01b6ecfe6..2d857d3cbcc8 100644
--- a/drivers/staging/most/video/Makefile
+++ b/drivers/staging/most/video/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST_VIDEO) += most_video.o
most_video-objs := video.o
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index ad7e28ab9a4f..adca250062e1 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -453,7 +453,8 @@ static void comp_v4l2_dev_release(struct v4l2_device *v4l2_dev)
}
static int comp_probe_channel(struct most_interface *iface, int channel_idx,
- struct most_channel_config *ccfg, char *name)
+ struct most_channel_config *ccfg, char *name,
+ char *args)
{
int ret;
struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
diff --git a/drivers/staging/mt7621-dma/Kconfig b/drivers/staging/mt7621-dma/Kconfig
index b6e48a682c44..54a110288f92 100644
--- a/drivers/staging/mt7621-dma/Kconfig
+++ b/drivers/staging/mt7621-dma/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config MTK_HSDMA
tristate "MTK HSDMA support"
depends on RALINK && SOC_MT7621
diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
index c9e3e1619ab0..66da1bf10c32 100644
--- a/drivers/staging/mt7621-dma/Makefile
+++ b/drivers/staging/mt7621-dma/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index 97571f1d697b..0fbb9932d6bb 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -264,8 +264,7 @@ static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
/* init desc value */
for (i = 0; i < HSDMA_DESCS_NUM; i++) {
chan->tx_ring[i].addr0 = 0;
- chan->tx_ring[i].flags = HSDMA_DESC_LS0 |
- HSDMA_DESC_DONE;
+ chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
}
for (i = 0; i < HSDMA_DESCS_NUM; i++) {
chan->rx_ring[i].addr0 = 0;
@@ -435,8 +434,7 @@ static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
if (likely(status & HSDMA_INT_RX_Q0))
tasklet_schedule(&hsdma->task);
else
- dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n",
- status);
+ dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
/* clean intr bits */
mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
@@ -667,7 +665,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
if (!hsdma) {
- dev_err(&pdev->dev, "alloc dma device failed\n");
return -EINVAL;
}
diff --git a/drivers/staging/mt7621-dts/Kconfig b/drivers/staging/mt7621-dts/Kconfig
index 94a9e16c0b92..3ea08ab9d0d3 100644
--- a/drivers/staging/mt7621-dts/Kconfig
+++ b/drivers/staging/mt7621-dts/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DTB_GNUBEE1
bool "GnuBee1 NAS"
depends on SOC_MT7621 && DTB_RT_NONE
diff --git a/drivers/staging/mt7621-dts/Makefile b/drivers/staging/mt7621-dts/Makefile
index 195eba4a5c65..aeec48a4edc7 100644
--- a/drivers/staging/mt7621-dts/Makefile
+++ b/drivers/staging/mt7621-dts/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_DTB_GNUBEE1) += gbpc1.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 17020e24abd2..280ec33c8540 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -491,7 +491,7 @@
reset-names = "pcie", "pcie0", "pcie1", "pcie2";
clocks = <&clkctrl 24 &clkctrl 25 &clkctrl 26>;
clock-names = "pcie0", "pcie1", "pcie2";
- phys = <&pcie0_port>, <&pcie1_port>, <&pcie2_port>;
+ phys = <&pcie0_phy 0>, <&pcie0_phy 1>, <&pcie1_phy 0>;
phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
pcie@0,0 {
@@ -522,29 +522,12 @@
pcie0_phy: pcie-phy@1e149000 {
compatible = "mediatek,mt7621-pci-phy";
reg = <0x1e149000 0x0700>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pcie0_port: pcie-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
-
- pcie1_port: pcie-phy@1 {
- reg = <1>;
- #phy-cells = <0>;
- };
+ #phy-cells = <1>;
};
pcie1_phy: pcie-phy@1e14a000 {
compatible = "mediatek,mt7621-pci-phy";
reg = <0x1e14a000 0x0700>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pcie2_port: pcie-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
+ #phy-cells = <1>;
};
};
diff --git a/drivers/staging/mt7621-mmc/Kconfig b/drivers/staging/mt7621-mmc/Kconfig
deleted file mode 100644
index 1eb79cd6e22f..000000000000
--- a/drivers/staging/mt7621-mmc/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config MTK_MMC
- tristate "MTK SD/MMC"
- depends on RALINK && MMC
-
-config MTK_AEE_KDUMP
- bool "MTK AEE KDUMP"
- depends on MTK_MMC
-
-config MTK_MMC_CD_POLL
- bool "Card Detect with Polling"
- depends on MTK_MMC
-
-config MTK_MMC_EMMC_8BIT
- bool "eMMC 8-bit support"
- depends on MTK_MMC && RALINK_MT7628
-
diff --git a/drivers/staging/mt7621-mmc/Makefile b/drivers/staging/mt7621-mmc/Makefile
deleted file mode 100644
index caead0b54703..000000000000
--- a/drivers/staging/mt7621-mmc/Makefile
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright Statement:
-#
-# This software/firmware and related documentation ("MediaTek Software") are
-# protected under relevant copyright laws. The information contained herein
-# is confidential and proprietary to MediaTek Inc. and/or its licensors.
-# Without the prior written permission of MediaTek inc. and/or its licensors,
-# any reproduction, modification, use or disclosure of MediaTek Software,
-# and information contained herein, in whole or in part, shall be strictly prohibited.
-#
-# MediaTek Inc. (C) 2010. All rights reserved.
-#
-# BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
-# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
-# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
-# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
-# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
-# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
-# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
-# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
-# THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
-# CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
-# SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
-# STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
-# CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
-# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
-# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
-# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
-#
-# The following software/firmware and/or related documentation ("MediaTek Software")
-# have been modified by MediaTek Inc. All revisions are subject to any receiver's
-# applicable license agreements with MediaTek Inc.
-
-obj-$(CONFIG_MTK_MMC) += mtk_sd.o
-mtk_sd-objs := sd.o dbg.o
-ifeq ($(CONFIG_MTK_AEE_KDUMP),y)
-EXTRA_CFLAGS += -DMT6575_SD_DEBUG
-endif
-
-clean:
- @rm -f *.o modules.order .*.cmd
diff --git a/drivers/staging/mt7621-mmc/TODO b/drivers/staging/mt7621-mmc/TODO
deleted file mode 100644
index febb32d37e07..000000000000
--- a/drivers/staging/mt7621-mmc/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-
-- general code review and clean up
-- ensure device-tree requirements are documented
-- should probably be merged with drivers/mmc/host/mtk-sd.c
-- possibly fix to work with highmem pages so a bounce buffer isn't
- needed.
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-mmc/board.h b/drivers/staging/mt7621-mmc/board.h
deleted file mode 100644
index 983791ee308d..000000000000
--- a/drivers/staging/mt7621-mmc/board.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly prohibited.
- */
-/* MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
- * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
- * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
- * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
- * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
- * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
- * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
- * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation ("MediaTek Software")
- * have been modified by MediaTek Inc. All revisions are subject to any receiver's
- * applicable license agreements with MediaTek Inc.
- */
-
-#ifndef __ARCH_ARM_MACH_BOARD_H
-#define __ARCH_ARM_MACH_BOARD_H
-
-#define MSDC_CD_PIN_EN BIT(0) /* card detection pin is wired */
-#define MSDC_WP_PIN_EN BIT(1) /* write protection pin is wired */
-#define MSDC_RST_PIN_EN BIT(2) /* emmc reset pin is wired */
-#define MSDC_REMOVABLE BIT(5) /* removable slot */
-
-#define MSDC_SMPL_RISING (0)
-#define MSDC_SMPL_FALLING (1)
-
-#define MSDC_CMD_PIN (0)
-#define MSDC_DAT_PIN (1)
-#define MSDC_CD_PIN (2)
-#define MSDC_WP_PIN (3)
-#define MSDC_RST_PIN (4)
-
-struct msdc_hw {
- unsigned char clk_src; /* host clock source */
- unsigned long flags; /* hardware capability flags */
-
- /* config gpio pull mode */
- void (*config_gpio_pin)(int type, int pull);
-};
-
-extern struct msdc_hw msdc0_hw;
-
-#endif /* __ARCH_ARM_MACH_BOARD_H */
diff --git a/drivers/staging/mt7621-mmc/dbg.c b/drivers/staging/mt7621-mmc/dbg.c
deleted file mode 100644
index c7c091fa1da0..000000000000
--- a/drivers/staging/mt7621-mmc/dbg.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly
- * prohibited.
- *
- * MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO
- * SUCH THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY
- * ACKNOWLEDGES THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY
- * THIRD PARTY ALL PROPER LICENSES CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK
- * SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO
- * RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN
- * FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED
- * HEREUNDER WILL BE, AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK
- * SOFTWARE AT ISSUE, OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE
- * PAID BY RECEIVER TO MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation
- * ("MediaTek Software") have been modified by MediaTek Inc. All revisions
- * are subject to any receiver's applicable license agreements with MediaTek
- * Inc.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-// #include <mach/mt6575_gpt.h> /* --- by chhung */
-#include "dbg.h"
-#include "mt6575_sd.h"
-#include <linux/seq_file.h>
-
-
-/* for debug zone */
-unsigned int sd_debug_zone[4] = {
- 0,
- 0,
- 0,
- 0
-};
-
-#if defined(MT6575_SD_DEBUG)
-static char cmd_buf[256];
-/* for driver profile */
-#define TICKS_ONE_MS (13000)
-u32 gpt_enable;
-u32 sdio_pro_enable; /* make sure gpt is enabled */
-u32 sdio_pro_time; /* no more than 30s */
-struct sdio_profile sdio_perfomance = {0};
-
-u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32)
-{
- u32 ret = 0;
-
- if (new_H32 == old_H32) {
- ret = new_L32 - old_L32;
- } else if (new_H32 == (old_H32 + 1)) {
- if (new_L32 > old_L32)
- pr_debug("msdc old_L<0x%x> new_L<0x%x>\n",
- old_L32, new_L32);
- ret = (0xffffffff - old_L32);
- ret += new_L32;
- } else {
- pr_debug("msdc old_H<0x%x> new_H<0x%x>\n", old_H32, new_H32);
- }
-
- return ret;
-}
-
-void msdc_sdio_profile(struct sdio_profile *result)
-{
- struct cmd_profile *cmd;
- u32 i;
-
- pr_debug("sdio === performance dump ===\n");
- pr_debug("sdio === total execute tick<%d> time<%dms> Tx<%dB> Rx<%dB>\n",
- result->total_tc, result->total_tc / TICKS_ONE_MS,
- result->total_tx_bytes, result->total_rx_bytes);
-
- /* CMD52 Dump */
- cmd = &result->cmd52_rx;
- pr_debug("sdio === CMD52 Rx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n",
- cmd->count, cmd->tot_tc, cmd->max_tc, cmd->min_tc,
- cmd->tot_tc / cmd->count);
- cmd = &result->cmd52_tx;
- pr_debug("sdio === CMD52 Tx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n",
- cmd->count, cmd->tot_tc, cmd->max_tc, cmd->min_tc,
- cmd->tot_tc / cmd->count);
-
- /* CMD53 Rx bytes + block mode */
- for (i = 0; i < 512; i++) {
- cmd = &result->cmd53_rx_byte[i];
- if (cmd->count) {
- pr_debug("sdio<%6d><%3dB>_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n",
- cmd->count, i, cmd->tot_tc, cmd->max_tc,
- cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes,
- (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
- }
- }
- for (i = 0; i < 100; i++) {
- cmd = &result->cmd53_rx_blk[i];
- if (cmd->count) {
- pr_debug("sdio<%6d><%3d>B_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n",
- cmd->count, i, cmd->tot_tc, cmd->max_tc,
- cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes,
- (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
- }
- }
-
- /* CMD53 Tx bytes + block mode */
- for (i = 0; i < 512; i++) {
- cmd = &result->cmd53_tx_byte[i];
- if (cmd->count) {
- pr_debug("sdio<%6d><%3dB>_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n",
- cmd->count, i, cmd->tot_tc, cmd->max_tc,
- cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes,
- (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
- }
- }
- for (i = 0; i < 100; i++) {
- cmd = &result->cmd53_tx_blk[i];
- if (cmd->count) {
- pr_debug("sdio<%6d><%3d>B_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n",
- cmd->count, i, cmd->tot_tc, cmd->max_tc,
- cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes,
- (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
- }
- }
-
- pr_debug("sdio === performance dump done ===\n");
-}
-
-//========= sdio command table ===========
-void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks)
-{
- struct sdio_profile *result = &sdio_perfomance;
- struct cmd_profile *cmd;
- u32 block;
-
- if (sdio_pro_enable == 0)
- return;
-
- if (opcode == 52) {
- cmd = bRx ? &result->cmd52_rx : &result->cmd52_tx;
- } else if (opcode == 53) {
- if (sizes < 512) {
- cmd = bRx ? &result->cmd53_rx_byte[sizes] : &result->cmd53_tx_byte[sizes];
- } else {
- block = sizes / 512;
- if (block >= 99) {
- pr_err("cmd53 error blocks\n");
- while (1)
- ;
- }
- cmd = bRx ? &result->cmd53_rx_blk[block] : &result->cmd53_tx_blk[block];
- }
- } else {
- return;
- }
-
- /* update the members */
- if (ticks > cmd->max_tc)
- cmd->max_tc = ticks;
- if (cmd->min_tc == 0 || ticks < cmd->min_tc)
- cmd->min_tc = ticks;
- cmd->tot_tc += ticks;
- cmd->tot_bytes += sizes;
- cmd->count++;
-
- if (bRx)
- result->total_rx_bytes += sizes;
- else
- result->total_tx_bytes += sizes;
- result->total_tc += ticks;
-
- /* dump when total_tc > 30s */
- if (result->total_tc >= sdio_pro_time * TICKS_ONE_MS * 1000) {
- msdc_sdio_profile(result);
- memset(result, 0, sizeof(struct sdio_profile));
- }
-}
-
-//========== driver proc interface ===========
-static int msdc_debug_proc_read(struct seq_file *s, void *p)
-{
- seq_puts(s, "\n=========================================\n");
- seq_puts(s, "Index<0> + Id + Zone\n");
- seq_puts(s, "-> PWR<9> WRN<8> | FIO<7> OPS<6> FUN<5> CFG<4> | INT<3> RSP<2> CMD<1> DMA<0>\n");
- seq_puts(s, "-> echo 0 3 0x3ff >msdc_bebug -> host[3] debug zone set to 0x3ff\n");
- seq_printf(s, "-> MSDC[0] Zone: 0x%.8x\n", sd_debug_zone[0]);
- seq_printf(s, "-> MSDC[1] Zone: 0x%.8x\n", sd_debug_zone[1]);
- seq_printf(s, "-> MSDC[2] Zone: 0x%.8x\n", sd_debug_zone[2]);
- seq_printf(s, "-> MSDC[3] Zone: 0x%.8x\n", sd_debug_zone[3]);
-
- seq_puts(s, "Index<3> + SDIO_PROFILE + TIME\n");
- seq_puts(s, "-> echo 3 1 0x1E >msdc_bebug -> enable sdio_profile, 30s\n");
- seq_printf(s, "-> SDIO_PROFILE<%d> TIME<%ds>\n",
- sdio_pro_enable, sdio_pro_time);
- seq_puts(s, "=========================================\n\n");
-
- return 0;
-}
-
-static ssize_t msdc_debug_proc_write(struct file *file,
- const char __user *buf,
- size_t count, loff_t *data)
-{
- int ret;
-
- int cmd, p1, p2;
- int id, zone;
- int mode, size;
-
- if (count == 0)
- return -1;
- if (count > 255)
- count = 255;
-
- if (copy_from_user(cmd_buf, buf, count))
- return -EFAULT;
-
- cmd_buf[count] = '\0';
- pr_debug("msdc Write %s\n", cmd_buf);
-
- ret = sscanf(cmd_buf, "%x %x %x", &cmd, &p1, &p2);
- if (ret != 3)
- return -EINVAL;
-
- if (cmd == SD_TOOL_ZONE) {
- id = p1;
- zone = p2;
- zone &= 0x3ff;
- pr_debug("msdc host_id<%d> zone<0x%.8x>\n", id, zone);
- if (id >= 0 && id <= 3) {
- sd_debug_zone[id] = zone;
- } else if (id == 4) {
- sd_debug_zone[0] = sd_debug_zone[1] = zone;
- sd_debug_zone[2] = sd_debug_zone[3] = zone;
- } else {
- pr_err("msdc host_id error when set debug zone\n");
- }
- } else if (cmd == SD_TOOL_SDIO_PROFILE) {
- if (p1 == 1) { /* enable profile */
- if (gpt_enable == 0)
- gpt_enable = 1;
- sdio_pro_enable = 1;
- if (p2 == 0)
- p2 = 1;
- if (p2 >= 30)
- p2 = 30;
- sdio_pro_time = p2;
- } else if (p1 == 0) {
- /* todo */
- sdio_pro_enable = 0;
- }
- }
-
- return count;
-}
-
-static int msdc_debug_show(struct inode *inode, struct file *file)
-{
- return single_open(file, msdc_debug_proc_read, NULL);
-}
-
-static const struct file_operations msdc_debug_fops = {
- .owner = THIS_MODULE,
- .open = msdc_debug_show,
- .read = seq_read,
- .write = msdc_debug_proc_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void msdc_debug_proc_init(void)
-{
- proc_create("msdc_debug", 0660, NULL, &msdc_debug_fops);
-}
-EXPORT_SYMBOL_GPL(msdc_debug_proc_init);
-#endif
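
For reference, the msdc_time_calc() helper removed above reads a tick counter as two separate 32-bit halves and takes a wrap-around path when the high word has advanced by one between the two samples. The following is a minimal userspace C sketch of the same arithmetic (hypothetical names, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Delta between two snapshots of a 64-bit tick counter sampled as
 * separate low/high 32-bit words; mirrors the wrap handling used by
 * the removed msdc_time_calc().
 */
static uint32_t tick_delta(uint32_t old_lo, uint32_t old_hi,
                           uint32_t new_lo, uint32_t new_hi)
{
        if (new_hi == old_hi)
                return new_lo - old_lo;
        if (new_hi == old_hi + 1)                 /* low word wrapped once */
                return (0xffffffffu - old_lo) + new_lo;
        return 0;                                 /* more than one wrap: unknown */
}

int main(void)
{
        /* high word advanced by one between the two samples */
        printf("delta = %u ticks\n", tick_delta(0xfffffff0u, 0, 0x10u, 1));
        return 0;
}
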
diff --git a/drivers/staging/mt7621-mmc/dbg.h b/drivers/staging/mt7621-mmc/dbg.h
deleted file mode 100644
index 2d447b2d92ae..000000000000
--- a/drivers/staging/mt7621-mmc/dbg.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly
- * prohibited.
- *
- * MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY
- * ACKNOWLEDGES THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY
- * THIRD PARTY ALL PROPER LICENSES CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK
- * SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO
- * RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN
- * FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED
- * HEREUNDER WILL BE, AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK
- * SOFTWARE AT ISSUE, OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE
- * PAID BY RECEIVER TO MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation
- * ("MediaTek Software") have been modified by MediaTek Inc. All revisions are
- * subject to any receiver's applicable license agreements with MediaTek Inc.
- */
-#ifndef __MT_MSDC_DEUBG__
-#define __MT_MSDC_DEUBG__
-
-//==========================
-extern u32 sdio_pro_enable;
-/* for a type command, e.g. CMD53, 2 blocks */
-struct cmd_profile {
- u32 max_tc; /* Max tick count */
- u32 min_tc;
- u32 tot_tc; /* total tick count */
- u32 tot_bytes;
- u32 count; /* the counts of the command */
-};
-
-/* dump when total_tc and total_bytes */
-struct sdio_profile {
- u32 total_tc; /* total tick count of CMD52 and CMD53 */
- u32 total_tx_bytes; /* total bytes of CMD53 Tx */
- u32 total_rx_bytes; /* total bytes of CMD53 Rx */
-
- /*CMD52*/
- struct cmd_profile cmd52_tx;
- struct cmd_profile cmd52_rx;
-
- /*CMD53 in byte unit */
- struct cmd_profile cmd53_tx_byte[512];
- struct cmd_profile cmd53_rx_byte[512];
-
- /*CMD53 in block unit */
- struct cmd_profile cmd53_tx_blk[100];
- struct cmd_profile cmd53_rx_blk[100];
-};
-
-//==========================
-enum msdc_dbg {
- SD_TOOL_ZONE = 0,
- SD_TOOL_DMA_SIZE = 1,
- SD_TOOL_PM_ENABLE = 2,
- SD_TOOL_SDIO_PROFILE = 3,
-};
-
-/* Debug message event */
-#define DBG_EVT_NONE (0) /* No event */
-#define DBG_EVT_DMA BIT(0) /* DMA related event */
-#define DBG_EVT_CMD BIT(1) /* MSDC CMD related event */
-#define DBG_EVT_RSP BIT(2) /* MSDC CMD RSP related event */
-#define DBG_EVT_INT BIT(3) /* MSDC INT event */
-#define DBG_EVT_CFG BIT(4) /* MSDC CFG event */
-#define DBG_EVT_FUC BIT(5) /* Function event */
-#define DBG_EVT_OPS BIT(6) /* Read/Write operation event */
-#define DBG_EVT_FIO BIT(7) /* FIFO operation event */
-#define DBG_EVT_WRN BIT(8) /* Warning event */
-#define DBG_EVT_PWR BIT(9) /* Power event */
-#define DBG_EVT_ALL (0xffffffff)
-
-#define DBG_EVT_MASK (DBG_EVT_ALL)
-
-extern unsigned int sd_debug_zone[4];
-#define TAG "msdc"
-void msdc_debug_proc_init(void);
-
-u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32);
-void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks);
-
-#endif
diff --git a/drivers/staging/mt7621-mmc/mt6575_sd.h b/drivers/staging/mt7621-mmc/mt6575_sd.h
deleted file mode 100644
index 038a484a9476..000000000000
--- a/drivers/staging/mt7621-mmc/mt6575_sd.h
+++ /dev/null
@@ -1,488 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly prohibited.
- */
-/* MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
- * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
- * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
- * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
- * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
- * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
- * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
- * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation ("MediaTek Software")
- * have been modified by MediaTek Inc. All revisions are subject to any receiver's
- * applicable license agreements with MediaTek Inc.
- */
-
-#ifndef MT6575_SD_H
-#define MT6575_SD_H
-
-#include <linux/bitops.h>
-#include <linux/mmc/host.h>
-
-// #include <mach/mt6575_reg_base.h> /* --- by chhung */
-
-/*--------------------------------------------------------------------------*/
-/* Common Definition */
-/*--------------------------------------------------------------------------*/
-#define MSDC_FIFO_SZ (128)
-#define MSDC_FIFO_THD (64) // (128)
-#define MSDC_NUM (4)
-
-#define MSDC_MS (0)
-#define MSDC_SDMMC (1)
-
-#define MSDC_BUS_1BITS (0)
-#define MSDC_BUS_4BITS (1)
-#define MSDC_BUS_8BITS (2)
-
-#define MSDC_BRUST_8B (3)
-#define MSDC_BRUST_16B (4)
-#define MSDC_BRUST_32B (5)
-#define MSDC_BRUST_64B (6)
-
-#define MSDC_PIN_PULL_NONE (0)
-#define MSDC_PIN_PULL_DOWN (1)
-#define MSDC_PIN_PULL_UP (2)
-#define MSDC_PIN_KEEP (3)
-
-#define MSDC_MAX_SCLK (48000000) /* +/- by chhung */
-#define MSDC_MIN_SCLK (260000)
-
-#define MSDC_AUTOCMD12 (0x0001)
-#define MSDC_AUTOCMD23 (0x0002)
-#define MSDC_AUTOCMD19 (0x0003)
-
-#define MSDC_EMMC_BOOTMODE0 (0) /* Pull low CMD mode */
-#define MSDC_EMMC_BOOTMODE1 (1) /* Reset CMD mode */
-
-enum {
- RESP_NONE = 0,
- RESP_R1,
- RESP_R2,
- RESP_R3,
- RESP_R4,
- RESP_R5,
- RESP_R6,
- RESP_R7,
- RESP_R1B
-};
-
-/*--------------------------------------------------------------------------*/
-/* Register Offset */
-/*--------------------------------------------------------------------------*/
-#define MSDC_CFG (0x0)
-#define MSDC_IOCON (0x04)
-#define MSDC_PS (0x08)
-#define MSDC_INT (0x0c)
-#define MSDC_INTEN (0x10)
-#define MSDC_FIFOCS (0x14)
-#define MSDC_TXDATA (0x18)
-#define MSDC_RXDATA (0x1c)
-#define SDC_CFG (0x30)
-#define SDC_CMD (0x34)
-#define SDC_ARG (0x38)
-#define SDC_STS (0x3c)
-#define SDC_RESP0 (0x40)
-#define SDC_RESP1 (0x44)
-#define SDC_RESP2 (0x48)
-#define SDC_RESP3 (0x4c)
-#define SDC_BLK_NUM (0x50)
-#define SDC_CSTS (0x58)
-#define SDC_CSTS_EN (0x5c)
-#define SDC_DCRC_STS (0x60)
-#define EMMC_CFG0 (0x70)
-#define EMMC_CFG1 (0x74)
-#define EMMC_STS (0x78)
-#define EMMC_IOCON (0x7c)
-#define SDC_ACMD_RESP (0x80)
-#define SDC_ACMD19_TRG (0x84)
-#define SDC_ACMD19_STS (0x88)
-#define MSDC_DMA_SA (0x90)
-#define MSDC_DMA_CA (0x94)
-#define MSDC_DMA_CTRL (0x98)
-#define MSDC_DMA_CFG (0x9c)
-#define MSDC_DBG_SEL (0xa0)
-#define MSDC_DBG_OUT (0xa4)
-#define MSDC_PATCH_BIT (0xb0)
-#define MSDC_PATCH_BIT0 MSDC_PATCH_BIT
-#define MSDC_PATCH_BIT1 (0xb4)
-#define MSDC_PAD_CTL0 (0xe0)
-#define MSDC_PAD_CTL1 (0xe4)
-#define MSDC_PAD_CTL2 (0xe8)
-#define MSDC_PAD_TUNE (0xec)
-#define MSDC_DAT_RDDLY0 (0xf0)
-#define MSDC_DAT_RDDLY1 (0xf4)
-#define MSDC_HW_DBG (0xf8)
-#define MSDC_VERSION (0x100)
-#define MSDC_ECO_VER (0x104)
-
-/*--------------------------------------------------------------------------*/
-/* Register Mask */
-/*--------------------------------------------------------------------------*/
-
-/* MSDC_CFG mask */
-#define MSDC_CFG_MODE (0x1 << 0) /* RW */
-#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */
-#define MSDC_CFG_RST (0x1 << 2) /* RW */
-#define MSDC_CFG_PIO (0x1 << 3) /* RW */
-#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */
-#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */
-#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */
-#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
-#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
-#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
-
-/* MSDC_IOCON mask */
-#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
-#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */
-#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */
-#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */
-#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */
-#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */
-#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */
-#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */
-#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */
-#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */
-#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */
-#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */
-#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */
-#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */
-#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */
-
-/* MSDC_PS mask */
-#define MSDC_PS_CDEN (0x1 << 0) /* RW */
-#define MSDC_PS_CDSTS (0x1 << 1) /* R */
-#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
-#define MSDC_PS_DAT (0xff << 16) /* R */
-#define MSDC_PS_CMD (0x1 << 24) /* R */
-#define MSDC_PS_WP (0x1UL << 31) /* R */
-
-/* MSDC_INT mask */
-#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */
-#define MSDC_INT_CDSC (0x1 << 1) /* W1C */
-#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */
-#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */
-#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */
-#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */
-#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */
-#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */
-#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */
-#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */
-#define MSDC_INT_CSTA (0x1 << 11) /* R */
-#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */
-#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */
-#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */
-#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */
-#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */
-
-/* MSDC_INTEN mask */
-#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */
-#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */
-#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */
-#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */
-#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */
-#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */
-#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */
-#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */
-#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */
-#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */
-#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */
-#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */
-#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */
-#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */
-#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */
-#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */
-
-/* MSDC_FIFOCS mask */
-#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */
-#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */
-#define MSDC_FIFOCS_CLR (0x1UL << 31) /* RW */
-
-/* SDC_CFG mask */
-#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
-#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
-#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
-#define SDC_CFG_SDIO (0x1 << 19) /* RW */
-#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
-#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */
-#define SDC_CFG_DTOC (0xffUL << 24) /* RW */
-
-/* SDC_CMD mask */
-#define SDC_CMD_OPC (0x3f << 0) /* RW */
-#define SDC_CMD_BRK (0x1 << 6) /* RW */
-#define SDC_CMD_RSPTYP (0x7 << 7) /* RW */
-#define SDC_CMD_DTYP (0x3 << 11) /* RW */
-#define SDC_CMD_DTYP (0x3 << 11) /* RW */
-#define SDC_CMD_RW (0x1 << 13) /* RW */
-#define SDC_CMD_STOP (0x1 << 14) /* RW */
-#define SDC_CMD_GOIRQ (0x1 << 15) /* RW */
-#define SDC_CMD_BLKLEN (0xfff << 16) /* RW */
-#define SDC_CMD_AUTOCMD (0x3 << 28) /* RW */
-#define SDC_CMD_VOLSWTH (0x1 << 30) /* RW */
-
-/* SDC_STS mask */
-#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */
-#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
-#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
-
-/* SDC_DCRC_STS mask */
-#define SDC_DCRC_STS_NEG (0xf << 8) /* RO */
-#define SDC_DCRC_STS_POS (0xff << 0) /* RO */
-
-/* EMMC_CFG0 mask */
-#define EMMC_CFG0_BOOTSTART (0x1 << 0) /* W */
-#define EMMC_CFG0_BOOTSTOP (0x1 << 1) /* W */
-#define EMMC_CFG0_BOOTMODE (0x1 << 2) /* RW */
-#define EMMC_CFG0_BOOTACKDIS (0x1 << 3) /* RW */
-#define EMMC_CFG0_BOOTWDLY (0x7 << 12) /* RW */
-#define EMMC_CFG0_BOOTSUPP (0x1 << 15) /* RW */
-
-/* EMMC_CFG1 mask */
-#define EMMC_CFG1_BOOTDATTMC (0xfffff << 0) /* RW */
-#define EMMC_CFG1_BOOTACKTMC (0xfffUL << 20) /* RW */
-
-/* EMMC_STS mask */
-#define EMMC_STS_BOOTCRCERR (0x1 << 0) /* W1C */
-#define EMMC_STS_BOOTACKERR (0x1 << 1) /* W1C */
-#define EMMC_STS_BOOTDATTMO (0x1 << 2) /* W1C */
-#define EMMC_STS_BOOTACKTMO (0x1 << 3) /* W1C */
-#define EMMC_STS_BOOTUPSTATE (0x1 << 4) /* R */
-#define EMMC_STS_BOOTACKRCV (0x1 << 5) /* W1C */
-#define EMMC_STS_BOOTDATRCV (0x1 << 6) /* R */
-
-/* EMMC_IOCON mask */
-#define EMMC_IOCON_BOOTRST (0x1 << 0) /* RW */
-
-/* SDC_ACMD19_TRG mask */
-#define SDC_ACMD19_TRG_TUNESEL (0xf << 0) /* RW */
-
-/* MSDC_DMA_CTRL mask */
-#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
-#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
-#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */
-#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */
-#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */
-#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */
-#define MSDC_DMA_CTRL_XFERSZ (0xffffUL << 16)/* RW */
-
-/* MSDC_DMA_CFG mask */
-#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */
-#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */
-#define MSDC_DMA_CFG_BDCSERR (0x1 << 4) /* R */
-#define MSDC_DMA_CFG_GPDCSERR (0x1 << 5) /* R */
-
-/* MSDC_PATCH_BIT mask */
-#define MSDC_PATCH_BIT_WFLSMODE (0x1 << 0) /* RW */
-#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */
-#define MSDC_PATCH_BIT_CKGEN_CK (0x1 << 6) /* E2: Fixed to 1 */
-#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */
-#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */
-#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */
-#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */
-#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */
-#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */
-#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */
-#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
-#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
-
-/* MSDC_PATCH_BIT1 mask */
-#define MSDC_PATCH_BIT1_WRDAT_CRCS (0x7 << 3)
-#define MSDC_PATCH_BIT1_CMD_RSP (0x7 << 0)
-
-/* MSDC_PAD_CTL0 mask */
-#define MSDC_PAD_CTL0_CLKDRVN (0x7 << 0) /* RW */
-#define MSDC_PAD_CTL0_CLKDRVP (0x7 << 4) /* RW */
-#define MSDC_PAD_CTL0_CLKSR (0x1 << 8) /* RW */
-#define MSDC_PAD_CTL0_CLKPD (0x1 << 16) /* RW */
-#define MSDC_PAD_CTL0_CLKPU (0x1 << 17) /* RW */
-#define MSDC_PAD_CTL0_CLKSMT (0x1 << 18) /* RW */
-#define MSDC_PAD_CTL0_CLKIES (0x1 << 19) /* RW */
-#define MSDC_PAD_CTL0_CLKTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL0_CLKRDSEL (0xffUL << 24) /* RW */
-
-/* MSDC_PAD_CTL1 mask */
-#define MSDC_PAD_CTL1_CMDDRVN (0x7 << 0) /* RW */
-#define MSDC_PAD_CTL1_CMDDRVP (0x7 << 4) /* RW */
-#define MSDC_PAD_CTL1_CMDSR (0x1 << 8) /* RW */
-#define MSDC_PAD_CTL1_CMDPD (0x1 << 16) /* RW */
-#define MSDC_PAD_CTL1_CMDPU (0x1 << 17) /* RW */
-#define MSDC_PAD_CTL1_CMDSMT (0x1 << 18) /* RW */
-#define MSDC_PAD_CTL1_CMDIES (0x1 << 19) /* RW */
-#define MSDC_PAD_CTL1_CMDTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL1_CMDRDSEL (0xffUL << 24) /* RW */
-
-/* MSDC_PAD_CTL2 mask */
-#define MSDC_PAD_CTL2_DATDRVN (0x7 << 0) /* RW */
-#define MSDC_PAD_CTL2_DATDRVP (0x7 << 4) /* RW */
-#define MSDC_PAD_CTL2_DATSR (0x1 << 8) /* RW */
-#define MSDC_PAD_CTL2_DATPD (0x1 << 16) /* RW */
-#define MSDC_PAD_CTL2_DATPU (0x1 << 17) /* RW */
-#define MSDC_PAD_CTL2_DATIES (0x1 << 19) /* RW */
-#define MSDC_PAD_CTL2_DATSMT (0x1 << 18) /* RW */
-#define MSDC_PAD_CTL2_DATTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL2_DATRDSEL (0xffUL << 24) /* RW */
-
-/* MSDC_PAD_TUNE mask */
-#define MSDC_PAD_TUNE_DATWRDLY (0x1F << 0) /* RW */
-#define MSDC_PAD_TUNE_DATRRDLY (0x1F << 8) /* RW */
-#define MSDC_PAD_TUNE_CMDRDLY (0x1F << 16) /* RW */
-#define MSDC_PAD_TUNE_CMDRRDLY (0x1FUL << 22) /* RW */
-#define MSDC_PAD_TUNE_CLKTXDLY (0x1FUL << 27) /* RW */
-
-/* MSDC_DAT_RDDLY0/1 mask */
-#define MSDC_DAT_RDDLY0_D0 (0x1F << 0) /* RW */
-#define MSDC_DAT_RDDLY0_D1 (0x1F << 8) /* RW */
-#define MSDC_DAT_RDDLY0_D2 (0x1F << 16) /* RW */
-#define MSDC_DAT_RDDLY0_D3 (0x1F << 24) /* RW */
-
-#define MSDC_DAT_RDDLY1_D4 (0x1F << 0) /* RW */
-#define MSDC_DAT_RDDLY1_D5 (0x1F << 8) /* RW */
-#define MSDC_DAT_RDDLY1_D6 (0x1F << 16) /* RW */
-#define MSDC_DAT_RDDLY1_D7 (0x1F << 24) /* RW */
-
-#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F << 10)
-#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7)
-#define MSDC_CKGEN_MSDC_CK_SEL (0x1 << 6)
-#define CARD_READY_FOR_DATA BIT(8)
-#define CARD_CURRENT_STATE(x) ((x & 0x00001E00) >> 9)
-
-/*--------------------------------------------------------------------------*/
-/* Descriptor Structure */
-/*--------------------------------------------------------------------------*/
-struct gpd {
- u32 hwo:1; /* could be changed by hw */
- u32 bdp:1;
- u32 rsv0:6;
- u32 chksum:8;
- u32 intr:1;
- u32 rsv1:15;
- void *next;
- void *ptr;
- u32 buflen:16;
- u32 extlen:8;
- u32 rsv2:8;
- u32 arg;
- u32 blknum;
- u32 cmd;
-};
-
-struct bd {
- u32 eol:1;
- u32 rsv0:7;
- u32 chksum:8;
- u32 rsv1:1;
- u32 blkpad:1;
- u32 dwpad:1;
- u32 rsv2:13;
- void *next;
- void *ptr;
- u32 buflen:16;
- u32 rsv3:16;
-};
-
-struct msdc_dma {
- struct gpd *gpd; /* pointer to gpd array */
- struct bd *bd; /* pointer to bd array */
- dma_addr_t gpd_addr; /* the physical address of gpd array */
- dma_addr_t bd_addr; /* the physical address of bd array */
-};
-
-struct msdc_host {
- struct msdc_hw *hw;
-
- struct mmc_host *mmc; /* mmc structure */
- struct mmc_command *cmd;
- struct mmc_data *data;
- struct mmc_request *mrq;
- int cmd_rsp;
-
- int error;
- spinlock_t lock; /* mutex */
- struct semaphore sem;
-
- u32 blksz; /* host block size */
- void __iomem *base; /* host base address */
- int id; /* host id */
- int pwr_ref; /* core power reference count */
-
- u32 xfer_size; /* total transferred size */
-
- struct msdc_dma dma; /* dma channel */
- u32 dma_xfer_size; /* dma transfer size in bytes */
-
- u32 timeout_ns; /* data timeout ns */
- u32 timeout_clks; /* data timeout clks */
-
- int irq; /* host interrupt */
-
- struct delayed_work card_delaywork;
-
- struct completion cmd_done;
- struct completion xfer_done;
- struct pm_message pm_state;
-
- u32 mclk; /* mmc subsystem clock */
- u32 hclk; /* host clock speed */
- u32 sclk; /* SD/MS clock speed */
- u8 core_clkon; /* Host core clock on ? */
- u8 card_clkon; /* Card clock on ? */
- u8 core_power; /* core power */
- u8 power_mode; /* host power mode */
- u8 card_inserted; /* card inserted ? */
- u8 suspend; /* host suspended ? */
- u8 app_cmd; /* for app command */
- u32 app_cmd_arg;
-};
-
-static inline void sdr_set_bits(void __iomem *reg, u32 bs)
-{
- u32 val = readl(reg);
-
- val |= bs;
- writel(val, reg);
-}
-
-static inline void sdr_clr_bits(void __iomem *reg, u32 bs)
-{
- u32 val = readl(reg);
-
- val &= ~bs;
- writel(val, reg);
-}
-
-static inline void sdr_set_field(void __iomem *reg, u32 field, u32 val)
-{
- unsigned int tv = readl(reg);
-
- tv &= ~field;
- tv |= ((val) << (ffs((unsigned int)field) - 1));
- writel(tv, reg);
-}
-
-static inline void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
-{
- unsigned int tv = readl(reg);
- *val = ((tv & field) >> (ffs((unsigned int)field) - 1));
-}
-
-#endif
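
For reference, the sdr_set_field()/sdr_get_field() helpers removed above place a value inside a register field by shifting it to the field's lowest set bit, located with ffs(). The following is a minimal userspace C sketch of the same bit-manipulation pattern on a plain variable (hypothetical names, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Write 'val' into the bits selected by 'field' (a contiguous mask),
 * shifting it up to the mask's lowest set bit, the way the removed
 * sdr_set_field() does on a host register.
 */
static uint32_t set_field(uint32_t reg, uint32_t field, uint32_t val)
{
        int shift = __builtin_ffs((int)field) - 1;   /* bit index of the mask's LSB */

        reg &= ~field;
        reg |= val << shift;
        return reg;
}

int main(void)
{
        uint32_t cfg = 0;

        /* e.g. an 8-bit clock-divider field at bits 15:8, like MSDC_CFG_CKDIV */
        cfg = set_field(cfg, 0xffu << 8, 0x05);
        printf("cfg = 0x%08x\n", cfg);               /* prints cfg = 0x00000500 */
        return 0;
}
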
diff --git a/drivers/staging/mt7621-mmc/sd.c b/drivers/staging/mt7621-mmc/sd.c
deleted file mode 100644
index 4b26ec896a96..000000000000
--- a/drivers/staging/mt7621-mmc/sd.c
+++ /dev/null
@@ -1,1855 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly prohibited.
- *
- * MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
- * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
- * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
- * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
- * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
- * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
- * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
- * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation ("MediaTek Software")
- * have been modified by MediaTek Inc. All revisions are subject to any receiver's
- * applicable license agreements with MediaTek Inc.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
-#include <linux/mmc/sd.h>
-#include <linux/mmc/sdio.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "board.h"
-#include "dbg.h"
-#include "mt6575_sd.h"
-
-#ifdef CONFIG_SOC_MT7621
-#define RALINK_SYSCTL_BASE 0xbe000000
-#else
-#define RALINK_SYSCTL_BASE 0xb0000000
-#endif
-
-#define DRV_NAME "mtk-sd"
-
-#if defined(CONFIG_SOC_MT7620)
-#define HOST_MAX_MCLK (48000000) /* +/- by chhung */
-#elif defined(CONFIG_SOC_MT7621)
-#define HOST_MAX_MCLK (50000000) /* +/- by chhung */
-#endif
-#define HOST_MIN_MCLK (260000)
-
-#define HOST_MAX_BLKSZ (2048)
-
-#define MSDC_OCR_AVAIL (MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33)
-
-#define GPIO_PULL_DOWN (0)
-#define GPIO_PULL_UP (1)
-
-#define DEFAULT_DEBOUNCE (8) /* 8 cycles */
-#define DEFAULT_DTOC (40) /* data timeout counter. 65536x40 sclk. */
-
-#define CMD_TIMEOUT (HZ / 10) /* 100ms */
-#define DAT_TIMEOUT (HZ / 2 * 5) /* 500ms x5 */
-
-#define MAX_DMA_CNT (64 * 1024 - 512) /* a single transaction for WIFI may be 50K*/
-
-#define MAX_GPD_NUM (1 + 1) /* one null gpd */
-#define MAX_BD_NUM (1024)
-
-#define MAX_HW_SGMTS (MAX_BD_NUM)
-#define MAX_SGMT_SZ (MAX_DMA_CNT)
-#define MAX_REQ_SZ (MAX_SGMT_SZ * 8)
-
-static int cd_active_low = 1;
-
-//=================================
-#define PERI_MSDC0_PDN (15)
-//#define PERI_MSDC1_PDN (16)
-//#define PERI_MSDC2_PDN (17)
-//#define PERI_MSDC3_PDN (18)
-
-/* +++ by chhung */
-struct msdc_hw msdc0_hw = {
- .clk_src = 0,
- .flags = MSDC_CD_PIN_EN | MSDC_REMOVABLE,
-// .flags = MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE,
-};
-
-/* end of +++ */
-
-static int msdc_rsp[] = {
- 0, /* RESP_NONE */
- 1, /* RESP_R1 */
- 2, /* RESP_R2 */
- 3, /* RESP_R3 */
- 4, /* RESP_R4 */
- 1, /* RESP_R5 */
- 1, /* RESP_R6 */
- 1, /* RESP_R7 */
- 7, /* RESP_R1b */
-};
-
-#define msdc_dma_on() sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO)
-
-static void msdc_reset_hw(struct msdc_host *host)
-{
- sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
- while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST)
- cpu_relax();
-}
-
-#define msdc_clr_int() \
- do { \
- volatile u32 val = readl(host->base + MSDC_INT); \
- writel(val, host->base + MSDC_INT); \
- } while (0)
-
-static void msdc_clr_fifo(struct msdc_host *host)
-{
- sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
- while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR)
- cpu_relax();
-}
-
-#define msdc_irq_save(val) \
- do { \
- val = readl(host->base + MSDC_INTEN); \
- sdr_clr_bits(host->base + MSDC_INTEN, val); \
- } while (0)
-
-/* clock source for host: global */
-#if defined(CONFIG_SOC_MT7620)
-static u32 hclks[] = {48000000}; /* +/- by chhung */
-#elif defined(CONFIG_SOC_MT7621)
-static u32 hclks[] = {50000000}; /* +/- by chhung */
-#endif
-
-#define sdc_is_busy() (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY)
-#define sdc_is_cmd_busy() (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY)
-
-#define sdc_send_cmd(cmd, arg) \
- do { \
- writel((arg), host->base + SDC_ARG); \
- writel((cmd), host->base + SDC_CMD); \
- } while (0)
-
-/* +++ by chhung */
-#ifndef __ASSEMBLY__
-#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
-#else
-#define PHYSADDR(a) ((a) & 0x1fffffff)
-#endif
-/* end of +++ */
-static unsigned int msdc_do_command(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout);
-
-static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd);
-
-static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
-{
- u32 timeout, clk_ns;
-
- host->timeout_ns = ns;
- host->timeout_clks = clks;
-
- clk_ns = 1000000000UL / host->sclk;
- timeout = ns / clk_ns + clks;
- timeout = timeout >> 16; /* in 65536 sclk cycle unit */
- timeout = timeout > 1 ? timeout - 1 : 0;
- timeout = timeout > 255 ? 255 : timeout;
-
- sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, timeout);
-}
-
-static void msdc_tasklet_card(struct work_struct *work)
-{
- struct msdc_host *host = (struct msdc_host *)container_of(work,
- struct msdc_host, card_delaywork.work);
- u32 inserted;
- u32 status = 0;
-
- spin_lock(&host->lock);
-
- status = readl(host->base + MSDC_PS);
- if (cd_active_low)
- inserted = (status & MSDC_PS_CDSTS) ? 0 : 1;
- else
- inserted = (status & MSDC_PS_CDSTS) ? 1 : 0;
-
- /* Make sure: handle the last interrupt */
- host->card_inserted = inserted;
-
- if (!host->suspend) {
- host->mmc->f_max = HOST_MAX_MCLK;
- mmc_detect_change(host->mmc, msecs_to_jiffies(20));
- }
-
- spin_unlock(&host->lock);
-}
-
-static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz)
-{
- //struct msdc_hw *hw = host->hw;
- u32 mode;
- u32 flags;
- u32 div;
- u32 sclk;
- u32 hclk = host->hclk;
- //u8 clksrc = hw->clk_src;
-
- if (!hz) { // set mmc system clock to 0 ?
- msdc_reset_hw(host);
- return;
- }
-
- msdc_irq_save(flags);
-
- if (ddr) {
- mode = 0x2; /* ddr mode and use divisor */
- if (hz >= (hclk >> 2)) {
- div = 1; /* mean div = 1/4 */
- sclk = hclk >> 2; /* sclk = clk / 4 */
- } else {
- div = (hclk + ((hz << 2) - 1)) / (hz << 2);
- sclk = (hclk >> 2) / div;
- }
- } else if (hz >= hclk) { /* bug fix */
- mode = 0x1; /* no divisor and divisor is ignored */
- div = 0;
- sclk = hclk;
- } else {
- mode = 0x0; /* use divisor */
- if (hz >= (hclk >> 1)) {
- div = 0; /* mean div = 1/2 */
- sclk = hclk >> 1; /* sclk = clk / 2 */
- } else {
- div = (hclk + ((hz << 2) - 1)) / (hz << 2);
- sclk = (hclk >> 2) / div;
- }
- }
-
- /* set clock mode and divisor */
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, mode);
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKDIV, div);
-
- /* wait clock stable */
- while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
- cpu_relax();
-
- host->sclk = sclk;
- host->mclk = hz;
- msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); // need?
-
- sdr_set_bits(host->base + MSDC_INTEN, flags);
-}
-
-/* Fix me. when need to abort */
-static void msdc_abort_data(struct msdc_host *host)
-{
- struct mmc_command *stop = host->mrq->stop;
-
- dev_err(mmc_dev(host->mmc), "%d -> Need to Abort.\n", host->id);
-
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
-
- // need to check FIFO count 0 ?
-
- if (stop) { /* try to stop, but may not success */
- dev_err(mmc_dev(host->mmc), "%d -> stop when abort CMD<%d>\n",
- host->id, stop->opcode);
- (void)msdc_do_command(host, stop, 0, CMD_TIMEOUT);
- }
-
- //if (host->mclk >= 25000000) {
- // msdc_set_mclk(host, 0, host->mclk >> 1);
- //}
-}
-
-#ifdef CONFIG_PM
-/*
- * register as callback function of WIFI(combo_sdio_register_pm) .
- * can called by msdc_drv_suspend/resume too.
- */
-static void msdc_pm(pm_message_t state, void *data)
-{
- struct msdc_host *host = (struct msdc_host *)data;
- int evt = state.event;
-
- if (evt == PM_EVENT_SUSPEND || evt == PM_EVENT_USER_SUSPEND) {
- if (host->suspend) /* already suspend */ /* default 0*/
- return;
-
- /* for memory card. already power off by mmc */
- if (evt == PM_EVENT_SUSPEND && host->power_mode == MMC_POWER_OFF)
- return;
-
- host->suspend = 1;
- host->pm_state = state; /* default PMSG_RESUME */
-
- } else if (evt == PM_EVENT_RESUME || evt == PM_EVENT_USER_RESUME) {
- if (!host->suspend)
- return;
-
- /* No PM resume when USR suspend */
- if (evt == PM_EVENT_RESUME && host->pm_state.event == PM_EVENT_USER_SUSPEND) {
- dev_err(mmc_dev(host->mmc),
- "%d -> PM Resume when in USR Suspend\n",
- host->id); /* won't happen. */
- return;
- }
-
- host->suspend = 0;
- host->pm_state = state;
- }
-}
-#endif
-
-static inline u32 msdc_cmd_find_resp(struct mmc_command *cmd)
-{
- u32 opcode = cmd->opcode;
- u32 resp;
-
- if (opcode == MMC_SET_RELATIVE_ADDR) {
- resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1;
- } else if (opcode == MMC_FAST_IO) {
- resp = RESP_R4;
- } else if (opcode == MMC_GO_IRQ_STATE) {
- resp = RESP_R5;
- } else if (opcode == MMC_SELECT_CARD) {
- resp = (cmd->arg != 0) ? RESP_R1B : RESP_NONE;
- } else if (opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED) {
- resp = RESP_R1; /* SDIO workaround. */
- } else if (opcode == SD_SEND_IF_COND && (mmc_cmd_type(cmd) == MMC_CMD_BCR)) {
- resp = RESP_R1;
- } else {
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_R1:
- resp = RESP_R1;
- break;
- case MMC_RSP_R1B:
- resp = RESP_R1B;
- break;
- case MMC_RSP_R2:
- resp = RESP_R2;
- break;
- case MMC_RSP_R3:
- resp = RESP_R3;
- break;
- case MMC_RSP_NONE:
- default:
- resp = RESP_NONE;
- break;
- }
- }
-
- return resp;
-}
-
-/*--------------------------------------------------------------------------*/
-/* mmc_host_ops members */
-/*--------------------------------------------------------------------------*/
-static unsigned int msdc_command_start(struct msdc_host *host,
- struct mmc_command *cmd,
- unsigned long timeout)
-{
- u32 opcode = cmd->opcode;
- u32 rawcmd;
- u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
- MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
- MSDC_INT_ACMD19_DONE;
-
- u32 resp;
- unsigned long tmo;
-
- /* Protocol layer does not provide response type, but our hardware needs
- * to know exact type, not just size!
- */
- resp = msdc_cmd_find_resp(cmd);
-
- cmd->error = 0;
- /* rawcmd :
- * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
- * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
- */
- rawcmd = opcode | msdc_rsp[resp] << 7 | host->blksz << 16;
-
- if (opcode == MMC_READ_MULTIPLE_BLOCK) {
- rawcmd |= (2 << 11);
- } else if (opcode == MMC_READ_SINGLE_BLOCK) {
- rawcmd |= (1 << 11);
- } else if (opcode == MMC_WRITE_MULTIPLE_BLOCK) {
- rawcmd |= ((2 << 11) | (1 << 13));
- } else if (opcode == MMC_WRITE_BLOCK) {
- rawcmd |= ((1 << 11) | (1 << 13));
- } else if (opcode == SD_IO_RW_EXTENDED) {
- if (cmd->data->flags & MMC_DATA_WRITE)
- rawcmd |= (1 << 13);
- if (cmd->data->blocks > 1)
- rawcmd |= (2 << 11);
- else
- rawcmd |= (1 << 11);
- } else if (opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int)-1) {
- rawcmd |= (1 << 14);
- } else if ((opcode == SD_APP_SEND_SCR) ||
- (opcode == SD_APP_SEND_NUM_WR_BLKS) ||
- (opcode == SD_SWITCH && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
- (opcode == SD_APP_SD_STATUS && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
- (opcode == MMC_SEND_EXT_CSD && (mmc_cmd_type(cmd) == MMC_CMD_ADTC))) {
- rawcmd |= (1 << 11);
- } else if (opcode == MMC_STOP_TRANSMISSION) {
- rawcmd |= (1 << 14);
- rawcmd &= ~(0x0FFF << 16);
- }
-
- tmo = jiffies + timeout;
-
- if (opcode == MMC_SEND_STATUS) {
- for (;;) {
- if (!sdc_is_cmd_busy())
- break;
-
- if (time_after(jiffies, tmo)) {
- dev_err(mmc_dev(host->mmc),
- "%d -> XXX cmd_busy timeout: before CMD<%d>\n",
- host->id, opcode);
- cmd->error = -ETIMEDOUT;
- msdc_reset_hw(host);
- goto end;
- }
- }
- } else {
- for (;;) {
- if (!sdc_is_busy())
- break;
- if (time_after(jiffies, tmo)) {
- dev_err(mmc_dev(host->mmc),
- "%d -> XXX sdc_busy timeout: before CMD<%d>\n",
- host->id, opcode);
- cmd->error = -ETIMEDOUT;
- msdc_reset_hw(host);
- goto end;
- }
- }
- }
-
- //BUG_ON(in_interrupt());
- host->cmd = cmd;
- host->cmd_rsp = resp;
-
- init_completion(&host->cmd_done);
-
- sdr_set_bits(host->base + MSDC_INTEN, wints);
- sdc_send_cmd(rawcmd, cmd->arg);
-
-end:
- return cmd->error;
-}
-
-static unsigned int msdc_command_resp(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout)
- __must_hold(&host->lock)
-{
- u32 opcode = cmd->opcode;
- //u32 rawcmd;
- u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
- MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
- MSDC_INT_ACMD19_DONE;
-
- BUG_ON(in_interrupt());
- //init_completion(&host->cmd_done);
- //sdr_set_bits(host->base + MSDC_INTEN, wints);
-
- spin_unlock(&host->lock);
- if (!wait_for_completion_timeout(&host->cmd_done, 10 * timeout)) {
- dev_err(mmc_dev(host->mmc),
- "%d -> XXX CMD<%d> wait_for_completion timeout ARG<0x%.8x>\n",
- host->id, opcode, cmd->arg);
- cmd->error = -ETIMEDOUT;
- msdc_reset_hw(host);
- }
- spin_lock(&host->lock);
-
- sdr_clr_bits(host->base + MSDC_INTEN, wints);
- host->cmd = NULL;
-
-//end:
- /* do we need to save card's RCA when SD_SEND_RELATIVE_ADDR */
-
- if (!tune)
- return cmd->error;
-
- /* memory card CRC */
- if (host->hw->flags & MSDC_REMOVABLE && cmd->error == -EIO) {
- /* check if has data phase */
- if (readl(host->base + SDC_CMD) & 0x1800) {
- msdc_abort_data(host);
- } else {
- /* do basic: reset*/
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
- }
- cmd->error = msdc_tune_cmdrsp(host, cmd);
- }
-
- // check DAT0
- /* if (resp == RESP_R1B) {
- while ((readl(host->base + MSDC_PS) & 0x10000) != 0x10000);
- } */
- /* CMD12 Error Handle */
-
- return cmd->error;
-}
-
-static unsigned int msdc_do_command(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout)
-{
- if (msdc_command_start(host, cmd, timeout))
- goto end;
-
- if (msdc_command_resp(host, cmd, tune, timeout))
- goto end;
-
-end:
-
- return cmd->error;
-}
-
-static void msdc_dma_start(struct msdc_host *host)
-{
- u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR;
-
- sdr_set_bits(host->base + MSDC_INTEN, wints);
- //dsb(); /* --- by chhung */
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
-}
-
-static void msdc_dma_stop(struct msdc_host *host)
-{
- //u32 retries=500;
- u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR;
-
- //while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
-
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
- while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
- ;
-
- //dsb(); /* --- by chhung */
- sdr_clr_bits(host->base + MSDC_INTEN, wints); /* Not just xfer_comp */
-}
-
-/* calc checksum */
-static u8 msdc_dma_calcs(u8 *buf, u32 len)
-{
- u32 i, sum = 0;
-
- for (i = 0; i < len; i++)
- sum += buf[i];
- return 0xFF - (u8)sum;
-}
-
-static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
- struct scatterlist *sg_cmd, unsigned int sglen)
-{
- struct scatterlist *sg;
- struct gpd *gpd;
- struct bd *bd;
- u32 j;
-
- BUG_ON(sglen > MAX_BD_NUM); /* not support currently */
-
- gpd = dma->gpd;
- bd = dma->bd;
-
- /* modify gpd*/
- //gpd->intr = 0;
- gpd->hwo = 1; /* hw will clear it */
- gpd->bdp = 1;
- gpd->chksum = 0; /* need to clear first. */
- gpd->chksum = msdc_dma_calcs((u8 *)gpd, 16);
-
- /* modify bd*/
- for_each_sg(sg_cmd, sg, sglen, j) {
- bd[j].blkpad = 0;
- bd[j].dwpad = 0;
- bd[j].ptr = (void *)sg_dma_address(sg);
- bd[j].buflen = sg_dma_len(sg);
-
- if (j == sglen - 1)
- bd[j].eol = 1; /* the last bd */
- else
- bd[j].eol = 0;
-
- bd[j].chksum = 0; /* checksume need to clear first */
- bd[j].chksum = msdc_dma_calcs((u8 *)(&bd[j]), 16);
- }
-
- sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ,
- MSDC_BRUST_64B);
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1);
-
- writel(PHYSADDR((u32)dma->gpd_addr), host->base + MSDC_DMA_SA);
-}
-
-static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
- __must_hold(&host->lock)
-{
- struct msdc_host *host = mmc_priv(mmc);
- struct mmc_command *cmd;
- struct mmc_data *data;
- //u32 intsts = 0;
- int read = 1, send_type = 0;
-
-#define SND_DAT 0
-#define SND_CMD 1
-
- BUG_ON(!mmc);
- BUG_ON(!mrq);
-
- host->error = 0;
-
- cmd = mrq->cmd;
- data = mrq->cmd->data;
-
- if (!data) {
- send_type = SND_CMD;
- if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0)
- goto done;
- } else {
- BUG_ON(data->blksz > HOST_MAX_BLKSZ);
- send_type = SND_DAT;
-
- data->error = 0;
- read = data->flags & MMC_DATA_READ ? 1 : 0;
- host->data = data;
- host->xfer_size = data->blocks * data->blksz;
- host->blksz = data->blksz;
-
- if (read) {
- if ((host->timeout_ns != data->timeout_ns) ||
- (host->timeout_clks != data->timeout_clks)) {
- msdc_set_timeout(host, data->timeout_ns, data->timeout_clks);
- }
- }
-
- writel(data->blocks, host->base + SDC_BLK_NUM);
- //msdc_clr_fifo(host); /* no need */
-
- msdc_dma_on(); /* enable DMA mode first!! */
- init_completion(&host->xfer_done);
-
- /* start the command first*/
- if (msdc_command_start(host, cmd, CMD_TIMEOUT) != 0)
- goto done;
-
- data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
- msdc_dma_setup(host, &host->dma, data->sg,
- data->sg_count);
-
- /* then wait command done */
- if (msdc_command_resp(host, cmd, 1, CMD_TIMEOUT) != 0)
- goto done;
-
- /* for read, the data coming too fast, then CRC error
- * start DMA no business with CRC.
- */
- //init_completion(&host->xfer_done);
- msdc_dma_start(host);
-
- spin_unlock(&host->lock);
- if (!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)) {
- dev_err(mmc_dev(host->mmc),
- "%d -> XXX CMD<%d> wait xfer_done<%d> timeout!!\n",
- host->id, cmd->opcode,
- data->blocks * data->blksz);
- dev_err(mmc_dev(host->mmc),
- "%d -> DMA_SA = 0x%x\n",
- host->id, readl(host->base + MSDC_DMA_SA));
- dev_err(mmc_dev(host->mmc),
- "%d -> DMA_CA = 0x%x\n",
- host->id, readl(host->base + MSDC_DMA_CA));
- dev_err(mmc_dev(host->mmc),
- "%d -> DMA_CTRL = 0x%x\n",
- host->id, readl(host->base + MSDC_DMA_CTRL));
- dev_err(mmc_dev(host->mmc),
- "%d -> DMA_CFG = 0x%x\n",
- host->id, readl(host->base + MSDC_DMA_CFG));
- data->error = -ETIMEDOUT;
-
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
- }
- spin_lock(&host->lock);
- msdc_dma_stop(host);
-
- /* Last: stop transfer */
- if (data->stop) {
- if (msdc_do_command(host, data->stop, 0, CMD_TIMEOUT) != 0)
- goto done;
- }
- }
-
-done:
- if (data) {
- host->data = NULL;
- dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
- host->blksz = 0;
- }
-
- if (mrq->cmd->error)
- host->error = 0x001;
- if (mrq->data && mrq->data->error)
- host->error |= 0x010;
- if (mrq->stop && mrq->stop->error)
- host->error |= 0x100;
-
- return host->error;
-}
-
-static int msdc_app_cmd(struct mmc_host *mmc, struct msdc_host *host)
-{
- struct mmc_command cmd;
- struct mmc_request mrq;
- u32 err;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
- cmd.opcode = MMC_APP_CMD;
- cmd.arg = host->app_cmd_arg;
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-
- memset(&mrq, 0, sizeof(struct mmc_request));
- mrq.cmd = &cmd; cmd.mrq = &mrq;
- cmd.data = NULL;
-
- err = msdc_do_command(host, &cmd, 0, CMD_TIMEOUT);
- return err;
-}
-
-static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd)
-{
- int result = -1;
- u32 rsmpl, cur_rsmpl, orig_rsmpl;
- u32 rrdly, cur_rrdly = 0xffffffff, orig_rrdly;
- u32 skip = 1;
-
- /* ==== don't support 3.0 now ====
- * 1: R_SMPL[1]
- * 2: PAD_CMD_RESP_RXDLY[26:22]
- * ==========================
- */
-
- // save the previous tune result
- sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL, &orig_rsmpl);
- sdr_get_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
- &orig_rrdly);
-
- rrdly = 0;
- do {
- for (rsmpl = 0; rsmpl < 2; rsmpl++) {
- /* Lv1: R_SMPL[1] */
- cur_rsmpl = (orig_rsmpl + rsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL,
- cur_rsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_CMD app_cmd<%d> failed: RESP_RXDLY<%d>,R_SMPL<%d>\n",
- host->id,
- host->mrq->cmd->opcode,
- cur_rrdly, cur_rsmpl);
- continue;
- }
- }
- result = msdc_do_command(host, cmd, 0, CMD_TIMEOUT); // not tune.
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_CMD<%d> %s PAD_CMD_RESP_RXDLY[26:22]<%d> R_SMPL[1]<%d>\n",
- host->id, cmd->opcode,
- (result == 0) ? "PASS" : "FAIL", cur_rrdly,
- cur_rsmpl);
-
- if (result == 0)
- return 0;
- if (result != -EIO) {
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_CMD<%d> Error<%d> not -EIO\n",
- host->id, cmd->opcode, result);
- return result;
- }
-
- /* should be EIO */
- /* check if has data phase */
- if (readl(host->base + SDC_CMD) & 0x1800)
- msdc_abort_data(host);
- }
-
- /* Lv2: PAD_CMD_RESP_RXDLY[26:22] */
- cur_rrdly = (orig_rrdly + rrdly + 1) % 32;
- sdr_set_field(host->base + MSDC_PAD_TUNE,
- MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly);
- } while (++rrdly < 32);
-
- return result;
-}
-
-/* Support SD2.0 Only */
-static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
- u32 ddr = 0;
- u32 dcrc = 0;
- u32 rxdly, cur_rxdly0, cur_rxdly1;
- u32 dsmpl, cur_dsmpl, orig_dsmpl;
- u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
- u32 cur_dat4, cur_dat5, cur_dat6, cur_dat7;
- u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
- u32 orig_dat4, orig_dat5, orig_dat6, orig_dat7;
- int result = -1;
- u32 skip = 1;
-
- sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
-
- /* Tune Method 2. */
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
-
- rxdly = 0;
- do {
- for (dsmpl = 0; dsmpl < 2; dsmpl++) {
- cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL,
- cur_dsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_BREAD app_cmd<%d> failed\n",
- host->id,
- host->mrq->cmd->opcode);
- continue;
- }
- }
- result = msdc_do_request(mmc, mrq);
-
- sdr_get_field(host->base + SDC_DCRC_STS,
- SDC_DCRC_STS_POS | SDC_DCRC_STS_NEG,
- &dcrc); /* RO */
- if (!ddr)
- dcrc &= ~SDC_DCRC_STS_NEG;
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>\n",
- host->id,
- (result == 0 && dcrc == 0) ? "PASS" : "FAIL",
- dcrc, readl(host->base + MSDC_DAT_RDDLY0),
- readl(host->base + MSDC_DAT_RDDLY1),
- cur_dsmpl);
-
- /* Fix me: result is 0, but dcrc is still exist */
- if (result == 0 && dcrc == 0) {
- goto done;
- } else {
- /* there is a case: command timeout, and data phase not processed */
- if (mrq->data->error != 0 &&
- mrq->data->error != -EIO) {
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>\n",
- host->id, result,
- mrq->cmd->error,
- mrq->data->error);
- goto done;
- }
- }
- }
-
- cur_rxdly0 = readl(host->base + MSDC_DAT_RDDLY0);
- cur_rxdly1 = readl(host->base + MSDC_DAT_RDDLY1);
-
- /* E1 ECO. YD: Reverse */
- if (readl(host->base + MSDC_ECO_VER) >= 4) {
- orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat4 = (cur_rxdly1 >> 24) & 0x1F;
- orig_dat5 = (cur_rxdly1 >> 16) & 0x1F;
- orig_dat6 = (cur_rxdly1 >> 8) & 0x1F;
- orig_dat7 = (cur_rxdly1 >> 0) & 0x1F;
- } else {
- orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat4 = (cur_rxdly1 >> 0) & 0x1F;
- orig_dat5 = (cur_rxdly1 >> 8) & 0x1F;
- orig_dat6 = (cur_rxdly1 >> 16) & 0x1F;
- orig_dat7 = (cur_rxdly1 >> 24) & 0x1F;
- }
-
- if (ddr) {
- cur_dat0 = (dcrc & (1 << 0) || dcrc & (1 << 8)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
- cur_dat1 = (dcrc & (1 << 1) || dcrc & (1 << 9)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
- cur_dat2 = (dcrc & (1 << 2) || dcrc & (1 << 10)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
- cur_dat3 = (dcrc & (1 << 3) || dcrc & (1 << 11)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
- } else {
- cur_dat0 = (dcrc & (1 << 0)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
- cur_dat1 = (dcrc & (1 << 1)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
- cur_dat2 = (dcrc & (1 << 2)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
- cur_dat3 = (dcrc & (1 << 3)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
- }
- cur_dat4 = (dcrc & (1 << 4)) ? ((orig_dat4 + 1) % 32) : orig_dat4;
- cur_dat5 = (dcrc & (1 << 5)) ? ((orig_dat5 + 1) % 32) : orig_dat5;
- cur_dat6 = (dcrc & (1 << 6)) ? ((orig_dat6 + 1) % 32) : orig_dat6;
- cur_dat7 = (dcrc & (1 << 7)) ? ((orig_dat7 + 1) % 32) : orig_dat7;
-
- cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
- cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0);
-
- writel(cur_rxdly0, host->base + MSDC_DAT_RDDLY0);
- writel(cur_rxdly1, host->base + MSDC_DAT_RDDLY1);
-
- } while (++rxdly < 32);
-
-done:
- return result;
-}
-
-static int msdc_tune_bwrite(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
-
- u32 wrrdly, cur_wrrdly = 0xffffffff, orig_wrrdly;
- u32 dsmpl, cur_dsmpl, orig_dsmpl;
- u32 rxdly, cur_rxdly0;
- u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
- u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
- int result = -1;
- u32 skip = 1;
-
-	// MSDC_IOCON_DDR50CKD needs to be checked. [FIXME]
-
- sdr_get_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY,
- &orig_wrrdly);
- sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
-
- /* Tune Method 2. just DAT0 */
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
- cur_rxdly0 = readl(host->base + MSDC_DAT_RDDLY0);
-
- /* E1 ECO. YD: Reverse */
- if (readl(host->base + MSDC_ECO_VER) >= 4) {
- orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
- } else {
- orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
- }
-
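-	/* Sweep the DAT write delay and the sample edge for each DAT0 read
-	 * delay setting; a full inner sweep without success bumps the DAT0
-	 * read delay and tries again.
-	 */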
- rxdly = 0;
- do {
- wrrdly = 0;
- do {
- for (dsmpl = 0; dsmpl < 2; dsmpl++) {
- cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(host->base + MSDC_IOCON,
- MSDC_IOCON_DSPL, cur_dsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_BWRITE app_cmd<%d> failed\n",
- host->id,
- host->mrq->cmd->opcode);
- continue;
- }
- }
- result = msdc_do_request(mmc, mrq);
-
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_BWRITE<%s> DSPL<%d> DATWRDLY<%d> MSDC_DAT_RDDLY0<0x%x>\n",
- host->id,
- result == 0 ? "PASS" : "FAIL",
- cur_dsmpl, cur_wrrdly, cur_rxdly0);
-
- if (result == 0) {
- goto done;
- } else {
-					/* There is a case where the command times out and the data phase is not processed */
- if (mrq->data->error != -EIO) {
- dev_err(mmc_dev(host->mmc),
- "%d -> TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>\n",
- host->id, result,
- mrq->cmd->error,
- mrq->data->error);
- goto done;
- }
- }
- }
- cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32;
- sdr_set_field(host->base + MSDC_PAD_TUNE,
- MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly);
- } while (++wrrdly < 32);
-
- cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */
- cur_dat1 = orig_dat1;
- cur_dat2 = orig_dat2;
- cur_dat3 = orig_dat3;
-
- cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
- writel(cur_rxdly0, host->base + MSDC_DAT_RDDLY0);
- } while (++rxdly < 32);
-
-done:
- return result;
-}
-
-static int msdc_get_card_status(struct mmc_host *mmc, struct msdc_host *host, u32 *status)
-{
- struct mmc_command cmd;
- struct mmc_request mrq;
- u32 err;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
- cmd.opcode = MMC_SEND_STATUS;
- if (mmc->card) {
- cmd.arg = mmc->card->rca << 16;
- } else {
- dev_err(mmc_dev(host->mmc), "%d -> cmd13 mmc card is null\n",
- host->id);
- cmd.arg = host->app_cmd_arg;
- }
- cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-
- memset(&mrq, 0, sizeof(struct mmc_request));
- mrq.cmd = &cmd; cmd.mrq = &mrq;
- cmd.data = NULL;
-
- err = msdc_do_command(host, &cmd, 1, CMD_TIMEOUT);
-
- if (status)
- *status = cmd.resp[0];
-
- return err;
-}
-
-static int msdc_check_busy(struct mmc_host *mmc, struct msdc_host *host)
-{
- u32 err = 0;
- u32 status = 0;
-
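-	/* Poll CMD13 until the card leaves the programming state (R1 current
-	 * state 7), i.e. until the previous write has actually finished.
-	 */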
- do {
- err = msdc_get_card_status(mmc, host, &status);
- if (err)
- return err;
- /* need cmd12? */
- dev_err(mmc_dev(host->mmc), "%d -> cmd<13> resp<0x%x>\n",
- host->id, status);
- } while (R1_CURRENT_STATE(status) == 7);
-
- return err;
-}
-
-/* called when msdc_do_request() has failed */
-static int msdc_tune_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
- struct mmc_data *data;
- //u32 base = host->base;
- int ret = 0, read;
-
- data = mrq->cmd->data;
-
- read = data->flags & MMC_DATA_READ ? 1 : 0;
-
- if (read) {
- if (data->error == -EIO)
- ret = msdc_tune_bread(mmc, mrq);
- } else {
- ret = msdc_check_busy(mmc, host);
- if (ret) {
- dev_err(mmc_dev(host->mmc),
- "%d -> XXX cmd13 wait program done failed\n",
- host->id);
- return ret;
- }
- /* CRC and TO */
-		/* FIXME: don't we care about the card status? */
- ret = msdc_tune_bwrite(mmc, mrq);
- }
-
- return ret;
-}
-
-/* ops.request */
-static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
-
- WARN_ON(host->mrq);
-
- /* start to process */
- spin_lock(&host->lock);
-
- host->mrq = mrq;
-
- if (msdc_do_request(mmc, mrq)) {
- if (host->hw->flags & MSDC_REMOVABLE && ralink_soc == MT762X_SOC_MT7621AT && mrq->data && mrq->data->error)
- msdc_tune_request(mmc, mrq);
- }
-
- /* ==== when request done, check if app_cmd ==== */
- if (mrq->cmd->opcode == MMC_APP_CMD) {
- host->app_cmd = 1;
- host->app_cmd_arg = mrq->cmd->arg; /* save the RCA */
- } else {
- host->app_cmd = 0;
- //host->app_cmd_arg = 0;
- }
-
- host->mrq = NULL;
-
- spin_unlock(&host->lock);
-
- mmc_request_done(mmc, mrq);
-}
-
-/* called by ops.set_ios */
-static void msdc_set_buswidth(struct msdc_host *host, u32 width)
-{
- u32 val = readl(host->base + SDC_CFG);
-
- val &= ~SDC_CFG_BUSWIDTH;
-
- switch (width) {
- default:
- case MMC_BUS_WIDTH_1:
- width = 1;
- val |= (MSDC_BUS_1BITS << 16);
- break;
- case MMC_BUS_WIDTH_4:
- val |= (MSDC_BUS_4BITS << 16);
- break;
- case MMC_BUS_WIDTH_8:
- val |= (MSDC_BUS_8BITS << 16);
- break;
- }
-
- writel(val, host->base + SDC_CFG);
-}
-
-/* ops.set_ios */
-static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct msdc_host *host = mmc_priv(mmc);
- u32 ddr = 0;
-
-#ifdef MT6575_SD_DEBUG
- static const char * const vdd[] = {
- "1.50v", "1.55v", "1.60v", "1.65v", "1.70v", "1.80v", "1.90v",
- "2.00v", "2.10v", "2.20v", "2.30v", "2.40v", "2.50v", "2.60v",
- "2.70v", "2.80v", "2.90v", "3.00v", "3.10v", "3.20v", "3.30v",
- "3.40v", "3.50v", "3.60v"
- };
- static const char * const power_mode[] = {
- "OFF", "UP", "ON"
- };
- static const char * const bus_mode[] = {
- "UNKNOWN", "OPENDRAIN", "PUSHPULL"
- };
- static const char * const timing[] = {
- "LEGACY", "MMC_HS", "SD_HS"
- };
-
- printk("SET_IOS: CLK(%dkHz), BUS(%s), BW(%u), PWR(%s), VDD(%s), TIMING(%s)",
- ios->clock / 1000, bus_mode[ios->bus_mode],
- (ios->bus_width == MMC_BUS_WIDTH_4) ? 4 : 1,
- power_mode[ios->power_mode], vdd[ios->vdd], timing[ios->timing]);
-#endif
-
- msdc_set_buswidth(host, ios->bus_width);
-
- /* Power control ??? */
- switch (ios->power_mode) {
- case MMC_POWER_OFF:
- case MMC_POWER_UP:
- break;
- case MMC_POWER_ON:
- host->power_mode = MMC_POWER_ON;
- break;
- default:
- break;
- }
-
- /* Clock control */
- if (host->mclk != ios->clock) {
- if (ios->clock > 25000000) {
- //if (!(host->hw->flags & MSDC_REMOVABLE)) {
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL,
- MSDC_SMPL_FALLING);
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL,
- MSDC_SMPL_FALLING);
- //} /* for tuning debug */
- } else { /* default value */
- writel(0x00000000, host->base + MSDC_IOCON);
- // writel(0x00000000, host->base + MSDC_DAT_RDDLY0);
-
- // for MT7620 E2 and afterward
- writel(0x10101010, host->base + MSDC_DAT_RDDLY0);
-
- writel(0x00000000, host->base + MSDC_DAT_RDDLY1);
- // writel(0x00000000, host->base + MSDC_PAD_TUNE);
-
- // for MT7620 E2 and afterward
- writel(0x84101010, host->base + MSDC_PAD_TUNE);
- }
- msdc_set_mclk(host, ddr, ios->clock);
- }
-}
-
-/* ops.get_ro */
-static int msdc_ops_get_ro(struct mmc_host *mmc)
-{
- struct msdc_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ro = 0;
-
- if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */
- spin_lock_irqsave(&host->lock, flags);
- ro = (readl(host->base + MSDC_PS) >> 31);
- spin_unlock_irqrestore(&host->lock, flags);
- }
- return ro;
-}
-
-/* ops.get_cd */
-static int msdc_ops_get_cd(struct mmc_host *mmc)
-{
- struct msdc_host *host = mmc_priv(mmc);
- unsigned long flags;
- int present = 1;
-
- /* for sdio, MSDC_REMOVABLE not set, always return 1 */
- if (!(host->hw->flags & MSDC_REMOVABLE)) {
-		/* For SDIO, reading the H/W always returns 1, but it may time out sometimes */
-#if 1
- host->card_inserted = 1;
- return 1;
-#else
- host->card_inserted = (host->pm_state.event == PM_EVENT_USER_RESUME) ? 1 : 0;
- return host->card_inserted;
-#endif
- }
-
- /* MSDC_CD_PIN_EN set for card */
- if (host->hw->flags & MSDC_CD_PIN_EN) {
- spin_lock_irqsave(&host->lock, flags);
- // CD
- present = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
- if (cd_active_low)
- present = present ? 0 : 1;
- else
- present = present ? 1 : 0;
- host->card_inserted = present;
- spin_unlock_irqrestore(&host->lock, flags);
- } else {
- present = 0; /* TODO? Check DAT3 pins for card detection */
- }
-
- return present;
-}
-
-static struct mmc_host_ops mt_msdc_ops = {
- .request = msdc_ops_request,
- .set_ios = msdc_ops_set_ios,
- .get_ro = msdc_ops_get_ro,
- .get_cd = msdc_ops_get_cd,
-};
-
-/*--------------------------------------------------------------------------*/
-/* interrupt handler */
-/*--------------------------------------------------------------------------*/
-static irqreturn_t msdc_irq(int irq, void *dev_id)
-{
- struct msdc_host *host = (struct msdc_host *)dev_id;
- struct mmc_data *data = host->data;
- struct mmc_command *cmd = host->cmd;
-
- u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY |
- MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY |
- MSDC_INT_ACMD19_DONE;
- u32 datsts = MSDC_INT_DATCRCERR | MSDC_INT_DATTMO;
-
- u32 intsts = readl(host->base + MSDC_INT);
- u32 inten = readl(host->base + MSDC_INTEN); inten &= intsts;
-
- writel(intsts, host->base + MSDC_INT); /* clear interrupts */
- /* MSG will cause fatal error */
-
- /* card change interrupt */
- if (intsts & MSDC_INT_CDSC) {
- if (host->mmc->caps & MMC_CAP_NEEDS_POLL)
- return IRQ_HANDLED;
- schedule_delayed_work(&host->card_delaywork, HZ);
-		/* tune when the card is plugged in? */
- }
-
- /* transfer complete interrupt */
- if (data) {
- if (inten & MSDC_INT_XFER_COMPL) {
- data->bytes_xfered = host->xfer_size;
- complete(&host->xfer_done);
- }
-
- if (intsts & datsts) {
-			/* do a basic reset, or the stop command will leave the SDC busy */
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
-
- if (intsts & MSDC_INT_DATTMO)
- data->error = -ETIMEDOUT;
- else if (intsts & MSDC_INT_DATCRCERR)
- data->error = -EIO;
-
- //if(readl(MSDC_INTEN) & MSDC_INT_XFER_COMPL) {
-			complete(&host->xfer_done); /* Read CRC comes fast, XFER_COMPL is not enabled */
- }
- }
-
- /* command interrupts */
- if (cmd && (intsts & cmdsts)) {
- if ((intsts & MSDC_INT_CMDRDY) || (intsts & MSDC_INT_ACMDRDY) ||
- (intsts & MSDC_INT_ACMD19_DONE)) {
- u32 *rsp = &cmd->resp[0];
-
- switch (host->cmd_rsp) {
- case RESP_NONE:
- break;
- case RESP_R2:
- *rsp++ = readl(host->base + SDC_RESP3);
- *rsp++ = readl(host->base + SDC_RESP2);
- *rsp++ = readl(host->base + SDC_RESP1);
- *rsp++ = readl(host->base + SDC_RESP0);
- break;
- default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
- if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE))
- *rsp = readl(host->base + SDC_ACMD_RESP);
- else
- *rsp = readl(host->base + SDC_RESP0);
- break;
- }
- } else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) {
- cmd->error = -EIO;
- } else if ((intsts & MSDC_INT_CMDTMO) || (intsts & MSDC_INT_ACMDTMO)) {
- cmd->error = -ETIMEDOUT;
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
- }
- complete(&host->cmd_done);
- }
-
- /* mmc irq interrupts */
- if (intsts & MSDC_INT_MMCIRQ)
-		dev_info(mmc_dev(host->mmc), "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\n",
- host->id, readl(host->base + SDC_CSTS));
-
- return IRQ_HANDLED;
-}
-
-/*--------------------------------------------------------------------------*/
-/* platform_driver members */
-/*--------------------------------------------------------------------------*/
-/* called by msdc_drv_probe/remove */
-static void msdc_enable_cd_irq(struct msdc_host *host, int enable)
-{
- struct msdc_hw *hw = host->hw;
-
- /* for sdio, not set */
- if ((hw->flags & MSDC_CD_PIN_EN) == 0) {
- /* Pull down card detection pin since it is not available */
- /*
- * if (hw->config_gpio_pin)
- * hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
- */
- sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
- sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
- return;
- }
-
- if (enable) {
-		/* The card detection circuit relies on the core power, so the
-		 * core power must not be turned off. Take a reference count
-		 * here to keep the core power alive.
-		 */
-
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP);
-
- sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
- DEFAULT_DEBOUNCE);
- sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
- sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
-
-		/* not in the document! FIXME */
- sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
- } else {
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
-
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
- sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
- sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
-
-		/* Drop the core power reference count here since the card
-		 * detection circuit is shut down.
-		 */
- }
-}
-
-/* called by msdc_drv_probe */
-static void msdc_init_hw(struct msdc_host *host)
-{
- /* Configure to MMC/SD mode */
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC);
-
- /* Reset */
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
-
- /* Disable card detection */
- sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
-
- /* Disable and clear all interrupts */
- sdr_clr_bits(host->base + MSDC_INTEN, readl(host->base + MSDC_INTEN));
- writel(readl(host->base + MSDC_INT), host->base + MSDC_INT);
-
-#if 1
- /* reset tuning parameter */
- writel(0x00090000, host->base + MSDC_PAD_CTL0);
- writel(0x000A0000, host->base + MSDC_PAD_CTL1);
- writel(0x000A0000, host->base + MSDC_PAD_CTL2);
- // writel( 0x00000000, host->base + MSDC_PAD_TUNE);
-
- // for MT7620 E2 and afterward
- writel(0x84101010, host->base + MSDC_PAD_TUNE);
-
- // writel(0x00000000, host->base + MSDC_DAT_RDDLY0);
-
- // for MT7620 E2 and afterward
- writel(0x10101010, host->base + MSDC_DAT_RDDLY0);
-
- writel(0x00000000, host->base + MSDC_DAT_RDDLY1);
- writel(0x00000000, host->base + MSDC_IOCON);
-
- if (readl(host->base + MSDC_ECO_VER) >= 4) {
- if (host->id == 1) {
- sdr_set_field(host->base + MSDC_PATCH_BIT1,
- MSDC_PATCH_BIT1_WRDAT_CRCS, 1);
- sdr_set_field(host->base + MSDC_PATCH_BIT1,
- MSDC_PATCH_BIT1_CMD_RSP, 1);
-
- /* internal clock: latch read data */
- sdr_set_bits(host->base + MSDC_PATCH_BIT0,
- MSDC_PATCH_BIT_CKGEN_CK);
- }
- }
-#endif
-
-	/* For safety, SDC_CFG.SDIO_INT_DET_EN should be cleared and SDC_CFG.SDIO
-	 * set in the pre-loader, U-Boot and kernel drivers. SDC_CFG.SDIO_INT_DET_EN
-	 * will only be set when the kernel driver wants to use the SDIO bus
-	 * interrupt.
-	 */
-	/* Configure to enable SDIO mode. This is mandatory, otherwise SDIO CMD5 fails */
- sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
-
-	/* disable the SDIO device interrupt detection function */
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
-
-	/* enable SMT for glitch filter */
- sdr_set_bits(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT);
- sdr_set_bits(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT);
- sdr_set_bits(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT);
-
-#if 1
- /* set clk, cmd, dat pad driving */
- sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 4);
-#else
- sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0);
-#endif
-
- /* set sampling edge */
-
- /* write crc timeout detection */
- sdr_set_field(host->base + MSDC_PATCH_BIT0, 1 << 30, 1);
-
- /* Configure to default data timeout */
- sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC);
-
- msdc_set_buswidth(host, MMC_BUS_WIDTH_1);
-}
-
-/* called by msdc_drv_remove */
-static void msdc_deinit_hw(struct msdc_host *host)
-{
- /* Disable and clear all interrupts */
- sdr_clr_bits(host->base + MSDC_INTEN, readl(host->base + MSDC_INTEN));
- writel(readl(host->base + MSDC_INT), host->base + MSDC_INT);
-
- /* Disable card detection */
- msdc_enable_cd_irq(host, 0);
-}
-
-/* init gpd and bd list in msdc_drv_probe */
-static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
-{
- struct gpd *gpd = dma->gpd;
- struct bd *bd = dma->bd;
- int i;
-
-	/* We only support one gpd, but gpd->next must be set for descriptor
-	 * DMA. That's why we allocate 2 gpd structures.
-	 */
-
- memset(gpd, 0, sizeof(struct gpd) * 2);
-
- gpd->bdp = 1; /* hwo, cs, bd pointer */
- gpd->ptr = (void *)dma->bd_addr; /* physical address */
- gpd->next = (void *)((u32)dma->gpd_addr + sizeof(struct gpd));
-
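-	/* Chain the buffer descriptors so the controller can walk the list;
-	 * the last entry keeps the NULL next pointer left by memset().
-	 */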
- memset(bd, 0, sizeof(struct bd) * MAX_BD_NUM);
- for (i = 0; i < (MAX_BD_NUM - 1); i++)
- bd[i].next = (void *)(dma->bd_addr + sizeof(*bd) * (i + 1));
-}
-
-static int msdc_drv_probe(struct platform_device *pdev)
-{
- struct resource *res;
- __iomem void *base;
- struct mmc_host *mmc;
- struct msdc_host *host;
- struct msdc_hw *hw;
- int ret;
-
- hw = &msdc0_hw;
-
- if (of_property_read_bool(pdev->dev.of_node, "mtk,wp-en"))
- msdc0_hw.flags |= MSDC_WP_PIN_EN;
-
- /* Allocate MMC host for this device */
- mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
- if (!mmc)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto host_free;
- }
-
- /* Set host parameters to mmc */
- mmc->ops = &mt_msdc_ops;
- mmc->f_min = HOST_MIN_MCLK;
- mmc->f_max = HOST_MAX_MCLK;
- mmc->ocr_avail = MSDC_OCR_AVAIL;
-
- mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
-
- //TODO: read this as bus-width from dt (via mmc_of_parse)
- mmc->caps |= MMC_CAP_4_BIT_DATA;
-
- cd_active_low = !of_property_read_bool(pdev->dev.of_node, "mediatek,cd-high");
-
- if (of_property_read_bool(pdev->dev.of_node, "mediatek,cd-poll"))
- mmc->caps |= MMC_CAP_NEEDS_POLL;
-
- /* MMC core transfer sizes tunable parameters */
- mmc->max_segs = MAX_HW_SGMTS;
-
- mmc->max_seg_size = MAX_SGMT_SZ;
- mmc->max_blk_size = HOST_MAX_BLKSZ;
- mmc->max_req_size = MAX_REQ_SZ;
- mmc->max_blk_count = mmc->max_req_size;
-
- host = mmc_priv(mmc);
- host->hw = hw;
- host->mmc = mmc;
- host->id = pdev->id;
- if (host->id < 0 || host->id >= 4)
- host->id = 0;
- host->error = 0;
-
- host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0) {
- ret = -EINVAL;
- goto host_free;
- }
-
- host->base = base;
-	host->mclk = 0;                   /* mclk: the clock requested by the mmc sub-system */
-	host->hclk = hclks[hw->clk_src];  /* hclk: clock of the clock source feeding the msdc controller */
-	host->sclk = 0;                   /* sclk: the real clock after division */
- host->pm_state = PMSG_RESUME;
- host->suspend = 0;
- host->core_clkon = 0;
- host->card_clkon = 0;
- host->core_power = 0;
- host->power_mode = MMC_POWER_OFF;
-// host->card_inserted = hw->flags & MSDC_REMOVABLE ? 0 : 1;
- host->timeout_ns = 0;
- host->timeout_clks = DEFAULT_DTOC * 65536;
-
- host->mrq = NULL;
- //init_MUTEX(&host->sem); /* we don't need to support multiple threads access */
-
- dma_coerce_mask_and_coherent(mmc_dev(mmc), DMA_BIT_MASK(32));
-
-	/* using dma_alloc_coherent */ /* TODO: use one allocation for all 4 slots */
- host->dma.gpd = dma_alloc_coherent(&pdev->dev,
- MAX_GPD_NUM * sizeof(struct gpd),
- &host->dma.gpd_addr, GFP_KERNEL);
- host->dma.bd = dma_alloc_coherent(&pdev->dev,
- MAX_BD_NUM * sizeof(struct bd),
- &host->dma.bd_addr, GFP_KERNEL);
- if (!host->dma.gpd || !host->dma.bd) {
- ret = -ENOMEM;
- goto release_mem;
- }
- msdc_init_gpd_bd(host, &host->dma);
-
- INIT_DELAYED_WORK(&host->card_delaywork, msdc_tasklet_card);
- spin_lock_init(&host->lock);
- msdc_init_hw(host);
-
-	/* TODO: check whether flags 0 is correct; the mtk-sd driver uses
-	 * IRQF_TRIGGER_LOW | IRQF_ONESHOT for flags.
-	 *
-	 * With flags 0 the trigger polarity is determined by the device
-	 * tree, but the oneshot flag is not set; that may be acceptable
-	 * because the SoC could be oneshot safe.
-	 */
- ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq, 0, pdev->name,
- host);
- if (ret)
- goto release;
-
- platform_set_drvdata(pdev, mmc);
-
- ret = mmc_add_host(mmc);
- if (ret)
- goto release;
-
- /* Config card detection pin and enable interrupts */
- if (hw->flags & MSDC_CD_PIN_EN) { /* set for card */
- msdc_enable_cd_irq(host, 1);
- } else {
- msdc_enable_cd_irq(host, 0);
- }
-
- return 0;
-
-release:
- platform_set_drvdata(pdev, NULL);
- msdc_deinit_hw(host);
- cancel_delayed_work_sync(&host->card_delaywork);
-
-release_mem:
- if (host->dma.gpd)
- dma_free_coherent(&pdev->dev, MAX_GPD_NUM * sizeof(struct gpd),
- host->dma.gpd, host->dma.gpd_addr);
- if (host->dma.bd)
- dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct bd),
- host->dma.bd, host->dma.bd_addr);
-host_free:
- mmc_free_host(mmc);
-
- return ret;
-}
-
-/* 4 devices share one driver; "drvdata" is used to tell them apart */
-static int msdc_drv_remove(struct platform_device *pdev)
-{
- struct mmc_host *mmc;
- struct msdc_host *host;
-
- mmc = platform_get_drvdata(pdev);
- BUG_ON(!mmc);
-
- host = mmc_priv(mmc);
- BUG_ON(!host);
-
- dev_err(mmc_dev(host->mmc), "%d -> removed !!!\n",
- host->id);
-
- platform_set_drvdata(pdev, NULL);
- mmc_remove_host(host->mmc);
- msdc_deinit_hw(host);
-
- cancel_delayed_work_sync(&host->card_delaywork);
-
- dma_free_coherent(&pdev->dev, MAX_GPD_NUM * sizeof(struct gpd),
- host->dma.gpd, host->dma.gpd_addr);
- dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct bd),
- host->dma.bd, host->dma.bd_addr);
-
- mmc_free_host(host->mmc);
-
- return 0;
-}
-
-/* Fix me: Power Flow */
-#ifdef CONFIG_PM
-
-static void msdc_drv_pm(struct platform_device *pdev, pm_message_t state)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
-
- if (mmc) {
- struct msdc_host *host = mmc_priv(mmc);
-
- msdc_pm(state, (void *)host);
- }
-}
-
-static int msdc_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- if (state.event == PM_EVENT_SUSPEND)
- msdc_drv_pm(pdev, state);
- return 0;
-}
-
-static int msdc_drv_resume(struct platform_device *pdev)
-{
- struct pm_message state;
-
- state.event = PM_EVENT_RESUME;
- msdc_drv_pm(pdev, state);
- return 0;
-}
-#endif
-
-static const struct of_device_id mt7620_sdhci_match[] = {
- { .compatible = "ralink,mt7620-sdhci" },
- {},
-};
-MODULE_DEVICE_TABLE(of, mt7620_sdhci_match);
-
-static struct platform_driver mt_msdc_driver = {
- .probe = msdc_drv_probe,
- .remove = msdc_drv_remove,
-#ifdef CONFIG_PM
- .suspend = msdc_drv_suspend,
- .resume = msdc_drv_resume,
-#endif
- .driver = {
- .name = DRV_NAME,
- .of_match_table = mt7620_sdhci_match,
- },
-};
-
-/*--------------------------------------------------------------------------*/
-/* module init/exit */
-/*--------------------------------------------------------------------------*/
-static int __init mt_msdc_init(void)
-{
- int ret;
- u32 reg;
-
-	// Set the pins for SDXC to SDXC mode
-	// FIXME: this should be done by pinctrl and not by the SD driver
- reg = readl((void __iomem *)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3 << 18);
- writel(reg, (void __iomem *)(RALINK_SYSCTL_BASE + 0x60));
-
- ret = platform_driver_register(&mt_msdc_driver);
- if (ret) {
-		pr_err("%s: Can't register driver\n", DRV_NAME);
- return ret;
- }
-
-#if defined(MT6575_SD_DEBUG)
- msdc_debug_proc_init();
-#endif
- return 0;
-}
-
-static void __exit mt_msdc_exit(void)
-{
- platform_driver_unregister(&mt_msdc_driver);
-}
-
-module_init(mt_msdc_init);
-module_exit(mt_msdc_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MediaTek MT6575 SD/MMC Card Driver");
-MODULE_AUTHOR("Infinity Chen <infinity.chen@mediatek.com>");
diff --git a/drivers/staging/mt7621-pci-phy/Kconfig b/drivers/staging/mt7621-pci-phy/Kconfig
index b9f6ab784ee8..263e0a91c424 100644
--- a/drivers/staging/mt7621-pci-phy/Kconfig
+++ b/drivers/staging/mt7621-pci-phy/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config PCI_MT7621_PHY
tristate "MediaTek MT7621 PCI PHY Driver"
depends on RALINK && OF
diff --git a/drivers/staging/mt7621-pci-phy/Makefile b/drivers/staging/mt7621-pci-phy/Makefile
index a970056f05c1..b4d99b9119e0 100644
--- a/drivers/staging/mt7621-pci-phy/Makefile
+++ b/drivers/staging/mt7621-pci-phy/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCI_MT7621_PHY) += pci-mt7621-phy.o
diff --git a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
index 33a8a698bdd0..a369d715378b 100644
--- a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
+++ b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
@@ -3,45 +3,19 @@ Mediatek Mt7621 PCIe PHY
Required properties:
- compatible: must be "mediatek,mt7621-pci-phy"
- reg: base address and length of the PCIe PHY block
-- #address-cells: must be 1
-- #size-cells: must be 0
-
-Each PCIe PHY should be represented by a child node
-
-Required properties For the child node:
-- reg: the PHY ID
-0 - PCIe RC 0
-1 - PCIe RC 1
-- #phy-cells: must be 0
+- #phy-cells: must be <1> for both pcie0_phy and pcie1_phy.
Example:
- pcie0_phy: pcie-phy@1a149000 {
+ pcie0_phy: pcie-phy@1e149000 {
compatible = "mediatek,mt7621-pci-phy";
- reg = <0x1a149000 0x0700>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pcie0_port: pcie-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
-
- pcie1_port: pcie-phy@1 {
- reg = <1>;
- #phy-cells = <0>;
- };
+ reg = <0x1e149000 0x0700>;
+ #phy-cells = <1>;
};
- pcie1_phy: pcie-phy@1a14a000 {
+ pcie1_phy: pcie-phy@1e14a000 {
compatible = "mediatek,mt7621-pci-phy";
- reg = <0x1a14a000 0x0700>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pcie2_port: pcie-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
+ reg = <0x1e14a000 0x0700>;
+ #phy-cells = <1>;
};
/* users of the PCIe phy */
@@ -49,6 +23,6 @@ Example:
pcie: pcie@1e140000 {
...
...
- phys = <&pcie0_port>, <&pcie1_port>, <&pcie2_port>;
+ phys = <&pcie0_phy 0>, <&pcie0_phy 1>, <&pcie1_phy 0>;
phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
-	};
\ No newline at end of file
+ };
diff --git a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
index d3ca2f019112..2576f179e30a 100644
--- a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
+++ b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
@@ -11,72 +11,75 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
#include <mt7621.h>
#include <ralink_regs.h>
-#define RALINK_CLKCFG1 0x30
-#define CHIP_REV_MT7621_E2 0x0101
-
-#define PCIE_PORT_CLK_EN(x) BIT(24 + (x))
-
-#define RG_PE1_PIPE_REG 0x02c
-#define RG_PE1_PIPE_RST BIT(12)
-#define RG_PE1_PIPE_CMD_FRC BIT(4)
-
-#define RG_P0_TO_P1_WIDTH 0x100
-#define RG_PE1_H_LCDDS_REG 0x49c
-#define RG_PE1_H_LCDDS_PCW GENMASK(30, 0)
-#define RG_PE1_H_LCDDS_PCW_VAL(x) ((0x7fffffff & (x)) << 0)
-
-#define RG_PE1_FRC_H_XTAL_REG 0x400
-#define RG_PE1_FRC_H_XTAL_TYPE BIT(8)
-#define RG_PE1_H_XTAL_TYPE GENMASK(10, 9)
-#define RG_PE1_H_XTAL_TYPE_VAL(x) ((0x3 & (x)) << 9)
-
-#define RG_PE1_FRC_PHY_REG 0x000
-#define RG_PE1_FRC_PHY_EN BIT(4)
-#define RG_PE1_PHY_EN BIT(5)
-
-#define RG_PE1_H_PLL_REG 0x490
-#define RG_PE1_H_PLL_BC GENMASK(23, 22)
-#define RG_PE1_H_PLL_BC_VAL(x) ((0x3 & (x)) << 22)
-#define RG_PE1_H_PLL_BP GENMASK(21, 18)
-#define RG_PE1_H_PLL_BP_VAL(x) ((0xf & (x)) << 18)
-#define RG_PE1_H_PLL_IR GENMASK(15, 12)
-#define RG_PE1_H_PLL_IR_VAL(x) ((0xf & (x)) << 12)
-#define RG_PE1_H_PLL_IC GENMASK(11, 8)
-#define RG_PE1_H_PLL_IC_VAL(x) ((0xf & (x)) << 8)
-#define RG_PE1_H_PLL_PREDIV GENMASK(7, 6)
-#define RG_PE1_H_PLL_PREDIV_VAL(x) ((0x3 & (x)) << 6)
-#define RG_PE1_PLL_DIVEN GENMASK(3, 1)
-#define RG_PE1_PLL_DIVEN_VAL(x) ((0x7 & (x)) << 1)
-
-#define RG_PE1_H_PLL_FBKSEL_REG 0x4bc
-#define RG_PE1_H_PLL_FBKSEL GENMASK(5, 4)
-#define RG_PE1_H_PLL_FBKSEL_VAL(x) ((0x3 & (x)) << 4)
-
-#define RG_PE1_H_LCDDS_SSC_PRD_REG 0x4a4
-#define RG_PE1_H_LCDDS_SSC_PRD GENMASK(15, 0)
-#define RG_PE1_H_LCDDS_SSC_PRD_VAL(x) ((0xffff & (x)) << 0)
-
-#define RG_PE1_H_LCDDS_SSC_DELTA_REG 0x4a8
-#define RG_PE1_H_LCDDS_SSC_DELTA GENMASK(11, 0)
-#define RG_PE1_H_LCDDS_SSC_DELTA_VAL(x) ((0xfff & (x)) << 0)
-#define RG_PE1_H_LCDDS_SSC_DELTA1 GENMASK(27, 16)
-#define RG_PE1_H_LCDDS_SSC_DELTA1_VAL(x) ((0xff & (x)) << 16)
-
-#define RG_PE1_LCDDS_CLK_PH_INV_REG 0x4a0
-#define RG_PE1_LCDDS_CLK_PH_INV BIT(5)
-
-#define RG_PE1_H_PLL_BR_REG 0x4ac
-#define RG_PE1_H_PLL_BR GENMASK(18, 16)
-#define RG_PE1_H_PLL_BR_VAL(x) ((0x7 & (x)) << 16)
-
-#define RG_PE1_MSTCKDIV_REG 0x414
-#define RG_PE1_MSTCKDIV GENMASK(7, 6)
-#define RG_PE1_MSTCKDIV_VAL(x) ((0x3 & (x)) << 6)
-
-#define RG_PE1_FRC_MSTCKDIV BIT(5)
+#define RALINK_CLKCFG1 0x30
+
+#define PCIE_PORT_CLK_EN(x) BIT(24 + (x))
+
+#define RG_PE1_PIPE_REG 0x02c
+#define RG_PE1_PIPE_RST BIT(12)
+#define RG_PE1_PIPE_CMD_FRC BIT(4)
+
+#define RG_P0_TO_P1_WIDTH 0x100
+#define RG_PE1_H_LCDDS_REG 0x49c
+#define RG_PE1_H_LCDDS_PCW GENMASK(30, 0)
+#define RG_PE1_H_LCDDS_PCW_VAL(x) ((0x7fffffff & (x)) << 0)
+
+#define RG_PE1_FRC_H_XTAL_REG 0x400
+#define RG_PE1_FRC_H_XTAL_TYPE BIT(8)
+#define RG_PE1_H_XTAL_TYPE GENMASK(10, 9)
+#define RG_PE1_H_XTAL_TYPE_VAL(x) ((0x3 & (x)) << 9)
+
+#define RG_PE1_FRC_PHY_REG 0x000
+#define RG_PE1_FRC_PHY_EN BIT(4)
+#define RG_PE1_PHY_EN BIT(5)
+
+#define RG_PE1_H_PLL_REG 0x490
+#define RG_PE1_H_PLL_BC GENMASK(23, 22)
+#define RG_PE1_H_PLL_BC_VAL(x) ((0x3 & (x)) << 22)
+#define RG_PE1_H_PLL_BP GENMASK(21, 18)
+#define RG_PE1_H_PLL_BP_VAL(x) ((0xf & (x)) << 18)
+#define RG_PE1_H_PLL_IR GENMASK(15, 12)
+#define RG_PE1_H_PLL_IR_VAL(x) ((0xf & (x)) << 12)
+#define RG_PE1_H_PLL_IC GENMASK(11, 8)
+#define RG_PE1_H_PLL_IC_VAL(x) ((0xf & (x)) << 8)
+#define RG_PE1_H_PLL_PREDIV GENMASK(7, 6)
+#define RG_PE1_H_PLL_PREDIV_VAL(x) ((0x3 & (x)) << 6)
+#define RG_PE1_PLL_DIVEN GENMASK(3, 1)
+#define RG_PE1_PLL_DIVEN_VAL(x) ((0x7 & (x)) << 1)
+
+#define RG_PE1_H_PLL_FBKSEL_REG 0x4bc
+#define RG_PE1_H_PLL_FBKSEL GENMASK(5, 4)
+#define RG_PE1_H_PLL_FBKSEL_VAL(x) ((0x3 & (x)) << 4)
+
+#define RG_PE1_H_LCDDS_SSC_PRD_REG 0x4a4
+#define RG_PE1_H_LCDDS_SSC_PRD GENMASK(15, 0)
+#define RG_PE1_H_LCDDS_SSC_PRD_VAL(x) ((0xffff & (x)) << 0)
+
+#define RG_PE1_H_LCDDS_SSC_DELTA_REG 0x4a8
+#define RG_PE1_H_LCDDS_SSC_DELTA GENMASK(11, 0)
+#define RG_PE1_H_LCDDS_SSC_DELTA_VAL(x) ((0xfff & (x)) << 0)
+#define RG_PE1_H_LCDDS_SSC_DELTA1 GENMASK(27, 16)
+#define RG_PE1_H_LCDDS_SSC_DELTA1_VAL(x) ((0xff & (x)) << 16)
+
+#define RG_PE1_LCDDS_CLK_PH_INV_REG 0x4a0
+#define RG_PE1_LCDDS_CLK_PH_INV BIT(5)
+
+#define RG_PE1_H_PLL_BR_REG 0x4ac
+#define RG_PE1_H_PLL_BR GENMASK(18, 16)
+#define RG_PE1_H_PLL_BR_VAL(x) ((0x7 & (x)) << 16)
+
+#define RG_PE1_MSTCKDIV_REG 0x414
+#define RG_PE1_MSTCKDIV GENMASK(7, 6)
+#define RG_PE1_MSTCKDIV_VAL(x) ((0x3 & (x)) << 6)
+
+#define RG_PE1_FRC_MSTCKDIV BIT(5)
+
+#define MAX_PHYS 2
/**
* struct mt7621_pci_phy_instance - Mt7621 Pcie PHY device
@@ -93,24 +96,32 @@ struct mt7621_pci_phy_instance {
/**
* struct mt7621_pci_phy - Mt7621 Pcie PHY core
* @dev: pointer to device
+ * @regmap: kernel regmap pointer
* @phys: pointer to Mt7621 PHY device
* @nphys: number of PHY devices for this core
+ * @bypass_pipe_rst: mark if 'mt7621_bypass_pipe_rst'
+ * needs to be executed. Depends on chip revision.
*/
struct mt7621_pci_phy {
struct device *dev;
+ struct regmap *regmap;
struct mt7621_pci_phy_instance **phys;
int nphys;
+ bool bypass_pipe_rst;
};
-static inline u32 phy_read(struct mt7621_pci_phy_instance *instance, u32 reg)
+static inline u32 phy_read(struct mt7621_pci_phy *phy, u32 reg)
{
- return readl(instance->port_base + reg);
+ u32 val;
+
+ regmap_read(phy->regmap, reg, &val);
+
+ return val;
}
-static inline void phy_write(struct mt7621_pci_phy_instance *instance,
- u32 val, u32 reg)
+static inline void phy_write(struct mt7621_pci_phy *phy, u32 val, u32 reg)
{
- writel(val, instance->port_base + reg);
+ regmap_write(phy->regmap, reg, val);
}
static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy,
@@ -120,10 +131,10 @@ static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy,
RG_PE1_PIPE_REG : RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH;
u32 reg;
- reg = phy_read(instance, offset);
+ reg = phy_read(phy, offset);
reg &= ~(RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC);
reg |= (RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC);
- phy_write(instance, reg, offset);
+ phy_write(phy, reg, offset);
}
static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy,
@@ -137,72 +148,72 @@ static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy,
reg = (reg >> 6) & 0x7;
/* Set PCIe Port PHY to disable SSC */
/* Debug Xtal Type */
- val = phy_read(instance, RG_PE1_FRC_H_XTAL_REG);
+ val = phy_read(phy, RG_PE1_FRC_H_XTAL_REG);
val &= ~(RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE);
val |= RG_PE1_FRC_H_XTAL_TYPE;
val |= RG_PE1_H_XTAL_TYPE_VAL(0x00);
- phy_write(instance, val, RG_PE1_FRC_H_XTAL_REG);
+ phy_write(phy, val, RG_PE1_FRC_H_XTAL_REG);
/* disable port */
offset = (instance->index != 1) ?
RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
- val = phy_read(instance, offset);
+ val = phy_read(phy, offset);
val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
val |= RG_PE1_FRC_PHY_EN;
- phy_write(instance, val, offset);
+ phy_write(phy, val, offset);
/* Set Pre-divider ratio (for host mode) */
- val = phy_read(instance, RG_PE1_H_PLL_REG);
+ val = phy_read(phy, RG_PE1_H_PLL_REG);
val &= ~(RG_PE1_H_PLL_PREDIV);
if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */
val |= RG_PE1_H_PLL_PREDIV_VAL(0x01);
- phy_write(instance, val, RG_PE1_H_PLL_REG);
+ phy_write(phy, val, RG_PE1_H_PLL_REG);
dev_info(dev, "Xtal is 40MHz\n");
} else { /* 25MHz | 20MHz Xtal */
val |= RG_PE1_H_PLL_PREDIV_VAL(0x00);
- phy_write(instance, val, RG_PE1_H_PLL_REG);
+ phy_write(phy, val, RG_PE1_H_PLL_REG);
if (reg >= 6) {
dev_info(dev, "Xtal is 25MHz\n");
/* Select feedback clock */
- val = phy_read(instance, RG_PE1_H_PLL_FBKSEL_REG);
+ val = phy_read(phy, RG_PE1_H_PLL_FBKSEL_REG);
val &= ~(RG_PE1_H_PLL_FBKSEL);
val |= RG_PE1_H_PLL_FBKSEL_VAL(0x01);
- phy_write(instance, val, RG_PE1_H_PLL_FBKSEL_REG);
+ phy_write(phy, val, RG_PE1_H_PLL_FBKSEL_REG);
/* DDS NCPO PCW (for host mode) */
- val = phy_read(instance, RG_PE1_H_LCDDS_SSC_PRD_REG);
+ val = phy_read(phy, RG_PE1_H_LCDDS_SSC_PRD_REG);
val &= ~(RG_PE1_H_LCDDS_SSC_PRD);
val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000);
- phy_write(instance, val, RG_PE1_H_LCDDS_SSC_PRD_REG);
+ phy_write(phy, val, RG_PE1_H_LCDDS_SSC_PRD_REG);
/* DDS SSC dither period control */
- val = phy_read(instance, RG_PE1_H_LCDDS_SSC_PRD_REG);
+ val = phy_read(phy, RG_PE1_H_LCDDS_SSC_PRD_REG);
val &= ~(RG_PE1_H_LCDDS_SSC_PRD);
val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d);
- phy_write(instance, val, RG_PE1_H_LCDDS_SSC_PRD_REG);
+ phy_write(phy, val, RG_PE1_H_LCDDS_SSC_PRD_REG);
/* DDS SSC dither amplitude control */
- val = phy_read(instance, RG_PE1_H_LCDDS_SSC_DELTA_REG);
+ val = phy_read(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG);
val &= ~(RG_PE1_H_LCDDS_SSC_DELTA |
RG_PE1_H_LCDDS_SSC_DELTA1);
val |= RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a);
val |= RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a);
- phy_write(instance, val, RG_PE1_H_LCDDS_SSC_DELTA_REG);
+ phy_write(phy, val, RG_PE1_H_LCDDS_SSC_DELTA_REG);
} else {
dev_info(dev, "Xtal is 20MHz\n");
}
}
/* DDS clock inversion */
- val = phy_read(instance, RG_PE1_LCDDS_CLK_PH_INV_REG);
+ val = phy_read(phy, RG_PE1_LCDDS_CLK_PH_INV_REG);
val &= ~(RG_PE1_LCDDS_CLK_PH_INV);
val |= RG_PE1_LCDDS_CLK_PH_INV;
- phy_write(instance, val, RG_PE1_LCDDS_CLK_PH_INV_REG);
+ phy_write(phy, val, RG_PE1_LCDDS_CLK_PH_INV_REG);
/* Set PLL bits */
- val = phy_read(instance, RG_PE1_H_PLL_REG);
+ val = phy_read(phy, RG_PE1_H_PLL_REG);
val &= ~(RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR |
RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN);
val |= RG_PE1_H_PLL_BC_VAL(0x02);
@@ -210,19 +221,19 @@ static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy,
val |= RG_PE1_H_PLL_IR_VAL(0x02);
val |= RG_PE1_H_PLL_IC_VAL(0x01);
val |= RG_PE1_PLL_DIVEN_VAL(0x02);
- phy_write(instance, val, RG_PE1_H_PLL_REG);
+ phy_write(phy, val, RG_PE1_H_PLL_REG);
- val = phy_read(instance, RG_PE1_H_PLL_BR_REG);
+ val = phy_read(phy, RG_PE1_H_PLL_BR_REG);
val &= ~(RG_PE1_H_PLL_BR);
val |= RG_PE1_H_PLL_BR_VAL(0x00);
- phy_write(instance, val, RG_PE1_H_PLL_BR_REG);
+ phy_write(phy, val, RG_PE1_H_PLL_BR_REG);
if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */
/* set force mode enable of da_pe1_mstckdiv */
- val = phy_read(instance, RG_PE1_MSTCKDIV_REG);
+ val = phy_read(phy, RG_PE1_MSTCKDIV_REG);
val &= ~(RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV);
val |= (RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV);
- phy_write(instance, val, RG_PE1_MSTCKDIV_REG);
+ phy_write(phy, val, RG_PE1_MSTCKDIV_REG);
}
}
@@ -230,9 +241,8 @@ static int mt7621_pci_phy_init(struct phy *phy)
{
struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent);
- u32 chip_rev_id = rt_sysc_r32(SYSC_REG_CHIP_REV);
- if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
+ if (mphy->bypass_pipe_rst)
mt7621_bypass_pipe_rst(mphy, instance);
mt7621_set_phy_for_ssc(mphy, instance);
@@ -243,15 +253,16 @@ static int mt7621_pci_phy_init(struct phy *phy)
static int mt7621_pci_phy_power_on(struct phy *phy)
{
struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
+ struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent);
u32 offset = (instance->index != 1) ?
RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
u32 val;
/* Enable PHY and disable force mode */
- val = phy_read(instance, offset);
+ val = phy_read(mphy, offset);
val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
val |= (RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
- phy_write(instance, val, offset);
+ phy_write(mphy, val, offset);
return 0;
}
@@ -259,15 +270,16 @@ static int mt7621_pci_phy_power_on(struct phy *phy)
static int mt7621_pci_phy_power_off(struct phy *phy)
{
struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
+ struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent);
u32 offset = (instance->index != 1) ?
RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
u32 val;
/* Disable PHY */
- val = phy_read(instance, offset);
+ val = phy_read(mphy, offset);
val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
val |= RG_PE1_FRC_PHY_EN;
- phy_write(instance, val, offset);
+ phy_write(mphy, val, offset);
return 0;
}
@@ -289,76 +301,100 @@ static const struct phy_ops mt7621_pci_phy_ops = {
.owner = THIS_MODULE,
};
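+/* Translate a one-cell PHY specifier (the port index) into the matching
+ * PHY instance; a specifier without arguments falls back to port 0.
+ */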
+static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct mt7621_pci_phy *mt7621_phy = dev_get_drvdata(dev);
+
+ if (args->args_count == 0)
+ return mt7621_phy->phys[0]->phy;
+
+ if (WARN_ON(args->args[0] >= MAX_PHYS))
+ return ERR_PTR(-ENODEV);
+
+ return mt7621_phy->phys[args->args[0]]->phy;
+}
+
+static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
+ { .soc_id = "mt7621", .revision = "E2" }
+};
+
+static const struct regmap_config mt7621_pci_phy_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x700,
+};
+
static int mt7621_pci_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *child_np;
+ const struct soc_device_attribute *attr;
struct phy_provider *provider;
struct mt7621_pci_phy *phy;
- struct resource res;
- int port, ret;
+ struct resource *res;
+ int port;
void __iomem *port_base;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- phy->nphys = of_get_child_count(np);
+ phy->nphys = MAX_PHYS;
phy->phys = devm_kcalloc(dev, phy->nphys,
sizeof(*phy->phys), GFP_KERNEL);
if (!phy->phys)
return -ENOMEM;
+ attr = soc_device_match(mt7621_pci_quirks_match);
+ if (attr)
+ phy->bypass_pipe_rst = true;
+
phy->dev = dev;
platform_set_drvdata(pdev, phy);
- ret = of_address_to_resource(np, 0, &res);
- if (ret) {
- dev_err(dev, "failed to get address resource(id-%d)\n", port);
- return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get address resource\n");
+ return -ENXIO;
}
- port_base = devm_ioremap_resource(dev, &res);
+ port_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port_base)) {
dev_err(dev, "failed to remap phy regs\n");
return PTR_ERR(port_base);
}
- port = 0;
- for_each_child_of_node(np, child_np) {
+ phy->regmap = devm_regmap_init_mmio(phy->dev, port_base,
+ &mt7621_pci_phy_regmap_config);
+ if (IS_ERR(phy->regmap))
+ return PTR_ERR(phy->regmap);
+
+ for (port = 0; port < MAX_PHYS; port++) {
struct mt7621_pci_phy_instance *instance;
struct phy *pphy;
instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
- if (!instance) {
- ret = -ENOMEM;
- goto put_child;
- }
+ if (!instance)
+ return -ENOMEM;
phy->phys[port] = instance;
- pphy = devm_phy_create(dev, child_np, &mt7621_pci_phy_ops);
+ pphy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
- ret = PTR_ERR(phy);
- goto put_child;
+ return PTR_ERR(phy);
}
instance->port_base = port_base;
instance->phy = pphy;
instance->index = port;
phy_set_drvdata(pphy, instance);
- port++;
}
- provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ provider = devm_of_phy_provider_register(dev, mt7621_pcie_phy_of_xlate);
return PTR_ERR_OR_ZERO(provider);
-
-put_child:
- of_node_put(child_np);
- return ret;
}
static const struct of_device_id mt7621_pci_phy_ids[] = {
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index c8fa17cfa807..af928b75a940 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config PCI_MT7621
tristate "MediaTek MT7621 PCI Controller"
depends on RALINK
diff --git a/drivers/staging/mt7621-pci/Makefile b/drivers/staging/mt7621-pci/Makefile
index d4655a726b61..f4e651cf7ce3 100644
--- a/drivers/staging/mt7621-pci/Makefile
+++ b/drivers/staging/mt7621-pci/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCI_MT7621) += pci-mt7621.o
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 379ae780c691..03d919a94552 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -275,7 +275,7 @@ static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie)
break;
}
- if (res != NULL)
+ if (res)
of_pci_range_to_resource(&range, node, res);
}
diff --git a/drivers/staging/mt7621-pinctrl/Kconfig b/drivers/staging/mt7621-pinctrl/Kconfig
index fc3612711307..f42974026480 100644
--- a/drivers/staging/mt7621-pinctrl/Kconfig
+++ b/drivers/staging/mt7621-pinctrl/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config PINCTRL_RT2880
bool "RT2800 pinctrl driver for RALINK/Mediatek SOCs"
depends on RALINK
diff --git a/drivers/staging/mt7621-pinctrl/Makefile b/drivers/staging/mt7621-pinctrl/Makefile
index 856102137a1e..49445f40c3cd 100644
--- a/drivers/staging/mt7621-pinctrl/Makefile
+++ b/drivers/staging/mt7621-pinctrl/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o
ccflags-y += -I$(srctree)/drivers/pinctrl
diff --git a/drivers/staging/mt7621-spi/Kconfig b/drivers/staging/mt7621-spi/Kconfig
deleted file mode 100644
index 0b90f4cfa426..000000000000
--- a/drivers/staging/mt7621-spi/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config SPI_MT7621
- tristate "MediaTek MT7621 SPI Controller"
- depends on RALINK
- help
- This selects a driver for the MediaTek MT7621 SPI Controller.
-
diff --git a/drivers/staging/mt7621-spi/Makefile b/drivers/staging/mt7621-spi/Makefile
deleted file mode 100644
index 3be508f63bac..000000000000
--- a/drivers/staging/mt7621-spi/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
diff --git a/drivers/staging/mt7621-spi/TODO b/drivers/staging/mt7621-spi/TODO
deleted file mode 100644
index fdbc5002c32a..000000000000
--- a/drivers/staging/mt7621-spi/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-
-- general code review and clean up
-- ensure device-tree requirements are documented
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/netlogic/Kconfig b/drivers/staging/netlogic/Kconfig
index c25a00dd2d5f..b2a4d4586697 100644
--- a/drivers/staging/netlogic/Kconfig
+++ b/drivers/staging/netlogic/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config NETLOGIC_XLR_NET
tristate "Netlogic XLR/XLS network device"
depends on CPU_XLR
diff --git a/drivers/staging/netlogic/Makefile b/drivers/staging/netlogic/Makefile
index f7355e3e9c4c..7e2902af26a3 100644
--- a/drivers/staging/netlogic/Makefile
+++ b/drivers/staging/netlogic/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NETLOGIC_XLR_NET) += xlr_net.o platform_net.o
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 8554fcf4321b..07a06c532dee 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -87,8 +87,7 @@ static inline unsigned char *xlr_alloc_skb(void)
if (!skb)
return NULL;
skb_data = skb->data;
- skb_put(skb, MAC_SKB_BACK_PTR_SIZE);
- skb_pull(skb, MAC_SKB_BACK_PTR_SIZE);
+ skb_reserve(skb, MAC_SKB_BACK_PTR_SIZE);
memcpy(skb_data, &skb, buf_len);
return skb->data;
@@ -185,10 +184,8 @@ static int xlr_net_fill_rx_ring(struct net_device *ndev)
for (i = 0; i < MAX_FRIN_SPILL / 4; i++) {
skb_data = xlr_alloc_skb();
- if (!skb_data) {
- netdev_err(ndev, "SKB allocation failed\n");
+ if (!skb_data)
return -ENOMEM;
- }
send_to_rfr_fifo(priv, skb_data);
}
netdev_info(ndev, "Rx ring setup done\n");
@@ -389,10 +386,8 @@ static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
base = priv->base_addr;
spill_size = size;
spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_ATOMIC);
- if (!spill) {
- pr_err("Unable to allocate memory for spill area!\n");
+ if (!spill)
return ZERO_SIZE_PTR;
- }
spill = PTR_ALIGN(spill, SMP_CACHE_BYTES);
phys_addr = virt_to_phys(spill);
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index e3a89fb1a4a2..5c12cacf75e1 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config MFD_NVEC
tristate "NV Tegra Embedded Controller SMBus Interface"
depends on I2C && GPIOLIB && ARCH_TEGRA
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
index 0b8f1d9c7056..6a5d842ee0f2 100644
--- a/drivers/staging/octeon-usb/Kconfig
+++ b/drivers/staging/octeon-usb/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config OCTEON_USB
tristate "Cavium Networks Octeon USB support"
depends on CAVIUM_OCTEON_SOC && USB
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 14982b6472a0..aeec16314e0d 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -2385,13 +2385,11 @@ static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
*/
static int cvmx_usb_get_frame_number(struct octeon_hcd *usb)
{
- int frame_number;
union cvmx_usbcx_hfnum usbc_hfnum;
usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- frame_number = usbc_hfnum.s.frnum;
- return frame_number;
+ return usbc_hfnum.s.frnum;
}
static void cvmx_usb_transfer_control(struct octeon_hcd *usb,
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
index ae7ae50071ae..9ed619c93a4e 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ b/drivers/staging/octeon-usb/octeon-hcd.h
@@ -1711,7 +1711,7 @@ union cvmx_usbnx_usbp_ctl_status {
* Indicates an internal error was detected during
* the BIST sequence.
* @tdata_out: PHY Test Data Out.
- * Presents either internaly generated signals or
+ * Presents either internally generated signals or
* test register contents, based upon the value of
* test_data_out_sel.
* @siddq: Drives the USBP (USB-PHY) SIDDQ input.
@@ -1737,7 +1737,7 @@ union cvmx_usbnx_usbp_ctl_status {
* to D+. When an A/B device is acting as a host
* (downstream-facing port), dp_pulldown and
* dm_pulldown are enabled. This must not toggle
- * during normal opeartion.
+ * during normal operation.
* @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
* This signal enables the pull-down resistance on
* the D- line. '1' pull down-resistance is connected
@@ -1745,7 +1745,7 @@ union cvmx_usbnx_usbp_ctl_status {
* to D-. When an A/B device is acting as a host
* (downstream-facing port), dp_pulldown and
* dm_pulldown are enabled. This must not toggle
- * during normal opeartion.
+ * during normal operation.
* @hst_mode: When '0' the USB is acting as HOST, when '1'
* USB is acting as device. This field needs to be
* set while the USB is in reset.
@@ -1784,7 +1784,7 @@ union cvmx_usbnx_usbp_ctl_status {
* Used to activate BIST in the PHY.
* @tdata_sel: Test Data Out Select.
* '1' test_data_out[3:0] (PHY) register contents
- * are output. '0' internaly generated signals are
+ * are output. '0' internally generated signals are
* output.
* @taddr_in: Mode Address for Test Interface.
* Specifies the register address for writing to or
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 6e1d5f8d3ec1..1e3012b9991c 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
depends on CAVIUM_OCTEON_SOC && NETDEVICES
diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO
new file mode 100644
index 000000000000..67a0a1f6b922
--- /dev/null
+++ b/drivers/staging/octeon/TODO
@@ -0,0 +1,9 @@
+This driver is functional and supports Ethernet on OCTEON+/OCTEON2/OCTEON3
+chips at least up to CN7030.
+
+TODO:
+ - general code review and clean up
+ - make driver self-contained instead of being split between staging and
+ arch/mips/cavium-octeon.
+
+Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 317c9720467c..20f513fbaa85 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -214,8 +214,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
* Get the number of skbuffs in use
* by the hardware
*/
- skb_to_free = cvmx_fau_fetch_and_add32(
- priv->fau + qos * 4, MAX_SKB_TO_FREE);
+ skb_to_free =
+ cvmx_fau_fetch_and_add32(priv->fau +
+ qos * 4,
+ MAX_SKB_TO_FREE);
}
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
priv->fau +
@@ -280,9 +282,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
- hw_buffer.s.addr = XKPHYS_TO_PHYS(
- (u64)(page_address(fs->page.p) +
- fs->page_offset));
+ hw_buffer.s.addr =
+ XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) +
+ fs->page_offset));
hw_buffer.s.size = fs->size;
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
@@ -413,8 +415,8 @@ dont_put_skbuff_in_hw:
queue_type = QUEUE_HW;
}
if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(
- CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+ FAU_TOTAL_TX_TO_CLEAN, 1);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
@@ -491,8 +493,8 @@ skip_xmit:
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
} else {
- total_to_clean = cvmx_fau_fetch_and_add32(
- FAU_TOTAL_TX_TO_CLEAN, 1);
+ total_to_clean =
+ cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
}
if (total_to_clean & 0x3ff) {
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 986db76705cc..8847a11c212f 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -421,7 +421,7 @@ int cvm_oct_common_init(struct net_device *dev)
if (priv->of_node)
mac = of_get_mac_address(priv->of_node);
- if (mac)
+ if (!IS_ERR_OR_NULL(mac))
ether_addr_copy(dev->dev_addr, mac);
else
eth_hw_addr_random(dev);
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index 192cc8d0853f..f5c716bb3413 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -1,8 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
depends on OLPC && FB
depends on I2C
- depends on BACKLIGHT_LCD_SUPPORT
depends on (GPIO_CS5535 || GPIO_CS5535=n)
select BACKLIGHT_CLASS_DEVICE
help
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
index 36c7e67fec20..cb1248c5c162 100644
--- a/drivers/staging/olpc_dcon/Makefile
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
olpc-dcon-objs += olpc_dcon.o
olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index c987aaf894e7..22d976a09785 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -97,6 +97,11 @@ struct dcon_platform_data {
int (*read_status)(u8 *status);
};
+struct dcon_gpio {
+ const char *name;
+ unsigned long flags;
+};
+
#include <linux/interrupt.h>
irqreturn_t dcon_interrupt(int irq, void *id);
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index a54286498a47..02c059897784 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -26,11 +26,6 @@ enum dcon_gpios {
OLPC_DCON_BLANK,
};
-struct dcon_gpio {
- const char *name;
- unsigned long flags;
-};
-
static const struct dcon_gpio gpios_asis[] = {
[OLPC_DCON_STAT0] = { .name = "dcon_stat0", .flags = GPIOD_ASIS },
[OLPC_DCON_STAT1] = { .name = "dcon_stat1", .flags = GPIOD_ASIS },
@@ -39,7 +34,7 @@ static const struct dcon_gpio gpios_asis[] = {
[OLPC_DCON_BLANK] = { .name = "dcon_blank", .flags = GPIOD_ASIS },
};
-struct gpio_desc *gpios[5];
+static struct gpio_desc *gpios[5];
static int dcon_init_xo_1(struct dcon_priv *dcon)
{
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index 838daa2be3ef..52cdcd2a89d6 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -7,7 +7,9 @@
#include <linux/acpi.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <asm/olpc.h>
/* TODO: this eventually belongs in linux/vx855.h */
@@ -38,6 +40,33 @@
#define PREFIX "OLPC DCON:"
+enum dcon_gpios {
+ OLPC_DCON_STAT0,
+ OLPC_DCON_STAT1,
+ OLPC_DCON_LOAD,
+};
+
+struct gpiod_lookup_table gpios_table = {
+ .dev_id = NULL,
+ .table = {
+ GPIO_LOOKUP("VX855 South Bridge", VX855_GPIO(1), "dcon_load",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("VX855 South Bridge", VX855_GPI(10), "dcon_stat0",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("VX855 South Bridge", VX855_GPI(11), "dcon_stat1",
+ GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
+static const struct dcon_gpio gpios_asis[] = {
+ [OLPC_DCON_STAT0] = { .name = "dcon_stat0", .flags = GPIOD_ASIS },
+ [OLPC_DCON_STAT1] = { .name = "dcon_stat1", .flags = GPIOD_ASIS },
+ [OLPC_DCON_LOAD] = { .name = "dcon_load", .flags = GPIOD_ASIS },
+};
+
+static struct gpio_desc *gpios[3];
+
static void dcon_clear_irq(void)
{
/* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
@@ -57,6 +86,25 @@ static int dcon_was_irq(void)
static int dcon_init_xo_1_5(struct dcon_priv *dcon)
{
unsigned int irq;
+ const struct dcon_gpio *pin = &gpios_asis[0];
+ int i;
+ int ret;
+
+ /* Add GPIO look up table */
+ gpios_table.dev_id = dev_name(&dcon->client->dev);
+ gpiod_add_lookup_table(&gpios_table);
+
+ /* Get GPIO descriptor */
+ for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
+ gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
+ pin[i].flags);
+ if (IS_ERR(gpios[i])) {
+ ret = PTR_ERR(gpios[i]);
+ pr_err("failed to request %s GPIO: %d\n", pin[i].name,
+ ret);
+ return ret;
+ }
+ }
dcon_clear_irq();
@@ -131,7 +179,7 @@ static void dcon_wiggle_xo_1_5(void)
static void dcon_set_dconload_xo_1_5(int val)
{
- gpio_set_value(VX855_GPIO(1), val);
+ gpiod_set_value(gpios[OLPC_DCON_LOAD], val);
}
static int dcon_read_status_xo_1_5(u8 *status)
@@ -140,8 +188,8 @@ static int dcon_read_status_xo_1_5(u8 *status)
return -1;
/* i believe this is the same as "inb(0x44b) & 3" */
- *status = gpio_get_value(VX855_GPI(10));
- *status |= gpio_get_value(VX855_GPI(11)) << 1;
+ *status = gpiod_get_value(gpios[OLPC_DCON_STAT0]);
+ *status |= gpiod_get_value(gpios[OLPC_DCON_STAT1]) << 1;
dcon_clear_irq();
diff --git a/drivers/staging/pi433/Kconfig b/drivers/staging/pi433/Kconfig
index c7340129dd4c..8acde0814206 100644
--- a/drivers/staging/pi433/Kconfig
+++ b/drivers/staging/pi433/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config PI433
tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
depends on SPI
diff --git a/drivers/staging/pi433/Makefile b/drivers/staging/pi433/Makefile
index 417f3e4d12b1..051132fe4dae 100644
--- a/drivers/staging/pi433/Makefile
+++ b/drivers/staging/pi433/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PI433) += pi433.o
pi433-objs := pi433_if.o rf69.o
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index b2314636dc89..c889f0bdf424 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -45,10 +45,10 @@
#include "pi433_if.h"
#include "rf69.h"
-#define N_PI433_MINORS BIT(MINORBITS) /*32*/ /* ... up to 256 */
-#define MAX_MSG_SIZE 900 /* min: FIFO_SIZE! */
-#define MSG_FIFO_SIZE 65536 /* 65536 = 2^16 */
-#define NUM_DIO 2
+#define N_PI433_MINORS BIT(MINORBITS) /*32*/ /* ... up to 256 */
+#define MAX_MSG_SIZE 900 /* min: FIFO_SIZE! */
+#define MSG_FIFO_SIZE 65536 /* 65536 = 2^16 */
+#define NUM_DIO 2
static dev_t pi433_dev;
static DEFINE_IDR(pi433_idr);
@@ -319,6 +319,12 @@ rf69_set_tx_cfg(struct pi433_device *dev, struct pi433_tx_cfg *tx_cfg)
}
if (tx_cfg->enable_sync == OPTION_ON) {
+ ret = rf69_set_sync_size(dev->spi, tx_cfg->sync_length);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_sync_values(dev->spi, tx_cfg->sync_pattern);
+ if (ret < 0)
+ return ret;
ret = rf69_enable_sync(dev->spi);
if (ret < 0)
return ret;
@@ -348,16 +354,6 @@ rf69_set_tx_cfg(struct pi433_device *dev, struct pi433_tx_cfg *tx_cfg)
return ret;
}
- /* configure sync, if enabled */
- if (tx_cfg->enable_sync == OPTION_ON) {
- ret = rf69_set_sync_size(dev->spi, tx_cfg->sync_length);
- if (ret < 0)
- return ret;
- ret = rf69_set_sync_values(dev->spi, tx_cfg->sync_pattern);
- if (ret < 0)
- return ret;
- }
-
return 0;
}
@@ -650,21 +646,19 @@ pi433_tx_thread(void *data)
disable_irq(device->irq_num[DIO0]);
device->tx_active = true;
+ /* clear fifo, set fifo threshold, set payload length */
+ retval = rf69_set_mode(spi, standby); /* this clears the fifo */
+ if (retval < 0)
+ return retval;
+
if (device->rx_active && !rx_interrupted) {
/*
* rx is currently waiting for a telegram;
* we need to set the radio module to standby
*/
- retval = rf69_set_mode(device->spi, standby);
- if (retval < 0)
- return retval;
rx_interrupted = true;
}
- /* clear fifo, set fifo threshold, set payload length */
- retval = rf69_set_mode(spi, standby); /* this clears the fifo */
- if (retval < 0)
- return retval;
retval = rf69_set_fifo_threshold(spi, FIFO_THRESHOLD);
if (retval < 0)
return retval;
@@ -742,7 +736,7 @@ pi433_tx_thread(void *data)
device->free_in_fifo == FIFO_SIZE ||
kthread_should_stop());
if (kthread_should_stop())
- dev_dbg(device->dev, "ABORT\n");
+ return 0;
/* STOP_TRANSMISSION */
dev_dbg(device->dev, "thread: Packet sent. Set mode to stby.");
@@ -971,7 +965,7 @@ static int pi433_open(struct inode *inode, struct file *filp)
/* instance data as context */
filp->private_data = instance;
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
return 0;
}
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index e19b9ce794a8..4cd16257f0aa 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -349,18 +349,51 @@ int rf69_disable_amplifier(struct spi_device *spi, u8 amplifier_mask)
int rf69_set_output_power_level(struct spi_device *spi, u8 power_level)
{
- // TODO: Dependency to PA0,1,2 setting
- power_level += 18;
+ u8 pa_level, ocp, test_pa1, test_pa2;
+ bool pa0, pa1, pa2, high_power;
+ u8 min_power_level;
+
+ // check register pa_level
+ pa_level = rf69_read_reg(spi, REG_PALEVEL);
+ pa0 = pa_level & MASK_PALEVEL_PA0;
+ pa1 = pa_level & MASK_PALEVEL_PA1;
+ pa2 = pa_level & MASK_PALEVEL_PA2;
+
+ // check high power mode
+ ocp = rf69_read_reg(spi, REG_OCP);
+ test_pa1 = rf69_read_reg(spi, REG_TESTPA1);
+ test_pa2 = rf69_read_reg(spi, REG_TESTPA2);
+ high_power = (ocp == 0x0f) && (test_pa1 == 0x5d) && (test_pa2 == 0x7c);
+
+ if (pa0 && !pa1 && !pa2) {
+ power_level += 18;
+ min_power_level = 0;
+ } else if (!pa0 && pa1 && !pa2) {
+ power_level += 18;
+ min_power_level = 16;
+ } else if (!pa0 && pa1 && pa2) {
+ if (high_power)
+ power_level += 11;
+ else
+ power_level += 14;
+ min_power_level = 16;
+ } else {
+ goto failed;
+ }
// check input value
- if (power_level > 0x1f) {
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
+ if (power_level > 0x1f)
+ goto failed;
+
+ if (power_level < min_power_level)
+ goto failed;
// write value
return rf69_read_mod_write(spi, REG_PALEVEL, MASK_PALEVEL_OUTPUT_POWER,
power_level);
+failed:
+ dev_dbg(&spi->dev, "set: illegal input param");
+ return -EINVAL;
}
int rf69_set_pa_ramp(struct spi_device *spi, enum pa_ramp pa_ramp)
@@ -624,9 +657,7 @@ int rf69_set_preamble_length(struct spi_device *spi, u16 preamble_length)
retval = rf69_write_reg(spi, REG_PREAMBLE_MSB, msb);
if (retval)
return retval;
- retval = rf69_write_reg(spi, REG_PREAMBLE_LSB, lsb);
-
- return retval;
+ return rf69_write_reg(spi, REG_PREAMBLE_LSB, lsb);
}
int rf69_enable_sync(struct spi_device *spi)
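
The rf69_set_output_power_level() rewrite above derives the register offset and the lowest legal level from which power amplifiers (PA0/PA1/PA2) are enabled and whether the boosted high-power mode is active. A small userspace sketch of just that branch logic (map_power_level() is a hypothetical helper mirroring the hunk above, not the driver's API; register reads are replaced by plain booleans):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the PA-combination handling above: returns 0 and fills
 * *reg_val on success, -1 when the requested level is unsupported.
 */
static int map_power_level(bool pa0, bool pa1, bool pa2, bool high_power,
			   unsigned int request, unsigned int *reg_val)
{
	unsigned int level = request;
	unsigned int min_level;

	if (pa0 && !pa1 && !pa2) {		/* PA0 only */
		level += 18;
		min_level = 0;
	} else if (!pa0 && pa1 && !pa2) {	/* PA1 only */
		level += 18;
		min_level = 16;
	} else if (!pa0 && pa1 && pa2) {	/* PA1 + PA2 */
		level += high_power ? 11 : 14;
		min_level = 16;
	} else {
		return -1;			/* unsupported PA combination */
	}

	if (level > 0x1f || level < min_level)
		return -1;			/* outside the register range */

	*reg_val = level;
	return 0;
}

int main(void)
{
	unsigned int v;

	if (!map_power_level(true, false, false, false, 13, &v))
		printf("PA0 only, request 13 -> OutputPower field 0x%02x\n", v);
	if (map_power_level(false, true, true, true, 25, &v))
		printf("PA1+PA2 high power, request 25 -> rejected\n");
	return 0;
}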
diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig
index a12b2c672d48..54e8029e6b1a 100644
--- a/drivers/staging/ralink-gdma/Kconfig
+++ b/drivers/staging/ralink-gdma/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DMA_RALINK
tristate "RALINK DMA support"
depends on RALINK && !SOC_RT288X
diff --git a/drivers/staging/ralink-gdma/Makefile b/drivers/staging/ralink-gdma/Makefile
index 5d917e0729bb..5c4566b2e405 100644
--- a/drivers/staging/ralink-gdma/Makefile
+++ b/drivers/staging/ralink-gdma/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index d78042eba6dd..de3e357b2640 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
* GDMA4740 DMAC support
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/dmaengine.h>
@@ -164,17 +159,11 @@ static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
}
static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
- unsigned reg, uint32_t val)
+ unsigned int reg, uint32_t val)
{
writel(val, dma_dev->base + reg);
}
-static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
-{
- return kzalloc(sizeof(struct gdma_dma_desc) +
- sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
-}
-
static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
{
if (maxburst < 2)
@@ -268,14 +257,14 @@ static int gdma_dma_terminate_all(struct dma_chan *c)
static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
- dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \
- "ctr1 %08x, intr %08x, signal %08x\n", id,
- gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
- gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
- gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
+ dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
+ id,
+ gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
+ gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
+ gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
+ gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
+ gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
+ gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
}
static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
@@ -283,7 +272,7 @@ static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
dma_addr_t src_addr, dst_addr;
struct gdma_dma_sg *sg;
- uint32_t ctrl0, ctrl1;
+ u32 ctrl0, ctrl1;
/* verify chan is already stopped */
ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
@@ -298,14 +287,14 @@ static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
if (chan->desc->direction == DMA_MEM_TO_DEV) {
src_addr = sg->src_addr;
dst_addr = chan->fifo_addr;
- ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED | \
- (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \
+ ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
+ (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
(chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
src_addr = chan->fifo_addr;
dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED | \
- (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \
+ ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
+ (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
(8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
/*
@@ -314,8 +303,8 @@ static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
*/
src_addr = sg->src_addr;
dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SW_MODE | \
- (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
+ ctrl0 = GDMA_REG_CTRL0_SW_MODE |
+ (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
(8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
} else {
dev_err(dma_dev->ddev.dev, "direction type %d error\n",
@@ -323,8 +312,8 @@ static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
return -EINVAL;
}
- ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \
- (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \
+ ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
+ (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
@@ -342,18 +331,17 @@ static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
- dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \
- "ctr1 %08x, unmask %08x, done %08x, " \
- "req %08x, ack %08x, fin %08x\n", id,
- gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
- gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
- gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
- gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
- gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
- gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
+ dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
+ id,
+ gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
+ gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
+ gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
+ gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
+ gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
+ gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
+ gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
+ gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
+ gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
}
static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
@@ -361,7 +349,7 @@ static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
dma_addr_t src_addr, dst_addr;
struct gdma_dma_sg *sg;
- uint32_t ctrl0, ctrl1;
+ u32 ctrl0, ctrl1;
/* verify chan is already stopped */
ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
@@ -377,21 +365,21 @@ static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
src_addr = sg->src_addr;
dst_addr = chan->fifo_addr;
ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
- ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
+ ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
(chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
src_addr = chan->fifo_addr;
dst_addr = sg->dst_addr;
ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
- ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
- (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \
+ ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
+ (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
GDMA_REG_CTRL1_COHERENT;
} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
src_addr = sg->src_addr;
dst_addr = sg->dst_addr;
ctrl0 = GDMA_REG_CTRL0_SW_MODE;
- ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
- (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \
+ ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
+ (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
GDMA_REG_CTRL1_COHERENT;
} else {
dev_err(dma_dev->ddev.dev, "direction type %d error\n",
@@ -399,8 +387,8 @@ static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
return -EINVAL;
}
- ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \
- (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \
+ ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
+ (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
@@ -532,7 +520,7 @@ static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
struct scatterlist *sg;
unsigned int i;
- desc = gdma_dma_alloc_desc(sg_len);
+ desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
if (!desc) {
dev_err(c->device->dev, "alloc sg decs error\n");
return NULL;
@@ -587,7 +575,7 @@ static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
xfer_count = GDMA_REG_CTRL0_TX_MASK;
num_periods = DIV_ROUND_UP(len, xfer_count);
- desc = gdma_dma_alloc_desc(num_periods);
+ desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
if (!desc) {
dev_err(c->device->dev, "alloc memcpy decs error\n");
return NULL;
@@ -632,7 +620,7 @@ static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
}
num_periods = buf_len / period_len;
- desc = gdma_dma_alloc_desc(num_periods);
+ desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
if (!desc) {
dev_err(c->device->dev, "alloc cyclic decs error\n");
return NULL;
@@ -741,7 +729,9 @@ static void gdma_dma_tasklet(unsigned long arg)
atomic_inc(&dma_dev->cnt);
gdma_start_transfer(dma_dev, chan);
} else {
- dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n", chan->id);
+ dev_dbg(dma_dev->ddev.dev,
+ "chan %d no desc to issue\n",
+ chan->id);
}
if (!dma_dev->chan_issued)
break;
@@ -753,7 +743,7 @@ static void gdma_dma_tasklet(unsigned long arg)
static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
{
- uint32_t gct;
+ u32 gct;
/* all chans round robin */
gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);
@@ -767,7 +757,7 @@ static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
{
- uint32_t gct;
+ u32 gct;
/* all chans round robin */
gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
@@ -819,13 +809,12 @@ static int gdma_dma_probe(struct platform_device *pdev)
match = of_match_device(gdma_of_match_table, &pdev->dev);
if (!match)
return -EINVAL;
- data = (struct gdma_data *) match->data;
+ data = (struct gdma_data *)match->data;
dma_dev = devm_kzalloc(&pdev->dev,
struct_size(dma_dev, chan, data->chancnt),
GFP_KERNEL);
if (!dma_dev) {
- dev_err(&pdev->dev, "alloc dma device failed\n");
return -EINVAL;
}
dma_dev->data = data;
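
gdma_dma_alloc_desc() is dropped above in favour of kzalloc(struct_size(desc, sg, n), GFP_ATOMIC), which sizes a structure that ends in a flexible array member and, in the kernel, saturates instead of silently overflowing. A minimal userspace sketch of the sizing pattern (struct desc and alloc_desc() are illustrative names only, and the overflow checking of struct_size() is deliberately omitted):

#include <stdio.h>
#include <stdlib.h>

struct sg_entry {
	unsigned long src_addr;
	unsigned long dst_addr;
	unsigned int len;
};

/* Descriptor ending in a flexible array member, as in gdma_dma_desc. */
struct desc {
	unsigned int num_sgs;
	struct sg_entry sg[];
};

/* Userspace stand-in for kzalloc(struct_size(d, sg, n), ...); the kernel
 * macro additionally guards the multiplication and addition against
 * overflow.
 */
static struct desc *alloc_desc(unsigned int num_sgs)
{
	struct desc *d = calloc(1, sizeof(*d) + num_sgs * sizeof(d->sg[0]));

	if (d)
		d->num_sgs = num_sgs;
	return d;
}

int main(void)
{
	struct desc *d = alloc_desc(4);

	if (!d)
		return 1;
	printf("allocated %zu bytes for 4 scatter-gather entries\n",
	       sizeof(*d) + 4 * sizeof(d->sg[0]));
	free(d);
	return 0;
}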
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index ff7832798a77..4f7ef287a0f2 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config R8188EU
tristate "Realtek RTL8188EU Wireless LAN NIC driver"
depends on WLAN && USB && CFG80211
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 94c9d9f8ee5c..51a5b71f8c25 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -68,7 +68,7 @@ static void update_BCNTIM(struct adapter *padapter)
/* update TIM IE */
p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen,
- pnetwork_mlmeext->ie_length - _FIXED_IE_LENGTH_);
+ pnetwork_mlmeext->ie_length - _FIXED_IE_LENGTH_);
if (p && tim_ielen > 0) {
tim_ielen += 2;
premainder_ie = p + tim_ielen;
@@ -89,7 +89,7 @@ static void update_BCNTIM(struct adapter *padapter)
&tmp_len, (pnetwork_mlmeext->ie_length -
_BEACON_IE_OFFSET_));
if (p)
- offset += tmp_len+2;
+ offset += tmp_len + 2;
/* DS Parameter Set IE, len = 3 */
offset += 3;
@@ -162,7 +162,7 @@ static u8 chk_sta_is_alive(struct sta_info *psta)
return ret;
}
-void expire_timeout_chk(struct adapter *padapter)
+void expire_timeout_chk(struct adapter *padapter)
{
struct list_head *phead, *plist;
u8 updated = 0;
@@ -368,7 +368,6 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
else
sta_band |= WIRELESS_11B;
-
psta->wireless_mode = sta_band;
raid = networktype_to_raid(sta_band);
@@ -923,7 +922,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pht_cap->mcs.rx_mask[0] = 0xff;
pht_cap->mcs.rx_mask[1] = 0x0;
- memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
+ memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len);
}
/* parsing HT_INFO_IE */
@@ -1123,9 +1122,11 @@ static void update_bcn_erpinfo_ie(struct adapter *padapter)
struct ndis_802_11_var_ie *pIE = (struct ndis_802_11_var_ie *)p;
if (pmlmepriv->num_sta_non_erp == 1)
- pIE->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION;
+ pIE->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT |
+ RTW_ERP_INFO_USE_PROTECTION;
else
- pIE->data[0] &= ~(RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION);
+ pIE->data[0] &= ~(RTW_ERP_INFO_NON_ERP_PRESENT |
+ RTW_ERP_INFO_USE_PROTECTION);
if (pmlmepriv->num_sta_no_short_preamble > 0)
pIE->data[0] |= RTW_ERP_INFO_BARKER_PREAMBLE_MODE;
@@ -1154,12 +1155,13 @@ static void update_bcn_wps_ie(struct adapter *padapter)
if (!pwps_ie_src)
return;
- pwps_ie = rtw_get_wps_ie(ie+_FIXED_IE_LENGTH_, ielen-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+ pwps_ie = rtw_get_wps_ie(ie + _FIXED_IE_LENGTH_,
+ ielen - _FIXED_IE_LENGTH_, NULL, &wps_ielen);
if (!pwps_ie || wps_ielen == 0)
return;
- wps_offset = (uint)(pwps_ie-ie);
+ wps_offset = (uint)(pwps_ie - ie);
premainder_ie = pwps_ie + wps_ielen;
@@ -1172,15 +1174,15 @@ static void update_bcn_wps_ie(struct adapter *padapter)
}
wps_ielen = (uint)pwps_ie_src[1];/* to get ie data len */
- if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
- memcpy(pwps_ie, pwps_ie_src, wps_ielen+2);
- pwps_ie += (wps_ielen+2);
+ if (wps_offset + wps_ielen + 2 + remainder_ielen <= MAX_IE_SZ) {
+ memcpy(pwps_ie, pwps_ie_src, wps_ielen + 2);
+ pwps_ie += wps_ielen + 2;
if (pbackup_remainder_ie)
memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);
/* update ie_length */
- pnetwork->ie_length = wps_offset + (wps_ielen+2) + remainder_ielen;
+ pnetwork->ie_length = wps_offset + wps_ielen + 2 + remainder_ielen;
}
kfree(pbackup_remainder_ie);
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 83a2e58aef53..a24b40761af2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -405,10 +405,10 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->ie_length;
- if ((psecnetwork->ie_length-12) < (256-1))
- memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], psecnetwork->ie_length-12);
+ if (psecnetwork->ie_length - 12 < 255)
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], psecnetwork->ie_length - 12);
else
- memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], (256-1));
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], 255);
psecnetwork->ie_length = 0;
/* Added by Albert 2009/02/18 */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 094e8e78f0e8..797ffa6e64d5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -1008,10 +1008,10 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
/* parsing HT_INFO_IE */
p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_);
if (p && len > 0) {
- pht_info = (struct HT_info_element *)(p + 2);
- pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0];
+ pht_info = (struct HT_info_element *)(p + 2);
+ pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0];
} else {
- pnetwork->BcnInfo.ht_info_infos_0 = 0;
+ pnetwork->BcnInfo.ht_info_infos_0 = 0;
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ca0cf8a86671..9a4aad5ec365 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -1438,7 +1438,7 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
goto exit;
}
- if (*candidate == NULL || (*candidate)->network.Rssi < competitor->network.Rssi) {
+ if (!*candidate || (*candidate)->network.Rssi < competitor->network.Rssi) {
*candidate = competitor;
updated = true;
}
@@ -1632,8 +1632,7 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
pcmd->rsp = NULL;
pcmd->rspsz = 0;
INIT_LIST_HEAD(&pcmd->list);
- res = rtw_enqueue_cmd(pcmdpriv, pcmd);
- return res;
+ return rtw_enqueue_cmd(pcmdpriv, pcmd);
err_free_parm:
kfree(psetkeyparm);
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 6a846d08d449..7b16632048b7 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -374,7 +374,7 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
}
if (pwrpriv->pwr_mode == ps_mode) {
- if (PS_MODE_ACTIVE == ps_mode)
+ if (ps_mode == PS_MODE_ACTIVE)
return;
if ((pwrpriv->smart_ps == smart_ps) &&
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 1d83affc08ce..087f6c9a5826 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -24,11 +24,11 @@ static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static u8 rtw_bridge_tunnel_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
};
static u8 rtw_rfc1042_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};
static void rtw_signal_stat_timer_hdl(struct timer_list *t);
@@ -64,10 +64,10 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
precvframe = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
for (i = 0; i < NR_RECVFRAME; i++) {
- INIT_LIST_HEAD(&(precvframe->list));
+ INIT_LIST_HEAD(&precvframe->list);
- list_add_tail(&(precvframe->list),
- &(precvpriv->free_recv_queue.queue));
+ list_add_tail(&precvframe->list,
+ &precvpriv->free_recv_queue.queue);
precvframe->pkt = NULL;
@@ -134,9 +134,9 @@ int rtw_free_recvframe(struct recv_frame *precvframe,
spin_lock_bh(&pfree_recv_queue->lock);
- list_del_init(&(precvframe->list));
+ list_del_init(&precvframe->list);
- list_add_tail(&(precvframe->list), get_list_head(pfree_recv_queue));
+ list_add_tail(&precvframe->list, get_list_head(pfree_recv_queue));
spin_unlock_bh(&pfree_recv_queue->lock);
@@ -261,7 +261,7 @@ static int recvframe_chkmic(struct adapter *adapter,
rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
(unsigned char)prxattrib->priority); /* care the length of the data */
- pframemic = payload+datalen;
+ pframemic = payload + datalen;
bmic_err = false;
@@ -365,9 +365,9 @@ static struct recv_frame *decryptor(struct adapter *padapter,
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("prxstat->decrypted=%x prxattrib->encrypt=0x%03x\n", prxattrib->bdecrypted, prxattrib->encrypt));
if (prxattrib->encrypt > 0) {
- u8 *iv = precv_frame->pkt->data+prxattrib->hdrlen;
+ u8 *iv = precv_frame->pkt->data + prxattrib->hdrlen;
- prxattrib->key_index = (((iv[3])>>6)&0x3);
+ prxattrib->key_index = (((iv[3]) >> 6) & 0x3);
if (prxattrib->key_index > WEP_KEYS) {
DBG_88E("prxattrib->key_index(%d)>WEP_KEYS\n", prxattrib->key_index);
@@ -632,14 +632,9 @@ static void count_rx_stats(struct adapter *padapter,
}
}
-int sta2sta_data_frame(
- struct adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info **psta
-);
-
-int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame,
- struct sta_info **psta)
+static int sta2sta_data_frame(struct adapter *adapter,
+ struct recv_frame *precv_frame,
+ struct sta_info **psta)
{
int ret = _SUCCESS;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
@@ -1160,7 +1155,7 @@ static int validate_recv_frame(struct adapter *adapter,
u8 bDumpRxPkt;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
u8 *ptr = precv_frame->pkt->data;
- u8 ver = (unsigned char)(*ptr)&0x3;
+ u8 ver = (unsigned char)(*ptr) & 0x3;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
@@ -1308,11 +1303,11 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
return _FAIL;
memcpy(ptr, pattrib->dst, ETH_ALEN);
- memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
+ memcpy(ptr + ETH_ALEN, pattrib->src, ETH_ALEN);
if (!bsnaphdr) {
be_tmp = htons(len);
- memcpy(ptr+12, &be_tmp, 2);
+ memcpy(ptr + 12, &be_tmp, 2);
}
return _SUCCESS;
@@ -1325,7 +1320,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
struct list_head *plist, *phead;
u8 wlanhdr_offset;
u8 curfragnum;
- struct recv_frame *pfhdr, *pnfhdr;
+ struct recv_frame *pnfhdr;
struct recv_frame *prframe, *pnextrframe;
struct __queue *pfree_recv_queue;
@@ -1334,11 +1329,10 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
phead = get_list_head(defrag_q);
plist = phead->next;
- pfhdr = list_entry(plist, struct recv_frame, list);
- prframe = pfhdr;
- list_del_init(&(prframe->list));
+ prframe = list_entry(plist, struct recv_frame, list);
+ list_del_init(&prframe->list);
- if (curfragnum != pfhdr->attrib.frag_num) {
+ if (curfragnum != prframe->attrib.frag_num) {
/* the first fragment number must be 0 */
/* free the whole queue */
rtw_free_recvframe(prframe, pfree_recv_queue);
@@ -1377,15 +1371,15 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
skb_pull(pnextrframe->pkt, wlanhdr_offset);
/* append to first fragment frame's tail (if privacy frame, pull the ICV) */
- skb_trim(prframe->pkt, prframe->pkt->len - pfhdr->attrib.icv_len);
+ skb_trim(prframe->pkt, prframe->pkt->len - prframe->attrib.icv_len);
/* memcpy */
- memcpy(skb_tail_pointer(pfhdr->pkt), pnfhdr->pkt->data,
+ memcpy(skb_tail_pointer(prframe->pkt), pnfhdr->pkt->data,
pnfhdr->pkt->len);
skb_put(prframe->pkt, pnfhdr->pkt->len);
- pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
+ prframe->attrib.icv_len = pnfhdr->attrib.icv_len;
plist = plist->next;
}
@@ -1663,9 +1657,9 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
break;
}
- list_del_init(&(prframe->list));
+ list_del_init(&prframe->list);
- list_add_tail(&(prframe->list), plist);
+ list_add_tail(&prframe->list, plist);
return true;
}
@@ -1704,7 +1698,7 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
("%s: indicate=%d seq=%d amsdu=%d\n",
__func__, preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu));
plist = plist->next;
- list_del_init(&(prframe->list));
+ list_del_init(&prframe->list);
if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
@@ -1763,7 +1757,8 @@ static int recv_indicatepkt_reorder(struct adapter *padapter,
preorder_ctrl->indicate_seq = pattrib->seq_num;
rtw_recv_indicatepkt(padapter, prframe);
- preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
+ preorder_ctrl->indicate_seq =
+ (preorder_ctrl->indicate_seq + 1) % 4096;
return _SUCCESS;
}
} else if (pattrib->amsdu == 1) { /* temp filter -> means didn't support A-MSDUs in a A-MPDU */
@@ -1771,7 +1766,8 @@ static int recv_indicatepkt_reorder(struct adapter *padapter,
preorder_ctrl->indicate_seq = pattrib->seq_num;
retval = amsdu_to_msdu(padapter, prframe);
- preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
+ preorder_ctrl->indicate_seq =
+ (preorder_ctrl->indicate_seq + 1) % 4096;
return retval;
}
}
@@ -1857,8 +1853,7 @@ static int process_recv_indicatepkts(struct adapter *padapter,
/* including perform A-MPDU Rx Ordering Buffer Control */
if ((!padapter->bDriverStopped) &&
(!padapter->bSurpriseRemoved)) {
- retval = _FAIL;
- return retval;
+ return _FAIL;
}
}
} else { /* B/G mode */
@@ -1877,8 +1872,7 @@ static int process_recv_indicatepkts(struct adapter *padapter,
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ %s- recv_func free_indicatepkt\n", __func__));
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
- retval = _FAIL;
- return retval;
+ return _FAIL;
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 4480deef95a1..f404370d6631 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1180,12 +1180,8 @@ unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz)
unsigned int update_MSC_rate(struct ieee80211_ht_cap *pHT_caps)
{
- unsigned int mask = 0;
-
- mask = (pHT_caps->mcs.rx_mask[0] << 12) |
+ return (pHT_caps->mcs.rx_mask[0] << 12) |
(pHT_caps->mcs.rx_mask[1] << 20);
-
- return mask;
}
int support_short_GI(struct adapter *padapter, struct ieee80211_ht_cap *pHT_caps)
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index ba3c3e5a8216..74f7c9c81bf6 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -107,7 +107,7 @@ u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_SIZE][8] = {
{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
};
u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8] = {
@@ -1096,7 +1096,7 @@ void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
} else {
/* Turn Off EDCA turbo here. */
/* Restore original EDCA according to the declaration of AP. */
- if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
+ if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
usb_write32(Adapter, REG_EDCA_BE_PARAM,
Adapter->HalData->AcParam_BE);
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
diff --git a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
index 7ae476ffcd8c..149b0009ad66 100644
--- a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
@@ -394,11 +394,11 @@ static void ODM_PhyStatusQuery_92CSeries(struct odm_dm_struct *dm_odm,
{
odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus,
pPktinfo);
- if (dm_odm->RSSI_test) {
+ if (dm_odm->RSSI_test)
;/* Select the packets to do RSSI checking for antenna switching. */
- } else {
+ else
odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
- }
+
}
void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 9e5f23392d58..ab94ad9d608a 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -168,7 +168,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
struct odm_dm_struct *odmpriv = &adapt->HalData->odmpriv;
struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (adapt->registrypriv.mp_mode == 0) {
if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
@@ -486,23 +486,23 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
switch (pfirstframe->attrib.priority) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
phwxmit = pxmitpriv->hwxmits + 3;
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
phwxmit = pxmitpriv->hwxmits + 1;
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
phwxmit = pxmitpriv->hwxmits;
break;
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
phwxmit = pxmitpriv->hwxmits + 2;
break;
}
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index 6efddc8f1675..df096c37f5eb 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -24,12 +24,12 @@
#include "odm.h"
#include "odm_hwconfig.h"
#include "odm_debug.h"
-#include "../../rtlwifi/phydm/phydm_regdefine11n.h"
+#include "phydm_regdefine11n.h"
#include "hal8188e_rate_adaptive.h" /* for RA,Power training */
#include "rtl8188e_hal.h"
-#include "../../rtlwifi/phydm/phydm_reg.h"
+#include "phydm_reg.h"
#include "odm_rtl8188e.h"
diff --git a/drivers/staging/rtlwifi/phydm/phydm_features.h b/drivers/staging/rtl8188eu/include/phydm_reg.h
index b4ff293280f7..e3ae006487ba 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_features.h
+++ b/drivers/staging/rtl8188eu/include/phydm_reg.h
@@ -11,12 +11,12 @@
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
+#ifndef __HAL_ODM_REG_H__
+#define __HAL_ODM_REG_H__
-#ifndef __PHYDM_FEATURES_H__
-#define __PHYDM_FEATURES_H__
-
-/*phydm debyg report & tools*/
-
-/*Antenna Diversity*/
+#define ODM_EDCA_VO_PARAM 0x500
+#define ODM_EDCA_VI_PARAM 0x504
+#define ODM_EDCA_BE_PARAM 0x508
+#define ODM_EDCA_BK_PARAM 0x50C
#endif
diff --git a/drivers/staging/rtl8188eu/include/phydm_regdefine11n.h b/drivers/staging/rtl8188eu/include/phydm_regdefine11n.h
new file mode 100644
index 000000000000..565996828cab
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/phydm_regdefine11n.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2016 Realtek Corporation.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __ODM_REGDEFINE11N_H__
+#define __ODM_REGDEFINE11N_H__
+
+#define ODM_REG_TX_ANT_CTRL_11N 0x80C
+#define ODM_REG_RX_DEFAULT_A_11N 0x858
+#define ODM_REG_ANTSEL_CTRL_11N 0x860
+#define ODM_REG_RX_ANT_CTRL_11N 0x864
+#define ODM_REG_PIN_CTRL_11N 0x870
+#define ODM_REG_SC_CNT_11N 0x8C4
+
+#define ODM_REG_ANT_MAPPING1_11N 0x914
+
+#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define ODM_REG_CCK_CCA_11N 0xA0A
+#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define ODM_REG_CCK_FA_RST_11N 0xA2C
+#define ODM_REG_CCK_FA_MSB_11N 0xA58
+#define ODM_REG_CCK_FA_LSB_11N 0xA5C
+#define ODM_REG_CCK_CCA_CNT_11N 0xA60
+#define ODM_REG_BB_PWR_SAV4_11N 0xA74
+
+#define ODM_REG_LNA_SWITCH_11N 0xB2C
+
+#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define ODM_REG_IGI_A_11N 0xC50
+#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
+#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
+
+#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
+#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
+
+#define ODM_REG_ANTSEL_PIN_11N 0x4C
+#define ODM_REG_RESP_TX_11N 0x6D8
+
+#define ODM_BIT_IGI_11N 0x0000007F
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 5e91b9428c16..d059240b836f 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -667,116 +667,6 @@ enum ht_cap_ampdu_factor {
#define WPS_ASSOC_STATE_ASSOCIATION_FAILURE 0x03
#define WPS_ASSOC_STATE_IP_FAILURE 0x04
-/* =====================P2P Section===================== */
-/* For P2P */
-#define P2POUI 0x506F9A09
-
-/* P2P Attribute ID */
-#define P2P_ATTR_STATUS 0x00
-#define P2P_ATTR_MINOR_REASON_CODE 0x01
-#define P2P_ATTR_CAPABILITY 0x02
-#define P2P_ATTR_DEVICE_ID 0x03
-#define P2P_ATTR_GO_INTENT 0x04
-#define P2P_ATTR_CONF_TIMEOUT 0x05
-#define P2P_ATTR_LISTEN_CH 0x06
-#define P2P_ATTR_GROUP_BSSID 0x07
-#define P2P_ATTR_EX_LISTEN_TIMING 0x08
-#define P2P_ATTR_INTENTED_IF_ADDR 0x09
-#define P2P_ATTR_MANAGEABILITY 0x0A
-#define P2P_ATTR_CH_LIST 0x0B
-#define P2P_ATTR_NOA 0x0C
-#define P2P_ATTR_DEVICE_INFO 0x0D
-#define P2P_ATTR_GROUP_INFO 0x0E
-#define P2P_ATTR_GROUP_ID 0x0F
-#define P2P_ATTR_INTERFACE 0x10
-#define P2P_ATTR_OPERATING_CH 0x11
-#define P2P_ATTR_INVITATION_FLAGS 0x12
-
-/* Value of Status Attribute */
-#define P2P_STATUS_SUCCESS 0x00
-#define P2P_STATUS_FAIL_INFO_UNAVAILABLE 0x01
-#define P2P_STATUS_FAIL_INCOMPATIBLE_PARAM 0x02
-#define P2P_STATUS_FAIL_LIMIT_REACHED 0x03
-#define P2P_STATUS_FAIL_INVALID_PARAM 0x04
-#define P2P_STATUS_FAIL_REQUEST_UNABLE 0x05
-#define P2P_STATUS_FAIL_PREVOUS_PROTO_ERR 0x06
-#define P2P_STATUS_FAIL_NO_COMMON_CH 0x07
-#define P2P_STATUS_FAIL_UNKNOWN_P2PGROUP 0x08
-#define P2P_STATUS_FAIL_BOTH_GOINTENT_15 0x09
-#define P2P_STATUS_FAIL_INCOMPATIBLE_PROVSION 0x0A
-#define P2P_STATUS_FAIL_USER_REJECT 0x0B
-
-/* Value of Invitation Flags Attribute */
-
-/* Value of Device Capability Bitmap */
-#define P2P_DEVCAP_SERVICE_DISCOVERY BIT(0)
-#define P2P_DEVCAP_CLIENT_DISCOVERABILITY BIT(1)
-#define P2P_DEVCAP_CONCURRENT_OPERATION BIT(2)
-#define P2P_DEVCAP_INFRA_MANAGED BIT(3)
-#define P2P_DEVCAP_DEVICE_LIMIT BIT(4)
-#define P2P_DEVCAP_INVITATION_PROC BIT(5)
-
-/* Value of Group Capability Bitmap */
-#define P2P_GRPCAP_GO BIT(0)
-#define P2P_GRPCAP_PERSISTENT_GROUP BIT(1)
-#define P2P_GRPCAP_GROUP_LIMIT BIT(2)
-#define P2P_GRPCAP_INTRABSS BIT(3)
-#define P2P_GRPCAP_CROSS_CONN BIT(4)
-#define P2P_GRPCAP_PERSISTENT_RECONN BIT(5)
-#define P2P_GRPCAP_GROUP_FORMATION BIT(6)
-
-/* P2P Public Action Frame (Management Frame) */
-#define P2P_PUB_ACTION_ACTION 0x09
-
-/* P2P Public Action Frame Type */
-#define P2P_GO_NEGO_REQ 0
-#define P2P_GO_NEGO_RESP 1
-#define P2P_GO_NEGO_CONF 2
-#define P2P_INVIT_REQ 3
-#define P2P_INVIT_RESP 4
-#define P2P_DEVDISC_REQ 5
-#define P2P_DEVDISC_RESP 6
-#define P2P_PROVISION_DISC_REQ 7
-#define P2P_PROVISION_DISC_RESP 8
-
-/* P2P Action Frame Type */
-#define P2P_NOTICE_OF_ABSENCE 0
-#define P2P_PRESENCE_REQUEST 1
-#define P2P_PRESENCE_RESPONSE 2
-#define P2P_GO_DISC_REQUEST 3
-
-#define P2P_PROVISIONING_SCAN_CNT 3
-
-/* default value, used when: (1)p2p disabled or (2)p2p enabled
- * but only do 1 scan phase
- */
-#define P2P_FINDPHASE_EX_NONE 0
-/* used when p2p enabled and want to do 1 scan phase and
- * P2P_FINDPHASE_EX_MAX-1 find phase
- */
-#define P2P_FINDPHASE_EX_FULL 1
-#define P2P_FINDPHASE_EX_SOCIAL_FIRST (P2P_FINDPHASE_EX_FULL+1)
-#define P2P_FINDPHASE_EX_MAX 4
-#define P2P_FINDPHASE_EX_SOCIAL_LAST P2P_FINDPHASE_EX_MAX
-
-/* 5 seconds timeout for sending the provision discovery request */
-#define P2P_PROVISION_TIMEOUT 5000
-/* 3 seconds timeout for sending the prov disc request concurrent mode */
-#define P2P_CONCURRENT_PROVISION_TIME 3000
-/* 5 seconds timeout for receiving the group negotiation response */
-#define P2P_GO_NEGO_TIMEOUT 5000
-/* 3 seconds timeout for sending the negotiation request under concurrent mode */
-#define P2P_CONCURRENT_GO_NEGO_TIME 3000
-/* 100ms */
-#define P2P_TX_PRESCAN_TIMEOUT 100
-/* 5 seconds timeout for sending the invitation request */
-#define P2P_INVITE_TIMEOUT 5000
-/* 3 seconds timeout for sending the invitation request under concurrent mode */
-#define P2P_CONCURRENT_INVITE_TIME 3000
-/* 25 seconds timeout to reset the scan channel (based on channel plan) */
-#define P2P_RESET_SCAN_CH 25000
-#define P2P_MAX_INTENT 15
-
/* WPS Configuration Method */
#define WPS_CM_NONE 0x0000
#define WPS_CM_LABEL 0x0004
@@ -791,26 +681,6 @@ enum ht_cap_ampdu_factor {
#define WPS_CM_SW_DISPLAY_P 0x2008
#define WPS_CM_LCD_DISPLAY_P 0x4008
-/* =====================WFD Section===================== */
-/* For Wi-Fi Display */
-#define WFD_ATTR_DEVICE_INFO 0x00
-#define WFD_ATTR_ASSOC_BSSID 0x01
-#define WFD_ATTR_COUPLED_SINK_INFO 0x06
-#define WFD_ATTR_LOCAL_IP_ADDR 0x08
-#define WFD_ATTR_SESSION_INFO 0x09
-#define WFD_ATTR_ALTER_MAC 0x0a
-
-/* For WFD Device Information Attribute */
-#define WFD_DEVINFO_SOURCE 0x0000
-#define WFD_DEVINFO_PSINK 0x0001
-#define WFD_DEVINFO_SSINK 0x0002
-#define WFD_DEVINFO_DUAL 0x0003
-
-#define WFD_DEVINFO_SESSION_AVAIL 0x0010
-#define WFD_DEVINFO_WSD 0x0040
-#define WFD_DEVINFO_PC_TDLS 0x0080
-#define WFD_DEVINFO_HDCP_SUPPORT 0x0100
-
#define IP_MCAST_MAC(mac) \
((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
#define ICMPV6_MCAST_MAC(mac) \
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 8dde5a40e253..2c088af44c8b 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -245,8 +245,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index e4f2af2974ed..eedf2cd831d1 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -232,7 +232,6 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
pIo_buf = kmalloc(MAX_USB_IO_CTL_SIZE, GFP_ATOMIC);
if (!pIo_buf) {
- DBG_88E("[%s] pIo_buf == NULL\n", __func__);
status = -ENOMEM;
goto release_mutex;
}
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 4602a47cdb4a..11528d17bb3c 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,9 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
config RTLLIB
tristate "Support for rtllib wireless devices"
depends on WLAN && m
- default n
select LIB80211
- ---help---
+ help
If you have a wireless card that uses rtllib, say
Y. Currently the only card is the rtl8192e.
@@ -16,7 +16,7 @@ config RTLLIB_CRYPTO_CCMP
depends on RTLLIB
select CRYPTO_AES
default y
- ---help---
+ help
CCMP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
@@ -27,7 +27,7 @@ config RTLLIB_CRYPTO_TKIP
select CRYPTO_ARC4
select CRYPTO_MICHAEL_MIC
default y
- ---help---
+ help
TKIP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
@@ -37,7 +37,7 @@ config RTLLIB_CRYPTO_WEP
select CRYPTO_ARC4
depends on RTLLIB
default y
- ---help---
+ help
TKIP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 68f53013cb95..82c11caeee7a 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -1,14 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
diff --git a/drivers/staging/rtl8192e/license b/drivers/staging/rtl8192e/license
deleted file mode 100644
index 4bea9fa60daa..000000000000
--- a/drivers/staging/rtl8192e/license
+++ /dev/null
@@ -1,339 +0,0 @@
-
-"This software program is licensed subject to the GNU General Public License
-(GPL). Version 2, June 1991, available at
-<http://www.fsf.org/copyleft/gpl.html>".
-
-GNU General Public License
-
-Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license
-document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is intended
-to guarantee your freedom to share and change free software--to make sure
-the software is free for all its users. This General Public License applies
-to most of the Free Software Foundation's software and to any other program
-whose authors commit to using it. (Some other Free Software Foundation
-software is covered by the GNU Library General Public License instead.) You
-can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our
-General Public Licenses are designed to make sure that you have the freedom
-to distribute copies of free software (and charge for this service if you
-wish), that you receive source code or can get it if you want it, that you
-can change the software or use pieces of it in new free programs; and that
-you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to
-deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you distribute
-copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or
-for a fee, you must give the recipients all the rights that you have. You
-must make sure that they, too, receive or can get the source code. And you
-must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2)
-offer you this license which gives you legal permission to copy, distribute
-and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that
-everyone understands that there is no warranty for this free software. If
-the software is modified by someone else and passed on, we want its
-recipients to know that what they have is not the original, so that any
-problems introduced by others will not reflect on the original authors'
-reputations.
-
-Finally, any free program is threatened constantly by software patents. We
-wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must be
-licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification
-follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice
- placed by the copyright holder saying it may be distributed under the
- terms of this General Public License. The "Program", below, refers to any
- such program or work, and a "work based on the Program" means either the
- Program or any derivative work under copyright law: that is to say, a
- work containing the Program or a portion of it, either verbatim or with
- modifications and/or translated into another language. (Hereinafter,
- translation is included without limitation in the term "modification".)
- Each licensee is addressed as "you".
-
- Activities other than copying, distribution and modification are not
- covered by this License; they are outside its scope. The act of running
- the Program is not restricted, and the output from the Program is covered
- only if its contents constitute a work based on the Program (independent
- of having been made by running the Program). Whether that is true depends
- on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code
- as you receive it, in any medium, provided that you conspicuously and
- appropriately publish on each copy an appropriate copyright notice and
- disclaimer of warranty; keep intact all the notices that refer to this
- License and to the absence of any warranty; and give any other recipients
- of the Program a copy of this License along with the Program.
-
- You may charge a fee for the physical act of transferring a copy, and you
- may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it,
- thus forming a work based on the Program, and copy and distribute such
- modifications or work under the terms of Section 1 above, provided that
- you also meet all of these conditions:
-
- * a) You must cause the modified files to carry prominent notices stating
- that you changed the files and the date of any change.
-
- * b) You must cause any work that you distribute or publish, that in
- whole or in part contains or is derived from the Program or any part
- thereof, to be licensed as a whole at no charge to all third parties
- under the terms of this License.
-
- * c) If the modified program normally reads commands interactively when
- run, you must cause it, when started running for such interactive
- use in the most ordinary way, to print or display an announcement
- including an appropriate copyright notice and a notice that there is
- no warranty (or else, saying that you provide a warranty) and that
- users may redistribute the program under these conditions, and
- telling the user how to view a copy of this License. (Exception: if
- the Program itself is interactive but does not normally print such
- an announcement, your work based on the Program is not required to
- print an announcement.)
-
- These requirements apply to the modified work as a whole. If identifiable
- sections of that work are not derived from the Program, and can be
- reasonably considered independent and separate works in themselves, then
- this License, and its terms, do not apply to those sections when you
- distribute them as separate works. But when you distribute the same
- sections as part of a whole which is a work based on the Program, the
- distribution of the whole must be on the terms of this License, whose
- permissions for other licensees extend to the entire whole, and thus to
- each and every part regardless of who wrote it.
-
- Thus, it is not the intent of this section to claim rights or contest
- your rights to work written entirely by you; rather, the intent is to
- exercise the right to control the distribution of derivative or
- collective works based on the Program.
-
- In addition, mere aggregation of another work not based on the Program
- with the Program (or with a work based on the Program) on a volume of a
- storage or distribution medium does not bring the other work under the
- scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under
- Section 2) in object code or executable form under the terms of Sections
- 1 and 2 above provided that you also do one of the following:
-
- * a) Accompany it with the complete corresponding machine-readable source
- code, which must be distributed under the terms of Sections 1 and 2
- above on a medium customarily used for software interchange; or,
-
- * b) Accompany it with a written offer, valid for at least three years,
- to give any third party, for a charge no more than your cost of
- physically performing source distribution, a complete machine-
- readable copy of the corresponding source code, to be distributed
- under the terms of Sections 1 and 2 above on a medium customarily
- used for software interchange; or,
-
- * c) Accompany it with the information you received as to the offer to
- distribute corresponding source code. (This alternative is allowed
- only for noncommercial distribution and only if you received the
- program in object code or executable form with such an offer, in
- accord with Subsection b above.)
-
- The source code for a work means the preferred form of the work for
- making modifications to it. For an executable work, complete source code
- means all the source code for all modules it contains, plus any
- associated interface definition files, plus the scripts used to control
- compilation and installation of the executable. However, as a special
- exception, the source code distributed need not include anything that is
- normally distributed (in either source or binary form) with the major
- components (compiler, kernel, and so on) of the operating system on which
- the executable runs, unless that component itself accompanies the
- executable.
-
- If distribution of executable or object code is made by offering access
- to copy from a designated place, then offering equivalent access to copy
- the source code from the same place counts as distribution of the source
- code, even though third parties are not compelled to copy the source
- along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as
- expressly provided under this License. Any attempt otherwise to copy,
- modify, sublicense or distribute the Program is void, and will
- automatically terminate your rights under this License. However, parties
- who have received copies, or rights, from you under this License will not
- have their licenses terminated so long as such parties remain in full
- compliance.
-
-5. You are not required to accept this License, since you have not signed
- it. However, nothing else grants you permission to modify or distribute
- the Program or its derivative works. These actions are prohibited by law
- if you do not accept this License. Therefore, by modifying or
- distributing the Program (or any work based on the Program), you
- indicate your acceptance of this License to do so, and all its terms and
- conditions for copying, distributing or modifying the Program or works
- based on it.
-
-6. Each time you redistribute the Program (or any work based on the
- Program), the recipient automatically receives a license from the
- original licensor to copy, distribute or modify the Program subject to
- these terms and conditions. You may not impose any further restrictions
- on the recipients' exercise of the rights granted herein. You are not
- responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
- infringement or for any other reason (not limited to patent issues),
- conditions are imposed on you (whether by court order, agreement or
- otherwise) that contradict the conditions of this License, they do not
- excuse you from the conditions of this License. If you cannot distribute
- so as to satisfy simultaneously your obligations under this License and
- any other pertinent obligations, then as a consequence you may not
- distribute the Program at all. For example, if a patent license would
- not permit royalty-free redistribution of the Program by all those who
- receive copies directly or indirectly through you, then the only way you
- could satisfy both it and this License would be to refrain entirely from
- distribution of the Program.
-
- If any portion of this section is held invalid or unenforceable under any
- particular circumstance, the balance of the section is intended to apply
- and the section as a whole is intended to apply in other circumstances.
-
- It is not the purpose of this section to induce you to infringe any
- patents or other property right claims or to contest validity of any
- such claims; this section has the sole purpose of protecting the
- integrity of the free software distribution system, which is implemented
- by public license practices. Many people have made generous contributions
- to the wide range of software distributed through that system in
- reliance on consistent application of that system; it is up to the
- author/donor to decide if he or she is willing to distribute software
- through any other system and a licensee cannot impose that choice.
-
- This section is intended to make thoroughly clear what is believed to be
- a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain
- countries either by patents or by copyrighted interfaces, the original
- copyright holder who places the Program under this License may add an
- explicit geographical distribution limitation excluding those countries,
- so that distribution is permitted only in or among countries not thus
- excluded. In such case, this License incorporates the limitation as if
- written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of
- the General Public License from time to time. Such new versions will be
- similar in spirit to the present version, but may differ in detail to
- address new problems or concerns.
-
- Each version is given a distinguishing version number. If the Program
- specifies a version number of this License which applies to it and "any
- later version", you have the option of following the terms and
- conditions either of that version or of any later version published by
- the Free Software Foundation. If the Program does not specify a version
- number of this License, you may choose any version ever published by the
- Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs
- whose distribution conditions are different, write to the author to ask
- for permission. For software which is copyrighted by the Free Software
- Foundation, write to the Free Software Foundation; we sometimes make
- exceptions for this. Our decision will be guided by the two goals of
- preserving the free status of all derivatives of our free software and
- of promoting the sharing and reuse of software generally.
-
- NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
- FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
- OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
- PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
- EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
- ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
- YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
- NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
- WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
- REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
- DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
- DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
- (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
- INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
- THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
- OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it free
-software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey the
-exclusion of warranty; and each file should have at least the "copyright"
-line and a pointer to where the full notice is found.
-
-one line to give the program's name and an idea of what it does.
-Copyright (C) yyyy name of author
-
-This program is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2 of the License, or (at your option)
-any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc., 59
-Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when
-it starts in an interactive mode:
-
-Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
-with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
-software, and you are welcome to redistribute it under certain conditions;
-type 'show c' for details.
-
-The hypothetical commands 'show w' and 'show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may be
-called something other than 'show w' and 'show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-'Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-signature of Ty Coon, 1 April 1989
-Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Library General Public
-License instead of this License.
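
The hunks that follow replace the full notice removed above with a single machine-readable tag while keeping the copyright and contact lines as an ordinary comment. As a minimal sketch of the convention visible in these patches (assuming GPL-2.0, which is what every file below declares), the tag sits on the first line of each file and its comment style follows the file type:

	# SPDX-License-Identifier: GPL-2.0	(Kconfig files, hash comment)
	// SPDX-License-Identifier: GPL-2.0	(.c source files, line comment)
	/* SPDX-License-Identifier: GPL-2.0 */	(.h header files, block comment)

Each per-file diff below applies exactly one of these three forms in place of the old boilerplate header.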
diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig
index 7ac42a590e21..eae8167f79dd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config RTL8192E
tristate "RealTek RTL8192E Wireless LAN NIC driver"
depends on PCI && WLAN && RTLLIB
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index 03421033d14a..53fd79a28189 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
@@ -1,19 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef R8190P_DEF_H
#define R8190P_DEF_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 85f93056d28b..7876b389913a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -1,18 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_core.h"
#include "r8192E_phyreg.h"
#include "r8192E_phy.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index bbea13b452b2..4cb483f1a152 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
@@ -1,18 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef RTL8225H
#define RTL8225H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index 467287ae6c1c..c5e44bbe997c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -1,18 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8192E_cmdpkt.h"
@@ -20,7 +11,6 @@
bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
u32 len)
{
-
bool rt_status = true;
struct r8192_priv *priv = rtllib_priv(dev);
u16 frag_length = 0, frag_offset = 0;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
index a8c63ad2ac2e..c63909199e93 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef R819XUSB_CMDPKT_H
#define R819XUSB_CMDPKT_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 19bb04b3f097..ef92ce957466 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1,23 +1,12 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
@@ -338,7 +327,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->eeprom_ChannelPlan = usValue&0xff;
IC_Version = (usValue & 0xff00)>>8;
- ICVer8192 = (IC_Version&0xf);
+ ICVer8192 = IC_Version & 0xf;
ICVer8256 = (IC_Version & 0xf0)>>4;
RT_TRACE(COMP_INIT, "\nICVer8192 = 0x%x\n", ICVer8192);
RT_TRACE(COMP_INIT, "\nICVer8256 = 0x%x\n", ICVer8256);
@@ -420,7 +409,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
if (!priv->AutoloadFailFlag) {
usValue = rtl92e_eeprom_read(dev,
EEPROM_TxPwDiff_CrystalCap >> 1);
- priv->EEPROMAntPwDiff = (usValue&0x0fff);
+ priv->EEPROMAntPwDiff = usValue & 0x0fff;
priv->EEPROMCrystalCap = (u8)((usValue & 0xf000)
>> 12);
} else {
@@ -475,15 +464,13 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
}
priv->LegacyHTTxPowerDiff =
priv->EEPROMLegacyHTTxPowerDiff;
- priv->AntennaTxPwDiff[0] = (priv->EEPROMAntPwDiff &
- 0xf);
+ priv->AntennaTxPwDiff[0] = priv->EEPROMAntPwDiff & 0xf;
priv->AntennaTxPwDiff[1] = (priv->EEPROMAntPwDiff &
0xf0) >> 4;
priv->AntennaTxPwDiff[2] = (priv->EEPROMAntPwDiff &
0xf00) >> 8;
priv->CrystalCap = priv->EEPROMCrystalCap;
- priv->ThermalMeter[0] = (priv->EEPROMThermalMeter &
- 0xf);
+ priv->ThermalMeter[0] = priv->EEPROMThermalMeter & 0xf;
priv->ThermalMeter[1] = (priv->EEPROMThermalMeter &
0xf0) >> 4;
} else if (priv->epromtype == EEPROM_93C56) {
@@ -540,8 +527,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->AntennaTxPwDiff[1] = 0;
priv->AntennaTxPwDiff[2] = 0;
priv->CrystalCap = priv->EEPROMCrystalCap;
- priv->ThermalMeter[0] = (priv->EEPROMThermalMeter &
- 0xf);
+ priv->ThermalMeter[0] = priv->EEPROMThermalMeter & 0xf;
priv->ThermalMeter[1] = (priv->EEPROMThermalMeter &
0xf0) >> 4;
}
@@ -755,8 +741,8 @@ start:
if (priv->ResetProgress == RESET_TYPE_NORESET) {
ulRegRead = rtl92e_readl(dev, CPU_GEN);
if (priv->LoopbackMode == RTL819X_NO_LOOPBACK)
- ulRegRead = ((ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
- CPU_GEN_NO_LOOPBACK_SET);
+ ulRegRead = (ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
+ CPU_GEN_NO_LOOPBACK_SET;
else if (priv->LoopbackMode == RTL819X_MAC_LOOPBACK)
ulRegRead |= CPU_CCK_LOOPBACK;
else
@@ -1424,7 +1410,7 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate)
ret_rate = MGN_MCS15;
break;
case DESC90_RATEMCS32:
- ret_rate = (0x80|0x20);
+ ret_rate = 0x80 | 0x20;
break;
default:
@@ -1927,7 +1913,7 @@ static void _rtl92e_update_received_rate_histogram_stats(
break;
case MGN_2M:
rateIndex = 1;
- break;
+ break;
case MGN_5_5M:
rateIndex = 2;
break;
@@ -1945,7 +1931,7 @@ static void _rtl92e_update_received_rate_histogram_stats(
break;
case MGN_18M:
rateIndex = 7;
- break;
+ break;
case MGN_24M:
rateIndex = 8;
break;
@@ -2044,7 +2030,7 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
}
stats->RxDrvInfoSize = pdesc->RxDrvInfoSize;
- stats->RxBufShift = ((pdesc->Shift)&0x03);
+ stats->RxBufShift = (pdesc->Shift) & 0x03;
stats->Decrypted = !pdesc->SWDec;
pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index f4233bb12f81..1713381dc2b4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
@@ -1,23 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _RTL8192E_H
#define _RTL8192E_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 3c7831250987..9b025b9fa7ab 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -1,18 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8192E_hwimg.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
index 61c8dac826a8..b9059abc901b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef __INC_FIRMWARE_H
#define __INC_FIRMWARE_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 5c20cb476281..3e223151d4b7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -1,19 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef R8180_HW
#define R8180_HW
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
index d437a8efe933..e6fce749e65b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
@@ -1,19 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-/*Created on 2008/11/18, 3: 7*/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "r8192E_hwimg.h"
u32 Rtl8192PciEPHY_REGArray[PHY_REGArrayLengthPciE] = {0x0,};
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
index 4e2bbab6a413..7d63f5a5c1b7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef __INC_HAL8192PciE_FW_IMG_H
#define __INC_HAL8192PciE_FW_IMG_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 73497d559b77..5215a0b5fd45 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -1,18 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include <linux/bitops.h>
#include "rtl_core.h"
#include "r8192E_hw.h"
@@ -81,8 +72,7 @@ void rtl92e_set_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
if (dwBitMask != bMaskDWord) {
OriginalValue = rtl92e_readl(dev, dwRegAddr);
BitShift = _rtl92e_calculate_bit_shift(dwBitMask);
- NewValue = (((OriginalValue) & (~dwBitMask)) |
- (dwData << BitShift));
+ NewValue = (OriginalValue & ~dwBitMask) | (dwData << BitShift);
rtl92e_writel(dev, dwRegAddr, NewValue);
} else
rtl92e_writel(dev, dwRegAddr, dwData);
@@ -188,7 +178,7 @@ static void _rtl92e_phy_rf_write(struct net_device *dev,
NewOffset = Offset;
}
- DataAndAddr = (Data<<16) | (NewOffset&0x3f);
+ DataAndAddr = (NewOffset & 0x3f) | (Data << 16);
rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
@@ -223,8 +213,7 @@ void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
Original_Value = _rtl92e_phy_rf_fw_read(dev, eRFPath,
RegAddr);
BitShift = _rtl92e_calculate_bit_shift(BitMask);
- New_Value = (((Original_Value) & (~BitMask)) |
- (Data << BitShift));
+ New_Value = (Original_Value & ~BitMask) | (Data << BitShift);
_rtl92e_phy_rf_fw_write(dev, eRFPath, RegAddr,
New_Value);
@@ -237,8 +226,7 @@ void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
Original_Value = _rtl92e_phy_rf_read(dev, eRFPath,
RegAddr);
BitShift = _rtl92e_calculate_bit_shift(BitMask);
- New_Value = (((Original_Value) & (~BitMask)) |
- (Data << BitShift));
+ New_Value = (Original_Value & ~BitMask) | (Data << BitShift);
_rtl92e_phy_rf_write(dev, eRFPath, RegAddr, New_Value);
} else
@@ -571,9 +559,9 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev)
if (priv->IC_Cut > VERSION_8190_BD) {
if (priv->rf_type == RF_2T4R)
- dwRegValue = (priv->AntennaTxPwDiff[2]<<8 |
+ dwRegValue = priv->AntennaTxPwDiff[2]<<8 |
priv->AntennaTxPwDiff[1]<<4 |
- priv->AntennaTxPwDiff[0]);
+ priv->AntennaTxPwDiff[0];
else
dwRegValue = 0x0;
rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
@@ -655,9 +643,9 @@ void rtl92e_set_tx_power(struct net_device *dev, u8 channel)
priv->AntennaTxPwDiff[1] = (u8)(ant_pwr_diff);
priv->AntennaTxPwDiff[0] = 0;
- u4RegValue = (priv->AntennaTxPwDiff[2]<<8 |
+ u4RegValue = priv->AntennaTxPwDiff[2]<<8 |
priv->AntennaTxPwDiff[1]<<4 |
- priv->AntennaTxPwDiff[0]);
+ priv->AntennaTxPwDiff[0];
rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
(bXBTxAGC|bXCTxAGC|bXDTxAGC),
@@ -1631,5 +1619,4 @@ void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation)
break;
}
}
-
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index b534d72bf708..7c9148e033d8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _R819XU_PHY_H
#define _R819XU_PHY_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
index 03d6d70b2d28..433272a2aae8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _R819XU_PHYREG_H
#define _R819XU_PHYREG_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index c62481fcf0b1..627ea1029509 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -1,23 +1,12 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index 12f01f196752..1ebd92e27441 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -1,23 +1,12 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _RTL_CAM_H
#define _RTL_CAM_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 253f1911a3f4..f932cb15e4e5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -1,23 +1,12 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 866fe4d4cb28..736f1a824cd2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -1,24 +1,12 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _RTL_CORE_H
#define _RTL_CORE_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 157bcee34067..55d857926bba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1,17 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_core.h"
#include "rtl_dm.h"
#include "r8192E_hw.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index 52a4a1522bae..ea1b14bbcdcd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef __R8192UDM_H__
#define __R8192UDM_H__
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index e1d305d4fa20..59532ed2156d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
@@ -1,23 +1,12 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_core.h"
#include "rtl_eeprom.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index 6212e5eadede..66f1979bb1d5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
@@ -1,25 +1,12 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#define EPROM_DELAY 10
u32 rtl92e_eeprom_read(struct net_device *dev, u32 addr);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index f172f776245d..6ae7a67e767f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
@@ -1,23 +1,11 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************
+ * Contact Information: wlanfae <wlanfae@realtek.com>
*/
#include <linux/netdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index 2ff52e7dd32f..1d992d5c4e17 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -1,23 +1,12 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_pci.h"
#include "rtl_core.h"
@@ -44,12 +33,10 @@ static void _rtl92e_parse_pci_configuration(struct pci_dev *pdev,
bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- u16 VenderID;
u16 DeviceID;
u8 RevisionID;
u16 IrqLine;
- VenderID = pdev->vendor;
DeviceID = pdev->device;
RevisionID = pdev->revision;
pci_read_config_word(pdev, 0x3C, &IrqLine);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index 73d357d530d0..866e0efbc4fd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -1,23 +1,12 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _RTL_PCI_H
#define _RTL_PCI_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 81a68b0b4a7f..cd3e17b41d6f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -1,18 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8190P_rtl8256.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
index 03fe79ff5a1b..e58f2bcdb1dd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
@@ -1,18 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef R8192E_PM_H
#define R8192E_PM_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 9281116366d2..9475f8c6edf7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -1,23 +1,12 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtl_ps.h"
#include "rtl_core.h"
#include "r8192E_phy.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
index a46f4cffca23..70fe5d39be9a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
@@ -1,23 +1,12 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _RTL_PS_H
#define _RTL_PS_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 44e06cba7b7b..16bcee13f64b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -1,18 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include <linux/string.h>
#include "rtl_core.h"
#include "rtl_wx.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
index c313fb79de4d..d70a747ac1dd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
@@ -1,18 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
-
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef R819x_WX_H
#define R819x_WX_H
diff --git a/drivers/staging/rtl8192e/rtl819x_BA.h b/drivers/staging/rtl8192e/rtl819x_BA.h
index 978c9a54043e..8b6e4c26f0fb 100644
--- a/drivers/staging/rtl8192e/rtl819x_BA.h
+++ b/drivers/staging/rtl8192e/rtl819x_BA.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _BATYPE_H_
#define _BATYPE_H_
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 2d330d2bbf6d..816d31c1d5c7 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -1,17 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/etherdevice.h>
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index 24e86620c94c..11269fe6b395 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _RTL819XU_HTTYPE_H_
#define _RTL819XU_HTTYPE_H_
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index f0e11726a72a..f02263af9624 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -1,17 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtllib.h"
#include "rtl819x_HT.h"
u8 MCS_FILTER_ALL[16] = {
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 576241233a35..5073f9f40fdc 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
index 654c223030e3..9dc93d41939d 100644
--- a/drivers/staging/rtl8192e/rtl819x_TS.h
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -1,17 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _TSTYPE_H_
#define _TSTYPE_H_
#include "rtl819x_Qos.h"
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index f839d2447b85..672bf0987943 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -1,17 +1,9 @@
-/******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#include "rtllib.h"
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 61ebd12831c3..2dd57e88276e 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Merged with mainline rtllib.h in Aug 2004. Original ieee802_11
* remains copyright by the original authors
@@ -15,11 +16,6 @@
*
* Modified for Realtek's wi-fi cards by Andrea Merello
* <andrea.merello@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#ifndef RTLLIB_H
#define RTLLIB_H
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 55da8c9dfe50..8d2a58e706d5 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host AP crypt: host-based TKIP encryption implementation for Host AP driver
*
* Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#include <crypto/hash.h>
@@ -507,7 +503,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
int err;
desc->tfm = tfm_michael;
- desc->flags = 0;
if (crypto_shash_setkey(tfm_michael, key, 8))
return -1;
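The dropped desc->flags = 0; lines track a crypto-API change: struct shash_desc no longer carries a flags field, so synchronous hash users only set desc->tfm before hashing. A minimal sketch of a keyed digest under that API (function and parameter names here are illustrative, not taken from this driver):

#include <crypto/hash.h>

static int mic_digest(struct crypto_shash *tfm, const u8 *key,
		      const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;	/* nothing else to initialise in the descriptor */

	err = crypto_shash_setkey(tfm, key, 8);
	if (err)
		return err;

	err = crypto_shash_digest(desc, data, len, out);
	shash_desc_zero(desc);
	return err;
}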
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index d11ec39171d5..b1ea650036d2 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host AP crypt: host-based WEP encryption implementation for Host AP driver
*
* Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#include <crypto/skcipher.h>
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index 7b0e6e9c4456..9065901636f5 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -1,23 +1,9 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *****************************************************************************/
+ * Contact Information: wlanfae <wlanfae@realtek.com>
+ */
#ifndef _RTL_DEBUG_H
#define _RTL_DEBUG_H
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index cdf4c9060c51..bb13b1de2797 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -1,5 +1,5 @@
-/*******************************************************************************
- *
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2004 Intel Corporation. All rights reserved.
*
* Portions of this file are based on the WEP enablement code provided by the
@@ -8,23 +8,10 @@
* <jkmaline@cc.hut.fi>
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* James P. Ketrenos <ipw2100-admin@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+ */
#include <linux/compiler.h>
#include <linux/errno.h>
@@ -96,7 +83,7 @@ struct net_device *alloc_rtllib(int sizeof_priv)
return NULL;
}
ieee = (struct rtllib_device *)netdev_priv_rsl(dev);
- memset(ieee, 0, sizeof(struct rtllib_device)+sizeof_priv);
+ memset(ieee, 0, sizeof(struct rtllib_device) + sizeof_priv);
ieee->dev = dev;
err = rtllib_networks_allocate(ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index debc2e40af00..0c19ac2bc3bf 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Original code based Host AP (software wireless LAN access point) driver
* for Intersil Prism2/2.5/3 - hostap.o module, common routines
@@ -7,20 +8,11 @@
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright (c) 2004, Intel Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- ******************************************************************************
-
- Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andrea.merello@gmail.com>
-
- A special thanks goes to Realtek for their support !
-
-******************************************************************************/
-
-
+ * Few modifications for Realtek's Wi-Fi drivers by
+ * Andrea Merello <andrea.merello@gmail.com>
+ *
+ * A special thanks goes to Realtek for their support !
+ */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index ee275857868f..e29e8d6f4611 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* IEEE 802.11 SoftMAC layer
* Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
@@ -9,11 +10,7 @@
*
* WPA code stolen from the ipw2200 driver.
* Copyright who own it's copyright.
- *
- * released under the GPL
*/
-
-
#include "rtllib.h"
#include <linux/random.h>
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 5f1412fc410d..f89799d43b1b 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* IEEE 802.11 SoftMAC layer
* Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
@@ -9,11 +10,7 @@
*
* PS wx handler mostly stolen from hostap, copyright who
* own it's copyright ;-)
- *
- * released under the GPL
*/
-
-
#include <linux/etherdevice.h>
#include "rtllib.h"
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index d314b2f602e4..8cddb2e12dc4 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -1,32 +1,16 @@
-/******************************************************************************
- *
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* James P. Ketrenos <ipw2100-admin@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
- *****************************************************************************
- *
* Few modifications for Realtek's Wi-Fi drivers by
* Andrea Merello <andrea.merello@gmail.com>
*
* A special thanks goes to Realtek for their support !
- *
- *****************************************************************************/
-
+ */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 4f4904a300e0..beb40967936a 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -1,5 +1,5 @@
-/******************************************************************************
- *
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright(c) 2004 Intel Corporation. All rights reserved.
*
* Portions of this file are based on the WEP enablement code provided by the
@@ -8,23 +8,10 @@
* <jkmaline@cc.hut.fi>
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* James P. Ketrenos <ipw2100-admin@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
+ */
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/module.h>
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 97df6507a485..22c2165e8b1c 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config RTL8192U
tristate "RealTek RTL8192U Wireless LAN NIC driver"
depends on PCI && WLAN && USB
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 8aa536d79900..d36963469015 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Merged with mainline ieee80211.h in Aug 2004. Original ieee802_11
* remains copyright by the original authors
@@ -15,11 +16,6 @@
*
* Modified for Realtek's wi-fi cards by Andrea Merello
* <andrea.merello@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#ifndef IEEE80211_H
#define IEEE80211_H
@@ -296,7 +292,7 @@ struct cb_desc {
#define ieee80211_wx_get_encode_ext ieee80211_wx_get_encode_ext_rsl
-typedef struct ieee_param {
+struct ieee_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
union {
@@ -323,7 +319,7 @@ typedef struct ieee_param {
u8 key[0];
} crypt;
} u;
-} ieee_param;
+};
// linux under 2.6.9 release may not support it, so modify it for common use
@@ -1462,23 +1458,23 @@ struct tx_pending {
struct ieee80211_txb *txb;
};
-typedef struct _bandwidth_autoswitch {
+struct bandwidth_autoswitch {
long threshold_20Mhzto40Mhz;
long threshold_40Mhzto20Mhz;
bool bforced_tx20Mhz;
bool bautoswitch_enable;
-} bandwidth_autoswitch, *pbandwidth_autoswitch;
+};
//added by amy for order
#define REORDER_WIN_SIZE 128
#define REORDER_ENTRY_NUM 128
-typedef struct _RX_REORDER_ENTRY {
+struct rx_reorder_entry {
struct list_head List;
u16 SeqNum;
struct ieee80211_rxb *prxb;
-} RX_REORDER_ENTRY, *PRX_REORDER_ENTRY;
+};
//added by amy for order
typedef enum _Fsync_State {
Default_Fsync,
@@ -1506,9 +1502,9 @@ typedef enum _RT_JOIN_ACTION {
RT_NO_ACTION = 4,
} RT_JOIN_ACTION;
-typedef struct _IbssParms {
+struct ibss_parms {
u16 atimWin;
-} IbssParms, *PIbssParms;
+};
#define MAX_NUM_RATES 264 // Max num of support rates element: 8, Max num of ext. support rate: 255. 061122, by rcnjko.
// RF state.
@@ -1518,7 +1514,7 @@ typedef enum _RT_RF_POWER_STATE {
eRfOff
} RT_RF_POWER_STATE;
-typedef struct _RT_POWER_SAVE_CONTROL {
+struct rt_power_save_control {
//
// Inactive Power Save(IPS) : Disable RF when disconnected
@@ -1554,7 +1550,7 @@ typedef struct _RT_POWER_SAVE_CONTROL {
struct octet_string tmpSuppRateSet;
u8 tmpSuppRateBuf[MAX_NUM_RATES];
bool bTmpSuppRate;
- IbssParms tmpIbpm;
+ struct ibss_parms tmpIbpm;
bool bTmpIbpm;
//
@@ -1562,7 +1558,7 @@ typedef struct _RT_POWER_SAVE_CONTROL {
//
bool bLeisurePs;
-} RT_POWER_SAVE_CONTROL, *PRT_POWER_SAVE_CONTROL;
+};
typedef u32 RT_RF_CHANGE_SOURCE;
#define RF_CHANGE_BY_SW BIT(31)
@@ -1586,7 +1582,7 @@ typedef enum {
} country_code_type_t;
#define RT_MAX_LD_SLOT_NUM 10
-typedef struct _RT_LINK_DETECT_T {
+struct rt_link_detect {
u32 NumRecvBcnInPeriod;
u32 NumRecvDataInPeriod;
@@ -1599,7 +1595,7 @@ typedef struct _RT_LINK_DETECT_T {
u32 NumTxOkInPeriod;
u32 NumRxOkInPeriod;
bool bBusyTraffic;
-} RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
+};
struct ieee80211_device {
@@ -1654,7 +1650,7 @@ struct ieee80211_device {
struct list_head Rx_TS_Unused_List;
struct rx_ts_record RxTsRecord[TOTAL_TS_NUM];
//#ifdef TO_DO_LIST
- RX_REORDER_ENTRY RxReorderEntry[128];
+ struct rx_reorder_entry RxReorderEntry[128];
struct list_head RxReorder_Unused_List;
//#endif
// Qos related. Added by Annie, 2005-11-01.
@@ -1871,14 +1867,14 @@ struct ieee80211_device {
Fsync_State fsync_state;
bool bis_any_nonbepkts;
//20Mhz 40Mhz AutoSwitch Threshold
- bandwidth_autoswitch bandwidth_auto_switch;
+ struct bandwidth_autoswitch bandwidth_auto_switch;
//for txpower tracking
bool FwRWRF;
//added by amy for AP roaming
- RT_LINK_DETECT_T LinkDetectInfo;
+ struct rt_link_detect LinkDetectInfo;
//added by amy for ps
- RT_POWER_SAVE_CONTROL PowerSaveControl;
+ struct rt_power_save_control PowerSaveControl;
//}
/* used if IEEE_SOFTMAC_TX_QUEUE is set */
struct tx_pending tx_pending;
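All of the ieee80211.h hunks above apply one coding-style rule: drop the Windows-style typedef and its Hungarian pointer alias and refer to the type by its struct tag. The pattern, shown on one of the converted types:

/* before: typedef plus pointer alias */
typedef struct _RX_REORDER_ENTRY {
	struct list_head List;
	u16 SeqNum;
	struct ieee80211_rxb *prxb;
} RX_REORDER_ENTRY, *PRX_REORDER_ENTRY;

/* after: plain tagged struct; call sites use "struct rx_reorder_entry *" */
struct rx_reorder_entry {
	struct list_head List;
	u16 SeqNum;
	struct ieee80211_rxb *prxb;
};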
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index 6f457812e5a3..36987fccac5d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host AP crypto routines
*
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
index 1f2aea7e0e55..d3bd5598b25b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Original code based on Host AP (software wireless LAN access point) driver
* for Intersil Prism2/2.5/3.
@@ -10,11 +11,6 @@
* <jketreno@linux.intel.com>
*
* Copyright (c) 2004, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
/*
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index 3534ddb900d1..d7188b3f3190 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host AP crypt: host-based CCMP encryption implementation for Host AP driver
*
* Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#include <linux/module.h>
@@ -216,7 +212,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[5];
*pos++ = key->tx_pn[4];
*pos++ = 0;
- *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
+ *pos++ = (key->key_idx << 6) | BIT(5) /* Ext IV included */;
*pos++ = key->tx_pn[3];
*pos++ = key->tx_pn[2];
*pos++ = key->tx_pn[1];
@@ -274,7 +270,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
hdr = (struct rtl_80211_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
- if (!(keyidx & (1 << 5))) {
+ if (!(keyidx & BIT(5))) {
if (net_ratelimit()) {
netdev_dbg(skb->dev, "CCMP: received packet without ExtIV flag from %pM\n",
hdr->addr2);
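The (1 << n) to BIT(n) substitutions use the BIT() macro from <linux/bits.h>, which expands to an unsigned long 1 shifted left by n; for single-byte flag fields such as the CCMP Ext IV bit the two spellings produce the same value. A small self-contained sketch (helper names are hypothetical):

#include <linux/bits.h>
#include <linux/types.h>

static inline u8 ccmp_keyid_octet(u8 key_idx)
{
	/* key index in bits 7..6, Ext IV flag in bit 5; BIT(5) == (1 << 5) */
	return (key_idx << 6) | BIT(5);
}

static inline bool ccmp_has_ext_iv(u8 keyid_octet)
{
	return keyid_octet & BIT(5);
}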
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 829fa4bd253c..0927b2b15151 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host AP crypt: host-based TKIP encryption implementation for Host AP driver
*
* Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#include <linux/module.h>
@@ -331,7 +327,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = rc4key[2];
}
- *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
+ *pos++ = (tkey->key_idx << 6) | BIT(5) /* Ext IV included */;
*pos++ = tkey->tx_iv32 & 0xff;
*pos++ = (tkey->tx_iv32 >> 8) & 0xff;
*pos++ = (tkey->tx_iv32 >> 16) & 0xff;
@@ -390,7 +386,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
hdr = (struct rtl_80211_hdr_4addr *) skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
- if (!(keyidx & (1 << 5))) {
+ if (!(keyidx & BIT(5))) {
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: received packet without ExtIV"
" flag from %pM\n", hdr->addr2);
@@ -503,7 +499,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
int err;
desc->tfm = tfm_michael;
- desc->flags = 0;
if (crypto_shash_setkey(tfm_michael, key, 8))
return -1;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index d4a1bf0caa7a..805493a0870d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host AP crypt: host-based WEP encryption implementation for Host AP driver
*
* Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 5147f7c01e31..0e762e559675 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Original code based Host AP (software wireless LAN access point) driver
* for Intersil Prism2/2.5/3 - hostap.o module, common routines
@@ -6,11 +7,6 @@
* <jkmaline@cc.hut.fi>
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright (c) 2004, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
@@ -151,7 +147,7 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
* should have already been received */
entry = ieee80211_frag_cache_find(ieee, seq, frag, tid,hdr->addr2,
hdr->addr1);
- if (entry != NULL) {
+ if (entry) {
entry->last_frag = frag;
skb = entry->skb;
}
@@ -190,7 +186,7 @@ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
entry = ieee80211_frag_cache_find(ieee, seq, -1, tid, hdr->addr2,
hdr->addr1);
- if (entry == NULL) {
+ if (!entry) {
IEEE80211_DEBUG_FRAG(
"could not invalidate fragment cache "
"entry (seq=%u)\n", seq);
@@ -341,7 +337,7 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
struct rtl_80211_hdr_4addr *hdr;
int res, hdrlen;
- if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
+ if (!crypt || !crypt->ops->decrypt_mpdu)
return 0;
if (ieee->hwsec_active)
{
@@ -388,7 +384,7 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
struct rtl_80211_hdr_4addr *hdr;
int res, hdrlen;
- if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
+ if (!crypt || !crypt->ops->decrypt_msdu)
return 0;
if (ieee->hwsec_active)
{
@@ -508,23 +504,17 @@ drop:
return 1;
}
-static bool AddReorderEntry(struct rx_ts_record *pTS, PRX_REORDER_ENTRY pReorderEntry)
+static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry *pReorderEntry)
{
struct list_head *pList = &pTS->rx_pending_pkt_list;
while(pList->next != &pTS->rx_pending_pkt_list)
{
- if( SN_LESS(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
- {
+ if (SN_LESS(pReorderEntry->SeqNum, list_entry(pList->next, struct rx_reorder_entry, List)->SeqNum))
pList = pList->next;
- }
- else if( SN_EQUAL(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
- {
+ else if (SN_EQUAL(pReorderEntry->SeqNum, list_entry(pList->next, struct rx_reorder_entry, List)->SeqNum))
return false;
- }
else
- {
break;
- }
}
pReorderEntry->List.next = pList->next;
pReorderEntry->List.next->prev = &pReorderEntry->List;
@@ -589,7 +579,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
struct rx_ts_record *pTS, u16 SeqNum)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- PRX_REORDER_ENTRY pReorderEntry = NULL;
+ struct rx_reorder_entry *pReorderEntry = NULL;
struct ieee80211_rxb **prxbIndicateArray;
u8 WinSize = pHTInfo->RxReorderWinSize;
u16 WinEnd = (pTS->rx_indicate_seq + WinSize - 1) % 4096;
@@ -604,9 +594,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
return;
/* Rx Reorder initialize condition.*/
- if (pTS->rx_indicate_seq == 0xffff) {
+ if (pTS->rx_indicate_seq == 0xffff)
pTS->rx_indicate_seq = SeqNum;
- }
/* Drop out the packet which SeqNum is smaller than WinStart */
if (SN_LESS(SeqNum, pTS->rx_indicate_seq)) {
@@ -663,7 +652,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
/* Current packet is going to be inserted into pending list.*/
//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): We RX no ordered packed, insert to ordered list\n",__func__);
if(!list_empty(&ieee->RxReorder_Unused_List)) {
- pReorderEntry = (PRX_REORDER_ENTRY)list_entry(ieee->RxReorder_Unused_List.next,RX_REORDER_ENTRY,List);
+ pReorderEntry = list_entry(ieee->RxReorder_Unused_List.next, struct rx_reorder_entry, List);
list_del_init(&pReorderEntry->List);
/* Make a reorder entry and insert into a the packet list.*/
@@ -709,7 +698,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
/* Check if there is any packet need indicate.*/
while(!list_empty(&pTS->rx_pending_pkt_list)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__func__);
- pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->rx_pending_pkt_list.prev,RX_REORDER_ENTRY,List);
+ pReorderEntry = list_entry(pTS->rx_pending_pkt_list.prev, struct rx_reorder_entry, List);
if (SN_LESS(pReorderEntry->SeqNum, pTS->rx_indicate_seq) ||
SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
{
@@ -989,8 +978,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* allow NULL decrypt to indicate an station specific override
* for default encryption */
- if (crypt && (crypt->ops == NULL ||
- crypt->ops->decrypt_mpdu == NULL))
+ if (crypt && (!crypt->ops || !crypt->ops->decrypt_mpdu))
crypt = NULL;
if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
@@ -1291,7 +1279,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
}
//added by amy for reorder
- if (!ieee->pHTInfo->bCurRxReorderEnable || pTS == NULL){
+ if (!ieee->pHTInfo->bCurRxReorderEnable || !pTS) {
//added by amy for reorder
for(i = 0; i<rxb->nr_subframes; i++) {
struct sk_buff *sub_skb = rxb->subframes[i];
@@ -1427,9 +1415,9 @@ static int ieee80211_read_qos_info_element(struct
int ret = 0;
u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
- if (element_info == NULL)
+ if (!element_info)
return -1;
- if (info_element == NULL)
+ if (!info_element)
return -1;
if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
@@ -2405,22 +2393,22 @@ static inline void ieee80211_process_probe_response(
"'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
escape_essid(info_element->data, info_element->len),
beacon->header.addr3,
- (capability & (1 << 0xf)) ? '1' : '0',
- (capability & (1 << 0xe)) ? '1' : '0',
- (capability & (1 << 0xd)) ? '1' : '0',
- (capability & (1 << 0xc)) ? '1' : '0',
- (capability & (1 << 0xb)) ? '1' : '0',
- (capability & (1 << 0xa)) ? '1' : '0',
- (capability & (1 << 0x9)) ? '1' : '0',
- (capability & (1 << 0x8)) ? '1' : '0',
- (capability & (1 << 0x7)) ? '1' : '0',
- (capability & (1 << 0x6)) ? '1' : '0',
- (capability & (1 << 0x5)) ? '1' : '0',
- (capability & (1 << 0x4)) ? '1' : '0',
- (capability & (1 << 0x3)) ? '1' : '0',
- (capability & (1 << 0x2)) ? '1' : '0',
- (capability & (1 << 0x1)) ? '1' : '0',
- (capability & (1 << 0x0)) ? '1' : '0');
+ (capability & BIT(0xf)) ? '1' : '0',
+ (capability & BIT(0xe)) ? '1' : '0',
+ (capability & BIT(0xd)) ? '1' : '0',
+ (capability & BIT(0xc)) ? '1' : '0',
+ (capability & BIT(0xb)) ? '1' : '0',
+ (capability & BIT(0xa)) ? '1' : '0',
+ (capability & BIT(0x9)) ? '1' : '0',
+ (capability & BIT(0x8)) ? '1' : '0',
+ (capability & BIT(0x7)) ? '1' : '0',
+ (capability & BIT(0x6)) ? '1' : '0',
+ (capability & BIT(0x5)) ? '1' : '0',
+ (capability & BIT(0x4)) ? '1' : '0',
+ (capability & BIT(0x3)) ? '1' : '0',
+ (capability & BIT(0x2)) ? '1' : '0',
+ (capability & BIT(0x1)) ? '1' : '0',
+ (capability & BIT(0x0)) ? '1' : '0');
if (ieee80211_network_init(ieee, beacon, network, stats)) {
IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n",
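With the RX_REORDER_ENTRY typedef gone, the reorder-queue code above walks its pending list with list_entry() on the struct tag directly; list_entry() is container_of(), so no explicit cast is needed. A minimal sketch of the traversal pattern (list and field names taken from the hunks above):

	struct rx_reorder_entry *entry;

	while (!list_empty(&pTS->rx_pending_pkt_list)) {
		entry = list_entry(pTS->rx_pending_pkt_list.prev,
				   struct rx_reorder_entry, List);
		list_del_init(&entry->List);
		/* indicate entry->prxb, then return entry to the unused pool */
	}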
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 8635faf84316..944c8894f9ff 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* IEEE 802.11 SoftMAC layer
* Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
@@ -9,8 +10,6 @@
*
* WPA code stolen from the ipw2200 driver.
* Copyright who own it's copyright.
- *
- * released under the GPL
*/
#include "ieee80211.h"
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 81020fbcdc20..aab1586fe0dd 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* IEEE 802.11 SoftMAC layer
* Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
@@ -9,8 +10,6 @@
*
* PS wx handler mostly stolen from hostap, copyright who
* own it's copyright ;-)
- *
- * released under the GPL
*/
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 024fa2702546..8e1ec4409b4f 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -1,23 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* James P. Ketrenos <ipw2100-admin@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -318,17 +303,17 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
return;
//check packet and mode later
#ifdef TO_DO_LIST
- if(pTcb->PacketLength >= 4096)
+ if (pTcb->PacketLength >= 4096)
return;
// For RTL819X, if pairwisekey = wep/tkip, we don't aggrregation.
- if(!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
+ if (!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
return;
#endif
- if(!ieee->GetNmodeSupportBySecCfg(ieee->dev))
+ if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
{
return;
}
- if(pHTInfo->bCurrentAMPDUEnable)
+ if (pHTInfo->bCurrentAMPDUEnable)
{
if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
{
@@ -398,18 +383,18 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_
tcb_desc->bUseShortGI = false;
- if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
+ if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
return;
- if(pHTInfo->bForcedShortGI)
+ if (pHTInfo->bForcedShortGI)
{
tcb_desc->bUseShortGI = true;
return;
}
- if((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
+ if ((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
tcb_desc->bUseShortGI = true;
- else if((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
+ else if ((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
tcb_desc->bUseShortGI = true;
}
@@ -420,13 +405,13 @@ static void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee,
tcb_desc->bPacketBW = false;
- if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
+ if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
return;
- if(tcb_desc->bMulticast || tcb_desc->bBroadcast)
+ if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
return;
- if((tcb_desc->data_rate & 0x80)==0) // If using legacy rate, it shall use 20MHz channel.
+ if ((tcb_desc->data_rate & 0x80)==0) // If using legacy rate, it shall use 20MHz channel.
return;
//BandWidthAutoSwitch is for auto switch to 20 or 40 in long distance
if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
@@ -851,7 +836,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
else
ieee->seq_ctrl[0]++;
}
- }else{
+ } else {
if (unlikely(skb->len < sizeof(struct rtl_80211_hdr_3addr))) {
printk(KERN_WARNING "%s: skb too small (%d).\n",
ieee->dev->name, skb->len);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index fa59c712c74b..dead134f6de0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
Copyright(c) 2004 Intel Corporation. All rights reserved.
@@ -8,22 +9,6 @@
<jkmaline@cc.hut.fi>
Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc., 59
- Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
Contact Information:
James P. Ketrenos <ipw2100-admin@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -86,7 +71,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) {
- if(network->mode&(1<<i)) {
+ if (network->mode & BIT(i)) {
sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size);
pname +=ieee80211_modes[i].mode_size;
}
@@ -195,7 +180,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
- if (ieee->wpa_enabled && network->wpa_ie_len){
+ if (ieee->wpa_enabled && network->wpa_ie_len) {
char buf[MAX_WPA_IE_LEN * 2 + 30];
// printk("WPA IE\n");
u8 *p = buf;
@@ -210,7 +195,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
- if (ieee->wpa_enabled && network->rsn_ie_len){
+ if (ieee->wpa_enabled && network->rsn_ie_len) {
char buf[MAX_WPA_IE_LEN * 2 + 30];
u8 *p = buf;
@@ -394,9 +379,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
sec.key_sizes[key] = len;
(*crypt)->ops->set_key(sec.keys[key], len, NULL,
(*crypt)->priv);
- sec.flags |= (1 << key);
+ sec.flags |= BIT(key);
/* This ensures a key will be activated if no key is
- * explicitely set */
+ * explicitly set
+ */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
ieee->tx_keyidx = key;
@@ -415,7 +401,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
(*crypt)->ops->set_key(sec.keys[key], 13, NULL,
(*crypt)->priv);
sec.key_sizes[key] = 13;
- sec.flags |= (1 << key);
+ sec.flags |= BIT(key);
}
/* No key data - just set the default TX key index */
@@ -636,7 +622,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
if (ext->alg != IW_ENCODE_ALG_NONE) {
//memcpy(sec.keys[idx], ext->key, ext->key_len);
sec.key_sizes[idx] = ext->key_len;
- sec.flags |= (1 << idx);
+ sec.flags |= BIT(idx);
if (ext->alg == IW_ENCODE_ALG_WEP) {
// sec.encode_alg[idx] = SEC_ALG_WEP;
sec.flags |= SEC_LEVEL;
@@ -766,15 +752,13 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
case IW_AUTH_80211_AUTH_ALG:
//printk("======>%s():data->value is %d\n",__func__,data->value);
// ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM)?1:0;
- if(data->value & IW_AUTH_ALG_SHARED_KEY){
+ if (data->value & IW_AUTH_ALG_SHARED_KEY) {
ieee->open_wep = 0;
ieee->auth_mode = 1;
- }
- else if(data->value & IW_AUTH_ALG_OPEN_SYSTEM){
+ } else if (data->value & IW_AUTH_ALG_OPEN_SYSTEM) {
ieee->open_wep = 1;
ieee->auth_mode = 0;
- }
- else if(data->value & IW_AUTH_ALG_LEAP){
+ } else if (data->value & IW_AUTH_ALG_LEAP) {
ieee->open_wep = 1;
ieee->auth_mode = 2;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 109445407cec..53869b3c985c 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -108,7 +108,7 @@ void ResetBaEntry(struct ba_record *pBA)
static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, struct ba_record *pBA, u16 StatusCode, u8 type)
{
struct sk_buff *skb = NULL;
- struct rtl_80211_hdr_3addr *BAReq = NULL;
+ struct rtl_80211_hdr_3addr *BAReq = NULL;
u8 *tag = NULL;
u16 len = ieee->tx_headroom + 9;
//category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) + BA Timeout Value(2) + BA Start SeqCtrl(2)(or StatusCode(2))
@@ -118,10 +118,8 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, s
return NULL;
}
skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
- if (!skb) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
+ if (!skb)
return NULL;
- }
memset(skb->data, 0, sizeof(struct rtl_80211_hdr_3addr)); //I wonder whether it's necessary. Apparently kernel will not do it when alloc a skb.
skb_reserve(skb, ieee->tx_headroom);
@@ -189,7 +187,7 @@ static struct sk_buff *ieee80211_DELBA(
{
union delba_param_set DelbaParamSet;
struct sk_buff *skb = NULL;
- struct rtl_80211_hdr_3addr *Delba = NULL;
+ struct rtl_80211_hdr_3addr *Delba = NULL;
u8 *tag = NULL;
//len = head len + DELBA Parameter Set(2) + Reason Code(2)
u16 len = 6 + ieee->tx_headroom;
@@ -205,10 +203,8 @@ static struct sk_buff *ieee80211_DELBA(
DelbaParamSet.field.tid = pBA->param_set.field.tid;
skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
- if (!skb) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
+ if (!skb)
return NULL;
- }
// memset(skb->data, 0, len+sizeof( struct rtl_80211_hdr_3addr));
skb_reserve(skb, ieee->tx_headroom);
@@ -318,7 +314,7 @@ static void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst,
********************************************************************************************************************/
int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
- struct rtl_80211_hdr_3addr *req = NULL;
+ struct rtl_80211_hdr_3addr *req = NULL;
u16 rc = 0;
u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
struct ba_record *pBA = NULL;
@@ -388,9 +384,9 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
pBA->start_seq_ctrl = *pBaStartSeqCtrl;
//for half N mode we only aggregate 1 frame
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- pBA->param_set.field.buffer_size = 1;
+ pBA->param_set.field.buffer_size = 1;
else
- pBA->param_set.field.buffer_size = 32;
+ pBA->param_set.field.buffer_size = 32;
ActivateBAEntry(ieee, pBA, pBA->timeout_value);
ieee80211_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);
@@ -418,7 +414,7 @@ OnADDBAReq_Fail:
********************************************************************************************************************/
int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
{
- struct rtl_80211_hdr_3addr *rsp = NULL;
+ struct rtl_80211_hdr_3addr *rsp = NULL;
struct ba_record *pPendingBA, *pAdmittedBA;
struct tx_ts_record *pTS = NULL;
u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
@@ -542,7 +538,7 @@ OnADDBARsp_Reject:
********************************************************************************************************************/
int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
{
- struct rtl_80211_hdr_3addr *delba = NULL;
+ struct rtl_80211_hdr_3addr *delba = NULL;
union delba_param_set *pDelBaParamSet = NULL;
u8 *dst = NULL;
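These hunks also drop the IEEE80211_DEBUG() prints on dev_alloc_skb() failure: allocation failures already log a kernel warning unless __GFP_NOWARN is passed, so the idiomatic form is a bare NULL check, roughly:

	skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr));
	if (!skb)
		return NULL;	/* the allocator has already warned */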
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
index 64d5359cf7e2..b7769bca9740 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
@@ -107,13 +107,13 @@ typedef struct _HT_INFORMATION_ELE {
typedef enum _HT_SPEC_VER {
HT_SPEC_VER_IEEE = 0,
HT_SPEC_VER_EWC = 1,
-}HT_SPEC_VER, *PHT_SPEC_VER;
+} HT_SPEC_VER, *PHT_SPEC_VER;
typedef enum _HT_AGGRE_MODE_E {
HT_AGG_AUTO = 0,
HT_AGG_FORCE_ENABLE = 1,
HT_AGG_FORCE_DISABLE = 2,
-}HT_AGGRE_MODE_E, *PHT_AGGRE_MODE_E;
+} HT_AGGRE_MODE_E, *PHT_AGGRE_MODE_E;
/*
* The Data structure is used to keep HT related variables when card is
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index c76715ffa08b..7cac668bfb0b 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -28,7 +28,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
struct rx_ts_record *pRxTs = from_timer(pRxTs, t, rx_pkt_pending_timer);
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
- PRX_REORDER_ENTRY pReorderEntry = NULL;
+ struct rx_reorder_entry *pReorderEntry = NULL;
//u32 flags = 0;
unsigned long flags = 0;
@@ -37,18 +37,18 @@ static void RxPktPendingTimeout(struct timer_list *t)
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "==================>%s()\n", __func__);
- if(pRxTs->rx_timeout_indicate_seq != 0xffff) {
+ if (pRxTs->rx_timeout_indicate_seq != 0xffff) {
// Indicate the pending packets sequentially according to SeqNum until meet the gap.
- while(!list_empty(&pRxTs->rx_pending_pkt_list)) {
- pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTs->rx_pending_pkt_list.prev, RX_REORDER_ENTRY, List);
- if(index == 0)
+ while (!list_empty(&pRxTs->rx_pending_pkt_list)) {
+ pReorderEntry = list_entry(pRxTs->rx_pending_pkt_list.prev, struct rx_reorder_entry, List);
+ if (index == 0)
pRxTs->rx_indicate_seq = pReorderEntry->SeqNum;
- if( SN_LESS(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq) ) {
+ if (SN_LESS(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq) ||
+ SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq)) {
list_del_init(&pReorderEntry->List);
- if(SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq))
+ if (SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq))
pRxTs->rx_indicate_seq = (pRxTs->rx_indicate_seq + 1) % 4096;
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "RxPktPendingTimeout(): IndicateSeq: %d\n", pReorderEntry->SeqNum);
@@ -63,12 +63,12 @@ static void RxPktPendingTimeout(struct timer_list *t)
}
}
- if(index>0) {
+ if (index > 0) {
// Set rx_timeout_indicate_seq to 0xffff to indicate no pending packets in buffer now.
pRxTs->rx_timeout_indicate_seq = 0xffff;
// Indicate packets
- if(index > REORDER_WIN_SIZE) {
+ if (index > REORDER_WIN_SIZE) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
return;
@@ -76,7 +76,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
ieee80211_indicate_packets(ieee, ieee->stats_IndicateArray, index);
}
- if(bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
+ if (bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq;
mod_timer(&pRxTs->rx_pkt_pending_timer,
jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime));
@@ -133,7 +133,7 @@ void TSInitialize(struct ieee80211_device *ieee)
{
struct tx_ts_record *pTxTS = ieee->TxTsRecord;
struct rx_ts_record *pRxTS = ieee->RxTsRecord;
- PRX_REORDER_ENTRY pRxReorderEntry = ieee->RxReorderEntry;
+ struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry;
u8 count = 0;
IEEE80211_DEBUG(IEEE80211_DL_TS, "==========>%s()\n", __func__);
// Initialize Tx TS related info.
@@ -141,7 +141,7 @@ void TSInitialize(struct ieee80211_device *ieee)
INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List);
INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List);
- for(count = 0; count < TOTAL_TS_NUM; count++) {
+ for (count = 0; count < TOTAL_TS_NUM; count++) {
//
pTxTS->num = count;
// The timers for the operation of Traffic Stream and Block Ack.
@@ -164,7 +164,7 @@ void TSInitialize(struct ieee80211_device *ieee)
INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List);
INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
- for(count = 0; count < TOTAL_TS_NUM; count++) {
+ for (count = 0; count < TOTAL_TS_NUM; count++) {
pRxTS->num = count;
INIT_LIST_HEAD(&pRxTS->rx_pending_pkt_list);
timer_setup(&pRxTS->ts_common_info.setup_timer, TsSetupTimeOut,
@@ -181,9 +181,9 @@ void TSInitialize(struct ieee80211_device *ieee)
// Initialize unused Rx Reorder List.
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
//#ifdef TO_DO_LIST
- for(count = 0; count < REORDER_ENTRY_NUM; count++) {
- list_add_tail( &pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
- if(count == (REORDER_ENTRY_NUM-1))
+ for (count = 0; count < REORDER_ENTRY_NUM; count++) {
+ list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List);
+ if (count == (REORDER_ENTRY_NUM-1))
break;
pRxReorderEntry = &ieee->RxReorderEntry[count+1];
}
@@ -196,7 +196,7 @@ static void AdmitTS(struct ieee80211_device *ieee,
del_timer_sync(&pTsCommonInfo->setup_timer);
del_timer_sync(&pTsCommonInfo->inact_timer);
- if(InactTime!=0)
+ if (InactTime != 0)
mod_timer(&pTsCommonInfo->inact_timer,
jiffies + msecs_to_jiffies(InactTime));
}
@@ -211,54 +211,54 @@ static struct ts_common_info *SearchAdmitTRStream(struct ieee80211_device *ieee,
bool search_dir[4] = {0};
struct list_head *psearch_list; //FIXME
struct ts_common_info *pRet = NULL;
- if(ieee->iw_mode == IW_MODE_MASTER) { //ap mode
- if(TxRxSelect == TX_DIR) {
+ if (ieee->iw_mode == IW_MODE_MASTER) { //ap mode
+ if (TxRxSelect == TX_DIR) {
search_dir[DIR_DOWN] = true;
- search_dir[DIR_BI_DIR]= true;
+ search_dir[DIR_BI_DIR] = true;
} else {
search_dir[DIR_UP] = true;
- search_dir[DIR_BI_DIR]= true;
+ search_dir[DIR_BI_DIR] = true;
}
- } else if(ieee->iw_mode == IW_MODE_ADHOC) {
- if(TxRxSelect == TX_DIR)
+ } else if (ieee->iw_mode == IW_MODE_ADHOC) {
+ if (TxRxSelect == TX_DIR)
search_dir[DIR_UP] = true;
else
search_dir[DIR_DOWN] = true;
} else {
- if(TxRxSelect == TX_DIR) {
+ if (TxRxSelect == TX_DIR) {
search_dir[DIR_UP] = true;
- search_dir[DIR_BI_DIR]= true;
- search_dir[DIR_DIRECT]= true;
+ search_dir[DIR_BI_DIR] = true;
+ search_dir[DIR_DIRECT] = true;
} else {
search_dir[DIR_DOWN] = true;
- search_dir[DIR_BI_DIR]= true;
- search_dir[DIR_DIRECT]= true;
+ search_dir[DIR_BI_DIR] = true;
+ search_dir[DIR_DIRECT] = true;
}
}
- if(TxRxSelect == TX_DIR)
+ if (TxRxSelect == TX_DIR)
psearch_list = &ieee->Tx_TS_Admit_List;
else
psearch_list = &ieee->Rx_TS_Admit_List;
//for(dir = DIR_UP; dir <= DIR_BI_DIR; dir++)
- for(dir = 0; dir <= DIR_BI_DIR; dir++) {
+ for (dir = 0; dir <= DIR_BI_DIR; dir++) {
if (!search_dir[dir])
continue;
- list_for_each_entry(pRet, psearch_list, list){
+ list_for_each_entry(pRet, psearch_list, list) {
// IEEE80211_DEBUG(IEEE80211_DL_TS, "ADD:%pM, TID:%d, dir:%d\n", pRet->Addr, pRet->TSpec.ts_info.ucTSID, pRet->TSpec.ts_info.ucDirection);
if (memcmp(pRet->addr, Addr, 6) == 0)
if (pRet->t_spec.ts_info.uc_tsid == TID)
- if(pRet->t_spec.ts_info.uc_direction == dir) {
+ if (pRet->t_spec.ts_info.uc_direction == dir) {
// printk("Bingo! got it\n");
break;
}
}
- if(&pRet->list != psearch_list)
+ if (&pRet->list != psearch_list)
break;
}
- if(&pRet->list != psearch_list)
+ if (&pRet->list != psearch_list)
return pRet ;
else
return NULL;
@@ -270,15 +270,15 @@ static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
{
u8 count;
- if(pTsCommonInfo == NULL)
+ if (pTsCommonInfo == NULL)
return;
memcpy(pTsCommonInfo->addr, Addr, 6);
- if(pTSPEC != NULL)
+ if (pTSPEC != NULL)
memcpy((u8 *)(&(pTsCommonInfo->t_spec)), (u8 *)pTSPEC, sizeof(struct tspec_body));
- for(count = 0; count < TCLAS_Num; count++)
+ for (count = 0; count < TCLAS_Num; count++)
memcpy((u8 *)(&(pTsCommonInfo->t_class[count])), (u8 *)pTCLAS, sizeof(union qos_tclas));
pTsCommonInfo->t_clas_proc = TCLAS_Proc;
@@ -342,7 +342,7 @@ bool GetTs(
Addr,
UP,
TxRxSelect);
- if(*ppTS != NULL) {
+ if (*ppTS != NULL) {
return true;
} else {
if (!bAddNewTs) {
@@ -357,23 +357,23 @@ bool GetTs(
struct tspec_body TSpec;
struct qos_tsinfo *pTSInfo = &TSpec.ts_info;
struct list_head *pUnusedList =
- (TxRxSelect == TX_DIR)?
- (&ieee->Tx_TS_Unused_List):
+ (TxRxSelect == TX_DIR) ?
+ (&ieee->Tx_TS_Unused_List) :
(&ieee->Rx_TS_Unused_List);
struct list_head *pAddmitList =
- (TxRxSelect == TX_DIR)?
- (&ieee->Tx_TS_Admit_List):
+ (TxRxSelect == TX_DIR) ?
+ (&ieee->Tx_TS_Admit_List) :
(&ieee->Rx_TS_Admit_List);
- enum direction_value Dir = (ieee->iw_mode == IW_MODE_MASTER)?
- ((TxRxSelect==TX_DIR)?DIR_DOWN:DIR_UP):
- ((TxRxSelect==TX_DIR)?DIR_UP:DIR_DOWN);
+ enum direction_value Dir = (ieee->iw_mode == IW_MODE_MASTER) ?
+ ((TxRxSelect == TX_DIR)?DIR_DOWN:DIR_UP) :
+ ((TxRxSelect == TX_DIR)?DIR_UP:DIR_DOWN);
IEEE80211_DEBUG(IEEE80211_DL_TS, "to add Ts\n");
- if(!list_empty(pUnusedList)) {
+ if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next, struct ts_common_info, list);
list_del_init(&(*ppTS)->list);
- if(TxRxSelect==TX_DIR) {
+ if (TxRxSelect == TX_DIR) {
struct tx_ts_record *tmp = container_of(*ppTS, struct tx_ts_record, ts_common_info);
ResetTxTsEntry(tmp);
} else {
@@ -416,17 +416,17 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, struct ts_common_info *
del_timer_sync(&pTs->inact_timer);
TsInitDelBA(ieee, pTs, TxRxSelect);
- if(TxRxSelect == RX_DIR) {
+ if (TxRxSelect == RX_DIR) {
//#ifdef TO_DO_LIST
- PRX_REORDER_ENTRY pRxReorderEntry;
+ struct rx_reorder_entry *pRxReorderEntry;
struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
- if(timer_pending(&pRxTS->rx_pkt_pending_timer))
+ if (timer_pending(&pRxTS->rx_pkt_pending_timer))
del_timer_sync(&pRxTS->rx_pkt_pending_timer);
- while(!list_empty(&pRxTS->rx_pending_pkt_list)) {
+ while (!list_empty(&pRxTS->rx_pending_pkt_list)) {
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
//pRxReorderEntry = list_entry(&pRxTS->rx_pending_pkt_list.prev,RX_REORDER_ENTRY,List);
- pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->rx_pending_pkt_list.prev, RX_REORDER_ENTRY, List);
+ pRxReorderEntry = list_entry(pRxTS->rx_pending_pkt_list.prev, struct rx_reorder_entry, List);
list_del_init(&pRxReorderEntry->List);
{
int i = 0;
@@ -435,13 +435,13 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, struct ts_common_info *
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
return;
}
- for(i =0; i < prxb->nr_subframes; i++)
+ for (i = 0; i < prxb->nr_subframes; i++)
dev_kfree_skb(prxb->subframes[i]);
kfree(prxb);
prxb = NULL;
}
- list_add_tail(&pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
+ list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List);
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
@@ -522,9 +522,9 @@ void RemoveAllTS(struct ieee80211_device *ieee)
void TsStartAddBaProcess(struct ieee80211_device *ieee, struct tx_ts_record *pTxTS)
{
- if(!pTxTS->add_ba_req_in_progress) {
+ if (!pTxTS->add_ba_req_in_progress) {
pTxTS->add_ba_req_in_progress = true;
- if(pTxTS->add_ba_req_delayed) {
+ if (pTxTS->add_ba_req_delayed) {
IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n");
mod_timer(&pTxTS->ts_add_ba_timer,
jiffies + msecs_to_jiffies(TS_ADDBA_DELAY));
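The RemoveTsEntry hunk above drops the old PRX_REORDER_ENTRY typedef cast in favour of a typed list_entry() call. A minimal user-space sketch of how list_entry()/container_of() recover the enclosing structure from an embedded list node, mirroring the classic kernel definitions; the struct and field names here are illustrative only, not taken from the driver.

    #include <assert.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    /* Classic offsetof-based definitions, mirroring the kernel helpers. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    struct rx_reorder_entry {
            int seq;
            struct list_head List;
    };

    int main(void)
    {
            struct rx_reorder_entry e = { .seq = 42 };
            struct list_head *node = &e.List;

            /* Typed list_entry() replaces the old explicit pointer cast. */
            struct rx_reorder_entry *back =
                    list_entry(node, struct rx_reorder_entry, List);

            assert(back == &e && back->seq == 42);
            return 0;
    }
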
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index c414efc0662e..de83daa0c9ed 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains card eeprom (93c46 or 93c56) programming routines;
 * memory is addressed by 16-bit words.
*
* This is part of rtl8180 OpenSource driver.
* Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
- * Released under the terms of GPL (General Public Licence)
*
* Parts of this driver are based on the GPL part of the
* official realtek driver.
@@ -125,21 +125,21 @@ int eprom_read(struct net_device *dev, u32 addr)
if (priv->epromtype == EPROM_93c56) {
addr_str[7] = addr & 1;
- addr_str[6] = addr & (1<<1);
- addr_str[5] = addr & (1<<2);
- addr_str[4] = addr & (1<<3);
- addr_str[3] = addr & (1<<4);
- addr_str[2] = addr & (1<<5);
- addr_str[1] = addr & (1<<6);
- addr_str[0] = addr & (1<<7);
+ addr_str[6] = addr & BIT(1);
+ addr_str[5] = addr & BIT(2);
+ addr_str[4] = addr & BIT(3);
+ addr_str[3] = addr & BIT(4);
+ addr_str[2] = addr & BIT(5);
+ addr_str[1] = addr & BIT(6);
+ addr_str[0] = addr & BIT(7);
addr_len = 8;
} else {
addr_str[5] = addr & 1;
- addr_str[4] = addr & (1<<1);
- addr_str[3] = addr & (1<<2);
- addr_str[2] = addr & (1<<3);
- addr_str[1] = addr & (1<<4);
- addr_str[0] = addr & (1<<5);
+ addr_str[4] = addr & BIT(1);
+ addr_str[3] = addr & BIT(2);
+ addr_str[2] = addr & BIT(3);
+ addr_str[1] = addr & BIT(4);
+ addr_str[0] = addr & BIT(5);
addr_len = 6;
}
eprom_cs(dev, 1);
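The hunk above replaces open-coded (1<<n) shifts with the kernel's BIT() helper. A minimal user-space sketch, assuming the usual definition from <linux/bits.h> (BIT(nr) expanding to (1UL << (nr))), showing that the conversion is behaviour-preserving; the macro is redefined locally only so the snippet is self-contained.

    #include <assert.h>
    #include <stdint.h>

    #define BIT(nr) (1UL << (nr))          /* same shape as the kernel macro */

    int main(void)
    {
            uint32_t addr = 0xA5;

            /* Open-coded shift and BIT() select the same bit. */
            assert((addr & (1 << 3)) == (addr & BIT(3)));
            assert((addr & (1 << 7)) == (addr & BIT(7)));
            return 0;
    }
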
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index a8c8e8c0660d..92de92a3325a 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This is part of the rtl8192 driver
- * released under the GPL (See file COPYING for details).
*
 * This file contains programming code for the rtl8256
* radio frontend.
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index f1eaab337dca..4065a4710142 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192U
*
* Based on the r8187 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
*
* Contact Information:
* Jerry chuang <wlanfae@realtek.com>
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 6c9f9d82477d..2ba01041406b 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -43,20 +43,8 @@ u8 dm_shadow[16][256] = { {0} };
/* For Dynamic Rx Path Selection by Signal Strength */
static struct dynamic_rx_path_sel DM_RxPathSelTable;
-/*------------------------Define global variable-----------------------------*/
-
-
-/*------------------------Define local variable------------------------------*/
-/*------------------------Define local variable------------------------------*/
-
-
-/*--------------------Define export function prototype-----------------------*/
extern void dm_check_fsync(struct net_device *dev);
-/*--------------------Define export function prototype-----------------------*/
-
-
-/*---------------------Define local function prototype-----------------------*/
/* DM --> Rate Adaptive */
static void dm_check_rate_adaptive(struct net_device *dev);
@@ -1454,7 +1442,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter1, TempVal);
+ rCCK0_TxFilter1, TempVal);
/* Write 0xa24 ~ 0xa27 */
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
@@ -1462,14 +1450,14 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
(CCKSwingTable_Ch14[priv->CCK_index][5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter2, TempVal);
+ rCCK0_TxFilter2, TempVal);
/* Write 0xa28 0xa29 */
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8);
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_DebugPort, TempVal);
+ rCCK0_DebugPort, TempVal);
}
}
@@ -1520,7 +1508,7 @@ void dm_restore_dynamic_mechanism_state(struct net_device *dev)
return;
/* TODO: Only 11n mode is implemented currently, */
if (!(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
- priv->ieee80211->mode == WIRELESS_MODE_N_5G))
+ priv->ieee80211->mode == WIRELESS_MODE_N_5G))
return;
{
@@ -1751,7 +1739,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
/* For smooth, we can not change DIG state. */
if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
- (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
+ (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
return;
/*DbgPrint("Dig by Fw False Alarm\n");*/
@@ -1814,7 +1802,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
u8 reset_flag = 0;
if (dm_digtable.dig_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt)) {
+ (priv->reset_count == reset_cnt)) {
dm_ctrl_initgain_byrssi_highpwr(dev);
return;
}
@@ -1912,7 +1900,7 @@ static void dm_ctrl_initgain_byrssi_highpwr(
*/
if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh) {
if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt_highpwr))
+ (priv->reset_count == reset_cnt_highpwr))
return;
dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
@@ -1928,7 +1916,7 @@ static void dm_ctrl_initgain_byrssi_highpwr(
write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
} else {
if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF &&
- (priv->reset_count == reset_cnt_highpwr))
+ (priv->reset_count == reset_cnt_highpwr))
return;
dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;
@@ -2129,7 +2117,7 @@ static void dm_cs_ratio(
{
if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
- !initialized || force_write) {
+ !initialized || force_write) {
/*DbgPrint("Write CS_ratio state = %d\n", DM_DigTable.CurCS_ratioState);*/
if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER) {
/* Lower CS ratio for CCK. */
@@ -2646,7 +2634,7 @@ void dm_fsync_timer_callback(struct timer_list *t)
bool bDoubleTimeInterval = false;
if (priv->ieee80211->state == IEEE80211_LINKED &&
- priv->ieee80211->bfsync_enable &&
+ priv->ieee80211->bfsync_enable &&
(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) {
/* Count rate 54, MCS [7], [12, 13, 14, 15] */
u32 rate_bitmap;
@@ -2936,17 +2924,17 @@ void dm_shadow_init(struct net_device *dev)
for (page = 0; page < 5; page++)
for (offset = 0; offset < 256; offset++) {
- read_nic_byte(dev, offset+page*256, &dm_shadow[page][offset]);
+ read_nic_byte(dev, offset + page * 256, &dm_shadow[page][offset]);
/*DbgPrint("P-%d/O-%02x=%02x\r\n", page, offset, DM_Shadow[page][offset]);*/
}
for (page = 8; page < 11; page++)
for (offset = 0; offset < 256; offset++)
- read_nic_byte(dev, offset+page*256, &dm_shadow[page][offset]);
+ read_nic_byte(dev, offset + page * 256, &dm_shadow[page][offset]);
for (page = 12; page < 15; page++)
for (offset = 0; offset < 256; offset++)
- read_nic_byte(dev, offset+page*256, &dm_shadow[page][offset]);
+ read_nic_byte(dev, offset + page * 256, &dm_shadow[page][offset]);
} /* dm_shadow_init */
diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h
index 5a958335681d..95a2d2ee3c65 100644
--- a/drivers/staging/rtl8192u/r8192U_hw.h
+++ b/drivers/staging/rtl8192u/r8192U_hw.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is part of rtl8187 OpenSource driver.
* Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- * Released under the terms of GPL (General Public Licence)
*
* Parts of this driver are based on the GPL part of the
* official Realtek driver.
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index e4e6c979bedf..5822bb7984b9 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* This file contains wireless extension handlers.
*
* This is part of rtl8180 OpenSource driver.
* Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- * Released under the terms of GPL (General Public Licence)
*
* Parts of this driver are based on the GPL part
* of the official realtek driver.
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
index a6c2b95e2e69..27423cd64b4c 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ b/drivers/staging/rtl8192u/r8192U_wx.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is part of rtl8180 OpenSource driver - v 0.3
* Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
- * Released under the terms of GPL (General Public Licence)
*
* Parts of this driver are based on the GPL part of the official realtek driver
* Parts of this driver are based on the rtl8180 driver skeleton from Patric
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index f160eee52f09..b6dcda77db1f 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -1,17 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
config R8712U
tristate "RealTek RTL8712U (RTL8192SU) Wireless LAN NIC driver"
depends on WLAN && USB
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
- ---help---
- This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
- If built as a module, it will be called r8712u.
+ help
+ This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
+ If built as a module, it will be called r8712u.
config R8712_TX_AGGR
bool "Realtek RTL8712U Transmit Aggregation code"
depends on R8712U && BROKEN
- ---help---
- This option provides transmit aggregation for the Realtek RTL8712 USB device.
+ help
+ This option provides transmit aggregation for the Realtek RTL8712 USB device.
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 48d62fe6c8d4..9ae86631fa8b 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -145,9 +145,9 @@ struct _adapter {
struct hal_priv halpriv;
struct led_priv ledpriv;
struct mp_priv mppriv;
- s32 bDriverStopped;
- s32 bSurpriseRemoved;
- s32 bSuspended;
+ bool driver_stopped;
+ bool surprise_removed;
+ bool suspended;
u32 IsrContent;
u32 ImrContent;
u8 EepromAddressSize;
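The drv_types.h hunk above converts the s32 status flags to bool with lower-case names, which is what the later hunks in this series rely on. A minimal user-space sketch of that pattern; the struct and function names below are illustrative only.

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_adapter {
            bool driver_stopped;
            bool surprise_removed;
    };

    static void demo_io(struct demo_adapter *a)
    {
            /* bool flags read naturally in guard checks like these. */
            if (a->driver_stopped || a->surprise_removed)
                    return;
            puts("device still present, do I/O");
    }

    int main(void)
    {
            struct demo_adapter a = { .driver_stopped = false };

            demo_io(&a);
            return 0;
    }
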
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 4c6519ccab30..401f0e442bcf 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -100,7 +100,8 @@ static void fill_fwpriv(struct _adapter *padapter, struct fw_priv *pfwpriv)
pfwpriv->rf_config = RTL8712_RFC_1T2R;
}
pfwpriv->mp_mode = (pregpriv->mp_mode == 1) ? 1 : 0;
- pfwpriv->vcs_type = pregpriv->vrtl_carrier_sense; /* 0:off 1:on 2:auto */
+ /* 0:off 1:on 2:auto */
+ pfwpriv->vcs_type = pregpriv->vrtl_carrier_sense;
pfwpriv->vcs_mode = pregpriv->vcs_type; /* 1:RTS/CTS 2:CTS to self */
/* default enable turbo_mode */
pfwpriv->turbo_mode = ((pregpriv->wifi_test == 1) ? 0 : 1);
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index 2eae11dd6b42..4cca7390c8ef 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -238,7 +238,8 @@ check_next_ie:
return NULL;
}
-unsigned char *r8712_get_wpa2_ie(unsigned char *pie, uint *rsn_ie_len, int limit)
+unsigned char *r8712_get_wpa2_ie(unsigned char *pie, uint *rsn_ie_len,
+ int limit)
{
return r8712_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
}
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index 1470771daa62..8098f6905554 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -728,7 +728,8 @@ struct registry_priv;
u8 *r8712_set_ie(u8 *pbuf, sint index, uint len, u8 *source, uint *frlen);
u8 *r8712_get_ie(u8 *pbuf, sint index, uint *len, sint limit);
-unsigned char *r8712_get_wpa_ie(unsigned char *pie, uint *rsn_ie_len, int limit);
+unsigned char *r8712_get_wpa_ie(unsigned char *pie, uint *rsn_ie_len,
+ int limit);
unsigned char *r8712_get_wpa2_ie(unsigned char *pie, uint *rsn_ie_len,
int limit);
int r8712_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 2d3f38007299..c962696c9822 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -362,7 +362,7 @@ static void enable_video_mode(struct _adapter *padapter, int cbw40_value)
r8712_fw_cmd(padapter, intcmd);
}
-/**
+/*
*
* This function intends to handle the activation of an interface
* i.e. when it is brought Up/Active from a Down state.
@@ -374,8 +374,8 @@ static int netdev_open(struct net_device *pnetdev)
mutex_lock(&padapter->mutex_start);
if (!padapter->bup) {
- padapter->bDriverStopped = false;
- padapter->bSurpriseRemoved = false;
+ padapter->driver_stopped = false;
+ padapter->surprise_removed = false;
padapter->bup = true;
if (rtl871x_hal_init(padapter) != _SUCCESS)
goto netdev_open_error;
@@ -430,7 +430,7 @@ netdev_open_error:
return -1;
}
-/**
+/*
*
* This function intends to handle the shutdown of an interface
* i.e. when it is brought Down from an Up/Active state.
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 8c36acedf507..6a72a4ad176a 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -309,7 +309,7 @@ int r8712_cmd_thread(void *context)
while (1) {
if (wait_for_completion_interruptible(cmd_queue_comp))
break;
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ if (padapter->driver_stopped || padapter->surprise_removed)
break;
if (r8712_register_cmd_alive(padapter) != _SUCCESS)
continue;
@@ -360,8 +360,8 @@ _next:
pcmdbuf += 2; /* 8 bytes alignment */
memcpy((u8 *)pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
while (check_cmd_fifo(padapter, wr_sz) == _FAIL) {
- if (padapter->bDriverStopped ||
- padapter->bSurpriseRemoved)
+ if (padapter->driver_stopped ||
+ padapter->surprise_removed)
break;
msleep(100);
continue;
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 1ef86b8c592f..a34d0dd023f3 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -128,7 +128,6 @@ enum rtl8712_h2c_cmd {
MAX_H2CCMD
};
-
#define _GetBBReg_CMD_ _Read_BBREG_CMD_
#define _SetBBReg_CMD_ _Write_BBREG_CMD_
#define _GetRFReg_CMD_ _Read_RFREG_CMD_
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 39eb74374d0b..00babd011a62 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -353,7 +353,7 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr)
}
u8 r8712_efuse_pg_packet_write(struct _adapter *padapter, const u8 offset,
- const u8 word_en, const u8 *data)
+ const u8 word_en, const u8 *data)
{
u8 pg_header = 0;
u16 efuse_addr = 0, curr_size = 0;
@@ -441,7 +441,7 @@ u8 r8712_efuse_access(struct _adapter *padapter, u8 bRead, u16 start_addr,
break;
}
res = efuse_one_byte_rw(padapter, bRead, start_addr + i,
- data + i);
+ data + i);
if (!bRead && !res)
break;
}
@@ -554,7 +554,7 @@ u8 r8712_efuse_map_write(struct _adapter *padapter, u16 addr, u16 cnts,
offset++;
if (!empty)
if (!r8712_efuse_pg_packet_read(padapter, offset,
- pktdata))
+ pktdata))
return false;
i = 0;
j = 0;
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.h b/drivers/staging/rtl8712/rtl8712_efuse.h
index dbba51cd40fb..4969d307e978 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.h
+++ b/drivers/staging/rtl8712/rtl8712_efuse.h
@@ -4,7 +4,6 @@
#include "osdep_service.h"
-
#define _REPEAT_THRESHOLD_ 3
#define EFUSE_MAX_SIZE 512
@@ -40,5 +39,5 @@ u8 r8712_efuse_access(struct _adapter *padapter, u8 bRead,
u8 r8712_efuse_map_read(struct _adapter *padapter, u16 addr,
u16 cnts, u8 *data);
u8 r8712_efuse_map_write(struct _adapter *padapter, u16 addr,
- u16 cnts, u8 *data);
+ u16 cnts, u8 *data);
#endif
diff --git a/drivers/staging/rtl8712/rtl8712_io.c b/drivers/staging/rtl8712/rtl8712_io.c
index 8eb79f73c014..384cbdb05e19 100644
--- a/drivers/staging/rtl8712/rtl8712_io.c
+++ b/drivers/staging/rtl8712/rtl8712_io.c
@@ -68,7 +68,7 @@ void r8712_read_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
struct intf_hdl *hdl = &adapter->pio_queue->intf;
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ if (adapter->driver_stopped || adapter->surprise_removed)
return;
hdl->io_ops._read_mem(hdl, addr, cnt, pmem);
@@ -85,7 +85,7 @@ void r8712_read_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
struct intf_hdl *hdl = &adapter->pio_queue->intf;
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ if (adapter->driver_stopped || adapter->surprise_removed)
return;
hdl->io_ops._read_port(hdl, addr, cnt, pmem);
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index 07fcf9b9b811..db99129d3169 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -109,7 +109,7 @@ static void SwLedOn(struct _adapter *padapter, struct LED_871x *pLed)
{
u8 LedCfg;
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ if (padapter->surprise_removed || padapter->driver_stopped)
return;
LedCfg = r8712_read8(padapter, LEDCFG);
switch (pLed->LedPin) {
@@ -137,7 +137,7 @@ static void SwLedOff(struct _adapter *padapter, struct LED_871x *pLed)
{
u8 LedCfg;
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ if (padapter->surprise_removed || padapter->driver_stopped)
return;
LedCfg = r8712_read8(padapter, LEDCFG);
switch (pLed->LedPin) {
@@ -419,7 +419,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
static void SwLedBlink2(struct LED_871x *pLed)
{
struct _adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
@@ -816,7 +816,7 @@ static void BlinkTimerCallback(struct timer_list *t)
/* This fixed the crash problem on Fedora 12 when trying to do the
* insmod;ifconfig up;rmmod commands.
*/
- if (pLed->padapter->bSurpriseRemoved || pLed->padapter->bDriverStopped)
+ if (pLed->padapter->surprise_removed || pLed->padapter->driver_stopped)
return;
schedule_work(&pLed->BlinkWorkItem);
}
@@ -883,7 +883,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
case LED_CTL_NO_LINK:
if (!pLed->bLedNoLinkBlinkInProgress) {
if (pLed->CurrLedState == LED_SCAN_BLINK ||
- IS_LED_WPS_BLINKING(pLed))
+ IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedLinkBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
@@ -1124,7 +1124,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
if (!pLed->bLedBlinkInProgress &&
check_fwstate(pmlmepriv, _FW_LINKED)) {
if (pLed->CurrLedState == LED_SCAN_BLINK ||
- IS_LED_WPS_BLINKING(pLed))
+ IS_LED_WPS_BLINKING(pLed))
return;
pLed->bLedBlinkInProgress = true;
pLed->CurrLedState = LED_TXRX_BLINK;
@@ -1704,7 +1704,6 @@ static void SwLedControlMode5(struct _adapter *padapter,
}
}
-
static void SwLedControlMode6(struct _adapter *padapter,
enum LED_CTL_MODE LedAction)
{
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 5bf9070b7a28..82ddc0c3ecd4 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -51,7 +51,7 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
if (!precvpriv->pallocated_recv_buf)
return _FAIL;
precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
- ((addr_t) (precvpriv->pallocated_recv_buf) & 3);
+ ((addr_t)(precvpriv->pallocated_recv_buf) & 3);
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
INIT_LIST_HEAD(&precvbuf->list);
@@ -135,7 +135,7 @@ int r8712_free_recvframe(union recv_frame *precvframe,
spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
list_del_init(&(precvframe->u.hdr.list));
list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue);
- if (padapter != NULL) {
+ if (padapter) {
if (pfree_recv_queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt++;
}
@@ -273,7 +273,7 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
/* 0~(n-1) fragment frame
 * enqueue to defrag_q
*/
- if (pdefrag_q != NULL) {
+ if (pdefrag_q) {
if (fragnum == 0) {
/*the first fragment*/
if (!list_empty(&pdefrag_q->queue)) {
@@ -299,7 +299,7 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
/* the last fragment frame
* enqueue the last fragment
*/
- if (pdefrag_q != NULL) {
+ if (pdefrag_q) {
phead = &pdefrag_q->queue;
list_add_tail(&pfhdr->list, phead);
/*call recvframe_defrag to defrag*/
@@ -313,7 +313,7 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
prtnframe = NULL;
}
}
- if ((prtnframe != NULL) && (prtnframe->u.hdr.attrib.privacy)) {
+ if (prtnframe && (prtnframe->u.hdr.attrib.privacy)) {
/* after defrag we must check tkip mic code */
if (r8712_recvframe_chkmic(padapter, prtnframe) == _FAIL) {
r8712_free_recvframe(prtnframe, pfree_recv_queue);
@@ -541,8 +541,8 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
(preorder_ctrl->indicate_seq + 1) % 4096;
/*indicate this recv_frame*/
if (!pattrib->amsdu) {
- if (!padapter->bDriverStopped &&
- !padapter->bSurpriseRemoved) {
+ if (!padapter->driver_stopped &&
+ !padapter->surprise_removed) {
/* indicate this recv_frame */
r8712_recv_indicatepkt(padapter,
prframe);
@@ -576,8 +576,8 @@ static int recv_indicatepkt_reorder(struct _adapter *padapter,
/* s1. */
r8712_wlanhdr_to_ethhdr(prframe);
if (pattrib->qos != 1) {
- if (!padapter->bDriverStopped &&
- !padapter->bSurpriseRemoved) {
+ if (!padapter->driver_stopped &&
+ !padapter->surprise_removed) {
r8712_recv_indicatepkt(padapter, prframe);
return _SUCCESS;
} else {
@@ -626,7 +626,7 @@ void r8712_reordering_ctrl_timeout_handler(void *pcontext)
struct __queue *ppending_recvframe_queue =
&preorder_ctrl->pending_recvframe_queue;
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ if (padapter->driver_stopped || padapter->surprise_removed)
return;
spin_lock_irqsave(&ppending_recvframe_queue->lock, irql);
r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, true);
@@ -643,15 +643,15 @@ static int r8712_process_recv_indicatepkts(struct _adapter *padapter,
if (phtpriv->ht_option == 1) { /*B/G/N Mode*/
if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
/* including perform A-MPDU Rx Ordering Buffer Control*/
- if (!padapter->bDriverStopped &&
- !padapter->bSurpriseRemoved)
+ if (!padapter->driver_stopped &&
+ !padapter->surprise_removed)
return _FAIL;
}
} else { /*B/G mode*/
retval = r8712_wlanhdr_to_ethhdr(prframe);
if (retval != _SUCCESS)
return retval;
- if (!padapter->bDriverStopped && !padapter->bSurpriseRemoved) {
+ if (!padapter->driver_stopped && !padapter->surprise_removed) {
/* indicate this recv_frame */
r8712_recv_indicatepkt(padapter, prframe);
} else {
@@ -889,7 +889,7 @@ static void process_link_qual(struct _adapter *padapter,
struct rx_pkt_attrib *pattrib;
struct smooth_rssi_data *sqd = &padapter->recvpriv.signal_qual_data;
- if (prframe == NULL || padapter == NULL)
+ if (!prframe || !padapter)
return;
pattrib = &prframe->u.hdr.attrib;
if (pattrib->signal_qual != 0) {
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index aa6fb516f398..7574a4b569a4 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -60,20 +60,20 @@ int r8712_txframes_sta_ac_pending(struct _adapter *padapter,
switch (priority) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
break;
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
break;
}
return ptxservq->qcnt;
@@ -269,7 +269,7 @@ u8 r8712_construct_txaggr_cmd_hdr(struct xmit_buf *pxmitbuf)
struct xmit_frame *pxmitframe = (struct xmit_frame *)
pxmitbuf->priv_data;
struct _adapter *padapter = pxmitframe->padapter;
- struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct cmd_hdr *pcmd_hdr = (struct cmd_hdr *)
(pxmitbuf->pbuf + TXDESC_SIZE);
@@ -532,7 +532,7 @@ static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz)
(pattrib->dhcp_pkt != 1)) {
/*Not EAP & ARP type data packet*/
if (phtpriv->ht_option == 1) { /*B/G/N Mode*/
- if (phtpriv->ampdu_enable != true)
+ if (!phtpriv->ampdu_enable)
ptxdesc->txdw2 |= cpu_to_le32(BK);
}
} else {
diff --git a/drivers/staging/rtl8712/rtl871x_eeprom.c b/drivers/staging/rtl8712/rtl871x_eeprom.c
index 948bd0c757b5..0027d8eb22fa 100644
--- a/drivers/staging/rtl8712/rtl871x_eeprom.c
+++ b/drivers/staging/rtl8712/rtl871x_eeprom.c
@@ -37,7 +37,7 @@ static void shift_out_bits(struct _adapter *padapter, u16 data, u16 count)
{
u16 x, mask;
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
goto out;
mask = 0x01 << (count - 1);
x = r8712_read8(padapter, EE_9346CR);
@@ -46,7 +46,7 @@ static void shift_out_bits(struct _adapter *padapter, u16 data, u16 count)
x &= ~_EEDI;
if (data & mask)
x |= _EEDI;
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
goto out;
r8712_write8(padapter, EE_9346CR, (u8)x);
udelay(CLOCK_RATE);
@@ -54,7 +54,7 @@ static void shift_out_bits(struct _adapter *padapter, u16 data, u16 count)
down_clk(padapter, &x);
mask >>= 1;
} while (mask);
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
goto out;
x &= ~_EEDI;
r8712_write8(padapter, EE_9346CR, (u8)x);
@@ -65,7 +65,7 @@ static u16 shift_in_bits(struct _adapter *padapter)
{
u16 x, d = 0, i;
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
goto out;
x = r8712_read8(padapter, EE_9346CR);
x &= ~(_EEDO | _EEDI);
@@ -73,7 +73,7 @@ static u16 shift_in_bits(struct _adapter *padapter)
for (i = 0; i < 16; i++) {
d <<= 1;
up_clk(padapter, &x);
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
goto out;
x = r8712_read8(padapter, EE_9346CR);
x &= ~(_EEDI);
@@ -117,17 +117,17 @@ static void eeprom_clean(struct _adapter *padapter)
{
u16 x;
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
return;
x = r8712_read8(padapter, EE_9346CR);
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
return;
x &= ~(_EECS | _EEDI);
r8712_write8(padapter, EE_9346CR, (u8)x);
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
return;
up_clk(padapter, &x);
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
return;
down_clk(padapter, &x);
}
@@ -194,11 +194,11 @@ u16 r8712_eeprom_read16(struct _adapter *padapter, u16 reg) /*ReadEEprom*/
tmp8_clk_new = tmp8_clk_ori | 0x20;
if (tmp8_clk_new != tmp8_clk_ori)
r8712_write8(padapter, 0x10250003, tmp8_clk_new);
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
goto out;
/* select EEPROM, reset bits, set _EECS */
x = r8712_read8(padapter, EE_9346CR);
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
goto out;
x &= ~(_EEDI | _EEDO | _EESK | _EEM0);
x |= _EEM1 | _EECS;
@@ -218,4 +218,3 @@ out:
r8712_write8(padapter, 0x102502f1, tmp8_ori);
return data;
}
-
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index e723357ac8c0..a7230c0c7b23 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -1102,7 +1102,7 @@ static int r871x_wx_set_mlme(struct net_device *dev,
return ret;
}
-/**
+/*
*
* This function intends to handle the Set Scan command.
* Currently, the request comes via Wireless Extensions' SIOCSIWSCAN ioctl.
@@ -1118,9 +1118,9 @@ static int r8711_wx_set_scan(struct net_device *dev,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 status = true;
- if (padapter->bDriverStopped) {
- netdev_info(dev, "In %s: bDriverStopped=%d\n",
- __func__, padapter->bDriverStopped);
+ if (padapter->driver_stopped) {
+ netdev_info(dev, "In %s: driver_stopped=%d\n",
+ __func__, padapter->driver_stopped);
return -1;
}
if (!padapter->bup)
@@ -1175,7 +1175,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
char *stop = ev + wrqu->data.length;
u32 ret = 0, cnt = 0;
- if (padapter->bDriverStopped)
+ if (padapter->driver_stopped)
return -EINVAL;
while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY |
_FW_UNDER_LINKING)) {
@@ -1938,7 +1938,7 @@ static int r871x_get_ap_info(struct net_device *dev,
u8 bssid[ETH_ALEN];
char data[33];
- if (padapter->bDriverStopped || (pdata == NULL))
+ if (padapter->driver_stopped || (pdata == NULL))
return -EINVAL;
while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY |
_FW_UNDER_LINKING)) {
@@ -2002,7 +2002,7 @@ static int r871x_set_pid(struct net_device *dev,
struct _adapter *padapter = netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
- if ((padapter->bDriverStopped) || (pdata == NULL))
+ if ((padapter->driver_stopped) || (pdata == NULL))
return -EINVAL;
if (copy_from_user(&padapter->pid, pdata->pointer, sizeof(int)))
return -EINVAL;
@@ -2018,7 +2018,7 @@ static int r871x_set_chplan(struct net_device *dev,
struct iw_point *pdata = &wrqu->data;
int ch_plan = -1;
- if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ if ((padapter->driver_stopped) || (pdata == NULL)) {
ret = -EINVAL;
goto exit;
}
@@ -2038,7 +2038,7 @@ static int r871x_wps_start(struct net_device *dev,
struct iw_point *pdata = &wrqu->data;
u32 u32wps_start = 0;
- if ((padapter->bDriverStopped) || (pdata == NULL))
+ if ((padapter->driver_stopped) || (pdata == NULL))
return -EINVAL;
if (copy_from_user((void *)&u32wps_start, pdata->pointer, 4))
return -EFAULT;
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 3f17ef6f7e39..7c7267d0fc9e 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -95,7 +95,7 @@ static void _free_network(struct mlme_priv *pmlmepriv,
unsigned long irqL;
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
if (pnetwork->fixed)
return;
@@ -115,7 +115,7 @@ static void free_network_nolock(struct mlme_priv *pmlmepriv,
{
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
if (pnetwork->fixed)
return;
@@ -174,7 +174,7 @@ sint r8712_if_up(struct _adapter *padapter)
{
sint res;
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
+ if (padapter->driver_stopped || padapter->surprise_removed ||
!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
res = false;
} else {
@@ -399,7 +399,7 @@ static void update_scanned_network(struct _adapter *adapter,
/* Otherwise just pull from the free list */
/* update scan_time */
pnetwork = alloc_network(pmlmepriv);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
bssid_ex_sz = r8712_get_wlan_bssid_ex_sz(target);
target->Length = bssid_ex_sz;
@@ -1055,7 +1055,7 @@ void _r8712_join_timeout_handler(struct _adapter *adapter)
unsigned long irqL;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ if (adapter->driver_stopped || adapter->surprise_removed)
return;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
@@ -1084,7 +1084,7 @@ void r8712_scan_timeout_handler (struct _adapter *adapter)
void _r8712_dhcp_timeout_handler (struct _adapter *adapter)
{
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ if (adapter->driver_stopped || adapter->surprise_removed)
return;
if (adapter->pwrctrlpriv.pwr_mode != adapter->registrypriv.power_mgnt)
r8712_set_ps_mode(adapter, adapter->registrypriv.power_mgnt,
@@ -1120,8 +1120,6 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
}
pnetwork = container_of(pmlmepriv->pscanned,
struct wlan_network, list);
- if (pnetwork == NULL)
- return _FAIL;
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
if (pmlmepriv->assoc_by_bssid) {
dst_ssid = pnetwork->network.MacAddress;
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 1d5364f5a518..ba379506da3f 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -697,15 +697,14 @@ void r8712_ResetPhyRxPktCount(struct _adapter *pAdapter)
static u32 GetPhyRxPktCounts(struct _adapter *pAdapter, u32 selbit)
{
/*selection*/
- u32 phyrx_set = 0, count = 0;
+ u32 phyrx_set = 0;
u32 SelectBit;
SelectBit = selbit << 28;
phyrx_set |= (SelectBit & 0xF0000000);
r8712_write32(pAdapter, RXERR_RPT, phyrx_set);
/*Read packet count*/
- count = r8712_read32(pAdapter, RXERR_RPT) & RPTMaxCount;
- return count;
+ return r8712_read32(pAdapter, RXERR_RPT) & RPTMaxCount;
}
u32 r8712_GetPhyRxPktReceived(struct _adapter *pAdapter)
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 351984fe254e..2beafc7742b3 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -32,7 +32,7 @@ void r8712_set_rpwm(struct _adapter *padapter, u8 val8)
if (pwrpriv->rpwm_retry == 0)
return;
}
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ if (padapter->driver_stopped || padapter->surprise_removed)
return;
rpwm = val8 | pwrpriv->tog;
switch (val8) {
@@ -117,7 +117,7 @@ static void _rpwm_check_handler (struct _adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ if (padapter->driver_stopped || padapter->surprise_removed)
return;
if (pwrpriv->cpwm != pwrpriv->rpwm)
schedule_work(&pwrpriv->rpwm_workitem);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 7c8857409916..f6fe8ea12961 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -75,8 +75,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
* and initialize free_xmit_frame below.
* Please also apply free_txobj to link_up all the xmit_frames...
*/
- pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4,
- GFP_ATOMIC);
+ pxmitpriv->pallocated_frame_buf =
+ kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4, GFP_ATOMIC);
if (!pxmitpriv->pallocated_frame_buf) {
pxmitpriv->pxmit_frame_buf = NULL;
return _FAIL;
@@ -114,8 +114,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
/*init xmit_buf*/
_init_queue(&pxmitpriv->free_xmitbuf_queue);
_init_queue(&pxmitpriv->pending_xmitbuf_queue);
- pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4,
- GFP_ATOMIC);
+ pxmitpriv->pallocated_xmitbuf =
+ kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, GFP_ATOMIC);
if (!pxmitpriv->pallocated_xmitbuf) {
kfree(pxmitpriv->pallocated_frame_buf);
pxmitpriv->pallocated_frame_buf = NULL;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 005010de9997..7478bbd3de78 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -196,7 +196,7 @@ static int r871x_suspend(struct usb_interface *pusb_intf, pm_message_t state)
struct _adapter *padapter = netdev_priv(pnetdev);
netdev_info(pnetdev, "Suspending...\n");
- padapter->bSuspended = true;
+ padapter->suspended = true;
rtl871x_intf_stop(padapter);
if (pnetdev->netdev_ops->ndo_stop)
pnetdev->netdev_ops->ndo_stop(pnetdev);
@@ -220,7 +220,7 @@ static int r871x_resume(struct usb_interface *pusb_intf)
netif_device_attach(pnetdev);
if (pnetdev->netdev_ops->ndo_open)
pnetdev->netdev_ops->ndo_open(pnetdev);
- padapter->bSuspended = false;
+ padapter->suspended = false;
rtl871x_intf_resume(padapter);
return 0;
}
@@ -271,7 +271,7 @@ static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
void rtl871x_intf_stop(struct _adapter *padapter)
{
/*disable_hw_interrupt*/
- if (!padapter->bSurpriseRemoved) {
+ if (!padapter->surprise_removed) {
/*device still exists, so driver can do i/o operation
* TODO:
*/
@@ -289,7 +289,7 @@ void r871x_dev_unload(struct _adapter *padapter)
{
if (padapter->bup) {
/*s1.*/
- padapter->bDriverStopped = true;
+ padapter->driver_stopped = true;
/*s3.*/
rtl871x_intf_stop(padapter);
@@ -298,7 +298,7 @@ void r871x_dev_unload(struct _adapter *padapter)
r8712_stop_drv_threads(padapter);
/*s5.*/
- if (!padapter->bSurpriseRemoved) {
+ if (!padapter->surprise_removed) {
padapter->hw_init_completed = false;
rtl8712_hal_deinit(padapter);
}
@@ -600,7 +600,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
/* never exit with a firmware callback pending */
wait_for_completion(&padapter->rtl8712_fw_ready);
if (drvpriv.drv_registered)
- padapter->bSurpriseRemoved = true;
+ padapter->surprise_removed = true;
unregister_netdev(pnetdev); /* will call netdev_close() */
flush_scheduled_work();
udelay(1);
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index ee5968808332..9d290bc2fdb7 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -147,9 +147,9 @@ static void usb_write_mem_complete(struct urb *purb)
if (purb->status != 0) {
if (purb->status == (-ESHUTDOWN))
- padapter->bDriverStopped = true;
+ padapter->driver_stopped = true;
else
- padapter->bSurpriseRemoved = true;
+ padapter->surprise_removed = true;
}
complete(&pintfpriv->io_retevt_comp);
}
@@ -164,7 +164,7 @@ void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
struct usb_device *pusbd = pdvobj->pusbdev;
struct urb *piorw_urb = pintfpriv->piorw_urb;
- if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
+ if ((padapter->driver_stopped) || (padapter->surprise_removed) ||
(padapter->pwrctrlpriv.pnp_bstop_trx))
return;
/* translate DMA FIFO addr to pipehandle */
@@ -186,7 +186,7 @@ static void r8712_usb_read_port_complete(struct urb *purb)
struct _adapter *padapter = (struct _adapter *)precvbuf->adapter;
struct recv_priv *precvpriv = &padapter->recvpriv;
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ if (padapter->surprise_removed || padapter->driver_stopped)
return;
if (purb->status == 0) { /* SUCCESS */
if ((purb->actual_length > (MAX_RECVBUF_SZ)) ||
@@ -218,11 +218,11 @@ static void r8712_usb_read_port_complete(struct urb *purb)
case -EPIPE:
case -ENODEV:
case -ESHUTDOWN:
- padapter->bDriverStopped = true;
+ padapter->driver_stopped = true;
break;
case -ENOENT:
- if (!padapter->bSuspended) {
- padapter->bDriverStopped = true;
+ if (!padapter->suspended) {
+ padapter->driver_stopped = true;
break;
}
/* Fall through. */
@@ -254,7 +254,7 @@ u32 r8712_usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
struct recv_priv *precvpriv = &adapter->recvpriv;
struct usb_device *pusbd = pdvobj->pusbdev;
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
+ if (adapter->driver_stopped || adapter->surprise_removed ||
adapter->pwrctrlpriv.pnp_bstop_trx || !precvbuf)
return _FAIL;
r8712_init_recvbuf(adapter, precvbuf);
@@ -314,9 +314,9 @@ void r8712_xmit_bh(void *priv)
struct _adapter *padapter = priv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- if (padapter->bDriverStopped ||
- padapter->bSurpriseRemoved) {
- netdev_err(padapter->pnetdev, "xmit_bh => bDriverStopped or bSurpriseRemoved\n");
+ if (padapter->driver_stopped ||
+ padapter->surprise_removed) {
+ netdev_err(padapter->pnetdev, "xmit_bh => driver_stopped or surprise_removed\n");
return;
}
ret = r8712_xmitframe_complete(padapter, pxmitpriv, NULL);
@@ -360,7 +360,7 @@ static void usb_write_port_complete(struct urb *purb)
break;
}
}
- if (padapter->bSurpriseRemoved)
+ if (padapter->surprise_removed)
return;
switch (purb->status) {
case 0:
@@ -390,7 +390,7 @@ u32 r8712_usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
struct usb_device *pusbd = pdvobj->pusbdev;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
+ if ((padapter->driver_stopped) || (padapter->surprise_removed) ||
(padapter->pwrctrlpriv.pnp_bstop_trx))
return _FAIL;
for (i = 0; i < 8; i++) {
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
index deae0427ba6c..744091d46f4c 100644
--- a/drivers/staging/rtl8723bs/Kconfig
+++ b/drivers/staging/rtl8723bs/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config RTL8723BS
tristate "Realtek RTL8723BS SDIO Wireless LAN NIC driver"
depends on WLAN && MMC && CFG80211
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index cbbfef389874..bc0230672457 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -782,12 +782,8 @@ void start_bss_network(struct adapter *padapter, u8 *pbuf)
/* check if there is wps ie, */
	/* if there is wpsie in beacon, the hostapd will update beacon twice when starting hostapd, */
	/* and the first time the security ie (RSN/WPA IE) will not be included in the beacon. */
- if (NULL == rtw_get_wps_ie(
- pnetwork->IEs+_FIXED_IE_LENGTH_,
- pnetwork->IELength-_FIXED_IE_LENGTH_,
- NULL,
- NULL
- ))
+ if (!rtw_get_wps_ie(pnetwork->IEs+_FIXED_IE_LENGTH_,
+ pnetwork->IELength-_FIXED_IE_LENGTH_, NULL, NULL))
pmlmeext->bstart_bss = true;
/* todo: update wmm, ht cap */
@@ -2340,8 +2336,8 @@ void rtw_ap_restore_network(struct adapter *padapter)
Update_RA_Entry(padapter, psta);
/* pairwise key */
/* per sta pairwise key and settings */
- if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
- (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ if ((psecuritypriv->dot11PrivacyAlgrthm == _TKIP_) ||
+ (psecuritypriv->dot11PrivacyAlgrthm == _AES_)) {
rtw_setstakey_cmd(padapter, psta, true, false);
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 91520ca3bbad..ecaa769f12e6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -162,9 +162,9 @@ Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
No irqsave is necessary.
*/
-sint _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- sint res = _SUCCESS;
+ int res = 0;
init_completion(&pcmdpriv->cmd_queue_comp);
init_completion(&pcmdpriv->terminate_cmdthread_comp);
@@ -177,8 +177,8 @@ sint _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
- if (pcmdpriv->cmd_allocated_buf == NULL) {
- res = _FAIL;
+ if (!pcmdpriv->cmd_allocated_buf) {
+ res = -ENOMEM;
goto exit;
}
@@ -186,8 +186,8 @@ sint _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
- if (pcmdpriv->rsp_allocated_buf == NULL) {
- res = _FAIL;
+ if (!pcmdpriv->rsp_allocated_buf) {
+ res = -ENOMEM;
goto exit;
}
@@ -201,10 +201,8 @@ exit:
}
static void c2h_wk_callback(_workitem *work);
-sint _rtw_init_evt_priv(struct evt_priv *pevtpriv)
+int rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
- sint res = _SUCCESS;
-
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
atomic_set(&pevtpriv->event_seq, 0);
pevtpriv->evt_done_cnt = 0;
@@ -212,8 +210,10 @@ sint _rtw_init_evt_priv(struct evt_priv *pevtpriv)
_init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
pevtpriv->c2h_wk_alive = false;
pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN+1);
+ if (!pevtpriv->c2h_queue)
+ return -ENOMEM;
- return res;
+ return 0;
}
void _rtw_free_evt_priv(struct evt_priv *pevtpriv)
@@ -256,7 +256,7 @@ ISR/Call-Back functions can't call this sub-function.
*/
-sint _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
_irqL irqL;
@@ -295,22 +295,6 @@ struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
return obj;
}
-u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
-{
- u32 res;
-
- res = _rtw_init_cmd_priv(pcmdpriv);
- return res;
-}
-
-u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
-{
- int res;
-
- res = _rtw_init_evt_priv(pevtpriv);
- return res;
-}
-
void rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("rtw_free_evt_priv\n"));
@@ -347,7 +331,7 @@ int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
-u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+int rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
int res = _FAIL;
struct adapter *padapter = pcmdpriv->padapter;
@@ -735,12 +719,12 @@ exit:
return res;
}
-u8 rtw_startbss_cmd(struct adapter *padapter, int flags)
+int rtw_startbss_cmd(struct adapter *padapter, int flags)
{
struct cmd_obj *pcmd;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct submit_ctx sctx;
- u8 res = _SUCCESS;
+ int res = _SUCCESS;
if (flags & RTW_CMDF_DIRECTLY) {
/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
@@ -2007,11 +1991,6 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
case CHECK_HIQ_WK_CID:
rtw_chk_hi_queue_hdl(padapter);
break;
-#ifdef CONFIG_INTEL_WIDI
- case INTEl_WIDI_WK_CID:
- intel_widi_wk_hdl(padapter, pdrvextra_cmd->type, pdrvextra_cmd->pbuf);
- break;
-#endif /* CONFIG_INTEL_WIDI */
/* add for CONFIG_IEEE80211W, none 11w can use it */
case RESET_SECURITYPRIV:
reset_securitypriv_hdl(padapter);
@@ -2121,7 +2100,7 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
rtw_indicate_connect(padapter);
} else {
- pwlan = _rtw_alloc_network(pmlmepriv);
+ pwlan = rtw_alloc_network(pmlmepriv);
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
if (pwlan == NULL) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
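The rtw_cmd.c hunks above move the init helpers from the driver-private _SUCCESS/_FAIL codes to the kernel convention of returning 0 on success and a negative errno (here -ENOMEM) on allocation failure. A minimal user-space sketch of that convention; the names are illustrative, not from the driver.

    #include <errno.h>
    #include <stdlib.h>

    struct demo_priv {
            void *cmd_allocated_buf;
    };

    static int demo_init_priv(struct demo_priv *p)
    {
            p->cmd_allocated_buf = malloc(128);
            if (!p->cmd_allocated_buf)
                    return -ENOMEM;         /* negative errno on failure */
            return 0;                       /* 0 on success */
    }

    int main(void)
    {
            struct demo_priv p;
            int ret = demo_init_priv(&p);

            if (!ret)
                    free(p.cmd_allocated_buf);
            return ret ? 1 : 0;
    }
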
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
index a3c8a712bdc2..9f8446ccf771 100644
--- a/drivers/staging/rtl8723bs/core/rtw_debug.c
+++ b/drivers/staging/rtl8723bs/core/rtw_debug.c
@@ -1395,16 +1395,16 @@ ssize_t proc_set_btcoex_dbg(struct file *file, const char __user *buffer, size_t
}
num = sscanf(tmp, "%x %x", module, module+1);
- if (1 == num) {
- if (0 == module[0])
+ if (num == 1) {
+ if (module[0] == 0)
memset(module, 0, sizeof(module));
else
memset(module, 0xFF, sizeof(module));
- } else if (2 != num) {
+ } else if (num != 2) {
DBG_871X(FUNC_ADPT_FMT ": input(\"%s\") format incorrect!\n",
FUNC_ADPT_ARG(padapter), tmp);
- if (0 == num)
+ if (num == 0)
return -EFAULT;
}
@@ -1425,9 +1425,8 @@ int proc_get_btcoex_info(struct seq_file *m, void *v)
padapter = (struct adapter *)rtw_netdev_priv(dev);
pbuf = rtw_zmalloc(bufsize);
- if (NULL == pbuf) {
+ if (!pbuf)
return -ENOMEM;
- }
rtw_btcoex_DisplayBtCoexInfo(padapter, pbuf, bufsize);
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index eea3bece59a1..3b8848182221 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -125,11 +125,8 @@ Efuse_GetCurrentSize(
u8 efuseType,
bool bPseudoTest)
{
- u16 ret = 0;
-
- ret = padapter->HalFunc.EfuseGetCurrentSize(padapter, efuseType, bPseudoTest);
-
- return ret;
+ return padapter->HalFunc.EfuseGetCurrentSize(padapter, efuseType,
+ bPseudoTest);
}
/* 11/16/2008 MH Add description. Get current efuse area enabled word!!. */
@@ -221,7 +218,6 @@ EFUSE_Read1Byte(
struct adapter *Adapter,
u16 Address)
{
- u8 data;
u8 Bytetemp = {0x00};
u8 temp = {0x00};
u32 k = 0;
@@ -253,8 +249,7 @@ u16 Address)
break;
}
}
- data = rtw_read8(Adapter, EFUSE_CTRL);
- return data;
+ return rtw_read8(Adapter, EFUSE_CTRL);
} else
return 0xFF;
@@ -378,11 +373,8 @@ Efuse_PgPacketRead(struct adapter *padapter,
u8 *data,
bool bPseudoTest)
{
- int ret = 0;
-
- ret = padapter->HalFunc.Efuse_PgPacketRead(padapter, offset, data, bPseudoTest);
-
- return ret;
+ return padapter->HalFunc.Efuse_PgPacketRead(padapter, offset, data,
+ bPseudoTest);
}
int
@@ -392,11 +384,8 @@ Efuse_PgPacketWrite(struct adapter *padapter,
u8 *data,
bool bPseudoTest)
{
- int ret;
-
- ret = padapter->HalFunc.Efuse_PgPacketWrite(padapter, offset, word_en, data, bPseudoTest);
-
- return ret;
+ return padapter->HalFunc.Efuse_PgPacketWrite(padapter, offset, word_en,
+ data, bPseudoTest);
}
/*-----------------------------------------------------------------------------
@@ -447,11 +436,9 @@ Efuse_WordEnableDataWrite(struct adapter *padapter,
u8 *data,
bool bPseudoTest)
{
- u8 ret = 0;
-
- ret = padapter->HalFunc.Efuse_WordEnableDataWrite(padapter, efuse_addr, word_en, data, bPseudoTest);
-
- return ret;
+ return padapter->HalFunc.Efuse_WordEnableDataWrite(padapter, efuse_addr,
+ word_en, data,
+ bPseudoTest);
}
/*-----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index ac203c01c098..aaf27438cd81 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -114,7 +114,7 @@ u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *sourc
{
memcpy((void *)pbuf, (void *)source, len);
*frlen = *frlen + len;
- return (pbuf + len);
+ return pbuf + len;
}
/* rtw_set_ie will update frame length */
@@ -136,7 +136,7 @@ u8 *rtw_set_ie
*frlen = *frlen + (len + 2);
- return (pbuf + len + 2);
+ return pbuf + len + 2;
}
/*----------------------------------------------------------------------------
@@ -706,7 +706,7 @@ int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie,
}
}
- return (*rsn_len + *wpa_len);
+ return *rsn_len + *wpa_len;
}
u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index d341069097e2..a92bc19b196a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -156,7 +156,7 @@ int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapt
struct io_priv *piopriv = &padapter->iopriv;
struct intf_hdl *pintf = &piopriv->intf;
- if (set_intf_ops == NULL)
+ if (!set_intf_ops)
return _FAIL;
piopriv->padapter = padapter;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index 0c8a050b2a81..bd75bca1ac6e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -44,7 +44,7 @@ u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid)
for (i = 0; i < ssid->SsidLength; i++) {
/* wifi, printable ascii code must be supported */
if (!((ssid->Ssid[i] >= 0x20) && (ssid->Ssid[i] <= 0x7e))) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid has nonprintabl ascii\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid has non-printable ascii\n"));
ret = false;
break;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 406e313477aa..5f78f1eaa7aa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -13,13 +13,13 @@
extern u8 rtw_do_join(struct adapter *padapter);
-sint _rtw_init_mlme_priv(struct adapter *padapter)
+int rtw_init_mlme_priv(struct adapter *padapter)
{
- sint i;
+ int i;
u8 *pbuf;
struct wlan_network *pnetwork;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- sint res = _SUCCESS;
+ int res = _SUCCESS;
pmlmepriv->nic_hdl = (u8 *)padapter;
@@ -29,9 +29,9 @@ sint _rtw_init_mlme_priv(struct adapter *padapter)
pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
	pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */
- spin_lock_init(&(pmlmepriv->lock));
- _rtw_init_queue(&(pmlmepriv->free_bss_pool));
- _rtw_init_queue(&(pmlmepriv->scanned_queue));
+ spin_lock_init(&pmlmepriv->lock);
+ _rtw_init_queue(&pmlmepriv->free_bss_pool);
+ _rtw_init_queue(&pmlmepriv->scanned_queue);
set_scanned_network_val(pmlmepriv, 0);
@@ -48,9 +48,9 @@ sint _rtw_init_mlme_priv(struct adapter *padapter)
pnetwork = (struct wlan_network *)pbuf;
for (i = 0; i < MAX_BSS_CNT; i++) {
- INIT_LIST_HEAD(&(pnetwork->list));
+ INIT_LIST_HEAD(&pnetwork->list);
- list_add_tail(&(pnetwork->list), &(pmlmepriv->free_bss_pool.queue));
+ list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
pnetwork++;
}
@@ -143,7 +143,7 @@ struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
}
*/
-struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
{
struct wlan_network *pnetwork;
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
@@ -161,7 +161,8 @@ struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *f
list_del_init(&pnetwork->list);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr =%p\n", plist));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("rtw_alloc_network: ptr =%p\n", plist));
pnetwork->network_type = 0;
pnetwork->fixed = false;
pnetwork->last_scanned = jiffies;
@@ -332,7 +333,7 @@ void rtw_generate_random_ibss(u8 *pibss)
u8 *rtw_get_capability_from_ie(u8 *ie)
{
- return (ie + 8 + 2);
+ return ie + 8 + 2;
}
@@ -347,16 +348,7 @@ u16 rtw_get_capability(struct wlan_bssid_ex *bss)
u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
{
- return (ie + 8);
-}
-
-
-int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
-{
- int res;
-
- res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */
- return res;
+ return ie + 8;
}
void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
@@ -375,15 +367,6 @@ static struct wlan_network *rtw_dequeue_network(struct __queue *queue)
}
*/
-struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
-struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
-{
- struct wlan_network *pnetwork;
-
- pnetwork = _rtw_alloc_network(pmlmepriv);
- return pnetwork;
-}
-
void rtw_free_network_nolock(struct adapter *padapter, struct wlan_network *pnetwork);
void rtw_free_network_nolock(struct adapter *padapter, struct wlan_network *pnetwork)
{
@@ -451,14 +434,14 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 fea
s_cap = le16_to_cpu(tmps);
d_cap = le16_to_cpu(tmpd);
- return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
+ return (src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
/* (src->Configuration.DSConfig == dst->Configuration.DSConfig) && */
((!memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN))) &&
((!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength))) &&
((s_cap & WLAN_CAPABILITY_IBSS) ==
(d_cap & WLAN_CAPABILITY_IBSS)) &&
((s_cap & WLAN_CAPABILITY_BSS) ==
- (d_cap & WLAN_CAPABILITY_BSS)));
+ (d_cap & WLAN_CAPABILITY_BSS));
}
@@ -943,13 +926,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
|| _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)
) {
rtw_set_to_roam(adapter, 0);
-#ifdef CONFIG_INTEL_WIDI
- if (adapter->mlmepriv.widi_state == INTEL_WIDI_STATE_ROAMING) {
- memset(pmlmepriv->sa_ext, 0x00, L2SDTA_SERVICE_VE_LEN);
- intel_widi_wk_cmd(adapter, INTEL_WIDI_LISTEN_WK, NULL, 0);
- DBG_871X("change to widi listen\n");
- }
-#endif /* CONFIG_INTEL_WIDI */
rtw_free_assoc_resources(adapter, 1);
rtw_indicate_disconnect(adapter);
} else {
@@ -1108,14 +1084,6 @@ void rtw_indicate_connect(struct adapter *padapter)
}
rtw_set_to_roam(padapter, 0);
-#ifdef CONFIG_INTEL_WIDI
- if (padapter->mlmepriv.widi_state == INTEL_WIDI_STATE_ROAMING) {
- memset(pmlmepriv->sa_ext, 0x00, L2SDTA_SERVICE_VE_LEN);
- intel_widi_wk_cmd(padapter, INTEL_WIDI_LISTEN_WK, NULL, 0);
- DBG_871X("change to widi listen\n");
- }
-#endif /* CONFIG_INTEL_WIDI */
-
rtw_set_scan_deny(padapter, 3000);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-rtw_indicate_connect: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
@@ -1703,11 +1671,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
roam = true;
roam_target = pmlmepriv->roam_network;
}
-#ifdef CONFIG_INTEL_WIDI
- else if (adapter->mlmepriv.widi_state == INTEL_WIDI_STATE_CONNECTED) {
- roam = true;
- }
-#endif /* CONFIG_INTEL_WIDI */
if (roam == true) {
if (rtw_to_roam(adapter) > 0)
@@ -1732,11 +1695,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
}
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
-#ifdef CONFIG_INTEL_WIDI
- if (!rtw_to_roam(adapter))
- process_intel_widi_disconnect(adapter, 1);
-#endif /* CONFIG_INTEL_WIDI */
-
_rtw_roaming(adapter, roam_target);
}
@@ -1833,13 +1791,6 @@ void _rtw_join_timeout_handler(struct timer_list *t)
}
break;
} else {
-#ifdef CONFIG_INTEL_WIDI
- if (adapter->mlmepriv.widi_state == INTEL_WIDI_STATE_ROAMING) {
- memset(pmlmepriv->sa_ext, 0x00, L2SDTA_SERVICE_VE_LEN);
- intel_widi_wk_cmd(adapter, INTEL_WIDI_LISTEN_WK, NULL, 0);
- DBG_871X("change to widi listen\n");
- }
-#endif /* CONFIG_INTEL_WIDI */
DBG_871X("%s We've try roaming but fail\n", __func__);
rtw_indicate_disconnect(adapter);
break;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index ebb45acb4446..d110d4514771 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -697,10 +697,9 @@ unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
psta->aid = 0;
DBG_871X("no room for more AIDs\n");
return _SUCCESS;
- } else {
- pstapriv->sta_aid[psta->aid - 1] = psta;
- DBG_871X("allocate new AID = (%d)\n", psta->aid);
}
+ pstapriv->sta_aid[psta->aid - 1] = psta;
+ DBG_871X("allocate new AID = (%d)\n", psta->aid);
}
psta->qos_option = 1;
@@ -816,7 +815,7 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
update_network(&(pmlmepriv->cur_network.network), pbss, padapter, true);
rtw_get_bcn_info(&(pmlmepriv->cur_network));
}
- kfree((u8 *)pbss);
+ kfree(pbss);
}
/* check the vendor of the assoc AP */
@@ -1982,7 +1981,7 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
if (status == 0) {
/* successful */
DBG_871X("agg_enable for TID =%d\n", tid);
- psta->htpriv.agg_enable_bitmap |= 1 << tid;
+ psta->htpriv.agg_enable_bitmap |= BIT(tid);
psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
} else {
psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
@@ -2000,8 +1999,10 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
case RTW_WLAN_ACTION_DELBA: /* DELBA */
if ((frame_body[3] & BIT(3)) == 0) {
- psta->htpriv.agg_enable_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
- psta->htpriv.candidate_tid_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
+ psta->htpriv.agg_enable_bitmap &=
+ ~BIT((frame_body[3] >> 4) & 0xf);
+ psta->htpriv.candidate_tid_bitmap &=
+ ~BIT((frame_body[3] >> 4) & 0xf);
/* reason_code = frame_body[4] | (frame_body[5] << 8); */
reason_code = RTW_GET_LE16(&frame_body[4]);
@@ -3512,7 +3513,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
 /* da == NULL, assume it's null data for sta to AP */
- if (da == NULL)
+ if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
psta = rtw_get_stainfo(&padapter->stapriv, da);
@@ -3569,7 +3570,6 @@ exit:
*/
s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
{
- int ret;
struct mlme_ext_priv *pmlmeext;
struct mlme_ext_info *pmlmeinfo;
@@ -3578,12 +3578,10 @@ s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
pmlmeinfo = &pmlmeext->mlmext_info;
 /* da == NULL, assume it's null data for sta to AP */
- if (da == NULL)
+ if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
- ret = _issue_nulldata(padapter, da, 0, false);
-
- return ret;
+ return _issue_nulldata(padapter, da, 0, false);
}
 /* when wait_ack is true, this function should be called in process context */
@@ -3675,7 +3673,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
 /* da == NULL, assume it's null data for sta to AP */
- if (da == NULL)
+ if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
do {
@@ -3958,7 +3956,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
/* A-MSDU NOT Supported */
BA_para_set = 0;
/* immediate Block Ack */
- BA_para_set |= (1 << 1) & IEEE80211_ADDBA_PARAM_POLICY_MASK;
+ BA_para_set |= BIT(1) & IEEE80211_ADDBA_PARAM_POLICY_MASK;
/* TID */
BA_para_set |= (status << 2) & IEEE80211_ADDBA_PARAM_TID_MASK;
/* max buffer size is 8 MSDU */
@@ -4579,12 +4577,6 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
pmlmepriv->num_sta_no_ht++;
}
-#ifdef CONFIG_INTEL_WIDI
- /* process_intel_widi_query_or_tigger(padapter, bssid); */
- if (process_intel_widi_query_or_tigger(padapter, bssid))
- return _FAIL;
-#endif /* CONFIG_INTEL_WIDI */
-
#if defined(DBG_RX_SIGNAL_DISPLAY_SSID_MONITORED) & 1
if (strcmp(bssid->Ssid.Ssid, DBG_RX_SIGNAL_DISPLAY_SSID_MONITORED) == 0) {
DBG_871X("Receiving %s("MAC_FMT", DSConfig:%u) from ch%u with ss:%3u, sq:%3u, RawRSSI:%3ld\n"
@@ -5052,7 +5044,7 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
- kfree((u8 *)pcmd_obj);
+ kfree(pcmd_obj);
return;
}
@@ -5073,8 +5065,8 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
if (collect_bss_info(padapter, precv_frame, (struct wlan_bssid_ex *)&psurvey_evt->bss) == _FAIL) {
- kfree((u8 *)pcmd_obj);
- kfree((u8 *)pevtcmd);
+ kfree(pcmd_obj);
+ kfree(pevtcmd);
return;
}
@@ -5105,7 +5097,7 @@ void report_surveydone_event(struct adapter *padapter)
cmdsz = (sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
- kfree((u8 *)pcmd_obj);
+ kfree(pcmd_obj);
return;
}
@@ -5152,7 +5144,7 @@ void report_join_res(struct adapter *padapter, int res)
cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
- kfree((u8 *)pcmd_obj);
+ kfree(pcmd_obj);
return;
}
@@ -5203,7 +5195,7 @@ void report_wmm_edca_update(struct adapter *padapter)
cmdsz = (sizeof(struct wmm_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
- kfree((u8 *)pcmd_obj);
+ kfree(pcmd_obj);
return;
}
@@ -5250,7 +5242,7 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
- kfree((u8 *)pcmd_obj);
+ kfree(pcmd_obj);
return;
}
@@ -5305,7 +5297,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
- kfree((u8 *)pcmd_obj);
+ kfree(pcmd_obj);
return;
}
@@ -5713,11 +5705,6 @@ void linked_status_chk(struct adapter *padapter)
/* Marked by Kurt 20130715 */
 /* For WiDi 3.5 and later, they don't ask the WiDi sink to do roaming, so we cannot check the rx limit that strictly. */
 /* todo: check why rx_chk would be false while we are in a miracast session */
- /* ifdef CONFIG_INTEL_WIDI */
- /* if (padapter->mlmepriv.widi_state != INTEL_WIDI_STATE_NONE) */
- /* rx_chk_limit = 1; */
- /* endif */
-
psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.MacAddress);
if (psta != NULL) {
if (chk_ap_is_alive(padapter, psta) == false)
@@ -5836,7 +5823,7 @@ void survey_timer_hdl(struct timer_list *t)
psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
if (psurveyPara == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
goto exit_survey_timer_hdl;
}
@@ -6603,7 +6590,7 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
ptxBeacon_parm = rtw_zmalloc(sizeof(struct Tx_Beacon_param));
if (ptxBeacon_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index b9d36db762a9..bdc52d8d5625 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -75,7 +75,6 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
psta = (struct sta_info *)(pstapriv->pstainfo_buf);
-
for (i = 0; i < NUM_STA; i++) {
_rtw_init_stainfo(psta);
@@ -204,8 +203,7 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
if (list_empty(&pfree_sta_queue->queue)) {
/* spin_unlock_bh(&(pfree_sta_queue->lock)); */
spin_unlock_bh(&(pstapriv->sta_hash_lock));
- psta = NULL;
- return psta;
+ return NULL;
} else {
psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
@@ -318,7 +316,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
struct sta_priv *pstapriv = &padapter->stapriv;
struct hw_xmit *phwxmit;
- if (psta == NULL)
+ if (!psta)
goto exit;
@@ -394,7 +392,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
);
pstapriv->asoc_sta_count--;
-
/* re-init sta_info; 20061114 will be init in alloc_stainfo */
/* _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv); */
/* _rtw_init_sta_recv_priv(&psta->sta_recvpriv); */
@@ -437,7 +434,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
if (!(psta->state & WIFI_AP_STATE))
rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, false);
-
/* release mac id for non-bc/mc station, */
rtw_release_macid(pstapriv->padapter, psta);
@@ -524,7 +520,7 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
u8 *addr;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- if (hwaddr == NULL)
+ if (!hwaddr)
return NULL;
if (IS_MCAST(hwaddr))
@@ -569,7 +565,7 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
- if (psta == NULL) {
+ if (!psta) {
res = _FAIL;
RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("rtw_alloc_stainfo fail"));
goto exit;
@@ -583,15 +579,12 @@ exit:
return _SUCCESS;
}
-
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
{
- struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- psta = rtw_get_stainfo(pstapriv, bc_addr);
- return psta;
+ return rtw_get_stainfo(pstapriv, bc_addr);
}
u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
@@ -620,7 +613,6 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
}
spin_unlock_bh(&(pacl_node_q->lock));
-
if (pacl_list->mode == 1) /* accept unless in deny list */
res = !match;
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 2c172966ea60..fdbf967812f9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -518,7 +518,7 @@ unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
else if ((bcn_interval << 2) > WAIT_FOR_BCN_TO_MAX)
return WAIT_FOR_BCN_TO_MAX;
else
- return ((bcn_interval << 2));
+ return bcn_interval << 2;
}
void invalidate_cam_all(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index b87f13a0b563..2bb679e54dc7 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -2861,8 +2861,6 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
/* spin_unlock_bh(&psta->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
-
- return;
}
void enqueue_pending_xmitbuf(
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 2b43f6d3c762..6caddd7834a1 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -75,7 +75,7 @@ static BTCDBGINFO GLBtcDbgInfo;
static void DBG_BT_INFO_INIT(PBTCDBGINFO pinfo, u8 *pbuf, u32 size)
{
- if (NULL == pinfo)
+ if (!pinfo)
return;
memset(pinfo, 0, sizeof(BTCDBGINFO));
@@ -95,7 +95,7 @@ void DBG_BT_INFO(u8 *dbgmsg)
pinfo = &GLBtcDbgInfo;
- if (NULL == pinfo->info)
+ if (!pinfo->info)
return;
msglen = strlen(dbgmsg);
@@ -112,8 +112,7 @@ void DBG_BT_INFO(u8 *dbgmsg)
/* */
static u8 halbtcoutsrc_IsBtCoexistAvailable(PBTC_COEXIST pBtCoexist)
{
- if (!pBtCoexist->bBinded ||
- NULL == pBtCoexist->Adapter){
+ if (!pBtCoexist->bBinded || !pBtCoexist->Adapter) {
return false;
}
return true;
@@ -1571,7 +1570,7 @@ void hal_btcoex_SetDBG(struct adapter *padapter, u32 *pDbgModule)
u32 i;
- if (NULL == pDbgModule)
+ if (!pDbgModule)
return;
for (i = 0; i < BTC_MSG_MAX; i++)
@@ -1585,7 +1584,7 @@ u32 hal_btcoex_GetDBG(struct adapter *padapter, u8 *pStrBuf, u32 bufSize)
u32 leftSize;
- if ((NULL == pStrBuf) || (0 == bufSize))
+ if (!pStrBuf || bufSize == 0)
return 0;
pstr = pStrBuf;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 7f8ec55b08f1..e5f1153527b9 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -18,7 +18,7 @@ u8 rtw_hal_data_init(struct adapter *padapter)
if (is_primary_adapter(padapter)) { /* if (padapter->isprimary) */
padapter->hal_data_sz = sizeof(struct hal_com_data);
padapter->HalData = vzalloc(padapter->hal_data_sz);
- if (padapter->HalData == NULL) {
+ if (!padapter->HalData) {
DBG_8192C("cannot alloc memory for HAL DATA\n");
return _FAIL;
}
@@ -909,7 +909,7 @@ s32 c2h_evt_read_88xx(struct adapter *adapter, u8 *buf)
int i;
u8 trigger;
- if (buf == NULL)
+ if (!buf)
goto exit;
trigger = rtw_read8(adapter, REG_C2HEVT_CLEAR);
@@ -982,7 +982,7 @@ void rtw_hal_update_sta_rate_mask(struct adapter *padapter, struct sta_info *pst
u8 i, rf_type, limit;
u32 tx_ra_bitmap;
- if (psta == NULL)
+ if (!psta)
return;
tx_ra_bitmap = 0;
@@ -1389,11 +1389,11 @@ bool IsHexDigit(char chTmp)
u32 MapCharToHexDigit(char chTmp)
{
if (chTmp >= '0' && chTmp <= '9')
- return (chTmp - '0');
+ return chTmp - '0';
else if (chTmp >= 'a' && chTmp <= 'f')
- return (10 + (chTmp - 'a'));
+ return 10 + (chTmp - 'a');
else if (chTmp >= 'A' && chTmp <= 'F')
- return (10 + (chTmp - 'A'));
+ return 10 + (chTmp - 'A');
else
return 0;
}
@@ -1407,7 +1407,7 @@ bool GetHexValueFromString(char *szStr, u32 *pu4bVal, u32 *pu4bMove)
char *szScan = szStr;
/* Check input parameter. */
- if (szStr == NULL || pu4bVal == NULL || pu4bMove == NULL) {
+ if (!szStr || !pu4bVal || !pu4bMove) {
DBG_871X("GetHexValueFromString(): Invalid input arguments! szStr: %p, pu4bVal: %p, pu4bMove: %p\n",
szStr, pu4bVal, pu4bMove);
return false;
@@ -1618,14 +1618,14 @@ void rtw_get_raw_rssi_info(void *sel, struct adapter *padapter)
isCCKrate = psample_pkt_rssi->data_rate <= DESC_RATE11M;
if (isCCKrate)
- psample_pkt_rssi->mimo_singal_strength[0] = psample_pkt_rssi->pwdball;
+ psample_pkt_rssi->mimo_signal_strength[0] = psample_pkt_rssi->pwdball;
for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
DBG_871X_SEL_NL(
sel,
- "RF_PATH_%d =>singal_strength:%d(%%), singal_quality:%d(%%)\n",
- rf_path, psample_pkt_rssi->mimo_singal_strength[rf_path],
- psample_pkt_rssi->mimo_singal_quality[rf_path]
+ "RF_PATH_%d =>signal_strength:%d(%%), signal_quality:%d(%%)\n",
+ rf_path, psample_pkt_rssi->mimo_signal_strength[rf_path],
+ psample_pkt_rssi->mimo_signal_quality[rf_path]
);
if (!isCCKrate) {
@@ -1651,11 +1651,11 @@ void rtw_dump_raw_rssi_info(struct adapter *padapter)
isCCKrate = psample_pkt_rssi->data_rate <= DESC_RATE11M;
if (isCCKrate)
- psample_pkt_rssi->mimo_singal_strength[0] = psample_pkt_rssi->pwdball;
+ psample_pkt_rssi->mimo_signal_strength[0] = psample_pkt_rssi->pwdball;
for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
- DBG_871X("RF_PATH_%d =>singal_strength:%d(%%), singal_quality:%d(%%)"
- , rf_path, psample_pkt_rssi->mimo_singal_strength[rf_path], psample_pkt_rssi->mimo_singal_quality[rf_path]);
+ DBG_871X("RF_PATH_%d =>signal_strength:%d(%%), signal_quality:%d(%%)"
+ , rf_path, psample_pkt_rssi->mimo_signal_strength[rf_path], psample_pkt_rssi->mimo_signal_quality[rf_path]);
if (!isCCKrate) {
printk(", rx_ofdm_pwr:%d(dBm), rx_ofdm_snr:%d(dB)\n",
@@ -1682,8 +1682,8 @@ void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe)
psample_pkt_rssi->pwr_all = pPhyInfo->recv_signal_power;
for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
- psample_pkt_rssi->mimo_singal_strength[rf_path] = pPhyInfo->rx_mimo_signal_strength[rf_path];
- psample_pkt_rssi->mimo_singal_quality[rf_path] = pPhyInfo->rx_mimo_signal_quality[rf_path];
+ psample_pkt_rssi->mimo_signal_strength[rf_path] = pPhyInfo->rx_mimo_signal_strength[rf_path];
+ psample_pkt_rssi->mimo_signal_quality[rf_path] = pPhyInfo->rx_mimo_signal_quality[rf_path];
if (!isCCKrate) {
psample_pkt_rssi->ofdm_pwr[rf_path] = pPhyInfo->RxPwr[rf_path];
psample_pkt_rssi->ofdm_snr[rf_path] = pPhyInfo->RxSNR[rf_path];
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 12c1cd590056..336764464e7d 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -1723,7 +1723,7 @@ s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 reg_pwr_tbl_sel,
idx_rate_sctn = get_rate_sctn_idx(data_rate);
if (band_type == BAND_ON_5G && idx_rate_sctn == 0)
- DBG_871X("Wrong rate 0x%x: No CCK in 5G Band\n", DataRate);
+ DBG_871X("Wrong rate 0x%x: No CCK in 5G Band\n", DataRate);
/* workaround for wrong index combination to obtain tx power limit, */
/* OFDM only exists in BW 20M */
@@ -1749,6 +1749,7 @@ s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 reg_pwr_tbl_sel,
if (band_type == BAND_ON_2_4G) {
s8 limits[10] = {0}; u8 i = 0;
+
for (i = 0; i < MAX_REGULATION_NUM; i++)
limits[i] = hal_data->TxPwrLimit_2_4G[i]
[idx_bandwidth]
@@ -1766,6 +1767,7 @@ s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 reg_pwr_tbl_sel,
} else if (band_type == BAND_ON_5G) {
s8 limits[10] = {0}; u8 i = 0;
+
for (i = 0; i < MAX_REGULATION_NUM; ++i)
limits[i] = hal_data->TxPwrLimit_5G[i]
[idx_bandwidth]
@@ -2236,7 +2238,7 @@ int phy_ConfigMACWithParaFile(struct adapter *Adapter, char *pFileName)
memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
- if ((pHalData->mac_reg_len == 0) && (pHalData->mac_reg == NULL)) {
+ if ((pHalData->mac_reg_len == 0) && !pHalData->mac_reg) {
rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
if (rtw_is_file_readable(file_path_bs) == true) {
@@ -2311,7 +2313,7 @@ int phy_ConfigBBWithParaFile(
memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
- if ((pBufLen != NULL) && (*pBufLen == 0) && (pBuf == NULL)) {
+ if (pBufLen && (*pBufLen == 0) && !pBuf) {
rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
if (rtw_is_file_readable(file_path_bs) == true) {
@@ -2336,7 +2338,7 @@ int phy_ConfigBBWithParaFile(
}
}
} else {
- if ((pBufLen != NULL) && (*pBufLen == 0) && (pBuf == NULL)) {
+ if (pBufLen && (*pBufLen == 0) && !pBuf) {
memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
rtStatus = _SUCCESS;
} else
@@ -2680,7 +2682,7 @@ int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char *pFileName)
memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
- if ((pHalData->bb_phy_reg_pg_len == 0) && (pHalData->bb_phy_reg_pg == NULL)) {
+ if ((pHalData->bb_phy_reg_pg_len == 0) && !pHalData->bb_phy_reg_pg) {
rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
if (rtw_is_file_readable(file_path_bs) == true) {
@@ -2743,7 +2745,7 @@ int PHY_ConfigRFWithParaFile(
memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
- if ((pBufLen != NULL) && (*pBufLen == 0) && (pBuf == NULL)) {
+ if (pBufLen && (*pBufLen == 0) && !pBuf) {
rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
if (rtw_is_file_readable(file_path_bs) == true) {
@@ -2768,7 +2770,7 @@ int PHY_ConfigRFWithParaFile(
}
}
} else {
- if ((pBufLen != NULL) && (*pBufLen == 0) && (pBuf == NULL)) {
+ if (pBufLen && (*pBufLen == 0) && !pBuf) {
memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
rtStatus = _SUCCESS;
} else
@@ -2925,7 +2927,7 @@ int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char *pFileName)
memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
- if ((pHalData->rf_tx_pwr_track_len == 0) && (pHalData->rf_tx_pwr_track == NULL)) {
+ if ((pHalData->rf_tx_pwr_track_len == 0) && !pHalData->rf_tx_pwr_track) {
rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
if (rtw_is_file_readable(file_path_bs) == true) {
@@ -3238,7 +3240,7 @@ int PHY_ConfigRFWithPowerLimitTableParaFile(
memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
- if ((pHalData->rf_tx_pwr_lmt_len == 0) && (pHalData->rf_tx_pwr_lmt == NULL)) {
+ if ((pHalData->rf_tx_pwr_lmt_len == 0) && !pHalData->rf_tx_pwr_lmt) {
rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
if (rtw_is_file_readable(file_path_bs) == true) {
diff --git a/drivers/staging/rtl8723bs/hal/odm.c b/drivers/staging/rtl8723bs/hal/odm.c
index 7de5161e2ac4..e3f4307f3d20 100644
--- a/drivers/staging/rtl8723bs/hal/odm.c
+++ b/drivers/staging/rtl8723bs/hal/odm.c
@@ -691,7 +691,7 @@ void ODM_CmnInfoHook(PDM_ODM_T pDM_Odm, ODM_CMNINFO_E CmnInfo, void *pValue)
/* break; */
/* case ODM_CMNINFO_MAC_STATUS: */
- /* pDM_Odm->pMacInfo = (ODM_MAC_INFO *)pValue; */
+ /* pDM_Odm->pMacInfo = (struct odm_mac_status_info *)pValue; */
/* break; */
/* To remove the compiler warning, must add an empty default statement to handle the other values. */
default:
@@ -1076,7 +1076,7 @@ u32 ODM_Get_Rate_Bitmap(
/* printk("%s ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n", __func__, rssi_level, WirelessMode, rate_bitmap); */
ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, (" ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n", rssi_level, WirelessMode, rate_bitmap));
- return (ra_mask&rate_bitmap);
+ return ra_mask & rate_bitmap;
}
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index 23ab160ac2c8..6ba77bb70889 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -82,15 +82,15 @@
/* Remove DIG by yuchen */
-typedef struct _Dynamic_Primary_CCA {
+struct dynamic_primary_CCA {
u8 PriCCA_flag;
u8 intf_flag;
u8 intf_type;
u8 DupRTS_flag;
u8 Monitor_flag;
u8 CH_offset;
- u8 MF_state;
-} Pri_CCA_T, *pPri_CCA_T;
+ u8 MF_state;
+};
typedef struct _Rate_Adaptive_Table_ {
u8 firstconnect;
@@ -261,7 +261,7 @@ struct odm_packet_info {
bool is_beacon;
};
-typedef struct _ODM_Phy_Dbg_Info_ {
+struct odm_phy_dbg_info {
/* ODM Write, debug info */
s8 RxSNRdB[4];
u32 NumQryPhyStatus;
@@ -271,11 +271,11 @@ typedef struct _ODM_Phy_Dbg_Info_ {
/* Others */
s32 RxEVM[4];
-} ODM_PHY_DBG_INFO_T;
+};
-typedef struct _ODM_Mac_Status_Info_ {
+struct odm_mac_status_info {
u8 test;
-} ODM_MAC_INFO;
+};
typedef enum tag_Dynamic_ODM_Support_Ability_Type {
/* BB Team */
@@ -1092,11 +1092,11 @@ typedef struct DM_Out_Source_Dynamic_Mechanism_Structure {
/* Define ........... */
/* Latest packet phy info (ODM write) */
- ODM_PHY_DBG_INFO_T PhyDbgInfo;
+ struct odm_phy_dbg_info PhyDbgInfo;
/* PHY_INFO_88E PhyInfo; */
/* Latest packet phy info (ODM write) */
- ODM_MAC_INFO *pMacInfo;
+ struct odm_mac_status_info *pMacInfo;
/* MAC_INFO_88E MacInfo; */
 /* Different Team independent structure?? */
@@ -1112,7 +1112,7 @@ typedef struct DM_Out_Source_Dynamic_Mechanism_Structure {
FAT_T DM_FatTable;
DIG_T DM_DigTable;
PS_T DM_PSTable;
- Pri_CCA_T DM_PriCCA;
+ struct dynamic_primary_CCA DM_PriCCA;
RXHP_T DM_RXHP_Table;
RA_T DM_RA_Table;
false_ALARM_STATISTICS FalseAlmCnt;
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
index ee2c293e4f59..d802a1fde58f 100644
--- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
@@ -19,7 +19,7 @@ static u8 odm_QueryRxPwrPercentage(s8 AntPower)
else if (AntPower >= 0)
return 100;
else
- return (100+AntPower);
+ return 100 + AntPower;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index 9f4a10aaa774..fe3891106a6d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -1480,7 +1480,7 @@ static void rtl8723b_set_FwRsvdPagePkt(
MaxRsvdPageBufSize = RsvdPageNum*PageSize;
pcmdframe = rtw_alloc_cmdxmitframe(pxmitpriv);
- if (pcmdframe == NULL) {
+ if (!pcmdframe) {
DBG_871X("%s: alloc ReservedPagePacket fail!\n", __func__);
return;
}
@@ -1620,7 +1620,7 @@ static void rtl8723b_set_FwRsvdPagePkt(
 /* if the AP station info exists, get the kek, kck from the station info. */
psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
- if (psta == NULL) {
+ if (!psta) {
memset(kek, 0, RTW_KEK_LEN);
memset(kck, 0, RTW_KCK_LEN);
DBG_8192C("%s, KEK, KCK download rsvd page all zero\n", __func__);
@@ -1856,7 +1856,7 @@ static void rtl8723b_set_AP_FwRsvdPagePkt(
MaxRsvdPageBufSize = RsvdPageNum*PageSize;
pcmdframe = rtw_alloc_cmdxmitframe(pxmitpriv);
- if (pcmdframe == NULL) {
+ if (!pcmdframe) {
DBG_871X("%s: alloc ReservedPagePacket fail!\n", __func__);
return;
}
@@ -2069,7 +2069,7 @@ void rtl8723b_Add_RateATid(
u32 mask = bitmap&0x0FFFFFFF;
psta = pmlmeinfo->FW_sta_info[mac_id].psta;
- if (psta == NULL)
+ if (!psta)
return;
bw = psta->bw_mode;
@@ -2107,7 +2107,7 @@ static void ConstructBtNullFunctionData(
pmlmeext = &padapter->mlmeextpriv;
pmlmeinfo = &pmlmeext->mlmext_info;
- if (NULL == StaAddr) {
+ if (!StaAddr) {
memcpy(bssid, myid(&padapter->eeprompriv), ETH_ALEN);
StaAddr = bssid;
}
@@ -2176,7 +2176,7 @@ static void SetFwRsvdPagePkt_BTCoex(struct adapter *padapter)
MaxRsvdPageBufSize = RsvdPageNum*PageSize;
pcmdframe = rtw_alloc_cmdxmitframe(pxmitpriv);
- if (pcmdframe == NULL) {
+ if (!pcmdframe) {
DBG_8192C("%s: alloc ReservedPagePacket fail!\n", __func__);
return;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
index 6b6fc835c601..65781477cac9 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -254,7 +254,7 @@ void rtl8723b_HalDmWatchDog_in_LPS(struct adapter *Adapter)
/* 1 Find MIN-RSSI */
psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
- if (psta == NULL)
+ if (!psta)
goto skip_lps_dm;
pdmpriv->EntryMinUndecoratedSmoothedPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 85fd12cca4ae..caa8e2f39448 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -883,7 +883,7 @@ static void hal_ReadEFuse_WiFi(
}
efuseTbl = rtw_malloc(EFUSE_MAX_MAP_LEN);
- if (efuseTbl == NULL) {
+ if (!efuseTbl) {
DBG_8192C("%s: alloc efuseTbl fail!\n", __func__);
return;
}
@@ -1463,7 +1463,7 @@ static s32 Hal_EfusePgPacketRead(
s32 ret;
- if (data == NULL)
+ if (!data)
return false;
EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, &max_section, bPseudoTest);
@@ -3664,7 +3664,7 @@ s32 c2h_handler_8723b(struct adapter *padapter, u8 *buf)
s32 ret = _SUCCESS;
u8 index = 0;
- if (pC2hEvent == NULL) {
+ if (!pC2hEvent) {
DBG_8192C("%s(): pC2hEventis NULL\n", __func__);
ret = _FAIL;
goto exit;
@@ -3714,7 +3714,7 @@ static void process_c2h_event(struct adapter *padapter, PC2H_EVT_HDR pC2hEvent,
{
u8 index = 0;
- if (c2hBuf == NULL) {
+ if (!c2hBuf) {
DBG_8192C("%s c2hbuff is NULL\n", __func__);
return;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 78a4828ecb65..4f2ad54af398 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -58,7 +58,7 @@ static u32 phy_CalculateBitShift(u32 BitMask)
*/
u32 PHY_QueryBBReg_8723B(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
{
- u32 ReturnValue = 0, OriginalValue, BitShift;
+ u32 OriginalValue, BitShift;
#if (DISABLE_BB_RF == 1)
return 0;
@@ -68,9 +68,8 @@ u32 PHY_QueryBBReg_8723B(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
OriginalValue = rtw_read32(Adapter, RegAddr);
BitShift = phy_CalculateBitShift(BitMask);
- ReturnValue = (OriginalValue & BitMask) >> BitShift;
- return ReturnValue;
+ return (OriginalValue & BitMask) >> BitShift;
}
@@ -284,18 +283,16 @@ u32 PHY_QueryRFReg_8723B(
u32 BitMask
)
{
- u32 Original_Value, Readback_Value, BitShift;
+ u32 Original_Value, BitShift;
#if (DISABLE_BB_RF == 1)
return 0;
#endif
Original_Value = phy_RFSerialRead_8723B(Adapter, eRFPath, RegAddr);
-
BitShift = phy_CalculateBitShift(BitMask);
- Readback_Value = (Original_Value & BitMask) >> BitShift;
- return Readback_Value;
+ return (Original_Value & BitMask) >> BitShift;
}
/**
@@ -827,7 +824,7 @@ static u8 phy_GetSecondaryChnl_8723B(struct adapter *Adapter)
}
RT_TRACE(_module_hal_init_c_, _drv_info_, ("SCMapping: SC Value %x\n", ((SCSettingOf40 << 4) | SCSettingOf20)));
- return ((SCSettingOf40 << 4) | SCSettingOf20);
+ return (SCSettingOf40 << 4) | SCSettingOf20;
}
static void phy_PostSetBwMode8723B(struct adapter *Adapter)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
index 76c8e6e9e6bc..86512264e280 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
@@ -34,7 +34,7 @@ static void process_link_qual(struct adapter *padapter, union recv_frame *prfram
struct rx_pkt_attrib *pattrib;
struct signal_stat *signal_stat;
- if (prframe == NULL || padapter == NULL)
+ if (!prframe || !padapter)
return;
pattrib = &prframe->u.hdr.attrib;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 26742960ed65..b269de52e535 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -170,7 +170,7 @@ static void rtl8723bs_c2h_packet_handler(struct adapter *padapter,
/* DBG_871X("+%s() length =%d\n", __func__, length); */
tmp = rtw_zmalloc(length);
- if (tmp == NULL)
+ if (!tmp)
return;
memcpy(tmp, pbuf, length);
@@ -424,7 +424,7 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
n = NR_RECVBUFF * sizeof(struct recv_buf) + 4;
precvpriv->pallocated_recv_buf = rtw_zmalloc(n);
- if (precvpriv->pallocated_recv_buf == NULL) {
+ if (!precvpriv->pallocated_recv_buf) {
res = _FAIL;
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("alloc recv_buf fail!\n"));
goto exit;
@@ -439,7 +439,7 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
if (res == _FAIL)
break;
- if (precvbuf->pskb == NULL) {
+ if (!precvbuf->pskb) {
SIZE_PTR tmpaddr = 0;
SIZE_PTR alignment = 0;
@@ -453,7 +453,7 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
}
- if (precvbuf->pskb == NULL) {
+ if (!precvbuf->pskb) {
DBG_871X("%s: alloc_skb fail!\n", __func__);
}
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index 69c4db5b5b0c..7b06aab04ee6 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -58,12 +58,12 @@ static s32 rtl8723_dequeue_writeport(struct adapter *padapter)
ret = ret || check_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
- if (true == ret)
+ if (ret == true)
pxmitbuf = dequeue_pending_xmitbuf_under_survey(pxmitpriv);
else
pxmitbuf = dequeue_pending_xmitbuf(pxmitpriv);
- if (pxmitbuf == NULL)
+ if (!pxmitbuf)
return true;
deviceId = ffaddr2deviceId(pdvobjpriv, pxmitbuf->ff_hwaddr);
@@ -283,8 +283,7 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
/* check xmit_buf size enough or not */
txlen = txdesc_size + rtw_wlan_pkt_size(pxmitframe);
- if (
- (NULL == pxmitbuf) ||
+ if (!pxmitbuf ||
((_RND(pxmitbuf->len, 8) + txlen) > max_xmit_len) ||
(k >= (rtw_hal_sdio_max_txoqt_free_space(padapter)-1))
) {
@@ -307,7 +306,7 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
}
pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
- if (pxmitbuf == NULL) {
+ if (!pxmitbuf) {
#ifdef DBG_XMIT_BUF
DBG_871X_LEVEL(_drv_err_, "%s: xmit_buf is not enough!\n", __func__);
#endif
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 3fee34484577..a60162046e5a 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -425,12 +425,9 @@ static u32 sdio_read_port(
)
{
struct adapter *adapter;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
struct hal_com_data *hal;
u32 oldcnt;
-#ifdef SDIO_DYNAMIC_ALLOC_MEM
- u8 *oldmem;
-#endif
s32 err;
@@ -447,13 +444,6 @@ static u32 sdio_read_port(
err = _sd_read(intfhdl, addr, cnt, mem);
-#ifdef SDIO_DYNAMIC_ALLOC_MEM
- if ((oldcnt != cnt) && (oldmem)) {
- memcpy(oldmem, mem, oldcnt);
- kfree(mem);
- }
-#endif
-
if (err)
return _FAIL;
return _SUCCESS;
@@ -483,7 +473,7 @@ static u32 sdio_write_port(
)
{
struct adapter *adapter;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
s32 err;
struct xmit_buf *xmitbuf = (struct xmit_buf *)mem;
@@ -560,7 +550,7 @@ static s32 _sdio_local_read(
n = RND4(cnt);
tmpbuf = rtw_malloc(n);
if (!tmpbuf)
- return (-1);
+ return -1;
err = _sd_read(intfhdl, addr, n, tmpbuf);
if (!err)
@@ -601,7 +591,7 @@ s32 sdio_local_read(
n = RND4(cnt);
tmpbuf = rtw_malloc(n);
if (!tmpbuf)
- return (-1);
+ return -1;
err = sd_read(intfhdl, addr, n, tmpbuf);
if (!err)
@@ -646,7 +636,7 @@ s32 sdio_local_write(
tmpbuf = rtw_malloc(cnt);
if (!tmpbuf)
- return (-1);
+ return -1;
memcpy(tmpbuf, buf, cnt);
@@ -1217,7 +1207,7 @@ u8 RecvOnePkt(struct adapter *adapter, u32 size)
{
struct recv_buf *recvbuf;
struct dvobj_priv *sddev;
- PSDIO_DATA psdio_data;
+ struct sdio_data *psdio;
struct sdio_func *func;
u8 res = false;
diff --git a/drivers/staging/rtl8723bs/include/cmd_osdep.h b/drivers/staging/rtl8723bs/include/cmd_osdep.h
index 0749936df032..d3af9f44ad59 100644
--- a/drivers/staging/rtl8723bs/include/cmd_osdep.h
+++ b/drivers/staging/rtl8723bs/include/cmd_osdep.h
@@ -8,11 +8,11 @@
#define __CMD_OSDEP_H_
-extern sint _rtw_init_cmd_priv (struct cmd_priv *pcmdpriv);
-extern sint _rtw_init_evt_priv(struct evt_priv *pevtpriv);
+int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
+int rtw_init_evt_priv(struct evt_priv *pevtpriv);
extern void _rtw_free_evt_priv (struct evt_priv *pevtpriv);
extern void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv);
-extern sint _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj);
+int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj);
extern struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue);
#endif
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index bafb2c30e7fb..0fd84c93e72b 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -37,10 +37,6 @@ enum _NIC_VERSION {
#include <rtw_ht.h>
-#ifdef CONFIG_INTEL_WIDI
-#include <rtw_intel_widi.h>
-#endif
-
#include <rtw_cmd.h>
#include <cmd_osdep.h>
#include <rtw_security.h>
@@ -220,7 +216,6 @@ struct registry_priv
#define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *) 0)->field)
#include <drv_types_sdio.h>
-#define INTF_DATA SDIO_DATA
#define is_primary_adapter(adapter) (1)
#define get_iface_type(adapter) (IFACE_PORT0)
@@ -476,9 +471,8 @@ struct dvobj_priv
/*-------- below is for SDIO INTERFACE --------*/
-#ifdef INTF_DATA
- INTF_DATA intf_data;
-#endif
+struct sdio_data intf_data;
+
};
#define dvobj_to_pwrctl(dvobj) (&(dvobj->pwrctl_priv))
diff --git a/drivers/staging/rtl8723bs/include/drv_types_sdio.h b/drivers/staging/rtl8723bs/include/drv_types_sdio.h
index 23bf30ece2df..09263ad27ce9 100644
--- a/drivers/staging/rtl8723bs/include/drv_types_sdio.h
+++ b/drivers/staging/rtl8723bs/include/drv_types_sdio.h
@@ -16,7 +16,7 @@
#include <linux/mmc/card.h>
#endif
-typedef struct sdio_data
+struct sdio_data
{
u8 func_number;
@@ -26,6 +26,6 @@ typedef struct sdio_data
struct sdio_func *func;
void *sys_sdio_irq_thd;
-} SDIO_DATA, *PSDIO_DATA;
+};
#endif
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index 9efb4dcb9d3a..74c028fbe8f7 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -202,7 +202,7 @@ enum NETWORK_TYPE
#define IsSupportedVHT(NetType) (((NetType) & (WIRELESS_11AC)) ? true : false)
-typedef struct ieee_param {
+struct ieee_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
union {
@@ -240,13 +240,13 @@ typedef struct ieee_param {
u8 buf[0];
} bcn_ie;
} u;
-}ieee_param;
+};
-typedef struct ieee_param_ex {
+struct ieee_param_ex {
u32 cmd;
u8 sta_addr[ETH_ALEN];
u8 data[0];
-}ieee_param_ex;
+};
struct sta_data{
u16 aid;
@@ -870,13 +870,6 @@ static inline int is_zero_mac_addr(const u8 *addr)
#define CFG_IEEE80211_RESERVE_FCS (1<<0)
#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
-typedef struct tx_pending_t{
- int frag;
- struct ieee80211_txb *txb;
-}tx_pending_t;
-
-
-
#define MAXTID 16
#define IEEE_A (1<<0)
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
index 299b55538788..b83824ca2e31 100644
--- a/drivers/staging/rtl8723bs/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -122,17 +122,15 @@ struct P2P_PS_CTWPeriod_t {
u8 CTWPeriod; /* TU */
};
-extern u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *obj);
+int rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *obj);
extern struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv);
extern void rtw_free_cmd_obj(struct cmd_obj *pcmd);
void rtw_stop_cmd_thread(struct adapter *adapter);
int rtw_cmd_thread(void *context);
-extern u32 rtw_init_cmd_priv (struct cmd_priv *pcmdpriv);
extern void rtw_free_cmd_priv (struct cmd_priv *pcmdpriv);
-extern u32 rtw_init_evt_priv (struct evt_priv *pevtpriv);
extern void rtw_free_evt_priv (struct evt_priv *pevtpriv);
extern void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
@@ -829,7 +827,7 @@ Result:
u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num, struct rtw_ieee80211_channel *ch, int ch_num);
extern u8 rtw_createbss_cmd(struct adapter *padapter);
-u8 rtw_startbss_cmd(struct adapter *padapter, int flags);
+int rtw_startbss_cmd(struct adapter *padapter, int flags);
struct sta_info;
extern u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_key, bool enqueue);
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index 1ea9ea0e8d2e..2693b554f414 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -454,33 +454,6 @@ struct mlme_priv {
_lock bcn_update_lock;
u8 update_bcn;
-#ifdef CONFIG_INTEL_WIDI
- int widi_state;
- int listen_state;
- _timer listen_timer;
- atomic_t rx_probe_rsp; /* 1:receive probe respone from RDS source. */
- u8 *l2sdTaBuffer;
- u8 channel_idx;
- u8 group_cnt; /* In WiDi 3.5, they specified another scan algo. for WFD/RDS co-existed */
- u8 sa_ext[L2SDTA_SERVICE_VE_LEN];
-
- u8 widi_enable;
- /**
- * For WiDi 4; upper layer would set
- * p2p_primary_device_type_category_id
- * p2p_primary_device_type_sub_category_id
- * p2p_secondary_device_type_category_id
- * p2p_secondary_device_type_sub_category_id
- */
- u16 p2p_pdt_cid;
- u16 p2p_pdt_scid;
- u8 num_p2p_sdt;
- u16 p2p_sdt_cid[MAX_NUM_P2P_SDT];
- u16 p2p_sdt_scid[MAX_NUM_P2P_SDT];
- u8 p2p_reject_disable; /* When starting NL80211 wpa_supplicant/hostapd, it will call netdev_close */
- /* such that it will cause p2p disabled. Use this flag to reject. */
-#endif /* CONFIG_INTEL_WIDI */
-
u8 NumOfBcnInfoChkFail;
unsigned long timeBcnInfoChkStart;
};
@@ -619,15 +592,13 @@ void rtw_clear_scan_deny(struct adapter *adapter);
void rtw_set_scan_deny_timer_hdl(struct adapter *adapter);
void rtw_set_scan_deny(struct adapter *adapter, u32 ms);
-extern int _rtw_init_mlme_priv(struct adapter *padapter);
-
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
extern void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
/* extern struct wlan_network* _rtw_dequeue_network(struct __queue *queue); */
-extern struct wlan_network* _rtw_alloc_network(struct mlme_priv *pmlmepriv);
+extern struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
extern void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall);
diff --git a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
index e2a4c680125f..2bc922ce5ae1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
@@ -159,7 +159,7 @@ enum PS_DENY_REASON {
};
#ifdef CONFIG_PNO_SUPPORT
-typedef struct pno_nlo_info
+struct pno_nlo_info
{
u32 fast_scan_period; /* Fast scan period */
u32 ssid_num; /* number of entry */
@@ -168,26 +168,26 @@ typedef struct pno_nlo_info
u8 ssid_length[MAX_PNO_LIST_COUNT]; /* SSID Length Array */
u8 ssid_cipher_info[MAX_PNO_LIST_COUNT]; /* Cipher information for security */
u8 ssid_channel_info[MAX_PNO_LIST_COUNT]; /* channel information */
-}pno_nlo_info_t;
+};
-typedef struct pno_ssid {
+struct pno_ssid {
u32 SSID_len;
u8 SSID[32];
-} pno_ssid_t;
+};
-typedef struct pno_ssid_list {
- pno_ssid_t node[MAX_PNO_LIST_COUNT];
-}pno_ssid_list_t;
+struct pno_ssid_list {
+ struct pno_ssid node[MAX_PNO_LIST_COUNT];
+};
-typedef struct pno_scan_channel_info
+struct pno_scan_channel_info
{
u8 channel;
u8 tx_power;
u8 timeout;
 u8 active; /* set to 1 for active scan, otherwise passive scan. */
-}pno_scan_channel_info_t;
+};
-typedef struct pno_scan_info
+struct pno_scan_info
{
u8 enableRFE; /* Enable RFE */
u8 period_scan_time; /* exclusive with fast_scan_period and slow_scan_period */
@@ -198,8 +198,8 @@ typedef struct pno_scan_info
u8 orig_ch; /* original channel */
u8 channel_num; /* number of channel */
u64 rfe_type; /* rfe_type && 0x00000000000000ff */
- pno_scan_channel_info_t ssid_channel_info[MAX_SCAN_LIST_COUNT];
-}pno_scan_info_t;
+ struct pno_scan_channel_info ssid_channel_info[MAX_SCAN_LIST_COUNT];
+};
#endif /* CONFIG_PNO_SUPPORT */
struct pwrctrl_priv
@@ -279,9 +279,9 @@ struct pwrctrl_priv
#ifdef CONFIG_PNO_SUPPORT
u8 pno_in_resume;
u8 pno_inited;
- pno_nlo_info_t *pnlo_info;
- pno_scan_info_t *pscan_info;
- pno_ssid_list_t *pno_ssid_list;
+ struct pno_nlo_info *pnlo_info;
+ struct pno_scan_info *pscan_info;
+ struct pno_ssid_list *pno_ssid_list;
#endif
u32 wowlan_pattern_context[8][5];
u64 wowlan_fw_iv;
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 1f53c1c7b0da..5de946e66302 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -120,8 +120,8 @@ struct rx_raw_rssi
u8 pwdball;
s8 pwr_all;
- u8 mimo_singal_strength[4];/* in 0~100 index */
- u8 mimo_singal_quality[4];
+ u8 mimo_signal_strength[4];/* in 0~100 index */
+ u8 mimo_signal_quality[4];
s8 ofdm_pwr[4];
u8 ofdm_snr[4];
@@ -302,7 +302,7 @@ struct recv_buf
u32 ref_cnt;
- struct adapter * adapter;
+ struct adapter *adapter;
u8 *pbuf;
u8 *pallocated_buf;
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 37f42b2f22f1..ea1396005a13 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -83,7 +83,7 @@ do{\
#define TXDESC_OFFSET TXDESC_SIZE
-enum TXDESC_SC{
+enum TXDESC_SC {
SC_DONT_CARE = 0x00,
SC_UPPER = 0x01,
SC_LOWER = 0x02,
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index 559bf2606fb7..8c50bbb20f3b 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -266,8 +266,8 @@ enum WIFI_REG_DOMAIN {
#define SetFrameType(pbuf, type) \
do { \
- *(unsigned short *)(pbuf) &= __constant_cpu_to_le16(~(BIT(3) | BIT(2))); \
- *(unsigned short *)(pbuf) |= __constant_cpu_to_le16(type); \
+ *(unsigned short *)(pbuf) &= cpu_to_le16(~(BIT(3) | BIT(2))); \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(type); \
} while (0)
#define GetFrameSubType(pbuf) (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(7) |\
@@ -374,18 +374,18 @@ __inline static unsigned char * get_da(unsigned char *pframe)
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
switch (to_fr_ds) {
- case 0x00: /* ToDs = 0, FromDs = 0 */
- da = GetAddr1Ptr(pframe);
- break;
- case 0x01: /* ToDs = 0, FromDs = 1 */
- da = GetAddr1Ptr(pframe);
- break;
- case 0x02: /* ToDs = 1, FromDs = 0 */
- da = GetAddr3Ptr(pframe);
- break;
- default: /* ToDs = 1, FromDs = 1 */
- da = GetAddr3Ptr(pframe);
- break;
+ case 0x00: /* ToDs = 0, FromDs = 0 */
+ da = GetAddr1Ptr(pframe);
+ break;
+ case 0x01: /* ToDs = 0, FromDs = 1 */
+ da = GetAddr1Ptr(pframe);
+ break;
+ case 0x02: /* ToDs = 1, FromDs = 0 */
+ da = GetAddr3Ptr(pframe);
+ break;
+ default: /* ToDs = 1, FromDs = 1 */
+ da = GetAddr3Ptr(pframe);
+ break;
}
return da;
@@ -398,18 +398,18 @@ __inline static unsigned char * get_sa(unsigned char *pframe)
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
switch (to_fr_ds) {
- case 0x00: /* ToDs = 0, FromDs = 0 */
- sa = GetAddr2Ptr(pframe);
- break;
- case 0x01: /* ToDs = 0, FromDs = 1 */
- sa = GetAddr3Ptr(pframe);
- break;
- case 0x02: /* ToDs = 1, FromDs = 0 */
- sa = GetAddr2Ptr(pframe);
- break;
- default: /* ToDs = 1, FromDs = 1 */
- sa = GetAddr4Ptr(pframe);
- break;
+ case 0x00: /* ToDs = 0, FromDs = 0 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ case 0x01: /* ToDs = 0, FromDs = 1 */
+ sa = GetAddr3Ptr(pframe);
+ break;
+ case 0x02: /* ToDs = 1, FromDs = 0 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ default: /* ToDs = 1, FromDs = 1 */
+ sa = GetAddr4Ptr(pframe);
+ break;
}
return sa;
@@ -421,18 +421,18 @@ __inline static unsigned char * get_hdr_bssid(unsigned char *pframe)
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
switch (to_fr_ds) {
- case 0x00: /* ToDs = 0, FromDs = 0 */
- sa = GetAddr3Ptr(pframe);
- break;
- case 0x01: /* ToDs = 0, FromDs = 1 */
- sa = GetAddr2Ptr(pframe);
- break;
- case 0x02: /* ToDs = 1, FromDs = 0 */
- sa = GetAddr1Ptr(pframe);
- break;
- case 0x03: /* ToDs = 1, FromDs = 1 */
- sa = GetAddr1Ptr(pframe);
- break;
+ case 0x00: /* ToDs = 0, FromDs = 0 */
+ sa = GetAddr3Ptr(pframe);
+ break;
+ case 0x01: /* ToDs = 0, FromDs = 1 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ case 0x02: /* ToDs = 1, FromDs = 0 */
+ sa = GetAddr1Ptr(pframe);
+ break;
+ case 0x03: /* ToDs = 1, FromDs = 1 */
+ sa = GetAddr1Ptr(pframe);
+ break;
}
return sa;
@@ -1070,9 +1070,9 @@ enum P2P_STATE {
P2P_STATE_TX_PROVISION_DIS_REQ = 6, /* In P2P provisioning discovery */
P2P_STATE_RX_PROVISION_DIS_RSP = 7,
P2P_STATE_RX_PROVISION_DIS_REQ = 8,
- P2P_STATE_GONEGO_ING = 9, /* Doing the group owner negoitation handshake */
- P2P_STATE_GONEGO_OK = 10, /* finish the group negoitation handshake with success */
- P2P_STATE_GONEGO_FAIL = 11, /* finish the group negoitation handshake with failure */
+ P2P_STATE_GONEGO_ING = 9, /* Doing the group owner negotiation handshake */
+ P2P_STATE_GONEGO_OK = 10, /* finish the group negotiation handshake with success */
+ P2P_STATE_GONEGO_FAIL = 11, /* finish the group negotiation handshake with failure */
P2P_STATE_RECV_INVITE_REQ_MATCH = 12, /* receiving the P2P Invitation request and match with the profile. */
P2P_STATE_PROVISIONING_ING = 13, /* Doing the P2P WPS */
P2P_STATE_PROVISIONING_DONE = 14, /* Finish the P2P WPS */
@@ -1082,8 +1082,8 @@ enum P2P_STATE {
P2P_STATE_RECV_INVITE_REQ_GO = 18, /* receiving the P2P Invitation request and this wifi is GO. */
P2P_STATE_RECV_INVITE_REQ_JOIN = 19, /* receiving the P2P Invitation request to join an existing P2P Group. */
 P2P_STATE_RX_INVITE_RESP_FAIL = 20, /* receiving the P2P Invitation response with failure */
- P2P_STATE_RX_INFOR_NOREADY = 21, /* receiving p2p negoitation response with information is not available */
- P2P_STATE_TX_INFOR_NOREADY = 22, /* sending p2p negoitation response with information is not available */
+ P2P_STATE_RX_INFOR_NOREADY = 21, /* receiving the p2p negotiation response when the information is not available */
+ P2P_STATE_TX_INFOR_NOREADY = 22, /* sending the p2p negotiation response when the information is not available */
};
enum P2P_WPSINFO {
diff --git a/drivers/staging/rtl8723bs/include/wlan_bssdef.h b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
index bdb14a84e5a5..88890b1c3c4c 100644
--- a/drivers/staging/rtl8723bs/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
@@ -129,15 +129,15 @@ struct ndis_801_11_ai_resfi {
typedef struct _NDIS_802_11_ASSOCIATION_INFORMATION
{
- u32 Length;
- u16 AvailableRequestFixedIEs;
- struct ndis_802_11_ai_reqfi RequestFixedIEs;
- u32 RequestIELength;
- u32 OffsetRequestIEs;
- u16 AvailableResponseFixedIEs;
- struct ndis_801_11_ai_resfi ResponseFixedIEs;
- u32 ResponseIELength;
- u32 OffsetResponseIEs;
+ u32 Length;
+ u16 AvailableRequestFixedIEs;
+ struct ndis_802_11_ai_reqfi RequestFixedIEs;
+ u32 RequestIELength;
+ u32 OffsetRequestIEs;
+ u16 AvailableResponseFixedIEs;
+ struct ndis_801_11_ai_resfi ResponseFixedIEs;
+ u32 ResponseIELength;
+ u32 OffsetResponseIEs;
} NDIS_802_11_ASSOCIATION_INFORMATION, *PNDIS_802_11_ASSOCIATION_INFORMATION;
enum NDIS_802_11_RELOAD_DEFAULTS {
@@ -148,19 +148,19 @@ enum NDIS_802_11_RELOAD_DEFAULTS {
/* Key mapping keys require a BSSID */
typedef struct _NDIS_802_11_KEY
{
- u32 Length; /* Length of this structure */
- u32 KeyIndex;
- u32 KeyLength; /* length of key in bytes */
- NDIS_802_11_MAC_ADDRESS BSSID;
- unsigned long long KeyRSC;
- u8 KeyMaterial[32]; /* variable length depending on above field */
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex;
+ u32 KeyLength; /* length of key in bytes */
+ NDIS_802_11_MAC_ADDRESS BSSID;
+ unsigned long long KeyRSC;
+ u8 KeyMaterial[32]; /* variable length depending on above field */
} NDIS_802_11_KEY, *PNDIS_802_11_KEY;
typedef struct _NDIS_802_11_REMOVE_KEY
{
- u32 Length; /* Length of this structure */
- u32 KeyIndex;
- NDIS_802_11_MAC_ADDRESS BSSID;
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex;
+ NDIS_802_11_MAC_ADDRESS BSSID;
} NDIS_802_11_REMOVE_KEY, *PNDIS_802_11_REMOVE_KEY;
struct ndis_802_11_wep {
@@ -181,7 +181,7 @@ struct ndis_802_11_wep {
#define MIC_CHECK_TIME 60000000
#ifndef Ndis802_11APMode
-#define Ndis802_11APMode (Ndis802_11InfrastructureMax+1)
+#define Ndis802_11APMode (Ndis802_11InfrastructureMax + 1)
#endif
struct wlan_phy_info {
@@ -240,15 +240,15 @@ struct wlan_network {
};
enum VRTL_CARRIER_SENSE {
- DISABLE_VCS,
- ENABLE_VCS,
- AUTO_VCS
+ DISABLE_VCS,
+ ENABLE_VCS,
+ AUTO_VCS
};
enum VCS_TYPE {
- NONE_VCS,
- RTS_CTS,
- CTS_TO_SELF
+ NONE_VCS,
+ RTS_CTS,
+ CTS_TO_SELF
};
#define PWR_CAM 0
@@ -259,9 +259,9 @@ enum VCS_TYPE {
enum UAPSD_MAX_SP {
NO_LIMIT,
- TWO_MSDU,
- FOUR_MSDU,
- SIX_MSDU
+ TWO_MSDU,
+ FOUR_MSDU,
+ SIX_MSDU
};
#define NUM_PRE_AUTH_KEY 16
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 8fb03efd588b..e3d356952875 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -961,7 +961,7 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
if (pPMK->cmd == IW_PMKSA_ADD) {
DBG_871X("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
- return(intReturn);
+ return intReturn;
else
intReturn = true;
@@ -2590,10 +2590,6 @@ static int rtw_wps_start(struct net_device *dev,
DBG_871X("[%s] wps_start = %d\n", __func__, u32wps_start);
-#ifdef CONFIG_INTEL_WIDI
- process_intel_widi_wps_status(padapter, u32wps_start);
-#endif /* CONFIG_INTEL_WIDI */
-
exit:
return ret;
@@ -4518,42 +4514,6 @@ static int rtw_tdls_get(struct net_device *dev,
-#ifdef CONFIG_INTEL_WIDI
-static int rtw_widi_set(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- process_intel_widi_cmd(padapter, extra);
-
- return ret;
-}
-
-static int rtw_widi_set_probe_request(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- u8 *pbuf = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- pbuf = rtw_malloc(sizeof(l2_msg_t));
- if (pbuf) {
- if (copy_from_user(pbuf, wrqu->data.pointer, wrqu->data.length))
- ret = -EFAULT;
- /* memcpy(pbuf, wrqu->data.pointer, wrqu->data.length); */
-
- if (wrqu->data.flags == 0)
- intel_widi_wk_cmd(padapter, INTEL_WIDI_ISSUE_PROB_WK, pbuf, sizeof(l2_msg_t));
- else if (wrqu->data.flags == 1)
- rtw_set_wfd_rds_sink_info(padapter, (l2_msg_t *)pbuf);
- }
- return ret;
-}
-#endif /* CONFIG_INTEL_WIDI */
-
static int rtw_test(
struct net_device *dev,
struct iw_request_info *info,
@@ -4791,17 +4751,6 @@ static const struct iw_priv_args rtw_private_args[] = {
IW_PRIV_TYPE_CHAR | 40, IW_PRIV_TYPE_CHAR | 0x7FF, "test"
},
-#ifdef CONFIG_INTEL_WIDI
- {
- SIOCIWFIRSTPRIV + 0x1E,
- IW_PRIV_TYPE_CHAR | 1024, 0, "widi_set"
- },
- {
- SIOCIWFIRSTPRIV + 0x1F,
- IW_PRIV_TYPE_CHAR | 128, 0, "widi_prob_req"
- },
-#endif /* CONFIG_INTEL_WIDI */
-
#ifdef CONFIG_WOWLAN
{ MP_WOW_ENABLE , IW_PRIV_TYPE_CHAR | 1024, 0, "wow_mode" }, /* set */
#endif
@@ -4852,10 +4801,6 @@ static iw_handler rtw_private_handler[] = {
rtw_mp_efuse_get, /* 0x1B */
NULL, /* 0x1C is reserved for hostapd */
rtw_test, /* 0x1D */
-#ifdef CONFIG_INTEL_WIDI
- rtw_widi_set, /* 0x1E */
- rtw_widi_set_probe_request, /* 0x1F */
-#endif /* CONFIG_INTEL_WIDI */
};
static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 143e3f9b31aa..8a9d838af24e 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -309,9 +309,6 @@ static uint loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
registry_par->max_roaming_times = (u8)rtw_max_roaming_times;
-#ifdef CONFIG_INTEL_WIDI
- registry_par->max_roaming_times = (u8)rtw_max_roaming_times + 2;
-#endif /* CONFIG_INTEL_WIDI */
registry_par->enable80211d = (u8)rtw_80211d;
@@ -404,8 +401,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -757,7 +753,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_init_hal_com_default_value(padapter);
- if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
+ if (rtw_init_cmd_priv(&padapter->cmdpriv)) {
RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n"));
ret8 = _FAIL;
goto exit;
@@ -765,7 +761,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
padapter->cmdpriv.padapter = padapter;
- if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) {
+ if (rtw_init_evt_priv(&padapter->evtpriv)) {
RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n"));
ret8 = _FAIL;
goto exit;
@@ -816,14 +812,6 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_hal_dm_init(padapter);
-#ifdef CONFIG_INTEL_WIDI
- if (rtw_init_intel_widi(padapter) == _FAIL) {
- DBG_871X("Can't rtw_init_intel_widi\n");
- ret8 = _FAIL;
- goto exit;
- }
-#endif /* CONFIG_INTEL_WIDI */
-
exit:
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
@@ -860,10 +848,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
{
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw"));
-#ifdef CONFIG_INTEL_WIDI
- rtw_free_intel_widi(padapter);
-#endif /* CONFIG_INTEL_WIDI */
-
free_mlme_ext_priv(&padapter->mlmeextpriv);
rtw_free_cmd_priv(&padapter->cmdpriv);
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 73b87da15eb2..a5a5a5c8226a 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -107,7 +107,7 @@ static int readFile(struct file *fp, char *buf, int len)
return -EPERM;
while (sum<len) {
- rlen =fp->f_op->read(fp, (char __force __user *)buf+sum, len-sum, &fp->f_pos);
+ rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
if (rlen>0)
sum+=rlen;
else if (0 != rlen)
@@ -116,7 +116,7 @@ static int readFile(struct file *fp, char *buf, int len)
break;
}
- return sum;
+ return sum;
}
@@ -129,22 +129,16 @@ static int isFileReadable(char *path)
{
struct file *fp;
int ret = 0;
- mm_segment_t oldfs;
char buf;
fp =filp_open(path, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- ret = PTR_ERR(fp);
- }
- else {
- oldfs = get_fs(); set_fs(KERNEL_DS);
+ if (IS_ERR(fp))
+ return PTR_ERR(fp);
- if (1!=readFile(fp, &buf, 1))
- ret = -EINVAL;
+ if (readFile(fp, &buf, 1) != 1)
+ ret = -EINVAL;
- set_fs(oldfs);
- filp_close(fp, NULL);
- }
+ filp_close(fp, NULL);
return ret;
}
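
This hunk and the retriveFromFile one below drop the get_fs()/set_fs(KERNEL_DS) dance: kernel_read() accepts a kernel-space buffer directly, so the __user cast and the address-limit override are no longer needed. A minimal sketch of the resulting read path, assuming a kernel where kernel_read() has the (file, buf, count, pos) signature used above:

#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/err.h>

/* Open a file, read one byte into a kernel buffer, close it. */
static int example_read_first_byte(const char *path, char *out)
{
	struct file *fp;
	loff_t pos = 0;
	ssize_t n;

	fp = filp_open(path, O_RDONLY, 0);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	n = kernel_read(fp, out, 1, &pos);	/* no __user cast, no set_fs() */
	filp_close(fp, NULL);

	return n == 1 ? 0 : -EINVAL;
}
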
@@ -158,16 +152,15 @@ static int isFileReadable(char *path)
static int retriveFromFile(char *path, u8 *buf, u32 sz)
{
int ret =-1;
- mm_segment_t oldfs;
struct file *fp;
if (path && buf) {
- if (0 == (ret =openFile(&fp, path, O_RDONLY, 0))) {
+ ret = openFile(&fp, path, O_RDONLY, 0);
+
+ if (ret == 0) {
DBG_871X("%s openFile path:%s fp =%p\n", __func__, path , fp);
- oldfs = get_fs(); set_fs(KERNEL_DS);
ret =readFile(fp, buf, sz);
- set_fs(oldfs);
closeFile(fp);
DBG_871X("%s readFile, ret:%d\n", __func__, ret);
@@ -248,7 +241,7 @@ RETURN:
return pnetdev;
}
-void rtw_free_netdev(struct net_device * netdev)
+void rtw_free_netdev(struct net_device *netdev)
{
struct rtw_netdev_priv_indicator *pnpi;
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 6d02904de63f..052482554f74 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -74,7 +74,7 @@ static void sd_sync_int_hdl(struct sdio_func *func)
static int sdio_alloc_irq(struct dvobj_priv *dvobj)
{
- PSDIO_DATA psdio_data;
+ struct sdio_data *psdio_data;
struct sdio_func *func;
int err;
@@ -102,7 +102,7 @@ static int sdio_alloc_irq(struct dvobj_priv *dvobj)
static void sdio_free_irq(struct dvobj_priv *dvobj)
{
- PSDIO_DATA psdio_data;
+ struct sdio_data *psdio_data;
struct sdio_func *func;
int err;
@@ -176,7 +176,7 @@ static void gpio_hostwakeup_free_irq(struct adapter *padapter)
static u32 sdio_init(struct dvobj_priv *dvobj)
{
- PSDIO_DATA psdio_data;
+ struct sdio_data *psdio_data;
struct sdio_func *func;
int err;
@@ -248,7 +248,7 @@ static struct dvobj_priv *sdio_dvobj_init(struct sdio_func *func)
{
int status = _FAIL;
struct dvobj_priv *dvobj = NULL;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
dvobj = devobj_init();
if (dvobj == NULL) {
@@ -327,7 +327,7 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
int status = _FAIL;
struct net_device *pnetdev;
struct adapter *padapter = NULL;
- PSDIO_DATA psdio = &dvobj->intf_data;
+ struct sdio_data *psdio = &dvobj->intf_data;
padapter = vzalloc(sizeof(*padapter));
if (padapter == NULL) {
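
The sdio_intf.c hunks above and the sdio_ops_linux.c hunks below are one mechanical conversion: the PSDIO_DATA pointer typedef is replaced by an explicit struct sdio_data *, in line with the kernel coding-style advice against typedefs that hide indirection. Illustrative before/after, with the original typedef reconstructed from the usage in these hunks (an assumption, not quoted from the tree):

/* Before (assumed): a typedef that hides the pointer. */
typedef struct sdio_data SDIO_DATA, *PSDIO_DATA;
PSDIO_DATA psdio;		/* reader cannot see the indirection */

/* After: the pointer is spelled out at every declaration. */
struct sdio_data *psdio;
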
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
index 43a9d922e3aa..1787534487b4 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
@@ -12,7 +12,7 @@
static bool rtw_sdio_claim_host_needed(struct sdio_func *func)
{
struct dvobj_priv *dvobj = sdio_get_drvdata(func);
- PSDIO_DATA sdio_data = &dvobj->intf_data;
+ struct sdio_data *sdio_data = &dvobj->intf_data;
if (sdio_data->sys_sdio_irq_thd && sdio_data->sys_sdio_irq_thd == current)
return false;
@@ -21,7 +21,7 @@ static bool rtw_sdio_claim_host_needed(struct sdio_func *func)
inline void rtw_sdio_set_irq_thd(struct dvobj_priv *dvobj, void *thd_hdl)
{
- PSDIO_DATA sdio_data = &dvobj->intf_data;
+ struct sdio_data *sdio_data = &dvobj->intf_data;
sdio_data->sys_sdio_irq_thd = thd_hdl;
}
@@ -30,7 +30,7 @@ u8 sd_f0_read8(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
u8 v = 0;
struct sdio_func *func;
@@ -67,7 +67,7 @@ s32 _sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
int err = 0, i;
struct sdio_func *func;
@@ -102,7 +102,7 @@ s32 sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
int err = 0;
struct sdio_func *func;
@@ -137,7 +137,7 @@ s32 _sd_cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
int err = 0, i;
struct sdio_func *func;
@@ -172,7 +172,7 @@ s32 sd_cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
int err = 0;
struct sdio_func *func;
@@ -202,7 +202,7 @@ u8 sd_read8(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
u8 v = 0;
struct sdio_func *func;
@@ -234,7 +234,7 @@ u32 sd_read32(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
u32 v = 0;
struct sdio_func *func;
bool claim_needed;
@@ -299,7 +299,7 @@ void sd_write8(struct intf_hdl *pintfhdl, u32 addr, u8 v, s32 *err)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
struct sdio_func *func;
bool claim_needed;
@@ -328,7 +328,7 @@ void sd_write32(struct intf_hdl *pintfhdl, u32 addr, u32 v, s32 *err)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
struct sdio_func *func;
bool claim_needed;
@@ -404,7 +404,7 @@ s32 _sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
int err = -EPERM;
struct sdio_func *func;
@@ -461,7 +461,7 @@ s32 sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
struct sdio_func *func;
bool claim_needed;
@@ -505,7 +505,7 @@ s32 _sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
struct sdio_func *func;
u32 size;
@@ -565,7 +565,7 @@ s32 sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
{
struct adapter *padapter;
struct dvobj_priv *psdiodev;
- PSDIO_DATA psdio;
+ struct sdio_data *psdio;
struct sdio_func *func;
bool claim_needed;
s32 err = -EPERM;
diff --git a/drivers/staging/rtlwifi/Kconfig b/drivers/staging/rtlwifi/Kconfig
deleted file mode 100644
index 28286a87a601..000000000000
--- a/drivers/staging/rtlwifi/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config R8822BE
- tristate "Realtek RTL8822BE Wireless Network Adapter"
- depends on PCI && MAC80211 && m
- select FW_LOADER
- help
- This is the staging driver for Realtek RTL8822BE 802.11ac PCIe
- wireless network adapters.
-
-config RTLWIFI_DEBUG_ST
- bool
- depends on R8822BE
- default y
diff --git a/drivers/staging/rtlwifi/Makefile b/drivers/staging/rtlwifi/Makefile
deleted file mode 100644
index 0d738c18b29c..000000000000
--- a/drivers/staging/rtlwifi/Makefile
+++ /dev/null
@@ -1,70 +0,0 @@
-obj-$(CONFIG_R8822BE) += r8822be.o
-
-r8822be-objs := \
- base.o \
- cam.o \
- core.o \
- debug.o \
- efuse.o \
- ps.o \
- rc.o \
- regd.o \
- stats.o \
- pci.o \
- rtl8822be/fw.o \
- rtl8822be/hw.o \
- rtl8822be/led.o \
- rtl8822be/phy.o \
- rtl8822be/sw.o \
- rtl8822be/trx.o \
- btcoexist/halbtc8822b2ant.o \
- btcoexist/halbtc8822b1ant.o \
- btcoexist/halbtc8822bwifionly.o \
- btcoexist/halbtcoutsrc.o \
- btcoexist/rtl_btc.o \
- halmac/halmac_api.o \
- halmac/halmac_88xx/halmac_api_88xx_usb.o \
- halmac/halmac_88xx/halmac_api_88xx_sdio.o \
- halmac/halmac_88xx/halmac_api_88xx.o \
- halmac/halmac_88xx/halmac_api_88xx_pcie.o \
- halmac/halmac_88xx/halmac_func_88xx.o \
- halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.o \
- halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.o \
- halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.o \
- halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.o \
- halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.o \
- halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.o \
- halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.o \
- halmac/rtl_halmac.o \
- phydm/phydm_debug.o \
- phydm/phydm_antdiv.o\
- phydm/phydm_interface.o\
- phydm/phydm_hwconfig.o\
- phydm/phydm.o\
- phydm/halphyrf_ce.o\
- phydm/phydm_edcaturbocheck.o\
- phydm/phydm_dig.o\
- phydm/phydm_rainfo.o\
- phydm/phydm_dynamicbbpowersaving.o\
- phydm/phydm_powertracking_ce.o\
- phydm/phydm_dynamictxpower.o\
- phydm/phydm_adaptivity.o\
- phydm/phydm_cfotracking.o\
- phydm/phydm_noisemonitor.o\
- phydm/phydm_acs.o\
- phydm/phydm_psd.o\
- phydm/phydm_adc_sampling.o\
- phydm/phydm_kfree.o\
- phydm/phydm_ccx.o \
- phydm/rtl8822b/halhwimg8822b_bb.o\
- phydm/rtl8822b/halhwimg8822b_mac.o\
- phydm/rtl8822b/halhwimg8822b_rf.o\
- phydm/rtl8822b/halphyrf_8822b.o\
- phydm/rtl8822b/phydm_hal_api8822b.o\
- phydm/rtl8822b/phydm_iqk_8822b.o\
- phydm/rtl8822b/phydm_regconfig8822b.o\
- phydm/rtl8822b/phydm_rtl8822b.o \
- phydm/rtl_phydm.o
-
-
-obj-$(CONFIG_R8822BE) += rtl8822be/
diff --git a/drivers/staging/rtlwifi/TODO b/drivers/staging/rtlwifi/TODO
deleted file mode 100644
index 4a084f2fc5d0..000000000000
--- a/drivers/staging/rtlwifi/TODO
+++ /dev/null
@@ -1,11 +0,0 @@
-TODO:
-- find and remove code blocks guarded by never set CONFIG_FOO defines
-- convert any remaining unusual variable types
-- find codes that can use %pM and %Nph formatting
-- checkpatch.pl fixes - most of the remaining ones are lines too long. Many
- of them will require refactoring
-- merge Realtek's bugfixes and new features into the driver
-- address any reviewers comments
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
-and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtlwifi/base.c b/drivers/staging/rtlwifi/base.c
deleted file mode 100644
index 35df2cf60619..000000000000
--- a/drivers/staging/rtlwifi/base.c
+++ /dev/null
@@ -1,2815 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "wifi.h"
-#include "rc.h"
-#include "base.h"
-#include "efuse.h"
-#include "cam.h"
-#include "ps.h"
-#include "regd.h"
-#include "pci.h"
-#include <linux/ip.h>
-#include <linux/module.h>
-#include <linux/udp.h>
-
-/*
- *NOTICE!!!: This file will be very big, we should
- *keep it clear under following roles:
- *
- *This file include following parts, so, if you add new
- *functions into this file, please check which part it
- *should includes. or check if you should add new part
- *for this file:
- *
- *1) mac80211 init functions
- *2) tx information functions
- *3) functions called by core.c
- *4) wq & timer callback functions
- *5) frame process functions
- *6) IOT functions
- *7) sysfs functions
- *8) vif functions
- *9) ...
- */
-
-/*********************************************************
- *
- * mac80211 init functions
- *
- *********************************************************/
-static struct ieee80211_channel rtl_channeltable_2g[] = {
- {.center_freq = 2412, .hw_value = 1,},
- {.center_freq = 2417, .hw_value = 2,},
- {.center_freq = 2422, .hw_value = 3,},
- {.center_freq = 2427, .hw_value = 4,},
- {.center_freq = 2432, .hw_value = 5,},
- {.center_freq = 2437, .hw_value = 6,},
- {.center_freq = 2442, .hw_value = 7,},
- {.center_freq = 2447, .hw_value = 8,},
- {.center_freq = 2452, .hw_value = 9,},
- {.center_freq = 2457, .hw_value = 10,},
- {.center_freq = 2462, .hw_value = 11,},
- {.center_freq = 2467, .hw_value = 12,},
- {.center_freq = 2472, .hw_value = 13,},
- {.center_freq = 2484, .hw_value = 14,},
-};
-
-static struct ieee80211_channel rtl_channeltable_5g[] = {
- {.center_freq = 5180, .hw_value = 36,},
- {.center_freq = 5200, .hw_value = 40,},
- {.center_freq = 5220, .hw_value = 44,},
- {.center_freq = 5240, .hw_value = 48,},
- {.center_freq = 5260, .hw_value = 52,},
- {.center_freq = 5280, .hw_value = 56,},
- {.center_freq = 5300, .hw_value = 60,},
- {.center_freq = 5320, .hw_value = 64,},
- {.center_freq = 5500, .hw_value = 100,},
- {.center_freq = 5520, .hw_value = 104,},
- {.center_freq = 5540, .hw_value = 108,},
- {.center_freq = 5560, .hw_value = 112,},
- {.center_freq = 5580, .hw_value = 116,},
- {.center_freq = 5600, .hw_value = 120,},
- {.center_freq = 5620, .hw_value = 124,},
- {.center_freq = 5640, .hw_value = 128,},
- {.center_freq = 5660, .hw_value = 132,},
- {.center_freq = 5680, .hw_value = 136,},
- {.center_freq = 5700, .hw_value = 140,},
- {.center_freq = 5745, .hw_value = 149,},
- {.center_freq = 5765, .hw_value = 153,},
- {.center_freq = 5785, .hw_value = 157,},
- {.center_freq = 5805, .hw_value = 161,},
- {.center_freq = 5825, .hw_value = 165,},
-};
-
-static struct ieee80211_rate rtl_ratetable_2g[] = {
- {.bitrate = 10, .hw_value = 0x00,},
- {.bitrate = 20, .hw_value = 0x01,},
- {.bitrate = 55, .hw_value = 0x02,},
- {.bitrate = 110, .hw_value = 0x03,},
- {.bitrate = 60, .hw_value = 0x04,},
- {.bitrate = 90, .hw_value = 0x05,},
- {.bitrate = 120, .hw_value = 0x06,},
- {.bitrate = 180, .hw_value = 0x07,},
- {.bitrate = 240, .hw_value = 0x08,},
- {.bitrate = 360, .hw_value = 0x09,},
- {.bitrate = 480, .hw_value = 0x0a,},
- {.bitrate = 540, .hw_value = 0x0b,},
-};
-
-static struct ieee80211_rate rtl_ratetable_5g[] = {
- {.bitrate = 60, .hw_value = 0x04,},
- {.bitrate = 90, .hw_value = 0x05,},
- {.bitrate = 120, .hw_value = 0x06,},
- {.bitrate = 180, .hw_value = 0x07,},
- {.bitrate = 240, .hw_value = 0x08,},
- {.bitrate = 360, .hw_value = 0x09,},
- {.bitrate = 480, .hw_value = 0x0a,},
- {.bitrate = 540, .hw_value = 0x0b,},
-};
-
-static const struct ieee80211_supported_band rtl_band_2ghz = {
- .band = NL80211_BAND_2GHZ,
-
- .channels = rtl_channeltable_2g,
- .n_channels = ARRAY_SIZE(rtl_channeltable_2g),
-
- .bitrates = rtl_ratetable_2g,
- .n_bitrates = ARRAY_SIZE(rtl_ratetable_2g),
-
- .ht_cap = {0},
-};
-
-static struct ieee80211_supported_band rtl_band_5ghz = {
- .band = NL80211_BAND_5GHZ,
-
- .channels = rtl_channeltable_5g,
- .n_channels = ARRAY_SIZE(rtl_channeltable_5g),
-
- .bitrates = rtl_ratetable_5g,
- .n_bitrates = ARRAY_SIZE(rtl_ratetable_5g),
-
- .ht_cap = {0},
-};
-
-static const u8 tid_to_ac[] = {
- 2, /* IEEE80211_AC_BE */
- 3, /* IEEE80211_AC_BK */
- 3, /* IEEE80211_AC_BK */
- 2, /* IEEE80211_AC_BE */
- 1, /* IEEE80211_AC_VI */
- 1, /* IEEE80211_AC_VI */
- 0, /* IEEE80211_AC_VO */
- 0, /* IEEE80211_AC_VO */
-};
-
-u8 rtl_tid_to_ac(u8 tid)
-{
- return tid_to_ac[tid];
-}
-
-static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
- struct ieee80211_sta_ht_cap *ht_cap)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
-
- ht_cap->ht_supported = true;
- ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
-
- if (rtlpriv->rtlhal.disable_amsdu_8k)
- ht_cap->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
-
- /*
- *Maximum length of AMPDU that the STA can receive.
- *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
- */
- ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-
- /*Minimum MPDU start spacing , */
- ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
-
- ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-
- /*hw->wiphy->bands[NL80211_BAND_2GHZ]
- *base on ant_num
- *rx_mask: RX mask
- *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
- *if rx_ant = 2 rx_mask[1]= 0xff;==>MCS8-MCS15
- *if rx_ant >= 3 rx_mask[2]= 0xff;
- *if BW_40 rx_mask[4]= 0x01;
- *highest supported RX rate
- */
- if (rtlpriv->dm.supp_phymode_switch) {
- pr_info("Support phy mode switch\n");
-
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0xFF;
- ht_cap->mcs.rx_mask[4] = 0x01;
-
- ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
- } else {
- if (get_rf_type(rtlphy) == RF_1T2R ||
- get_rf_type(rtlphy) == RF_2T2R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "1T2R or 2T2R\n");
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0xFF;
- ht_cap->mcs.rx_mask[4] = 0x01;
-
- ht_cap->mcs.rx_highest =
- cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
- } else if (get_rf_type(rtlphy) == RF_1T1R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
-
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0x00;
- ht_cap->mcs.rx_mask[4] = 0x01;
-
- ht_cap->mcs.rx_highest =
- cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
- }
- }
-}
-
-static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw,
- struct ieee80211_sta_vht_cap *vht_cap)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE ||
- rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
- u16 mcs_map;
-
- vht_cap->vht_supported = true;
- vht_cap->cap =
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_SHORT_GI_80 |
- IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
- IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
- IEEE80211_VHT_CAP_HTC_VHT |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
- IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
- IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
- 0;
-
- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
-
- vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.rx_highest =
- cpu_to_le16(MAX_BIT_RATE_SHORT_GI_2NSS_80MHZ_MCS9);
- vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.tx_highest =
- cpu_to_le16(MAX_BIT_RATE_SHORT_GI_2NSS_80MHZ_MCS9);
- } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
- u16 mcs_map;
-
- vht_cap->vht_supported = true;
- vht_cap->cap =
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_SHORT_GI_80 |
- IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
- IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
- IEEE80211_VHT_CAP_HTC_VHT |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
- IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
- IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
- 0;
-
- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
-
- vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.rx_highest =
- cpu_to_le16(MAX_BIT_RATE_SHORT_GI_1NSS_80MHZ_MCS9);
- vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.tx_highest =
- cpu_to_le16(MAX_BIT_RATE_SHORT_GI_1NSS_80MHZ_MCS9);
- }
-}
-
-static void _rtl_init_mac80211(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct ieee80211_supported_band *sband;
-
- if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
- rtlhal->bandset == BAND_ON_BOTH) {
- /* 1: 2.4 G bands */
- /* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &rtlmac->bands[NL80211_BAND_2GHZ];
-
- /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
- * to default value(1T1R)
- */
- memcpy(&rtlmac->bands[NL80211_BAND_2GHZ], &rtl_band_2ghz,
- sizeof(struct ieee80211_supported_band));
-
- /* <3> init ht cap base on ant_num */
- _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
-
- /* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
-
- /* 2: 5 G bands */
- /* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &rtlmac->bands[NL80211_BAND_5GHZ];
-
- /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
- * to default value(1T1R)
- */
- memcpy(&rtlmac->bands[NL80211_BAND_5GHZ], &rtl_band_5ghz,
- sizeof(struct ieee80211_supported_band));
-
- /* <3> init ht cap base on ant_num */
- _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
-
- _rtl_init_hw_vht_capab(hw, &sband->vht_cap);
- /* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
- } else {
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- /* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &rtlmac->bands[NL80211_BAND_2GHZ];
-
- /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
- * to default value(1T1R)
- */
- memcpy(&rtlmac->bands[NL80211_BAND_2GHZ],
- &rtl_band_2ghz,
- sizeof(struct ieee80211_supported_band));
-
- /* <3> init ht cap base on ant_num */
- _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
-
- /* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
- } else if (rtlhal->current_bandtype == BAND_ON_5G) {
- /* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &rtlmac->bands[NL80211_BAND_5GHZ];
-
- /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
- * to default value(1T1R)
- */
- memcpy(&rtlmac->bands[NL80211_BAND_5GHZ],
- &rtl_band_5ghz,
- sizeof(struct ieee80211_supported_band));
-
- /* <3> init ht cap base on ant_num */
- _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
-
- _rtl_init_hw_vht_capab(hw, &sband->vht_cap);
- /* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
- } else {
- pr_err("Err BAND %d\n",
- rtlhal->current_bandtype);
- }
- }
- /* <5> set hw caps */
- ieee80211_hw_set(hw, SIGNAL_DBM);
- ieee80211_hw_set(hw, RX_INCLUDES_FCS);
- ieee80211_hw_set(hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(hw, CONNECTION_MONITOR);
- ieee80211_hw_set(hw, MFP_CAPABLE);
- ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
- ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
- ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
-
- /* swlps or hwlps has been set in diff chip in init_sw_vars */
- if (rtlpriv->psc.swctrl_lps) {
- ieee80211_hw_set(hw, SUPPORTS_PS);
- ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
- }
- if (rtlpriv->psc.fwctrl_lps) {
- ieee80211_hw_set(hw, SUPPORTS_PS);
- ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
- }
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
- hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
-
- hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
-
- hw->wiphy->rts_threshold = 2347;
-
- hw->queues = AC_MAX;
- hw->extra_tx_headroom = RTL_TX_HEADER_SIZE;
-
- /* TODO: Correct this value for our hw */
- hw->max_listen_interval = MAX_LISTEN_INTERVAL;
- hw->max_rate_tries = MAX_RATE_TRIES;
- /* hw->max_rates = 1; */
- hw->sta_data_size = sizeof(struct rtl_sta_info);
-
-/* wowlan is not supported by kernel if CONFIG_PM is not defined */
-#ifdef CONFIG_PM
- if (rtlpriv->psc.wo_wlan_mode) {
- if (rtlpriv->psc.wo_wlan_mode & WAKE_ON_MAGIC_PACKET)
- rtlpriv->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT;
- if (rtlpriv->psc.wo_wlan_mode & WAKE_ON_PATTERN_MATCH) {
- rtlpriv->wowlan.n_patterns =
- MAX_SUPPORT_WOL_PATTERN_NUM;
- rtlpriv->wowlan.pattern_min_len = MIN_WOL_PATTERN_SIZE;
- rtlpriv->wowlan.pattern_max_len = MAX_WOL_PATTERN_SIZE;
- }
- hw->wiphy->wowlan = &rtlpriv->wowlan;
- }
-#endif
-
- /* <6> mac address */
- if (is_valid_ether_addr(rtlefuse->dev_addr)) {
- SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
- } else {
- u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
-
- get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
- SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
- }
-}
-
-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- /* <1> timer */
- timer_setup(&rtlpriv->works.watchdog_timer,
- rtl_watch_dog_timer_callback, 0);
- timer_setup(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
- rtl_easy_concurrent_retrytimer_callback, 0);
- /* <2> work queue */
- rtlpriv->works.hw = hw;
- rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
- INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
- (void *)rtl_watchdog_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
- (void *)rtl_ips_nic_off_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
- (void *)rtl_swlps_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
- (void *)rtl_swlps_rfon_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
- (void *)rtl_fwevt_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
- (void *)rtl_c2hcmd_wq_callback);
-}
-
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- del_timer_sync(&rtlpriv->works.watchdog_timer);
-
- cancel_delayed_work(&rtlpriv->works.watchdog_wq);
- cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
- cancel_delayed_work(&rtlpriv->works.ps_work);
- cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
- cancel_delayed_work(&rtlpriv->works.fwevt_wq);
- cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
-}
-
-void rtl_init_rfkill(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- bool radio_state;
- bool blocked;
- u8 valid = 0;
-
- /*set init state to on */
- rtlpriv->rfkill.rfkill_state = true;
- wiphy_rfkill_set_hw_state(hw->wiphy, 0);
-
- radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
-
- if (valid) {
- pr_info("rtlwifi: wireless switch is %s\n",
- rtlpriv->rfkill.rfkill_state ? "on" : "off");
-
- rtlpriv->rfkill.rfkill_state = radio_state;
-
- blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
- wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
- }
-
- wiphy_rfkill_start_polling(hw->wiphy);
-}
-
-void rtl_deinit_rfkill(struct ieee80211_hw *hw)
-{
- wiphy_rfkill_stop_polling(hw->wiphy);
-}
-
-int rtl_init_core(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
-
- /* <1> init mac80211 */
- _rtl_init_mac80211(hw);
- rtlmac->hw = hw;
-
- /* <2> rate control register */
- hw->rate_control_algorithm = "rtl_rc";
-
- /*
- * <3> init CRDA must come after init
- * mac80211 hw in _rtl_init_mac80211.
- */
- if (rtl_regd_init(hw, rtl_reg_notifier)) {
- pr_err("REGD init failed\n");
- return 1;
- }
-
- /* <4> locks */
- mutex_init(&rtlpriv->locks.conf_mutex);
- mutex_init(&rtlpriv->locks.ips_mutex);
- mutex_init(&rtlpriv->locks.lps_mutex);
- spin_lock_init(&rtlpriv->locks.irq_th_lock);
- spin_lock_init(&rtlpriv->locks.h2c_lock);
- spin_lock_init(&rtlpriv->locks.rf_ps_lock);
- spin_lock_init(&rtlpriv->locks.rf_lock);
- spin_lock_init(&rtlpriv->locks.waitq_lock);
- spin_lock_init(&rtlpriv->locks.entry_list_lock);
- spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
- spin_lock_init(&rtlpriv->locks.scan_list_lock);
- spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
- spin_lock_init(&rtlpriv->locks.fw_ps_lock);
- spin_lock_init(&rtlpriv->locks.iqk_lock);
- /* <5> init list */
- INIT_LIST_HEAD(&rtlpriv->entry_list);
- INIT_LIST_HEAD(&rtlpriv->c2hcmd_list);
- INIT_LIST_HEAD(&rtlpriv->scan_list.list);
-
- rtlmac->link_state = MAC80211_NOLINK;
-
- /* <6> init deferred work */
- _rtl_init_deferred_work(hw);
-
- return 0;
-}
-
-static void rtl_free_entries_from_scan_list(struct ieee80211_hw *hw);
-
-void rtl_deinit_core(struct ieee80211_hw *hw)
-{
- rtl_c2hcmd_launcher(hw, 0);
- rtl_free_entries_from_scan_list(hw);
-}
-
-void rtl_init_rx_config(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)&mac->rx_conf);
-}
-
-/*********************************************************
- *
- * tx information functions
- *
- *********************************************************/
-static void _rtl_qurey_shortpreamble_mode(struct ieee80211_hw *hw,
- struct rtl_tcb_desc *tcb_desc,
- struct ieee80211_tx_info *info)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 rate_flag = info->control.rates[0].flags;
-
- tcb_desc->use_shortpreamble = false;
-
- /* 1M can only use Long Preamble. 11B spec */
- if (tcb_desc->hw_rate == rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M])
- return;
- else if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- tcb_desc->use_shortpreamble = true;
-}
-
-static void _rtl_query_shortgi(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct rtl_tcb_desc *tcb_desc,
- struct ieee80211_tx_info *info)
-{
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u8 rate_flag = info->control.rates[0].flags;
- u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0;
- u8 sgi_80 = 0, bw_80 = 0;
-
- tcb_desc->use_shortgi = false;
-
- if (!sta)
- return;
-
- sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
- sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
- sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
-
- if (!sta->ht_cap.ht_supported && !sta->vht_cap.vht_supported)
- return;
-
- if (!sgi_40 && !sgi_20)
- return;
-
- if (mac->opmode == NL80211_IFTYPE_STATION) {
- bw_40 = mac->bw_40;
- bw_80 = mac->bw_80;
- } else if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC ||
- mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- bw_80 = sta->vht_cap.vht_supported;
- }
-
- if (bw_80) {
- if (sgi_80)
- tcb_desc->use_shortgi = true;
- else
- tcb_desc->use_shortgi = false;
- } else {
- if (bw_40 && sgi_40)
- tcb_desc->use_shortgi = true;
- else if (!bw_40 && sgi_20)
- tcb_desc->use_shortgi = true;
- }
-
- if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI))
- tcb_desc->use_shortgi = false;
-}
-
-static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
- struct rtl_tcb_desc *tcb_desc,
- struct ieee80211_tx_info *info)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 rate_flag = info->control.rates[0].flags;
-
- /* Common Settings */
- tcb_desc->rts_stbc = false;
- tcb_desc->cts_enable = false;
- tcb_desc->rts_sc = 0;
- tcb_desc->rts_bw = false;
- tcb_desc->rts_use_shortpreamble = false;
- tcb_desc->rts_use_shortgi = false;
-
- if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- /* Use CTS-to-SELF in protection mode. */
- tcb_desc->rts_enable = true;
- tcb_desc->cts_enable = true;
- tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
- } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
- /* Use RTS-CTS in protection mode. */
- tcb_desc->rts_enable = true;
- tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
- }
-}
-
-u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index,
- enum wireless_mode wirelessmode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 ret = 0;
-
- switch (rate_index) {
- case RATR_INX_WIRELESS_NGB:
- if (rtlphy->rf_type == RF_1T1R)
- ret = RATEID_IDX_BGN_40M_1SS;
- else
- ret = RATEID_IDX_BGN_40M_2SS;
- break;
- case RATR_INX_WIRELESS_N:
- case RATR_INX_WIRELESS_NG:
- if (rtlphy->rf_type == RF_1T1R)
- ret = RATEID_IDX_GN_N1SS;
- else
- ret = RATEID_IDX_GN_N2SS;
- break;
- case RATR_INX_WIRELESS_NB:
- if (rtlphy->rf_type == RF_1T1R)
- ret = RATEID_IDX_BGN_20M_1SS_BN;
- else
- ret = RATEID_IDX_BGN_20M_2SS_BN;
- break;
- case RATR_INX_WIRELESS_GB:
- ret = RATEID_IDX_BG;
- break;
- case RATR_INX_WIRELESS_G:
- ret = RATEID_IDX_G;
- break;
- case RATR_INX_WIRELESS_B:
- ret = RATEID_IDX_B;
- break;
- case RATR_INX_WIRELESS_MC:
- if (wirelessmode == WIRELESS_MODE_B ||
- wirelessmode == WIRELESS_MODE_G ||
- wirelessmode == WIRELESS_MODE_N_24G ||
- wirelessmode == WIRELESS_MODE_AC_24G)
- ret = RATEID_IDX_BG;
- else
- ret = RATEID_IDX_G;
- break;
- case RATR_INX_WIRELESS_AC_5N:
- if (rtlphy->rf_type == RF_1T1R)
- ret = RATEID_IDX_VHT_1SS;
- else
- ret = RATEID_IDX_VHT_2SS;
- break;
- case RATR_INX_WIRELESS_AC_24N:
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
- if (rtlphy->rf_type == RF_1T1R)
- ret = RATEID_IDX_VHT_1SS;
- else
- ret = RATEID_IDX_VHT_2SS;
- } else {
- if (rtlphy->rf_type == RF_1T1R)
- ret = RATEID_IDX_MIX1;
- else
- ret = RATEID_IDX_MIX2;
- }
- break;
- default:
- ret = RATEID_IDX_BGN_40M_2SS;
- break;
- }
- return ret;
-}
-
-static inline u8 _rtl_rate_id(struct ieee80211_hw *hw,
- struct rtl_sta_info *sta_entry,
- int rate_index)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) {
- int wireless_mode = sta_entry ?
- sta_entry->wireless_mode : WIRELESS_MODE_G;
-
- return rtl_mrate_idx_to_arfr_id(hw, rate_index, wireless_mode);
- } else {
- return rate_index;
- }
-}
-
-static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct rtl_tcb_desc *tcb_desc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_sta_info *sta_entry = NULL;
- u8 ratr_index = _rtl_rate_id(hw, sta_entry, RATR_INX_WIRELESS_MC);
-
- if (sta) {
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- ratr_index = sta_entry->ratr_index;
- }
- if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) {
- if (mac->opmode == NL80211_IFTYPE_STATION) {
- tcb_desc->ratr_index = 0;
- } else if (mac->opmode == NL80211_IFTYPE_ADHOC ||
- mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- if (tcb_desc->multicast || tcb_desc->broadcast) {
- tcb_desc->hw_rate =
- rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
- tcb_desc->use_driver_rate = 1;
- tcb_desc->ratr_index =
- _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_MC);
- } else {
- tcb_desc->ratr_index = ratr_index;
- }
- } else if (mac->opmode == NL80211_IFTYPE_AP) {
- tcb_desc->ratr_index = ratr_index;
- }
- }
-
- if (rtlpriv->dm.useramask) {
- tcb_desc->ratr_index = ratr_index;
- /* TODO we will differentiate adhoc and station future */
- if (mac->opmode == NL80211_IFTYPE_STATION ||
- mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- tcb_desc->mac_id = 0;
-
- if (sta &&
- (rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID))
- ; /* use sta_entry->ratr_index */
- else if (mac->mode == WIRELESS_MODE_AC_5G)
- tcb_desc->ratr_index =
- _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_AC_5N);
- else if (mac->mode == WIRELESS_MODE_AC_24G)
- tcb_desc->ratr_index =
- _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_AC_24N);
- else if (mac->mode == WIRELESS_MODE_N_24G)
- tcb_desc->ratr_index =
- _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_NGB);
- else if (mac->mode == WIRELESS_MODE_N_5G)
- tcb_desc->ratr_index =
- _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_NG);
- else if (mac->mode & WIRELESS_MODE_G)
- tcb_desc->ratr_index =
- _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_GB);
- else if (mac->mode & WIRELESS_MODE_B)
- tcb_desc->ratr_index =
- _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_B);
- else if (mac->mode & WIRELESS_MODE_A)
- tcb_desc->ratr_index =
- _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_G);
-
- } else if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (sta) {
- if (sta->aid > 0)
- tcb_desc->mac_id = sta->aid + 1;
- else
- tcb_desc->mac_id = 1;
- } else {
- tcb_desc->mac_id = 0;
- }
- }
- }
-}
-
-static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct rtl_tcb_desc *tcb_desc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- tcb_desc->packet_bw = false;
- if (!sta)
- return;
- if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC ||
- mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- if (!(sta->ht_cap.ht_supported) ||
- !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
- return;
- } else if (mac->opmode == NL80211_IFTYPE_STATION) {
- if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
- return;
- }
- if (tcb_desc->multicast || tcb_desc->broadcast)
- return;
-
- /*use legency rate, shall use 20MHz */
- if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
- return;
-
- tcb_desc->packet_bw = HT_CHANNEL_WIDTH_20_40;
-
- if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE ||
- rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8821AE ||
- (rtlpriv->cfg->spec_ver & RTL_SPEC_SUPPORT_VHT)) {
- if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC ||
- mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- if (!(sta->vht_cap.vht_supported))
- return;
- } else if (mac->opmode == NL80211_IFTYPE_STATION) {
- if (!mac->bw_80 ||
- !(sta->vht_cap.vht_supported))
- return;
- }
- if (tcb_desc->hw_rate <=
- rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15])
- return;
- tcb_desc->packet_bw = HT_CHANNEL_WIDTH_80;
- }
-}
-
-static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 hw_rate;
- u16 tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
-
- if ((get_rf_type(rtlphy) == RF_2T2R) &&
- (tx_mcs_map & 0x000c) != 0x000c) {
- if ((tx_mcs_map & 0x000c) >> 2 ==
- IEEE80211_VHT_MCS_SUPPORT_0_7)
- hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS7];
- else if ((tx_mcs_map & 0x000c) >> 2 ==
- IEEE80211_VHT_MCS_SUPPORT_0_8)
- hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS8];
- else
- hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
- } else {
- if ((tx_mcs_map & 0x0003) ==
- IEEE80211_VHT_MCS_SUPPORT_0_7)
- hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS7];
- else if ((tx_mcs_map & 0x0003) ==
- IEEE80211_VHT_MCS_SUPPORT_0_8)
- hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS8];
- else
- hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
- }
-
- return hw_rate;
-}
-
-static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 hw_rate;
-
- if (get_rf_type(rtlphy) == RF_2T2R &&
- sta->ht_cap.mcs.rx_mask[1] != 0)
- hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
- else
- hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
-
- return hw_rate;
-}
-
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == NL80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == NL80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
- *
- * VHT rates:
- * DESC_RATEVHT1SS_MCS0-->DESC_RATEVHT1SS_MCS9 ==> idx is 0-->9
- * DESC_RATEVHT2SS_MCS0-->DESC_RATEVHT2SS_MCS9 ==> idx is 0-->9
- */
-int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
- u8 desc_rate)
-{
- int rate_idx;
-
- if (isvht) {
- switch (desc_rate) {
- case DESC_RATEVHT1SS_MCS0:
- rate_idx = 0;
- break;
- case DESC_RATEVHT1SS_MCS1:
- rate_idx = 1;
- break;
- case DESC_RATEVHT1SS_MCS2:
- rate_idx = 2;
- break;
- case DESC_RATEVHT1SS_MCS3:
- rate_idx = 3;
- break;
- case DESC_RATEVHT1SS_MCS4:
- rate_idx = 4;
- break;
- case DESC_RATEVHT1SS_MCS5:
- rate_idx = 5;
- break;
- case DESC_RATEVHT1SS_MCS6:
- rate_idx = 6;
- break;
- case DESC_RATEVHT1SS_MCS7:
- rate_idx = 7;
- break;
- case DESC_RATEVHT1SS_MCS8:
- rate_idx = 8;
- break;
- case DESC_RATEVHT1SS_MCS9:
- rate_idx = 9;
- break;
- case DESC_RATEVHT2SS_MCS0:
- rate_idx = 0;
- break;
- case DESC_RATEVHT2SS_MCS1:
- rate_idx = 1;
- break;
- case DESC_RATEVHT2SS_MCS2:
- rate_idx = 2;
- break;
- case DESC_RATEVHT2SS_MCS3:
- rate_idx = 3;
- break;
- case DESC_RATEVHT2SS_MCS4:
- rate_idx = 4;
- break;
- case DESC_RATEVHT2SS_MCS5:
- rate_idx = 5;
- break;
- case DESC_RATEVHT2SS_MCS6:
- rate_idx = 6;
- break;
- case DESC_RATEVHT2SS_MCS7:
- rate_idx = 7;
- break;
- case DESC_RATEVHT2SS_MCS8:
- rate_idx = 8;
- break;
- case DESC_RATEVHT2SS_MCS9:
- rate_idx = 9;
- break;
- default:
- rate_idx = 0;
- break;
- }
- return rate_idx;
- }
- if (!isht) {
- if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
- switch (desc_rate) {
- case DESC_RATE1M:
- rate_idx = 0;
- break;
- case DESC_RATE2M:
- rate_idx = 1;
- break;
- case DESC_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC_RATE11M:
- rate_idx = 3;
- break;
- case DESC_RATE6M:
- rate_idx = 4;
- break;
- case DESC_RATE9M:
- rate_idx = 5;
- break;
- case DESC_RATE12M:
- rate_idx = 6;
- break;
- case DESC_RATE18M:
- rate_idx = 7;
- break;
- case DESC_RATE24M:
- rate_idx = 8;
- break;
- case DESC_RATE36M:
- rate_idx = 9;
- break;
- case DESC_RATE48M:
- rate_idx = 10;
- break;
- case DESC_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC_RATE6M:
- rate_idx = 0;
- break;
- case DESC_RATE9M:
- rate_idx = 1;
- break;
- case DESC_RATE12M:
- rate_idx = 2;
- break;
- case DESC_RATE18M:
- rate_idx = 3;
- break;
- case DESC_RATE24M:
- rate_idx = 4;
- break;
- case DESC_RATE36M:
- rate_idx = 5;
- break;
- case DESC_RATE48M:
- rate_idx = 6;
- break;
- case DESC_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
-static u8 _rtl_get_tx_hw_rate(struct ieee80211_hw *hw,
- struct ieee80211_tx_info *info)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_tx_rate *r = &info->status.rates[0];
- struct ieee80211_rate *txrate;
- u8 hw_value = 0x0;
-
- if (r->flags & IEEE80211_TX_RC_MCS) {
- /* HT MCS0-15 */
- hw_value = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15] - 15 +
- r->idx;
- } else if (r->flags & IEEE80211_TX_RC_VHT_MCS) {
- /* VHT MCS0-9, NSS */
- if (ieee80211_rate_get_vht_nss(r) == 2)
- hw_value = rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
- else
- hw_value = rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
-
- hw_value = hw_value - 9 + ieee80211_rate_get_vht_mcs(r);
- } else {
- /* legacy */
- txrate = ieee80211_get_tx_rate(hw, info);
-
- if (txrate)
- hw_value = txrate->hw_value;
- }
-
- /* check 5G band */
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G &&
- hw_value < rtlpriv->cfg->maps[RTL_RC_OFDM_RATE6M])
- hw_value = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE6M];
-
- return hw_value;
-}
-
-void rtl_get_tcb_desc(struct ieee80211_hw *hw,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
- struct rtl_sta_info *sta_entry =
- (sta ? (struct rtl_sta_info *)sta->drv_priv : NULL);
-
- __le16 fc = rtl_get_fc(skb);
-
- tcb_desc->hw_rate = _rtl_get_tx_hw_rate(hw, info);
-
- if (rtl_is_tx_report_skb(hw, skb))
- tcb_desc->use_spe_rpt = 1;
-
- if (!ieee80211_is_data(fc)) {
- tcb_desc->use_driver_rate = true;
- tcb_desc->ratr_index = _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_MC);
- tcb_desc->disable_ratefallback = 1;
- tcb_desc->mac_id = 0;
- tcb_desc->packet_bw = false;
-
- return;
- }
-
- /*
- * We set data rate INX 0
- * in rtl_rc.c if skb is special data or
- * mgt which need low data rate.
- */
-
- /*
- * So tcb_desc->hw_rate is just used for
- * special data and mgt frames
- */
- if (info->control.rates[0].idx == 0 || ieee80211_is_nullfunc(fc)) {
- tcb_desc->use_driver_rate = true;
- tcb_desc->ratr_index = _rtl_rate_id(hw, sta_entry,
- RATR_INX_WIRELESS_MC);
-
- tcb_desc->disable_ratefallback = 1;
- } else if (sta && sta->vht_cap.vht_supported) {
- /*
- * Because hw will never use hw_rate
- * when tcb_desc->use_driver_rate = false
- * so we never set highest N rate here,
- * and N rate will all be controlled by FW
- * when tcb_desc->use_driver_rate = false
- */
- tcb_desc->hw_rate = _rtl_get_vht_highest_n_rate(hw, sta);
- } else if (sta && sta->ht_cap.ht_supported) {
- tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta);
- } else {
- enum rtl_var_map var = RTL_RC_OFDM_RATE54M;
-
- if (rtlmac->mode == WIRELESS_MODE_B)
- var = RTL_RC_CCK_RATE11M;
-
- tcb_desc->hw_rate = rtlpriv->cfg->maps[var];
- }
-
- if (is_multicast_ether_addr(hdr->addr1))
- tcb_desc->multicast = 1;
- else if (is_broadcast_ether_addr(hdr->addr1))
- tcb_desc->broadcast = 1;
-
- _rtl_txrate_selectmode(hw, sta, tcb_desc);
- _rtl_query_bandwidth_mode(hw, sta, tcb_desc);
- _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
- _rtl_query_shortgi(hw, sta, tcb_desc, info);
- _rtl_query_protection_mode(hw, tcb_desc, info);
-}
-
-bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- __le16 fc = rtl_get_fc(skb);
-
- if (rtlpriv->dm.supp_phymode_switch &&
- mac->link_state < MAC80211_LINKED &&
- (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) {
- if (rtlpriv->cfg->ops->chk_switch_dmdp)
- rtlpriv->cfg->ops->chk_switch_dmdp(hw);
- }
- if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
-
- mac->link_state = MAC80211_LINKING;
- /* Dul mac */
- rtlpriv->phy.need_iqk = true;
- }
- return true;
-}
-
-struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, u8 *sa,
- u8 *bssid, u16 tid);
-
-static void process_agg_start(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u16 tid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_rx_status rx_status = { 0 };
- struct sk_buff *skb_delba = NULL;
-
- skb_delba = rtl_make_del_ba(hw, hdr->addr2, hdr->addr3, tid);
- if (skb_delba) {
- rx_status.freq = hw->conf.chandef.chan->center_freq;
- rx_status.band = hw->conf.chandef.chan->band;
- rx_status.flag |= RX_FLAG_DECRYPTED;
- rx_status.flag |= RX_FLAG_MACTIME_START;
- rx_status.rate_idx = 0;
- rx_status.signal = 50 + 10;
- memcpy(IEEE80211_SKB_RXCB(skb_delba),
- &rx_status, sizeof(rx_status));
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG,
- "fake del\n",
- skb_delba->data,
- skb_delba->len);
- ieee80211_rx_irqsafe(hw, skb_delba);
- }
-}
-
-bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
-{
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- __le16 fc = rtl_get_fc(skb);
- u8 *act = (u8 *)(((u8 *)skb->data + MAC80211_3ADDR_LEN));
- u8 category;
-
- if (!ieee80211_is_action(fc))
- return true;
-
- category = *act;
- act++;
- switch (category) {
- case ACT_CAT_BA:
- switch (*act) {
- case ACT_ADDBAREQ:
- if (mac->act_scanning)
- return false;
-
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "%s ACT_ADDBAREQ From :%pM\n",
- is_tx ? "Tx" : "Rx", hdr->addr2);
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "req\n",
- skb->data, skb->len);
- if (!is_tx) {
- struct ieee80211_sta *sta = NULL;
- struct rtl_sta_info *sta_entry = NULL;
- struct rtl_tid_data *tid_data;
- struct ieee80211_mgmt *mgmt = (void *)skb->data;
- u16 capab = 0, tid = 0;
-
- rcu_read_lock();
- sta = rtl_find_sta(hw, hdr->addr3);
- if (!sta) {
- RT_TRACE(rtlpriv, COMP_SEND | COMP_RECV,
- DBG_DMESG, "sta is NULL\n");
- rcu_read_unlock();
- return true;
- }
-
- sta_entry =
- (struct rtl_sta_info *)sta->drv_priv;
- if (!sta_entry) {
- rcu_read_unlock();
- return true;
- }
- capab =
- le16_to_cpu(mgmt->u.action.u.addba_req.capab);
- tid = (capab &
- IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
- if (tid >= MAX_TID_COUNT) {
- rcu_read_unlock();
- return true;
- }
- tid_data = &sta_entry->tids[tid];
- if (tid_data->agg.rx_agg_state ==
- RTL_RX_AGG_START)
- process_agg_start(hw, hdr, tid);
- rcu_read_unlock();
- }
- break;
- case ACT_ADDBARSP:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "%s ACT_ADDBARSP From :%pM\n",
- is_tx ? "Tx" : "Rx", hdr->addr2);
- break;
- case ACT_DELBA:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "ACT_ADDBADEL From :%pM\n", hdr->addr2);
- break;
- }
- break;
- default:
- break;
- }
-
- return true;
-}
-
-static void setup_special_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc,
- int type)
-{
- struct ieee80211_hw *hw = rtlpriv->hw;
-
- rtlpriv->ra.is_special_data = true;
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(rtlpriv,
- type);
- rtl_lps_leave(hw);
- ppsc->last_delaylps_stamp_jiffies = jiffies;
-}
-
-static const u8 *rtl_skb_ether_type_ptr(struct ieee80211_hw *hw,
- struct sk_buff *skb, bool is_enc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- u8 encrypt_header_len = 0;
- u8 offset;
-
- switch (rtlpriv->sec.pairwise_enc_algorithm) {
- case WEP40_ENCRYPTION:
- case WEP104_ENCRYPTION:
- encrypt_header_len = 4;/*WEP_IV_LEN*/
- break;
- case TKIP_ENCRYPTION:
- encrypt_header_len = 8;/*TKIP_IV_LEN*/
- break;
- case AESCCMP_ENCRYPTION:
- encrypt_header_len = 8;/*CCMP_HDR_LEN;*/
- break;
- default:
- break;
- }
-
- offset = mac_hdr_len + SNAP_SIZE;
- if (is_enc)
- offset += encrypt_header_len;
-
- return skb->data + offset;
-}
-
-/*should call before software enc*/
-u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
- bool is_enc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- __le16 fc = rtl_get_fc(skb);
- u16 ether_type;
- const u8 *ether_type_ptr;
- const struct iphdr *ip;
-
- if (!ieee80211_is_data(fc))
- goto end;
-
- ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, is_enc);
- ether_type = be16_to_cpup((__be16 *)ether_type_ptr);
-
- if (ether_type == ETH_P_IP) {
- ip = (struct iphdr *)((u8 *)ether_type_ptr +
- PROTOC_TYPE_SIZE);
- if (ip->protocol == IPPROTO_UDP) {
- struct udphdr *udp = (struct udphdr *)((u8 *)ip +
- (ip->ihl << 2));
- if (((((u8 *)udp)[1] == 68) &&
- (((u8 *)udp)[3] == 67)) ||
- ((((u8 *)udp)[1] == 67) &&
- (((u8 *)udp)[3] == 68))) {
- /* 68 : UDP BOOTP client
- * 67 : UDP BOOTP server
- */
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
- DBG_DMESG, "dhcp %s !!\n",
- (is_tx) ? "Tx" : "Rx");
-
- if (is_tx)
- setup_special_tx(rtlpriv, ppsc,
- PACKET_DHCP);
-
- return true;
- }
- }
- } else if (ether_type == ETH_P_ARP) {
- if (is_tx)
- setup_special_tx(rtlpriv, ppsc, PACKET_ARP);
-
- return true;
- } else if (ether_type == ETH_P_PAE) {
- /* EAPOL is seen as in-4way */
- rtlpriv->btcoexist.btc_info.in_4way = true;
- rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies;
- rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies;
-
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
-
- if (is_tx) {
- rtlpriv->ra.is_special_data = true;
- rtl_lps_leave(hw);
- ppsc->last_delaylps_stamp_jiffies = jiffies;
-
- setup_special_tx(rtlpriv, ppsc, PACKET_EAPOL);
- }
-
- return true;
- } else if (ether_type == ETH_P_IPV6) {
- /* TODO: Handle any IPv6 cases that need special handling.
- * For now, always return false
- */
- goto end;
- }
-
-end:
- rtlpriv->ra.is_special_data = false;
- return false;
-}
-
-bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- u16 ether_type;
- const u8 *ether_type_ptr;
-
- ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, true);
- ether_type = be16_to_cpup((__be16 *)ether_type_ptr);
-
- /* EAPOL */
- if (ether_type == ETH_P_PAE)
- return true;
-
- return false;
-}
-
-static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
- u16 sn;
-
- /*
- * SW_DEFINE[11:8] are reserved (driver fills zeros)
- * SW_DEFINE[7:2] are used by driver
- * SW_DEFINE[1:0] are reserved for firmware (driver fills zeros)
- */
- sn = (atomic_inc_return(&tx_report->sn) & 0x003F) << 2;
-
- tx_report->last_sent_sn = sn;
- tx_report->last_sent_time = jiffies;
-
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Send TX-Report sn=0x%X\n", sn);
-
- return sn;
-}
-
-void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc,
- struct ieee80211_hw *hw)
-{
- if (ptcb_desc->use_spe_rpt) {
- u16 sn = rtl_get_tx_report_sn(hw);
-
- SET_TX_DESC_SPE_RPT(pdesc, 1);
- SET_TX_DESC_SW_DEFINE(pdesc, sn);
- }
-}
-
-void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
- u16 sn;
- u8 st, retry;
-
- if (rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_FW_C2H) {
- sn = tmp_buf[6];
- st = tmp_buf[7] & 0xC0;
- retry = tmp_buf[8] & 0x3F;
- } else {
- sn = ((tmp_buf[7] & 0x0F) << 8) | tmp_buf[6];
- st = tmp_buf[0] & 0xC0;
- retry = tmp_buf[2] & 0x3F;
- }
-
- tx_report->last_recv_sn = sn;
-
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
- st, sn, retry);
-}
-
-bool rtl_check_tx_report_acked(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
-
- if (tx_report->last_sent_sn == tx_report->last_recv_sn)
- return true;
-
- if (time_before(tx_report->last_sent_time + 3 * HZ, jiffies)) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
- "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
- tx_report->last_sent_sn, tx_report->last_recv_sn);
- return true; /* 3 sec. (timeout) seen as acked */
- }
-
- return false;
-}
-
-void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int i;
-
- for (i = 0; i < wait_ms; i++) {
- if (rtl_check_tx_report_acked(hw))
- break;
- usleep_range(1000, 2000);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
- }
-}
-
-u32 rtl_get_hal_edca_param(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum wireless_mode wirelessmode,
- struct ieee80211_tx_queue_params *param)
-{
- u32 reg = 0;
- u8 sifstime = 10;
- u8 slottime = 20;
-
- /* AIFS = AIFSN * slot time + SIFS */
- switch (wirelessmode) {
- case WIRELESS_MODE_A:
- case WIRELESS_MODE_N_24G:
- case WIRELESS_MODE_N_5G:
- case WIRELESS_MODE_AC_5G:
- case WIRELESS_MODE_AC_24G:
- sifstime = 16;
- slottime = 9;
- break;
- case WIRELESS_MODE_G:
- slottime = (vif->bss_conf.use_short_slot ? 9 : 20);
- break;
- default:
- break;
- }
-
- reg |= (param->txop & 0x7FF) << 16;
- reg |= (fls(param->cw_max) & 0xF) << 12;
- reg |= (fls(param->cw_min) & 0xF) << 8;
- reg |= (param->aifs & 0x0F) * slottime + sifstime;
-
- return reg;
-}
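
To make the register layout above concrete, here is a small standalone sketch of the same packing: TXOP in bits [26:16], ECWmax in [15:12], ECWmin in [11:8], and AIFS = AIFSN * slot_time + SIFS in [7:0]. Unlike the driver, which derives the ECW exponents from the CW values via fls(), the sketch takes the exponents directly; names and sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Pack an EDCA parameter register the same way as above. */
static uint32_t pack_edca(uint16_t txop, uint8_t ecw_max, uint8_t ecw_min,
			  uint8_t aifsn, uint8_t slot, uint8_t sifs)
{
	uint32_t reg = 0;

	reg |= (uint32_t)(txop & 0x7FF) << 16;
	reg |= (uint32_t)(ecw_max & 0xF) << 12;
	reg |= (uint32_t)(ecw_min & 0xF) << 8;
	reg |= (uint32_t)(aifsn & 0x0F) * slot + sifs;
	return reg;
}

int main(void)
{
	/* OFDM branch above: slot = 9, SIFS = 16.
	 * AIFSN=2, ECWmin=4, ECWmax=10, TXOP=0 -> AIFS = 2*9+16 = 0x22,
	 * so the packed register is 0x0000A422.
	 */
	printf("reg=0x%08X\n", (unsigned int)pack_edca(0, 10, 4, 2, 9, 16));
	return 0;
}
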
-
-/*********************************************************
- *
- * functions called by core.c
- *
- *********************************************************/
-int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tid_data *tid_data;
- struct rtl_sta_info *sta_entry = NULL;
-
- if (!sta)
- return -EINVAL;
-
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if (!sta_entry)
- return -ENXIO;
- tid_data = &sta_entry->tids[tid];
-
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- tid_data->seq_number);
-
- *ssn = tid_data->seq_number;
- tid_data->agg.agg_state = RTL_AGG_START;
-
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- return 0;
-}
-
-int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tid_data *tid_data;
- struct rtl_sta_info *sta_entry = NULL;
-
- if (!sta)
- return -EINVAL;
-
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
-
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- tid_data = &sta_entry->tids[tid];
- sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP;
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- return 0;
-}
-
-int rtl_rx_agg_start(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tid_data *tid_data;
- struct rtl_sta_info *sta_entry = NULL;
- u8 reject_agg;
-
- if (!sta)
- return -EINVAL;
-
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (rtlpriv->cfg->ops->get_btc_status()) {
- rtlpriv->btcoexist.btc_ops->btc_get_ampdu_cfg(rtlpriv,
- &reject_agg,
- NULL, NULL);
- if (reject_agg)
- return -EINVAL;
- }
-
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if (!sta_entry)
- return -ENXIO;
- tid_data = &sta_entry->tids[tid];
-
- RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- tid_data->seq_number);
-
- tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
- return 0;
-}
-
-int rtl_rx_agg_stop(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_sta_info *sta_entry = NULL;
-
- if (!sta)
- return -EINVAL;
-
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
-
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- sta_entry->tids[tid].agg.rx_agg_state = RTL_RX_AGG_STOP;
-
- return 0;
-}
-
-int rtl_tx_agg_oper(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_sta_info *sta_entry = NULL;
-
- if (!sta)
- return -EINVAL;
-
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
-
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- sta_entry->tids[tid].agg.agg_state = RTL_AGG_OPERATIONAL;
-
- return 0;
-}
-
-void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
-{
- struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
- u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0;
-
- if (rtlpriv->cfg->ops->get_btc_status())
- btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
- &ctrl_agg_size, &agg_size);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
- reject_agg, ctrl_agg_size, agg_size);
-
- rtlpriv->hw->max_rx_aggregation_subframes =
- (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
-}
-
-/*********************************************************
- *
- * wq & timer callback functions
- *
- *********************************************************/
-/* this function is used for roaming */
-void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-
- if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
- return;
-
- if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
- return;
-
- /* check if this really is a beacon */
- if (!ieee80211_is_beacon(hdr->frame_control) &&
- !ieee80211_is_probe_resp(hdr->frame_control))
- return;
-
- /* min. beacon length + FCS_LEN */
- if (skb->len <= 40 + FCS_LEN)
- return;
-
- /* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
- return;
-
- rtlpriv->link_info.bcn_rx_inperiod++;
-}
-
-static void rtl_free_entries_from_scan_list(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_bssid_entry *entry, *next;
-
- list_for_each_entry_safe(entry, next, &rtlpriv->scan_list.list, list) {
- list_del(&entry->list);
- kfree(entry);
- rtlpriv->scan_list.num--;
- }
-}
-
-void rtl_scan_list_expire(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_bssid_entry *entry, *next;
- unsigned long flags;
-
- spin_lock_irqsave(&rtlpriv->locks.scan_list_lock, flags);
-
- list_for_each_entry_safe(entry, next, &rtlpriv->scan_list.list, list) {
- /* 180 seconds */
- if (jiffies_to_msecs(jiffies - entry->age) < 180000)
- continue;
-
- list_del(&entry->list);
- rtlpriv->scan_list.num--;
-
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "BSSID=%pM is expire in scan list (total=%d)\n",
- entry->bssid, rtlpriv->scan_list.num);
- kfree(entry);
- }
-
- spin_unlock_irqrestore(&rtlpriv->locks.scan_list_lock, flags);
-
- rtlpriv->btcoexist.btc_info.ap_num = rtlpriv->scan_list.num;
-}
-
-void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- unsigned long flags;
-
- struct rtl_bssid_entry *entry;
- bool entry_found = false;
-
- /* check if it is scanning */
- if (!mac->act_scanning)
- return;
-
- /* check if this really is a beacon */
- if (!ieee80211_is_beacon(hdr->frame_control) &&
- !ieee80211_is_probe_resp(hdr->frame_control))
- return;
-
- spin_lock_irqsave(&rtlpriv->locks.scan_list_lock, flags);
-
- list_for_each_entry(entry, &rtlpriv->scan_list.list, list) {
- if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) {
- list_del_init(&entry->list);
- entry_found = true;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Update BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
- break;
- }
- }
-
- if (!entry_found) {
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-
- if (!entry)
- goto label_err;
-
- memcpy(entry->bssid, hdr->addr3, ETH_ALEN);
- rtlpriv->scan_list.num++;
-
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Add BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
- }
-
- entry->age = jiffies;
-
- list_add_tail(&entry->list, &rtlpriv->scan_list.list);
-
-label_err:
- spin_unlock_irqrestore(&rtlpriv->locks.scan_list_lock, flags);
-}
-
-void rtl_watchdog_wq_callback(void *data)
-{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- watchdog_wq);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- bool busytraffic = false;
- bool tx_busy_traffic = false;
- bool rx_busy_traffic = false;
- bool higher_busytraffic = false;
- bool higher_busyrxtraffic = false;
- u8 idx, tid;
- u32 rx_cnt_inp4eriod = 0;
- u32 tx_cnt_inp4eriod = 0;
- u32 aver_rx_cnt_inperiod = 0;
- u32 aver_tx_cnt_inperiod = 0;
- u32 aver_tidtx_inperiod[MAX_TID_COUNT] = {0};
- u32 tidtx_inp4eriod[MAX_TID_COUNT] = {0};
-
- if (is_hal_stop(rtlhal))
- return;
-
- /* <1> Determine if action frame is allowed */
- if (mac->link_state > MAC80211_NOLINK) {
- if (mac->cnt_after_linked < 20)
- mac->cnt_after_linked++;
- } else {
- mac->cnt_after_linked = 0;
- }
-
- /* <2> Check whether traffic is busy; when it is,
- * we do not change channel.
- */
- if (mac->link_state >= MAC80211_LINKED) {
- /* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
- for (idx = 0; idx <= 2; idx++) {
- rtlpriv->link_info.num_rx_in4period[idx] =
- rtlpriv->link_info.num_rx_in4period[idx + 1];
- rtlpriv->link_info.num_tx_in4period[idx] =
- rtlpriv->link_info.num_tx_in4period[idx + 1];
- }
- rtlpriv->link_info.num_rx_in4period[3] =
- rtlpriv->link_info.num_rx_inperiod;
- rtlpriv->link_info.num_tx_in4period[3] =
- rtlpriv->link_info.num_tx_inperiod;
- for (idx = 0; idx <= 3; idx++) {
- rx_cnt_inp4eriod +=
- rtlpriv->link_info.num_rx_in4period[idx];
- tx_cnt_inp4eriod +=
- rtlpriv->link_info.num_tx_in4period[idx];
- }
- aver_rx_cnt_inperiod = rx_cnt_inp4eriod / 4;
- aver_tx_cnt_inperiod = tx_cnt_inp4eriod / 4;
-
- /* (2) check traffic busy */
- if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) {
- busytraffic = true;
- if (aver_rx_cnt_inperiod > aver_tx_cnt_inperiod)
- rx_busy_traffic = true;
- else
- tx_busy_traffic = true;
- }
-
- /* Higher Tx/Rx data. */
- if (aver_rx_cnt_inperiod > 4000 ||
- aver_tx_cnt_inperiod > 4000) {
- higher_busytraffic = true;
-
- /* Extremely high Rx data. */
- if (aver_rx_cnt_inperiod > 5000)
- higher_busyrxtraffic = true;
- }
-
- /* check every tid's tx traffic */
- for (tid = 0; tid <= 7; tid++) {
- for (idx = 0; idx <= 2; idx++)
- rtlpriv->link_info.tidtx_in4period[tid][idx] =
- rtlpriv->link_info.tidtx_in4period[tid]
- [idx + 1];
- rtlpriv->link_info.tidtx_in4period[tid][3] =
- rtlpriv->link_info.tidtx_inperiod[tid];
-
- for (idx = 0; idx <= 3; idx++)
- tidtx_inp4eriod[tid] +=
- rtlpriv->link_info.tidtx_in4period[tid][idx];
- aver_tidtx_inperiod[tid] = tidtx_inp4eriod[tid] / 4;
- if (aver_tidtx_inperiod[tid] > 5000)
- rtlpriv->link_info.higher_busytxtraffic[tid] =
- true;
- else
- rtlpriv->link_info.higher_busytxtraffic[tid] =
- false;
- }
-
- /* PS is controlled by coex. */
- if (rtlpriv->cfg->ops->get_btc_status() &&
- rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
- goto label_lps_done;
-
- if (rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod > 8 ||
- rtlpriv->link_info.num_rx_inperiod > 2)
- rtl_lps_leave(hw);
- else
- rtl_lps_enter(hw);
-
-label_lps_done:
- ;
- }
-
- rtlpriv->link_info.num_rx_inperiod = 0;
- rtlpriv->link_info.num_tx_inperiod = 0;
- for (tid = 0; tid <= 7; tid++)
- rtlpriv->link_info.tidtx_inperiod[tid] = 0;
-
- rtlpriv->link_info.busytraffic = busytraffic;
- rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
- rtlpriv->link_info.rx_busy_traffic = rx_busy_traffic;
- rtlpriv->link_info.tx_busy_traffic = tx_busy_traffic;
- rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
-
- rtlpriv->stats.txbytesunicast_inperiod =
- rtlpriv->stats.txbytesunicast -
- rtlpriv->stats.txbytesunicast_last;
- rtlpriv->stats.rxbytesunicast_inperiod =
- rtlpriv->stats.rxbytesunicast -
- rtlpriv->stats.rxbytesunicast_last;
- rtlpriv->stats.txbytesunicast_last = rtlpriv->stats.txbytesunicast;
- rtlpriv->stats.rxbytesunicast_last = rtlpriv->stats.rxbytesunicast;
-
- rtlpriv->stats.txbytesunicast_inperiod_tp =
- (u32)(rtlpriv->stats.txbytesunicast_inperiod * 8 / 2 /
- 1024 / 1024);
- rtlpriv->stats.rxbytesunicast_inperiod_tp =
- (u32)(rtlpriv->stats.rxbytesunicast_inperiod * 8 / 2 /
- 1024 / 1024);
-
- /* <3> DM */
- if (!rtlpriv->cfg->mod_params->disable_watchdog)
- rtlpriv->cfg->ops->dm_watchdog(hw);
-
- /* <4> roaming */
- if (mac->link_state == MAC80211_LINKED &&
- mac->opmode == NL80211_IFTYPE_STATION) {
- if ((rtlpriv->link_info.bcn_rx_inperiod +
- rtlpriv->link_info.num_rx_inperiod) == 0) {
- rtlpriv->link_info.roam_times++;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "AP off for %d s\n",
- (rtlpriv->link_info.roam_times * 2));
-
- /* if we cannot receive a beacon for 10s,
- * we should reconnect to this AP
- */
- if (rtlpriv->link_info.roam_times >= 5) {
- pr_err("AP off, try to reconnect now\n");
- rtlpriv->link_info.roam_times = 0;
- ieee80211_connection_loss(rtlpriv->mac80211.vif);
- }
- } else {
- rtlpriv->link_info.roam_times = 0;
- }
- }
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
-
- if (rtlpriv->btcoexist.btc_info.in_4way) {
- if (time_after(jiffies, rtlpriv->btcoexist.btc_info.in_4way_ts +
- msecs_to_jiffies(IN_4WAY_TIMEOUT_TIME)))
- rtlpriv->btcoexist.btc_info.in_4way = false;
- }
-
- rtlpriv->link_info.bcn_rx_inperiod = 0;
-
- /* <6> scan list */
- rtl_scan_list_expire(hw);
-}
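
The busy-traffic heuristic above keeps the last four 2-second packet counts per direction (and per TID) and averages them, flagging busy traffic once the average exceeds 100 packets. A compact standalone sketch of that single sliding window is shown below; the function and array names are hypothetical and the sketch models only one direction.

#include <stdint.h>
#include <stdio.h>

#define SLOTS 4

/* Shift in the newest per-period count and return the window average. */
static uint32_t window_average(uint32_t win[SLOTS], uint32_t newest)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < SLOTS - 1; i++)
		win[i] = win[i + 1];
	win[SLOTS - 1] = newest;

	for (i = 0; i < SLOTS; i++)
		sum += win[i];
	return sum / SLOTS;
}

int main(void)
{
	uint32_t rx_win[SLOTS] = { 0 };
	const uint32_t rx_counts[] = { 40, 120, 200, 90 };
	unsigned int i;

	for (i = 0; i < sizeof(rx_counts) / sizeof(rx_counts[0]); i++)
		printf("avg=%u\n", (unsigned int)window_average(rx_win, rx_counts[i]));
	/* averages: 10, 40, 90, 112 -> only the last period would count as busy */
	return 0;
}
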
-
-void rtl_watch_dog_timer_callback(struct timer_list *t)
-{
- struct rtl_priv *rtlpriv = from_timer(rtlpriv, t, works.watchdog_timer);
-
- queue_delayed_work(rtlpriv->works.rtl_wq,
- &rtlpriv->works.watchdog_wq, 0);
-
- mod_timer(&rtlpriv->works.watchdog_timer,
- jiffies + MSECS(RTL_WATCH_DOG_TIME));
-}
-
-void rtl_fwevt_wq_callback(void *data)
-{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, fwevt_wq);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->cfg->ops->c2h_command_handle(hw);
-}
-
-void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- unsigned long flags;
- struct rtl_c2hcmd *c2hcmd;
-
- c2hcmd = kmalloc(sizeof(*c2hcmd),
- in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
-
- if (!c2hcmd)
- goto label_err;
-
- c2hcmd->val = kmalloc(len,
- in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
-
- if (!c2hcmd->val)
- goto label_err2;
-
- /* fill data */
- c2hcmd->tag = tag;
- c2hcmd->len = len;
- memcpy(c2hcmd->val, val, len);
-
- /* enqueue */
- spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
-
- list_add_tail(&c2hcmd->list, &rtlpriv->c2hcmd_list);
-
- spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
-
- /* wake up wq */
- queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0);
-
- return;
-
-label_err2:
- kfree(c2hcmd);
-
-label_err:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_WARNING,
- "C2H cmd enqueue fail.\n");
-}
-
-void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- unsigned long flags;
- struct rtl_c2hcmd *c2hcmd;
- int i;
-
- for (i = 0; i < 200; i++) {
- /* dequeue a task */
- spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
-
- c2hcmd = list_first_entry_or_null(&rtlpriv->c2hcmd_list,
- struct rtl_c2hcmd, list);
-
- if (c2hcmd)
- list_del(&c2hcmd->list);
-
- spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
-
- /* do it */
- if (!c2hcmd)
- break;
-
- if (rtlpriv->cfg->ops->c2h_content_parsing && exec)
- rtlpriv->cfg->ops->c2h_content_parsing(hw,
- c2hcmd->tag, c2hcmd->len, c2hcmd->val);
-
- /* free */
- kfree(c2hcmd->val);
-
- kfree(c2hcmd);
- }
-}
-
-void rtl_c2hcmd_wq_callback(void *data)
-{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- c2hcmd_wq);
- struct ieee80211_hw *hw = rtlworks->hw;
-
- rtl_c2hcmd_launcher(hw, 1);
-}
-
-void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t)
-{
- struct rtl_priv *rtlpriv =
- from_timer(rtlpriv, t, works.dualmac_easyconcurrent_retrytimer);
- struct ieee80211_hw *hw = rtlpriv->hw;
- struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
-
- if (!buddy_priv)
- return;
-
- rtlpriv->cfg->ops->dualmac_easy_concurrent(hw);
-}
-
-/*********************************************************
- *
- * frame process functions
- *
- *********************************************************/
-u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie)
-{
- struct ieee80211_mgmt *mgmt = (void *)data;
- u8 *pos, *end;
-
- pos = (u8 *)mgmt->u.beacon.variable;
- end = data + len;
- while (pos < end) {
- if (pos + 2 + pos[1] > end)
- return NULL;
-
- if (pos[0] == ie)
- return pos;
-
- pos += 2 + pos[1];
- }
- return NULL;
-}
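
For readers unfamiliar with the ID/length/value walk used above, the following is a minimal self-contained version of the same loop over a fabricated IE buffer. The sample buffer is invented for illustration; element ID 3 corresponds to the DS Parameter Set (WLAN_EID_DS_PARAMS), which rtl_check_beacon_key below looks up the same way.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Each IE is: 1-byte ID, 1-byte length, then 'length' bytes of payload. */
static const uint8_t *find_ie(const uint8_t *pos, const uint8_t *end, uint8_t ie)
{
	while (pos < end) {
		if (pos + 2 + pos[1] > end)
			return NULL;		/* truncated element */
		if (pos[0] == ie)
			return pos;
		pos += 2 + pos[1];		/* skip header + payload */
	}
	return NULL;
}

int main(void)
{
	/* SSID IE (0, len 3, "abc") followed by DS Params IE (3, len 1, chan 6) */
	static const uint8_t ies[] = { 0, 3, 'a', 'b', 'c', 3, 1, 6 };
	const uint8_t *ds = find_ie(ies, ies + sizeof(ies), 3);

	if (ds)
		printf("channel %u\n", (unsigned int)ds[2]);	/* prints "channel 6" */
	return 0;
}
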
-
-/* when we use 2 rx ants we send IEEE80211_SMPS_OFF */
-/* when we use 1 rx ant we send IEEE80211_SMPS_STATIC */
-static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
- enum ieee80211_smps_mode smps,
- u8 *da, u8 *bssid)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct sk_buff *skb;
- struct ieee80211_mgmt *action_frame;
-
- /* 27 = header + category + action + smps mode */
- skb = dev_alloc_skb(27 + hw->extra_tx_headroom);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, hw->extra_tx_headroom);
- action_frame = skb_put_zero(skb, 27);
- memcpy(action_frame->da, da, ETH_ALEN);
- memcpy(action_frame->sa, rtlefuse->dev_addr, ETH_ALEN);
- memcpy(action_frame->bssid, bssid, ETH_ALEN);
- action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ACTION);
- action_frame->u.action.category = WLAN_CATEGORY_HT;
- action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
- switch (smps) {
- case IEEE80211_SMPS_AUTOMATIC:/* 0 */
- case IEEE80211_SMPS_NUM_MODES:/* 4 */
- WARN_ON(1);
- /* fall through */
- case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
- action_frame->u.action.u.ht_smps.smps_control =
- WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
- break;
- case IEEE80211_SMPS_STATIC:/* 2 */ /*MIMO_PS_STATIC*/
- action_frame->u.action.u.ht_smps.smps_control =
- WLAN_HT_SMPS_CONTROL_STATIC;/* 1 */
- break;
- case IEEE80211_SMPS_DYNAMIC:/* 3 */ /*MIMO_PS_DYNAMIC*/
- action_frame->u.action.u.ht_smps.smps_control =
- WLAN_HT_SMPS_CONTROL_DYNAMIC;/* 3 */
- break;
- }
-
- return skb;
-}
-
-int rtl_send_smps_action(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- enum ieee80211_smps_mode smps)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct sk_buff *skb = NULL;
- struct rtl_tcb_desc tcb_desc;
- u8 bssid[ETH_ALEN] = {0};
-
- memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
-
- if (rtlpriv->mac80211.act_scanning)
- goto err_free;
-
- if (!sta)
- goto err_free;
-
- if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
- goto err_free;
-
- if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
- goto err_free;
-
- if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP)
- memcpy(bssid, rtlpriv->efuse.dev_addr, ETH_ALEN);
- else
- memcpy(bssid, rtlpriv->mac80211.bssid, ETH_ALEN);
-
- skb = rtl_make_smps_action(hw, smps, sta->addr, bssid);
- /* this is a type = mgmt, subtype = action frame */
- if (skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct rtl_sta_info *sta_entry =
- (struct rtl_sta_info *)sta->drv_priv;
- sta_entry->mimo_ps = smps;
- /* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true); */
-
- info->control.rates[0].idx = 0;
- info->band = hw->conf.chandef.chan->band;
- rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
- }
- return 1;
-
-err_free:
- return 0;
-}
-
-void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- pr_err("Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
-/* Because mac80211 has issues handling a received del_ba, we build a
- * fake del_ba here when we receive a ba_req while rx_agg is open, so
- * that mac80211 can release the related BA resources. Note that this
- * del_ba is for the tx side.
- */
-struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
- u8 *sa, u8 *bssid, u16 tid)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct sk_buff *skb;
- struct ieee80211_mgmt *action_frame;
- u16 params;
-
- /* 34 = header + category + action + delba params + reason code */
- skb = dev_alloc_skb(34 + hw->extra_tx_headroom);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, hw->extra_tx_headroom);
- action_frame = skb_put_zero(skb, 34);
- memcpy(action_frame->sa, sa, ETH_ALEN);
- memcpy(action_frame->da, rtlefuse->dev_addr, ETH_ALEN);
- memcpy(action_frame->bssid, bssid, ETH_ALEN);
- action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ACTION);
- action_frame->u.action.category = WLAN_CATEGORY_BACK;
- action_frame->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
- params = (u16)(1 << 11); /* bit 11 initiator */
- params |= (u16)(tid << 12); /* bit 15:12 TID number */
-
- action_frame->u.action.u.delba.params = cpu_to_le16(params);
- action_frame->u.action.u.delba.reason_code =
- cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
-
- return skb;
-}
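
The DELBA parameter field packing above places the initiator flag in bit 11 and the TID in bits 15:12, matching the inline comments. A tiny sketch of the same packing follows; the helper name and sample TID are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* DELBA params: bit 11 = initiator, bits 15:12 = TID. */
static uint16_t delba_params(uint8_t tid, int initiator)
{
	uint16_t params = 0;

	if (initiator)
		params |= 1U << 11;
	params |= (uint16_t)(tid & 0xF) << 12;
	return params;
}

int main(void)
{
	/* TID 5 with initiator set: 0x5000 | 0x0800 = 0x5800 */
	printf("params=0x%04X\n", (unsigned int)delba_params(5, 1));
	return 0;
}
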
-
-bool rtl_check_beacon_key(struct ieee80211_hw *hw, void *data, unsigned int len)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct ieee80211_hdr *hdr = data;
- struct ieee80211_ht_cap *ht_cap_ie;
- struct ieee80211_ht_operation *ht_oper_ie = NULL;
- struct rtl_beacon_keys bcn_key = {};
- struct rtl_beacon_keys *cur_bcn_key;
- u8 *ht_cap;
- u8 ht_cap_len;
- u8 *ht_oper;
- u8 ht_oper_len;
- u8 *ds_param;
- u8 ds_param_len;
-
- if (mac->opmode != NL80211_IFTYPE_STATION)
- return false;
-
- /* check if this really is a beacon*/
- if (!ieee80211_is_beacon(hdr->frame_control))
- return false;
-
- /* min. beacon length + FCS_LEN */
- if (len <= 40 + FCS_LEN)
- return false;
-
- cur_bcn_key = &mac->cur_beacon_keys;
-
- if (rtlpriv->mac80211.link_state == MAC80211_NOLINK) {
- if (cur_bcn_key->valid) {
- cur_bcn_key->valid = false;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
- "Reset cur_beacon_keys.valid to false!\n");
- }
- return false;
- }
-
- /* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
- return false;
-
- /***** Parsing DS Param IE ******/
- ds_param = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_DS_PARAMS);
-
- if (ds_param && !(ds_param[1] < sizeof(*ds_param))) {
- ds_param_len = ds_param[1];
- bcn_key.bcn_channel = ds_param[2];
- } else {
- ds_param = NULL;
- }
-
- /***** Parsing HT Cap. IE ******/
- ht_cap = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_HT_CAPABILITY);
-
- if (ht_cap && !(ht_cap[1] < sizeof(*ht_cap))) {
- ht_cap_len = ht_cap[1];
- ht_cap_ie = (struct ieee80211_ht_cap *)&ht_cap[2];
- bcn_key.ht_cap_info = ht_cap_ie->cap_info;
- } else {
- ht_cap = NULL;
- }
-
- /***** Parsing HT Info. IE ******/
- ht_oper = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_HT_OPERATION);
-
- if (ht_oper && !(ht_oper[1] < sizeof(*ht_oper))) {
- ht_oper_len = ht_oper[1];
- ht_oper_ie = (struct ieee80211_ht_operation *)&ht_oper[2];
- } else {
- ht_oper = NULL;
- }
-
- /* update bcn_key */
-
- if (!ds_param && ht_oper && ht_oper_ie)
- bcn_key.bcn_channel = ht_oper_ie->primary_chan;
-
- if (ht_oper && ht_oper_ie)
- bcn_key.ht_info_infos_0_sco = ht_oper_ie->ht_param & 0x03;
-
- bcn_key.valid = true;
-
- /* update cur_beacon_keys or compare beacon key */
- if (rtlpriv->mac80211.link_state != MAC80211_LINKED &&
- rtlpriv->mac80211.link_state != MAC80211_LINKED_SCANNING)
- return true;
-
- if (!cur_bcn_key->valid) {
- /* update cur_beacon_keys */
- memcpy(cur_bcn_key, &bcn_key, sizeof(bcn_key));
- cur_bcn_key->valid = true;
-
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
- "Beacon key update!ch=%d, ht_cap_info=0x%x, sco=0x%x\n",
- cur_bcn_key->bcn_channel,
- cur_bcn_key->ht_cap_info,
- cur_bcn_key->ht_info_infos_0_sco);
- return true;
- }
-
- /* compare beacon key */
- if (!memcmp(cur_bcn_key, &bcn_key, sizeof(bcn_key))) {
- /* same beacon key */
- mac->new_beacon_cnt = 0;
- goto chk_exit;
- }
-
- if (cur_bcn_key->bcn_channel == bcn_key.bcn_channel &&
- cur_bcn_key->ht_cap_info == bcn_key.ht_cap_info) {
- /* Beacon HT info IE, secondary channel offset check */
- /* 40M -> 20M */
- if (cur_bcn_key->ht_info_infos_0_sco >
- bcn_key.ht_info_infos_0_sco) {
- /* Not a new beacon */
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "Beacon BW change! sco:0x%x -> 0x%x\n",
- cur_bcn_key->ht_info_infos_0_sco,
- bcn_key.ht_info_infos_0_sco);
-
- cur_bcn_key->ht_info_infos_0_sco =
- bcn_key.ht_info_infos_0_sco;
- } else {
- /* 20M -> 40M */
- if (rtlphy->max_ht_chan_bw >= HT_CHANNEL_WIDTH_20_40) {
- /* Not a new beacon */
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "Beacon BW change! sco:0x%x -> 0x%x\n",
- cur_bcn_key->ht_info_infos_0_sco,
- bcn_key.ht_info_infos_0_sco);
-
- cur_bcn_key->ht_info_infos_0_sco =
- bcn_key.ht_info_infos_0_sco;
- } else {
- mac->new_beacon_cnt++;
- }
- }
- } else {
- mac->new_beacon_cnt++;
- }
-
- if (mac->new_beacon_cnt == 1) {
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "Get new beacon.\n");
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "Cur : ch=%d, ht_cap=0x%x, sco=0x%x\n",
- cur_bcn_key->bcn_channel,
- cur_bcn_key->ht_cap_info,
- cur_bcn_key->ht_info_infos_0_sco);
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "New RX : ch=%d, ht_cap=0x%x, sco=0x%x\n",
- bcn_key.bcn_channel,
- bcn_key.ht_cap_info,
- bcn_key.ht_info_infos_0_sco);
-
- } else if (mac->new_beacon_cnt > 1) {
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "new beacon cnt: %d\n",
- mac->new_beacon_cnt);
- }
-
- if (mac->new_beacon_cnt > 3) {
- ieee80211_connection_loss(rtlpriv->mac80211.vif);
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "new beacon cnt >3, disconnect !\n");
- }
-
-chk_exit:
-
- return true;
-}
-
-/*********************************************************
- *
- * IOT functions
- *
- *********************************************************/
-static bool rtl_chk_vendor_ouisub(struct ieee80211_hw *hw,
- struct octet_string vendor_ie)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool matched = false;
- static u8 athcap_1[] = { 0x00, 0x03, 0x7F };
- static u8 athcap_2[] = { 0x00, 0x13, 0x74 };
- static u8 broadcap_1[] = { 0x00, 0x10, 0x18 };
- static u8 broadcap_2[] = { 0x00, 0x0a, 0xf7 };
- static u8 broadcap_3[] = { 0x00, 0x05, 0xb5 };
- static u8 racap[] = { 0x00, 0x0c, 0x43 };
- static u8 ciscocap[] = { 0x00, 0x40, 0x96 };
- static u8 marvcap[] = { 0x00, 0x50, 0x43 };
-
- if (memcmp(vendor_ie.octet, athcap_1, 3) == 0 ||
- memcmp(vendor_ie.octet, athcap_2, 3) == 0) {
- rtlpriv->mac80211.vendor = PEER_ATH;
- matched = true;
- } else if (memcmp(vendor_ie.octet, broadcap_1, 3) == 0 ||
- memcmp(vendor_ie.octet, broadcap_2, 3) == 0 ||
- memcmp(vendor_ie.octet, broadcap_3, 3) == 0) {
- rtlpriv->mac80211.vendor = PEER_BROAD;
- matched = true;
- } else if (memcmp(vendor_ie.octet, racap, 3) == 0) {
- rtlpriv->mac80211.vendor = PEER_RAL;
- matched = true;
- } else if (memcmp(vendor_ie.octet, ciscocap, 3) == 0) {
- rtlpriv->mac80211.vendor = PEER_CISCO;
- matched = true;
- } else if (memcmp(vendor_ie.octet, marvcap, 3) == 0) {
- rtlpriv->mac80211.vendor = PEER_MARV;
- matched = true;
- }
-
- return matched;
-}
-
-static bool rtl_find_221_ie(struct ieee80211_hw *hw, u8 *data,
- unsigned int len)
-{
- struct ieee80211_mgmt *mgmt = (void *)data;
- struct octet_string vendor_ie;
- u8 *pos, *end;
-
- pos = (u8 *)mgmt->u.beacon.variable;
- end = data + len;
- while (pos < end) {
- if (pos[0] == 221) {
- vendor_ie.length = pos[1];
- vendor_ie.octet = &pos[2];
- if (rtl_chk_vendor_ouisub(hw, vendor_ie))
- return true;
- }
-
- if (pos + 2 + pos[1] > end)
- return false;
-
- pos += 2 + pos[1];
- }
- return false;
-}
-
-void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = (void *)data;
- u32 vendor = PEER_UNKNOWN;
-
- static u8 ap3_1[3] = { 0x00, 0x14, 0xbf };
- static u8 ap3_2[3] = { 0x00, 0x1a, 0x70 };
- static u8 ap3_3[3] = { 0x00, 0x1d, 0x7e };
- static u8 ap4_1[3] = { 0x00, 0x90, 0xcc };
- static u8 ap4_2[3] = { 0x00, 0x0e, 0x2e };
- static u8 ap4_3[3] = { 0x00, 0x18, 0x02 };
- static u8 ap4_4[3] = { 0x00, 0x17, 0x3f };
- static u8 ap4_5[3] = { 0x00, 0x1c, 0xdf };
- static u8 ap5_1[3] = { 0x00, 0x1c, 0xf0 };
- static u8 ap5_2[3] = { 0x00, 0x21, 0x91 };
- static u8 ap5_3[3] = { 0x00, 0x24, 0x01 };
- static u8 ap5_4[3] = { 0x00, 0x15, 0xe9 };
- static u8 ap5_5[3] = { 0x00, 0x17, 0x9A };
- static u8 ap5_6[3] = { 0x00, 0x18, 0xE7 };
- static u8 ap6_1[3] = { 0x00, 0x17, 0x94 };
- static u8 ap7_1[3] = { 0x00, 0x14, 0xa4 };
-
- if (mac->opmode != NL80211_IFTYPE_STATION)
- return;
-
- if (mac->link_state == MAC80211_NOLINK) {
- mac->vendor = PEER_UNKNOWN;
- return;
- }
-
- if (mac->cnt_after_linked > 2)
- return;
-
- /* check if this really is a beacon */
- if (!ieee80211_is_beacon(hdr->frame_control))
- return;
-
- /* min. beacon length + FCS_LEN */
- if (len <= 40 + FCS_LEN)
- return;
-
- /* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
- return;
-
- if (rtl_find_221_ie(hw, data, len))
- vendor = mac->vendor;
-
- if ((memcmp(mac->bssid, ap5_1, 3) == 0) ||
- (memcmp(mac->bssid, ap5_2, 3) == 0) ||
- (memcmp(mac->bssid, ap5_3, 3) == 0) ||
- (memcmp(mac->bssid, ap5_4, 3) == 0) ||
- (memcmp(mac->bssid, ap5_5, 3) == 0) ||
- (memcmp(mac->bssid, ap5_6, 3) == 0) ||
- vendor == PEER_ATH) {
- vendor = PEER_ATH;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
- } else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
- (memcmp(mac->bssid, ap4_5, 3) == 0) ||
- (memcmp(mac->bssid, ap4_1, 3) == 0) ||
- (memcmp(mac->bssid, ap4_2, 3) == 0) ||
- (memcmp(mac->bssid, ap4_3, 3) == 0) ||
- vendor == PEER_RAL) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
- vendor = PEER_RAL;
- } else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
- vendor == PEER_CISCO) {
- vendor = PEER_CISCO;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
- } else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
- (memcmp(mac->bssid, ap3_2, 3) == 0) ||
- (memcmp(mac->bssid, ap3_3, 3) == 0) ||
- vendor == PEER_BROAD) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
- vendor = PEER_BROAD;
- } else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
- vendor == PEER_MARV) {
- vendor = PEER_MARV;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
- }
-
- mac->vendor = vendor;
-}
-
-MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
-MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
-MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
-
-struct rtl_global_var rtl_global_var = {};
-
-int rtl_core_module_init(void)
-{
- if (rtl_rate_control_register())
- pr_err("rtl: Unable to register rtl_rc, use default RC !!\n");
-
- /* add debugfs */
- rtl_debugfs_add_topdir();
-
- /* init some global vars */
- INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
- spin_lock_init(&rtl_global_var.glb_list_lock);
-
- return 0;
-}
-
-void rtl_core_module_exit(void)
-{
- /*RC*/
- rtl_rate_control_unregister();
-
- /* remove debugfs */
- rtl_debugfs_remove_topdir();
-}
diff --git a/drivers/staging/rtlwifi/base.h b/drivers/staging/rtlwifi/base.h
deleted file mode 100644
index 591f433be1c3..000000000000
--- a/drivers/staging/rtlwifi/base.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_BASE_H__
-#define __RTL_BASE_H__
-
-enum ap_peer {
- PEER_UNKNOWN = 0,
- PEER_RTL = 1,
- PEER_RTL_92SE = 2,
- PEER_BROAD = 3,
- PEER_RAL = 4,
- PEER_ATH = 5,
- PEER_CISCO = 6,
- PEER_MARV = 7,
- PEER_AIRGO = 9,
- PEER_MAX = 10,
-};
-
-#define RTL_DUMMY_OFFSET 0
-#define RTL_DUMMY_UNIT 8
-#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
-#define RTL_TX_DESC_SIZE 32
-#define RTL_TX_HEADER_SIZE (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
-
-#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
-#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
-
-#define MAX_BIT_RATE_SHORT_GI_2NSS_80MHZ_MCS9 867 /* Mbps */
-#define MAX_BIT_RATE_SHORT_GI_2NSS_80MHZ_MCS7 650 /* Mbps */
-#define MAX_BIT_RATE_LONG_GI_2NSS_80MHZ_MCS9 780 /* Mbps */
-#define MAX_BIT_RATE_LONG_GI_2NSS_80MHZ_MCS7 585 /* Mbps */
-
-#define MAX_BIT_RATE_SHORT_GI_1NSS_80MHZ_MCS9 434 /* Mbps */
-#define MAX_BIT_RATE_SHORT_GI_1NSS_80MHZ_MCS7 325 /* Mbps */
-#define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS9 390 /* Mbps */
-#define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS7 293 /* Mbps */
-
-#define FRAME_OFFSET_FRAME_CONTROL 0
-#define FRAME_OFFSET_DURATION 2
-#define FRAME_OFFSET_ADDRESS1 4
-#define FRAME_OFFSET_ADDRESS2 10
-#define FRAME_OFFSET_ADDRESS3 16
-#define FRAME_OFFSET_SEQUENCE 22
-#define FRAME_OFFSET_ADDRESS4 24
-#define MAX_LISTEN_INTERVAL 10
-#define MAX_RATE_TRIES 4
-
-#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \
- WRITEEF2BYTE(_hdr, _val)
-#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
- WRITEEF1BYTE(_hdr, _val)
-#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \
- SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
-#define SET_80211_HDR_TO_DS(_hdr, _val) \
- SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
-
-#define SET_80211_PS_POLL_AID(_hdr, _val) \
- (*(u16 *)((u8 *)(_hdr) + 2) = _val)
-#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
- ether_addr_copy(((u8 *)(_hdr)) + 4, (u8 *)(_val))
-#define SET_80211_PS_POLL_TA(_hdr, _val) \
- ether_addr_copy(((u8 *)(_hdr)) + 10, (u8 *)(_val))
-
-#define SET_80211_HDR_DURATION(_hdr, _val) \
- (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val))
-#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
- CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS1, (u8 *)(_val))
-#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
- CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val))
-#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
- CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
-#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
- WRITEEF2BYTE((u8 *)(_hdr) + FRAME_OFFSET_SEQUENCE, _val)
-
-#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \
- WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val)
-#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
- WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val)
-#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
- WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val)
-#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \
- READEF2BYTE(((u8 *)(__phdr)) + 34)
-#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
- WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val)
-#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
- SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
- (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
-
-#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
-#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
-
-int rtl_init_core(struct ieee80211_hw *hw);
-void rtl_deinit_core(struct ieee80211_hw *hw);
-void rtl_init_rx_config(struct ieee80211_hw *hw);
-void rtl_init_rfkill(struct ieee80211_hw *hw);
-void rtl_deinit_rfkill(struct ieee80211_hw *hw);
-
-void rtl_watch_dog_timer_callback(struct timer_list *t);
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
-
-bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
-int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
- bool isvht, u8 desc_rate);
-bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
-u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
- bool is_enc);
-
-bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc,
- struct ieee80211_hw *hw);
-void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf,
- u8 c2h_cmd_len);
-bool rtl_check_tx_report_acked(struct ieee80211_hw *hw);
-void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms);
-u32 rtl_get_hal_edca_param(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum wireless_mode wirelessmode,
- struct ieee80211_tx_queue_params *param);
-
-void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_scan_list_expire(struct ieee80211_hw *hw);
-int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid);
-int rtl_tx_agg_oper(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid);
-int rtl_rx_agg_start(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid);
-int rtl_rx_agg_stop(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid);
-void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv);
-void rtl_watchdog_wq_callback(void *data);
-void rtl_fwevt_wq_callback(void *data);
-void rtl_c2hcmd_wq_callback(void *data);
-void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
-void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val);
-
-u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw,
- u8 rate_index,
- enum wireless_mode wirelessmode);
-void rtl_get_tcb_desc(struct ieee80211_hw *hw,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
-
-int rtl_send_smps_action(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- enum ieee80211_smps_mode smps);
-u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
-void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
-u8 rtl_tid_to_ac(u8 tid);
-void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t);
-extern struct rtl_global_var rtl_global_var;
-void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
-bool rtl_check_beacon_key(struct ieee80211_hw *hw, void *data,
- unsigned int len);
-int rtl_core_module_init(void);
-void rtl_core_module_exit(void);
-#endif
diff --git a/drivers/staging/rtlwifi/btcoexist/Makefile b/drivers/staging/rtlwifi/btcoexist/Makefile
deleted file mode 100644
index f600bcc38a15..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-btcoexist-objs := \
- halbtc8822b1ant.o \
- halbtc8822b2ant.o \
- halbtc8822bwifionly.o \
- halbtcoutsrc.o \
- rtl_btc.o
-
-obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
diff --git a/drivers/staging/rtlwifi/btcoexist/halbt_precomp.h b/drivers/staging/rtlwifi/btcoexist/halbt_precomp.h
deleted file mode 100644
index 90d0f2462303..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbt_precomp.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- ******************************************************************************/
-
-#ifndef __HALBT_PRECOMP_H__
-#define __HALBT_PRECOMP_H__
-/*************************************************************
- * include files
- *************************************************************/
-#include "../wifi.h"
-#include "../efuse.h"
-#include "../base.h"
-#include "../regd.h"
-#include "../cam.h"
-#include "../ps.h"
-#include "../pci.h"
-
-#include "halbtcoutsrc.h"
-
-/* Interface type */
-#define RT_PCI_INTERFACE 1
-#define RT_USB_INTERFACE 2
-#define RT_SDIO_INTERFACE 3
-#define DEV_BUS_TYPE RT_PCI_INTERFACE
-
-#include "halbtc8822b1ant.h"
-#include "halbtc8822b2ant.h"
-#include "halbtc8822bwifionly.h"
-
-#define GETDEFAULTADAPTER(padapter) padapter
-
-#define BIT0 0x00000001
-#define BIT1 0x00000002
-#define BIT2 0x00000004
-#define BIT3 0x00000008
-#define BIT4 0x00000010
-#define BIT5 0x00000020
-#define BIT6 0x00000040
-#define BIT7 0x00000080
-#define BIT8 0x00000100
-#define BIT9 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-
-#endif /* __HALBT_PRECOMP_H__ */
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c b/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c
deleted file mode 100644
index ade271cb4aab..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c
+++ /dev/null
@@ -1,5233 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-/* ************************************************************
- * Description:
- *
- * This file is for RTL8822B Co-exist mechanism
- *
- * History
- * 2012/11/15 Cosa first check in.
- *
- * *************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-/*only for rf4ce*/
-#include "halbt_precomp.h"
-
-/* ************************************************************
- * Global variables, these are static variables
- * *************************************************************/
-static struct coex_dm_8822b_1ant glcoex_dm_8822b_1ant;
-static struct coex_dm_8822b_1ant *coex_dm = &glcoex_dm_8822b_1ant;
-static struct coex_sta_8822b_1ant glcoex_sta_8822b_1ant;
-static struct coex_sta_8822b_1ant *coex_sta = &glcoex_sta_8822b_1ant;
-static struct psdscan_sta_8822b_1ant gl_psd_scan_8822b_1ant;
-static struct psdscan_sta_8822b_1ant *psd_scan = &gl_psd_scan_8822b_1ant;
-static struct rfe_type_8822b_1ant gl_rfe_type_8822b_1ant;
-static struct rfe_type_8822b_1ant *rfe_type = &gl_rfe_type_8822b_1ant;
-
-static const char *const glbt_info_src_8822b_1ant[] = {
- "BT Info[wifi fw]", "BT Info[bt rsp]", "BT Info[bt auto report]",
-};
-
-static u32 glcoex_ver_date_8822b_1ant = 20170327;
-static u32 glcoex_ver_8822b_1ant = 0x44;
-static u32 glcoex_ver_btdesired_8822b_1ant = 0x42;
-
-/* ************************************************************
- * local function proto type if needed
- * ************************************************************
- * ************************************************************
- * local function start with halbtc8822b1ant_
- * *************************************************************/
-
-static u8 halbtc8822b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
- u8 index, u8 level_num,
- u8 rssi_thresh, u8 rssi_thresh1)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- s32 wifi_rssi = 0;
- u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
-
- btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
-
- if (level_num == 2) {
- if ((coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_LOW) ||
- (coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_STAY_LOW)) {
- if (wifi_rssi >=
- (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8822B_1ANT))
- wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- } else {
- if (wifi_rssi < rssi_thresh)
- wifi_rssi_state = BTC_RSSI_STATE_LOW;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- }
- } else if (level_num == 3) {
- if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
- return coex_sta->pre_wifi_rssi_state[index];
- }
-
- if ((coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_LOW) ||
- (coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_STAY_LOW)) {
- if (wifi_rssi >=
- (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8822B_1ANT))
- wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- } else if ((coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_MEDIUM) ||
- (coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_STAY_MEDIUM)) {
- if (wifi_rssi >= (rssi_thresh1 +
- BTC_RSSI_COEX_THRESH_TOL_8822B_1ANT))
- wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- else if (wifi_rssi < rssi_thresh)
- wifi_rssi_state = BTC_RSSI_STATE_LOW;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- } else {
- if (wifi_rssi < rssi_thresh1)
- wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- }
- }
-
- coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
-
- return wifi_rssi_state;
-}
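
The branches above form a hysteresis comparator: the threshold used depends on the previous state, so an extra tolerance must be crossed before moving up, which keeps the state from flapping when the RSSI hovers near a threshold. The sketch below collapses the four-state machine (LOW/STAY_LOW/HIGH/STAY_HIGH) to a boolean for the two-level case; the helper name, sample values, and the TOL constant (standing in for BTC_RSSI_COEX_THRESH_TOL_8822B_1ANT) are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define TOL 2	/* hysteresis tolerance */

/* New "high" state given the previous state and the current RSSI. */
static bool rssi_state_high(bool prev_high, int rssi, int thresh)
{
	if (!prev_high)
		return rssi >= thresh + TOL;	/* need extra margin to go up */
	return rssi >= thresh;			/* only drop once below thresh */
}

int main(void)
{
	bool high = false;
	const int samples[] = { 29, 31, 32, 30, 29, 27 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		high = rssi_state_high(high, samples[i], 30);
		printf("rssi=%d -> %s\n", samples[i], high ? "HIGH" : "LOW");
	}
	/* 31 is not enough to go HIGH, 32 is; 30 stays HIGH, 29 drops to LOW */
	return 0;
}
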
-
-static void halbtc8822b1ant_update_ra_mask(struct btc_coexist *btcoexist,
- bool force_exec, u32 dis_rate_mask)
-{
- coex_dm->cur_ra_mask = dis_rate_mask;
-
- if (force_exec || (coex_dm->pre_ra_mask != coex_dm->cur_ra_mask))
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK,
- &coex_dm->cur_ra_mask);
- coex_dm->pre_ra_mask = coex_dm->cur_ra_mask;
-}
-
-static void
-halbtc8822b1ant_auto_rate_fallback_retry(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
-{
- bool wifi_under_b_mode = false;
-
- coex_dm->cur_arfr_type = type;
-
- if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) {
- switch (coex_dm->cur_arfr_type) {
- case 0: /* normal mode */
- btcoexist->btc_write_4byte(btcoexist, 0x430,
- coex_dm->backup_arfr_cnt1);
- btcoexist->btc_write_4byte(btcoexist, 0x434,
- coex_dm->backup_arfr_cnt2);
- break;
- case 1:
- btcoexist->btc_get(btcoexist,
- BTC_GET_BL_WIFI_UNDER_B_MODE,
- &wifi_under_b_mode);
- if (wifi_under_b_mode) {
- btcoexist->btc_write_4byte(btcoexist, 0x430,
- 0x0);
- btcoexist->btc_write_4byte(btcoexist, 0x434,
- 0x01010101);
- } else {
- btcoexist->btc_write_4byte(btcoexist, 0x430,
- 0x0);
- btcoexist->btc_write_4byte(btcoexist, 0x434,
- 0x04030201);
- }
- break;
- default:
- break;
- }
- }
-
- coex_dm->pre_arfr_type = coex_dm->cur_arfr_type;
-}
-
-static void halbtc8822b1ant_retry_limit(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
-{
- coex_dm->cur_retry_limit_type = type;
-
- if (force_exec ||
- (coex_dm->pre_retry_limit_type != coex_dm->cur_retry_limit_type)) {
- switch (coex_dm->cur_retry_limit_type) {
- case 0: /* normal mode */
- btcoexist->btc_write_2byte(btcoexist, 0x42a,
- coex_dm->backup_retry_limit);
- break;
- case 1: /* retry limit=8 */
- btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
- break;
- default:
- break;
- }
- }
-
- coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type;
-}
-
-static void halbtc8822b1ant_ampdu_max_time(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
-{
- coex_dm->cur_ampdu_time_type = type;
-
- if (force_exec ||
- (coex_dm->pre_ampdu_time_type != coex_dm->cur_ampdu_time_type)) {
- switch (coex_dm->cur_ampdu_time_type) {
- case 0: /* normal mode */
- btcoexist->btc_write_1byte(
- btcoexist, 0x456,
- coex_dm->backup_ampdu_max_time);
- break;
- case 1: /* AMPDU time = 0x38 * 32us */
- btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
- break;
- default:
- break;
- }
- }
-
- coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type;
-}
-
-static void halbtc8822b1ant_limited_tx(struct btc_coexist *btcoexist,
- bool force_exec, u8 ra_mask_type,
- u8 arfr_type, u8 retry_limit_type,
- u8 ampdu_time_type)
-{
- switch (ra_mask_type) {
- case 0: /* normal mode */
- halbtc8822b1ant_update_ra_mask(btcoexist, force_exec, 0x0);
- break;
- case 1: /* disable cck 1/2 */
- halbtc8822b1ant_update_ra_mask(btcoexist, force_exec,
- 0x00000003);
- break;
- case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
- halbtc8822b1ant_update_ra_mask(btcoexist, force_exec,
- 0x0001f1f7);
- break;
- default:
- break;
- }
-
- halbtc8822b1ant_auto_rate_fallback_retry(btcoexist, force_exec,
- arfr_type);
- halbtc8822b1ant_retry_limit(btcoexist, force_exec, retry_limit_type);
- halbtc8822b1ant_ampdu_max_time(btcoexist, force_exec, ampdu_time_type);
-}
-
-/*
- * rx agg size setting (rej_ap_agg_pkt / bt_ctrl_agg_buf_size / agg_buf_size):
- * size 1:   true  / don't care / don't care
- * size max: false / false      / don't care
- * size 7:   false / true       / 7
- */
-
-static void halbtc8822b1ant_limited_rx(struct btc_coexist *btcoexist,
- bool force_exec, bool rej_ap_agg_pkt,
- bool bt_ctrl_agg_buf_size,
- u8 agg_buf_size)
-{
- bool reject_rx_agg = rej_ap_agg_pkt;
- bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
- u8 rx_agg_size = agg_buf_size;
-
- /* ============================================ */
- /* Rx Aggregation related setting */
- /* ============================================ */
- btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
- &reject_rx_agg);
- /* decide BT control aggregation buf size or not */
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
- &bt_ctrl_rx_agg_size);
- /* aggregation buf size, only work when BT control Rx aggregation size*/
- btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
- /* real update aggregation setting */
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
-}
-
-static void halbtc8822b1ant_query_bt_info(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[1] = {0};
-
- if (coex_sta->bt_disabled) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No query BT info because BT is disabled!\n");
- return;
- }
-
- h2c_parameter[0] |= BIT(0); /* trigger */
-
- btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WL query BT info!!\n");
-}
-
-static void halbtc8822b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
- u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
- static u8 num_of_bt_counter_chk, cnt_slave, cnt_autoslot_hang;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- reg_hp_txrx = 0x770;
- reg_lp_txrx = 0x774;
-
- u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
- reg_hp_tx = u32tmp & MASKLWORD;
- reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
-
- u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
- reg_lp_tx = u32tmp & MASKLWORD;
- reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
-
- coex_sta->high_priority_tx = reg_hp_tx;
- coex_sta->high_priority_rx = reg_hp_rx;
- coex_sta->low_priority_tx = reg_lp_tx;
- coex_sta->low_priority_rx = reg_lp_rx;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Hi-Pri Rx/Tx: %d/%d, Lo-Pri Rx/Tx: %d/%d\n",
- reg_hp_rx, reg_hp_tx, reg_lp_rx, reg_lp_tx);
-
- /* reset counter */
- btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
-
- if ((coex_sta->low_priority_tx > 1150) &&
- (!coex_sta->c2h_bt_inquiry_page))
- coex_sta->pop_event_cnt++;
-
- if ((coex_sta->low_priority_rx >= 1150) &&
- (coex_sta->low_priority_rx >= coex_sta->low_priority_tx) &&
- (!coex_sta->under_ips) && (!coex_sta->c2h_bt_inquiry_page) &&
- (coex_sta->bt_link_exist)) {
- if (cnt_slave >= 3) {
- bt_link_info->slave_role = true;
- cnt_slave = 3;
- } else {
- cnt_slave++;
- }
- } else {
- if (cnt_slave == 0) {
- bt_link_info->slave_role = false;
- cnt_slave = 0;
- } else {
- cnt_slave--;
- }
- }
-
- if (coex_sta->is_tdma_btautoslot) {
- if ((coex_sta->low_priority_tx >= 1300) &&
- (coex_sta->low_priority_rx <= 150)) {
- if (cnt_autoslot_hang >= 2) {
- coex_sta->is_tdma_btautoslot_hang = true;
- cnt_autoslot_hang = 2;
- } else {
- cnt_autoslot_hang++;
- }
- } else {
- if (cnt_autoslot_hang == 0) {
- coex_sta->is_tdma_btautoslot_hang = false;
- cnt_autoslot_hang = 0;
- } else {
- cnt_autoslot_hang--;
- }
- }
- }
-
- if (bt_link_info->hid_only) {
- if (coex_sta->low_priority_rx > 50)
- coex_sta->is_hid_low_pri_tx_overhead = true;
- else
- coex_sta->is_hid_low_pri_tx_overhead = false;
- }
-
- if ((coex_sta->high_priority_tx == 0) &&
- (coex_sta->high_priority_rx == 0) &&
- (coex_sta->low_priority_tx == 0) &&
- (coex_sta->low_priority_rx == 0)) {
- num_of_bt_counter_chk++;
-
- if (num_of_bt_counter_chk >= 3) {
- halbtc8822b1ant_query_bt_info(btcoexist);
- num_of_bt_counter_chk = 0;
- }
- }
-}
-
-static void halbtc8822b1ant_monitor_wifi_ctr(struct btc_coexist *btcoexist)
-{
- s32 wifi_rssi = 0;
- bool wifi_busy = false, wifi_under_b_mode = false, wifi_scan = false;
- static u8 cck_lock_counter, wl_noisy_count0, wl_noisy_count1 = 3,
- wl_noisy_count2;
- u32 total_cnt, cck_cnt;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
- &wifi_under_b_mode);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &wifi_scan);
-
- coex_sta->crc_ok_cck = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_OK_CCK");
- coex_sta->crc_ok_11g = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_OK_LEGACY");
- coex_sta->crc_ok_11n = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_OK_HT");
- coex_sta->crc_ok_11n_vht = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_OK_VHT");
-
- coex_sta->crc_err_cck = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_ERROR_CCK");
- coex_sta->crc_err_11g = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_ERROR_LEGACY");
- coex_sta->crc_err_11n = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_ERROR_HT");
- coex_sta->crc_err_11n_vht = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_ERROR_VHT");
-
- cck_cnt = coex_sta->crc_ok_cck + coex_sta->crc_err_cck;
-
- if (cck_cnt > 250) {
- if (wl_noisy_count2 < 3)
- wl_noisy_count2++;
-
- if (wl_noisy_count2 == 3) {
- wl_noisy_count0 = 0;
- wl_noisy_count1 = 0;
- }
-
- } else if (cck_cnt < 50) {
- if (wl_noisy_count0 < 3)
- wl_noisy_count0++;
-
- if (wl_noisy_count0 == 3) {
- wl_noisy_count1 = 0;
- wl_noisy_count2 = 0;
- }
-
- } else {
- if (wl_noisy_count1 < 3)
- wl_noisy_count1++;
-
- if (wl_noisy_count1 == 3) {
- wl_noisy_count0 = 0;
- wl_noisy_count2 = 0;
- }
- }
-
- if (wl_noisy_count2 == 3)
- coex_sta->wl_noisy_level = 2;
- else if (wl_noisy_count1 == 3)
- coex_sta->wl_noisy_level = 1;
- else
- coex_sta->wl_noisy_level = 0;
-
- if ((wifi_busy) && (wifi_rssi >= 30) && (!wifi_under_b_mode)) {
- total_cnt = coex_sta->crc_ok_cck + coex_sta->crc_ok_11g +
- coex_sta->crc_ok_11n + coex_sta->crc_ok_11n_vht;
-
- if ((coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_BUSY) ||
- (coex_dm->bt_status ==
- BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY) ||
- (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_SCO_BUSY)) {
- if (coex_sta->crc_ok_cck >
- (total_cnt - coex_sta->crc_ok_cck)) {
- if (cck_lock_counter < 3)
- cck_lock_counter++;
- } else {
- if (cck_lock_counter > 0)
- cck_lock_counter--;
- }
-
- } else {
- if (cck_lock_counter > 0)
- cck_lock_counter--;
- }
- } else {
- if (cck_lock_counter > 0)
- cck_lock_counter--;
- }
-
- if (!coex_sta->pre_ccklock) {
- if (cck_lock_counter >= 3)
- coex_sta->cck_lock = true;
- else
- coex_sta->cck_lock = false;
- } else {
- if (cck_lock_counter == 0)
- coex_sta->cck_lock = false;
- else
- coex_sta->cck_lock = true;
- }
-
- if (coex_sta->cck_lock)
- coex_sta->cck_ever_lock = true;
-
- coex_sta->pre_ccklock = coex_sta->cck_lock;
-}
-
-static bool
-halbtc8822b1ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static bool pre_wifi_busy, pre_under_4way, pre_bt_hs_on,
- pre_rf4ce_enabled, pre_bt_off, pre_bt_slave,
- pre_hid_low_pri_tx_overhead, pre_wifi_under_lps,
- pre_bt_setup_link;
- static u8 pre_hid_busy_num, pre_wl_noisy_level;
- bool wifi_busy = false, under_4way = false, bt_hs_on = false,
- rf4ce_enabled = false;
- bool wifi_connected = false;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
- &under_4way);
-
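-	/* return true when any monitored WL/BT state differs from the
-	 * value seen in the previous poll
-	 */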
- if (coex_sta->bt_disabled != pre_bt_off) {
- pre_bt_off = coex_sta->bt_disabled;
-
- if (coex_sta->bt_disabled)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is disabled !!\n");
- else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is enabled !!\n");
-
- coex_sta->bt_coex_supported_feature = 0;
- coex_sta->bt_coex_supported_version = 0;
- return true;
- }
- btcoexist->btc_get(btcoexist, BTC_GET_BL_RF4CE_CONNECTED,
- &rf4ce_enabled);
-
- if (rf4ce_enabled != pre_rf4ce_enabled) {
- pre_rf4ce_enabled = rf4ce_enabled;
-
- if (rf4ce_enabled)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], rf4ce is enabled !!\n");
- else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], rf4ce is disabled !!\n");
-
- return true;
- }
-
- if (wifi_connected) {
- if (wifi_busy != pre_wifi_busy) {
- pre_wifi_busy = wifi_busy;
- return true;
- }
- if (under_4way != pre_under_4way) {
- pre_under_4way = under_4way;
- return true;
- }
- if (bt_hs_on != pre_bt_hs_on) {
- pre_bt_hs_on = bt_hs_on;
- return true;
- }
- if (coex_sta->wl_noisy_level != pre_wl_noisy_level) {
- pre_wl_noisy_level = coex_sta->wl_noisy_level;
- return true;
- }
- if (coex_sta->under_lps != pre_wifi_under_lps) {
- pre_wifi_under_lps = coex_sta->under_lps;
- if (coex_sta->under_lps)
- return true;
- }
- }
-
- if (!coex_sta->bt_disabled) {
- if (coex_sta->hid_busy_num != pre_hid_busy_num) {
- pre_hid_busy_num = coex_sta->hid_busy_num;
- return true;
- }
-
- if (bt_link_info->slave_role != pre_bt_slave) {
- pre_bt_slave = bt_link_info->slave_role;
- return true;
- }
-
- if (pre_hid_low_pri_tx_overhead !=
- coex_sta->is_hid_low_pri_tx_overhead) {
- pre_hid_low_pri_tx_overhead =
- coex_sta->is_hid_low_pri_tx_overhead;
- return true;
- }
-
- if (pre_bt_setup_link != coex_sta->is_setup_link) {
- pre_bt_setup_link = coex_sta->is_setup_link;
- return true;
- }
- }
-
- return false;
-}
-
-static void halbtc8822b1ant_update_bt_link_info(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- bool bt_busy = false;
-
- coex_sta->num_of_profile = 0;
-
- /* set link exist status */
- if (!(coex_sta->bt_info & BT_INFO_8822B_1ANT_B_CONNECTION)) {
- coex_sta->bt_link_exist = false;
- coex_sta->pan_exist = false;
- coex_sta->a2dp_exist = false;
- coex_sta->hid_exist = false;
- coex_sta->sco_exist = false;
- } else { /* connection exists */
- coex_sta->bt_link_exist = true;
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_FTP) {
- coex_sta->pan_exist = true;
- coex_sta->num_of_profile++;
- } else {
- coex_sta->pan_exist = false;
- }
-
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_A2DP) {
- coex_sta->a2dp_exist = true;
- coex_sta->num_of_profile++;
- } else {
- coex_sta->a2dp_exist = false;
- }
-
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_HID) {
- coex_sta->hid_exist = true;
- coex_sta->num_of_profile++;
- } else {
- coex_sta->hid_exist = false;
- }
-
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_SCO_ESCO) {
- coex_sta->sco_exist = true;
- coex_sta->num_of_profile++;
- } else {
- coex_sta->sco_exist = false;
- }
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
-
- bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
- bt_link_info->sco_exist = coex_sta->sco_exist;
- bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
- bt_link_info->pan_exist = coex_sta->pan_exist;
- bt_link_info->hid_exist = coex_sta->hid_exist;
- bt_link_info->acl_busy = coex_sta->acl_busy;
-
-	/* workaround for HS mode */
- if (bt_hs_on) {
- bt_link_info->pan_exist = true;
- bt_link_info->bt_link_exist = true;
- }
-
- /* check if Sco only */
- if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
- !bt_link_info->pan_exist && !bt_link_info->hid_exist)
- bt_link_info->sco_only = true;
- else
- bt_link_info->sco_only = false;
-
- /* check if A2dp only */
- if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
- !bt_link_info->pan_exist && !bt_link_info->hid_exist)
- bt_link_info->a2dp_only = true;
- else
- bt_link_info->a2dp_only = false;
-
- /* check if Pan only */
- if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
- bt_link_info->pan_exist && !bt_link_info->hid_exist)
- bt_link_info->pan_only = true;
- else
- bt_link_info->pan_only = false;
-
- /* check if Hid only */
- if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
- !bt_link_info->pan_exist && bt_link_info->hid_exist)
- bt_link_info->hid_only = true;
- else
- bt_link_info->hid_only = false;
-
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_INQ_PAGE) {
- coex_dm->bt_status = BT_8822B_1ANT_BT_STATUS_INQ_PAGE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Inq/page!!!\n");
- } else if (!(coex_sta->bt_info & BT_INFO_8822B_1ANT_B_CONNECTION)) {
- coex_dm->bt_status = BT_8822B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
- } else if (coex_sta->bt_info == BT_INFO_8822B_1ANT_B_CONNECTION) {
-		/* connection exists but not busy */
- coex_dm->bt_status = BT_8822B_1ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
- } else if (((coex_sta->bt_info & BT_INFO_8822B_1ANT_B_SCO_ESCO) ||
- (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_SCO_BUSY)) &&
- (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_ACL_BUSY)) {
- coex_dm->bt_status = BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL SCO busy!!!\n");
- } else if ((coex_sta->bt_info & BT_INFO_8822B_1ANT_B_SCO_ESCO) ||
- (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_SCO_BUSY)) {
- coex_dm->bt_status = BT_8822B_1ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
- } else if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_ACL_BUSY) {
- coex_dm->bt_status = BT_8822B_1ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
- } else {
- coex_dm->bt_status = BT_8822B_1ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
- }
-
- if ((coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_BUSY) ||
- (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_SCO_BUSY) ||
- (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY))
- bt_busy = true;
- else
- bt_busy = false;
-
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
-}
-
-static void halbtc8822b1ant_update_wifi_ch_info(struct btc_coexist *btcoexist,
- u8 type)
-{
- u8 h2c_parameter[3] = {0};
- u32 wifi_bw;
- u8 wifi_central_chnl;
-
-	/* only for 2.4G do we need to inform BT of the channel mask */
- btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
- &wifi_central_chnl);
- if ((type == BTC_MEDIA_CONNECT) && (wifi_central_chnl <= 14)) {
-		/* enable BT AFH skip of the WL channel for 8822b
-		 * because of BT Rx LO interference
-		 */
- h2c_parameter[0] = 0x1;
- h2c_parameter[1] = wifi_central_chnl;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- if (wifi_bw == BTC_WIFI_BW_HT40)
- h2c_parameter[2] = 0x30;
- else
- h2c_parameter[2] = 0x20;
- }
-
- coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
- coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
- coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
-
- btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
-}
-
-static u8 halbtc8822b1ant_action_algorithm(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- u8 algorithm = BT_8822B_1ANT_COEX_ALGO_UNDEFINED;
- u8 num_of_diff_profile = 0;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
-
- if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No BT link exists!!!\n");
- return algorithm;
- }
-
- if (bt_link_info->sco_exist)
- num_of_diff_profile++;
- if (bt_link_info->hid_exist)
- num_of_diff_profile++;
- if (bt_link_info->pan_exist)
- num_of_diff_profile++;
- if (bt_link_info->a2dp_exist)
- num_of_diff_profile++;
-
- if (num_of_diff_profile == 1) {
- if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO only\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_SCO;
- } else {
- if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = HID only\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_HID;
- } else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = A2DP only\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_A2DP;
- } else if (bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = PAN(HS) only\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_PANHS;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_PANEDR;
- }
- }
- }
- } else if (num_of_diff_profile == 2) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_HID;
- } else if (bt_link_info->a2dp_exist) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_SCO;
- } else if (bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_SCO;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- }
- } else {
- if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_HID_A2DP;
- } else if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- } else if (bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_A2DP_PANHS;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_PANEDR_A2DP;
- }
- }
- }
- } else if (num_of_diff_profile == 3) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_HID;
- } else if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- } else if (bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
- algorithm = BT_8822B_1ANT_COEX_ALGO_SCO;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- }
- } else {
- if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
- }
- }
- }
- } else if (num_of_diff_profile >= 3) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
-
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
- algorithm =
- BT_8822B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- }
- }
- }
-
- return algorithm;
-}
-
-static void halbtc8822b1ant_low_penalty_ra(struct btc_coexist *btcoexist,
- bool force_exec, bool low_penalty_ra)
-{
- coex_dm->cur_low_penalty_ra = low_penalty_ra;
-
- if (!force_exec) {
- if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
- return;
- }
-
- if (low_penalty_ra)
- btcoexist->btc_phydm_modify_ra_pcr_threshold(btcoexist, 0, 25);
- else
- btcoexist->btc_phydm_modify_ra_pcr_threshold(btcoexist, 0, 0);
-
- coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
-}
-
-static void halbtc8822b1ant_write_score_board(struct btc_coexist *btcoexist,
- u16 bitpos, bool state)
-{
- static u16 originalval = 0x8002;
-
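-	/* keep a cached copy so individual scoreboard bits can be set or
-	 * cleared before writing the value back to register 0xaa
-	 */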
- if (state)
- originalval = originalval | bitpos;
- else
- originalval = originalval & (~bitpos);
-
- btcoexist->btc_write_2byte(btcoexist, 0xaa, originalval);
-}
-
-static void halbtc8822b1ant_read_score_board(struct btc_coexist *btcoexist,
- u16 *score_board_val)
-{
- *score_board_val =
- (btcoexist->btc_read_2byte(btcoexist, 0xaa)) & 0x7fff;
-}
-
-static void halbtc8822b1ant_post_state_to_bt(struct btc_coexist *btcoexist,
- u16 type, bool state)
-{
- halbtc8822b1ant_write_score_board(btcoexist, (u16)type, state);
-}
-
-static void
-halbtc8822b1ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u32 bt_disable_cnt;
- bool bt_active = true, bt_disabled = false, wifi_under_5g = false;
- u16 u16tmp;
-
-	/* This function checks whether BT is disabled */
-
- /* Read BT on/off status from scoreboard[1],
- * enable this only if BT patch support this feature
- */
- halbtc8822b1ant_read_score_board(btcoexist, &u16tmp);
-
- bt_active = u16tmp & BIT(1);
-
- if (bt_active) {
- bt_disable_cnt = 0;
- bt_disabled = false;
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
- &bt_disabled);
- } else {
- bt_disable_cnt++;
- if (bt_disable_cnt >= 2) {
- bt_disabled = true;
- bt_disable_cnt = 2;
- }
-
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
- &bt_disabled);
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if ((wifi_under_5g) || (bt_disabled))
- halbtc8822b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, false);
- else
- halbtc8822b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, true);
-
- if (coex_sta->bt_disabled != bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is from %s to %s!!\n",
- (coex_sta->bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
- coex_sta->bt_disabled = bt_disabled;
- }
-}
-
-static void halbtc8822b1ant_enable_gnt_to_gpio(struct btc_coexist *btcoexist,
- bool isenable)
-{
- static u8 bit_val[5] = {0, 0, 0, 0, 0};
- static bool state;
-
- if (!btcoexist->dbg_mode_1ant)
- return;
-
- if (state == isenable)
- return;
-
- state = isenable;
-
- if (isenable) {
- /* enable GNT_WL, GNT_BT to GPIO for debug */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x73, 0x8, 0x1);
-
- /* store original value */
- bit_val[0] =
- (btcoexist->btc_read_1byte(btcoexist, 0x66) & BIT(4)) >>
- 4; /*0x66[4] */
- bit_val[1] = (btcoexist->btc_read_1byte(btcoexist, 0x67) &
- BIT(0)); /*0x66[8] */
- bit_val[2] =
- (btcoexist->btc_read_1byte(btcoexist, 0x42) & BIT(3)) >>
- 3; /*0x40[19] */
- bit_val[3] =
- (btcoexist->btc_read_1byte(btcoexist, 0x65) & BIT(7)) >>
- 7; /*0x64[15] */
- bit_val[4] =
- (btcoexist->btc_read_1byte(btcoexist, 0x72) & BIT(2)) >>
- 2; /*0x70[18] */
-
- /* switch GPIO Mux */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x66, BIT(4),
- 0x0); /*0x66[4] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, BIT(0),
- 0x0); /*0x66[8] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x42, BIT(3),
- 0x0); /*0x40[19] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x65, BIT(7),
- 0x0); /*0x64[15] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x72, BIT(2),
- 0x0); /*0x70[18] = 0 */
-
- } else {
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x73, 0x8, 0x0);
-
- /* Restore original value */
- /* switch GPIO Mux */
-		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x66, BIT(4),
-						   bit_val[0]); /* restore 0x66[4] */
-		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, BIT(0),
-						   bit_val[1]); /* restore 0x66[8] */
-		btcoexist->btc_write_1byte_bitmask(
-			btcoexist, 0x42, BIT(3), bit_val[2]); /* restore 0x40[19] */
-		btcoexist->btc_write_1byte_bitmask(
-			btcoexist, 0x65, BIT(7), bit_val[3]); /* restore 0x64[15] */
-		btcoexist->btc_write_1byte_bitmask(
-			btcoexist, 0x72, BIT(2), bit_val[4]); /* restore 0x70[18] */
- }
-}
-
-static u32
-halbtc8822b1ant_ltecoex_indirect_read_reg(struct btc_coexist *btcoexist,
- u16 reg_addr)
-{
- u32 delay_count = 0;
-
-	/* wait for the ready bit before accessing 0x1700 */
- while (1) {
- if ((btcoexist->btc_read_1byte(btcoexist, 0x1703) & BIT(5)) ==
- 0) {
- mdelay(50);
- delay_count++;
- if (delay_count >= 10) {
- delay_count = 0;
- break;
- }
- } else {
- break;
- }
- }
-
- btcoexist->btc_write_4byte(btcoexist, 0x1700, 0x800F0000 | reg_addr);
-
- return btcoexist->btc_read_4byte(btcoexist, 0x1708); /* get read data */
-}
-
-static void
-halbtc8822b1ant_ltecoex_indirect_write_reg(struct btc_coexist *btcoexist,
- u16 reg_addr, u32 bit_mask,
- u32 reg_value)
-{
- u32 val, i = 0, bitpos = 0, delay_count = 0;
-
- if (bit_mask == 0x0)
- return;
-
- if (bit_mask == 0xffffffff) {
-		/* wait for the ready bit before accessing 0x1700/0x1704 */
- while (1) {
- if ((btcoexist->btc_read_1byte(btcoexist, 0x1703) &
- BIT(5)) == 0) {
- mdelay(50);
- delay_count++;
- if (delay_count >= 10) {
- delay_count = 0;
- break;
- }
- } else {
- break;
- }
- }
-
- btcoexist->btc_write_4byte(btcoexist, 0x1704,
- reg_value); /* put write data */
-
- btcoexist->btc_write_4byte(btcoexist, 0x1700,
- 0xc00F0000 | reg_addr);
- } else {
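-		/* find the lowest set bit of the mask; the new value is
-		 * shifted into that position
-		 */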
- for (i = 0; i <= 31; i++) {
- if (((bit_mask >> i) & 0x1) == 0x1) {
- bitpos = i;
- break;
- }
- }
-
- /* read back register value before write */
- val = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- reg_addr);
- val = (val & (~bit_mask)) | (reg_value << bitpos);
-
-		/* wait for the ready bit before accessing 0x1700/0x1704 */
- while (1) {
- if ((btcoexist->btc_read_1byte(btcoexist, 0x1703) &
- BIT(5)) == 0) {
- mdelay(50);
- delay_count++;
- if (delay_count >= 10) {
- delay_count = 0;
- break;
- }
- } else {
- break;
- }
- }
-
- btcoexist->btc_write_4byte(btcoexist, 0x1704,
- val); /* put write data */
-
- btcoexist->btc_write_4byte(btcoexist, 0x1700,
- 0xc00F0000 | reg_addr);
- }
-}
-
-static void halbtc8822b1ant_ltecoex_enable(struct btc_coexist *btcoexist,
- bool enable)
-{
- u8 val;
-
- val = (enable) ? 1 : 0;
- /* 0x38[7] */
- halbtc8822b1ant_ltecoex_indirect_write_reg(btcoexist, 0x38, 0x80, val);
-}
-
-static void
-halbtc8822b1ant_ltecoex_pathcontrol_owner(struct btc_coexist *btcoexist,
- bool wifi_control)
-{
- u8 val;
-
- val = (wifi_control) ? 1 : 0;
- /* 0x70[26] */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x73, 0x4, val);
-}
-
-static void halbtc8822b1ant_ltecoex_set_gnt_bt(struct btc_coexist *btcoexist,
- u8 control_block,
- bool sw_control, u8 state)
-{
- u32 val = 0, bit_mask;
-
- state = state & 0x1;
-	/* LTE indirect 0x38 = 0xccxx (sw: gnt_wl = 1, sw gnt_bt = 1)
-	 * 0x38 = 0xddxx (sw: gnt_bt = 1, sw gnt_wl = 0)
-	 * 0x38 = 0x55xx (hw PTA: gnt_wl/gnt_bt)
-	 */
- val = (sw_control) ? ((state << 1) | 0x1) : 0;
-
- switch (control_block) {
- case BT_8822B_1ANT_GNT_BLOCK_RFC_BB:
- default:
- bit_mask = 0xc000;
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[15:14] */
- bit_mask = 0x0c00;
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[11:10] */
- break;
- case BT_8822B_1ANT_GNT_BLOCK_RFC:
- bit_mask = 0xc000;
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[15:14] */
- break;
- case BT_8822B_1ANT_GNT_BLOCK_BB:
- bit_mask = 0x0c00;
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[11:10] */
- break;
- }
-}
-
-static void halbtc8822b1ant_ltecoex_set_gnt_wl(struct btc_coexist *btcoexist,
- u8 control_block,
- bool sw_control, u8 state)
-{
- u32 val = 0, bit_mask;
-	/* LTE indirect 0x38 = 0xccxx (sw: gnt_wl = 1, sw gnt_bt = 1)
-	 * 0x38 = 0xddxx (sw: gnt_bt = 1, sw gnt_wl = 0)
-	 * 0x38 = 0x55xx (hw PTA: gnt_wl/gnt_bt)
-	 */
-
- state = state & 0x1;
- val = (sw_control) ? ((state << 1) | 0x1) : 0;
-
- switch (control_block) {
- case BT_8822B_1ANT_GNT_BLOCK_RFC_BB:
- default:
- bit_mask = 0x3000;
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[13:12] */
- bit_mask = 0x0300;
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[9:8] */
- break;
- case BT_8822B_1ANT_GNT_BLOCK_RFC:
- bit_mask = 0x3000;
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[13:12] */
- break;
- case BT_8822B_1ANT_GNT_BLOCK_BB:
- bit_mask = 0x0300;
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[9:8] */
- break;
- }
-}
-
-static void
-halbtc8822b1ant_ltecoex_set_coex_table(struct btc_coexist *btcoexist,
- u8 table_type, u16 table_content)
-{
- u16 reg_addr = 0x0000;
-
- switch (table_type) {
- case BT_8822B_1ANT_CTT_WL_VS_LTE:
- reg_addr = 0xa0;
- break;
- case BT_8822B_1ANT_CTT_BT_VS_LTE:
- reg_addr = 0xa4;
- break;
- }
-
- if (reg_addr != 0x0000)
- halbtc8822b1ant_ltecoex_indirect_write_reg(
- btcoexist, reg_addr, 0xffff,
- table_content); /* 0xa0[15:0] or 0xa4[15:0] */
-}
-
-static void halbtc8822b1ant_set_wltoggle_coex_table(
- struct btc_coexist *btcoexist, bool force_exec, u8 interval,
- u8 val0x6c4_b0, u8 val0x6c4_b1, u8 val0x6c4_b2, u8 val0x6c4_b3)
-{
- static u8 pre_h2c_parameter[6] = {0};
- u8 cur_h2c_parameter[6] = {0};
- u8 i, match_cnt = 0;
-
-	cur_h2c_parameter[0] = 0x7; /* op_code, 0x7 = wlan toggle slot */
-
- cur_h2c_parameter[1] = interval;
- cur_h2c_parameter[2] = val0x6c4_b0;
- cur_h2c_parameter[3] = val0x6c4_b1;
- cur_h2c_parameter[4] = val0x6c4_b2;
- cur_h2c_parameter[5] = val0x6c4_b3;
-
- if (!force_exec) {
- for (i = 1; i <= 5; i++) {
- if (cur_h2c_parameter[i] != pre_h2c_parameter[i])
- break;
-
- match_cnt++;
- }
-
- if (match_cnt == 5)
- return;
- }
-
- for (i = 1; i <= 5; i++)
- pre_h2c_parameter[i] = cur_h2c_parameter[i];
-
- btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, cur_h2c_parameter);
-}
-
-static void halbtc8822b1ant_set_coex_table(struct btc_coexist *btcoexist,
- u32 val0x6c0, u32 val0x6c4,
- u32 val0x6c8, u8 val0x6cc)
-{
- btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
-
- btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
-
- btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
-
- btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
-}
-
-static void halbtc8822b1ant_coex_table(struct btc_coexist *btcoexist,
- bool force_exec, u32 val0x6c0,
- u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
-{
- coex_dm->cur_val0x6c0 = val0x6c0;
- coex_dm->cur_val0x6c4 = val0x6c4;
- coex_dm->cur_val0x6c8 = val0x6c8;
- coex_dm->cur_val0x6cc = val0x6cc;
-
- if (!force_exec) {
- if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
- (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
- (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
- (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
- return;
- }
- halbtc8822b1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8,
- val0x6cc);
-
- coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
- coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
- coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
- coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
-}
-
-static void halbtc8822b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
-{
- u32 break_table;
- u8 select_table;
-
- coex_sta->coex_table_type = type;
-
- if (coex_sta->concurrent_rx_mode_on) {
- break_table = 0xf0ffffff; /* set WL hi-pri can break BT */
-		select_table = 0x3; /* set Tx response = Hi-Pri
-				     * (e.g., transmitting ACK, BA, CTS)
-				     */
- } else {
- break_table = 0xffffff;
- select_table = 0x3;
- }
-
- switch (type) {
- case 0:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x55555555, break_table,
- select_table);
- break;
- case 1:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5a5a5a5a, break_table,
- select_table);
- break;
- case 2:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xaa5a5a5a,
- 0xaa5a5a5a, break_table,
- select_table);
- break;
- case 3:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0xaa5a5a5a, break_table,
- select_table);
- break;
- case 4:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xaa555555,
- 0xaa5a5a5a, break_table,
- select_table);
- break;
- case 5:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
- 0x5a5a5a5a, break_table,
- select_table);
- break;
- case 6:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0xaaaaaaaa, break_table,
- select_table);
- break;
- case 7:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
- 0xaaaaaaaa, break_table,
- select_table);
- break;
- case 8:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xffffffff,
- 0xffffffff, break_table,
- select_table);
- break;
- case 9:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x5a5a5555,
- 0xaaaa5a5a, break_table,
- select_table);
- break;
- case 10:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xaaaa5aaa,
- 0xaaaa5aaa, break_table,
- select_table);
- break;
- case 11:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xaaaaa5aa,
- 0xaaaaaaaa, break_table,
- select_table);
- break;
- case 12:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xaaaaa5aa,
- 0xaaaaa5aa, break_table,
- select_table);
- break;
- case 13:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0xaaaa5a5a, break_table,
- select_table);
- break;
- case 14:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x5a5a555a,
- 0xaaaa5a5a, break_table,
- select_table);
- break;
- case 15:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0xaaaa55aa, break_table,
- select_table);
- break;
- case 16:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x5a5a555a,
- 0x5a5a555a, break_table,
- select_table);
- break;
- case 17:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xaaaa55aa,
- 0xaaaa55aa, break_table,
- select_table);
- break;
- case 18:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5aaa5a5a, break_table,
- select_table);
- break;
- case 19:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0xa5555555,
- 0xaaaa5aaa, break_table,
- select_table);
- break;
- case 20:
- halbtc8822b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0xaaaa5aaa, break_table,
- select_table);
- break;
- default:
- break;
- }
-}
-
-static void
-halbtc8822b1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
- bool enable)
-{
- u8 h2c_parameter[1] = {0};
-
- if (enable)
- h2c_parameter[0] |= BIT(0); /* function enable */
-
- btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
-}
-
-static void halbtc8822b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
- bool force_exec, bool enable)
-{
- coex_dm->cur_ignore_wlan_act = enable;
-
- if (!force_exec) {
- if (coex_dm->pre_ignore_wlan_act ==
- coex_dm->cur_ignore_wlan_act) {
- coex_dm->pre_ignore_wlan_act =
- coex_dm->cur_ignore_wlan_act;
- return;
- }
- }
-
- halbtc8822b1ant_set_fw_ignore_wlan_act(btcoexist, enable);
-
- coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
-}
-
-static void halbtc8822b1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
- u8 lps_val, u8 rpwm_val)
-{
- u8 lps = lps_val;
- u8 rpwm = rpwm_val;
-
- btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps);
- btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
-}
-
-static void halbtc8822b1ant_lps_rpwm(struct btc_coexist *btcoexist,
- bool force_exec, u8 lps_val, u8 rpwm_val)
-{
- coex_dm->cur_lps = lps_val;
- coex_dm->cur_rpwm = rpwm_val;
-
- if (!force_exec) {
- if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
- (coex_dm->pre_rpwm == coex_dm->cur_rpwm))
- return;
- }
- halbtc8822b1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
-
- coex_dm->pre_lps = coex_dm->cur_lps;
- coex_dm->pre_rpwm = coex_dm->cur_rpwm;
-}
-
-static void halbtc8822b1ant_ps_tdma_check_for_power_save_state(
- struct btc_coexist *btcoexist, bool new_ps_state)
-{
- u8 lps_mode = 0x0;
- u8 h2c_parameter[5] = {0x8, 0, 0, 0, 0};
-
- btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
-
- if (lps_mode) { /* already under LPS state */
- if (new_ps_state) {
- /* keep state under LPS, do nothing. */
- } else {
- /* will leave LPS state, turn off psTdma first */
-
- btcoexist->btc_fill_h2c(btcoexist, 0x60, 5,
- h2c_parameter);
- }
- } else { /* NO PS state */
- if (new_ps_state) {
- /* will enter LPS state, turn off psTdma first */
-
- btcoexist->btc_fill_h2c(btcoexist, 0x60, 5,
- h2c_parameter);
- } else {
- /* keep state under NO PS state, do nothing. */
- }
- }
-}
-
-static bool halbtc8822b1ant_power_save_state(struct btc_coexist *btcoexist,
- u8 ps_type, u8 lps_val,
- u8 rpwm_val)
-{
- bool low_pwr_disable = false, result = true;
-
- switch (ps_type) {
- case BTC_PS_WIFI_NATIVE:
- /* recover to original 32k low power setting */
- coex_sta->force_lps_ctrl = false;
- low_pwr_disable = false;
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
- &low_pwr_disable);
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
- break;
- case BTC_PS_LPS_ON:
-
- coex_sta->force_lps_ctrl = true;
- halbtc8822b1ant_ps_tdma_check_for_power_save_state(btcoexist,
- true);
- halbtc8822b1ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
- rpwm_val);
- /* when coex force to enter LPS, do not enter 32k low power. */
- low_pwr_disable = true;
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
- &low_pwr_disable);
-		/* power save must be executed before psTdma */
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
-
- break;
- case BTC_PS_LPS_OFF:
-
- coex_sta->force_lps_ctrl = true;
- halbtc8822b1ant_ps_tdma_check_for_power_save_state(btcoexist,
- false);
- result = btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
- NULL);
-
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static void halbtc8822b1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
- u8 byte1, u8 byte2, u8 byte3,
- u8 byte4, u8 byte5)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[5] = {0};
- u8 real_byte1 = byte1, real_byte5 = byte5;
- bool ap_enable = false, result = false;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- if (byte5 & BIT(2))
- coex_sta->is_tdma_btautoslot = true;
- else
- coex_sta->is_tdma_btautoslot = false;
-
-	/* release the bt-auto slot when an auto-slot hang is detected!! */
- if (coex_sta->is_tdma_btautoslot)
- if ((coex_sta->is_tdma_btautoslot_hang) ||
- (bt_link_info->slave_role))
- byte5 = byte5 & 0xfb;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
- &ap_enable);
-
- if ((ap_enable) && (byte1 & BIT(4) && !(byte1 & BIT(5)))) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == FW for 1Ant AP mode\n", __func__);
-
- real_byte1 &= ~BIT(4);
- real_byte1 |= BIT(5);
-
- real_byte5 |= BIT(5);
- real_byte5 &= ~BIT(6);
-
- halbtc8822b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
-
- } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == Force LPS (byte1 = 0x%x)\n",
- __func__, byte1);
- if (!halbtc8822b1ant_power_save_state(btcoexist, BTC_PS_LPS_OFF,
- 0x50, 0x4))
- result = true;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == native power save (byte1 = 0x%x)\n",
- __func__, byte1);
- halbtc8822b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
- }
-
- coex_sta->is_set_ps_state_fail = result;
-
- if (!coex_sta->is_set_ps_state_fail) {
- h2c_parameter[0] = real_byte1;
- h2c_parameter[1] = byte2;
- h2c_parameter[2] = byte3;
- h2c_parameter[3] = byte4;
- h2c_parameter[4] = real_byte5;
-
- coex_dm->ps_tdma_para[0] = real_byte1;
- coex_dm->ps_tdma_para[1] = byte2;
- coex_dm->ps_tdma_para[2] = byte3;
- coex_dm->ps_tdma_para[3] = byte4;
- coex_dm->ps_tdma_para[4] = real_byte5;
-
- btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
-
- } else {
- coex_sta->cnt_set_ps_state_fail++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == Force Leave LPS Fail (cnt = %d)\n",
- __func__, coex_sta->cnt_set_ps_state_fail);
- }
-}
-
-static void halbtc8822b1ant_ps_tdma(struct btc_coexist *btcoexist,
- bool force_exec, bool turn_on, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool wifi_busy = false;
- static u8 ps_tdma_byte4_modify, pre_ps_tdma_byte4_modify;
- static bool pre_wifi_busy;
-
- coex_dm->cur_ps_tdma_on = turn_on;
- coex_dm->cur_ps_tdma = type;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- if (wifi_busy != pre_wifi_busy) {
- force_exec = true;
- pre_wifi_busy = wifi_busy;
- }
-
- /* 0x778 = 0x1 at wifi slot (no blocking BT Low-Pri pkts) */
- if (bt_link_info->slave_role)
- ps_tdma_byte4_modify = 0x1;
- else
- ps_tdma_byte4_modify = 0x0;
-
- if (pre_ps_tdma_byte4_modify != ps_tdma_byte4_modify) {
- force_exec = true;
- pre_ps_tdma_byte4_modify = ps_tdma_byte4_modify;
- }
-
- if (!force_exec) {
- if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
- (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Skip TDMA because no change TDMA(%s, %d)\n",
- (coex_dm->cur_ps_tdma_on ? "on" : "off"),
- coex_dm->cur_ps_tdma);
- return;
- }
- }
-
- if (coex_dm->cur_ps_tdma_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
-
- btcoexist->btc_write_1byte_bitmask(
-			btcoexist, 0x550, 0x8, 0x1); /* enable TBTT interrupt */
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(off, %d) **********\n",
- coex_dm->cur_ps_tdma);
- }
-
- if (turn_on) {
-		/* enable TBTT interrupt */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x550, 0x8, 0x1);
-
- switch (type) {
- default:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x35,
- 0x03, 0x11, 0x11);
- break;
- case 1:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x3a,
- 0x03, 0x11, 0x10);
- break;
- case 3:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x30,
- 0x03, 0x10, 0x50);
- break;
- case 4:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x21,
- 0x03, 0x10, 0x50);
- break;
- case 5:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x15,
- 0x3, 0x11, 0x11);
- break;
- case 6:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x20,
- 0x3, 0x11, 0x10);
- break;
- case 7:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x51, 0x10, 0x03, 0x10,
- 0x54 | ps_tdma_byte4_modify);
- break;
- case 8:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x51, 0x10, 0x03, 0x10,
- 0x14 | ps_tdma_byte4_modify);
- break;
- case 11:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x61, 0x25, 0x03, 0x11,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 12:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x51, 0x30, 0x03, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- case 13:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x51, 0x10, 0x07, 0x10,
- 0x54 | ps_tdma_byte4_modify);
- break;
- case 14:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x51, 0x15, 0x03, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- case 15:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x51, 0x20, 0x03, 0x10,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 17:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x61, 0x10, 0x03, 0x11,
- 0x14 | ps_tdma_byte4_modify);
- break;
- case 18:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x51, 0x10, 0x03, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
-
- case 20:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x30,
- 0x03, 0x11, 0x10);
- break;
- case 22:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x25,
- 0x03, 0x11, 0x10);
- break;
- case 27:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x10,
- 0x03, 0x11, 0x15);
- break;
- case 32:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x35,
- 0x3, 0x11, 0x11);
- break;
- case 33:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x35,
- 0x03, 0x11, 0x10);
- break;
- case 41:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x45,
- 0x3, 0x11, 0x11);
- break;
- case 42:
- halbtc8822b1ant_set_fw_pstdma(
- btcoexist, 0x51, 0x1e, 0x3, 0x10,
- 0x14 | ps_tdma_byte4_modify);
- break;
- case 43:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x45,
- 0x3, 0x10, 0x14);
- break;
- case 44:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x25,
- 0x3, 0x10, 0x10);
- break;
- case 45:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x29,
- 0x3, 0x10, 0x10);
- break;
- case 46:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x1a,
- 0x3, 0x10, 0x10);
- break;
- case 47:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x32,
- 0x3, 0x10, 0x10);
- break;
- case 48:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x29,
- 0x3, 0x10, 0x10);
- break;
- case 49:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x55, 0x10,
- 0x3, 0x10, 0x54);
- break;
- case 50:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x51, 0x4a,
- 0x3, 0x10, 0x10);
- break;
- case 51:
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x61, 0x35,
- 0x3, 0x10, 0x11);
- break;
- }
- } else {
- switch (type) {
- case 0:
- default: /* Software control, Antenna at BT side */
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x0, 0x0);
- break;
- case 8: /* PTA Control */
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x8, 0x0, 0x0,
- 0x0, 0x0);
- break;
- case 9: /* Software control, Antenna at WiFi side */
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x0, 0x0);
- break;
- case 10: /* under 5G , 0x778=1*/
- halbtc8822b1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x0, 0x0);
-
- break;
- }
- }
-
- if (!coex_sta->is_set_ps_state_fail) {
- /* update pre state */
- coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
- coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
- }
-}
-
-static void halbtc8822b1ant_sw_mechanism(struct btc_coexist *btcoexist,
- bool low_penalty_ra)
-{
- halbtc8822b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
-}
-
-/* RFE type comes from efuse; main/aux antenna inversion is not needed
- * because the chip is 2x2 and the control types are the same
- */
-
-static void halbtc8822b1ant_set_rfe_type(struct btc_coexist *btcoexist)
-{
- struct btc_board_info *board_info = &btcoexist->board_info;
-
- /* Ext switch buffer mux */
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x1991, 0x3, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbe, 0x8, 0x0);
-
-	/* the following setup should be read from efuse in the future */
- rfe_type->rfe_module_type = board_info->rfe_type;
-
- rfe_type->ext_ant_switch_ctrl_polarity = 0;
-
- switch (rfe_type->rfe_module_type) {
- case 0:
- default:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 1:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 2:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 3:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 4:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 5:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 6:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 7:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- }
-}
-
-/* antenna control by bb/mac/bt/antdiv/pta via writes to 0x4c, 0xcb4, 0xcbd */
-
-static void halbtc8822b1ant_set_ext_ant_switch(struct btc_coexist *btcoexist,
- bool force_exec, u8 ctrl_type,
- u8 pos_type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool switch_polatiry_inverse = false;
- u8 regval_0xcbd = 0, regval_0x64;
- u32 u32tmp1 = 0, u32tmp2 = 0, u32tmp3 = 0;
-
- /* Ext switch buffer mux */
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x1991, 0x3, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbe, 0x8, 0x0);
-
- if (!rfe_type->ext_ant_switch_exist)
- return;
-
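-	/* control type in the high byte, position type in the low byte */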
- coex_dm->cur_ext_ant_switch_status = (ctrl_type << 8) + pos_type;
-
- if (!force_exec) {
- if (coex_dm->pre_ext_ant_switch_status ==
- coex_dm->cur_ext_ant_switch_status)
- return;
- }
-
- coex_dm->pre_ext_ant_switch_status = coex_dm->cur_ext_ant_switch_status;
-
-	/* swap control polarity if a different switch control polarity is used */
- /* Normal switch polarity for SPDT,
- * 0xcbd[1:0] = 2b'01 => Ant to BTG,
- * 0xcbd[1:0] = 2b'10 => Ant to WLG
- */
- switch_polatiry_inverse = rfe_type->ext_ant_switch_ctrl_polarity == 1;
-
- switch (pos_type) {
- default:
- case BT_8822B_1ANT_EXT_ANT_SWITCH_TO_BT:
- case BT_8822B_1ANT_EXT_ANT_SWITCH_TO_NOCARE:
-
- break;
- case BT_8822B_1ANT_EXT_ANT_SWITCH_TO_WLG:
- break;
- case BT_8822B_1ANT_EXT_ANT_SWITCH_TO_WLA:
- break;
- }
-
- if (rfe_type->ext_ant_switch_type ==
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT) {
- switch (ctrl_type) {
- default:
- case BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_BBSW:
- /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e,
- 0x80, 0x0);
- /* 0x4c[24] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f,
- 0x01, 0x1);
- /* BB SW, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin*/
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb4,
- 0xff, 0x77);
-
- /* 0xcbd[1:0] = 2b'01 for no switch_polatiry_inverse,
- * ANTSWB =1, ANTSW =0
- */
- regval_0xcbd = (!switch_polatiry_inverse ? 0x1 : 0x2);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd,
- 0x3, regval_0xcbd);
-
- break;
- case BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_PTA:
- /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e,
- 0x80, 0x0);
- /* 0x4c[24] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f,
- 0x01, 0x1);
- /* PTA, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb4,
- 0xff, 0x66);
-
- /* 0xcbd[1:0] = 2b'10 for no switch_polatiry_inverse,
- * ANTSWB =1, ANTSW =0 @ GNT_BT=1
- */
- regval_0xcbd = (!switch_polatiry_inverse ? 0x2 : 0x1);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd,
- 0x3, regval_0xcbd);
-
- break;
- case BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_ANTDIV:
- /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e,
- 0x80, 0x0);
- /* 0x4c[24] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f,
- 0x01, 0x1);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb4,
- 0xff, 0x88);
-
-			/* no regval_0xcbd setup required, because the antenna
-			 * switch control value is set by antenna diversity
-			 */
-
- break;
- case BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_MAC:
- /* 0x4c[23] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e,
- 0x80, 0x1);
-
- /* 0x64[0] = 1b'0 for no switch_polatiry_inverse,
- * DPDT_SEL_N =1, DPDT_SEL_P =0
- */
- regval_0x64 = (!switch_polatiry_inverse ? 0x0 : 0x1);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
- regval_0x64);
- break;
- case BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_BT:
- /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e,
- 0x80, 0x0);
- /* 0x4c[24] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f,
- 0x01, 0x0);
-
-			/* no setup required, because the antenna switch
-			 * control value is set by BT vendor register 0xac[1:0]
-			 */
- break;
- }
- }
-
- u32tmp1 = btcoexist->btc_read_4byte(btcoexist, 0xcbc);
- u32tmp2 = btcoexist->btc_read_4byte(btcoexist, 0x4c);
- u32tmp3 = btcoexist->btc_read_4byte(btcoexist, 0x64) & 0xff;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (After Ext Ant switch setup) 0xcbc = 0x%08x, 0x4c = 0x%08x, 0x64= 0x%02x**********\n",
- u32tmp1, u32tmp2, u32tmp3);
-}
-
-/* set gnt_wl/gnt_bt control by sw high/low, or by hw pta, while in
- * power on, init, wlan off, wlan only, wl2g non-current, wl2g current,
- * wl5g
- */
-
-static void halbtc8822b1ant_set_ant_path(struct btc_coexist *btcoexist,
- u8 ant_pos_type, bool force_exec,
- u8 phase)
-
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 u8tmp = 0;
- u32 u32tmp1 = 0;
- u32 u32tmp2 = 0, u32tmp3 = 0;
-
- u32tmp1 = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist, 0x38);
-
-	/* To avoid indirect access failure */
- if (((u32tmp1 & 0xf000) >> 12) != ((u32tmp1 & 0x0f00) >> 8)) {
- force_exec = true;
- coex_sta->gnt_error_cnt++;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex],(Before Ant Setup) 0x38= 0x%x\n", u32tmp1);
- }
-
- /* Ext switch buffer mux */
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x1991, 0x3, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbe, 0x8, 0x0);
-
- coex_dm->cur_ant_pos_type = (ant_pos_type << 8) + phase;
-
- if (!force_exec) {
- if (coex_dm->cur_ant_pos_type == coex_dm->pre_ant_pos_type)
- return;
- }
-
- coex_dm->pre_ant_pos_type = coex_dm->cur_ant_pos_type;
-
- if (btcoexist->dbg_mode_1ant) {
- u32tmp1 = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0x38);
- u32tmp2 = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0x54);
- u32tmp3 = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
-
- u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x73);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (Before Ant Setup) 0xcb4 = 0x%x, 0x73 = 0x%x, 0x38= 0x%x, 0x54= 0x%x**********\n",
- u32tmp3, u8tmp, u32tmp1, u32tmp2);
- }
-
- switch (phase) {
- case BT_8822B_1ANT_PHASE_COEX_INIT:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (set_ant_path - 1ANT_PHASE_COEX_INIT) **********\n");
-
- /* Disable LTE Coex Function in WiFi side
- * (this should be on if LTE coex is required)
- */
- halbtc8822b1ant_ltecoex_enable(btcoexist, 0x0);
-
- /* GNT_WL_LTE always = 1
-		 * (this should be configured if LTE coex is required)
- */
- halbtc8822b1ant_ltecoex_set_coex_table(
- btcoexist, BT_8822B_1ANT_CTT_WL_VS_LTE, 0xffff);
-
- /* GNT_BT_LTE always = 1
-		 * (this should be configured if LTE coex is required)
- */
- halbtc8822b1ant_ltecoex_set_coex_table(
- btcoexist, BT_8822B_1ANT_CTT_BT_VS_LTE, 0xffff);
-
- /* set GNT_BT to SW high */
- halbtc8822b1ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_HIGH);
-
- /* set GNT_WL to SW low */
- halbtc8822b1ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_LOW);
-
- /* set Path control owner to WL at initial step */
- halbtc8822b1ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_1ANT_PCO_WLSIDE);
-
- coex_sta->run_time_state = false;
-
- /* Ext switch buffer mux */
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x1991, 0x3, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbe, 0x8, 0x0);
-
- if (ant_pos_type == BTC_ANT_PATH_AUTO)
- ant_pos_type = BTC_ANT_PATH_BT;
-
- break;
- case BT_8822B_1ANT_PHASE_WLANONLY_INIT:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (set_ant_path - 1ANT_PHASE_WLANONLY_INIT) **********\n");
-
- /* Disable LTE Coex Function in WiFi side
- * (this should be on if LTE coex is required)
- */
- halbtc8822b1ant_ltecoex_enable(btcoexist, 0x0);
-
- /* GNT_WL_LTE always = 1
-		 * (this should be configured if LTE coex is required)
- */
- halbtc8822b1ant_ltecoex_set_coex_table(
- btcoexist, BT_8822B_1ANT_CTT_WL_VS_LTE, 0xffff);
-
- /* GNT_BT_LTE always = 1
-		 * (this should be configured if LTE coex is required)
- */
- halbtc8822b1ant_ltecoex_set_coex_table(
- btcoexist, BT_8822B_1ANT_CTT_BT_VS_LTE, 0xffff);
-
- /* set GNT_BT to SW Low */
- halbtc8822b1ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_LOW);
-
- /* Set GNT_WL to SW high */
- halbtc8822b1ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_HIGH);
-
- /* set Path control owner to WL at initial step */
- halbtc8822b1ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_1ANT_PCO_WLSIDE);
-
- coex_sta->run_time_state = false;
-
- /* Ext switch buffer mux */
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x1991, 0x3, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbe, 0x8, 0x0);
-
- if (ant_pos_type == BTC_ANT_PATH_AUTO)
- ant_pos_type = BTC_ANT_PATH_WIFI;
-
- break;
- case BT_8822B_1ANT_PHASE_WLAN_OFF:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (set_ant_path - 1ANT_PHASE_WLAN_OFF) **********\n");
-
- /* Disable LTE Coex Function in WiFi side */
- halbtc8822b1ant_ltecoex_enable(btcoexist, 0x0);
-
- /* set Path control owner to BT */
- halbtc8822b1ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_1ANT_PCO_BTSIDE);
-
- /* Set Ext Ant Switch to BT control at wifi off step */
- halbtc8822b1ant_set_ext_ant_switch(
- btcoexist, FORCE_EXEC,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_BT,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_NOCARE);
-
- coex_sta->run_time_state = false;
-
- break;
- case BT_8822B_1ANT_PHASE_2G_RUNTIME:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (set_ant_path - 1ANT_PHASE_2G_RUNTIME) **********\n");
-
- /* set GNT_BT to PTA */
- halbtc8822b1ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_PTA,
- BT_8822B_1ANT_SIG_STA_SET_BY_HW);
-
- /* Set GNT_WL to PTA */
- halbtc8822b1ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_PTA,
- BT_8822B_1ANT_SIG_STA_SET_BY_HW);
-
- /* set Path control owner to WL at runtime step */
- halbtc8822b1ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_1ANT_PCO_WLSIDE);
-
- coex_sta->run_time_state = true;
-
- if (ant_pos_type == BTC_ANT_PATH_AUTO)
- ant_pos_type = BTC_ANT_PATH_PTA;
-
- break;
- case BT_8822B_1ANT_PHASE_5G_RUNTIME:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (set_ant_path - 1ANT_PHASE_5G_RUNTIME) **********\n");
-
- /* set GNT_BT to SW Hi */
- halbtc8822b1ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_HIGH);
-
- /* Set GNT_WL to SW Hi */
- halbtc8822b1ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_HIGH);
-
- /* set Path control owner to WL at runtime step */
- halbtc8822b1ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_1ANT_PCO_WLSIDE);
-
- coex_sta->run_time_state = true;
-
- if (ant_pos_type == BTC_ANT_PATH_AUTO)
- ant_pos_type = BTC_ANT_PATH_WIFI5G;
-
- break;
- case BT_8822B_1ANT_PHASE_BTMPMODE:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (set_ant_path - 1ANT_PHASE_BTMPMODE) **********\n");
-
- /* Disable LTE Coex Function in WiFi side */
- halbtc8822b1ant_ltecoex_enable(btcoexist, 0x0);
-
- /* set GNT_BT to SW Hi */
- halbtc8822b1ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_HIGH);
-
- /* Set GNT_WL to SW Lo */
- halbtc8822b1ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_LOW);
-
- /* set Path control owner to WL */
- halbtc8822b1ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_1ANT_PCO_WLSIDE);
-
- coex_sta->run_time_state = false;
-
- /* Set Ext Ant Switch to BT side at BT MP mode */
- if (ant_pos_type == BTC_ANT_PATH_AUTO)
- ant_pos_type = BTC_ANT_PATH_BT;
-
- break;
- }
-
- if (phase != BT_8822B_1ANT_PHASE_WLAN_OFF) {
- switch (ant_pos_type) {
- case BTC_ANT_PATH_WIFI:
- halbtc8822b1ant_set_ext_ant_switch(
- btcoexist, force_exec,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_BBSW,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_WLG);
- break;
- case BTC_ANT_PATH_WIFI5G:
- halbtc8822b1ant_set_ext_ant_switch(
- btcoexist, force_exec,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_BBSW,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_WLA);
- break;
- case BTC_ANT_PATH_BT:
- halbtc8822b1ant_set_ext_ant_switch(
- btcoexist, force_exec,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_BBSW,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_BT);
- break;
- default:
- case BTC_ANT_PATH_PTA:
- halbtc8822b1ant_set_ext_ant_switch(
- btcoexist, force_exec,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_PTA,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_NOCARE);
- break;
- }
- }
-
- if (btcoexist->dbg_mode_1ant) {
- u32tmp1 = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0x38);
- u32tmp2 = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0x54);
- u32tmp3 = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
-
- u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x73);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (After Ant Setup) 0xcb4 = 0x%x, 0x73 = 0x%x, 0x38= 0x%x, 0x54= 0x%x**********\n",
- u32tmp3, u8tmp, u32tmp1, u32tmp2);
- }
-}
-
-static bool halbtc8822b1ant_is_common_action(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool common = false, wifi_connected = false, wifi_busy = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- if (!wifi_connected &&
- coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_NON_CONNECTED_IDLE) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
-
- /* halbtc8822b1ant_sw_mechanism(btcoexist, false); */
-
- common = true;
- } else if (wifi_connected &&
- (coex_dm->bt_status ==
- BT_8822B_1ANT_BT_STATUS_NON_CONNECTED_IDLE)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
-
- /* halbtc8822b1ant_sw_mechanism(btcoexist, false); */
-
- common = true;
- } else if (!wifi_connected && (BT_8822B_1ANT_BT_STATUS_CONNECTED_IDLE ==
- coex_dm->bt_status)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
-
- /* halbtc8822b1ant_sw_mechanism(btcoexist, false); */
-
- common = true;
- } else if (wifi_connected && (BT_8822B_1ANT_BT_STATUS_CONNECTED_IDLE ==
- coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
-
- /* halbtc8822b1ant_sw_mechanism(btcoexist, false); */
-
- common = true;
- } else if (!wifi_connected && (BT_8822B_1ANT_BT_STATUS_CONNECTED_IDLE !=
- coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
-
- /* halbtc8822b1ant_sw_mechanism(btcoexist, false); */
-
- common = true;
- } else {
- if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
- }
-
- common = false;
- }
-
- return common;
-}
-
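-/* WiFi on 5 GHz: switch to the 5G runtime antenna phase, stop PS-TDMA and
- * use coex table 0.
- */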
-static void halbtc8822b1ant_action_wifi_under5g(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], under 5g start\n");
-	/* for test: S3 BT disappear, fail rate 1/600 */
-	/* set SW GNT_WL/GNT_BT high */
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, FORCE_EXEC,
- BT_8822B_1ANT_PHASE_5G_RUNTIME);
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd, 0x3, 1);
-}
-
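-/* WLAN-only operation (used when BT is disabled): fall back to the under-5G
- * action on a 5 GHz connection, otherwise force the 2G runtime antenna phase
- * with PS-TDMA off.
- */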
-static void halbtc8822b1ant_action_wifi_only(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_under_5g = false, rf4ce_enabled = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
- if (wifi_under_5g) {
- halbtc8822b1ant_action_wifi_under5g(btcoexist);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (wlan only -- under 5g ) **********\n");
- return;
- }
-
- if (rf4ce_enabled) {
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x45e, 0x8, 0x1);
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 50);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
- return;
- }
- halbtc8822b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
- halbtc8822b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, FORCE_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (wlan only -- under 2g ) **********\n");
-}
-
-static void
-halbtc8822b1ant_action_wifi_native_lps(struct btc_coexist *btcoexist)
-{
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-}
-
-/* *********************************************
- *
- * Non-Software Coex Mechanism start
- *
- * **********************************************/
-
-static void halbtc8822b1ant_action_bt_whck_test(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex],action_bt_whck_test\n");
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-}
-
-static void
-halbtc8822b1ant_action_wifi_multi_port(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex],action_wifi_multi_port\n");
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-}
-
-static void halbtc8822b1ant_action_hs(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], action_hs\n");
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
-}
-
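-/* BT is re-establishing a link: do nothing if BT has multiple links,
- * otherwise turn PS-TDMA off and use coex table 0.
- */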
-static void halbtc8822b1ant_action_bt_relink(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], run bt multi link function\n");
-
- if (coex_sta->is_bt_multi_link)
- return;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], run bt_re-link function\n");
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-}
-
-/* "bt inquiry" + wifi any + bt any */
-
-static void halbtc8822b1ant_action_bt_inquiry(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool wifi_connected = false, ap_enable = false, wifi_busy = false,
- bt_busy = false, rf4ce_enabled = false;
-
- bool wifi_scan = false, link = false, roam = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (bt inquiry) **********\n");
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
- &ap_enable);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &wifi_scan);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** scan = %d, link =%d, roam = %d**********\n",
- wifi_scan, link, roam);
-
- if ((link) || (roam) || (coex_sta->wifi_is_high_pri_task)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (bt inquiry wifi connect or scan ) **********\n");
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
-
- } else if ((wifi_scan) && (coex_sta->bt_create_connection)) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
-
- } else if ((!wifi_connected) && (!wifi_scan)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (bt inquiry wifi non connect) **********\n");
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- } else if ((bt_link_info->a2dp_exist) && (bt_link_info->pan_exist)) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
- } else if (bt_link_info->a2dp_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
- } else if (wifi_scan) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
- } else if (wifi_busy) {
- /* for BT inquiry/page fail after S4 resume */
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
-		/* aaaa -> 55aa for BT connect while WL busy */
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC,
- 15);
- if (rf4ce_enabled) {
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x45e,
- 0x8, 0x1);
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 50);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 0);
- }
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (bt inquiry wifi connect) **********\n");
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
- }
-}
-
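-/* BT SCO or HID-only busy: pick the PS-TDMA case and coex table based on
- * SCO presence, HID low-priority TX overhead, WiFi bandwidth and whether BT
- * has multiple links.
- */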
-static void
-halbtc8822b1ant_action_bt_sco_hid_only_busy(struct btc_coexist *btcoexist)
-{
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool wifi_connected = false, wifi_busy = false;
- u32 wifi_bw = 1;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- if (bt_link_info->sco_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
- } else {
- if (coex_sta->is_hid_low_pri_tx_overhead) {
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 6);
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 18);
- } else if (wifi_bw == 0) { /* if 11bg mode */
-
- if (coex_sta->is_bt_multi_link) {
- halbtc8822b1ant_coex_table_with_type(
- btcoexist, NORMAL_EXEC, 11);
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- } else {
- halbtc8822b1ant_coex_table_with_type(
- btcoexist, NORMAL_EXEC, 6);
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- }
- } else {
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 6);
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 11);
- }
- }
-}
-
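-/* WiFi connected + BT ACL busy: choose PS-TDMA and coex table per BT profile
- * combination (A2DP, A2DP+PAN, HID+A2DP, PAN, HID+PAN, no profile), using a
- * "wifi_turbo" table when WiFi is busy in a low-noise environment.
- */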
-static void
-halbtc8822b1ant_action_wifi_connected_bt_acl_busy(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool wifi_busy = false, wifi_turbo = false;
- u32 wifi_bw = 1;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], scan_ap_num = %d, wl_noisy_level = %d\n",
- coex_sta->scan_ap_num, coex_sta->wl_noisy_level);
-
- if ((wifi_busy) && (coex_sta->wl_noisy_level == 0))
- wifi_turbo = true;
-
- if ((coex_sta->bt_relink_downcount != 0) &&
- (!bt_link_info->pan_exist) && (wifi_busy)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Re-Link + A2DP + WL busy\n");
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- } else if ((bt_link_info->a2dp_exist) && (coex_sta->is_bt_a2dp_sink)) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
- } else if (bt_link_info->a2dp_only) { /* A2DP */
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
-
- if (wifi_turbo)
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 19);
- else
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 4);
- } else if (((bt_link_info->a2dp_exist) && (bt_link_info->pan_exist)) ||
- (bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
- bt_link_info->pan_exist)) {
- /* A2DP+PAN(OPP,FTP), HID+A2DP+PAN(OPP,FTP) */
-
- if (wifi_busy)
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 13);
- else
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 14);
-
- if (bt_link_info->hid_exist)
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- else if (wifi_turbo)
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 19);
- else
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 4);
- } else if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) { /* HID+A2DP */
-
- if (wifi_bw == 0) { /* if 11bg mode */
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- halbtc8822b1ant_set_wltoggle_coex_table(
- btcoexist, NORMAL_EXEC, 1, 0xaa, 0x5a, 0xaa,
- 0xaa);
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 49);
- } else {
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- halbtc8822b1ant_limited_rx(btcoexist, NORMAL_EXEC,
- false, true, 8);
- halbtc8822b1ant_set_wltoggle_coex_table(
- btcoexist, NORMAL_EXEC, 1, 0xaa, 0x5a, 0xaa,
- 0xaa);
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 49);
- }
- /* PAN(OPP,FTP), HID+PAN(OPP,FTP) */
-
- } else if ((bt_link_info->pan_only) ||
- (bt_link_info->hid_exist && bt_link_info->pan_exist)) {
- if (!wifi_busy)
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 4);
- else
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 3);
-
- if (bt_link_info->hid_exist)
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- else if (wifi_turbo)
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 19);
- else
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 4);
- } else {
- /* BT no-profile busy (0x9) */
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 33);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
- }
-}
-
-/* wifi not connected + bt action */
-
-static void
-halbtc8822b1ant_action_wifi_not_connected(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool rf4ce_enabled = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (wifi not connect) **********\n");
-
- /* tdma and coex table */
- if (rf4ce_enabled) {
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x45e, 0x8, 0x1);
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 50);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
- return;
- }
- halbtc8822b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-}
-
-/* "wl not connected scan" + bt action */
-static void
-halbtc8822b1ant_action_wifi_not_connected_scan(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- u32 wifi_link_status = 0;
- u32 num_of_wifi_link = 0;
- bool bt_ctrl_agg_buf_size = false;
- u8 agg_buf_size = 5;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (wifi non connect scan) **********\n");
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
- &wifi_link_status);
-
- num_of_wifi_link = wifi_link_status >> 16;
-
- if (num_of_wifi_link >= 2) {
- halbtc8822b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
- halbtc8822b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
- bt_ctrl_agg_buf_size, agg_buf_size);
-
- if (coex_sta->c2h_bt_inquiry_page) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Is Inquirying\n");
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- } else {
- halbtc8822b1ant_action_wifi_multi_port(btcoexist);
- }
- return;
- }
-
- if (coex_sta->c2h_bt_inquiry_page) {
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- return;
- } else if (bt_hs_on) {
- halbtc8822b1ant_action_hs(btcoexist);
- return;
- }
-
- /* tdma and coex table */
- if (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_BUSY) {
- if (bt_link_info->a2dp_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 32);
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- } else if (bt_link_info->a2dp_exist &&
- bt_link_info->pan_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 22);
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- } else {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 20);
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- }
- } else if ((coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_SCO_BUSY) ||
- (BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)) {
- halbtc8822b1ant_action_bt_sco_hid_only_busy(btcoexist);
- } else {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
- }
-}
-
-/* "wl not connected asso" + bt action */
-
-static void halbtc8822b1ant_action_wifi_not_connected_asso_auth(
- struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- u32 wifi_link_status = 0;
- u32 num_of_wifi_link = 0;
- bool bt_ctrl_agg_buf_size = false;
- u8 agg_buf_size = 5;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (wifi non connect asso_auth) **********\n");
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
- &wifi_link_status);
-
- num_of_wifi_link = wifi_link_status >> 16;
-
- if (num_of_wifi_link >= 2) {
- halbtc8822b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
- halbtc8822b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
- bt_ctrl_agg_buf_size, agg_buf_size);
-
- if (coex_sta->c2h_bt_inquiry_page) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Is Inquirying\n");
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- } else {
- halbtc8822b1ant_action_wifi_multi_port(btcoexist);
- }
- return;
- }
-
- if (coex_sta->c2h_bt_inquiry_page) {
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- return;
- } else if (bt_hs_on) {
- halbtc8822b1ant_action_hs(btcoexist);
- return;
- }
-
- /* tdma and coex table */
- if ((bt_link_info->sco_exist) || (bt_link_info->hid_exist) ||
- (bt_link_info->a2dp_exist)) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
- halbtc8822b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 4);
- } else if (bt_link_info->pan_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- halbtc8822b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 4);
- } else {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
- }
-}
-
-/* "wl connected scan" + bt action */
-
-static void
-halbtc8822b1ant_action_wifi_connected_scan(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- u32 wifi_link_status = 0;
- u32 num_of_wifi_link = 0;
- bool bt_ctrl_agg_buf_size = false;
- u8 agg_buf_size = 5;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (wifi connect scan) **********\n");
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
- &wifi_link_status);
-
- num_of_wifi_link = wifi_link_status >> 16;
-
- if (num_of_wifi_link >= 2) {
- halbtc8822b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
- halbtc8822b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
- bt_ctrl_agg_buf_size, agg_buf_size);
-
- if (coex_sta->c2h_bt_inquiry_page) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Is Inquirying\n");
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- } else {
- halbtc8822b1ant_action_wifi_multi_port(btcoexist);
- }
- return;
- }
-
- if (coex_sta->c2h_bt_inquiry_page) {
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- return;
- } else if (bt_hs_on) {
- halbtc8822b1ant_action_hs(btcoexist);
- return;
- }
-
- /* tdma and coex table */
- if (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_BUSY) {
- if (bt_link_info->a2dp_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 32);
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- } else if (bt_link_info->a2dp_exist &&
- bt_link_info->pan_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 22);
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- } else {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 20);
- halbtc8822b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- }
- } else if ((coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_SCO_BUSY) ||
- (BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)) {
- halbtc8822b1ant_action_bt_sco_hid_only_busy(btcoexist);
- } else {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
- }
-}
-
-/* "wl connected specific packet" + bt action */
-
-static void halbtc8822b1ant_action_wifi_connected_specific_packet(
- struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- u32 wifi_link_status = 0;
- u32 num_of_wifi_link = 0;
- bool bt_ctrl_agg_buf_size = false;
- u8 agg_buf_size = 5;
- bool wifi_busy = false;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (wifi connect specific packet) **********\n");
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
- &wifi_link_status);
-
- num_of_wifi_link = wifi_link_status >> 16;
-
- if (num_of_wifi_link >= 2) {
- halbtc8822b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
- halbtc8822b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
- bt_ctrl_agg_buf_size, agg_buf_size);
-
- if (coex_sta->c2h_bt_inquiry_page) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Is Inquirying\n");
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- } else {
- halbtc8822b1ant_action_wifi_multi_port(btcoexist);
- }
- return;
- }
-
- if (coex_sta->c2h_bt_inquiry_page) {
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- return;
- } else if (bt_hs_on) {
- halbtc8822b1ant_action_hs(btcoexist);
- return;
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
-	/* skip specific-packet handling when both WiFi and BT are very busy */
- if ((wifi_busy) &&
- ((bt_link_info->pan_exist) || (coex_sta->num_of_profile >= 2)))
- return;
-
- /* tdma and coex table */
- if ((bt_link_info->sco_exist) || (bt_link_info->hid_exist)) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
- } else if (bt_link_info->a2dp_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
-		/* for A2DP glitch, change from 1 to 15 */
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC,
- 15);
- } else if (bt_link_info->pan_exist) {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
- } else {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
- }
-}
-
-/* wifi connected entry point:
- * selects the PS and TDMA case for each WiFi state (and each BT case)
- */
-
-static void halbtc8822b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_busy = false, rf4ce_enabled = false;
- bool scan = false, link = false, roam = false;
- bool under_4way = false, ap_enable = false, wifi_under_5g = false;
- u8 wifi_rssi_state;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect()===>\n");
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if (wifi_under_5g) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 5g<===\n");
-
- halbtc8822b1ant_action_wifi_under5g(btcoexist);
-
- return;
- }
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 2g<===\n");
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
- &under_4way);
-
- if (under_4way) {
- halbtc8822b1ant_action_wifi_connected_specific_packet(
- btcoexist);
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
- return;
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- if (scan || link || roam) {
- if (scan)
- halbtc8822b1ant_action_wifi_connected_scan(btcoexist);
- else
- halbtc8822b1ant_action_wifi_connected_specific_packet(
- btcoexist);
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
- return;
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
- &ap_enable);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- /* tdma and coex table */
- if (!wifi_busy) {
- if (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_BUSY) {
- halbtc8822b1ant_action_wifi_connected_bt_acl_busy(
- btcoexist);
- } else if ((BT_8822B_1ANT_BT_STATUS_SCO_BUSY ==
- coex_dm->bt_status) ||
- (BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)) {
- halbtc8822b1ant_action_bt_sco_hid_only_busy(btcoexist);
- } else {
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
- 8);
-
- halbtc8822b1ant_set_ant_path(
- btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- if ((coex_sta->high_priority_tx) +
- (coex_sta->high_priority_rx) <=
- 60)
- /*sy modify case16 -> case17*/
- halbtc8822b1ant_coex_table_with_type(
- btcoexist, NORMAL_EXEC, 1);
- else
- halbtc8822b1ant_coex_table_with_type(
- btcoexist, NORMAL_EXEC, 1);
- }
- } else {
- if (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_BUSY) {
- halbtc8822b1ant_action_wifi_connected_bt_acl_busy(
- btcoexist);
- } else if ((BT_8822B_1ANT_BT_STATUS_SCO_BUSY ==
- coex_dm->bt_status) ||
- (BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)) {
- halbtc8822b1ant_action_bt_sco_hid_only_busy(btcoexist);
- } else {
- if (rf4ce_enabled) {
- btcoexist->btc_write_1byte_bitmask(
- btcoexist, 0x45e, 0x8, 0x1);
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 50);
-
- halbtc8822b1ant_coex_table_with_type(
- btcoexist, NORMAL_EXEC, 1);
- return;
- }
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
- 8);
-
- halbtc8822b1ant_set_ant_path(
- btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- wifi_rssi_state = halbtc8822b1ant_wifi_rssi_state(
- btcoexist, 1, 2, 25, 0);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** before **********\n");
- if (BT_8822B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status) {
- if (rf4ce_enabled) {
- btcoexist->btc_write_1byte_bitmask(
- btcoexist, 0x45e, 0x8, 0x1);
-
- halbtc8822b1ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 50);
-
- halbtc8822b1ant_coex_table_with_type(
- btcoexist, NORMAL_EXEC, 1);
- return;
- }
-
- halbtc8822b1ant_coex_table_with_type(
- btcoexist, NORMAL_EXEC, 1);
- } else {
- halbtc8822b1ant_coex_table_with_type(
- btcoexist, NORMAL_EXEC, 1);
- }
- }
- }
-}
-
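-/* Software coex mechanism: record the current algorithm and log it; no
- * hardware settings are changed here (callers use it for debug output only).
- */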
-static void
-halbtc8822b1ant_run_sw_coexist_mechanism(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 algorithm = 0;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (run sw coexmech) **********\n");
- algorithm = halbtc8822b1ant_action_algorithm(btcoexist);
- coex_dm->cur_algorithm = algorithm;
-
- if (halbtc8822b1ant_is_common_action(btcoexist)) {
- } else {
- switch (coex_dm->cur_algorithm) {
- case BT_8822B_1ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = SCO.\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID.\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = A2DP.\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN(EDR).\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HS mode.\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN+A2DP.\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
- break;
- case BT_8822B_1ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID+A2DP.\n");
- break;
- default:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
- break;
- }
- coex_dm->pre_algorithm = coex_dm->cur_algorithm;
- }
-}
-
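-/* Main coex dispatcher: bail out for manual control, stopped coex DM, IPS,
- * native LPS, frozen state or 5 GHz operation, then route to the action
- * routine that matches the WiFi link state and BT status.
- */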
-static void halbtc8822b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool wifi_connected = false, bt_hs_on = false;
- bool increase_scan_dev_num = false;
- bool bt_ctrl_agg_buf_size = false;
- bool miracast_plus_bt = false;
- u8 agg_buf_size = 5;
- u32 wifi_link_status = 0;
- u32 num_of_wifi_link = 0, wifi_bw;
- u8 iot_peer = BTC_IOT_PEER_UNKNOWN;
- bool wifi_under_5g = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
-
- if (btcoexist->manual_control) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
- return;
- }
-
- if (btcoexist->stop_coex_dm) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
- return;
- }
-
- if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
- return;
- }
-
- if ((coex_sta->under_lps) &&
- (coex_dm->bt_status != BT_8822B_1ANT_BT_STATUS_ACL_BUSY)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), wifi is under LPS !!!\n");
- halbtc8822b1ant_action_wifi_native_lps(btcoexist);
- return;
- }
-
- if (!coex_sta->run_time_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], return for run_time_state = false !!!\n");
- return;
- }
-
- if (coex_sta->freeze_coexrun_by_btinfo) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), return for freeze_coexrun_by_btinfo\n");
- return;
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
- if (wifi_under_5g) {
- halbtc8822b1ant_action_wifi_under5g(btcoexist);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under 5G!!!\n");
- return;
- }
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under 2G!!!\n");
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- if (coex_sta->bt_whck_test) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under WHCK TEST!!!\n");
- halbtc8822b1ant_action_bt_whck_test(btcoexist);
- return;
- }
-
- if (coex_sta->bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is disabled !!!\n");
- halbtc8822b1ant_action_wifi_only(btcoexist);
- return;
- }
-
- if (coex_sta->is_setup_link) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is re-link !!!\n");
- halbtc8822b1ant_action_bt_relink(btcoexist);
- return;
- }
-
- if ((coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_BUSY) ||
- (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_SCO_BUSY) ||
- (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY))
- increase_scan_dev_num = true;
-
- btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM,
- &increase_scan_dev_num);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
- &wifi_link_status);
- num_of_wifi_link = wifi_link_status >> 16;
-
- if ((num_of_wifi_link >= 2) ||
- (wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
- num_of_wifi_link, wifi_link_status);
-
- if (bt_link_info->bt_link_exist) {
- halbtc8822b1ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 1,
- 0, 1);
- miracast_plus_bt = true;
- } else {
- halbtc8822b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0,
- 0, 0);
- miracast_plus_bt = false;
- }
- btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
- &miracast_plus_bt);
- halbtc8822b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
- bt_ctrl_agg_buf_size, agg_buf_size);
-
- if ((bt_link_info->a2dp_exist) &&
- (coex_sta->c2h_bt_inquiry_page)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Is Inquirying\n");
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- } else {
- halbtc8822b1ant_action_wifi_multi_port(btcoexist);
- }
-
- return;
- }
-
- miracast_plus_bt = false;
- btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
- &miracast_plus_bt);
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- if ((bt_link_info->bt_link_exist) && (wifi_connected)) {
- halbtc8822b1ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 1, 0, 1);
-
- btcoexist->btc_get(btcoexist, BTC_GET_U1_IOT_PEER, &iot_peer);
-
- if (iot_peer != BTC_IOT_PEER_CISCO) {
- if (bt_link_info->sco_exist)
- halbtc8822b1ant_limited_rx(btcoexist,
- NORMAL_EXEC, true,
- false, 0x5);
- else
- halbtc8822b1ant_limited_rx(btcoexist,
- NORMAL_EXEC, false,
- false, 0x5);
- } else {
- if (bt_link_info->sco_exist) {
- halbtc8822b1ant_limited_rx(btcoexist,
- NORMAL_EXEC, true,
- false, 0x5);
- } else {
- if (wifi_bw == BTC_WIFI_BW_HT40)
- halbtc8822b1ant_limited_rx(
- btcoexist, NORMAL_EXEC, false,
- true, 0x10);
- else
- halbtc8822b1ant_limited_rx(
- btcoexist, NORMAL_EXEC, false,
- true, 0x8);
- }
- }
-
- halbtc8822b1ant_sw_mechanism(btcoexist, true);
- halbtc8822b1ant_run_sw_coexist_mechanism(
- btcoexist); /* just print debug message */
- } else {
- halbtc8822b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
-
- halbtc8822b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
- 0x5);
-
- halbtc8822b1ant_sw_mechanism(btcoexist, false);
- halbtc8822b1ant_run_sw_coexist_mechanism(
- btcoexist); /* just print debug message */
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- if (coex_sta->c2h_bt_inquiry_page) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Is Inquirying\n");
- halbtc8822b1ant_action_bt_inquiry(btcoexist);
- return;
- } else if (bt_hs_on) {
- halbtc8822b1ant_action_hs(btcoexist);
- return;
- }
-
- if (!wifi_connected) {
- bool scan = false, link = false, roam = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is non connected-idle !!!\n");
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-
- if (scan)
- halbtc8822b1ant_action_wifi_not_connected_scan(
- btcoexist);
- else if (link || roam)
- halbtc8822b1ant_action_wifi_not_connected_asso_auth(
- btcoexist);
- else
- halbtc8822b1ant_action_wifi_not_connected(btcoexist);
- } else { /* wifi LPS/Busy */
- halbtc8822b1ant_action_wifi_connected(btcoexist);
- }
-}
-
-static void halbtc8822b1ant_init_coex_dm(struct btc_coexist *btcoexist)
-{
- /* force to reset coex mechanism */
-
- halbtc8822b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, false);
-
- /* sw all off */
- halbtc8822b1ant_sw_mechanism(btcoexist, false);
-
- coex_sta->pop_event_cnt = 0;
-}
-
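-/* One-time HW setup: reset the coex bookkeeping, enable the TBTT interrupt,
- * BT counters and PTA, and select the antenna path for the WLAN-off,
- * WiFi-only or normal coex-init phase.
- */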
-static void halbtc8822b1ant_init_hw_config(struct btc_coexist *btcoexist,
- bool back_up, bool wifi_only)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 u8tmp = 0, i = 0;
- u32 u32tmp1 = 0, u32tmp2 = 0, u32tmp3 = 0;
-
- u32tmp3 = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- u32tmp1 = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist, 0x38);
- u32tmp2 = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist, 0x54);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (Before Init HW config) 0xcb4 = 0x%x, 0x38= 0x%x, 0x54= 0x%x**********\n",
- u32tmp3, u32tmp1, u32tmp2);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 1Ant Init HW Config!!\n");
-
- coex_sta->bt_coex_supported_feature = 0;
- coex_sta->bt_coex_supported_version = 0;
- coex_sta->bt_ble_scan_type = 0;
- coex_sta->bt_ble_scan_para[0] = 0;
- coex_sta->bt_ble_scan_para[1] = 0;
- coex_sta->bt_ble_scan_para[2] = 0;
- coex_sta->bt_reg_vendor_ac = 0xffff;
- coex_sta->bt_reg_vendor_ae = 0xffff;
- coex_sta->isolation_btween_wb = BT_8822B_1ANT_DEFAULT_ISOLATION;
- coex_sta->gnt_error_cnt = 0;
- coex_sta->bt_relink_downcount = 0;
- coex_sta->is_set_ps_state_fail = false;
- coex_sta->cnt_set_ps_state_fail = 0;
-
- for (i = 0; i <= 9; i++)
- coex_sta->bt_afh_map[i] = 0;
-
-	/* Set up the RF front-end type */
- halbtc8822b1ant_set_rfe_type(btcoexist);
-
- /* 0xf0[15:12] --> Chip Cut information */
- coex_sta->cut_version =
- (btcoexist->btc_read_1byte(btcoexist, 0xf1) & 0xf0) >> 4;
-
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x550, 0x8,
-					   0x1); /* enable TBTT interrupt */
-
- /* BT report packet sample rate */
- /* 0x790[5:0]=0x5 */
- u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
- u8tmp &= 0xc0;
- u8tmp |= 0x5;
- btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
-
- /* Enable BT counter statistics */
- btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
-
-	/* Enable PTA (3-wire function from BT side) */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x41, 0x02, 0x1);
-
-	/* Enable PTA (tx/rx signal from WiFi side) */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4c6, 0x10, 0x1);
-	/* GNT_BT = 1 while both are selected */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x763, 0x10, 0x1);
-
- /* enable GNT_WL */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x40, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x1, 0x0);
-
- if (btcoexist->btc_read_1byte(btcoexist, 0x80) == 0xc6)
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ONOFF, true);
-
- /* Antenna config */
- if (coex_sta->is_rf_state_off) {
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_WLAN_OFF);
-
- btcoexist->stop_coex_dm = true;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** %s (RF Off)**********\n",
- __func__);
- } else if (wifi_only) {
- coex_sta->concurrent_rx_mode_on = false;
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_WLANONLY_INIT);
- } else {
- coex_sta->concurrent_rx_mode_on = true;
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_COEX_INIT);
- }
-
- /* PTA parameter */
- halbtc8822b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
-
- halbtc8822b1ant_enable_gnt_to_gpio(btcoexist, true);
-}
-
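-/* Power-on setting: give path control to the WiFi side, drive GNT_BT high
- * and GNT_WL low by software, and write the local antenna-setting register
- * so the FW can read it.
- */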
-void ex_btc8822b1ant_power_on_setting(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_board_info *board_info = &btcoexist->board_info;
- u8 u8tmp = 0x0;
- u16 u16tmp = 0x0;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "xxxxxxxxxxxxxxxx Execute 8822b 1-Ant PowerOn Setting!! xxxxxxxxxxxxxxxx\n");
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Ant Det Finish = %s, Ant Det Number = %d\n",
- board_info->btdm_ant_det_finish ? "Yes" : "No",
- board_info->btdm_ant_num_by_ant_det);
-
- btcoexist->dbg_mode_1ant = false;
- btcoexist->stop_coex_dm = true;
-
-	/* enable BB in REG_SYS_FUNC_EN so that 0x948 can be written correctly */
- u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x2);
- btcoexist->btc_write_2byte(btcoexist, 0x2, u16tmp | BIT(0) | BIT(1));
-
- /* set Path control owner to WiFi */
- halbtc8822b1ant_ltecoex_pathcontrol_owner(btcoexist,
- BT_8822B_1ANT_PCO_WLSIDE);
-
- /* set GNT_BT to high */
- halbtc8822b1ant_ltecoex_set_gnt_bt(btcoexist,
- BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW,
- BT_8822B_1ANT_SIG_STA_SET_TO_HIGH);
- /* Set GNT_WL to low */
- halbtc8822b1ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_1ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_1ANT_GNT_CTRL_BY_SW, BT_8822B_1ANT_SIG_STA_SET_TO_LOW);
-
- /* set WLAN_ACT = 0 */
- /* btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); */
-
- /* SD1 Chunchu red x issue */
- btcoexist->btc_write_1byte(btcoexist, 0xff1a, 0x0);
-
- halbtc8822b1ant_enable_gnt_to_gpio(btcoexist, true);
-
-	/* S0 or S1 setting and local register setting
-	 * (from this setting the FW can get the antenna number, S0/S1, ... info)
-	 */
-	/* Local setting bit definitions */
-	/* BIT0: "0" for no antenna inverse; "1" for antenna inverse */
-	/* BIT1: "0" for internal switch; "1" for external switch */
-	/* BIT2: "0" for one antenna; "1" for two antennas */
-	/* NOTE: the default here is internal switch and one antenna ==>
-	 * BIT1 = 0 and BIT2 = 0
-	 */
-
- u8tmp = 0;
- board_info->btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
-
- if (btcoexist->chip_interface == BTC_INTF_USB)
- btcoexist->btc_write_local_reg_1byte(btcoexist, 0xfe08, u8tmp);
- else if (btcoexist->chip_interface == BTC_INTF_SDIO)
- btcoexist->btc_write_local_reg_1byte(btcoexist, 0x60, u8tmp);
-}
-
-void ex_btc8822b1ant_pre_load_firmware(struct btc_coexist *btcoexist) {}
-
-void ex_btc8822b1ant_init_hw_config(struct btc_coexist *btcoexist,
- bool wifi_only)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (ini hw config) **********\n");
-
- halbtc8822b1ant_init_hw_config(btcoexist, true, wifi_only);
- btcoexist->stop_coex_dm = false;
- btcoexist->auto_report_1ant = true;
-}
-
-void ex_btc8822b1ant_init_coex_dm(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
-
- btcoexist->stop_coex_dm = false;
-
- halbtc8822b1ant_init_coex_dm(btcoexist);
-
- halbtc8822b1ant_query_bt_info(btcoexist);
-}
-
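-/* Dump the coex status (versions, BT profiles, TDMA and coex-table settings,
- * LTE coex and HW registers) into the given seq_file for debug output.
- */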
-void ex_btc8822b1ant_display_coex_info(struct btc_coexist *btcoexist,
- struct seq_file *m)
-{
- struct btc_board_info *board_info = &btcoexist->board_info;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- u8 u8tmp[4], i, ps_tdma_case = 0;
- u16 u16tmp[4];
- u32 u32tmp[4];
- u32 fa_ofdm, fa_cck, cca_ofdm, cca_cck;
- u32 fw_ver = 0, bt_patch_ver = 0, bt_coex_ver = 0;
- static u8 pop_report_in_10s;
- u32 phyver = 0;
- bool lte_coex_on = false;
- static u8 cnt;
-
- seq_puts(m, "\r\n ============[BT Coexist info]============");
-
- if (btcoexist->manual_control) {
- seq_puts(m,
- "\r\n ============[Under Manual Control]============");
- seq_puts(m, "\r\n ==========================================");
- }
- if (btcoexist->stop_coex_dm) {
- seq_puts(m, "\r\n ============[Coex is STOPPED]============");
- seq_puts(m, "\r\n ==========================================");
- }
-
- if (!coex_sta->bt_disabled) {
- if (coex_sta->bt_coex_supported_feature == 0)
- btcoexist->btc_get(
- btcoexist, BTC_GET_U4_SUPPORTED_FEATURE,
- &coex_sta->bt_coex_supported_feature);
-
- if ((coex_sta->bt_coex_supported_version == 0) ||
- (coex_sta->bt_coex_supported_version == 0xffff))
- btcoexist->btc_get(
- btcoexist, BTC_GET_U4_SUPPORTED_VERSION,
- &coex_sta->bt_coex_supported_version);
-
- if (coex_sta->bt_reg_vendor_ac == 0xffff)
- coex_sta->bt_reg_vendor_ac = (u16)(
- btcoexist->btc_get_bt_reg(btcoexist, 3, 0xac) &
- 0xffff);
-
- if (coex_sta->bt_reg_vendor_ae == 0xffff)
- coex_sta->bt_reg_vendor_ae = (u16)(
- btcoexist->btc_get_bt_reg(btcoexist, 3, 0xae) &
- 0xffff);
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
- &bt_patch_ver);
- btcoexist->bt_info.bt_get_fw_ver = bt_patch_ver;
-
- if (coex_sta->num_of_profile > 0) {
- cnt++;
-
- if (cnt >= 3) {
- btcoexist->btc_get_bt_afh_map_from_bt(
- btcoexist, 0, &coex_sta->bt_afh_map[0]);
- cnt = 0;
- }
- }
- }
-
- if (psd_scan->ant_det_try_count == 0) {
- seq_printf(
- m, "\r\n %-35s = %d/ %d/ %s / %d",
- "Ant PG Num/ Mech/ Pos/ RFE", board_info->pg_ant_num,
- board_info->btdm_ant_num,
- (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT ?
- "Main" :
- "Aux"),
- rfe_type->rfe_module_type);
- } else {
- seq_printf(
- m, "\r\n %-35s = %d/ %d/ %s/ %d (%d/%d/%d)",
- "Ant PG Num/ Mech(Ant_Det)/ Pos/ RFE",
- board_info->pg_ant_num,
- board_info->btdm_ant_num_by_ant_det,
- (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT ?
- "Main" :
- "Aux"),
- rfe_type->rfe_module_type, psd_scan->ant_det_try_count,
- psd_scan->ant_det_fail_count, psd_scan->ant_det_result);
-
- if (board_info->btdm_ant_det_finish) {
- if (psd_scan->ant_det_result != 12)
- seq_printf(m, "\r\n %-35s = %s",
- "Ant Det PSD Value",
- psd_scan->ant_det_peak_val);
- else
- seq_printf(m, "\r\n %-35s = %d",
- "Ant Det PSD Value",
- psd_scan->ant_det_psd_scan_peak_val /
- 100);
- }
- }
-
- bt_patch_ver = btcoexist->bt_info.bt_get_fw_ver;
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- phyver = btcoexist->btc_get_bt_phydm_version(btcoexist);
-
- bt_coex_ver = ((coex_sta->bt_coex_supported_version & 0xff00) >> 8);
-
- seq_printf(
- m, "\r\n %-35s = %d_%02x/ 0x%02x/ 0x%02x (%s)",
- "CoexVer WL/ BT_Desired/ BT_Report",
- glcoex_ver_date_8822b_1ant, glcoex_ver_8822b_1ant,
- glcoex_ver_btdesired_8822b_1ant, bt_coex_ver,
- (bt_coex_ver == 0xff ?
- "Unknown" :
- (coex_sta->bt_disabled ? "BT-disable" :
- (bt_coex_ver >= glcoex_ver_btdesired_8822b_1ant ?
- "Match" :
- "Mis-Match"))));
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ v%d/ %c", "W_FW/ B_FW/ Phy/ Kt",
- fw_ver, bt_patch_ver, phyver, coex_sta->cut_version + 65);
-
- seq_printf(m, "\r\n %-35s = %02x %02x %02x ", "AFH Map to BT",
- coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1],
- coex_dm->wifi_chnl_info[2]);
-
- /* wifi status */
- seq_printf(m, "\r\n %-35s", "============[Wifi Status]============");
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_WIFI_STATUS, m);
-
- seq_printf(m, "\r\n %-35s", "============[BT Status]============");
-
- pop_report_in_10s++;
- seq_printf(
- m, "\r\n %-35s = [%s/ %d dBm/ %d/ %d] ",
- "BT [status/ rssi/ retryCnt/ popCnt]",
- ((coex_sta->bt_disabled) ?
- ("disabled") :
- ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page") :
- ((BT_8822B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status) ?
- "non-connected idle" :
- ((coex_dm->bt_status ==
- BT_8822B_1ANT_BT_STATUS_CONNECTED_IDLE) ?
- "connected-idle" :
- "busy")))),
- coex_sta->bt_rssi - 100, coex_sta->bt_retry_cnt,
- coex_sta->pop_event_cnt);
-
- if (pop_report_in_10s >= 5) {
- coex_sta->pop_event_cnt = 0;
- pop_report_in_10s = 0;
- }
-
- if (coex_sta->num_of_profile != 0)
- seq_printf(
- m, "\r\n %-35s = %s%s%s%s%s", "Profiles",
- ((bt_link_info->a2dp_exist) ?
- ((coex_sta->is_bt_a2dp_sink) ? "A2DP sink," :
- "A2DP,") :
- ""),
- ((bt_link_info->sco_exist) ? "HFP," : ""),
- ((bt_link_info->hid_exist) ?
- ((coex_sta->hid_busy_num >= 2) ?
- "HID(4/18)," :
- "HID(2/18),") :
- ""),
- ((bt_link_info->pan_exist) ? "PAN," : ""),
- ((coex_sta->voice_over_HOGP) ? "Voice" : ""));
- else
- seq_printf(m, "\r\n %-35s = None", "Profiles");
-
- if (bt_link_info->a2dp_exist) {
- seq_printf(m, "\r\n %-35s = %s/ %d/ %s",
- "A2DP Rate/Bitpool/Auto_Slot",
- ((coex_sta->is_A2DP_3M) ? "3M" : "No_3M"),
- coex_sta->a2dp_bit_pool,
- ((coex_sta->is_autoslot) ? "On" : "Off"));
- }
-
- if (bt_link_info->hid_exist) {
- seq_printf(m, "\r\n %-35s = %d/ %d", "HID PairNum/Forbid_Slot",
- coex_sta->hid_pair_cnt, coex_sta->forbidden_slot);
- }
-
- seq_printf(m, "\r\n %-35s = %s/ %d/ %s/ 0x%x",
- "Role/RoleSwCnt/IgnWlact/Feature",
- ((bt_link_info->slave_role) ? "Slave" : "Master"),
- coex_sta->cnt_role_switch,
- ((coex_dm->cur_ignore_wlan_act) ? "Yes" : "No"),
- coex_sta->bt_coex_supported_feature);
-
- if ((coex_sta->bt_ble_scan_type & 0x7) != 0x0) {
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "BLEScan Type/TV/Init/Ble",
- coex_sta->bt_ble_scan_type,
- (coex_sta->bt_ble_scan_type & 0x1 ?
- coex_sta->bt_ble_scan_para[0] :
- 0x0),
- (coex_sta->bt_ble_scan_type & 0x2 ?
- coex_sta->bt_ble_scan_para[1] :
- 0x0),
- (coex_sta->bt_ble_scan_type & 0x4 ?
- coex_sta->bt_ble_scan_para[2] :
- 0x0));
- }
-
- seq_printf(m, "\r\n %-35s = %d/ %d/ %d/ %d/ %d",
- "ReInit/ReLink/IgnWlact/Page/NameReq", coex_sta->cnt_reinit,
- coex_sta->cnt_setup_link, coex_sta->cnt_ign_wlan_act,
- coex_sta->cnt_page, coex_sta->cnt_remote_name_req);
-
- halbtc8822b1ant_read_score_board(btcoexist, &u16tmp[0]);
-
- if ((coex_sta->bt_reg_vendor_ae == 0xffff) ||
- (coex_sta->bt_reg_vendor_ac == 0xffff))
- seq_printf(m, "\r\n %-35s = x/ x/ %04x",
- "0xae[4]/0xac[1:0]/Scoreboard", u16tmp[0]);
- else
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ %04x",
- "0xae[4]/0xac[1:0]/Scoreboard",
- (int)((coex_sta->bt_reg_vendor_ae & BIT(4)) >> 4),
- coex_sta->bt_reg_vendor_ac & 0x3, u16tmp[0]);
-
- if (coex_sta->num_of_profile > 0) {
- seq_printf(
- m,
- "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
- "AFH MAP", coex_sta->bt_afh_map[0],
- coex_sta->bt_afh_map[1], coex_sta->bt_afh_map[2],
- coex_sta->bt_afh_map[3], coex_sta->bt_afh_map[4],
- coex_sta->bt_afh_map[5], coex_sta->bt_afh_map[6],
- coex_sta->bt_afh_map[7], coex_sta->bt_afh_map[8],
- coex_sta->bt_afh_map[9]);
- }
-
- for (i = 0; i < BT_INFO_SRC_8822B_1ANT_MAX; i++) {
- if (coex_sta->bt_info_c2h_cnt[i]) {
- seq_printf(
- m,
- "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)",
- glbt_info_src_8822b_1ant[i],
- coex_sta->bt_info_c2h[i][0],
- coex_sta->bt_info_c2h[i][1],
- coex_sta->bt_info_c2h[i][2],
- coex_sta->bt_info_c2h[i][3],
- coex_sta->bt_info_c2h[i][4],
- coex_sta->bt_info_c2h[i][5],
- coex_sta->bt_info_c2h[i][6],
- coex_sta->bt_info_c2h_cnt[i]);
- }
- }
-
- if (btcoexist->manual_control)
- seq_printf(
- m, "\r\n %-35s",
- "============[mechanisms] (before Manual)============");
- else
- seq_printf(m, "\r\n %-35s",
- "============[Mechanisms]============");
-
- ps_tdma_case = coex_dm->cur_ps_tdma;
- seq_printf(m, "\r\n %-35s = %02x %02x %02x %02x %02x (case-%d, %s)",
- "TDMA", coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1],
- coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3],
- coex_dm->ps_tdma_para[4], ps_tdma_case,
- (coex_dm->cur_ps_tdma_on ? "TDMA On" : "TDMA Off"));
-
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
- u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
- u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
- seq_printf(m, "\r\n %-35s = %d/ 0x%x/ 0x%x/ 0x%x",
- "Table/0x6c0/0x6c4/0x6c8", coex_sta->coex_table_type,
- u32tmp[0], u32tmp[1], u32tmp[2]);
-
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x", "0x778/0x6cc", u8tmp[0],
- u32tmp[0]);
-
- seq_printf(m, "\r\n %-35s = %s/ %s/ %s/ %d",
- "AntDiv/BtCtrlLPS/LPRA/PsFail",
- ((board_info->ant_div_cfg) ? "On" : "Off"),
- ((coex_sta->force_lps_ctrl) ? "On" : "Off"),
- ((coex_dm->cur_low_penalty_ra) ? "On" : "Off"),
- coex_sta->cnt_set_ps_state_fail);
-
- u32tmp[0] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist, 0x38);
- lte_coex_on = ((u32tmp[0] & BIT(7)) >> 7) ? true : false;
-
- if (lte_coex_on) {
- u32tmp[0] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0xa0);
- u32tmp[1] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0xa4);
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x",
- "LTE Coex Table W_L/B_L", u32tmp[0] & 0xffff,
- u32tmp[1] & 0xffff);
-
- u32tmp[0] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0xa8);
- u32tmp[1] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0xac);
- u32tmp[2] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0xb0);
- u32tmp[3] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist,
- 0xb4);
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "LTE Break Table W_L/B_L/L_W/L_B",
- u32tmp[0] & 0xffff, u32tmp[1] & 0xffff,
- u32tmp[2] & 0xffff, u32tmp[3] & 0xffff);
- }
-
- /* Hw setting */
- seq_printf(m, "\r\n %-35s", "============[Hw setting]============");
-
- u32tmp[0] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist, 0x38);
- u32tmp[1] = halbtc8822b1ant_ltecoex_indirect_read_reg(btcoexist, 0x54);
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x73);
-
- seq_printf(m, "\r\n %-35s = %s/ %s", "LTE Coex/Path Owner",
- ((lte_coex_on) ? "On" : "Off"),
- ((u8tmp[0] & BIT(2)) ? "WL" : "BT"));
-
- if (lte_coex_on) {
- seq_printf(m, "\r\n %-35s = %d/ %d/ %d/ %d",
- "LTE 3Wire/OPMode/UART/UARTMode",
- (int)((u32tmp[0] & BIT(6)) >> 6),
- (int)((u32tmp[0] & (BIT(5) | BIT(4))) >> 4),
- (int)((u32tmp[0] & BIT(3)) >> 3),
- (int)(u32tmp[0] & (BIT(2) | BIT(1) | BIT(0))));
-
- seq_printf(m, "\r\n %-35s = %d/ %d", "LTE_Busy/UART_Busy",
- (int)((u32tmp[1] & BIT(1)) >> 1),
- (int)(u32tmp[1] & BIT(0)));
- }
- seq_printf(m, "\r\n %-35s = %s (BB:%s)/ %s (BB:%s)/ %s %d",
- "GNT_WL_Ctrl/GNT_BT_Ctrl/Dbg",
- ((u32tmp[0] & BIT(12)) ? "SW" : "HW"),
- ((u32tmp[0] & BIT(8)) ? "SW" : "HW"),
- ((u32tmp[0] & BIT(14)) ? "SW" : "HW"),
- ((u32tmp[0] & BIT(10)) ? "SW" : "HW"),
- ((u8tmp[0] & BIT(3)) ? "On" : "Off"),
- coex_sta->gnt_error_cnt);
-
- seq_printf(m, "\r\n %-35s = %d/ %d", "GNT_WL/GNT_BT",
- (int)((u32tmp[1] & BIT(2)) >> 2),
- (int)((u32tmp[1] & BIT(3)) >> 3));
-
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb0);
- u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xcba);
-
- seq_printf(m, "\r\n %-35s = 0x%04x/ 0x%04x/ 0x%02x %s",
- "0xcb0/0xcb4/0xcb8[23:16]", u32tmp[0], u32tmp[1], u8tmp[0],
- ((u8tmp[0] & 0x1) == 0x1 ? "(BTG)" : "(WL_A+G)"));
-
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
- u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x4c6);
- u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "4c[24:23]/64[0]/4c6[4]/40[5]",
- (int)((u32tmp[0] & (BIT(24) | BIT(23))) >> 23),
- u8tmp[2] & 0x1, (int)((u8tmp[0] & BIT(4)) >> 4),
- (int)((u8tmp[1] & BIT(5)) >> 5));
-
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x953);
- u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0xc50);
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ %s/ 0x%x",
- "0x550/0x522/4-RxAGC/0xc50", u32tmp[0], u8tmp[0],
- (u8tmp[1] & 0x2) ? "On" : "Off", u8tmp[2]);
-
- fa_ofdm = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "PHYDM_INFO_FA_OFDM");
- fa_cck = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "PHYDM_INFO_FA_CCK");
- cca_ofdm = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CCA_OFDM");
- cca_cck = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "PHYDM_INFO_CCA_CCK");
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "CCK-CCA/CCK-FA/OFDM-CCA/OFDM-FA", cca_cck, fa_cck, cca_ofdm,
- fa_ofdm);
-
- seq_printf(m, "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_OK CCK/11g/11n/11ac",
- coex_sta->crc_ok_cck, coex_sta->crc_ok_11g,
- coex_sta->crc_ok_11n, coex_sta->crc_ok_11n_vht);
-
- seq_printf(m, "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_Err CCK/11g/11n/11ac",
- coex_sta->crc_err_cck, coex_sta->crc_err_11g,
- coex_sta->crc_err_11n, coex_sta->crc_err_11n_vht);
-
- seq_printf(m, "\r\n %-35s = %s/ %s/ %s/ %d",
- "WlHiPri/ Locking/ Locked/ Noisy",
- (coex_sta->wifi_is_high_pri_task ? "Yes" : "No"),
- (coex_sta->cck_lock ? "Yes" : "No"),
- (coex_sta->cck_ever_lock ? "Yes" : "No"),
- coex_sta->wl_noisy_level);
-
- seq_printf(m, "\r\n %-35s = %d/ %d", "0x770(Hi-pri rx/tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-
- seq_printf(m, "\r\n %-35s = %d/ %d %s", "0x774(Lo-pri rx/tx)",
- coex_sta->low_priority_rx, coex_sta->low_priority_tx,
- (bt_link_info->slave_role ?
- "(Slave!!)" :
- (coex_sta->is_tdma_btautoslot_hang ?
- "(auto-slot hang!!)" :
- "")));
-
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
-}
-
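-/* IPS enter/leave: on enter, clear the scoreboard bits and switch the antenna
- * to the WLAN-off phase; on leave, re-run HW config and coex DM init and
- * query BT info.
- */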
-void ex_btc8822b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- if (type == BTC_IPS_ENTER) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
- coex_sta->under_ips = true;
-
-		/* Post WL "Non-Active" to the score-board when entering IPS */
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE, false);
-
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ONOFF, false);
-
- halbtc8822b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_WLAN_OFF);
-
- halbtc8822b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- } else if (type == BTC_IPS_LEAVE) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE, true);
-
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ONOFF, true);
-
-		/* leave IPS: run init hw config (exclude wifi only) */
- halbtc8822b1ant_init_hw_config(btcoexist, false, false);
- /*sw all off*/
- halbtc8822b1ant_init_coex_dm(btcoexist);
- /*leave IPS : Query bt info*/
- halbtc8822b1ant_query_bt_info(btcoexist);
-
- coex_sta->under_ips = false;
- }
-}
-
-void ex_btc8822b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static bool pre_force_lps_on;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- if (type == BTC_LPS_ENABLE) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
- coex_sta->under_lps = true;
-
- if (coex_sta->force_lps_ctrl) { /* LPS No-32K */
- /* Write WL "Active" in Score-board for PS-TDMA */
- pre_force_lps_on = true;
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE,
- true);
- } else {
-			/* LPS-32K: need to check whether this H2C 0x71
-			 * works (2015/08/28)
-			 */
- /* Write WL "Non-Active" in Score-board for Native-PS */
- pre_force_lps_on = false;
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE,
- false);
- }
- } else if (type == BTC_LPS_DISABLE) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
- coex_sta->under_lps = false;
-
- /* Write WL "Active" in Score-board for LPS off */
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE, true);
-
- if ((!pre_force_lps_on) && (!coex_sta->force_lps_ctrl))
- halbtc8822b1ant_query_bt_info(btcoexist);
- }
-}
-
-void ex_btc8822b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_connected = false;
- bool wifi_under_5g = false;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- coex_sta->freeze_coexrun_by_btinfo = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
- if (wifi_connected)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** WL connected before SCAN\n");
- else
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** WL is not connected before SCAN\n");
-
- halbtc8822b1ant_query_bt_info(btcoexist);
-
- /*2.4 g 1*/
- if (type == BTC_SCAN_START) {
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
- &wifi_under_5g);
- /*5 g 1*/
-
- if (wifi_under_5g) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (scan_notify_5g_scan_start) **********\n");
- halbtc8822b1ant_action_wifi_under5g(btcoexist);
- return;
- }
-
- /* 2.4G.2.3*/
- coex_sta->wifi_is_high_pri_task = true;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (scan_notify_2g_scan_start) **********\n");
-
- if (!wifi_connected) { /* non-connected scan */
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** wifi is not connected scan **********\n");
- halbtc8822b1ant_action_wifi_not_connected_scan(
- btcoexist);
- } else { /* wifi is connected */
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** wifi is connected scan **********\n");
- halbtc8822b1ant_action_wifi_connected_scan(btcoexist);
- }
-
- return;
- }
-
- if (type == BTC_SCAN_START_2G) {
- coex_sta->wifi_is_high_pri_task = true;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (scan_notify_2g_sacn_start_for_switch_band_used) **********\n");
-
- if (!wifi_connected) { /* non-connected scan */
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** wifi is not connected **********\n");
-
- halbtc8822b1ant_action_wifi_not_connected_scan(
- btcoexist);
- } else { /* wifi is connected */
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** wifi is connected **********\n");
- halbtc8822b1ant_action_wifi_connected_scan(btcoexist);
- }
- } else {
- coex_sta->wifi_is_high_pri_task = false;
-
-		/* 2.4G 5: WL scan finished, then get and update scan AP numbers */
- /*5 g 4*/
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (scan_finish_notify) **********\n");
-
- if (!wifi_connected) { /* non-connected scan */
- halbtc8822b1ant_action_wifi_not_connected(btcoexist);
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** scan_finish_notify wifi is connected **********\n");
- halbtc8822b1ant_action_wifi_connected(btcoexist);
- }
- }
-}
-
-void ex_btc8822b1ant_scan_notify_without_bt(struct btc_coexist *btcoexist,
- u8 type)
-{
- bool wifi_under_5g = false;
-
- if (type == BTC_SCAN_START) {
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
- &wifi_under_5g);
-
- if (wifi_under_5g) {
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd,
- 0x3, 1);
- return;
- }
-
- /* under 2.4G */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd, 0x3, 2);
- return;
- }
- if (type == BTC_SCAN_START_2G)
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd, 0x3, 2);
-}
-
-void ex_btc8822b1ant_switchband_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (switchband_notify) **********\n");
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- coex_sta->switch_band_notify_to = type;
- /*2.4g 4.*/ /*5 g 2*/
- if (type == BTC_SWITCH_TO_5G) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (switchband_notify BTC_SWITCH_TO_5G) **********\n");
-
- halbtc8822b1ant_action_wifi_under5g(btcoexist);
- return;
- } else if (type == BTC_SWITCH_TO_24G_NOFORSCAN) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (switchband_notify BTC_SWITCH_TO_2G (no for scan)) **********\n");
-
- halbtc8822b1ant_run_coexist_mechanism(btcoexist);
- /*5 g 3*/
-
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (switchband_notify BTC_SWITCH_TO_2G) **********\n");
-
- ex_btc8822b1ant_scan_notify(btcoexist, BTC_SCAN_START_2G);
- }
- coex_sta->switch_band_notify_to = BTC_NOT_SWITCH;
-}
-
-void ex_btc8822b1ant_switchband_notify_without_bt(struct btc_coexist *btcoexist,
- u8 type)
-{
- bool wifi_under_5g = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if (type == BTC_SWITCH_TO_5G) {
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd, 0x3, 1);
- return;
- } else if (type == BTC_SWITCH_TO_24G_NOFORSCAN) {
- if (wifi_under_5g)
-
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd,
- 0x3, 1);
-
- else
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd,
- 0x3, 2);
- } else {
- ex_btc8822b1ant_scan_notify_without_bt(btcoexist,
- BTC_SCAN_START_2G);
- }
-}
-
-void ex_btc8822b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_connected = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (connect notify) **********\n");
-
- halbtc8822b1ant_post_state_to_bt(btcoexist,
- BT_8822B_1ANT_SCOREBOARD_SCAN, true);
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- if ((type == BTC_ASSOCIATE_5G_START) ||
- (type == BTC_ASSOCIATE_5G_FINISH)) {
- if (type == BTC_ASSOCIATE_5G_START) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (5G associate start notify) **********\n");
-
- halbtc8822b1ant_action_wifi_under5g(btcoexist);
-
- } else if (type == BTC_ASSOCIATE_5G_FINISH) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (5G associate finish notify) **********\n");
- }
-
- return;
- }
-
- if (type == BTC_ASSOCIATE_START) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2G CONNECT START notify\n");
-
- coex_sta->wifi_is_high_pri_task = true;
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- coex_dm->arp_cnt = 0;
-
- halbtc8822b1ant_action_wifi_not_connected_asso_auth(btcoexist);
-
- coex_sta->freeze_coexrun_by_btinfo = true;
-
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2G CONNECT Finish notify\n");
- coex_sta->wifi_is_high_pri_task = false;
- coex_sta->freeze_coexrun_by_btinfo = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
- if (!wifi_connected) /* non-connected scan */
- halbtc8822b1ant_action_wifi_not_connected(btcoexist);
- else
- halbtc8822b1ant_action_wifi_connected(btcoexist);
- }
-}
-
-void ex_btc8822b1ant_media_status_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_under_b_mode = false;
- bool wifi_under_5g = false;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if (type == BTC_MEDIA_CONNECT) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2g media connect notify");
-
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE, true);
-
- if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 5g media notify\n");
-
- halbtc8822b1ant_action_wifi_under5g(btcoexist);
- return;
- }
- /* Force antenna setup for no scan result issue */
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
- &wifi_under_b_mode);
-
- /* Set CCK Tx/Rx high Pri except 11b mode */
- if (wifi_under_b_mode) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (media status notity under b mode) **********\n");
- btcoexist->btc_write_1byte(btcoexist, 0x6cd,
- 0x00); /* CCK Tx */
- btcoexist->btc_write_1byte(btcoexist, 0x6cf,
- 0x00); /* CCK Rx */
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** (media status notity not under b mode) **********\n");
- btcoexist->btc_write_1byte(btcoexist, 0x6cd,
- 0x00); /* CCK Tx */
- btcoexist->btc_write_1byte(btcoexist, 0x6cf,
- 0x10); /* CCK Rx */
- }
-
- coex_dm->backup_arfr_cnt1 =
- btcoexist->btc_read_4byte(btcoexist, 0x430);
- coex_dm->backup_arfr_cnt2 =
- btcoexist->btc_read_4byte(btcoexist, 0x434);
- coex_dm->backup_retry_limit =
- btcoexist->btc_read_2byte(btcoexist, 0x42a);
- coex_dm->backup_ampdu_max_time =
- btcoexist->btc_read_1byte(btcoexist, 0x456);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2g media disconnect notify\n");
- coex_dm->arp_cnt = 0;
-
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE, false);
-
- btcoexist->btc_write_1byte(btcoexist, 0x6cd, 0x0); /* CCK Tx */
- btcoexist->btc_write_1byte(btcoexist, 0x6cf, 0x0); /* CCK Rx */
-
- coex_sta->cck_ever_lock = false;
- }
-
- halbtc8822b1ant_update_wifi_ch_info(btcoexist, type);
-}
-
-void ex_btc8822b1ant_specific_packet_notify(struct btc_coexist *btcoexist,
- u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool under_4way = false, wifi_under_5g = false;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
- if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 5g special packet notify\n");
-
- halbtc8822b1ant_action_wifi_under5g(btcoexist);
- return;
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
- &under_4way);
-
- if (under_4way) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet ---- under_4way!!\n");
-
- coex_sta->wifi_is_high_pri_task = true;
- coex_sta->specific_pkt_period_cnt = 2;
- } else if (type == BTC_PACKET_ARP) {
- coex_dm->arp_cnt++;
-
- if (coex_sta->wifi_is_high_pri_task) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet ARP notify -cnt = %d\n",
- coex_dm->arp_cnt);
- }
-
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet DHCP or EAPOL notify [Type = %d]\n",
- type);
-
- coex_sta->wifi_is_high_pri_task = true;
- coex_sta->specific_pkt_period_cnt = 2;
- }
-
- if (coex_sta->wifi_is_high_pri_task)
- halbtc8822b1ant_action_wifi_connected_specific_packet(
- btcoexist);
-}
-
-void ex_btc8822b1ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
- u8 length)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 i, rsp_source = 0;
- bool wifi_connected = false;
- bool wifi_scan = false, wifi_link = false, wifi_roam = false,
- wifi_busy = false;
- static bool is_scoreboard_scan;
-
- if (psd_scan->is_ant_det_running) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt_info_notify return for AntDet is running\n");
- return;
- }
-
- rsp_source = tmp_buf[0] & 0xf;
- if (rsp_source >= BT_INFO_SRC_8822B_1ANT_MAX)
- rsp_source = BT_INFO_SRC_8822B_1ANT_WIFI_FW;
- coex_sta->bt_info_c2h_cnt[rsp_source]++;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt_info[%d], len=%d, data=[", rsp_source, length);
-
- for (i = 0; i < length; i++) {
- coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
-
- if (i == length - 1) {
- /* last one */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
- } else {
- /* normal */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "0x%02x, ",
- tmp_buf[i]);
- }
- }
-
- coex_sta->bt_info = coex_sta->bt_info_c2h[rsp_source][1];
- coex_sta->bt_info_ext = coex_sta->bt_info_c2h[rsp_source][4];
- coex_sta->bt_info_ext2 = coex_sta->bt_info_c2h[rsp_source][5];
-
- if (rsp_source != BT_INFO_SRC_8822B_1ANT_WIFI_FW) {
- /* if 0xff, it means BT is under WHCK test */
- coex_sta->bt_whck_test =
- ((coex_sta->bt_info == 0xff) ? true : false);
-
- coex_sta->bt_create_connection =
- ((coex_sta->bt_info_c2h[rsp_source][2] & 0x80) ? true :
- false);
-
- /* unit: %, value-100 to translate to unit: dBm */
- coex_sta->bt_rssi =
- coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
-
- coex_sta->c2h_bt_remote_name_req =
- ((coex_sta->bt_info_c2h[rsp_source][2] & 0x20) ? true :
- false);
-
- coex_sta->is_A2DP_3M =
- ((coex_sta->bt_info_c2h[rsp_source][2] & 0x10) ? true :
- false);
-
- coex_sta->acl_busy =
- ((coex_sta->bt_info_c2h[rsp_source][1] & 0x9) ? true :
- false);
-
- coex_sta->voice_over_HOGP =
- ((coex_sta->bt_info_ext & 0x10) ? true : false);
-
- coex_sta->c2h_bt_inquiry_page =
- ((coex_sta->bt_info & BT_INFO_8822B_1ANT_B_INQ_PAGE) ?
- true :
- false);
-
- coex_sta->a2dp_bit_pool =
- (((coex_sta->bt_info_c2h[rsp_source][1] & 0x49) ==
- 0x49) ?
- (coex_sta->bt_info_c2h[rsp_source][6] & 0x7f) :
- 0);
-
- coex_sta->is_bt_a2dp_sink =
- (coex_sta->bt_info_c2h[rsp_source][6] & 0x80) ? true :
- false;
-
- coex_sta->bt_retry_cnt =
- coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
-
- coex_sta->is_autoslot = coex_sta->bt_info_ext2 & 0x8;
-
- coex_sta->forbidden_slot = coex_sta->bt_info_ext2 & 0x7;
-
- coex_sta->hid_busy_num = (coex_sta->bt_info_ext2 & 0x30) >> 4;
-
- coex_sta->hid_pair_cnt = (coex_sta->bt_info_ext2 & 0xc0) >> 6;
- if (coex_sta->bt_retry_cnt >= 1)
- coex_sta->pop_event_cnt++;
-
- if (coex_sta->c2h_bt_remote_name_req)
- coex_sta->cnt_remote_name_req++;
-
- if (coex_sta->bt_info_ext & BIT(1))
- coex_sta->cnt_reinit++;
-
- if (coex_sta->bt_info_ext & BIT(2)) {
- coex_sta->cnt_setup_link++;
- coex_sta->is_setup_link = true;
- coex_sta->bt_relink_downcount = 2;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Re-Link start in BT info!!\n");
- } else {
- coex_sta->is_setup_link = false;
- coex_sta->bt_relink_downcount = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Re-Link stop in BT info!!\n");
- }
-
- if (coex_sta->bt_info_ext & BIT(3))
- coex_sta->cnt_ign_wlan_act++;
-
- if (coex_sta->bt_info_ext & BIT(6))
- coex_sta->cnt_role_switch++;
-
- if (coex_sta->bt_info_ext & BIT(7))
- coex_sta->is_bt_multi_link = true;
- else
- coex_sta->is_bt_multi_link = false;
-
- if (coex_sta->bt_create_connection) {
- coex_sta->cnt_page++;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY,
- &wifi_busy);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN,
- &wifi_scan);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK,
- &wifi_link);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM,
- &wifi_roam);
-
- if ((wifi_link) || (wifi_roam) || (wifi_scan) ||
- (coex_sta->wifi_is_high_pri_task) || (wifi_busy)) {
- is_scoreboard_scan = true;
- halbtc8822b1ant_post_state_to_bt(
- btcoexist,
- BT_8822B_1ANT_SCOREBOARD_SCAN, true);
-
- } else {
- halbtc8822b1ant_post_state_to_bt(
- btcoexist,
- BT_8822B_1ANT_SCOREBOARD_SCAN, false);
- }
- } else {
- if (is_scoreboard_scan) {
- halbtc8822b1ant_post_state_to_bt(
- btcoexist,
- BT_8822B_1ANT_SCOREBOARD_SCAN, false);
- is_scoreboard_scan = false;
- }
- }
-
-		/* Resend some Wi-Fi info to BT here, because BT has been
-		 * reset and has lost the info.
-		 */
-
- if ((!btcoexist->manual_control) &&
- (!btcoexist->stop_coex_dm)) {
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
- /* Re-Init */
- if ((coex_sta->bt_info_ext & BIT(1))) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
- if (wifi_connected)
- halbtc8822b1ant_update_wifi_ch_info(
- btcoexist, BTC_MEDIA_CONNECT);
- else
- halbtc8822b1ant_update_wifi_ch_info(
- btcoexist,
- BTC_MEDIA_DISCONNECT);
- }
-
- /* If Ignore_WLanAct && not SetUp_Link */
- if ((coex_sta->bt_info_ext & BIT(3)) &&
- (!(coex_sta->bt_info_ext & BIT(2)))) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
- halbtc8822b1ant_ignore_wlan_act(
- btcoexist, FORCE_EXEC, false);
- }
- }
- }
-
- if ((coex_sta->bt_info_ext & BIT(5))) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit4 check, query BLE Scan type!!\n");
- coex_sta->bt_ble_scan_type =
- btcoexist->btc_get_ble_scan_type_from_bt(btcoexist);
-
- if ((coex_sta->bt_ble_scan_type & 0x1) == 0x1)
- coex_sta->bt_ble_scan_para[0] =
- btcoexist->btc_get_ble_scan_para_from_bt(
- btcoexist, 0x1);
- if ((coex_sta->bt_ble_scan_type & 0x2) == 0x2)
- coex_sta->bt_ble_scan_para[1] =
- btcoexist->btc_get_ble_scan_para_from_bt(
- btcoexist, 0x2);
- if ((coex_sta->bt_ble_scan_type & 0x4) == 0x4)
- coex_sta->bt_ble_scan_para[2] =
- btcoexist->btc_get_ble_scan_para_from_bt(
- btcoexist, 0x4);
- }
-
- halbtc8822b1ant_update_bt_link_info(btcoexist);
-
- halbtc8822b1ant_run_coexist_mechanism(btcoexist);
-}
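The C2H buffer parsed above has a fixed byte layout: byte 0 carries the response source in its low nibble, byte 1 the profile flags, byte 2 the retry counter plus connection-setup bits, byte 3 the RSSI, bytes 4-5 the extended info, and byte 6 the A2DP bit pool and sink flag. The standalone sketch below follows the field positions used in ex_btc8822b1ant_bt_info_notify(); the struct and function names are illustrative only.

#include <stdbool.h>
#include <stdint.h>

struct bt_info_decoded {
	uint8_t rsp_source;	/* buf[0] & 0xf */
	uint8_t retry_cnt;	/* buf[2] & 0xf */
	bool create_connection;	/* buf[2] & 0x80 */
	bool remote_name_req;	/* buf[2] & 0x20 */
	int rssi;		/* buf[3] * 2 + 10 */
	uint8_t a2dp_bit_pool;	/* buf[6] & 0x7f, only if (buf[1] & 0x49) == 0x49 */
	bool a2dp_sink;		/* buf[6] & 0x80 */
};

static void bt_info_decode(const uint8_t *buf, struct bt_info_decoded *out)
{
	out->rsp_source = buf[0] & 0xf;
	out->retry_cnt = buf[2] & 0xf;
	out->create_connection = buf[2] & 0x80;
	out->remote_name_req = buf[2] & 0x20;
	out->rssi = buf[3] * 2 + 10;
	out->a2dp_bit_pool = ((buf[1] & 0x49) == 0x49) ? (buf[6] & 0x7f) : 0;
	out->a2dp_sink = buf[6] & 0x80;
}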
-
-void ex_btc8822b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF Status notify\n");
-
- if (type == BTC_RF_ON) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF is turned ON!!\n");
- btcoexist->stop_coex_dm = false;
-
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE, true);
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ONOFF, true);
-
- } else if (type == BTC_RF_OFF) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF is turned OFF!!\n");
-
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE, false);
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ONOFF, false);
- halbtc8822b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_WLAN_OFF);
-		/* for test: S3 BT disappear, fail rate 1/600 */
-
- halbtc8822b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
-
- btcoexist->stop_coex_dm = true;
- }
-}
-
-void ex_btc8822b1ant_halt_notify(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
-
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE, false);
- halbtc8822b1ant_post_state_to_bt(btcoexist,
- BT_8822B_1ANT_SCOREBOARD_ONOFF, false);
-
- halbtc8822b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
-
- halbtc8822b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, FORCE_EXEC,
- BT_8822B_1ANT_PHASE_WLAN_OFF);
-	/* for test: S3 BT disappear, fail rate 1/600 */
-
- halbtc8822b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
-
- ex_btc8822b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
- btcoexist->stop_coex_dm = true;
-}
-
-void ex_btc8822b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_under_5g = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if ((pnp_state == BTC_WIFI_PNP_SLEEP) ||
- (pnp_state == BTC_WIFI_PNP_SLEEP_KEEP_ANT)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
-
- halbtc8822b1ant_post_state_to_bt(
- btcoexist, BT_8822B_1ANT_SCOREBOARD_ACTIVE |
- BT_8822B_1ANT_SCOREBOARD_ONOFF |
- BT_8822B_1ANT_SCOREBOARD_SCAN |
- BT_8822B_1ANT_SCOREBOARD_UNDERTEST,
- false);
-
- if (pnp_state == BTC_WIFI_PNP_SLEEP_KEEP_ANT) {
- if (wifi_under_5g)
- halbtc8822b1ant_set_ant_path(
- btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_5G_RUNTIME);
- else
- halbtc8822b1ant_set_ant_path(
- btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_1ANT_PHASE_2G_RUNTIME);
- } else {
- halbtc8822b1ant_set_ant_path(
- btcoexist, BTC_ANT_PATH_AUTO, FORCE_EXEC,
- BT_8822B_1ANT_PHASE_WLAN_OFF);
- }
-
- btcoexist->stop_coex_dm = true;
- } else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
- btcoexist->stop_coex_dm = false;
- }
-}
-
-void ex_btc8822b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], *****************Coex DM Reset*****************\n");
-
- halbtc8822b1ant_init_hw_config(btcoexist, false, false);
- halbtc8822b1ant_init_coex_dm(btcoexist);
-}
-
-void ex_btc8822b1ant_periodical(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool bt_relink_finish = false;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
-
- if (!btcoexist->auto_report_1ant)
- halbtc8822b1ant_query_bt_info(btcoexist);
-
- halbtc8822b1ant_monitor_bt_ctr(btcoexist);
- halbtc8822b1ant_monitor_wifi_ctr(btcoexist);
-
- halbtc8822b1ant_monitor_bt_enable_disable(btcoexist);
-
- if (coex_sta->bt_relink_downcount != 0) {
- coex_sta->bt_relink_downcount--;
-
- if (coex_sta->bt_relink_downcount == 0) {
- coex_sta->is_setup_link = false;
- bt_relink_finish = true;
- }
- }
-
- /* for 4-way, DHCP, EAPOL packet */
- if (coex_sta->specific_pkt_period_cnt > 0) {
- coex_sta->specific_pkt_period_cnt--;
-
- if ((coex_sta->specific_pkt_period_cnt == 0) &&
- (coex_sta->wifi_is_high_pri_task))
- coex_sta->wifi_is_high_pri_task = false;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ***************** Hi-Pri Task = %s*****************\n",
- (coex_sta->wifi_is_high_pri_task ? "Yes" : "No"));
- }
-
- if (halbtc8822b1ant_is_wifi_status_changed(btcoexist) ||
- (bt_relink_finish) || (coex_sta->is_set_ps_state_fail))
- halbtc8822b1ant_run_coexist_mechanism(btcoexist);
-}
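The periodical handler relies on simple down-counters (bt_relink_downcount, specific_pkt_period_cnt): each is decremented once per tick and triggers an action only on the tick that reaches zero. A standalone sketch of that pattern is below; the helper is hypothetical and not driver code.

#include <stdbool.h>
#include <stdint.h>

/* Decrement a per-tick counter; return true only on the tick it expires. */
static bool downcount_tick(uint8_t *counter)
{
	if (*counter == 0)
		return false;

	(*counter)--;
	return *counter == 0;
}

A caller could then write, for instance, relink_finished = downcount_tick(&relink_downcount); the variable names here are only for illustration.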
-
-void ex_btc8822b1ant_antenna_detection(struct btc_coexist *btcoexist,
- u32 cent_freq, u32 offset, u32 span,
- u32 seconds)
-{
-}
-
-void ex_btc8822b1ant_antenna_isolation(struct btc_coexist *btcoexist,
- u32 cent_freq, u32 offset, u32 span,
- u32 seconds)
-{
-}
-
-void ex_btc8822b1ant_psd_scan(struct btc_coexist *btcoexist, u32 cent_freq,
- u32 offset, u32 span, u32 seconds)
-{
-}
-
-void ex_btc8822b1ant_display_ant_detection(struct btc_coexist *btcoexist) {}
-
-void ex_btc8822b1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code,
- u8 op_len, u8 *pdata)
-{
-}
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h b/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h
deleted file mode 100644
index f1bf83001164..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h
+++ /dev/null
@@ -1,433 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* *******************************************
- * The following is for 8822B 1ANT BT Co-exist definition
- * ********************************************/
-#define BT_INFO_8822B_1ANT_B_FTP BIT(7)
-#define BT_INFO_8822B_1ANT_B_A2DP BIT(6)
-#define BT_INFO_8822B_1ANT_B_HID BIT(5)
-#define BT_INFO_8822B_1ANT_B_SCO_BUSY BIT(4)
-#define BT_INFO_8822B_1ANT_B_ACL_BUSY BIT(3)
-#define BT_INFO_8822B_1ANT_B_INQ_PAGE BIT(2)
-#define BT_INFO_8822B_1ANT_B_SCO_ESCO BIT(1)
-#define BT_INFO_8822B_1ANT_B_CONNECTION BIT(0)
-
-#define BT_INFO_8822B_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \
- (((_BT_INFO_EXT_ & BIT(0))) ? true : false)
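These single-bit flags are meant to be tested individually against the BT info byte reported by the controller, as the coexistence code does when counting active profiles. The sketch below uses the defines above and assumes the kernel BIT() macro; the function itself is illustrative only.

#include <stdbool.h>
#include <stdint.h>

#ifndef BIT
#define BIT(n) (1u << (n))	/* normally provided by kernel headers */
#endif

/* Count active BT profiles from a BT info byte, using the flag bits above. */
static unsigned int bt_profile_count(uint8_t bt_info)
{
	unsigned int n = 0;

	if (bt_info & BT_INFO_8822B_1ANT_B_FTP)		/* treated as PAN */
		n++;
	if (bt_info & BT_INFO_8822B_1ANT_B_A2DP)
		n++;
	if (bt_info & BT_INFO_8822B_1ANT_B_HID)
		n++;
	if (bt_info & BT_INFO_8822B_1ANT_B_SCO_ESCO)
		n++;

	return n;
}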
-
-#define BTC_RSSI_COEX_THRESH_TOL_8822B_1ANT 2
-
-#define BT_8822B_1ANT_WIFI_NOISY_THRESH 150 /* max: 255 */
-#define BT_8822B_1ANT_DEFAULT_ISOLATION 15 /* unit: dB */
-
-/* for Antenna detection */
-#define BT_8822B_1ANT_ANTDET_PSDTHRES_BACKGROUND 50
-#define BT_8822B_1ANT_ANTDET_PSDTHRES_2ANT_BADISOLATION 70
-#define BT_8822B_1ANT_ANTDET_PSDTHRES_2ANT_GOODISOLATION 55
-#define BT_8822B_1ANT_ANTDET_PSDTHRES_1ANT 35
-#define BT_8822B_1ANT_ANTDET_RETRY_INTERVAL \
-	10 /* retry interval if antenna detection fails, unit: second */
-#define BT_8822B_1ANT_ANTDET_ENABLE 0
-#define BT_8822B_1ANT_ANTDET_COEXMECHANISMSWITCH_ENABLE 0
-
-#define BT_8822B_1ANT_LTECOEX_INDIRECTREG_ACCESS_TIMEOUT 30000
-
-enum bt_8822b_1ant_signal_state {
- BT_8822B_1ANT_SIG_STA_SET_TO_LOW = 0x0,
- BT_8822B_1ANT_SIG_STA_SET_BY_HW = 0x0,
- BT_8822B_1ANT_SIG_STA_SET_TO_HIGH = 0x1,
- BT_8822B_1ANT_SIG_STA_MAX
-};
-
-enum bt_8822b_1ant_path_ctrl_owner {
- BT_8822B_1ANT_PCO_BTSIDE = 0x0,
- BT_8822B_1ANT_PCO_WLSIDE = 0x1,
- BT_8822B_1ANT_PCO_MAX
-};
-
-enum bt_8822b_1ant_gnt_ctrl_type {
- BT_8822B_1ANT_GNT_CTRL_BY_PTA = 0x0,
- BT_8822B_1ANT_GNT_CTRL_BY_SW = 0x1,
- BT_8822B_1ANT_GNT_CTRL_MAX
-};
-
-enum bt_8822b_1ant_gnt_ctrl_block {
- BT_8822B_1ANT_GNT_BLOCK_RFC_BB = 0x0,
- BT_8822B_1ANT_GNT_BLOCK_RFC = 0x1,
- BT_8822B_1ANT_GNT_BLOCK_BB = 0x2,
- BT_8822B_1ANT_GNT_BLOCK_MAX
-};
-
-enum bt_8822b_1ant_lte_coex_table_type {
- BT_8822B_1ANT_CTT_WL_VS_LTE = 0x0,
- BT_8822B_1ANT_CTT_BT_VS_LTE = 0x1,
- BT_8822B_1ANT_CTT_MAX
-};
-
-enum bt_8822b_1ant_lte_break_table_type {
- BT_8822B_1ANT_LBTT_WL_BREAK_LTE = 0x0,
- BT_8822B_1ANT_LBTT_BT_BREAK_LTE = 0x1,
- BT_8822B_1ANT_LBTT_LTE_BREAK_WL = 0x2,
- BT_8822B_1ANT_LBTT_LTE_BREAK_BT = 0x3,
- BT_8822B_1ANT_LBTT_MAX
-};
-
-enum bt_info_src_8822b_1ant {
- BT_INFO_SRC_8822B_1ANT_WIFI_FW = 0x0,
- BT_INFO_SRC_8822B_1ANT_BT_RSP = 0x1,
- BT_INFO_SRC_8822B_1ANT_BT_ACTIVE_SEND = 0x2,
- BT_INFO_SRC_8822B_1ANT_MAX
-};
-
-enum bt_8822b_1ant_bt_status {
- BT_8822B_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
- BT_8822B_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
- BT_8822B_1ANT_BT_STATUS_INQ_PAGE = 0x2,
- BT_8822B_1ANT_BT_STATUS_ACL_BUSY = 0x3,
- BT_8822B_1ANT_BT_STATUS_SCO_BUSY = 0x4,
- BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
- BT_8822B_1ANT_BT_STATUS_MAX
-};
-
-enum bt_8822b_1ant_wifi_status {
- BT_8822B_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0,
- BT_8822B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1,
- BT_8822B_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2,
- BT_8822B_1ANT_WIFI_STATUS_CONNECTED_SPECIFIC_PKT = 0x3,
- BT_8822B_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4,
- BT_8822B_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5,
- BT_8822B_1ANT_WIFI_STATUS_MAX
-};
-
-enum bt_8822b_1ant_coex_algo {
- BT_8822B_1ANT_COEX_ALGO_UNDEFINED = 0x0,
- BT_8822B_1ANT_COEX_ALGO_SCO = 0x1,
- BT_8822B_1ANT_COEX_ALGO_HID = 0x2,
- BT_8822B_1ANT_COEX_ALGO_A2DP = 0x3,
- BT_8822B_1ANT_COEX_ALGO_A2DP_PANHS = 0x4,
- BT_8822B_1ANT_COEX_ALGO_PANEDR = 0x5,
- BT_8822B_1ANT_COEX_ALGO_PANHS = 0x6,
- BT_8822B_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
- BT_8822B_1ANT_COEX_ALGO_PANEDR_HID = 0x8,
- BT_8822B_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
- BT_8822B_1ANT_COEX_ALGO_HID_A2DP = 0xa,
- BT_8822B_1ANT_COEX_ALGO_MAX = 0xb,
-};
-
-enum bt_8822b_1ant_ext_ant_switch_type {
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SPDT = 0x0,
- BT_8822B_1ANT_EXT_ANT_SWITCH_USE_SP3T = 0x1,
- BT_8822B_1ANT_EXT_ANT_SWITCH_MAX
-};
-
-enum bt_8822b_1ant_ext_ant_switch_ctrl_type {
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_BBSW = 0x0,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_PTA = 0x1,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_ANTDIV = 0x2,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_MAC = 0x3,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_BY_BT = 0x4,
- BT_8822B_1ANT_EXT_ANT_SWITCH_CTRL_MAX
-};
-
-enum bt_8822b_1ant_ext_ant_switch_pos_type {
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_BT = 0x0,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_WLG = 0x1,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_WLA = 0x2,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_NOCARE = 0x3,
- BT_8822B_1ANT_EXT_ANT_SWITCH_TO_MAX
-};
-
-enum bt_8822b_1ant_phase {
- BT_8822B_1ANT_PHASE_COEX_INIT = 0x0,
- BT_8822B_1ANT_PHASE_WLANONLY_INIT = 0x1,
- BT_8822B_1ANT_PHASE_WLAN_OFF = 0x2,
- BT_8822B_1ANT_PHASE_2G_RUNTIME = 0x3,
- BT_8822B_1ANT_PHASE_5G_RUNTIME = 0x4,
- BT_8822B_1ANT_PHASE_BTMPMODE = 0x5,
- BT_8822B_1ANT_PHASE_MAX
-};
-
-/* Add scoreboard to fix the BT LPS 32K issue while WL is busy */
-enum bt_8822b_1ant_scoreboard {
- BT_8822B_1ANT_SCOREBOARD_ACTIVE = BIT(0),
- BT_8822B_1ANT_SCOREBOARD_ONOFF = BIT(1),
- BT_8822B_1ANT_SCOREBOARD_SCAN = BIT(2),
- BT_8822B_1ANT_SCOREBOARD_UNDERTEST = BIT(3),
- BT_8822B_1ANT_SCOREBOARD_WLBUSY = BIT(6)
-};
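Since each scoreboard state is a distinct bit, several states can be set or cleared in a single post by OR-ing the enum values, as the sleep path in ex_btc8822b1ant_pnp_notify() does. A minimal sketch of that composition follows; the helper is illustrative and not the driver API.

#include <stdint.h>

/* Build the mask cleared when entering PnP sleep, as in
 * ex_btc8822b1ant_pnp_notify(); assumes the enum above is in scope.
 */
static inline uint32_t scoreboard_sleep_mask(void)
{
	return BT_8822B_1ANT_SCOREBOARD_ACTIVE |
	       BT_8822B_1ANT_SCOREBOARD_ONOFF |
	       BT_8822B_1ANT_SCOREBOARD_SCAN |
	       BT_8822B_1ANT_SCOREBOARD_UNDERTEST;
}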
-
-struct coex_dm_8822b_1ant {
- /* hw setting */
- u32 pre_ant_pos_type;
- u32 cur_ant_pos_type;
- /* fw mechanism */
- bool cur_ignore_wlan_act;
- bool pre_ignore_wlan_act;
- u8 pre_ps_tdma;
- u8 cur_ps_tdma;
- u8 ps_tdma_para[5];
- u8 ps_tdma_du_adj_type;
- bool auto_tdma_adjust;
- bool pre_ps_tdma_on;
- bool cur_ps_tdma_on;
- bool pre_bt_auto_report;
- bool cur_bt_auto_report;
- u8 pre_lps;
- u8 cur_lps;
- u8 pre_rpwm;
- u8 cur_rpwm;
-
- /* sw mechanism */
- bool pre_low_penalty_ra;
- bool cur_low_penalty_ra;
- u32 pre_val0x6c0;
- u32 cur_val0x6c0;
- u32 pre_val0x6c4;
- u32 cur_val0x6c4;
- u32 pre_val0x6c8;
- u32 cur_val0x6c8;
- u8 pre_val0x6cc;
- u8 cur_val0x6cc;
- bool limited_dig;
-
- u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */
- u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */
- u16 backup_retry_limit;
- u8 backup_ampdu_max_time;
-
- /* algorithm related */
- u8 pre_algorithm;
- u8 cur_algorithm;
- u8 bt_status;
- u8 wifi_chnl_info[3];
-
- u32 pre_ra_mask;
- u32 cur_ra_mask;
- u8 pre_arfr_type;
- u8 cur_arfr_type;
- u8 pre_retry_limit_type;
- u8 cur_retry_limit_type;
- u8 pre_ampdu_time_type;
- u8 cur_ampdu_time_type;
- u32 arp_cnt;
-
- u32 pre_ext_ant_switch_status;
- u32 cur_ext_ant_switch_status;
-
- u8 error_condition;
-};
-
-struct coex_sta_8822b_1ant {
- bool bt_disabled;
- bool bt_link_exist;
- bool sco_exist;
- bool a2dp_exist;
- bool hid_exist;
- bool pan_exist;
- u8 num_of_profile;
-
- bool under_lps;
- bool under_ips;
- u32 specific_pkt_period_cnt;
- u32 high_priority_tx;
- u32 high_priority_rx;
- u32 low_priority_tx;
- u32 low_priority_rx;
- bool is_hi_pri_rx_overhead;
- s8 bt_rssi;
- u8 pre_bt_rssi_state;
- u8 pre_wifi_rssi_state[4];
- u8 bt_info_c2h[BT_INFO_SRC_8822B_1ANT_MAX][10];
- u32 bt_info_c2h_cnt[BT_INFO_SRC_8822B_1ANT_MAX];
- bool bt_whck_test;
- bool c2h_bt_inquiry_page;
- bool c2h_bt_remote_name_req;
- bool c2h_bt_page; /* Add for win8.1 page out issue */
- bool wifi_is_high_pri_task; /* Add for win8.1 page out issue */
-
- u8 bt_info_ext;
- u8 bt_info_ext2;
- u32 pop_event_cnt;
- u8 scan_ap_num;
- u8 bt_retry_cnt;
-
- u32 crc_ok_cck;
- u32 crc_ok_11g;
- u32 crc_ok_11n;
- u32 crc_ok_11n_vht;
-
- u32 crc_err_cck;
- u32 crc_err_11g;
- u32 crc_err_11n;
- u32 crc_err_11n_vht;
-
- bool cck_lock;
- bool pre_ccklock;
- bool cck_ever_lock;
- u8 coex_table_type;
-
- bool force_lps_ctrl;
-
- bool concurrent_rx_mode_on;
-
- u16 score_board;
- u8 isolation_btween_wb; /* 0~ 50 */
-
- u8 a2dp_bit_pool;
- u8 cut_version;
- bool acl_busy;
- bool bt_create_connection;
-
- u32 bt_coex_supported_feature;
- u32 bt_coex_supported_version;
-
- u8 bt_ble_scan_type;
- u32 bt_ble_scan_para[3];
-
- bool run_time_state;
- bool freeze_coexrun_by_btinfo;
-
- bool is_A2DP_3M;
- bool voice_over_HOGP;
- u8 bt_info;
- bool is_autoslot;
- u8 forbidden_slot;
- u8 hid_busy_num;
- u8 hid_pair_cnt;
-
- u32 cnt_remote_name_req;
- u32 cnt_setup_link;
- u32 cnt_reinit;
- u32 cnt_ign_wlan_act;
- u32 cnt_page;
- u32 cnt_role_switch;
-
- u16 bt_reg_vendor_ac;
- u16 bt_reg_vendor_ae;
-
- bool is_setup_link;
- u8 wl_noisy_level;
- u32 gnt_error_cnt;
- u8 bt_afh_map[10];
- u8 bt_relink_downcount;
- bool is_tdma_btautoslot;
- bool is_tdma_btautoslot_hang;
-
- u8 switch_band_notify_to;
- bool is_rf_state_off;
-
- bool is_hid_low_pri_tx_overhead;
- bool is_bt_multi_link;
- bool is_bt_a2dp_sink;
- bool rf4ce_enabled;
-
- bool is_set_ps_state_fail;
- u8 cnt_set_ps_state_fail;
-};
-
-struct rfe_type_8822b_1ant {
- u8 rfe_module_type;
- bool ext_ant_switch_exist;
- u8 ext_ant_switch_type;
-	/* if 0: ANTSW(rfe_sel9)=0, ANTSWB(rfe_sel8)=1 => Ant to BT/5G */
- u8 ext_ant_switch_ctrl_polarity;
-};
-
-#define BT_8822B_1ANT_ANTDET_PSD_POINTS 256 /* MAX:1024 */
-#define BT_8822B_1ANT_ANTDET_PSD_AVGNUM 1 /* MAX:3 */
-#define BT_8822B_1ANT_ANTDET_BUF_LEN 16
-
-struct psdscan_sta_8822b_1ant {
- u32 ant_det_bt_le_channel; /* BT LE Channel ex:2412 */
- u32 ant_det_bt_tx_time;
- u32 ant_det_pre_psdscan_peak_val;
- bool ant_det_is_ant_det_available;
- u32 ant_det_psd_scan_peak_val;
- bool ant_det_is_btreply_available;
- u32 ant_det_psd_scan_peak_freq;
-
- u8 ant_det_result;
- u8 ant_det_peak_val[BT_8822B_1ANT_ANTDET_BUF_LEN];
- u8 ant_det_peak_freq[BT_8822B_1ANT_ANTDET_BUF_LEN];
- u32 ant_det_try_count;
- u32 ant_det_fail_count;
- u32 ant_det_inteval_count;
- u32 ant_det_thres_offset;
-
- u32 real_cent_freq;
- s32 real_offset;
- u32 real_span;
-
- u32 psd_band_width; /* unit: Hz */
- u32 psd_point; /* 128/256/512/1024 */
- u32 psd_report[1024]; /* unit:dB (20logx), 0~255 */
- u32 psd_report_max_hold[1024]; /* unit:dB (20logx), 0~255 */
- u32 psd_start_point;
- u32 psd_stop_point;
- u32 psd_max_value_point;
- u32 psd_max_value;
- u32 psd_start_base;
- u32 psd_avg_num; /* 1/8/16/32 */
- u32 psd_gen_count;
- bool is_psd_running;
- bool is_psd_show_max_only;
- bool is_ant_det_running;
-};
-
-/* *******************************************
- * The following are the interfaces that notify the coex module.
- * ********************************************/
-void ex_btc8822b1ant_power_on_setting(struct btc_coexist *btcoexist);
-void ex_btc8822b1ant_pre_load_firmware(struct btc_coexist *btcoexist);
-void ex_btc8822b1ant_init_hw_config(struct btc_coexist *btcoexist,
- bool wifi_only);
-void ex_btc8822b1ant_init_coex_dm(struct btc_coexist *btcoexist);
-void ex_btc8822b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b1ant_scan_notify_without_bt(struct btc_coexist *btcoexist,
- u8 type);
-void ex_btc8822b1ant_switchband_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b1ant_switchband_notify_without_bt(struct btc_coexist *btcoexist,
- u8 type);
-void ex_btc8822b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b1ant_media_status_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_btc8822b1ant_specific_packet_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_btc8822b1ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
- u8 length);
-void ex_btc8822b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b1ant_halt_notify(struct btc_coexist *btcoexist);
-void ex_btc8822b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
-void ex_halbtc8822b1ant_score_board_status_notify(struct btc_coexist *btcoexist,
- u8 *tmp_buf, u8 length);
-void ex_btc8822b1ant_coex_dm_reset(struct btc_coexist *btcoexist);
-void ex_btc8822b1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8822b1ant_display_coex_info(struct btc_coexist *btcoexist,
- struct seq_file *m);
-void ex_btc8822b1ant_antenna_detection(struct btc_coexist *btcoexist,
- u32 cent_freq, u32 offset, u32 span,
- u32 seconds);
-void ex_btc8822b1ant_antenna_isolation(struct btc_coexist *btcoexist,
- u32 cent_freq, u32 offset, u32 span,
- u32 seconds);
-
-void ex_btc8822b1ant_psd_scan(struct btc_coexist *btcoexist, u32 cent_freq,
- u32 offset, u32 span, u32 seconds);
-void ex_btc8822b1ant_display_ant_detection(struct btc_coexist *btcoexist);
-
-void ex_btc8822b1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code,
- u8 op_len, u8 *pdata);
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c b/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c
deleted file mode 100644
index 7e6071059a95..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c
+++ /dev/null
@@ -1,5210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-/* ************************************************************
- * Description:
- *
- * This file is for RTL8822B Co-exist mechanism
- *
- * History
- * 2012/11/15 Cosa first check in.
- *
- * *************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "halbt_precomp.h"
-
-/* ************************************************************
- * Global variables, these are static variables
- * *************************************************************/
-static struct coex_dm_8822b_2ant glcoex_dm_8822b_2ant;
-static struct coex_dm_8822b_2ant *coex_dm = &glcoex_dm_8822b_2ant;
-static struct coex_sta_8822b_2ant glcoex_sta_8822b_2ant;
-static struct coex_sta_8822b_2ant *coex_sta = &glcoex_sta_8822b_2ant;
-static struct psdscan_sta_8822b_2ant gl_psd_scan_8822b_2ant;
-static struct psdscan_sta_8822b_2ant *psd_scan = &gl_psd_scan_8822b_2ant;
-static struct rfe_type_8822b_2ant gl_rfe_type_8822b_2ant;
-static struct rfe_type_8822b_2ant *rfe_type = &gl_rfe_type_8822b_2ant;
-
-static const char *const glbt_info_src_8822b_2ant[] = {
- "BT Info[wifi fw]", "BT Info[bt rsp]", "BT Info[bt auto report]",
-};
-
-static u32 glcoex_ver_date_8822b_2ant = 20170327;
-static u32 glcoex_ver_8822b_2ant = 0x44;
-static u32 glcoex_ver_btdesired_8822b_2ant = 0x42;
-
-/* ************************************************************
- * local function proto type if needed
- * ************************************************************
- * ************************************************************
- * local function start with halbtc8822b2ant_
- * *************************************************************/
-static u8 halbtc8822b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
- u8 *ppre_bt_rssi_state, u8 level_num,
- u8 rssi_thresh, u8 rssi_thresh1)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- s32 bt_rssi = 0;
- u8 bt_rssi_state = *ppre_bt_rssi_state;
-
- bt_rssi = coex_sta->bt_rssi;
-
- if (level_num == 2) {
- if ((*ppre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
- (*ppre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- if (bt_rssi >=
- (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8822B_2ANT))
- bt_rssi_state = BTC_RSSI_STATE_HIGH;
- else
- bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- } else {
- if (bt_rssi < rssi_thresh)
- bt_rssi_state = BTC_RSSI_STATE_LOW;
- else
- bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- }
- } else if (level_num == 3) {
- if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
- return *ppre_bt_rssi_state;
- }
-
- if ((*ppre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
- (*ppre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- if (bt_rssi >=
- (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8822B_2ANT))
- bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- else
- bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- } else if ((*ppre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (*ppre_bt_rssi_state ==
- BTC_RSSI_STATE_STAY_MEDIUM)) {
- if (bt_rssi >= (rssi_thresh1 +
- BTC_RSSI_COEX_THRESH_TOL_8822B_2ANT))
- bt_rssi_state = BTC_RSSI_STATE_HIGH;
- else if (bt_rssi < rssi_thresh)
- bt_rssi_state = BTC_RSSI_STATE_LOW;
- else
- bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- } else {
- if (bt_rssi < rssi_thresh1)
- bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- else
- bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- }
- }
-
- *ppre_bt_rssi_state = bt_rssi_state;
-
- return bt_rssi_state;
-}
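The state machine above applies hysteresis: once in the LOW state, the RSSI must exceed the threshold plus a tolerance (BTC_RSSI_COEX_THRESH_TOL_8822B_2ANT; the 1ANT counterpart earlier in this patch is 2) before switching HIGH, and must drop below the bare threshold to fall back, so a value hovering near the threshold does not toggle the state every poll. A standalone sketch of the two-level case, for illustration only:

#include <stdbool.h>

enum rssi_state { RSSI_LOW, RSSI_HIGH };

/* Two-level RSSI hysteresis: with thresh = 40 and tol = 2, an RSSI bouncing
 * between 40 and 41 stays LOW; it must reach 42 to go HIGH and must drop
 * below 40 to return to LOW.
 */
static enum rssi_state rssi_update(enum rssi_state prev, int rssi,
				   int thresh, int tol)
{
	if (prev == RSSI_LOW)
		return (rssi >= thresh + tol) ? RSSI_HIGH : RSSI_LOW;

	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}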
-
-static u8 halbtc8822b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
- u8 *pprewifi_rssi_state, u8 level_num,
- u8 rssi_thresh, u8 rssi_thresh1)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- s32 wifi_rssi = 0;
- u8 wifi_rssi_state = *pprewifi_rssi_state;
-
- btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
-
- if (level_num == 2) {
- if ((*pprewifi_rssi_state == BTC_RSSI_STATE_LOW) ||
- (*pprewifi_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- if (wifi_rssi >=
- (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8822B_2ANT))
- wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- } else {
- if (wifi_rssi < rssi_thresh)
- wifi_rssi_state = BTC_RSSI_STATE_LOW;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- }
- } else if (level_num == 3) {
- if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
- return *pprewifi_rssi_state;
- }
-
- if ((*pprewifi_rssi_state == BTC_RSSI_STATE_LOW) ||
- (*pprewifi_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- if (wifi_rssi >=
- (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8822B_2ANT))
- wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- } else if ((*pprewifi_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (*pprewifi_rssi_state ==
- BTC_RSSI_STATE_STAY_MEDIUM)) {
- if (wifi_rssi >= (rssi_thresh1 +
- BTC_RSSI_COEX_THRESH_TOL_8822B_2ANT))
- wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- else if (wifi_rssi < rssi_thresh)
- wifi_rssi_state = BTC_RSSI_STATE_LOW;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- } else {
- if (wifi_rssi < rssi_thresh1)
- wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- else
- wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- }
- }
-
- *pprewifi_rssi_state = wifi_rssi_state;
-
- return wifi_rssi_state;
-}
-
-static void halbtc8822b2ant_coex_switch_threshold(struct btc_coexist *btcoexist,
- u8 isolation_measuared)
-{
- s8 interference_wl_tx = 0, interference_bt_tx = 0;
-
- interference_wl_tx =
- BT_8822B_2ANT_WIFI_MAX_TX_POWER - isolation_measuared;
- interference_bt_tx =
- BT_8822B_2ANT_BT_MAX_TX_POWER - isolation_measuared;
-
- coex_sta->wifi_coex_thres = BT_8822B_2ANT_WIFI_RSSI_COEXSWITCH_THRES1;
- coex_sta->wifi_coex_thres2 = BT_8822B_2ANT_WIFI_RSSI_COEXSWITCH_THRES2;
-
- coex_sta->bt_coex_thres = BT_8822B_2ANT_BT_RSSI_COEXSWITCH_THRES1;
- coex_sta->bt_coex_thres2 = BT_8822B_2ANT_BT_RSSI_COEXSWITCH_THRES2;
-}
-
-static void halbtc8822b2ant_query_bt_info(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[1] = {0};
-
- if (coex_sta->bt_disabled) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No query BT info because BT is disabled!\n");
- return;
- }
-
- h2c_parameter[0] |= BIT(0); /* trigger */
-
- btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
-}
-
-static void halbtc8822b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
-{
- u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
- u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
- static u8 num_of_bt_counter_chk, cnt_slave, cnt_autoslot_hang;
-
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- reg_hp_txrx = 0x770;
- reg_lp_txrx = 0x774;
-
- u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
- reg_hp_tx = u32tmp & MASKLWORD;
- reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
-
- u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
- reg_lp_tx = u32tmp & MASKLWORD;
- reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
-
- coex_sta->high_priority_tx = reg_hp_tx;
- coex_sta->high_priority_rx = reg_hp_rx;
- coex_sta->low_priority_tx = reg_lp_tx;
- coex_sta->low_priority_rx = reg_lp_rx;
-
- /* reset counter */
- btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
-
- if ((coex_sta->low_priority_tx > 1050) &&
- (!coex_sta->c2h_bt_inquiry_page))
- coex_sta->pop_event_cnt++;
-
- if ((coex_sta->low_priority_rx >= 950) &&
- (coex_sta->low_priority_rx >= coex_sta->low_priority_tx) &&
- (!coex_sta->under_ips) && (!coex_sta->c2h_bt_inquiry_page) &&
- (coex_sta->bt_link_exist)) {
- if (cnt_slave >= 2) {
- bt_link_info->slave_role = true;
- cnt_slave = 2;
- } else {
- cnt_slave++;
- }
- } else {
- if (cnt_slave == 0) {
- bt_link_info->slave_role = false;
- cnt_slave = 0;
- } else {
- cnt_slave--;
- }
- }
-
- if (coex_sta->is_tdma_btautoslot) {
- if ((coex_sta->low_priority_tx >= 1300) &&
- (coex_sta->low_priority_rx <= 150)) {
- if (cnt_autoslot_hang >= 2) {
- coex_sta->is_tdma_btautoslot_hang = true;
- cnt_autoslot_hang = 2;
- } else {
- cnt_autoslot_hang++;
- }
- } else {
- if (cnt_autoslot_hang == 0) {
- coex_sta->is_tdma_btautoslot_hang = false;
- cnt_autoslot_hang = 0;
- } else {
- cnt_autoslot_hang--;
- }
- }
- }
-
- if (coex_sta->sco_exist) {
- if ((coex_sta->high_priority_tx >= 400) &&
- (coex_sta->high_priority_rx >= 400))
- coex_sta->is_esco_mode = false;
- else
- coex_sta->is_esco_mode = true;
- }
-
- if (bt_link_info->hid_only) {
- if (coex_sta->low_priority_rx > 50)
- coex_sta->is_hid_low_pri_tx_overhead = true;
- else
- coex_sta->is_hid_low_pri_tx_overhead = false;
- }
-
- if ((coex_sta->high_priority_tx == 0) &&
- (coex_sta->high_priority_rx == 0) &&
- (coex_sta->low_priority_tx == 0) &&
- (coex_sta->low_priority_rx == 0)) {
- num_of_bt_counter_chk++;
- if (num_of_bt_counter_chk >= 3) {
- halbtc8822b2ant_query_bt_info(btcoexist);
- num_of_bt_counter_chk = 0;
- }
- }
-}
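The 32-bit reads of 0x770 and 0x774 above pack two counters each: the low 16 bits are the TX count and the high 16 bits the RX count (MASKLWORD/MASKHWORD in the driver). A standalone sketch of that split, with an illustrative helper name:

#include <stdint.h>

/* Split a high/low-priority counter register into its TX and RX halves. */
static void split_prio_counter(uint32_t reg, uint32_t *tx, uint32_t *rx)
{
	*tx = reg & 0xffff;		/* MASKLWORD */
	*rx = (reg >> 16) & 0xffff;	/* (MASKHWORD) >> 16 */
}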
-
-static void halbtc8822b2ant_monitor_wifi_ctr(struct btc_coexist *btcoexist)
-{
- s32 wifi_rssi = 0;
- bool wifi_busy = false, wifi_under_b_mode = false, wifi_scan = false;
- bool bt_idle = false;
- static u8 cck_lock_counter, wl_noisy_count0, wl_noisy_count1 = 3,
- wl_noisy_count2;
- u32 total_cnt, cck_cnt;
- u32 cnt_crcok = 0, cnt_crcerr = 0;
- static u8 cnt;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
- &wifi_under_b_mode);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &wifi_scan);
-
- coex_sta->crc_ok_cck = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_OK_CCK");
- coex_sta->crc_ok_11g = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_OK_LEGACY");
- coex_sta->crc_ok_11n = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_OK_HT");
- coex_sta->crc_ok_11n_vht = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_OK_VHT");
-
- coex_sta->crc_err_cck = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_ERROR_CCK");
- coex_sta->crc_err_11g = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_ERROR_LEGACY");
- coex_sta->crc_err_11n = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_ERROR_HT");
- coex_sta->crc_err_11n_vht = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CRC32_ERROR_VHT");
-
- cnt_crcok = coex_sta->crc_ok_cck + coex_sta->crc_ok_11g +
- coex_sta->crc_ok_11n + coex_sta->crc_ok_11n_vht;
-
- cnt_crcerr = coex_sta->crc_err_cck + coex_sta->crc_err_11g +
- coex_sta->crc_err_11n + coex_sta->crc_err_11n_vht;
-
- if ((wifi_busy) && (cnt_crcerr != 0)) {
- coex_sta->now_crc_ratio = cnt_crcok / cnt_crcerr;
-
- if (cnt == 0)
- coex_sta->acc_crc_ratio = coex_sta->now_crc_ratio;
- else
- coex_sta->acc_crc_ratio =
- (coex_sta->acc_crc_ratio * 7 +
- coex_sta->now_crc_ratio * 3) /
- 10;
-
- if (cnt >= 10)
- cnt = 0;
- else
- cnt++;
- }
-
- cck_cnt = coex_sta->crc_ok_cck + coex_sta->crc_err_cck;
-
- if ((coex_dm->bt_status ==
- BT_8822B_2ANT_BT_STATUS_NON_CONNECTED_IDLE) ||
- (coex_dm->bt_status == BT_8822B_2ANT_BT_STATUS_CONNECTED_IDLE) ||
- (coex_sta->bt_disabled))
- bt_idle = true;
-
- if (cck_cnt > 250) {
- if (wl_noisy_count2 < 3)
- wl_noisy_count2++;
-
- if (wl_noisy_count2 == 3) {
- wl_noisy_count0 = 0;
- wl_noisy_count1 = 0;
- }
-
- } else if (cck_cnt < 50) {
- if (wl_noisy_count0 < 3)
- wl_noisy_count0++;
-
- if (wl_noisy_count0 == 3) {
- wl_noisy_count1 = 0;
- wl_noisy_count2 = 0;
- }
-
- } else {
- if (wl_noisy_count1 < 3)
- wl_noisy_count1++;
-
- if (wl_noisy_count1 == 3) {
- wl_noisy_count0 = 0;
- wl_noisy_count2 = 0;
- }
- }
-
- if (wl_noisy_count2 == 3)
- coex_sta->wl_noisy_level = 2;
- else if (wl_noisy_count1 == 3)
- coex_sta->wl_noisy_level = 1;
- else
- coex_sta->wl_noisy_level = 0;
-
- if ((wifi_busy) && (wifi_rssi >= 30) && (!wifi_under_b_mode)) {
- total_cnt = cnt_crcok;
-
- if ((coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_ACL_BUSY) ||
- (coex_dm->bt_status ==
- BT_8822B_1ANT_BT_STATUS_ACL_SCO_BUSY) ||
- (coex_dm->bt_status == BT_8822B_1ANT_BT_STATUS_SCO_BUSY)) {
- if (coex_sta->crc_ok_cck >
- (total_cnt - coex_sta->crc_ok_cck)) {
- if (cck_lock_counter < 3)
- cck_lock_counter++;
- } else {
- if (cck_lock_counter > 0)
- cck_lock_counter--;
- }
-
- } else {
- if (cck_lock_counter > 0)
- cck_lock_counter--;
- }
- } else {
- if (cck_lock_counter > 0)
- cck_lock_counter--;
- }
-
- if (!coex_sta->pre_ccklock) {
- if (cck_lock_counter >= 3)
- coex_sta->cck_lock = true;
- else
- coex_sta->cck_lock = false;
- } else {
- if (cck_lock_counter == 0)
- coex_sta->cck_lock = false;
- else
- coex_sta->cck_lock = true;
- }
-
- if (coex_sta->cck_lock)
- coex_sta->cck_ever_lock = true;
-
- coex_sta->pre_ccklock = coex_sta->cck_lock;
-}
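The accumulated CRC ratio above is a 70/30 weighted blend of the previous accumulator and the newest CRC-OK/CRC-error sample, computed in integer arithmetic; for example, acc = 10 and now = 20 yields (10*7 + 20*3)/10 = 13. A standalone sketch of that update, for illustration only:

#include <stdint.h>

/* Weighted 70/30 update of the accumulated CRC ratio; integer division,
 * so the result truncates.
 */
static uint32_t crc_ratio_update(uint32_t acc, uint32_t now, int first_sample)
{
	if (first_sample)
		return now;

	return (acc * 7 + now * 3) / 10;	/* e.g. acc=10, now=20 -> 13 */
}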
-
-static bool
-halbtc8822b2ant_is_wifibt_status_changed(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static bool pre_wifi_busy, pre_under_4way, pre_bt_hs_on, pre_bt_off,
- pre_bt_slave, pre_hid_low_pri_tx_overhead, pre_wifi_under_lps,
- pre_bt_setup_link;
- static u8 pre_hid_busy_num, pre_wl_noisy_level;
- bool wifi_busy = false, under_4way = false, bt_hs_on = false;
- bool wifi_connected = false;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
- &under_4way);
-
- if (coex_sta->bt_disabled != pre_bt_off) {
- pre_bt_off = coex_sta->bt_disabled;
-
- if (coex_sta->bt_disabled)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is disabled !!\n");
- else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is enabled !!\n");
-
- coex_sta->bt_coex_supported_feature = 0;
- coex_sta->bt_coex_supported_version = 0;
- coex_sta->bt_ble_scan_type = 0;
- coex_sta->bt_ble_scan_para[0] = 0;
- coex_sta->bt_ble_scan_para[1] = 0;
- coex_sta->bt_ble_scan_para[2] = 0;
- coex_sta->bt_reg_vendor_ac = 0xffff;
- coex_sta->bt_reg_vendor_ae = 0xffff;
- return true;
- }
-
- if (wifi_connected) {
- if (wifi_busy != pre_wifi_busy) {
- pre_wifi_busy = wifi_busy;
- return true;
- }
- if (under_4way != pre_under_4way) {
- pre_under_4way = under_4way;
- return true;
- }
- if (bt_hs_on != pre_bt_hs_on) {
- pre_bt_hs_on = bt_hs_on;
- return true;
- }
- if (coex_sta->wl_noisy_level != pre_wl_noisy_level) {
- pre_wl_noisy_level = coex_sta->wl_noisy_level;
- return true;
- }
- if (coex_sta->under_lps != pre_wifi_under_lps) {
- pre_wifi_under_lps = coex_sta->under_lps;
- if (coex_sta->under_lps)
- return true;
- }
- }
-
- if (!coex_sta->bt_disabled) {
- if (coex_sta->hid_busy_num != pre_hid_busy_num) {
- pre_hid_busy_num = coex_sta->hid_busy_num;
- return true;
- }
-
- if (bt_link_info->slave_role != pre_bt_slave) {
- pre_bt_slave = bt_link_info->slave_role;
- return true;
- }
-
- if (pre_hid_low_pri_tx_overhead !=
- coex_sta->is_hid_low_pri_tx_overhead) {
- pre_hid_low_pri_tx_overhead =
- coex_sta->is_hid_low_pri_tx_overhead;
- return true;
- }
-
- if (pre_bt_setup_link != coex_sta->is_setup_link) {
- pre_bt_setup_link = coex_sta->is_setup_link;
- return true;
- }
- }
-
- return false;
-}
-
-static void halbtc8822b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- bool bt_busy = false;
-
- coex_sta->num_of_profile = 0;
-
- /* set link exist status */
- if (!(coex_sta->bt_info & BT_INFO_8822B_1ANT_B_CONNECTION)) {
- coex_sta->bt_link_exist = false;
- coex_sta->pan_exist = false;
- coex_sta->a2dp_exist = false;
- coex_sta->hid_exist = false;
- coex_sta->sco_exist = false;
- } else { /* connection exists */
- coex_sta->bt_link_exist = true;
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_FTP) {
- coex_sta->pan_exist = true;
- coex_sta->num_of_profile++;
- } else {
- coex_sta->pan_exist = false;
- }
-
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_A2DP) {
- coex_sta->a2dp_exist = true;
- coex_sta->num_of_profile++;
- } else {
- coex_sta->a2dp_exist = false;
- }
-
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_HID) {
- coex_sta->hid_exist = true;
- coex_sta->num_of_profile++;
- } else {
- coex_sta->hid_exist = false;
- }
-
- if (coex_sta->bt_info & BT_INFO_8822B_1ANT_B_SCO_ESCO) {
- coex_sta->sco_exist = true;
- coex_sta->num_of_profile++;
- } else {
- coex_sta->sco_exist = false;
- }
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
-
- bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
- bt_link_info->sco_exist = coex_sta->sco_exist;
- bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
- bt_link_info->pan_exist = coex_sta->pan_exist;
- bt_link_info->hid_exist = coex_sta->hid_exist;
- bt_link_info->acl_busy = coex_sta->acl_busy;
-
- /* work around for HS mode. */
- if (bt_hs_on) {
- bt_link_info->pan_exist = true;
- bt_link_info->bt_link_exist = true;
- }
-
- /* check if Sco only */
- if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
- !bt_link_info->pan_exist && !bt_link_info->hid_exist)
- bt_link_info->sco_only = true;
- else
- bt_link_info->sco_only = false;
-
- /* check if A2dp only */
- if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
- !bt_link_info->pan_exist && !bt_link_info->hid_exist)
- bt_link_info->a2dp_only = true;
- else
- bt_link_info->a2dp_only = false;
-
- /* check if Pan only */
- if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
- bt_link_info->pan_exist && !bt_link_info->hid_exist)
- bt_link_info->pan_only = true;
- else
- bt_link_info->pan_only = false;
-
- /* check if Hid only */
- if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
- !bt_link_info->pan_exist && bt_link_info->hid_exist)
- bt_link_info->hid_only = true;
- else
- bt_link_info->hid_only = false;
-
- if (coex_sta->bt_info & BT_INFO_8822B_2ANT_B_INQ_PAGE) {
- coex_dm->bt_status = BT_8822B_2ANT_BT_STATUS_INQ_PAGE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Inq/page!!!\n");
- } else if (!(coex_sta->bt_info & BT_INFO_8822B_2ANT_B_CONNECTION)) {
- coex_dm->bt_status = BT_8822B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
- } else if (coex_sta->bt_info == BT_INFO_8822B_2ANT_B_CONNECTION) {
- /* connection exists but no busy */
- coex_dm->bt_status = BT_8822B_2ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
- } else if (((coex_sta->bt_info & BT_INFO_8822B_2ANT_B_SCO_ESCO) ||
- (coex_sta->bt_info & BT_INFO_8822B_2ANT_B_SCO_BUSY)) &&
- (coex_sta->bt_info & BT_INFO_8822B_2ANT_B_ACL_BUSY)) {
- coex_dm->bt_status = BT_8822B_2ANT_BT_STATUS_ACL_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL SCO busy!!!\n");
- } else if ((coex_sta->bt_info & BT_INFO_8822B_2ANT_B_SCO_ESCO) ||
- (coex_sta->bt_info & BT_INFO_8822B_2ANT_B_SCO_BUSY)) {
- coex_dm->bt_status = BT_8822B_2ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
- } else if (coex_sta->bt_info & BT_INFO_8822B_2ANT_B_ACL_BUSY) {
- coex_dm->bt_status = BT_8822B_2ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
- } else {
- coex_dm->bt_status = BT_8822B_2ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
- }
-
- if ((coex_dm->bt_status == BT_8822B_2ANT_BT_STATUS_ACL_BUSY) ||
- (coex_dm->bt_status == BT_8822B_2ANT_BT_STATUS_SCO_BUSY) ||
- (coex_dm->bt_status == BT_8822B_2ANT_BT_STATUS_ACL_SCO_BUSY))
- bt_busy = true;
- else
- bt_busy = false;
-
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
-}
-
-static void halbtc8822b2ant_update_wifi_ch_info(struct btc_coexist *btcoexist,
- u8 type)
-{
- u8 h2c_parameter[3] = {0};
- u32 wifi_bw;
- u8 wifi_central_chnl;
- u32 RTL97F_8822B = 0;
-
- if (RTL97F_8822B)
- return;
-
-	/* only for 2.4G do we need to inform BT of the channel mask */
- btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
- &wifi_central_chnl);
- if ((type == BTC_MEDIA_CONNECT) && (wifi_central_chnl <= 14)) {
-		/* enable BT AFH to skip the WL channel for 8822b
-		 * because of BT Rx LO interference
-		 */
- h2c_parameter[0] = 0x1;
- h2c_parameter[1] = wifi_central_chnl;
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (wifi_bw == BTC_WIFI_BW_HT40)
- h2c_parameter[2] = 0x30;
- else
- h2c_parameter[2] = 0x20;
- }
-
- coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
- coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
- coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
-
- btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
-}
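The function above packs a 3-byte H2C 0x66 payload: byte 0 enables BT AFH skipping of the WL channel, byte 1 carries the 2.4G center channel, and byte 2 encodes the channel-mask width (0x30 for HT40, 0x20 otherwise). Below is a minimal host-side sketch of just that packing, assuming nothing beyond what the code shows; pack_wifi_ch_info and its parameters are illustrative names, not driver API.

/* Standalone sketch (not driver code): reproduces the 3-byte H2C 0x66
 * channel-info packing shown above.  All names here are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void pack_wifi_ch_info(uint8_t out[3], bool media_connect,
			      uint8_t center_chnl, bool ht40)
{
	out[0] = 0;
	out[1] = 0;
	out[2] = 0;
	if (media_connect && center_chnl <= 14) {
		out[0] = 0x1;			/* enable BT AFH skip of the WL channel */
		out[1] = center_chnl;		/* 2.4G center channel */
		out[2] = ht40 ? 0x30 : 0x20;	/* channel mask width */
	}
}

int main(void)
{
	uint8_t h2c[3];

	pack_wifi_ch_info(h2c, true, 6, false);
	printf("%02x %02x %02x\n", h2c[0], h2c[1], h2c[2]);	/* prints: 01 06 20 */
	return 0;
}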
-
-static void
-halbtc8822b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
- u8 dac_swing_lvl)
-{
- u8 h2c_parameter[1] = {0};
- u32 RTL97F_8822B = 0;
-
- if (RTL97F_8822B)
- return;
-
-	/* There are several types of DAC swing: */
-	/* 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
- h2c_parameter[0] = dac_swing_lvl;
-
- btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
-}
-
-static void halbtc8822b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
- bool force_exec,
- u8 fw_dac_swing_lvl)
-{
- u32 RTL97F_8822B = 0;
-
- if (RTL97F_8822B)
- return;
-
- coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
-
- if (!force_exec) {
- if (coex_dm->pre_fw_dac_swing_lvl ==
- coex_dm->cur_fw_dac_swing_lvl)
- return;
- }
-
- halbtc8822b2ant_set_fw_dac_swing_level(btcoexist,
- coex_dm->cur_fw_dac_swing_lvl);
-
- coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
-}
-
-static void halbtc8822b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
- u8 dec_bt_pwr_lvl)
-{
- u32 RTL97F_8822B = 0;
- u8 h2c_parameter[1] = {0};
-
- if (RTL97F_8822B)
- return;
-
- h2c_parameter[0] = dec_bt_pwr_lvl;
-
- btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
-}
-
-static void halbtc8822b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
- bool force_exec, u8 dec_bt_pwr_lvl)
-{
- coex_dm->cur_bt_dec_pwr_lvl = dec_bt_pwr_lvl;
-
- if (!force_exec) {
- if (coex_dm->pre_bt_dec_pwr_lvl == coex_dm->cur_bt_dec_pwr_lvl)
- return;
- }
- halbtc8822b2ant_set_fw_dec_bt_pwr(btcoexist,
- coex_dm->cur_bt_dec_pwr_lvl);
-
- coex_dm->pre_bt_dec_pwr_lvl = coex_dm->cur_bt_dec_pwr_lvl;
-}
-
-static void halbtc8822b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
- bool force_exec, bool low_penalty_ra)
-{
- coex_dm->cur_low_penalty_ra = low_penalty_ra;
-
- if (!force_exec) {
- if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
- return;
- }
-
- if (low_penalty_ra)
- btcoexist->btc_phydm_modify_ra_pcr_threshold(btcoexist, 0, 50);
- else
- btcoexist->btc_phydm_modify_ra_pcr_threshold(btcoexist, 0, 0);
-
- coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
-}
-
-static void halbtc8822b2ant_write_score_board(struct btc_coexist *btcoexist,
- u16 bitpos, bool state)
-{
- static u16 originalval = 0x8002;
-
- if (state)
- originalval = originalval | bitpos;
- else
- originalval = originalval & (~bitpos);
-
- btcoexist->btc_write_2byte(btcoexist, 0xaa, originalval);
-}
-
-static void halbtc8822b2ant_read_score_board(struct btc_coexist *btcoexist,
- u16 *score_board_val)
-{
- *score_board_val =
- (btcoexist->btc_read_2byte(btcoexist, 0xaa)) & 0x7fff;
-}
-
-static void halbtc8822b2ant_post_state_to_bt(struct btc_coexist *btcoexist,
- u16 type, bool state)
-{
- halbtc8822b2ant_write_score_board(btcoexist, (u16)type, state);
-}
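The two helpers above treat register 0xaa as a 16-bit score board shared with the BT side: writes update a cached value bit by bit, and reads mask off bit 15. The following is a minimal standalone sketch of that behaviour, under the assumption that the register can be modelled as a plain variable; fake_reg and the helper names are illustrative, not driver API.

/* Standalone sketch (not driver code): models the 16-bit score-board
 * register shared with the BT firmware.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fake_reg = 0x8002;	/* stands in for register 0xaa */

static void score_board_write(uint16_t bitpos, bool state)
{
	if (state)
		fake_reg |= bitpos;
	else
		fake_reg &= ~bitpos;
}

static uint16_t score_board_read(void)
{
	return fake_reg & 0x7fff;	/* bit 15 is not a status bit */
}

int main(void)
{
	score_board_write(1 << 1, true);	/* e.g. the "BT active" bit polled below */
	printf("score board = 0x%04x\n", score_board_read());
	return 0;
}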
-
-static void
-halbtc8822b2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u32 bt_disable_cnt;
- bool bt_active = true, bt_disabled = false, wifi_under_5g = false;
- u16 u16tmp;
-
-	/* This function checks whether BT is disabled */
-
- /* Read BT on/off status from scoreboard[1],
- * enable this only if BT patch support this feature
- */
- halbtc8822b2ant_read_score_board(btcoexist, &u16tmp);
-
- bt_active = u16tmp & BIT(1);
-
- if (bt_active) {
- bt_disable_cnt = 0;
- bt_disabled = false;
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
- &bt_disabled);
- } else {
- bt_disable_cnt++;
- if (bt_disable_cnt >= 10) {
- bt_disabled = true;
- bt_disable_cnt = 10;
- }
-
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
- &bt_disabled);
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if ((wifi_under_5g) || (bt_disabled))
- halbtc8822b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, false);
- else
- halbtc8822b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, true);
-
- if (coex_sta->bt_disabled != bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is from %s to %s!!\n",
- (coex_sta->bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
- coex_sta->bt_disabled = bt_disabled;
- }
-}
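The monitor above only declares BT disabled after the score-board "BT active" bit has been clear for ten consecutive polls, which debounces transient reads. A minimal sketch of that counter follows; bt_disabled_debounce is an illustrative name and the ten-poll threshold is simply copied from the code above.

/* Standalone sketch (not driver code): the debounce used above. */
#include <stdbool.h>
#include <stdio.h>

static bool bt_disabled_debounce(bool bt_active, unsigned int *miss_cnt)
{
	if (bt_active) {
		*miss_cnt = 0;		/* any active poll resets the count */
		return false;
	}
	if (*miss_cnt < 10)
		(*miss_cnt)++;
	return *miss_cnt >= 10;		/* disabled only after 10 misses */
}

int main(void)
{
	unsigned int cnt = 0;
	bool disabled = false;
	int i;

	for (i = 0; i < 12; i++)
		disabled = bt_disabled_debounce(false, &cnt);
	printf("bt disabled: %d\n", disabled);	/* prints: 1 */
	return 0;
}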
-
-static void halbtc8822b2ant_enable_gnt_to_gpio(struct btc_coexist *btcoexist,
- bool isenable)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 bit_val[5] = {0, 0, 0, 0, 0};
-
- if (!btcoexist->dbg_mode_2ant)
- return;
-
- if (isenable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], enable_gnt_to_gpio!!\n");
-
- /* enable GNT_WL, GNT_BT to GPIO for debug */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x73, 0x8, 0x1);
-
- /* store original value */
- bit_val[0] =
- (btcoexist->btc_read_1byte(btcoexist, 0x66) & BIT(4)) >>
- 4; /*0x66[4] */
- bit_val[1] = (btcoexist->btc_read_1byte(btcoexist, 0x67) &
- BIT(0)); /*0x66[8] */
- bit_val[2] =
- (btcoexist->btc_read_1byte(btcoexist, 0x42) & BIT(3)) >>
- 3; /*0x40[19] */
- bit_val[3] =
- (btcoexist->btc_read_1byte(btcoexist, 0x65) & BIT(7)) >>
- 7; /*0x64[15] */
- bit_val[4] =
- (btcoexist->btc_read_1byte(btcoexist, 0x72) & BIT(2)) >>
- 2; /*0x70[18] */
-
- /* switch GPIO Mux */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x66, BIT(4),
- 0x0); /*0x66[4] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, BIT(0),
- 0x0); /*0x66[8] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x42, BIT(3),
- 0x0); /*0x40[19] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x65, BIT(7),
- 0x0); /*0x64[15] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x72, BIT(2),
- 0x0); /*0x70[18] = 0 */
-
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], disable_gnt_to_gpio!!\n");
-
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x73, 0x8, 0x0);
-
- /* Restore original value */
- /* switch GPIO Mux */
-		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x66, BIT(4),
-						   bit_val[0]); /* restore 0x66[4] */
-		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, BIT(0),
-						   bit_val[1]); /* restore 0x66[8] */
-		btcoexist->btc_write_1byte_bitmask(
-			btcoexist, 0x42, BIT(3), bit_val[2]); /* restore 0x40[19] */
-		btcoexist->btc_write_1byte_bitmask(
-			btcoexist, 0x65, BIT(7), bit_val[3]); /* restore 0x64[15] */
-		btcoexist->btc_write_1byte_bitmask(
-			btcoexist, 0x72, BIT(2), bit_val[4]); /* restore 0x70[18] */
- }
-}
-
-static u32
-halbtc8822b2ant_ltecoex_indirect_read_reg(struct btc_coexist *btcoexist,
- u16 reg_addr)
-{
- u32 delay_count = 0;
-
- while (1) {
- if ((btcoexist->btc_read_1byte(btcoexist, 0x1703) & BIT(5)) ==
- 0) {
- mdelay(50);
- delay_count++;
- if (delay_count >= 10) {
- delay_count = 0;
- break;
- }
- } else {
- break;
- }
- }
-
-	/* the loop above waits for the ready bit before accessing 0x1700 */
- btcoexist->btc_write_4byte(btcoexist, 0x1700, 0x800F0000 | reg_addr);
-
- return btcoexist->btc_read_4byte(btcoexist, 0x1708); /* get read data */
-}
-
-static void
-halbtc8822b2ant_ltecoex_indirect_write_reg(struct btc_coexist *btcoexist,
- u16 reg_addr, u32 bit_mask,
- u32 reg_value)
-{
- u32 val, i = 0, bitpos = 0, delay_count = 0;
-
- if (bit_mask == 0x0)
- return;
- if (bit_mask == 0xffffffff) {
- /* wait for ready bit before access 0x1700/0x1704 */
- while (1) {
- if ((btcoexist->btc_read_1byte(btcoexist, 0x1703) &
- BIT(5)) == 0) {
- mdelay(50);
- delay_count++;
- if (delay_count >= 10) {
- delay_count = 0;
- break;
- }
- } else {
- break;
- }
- }
-
- btcoexist->btc_write_4byte(btcoexist, 0x1704,
- reg_value); /* put write data */
-
- btcoexist->btc_write_4byte(btcoexist, 0x1700,
- 0xc00F0000 | reg_addr);
- } else {
- for (i = 0; i <= 31; i++) {
- if (((bit_mask >> i) & 0x1) == 0x1) {
- bitpos = i;
- break;
- }
- }
-
- /* read back register value before write */
- val = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- reg_addr);
- val = (val & (~bit_mask)) | (reg_value << bitpos);
-
- /* wait for ready bit before access 0x1700/0x1704 */
- while (1) {
- if ((btcoexist->btc_read_1byte(btcoexist, 0x1703) &
- BIT(5)) == 0) {
- mdelay(50);
- delay_count++;
- if (delay_count >= 10) {
- delay_count = 0;
- break;
- }
- } else {
- break;
- }
- }
-
- btcoexist->btc_write_4byte(btcoexist, 0x1704,
- val); /* put write data */
-
- btcoexist->btc_write_4byte(btcoexist, 0x1700,
- 0xc00F0000 | reg_addr);
- }
-}
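Both indirect accessors poll the ready bit (0x1703[5]) before touching the 0x1700/0x1704/0x1708 window, and the masked write then performs a read-modify-write: the field value is shifted to the position of the mask's lowest set bit and merged. The sketch below isolates just that merge step; merge_field is an illustrative name and, as in the code above, the caller is assumed to pass a field value that fits within the mask.

/* Standalone sketch (not driver code): the masked read-modify-write that
 * the indirect write performs once the ready bit has been polled.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t merge_field(uint32_t old_val, uint32_t bit_mask,
			    uint32_t field_val)
{
	uint32_t bitpos = 0;

	if (!bit_mask)
		return old_val;		/* nothing to update */
	while (!((bit_mask >> bitpos) & 0x1))
		bitpos++;		/* lowest set bit of the mask */

	return (old_val & ~bit_mask) | (field_val << bitpos);
}

int main(void)
{
	/* set 0x38[15:14] (mask 0xc000) to 2'b11 in a register that read 0x0 */
	printf("0x%08x\n", merge_field(0x0, 0xc000, 0x3));	/* prints: 0x0000c000 */
	return 0;
}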
-
-static void halbtc8822b2ant_ltecoex_enable(struct btc_coexist *btcoexist,
- bool enable)
-{
- u8 val;
-
- val = (enable) ? 1 : 0;
- halbtc8822b2ant_ltecoex_indirect_write_reg(btcoexist, 0x38, 0x80,
- val); /* 0x38[7] */
-}
-
-static void
-halbtc8822b2ant_ltecoex_pathcontrol_owner(struct btc_coexist *btcoexist,
- bool wifi_control)
-{
- u8 val;
-
- val = (wifi_control) ? 1 : 0;
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x73, 0x4,
- val); /* 0x70[26] */
-}
-
-static void halbtc8822b2ant_ltecoex_set_gnt_bt(struct btc_coexist *btcoexist,
- u8 control_block,
- bool sw_control, u8 state)
-{
- u32 val = 0, bit_mask;
-
- state = state & 0x1;
- val = (sw_control) ? ((state << 1) | 0x1) : 0;
-
- switch (control_block) {
- case BT_8822B_2ANT_GNT_BLOCK_RFC_BB:
- default:
- bit_mask = 0xc000;
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[15:14] */
- bit_mask = 0x0c00;
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[11:10] */
- break;
- case BT_8822B_2ANT_GNT_BLOCK_RFC:
- bit_mask = 0xc000;
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[15:14] */
- break;
- case BT_8822B_2ANT_GNT_BLOCK_BB:
- bit_mask = 0x0c00;
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[11:10] */
- break;
- }
-}
-
-static void halbtc8822b2ant_ltecoex_set_gnt_wl(struct btc_coexist *btcoexist,
- u8 control_block,
- bool sw_control, u8 state)
-{
- u32 val = 0, bit_mask;
-
- state = state & 0x1;
- val = (sw_control) ? ((state << 1) | 0x1) : 0;
-
- switch (control_block) {
- case BT_8822B_2ANT_GNT_BLOCK_RFC_BB:
- default:
- bit_mask = 0x3000;
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[13:12] */
- bit_mask = 0x0300;
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[9:8] */
- break;
- case BT_8822B_2ANT_GNT_BLOCK_RFC:
- bit_mask = 0x3000;
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[13:12] */
- break;
- case BT_8822B_2ANT_GNT_BLOCK_BB:
- bit_mask = 0x0300;
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, 0x38, bit_mask, val); /* 0x38[9:8] */
- break;
- }
-}
-
-static void
-halbtc8822b2ant_ltecoex_set_coex_table(struct btc_coexist *btcoexist,
- u8 table_type, u16 table_content)
-{
- u16 reg_addr = 0x0000;
-
- switch (table_type) {
- case BT_8822B_2ANT_CTT_WL_VS_LTE:
- reg_addr = 0xa0;
- break;
- case BT_8822B_2ANT_CTT_BT_VS_LTE:
- reg_addr = 0xa4;
- break;
- }
-
- if (reg_addr != 0x0000)
- halbtc8822b2ant_ltecoex_indirect_write_reg(
- btcoexist, reg_addr, 0xffff,
- table_content); /* 0xa0[15:0] or 0xa4[15:0] */
-}
-
-static void halbtc8822b2ant_set_wltoggle_coex_table(
- struct btc_coexist *btcoexist, bool force_exec, u8 interval,
- u8 val0x6c4_b0, u8 val0x6c4_b1, u8 val0x6c4_b2, u8 val0x6c4_b3)
-{
- static u8 pre_h2c_parameter[6] = {0};
- u8 cur_h2c_parameter[6] = {0};
- u8 i, match_cnt = 0;
-
- cur_h2c_parameter[0] = 0x7; /* op_code, 0x7= wlan toggle slot*/
-
- cur_h2c_parameter[1] = interval;
- cur_h2c_parameter[2] = val0x6c4_b0;
- cur_h2c_parameter[3] = val0x6c4_b1;
- cur_h2c_parameter[4] = val0x6c4_b2;
- cur_h2c_parameter[5] = val0x6c4_b3;
-
- if (!force_exec) {
- for (i = 1; i <= 5; i++) {
- if (cur_h2c_parameter[i] != pre_h2c_parameter[i])
- break;
-
- match_cnt++;
- }
-
- if (match_cnt == 5)
- return;
- }
-
- for (i = 1; i <= 5; i++)
- pre_h2c_parameter[i] = cur_h2c_parameter[i];
-
- btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, cur_h2c_parameter);
-}
-
-static void halbtc8822b2ant_set_coex_table(struct btc_coexist *btcoexist,
- u32 val0x6c0, u32 val0x6c4,
- u32 val0x6c8, u8 val0x6cc)
-{
- btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
-
- btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
-
- btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
-
- btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
-}
-
-static void halbtc8822b2ant_coex_table(struct btc_coexist *btcoexist,
- bool force_exec, u32 val0x6c0,
- u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
-{
- coex_dm->cur_val0x6c0 = val0x6c0;
- coex_dm->cur_val0x6c4 = val0x6c4;
- coex_dm->cur_val0x6c8 = val0x6c8;
- coex_dm->cur_val0x6cc = val0x6cc;
-
- if (!force_exec) {
- if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
- (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
- (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
- (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
- return;
- }
- halbtc8822b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8,
- val0x6cc);
-
- coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
- coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
- coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
- coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
-}
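halbtc8822b2ant_coex_table(), like most wrappers in this file (dec_bt_pwr, lps_rpwm, ignore_wlan_act, ...), follows the same pre/cur caching pattern: record the requested value as "cur", skip the hardware write when it matches "pre" and force_exec is false, otherwise write and promote "cur" to "pre". A minimal generic sketch of that pattern, with illustrative names only:

/* Standalone sketch (not driver code): the "pre/cur + force_exec" caching
 * pattern used by the wrappers above to avoid redundant register writes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cached_reg {
	uint32_t pre;
	uint32_t cur;
};

static void hw_write(uint32_t val)
{
	printf("hw write 0x%08x\n", val);	/* stands in for btc_write_4byte() */
}

static void set_cached(struct cached_reg *r, bool force_exec, uint32_t val)
{
	r->cur = val;
	if (!force_exec && r->pre == r->cur)
		return;			/* nothing changed: skip the write */
	hw_write(r->cur);
	r->pre = r->cur;
}

int main(void)
{
	struct cached_reg reg = { .pre = 0, .cur = 0 };

	set_cached(&reg, false, 0x55555555);	/* writes */
	set_cached(&reg, false, 0x55555555);	/* skipped */
	set_cached(&reg, true, 0x55555555);	/* forced write */
	return 0;
}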
-
-static void halbtc8822b2ant_coex_table_with_type(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
-{
- u32 break_table;
- u8 select_table;
-
- coex_sta->coex_table_type = type;
-
- if (coex_sta->concurrent_rx_mode_on) {
-		break_table = 0xf0ffffff; /* allow WL hi-pri to break BT */
-		/* set Tx response = Hi-Pri (e.g. transmitting ACK, BA, CTS) */
- select_table = 0xb;
- } else {
- break_table = 0xffffff;
- select_table = 0x3;
- }
-
- switch (type) {
- case 0:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
- 0xffffffff, break_table,
- select_table);
- break;
- case 1:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5a5a5a5a, break_table,
- select_table);
- break;
- case 2:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
- 0x5a5a5a5a, break_table,
- select_table);
- break;
- case 3:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5a5a5a5a, break_table,
- select_table);
- break;
- case 4:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5a5a5a5a, break_table,
- select_table);
- break;
- case 5:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x55555555, break_table,
- select_table);
- break;
- case 6:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0xa5555555,
- 0xfafafafa, break_table,
- select_table);
- break;
- case 7:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0xa5555555,
- 0xaa5a5a5a, break_table,
- select_table);
- break;
- case 8:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0xa5555555,
- 0xfafafafa, break_table,
- select_table);
- break;
- case 9:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
- 0xaaaa5aaa, break_table,
- select_table);
- break;
- case 10:
- halbtc8822b2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5a5a555a, break_table,
- select_table);
- break;
- default:
- break;
- }
-}
-
-static void
-halbtc8822b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
- bool enable)
-{
- u8 h2c_parameter[1] = {0};
- u32 RTL97F_8822B = 0;
-
- if (RTL97F_8822B)
- return;
-
- if (enable)
- h2c_parameter[0] |= BIT(0); /* function enable */
-
- btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
-}
-
-static void halbtc8822b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
- bool force_exec, bool enable)
-{
- coex_dm->cur_ignore_wlan_act = enable;
-
- if (!force_exec) {
- if (coex_dm->pre_ignore_wlan_act ==
- coex_dm->cur_ignore_wlan_act)
- return;
- }
- halbtc8822b2ant_set_fw_ignore_wlan_act(btcoexist, enable);
-
- coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
-}
-
-static void halbtc8822b2ant_set_lps_rpwm(struct btc_coexist *btcoexist,
- u8 lps_val, u8 rpwm_val)
-{
- u8 lps = lps_val;
- u8 rpwm = rpwm_val;
-
- btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps);
- btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
-}
-
-static void halbtc8822b2ant_lps_rpwm(struct btc_coexist *btcoexist,
- bool force_exec, u8 lps_val, u8 rpwm_val)
-{
- coex_dm->cur_lps = lps_val;
- coex_dm->cur_rpwm = rpwm_val;
-
- if (!force_exec) {
- if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
- (coex_dm->pre_rpwm == coex_dm->cur_rpwm))
- return;
- }
- halbtc8822b2ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
-
- coex_dm->pre_lps = coex_dm->cur_lps;
- coex_dm->pre_rpwm = coex_dm->cur_rpwm;
-}
-
-static void halbtc8822b2ant_ps_tdma_check_for_power_save_state(
- struct btc_coexist *btcoexist, bool new_ps_state)
-{
- u8 lps_mode = 0x0;
- u8 h2c_parameter[5] = {0, 0, 0, 0x40, 0};
- u32 RTL97F_8822B = 0;
-
- if (RTL97F_8822B)
- return;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
-
- if (lps_mode) { /* already under LPS state */
- if (new_ps_state) {
- /* keep state under LPS, do nothing. */
- } else {
- /* will leave LPS state, turn off psTdma first */
- btcoexist->btc_fill_h2c(btcoexist, 0x60, 5,
- h2c_parameter);
- }
- } else { /* NO PS state */
- if (new_ps_state) {
- /* will enter LPS state, turn off psTdma first */
- btcoexist->btc_fill_h2c(btcoexist, 0x60, 5,
- h2c_parameter);
- } else {
- /* keep state under NO PS state, do nothing. */
- }
- }
-}
-
-static bool halbtc8822b2ant_power_save_state(struct btc_coexist *btcoexist,
- u8 ps_type, u8 lps_val,
- u8 rpwm_val)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool low_pwr_disable = false, result = true;
-
- switch (ps_type) {
- case BTC_PS_WIFI_NATIVE:
- coex_sta->force_lps_ctrl = false;
- /* recover to original 32k low power setting */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == BTC_PS_WIFI_NATIVE\n", __func__);
-
- low_pwr_disable = false;
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
- &low_pwr_disable);
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
- break;
- case BTC_PS_LPS_ON:
- coex_sta->force_lps_ctrl = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == BTC_PS_LPS_ON\n", __func__);
-
- halbtc8822b2ant_ps_tdma_check_for_power_save_state(btcoexist,
- true);
- halbtc8822b2ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
- rpwm_val);
-		/* when coex forces entering LPS, do not enter 32k low power */
- low_pwr_disable = true;
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
- &low_pwr_disable);
-		/* power save must be executed before psTdma */
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
- break;
- case BTC_PS_LPS_OFF:
- coex_sta->force_lps_ctrl = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == BTC_PS_LPS_OFF\n", __func__);
-
- halbtc8822b2ant_ps_tdma_check_for_power_save_state(btcoexist,
- false);
- result = btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
- NULL);
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static void halbtc8822b2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
- u8 byte1, u8 byte2, u8 byte3,
- u8 byte4, u8 byte5)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[5] = {0};
- u8 real_byte1 = byte1, real_byte5 = byte5;
- bool ap_enable = false, result = false;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- if (byte5 & BIT(2))
- coex_sta->is_tdma_btautoslot = true;
- else
- coex_sta->is_tdma_btautoslot = false;
-
-	/* release the bt-auto slot if an auto-slot hang is detected */
- if (coex_sta->is_tdma_btautoslot)
- if ((coex_sta->is_tdma_btautoslot_hang) ||
- (bt_link_info->slave_role))
- byte5 = byte5 & 0xfb;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
- &ap_enable);
-
- if ((ap_enable) && (byte1 & BIT(4) && !(byte1 & BIT(5)))) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == FW for AP mode\n", __func__);
-
- real_byte1 &= ~BIT(4);
- real_byte1 |= BIT(5);
-
- real_byte5 |= BIT(5);
- real_byte5 &= ~BIT(6);
-
- halbtc8822b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
- } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == Force LPS (byte1 = 0x%x)\n",
- __func__, byte1);
-
- if (!halbtc8822b2ant_power_save_state(btcoexist, BTC_PS_LPS_OFF,
- 0x50, 0x4))
- result = true;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == Native LPS (byte1 = 0x%x)\n",
- __func__, byte1);
-
- halbtc8822b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
- }
-
- coex_sta->is_set_ps_state_fail = result;
-
- if (!coex_sta->is_set_ps_state_fail) {
- h2c_parameter[0] = real_byte1;
- h2c_parameter[1] = byte2;
- h2c_parameter[2] = byte3;
- h2c_parameter[3] = byte4;
- h2c_parameter[4] = real_byte5;
-
- coex_dm->ps_tdma_para[0] = real_byte1;
- coex_dm->ps_tdma_para[1] = byte2;
- coex_dm->ps_tdma_para[2] = byte3;
- coex_dm->ps_tdma_para[3] = byte4;
- coex_dm->ps_tdma_para[4] = real_byte5;
-
- btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
- } else {
- coex_sta->cnt_set_ps_state_fail++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s == Force Leave LPS Fail (cnt = %d)\n",
- __func__, coex_sta->cnt_set_ps_state_fail);
- }
-}
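When AP mode is active, the function above rewrites byte 1 and byte 5 of the 5-byte PS-TDMA H2C (0x60) so the firmware is not asked to force LPS. The sketch below reproduces only that visible bit manipulation; pstdma_adjust_for_ap_mode is an illustrative name and no bit meanings beyond what the code shows are assumed.

/* Standalone sketch (not driver code): the byte1/byte5 rewrite applied by
 * the function above when the interface runs in AP mode.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(x) (1u << (x))

static void pstdma_adjust_for_ap_mode(uint8_t *byte1, uint8_t *byte5)
{
	*byte1 &= ~BIT(4);	/* drop the force-LPS request */
	*byte1 |= BIT(5);
	*byte5 |= BIT(5);
	*byte5 &= ~BIT(6);
}

int main(void)
{
	uint8_t b1 = 0x51, b5 = 0x50;	/* e.g. the case-103 TDMA parameters */

	pstdma_adjust_for_ap_mode(&b1, &b5);
	printf("byte1=0x%02x byte5=0x%02x\n", b1, b5);	/* prints: byte1=0x61 byte5=0x30 */
	return 0;
}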
-
-static void halbtc8822b2ant_ps_tdma(struct btc_coexist *btcoexist,
- bool force_exec, bool turn_on, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 ps_tdma_byte4_modify, pre_ps_tdma_byte4_modify;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- coex_dm->cur_ps_tdma_on = turn_on;
- coex_dm->cur_ps_tdma = type;
-
- /* 0x778 = 0x1 at wifi slot (no blocking BT Low-Pri pkts) */
- if (bt_link_info->slave_role)
- ps_tdma_byte4_modify = 0x1;
- else
- ps_tdma_byte4_modify = 0x0;
-
- if (pre_ps_tdma_byte4_modify != ps_tdma_byte4_modify) {
- force_exec = true;
- pre_ps_tdma_byte4_modify = ps_tdma_byte4_modify;
- }
-
- if (!force_exec) {
- if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
- (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Skip TDMA because no change TDMA(%s, %d)\n",
- (coex_dm->cur_ps_tdma_on ? "on" : "off"),
- coex_dm->cur_ps_tdma);
- return;
- }
- }
-
- if (coex_dm->cur_ps_tdma_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
-
- btcoexist->btc_write_1byte_bitmask(
-			btcoexist, 0x550, 0x8, 0x1); /* enable TBTT interrupt */
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(off, %d) **********\n",
- coex_dm->cur_ps_tdma);
- }
-
- if (turn_on) {
- switch (type) {
- case 1:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x10, 0x03, 0x91,
- 0x54 | ps_tdma_byte4_modify);
- break;
- case 2:
- default:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x35, 0x03, 0x11,
- 0x11 | ps_tdma_byte4_modify);
- break;
- case 3:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x3a, 0x3, 0x91,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 4:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x21, 0x3, 0x91,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 5:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x25, 0x3, 0x91,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 6:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x10, 0x3, 0x91,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 7:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x20, 0x3, 0x91,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 8:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x15,
- 0x03, 0x11, 0x11);
- break;
- case 10:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x30,
- 0x03, 0x11, 0x10);
- break;
- case 11:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x35, 0x03, 0x11,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 12:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x35,
- 0x03, 0x11, 0x11);
- break;
- case 13:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x1c, 0x03, 0x11,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 14:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x20,
- 0x03, 0x11, 0x11);
- break;
- case 15:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x10,
- 0x03, 0x11, 0x14);
- break;
- case 16:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x10,
- 0x03, 0x11, 0x15);
- break;
- case 21:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x30,
- 0x03, 0x11, 0x10);
- break;
- case 22:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x25,
- 0x03, 0x11, 0x10);
- break;
- case 23:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x61, 0x10,
- 0x03, 0x11, 0x10);
- break;
- case 51:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x10, 0x03, 0x91,
- 0x10 | ps_tdma_byte4_modify);
- break;
- case 101:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x25, 0x03, 0x11,
-		/* connection exists but not busy */
- break;
- case 102:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x35, 0x03, 0x11,
- 0x11 | ps_tdma_byte4_modify);
- break;
- case 103:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x51, 0x3a, 0x3, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- case 104:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x51, 0x21, 0x3, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- case 105:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x51, 0x30, 0x3, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- case 106:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x51, 0x10, 0x3, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- case 107:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x51, 0x10, 0x7, 0x10,
- 0x54 | ps_tdma_byte4_modify);
- break;
- case 108:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x51, 0x30, 0x3, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- case 109:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x51, 0x10, 0x03, 0x10,
- 0x54 | ps_tdma_byte4_modify);
- break;
- case 110:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x55, 0x30, 0x03, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- case 111:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x61, 0x25, 0x03, 0x11,
- 0x11 | ps_tdma_byte4_modify);
- break;
- case 151:
- halbtc8822b2ant_set_fw_pstdma(
- btcoexist, 0x51, 0x10, 0x03, 0x10,
- 0x50 | ps_tdma_byte4_modify);
- break;
- }
- } else {
- /* disable PS tdma */
- switch (type) {
- case 0:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x40, 0x0);
- break;
- case 1:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x48, 0x0);
- break;
- default:
- halbtc8822b2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x40, 0x0);
- break;
- }
- }
-
- if (!coex_sta->is_set_ps_state_fail) {
- /* update pre state */
- coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
- coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
- }
-}
-
-/* antenna control by BB/MAC/BT/antdiv/PTA; writes 0x4c, 0xcb4, 0xcbd */
-static void halbtc8822b2ant_set_ext_ant_switch(struct btc_coexist *btcoexist,
- bool force_exec, u8 ctrl_type,
- u8 pos_type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool switch_polatiry_inverse = false;
- u8 regval_0xcbc = 0, regval_0x64;
- u32 u32tmp1 = 0, u32tmp2 = 0, u32tmp3 = 0;
-
- if (!rfe_type->ext_ant_switch_exist)
- return;
-
- coex_dm->cur_ext_ant_switch_status = (ctrl_type << 8) + pos_type;
-
- if (!force_exec) {
- if (coex_dm->pre_ext_ant_switch_status ==
- coex_dm->cur_ext_ant_switch_status)
- return;
- }
- coex_dm->pre_ext_ant_switch_status = coex_dm->cur_ext_ant_switch_status;
-
- /* Ext switch buffer mux */
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x1991, 0x3, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbe, 0x8, 0x0);
-
- switch (ctrl_type) {
- default:
- case BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_BBSW:
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x80,
- 0x0); /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f, 0x01,
- 0x1); /* 0x4c[24] = 1 */
-		/* BB SW, DPDT use RFE_ctrl8 and RFE_ctrl9 as control pins */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb4, 0xff,
- 0x77);
-
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd, 0x03, 01);
-
- break;
- case BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_PTA:
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x80,
- 0x0); /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f, 0x01,
- 0x1); /* 0x4c[24] = 1 */
-		/* PTA, DPDT use RFE_ctrl8 and RFE_ctrl9 as control pins */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb4, 0xff,
- 0x66);
-
-		/* 0xcb4[29:28] = 2'b10 for no switch_polatiry_inverse,
-		 * DPDT_SEL_N = 1, DPDT_SEL_P = 0 @ GNT_BT = 1
-		 */
- regval_0xcbc = (!switch_polatiry_inverse ? 0x2 : 0x1);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbc, 0x03,
- regval_0xcbc);
-
- break;
- case BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_ANTDIV:
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x80,
- 0x0); /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f, 0x01,
- 0x1); /* 0x4c[24] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb4, 0xff,
- 0x88);
- break;
- case BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_MAC:
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x80,
- 0x1); /* 0x4c[23] = 1 */
-
-		/* 0x64[0] = 1'b0 for no switch_polatiry_inverse,
-		 * DPDT_SEL_N = 1, DPDT_SEL_P = 0
-		 */
- regval_0x64 = (!switch_polatiry_inverse ? 0x0 : 0x1);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
- regval_0x64);
- break;
- case BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_BT:
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x80,
- 0x0); /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f, 0x01,
- 0x0); /* 0x4c[24] = 0 */
-
-		/* no setup required, because the antenna switch is controlled
-		 * by the BT vendor register 0x1c[1:0]
-		 */
- break;
- }
-
-	/* PAPE and LNA_ON are controlled by BT while WLAN is off,
-	 * to avoid a current leakage issue
-	 */
- if (ctrl_type == BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_BT) {
- btcoexist->btc_write_1byte_bitmask(
- btcoexist, 0x67, 0x20, 0x0); /* PAPE 0x64[29] = 0 */
- btcoexist->btc_write_1byte_bitmask(
- btcoexist, 0x67, 0x10, 0x0); /* LNA_ON 0x64[28] = 0 */
- } else {
- btcoexist->btc_write_1byte_bitmask(
- btcoexist, 0x67, 0x20, 0x1); /* PAPE 0x64[29] = 1 */
- btcoexist->btc_write_1byte_bitmask(
- btcoexist, 0x67, 0x10, 0x1); /* LNA_ON 0x64[28] = 1 */
- }
-
- if (btcoexist->dbg_mode_2ant) {
- u32tmp1 = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- u32tmp2 = btcoexist->btc_read_4byte(btcoexist, 0x4c);
- u32tmp3 = btcoexist->btc_read_4byte(btcoexist, 0x64) & 0xff;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], (After Ext Ant switch setup) 0xcb4 = 0x%08x, 0x4c = 0x%08x, 0x64= 0x%02x\n",
- u32tmp1, u32tmp2, u32tmp3);
- }
-}
-
-/* RFE type comes from efuse; main/aux antenna inversion is not needed
- * because this chip is 2x2 and the control types are the same
- */
-static void halbtc8822b2ant_set_rfe_type(struct btc_coexist *btcoexist)
-{
- struct btc_board_info *board_info = &btcoexist->board_info;
-
- rfe_type->ext_band_switch_exist = false;
- rfe_type->ext_band_switch_type =
- BT_8822B_2ANT_EXT_BAND_SWITCH_USE_SPDT; /* SPDT; */
- rfe_type->ext_band_switch_ctrl_polarity = 0;
- /* Ext switch buffer mux */
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x1991, 0x3, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbe, 0x8, 0x0);
-
- if (rfe_type->ext_band_switch_exist) {
- /* band switch use RFE_ctrl1 (pin name: PAPE_A) and
- * RFE_ctrl3 (pin name: LNAON_A)
- */
-
- /* set RFE_ctrl1 as software control */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb0, 0xf0, 0x7);
-
- /* set RFE_ctrl3 as software control */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb1, 0xf0, 0x7);
- }
-
-	/* the following setup should be read from efuse in the future */
- rfe_type->rfe_module_type = board_info->rfe_type;
-
- rfe_type->ext_ant_switch_ctrl_polarity = 0;
-
- switch (rfe_type->rfe_module_type) {
- case 0:
- default:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 1:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 2:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 3:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 4:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 5:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 6:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- case 7:
- rfe_type->ext_ant_switch_exist = true;
- rfe_type->ext_ant_switch_type =
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT;
- break;
- }
-}
-
-/* set gnt_wl/gnt_bt control by SW high/low or HW PTA while in
- * power-on, init, wlan off, wlan only, wl2g non-concurrent, wl2g concurrent, wl5g
- */
-static void halbtc8822b2ant_set_ant_path(struct btc_coexist *btcoexist,
- u8 ant_pos_type, bool force_exec,
- u8 phase)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 u8tmp = 0;
- u32 u32tmp1 = 0;
- u32 u32tmp2 = 0, u32tmp3 = 0;
-
- u32tmp1 = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist, 0x38);
-
-	/* To avoid indirect access failures */
- if (((u32tmp1 & 0xf000) >> 12) != ((u32tmp1 & 0x0f00) >> 8)) {
- force_exec = true;
- coex_sta->gnt_error_cnt++;
- }
-
- /* Ext switch buffer mux */
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x1991, 0x3, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbe, 0x8, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x80,
- 0x0); /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f, 0x01,
- 0x1); /* 0x4c[24] = 1 */
-
- coex_dm->cur_ant_pos_type = (ant_pos_type << 8) + phase;
-
- if (!force_exec) {
- if (coex_dm->cur_ant_pos_type == coex_dm->pre_ant_pos_type)
- return;
- }
-
- coex_dm->pre_ant_pos_type = coex_dm->cur_ant_pos_type;
-
- if (btcoexist->dbg_mode_2ant) {
- u32tmp1 = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0x38);
- u32tmp2 = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0x54);
- u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x73);
-
- u32tmp3 = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], (Before Ant Setup) 0xcb4 = 0x%x, 0x73 = 0x%x, 0x38= 0x%x, 0x54= 0x%x\n",
- u32tmp3, u8tmp, u32tmp1, u32tmp2);
- }
-
- switch (phase) {
- case BT_8822B_2ANT_PHASE_COEX_POWERON:
-
- /* set Path control owner to WL at initial step */
- halbtc8822b2ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_2ANT_PCO_BTSIDE);
-
- /* set GNT_BT to SW high */
- halbtc8822b2ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
- /* Set GNT_WL to SW high */
- halbtc8822b2ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
-
- coex_sta->run_time_state = false;
-
- break;
- case BT_8822B_2ANT_PHASE_COEX_INIT:
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x80,
- 0x0); /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f, 0x01,
- 0x1); /* 0x4c[24] = 1 */
- /* Disable LTE Coex Function in WiFi side
- * (this should be on if LTE coex is required)
- */
- halbtc8822b2ant_ltecoex_enable(btcoexist, 0x0);
-
- /* GNT_WL_LTE always = 1
- * (this should be config if LTE coex is required)
- */
- halbtc8822b2ant_ltecoex_set_coex_table(
- btcoexist, BT_8822B_2ANT_CTT_WL_VS_LTE, 0xffff);
-
- /* GNT_BT_LTE always = 1
- * (this should be config if LTE coex is required)
- */
- halbtc8822b2ant_ltecoex_set_coex_table(
- btcoexist, BT_8822B_2ANT_CTT_BT_VS_LTE, 0xffff);
-
- /* set Path control owner to WL at initial step */
- halbtc8822b2ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_2ANT_PCO_WLSIDE);
-
- /* set GNT_BT to SW high */
- halbtc8822b2ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
- /* Set GNT_WL to SW high */
- halbtc8822b2ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
-
- coex_sta->run_time_state = false;
-
- break;
- case BT_8822B_2ANT_PHASE_WLANONLY_INIT:
- /* Disable LTE Coex Function in WiFi side
- * (this should be on if LTE coex is required)
- */
- halbtc8822b2ant_ltecoex_enable(btcoexist, 0x0);
-
- /* GNT_WL_LTE always = 1
- * (this should be config if LTE coex is required)
- */
- halbtc8822b2ant_ltecoex_set_coex_table(
- btcoexist, BT_8822B_2ANT_CTT_WL_VS_LTE, 0xffff);
-
- /* GNT_BT_LTE always = 1
- * (this should be config if LTE coex is required)
- */
- halbtc8822b2ant_ltecoex_set_coex_table(
- btcoexist, BT_8822B_2ANT_CTT_BT_VS_LTE, 0xffff);
-
- /* set Path control owner to WL at initial step */
- halbtc8822b2ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_2ANT_PCO_WLSIDE);
-
- /* set GNT_BT to SW Low */
- halbtc8822b2ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_LOW);
- /* Set GNT_WL to SW high */
- halbtc8822b2ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
-
- coex_sta->run_time_state = false;
-
- break;
- case BT_8822B_2ANT_PHASE_WLAN_OFF:
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4e, 0x80,
- 0x0); /* 0x4c[23] = 0 */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4f, 0x01,
- 0x0); /* 0x4c[24] = 0 */
- /* Disable LTE Coex Function in WiFi side */
- halbtc8822b2ant_ltecoex_enable(btcoexist, 0x0);
-
- /* set Path control owner to BT */
- halbtc8822b2ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_2ANT_PCO_BTSIDE);
-
- /* Set Ext Ant Switch to BT control at wifi off step */
- halbtc8822b2ant_set_ext_ant_switch(
- btcoexist, FORCE_EXEC,
- BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_BT,
- BT_8822B_2ANT_EXT_ANT_SWITCH_MAIN_TO_NOCARE);
- coex_sta->run_time_state = false;
- break;
- case BT_8822B_2ANT_PHASE_2G_RUNTIME:
- case BT_8822B_2ANT_PHASE_2G_RUNTIME_CONCURRENT:
-
- /* set Path control owner to WL at runtime step */
- halbtc8822b2ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_2ANT_PCO_WLSIDE);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb4, 0xff,
- 0x66);
- if (phase == BT_8822B_2ANT_PHASE_2G_RUNTIME_CONCURRENT) {
- /* set GNT_BT to PTA */
- halbtc8822b2ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_PTA,
- BT_8822B_2ANT_SIG_STA_SET_BY_HW);
-
- /* Set GNT_WL to SW High */
- halbtc8822b2ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
- } else {
- /* set GNT_BT to PTA */
- halbtc8822b2ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_PTA,
- BT_8822B_2ANT_SIG_STA_SET_BY_HW);
-
- /* Set GNT_WL to PTA */
- halbtc8822b2ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_PTA,
- BT_8822B_2ANT_SIG_STA_SET_BY_HW);
- }
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ************* under2g 0xcbd setting =2 *************\n");
-
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd, 0x03, 02);
- break;
-
- case BT_8822B_2ANT_PHASE_5G_RUNTIME:
-
- /* set Path control owner to WL at runtime step */
- halbtc8822b2ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_2ANT_PCO_WLSIDE);
-
- /* set GNT_BT to SW Hi */
- halbtc8822b2ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
- /* Set GNT_WL to SW Hi */
- halbtc8822b2ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
- coex_sta->run_time_state = true;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ************* under5g 0xcbd setting =1 *************\n");
-
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcbd, 0x03, 01);
-
- break;
- case BT_8822B_2ANT_PHASE_BTMPMODE:
- /* Disable LTE Coex Function in WiFi side */
- halbtc8822b2ant_ltecoex_enable(btcoexist, 0x0);
-
- /* set Path control owner to WL */
- halbtc8822b2ant_ltecoex_pathcontrol_owner(
- btcoexist, BT_8822B_2ANT_PCO_WLSIDE);
-
- /* set GNT_BT to SW Hi */
- halbtc8822b2ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
-
- /* Set GNT_WL to SW Lo */
- halbtc8822b2ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_LOW);
-
- coex_sta->run_time_state = false;
- break;
- }
-
- if (btcoexist->dbg_mode_2ant) {
- u32tmp1 = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0x38);
- u32tmp2 = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0x54);
- u32tmp3 = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x73);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], (After Ant-Setup phase---%d) 0xcb4 = 0x%x, 0x73 = 0x%x, 0x38= 0x%x, 0x54= 0x%x\n",
- phase, u32tmp3, u8tmp, u32tmp1, u32tmp2);
- }
-}
-
-static u8 halbtc8822b2ant_action_algorithm(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- u8 algorithm = BT_8822B_2ANT_COEX_ALGO_UNDEFINED;
- u8 num_of_diff_profile = 0;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
-
- if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No BT link exists!!!\n");
- return algorithm;
- }
-
- if (bt_link_info->sco_exist)
- num_of_diff_profile++;
- if (bt_link_info->hid_exist)
- num_of_diff_profile++;
- if (bt_link_info->pan_exist)
- num_of_diff_profile++;
- if (bt_link_info->a2dp_exist)
- num_of_diff_profile++;
-
- if (num_of_diff_profile == 0) {
- if (bt_link_info->acl_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No-Profile busy\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_NOPROFILEBUSY;
- }
- } else if ((bt_link_info->a2dp_exist) && (coex_sta->is_bt_a2dp_sink)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], A2DP Sink\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_A2DPSINK;
- } else if (num_of_diff_profile == 1) {
- if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO only\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_SCO;
- } else {
- if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID only\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_HID;
- } else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], A2DP only\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_A2DP;
- } else if (bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(HS) only\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_PANHS;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(EDR) only\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_PANEDR;
- }
- }
- }
- } else if (num_of_diff_profile == 2) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_SCO;
- } else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + A2DP ==> A2DP\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_A2DP;
- } else if (bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(HS)\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_SCO;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(EDR)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_PANEDR;
- }
- }
- } else {
- if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) {
- {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_HID_A2DP;
- }
- } else if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(HS)\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_HID;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(EDR)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_PANEDR_HID;
- }
- } else if (bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], A2DP + PAN(HS)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_A2DP_PANHS;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], A2DP + PAN(EDR)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_PANEDR_A2DP;
- }
- }
- }
- } else if (num_of_diff_profile == 3) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP ==> HID + A2DP\n");
- algorithm = BT_8822B_2ANT_COEX_ALGO_HID_A2DP;
- } else if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(HS)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_PANEDR_HID;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_PANEDR_HID;
- }
- } else if (bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_A2DP_PANHS;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_PANEDR_A2DP;
- }
- }
- } else {
- if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
- }
- }
- }
- } else if (num_of_diff_profile >= 3) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
- algorithm =
- BT_8822B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
- }
- }
- }
- }
-
- return algorithm;
-}
-
-static void halbtc8822b2ant_action_coex_all_off(struct btc_coexist *btcoexist)
-{
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
-
- /* fw all off */
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
-
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-}
-
-static void halbtc8822b2ant_action_wifi_under5g(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- /* fw all off */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ************* under5g *************\n");
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, FORCE_EXEC,
- BT_8822B_2ANT_PHASE_5G_RUNTIME);
-}
-
-static void
-halbtc8822b2ant_action_wifi_native_lps(struct btc_coexist *btcoexist)
-{
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
-}
-
-static void halbtc8822b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_connected = false;
- bool wifi_scan = false, wifi_link = false, wifi_roam = false;
- bool wifi_busy = false;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &wifi_scan);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &wifi_link);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &wifi_roam);
-
- if ((coex_sta->bt_create_connection) &&
- ((wifi_link) || (wifi_roam) || (wifi_scan) || (wifi_busy) ||
- (coex_sta->wifi_is_high_pri_task))) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi link/roam/Scan/busy/hi-pri-task + BT Inq/Page!!\n");
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
-
- if ((bt_link_info->a2dp_exist) && (!bt_link_info->pan_exist))
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 15);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 11);
- } else if ((!wifi_connected) && (!wifi_scan)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi no-link + no-scan + BT Inq/Page!!\n");
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (bt_link_info->pan_exist) {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
-
- } else if (bt_link_info->a2dp_exist) {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC,
- 10);
- } else {
- if ((wifi_link) || (wifi_roam) || (wifi_scan) || (wifi_busy) ||
- (coex_sta->wifi_is_high_pri_task))
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 21);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 23);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
- }
-
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 0xd8);
-}
-
-static void
-halbtc8822b2ant_action_wifi_link_process(struct btc_coexist *btcoexist)
-{
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 0xd4);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
-
- if (bt_link_info->pan_exist) {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
-
- } else if (bt_link_info->a2dp_exist) {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
- } else {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 21);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
- }
-}
-
-static void
-halbtc8822b2ant_action_wifi_nonconnected(struct btc_coexist *btcoexist)
-{
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- /* fw all off */
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
-
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-}
-
-static void halbtc8822b2ant_action_bt_relink(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], run bt multi link function\n");
-
- if (coex_sta->is_bt_multi_link)
- return;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], run bt re-link function\n");
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
-}
-
-static void halbtc8822b2ant_action_bt_idle(struct btc_coexist *btcoexist)
-{
- bool wifi_busy = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- if (!wifi_busy) {
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
- } else { /* if wl busy */
-
- if (BT_8822B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status) {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
- 0);
- } else {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 8);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 12);
- }
- }
-
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 0xd8);
-}
-
-/* SCO only or SCO+PAN(HS) */
-static void halbtc8822b2ant_action_sco(struct btc_coexist *btcoexist)
-{
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false;
- u32 wifi_bw = 1;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- if (coex_sta->is_esco_mode)
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- else /* 2-Ant free run in SCO mode */
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
- }
-}
-
-static void halbtc8822b2ant_action_hid(struct btc_coexist *btcoexist)
-{
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false;
- u32 wifi_bw = 1;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- if (coex_sta->is_hid_low_pri_tx_overhead) {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 4);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 108);
- } else if (wifi_bw == 0) { /* if 11bg mode */
-
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 8);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 111);
- } else {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 8);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 111);
- }
- }
-}
-
-static void halbtc8822b2ant_action_a2dpsink(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false, wifi_turbo = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], scan_ap_num = %d, wl_noisy = %d\n",
- coex_sta->scan_ap_num, coex_sta->wl_noisy_level);
-
- if ((wifi_busy) && (coex_sta->wl_noisy_level == 0))
- wifi_turbo = true;
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
-
- if (wifi_busy)
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 1);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 16);
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- if ((coex_sta->bt_relink_downcount != 0) && (wifi_busy)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Re-Link + A2DP + WL busy\n");
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
- 0);
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 5);
-
- } else {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 8);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 105);
- }
- }
-}
-
- /* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
-static void halbtc8822b2ant_action_a2dp(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false, wifi_turbo = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], scan_ap_num = %d, wl_noisy = %d\n",
- coex_sta->scan_ap_num, coex_sta->wl_noisy_level);
-
- if ((wifi_busy) && (coex_sta->wl_noisy_level == 0))
- wifi_turbo = true;
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
-
- if (wifi_busy)
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 1);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 16);
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- if ((coex_sta->bt_relink_downcount != 0) && (wifi_busy)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Re-Link + A2DP + WL busy\n");
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
- 0);
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 5);
-
- } else {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 10);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 109);
- }
- }
-}
-
-static void halbtc8822b2ant_action_pan_edr(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false, wifi_turbo = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], scan_ap_num = %d, wl_noisy = %d\n",
- coex_sta->scan_ap_num, coex_sta->wl_noisy_level);
-
- if ((wifi_busy) && (coex_sta->wl_noisy_level == 0))
- wifi_turbo = true;
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
-
- if (wifi_busy)
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 3);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 4);
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
-
- if (wifi_busy)
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 103);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 104);
- }
-}
-
-static void halbtc8822b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false;
- u32 wifi_bw = 1;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
-
- if (wifi_busy)
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 1);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 16);
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- if ((coex_sta->bt_relink_downcount != 0) && (wifi_busy)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Re-Link + A2DP + WL busy\n");
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
- 0);
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 5);
- } else {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 8);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 109);
- }
- }
-}
-
-static void halbtc8822b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false, wifi_turbo = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], scan_ap_num = %d, wl_noisy = %d\n",
- coex_sta->scan_ap_num, coex_sta->wl_noisy_level);
-
- if ((wifi_busy) && (coex_sta->wl_noisy_level == 0))
- wifi_turbo = true;
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- /*halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);*/
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
- /*halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);*/
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
-
- if (wifi_busy) {
- if ((coex_sta->a2dp_bit_pool > 40) &&
- (coex_sta->a2dp_bit_pool < 255))
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- } else {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 6);
- }
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- /*halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);*/
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- if (wifi_turbo)
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 6);
- else
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 7);
-
- if (wifi_busy) {
- if ((coex_sta->a2dp_bit_pool > 40) &&
- (coex_sta->a2dp_bit_pool < 255))
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 107);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 105);
- } else {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 106);
- }
- }
-}
-
-/* PAN(EDR)+A2DP */
-static void halbtc8822b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false, wifi_turbo = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], scan_ap_num = %d, wl_noisy = %d\n",
- coex_sta->scan_ap_num, coex_sta->wl_noisy_level);
-
- if ((wifi_busy) && (coex_sta->wl_noisy_level == 0))
- wifi_turbo = true;
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
-
- if (wifi_busy) {
- if (((coex_sta->a2dp_bit_pool > 40) &&
- (coex_sta->a2dp_bit_pool < 255)) ||
- (!coex_sta->is_A2DP_3M))
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- } else {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 6);
- }
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
- if (wifi_busy)
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 107);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 106);
- }
-}
-
-static void halbtc8822b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
-{
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false;
- u32 wifi_bw = 1;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
-
- if (wifi_busy)
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 3);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 4);
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
- halbtc8822b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
-
- if (wifi_busy)
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 103);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 104);
- }
-}
-
-/* HID+A2DP+PAN(EDR) */
-static void
-halbtc8822b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
-{
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false;
- u32 wifi_bw = 1;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
-
- if (wifi_busy) {
- if (((coex_sta->a2dp_bit_pool > 40) &&
- (coex_sta->a2dp_bit_pool < 255)) ||
- (!coex_sta->is_A2DP_3M))
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- else
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- } else {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 6);
- }
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- if (coex_sta->hid_busy_num >= 2) {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 8);
-
- if (wifi_bw == 0) {
- halbtc8822b2ant_set_wltoggle_coex_table(
- btcoexist, NORMAL_EXEC, 0x1, 0xaa, 0x5a,
- 0xaa, 0xaa);
- } else {
- halbtc8822b2ant_set_wltoggle_coex_table(
- btcoexist, NORMAL_EXEC, 0x2, 0xaa, 0x5a,
- 0xaa, 0xaa);
- }
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- 110);
- } else {
- halbtc8822b2ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
-
- if (wifi_busy) {
- if ((coex_sta->a2dp_bit_pool > 40) &&
- (coex_sta->a2dp_bit_pool < 255))
- halbtc8822b2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 107);
- else
- halbtc8822b2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 105);
- } else {
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 106);
- }
- }
- }
-}
-
-static void halbtc8822b2ant_action_bt_whck_test(struct btc_coexist *btcoexist)
-{
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
-}
-
-static void halbtc8822b2ant_action_bt_hs(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 prewifi_rssi_state = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state, bt_rssi_state;
-
- static u8 prewifi_rssi_state2 = BTC_RSSI_STATE_LOW;
- static u8 pre_bt_rssi_state2 = BTC_RSSI_STATE_LOW;
- u8 wifi_rssi_state2, bt_rssi_state2;
- bool wifi_busy = false, wifi_turbo = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], scan_ap_num = %d, wl_noisy = %d\n",
- coex_sta->scan_ap_num, coex_sta->wl_noisy_level);
-
- if ((wifi_busy) && (coex_sta->wl_noisy_level == 0))
- wifi_turbo = true;
-
- wifi_rssi_state = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state, 2, coex_sta->wifi_coex_thres,
- 0);
-
- wifi_rssi_state2 = halbtc8822b2ant_wifi_rssi_state(
- btcoexist, &prewifi_rssi_state2, 2, coex_sta->wifi_coex_thres2,
- 0);
-
- bt_rssi_state = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state, 2, coex_sta->bt_coex_thres, 0);
-
- bt_rssi_state2 = halbtc8822b2ant_bt_rssi_state(
- btcoexist, &pre_bt_rssi_state2, 2, coex_sta->bt_coex_thres2, 0);
-
- if (BTC_RSSI_HIGH(wifi_rssi_state) && BTC_RSSI_HIGH(bt_rssi_state)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- } else if (BTC_RSSI_HIGH(wifi_rssi_state2) &&
- BTC_RSSI_HIGH(bt_rssi_state2)) {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xc8);
-
- coex_dm->is_switch_to_1dot5_ant = false;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
-
- } else {
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-
- coex_dm->is_switch_to_1dot5_ant = true;
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- }
-}
-
-static void
-halbtc8822b2ant_action_wifi_multi_port(struct btc_coexist *btcoexist)
-{
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-
- /* hw all off */
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
-}
-
-static void halbtc8822b2ant_action_wifi_connected(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- switch (coex_dm->cur_algorithm) {
- case BT_8822B_2ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
- halbtc8822b2ant_action_sco(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID.\n");
- halbtc8822b2ant_action_hid(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP.\n");
- halbtc8822b2ant_action_a2dp(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_A2DPSINK:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP Sink.\n");
- halbtc8822b2ant_action_a2dpsink(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
- halbtc8822b2ant_action_a2dp_pan_hs(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n");
- halbtc8822b2ant_action_pan_edr(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n");
- halbtc8822b2ant_action_pan_edr_a2dp(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
- halbtc8822b2ant_action_pan_edr_hid(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
- halbtc8822b2ant_action_hid_a2dp_pan_edr(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n");
- halbtc8822b2ant_action_hid_a2dp(btcoexist);
- break;
- case BT_8822B_2ANT_COEX_ALGO_NOPROFILEBUSY:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = No-Profile busy.\n");
- halbtc8822b2ant_action_bt_idle(btcoexist);
- break;
- default:
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
- halbtc8822b2ant_action_coex_all_off(btcoexist);
- break;
- }
-
- coex_dm->pre_algorithm = coex_dm->cur_algorithm;
-}
-
-static void halbtc8822b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 algorithm = 0;
- u32 num_of_wifi_link = 0;
- u32 wifi_link_status = 0;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool miracast_plus_bt = false;
- bool scan = false, link = false, roam = false, under_4way = false,
- wifi_connected = false, wifi_under_5g = false, bt_hs_on = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
- &under_4way);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
-
- if (btcoexist->manual_control) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
- return;
- }
-
- if (btcoexist->stop_coex_dm) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
- return;
- }
-
- if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
- return;
- }
-
- if ((coex_sta->under_lps) &&
- (coex_dm->bt_status != BT_8822B_2ANT_BT_STATUS_ACL_BUSY)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), wifi is under LPS !!!\n");
- halbtc8822b2ant_action_wifi_native_lps(btcoexist);
- return;
- }
-
- if (!coex_sta->run_time_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], return for run_time_state = false !!!\n");
- return;
- }
-
- if (coex_sta->freeze_coexrun_by_btinfo) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), return for freeze_coexrun_by_btinfo\n");
- return;
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if ((wifi_under_5g) &&
- (coex_sta->switch_band_notify_to != BTC_SWITCH_TO_24G) &&
- (coex_sta->switch_band_notify_to != BTC_SWITCH_TO_24G_NOFORSCAN)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under 5G!!!\n");
-
- halbtc8822b2ant_action_wifi_under5g(btcoexist);
- return;
- }
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under 2G!!!\n");
-
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, NORMAL_EXEC,
- BT_8822B_2ANT_PHASE_2G_RUNTIME);
-
- if (coex_sta->bt_whck_test) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under WHCK TEST!!!\n");
- halbtc8822b2ant_action_bt_whck_test(btcoexist);
- return;
- }
-
- if (coex_sta->bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is disabled!!!\n");
- halbtc8822b2ant_action_coex_all_off(btcoexist);
- return;
- }
-
- if (coex_sta->c2h_bt_inquiry_page) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under inquiry/page scan !!\n");
- halbtc8822b2ant_action_bt_inquiry(btcoexist);
- return;
- }
-
- if (coex_sta->is_setup_link) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is re-link !!!\n");
- halbtc8822b2ant_action_bt_relink(btcoexist);
- return;
- }
-
- /* for P2P */
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
- &wifi_link_status);
- num_of_wifi_link = wifi_link_status >> 16;
-
- if ((num_of_wifi_link >= 2) ||
- (wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
- num_of_wifi_link, wifi_link_status);
-
- if (bt_link_info->bt_link_exist)
- miracast_plus_bt = true;
- else
- miracast_plus_bt = false;
-
- btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
- &miracast_plus_bt);
-
- if (scan || link || roam || under_4way) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], scan = %d, link = %d, roam = %d 4way = %d!!!\n",
- scan, link, roam, under_4way);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under linkscan process + Multi-Port !!\n");
-
- halbtc8822b2ant_action_wifi_link_process(btcoexist);
- } else {
- halbtc8822b2ant_action_wifi_multi_port(btcoexist);
- }
-
- return;
- }
-
- miracast_plus_bt = false;
- btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
- &miracast_plus_bt);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
-
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], BT Is hs\n");
- halbtc8822b2ant_action_bt_hs(btcoexist);
- return;
- }
-
- if ((BT_8822B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status) ||
- (coex_dm->bt_status == BT_8822B_2ANT_BT_STATUS_CONNECTED_IDLE)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, bt idle!!.\n");
-
- halbtc8822b2ant_action_bt_idle(btcoexist);
- return;
- }
-
- algorithm = halbtc8822b2ant_action_algorithm(btcoexist);
- coex_dm->cur_algorithm = algorithm;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
- if (scan || link || roam || under_4way) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under Link Process !!\n");
- halbtc8822b2ant_action_wifi_link_process(btcoexist);
- } else if (wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, wifi connected!!.\n");
- halbtc8822b2ant_action_wifi_connected(btcoexist);
-
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, wifi not-connected!!.\n");
- halbtc8822b2ant_action_wifi_nonconnected(btcoexist);
- }
-}
-
-static void halbtc8822b2ant_init_coex_dm(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
-
- halbtc8822b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, false);
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
-
- /* fw all off */
- halbtc8822b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
-
- halbtc8822b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xd8);
-
- coex_sta->pop_event_cnt = 0;
- coex_sta->cnt_remote_name_req = 0;
- coex_sta->cnt_reinit = 0;
- coex_sta->cnt_setup_link = 0;
- coex_sta->cnt_ign_wlan_act = 0;
- coex_sta->cnt_page = 0;
- coex_sta->cnt_role_switch = 0;
- coex_sta->switch_band_notify_to = BTC_NOT_SWITCH;
-
- halbtc8822b2ant_query_bt_info(btcoexist);
-}
-
-static void halbtc8822b2ant_init_hw_config(struct btc_coexist *btcoexist,
- bool wifi_only)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u32 u32tmp1 = 0, u32tmp2 = 0, u32tmp3 = 0;
- u32 RTL97F_8822B = 0;
- u8 i = 0;
-
- u32tmp3 = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- u32tmp1 = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist, 0x38);
- u32tmp2 = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist, 0x54);
-
- if (RTL97F_8822B) {
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x66, 0x04, 0x0);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x41, 0x02, 0x0);
-
- /* set GNT_BT to SW high */
- halbtc8822b2ant_ltecoex_set_gnt_bt(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
- /* Set GNT_WL to SW high */
- halbtc8822b2ant_ltecoex_set_gnt_wl(
- btcoexist, BT_8822B_2ANT_GNT_BLOCK_RFC_BB,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH);
- return;
- }
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], (Before Init HW config) 0xcb4 = 0x%x, 0x38= 0x%x, 0x54= 0x%x\n",
- u32tmp3, u32tmp1, u32tmp2);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2Ant Init HW Config!!\n");
-
- coex_sta->bt_coex_supported_feature = 0;
- coex_sta->bt_coex_supported_version = 0;
- coex_sta->bt_ble_scan_type = 0;
- coex_sta->bt_ble_scan_para[0] = 0;
- coex_sta->bt_ble_scan_para[1] = 0;
- coex_sta->bt_ble_scan_para[2] = 0;
- coex_sta->bt_reg_vendor_ac = 0xffff;
- coex_sta->bt_reg_vendor_ae = 0xffff;
- coex_sta->isolation_btween_wb = BT_8822B_2ANT_DEFAULT_ISOLATION;
- coex_sta->gnt_error_cnt = 0;
- coex_sta->bt_relink_downcount = 0;
- coex_sta->is_set_ps_state_fail = false;
- coex_sta->cnt_set_ps_state_fail = 0;
-
- for (i = 0; i <= 9; i++)
- coex_sta->bt_afh_map[i] = 0;
-
- /* 0xf0[15:12] --> Chip Cut information */
- coex_sta->cut_version =
- (btcoexist->btc_read_1byte(btcoexist, 0xf1) & 0xf0) >> 4;
-
- coex_sta->dis_ver_info_cnt = 0;
-
- halbtc8822b2ant_coex_switch_threshold(btcoexist,
- coex_sta->isolation_btween_wb);
-
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x550, 0x8,
- 0x1); /* enable TBTT interrupt */
-
- /* BT report packet sample rate */
- btcoexist->btc_write_1byte(btcoexist, 0x790, 0x5);
-
- /* Init 0x778 = 0x1 for 2-Ant */
- btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
-
- /* Enable PTA (3-wire function from BT side) */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x41, 0x02, 0x1);
-
- /* Enable PTA (tx/rx signal from WiFi side) */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x4c6, 0x10, 0x1);
-
- halbtc8822b2ant_enable_gnt_to_gpio(btcoexist, true);
-
- /* GNT_BT = 1 while both are selected */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x763, 0x10, 0x1);
-
- /* check if WL firmware download ok */
- halbtc8822b2ant_post_state_to_bt(btcoexist,
- BT_8822B_2ANT_SCOREBOARD_ONOFF, true);
-
- /* Enable counter statistics */
- btcoexist->btc_write_1byte(
- btcoexist, 0x76e,
- 0x4); /* 0x76e[3] = 1, WLAN_Act controlled by PTA */
-
- halbtc8822b2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 5);
-
- halbtc8822b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
-
- psd_scan->ant_det_is_ant_det_available = true;
-
- if (coex_sta->is_rf_state_off) {
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_WLAN_OFF);
-
- btcoexist->stop_coex_dm = true;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** %s (RF Off)**********\n",
- __func__);
- } else if (wifi_only) {
- coex_sta->concurrent_rx_mode_on = false;
- /* Path config */
- /* Set Antenna Path */
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_WLANONLY_INIT);
-
- btcoexist->stop_coex_dm = true;
- } else {
- /* Set BT polluted packet on for Tx rate adaptive not including
- * Tx retry break by PTA, 0x45c[19] =1
- */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x45e, 0x8, 0x1);
-
- coex_sta->concurrent_rx_mode_on = true;
-
- /* RF 0x1[1] = 0->Set GNT_WL_RF_Rx always = 1 for
- * concurrent Rx, mask Tx only
- */
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0x2, 0x0);
-
- /* Set Antenna Path */
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_COEX_INIT);
-
- btcoexist->stop_coex_dm = false;
- }
-}
-
-/* ************************************************************
- * work-around functions start with wa_halbtc8822b2ant_
- * ************************************************************
- * ************************************************************
- * extern functions start with ex_halbtc8822b2ant_
- * *************************************************************/
-void ex_btc8822b2ant_power_on_setting(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_board_info *board_info = &btcoexist->board_info;
- u8 u8tmp = 0x0;
- u16 u16tmp = 0x0;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "xxxxxxxxxxxxxxxx Execute 8822b 2-Ant PowerOn Setting xxxxxxxxxxxxxxxx!!\n");
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Ant Det Finish = %s, Ant Det Number = %d\n",
- (board_info->btdm_ant_det_finish ? "Yes" : "No"),
- board_info->btdm_ant_num_by_ant_det);
-
- btcoexist->dbg_mode_2ant = false;
- btcoexist->stop_coex_dm = true;
- psd_scan->ant_det_is_ant_det_available = false;
-
- /* enable BB, REG_SYS_FUNC_EN such that we can write BB Reg correctly */
- u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x2);
- btcoexist->btc_write_2byte(btcoexist, 0x2, u16tmp | BIT(0) | BIT(1));
-
- /* Local setting bit define */
- /* BIT0: "0" for no antenna inverse; "1" for antenna inverse */
- /* BIT1: "0" for internal switch; "1" for external switch */
- /* BIT2: "0" for one antenna; "1" for two antenna */
- /* NOTE: the default here is all internal switch and 1-antenna ==>
- * BIT1=0 and BIT2=0
- */
-
- /* Check efuse 0xc3[6] for Single Antenna Path */
-
- /* Setup RF front end type */
- halbtc8822b2ant_set_rfe_type(btcoexist);
-
- /* Set Antenna Path to BT side */
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, FORCE_EXEC,
- BT_8822B_2ANT_PHASE_COEX_POWERON);
-
- /* Save"single antenna position" info in Local register setting for
- * FW reading, because FW may not ready at power on
- */
- if (btcoexist->chip_interface == BTC_INTF_PCI)
- btcoexist->btc_write_local_reg_1byte(btcoexist, 0x3e0, u8tmp);
- else if (btcoexist->chip_interface == BTC_INTF_USB)
- btcoexist->btc_write_local_reg_1byte(btcoexist, 0xfe08, u8tmp);
- else if (btcoexist->chip_interface == BTC_INTF_SDIO)
- btcoexist->btc_write_local_reg_1byte(btcoexist, 0x60, u8tmp);
-
- /* enable GNT_WL/GNT_BT debug signal to GPIO14/15 */
- halbtc8822b2ant_enable_gnt_to_gpio(btcoexist, true);
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** LTE coex Reg 0x38 (Power-On) = 0x%x**********\n",
- halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist, 0x38));
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** MAC Reg 0x70/ BB Reg 0xcb4 (Power-On) = 0x%x / 0x%x\n",
- btcoexist->btc_read_4byte(btcoexist, 0x70),
- btcoexist->btc_read_4byte(btcoexist, 0xcb4));
-}
-
-void ex_btc8822b2ant_pre_load_firmware(struct btc_coexist *btcoexist)
-{
- struct btc_board_info *board_info = &btcoexist->board_info;
- u8 u8tmp = 0x4; /* Set BIT2 by default since it's 2ant case */
-
- /* */
- /* S0 or S1 setting and Local register setting
- * (from this setting, fw can get ant number, S0/S1, ... info)
- */
- /* Local setting bit define */
- /* BIT0: "0" for no antenna inverse; "1" for antenna inverse */
- /* BIT1: "0" for internal switch; "1" for external switch */
- /* BIT2: "0" for one antenna; "1" for two antenna */
- /* NOTE: the default here is all internal switch and 1-antenna ==>
- * BIT1=0 and BIT2=0
- */
- if (btcoexist->chip_interface == BTC_INTF_USB) {
- /* fixed at S0 for USB interface */
- u8tmp |= 0x1; /* antenna inverse */
- btcoexist->btc_write_local_reg_1byte(btcoexist, 0xfe08, u8tmp);
- } else {
- /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
- if (board_info->single_ant_path == 0) {
- } else if (board_info->single_ant_path == 1) {
- /* set to S0 */
- u8tmp |= 0x1; /* antenna inverse */
- }
-
- if (btcoexist->chip_interface == BTC_INTF_PCI)
- btcoexist->btc_write_local_reg_1byte(btcoexist, 0x3e0,
- u8tmp);
- else if (btcoexist->chip_interface == BTC_INTF_SDIO)
- btcoexist->btc_write_local_reg_1byte(btcoexist, 0x60,
- u8tmp);
- }
-}
-
-void ex_btc8822b2ant_init_hw_config(struct btc_coexist *btcoexist,
- bool wifi_only)
-{
- halbtc8822b2ant_init_hw_config(btcoexist, wifi_only);
- btcoexist->auto_report_2ant = true;
-}
-
-void ex_btc8822b2ant_init_coex_dm(struct btc_coexist *btcoexist)
-{
- halbtc8822b2ant_init_coex_dm(btcoexist);
-}
-
-void ex_btc8822b2ant_display_coex_info(struct btc_coexist *btcoexist,
- struct seq_file *m)
-{
- struct btc_board_info *board_info = &btcoexist->board_info;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-
- u8 u8tmp[4], i, ps_tdma_case = 0;
- u32 u32tmp[4];
- u16 u16tmp[4];
- u32 fa_ofdm, fa_cck, cca_ofdm, cca_cck, ratio_ofdm;
- u32 fw_ver = 0, bt_patch_ver = 0, bt_coex_ver = 0;
- static u8 pop_report_in_10s;
- u32 phyver = 0;
- bool lte_coex_on = false;
- static u8 cnt;
-
- seq_puts(m, "\r\n ============[BT Coexist info]============");
-
- if (btcoexist->manual_control) {
- seq_puts(m,
- "\r\n ============[Under Manual Control]============");
- seq_puts(m, "\r\n ==========================================");
- }
-
- if (!coex_sta->bt_disabled) {
- if (coex_sta->bt_coex_supported_feature == 0)
- btcoexist->btc_get(
- btcoexist, BTC_GET_U4_SUPPORTED_FEATURE,
- &coex_sta->bt_coex_supported_feature);
-
- if ((coex_sta->bt_coex_supported_version == 0) ||
- (coex_sta->bt_coex_supported_version == 0xffff))
- btcoexist->btc_get(
- btcoexist, BTC_GET_U4_SUPPORTED_VERSION,
- &coex_sta->bt_coex_supported_version);
-
- if (coex_sta->bt_reg_vendor_ac == 0xffff)
- coex_sta->bt_reg_vendor_ac = (u16)(
- btcoexist->btc_get_bt_reg(btcoexist, 3, 0xac) &
- 0xffff);
-
- if (coex_sta->bt_reg_vendor_ae == 0xffff)
- coex_sta->bt_reg_vendor_ae = (u16)(
- btcoexist->btc_get_bt_reg(btcoexist, 3, 0xae) &
- 0xffff);
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
- &bt_patch_ver);
- btcoexist->bt_info.bt_get_fw_ver = bt_patch_ver;
-
- if (coex_sta->num_of_profile > 0) {
- cnt++;
-
- if (cnt >= 3) {
- btcoexist->btc_get_bt_afh_map_from_bt(
- btcoexist, 0, &coex_sta->bt_afh_map[0]);
- cnt = 0;
- }
- }
- }
-
- if (psd_scan->ant_det_try_count == 0) {
- seq_printf(
- m, "\r\n %-35s = %d/ %d/ %s / %d",
- "Ant PG Num/ Mech/ Pos/ RFE", board_info->pg_ant_num,
- board_info->btdm_ant_num,
- (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT ?
- "Main" :
- "Aux"),
- rfe_type->rfe_module_type);
- } else {
- seq_printf(
- m, "\r\n %-35s = %d/ %d/ %s/ %d (%d/%d/%d)",
- "Ant PG Num/ Mech(Ant_Det)/ Pos/ RFE",
- board_info->pg_ant_num,
- board_info->btdm_ant_num_by_ant_det,
- (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT ?
- "Main" :
- "Aux"),
- rfe_type->rfe_module_type, psd_scan->ant_det_try_count,
- psd_scan->ant_det_fail_count, psd_scan->ant_det_result);
-
- if (board_info->btdm_ant_det_finish) {
- if (psd_scan->ant_det_result != 12)
- seq_printf(m, "\r\n %-35s = %s",
- "Ant Det PSD Value",
- psd_scan->ant_det_peak_val);
- else
- seq_printf(m, "\r\n %-35s = %d",
- "Ant Det PSD Value",
- psd_scan->ant_det_psd_scan_peak_val /
- 100);
- }
- }
-
- bt_patch_ver = btcoexist->bt_info.bt_get_fw_ver;
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- phyver = btcoexist->btc_get_bt_phydm_version(btcoexist);
-
- bt_coex_ver = (coex_sta->bt_coex_supported_version & 0xff);
-
- seq_printf(
- m, "\r\n %-35s = %d_%02x/ 0x%02x/ 0x%02x (%s)",
- "CoexVer WL/ BT_Desired/ BT_Report",
- glcoex_ver_date_8822b_2ant, glcoex_ver_8822b_2ant,
- glcoex_ver_btdesired_8822b_2ant, bt_coex_ver,
- (bt_coex_ver == 0xff ?
- "Unknown" :
- (coex_sta->bt_disabled ? "BT-disable" :
- (bt_coex_ver >= glcoex_ver_btdesired_8822b_2ant ?
- "Match" :
- "Mis-Match"))));
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ v%d/ %c", "W_FW/ B_FW/ Phy/ Kt",
- fw_ver, bt_patch_ver, phyver, coex_sta->cut_version + 65);
-
- seq_printf(m, "\r\n %-35s = %02x %02x %02x ", "AFH Map to BT",
- coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1],
- coex_dm->wifi_chnl_info[2]);
-
- seq_printf(m, "\r\n %-35s = %d / %d / %d ",
- "Isolation/WL_Thres/BT_Thres", coex_sta->isolation_btween_wb,
- coex_sta->wifi_coex_thres, coex_sta->bt_coex_thres);
-
- /* wifi status */
- seq_printf(m, "\r\n %-35s", "============[Wifi Status]============");
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_WIFI_STATUS, m);
-
- seq_printf(m, "\r\n %-35s", "============[BT Status]============");
-
- pop_report_in_10s++;
- seq_printf(
- m, "\r\n %-35s = [%s/ %d dBm/ %d/ %d] ",
- "BT [status/ rssi/ retryCnt/ popCnt]",
- ((coex_sta->bt_disabled) ?
- ("disabled") :
- ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page") :
- ((BT_8822B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status) ?
- "non-connected idle" :
- ((coex_dm->bt_status ==
- BT_8822B_2ANT_BT_STATUS_CONNECTED_IDLE) ?
- "connected-idle" :
- "busy")))),
- coex_sta->bt_rssi - 100, coex_sta->bt_retry_cnt,
- coex_sta->pop_event_cnt);
-
- if (pop_report_in_10s >= 5) {
- coex_sta->pop_event_cnt = 0;
- pop_report_in_10s = 0;
- }
-
- if (coex_sta->num_of_profile != 0)
- seq_printf(
- m, "\r\n %-35s = %s%s%s%s%s", "Profiles",
- ((bt_link_info->a2dp_exist) ?
- ((coex_sta->is_bt_a2dp_sink) ? "A2DP sink," :
- "A2DP,") :
- ""),
- ((bt_link_info->sco_exist) ? "HFP," : ""),
- ((bt_link_info->hid_exist) ?
- ((coex_sta->hid_busy_num >= 2) ?
- "HID(4/18)," :
- "HID(2/18),") :
- ""),
- ((bt_link_info->pan_exist) ? "PAN," : ""),
- ((coex_sta->voice_over_HOGP) ? "Voice" : ""));
- else
- seq_printf(m, "\r\n %-35s = None", "Profiles");
-
- if (bt_link_info->a2dp_exist) {
- seq_printf(m, "\r\n %-35s = %s/ %d/ %s",
- "A2DP Rate/Bitpool/Auto_Slot",
- ((coex_sta->is_A2DP_3M) ? "3M" : "No_3M"),
- coex_sta->a2dp_bit_pool,
- ((coex_sta->is_autoslot) ? "On" : "Off"));
- }
-
- if (bt_link_info->hid_exist) {
- seq_printf(m, "\r\n %-35s = %d/ %d", "HID PairNum/Forbid_Slot",
- coex_sta->hid_pair_cnt, coex_sta->forbidden_slot);
- }
-
- seq_printf(m, "\r\n %-35s = %s/ %d/ %s/ 0x%x",
- "Role/RoleSwCnt/IgnWlact/Feature",
- ((bt_link_info->slave_role) ? "Slave" : "Master"),
- coex_sta->cnt_role_switch,
- ((coex_dm->cur_ignore_wlan_act) ? "Yes" : "No"),
- coex_sta->bt_coex_supported_feature);
-
- if ((coex_sta->bt_ble_scan_type & 0x7) != 0x0) {
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "BLEScan Type/TV/Init/Ble",
- coex_sta->bt_ble_scan_type,
- (coex_sta->bt_ble_scan_type & 0x1 ?
- coex_sta->bt_ble_scan_para[0] :
- 0x0),
- (coex_sta->bt_ble_scan_type & 0x2 ?
- coex_sta->bt_ble_scan_para[1] :
- 0x0),
- (coex_sta->bt_ble_scan_type & 0x4 ?
- coex_sta->bt_ble_scan_para[2] :
- 0x0));
- }
-
- seq_printf(m, "\r\n %-35s = %d/ %d/ %d/ %d/ %d",
- "ReInit/ReLink/IgnWlact/Page/NameReq", coex_sta->cnt_reinit,
- coex_sta->cnt_setup_link, coex_sta->cnt_ign_wlan_act,
- coex_sta->cnt_page, coex_sta->cnt_remote_name_req);
-
- halbtc8822b2ant_read_score_board(btcoexist, &u16tmp[0]);
-
- if ((coex_sta->bt_reg_vendor_ae == 0xffff) ||
- (coex_sta->bt_reg_vendor_ac == 0xffff))
- seq_printf(m, "\r\n %-35s = x/ x/ %04x",
- "0xae[4]/0xac[1:0]/Scoreboard", u16tmp[0]);
- else
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ %04x",
- "0xae[4]/0xac[1:0]/Scoreboard",
- (int)((coex_sta->bt_reg_vendor_ae & BIT(4)) >> 4),
- coex_sta->bt_reg_vendor_ac & 0x3, u16tmp[0]);
-
- if (coex_sta->num_of_profile > 0) {
- seq_printf(
- m,
- "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
- "AFH MAP", coex_sta->bt_afh_map[0],
- coex_sta->bt_afh_map[1], coex_sta->bt_afh_map[2],
- coex_sta->bt_afh_map[3], coex_sta->bt_afh_map[4],
- coex_sta->bt_afh_map[5], coex_sta->bt_afh_map[6],
- coex_sta->bt_afh_map[7], coex_sta->bt_afh_map[8],
- coex_sta->bt_afh_map[9]);
- }
-
- for (i = 0; i < BT_INFO_SRC_8822B_2ANT_MAX; i++) {
- if (coex_sta->bt_info_c2h_cnt[i]) {
- seq_printf(
- m,
- "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)",
- glbt_info_src_8822b_2ant[i],
- coex_sta->bt_info_c2h[i][0],
- coex_sta->bt_info_c2h[i][1],
- coex_sta->bt_info_c2h[i][2],
- coex_sta->bt_info_c2h[i][3],
- coex_sta->bt_info_c2h[i][4],
- coex_sta->bt_info_c2h[i][5],
- coex_sta->bt_info_c2h[i][6],
- coex_sta->bt_info_c2h_cnt[i]);
- }
- }
-
- /* Sw mechanism */
- if (btcoexist->manual_control)
- seq_printf(
- m, "\r\n %-35s",
- "============[mechanism] (before Manual)============");
- else
- seq_printf(m, "\r\n %-35s",
- "============[Mechanism]============");
-
- ps_tdma_case = coex_dm->cur_ps_tdma;
-
- seq_printf(m, "\r\n %-35s = %02x %02x %02x %02x %02x (case-%d, %s, %s)",
- "TDMA", coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1],
- coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3],
- coex_dm->ps_tdma_para[4], ps_tdma_case,
- (coex_dm->cur_ps_tdma_on ? "TDMA On" : "TDMA Off"),
- (coex_dm->is_switch_to_1dot5_ant ? "1.5Ant" : "2Ant"));
-
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
- u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
- u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
- seq_printf(m, "\r\n %-35s = %d/ 0x%x/ 0x%x/ 0x%x",
- "Table/0x6c0/0x6c4/0x6c8", coex_sta->coex_table_type,
- u32tmp[0], u32tmp[1], u32tmp[2]);
-
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x", "0x778/0x6cc", u8tmp[0],
- u32tmp[0]);
-
- seq_printf(m, "\r\n %-35s = %s/ %s/ %s/ %d",
- "AntDiv/BtCtrlLPS/LPRA/PsFail",
- ((board_info->ant_div_cfg) ? "On" : "Off"),
- ((coex_sta->force_lps_ctrl) ? "On" : "Off"),
- ((coex_dm->cur_low_penalty_ra) ? "On" : "Off"),
- coex_sta->cnt_set_ps_state_fail);
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x", "WL_DACSwing/ BT_Dec_Pwr",
- coex_dm->cur_fw_dac_swing_lvl, coex_dm->cur_bt_dec_pwr_lvl);
-
- u32tmp[0] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist, 0x38);
- lte_coex_on = ((u32tmp[0] & BIT(7)) >> 7) ? true : false;
-
- if (lte_coex_on) {
- u32tmp[0] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0xa0);
- u32tmp[1] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0xa4);
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x",
- "LTE Coex Table W_L/B_L", u32tmp[0] & 0xffff,
- u32tmp[1] & 0xffff);
-
- u32tmp[0] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0xa8);
- u32tmp[1] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0xac);
- u32tmp[2] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0xb0);
- u32tmp[3] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist,
- 0xb4);
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "LTE Break Table W_L/B_L/L_W/L_B",
- u32tmp[0] & 0xffff, u32tmp[1] & 0xffff,
- u32tmp[2] & 0xffff, u32tmp[3] & 0xffff);
- }
-
- /* Hw setting */
- seq_printf(m, "\r\n %-35s", "============[Hw setting]============");
-
- u32tmp[0] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist, 0x38);
- u32tmp[1] = halbtc8822b2ant_ltecoex_indirect_read_reg(btcoexist, 0x54);
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x73);
-
- seq_printf(m, "\r\n %-35s = %s/ %s", "LTE Coex/Path Owner",
- ((lte_coex_on) ? "On" : "Off"),
- ((u8tmp[0] & BIT(2)) ? "WL" : "BT"));
-
- if (lte_coex_on) {
- seq_printf(m, "\r\n %-35s = %d/ %d/ %d/ %d",
- "LTE 3Wire/OPMode/UART/UARTMode",
- (int)((u32tmp[0] & BIT(6)) >> 6),
- (int)((u32tmp[0] & (BIT(5) | BIT(4))) >> 4),
- (int)((u32tmp[0] & BIT(3)) >> 3),
- (int)(u32tmp[0] & (BIT(2) | BIT(1) | BIT(0))));
-
- seq_printf(m, "\r\n %-35s = %d/ %d", "LTE_Busy/UART_Busy",
- (int)((u32tmp[1] & BIT(1)) >> 1),
- (int)(u32tmp[1] & BIT(0)));
- }
- seq_printf(m, "\r\n %-35s = %s (BB:%s)/ %s (BB:%s)/ %s %d",
- "GNT_WL_Ctrl/GNT_BT_Ctrl/Dbg",
- ((u32tmp[0] & BIT(12)) ? "SW" : "HW"),
- ((u32tmp[0] & BIT(8)) ? "SW" : "HW"),
- ((u32tmp[0] & BIT(14)) ? "SW" : "HW"),
- ((u32tmp[0] & BIT(10)) ? "SW" : "HW"),
- ((u8tmp[0] & BIT(3)) ? "On" : "Off"),
- coex_sta->gnt_error_cnt);
-
- seq_printf(m, "\r\n %-35s = %d/ %d", "GNT_WL/GNT_BT",
- (int)((u32tmp[1] & BIT(2)) >> 2),
- (int)((u32tmp[1] & BIT(3)) >> 3));
-
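-	/* byte register 0xcba holds bits [23:16] of the 32-bit register
-	 * at 0xcb8, hence the "0xcb8[23:16]" label below
-	 */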
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcbc);
- u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xcba);
-
- seq_printf(m, "\r\n %-35s = 0x%04x/ 0x%04x/ 0x%02x %s",
- "0xcbc/0xcb4/0xcb8[23:16]", u32tmp[0], u32tmp[1], u8tmp[0],
- ((u8tmp[0] & 0x1) == 0x1 ? "(BTG)" : "(WL_A+G)"));
-
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
- u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x4c6);
- u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "4c[24:23]/64[0]/4c6[4]/40[5]",
- (int)(u32tmp[0] & (BIT(24) | BIT(23))) >> 23, u8tmp[2] & 0x1,
- (int)((u8tmp[0] & BIT(4)) >> 4),
- (int)((u8tmp[1] & BIT(5)) >> 5));
-
- u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
- u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x953);
- u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0xc50);
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ %s/ 0x%x",
- "0x550/0x522/4-RxAGC/0xc50", u32tmp[0], u8tmp[0],
- (u8tmp[1] & 0x2) ? "On" : "Off", u8tmp[2]);
-
- fa_ofdm = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "PHYDM_INFO_FA_OFDM");
- fa_cck = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "PHYDM_INFO_FA_CCK");
- cca_ofdm = btcoexist->btc_phydm_query_phy_counter(
- btcoexist, "PHYDM_INFO_CCA_OFDM");
- cca_cck = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "PHYDM_INFO_CCA_CCK");
-
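-	/* OFDM CCA-to-false-alarm ratio; 1000 is used as a sentinel when
-	 * no OFDM false alarms were counted
-	 */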
- ratio_ofdm = (fa_ofdm == 0) ? 1000 : (cca_ofdm / fa_ofdm);
-
- seq_printf(m, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x (%d)",
- "CCK-CCA/CCK-FA/OFDM-CCA/OFDM-FA", cca_cck, fa_cck, cca_ofdm,
- fa_ofdm, ratio_ofdm);
-
- seq_printf(m, "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_OK CCK/11g/11n/11ac",
- coex_sta->crc_ok_cck, coex_sta->crc_ok_11g,
- coex_sta->crc_ok_11n, coex_sta->crc_ok_11n_vht);
-
- seq_printf(m, "\r\n %-35s = %d/ %d/ %d/ %d (%d, %d)",
- "CRC_Err CCK/11g/11n/11ac", coex_sta->crc_err_cck,
- coex_sta->crc_err_11g, coex_sta->crc_err_11n,
- coex_sta->crc_err_11n_vht, coex_sta->now_crc_ratio,
- coex_sta->acc_crc_ratio);
-
- seq_printf(m, "\r\n %-35s = %s/ %s/ %s/ %d",
- "WlHiPri/ Locking/ Locked/ Noisy",
- (coex_sta->wifi_is_high_pri_task ? "Yes" : "No"),
- (coex_sta->cck_lock ? "Yes" : "No"),
- (coex_sta->cck_ever_lock ? "Yes" : "No"),
- coex_sta->wl_noisy_level);
-
- seq_printf(m, "\r\n %-35s = %d/ %d", "0x770(Hi-pri rx/tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-
- seq_printf(m, "\r\n %-35s = %d/ %d %s", "0x774(Lo-pri rx/tx)",
- coex_sta->low_priority_rx, coex_sta->low_priority_tx,
- (bt_link_info->slave_role ?
- "(Slave!!)" :
- (coex_sta->is_tdma_btautoslot_hang ?
- "(auto-slot hang!!)" :
- "")));
-
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
-}
-
-void ex_btc8822b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- if (type == BTC_IPS_ENTER) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
- coex_sta->under_ips = true;
- coex_sta->under_lps = false;
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, false);
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ONOFF, false);
-
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_WLAN_OFF);
-
- halbtc8822b2ant_action_coex_all_off(btcoexist);
- } else if (type == BTC_IPS_LEAVE) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
- coex_sta->under_ips = false;
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, true);
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ONOFF, true);
- halbtc8822b2ant_init_hw_config(btcoexist, false);
- halbtc8822b2ant_init_coex_dm(btcoexist);
- halbtc8822b2ant_query_bt_info(btcoexist);
- }
-}
-
-void ex_btc8822b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
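-	/* true if the previous LPS ENABLE used the forced-LPS (No-32K) path */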
- static bool pre_force_lps_on;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- if (type == BTC_LPS_ENABLE) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
- coex_sta->under_lps = true;
- coex_sta->under_ips = false;
-
- if (coex_sta->force_lps_ctrl) { /* LPS No-32K */
- /* Write WL "Active" in Score-board for PS-TDMA */
- pre_force_lps_on = true;
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE,
- true);
-
- } else {
-			/* LPS-32K: need to check whether this H2C 0x71
-			 * actually works (2015/08/28).
-			 */
- /* Write WL "Non-Active" in Score-board for Native-PS */
- pre_force_lps_on = false;
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE,
- false);
- }
-
- } else if (type == BTC_LPS_DISABLE) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
- coex_sta->under_lps = false;
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, true);
-
- if ((!pre_force_lps_on) && (!coex_sta->force_lps_ctrl))
- halbtc8822b2ant_query_bt_info(btcoexist);
- }
-}
-
-void ex_btc8822b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_connected = false;
- bool wifi_under_5g = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN notify()\n");
-
- halbtc8822b2ant_post_state_to_bt(btcoexist,
- BT_8822B_2ANT_SCOREBOARD_ACTIVE, true);
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
-	/* this can't be removed for the RF off_on event, or BT would disconnect */
- halbtc8822b2ant_query_bt_info(btcoexist);
-
- if (type == BTC_SCAN_START) {
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
- &wifi_under_5g);
-
- if (wifi_under_5g) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** SCAN START notify (5g)\n");
-
- halbtc8822b2ant_action_wifi_under5g(btcoexist);
- return;
- }
-
- coex_sta->wifi_is_high_pri_task = true;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** SCAN START notify (2g)\n");
-
- halbtc8822b2ant_run_coexist_mechanism(btcoexist);
-
- return;
- }
-
- if (type == BTC_SCAN_START_2G) {
- if (!wifi_connected)
- coex_sta->wifi_is_high_pri_task = true;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify (2G)\n");
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_SCAN, true);
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, true);
-
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b2ant_run_coexist_mechanism(btcoexist);
-
- } else if (type == BTC_SCAN_FINISH) {
- coex_sta->wifi_is_high_pri_task = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
- &coex_sta->scan_ap_num);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify (Scan-AP = %d)\n",
- coex_sta->scan_ap_num);
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_SCAN, false);
-
- halbtc8822b2ant_run_coexist_mechanism(btcoexist);
- }
-}
-
-void ex_btc8822b2ant_switchband_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
- coex_sta->switch_band_notify_to = type;
-
- if (type == BTC_SWITCH_TO_5G) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], switchband_notify --- switch to 5G\n");
-
- halbtc8822b2ant_action_wifi_under5g(btcoexist);
-
- } else if (type == BTC_SWITCH_TO_24G_NOFORSCAN) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** switchband_notify BTC_SWITCH_TO_2G (no for scan)\n");
-
- halbtc8822b2ant_run_coexist_mechanism(btcoexist);
-
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], switchband_notify --- switch to 2G\n");
-
- ex_btc8822b2ant_scan_notify(btcoexist, BTC_SCAN_START_2G);
- }
- coex_sta->switch_band_notify_to = BTC_NOT_SWITCH;
-}
-
-void ex_btc8822b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- halbtc8822b2ant_post_state_to_bt(btcoexist,
- BT_8822B_2ANT_SCOREBOARD_ACTIVE, true);
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- if ((type == BTC_ASSOCIATE_5G_START) ||
- (type == BTC_ASSOCIATE_5G_FINISH)) {
- if (type == BTC_ASSOCIATE_5G_START)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], connect_notify --- 5G start\n");
- else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], connect_notify --- 5G finish\n");
-
- halbtc8822b2ant_action_wifi_under5g(btcoexist);
- return;
- }
-
- if (type == BTC_ASSOCIATE_START) {
- coex_sta->wifi_is_high_pri_task = true;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify (2G)\n");
-
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_2G_RUNTIME);
-
- halbtc8822b2ant_action_wifi_link_process(btcoexist);
-
-		/* Keep the TDMA case during the connect process, to avoid it
-		 * being changed by the BT info handler and run_coexist_mechanism.
-		 */
- coex_sta->freeze_coexrun_by_btinfo = true;
-
- coex_dm->arp_cnt = 0;
-
- } else if (type == BTC_ASSOCIATE_FINISH) {
- coex_sta->wifi_is_high_pri_task = false;
- coex_sta->freeze_coexrun_by_btinfo = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify (2G)\n");
-
- halbtc8822b2ant_run_coexist_mechanism(btcoexist);
- }
-}
-
-void ex_btc8822b2ant_media_status_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_under_b_mode = false, wifi_under_5g = false;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if (type == BTC_MEDIA_CONNECT) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, true);
-
- if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under 5G!!!\n");
-
- halbtc8822b2ant_action_wifi_under5g(btcoexist);
- return;
- }
-
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_2G_RUNTIME);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
- &wifi_under_b_mode);
-
-		/* Set CCK Tx/Rx to high priority except in 11b mode */
- if (wifi_under_b_mode) {
- btcoexist->btc_write_1byte(btcoexist, 0x6cd,
- 0x00); /* CCK Tx */
- btcoexist->btc_write_1byte(btcoexist, 0x6cf,
- 0x00); /* CCK Rx */
- } else {
- btcoexist->btc_write_1byte(btcoexist, 0x6cd,
- 0x00); /* CCK Tx */
- btcoexist->btc_write_1byte(btcoexist, 0x6cf,
- 0x10); /* CCK Rx */
- }
-
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
-
- btcoexist->btc_write_1byte(btcoexist, 0x6cd, 0x0); /* CCK Tx */
- btcoexist->btc_write_1byte(btcoexist, 0x6cf, 0x0); /* CCK Rx */
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, false);
- }
-
- halbtc8822b2ant_update_wifi_ch_info(btcoexist, type);
-}
-
-void ex_btc8822b2ant_specific_packet_notify(struct btc_coexist *btcoexist,
- u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool under_4way = false, wifi_under_5g = false;
-
- if (btcoexist->manual_control || btcoexist->stop_coex_dm)
- return;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under 5G!!!\n");
-
- halbtc8822b2ant_action_wifi_under5g(btcoexist);
- return;
- }
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
- &under_4way);
-
- if (under_4way) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet ---- under_4way!!\n");
-
- coex_sta->wifi_is_high_pri_task = true;
- coex_sta->specific_pkt_period_cnt = 2;
-
- } else if (type == BTC_PACKET_ARP) {
- coex_dm->arp_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet ARP notify -cnt = %d\n",
- coex_dm->arp_cnt);
-
- } else {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet DHCP or EAPOL notify [Type = %d]\n",
- type);
-
- coex_sta->wifi_is_high_pri_task = true;
- coex_sta->specific_pkt_period_cnt = 2;
- }
-
- if (coex_sta->wifi_is_high_pri_task)
- halbtc8822b2ant_run_coexist_mechanism(btcoexist);
-}
-
-void ex_btc8822b2ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
- u8 length)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 i, rsp_source = 0;
- bool wifi_connected = false;
- bool wifi_scan = false, wifi_link = false, wifi_roam = false,
- wifi_busy = false;
- static bool is_scoreboard_scan;
-
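-	/* the low nibble of the first C2H byte identifies the BT info source;
-	 * out-of-range values are attributed to the WiFi FW
-	 */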
- rsp_source = tmp_buf[0] & 0xf;
- if (rsp_source >= BT_INFO_SRC_8822B_2ANT_MAX)
- rsp_source = BT_INFO_SRC_8822B_2ANT_WIFI_FW;
- coex_sta->bt_info_c2h_cnt[rsp_source]++;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt_info[%d], len=%d, data=[", rsp_source, length);
-
- for (i = 0; i < length; i++) {
- coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
-
- if (i == length - 1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "0x%02x, ",
- tmp_buf[i]);
- }
- }
-
- coex_sta->bt_info = coex_sta->bt_info_c2h[rsp_source][1];
- coex_sta->bt_info_ext = coex_sta->bt_info_c2h[rsp_source][4];
- coex_sta->bt_info_ext2 = coex_sta->bt_info_c2h[rsp_source][5];
-
- if (rsp_source != BT_INFO_SRC_8822B_2ANT_WIFI_FW) {
- /* if 0xff, it means BT is under WHCK test */
- coex_sta->bt_whck_test =
- ((coex_sta->bt_info == 0xff) ? true : false);
-
- coex_sta->bt_create_connection =
- ((coex_sta->bt_info_c2h[rsp_source][2] & 0x80) ? true :
- false);
-
-		/* unit: %; subtract 100 to convert to dBm */
- coex_sta->bt_rssi =
- coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
-
- coex_sta->c2h_bt_remote_name_req =
- ((coex_sta->bt_info_c2h[rsp_source][2] & 0x20) ? true :
- false);
-
- coex_sta->is_A2DP_3M =
- ((coex_sta->bt_info_c2h[rsp_source][2] & 0x10) ? true :
- false);
-
- coex_sta->acl_busy =
- ((coex_sta->bt_info_c2h[rsp_source][1] & 0x9) ? true :
- false);
-
- coex_sta->voice_over_HOGP =
- ((coex_sta->bt_info_ext & 0x10) ? true : false);
-
- coex_sta->c2h_bt_inquiry_page =
- ((coex_sta->bt_info & BT_INFO_8822B_2ANT_B_INQ_PAGE) ?
- true :
- false);
-
- coex_sta->a2dp_bit_pool =
- (((coex_sta->bt_info_c2h[rsp_source][1] & 0x49) ==
- 0x49) ?
- (coex_sta->bt_info_c2h[rsp_source][6] & 0x7f) :
- 0);
-
- coex_sta->is_bt_a2dp_sink =
- (coex_sta->bt_info_c2h[rsp_source][6] & 0x80) ? true :
- false;
-
- coex_sta->bt_retry_cnt =
- coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
-
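-		/* decode bt_info_ext2: bits[2:0] forbidden slots, bit 3
-		 * auto-slot, bits[5:4] busy HID links, bits[7:6] paired
-		 * HID count
-		 */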
- coex_sta->is_autoslot = coex_sta->bt_info_ext2 & 0x8;
-
- coex_sta->forbidden_slot = coex_sta->bt_info_ext2 & 0x7;
-
- coex_sta->hid_busy_num = (coex_sta->bt_info_ext2 & 0x30) >> 4;
-
- coex_sta->hid_pair_cnt = (coex_sta->bt_info_ext2 & 0xc0) >> 6;
-
- if (coex_sta->bt_retry_cnt >= 1)
- coex_sta->pop_event_cnt++;
-
- if (coex_sta->c2h_bt_remote_name_req)
- coex_sta->cnt_remote_name_req++;
-
- if (coex_sta->bt_info_ext & BIT(1))
- coex_sta->cnt_reinit++;
-
- if (coex_sta->bt_info_ext & BIT(2)) {
- coex_sta->cnt_setup_link++;
- coex_sta->is_setup_link = true;
- coex_sta->bt_relink_downcount = 2;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Re-Link start in BT info!!\n");
- } else {
- coex_sta->is_setup_link = false;
- coex_sta->bt_relink_downcount = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Re-Link stop in BT info!!\n");
- }
-
- if (coex_sta->bt_info_ext & BIT(3))
- coex_sta->cnt_ign_wlan_act++;
-
- if (coex_sta->bt_info_ext & BIT(6))
- coex_sta->cnt_role_switch++;
-
- if (coex_sta->bt_info_ext & BIT(7))
- coex_sta->is_bt_multi_link = true;
- else
- coex_sta->is_bt_multi_link = false;
-
- if (coex_sta->bt_create_connection) {
- coex_sta->cnt_page++;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY,
- &wifi_busy);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN,
- &wifi_scan);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK,
- &wifi_link);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM,
- &wifi_roam);
-
- if ((wifi_link) || (wifi_roam) || (wifi_scan) ||
- (coex_sta->wifi_is_high_pri_task) || (wifi_busy)) {
- is_scoreboard_scan = true;
- halbtc8822b2ant_post_state_to_bt(
- btcoexist,
- BT_8822B_2ANT_SCOREBOARD_SCAN, true);
-
- } else {
- halbtc8822b2ant_post_state_to_bt(
- btcoexist,
- BT_8822B_2ANT_SCOREBOARD_SCAN, false);
- }
- } else {
- if (is_scoreboard_scan) {
- halbtc8822b2ant_post_state_to_bt(
- btcoexist,
- BT_8822B_2ANT_SCOREBOARD_SCAN, false);
- is_scoreboard_scan = false;
- }
- }
-
-		/* Here we need to resend some wifi info to BT
-		 * because BT was reset and lost the info.
-		 */
-
- if ((!btcoexist->manual_control) &&
- (!btcoexist->stop_coex_dm)) {
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
-
- /* Re-Init */
- if ((coex_sta->bt_info_ext & BIT(1))) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
- if (wifi_connected)
- halbtc8822b2ant_update_wifi_ch_info(
- btcoexist, BTC_MEDIA_CONNECT);
- else
- halbtc8822b2ant_update_wifi_ch_info(
- btcoexist,
- BTC_MEDIA_DISCONNECT);
- }
-
- /* If Ignore_WLanAct && not SetUp_Link */
- if ((coex_sta->bt_info_ext & BIT(3)) &&
- (!(coex_sta->bt_info_ext & BIT(2))) &&
- (!(coex_sta->bt_info_ext & BIT(6)))) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
- halbtc8822b2ant_ignore_wlan_act(
- btcoexist, FORCE_EXEC, false);
- } else {
- if (coex_sta->bt_info_ext & BIT(2)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT ignore Wlan active because Re-link!!\n");
- } else if (coex_sta->bt_info_ext & BIT(6)) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT ignore Wlan active because Role-Switch!!\n");
- }
- }
- }
- }
-
- if ((coex_sta->bt_info_ext & BIT(5))) {
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-			"[BTCoex], BT ext info bit5 check, query BLE Scan type!!\n");
- coex_sta->bt_ble_scan_type =
- btcoexist->btc_get_ble_scan_type_from_bt(btcoexist);
-
- if ((coex_sta->bt_ble_scan_type & 0x1) == 0x1)
- coex_sta->bt_ble_scan_para[0] =
- btcoexist->btc_get_ble_scan_para_from_bt(
- btcoexist, 0x1);
- if ((coex_sta->bt_ble_scan_type & 0x2) == 0x2)
- coex_sta->bt_ble_scan_para[1] =
- btcoexist->btc_get_ble_scan_para_from_bt(
- btcoexist, 0x2);
- if ((coex_sta->bt_ble_scan_type & 0x4) == 0x4)
- coex_sta->bt_ble_scan_para[2] =
- btcoexist->btc_get_ble_scan_para_from_bt(
- btcoexist, 0x4);
- }
-
- halbtc8822b2ant_update_bt_link_info(btcoexist);
-
- halbtc8822b2ant_run_coexist_mechanism(btcoexist);
-}
-
-void ex_btc8822b2ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF Status notify\n");
-
- if (type == BTC_RF_ON) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF is turned ON!!\n");
-
- btcoexist->stop_coex_dm = false;
- coex_sta->is_rf_state_off = false;
- } else if (type == BTC_RF_OFF) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF is turned OFF!!\n");
-
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_WLAN_OFF);
-
- halbtc8822b2ant_action_coex_all_off(btcoexist);
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE |
- BT_8822B_2ANT_SCOREBOARD_ONOFF |
- BT_8822B_2ANT_SCOREBOARD_SCAN |
- BT_8822B_2ANT_SCOREBOARD_UNDERTEST,
- false);
-
- btcoexist->stop_coex_dm = true;
- coex_sta->is_rf_state_off = true;
- }
-}
-
-void ex_btc8822b2ant_halt_notify(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
-
- halbtc8822b2ant_set_ant_path(btcoexist, BTC_ANT_PATH_AUTO, FORCE_EXEC,
- BT_8822B_2ANT_PHASE_WLAN_OFF);
-
- ex_btc8822b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, false);
- halbtc8822b2ant_post_state_to_bt(btcoexist,
- BT_8822B_2ANT_SCOREBOARD_ONOFF, false);
-}
-
-void ex_btc8822b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_under_5g = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
-
- if ((pnp_state == BTC_WIFI_PNP_SLEEP) ||
- (pnp_state == BTC_WIFI_PNP_SLEEP_KEEP_ANT)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
-
-		/* Sinda 20150819: workaround for the driver skipping the
-		 * leave-IPS/LPS step to speed up sleep.
-		 * The driver does not leave IPS/LPS when it is going to sleep,
-		 * so BT coexistence thinks wifi is still under IPS/LPS.
-		 * BT should clear the UnderIPS/UnderLPS state to avoid a
-		 * mismatched state after wakeup.
-		 */
- coex_sta->under_ips = false;
- coex_sta->under_lps = false;
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, false);
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ONOFF, false);
-
- if (pnp_state == BTC_WIFI_PNP_SLEEP_KEEP_ANT) {
- if (wifi_under_5g)
- halbtc8822b2ant_set_ant_path(
- btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_5G_RUNTIME);
- else
- halbtc8822b2ant_set_ant_path(
- btcoexist, BTC_ANT_PATH_AUTO,
- FORCE_EXEC,
- BT_8822B_2ANT_PHASE_2G_RUNTIME);
- } else {
- halbtc8822b2ant_set_ant_path(
- btcoexist, BTC_ANT_PATH_AUTO, FORCE_EXEC,
- BT_8822B_2ANT_PHASE_WLAN_OFF);
- }
- } else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
-
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ACTIVE, true);
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_ONOFF, true);
- }
-}
-
-void ex_btc8822b2ant_periodical(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_busy = false;
- u16 bt_scoreboard_val = 0;
- bool bt_relink_finish = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ************* Periodical *************\n");
-
- if (!btcoexist->auto_report_2ant)
- halbtc8822b2ant_query_bt_info(btcoexist);
-
- halbtc8822b2ant_monitor_bt_ctr(btcoexist);
- halbtc8822b2ant_monitor_wifi_ctr(btcoexist);
- halbtc8822b2ant_monitor_bt_enable_disable(btcoexist);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- halbtc8822b2ant_read_score_board(btcoexist, &bt_scoreboard_val);
-
- if (wifi_busy) {
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_UNDERTEST, true);
- /*for bt lps32 clock offset*/
- if (bt_scoreboard_val & BIT(6))
- halbtc8822b2ant_query_bt_info(btcoexist);
- } else {
- halbtc8822b2ant_post_state_to_bt(
- btcoexist, BT_8822B_2ANT_SCOREBOARD_UNDERTEST, false);
- }
-
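-	/* count down the BT re-link grace period armed in bt_info_notify;
-	 * when it expires, clear is_setup_link and re-run the coex mechanism
-	 */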
- if (coex_sta->bt_relink_downcount != 0) {
- coex_sta->bt_relink_downcount--;
-
- if (coex_sta->bt_relink_downcount == 0) {
- coex_sta->is_setup_link = false;
- bt_relink_finish = true;
- }
- }
-
- /* for 4-way, DHCP, EAPOL packet */
- if (coex_sta->specific_pkt_period_cnt > 0) {
- coex_sta->specific_pkt_period_cnt--;
-
- if ((coex_sta->specific_pkt_period_cnt == 0) &&
- (coex_sta->wifi_is_high_pri_task))
- coex_sta->wifi_is_high_pri_task = false;
-
- RT_TRACE(
- rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ***************** Hi-Pri Task = %s*****************\n",
- (coex_sta->wifi_is_high_pri_task ? "Yes" : "No"));
- }
-
- if (halbtc8822b2ant_is_wifibt_status_changed(btcoexist) ||
- (bt_relink_finish) || (coex_sta->is_set_ps_state_fail))
- halbtc8822b2ant_run_coexist_mechanism(btcoexist);
-}
-
-void ex_btc8822b2ant_antenna_detection(struct btc_coexist *btcoexist,
- u32 cent_freq, u32 offset, u32 span,
- u32 seconds)
-{
-}
-
-void ex_btc8822b2ant_display_ant_detection(struct btc_coexist *btcoexist) {}
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h b/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h
deleted file mode 100644
index c99aa6ff1d7f..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h
+++ /dev/null
@@ -1,487 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* *******************************************
- * The following are the 8822B 2Ant BT co-existence definitions
- * ********************************************/
-#define BT_INFO_8822B_2ANT_B_FTP BIT(7)
-#define BT_INFO_8822B_2ANT_B_A2DP BIT(6)
-#define BT_INFO_8822B_2ANT_B_HID BIT(5)
-#define BT_INFO_8822B_2ANT_B_SCO_BUSY BIT(4)
-#define BT_INFO_8822B_2ANT_B_ACL_BUSY BIT(3)
-#define BT_INFO_8822B_2ANT_B_INQ_PAGE BIT(2)
-#define BT_INFO_8822B_2ANT_B_SCO_ESCO BIT(1)
-#define BT_INFO_8822B_2ANT_B_CONNECTION BIT(0)
-
-#define BTC_RSSI_COEX_THRESH_TOL_8822B_2ANT 2
-
-/* unit: %. WiFi RSSI threshold for the 2-Ant free-run/2-Ant TDMA transition.
- * (default = 42)
- */
-#define BT_8822B_2ANT_WIFI_RSSI_COEXSWITCH_THRES1 80
-/* unit: %. BT RSSI threshold for the 2-Ant free-run/2-Ant TDMA transition.
- * (default = 46)
- */
-#define BT_8822B_2ANT_BT_RSSI_COEXSWITCH_THRES1 80
-/* unit: %. WiFi RSSI threshold for the 1-Ant TDMA/1-Ant PS-TDMA transition.
- * (default = 42)
- */
-#define BT_8822B_2ANT_WIFI_RSSI_COEXSWITCH_THRES2 80
-/* unit: %. BT RSSI threshold for the 1-Ant TDMA/1-Ant PS-TDMA transition.
- * (default = 46)
- */
-#define BT_8822B_2ANT_BT_RSSI_COEXSWITCH_THRES2 80
-#define BT_8822B_2ANT_DEFAULT_ISOLATION 15 /* unit: dB */
-#define BT_8822B_2ANT_WIFI_MAX_TX_POWER 15 /* unit: dBm */
-#define BT_8822B_2ANT_BT_MAX_TX_POWER 3 /* unit: dBm */
-#define BT_8822B_2ANT_WIFI_SIR_THRES1 -15 /* unit: dB */
-#define BT_8822B_2ANT_WIFI_SIR_THRES2 -30 /* unit: dB */
-#define BT_8822B_2ANT_BT_SIR_THRES1 -15 /* unit: dB */
-#define BT_8822B_2ANT_BT_SIR_THRES2 -30 /* unit: dB */
-
-/* for Antenna detection */
-#define BT_8822B_2ANT_ANTDET_PSDTHRES_BACKGROUND 50
-#define BT_8822B_2ANT_ANTDET_PSDTHRES_2ANT_BADISOLATION 70
-#define BT_8822B_2ANT_ANTDET_PSDTHRES_2ANT_GOODISOLATION 52
-#define BT_8822B_2ANT_ANTDET_PSDTHRES_1ANT 40
-#define BT_8822B_2ANT_ANTDET_RETRY_INTERVAL \
-	10 /* retry timer if antenna detection fails, unit: seconds */
-#define BT_8822B_2ANT_ANTDET_SWEEPPOINT_DELAY 60000
-#define BT_8822B_2ANT_ANTDET_ENABLE 0
-#define BT_8822B_2ANT_ANTDET_BTTXTIME 100
-#define BT_8822B_2ANT_ANTDET_BTTXCHANNEL 39
-#define BT_8822B_2ANT_ANTDET_PSD_SWWEEPCOUNT 50
-
-#define BT_8822B_2ANT_LTECOEX_INDIRECTREG_ACCESS_TIMEOUT 30000
-
-enum bt_8822b_2ant_signal_state {
- BT_8822B_2ANT_SIG_STA_SET_TO_LOW = 0x0,
- BT_8822B_2ANT_SIG_STA_SET_BY_HW = 0x0,
- BT_8822B_2ANT_SIG_STA_SET_TO_HIGH = 0x1,
- BT_8822B_2ANT_SIG_STA_MAX
-};
-
-enum bt_8822b_2ant_path_ctrl_owner {
- BT_8822B_2ANT_PCO_BTSIDE = 0x0,
- BT_8822B_2ANT_PCO_WLSIDE = 0x1,
- BT_8822B_2ANT_PCO_MAX
-};
-
-enum bt_8822b_2ant_gnt_ctrl_type {
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_PTA = 0x0,
- BT_8822B_2ANT_GNT_TYPE_CTRL_BY_SW = 0x1,
- BT_8822B_2ANT_GNT_TYPE_MAX
-};
-
-enum bt_8822b_2ant_gnt_ctrl_block {
- BT_8822B_2ANT_GNT_BLOCK_RFC_BB = 0x0,
- BT_8822B_2ANT_GNT_BLOCK_RFC = 0x1,
- BT_8822B_2ANT_GNT_BLOCK_BB = 0x2,
- BT_8822B_2ANT_GNT_BLOCK_MAX
-};
-
-enum bt_8822b_2ant_lte_coex_table_type {
- BT_8822B_2ANT_CTT_WL_VS_LTE = 0x0,
- BT_8822B_2ANT_CTT_BT_VS_LTE = 0x1,
- BT_8822B_2ANT_CTT_MAX
-};
-
-enum bt_8822b_2ant_lte_break_table_type {
- BT_8822B_2ANT_LBTT_WL_BREAK_LTE = 0x0,
- BT_8822B_2ANT_LBTT_BT_BREAK_LTE = 0x1,
- BT_8822B_2ANT_LBTT_LTE_BREAK_WL = 0x2,
- BT_8822B_2ANT_LBTT_LTE_BREAK_BT = 0x3,
- BT_8822B_2ANT_LBTT_MAX
-};
-
-enum bt_info_src_8822b_2ant {
- BT_INFO_SRC_8822B_2ANT_WIFI_FW = 0x0,
- BT_INFO_SRC_8822B_2ANT_BT_RSP = 0x1,
- BT_INFO_SRC_8822B_2ANT_BT_ACTIVE_SEND = 0x2,
- BT_INFO_SRC_8822B_2ANT_MAX
-};
-
-enum bt_8822b_2ant_bt_status {
- BT_8822B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
- BT_8822B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
- BT_8822B_2ANT_BT_STATUS_INQ_PAGE = 0x2,
- BT_8822B_2ANT_BT_STATUS_ACL_BUSY = 0x3,
- BT_8822B_2ANT_BT_STATUS_SCO_BUSY = 0x4,
- BT_8822B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
- BT_8822B_2ANT_BT_STATUS_MAX
-};
-
-enum bt_8822b_2ant_coex_algo {
- BT_8822B_2ANT_COEX_ALGO_UNDEFINED = 0x0,
- BT_8822B_2ANT_COEX_ALGO_SCO = 0x1,
- BT_8822B_2ANT_COEX_ALGO_HID = 0x2,
- BT_8822B_2ANT_COEX_ALGO_A2DP = 0x3,
- BT_8822B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4,
- BT_8822B_2ANT_COEX_ALGO_PANEDR = 0x5,
- BT_8822B_2ANT_COEX_ALGO_PANHS = 0x6,
- BT_8822B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
- BT_8822B_2ANT_COEX_ALGO_PANEDR_HID = 0x8,
- BT_8822B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
- BT_8822B_2ANT_COEX_ALGO_HID_A2DP = 0xa,
- BT_8822B_2ANT_COEX_ALGO_NOPROFILEBUSY = 0xb,
- BT_8822B_2ANT_COEX_ALGO_A2DPSINK = 0xc,
- BT_8822B_2ANT_COEX_ALGO_MAX
-};
-
-enum bt_8822b_2ant_ext_ant_switch_type {
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_DPDT = 0x0,
- BT_8822B_2ANT_EXT_ANT_SWITCH_USE_SPDT = 0x1,
- BT_8822B_2ANT_EXT_ANT_SWITCH_NONE = 0x2,
- BT_8822B_2ANT_EXT_ANT_SWITCH_MAX
-};
-
-enum bt_8822b_2ant_ext_ant_switch_ctrl_type {
- BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_BBSW = 0x0,
- BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_PTA = 0x1,
- BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_ANTDIV = 0x2,
- BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_MAC = 0x3,
- BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_BY_BT = 0x4,
- BT_8822B_2ANT_EXT_ANT_SWITCH_CTRL_MAX
-};
-
-enum bt_8822b_2ant_ext_ant_switch_pos_type {
- BT_8822B_2ANT_EXT_ANT_SWITCH_MAIN_TO_BT = 0x0,
- BT_8822B_2ANT_EXT_ANT_SWITCH_MAIN_TO_WLG = 0x1,
- BT_8822B_2ANT_EXT_ANT_SWITCH_MAIN_TO_WLA = 0x2,
- BT_8822B_2ANT_EXT_ANT_SWITCH_MAIN_TO_NOCARE = 0x3,
- BT_8822B_2ANT_EXT_ANT_SWITCH_MAIN_TO_MAX
-};
-
-enum bt_8822b_2ant_ext_band_switch_pos_type {
- BT_8822B_2ANT_EXT_BAND_SWITCH_TO_WLG = 0x0,
- BT_8822B_2ANT_EXT_BAND_SWITCH_TO_WLA = 0x1,
- BT_8822B_2ANT_EXT_BAND_SWITCH_TO_MAX
-};
-
-enum bt_8822b_2ant_int_block {
- BT_8822B_2ANT_INT_BLOCK_SWITCH_TO_WLG_OF_BTG = 0x0,
- BT_8822B_2ANT_INT_BLOCK_SWITCH_TO_WLG_OF_WLAG = 0x1,
- BT_8822B_2ANT_INT_BLOCK_SWITCH_TO_WLA_OF_WLAG = 0x2,
- BT_8822B_2ANT_INT_BLOCK_SWITCH_TO_MAX
-};
-
-enum bt_8822b_2ant_phase {
- BT_8822B_2ANT_PHASE_COEX_INIT = 0x0,
- BT_8822B_2ANT_PHASE_WLANONLY_INIT = 0x1,
- BT_8822B_2ANT_PHASE_WLAN_OFF = 0x2,
- BT_8822B_2ANT_PHASE_2G_RUNTIME = 0x3,
- BT_8822B_2ANT_PHASE_5G_RUNTIME = 0x4,
- BT_8822B_2ANT_PHASE_BTMPMODE = 0x5,
- BT_8822B_2ANT_PHASE_ANTENNA_DET = 0x6,
- BT_8822B_2ANT_PHASE_COEX_POWERON = 0x7,
- BT_8822B_2ANT_PHASE_2G_RUNTIME_CONCURRENT = 0x8,
- BT_8822B_2ANT_PHASE_MAX
-};
-
-/* Add scoreboard to fix the BT LPS 32K issue while WL is busy */
-
-enum bt_8822b_2ant_scoreboard {
- BT_8822B_2ANT_SCOREBOARD_ACTIVE = BIT(0),
- BT_8822B_2ANT_SCOREBOARD_ONOFF = BIT(1),
- BT_8822B_2ANT_SCOREBOARD_SCAN = BIT(2),
- BT_8822B_2ANT_SCOREBOARD_UNDERTEST = BIT(3),
- BT_8822B_2ANT_SCOREBOARD_WLBUSY = BIT(6)
-};
-
-struct coex_dm_8822b_2ant {
- /* hw setting */
- u32 pre_ant_pos_type;
- u32 cur_ant_pos_type;
- /* fw mechanism */
- u8 pre_bt_dec_pwr_lvl;
- u8 cur_bt_dec_pwr_lvl;
- u8 pre_fw_dac_swing_lvl;
- u8 cur_fw_dac_swing_lvl;
- bool cur_ignore_wlan_act;
- bool pre_ignore_wlan_act;
- u8 pre_ps_tdma;
- u8 cur_ps_tdma;
- u8 ps_tdma_para[5];
- u8 ps_tdma_du_adj_type;
- bool reset_tdma_adjust;
- bool pre_ps_tdma_on;
- bool cur_ps_tdma_on;
- bool pre_bt_auto_report;
- bool cur_bt_auto_report;
-
- /* sw mechanism */
- bool pre_rf_rx_lpf_shrink;
- bool cur_rf_rx_lpf_shrink;
- u32 bt_rf_0x1e_backup;
- bool pre_low_penalty_ra;
- bool cur_low_penalty_ra;
- bool pre_dac_swing_on;
- u32 pre_dac_swing_lvl;
- bool cur_dac_swing_on;
- u32 cur_dac_swing_lvl;
- bool pre_adc_back_off;
- bool cur_adc_back_off;
- bool pre_agc_table_en;
- bool cur_agc_table_en;
- u32 pre_val0x6c0;
- u32 cur_val0x6c0;
- u32 pre_val0x6c4;
- u32 cur_val0x6c4;
- u32 pre_val0x6c8;
- u32 cur_val0x6c8;
- u8 pre_val0x6cc;
- u8 cur_val0x6cc;
- bool limited_dig;
-
- /* algorithm related */
- u8 pre_algorithm;
- u8 cur_algorithm;
- u8 bt_status;
- u8 wifi_chnl_info[3];
-
- bool need_recover0x948;
- u32 backup0x948;
-
- u8 pre_lps;
- u8 cur_lps;
- u8 pre_rpwm;
- u8 cur_rpwm;
-
- bool is_switch_to_1dot5_ant;
- u8 switch_thres_offset;
- u32 arp_cnt;
-
- u32 pre_ext_ant_switch_status;
- u32 cur_ext_ant_switch_status;
-
- u8 pre_ext_band_switch_status;
- u8 cur_ext_band_switch_status;
-
- u8 pre_int_block_status;
- u8 cur_int_block_status;
-};
-
-struct coex_sta_8822b_2ant {
- bool bt_disabled;
- bool bt_link_exist;
- bool sco_exist;
- bool a2dp_exist;
- bool hid_exist;
- bool pan_exist;
-
- bool under_lps;
- bool under_ips;
- u32 high_priority_tx;
- u32 high_priority_rx;
- u32 low_priority_tx;
- u32 low_priority_rx;
- bool is_hi_pri_rx_overhead;
- u8 bt_rssi;
- u8 pre_bt_rssi_state;
- u8 pre_wifi_rssi_state[4];
- u8 bt_info_c2h[BT_INFO_SRC_8822B_2ANT_MAX][10];
- u32 bt_info_c2h_cnt[BT_INFO_SRC_8822B_2ANT_MAX];
- bool bt_whck_test;
- bool c2h_bt_inquiry_page;
- bool c2h_bt_remote_name_req;
-
- u8 bt_info_ext;
- u8 bt_info_ext2;
- u32 pop_event_cnt;
- u8 scan_ap_num;
- u8 bt_retry_cnt;
-
- u32 crc_ok_cck;
- u32 crc_ok_11g;
- u32 crc_ok_11n;
- u32 crc_ok_11n_vht;
-
- u32 crc_err_cck;
- u32 crc_err_11g;
- u32 crc_err_11n;
- u32 crc_err_11n_vht;
-
- u32 acc_crc_ratio;
- u32 now_crc_ratio;
-
- bool cck_lock;
- bool pre_ccklock;
- bool cck_ever_lock;
-
- u8 coex_table_type;
- bool force_lps_ctrl;
-
- u8 dis_ver_info_cnt;
-
- u8 a2dp_bit_pool;
- u8 cut_version;
-
- bool concurrent_rx_mode_on;
-
- u16 score_board;
- u8 isolation_btween_wb; /* 0~ 50 */
- u8 wifi_coex_thres;
- u8 bt_coex_thres;
- u8 wifi_coex_thres2;
- u8 bt_coex_thres2;
-
- u8 num_of_profile;
- bool acl_busy;
- bool bt_create_connection;
- bool wifi_is_high_pri_task;
- u32 specific_pkt_period_cnt;
- u32 bt_coex_supported_feature;
- u32 bt_coex_supported_version;
-
- u8 bt_ble_scan_type;
- u32 bt_ble_scan_para[3];
-
- bool run_time_state;
- bool freeze_coexrun_by_btinfo;
-
- bool is_A2DP_3M;
- bool voice_over_HOGP;
- u8 bt_info;
- bool is_autoslot;
- u8 forbidden_slot;
- u8 hid_busy_num;
- u8 hid_pair_cnt;
-
- u32 cnt_remote_name_req;
- u32 cnt_setup_link;
- u32 cnt_reinit;
- u32 cnt_ign_wlan_act;
- u32 cnt_page;
- u32 cnt_role_switch;
-
- u16 bt_reg_vendor_ac;
- u16 bt_reg_vendor_ae;
-
- bool is_setup_link;
- u8 wl_noisy_level;
- u32 gnt_error_cnt;
-
- u8 bt_afh_map[10];
- u8 bt_relink_downcount;
- bool is_tdma_btautoslot;
- bool is_tdma_btautoslot_hang;
-
- bool is_esco_mode;
- u8 switch_band_notify_to;
- bool is_rf_state_off;
-
- bool is_hid_low_pri_tx_overhead;
- bool is_bt_multi_link;
- bool is_bt_a2dp_sink;
-
- bool is_set_ps_state_fail;
- u8 cnt_set_ps_state_fail;
-};
-
-#define BT_8822B_2ANT_EXT_BAND_SWITCH_USE_DPDT 0
-#define BT_8822B_2ANT_EXT_BAND_SWITCH_USE_SPDT 1
-
-struct rfe_type_8822b_2ant {
- u8 rfe_module_type;
- bool ext_ant_switch_exist;
- u8 ext_ant_switch_type; /* 0:DPDT, 1:SPDT */
-	/* If 0: DPDT_P=0, DPDT_N=1 => BTG to Main, WL_A+G to Aux */
- u8 ext_ant_switch_ctrl_polarity;
-
- bool ext_band_switch_exist;
- u8 ext_band_switch_type; /* 0:DPDT, 1:SPDT */
- u8 ext_band_switch_ctrl_polarity;
-
- /* If true: WLG at BTG, If false: WLG at WLAG */
- bool wlg_locate_at_btg;
-
- bool ext_ant_switch_diversity; /* If diversity on */
-};
-
-#define BT_8822B_2ANT_ANTDET_PSD_POINTS 256 /* MAX:1024 */
-#define BT_8822B_2ANT_ANTDET_PSD_AVGNUM 1 /* MAX:3 */
-#define BT_8822B_2ANT_ANTDET_BUF_LEN 16
-
-struct psdscan_sta_8822b_2ant {
- u32 ant_det_bt_le_channel; /* BT LE Channel ex:2412 */
- u32 ant_det_bt_tx_time;
- u32 ant_det_pre_psdscan_peak_val;
- bool ant_det_is_ant_det_available;
- u32 ant_det_psd_scan_peak_val;
- bool ant_det_is_btreply_available;
- u32 ant_det_psd_scan_peak_freq;
-
- u8 ant_det_result;
- u8 ant_det_peak_val[BT_8822B_2ANT_ANTDET_BUF_LEN];
- u8 ant_det_peak_freq[BT_8822B_2ANT_ANTDET_BUF_LEN];
- u32 ant_det_try_count;
- u32 ant_det_fail_count;
- u32 ant_det_inteval_count;
- u32 ant_det_thres_offset;
-
- u32 real_cent_freq;
- s32 real_offset;
- u32 real_span;
-
- u32 psd_band_width; /* unit: Hz */
- u32 psd_point; /* 128/256/512/1024 */
- u32 psd_report[1024]; /* unit:dB (20logx), 0~255 */
- u32 psd_report_max_hold[1024]; /* unit:dB (20logx), 0~255 */
- u32 psd_start_point;
- u32 psd_stop_point;
- u32 psd_max_value_point;
- u32 psd_max_value;
- u32 psd_max_value2;
-	/* filter out loop_max_value entries that are below
-	 * BT_8822B_1ANT_ANTDET_PSDTHRES_1ANT, and average the rest
-	 */
- u32 psd_avg_value;
- /*max value in each loop */
- u32 psd_loop_max_value[BT_8822B_2ANT_ANTDET_PSD_SWWEEPCOUNT];
- u32 psd_start_base;
- u32 psd_avg_num; /* 1/8/16/32 */
- u32 psd_gen_count;
- bool is_ant_det_running;
- bool is_psd_show_max_only;
-};
-
-/* *******************************************
- * The following are the interfaces that notify the coex module.
- * ********************************************/
-void ex_btc8822b2ant_power_on_setting(struct btc_coexist *btcoexist);
-void ex_btc8822b2ant_pre_load_firmware(struct btc_coexist *btcoexist);
-void ex_btc8822b2ant_init_hw_config(struct btc_coexist *btcoexist,
- bool wifi_only);
-void ex_btc8822b2ant_init_coex_dm(struct btc_coexist *btcoexist);
-void ex_btc8822b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b2ant_switchband_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b2ant_media_status_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_btc8822b2ant_specific_packet_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_btc8822b2ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
- u8 length);
-void ex_btc8822b2ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_btc8822b2ant_halt_notify(struct btc_coexist *btcoexist);
-void ex_btc8822b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
-void ex_btc8822b2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8822b2ant_display_coex_info(struct btc_coexist *btcoexist,
- struct seq_file *m);
-void ex_btc8822b2ant_antenna_detection(struct btc_coexist *btcoexist,
- u32 cent_freq, u32 offset, u32 span,
- u32 seconds);
-void ex_btc8822b2ant_display_ant_detection(struct btc_coexist *btcoexist);
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c b/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c
deleted file mode 100644
index ad7b6c42840b..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c
+++ /dev/null
@@ -1,54 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halbt_precomp.h"
-
-void ex_hal8822b_wifi_only_hw_config(struct wifi_only_cfg *wifionlycfg)
-{
- /*BB control*/
- halwifionly_phy_set_bb_reg(wifionlycfg, 0x4c, 0x01800000, 0x2);
- /*SW control*/
- halwifionly_phy_set_bb_reg(wifionlycfg, 0xcb4, 0xff, 0x77);
- /*antenna mux switch */
- halwifionly_phy_set_bb_reg(wifionlycfg, 0x974, 0x300, 0x3);
-
- halwifionly_phy_set_bb_reg(wifionlycfg, 0x1990, 0x300, 0x0);
-
- halwifionly_phy_set_bb_reg(wifionlycfg, 0xcbc, 0x80000, 0x0);
- /*switch to WL side controller and gnt_wl gnt_bt debug signal */
- halwifionly_phy_set_bb_reg(wifionlycfg, 0x70, 0xff000000, 0x0e);
- /*gnt_wl=1 , gnt_bt=0*/
- halwifionly_phy_set_bb_reg(wifionlycfg, 0x1704, 0xffffffff, 0x7700);
- halwifionly_phy_set_bb_reg(wifionlycfg, 0x1700, 0xffffffff, 0xc00f0038);
-}
-
-void ex_hal8822b_wifi_only_scannotify(struct wifi_only_cfg *wifionlycfg,
- u8 is_5g)
-{
- hal8822b_wifi_only_switch_antenna(wifionlycfg, is_5g);
-}
-
-void ex_hal8822b_wifi_only_switchbandnotify(struct wifi_only_cfg *wifionlycfg,
- u8 is_5g)
-{
- hal8822b_wifi_only_switch_antenna(wifionlycfg, is_5g);
-}
-
-void hal8822b_wifi_only_switch_antenna(struct wifi_only_cfg *wifionlycfg,
- u8 is_5g)
-{
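-	/* 0xcbc[9:8] (mask 0x300) presumably selects the antenna switch
-	 * position: 0x1 when running on 5G, 0x2 on 2.4G
-	 */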
- if (is_5g)
- halwifionly_phy_set_bb_reg(wifionlycfg, 0xcbc, 0x300, 0x1);
- else
- halwifionly_phy_set_bb_reg(wifionlycfg, 0xcbc, 0x300, 0x2);
-}
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h b/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h
deleted file mode 100644
index 5910fe1a1fb0..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __INC_HAL8822BWIFIONLYHWCFG_H
-#define __INC_HAL8822BWIFIONLYHWCFG_H
-
-void ex_hal8822b_wifi_only_hw_config(struct wifi_only_cfg *wifionlycfg);
-void ex_hal8822b_wifi_only_scannotify(struct wifi_only_cfg *wifionlycfg,
- u8 is_5g);
-void ex_hal8822b_wifi_only_switchbandnotify(struct wifi_only_cfg *wifionlycfg,
- u8 is_5g);
-void hal8822b_wifi_only_switch_antenna(struct wifi_only_cfg *wifionlycfg,
- u8 is_5g);
-#endif
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c
deleted file mode 100644
index b519d181b419..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c
+++ /dev/null
@@ -1,1837 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- ******************************************************************************/
-
-#include "halbt_precomp.h"
-
-/***************************************************
- * Debug related function
- ***************************************************/
-
-static const char *const gl_btc_wifi_bw_string[] = {
- "11bg",
- "HT20",
- "HT40",
- "HT80",
- "HT160"
-};
-
-static const char *const gl_btc_wifi_freq_string[] = {
- "2.4G",
- "5G"
-};
-
-static bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
-{
-	if (!btcoexist->binded || !btcoexist->adapter)
- return false;
-
- return true;
-}
-
-static bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
-{
- if (rtlpriv->link_info.busytraffic)
- return true;
- else
- return false;
-}
-
-static void halbtc_dbg_init(void)
-{
-}
-
-/***************************************************
- * helper function
- ***************************************************/
-static bool is_any_client_connect_to_ap(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- struct rtl_sta_info *drv_priv;
- u8 cnt = 0;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC ||
- mac->opmode == NL80211_IFTYPE_MESH_POINT ||
- mac->opmode == NL80211_IFTYPE_AP) {
- if (in_interrupt() > 0) {
- list_for_each_entry(drv_priv, &rtlpriv->entry_list,
- list) {
- cnt++;
- }
- } else {
- spin_lock_bh(&rtlpriv->locks.entry_list_lock);
- list_for_each_entry(drv_priv, &rtlpriv->entry_list,
- list) {
- cnt++;
- }
- spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
- }
- }
- if (cnt > 0)
- return true;
- else
- return false;
-}
-
-static bool halbtc_legacy(struct rtl_priv *adapter)
-{
- struct rtl_priv *rtlpriv = adapter;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
-
- bool is_legacy = false;
-
- if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
- is_legacy = true;
-
- return is_legacy;
-}
-
-bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
-{
- struct rtl_priv *rtlpriv = adapter;
-
- if (rtlpriv->link_info.tx_busy_traffic)
- return true;
- else
- return false;
-}
-
-static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv =
- (struct rtl_priv *)btcoexist->adapter;
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u32 wifi_bw = BTC_WIFI_BW_HT20;
-
- if (halbtc_legacy(rtlpriv)) {
- wifi_bw = BTC_WIFI_BW_LEGACY;
- } else {
- switch (rtlphy->current_chan_bw) {
- case HT_CHANNEL_WIDTH_20:
- wifi_bw = BTC_WIFI_BW_HT20;
- break;
- case HT_CHANNEL_WIDTH_20_40:
- wifi_bw = BTC_WIFI_BW_HT40;
- break;
- case HT_CHANNEL_WIDTH_80:
- wifi_bw = BTC_WIFI_BW_HT80;
- break;
- }
- }
-
- return wifi_bw;
-}
-
-static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 chnl = 1;
-
- if (rtlphy->current_channel != 0)
- chnl = rtlphy->current_channel;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "%s:%d\n", __func__, chnl);
- return chnl;
-}
-
-static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
-{
- return rtlpriv->btcoexist.btc_info.single_ant_path;
-}
-
-static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
-{
- return rtlpriv->btcoexist.btc_info.bt_type;
-}
-
-static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
-{
- u8 num;
-
- if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
- num = 2;
- else
- num = 1;
-
- return num;
-}
-
-static u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
- return rtlhal->package_type;
-}
-
-static
-u8 rtl_get_hwpg_rfe_type(struct rtl_priv *rtlpriv)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
- return rtlhal->rfe_type;
-}
-
-/* ************************************
- * Hal helper function
- * ************************************
- */
-static
-bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist)
-{
- if (IS_HARDWARE_TYPE_8812(btcoexist->adapter))
- return false;
- else
- return true;
-}
-
-static
-bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
- u8 *cmd, u32 len, unsigned long wait_ms)
-{
- struct rtl_priv *rtlpriv;
- const u8 oper_ver = 0;
- u8 req_num;
-
- if (!halbtc_is_hw_mailbox_exist(btcoexist))
- return false;
-
- if (wait_ms) /* before h2c to avoid race condition */
- reinit_completion(&btcoexist->bt_mp_comp);
-
- rtlpriv = btcoexist->adapter;
-
- /*
-	 * fill req_num based on op_code; rtl_btc_btmpinfo_notify() uses it
-	 * to identify the message type
- */
- switch (op_code) {
- case BT_OP_GET_BT_VERSION:
- req_num = BT_SEQ_GET_BT_VERSION;
- break;
- case BT_OP_GET_AFH_MAP_L:
- req_num = BT_SEQ_GET_AFH_MAP_L;
- break;
- case BT_OP_GET_AFH_MAP_M:
- req_num = BT_SEQ_GET_AFH_MAP_M;
- break;
- case BT_OP_GET_AFH_MAP_H:
- req_num = BT_SEQ_GET_AFH_MAP_H;
- break;
- case BT_OP_GET_BT_COEX_SUPPORTED_FEATURE:
- req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE;
- break;
- case BT_OP_GET_BT_COEX_SUPPORTED_VERSION:
- req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION;
- break;
- case BT_OP_GET_BT_ANT_DET_VAL:
- req_num = BT_SEQ_GET_BT_ANT_DET_VAL;
- break;
- case BT_OP_GET_BT_BLE_SCAN_PARA:
- req_num = BT_SEQ_GET_BT_BLE_SCAN_PARA;
- break;
- case BT_OP_GET_BT_BLE_SCAN_TYPE:
- req_num = BT_SEQ_GET_BT_BLE_SCAN_TYPE;
- break;
- case BT_OP_WRITE_REG_ADDR:
- case BT_OP_WRITE_REG_VALUE:
- case BT_OP_READ_REG:
- default:
- req_num = BT_SEQ_DONT_CARE;
- break;
- }
-
- cmd[0] |= (oper_ver & 0x0f); /* Set OperVer */
- cmd[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
- cmd[1] = op_code;
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, len, cmd);
-
-	/* return now if the caller did not ask to wait for the response */
- if (!wait_ms)
- return true;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
-
- if (in_interrupt())
- return false;
-
- if (wait_for_completion_timeout(&btcoexist->bt_mp_comp,
- msecs_to_jiffies(wait_ms)) == 0) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "btmpinfo wait (req_num=%d) timeout\n", req_num);
-
- return false; /* timeout */
- }
-
- return true;
-}
-
-static void halbtc_leave_lps(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv;
- bool ap_enable = false;
-
- rtlpriv = btcoexist->adapter;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
- &ap_enable);
-
- if (ap_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
-			 "%s()<--don't leave lps under AP mode\n", __func__);
- return;
- }
-
- btcoexist->bt_info.bt_ctrl_lps = true;
- btcoexist->bt_info.bt_lps_on = false;
- rtl_lps_leave(rtlpriv->mac80211.hw);
-}
-
-static void halbtc_enter_lps(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv;
- bool ap_enable = false;
-
- rtlpriv = btcoexist->adapter;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
- &ap_enable);
-
- if (ap_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
-			 "%s()<--don't enter lps under AP mode\n", __func__);
- return;
- }
-
- btcoexist->bt_info.bt_ctrl_lps = true;
- btcoexist->bt_info.bt_lps_on = true;
- rtl_lps_enter(rtlpriv->mac80211.hw);
-}
-
-static void halbtc_normal_lps(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv;
-
- rtlpriv = btcoexist->adapter;
-
- if (btcoexist->bt_info.bt_ctrl_lps) {
- btcoexist->bt_info.bt_lps_on = false;
- rtl_lps_leave(rtlpriv->mac80211.hw);
- btcoexist->bt_info.bt_ctrl_lps = false;
- }
-}
-
-static void halbtc_leave_low_power(struct btc_coexist *btcoexist)
-{
-}
-
-static void halbtc_normal_low_power(struct btc_coexist *btcoexist)
-{
-}
-
-static void halbtc_disable_low_power(struct btc_coexist *btcoexist,
- bool low_pwr_disable)
-{
- /* TODO: original/leave 32k low power */
- btcoexist->bt_info.bt_disable_low_pwr = low_pwr_disable;
-}
-
-static void halbtc_aggregation_check(struct btc_coexist *btcoexist)
-{
- bool need_to_act = false;
- static unsigned long pre_time;
- unsigned long cur_time = 0;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-	/* To avoid a continuous deleteBA=>addBA=>deleteBA=>addBA loop,
-	 * this function is not allowed to be called back-to-back;
-	 * it can only run again after 8 seconds.
-	 */
-
- cur_time = jiffies;
- if (jiffies_to_msecs(cur_time - pre_time) <= 8000) {
-		/* less than 8 seconds since the last run; do nothing yet */
- return;
- }
- pre_time = cur_time;
-
- if (btcoexist->bt_info.reject_agg_pkt) {
- need_to_act = true;
- btcoexist->bt_info.pre_reject_agg_pkt =
- btcoexist->bt_info.reject_agg_pkt;
- } else {
- if (btcoexist->bt_info.pre_reject_agg_pkt) {
- need_to_act = true;
- btcoexist->bt_info.pre_reject_agg_pkt =
- btcoexist->bt_info.reject_agg_pkt;
- }
-
- if (btcoexist->bt_info.pre_bt_ctrl_agg_buf_size !=
- btcoexist->bt_info.bt_ctrl_agg_buf_size) {
- need_to_act = true;
- btcoexist->bt_info.pre_bt_ctrl_agg_buf_size =
- btcoexist->bt_info.bt_ctrl_agg_buf_size;
- }
-
- if (btcoexist->bt_info.bt_ctrl_agg_buf_size) {
- if (btcoexist->bt_info.pre_agg_buf_size !=
- btcoexist->bt_info.agg_buf_size) {
- need_to_act = true;
- }
- btcoexist->bt_info.pre_agg_buf_size =
- btcoexist->bt_info.agg_buf_size;
- }
-
- if (need_to_act)
- rtl_rx_ampdu_apply(rtlpriv);
- }
-}
-
-static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
-{
- u8 cmd_buffer[4] = {0};
-
- if (btcoexist->bt_info.bt_real_fw_ver)
- goto label_done;
-
-	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_VERSION,
- cmd_buffer, 4, 200);
-
-label_done:
- return btcoexist->bt_info.bt_real_fw_ver;
-}
-
-static u32 halbtc_get_bt_coex_supported_feature(void *btc_context)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- u8 cmd_buffer[4] = {0};
-
- if (btcoexist->bt_info.bt_supported_feature)
- goto label_done;
-
-	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- halbtc_send_bt_mp_operation(btcoexist,
- BT_OP_GET_BT_COEX_SUPPORTED_FEATURE,
- cmd_buffer, 4, 200);
-
-label_done:
- return btcoexist->bt_info.bt_supported_feature;
-}
-
-static u32 halbtc_get_bt_coex_supported_version(void *btc_context)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- u8 cmd_buffer[4] = {0};
-
- if (btcoexist->bt_info.bt_supported_version)
- goto label_done;
-
-	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- halbtc_send_bt_mp_operation(btcoexist,
- BT_OP_GET_BT_COEX_SUPPORTED_VERSION,
- cmd_buffer, 4, 200);
-
-label_done:
- return btcoexist->bt_info.bt_supported_version;
-}
-
-static u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
-{
- /* return value:
-	 * [31:16] => number of connected ports
-	 * [15:0] => bitmap of connected port types
- */
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- u32 ret_val = 0;
- u32 port_connected_status = 0, num_of_connected_port = 0;
-
- if (mac->opmode == NL80211_IFTYPE_STATION &&
- mac->link_state >= MAC80211_LINKED) {
- port_connected_status |= WIFI_STA_CONNECTED;
- num_of_connected_port++;
- }
- /* AP & ADHOC & MESH */
- if (is_any_client_connect_to_ap(btcoexist)) {
- port_connected_status |= WIFI_AP_CONNECTED;
- num_of_connected_port++;
- }
- /* TODO: P2P Connected Status */
-
- ret_val = (num_of_connected_port << 16) | port_connected_status;
-
- return ret_val;
-}
-
-static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- bool *bool_tmp = (bool *)out_buf;
- int *s32_tmp = (int *)out_buf;
- u32 *u32_tmp = (u32 *)out_buf;
- u8 *u8_tmp = (u8 *)out_buf;
- bool tmp = false;
- bool ret = true;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return false;
-
- switch (get_type) {
- case BTC_GET_BL_HS_OPERATION:
- *bool_tmp = false;
- ret = false;
- break;
- case BTC_GET_BL_HS_CONNECTING:
- *bool_tmp = false;
- ret = false;
- break;
- case BTC_GET_BL_WIFI_CONNECTED:
- if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
- rtlpriv->mac80211.link_state >= MAC80211_LINKED)
- tmp = true;
- if (is_any_client_connect_to_ap(btcoexist))
- tmp = true;
- *bool_tmp = tmp;
- break;
- case BTC_GET_BL_WIFI_BUSY:
- if (halbtc_is_wifi_busy(rtlpriv))
- *bool_tmp = true;
- else
- *bool_tmp = false;
- break;
- case BTC_GET_BL_WIFI_SCAN:
- if (mac->act_scanning)
- *bool_tmp = true;
- else
- *bool_tmp = false;
- break;
- case BTC_GET_BL_WIFI_LINK:
- if (mac->link_state == MAC80211_LINKING)
- *bool_tmp = true;
- else
- *bool_tmp = false;
- break;
- case BTC_GET_BL_WIFI_ROAM:
- if (mac->link_state == MAC80211_LINKING)
- *bool_tmp = true;
- else
- *bool_tmp = false;
- break;
- case BTC_GET_BL_WIFI_4_WAY_PROGRESS:
- *bool_tmp = rtlpriv->btcoexist.btc_info.in_4way;
- break;
- case BTC_GET_BL_WIFI_UNDER_5G:
- if (rtlhal->current_bandtype == BAND_ON_5G)
- *bool_tmp = true;
- else
- *bool_tmp = false;
- break;
- case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
- if (mac->opmode == NL80211_IFTYPE_AP)
- *bool_tmp = true;
- else
- *bool_tmp = false;
- break;
- case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
- if (rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION)
- *bool_tmp = false;
- else
- *bool_tmp = true;
- break;
- case BTC_GET_BL_WIFI_UNDER_B_MODE:
- if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
- *bool_tmp = true;
- else
- *bool_tmp = false;
- break;
- case BTC_GET_BL_EXT_SWITCH:
- *bool_tmp = false;
- break;
- case BTC_GET_BL_WIFI_IS_IN_MP_MODE:
- *bool_tmp = false;
- break;
- case BTC_GET_BL_IS_ASUS_8723B:
- *bool_tmp = false;
- break;
- case BTC_GET_BL_RF4CE_CONNECTED:
- *bool_tmp = false;
- break;
- case BTC_GET_S4_WIFI_RSSI:
- *s32_tmp = rtlpriv->dm.undec_sm_pwdb;
- break;
- case BTC_GET_S4_HS_RSSI:
- *s32_tmp = 0;
- ret = false;
- break;
- case BTC_GET_U4_WIFI_BW:
- *u32_tmp = halbtc_get_wifi_bw(btcoexist);
- break;
- case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
- if (halbtc_is_wifi_uplink(rtlpriv))
- *u32_tmp = BTC_WIFI_TRAFFIC_TX;
- else
- *u32_tmp = BTC_WIFI_TRAFFIC_RX;
- break;
- case BTC_GET_U4_WIFI_FW_VER:
- *u32_tmp = (rtlhal->fw_version << 16) | rtlhal->fw_subversion;
- break;
- case BTC_GET_U4_WIFI_LINK_STATUS:
- *u32_tmp = halbtc_get_wifi_link_status(btcoexist);
- break;
- case BTC_GET_U4_BT_PATCH_VER:
- *u32_tmp = halbtc_get_bt_patch_version(btcoexist);
- break;
- case BTC_GET_U4_VENDOR:
- *u32_tmp = BTC_VENDOR_OTHER;
- break;
- case BTC_GET_U4_SUPPORTED_VERSION:
- *u32_tmp = halbtc_get_bt_coex_supported_version(btcoexist);
- break;
- case BTC_GET_U4_SUPPORTED_FEATURE:
- *u32_tmp = halbtc_get_bt_coex_supported_feature(btcoexist);
- break;
- case BTC_GET_U4_WIFI_IQK_TOTAL:
- *u32_tmp = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "IQK_TOTAL");
- break;
- case BTC_GET_U4_WIFI_IQK_OK:
- *u32_tmp = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "IQK_OK");
- break;
- case BTC_GET_U4_WIFI_IQK_FAIL:
- *u32_tmp = btcoexist->btc_phydm_query_phy_counter(btcoexist,
- "IQK_FAIL");
- break;
- case BTC_GET_U1_WIFI_DOT11_CHNL:
- *u8_tmp = rtlphy->current_channel;
- break;
- case BTC_GET_U1_WIFI_CENTRAL_CHNL:
- *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
- break;
- case BTC_GET_U1_WIFI_HS_CHNL:
- *u8_tmp = 0;
- ret = false;
- break;
- case BTC_GET_U1_AP_NUM:
- *u8_tmp = rtlpriv->btcoexist.btc_info.ap_num;
- break;
- case BTC_GET_U1_ANT_TYPE:
- *u8_tmp = (u8)BTC_ANT_TYPE_0;
- break;
- case BTC_GET_U1_IOT_PEER:
- *u8_tmp = 0;
- break;
-
- /************* 1Ant **************/
- case BTC_GET_U1_LPS_MODE:
- *u8_tmp = btcoexist->pwr_mode_val[0];
- break;
-
- default:
- ret = false;
- break;
- }
-
- return ret;
-}
-
-static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
- bool *bool_tmp = (bool *)in_buf;
- u8 *u8_tmp = (u8 *)in_buf;
- u32 *u32_tmp = (u32 *)in_buf;
- bool ret = true;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return false;
-
- switch (set_type) {
- /* set some bool type variables. */
- case BTC_SET_BL_BT_DISABLE:
- btcoexist->bt_info.bt_disabled = *bool_tmp;
- break;
- case BTC_SET_BL_BT_TRAFFIC_BUSY:
- btcoexist->bt_info.bt_busy = *bool_tmp;
- break;
- case BTC_SET_BL_BT_LIMITED_DIG:
- btcoexist->bt_info.limited_dig = *bool_tmp;
- break;
- case BTC_SET_BL_FORCE_TO_ROAM:
- btcoexist->bt_info.force_to_roam = *bool_tmp;
- break;
- case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
- btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
- break;
- case BTC_SET_BL_BT_CTRL_AGG_SIZE:
- btcoexist->bt_info.bt_ctrl_agg_buf_size = *bool_tmp;
- break;
- case BTC_SET_BL_INC_SCAN_DEV_NUM:
- btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
- break;
- case BTC_SET_BL_BT_TX_RX_MASK:
- btcoexist->bt_info.bt_tx_rx_mask = *bool_tmp;
- break;
- case BTC_SET_BL_MIRACAST_PLUS_BT:
- btcoexist->bt_info.miracast_plus_bt = *bool_tmp;
- break;
- /* set some u1Byte type variables. */
- case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
- btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
- break;
- case BTC_SET_U1_AGG_BUF_SIZE:
- btcoexist->bt_info.agg_buf_size = *u8_tmp;
- break;
-
- /* the following are actions that will be triggered */
- case BTC_SET_ACT_GET_BT_RSSI:
- ret = false;
- break;
- case BTC_SET_ACT_AGGREGATE_CTRL:
- halbtc_aggregation_check(btcoexist);
- break;
-
- /* 1Ant */
- case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
- btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
- break;
- case BTC_SET_UI_SCAN_SIG_COMPENSATION:
- break;
- case BTC_SET_U1_LPS_VAL:
- btcoexist->bt_info.lps_val = *u8_tmp;
- break;
- case BTC_SET_U1_RPWM_VAL:
- btcoexist->bt_info.rpwm_val = *u8_tmp;
- break;
- /* the following are actions that will be triggered */
- case BTC_SET_ACT_LEAVE_LPS:
- halbtc_leave_lps(btcoexist);
- break;
- case BTC_SET_ACT_ENTER_LPS:
- halbtc_enter_lps(btcoexist);
- break;
- case BTC_SET_ACT_NORMAL_LPS:
- halbtc_normal_lps(btcoexist);
- break;
- case BTC_SET_ACT_DISABLE_LOW_POWER:
- halbtc_disable_low_power(btcoexist, *bool_tmp);
- break;
- case BTC_SET_ACT_UPDATE_RAMASK:
- btcoexist->bt_info.ra_mask = *u32_tmp;
- break;
- case BTC_SET_ACT_SEND_MIMO_PS:
- break;
- case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
- break;
- case BTC_SET_ACT_CTRL_BT_COEX:
- break;
- case BTC_SET_ACT_CTRL_8723B_ANT:
- break;
- default:
- break;
- }
-
- return ret;
-}
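
A minimal usage sketch (assumed caller, not from the original file): the
per-chip coex algorithms push state back through this setter in the same way,
for example to mark BT traffic as busy.

	bool bt_busy = true;

	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
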
-
-static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist,
- struct seq_file *m)
-{
-}
-
-static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist,
- struct seq_file *m)
-{
-}
-
-static void halbtc_display_wifi_status(struct btc_coexist *btcoexist,
- struct seq_file *m)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- s32 wifi_rssi = 0, bt_hs_rssi = 0;
- bool scan = false, link = false, roam = false, wifi_busy = false;
- bool wifi_under_b_mode = false, wifi_under_5g = false;
- u32 wifi_bw = BTC_WIFI_BW_HT20;
- u32 wifi_traffic_dir = BTC_WIFI_TRAFFIC_TX;
- u32 wifi_freq = BTC_FREQ_2_4G;
- u32 wifi_link_status = 0x0;
- bool bt_hs_on = false, under_ips = false, under_lps = false;
- bool low_power = false, dc_mode = false;
- u8 wifi_chnl = 0, wifi_hs_chnl = 0;
- u8 ap_num = 0;
-
- wifi_link_status = halbtc_get_wifi_link_status(btcoexist);
- seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
- "STA/vWifi/HS/p2pGo/p2pGc",
- ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_chnl);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- seq_printf(m, "\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(High Speed)",
- wifi_chnl, wifi_hs_chnl, bt_hs_on);
-
- btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
- btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- seq_printf(m, "\n %-35s = %d/ %d",
- "Wifi rssi/ HS rssi",
- wifi_rssi - 100, bt_hs_rssi - 100);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- seq_printf(m, "\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan",
- link, roam, scan);
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
- &wifi_traffic_dir);
- btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
- wifi_freq = (wifi_under_5g ? BTC_FREQ_5G : BTC_FREQ_2_4G);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
- &wifi_under_b_mode);
-
- seq_printf(m, "\n %-35s = %s / %s/ %s/ AP=%d ",
- "Wifi freq/ bw/ traffic",
- gl_btc_wifi_freq_string[wifi_freq],
- ((wifi_under_b_mode) ? "11b" :
- gl_btc_wifi_bw_string[wifi_bw]),
- ((!wifi_busy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX ==
- wifi_traffic_dir) ? "uplink" :
- "downlink")),
- ap_num);
-
- /* power status */
- dc_mode = true; /*TODO*/
- under_ips = rtlpriv->psc.inactive_pwrstate == ERFOFF;
- under_lps = rtlpriv->psc.dot11_psmode != EACTIVE;
- low_power = false; /*TODO*/
- seq_printf(m, "\n %-35s = %s%s%s%s",
- "Power Status",
- (dc_mode ? "DC mode" : "AC mode"),
- (under_ips ? ", IPS ON" : ""),
- (under_lps ? ", LPS ON" : ""),
- (low_power ? ", 32k" : ""));
-
- seq_printf(m,
- "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)",
- "Power mode cmd(lps/rpwm)",
- btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1],
- btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3],
- btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5],
- btcoexist->bt_info.lps_val,
- btcoexist->bt_info.rpwm_val);
-}
-
-/************************************************************
- * IO related functions
- ************************************************************/
-static u8 halbtc_read_1byte(void *bt_context, u32 reg_addr)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- return rtl_read_byte(rtlpriv, reg_addr);
-}
-
-static u16 halbtc_read_2byte(void *bt_context, u32 reg_addr)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- return rtl_read_word(rtlpriv, reg_addr);
-}
-
-static u32 halbtc_read_4byte(void *bt_context, u32 reg_addr)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- return rtl_read_dword(rtlpriv, reg_addr);
-}
-
-static void halbtc_write_1byte(void *bt_context, u32 reg_addr, u32 data)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- rtl_write_byte(rtlpriv, reg_addr, data);
-}
-
-static void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr,
- u32 bit_mask, u8 data)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 original_value, bit_shift = 0;
- u8 i;
-
- if (bit_mask != MASKDWORD) { /* masked write: read-modify-write */
- original_value = rtl_read_byte(rtlpriv, reg_addr);
- for (i = 0; i <= 7; i++) {
- if ((bit_mask >> i) & 0x1)
- break;
- }
- bit_shift = i;
- data = (original_value & (~bit_mask)) |
- ((data << bit_shift) & bit_mask);
- }
- rtl_write_byte(rtlpriv, reg_addr, data);
-}
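
A minimal usage sketch (assumed register address and values, not from the
original file): writing 0x3 into bits [5:4] of a hypothetical register 0x40
makes the helper read the byte, shift the data by the index of the lowest set
mask bit (4 here), and merge it back.

	/* resulting byte = (orig & ~0x30) | ((0x3 << 4) & 0x30) */
	halbtc_bitmask_write_1byte(btcoexist, 0x40, 0x30, 0x3);
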
-
-static void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- rtl_write_word(rtlpriv, reg_addr, data);
-}
-
-static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
-{
- struct btc_coexist *btcoexist =
- (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- rtl_write_dword(rtlpriv, reg_addr, data);
-}
-
-static void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr,
- u8 data)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (btcoexist->chip_interface == BTC_INTF_SDIO)
- ;
- else if (btcoexist->chip_interface == BTC_INTF_PCI)
- rtl_write_byte(rtlpriv, reg_addr, data);
- else if (btcoexist->chip_interface == BTC_INTF_USB)
- rtl_write_byte(rtlpriv, reg_addr, data);
-}
-
-static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
- u32 data)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
-}
-
-static u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
-}
-
-static void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
- u32 bit_mask, u32 data)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data);
-}
-
-static u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
- u32 bit_mask)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask);
-}
-
-static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
- u32 cmd_len, u8 *cmd_buf)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id,
- cmd_len, cmd_buf);
-}
-
-static void halbtc_send_wifi_port_id_cmd(void *bt_context)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 cmd_buf[1] = {0}; /* port id [2:0] = 0 */
-
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x71, 1,
- cmd_buf);
-}
-
-static void halbtc_set_default_port_id_cmd(void *bt_context)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct ieee80211_hw *hw = rtlpriv->mac80211.hw;
-
- if (!rtlpriv->cfg->ops->set_default_port_id_cmd)
- return;
-
- rtlpriv->cfg->ops->set_default_port_id_cmd(hw);
-}
-
-static
-void halbtc_set_bt_reg(void *btc_context, u8 reg_type, u32 offset, u32 set_val)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- u8 cmd_buffer1[4] = {0};
- u8 cmd_buffer2[4] = {0};
-
- /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- *((__le16 *)&cmd_buffer1[2]) = cpu_to_le16((u16)set_val);
- if (!halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_VALUE,
- cmd_buffer1, 4, 200))
- return;
-
- /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- cmd_buffer2[2] = reg_type;
- cmd_buffer2[3] = (u8)offset;
- halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_ADDR,
- cmd_buffer2, 4, 200);
-}
-
-static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type,
- struct seq_file *m)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
-
- switch (disp_type) {
- case BTC_DBG_DISP_COEX_STATISTICS:
- halbtc_display_coex_statistics(btcoexist, m);
- break;
- case BTC_DBG_DISP_BT_LINK_INFO:
- halbtc_display_bt_link_info(btcoexist, m);
- break;
- case BTC_DBG_DISP_WIFI_STATUS:
- halbtc_display_wifi_status(btcoexist, m);
- break;
- default:
- break;
- }
-}
-
-static u32 halbtc_get_bt_reg(void *btc_context, u8 reg_type, u32 offset)
-{
- return 0;
-}
-
-static
-u32 halbtc_get_phydm_version(void *btc_context)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (rtlpriv->phydm.ops)
- return rtlpriv->phydm.ops->phydm_get_version(rtlpriv);
-
- return 0;
-}
-
-static
-void halbtc_phydm_modify_ra_pcr_threshold(void *btc_context,
- u8 ra_offset_direction,
- u8 ra_threshold_offset)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_phydm_ops *phydm_ops = rtlpriv->phydm.ops;
-
- if (phydm_ops)
- phydm_ops->phydm_modify_ra_pcr_threshold(rtlpriv,
- ra_offset_direction,
- ra_threshold_offset);
-}
-
-static
-u32 halbtc_phydm_query_phy_counter(void *btc_context, const char *info_type)
-{
- /* info_type may be one of the strings below:
- * PHYDM_INFO_FA_OFDM, PHYDM_INFO_FA_CCK, PHYDM_INFO_CCA_OFDM,
- * PHYDM_INFO_CCA_CCK
- * IQK_TOTAL, IQK_OK, IQK_FAIL
- */
-
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_phydm_ops *phydm_ops = rtlpriv->phydm.ops;
-
- if (phydm_ops)
- return phydm_ops->phydm_query_counter(rtlpriv, info_type);
-
- return 0;
-}
-
-static u8 halbtc_get_ant_det_val_from_bt(void *btc_context)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- u8 cmd_buffer[4] = {0};
-
- /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_ANT_DET_VAL,
- cmd_buffer, 4, 200);
-
- /* need to wait for completion to return the correct value */
-
- return btcoexist->bt_info.bt_ant_det_val;
-}
-
-static u8 halbtc_get_ble_scan_type_from_bt(void *btc_context)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- u8 cmd_buffer[4] = {0};
-
- /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_TYPE,
- cmd_buffer, 4, 200);
-
- /* need to wait for completion to return the correct value */
-
- return btcoexist->bt_info.bt_ble_scan_type;
-}
-
-static u32 halbtc_get_ble_scan_para_from_bt(void *btc_context, u8 scan_type)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- u8 cmd_buffer[4] = {0};
-
- /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_PARA,
- cmd_buffer, 4, 200);
-
- /* need to wait for completion to return the correct value */
-
- return btcoexist->bt_info.bt_ble_scan_para;
-}
-
-static bool halbtc_get_bt_afh_map_from_bt(void *btc_context, u8 map_type,
- u8 *afh_map)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- u8 cmd_buffer[2] = {0};
- bool ret;
- u32 *afh_map_l = (u32 *)afh_map;
- u32 *afh_map_m = (u32 *)(afh_map + 4);
- u16 *afh_map_h = (u16 *)(afh_map + 8);
-
- /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_L,
- cmd_buffer, 2, 200);
- if (!ret)
- goto exit;
-
- *afh_map_l = btcoexist->bt_info.afh_map_l;
-
- /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_M,
- cmd_buffer, 2, 200);
- if (!ret)
- goto exit;
-
- *afh_map_m = btcoexist->bt_info.afh_map_m;
-
- /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
- ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_H,
- cmd_buffer, 2, 200);
- if (!ret)
- goto exit;
-
- *afh_map_h = btcoexist->bt_info.afh_map_h;
-
-exit:
- return ret;
-}
-
-/*****************************************************************
- * External functions called by other modules
- *****************************************************************/
-bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return false;
-
- halbtc_dbg_init();
-
- btcoexist->btc_read_1byte = halbtc_read_1byte;
- btcoexist->btc_write_1byte = halbtc_write_1byte;
- btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
- btcoexist->btc_read_2byte = halbtc_read_2byte;
- btcoexist->btc_write_2byte = halbtc_write_2byte;
- btcoexist->btc_read_4byte = halbtc_read_4byte;
- btcoexist->btc_write_4byte = halbtc_write_4byte;
- btcoexist->btc_write_local_reg_1byte = halbtc_write_local_reg_1byte;
-
- btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
- btcoexist->btc_get_bb_reg = halbtc_get_bbreg;
-
- btcoexist->btc_set_rf_reg = halbtc_set_rfreg;
- btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
-
- btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
- btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
-
- btcoexist->btc_get = halbtc_get;
- btcoexist->btc_set = halbtc_set;
- btcoexist->btc_set_bt_reg = halbtc_set_bt_reg;
- btcoexist->btc_get_bt_reg = halbtc_get_bt_reg;
-
- btcoexist->bt_info.bt_ctrl_buf_size = false;
- btcoexist->bt_info.agg_buf_size = 5;
-
- btcoexist->bt_info.increase_scan_dev_num = false;
-
- btcoexist->btc_get_bt_coex_supported_feature =
- halbtc_get_bt_coex_supported_feature;
- btcoexist->btc_get_bt_coex_supported_version =
- halbtc_get_bt_coex_supported_version;
- btcoexist->btc_get_bt_phydm_version = halbtc_get_phydm_version;
- btcoexist->btc_phydm_modify_ra_pcr_threshold =
- halbtc_phydm_modify_ra_pcr_threshold;
- btcoexist->btc_phydm_query_phy_counter = halbtc_phydm_query_phy_counter;
- btcoexist->btc_get_ant_det_val_from_bt = halbtc_get_ant_det_val_from_bt;
- btcoexist->btc_get_ble_scan_type_from_bt =
- halbtc_get_ble_scan_type_from_bt;
- btcoexist->btc_get_ble_scan_para_from_bt =
- halbtc_get_ble_scan_para_from_bt;
- btcoexist->btc_get_bt_afh_map_from_bt =
- halbtc_get_bt_afh_map_from_bt;
-
- init_completion(&btcoexist->bt_mp_comp);
-
- return true;
-}
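
Once this callback table is populated, coex code calls back through it rather
than into rtlwifi directly; a minimal sketch mirroring the calls already made
in halbtc_display_wifi_status() above:

	bool wifi_busy = false;
	u32 wifi_bw = BTC_WIFI_BW_HT20;

	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
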
-
-bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv)
-{
- struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
- struct wifi_only_haldata *wifionly_haldata;
-
- if (!wifionly_cfg)
- return false;
-
- wifionly_cfg->adapter = rtlpriv;
-
- switch (rtlpriv->rtlhal.interface) {
- case INTF_PCI:
- wifionly_cfg->chip_interface = WIFIONLY_INTF_PCI;
- break;
- case INTF_USB:
- wifionly_cfg->chip_interface = WIFIONLY_INTF_USB;
- break;
- default:
- wifionly_cfg->chip_interface = WIFIONLY_INTF_UNKNOWN;
- break;
- }
-
- wifionly_haldata = &wifionly_cfg->haldata_info;
-
- wifionly_haldata->customer_id = CUSTOMER_NORMAL;
- wifionly_haldata->efuse_pg_antnum = rtl_get_hwpg_ant_num(rtlpriv);
- wifionly_haldata->efuse_pg_antpath =
- rtl_get_hwpg_single_ant_path(rtlpriv);
- wifionly_haldata->rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
- wifionly_haldata->ant_div_cfg = 0;
-
- return true;
-}
-
-bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
-{
- struct rtl_priv *rtlpriv = adapter;
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
- u8 ant_num = 2, chip_type, single_ant_path = 0;
-
- if (!btcoexist)
- return false;
-
- if (btcoexist->binded)
- return false;
-
- switch (rtlpriv->rtlhal.interface) {
- case INTF_PCI:
- btcoexist->chip_interface = BTC_INTF_PCI;
- break;
- case INTF_USB:
- btcoexist->chip_interface = BTC_INTF_USB;
- break;
- default:
- btcoexist->chip_interface = BTC_INTF_UNKNOWN;
- break;
- }
-
- btcoexist->binded = true;
- btcoexist->statistics.cnt_bind++;
-
- btcoexist->adapter = adapter;
-
- btcoexist->stack_info.profile_notified = false;
-
- btcoexist->bt_info.bt_ctrl_agg_buf_size = false;
- btcoexist->bt_info.agg_buf_size = 5;
-
- btcoexist->bt_info.increase_scan_dev_num = false;
- btcoexist->bt_info.miracast_plus_bt = false;
-
- chip_type = rtl_get_hwpg_bt_type(rtlpriv);
- exhalbtc_set_chip_type(btcoexist, chip_type);
- ant_num = rtl_get_hwpg_ant_num(rtlpriv);
- exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
-
- /* set default antenna position to main port */
- btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
-
- single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv);
- exhalbtc_set_single_ant_path(btcoexist, single_ant_path);
-
- if (rtl_get_hwpg_package_type(rtlpriv) == 0)
- btcoexist->board_info.tfbga_package = false;
- else if (rtl_get_hwpg_package_type(rtlpriv) == 1)
- btcoexist->board_info.tfbga_package = false;
- else
- btcoexist->board_info.tfbga_package = true;
-
- if (btcoexist->board_info.tfbga_package)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Package Type = TFBGA\n");
- else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Package Type = Non-TFBGA\n");
-
- btcoexist->board_info.rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
- btcoexist->board_info.ant_div_cfg = 0;
-
- return true;
-}
-
-void exhalbtc_power_on_setting(struct btc_coexist *btcoexist)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- btcoexist->statistics.cnt_power_on++;
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_power_on_setting(btcoexist);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_power_on_setting(btcoexist);
- }
-}
-
-void exhalbtc_pre_load_firmware(struct btc_coexist *btcoexist)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- btcoexist->statistics.cnt_pre_load_firmware++;
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_pre_load_firmware(btcoexist);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_pre_load_firmware(btcoexist);
- }
-}
-
-void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- btcoexist->statistics.cnt_init_hw_config++;
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_init_hw_config(btcoexist, wifi_only);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_init_hw_config(btcoexist, wifi_only);
-
- halbtc_set_default_port_id_cmd(btcoexist);
- halbtc_send_wifi_port_id_cmd(btcoexist);
- }
-}
-
-void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg)
-{
- if (IS_HARDWARE_TYPE_8822B(wifionly_cfg->adapter))
- ex_hal8822b_wifi_only_hw_config(wifionly_cfg);
-}
-
-void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- btcoexist->statistics.cnt_init_coex_dm++;
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_init_coex_dm(btcoexist);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_init_coex_dm(btcoexist);
- }
-
- btcoexist->initilized = true;
-}
-
-void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
-{
- u8 ips_type;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_ips_notify++;
- if (btcoexist->manual_control)
- return;
-
- if (type == ERFOFF)
- ips_type = BTC_IPS_ENTER;
- else
- ips_type = BTC_IPS_LEAVE;
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_ips_notify(btcoexist, ips_type);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_ips_notify(btcoexist, ips_type);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
-{
- u8 lps_type;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_lps_notify++;
- if (btcoexist->manual_control)
- return;
-
- if (type == EACTIVE)
- lps_type = BTC_LPS_DISABLE;
- else
- lps_type = BTC_LPS_ENABLE;
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_lps_notify(btcoexist, lps_type);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_lps_notify(btcoexist, lps_type);
- }
-}
-
-void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
-{
- u8 scan_type;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_scan_notify++;
- if (btcoexist->manual_control)
- return;
-
- if (type)
- scan_type = BTC_SCAN_START;
- else
- scan_type = BTC_SCAN_FINISH;
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_scan_notify(btcoexist, scan_type);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_scan_notify(btcoexist, scan_type);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
- u8 is_5g)
-{
- if (IS_HARDWARE_TYPE_8822B(wifionly_cfg->adapter))
- ex_hal8822b_wifi_only_scannotify(wifionly_cfg, is_5g);
-}
-
-void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
-{
- u8 asso_type;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_connect_notify++;
- if (btcoexist->manual_control)
- return;
-
- if (action)
- asso_type = BTC_ASSOCIATE_START;
- else
- asso_type = BTC_ASSOCIATE_FINISH;
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_connect_notify(btcoexist, asso_type);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_connect_notify(btcoexist, asso_type);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
- enum rt_media_status media_status)
-{
- u8 status;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_media_status_notify++;
- if (btcoexist->manual_control)
- return;
-
- if (media_status == RT_MEDIA_CONNECT)
- status = BTC_MEDIA_CONNECT;
- else
- status = BTC_MEDIA_DISCONNECT;
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_media_status_notify(btcoexist, status);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_media_status_notify(btcoexist, status);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
-{
- u8 packet_type;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_special_packet_notify++;
- if (btcoexist->manual_control)
- return;
-
- if (pkt_type == PACKET_DHCP) {
- packet_type = BTC_PACKET_DHCP;
- } else if (pkt_type == PACKET_EAPOL) {
- packet_type = BTC_PACKET_EAPOL;
- } else if (pkt_type == PACKET_ARP) {
- packet_type = BTC_PACKET_ARP;
- } else {
- packet_type = BTC_PACKET_UNKNOWN;
- return;
- }
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_specific_packet_notify(btcoexist,
- packet_type);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_specific_packet_notify(btcoexist,
- packet_type);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
- u8 *tmp_buf, u8 length)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_bt_info_notify++;
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_bt_info_notify(btcoexist, tmp_buf,
- length);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_bt_info_notify(btcoexist, tmp_buf,
- length);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_rf_status_notify(btcoexist, type);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_rf_status_notify(btcoexist, type);
- }
-}
-
-void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_stack_operation_notify++;
-}
-
-void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_halt_notify(btcoexist);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_halt_notify(btcoexist);
- }
-
- btcoexist->binded = false;
-}
-
-void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- /* currently only the 1-ant solution needs this notification:
- * once PnP reports the sleep state, we must leave LPS so that
- * the device can sleep normally.
- */
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_pnp_notify(btcoexist, pnp_state);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_pnp_notify(btcoexist, pnp_state);
- }
-}
-
-void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_coex_dm_switch++;
-
- halbtc_leave_low_power(btcoexist);
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_periodical(struct btc_coexist *btcoexist)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_periodical++;
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_periodical(btcoexist);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_periodical(btcoexist);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
- u8 code, u8 len, u8 *data)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_dbg_ctrl++;
-
- halbtc_leave_low_power(btcoexist);
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq,
- u32 offset, u32 span, u32 seconds)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-}
-
-void exhalbtc_stack_update_profile_info(void)
-{
-}
-
-void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- btcoexist->stack_info.min_bt_rssi = bt_rssi;
-}
-
-void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- btcoexist->stack_info.hci_version = hci_version;
-}
-
-void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist,
- u16 bt_hci_version, u16 bt_patch_version)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- btcoexist->bt_info.bt_real_fw_ver = bt_patch_version;
- btcoexist->bt_info.bt_hci_ver = bt_hci_version;
-}
-
-void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type)
-{
- switch (chip_type) {
- default:
- case BT_2WIRE:
- case BT_ISSC_3WIRE:
- case BT_ACCEL:
- case BT_RTL8756:
- btcoexist->board_info.bt_chip_type = BTC_CHIP_UNDEF;
- break;
- case BT_CSR_BC4:
- btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
- break;
- case BT_CSR_BC8:
- btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
- break;
- case BT_RTL8723A:
- btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723A;
- break;
- case BT_RTL8821A:
- btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8821;
- break;
- case BT_RTL8723B:
- btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723B;
- break;
- }
-}
-
-void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- if (type == BT_COEX_ANT_TYPE_PG) {
- btcoexist->board_info.pg_ant_num = ant_num;
- btcoexist->board_info.btdm_ant_num = ant_num;
- } else if (type == BT_COEX_ANT_TYPE_ANTDIV) {
- btcoexist->board_info.btdm_ant_num = ant_num;
- } else if (type == BT_COEX_ANT_TYPE_DETECTED) {
- btcoexist->board_info.btdm_ant_num = ant_num;
- if (rtlpriv->cfg->mod_params->ant_sel == 1)
- btcoexist->board_info.btdm_ant_pos =
- BTC_ANTENNA_AT_AUX_PORT;
- else
- btcoexist->board_info.btdm_ant_pos =
- BTC_ANTENNA_AT_MAIN_PORT;
- }
-}
-
-/* Currently used by 8723b only, S0 or S1 */
-void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist,
- u8 single_ant_path)
-{
- btcoexist->board_info.single_ant_path = single_ant_path;
-}
-
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
- struct seq_file *m)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_display_coex_info(btcoexist, m);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_display_coex_info(btcoexist, m);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type)
-{
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
-
- if (btcoexist->manual_control)
- return;
-
- halbtc_leave_low_power(btcoexist);
-
- if (IS_HARDWARE_TYPE_8822B(btcoexist->adapter)) {
- if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8822b1ant_switchband_notify(btcoexist, type);
- else if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8822b2ant_switchband_notify(btcoexist, type);
- }
-
- halbtc_normal_low_power(btcoexist);
-}
-
-void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
- u8 is_5g)
-{
- if (IS_HARDWARE_TYPE_8822B(wifionly_cfg->adapter))
- ex_hal8822b_wifi_only_switchbandnotify(wifionly_cfg, is_5g);
-}
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h
deleted file mode 100644
index fd65de2ac8b5..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h
+++ /dev/null
@@ -1,791 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HALBTC_OUT_SRC_H__
-#define __HALBTC_OUT_SRC_H__
-
-#include "../wifi.h"
-
-#define BTC_COEX_OFFLOAD 0
-
-#define NORMAL_EXEC false
-#define FORCE_EXEC true
-
-#define BTC_RF_OFF 0x0
-#define BTC_RF_ON 0x1
-
-#define BTC_RF_A RF90_PATH_A
-#define BTC_RF_B RF90_PATH_B
-#define BTC_RF_C RF90_PATH_C
-#define BTC_RF_D RF90_PATH_D
-
-#define BTC_SMSP SINGLEMAC_SINGLEPHY
-#define BTC_DMDP DUALMAC_DUALPHY
-#define BTC_DMSP DUALMAC_SINGLEPHY
-#define BTC_MP_UNKNOWN 0xff
-
-#define IN
-#define OUT
-
-#define BT_TMP_BUF_SIZE 100
-
-#define BT_COEX_ANT_TYPE_PG 0
-#define BT_COEX_ANT_TYPE_ANTDIV 1
-#define BT_COEX_ANT_TYPE_DETECTED 2
-
-#define BTC_MIMO_PS_STATIC 0
-#define BTC_MIMO_PS_DYNAMIC 1
-
-#define BTC_RATE_DISABLE 0
-#define BTC_RATE_ENABLE 1
-
-/* single Antenna definition */
-#define BTC_ANT_PATH_WIFI 0
-#define BTC_ANT_PATH_BT 1
-#define BTC_ANT_PATH_PTA 2
-#define BTC_ANT_PATH_WIFI5G 3
-#define BTC_ANT_PATH_AUTO 4
-/* dual Antenna definition */
-#define BTC_ANT_WIFI_AT_MAIN 0
-#define BTC_ANT_WIFI_AT_AUX 1
-#define BTC_ANT_WIFI_AT_DIVERSITY 2
-/* coupler Antenna definition */
-#define BTC_ANT_WIFI_AT_CPL_MAIN 0
-#define BTC_ANT_WIFI_AT_CPL_AUX 1
-
-enum btc_bt_reg_type {
- BTC_BT_REG_RF = 0,
- BTC_BT_REG_MODEM = 1,
- BTC_BT_REG_BLUEWIZE = 2,
- BTC_BT_REG_VENDOR = 3,
- BTC_BT_REG_LE = 4,
- BTC_BT_REG_MAX
-};
-
-enum btc_chip_interface {
- BTC_INTF_UNKNOWN = 0,
- BTC_INTF_PCI = 1,
- BTC_INTF_USB = 2,
- BTC_INTF_SDIO = 3,
- BTC_INTF_GSPI = 4,
- BTC_INTF_MAX
-};
-
-enum btc_chip_type {
- BTC_CHIP_UNDEF = 0,
- BTC_CHIP_CSR_BC4 = 1,
- BTC_CHIP_CSR_BC8 = 2,
- BTC_CHIP_RTL8723A = 3,
- BTC_CHIP_RTL8821 = 4,
- BTC_CHIP_RTL8723B = 5,
- BTC_CHIP_MAX
-};
-
-enum btc_msg_type {
- BTC_MSG_INTERFACE = 0x0,
- BTC_MSG_ALGORITHM = 0x1,
- BTC_MSG_MAX
-};
-
-/* following is for BTC_MSG_INTERFACE */
-#define INTF_INIT BIT0
-#define INTF_NOTIFY BIT2
-
-/* following is for BTC_MSG_ALGORITHM */
-#define ALGO_BT_RSSI_STATE BIT0
-#define ALGO_WIFI_RSSI_STATE BIT1
-#define ALGO_BT_MONITOR BIT2
-#define ALGO_TRACE BIT3
-#define ALGO_TRACE_FW BIT4
-#define ALGO_TRACE_FW_DETAIL BIT5
-#define ALGO_TRACE_FW_EXEC BIT6
-#define ALGO_TRACE_SW BIT7
-#define ALGO_TRACE_SW_DETAIL BIT8
-#define ALGO_TRACE_SW_EXEC BIT9
-
-/* following is for wifi link status */
-#define WIFI_STA_CONNECTED BIT0
-#define WIFI_AP_CONNECTED BIT1
-#define WIFI_HS_CONNECTED BIT2
-#define WIFI_P2P_GO_CONNECTED BIT3
-#define WIFI_P2P_GC_CONNECTED BIT4
-
-#define BTC_RSSI_HIGH(_rssi_) \
- ((_rssi_ == BTC_RSSI_STATE_HIGH || \
- _rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false)
-#define BTC_RSSI_MEDIUM(_rssi_) \
- ((_rssi_ == BTC_RSSI_STATE_MEDIUM || \
- _rssi_ == BTC_RSSI_STATE_STAY_MEDIUM) ? true : false)
-#define BTC_RSSI_LOW(_rssi_) \
- ((_rssi_ == BTC_RSSI_STATE_LOW || \
- _rssi_ == BTC_RSSI_STATE_STAY_LOW) ? true : false)
-
-enum btc_power_save_type {
- BTC_PS_WIFI_NATIVE = 0,
- BTC_PS_LPS_ON = 1,
- BTC_PS_LPS_OFF = 2,
- BTC_PS_LPS_MAX
-};
-
-struct btc_board_info {
- /* The following is some board information */
- u8 bt_chip_type;
- u8 pg_ant_num; /* pg ant number */
- u8 btdm_ant_num; /* ant number for btdm */
- u8 btdm_ant_num_by_ant_det;
- u8 btdm_ant_pos;
- u8 single_ant_path; /* currently used for 8723b only, 1=>s0, 0=>s1 */
- bool tfbga_package;
- bool btdm_ant_det_finish;
-
- u8 rfe_type;
- u8 ant_div_cfg;
-};
-
-enum btc_dbg_opcode {
- BTC_DBG_SET_COEX_NORMAL = 0x0,
- BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
- BTC_DBG_SET_COEX_BT_ONLY = 0x2,
- BTC_DBG_MAX
-};
-
-enum btc_rssi_state {
- BTC_RSSI_STATE_HIGH = 0x0,
- BTC_RSSI_STATE_MEDIUM = 0x1,
- BTC_RSSI_STATE_LOW = 0x2,
- BTC_RSSI_STATE_STAY_HIGH = 0x3,
- BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
- BTC_RSSI_STATE_STAY_LOW = 0x5,
- BTC_RSSI_MAX
-};
-
-enum btc_wifi_role {
- BTC_ROLE_STATION = 0x0,
- BTC_ROLE_AP = 0x1,
- BTC_ROLE_IBSS = 0x2,
- BTC_ROLE_HS_MODE = 0x3,
- BTC_ROLE_MAX
-};
-
-enum btc_wireless_freq {
- BTC_FREQ_2_4G = 0x0,
- BTC_FREQ_5G = 0x1,
- BTC_FREQ_MAX
-};
-
-enum btc_wifi_bw_mode {
- BTC_WIFI_BW_LEGACY = 0x0,
- BTC_WIFI_BW_HT20 = 0x1,
- BTC_WIFI_BW_HT40 = 0x2,
- BTC_WIFI_BW_HT80 = 0x3,
- BTC_WIFI_BW_MAX
-};
-
-enum btc_wifi_traffic_dir {
- BTC_WIFI_TRAFFIC_TX = 0x0,
- BTC_WIFI_TRAFFIC_RX = 0x1,
- BTC_WIFI_TRAFFIC_MAX
-};
-
-enum btc_wifi_pnp {
- BTC_WIFI_PNP_WAKE_UP = 0x0,
- BTC_WIFI_PNP_SLEEP = 0x1,
- BTC_WIFI_PNP_SLEEP_KEEP_ANT = 0x2,
- BTC_WIFI_PNP_MAX
-};
-
-enum btc_iot_peer {
- BTC_IOT_PEER_UNKNOWN = 0,
- BTC_IOT_PEER_REALTEK = 1,
- BTC_IOT_PEER_REALTEK_92SE = 2,
- BTC_IOT_PEER_BROADCOM = 3,
- BTC_IOT_PEER_RALINK = 4,
- BTC_IOT_PEER_ATHEROS = 5,
- BTC_IOT_PEER_CISCO = 6,
- BTC_IOT_PEER_MERU = 7,
- BTC_IOT_PEER_MARVELL = 8,
- BTC_IOT_PEER_REALTEK_SOFTAP = 9,
- BTC_IOT_PEER_SELF_SOFTAP = 10, /* Self is SoftAP */
- BTC_IOT_PEER_AIRGO = 11,
- BTC_IOT_PEER_REALTEK_JAGUAR_BCUTAP = 12,
- BTC_IOT_PEER_REALTEK_JAGUAR_CCUTAP = 13,
- BTC_IOT_PEER_MAX,
-};
-
-/* for 8723b-d cut large current issue */
-enum bt_wifi_coex_state {
- BTC_WIFI_STAT_INIT,
- BTC_WIFI_STAT_IQK,
- BTC_WIFI_STAT_NORMAL_OFF,
- BTC_WIFI_STAT_MP_OFF,
- BTC_WIFI_STAT_NORMAL,
- BTC_WIFI_STAT_ANT_DIV,
- BTC_WIFI_STAT_MAX
-};
-
-enum bt_ant_type {
- BTC_ANT_TYPE_0,
- BTC_ANT_TYPE_1,
- BTC_ANT_TYPE_2,
- BTC_ANT_TYPE_3,
- BTC_ANT_TYPE_4,
- BTC_ANT_TYPE_MAX
-};
-
-enum btc_get_type {
- /* type bool */
- BTC_GET_BL_HS_OPERATION,
- BTC_GET_BL_HS_CONNECTING,
- BTC_GET_BL_WIFI_CONNECTED,
- BTC_GET_BL_WIFI_BUSY,
- BTC_GET_BL_WIFI_SCAN,
- BTC_GET_BL_WIFI_LINK,
- BTC_GET_BL_WIFI_DHCP,
- BTC_GET_BL_WIFI_SOFTAP_IDLE,
- BTC_GET_BL_WIFI_SOFTAP_LINKING,
- BTC_GET_BL_WIFI_IN_EARLY_SUSPEND,
- BTC_GET_BL_WIFI_ROAM,
- BTC_GET_BL_WIFI_4_WAY_PROGRESS,
- BTC_GET_BL_WIFI_UNDER_5G,
- BTC_GET_BL_WIFI_AP_MODE_ENABLE,
- BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
- BTC_GET_BL_WIFI_UNDER_B_MODE,
- BTC_GET_BL_EXT_SWITCH,
- BTC_GET_BL_WIFI_IS_IN_MP_MODE,
- BTC_GET_BL_IS_ASUS_8723B,
- BTC_GET_BL_FW_READY,
- BTC_GET_BL_RF4CE_CONNECTED,
-
- /* type s4Byte */
- BTC_GET_S4_WIFI_RSSI,
- BTC_GET_S4_HS_RSSI,
-
- /* type u32 */
- BTC_GET_U4_WIFI_BW,
- BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
- BTC_GET_U4_WIFI_FW_VER,
- BTC_GET_U4_WIFI_LINK_STATUS,
- BTC_GET_U4_BT_PATCH_VER,
- BTC_GET_U4_VENDOR,
- BTC_GET_U4_SUPPORTED_VERSION,
- BTC_GET_U4_SUPPORTED_FEATURE,
- BTC_GET_U4_WIFI_IQK_TOTAL,
- BTC_GET_U4_WIFI_IQK_OK,
- BTC_GET_U4_WIFI_IQK_FAIL,
-
- /* type u1Byte */
- BTC_GET_U1_WIFI_DOT11_CHNL,
- BTC_GET_U1_WIFI_CENTRAL_CHNL,
- BTC_GET_U1_WIFI_HS_CHNL,
- BTC_GET_U1_MAC_PHY_MODE,
- BTC_GET_U1_AP_NUM,
- BTC_GET_U1_ANT_TYPE,
- BTC_GET_U1_IOT_PEER,
-
- /* for 1Ant */
- BTC_GET_U1_LPS_MODE,
- BTC_GET_BL_BT_SCO_BUSY,
-
- /* for test mode */
- BTC_GET_DRIVER_TEST_CFG,
- BTC_GET_MAX
-};
-
-enum btc_vendor {
- BTC_VENDOR_LENOVO,
- BTC_VENDOR_ASUS,
- BTC_VENDOR_OTHER
-};
-
-enum btc_set_type {
- /* type bool */
- BTC_SET_BL_BT_DISABLE,
- BTC_SET_BL_BT_ENABLE_DISABLE_CHANGE,
- BTC_SET_BL_BT_TRAFFIC_BUSY,
- BTC_SET_BL_BT_LIMITED_DIG,
- BTC_SET_BL_FORCE_TO_ROAM,
- BTC_SET_BL_TO_REJ_AP_AGG_PKT,
- BTC_SET_BL_BT_CTRL_AGG_SIZE,
- BTC_SET_BL_INC_SCAN_DEV_NUM,
- BTC_SET_BL_BT_TX_RX_MASK,
- BTC_SET_BL_MIRACAST_PLUS_BT,
-
- /* type u1Byte */
- BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
- BTC_SET_UI_SCAN_SIG_COMPENSATION,
- BTC_SET_U1_AGG_BUF_SIZE,
-
- /* type trigger some action */
- BTC_SET_ACT_GET_BT_RSSI,
- BTC_SET_ACT_AGGREGATE_CTRL,
- BTC_SET_ACT_ANTPOSREGRISTRY_CTRL,
-
- /********* for 1Ant **********/
- /* type bool */
- BTC_SET_BL_BT_SCO_BUSY,
- /* type u1Byte */
- BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
- BTC_SET_U1_LPS_VAL,
- BTC_SET_U1_RPWM_VAL,
- BTC_SET_U1_1ANT_LPS,
- BTC_SET_U1_1ANT_RPWM,
- /* type trigger some action */
- BTC_SET_ACT_LEAVE_LPS,
- BTC_SET_ACT_ENTER_LPS,
- BTC_SET_ACT_NORMAL_LPS,
- BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
- BTC_SET_ACT_DISABLE_LOW_POWER,
- BTC_SET_ACT_UPDATE_RAMASK,
- BTC_SET_ACT_SEND_MIMO_PS,
- /* BT Coex related */
- BTC_SET_ACT_CTRL_BT_INFO,
- BTC_SET_ACT_CTRL_BT_COEX,
- BTC_SET_ACT_CTRL_8723B_ANT,
- /***************************/
- BTC_SET_MAX
-};
-
-enum btc_dbg_disp_type {
- BTC_DBG_DISP_COEX_STATISTICS = 0x0,
- BTC_DBG_DISP_BT_LINK_INFO = 0x1,
- BTC_DBG_DISP_BT_FW_VER = 0x2,
- BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
- BTC_DBG_DISP_WIFI_STATUS = 0x04,
- BTC_DBG_DISP_MAX
-};
-
-enum btc_notify_type_ips {
- BTC_IPS_LEAVE = 0x0,
- BTC_IPS_ENTER = 0x1,
- BTC_IPS_MAX
-};
-
-enum btc_notify_type_lps {
- BTC_LPS_DISABLE = 0x0,
- BTC_LPS_ENABLE = 0x1,
- BTC_LPS_MAX
-};
-
-enum btc_notify_type_scan {
- BTC_SCAN_FINISH = 0x0,
- BTC_SCAN_START = 0x1,
- BTC_SCAN_START_2G = 0x2,
- BTC_SCAN_MAX
-};
-
-enum btc_notify_type_switchband {
- BTC_NOT_SWITCH = 0x0,
- BTC_SWITCH_TO_24G = 0x1,
- BTC_SWITCH_TO_5G = 0x2,
- BTC_SWITCH_TO_24G_NOFORSCAN = 0x3,
- BTC_SWITCH_MAX
-};
-
-enum btc_notify_type_associate {
- BTC_ASSOCIATE_FINISH = 0x0,
- BTC_ASSOCIATE_START = 0x1,
- BTC_ASSOCIATE_5G_FINISH = 0x2,
- BTC_ASSOCIATE_5G_START = 0x3,
- BTC_ASSOCIATE_MAX
-};
-
-enum btc_notify_type_media_status {
- BTC_MEDIA_DISCONNECT = 0x0,
- BTC_MEDIA_CONNECT = 0x1,
- BTC_MEDIA_MAX
-};
-
-enum btc_notify_type_special_packet {
- BTC_PACKET_UNKNOWN = 0x0,
- BTC_PACKET_DHCP = 0x1,
- BTC_PACKET_ARP = 0x2,
- BTC_PACKET_EAPOL = 0x3,
- BTC_PACKET_MAX
-};
-
-enum hci_ext_bt_operation {
- HCI_BT_OP_NONE = 0x0,
- HCI_BT_OP_INQUIRY_START = 0x1,
- HCI_BT_OP_INQUIRY_FINISH = 0x2,
- HCI_BT_OP_PAGING_START = 0x3,
- HCI_BT_OP_PAGING_SUCCESS = 0x4,
- HCI_BT_OP_PAGING_UNSUCCESS = 0x5,
- HCI_BT_OP_PAIRING_START = 0x6,
- HCI_BT_OP_PAIRING_FINISH = 0x7,
- HCI_BT_OP_BT_DEV_ENABLE = 0x8,
- HCI_BT_OP_BT_DEV_DISABLE = 0x9,
- HCI_BT_OP_MAX
-};
-
-enum btc_notify_type_stack_operation {
- BTC_STACK_OP_NONE = 0x0,
- BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
- BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
- BTC_STACK_OP_MAX
-};
-
-typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
-
-typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
-
-typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
-
-typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u32 data);
-
-typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
- u32 bit_mask, u8 data1b);
-
-typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
-
-typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
-
-typedef void (*bfp_btc_local_reg_w1)(void *btc_context, u32 reg_addr, u8 data);
-typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
- u8 bit_mask, u8 data);
-
-typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
- u32 bit_mask, u32 data);
-
-typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
- u32 bit_mask);
-
-typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
- u32 bit_mask, u32 data);
-
-typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
- u32 reg_addr, u32 bit_mask);
-
-typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
- u32 cmd_len, u8 *cmd_buffer);
-
-typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
-
-typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
-
-typedef u32 (*bfp_btc_get_bt_coex_supported_feature)(void *btcoexist);
-
-typedef u32 (*bfp_btc_get_bt_coex_supported_version)(void *btcoexist);
-
-typedef u32 (*bfp_btc_get_bt_phydm_version)(void *btcoexist);
-
-typedef void (*bfp_btc_phydm_modify_ra_pcr_threshold)(void *btcoexist,
- u8 ra_offset_direction,
- u8 ra_threshold_offset);
-
-typedef u32 (*bfp_btc_phydm_query_phy_counter)(void *btcoexist,
- const char *info_type);
-
-typedef u8 (*bfp_btc_get_ant_det_val_from_bt)(void *btcoexist);
-
-typedef u8 (*bfp_btc_get_ble_scan_type_from_bt)(void *btcoexist);
-
-typedef u32 (*bfp_btc_get_ble_scan_para_from_bt)(void *btcoexist, u8 scan_type);
-
-typedef bool (*bfp_btc_get_bt_afh_map_from_bt)(void *btcoexist, u8 map_type,
- u8 *afh_map);
-
-typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
- u32 value);
-typedef u32 (*bfp_btc_get_bt_reg)(void *btc_context, u8 reg_type, u32 offset);
-
-typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type,
- struct seq_file *m);
-
-struct btc_bt_info {
- bool bt_disabled;
- u8 rssi_adjust_for_agc_table_on;
- u8 rssi_adjust_for_1ant_coex_type;
- bool pre_bt_ctrl_agg_buf_size;
- bool bt_busy;
- u8 pre_agg_buf_size;
- u8 agg_buf_size;
- bool limited_dig;
- bool pre_reject_agg_pkt;
- bool reject_agg_pkt;
- bool bt_ctrl_buf_size;
- bool increase_scan_dev_num;
- bool miracast_plus_bt;
- bool bt_ctrl_agg_buf_size;
- bool bt_tx_rx_mask;
- u16 bt_hci_ver;
- u16 bt_real_fw_ver;
- u8 bt_fw_ver;
- u32 bt_get_fw_ver;
-
- bool bt_disable_low_pwr;
-
- /* the following is for 1Ant solution */
- bool bt_ctrl_lps;
- bool bt_pwr_save_mode;
- bool bt_lps_on;
- bool force_to_roam;
- u8 force_exec_pwr_cmd_cnt;
- u8 lps_val;
- u8 rpwm_val;
- u32 ra_mask;
-
- u32 afh_map_l;
- u32 afh_map_m;
- u16 afh_map_h;
- u32 bt_supported_feature;
- u32 bt_supported_version;
- u8 bt_ant_det_val;
- u8 bt_ble_scan_type;
- u32 bt_ble_scan_para;
-};
-
-struct btc_stack_info {
- bool profile_notified;
- u16 hci_version; /* stack hci version */
- u8 num_of_link;
- bool bt_link_exist;
- bool sco_exist;
- bool acl_exist;
- bool a2dp_exist;
- bool hid_exist;
- u8 num_of_hid;
- bool pan_exist;
- bool unknown_acl_exist;
- s8 min_bt_rssi;
-};
-
-struct btc_statistics {
- u32 cnt_bind;
- u32 cnt_init_hw_config;
- u32 cnt_init_coex_dm;
- u32 cnt_ips_notify;
- u32 cnt_lps_notify;
- u32 cnt_scan_notify;
- u32 cnt_connect_notify;
- u32 cnt_media_status_notify;
- u32 cnt_special_packet_notify;
- u32 cnt_bt_info_notify;
- u32 cnt_periodical;
- u32 cnt_coex_dm_switch;
- u32 cnt_stack_operation_notify;
- u32 cnt_dbg_ctrl;
- u32 cnt_pre_load_firmware;
- u32 cnt_power_on;
-};
-
-struct btc_bt_link_info {
- bool bt_link_exist;
- bool bt_hi_pri_link_exist;
- bool sco_exist;
- bool sco_only;
- bool a2dp_exist;
- bool a2dp_only;
- bool hid_exist;
- bool hid_only;
- bool pan_exist;
- bool pan_only;
- bool slave_role;
- bool acl_busy;
-};
-
-enum btc_antenna_pos {
- BTC_ANTENNA_AT_MAIN_PORT = 0x1,
- BTC_ANTENNA_AT_AUX_PORT = 0x2,
-};
-
-enum btc_mp_h2c_op_code {
- BT_OP_GET_BT_VERSION = 0,
- BT_OP_WRITE_REG_ADDR = 12,
- BT_OP_WRITE_REG_VALUE = 13,
- BT_OP_READ_REG = 17,
- BT_OP_GET_AFH_MAP_L = 30,
- BT_OP_GET_AFH_MAP_M = 31,
- BT_OP_GET_AFH_MAP_H = 32,
- BT_OP_GET_BT_COEX_SUPPORTED_FEATURE = 42,
- BT_OP_GET_BT_COEX_SUPPORTED_VERSION = 43,
- BT_OP_GET_BT_ANT_DET_VAL = 44,
- BT_OP_GET_BT_BLE_SCAN_PARA = 45,
- BT_OP_GET_BT_BLE_SCAN_TYPE = 46,
- BT_OP_MAX
-};
-
-enum btc_mp_h2c_req_num {
- /* 4 bits only */
- BT_SEQ_DONT_CARE = 0,
- BT_SEQ_GET_BT_VERSION = 0xE,
- BT_SEQ_GET_AFH_MAP_L = 0x5,
- BT_SEQ_GET_AFH_MAP_M = 0x6,
- BT_SEQ_GET_AFH_MAP_H = 0x9,
- BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE = 0x7,
- BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION = 0x8,
- BT_SEQ_GET_BT_ANT_DET_VAL = 0x2,
- BT_SEQ_GET_BT_BLE_SCAN_PARA = 0x3,
- BT_SEQ_GET_BT_BLE_SCAN_TYPE = 0x4,
-};
-
-struct btc_coexist {
- /* make sure only one adapter can bind the data context */
- bool binded;
- /* default adapter */
- void *adapter;
- struct btc_board_info board_info;
- /* some bt info referenced by non-bt module */
- struct btc_bt_info bt_info;
- struct btc_stack_info stack_info;
- enum btc_chip_interface chip_interface;
- struct btc_bt_link_info bt_link_info;
-
- /* boolean variables to replace BT_AUTO_REPORT_ONLY_XXXXY_ZANT
- * configuration parameters
- */
- bool auto_report_1ant;
- bool auto_report_2ant;
- bool dbg_mode_1ant;
- bool dbg_mode_2ant;
- bool initilized;
- bool stop_coex_dm;
- bool manual_control;
- struct btc_statistics statistics;
- u8 pwr_mode_val[10];
-
- struct completion bt_mp_comp;
-
- /* function pointers - io related */
- bfp_btc_r1 btc_read_1byte;
- bfp_btc_w1 btc_write_1byte;
- bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
- bfp_btc_r2 btc_read_2byte;
- bfp_btc_w2 btc_write_2byte;
- bfp_btc_r4 btc_read_4byte;
- bfp_btc_w4 btc_write_4byte;
- bfp_btc_local_reg_w1 btc_write_local_reg_1byte;
-
- bfp_btc_set_bb_reg btc_set_bb_reg;
- bfp_btc_get_bb_reg btc_get_bb_reg;
-
- bfp_btc_set_rf_reg btc_set_rf_reg;
- bfp_btc_get_rf_reg btc_get_rf_reg;
-
- bfp_btc_fill_h2c btc_fill_h2c;
-
- bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
-
- bfp_btc_get btc_get;
- bfp_btc_set btc_set;
-
- bfp_btc_set_bt_reg btc_set_bt_reg;
- bfp_btc_get_bt_reg btc_get_bt_reg;
-
- bfp_btc_get_bt_coex_supported_feature btc_get_bt_coex_supported_feature;
- bfp_btc_get_bt_coex_supported_version btc_get_bt_coex_supported_version;
- bfp_btc_get_bt_phydm_version btc_get_bt_phydm_version;
- bfp_btc_phydm_modify_ra_pcr_threshold btc_phydm_modify_ra_pcr_threshold;
- bfp_btc_phydm_query_phy_counter btc_phydm_query_phy_counter;
- bfp_btc_get_ant_det_val_from_bt btc_get_ant_det_val_from_bt;
- bfp_btc_get_ble_scan_type_from_bt btc_get_ble_scan_type_from_bt;
- bfp_btc_get_ble_scan_para_from_bt btc_get_ble_scan_para_from_bt;
- bfp_btc_get_bt_afh_map_from_bt btc_get_bt_afh_map_from_bt;
-
-};
-
-bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
-
-#define rtl_btc_coexist(rtlpriv) \
- ((struct btc_coexist *)((rtlpriv)->btcoexist.btc_context))
-#define rtl_btc_wifi_only(rtlpriv) \
- ((struct wifi_only_cfg *)((rtlpriv)->btcoexist.wifi_only_context))
-
-struct wifi_only_cfg;
-
-bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv);
-bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv);
-bool exhalbtc_bind_bt_coex_withadapter(void *adapter);
-void exhalbtc_power_on_setting(struct btc_coexist *btcoexist);
-void exhalbtc_pre_load_firmware(struct btc_coexist *btcoexist);
-void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only);
-void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg);
-void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
-void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
-void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
-void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
-void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
- u8 is_5g);
-void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
-void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
- enum rt_media_status media_status);
-void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
-void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
- u8 length);
-void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type);
-void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
-void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
-void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
-void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist);
-void exhalbtc_periodical(struct btc_coexist *btcoexist);
-void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
- u8 *data);
-void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq,
- u32 offset, u32 span, u32 seconds);
-void exhalbtc_stack_update_profile_info(void);
-void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version);
-void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist,
- u16 bt_hci_version, u16 bt_patch_version);
-void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi);
-void exhalbtc_set_bt_exist(struct btc_coexist *btcoexist, bool bt_exist);
-void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type);
-void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
- struct seq_file *m);
-void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type);
-void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
- u8 is_5g);
-void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
- u8 *rssi_wifi, u8 *rssi_bt);
-void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
-void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
-void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist,
- u8 single_ant_path);
-
-/* The following are used by the wifi_only case */
-enum wifionly_chip_interface {
- WIFIONLY_INTF_UNKNOWN = 0,
- WIFIONLY_INTF_PCI = 1,
- WIFIONLY_INTF_USB = 2,
- WIFIONLY_INTF_SDIO = 3,
- WIFIONLY_INTF_MAX
-};
-
-enum wifionly_customer_id {
- CUSTOMER_NORMAL = 0,
- CUSTOMER_HP_1 = 1,
-};
-
-struct wifi_only_haldata {
- u16 customer_id;
- u8 efuse_pg_antnum;
- u8 efuse_pg_antpath;
- u8 rfe_type;
- u8 ant_div_cfg;
-};
-
-struct wifi_only_cfg {
- void *adapter;
- struct wifi_only_haldata haldata_info;
- enum wifionly_chip_interface chip_interface;
-};
-
-static inline
-void halwifionly_phy_set_bb_reg(struct wifi_only_cfg *wifi_conly_cfg,
- u32 regaddr, u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)wifi_conly_cfg->adapter;
-
- rtl_set_bbreg(rtlpriv->hw, regaddr, bitmask, data);
-}
-
-#endif
diff --git a/drivers/staging/rtlwifi/btcoexist/rtl_btc.c b/drivers/staging/rtlwifi/btcoexist/rtl_btc.c
deleted file mode 100644
index dfd47b88e54b..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/rtl_btc.c
+++ /dev/null
@@ -1,517 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "../wifi.h"
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-
-#include "rtl_btc.h"
-#include "halbt_precomp.h"
-
-static struct rtl_btc_ops rtl_btc_operation = {
- .btc_init_variables = rtl_btc_init_variables,
- .btc_init_variables_wifi_only = rtl_btc_init_variables_wifi_only,
- .btc_deinit_variables = rtl_btc_deinit_variables,
- .btc_init_hal_vars = rtl_btc_init_hal_vars,
- .btc_power_on_setting = rtl_btc_power_on_setting,
- .btc_init_hw_config = rtl_btc_init_hw_config,
- .btc_init_hw_config_wifi_only = rtl_btc_init_hw_config_wifi_only,
- .btc_ips_notify = rtl_btc_ips_notify,
- .btc_lps_notify = rtl_btc_lps_notify,
- .btc_scan_notify = rtl_btc_scan_notify,
- .btc_scan_notify_wifi_only = rtl_btc_scan_notify_wifi_only,
- .btc_connect_notify = rtl_btc_connect_notify,
- .btc_mediastatus_notify = rtl_btc_mediastatus_notify,
- .btc_periodical = rtl_btc_periodical,
- .btc_halt_notify = rtl_btc_halt_notify,
- .btc_btinfo_notify = rtl_btc_btinfo_notify,
- .btc_btmpinfo_notify = rtl_btc_btmpinfo_notify,
- .btc_is_limited_dig = rtl_btc_is_limited_dig,
- .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
- .btc_is_bt_disabled = rtl_btc_is_bt_disabled,
- .btc_special_packet_notify = rtl_btc_special_packet_notify,
- .btc_switch_band_notify = rtl_btc_switch_band_notify,
- .btc_switch_band_notify_wifi_only = rtl_btc_switch_band_notify_wifionly,
- .btc_record_pwr_mode = rtl_btc_record_pwr_mode,
- .btc_get_lps_val = rtl_btc_get_lps_val,
- .btc_get_rpwm_val = rtl_btc_get_rpwm_val,
- .btc_is_bt_ctrl_lps = rtl_btc_is_bt_ctrl_lps,
- .btc_is_bt_lps_on = rtl_btc_is_bt_lps_on,
- .btc_get_ampdu_cfg = rtl_btc_get_ampdu_cfg,
- .btc_display_bt_coex_info = rtl_btc_display_bt_coex_info,
-};
-
-void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist) {
- seq_puts(m, "btc_coexist context is NULL!\n");
- return;
- }
-
- exhalbtc_display_bt_coex_info(btcoexist, m);
-}
-
-void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
- u8 safe_len;
-
- if (!btcoexist)
- return;
-
- safe_len = sizeof(btcoexist->pwr_mode_val);
-
- if (safe_len > len)
- safe_len = len;
-
- memcpy(btcoexist->pwr_mode_val, buf, safe_len);
-}
-
-u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return 0;
-
- return btcoexist->bt_info.lps_val;
-}
-
-u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return 0;
-
- return btcoexist->bt_info.rpwm_val;
-}
-
-bool rtl_btc_is_bt_ctrl_lps(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return false;
-
- return btcoexist->bt_info.bt_ctrl_lps;
-}
-
-bool rtl_btc_is_bt_lps_on(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return false;
-
- return btcoexist->bt_info.bt_lps_on;
-}
-
-void rtl_btc_get_ampdu_cfg(struct rtl_priv *rtlpriv, u8 *reject_agg,
- u8 *ctrl_agg_size, u8 *agg_size)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist) {
- *reject_agg = false;
- *ctrl_agg_size = false;
- return;
- }
-
- if (reject_agg)
- *reject_agg = btcoexist->bt_info.reject_agg_pkt;
- if (ctrl_agg_size)
- *ctrl_agg_size = btcoexist->bt_info.bt_ctrl_agg_buf_size;
- if (agg_size)
- *agg_size = btcoexist->bt_info.agg_buf_size;
-}
-
-static void rtl_btc_alloc_variable(struct rtl_priv *rtlpriv, bool wifi_only)
-{
- if (wifi_only)
- rtlpriv->btcoexist.wifi_only_context =
- kzalloc(sizeof(struct wifi_only_cfg), GFP_KERNEL);
- else
- rtlpriv->btcoexist.btc_context =
- kzalloc(sizeof(struct btc_coexist), GFP_KERNEL);
-}
-
-static void rtl_btc_free_variable(struct rtl_priv *rtlpriv)
-{
- kfree(rtlpriv->btcoexist.btc_context);
- rtlpriv->btcoexist.btc_context = NULL;
-
- kfree(rtlpriv->btcoexist.wifi_only_context);
- rtlpriv->btcoexist.wifi_only_context = NULL;
-}
-
-void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
-{
- rtl_btc_alloc_variable(rtlpriv, false);
-
- exhalbtc_initlize_variables(rtlpriv);
- exhalbtc_bind_bt_coex_withadapter(rtlpriv);
-}
-
-void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv)
-{
- rtl_btc_alloc_variable(rtlpriv, true);
-
- exhalbtc_initlize_variables_wifi_only(rtlpriv);
-}
-
-void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv)
-{
- rtl_btc_free_variable(rtlpriv);
-}
-
-void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
-{
- /* move ant_num, bt_type and single_ant_path to
- * exhalbtc_bind_bt_coex_withadapter()
- */
-}
-
-void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- exhalbtc_power_on_setting(btcoexist);
-}
-
-void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- u8 bt_exist;
-
- bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "%s, bt_exist is %d\n", __func__, bt_exist);
-
- if (!btcoexist)
- return;
-
- exhalbtc_init_hw_config(btcoexist, !bt_exist);
- exhalbtc_init_coex_dm(btcoexist);
-}
-
-void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv)
-{
- struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
-
- if (!wifionly_cfg)
- return;
-
- exhalbtc_init_hw_config_wifi_only(wifionly_cfg);
-}
-
-void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- exhalbtc_ips_notify(btcoexist, type);
-
- if (type == ERFON) {
- /*
- * In some situations, no scan is done after leaving IPS,
- * and this leaves btcoex in a wrong state.
- */
- exhalbtc_scan_notify(btcoexist, 1);
- exhalbtc_scan_notify(btcoexist, 0);
- }
-}
-
-void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- exhalbtc_lps_notify(btcoexist, type);
-}
-
-void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- exhalbtc_scan_notify(btcoexist, scantype);
-}
-
-void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
- u8 is_5g = (rtlhal->current_bandtype == BAND_ON_5G);
-
- if (!wifionly_cfg)
- return;
-
- exhalbtc_scan_notify_wifi_only(wifionly_cfg, is_5g);
-}
-
-void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- exhalbtc_connect_notify(btcoexist, action);
-}
-
-void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
- enum rt_media_status mstatus)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- exhalbtc_mediastatus_notify(btcoexist, mstatus);
-}
-
-void rtl_btc_periodical(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- /*rtl_bt_dm_monitor();*/
- exhalbtc_periodical(btcoexist);
-}
-
-void rtl_btc_halt_notify(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- exhalbtc_halt_notify(btcoexist);
-}
-
-void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- exhalbtc_bt_info_notify(btcoexist, tmp_buf, length);
-}
-
-void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
- u8 extid, seq, len;
- u16 bt_real_fw_ver;
- u8 bt_fw_ver;
- u8 *data;
-
- if (!btcoexist)
- return;
-
- if ((length < 4) || (!tmp_buf))
- return;
-
- extid = tmp_buf[0];
- /* not a response from BT FW, so exit */
- if (extid != 1) /* C2H_TRIG_BY_BT_FW = 1 */
- return;
-
- len = tmp_buf[1] >> 4;
- seq = tmp_buf[2] >> 4;
- data = &tmp_buf[3];
-
- /* BT Firmware version response */
- switch (seq) {
- case BT_SEQ_GET_BT_VERSION:
- bt_real_fw_ver = tmp_buf[3] | (tmp_buf[4] << 8);
- bt_fw_ver = tmp_buf[5];
-
- btcoexist->bt_info.bt_real_fw_ver = bt_real_fw_ver;
- btcoexist->bt_info.bt_fw_ver = bt_fw_ver;
- break;
- case BT_SEQ_GET_AFH_MAP_L:
- btcoexist->bt_info.afh_map_l = le32_to_cpu(*(__le32 *)data);
- break;
- case BT_SEQ_GET_AFH_MAP_M:
- btcoexist->bt_info.afh_map_m = le32_to_cpu(*(__le32 *)data);
- break;
- case BT_SEQ_GET_AFH_MAP_H:
- btcoexist->bt_info.afh_map_h = le16_to_cpu(*(__le16 *)data);
- break;
- case BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE:
- btcoexist->bt_info.bt_supported_feature = tmp_buf[3] |
- (tmp_buf[4] << 8);
- break;
- case BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION:
- btcoexist->bt_info.bt_supported_version = tmp_buf[3] |
- (tmp_buf[4] << 8);
- break;
- case BT_SEQ_GET_BT_ANT_DET_VAL:
- btcoexist->bt_info.bt_ant_det_val = tmp_buf[3];
- break;
- case BT_SEQ_GET_BT_BLE_SCAN_PARA:
- btcoexist->bt_info.bt_ble_scan_para = tmp_buf[3] |
- (tmp_buf[4] << 8) |
- (tmp_buf[5] << 16) |
- (tmp_buf[6] << 24);
- break;
- case BT_SEQ_GET_BT_BLE_SCAN_TYPE:
- btcoexist->bt_info.bt_ble_scan_type = tmp_buf[3];
- break;
- }
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "btmpinfo complete req_num=%d\n", seq);
-
- complete(&btcoexist->bt_mp_comp);
-}
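
For reference, a standalone sketch of the C2H report layout the parsing above assumes: extension ID in byte 0, length and sequence nibbles in bytes 1 and 2, payload from byte 3 onward. The BT_SEQ_GET_BT_VERSION value and the sample buffer are hypothetical; only the field extraction mirrors the function above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative parse of a BT MP info C2H buffer, mirroring the field
 * extraction above: buf[0] = ext ID, high nibble of buf[1] = length,
 * high nibble of buf[2] = sequence, payload starts at buf[3].
 */
static void parse_btmpinfo(const uint8_t *buf, uint8_t length)
{
        uint8_t extid, len, seq;
        uint16_t bt_real_fw_ver;

        if (!buf || length < 4)
                return;

        extid = buf[0];
        if (extid != 1)         /* only C2H_TRIG_BY_BT_FW replies carry data */
                return;

        len = buf[1] >> 4;
        seq = buf[2] >> 4;

        /* 0x0 is a hypothetical BT_SEQ_GET_BT_VERSION value */
        if (seq == 0x0 && length >= 6) {
                bt_real_fw_ver = buf[3] | (buf[4] << 8);   /* little endian */
                printf("len=%u seq=%u real_fw_ver=0x%04x fw_ver=%u\n",
                       len, seq, bt_real_fw_ver, buf[5]);
        }
}

int main(void)
{
        const uint8_t c2h[] = { 0x01, 0x40, 0x00, 0x34, 0x12, 0x07 };

        parse_btmpinfo(c2h, sizeof(c2h));
        return 0;
}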
-
-bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return false;
-
- return btcoexist->bt_info.limited_dig;
-}
-
-bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
-{
- bool bt_change_edca = false;
- u32 cur_edca_val;
- u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b;
- u32 edca_hs;
- u32 edca_addr = 0x504;
-
- cur_edca_val = rtl_read_dword(rtlpriv, edca_addr);
- if (halbtc_is_wifi_uplink(rtlpriv)) {
- if (cur_edca_val != edca_bt_hs_uplink) {
- edca_hs = edca_bt_hs_uplink;
- bt_change_edca = true;
- }
- } else {
- if (cur_edca_val != edca_bt_hs_downlink) {
- edca_hs = edca_bt_hs_downlink;
- bt_change_edca = true;
- }
- }
-
- if (bt_change_edca)
- rtl_write_dword(rtlpriv, edca_addr, edca_hs);
-
- return true;
-}
-
-bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return true;
-
- /* It seems 'bt_disabled' is never initialized or set. */
- if (btcoexist->bt_info.bt_disabled)
- return true;
- else
- return false;
-}
-
-void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-
- if (!btcoexist)
- return;
-
- return exhalbtc_special_packet_notify(btcoexist, pkt_type);
-}
-
-void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type,
- bool scanning)
-{
- struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
- u8 type = BTC_NOT_SWITCH;
-
- if (!btcoexist)
- return;
-
- switch (band_type) {
- case BAND_ON_2_4G:
- if (scanning)
- type = BTC_SWITCH_TO_24G;
- else
- type = BTC_SWITCH_TO_24G_NOFORSCAN;
- break;
-
- case BAND_ON_5G:
- type = BTC_SWITCH_TO_5G;
- break;
- }
-
- if (type != BTC_NOT_SWITCH)
- exhalbtc_switch_band_notify(btcoexist, type);
-}
-
-void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type,
- bool scanning)
-{
- struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
- u8 is_5g = (band_type == BAND_ON_5G);
-
- if (!wifionly_cfg)
- return;
-
- exhalbtc_switch_band_notify_wifi_only(wifionly_cfg, is_5g);
-}
-
-struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
-{
- return &rtl_btc_operation;
-}
-
-enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- enum rt_media_status m_status = RT_MEDIA_DISCONNECT;
-
- u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
-
- if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED)
- m_status = RT_MEDIA_CONNECT;
-
- return m_status;
-}
-
-u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
-{
- return rtlpriv->btcoexist.btc_info.btcoexist;
-}
diff --git a/drivers/staging/rtlwifi/btcoexist/rtl_btc.h b/drivers/staging/rtlwifi/btcoexist/rtl_btc.h
deleted file mode 100644
index 0141f4641ef0..000000000000
--- a/drivers/staging/rtlwifi/btcoexist/rtl_btc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_BTC_H__
-#define __RTL_BTC_H__
-
-#include "halbt_precomp.h"
-
-void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
-void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv);
-void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv);
-void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
-void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv);
-void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
-void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv);
-void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
-void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type);
-void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
-void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype);
-void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
-void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
- enum rt_media_status mstatus);
-void rtl_btc_periodical(struct rtl_priv *rtlpriv);
-void rtl_btc_halt_notify(struct rtl_priv *rtlpriv);
-void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length);
-void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length);
-bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
-bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
-bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
-void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
-void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type,
- bool scanning);
-void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type,
- bool scanning);
-void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m);
-void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
-u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv);
-u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv);
-bool rtl_btc_is_bt_ctrl_lps(struct rtl_priv *rtlpriv);
-bool rtl_btc_is_bt_lps_on(struct rtl_priv *rtlpriv);
-void rtl_btc_get_ampdu_cfg(struct rtl_priv *rtlpriv, u8 *reject_agg,
- u8 *ctrl_agg_size, u8 *agg_size);
-
-struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
-
-u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
-u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
-u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
-u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv);
-u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv);
-
-enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw);
-
-#endif
diff --git a/drivers/staging/rtlwifi/cam.c b/drivers/staging/rtlwifi/cam.c
deleted file mode 100644
index e8572d654655..000000000000
--- a/drivers/staging/rtlwifi/cam.c
+++ /dev/null
@@ -1,315 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "wifi.h"
-#include "cam.h"
-#include <linux/export.h>
-
-void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->sec.use_defaultkey = false;
- rtlpriv->sec.pairwise_enc_algorithm = NO_ENCRYPTION;
- rtlpriv->sec.group_enc_algorithm = NO_ENCRYPTION;
- memset(rtlpriv->sec.key_buf, 0, KEY_BUF_SIZE * MAX_KEY_LEN);
- memset(rtlpriv->sec.key_len, 0, KEY_BUF_SIZE);
- rtlpriv->sec.pairwise_key = NULL;
-}
-
-static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
- u8 *mac_addr, u8 *key_cont_128, u16 us_config)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- u32 target_command;
- u32 target_content = 0;
- int entry_i;
-
- RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :",
- key_cont_128, 16);
-
- /* dwords 0-1: config + mac, 2-5: 128-bit key, 6-7: reserved */
- for (entry_i = CAM_CONTENT_COUNT - 1; entry_i >= 0; entry_i--) {
- target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
- target_command = target_command | BIT(31) | BIT(16);
-
- if (entry_i == 0) {
- target_content = (u32)(*(mac_addr + 0)) << 16 |
- (u32)(*(mac_addr + 1)) << 24 |
- (u32)us_config;
-
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
- target_content);
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
- target_command);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[WCAMI], target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The Key ID is %d\n", entry_no);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[RWCAM], target_command);
-
- } else if (entry_i == 1) {
- target_content = (u32)(*(mac_addr + 5)) << 24 |
- (u32)(*(mac_addr + 4)) << 16 |
- (u32)(*(mac_addr + 3)) << 8 |
- (u32)(*(mac_addr + 2));
-
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
- target_content);
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
- target_command);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
- } else {
- target_content =
- (u32)(*(key_cont_128 + (entry_i * 4 - 8) + 3)) <<
- 24 | (u32)(*(key_cont_128 + (entry_i * 4 - 8) + 2))
- << 16 |
- (u32)(*(key_cont_128 + (entry_i * 4 - 8) + 1)) << 8
- | (u32)(*(key_cont_128 + (entry_i * 4 - 8) + 0));
-
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
- target_content);
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
- target_command);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
- }
- }
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "after set key, usconfig:%x\n", us_config);
-}
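
A minimal user-space sketch of the 8-dword CAM entry layout the loop above assumes (dword 0: config plus the first two MAC bytes, dword 1: the remaining four MAC bytes, dwords 2-5: the 128-bit key, dwords 6-7: reserved). Register writes are replaced by prints, and the config value is arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CAM_CONTENT_COUNT 8

/* Build the eight 32-bit words of one CAM entry in the order the loop
 * above produces them: word 0 = us_config | MAC[0..1], word 1 = MAC[2..5],
 * words 2..5 = 128-bit key (little-endian packed), words 6..7 left zero.
 */
static void build_cam_entry(const uint8_t mac[6], const uint8_t key[16],
                            uint16_t us_config,
                            uint32_t out[CAM_CONTENT_COUNT])
{
        memset(out, 0, CAM_CONTENT_COUNT * sizeof(uint32_t));

        out[0] = (uint32_t)us_config | ((uint32_t)mac[0] << 16) |
                 ((uint32_t)mac[1] << 24);
        out[1] = (uint32_t)mac[2] | ((uint32_t)mac[3] << 8) |
                 ((uint32_t)mac[4] << 16) | ((uint32_t)mac[5] << 24);
        for (int i = 0; i < 4; i++)
                out[2 + i] = (uint32_t)key[i * 4] |
                             ((uint32_t)key[i * 4 + 1] << 8) |
                             ((uint32_t)key[i * 4 + 2] << 16) |
                             ((uint32_t)key[i * 4 + 3] << 24);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t key[16] = { 0 };
        uint32_t entry[CAM_CONTENT_COUNT];

        build_cam_entry(mac, key, 0x8004 /* arbitrary config */, entry);
        for (int i = 0; i < CAM_CONTENT_COUNT; i++)
                printf("dword %d: 0x%08x\n", i, entry[i]);
        return 0;
}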
-
-u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
- u32 ul_default_key, u8 *key_content)
-{
- u32 us_config;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
- ul_entry_idx, ul_key_id, ul_enc_alg,
- ul_default_key, mac_addr);
-
- if (ul_key_id == TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ulKeyId exceed!\n");
- return 0;
- }
-
- if (ul_default_key == 1)
- us_config = CFG_VALID | ((u16)(ul_enc_alg) << 2);
- else
- us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id;
-
- rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
- (u8 *)key_content, us_config);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
-
- return 1;
-}
-
-int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
- u8 *mac_addr, u32 ul_key_id)
-{
- u32 ul_command;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
-
- ul_command = ul_key_id * CAM_CONTENT_COUNT;
- ul_command = ul_command | BIT(31) | BIT(16);
-
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s(): WRITE A4: %x\n", __func__, 0);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s(): WRITE A0: %x\n", __func__, ul_command);
-
- return 0;
-}
-
-void rtl_cam_reset_all_entry(struct ieee80211_hw *hw)
-{
- u32 ul_command;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- ul_command = BIT(31) | BIT(30);
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
-}
-
-void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- u32 ul_command;
- u32 ul_content;
- u32 ul_enc_algo;
-
- switch (rtlpriv->sec.pairwise_enc_algorithm) {
- case WEP40_ENCRYPTION:
- ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
- break;
- case WEP104_ENCRYPTION:
- ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
- break;
- case TKIP_ENCRYPTION:
- ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
- break;
- case AESCCMP_ENCRYPTION:
- ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
- break;
- default:
- ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
- }
-
- ul_content = (uc_index & 3) | ((u16)(ul_enc_algo) << 2);
-
- ul_content |= BIT(15);
- ul_command = CAM_CONTENT_COUNT * uc_index;
- ul_command = ul_command | BIT(31) | BIT(16);
-
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s(): WRITE A4: %x\n", __func__, ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s(): WRITE A0: %x\n", __func__, ul_command);
-}
-
-void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- u32 ul_command;
- u32 ul_content;
- u32 ul_encalgo;
- u8 entry_i;
-
- switch (rtlpriv->sec.pairwise_enc_algorithm) {
- case WEP40_ENCRYPTION:
- ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
- break;
- case WEP104_ENCRYPTION:
- ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
- break;
- case TKIP_ENCRYPTION:
- ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
- break;
- case AESCCMP_ENCRYPTION:
- ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
- break;
- default:
- ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
- }
-
- for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
- if (entry_i == 0) {
- ul_content =
- (uc_index & 0x03) | ((u16)(ul_encalgo) << 2);
- ul_content |= BIT(15);
- } else {
- ul_content = 0;
- }
-
- ul_command = CAM_CONTENT_COUNT * uc_index + entry_i;
- ul_command = ul_command | BIT(31) | BIT(16);
-
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "%s(): WRITE A4: %x\n", __func__, ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "%s(): WRITE A0: %x\n", __func__, ul_command);
- }
-}
-
-u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> 4;
- u8 entry_idx = 0;
- u8 i, *addr;
-
- if (!sta_addr) {
- pr_err("sta_addr is NULL.\n");
- return TOTAL_CAM_ENTRY;
- }
- /* Does STA already exist? */
- for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
- addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
- if (ether_addr_equal_unaligned(addr, sta_addr))
- return i;
- }
- /* Get a free CAM entry. */
- for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
- if ((bitmap & BIT(0)) == 0) {
- pr_err("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
- rtlpriv->sec.hwsec_cam_bitmap, entry_idx);
- rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
- memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
- sta_addr, ETH_ALEN);
- return entry_idx;
- }
- bitmap = bitmap >> 1;
- }
- return TOTAL_CAM_ENTRY;
-}
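
A standalone sketch of the free-entry bookkeeping used above, assuming a 32-entry CAM for illustration: indices 0-3 are reserved, so the search starts at 4 and tests successive bits of a software bitmap, marking the first clear bit as used.

#include <stdint.h>
#include <stdio.h>

#define TOTAL_CAM_ENTRY 32      /* illustrative size */

/* Return the first free CAM index >= 4 and mark it used in the bitmap,
 * or TOTAL_CAM_ENTRY when every entry is occupied.
 */
static unsigned int cam_get_free_entry(uint32_t *bitmap)
{
        for (unsigned int idx = 4; idx < TOTAL_CAM_ENTRY; idx++) {
                if (!(*bitmap & (1u << idx))) {
                        *bitmap |= 1u << idx;
                        return idx;
                }
        }
        return TOTAL_CAM_ENTRY;
}

int main(void)
{
        uint32_t bitmap = 0x0000003f;   /* entries 0-5 already in use */

        printf("free entry: %u\n", cam_get_free_entry(&bitmap)); /* prints 6 */
        return 0;
}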
-
-void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 bitmap;
- u8 i, *addr;
-
- if (!sta_addr) {
- pr_err("sta_addr is NULL.\n");
- return;
- }
-
- if (is_zero_ether_addr(sta_addr)) {
- pr_err("sta_addr is %pM\n", sta_addr);
- return;
- }
- /* Does STA already exist? */
- for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
- addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
- bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
- if (((bitmap & BIT(0)) == BIT(0)) &&
- (ether_addr_equal_unaligned(addr, sta_addr))) {
- /* Remove from HW Security CAM */
- eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
- rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "&&&&&&&&&del entry %d\n", i);
- }
- }
-}
diff --git a/drivers/staging/rtlwifi/cam.h b/drivers/staging/rtlwifi/cam.h
deleted file mode 100644
index 3f1d8b5a13a5..000000000000
--- a/drivers/staging/rtlwifi/cam.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_CAM_H_
-#define __RTL_CAM_H_
-
-#define CAM_CONTENT_COUNT 8
-
-#define CFG_VALID BIT(15)
-
-#define PAIRWISE_KEYIDX 0
-#define CAM_PAIRWISE_KEY_POSITION 4
-
-#define CAM_CONFIG_NO_USEDK 0
-
-void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
- u32 ul_default_key, u8 *key_content);
-int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id);
-void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
-void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
-void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
-u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr);
-void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr);
-
-#endif
diff --git a/drivers/staging/rtlwifi/core.c b/drivers/staging/rtlwifi/core.c
deleted file mode 100644
index a9902818ae7e..000000000000
--- a/drivers/staging/rtlwifi/core.c
+++ /dev/null
@@ -1,1996 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "wifi.h"
-#include "core.h"
-#include "cam.h"
-#include "base.h"
-#include "ps.h"
-#include "pwrseqcmd.h"
-
-#include "btcoexist/rtl_btc.h"
-#include <linux/firmware.h>
-#include <linux/export.h>
-#include <net/cfg80211.h>
-
-u8 channel5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
- 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
- 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
- 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
- 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
- 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
- 165, 167, 169, 171, 173, 175, 177 /* Band 4 */
-};
-
-u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {
- 42, 58, 106, 122, 138, 155, 171
-};
-
-static void rtl_fw_do_work(const struct firmware *firmware, void *context,
- bool is_wow)
-{
- struct ieee80211_hw *hw = context;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int err;
-
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
- if (!firmware) {
- if (rtlpriv->cfg->alt_fw_name) {
- err = request_firmware(&firmware,
- rtlpriv->cfg->alt_fw_name,
- rtlpriv->io.dev);
- pr_info("Loading alternative firmware %s\n",
- rtlpriv->cfg->alt_fw_name);
- if (!err)
- goto found_alt;
- }
- pr_err("Selected firmware is not available\n");
- rtlpriv->max_fw_size = 0;
- return;
- }
-found_alt:
- if (firmware->size > rtlpriv->max_fw_size) {
- pr_err("Firmware is too big!\n");
- release_firmware(firmware);
- return;
- }
- if (!is_wow) {
- memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
- firmware->size);
- rtlpriv->rtlhal.fwsize = firmware->size;
- } else {
- memcpy(rtlpriv->rtlhal.wowlan_firmware, firmware->data,
- firmware->size);
- rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
- }
- rtlpriv->rtlhal.fwsize = firmware->size;
- release_firmware(firmware);
-}
-
-void rtl_fw_cb(const struct firmware *firmware, void *context)
-{
- rtl_fw_do_work(firmware, context, false);
-}
-
-void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context)
-{
- rtl_fw_do_work(firmware, context, true);
-}
-
-/* The mutex is required here for start & stop. */
-static int rtl_op_start(struct ieee80211_hw *hw)
-{
- int err = 0;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- if (!is_hal_stop(rtlhal))
- return 0;
- if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
- return 0;
- mutex_lock(&rtlpriv->locks.conf_mutex);
- err = rtlpriv->intf_ops->adapter_start(hw);
- if (!err)
- rtl_watch_dog_timer_callback(&rtlpriv->works.watchdog_timer);
- mutex_unlock(&rtlpriv->locks.conf_mutex);
- return err;
-}
-
-static void rtl_op_stop(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool support_remote_wakeup = false;
-
- if (is_hal_stop(rtlhal))
- return;
-
- rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
- (u8 *)(&support_remote_wakeup));
- /* This is required, because adhoc does stop and start,
- * but stopping with RFOFF may cause problems,
- * e.g. degraded adhoc throughput.
- */
- if (unlikely(ppsc->rfpwr_state == ERFOFF))
- rtl_ips_nic_on(hw);
-
- mutex_lock(&rtlpriv->locks.conf_mutex);
- /* if wowlan supported, DON'T clear connected info */
- if (!(support_remote_wakeup &&
- rtlhal->enter_pnp_sleep)) {
- mac->link_state = MAC80211_NOLINK;
- eth_zero_addr(mac->bssid);
- mac->vendor = PEER_UNKNOWN;
-
- /* reset sec info */
- rtl_cam_reset_sec_info(hw);
-
- rtl_deinit_deferred_work(hw);
- }
- rtlpriv->intf_ops->adapter_stop(hw);
-
- mutex_unlock(&rtlpriv->locks.conf_mutex);
-}
-
-static void rtl_op_tx(struct ieee80211_hw *hw,
- struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_tcb_desc tcb_desc;
-
- memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
-
- if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
- goto err_free;
-
- if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
- goto err_free;
-
- if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
- rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
- return;
-
-err_free:
- dev_kfree_skb_any(skb);
-}
-
-static int rtl_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- int err = 0;
- u8 retry_limit = 0x30;
-
- if (mac->vif) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "vif has been set!! mac->vif = 0x%p\n", mac->vif);
- return -EOPNOTSUPP;
- }
-
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
-
- rtl_ips_nic_on(hw);
-
- mutex_lock(&rtlpriv->locks.conf_mutex);
- switch (ieee80211_vif_type_p2p(vif)) {
- case NL80211_IFTYPE_P2P_CLIENT:
- mac->p2p = P2P_ROLE_CLIENT;
- /*fall through*/
- case NL80211_IFTYPE_STATION:
- if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_STATION\n");
- mac->beacon_enabled = 0;
- rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
- rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_ADHOC\n");
-
- mac->link_state = MAC80211_LINKED;
- rtlpriv->cfg->ops->set_bcn_reg(hw);
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
- mac->basic_rates = 0xfff;
- else
- mac->basic_rates = 0xff0;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
- (u8 *)(&mac->basic_rates));
-
- retry_limit = 0x07;
- break;
- case NL80211_IFTYPE_P2P_GO:
- mac->p2p = P2P_ROLE_GO;
- /*fall through*/
- case NL80211_IFTYPE_AP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_AP\n");
-
- mac->link_state = MAC80211_LINKED;
- rtlpriv->cfg->ops->set_bcn_reg(hw);
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
- mac->basic_rates = 0xfff;
- else
- mac->basic_rates = 0xff0;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
- (u8 *)(&mac->basic_rates));
-
- retry_limit = 0x07;
- break;
- case NL80211_IFTYPE_MESH_POINT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_MESH_POINT\n");
-
- mac->link_state = MAC80211_LINKED;
- rtlpriv->cfg->ops->set_bcn_reg(hw);
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
- mac->basic_rates = 0xfff;
- else
- mac->basic_rates = 0xff0;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
- (u8 *)(&mac->basic_rates));
-
- retry_limit = 0x07;
- break;
- default:
- pr_err("operation mode %d is not supported!\n",
- vif->type);
- err = -EOPNOTSUPP;
- goto out;
- }
-
- if (mac->p2p) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p role %x\n", vif->type);
- mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
- (u8 *)(&mac->basic_rates));
- }
- mac->vif = vif;
- mac->opmode = vif->type;
- rtlpriv->cfg->ops->set_network_type(hw, vif->type);
- memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
-
- mac->retry_long = retry_limit;
- mac->retry_short = retry_limit;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
- (u8 *)(&retry_limit));
-out:
- mutex_unlock(&rtlpriv->locks.conf_mutex);
- return err;
-}
-
-static void rtl_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- mutex_lock(&rtlpriv->locks.conf_mutex);
-
- /* Free beacon resources */
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) {
- if (mac->beacon_enabled == 1) {
- mac->beacon_enabled = 0;
- rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
- rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
- }
- }
-
- /*
- *Note: We treat NL80211_IFTYPE_UNSPECIFIED as
- *NO LINK for our hardware.
- */
- mac->p2p = 0;
- mac->vif = NULL;
- mac->link_state = MAC80211_NOLINK;
- eth_zero_addr(mac->bssid);
- mac->vendor = PEER_UNKNOWN;
- mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
- rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
-
- mutex_unlock(&rtlpriv->locks.conf_mutex);
-}
-
-static int rtl_op_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype new_type, bool p2p)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int ret;
-
- rtl_op_remove_interface(hw, vif);
-
- vif->type = new_type;
- vif->p2p = p2p;
- ret = rtl_op_add_interface(hw, vif);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p %x\n", p2p);
- return ret;
-}
-
-#ifdef CONFIG_PM
-static u16 crc16_ccitt(u8 data, u16 crc)
-{
- u8 shift_in, data_bit, crc_bit11, crc_bit4, crc_bit15;
- u8 i;
- u16 result;
-
- for (i = 0; i < 8; i++) {
- crc_bit15 = ((crc & BIT(15)) ? 1 : 0);
- data_bit = (data & (BIT(0) << i) ? 1 : 0);
- shift_in = crc_bit15 ^ data_bit;
-
- result = crc << 1;
- if (shift_in == 0)
- result &= (~BIT(0));
- else
- result |= BIT(0);
-
- crc_bit11 = ((crc & BIT(11)) ? 1 : 0) ^ shift_in;
- if (crc_bit11 == 0)
- result &= (~BIT(12));
- else
- result |= BIT(12);
-
- crc_bit4 = ((crc & BIT(4)) ? 1 : 0) ^ shift_in;
- if (crc_bit4 == 0)
- result &= (~BIT(5));
- else
- result |= BIT(5);
-
- crc = result;
- }
-
- return crc;
-}
-
-static u16 _calculate_wol_pattern_crc(u8 *pattern, u16 len)
-{
- u16 crc = 0xffff;
- u32 i;
-
- for (i = 0; i < len; i++)
- crc = crc16_ccitt(pattern[i], crc);
-
- crc = ~crc;
-
- return crc;
-}
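
The two helpers above implement a bit-serial CRC with the CCITT polynomial 0x1021: the register is seeded with 0xffff, each data byte is fed in LSB-first, and the final remainder is inverted. A compact equivalent for experimenting with wake patterns could look like this sketch (same taps, folded into one XOR):

#include <stdint.h>
#include <stdio.h>

/* Equivalent of the bit-serial helpers above: CCITT polynomial 0x1021,
 * register seeded with 0xffff, data bytes fed LSB-first, result inverted.
 */
static uint16_t wol_pattern_crc(const uint8_t *data, uint16_t len)
{
        uint16_t crc = 0xffff;

        for (uint16_t i = 0; i < len; i++) {
                for (int bit = 0; bit < 8; bit++) {
                        int shift_in = ((crc >> 15) & 1) ^
                                       ((data[i] >> bit) & 1);

                        crc <<= 1;
                        if (shift_in)
                                crc ^= 0x1021;  /* x^12 + x^5 + 1 taps */
                }
        }
        return ~crc;
}

int main(void)
{
        const uint8_t pattern[] = { 0xde, 0xad, 0xbe, 0xef };

        printf("CRC remainder: 0x%04x\n",
               wol_pattern_crc(pattern, sizeof(pattern)));
        return 0;
}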
-
-static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wow)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = &rtlpriv->mac80211;
- struct cfg80211_pkt_pattern *patterns = wow->patterns;
- struct rtl_wow_pattern rtl_pattern;
- const u8 *pattern_os, *mask_os;
- u8 mask[MAX_WOL_BIT_MASK_SIZE] = {0};
- u8 content[MAX_WOL_PATTERN_SIZE] = {0};
- u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 multicast_addr1[2] = {0x33, 0x33};
- u8 multicast_addr2[3] = {0x01, 0x00, 0x5e};
- u8 i, mask_len;
- u16 j, len;
-
- for (i = 0; i < wow->n_patterns; i++) {
- memset(&rtl_pattern, 0, sizeof(struct rtl_wow_pattern));
- memset(mask, 0, MAX_WOL_BIT_MASK_SIZE);
- if (patterns[i].pattern_len < 0 ||
- patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING,
- "Pattern[%d] is too long\n", i);
- continue;
- }
- pattern_os = patterns[i].pattern;
- mask_len = DIV_ROUND_UP(patterns[i].pattern_len, 8);
- mask_os = patterns[i].mask;
- RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE,
- "pattern content\n", pattern_os,
- patterns[i].pattern_len);
- RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE,
- "mask content\n", mask_os, mask_len);
- /* 1. unicast? multicast? or broadcast? */
- if (memcmp(pattern_os, broadcast_addr, 6) == 0)
- rtl_pattern.type = BROADCAST_PATTERN;
- else if (memcmp(pattern_os, multicast_addr1, 2) == 0 ||
- memcmp(pattern_os, multicast_addr2, 3) == 0)
- rtl_pattern.type = MULTICAST_PATTERN;
- else if (memcmp(pattern_os, mac->mac_addr, 6) == 0)
- rtl_pattern.type = UNICAST_PATTERN;
- else
- rtl_pattern.type = UNKNOWN_TYPE;
-
- /* 2. translate mask_from_os to mask_for_hw */
-
-/******************************************************************************
- * The pattern from the OS uses an 'Ethernet frame', like this:
-
- | 6 | 6 | 2 | 20 | Variable | 4 |
- |--------+--------+------+-----------+------------+-----|
- | 802.3 Mac Header | IP Header | TCP Packet | FCS |
- | DA | SA | Type |
-
- * BUT, a packet caught by our HW is an '802.11 frame', beginning from the LLC,
-
- | 24 or 30 | 6 | 2 | 20 | Variable | 4 |
- |-------------------+--------+------+-----------+------------+-----|
- | 802.11 MAC Header | LLC | IP Header | TCP Packet | FCS |
- | Others | Type |
-
- * Therefore, we need to translate mask_from_OS to mask_for_hw.
- * We should left-shift the mask by 6 bits, then set the new bit[0~5] = 0,
- * because the new mask[0~5] would mean 'SA', but our HW packet begins from
- * the LLC, and bit[0~5] corresponds to the first 6 bytes of the LLC, so
- * they just don't match.
- ******************************************************************************/
-
- /* Shift 6 bits */
- for (j = 0; j < mask_len - 1; j++) {
- mask[j] = mask_os[j] >> 6;
- mask[j] |= (mask_os[j + 1] & 0x3F) << 2;
- }
- mask[j] = (mask_os[j] >> 6) & 0x3F;
- /* Set bit 0-5 to zero */
- mask[0] &= 0xC0;
-
- RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE,
- "mask to hw\n", mask, mask_len);
- for (j = 0; j < (MAX_WOL_BIT_MASK_SIZE + 1) / 4; j++) {
- rtl_pattern.mask[j] = mask[j * 4];
- rtl_pattern.mask[j] |= (mask[j * 4 + 1] << 8);
- rtl_pattern.mask[j] |= (mask[j * 4 + 2] << 16);
- rtl_pattern.mask[j] |= (mask[j * 4 + 3] << 24);
- }
-
- /* Get the wake-up pattern content from the mask.
- * We do not count the first 12 bytes, i.e. DA[6] and SA[6]
- * of the pattern, to match the HW design.
- */
- len = 0;
- for (j = 12; j < patterns[i].pattern_len; j++) {
- if ((mask_os[j / 8] >> (j % 8)) & 0x01) {
- content[len] = pattern_os[j];
- len++;
- }
- }
-
- RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE,
- "pattern to hw\n", content, len);
- /* 3. calculate crc */
- rtl_pattern.crc = _calculate_wol_pattern_crc(content, len);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
-
- /* 4. write crc & mask_for_hw to hw */
- rtlpriv->cfg->ops->add_wowlan_pattern(hw, &rtl_pattern, i);
- }
- rtl_write_byte(rtlpriv, 0x698, wow->n_patterns);
-}
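
To make the mask translation concrete, here is a small stand-alone sketch of the shift described in the comment block inside the function above: the OS mask is moved down by 6 bit positions so byte offsets line up with the hardware view that starts at the LLC, and the bits that would cover the source address are cleared. Sizes and the sample mask are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MAX_WOL_BIT_MASK_SIZE 16        /* illustrative size */

/* Shift the OS-provided byte mask down by 6 bit positions and clear the
 * six bits that would cover the source address, mirroring the loop above.
 */
static void translate_wol_mask(const uint8_t *mask_os, uint8_t mask_len,
                               uint8_t *mask_hw)
{
        uint8_t j;

        for (j = 0; j + 1 < mask_len; j++) {
                mask_hw[j] = mask_os[j] >> 6;
                mask_hw[j] |= (mask_os[j + 1] & 0x3F) << 2;
        }
        mask_hw[j] = (mask_os[j] >> 6) & 0x3F;
        mask_hw[0] &= 0xC0;     /* drop bits that map to the SA field */
}

int main(void)
{
        /* mask covering the first 17 pattern bytes of the Ethernet frame */
        const uint8_t mask_os[MAX_WOL_BIT_MASK_SIZE] = { 0xff, 0xff, 0x01 };
        uint8_t mask_hw[MAX_WOL_BIT_MASK_SIZE] = { 0 };

        translate_wol_mask(mask_os, 3, mask_hw);
        for (int i = 0; i < 3; i++)
                printf("mask_hw[%d] = 0x%02x\n", i, mask_hw[i]);
        return 0;
}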
-
-static int rtl_op_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wow)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
- if (WARN_ON(!wow))
- return -EINVAL;
-
- /* to resolve the issue that S4 cannot wake up */
- rtlhal->last_suspend_sec = ktime_get_real_seconds();
-
- if ((ppsc->wo_wlan_mode & WAKE_ON_PATTERN_MATCH) && wow->n_patterns)
- _rtl_add_wowlan_patterns(hw, wow);
-
- rtlhal->driver_is_goingto_unload = true;
- rtlhal->enter_pnp_sleep = true;
-
- rtl_lps_leave(hw);
- rtl_op_stop(hw);
- device_set_wakeup_enable(wiphy_dev(hw->wiphy), true);
- return 0;
-}
-
-static int rtl_op_resume(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
- rtlhal->driver_is_goingto_unload = false;
- rtlhal->enter_pnp_sleep = false;
- rtlhal->wake_from_pnp_sleep = true;
-
- /* to resolve the issue that S4 cannot wake up */
- if (ktime_get_real_seconds() - rtlhal->last_suspend_sec < 5)
- return -1;
-
- rtl_op_start(hw);
- device_set_wakeup_enable(wiphy_dev(hw->wiphy), false);
- ieee80211_resume_disconnect(mac->vif);
- rtlhal->wake_from_pnp_sleep = false;
- return 0;
-}
-#endif
-
-static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct ieee80211_conf *conf = &hw->conf;
-
- if (mac->skip_scan)
- return 1;
-
- mutex_lock(&rtlpriv->locks.conf_mutex);
- if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2)*/
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
- }
-
- /*For IPS */
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- if (hw->conf.flags & IEEE80211_CONF_IDLE)
- rtl_ips_nic_off(hw);
- else
- rtl_ips_nic_on(hw);
- } else {
- /*
- *although rfoff may not be caused by ips, we will
- *check the reason in the set_rf_power_state function
- */
- if (unlikely(ppsc->rfpwr_state == ERFOFF))
- rtl_ips_nic_on(hw);
- }
-
- /*For LPS */
- if ((changed & IEEE80211_CONF_CHANGE_PS) &&
- rtlpriv->psc.swctrl_lps && !rtlpriv->psc.fwctrl_lps) {
- cancel_delayed_work(&rtlpriv->works.ps_work);
- cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
- if (conf->flags & IEEE80211_CONF_PS) {
- rtlpriv->psc.sw_ps_enabled = true;
- /* Sleeping here is required, or we may receive the beacon
- * and put mac80211 into the wrong PS state; this makes the
- * power-save nullfunc send fail and further causes packet
- * loss. So we must sleep quickly, but not immediately,
- * because that would make the nullfunc sent by mac80211
- * fail and cause packet loss too. We have tested that 5 ms
- * works very well.
- */
- if (!rtlpriv->psc.multi_buffered)
- queue_delayed_work(rtlpriv->works.rtl_wq,
- &rtlpriv->works.ps_work,
- MSECS(5));
- } else {
- rtl_swlps_rf_awake(hw);
- rtlpriv->psc.sw_ps_enabled = false;
- }
- }
-
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
- /* At bring-up everything changes (changed == ~0), which indicates
- * the first open, so use our default value instead of the wiphy one.
- */
- if (changed != ~0) {
- mac->retry_long = hw->conf.long_frame_max_tx_count;
- mac->retry_short = hw->conf.long_frame_max_tx_count;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
- (u8 *)(&hw->conf.long_frame_max_tx_count));
- }
- }
-
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
- !rtlpriv->proximity.proxim_on) {
- struct ieee80211_channel *channel = hw->conf.chandef.chan;
- enum nl80211_chan_width width = hw->conf.chandef.width;
- enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
- u8 wide_chan = (u8)channel->hw_value;
-
- /* channel_type is for 20&40M */
- if (width < NL80211_CHAN_WIDTH_80)
- channel_type =
- cfg80211_get_chandef_type(&hw->conf.chandef);
- if (mac->act_scanning)
- mac->n_channels++;
-
- if (rtlpriv->dm.supp_phymode_switch &&
- mac->link_state < MAC80211_LINKED &&
- !mac->act_scanning) {
- if (rtlpriv->cfg->ops->chk_switch_dmdp)
- rtlpriv->cfg->ops->chk_switch_dmdp(hw);
- }
-
- /*
- *Because we should switch back to current_network.chan
- *while scanning, if set_chan == current_network.chan
- *we should set it.
- *Also, mac80211 gives us wrong bw40 info for the
- *cisco1253 in bw20, so we correct it here based on
- *UPPER & LOWER.
- */
-
- if (width >= NL80211_CHAN_WIDTH_80) {
- if (width == NL80211_CHAN_WIDTH_80) {
- u32 center = hw->conf.chandef.center_freq1;
- u32 primary =
- (u32)hw->conf.chandef.chan->center_freq;
-
- rtlphy->current_chan_bw =
- HT_CHANNEL_WIDTH_80;
- mac->bw_80 = true;
- mac->bw_40 = true;
- if (center > primary) {
- mac->cur_80_prime_sc =
- PRIME_CHNL_OFFSET_LOWER;
- if (center - primary == 10) {
- mac->cur_40_prime_sc =
- PRIME_CHNL_OFFSET_UPPER;
-
- wide_chan += 2;
- } else if (center - primary == 30) {
- mac->cur_40_prime_sc =
- PRIME_CHNL_OFFSET_LOWER;
-
- wide_chan += 6;
- }
- } else {
- mac->cur_80_prime_sc =
- PRIME_CHNL_OFFSET_UPPER;
- if (primary - center == 10) {
- mac->cur_40_prime_sc =
- PRIME_CHNL_OFFSET_LOWER;
-
- wide_chan -= 2;
- } else if (primary - center == 30) {
- mac->cur_40_prime_sc =
- PRIME_CHNL_OFFSET_UPPER;
-
- wide_chan -= 6;
- }
- }
- }
- } else {
- switch (channel_type) {
- case NL80211_CHAN_HT20:
- case NL80211_CHAN_NO_HT:
- /* SC */
- mac->cur_40_prime_sc =
- PRIME_CHNL_OFFSET_DONT_CARE;
- rtlphy->current_chan_bw =
- HT_CHANNEL_WIDTH_20;
- mac->bw_40 = false;
- mac->bw_80 = false;
- break;
- case NL80211_CHAN_HT40MINUS:
- /* SC */
- mac->cur_40_prime_sc =
- PRIME_CHNL_OFFSET_UPPER;
- rtlphy->current_chan_bw =
- HT_CHANNEL_WIDTH_20_40;
- mac->bw_40 = true;
- mac->bw_80 = false;
-
- /*wide channel */
- wide_chan -= 2;
-
- break;
- case NL80211_CHAN_HT40PLUS:
- /* SC */
- mac->cur_40_prime_sc =
- PRIME_CHNL_OFFSET_LOWER;
- rtlphy->current_chan_bw =
- HT_CHANNEL_WIDTH_20_40;
- mac->bw_40 = true;
- mac->bw_80 = false;
-
- /*wide channel */
- wide_chan += 2;
-
- break;
- default:
- mac->bw_40 = false;
- mac->bw_80 = false;
- pr_err("switch case %#x not processed\n",
- channel_type);
- break;
- }
- }
-
- if (wide_chan <= 0)
- wide_chan = 1;
-
- /* While scanning, before we go off-channel we may send a ps=1
- * null to the AP and then quickly a ps=0 null, but the first
- * null may have caused the AP to queue lots of packets in its
- * hw tx buffer. These packets must be tx'd before we go off
- * channel, so we must delay long enough to let the AP flush
- * them before going off-channel; otherwise the AP may
- * disassociate us or tear down the BA session.
- */
- if (rtlpriv->mac80211.offchan_delay) {
- rtlpriv->mac80211.offchan_delay = false;
- mdelay(50);
- }
-
- rtlphy->current_channel = wide_chan;
-
- rtlpriv->cfg->ops->switch_channel(hw);
- rtlpriv->cfg->ops->set_channel_access(hw);
- rtlpriv->cfg->ops->set_bw_mode(hw, channel_type);
- }
-
- mutex_unlock(&rtlpriv->locks.conf_mutex);
-
- return 0;
-}
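
The 80 MHz branch above derives the 80 MHz and 40 MHz primary-subchannel offsets and a 'wide channel' number from the distance between the chandef center frequency and the primary channel frequency. A small sketch of that arithmetic, reusing the same +/-10 and +/-30 MHz cases, is shown below; the enum names are placeholders, not the driver's PRIME_CHNL_OFFSET_* constants.

#include <stdio.h>

enum prime_sc { SC_DONT_CARE, SC_LOWER, SC_UPPER };

/* Mirror the 80 MHz arithmetic above: decide the 80 MHz and 40 MHz primary
 * subchannel offsets and adjust the channel number toward the 80 MHz
 * segment center, based on center_freq1 - primary frequency (in MHz).
 */
static int wide_chan_80(int primary_chan, int primary_freq, int center_freq1,
                        enum prime_sc *sc80, enum prime_sc *sc40)
{
        int wide_chan = primary_chan;
        int diff = center_freq1 - primary_freq;

        *sc40 = SC_DONT_CARE;
        if (diff > 0) {
                *sc80 = SC_LOWER;       /* center above primary */
                if (diff == 10) {
                        *sc40 = SC_UPPER;
                        wide_chan += 2;
                } else if (diff == 30) {
                        *sc40 = SC_LOWER;
                        wide_chan += 6;
                }
        } else {
                *sc80 = SC_UPPER;       /* center at or below primary */
                if (diff == -10) {
                        *sc40 = SC_LOWER;
                        wide_chan -= 2;
                } else if (diff == -30) {
                        *sc40 = SC_UPPER;
                        wide_chan -= 6;
                }
        }
        return wide_chan;
}

int main(void)
{
        enum prime_sc sc80, sc40;
        /* channel 36 (5180 MHz) in the 36-48 segment centered at 5210 MHz */
        int wc = wide_chan_80(36, 5180, 5210, &sc80, &sc40);

        printf("wide_chan=%d sc80=%d sc40=%d\n", wc, sc80, sc40);
        return 0;
}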
-
-static void rtl_op_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *new_flags, u64 multicast)
-{
- bool update_rcr = false;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- *new_flags &= RTL_SUPPORTED_FILTERS;
- if (changed_flags == 0)
- return;
-
- /*TODO: we disable broadcast now, so enable it here */
- if (changed_flags & FIF_ALLMULTI) {
- if (*new_flags & FIF_ALLMULTI) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
- rtlpriv->cfg->maps[MAC_RCR_AB];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive multicast frame\n");
- } else {
- mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
- rtlpriv->cfg->maps[MAC_RCR_AB]);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive multicast frame\n");
- }
- update_rcr = true;
- }
-
- if (changed_flags & FIF_FCSFAIL) {
- if (*new_flags & FIF_FCSFAIL) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive FCS error frame\n");
- } else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive FCS error frame\n");
- }
- if (!update_rcr)
- update_rcr = true;
- }
-
- /* If the SSID is not set to hw, don't check the bssid here;
- * this is only used for linked scanning. For the linked and
- * no-link cases the bssid check is set in set_network_type.
- */
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC &&
- mac->link_state >= MAC80211_LINKED) {
- if (mac->opmode != NL80211_IFTYPE_AP &&
- mac->opmode != NL80211_IFTYPE_MESH_POINT) {
- if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
- rtlpriv->cfg->ops->set_chk_bssid(hw, false);
- else
- rtlpriv->cfg->ops->set_chk_bssid(hw, true);
- if (update_rcr)
- update_rcr = false;
- }
- }
-
- if (changed_flags & FIF_CONTROL) {
- if (*new_flags & FIF_CONTROL) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
-
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive control frame.\n");
- } else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive control frame.\n");
- }
- if (!update_rcr)
- update_rcr = true;
- }
-
- if (changed_flags & FIF_OTHER_BSS) {
- if (*new_flags & FIF_OTHER_BSS) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive other BSS's frame.\n");
- } else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive other BSS's frame.\n");
- }
- if (!update_rcr)
- update_rcr = true;
- }
-
- if (update_rcr)
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
- (u8 *)(&mac->rx_conf));
-}
-
-static int rtl_op_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_sta_info *sta_entry;
-
- if (sta) {
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- spin_lock_bh(&rtlpriv->locks.entry_list_lock);
- list_add_tail(&sta_entry->list, &rtlpriv->entry_list);
- spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- sta_entry->wireless_mode = WIRELESS_MODE_G;
- if (sta->supp_rates[0] <= 0xf)
- sta_entry->wireless_mode = WIRELESS_MODE_B;
- if (sta->ht_cap.ht_supported)
- sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
-
- if (vif->type == NL80211_IFTYPE_ADHOC)
- sta_entry->wireless_mode = WIRELESS_MODE_G;
- } else if (rtlhal->current_bandtype == BAND_ON_5G) {
- sta_entry->wireless_mode = WIRELESS_MODE_A;
- if (sta->ht_cap.ht_supported)
- sta_entry->wireless_mode = WIRELESS_MODE_N_5G;
- if (sta->vht_cap.vht_supported)
- sta_entry->wireless_mode = WIRELESS_MODE_AC_5G;
-
- if (vif->type == NL80211_IFTYPE_ADHOC)
- sta_entry->wireless_mode = WIRELESS_MODE_A;
- }
- /*disable cck rate for p2p*/
- if (mac->p2p)
- sta->supp_rates[0] &= 0xfffffff0;
-
- if (sta->ht_cap.ht_supported) {
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- rtlphy->max_ht_chan_bw = HT_CHANNEL_WIDTH_20_40;
- else
- rtlphy->max_ht_chan_bw = HT_CHANNEL_WIDTH_20;
- }
-
- if (sta->vht_cap.vht_supported)
- rtlphy->max_vht_chan_bw = HT_CHANNEL_WIDTH_80;
-
- memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "Add sta addr is %pM\n", sta->addr);
- rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true);
-
- if (rtlpriv->phydm.ops)
- rtlpriv->phydm.ops->phydm_add_sta(rtlpriv, sta);
- }
-
- return 0;
-}
-
-static int rtl_op_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_sta_info *sta_entry;
-
- if (sta) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "Remove sta addr is %pM\n", sta->addr);
-
- if (rtlpriv->phydm.ops)
- rtlpriv->phydm.ops->phydm_del_sta(rtlpriv, sta);
-
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- sta_entry->wireless_mode = 0;
- sta_entry->ratr_index = 0;
- spin_lock_bh(&rtlpriv->locks.entry_list_lock);
- list_del(&sta_entry->list);
- spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
- }
- return 0;
-}
-
-static int _rtl_get_hal_qnum(u16 queue)
-{
- int qnum;
-
- switch (queue) {
- case 0:
- qnum = AC3_VO;
- break;
- case 1:
- qnum = AC2_VI;
- break;
- case 2:
- qnum = AC0_BE;
- break;
- case 3:
- qnum = AC1_BK;
- break;
- default:
- qnum = AC0_BE;
- break;
- }
- return qnum;
-}
-
-static void rtl_op_sta_statistics(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct station_info *sinfo)
-{
- /* nothing filled by driver, so mac80211 will update all info */
- sinfo->filled = 0;
-}
-
-static int rtl_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
-{
- return -EOPNOTSUPP;
-}
-
-/*
- *for mac80211 VO = 0, VI = 1, BE = 2, BK = 3
- *for rtl819x BE = 0, BK = 1, VI = 2, VO = 3
- */
-static int rtl_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *param)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- int aci;
-
- if (queue >= AC_MAX) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "queue number %d is incorrect!\n", queue);
- return -EINVAL;
- }
-
- aci = _rtl_get_hal_qnum(queue);
- mac->ac[aci].aifs = param->aifs;
- mac->ac[aci].cw_min = cpu_to_le16(param->cw_min);
- mac->ac[aci].cw_max = cpu_to_le16(param->cw_max);
- mac->ac[aci].tx_op = cpu_to_le16(param->txop);
- memcpy(&mac->edca_param[aci], param, sizeof(*param));
- rtlpriv->cfg->ops->set_qos(hw, aci);
- return 0;
-}
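
Since mac80211 numbers its queues VO=0, VI=1, BE=2, BK=3 while the rtl819x HAL uses BE=0, BK=1, VI=2, VO=3 (as the comment above notes), _rtl_get_hal_qnum is just a fixed permutation. A table-driven equivalent is sketched below, assuming the AC?_* constants carry the values their names suggest.

#include <stdio.h>

/* mac80211 queue order VO, VI, BE, BK -> HAL AC numbers BE=0, BK=1, VI=2, VO=3 */
static const int mac80211_queue_to_hal_ac[4] = {
        3 /* VO */, 2 /* VI */, 0 /* BE */, 1 /* BK */
};

static int hal_qnum(unsigned int queue)
{
        if (queue > 3)
                return 0;       /* default to BE, as the switch above does */
        return mac80211_queue_to_hal_ac[queue];
}

int main(void)
{
        for (unsigned int q = 0; q < 4; q++)
                printf("mac80211 queue %u -> HAL AC %d\n", q, hal_qnum(q));
        return 0;
}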
-
-static void send_beacon_frame(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
- struct rtl_tcb_desc tcb_desc;
-
- if (skb) {
- memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
- rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
- }
-}
-
-static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changed)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- mutex_lock(&rtlpriv->locks.conf_mutex);
- if (vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT) {
- if (changed & BSS_CHANGED_BEACON ||
- (changed & BSS_CHANGED_BEACON_ENABLED &&
- bss_conf->enable_beacon)) {
- if (mac->beacon_enabled == 0) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_BEACON_ENABLED\n");
-
- /*start hw beacon interrupt. */
- /*rtlpriv->cfg->ops->set_bcn_reg(hw); */
- mac->beacon_enabled = 1;
- rtlpriv->cfg->ops->update_interrupt_mask(hw,
- rtlpriv->cfg->maps
- [RTL_IBSS_INT_MASKS], 0);
-
- if (rtlpriv->cfg->ops->linked_set_reg)
- rtlpriv->cfg->ops->linked_set_reg(hw);
- send_beacon_frame(hw, vif);
- }
- }
- if ((changed & BSS_CHANGED_BEACON_ENABLED &&
- !bss_conf->enable_beacon)) {
- if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "ADHOC DISABLE BEACON\n");
-
- mac->beacon_enabled = 0;
- rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
- rtlpriv->cfg->maps
- [RTL_IBSS_INT_MASKS]);
- }
- }
- if (changed & BSS_CHANGED_BEACON_INT) {
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
- "BSS_CHANGED_BEACON_INT\n");
- mac->beacon_interval = bss_conf->beacon_int;
- rtlpriv->cfg->ops->set_bcn_intv(hw);
- }
- }
-
- /*TODO: reference to enum ieee80211_bss_change */
- if (changed & BSS_CHANGED_ASSOC) {
- u8 mstatus;
-
- if (bss_conf->assoc) {
- struct ieee80211_sta *sta = NULL;
- u8 keep_alive = 10;
-
- mstatus = RT_MEDIA_CONNECT;
- /* We should reset all security info and the CAM
- * here, after getting linked and before setting the
- * CAM; we should not reset on disassoc, as that
- * would make a TKIP->WEP transition fail because
- * some flags would be wrong.
- */
- rtl_cam_reset_sec_info(hw);
- /* Reset the CAM to fix the WEP failure seen when
- * changing from WPA to WEP.
- */
- rtl_cam_reset_all_entry(hw);
-
- mac->link_state = MAC80211_LINKED;
- mac->cnt_after_linked = 0;
- mac->assoc_id = bss_conf->aid;
- memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
-
- if (rtlpriv->cfg->ops->linked_set_reg)
- rtlpriv->cfg->ops->linked_set_reg(hw);
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
- if (!sta) {
- rcu_read_unlock();
- goto out;
- }
- RT_TRACE(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
- "send PS STATIC frame\n");
- if (rtlpriv->dm.supp_phymode_switch) {
- if (sta->ht_cap.ht_supported)
- rtl_send_smps_action(hw, sta,
- IEEE80211_SMPS_STATIC);
- }
-
- if (rtlhal->current_bandtype == BAND_ON_5G) {
- mac->mode = WIRELESS_MODE_A;
- } else {
- if (sta->supp_rates[0] <= 0xf)
- mac->mode = WIRELESS_MODE_B;
- else
- mac->mode = WIRELESS_MODE_G;
- }
-
- if (sta->ht_cap.ht_supported) {
- if (rtlhal->current_bandtype == BAND_ON_2_4G)
- mac->mode = WIRELESS_MODE_N_24G;
- else
- mac->mode = WIRELESS_MODE_N_5G;
- }
-
- if (sta->vht_cap.vht_supported) {
- if (rtlhal->current_bandtype == BAND_ON_5G)
- mac->mode = WIRELESS_MODE_AC_5G;
- else
- mac->mode = WIRELESS_MODE_AC_24G;
- }
-
- if (vif->type == NL80211_IFTYPE_STATION)
- rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0,
- true);
- rcu_read_unlock();
-
- /* to avoid AP Disassociation caused by inactivity */
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_KEEP_ALIVE,
- (u8 *)(&keep_alive));
-
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_ASSOC\n");
- } else {
- struct cfg80211_bss *bss = NULL;
-
- mstatus = RT_MEDIA_DISCONNECT;
-
- if (mac->link_state == MAC80211_LINKED)
- rtl_lps_leave(hw);
- if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
- rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
- mac->link_state = MAC80211_NOLINK;
-
- bss = cfg80211_get_bss(hw->wiphy, NULL,
- (u8 *)mac->bssid, NULL, 0,
- IEEE80211_BSS_TYPE_ESS,
- IEEE80211_PRIVACY_OFF);
-
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid = %x-%x-%x-%x-%x-%x\n",
- mac->bssid[0], mac->bssid[1],
- mac->bssid[2], mac->bssid[3],
- mac->bssid[4], mac->bssid[5]);
-
- if (bss) {
- cfg80211_unlink_bss(hw->wiphy, bss);
- cfg80211_put_bss(hw->wiphy, bss);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "cfg80211_unlink !!\n");
- }
-
- eth_zero_addr(mac->bssid);
- mac->vendor = PEER_UNKNOWN;
- mac->mode = 0;
-
- if (rtlpriv->dm.supp_phymode_switch) {
- if (rtlpriv->cfg->ops->chk_switch_dmdp)
- rtlpriv->cfg->ops->chk_switch_dmdp(hw);
- }
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_UN_ASSOC\n");
- }
- rtlpriv->cfg->ops->set_network_type(hw, vif->type);
- /* For FW LPS:
- * To tell firmware we have connected or disconnected
- */
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_JOINBSSRPT,
- (u8 *)(&mstatus));
- ppsc->report_linked = (mstatus == RT_MEDIA_CONNECT) ?
- true : false;
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_mediastatus_notify(
- rtlpriv, mstatus);
- }
-
- if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_CTS_PROT\n");
- mac->use_cts_protect = bss_conf->use_cts_prot;
- }
-
- if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
- bss_conf->use_short_preamble);
-
- mac->short_preamble = bss_conf->use_short_preamble;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
- (u8 *)(&mac->short_preamble));
- }
-
- if (changed & BSS_CHANGED_ERP_SLOT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_SLOT\n");
-
- if (bss_conf->use_short_slot)
- mac->slot_time = RTL_SLOT_TIME_9;
- else
- mac->slot_time = RTL_SLOT_TIME_20;
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)(&mac->slot_time));
- }
-
- if (changed & BSS_CHANGED_HT) {
- struct ieee80211_sta *sta = NULL;
-
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_HT\n");
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
- if (sta) {
- if (sta->ht_cap.ampdu_density >
- mac->current_ampdu_density)
- mac->current_ampdu_density =
- sta->ht_cap.ampdu_density;
- if (sta->ht_cap.ampdu_factor <
- mac->current_ampdu_factor)
- mac->current_ampdu_factor =
- sta->ht_cap.ampdu_factor;
-
- if (sta->ht_cap.ht_supported) {
- if (sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- rtlphy->max_ht_chan_bw =
- HT_CHANNEL_WIDTH_20_40;
- else
- rtlphy->max_ht_chan_bw =
- HT_CHANNEL_WIDTH_20;
- }
- }
- rcu_read_unlock();
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
- (u8 *)(&mac->max_mss_density));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
- &mac->current_ampdu_factor);
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
- &mac->current_ampdu_density);
- }
-
- if (changed & BSS_CHANGED_BANDWIDTH) {
- struct ieee80211_sta *sta = NULL;
-
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_BANDWIDTH\n");
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
-
- if (sta) {
- if (sta->ht_cap.ht_supported) {
- if (sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- rtlphy->max_ht_chan_bw =
- HT_CHANNEL_WIDTH_20_40;
- else
- rtlphy->max_ht_chan_bw =
- HT_CHANNEL_WIDTH_20;
- }
-
- if (sta->vht_cap.vht_supported)
- rtlphy->max_vht_chan_bw = HT_CHANNEL_WIDTH_80;
- }
- rcu_read_unlock();
- }
-
- if (changed & BSS_CHANGED_BSSID) {
- u32 basic_rates;
- struct ieee80211_sta *sta = NULL;
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
- (u8 *)bss_conf->bssid);
-
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid: %pM\n", bss_conf->bssid);
-
- mac->vendor = PEER_UNKNOWN;
- memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
- if (!sta) {
- rcu_read_unlock();
- goto out;
- }
-
- if (rtlhal->current_bandtype == BAND_ON_5G) {
- mac->mode = WIRELESS_MODE_A;
- } else {
- if (sta->supp_rates[0] <= 0xf)
- mac->mode = WIRELESS_MODE_B;
- else
- mac->mode = WIRELESS_MODE_G;
- }
-
- if (sta->ht_cap.ht_supported) {
- if (rtlhal->current_bandtype == BAND_ON_2_4G)
- mac->mode = WIRELESS_MODE_N_24G;
- else
- mac->mode = WIRELESS_MODE_N_5G;
- }
-
- if (sta->vht_cap.vht_supported) {
- if (rtlhal->current_bandtype == BAND_ON_5G)
- mac->mode = WIRELESS_MODE_AC_5G;
- else
- mac->mode = WIRELESS_MODE_AC_24G;
- }
-
- /* Only station mode needs this; IBSS and AP mode
- * set it in sta_add, and it would be NULL here.
- */
- if (vif->type == NL80211_IFTYPE_STATION) {
- struct rtl_sta_info *sta_entry;
-
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- sta_entry->wireless_mode = mac->mode;
- }
-
- if (sta->ht_cap.ht_supported) {
- mac->ht_enable = true;
-
- /* for cisco 1252 bw20 it's wrong
- * if (ht_cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- * mac->bw_40 = true;
- * }
- */
- }
-
- if (sta->vht_cap.vht_supported)
- mac->vht_enable = true;
-
- if (changed & BSS_CHANGED_BASIC_RATES) {
- /* For 5G, shift left by RATE_6M_INDEX (= 4),
- * because 5G has no CCK rates.
- */
- if (rtlhal->current_bandtype == BAND_ON_5G)
- basic_rates = sta->supp_rates[1] << 4;
- else
- basic_rates = sta->supp_rates[0];
-
- mac->basic_rates = basic_rates;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
- (u8 *)(&basic_rates));
- }
- rcu_read_unlock();
- }
-out:
- mutex_unlock(&rtlpriv->locks.conf_mutex);
-}
-
-static u64 rtl_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u64 tsf;
-
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *)(&tsf));
- return tsf;
-}
-
-static void rtl_op_set_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u64 tsf)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
-
- mac->tsf = tsf;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *)(&bibss));
-}
-
-static void rtl_op_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp = 0;
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *)(&tmp));
-}
-
-static void rtl_op_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
-{
- switch (cmd) {
- case STA_NOTIFY_SLEEP:
- break;
- case STA_NOTIFY_AWAKE:
- break;
- default:
- break;
- }
-}
-
-static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_sta *sta = params->sta;
- enum ieee80211_ampdu_mlme_action action = params->action;
- u16 tid = params->tid;
- u16 *ssn = &params->ssn;
-
- switch (action) {
- case IEEE80211_AMPDU_TX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
- return rtl_tx_agg_start(hw, vif, sta, tid, ssn);
- case IEEE80211_AMPDU_TX_STOP_CONT:
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
- return rtl_tx_agg_stop(hw, vif, sta, tid);
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
- rtl_tx_agg_oper(hw, sta, tid);
- break;
- case IEEE80211_AMPDU_RX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
- return rtl_rx_agg_start(hw, sta, tid);
- case IEEE80211_AMPDU_RX_STOP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
- return rtl_rx_agg_stop(hw, sta, tid);
- default:
- pr_err("IEEE80211_AMPDU_ERR!!!!:\n");
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *mac_addr)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
- mac->act_scanning = true;
- if (rtlpriv->link_info.higher_busytraffic) {
- mac->skip_scan = true;
- return;
- }
-
- if (rtlpriv->phydm.ops)
- rtlpriv->phydm.ops->phydm_pause_dig(rtlpriv, 1);
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 1);
- else if (rtlpriv->btcoexist.btc_ops)
- rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv,
- 1);
-
- if (rtlpriv->dm.supp_phymode_switch) {
- if (rtlpriv->cfg->ops->chk_switch_dmdp)
- rtlpriv->cfg->ops->chk_switch_dmdp(hw);
- }
-
- if (mac->link_state == MAC80211_LINKED) {
- rtl_lps_leave(hw);
- mac->link_state = MAC80211_LINKED_SCANNING;
- } else {
- rtl_ips_nic_on(hw);
- }
-
- /* Dual MAC */
- rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false;
-
- rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY);
- rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0);
-}
-
-static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
- mac->act_scanning = false;
- mac->skip_scan = false;
-
- rtlpriv->btcoexist.btc_info.ap_num = rtlpriv->scan_list.num;
-
- if (rtlpriv->link_info.higher_busytraffic)
- return;
-
- /* p2p will use 1/6/11 to scan */
- if (mac->n_channels == 3)
- mac->p2p_in_use = true;
- else
- mac->p2p_in_use = false;
- mac->n_channels = 0;
- /* Dual MAC */
- rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false;
-
- if (mac->link_state == MAC80211_LINKED_SCANNING) {
- mac->link_state = MAC80211_LINKED;
- if (mac->opmode == NL80211_IFTYPE_STATION) {
- /* fix fwlps issue */
- rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
- }
- }
-
- rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 0);
- else if (rtlpriv->btcoexist.btc_ops)
- rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv,
- 0);
-
- if (rtlpriv->phydm.ops)
- rtlpriv->phydm.ops->phydm_pause_dig(rtlpriv, 0);
-}
-
-static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 key_type = NO_ENCRYPTION;
- u8 key_idx;
- bool group_key = false;
- bool wep_only = false;
- int err = 0;
- u8 mac_addr[ETH_ALEN];
- u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
- rtlpriv->btcoexist.btc_info.in_4way = false;
-
- if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not open hw encryption\n");
- return -ENOSPC; /*User disabled HW-crypto */
- }
- /* To support IBSS, use sw-crypto for GTK */
- if ((vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -ENOSPC;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s hardware based encryption for keyidx: %d, mac: %pM\n",
- cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
- sta ? sta->addr : bcast_addr);
- rtlpriv->sec.being_setkey = true;
- rtl_ips_nic_on(hw);
- mutex_lock(&rtlpriv->locks.conf_mutex);
- /* <1> get encryption alg */
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- key_type = WEP40_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
- key_type = WEP104_ENCRYPTION;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- key_type = TKIP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- key_type = AESCCMP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- /* HW doesn't support CMAC encryption,
- * use software CMAC encryption.
- */
- key_type = AESCMAC_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "HW doesn't support CMAC encryption, use software CMAC encryption\n");
- err = -EOPNOTSUPP;
- goto out_unlock;
- default:
- pr_err("alg_err:%x!!!!:\n", key->cipher);
- goto out_unlock;
- }
- if (key_type == WEP40_ENCRYPTION ||
- key_type == WEP104_ENCRYPTION ||
- vif->type == NL80211_IFTYPE_ADHOC)
- rtlpriv->sec.use_defaultkey = true;
-
- /* <2> get key_idx */
- key_idx = (u8)(key->keyidx);
- if (key_idx > 3)
- goto out_unlock;
- /* <3> if pairwise key enable_hw_sec */
- group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
-
- /* WEP is always a group key, but there are two cases:
- * 1) WEP only: used just for WEP encryption; here
- * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION
- * will be true and enable_hw_sec will be set when the
- * WEP key is set.
- * 2) WEP (group) + AES (pairwise): some APs, such as
- * Cisco, may use it; in this case enable_hw_sec will
- * not be set when the WEP key is set.
- * We must reset sec_info after getting linked and before
- * setting the key, or some flags will be wrong.
- */
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT) {
- if (!group_key || key_type == WEP40_ENCRYPTION ||
- key_type == WEP104_ENCRYPTION) {
- if (group_key)
- wep_only = true;
- rtlpriv->cfg->ops->enable_hw_sec(hw);
- }
- } else {
- if (!group_key || vif->type == NL80211_IFTYPE_ADHOC ||
- rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
- if (rtlpriv->sec.pairwise_enc_algorithm ==
- NO_ENCRYPTION &&
- (key_type == WEP40_ENCRYPTION ||
- key_type == WEP104_ENCRYPTION))
- wep_only = true;
- rtlpriv->sec.pairwise_enc_algorithm = key_type;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
- key_type);
- rtlpriv->cfg->ops->enable_hw_sec(hw);
- }
- }
- /* <4> set key based on cmd */
- switch (cmd) {
- case SET_KEY:
- if (wep_only) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set WEP(group/pairwise) key\n");
- /* Pairwise key with an assigned MAC address. */
- rtlpriv->sec.pairwise_enc_algorithm = key_type;
- rtlpriv->sec.group_enc_algorithm = key_type;
- /*set local buf about wep key. */
- memcpy(rtlpriv->sec.key_buf[key_idx],
- key->key, key->keylen);
- rtlpriv->sec.key_len[key_idx] = key->keylen;
- eth_zero_addr(mac_addr);
- } else if (group_key) { /* group key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
- /* group key */
- rtlpriv->sec.group_enc_algorithm = key_type;
- /*set local buf about group key. */
- memcpy(rtlpriv->sec.key_buf[key_idx],
- key->key, key->keylen);
- rtlpriv->sec.key_len[key_idx] = key->keylen;
- memcpy(mac_addr, bcast_addr, ETH_ALEN);
- } else { /* pairwise key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set pairwise key\n");
- if (!sta) {
- WARN_ONCE(true,
- "rtlwifi: pairwise key without mac_addr\n");
-
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
- /* Pairwise key with an assigned MAC address. */
- rtlpriv->sec.pairwise_enc_algorithm = key_type;
- /*set local buf about pairwise key. */
- memcpy(rtlpriv->sec.key_buf[PAIRWISE_KEYIDX],
- key->key, key->keylen);
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX] = key->keylen;
- rtlpriv->sec.pairwise_key =
- rtlpriv->sec.key_buf[PAIRWISE_KEYIDX];
- memcpy(mac_addr, sta->addr, ETH_ALEN);
- }
- rtlpriv->cfg->ops->set_key(hw, key_idx, mac_addr,
- group_key, key_type, wep_only,
- false);
- /* <5> tell mac80211 do something: */
- /* must use SW-generated IVs, or it cannot work. */
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- key->hw_key_idx = key_idx;
- if (key_type == TKIP_ENCRYPTION)
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- /*use software CCMP encryption for management frames (MFP) */
- if (key_type == AESCCMP_ENCRYPTION)
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
- break;
- case DISABLE_KEY:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "disable key delete one entry\n");
- /*set local buf about wep key. */
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT) {
- if (sta)
- rtl_cam_del_entry(hw, sta->addr);
- }
- memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
- rtlpriv->sec.key_len[key_idx] = 0;
- eth_zero_addr(mac_addr);
- /*
- *mac80211 will delete entries one by one,
- *so don't use rtl_cam_reset_all_entry
- *or clear all entries here.
- */
- rtl_wait_tx_report_acked(hw, 500); /* wait 500ms for TX ack */
-
- rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
- break;
- default:
- pr_err("cmd_err:%x!!!!:\n", cmd);
- }
-out_unlock:
- mutex_unlock(&rtlpriv->locks.conf_mutex);
- rtlpriv->sec.being_setkey = false;
- return err;
-}
-
-static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- bool radio_state;
- bool blocked;
- u8 valid = 0;
-
- if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
- return;
-
- mutex_lock(&rtlpriv->locks.conf_mutex);
-
- /* returns true here if the radio is on */
- radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
-
- if (valid) {
- if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
- rtlpriv->rfkill.rfkill_state = radio_state;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "wireless radio switch turned %s\n",
- radio_state ? "on" : "off");
-
- blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
- wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
- }
- }
-
- mutex_unlock(&rtlpriv->locks.conf_mutex);
-}
-
-/* This function is called by mac80211 to flush the TX buffer
- * before a channel switch or power save; otherwise buffered
- * packets might be sent while off-channel or with the RF asleep,
- * which may cause disassociation by the AP.
- */
-static void rtl_op_flush(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- u32 queues,
- bool drop)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->intf_ops->flush)
- rtlpriv->intf_ops->flush(hw, queues, drop);
-}
-
-/* Description:
- * This routine handles Power Configuration CMD parsing
- * for RTL8723/RTL8188E series ICs.
- * Assumption:
- * We should follow the specific format released by HW SD.
- */
-bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
- u8 faversion, u8 interface_type,
- struct wlan_pwr_cfg pwrcfgcmd[])
-{
- struct wlan_pwr_cfg cfg_cmd;
- bool polling_bit = false;
- u32 ary_idx = 0;
- u8 value = 0;
- u32 offset = 0;
- u32 polling_count = 0;
- u32 max_polling_cnt = 5000;
-
- do {
- cfg_cmd = pwrcfgcmd[ary_idx];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
- __func__, GET_PWR_CFG_OFFSET(cfg_cmd),
- GET_PWR_CFG_CUT_MASK(cfg_cmd),
- GET_PWR_CFG_FAB_MASK(cfg_cmd),
- GET_PWR_CFG_INTF_MASK(cfg_cmd),
- GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
- GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
-
- if ((GET_PWR_CFG_FAB_MASK(cfg_cmd) & faversion) &&
- (GET_PWR_CFG_CUT_MASK(cfg_cmd) & cut_version) &&
- (GET_PWR_CFG_INTF_MASK(cfg_cmd) & interface_type)) {
- switch (GET_PWR_CFG_CMD(cfg_cmd)) {
- case PWR_CMD_READ:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_READ\n", __func__);
- break;
- case PWR_CMD_WRITE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_WRITE\n", __func__);
- offset = GET_PWR_CFG_OFFSET(cfg_cmd);
-
- /*Read the value from system register*/
- value = rtl_read_byte(rtlpriv, offset);
- value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
- value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
- GET_PWR_CFG_MASK(cfg_cmd));
-
- /*Write the value back to system register*/
- rtl_write_byte(rtlpriv, offset, value);
- break;
- case PWR_CMD_POLLING:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_POLLING\n", __func__);
- polling_bit = false;
- offset = GET_PWR_CFG_OFFSET(cfg_cmd);
-
- do {
- value = rtl_read_byte(rtlpriv, offset);
-
- value &= GET_PWR_CFG_MASK(cfg_cmd);
- if (value ==
- (GET_PWR_CFG_VALUE(cfg_cmd) &
- GET_PWR_CFG_MASK(cfg_cmd)))
- polling_bit = true;
- else
- udelay(10);
-
- if (polling_count++ > max_polling_cnt)
- return false;
- } while (!polling_bit);
- break;
- case PWR_CMD_DELAY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_DELAY\n", __func__);
- if (GET_PWR_CFG_VALUE(cfg_cmd) ==
- PWRSEQ_DELAY_US)
- udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
- else
- mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
- break;
- case PWR_CMD_END:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_END\n", __func__);
- return true;
- default:
- WARN_ONCE(true,
- "rtlwifi: %s(): Unknown CMD!!\n",
- __func__);
- break;
- }
- }
- ary_idx++;
- } while (1);
-
- return true;
-}
-
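rtl_hal_pwrseqcmdparsing() walks the wlan_pwr_cfg table one entry at a time, skipping entries whose cut/fab/interface masks do not match, and executes read, write, polling, and delay commands until a PWR_CMD_END entry returns success or a polling step exceeds max_polling_cnt and returns failure. A minimal caller sketch; the table name and interface mask below are placeholders, not names taken from this file:

	/* Hypothetical power-on sequence call (placeholder names). */
	if (!rtl_hal_pwrseqcmdparsing(rtlpriv, cut_version, fab_version,
				      intf_mask, card_enable_flow))
		return false;	/* a polling command timed out */
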
-bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl8192_tx_ring *ring;
- struct rtl_tx_desc *pdesc;
- unsigned long flags;
- struct sk_buff *pskb = NULL;
-
- ring = &rtlpci->tx_ring[BEACON_QUEUE];
-
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
- pskb = __skb_dequeue(&ring->queue);
- if (pskb)
- dev_kfree_skb_irq(pskb);
-
- /*this is wrong, fill_tx_cmddesc needs update*/
- pdesc = &ring->desc[0];
-
- rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
-
- __skb_queue_tail(&ring->queue, skb);
-
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
- rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
-
- return true;
-}
-
-const struct ieee80211_ops rtl_ops = {
- .start = rtl_op_start,
- .stop = rtl_op_stop,
- .tx = rtl_op_tx,
- .add_interface = rtl_op_add_interface,
- .remove_interface = rtl_op_remove_interface,
- .change_interface = rtl_op_change_interface,
-#ifdef CONFIG_PM
- .suspend = rtl_op_suspend,
- .resume = rtl_op_resume,
-#endif
- .config = rtl_op_config,
- .configure_filter = rtl_op_configure_filter,
- .set_key = rtl_op_set_key,
- .sta_statistics = rtl_op_sta_statistics,
- .set_frag_threshold = rtl_op_set_frag_threshold,
- .conf_tx = rtl_op_conf_tx,
- .bss_info_changed = rtl_op_bss_info_changed,
- .get_tsf = rtl_op_get_tsf,
- .set_tsf = rtl_op_set_tsf,
- .reset_tsf = rtl_op_reset_tsf,
- .sta_notify = rtl_op_sta_notify,
- .ampdu_action = rtl_op_ampdu_action,
- .sw_scan_start = rtl_op_sw_scan_start,
- .sw_scan_complete = rtl_op_sw_scan_complete,
- .rfkill_poll = rtl_op_rfkill_poll,
- .sta_add = rtl_op_sta_add,
- .sta_remove = rtl_op_sta_remove,
- .flush = rtl_op_flush,
-};
-
-bool rtl_btc_status_false(void)
-{
- return false;
-}
-
-void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igvalue)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
-
- dm_digtable->dig_enable_flag = true;
- dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
- dm_digtable->cur_igvalue = cur_igvalue;
- dm_digtable->pre_igvalue = 0;
- dm_digtable->cur_sta_cstate = DIG_STA_DISCONNECT;
- dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
- dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
- dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
- dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
- dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- dm_digtable->rx_gain_max = DM_DIG_MAX;
- dm_digtable->rx_gain_min = DM_DIG_MIN;
- dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
- dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
- dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
- dm_digtable->pre_cck_cca_thres = 0xff;
- dm_digtable->cur_cck_cca_thres = 0x83;
- dm_digtable->forbidden_igi = DM_DIG_MIN;
- dm_digtable->large_fa_hit = 0;
- dm_digtable->recover_cnt = 0;
- dm_digtable->dig_min_0 = 0x25;
- dm_digtable->dig_min_1 = 0x25;
- dm_digtable->media_connect_0 = false;
- dm_digtable->media_connect_1 = false;
- rtlpriv->dm.dm_initialgain_enable = true;
- dm_digtable->bt30_cur_igi = 0x32;
- dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
- dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
-}
diff --git a/drivers/staging/rtlwifi/core.h b/drivers/staging/rtlwifi/core.h
deleted file mode 100644
index 991af1abf8ca..000000000000
--- a/drivers/staging/rtlwifi/core.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_CORE_H__
-#define __RTL_CORE_H__
-
-#define RTL_SUPPORTED_FILTERS \
- (FIF_ALLMULTI | FIF_CONTROL | \
- FIF_OTHER_BSS | \
- FIF_FCSFAIL | \
- FIF_BCN_PRBRESP_PROMISC)
-
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1e
-#define DM_DIG_MAX_AP 0x32
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
-enum cck_packet_detection_threshold {
- CCK_PD_STAGE_LOWRSSI = 0,
- CCK_PD_STAGE_HIGHRSSI = 1,
- CCK_FA_STAGE_LOW = 2,
- CCK_FA_STAGE_HIGH = 3,
- CCK_PD_STAGE_MAX = 4,
-};
-
-enum dm_dig_ext_port_alg_e {
- DIG_EXT_PORT_STAGE_0 = 0,
- DIG_EXT_PORT_STAGE_1 = 1,
- DIG_EXT_PORT_STAGE_2 = 2,
- DIG_EXT_PORT_STAGE_3 = 3,
- DIG_EXT_PORT_STAGE_MAX = 4,
-};
-
-enum dm_dig_connect_e {
- DIG_STA_DISCONNECT,
- DIG_STA_CONNECT,
- DIG_STA_BEFORE_CONNECT,
- DIG_MULTISTA_DISCONNECT,
- DIG_MULTISTA_CONNECT,
- DIG_AP_DISCONNECT,
- DIG_AP_CONNECT,
- DIG_AP_ADD_STATION,
- DIG_CONNECT_MAX
-};
-
-extern const struct ieee80211_ops rtl_ops;
-void rtl_fw_cb(const struct firmware *firmware, void *context);
-void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
-bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
-bool rtl_btc_status_false(void);
-void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval);
-
-#endif
diff --git a/drivers/staging/rtlwifi/debug.c b/drivers/staging/rtlwifi/debug.c
deleted file mode 100644
index 8999feda29b4..000000000000
--- a/drivers/staging/rtlwifi/debug.c
+++ /dev/null
@@ -1,624 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *****************************************************************************/
-
-#include "wifi.h"
-#include "cam.h"
-
-#include <linux/moduleparam.h>
-#include <linux/vmalloc.h>
-
-#ifdef CONFIG_RTLWIFI_DEBUG_ST
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *fmt, ...)
-{
- if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- level <= rtlpriv->cfg->mod_params->debug_level)) {
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- pr_info(":<%lx> %pV", in_interrupt(), &vaf);
-
- va_end(args);
- }
-}
-
-void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *fmt, ...)
-{
- if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- level <= rtlpriv->cfg->mod_params->debug_level)) {
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- pr_info("%pV", &vaf);
-
- va_end(args);
- }
-}
-
-void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *titlestring,
- const void *hexdata, int hexdatalen)
-{
- if (unlikely(((comp) & rtlpriv->cfg->mod_params->debug_mask) &&
- ((level) <= rtlpriv->cfg->mod_params->debug_level))) {
- pr_info("In process \"%s\" (pid %i): %s\n",
- current->comm, current->pid, titlestring);
- print_hex_dump_bytes("", DUMP_PREFIX_NONE,
- hexdata, hexdatalen);
- }
-}
-
-struct rtl_debugfs_priv {
- struct rtl_priv *rtlpriv;
- int (*cb_read)(struct seq_file *m, void *v);
- ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
- size_t count, loff_t *loff);
- u32 cb_data;
-};
-
-static struct dentry *debugfs_topdir;
-
-static int rtl_debug_get_common(struct seq_file *m, void *v)
-{
- struct rtl_debugfs_priv *debugfs_priv = m->private;
-
- return debugfs_priv->cb_read(m, v);
-}
-
-static int dl_debug_open_common(struct inode *inode, struct file *file)
-{
- return single_open(file, rtl_debug_get_common, inode->i_private);
-}
-
-static const struct file_operations file_ops_common = {
- .open = dl_debug_open_common,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
-{
- struct rtl_debugfs_priv *debugfs_priv = m->private;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
- u32 page = debugfs_priv->cb_data;
- int i, n;
- int max = 0xff;
-
- for (n = 0; n <= max; ) {
- seq_printf(m, "\n%8.8x ", n + page);
- for (i = 0; i < 4 && n <= max; i++, n += 4)
- seq_printf(m, "%8.8x ",
- rtl_read_dword(rtlpriv, (page | n)));
- }
- seq_puts(m, "\n");
- return 0;
-}
-
-#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr) \
-static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = { \
- .cb_read = rtl_debug_get_mac_page, \
- .cb_data = addr, \
-}
-
-RTL_DEBUG_IMPL_MAC_SERIES(0, 0x0000);
-RTL_DEBUG_IMPL_MAC_SERIES(1, 0x0100);
-RTL_DEBUG_IMPL_MAC_SERIES(2, 0x0200);
-RTL_DEBUG_IMPL_MAC_SERIES(3, 0x0300);
-RTL_DEBUG_IMPL_MAC_SERIES(4, 0x0400);
-RTL_DEBUG_IMPL_MAC_SERIES(5, 0x0500);
-RTL_DEBUG_IMPL_MAC_SERIES(6, 0x0600);
-RTL_DEBUG_IMPL_MAC_SERIES(7, 0x0700);
-RTL_DEBUG_IMPL_MAC_SERIES(10, 0x1000);
-RTL_DEBUG_IMPL_MAC_SERIES(11, 0x1100);
-RTL_DEBUG_IMPL_MAC_SERIES(12, 0x1200);
-RTL_DEBUG_IMPL_MAC_SERIES(13, 0x1300);
-RTL_DEBUG_IMPL_MAC_SERIES(14, 0x1400);
-RTL_DEBUG_IMPL_MAC_SERIES(15, 0x1500);
-RTL_DEBUG_IMPL_MAC_SERIES(16, 0x1600);
-RTL_DEBUG_IMPL_MAC_SERIES(17, 0x1700);
-
-static int rtl_debug_get_bb_page(struct seq_file *m, void *v)
-{
- struct rtl_debugfs_priv *debugfs_priv = m->private;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
- struct ieee80211_hw *hw = rtlpriv->hw;
- u32 page = debugfs_priv->cb_data;
- int i, n;
- int max = 0xff;
-
- for (n = 0; n <= max; ) {
- seq_printf(m, "\n%8.8x ", n + page);
- for (i = 0; i < 4 && n <= max; i++, n += 4)
- seq_printf(m, "%8.8x ",
- rtl_get_bbreg(hw, (page | n), 0xffffffff));
- }
- seq_puts(m, "\n");
- return 0;
-}
-
-#define RTL_DEBUG_IMPL_BB_SERIES(page, addr) \
-static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = { \
- .cb_read = rtl_debug_get_bb_page, \
- .cb_data = addr, \
-}
-
-RTL_DEBUG_IMPL_BB_SERIES(8, 0x0800);
-RTL_DEBUG_IMPL_BB_SERIES(9, 0x0900);
-RTL_DEBUG_IMPL_BB_SERIES(a, 0x0a00);
-RTL_DEBUG_IMPL_BB_SERIES(b, 0x0b00);
-RTL_DEBUG_IMPL_BB_SERIES(c, 0x0c00);
-RTL_DEBUG_IMPL_BB_SERIES(d, 0x0d00);
-RTL_DEBUG_IMPL_BB_SERIES(e, 0x0e00);
-RTL_DEBUG_IMPL_BB_SERIES(f, 0x0f00);
-RTL_DEBUG_IMPL_BB_SERIES(18, 0x1800);
-RTL_DEBUG_IMPL_BB_SERIES(19, 0x1900);
-RTL_DEBUG_IMPL_BB_SERIES(1a, 0x1a00);
-RTL_DEBUG_IMPL_BB_SERIES(1b, 0x1b00);
-RTL_DEBUG_IMPL_BB_SERIES(1c, 0x1c00);
-RTL_DEBUG_IMPL_BB_SERIES(1d, 0x1d00);
-RTL_DEBUG_IMPL_BB_SERIES(1e, 0x1e00);
-RTL_DEBUG_IMPL_BB_SERIES(1f, 0x1f00);
-
-static int rtl_debug_get_reg_rf(struct seq_file *m, void *v)
-{
- struct rtl_debugfs_priv *debugfs_priv = m->private;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
- struct ieee80211_hw *hw = rtlpriv->hw;
- enum radio_path rfpath = debugfs_priv->cb_data;
- int i, n;
- int max = 0x40;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- max = 0xff;
-
- seq_printf(m, "\nPATH(%d)", rfpath);
-
- for (n = 0; n <= max; ) {
- seq_printf(m, "\n%8.8x ", n);
- for (i = 0; i < 4 && n <= max; n += 1, i++)
- seq_printf(m, "%8.8x ",
- rtl_get_rfreg(hw, rfpath, n, 0xffffffff));
- }
- seq_puts(m, "\n");
- return 0;
-}
-
-#define RTL_DEBUG_IMPL_RF_SERIES(page, addr) \
-static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = { \
- .cb_read = rtl_debug_get_reg_rf, \
- .cb_data = addr, \
-}
-
-RTL_DEBUG_IMPL_RF_SERIES(a, RF90_PATH_A);
-RTL_DEBUG_IMPL_RF_SERIES(b, RF90_PATH_B);
-
-static int rtl_debug_get_cam_register(struct seq_file *m, void *v)
-{
- struct rtl_debugfs_priv *debugfs_priv = m->private;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
- int start = debugfs_priv->cb_data;
- u32 target_cmd = 0;
- u32 target_val = 0;
- u8 entry_i = 0;
- u32 ulstatus;
- int i = 100, j = 0;
- int end = (start + 11 > TOTAL_CAM_ENTRY ? TOTAL_CAM_ENTRY : start + 11);
-
- /* This dump the current register page */
- seq_printf(m,
- "\n#################### SECURITY CAM (%d-%d) ##################\n",
- start, end - 1);
-
- for (j = start; j < end; j++) {
- seq_printf(m, "\nD: %2x > ", j);
- for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
- /* polling bit, and No Write enable, and address */
- target_cmd = entry_i + CAM_CONTENT_COUNT * j;
- target_cmd = target_cmd | BIT(31);
-
- /* Check polling bit is clear */
- while ((i--) >= 0) {
- ulstatus = rtl_read_dword(
- rtlpriv,
- rtlpriv->cfg->maps[RWCAM]);
- if (ulstatus & BIT(31))
- continue;
- else
- break;
- }
-
- rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
- target_cmd);
- target_val = rtl_read_dword(rtlpriv,
- rtlpriv->cfg->maps[RCAMO]);
- seq_printf(m, "%8.8x ", target_val);
- }
- }
- seq_puts(m, "\n");
- return 0;
-}
-
-#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr) \
-static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = { \
- .cb_read = rtl_debug_get_cam_register, \
- .cb_data = addr, \
-}
-
-RTL_DEBUG_IMPL_CAM_SERIES(1, 0);
-RTL_DEBUG_IMPL_CAM_SERIES(2, 11);
-RTL_DEBUG_IMPL_CAM_SERIES(3, 22);
-
-static int rtl_debug_get_btcoex(struct seq_file *m, void *v)
-{
- struct rtl_debugfs_priv *debugfs_priv = m->private;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_display_bt_coex_info(rtlpriv,
- m);
-
- seq_puts(m, "\n");
-
- return 0;
-}
-
-static struct rtl_debugfs_priv rtl_debug_priv_btcoex = {
- .cb_read = rtl_debug_get_btcoex,
- .cb_data = 0,
-};
-
-static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
-{
- struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
- char tmp[32 + 1];
- int tmp_len;
- u32 addr, val, len;
- int num;
-
- if (count < 3)
- return -EFAULT;
-
- tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
-
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
-
- tmp[tmp_len] = '\0';
-
- /* write BB/MAC register */
- num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
-
- if (num != 3)
- return count;
-
- switch (len) {
- case 1:
- rtl_write_byte(rtlpriv, addr, (u8)val);
- break;
- case 2:
- rtl_write_word(rtlpriv, addr, (u16)val);
- break;
- case 4:
- rtl_write_dword(rtlpriv, addr, val);
- break;
- default:
- /*printk("error write length=%d", len);*/
- break;
- }
-
- return count;
-}
-
-static struct rtl_debugfs_priv rtl_debug_priv_write_reg = {
- .cb_write = rtl_debugfs_set_write_reg,
-};
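
As the sscanf() format above implies, the write_reg debugfs file takes three hex fields, "<addr> <value> <len>", where len selects a 1-, 2-, or 4-byte register write; for example, writing the string "4c4 3 1" would perform a single-byte write of 0x3 to register 0x4c4 (the register address here is only an illustration, not a documented offset).
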
-
-static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
-{
- struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
- struct ieee80211_hw *hw = rtlpriv->hw;
- char tmp[32 + 1];
- int tmp_len;
- u8 h2c_len, h2c_data_packed[8];
- int h2c_data[8]; /* idx 0: cmd */
- int i;
-
- if (count < 3)
- return -EFAULT;
-
- tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
-
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
-
- tmp[tmp_len] = '\0';
-
- h2c_len = sscanf(tmp, "%X %X %X %X %X %X %X %X",
- &h2c_data[0], &h2c_data[1],
- &h2c_data[2], &h2c_data[3],
- &h2c_data[4], &h2c_data[5],
- &h2c_data[6], &h2c_data[7]);
-
- if (h2c_len <= 0)
- return count;
-
- for (i = 0; i < h2c_len; i++)
- h2c_data_packed[i] = (u8)h2c_data[i];
-
- rtlpriv->cfg->ops->fill_h2c_cmd(hw, h2c_data_packed[0],
- h2c_len - 1,
- &h2c_data_packed[1]);
-
- return count;
-}
-
-static struct rtl_debugfs_priv rtl_debug_priv_write_h2c = {
- .cb_write = rtl_debugfs_set_write_h2c,
-};
-
-static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
-{
- struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
- struct ieee80211_hw *hw = rtlpriv->hw;
- char tmp[32 + 1];
- int tmp_len;
- int num;
- int path;
- u32 addr, bitmask, data;
-
- if (count < 3)
- return -EFAULT;
-
- tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
-
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
-
- tmp[tmp_len] = '\0';
-
- num = sscanf(tmp, "%X %X %X %X",
- &path, &addr, &bitmask, &data);
-
- if (num != 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "Format is <path> <addr> <mask> <data>\n");
- return count;
- }
-
- rtl_set_rfreg(hw, path, addr, bitmask, data);
-
- return count;
-}
-
-static struct rtl_debugfs_priv rtl_debug_priv_write_rfreg = {
- .cb_write = rtl_debugfs_set_write_rfreg,
-};
-
-static int rtl_debugfs_close(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-static ssize_t rtl_debugfs_common_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
-{
- struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
-
- return debugfs_priv->cb_write(filp, buffer, count, loff);
-}
-
-static const struct file_operations file_ops_common_write = {
- .owner = THIS_MODULE,
- .write = rtl_debugfs_common_write,
- .open = simple_open,
- .release = rtl_debugfs_close,
-};
-
-static ssize_t rtl_debugfs_phydm_cmd(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
-{
- struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
-
- char tmp[64];
-
- if (!rtlpriv->dbg.msg_buf)
- return -ENOMEM;
-
- if (!rtlpriv->phydm.ops)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- tmp[count] = '\0';
-
- rtlpriv->phydm.ops->phydm_debug_cmd(rtlpriv, tmp, count,
- rtlpriv->dbg.msg_buf,
- 80 * 25);
- }
-
- return count;
-}
-
-static int rtl_debug_get_phydm_cmd(struct seq_file *m, void *v)
-{
- struct rtl_debugfs_priv *debugfs_priv = m->private;
- struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
-
- if (rtlpriv->dbg.msg_buf)
- seq_puts(m, rtlpriv->dbg.msg_buf);
-
- return 0;
-}
-
-static int rtl_debugfs_open_rw(struct inode *inode, struct file *filp)
-{
- int ret = 0;
-
- if (filp->f_mode & FMODE_READ)
- ret = single_open(filp, rtl_debug_get_common, inode->i_private);
- else
- filp->private_data = inode->i_private;
-
- return ret;
-}
-
-static int rtl_debugfs_close_rw(struct inode *inode, struct file *filp)
-{
- if (filp->f_mode == FMODE_READ)
- single_release(inode, filp);
-
- return 0;
-}
-
-static struct rtl_debugfs_priv rtl_debug_priv_phydm_cmd = {
- .cb_read = rtl_debug_get_phydm_cmd,
- .cb_write = rtl_debugfs_phydm_cmd,
- .cb_data = 0,
-};
-
-static const struct file_operations file_ops_common_rw = {
- .owner = THIS_MODULE,
- .open = rtl_debugfs_open_rw,
- .release = rtl_debugfs_close_rw,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = rtl_debugfs_common_write,
-};
-
-#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \
- do { \
- rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \
- debugfs_create_file(#name, mode, parent, \
- &rtl_debug_priv_ ##name, \
- &file_ops_ ##fopname); \
- } while (0)
-
-#define RTL_DEBUGFS_ADD(name) \
- RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0444, common)
-#define RTL_DEBUGFS_ADD_W(name) \
- RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0222, common_write)
-#define RTL_DEBUGFS_ADD_RW(name) \
- RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0666, common_rw)
-
-void rtl_debug_add_one(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct dentry *parent;
-
- rtlpriv->dbg.msg_buf = vzalloc(80 * 25);
-
- snprintf(rtlpriv->dbg.debugfs_name, 18, "%pMF", rtlefuse->dev_addr);
-
- rtlpriv->dbg.debugfs_dir =
- debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir);
- if (!rtlpriv->dbg.debugfs_dir) {
- pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name,
- rtlpriv->dbg.debugfs_name);
- return;
- }
-
- parent = rtlpriv->dbg.debugfs_dir;
-
- RTL_DEBUGFS_ADD(mac_0);
- RTL_DEBUGFS_ADD(mac_1);
- RTL_DEBUGFS_ADD(mac_2);
- RTL_DEBUGFS_ADD(mac_3);
- RTL_DEBUGFS_ADD(mac_4);
- RTL_DEBUGFS_ADD(mac_5);
- RTL_DEBUGFS_ADD(mac_6);
- RTL_DEBUGFS_ADD(mac_7);
- RTL_DEBUGFS_ADD(bb_8);
- RTL_DEBUGFS_ADD(bb_9);
- RTL_DEBUGFS_ADD(bb_a);
- RTL_DEBUGFS_ADD(bb_b);
- RTL_DEBUGFS_ADD(bb_c);
- RTL_DEBUGFS_ADD(bb_d);
- RTL_DEBUGFS_ADD(bb_e);
- RTL_DEBUGFS_ADD(bb_f);
- RTL_DEBUGFS_ADD(mac_10);
- RTL_DEBUGFS_ADD(mac_11);
- RTL_DEBUGFS_ADD(mac_12);
- RTL_DEBUGFS_ADD(mac_13);
- RTL_DEBUGFS_ADD(mac_14);
- RTL_DEBUGFS_ADD(mac_15);
- RTL_DEBUGFS_ADD(mac_16);
- RTL_DEBUGFS_ADD(mac_17);
- RTL_DEBUGFS_ADD(bb_18);
- RTL_DEBUGFS_ADD(bb_19);
- RTL_DEBUGFS_ADD(bb_1a);
- RTL_DEBUGFS_ADD(bb_1b);
- RTL_DEBUGFS_ADD(bb_1c);
- RTL_DEBUGFS_ADD(bb_1d);
- RTL_DEBUGFS_ADD(bb_1e);
- RTL_DEBUGFS_ADD(bb_1f);
- RTL_DEBUGFS_ADD(rf_a);
- RTL_DEBUGFS_ADD(rf_b);
-
- RTL_DEBUGFS_ADD(cam_1);
- RTL_DEBUGFS_ADD(cam_2);
- RTL_DEBUGFS_ADD(cam_3);
-
- RTL_DEBUGFS_ADD(btcoex);
-
- RTL_DEBUGFS_ADD_W(write_reg);
- RTL_DEBUGFS_ADD_W(write_h2c);
- RTL_DEBUGFS_ADD_W(write_rfreg);
-
- RTL_DEBUGFS_ADD_RW(phydm_cmd);
-}
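
Together with rtl_debugfs_add_topdir() below, this places the per-device files under debugfs as rtlwifi/<device MAC>/{mac_*, bb_*, rf_*, cam_*, btcoex, write_reg, write_h2c, write_rfreg, phydm_cmd} (assuming debugfs is mounted at the usual /sys/kernel/debug).
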
-
-void rtl_debug_remove_one(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- debugfs_remove_recursive(rtlpriv->dbg.debugfs_dir);
- rtlpriv->dbg.debugfs_dir = NULL;
-
- vfree(rtlpriv->dbg.msg_buf);
-}
-
-void rtl_debugfs_add_topdir(void)
-{
- debugfs_topdir = debugfs_create_dir("rtlwifi", NULL);
-}
-
-void rtl_debugfs_remove_topdir(void)
-{
- debugfs_remove_recursive(debugfs_topdir);
-}
-
-#endif
diff --git a/drivers/staging/rtlwifi/debug.h b/drivers/staging/rtlwifi/debug.h
deleted file mode 100644
index 666d7bc80c48..000000000000
--- a/drivers/staging/rtlwifi/debug.h
+++ /dev/null
@@ -1,223 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *****************************************************************************/
-
-#ifndef __RTL_DEBUG_H__
-#define __RTL_DEBUG_H__
-
-/*--------------------------------------------------------------
- * Debug level
- *------------------------------------------------------------
- *
- *Fatal bug.
- *For example, Tx/Rx/IO locked up,
- *memory access violation,
- *resource allocation failed,
- *unexpected HW behavior, HW BUG
- *and so on.
- */
-/*#define DBG_EMERG 0 */
-
-/*Abnormal, rare, or unexpected cases.
- *For example, Packet/IO Ctl canceled,
- *surprise removal of the device, and so on.
- */
-#define DBG_WARNING 2
-
-/*Normal cases the driver developer should
- *enable; shows link status such as
- *assoc/AddBA/DHCP/adapter start and
- *other basic and useful information.
- */
-#define DBG_DMESG 3
-
-/*Normal case with useful information
- *about current SW or HW state.
- *For example, Tx/Rx descriptor to fill,
- *Tx/Rx descriptor completed status,
- *SW protocol state change, dynamic
- *mechanism state change and so on.
- */
-#define DBG_LOUD 4
-
-/*Normal case with detail execution
- *flow or information.
- */
-#define DBG_TRACE 5
-
-/*--------------------------------------------------------------
- * Define the rt_trace components
- *--------------------------------------------------------------
- */
-#define COMP_ERR BIT(0)
-#define COMP_FW BIT(1)
-#define COMP_INIT BIT(2) /*For init/deinit */
-#define COMP_RECV BIT(3) /*For Rx. */
-#define COMP_SEND BIT(4) /*For Tx. */
-#define COMP_MLME BIT(5) /*For MLME. */
-#define COMP_SCAN BIT(6) /*For Scan. */
-#define COMP_INTR BIT(7) /*For interrupt Related. */
-#define COMP_LED BIT(8) /*For LED. */
-#define COMP_SEC BIT(9) /*For sec. */
-#define COMP_BEACON BIT(10) /*For beacon. */
-#define COMP_RATE BIT(11) /*For rate. */
-#define COMP_RXDESC BIT(12) /*For rx desc. */
-#define COMP_DIG BIT(13) /*For DIG */
-#define COMP_TXAGC BIT(14) /*For Tx power */
-#define COMP_HIPWR BIT(15) /*For High Power Mechanism */
-#define COMP_POWER BIT(16) /*For lps/ips/aspm. */
-#define COMP_POWER_TRACKING BIT(17) /*For TX POWER TRACKING */
-#define COMP_BB_POWERSAVING BIT(18)
-#define COMP_SWAS BIT(19) /*For SW Antenna Switch */
-#define COMP_RF BIT(20) /*For RF. */
-#define COMP_TURBO BIT(21) /*For EDCA TURBO. */
-#define COMP_RATR BIT(22)
-#define COMP_CMD BIT(23)
-#define COMP_EFUSE BIT(24)
-#define COMP_QOS BIT(25)
-#define COMP_MAC80211 BIT(26)
-#define COMP_REGD BIT(27)
-#define COMP_CHAN BIT(28)
-#define COMP_USB BIT(29)
-#define COMP_EASY_CONCURRENT COMP_USB /* reuse of this bit is OK */
-#define COMP_BT_COEXIST BIT(30)
-#define COMP_IQK BIT(31)
-#define COMP_TX_REPORT BIT_ULL(32)
-#define COMP_HALMAC BIT_ULL(34)
-#define COMP_PHYDM BIT_ULL(35)
-
-/*--------------------------------------------------------------
- * Define the rt_print components
- *--------------------------------------------------------------
- */
-/* Define EEPROM and EFUSE check module bit*/
-#define EEPROM_W BIT(0)
-#define EFUSE_PG BIT(1)
-#define EFUSE_READ_ALL BIT(2)
-
-/* Define init check for module bit*/
-#define INIT_EEPROM BIT(0)
-#define INIT_TXPOWER BIT(1)
-#define INIT_IQK BIT(2)
-#define INIT_RF BIT(3)
-
-/* Define PHY-BB/RF/MAC check module bit */
-#define PHY_BBR BIT(0)
-#define PHY_BBW BIT(1)
-#define PHY_RFR BIT(2)
-#define PHY_RFW BIT(3)
-#define PHY_MACR BIT(4)
-#define PHY_MACW BIT(5)
-#define PHY_ALLR BIT(6)
-#define PHY_ALLW BIT(7)
-#define PHY_TXPWR BIT(8)
-#define PHY_PWRDIFF BIT(9)
-
-/* Define Dynamic Mechanism check module bit --> FDM */
-#define WA_IOT BIT(0)
-#define DM_PWDB BIT(1)
-#define DM_MONITOR BIT(2)
-#define DM_DIG BIT(3)
-#define DM_EDCA_TURBO BIT(4)
-
-#define DM_PWDB BIT(1)
-
-enum dbgp_flag_e {
- FQOS = 0,
- FTX = 1,
- FRX = 2,
- FSEC = 3,
- FMGNT = 4,
- FMLME = 5,
- FRESOURCE = 6,
- FBEACON = 7,
- FISR = 8,
- FPHY = 9,
- FMP = 10,
- FEEPROM = 11,
- FPWR = 12,
- FDM = 13,
- FDBGCTRL = 14,
- FC2H = 15,
- FBT = 16,
- FINIT = 17,
- FIOCTL = 18,
- DBGP_TYPE_MAX
-};
-
-#ifdef CONFIG_RTLWIFI_DEBUG_ST
-
-struct rtl_priv;
-
-__printf(4, 5)
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *fmt, ...);
-
-__printf(4, 5)
-void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *fmt, ...);
-
-void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *titlestring,
- const void *hexdata, int hexdatalen);
-
-#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
- _rtl_dbg_trace(rtlpriv, comp, level, \
- fmt, ##__VA_ARGS__)
-
-#define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \
- _rtl_dbg_print(rtlpriv, dbgtype, dbgflag, fmt, ##__VA_ARGS__)
-
-#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
- _hexdatalen) \
- _rtl_dbg_print_data(rtlpriv, _comp, _level, \
- _titlestring, _hexdata, _hexdatalen)
-
-#else
-
-struct rtl_priv;
-
-__printf(4, 5)
-static inline void RT_TRACE(struct rtl_priv *rtlpriv,
- u64 comp, int level,
- const char *fmt, ...)
-{
-}
-
-__printf(4, 5)
-static inline void RTPRINT(struct rtl_priv *rtlpriv,
- int dbgtype, int dbgflag,
- const char *fmt, ...)
-{
-}
-
-static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
- u64 comp, int level,
- const char *titlestring,
- const void *hexdata, size_t hexdatalen)
-{
-}
-
-#endif
-
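When CONFIG_RTLWIFI_DEBUG_ST is disabled, the stubs above compile the trace macros away; when enabled, output is gated by the debug_mask (component bits) and debug_level module parameters checked in debug.c. A minimal usage sketch, reusing the component and level values defined above (key_idx and key are stand-ins for whatever the caller has in scope):

	/* Message for the SEC component at dmesg level, plus a hex dump at a
	 * more verbose level; both disappear when debugging is compiled out. */
	RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "installed key index %d\n", key_idx);
	RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD, "key material:",
		      key->key, key->keylen);
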
-#ifdef CONFIG_RTLWIFI_DEBUG_ST
-void rtl_debug_add_one(struct ieee80211_hw *hw);
-void rtl_debug_remove_one(struct ieee80211_hw *hw);
-void rtl_debugfs_add_topdir(void);
-void rtl_debugfs_remove_topdir(void);
-#else
-#define rtl_debug_add_one(hw)
-#define rtl_debug_remove_one(hw)
-#define rtl_debugfs_add_topdir()
-#define rtl_debugfs_remove_topdir()
-#endif
-#endif
diff --git a/drivers/staging/rtlwifi/efuse.c b/drivers/staging/rtlwifi/efuse.c
deleted file mode 100644
index a7c9e186f2b2..000000000000
--- a/drivers/staging/rtlwifi/efuse.c
+++ /dev/null
@@ -1,1329 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "wifi.h"
-#include "efuse.h"
-#include "pci.h"
-#include <linux/export.h>
-
-static const u8 MAX_PGPKT_SIZE = 9;
-static const u8 PGPKT_DATA_SIZE = 8;
-static const int EFUSE_MAX_SIZE = 512;
-
-#define START_ADDRESS 0x1000
-#define REG_MCUFWDL 0x0080
-
-static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
- {0, 0, 0, 2},
- {0, 1, 0, 2},
- {0, 2, 0, 2},
- {1, 0, 0, 1},
- {1, 0, 1, 1},
- {1, 1, 0, 1},
- {1, 1, 1, 3},
- {1, 3, 0, 17},
- {3, 3, 1, 48},
- {10, 0, 0, 6},
- {10, 3, 0, 1},
- {10, 3, 1, 1},
- {11, 0, 0, 28}
-};
-
-static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
- u8 *value);
-static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
- u16 *value);
-static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, u16 offset,
- u32 *value);
-static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, u16 offset,
- u8 value);
-static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset,
- u16 value);
-static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset,
- u32 value);
-static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr,
- u8 data);
-static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse);
-static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset,
- u8 *data);
-static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset,
- u8 word_en, u8 *data);
-static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
- u8 *targetdata);
-static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
- u16 efuse_addr, u8 word_en, u8 *data);
-static u16 efuse_get_current_size(struct ieee80211_hw *hw);
-static u8 efuse_calculate_word_cnts(u8 word_en);
-
-void efuse_initialize(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 bytetemp;
- u8 temp;
-
- bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1);
- temp = bytetemp | 0x20;
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1, temp);
-
- bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1);
- temp = bytetemp & 0xFE;
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1, temp);
-
- bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
- temp = bytetemp | 0x80;
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, temp);
-
- rtl_write_byte(rtlpriv, 0x2F8, 0x3);
-
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
-}
-
-u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 data;
- u8 bytetemp;
- u8 temp;
- u32 k = 0;
- const u32 efuse_len =
- rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
-
- if (address < efuse_len) {
- temp = address & 0xFF;
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
- temp);
- bytetemp = rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
- temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
- temp);
-
- bytetemp = rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
- temp = bytetemp & 0x7F;
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
- temp);
-
- bytetemp = rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
- while (!(bytetemp & 0x80)) {
- bytetemp =
- rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
- k++;
- if (k == 1000) {
- k = 0;
- break;
- }
- }
- data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
- return data;
- }
- return 0xFF;
-}
-
-void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 bytetemp;
- u8 temp;
- u32 k = 0;
- const u32 efuse_len =
- rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
-
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
- address, value);
-
- if (address < efuse_len) {
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
-
- temp = address & 0xFF;
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
- temp);
- bytetemp = rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
-
- temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
- rtl_write_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 2, temp);
-
- bytetemp = rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
- temp = bytetemp | 0x80;
- rtl_write_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 3, temp);
-
- bytetemp = rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
-
- while (bytetemp & 0x80) {
- bytetemp =
- rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
- k++;
- if (k == 100) {
- k = 0;
- break;
- }
- }
- }
-}
-
-void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 value32;
- u8 readbyte;
- u16 retry;
-
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
- (_offset & 0xff));
- readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
- ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
-
- readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
- (readbyte & 0x7f));
-
- retry = 0;
- value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
- while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
- value32 = rtl_read_dword(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL]);
- retry++;
- }
-
- udelay(50);
- value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
-
- *pbuf = (u8)(value32 & 0xff);
-}
-
-void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 *efuse_tbl;
- u8 rtemp8[1];
- u16 efuse_addr = 0;
- u8 offset, wren;
- u8 u1temp = 0;
- u16 i;
- u16 j;
- const u16 efuse_max_section =
- rtlpriv->cfg->maps[EFUSE_MAX_SECTION_MAP];
- const u32 efuse_len =
- rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
- u16 **efuse_word;
- u16 efuse_utilized = 0;
- u8 efuse_usage;
-
- if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "%s(): Invalid offset(%#x) with read bytes(%#x)!!\n",
- __func__, _offset, _size_byte);
- return;
- }
-
- /* allocate memory for efuse_tbl and efuse_word */
- efuse_tbl = kzalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE],
- GFP_ATOMIC);
- if (!efuse_tbl)
- return;
- efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
- if (!efuse_word)
- goto out;
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- efuse_word[i] = kcalloc(efuse_max_section, sizeof(u16),
- GFP_ATOMIC);
- if (!efuse_word[i])
- goto done;
- }
-
- for (i = 0; i < efuse_max_section; i++)
- for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
- efuse_word[j][i] = 0xFFFF;
-
- read_efuse_byte(hw, efuse_addr, rtemp8);
- if (*rtemp8 != 0xFF) {
- efuse_utilized++;
- RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
- "Addr=%d\n", efuse_addr);
- efuse_addr++;
- }
-
- while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_len)) {
- /* Check PG header for section num. */
- if ((*rtemp8 & 0x1F) == 0x0F) {/* extended header */
- u1temp = ((*rtemp8 & 0xE0) >> 5);
- read_efuse_byte(hw, efuse_addr, rtemp8);
-
- if ((*rtemp8 & 0x0F) == 0x0F) {
- efuse_addr++;
- read_efuse_byte(hw, efuse_addr, rtemp8);
-
- if (*rtemp8 != 0xFF &&
- (efuse_addr < efuse_len)) {
- efuse_addr++;
- }
- continue;
- } else {
- offset = ((*rtemp8 & 0xF0) >> 1) | u1temp;
- wren = (*rtemp8 & 0x0F);
- efuse_addr++;
- }
- } else {
- offset = ((*rtemp8 >> 4) & 0x0f);
- wren = (*rtemp8 & 0x0f);
- }
-
- if (offset < efuse_max_section) {
- RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
- "offset-%d Worden=%x\n", offset, wren);
-
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- if (!(wren & 0x01)) {
- RTPRINT(rtlpriv, FEEPROM,
- EFUSE_READ_ALL,
- "Addr=%d\n", efuse_addr);
-
- read_efuse_byte(hw, efuse_addr, rtemp8);
- efuse_addr++;
- efuse_utilized++;
- efuse_word[i][offset] =
- (*rtemp8 & 0xff);
-
- if (efuse_addr >= efuse_len)
- break;
-
- RTPRINT(rtlpriv, FEEPROM,
- EFUSE_READ_ALL,
- "Addr=%d\n", efuse_addr);
-
- read_efuse_byte(hw, efuse_addr, rtemp8);
- efuse_addr++;
- efuse_utilized++;
- efuse_word[i][offset] |=
- (((u16)*rtemp8 << 8) & 0xff00);
-
- if (efuse_addr >= efuse_len)
- break;
- }
-
- wren >>= 1;
- }
- }
-
- RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
- "Addr=%d\n", efuse_addr);
- read_efuse_byte(hw, efuse_addr, rtemp8);
- if (*rtemp8 != 0xFF && (efuse_addr < efuse_len)) {
- efuse_utilized++;
- efuse_addr++;
- }
- }
-
- for (i = 0; i < efuse_max_section; i++) {
- for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
- efuse_tbl[(i * 8) + (j * 2)] =
- (efuse_word[j][i] & 0xff);
- efuse_tbl[(i * 8) + ((j * 2) + 1)] =
- ((efuse_word[j][i] >> 8) & 0xff);
- }
- }
-
- for (i = 0; i < _size_byte; i++)
- pbuf[i] = efuse_tbl[_offset + i];
-
- rtlefuse->efuse_usedbytes = efuse_utilized;
- efuse_usage = (u8)((efuse_utilized * 100) / efuse_len);
- rtlefuse->efuse_usedpercentage = efuse_usage;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
- (u8 *)&efuse_utilized);
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
- &efuse_usage);
-done:
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
- kfree(efuse_word[i]);
- kfree(efuse_word);
-out:
- kfree(efuse_tbl);
-}
-
-bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 section_idx, i, base;
- u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
- bool wordchanged, result = true;
-
- for (section_idx = 0; section_idx < 16; section_idx++) {
- base = section_idx * 8;
- wordchanged = false;
-
- for (i = 0; i < 8; i = i + 2) {
- if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) ||
- (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i + 1] !=
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i +
- 1])) {
- words_need++;
- wordchanged = true;
- }
- }
-
- if (wordchanged)
- hdr_num++;
- }
-
- totalbytes = hdr_num + words_need * 2;
- efuse_used = rtlefuse->efuse_usedbytes;
-
- if ((totalbytes + efuse_used) >=
- (EFUSE_MAX_SIZE - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
- result = false;
-
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "%s(): totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
- __func__, totalbytes, hdr_num, words_need, efuse_used);
-
- return result;
-}
-
-void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 *value)
-{
- if (type == 1)
- efuse_shadow_read_1byte(hw, offset, (u8 *)value);
- else if (type == 2)
- efuse_shadow_read_2byte(hw, offset, (u16 *)value);
- else if (type == 4)
- efuse_shadow_read_4byte(hw, offset, value);
-}
-
-void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
- u32 value)
-{
- if (type == 1)
- efuse_shadow_write_1byte(hw, offset, (u8)value);
- else if (type == 2)
- efuse_shadow_write_2byte(hw, offset, (u16)value);
- else if (type == 4)
- efuse_shadow_write_4byte(hw, offset, value);
-}
-
-bool efuse_shadow_update(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u16 i, offset, base;
- u8 word_en = 0x0F;
- u8 first_pg = false;
-
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
-
- if (!efuse_shadow_update_chk(hw)) {
- efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
- memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
- &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
-
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse out of capacity!!\n");
- return false;
- }
- efuse_power_switch(hw, true, true);
-
- for (offset = 0; offset < 16; offset++) {
- word_en = 0x0F;
- base = offset * 8;
-
- for (i = 0; i < 8; i++) {
- if (first_pg) {
- word_en &= ~(BIT(i / 2));
-
- rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
- } else {
- if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) {
- word_en &= ~(BIT(i / 2));
-
- rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
- }
- }
- }
- if (word_en != 0x0F) {
- u8 tmpdata[8];
-
- memcpy(tmpdata,
- &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
- 8);
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
- "U-efuse\n", tmpdata, 8);
-
- if (!efuse_pg_packet_write(hw, (u8)offset, word_en,
- tmpdata)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "PG section(%#x) fail!!\n", offset);
- break;
- }
- }
- }
-
- efuse_power_switch(hw, true, false);
- efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
-
- memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
- &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
-
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
- return true;
-}
-
-void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
- if (rtlefuse->autoload_failflag)
- memset((&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]),
- 0xFF, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- else
- efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
-
- memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
- &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
-}
-
-void efuse_force_write_vendor_id(struct ieee80211_hw *hw)
-{
- u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF };
-
- efuse_power_switch(hw, true, true);
-
- efuse_pg_packet_write(hw, 1, 0xD, tmpdata);
-
- efuse_power_switch(hw, true, false);
-}
-
-void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx)
-{
-}
-
-static void efuse_shadow_read_1byte(struct ieee80211_hw *hw,
- u16 offset, u8 *value)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
-}
-
-static void efuse_shadow_read_2byte(struct ieee80211_hw *hw,
- u16 offset, u16 *value)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
- *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
- *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
-}
-
-static void efuse_shadow_read_4byte(struct ieee80211_hw *hw,
- u16 offset, u32 *value)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
- *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
- *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
- *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] << 16;
- *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] << 24;
-}
-
-static void efuse_shadow_write_1byte(struct ieee80211_hw *hw,
- u16 offset, u8 value)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value;
-}
-
-static void efuse_shadow_write_2byte(struct ieee80211_hw *hw,
- u16 offset, u16 value)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value & 0x00FF;
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] = value >> 8;
-}
-
-static void efuse_shadow_write_4byte(struct ieee80211_hw *hw,
- u16 offset, u32 value)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] =
- (u8)(value & 0x000000FF);
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] =
- (u8)((value >> 8) & 0x0000FF);
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] =
- (u8)((value >> 16) & 0x00FF);
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] =
- (u8)((value >> 24) & 0xFF);
-}
-
-int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmpidx = 0;
- int result;
-
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
- (u8)(addr & 0xff));
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
- ((u8)((addr >> 8) & 0x03)) |
- (rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 2) &
- 0xFC));
-
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
-
- while (!(0x80 & rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 3)) &&
- (tmpidx < 100)) {
- tmpidx++;
- }
-
- if (tmpidx < 100) {
- *data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
- result = true;
- } else {
- *data = 0xff;
- result = false;
- }
- return result;
-}
-
-static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmpidx = 0;
-
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "Addr = %x Data=%x\n", addr, data);
-
- rtl_write_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8)(addr & 0xff));
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
- (rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_CTRL] +
- 2) & 0xFC) | (u8)((addr >> 8) & 0x03));
-
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], data);
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0xF2);
-
- while ((0x80 &
- rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3)) &&
- (tmpidx < 100)) {
- tmpidx++;
- }
-
- if (tmpidx < 100)
- return true;
- return false;
-}
-
-static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- efuse_power_switch(hw, false, true);
- read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse);
- efuse_power_switch(hw, false, false);
-}
-
-static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
- u8 efuse_data, u8 offset, u8 *tmpdata,
- u8 *readstate)
-{
- bool dataempty = true;
- u8 hoffset;
- u8 tmpidx;
- u8 hworden;
- u8 word_cnts;
-
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
- word_cnts = efuse_calculate_word_cnts(hworden);
-
- if (hoffset == offset) {
- for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
- if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx,
- &efuse_data)) {
- tmpdata[tmpidx] = efuse_data;
- if (efuse_data != 0xff)
- dataempty = false;
- }
- }
-
- if (!dataempty) {
- *readstate = PG_STATE_DATA;
- } else {
- *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
- *readstate = PG_STATE_HEADER;
- }
-
- } else {
- *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
- *readstate = PG_STATE_HEADER;
- }
-}
-
-static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
-{
- u8 readstate = PG_STATE_HEADER;
-
- bool continual = true;
-
- u8 efuse_data, word_cnts = 0;
- u16 efuse_addr = 0;
- u8 tmpdata[8];
-
- if (!data)
- return false;
- if (offset > 15)
- return false;
-
- memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
- memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
-
- while (continual && (efuse_addr < EFUSE_MAX_SIZE)) {
- if (readstate & PG_STATE_HEADER) {
- if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
- (efuse_data != 0xFF))
- efuse_read_data_case1(hw, &efuse_addr,
- efuse_data, offset,
- tmpdata, &readstate);
- else
- continual = false;
- } else if (readstate & PG_STATE_DATA) {
- efuse_word_enable_data_read(0, tmpdata, data);
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- readstate = PG_STATE_HEADER;
- }
- }
-
- if ((data[0] == 0xff) && (data[1] == 0xff) &&
- (data[2] == 0xff) && (data[3] == 0xff) &&
- (data[4] == 0xff) && (data[5] == 0xff) &&
- (data[6] == 0xff) && (data[7] == 0xff))
- return false;
- return true;
-}
-
-static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
- u8 efuse_data, u8 offset,
- int *continual, u8 *write_state,
- struct pgpkt_struct *target_pkt,
- int *repeat_times, int *result, u8 word_en)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct pgpkt_struct tmp_pkt;
- int dataempty = true;
- u8 originaldata[8 * sizeof(u8)];
- u8 badworden = 0x0F;
- u8 match_word_en, tmp_word_en;
- u8 tmpindex;
- u8 tmp_header = efuse_data;
- u8 tmp_word_cnts;
-
- tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
- tmp_pkt.word_en = tmp_header & 0x0F;
- tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
-
- if (tmp_pkt.offset != target_pkt->offset) {
- *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
- *write_state = PG_STATE_HEADER;
- } else {
- for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
- if (efuse_one_byte_read(hw,
- (*efuse_addr + 1 + tmpindex),
- &efuse_data) &&
- (efuse_data != 0xFF))
- dataempty = false;
- }
-
- if (!dataempty) {
- *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
- *write_state = PG_STATE_HEADER;
- } else {
- match_word_en = 0x0F;
- if (!((target_pkt->word_en & BIT(0)) |
- (tmp_pkt.word_en & BIT(0))))
- match_word_en &= (~BIT(0));
-
- if (!((target_pkt->word_en & BIT(1)) |
- (tmp_pkt.word_en & BIT(1))))
- match_word_en &= (~BIT(1));
-
- if (!((target_pkt->word_en & BIT(2)) |
- (tmp_pkt.word_en & BIT(2))))
- match_word_en &= (~BIT(2));
-
- if (!((target_pkt->word_en & BIT(3)) |
- (tmp_pkt.word_en & BIT(3))))
- match_word_en &= (~BIT(3));
-
- if ((match_word_en & 0x0F) != 0x0F) {
- badworden =
- enable_efuse_data_write(hw,
- *efuse_addr + 1,
- tmp_pkt.word_en,
- target_pkt->data);
-
- if (0x0F != (badworden & 0x0F)) {
- u8 reorg_offset = offset;
- u8 reorg_worden = badworden;
-
- efuse_pg_packet_write(hw, reorg_offset,
- reorg_worden,
- originaldata);
- }
-
- tmp_word_en = 0x0F;
- if ((target_pkt->word_en & BIT(0)) ^
- (match_word_en & BIT(0)))
- tmp_word_en &= (~BIT(0));
-
- if ((target_pkt->word_en & BIT(1)) ^
- (match_word_en & BIT(1)))
- tmp_word_en &= (~BIT(1));
-
- if ((target_pkt->word_en & BIT(2)) ^
- (match_word_en & BIT(2)))
- tmp_word_en &= (~BIT(2));
-
- if ((target_pkt->word_en & BIT(3)) ^
- (match_word_en & BIT(3)))
- tmp_word_en &= (~BIT(3));
-
- if ((tmp_word_en & 0x0F) != 0x0F) {
- *efuse_addr =
- efuse_get_current_size(hw);
- target_pkt->offset = offset;
- target_pkt->word_en = tmp_word_en;
- } else {
- *continual = false;
- }
- *write_state = PG_STATE_HEADER;
- *repeat_times += 1;
- if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
- *continual = false;
- *result = false;
- }
- } else {
- *efuse_addr += (2 * tmp_word_cnts) + 1;
- target_pkt->offset = offset;
- target_pkt->word_en = word_en;
- *write_state = PG_STATE_HEADER;
- }
- }
- }
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse PG_STATE_HEADER-1\n");
-}
-
-static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
- int *continual, u8 *write_state,
- struct pgpkt_struct target_pkt,
- int *repeat_times, int *result)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct pgpkt_struct tmp_pkt;
- u8 pg_header;
- u8 tmp_header;
- u8 originaldata[8 * sizeof(u8)];
- u8 tmp_word_cnts;
- u8 badworden = 0x0F;
-
- pg_header = ((target_pkt.offset << 4) & 0xf0) | target_pkt.word_en;
- efuse_one_byte_write(hw, *efuse_addr, pg_header);
- efuse_one_byte_read(hw, *efuse_addr, &tmp_header);
-
- if (tmp_header == pg_header) {
- *write_state = PG_STATE_DATA;
- } else if (tmp_header == 0xFF) {
- *write_state = PG_STATE_HEADER;
- *repeat_times += 1;
- if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
- *continual = false;
- *result = false;
- }
- } else {
- tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
- tmp_pkt.word_en = tmp_header & 0x0F;
-
- tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
-
- memset(originaldata, 0xff, 8 * sizeof(u8));
-
- if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
- badworden = enable_efuse_data_write(hw,
- *efuse_addr + 1,
- tmp_pkt.word_en,
- originaldata);
-
- if (0x0F != (badworden & 0x0F)) {
- u8 reorg_offset = tmp_pkt.offset;
- u8 reorg_worden = badworden;
-
- efuse_pg_packet_write(hw, reorg_offset,
- reorg_worden,
- originaldata);
- *efuse_addr = efuse_get_current_size(hw);
- } else {
- *efuse_addr = *efuse_addr +
- (tmp_word_cnts * 2) + 1;
- }
- } else {
- *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
- }
-
- *write_state = PG_STATE_HEADER;
- *repeat_times += 1;
- if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
- *continual = false;
- *result = false;
- }
-
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- "efuse PG_STATE_HEADER-2\n");
- }
-}
-
-static int efuse_pg_packet_write(struct ieee80211_hw *hw,
- u8 offset, u8 word_en, u8 *data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct pgpkt_struct target_pkt;
- u8 write_state = PG_STATE_HEADER;
- int continual = true, result = true;
- u16 efuse_addr = 0;
- u8 efuse_data;
- u8 target_word_cnts = 0;
- u8 badworden = 0x0F;
- static int repeat_times;
-
- if (efuse_get_current_size(hw) >= (EFUSE_MAX_SIZE -
- rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- "%s error\n", __func__);
- return false;
- }
-
- target_pkt.offset = offset;
- target_pkt.word_en = word_en;
-
- memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
-
- efuse_word_enable_data_read(word_en, data, target_pkt.data);
- target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
-
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse Power ON\n");
-
- while (continual && (efuse_addr < (EFUSE_MAX_SIZE -
- rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
- if (write_state == PG_STATE_HEADER) {
- badworden = 0x0F;
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- "efuse PG_STATE_HEADER\n");
-
- if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
- (efuse_data != 0xFF))
- efuse_write_data_case1(hw, &efuse_addr,
- efuse_data, offset,
- &continual,
- &write_state,
- &target_pkt,
- &repeat_times, &result,
- word_en);
- else
- efuse_write_data_case2(hw, &efuse_addr,
- &continual,
- &write_state,
- target_pkt,
- &repeat_times,
- &result);
-
- } else if (write_state == PG_STATE_DATA) {
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- "efuse PG_STATE_DATA\n");
- badworden = 0x0f;
- badworden =
- enable_efuse_data_write(hw, efuse_addr + 1,
- target_pkt.word_en,
- target_pkt.data);
-
- if ((badworden & 0x0F) == 0x0F) {
- continual = false;
- } else {
- efuse_addr =
- efuse_addr + (2 * target_word_cnts) + 1;
-
- target_pkt.offset = offset;
- target_pkt.word_en = badworden;
- target_word_cnts =
- efuse_calculate_word_cnts(target_pkt.word_en);
- write_state = PG_STATE_HEADER;
- repeat_times++;
- if (repeat_times > EFUSE_REPEAT_THRESHOLD_) {
- continual = false;
- result = false;
- }
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- "efuse PG_STATE_HEADER-3\n");
- }
- }
- }
-
- if (efuse_addr >= (EFUSE_MAX_SIZE -
- rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_addr(%#x) Out of size!!\n", efuse_addr);
- }
-
- return true;
-}
-
-static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
- u8 *targetdata)
-{
- if (!(word_en & BIT(0))) {
- targetdata[0] = sourdata[0];
- targetdata[1] = sourdata[1];
- }
-
- if (!(word_en & BIT(1))) {
- targetdata[2] = sourdata[2];
- targetdata[3] = sourdata[3];
- }
-
- if (!(word_en & BIT(2))) {
- targetdata[4] = sourdata[4];
- targetdata[5] = sourdata[5];
- }
-
- if (!(word_en & BIT(3))) {
- targetdata[6] = sourdata[6];
- targetdata[7] = sourdata[7];
- }
-}
-
-static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
- u16 efuse_addr, u8 word_en, u8 *data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 tmpaddr;
- u16 start_addr = efuse_addr;
- u8 badworden = 0x0F;
- u8 tmpdata[8];
-
- memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
-
- if (!(word_en & BIT(0))) {
- tmpaddr = start_addr;
- efuse_one_byte_write(hw, start_addr++, data[0]);
- efuse_one_byte_write(hw, start_addr++, data[1]);
-
- efuse_one_byte_read(hw, tmpaddr, &tmpdata[0]);
- efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[1]);
- if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
- badworden &= (~BIT(0));
- }
-
- if (!(word_en & BIT(1))) {
- tmpaddr = start_addr;
- efuse_one_byte_write(hw, start_addr++, data[2]);
- efuse_one_byte_write(hw, start_addr++, data[3]);
-
- efuse_one_byte_read(hw, tmpaddr, &tmpdata[2]);
- efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[3]);
- if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
- badworden &= (~BIT(1));
- }
-
- if (!(word_en & BIT(2))) {
- tmpaddr = start_addr;
- efuse_one_byte_write(hw, start_addr++, data[4]);
- efuse_one_byte_write(hw, start_addr++, data[5]);
-
- efuse_one_byte_read(hw, tmpaddr, &tmpdata[4]);
- efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[5]);
- if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
- badworden &= (~BIT(2));
- }
-
- if (!(word_en & BIT(3))) {
- tmpaddr = start_addr;
- efuse_one_byte_write(hw, start_addr++, data[6]);
- efuse_one_byte_write(hw, start_addr++, data[7]);
-
- efuse_one_byte_read(hw, tmpaddr, &tmpdata[6]);
- efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[7]);
- if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
- badworden &= (~BIT(3));
- }
-
- return badworden;
-}
-
-void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 tempval;
- u16 tmpv16;
-
- if (pwrstate && (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)) {
- if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192CE &&
- rtlhal->hw_type != HARDWARE_TYPE_RTL8192DE) {
- rtl_write_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69);
- } else {
- tmpv16 =
- rtl_read_word(rtlpriv,
- rtlpriv->cfg->maps[SYS_ISO_CTRL]);
- if (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
- tmpv16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
- rtl_write_word(rtlpriv,
- rtlpriv->cfg->maps[SYS_ISO_CTRL],
- tmpv16);
- }
- }
- tmpv16 = rtl_read_word(rtlpriv,
- rtlpriv->cfg->maps[SYS_FUNC_EN]);
- if (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
- tmpv16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
- rtl_write_word(rtlpriv,
- rtlpriv->cfg->maps[SYS_FUNC_EN], tmpv16);
- }
-
- tmpv16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
- if ((!(tmpv16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
- (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
- tmpv16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
- rtlpriv->cfg->maps[EFUSE_ANA8M]);
- rtl_write_word(rtlpriv,
- rtlpriv->cfg->maps[SYS_CLK], tmpv16);
- }
- }
-
- if (pwrstate) {
- if (write) {
- tempval = rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_TEST] +
- 3);
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- tempval &= ~(BIT(3) | BIT(4) | BIT(5) | BIT(6));
- tempval |= (VOLTAGE_V25 << 3);
- } else if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) {
- tempval &= 0x0F;
- tempval |= (VOLTAGE_V25 << 4);
- }
-
- rtl_write_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_TEST] + 3,
- (tempval | 0x80));
- }
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
- 0x03);
- }
- } else {
- if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192CE &&
- rtlhal->hw_type != HARDWARE_TYPE_RTL8192DE)
- rtl_write_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_ACCESS], 0);
-
- if (write) {
- tempval = rtl_read_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_TEST] +
- 3);
- rtl_write_byte(rtlpriv,
- rtlpriv->cfg->maps[EFUSE_TEST] + 3,
- (tempval & 0x7F));
- }
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
- 0x02);
- }
- }
-}
-
-static u16 efuse_get_current_size(struct ieee80211_hw *hw)
-{
- int continual = true;
- u16 efuse_addr = 0;
- u8 hworden;
- u8 efuse_data, word_cnts;
-
- while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
- (efuse_addr < EFUSE_MAX_SIZE)) {
- if (efuse_data != 0xFF) {
- hworden = efuse_data & 0x0F;
- word_cnts = efuse_calculate_word_cnts(hworden);
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- } else {
- continual = false;
- }
- }
-
- return efuse_addr;
-}
-
-static u8 efuse_calculate_word_cnts(u8 word_en)
-{
- u8 word_cnts = 0;
-
- if (!(word_en & BIT(0)))
- word_cnts++;
- if (!(word_en & BIT(1)))
- word_cnts++;
- if (!(word_en & BIT(2)))
- word_cnts++;
- if (!(word_en & BIT(3)))
- word_cnts++;
- return word_cnts;
-}
-
-int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
- int max_size, u8 *hwinfo, int *params)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
- struct device *dev = &rtlpcipriv->dev.pdev->dev;
- u16 eeprom_id;
- u16 i, usvalue;
-
- switch (rtlefuse->epromtype) {
- case EEPROM_BOOT_EFUSE:
- rtl_efuse_shadow_map_update(hw);
- break;
-
- case EEPROM_93C46:
- pr_err("RTL8XXX did not boot from eeprom, check it !!\n");
- return 1;
-
- default:
- dev_warn(dev, "no efuse data\n");
- return 1;
- }
-
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], max_size);
-
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
- hwinfo, max_size);
-
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != params[0]) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
-
- if (rtlefuse->autoload_failflag)
- return 1;
-
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[params[1]];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[params[2]];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[params[3]];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[params[4]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
-
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[params[5] + i];
- *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
- }
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
-
- rtlefuse->eeprom_channelplan = *&hwinfo[params[6]];
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[params[7]];
- rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *&hwinfo[params[8]];
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
-
- /* set channel plan to world wide 13 */
- rtlefuse->channel_plan = params[9];
-
- return 0;
-}
-
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *pu4byteptr = (u8 *)buffer;
- u32 i;
-
- for (i = 0; i < size; i++)
- rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i));
-}
-
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
- u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8)(page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
- rtl_fw_block_write(hw, buffer, size);
-}
-
-void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
- u32 fwlen = *pfwlen;
- u8 remain = (u8)(fwlen % 4);
-
- remain = (remain == 0) ? 0 : (4 - remain);
-
- while (remain > 0) {
- pfwbuf[fwlen] = 0;
- fwlen++;
- remain--;
- }
-
- *pfwlen = fwlen;
-}
diff --git a/drivers/staging/rtlwifi/efuse.h b/drivers/staging/rtlwifi/efuse.h
deleted file mode 100644
index 5335d3ee6da7..000000000000
--- a/drivers/staging/rtlwifi/efuse.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_EFUSE_H_
-#define __RTL_EFUSE_H_
-
-#define EFUSE_IC_ID_OFFSET 506
-
-#define EFUSE_MAX_WORD_UNIT 4
-
-#define EFUSE_INIT_MAP 0
-#define EFUSE_MODIFY_MAP 1
-
-#define PG_STATE_HEADER 0x01
-#define PG_STATE_WORD_0 0x02
-#define PG_STATE_WORD_1 0x04
-#define PG_STATE_WORD_2 0x08
-#define PG_STATE_WORD_3 0x10
-#define PG_STATE_DATA 0x20
-
-#define EFUSE_REPEAT_THRESHOLD_ 3
-#define EFUSE_ERROE_HANDLE 1
-
-struct efuse_map {
- u8 offset;
- u8 word_start;
- u8 byte_start;
- u8 byte_cnts;
-};
-
-struct pgpkt_struct {
- u8 offset;
- u8 word_en;
- u8 data[8];
-};
-
-enum efuse_data_item {
- EFUSE_CHIP_ID = 0,
- EFUSE_LDO_SETTING,
- EFUSE_CLK_SETTING,
- EFUSE_SDIO_SETTING,
- EFUSE_CCCR,
- EFUSE_SDIO_MODE,
- EFUSE_OCR,
- EFUSE_F0CIS,
- EFUSE_F1CIS,
- EFUSE_MAC_ADDR,
- EFUSE_EEPROM_VER,
- EFUSE_CHAN_PLAN,
- EFUSE_TXPW_TAB
-};
-
-enum {
- VOLTAGE_V25 = 0x03,
- LDOE25_SHIFT = 28,
-};
-
-struct efuse_priv {
- u8 id[2];
- u8 ldo_setting[2];
- u8 clk_setting[2];
- u8 cccr;
- u8 sdio_mode;
- u8 ocr[3];
- u8 cis0[17];
- u8 cis1[48];
- u8 mac_addr[6];
- u8 eeprom_verno;
- u8 channel_plan;
- u8 tx_power_b[14];
- u8 tx_power_g[14];
-};
-
-void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
-void efuse_initialize(struct ieee80211_hw *hw);
-u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
-int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data);
-void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
-void read_efuse(struct ieee80211_hw *hw, u16 _offset,
- u16 _size_byte, u8 *pbuf);
-void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 *value);
-void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 value);
-bool efuse_shadow_update(struct ieee80211_hw *hw);
-bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
-void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-void efuse_force_write_vendor_id(struct ieee80211_hw *hw);
-void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
-void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
-int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
- int max_size, u8 *hwinfo, int *params);
-void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
- u32 size);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size);
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_2_platform.h b/drivers/staging/rtlwifi/halmac/halmac_2_platform.h
deleted file mode 100644
index 262304deb7fc..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_2_platform.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_2_PLATFORM_H_
-#define _HALMAC_2_PLATFORM_H_
-
-#include "../wifi.h"
-#include <asm/byteorder.h>
-
-#define HALMAC_PLATFORM_LITTLE_ENDIAN 1
-#define HALMAC_PLATFORM_BIG_ENDIAN 0
-
-/* Note : Named HALMAC_PLATFORM_LITTLE_ENDIAN / HALMAC_PLATFORM_BIG_ENDIAN
- * is not mandatory. But Little endian must be '1'. Big endian must be '0'
- */
-#if defined(__LITTLE_ENDIAN)
-#define HALMAC_SYSTEM_ENDIAN HALMAC_PLATFORM_LITTLE_ENDIAN
-#elif defined(__BIG_ENDIAN)
-#define HALMAC_SYSTEM_ENDIAN HALMAC_PLATFORM_BIG_ENDIAN
-#else
-#error
-#endif
-
-/* define the Platform SDIO Bus CLK */
-#define PLATFORM_SD_CLK 50000000 /*50MHz*/
-
-/* define the Rx FIFO expanding mode packet size unit for 8821C and 8822B */
-/* Should be 8 Byte alignment */
-#define HALMAC_RX_FIFO_EXPANDING_MODE_PKT_SIZE 16 /*Bytes*/
-
-#endif /* _HALMAC_2_PLATFORM_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h
deleted file mode 100644
index 9013baefcede..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_8822B_CFG_H_
-#define _HALMAC_8822B_CFG_H_
-
-#include "halmac_8822b_pwr_seq.h"
-#include "halmac_api_8822b.h"
-#include "halmac_api_8822b_usb.h"
-#include "halmac_api_8822b_sdio.h"
-#include "halmac_api_8822b_pcie.h"
-#include "../../halmac_bit2.h"
-#include "../../halmac_reg2.h"
-#include "../../halmac_api.h"
-
-#define HALMAC_TX_FIFO_SIZE_8822B 262144 /* 256k */
-#define HALMAC_TX_FIFO_SIZE_LA_8822B 131072 /* 128k */
-#define HALMAC_RX_FIFO_SIZE_8822B 24576 /* 24k */
-#define HALMAC_TX_PAGE_SIZE_8822B 128 /* PageSize 128Byte */
-#define HALMAC_TX_ALIGN_SIZE_8822B 8
-#define HALMAC_TX_PAGE_SIZE_2_POWER_8822B 7 /* 128 = 2^7 */
-#define HALMAC_SECURITY_CAM_ENTRY_NUM_8822B 64 /* CAM Entry size */
-#define HALMAC_TX_AGG_ALIGNMENT_SIZE_8822B 8
-#define HALMAC_TX_DESC_SIZE_8822B 48
-#define HALMAC_RX_DESC_SIZE_8822B 24
-#define HALMAC_RX_DESC_DUMMY_SIZE_MAX_8822B 120
-#define HALMAC_C2H_PKT_BUF_8822B 256
-#define HALMAC_RX_FIFO_EXPANDING_MODE_PKT_SIZE_MAX_8822B 80 /* align 8 Byte*/
-#define HALMAC_RX_FIFO_EXPANDING_UNIT_8822B \
- (HALMAC_RX_DESC_SIZE_8822B + HALMAC_RX_DESC_DUMMY_SIZE_MAX_8822B + \
- HALMAC_RX_FIFO_EXPANDING_MODE_PKT_SIZE) /* align 8 Byte*/
-#define HALMAC_RX_FIFO_EXPANDING_UNIT_MAX_8822B \
- (HALMAC_RX_DESC_SIZE_8822B + HALMAC_RX_DESC_DUMMY_SIZE_MAX_8822B + \
- HALMAC_RX_FIFO_EXPANDING_MODE_PKT_SIZE_MAX_8822B) /* align 8 Byte*/
-
-#define HALMAC_TX_FIFO_SIZE_EX_1_BLK_8822B 196608 /* 192k */
-#define HALMAC_RX_FIFO_SIZE_EX_1_BLK_8822B \
- ((((HALMAC_RX_FIFO_EXPANDING_UNIT_8822B << 8) - 1) >> 10) \
- << 10) /* < 56k*/
-#define HALMAC_RX_FIFO_SIZE_EX_1_BLK_MAX_8822B \
- ((((HALMAC_RX_FIFO_EXPANDING_UNIT_MAX_8822B << 8) - 1) >> 10) \
- << 10) /* 55k*/
-#define HALMAC_TX_FIFO_SIZE_EX_2_BLK_8822B 131072 /* 128k */
-#define HALMAC_RX_FIFO_SIZE_EX_2_BLK_8822B 155648 /* 152k */
-#define HALMAC_TX_FIFO_SIZE_EX_3_BLK_8822B 65536 /* 64k */
-#define HALMAC_RX_FIFO_SIZE_EX_3_BLK_8822B 221184 /* 216k */
-
-/* TXFIFO LAYOUT
- * HIGH_QUEUE
- * NORMAL_QUEUE
- * LOW_QUEUE
- * EXTRA_QUEUE
- * PUBLIC_QUEUE -- decided after all other queue are defined
- * GAP_QUEUE -- Used to separate AC queue and Rsvd page
- *
- * RSVD_DRIVER -- Driver used rsvd page area
- * RSVD_H2C_EXTRAINFO -- Extra Information for h2c
- * RSVD_H2C_QUEUE -- h2c queue in rsvd page
- * RSVD_CPU_INSTRUCTION -- extend fw code
- * RSVD_FW_TXBUFF -- fw used this area to send packet
- *
- * Symbol: HALMAC_MODE_QUEUE_UNIT_CHIP, ex: HALMAC_LB_2BULKOUT_FWCMD_PGNUM_8822B
- */
-#define HALMAC_EXTRA_INFO_BUFF_SIZE_FULL_FIFO_8822B \
- 16384 /*16K, only used in init case*/
-
-#define HALMAC_RSVD_DRV_PGNUM_8822B 16 /*2048*/
-#define HALMAC_RSVD_H2C_EXTRAINFO_PGNUM_8822B 32 /*4096*/
-#define HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B 8 /*1024*/
-#define HALMAC_RSVD_CPU_INSTRUCTION_PGNUM_8822B 0 /*0*/
-#define HALMAC_RSVD_FW_TXBUFF_PGNUM_8822B 4 /*512*/
-
-#define HALMAC_EFUSE_SIZE_8822B 1024 /* 0x400 */
-#define HALMAC_BT_EFUSE_SIZE_8822B 128 /* 0x80 */
-#define HALMAC_EEPROM_SIZE_8822B 0x300
-#define HALMAC_CR_TRX_ENABLE_8822B \
- (BIT_HCI_TXDMA_EN | BIT_HCI_RXDMA_EN | BIT_TXDMA_EN | BIT_RXDMA_EN | \
- BIT_PROTOCOL_EN | BIT_SCHEDULE_EN | BIT_MACTXEN | BIT_MACRXEN)
-
-#define HALMAC_BLK_DESC_NUM_8822B 0x3 /* Only for USB */
-
-/* AMPDU max time (unit : 32us) */
-#define HALMAC_AMPDU_MAX_TIME_8822B 0x70
-
-/* Protect mode control */
-#define HALMAC_PROT_RTS_LEN_TH_8822B 0xFF
-#define HALMAC_PROT_RTS_TX_TIME_TH_8822B 0x08
-#define HALMAC_PROT_MAX_AGG_PKT_LIMIT_8822B 0x20
-#define HALMAC_PROT_RTS_MAX_AGG_PKT_LIMIT_8822B 0x20
-
-/* Fast EDCA setting */
-#define HALMAC_FAST_EDCA_VO_TH_8822B 0x06
-#define HALMAC_FAST_EDCA_VI_TH_8822B 0x06
-#define HALMAC_FAST_EDCA_BE_TH_8822B 0x06
-#define HALMAC_FAST_EDCA_BK_TH_8822B 0x06
-
-/* BAR setting */
-#define HALMAC_BAR_RETRY_LIMIT_8822B 0x01
-#define HALMAC_RA_TRY_RATE_AGG_LIMIT_8822B 0x08
-
-enum halmac_normal_rxagg_th_to_8822b {
- HALMAC_NORMAL_RXAGG_THRESHOLD_8822B = 0xFF,
- HALMAC_NORMAL_RXAGG_TIMEOUT_8822B = 0x01,
-};
-
-enum halmac_loopback_rxagg_th_to_8822b {
- HALMAC_LOOPBACK_RXAGG_THRESHOLD_8822B = 0xFF,
- HALMAC_LOOPBACK_RXAGG_TIMEOUT_8822B = 0x01,
-};
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c
deleted file mode 100644
index c68b9e82c2e7..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c
+++ /dev/null
@@ -1,95 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "../halmac_88xx_cfg.h"
-#include "halmac_8822b_cfg.h"
-
-/**
- * ============ip sel item list============
- * HALMAC_IP_SEL_INTF_PHY
- * USB2 : usb2 phy, 1byte value
- * USB3 : usb3 phy, 2byte value
- * PCIE1 : pcie gen1 mdio, 2byte value
- * PCIE2 : pcie gen2 mdio, 2byte value
- * HALMAC_IP_SEL_MAC
- * USB2, USB3, PCIE1, PCIE2 : mac ip, 1byte value
- * HALMAC_IP_SEL_PCIE_DBI
- * USB2 USB3 : none
- * PCIE1, PCIE2 : pcie dbi, 1byte value
- */
-
-struct halmac_intf_phy_para_ HALMAC_RTL8822B_USB2_PHY[] = {
- /* {offset, value, ip sel, cut mask, platform mask} */
- {0xFFFF, 0x00, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_ALL,
- HALMAC_INTF_PHY_PLATFORM_ALL},
-};
-
-struct halmac_intf_phy_para_ HALMAC_RTL8822B_USB3_PHY[] = {
- /* {offset, value, ip sel, cut mask, platform mask} */
- {0x0001, 0xA841, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_D,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0xFFFF, 0x0000, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_ALL,
- HALMAC_INTF_PHY_PLATFORM_ALL},
-};
-
-struct halmac_intf_phy_para_ HALMAC_RTL8822B_PCIE_PHY_GEN1[] = {
- /* {offset, value, ip sel, cut mask, platform mask} */
- {0x0001, 0xA841, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0002, 0x60C6, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0008, 0x3596, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0009, 0x321C, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x000A, 0x9623, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0020, 0x94FF, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0021, 0xFFCF, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0026, 0xC006, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0029, 0xFF0E, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x002A, 0x1840, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0xFFFF, 0x0000, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_ALL,
- HALMAC_INTF_PHY_PLATFORM_ALL},
-};
-
-struct halmac_intf_phy_para_ HALMAC_RTL8822B_PCIE_PHY_GEN2[] = {
- /* {offset, value, ip sel, cut mask, platform mask} */
- {0x0001, 0xA841, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0002, 0x60C6, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0008, 0x3597, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0009, 0x321C, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x000A, 0x9623, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0020, 0x94FF, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0021, 0xFFCF, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0026, 0xC006, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x0029, 0xFF0E, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0x002A, 0x3040, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_C,
- HALMAC_INTF_PHY_PLATFORM_ALL},
- {0xFFFF, 0x0000, HALMAC_IP_SEL_INTF_PHY, HALMAC_INTF_PHY_CUT_ALL,
- HALMAC_INTF_PHY_PLATFORM_ALL},
-};
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c
deleted file mode 100644
index 08f6536840cf..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c
+++ /dev/null
@@ -1,552 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "../halmac_88xx_cfg.h"
-#include "halmac_8822b_cfg.h"
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_CARDEMU_TO_ACT[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0012, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), 0}, /*SWR OCP = SWR OCP = 010 1382.40*/
- {0x0012, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /*SWR OCP = 010 1382.40 */
- {0x0020, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK | HALMAC_PWR_INTF_SDIO_MSK,
- HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE, BIT(0),
- BIT(0)}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/
- {0x0001, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK | HALMAC_PWR_INTF_SDIO_MSK,
- HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_DELAY, 1,
- HALMAC_PWRSEQ_DELAY_MS}, /*Delay 1ms*/
- {0x0000, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK | HALMAC_PWR_INTF_SDIO_MSK,
- HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE, BIT(5),
- 0}, /*0x00[5] = 1b'0 release analog Ips to digital ,1:isolation*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- (BIT(4) | BIT(3) | BIT(2)),
- 0}, /* disable SW LPS 0x04[10]=0 and WLSUS_EN 0x04[12:11]=0*/
- {0x0075, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /* Disable USB suspend */
- {0x0006, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, BIT(1),
- BIT(1)}, /* wait till 0x04[17] = 1 power ready*/
- {0x0075, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), 0}, /* Enable USB suspend */
- {0xFF1A, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0}, /*0xFF1A = 0 to release resume signals*/
- {0x0006, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /* release WLON reset 0x04[16]=1*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(7), 0}, /* disable HWPDN 0x04[15]=0*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- (BIT(4) | BIT(3)), 0}, /* disable WL suspend*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /* polling until return 0*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, BIT(0), 0},
- {0x0020, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3), BIT(3)}, /*Enable XTAL_CLK*/
- {0x10A8, HALMAC_PWR_CUT_C_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0}, /*NFC pad enabled*/
- {0x10A9, HALMAC_PWR_CUT_C_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xef}, /*NFC pad enabled*/
- {0x10AA, HALMAC_PWR_CUT_C_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x0c}, /*NFC pad enabled*/
- {0x0068, HALMAC_PWR_CUT_C_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(4), BIT(4)}, /*SDIO pad power down disabled*/
- {0x0029, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xF9}, /*PLL setting*/
- {0x0024, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(2), 0}, /*Improve TX EVM of CH13 and some 5G channels */
- {0x0074, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(5), BIT(5)}, /*PCIE WAKE# enabled*/
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_ACT_TO_CARDEMU[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0003, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(2), 0}, /*0x02[10] = 0 Disable MCU Core*/
- {0x0093, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3), 0}, /*LPS option 0x93[3]=0 , SWR PFM*/
- {0x001F, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0}, /*0x1F[7:0] = 0 turn off RF*/
- {0x00EF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0}, /*0xEF[7:0] = 0 turn off RF*/
- {0xFF1A, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x30}, /*0xFF1A = 0x30 to block resume signals*/
- {0x0049, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), 0}, /*Enable rising edge triggering interrupt*/
- {0x0006, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /* release WLON reset 0x04[16]=1*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), 0}, /* Whole BB is reset */
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), BIT(1)}, /*0x04[9] = 1 turn off MAC by HW state machine*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, BIT(1),
- 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/
- {0x0020, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3), 0}, /* XTAL_CLK gated*/
- {0x0000, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK | HALMAC_PWR_INTF_SDIO_MSK,
- HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE, BIT(5),
- BIT(5)}, /*0x00[5] = 1b'1 analog Ips to digital ,1:isolation*/
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_CARDEMU_TO_SUS[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(4) | BIT(3),
- (BIT(4) | BIT(3))}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK | HALMAC_PWR_INTF_SDIO_MSK,
- HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE, BIT(3) | BIT(4),
- BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/
- {0x0007, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, 0xFF,
- 0x20}, /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3) | BIT(4),
- BIT(3) | BIT(4)}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/
- {0x0086, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, BIT(0),
- BIT(0)}, /*Set SDIO suspend local register*/
- {0x0086, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_SUS_TO_CARDEMU[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3) | BIT(7), 0}, /*clear suspend enable and power down enable*/
- {0x0086, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/
- {0x0086, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_POLLING, BIT(1),
- BIT(1)}, /*wait power state to suspend*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3) | BIT(4), 0}, /*0x04[12:11] = 2b'01 enable WL suspend*/
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_CARDEMU_TO_CARDDIS[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(7),
- BIT(7)}, /*suspend enable and power down enable*/
- {0x0007, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK | HALMAC_PWR_INTF_SDIO_MSK,
- HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE, 0xFF,
- 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/
- {0x0067, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(5), 0}, /*0x67[5]=0 , BIT_PAPE_WLBT_SEL*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK | HALMAC_PWR_INTF_SDIO_MSK,
- HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE, BIT(3) | BIT(4),
- BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(2), BIT(2)}, /*0x04[10] = 1, enable SW LPS*/
- {0x004A, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/
- {0x0067, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(5),
- 0}, /* 0: BT PAPE control ; 1: WL BB LNAON control*/
- {0x0067, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(4),
- 0}, /* 0: BT GPIO[11:10] control ; 1: WL BB LNAON control*/
- {0x004F, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(0), 0}, /* 0: BT Control*/
- {0x0067, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(1),
- 0}, /* turn off BT_3DD_SYNC_B and BT_GPIO[18] */
- {0x0046, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(6), BIT(6)}, /* GPIO[6] : Output mode*/
- {0x0067, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(2), 0}, /* turn off BT_GPIO[16] */
- {0x0046, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(7), BIT(7)}, /* GPIO[7] : Output mode*/
- {0x0062, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_WRITE, BIT(4), BIT(4)}, /* GPIO[12] : Output mode */
- {0x0086, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, BIT(0),
- BIT(0)}, /*Set SDIO suspend local register*/
- {0x0086, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/
- {0x0090, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK | HALMAC_PWR_INTF_PCI_MSK,
- HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE, BIT(1),
- 0}, /*0x90[1]=0 , disable 32k clock*/
- {0x0044, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, 0xFF,
- 0}, /*0x90[1]=0 , disable 32k clock by indirect access*/
- {0x0040, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, 0xFF,
- 0x90}, /*0x90[1]=0 , disable 32k clock by indirect access*/
- {0x0041, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, 0xFF,
- 0x00}, /*0x90[1]=0 , disable 32k clock by indirect access*/
- {0x0042, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, 0xFF,
- 0x04}, /*0x90[1]=0 , disable 32k clock by indirect access*/
- {0x0081, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(7), 0}, /*0x80[15] clean fw init ready bit*/
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_CARDDIS_TO_CARDEMU[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0086, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/
- {0x0086, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_POLLING, BIT(1),
- BIT(1)}, /*wait power state to suspend*/
- {0x004A, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/
- {0x0005, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3) | BIT(4) | BIT(7),
- 0}, /*clear suspend enable and power down enable*/
- {0x0301, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0},
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_ACT_TO_LPS[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0101, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(2), BIT(2)}, /*Enable 32k calibration and thermal meter*/
- {0x0199, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3), BIT(3)}, /*Register write data of 32K calibration*/
- {0x019B, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(7), BIT(7)}, /*Enable 32k calibration reg write*/
- {0x1138, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0) | BIT(1), BIT(0) | BIT(1)}, /*set RPWM IMR*/
- {0x0194, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /* enable 32K CLK*/
- {0x0093, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x42}, /* LPS Option MAC OFF enable*/
- {0x0092, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x20}, /* LPS Option Enable memory to deep sleep mode*/
- {0x0090, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), BIT(1)}, /* enable reg use 32K CLK*/
- {0x0301, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*PCIe DMA stop*/
- {0x0522, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*Tx Pause*/
- {0x05F8, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, 0xFF,
- 0}, /*Should be zero if no packet is transmitting*/
- {0x05F9, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, 0xFF,
- 0}, /*Should be zero if no packet is transmitting*/
- {0x05FA, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, 0xFF,
- 0}, /*Should be zero if no packet is transmitting*/
- {0x05FB, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, 0xFF,
- 0}, /*Should be zero if no packet is transmitting*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), 0}, /*CCK and OFDM are disabled, and clocks are gated*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_DELAY,
- 0, HALMAC_PWRSEQ_DELAY_US}, /*Delay 1us*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), 0}, /*Whole BB is reset*/
- {0x0100, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x3F}, /*Reset MAC TRX*/
- {0x0101, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), 0}, /*check if removed later*/
- {0x0553, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(5), BIT(5)}, /*Respond TxOK to scheduler*/
- {0x0008, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(4), BIT(4)}, /* switch TSF clock to 32K*/
- {0x0109, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, BIT(7),
- BIT(7)}, /*Polling 0x109[7]=1 TSF in 32K*/
- {0x0090, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /* enable WL_LPS_EN*/
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_ACT_TO_DEEP_LPS[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0101, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(2), BIT(2)}, /*Enable 32k calibration and thermal meter*/
- {0x0199, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(3), BIT(3)}, /*Register write data of 32K calibration*/
- {0x019B, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(7), BIT(7)}, /*Enable 32k calibration reg write*/
- {0x1138, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0) | BIT(1), BIT(0) | BIT(1)}, /*set RPWM IMR*/
- {0x0194, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /* enable 32K CLK*/
- {0x0093, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x40}, /* LPS Option MAC OFF enable*/
- {0x0092, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x20}, /* LPS Option Enable memory to deep sleep mode*/
- {0x0090, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), BIT(1)}, /* enable reg use 32K CLK*/
- {0x0301, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*PCIe DMA stop*/
- {0x0522, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*Tx Pause*/
- {0x05F8, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, 0xFF,
- 0}, /*Should be zero if no packet is transmitting*/
- {0x05F9, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, 0xFF,
- 0}, /*Should be zero if no packet is transmitting*/
- {0x05FA, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, 0xFF,
- 0}, /*Should be zero if no packet is transmitting*/
- {0x05FB, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, 0xFF,
- 0}, /*Should be zero if no packet is transmitting*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), 0}, /*CCK and OFDM are disabled, and clocks are gated*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_DELAY,
- 0, HALMAC_PWRSEQ_DELAY_US}, /*Delay 1us*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), 0}, /*Whole BB is reset*/
- {0x0100, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x3F}, /*Reset MAC TRX*/
- {0x0101, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), 0}, /*check if removed later*/
- {0x0553, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(5), BIT(5)}, /*Respond TxOK to scheduler*/
- {0x0008, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(4), BIT(4)}, /* switch TSF clock to 32K*/
- {0x0109, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, BIT(7),
- BIT(7)}, /*Polling 0x109[7]=1 TSF in 32K*/
- {0x0090, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(0), BIT(0)}, /* enable WL_LPS_EN*/
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-static struct halmac_wl_pwr_cfg_ HALMAC_RTL8822B_TRANS_LPS_TO_ACT[] = {
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } */
- {0x0080, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, BIT(7), BIT(7)}, /*SDIO RPWM*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_DELAY,
- 0, HALMAC_PWRSEQ_DELAY_MS}, /*Delay*/
- {0x0080, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_SDIO_MSK, HALMAC_PWR_BASEADDR_SDIO,
- HALMAC_PWR_CMD_WRITE, BIT(7), 0}, /*SDIO RPWM*/
- {0xFE58, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_USB_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x84}, /*USB RPWM*/
- {0x0361, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_PCI_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x84}, /*PCIe RPWM*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_DELAY,
- 0, HALMAC_PWRSEQ_DELAY_MS}, /*Delay*/
- {0x0008, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(4), 0}, /* switch TSF to 40M*/
- {0x0109, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC,
- HALMAC_PWR_CMD_POLLING, BIT(7), 0}, /*Polling 0x109[7]=0 TSF in 40M*/
- {0x0101, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), BIT(1)},
- {0x0100, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*enable WMAC TRX*/
- {0x0002, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1) | BIT(0), BIT(1) | BIT(0)}, /*enable BB macro*/
- {0x0522, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0},
- {0x113C, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0x03}, /*clear RPWM INT*/
- {0x0124, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*clear FW INT*/
- {0x0125, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*clear FW INT*/
- {0x0126, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*clear FW INT*/
- {0x0127, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- 0xFF, 0xFF}, /*clear FW INT*/
- {0x0090, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(1), 0}, /* disable reg use 32K CLK*/
- {0x0101, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, HALMAC_PWR_BASEADDR_MAC, HALMAC_PWR_CMD_WRITE,
- BIT(2), 0}, /*disable 32k calibration and thermal meter*/
- {0xFFFF, HALMAC_PWR_CUT_ALL_MSK, HALMAC_PWR_FAB_ALL_MSK,
- HALMAC_PWR_INTF_ALL_MSK, 0, HALMAC_PWR_CMD_END, 0, 0},
-};
-
-/* Card Enable Array */
-struct halmac_wl_pwr_cfg_ *halmac_8822b_card_enable_flow[] = {
- HALMAC_RTL8822B_TRANS_CARDDIS_TO_CARDEMU,
- HALMAC_RTL8822B_TRANS_CARDEMU_TO_ACT, NULL};
-
-/* Card Disable Array */
-struct halmac_wl_pwr_cfg_ *halmac_8822b_card_disable_flow[] = {
- HALMAC_RTL8822B_TRANS_ACT_TO_CARDEMU,
- HALMAC_RTL8822B_TRANS_CARDEMU_TO_CARDDIS, NULL};
-
-/* Suspend Array */
-struct halmac_wl_pwr_cfg_ *halmac_8822b_suspend_flow[] = {
- HALMAC_RTL8822B_TRANS_ACT_TO_CARDEMU,
- HALMAC_RTL8822B_TRANS_CARDEMU_TO_SUS, NULL};
-
-/* Resume Array */
-struct halmac_wl_pwr_cfg_ *halmac_8822b_resume_flow[] = {
- HALMAC_RTL8822B_TRANS_SUS_TO_CARDEMU,
- HALMAC_RTL8822B_TRANS_CARDEMU_TO_ACT, NULL};
-
-/* HWPDN Array - HW behavior */
-struct halmac_wl_pwr_cfg_ *halmac_8822b_hwpdn_flow[] = {NULL};
-
-/* Enter LPS - FW behavior */
-struct halmac_wl_pwr_cfg_ *halmac_8822b_enter_lps_flow[] = {
- HALMAC_RTL8822B_TRANS_ACT_TO_LPS, NULL};
-
-/* Enter Deep LPS - FW behavior */
-struct halmac_wl_pwr_cfg_ *halmac_8822b_enter_deep_lps_flow[] = {
- HALMAC_RTL8822B_TRANS_ACT_TO_DEEP_LPS, NULL};
-
-/* Leave LPS -FW behavior */
-struct halmac_wl_pwr_cfg_ *halmac_8822b_leave_lps_flow[] = {
- HALMAC_RTL8822B_TRANS_LPS_TO_ACT, NULL};
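For orientation: each table removed above is a flat command list terminated by the 0xFFFF/HALMAC_PWR_CMD_END sentinel, and entries are filtered by chip-cut, fab and interface masks before the masked write, polling loop or delay is applied. The walker below is an illustrative sketch only -- the real consumer is halmac_pwr_seq_parser_88xx(), which is not part of this hunk, and the field names are assumed from the "{ offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }" column comment used in every table.

    /* Illustrative sketch only: field names follow the column comment in the
     * tables above, not a verified header; register access is left abstract.
     */
    static void pwr_seq_walk_sketch(u8 cut_msk, u8 fab_msk, u8 intf_msk,
                                    const struct halmac_wl_pwr_cfg_ *entry)
    {
            for (; entry->cmd != HALMAC_PWR_CMD_END; entry++) {
                    if (!(entry->cut_msk & cut_msk) ||
                        !(entry->fab_msk & fab_msk) ||
                        !(entry->interface_msk & intf_msk))
                            continue; /* entry targets another cut/fab/bus */

                    switch (entry->cmd) {
                    case HALMAC_PWR_CMD_WRITE:
                            /* read-modify-write entry->offset: keep bits
                             * outside entry->msk, set masked bits to
                             * entry->value
                             */
                            break;
                    case HALMAC_PWR_CMD_POLLING:
                            /* re-read entry->offset until
                             * (reg & entry->msk) == (entry->value & entry->msk)
                             * or a retry limit is hit
                             */
                            break;
                    case HALMAC_PWR_CMD_DELAY:
                            /* entry->value selects the unit,
                             * HALMAC_PWRSEQ_DELAY_US or _MS
                             */
                            break;
                    }
            }
    }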
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h
deleted file mode 100644
index 03bbec32a3e3..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef HALMAC_POWER_SEQUENCE_8822B
-#define HALMAC_POWER_SEQUENCE_8822B
-
-#include "../../halmac_pwr_seq_cmd.h"
-
-#define HALMAC_8822B_PWR_SEQ_VER "V17"
-extern struct halmac_wl_pwr_cfg_ *halmac_8822b_card_disable_flow[];
-extern struct halmac_wl_pwr_cfg_ *halmac_8822b_card_enable_flow[];
-extern struct halmac_wl_pwr_cfg_ *halmac_8822b_suspend_flow[];
-extern struct halmac_wl_pwr_cfg_ *halmac_8822b_resume_flow[];
-extern struct halmac_wl_pwr_cfg_ *halmac_8822b_hwpdn_flow[];
-extern struct halmac_wl_pwr_cfg_ *halmac_8822b_enter_lps_flow[];
-extern struct halmac_wl_pwr_cfg_ *halmac_8822b_enter_deep_lps_flow[];
-extern struct halmac_wl_pwr_cfg_ *halmac_8822b_leave_lps_flow[];
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c
deleted file mode 100644
index aea481bb2403..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c
+++ /dev/null
@@ -1,332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_8822b_cfg.h"
-#include "halmac_func_8822b.h"
-#include "../halmac_func_88xx.h"
-
-/**
- * halmac_mount_api_8822b() - attach functions to function pointer
- * @halmac_adapter : the adapter of halmac
- *
- * SD1 internal use
- *
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- */
-enum halmac_ret_status
-halmac_mount_api_8822b(struct halmac_adapter *halmac_adapter)
-{
- struct halmac_api *halmac_api =
- (struct halmac_api *)halmac_adapter->halmac_api;
-
- halmac_adapter->chip_id = HALMAC_CHIP_ID_8822B;
- halmac_adapter->hw_config_info.efuse_size = HALMAC_EFUSE_SIZE_8822B;
- halmac_adapter->hw_config_info.eeprom_size = HALMAC_EEPROM_SIZE_8822B;
- halmac_adapter->hw_config_info.bt_efuse_size =
- HALMAC_BT_EFUSE_SIZE_8822B;
- halmac_adapter->hw_config_info.cam_entry_num =
- HALMAC_SECURITY_CAM_ENTRY_NUM_8822B;
- halmac_adapter->hw_config_info.txdesc_size = HALMAC_TX_DESC_SIZE_8822B;
- halmac_adapter->hw_config_info.rxdesc_size = HALMAC_RX_DESC_SIZE_8822B;
- halmac_adapter->hw_config_info.tx_fifo_size = HALMAC_TX_FIFO_SIZE_8822B;
- halmac_adapter->hw_config_info.rx_fifo_size = HALMAC_RX_FIFO_SIZE_8822B;
- halmac_adapter->hw_config_info.page_size = HALMAC_TX_PAGE_SIZE_8822B;
- halmac_adapter->hw_config_info.tx_align_size =
- HALMAC_TX_ALIGN_SIZE_8822B;
- halmac_adapter->hw_config_info.page_size_2_power =
- HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
-
- halmac_adapter->txff_allocation.rsvd_drv_pg_num =
- HALMAC_RSVD_DRV_PGNUM_8822B;
-
- halmac_api->halmac_init_trx_cfg = halmac_init_trx_cfg_8822b;
- halmac_api->halmac_init_protocol_cfg = halmac_init_protocol_cfg_8822b;
- halmac_api->halmac_init_h2c = halmac_init_h2c_8822b;
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- halmac_api->halmac_tx_allowed_sdio =
- halmac_tx_allowed_sdio_88xx;
- halmac_api->halmac_cfg_tx_agg_align =
- halmac_cfg_tx_agg_align_sdio_not_support_88xx;
- halmac_api->halmac_mac_power_switch =
- halmac_mac_power_switch_8822b_sdio;
- halmac_api->halmac_phy_cfg = halmac_phy_cfg_8822b_sdio;
- halmac_api->halmac_interface_integration_tuning =
- halmac_interface_integration_tuning_8822b_sdio;
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_USB) {
- halmac_api->halmac_mac_power_switch =
- halmac_mac_power_switch_8822b_usb;
- halmac_api->halmac_cfg_tx_agg_align =
- halmac_cfg_tx_agg_align_usb_not_support_88xx;
- halmac_api->halmac_phy_cfg = halmac_phy_cfg_8822b_usb;
- halmac_api->halmac_interface_integration_tuning =
- halmac_interface_integration_tuning_8822b_usb;
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_PCIE) {
- halmac_api->halmac_mac_power_switch =
- halmac_mac_power_switch_8822b_pcie;
- halmac_api->halmac_cfg_tx_agg_align =
- halmac_cfg_tx_agg_align_pcie_not_support_88xx;
- halmac_api->halmac_pcie_switch = halmac_pcie_switch_8822b;
- halmac_api->halmac_phy_cfg = halmac_phy_cfg_8822b_pcie;
- halmac_api->halmac_interface_integration_tuning =
- halmac_interface_integration_tuning_8822b_pcie;
- } else {
- halmac_api->halmac_pcie_switch = halmac_pcie_switch_8822b_nc;
- }
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_init_trx_cfg_8822b() - config trx dma register
- * @halmac_adapter : the adapter of halmac
- * @halmac_trx_mode : trx mode selection
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_trx_cfg_8822b(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode)
-{
- u8 value8;
- u32 value32;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_TRX_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
- halmac_adapter->trx_mode = halmac_trx_mode;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_init_trx_cfg ==========>halmac_trx_mode = %d\n",
- halmac_trx_mode);
-
- status = halmac_txdma_queue_mapping_8822b(halmac_adapter,
- halmac_trx_mode);
-
- if (status != HALMAC_RET_SUCCESS) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_txdma_queue_mapping fail!\n");
- return status;
- }
-
- value8 = 0;
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR, value8);
- value8 = HALMAC_CR_TRX_ENABLE_8822B;
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR, value8);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_H2CQ_CSR, BIT(31));
-
- status = halmac_priority_queue_config_8822b(halmac_adapter,
- halmac_trx_mode);
- if (halmac_adapter->txff_allocation.rx_fifo_expanding_mode !=
- HALMAC_RX_FIFO_EXPANDING_MODE_DISABLE)
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RX_DRVINFO_SZ, 0xF);
-
- if (status != HALMAC_RET_SUCCESS) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_txdma_queue_mapping fail!\n");
- return status;
- }
-
- /* Config H2C packet buffer */
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_H2C_HEAD);
- value32 = (value32 & 0xFFFC0000) |
- (halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_H2C_HEAD, value32);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_H2C_READ_ADDR);
- value32 = (value32 & 0xFFFC0000) |
- (halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_H2C_READ_ADDR, value32);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_H2C_TAIL);
- value32 = (value32 & 0xFFFC0000) |
- ((halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B) +
- (HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B));
- HALMAC_REG_WRITE_32(halmac_adapter, REG_H2C_TAIL, value32);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_H2C_INFO);
- value8 = (u8)((value8 & 0xFC) | 0x01);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_H2C_INFO, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_H2C_INFO);
- value8 = (u8)((value8 & 0xFB) | 0x04);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_H2C_INFO, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_TXDMA_OFFSET_CHK + 1);
- value8 = (u8)((value8 & 0x7f) | 0x80);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXDMA_OFFSET_CHK + 1, value8);
-
- halmac_adapter->h2c_buff_size = HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
- halmac_get_h2c_buff_free_space_88xx(halmac_adapter);
-
- if (halmac_adapter->h2c_buff_size !=
- halmac_adapter->h2c_buf_free_space) {
- pr_err("get h2c free space error!\n");
- return HALMAC_RET_GET_H2C_SPACE_ERR;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_init_trx_cfg <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_init_protocol_cfg_8822b() - config protocol register
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_protocol_cfg_8822b(struct halmac_adapter *halmac_adapter)
-{
- u32 value32;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_PROTOCOL_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]%s ==========>\n", __func__);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_AMPDU_MAX_TIME_V1,
- HALMAC_AMPDU_MAX_TIME_8822B);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TX_HANG_CTRL, BIT_EN_EOF_V1);
-
- value32 = HALMAC_PROT_RTS_LEN_TH_8822B |
- (HALMAC_PROT_RTS_TX_TIME_TH_8822B << 8) |
- (HALMAC_PROT_MAX_AGG_PKT_LIMIT_8822B << 16) |
- (HALMAC_PROT_RTS_MAX_AGG_PKT_LIMIT_8822B << 24);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_PROT_MODE_CTRL, value32);
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_BAR_MODE_CTRL + 2,
- HALMAC_BAR_RETRY_LIMIT_8822B |
- HALMAC_RA_TRY_RATE_AGG_LIMIT_8822B << 8);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FAST_EDCA_VOVI_SETTING,
- HALMAC_FAST_EDCA_VO_TH_8822B);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FAST_EDCA_VOVI_SETTING + 2,
- HALMAC_FAST_EDCA_VI_TH_8822B);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FAST_EDCA_BEBK_SETTING,
- HALMAC_FAST_EDCA_BE_TH_8822B);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FAST_EDCA_BEBK_SETTING + 2,
- HALMAC_FAST_EDCA_BK_TH_8822B);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_init_h2c_8822b() - config h2c packet buffer
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_h2c_8822b(struct halmac_adapter *halmac_adapter)
-{
- u8 value8;
- u32 value32;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- value8 = 0;
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR, value8);
- value8 = HALMAC_CR_TRX_ENABLE_8822B;
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR, value8);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_H2C_HEAD);
- value32 = (value32 & 0xFFFC0000) |
- (halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_H2C_HEAD, value32);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_H2C_READ_ADDR);
- value32 = (value32 & 0xFFFC0000) |
- (halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_H2C_READ_ADDR, value32);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_H2C_TAIL);
- value32 = (value32 & 0xFFFC0000) |
- ((halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B) +
- (HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B));
- HALMAC_REG_WRITE_32(halmac_adapter, REG_H2C_TAIL, value32);
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_H2C_INFO);
- value8 = (u8)((value8 & 0xFC) | 0x01);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_H2C_INFO, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_H2C_INFO);
- value8 = (u8)((value8 & 0xFB) | 0x04);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_H2C_INFO, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_TXDMA_OFFSET_CHK + 1);
- value8 = (u8)((value8 & 0x7f) | 0x80);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXDMA_OFFSET_CHK + 1, value8);
-
- halmac_adapter->h2c_buff_size = HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B
- << HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
- halmac_get_h2c_buff_free_space_88xx(halmac_adapter);
-
- if (halmac_adapter->h2c_buff_size !=
- halmac_adapter->h2c_buf_free_space) {
- pr_err("get h2c free space error!\n");
- return HALMAC_RET_GET_H2C_SPACE_ERR;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "h2c free space : %d\n",
- halmac_adapter->h2c_buf_free_space);
-
- return HALMAC_RET_SUCCESS;
-}
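The H2C buffer programming in halmac_init_trx_cfg_8822b() and halmac_init_h2c_8822b() above follows one pattern: the reserved-H2C page boundary is shifted by the page-size power to get a byte address for REG_H2C_HEAD and REG_H2C_READ_ADDR, the tail sits HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B pages later, and only the low 18 bits of each register are replaced (the & 0xFFFC0000 mask keeps the rest). A minimal sketch of that arithmetic, using the same macros as the deleted code (their numeric values are not shown in this hunk, and the helper name is hypothetical):

    /* Sketch of the head/tail/size computation used above. */
    static void h2c_layout_sketch(u32 rsvd_h2c_queue_pg_bndy,
                                  u32 *head, u32 *tail, u32 *buf_size)
    {
            *head = rsvd_h2c_queue_pg_bndy << HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
            *buf_size = HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B
                            << HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
            *tail = *head + *buf_size;
            /* each value is then merged into the register as
             * (reg & 0xFFFC0000) | computed_value
             */
    }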
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h
deleted file mode 100644
index 072cd40fd339..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_8822B_H_
-#define _HALMAC_API_8822B_H_
-
-#include "../../halmac_2_platform.h"
-#include "../../halmac_type.h"
-
-enum halmac_ret_status
-halmac_mount_api_8822b(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_init_trx_cfg_8822b(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode);
-
-enum halmac_ret_status
-halmac_init_protocol_cfg_8822b(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_init_h2c_8822b(struct halmac_adapter *halmac_adapter);
-
-#endif /* _HALMAC_API_8822B_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c
deleted file mode 100644
index a716fb532170..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c
+++ /dev/null
@@ -1,312 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "../halmac_88xx_cfg.h"
-#include "../halmac_api_88xx_pcie.h"
-#include "halmac_8822b_cfg.h"
-
-/**
- * halmac_mac_power_switch_8822b_pcie() - switch mac power
- * @halmac_adapter : the adapter of halmac
- * @halmac_power : power state
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_mac_power_switch_8822b_pcie(struct halmac_adapter *halmac_adapter,
- enum halmac_mac_power halmac_power)
-{
- u8 interface_mask;
- u8 value8;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_MAC_POWER_SWITCH);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_mac_power_switch_88xx_pcie halmac_power = %x ==========>\n",
- halmac_power);
- interface_mask = HALMAC_PWR_INTF_PCI_MSK;
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_CR);
- if (value8 == 0xEA)
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_OFF;
- else
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_ON;
-
- /* Check if power switch is needed */
- if (halmac_power == HALMAC_MAC_POWER_ON &&
- halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_ON) {
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_PWR, DBG_WARNING,
- "halmac_mac_power_switch power state unchange!\n");
- return HALMAC_RET_PWR_UNCHANGE;
- }
-
- if (halmac_power == HALMAC_MAC_POWER_OFF) {
- if (halmac_pwr_seq_parser_88xx(
- halmac_adapter, HALMAC_PWR_CUT_ALL_MSK,
- HALMAC_PWR_FAB_TSMC_MSK, interface_mask,
- halmac_8822b_card_disable_flow) !=
- HALMAC_RET_SUCCESS) {
- pr_err("Handle power off cmd error\n");
- return HALMAC_RET_POWER_OFF_FAIL;
- }
-
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_OFF;
- halmac_adapter->halmac_state.ps_state =
- HALMAC_PS_STATE_UNDEFINE;
- halmac_adapter->halmac_state.dlfw_state = HALMAC_DLFW_NONE;
- halmac_init_adapter_dynamic_para_88xx(halmac_adapter);
- } else {
- if (halmac_pwr_seq_parser_88xx(
- halmac_adapter, HALMAC_PWR_CUT_ALL_MSK,
- HALMAC_PWR_FAB_TSMC_MSK, interface_mask,
- halmac_8822b_card_enable_flow) !=
- HALMAC_RET_SUCCESS) {
- pr_err("Handle power on cmd error\n");
- return HALMAC_RET_POWER_ON_FAIL;
- }
-
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_ON;
- halmac_adapter->halmac_state.ps_state = HALMAC_PS_STATE_ACT;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_mac_power_switch_88xx_pcie <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_pcie_switch_8822b() - pcie gen1/gen2 switch
- * @halmac_adapter : the adapter of halmac
- * @pcie_cfg : gen1/gen2 selection
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_pcie_switch_8822b(struct halmac_adapter *halmac_adapter,
- enum halmac_pcie_cfg pcie_cfg)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- u8 current_link_speed = 0;
- u32 count = 0;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_PCIE_SWITCH);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- /* Link Control 2 Register[3:0] Target Link Speed
- * Defined encodings are:
- * 0001b Target Link 2.5 GT/s
- * 0010b Target Link 5.0 GT/s
- * 0011b Target Link 8.0 GT/s
- */
-
- if (pcie_cfg == HALMAC_PCIE_GEN1) {
- /* cfg 0xA0[3:0]=4'b0001 */
- halmac_dbi_write8_88xx(
- halmac_adapter, LINK_CTRL2_REG_OFFSET,
- (halmac_dbi_read8_88xx(halmac_adapter,
- LINK_CTRL2_REG_OFFSET) &
- 0xF0) | BIT(0));
-
- /* cfg 0x80C[17]=1 //PCIe DesignWave */
- halmac_dbi_write32_88xx(
- halmac_adapter, GEN2_CTRL_OFFSET,
- halmac_dbi_read32_88xx(halmac_adapter,
- GEN2_CTRL_OFFSET) |
- BIT(17));
-
- /* check link speed if GEN1 */
- /* cfg 0x82[3:0]=4'b0001 */
- current_link_speed =
- halmac_dbi_read8_88xx(halmac_adapter,
- LINK_STATUS_REG_OFFSET) &
- 0x0F;
- count = 2000;
-
- while (current_link_speed != GEN1_SPEED && count != 0) {
- usleep_range(50, 60);
- current_link_speed =
- halmac_dbi_read8_88xx(halmac_adapter,
- LINK_STATUS_REG_OFFSET) &
- 0x0F;
- count--;
- }
-
- if (current_link_speed != GEN1_SPEED) {
- pr_err("Speed change to GEN1 fail !\n");
- return HALMAC_RET_FAIL;
- }
-
- } else if (pcie_cfg == HALMAC_PCIE_GEN2) {
- /* cfg 0xA0[3:0]=4'b0010 */
- halmac_dbi_write8_88xx(
- halmac_adapter, LINK_CTRL2_REG_OFFSET,
- (halmac_dbi_read8_88xx(halmac_adapter,
- LINK_CTRL2_REG_OFFSET) &
- 0xF0) | BIT(1));
-
- /* cfg 0x80C[17]=1 //PCIe DesignWave */
- halmac_dbi_write32_88xx(
- halmac_adapter, GEN2_CTRL_OFFSET,
- halmac_dbi_read32_88xx(halmac_adapter,
- GEN2_CTRL_OFFSET) |
- BIT(17));
-
- /* check link speed if GEN2 */
- /* cfg 0x82[3:0]=4'b0010 */
- current_link_speed =
- halmac_dbi_read8_88xx(halmac_adapter,
- LINK_STATUS_REG_OFFSET) &
- 0x0F;
- count = 2000;
-
- while (current_link_speed != GEN2_SPEED && count != 0) {
- usleep_range(50, 60);
- current_link_speed =
- halmac_dbi_read8_88xx(halmac_adapter,
- LINK_STATUS_REG_OFFSET) &
- 0x0F;
- count--;
- }
-
- if (current_link_speed != GEN2_SPEED) {
- pr_err("Speed change to GEN1 fail !\n");
- return HALMAC_RET_FAIL;
- }
-
- } else {
- pr_err("Error Speed !\n");
- return HALMAC_RET_FAIL;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_pcie_switch_8822b_nc(struct halmac_adapter *halmac_adapter,
- enum halmac_pcie_cfg pcie_cfg)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_PCIE_SWITCH);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_phy_cfg_8822b_pcie() - phy config
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_phy_cfg_8822b_pcie(struct halmac_adapter *halmac_adapter,
- enum halmac_intf_phy_platform platform)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_PHY_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_phy_cfg ==========>\n");
-
- status = halmac_parse_intf_phy_88xx(halmac_adapter,
- HALMAC_RTL8822B_PCIE_PHY_GEN1,
- platform, HAL_INTF_PHY_PCIE_GEN1);
-
- if (status != HALMAC_RET_SUCCESS)
- return status;
-
- status = halmac_parse_intf_phy_88xx(halmac_adapter,
- HALMAC_RTL8822B_PCIE_PHY_GEN2,
- platform, HAL_INTF_PHY_PCIE_GEN2);
-
- if (status != HALMAC_RET_SUCCESS)
- return status;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_phy_cfg <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_interface_integration_tuning_8822b_pcie() - pcie interface fine tuning
- * @halmac_adapter : the adapter of halmac
- * Author : Rick Liu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_interface_integration_tuning_8822b_pcie(
- struct halmac_adapter *halmac_adapter)
-{
- return HALMAC_RET_SUCCESS;
-}
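The two branches of halmac_pcie_switch_8822b() above share one retrain-and-poll pattern: program the Target Link Speed field (low nibble of the Link Control 2 register at LINK_CTRL2_REG_OFFSET), set bit 17 at GEN2_CTRL_OFFSET to trigger the DesignWave speed change, then poll the low nibble of LINK_STATUS_REG_OFFSET until it reports the expected speed or roughly 2000 polls of 50-60 us each have elapsed. A sketch of just the polling half, refactored as a helper (the helper name is hypothetical; the accessor and offsets are the ones the deleted code uses):

    /* Hypothetical helper equivalent to the polling loops above. */
    static bool pcie_wait_link_speed_sketch(struct halmac_adapter *adapter,
                                            u8 expected_speed)
    {
            u32 count = 2000;
            u8 speed;

            speed = halmac_dbi_read8_88xx(adapter, LINK_STATUS_REG_OFFSET) & 0x0F;
            while (speed != expected_speed && count != 0) {
                    usleep_range(50, 60);
                    speed = halmac_dbi_read8_88xx(adapter,
                                                  LINK_STATUS_REG_OFFSET) & 0x0F;
                    count--;
            }

            return speed == expected_speed;
    }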
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h
deleted file mode 100644
index b47c50863f06..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_8822B_PCIE_H_
-#define _HALMAC_API_8822B_PCIE_H_
-
-#include "../../halmac_2_platform.h"
-#include "../../halmac_type.h"
-
-extern struct halmac_intf_phy_para_ HALMAC_RTL8822B_PCIE_PHY_GEN1[];
-extern struct halmac_intf_phy_para_ HALMAC_RTL8822B_PCIE_PHY_GEN2[];
-
-enum halmac_ret_status
-halmac_mac_power_switch_8822b_pcie(struct halmac_adapter *halmac_adapter,
- enum halmac_mac_power halmac_power);
-
-enum halmac_ret_status
-halmac_pcie_switch_8822b(struct halmac_adapter *halmac_adapter,
- enum halmac_pcie_cfg pcie_cfg);
-
-enum halmac_ret_status
-halmac_pcie_switch_8822b_nc(struct halmac_adapter *halmac_adapter,
- enum halmac_pcie_cfg pcie_cfg);
-
-enum halmac_ret_status
-halmac_phy_cfg_8822b_pcie(struct halmac_adapter *halmac_adapter,
- enum halmac_intf_phy_platform platform);
-
-enum halmac_ret_status halmac_interface_integration_tuning_8822b_pcie(
- struct halmac_adapter *halmac_adapter);
-
-#endif /* _HALMAC_API_8822B_PCIE_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c
deleted file mode 100644
index a6b6d7fa2689..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_8822b_cfg.h"
-
-/**
- * halmac_mac_power_switch_8822b_sdio() - switch mac power
- * @halmac_adapter : the adapter of halmac
- * @halmac_power : power state
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_mac_power_switch_8822b_sdio(struct halmac_adapter *halmac_adapter,
- enum halmac_mac_power halmac_power)
-{
- u8 interface_mask;
- u8 value8;
- u8 rpwm;
- u32 imr_backup;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "[TRACE]halmac_mac_power_switch_88xx_sdio==========>\n");
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "[TRACE]halmac_power = %x ==========>\n", halmac_power);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "[TRACE]8822B pwr seq ver = %s\n",
- HALMAC_8822B_PWR_SEQ_VER);
-
- interface_mask = HALMAC_PWR_INTF_SDIO_MSK;
-
- halmac_adapter->rpwm_record =
- HALMAC_REG_READ_8(halmac_adapter, REG_SDIO_HRPWM1);
-
- /* Check FW still exist or not */
- if (HALMAC_REG_READ_16(halmac_adapter, REG_MCUFW_CTRL) == 0xC078) {
- /* Leave 32K */
- rpwm = (u8)((halmac_adapter->rpwm_record ^ BIT(7)) & 0x80);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SDIO_HRPWM1, rpwm);
- }
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_CR);
- if (value8 == 0xEA)
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_OFF;
- else
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_ON;
-
- /*Check if power switch is needed*/
- if (halmac_power == HALMAC_MAC_POWER_ON &&
- halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_ON) {
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_PWR, DBG_WARNING,
- "[WARN]halmac_mac_power_switch power state unchange!\n");
- return HALMAC_RET_PWR_UNCHANGE;
- }
-
- imr_backup = HALMAC_REG_READ_32(halmac_adapter, REG_SDIO_HIMR);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_SDIO_HIMR, 0);
-
- if (halmac_power == HALMAC_MAC_POWER_OFF) {
- if (halmac_pwr_seq_parser_88xx(
- halmac_adapter, HALMAC_PWR_CUT_ALL_MSK,
- HALMAC_PWR_FAB_TSMC_MSK, interface_mask,
- halmac_8822b_card_disable_flow) !=
- HALMAC_RET_SUCCESS) {
- pr_err("[ERR]Handle power off cmd error\n");
- HALMAC_REG_WRITE_32(halmac_adapter, REG_SDIO_HIMR,
- imr_backup);
- return HALMAC_RET_POWER_OFF_FAIL;
- }
-
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_OFF;
- halmac_adapter->halmac_state.ps_state =
- HALMAC_PS_STATE_UNDEFINE;
- halmac_adapter->halmac_state.dlfw_state = HALMAC_DLFW_NONE;
- halmac_init_adapter_dynamic_para_88xx(halmac_adapter);
- } else {
- if (halmac_pwr_seq_parser_88xx(
- halmac_adapter, HALMAC_PWR_CUT_ALL_MSK,
- HALMAC_PWR_FAB_TSMC_MSK, interface_mask,
- halmac_8822b_card_enable_flow) !=
- HALMAC_RET_SUCCESS) {
- pr_err("[ERR]Handle power on cmd error\n");
- HALMAC_REG_WRITE_32(halmac_adapter, REG_SDIO_HIMR,
- imr_backup);
- return HALMAC_RET_POWER_ON_FAIL;
- }
-
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_ON;
- halmac_adapter->halmac_state.ps_state = HALMAC_PS_STATE_ACT;
- }
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_SDIO_HIMR, imr_backup);
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "[TRACE]halmac_mac_power_switch_88xx_sdio <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_phy_cfg_8822b_sdio() - phy config
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_phy_cfg_8822b_sdio(struct halmac_adapter *halmac_adapter,
- enum halmac_intf_phy_platform platform)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_PHY_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_phy_cfg ==========>\n");
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "sdio no phy\n");
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_phy_cfg <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_interface_integration_tuning_8822b_sdio() - sdio interface fine tuning
- * @halmac_adapter : the adapter of halmac
- * Author : Ivan
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_interface_integration_tuning_8822b_sdio(
- struct halmac_adapter *halmac_adapter)
-{
- return HALMAC_RET_SUCCESS;
-}
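Two details in halmac_mac_power_switch_8822b_sdio() above are worth calling out: the SDIO host interrupt mask (REG_SDIO_HIMR) is saved, cleared for the whole power transition, and restored on every exit path, and when firmware is detected (REG_MCUFW_CTRL reads 0xC078) bit 7 of the recorded RPWM value is toggled to leave 32K before the sequence runs. A condensed sketch of the interrupt-mask bracket follows; the wrapper name is hypothetical, the macros and the halmac_pwr_seq_parser_88xx() call match the deleted code, and the local driver_adapter/halmac_api setup that the register macros rely on in the functions above is omitted.

    /* Hypothetical wrapper showing the HIMR save/clear/restore bracket. */
    static enum halmac_ret_status
    sdio_pwr_transition_sketch(struct halmac_adapter *adapter,
                               struct halmac_wl_pwr_cfg_ **flow)
    {
            u32 imr_backup;
            enum halmac_ret_status status;

            imr_backup = HALMAC_REG_READ_32(adapter, REG_SDIO_HIMR);
            HALMAC_REG_WRITE_32(adapter, REG_SDIO_HIMR, 0); /* mask host irqs */

            status = halmac_pwr_seq_parser_88xx(adapter, HALMAC_PWR_CUT_ALL_MSK,
                                                HALMAC_PWR_FAB_TSMC_MSK,
                                                HALMAC_PWR_INTF_SDIO_MSK, flow);

            HALMAC_REG_WRITE_32(adapter, REG_SDIO_HIMR, imr_backup);
            return status;
    }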
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h
deleted file mode 100644
index 75c83f7f031e..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_8822B_SDIO_H_
-#define _HALMAC_API_8822B_SDIO_H_
-
-#include "../../halmac_2_platform.h"
-#include "../../halmac_type.h"
-
-enum halmac_ret_status
-halmac_mac_power_switch_8822b_sdio(struct halmac_adapter *halmac_adapter,
- enum halmac_mac_power halmac_power);
-
-enum halmac_ret_status
-halmac_phy_cfg_8822b_sdio(struct halmac_adapter *halmac_adapter,
- enum halmac_intf_phy_platform platform);
-
-enum halmac_ret_status halmac_interface_integration_tuning_8822b_sdio(
- struct halmac_adapter *halmac_adapter);
-
-#endif /* _HALMAC_API_8822B_SDIO_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c
deleted file mode 100644
index 2eaf362ca8c3..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "../halmac_88xx_cfg.h"
-#include "halmac_8822b_cfg.h"
-
-/**
- * halmac_mac_power_switch_8822b_usb() - switch mac power
- * @halmac_adapter : the adapter of halmac
- * @halmac_power : power state
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_mac_power_switch_8822b_usb(struct halmac_adapter *halmac_adapter,
- enum halmac_mac_power halmac_power)
-{
- u8 interface_mask;
- u8 value8;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_MAC_POWER_SWITCH);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_mac_power_switch_88xx_usb halmac_power = %x ==========>\n",
- halmac_power);
-
- interface_mask = HALMAC_PWR_INTF_USB_MSK;
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_CR);
- if (value8 == 0xEA) {
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_OFF;
- } else {
- if (BIT(0) ==
- (HALMAC_REG_READ_8(halmac_adapter, REG_SYS_STATUS1 + 1) &
- BIT(0)))
- halmac_adapter->halmac_state.mac_power =
- HALMAC_MAC_POWER_OFF;
- else
- halmac_adapter->halmac_state.mac_power =
- HALMAC_MAC_POWER_ON;
- }
-
- /*Check if power switch is needed*/
- if (halmac_power == HALMAC_MAC_POWER_ON &&
- halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_ON) {
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_PWR, DBG_WARNING,
- "halmac_mac_power_switch power state unchange!\n");
- return HALMAC_RET_PWR_UNCHANGE;
- }
- if (halmac_power == HALMAC_MAC_POWER_OFF) {
- if (halmac_pwr_seq_parser_88xx(
- halmac_adapter, HALMAC_PWR_CUT_ALL_MSK,
- HALMAC_PWR_FAB_TSMC_MSK, interface_mask,
- halmac_8822b_card_disable_flow) !=
- HALMAC_RET_SUCCESS) {
- pr_err("Handle power off cmd error\n");
- return HALMAC_RET_POWER_OFF_FAIL;
- }
-
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_OFF;
- halmac_adapter->halmac_state.ps_state =
- HALMAC_PS_STATE_UNDEFINE;
- halmac_adapter->halmac_state.dlfw_state = HALMAC_DLFW_NONE;
- halmac_init_adapter_dynamic_para_88xx(halmac_adapter);
- } else {
- if (halmac_pwr_seq_parser_88xx(
- halmac_adapter, HALMAC_PWR_CUT_ALL_MSK,
- HALMAC_PWR_FAB_TSMC_MSK, interface_mask,
- halmac_8822b_card_enable_flow) !=
- HALMAC_RET_SUCCESS) {
- pr_err("Handle power on cmd error\n");
- return HALMAC_RET_POWER_ON_FAIL;
- }
-
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_SYS_STATUS1 + 1,
- HALMAC_REG_READ_8(halmac_adapter, REG_SYS_STATUS1 + 1) &
- ~(BIT(0)));
-
- halmac_adapter->halmac_state.mac_power = HALMAC_MAC_POWER_ON;
- halmac_adapter->halmac_state.ps_state = HALMAC_PS_STATE_ACT;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_mac_power_switch_88xx_usb <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_phy_cfg_8822b_usb() - phy config
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_phy_cfg_8822b_usb(struct halmac_adapter *halmac_adapter,
- enum halmac_intf_phy_platform platform)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_PHY_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_phy_cfg ==========>\n");
-
- status = halmac_parse_intf_phy_88xx(halmac_adapter,
- HALMAC_RTL8822B_USB2_PHY, platform,
- HAL_INTF_PHY_USB2);
-
- if (status != HALMAC_RET_SUCCESS)
- return status;
-
- status = halmac_parse_intf_phy_88xx(halmac_adapter,
- HALMAC_RTL8822B_USB3_PHY, platform,
- HAL_INTF_PHY_USB3);
-
- if (status != HALMAC_RET_SUCCESS)
- return status;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "halmac_phy_cfg <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_interface_integration_tuning_8822b_usb() - usb interface fine tuning
- * @halmac_adapter : the adapter of halmac
- * Author : Ivan
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_interface_integration_tuning_8822b_usb(
- struct halmac_adapter *halmac_adapter)
-{
- return HALMAC_RET_SUCCESS;
-}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h
deleted file mode 100644
index 8ba7bee0a99b..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_8822B_USB_H_
-#define _HALMAC_API_8822B_USB_H_
-
-extern struct halmac_intf_phy_para_ HALMAC_RTL8822B_USB2_PHY[];
-extern struct halmac_intf_phy_para_ HALMAC_RTL8822B_USB3_PHY[];
-
-#include "../../halmac_2_platform.h"
-#include "../../halmac_type.h"
-
-enum halmac_ret_status
-halmac_mac_power_switch_8822b_usb(struct halmac_adapter *halmac_adapter,
- enum halmac_mac_power halmac_power);
-
-enum halmac_ret_status
-halmac_phy_cfg_8822b_usb(struct halmac_adapter *halmac_adapter,
- enum halmac_intf_phy_platform platform);
-
-enum halmac_ret_status halmac_interface_integration_tuning_8822b_usb(
- struct halmac_adapter *halmac_adapter);
-
-#endif /* _HALMAC_API_8822B_USB_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c
deleted file mode 100644
index bcc402838bc0..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c
+++ /dev/null
@@ -1,403 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_8822b_cfg.h"
-#include "halmac_func_8822b.h"
-
-/*SDIO RQPN Mapping*/
-static struct halmac_rqpn_ HALMAC_RQPN_SDIO_8822B[] = {
- /* { mode, vo_map, vi_map, be_map, bk_map, mg_map, hi_map } */
- {HALMAC_TRX_MODE_NORMAL, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_TRXSHARE, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_WMM, HALMAC_MAP2_HQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_NQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_P2P, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_LOOPBACK, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
-};
-
-/*PCIE RQPN Mapping*/
-static struct halmac_rqpn_ HALMAC_RQPN_PCIE_8822B[] = {
- /* { mode, vo_map, vi_map, be_map, bk_map, mg_map, hi_map } */
- {HALMAC_TRX_MODE_NORMAL, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_TRXSHARE, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_WMM, HALMAC_MAP2_HQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_NQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_P2P, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_LOOPBACK, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
-};
-
-/*USB 2 Bulkout RQPN Mapping*/
-static struct halmac_rqpn_ HALMAC_RQPN_2BULKOUT_8822B[] = {
- /* { mode, vo_map, vi_map, be_map, bk_map, mg_map, hi_map } */
- {HALMAC_TRX_MODE_NORMAL, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_HQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_TRXSHARE, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_NQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_WMM, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_HQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_P2P, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ,
- HALMAC_MAP2_NQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_LOOPBACK, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ,
- HALMAC_MAP2_HQ, HALMAC_MAP2_NQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ,
- HALMAC_MAP2_HQ, HALMAC_MAP2_NQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
-};
-
-/*USB 3 Bulkout RQPN Mapping*/
-static struct halmac_rqpn_ HALMAC_RQPN_3BULKOUT_8822B[] = {
- /* { mode, vo_map, vi_map, be_map, bk_map, mg_map, hi_map } */
- {HALMAC_TRX_MODE_NORMAL, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_TRXSHARE, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_WMM, HALMAC_MAP2_HQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_NQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_P2P, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_NQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_LOOPBACK, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_NQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_NQ, HALMAC_MAP2_HQ, HALMAC_MAP2_HQ},
-};
-
-/*USB 4 Bulkout RQPN Mapping*/
-static struct halmac_rqpn_ HALMAC_RQPN_4BULKOUT_8822B[] = {
- /* { mode, vo_map, vi_map, be_map, bk_map, mg_map, hi_map } */
- {HALMAC_TRX_MODE_NORMAL, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_TRXSHARE, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_WMM, HALMAC_MAP2_HQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_NQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_P2P, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ, HALMAC_MAP2_LQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_LOOPBACK, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, HALMAC_MAP2_NQ, HALMAC_MAP2_NQ,
- HALMAC_MAP2_LQ, HALMAC_MAP2_LQ, HALMAC_MAP2_EXQ, HALMAC_MAP2_HQ},
-};
-
-/*SDIO Page Number*/
-static struct halmac_pg_num_ HALMAC_PG_NUM_SDIO_8822B[] = {
- /* { mode, hq_num, nq_num, lq_num, exq_num, gap_num} */
- {HALMAC_TRX_MODE_NORMAL, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_TRXSHARE, 32, 32, 32, 32, 1},
- {HALMAC_TRX_MODE_WMM, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_P2P, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_LOOPBACK, 64, 64, 64, 64, 640},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, 64, 64, 64, 64, 640},
-};
-
-/*PCIE Page Number*/
-static struct halmac_pg_num_ HALMAC_PG_NUM_PCIE_8822B[] = {
- /* { mode, hq_num, nq_num, lq_num, exq_num, gap_num} */
- {HALMAC_TRX_MODE_NORMAL, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_TRXSHARE, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_WMM, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_P2P, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_LOOPBACK, 64, 64, 64, 64, 640},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, 64, 64, 64, 64, 640},
-};
-
-/*USB 2 Bulkout Page Number*/
-static struct halmac_pg_num_ HALMAC_PG_NUM_2BULKOUT_8822B[] = {
- /* { mode, hq_num, nq_num, lq_num, exq_num, gap_num} */
- {HALMAC_TRX_MODE_NORMAL, 64, 64, 0, 0, 1},
- {HALMAC_TRX_MODE_TRXSHARE, 64, 64, 0, 0, 1},
- {HALMAC_TRX_MODE_WMM, 64, 64, 0, 0, 1},
- {HALMAC_TRX_MODE_P2P, 64, 64, 0, 0, 1},
- {HALMAC_TRX_MODE_LOOPBACK, 64, 64, 0, 0, 1024},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, 64, 64, 0, 0, 1024},
-};
-
-/*USB 3 Bulkout Page Number*/
-static struct halmac_pg_num_ HALMAC_PG_NUM_3BULKOUT_8822B[] = {
- /* { mode, hq_num, nq_num, lq_num, exq_num, gap_num} */
- {HALMAC_TRX_MODE_NORMAL, 64, 64, 64, 0, 1},
- {HALMAC_TRX_MODE_TRXSHARE, 64, 64, 64, 0, 1},
- {HALMAC_TRX_MODE_WMM, 64, 64, 64, 0, 1},
- {HALMAC_TRX_MODE_P2P, 64, 64, 64, 0, 1},
- {HALMAC_TRX_MODE_LOOPBACK, 64, 64, 64, 0, 1024},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, 64, 64, 64, 0, 1024},
-};
-
-/*USB 4 Bulkout Page Number*/
-static struct halmac_pg_num_ HALMAC_PG_NUM_4BULKOUT_8822B[] = {
- /* { mode, hq_num, nq_num, lq_num, exq_num, gap_num} */
- {HALMAC_TRX_MODE_NORMAL, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_TRXSHARE, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_WMM, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_P2P, 64, 64, 64, 64, 1},
- {HALMAC_TRX_MODE_LOOPBACK, 64, 64, 64, 64, 640},
- {HALMAC_TRX_MODE_DELAY_LOOPBACK, 64, 64, 64, 64, 640},
-};
-
-enum halmac_ret_status
-halmac_txdma_queue_mapping_8822b(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode)
-{
- u16 value16;
- void *driver_adapter = NULL;
- struct halmac_rqpn_ *curr_rqpn_sel = NULL;
- enum halmac_ret_status status;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- curr_rqpn_sel = HALMAC_RQPN_SDIO_8822B;
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_PCIE) {
- curr_rqpn_sel = HALMAC_RQPN_PCIE_8822B;
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_USB) {
- if (halmac_adapter->halmac_bulkout_num == 2) {
- curr_rqpn_sel = HALMAC_RQPN_2BULKOUT_8822B;
- } else if (halmac_adapter->halmac_bulkout_num == 3) {
- curr_rqpn_sel = HALMAC_RQPN_3BULKOUT_8822B;
- } else if (halmac_adapter->halmac_bulkout_num == 4) {
- curr_rqpn_sel = HALMAC_RQPN_4BULKOUT_8822B;
- } else {
- pr_err("[ERR]interface not support\n");
- return HALMAC_RET_NOT_SUPPORT;
- }
- } else {
- return HALMAC_RET_NOT_SUPPORT;
- }
-
- status = halmac_rqpn_parser_88xx(halmac_adapter, halmac_trx_mode,
- curr_rqpn_sel);
- if (status != HALMAC_RET_SUCCESS)
- return status;
-
- value16 = 0;
- value16 |= BIT_TXDMA_HIQ_MAP(
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_HI]);
- value16 |= BIT_TXDMA_MGQ_MAP(
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_MG]);
- value16 |= BIT_TXDMA_BKQ_MAP(
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BK]);
- value16 |= BIT_TXDMA_BEQ_MAP(
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BE]);
- value16 |= BIT_TXDMA_VIQ_MAP(
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VI]);
- value16 |= BIT_TXDMA_VOQ_MAP(
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VO]);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_TXDMA_PQ_MAP, value16);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_priority_queue_config_8822b(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode)
-{
- u8 transfer_mode = 0;
- u8 value8;
- u32 counter;
- enum halmac_ret_status status;
- struct halmac_pg_num_ *curr_pg_num = NULL;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (halmac_adapter->txff_allocation.la_mode == HALMAC_LA_MODE_DISABLE) {
- if (halmac_adapter->txff_allocation.rx_fifo_expanding_mode ==
- HALMAC_RX_FIFO_EXPANDING_MODE_DISABLE) {
- halmac_adapter->txff_allocation.tx_fifo_pg_num =
- HALMAC_TX_FIFO_SIZE_8822B >>
- HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
- } else if (halmac_adapter->txff_allocation
- .rx_fifo_expanding_mode ==
- HALMAC_RX_FIFO_EXPANDING_MODE_1_BLOCK) {
- halmac_adapter->txff_allocation.tx_fifo_pg_num =
- HALMAC_TX_FIFO_SIZE_EX_1_BLK_8822B >>
- HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
- halmac_adapter->hw_config_info.tx_fifo_size =
- HALMAC_TX_FIFO_SIZE_EX_1_BLK_8822B;
- if (HALMAC_RX_FIFO_SIZE_EX_1_BLK_8822B <=
- HALMAC_RX_FIFO_SIZE_EX_1_BLK_MAX_8822B)
- halmac_adapter->hw_config_info.rx_fifo_size =
- HALMAC_RX_FIFO_SIZE_EX_1_BLK_8822B;
- else
- halmac_adapter->hw_config_info.rx_fifo_size =
- HALMAC_RX_FIFO_SIZE_EX_1_BLK_MAX_8822B;
- } else {
- halmac_adapter->txff_allocation.tx_fifo_pg_num =
- HALMAC_TX_FIFO_SIZE_8822B >>
- HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
- pr_err("[ERR]rx_fifo_expanding_mode = %d not support\n",
- halmac_adapter->txff_allocation
- .rx_fifo_expanding_mode);
- }
- } else {
- halmac_adapter->txff_allocation.tx_fifo_pg_num =
- HALMAC_TX_FIFO_SIZE_LA_8822B >>
- HALMAC_TX_PAGE_SIZE_2_POWER_8822B;
- }
- halmac_adapter->txff_allocation.rsvd_pg_num =
- (halmac_adapter->txff_allocation.rsvd_drv_pg_num +
- HALMAC_RSVD_H2C_EXTRAINFO_PGNUM_8822B +
- HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B +
- HALMAC_RSVD_CPU_INSTRUCTION_PGNUM_8822B +
- HALMAC_RSVD_FW_TXBUFF_PGNUM_8822B);
- if (halmac_adapter->txff_allocation.rsvd_pg_num >
- halmac_adapter->txff_allocation.tx_fifo_pg_num)
- return HALMAC_RET_CFG_TXFIFO_PAGE_FAIL;
-
- halmac_adapter->txff_allocation.ac_q_pg_num =
- halmac_adapter->txff_allocation.tx_fifo_pg_num -
- halmac_adapter->txff_allocation.rsvd_pg_num;
- halmac_adapter->txff_allocation.rsvd_pg_bndy =
- halmac_adapter->txff_allocation.tx_fifo_pg_num -
- halmac_adapter->txff_allocation.rsvd_pg_num;
- halmac_adapter->txff_allocation.rsvd_fw_txbuff_pg_bndy =
- halmac_adapter->txff_allocation.tx_fifo_pg_num -
- HALMAC_RSVD_FW_TXBUFF_PGNUM_8822B;
- halmac_adapter->txff_allocation.rsvd_cpu_instr_pg_bndy =
- halmac_adapter->txff_allocation.rsvd_fw_txbuff_pg_bndy -
- HALMAC_RSVD_CPU_INSTRUCTION_PGNUM_8822B;
- halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy =
- halmac_adapter->txff_allocation.rsvd_cpu_instr_pg_bndy -
- HALMAC_RSVD_H2C_QUEUE_PGNUM_8822B;
- halmac_adapter->txff_allocation.rsvd_h2c_extra_info_pg_bndy =
- halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy -
- HALMAC_RSVD_H2C_EXTRAINFO_PGNUM_8822B;
- halmac_adapter->txff_allocation.rsvd_drv_pg_bndy =
- halmac_adapter->txff_allocation.rsvd_h2c_extra_info_pg_bndy -
- halmac_adapter->txff_allocation.rsvd_drv_pg_num;
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- curr_pg_num = HALMAC_PG_NUM_SDIO_8822B;
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_PCIE) {
- curr_pg_num = HALMAC_PG_NUM_PCIE_8822B;
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_USB) {
- if (halmac_adapter->halmac_bulkout_num == 2) {
- curr_pg_num = HALMAC_PG_NUM_2BULKOUT_8822B;
- } else if (halmac_adapter->halmac_bulkout_num == 3) {
- curr_pg_num = HALMAC_PG_NUM_3BULKOUT_8822B;
- } else if (halmac_adapter->halmac_bulkout_num == 4) {
- curr_pg_num = HALMAC_PG_NUM_4BULKOUT_8822B;
- } else {
- pr_err("[ERR]interface not support\n");
- return HALMAC_RET_NOT_SUPPORT;
- }
- } else {
- return HALMAC_RET_NOT_SUPPORT;
- }
-
- status = halmac_pg_num_parser_88xx(halmac_adapter, halmac_trx_mode,
- curr_pg_num);
- if (status != HALMAC_RET_SUCCESS)
- return status;
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_INFO_1,
- halmac_adapter->txff_allocation.high_queue_pg_num);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_INFO_2,
- halmac_adapter->txff_allocation.low_queue_pg_num);
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_FIFOPAGE_INFO_3,
- halmac_adapter->txff_allocation.normal_queue_pg_num);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_INFO_4,
- halmac_adapter->txff_allocation.extra_queue_pg_num);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_INFO_5,
- halmac_adapter->txff_allocation.pub_queue_pg_num);
-
- halmac_adapter->sdio_free_space.high_queue_number =
- halmac_adapter->txff_allocation.high_queue_pg_num;
- halmac_adapter->sdio_free_space.normal_queue_number =
- halmac_adapter->txff_allocation.normal_queue_pg_num;
- halmac_adapter->sdio_free_space.low_queue_number =
- halmac_adapter->txff_allocation.low_queue_pg_num;
- halmac_adapter->sdio_free_space.public_queue_number =
- halmac_adapter->txff_allocation.pub_queue_pg_num;
- halmac_adapter->sdio_free_space.extra_queue_number =
- halmac_adapter->txff_allocation.extra_queue_pg_num;
-
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_RQPN_CTRL_2,
- HALMAC_REG_READ_32(halmac_adapter, REG_RQPN_CTRL_2) | BIT(31));
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
- HALMAC_REG_WRITE_16(halmac_adapter, REG_BCNQ_BDNY_V1,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCNQ_PGBNDY_V1));
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2 + 2,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
- HALMAC_REG_WRITE_16(halmac_adapter, REG_BCNQ1_BDNY_V1,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCNQ_PGBNDY_V1));
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_RXFF_BNDY,
- halmac_adapter->hw_config_info.rx_fifo_size -
- HALMAC_C2H_PKT_BUF_8822B - 1);
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_USB) {
- value8 = (u8)(
- HALMAC_REG_READ_8(halmac_adapter, REG_AUTO_LLT_V1) &
- ~(BIT_MASK_BLK_DESC_NUM << BIT_SHIFT_BLK_DESC_NUM));
- value8 = (u8)(value8 | (HALMAC_BLK_DESC_NUM_8822B
- << BIT_SHIFT_BLK_DESC_NUM));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_AUTO_LLT_V1, value8);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_AUTO_LLT_V1 + 3,
- HALMAC_BLK_DESC_NUM_8822B);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXDMA_OFFSET_CHK + 1,
- HALMAC_REG_READ_8(halmac_adapter,
- REG_TXDMA_OFFSET_CHK + 1) |
- BIT(1));
- }
-
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_AUTO_LLT_V1,
- (u8)(HALMAC_REG_READ_8(halmac_adapter, REG_AUTO_LLT_V1) |
- BIT_AUTO_INIT_LLT_V1));
- counter = 1000;
- while (HALMAC_REG_READ_8(halmac_adapter, REG_AUTO_LLT_V1) &
- BIT_AUTO_INIT_LLT_V1) {
- counter--;
- if (counter == 0)
- return HALMAC_RET_INIT_LLT_FAIL;
- }
-
- if (halmac_trx_mode == HALMAC_TRX_MODE_DELAY_LOOPBACK) {
- transfer_mode = HALMAC_TRNSFER_LOOPBACK_DELAY;
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_WMAC_LBK_BUF_HD_V1,
- (u16)halmac_adapter->txff_allocation.rsvd_pg_bndy);
- } else if (halmac_trx_mode == HALMAC_TRX_MODE_LOOPBACK) {
- transfer_mode = HALMAC_TRNSFER_LOOPBACK_DIRECT;
- } else {
- transfer_mode = HALMAC_TRNSFER_NORMAL;
- }
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR + 3, (u8)transfer_mode);
-
- return HALMAC_RET_SUCCESS;
-}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h
deleted file mode 100644
index 8488fc5f98ee..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_FUNC_8822B_H_
-#define _HALMAC_FUNC_8822B_H_
-
-#include "../../halmac_type.h"
-
-enum halmac_ret_status
-halmac_txdma_queue_mapping_8822b(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode);
-
-enum halmac_ret_status
-halmac_priority_queue_config_8822b(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode);
-
-#endif /* _HALMAC_FUNC_8822B_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h
deleted file mode 100644
index ec9b10277450..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_88XX_CFG_H_
-#define _HALMAC_88XX_CFG_H_
-
-#include "../halmac_2_platform.h"
-#include "../halmac_type.h"
-#include "../halmac_api.h"
-#include "../halmac_bit2.h"
-#include "../halmac_reg2.h"
-#include "../halmac_pwr_seq_cmd.h"
-#include "halmac_func_88xx.h"
-#include "halmac_api_88xx.h"
-#include "halmac_api_88xx_usb.h"
-#include "halmac_api_88xx_pcie.h"
-#include "halmac_api_88xx_sdio.h"
-
-#define HALMAC_SVN_VER_88XX "13359M"
-
-#define HALMAC_MAJOR_VER_88XX 0x0001 /* major version, ver_1 for async_api */
-/* For halmac_api num change or prototype change, increment prototype version.
- * Otherwise, increase minor version
- */
-#define HALMAC_PROTOTYPE_VER_88XX 0x0003 /* prototype version */
-#define HALMAC_MINOR_VER_88XX 0x0005 /* minor version */
-#define HALMAC_PATCH_VER_88XX 0x0000 /* patch version */
-
-#define HALMAC_C2H_DATA_OFFSET_88XX 10
-#define HALMAC_RX_AGG_ALIGNMENT_SIZE_88XX 8
-#define HALMAC_TX_AGG_ALIGNMENT_SIZE_88XX 8
-#define HALMAC_TX_AGG_BUFF_SIZE_88XX 32768
-
-#define HALMAC_EXTRA_INFO_BUFF_SIZE_88XX 4096 /*4K*/
-#define HALMAC_EXTRA_INFO_BUFF_SIZE_FULL_FIFO_88XX 16384 /*16K*/
-#define HALMAC_FW_OFFLOAD_CMD_SIZE_88XX \
- 12 /*Fw config parameter cmd size, each 12 byte*/
-
-#define HALMAC_H2C_CMD_ORIGINAL_SIZE_88XX 8
-#define HALMAC_H2C_CMD_SIZE_UNIT_88XX 32 /* Only support 32 byte packet now */
-
-#define HALMAC_NLO_INFO_SIZE_88XX 1024
-
-/* Download FW */
-#define HALMAC_FW_SIZE_MAX_88XX 0x40000
-#define HALMAC_FWHDR_SIZE_88XX 64
-#define HALMAC_FW_CHKSUM_DUMMY_SIZE_88XX 8
-#define HALMAC_FW_MAX_DL_SIZE_88XX 0x2000 /* need power of 2 */
-/* Max dlfw size cannot exceed 31K, because of an SDIO HW restriction */
-#define HALMAC_FW_CFG_MAX_DL_SIZE_MAX_88XX 0x7C00
-
-#define DLFW_RESTORE_REG_NUM_88XX 9
-#define ID_INFORM_DLEMEM_RDY 0x80
-
-/* FW header information */
-#define HALMAC_FWHDR_OFFSET_VERSION_88XX 4
-#define HALMAC_FWHDR_OFFSET_SUBVERSION_88XX 6
-#define HALMAC_FWHDR_OFFSET_SUBINDEX_88XX 7
-#define HALMAC_FWHDR_OFFSET_MEM_USAGE_88XX 24
-#define HALMAC_FWHDR_OFFSET_H2C_FORMAT_VER_88XX 28
-#define HALMAC_FWHDR_OFFSET_DMEM_ADDR_88XX 32
-#define HALMAC_FWHDR_OFFSET_DMEM_SIZE_88XX 36
-#define HALMAC_FWHDR_OFFSET_IRAM_SIZE_88XX 48
-#define HALMAC_FWHDR_OFFSET_ERAM_SIZE_88XX 52
-#define HALMAC_FWHDR_OFFSET_EMEM_ADDR_88XX 56
-#define HALMAC_FWHDR_OFFSET_IRAM_ADDR_88XX 60
-
-/* HW memory address */
-#define HALMAC_OCPBASE_TXBUF_88XX 0x18780000
-#define HALMAC_OCPBASE_DMEM_88XX 0x00200000
-#define HALMAC_OCPBASE_IMEM_88XX 0x00000000
-
-/* Define the SDIO bus CLK threshold, to avoid CMD53 failures that
- * result from the SDIO CLK failing to sync to ana_clk
- */
-#define HALMAC_SD_CLK_THRESHOLD_88XX 150000000 /* 150MHz */
-
-/* MAC clock */
-#define HALMAC_MAC_CLOCK_88XX 80 /* 80M */
-
-/* H2C/C2H*/
-#define HALMAC_H2C_CMD_SIZE_88XX 32
-#define HALMAC_H2C_CMD_HDR_SIZE_88XX 8
-
-#define HALMAC_PROTECTED_EFUSE_SIZE_88XX 0x60
-
-/* Function enable */
-#define HALMAC_FUNCTION_ENABLE_88XX 0xDC
-
-/* FIFO size & packet size */
-/* #define HALMAC_WOWLAN_PATTERN_SIZE 256 */
-
-/* CFEND rate */
-#define HALMAC_BASIC_CFEND_RATE_88XX 0x5
-#define HALMAC_STBC_CFEND_RATE_88XX 0xF
-
-/* Response rate */
-#define HALMAC_RESPONSE_RATE_BITMAP_ALL_88XX 0xFFFFF
-#define HALMAC_RESPONSE_RATE_88XX HALMAC_RESPONSE_RATE_BITMAP_ALL_88XX
-
-/* Spec SIFS */
-#define HALMAC_SIFS_CCK_PTCL_88XX 16
-#define HALMAC_SIFS_OFDM_PTCL_88XX 16
-
-/* Retry limit */
-#define HALMAC_LONG_RETRY_LIMIT_88XX 8
-#define HALMAC_SHORT_RETRY_LIMIT_88XX 7
-
-/* Slot, SIFS, PIFS time */
-#define HALMAC_SLOT_TIME_88XX 0x05
-#define HALMAC_PIFS_TIME_88XX 0x19
-#define HALMAC_SIFS_CCK_CTX_88XX 0xA
-#define HALMAC_SIFS_OFDM_CTX_88XX 0xA
-#define HALMAC_SIFS_CCK_TRX_88XX 0x10
-#define HALMAC_SIFS_OFDM_TRX_88XX 0x10
-
-/* TXOP limit */
-#define HALMAC_VO_TXOP_LIMIT_88XX 0x186
-#define HALMAC_VI_TXOP_LIMIT_88XX 0x3BC
-
-/* NAV */
-#define HALMAC_RDG_NAV_88XX 0x05
-#define HALMAC_TXOP_NAV_88XX 0x1B
-
-/* TSF */
-#define HALMAC_CCK_RX_TSF_88XX 0x30
-#define HALMAC_OFDM_RX_TSF_88XX 0x30
-
-/* Send beacon related */
-#define HALMAC_TBTT_PROHIBIT_88XX 0x04
-#define HALMAC_TBTT_HOLD_TIME_88XX 0x064
-#define HALMAC_DRIVER_EARLY_INT_88XX 0x04
-#define HALMAC_BEACON_DMA_TIM_88XX 0x02
-
-/* RX filter */
-#define HALMAC_RX_FILTER0_RECIVE_ALL_88XX 0xFFFFFFF
-#define HALMAC_RX_FILTER0_88XX HALMAC_RX_FILTER0_RECIVE_ALL_88XX
-#define HALMAC_RX_FILTER_RECIVE_ALL_88XX 0xFFFF
-#define HALMAC_RX_FILTER_88XX HALMAC_RX_FILTER_RECIVE_ALL_88XX
-
-/* RCR */
-#define HALMAC_RCR_CONFIG_88XX 0xE400631E
-
-/* Security config */
-#define HALMAC_SECURITY_CONFIG_88XX 0x01CC
-
-/* CCK rate ACK timeout */
-#define HALMAC_ACK_TO_CCK_88XX 0x40
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
deleted file mode 100644
index acd7930e417d..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
+++ /dev/null
@@ -1,5970 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_88xx_cfg.h"
-
-/**
- * halmac_init_adapter_para_88xx() - init halmac adapter
- * @halmac_adapter
- *
- * SD1 internal use
- *
- * Author : KaiYuan Chang/Ivan Lin
- * Return : void
- */
-void halmac_init_adapter_para_88xx(struct halmac_adapter *halmac_adapter)
-{
- halmac_adapter->api_record.array_wptr = 0;
- halmac_adapter->hal_adapter_backup = halmac_adapter;
- halmac_adapter->hal_efuse_map = (u8 *)NULL;
- halmac_adapter->hal_efuse_map_valid = false;
- halmac_adapter->efuse_end = 0;
- halmac_adapter->hal_mac_addr[0].address_l_h.address_low = 0;
- halmac_adapter->hal_mac_addr[0].address_l_h.address_high = 0;
- halmac_adapter->hal_mac_addr[1].address_l_h.address_low = 0;
- halmac_adapter->hal_mac_addr[1].address_l_h.address_high = 0;
- halmac_adapter->hal_bss_addr[0].address_l_h.address_low = 0;
- halmac_adapter->hal_bss_addr[0].address_l_h.address_high = 0;
- halmac_adapter->hal_bss_addr[1].address_l_h.address_low = 0;
- halmac_adapter->hal_bss_addr[1].address_l_h.address_high = 0;
-
- halmac_adapter->low_clk = false;
- halmac_adapter->max_download_size = HALMAC_FW_MAX_DL_SIZE_88XX;
-
- /* Init LPS Option */
- halmac_adapter->fwlps_option.mode = 0x01; /*0:Active 1:LPS 2:WMMPS*/
- halmac_adapter->fwlps_option.awake_interval = 1;
- halmac_adapter->fwlps_option.enter_32K = 1;
- halmac_adapter->fwlps_option.clk_request = 0;
- halmac_adapter->fwlps_option.rlbm = 0;
- halmac_adapter->fwlps_option.smart_ps = 0;
- halmac_adapter->fwlps_option.awake_interval = 1;
- halmac_adapter->fwlps_option.all_queue_uapsd = 0;
- halmac_adapter->fwlps_option.pwr_state = 0;
- halmac_adapter->fwlps_option.low_pwr_rx_beacon = 0;
- halmac_adapter->fwlps_option.ant_auto_switch = 0;
- halmac_adapter->fwlps_option.ps_allow_bt_high_priority = 0;
- halmac_adapter->fwlps_option.protect_bcn = 0;
- halmac_adapter->fwlps_option.silence_period = 0;
- halmac_adapter->fwlps_option.fast_bt_connect = 0;
- halmac_adapter->fwlps_option.two_antenna_en = 0;
- halmac_adapter->fwlps_option.adopt_user_setting = 1;
- halmac_adapter->fwlps_option.drv_bcn_early_shift = 0;
-
- halmac_adapter->config_para_info.cfg_para_buf = NULL;
- halmac_adapter->config_para_info.para_buf_w = NULL;
- halmac_adapter->config_para_info.para_num = 0;
- halmac_adapter->config_para_info.full_fifo_mode = false;
- halmac_adapter->config_para_info.para_buf_size = 0;
- halmac_adapter->config_para_info.avai_para_buf_size = 0;
- halmac_adapter->config_para_info.offset_accumulation = 0;
- halmac_adapter->config_para_info.value_accumulation = 0;
- halmac_adapter->config_para_info.datapack_segment = 0;
-
- halmac_adapter->ch_sw_info.ch_info_buf = NULL;
- halmac_adapter->ch_sw_info.ch_info_buf_w = NULL;
- halmac_adapter->ch_sw_info.extra_info_en = 0;
- halmac_adapter->ch_sw_info.buf_size = 0;
- halmac_adapter->ch_sw_info.avai_buf_size = 0;
- halmac_adapter->ch_sw_info.total_size = 0;
- halmac_adapter->ch_sw_info.ch_num = 0;
-
- halmac_adapter->drv_info_size = 0;
-
- memset(halmac_adapter->api_record.api_array, HALMAC_API_STUFF,
- sizeof(halmac_adapter->api_record.api_array));
-
- halmac_adapter->txff_allocation.tx_fifo_pg_num = 0;
- halmac_adapter->txff_allocation.ac_q_pg_num = 0;
- halmac_adapter->txff_allocation.rsvd_pg_bndy = 0;
- halmac_adapter->txff_allocation.rsvd_drv_pg_bndy = 0;
- halmac_adapter->txff_allocation.rsvd_h2c_extra_info_pg_bndy = 0;
- halmac_adapter->txff_allocation.rsvd_h2c_queue_pg_bndy = 0;
- halmac_adapter->txff_allocation.rsvd_cpu_instr_pg_bndy = 0;
- halmac_adapter->txff_allocation.rsvd_fw_txbuff_pg_bndy = 0;
- halmac_adapter->txff_allocation.pub_queue_pg_num = 0;
- halmac_adapter->txff_allocation.high_queue_pg_num = 0;
- halmac_adapter->txff_allocation.low_queue_pg_num = 0;
- halmac_adapter->txff_allocation.normal_queue_pg_num = 0;
- halmac_adapter->txff_allocation.extra_queue_pg_num = 0;
-
- halmac_adapter->txff_allocation.la_mode = HALMAC_LA_MODE_DISABLE;
- halmac_adapter->txff_allocation.rx_fifo_expanding_mode =
- HALMAC_RX_FIFO_EXPANDING_MODE_DISABLE;
-
- halmac_init_adapter_dynamic_para_88xx(halmac_adapter);
- halmac_init_state_machine_88xx(halmac_adapter);
-}
-
-/**
- * halmac_init_adapter_dynamic_para_88xx() - init halmac adapter dynamic parameters
- * @halmac_adapter
- *
- * SD1 internal use
- *
- * Author : KaiYuan Chang/Ivan Lin
- * Return : void
- */
-void halmac_init_adapter_dynamic_para_88xx(
- struct halmac_adapter *halmac_adapter)
-{
- halmac_adapter->h2c_packet_seq = 0;
- halmac_adapter->h2c_buf_free_space = 0;
- halmac_adapter->gen_info_valid = false;
-}
-
-/**
- * halmac_init_state_machine_88xx() - init halmac software state machine
- * @halmac_adapter
- *
- * SD1 internal use.
- *
- * Author : KaiYuan Chang/Ivan Lin
- * Return : void
- */
-void halmac_init_state_machine_88xx(struct halmac_adapter *halmac_adapter)
-{
- struct halmac_state *state = &halmac_adapter->halmac_state;
-
- halmac_init_offload_feature_state_machine_88xx(halmac_adapter);
-
- state->api_state = HALMAC_API_STATE_INIT;
-
- state->dlfw_state = HALMAC_DLFW_NONE;
- state->mac_power = HALMAC_MAC_POWER_OFF;
- state->ps_state = HALMAC_PS_STATE_UNDEFINE;
-}
-
-/**
- * halmac_mount_api_88xx() - attach functions to function pointer
- * @halmac_adapter
- *
- * SD1 internal use
- *
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- */
-enum halmac_ret_status
-halmac_mount_api_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = halmac_adapter->driver_adapter;
- struct halmac_api *halmac_api = (struct halmac_api *)NULL;
-
- halmac_adapter->halmac_api =
- kzalloc(sizeof(struct halmac_api), GFP_KERNEL);
- if (!halmac_adapter->halmac_api)
- return HALMAC_RET_MALLOC_FAIL;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- HALMAC_SVN_VER_88XX "\n");
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "HALMAC_MAJOR_VER_88XX = %x\n", HALMAC_MAJOR_VER_88XX);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "HALMAC_PROTOTYPE_88XX = %x\n",
- HALMAC_PROTOTYPE_VER_88XX);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "HALMAC_MINOR_VER_88XX = %x\n", HALMAC_MINOR_VER_88XX);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "HALMAC_PATCH_VER_88XX = %x\n", HALMAC_PATCH_VER_88XX);
-
- /* Mount function pointer */
- halmac_api->halmac_download_firmware = halmac_download_firmware_88xx;
- halmac_api->halmac_free_download_firmware =
- halmac_free_download_firmware_88xx;
- halmac_api->halmac_get_fw_version = halmac_get_fw_version_88xx;
- halmac_api->halmac_cfg_mac_addr = halmac_cfg_mac_addr_88xx;
- halmac_api->halmac_cfg_bssid = halmac_cfg_bssid_88xx;
- halmac_api->halmac_cfg_multicast_addr = halmac_cfg_multicast_addr_88xx;
- halmac_api->halmac_pre_init_system_cfg =
- halmac_pre_init_system_cfg_88xx;
- halmac_api->halmac_init_system_cfg = halmac_init_system_cfg_88xx;
- halmac_api->halmac_init_edca_cfg = halmac_init_edca_cfg_88xx;
- halmac_api->halmac_cfg_operation_mode = halmac_cfg_operation_mode_88xx;
- halmac_api->halmac_cfg_ch_bw = halmac_cfg_ch_bw_88xx;
- halmac_api->halmac_cfg_bw = halmac_cfg_bw_88xx;
- halmac_api->halmac_init_wmac_cfg = halmac_init_wmac_cfg_88xx;
- halmac_api->halmac_init_mac_cfg = halmac_init_mac_cfg_88xx;
- halmac_api->halmac_init_sdio_cfg = halmac_init_sdio_cfg_88xx;
- halmac_api->halmac_init_usb_cfg = halmac_init_usb_cfg_88xx;
- halmac_api->halmac_init_pcie_cfg = halmac_init_pcie_cfg_88xx;
- halmac_api->halmac_deinit_sdio_cfg = halmac_deinit_sdio_cfg_88xx;
- halmac_api->halmac_deinit_usb_cfg = halmac_deinit_usb_cfg_88xx;
- halmac_api->halmac_deinit_pcie_cfg = halmac_deinit_pcie_cfg_88xx;
- halmac_api->halmac_dump_efuse_map = halmac_dump_efuse_map_88xx;
- halmac_api->halmac_dump_efuse_map_bt = halmac_dump_efuse_map_bt_88xx;
- halmac_api->halmac_write_efuse_bt = halmac_write_efuse_bt_88xx;
- halmac_api->halmac_dump_logical_efuse_map =
- halmac_dump_logical_efuse_map_88xx;
- halmac_api->halmac_pg_efuse_by_map = halmac_pg_efuse_by_map_88xx;
- halmac_api->halmac_get_efuse_size = halmac_get_efuse_size_88xx;
- halmac_api->halmac_get_efuse_available_size =
- halmac_get_efuse_available_size_88xx;
- halmac_api->halmac_get_c2h_info = halmac_get_c2h_info_88xx;
-
- halmac_api->halmac_get_logical_efuse_size =
- halmac_get_logical_efuse_size_88xx;
-
- halmac_api->halmac_write_logical_efuse =
- halmac_write_logical_efuse_88xx;
- halmac_api->halmac_read_logical_efuse = halmac_read_logical_efuse_88xx;
-
- halmac_api->halmac_cfg_fwlps_option = halmac_cfg_fwlps_option_88xx;
- halmac_api->halmac_cfg_fwips_option = halmac_cfg_fwips_option_88xx;
- halmac_api->halmac_enter_wowlan = halmac_enter_wowlan_88xx;
- halmac_api->halmac_leave_wowlan = halmac_leave_wowlan_88xx;
- halmac_api->halmac_enter_ps = halmac_enter_ps_88xx;
- halmac_api->halmac_leave_ps = halmac_leave_ps_88xx;
- halmac_api->halmac_h2c_lb = halmac_h2c_lb_88xx;
- halmac_api->halmac_debug = halmac_debug_88xx;
- halmac_api->halmac_cfg_parameter = halmac_cfg_parameter_88xx;
- halmac_api->halmac_update_datapack = halmac_update_datapack_88xx;
- halmac_api->halmac_run_datapack = halmac_run_datapack_88xx;
- halmac_api->halmac_cfg_drv_info = halmac_cfg_drv_info_88xx;
- halmac_api->halmac_send_bt_coex = halmac_send_bt_coex_88xx;
- halmac_api->halmac_verify_platform_api =
- halmac_verify_platform_api_88xx;
- halmac_api->halmac_update_packet = halmac_update_packet_88xx;
- halmac_api->halmac_bcn_ie_filter = halmac_bcn_ie_filter_88xx;
- halmac_api->halmac_cfg_txbf = halmac_cfg_txbf_88xx;
- halmac_api->halmac_cfg_mumimo = halmac_cfg_mumimo_88xx;
- halmac_api->halmac_cfg_sounding = halmac_cfg_sounding_88xx;
- halmac_api->halmac_del_sounding = halmac_del_sounding_88xx;
- halmac_api->halmac_su_bfer_entry_init = halmac_su_bfer_entry_init_88xx;
- halmac_api->halmac_su_bfee_entry_init = halmac_su_bfee_entry_init_88xx;
- halmac_api->halmac_mu_bfer_entry_init = halmac_mu_bfer_entry_init_88xx;
- halmac_api->halmac_mu_bfee_entry_init = halmac_mu_bfee_entry_init_88xx;
- halmac_api->halmac_su_bfer_entry_del = halmac_su_bfer_entry_del_88xx;
- halmac_api->halmac_su_bfee_entry_del = halmac_su_bfee_entry_del_88xx;
- halmac_api->halmac_mu_bfer_entry_del = halmac_mu_bfer_entry_del_88xx;
- halmac_api->halmac_mu_bfee_entry_del = halmac_mu_bfee_entry_del_88xx;
-
- halmac_api->halmac_add_ch_info = halmac_add_ch_info_88xx;
- halmac_api->halmac_add_extra_ch_info = halmac_add_extra_ch_info_88xx;
- halmac_api->halmac_ctrl_ch_switch = halmac_ctrl_ch_switch_88xx;
- halmac_api->halmac_p2pps = halmac_p2pps_88xx;
- halmac_api->halmac_clear_ch_info = halmac_clear_ch_info_88xx;
- halmac_api->halmac_send_general_info = halmac_send_general_info_88xx;
-
- halmac_api->halmac_start_iqk = halmac_start_iqk_88xx;
- halmac_api->halmac_ctrl_pwr_tracking = halmac_ctrl_pwr_tracking_88xx;
- halmac_api->halmac_psd = halmac_psd_88xx;
- halmac_api->halmac_cfg_la_mode = halmac_cfg_la_mode_88xx;
- halmac_api->halmac_cfg_rx_fifo_expanding_mode =
- halmac_cfg_rx_fifo_expanding_mode_88xx;
-
- halmac_api->halmac_config_security = halmac_config_security_88xx;
- halmac_api->halmac_get_used_cam_entry_num =
- halmac_get_used_cam_entry_num_88xx;
- halmac_api->halmac_read_cam_entry = halmac_read_cam_entry_88xx;
- halmac_api->halmac_write_cam = halmac_write_cam_88xx;
- halmac_api->halmac_clear_cam_entry = halmac_clear_cam_entry_88xx;
-
- halmac_api->halmac_get_hw_value = halmac_get_hw_value_88xx;
- halmac_api->halmac_set_hw_value = halmac_set_hw_value_88xx;
-
- halmac_api->halmac_cfg_drv_rsvd_pg_num =
- halmac_cfg_drv_rsvd_pg_num_88xx;
- halmac_api->halmac_get_chip_version = halmac_get_chip_version_88xx;
-
- halmac_api->halmac_query_status = halmac_query_status_88xx;
- halmac_api->halmac_reset_feature = halmac_reset_feature_88xx;
- halmac_api->halmac_check_fw_status = halmac_check_fw_status_88xx;
- halmac_api->halmac_dump_fw_dmem = halmac_dump_fw_dmem_88xx;
- halmac_api->halmac_cfg_max_dl_size = halmac_cfg_max_dl_size_88xx;
-
- halmac_api->halmac_dump_fifo = halmac_dump_fifo_88xx;
- halmac_api->halmac_get_fifo_size = halmac_get_fifo_size_88xx;
-
- halmac_api->halmac_chk_txdesc = halmac_chk_txdesc_88xx;
- halmac_api->halmac_dl_drv_rsvd_page = halmac_dl_drv_rsvd_page_88xx;
- halmac_api->halmac_cfg_csi_rate = halmac_cfg_csi_rate_88xx;
-
- halmac_api->halmac_sdio_cmd53_4byte = halmac_sdio_cmd53_4byte_88xx;
- halmac_api->halmac_txfifo_is_empty = halmac_txfifo_is_empty_88xx;
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- halmac_api->halmac_cfg_rx_aggregation =
- halmac_cfg_rx_aggregation_88xx_sdio;
- halmac_api->halmac_init_interface_cfg =
- halmac_init_sdio_cfg_88xx;
- halmac_api->halmac_deinit_interface_cfg =
- halmac_deinit_sdio_cfg_88xx;
- halmac_api->halmac_reg_read_8 = halmac_reg_read_8_sdio_88xx;
- halmac_api->halmac_reg_write_8 = halmac_reg_write_8_sdio_88xx;
- halmac_api->halmac_reg_read_16 = halmac_reg_read_16_sdio_88xx;
- halmac_api->halmac_reg_write_16 = halmac_reg_write_16_sdio_88xx;
- halmac_api->halmac_reg_read_32 = halmac_reg_read_32_sdio_88xx;
- halmac_api->halmac_reg_write_32 = halmac_reg_write_32_sdio_88xx;
- halmac_api->halmac_reg_read_indirect_32 =
- halmac_reg_read_indirect_32_sdio_88xx;
- halmac_api->halmac_reg_sdio_cmd53_read_n =
- halmac_reg_read_nbyte_sdio_88xx;
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_USB) {
- halmac_api->halmac_cfg_rx_aggregation =
- halmac_cfg_rx_aggregation_88xx_usb;
- halmac_api->halmac_init_interface_cfg =
- halmac_init_usb_cfg_88xx;
- halmac_api->halmac_deinit_interface_cfg =
- halmac_deinit_usb_cfg_88xx;
- halmac_api->halmac_reg_read_8 = halmac_reg_read_8_usb_88xx;
- halmac_api->halmac_reg_write_8 = halmac_reg_write_8_usb_88xx;
- halmac_api->halmac_reg_read_16 = halmac_reg_read_16_usb_88xx;
- halmac_api->halmac_reg_write_16 = halmac_reg_write_16_usb_88xx;
- halmac_api->halmac_reg_read_32 = halmac_reg_read_32_usb_88xx;
- halmac_api->halmac_reg_write_32 = halmac_reg_write_32_usb_88xx;
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_PCIE) {
- halmac_api->halmac_cfg_rx_aggregation =
- halmac_cfg_rx_aggregation_88xx_pcie;
- halmac_api->halmac_init_interface_cfg =
- halmac_init_pcie_cfg_88xx;
- halmac_api->halmac_deinit_interface_cfg =
- halmac_deinit_pcie_cfg_88xx;
- halmac_api->halmac_reg_read_8 = halmac_reg_read_8_pcie_88xx;
- halmac_api->halmac_reg_write_8 = halmac_reg_write_8_pcie_88xx;
- halmac_api->halmac_reg_read_16 = halmac_reg_read_16_pcie_88xx;
- halmac_api->halmac_reg_write_16 = halmac_reg_write_16_pcie_88xx;
- halmac_api->halmac_reg_read_32 = halmac_reg_read_32_pcie_88xx;
- halmac_api->halmac_reg_write_32 = halmac_reg_write_32_pcie_88xx;
- } else {
- pr_err("Set halmac io function Error!!\n");
- }
-
- halmac_api->halmac_set_bulkout_num = halmac_set_bulkout_num_88xx;
- halmac_api->halmac_get_sdio_tx_addr = halmac_get_sdio_tx_addr_88xx;
- halmac_api->halmac_get_usb_bulkout_id = halmac_get_usb_bulkout_id_88xx;
- halmac_api->halmac_timer_2s = halmac_timer_2s_88xx;
- halmac_api->halmac_fill_txdesc_checksum =
- halmac_fill_txdesc_check_sum_88xx;
-
- if (halmac_adapter->chip_id == HALMAC_CHIP_ID_8822B) {
- /*mount 8822b function and data*/
- halmac_mount_api_8822b(halmac_adapter);
-
- } else if (halmac_adapter->chip_id == HALMAC_CHIP_ID_8821C) {
- } else if (halmac_adapter->chip_id == HALMAC_CHIP_ID_8814B) {
- } else if (halmac_adapter->chip_id == HALMAC_CHIP_ID_8197F) {
- } else {
- pr_err("Chip ID undefine!!\n");
- return HALMAC_RET_CHIP_NOT_SUPPORT;
- }
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_download_firmware_88xx() - download Firmware
- * @halmac_adapter : the adapter of halmac
- * @hamacl_fw : firmware bin
- * @halmac_fw_size : firmware size
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_download_firmware_88xx(struct halmac_adapter *halmac_adapter,
- u8 *hamacl_fw, u32 halmac_fw_size)
-{
- u8 value8;
- u8 *file_ptr;
- u32 dest;
- u16 value16;
- u32 restore_index = 0;
- u32 halmac_h2c_ver = 0, fw_h2c_ver = 0;
- u32 iram_pkt_size, dmem_pkt_size, eram_pkt_size = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- struct halmac_restore_info restore_info[DLFW_RESTORE_REG_NUM_88XX];
- u32 temp;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DOWNLOAD_FIRMWARE);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s start!!\n", __func__);
-
- if (halmac_fw_size > HALMAC_FW_SIZE_MAX_88XX ||
- halmac_fw_size < HALMAC_FWHDR_SIZE_88XX) {
- pr_err("FW size error!\n");
- return HALMAC_RET_FW_SIZE_ERR;
- }
-
- fw_h2c_ver = le32_to_cpu(
- *((__le32 *)
- (hamacl_fw + HALMAC_FWHDR_OFFSET_H2C_FORMAT_VER_88XX)));
- halmac_h2c_ver = H2C_FORMAT_VERSION;
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac h2c/c2h format = %x, fw h2c/c2h format = %x!!\n",
- halmac_h2c_ver, fw_h2c_ver);
- if (fw_h2c_ver != halmac_h2c_ver)
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_WARNING,
- "[WARN]H2C/C2H version between HALMAC and FW is compatible!!\n");
-
- halmac_adapter->halmac_state.dlfw_state = HALMAC_DLFW_NONE;
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_SYS_FUNC_EN + 1);
- value8 = (u8)(value8 & ~(BIT(2)));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SYS_FUNC_EN + 1,
- value8); /* Disable CPU reset */
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_RSV_CTRL + 1);
- value8 = (u8)(value8 & ~(BIT(0)));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RSV_CTRL + 1, value8);
-
- restore_info[restore_index].length = 1;
- restore_info[restore_index].mac_register = REG_TXDMA_PQ_MAP + 1;
- restore_info[restore_index].value =
- HALMAC_REG_READ_8(halmac_adapter, REG_TXDMA_PQ_MAP + 1);
- restore_index++;
- value8 = HALMAC_DMA_MAPPING_HIGH << 6;
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXDMA_PQ_MAP + 1,
- value8); /* set HIQ to hi priority */
-
- /* DLFW only use HIQ, map HIQ to hi priority */
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_HI] =
- HALMAC_DMA_MAPPING_HIGH;
- restore_info[restore_index].length = 1;
- restore_info[restore_index].mac_register = REG_CR;
- restore_info[restore_index].value =
- HALMAC_REG_READ_8(halmac_adapter, REG_CR);
- restore_index++;
- restore_info[restore_index].length = 4;
- restore_info[restore_index].mac_register = REG_H2CQ_CSR;
- restore_info[restore_index].value = BIT(31);
- restore_index++;
- value8 = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR, value8);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_H2CQ_CSR, BIT(31));
-
- /* Config hi priority queue and public priority queue page number
- * (only for DLFW)
- */
- restore_info[restore_index].length = 2;
- restore_info[restore_index].mac_register = REG_FIFOPAGE_INFO_1;
- restore_info[restore_index].value =
- HALMAC_REG_READ_16(halmac_adapter, REG_FIFOPAGE_INFO_1);
- restore_index++;
- restore_info[restore_index].length = 4;
- restore_info[restore_index].mac_register = REG_RQPN_CTRL_2;
- restore_info[restore_index].value =
- HALMAC_REG_READ_32(halmac_adapter, REG_RQPN_CTRL_2) | BIT(31);
- restore_index++;
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_INFO_1, 0x200);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_RQPN_CTRL_2,
- restore_info[restore_index - 1].value);
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- HALMAC_REG_READ_32(halmac_adapter, REG_SDIO_FREE_TXPG);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_SDIO_TX_CTRL,
- 0x00000000);
- }
-
- halmac_adapter->fw_version.version = le16_to_cpu(
- *((__le16 *)(hamacl_fw + HALMAC_FWHDR_OFFSET_VERSION_88XX)));
- halmac_adapter->fw_version.sub_version =
- *(hamacl_fw + HALMAC_FWHDR_OFFSET_SUBVERSION_88XX);
- halmac_adapter->fw_version.sub_index =
- *(hamacl_fw + HALMAC_FWHDR_OFFSET_SUBINDEX_88XX);
- halmac_adapter->fw_version.h2c_version = (u16)fw_h2c_ver;
-
- dmem_pkt_size = le32_to_cpu(*((__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_DMEM_SIZE_88XX)));
- iram_pkt_size = le32_to_cpu(*((__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_IRAM_SIZE_88XX)));
- if (((*(hamacl_fw + HALMAC_FWHDR_OFFSET_MEM_USAGE_88XX)) & BIT(4)) != 0)
- eram_pkt_size =
- le32_to_cpu(*((__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_ERAM_SIZE_88XX)));
-
- dmem_pkt_size += HALMAC_FW_CHKSUM_DUMMY_SIZE_88XX;
- iram_pkt_size += HALMAC_FW_CHKSUM_DUMMY_SIZE_88XX;
- if (eram_pkt_size != 0)
- eram_pkt_size += HALMAC_FW_CHKSUM_DUMMY_SIZE_88XX;
-
- if (halmac_fw_size != (HALMAC_FWHDR_SIZE_88XX + dmem_pkt_size +
- iram_pkt_size + eram_pkt_size)) {
- pr_err("FW size mismatch the real fw size!\n");
- goto DLFW_FAIL;
- }
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_CR + 1);
- restore_info[restore_index].length = 1;
- restore_info[restore_index].mac_register = REG_CR + 1;
- restore_info[restore_index].value = value8;
- restore_index++;
- value8 = (u8)(value8 | BIT(0));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR + 1,
- value8); /* Enable SW TX beacon */
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_BCN_CTRL);
- restore_info[restore_index].length = 1;
- restore_info[restore_index].mac_register = REG_BCN_CTRL;
- restore_info[restore_index].value = value8;
- restore_index++;
- value8 = (u8)((value8 & (~BIT(3))) | BIT(4));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_BCN_CTRL,
- value8); /* Disable beacon related functions */
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_FWHW_TXQ_CTRL + 2);
- restore_info[restore_index].length = 1;
- restore_info[restore_index].mac_register = REG_FWHW_TXQ_CTRL + 2;
- restore_info[restore_index].value = value8;
- restore_index++;
- value8 = (u8)(value8 & ~(BIT(6)));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FWHW_TXQ_CTRL + 2,
- value8); /* Disable ptcl tx bcnq */
-
- restore_info[restore_index].length = 2;
- restore_info[restore_index].mac_register = REG_FIFOPAGE_CTRL_2;
- restore_info[restore_index].value =
- HALMAC_REG_READ_16(halmac_adapter, REG_FIFOPAGE_CTRL_2) |
- BIT(15);
- restore_index++;
- value16 = 0x8000;
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- value16); /* Set beacon header to 0 */
-
- value16 = (u16)(HALMAC_REG_READ_16(halmac_adapter, REG_MCUFW_CTRL) &
- 0x3800);
- value16 |= BIT(0);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MCUFW_CTRL,
- value16); /* MCU/FW setting */
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_CPU_DMEM_CON + 2);
- value8 &= ~(BIT(0));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CPU_DMEM_CON + 2, value8);
- value8 |= BIT(0);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CPU_DMEM_CON + 2, value8);
-
- /* Download to DMEM */
- file_ptr = hamacl_fw + HALMAC_FWHDR_SIZE_88XX;
- temp = le32_to_cpu(*((__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_DMEM_ADDR_88XX))) &
- ~(BIT(31));
- if (halmac_dlfw_to_mem_88xx(halmac_adapter, file_ptr, temp,
- dmem_pkt_size) != HALMAC_RET_SUCCESS)
- goto DLFW_END;
-
- /* Download to IMEM */
- file_ptr = hamacl_fw + HALMAC_FWHDR_SIZE_88XX + dmem_pkt_size;
- temp = le32_to_cpu(*((__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_IRAM_ADDR_88XX))) &
- ~(BIT(31));
- if (halmac_dlfw_to_mem_88xx(halmac_adapter, file_ptr, temp,
- iram_pkt_size) != HALMAC_RET_SUCCESS)
- goto DLFW_END;
-
- /* Download to EMEM */
- if (eram_pkt_size != 0) {
- file_ptr = hamacl_fw + HALMAC_FWHDR_SIZE_88XX + dmem_pkt_size +
- iram_pkt_size;
- dest = le32_to_cpu((*((__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_EMEM_ADDR_88XX)))) &
- ~(BIT(31));
- if (halmac_dlfw_to_mem_88xx(halmac_adapter, file_ptr, dest,
- eram_pkt_size) !=
- HALMAC_RET_SUCCESS)
- goto DLFW_END;
- }
-
- halmac_init_offload_feature_state_machine_88xx(halmac_adapter);
-DLFW_END:
-
- halmac_restore_mac_register_88xx(halmac_adapter, restore_info,
- DLFW_RESTORE_REG_NUM_88XX);
-
- if (halmac_dlfw_end_flow_88xx(halmac_adapter) != HALMAC_RET_SUCCESS)
- goto DLFW_FAIL;
-
- halmac_adapter->halmac_state.dlfw_state = HALMAC_DLFW_DONE;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-
-DLFW_FAIL:
-
- /* Disable FWDL_EN */
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_MCUFW_CTRL,
- (u8)(HALMAC_REG_READ_8(halmac_adapter, REG_MCUFW_CTRL) &
- ~(BIT(0))));
-
- return HALMAC_RET_DLFW_FAIL;
-}
-
-/**
- * halmac_free_download_firmware_88xx() - download specific memory firmware
- * @halmac_adapter
- * @dlfw_mem : memory selection
- * @hamacl_fw : firmware bin
- * @halmac_fw_size : firmware size
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- */
-enum halmac_ret_status
-halmac_free_download_firmware_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_dlfw_mem dlfw_mem, u8 *hamacl_fw,
- u32 halmac_fw_size)
-{
- u8 tx_pause_backup;
- u8 *file_ptr;
- u32 dest;
- u16 bcn_head_backup;
- u32 iram_pkt_size, dmem_pkt_size, eram_pkt_size = 0;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_DLFW_FAIL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]%s ==========>\n", __func__);
-
- if (halmac_fw_size > HALMAC_FW_SIZE_MAX_88XX ||
- halmac_fw_size < HALMAC_FWHDR_SIZE_88XX) {
- pr_err("[ERR]FW size error!\n");
- return HALMAC_RET_FW_SIZE_ERR;
- }
-
- dmem_pkt_size =
- le32_to_cpu(*(__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_DMEM_SIZE_88XX));
- iram_pkt_size =
- le32_to_cpu(*(__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_IRAM_SIZE_88XX));
- if (((*(hamacl_fw + HALMAC_FWHDR_OFFSET_MEM_USAGE_88XX)) & BIT(4)) != 0)
- eram_pkt_size =
- le32_to_cpu(*(__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_ERAM_SIZE_88XX));
-
- dmem_pkt_size += HALMAC_FW_CHKSUM_DUMMY_SIZE_88XX;
- iram_pkt_size += HALMAC_FW_CHKSUM_DUMMY_SIZE_88XX;
- if (eram_pkt_size != 0)
- eram_pkt_size += HALMAC_FW_CHKSUM_DUMMY_SIZE_88XX;
-
- if (halmac_fw_size != (HALMAC_FWHDR_SIZE_88XX + dmem_pkt_size +
- iram_pkt_size + eram_pkt_size)) {
- pr_err("[ERR]FW size mismatch the real fw size!\n");
- return HALMAC_RET_DLFW_FAIL;
- }
-
- tx_pause_backup = HALMAC_REG_READ_8(halmac_adapter, REG_TXPAUSE);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXPAUSE,
- tx_pause_backup | BIT(7));
-
- bcn_head_backup =
- HALMAC_REG_READ_16(halmac_adapter, REG_FIFOPAGE_CTRL_2) |
- BIT(15);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2, 0x8000);
-
- if (eram_pkt_size != 0) {
- file_ptr = hamacl_fw + HALMAC_FWHDR_SIZE_88XX + dmem_pkt_size +
- iram_pkt_size;
- dest = le32_to_cpu(*((__le32 *)(hamacl_fw +
- HALMAC_FWHDR_OFFSET_EMEM_ADDR_88XX))) &
- ~(BIT(31));
- status = halmac_dlfw_to_mem_88xx(halmac_adapter, file_ptr, dest,
- eram_pkt_size);
- if (status != HALMAC_RET_SUCCESS)
- goto DL_FREE_FW_END;
- }
-
- status = halmac_free_dl_fw_end_flow_88xx(halmac_adapter);
-
-DL_FREE_FW_END:
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXPAUSE, tx_pause_backup);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- bcn_head_backup);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]%s <==========\n", __func__);
-
- return status;
-}
-
-/**
- * halmac_get_fw_version_88xx() - get FW version
- * @halmac_adapter : the adapter of halmac
- * @fw_version : fw version info
- * Author : Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_get_fw_version_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_fw_version *fw_version)
-{
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_adapter->halmac_state.dlfw_state == 0)
- return HALMAC_RET_DLFW_FAIL;
-
- fw_version->version = halmac_adapter->fw_version.version;
- fw_version->sub_version = halmac_adapter->fw_version.sub_version;
- fw_version->sub_index = halmac_adapter->fw_version.sub_index;
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_mac_addr_88xx() - config mac address
- * @halmac_adapter : the adapter of halmac
- * @halmac_port : 0 for port0, 1 for port1, 2 for port2, 3 for port3, 4 for port4
- * @hal_address : mac address
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_mac_addr_88xx(struct halmac_adapter *halmac_adapter, u8 halmac_port,
- union halmac_wlan_addr *hal_address)
-{
- u16 mac_address_H;
- u32 mac_address_L;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]%s ==========>\n", __func__);
-
- if (halmac_port >= HALMAC_PORTIDMAX) {
- pr_err("[ERR]port index > 5\n");
- return HALMAC_RET_PORT_NOT_SUPPORT;
- }
-
- mac_address_L = le32_to_cpu(hal_address->address_l_h.le_address_low);
- mac_address_H = le16_to_cpu(hal_address->address_l_h.le_address_high);
-
- halmac_adapter->hal_mac_addr[halmac_port].address_l_h.address_low =
- mac_address_L;
- halmac_adapter->hal_mac_addr[halmac_port].address_l_h.address_high =
- mac_address_H;
-
- switch (halmac_port) {
- case HALMAC_PORTID0:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MACID, mac_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MACID + 4,
- mac_address_H);
- break;
-
- case HALMAC_PORTID1:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MACID1, mac_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MACID1 + 4,
- mac_address_H);
- break;
-
- case HALMAC_PORTID2:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MACID2, mac_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MACID2 + 4,
- mac_address_H);
- break;
-
- case HALMAC_PORTID3:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MACID3, mac_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MACID3 + 4,
- mac_address_H);
- break;
-
- case HALMAC_PORTID4:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MACID4, mac_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MACID4 + 4,
- mac_address_H);
- break;
-
- default:
-
- break;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
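
For reference, a minimal caller-side sketch of the MAC-address API removed above. The helper name and the included header are assumptions, and a real driver would normally reach this routine through the struct halmac_api function table rather than calling the 88xx implementation directly; the union members used are the ones visible in the code above.

#include "halmac_api_88xx.h"	/* assumed include, for illustration only */

/* Hypothetical helper (not part of halmac): program a 6-byte MAC
 * address into port 0 through the API removed above.
 */
static enum halmac_ret_status
example_set_port0_mac(struct halmac_adapter *adapter, const u8 *mac)
{
	union halmac_wlan_addr addr = {};

	/* The API consumes the address as little-endian low/high halves. */
	addr.address_l_h.le_address_low =
		cpu_to_le32((u32)mac[0] | (u32)mac[1] << 8 |
			    (u32)mac[2] << 16 | (u32)mac[3] << 24);
	addr.address_l_h.le_address_high =
		cpu_to_le16((u16)(mac[4] | mac[5] << 8));

	return halmac_cfg_mac_addr_88xx(adapter, HALMAC_PORTID0, &addr);
}
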
-/**
- * halmac_cfg_bssid_88xx() - config BSSID
- * @halmac_adapter : the adapter of halmac
- * @halmac_port : 0 for port0, 1 for port1, 2 for port2, 3 for port3, 4 for port4
- * @hal_address : bssid
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_bssid_88xx(struct halmac_adapter *halmac_adapter, u8 halmac_port,
- union halmac_wlan_addr *hal_address)
-{
- u16 bssid_address_H;
- u32 bssid_address_L;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]%s ==========>\n", __func__);
-
- if (halmac_port >= HALMAC_PORTIDMAX) {
-		pr_err("[ERR]port index out of range\n");
- return HALMAC_RET_PORT_NOT_SUPPORT;
- }
-
- bssid_address_L = le32_to_cpu(hal_address->address_l_h.le_address_low);
- bssid_address_H = le16_to_cpu(hal_address->address_l_h.le_address_high);
-
- halmac_adapter->hal_bss_addr[halmac_port].address_l_h.address_low =
- bssid_address_L;
- halmac_adapter->hal_bss_addr[halmac_port].address_l_h.address_high =
- bssid_address_H;
-
- switch (halmac_port) {
- case HALMAC_PORTID0:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_BSSID, bssid_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_BSSID + 4,
- bssid_address_H);
- break;
-
- case HALMAC_PORTID1:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_BSSID1,
- bssid_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_BSSID1 + 4,
- bssid_address_H);
- break;
-
- case HALMAC_PORTID2:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_BSSID2,
- bssid_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_BSSID2 + 4,
- bssid_address_H);
- break;
-
- case HALMAC_PORTID3:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_BSSID3,
- bssid_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_BSSID3 + 4,
- bssid_address_H);
- break;
-
- case HALMAC_PORTID4:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_BSSID4,
- bssid_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_BSSID4 + 4,
- bssid_address_H);
- break;
-
- default:
-
- break;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_multicast_addr_88xx() - config multicast address
- * @halmac_adapter : the adapter of halmac
- * @hal_address : multicast address
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_multicast_addr_88xx(struct halmac_adapter *halmac_adapter,
- union halmac_wlan_addr *hal_address)
-{
- u16 address_H;
- u32 address_L;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_CFG_MULTICAST_ADDR);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- address_L = le32_to_cpu(hal_address->address_l_h.le_address_low);
- address_H = le16_to_cpu(hal_address->address_l_h.le_address_high);
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MAR, address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MAR + 4, address_H);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_pre_init_system_cfg_88xx() - pre-init system config
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_pre_init_system_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- u32 value32, counter;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- bool enable_bb;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_PRE_INIT_SYSTEM_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_pre_init_system_cfg ==========>\n");
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_SDIO_HSUS_CTRL,
- HALMAC_REG_READ_8(halmac_adapter, REG_SDIO_HSUS_CTRL) &
- ~(BIT(0)));
- counter = 10000;
- while (!(HALMAC_REG_READ_8(halmac_adapter, REG_SDIO_HSUS_CTRL) &
- 0x02)) {
- counter--;
- if (counter == 0)
- return HALMAC_RET_SDIO_LEAVE_SUSPEND_FAIL;
- }
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_USB) {
- if (HALMAC_REG_READ_8(halmac_adapter, REG_SYS_CFG2 + 3) ==
- 0x20) /* usb3.0 */
- HALMAC_REG_WRITE_8(
- halmac_adapter, 0xFE5B,
- HALMAC_REG_READ_8(halmac_adapter, 0xFE5B) |
- BIT(4));
- }
-
- /* Config PIN Mux */
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_PAD_CTRL1);
- value32 = value32 & (~(BIT(28) | BIT(29)));
- value32 = value32 | BIT(28) | BIT(29);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_PAD_CTRL1, value32);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_LED_CFG);
- value32 = value32 & (~(BIT(25) | BIT(26)));
- HALMAC_REG_WRITE_32(halmac_adapter, REG_LED_CFG, value32);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_GPIO_MUXCFG);
- value32 = value32 & (~(BIT(2)));
- value32 = value32 | BIT(2);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_GPIO_MUXCFG, value32);
-
- enable_bb = false;
- halmac_set_hw_value_88xx(halmac_adapter, HALMAC_HW_EN_BB_RF,
- &enable_bb);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_pre_init_system_cfg <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_init_system_cfg_88xx() - init system config
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_system_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_SYSTEM_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_init_system_cfg ==========>\n");
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SYS_FUNC_EN + 1,
- HALMAC_FUNCTION_ENABLE_88XX);
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_SYS_SDIO_CTRL,
- (u32)(HALMAC_REG_READ_32(halmac_adapter, REG_SYS_SDIO_CTRL) |
- BIT_LTE_MUX_CTRL_PATH));
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_CPU_DMEM_CON,
- (u32)(HALMAC_REG_READ_32(halmac_adapter, REG_CPU_DMEM_CON) |
- BIT_WL_PLATFORM_RST));
-
- /* halmac_api->halmac_init_h2c(halmac_adapter); */
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_init_system_cfg <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_init_edca_cfg_88xx() - init EDCA config
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_edca_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 value8;
- u32 value32;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_EDCA_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- /* Clear TX pause */
- HALMAC_REG_WRITE_16(halmac_adapter, REG_TXPAUSE, 0x0000);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SLOT, HALMAC_SLOT_TIME_88XX);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PIFS, HALMAC_PIFS_TIME_88XX);
- value32 = HALMAC_SIFS_CCK_CTX_88XX |
- (HALMAC_SIFS_OFDM_CTX_88XX << BIT_SHIFT_SIFS_OFDM_CTX) |
- (HALMAC_SIFS_CCK_TRX_88XX << BIT_SHIFT_SIFS_CCK_TRX) |
- (HALMAC_SIFS_OFDM_TRX_88XX << BIT_SHIFT_SIFS_OFDM_TRX);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_SIFS, value32);
-
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_EDCA_VO_PARAM,
- HALMAC_REG_READ_32(halmac_adapter, REG_EDCA_VO_PARAM) & 0xFFFF);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_EDCA_VO_PARAM + 2,
- HALMAC_VO_TXOP_LIMIT_88XX);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_EDCA_VI_PARAM + 2,
- HALMAC_VI_TXOP_LIMIT_88XX);
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_RD_NAV_NXT,
- HALMAC_RDG_NAV_88XX | (HALMAC_TXOP_NAV_88XX << 16));
- HALMAC_REG_WRITE_16(halmac_adapter, REG_RXTSF_OFFSET_CCK,
- HALMAC_CCK_RX_TSF_88XX |
- (HALMAC_OFDM_RX_TSF_88XX) << 8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_RD_CTRL + 1);
- value8 |=
- (BIT_VOQ_RD_INIT_EN | BIT_VIQ_RD_INIT_EN | BIT_BEQ_RD_INIT_EN);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RD_CTRL + 1, value8);
-
-	/* Set beacon control - enable TSF and other related functions */
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_BCN_CTRL,
- (u8)(HALMAC_REG_READ_8(halmac_adapter, REG_BCN_CTRL) |
- BIT_EN_BCN_FUNCTION));
-
- /* Set send beacon related registers */
- HALMAC_REG_WRITE_32(halmac_adapter, REG_TBTT_PROHIBIT,
- HALMAC_TBTT_PROHIBIT_88XX |
- (HALMAC_TBTT_HOLD_TIME_88XX
- << BIT_SHIFT_TBTT_HOLD_TIME_AP));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_DRVERLYINT,
- HALMAC_DRIVER_EARLY_INT_88XX);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_BCNDMATIM,
- HALMAC_BEACON_DMA_TIM_88XX);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_init_wmac_cfg_88xx() - init wmac config
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_wmac_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_WMAC_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_RXFLTMAP0,
- HALMAC_RX_FILTER0_88XX);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_RXFLTMAP,
- HALMAC_RX_FILTER_88XX);
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_RCR, HALMAC_RCR_CONFIG_88XX);
-
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_TCR + 1,
- (u8)(HALMAC_REG_READ_8(halmac_adapter, REG_TCR + 1) | 0x30));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TCR + 2, 0x30);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TCR + 1, 0x00);
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_WMAC_OPTION_FUNCTION + 8,
- 0x30810041);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_WMAC_OPTION_FUNCTION + 4,
- 0x50802080);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_init_mac_cfg_88xx() - config page1~page7 register
- * @halmac_adapter : the adapter of halmac
- * @mode : trx mode
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_mac_cfg_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode mode)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_MAC_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>mode = %d\n", __func__,
- mode);
-
- status = halmac_api->halmac_init_trx_cfg(halmac_adapter, mode);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_init_trx_cfg error = %x\n", status);
- return status;
- }
- status = halmac_api->halmac_init_protocol_cfg(halmac_adapter);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_init_protocol_cfg_88xx error = %x\n", status);
- return status;
- }
-
- status = halmac_init_edca_cfg_88xx(halmac_adapter);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_init_edca_cfg_88xx error = %x\n", status);
- return status;
- }
-
- status = halmac_init_wmac_cfg_88xx(halmac_adapter);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_init_wmac_cfg_88xx error = %x\n", status);
- return status;
- }
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return status;
-}
-
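
A caller-side sketch of how the init routines removed above fit together. The ordering shown (pre-init system config, system config, then MAC config for a given TRX mode) follows the function names and is an assumption rather than a documented sequence, and the wrapper name is hypothetical.

/* Hypothetical wrapper: bring the MAC through the 88xx config steps. */
static enum halmac_ret_status
example_init_mac(struct halmac_adapter *adapter, enum halmac_trx_mode mode)
{
	enum halmac_ret_status status;

	status = halmac_pre_init_system_cfg_88xx(adapter);
	if (status != HALMAC_RET_SUCCESS)
		return status;

	status = halmac_init_system_cfg_88xx(adapter);
	if (status != HALMAC_RET_SUCCESS)
		return status;

	/* Configures TRX, protocol, EDCA and WMAC settings internally. */
	return halmac_init_mac_cfg_88xx(adapter, mode);
}
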
-/**
- * halmac_cfg_operation_mode_88xx() - config operation mode
- * @halmac_adapter : the adapter of halmac
- * @wireless_mode : 802.11 standard(b/g/n/ac)
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_operation_mode_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_wireless_mode wireless_mode)
-{
- void *driver_adapter = NULL;
- enum halmac_wireless_mode wireless_mode_local =
- HALMAC_WIRELESS_MODE_UNDEFINE;
-
- wireless_mode_local = wireless_mode;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_CFG_OPERATION_MODE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>wireless_mode = %d\n", __func__,
- wireless_mode);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_ch_bw_88xx() - config channel & bandwidth
- * @halmac_adapter : the adapter of halmac
- * @channel : WLAN channel, support 2.4G & 5G
- * @pri_ch_idx : primary channel index, idx1, idx2, idx3, idx4
- * @bw : bandwidth, 20, 40, 80, 160, 5, 10
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_ch_bw_88xx(struct halmac_adapter *halmac_adapter, u8 channel,
- enum halmac_pri_ch_idx pri_ch_idx, enum halmac_bw bw)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_CH_BW);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>ch = %d, idx=%d, bw=%d\n", __func__,
- channel, pri_ch_idx, bw);
-
- halmac_cfg_pri_ch_idx_88xx(halmac_adapter, pri_ch_idx);
-
- halmac_cfg_bw_88xx(halmac_adapter, bw);
-
- halmac_cfg_ch_88xx(halmac_adapter, channel);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
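
An illustrative call into the channel/bandwidth API above; a minimal sketch that reuses only the enum constants visible in the removed code, with an arbitrary channel value and a hypothetical wrapper name.

/* Hypothetical example: 80 MHz on 5 GHz channel 36, primary index 1. */
static enum halmac_ret_status
example_set_channel(struct halmac_adapter *adapter)
{
	/* halmac_cfg_ch_bw_88xx programs the primary channel index,
	 * the bandwidth and the channel (band select) in one call.
	 */
	return halmac_cfg_ch_bw_88xx(adapter, 36, HALMAC_CH_IDX_1,
				     HALMAC_BW_80);
}
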
-enum halmac_ret_status halmac_cfg_ch_88xx(struct halmac_adapter *halmac_adapter,
- u8 channel)
-{
- u8 value8;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_CH_BW);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>ch = %d\n", __func__, channel);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_CCK_CHECK);
- value8 = value8 & (~(BIT(7)));
-
- if (channel > 35)
- value8 = value8 | BIT(7);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CCK_CHECK, value8);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_cfg_pri_ch_idx_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_pri_ch_idx pri_ch_idx)
-{
- u8 txsc_40 = 0, txsc_20 = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_CH_BW);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========> idx=%d\n", __func__,
- pri_ch_idx);
-
- txsc_20 = pri_ch_idx;
- if (txsc_20 == HALMAC_CH_IDX_1 || txsc_20 == HALMAC_CH_IDX_3)
- txsc_40 = 9;
- else
- txsc_40 = 10;
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_DATA_SC,
- BIT_TXSC_20M(txsc_20) | BIT_TXSC_40M(txsc_40));
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_bw_88xx() - config bandwidth
- * @halmac_adapter : the adapter of halmac
- * @bw : bandwidth, 20, 40, 80, 160, 5, 10
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_cfg_bw_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_bw bw)
-{
- u32 value32;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_BW);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>bw=%d\n", __func__, bw);
-
- /* RF mode */
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_WMAC_TRXPTCL_CTL);
- value32 = value32 & (~(BIT(7) | BIT(8)));
-
- switch (bw) {
- case HALMAC_BW_80:
- value32 = value32 | BIT(7);
- break;
- case HALMAC_BW_40:
- value32 = value32 | BIT(8);
- break;
- case HALMAC_BW_20:
- case HALMAC_BW_10:
- case HALMAC_BW_5:
- break;
- default:
- pr_err("%s switch case not support\n", __func__);
- break;
- }
- HALMAC_REG_WRITE_32(halmac_adapter, REG_WMAC_TRXPTCL_CTL, value32);
-
- /* MAC CLK */
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_AFE_CTRL1);
- value32 = (value32 & (~(BIT(20) | BIT(21)))) |
- (HALMAC_MAC_CLOCK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_AFE_CTRL1, value32);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_USTIME_TSF,
- HALMAC_MAC_CLOCK_88XX);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_USTIME_EDCA,
- HALMAC_MAC_CLOCK_88XX);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_dump_efuse_map_88xx() - dump "physical" efuse map
- * @halmac_adapter : the adapter of halmac
- * @cfg : dump efuse method
- * Author : Ivan Lin/KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_dump_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_read_cfg cfg)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.efuse_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DUMP_EFUSE_MAP);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>cfg=%d\n", __func__, cfg);
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait event(dump efuse)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- if (halmac_query_efuse_curr_state_88xx(halmac_adapter) !=
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Not idle state(dump efuse)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- if (halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_OFF)
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_WARNING,
- "[WARN]Dump efuse in suspend mode\n");
-
- *process_status = HALMAC_CMD_PROCESS_IDLE;
- halmac_adapter->event_trigger.physical_efuse_map = 1;
-
- status = halmac_func_switch_efuse_bank_88xx(halmac_adapter,
- HALMAC_EFUSE_BANK_WIFI);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_func_switch_efuse_bank error = %x\n", status);
- return status;
- }
-
- status = halmac_dump_efuse_88xx(halmac_adapter, cfg);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_read_efuse error = %x\n", status);
- return status;
- }
-
- if (halmac_adapter->hal_efuse_map_valid) {
- *process_status = HALMAC_CMD_PROCESS_DONE;
-
- PLATFORM_EVENT_INDICATION(
- driver_adapter, HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE,
- *process_status, halmac_adapter->hal_efuse_map,
- halmac_adapter->hw_config_info.efuse_size);
- halmac_adapter->event_trigger.physical_efuse_map = 0;
- }
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_dump_efuse_map_bt_88xx() - dump "BT physical" efuse map
- * @halmac_adapter : the adapter of halmac
- * @halmac_efuse_bank : bt efuse bank
- * @bt_efuse_map_size : bt efuse map size, obtained from the halmac_get_efuse_size API
- * @bt_efuse_map : bt efuse map
- * Author : Soar / Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_dump_efuse_map_bt_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_bank halmac_efuse_bank,
- u32 bt_efuse_map_size, u8 *bt_efuse_map)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.efuse_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DUMP_EFUSE_MAP_BT);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (halmac_adapter->hw_config_info.bt_efuse_size != bt_efuse_map_size)
- return HALMAC_RET_EFUSE_SIZE_INCORRECT;
-
- if ((halmac_efuse_bank >= HALMAC_EFUSE_BANK_MAX) ||
- halmac_efuse_bank == HALMAC_EFUSE_BANK_WIFI) {
- pr_err("Undefined BT bank\n");
- return HALMAC_RET_EFUSE_BANK_INCORRECT;
- }
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait event(dump efuse)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- if (halmac_query_efuse_curr_state_88xx(halmac_adapter) !=
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Not idle state(dump efuse)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- status = halmac_func_switch_efuse_bank_88xx(halmac_adapter,
- halmac_efuse_bank);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_func_switch_efuse_bank error = %x\n", status);
- return status;
- }
-
- status = halmac_read_hw_efuse_88xx(halmac_adapter, 0, bt_efuse_map_size,
- bt_efuse_map);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_read_hw_efuse_88xx error = %x\n", status);
- return status;
- }
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_write_efuse_bt_88xx() - write "BT physical" efuse offset
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : offset
- * @halmac_value : Write value
- * @bt_efuse_map : bt efuse map
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_write_efuse_bt_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_value,
- enum halmac_efuse_bank halmac_efuse_bank)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.efuse_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_WRITE_EFUSE_BT);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s ==========>\n", __func__);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "offset : %X value : %X Bank : %X\n", halmac_offset,
- halmac_value, halmac_efuse_bank);
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait/Rcvd event(dump efuse)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- if (halmac_query_efuse_curr_state_88xx(halmac_adapter) !=
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Not idle state(dump efuse)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- if (halmac_offset >= halmac_adapter->hw_config_info.efuse_size) {
- pr_err("Offset is too large\n");
- return HALMAC_RET_EFUSE_SIZE_INCORRECT;
- }
-
- if (halmac_efuse_bank > HALMAC_EFUSE_BANK_MAX ||
- halmac_efuse_bank == HALMAC_EFUSE_BANK_WIFI) {
- pr_err("Undefined BT bank\n");
- return HALMAC_RET_EFUSE_BANK_INCORRECT;
- }
-
- status = halmac_func_switch_efuse_bank_88xx(halmac_adapter,
- halmac_efuse_bank);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_func_switch_efuse_bank error = %x\n", status);
- return status;
- }
-
- status = halmac_func_write_efuse_88xx(halmac_adapter, halmac_offset,
- halmac_value);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_func_write_efuse error = %x\n", status);
- return status;
- }
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_efuse_available_size_88xx() - get efuse available size
- * @halmac_adapter : the adapter of halmac
- * @halmac_size : physical efuse available size
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_get_efuse_available_size_88xx(struct halmac_adapter *halmac_adapter,
- u32 *halmac_size)
-{
- enum halmac_ret_status status;
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- status = halmac_dump_logical_efuse_map_88xx(halmac_adapter,
- HALMAC_EFUSE_R_DRV);
-
- if (status != HALMAC_RET_SUCCESS)
- return status;
-
- *halmac_size = halmac_adapter->hw_config_info.efuse_size -
- HALMAC_PROTECTED_EFUSE_SIZE_88XX -
- halmac_adapter->efuse_end;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_efuse_size_88xx() - get "physical" efuse size
- * @halmac_adapter : the adapter of halmac
- * @halmac_size : physical efuse size
- * Author : Ivan Lin/KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_get_efuse_size_88xx(struct halmac_adapter *halmac_adapter,
- u32 *halmac_size)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_GET_EFUSE_SIZE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- *halmac_size = halmac_adapter->hw_config_info.efuse_size;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_logical_efuse_size_88xx() - get "logical" efuse size
- * @halmac_adapter : the adapter of halmac
- * @halmac_size : logical efuse size
- * Author : Ivan Lin/KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_get_logical_efuse_size_88xx(struct halmac_adapter *halmac_adapter,
- u32 *halmac_size)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_GET_LOGICAL_EFUSE_SIZE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- *halmac_size = halmac_adapter->hw_config_info.eeprom_size;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_dump_logical_efuse_map_88xx() - dump "logical" efuse map
- * @halmac_adapter : the adapter of halmac
- * @cfg : dump efuse method
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_dump_logical_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_read_cfg cfg)
-{
- u8 *eeprom_map = NULL;
- u32 eeprom_size = halmac_adapter->hw_config_info.eeprom_size;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.efuse_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_DUMP_LOGICAL_EFUSE_MAP);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s ==========>cfg = %d\n", __func__, cfg);
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait/Rcvd event(dump efuse)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- if (halmac_query_efuse_curr_state_88xx(halmac_adapter) !=
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Not idle state(dump efuse)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- if (halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_OFF)
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_WARNING,
- "[WARN]Dump logical efuse in suspend mode\n");
-
- *process_status = HALMAC_CMD_PROCESS_IDLE;
- halmac_adapter->event_trigger.logical_efuse_map = 1;
-
- status = halmac_func_switch_efuse_bank_88xx(halmac_adapter,
- HALMAC_EFUSE_BANK_WIFI);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_func_switch_efuse_bank error = %x\n", status);
- return status;
- }
-
- status = halmac_dump_efuse_88xx(halmac_adapter, cfg);
-
- if (status != HALMAC_RET_SUCCESS) {
-		pr_err("halmac_dump_efuse_88xx error = %x\n", status);
- return status;
- }
-
- if (halmac_adapter->hal_efuse_map_valid) {
- *process_status = HALMAC_CMD_PROCESS_DONE;
-
- eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
- return HALMAC_RET_MALLOC_FAIL;
- }
- memset(eeprom_map, 0xFF, eeprom_size);
-
- if (halmac_eeprom_parser_88xx(halmac_adapter,
- halmac_adapter->hal_efuse_map,
- eeprom_map) != HALMAC_RET_SUCCESS) {
- kfree(eeprom_map);
- return HALMAC_RET_EEPROM_PARSING_FAIL;
- }
-
- PLATFORM_EVENT_INDICATION(
- driver_adapter, HALMAC_FEATURE_DUMP_LOGICAL_EFUSE,
- *process_status, eeprom_map, eeprom_size);
- halmac_adapter->event_trigger.logical_efuse_map = 0;
-
- kfree(eeprom_map);
- }
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_read_logical_efuse_88xx() - read logical efuse map 1 byte
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : offset
- * @value : 1 byte efuse value
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_read_logical_efuse_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 *value)
-{
- u8 *eeprom_map = NULL;
- u32 eeprom_size = halmac_adapter->hw_config_info.eeprom_size;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.efuse_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_READ_LOGICAL_EFUSE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (halmac_offset >= eeprom_size) {
- pr_err("Offset is too large\n");
- return HALMAC_RET_EFUSE_SIZE_INCORRECT;
- }
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait/Rcvd event(dump efuse)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
- if (halmac_query_efuse_curr_state_88xx(halmac_adapter) !=
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Not idle state(dump efuse)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- status = halmac_func_switch_efuse_bank_88xx(halmac_adapter,
- HALMAC_EFUSE_BANK_WIFI);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_func_switch_efuse_bank error = %x\n", status);
- return status;
- }
-
- eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
- return HALMAC_RET_MALLOC_FAIL;
- }
- memset(eeprom_map, 0xFF, eeprom_size);
-
- status = halmac_read_logical_efuse_map_88xx(halmac_adapter, eeprom_map);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_read_logical_efuse_map error = %x\n", status);
- kfree(eeprom_map);
- return status;
- }
-
- *value = *(eeprom_map + halmac_offset);
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS) {
- kfree(eeprom_map);
- return HALMAC_RET_ERROR_STATE;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- kfree(eeprom_map);
-
- return HALMAC_RET_SUCCESS;
-}
-
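
A short usage sketch for the single-byte logical-efuse read above; the offset used is arbitrary and the helper name is hypothetical.

/* Hypothetical example: read one byte from the logical efuse map. */
static void example_read_efuse_byte(struct halmac_adapter *adapter)
{
	u8 value = 0xFF;

	if (halmac_read_logical_efuse_88xx(adapter, 0x0A, &value) ==
	    HALMAC_RET_SUCCESS)
		pr_info("logical efuse[0x0A] = 0x%02X\n", value);
}
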
-/**
- * halmac_write_logical_efuse_88xx() - write "logical" efuse offset
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : offset
- * @halmac_value : value
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_write_logical_efuse_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_value)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.efuse_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_WRITE_LOGICAL_EFUSE);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (halmac_offset >= halmac_adapter->hw_config_info.eeprom_size) {
- pr_err("Offset is too large\n");
- return HALMAC_RET_EFUSE_SIZE_INCORRECT;
- }
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait/Rcvd event(dump efuse)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- if (halmac_query_efuse_curr_state_88xx(halmac_adapter) !=
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Not idle state(dump efuse)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- status = halmac_func_switch_efuse_bank_88xx(halmac_adapter,
- HALMAC_EFUSE_BANK_WIFI);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_func_switch_efuse_bank error = %x\n", status);
- return status;
- }
-
- status = halmac_func_write_logical_efuse_88xx(
- halmac_adapter, halmac_offset, halmac_value);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_write_logical_efuse error = %x\n", status);
- return status;
- }
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_pg_efuse_by_map_88xx() - pg logical efuse by map
- * @halmac_adapter : the adapter of halmac
- * @pg_efuse_info : efuse map information
- * @cfg : dump efuse method
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_pg_efuse_by_map_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- enum halmac_efuse_read_cfg cfg)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.efuse_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_PG_EFUSE_BY_MAP);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (pg_efuse_info->efuse_map_size !=
- halmac_adapter->hw_config_info.eeprom_size) {
- pr_err("efuse_map_size is incorrect, should be %d bytes\n",
- halmac_adapter->hw_config_info.eeprom_size);
- return HALMAC_RET_EFUSE_SIZE_INCORRECT;
- }
-
- if ((pg_efuse_info->efuse_map_size & 0xF) > 0) {
- pr_err("efuse_map_size should be multiple of 16\n");
- return HALMAC_RET_EFUSE_SIZE_INCORRECT;
- }
-
- if (pg_efuse_info->efuse_mask_size !=
- pg_efuse_info->efuse_map_size >> 4) {
- pr_err("efuse_mask_size is incorrect, should be %d bytes\n",
- pg_efuse_info->efuse_map_size >> 4);
- return HALMAC_RET_EFUSE_SIZE_INCORRECT;
- }
-
- if (!pg_efuse_info->efuse_map) {
- pr_err("efuse_map is NULL\n");
- return HALMAC_RET_NULL_POINTER;
- }
-
- if (!pg_efuse_info->efuse_mask) {
- pr_err("efuse_mask is NULL\n");
- return HALMAC_RET_NULL_POINTER;
- }
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait/Rcvd event(dump efuse)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- if (halmac_query_efuse_curr_state_88xx(halmac_adapter) !=
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Not idle state(dump efuse)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- status = halmac_func_switch_efuse_bank_88xx(halmac_adapter,
- HALMAC_EFUSE_BANK_WIFI);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_func_switch_efuse_bank error = %x\n", status);
- return status;
- }
-
- status = halmac_func_pg_efuse_by_map_88xx(halmac_adapter, pg_efuse_info,
- cfg);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_pg_efuse_by_map error = %x\n", status);
- return status;
- }
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_EFUSE, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_c2h_info_88xx() - process halmac C2H packet
- * @halmac_adapter : the adapter of halmac
- * @halmac_buf : RX Packet pointer
- * @halmac_size : RX Packet size
- * Author : KaiYuan Chang/Ivan Lin
- *
- * Used to process C2H packet info from the RX path. After receiving a packet,
- * the user needs to call this API and pass in the packet pointer.
- *
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_get_c2h_info_88xx(struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_GET_C2H_INFO);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- /* Check if it is C2H packet */
- if (GET_RX_DESC_C2H(halmac_buf)) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "C2H packet, start parsing!\n");
-
- status = halmac_parse_c2h_packet_88xx(halmac_adapter,
- halmac_buf, halmac_size);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_parse_c2h_packet_88xx error = %x\n",
- status);
- return status;
- }
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
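
A caller-side sketch of the RX-path hook described above: the buffer is expected to start with the RX descriptor, since the routine checks GET_RX_DESC_C2H() before parsing. The surrounding function and variable names are assumptions.

/* Hypothetical RX completion path: hand every received packet to halmac
 * so it can pick out and parse C2H (firmware-to-host) packets.
 */
static void example_rx_complete(struct halmac_adapter *adapter,
				u8 *rx_buf, u32 rx_len)
{
	enum halmac_ret_status status;

	status = halmac_get_c2h_info_88xx(adapter, rx_buf, rx_len);
	if (status != HALMAC_RET_SUCCESS)
		pr_warn("C2H parsing failed, status = %x\n", status);

	/* ...normal RX handling of non-C2H frames would continue here... */
}
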
-enum halmac_ret_status
-halmac_cfg_fwlps_option_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_fwlps_option *lps_option)
-{
- void *driver_adapter = NULL;
- struct halmac_fwlps_option *hal_fwlps_option;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_FWLPS_OPTION);
-
- driver_adapter = halmac_adapter->driver_adapter;
- hal_fwlps_option = &halmac_adapter->fwlps_option;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- hal_fwlps_option->mode = lps_option->mode;
- hal_fwlps_option->clk_request = lps_option->clk_request;
- hal_fwlps_option->rlbm = lps_option->rlbm;
- hal_fwlps_option->smart_ps = lps_option->smart_ps;
- hal_fwlps_option->awake_interval = lps_option->awake_interval;
- hal_fwlps_option->all_queue_uapsd = lps_option->all_queue_uapsd;
- hal_fwlps_option->pwr_state = lps_option->pwr_state;
- hal_fwlps_option->low_pwr_rx_beacon = lps_option->low_pwr_rx_beacon;
- hal_fwlps_option->ant_auto_switch = lps_option->ant_auto_switch;
- hal_fwlps_option->ps_allow_bt_high_priority =
- lps_option->ps_allow_bt_high_priority;
- hal_fwlps_option->protect_bcn = lps_option->protect_bcn;
- hal_fwlps_option->silence_period = lps_option->silence_period;
- hal_fwlps_option->fast_bt_connect = lps_option->fast_bt_connect;
- hal_fwlps_option->two_antenna_en = lps_option->two_antenna_en;
- hal_fwlps_option->adopt_user_setting = lps_option->adopt_user_setting;
- hal_fwlps_option->drv_bcn_early_shift = lps_option->drv_bcn_early_shift;
- hal_fwlps_option->enter_32K = lps_option->enter_32K;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_cfg_fwips_option_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_fwips_option *ips_option)
-{
- void *driver_adapter = NULL;
- struct halmac_fwips_option *ips_option_local;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_FWIPS_OPTION);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- ips_option_local = ips_option;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_enter_wowlan_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_wowlan_option *wowlan_option)
-{
- void *driver_adapter = NULL;
- struct halmac_wowlan_option *wowlan_option_local;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_ENTER_WOWLAN);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- wowlan_option_local = wowlan_option;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_leave_wowlan_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_LEAVE_WOWLAN);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_enter_ps_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_ps_state ps_state)
-{
- u8 rpwm;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_ENTER_PS);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (ps_state == halmac_adapter->halmac_state.ps_state) {
- pr_err("power state is already in PS State!!\n");
- return HALMAC_RET_SUCCESS;
- }
-
- if (ps_state == HALMAC_PS_STATE_LPS) {
- status = halmac_send_h2c_set_pwr_mode_88xx(
- halmac_adapter, &halmac_adapter->fwlps_option);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_set_pwr_mode_88xx error = %x!!\n",
- status);
- return status;
- }
- } else if (ps_state == HALMAC_PS_STATE_IPS) {
- }
-
- halmac_adapter->halmac_state.ps_state = ps_state;
-
- /* Enter 32K */
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- if (halmac_adapter->fwlps_option.enter_32K) {
- rpwm = (u8)(((halmac_adapter->rpwm_record ^ (BIT(7))) |
- (BIT(0))) &
- 0x81);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SDIO_HRPWM1,
- rpwm);
- halmac_adapter->low_clk = true;
- }
- } else if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_USB) {
- if (halmac_adapter->fwlps_option.enter_32K) {
- rpwm = (u8)(((halmac_adapter->rpwm_record ^ (BIT(7))) |
- (BIT(0))) &
- 0x81);
- HALMAC_REG_WRITE_8(halmac_adapter, 0xFE58, rpwm);
- halmac_adapter->low_clk = true;
- }
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_leave_ps_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 rpwm, cpwm;
- u32 counter;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- struct halmac_fwlps_option fw_lps_option;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_LEAVE_PS);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (halmac_adapter->halmac_state.ps_state == HALMAC_PS_STATE_ACT) {
- pr_err("power state is already in active!!\n");
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_adapter->low_clk) {
- cpwm = HALMAC_REG_READ_8(halmac_adapter, REG_SDIO_HRPWM1);
- rpwm = (u8)(
- ((halmac_adapter->rpwm_record ^ (BIT(7))) | (BIT(6))) &
- 0xC0);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SDIO_HRPWM1, rpwm);
-
- cpwm = (u8)((cpwm ^ BIT(7)) & BIT(7));
- counter = 100;
- while (cpwm !=
- (HALMAC_REG_READ_8(halmac_adapter, REG_SDIO_HRPWM1) &
- BIT(7))) {
- usleep_range(50, 60);
- counter--;
- if (counter == 0)
- return HALMAC_RET_CHANGE_PS_FAIL;
- }
- halmac_adapter->low_clk = false;
- }
-
- memcpy(&fw_lps_option, &halmac_adapter->fwlps_option,
- sizeof(struct halmac_fwlps_option));
- fw_lps_option.mode = 0;
-
- status = halmac_send_h2c_set_pwr_mode_88xx(halmac_adapter,
- &fw_lps_option);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_set_pwr_mode_88xx error!!=%x\n",
- status);
- return status;
- }
-
- halmac_adapter->halmac_state.ps_state = HALMAC_PS_STATE_ACT;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
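
A minimal power-save round trip using the two routines above; it assumes firmware has already been downloaded (both calls validate this via halmac_fw_validate) and the helper name is hypothetical.

/* Hypothetical example: enter firmware LPS, then return to active. */
static enum halmac_ret_status
example_ps_round_trip(struct halmac_adapter *adapter)
{
	enum halmac_ret_status status;

	status = halmac_enter_ps_88xx(adapter, HALMAC_PS_STATE_LPS);
	if (status != HALMAC_RET_SUCCESS)
		return status;

	/* ...low-power period... */

	return halmac_leave_ps_88xx(adapter);
}
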
-/**
- * (debug API)halmac_h2c_lb_88xx() - send h2c loopback packet
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_h2c_lb_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_H2C_LB);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_debug_88xx() - dump information for debugging
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_debug_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 temp8 = 0;
- u32 i = 0, temp32 = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DEBUG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- /* Dump CCCR, it needs new platform api */
-
- /*Dump SDIO Local Register, use CMD52*/
- for (i = 0x10250000; i < 0x102500ff; i++) {
- temp8 = PLATFORM_SDIO_CMD52_READ(halmac_adapter, i);
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: sdio[%x]=%x\n", i, temp8);
- }
-
- /*Dump MAC Register*/
- for (i = 0x0000; i < 0x17ff; i++) {
- temp8 = PLATFORM_SDIO_CMD52_READ(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "halmac_debug: mac[%x]=%x\n",
- i, temp8);
- }
-
- /*Check RX Fifo status*/
- i = REG_RXFF_PTR_V1;
- temp8 = PLATFORM_SDIO_CMD52_READ(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: mac[%x]=%x\n", i, temp8);
- i = REG_RXFF_WTR_V1;
- temp8 = PLATFORM_SDIO_CMD52_READ(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: mac[%x]=%x\n", i, temp8);
- i = REG_RXFF_PTR_V1;
- temp8 = PLATFORM_SDIO_CMD52_READ(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: mac[%x]=%x\n", i, temp8);
- i = REG_RXFF_WTR_V1;
- temp8 = PLATFORM_SDIO_CMD52_READ(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: mac[%x]=%x\n", i, temp8);
- } else {
- /*Dump MAC Register*/
- for (i = 0x0000; i < 0x17fc; i += 4) {
- temp32 = HALMAC_REG_READ_32(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "halmac_debug: mac[%x]=%x\n",
- i, temp32);
- }
-
- /*Check RX Fifo status*/
- i = REG_RXFF_PTR_V1;
- temp32 = HALMAC_REG_READ_32(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: mac[%x]=%x\n", i, temp32);
- i = REG_RXFF_WTR_V1;
- temp32 = HALMAC_REG_READ_32(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: mac[%x]=%x\n", i, temp32);
- i = REG_RXFF_PTR_V1;
- temp32 = HALMAC_REG_READ_32(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: mac[%x]=%x\n", i, temp32);
- i = REG_RXFF_WTR_V1;
- temp32 = HALMAC_REG_READ_32(halmac_adapter, i);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_debug: mac[%x]=%x\n", i, temp32);
- }
-
- /* TODO: Add check register code, including MAC CLK, CPU CLK */
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_parameter_88xx() - config parameter by FW
- * @halmac_adapter : the adapter of halmac
- * @para_info : parameter information (cmd id, content)
- * @full_fifo : use full FIFO mode (only for the init flow)
- *
- * If msk_en = true, the format of array is {reg_info, mask, value}.
- * If msk_en = false, the format of array is {reg_info, value}.
- * The format of reg_info is
- * reg_info[31]=rf_reg, 0: MAC_BB reg, 1: RF reg
- * reg_info[27:24]=rf_path, 0: path_A, 1: path_B
- * if rf_reg=0(MAC_BB reg), rf_path is meaningless.
- * reg_info[15:0]=offset
- *
- * Example: msk_en = false
- * {0x8100000a, 0x00001122}
- * =>Set RF register, path_B, offset 0xA to 0x00001122
- * {0x00000824, 0x11224433}
- * =>Set MAC_BB register, offset 0x824 to 0x11224433
- *
- * Note : full fifo mode only for init flow
- *
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_parameter_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_phy_parameter_info *para_info,
- u8 full_fifo)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status ret_status = HALMAC_RET_SUCCESS;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.cfg_para_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- if (halmac_adapter->fw_version.h2c_version < 4)
- return HALMAC_RET_FW_NO_SUPPORT;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_PARAMETER);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- if (halmac_adapter->halmac_state.dlfw_state == HALMAC_DLFW_NONE) {
- pr_err("%s Fail due to DLFW NONE!!\n", __func__);
- return HALMAC_RET_DLFW_FAIL;
- }
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait event(cfg para)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- if (halmac_query_cfg_para_curr_state_88xx(halmac_adapter) !=
- HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE &&
- halmac_query_cfg_para_curr_state_88xx(halmac_adapter) !=
- HALMAC_CFG_PARA_CMD_CONSTRUCT_CONSTRUCTING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Not idle state(cfg para)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- *process_status = HALMAC_CMD_PROCESS_IDLE;
-
- ret_status = halmac_send_h2c_phy_parameter_88xx(halmac_adapter,
- para_info, full_fifo);
-
- if (ret_status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_phy_parameter_88xx Fail!! = %x\n",
- ret_status);
- return ret_status;
- }
-
- return ret_status;
-}
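
As a companion to the reg_info layout documented above, here is a minimal standalone sketch that packs the rf_reg flag, RF path and register offset into one 32-bit word; build_reg_info() is a hypothetical helper used only for illustration and is not part of the HALMAC API.

#include <stdint.h>
#include <stdio.h>

static uint32_t build_reg_info(int rf_reg, uint8_t rf_path, uint16_t offset)
{
	uint32_t reg_info = 0;

	if (rf_reg)
		reg_info |= 1u << 31;			/* reg_info[31]: 1 = RF reg */
	reg_info |= (uint32_t)(rf_path & 0xf) << 24;	/* reg_info[27:24]: RF path */
	reg_info |= offset;				/* reg_info[15:0]: offset */

	return reg_info;
}

int main(void)
{
	/* RF register, path_B, offset 0xA -> 0x8100000a, as in the example above */
	printf("0x%08x\n", (unsigned int)build_reg_info(1, 1, 0x000a));
	return 0;
}
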
-
-/**
- * halmac_update_packet_88xx() - send specific packet to FW
- * @halmac_adapter : the adapter of halmac
- * @pkt_id : packet id, to know the purpose of this packet
- * @pkt : packet
- * @pkt_size : packet size
- *
- * Note : TX_DESC is not included in the pkt
- *
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_update_packet_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_packet_id pkt_id, u8 *pkt, u32 pkt_size)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.update_packet_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- if (halmac_adapter->fw_version.h2c_version < 4)
- return HALMAC_RET_FW_NO_SUPPORT;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_UPDATE_PACKET);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait event(update_packet)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- *process_status = HALMAC_CMD_PROCESS_SENDING;
-
- status = halmac_send_h2c_update_packet_88xx(halmac_adapter, pkt_id, pkt,
- pkt_size);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_update_packet_88xx packet = %x, fail = %x!!\n",
- pkt_id, status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
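
The SENDING check above is the same busy-state gate used by most of the H2C wrappers in this file: a new command is refused while a previous one is still in flight. A standalone sketch of that gate, with illustrative names only, is shown below.

#include <stdio.h>

enum proc_status { PROC_IDLE, PROC_SENDING, PROC_DONE };

static enum proc_status update_packet_status = PROC_IDLE;

static int send_update_packet(void)
{
	if (update_packet_status == PROC_SENDING)
		return -1;		/* maps to HALMAC_RET_BUSY_STATE */

	update_packet_status = PROC_SENDING;
	/* ...hand the packet to FW here; a later H2C ack moves the state on... */
	return 0;
}

int main(void)
{
	printf("first send:  %d\n", send_update_packet());
	printf("second send: %d\n", send_update_packet());	/* refused: busy */
	return 0;
}
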
-
-enum halmac_ret_status
-halmac_bcn_ie_filter_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_bcn_ie_info *bcn_ie_info)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- if (halmac_adapter->fw_version.h2c_version < 4)
- return HALMAC_RET_FW_NO_SUPPORT;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_BCN_IE_FILTER);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- status = halmac_send_h2c_update_bcn_parse_info_88xx(halmac_adapter,
- bcn_ie_info);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_update_bcn_parse_info_88xx fail = %x\n",
- status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_update_datapack_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type,
- struct halmac_phy_parameter_info *para_info)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- if (halmac_adapter->fw_version.h2c_version < 4)
- return HALMAC_RET_FW_NO_SUPPORT;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_run_datapack_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status ret_status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- if (halmac_adapter->fw_version.h2c_version < 4)
- return HALMAC_RET_FW_NO_SUPPORT;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_RUN_DATAPACK);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- ret_status = halmac_send_h2c_run_datapack_88xx(halmac_adapter,
- halmac_data_type);
-
- if (ret_status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_run_datapack_88xx Fail, datatype = %x, status = %x!!\n",
- halmac_data_type, ret_status);
- return ret_status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
-			"%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_drv_info_88xx() - config driver info
- * @halmac_adapter : the adapter of halmac
- * @halmac_drv_info : driver information selection
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_drv_info_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_drv_info halmac_drv_info)
-{
- u8 drv_info_size = 0;
- u8 phy_status_en = 0;
- u8 sniffer_en = 0;
- u8 plcp_hdr_en = 0;
- u32 value32;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_DRV_INFO);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "halmac_cfg_drv_info = %d\n", halmac_drv_info);
-
- switch (halmac_drv_info) {
- case HALMAC_DRV_INFO_NONE:
- drv_info_size = 0;
- phy_status_en = 0;
- sniffer_en = 0;
- plcp_hdr_en = 0;
- break;
- case HALMAC_DRV_INFO_PHY_STATUS:
- drv_info_size = 4;
- phy_status_en = 1;
- sniffer_en = 0;
- plcp_hdr_en = 0;
- break;
- case HALMAC_DRV_INFO_PHY_SNIFFER:
- drv_info_size = 5; /* phy status 4byte, sniffer info 1byte */
- phy_status_en = 1;
- sniffer_en = 1;
- plcp_hdr_en = 0;
- break;
- case HALMAC_DRV_INFO_PHY_PLCP:
- drv_info_size = 6; /* phy status 4byte, plcp header 2byte */
- phy_status_en = 1;
- sniffer_en = 0;
- plcp_hdr_en = 1;
- break;
- default:
- status = HALMAC_RET_SW_CASE_NOT_SUPPORT;
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- if (halmac_adapter->txff_allocation.rx_fifo_expanding_mode !=
- HALMAC_RX_FIFO_EXPANDING_MODE_DISABLE)
- drv_info_size = 0xF;
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RX_DRVINFO_SZ, drv_info_size);
-
- halmac_adapter->drv_info_size = drv_info_size;
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_RCR);
- value32 = (value32 & (~BIT_APP_PHYSTS));
- if (phy_status_en == 1)
- value32 = value32 | BIT_APP_PHYSTS;
- HALMAC_REG_WRITE_32(halmac_adapter, REG_RCR, value32);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter,
- REG_WMAC_OPTION_FUNCTION + 4);
- value32 = (value32 & (~(BIT(8) | BIT(9))));
- if (sniffer_en == 1)
- value32 = value32 | BIT(9);
- if (plcp_hdr_en == 1)
- value32 = value32 | BIT(8);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_WMAC_OPTION_FUNCTION + 4,
- value32);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_send_bt_coex_88xx(struct halmac_adapter *halmac_adapter, u8 *bt_buf,
- u32 bt_size, u8 ack)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status ret_status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_SEND_BT_COEX);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- ret_status = halmac_send_bt_coex_cmd_88xx(halmac_adapter, bt_buf,
- bt_size, ack);
-
- if (ret_status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_bt_coex_cmd_88xx Fail = %x!!\n",
- ret_status);
- return ret_status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * (debug API)halmac_verify_platform_api_88xx() - verify platform api
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_verify_platform_api_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status ret_status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_VERIFY_PLATFORM_API);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- ret_status = halmac_verify_io_88xx(halmac_adapter);
-
- if (ret_status != HALMAC_RET_SUCCESS)
- return ret_status;
-
- if (halmac_adapter->txff_allocation.la_mode != HALMAC_LA_MODE_FULL)
- ret_status = halmac_verify_send_rsvd_page_88xx(halmac_adapter);
-
- if (ret_status != HALMAC_RET_SUCCESS)
- return ret_status;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return ret_status;
-}
-
-enum halmac_ret_status
-halmac_send_original_h2c_88xx(struct halmac_adapter *halmac_adapter,
- u8 *original_h2c, u16 *seq, u8 ack)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_SEND_ORIGINAL_H2C);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- status = halmac_func_send_original_h2c_88xx(halmac_adapter,
- original_h2c, seq, ack);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_original_h2c FAIL = %x!!\n", status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_timer_2s_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_fill_txdesc_check_sum_88xx() - fill in tx desc check sum
- * @halmac_adapter : the adapter of halmac
- * @cur_desc : tx desc packet
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_fill_txdesc_check_sum_88xx(struct halmac_adapter *halmac_adapter,
- u8 *cur_desc)
-{
- u16 chk_result = 0;
- u16 *data = (u16 *)NULL;
- u32 i;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_FILL_TXDESC_CHECKSUM);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (!cur_desc) {
-		pr_err("%s NULL PTR\n", __func__);
- return HALMAC_RET_NULL_POINTER;
- }
-
- SET_TX_DESC_TXDESC_CHECKSUM(cur_desc, 0x0000);
-
- data = (u16 *)(cur_desc);
-
-	/* HW calculates the checksum over the first 32 bytes only */
- for (i = 0; i < 8; i++)
- chk_result ^= (*(data + 2 * i) ^ *(data + (2 * i + 1)));
-
- SET_TX_DESC_TXDESC_CHECKSUM(cur_desc, chk_result);
-
- return HALMAC_RET_SUCCESS;
-}
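
The checksum above is simply the XOR of the first sixteen 16-bit words of the descriptor, computed with the checksum field zeroed beforehand. The following standalone sketch reproduces that arithmetic outside the driver; the descriptor contents are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t txdesc_checksum(const uint8_t *desc)
{
	uint16_t words[16];
	uint16_t chk = 0;
	int i;

	memcpy(words, desc, sizeof(words));	/* HW covers the first 32 bytes only */
	for (i = 0; i < 16; i++)
		chk ^= words[i];

	return chk;
}

int main(void)
{
	uint8_t desc[40] = { 0x01, 0x02, 0x03, 0x04 };	/* toy descriptor, checksum field already 0 */

	printf("checksum = 0x%04x\n", (unsigned int)txdesc_checksum(desc));
	return 0;
}
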
-
-/**
- * halmac_dump_fifo_88xx() - dump fifo data
- * @halmac_adapter : the adapter of halmac
- * @halmac_fifo_sel : FIFO selection
- * @halmac_start_addr : start address of selected FIFO
- * @halmac_fifo_dump_size : dump size of selected FIFO
- * @fifo_map : FIFO data
- *
- * Note : before dumping a FIFO, the user needs to call halmac_get_fifo_size
- * to get the FIFO size, then pass that size to halmac_dump_fifo.
- *
- * Author : Ivan Lin/KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_dump_fifo_88xx(struct halmac_adapter *halmac_adapter,
- enum hal_fifo_sel halmac_fifo_sel, u32 halmac_start_addr,
- u32 halmac_fifo_dump_size, u8 *fifo_map)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DUMP_FIFO);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (halmac_fifo_sel == HAL_FIFO_SEL_TX &&
- (halmac_start_addr + halmac_fifo_dump_size) >
- halmac_adapter->hw_config_info.tx_fifo_size) {
- pr_err("TX fifo dump size is too large\n");
- return HALMAC_RET_DUMP_FIFOSIZE_INCORRECT;
- }
-
- if (halmac_fifo_sel == HAL_FIFO_SEL_RX &&
- (halmac_start_addr + halmac_fifo_dump_size) >
- halmac_adapter->hw_config_info.rx_fifo_size) {
- pr_err("RX fifo dump size is too large\n");
- return HALMAC_RET_DUMP_FIFOSIZE_INCORRECT;
- }
-
- if ((halmac_fifo_dump_size & (4 - 1)) != 0) {
-		pr_err("halmac_fifo_dump_size shall be 4-byte aligned\n");
- return HALMAC_RET_DUMP_FIFOSIZE_INCORRECT;
- }
-
- if (!fifo_map) {
- pr_err("fifo_map address is NULL\n");
- return HALMAC_RET_NULL_POINTER;
- }
-
- status = halmac_buffer_read_88xx(halmac_adapter, halmac_start_addr,
- halmac_fifo_dump_size, halmac_fifo_sel,
- fifo_map);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_buffer_read_88xx error = %x\n", status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_fifo_size_88xx() - get fifo size
- * @halmac_adapter : the adapter of halmac
- * @halmac_fifo_sel : FIFO selection
- * Author : Ivan Lin/KaiYuan Chang
- * Return : u32
- * More details of status code can be found in prototype document
- */
-u32 halmac_get_fifo_size_88xx(struct halmac_adapter *halmac_adapter,
- enum hal_fifo_sel halmac_fifo_sel)
-{
- u32 fifo_size = 0;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_GET_FIFO_SIZE);
-
- if (halmac_fifo_sel == HAL_FIFO_SEL_TX)
- fifo_size = halmac_adapter->hw_config_info.tx_fifo_size;
- else if (halmac_fifo_sel == HAL_FIFO_SEL_RX)
- fifo_size = halmac_adapter->hw_config_info.rx_fifo_size;
- else if (halmac_fifo_sel == HAL_FIFO_SEL_RSVD_PAGE)
- fifo_size =
- ((halmac_adapter->hw_config_info.tx_fifo_size >> 7) -
- halmac_adapter->txff_allocation.rsvd_pg_bndy)
- << 7;
- else if (halmac_fifo_sel == HAL_FIFO_SEL_REPORT)
- fifo_size = 65536;
- else if (halmac_fifo_sel == HAL_FIFO_SEL_LLT)
- fifo_size = 65536;
-
- return fifo_size;
-}
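
The reserved-page branch above works in 128-byte pages: the reserved area is everything from rsvd_pg_bndy to the end of the TX FIFO, converted back to bytes. A standalone sketch of that arithmetic, using made-up numbers, follows.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tx_fifo_size = 0x40000;	/* 256 KiB TX FIFO, hypothetical */
	uint32_t rsvd_pg_bndy = 0x780;		/* reserved-page boundary, hypothetical */
	uint32_t total_pages = tx_fifo_size >> 7;		/* 128-byte pages */
	uint32_t rsvd_size = (total_pages - rsvd_pg_bndy) << 7;	/* back to bytes */

	/* 2048 pages total, 128 of them reserved -> 16384 bytes with these numbers */
	printf("reserved page FIFO = %u bytes\n", (unsigned int)rsvd_size);
	return 0;
}
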
-
-/**
- * halmac_cfg_txbf_88xx() - enable/disable specific user's txbf
- * @halmac_adapter : the adapter of halmac
- * @userid : su bfee userid = 0 or 1 to apply TXBF
- * @bw : the sounding bandwidth
- * @txbf_en : 0: disable TXBF, 1: enable TXBF
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_txbf_88xx(struct halmac_adapter *halmac_adapter, u8 userid,
- enum halmac_bw bw, u8 txbf_en)
-{
- u16 temp42C = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_TXBF);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (txbf_en) {
- switch (bw) {
- case HALMAC_BW_80:
- temp42C |= BIT_R_TXBF0_80M;
- /* fall through */
- case HALMAC_BW_40:
- temp42C |= BIT_R_TXBF0_40M;
- /* fall through */
- case HALMAC_BW_20:
- temp42C |= BIT_R_TXBF0_20M;
- break;
- default:
- pr_err("%s invalid TXBF BW setting 0x%x of userid %d\n",
- __func__, bw, userid);
- return HALMAC_RET_INVALID_SOUNDING_SETTING;
- }
- }
-
- switch (userid) {
- case 0:
- temp42C |=
- HALMAC_REG_READ_16(halmac_adapter, REG_TXBF_CTRL) &
- ~(BIT_R_TXBF0_20M | BIT_R_TXBF0_40M | BIT_R_TXBF0_80M);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_TXBF_CTRL, temp42C);
- break;
- case 1:
- temp42C |=
- HALMAC_REG_READ_16(halmac_adapter, REG_TXBF_CTRL + 2) &
- ~(BIT_R_TXBF0_20M | BIT_R_TXBF0_40M | BIT_R_TXBF0_80M);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_TXBF_CTRL + 2, temp42C);
- break;
- default:
- pr_err("%s invalid userid %d\n", __func__, userid);
- return HALMAC_RET_INVALID_SOUNDING_SETTING;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s, txbf_en = %x <==========\n", __func__,
- txbf_en);
-
- return HALMAC_RET_SUCCESS;
-}
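
The switch above relies on deliberate fall-through: enabling TXBF at a wide bandwidth also sets the bits for every narrower bandwidth. The standalone sketch below shows the same cascade; the bit positions are placeholders, not the real BIT_R_TXBF0_* values.

#include <stdint.h>
#include <stdio.h>

#define TXBF_20M (1u << 0)	/* placeholder for BIT_R_TXBF0_20M */
#define TXBF_40M (1u << 1)	/* placeholder for BIT_R_TXBF0_40M */
#define TXBF_80M (1u << 2)	/* placeholder for BIT_R_TXBF0_80M */

enum bw { BW_20, BW_40, BW_80 };

static uint16_t txbf_bits(enum bw bw)
{
	uint16_t val = 0;

	switch (bw) {
	case BW_80:
		val |= TXBF_80M;
		/* fall through */
	case BW_40:
		val |= TXBF_40M;
		/* fall through */
	case BW_20:
		val |= TXBF_20M;
		break;
	}

	return val;
}

int main(void)
{
	printf("BW80 -> 0x%x, BW40 -> 0x%x, BW20 -> 0x%x\n",
	       (unsigned int)txbf_bits(BW_80), (unsigned int)txbf_bits(BW_40),
	       (unsigned int)txbf_bits(BW_20));
	return 0;
}
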
-
-/**
- * halmac_cfg_mumimo_88xx() - configure MU-MIMO
- * @halmac_adapter : the adapter of halmac
- * @cfgmu : parameters to configure MU PPDU Tx/Rx
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_mumimo_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_cfg_mumimo_para *cfgmu)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- u8 i, idx, id0, id1, gid, mu_tab_sel;
- u8 mu_tab_valid = 0;
- u32 gid_valid[6] = {0};
- u8 temp14C0 = 0;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_MUMIMO);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (cfgmu->role == HAL_BFEE) {
- /*config MU BFEE*/
- temp14C0 = HALMAC_REG_READ_8(halmac_adapter, REG_MU_TX_CTL) &
- ~BIT_MASK_R_MU_TABLE_VALID;
- /*enable MU table 0 and 1, disable MU TX*/
- HALMAC_REG_WRITE_8(halmac_adapter, REG_MU_TX_CTL,
- (temp14C0 | BIT(0) | BIT(1)) & ~(BIT(7)));
-
- /*config GID valid table and user position table*/
- mu_tab_sel =
- HALMAC_REG_READ_8(halmac_adapter, REG_MU_TX_CTL + 1) &
- ~(BIT(0) | BIT(1) | BIT(2));
- for (i = 0; i < 2; i++) {
- HALMAC_REG_WRITE_8(halmac_adapter, REG_MU_TX_CTL + 1,
- mu_tab_sel | i);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MU_STA_GID_VLD,
- cfgmu->given_gid_tab[i]);
- HALMAC_REG_WRITE_32(halmac_adapter,
- REG_MU_STA_USER_POS_INFO,
- cfgmu->given_user_pos[i * 2]);
- HALMAC_REG_WRITE_32(halmac_adapter,
- REG_MU_STA_USER_POS_INFO + 4,
- cfgmu->given_user_pos[i * 2 + 1]);
- }
- } else {
- /*config MU BFER*/
- if (!cfgmu->mu_tx_en) {
- HALMAC_REG_WRITE_8(halmac_adapter, REG_MU_TX_CTL,
- HALMAC_REG_READ_8(halmac_adapter,
- REG_MU_TX_CTL) &
- ~(BIT(7)));
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s disable mu tx <==========\n", __func__);
- return HALMAC_RET_SUCCESS;
- }
-
- /*Transform BB grouping bitmap[14:0] to MAC GID_valid table*/
- for (idx = 0; idx < 15; idx++) {
- if (idx < 5) {
- /*group_bitmap bit0~4, MU_STA0 with MUSTA1~5*/
- id0 = 0;
- id1 = (u8)(idx + 1);
- } else if (idx < 9) {
- /*group_bitmap bit5~8, MU_STA1 with MUSTA2~5*/
- id0 = 1;
- id1 = (u8)(idx - 3);
- } else if (idx < 12) {
- /*group_bitmap bit9~11, MU_STA2 with MUSTA3~5*/
- id0 = 2;
- id1 = (u8)(idx - 6);
- } else if (idx < 14) {
- /*group_bitmap bit12~13, MU_STA3 with MUSTA4~5*/
- id0 = 3;
- id1 = (u8)(idx - 8);
- } else {
- /*group_bitmap bit14, MU_STA4 with MUSTA5*/
- id0 = 4;
- id1 = (u8)(idx - 9);
- }
- if (cfgmu->grouping_bitmap & BIT(idx)) {
- /*Pair 1*/
- gid = (idx << 1) + 1;
- gid_valid[id0] |= (BIT(gid));
- gid_valid[id1] |= (BIT(gid));
- /*Pair 2*/
- gid += 1;
- gid_valid[id0] |= (BIT(gid));
- gid_valid[id1] |= (BIT(gid));
- } else {
- /*Pair 1*/
- gid = (idx << 1) + 1;
- gid_valid[id0] &= ~(BIT(gid));
- gid_valid[id1] &= ~(BIT(gid));
- /*Pair 2*/
- gid += 1;
- gid_valid[id0] &= ~(BIT(gid));
- gid_valid[id1] &= ~(BIT(gid));
- }
- }
-
- /*set MU STA GID valid TABLE*/
- mu_tab_sel =
- HALMAC_REG_READ_8(halmac_adapter, REG_MU_TX_CTL + 1) &
- ~(BIT(0) | BIT(1) | BIT(2));
- for (idx = 0; idx < 6; idx++) {
- HALMAC_REG_WRITE_8(halmac_adapter, REG_MU_TX_CTL + 1,
- idx | mu_tab_sel);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MU_STA_GID_VLD,
- gid_valid[idx]);
- }
-
- /*To validate the sounding successful MU STA and enable MU TX*/
- for (i = 0; i < 6; i++) {
- if (cfgmu->sounding_sts[i])
- mu_tab_valid |= BIT(i);
- }
- HALMAC_REG_WRITE_8(halmac_adapter, REG_MU_TX_CTL,
- mu_tab_valid | BIT(7));
- }
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
- return HALMAC_RET_SUCCESS;
-}
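
The BFER branch above turns the 15-bit BB grouping bitmap into per-station GID-valid tables: each bitmap bit names a station pair (id0, id1), and each pair owns two consecutive group IDs. The standalone sketch below reproduces that mapping starting from zeroed tables (the driver additionally clears the two GID bits when a pair is absent); it is for illustration only.

#include <stdint.h>
#include <stdio.h>

static void bitmap_to_gid_valid(uint16_t grouping_bitmap, uint32_t gid_valid[6])
{
	uint8_t idx, id0, id1, gid;

	for (idx = 0; idx < 15; idx++) {
		if (idx < 5) {			/* bits 0-4:   MU_STA0 with MU_STA1-5 */
			id0 = 0;
			id1 = idx + 1;
		} else if (idx < 9) {		/* bits 5-8:   MU_STA1 with MU_STA2-5 */
			id0 = 1;
			id1 = idx - 3;
		} else if (idx < 12) {		/* bits 9-11:  MU_STA2 with MU_STA3-5 */
			id0 = 2;
			id1 = idx - 6;
		} else if (idx < 14) {		/* bits 12-13: MU_STA3 with MU_STA4-5 */
			id0 = 3;
			id1 = idx - 8;
		} else {			/* bit 14:     MU_STA4 with MU_STA5 */
			id0 = 4;
			id1 = idx - 9;
		}

		gid = (idx << 1) + 1;		/* each pair owns GIDs gid and gid + 1 */
		if (grouping_bitmap & (1u << idx)) {
			gid_valid[id0] |= (1u << gid) | (1u << (gid + 1));
			gid_valid[id1] |= (1u << gid) | (1u << (gid + 1));
		}
	}
}

int main(void)
{
	uint32_t gid_valid[6] = { 0 };
	int i;

	bitmap_to_gid_valid(0x0001, gid_valid);	/* only the MU_STA0/MU_STA1 pair */
	for (i = 0; i < 6; i++)
		printf("gid_valid[%d] = 0x%08x\n", i, (unsigned int)gid_valid[i]);
	return 0;
}
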
-
-/**
- * halmac_cfg_sounding_88xx() - configure general sounding
- * @halmac_adapter : the adapter of halmac
- * @role : driver's role, BFer or BFee
- * @datarate : set ndpa tx rate if driver is BFer, or set csi response rate
- * if driver is BFee
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_sounding_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_snd_role role,
- enum halmac_data_rate datarate)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_SOUNDING);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- switch (role) {
- case HAL_BFER:
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_TXBF_CTRL,
- HALMAC_REG_READ_32(halmac_adapter, REG_TXBF_CTRL) |
- BIT_R_ENABLE_NDPA | BIT_USE_NDPA_PARAMETER |
- BIT_R_EN_NDPA_INT | BIT_DIS_NDP_BFEN);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_NDPA_RATE, datarate);
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_NDPA_OPT_CTRL,
- HALMAC_REG_READ_8(halmac_adapter, REG_NDPA_OPT_CTRL) &
- (~(BIT(0) | BIT(1))));
- /*service file length 2 bytes; fix non-STA1 csi start offset */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SND_PTCL_CTRL + 1,
- 0x2 | BIT(7));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SND_PTCL_CTRL + 2, 0x2);
- break;
- case HAL_BFEE:
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SND_PTCL_CTRL, 0xDB);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SND_PTCL_CTRL + 3, 0x50);
- /*use ndpa rx rate to decide csi rate*/
- HALMAC_REG_WRITE_8(halmac_adapter, REG_BBPSF_CTRL + 3,
- HALMAC_OFDM54 | BIT(6));
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_RRSR,
- HALMAC_REG_READ_16(halmac_adapter, REG_RRSR) |
- BIT(datarate));
- /*RXFF do not accept BF Rpt Poll, avoid CSI crc error*/
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_RXFLTMAP1,
- HALMAC_REG_READ_8(halmac_adapter, REG_RXFLTMAP1) &
- (~(BIT(4))));
- /*FWFF do not accept BF Rpt Poll, avoid CSI crc error*/
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_RXFLTMAP4,
- HALMAC_REG_READ_8(halmac_adapter, REG_RXFLTMAP4) &
- (~(BIT(4))));
- break;
- default:
- pr_err("%s invalid role\n", __func__);
- return HALMAC_RET_INVALID_SOUNDING_SETTING;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_del_sounding_88xx() - reset general sounding
- * @halmac_adapter : the adapter of halmac
- * @role : driver's role, BFer or BFee
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_del_sounding_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_snd_role role)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DEL_SOUNDING);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- switch (role) {
- case HAL_BFER:
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXBF_CTRL + 3, 0);
- break;
- case HAL_BFEE:
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SND_PTCL_CTRL, 0);
- break;
- default:
- pr_err("%s invalid role\n", __func__);
- return HALMAC_RET_INVALID_SOUNDING_SETTING;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_su_bfee_entry_init_88xx() - config SU beamformee's registers
- * @halmac_adapter : the adapter of halmac
- * @userid : SU bfee userid = 0 or 1 to be added
- * @paid : partial AID of this bfee
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_su_bfee_entry_init_88xx(struct halmac_adapter *halmac_adapter, u8 userid,
- u16 paid)
-{
- u16 temp42C = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_SU_BFEE_ENTRY_INIT);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- switch (userid) {
- case 0:
- temp42C = HALMAC_REG_READ_16(halmac_adapter, REG_TXBF_CTRL) &
- ~(BIT_MASK_R_TXBF0_AID | BIT_R_TXBF0_20M |
- BIT_R_TXBF0_40M | BIT_R_TXBF0_80M);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_TXBF_CTRL,
- temp42C | paid);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_ASSOCIATED_BFMEE_SEL,
- paid);
- break;
- case 1:
- temp42C =
- HALMAC_REG_READ_16(halmac_adapter, REG_TXBF_CTRL + 2) &
- ~(BIT_MASK_R_TXBF1_AID | BIT_R_TXBF0_20M |
- BIT_R_TXBF0_40M | BIT_R_TXBF0_80M);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_TXBF_CTRL + 2,
- temp42C | paid);
- HALMAC_REG_WRITE_16(halmac_adapter,
- REG_ASSOCIATED_BFMEE_SEL + 2,
- paid | BIT(9));
- break;
- default:
- pr_err("%s invalid userid %d\n", __func__,
- userid);
- return HALMAC_RET_INVALID_SOUNDING_SETTING;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_su_bfer_entry_init_88xx() - config SU beamformer's registers
- * @halmac_adapter : the adapter of halmac
- * @su_bfer_init : parameters to configure SU BFER entry
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_su_bfer_entry_init_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_su_bfer_init_para *su_bfer_init)
-{
- u16 mac_address_H;
- u32 mac_address_L;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_SU_BFER_ENTRY_INIT);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- /* mac_address_L = bfer_address.address_l_h.address_low; */
- /* mac_address_H = bfer_address.address_l_h.address_high; */
-
- mac_address_L = le32_to_cpu(
- su_bfer_init->bfer_address.address_l_h.le_address_low);
- mac_address_H = le16_to_cpu(
- su_bfer_init->bfer_address.address_l_h.le_address_high);
-
- switch (su_bfer_init->userid) {
- case 0:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_ASSOCIATED_BFMER0_INFO,
- mac_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter,
- REG_ASSOCIATED_BFMER0_INFO + 4,
- mac_address_H);
- HALMAC_REG_WRITE_16(halmac_adapter,
- REG_ASSOCIATED_BFMER0_INFO + 6,
- su_bfer_init->paid);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_TX_CSI_RPT_PARAM_BW20,
- su_bfer_init->csi_para);
- break;
- case 1:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_ASSOCIATED_BFMER1_INFO,
- mac_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter,
- REG_ASSOCIATED_BFMER1_INFO + 4,
- mac_address_H);
- HALMAC_REG_WRITE_16(halmac_adapter,
- REG_ASSOCIATED_BFMER1_INFO + 6,
- su_bfer_init->paid);
- HALMAC_REG_WRITE_16(halmac_adapter,
- REG_TX_CSI_RPT_PARAM_BW20 + 2,
- su_bfer_init->csi_para);
- break;
- default:
- pr_err("%s invalid userid %d\n", __func__,
- su_bfer_init->userid);
- return HALMAC_RET_INVALID_SOUNDING_SETTING;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_mu_bfee_entry_init_88xx() - config MU beamformee's registers
- * @halmac_adapter : the adapter of halmac
- * @mu_bfee_init : parameters to configure MU BFEE entry
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_mu_bfee_entry_init_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_mu_bfee_init_para *mu_bfee_init)
-{
- u16 temp168X = 0, temp14C0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_MU_BFEE_ENTRY_INIT);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- temp168X |= mu_bfee_init->paid | BIT(9);
- HALMAC_REG_WRITE_16(halmac_adapter, (0x1680 + mu_bfee_init->userid * 2),
- temp168X);
-
- temp14C0 = HALMAC_REG_READ_16(halmac_adapter, REG_MU_TX_CTL) &
- ~(BIT(8) | BIT(9) | BIT(10));
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MU_TX_CTL,
- temp14C0 | ((mu_bfee_init->userid - 2) << 8));
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MU_STA_GID_VLD, 0);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MU_STA_USER_POS_INFO,
- mu_bfee_init->user_position_l);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_MU_STA_USER_POS_INFO + 4,
- mu_bfee_init->user_position_h);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_mu_bfer_entry_init_88xx() - config MU beamformer's registers
- * @halmac_adapter : the adapter of halmac
- * @mu_bfer_init : parameters to configure MU BFER entry
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_mu_bfer_entry_init_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_mu_bfer_init_para *mu_bfer_init)
-{
- u16 temp1680 = 0;
- u16 mac_address_H;
- u32 mac_address_L;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_MU_BFER_ENTRY_INIT);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- mac_address_L =
- le32_to_cpu(mu_bfer_init->bfer_address.address_l_h.le_address_low);
- mac_address_H =
- le16_to_cpu(mu_bfer_init->bfer_address.address_l_h.le_address_high);
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_ASSOCIATED_BFMER0_INFO,
- mac_address_L);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_ASSOCIATED_BFMER0_INFO + 4,
- mac_address_H);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_ASSOCIATED_BFMER0_INFO + 6,
- mu_bfer_init->paid);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_TX_CSI_RPT_PARAM_BW20,
- mu_bfer_init->csi_para);
-
- temp1680 = HALMAC_REG_READ_16(halmac_adapter, 0x1680) & 0xC000;
- temp1680 |= mu_bfer_init->my_aid | (mu_bfer_init->csi_length_sel << 12);
- HALMAC_REG_WRITE_16(halmac_adapter, 0x1680, temp1680);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_su_bfee_entry_del_88xx() - reset SU beamformee's registers
- * @halmac_adapter : the adapter of halmac
- * @userid : the SU BFee userid to be deleted
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_su_bfee_entry_del_88xx(struct halmac_adapter *halmac_adapter, u8 userid)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_SU_BFEE_ENTRY_DEL);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- switch (userid) {
- case 0:
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_TXBF_CTRL,
- HALMAC_REG_READ_16(halmac_adapter, REG_TXBF_CTRL) &
- ~(BIT_MASK_R_TXBF0_AID | BIT_R_TXBF0_20M |
- BIT_R_TXBF0_40M | BIT_R_TXBF0_80M));
- HALMAC_REG_WRITE_16(halmac_adapter, REG_ASSOCIATED_BFMEE_SEL,
- 0);
- break;
- case 1:
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_TXBF_CTRL + 2,
- HALMAC_REG_READ_16(halmac_adapter, REG_TXBF_CTRL + 2) &
- ~(BIT_MASK_R_TXBF1_AID | BIT_R_TXBF0_20M |
- BIT_R_TXBF0_40M | BIT_R_TXBF0_80M));
- HALMAC_REG_WRITE_16(halmac_adapter,
- REG_ASSOCIATED_BFMEE_SEL + 2, 0);
- break;
- default:
- pr_err("%s invalid userid %d\n", __func__,
- userid);
- return HALMAC_RET_INVALID_SOUNDING_SETTING;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_su_bfer_entry_del_88xx() - reset SU beamformer's registers
- * @halmac_adapter : the adapter of halmac
- * @userid : the SU BFer userid to be deleted
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_su_bfer_entry_del_88xx(struct halmac_adapter *halmac_adapter, u8 userid)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_SU_BFER_ENTRY_DEL);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- switch (userid) {
- case 0:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_ASSOCIATED_BFMER0_INFO,
- 0);
- HALMAC_REG_WRITE_32(halmac_adapter,
- REG_ASSOCIATED_BFMER0_INFO + 4, 0);
- break;
- case 1:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_ASSOCIATED_BFMER1_INFO,
- 0);
- HALMAC_REG_WRITE_32(halmac_adapter,
- REG_ASSOCIATED_BFMER1_INFO + 4, 0);
- break;
- default:
- pr_err("%s invalid userid %d\n", __func__,
- userid);
- return HALMAC_RET_INVALID_SOUNDING_SETTING;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_mu_bfee_entry_del_88xx() - reset MU beamformee's registers
- * @halmac_adapter : the adapter of halmac
- * @userid : the MU STA userid to be deleted
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_mu_bfee_entry_del_88xx(struct halmac_adapter *halmac_adapter, u8 userid)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_MU_BFEE_ENTRY_DEL);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_16(halmac_adapter, 0x1680 + userid * 2, 0);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_MU_TX_CTL,
- HALMAC_REG_READ_8(halmac_adapter, REG_MU_TX_CTL) &
- ~(BIT(userid - 2)));
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_mu_bfer_entry_del_88xx() - reset MU beamformer's registers
- * @halmac_adapter : the adapter of halmac
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_mu_bfer_entry_del_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_MU_BFER_ENTRY_DEL);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_ASSOCIATED_BFMER0_INFO, 0);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_ASSOCIATED_BFMER0_INFO + 4, 0);
- HALMAC_REG_WRITE_16(halmac_adapter, 0x1680, 0);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_MU_TX_CTL, 0);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_add_ch_info_88xx() - add channel information
- * @halmac_adapter : the adapter of halmac
- * @ch_info : channel information
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_add_ch_info_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ch_info *ch_info)
-{
- void *driver_adapter = NULL;
- struct halmac_cs_info *ch_sw_info;
- enum halmac_scan_cmd_construct_state state_scan;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- ch_sw_info = &halmac_adapter->ch_sw_info;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]%s ==========>\n", __func__);
-
- if (halmac_adapter->halmac_state.dlfw_state != HALMAC_GEN_INFO_SENT) {
-		pr_err("[ERR]%s: gen_info is not sent to FW!!!!\n", __func__);
- return HALMAC_RET_GEN_INFO_NOT_SENT;
- }
-
- state_scan = halmac_query_scan_curr_state_88xx(halmac_adapter);
- if (state_scan != HALMAC_SCAN_CMD_CONSTRUCT_BUFFER_CLEARED &&
- state_scan != HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_WARNING,
- "[WARN]Scan machine fail(add ch info)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- if (!ch_sw_info->ch_info_buf) {
- ch_sw_info->ch_info_buf =
- kzalloc(HALMAC_EXTRA_INFO_BUFF_SIZE_88XX, GFP_KERNEL);
- if (!ch_sw_info->ch_info_buf)
- return HALMAC_RET_NULL_POINTER;
- ch_sw_info->ch_info_buf_w = ch_sw_info->ch_info_buf;
- ch_sw_info->buf_size = HALMAC_EXTRA_INFO_BUFF_SIZE_88XX;
- ch_sw_info->avai_buf_size = HALMAC_EXTRA_INFO_BUFF_SIZE_88XX;
- ch_sw_info->total_size = 0;
- ch_sw_info->extra_info_en = 0;
- ch_sw_info->ch_num = 0;
- }
-
- if (ch_sw_info->extra_info_en == 1) {
- pr_err("[ERR]%s: construct sequence wrong!!\n", __func__);
- return HALMAC_RET_CH_SW_SEQ_WRONG;
- }
-
- if (ch_sw_info->avai_buf_size < 4) {
- pr_err("[ERR]%s: no available buffer!!\n", __func__);
- return HALMAC_RET_CH_SW_NO_BUF;
- }
-
- if (halmac_transition_scan_state_88xx(
- halmac_adapter, HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- CHANNEL_INFO_SET_CHANNEL(ch_sw_info->ch_info_buf_w, ch_info->channel);
- CHANNEL_INFO_SET_PRI_CH_IDX(ch_sw_info->ch_info_buf_w,
- ch_info->pri_ch_idx);
- CHANNEL_INFO_SET_BANDWIDTH(ch_sw_info->ch_info_buf_w, ch_info->bw);
- CHANNEL_INFO_SET_TIMEOUT(ch_sw_info->ch_info_buf_w, ch_info->timeout);
- CHANNEL_INFO_SET_ACTION_ID(ch_sw_info->ch_info_buf_w,
- ch_info->action_id);
- CHANNEL_INFO_SET_CH_EXTRA_INFO(ch_sw_info->ch_info_buf_w,
- ch_info->extra_info);
-
- ch_sw_info->avai_buf_size = ch_sw_info->avai_buf_size - 4;
- ch_sw_info->total_size = ch_sw_info->total_size + 4;
- ch_sw_info->ch_num++;
- ch_sw_info->extra_info_en = ch_info->extra_info;
- ch_sw_info->ch_info_buf_w = ch_sw_info->ch_info_buf_w + 4;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
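
Each call above appends one 4-byte channel entry to a buffer allocated on first use and advances the write pointer and counters together. The standalone sketch below models only that bookkeeping; the buffer size constant and the field packing are placeholders for HALMAC_EXTRA_INFO_BUFF_SIZE_88XX and the CHANNEL_INFO_SET_* macros.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CH_INFO_BUF_SIZE 256	/* placeholder for HALMAC_EXTRA_INFO_BUFF_SIZE_88XX */

struct ch_sw_buf {
	uint8_t buf[CH_INFO_BUF_SIZE];
	uint8_t *w;		/* write pointer */
	uint32_t avail;
	uint32_t total;
	uint32_t ch_num;
};

static int add_ch_entry(struct ch_sw_buf *cs, uint8_t channel)
{
	if (cs->avail < 4)
		return -1;		/* no available buffer */

	memset(cs->w, 0, 4);
	cs->w[0] = channel;		/* placeholder packing only */
	cs->w += 4;
	cs->avail -= 4;
	cs->total += 4;
	cs->ch_num++;

	return 0;
}

int main(void)
{
	struct ch_sw_buf cs = { .avail = CH_INFO_BUF_SIZE };

	cs.w = cs.buf;
	add_ch_entry(&cs, 36);
	add_ch_entry(&cs, 40);
	printf("entries=%u total=%u avail=%u\n",
	       (unsigned int)cs.ch_num, (unsigned int)cs.total,
	       (unsigned int)cs.avail);
	return 0;
}
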
-
-/**
- * halmac_add_extra_ch_info_88xx() - add extra channel information
- * @halmac_adapter : the adapter of halmac
- * @ch_extra_info : extra channel information
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_add_extra_ch_info_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ch_extra_info *ch_extra_info)
-{
- void *driver_adapter = NULL;
- struct halmac_cs_info *ch_sw_info;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_ADD_EXTRA_CH_INFO);
-
- driver_adapter = halmac_adapter->driver_adapter;
- ch_sw_info = &halmac_adapter->ch_sw_info;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (!ch_sw_info->ch_info_buf) {
- pr_err("%s: NULL==ch_sw_info->ch_info_buf!!\n", __func__);
- return HALMAC_RET_CH_SW_SEQ_WRONG;
- }
-
- if (ch_sw_info->extra_info_en == 0) {
- pr_err("%s: construct sequence wrong!!\n", __func__);
- return HALMAC_RET_CH_SW_SEQ_WRONG;
- }
-
- if (ch_sw_info->avai_buf_size <
- (u32)(ch_extra_info->extra_info_size + 2)) {
-		/* +2: ch_extra_info_id, ch_extra_info and ch_extra_info_size
-		 * take 2 bytes in total
-		 */
- pr_err("%s: no available buffer!!\n", __func__);
- return HALMAC_RET_CH_SW_NO_BUF;
- }
-
- if (halmac_query_scan_curr_state_88xx(halmac_adapter) !=
- HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Scan machine fail(add extra ch info)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- if (halmac_transition_scan_state_88xx(
- halmac_adapter, HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- CH_EXTRA_INFO_SET_CH_EXTRA_INFO_ID(ch_sw_info->ch_info_buf_w,
- ch_extra_info->extra_action_id);
- CH_EXTRA_INFO_SET_CH_EXTRA_INFO(ch_sw_info->ch_info_buf_w,
- ch_extra_info->extra_info);
- CH_EXTRA_INFO_SET_CH_EXTRA_INFO_SIZE(ch_sw_info->ch_info_buf_w,
- ch_extra_info->extra_info_size);
- memcpy(ch_sw_info->ch_info_buf_w + 2, ch_extra_info->extra_info_data,
- ch_extra_info->extra_info_size);
-
- ch_sw_info->avai_buf_size = ch_sw_info->avai_buf_size -
- (2 + ch_extra_info->extra_info_size);
- ch_sw_info->total_size =
- ch_sw_info->total_size + (2 + ch_extra_info->extra_info_size);
- ch_sw_info->extra_info_en = ch_extra_info->extra_info;
- ch_sw_info->ch_info_buf_w = ch_sw_info->ch_info_buf_w +
- (2 + ch_extra_info->extra_info_size);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_ctrl_ch_switch_88xx() - send channel switch cmd
- * @halmac_adapter : the adapter of halmac
- * @cs_option : channel switch config
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ch_switch_option *cs_option)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- enum halmac_scan_cmd_construct_state state_scan;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.scan_state_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- if (halmac_adapter->fw_version.h2c_version < 4)
- return HALMAC_RET_FW_NO_SUPPORT;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CTRL_CH_SWITCH);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s cs_option->switch_en = %d==========>\n", __func__,
- cs_option->switch_en);
-
- if (!cs_option->switch_en)
- *process_status = HALMAC_CMD_PROCESS_IDLE;
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING ||
- *process_status == HALMAC_CMD_PROCESS_RCVD) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait event(ctrl ch switch)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- state_scan = halmac_query_scan_curr_state_88xx(halmac_adapter);
- if (cs_option->switch_en) {
- if (state_scan != HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C,
- DBG_DMESG,
- "%s(on) invalid in state %x\n",
- __func__, state_scan);
- return HALMAC_RET_ERROR_STATE;
- }
- } else {
- if (state_scan != HALMAC_SCAN_CMD_CONSTRUCT_BUFFER_CLEARED) {
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s(off) invalid in state %x\n", __func__,
- state_scan);
- return HALMAC_RET_ERROR_STATE;
- }
- }
-
- status = halmac_func_ctrl_ch_switch_88xx(halmac_adapter, cs_option);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_ctrl_ch_switch FAIL = %x!!\n", status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_clear_ch_info_88xx() - clear channel information
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_clear_ch_info_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CLEAR_CH_INFO);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (halmac_query_scan_curr_state_88xx(halmac_adapter) ==
- HALMAC_SCAN_CMD_CONSTRUCT_H2C_SENT) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Scan machine fail(clear ch info)...\n");
- return HALMAC_RET_ERROR_STATE;
- }
-
- if (halmac_transition_scan_state_88xx(
- halmac_adapter, HALMAC_SCAN_CMD_CONSTRUCT_BUFFER_CLEARED) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- kfree(halmac_adapter->ch_sw_info.ch_info_buf);
- halmac_adapter->ch_sw_info.ch_info_buf = NULL;
- halmac_adapter->ch_sw_info.ch_info_buf_w = NULL;
- halmac_adapter->ch_sw_info.extra_info_en = 0;
- halmac_adapter->ch_sw_info.buf_size = 0;
- halmac_adapter->ch_sw_info.avai_buf_size = 0;
- halmac_adapter->ch_sw_info.total_size = 0;
- halmac_adapter->ch_sw_info.ch_num = 0;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_p2pps_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_p2pps *p2p_ps)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- if (halmac_adapter->fw_version.h2c_version < 6)
- return HALMAC_RET_FW_NO_SUPPORT;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- status = halmac_func_p2pps_88xx(halmac_adapter, p2p_ps);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("[ERR]halmac_p2pps FAIL = %x!!\n", status);
- return status;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_func_p2pps_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_p2pps *p2p_ps)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = halmac_adapter->driver_adapter;
- struct halmac_api *halmac_api;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]halmac_p2pps !!\n");
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- P2PPS_SET_OFFLOAD_EN(h2c_buff, p2p_ps->offload_en);
- P2PPS_SET_ROLE(h2c_buff, p2p_ps->role);
- P2PPS_SET_CTWINDOW_EN(h2c_buff, p2p_ps->ctwindow_en);
- P2PPS_SET_NOA_EN(h2c_buff, p2p_ps->noa_en);
- P2PPS_SET_NOA_SEL(h2c_buff, p2p_ps->noa_sel);
- P2PPS_SET_ALLSTASLEEP(h2c_buff, p2p_ps->all_sta_sleep);
- P2PPS_SET_DISCOVERY(h2c_buff, p2p_ps->discovery);
- P2PPS_SET_P2P_PORT_ID(h2c_buff, p2p_ps->p2p_port_id);
- P2PPS_SET_P2P_GROUP(h2c_buff, p2p_ps->p2p_group);
- P2PPS_SET_P2P_MACID(h2c_buff, p2p_ps->p2p_macid);
-
- P2PPS_SET_CTWINDOW_LENGTH(h2c_buff, p2p_ps->ctwindow_length);
-
- P2PPS_SET_NOA_DURATION_PARA(h2c_buff, p2p_ps->noa_duration_para);
- P2PPS_SET_NOA_INTERVAL_PARA(h2c_buff, p2p_ps->noa_interval_para);
- P2PPS_SET_NOA_START_TIME_PARA(h2c_buff, p2p_ps->noa_start_time_para);
- P2PPS_SET_NOA_COUNT_PARA(h2c_buff, p2p_ps->noa_count_para);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_P2PPS;
- h2c_header_info.content_size = 24;
- h2c_header_info.ack = false;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, false);
-
- if (status != HALMAC_RET_SUCCESS)
- pr_err("[ERR]halmac_send_h2c_p2pps_88xx Fail = %x!!\n", status);
-
- return status;
-}
-
-/**
- * halmac_send_general_info_88xx() - send general information to FW
- * @halmac_adapter : the adapter of halmac
- * @general_info : general information
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_send_general_info_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_general_info *general_info)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- if (halmac_adapter->fw_version.h2c_version < 4)
- return HALMAC_RET_FW_NO_SUPPORT;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_SEND_GENERAL_INFO);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (halmac_adapter->halmac_state.dlfw_state == HALMAC_DLFW_NONE) {
- pr_err("%s Fail due to DLFW NONE!!\n", __func__);
- return HALMAC_RET_DLFW_FAIL;
- }
-
- status = halmac_func_send_general_info_88xx(halmac_adapter,
- general_info);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_general_info error = %x\n", status);
- return status;
- }
-
- if (halmac_adapter->halmac_state.dlfw_state == HALMAC_DLFW_DONE)
- halmac_adapter->halmac_state.dlfw_state = HALMAC_GEN_INFO_SENT;
-
- halmac_adapter->gen_info_valid = true;
- memcpy(&halmac_adapter->general_info, general_info,
- sizeof(struct halmac_general_info));
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_start_iqk_88xx() - trigger FW IQK
- * @halmac_adapter : the adapter of halmac
- * @iqk_para : IQK parameter
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_start_iqk_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_iqk_para_ *iqk_para)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_num = 0;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.iqk_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_START_IQK);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait event(iqk)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- *process_status = HALMAC_CMD_PROCESS_SENDING;
-
- IQK_SET_CLEAR(h2c_buff, iqk_para->clear);
- IQK_SET_SEGMENT_IQK(h2c_buff, iqk_para->segment_iqk);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_IQK;
- h2c_header_info.content_size = 1;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_num);
-
- halmac_adapter->halmac_state.iqk_set.seq_num = h2c_seq_num;
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_ctrl_pwr_tracking_88xx() - trigger FW power tracking
- * @halmac_adapter : the adapter of halmac
- * @pwr_tracking_opt : power tracking option
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_ctrl_pwr_tracking_88xx(
- struct halmac_adapter *halmac_adapter,
- struct halmac_pwr_tracking_option *pwr_tracking_opt)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.power_tracking_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CTRL_PWR_TRACKING);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
-			"%s ==========>\n", __func__);
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait event(pwr tracking)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- *process_status = HALMAC_CMD_PROCESS_SENDING;
-
- POWER_TRACKING_SET_TYPE(h2c_buff, pwr_tracking_opt->type);
- POWER_TRACKING_SET_BBSWING_INDEX(h2c_buff,
- pwr_tracking_opt->bbswing_index);
- POWER_TRACKING_SET_ENABLE_A(
- h2c_buff,
- pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_A].enable);
- POWER_TRACKING_SET_TX_PWR_INDEX_A(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_A]
- .tx_pwr_index);
- POWER_TRACKING_SET_PWR_TRACKING_OFFSET_VALUE_A(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_A]
- .pwr_tracking_offset_value);
- POWER_TRACKING_SET_TSSI_VALUE_A(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_A]
- .tssi_value);
- POWER_TRACKING_SET_ENABLE_B(
- h2c_buff,
- pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_B].enable);
- POWER_TRACKING_SET_TX_PWR_INDEX_B(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_B]
- .tx_pwr_index);
- POWER_TRACKING_SET_PWR_TRACKING_OFFSET_VALUE_B(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_B]
- .pwr_tracking_offset_value);
- POWER_TRACKING_SET_TSSI_VALUE_B(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_B]
- .tssi_value);
- POWER_TRACKING_SET_ENABLE_C(
- h2c_buff,
- pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_C].enable);
- POWER_TRACKING_SET_TX_PWR_INDEX_C(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_C]
- .tx_pwr_index);
- POWER_TRACKING_SET_PWR_TRACKING_OFFSET_VALUE_C(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_C]
- .pwr_tracking_offset_value);
- POWER_TRACKING_SET_TSSI_VALUE_C(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_C]
- .tssi_value);
- POWER_TRACKING_SET_ENABLE_D(
- h2c_buff,
- pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_D].enable);
- POWER_TRACKING_SET_TX_PWR_INDEX_D(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_D]
- .tx_pwr_index);
- POWER_TRACKING_SET_PWR_TRACKING_OFFSET_VALUE_D(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_D]
- .pwr_tracking_offset_value);
- POWER_TRACKING_SET_TSSI_VALUE_D(
- h2c_buff, pwr_tracking_opt->pwr_tracking_para[HALMAC_RF_PATH_D]
- .tssi_value);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_POWER_TRACKING;
- h2c_header_info.content_size = 20;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
-
- halmac_adapter->halmac_state.power_tracking_set.seq_num = h2c_seq_mum;
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
-			"%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_query_status_88xx() - query the offload feature status
- * @halmac_adapter : the adapter of halmac
- * @feature_id : feature_id
- * @process_status : feature_status
- * @data : data buffer
- * @size : data size
- *
- * Note :
- * If the user wants to know the data size, the user can allocate a
- * zero-size buffer first. If this size is less than the data size,
- * halmac will return HALMAC_RET_BUFFER_TOO_SMALL. The user then needs
- * to re-allocate the data buffer with the correct data size (a caller
- * sketch follows this function).
- *
- * Author : Ivan Lin/KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_query_status_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_feature_id feature_id,
- enum halmac_cmd_process_status *process_status,
- u8 *data, u32 *size)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_QUERY_STATE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- if (!process_status) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "null pointer!!\n");
- return HALMAC_RET_NULL_POINTER;
- }
-
- switch (feature_id) {
- case HALMAC_FEATURE_CFG_PARA:
- status = halmac_query_cfg_para_status_88xx(
- halmac_adapter, process_status, data, size);
- break;
- case HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE:
- status = halmac_query_dump_physical_efuse_status_88xx(
- halmac_adapter, process_status, data, size);
- break;
- case HALMAC_FEATURE_DUMP_LOGICAL_EFUSE:
- status = halmac_query_dump_logical_efuse_status_88xx(
- halmac_adapter, process_status, data, size);
- break;
- case HALMAC_FEATURE_CHANNEL_SWITCH:
- status = halmac_query_channel_switch_status_88xx(
- halmac_adapter, process_status, data, size);
- break;
- case HALMAC_FEATURE_UPDATE_PACKET:
- status = halmac_query_update_packet_status_88xx(
- halmac_adapter, process_status, data, size);
- break;
- case HALMAC_FEATURE_IQK:
- status = halmac_query_iqk_status_88xx(
- halmac_adapter, process_status, data, size);
- break;
- case HALMAC_FEATURE_POWER_TRACKING:
- status = halmac_query_power_tracking_status_88xx(
- halmac_adapter, process_status, data, size);
- break;
- case HALMAC_FEATURE_PSD:
- status = halmac_query_psd_status_88xx(
- halmac_adapter, process_status, data, size);
- break;
- default:
- pr_err("%s invalid feature id %d\n", __func__,
- feature_id);
- return HALMAC_RET_INVALID_FEATURE_ID;
- }
-
- return status;
-}
-
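The kernel-doc note above describes a two-step size query. The fragment below is a minimal caller sketch and is not part of the original driver: the helper name query_psd_result and the choice of HALMAC_FEATURE_PSD are illustrative, and it assumes that *size is updated to the required length when HALMAC_RET_BUFFER_TOO_SMALL is returned, a detail this file does not show.

static enum halmac_ret_status
query_psd_result(struct halmac_adapter *adapter,
		 enum halmac_cmd_process_status *proc_status)
{
	enum halmac_ret_status status;
	u32 size = 0;
	u8 *buf;

	/* First pass: zero-size buffer, only to learn the data size. */
	status = halmac_query_status_88xx(adapter, HALMAC_FEATURE_PSD,
					  proc_status, NULL, &size);
	if (status != HALMAC_RET_BUFFER_TOO_SMALL)
		return status;

	/* Assumption: *size now holds the required buffer length. */
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return HALMAC_RET_NULL_POINTER;

	/* Second pass: buffer of the reported size. */
	status = halmac_query_status_88xx(adapter, HALMAC_FEATURE_PSD,
					  proc_status, buf, &size);

	/* ... consume buf here ... */
	kfree(buf);
	return status;
}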
-/**
- * halmac_reset_feature_88xx() - reset async api cmd status
- * @halmac_adapter : the adapter of halmac
- * @feature_id : feature_id
- * Author : Ivan Lin/KaiYuan Chang
- * Return : enum halmac_ret_status.
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reset_feature_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_feature_id feature_id)
-{
- void *driver_adapter = NULL;
- struct halmac_state *state = &halmac_adapter->halmac_state;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_RESET_FEATURE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- switch (feature_id) {
- case HALMAC_FEATURE_CFG_PARA:
- state->cfg_para_state_set.process_status =
- HALMAC_CMD_PROCESS_IDLE;
- state->cfg_para_state_set.cfg_para_cmd_construct_state =
- HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE;
- break;
- case HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE:
- case HALMAC_FEATURE_DUMP_LOGICAL_EFUSE:
- state->efuse_state_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->efuse_state_set.efuse_cmd_construct_state =
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE;
- break;
- case HALMAC_FEATURE_CHANNEL_SWITCH:
- state->scan_state_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->scan_state_set.scan_cmd_construct_state =
- HALMAC_SCAN_CMD_CONSTRUCT_IDLE;
- break;
- case HALMAC_FEATURE_UPDATE_PACKET:
- state->update_packet_set.process_status =
- HALMAC_CMD_PROCESS_IDLE;
- break;
- case HALMAC_FEATURE_ALL:
- state->cfg_para_state_set.process_status =
- HALMAC_CMD_PROCESS_IDLE;
- state->cfg_para_state_set.cfg_para_cmd_construct_state =
- HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE;
- state->efuse_state_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->efuse_state_set.efuse_cmd_construct_state =
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE;
- state->scan_state_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->scan_state_set.scan_cmd_construct_state =
- HALMAC_SCAN_CMD_CONSTRUCT_IDLE;
- state->update_packet_set.process_status =
- HALMAC_CMD_PROCESS_IDLE;
- break;
- default:
- pr_err("%s invalid feature id %d\n", __func__,
- feature_id);
- return HALMAC_RET_INVALID_FEATURE_ID;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_check_fw_status_88xx() - check fw status
- * @halmac_adapter : the adapter of halmac
- * @fw_status : fw status
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_check_fw_status_88xx(struct halmac_adapter *halmac_adapter,
- bool *fw_status)
-{
- u32 value32 = 0, value32_backup = 0, i = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CHECK_FW_STATUS);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- value32 = PLATFORM_REG_READ_32(driver_adapter, REG_FW_DBG6);
-
- if (value32 != 0) {
- pr_err("halmac_check_fw_status REG_FW_DBG6 !=0\n");
- *fw_status = false;
- return status;
- }
-
- value32_backup = PLATFORM_REG_READ_32(driver_adapter, REG_FW_DBG7);
-
- for (i = 0; i <= 10; i++) {
- value32 = PLATFORM_REG_READ_32(driver_adapter, REG_FW_DBG7);
- if (value32_backup != value32)
- break;
-
- if (i == 10) {
- pr_err("halmac_check_fw_status Polling FW PC fail\n");
- *fw_status = false;
- return status;
- }
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_dump_fw_dmem_88xx(struct halmac_adapter *halmac_adapter, u8 *dmem,
- u32 *size)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DUMP_FW_DMEM);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return status;
-}
-
-/**
- * halmac_cfg_max_dl_size_88xx() - config max download FW size
- * @halmac_adapter : the adapter of halmac
- * @size : max download fw size
- *
- * Halmac uses this setting to set the max packet size for
- * downloading FW.
- * If the user has not called this API, halmac uses the default
- * setting for FW download (a usage sketch follows this function).
- * Note1 : size must be a multiple of 2
- * Note2 : max size is 31K
- *
- * Author : Ivan Lin/KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_max_dl_size_88xx(struct halmac_adapter *halmac_adapter, u32 size)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_MAX_DL_SIZE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_FW, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (size > HALMAC_FW_CFG_MAX_DL_SIZE_MAX_88XX) {
- pr_err("size > HALMAC_FW_CFG_MAX_DL_SIZE_MAX!\n");
- return HALMAC_RET_CFG_DLFW_SIZE_FAIL;
- }
-
- if ((size & (2 - 1)) != 0) {
-		pr_err("size is not a multiple of 2!\n");
- return HALMAC_RET_CFG_DLFW_SIZE_FAIL;
- }
-
- halmac_adapter->max_download_size = size;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_FW, DBG_DMESG,
- "Cfg max size is : %X\n", size);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_FW, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
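The two constraints above (size must be even and no larger than the 88xx maximum) can be satisfied by clamping before the call. The helper below is a hypothetical sketch, not part of the original driver, and it assumes HALMAC_FW_CFG_MAX_DL_SIZE_MAX_88XX is itself an even value.

static enum halmac_ret_status
cfg_dl_size_clamped(struct halmac_adapter *adapter, u32 requested)
{
	u32 size = requested;

	/* Hypothetical clamp: respect the 88xx maximum, then force even. */
	if (size > HALMAC_FW_CFG_MAX_DL_SIZE_MAX_88XX)
		size = HALMAC_FW_CFG_MAX_DL_SIZE_MAX_88XX;
	size &= ~(u32)1;	/* round down to a multiple of 2 */

	return halmac_cfg_max_dl_size_88xx(adapter, size);
}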
-/**
- * halmac_psd_88xx() - trigger fw psd
- * @halmac_adapter : the adapter of halmac
- * @start_psd : start PSD
- * @end_psd : end PSD
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_psd_88xx(struct halmac_adapter *halmac_adapter,
- u16 start_psd, u16 end_psd)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.psd_set.process_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- if (halmac_fw_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_NO_DLFW;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_PSD);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (*process_status == HALMAC_CMD_PROCESS_SENDING) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Wait event(psd)...\n");
- return HALMAC_RET_BUSY_STATE;
- }
-
- kfree(halmac_adapter->halmac_state.psd_set.data);
- halmac_adapter->halmac_state.psd_set.data = (u8 *)NULL;
-
- halmac_adapter->halmac_state.psd_set.data_size = 0;
- halmac_adapter->halmac_state.psd_set.segment_size = 0;
-
- *process_status = HALMAC_CMD_PROCESS_SENDING;
-
- PSD_SET_START_PSD(h2c_buff, start_psd);
- PSD_SET_END_PSD(h2c_buff, end_psd);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_PSD;
- h2c_header_info.content_size = 4;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_la_mode_88xx() - config la mode
- * @halmac_adapter : the adapter of halmac
- * @la_mode :
- * disable : no TXFF space is reserved for LA debug
- * partial : partial TXFF space is reserved for LA debug
- * full : all TXFF space is reserved for LA debug
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_la_mode_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_la_mode la_mode)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_LA_MODE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>la_mode = %d\n", __func__,
- la_mode);
-
- halmac_adapter->txff_allocation.la_mode = la_mode;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_rx_fifo_expanding_mode_88xx() - rx fifo expanding
- * @halmac_adapter : the adapter of halmac
- * @rx_fifo_expanding_mode :
- * disable : normal mode
- * 1 block : Rx FIFO + 1 FIFO block; Tx fifo - 1 FIFO block
- * 2 block : Rx FIFO + 2 FIFO block; Tx fifo - 2 FIFO block
- * 3 block : Rx FIFO + 3 FIFO block; Tx fifo - 3 FIFO block
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_cfg_rx_fifo_expanding_mode_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_rx_fifo_expanding_mode rx_fifo_expanding_mode)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_CFG_RX_FIFO_EXPANDING_MODE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>rx_fifo_expanding_mode = %d\n", __func__,
- rx_fifo_expanding_mode);
-
- halmac_adapter->txff_allocation.rx_fifo_expanding_mode =
- rx_fifo_expanding_mode;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_config_security_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_security_setting *sec_setting)
-{
- struct halmac_api *halmac_api;
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_CR,
- (u16)(HALMAC_REG_READ_16(halmac_adapter, REG_CR) |
- BIT_MAC_SEC_EN));
-
- if (sec_setting->tx_encryption == 1)
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_SECCFG,
- HALMAC_REG_READ_8(halmac_adapter, REG_SECCFG) | BIT(2));
- else
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_SECCFG,
- HALMAC_REG_READ_8(halmac_adapter, REG_SECCFG) &
- ~(BIT(2)));
-
- if (sec_setting->rx_decryption == 1)
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_SECCFG,
- HALMAC_REG_READ_8(halmac_adapter, REG_SECCFG) | BIT(3));
- else
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_SECCFG,
- HALMAC_REG_READ_8(halmac_adapter, REG_SECCFG) &
- ~(BIT(3)));
-
- if (sec_setting->bip_enable == 1) {
- if (halmac_adapter->chip_id == HALMAC_CHIP_ID_8822B)
- return HALMAC_RET_BIP_NO_SUPPORT;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
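As a usage sketch of the REG_CR/REG_SECCFG writes above: enable_hw_security below is hypothetical and not part of the original driver, but the member names tx_encryption, rx_decryption and bip_enable are exactly the fields this function reads.

/* Hypothetical caller: turn on hardware TX encryption and RX decryption. */
static enum halmac_ret_status
enable_hw_security(struct halmac_adapter *adapter)
{
	struct halmac_security_setting sec = {
		.tx_encryption = 1,	/* sets BIT(2) of REG_SECCFG above */
		.rx_decryption = 1,	/* sets BIT(3) of REG_SECCFG above */
		.bip_enable = 0,	/* BIP offload is rejected on 8822B */
	};

	return halmac_config_security_88xx(adapter, &sec);
}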
-u8 halmac_get_used_cam_entry_num_88xx(struct halmac_adapter *halmac_adapter,
- enum hal_security_type sec_type)
-{
- u8 entry_num;
- void *driver_adapter = NULL;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- switch (sec_type) {
- case HAL_SECURITY_TYPE_WEP40:
- case HAL_SECURITY_TYPE_WEP104:
- case HAL_SECURITY_TYPE_TKIP:
- case HAL_SECURITY_TYPE_AES128:
- case HAL_SECURITY_TYPE_GCMP128:
- case HAL_SECURITY_TYPE_GCMSMS4:
- case HAL_SECURITY_TYPE_BIP:
- entry_num = 1;
- break;
- case HAL_SECURITY_TYPE_WAPI:
- case HAL_SECURITY_TYPE_AES256:
- case HAL_SECURITY_TYPE_GCMP256:
- entry_num = 2;
- break;
- default:
- entry_num = 0;
- break;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return entry_num;
-}
-
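In practice this entry count lines up with the CAM programming that follows: the single-entry types are written once in halmac_write_cam_88xx(), while WAPI, AES256 and GCMP256 trigger the second write loop at entry_index + 1, so a driver should reserve two consecutive CAM slots for those types (GCMSMS4 also takes the second write there even though it is counted as one entry here).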
-enum halmac_ret_status
-halmac_write_cam_88xx(struct halmac_adapter *halmac_adapter, u32 entry_index,
- struct halmac_cam_entry_info *cam_entry_info)
-{
- u32 i;
- u32 command = 0x80010000;
- struct halmac_api *halmac_api;
- void *driver_adapter = NULL;
- struct halmac_cam_entry_format *cam_entry_format = NULL;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "[TRACE]%s ==========>\n", __func__);
-
- if (entry_index >= halmac_adapter->hw_config_info.cam_entry_num)
- return HALMAC_RET_ENTRY_INDEX_ERROR;
-
- if (cam_entry_info->key_id > 3)
- return HALMAC_RET_FAIL;
-
- cam_entry_format = kzalloc(sizeof(*cam_entry_format), GFP_KERNEL);
- if (!cam_entry_format)
- return HALMAC_RET_NULL_POINTER;
-
- cam_entry_format->key_id = cam_entry_info->key_id;
- cam_entry_format->valid = cam_entry_info->valid;
- memcpy(cam_entry_format->mac_address, cam_entry_info->mac_address, 6);
- memcpy(cam_entry_format->key, cam_entry_info->key, 16);
-
- switch (cam_entry_info->security_type) {
- case HAL_SECURITY_TYPE_NONE:
- cam_entry_format->type = 0;
- break;
- case HAL_SECURITY_TYPE_WEP40:
- cam_entry_format->type = 1;
- break;
- case HAL_SECURITY_TYPE_WEP104:
- cam_entry_format->type = 5;
- break;
- case HAL_SECURITY_TYPE_TKIP:
- cam_entry_format->type = 2;
- break;
- case HAL_SECURITY_TYPE_AES128:
- cam_entry_format->type = 4;
- break;
- case HAL_SECURITY_TYPE_WAPI:
- cam_entry_format->type = 6;
- break;
- case HAL_SECURITY_TYPE_AES256:
- cam_entry_format->type = 4;
- cam_entry_format->ext_sectype = 1;
- break;
- case HAL_SECURITY_TYPE_GCMP128:
- cam_entry_format->type = 7;
- break;
- case HAL_SECURITY_TYPE_GCMP256:
- case HAL_SECURITY_TYPE_GCMSMS4:
- cam_entry_format->type = 7;
- cam_entry_format->ext_sectype = 1;
- break;
- case HAL_SECURITY_TYPE_BIP:
- cam_entry_format->type = cam_entry_info->unicast == 1 ? 4 : 0;
- cam_entry_format->mgnt = 1;
- cam_entry_format->grp = cam_entry_info->unicast == 1 ? 0 : 1;
- break;
- default:
- kfree(cam_entry_format);
- return HALMAC_RET_FAIL;
- }
-
- for (i = 0; i < 8; i++) {
- HALMAC_REG_WRITE_32(halmac_adapter, REG_CAMWRITE,
- *((u32 *)cam_entry_format + i));
- HALMAC_REG_WRITE_32(halmac_adapter, REG_CAMCMD,
- command | ((entry_index << 3) + i));
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "[TRACE]1 - CAM entry format : %X\n",
- *((u32 *)cam_entry_format + i));
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "[TRACE]1 - REG_CAMCMD : %X\n",
- command | ((entry_index << 3) + i));
- }
-
- if (cam_entry_info->security_type == HAL_SECURITY_TYPE_WAPI ||
- cam_entry_info->security_type == HAL_SECURITY_TYPE_AES256 ||
- cam_entry_info->security_type == HAL_SECURITY_TYPE_GCMP256 ||
- cam_entry_info->security_type == HAL_SECURITY_TYPE_GCMSMS4) {
- cam_entry_format->mic = 1;
- memcpy(cam_entry_format->key, cam_entry_info->key_ext, 16);
-
- for (i = 0; i < 8; i++) {
- HALMAC_REG_WRITE_32(halmac_adapter, REG_CAMWRITE,
- *((u32 *)cam_entry_format + i));
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_CAMCMD,
- command | (((entry_index + 1) << 3) + i));
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON,
- DBG_DMESG,
- "[TRACE]2 - CAM entry format : %X\n",
- *((u32 *)cam_entry_format + i));
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "[TRACE]2 - REG_CAMCMD : %X\n",
- command | (((entry_index + 1) << 3) + i));
- }
- }
-
- kfree(cam_entry_format);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "[TRACE]%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_read_cam_entry_88xx(struct halmac_adapter *halmac_adapter,
- u32 entry_index,
- struct halmac_cam_entry_format *content)
-{
- u32 i;
- u32 command = 0x80000000;
- struct halmac_api *halmac_api;
- void *driver_adapter = NULL;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (entry_index >= halmac_adapter->hw_config_info.cam_entry_num)
- return HALMAC_RET_ENTRY_INDEX_ERROR;
-
- for (i = 0; i < 8; i++) {
- HALMAC_REG_WRITE_32(halmac_adapter, REG_CAMCMD,
- command | ((entry_index << 3) + i));
- *((u32 *)content + i) =
- HALMAC_REG_READ_32(halmac_adapter, REG_CAMREAD);
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_clear_cam_entry_88xx(struct halmac_adapter *halmac_adapter,
- u32 entry_index)
-{
- u32 i;
- u32 command = 0x80010000;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- struct halmac_cam_entry_format *cam_entry_format;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
-			"[TRACE]%s ==========>\n", __func__);
-
- if (entry_index >= halmac_adapter->hw_config_info.cam_entry_num)
- return HALMAC_RET_ENTRY_INDEX_ERROR;
-
- cam_entry_format = kzalloc(sizeof(*cam_entry_format), GFP_KERNEL);
- if (!cam_entry_format)
- return HALMAC_RET_NULL_POINTER;
-
- for (i = 0; i < 8; i++) {
- HALMAC_REG_WRITE_32(halmac_adapter, REG_CAMWRITE,
- *((u32 *)cam_entry_format + i));
- HALMAC_REG_WRITE_32(halmac_adapter, REG_CAMCMD,
- command | ((entry_index << 3) + i));
- }
-
- kfree(cam_entry_format);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
-			"[TRACE]%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_hw_value_88xx() - get hw config value
- * @halmac_adapter : the adapter of halmac
- * @hw_id : hw id for driver to query
- * @pvalue : hw value; refer to the hw id table for the data type
- * Author : KaiYuan Chang / Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_get_hw_value_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_hw_id hw_id, void *pvalue)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_GET_HW_VALUE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (!pvalue) {
- pr_err("%s (!pvalue)==========>\n", __func__);
- return HALMAC_RET_NULL_POINTER;
- }
-
- switch (hw_id) {
- case HALMAC_HW_RQPN_MAPPING:
- ((struct halmac_rqpn_map *)pvalue)->dma_map_vo =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VO];
- ((struct halmac_rqpn_map *)pvalue)->dma_map_vi =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VI];
- ((struct halmac_rqpn_map *)pvalue)->dma_map_be =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BE];
- ((struct halmac_rqpn_map *)pvalue)->dma_map_bk =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BK];
- ((struct halmac_rqpn_map *)pvalue)->dma_map_mg =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_MG];
- ((struct halmac_rqpn_map *)pvalue)->dma_map_hi =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_HI];
- break;
- case HALMAC_HW_EFUSE_SIZE:
- *(u32 *)pvalue = halmac_adapter->hw_config_info.efuse_size;
- break;
- case HALMAC_HW_EEPROM_SIZE:
- *(u32 *)pvalue = halmac_adapter->hw_config_info.eeprom_size;
- break;
- case HALMAC_HW_BT_BANK_EFUSE_SIZE:
- *(u32 *)pvalue = halmac_adapter->hw_config_info.bt_efuse_size;
- break;
- case HALMAC_HW_BT_BANK1_EFUSE_SIZE:
- case HALMAC_HW_BT_BANK2_EFUSE_SIZE:
- *(u32 *)pvalue = 0;
- break;
- case HALMAC_HW_TXFIFO_SIZE:
- *(u32 *)pvalue = halmac_adapter->hw_config_info.tx_fifo_size;
- break;
- case HALMAC_HW_RSVD_PG_BNDY:
- *(u16 *)pvalue =
- halmac_adapter->txff_allocation.rsvd_drv_pg_bndy;
- break;
- case HALMAC_HW_CAM_ENTRY_NUM:
- *(u8 *)pvalue = halmac_adapter->hw_config_info.cam_entry_num;
- break;
- case HALMAC_HW_WLAN_EFUSE_AVAILABLE_SIZE: /*Remove later*/
- status = halmac_dump_logical_efuse_map_88xx(halmac_adapter,
- HALMAC_EFUSE_R_DRV);
- if (status != HALMAC_RET_SUCCESS)
- return status;
- *(u32 *)pvalue = halmac_adapter->hw_config_info.efuse_size -
- HALMAC_PROTECTED_EFUSE_SIZE_88XX -
- halmac_adapter->efuse_end;
- break;
- case HALMAC_HW_IC_VERSION:
- *(u8 *)pvalue = halmac_adapter->chip_version;
- break;
- case HALMAC_HW_PAGE_SIZE:
- *(u32 *)pvalue = halmac_adapter->hw_config_info.page_size;
- break;
- case HALMAC_HW_TX_AGG_ALIGN_SIZE:
- *(u16 *)pvalue = halmac_adapter->hw_config_info.tx_align_size;
- break;
- case HALMAC_HW_RX_AGG_ALIGN_SIZE:
- *(u8 *)pvalue = 8;
- break;
- case HALMAC_HW_DRV_INFO_SIZE:
- *(u8 *)pvalue = halmac_adapter->drv_info_size;
- break;
- case HALMAC_HW_TXFF_ALLOCATION:
- memcpy(pvalue, &halmac_adapter->txff_allocation,
- sizeof(struct halmac_txff_allocation));
- break;
- case HALMAC_HW_TX_DESC_SIZE:
- *(u32 *)pvalue = halmac_adapter->hw_config_info.txdesc_size;
- break;
- case HALMAC_HW_RX_DESC_SIZE:
- *(u32 *)pvalue = halmac_adapter->hw_config_info.rxdesc_size;
- break;
- default:
- return HALMAC_RET_PARA_NOT_SUPPORT;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
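The type behind pvalue must match the hw_id, as the switch above shows (u32 for HALMAC_HW_EFUSE_SIZE, u8 for HALMAC_HW_CAM_ENTRY_NUM, and so on). The sketch below is a hypothetical caller, not part of the original driver.

/* Hypothetical usage sketch: read two values with correctly typed storage. */
static void dump_basic_hw_values(struct halmac_adapter *adapter)
{
	u32 efuse_size = 0;
	u8 cam_entries = 0;

	if (halmac_get_hw_value_88xx(adapter, HALMAC_HW_EFUSE_SIZE,
				     &efuse_size) != HALMAC_RET_SUCCESS)
		return;
	if (halmac_get_hw_value_88xx(adapter, HALMAC_HW_CAM_ENTRY_NUM,
				     &cam_entries) != HALMAC_RET_SUCCESS)
		return;

	pr_info("efuse size %u, cam entries %u\n", efuse_size, cam_entries);
}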
-/**
- * halmac_set_hw_value_88xx() - set hw config value
- * @halmac_adapter : the adapter of halmac
- * @hw_id : hw id for driver to config
- * @pvalue : hw value; refer to the hw id table for the data type
- * Author : KaiYuan Chang / Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_set_hw_value_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_hw_id hw_id, void *pvalue)
-{
- void *driver_adapter = NULL;
- enum halmac_ret_status status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_GET_HW_VALUE);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (!pvalue) {
- pr_err("%s (!pvalue)==========>\n", __func__);
- return HALMAC_RET_NULL_POINTER;
- }
-
- switch (hw_id) {
- case HALMAC_HW_USB_MODE:
- status = halmac_set_usb_mode_88xx(
- halmac_adapter, *(enum halmac_usb_mode *)pvalue);
- if (status != HALMAC_RET_SUCCESS)
- return status;
- break;
- case HALMAC_HW_SEQ_EN:
- break;
- case HALMAC_HW_BANDWIDTH:
- halmac_cfg_bw_88xx(halmac_adapter, *(enum halmac_bw *)pvalue);
- break;
- case HALMAC_HW_CHANNEL:
- halmac_cfg_ch_88xx(halmac_adapter, *(u8 *)pvalue);
- break;
- case HALMAC_HW_PRI_CHANNEL_IDX:
- halmac_cfg_pri_ch_idx_88xx(halmac_adapter,
- *(enum halmac_pri_ch_idx *)pvalue);
- break;
- case HALMAC_HW_EN_BB_RF:
- halmac_enable_bb_rf_88xx(halmac_adapter, *(u8 *)pvalue);
- break;
- case HALMAC_HW_SDIO_TX_PAGE_THRESHOLD:
- halmac_config_sdio_tx_page_threshold_88xx(
- halmac_adapter,
- (struct halmac_tx_page_threshold_info *)pvalue);
- break;
- case HALMAC_HW_AMPDU_CONFIG:
- halmac_config_ampdu_88xx(halmac_adapter,
- (struct halmac_ampdu_config *)pvalue);
- break;
- default:
- return HALMAC_RET_PARA_NOT_SUPPORT;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_drv_rsvd_pg_num_88xx() - config reserved page number for driver
- * @halmac_adapter : the adapter of halmac
- * @pg_num : page number
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_drv_rsvd_pg_num_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_drv_rsvd_pg_num pg_num)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_CFG_DRV_RSVD_PG_NUM);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>pg_num = %d\n", __func__,
- pg_num);
-
- switch (pg_num) {
- case HALMAC_RSVD_PG_NUM16:
- halmac_adapter->txff_allocation.rsvd_drv_pg_num = 16;
- break;
- case HALMAC_RSVD_PG_NUM24:
- halmac_adapter->txff_allocation.rsvd_drv_pg_num = 24;
- break;
- case HALMAC_RSVD_PG_NUM32:
- halmac_adapter->txff_allocation.rsvd_drv_pg_num = 32;
- break;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_get_chip_version_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ver *version)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s ==========>\n", __func__);
- version->major_ver = (u8)HALMAC_MAJOR_VER_88XX;
- version->prototype_ver = (u8)HALMAC_PROTOTYPE_VER_88XX;
- version->minor_ver = (u8)HALMAC_MINOR_VER_88XX;
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_chk_txdesc_88xx() - check the tx packet format
- * @halmac_adapter : the adapter of halmac
- * @halmac_buf : tx packet buffer, tx desc is included
- * @halmac_size : tx packet size
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_chk_txdesc_88xx(struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (GET_TX_DESC_BMC(halmac_buf))
- if (GET_TX_DESC_AGG_EN(halmac_buf))
- pr_err("TxDesc: Agg should not be set when BMC\n");
-
- if (halmac_size < (GET_TX_DESC_TXPKTSIZE(halmac_buf) +
- GET_TX_DESC_OFFSET(halmac_buf)))
- pr_err("TxDesc: PktSize too small\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_dl_drv_rsvd_page_88xx() - download packet to rsvd page
- * @halmac_adapter : the adapter of halmac
- * @pg_offset : page offset of driver's rsvd page
- * @halmac_buf : data to be downloaded, tx_desc is not included
- * @halmac_size : data size to be downloaded
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_dl_drv_rsvd_page_88xx(struct halmac_adapter *halmac_adapter,
- u8 pg_offset, u8 *halmac_buf, u32 halmac_size)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status ret_status;
- u16 drv_pg_bndy = 0;
- u32 dl_pg_num = 0;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DL_DRV_RSVD_PG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- /*check boundary and size valid*/
- dl_pg_num = halmac_size / halmac_adapter->hw_config_info.page_size +
- ((halmac_size &
- (halmac_adapter->hw_config_info.page_size - 1)) ?
- 1 :
- 0);
- if (pg_offset + dl_pg_num >
- halmac_adapter->txff_allocation.rsvd_drv_pg_num) {
- pr_err("[ERROR] driver download offset or size error ==========>\n");
- return HALMAC_RET_DRV_DL_ERR;
- }
-
- /*update to target download boundary*/
- drv_pg_bndy =
- halmac_adapter->txff_allocation.rsvd_drv_pg_bndy + pg_offset;
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(drv_pg_bndy & BIT_MASK_BCN_HEAD_1_V1));
-
- ret_status = halmac_download_rsvd_page_88xx(halmac_adapter, halmac_buf,
- halmac_size);
-
-	/*restore to original boundary*/
- if (ret_status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_download_rsvd_page_88xx Fail = %x!!\n",
- ret_status);
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
- return ret_status;
- }
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
-			"%s <==========\n", __func__);
- return HALMAC_RET_SUCCESS;
-}
-
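As a worked example of the page math above, assuming (for illustration only) a 128-byte TX page: a 300-byte buffer gives dl_pg_num = 300 / 128 + 1 = 3 pages because of the non-zero remainder, so the call is rejected unless pg_offset + 3 <= rsvd_drv_pg_num; the data is then downloaded starting at rsvd_drv_pg_bndy + pg_offset, and the beacon-head boundary register is restored afterwards whether or not the download succeeds.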
-/**
- * halmac_cfg_csi_rate_88xx() - config CSI frame Tx rate
- * @halmac_adapter : the adapter of halmac
- * @rssi : rssi in decimal value
- * @current_rate : current CSI frame rate
- * @fixrate_en : when set, fix the CSI frame to a VHT rate; otherwise use a legacy OFDM rate
- * @new_rate : API returns the final CSI frame rate
- * Author : chunchu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_csi_rate_88xx(struct halmac_adapter *halmac_adapter, u8 rssi,
- u8 current_rate, u8 fixrate_en, u8 *new_rate)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- u32 temp_csi_setting;
- u16 current_rrsr;
- enum halmac_ret_status ret_status;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_CSI_RATE);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_SND, DBG_DMESG,
- "<%s ==========>\n", __func__);
-
- temp_csi_setting = HALMAC_REG_READ_32(halmac_adapter, REG_BBPSF_CTRL) &
- ~(BIT_MASK_WMAC_CSI_RATE << BIT_SHIFT_WMAC_CSI_RATE);
-
- current_rrsr = HALMAC_REG_READ_16(halmac_adapter, REG_RRSR);
-
- if (rssi >= 40) {
- if (current_rate != HALMAC_OFDM54) {
- HALMAC_REG_WRITE_16(halmac_adapter, REG_RRSR,
- current_rrsr | BIT(HALMAC_OFDM54));
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_BBPSF_CTRL,
- temp_csi_setting |
- BIT_WMAC_CSI_RATE(HALMAC_OFDM54));
- }
- *new_rate = HALMAC_OFDM54;
- ret_status = HALMAC_RET_SUCCESS;
- } else {
- if (current_rate != HALMAC_OFDM24) {
- HALMAC_REG_WRITE_16(halmac_adapter, REG_RRSR,
- current_rrsr &
- ~(BIT(HALMAC_OFDM54)));
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_BBPSF_CTRL,
- temp_csi_setting |
- BIT_WMAC_CSI_RATE(HALMAC_OFDM24));
- }
- *new_rate = HALMAC_OFDM24;
- ret_status = HALMAC_RET_SUCCESS;
- }
-
- return ret_status;
-}
-
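A hypothetical caller sketch, not part of the original driver: keep the last configured CSI rate so the RRSR/BBPSF_CTRL writes above are only needed when the rate actually changes. The name update_csi_rate is illustrative, and fixrate_en is passed as 0 because the body above does not consume it.

static void update_csi_rate(struct halmac_adapter *adapter, u8 rssi,
			    u8 *cur_rate)
{
	u8 new_rate = *cur_rate;

	/* Pick OFDM54 above 40 RSSI, OFDM24 otherwise, per the logic above. */
	if (halmac_cfg_csi_rate_88xx(adapter, rssi, *cur_rate, 0,
				     &new_rate) == HALMAC_RET_SUCCESS)
		*cur_rate = new_rate;
}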
-/**
- * halmac_sdio_cmd53_4byte_88xx() - cmd53 only for 4byte len register IO
- * @halmac_adapter : the adapter of halmac
- * @cmd53_4byte_mode : 1 -> CMD53 is used only for 4-byte register IO, 0 -> no limitation
- * Author : Ivan Lin/KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_sdio_cmd53_4byte_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_sdio_cmd53_4byte_mode cmd53_4byte_mode)
-{
- halmac_adapter->sdio_cmd53_4byte = cmd53_4byte_mode;
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_txfifo_is_empty_88xx() - check if txfifo is empty
- * @halmac_adapter : the adapter of halmac
- * Author : Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_txfifo_is_empty_88xx(struct halmac_adapter *halmac_adapter, u32 chk_num)
-{
- u32 counter;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- counter = (chk_num <= 10) ? 10 : chk_num;
- do {
- if (HALMAC_REG_READ_8(halmac_adapter, REG_TXPKT_EMPTY) != 0xFF)
- return HALMAC_RET_TXFIFO_NO_EMPTY;
-
- if ((HALMAC_REG_READ_8(halmac_adapter, REG_TXPKT_EMPTY + 1) &
- 0x07) != 0x07)
- return HALMAC_RET_TXFIFO_NO_EMPTY;
- counter--;
-
- } while (counter != 0);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_COMMON, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h
deleted file mode 100644
index 6c6eb85a09a3..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h
+++ /dev/null
@@ -1,385 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_88XX_H_
-#define _HALMAC_API_88XX_H_
-
-#include "../halmac_2_platform.h"
-#include "../halmac_type.h"
-
-void halmac_init_state_machine_88xx(struct halmac_adapter *halmac_adapter);
-
-void halmac_init_adapter_para_88xx(struct halmac_adapter *halmac_adapter);
-
-void halmac_init_adapter_dynamic_para_88xx(
- struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_mount_api_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_download_firmware_88xx(struct halmac_adapter *halmac_adapter,
- u8 *hamacl_fw, u32 halmac_fw_size);
-
-enum halmac_ret_status
-halmac_free_download_firmware_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_dlfw_mem dlfw_mem, u8 *hamacl_fw,
- u32 halmac_fw_size);
-
-enum halmac_ret_status
-halmac_get_fw_version_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_fw_version *fw_version);
-
-enum halmac_ret_status
-halmac_cfg_mac_addr_88xx(struct halmac_adapter *halmac_adapter, u8 halmac_port,
- union halmac_wlan_addr *hal_address);
-
-enum halmac_ret_status
-halmac_cfg_bssid_88xx(struct halmac_adapter *halmac_adapter, u8 halmac_port,
- union halmac_wlan_addr *hal_address);
-
-enum halmac_ret_status
-halmac_cfg_multicast_addr_88xx(struct halmac_adapter *halmac_adapter,
- union halmac_wlan_addr *hal_address);
-
-enum halmac_ret_status
-halmac_pre_init_system_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_init_system_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_cfg_rx_aggregation_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_rxagg_cfg halmac_rxagg_cfg);
-
-enum halmac_ret_status
-halmac_init_edca_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_cfg_operation_mode_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_wireless_mode wireless_mode);
-
-enum halmac_ret_status
-halmac_cfg_ch_bw_88xx(struct halmac_adapter *halmac_adapter, u8 channel,
- enum halmac_pri_ch_idx pri_ch_idx, enum halmac_bw bw);
-
-enum halmac_ret_status halmac_cfg_ch_88xx(struct halmac_adapter *halmac_adapter,
- u8 channel);
-
-enum halmac_ret_status
-halmac_cfg_pri_ch_idx_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_pri_ch_idx pri_ch_idx);
-
-enum halmac_ret_status halmac_cfg_bw_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_bw bw);
-
-enum halmac_ret_status
-halmac_init_wmac_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_init_mac_cfg_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode mode);
-
-enum halmac_ret_status
-halmac_dump_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_read_cfg cfg);
-
-enum halmac_ret_status
-halmac_dump_efuse_map_bt_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_bank halmac_efuse_bank,
- u32 bt_efuse_map_size, u8 *bt_efuse_map);
-
-enum halmac_ret_status
-halmac_write_efuse_bt_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_value,
- enum halmac_efuse_bank halmac_efuse_bank);
-
-enum halmac_ret_status
-halmac_pg_efuse_by_map_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- enum halmac_efuse_read_cfg cfg);
-
-enum halmac_ret_status
-halmac_get_efuse_size_88xx(struct halmac_adapter *halmac_adapter,
- u32 *halmac_size);
-
-enum halmac_ret_status
-halmac_get_efuse_available_size_88xx(struct halmac_adapter *halmac_adapter,
- u32 *halmac_size);
-
-enum halmac_ret_status
-halmac_get_c2h_info_88xx(struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size);
-
-enum halmac_ret_status
-halmac_get_logical_efuse_size_88xx(struct halmac_adapter *halmac_adapter,
- u32 *halmac_size);
-
-enum halmac_ret_status
-halmac_dump_logical_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_read_cfg cfg);
-
-enum halmac_ret_status
-halmac_write_logical_efuse_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_value);
-
-enum halmac_ret_status
-halmac_read_logical_efuse_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 *value);
-
-enum halmac_ret_status
-halmac_cfg_fwlps_option_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_fwlps_option *lps_option);
-
-enum halmac_ret_status
-halmac_cfg_fwips_option_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_fwips_option *ips_option);
-
-enum halmac_ret_status
-halmac_enter_wowlan_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_wowlan_option *wowlan_option);
-
-enum halmac_ret_status
-halmac_leave_wowlan_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_enter_ps_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_ps_state ps_state);
-
-enum halmac_ret_status
-halmac_leave_ps_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_h2c_lb_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status halmac_debug_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_cfg_parameter_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_phy_parameter_info *para_info,
- u8 full_fifo);
-
-enum halmac_ret_status
-halmac_update_packet_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_packet_id pkt_id, u8 *pkt, u32 pkt_size);
-
-enum halmac_ret_status
-halmac_bcn_ie_filter_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_bcn_ie_info *bcn_ie_info);
-
-enum halmac_ret_status
-halmac_send_original_h2c_88xx(struct halmac_adapter *halmac_adapter,
- u8 *original_h2c, u16 *seq, u8 ack);
-
-enum halmac_ret_status
-halmac_update_datapack_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type,
- struct halmac_phy_parameter_info *para_info);
-
-enum halmac_ret_status
-halmac_run_datapack_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type);
-
-enum halmac_ret_status
-halmac_cfg_drv_info_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_drv_info halmac_drv_info);
-
-enum halmac_ret_status
-halmac_send_bt_coex_88xx(struct halmac_adapter *halmac_adapter, u8 *bt_buf,
- u32 bt_size, u8 ack);
-
-enum halmac_ret_status
-halmac_verify_platform_api_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_timer_2s_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_fill_txdesc_check_sum_88xx(struct halmac_adapter *halmac_adapter,
- u8 *cur_desc);
-
-enum halmac_ret_status
-halmac_dump_fifo_88xx(struct halmac_adapter *halmac_adapter,
- enum hal_fifo_sel halmac_fifo_sel, u32 halmac_start_addr,
- u32 halmac_fifo_dump_size, u8 *fifo_map);
-
-u32 halmac_get_fifo_size_88xx(struct halmac_adapter *halmac_adapter,
- enum hal_fifo_sel halmac_fifo_sel);
-
-enum halmac_ret_status
-halmac_cfg_txbf_88xx(struct halmac_adapter *halmac_adapter, u8 userid,
- enum halmac_bw bw, u8 txbf_en);
-
-enum halmac_ret_status
-halmac_cfg_mumimo_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_cfg_mumimo_para *cfgmu);
-
-enum halmac_ret_status
-halmac_cfg_sounding_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_snd_role role,
- enum halmac_data_rate datarate);
-
-enum halmac_ret_status
-halmac_del_sounding_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_snd_role role);
-
-enum halmac_ret_status
-halmac_su_bfee_entry_init_88xx(struct halmac_adapter *halmac_adapter, u8 userid,
- u16 paid);
-
-enum halmac_ret_status
-halmac_su_bfer_entry_init_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_su_bfer_init_para *su_bfer_init);
-
-enum halmac_ret_status
-halmac_mu_bfee_entry_init_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_mu_bfee_init_para *mu_bfee_init);
-
-enum halmac_ret_status
-halmac_mu_bfer_entry_init_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_mu_bfer_init_para *mu_bfer_init);
-
-enum halmac_ret_status
-halmac_su_bfee_entry_del_88xx(struct halmac_adapter *halmac_adapter, u8 userid);
-
-enum halmac_ret_status
-halmac_su_bfer_entry_del_88xx(struct halmac_adapter *halmac_adapter, u8 userid);
-
-enum halmac_ret_status
-halmac_mu_bfee_entry_del_88xx(struct halmac_adapter *halmac_adapter, u8 userid);
-
-enum halmac_ret_status
-halmac_mu_bfer_entry_del_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_add_ch_info_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ch_info *ch_info);
-
-enum halmac_ret_status
-halmac_add_extra_ch_info_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ch_extra_info *ch_extra_info);
-
-enum halmac_ret_status
-halmac_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ch_switch_option *cs_option);
-
-enum halmac_ret_status halmac_p2pps_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_p2pps *p2p_ps);
-
-enum halmac_ret_status
-halmac_func_p2pps_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_p2pps *p2p_ps);
-
-enum halmac_ret_status
-halmac_clear_ch_info_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_send_general_info_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_general_info *general_info);
-
-enum halmac_ret_status
-halmac_start_iqk_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_iqk_para_ *iqk_para);
-
-enum halmac_ret_status halmac_ctrl_pwr_tracking_88xx(
- struct halmac_adapter *halmac_adapter,
- struct halmac_pwr_tracking_option *pwr_tracking_opt);
-
-enum halmac_ret_status
-halmac_query_status_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_feature_id feature_id,
- enum halmac_cmd_process_status *process_status,
- u8 *data, u32 *size);
-
-enum halmac_ret_status
-halmac_reset_feature_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_feature_id feature_id);
-
-enum halmac_ret_status
-halmac_check_fw_status_88xx(struct halmac_adapter *halmac_adapter,
- bool *fw_status);
-
-enum halmac_ret_status
-halmac_dump_fw_dmem_88xx(struct halmac_adapter *halmac_adapter, u8 *dmem,
- u32 *size);
-
-enum halmac_ret_status
-halmac_cfg_max_dl_size_88xx(struct halmac_adapter *halmac_adapter, u32 size);
-
-enum halmac_ret_status halmac_psd_88xx(struct halmac_adapter *halmac_adapter,
- u16 start_psd, u16 end_psd);
-
-enum halmac_ret_status
-halmac_cfg_la_mode_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_la_mode la_mode);
-
-enum halmac_ret_status halmac_cfg_rx_fifo_expanding_mode_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_rx_fifo_expanding_mode rx_fifo_expanding_mode);
-
-enum halmac_ret_status
-halmac_config_security_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_security_setting *sec_setting);
-
-u8 halmac_get_used_cam_entry_num_88xx(struct halmac_adapter *halmac_adapter,
- enum hal_security_type sec_type);
-
-enum halmac_ret_status
-halmac_write_cam_88xx(struct halmac_adapter *halmac_adapter, u32 entry_index,
- struct halmac_cam_entry_info *cam_entry_info);
-
-enum halmac_ret_status
-halmac_read_cam_entry_88xx(struct halmac_adapter *halmac_adapter,
- u32 entry_index,
- struct halmac_cam_entry_format *content);
-
-enum halmac_ret_status
-halmac_clear_cam_entry_88xx(struct halmac_adapter *halmac_adapter,
- u32 entry_index);
-
-enum halmac_ret_status
-halmac_get_hw_value_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_hw_id hw_id, void *pvalue);
-
-enum halmac_ret_status
-halmac_set_hw_value_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_hw_id hw_id, void *pvalue);
-
-enum halmac_ret_status
-halmac_cfg_drv_rsvd_pg_num_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_drv_rsvd_pg_num pg_num);
-
-enum halmac_ret_status
-halmac_get_chip_version_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ver *version);
-
-enum halmac_ret_status
-halmac_chk_txdesc_88xx(struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size);
-
-enum halmac_ret_status
-halmac_dl_drv_rsvd_page_88xx(struct halmac_adapter *halmac_adapter,
- u8 pg_offset, u8 *halmac_buf, u32 halmac_size);
-
-enum halmac_ret_status
-halmac_cfg_csi_rate_88xx(struct halmac_adapter *halmac_adapter, u8 rssi,
- u8 current_rate, u8 fixrate_en, u8 *new_rate);
-
-enum halmac_ret_status halmac_sdio_cmd53_4byte_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_sdio_cmd53_4byte_mode cmd53_4byte_mode);
-
-enum halmac_ret_status
-halmac_txfifo_is_empty_88xx(struct halmac_adapter *halmac_adapter, u32 chk_num);
-
-#endif /* _HALMAC_API_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c
deleted file mode 100644
index 8462f23b652e..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c
+++ /dev/null
@@ -1,318 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_88xx_cfg.h"
-
-/**
- * halmac_init_pcie_cfg_88xx() - init PCIe
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_pcie_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_PCIE_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_deinit_pcie_cfg_88xx() - deinit PCIe
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_deinit_pcie_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DEINIT_PCIE_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_rx_aggregation_88xx_pcie() - config rx aggregation
- * @halmac_adapter : the adapter of halmac
- * @phalmac_rxagg_cfg : rx aggregation configuration
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_rx_aggregation_88xx_pcie(struct halmac_adapter *halmac_adapter,
- struct halmac_rxagg_cfg *phalmac_rxagg_cfg)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_CFG_RX_AGGREGATION);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_8_pcie_88xx() - read 1byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u8 halmac_reg_read_8_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- return PLATFORM_REG_READ_8(driver_adapter, halmac_offset);
-}
-
-/**
- * halmac_reg_write_8_pcie_88xx() - write 1byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_8_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- PLATFORM_REG_WRITE_8(driver_adapter, halmac_offset, halmac_data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_16_pcie_88xx() - read 2byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u16 halmac_reg_read_16_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- return PLATFORM_REG_READ_16(driver_adapter, halmac_offset);
-}
-
-/**
- * halmac_reg_write_16_pcie_88xx() - write 2byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_16_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u16 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- PLATFORM_REG_WRITE_16(driver_adapter, halmac_offset, halmac_data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_32_pcie_88xx() - read 4byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u32 halmac_reg_read_32_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- return PLATFORM_REG_READ_32(driver_adapter, halmac_offset);
-}
-
-/**
- * halmac_reg_write_32_pcie_88xx() - write 4byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_32_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u32 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- PLATFORM_REG_WRITE_32(driver_adapter, halmac_offset, halmac_data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_tx_agg_align_pcie_not_support_88xx() - config tx agg alignment (not supported)
- * @halmac_adapter : the adapter of halmac
- * @enable : function enable(1)/disable(0)
- * @align_size : tx agg alignment size (2^n, n = 3~11)
- * Author : Soar Tu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_cfg_tx_agg_align_pcie_not_support_88xx(
- struct halmac_adapter *halmac_adapter, u8 enable, u16 align_size)
-{
- struct halmac_api *halmac_api;
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_TX_AGG_ALIGN);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s not support\n", __func__);
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h
deleted file mode 100644
index dc4d98bcc68c..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_88XX_PCIE_H_
-#define _HALMAC_API_88XX_PCIE_H_
-
-#include "../halmac_2_platform.h"
-#include "../halmac_type.h"
-
-#define LINK_CTRL2_REG_OFFSET 0xA0
-#define GEN2_CTRL_OFFSET 0x80C
-#define LINK_STATUS_REG_OFFSET 0x82
-#define GEN1_SPEED 0x01
-#define GEN2_SPEED 0x02
-
-enum halmac_ret_status
-halmac_init_pcie_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_deinit_pcie_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_cfg_rx_aggregation_88xx_pcie(struct halmac_adapter *halmac_adapter,
- struct halmac_rxagg_cfg *phalmac_rxagg_cfg);
-
-u8 halmac_reg_read_8_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_8_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_data);
-
-u16 halmac_reg_read_16_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_16_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u16 halmac_data);
-
-u32 halmac_reg_read_32_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_32_pcie_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u32 halmac_data);
-
-enum halmac_ret_status halmac_cfg_tx_agg_align_pcie_not_support_88xx(
- struct halmac_adapter *halmac_adapter, u8 enable, u16 align_size);
-
-#endif /* _HALMAC_API_88XX_PCIE_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c
deleted file mode 100644
index 979821ea54a1..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c
+++ /dev/null
@@ -1,963 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_88xx_cfg.h"
-
-/**
- * halmac_init_sdio_cfg_88xx() - init SDIO
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_sdio_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_SDIO_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_REG_READ_32(halmac_adapter, REG_SDIO_FREE_TXPG);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_SDIO_TX_CTRL, 0x00000000);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_deinit_sdio_cfg_88xx() - deinit SDIO
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_deinit_sdio_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DEINIT_SDIO_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_rx_aggregation_88xx_sdio() - config rx aggregation
- * @halmac_adapter : the adapter of halmac
- * @phalmac_rxagg_cfg : rx aggregation configuration
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_rx_aggregation_88xx_sdio(struct halmac_adapter *halmac_adapter,
- struct halmac_rxagg_cfg *phalmac_rxagg_cfg)
-{
- u8 value8;
- u8 size = 0, timeout = 0, agg_enable = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_CFG_RX_AGGREGATION);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- agg_enable = HALMAC_REG_READ_8(halmac_adapter, REG_TXDMA_PQ_MAP);
-
- switch (phalmac_rxagg_cfg->mode) {
- case HALMAC_RX_AGG_MODE_NONE:
- agg_enable &= ~(BIT_RXDMA_AGG_EN);
- break;
- case HALMAC_RX_AGG_MODE_DMA:
- case HALMAC_RX_AGG_MODE_USB:
- agg_enable |= BIT_RXDMA_AGG_EN;
- break;
- default:
- pr_err("halmac_cfg_rx_aggregation_88xx_usb switch case not support\n");
- agg_enable &= ~BIT_RXDMA_AGG_EN;
- break;
- }
-
- if (!phalmac_rxagg_cfg->threshold.drv_define) {
- size = 0xFF;
- timeout = 0x01;
- } else {
- size = phalmac_rxagg_cfg->threshold.size;
- timeout = phalmac_rxagg_cfg->threshold.timeout;
- }
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXDMA_PQ_MAP, agg_enable);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_RXDMA_AGG_PG_TH,
- (u16)(size | (timeout << BIT_SHIFT_DMA_AGG_TO)));
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_RXDMA_MODE);
- if ((agg_enable & BIT_RXDMA_AGG_EN) != 0)
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RXDMA_MODE,
- value8 | BIT_DMA_MODE);
- else
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RXDMA_MODE,
- value8 & ~(BIT_DMA_MODE));
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_8_sdio_88xx() - read 1byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u8 halmac_reg_read_8_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- u8 value8;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- status = halmac_convert_to_sdio_bus_offset_88xx(halmac_adapter,
- &halmac_offset);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- value8 = PLATFORM_SDIO_CMD52_READ(driver_adapter, halmac_offset);
-
- return value8;
-}
-
-/**
- * halmac_reg_write_8_sdio_88xx() - write 1byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_8_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- status = halmac_convert_to_sdio_bus_offset_88xx(halmac_adapter,
- &halmac_offset);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset, halmac_data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_16_sdio_88xx() - read 2byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u16 halmac_reg_read_16_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- union {
- u16 word;
- u8 byte[2];
- __le16 le_word;
- } value16 = {0x0000};
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- status = halmac_convert_to_sdio_bus_offset_88xx(halmac_adapter,
- &halmac_offset);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- if (halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_OFF ||
- (halmac_offset & (2 - 1)) != 0 ||
- halmac_adapter->sdio_cmd53_4byte ==
- HALMAC_SDIO_CMD53_4BYTE_MODE_RW ||
- halmac_adapter->sdio_cmd53_4byte ==
- HALMAC_SDIO_CMD53_4BYTE_MODE_R) {
- value16.byte[0] =
- PLATFORM_SDIO_CMD52_READ(driver_adapter, halmac_offset);
- value16.byte[1] = PLATFORM_SDIO_CMD52_READ(driver_adapter,
- halmac_offset + 1);
- value16.word = le16_to_cpu(value16.le_word);
- } else {
-#if (PLATFORM_SD_CLK > HALMAC_SD_CLK_THRESHOLD_88XX)
- if ((halmac_offset & 0xffffef00) == 0x00000000) {
- value16.byte[0] = PLATFORM_SDIO_CMD52_READ(
- driver_adapter, halmac_offset);
- value16.byte[1] = PLATFORM_SDIO_CMD52_READ(
- driver_adapter, halmac_offset + 1);
- value16.word = le16_to_cpu(value16.le_word);
- } else {
- value16.word = PLATFORM_SDIO_CMD53_READ_16(
- driver_adapter, halmac_offset);
- }
-#else
- value16.word = PLATFORM_SDIO_CMD53_READ_16(driver_adapter,
- halmac_offset);
-#endif
- }
-
- return value16.word;
-}
-
-/**
- * halmac_reg_write_16_sdio_88xx() - write 2byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_16_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u16 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- status = halmac_convert_to_sdio_bus_offset_88xx(halmac_adapter,
- &halmac_offset);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- if (halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_OFF ||
- (halmac_offset & (2 - 1)) != 0 ||
- halmac_adapter->sdio_cmd53_4byte ==
- HALMAC_SDIO_CMD53_4BYTE_MODE_RW ||
- halmac_adapter->sdio_cmd53_4byte ==
- HALMAC_SDIO_CMD53_4BYTE_MODE_W) {
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset,
- (u8)(halmac_data & 0xFF));
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset + 1,
- (u8)((halmac_data & 0xFF00) >> 8));
- } else {
- PLATFORM_SDIO_CMD53_WRITE_16(driver_adapter, halmac_offset,
- halmac_data);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_32_sdio_88xx() - read 4byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u32 halmac_reg_read_32_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- u32 halmac_offset_old = 0;
-
- union {
- u32 dword;
- u8 byte[4];
- __le32 le_dword;
- } value32 = {0x00000000};
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- halmac_offset_old = halmac_offset;
-
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- status = halmac_convert_to_sdio_bus_offset_88xx(halmac_adapter,
- &halmac_offset);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- if (halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_OFF ||
- (halmac_offset & (4 - 1)) != 0) {
- value32.byte[0] =
- PLATFORM_SDIO_CMD52_READ(driver_adapter, halmac_offset);
- value32.byte[1] = PLATFORM_SDIO_CMD52_READ(driver_adapter,
- halmac_offset + 1);
- value32.byte[2] = PLATFORM_SDIO_CMD52_READ(driver_adapter,
- halmac_offset + 2);
- value32.byte[3] = PLATFORM_SDIO_CMD52_READ(driver_adapter,
- halmac_offset + 3);
- value32.dword = le32_to_cpu(value32.le_dword);
- } else {
-#if (PLATFORM_SD_CLK > HALMAC_SD_CLK_THRESHOLD_88XX)
- if ((halmac_offset_old & 0xffffef00) == 0x00000000) {
- value32.byte[0] = PLATFORM_SDIO_CMD52_READ(
- driver_adapter, halmac_offset);
- value32.byte[1] = PLATFORM_SDIO_CMD52_READ(
- driver_adapter, halmac_offset + 1);
- value32.byte[2] = PLATFORM_SDIO_CMD52_READ(
- driver_adapter, halmac_offset + 2);
- value32.byte[3] = PLATFORM_SDIO_CMD52_READ(
- driver_adapter, halmac_offset + 3);
- value32.dword = le32_to_cpu(value32.le_dword);
- } else {
- value32.dword = PLATFORM_SDIO_CMD53_READ_32(
- driver_adapter, halmac_offset);
- }
-#else
- value32.dword = PLATFORM_SDIO_CMD53_READ_32(driver_adapter,
- halmac_offset);
-#endif
- }
-
- return value32.dword;
-}
-
-/**
- * halmac_reg_write_32_sdio_88xx() - write 4byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_32_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u32 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- status = halmac_convert_to_sdio_bus_offset_88xx(halmac_adapter,
- &halmac_offset);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- if (halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_OFF ||
- (halmac_offset & (4 - 1)) != 0) {
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset,
- (u8)(halmac_data & 0xFF));
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset + 1,
- (u8)((halmac_data & 0xFF00) >> 8));
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset + 2,
- (u8)((halmac_data & 0xFF0000) >> 16));
- PLATFORM_SDIO_CMD52_WRITE(
- driver_adapter, halmac_offset + 3,
- (u8)((halmac_data & 0xFF000000) >> 24));
- } else {
- PLATFORM_SDIO_CMD53_WRITE_32(driver_adapter, halmac_offset,
- halmac_data);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_nbyte_sdio_88xx() - read n byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_size : register value size
- * @halmac_data : register value
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u8 halmac_reg_read_nbyte_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u32 halmac_size,
- u8 *halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if ((halmac_offset & 0xFFFF0000) == 0) {
- pr_err("halmac_offset error = 0x%x\n", halmac_offset);
- return HALMAC_RET_FAIL;
- }
-
- status = halmac_convert_to_sdio_bus_offset_88xx(halmac_adapter,
- &halmac_offset);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- if (halmac_adapter->halmac_state.mac_power == HALMAC_MAC_POWER_OFF) {
- pr_err("halmac_state error = 0x%x\n",
- halmac_adapter->halmac_state.mac_power);
- return HALMAC_RET_FAIL;
- }
-
- PLATFORM_SDIO_CMD53_READ_N(driver_adapter, halmac_offset, halmac_size,
- halmac_data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_sdio_tx_addr_88xx() - get CMD53 addr for the TX packet
- * @halmac_adapter : the adapter of halmac
- * @halmac_buf : tx packet, include txdesc
- * @halmac_size : tx packet size
- * @pcmd53_addr : cmd53 addr value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_get_sdio_tx_addr_88xx(struct halmac_adapter *halmac_adapter,
- u8 *halmac_buf, u32 halmac_size, u32 *pcmd53_addr)
-{
- u32 four_byte_len;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_queue_select queue_sel;
- enum halmac_dma_mapping dma_mapping;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_GET_SDIO_TX_ADDR);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (!halmac_buf) {
- pr_err("halmac_buf is NULL!!\n");
- return HALMAC_RET_DATA_BUF_NULL;
- }
-
- if (halmac_size == 0) {
- pr_err("halmac_size is 0!!\n");
- return HALMAC_RET_DATA_SIZE_INCORRECT;
- }
-
- queue_sel = (enum halmac_queue_select)GET_TX_DESC_QSEL(halmac_buf);
-
- switch (queue_sel) {
- case HALMAC_QUEUE_SELECT_VO:
- case HALMAC_QUEUE_SELECT_VO_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VO];
- break;
- case HALMAC_QUEUE_SELECT_VI:
- case HALMAC_QUEUE_SELECT_VI_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VI];
- break;
- case HALMAC_QUEUE_SELECT_BE:
- case HALMAC_QUEUE_SELECT_BE_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BE];
- break;
- case HALMAC_QUEUE_SELECT_BK:
- case HALMAC_QUEUE_SELECT_BK_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BK];
- break;
- case HALMAC_QUEUE_SELECT_MGNT:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_MG];
- break;
- case HALMAC_QUEUE_SELECT_HIGH:
- case HALMAC_QUEUE_SELECT_BCN:
- case HALMAC_QUEUE_SELECT_CMD:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_HI];
- break;
- default:
- pr_err("Qsel is out of range\n");
- return HALMAC_RET_QSEL_INCORRECT;
- }
-
- four_byte_len = (halmac_size >> 2) + ((halmac_size & (4 - 1)) ? 1 : 0);
-
- switch (dma_mapping) {
- case HALMAC_DMA_MAPPING_HIGH:
- *pcmd53_addr = HALMAC_SDIO_CMD_ADDR_TXFF_HIGH;
- break;
- case HALMAC_DMA_MAPPING_NORMAL:
- *pcmd53_addr = HALMAC_SDIO_CMD_ADDR_TXFF_NORMAL;
- break;
- case HALMAC_DMA_MAPPING_LOW:
- *pcmd53_addr = HALMAC_SDIO_CMD_ADDR_TXFF_LOW;
- break;
- case HALMAC_DMA_MAPPING_EXTRA:
- *pcmd53_addr = HALMAC_SDIO_CMD_ADDR_TXFF_EXTRA;
- break;
- default:
- pr_err("DmaMapping is out of range\n");
- return HALMAC_RET_DMA_MAP_INCORRECT;
- }
-
- *pcmd53_addr = (*pcmd53_addr << 13) |
- (four_byte_len & HALMAC_SDIO_4BYTE_LEN_MASK);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_tx_agg_align_sdio_88xx() - config sdio bus tx agg alignment
- * @halmac_adapter : the adapter of halmac
- * @enable : function enable(1)/disable(0)
- * @align_size : sdio bus tx agg alignment size (2^n, n = 3~11)
- * Author : Soar Tu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_tx_agg_align_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u8 enable, u16 align_size)
-{
- struct halmac_api *halmac_api;
- void *driver_adapter = NULL;
- u8 i, align_size_ok = 0;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_TX_AGG_ALIGN);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if ((align_size & 0xF000) != 0) {
- pr_err("Align size is out of range\n");
- return HALMAC_RET_FAIL;
- }
-
- for (i = 3; i <= 11; i++) {
- if (align_size == 1 << i) {
- align_size_ok = 1;
- break;
- }
- }
- if (align_size_ok == 0) {
- pr_err("Align size is not 2^3 ~ 2^11\n");
- return HALMAC_RET_FAIL;
- }
-
- /*Keep sdio tx agg alignment size for driver query*/
- halmac_adapter->hw_config_info.tx_align_size = align_size;
-
- if (enable)
- HALMAC_REG_WRITE_16(halmac_adapter, REG_RQPN_CTRL_2,
- 0x8000 | align_size);
- else
- HALMAC_REG_WRITE_16(halmac_adapter, REG_RQPN_CTRL_2,
- align_size);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_cfg_tx_agg_align_sdio_not_support_88xx(
- struct halmac_adapter *halmac_adapter, u8 enable, u16 align_size)
-{
- struct halmac_api *halmac_api;
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_TX_AGG_ALIGN);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s not support\n", __func__);
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_tx_allowed_sdio_88xx() - check tx status
- * @halmac_adapter : the adapter of halmac
- * @halmac_buf : tx packet, include txdesc
- * @halmac_size : tx packet size, include txdesc
- * Author : Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_tx_allowed_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u8 *halmac_buf, u32 halmac_size)
-{
- u8 *curr_packet;
- u16 *curr_free_space;
- u32 i, counter;
- u32 tx_agg_num, packet_size = 0;
- u32 tx_required_page_num, total_required_page_num = 0;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- void *driver_adapter = NULL;
- enum halmac_dma_mapping dma_mapping;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_TX_ALLOWED_SDIO);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- tx_agg_num = GET_TX_DESC_DMA_TXAGG_NUM(halmac_buf);
- curr_packet = halmac_buf;
-
- tx_agg_num = tx_agg_num == 0 ? 1 : tx_agg_num;
-
- switch ((enum halmac_queue_select)GET_TX_DESC_QSEL(curr_packet)) {
- case HALMAC_QUEUE_SELECT_VO:
- case HALMAC_QUEUE_SELECT_VO_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VO];
- break;
- case HALMAC_QUEUE_SELECT_VI:
- case HALMAC_QUEUE_SELECT_VI_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VI];
- break;
- case HALMAC_QUEUE_SELECT_BE:
- case HALMAC_QUEUE_SELECT_BE_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BE];
- break;
- case HALMAC_QUEUE_SELECT_BK:
- case HALMAC_QUEUE_SELECT_BK_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BK];
- break;
- case HALMAC_QUEUE_SELECT_MGNT:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_MG];
- break;
- case HALMAC_QUEUE_SELECT_HIGH:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_HI];
- break;
- case HALMAC_QUEUE_SELECT_BCN:
- case HALMAC_QUEUE_SELECT_CMD:
- return HALMAC_RET_SUCCESS;
- default:
- pr_err("Qsel is out of range\n");
- return HALMAC_RET_QSEL_INCORRECT;
- }
-
- switch (dma_mapping) {
- case HALMAC_DMA_MAPPING_HIGH:
- curr_free_space =
- &halmac_adapter->sdio_free_space.high_queue_number;
- break;
- case HALMAC_DMA_MAPPING_NORMAL:
- curr_free_space =
- &halmac_adapter->sdio_free_space.normal_queue_number;
- break;
- case HALMAC_DMA_MAPPING_LOW:
- curr_free_space =
- &halmac_adapter->sdio_free_space.low_queue_number;
- break;
- case HALMAC_DMA_MAPPING_EXTRA:
- curr_free_space =
- &halmac_adapter->sdio_free_space.extra_queue_number;
- break;
- default:
- pr_err("DmaMapping is out of range\n");
- return HALMAC_RET_DMA_MAP_INCORRECT;
- }
-
- for (i = 0; i < tx_agg_num; i++) {
- packet_size = GET_TX_DESC_TXPKTSIZE(curr_packet) +
- GET_TX_DESC_OFFSET(curr_packet) +
- (GET_TX_DESC_PKT_OFFSET(curr_packet) << 3);
- tx_required_page_num =
- (packet_size >>
- halmac_adapter->hw_config_info.page_size_2_power) +
- ((packet_size &
- (halmac_adapter->hw_config_info.page_size - 1)) ?
- 1 :
- 0);
- total_required_page_num += tx_required_page_num;
-
- packet_size = HALMAC_ALIGN(packet_size, 8);
-
- curr_packet += packet_size;
- }
-
- counter = 10;
- do {
- if ((u32)(*curr_free_space +
- halmac_adapter->sdio_free_space.public_queue_number) >
- total_required_page_num) {
- if (*curr_free_space >= total_required_page_num) {
- *curr_free_space -=
- (u16)total_required_page_num;
- } else {
- halmac_adapter->sdio_free_space
- .public_queue_number -=
- (u16)(total_required_page_num -
- *curr_free_space);
- *curr_free_space = 0;
- }
-
- status = halmac_check_oqt_88xx(halmac_adapter,
- tx_agg_num, halmac_buf);
-
- if (status != HALMAC_RET_SUCCESS)
- return status;
-
- break;
- }
-
- halmac_update_sdio_free_page_88xx(halmac_adapter);
-
- counter--;
- if (counter == 0)
- return HALMAC_RET_FREE_SPACE_NOT_ENOUGH;
- } while (1);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_indirect_32_sdio_88xx() - read MAC reg by SDIO reg
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : Soar
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u32 halmac_reg_read_indirect_32_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- u8 rtemp;
- u32 counter = 1000;
- void *driver_adapter = NULL;
-
- union {
- u32 dword;
- u8 byte[4];
- } value32 = {0x00000000};
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- PLATFORM_SDIO_CMD53_WRITE_32(
- driver_adapter,
- (HALMAC_SDIO_CMD_ADDR_SDIO_REG << 13) |
- (REG_SDIO_INDIRECT_REG_CFG & HALMAC_SDIO_LOCAL_MSK),
- halmac_offset | BIT(19) | BIT(17));
-
- do {
- rtemp = PLATFORM_SDIO_CMD52_READ(
- driver_adapter,
- (HALMAC_SDIO_CMD_ADDR_SDIO_REG << 13) |
- ((REG_SDIO_INDIRECT_REG_CFG + 2) &
- HALMAC_SDIO_LOCAL_MSK));
- counter--;
- } while ((rtemp & BIT(4)) != 0 && counter > 0);
-
- value32.dword = PLATFORM_SDIO_CMD53_READ_32(
- driver_adapter,
- (HALMAC_SDIO_CMD_ADDR_SDIO_REG << 13) |
- (REG_SDIO_INDIRECT_REG_DATA & HALMAC_SDIO_LOCAL_MSK));
-
- return value32.dword;
-}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h
deleted file mode 100644
index 2a891b0f6ab8..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_88XX_SDIO_H_
-#define _HALMAC_API_88XX_SDIO_H_
-
-#include "../halmac_2_platform.h"
-#include "../halmac_type.h"
-
-enum halmac_ret_status
-halmac_init_sdio_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_deinit_sdio_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_cfg_rx_aggregation_88xx_sdio(struct halmac_adapter *halmac_adapter,
- struct halmac_rxagg_cfg *phalmac_rxagg_cfg);
-
-u8 halmac_reg_read_8_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_8_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_data);
-
-u16 halmac_reg_read_16_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_16_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u16 halmac_data);
-
-u32 halmac_reg_read_32_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_32_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u32 halmac_data);
-
-enum halmac_ret_status
-halmac_get_sdio_tx_addr_88xx(struct halmac_adapter *halmac_adapter,
- u8 *halmac_buf, u32 halmac_size, u32 *pcmd53_addr);
-
-enum halmac_ret_status
-halmac_cfg_tx_agg_align_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u8 enable, u16 align_size);
-
-enum halmac_ret_status halmac_cfg_tx_agg_align_sdio_not_support_88xx(
- struct halmac_adapter *halmac_adapter, u8 enable, u16 align_size);
-
-enum halmac_ret_status
-halmac_tx_allowed_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u8 *halmac_buf, u32 halmac_size);
-
-u32 halmac_reg_read_indirect_32_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-u8 halmac_reg_read_nbyte_sdio_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u32 halmac_size,
- u8 *halmac_data);
-
-#endif /* _HALMAC_API_88XX_SDIO_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c
deleted file mode 100644
index 0bd6abdd0a68..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c
+++ /dev/null
@@ -1,543 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_88xx_cfg.h"
-
-/**
- * halmac_init_usb_cfg_88xx() - init USB
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_usb_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- u8 value8 = 0;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_INIT_USB_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- value8 |= (BIT_DMA_MODE |
- (0x3 << BIT_SHIFT_BURST_CNT)); /* burst number = 4 */
-
- if (PLATFORM_REG_READ_8(driver_adapter, REG_SYS_CFG2 + 3) ==
- 0x20) { /* usb3.0 */
- value8 |= (HALMAC_USB_BURST_SIZE_3_0 << BIT_SHIFT_BURST_SIZE);
- } else {
- if ((PLATFORM_REG_READ_8(driver_adapter, REG_USB_USBSTAT) &
- 0x3) == 0x1) /* usb2.0 */
- value8 |= HALMAC_USB_BURST_SIZE_2_0_HSPEED
- << BIT_SHIFT_BURST_SIZE;
- else /* usb1.1 */
- value8 |= HALMAC_USB_BURST_SIZE_2_0_FSPEED
- << BIT_SHIFT_BURST_SIZE;
- }
-
- PLATFORM_REG_WRITE_8(driver_adapter, REG_RXDMA_MODE, value8);
- PLATFORM_REG_WRITE_16(
- driver_adapter, REG_TXDMA_OFFSET_CHK,
- PLATFORM_REG_READ_16(driver_adapter, REG_TXDMA_OFFSET_CHK) |
- BIT_DROP_DATA_EN);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_deinit_usb_cfg_88xx() - deinit USB
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_deinit_usb_cfg_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_DEINIT_USB_CFG);
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_rx_aggregation_88xx_usb() - config rx aggregation
- * @halmac_adapter : the adapter of halmac
- * @phalmac_rxagg_cfg : rx aggregation configuration
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_cfg_rx_aggregation_88xx_usb(struct halmac_adapter *halmac_adapter,
- struct halmac_rxagg_cfg *phalmac_rxagg_cfg)
-{
- u8 dma_usb_agg;
- u8 size = 0, timeout = 0, agg_enable = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_CFG_RX_AGGREGATION);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- dma_usb_agg =
- HALMAC_REG_READ_8(halmac_adapter, REG_RXDMA_AGG_PG_TH + 3);
- agg_enable = HALMAC_REG_READ_8(halmac_adapter, REG_TXDMA_PQ_MAP);
-
- switch (phalmac_rxagg_cfg->mode) {
- case HALMAC_RX_AGG_MODE_NONE:
- agg_enable &= ~BIT_RXDMA_AGG_EN;
- break;
- case HALMAC_RX_AGG_MODE_DMA:
- agg_enable |= BIT_RXDMA_AGG_EN;
- dma_usb_agg |= BIT(7);
- break;
-
- case HALMAC_RX_AGG_MODE_USB:
- agg_enable |= BIT_RXDMA_AGG_EN;
- dma_usb_agg &= ~BIT(7);
- break;
- default:
- pr_err("%s switch case not support\n", __func__);
- agg_enable &= ~BIT_RXDMA_AGG_EN;
- break;
- }
-
- if (!phalmac_rxagg_cfg->threshold.drv_define) {
- if (PLATFORM_REG_READ_8(driver_adapter, REG_SYS_CFG2 + 3) ==
- 0x20) {
- /* usb3.0 */
- size = 0x5;
- timeout = 0xA;
- } else {
- /* usb2.0 */
- size = 0x5;
- timeout = 0x20;
- }
- } else {
- size = phalmac_rxagg_cfg->threshold.size;
- timeout = phalmac_rxagg_cfg->threshold.timeout;
- }
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_TXDMA_PQ_MAP, agg_enable);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RXDMA_AGG_PG_TH + 3,
- dma_usb_agg);
- HALMAC_REG_WRITE_16(halmac_adapter, REG_RXDMA_AGG_PG_TH,
- (u16)(size | (timeout << BIT_SHIFT_DMA_AGG_TO)));
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_8_usb_88xx() - read 1byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-u8 halmac_reg_read_8_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- u8 value8;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- value8 = PLATFORM_REG_READ_8(driver_adapter, halmac_offset);
-
- return value8;
-}
-
-/**
- * halmac_reg_write_8_usb_88xx() - write a 1-byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_8_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- PLATFORM_REG_WRITE_8(driver_adapter, halmac_offset, halmac_data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_16_usb_88xx() - read a 2-byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : the register value
- */
-u16 halmac_reg_read_16_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- union {
- u16 word;
- u8 byte[2];
- } value16 = {0x0000};
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- value16.word = PLATFORM_REG_READ_16(driver_adapter, halmac_offset);
-
- return value16.word;
-}
-
-/**
- * halmac_reg_write_16_usb_88xx() - write a 2-byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_16_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u16 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- PLATFORM_REG_WRITE_16(driver_adapter, halmac_offset, halmac_data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_reg_read_32_usb_88xx() - read a 4-byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * Author : KaiYuan Chang/Ivan Lin
- * Return : the register value
- */
-u32 halmac_reg_read_32_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- union {
- u32 dword;
- u8 byte[4];
- } value32 = {0x00000000};
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- value32.dword = PLATFORM_REG_READ_32(driver_adapter, halmac_offset);
-
- return value32.dword;
-}
-
-/**
- * halmac_reg_write_32_usb_88xx() - write a 4-byte register
- * @halmac_adapter : the adapter of halmac
- * @halmac_offset : register offset
- * @halmac_data : register value
- * Author : KaiYuan Chang/Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_reg_write_32_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u32 halmac_data)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- PLATFORM_REG_WRITE_32(driver_adapter, halmac_offset, halmac_data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_set_bulkout_num_88xx() - inform halmac of the usb bulk-out number
- * @halmac_adapter : the adapter of halmac
- * @bulkout_num : usb bulk-out number
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_set_bulkout_num_88xx(struct halmac_adapter *halmac_adapter,
- u8 bulkout_num)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_SET_BULKOUT_NUM);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- halmac_adapter->halmac_bulkout_num = bulkout_num;
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_usb_bulkout_id_88xx() - get the bulk-out id for the TX packet
- * @halmac_adapter : the adapter of halmac
- * @halmac_buf : tx packet, include txdesc
- * @halmac_size : tx packet size
- * @bulkout_id : usb bulk-out id
- * Author : KaiYuan Chang
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_get_usb_bulkout_id_88xx(struct halmac_adapter *halmac_adapter,
- u8 *halmac_buf, u32 halmac_size, u8 *bulkout_id)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_queue_select queue_sel;
- enum halmac_dma_mapping dma_mapping;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter,
- HALMAC_API_GET_USB_BULKOUT_ID);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- if (!halmac_buf) {
- pr_err("halmac_buf is NULL!!\n");
- return HALMAC_RET_DATA_BUF_NULL;
- }
-
- if (halmac_size == 0) {
- pr_err("halmac_size is 0!!\n");
- return HALMAC_RET_DATA_SIZE_INCORRECT;
- }
-
- queue_sel = (enum halmac_queue_select)GET_TX_DESC_QSEL(halmac_buf);
-
- switch (queue_sel) {
- case HALMAC_QUEUE_SELECT_VO:
- case HALMAC_QUEUE_SELECT_VO_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VO];
- break;
- case HALMAC_QUEUE_SELECT_VI:
- case HALMAC_QUEUE_SELECT_VI_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VI];
- break;
- case HALMAC_QUEUE_SELECT_BE:
- case HALMAC_QUEUE_SELECT_BE_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BE];
- break;
- case HALMAC_QUEUE_SELECT_BK:
- case HALMAC_QUEUE_SELECT_BK_V2:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BK];
- break;
- case HALMAC_QUEUE_SELECT_MGNT:
- dma_mapping =
- halmac_adapter->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_MG];
- break;
- case HALMAC_QUEUE_SELECT_HIGH:
- case HALMAC_QUEUE_SELECT_BCN:
- case HALMAC_QUEUE_SELECT_CMD:
- dma_mapping = HALMAC_DMA_MAPPING_HIGH;
- break;
- default:
- pr_err("Qsel is out of range\n");
- return HALMAC_RET_QSEL_INCORRECT;
- }
-
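-	/* Map the queue's DMA ring to its USB bulk-out endpoint index. */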
- switch (dma_mapping) {
- case HALMAC_DMA_MAPPING_HIGH:
- *bulkout_id = 0;
- break;
- case HALMAC_DMA_MAPPING_NORMAL:
- *bulkout_id = 1;
- break;
- case HALMAC_DMA_MAPPING_LOW:
- *bulkout_id = 2;
- break;
- case HALMAC_DMA_MAPPING_EXTRA:
- *bulkout_id = 3;
- break;
- default:
- pr_err("DmaMapping is out of range\n");
- return HALMAC_RET_DMA_MAP_INCORRECT;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_cfg_tx_agg_align_usb_not_support_88xx() - config tx agg alignment (not supported over usb)
- * @halmac_adapter : the adapter of halmac
- * @enable : function enable(1)/disable(0)
- * @align_size : tx agg alignment size (2^n, n = 3~11)
- * Author : Soar Tu
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_cfg_tx_agg_align_usb_not_support_88xx(
- struct halmac_adapter *halmac_adapter, u8 enable, u16 align_size)
-{
- struct halmac_api *halmac_api;
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- if (halmac_api_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_API_INVALID;
-
- halmac_api_record_id_88xx(halmac_adapter, HALMAC_API_CFG_TX_AGG_ALIGN);
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
-		"%s not supported\n", __func__);
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h
deleted file mode 100644
index befa4a5415db..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_88XX_USB_H_
-#define _HALMAC_API_88XX_USB_H_
-
-#include "../halmac_2_platform.h"
-#include "../halmac_type.h"
-
-enum halmac_ret_status
-halmac_init_usb_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_deinit_usb_cfg_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_cfg_rx_aggregation_88xx_usb(struct halmac_adapter *halmac_adapter,
- struct halmac_rxagg_cfg *phalmac_rxagg_cfg);
-
-u8 halmac_reg_read_8_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_8_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u8 halmac_data);
-
-u16 halmac_reg_read_16_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_16_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u16 halmac_data);
-
-u32 halmac_reg_read_32_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
-
-enum halmac_ret_status
-halmac_reg_write_32_usb_88xx(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset, u32 halmac_data);
-
-enum halmac_ret_status
-halmac_set_bulkout_num_88xx(struct halmac_adapter *halmac_adapter,
- u8 bulkout_num);
-
-enum halmac_ret_status
-halmac_get_usb_bulkout_id_88xx(struct halmac_adapter *halmac_adapter,
- u8 *halmac_buf, u32 halmac_size, u8 *bulkout_id);
-
-enum halmac_ret_status halmac_cfg_tx_agg_align_usb_not_support_88xx(
- struct halmac_adapter *halmac_adapter, u8 enable, u16 align_size);
-
-#endif /* _HALMAC_API_88XX_USB_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
deleted file mode 100644
index ddbeff8224ab..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+++ /dev/null
@@ -1,4465 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_88xx_cfg.h"
-
-static enum halmac_ret_status
-halmac_dump_efuse_fw_88xx(struct halmac_adapter *halmac_adapter);
-
-static enum halmac_ret_status
-halmac_dump_efuse_drv_88xx(struct halmac_adapter *halmac_adapter);
-
-static enum halmac_ret_status
-halmac_update_eeprom_mask_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- u8 *eeprom_mask_updated);
-
-static enum halmac_ret_status
-halmac_check_efuse_enough_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- u8 *eeprom_mask_updated);
-
-static enum halmac_ret_status
-halmac_program_efuse_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- u8 *eeprom_mask_updated);
-
-static enum halmac_ret_status
-halmac_pwr_sub_seq_parer_88xx(struct halmac_adapter *halmac_adapter, u8 cut,
- u8 fab, u8 intf,
- struct halmac_wl_pwr_cfg_ *pwr_sub_seq_cfg);
-
-static enum halmac_ret_status
-halmac_parse_c2h_debug_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
- u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_scan_status_rpt_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_psd_data_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
- u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_efuse_data_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
- u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
- u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_enqueue_para_buff_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_phy_parameter_info *para_info,
- u8 *curr_buff_wptr, bool *end_cmd);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_phy_efuse_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_cfg_para_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_gen_cfg_para_h2c_88xx(struct halmac_adapter *halmac_adapter,
- u8 *h2c_buff);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_update_packet_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_update_datapack_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_run_datapack_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_channel_switch_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_iqk_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_power_tracking_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size);
-
-void halmac_init_offload_feature_state_machine_88xx(
- struct halmac_adapter *halmac_adapter)
-{
- struct halmac_state *state = &halmac_adapter->halmac_state;
-
- state->efuse_state_set.efuse_cmd_construct_state =
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE;
- state->efuse_state_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->efuse_state_set.seq_num = halmac_adapter->h2c_packet_seq;
-
- state->cfg_para_state_set.cfg_para_cmd_construct_state =
- HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE;
- state->cfg_para_state_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->cfg_para_state_set.seq_num = halmac_adapter->h2c_packet_seq;
-
- state->scan_state_set.scan_cmd_construct_state =
- HALMAC_SCAN_CMD_CONSTRUCT_IDLE;
- state->scan_state_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->scan_state_set.seq_num = halmac_adapter->h2c_packet_seq;
-
- state->update_packet_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->update_packet_set.seq_num = halmac_adapter->h2c_packet_seq;
-
- state->iqk_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->iqk_set.seq_num = halmac_adapter->h2c_packet_seq;
-
- state->power_tracking_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->power_tracking_set.seq_num = halmac_adapter->h2c_packet_seq;
-
- state->psd_set.process_status = HALMAC_CMD_PROCESS_IDLE;
- state->psd_set.seq_num = halmac_adapter->h2c_packet_seq;
- state->psd_set.data_size = 0;
- state->psd_set.segment_size = 0;
- state->psd_set.data = NULL;
-}
-
-enum halmac_ret_status
-halmac_dump_efuse_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_read_cfg cfg)
-{
- u32 chk_h2c_init;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api =
- (struct halmac_api *)halmac_adapter->halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.efuse_state_set.process_status;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- *process_status = HALMAC_CMD_PROCESS_SENDING;
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_H2C_SENT) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- if (cfg == HALMAC_EFUSE_R_AUTO) {
- chk_h2c_init = HALMAC_REG_READ_32(halmac_adapter,
- REG_H2C_PKT_READADDR);
- if (halmac_adapter->halmac_state.dlfw_state ==
- HALMAC_DLFW_NONE ||
- chk_h2c_init == 0)
- status = halmac_dump_efuse_drv_88xx(halmac_adapter);
- else
- status = halmac_dump_efuse_fw_88xx(halmac_adapter);
- } else if (cfg == HALMAC_EFUSE_R_FW) {
- status = halmac_dump_efuse_fw_88xx(halmac_adapter);
- } else {
- status = halmac_dump_efuse_drv_88xx(halmac_adapter);
- }
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_read_efuse error = %x\n", status);
- return status;
- }
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_func_read_efuse_88xx(struct halmac_adapter *halmac_adapter, u32 offset,
- u32 size, u8 *efuse_map)
-{
- void *driver_adapter = NULL;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- if (!efuse_map) {
-		pr_err("efuse_map buffer is NULL\n");
- return HALMAC_RET_NULL_POINTER;
- }
-
- if (halmac_adapter->hal_efuse_map_valid)
- memcpy(efuse_map, halmac_adapter->hal_efuse_map + offset, size);
- else if (halmac_read_hw_efuse_88xx(halmac_adapter, offset, size,
- efuse_map) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_EFUSE_R_FAIL;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_read_hw_efuse_88xx(struct halmac_adapter *halmac_adapter, u32 offset,
- u32 size, u8 *efuse_map)
-{
- u8 value8;
- u32 value32;
- u32 address;
- u32 tmp32, counter;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
-	/* Reading the eFuse does not require the 2.5V LDO */
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_LDO_EFUSE_CTRL + 3);
- if (value8 & BIT(7))
- HALMAC_REG_WRITE_8(halmac_adapter, REG_LDO_EFUSE_CTRL + 3,
- (u8)(value8 & ~(BIT(7))));
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_EFUSE_CTRL);
-
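-	/* Read one byte per iteration: latch the address into EFUSE_CTRL
-	 * with the flag bit cleared, then poll until hardware sets
-	 * BIT_EF_FLAG and returns the data.
-	 */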
- for (address = offset; address < offset + size; address++) {
- value32 = value32 &
- ~((BIT_MASK_EF_DATA) |
- (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR));
- value32 = value32 |
- ((address & BIT_MASK_EF_ADDR) << BIT_SHIFT_EF_ADDR);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_EFUSE_CTRL,
- value32 & (~BIT_EF_FLAG));
-
- counter = 1000000;
- do {
- udelay(1);
- tmp32 = HALMAC_REG_READ_32(halmac_adapter,
- REG_EFUSE_CTRL);
- counter--;
- if (counter == 0) {
- pr_err("HALMAC_RET_EFUSE_R_FAIL\n");
- return HALMAC_RET_EFUSE_R_FAIL;
- }
- } while ((tmp32 & BIT_EF_FLAG) == 0);
-
- *(efuse_map + address - offset) =
- (u8)(tmp32 & BIT_MASK_EF_DATA);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_dump_efuse_drv_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 *efuse_map = NULL;
- u32 efuse_size;
- void *driver_adapter = NULL;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- efuse_size = halmac_adapter->hw_config_info.efuse_size;
-
- if (!halmac_adapter->hal_efuse_map) {
- halmac_adapter->hal_efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!halmac_adapter->hal_efuse_map)
- return HALMAC_RET_MALLOC_FAIL;
- }
-
- efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!efuse_map)
- return HALMAC_RET_MALLOC_FAIL;
-
- if (halmac_read_hw_efuse_88xx(halmac_adapter, 0, efuse_size,
- efuse_map) != HALMAC_RET_SUCCESS) {
- kfree(efuse_map);
- return HALMAC_RET_EFUSE_R_FAIL;
- }
-
- spin_lock(&halmac_adapter->efuse_lock);
- memcpy(halmac_adapter->hal_efuse_map, efuse_map, efuse_size);
- halmac_adapter->hal_efuse_map_valid = true;
- spin_unlock(&halmac_adapter->efuse_lock);
-
- kfree(efuse_map);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_dump_efuse_fw_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
-	u16 h2c_seq_num = 0;
- void *driver_adapter = NULL;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_DUMP_PHYSICAL_EFUSE;
- h2c_header_info.content_size = 0;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
-					      &h2c_header_info, &h2c_seq_num);
-	halmac_adapter->halmac_state.efuse_state_set.seq_num = h2c_seq_num;
-
- if (!halmac_adapter->hal_efuse_map) {
- halmac_adapter->hal_efuse_map = kzalloc(
- halmac_adapter->hw_config_info.efuse_size, GFP_KERNEL);
- if (!halmac_adapter->hal_efuse_map)
- return HALMAC_RET_MALLOC_FAIL;
- }
-
- if (!halmac_adapter->hal_efuse_map_valid) {
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX,
- true);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_read_efuse_fw Fail = %x!!\n", status);
- return status;
- }
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_func_write_efuse_88xx(struct halmac_adapter *halmac_adapter, u32 offset,
- u8 value)
-{
-	const u8 write_protect_code = 0x69;
- u32 value32, tmp32, counter;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- spin_lock(&halmac_adapter->efuse_lock);
- halmac_adapter->hal_efuse_map_valid = false;
- spin_unlock(&halmac_adapter->efuse_lock);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PMC_DBG_CTRL2 + 3,
-			   write_protect_code);
-
- /* Enable 2.5V LDO */
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_LDO_EFUSE_CTRL + 3,
- (u8)(HALMAC_REG_READ_8(halmac_adapter, REG_LDO_EFUSE_CTRL + 3) |
- BIT(7)));
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_EFUSE_CTRL);
- value32 =
- value32 &
- ~((BIT_MASK_EF_DATA) | (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR));
- value32 = value32 | ((offset & BIT_MASK_EF_ADDR) << BIT_SHIFT_EF_ADDR) |
- (value & BIT_MASK_EF_DATA);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_EFUSE_CTRL,
- value32 | BIT_EF_FLAG);
-
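-	/* The write is done once hardware clears BIT_EF_FLAG; give up after
-	 * the polling budget is exhausted.
-	 */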
- counter = 1000000;
- do {
- udelay(1);
- tmp32 = HALMAC_REG_READ_32(halmac_adapter, REG_EFUSE_CTRL);
- counter--;
- if (counter == 0) {
- pr_err("halmac_write_efuse Fail !!\n");
- return HALMAC_RET_EFUSE_W_FAIL;
- }
- } while ((tmp32 & BIT_EF_FLAG) == BIT_EF_FLAG);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PMC_DBG_CTRL2 + 3, 0x00);
-
- /* Disable 2.5V LDO */
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_LDO_EFUSE_CTRL + 3,
- (u8)(HALMAC_REG_READ_8(halmac_adapter, REG_LDO_EFUSE_CTRL + 3) &
- ~(BIT(7))));
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_func_switch_efuse_bank_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_bank efuse_bank)
-{
- u8 reg_value;
- struct halmac_api *halmac_api;
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (halmac_transition_efuse_state_88xx(
- halmac_adapter, HALMAC_EFUSE_CMD_CONSTRUCT_BUSY) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- reg_value = HALMAC_REG_READ_8(halmac_adapter, REG_LDO_EFUSE_CTRL + 1);
-
- if (efuse_bank == (reg_value & (BIT(0) | BIT(1))))
- return HALMAC_RET_SUCCESS;
-
- reg_value &= ~(BIT(0) | BIT(1));
- reg_value |= efuse_bank;
- HALMAC_REG_WRITE_8(halmac_adapter, REG_LDO_EFUSE_CTRL + 1, reg_value);
-
- if ((HALMAC_REG_READ_8(halmac_adapter, REG_LDO_EFUSE_CTRL + 1) &
- (BIT(0) | BIT(1))) != efuse_bank)
- return HALMAC_RET_SWITCH_EFUSE_BANK_FAIL;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_eeprom_parser_88xx(struct halmac_adapter *halmac_adapter,
- u8 *physical_efuse_map, u8 *logical_efuse_map)
-{
- u8 j;
- u8 value8;
- u8 block_index;
- u8 valid_word_enable, word_enable;
- u8 efuse_read_header, efuse_read_header2 = 0;
- u32 eeprom_index;
- u32 efuse_index = 0;
- u32 eeprom_size = halmac_adapter->hw_config_info.eeprom_size;
- void *driver_adapter = NULL;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- memset(logical_efuse_map, 0xFF, eeprom_size);
-
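-	/* Walk the physical map entry by entry.  A header byte whose low
-	 * five bits are 0x0F marks an extended (two-byte) header; otherwise
-	 * the block index and word-enable nibbles come from the single byte.
-	 */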
- do {
- value8 = *(physical_efuse_map + efuse_index);
- efuse_read_header = value8;
-
- if ((efuse_read_header & 0x1f) == 0x0f) {
- efuse_index++;
- value8 = *(physical_efuse_map + efuse_index);
- efuse_read_header2 = value8;
- block_index = ((efuse_read_header2 & 0xF0) >> 1) |
- ((efuse_read_header >> 5) & 0x07);
- word_enable = efuse_read_header2 & 0x0F;
- } else {
- block_index = (efuse_read_header & 0xF0) >> 4;
- word_enable = efuse_read_header & 0x0F;
- }
-
- if (efuse_read_header == 0xff)
- break;
-
- efuse_index++;
-
- if (efuse_index >= halmac_adapter->hw_config_info.efuse_size -
- HALMAC_PROTECTED_EFUSE_SIZE_88XX - 1)
- return HALMAC_RET_EEPROM_PARSING_FAIL;
-
- for (j = 0; j < 4; j++) {
- valid_word_enable =
- (u8)((~(word_enable >> j)) & BIT(0));
- if (valid_word_enable != 1)
- continue;
-
- eeprom_index = (block_index << 3) + (j << 1);
-
- if ((eeprom_index + 1) > eeprom_size) {
- pr_err("Error: EEPROM addr exceeds eeprom_size:0x%X, at eFuse 0x%X\n",
- eeprom_size, efuse_index - 1);
- if ((efuse_read_header & 0x1f) == 0x0f)
- pr_err("Error: EEPROM header: 0x%X, 0x%X,\n",
- efuse_read_header,
- efuse_read_header2);
- else
- pr_err("Error: EEPROM header: 0x%X,\n",
- efuse_read_header);
-
- return HALMAC_RET_EEPROM_PARSING_FAIL;
- }
-
- value8 = *(physical_efuse_map + efuse_index);
- *(logical_efuse_map + eeprom_index) = value8;
-
- eeprom_index++;
- efuse_index++;
-
- if (efuse_index >
- halmac_adapter->hw_config_info.efuse_size -
- HALMAC_PROTECTED_EFUSE_SIZE_88XX - 1)
- return HALMAC_RET_EEPROM_PARSING_FAIL;
-
- value8 = *(physical_efuse_map + efuse_index);
- *(logical_efuse_map + eeprom_index) = value8;
-
- efuse_index++;
-
- if (efuse_index >
- halmac_adapter->hw_config_info.efuse_size -
- HALMAC_PROTECTED_EFUSE_SIZE_88XX)
- return HALMAC_RET_EEPROM_PARSING_FAIL;
- }
- } while (1);
-
- halmac_adapter->efuse_end = efuse_index;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_read_logical_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
- u8 *map)
-{
- u8 *efuse_map = NULL;
- u32 efuse_size;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
- efuse_size = halmac_adapter->hw_config_info.efuse_size;
-
- if (!halmac_adapter->hal_efuse_map_valid) {
- efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!efuse_map)
- return HALMAC_RET_MALLOC_FAIL;
-
- status = halmac_func_read_efuse_88xx(halmac_adapter, 0,
- efuse_size, efuse_map);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("[ERR]halmac_read_efuse error = %x\n", status);
- kfree(efuse_map);
- return status;
- }
-
- if (!halmac_adapter->hal_efuse_map) {
- halmac_adapter->hal_efuse_map =
- kzalloc(efuse_size, GFP_KERNEL);
- if (!halmac_adapter->hal_efuse_map) {
- kfree(efuse_map);
- return HALMAC_RET_MALLOC_FAIL;
- }
- }
-
- spin_lock(&halmac_adapter->efuse_lock);
- memcpy(halmac_adapter->hal_efuse_map, efuse_map, efuse_size);
- halmac_adapter->hal_efuse_map_valid = true;
- spin_unlock(&halmac_adapter->efuse_lock);
-
- kfree(efuse_map);
- }
-
- if (halmac_eeprom_parser_88xx(halmac_adapter,
- halmac_adapter->hal_efuse_map,
- map) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_EEPROM_PARSING_FAIL;
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_func_write_logical_efuse_88xx(struct halmac_adapter *halmac_adapter,
- u32 offset, u8 value)
-{
- u8 pg_efuse_byte1, pg_efuse_byte2;
- u8 pg_block, pg_block_index;
- u8 pg_efuse_header, pg_efuse_header2;
- u8 *eeprom_map = NULL;
- u32 eeprom_size = halmac_adapter->hw_config_info.eeprom_size;
- u32 efuse_end, pg_efuse_num;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map)
- return HALMAC_RET_MALLOC_FAIL;
- memset(eeprom_map, 0xFF, eeprom_size);
-
- status = halmac_read_logical_efuse_map_88xx(halmac_adapter, eeprom_map);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("[ERR]halmac_read_logical_efuse_map_88xx error = %x\n",
- status);
- kfree(eeprom_map);
- return status;
- }
-
- if (*(eeprom_map + offset) != value) {
- efuse_end = halmac_adapter->efuse_end;
- pg_block = (u8)(offset >> 3);
- pg_block_index = (u8)((offset & (8 - 1)) >> 1);
-
- if (offset > 0x7f) {
- pg_efuse_header =
- (((pg_block & 0x07) << 5) & 0xE0) | 0x0F;
- pg_efuse_header2 =
- (u8)(((pg_block & 0x78) << 1) +
- ((0x1 << pg_block_index) ^ 0x0F));
- } else {
- pg_efuse_header =
- (u8)((pg_block << 4) +
- ((0x01 << pg_block_index) ^ 0x0F));
- }
-
- if ((offset & 1) == 0) {
- pg_efuse_byte1 = value;
- pg_efuse_byte2 = *(eeprom_map + offset + 1);
- } else {
- pg_efuse_byte1 = *(eeprom_map + offset - 1);
- pg_efuse_byte2 = value;
- }
-
- if (offset > 0x7f) {
- pg_efuse_num = 4;
- if (halmac_adapter->hw_config_info.efuse_size <=
- (pg_efuse_num + HALMAC_PROTECTED_EFUSE_SIZE_88XX +
- halmac_adapter->efuse_end)) {
- kfree(eeprom_map);
- return HALMAC_RET_EFUSE_NOT_ENOUGH;
- }
- halmac_func_write_efuse_88xx(halmac_adapter, efuse_end,
- pg_efuse_header);
- halmac_func_write_efuse_88xx(halmac_adapter,
- efuse_end + 1,
- pg_efuse_header2);
- halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end + 2, pg_efuse_byte1);
- status = halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end + 3, pg_efuse_byte2);
- } else {
- pg_efuse_num = 3;
- if (halmac_adapter->hw_config_info.efuse_size <=
- (pg_efuse_num + HALMAC_PROTECTED_EFUSE_SIZE_88XX +
- halmac_adapter->efuse_end)) {
- kfree(eeprom_map);
- return HALMAC_RET_EFUSE_NOT_ENOUGH;
- }
- halmac_func_write_efuse_88xx(halmac_adapter, efuse_end,
- pg_efuse_header);
- halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end + 1, pg_efuse_byte1);
- status = halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end + 2, pg_efuse_byte2);
- }
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("[ERR]halmac_write_logical_efuse error = %x\n",
- status);
- kfree(eeprom_map);
- return status;
- }
- }
-
- kfree(eeprom_map);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_func_pg_efuse_by_map_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- enum halmac_efuse_read_cfg cfg)
-{
- u8 *eeprom_mask_updated = NULL;
- u32 eeprom_mask_size = halmac_adapter->hw_config_info.eeprom_size >> 4;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- eeprom_mask_updated = kzalloc(eeprom_mask_size, GFP_KERNEL);
- if (!eeprom_mask_updated)
- return HALMAC_RET_MALLOC_FAIL;
-
- status = halmac_update_eeprom_mask_88xx(halmac_adapter, pg_efuse_info,
- eeprom_mask_updated);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("[ERR]halmac_update_eeprom_mask_88xx error = %x\n",
- status);
- kfree(eeprom_mask_updated);
- return status;
- }
-
- status = halmac_check_efuse_enough_88xx(halmac_adapter, pg_efuse_info,
- eeprom_mask_updated);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("[ERR]halmac_check_efuse_enough_88xx error = %x\n",
- status);
- kfree(eeprom_mask_updated);
- return status;
- }
-
- status = halmac_program_efuse_88xx(halmac_adapter, pg_efuse_info,
- eeprom_mask_updated);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("[ERR]halmac_program_efuse_88xx error = %x\n", status);
- kfree(eeprom_mask_updated);
- return status;
- }
-
- kfree(eeprom_mask_updated);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_update_eeprom_mask_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- u8 *eeprom_mask_updated)
-{
- u8 *eeprom_map = NULL;
- u32 eeprom_size = halmac_adapter->hw_config_info.eeprom_size;
- u8 *eeprom_map_pg, *eeprom_mask;
- u16 i, j;
- u16 map_byte_offset, mask_byte_offset;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- void *driver_adapter = NULL;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map)
- return HALMAC_RET_MALLOC_FAIL;
-
- memset(eeprom_map, 0xFF, eeprom_size);
- memset(eeprom_mask_updated, 0x00, pg_efuse_info->efuse_mask_size);
-
- status = halmac_read_logical_efuse_map_88xx(halmac_adapter, eeprom_map);
-
- if (status != HALMAC_RET_SUCCESS) {
- kfree(eeprom_map);
- return status;
- }
-
- eeprom_map_pg = pg_efuse_info->efuse_map;
- eeprom_mask = pg_efuse_info->efuse_mask;
-
- for (i = 0; i < pg_efuse_info->efuse_mask_size; i++)
- *(eeprom_mask_updated + i) = *(eeprom_mask + i);
-
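-	/* Clear the mask bit of every word whose value to be programmed
-	 * already matches the current logical map, so unchanged words are
-	 * not burned again.
-	 */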
- for (i = 0; i < pg_efuse_info->efuse_map_size; i = i + 16) {
- for (j = 0; j < 16; j = j + 2) {
- map_byte_offset = i + j;
- mask_byte_offset = i >> 4;
- if (*(eeprom_map_pg + map_byte_offset) ==
- *(eeprom_map + map_byte_offset)) {
- if (*(eeprom_map_pg + map_byte_offset + 1) ==
- *(eeprom_map + map_byte_offset + 1)) {
- switch (j) {
- case 0:
- *(eeprom_mask_updated +
- mask_byte_offset) =
- *(eeprom_mask_updated +
- mask_byte_offset) &
- (BIT(4) ^ 0xFF);
- break;
- case 2:
- *(eeprom_mask_updated +
- mask_byte_offset) =
- *(eeprom_mask_updated +
- mask_byte_offset) &
- (BIT(5) ^ 0xFF);
- break;
- case 4:
- *(eeprom_mask_updated +
- mask_byte_offset) =
- *(eeprom_mask_updated +
- mask_byte_offset) &
- (BIT(6) ^ 0xFF);
- break;
- case 6:
- *(eeprom_mask_updated +
- mask_byte_offset) =
- *(eeprom_mask_updated +
- mask_byte_offset) &
- (BIT(7) ^ 0xFF);
- break;
- case 8:
- *(eeprom_mask_updated +
- mask_byte_offset) =
- *(eeprom_mask_updated +
- mask_byte_offset) &
- (BIT(0) ^ 0xFF);
- break;
- case 10:
- *(eeprom_mask_updated +
- mask_byte_offset) =
- *(eeprom_mask_updated +
- mask_byte_offset) &
- (BIT(1) ^ 0xFF);
- break;
- case 12:
- *(eeprom_mask_updated +
- mask_byte_offset) =
- *(eeprom_mask_updated +
- mask_byte_offset) &
- (BIT(2) ^ 0xFF);
- break;
- case 14:
- *(eeprom_mask_updated +
- mask_byte_offset) =
- *(eeprom_mask_updated +
- mask_byte_offset) &
- (BIT(3) ^ 0xFF);
- break;
- default:
- break;
- }
- }
- }
- }
- }
-
- kfree(eeprom_map);
-
- return status;
-}
-
-static enum halmac_ret_status
-halmac_check_efuse_enough_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- u8 *eeprom_mask_updated)
-{
- u8 pre_word_enb, word_enb;
- u8 pg_efuse_header, pg_efuse_header2;
- u8 pg_block;
- u16 i, j;
- u32 efuse_end;
- u32 tmp_eeprom_offset, pg_efuse_num = 0;
-
- efuse_end = halmac_adapter->efuse_end;
-
- for (i = 0; i < pg_efuse_info->efuse_map_size; i = i + 8) {
- tmp_eeprom_offset = i;
-
- if ((tmp_eeprom_offset & 7) > 0) {
- pre_word_enb =
- (*(eeprom_mask_updated + (i >> 4)) & 0x0F);
- word_enb = pre_word_enb ^ 0x0F;
- } else {
- pre_word_enb = (*(eeprom_mask_updated + (i >> 4)) >> 4);
- word_enb = pre_word_enb ^ 0x0F;
- }
-
- pg_block = (u8)(tmp_eeprom_offset >> 3);
-
- if (pre_word_enb > 0) {
- if (tmp_eeprom_offset > 0x7f) {
- pg_efuse_header =
- (((pg_block & 0x07) << 5) & 0xE0) |
- 0x0F;
- pg_efuse_header2 = (u8)(
- ((pg_block & 0x78) << 1) + word_enb);
- } else {
- pg_efuse_header =
- (u8)((pg_block << 4) + word_enb);
- }
-
- if (tmp_eeprom_offset > 0x7f) {
- pg_efuse_num++;
- pg_efuse_num++;
- efuse_end = efuse_end + 2;
- for (j = 0; j < 4; j++) {
- if (((pre_word_enb >> j) & 0x1) > 0) {
- pg_efuse_num++;
- pg_efuse_num++;
- efuse_end = efuse_end + 2;
- }
- }
- } else {
- pg_efuse_num++;
- efuse_end = efuse_end + 1;
- for (j = 0; j < 4; j++) {
- if (((pre_word_enb >> j) & 0x1) > 0) {
- pg_efuse_num++;
- pg_efuse_num++;
- efuse_end = efuse_end + 2;
- }
- }
- }
- }
- }
-
- if (halmac_adapter->hw_config_info.efuse_size <=
- (pg_efuse_num + HALMAC_PROTECTED_EFUSE_SIZE_88XX +
- halmac_adapter->efuse_end))
- return HALMAC_RET_EFUSE_NOT_ENOUGH;
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_program_efuse_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- u8 *eeprom_mask_updated)
-{
- u8 pre_word_enb, word_enb;
- u8 pg_efuse_header, pg_efuse_header2;
- u8 pg_block;
- u16 i, j;
- u32 efuse_end;
- u32 tmp_eeprom_offset;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- efuse_end = halmac_adapter->efuse_end;
-
- for (i = 0; i < pg_efuse_info->efuse_map_size; i = i + 8) {
- tmp_eeprom_offset = i;
-
- if (((tmp_eeprom_offset >> 3) & 1) > 0) {
- pre_word_enb =
- (*(eeprom_mask_updated + (i >> 4)) & 0x0F);
- word_enb = pre_word_enb ^ 0x0F;
- } else {
- pre_word_enb = (*(eeprom_mask_updated + (i >> 4)) >> 4);
- word_enb = pre_word_enb ^ 0x0F;
- }
-
- pg_block = (u8)(tmp_eeprom_offset >> 3);
-
- if (pre_word_enb <= 0)
- continue;
-
- if (tmp_eeprom_offset > 0x7f) {
- pg_efuse_header =
- (((pg_block & 0x07) << 5) & 0xE0) | 0x0F;
- pg_efuse_header2 =
- (u8)(((pg_block & 0x78) << 1) + word_enb);
- } else {
- pg_efuse_header = (u8)((pg_block << 4) + word_enb);
- }
-
- if (tmp_eeprom_offset > 0x7f) {
- halmac_func_write_efuse_88xx(halmac_adapter, efuse_end,
- pg_efuse_header);
- status = halmac_func_write_efuse_88xx(halmac_adapter,
- efuse_end + 1,
- pg_efuse_header2);
- efuse_end = efuse_end + 2;
- for (j = 0; j < 4; j++) {
- if (((pre_word_enb >> j) & 0x1) > 0) {
- halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end,
- *(pg_efuse_info->efuse_map +
- tmp_eeprom_offset +
- (j << 1)));
- status = halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end + 1,
- *(pg_efuse_info->efuse_map +
- tmp_eeprom_offset + (j << 1) +
- 1));
- efuse_end = efuse_end + 2;
- }
- }
- } else {
- status = halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end, pg_efuse_header);
- efuse_end = efuse_end + 1;
- for (j = 0; j < 4; j++) {
- if (((pre_word_enb >> j) & 0x1) > 0) {
- halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end,
- *(pg_efuse_info->efuse_map +
- tmp_eeprom_offset +
- (j << 1)));
- status = halmac_func_write_efuse_88xx(
- halmac_adapter, efuse_end + 1,
- *(pg_efuse_info->efuse_map +
- tmp_eeprom_offset + (j << 1) +
- 1));
- efuse_end = efuse_end + 2;
- }
- }
- }
- }
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_dlfw_to_mem_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
- u32 dest, u32 code_size)
-{
- u8 *code_ptr;
- u8 first_part;
- u32 mem_offset;
- u32 pkt_size_tmp, send_pkt_size;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- code_ptr = ram_code;
- mem_offset = 0;
- first_part = 1;
- pkt_size_tmp = code_size;
-
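-	/* Reset the DDMA checksum, then push the image in chunks of at most
-	 * max_download_size; the checksum accumulates across chunks.
-	 */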
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_DDMA_CH0CTRL,
- HALMAC_REG_READ_32(halmac_adapter, REG_DDMA_CH0CTRL) |
- BIT_DDMACH0_RESET_CHKSUM_STS);
-
- while (pkt_size_tmp != 0) {
- if (pkt_size_tmp >= halmac_adapter->max_download_size)
- send_pkt_size = halmac_adapter->max_download_size;
- else
- send_pkt_size = pkt_size_tmp;
-
- if (halmac_send_fwpkt_88xx(
- halmac_adapter, code_ptr + mem_offset,
- send_pkt_size) != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_fwpkt_88xx fail!!\n");
- return HALMAC_RET_DLFW_FAIL;
- }
-
- if (halmac_iddma_dlfw_88xx(
- halmac_adapter,
- HALMAC_OCPBASE_TXBUF_88XX +
- halmac_adapter->hw_config_info.txdesc_size,
- dest + mem_offset, send_pkt_size,
- first_part) != HALMAC_RET_SUCCESS) {
- pr_err("halmac_iddma_dlfw_88xx fail!!\n");
- return HALMAC_RET_DLFW_FAIL;
- }
-
- first_part = 0;
- mem_offset += send_pkt_size;
- pkt_size_tmp -= send_pkt_size;
- }
-
- if (halmac_check_fw_chksum_88xx(halmac_adapter, dest) !=
- HALMAC_RET_SUCCESS) {
- pr_err("halmac_check_fw_chksum_88xx fail!!\n");
- return HALMAC_RET_DLFW_FAIL;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_send_fwpkt_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
- u32 code_size)
-{
- if (halmac_download_rsvd_page_88xx(halmac_adapter, ram_code,
- code_size) != HALMAC_RET_SUCCESS) {
- pr_err("PLATFORM_SEND_RSVD_PAGE 0 error!!\n");
- return HALMAC_RET_DL_RSVD_PAGE_FAIL;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_iddma_dlfw_88xx(struct halmac_adapter *halmac_adapter, u32 source,
- u32 dest, u32 length, u8 first)
-{
- u32 counter;
- u32 ch0_control = (u32)(BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN);
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
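-	/* Wait until DDMA channel 0 is idle (OWN bit cleared) before
-	 * programming the next transfer.
-	 */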
- counter = HALMC_DDMA_POLLING_COUNT;
- while (HALMAC_REG_READ_32(halmac_adapter, REG_DDMA_CH0CTRL) &
- BIT_DDMACH0_OWN) {
- counter--;
- if (counter == 0) {
- pr_err("%s error-1!!\n", __func__);
- return HALMAC_RET_DDMA_FAIL;
- }
- }
-
- ch0_control |= (length & BIT_MASK_DDMACH0_DLEN);
- if (first == 0)
- ch0_control |= BIT_DDMACH0_CHKSUM_CONT;
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_DDMA_CH0SA, source);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_DDMA_CH0DA, dest);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_DDMA_CH0CTRL, ch0_control);
-
- counter = HALMC_DDMA_POLLING_COUNT;
- while (HALMAC_REG_READ_32(halmac_adapter, REG_DDMA_CH0CTRL) &
- BIT_DDMACH0_OWN) {
- counter--;
- if (counter == 0) {
- pr_err("%s error-2!!\n", __func__);
- return HALMAC_RET_DDMA_FAIL;
- }
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_check_fw_chksum_88xx(struct halmac_adapter *halmac_adapter,
- u32 memory_address)
-{
- u8 mcu_fw_ctrl;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- mcu_fw_ctrl = HALMAC_REG_READ_8(halmac_adapter, REG_MCUFW_CTRL);
-
- if (HALMAC_REG_READ_32(halmac_adapter, REG_DDMA_CH0CTRL) &
- BIT_DDMACH0_CHKSUM_STS) {
- if (memory_address < HALMAC_OCPBASE_DMEM_88XX) {
- mcu_fw_ctrl |= BIT_IMEM_DW_OK;
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_MCUFW_CTRL,
- (u8)(mcu_fw_ctrl & ~(BIT_IMEM_CHKSUM_OK)));
- } else {
- mcu_fw_ctrl |= BIT_DMEM_DW_OK;
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_MCUFW_CTRL,
- (u8)(mcu_fw_ctrl & ~(BIT_DMEM_CHKSUM_OK)));
- }
-
- pr_err("%s error!!\n", __func__);
-
- status = HALMAC_RET_FW_CHECKSUM_FAIL;
- } else {
- if (memory_address < HALMAC_OCPBASE_DMEM_88XX) {
- mcu_fw_ctrl |= BIT_IMEM_DW_OK;
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_MCUFW_CTRL,
- (u8)(mcu_fw_ctrl | BIT_IMEM_CHKSUM_OK));
- } else {
- mcu_fw_ctrl |= BIT_DMEM_DW_OK;
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_MCUFW_CTRL,
- (u8)(mcu_fw_ctrl | BIT_DMEM_CHKSUM_OK));
- }
-
- status = HALMAC_RET_SUCCESS;
- }
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_dlfw_end_flow_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 value8;
- u32 counter;
- void *driver_adapter = halmac_adapter->driver_adapter;
- struct halmac_api *halmac_api =
- (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_TXDMA_STATUS, BIT(2));
-
-	/* Check whether the IMEM & DMEM checksums are OK */
- if ((HALMAC_REG_READ_8(halmac_adapter, REG_MCUFW_CTRL) & 0x50) == 0x50)
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MCUFW_CTRL,
- (u16)(HALMAC_REG_READ_16(halmac_adapter,
- REG_MCUFW_CTRL) |
- BIT_FW_DW_RDY));
- else
- return HALMAC_RET_DLFW_FAIL;
-
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_MCUFW_CTRL,
- (u8)(HALMAC_REG_READ_8(halmac_adapter, REG_MCUFW_CTRL) &
- ~(BIT(0))));
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_RSV_CTRL + 1);
- value8 = (u8)(value8 | BIT(0));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RSV_CTRL + 1, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_SYS_FUNC_EN + 1);
- value8 = (u8)(value8 | BIT(2));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SYS_FUNC_EN + 1,
- value8); /* Release MCU reset */
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Download Finish, Reset CPU\n");
-
- counter = 10000;
- while (HALMAC_REG_READ_16(halmac_adapter, REG_MCUFW_CTRL) != 0xC078) {
- if (counter == 0) {
- pr_err("Check 0x80 = 0xC078 fail\n");
- if ((HALMAC_REG_READ_32(halmac_adapter, REG_FW_DBG7) &
- 0xFFFFFF00) == 0xFAAAAA00)
- pr_err("Key fail\n");
- return HALMAC_RET_DLFW_FAIL;
- }
- counter--;
- usleep_range(50, 60);
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Check 0x80 = 0xC078 counter = %d\n", counter);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_free_dl_fw_end_flow_88xx(struct halmac_adapter *halmac_adapter)
-{
- u32 counter;
- struct halmac_api *halmac_api =
- (struct halmac_api *)halmac_adapter->halmac_api;
-
- counter = 100;
- while (HALMAC_REG_READ_8(halmac_adapter, REG_HMETFR + 3) != 0) {
- counter--;
- if (counter == 0) {
- pr_err("[ERR]0x1CF != 0\n");
- return HALMAC_RET_DLFW_FAIL;
- }
- usleep_range(50, 60);
- }
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_HMETFR + 3,
- ID_INFORM_DLEMEM_RDY);
-
- counter = 10000;
- while (HALMAC_REG_READ_8(halmac_adapter, REG_C2HEVT_3 + 3) !=
- ID_INFORM_DLEMEM_RDY) {
- counter--;
- if (counter == 0) {
- pr_err("[ERR]0x1AF != 0x80\n");
- return HALMAC_RET_DLFW_FAIL;
- }
- usleep_range(50, 60);
- }
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_C2HEVT_3 + 3, 0);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_pwr_seq_parser_88xx(struct halmac_adapter *halmac_adapter, u8 cut,
- u8 fab, u8 intf,
- struct halmac_wl_pwr_cfg_ **pp_pwr_seq_cfg)
-{
- u32 seq_idx = 0;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- struct halmac_wl_pwr_cfg_ *seq_cmd;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- do {
- seq_cmd = pp_pwr_seq_cfg[seq_idx];
-
- if (!seq_cmd)
- break;
-
- status = halmac_pwr_sub_seq_parer_88xx(halmac_adapter, cut, fab,
- intf, seq_cmd);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("[Err]pwr sub seq parser fail, status = 0x%X!\n",
- status);
- return status;
- }
-
- seq_idx++;
- } while (1);
-
- return status;
-}
-
-static enum halmac_ret_status
-halmac_pwr_sub_seq_parer_do_cmd_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_wl_pwr_cfg_ *sub_seq_cmd,
- bool *reti)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- u8 value, flag;
- u8 polling_bit;
- u32 polling_count;
- static u32 poll_to_static;
- u32 offset;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
- *reti = true;
-
- switch (sub_seq_cmd->cmd) {
- case HALMAC_PWR_CMD_WRITE:
- if (sub_seq_cmd->base == HALMAC_PWR_BASEADDR_SDIO)
- offset = sub_seq_cmd->offset | SDIO_LOCAL_OFFSET;
- else
- offset = sub_seq_cmd->offset;
-
- value = HALMAC_REG_READ_8(halmac_adapter, offset);
- value = (u8)(value & (u8)(~(sub_seq_cmd->msk)));
- value = (u8)(value |
- (u8)(sub_seq_cmd->value & sub_seq_cmd->msk));
-
- HALMAC_REG_WRITE_8(halmac_adapter, offset, value);
- break;
- case HALMAC_PWR_CMD_POLLING:
- polling_bit = 0;
- polling_count = HALMAC_POLLING_READY_TIMEOUT_COUNT;
- flag = 0;
-
- if (sub_seq_cmd->base == HALMAC_PWR_BASEADDR_SDIO)
- offset = sub_seq_cmd->offset | SDIO_LOCAL_OFFSET;
- else
- offset = sub_seq_cmd->offset;
-
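-		/* Poll the masked register until it matches the expected
-		 * value; on PCIe, a single retry toggles BIT(3) of
-		 * REG_SYS_PW_CTRL as a timeout workaround.
-		 */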
- do {
- polling_count--;
- value = HALMAC_REG_READ_8(halmac_adapter, offset);
- value = (u8)(value & sub_seq_cmd->msk);
-
- if (value == (sub_seq_cmd->value & sub_seq_cmd->msk)) {
- polling_bit = 1;
- continue;
- }
-
- if (polling_count != 0) {
- usleep_range(50, 60);
- continue;
- }
-
- if (halmac_adapter->halmac_interface ==
- HALMAC_INTERFACE_PCIE &&
- flag == 0) {
-				/* Workaround for the power bit polling
-				 * timeout seen on PCIE + USB packages
-				 */
- poll_to_static++;
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_PWR,
- DBG_WARNING,
- "[WARN]PCIE polling timeout : %d!!\n",
- poll_to_static);
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_SYS_PW_CTRL,
- HALMAC_REG_READ_8(halmac_adapter,
- REG_SYS_PW_CTRL) |
- BIT(3));
- HALMAC_REG_WRITE_8(
- halmac_adapter, REG_SYS_PW_CTRL,
- HALMAC_REG_READ_8(halmac_adapter,
- REG_SYS_PW_CTRL) &
- ~BIT(3));
- polling_bit = 0;
- polling_count =
- HALMAC_POLLING_READY_TIMEOUT_COUNT;
- flag = 1;
- } else {
- pr_err("[ERR]Pwr cmd polling timeout!!\n");
- pr_err("[ERR]Pwr cmd offset : %X!!\n",
- sub_seq_cmd->offset);
- pr_err("[ERR]Pwr cmd value : %X!!\n",
- sub_seq_cmd->value);
- pr_err("[ERR]Pwr cmd msk : %X!!\n",
- sub_seq_cmd->msk);
- pr_err("[ERR]Read offset = %X value = %X!!\n",
- offset, value);
- return HALMAC_RET_PWRSEQ_POLLING_FAIL;
- }
- } while (!polling_bit);
- break;
- case HALMAC_PWR_CMD_DELAY:
- if (sub_seq_cmd->value == HALMAC_PWRSEQ_DELAY_US)
- udelay(sub_seq_cmd->offset);
- else
- usleep_range(1000 * sub_seq_cmd->offset,
- 1000 * sub_seq_cmd->offset + 100);
-
- break;
- case HALMAC_PWR_CMD_READ:
- break;
- case HALMAC_PWR_CMD_END:
- return HALMAC_RET_SUCCESS;
- default:
- return HALMAC_RET_PWRSEQ_CMD_INCORRECT;
- }
-
- *reti = false;
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_pwr_sub_seq_parer_88xx(struct halmac_adapter *halmac_adapter, u8 cut,
- u8 fab, u8 intf,
- struct halmac_wl_pwr_cfg_ *pwr_sub_seq_cfg)
-{
- struct halmac_wl_pwr_cfg_ *sub_seq_cmd;
- bool reti;
- enum halmac_ret_status status;
-
- for (sub_seq_cmd = pwr_sub_seq_cfg;; sub_seq_cmd++) {
- if ((sub_seq_cmd->interface_msk & intf) &&
- (sub_seq_cmd->fab_msk & fab) &&
- (sub_seq_cmd->cut_msk & cut)) {
- status = halmac_pwr_sub_seq_parer_do_cmd_88xx(
- halmac_adapter, sub_seq_cmd, &reti);
-
- if (reti)
- return status;
- }
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_get_h2c_buff_free_space_88xx(struct halmac_adapter *halmac_adapter)
-{
- u32 hw_wptr, fw_rptr;
- struct halmac_api *halmac_api =
- (struct halmac_api *)halmac_adapter->halmac_api;
-
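-	/* The H2C buffer is a ring: free space is the buffer size minus the
-	 * distance from the firmware read pointer to the host write pointer.
-	 */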
- hw_wptr = HALMAC_REG_READ_32(halmac_adapter, REG_H2C_PKT_WRITEADDR) &
- BIT_MASK_H2C_WR_ADDR;
- fw_rptr = HALMAC_REG_READ_32(halmac_adapter, REG_H2C_PKT_READADDR) &
- BIT_MASK_H2C_READ_ADDR;
-
- if (hw_wptr >= fw_rptr)
- halmac_adapter->h2c_buf_free_space =
- halmac_adapter->h2c_buff_size - (hw_wptr - fw_rptr);
- else
- halmac_adapter->h2c_buf_free_space = fw_rptr - hw_wptr;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_send_h2c_pkt_88xx(struct halmac_adapter *halmac_adapter, u8 *hal_h2c_cmd,
- u32 size, bool ack)
-{
- u32 counter = 100;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- while (halmac_adapter->h2c_buf_free_space <=
- HALMAC_H2C_CMD_SIZE_UNIT_88XX) {
- halmac_get_h2c_buff_free_space_88xx(halmac_adapter);
- counter--;
- if (counter == 0) {
- pr_err("h2c free space is not enough!!\n");
- return HALMAC_RET_H2C_SPACE_FULL;
- }
- }
-
- /* Send TxDesc + H2C_CMD */
- if (!PLATFORM_SEND_H2C_PKT(driver_adapter, hal_h2c_cmd, size)) {
- pr_err("Send H2C_CMD pkt error!!\n");
- return HALMAC_RET_SEND_H2C_FAIL;
- }
-
- halmac_adapter->h2c_buf_free_space -= HALMAC_H2C_CMD_SIZE_UNIT_88XX;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "H2C free space : %d\n",
- halmac_adapter->h2c_buf_free_space);
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_download_rsvd_page_88xx(struct halmac_adapter *halmac_adapter,
- u8 *hal_buf, u32 size)
-{
- u8 restore[3];
- u8 value8;
- u32 counter;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (size == 0) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "Rsvd page packet size is zero!!\n");
- return HALMAC_RET_ZERO_LEN_RSVD_PACKET;
- }
-
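-	/* Save CR, BCN_CTRL and FWHW_TXQ_CTRL, adjust them so the reserved
-	 * page can be downloaded, send the packet, poll the beacon-valid
-	 * bit, then restore the original settings.
-	 */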
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_FIFOPAGE_CTRL_2 + 1);
- value8 = (u8)(value8 | BIT(7));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FIFOPAGE_CTRL_2 + 1, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_CR + 1);
- restore[0] = value8;
- value8 = (u8)(value8 | BIT(0));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR + 1, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_BCN_CTRL);
- restore[1] = value8;
- value8 = (u8)((value8 & ~(BIT(3))) | BIT(4));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_BCN_CTRL, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_FWHW_TXQ_CTRL + 2);
- restore[2] = value8;
- value8 = (u8)(value8 & ~(BIT(6)));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FWHW_TXQ_CTRL + 2, value8);
-
- if (!PLATFORM_SEND_RSVD_PAGE(driver_adapter, hal_buf, size)) {
- pr_err("PLATFORM_SEND_RSVD_PAGE 1 error!!\n");
- status = HALMAC_RET_DL_RSVD_PAGE_FAIL;
- }
-
- /* Check Bcn_Valid_Bit */
- counter = 1000;
- while (!(HALMAC_REG_READ_8(halmac_adapter, REG_FIFOPAGE_CTRL_2 + 1) &
- BIT(7))) {
- udelay(10);
- counter--;
- if (counter == 0) {
- pr_err("Polling Bcn_Valid_Fail error!!\n");
- status = HALMAC_RET_POLLING_BCN_VALID_FAIL;
- break;
- }
- }
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_FIFOPAGE_CTRL_2 + 1);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FIFOPAGE_CTRL_2 + 1,
- (value8 | BIT(7)));
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_FWHW_TXQ_CTRL + 2, restore[2]);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_BCN_CTRL, restore[1]);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_CR + 1, restore[0]);
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_set_h2c_header_88xx(struct halmac_adapter *halmac_adapter,
- u8 *hal_h2c_hdr, u16 *seq, bool ack)
-{
- void *driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s!!\n", __func__);
-
- H2C_CMD_HEADER_SET_CATEGORY(hal_h2c_hdr, 0x00);
- H2C_CMD_HEADER_SET_TOTAL_LEN(hal_h2c_hdr, 16);
-
- spin_lock(&halmac_adapter->h2c_seq_lock);
- H2C_CMD_HEADER_SET_SEQ_NUM(hal_h2c_hdr, halmac_adapter->h2c_packet_seq);
- *seq = halmac_adapter->h2c_packet_seq;
- halmac_adapter->h2c_packet_seq++;
- spin_unlock(&halmac_adapter->h2c_seq_lock);
-
- if (ack)
- H2C_CMD_HEADER_SET_ACK(hal_h2c_hdr, 1);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_set_fw_offload_h2c_header_88xx(
- struct halmac_adapter *halmac_adapter, u8 *hal_h2c_hdr,
- struct halmac_h2c_header_info *h2c_header_info, u16 *seq_num)
-{
- void *driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s!!\n", __func__);
-
- FW_OFFLOAD_H2C_SET_TOTAL_LEN(hal_h2c_hdr,
- 8 + h2c_header_info->content_size);
- FW_OFFLOAD_H2C_SET_SUB_CMD_ID(hal_h2c_hdr, h2c_header_info->sub_cmd_id);
-
- FW_OFFLOAD_H2C_SET_CATEGORY(hal_h2c_hdr, 0x01);
- FW_OFFLOAD_H2C_SET_CMD_ID(hal_h2c_hdr, 0xFF);
-
- spin_lock(&halmac_adapter->h2c_seq_lock);
- FW_OFFLOAD_H2C_SET_SEQ_NUM(hal_h2c_hdr, halmac_adapter->h2c_packet_seq);
- *seq_num = halmac_adapter->h2c_packet_seq;
- halmac_adapter->h2c_packet_seq++;
- spin_unlock(&halmac_adapter->h2c_seq_lock);
-
- if (h2c_header_info->ack)
- FW_OFFLOAD_H2C_SET_ACK(hal_h2c_hdr, 1);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_send_h2c_set_pwr_mode_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_fwlps_option *hal_fw_lps_opt)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX];
- u8 *h2c_header, *h2c_cmd;
- u16 seq = 0;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s!!\n", __func__);
-
- h2c_header = h2c_buff;
- h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
-
- memset(h2c_buff, 0x00, HALMAC_H2C_CMD_SIZE_88XX);
-
- SET_PWR_MODE_SET_CMD_ID(h2c_cmd, CMD_ID_SET_PWR_MODE);
- SET_PWR_MODE_SET_CLASS(h2c_cmd, CLASS_SET_PWR_MODE);
- SET_PWR_MODE_SET_MODE(h2c_cmd, hal_fw_lps_opt->mode);
- SET_PWR_MODE_SET_CLK_REQUEST(h2c_cmd, hal_fw_lps_opt->clk_request);
- SET_PWR_MODE_SET_RLBM(h2c_cmd, hal_fw_lps_opt->rlbm);
- SET_PWR_MODE_SET_SMART_PS(h2c_cmd, hal_fw_lps_opt->smart_ps);
- SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_cmd,
- hal_fw_lps_opt->awake_interval);
- SET_PWR_MODE_SET_B_ALL_QUEUE_UAPSD(h2c_cmd,
- hal_fw_lps_opt->all_queue_uapsd);
- SET_PWR_MODE_SET_PWR_STATE(h2c_cmd, hal_fw_lps_opt->pwr_state);
- SET_PWR_MODE_SET_ANT_AUTO_SWITCH(h2c_cmd,
- hal_fw_lps_opt->ant_auto_switch);
- SET_PWR_MODE_SET_PS_ALLOW_BT_HIGH_PRIORITY(
- h2c_cmd, hal_fw_lps_opt->ps_allow_bt_high_priority);
- SET_PWR_MODE_SET_PROTECT_BCN(h2c_cmd, hal_fw_lps_opt->protect_bcn);
- SET_PWR_MODE_SET_SILENCE_PERIOD(h2c_cmd,
- hal_fw_lps_opt->silence_period);
- SET_PWR_MODE_SET_FAST_BT_CONNECT(h2c_cmd,
- hal_fw_lps_opt->fast_bt_connect);
- SET_PWR_MODE_SET_TWO_ANTENNA_EN(h2c_cmd,
- hal_fw_lps_opt->two_antenna_en);
- SET_PWR_MODE_SET_ADOPT_USER_SETTING(h2c_cmd,
- hal_fw_lps_opt->adopt_user_setting);
- SET_PWR_MODE_SET_DRV_BCN_EARLY_SHIFT(
- h2c_cmd, hal_fw_lps_opt->drv_bcn_early_shift);
-
- halmac_set_h2c_header_88xx(halmac_adapter, h2c_header, &seq, true);
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s Fail = %x!!\n", __func__, status);
- return status;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_func_send_original_h2c_88xx(struct halmac_adapter *halmac_adapter,
- u8 *original_h2c, u16 *seq, u8 ack)
-{
-	u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u8 *h2c_header, *h2c_cmd;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "halmac_send_original_h2c ==========>\n");
-
-	h2c_header = h2c_buff;
- h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
-	memcpy(h2c_cmd, original_h2c, 8); /* Original H2C payload is 8 bytes */
-
- halmac_set_h2c_header_88xx(halmac_adapter, h2c_header, seq, ack);
-
-	status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, ack);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_original_h2c Fail = %x!!\n", status);
- return status;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "halmac_send_original_h2c <==========\n");
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_media_status_rpt_88xx(struct halmac_adapter *halmac_adapter, u8 op_mode,
- u8 mac_id_ind, u8 mac_id, u8 mac_id_end)
-{
-	u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u8 *h2c_header, *h2c_cmd;
- u16 seq = 0;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
-	HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
-			"%s!!\n", __func__);
-
-	h2c_header = h2c_buff;
- h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
-
- MEDIA_STATUS_RPT_SET_CMD_ID(h2c_cmd, CMD_ID_MEDIA_STATUS_RPT);
- MEDIA_STATUS_RPT_SET_CLASS(h2c_cmd, CLASS_MEDIA_STATUS_RPT);
- MEDIA_STATUS_RPT_SET_OP_MODE(h2c_cmd, op_mode);
- MEDIA_STATUS_RPT_SET_MACID_IN(h2c_cmd, mac_id_ind);
- MEDIA_STATUS_RPT_SET_MACID(h2c_cmd, mac_id);
- MEDIA_STATUS_RPT_SET_MACID_END(h2c_cmd, mac_id_end);
-
- halmac_set_h2c_header_88xx(halmac_adapter, h2c_header, &seq, true);
-
-	status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s Fail = %x!!\n", __func__, status);
- return status;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_send_h2c_update_packet_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_packet_id pkt_id, u8 *pkt,
- u32 pkt_size)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status ret_status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
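-	/* Point BCN_HEAD at the reserved H2C extra info page for the download */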
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation
- .rsvd_h2c_extra_info_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
-
- ret_status =
- halmac_download_rsvd_page_88xx(halmac_adapter, pkt, pkt_size);
-
- if (ret_status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_download_rsvd_page_88xx Fail = %x!!\n",
- ret_status);
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
- return ret_status;
- }
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
-
- UPDATE_PACKET_SET_SIZE(
- h2c_buff,
- pkt_size + halmac_adapter->hw_config_info.txdesc_size);
- UPDATE_PACKET_SET_PACKET_ID(h2c_buff, pkt_id);
- UPDATE_PACKET_SET_PACKET_LOC(
- h2c_buff,
- halmac_adapter->txff_allocation.rsvd_h2c_extra_info_pg_bndy -
- halmac_adapter->txff_allocation.rsvd_pg_bndy);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_UPDATE_PACKET;
- h2c_header_info.content_size = 8;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
- halmac_adapter->halmac_state.update_packet_set.seq_num = h2c_seq_mum;
-
- ret_status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (ret_status != HALMAC_RET_SUCCESS) {
- pr_err("%s Fail = %x!!\n", __func__, ret_status);
- return ret_status;
- }
-
- return ret_status;
-}
-
-enum halmac_ret_status
-halmac_send_h2c_phy_parameter_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_phy_parameter_info *para_info,
- bool full_fifo)
-{
- bool drv_trigger_send = false;
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- u32 info_size = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- struct halmac_config_para_info *config_para_info;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
- config_para_info = &halmac_adapter->config_para_info;
-
- if (!config_para_info->cfg_para_buf) {
- if (full_fifo)
- config_para_info->para_buf_size =
- HALMAC_EXTRA_INFO_BUFF_SIZE_FULL_FIFO_88XX;
- else
- config_para_info->para_buf_size =
- HALMAC_EXTRA_INFO_BUFF_SIZE_88XX;
-
- config_para_info->cfg_para_buf =
- kzalloc(config_para_info->para_buf_size, GFP_KERNEL);
-
- if (config_para_info->cfg_para_buf) {
- config_para_info->full_fifo_mode = full_fifo;
- config_para_info->para_buf_w =
- config_para_info->cfg_para_buf;
- config_para_info->para_num = 0;
- config_para_info->avai_para_buf_size =
- config_para_info->para_buf_size;
- config_para_info->value_accumulation = 0;
- config_para_info->offset_accumulation = 0;
- } else {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C,
- DBG_DMESG,
- "Allocate cfg_para_buf fail!!\n");
- return HALMAC_RET_MALLOC_FAIL;
- }
- }
-
- if (halmac_transition_cfg_para_state_88xx(
- halmac_adapter,
- HALMAC_CFG_PARA_CMD_CONSTRUCT_CONSTRUCTING) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- halmac_enqueue_para_buff_88xx(halmac_adapter, para_info,
- config_para_info->para_buf_w,
- &drv_trigger_send);
-
- if (para_info->cmd_id != HALMAC_PARAMETER_CMD_END) {
- config_para_info->para_num++;
- config_para_info->para_buf_w += HALMAC_FW_OFFLOAD_CMD_SIZE_88XX;
- config_para_info->avai_para_buf_size =
- config_para_info->avai_para_buf_size -
- HALMAC_FW_OFFLOAD_CMD_SIZE_88XX;
- }
-
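-	/* Keep buffering until the buffer is nearly full or an end cmd arrives */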
- if ((config_para_info->avai_para_buf_size -
- halmac_adapter->hw_config_info.txdesc_size) >
- HALMAC_FW_OFFLOAD_CMD_SIZE_88XX &&
- !drv_trigger_send)
- return HALMAC_RET_SUCCESS;
-
- if (config_para_info->para_num == 0) {
- kfree(config_para_info->cfg_para_buf);
- config_para_info->cfg_para_buf = NULL;
- config_para_info->para_buf_w = NULL;
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_WARNING,
- "no cfg parameter element!!\n");
-
- if (halmac_transition_cfg_para_state_88xx(
- halmac_adapter,
- HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_transition_cfg_para_state_88xx(
- halmac_adapter, HALMAC_CFG_PARA_CMD_CONSTRUCT_H2C_SENT) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- halmac_adapter->halmac_state.cfg_para_state_set.process_status =
- HALMAC_CMD_PROCESS_SENDING;
-
- if (config_para_info->full_fifo_mode)
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2, 0);
- else
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation
- .rsvd_h2c_extra_info_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
-
- info_size =
- config_para_info->para_num * HALMAC_FW_OFFLOAD_CMD_SIZE_88XX;
-
- status = halmac_download_rsvd_page_88xx(
- halmac_adapter, (u8 *)config_para_info->cfg_para_buf,
- info_size);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_download_rsvd_page_88xx Fail!!\n");
- } else {
- halmac_gen_cfg_para_h2c_88xx(halmac_adapter, h2c_buff);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_CFG_PARAMETER;
- h2c_header_info.content_size = 4;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info,
- &h2c_seq_mum);
-
- halmac_adapter->halmac_state.cfg_para_state_set.seq_num =
- h2c_seq_mum;
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX,
- true);
-
- if (status != HALMAC_RET_SUCCESS)
- pr_err("halmac_send_h2c_pkt_88xx Fail!!\n");
-
- HALMAC_RT_TRACE(
- driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "config parameter time = %d\n",
- HALMAC_REG_READ_32(halmac_adapter, REG_FW_DBG6));
- }
-
- kfree(config_para_info->cfg_para_buf);
- config_para_info->cfg_para_buf = NULL;
- config_para_info->para_buf_w = NULL;
-
- /* Restore bcn head */
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
-
- if (halmac_transition_cfg_para_state_88xx(
- halmac_adapter, HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- if (!drv_trigger_send) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "Buffer full trigger sending H2C!!\n");
- return HALMAC_RET_PARA_SENDING;
- }
-
- return status;
-}
-
-static enum halmac_ret_status
-halmac_enqueue_para_buff_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_phy_parameter_info *para_info,
- u8 *curr_buff_wptr, bool *end_cmd)
-{
- struct halmac_config_para_info *config_para_info =
- &halmac_adapter->config_para_info;
-
- *end_cmd = false;
-
- PHY_PARAMETER_INFO_SET_LENGTH(curr_buff_wptr,
- HALMAC_FW_OFFLOAD_CMD_SIZE_88XX);
- PHY_PARAMETER_INFO_SET_IO_CMD(curr_buff_wptr, para_info->cmd_id);
-
- switch (para_info->cmd_id) {
- case HALMAC_PARAMETER_CMD_BB_W8:
- case HALMAC_PARAMETER_CMD_BB_W16:
- case HALMAC_PARAMETER_CMD_BB_W32:
- case HALMAC_PARAMETER_CMD_MAC_W8:
- case HALMAC_PARAMETER_CMD_MAC_W16:
- case HALMAC_PARAMETER_CMD_MAC_W32:
- PHY_PARAMETER_INFO_SET_IO_ADDR(
- curr_buff_wptr, para_info->content.MAC_REG_W.offset);
- PHY_PARAMETER_INFO_SET_DATA(curr_buff_wptr,
- para_info->content.MAC_REG_W.value);
- PHY_PARAMETER_INFO_SET_MASK(curr_buff_wptr,
- para_info->content.MAC_REG_W.msk);
- PHY_PARAMETER_INFO_SET_MSK_EN(
- curr_buff_wptr, para_info->content.MAC_REG_W.msk_en);
- config_para_info->value_accumulation +=
- para_info->content.MAC_REG_W.value;
- config_para_info->offset_accumulation +=
- para_info->content.MAC_REG_W.offset;
- break;
- case HALMAC_PARAMETER_CMD_RF_W:
-		/* In an RF register, the address is only one byte */
- PHY_PARAMETER_INFO_SET_RF_ADDR(
- curr_buff_wptr, para_info->content.RF_REG_W.offset);
- PHY_PARAMETER_INFO_SET_RF_PATH(
- curr_buff_wptr, para_info->content.RF_REG_W.rf_path);
- PHY_PARAMETER_INFO_SET_DATA(curr_buff_wptr,
- para_info->content.RF_REG_W.value);
- PHY_PARAMETER_INFO_SET_MASK(curr_buff_wptr,
- para_info->content.RF_REG_W.msk);
- PHY_PARAMETER_INFO_SET_MSK_EN(
- curr_buff_wptr, para_info->content.RF_REG_W.msk_en);
- config_para_info->value_accumulation +=
- para_info->content.RF_REG_W.value;
- config_para_info->offset_accumulation +=
- (para_info->content.RF_REG_W.offset +
- (para_info->content.RF_REG_W.rf_path << 8));
- break;
- case HALMAC_PARAMETER_CMD_DELAY_US:
- case HALMAC_PARAMETER_CMD_DELAY_MS:
- PHY_PARAMETER_INFO_SET_DELAY_VALUE(
- curr_buff_wptr,
- para_info->content.DELAY_TIME.delay_time);
- break;
- case HALMAC_PARAMETER_CMD_END:
- *end_cmd = true;
- break;
- default:
-		pr_err("halmac_send_h2c_phy_parameter_88xx illegal cmd_id!!\n");
- break;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_gen_cfg_para_h2c_88xx(struct halmac_adapter *halmac_adapter,
- u8 *h2c_buff)
-{
- struct halmac_config_para_info *config_para_info =
- &halmac_adapter->config_para_info;
-
- CFG_PARAMETER_SET_NUM(h2c_buff, config_para_info->para_num);
-
- if (config_para_info->full_fifo_mode) {
- CFG_PARAMETER_SET_INIT_CASE(h2c_buff, 0x1);
- CFG_PARAMETER_SET_PHY_PARAMETER_LOC(h2c_buff, 0);
- } else {
- CFG_PARAMETER_SET_INIT_CASE(h2c_buff, 0x0);
- CFG_PARAMETER_SET_PHY_PARAMETER_LOC(
- h2c_buff,
- halmac_adapter->txff_allocation
- .rsvd_h2c_extra_info_pg_bndy -
- halmac_adapter->txff_allocation.rsvd_pg_bndy);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_send_h2c_run_datapack_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = NULL;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s!!\n", __func__);
-
- RUN_DATAPACK_SET_DATAPACK_ID(h2c_buff, halmac_data_type);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_RUN_DATAPACK;
- h2c_header_info.content_size = 4;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
- return status;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_send_bt_coex_cmd_88xx(struct halmac_adapter *halmac_adapter, u8 *bt_buf,
- u32 bt_size, u8 ack)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = NULL;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s!!\n", __func__);
-
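-	/* The payload starts after the 8 byte FW offload H2C header */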
- memcpy(h2c_buff + 8, bt_buf, bt_size);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_BT_COEX;
- h2c_header_info.content_size = (u16)bt_size;
- h2c_header_info.ack = ack;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, ack);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
- return status;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_func_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ch_switch_option *cs_option)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- enum halmac_cmd_process_status *process_status =
- &halmac_adapter->halmac_state.scan_state_set.process_status;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "halmac_ctrl_ch_switch!!\n");
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (halmac_transition_scan_state_88xx(
- halmac_adapter, HALMAC_SCAN_CMD_CONSTRUCT_H2C_SENT) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- *process_status = HALMAC_CMD_PROCESS_SENDING;
-
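-	/* Download the channel info buffer to the reserved page first */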
- if (cs_option->switch_en != 0) {
- HALMAC_REG_WRITE_16(halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation
- .rsvd_h2c_extra_info_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
-
- status = halmac_download_rsvd_page_88xx(
- halmac_adapter, halmac_adapter->ch_sw_info.ch_info_buf,
- halmac_adapter->ch_sw_info.total_size);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_download_rsvd_page_88xx Fail = %x!!\n",
- status);
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation
- .rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
- return status;
- }
-
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_FIFOPAGE_CTRL_2,
- (u16)(halmac_adapter->txff_allocation.rsvd_pg_bndy &
- BIT_MASK_BCN_HEAD_1_V1));
- }
-
- CHANNEL_SWITCH_SET_SWITCH_START(h2c_buff, cs_option->switch_en);
- CHANNEL_SWITCH_SET_CHANNEL_NUM(h2c_buff,
- halmac_adapter->ch_sw_info.ch_num);
- CHANNEL_SWITCH_SET_CHANNEL_INFO_LOC(
- h2c_buff,
- halmac_adapter->txff_allocation.rsvd_h2c_extra_info_pg_bndy -
- halmac_adapter->txff_allocation.rsvd_pg_bndy);
- CHANNEL_SWITCH_SET_DEST_CH_EN(h2c_buff, cs_option->dest_ch_en);
- CHANNEL_SWITCH_SET_DEST_CH(h2c_buff, cs_option->dest_ch);
- CHANNEL_SWITCH_SET_PRI_CH_IDX(h2c_buff, cs_option->dest_pri_ch_idx);
- CHANNEL_SWITCH_SET_ABSOLUTE_TIME(h2c_buff, cs_option->absolute_time_en);
- CHANNEL_SWITCH_SET_TSF_LOW(h2c_buff, cs_option->tsf_low);
- CHANNEL_SWITCH_SET_PERIODIC_OPTION(h2c_buff,
- cs_option->periodic_option);
- CHANNEL_SWITCH_SET_NORMAL_CYCLE(h2c_buff, cs_option->normal_cycle);
- CHANNEL_SWITCH_SET_NORMAL_PERIOD(h2c_buff, cs_option->normal_period);
- CHANNEL_SWITCH_SET_SLOW_PERIOD(h2c_buff, cs_option->phase_2_period);
- CHANNEL_SWITCH_SET_CHANNEL_INFO_SIZE(
- h2c_buff, halmac_adapter->ch_sw_info.total_size);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_CHANNEL_SWITCH;
- h2c_header_info.content_size = 20;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
- halmac_adapter->halmac_state.scan_state_set.seq_num = h2c_seq_mum;
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS)
- pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
-
- kfree(halmac_adapter->ch_sw_info.ch_info_buf);
- halmac_adapter->ch_sw_info.ch_info_buf = NULL;
- halmac_adapter->ch_sw_info.ch_info_buf_w = NULL;
- halmac_adapter->ch_sw_info.extra_info_en = 0;
- halmac_adapter->ch_sw_info.buf_size = 0;
- halmac_adapter->ch_sw_info.avai_buf_size = 0;
- halmac_adapter->ch_sw_info.total_size = 0;
- halmac_adapter->ch_sw_info.ch_num = 0;
-
- if (halmac_transition_scan_state_88xx(halmac_adapter,
- HALMAC_SCAN_CMD_CONSTRUCT_IDLE) !=
- HALMAC_RET_SUCCESS)
- return HALMAC_RET_ERROR_STATE;
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_func_send_general_info_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_general_info *general_info)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = NULL;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "halmac_send_general_info!!\n");
-
- GENERAL_INFO_SET_REF_TYPE(h2c_buff, general_info->rfe_type);
- GENERAL_INFO_SET_RF_TYPE(h2c_buff, general_info->rf_type);
- GENERAL_INFO_SET_FW_TX_BOUNDARY(
- h2c_buff,
- halmac_adapter->txff_allocation.rsvd_fw_txbuff_pg_bndy -
- halmac_adapter->txff_allocation.rsvd_pg_bndy);
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_GENERAL_INFO;
- h2c_header_info.content_size = 4;
- h2c_header_info.ack = false;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS)
- pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
-
- return status;
-}
-
-enum halmac_ret_status halmac_send_h2c_update_bcn_parse_info_88xx(
- struct halmac_adapter *halmac_adapter,
- struct halmac_bcn_ie_info *bcn_ie_info)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u16 h2c_seq_mum = 0;
- void *driver_adapter = halmac_adapter->driver_adapter;
- struct halmac_h2c_header_info h2c_header_info;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s!!\n", __func__);
-
- UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(h2c_buff, bcn_ie_info->func_en);
- UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(h2c_buff, bcn_ie_info->size_th);
- UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(h2c_buff, bcn_ie_info->timeout);
-
- UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_0(
- h2c_buff, (u32)(bcn_ie_info->ie_bmp[0]));
- UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_1(
- h2c_buff, (u32)(bcn_ie_info->ie_bmp[1]));
- UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_2(
- h2c_buff, (u32)(bcn_ie_info->ie_bmp[2]));
- UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_3(
- h2c_buff, (u32)(bcn_ie_info->ie_bmp[3]));
- UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_4(
- h2c_buff, (u32)(bcn_ie_info->ie_bmp[4]));
-
- h2c_header_info.sub_cmd_id = SUB_CMD_ID_UPDATE_BEACON_PARSING_INFO;
- h2c_header_info.content_size = 24;
- h2c_header_info.ack = true;
- halmac_set_fw_offload_h2c_header_88xx(halmac_adapter, h2c_buff,
- &h2c_header_info, &h2c_seq_mum);
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, true);
-
- if (status != HALMAC_RET_SUCCESS) {
-		pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
- return status;
- }
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_send_h2c_ps_tuning_para_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
- u8 *h2c_header, *h2c_cmd;
- u16 seq = 0;
- void *driver_adapter = NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "%s!!\n", __func__);
-
- h2c_header = h2c_buff;
- h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
-
- halmac_set_h2c_header_88xx(halmac_adapter, h2c_header, &seq, false);
-
- status = halmac_send_h2c_pkt_88xx(halmac_adapter, h2c_buff,
- HALMAC_H2C_CMD_SIZE_88XX, false);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_h2c_pkt_88xx Fail = %x!!\n", status);
- return status;
- }
-
- return status;
-}
-
-enum halmac_ret_status
-halmac_parse_c2h_packet_88xx(struct halmac_adapter *halmac_adapter,
- u8 *halmac_buf, u32 halmac_size)
-{
- u8 c2h_cmd, c2h_sub_cmd_id;
- u8 *c2h_buf = halmac_buf + halmac_adapter->hw_config_info.rxdesc_size;
- u32 c2h_size = halmac_size - halmac_adapter->hw_config_info.rxdesc_size;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- c2h_cmd = (u8)C2H_HDR_GET_CMD_ID(c2h_buf);
-
- /* FW offload C2H cmd is 0xFF */
- if (c2h_cmd != 0xFF) {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "C2H_PKT not for FwOffloadC2HFormat!!\n");
- return HALMAC_RET_C2H_NOT_HANDLED;
- }
-
- /* Get C2H sub cmd ID */
- c2h_sub_cmd_id = (u8)C2H_HDR_GET_C2H_SUB_CMD_ID(c2h_buf);
-
- switch (c2h_sub_cmd_id) {
- case C2H_SUB_CMD_ID_C2H_DBG:
- status = halmac_parse_c2h_debug_88xx(halmac_adapter, c2h_buf,
- c2h_size);
- break;
- case C2H_SUB_CMD_ID_H2C_ACK_HDR:
- status = halmac_parse_h2c_ack_88xx(halmac_adapter, c2h_buf,
- c2h_size);
- break;
- case C2H_SUB_CMD_ID_BT_COEX_INFO:
- status = HALMAC_RET_C2H_NOT_HANDLED;
- break;
- case C2H_SUB_CMD_ID_SCAN_STATUS_RPT:
- status = halmac_parse_scan_status_rpt_88xx(halmac_adapter,
- c2h_buf, c2h_size);
- break;
- case C2H_SUB_CMD_ID_PSD_DATA:
- status = halmac_parse_psd_data_88xx(halmac_adapter, c2h_buf,
- c2h_size);
- break;
-
- case C2H_SUB_CMD_ID_EFUSE_DATA:
- status = halmac_parse_efuse_data_88xx(halmac_adapter, c2h_buf,
- c2h_size);
- break;
- default:
- pr_err("c2h_sub_cmd_id switch case out of boundary!!\n");
- pr_err("[ERR]c2h pkt : %.8X %.8X!!\n", *(u32 *)c2h_buf,
- *(u32 *)(c2h_buf + 4));
- status = HALMAC_RET_C2H_NOT_HANDLED;
- break;
- }
-
- return status;
-}
-
-static enum halmac_ret_status
-halmac_parse_c2h_debug_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
- u32 c2h_size)
-{
- void *driver_adapter = NULL;
- u8 *c2h_buf_local = (u8 *)NULL;
- u32 c2h_size_local = 0;
- u8 dbg_content_length = 0;
- u8 dbg_seq_num = 0;
-
- driver_adapter = halmac_adapter->driver_adapter;
- c2h_buf_local = c2h_buf;
- c2h_size_local = c2h_size;
-
- dbg_content_length = (u8)C2H_HDR_GET_LEN((u8 *)c2h_buf_local);
-
- if (dbg_content_length > C2H_DBG_CONTENT_MAX_LENGTH)
- return HALMAC_RET_SUCCESS;
-
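-	/* Terminate the FW debug string, then print it after the seq byte */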
- *(c2h_buf_local + C2H_DBG_HEADER_LENGTH + dbg_content_length - 2) =
- '\n';
- dbg_seq_num = (u8)(*(c2h_buf_local + C2H_DBG_HEADER_LENGTH));
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[RTKFW, SEQ=%d]: %s", dbg_seq_num,
- (char *)(c2h_buf_local + C2H_DBG_HEADER_LENGTH + 1));
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_scan_status_rpt_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- u8 h2c_return_code;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status;
-
- h2c_return_code = (u8)SCAN_STATUS_RPT_GET_H2C_RETURN_CODE(c2h_buf);
- process_status = (enum halmac_h2c_return_code)h2c_return_code ==
- HALMAC_H2C_RETURN_SUCCESS ?
- HALMAC_CMD_PROCESS_DONE :
- HALMAC_CMD_PROCESS_ERROR;
-
- PLATFORM_EVENT_INDICATION(driver_adapter, HALMAC_FEATURE_CHANNEL_SWITCH,
- process_status, NULL, 0);
-
- halmac_adapter->halmac_state.scan_state_set.process_status =
- process_status;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]scan status : %X\n", process_status);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_psd_data_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
- u32 c2h_size)
-{
- u8 segment_id = 0, segment_size = 0, h2c_seq = 0;
- u16 total_size;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status;
- struct halmac_psd_state_set *psd_set =
- &halmac_adapter->halmac_state.psd_set;
-
- h2c_seq = (u8)PSD_DATA_GET_H2C_SEQ(c2h_buf);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]Seq num : h2c -> %d c2h -> %d\n",
- psd_set->seq_num, h2c_seq);
- if (h2c_seq != psd_set->seq_num) {
- pr_err("[ERR]Seq num mismatch : h2c -> %d c2h -> %d\n",
- psd_set->seq_num, h2c_seq);
- return HALMAC_RET_SUCCESS;
- }
-
- if (psd_set->process_status != HALMAC_CMD_PROCESS_SENDING) {
- pr_err("[ERR]Not in HALMAC_CMD_PROCESS_SENDING\n");
- return HALMAC_RET_SUCCESS;
- }
-
- total_size = (u16)PSD_DATA_GET_TOTAL_SIZE(c2h_buf);
- segment_id = (u8)PSD_DATA_GET_SEGMENT_ID(c2h_buf);
- segment_size = (u8)PSD_DATA_GET_SEGMENT_SIZE(c2h_buf);
- psd_set->data_size = total_size;
-
- if (!psd_set->data) {
- psd_set->data = kzalloc(psd_set->data_size, GFP_KERNEL);
- if (!psd_set->data)
- return HALMAC_RET_MALLOC_FAIL;
- }
-
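-	/* Segment 0 sets the segment size used to place later segments */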
- if (segment_id == 0)
- psd_set->segment_size = segment_size;
-
- memcpy(psd_set->data + segment_id * psd_set->segment_size,
- c2h_buf + HALMAC_C2H_DATA_OFFSET_88XX, segment_size);
-
- if (!PSD_DATA_GET_END_SEGMENT(c2h_buf))
- return HALMAC_RET_SUCCESS;
-
- process_status = HALMAC_CMD_PROCESS_DONE;
- psd_set->process_status = process_status;
-
- PLATFORM_EVENT_INDICATION(driver_adapter, HALMAC_FEATURE_PSD,
- process_status, psd_set->data,
- psd_set->data_size);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_efuse_data_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
- u32 c2h_size)
-{
- u8 segment_id = 0, segment_size = 0, h2c_seq = 0;
- u8 *eeprom_map = NULL;
- u32 eeprom_size = halmac_adapter->hw_config_info.eeprom_size;
- u8 h2c_return_code = 0;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status;
-
- h2c_seq = (u8)EFUSE_DATA_GET_H2C_SEQ(c2h_buf);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]Seq num : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.efuse_state_set.seq_num,
- h2c_seq);
- if (h2c_seq != halmac_adapter->halmac_state.efuse_state_set.seq_num) {
- pr_err("[ERR]Seq num mismatch : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.efuse_state_set.seq_num,
- h2c_seq);
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_adapter->halmac_state.efuse_state_set.process_status !=
- HALMAC_CMD_PROCESS_SENDING) {
- pr_err("[ERR]Not in HALMAC_CMD_PROCESS_SENDING\n");
- return HALMAC_RET_SUCCESS;
- }
-
- segment_id = (u8)EFUSE_DATA_GET_SEGMENT_ID(c2h_buf);
- segment_size = (u8)EFUSE_DATA_GET_SEGMENT_SIZE(c2h_buf);
- if (segment_id == 0)
- halmac_adapter->efuse_segment_size = segment_size;
-
- eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map)
- return HALMAC_RET_MALLOC_FAIL;
- memset(eeprom_map, 0xFF, eeprom_size);
-
- spin_lock(&halmac_adapter->efuse_lock);
- memcpy(halmac_adapter->hal_efuse_map +
- segment_id * halmac_adapter->efuse_segment_size,
- c2h_buf + HALMAC_C2H_DATA_OFFSET_88XX, segment_size);
- spin_unlock(&halmac_adapter->efuse_lock);
-
- if (!EFUSE_DATA_GET_END_SEGMENT(c2h_buf)) {
- kfree(eeprom_map);
- return HALMAC_RET_SUCCESS;
- }
-
- h2c_return_code =
- halmac_adapter->halmac_state.efuse_state_set.fw_return_code;
-
- if ((enum halmac_h2c_return_code)h2c_return_code ==
- HALMAC_H2C_RETURN_SUCCESS) {
- process_status = HALMAC_CMD_PROCESS_DONE;
- halmac_adapter->halmac_state.efuse_state_set.process_status =
- process_status;
-
- spin_lock(&halmac_adapter->efuse_lock);
- halmac_adapter->hal_efuse_map_valid = true;
- spin_unlock(&halmac_adapter->efuse_lock);
-
- if (halmac_adapter->event_trigger.physical_efuse_map == 1) {
- PLATFORM_EVENT_INDICATION(
- driver_adapter,
- HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE,
- process_status, halmac_adapter->hal_efuse_map,
- halmac_adapter->hw_config_info.efuse_size);
- halmac_adapter->event_trigger.physical_efuse_map = 0;
- }
-
- if (halmac_adapter->event_trigger.logical_efuse_map == 1) {
- if (halmac_eeprom_parser_88xx(
- halmac_adapter,
- halmac_adapter->hal_efuse_map,
- eeprom_map) != HALMAC_RET_SUCCESS) {
- kfree(eeprom_map);
- return HALMAC_RET_EEPROM_PARSING_FAIL;
- }
- PLATFORM_EVENT_INDICATION(
- driver_adapter,
- HALMAC_FEATURE_DUMP_LOGICAL_EFUSE,
- process_status, eeprom_map, eeprom_size);
- halmac_adapter->event_trigger.logical_efuse_map = 0;
- }
- } else {
- process_status = HALMAC_CMD_PROCESS_ERROR;
- halmac_adapter->halmac_state.efuse_state_set.process_status =
- process_status;
-
- if (halmac_adapter->event_trigger.physical_efuse_map == 1) {
- PLATFORM_EVENT_INDICATION(
- driver_adapter,
- HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE,
- process_status,
- &halmac_adapter->halmac_state.efuse_state_set
- .fw_return_code,
- 1);
- halmac_adapter->event_trigger.physical_efuse_map = 0;
- }
-
- if (halmac_adapter->event_trigger.logical_efuse_map == 1) {
- if (halmac_eeprom_parser_88xx(
- halmac_adapter,
- halmac_adapter->hal_efuse_map,
- eeprom_map) != HALMAC_RET_SUCCESS) {
- kfree(eeprom_map);
- return HALMAC_RET_EEPROM_PARSING_FAIL;
- }
- PLATFORM_EVENT_INDICATION(
- driver_adapter,
- HALMAC_FEATURE_DUMP_LOGICAL_EFUSE,
- process_status,
- &halmac_adapter->halmac_state.efuse_state_set
- .fw_return_code,
- 1);
- halmac_adapter->event_trigger.logical_efuse_map = 0;
- }
- }
-
- kfree(eeprom_map);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
- u32 c2h_size)
-{
- u8 h2c_cmd_id, h2c_sub_cmd_id;
- u8 h2c_return_code;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "Ack for C2H!!\n");
-
- h2c_return_code = (u8)H2C_ACK_HDR_GET_H2C_RETURN_CODE(c2h_buf);
- if ((enum halmac_h2c_return_code)h2c_return_code !=
- HALMAC_H2C_RETURN_SUCCESS)
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "C2H_PKT Status Error!! Status = %d\n",
- h2c_return_code);
-
- h2c_cmd_id = (u8)H2C_ACK_HDR_GET_H2C_CMD_ID(c2h_buf);
-
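-	/* Cmd ID 0xFF is a FW offload ack; original H2C acks are not handled */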
- if (h2c_cmd_id != 0xFF) {
- pr_err("original h2c ack is not handled!!\n");
- status = HALMAC_RET_C2H_NOT_HANDLED;
- } else {
- h2c_sub_cmd_id = (u8)H2C_ACK_HDR_GET_H2C_SUB_CMD_ID(c2h_buf);
-
- switch (h2c_sub_cmd_id) {
- case H2C_SUB_CMD_ID_DUMP_PHYSICAL_EFUSE_ACK:
- status = halmac_parse_h2c_ack_phy_efuse_88xx(
- halmac_adapter, c2h_buf, c2h_size);
- break;
- case H2C_SUB_CMD_ID_CFG_PARAMETER_ACK:
- status = halmac_parse_h2c_ack_cfg_para_88xx(
- halmac_adapter, c2h_buf, c2h_size);
- break;
- case H2C_SUB_CMD_ID_UPDATE_PACKET_ACK:
- status = halmac_parse_h2c_ack_update_packet_88xx(
- halmac_adapter, c2h_buf, c2h_size);
- break;
- case H2C_SUB_CMD_ID_UPDATE_DATAPACK_ACK:
- status = halmac_parse_h2c_ack_update_datapack_88xx(
- halmac_adapter, c2h_buf, c2h_size);
- break;
- case H2C_SUB_CMD_ID_RUN_DATAPACK_ACK:
- status = halmac_parse_h2c_ack_run_datapack_88xx(
- halmac_adapter, c2h_buf, c2h_size);
- break;
- case H2C_SUB_CMD_ID_CHANNEL_SWITCH_ACK:
- status = halmac_parse_h2c_ack_channel_switch_88xx(
- halmac_adapter, c2h_buf, c2h_size);
- break;
- case H2C_SUB_CMD_ID_IQK_ACK:
- status = halmac_parse_h2c_ack_iqk_88xx(
- halmac_adapter, c2h_buf, c2h_size);
- break;
- case H2C_SUB_CMD_ID_POWER_TRACKING_ACK:
- status = halmac_parse_h2c_ack_power_tracking_88xx(
- halmac_adapter, c2h_buf, c2h_size);
- break;
- case H2C_SUB_CMD_ID_PSD_ACK:
- break;
- default:
- pr_err("h2c_sub_cmd_id switch case out of boundary!!\n");
- status = HALMAC_RET_C2H_NOT_HANDLED;
- break;
- }
- }
-
- return status;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_phy_efuse_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- u8 h2c_seq = 0;
- u8 h2c_return_code;
- void *driver_adapter = halmac_adapter->driver_adapter;
-
- h2c_seq = (u8)H2C_ACK_HDR_GET_H2C_SEQ(c2h_buf);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]Seq num : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.efuse_state_set.seq_num,
- h2c_seq);
- if (h2c_seq != halmac_adapter->halmac_state.efuse_state_set.seq_num) {
- pr_err("[ERR]Seq num mismatch : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.efuse_state_set.seq_num,
- h2c_seq);
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_adapter->halmac_state.efuse_state_set.process_status !=
- HALMAC_CMD_PROCESS_SENDING) {
- pr_err("[ERR]Not in HALMAC_CMD_PROCESS_SENDING\n");
- return HALMAC_RET_SUCCESS;
- }
-
- h2c_return_code = (u8)H2C_ACK_HDR_GET_H2C_RETURN_CODE(c2h_buf);
- halmac_adapter->halmac_state.efuse_state_set.fw_return_code =
- h2c_return_code;
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_cfg_para_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- u8 h2c_seq = 0;
- u8 h2c_return_code;
- u32 offset_accu = 0, value_accu = 0;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status =
- HALMAC_CMD_PROCESS_UNDEFINE;
-
- h2c_seq = (u8)H2C_ACK_HDR_GET_H2C_SEQ(c2h_buf);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "Seq num : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.cfg_para_state_set.seq_num,
- h2c_seq);
- if (h2c_seq !=
- halmac_adapter->halmac_state.cfg_para_state_set.seq_num) {
- pr_err("Seq num mismatch : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.cfg_para_state_set.seq_num,
- h2c_seq);
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_adapter->halmac_state.cfg_para_state_set.process_status !=
- HALMAC_CMD_PROCESS_SENDING) {
- pr_err("Not in HALMAC_CMD_PROCESS_SENDING\n");
- return HALMAC_RET_SUCCESS;
- }
-
- h2c_return_code = (u8)H2C_ACK_HDR_GET_H2C_RETURN_CODE(c2h_buf);
- halmac_adapter->halmac_state.cfg_para_state_set.fw_return_code =
- h2c_return_code;
- offset_accu = CFG_PARAMETER_ACK_GET_OFFSET_ACCUMULATION(c2h_buf);
- value_accu = CFG_PARAMETER_ACK_GET_VALUE_ACCUMULATION(c2h_buf);
-
- if ((offset_accu !=
- halmac_adapter->config_para_info.offset_accumulation) ||
- (value_accu !=
- halmac_adapter->config_para_info.value_accumulation)) {
- pr_err("[C2H]offset_accu : %x, value_accu : %x!!\n",
- offset_accu, value_accu);
- pr_err("[Adapter]offset_accu : %x, value_accu : %x!!\n",
- halmac_adapter->config_para_info.offset_accumulation,
- halmac_adapter->config_para_info.value_accumulation);
- process_status = HALMAC_CMD_PROCESS_ERROR;
- }
-
- if ((enum halmac_h2c_return_code)h2c_return_code ==
- HALMAC_H2C_RETURN_SUCCESS &&
- process_status != HALMAC_CMD_PROCESS_ERROR) {
- process_status = HALMAC_CMD_PROCESS_DONE;
- halmac_adapter->halmac_state.cfg_para_state_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(driver_adapter,
- HALMAC_FEATURE_CFG_PARA,
- process_status, NULL, 0);
- } else {
- process_status = HALMAC_CMD_PROCESS_ERROR;
- halmac_adapter->halmac_state.cfg_para_state_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(
- driver_adapter, HALMAC_FEATURE_CFG_PARA, process_status,
- &halmac_adapter->halmac_state.cfg_para_state_set
- .fw_return_code,
- 1);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_update_packet_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- u8 h2c_seq = 0;
- u8 h2c_return_code;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status;
-
- h2c_seq = (u8)H2C_ACK_HDR_GET_H2C_SEQ(c2h_buf);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]Seq num : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.update_packet_set.seq_num,
- h2c_seq);
- if (h2c_seq != halmac_adapter->halmac_state.update_packet_set.seq_num) {
- pr_err("[ERR]Seq num mismatch : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.update_packet_set.seq_num,
- h2c_seq);
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_adapter->halmac_state.update_packet_set.process_status !=
- HALMAC_CMD_PROCESS_SENDING) {
- pr_err("[ERR]Not in HALMAC_CMD_PROCESS_SENDING\n");
- return HALMAC_RET_SUCCESS;
- }
-
- h2c_return_code = (u8)H2C_ACK_HDR_GET_H2C_RETURN_CODE(c2h_buf);
- halmac_adapter->halmac_state.update_packet_set.fw_return_code =
- h2c_return_code;
-
- if ((enum halmac_h2c_return_code)h2c_return_code ==
- HALMAC_H2C_RETURN_SUCCESS) {
- process_status = HALMAC_CMD_PROCESS_DONE;
- halmac_adapter->halmac_state.update_packet_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(driver_adapter,
- HALMAC_FEATURE_UPDATE_PACKET,
- process_status, NULL, 0);
- } else {
- process_status = HALMAC_CMD_PROCESS_ERROR;
- halmac_adapter->halmac_state.update_packet_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(
- driver_adapter, HALMAC_FEATURE_UPDATE_PACKET,
- process_status,
- &halmac_adapter->halmac_state.update_packet_set
- .fw_return_code,
- 1);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_update_datapack_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status =
- HALMAC_CMD_PROCESS_UNDEFINE;
-
- PLATFORM_EVENT_INDICATION(driver_adapter,
- HALMAC_FEATURE_UPDATE_DATAPACK,
- process_status, NULL, 0);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_run_datapack_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status =
- HALMAC_CMD_PROCESS_UNDEFINE;
-
- PLATFORM_EVENT_INDICATION(driver_adapter, HALMAC_FEATURE_RUN_DATAPACK,
- process_status, NULL, 0);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_channel_switch_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- u8 h2c_seq = 0;
- u8 h2c_return_code;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status;
-
- h2c_seq = (u8)H2C_ACK_HDR_GET_H2C_SEQ(c2h_buf);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]Seq num : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.scan_state_set.seq_num,
- h2c_seq);
- if (h2c_seq != halmac_adapter->halmac_state.scan_state_set.seq_num) {
-		pr_err("[ERR]Seq num mismatch : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.scan_state_set.seq_num,
- h2c_seq);
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_adapter->halmac_state.scan_state_set.process_status !=
- HALMAC_CMD_PROCESS_SENDING) {
- pr_err("[ERR]Not in HALMAC_CMD_PROCESS_SENDING\n");
- return HALMAC_RET_SUCCESS;
- }
-
- h2c_return_code = (u8)H2C_ACK_HDR_GET_H2C_RETURN_CODE(c2h_buf);
- halmac_adapter->halmac_state.scan_state_set.fw_return_code =
- h2c_return_code;
-
- if ((enum halmac_h2c_return_code)h2c_return_code ==
- HALMAC_H2C_RETURN_SUCCESS) {
- process_status = HALMAC_CMD_PROCESS_RCVD;
- halmac_adapter->halmac_state.scan_state_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(driver_adapter,
- HALMAC_FEATURE_CHANNEL_SWITCH,
- process_status, NULL, 0);
- } else {
- process_status = HALMAC_CMD_PROCESS_ERROR;
- halmac_adapter->halmac_state.scan_state_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(
- driver_adapter, HALMAC_FEATURE_CHANNEL_SWITCH,
- process_status, &halmac_adapter->halmac_state
- .scan_state_set.fw_return_code,
- 1);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_iqk_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- u8 h2c_seq = 0;
- u8 h2c_return_code;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status;
-
- h2c_seq = (u8)H2C_ACK_HDR_GET_H2C_SEQ(c2h_buf);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]Seq num : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.iqk_set.seq_num, h2c_seq);
- if (h2c_seq != halmac_adapter->halmac_state.iqk_set.seq_num) {
-		pr_err("[ERR]Seq num mismatch : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.iqk_set.seq_num, h2c_seq);
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_adapter->halmac_state.iqk_set.process_status !=
- HALMAC_CMD_PROCESS_SENDING) {
- pr_err("[ERR]Not in HALMAC_CMD_PROCESS_SENDING\n");
- return HALMAC_RET_SUCCESS;
- }
-
- h2c_return_code = (u8)H2C_ACK_HDR_GET_H2C_RETURN_CODE(c2h_buf);
- halmac_adapter->halmac_state.iqk_set.fw_return_code = h2c_return_code;
-
- if ((enum halmac_h2c_return_code)h2c_return_code ==
- HALMAC_H2C_RETURN_SUCCESS) {
- process_status = HALMAC_CMD_PROCESS_DONE;
- halmac_adapter->halmac_state.iqk_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(driver_adapter, HALMAC_FEATURE_IQK,
- process_status, NULL, 0);
- } else {
- process_status = HALMAC_CMD_PROCESS_ERROR;
- halmac_adapter->halmac_state.iqk_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(
- driver_adapter, HALMAC_FEATURE_IQK, process_status,
- &halmac_adapter->halmac_state.iqk_set.fw_return_code,
- 1);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_parse_h2c_ack_power_tracking_88xx(struct halmac_adapter *halmac_adapter,
- u8 *c2h_buf, u32 c2h_size)
-{
- u8 h2c_seq = 0;
- u8 h2c_return_code;
- void *driver_adapter = halmac_adapter->driver_adapter;
- enum halmac_cmd_process_status process_status;
-
- h2c_seq = (u8)H2C_ACK_HDR_GET_H2C_SEQ(c2h_buf);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
- "[TRACE]Seq num : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.power_tracking_set.seq_num,
- h2c_seq);
- if (h2c_seq !=
- halmac_adapter->halmac_state.power_tracking_set.seq_num) {
- pr_err("[ERR]Seq num mismatch : h2c -> %d c2h -> %d\n",
- halmac_adapter->halmac_state.power_tracking_set.seq_num,
- h2c_seq);
- return HALMAC_RET_SUCCESS;
- }
-
- if (halmac_adapter->halmac_state.power_tracking_set.process_status !=
- HALMAC_CMD_PROCESS_SENDING) {
- pr_err("[ERR]Not in HALMAC_CMD_PROCESS_SENDING\n");
- return HALMAC_RET_SUCCESS;
- }
-
- h2c_return_code = (u8)H2C_ACK_HDR_GET_H2C_RETURN_CODE(c2h_buf);
- halmac_adapter->halmac_state.power_tracking_set.fw_return_code =
- h2c_return_code;
-
- if ((enum halmac_h2c_return_code)h2c_return_code ==
- HALMAC_H2C_RETURN_SUCCESS) {
- process_status = HALMAC_CMD_PROCESS_DONE;
- halmac_adapter->halmac_state.power_tracking_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(driver_adapter,
- HALMAC_FEATURE_POWER_TRACKING,
- process_status, NULL, 0);
- } else {
- process_status = HALMAC_CMD_PROCESS_ERROR;
- halmac_adapter->halmac_state.power_tracking_set.process_status =
- process_status;
- PLATFORM_EVENT_INDICATION(
- driver_adapter, HALMAC_FEATURE_POWER_TRACKING,
- process_status,
- &halmac_adapter->halmac_state.power_tracking_set
- .fw_return_code,
- 1);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_convert_to_sdio_bus_offset_88xx(struct halmac_adapter *halmac_adapter,
- u32 *halmac_offset)
-{
- void *driver_adapter = NULL;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
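-	/* Map a MAC reg or SDIO local offset into SDIO cmd address space */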
- switch ((*halmac_offset) & 0xFFFF0000) {
- case WLAN_IOREG_OFFSET:
- *halmac_offset = (HALMAC_SDIO_CMD_ADDR_MAC_REG << 13) |
- (*halmac_offset & HALMAC_WLAN_MAC_REG_MSK);
- break;
- case SDIO_LOCAL_OFFSET:
- *halmac_offset = (HALMAC_SDIO_CMD_ADDR_SDIO_REG << 13) |
- (*halmac_offset & HALMAC_SDIO_LOCAL_MSK);
- break;
- default:
- *halmac_offset = 0xFFFFFFFF;
- pr_err("Unknown base address!!\n");
- return HALMAC_RET_CONVERT_SDIO_OFFSET_FAIL;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_update_sdio_free_page_88xx(struct halmac_adapter *halmac_adapter)
-{
- u32 free_page = 0, free_page2 = 0, free_page3 = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- struct halmac_sdio_free_space *sdio_free_space;
- u8 data[12] = {0};
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- sdio_free_space = &halmac_adapter->sdio_free_space;
-	/* need to use HALMAC_REG_READ_N, 20160316, Soar */
- HALMAC_REG_SDIO_CMD53_READ_N(halmac_adapter, REG_SDIO_FREE_TXPG, 12,
- data);
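-	/* The 12 byte burst read holds three little-endian free page words */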
- free_page =
- data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
- free_page2 =
- data[4] | (data[5] << 8) | (data[6] << 16) | (data[7] << 24);
- free_page3 =
- data[8] | (data[9] << 8) | (data[10] << 16) | (data[11] << 24);
-
- sdio_free_space->high_queue_number =
- (u16)BIT_GET_HIQ_FREEPG_V1(free_page);
- sdio_free_space->normal_queue_number =
- (u16)BIT_GET_MID_FREEPG_V1(free_page);
- sdio_free_space->low_queue_number =
- (u16)BIT_GET_LOW_FREEPG_V1(free_page2);
- sdio_free_space->public_queue_number =
- (u16)BIT_GET_PUB_FREEPG_V1(free_page2);
- sdio_free_space->extra_queue_number =
- (u16)BIT_GET_EXQ_FREEPG_V1(free_page3);
- sdio_free_space->ac_oqt_number = (u8)((free_page3 >> 16) & 0xFF);
- sdio_free_space->non_ac_oqt_number = (u8)((free_page3 >> 24) & 0xFF);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_update_oqt_free_space_88xx(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- struct halmac_sdio_free_space *sdio_free_space;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- sdio_free_space = &halmac_adapter->sdio_free_space;
-
- sdio_free_space->ac_oqt_number = HALMAC_REG_READ_8(
- halmac_adapter, REG_SDIO_OQT_FREE_TXPG_V1 + 2);
- sdio_free_space->ac_empty =
- HALMAC_REG_READ_8(halmac_adapter, REG_TXPKT_EMPTY);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s <==========\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_efuse_cmd_construct_state
-halmac_query_efuse_curr_state_88xx(struct halmac_adapter *halmac_adapter)
-{
- return halmac_adapter->halmac_state.efuse_state_set
- .efuse_cmd_construct_state;
-}
-
-enum halmac_ret_status halmac_transition_efuse_state_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_cmd_construct_state dest_state)
-{
- struct halmac_efuse_state_set *efuse_state =
- &halmac_adapter->halmac_state.efuse_state_set;
-
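-	/* Allow IDLE -> BUSY -> H2C_SENT and a return to IDLE, nothing else */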
- if (efuse_state->efuse_cmd_construct_state !=
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE &&
- efuse_state->efuse_cmd_construct_state !=
- HALMAC_EFUSE_CMD_CONSTRUCT_BUSY &&
- efuse_state->efuse_cmd_construct_state !=
- HALMAC_EFUSE_CMD_CONSTRUCT_H2C_SENT)
- return HALMAC_RET_ERROR_STATE;
-
- if (efuse_state->efuse_cmd_construct_state == dest_state)
- return HALMAC_RET_ERROR_STATE;
-
- if (dest_state == HALMAC_EFUSE_CMD_CONSTRUCT_BUSY) {
- if (efuse_state->efuse_cmd_construct_state ==
- HALMAC_EFUSE_CMD_CONSTRUCT_H2C_SENT)
- return HALMAC_RET_ERROR_STATE;
- } else if (dest_state == HALMAC_EFUSE_CMD_CONSTRUCT_H2C_SENT) {
- if (efuse_state->efuse_cmd_construct_state ==
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE)
- return HALMAC_RET_ERROR_STATE;
- }
-
- efuse_state->efuse_cmd_construct_state = dest_state;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_cfg_para_cmd_construct_state
-halmac_query_cfg_para_curr_state_88xx(struct halmac_adapter *halmac_adapter)
-{
- return halmac_adapter->halmac_state.cfg_para_state_set
- .cfg_para_cmd_construct_state;
-}
-
-enum halmac_ret_status halmac_transition_cfg_para_state_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cfg_para_cmd_construct_state dest_state)
-{
- struct halmac_cfg_para_state_set *cfg_para =
- &halmac_adapter->halmac_state.cfg_para_state_set;
-
- if (cfg_para->cfg_para_cmd_construct_state !=
- HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE &&
- cfg_para->cfg_para_cmd_construct_state !=
- HALMAC_CFG_PARA_CMD_CONSTRUCT_CONSTRUCTING &&
- cfg_para->cfg_para_cmd_construct_state !=
- HALMAC_CFG_PARA_CMD_CONSTRUCT_H2C_SENT)
- return HALMAC_RET_ERROR_STATE;
-
- if (dest_state == HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE) {
- if (cfg_para->cfg_para_cmd_construct_state ==
- HALMAC_CFG_PARA_CMD_CONSTRUCT_CONSTRUCTING)
- return HALMAC_RET_ERROR_STATE;
- } else if (dest_state == HALMAC_CFG_PARA_CMD_CONSTRUCT_CONSTRUCTING) {
- if (cfg_para->cfg_para_cmd_construct_state ==
- HALMAC_CFG_PARA_CMD_CONSTRUCT_H2C_SENT)
- return HALMAC_RET_ERROR_STATE;
- } else if (dest_state == HALMAC_CFG_PARA_CMD_CONSTRUCT_H2C_SENT) {
- if (cfg_para->cfg_para_cmd_construct_state ==
- HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE ||
- cfg_para->cfg_para_cmd_construct_state ==
- HALMAC_CFG_PARA_CMD_CONSTRUCT_H2C_SENT)
- return HALMAC_RET_ERROR_STATE;
- }
-
- cfg_para->cfg_para_cmd_construct_state = dest_state;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_scan_cmd_construct_state
-halmac_query_scan_curr_state_88xx(struct halmac_adapter *halmac_adapter)
-{
- return halmac_adapter->halmac_state.scan_state_set
- .scan_cmd_construct_state;
-}
-
-enum halmac_ret_status halmac_transition_scan_state_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_scan_cmd_construct_state dest_state)
-{
- struct halmac_scan_state_set *scan =
- &halmac_adapter->halmac_state.scan_state_set;
-
- if (scan->scan_cmd_construct_state > HALMAC_SCAN_CMD_CONSTRUCT_H2C_SENT)
- return HALMAC_RET_ERROR_STATE;
-
- if (dest_state == HALMAC_SCAN_CMD_CONSTRUCT_IDLE) {
- if (scan->scan_cmd_construct_state ==
- HALMAC_SCAN_CMD_CONSTRUCT_BUFFER_CLEARED ||
- scan->scan_cmd_construct_state ==
- HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING)
- return HALMAC_RET_ERROR_STATE;
- } else if (dest_state == HALMAC_SCAN_CMD_CONSTRUCT_BUFFER_CLEARED) {
- if (scan->scan_cmd_construct_state ==
- HALMAC_SCAN_CMD_CONSTRUCT_H2C_SENT)
- return HALMAC_RET_ERROR_STATE;
- } else if (dest_state == HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING) {
- if (scan->scan_cmd_construct_state ==
- HALMAC_SCAN_CMD_CONSTRUCT_IDLE ||
- scan->scan_cmd_construct_state ==
- HALMAC_SCAN_CMD_CONSTRUCT_H2C_SENT)
- return HALMAC_RET_ERROR_STATE;
- } else if (dest_state == HALMAC_SCAN_CMD_CONSTRUCT_H2C_SENT) {
- if (scan->scan_cmd_construct_state !=
- HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING &&
- scan->scan_cmd_construct_state !=
- HALMAC_SCAN_CMD_CONSTRUCT_BUFFER_CLEARED)
- return HALMAC_RET_ERROR_STATE;
- }
-
- scan->scan_cmd_construct_state = dest_state;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_query_cfg_para_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size)
-{
- struct halmac_cfg_para_state_set *cfg_para_state_set =
- &halmac_adapter->halmac_state.cfg_para_state_set;
-
- *process_status = cfg_para_state_set->process_status;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_query_dump_physical_efuse_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size)
-{
- void *driver_adapter = NULL;
- struct halmac_efuse_state_set *efuse_state_set =
- &halmac_adapter->halmac_state.efuse_state_set;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- *process_status = efuse_state_set->process_status;
-
- if (!data)
- return HALMAC_RET_NULL_POINTER;
-
- if (!size)
- return HALMAC_RET_NULL_POINTER;
-
- if (*process_status == HALMAC_CMD_PROCESS_DONE) {
- if (*size < halmac_adapter->hw_config_info.efuse_size) {
- *size = halmac_adapter->hw_config_info.efuse_size;
- return HALMAC_RET_BUFFER_TOO_SMALL;
- }
-
- *size = halmac_adapter->hw_config_info.efuse_size;
- memcpy(data, halmac_adapter->hal_efuse_map, *size);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_query_dump_logical_efuse_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size)
-{
- u8 *eeprom_map = NULL;
- u32 eeprom_size = halmac_adapter->hw_config_info.eeprom_size;
- void *driver_adapter = NULL;
- struct halmac_efuse_state_set *efuse_state_set =
- &halmac_adapter->halmac_state.efuse_state_set;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- *process_status = efuse_state_set->process_status;
-
- if (!data)
- return HALMAC_RET_NULL_POINTER;
-
- if (!size)
- return HALMAC_RET_NULL_POINTER;
-
- if (*process_status == HALMAC_CMD_PROCESS_DONE) {
- if (*size < eeprom_size) {
- *size = eeprom_size;
- return HALMAC_RET_BUFFER_TOO_SMALL;
- }
-
- *size = eeprom_size;
-
- eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map)
- return HALMAC_RET_MALLOC_FAIL;
- memset(eeprom_map, 0xFF, eeprom_size);
-
- if (halmac_eeprom_parser_88xx(
- halmac_adapter, halmac_adapter->hal_efuse_map,
- eeprom_map) != HALMAC_RET_SUCCESS) {
- kfree(eeprom_map);
- return HALMAC_RET_EEPROM_PARSING_FAIL;
- }
-
- memcpy(data, eeprom_map, *size);
-
- kfree(eeprom_map);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_query_channel_switch_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size)
-{
- struct halmac_scan_state_set *scan_state_set =
- &halmac_adapter->halmac_state.scan_state_set;
-
- *process_status = scan_state_set->process_status;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_query_update_packet_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size)
-{
- struct halmac_update_packet_state_set *update_packet_set =
- &halmac_adapter->halmac_state.update_packet_set;
-
- *process_status = update_packet_set->process_status;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_query_iqk_status_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status,
- u8 *data, u32 *size)
-{
- struct halmac_iqk_state_set *iqk_set =
- &halmac_adapter->halmac_state.iqk_set;
-
- *process_status = iqk_set->process_status;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status halmac_query_power_tracking_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size)
-{
- struct halmac_power_tracking_state_set *power_tracking_state_set =
- &halmac_adapter->halmac_state.power_tracking_set;
-
- *process_status = power_tracking_state_set->process_status;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_query_psd_status_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status,
- u8 *data, u32 *size)
-{
- void *driver_adapter = NULL;
- struct halmac_psd_state_set *psd_set =
- &halmac_adapter->halmac_state.psd_set;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- *process_status = psd_set->process_status;
-
- if (!data)
- return HALMAC_RET_NULL_POINTER;
-
- if (!size)
- return HALMAC_RET_NULL_POINTER;
-
- if (*process_status == HALMAC_CMD_PROCESS_DONE) {
- if (*size < psd_set->data_size) {
- *size = psd_set->data_size;
- return HALMAC_RET_BUFFER_TOO_SMALL;
- }
-
- *size = psd_set->data_size;
- memcpy(data, psd_set->data, *size);
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_verify_io_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 value8, wvalue8;
- u32 value32, value32_2, wvalue32;
- u32 halmac_offset;
- void *driver_adapter = NULL;
- enum halmac_ret_status ret_status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- halmac_offset = REG_PAGE5_DUMMY;
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- ret_status = halmac_convert_to_sdio_bus_offset_88xx(
- halmac_adapter, &halmac_offset);
-
- /* Verify CMD52 R/W */
- wvalue8 = 0xab;
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset,
- wvalue8);
-
- value8 =
- PLATFORM_SDIO_CMD52_READ(driver_adapter, halmac_offset);
-
- if (value8 != wvalue8) {
- pr_err("cmd52 r/w fail write = %X read = %X\n", wvalue8,
- value8);
- ret_status = HALMAC_RET_PLATFORM_API_INCORRECT;
- } else {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "cmd52 r/w ok\n");
- }
-
- /* Verify CMD53 R/W */
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset, 0xaa);
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset + 1,
- 0xbb);
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset + 2,
- 0xcc);
- PLATFORM_SDIO_CMD52_WRITE(driver_adapter, halmac_offset + 3,
- 0xdd);
-
- value32 = PLATFORM_SDIO_CMD53_READ_32(driver_adapter,
- halmac_offset);
-
- if (value32 != 0xddccbbaa) {
- pr_err("cmd53 r fail : read = %X\n", value32);
- ret_status = HALMAC_RET_PLATFORM_API_INCORRECT;
- } else {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "cmd53 r ok\n");
- }
-
- wvalue32 = 0x11223344;
- PLATFORM_SDIO_CMD53_WRITE_32(driver_adapter, halmac_offset,
- wvalue32);
-
- value32 = PLATFORM_SDIO_CMD53_READ_32(driver_adapter,
- halmac_offset);
-
- if (value32 != wvalue32) {
- pr_err("cmd53 w fail\n");
- ret_status = HALMAC_RET_PLATFORM_API_INCORRECT;
- } else {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "cmd53 w ok\n");
- }
-
- value32 = PLATFORM_SDIO_CMD53_READ_32(
- driver_adapter,
- halmac_offset + 2); /* value32 should be 0x33441122 */
-
- wvalue32 = 0x11225566;
- PLATFORM_SDIO_CMD53_WRITE_32(driver_adapter, halmac_offset,
- wvalue32);
-
- value32_2 = PLATFORM_SDIO_CMD53_READ_32(
- driver_adapter,
- halmac_offset + 2); /* value32 should be 0x55661122 */
- if (value32_2 == value32) {
- pr_err("cmd52 is used for HAL_SDIO_CMD53_READ_32\n");
- ret_status = HALMAC_RET_PLATFORM_API_INCORRECT;
- } else {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "cmd53 is correctly used\n");
- }
- } else {
- wvalue32 = 0x77665511;
- PLATFORM_REG_WRITE_32(driver_adapter, REG_PAGE5_DUMMY,
- wvalue32);
-
- value32 = PLATFORM_REG_READ_32(driver_adapter, REG_PAGE5_DUMMY);
- if (value32 != wvalue32) {
-			pr_err("reg r/w fail write = %X read = %X\n",
-			       wvalue32, value32);
- ret_status = HALMAC_RET_PLATFORM_API_INCORRECT;
- } else {
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "reg rw ok\n");
- }
- }
-
- return ret_status;
-}
-
-enum halmac_ret_status
-halmac_verify_send_rsvd_page_88xx(struct halmac_adapter *halmac_adapter)
-{
- u8 *rsvd_buf = NULL;
- u8 *rsvd_page = NULL;
- u32 i;
- u32 h2c_pkt_verify_size = 64, h2c_pkt_verify_payload = 0xab;
- void *driver_adapter = NULL;
- enum halmac_ret_status ret_status = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
-	rsvd_buf = kmalloc(h2c_pkt_verify_size, GFP_KERNEL);
-
- if (!rsvd_buf)
- return HALMAC_RET_MALLOC_FAIL;
-
- memset(rsvd_buf, (u8)h2c_pkt_verify_payload, h2c_pkt_verify_size);
-
- ret_status = halmac_download_rsvd_page_88xx(halmac_adapter, rsvd_buf,
- h2c_pkt_verify_size);
-
- if (ret_status != HALMAC_RET_SUCCESS) {
- kfree(rsvd_buf);
- return ret_status;
- }
-
- rsvd_page = kzalloc(h2c_pkt_verify_size +
- halmac_adapter->hw_config_info.txdesc_size,
- GFP_KERNEL);
-
- if (!rsvd_page) {
- kfree(rsvd_buf);
- return HALMAC_RET_MALLOC_FAIL;
- }
-
- ret_status = halmac_dump_fifo_88xx(
- halmac_adapter, HAL_FIFO_SEL_RSVD_PAGE, 0,
- h2c_pkt_verify_size +
- halmac_adapter->hw_config_info.txdesc_size,
- rsvd_page);
-
- if (ret_status != HALMAC_RET_SUCCESS) {
- kfree(rsvd_buf);
- kfree(rsvd_page);
- return ret_status;
- }
-
- for (i = 0; i < h2c_pkt_verify_size; i++) {
- if (*(rsvd_buf + i) !=
- *(rsvd_page +
- (i + halmac_adapter->hw_config_info.txdesc_size))) {
- pr_err("[ERR]Compare RSVD page Fail\n");
- ret_status = HALMAC_RET_PLATFORM_API_INCORRECT;
- }
- }
-
- kfree(rsvd_buf);
- kfree(rsvd_page);
-
- return ret_status;
-}
-
-void halmac_power_save_cb_88xx(void *cb_data)
-{
- void *driver_adapter = NULL;
- struct halmac_adapter *halmac_adapter = (struct halmac_adapter *)NULL;
-
- halmac_adapter = (struct halmac_adapter *)cb_data;
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_PWR, DBG_DMESG,
- "%s\n", __func__);
-}
-
-enum halmac_ret_status
-halmac_buffer_read_88xx(struct halmac_adapter *halmac_adapter, u32 offset,
- u32 size, enum hal_fifo_sel halmac_fifo_sel,
- u8 *fifo_map)
-{
- u32 start_page, value_read;
- u32 i, counter = 0, residue;
- struct halmac_api *halmac_api;
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (halmac_fifo_sel == HAL_FIFO_SEL_RSVD_PAGE)
- offset = offset +
- (halmac_adapter->txff_allocation.rsvd_pg_bndy << 7);
-
- start_page = offset >> 12;
- residue = offset & (4096 - 1);
-
- if (halmac_fifo_sel == HAL_FIFO_SEL_TX ||
- halmac_fifo_sel == HAL_FIFO_SEL_RSVD_PAGE)
- start_page += 0x780;
- else if (halmac_fifo_sel == HAL_FIFO_SEL_RX)
- start_page += 0x700;
- else if (halmac_fifo_sel == HAL_FIFO_SEL_REPORT)
- start_page += 0x660;
- else if (halmac_fifo_sel == HAL_FIFO_SEL_LLT)
- start_page += 0x650;
- else
- return HALMAC_RET_NOT_SUPPORT;
-
- value_read = HALMAC_REG_READ_16(halmac_adapter, REG_PKTBUF_DBG_CTRL);
-
- do {
- HALMAC_REG_WRITE_16(halmac_adapter, REG_PKTBUF_DBG_CTRL,
- (u16)(start_page | (value_read & 0xF000)));
-
- for (i = 0x8000 + residue; i <= 0x8FFF; i += 4) {
- *(u32 *)(fifo_map + counter) =
- HALMAC_REG_READ_32(halmac_adapter, i);
- *(u32 *)(fifo_map + counter) =
- le32_to_cpu(*(__le32 *)(fifo_map + counter));
- counter += 4;
- if (size == counter)
- goto HALMAC_BUF_READ_OK;
- }
-
- residue = 0;
- start_page++;
- } while (1);
-
-HALMAC_BUF_READ_OK:
- HALMAC_REG_WRITE_16(halmac_adapter, REG_PKTBUF_DBG_CTRL,
- (u16)value_read);
-
- return HALMAC_RET_SUCCESS;
-}
-
-void halmac_restore_mac_register_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_restore_info *restore_info,
- u32 restore_num)
-{
- u8 value_length;
- u32 i;
- u32 mac_register;
- u32 mac_value;
- struct halmac_api *halmac_api;
- struct halmac_restore_info *curr_restore_info = restore_info;
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- for (i = 0; i < restore_num; i++) {
- mac_register = curr_restore_info->mac_register;
- mac_value = curr_restore_info->value;
- value_length = curr_restore_info->length;
-
- if (value_length == 1)
- HALMAC_REG_WRITE_8(halmac_adapter, mac_register,
- (u8)mac_value);
- else if (value_length == 2)
- HALMAC_REG_WRITE_16(halmac_adapter, mac_register,
- (u16)mac_value);
- else if (value_length == 4)
- HALMAC_REG_WRITE_32(halmac_adapter, mac_register,
- mac_value);
-
- curr_restore_info++;
- }
-}
-
-void halmac_api_record_id_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_api_id api_id)
-{
-}
-
-enum halmac_ret_status
-halmac_set_usb_mode_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_usb_mode usb_mode)
-{
- u32 usb_temp;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- enum halmac_usb_mode current_usb_mode;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- current_usb_mode =
- HALMAC_REG_READ_8(halmac_adapter, REG_SYS_CFG2 + 3) == 0x20 ?
- HALMAC_USB_MODE_U3 :
- HALMAC_USB_MODE_U2;
-
-	/* check if HW supports usb2/usb3 switch */
- usb_temp = HALMAC_REG_READ_32(halmac_adapter, REG_PAD_CTRL2);
- if (!BIT_GET_USB23_SW_MODE_V1(usb_temp) &&
- !(usb_temp & BIT_USB3_USB2_TRANSITION)) {
-		pr_err("HALMAC_HW_USB_MODE usb mode not supported by HW\n");
- return HALMAC_RET_USB2_3_SWITCH_UNSUPPORT;
- }
-
- if (usb_mode == current_usb_mode) {
-		pr_err("HALMAC_HW_USB_MODE usb mode unchanged\n");
- return HALMAC_RET_USB_MODE_UNCHANGE;
- }
-
- usb_temp &= ~(BIT_USB23_SW_MODE_V1(0x3));
-
- if (usb_mode == HALMAC_USB_MODE_U2) {
- /* usb3 to usb2 */
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_PAD_CTRL2,
- usb_temp | BIT_USB23_SW_MODE_V1(HALMAC_USB_MODE_U2) |
- BIT_RSM_EN_V1);
- } else {
- /* usb2 to usb3 */
- HALMAC_REG_WRITE_32(
- halmac_adapter, REG_PAD_CTRL2,
- usb_temp | BIT_USB23_SW_MODE_V1(HALMAC_USB_MODE_U3) |
- BIT_RSM_EN_V1);
- }
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PAD_CTRL2 + 1,
- 4); /* set counter down timer 4x64 ms */
- HALMAC_REG_WRITE_16(
- halmac_adapter, REG_SYS_PW_CTRL,
- HALMAC_REG_READ_16(halmac_adapter, REG_SYS_PW_CTRL) |
- BIT_APFM_OFFMAC);
- usleep_range(1000, 1100);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_PAD_CTRL2,
- HALMAC_REG_READ_32(halmac_adapter, REG_PAD_CTRL2) |
- BIT_NO_PDN_CHIPOFF_V1);
-
- return HALMAC_RET_SUCCESS;
-}
-
-void halmac_enable_bb_rf_88xx(struct halmac_adapter *halmac_adapter, u8 enable)
-{
- u8 value8;
- u32 value32;
- struct halmac_api *halmac_api;
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (enable == 1) {
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_SYS_FUNC_EN);
- value8 = value8 | BIT(0) | BIT(1);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SYS_FUNC_EN, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_RF_CTRL);
- value8 = value8 | BIT(0) | BIT(1) | BIT(2);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RF_CTRL, value8);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_WLRF1);
- value32 = value32 | BIT(24) | BIT(25) | BIT(26);
- HALMAC_REG_WRITE_32(halmac_adapter, REG_WLRF1, value32);
- } else {
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_SYS_FUNC_EN);
- value8 = value8 & (~(BIT(0) | BIT(1)));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_SYS_FUNC_EN, value8);
-
- value8 = HALMAC_REG_READ_8(halmac_adapter, REG_RF_CTRL);
- value8 = value8 & (~(BIT(0) | BIT(1) | BIT(2)));
- HALMAC_REG_WRITE_8(halmac_adapter, REG_RF_CTRL, value8);
-
- value32 = HALMAC_REG_READ_32(halmac_adapter, REG_WLRF1);
- value32 = value32 & (~(BIT(24) | BIT(25) | BIT(26)));
- HALMAC_REG_WRITE_32(halmac_adapter, REG_WLRF1, value32);
- }
-}
-
-void halmac_config_sdio_tx_page_threshold_88xx(
- struct halmac_adapter *halmac_adapter,
- struct halmac_tx_page_threshold_info *threshold_info)
-{
- struct halmac_api *halmac_api;
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- switch (threshold_info->dma_queue_sel) {
- case HALMAC_MAP2_HQ:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_TQPNT1,
- threshold_info->threshold);
- break;
- case HALMAC_MAP2_NQ:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_TQPNT2,
- threshold_info->threshold);
- break;
- case HALMAC_MAP2_LQ:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_TQPNT3,
- threshold_info->threshold);
- break;
- case HALMAC_MAP2_EXQ:
- HALMAC_REG_WRITE_32(halmac_adapter, REG_TQPNT4,
- threshold_info->threshold);
- break;
- default:
- break;
- }
-}
-
-void halmac_config_ampdu_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ampdu_config *ampdu_config)
-{
- struct halmac_api *halmac_api;
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PROT_MODE_CTRL + 2,
- ampdu_config->max_agg_num);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PROT_MODE_CTRL + 3,
- ampdu_config->max_agg_num);
-}
-
-enum halmac_ret_status
-halmac_check_oqt_88xx(struct halmac_adapter *halmac_adapter, u32 tx_agg_num,
- u8 *halmac_buf)
-{
- u32 counter = 10;
-
-	/* S0, S1 are not allowed to be used; 0x4E4[0] should be 0. Soar 20160323 */
-	/* No need to check non_ac_oqt_number: a blocked HI or MGQ queue causes
-	 * a protocol issue before H_OQT becomes full.
-	 */
- switch ((enum halmac_queue_select)GET_TX_DESC_QSEL(halmac_buf)) {
- case HALMAC_QUEUE_SELECT_VO:
- case HALMAC_QUEUE_SELECT_VO_V2:
- case HALMAC_QUEUE_SELECT_VI:
- case HALMAC_QUEUE_SELECT_VI_V2:
- case HALMAC_QUEUE_SELECT_BE:
- case HALMAC_QUEUE_SELECT_BE_V2:
- case HALMAC_QUEUE_SELECT_BK:
- case HALMAC_QUEUE_SELECT_BK_V2:
- counter = 10;
- do {
- if (halmac_adapter->sdio_free_space.ac_empty > 0) {
- halmac_adapter->sdio_free_space.ac_empty -= 1;
- break;
- }
-
- if (halmac_adapter->sdio_free_space.ac_oqt_number >=
- tx_agg_num) {
- halmac_adapter->sdio_free_space.ac_oqt_number -=
- (u8)tx_agg_num;
- break;
- }
-
- halmac_update_oqt_free_space_88xx(halmac_adapter);
-
- counter--;
- if (counter == 0)
- return HALMAC_RET_OQT_NOT_ENOUGH;
- } while (1);
- break;
- default:
- break;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_rqpn_parser_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode,
- struct halmac_rqpn_ *rqpn_table)
-{
- u8 search_flag;
- u32 i;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- search_flag = 0;
- for (i = 0; i < HALMAC_TRX_MODE_MAX; i++) {
- if (halmac_trx_mode == rqpn_table[i].mode) {
- halmac_adapter
- ->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VO] =
- rqpn_table[i].dma_map_vo;
- halmac_adapter
- ->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_VI] =
- rqpn_table[i].dma_map_vi;
- halmac_adapter
- ->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BE] =
- rqpn_table[i].dma_map_be;
- halmac_adapter
- ->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_BK] =
- rqpn_table[i].dma_map_bk;
- halmac_adapter
- ->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_MG] =
- rqpn_table[i].dma_map_mg;
- halmac_adapter
- ->halmac_ptcl_queue[HALMAC_PTCL_QUEUE_HI] =
- rqpn_table[i].dma_map_hi;
- search_flag = 1;
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "%s done\n", __func__);
- break;
- }
- }
-
- if (search_flag == 0) {
-		pr_err("HALMAC_RET_TRX_MODE_NOT_SUPPORT: trx mode not supported\n");
- return HALMAC_RET_TRX_MODE_NOT_SUPPORT;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_pg_num_parser_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode,
- struct halmac_pg_num_ *pg_num_table)
-{
- u8 search_flag;
-	u16 HPQ_num = 0, LPQ_num = 0, NPQ_num = 0, GAPQ_num = 0;
- u16 EXPQ_num = 0, PUBQ_num = 0;
- u32 i = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- search_flag = 0;
- for (i = 0; i < HALMAC_TRX_MODE_MAX; i++) {
- if (halmac_trx_mode == pg_num_table[i].mode) {
- HPQ_num = pg_num_table[i].hq_num;
-			LPQ_num = pg_num_table[i].lq_num;
- NPQ_num = pg_num_table[i].nq_num;
- EXPQ_num = pg_num_table[i].exq_num;
- GAPQ_num = pg_num_table[i].gap_num;
- PUBQ_num = halmac_adapter->txff_allocation.ac_q_pg_num -
-				   HPQ_num - LPQ_num - NPQ_num - EXPQ_num -
- GAPQ_num;
- search_flag = 1;
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT,
- DBG_DMESG, "%s done\n", __func__);
- break;
- }
- }
-
- if (search_flag == 0) {
-		pr_err("HALMAC_RET_TRX_MODE_NOT_SUPPORT: trx mode not supported\n");
- return HALMAC_RET_TRX_MODE_NOT_SUPPORT;
- }
-
- if (halmac_adapter->txff_allocation.ac_q_pg_num <
-	    HPQ_num + LPQ_num + NPQ_num + EXPQ_num + GAPQ_num)
- return HALMAC_RET_CFG_TXFIFO_PAGE_FAIL;
-
- halmac_adapter->txff_allocation.high_queue_pg_num = HPQ_num;
-	halmac_adapter->txff_allocation.low_queue_pg_num = LPQ_num;
- halmac_adapter->txff_allocation.normal_queue_pg_num = NPQ_num;
- halmac_adapter->txff_allocation.extra_queue_pg_num = EXPQ_num;
- halmac_adapter->txff_allocation.pub_queue_pg_num = PUBQ_num;
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_parse_intf_phy_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_intf_phy_para_ *intf_phy_para,
- enum halmac_intf_phy_platform platform,
- enum hal_intf_phy intf_phy)
-{
- u16 value;
- u16 curr_cut;
- u16 offset;
- u16 ip_sel;
- struct halmac_intf_phy_para_ *curr_phy_para;
- struct halmac_api *halmac_api;
- void *driver_adapter = NULL;
-	enum halmac_ret_status result = HALMAC_RET_SUCCESS;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- switch (halmac_adapter->chip_version) {
- case HALMAC_CHIP_VER_A_CUT:
- curr_cut = (u16)HALMAC_INTF_PHY_CUT_A;
- break;
- case HALMAC_CHIP_VER_B_CUT:
- curr_cut = (u16)HALMAC_INTF_PHY_CUT_B;
- break;
- case HALMAC_CHIP_VER_C_CUT:
- curr_cut = (u16)HALMAC_INTF_PHY_CUT_C;
- break;
- case HALMAC_CHIP_VER_D_CUT:
- curr_cut = (u16)HALMAC_INTF_PHY_CUT_D;
- break;
- case HALMAC_CHIP_VER_E_CUT:
- curr_cut = (u16)HALMAC_INTF_PHY_CUT_E;
- break;
- case HALMAC_CHIP_VER_F_CUT:
- curr_cut = (u16)HALMAC_INTF_PHY_CUT_F;
- break;
- case HALMAC_CHIP_VER_TEST:
- curr_cut = (u16)HALMAC_INTF_PHY_CUT_TESTCHIP;
- break;
- default:
- return HALMAC_RET_FAIL;
- }
-
- for (curr_phy_para = intf_phy_para;; curr_phy_para++) {
- if (!(curr_phy_para->cut & curr_cut) ||
- !(curr_phy_para->plaform & (u16)platform))
- continue;
-
- offset = curr_phy_para->offset;
- value = curr_phy_para->value;
- ip_sel = curr_phy_para->ip_sel;
-
- if (offset == 0xFFFF)
- break;
-
- if (ip_sel == HALMAC_IP_SEL_MAC) {
- HALMAC_REG_WRITE_8(halmac_adapter, (u32)offset,
- (u8)value);
- } else if (intf_phy == HAL_INTF_PHY_USB2) {
- result = halmac_usbphy_write_88xx(halmac_adapter,
- (u8)offset, value,
- HAL_INTF_PHY_USB2);
-
- if (result != HALMAC_RET_SUCCESS)
- pr_err("[ERR]Write USB2PHY fail!\n");
-
- } else if (intf_phy == HAL_INTF_PHY_USB3) {
- result = halmac_usbphy_write_88xx(halmac_adapter,
- (u8)offset, value,
- HAL_INTF_PHY_USB3);
-
- if (result != HALMAC_RET_SUCCESS)
- pr_err("[ERR]Write USB3PHY fail!\n");
-
- } else if (intf_phy == HAL_INTF_PHY_PCIE_GEN1) {
- if (ip_sel == HALMAC_IP_SEL_INTF_PHY)
- result = halmac_mdio_write_88xx(
- halmac_adapter, (u8)offset, value,
- HAL_INTF_PHY_PCIE_GEN1);
- else
- result = halmac_dbi_write8_88xx(
- halmac_adapter, offset, (u8)value);
-
- if (result != HALMAC_RET_SUCCESS)
- pr_err("[ERR]MDIO write GEN1 fail!\n");
-
- } else if (intf_phy == HAL_INTF_PHY_PCIE_GEN2) {
- if (ip_sel == HALMAC_IP_SEL_INTF_PHY)
- result = halmac_mdio_write_88xx(
- halmac_adapter, (u8)offset, value,
- HAL_INTF_PHY_PCIE_GEN2);
- else
- result = halmac_dbi_write8_88xx(
- halmac_adapter, offset, (u8)value);
-
- if (result != HALMAC_RET_SUCCESS)
- pr_err("[ERR]MDIO write GEN2 fail!\n");
- } else {
- pr_err("[ERR]Parse intf phy cfg error!\n");
- }
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-enum halmac_ret_status
-halmac_dbi_write32_88xx(struct halmac_adapter *halmac_adapter, u16 addr,
- u32 data)
-{
- u8 tmp_u1b = 0;
- u32 count = 0;
- u16 write_addr = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_32(halmac_adapter, REG_DBI_WDATA_V1, data);
-
- write_addr = ((addr & 0x0ffc) | (0x000F << 12));
- HALMAC_REG_WRITE_16(halmac_adapter, REG_DBI_FLAG_V1, write_addr);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_DBI, DBG_DMESG,
- "WriteAddr = %x\n", write_addr);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_DBI_FLAG_V1 + 2, 0x01);
- tmp_u1b = HALMAC_REG_READ_8(halmac_adapter, REG_DBI_FLAG_V1 + 2);
-
- count = 20;
- while (tmp_u1b && count != 0) {
- udelay(10);
- tmp_u1b =
- HALMAC_REG_READ_8(halmac_adapter, REG_DBI_FLAG_V1 + 2);
- count--;
- }
-
- if (tmp_u1b) {
- pr_err("DBI write fail!\n");
- return HALMAC_RET_FAIL;
- } else {
- return HALMAC_RET_SUCCESS;
- }
-}
-
-u32 halmac_dbi_read32_88xx(struct halmac_adapter *halmac_adapter, u16 addr)
-{
- u16 read_addr = addr & 0x0ffc;
- u8 tmp_u1b = 0;
- u32 count = 0;
- u32 ret = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_DBI_FLAG_V1, read_addr);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_DBI_FLAG_V1 + 2, 0x2);
- tmp_u1b = HALMAC_REG_READ_8(halmac_adapter, REG_DBI_FLAG_V1 + 2);
-
- count = 20;
- while (tmp_u1b && count != 0) {
- udelay(10);
- tmp_u1b =
- HALMAC_REG_READ_8(halmac_adapter, REG_DBI_FLAG_V1 + 2);
- count--;
- }
-
- if (tmp_u1b) {
- ret = 0xFFFF;
- pr_err("DBI read fail!\n");
- } else {
- ret = HALMAC_REG_READ_32(halmac_adapter, REG_DBI_RDATA_V1);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_DBI, DBG_DMESG,
- "Read Value = %x\n", ret);
- }
-
- return ret;
-}
-
-enum halmac_ret_status
-halmac_dbi_write8_88xx(struct halmac_adapter *halmac_adapter, u16 addr, u8 data)
-{
- u8 tmp_u1b = 0;
- u32 count = 0;
- u16 write_addr = 0;
- u16 remainder = addr & (4 - 1);
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_DBI_WDATA_V1 + remainder, data);
-
- write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_DBI_FLAG_V1, write_addr);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_DBI, DBG_DMESG,
- "WriteAddr = %x\n", write_addr);
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_DBI_FLAG_V1 + 2, 0x01);
-
- tmp_u1b = HALMAC_REG_READ_8(halmac_adapter, REG_DBI_FLAG_V1 + 2);
-
- count = 20;
- while (tmp_u1b && count != 0) {
- udelay(10);
- tmp_u1b =
- HALMAC_REG_READ_8(halmac_adapter, REG_DBI_FLAG_V1 + 2);
- count--;
- }
-
- if (tmp_u1b) {
- pr_err("DBI write fail!\n");
- return HALMAC_RET_FAIL;
- } else {
- return HALMAC_RET_SUCCESS;
- }
-}
-
-u8 halmac_dbi_read8_88xx(struct halmac_adapter *halmac_adapter, u16 addr)
-{
- u16 read_addr = addr & 0x0ffc;
- u8 tmp_u1b = 0;
- u32 count = 0;
- u8 ret = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_DBI_FLAG_V1, read_addr);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_DBI_FLAG_V1 + 2, 0x2);
-
- tmp_u1b = HALMAC_REG_READ_8(halmac_adapter, REG_DBI_FLAG_V1 + 2);
-
- count = 20;
- while (tmp_u1b && count != 0) {
- udelay(10);
- tmp_u1b =
- HALMAC_REG_READ_8(halmac_adapter, REG_DBI_FLAG_V1 + 2);
- count--;
- }
-
- if (tmp_u1b) {
- ret = 0xFF;
- pr_err("DBI read fail!\n");
- } else {
- ret = HALMAC_REG_READ_8(halmac_adapter,
- REG_DBI_RDATA_V1 + (addr & (4 - 1)));
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_DBI, DBG_DMESG,
- "Read Value = %x\n", ret);
- }
-
- return ret;
-}
-
-enum halmac_ret_status
-halmac_mdio_write_88xx(struct halmac_adapter *halmac_adapter, u8 addr, u16 data,
- u8 speed)
-{
- u8 tmp_u1b = 0;
- u32 count = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- u8 real_addr = 0;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_REG_WRITE_16(halmac_adapter, REG_MDIO_V1, data);
-
- /* address : 5bit */
- real_addr = (addr & 0x1F);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG, real_addr);
-
- if (speed == HAL_INTF_PHY_PCIE_GEN1) {
- /* GEN1 page 0 */
- if (addr < 0x20) {
- /* select MDIO PHY Addr : reg 0x3F8[28:24]=5'b00 */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG + 3,
- 0x00);
-
- /* GEN1 page 1 */
- } else {
- /* select MDIO PHY Addr : reg 0x3F8[28:24]=5'b01 */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG + 3,
- 0x01);
- }
-
- } else if (speed == HAL_INTF_PHY_PCIE_GEN2) {
- /* GEN2 page 0 */
- if (addr < 0x20) {
- /* select MDIO PHY Addr : reg 0x3F8[28:24]=5'b10 */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG + 3,
- 0x02);
-
- /* GEN2 page 1 */
- } else {
- /* select MDIO PHY Addr : reg 0x3F8[28:24]=5'b11 */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG + 3,
- 0x03);
- }
- } else {
-		pr_err("Error Speed!\n");
- }
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG,
- HALMAC_REG_READ_8(halmac_adapter, REG_PCIE_MIX_CFG) |
- BIT_MDIO_WFLAG_V1);
-
- tmp_u1b = HALMAC_REG_READ_8(halmac_adapter, REG_PCIE_MIX_CFG) &
- BIT_MDIO_WFLAG_V1;
- count = 20;
-
- while (tmp_u1b && count != 0) {
- udelay(10);
- tmp_u1b = HALMAC_REG_READ_8(halmac_adapter, REG_PCIE_MIX_CFG) &
- BIT_MDIO_WFLAG_V1;
- count--;
- }
-
- if (tmp_u1b) {
- pr_err("MDIO write fail!\n");
- return HALMAC_RET_FAIL;
- } else {
- return HALMAC_RET_SUCCESS;
- }
-}
-
-u16 halmac_mdio_read_88xx(struct halmac_adapter *halmac_adapter, u8 addr,
-			  u8 speed)
-{
- u16 ret = 0;
- u8 tmp_u1b = 0;
- u32 count = 0;
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- u8 real_addr = 0;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- /* address : 5bit */
- real_addr = (addr & 0x1F);
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG, real_addr);
-
- if (speed == HAL_INTF_PHY_PCIE_GEN1) {
- /* GEN1 page 0 */
- if (addr < 0x20) {
- /* select MDIO PHY Addr : reg 0x3F8[28:24]=5'b00 */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG + 3,
- 0x00);
-
- /* GEN1 page 1 */
- } else {
- /* select MDIO PHY Addr : reg 0x3F8[28:24]=5'b01 */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG + 3,
- 0x01);
- }
-
- } else if (speed == HAL_INTF_PHY_PCIE_GEN2) {
- /* GEN2 page 0 */
- if (addr < 0x20) {
- /* select MDIO PHY Addr : reg 0x3F8[28:24]=5'b10 */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG + 3,
- 0x02);
-
- /* GEN2 page 1 */
- } else {
- /* select MDIO PHY Addr : reg 0x3F8[28:24]=5'b11 */
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG + 3,
- 0x03);
- }
- } else {
-		pr_err("Error Speed!\n");
- }
-
- HALMAC_REG_WRITE_8(halmac_adapter, REG_PCIE_MIX_CFG,
- HALMAC_REG_READ_8(halmac_adapter, REG_PCIE_MIX_CFG) |
- BIT_MDIO_RFLAG_V1);
-
- tmp_u1b = HALMAC_REG_READ_8(halmac_adapter, REG_PCIE_MIX_CFG) &
- BIT_MDIO_RFLAG_V1;
- count = 20;
-
- while (tmp_u1b && count != 0) {
- udelay(10);
- tmp_u1b = HALMAC_REG_READ_8(halmac_adapter, REG_PCIE_MIX_CFG) &
- BIT_MDIO_RFLAG_V1;
- count--;
- }
-
- if (tmp_u1b) {
- ret = 0xFFFF;
- pr_err("MDIO read fail!\n");
-
- } else {
- ret = HALMAC_REG_READ_16(halmac_adapter, REG_MDIO_V1 + 2);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_MDIO, DBG_DMESG,
- "Read Value = %x\n", ret);
- }
-
- return ret;
-}
-
-enum halmac_ret_status
-halmac_usbphy_write_88xx(struct halmac_adapter *halmac_adapter, u8 addr,
- u16 data, u8 speed)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (speed == HAL_INTF_PHY_USB3) {
- HALMAC_REG_WRITE_8(halmac_adapter, 0xff0d, (u8)data);
- HALMAC_REG_WRITE_8(halmac_adapter, 0xff0e, (u8)(data >> 8));
- HALMAC_REG_WRITE_8(halmac_adapter, 0xff0c, addr | BIT(7));
- } else if (speed == HAL_INTF_PHY_USB2) {
- HALMAC_REG_WRITE_8(halmac_adapter, 0xfe41, (u8)data);
- HALMAC_REG_WRITE_8(halmac_adapter, 0xfe40, addr);
- HALMAC_REG_WRITE_8(halmac_adapter, 0xfe42, 0x81);
- } else {
-		pr_err("[ERR]Error USB Speed!\n");
- return HALMAC_RET_NOT_SUPPORT;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-u16 halmac_usbphy_read_88xx(struct halmac_adapter *halmac_adapter, u8 addr,
- u8 speed)
-{
- void *driver_adapter = NULL;
- struct halmac_api *halmac_api;
- u16 value = 0;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- if (speed == HAL_INTF_PHY_USB3) {
- HALMAC_REG_WRITE_8(halmac_adapter, 0xff0c, addr | BIT(6));
- value = (u16)(HALMAC_REG_READ_32(halmac_adapter, 0xff0c) >> 8);
- } else if (speed == HAL_INTF_PHY_USB2) {
- if ((addr >= 0xE0) /*&& (addr <= 0xFF)*/)
- addr -= 0x20;
- if ((addr >= 0xC0) && (addr <= 0xDF)) {
- HALMAC_REG_WRITE_8(halmac_adapter, 0xfe40, addr);
- HALMAC_REG_WRITE_8(halmac_adapter, 0xfe42, 0x81);
- value = HALMAC_REG_READ_8(halmac_adapter, 0xfe43);
- } else {
- pr_err("[ERR]Error USB2PHY offset!\n");
- return HALMAC_RET_NOT_SUPPORT;
- }
- } else {
-		pr_err("[ERR]Error USB Speed!\n");
- return HALMAC_RET_NOT_SUPPORT;
- }
-
- return value;
-}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h
deleted file mode 100644
index 86d59d9b76f3..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_FUNC_88XX_H_
-#define _HALMAC_FUNC_88XX_H_
-
-#include "../halmac_type.h"
-
-void halmac_init_offload_feature_state_machine_88xx(
- struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_send_h2c_pkt_88xx(struct halmac_adapter *halmac_adapter, u8 *hal_buff,
- u32 size, bool ack);
-
-enum halmac_ret_status
-halmac_download_rsvd_page_88xx(struct halmac_adapter *halmac_adapter,
- u8 *hal_buf, u32 size);
-
-enum halmac_ret_status
-halmac_set_h2c_header_88xx(struct halmac_adapter *halmac_adapter,
- u8 *hal_h2c_hdr, u16 *seq, bool ack);
-
-enum halmac_ret_status halmac_set_fw_offload_h2c_header_88xx(
- struct halmac_adapter *halmac_adapter, u8 *hal_h2c_hdr,
- struct halmac_h2c_header_info *h2c_header_info, u16 *seq_num);
-
-enum halmac_ret_status
-halmac_dump_efuse_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_read_cfg cfg);
-
-enum halmac_ret_status
-halmac_func_read_efuse_88xx(struct halmac_adapter *halmac_adapter, u32 offset,
- u32 size, u8 *efuse_map);
-
-enum halmac_ret_status
-halmac_func_write_efuse_88xx(struct halmac_adapter *halmac_adapter, u32 offset,
- u8 value);
-
-enum halmac_ret_status
-halmac_func_switch_efuse_bank_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_bank efuse_bank);
-
-enum halmac_ret_status
-halmac_read_logical_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
- u8 *map);
-
-enum halmac_ret_status
-halmac_func_write_logical_efuse_88xx(struct halmac_adapter *halmac_adapter,
- u32 offset, u8 value);
-
-enum halmac_ret_status
-halmac_func_pg_efuse_by_map_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- enum halmac_efuse_read_cfg cfg);
-
-enum halmac_ret_status
-halmac_eeprom_parser_88xx(struct halmac_adapter *halmac_adapter,
- u8 *physical_efuse_map, u8 *logical_efuse_map);
-
-enum halmac_ret_status
-halmac_read_hw_efuse_88xx(struct halmac_adapter *halmac_adapter, u32 offset,
- u32 size, u8 *efuse_map);
-
-enum halmac_ret_status
-halmac_dlfw_to_mem_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
- u32 dest, u32 code_size);
-
-enum halmac_ret_status
-halmac_send_fwpkt_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
- u32 code_size);
-
-enum halmac_ret_status
-halmac_iddma_dlfw_88xx(struct halmac_adapter *halmac_adapter, u32 source,
- u32 dest, u32 length, u8 first);
-
-enum halmac_ret_status
-halmac_check_fw_chksum_88xx(struct halmac_adapter *halmac_adapter,
- u32 memory_address);
-
-enum halmac_ret_status
-halmac_dlfw_end_flow_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_free_dl_fw_end_flow_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_pwr_seq_parser_88xx(struct halmac_adapter *halmac_adapter, u8 cut,
- u8 fab, u8 intf,
-			   struct halmac_wl_pwr_cfg_ **pp_pwr_seq_cfg);
-
-enum halmac_ret_status
-halmac_get_h2c_buff_free_space_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_send_h2c_set_pwr_mode_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_fwlps_option *hal_fw_lps_opt);
-
-enum halmac_ret_status
-halmac_func_send_original_h2c_88xx(struct halmac_adapter *halmac_adapter,
- u8 *original_h2c, u16 *seq, u8 ack);
-
-enum halmac_ret_status
-halmac_media_status_rpt_88xx(struct halmac_adapter *halmac_adapter, u8 op_mode,
- u8 mac_id_ind, u8 mac_id, u8 mac_id_end);
-
-enum halmac_ret_status halmac_send_h2c_update_datapack_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type,
- struct halmac_phy_parameter_info *para_info);
-
-enum halmac_ret_status
-halmac_send_h2c_run_datapack_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type);
-
-enum halmac_ret_status
-halmac_send_bt_coex_cmd_88xx(struct halmac_adapter *halmac_adapter, u8 *bt_buf,
- u32 bt_size, u8 ack);
-
-enum halmac_ret_status
-halmac_func_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ch_switch_option *cs_option);
-
-enum halmac_ret_status
-halmac_func_send_general_info_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_general_info *general_info);
-
-enum halmac_ret_status
-halmac_send_h2c_ps_tuning_para_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_parse_c2h_packet_88xx(struct halmac_adapter *halmac_adapter,
- u8 *halmac_buf, u32 halmac_size);
-
-enum halmac_ret_status
-halmac_send_h2c_update_packet_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_packet_id pkt_id, u8 *pkt,
- u32 pkt_size);
-
-enum halmac_ret_status
-halmac_send_h2c_phy_parameter_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_phy_parameter_info *para_info,
- bool full_fifo);
-
-enum halmac_ret_status
-halmac_dump_physical_efuse_fw_88xx(struct halmac_adapter *halmac_adapter,
- u32 offset, u32 size, u8 *efuse_map);
-
-enum halmac_ret_status halmac_send_h2c_update_bcn_parse_info_88xx(
- struct halmac_adapter *halmac_adapter,
- struct halmac_bcn_ie_info *bcn_ie_info);
-
-enum halmac_ret_status
-halmac_convert_to_sdio_bus_offset_88xx(struct halmac_adapter *halmac_adapter,
- u32 *halmac_offset);
-
-enum halmac_ret_status
-halmac_update_sdio_free_page_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_update_oqt_free_space_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_efuse_cmd_construct_state
-halmac_query_efuse_curr_state_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status halmac_transition_efuse_state_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_cmd_construct_state dest_state);
-
-enum halmac_cfg_para_cmd_construct_state
-halmac_query_cfg_para_curr_state_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status halmac_transition_cfg_para_state_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cfg_para_cmd_construct_state dest_state);
-
-enum halmac_scan_cmd_construct_state
-halmac_query_scan_curr_state_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status halmac_transition_scan_state_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_scan_cmd_construct_state dest_state);
-
-enum halmac_ret_status halmac_query_cfg_para_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size);
-
-enum halmac_ret_status halmac_query_dump_physical_efuse_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size);
-
-enum halmac_ret_status halmac_query_dump_logical_efuse_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size);
-
-enum halmac_ret_status halmac_query_channel_switch_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size);
-
-enum halmac_ret_status halmac_query_update_packet_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size);
-
-enum halmac_ret_status
-halmac_query_iqk_status_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status,
- u8 *data, u32 *size);
-
-enum halmac_ret_status halmac_query_power_tracking_status_88xx(
- struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status, u8 *data, u32 *size);
-
-enum halmac_ret_status
-halmac_query_psd_status_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_cmd_process_status *process_status,
- u8 *data, u32 *size);
-
-enum halmac_ret_status
-halmac_verify_io_88xx(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status
-halmac_verify_send_rsvd_page_88xx(struct halmac_adapter *halmac_adapter);
-
-void halmac_power_save_cb_88xx(void *cb_data);
-
-enum halmac_ret_status
-halmac_buffer_read_88xx(struct halmac_adapter *halmac_adapter, u32 offset,
- u32 size, enum hal_fifo_sel halmac_fifo_sel,
- u8 *fifo_map);
-
-void halmac_restore_mac_register_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_restore_info *restore_info,
- u32 restore_num);
-
-void halmac_api_record_id_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_api_id api_id);
-
-enum halmac_ret_status
-halmac_set_usb_mode_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_usb_mode usb_mode);
-
-void halmac_enable_bb_rf_88xx(struct halmac_adapter *halmac_adapter, u8 enable);
-
-void halmac_config_sdio_tx_page_threshold_88xx(
- struct halmac_adapter *halmac_adapter,
- struct halmac_tx_page_threshold_info *threshold_info);
-
-enum halmac_ret_status
-halmac_rqpn_parser_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode,
-			struct halmac_rqpn_ *rqpn_table);
-
-enum halmac_ret_status
-halmac_check_oqt_88xx(struct halmac_adapter *halmac_adapter, u32 tx_agg_num,
- u8 *halmac_buf);
-
-enum halmac_ret_status
-halmac_pg_num_parser_88xx(struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode halmac_trx_mode,
- struct halmac_pg_num_ *pg_num_table);
-
-enum halmac_ret_status
-halmac_parse_intf_phy_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_intf_phy_para_ *intf_phy_para,
- enum halmac_intf_phy_platform platform,
- enum hal_intf_phy intf_phy);
-
-enum halmac_ret_status
-halmac_dbi_write32_88xx(struct halmac_adapter *halmac_adapter, u16 addr,
- u32 data);
-
-u32 halmac_dbi_read32_88xx(struct halmac_adapter *halmac_adapter, u16 addr);
-
-enum halmac_ret_status
-halmac_dbi_write8_88xx(struct halmac_adapter *halmac_adapter, u16 addr,
- u8 data);
-
-u8 halmac_dbi_read8_88xx(struct halmac_adapter *halmac_adapter, u16 addr);
-
-u16 halmac_mdio_read_88xx(struct halmac_adapter *halmac_adapter, u8 addr,
-			  u8 speed);
-
-enum halmac_ret_status
-halmac_mdio_write_88xx(struct halmac_adapter *halmac_adapter, u8 addr, u16 data,
- u8 speed);
-
-void halmac_config_ampdu_88xx(struct halmac_adapter *halmac_adapter,
- struct halmac_ampdu_config *ampdu_config);
-
-enum halmac_ret_status
-halmac_usbphy_write_88xx(struct halmac_adapter *halmac_adapter, u8 addr,
- u16 data, u8 speed);
-
-u16 halmac_usbphy_read_88xx(struct halmac_adapter *halmac_adapter, u8 addr,
- u8 speed);
-#endif /* _HALMAC_FUNC_88XX_H_ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_api.c b/drivers/staging/rtlwifi/halmac/halmac_api.c
deleted file mode 100644
index e75eb42009c8..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_api.c
+++ /dev/null
@@ -1,415 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "halmac_2_platform.h"
-#include "halmac_type.h"
-#include "halmac_88xx/halmac_api_88xx.h"
-#include "halmac_88xx/halmac_88xx_cfg.h"
-
-#include "halmac_88xx/halmac_8822b/halmac_8822b_cfg.h"
-
-static enum halmac_ret_status
-halmac_check_platform_api(void *driver_adapter,
- enum halmac_interface halmac_interface,
- struct halmac_platform_api *halmac_platform_api)
-{
- void *adapter_local = NULL;
-
- adapter_local = driver_adapter;
-
- if (!halmac_platform_api)
- return HALMAC_RET_PLATFORM_API_NULL;
-
- if (halmac_interface == HALMAC_INTERFACE_SDIO) {
- if (!halmac_platform_api->SDIO_CMD52_READ) {
- pr_err("(!halmac_platform_api->SDIO_CMD52_READ)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->SDIO_CMD53_READ_8) {
- pr_err("(!halmac_platform_api->SDIO_CMD53_READ_8)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->SDIO_CMD53_READ_16) {
- pr_err("(!halmac_platform_api->SDIO_CMD53_READ_16)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->SDIO_CMD53_READ_32) {
- pr_err("(!halmac_platform_api->SDIO_CMD53_READ_32)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->SDIO_CMD53_READ_N) {
- pr_err("(!halmac_platform_api->SDIO_CMD53_READ_N)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->SDIO_CMD52_WRITE) {
- pr_err("(!halmac_platform_api->SDIO_CMD52_WRITE)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->SDIO_CMD53_WRITE_8) {
- pr_err("(!halmac_platform_api->SDIO_CMD53_WRITE_8)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->SDIO_CMD53_WRITE_16) {
- pr_err("(!halmac_platform_api->SDIO_CMD53_WRITE_16)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->SDIO_CMD53_WRITE_32) {
- pr_err("(!halmac_platform_api->SDIO_CMD53_WRITE_32)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- }
-
- if (halmac_interface == HALMAC_INTERFACE_USB ||
- halmac_interface == HALMAC_INTERFACE_PCIE) {
- if (!halmac_platform_api->REG_READ_8) {
- pr_err("(!halmac_platform_api->REG_READ_8)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->REG_READ_16) {
- pr_err("(!halmac_platform_api->REG_READ_16)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->REG_READ_32) {
- pr_err("(!halmac_platform_api->REG_READ_32)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->REG_WRITE_8) {
- pr_err("(!halmac_platform_api->REG_WRITE_8)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->REG_WRITE_16) {
- pr_err("(!halmac_platform_api->REG_WRITE_16)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- if (!halmac_platform_api->REG_WRITE_32) {
- pr_err("(!halmac_platform_api->REG_WRITE_32)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
- }
-
- if (!halmac_platform_api->EVENT_INDICATION) {
- pr_err("(!halmac_platform_api->EVENT_INDICATION)\n");
- return HALMAC_RET_PLATFORM_API_NULL;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_convert_to_sdio_bus_offset(u32 *halmac_offset)
-{
- switch ((*halmac_offset) & 0xFFFF0000) {
- case WLAN_IOREG_OFFSET:
- *halmac_offset = (HALMAC_SDIO_CMD_ADDR_MAC_REG << 13) |
- (*halmac_offset & HALMAC_WLAN_MAC_REG_MSK);
- break;
- case SDIO_LOCAL_OFFSET:
- *halmac_offset = (HALMAC_SDIO_CMD_ADDR_SDIO_REG << 13) |
- (*halmac_offset & HALMAC_SDIO_LOCAL_MSK);
- break;
- default:
- *halmac_offset = 0xFFFFFFFF;
- return HALMAC_RET_CONVERT_SDIO_OFFSET_FAIL;
- }
-
- return HALMAC_RET_SUCCESS;
-}
-
-static u8
-platform_reg_read_8_sdio(void *driver_adapter,
- struct halmac_platform_api *halmac_platform_api,
- u32 offset)
-{
- u8 value8;
- u32 halmac_offset = offset;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- status = halmac_convert_to_sdio_bus_offset(&halmac_offset);
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("%s error = %x\n", __func__, status);
- return status;
- }
-
- value8 = halmac_platform_api->SDIO_CMD52_READ(driver_adapter,
- halmac_offset);
-
- return value8;
-}
-
-static enum halmac_ret_status
-platform_reg_write_8_sdio(void *driver_adapter,
- struct halmac_platform_api *halmac_platform_api,
- u32 offset, u8 data)
-{
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
- u32 halmac_offset = offset;
-
- if ((halmac_offset & 0xFFFF0000) == 0)
- halmac_offset |= WLAN_IOREG_OFFSET;
-
- status = halmac_convert_to_sdio_bus_offset(&halmac_offset);
-
- if (status != HALMAC_RET_SUCCESS) {
- pr_err("halmac_reg_write_8_sdio_88xx error = %x\n", status);
- return status;
- }
- halmac_platform_api->SDIO_CMD52_WRITE(driver_adapter, halmac_offset,
- data);
-
- return HALMAC_RET_SUCCESS;
-}
-
-static enum halmac_ret_status
-halmac_get_chip_info(void *driver_adapter,
- struct halmac_platform_api *halmac_platform_api,
- enum halmac_interface halmac_interface,
- struct halmac_adapter *halmac_adapter)
-{
- struct halmac_api *halmac_api = (struct halmac_api *)NULL;
- u8 chip_id, chip_version;
- u32 polling_count;
-
- halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- /* Get Chip_id and Chip_version */
- if (halmac_adapter->halmac_interface == HALMAC_INTERFACE_SDIO) {
- platform_reg_write_8_sdio(
- driver_adapter, halmac_platform_api, REG_SDIO_HSUS_CTRL,
- platform_reg_read_8_sdio(driver_adapter,
- halmac_platform_api,
- REG_SDIO_HSUS_CTRL) &
- ~(BIT(0)));
-
- polling_count = 10000;
- while (!(platform_reg_read_8_sdio(driver_adapter,
- halmac_platform_api,
- REG_SDIO_HSUS_CTRL) &
- 0x02)) {
- polling_count--;
- if (polling_count == 0)
- return HALMAC_RET_SDIO_LEAVE_SUSPEND_FAIL;
- }
-
- chip_id = platform_reg_read_8_sdio(
- driver_adapter, halmac_platform_api, REG_SYS_CFG2);
- chip_version = platform_reg_read_8_sdio(driver_adapter,
- halmac_platform_api,
- REG_SYS_CFG1 + 1) >>
- 4;
- } else {
- chip_id = halmac_platform_api->REG_READ_8(driver_adapter,
- REG_SYS_CFG2);
- chip_version = halmac_platform_api->REG_READ_8(
- driver_adapter, REG_SYS_CFG1 + 1) >>
- 4;
- }
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]Chip id : 0x%X\n", chip_id);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]Chip version : 0x%X\n", chip_version);
-
- halmac_adapter->chip_version = (enum halmac_chip_ver)chip_version;
-
- if (chip_id == HALMAC_CHIP_ID_HW_DEF_8822B)
- halmac_adapter->chip_id = HALMAC_CHIP_ID_8822B;
- else if (chip_id == HALMAC_CHIP_ID_HW_DEF_8821C)
- halmac_adapter->chip_id = HALMAC_CHIP_ID_8821C;
- else if (chip_id == HALMAC_CHIP_ID_HW_DEF_8814B)
- halmac_adapter->chip_id = HALMAC_CHIP_ID_8814B;
- else if (chip_id == HALMAC_CHIP_ID_HW_DEF_8197F)
- halmac_adapter->chip_id = HALMAC_CHIP_ID_8197F;
- else
- halmac_adapter->chip_id = HALMAC_CHIP_ID_UNDEFINE;
-
- if (halmac_adapter->chip_id == HALMAC_CHIP_ID_UNDEFINE)
- return HALMAC_RET_CHIP_NOT_SUPPORT;
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_init_adapter() - init halmac_adapter
- * @driver_adapter : the adapter of caller
- * @halmac_platform_api : the platform APIs which is used in halmac APIs
- * @halmac_interface : bus interface
- * @pp_halmac_adapter : the adapter of halmac
- * @pp_halmac_api : the function pointer of APIs, caller shall call APIs by
- * function pointer
- * Author : KaiYuan Chang / Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_init_adapter(void *driver_adapter,
- struct halmac_platform_api *halmac_platform_api,
- enum halmac_interface halmac_interface,
- struct halmac_adapter **pp_halmac_adapter,
- struct halmac_api **pp_halmac_api)
-{
- struct halmac_adapter *halmac_adapter = (struct halmac_adapter *)NULL;
- enum halmac_ret_status status = HALMAC_RET_SUCCESS;
-
- union {
- u32 i;
- u8 x[4];
- } ENDIAN_CHECK = {0x01000000};
-
- status = halmac_check_platform_api(driver_adapter, halmac_interface,
- halmac_platform_api);
- if (status != HALMAC_RET_SUCCESS)
- return status;
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- HALMAC_SVN_VER "\n");
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "HALMAC_MAJOR_VER = %x\n", HALMAC_MAJOR_VER);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "HALMAC_PROTOTYPE_VER = %x\n", HALMAC_PROTOTYPE_VER);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "HALMAC_MINOR_VER = %x\n", HALMAC_MINOR_VER);
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "HALMAC_PATCH_VER = %x\n", HALMAC_PATCH_VER);
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_init_adapter_88xx ==========>\n");
-
- /* Check endian setting - Little endian : 1, Big endian : 0*/
- if (ENDIAN_CHECK.x[0] == HALMAC_SYSTEM_ENDIAN) {
- pr_err("Endian setting Err!!\n");
- return HALMAC_RET_ENDIAN_ERR;
- }
-
- halmac_adapter = kzalloc(sizeof(*halmac_adapter), GFP_KERNEL);
- if (!halmac_adapter) {
- /* out of memory */
- return HALMAC_RET_MALLOC_FAIL;
- }
-
- /* return halmac adapter address to caller */
- *pp_halmac_adapter = halmac_adapter;
-
- /* Record caller info */
- halmac_adapter->halmac_platform_api = halmac_platform_api;
- halmac_adapter->driver_adapter = driver_adapter;
- halmac_interface = halmac_interface == HALMAC_INTERFACE_AXI ?
- HALMAC_INTERFACE_PCIE :
- halmac_interface;
- halmac_adapter->halmac_interface = halmac_interface;
-
- spin_lock_init(&halmac_adapter->efuse_lock);
- spin_lock_init(&halmac_adapter->h2c_seq_lock);
-
-	/* Get chip info; on failure release the adapter allocated above */
-	if (halmac_get_chip_info(driver_adapter, halmac_platform_api,
-				 halmac_interface,
-				 halmac_adapter) != HALMAC_RET_SUCCESS) {
-		pr_err("HALMAC_RET_CHIP_NOT_SUPPORT\n");
-		kfree(halmac_adapter);
-		*pp_halmac_adapter = NULL;
-		return HALMAC_RET_CHIP_NOT_SUPPORT;
-	}
-
- /* Assign function pointer to halmac API */
- halmac_init_adapter_para_88xx(halmac_adapter);
- status = halmac_mount_api_88xx(halmac_adapter);
-
- /* Return halmac API function pointer */
- *pp_halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "halmac_init_adapter_88xx <==========\n");
-
- return status;
-}
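
For reference, the caller-side flow implied by the kerneldoc above looks roughly as follows. This is a minimal sketch and not part of the deleted file: the driver-private pointer, the platform callback table, and the halmac_mac_power_switch member named in the comment are illustrative assumptions only.

	struct halmac_adapter *adapter = NULL;
	struct halmac_api *api = NULL;
	enum halmac_ret_status status;

	/* hand over the driver context and platform callbacks, pick the bus */
	status = halmac_init_adapter(drv_priv, &my_platform_api,
				     HALMAC_INTERFACE_PCIE, &adapter, &api);
	if (status != HALMAC_RET_SUCCESS)
		return status;

	/*
	 * All further HALMAC calls go through the returned function-pointer
	 * table, e.g. api->halmac_mac_power_switch(adapter, ...); the member
	 * name is shown here only as an assumed example.
	 */

	halmac_halt_api(adapter);	/* stop API activity before teardown */
	halmac_deinit_adapter(adapter);	/* frees the adapter and API table */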
-
-/**
- * halmac_halt_api() - stop halmac_api action
- * @halmac_adapter : the adapter of halmac
- * Author : Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_halt_api(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
- struct halmac_platform_api *halmac_platform_api =
- (struct halmac_platform_api *)NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
- halmac_platform_api = halmac_adapter->halmac_platform_api;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "%s ==========>\n", __func__);
- halmac_adapter->halmac_state.api_state = HALMAC_API_STATE_HALT;
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
-			"%s <==========\n", __func__);
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_deinit_adapter() - deinit halmac adapter
- * @halmac_adapter : the adapter of halmac
- * Author : KaiYuan Chang / Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status
-halmac_deinit_adapter(struct halmac_adapter *halmac_adapter)
-{
- void *driver_adapter = NULL;
-
- if (halmac_adapter_validate(halmac_adapter) != HALMAC_RET_SUCCESS)
- return HALMAC_RET_ADAPTER_INVALID;
-
- driver_adapter = halmac_adapter->driver_adapter;
-
- HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_INIT, DBG_DMESG,
- "[TRACE]halmac_deinit_adapter_88xx ==========>\n");
-
- kfree(halmac_adapter->hal_efuse_map);
- halmac_adapter->hal_efuse_map = (u8 *)NULL;
-
- kfree(halmac_adapter->halmac_state.psd_set.data);
- halmac_adapter->halmac_state.psd_set.data = (u8 *)NULL;
-
- kfree(halmac_adapter->halmac_api);
- halmac_adapter->halmac_api = NULL;
-
- halmac_adapter->hal_adapter_backup = NULL;
- kfree(halmac_adapter);
-
- return HALMAC_RET_SUCCESS;
-}
-
-/**
- * halmac_get_version() - get HALMAC version
- * @version : return version of major, prototype and minor information
- * Author : KaiYuan Chang / Ivan Lin
- * Return : enum halmac_ret_status
- * More details of status code can be found in prototype document
- */
-enum halmac_ret_status halmac_get_version(struct halmac_ver *version)
-{
- version->major_ver = (u8)HALMAC_MAJOR_VER;
- version->prototype_ver = (u8)HALMAC_PROTOTYPE_VER;
- version->minor_ver = (u8)HALMAC_MINOR_VER;
-
- return HALMAC_RET_SUCCESS;
-}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_api.h b/drivers/staging/rtlwifi/halmac/halmac_api.h
deleted file mode 100644
index e220db39c8a7..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_api.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_API_H_
-#define _HALMAC_API_H_
-
-#define HALMAC_SVN_VER "13348M"
-
-#define HALMAC_MAJOR_VER 0x0001 /* major version, ver_1 for async_api */
-/* If the number of halmac_api entries or a prototype changes, increment the
- * prototype version; otherwise, increment the minor version.
- */
-#define HALMAC_PROTOTYPE_VER 0x0003 /* prototype version */
-#define HALMAC_MINOR_VER 0x0005 /* minor version */
-#define HALMAC_PATCH_VER 0x0000 /* patch version */
-
-#include "halmac_2_platform.h"
-#include "halmac_type.h"
-
-#include "halmac_usb_reg.h"
-#include "halmac_sdio_reg.h"
-
-#include "halmac_bit2.h"
-#include "halmac_reg2.h"
-
-#include "halmac_tx_desc_nic.h"
-#include "halmac_rx_desc_nic.h"
-#include "halmac_tx_bd_nic.h"
-#include "halmac_rx_bd_nic.h"
-#include "halmac_fw_offload_c2h_nic.h"
-#include "halmac_fw_offload_h2c_nic.h"
-#include "halmac_h2c_extra_info_nic.h"
-#include "halmac_original_c2h_nic.h"
-#include "halmac_original_h2c_nic.h"
-
-#include "halmac_tx_desc_chip.h"
-#include "halmac_rx_desc_chip.h"
-#include "halmac_tx_bd_chip.h"
-#include "halmac_rx_bd_chip.h"
-#include "halmac_88xx/halmac_88xx_cfg.h"
-
-#include "halmac_88xx/halmac_8822b/halmac_8822b_cfg.h"
-#include "halmac_reg_8822b.h"
-#include "halmac_bit_8822b.h"
-
-enum halmac_ret_status
-halmac_init_adapter(void *driver_adapter,
- struct halmac_platform_api *halmac_platform_api,
- enum halmac_interface halmac_interface,
- struct halmac_adapter **pp_halmac_adapter,
- struct halmac_api **pp_halmac_api);
-
-enum halmac_ret_status
-halmac_deinit_adapter(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status halmac_halt_api(struct halmac_adapter *halmac_adapter);
-
-enum halmac_ret_status halmac_get_version(struct halmac_ver *version);
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_bit2.h b/drivers/staging/rtlwifi/halmac/halmac_bit2.h
deleted file mode 100644
index 5f0f8528d136..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_bit2.h
+++ /dev/null
@@ -1,13396 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __RTL_WLAN_BITDEF_H__
-#define __RTL_WLAN_BITDEF_H__
-
-/*-------------------------Modification Log-----------------------------------
- * Based on MAC_Register.doc SVN391
- *-------------------------Modification Log-----------------------------------
- */
-
-/*--------------------------Include File--------------------------------------*/
-/*--------------------------Include File--------------------------------------*/
-
-/* 3 ============Programming guide Start===================== */
-/*
- * 1. All bit definitions should be prefixed with "BIT_"
- * 2. All bit masks should be prefixed with "BIT_MASK_"
- * 3. All bit shifts should be prefixed with "BIT_SHIFT_"
- * 4. Other cases do not need a prefix
- *
- * Example:
- * #define BIT_SHIFT_MAX_TXDMA 16
- * #define BIT_MASK_MAX_TXDMA 0x7
- * #define BIT_MAX_TXDMA(x) \
- * (((x) & BIT_MASK_MAX_TXDMA) << BIT_SHIFT_MAX_TXDMA)
- * #define BIT_GET_MAX_TXDMA(x) \
- * (((x) >> BIT_SHIFT_MAX_TXDMA) & BIT_MASK_MAX_TXDMA)
- *
- */
-/* 3 ============Programming guide End===================== */
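The convention above implies a uniform pack/extract idiom for multi-bit register fields. A minimal sketch, reusing the guide's own illustrative MAX_TXDMA macros; reg_read32()/reg_write32() and REG_TXDMA_CTRL are hypothetical placeholders, not symbols from this header:

	#define BIT_SHIFT_MAX_TXDMA 16
	#define BIT_MASK_MAX_TXDMA 0x7
	#define BIT_MAX_TXDMA(x) \
		(((x) & BIT_MASK_MAX_TXDMA) << BIT_SHIFT_MAX_TXDMA)
	#define BIT_GET_MAX_TXDMA(x) \
		(((x) >> BIT_SHIFT_MAX_TXDMA) & BIT_MASK_MAX_TXDMA)

	/* Read-modify-write the MAX_TXDMA field inside a 32-bit register value. */
	u32 val = reg_read32(REG_TXDMA_CTRL);
	u32 cur = BIT_GET_MAX_TXDMA(val);                      /* extract the current field */

	val &= ~(BIT_MASK_MAX_TXDMA << BIT_SHIFT_MAX_TXDMA);   /* clear the field   */
	val |= BIT_MAX_TXDMA(cur + 1);                         /* pack a new value  */
	reg_write32(REG_TXDMA_CTRL, val);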
-
-#define CPU_OPT_WIDTH 0x1F
-
-#define BIT_SHIFT_WATCH_DOG_RECORD_V1 10
-#define BIT_MASK_WATCH_DOG_RECORD_V1 0x3fff
-#define BIT_WATCH_DOG_RECORD_V1(x) \
- (((x) & BIT_MASK_WATCH_DOG_RECORD_V1) << BIT_SHIFT_WATCH_DOG_RECORD_V1)
-#define BIT_GET_WATCH_DOG_RECORD_V1(x) \
- (((x) >> BIT_SHIFT_WATCH_DOG_RECORD_V1) & BIT_MASK_WATCH_DOG_RECORD_V1)
-
-#define BIT_R_IO_TIMEOUT_FLAG_V1 BIT(9)
-
-#define BIT_ISO_MD2PP BIT(0)
-
-#define BIT_SHIFT_R_WMAC_IPV6_MYIPAD 0
-#define BIT_MASK_R_WMAC_IPV6_MYIPAD 0xffffffffffffffffffffffffffffffffL
-#define BIT_R_WMAC_IPV6_MYIPAD(x) \
- (((x) & BIT_MASK_R_WMAC_IPV6_MYIPAD) << BIT_SHIFT_R_WMAC_IPV6_MYIPAD)
-#define BIT_GET_R_WMAC_IPV6_MYIPAD(x) \
- (((x) >> BIT_SHIFT_R_WMAC_IPV6_MYIPAD) & BIT_MASK_R_WMAC_IPV6_MYIPAD)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_SHIFT_SDIO_INT_TIMEOUT 16
-#define BIT_MASK_SDIO_INT_TIMEOUT 0xffff
-#define BIT_SDIO_INT_TIMEOUT(x) \
- (((x) & BIT_MASK_SDIO_INT_TIMEOUT) << BIT_SHIFT_SDIO_INT_TIMEOUT)
-#define BIT_GET_SDIO_INT_TIMEOUT(x) \
- (((x) >> BIT_SHIFT_SDIO_INT_TIMEOUT) & BIT_MASK_SDIO_INT_TIMEOUT)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_PWC_EV12V BIT(15)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_IO_ERR_STATUS BIT(15)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_PWC_EV25V BIT(14)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_PA33V_EN BIT(13)
-#define BIT_PA12V_EN BIT(12)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_UA33V_EN BIT(11)
-#define BIT_UA12V_EN BIT(10)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_RFDIO BIT(9)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_REPLY_ERRCRC_IN_DATA BIT(9)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_EB2CORE BIT(8)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_EN_CMD53_OVERLAP BIT(8)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_DIOE BIT(7)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_REPLY_ERR_IN_R5 BIT(7)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_WLPON2PP BIT(6)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_R18A_EN BIT(6)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_IP2MAC_WA2PP BIT(5)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_INIT_CMD_EN BIT(5)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_PD2CORE BIT(4)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_PA2PCIE BIT(3)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_UD2CORE BIT(2)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_EN_RXDMA_MASK_INT BIT(2)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_UA2USB BIT(1)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_EN_MASK_TIMER BIT(1)
-
-/* 2 REG_SYS_ISO_CTRL (Offset 0x0000) */
-
-#define BIT_ISO_WD2PP BIT(0)
-
-/* 2 REG_SDIO_TX_CTRL (Offset 0x10250000) */
-
-#define BIT_CMD_ERR_STOP_INT_EN BIT(0)
-
-/* 2 REG_SYS_FUNC_EN (Offset 0x0002) */
-
-#define BIT_FEN_MREGEN BIT(15)
-#define BIT_FEN_HWPDN BIT(14)
-
-/* 2 REG_SYS_FUNC_EN (Offset 0x0002) */
-
-#define BIT_EN_25_1 BIT(13)
-
-/* 2 REG_SYS_FUNC_EN (Offset 0x0002) */
-
-#define BIT_FEN_ELDR BIT(12)
-#define BIT_FEN_DCORE BIT(11)
-#define BIT_FEN_CPUEN BIT(10)
-#define BIT_FEN_DIOE BIT(9)
-#define BIT_FEN_PCIED BIT(8)
-#define BIT_FEN_PPLL BIT(7)
-#define BIT_FEN_PCIEA BIT(6)
-#define BIT_FEN_DIO_PCIE BIT(5)
-#define BIT_FEN_USBD BIT(4)
-#define BIT_FEN_UPLL BIT(3)
-#define BIT_FEN_USBA BIT(2)
-
-/* 2 REG_SYS_FUNC_EN (Offset 0x0002) */
-
-#define BIT_FEN_BB_GLB_RSTN BIT(1)
-#define BIT_FEN_BBRSTB BIT(0)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_SOP_EABM BIT(31)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_SOP_ACKF BIT(30)
-#define BIT_SOP_ERCK BIT(29)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_SOP_ESWR BIT(28)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_SOP_PWMM BIT(27)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_SOP_EECK BIT(26)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_SOP_EXTL BIT(24)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_SYM_OP_RING_12M BIT(22)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_ROP_SWPR BIT(21)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_DIS_HW_LPLDM BIT(20)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_OPT_SWRST_WLMCU BIT(19)
-#define BIT_RDY_SYSPWR BIT(17)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_EN_WLON BIT(16)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_APDM_HPDN BIT(15)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_AFSM_PCIE_SUS_EN BIT(12)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_AFSM_WLSUS_EN BIT(11)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_APFM_SWLPS BIT(10)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_APFM_OFFMAC BIT(9)
-#define BIT_APFN_ONMAC BIT(8)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_CHIP_PDN_EN BIT(7)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_RDY_MACDIS BIT(6)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_RING_CLK_12M_EN BIT(4)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_PFM_WOWL BIT(3)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_PFM_LDKP BIT(2)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_WL_HCI_ALD BIT(1)
-
-/* 2 REG_SYS_PW_CTRL (Offset 0x0004) */
-
-#define BIT_PFM_LDALL BIT(0)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_LDO_DUMMY BIT(15)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_CPU_CLK_EN BIT(14)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_SYMREG_CLK_EN BIT(13)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_HCI_CLK_EN BIT(12)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_MAC_CLK_EN BIT(11)
-#define BIT_SEC_CLK_EN BIT(10)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_PHY_SSC_RSTB BIT(9)
-#define BIT_EXT_32K_EN BIT(8)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_WL_CLK_TEST BIT(7)
-#define BIT_OP_SPS_PWM_EN BIT(6)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_LOADER_CLK_EN BIT(5)
-#define BIT_MACSLP BIT(4)
-#define BIT_WAKEPAD_EN BIT(3)
-#define BIT_ROMD16V_EN BIT(2)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_CKANA12M_EN BIT(1)
-
-/* 2 REG_SYS_CLK_CTRL (Offset 0x0008) */
-
-#define BIT_CNTD16V_EN BIT(0)
-
-/* 2 REG_SYS_EEPROM_CTRL (Offset 0x000A) */
-
-#define BIT_SHIFT_VPDIDX 8
-#define BIT_MASK_VPDIDX 0xff
-#define BIT_VPDIDX(x) (((x) & BIT_MASK_VPDIDX) << BIT_SHIFT_VPDIDX)
-#define BIT_GET_VPDIDX(x) (((x) >> BIT_SHIFT_VPDIDX) & BIT_MASK_VPDIDX)
-
-#define BIT_SHIFT_EEM1_0 6
-#define BIT_MASK_EEM1_0 0x3
-#define BIT_EEM1_0(x) (((x) & BIT_MASK_EEM1_0) << BIT_SHIFT_EEM1_0)
-#define BIT_GET_EEM1_0(x) (((x) >> BIT_SHIFT_EEM1_0) & BIT_MASK_EEM1_0)
-
-#define BIT_AUTOLOAD_SUS BIT(5)
-
-/* 2 REG_SYS_EEPROM_CTRL (Offset 0x000A) */
-
-#define BIT_EERPOMSEL BIT(4)
-
-/* 2 REG_SYS_EEPROM_CTRL (Offset 0x000A) */
-
-#define BIT_EECS_V1 BIT(3)
-#define BIT_EESK_V1 BIT(2)
-#define BIT_EEDI_V1 BIT(1)
-#define BIT_EEDO_V1 BIT(0)
-
-/* 2 REG_EE_VPD (Offset 0x000C) */
-
-#define BIT_SHIFT_VPD_DATA 0
-#define BIT_MASK_VPD_DATA 0xffffffffL
-#define BIT_VPD_DATA(x) (((x) & BIT_MASK_VPD_DATA) << BIT_SHIFT_VPD_DATA)
-#define BIT_GET_VPD_DATA(x) (((x) >> BIT_SHIFT_VPD_DATA) & BIT_MASK_VPD_DATA)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_C2_L_BIT0 BIT(31)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SHIFT_C1_L 29
-#define BIT_MASK_C1_L 0x3
-#define BIT_C1_L(x) (((x) & BIT_MASK_C1_L) << BIT_SHIFT_C1_L)
-#define BIT_GET_C1_L(x) (((x) >> BIT_SHIFT_C1_L) & BIT_MASK_C1_L)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SHIFT_REG_FREQ_L 25
-#define BIT_MASK_REG_FREQ_L 0x7
-#define BIT_REG_FREQ_L(x) (((x) & BIT_MASK_REG_FREQ_L) << BIT_SHIFT_REG_FREQ_L)
-#define BIT_GET_REG_FREQ_L(x) \
- (((x) >> BIT_SHIFT_REG_FREQ_L) & BIT_MASK_REG_FREQ_L)
-
-#define BIT_REG_EN_DUTY BIT(24)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SHIFT_REG_MODE 22
-#define BIT_MASK_REG_MODE 0x3
-#define BIT_REG_MODE(x) (((x) & BIT_MASK_REG_MODE) << BIT_SHIFT_REG_MODE)
-#define BIT_GET_REG_MODE(x) (((x) >> BIT_SHIFT_REG_MODE) & BIT_MASK_REG_MODE)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_REG_EN_SP BIT(21)
-#define BIT_REG_AUTO_L BIT(20)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SW18_SELD_BIT0 BIT(19)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SW18_POWOCP BIT(18)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SHIFT_OCP_L1 15
-#define BIT_MASK_OCP_L1 0x7
-#define BIT_OCP_L1(x) (((x) & BIT_MASK_OCP_L1) << BIT_SHIFT_OCP_L1)
-#define BIT_GET_OCP_L1(x) (((x) >> BIT_SHIFT_OCP_L1) & BIT_MASK_OCP_L1)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SHIFT_CF_L 13
-#define BIT_MASK_CF_L 0x3
-#define BIT_CF_L(x) (((x) & BIT_MASK_CF_L) << BIT_SHIFT_CF_L)
-#define BIT_GET_CF_L(x) (((x) >> BIT_SHIFT_CF_L) & BIT_MASK_CF_L)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SW18_FPWM BIT(11)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_SW18_SWEN BIT(9)
-#define BIT_SW18_LDEN BIT(8)
-#define BIT_MAC_ID_EN BIT(7)
-
-/* 2 REG_SYS_SWR_CTRL1 (Offset 0x0010) */
-
-#define BIT_AFE_BGEN BIT(0)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_POW_ZCD_L BIT(31)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_CRCERR_MSK BIT(31)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_AUTOZCD_L BIT(30)
-#define BIT_SDIO_HSISR3_IND_MSK BIT(30)
-#define BIT_SDIO_HSISR2_IND_MSK BIT(29)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_REG_DELAY 28
-#define BIT_MASK_REG_DELAY 0x3
-#define BIT_REG_DELAY(x) (((x) & BIT_MASK_REG_DELAY) << BIT_SHIFT_REG_DELAY)
-#define BIT_GET_REG_DELAY(x) (((x) >> BIT_SHIFT_REG_DELAY) & BIT_MASK_REG_DELAY)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_HEISR_IND_MSK BIT(28)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_CTWEND_MSK BIT(27)
-#define BIT_SDIO_ATIMEND_E_MSK BIT(26)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIIO_ATIMEND_MSK BIT(25)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_OCPINT_MSK BIT(24)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_V15ADJ_L1_V1 24
-#define BIT_MASK_V15ADJ_L1_V1 0x7
-#define BIT_V15ADJ_L1_V1(x) \
- (((x) & BIT_MASK_V15ADJ_L1_V1) << BIT_SHIFT_V15ADJ_L1_V1)
-#define BIT_GET_V15ADJ_L1_V1(x) \
- (((x) >> BIT_SHIFT_V15ADJ_L1_V1) & BIT_MASK_V15ADJ_L1_V1)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_PSTIMEOUT_MSK BIT(23)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_GTINT4_MSK BIT(22)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_GTINT3_MSK BIT(21)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_HSISR_IND_MSK BIT(20)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_VOL_L1_V1 20
-#define BIT_MASK_VOL_L1_V1 0xf
-#define BIT_VOL_L1_V1(x) (((x) & BIT_MASK_VOL_L1_V1) << BIT_SHIFT_VOL_L1_V1)
-#define BIT_GET_VOL_L1_V1(x) (((x) >> BIT_SHIFT_VOL_L1_V1) & BIT_MASK_VOL_L1_V1)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_CPWM2_MSK BIT(19)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_CPWM1_MSK BIT(18)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_C2HCMD_INT_MSK BIT(17)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_IN_L1_V1 17
-#define BIT_MASK_IN_L1_V1 0x7
-#define BIT_IN_L1_V1(x) (((x) & BIT_MASK_IN_L1_V1) << BIT_SHIFT_IN_L1_V1)
-#define BIT_GET_IN_L1_V1(x) (((x) >> BIT_SHIFT_IN_L1_V1) & BIT_MASK_IN_L1_V1)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_BCNERLY_INT_MSK BIT(16)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_TBOX_L1 15
-#define BIT_MASK_TBOX_L1 0x3
-#define BIT_TBOX_L1(x) (((x) & BIT_MASK_TBOX_L1) << BIT_SHIFT_TBOX_L1)
-#define BIT_GET_TBOX_L1(x) (((x) >> BIT_SHIFT_TBOX_L1) & BIT_MASK_TBOX_L1)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SW18_SEL BIT(13)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SW18_SD BIT(10)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_TXBCNERR_MSK BIT(7)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_R3_L 7
-#define BIT_MASK_R3_L 0x3
-#define BIT_R3_L(x) (((x) & BIT_MASK_R3_L) << BIT_SHIFT_R3_L)
-#define BIT_GET_R3_L(x) (((x) >> BIT_SHIFT_R3_L) & BIT_MASK_R3_L)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_TXBCNOK_MSK BIT(6)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_SW18_R2 5
-#define BIT_MASK_SW18_R2 0x3
-#define BIT_SW18_R2(x) (((x) & BIT_MASK_SW18_R2) << BIT_SHIFT_SW18_R2)
-#define BIT_GET_SW18_R2(x) (((x) >> BIT_SHIFT_SW18_R2) & BIT_MASK_SW18_R2)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_RXFOVW_MSK BIT(5)
-#define BIT_SDIO_TXFOVW_MSK BIT(4)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_SW18_R1 3
-#define BIT_MASK_SW18_R1 0x3
-#define BIT_SW18_R1(x) (((x) & BIT_MASK_SW18_R1) << BIT_SHIFT_SW18_R1)
-#define BIT_GET_SW18_R1(x) (((x) >> BIT_SHIFT_SW18_R1) & BIT_MASK_SW18_R1)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_RXERR_MSK BIT(3)
-#define BIT_SDIO_TXERR_MSK BIT(2)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_SDIO_AVAL_MSK BIT(1)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_SHIFT_C3_L_C3 1
-#define BIT_MASK_C3_L_C3 0x3
-#define BIT_C3_L_C3(x) (((x) & BIT_MASK_C3_L_C3) << BIT_SHIFT_C3_L_C3)
-#define BIT_GET_C3_L_C3(x) (((x) >> BIT_SHIFT_C3_L_C3) & BIT_MASK_C3_L_C3)
-
-/* 2 REG_SDIO_HIMR (Offset 0x10250014) */
-
-#define BIT_RX_REQUEST_MSK BIT(0)
-
-/* 2 REG_SYS_SWR_CTRL2 (Offset 0x0014) */
-
-#define BIT_C2_L_BIT1 BIT(0)
-
-/* 2 REG_SYS_SWR_CTRL3 (Offset 0x0018) */
-
-#define BIT_SPS18_OCP_DIS BIT(31)
-
-/* 2 REG_SDIO_HISR (Offset 0x10250018) */
-
-#define BIT_SDIO_CRCERR BIT(31)
-
-/* 2 REG_SDIO_HISR (Offset 0x10250018) */
-
-#define BIT_SDIO_HSISR3_IND BIT(30)
-#define BIT_SDIO_HSISR2_IND BIT(29)
-#define BIT_SDIO_HEISR_IND BIT(28)
-
-/* 2 REG_SDIO_HISR (Offset 0x10250018) */
-
-#define BIT_SDIO_CTWEND BIT(27)
-#define BIT_SDIO_ATIMEND_E BIT(26)
-#define BIT_SDIO_ATIMEND BIT(25)
-#define BIT_SDIO_OCPINT BIT(24)
-#define BIT_SDIO_PSTIMEOUT BIT(23)
-#define BIT_SDIO_GTINT4 BIT(22)
-#define BIT_SDIO_GTINT3 BIT(21)
-#define BIT_SDIO_HSISR_IND BIT(20)
-#define BIT_SDIO_CPWM2 BIT(19)
-#define BIT_SDIO_CPWM1 BIT(18)
-#define BIT_SDIO_C2HCMD_INT BIT(17)
-
-/* 2 REG_SYS_SWR_CTRL3 (Offset 0x0018) */
-
-#define BIT_SHIFT_SPS18_OCP_TH 16
-#define BIT_MASK_SPS18_OCP_TH 0x7fff
-#define BIT_SPS18_OCP_TH(x) \
- (((x) & BIT_MASK_SPS18_OCP_TH) << BIT_SHIFT_SPS18_OCP_TH)
-#define BIT_GET_SPS18_OCP_TH(x) \
- (((x) >> BIT_SHIFT_SPS18_OCP_TH) & BIT_MASK_SPS18_OCP_TH)
-
-/* 2 REG_SDIO_HISR (Offset 0x10250018) */
-
-#define BIT_SDIO_BCNERLY_INT BIT(16)
-#define BIT_SDIO_TXBCNERR BIT(7)
-#define BIT_SDIO_TXBCNOK BIT(6)
-#define BIT_SDIO_RXFOVW BIT(5)
-#define BIT_SDIO_TXFOVW BIT(4)
-#define BIT_SDIO_RXERR BIT(3)
-#define BIT_SDIO_TXERR BIT(2)
-#define BIT_SDIO_AVAL BIT(1)
-
-/* 2 REG_SYS_SWR_CTRL3 (Offset 0x0018) */
-
-#define BIT_SHIFT_OCP_WINDOW 0
-#define BIT_MASK_OCP_WINDOW 0xffff
-#define BIT_OCP_WINDOW(x) (((x) & BIT_MASK_OCP_WINDOW) << BIT_SHIFT_OCP_WINDOW)
-#define BIT_GET_OCP_WINDOW(x) \
- (((x) >> BIT_SHIFT_OCP_WINDOW) & BIT_MASK_OCP_WINDOW)
-
-/* 2 REG_SDIO_HISR (Offset 0x10250018) */
-
-#define BIT_RX_REQUEST BIT(0)
-
-/* 2 REG_RSV_CTRL (Offset 0x001C) */
-
-#define BIT_HREG_DBG BIT(23)
-
-/* 2 REG_RSV_CTRL (Offset 0x001C) */
-
-#define BIT_WLMCUIOIF BIT(8)
-
-/* 2 REG_RSV_CTRL (Offset 0x001C) */
-
-#define BIT_LOCK_ALL_EN BIT(7)
-
-/* 2 REG_RSV_CTRL (Offset 0x001C) */
-
-#define BIT_R_DIS_PRST BIT(6)
-
-/* 2 REG_RSV_CTRL (Offset 0x001C) */
-
-#define BIT_WLOCK_1C_B6 BIT(5)
-
-/* 2 REG_RSV_CTRL (Offset 0x001C) */
-
-#define BIT_WLOCK_40 BIT(4)
-#define BIT_WLOCK_08 BIT(3)
-#define BIT_WLOCK_04 BIT(2)
-#define BIT_WLOCK_00 BIT(1)
-#define BIT_WLOCK_ALL BIT(0)
-
-/* 2 REG_SDIO_RX_REQ_LEN (Offset 0x1025001C) */
-
-#define BIT_SHIFT_RX_REQ_LEN_V1 0
-#define BIT_MASK_RX_REQ_LEN_V1 0x3ffff
-#define BIT_RX_REQ_LEN_V1(x) \
- (((x) & BIT_MASK_RX_REQ_LEN_V1) << BIT_SHIFT_RX_REQ_LEN_V1)
-#define BIT_GET_RX_REQ_LEN_V1(x) \
- (((x) >> BIT_SHIFT_RX_REQ_LEN_V1) & BIT_MASK_RX_REQ_LEN_V1)
-
-/* 2 REG_RF_CTRL (Offset 0x001F) */
-
-#define BIT_RF_SDMRSTB BIT(2)
-
-/* 2 REG_RF_CTRL (Offset 0x001F) */
-
-#define BIT_RF_RSTB BIT(1)
-
-/* 2 REG_RF_CTRL (Offset 0x001F) */
-
-#define BIT_RF_EN BIT(0)
-
-/* 2 REG_SDIO_FREE_TXPG_SEQ_V1 (Offset 0x1025001F) */
-
-#define BIT_SHIFT_FREE_TXPG_SEQ 0
-#define BIT_MASK_FREE_TXPG_SEQ 0xff
-#define BIT_FREE_TXPG_SEQ(x) \
- (((x) & BIT_MASK_FREE_TXPG_SEQ) << BIT_SHIFT_FREE_TXPG_SEQ)
-#define BIT_GET_FREE_TXPG_SEQ(x) \
- (((x) >> BIT_SHIFT_FREE_TXPG_SEQ) & BIT_MASK_FREE_TXPG_SEQ)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_SHIFT_LPLDH12_RSV 29
-#define BIT_MASK_LPLDH12_RSV 0x7
-#define BIT_LPLDH12_RSV(x) \
- (((x) & BIT_MASK_LPLDH12_RSV) << BIT_SHIFT_LPLDH12_RSV)
-#define BIT_GET_LPLDH12_RSV(x) \
- (((x) >> BIT_SHIFT_LPLDH12_RSV) & BIT_MASK_LPLDH12_RSV)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_LPLDH12_SLP BIT(28)
-
-#define BIT_SHIFT_LPLDH12_VADJ 24
-#define BIT_MASK_LPLDH12_VADJ 0xf
-#define BIT_LPLDH12_VADJ(x) \
- (((x) & BIT_MASK_LPLDH12_VADJ) << BIT_SHIFT_LPLDH12_VADJ)
-#define BIT_GET_LPLDH12_VADJ(x) \
- (((x) >> BIT_SHIFT_LPLDH12_VADJ) & BIT_MASK_LPLDH12_VADJ)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_LDH12_EN BIT(16)
-
-/* 2 REG_SDIO_FREE_TXPG (Offset 0x10250020) */
-
-#define BIT_SHIFT_MID_FREEPG_V1 16
-#define BIT_MASK_MID_FREEPG_V1 0xfff
-#define BIT_MID_FREEPG_V1(x) \
- (((x) & BIT_MASK_MID_FREEPG_V1) << BIT_SHIFT_MID_FREEPG_V1)
-#define BIT_GET_MID_FREEPG_V1(x) \
- (((x) >> BIT_SHIFT_MID_FREEPG_V1) & BIT_MASK_MID_FREEPG_V1)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_WLBBOFF_BIG_PWC_EN BIT(14)
-#define BIT_WLBBOFF_SMALL_PWC_EN BIT(13)
-#define BIT_WLMACOFF_BIG_PWC_EN BIT(12)
-#define BIT_WLPON_PWC_EN BIT(11)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_POW_REGU_P1 BIT(10)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_LDOV12W_EN BIT(8)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_EX_XTAL_DRV_DIGI BIT(7)
-#define BIT_EX_XTAL_DRV_USB BIT(6)
-#define BIT_EX_XTAL_DRV_AFE BIT(5)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_EX_XTAL_DRV_RF2 BIT(4)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_EX_XTAL_DRV_RF1 BIT(3)
-#define BIT_POW_REGU_P0 BIT(2)
-
-/* 2 REG_AFE_LDO_CTRL (Offset 0x0020) */
-
-#define BIT_POW_PLL_LDO BIT(0)
-
-/* 2 REG_SDIO_FREE_TXPG (Offset 0x10250020) */
-
-#define BIT_SHIFT_HIQ_FREEPG_V1 0
-#define BIT_MASK_HIQ_FREEPG_V1 0xfff
-#define BIT_HIQ_FREEPG_V1(x) \
- (((x) & BIT_MASK_HIQ_FREEPG_V1) << BIT_SHIFT_HIQ_FREEPG_V1)
-#define BIT_GET_HIQ_FREEPG_V1(x) \
- (((x) >> BIT_SHIFT_HIQ_FREEPG_V1) & BIT_MASK_HIQ_FREEPG_V1)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_AGPIO_GPE BIT(31)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_XTAL_CAP_XI 25
-#define BIT_MASK_XTAL_CAP_XI 0x3f
-#define BIT_XTAL_CAP_XI(x) \
- (((x) & BIT_MASK_XTAL_CAP_XI) << BIT_SHIFT_XTAL_CAP_XI)
-#define BIT_GET_XTAL_CAP_XI(x) \
- (((x) >> BIT_SHIFT_XTAL_CAP_XI) & BIT_MASK_XTAL_CAP_XI)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_XTAL_DRV_DIGI 23
-#define BIT_MASK_XTAL_DRV_DIGI 0x3
-#define BIT_XTAL_DRV_DIGI(x) \
- (((x) & BIT_MASK_XTAL_DRV_DIGI) << BIT_SHIFT_XTAL_DRV_DIGI)
-#define BIT_GET_XTAL_DRV_DIGI(x) \
- (((x) >> BIT_SHIFT_XTAL_DRV_DIGI) & BIT_MASK_XTAL_DRV_DIGI)
-
-#define BIT_XTAL_DRV_USB_BIT1 BIT(22)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_MAC_CLK_SEL 20
-#define BIT_MASK_MAC_CLK_SEL 0x3
-#define BIT_MAC_CLK_SEL(x) \
- (((x) & BIT_MASK_MAC_CLK_SEL) << BIT_SHIFT_MAC_CLK_SEL)
-#define BIT_GET_MAC_CLK_SEL(x) \
- (((x) >> BIT_SHIFT_MAC_CLK_SEL) & BIT_MASK_MAC_CLK_SEL)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_XTAL_DRV_USB_BIT0 BIT(19)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_XTAL_DRV_AFE 17
-#define BIT_MASK_XTAL_DRV_AFE 0x3
-#define BIT_XTAL_DRV_AFE(x) \
- (((x) & BIT_MASK_XTAL_DRV_AFE) << BIT_SHIFT_XTAL_DRV_AFE)
-#define BIT_GET_XTAL_DRV_AFE(x) \
- (((x) >> BIT_SHIFT_XTAL_DRV_AFE) & BIT_MASK_XTAL_DRV_AFE)
-
-/* 2 REG_SDIO_FREE_TXPG2 (Offset 0x10250024) */
-
-#define BIT_SHIFT_PUB_FREEPG_V1 16
-#define BIT_MASK_PUB_FREEPG_V1 0xfff
-#define BIT_PUB_FREEPG_V1(x) \
- (((x) & BIT_MASK_PUB_FREEPG_V1) << BIT_SHIFT_PUB_FREEPG_V1)
-#define BIT_GET_PUB_FREEPG_V1(x) \
- (((x) >> BIT_SHIFT_PUB_FREEPG_V1) & BIT_MASK_PUB_FREEPG_V1)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_XTAL_DRV_RF2 15
-#define BIT_MASK_XTAL_DRV_RF2 0x3
-#define BIT_XTAL_DRV_RF2(x) \
- (((x) & BIT_MASK_XTAL_DRV_RF2) << BIT_SHIFT_XTAL_DRV_RF2)
-#define BIT_GET_XTAL_DRV_RF2(x) \
- (((x) >> BIT_SHIFT_XTAL_DRV_RF2) & BIT_MASK_XTAL_DRV_RF2)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_XTAL_DRV_RF1 13
-#define BIT_MASK_XTAL_DRV_RF1 0x3
-#define BIT_XTAL_DRV_RF1(x) \
- (((x) & BIT_MASK_XTAL_DRV_RF1) << BIT_SHIFT_XTAL_DRV_RF1)
-#define BIT_GET_XTAL_DRV_RF1(x) \
- (((x) >> BIT_SHIFT_XTAL_DRV_RF1) & BIT_MASK_XTAL_DRV_RF1)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_XTAL_DELAY_DIGI BIT(12)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_XTAL_DELAY_USB BIT(11)
-#define BIT_XTAL_DELAY_AFE BIT(10)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_XTAL_LDO_VREF 7
-#define BIT_MASK_XTAL_LDO_VREF 0x7
-#define BIT_XTAL_LDO_VREF(x) \
- (((x) & BIT_MASK_XTAL_LDO_VREF) << BIT_SHIFT_XTAL_LDO_VREF)
-#define BIT_GET_XTAL_LDO_VREF(x) \
- (((x) >> BIT_SHIFT_XTAL_LDO_VREF) & BIT_MASK_XTAL_LDO_VREF)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_XTAL_XQSEL_RF BIT(6)
-#define BIT_XTAL_XQSEL BIT(5)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_XTAL_GMN_V2 3
-#define BIT_MASK_XTAL_GMN_V2 0x3
-#define BIT_XTAL_GMN_V2(x) \
- (((x) & BIT_MASK_XTAL_GMN_V2) << BIT_SHIFT_XTAL_GMN_V2)
-#define BIT_GET_XTAL_GMN_V2(x) \
- (((x) >> BIT_SHIFT_XTAL_GMN_V2) & BIT_MASK_XTAL_GMN_V2)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_SHIFT_XTAL_GMP_V2 1
-#define BIT_MASK_XTAL_GMP_V2 0x3
-#define BIT_XTAL_GMP_V2(x) \
- (((x) & BIT_MASK_XTAL_GMP_V2) << BIT_SHIFT_XTAL_GMP_V2)
-#define BIT_GET_XTAL_GMP_V2(x) \
- (((x) >> BIT_SHIFT_XTAL_GMP_V2) & BIT_MASK_XTAL_GMP_V2)
-
-/* 2 REG_AFE_CTRL1 (Offset 0x0024) */
-
-#define BIT_XTAL_EN BIT(0)
-
-/* 2 REG_SDIO_FREE_TXPG2 (Offset 0x10250024) */
-
-#define BIT_SHIFT_LOW_FREEPG_V1 0
-#define BIT_MASK_LOW_FREEPG_V1 0xfff
-#define BIT_LOW_FREEPG_V1(x) \
- (((x) & BIT_MASK_LOW_FREEPG_V1) << BIT_SHIFT_LOW_FREEPG_V1)
-#define BIT_GET_LOW_FREEPG_V1(x) \
- (((x) >> BIT_SHIFT_LOW_FREEPG_V1) & BIT_MASK_LOW_FREEPG_V1)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_REG_C3_V4 30
-#define BIT_MASK_REG_C3_V4 0x3
-#define BIT_REG_C3_V4(x) (((x) & BIT_MASK_REG_C3_V4) << BIT_SHIFT_REG_C3_V4)
-#define BIT_GET_REG_C3_V4(x) (((x) >> BIT_SHIFT_REG_C3_V4) & BIT_MASK_REG_C3_V4)
-
-#define BIT_REG_CP_BIT1 BIT(29)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_REG_RS_V4 26
-#define BIT_MASK_REG_RS_V4 0x7
-#define BIT_REG_RS_V4(x) (((x) & BIT_MASK_REG_RS_V4) << BIT_SHIFT_REG_RS_V4)
-#define BIT_GET_REG_RS_V4(x) (((x) >> BIT_SHIFT_REG_RS_V4) & BIT_MASK_REG_RS_V4)
-
-/* 2 REG_SDIO_OQT_FREE_TXPG_V1 (Offset 0x10250028) */
-
-#define BIT_SHIFT_NOAC_OQT_FREEPG_V1 24
-#define BIT_MASK_NOAC_OQT_FREEPG_V1 0xff
-#define BIT_NOAC_OQT_FREEPG_V1(x) \
- (((x) & BIT_MASK_NOAC_OQT_FREEPG_V1) << BIT_SHIFT_NOAC_OQT_FREEPG_V1)
-#define BIT_GET_NOAC_OQT_FREEPG_V1(x) \
- (((x) >> BIT_SHIFT_NOAC_OQT_FREEPG_V1) & BIT_MASK_NOAC_OQT_FREEPG_V1)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_REG__CS 24
-#define BIT_MASK_REG__CS 0x3
-#define BIT_REG__CS(x) (((x) & BIT_MASK_REG__CS) << BIT_SHIFT_REG__CS)
-#define BIT_GET_REG__CS(x) (((x) >> BIT_SHIFT_REG__CS) & BIT_MASK_REG__CS)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_REG_CP_OFFSET 21
-#define BIT_MASK_REG_CP_OFFSET 0x7
-#define BIT_REG_CP_OFFSET(x) \
- (((x) & BIT_MASK_REG_CP_OFFSET) << BIT_SHIFT_REG_CP_OFFSET)
-#define BIT_GET_REG_CP_OFFSET(x) \
- (((x) >> BIT_SHIFT_REG_CP_OFFSET) & BIT_MASK_REG_CP_OFFSET)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_CP_BIAS 18
-#define BIT_MASK_CP_BIAS 0x7
-#define BIT_CP_BIAS(x) (((x) & BIT_MASK_CP_BIAS) << BIT_SHIFT_CP_BIAS)
-#define BIT_GET_CP_BIAS(x) (((x) >> BIT_SHIFT_CP_BIAS) & BIT_MASK_CP_BIAS)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_REG_IDOUBLE_V2 BIT(17)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_EN_SYN BIT(16)
-
-#define BIT_SHIFT_AC_OQT_FREEPG_V1 16
-#define BIT_MASK_AC_OQT_FREEPG_V1 0xff
-#define BIT_AC_OQT_FREEPG_V1(x) \
- (((x) & BIT_MASK_AC_OQT_FREEPG_V1) << BIT_SHIFT_AC_OQT_FREEPG_V1)
-#define BIT_GET_AC_OQT_FREEPG_V1(x) \
- (((x) >> BIT_SHIFT_AC_OQT_FREEPG_V1) & BIT_MASK_AC_OQT_FREEPG_V1)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_MCCO 14
-#define BIT_MASK_MCCO 0x3
-#define BIT_MCCO(x) (((x) & BIT_MASK_MCCO) << BIT_SHIFT_MCCO)
-#define BIT_GET_MCCO(x) (((x) >> BIT_SHIFT_MCCO) & BIT_MASK_MCCO)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_REG_LDO_SEL 12
-#define BIT_MASK_REG_LDO_SEL 0x3
-#define BIT_REG_LDO_SEL(x) \
- (((x) & BIT_MASK_REG_LDO_SEL) << BIT_SHIFT_REG_LDO_SEL)
-#define BIT_GET_REG_LDO_SEL(x) \
- (((x) >> BIT_SHIFT_REG_LDO_SEL) & BIT_MASK_REG_LDO_SEL)
-
-#define BIT_REG_KVCO_V2 BIT(10)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_AGPIO_GPO BIT(9)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_AGPIO_DRV 7
-#define BIT_MASK_AGPIO_DRV 0x3
-#define BIT_AGPIO_DRV(x) (((x) & BIT_MASK_AGPIO_DRV) << BIT_SHIFT_AGPIO_DRV)
-#define BIT_GET_AGPIO_DRV(x) (((x) >> BIT_SHIFT_AGPIO_DRV) & BIT_MASK_AGPIO_DRV)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_SHIFT_XTAL_CAP_XO 1
-#define BIT_MASK_XTAL_CAP_XO 0x3f
-#define BIT_XTAL_CAP_XO(x) \
- (((x) & BIT_MASK_XTAL_CAP_XO) << BIT_SHIFT_XTAL_CAP_XO)
-#define BIT_GET_XTAL_CAP_XO(x) \
- (((x) >> BIT_SHIFT_XTAL_CAP_XO) & BIT_MASK_XTAL_CAP_XO)
-
-/* 2 REG_AFE_CTRL2 (Offset 0x0028) */
-
-#define BIT_POW_PLL BIT(0)
-
-/* 2 REG_SDIO_OQT_FREE_TXPG_V1 (Offset 0x10250028) */
-
-#define BIT_SHIFT_EXQ_FREEPG_V1 0
-#define BIT_MASK_EXQ_FREEPG_V1 0xfff
-#define BIT_EXQ_FREEPG_V1(x) \
- (((x) & BIT_MASK_EXQ_FREEPG_V1) << BIT_SHIFT_EXQ_FREEPG_V1)
-#define BIT_GET_EXQ_FREEPG_V1(x) \
- (((x) >> BIT_SHIFT_EXQ_FREEPG_V1) & BIT_MASK_EXQ_FREEPG_V1)
-
-/* 2 REG_AFE_CTRL3 (Offset 0x002C) */
-
-#define BIT_SHIFT_PS 7
-#define BIT_MASK_PS 0x7
-#define BIT_PS(x) (((x) & BIT_MASK_PS) << BIT_SHIFT_PS)
-#define BIT_GET_PS(x) (((x) >> BIT_SHIFT_PS) & BIT_MASK_PS)
-
-/* 2 REG_AFE_CTRL3 (Offset 0x002C) */
-
-#define BIT_PSEN BIT(6)
-#define BIT_DOGENB BIT(5)
-
-/* 2 REG_AFE_CTRL3 (Offset 0x002C) */
-
-#define BIT_REG_MBIAS BIT(4)
-
-/* 2 REG_AFE_CTRL3 (Offset 0x002C) */
-
-#define BIT_SHIFT_REG_R3_V4 1
-#define BIT_MASK_REG_R3_V4 0x7
-#define BIT_REG_R3_V4(x) (((x) & BIT_MASK_REG_R3_V4) << BIT_SHIFT_REG_R3_V4)
-#define BIT_GET_REG_R3_V4(x) (((x) >> BIT_SHIFT_REG_R3_V4) & BIT_MASK_REG_R3_V4)
-
-/* 2 REG_AFE_CTRL3 (Offset 0x002C) */
-
-#define BIT_REG_CP_BIT0 BIT(0)
-
-/* 2 REG_EFUSE_CTRL (Offset 0x0030) */
-
-#define BIT_EF_FLAG BIT(31)
-
-#define BIT_SHIFT_EF_PGPD 28
-#define BIT_MASK_EF_PGPD 0x7
-#define BIT_EF_PGPD(x) (((x) & BIT_MASK_EF_PGPD) << BIT_SHIFT_EF_PGPD)
-#define BIT_GET_EF_PGPD(x) (((x) >> BIT_SHIFT_EF_PGPD) & BIT_MASK_EF_PGPD)
-
-#define BIT_SHIFT_EF_RDT 24
-#define BIT_MASK_EF_RDT 0xf
-#define BIT_EF_RDT(x) (((x) & BIT_MASK_EF_RDT) << BIT_SHIFT_EF_RDT)
-#define BIT_GET_EF_RDT(x) (((x) >> BIT_SHIFT_EF_RDT) & BIT_MASK_EF_RDT)
-
-#define BIT_SHIFT_EF_PGTS 20
-#define BIT_MASK_EF_PGTS 0xf
-#define BIT_EF_PGTS(x) (((x) & BIT_MASK_EF_PGTS) << BIT_SHIFT_EF_PGTS)
-#define BIT_GET_EF_PGTS(x) (((x) >> BIT_SHIFT_EF_PGTS) & BIT_MASK_EF_PGTS)
-
-/* 2 REG_EFUSE_CTRL (Offset 0x0030) */
-
-#define BIT_EF_PDWN BIT(19)
-
-/* 2 REG_EFUSE_CTRL (Offset 0x0030) */
-
-#define BIT_EF_ALDEN BIT(18)
-
-/* 2 REG_SDIO_HTSFR_INFO (Offset 0x10250030) */
-
-#define BIT_SHIFT_HTSFR1 16
-#define BIT_MASK_HTSFR1 0xffff
-#define BIT_HTSFR1(x) (((x) & BIT_MASK_HTSFR1) << BIT_SHIFT_HTSFR1)
-#define BIT_GET_HTSFR1(x) (((x) >> BIT_SHIFT_HTSFR1) & BIT_MASK_HTSFR1)
-
-/* 2 REG_EFUSE_CTRL (Offset 0x0030) */
-
-#define BIT_SHIFT_EF_ADDR 8
-#define BIT_MASK_EF_ADDR 0x3ff
-#define BIT_EF_ADDR(x) (((x) & BIT_MASK_EF_ADDR) << BIT_SHIFT_EF_ADDR)
-#define BIT_GET_EF_ADDR(x) (((x) >> BIT_SHIFT_EF_ADDR) & BIT_MASK_EF_ADDR)
-
-#define BIT_SHIFT_EF_DATA 0
-#define BIT_MASK_EF_DATA 0xff
-#define BIT_EF_DATA(x) (((x) & BIT_MASK_EF_DATA) << BIT_SHIFT_EF_DATA)
-#define BIT_GET_EF_DATA(x) (((x) >> BIT_SHIFT_EF_DATA) & BIT_MASK_EF_DATA)
-
-/* 2 REG_SDIO_HTSFR_INFO (Offset 0x10250030) */
-
-#define BIT_SHIFT_HTSFR0 0
-#define BIT_MASK_HTSFR0 0xffff
-#define BIT_HTSFR0(x) (((x) & BIT_MASK_HTSFR0) << BIT_SHIFT_HTSFR0)
-#define BIT_GET_HTSFR0(x) (((x) >> BIT_SHIFT_HTSFR0) & BIT_MASK_HTSFR0)
-
-/* 2 REG_LDO_EFUSE_CTRL (Offset 0x0034) */
-
-#define BIT_LDOE25_EN BIT(31)
-
-/* 2 REG_LDO_EFUSE_CTRL (Offset 0x0034) */
-
-#define BIT_SHIFT_LDOE25_V12ADJ_L 27
-#define BIT_MASK_LDOE25_V12ADJ_L 0xf
-#define BIT_LDOE25_V12ADJ_L(x) \
- (((x) & BIT_MASK_LDOE25_V12ADJ_L) << BIT_SHIFT_LDOE25_V12ADJ_L)
-#define BIT_GET_LDOE25_V12ADJ_L(x) \
- (((x) >> BIT_SHIFT_LDOE25_V12ADJ_L) & BIT_MASK_LDOE25_V12ADJ_L)
-
-/* 2 REG_LDO_EFUSE_CTRL (Offset 0x0034) */
-
-#define BIT_EF_CRES_SEL BIT(26)
-
-/* 2 REG_LDO_EFUSE_CTRL (Offset 0x0034) */
-
-#define BIT_SHIFT_EF_SCAN_START_V1 16
-#define BIT_MASK_EF_SCAN_START_V1 0x3ff
-#define BIT_EF_SCAN_START_V1(x) \
- (((x) & BIT_MASK_EF_SCAN_START_V1) << BIT_SHIFT_EF_SCAN_START_V1)
-#define BIT_GET_EF_SCAN_START_V1(x) \
- (((x) >> BIT_SHIFT_EF_SCAN_START_V1) & BIT_MASK_EF_SCAN_START_V1)
-
-/* 2 REG_LDO_EFUSE_CTRL (Offset 0x0034) */
-
-#define BIT_SHIFT_EF_SCAN_END 12
-#define BIT_MASK_EF_SCAN_END 0xf
-#define BIT_EF_SCAN_END(x) \
- (((x) & BIT_MASK_EF_SCAN_END) << BIT_SHIFT_EF_SCAN_END)
-#define BIT_GET_EF_SCAN_END(x) \
- (((x) >> BIT_SHIFT_EF_SCAN_END) & BIT_MASK_EF_SCAN_END)
-
-/* 2 REG_LDO_EFUSE_CTRL (Offset 0x0034) */
-
-#define BIT_EF_PD_DIS BIT(11)
-
-/* 2 REG_LDO_EFUSE_CTRL (Offset 0x0034) */
-
-#define BIT_SHIFT_EF_CELL_SEL 8
-#define BIT_MASK_EF_CELL_SEL 0x3
-#define BIT_EF_CELL_SEL(x) \
- (((x) & BIT_MASK_EF_CELL_SEL) << BIT_SHIFT_EF_CELL_SEL)
-#define BIT_GET_EF_CELL_SEL(x) \
- (((x) >> BIT_SHIFT_EF_CELL_SEL) & BIT_MASK_EF_CELL_SEL)
-
-/* 2 REG_LDO_EFUSE_CTRL (Offset 0x0034) */
-
-#define BIT_EF_TRPT BIT(7)
-
-#define BIT_SHIFT_EF_TTHD 0
-#define BIT_MASK_EF_TTHD 0x7f
-#define BIT_EF_TTHD(x) (((x) & BIT_MASK_EF_TTHD) << BIT_SHIFT_EF_TTHD)
-#define BIT_GET_EF_TTHD(x) (((x) >> BIT_SHIFT_EF_TTHD) & BIT_MASK_EF_TTHD)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SHIFT_DBG_SEL_V1 16
-#define BIT_MASK_DBG_SEL_V1 0xff
-#define BIT_DBG_SEL_V1(x) (((x) & BIT_MASK_DBG_SEL_V1) << BIT_SHIFT_DBG_SEL_V1)
-#define BIT_GET_DBG_SEL_V1(x) \
- (((x) >> BIT_SHIFT_DBG_SEL_V1) & BIT_MASK_DBG_SEL_V1)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SHIFT_DBG_SEL_BYTE 14
-#define BIT_MASK_DBG_SEL_BYTE 0x3
-#define BIT_DBG_SEL_BYTE(x) \
- (((x) & BIT_MASK_DBG_SEL_BYTE) << BIT_SHIFT_DBG_SEL_BYTE)
-#define BIT_GET_DBG_SEL_BYTE(x) \
- (((x) >> BIT_SHIFT_DBG_SEL_BYTE) & BIT_MASK_DBG_SEL_BYTE)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SHIFT_STD_L1_V1 12
-#define BIT_MASK_STD_L1_V1 0x3
-#define BIT_STD_L1_V1(x) (((x) & BIT_MASK_STD_L1_V1) << BIT_SHIFT_STD_L1_V1)
-#define BIT_GET_STD_L1_V1(x) (((x) >> BIT_SHIFT_STD_L1_V1) & BIT_MASK_STD_L1_V1)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SYSON_DBG_PAD_E2 BIT(11)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SYSON_LED_PAD_E2 BIT(10)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SYSON_GPEE_PAD_E2 BIT(9)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SYSON_PCI_PAD_E2 BIT(8)
-
-#define BIT_SHIFT_MATCH_CNT 8
-#define BIT_MASK_MATCH_CNT 0xff
-#define BIT_MATCH_CNT(x) (((x) & BIT_MASK_MATCH_CNT) << BIT_SHIFT_MATCH_CNT)
-#define BIT_GET_MATCH_CNT(x) (((x) >> BIT_SHIFT_MATCH_CNT) & BIT_MASK_MATCH_CNT)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_AUTO_SW_LDO_VOL_EN BIT(7)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SHIFT_SYSON_SPS0WWV_WT 4
-#define BIT_MASK_SYSON_SPS0WWV_WT 0x3
-#define BIT_SYSON_SPS0WWV_WT(x) \
- (((x) & BIT_MASK_SYSON_SPS0WWV_WT) << BIT_SHIFT_SYSON_SPS0WWV_WT)
-#define BIT_GET_SYSON_SPS0WWV_WT(x) \
- (((x) >> BIT_SHIFT_SYSON_SPS0WWV_WT) & BIT_MASK_SYSON_SPS0WWV_WT)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SHIFT_SYSON_SPS0LDO_WT 2
-#define BIT_MASK_SYSON_SPS0LDO_WT 0x3
-#define BIT_SYSON_SPS0LDO_WT(x) \
- (((x) & BIT_MASK_SYSON_SPS0LDO_WT) << BIT_SHIFT_SYSON_SPS0LDO_WT)
-#define BIT_GET_SYSON_SPS0LDO_WT(x) \
- (((x) >> BIT_SHIFT_SYSON_SPS0LDO_WT) & BIT_MASK_SYSON_SPS0LDO_WT)
-
-/* 2 REG_PWR_OPTION_CTRL (Offset 0x0038) */
-
-#define BIT_SHIFT_SYSON_RCLK_SCALE 0
-#define BIT_MASK_SYSON_RCLK_SCALE 0x3
-#define BIT_SYSON_RCLK_SCALE(x) \
- (((x) & BIT_MASK_SYSON_RCLK_SCALE) << BIT_SHIFT_SYSON_RCLK_SCALE)
-#define BIT_GET_SYSON_RCLK_SCALE(x) \
- (((x) >> BIT_SHIFT_SYSON_RCLK_SCALE) & BIT_MASK_SYSON_RCLK_SCALE)
-
-/* 2 REG_SDIO_HCPWM1_V2 (Offset 0x10250038) */
-
-#define BIT_SYS_CLK BIT(0)
-
-/* 2 REG_CAL_TIMER (Offset 0x003C) */
-
-#define BIT_SHIFT_CAL_SCAL 0
-#define BIT_MASK_CAL_SCAL 0xff
-#define BIT_CAL_SCAL(x) (((x) & BIT_MASK_CAL_SCAL) << BIT_SHIFT_CAL_SCAL)
-#define BIT_GET_CAL_SCAL(x) (((x) >> BIT_SHIFT_CAL_SCAL) & BIT_MASK_CAL_SCAL)
-
-/* 2 REG_ACLK_MON (Offset 0x003E) */
-
-#define BIT_SHIFT_RCLK_MON 5
-#define BIT_MASK_RCLK_MON 0x7ff
-#define BIT_RCLK_MON(x) (((x) & BIT_MASK_RCLK_MON) << BIT_SHIFT_RCLK_MON)
-#define BIT_GET_RCLK_MON(x) (((x) >> BIT_SHIFT_RCLK_MON) & BIT_MASK_RCLK_MON)
-
-#define BIT_CAL_EN BIT(4)
-
-#define BIT_SHIFT_DPSTU 2
-#define BIT_MASK_DPSTU 0x3
-#define BIT_DPSTU(x) (((x) & BIT_MASK_DPSTU) << BIT_SHIFT_DPSTU)
-#define BIT_GET_DPSTU(x) (((x) >> BIT_SHIFT_DPSTU) & BIT_MASK_DPSTU)
-
-#define BIT_SUS_16X BIT(1)
-
-/* 2 REG_SDIO_INDIRECT_REG_CFG (Offset 0x10250040) */
-
-#define BIT_INDIRECT_REG_RDY BIT(20)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_FSPI_EN BIT(19)
-
-/* 2 REG_SDIO_INDIRECT_REG_CFG (Offset 0x10250040) */
-
-#define BIT_INDIRECT_REG_R BIT(19)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_WL_RTS_EXT_32K_SEL BIT(18)
-
-/* 2 REG_SDIO_INDIRECT_REG_CFG (Offset 0x10250040) */
-
-#define BIT_INDIRECT_REG_W BIT(18)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_WLGP_SPI_EN BIT(16)
-
-/* 2 REG_SDIO_INDIRECT_REG_CFG (Offset 0x10250040) */
-
-#define BIT_SHIFT_INDIRECT_REG_SIZE 16
-#define BIT_MASK_INDIRECT_REG_SIZE 0x3
-#define BIT_INDIRECT_REG_SIZE(x) \
- (((x) & BIT_MASK_INDIRECT_REG_SIZE) << BIT_SHIFT_INDIRECT_REG_SIZE)
-#define BIT_GET_INDIRECT_REG_SIZE(x) \
- (((x) >> BIT_SHIFT_INDIRECT_REG_SIZE) & BIT_MASK_INDIRECT_REG_SIZE)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_SIC_LBK BIT(15)
-#define BIT_ENHTP BIT(14)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_ENSIC BIT(12)
-#define BIT_SIC_SWRST BIT(11)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_PO_WIFI_PTA_PINS BIT(10)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_PO_BT_PTA_PINS BIT(9)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_ENUART BIT(8)
-
-#define BIT_SHIFT_BTMODE 6
-#define BIT_MASK_BTMODE 0x3
-#define BIT_BTMODE(x) (((x) & BIT_MASK_BTMODE) << BIT_SHIFT_BTMODE)
-#define BIT_GET_BTMODE(x) (((x) >> BIT_SHIFT_BTMODE) & BIT_MASK_BTMODE)
-
-#define BIT_ENBT BIT(5)
-#define BIT_EROM_EN BIT(4)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_WLRFE_6_7_EN BIT(3)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_WLRFE_4_5_EN BIT(2)
-
-/* 2 REG_GPIO_MUXCFG (Offset 0x0040) */
-
-#define BIT_SHIFT_GPIOSEL 0
-#define BIT_MASK_GPIOSEL 0x3
-#define BIT_GPIOSEL(x) (((x) & BIT_MASK_GPIOSEL) << BIT_SHIFT_GPIOSEL)
-#define BIT_GET_GPIOSEL(x) (((x) >> BIT_SHIFT_GPIOSEL) & BIT_MASK_GPIOSEL)
-
-/* 2 REG_SDIO_INDIRECT_REG_CFG (Offset 0x10250040) */
-
-#define BIT_SHIFT_INDIRECT_REG_ADDR 0
-#define BIT_MASK_INDIRECT_REG_ADDR 0xffff
-#define BIT_INDIRECT_REG_ADDR(x) \
- (((x) & BIT_MASK_INDIRECT_REG_ADDR) << BIT_SHIFT_INDIRECT_REG_ADDR)
-#define BIT_GET_INDIRECT_REG_ADDR(x) \
- (((x) >> BIT_SHIFT_INDIRECT_REG_ADDR) & BIT_MASK_INDIRECT_REG_ADDR)
-
-/* 2 REG_GPIO_PIN_CTRL (Offset 0x0044) */
-
-#define BIT_SHIFT_GPIO_MOD_7_TO_0 24
-#define BIT_MASK_GPIO_MOD_7_TO_0 0xff
-#define BIT_GPIO_MOD_7_TO_0(x) \
- (((x) & BIT_MASK_GPIO_MOD_7_TO_0) << BIT_SHIFT_GPIO_MOD_7_TO_0)
-#define BIT_GET_GPIO_MOD_7_TO_0(x) \
- (((x) >> BIT_SHIFT_GPIO_MOD_7_TO_0) & BIT_MASK_GPIO_MOD_7_TO_0)
-
-#define BIT_SHIFT_GPIO_IO_SEL_7_TO_0 16
-#define BIT_MASK_GPIO_IO_SEL_7_TO_0 0xff
-#define BIT_GPIO_IO_SEL_7_TO_0(x) \
- (((x) & BIT_MASK_GPIO_IO_SEL_7_TO_0) << BIT_SHIFT_GPIO_IO_SEL_7_TO_0)
-#define BIT_GET_GPIO_IO_SEL_7_TO_0(x) \
- (((x) >> BIT_SHIFT_GPIO_IO_SEL_7_TO_0) & BIT_MASK_GPIO_IO_SEL_7_TO_0)
-
-#define BIT_SHIFT_GPIO_OUT_7_TO_0 8
-#define BIT_MASK_GPIO_OUT_7_TO_0 0xff
-#define BIT_GPIO_OUT_7_TO_0(x) \
- (((x) & BIT_MASK_GPIO_OUT_7_TO_0) << BIT_SHIFT_GPIO_OUT_7_TO_0)
-#define BIT_GET_GPIO_OUT_7_TO_0(x) \
- (((x) >> BIT_SHIFT_GPIO_OUT_7_TO_0) & BIT_MASK_GPIO_OUT_7_TO_0)
-
-#define BIT_SHIFT_GPIO_IN_7_TO_0 0
-#define BIT_MASK_GPIO_IN_7_TO_0 0xff
-#define BIT_GPIO_IN_7_TO_0(x) \
- (((x) & BIT_MASK_GPIO_IN_7_TO_0) << BIT_SHIFT_GPIO_IN_7_TO_0)
-#define BIT_GET_GPIO_IN_7_TO_0(x) \
- (((x) >> BIT_SHIFT_GPIO_IN_7_TO_0) & BIT_MASK_GPIO_IN_7_TO_0)
-
-/* 2 REG_SDIO_INDIRECT_REG_DATA (Offset 0x10250044) */
-
-#define BIT_SHIFT_INDIRECT_REG_DATA 0
-#define BIT_MASK_INDIRECT_REG_DATA 0xffffffffL
-#define BIT_INDIRECT_REG_DATA(x) \
- (((x) & BIT_MASK_INDIRECT_REG_DATA) << BIT_SHIFT_INDIRECT_REG_DATA)
-#define BIT_GET_INDIRECT_REG_DATA(x) \
- (((x) >> BIT_SHIFT_INDIRECT_REG_DATA) & BIT_MASK_INDIRECT_REG_DATA)
-
-/* 2 REG_GPIO_INTM (Offset 0x0048) */
-
-#define BIT_SHIFT_MUXDBG_SEL 30
-#define BIT_MASK_MUXDBG_SEL 0x3
-#define BIT_MUXDBG_SEL(x) (((x) & BIT_MASK_MUXDBG_SEL) << BIT_SHIFT_MUXDBG_SEL)
-#define BIT_GET_MUXDBG_SEL(x) \
- (((x) >> BIT_SHIFT_MUXDBG_SEL) & BIT_MASK_MUXDBG_SEL)
-
-/* 2 REG_GPIO_INTM (Offset 0x0048) */
-
-#define BIT_EXTWOL_SEL BIT(17)
-
-/* 2 REG_GPIO_INTM (Offset 0x0048) */
-
-#define BIT_EXTWOL_EN BIT(16)
-
-/* 2 REG_GPIO_INTM (Offset 0x0048) */
-
-#define BIT_GPIOF_INT_MD BIT(15)
-#define BIT_GPIOE_INT_MD BIT(14)
-#define BIT_GPIOD_INT_MD BIT(13)
-#define BIT_GPIOC_INT_MD BIT(12)
-#define BIT_GPIOB_INT_MD BIT(11)
-#define BIT_GPIOA_INT_MD BIT(10)
-#define BIT_GPIO9_INT_MD BIT(9)
-#define BIT_GPIO8_INT_MD BIT(8)
-#define BIT_GPIO7_INT_MD BIT(7)
-#define BIT_GPIO6_INT_MD BIT(6)
-#define BIT_GPIO5_INT_MD BIT(5)
-#define BIT_GPIO4_INT_MD BIT(4)
-#define BIT_GPIO3_INT_MD BIT(3)
-#define BIT_GPIO2_INT_MD BIT(2)
-#define BIT_GPIO1_INT_MD BIT(1)
-#define BIT_GPIO0_INT_MD BIT(0)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_GPIO3_WL_CTRL_EN BIT(27)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_LNAON_SEL_EN BIT(26)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_PAPE_SEL_EN BIT(25)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_DPDT_WLBT_SEL BIT(24)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_DPDT_SEL_EN BIT(23)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_GPIO13_14_WL_CTRL_EN BIT(22)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_LED2DIS BIT(21)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_LED2PL BIT(20)
-#define BIT_LED2SV BIT(19)
-
-#define BIT_SHIFT_LED2CM 16
-#define BIT_MASK_LED2CM 0x7
-#define BIT_LED2CM(x) (((x) & BIT_MASK_LED2CM) << BIT_SHIFT_LED2CM)
-#define BIT_GET_LED2CM(x) (((x) >> BIT_SHIFT_LED2CM) & BIT_MASK_LED2CM)
-
-#define BIT_LED1DIS BIT(15)
-#define BIT_LED1PL BIT(12)
-#define BIT_LED1SV BIT(11)
-
-#define BIT_SHIFT_LED1CM 8
-#define BIT_MASK_LED1CM 0x7
-#define BIT_LED1CM(x) (((x) & BIT_MASK_LED1CM) << BIT_SHIFT_LED1CM)
-#define BIT_GET_LED1CM(x) (((x) >> BIT_SHIFT_LED1CM) & BIT_MASK_LED1CM)
-
-#define BIT_LED0DIS BIT(7)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_SHIFT_AFE_LDO_SWR_CHECK 5
-#define BIT_MASK_AFE_LDO_SWR_CHECK 0x3
-#define BIT_AFE_LDO_SWR_CHECK(x) \
- (((x) & BIT_MASK_AFE_LDO_SWR_CHECK) << BIT_SHIFT_AFE_LDO_SWR_CHECK)
-#define BIT_GET_AFE_LDO_SWR_CHECK(x) \
- (((x) >> BIT_SHIFT_AFE_LDO_SWR_CHECK) & BIT_MASK_AFE_LDO_SWR_CHECK)
-
-/* 2 REG_LED_CFG (Offset 0x004C) */
-
-#define BIT_LED0PL BIT(4)
-#define BIT_LED0SV BIT(3)
-
-#define BIT_SHIFT_LED0CM 0
-#define BIT_MASK_LED0CM 0x7
-#define BIT_LED0CM(x) (((x) & BIT_MASK_LED0CM) << BIT_SHIFT_LED0CM)
-#define BIT_GET_LED0CM(x) (((x) >> BIT_SHIFT_LED0CM) & BIT_MASK_LED0CM)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_PDNINT_EN BIT(31)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_NFC_INT_PAD_EN BIT(30)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_SPS_OCP_INT_EN BIT(29)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_PWMERR_INT_EN BIT(28)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIOF_INT_EN BIT(27)
-#define BIT_FS_GPIOE_INT_EN BIT(26)
-#define BIT_FS_GPIOD_INT_EN BIT(25)
-#define BIT_FS_GPIOC_INT_EN BIT(24)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIOB_INT_EN BIT(23)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIOA_INT_EN BIT(22)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO9_INT_EN BIT(21)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO8_INT_EN BIT(20)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO7_INT_EN BIT(19)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO6_INT_EN BIT(18)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO5_INT_EN BIT(17)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO4_INT_EN BIT(16)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO3_INT_EN BIT(15)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO2_INT_EN BIT(14)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO1_INT_EN BIT(13)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_GPIO0_INT_EN BIT(12)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_HCI_SUS_EN BIT(11)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_HCI_RES_EN BIT(10)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_HCI_RESET_EN BIT(9)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_BTON_STS_UPDATE_MSK_EN BIT(7)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_ACT2RECOVERY_INT_EN_V1 BIT(6)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_GEN1GEN2_SWITCH BIT(5)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_HCI_TXDMA_REQ_HIMR BIT(4)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_32K_LEAVE_SETTING_MAK BIT(3)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_32K_ENTER_SETTING_MAK BIT(2)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_USB_LPMRSM_MSK BIT(1)
-
-/* 2 REG_FSIMR (Offset 0x0050) */
-
-#define BIT_FS_USB_LPMINT_MSK BIT(0)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_PDNINT BIT(31)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_SPS_OCP_INT BIT(29)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_PWMERR_INT BIT(28)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIOF_INT BIT(27)
-#define BIT_FS_GPIOE_INT BIT(26)
-#define BIT_FS_GPIOD_INT BIT(25)
-#define BIT_FS_GPIOC_INT BIT(24)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIOB_INT BIT(23)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIOA_INT BIT(22)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO9_INT BIT(21)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO8_INT BIT(20)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO7_INT BIT(19)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO6_INT BIT(18)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO5_INT BIT(17)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO4_INT BIT(16)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO3_INT BIT(15)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO2_INT BIT(14)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO1_INT BIT(13)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_GPIO0_INT BIT(12)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_HCI_SUS_INT BIT(11)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_HCI_RES_INT BIT(10)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_HCI_RESET_INT BIT(9)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_ACT2RECOVERY BIT(6)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_HCI_TXDMA_REQ_HISR BIT(4)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_32K_LEAVE_SETTING_INT BIT(3)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_32K_ENTER_SETTING_INT BIT(2)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_USB_LPMRSM_INT BIT(1)
-
-/* 2 REG_FSISR (Offset 0x0054) */
-
-#define BIT_FS_USB_LPMINT_INT BIT(0)
-
-/* 2 REG_HSIMR (Offset 0x0058) */
-
-#define BIT_GPIOF_INT_EN BIT(31)
-#define BIT_GPIOE_INT_EN BIT(30)
-#define BIT_GPIOD_INT_EN BIT(29)
-#define BIT_GPIOC_INT_EN BIT(28)
-#define BIT_GPIOB_INT_EN BIT(27)
-#define BIT_GPIOA_INT_EN BIT(26)
-#define BIT_GPIO9_INT_EN BIT(25)
-#define BIT_GPIO8_INT_EN BIT(24)
-#define BIT_GPIO7_INT_EN BIT(23)
-#define BIT_GPIO6_INT_EN BIT(22)
-#define BIT_GPIO5_INT_EN BIT(21)
-#define BIT_GPIO4_INT_EN BIT(20)
-#define BIT_GPIO3_INT_EN BIT(19)
-
-/* 2 REG_HSIMR (Offset 0x0058) */
-
-#define BIT_GPIO1_INT_EN BIT(17)
-#define BIT_GPIO0_INT_EN BIT(16)
-
-/* 2 REG_HSIMR (Offset 0x0058) */
-
-#define BIT_GPIO2_INT_EN_V1 BIT(16)
-
-/* 2 REG_HSIMR (Offset 0x0058) */
-
-#define BIT_PDNINT_EN BIT(7)
-
-/* 2 REG_HSIMR (Offset 0x0058) */
-
-#define BIT_RON_INT_EN BIT(6)
-
-/* 2 REG_HSIMR (Offset 0x0058) */
-
-#define BIT_SPS_OCP_INT_EN BIT(5)
-
-/* 2 REG_HSIMR (Offset 0x0058) */
-
-#define BIT_GPIO15_0_INT_EN BIT(0)
-
-/* 2 REG_HSISR (Offset 0x005C) */
-
-#define BIT_GPIOF_INT BIT(31)
-#define BIT_GPIOE_INT BIT(30)
-#define BIT_GPIOD_INT BIT(29)
-#define BIT_GPIOC_INT BIT(28)
-#define BIT_GPIOB_INT BIT(27)
-#define BIT_GPIOA_INT BIT(26)
-#define BIT_GPIO9_INT BIT(25)
-#define BIT_GPIO8_INT BIT(24)
-#define BIT_GPIO7_INT BIT(23)
-
-/* 2 REG_HSISR (Offset 0x005C) */
-
-#define BIT_GPIO6_INT BIT(22)
-#define BIT_GPIO5_INT BIT(21)
-#define BIT_GPIO4_INT BIT(20)
-#define BIT_GPIO3_INT BIT(19)
-
-/* 2 REG_HSISR (Offset 0x005C) */
-
-#define BIT_GPIO1_INT BIT(17)
-#define BIT_GPIO0_INT BIT(16)
-
-/* 2 REG_HSISR (Offset 0x005C) */
-
-#define BIT_GPIO2_INT_V1 BIT(16)
-
-/* 2 REG_HSISR (Offset 0x005C) */
-
-#define BIT_PDNINT BIT(7)
-
-/* 2 REG_HSISR (Offset 0x005C) */
-
-#define BIT_RON_INT BIT(6)
-
-/* 2 REG_HSISR (Offset 0x005C) */
-
-#define BIT_SPS_OCP_INT BIT(5)
-
-/* 2 REG_HSISR (Offset 0x005C) */
-
-#define BIT_GPIO15_0_INT BIT(0)
-#define BIT_MCUFWDL_EN BIT(0)
-
-/* 2 REG_GPIO_EXT_CTRL (Offset 0x0060) */
-
-#define BIT_SHIFT_GPIO_MOD_15_TO_8 24
-#define BIT_MASK_GPIO_MOD_15_TO_8 0xff
-#define BIT_GPIO_MOD_15_TO_8(x) \
- (((x) & BIT_MASK_GPIO_MOD_15_TO_8) << BIT_SHIFT_GPIO_MOD_15_TO_8)
-#define BIT_GET_GPIO_MOD_15_TO_8(x) \
- (((x) >> BIT_SHIFT_GPIO_MOD_15_TO_8) & BIT_MASK_GPIO_MOD_15_TO_8)
-
-#define BIT_SHIFT_GPIO_IO_SEL_15_TO_8 16
-#define BIT_MASK_GPIO_IO_SEL_15_TO_8 0xff
-#define BIT_GPIO_IO_SEL_15_TO_8(x) \
- (((x) & BIT_MASK_GPIO_IO_SEL_15_TO_8) << BIT_SHIFT_GPIO_IO_SEL_15_TO_8)
-#define BIT_GET_GPIO_IO_SEL_15_TO_8(x) \
- (((x) >> BIT_SHIFT_GPIO_IO_SEL_15_TO_8) & BIT_MASK_GPIO_IO_SEL_15_TO_8)
-
-#define BIT_SHIFT_GPIO_OUT_15_TO_8 8
-#define BIT_MASK_GPIO_OUT_15_TO_8 0xff
-#define BIT_GPIO_OUT_15_TO_8(x) \
- (((x) & BIT_MASK_GPIO_OUT_15_TO_8) << BIT_SHIFT_GPIO_OUT_15_TO_8)
-#define BIT_GET_GPIO_OUT_15_TO_8(x) \
- (((x) >> BIT_SHIFT_GPIO_OUT_15_TO_8) & BIT_MASK_GPIO_OUT_15_TO_8)
-
-#define BIT_SHIFT_GPIO_IN_15_TO_8 0
-#define BIT_MASK_GPIO_IN_15_TO_8 0xff
-#define BIT_GPIO_IN_15_TO_8(x) \
- (((x) & BIT_MASK_GPIO_IN_15_TO_8) << BIT_SHIFT_GPIO_IN_15_TO_8)
-#define BIT_GET_GPIO_IN_15_TO_8(x) \
- (((x) >> BIT_SHIFT_GPIO_IN_15_TO_8) & BIT_MASK_GPIO_IN_15_TO_8)
-
-/* 2 REG_SDIO_H2C (Offset 0x10250060) */
-
-#define BIT_SHIFT_SDIO_H2C_MSG 0
-#define BIT_MASK_SDIO_H2C_MSG 0xffffffffL
-#define BIT_SDIO_H2C_MSG(x) \
- (((x) & BIT_MASK_SDIO_H2C_MSG) << BIT_SHIFT_SDIO_H2C_MSG)
-#define BIT_GET_SDIO_H2C_MSG(x) \
- (((x) >> BIT_SHIFT_SDIO_H2C_MSG) & BIT_MASK_SDIO_H2C_MSG)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_PAPE_WLBT_SEL BIT(29)
-#define BIT_LNAON_WLBT_SEL BIT(28)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_BTGP_GPG3_FEN BIT(26)
-#define BIT_BTGP_GPG2_FEN BIT(25)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_BTGP_JTAG_EN BIT(24)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_XTAL_CLK_EXTARNAL_EN BIT(23)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_BTGP_UART0_EN BIT(22)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_BTGP_UART1_EN BIT(21)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_BTGP_SPI_EN BIT(20)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_BTGP_GPIO_E2 BIT(19)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_BTGP_GPIO_EN BIT(18)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_SHIFT_BTGP_GPIO_SL 16
-#define BIT_MASK_BTGP_GPIO_SL 0x3
-#define BIT_BTGP_GPIO_SL(x) \
- (((x) & BIT_MASK_BTGP_GPIO_SL) << BIT_SHIFT_BTGP_GPIO_SL)
-#define BIT_GET_BTGP_GPIO_SL(x) \
- (((x) >> BIT_SHIFT_BTGP_GPIO_SL) & BIT_MASK_BTGP_GPIO_SL)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_PAD_SDIO_SR BIT(14)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_GPIO14_OUTPUT_PL BIT(13)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_HOST_WAKE_PAD_PULL_EN BIT(12)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_HOST_WAKE_PAD_SL BIT(11)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_PAD_LNAON_SR BIT(10)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_PAD_LNAON_E2 BIT(9)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_SW_LNAON_G_SEL_DATA BIT(8)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_SW_LNAON_A_SEL_DATA BIT(7)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_PAD_PAPE_SR BIT(6)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_PAD_PAPE_E2 BIT(5)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_SW_PAPE_G_SEL_DATA BIT(4)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_SW_PAPE_A_SEL_DATA BIT(3)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_PAD_DPDT_SR BIT(2)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_PAD_DPDT_PAD_E2 BIT(1)
-
-/* 2 REG_PAD_CTRL1 (Offset 0x0064) */
-
-#define BIT_SW_DPDT_SEL_DATA BIT(0)
-
-/* 2 REG_SDIO_C2H (Offset 0x10250064) */
-
-#define BIT_SHIFT_SDIO_C2H_MSG 0
-#define BIT_MASK_SDIO_C2H_MSG 0xffffffffL
-#define BIT_SDIO_C2H_MSG(x) \
- (((x) & BIT_MASK_SDIO_C2H_MSG) << BIT_SHIFT_SDIO_C2H_MSG)
-#define BIT_GET_SDIO_C2H_MSG(x) \
- (((x) >> BIT_SHIFT_SDIO_C2H_MSG) & BIT_MASK_SDIO_C2H_MSG)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_ISO_BD2PP BIT(31)
-#define BIT_LDOV12B_EN BIT(30)
-#define BIT_CKEN_BTGPS BIT(29)
-#define BIT_FEN_BTGPS BIT(28)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_MULRW BIT(27)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_BTCPU_BOOTSEL BIT(27)
-#define BIT_SPI_SPEEDUP BIT(26)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_DEVWAKE_PAD_TYPE_SEL BIT(24)
-#define BIT_CLKREQ_PAD_TYPE_SEL BIT(23)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_EN_CPL_TIMEOUT_PS BIT(22)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_ISO_BTPON2PP BIT(22)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_REG_TXDMA_FAIL_PS BIT(21)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_EN_HWENTR_L1 BIT(19)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_BT_HWROF_EN BIT(19)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_EN_ADV_CLKGATE BIT(18)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_BT_FUNC_EN BIT(18)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_BT_HWPDN_SL BIT(17)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_BT_DISN_EN BIT(16)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_BT_PDN_PULL_EN BIT(15)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_WL_PDN_PULL_EN BIT(14)
-#define BIT_EXTERNAL_REQUEST_PL BIT(13)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_GPIO0_2_3_PULL_LOW_EN BIT(12)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_ISO_BA2PP BIT(11)
-#define BIT_BT_AFE_LDO_EN BIT(10)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_BT_AFE_PLL_EN BIT(9)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_BT_DIG_CLK_EN BIT(8)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_WL_DRV_EXIST_IDX BIT(5)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_DOP_EHPAD BIT(4)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_WL_HWROF_EN BIT(3)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_WL_FUNC_EN BIT(2)
-
-/* 2 REG_WL_BT_PWR_CTRL (Offset 0x0068) */
-
-#define BIT_WL_HWPDN_SL BIT(1)
-#define BIT_WL_HWPDN_EN BIT(0)
-
-/* 2 REG_SDM_DEBUG (Offset 0x006C) */
-
-#define BIT_SHIFT_WLCLK_PHASE 0
-#define BIT_MASK_WLCLK_PHASE 0x1f
-#define BIT_WLCLK_PHASE(x) \
- (((x) & BIT_MASK_WLCLK_PHASE) << BIT_SHIFT_WLCLK_PHASE)
-#define BIT_GET_WLCLK_PHASE(x) \
- (((x) >> BIT_SHIFT_WLCLK_PHASE) & BIT_MASK_WLCLK_PHASE)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_DBG_GNT_WL_BT BIT(27)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_LTE_MUX_CTRL_PATH BIT(26)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_LTE_COEX_UART BIT(25)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_3W_LTE_WL_GPIO BIT(24)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_SDIO_INT_POLARITY BIT(19)
-#define BIT_SDIO_INT BIT(18)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_SDIO_OFF_EN BIT(17)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_SDIO_ON_EN BIT(16)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_PCIE_WAIT_TIMEOUT_EVENT BIT(10)
-#define BIT_PCIE_WAIT_TIME BIT(9)
-
-/* 2 REG_SYS_SDIO_CTRL (Offset 0x0070) */
-
-#define BIT_MPCIE_REFCLK_XTAL_SEL BIT(8)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_SHIFT_TSFT_SEL 29
-#define BIT_MASK_TSFT_SEL 0x7
-#define BIT_TSFT_SEL(x) (((x) & BIT_MASK_TSFT_SEL) << BIT_SHIFT_TSFT_SEL)
-#define BIT_GET_TSFT_SEL(x) (((x) >> BIT_SHIFT_TSFT_SEL) & BIT_MASK_TSFT_SEL)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_SHIFT_RPWM 24
-#define BIT_MASK_RPWM 0xff
-#define BIT_RPWM(x) (((x) & BIT_MASK_RPWM) << BIT_SHIFT_RPWM)
-#define BIT_GET_RPWM(x) (((x) >> BIT_SHIFT_RPWM) & BIT_MASK_RPWM)
-
-#define BIT_ROM_DLEN BIT(19)
-
-#define BIT_SHIFT_ROM_PGE 16
-#define BIT_MASK_ROM_PGE 0x7
-#define BIT_ROM_PGE(x) (((x) & BIT_MASK_ROM_PGE) << BIT_SHIFT_ROM_PGE)
-#define BIT_GET_ROM_PGE(x) (((x) >> BIT_SHIFT_ROM_PGE) & BIT_MASK_ROM_PGE)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_USB_HOST_PWR_OFF_EN BIT(12)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_SYM_LPS_BLOCK_EN BIT(11)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_USB_LPM_ACT_EN BIT(10)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_USB_LPM_NY BIT(9)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_USB_SUS_DIS BIT(8)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_SHIFT_SDIO_PAD_E 5
-#define BIT_MASK_SDIO_PAD_E 0x7
-#define BIT_SDIO_PAD_E(x) (((x) & BIT_MASK_SDIO_PAD_E) << BIT_SHIFT_SDIO_PAD_E)
-#define BIT_GET_SDIO_PAD_E(x) \
- (((x) >> BIT_SHIFT_SDIO_PAD_E) & BIT_MASK_SDIO_PAD_E)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_USB_LPPLL_EN BIT(4)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_ROP_SW15 BIT(2)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_PCI_CKRDY_OPT BIT(1)
-
-/* 2 REG_HCI_OPT_CTRL (Offset 0x0074) */
-
-#define BIT_PCI_VAUX_EN BIT(0)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_ZCD_HW_AUTO_EN BIT(27)
-#define BIT_ZCD_REGSEL BIT(26)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_SHIFT_AUTO_ZCD_IN_CODE 21
-#define BIT_MASK_AUTO_ZCD_IN_CODE 0x1f
-#define BIT_AUTO_ZCD_IN_CODE(x) \
- (((x) & BIT_MASK_AUTO_ZCD_IN_CODE) << BIT_SHIFT_AUTO_ZCD_IN_CODE)
-#define BIT_GET_AUTO_ZCD_IN_CODE(x) \
- (((x) >> BIT_SHIFT_AUTO_ZCD_IN_CODE) & BIT_MASK_AUTO_ZCD_IN_CODE)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_SHIFT_ZCD_CODE_IN_L 16
-#define BIT_MASK_ZCD_CODE_IN_L 0x1f
-#define BIT_ZCD_CODE_IN_L(x) \
- (((x) & BIT_MASK_ZCD_CODE_IN_L) << BIT_SHIFT_ZCD_CODE_IN_L)
-#define BIT_GET_ZCD_CODE_IN_L(x) \
- (((x) >> BIT_SHIFT_ZCD_CODE_IN_L) & BIT_MASK_ZCD_CODE_IN_L)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_SHIFT_LDO_HV5_DUMMY 14
-#define BIT_MASK_LDO_HV5_DUMMY 0x3
-#define BIT_LDO_HV5_DUMMY(x) \
- (((x) & BIT_MASK_LDO_HV5_DUMMY) << BIT_SHIFT_LDO_HV5_DUMMY)
-#define BIT_GET_LDO_HV5_DUMMY(x) \
- (((x) >> BIT_SHIFT_LDO_HV5_DUMMY) & BIT_MASK_LDO_HV5_DUMMY)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_SHIFT_REG_VTUNE33_BIT0_TO_BIT1 12
-#define BIT_MASK_REG_VTUNE33_BIT0_TO_BIT1 0x3
-#define BIT_REG_VTUNE33_BIT0_TO_BIT1(x) \
- (((x) & BIT_MASK_REG_VTUNE33_BIT0_TO_BIT1) \
- << BIT_SHIFT_REG_VTUNE33_BIT0_TO_BIT1)
-#define BIT_GET_REG_VTUNE33_BIT0_TO_BIT1(x) \
- (((x) >> BIT_SHIFT_REG_VTUNE33_BIT0_TO_BIT1) & \
- BIT_MASK_REG_VTUNE33_BIT0_TO_BIT1)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_SHIFT_REG_STANDBY33_BIT0_TO_BIT1 10
-#define BIT_MASK_REG_STANDBY33_BIT0_TO_BIT1 0x3
-#define BIT_REG_STANDBY33_BIT0_TO_BIT1(x) \
- (((x) & BIT_MASK_REG_STANDBY33_BIT0_TO_BIT1) \
- << BIT_SHIFT_REG_STANDBY33_BIT0_TO_BIT1)
-#define BIT_GET_REG_STANDBY33_BIT0_TO_BIT1(x) \
- (((x) >> BIT_SHIFT_REG_STANDBY33_BIT0_TO_BIT1) & \
- BIT_MASK_REG_STANDBY33_BIT0_TO_BIT1)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_SHIFT_REG_LOAD33_BIT0_TO_BIT1 8
-#define BIT_MASK_REG_LOAD33_BIT0_TO_BIT1 0x3
-#define BIT_REG_LOAD33_BIT0_TO_BIT1(x) \
- (((x) & BIT_MASK_REG_LOAD33_BIT0_TO_BIT1) \
- << BIT_SHIFT_REG_LOAD33_BIT0_TO_BIT1)
-#define BIT_GET_REG_LOAD33_BIT0_TO_BIT1(x) \
- (((x) >> BIT_SHIFT_REG_LOAD33_BIT0_TO_BIT1) & \
- BIT_MASK_REG_LOAD33_BIT0_TO_BIT1)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_REG_BYPASS_L BIT(7)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_REG_LDOF_L BIT(6)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_REG_TYPE_L_V1 BIT(5)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_ARENB_L BIT(3)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_SHIFT_CFC_L 1
-#define BIT_MASK_CFC_L 0x3
-#define BIT_CFC_L(x) (((x) & BIT_MASK_CFC_L) << BIT_SHIFT_CFC_L)
-#define BIT_GET_CFC_L(x) (((x) >> BIT_SHIFT_CFC_L) & BIT_MASK_CFC_L)
-
-/* 2 REG_LDO_SWR_CTRL (Offset 0x007C) */
-
-#define BIT_REG_OCPS_L_V1 BIT(0)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_ANA_PORT_EN BIT(22)
-#define BIT_MAC_PORT_EN BIT(21)
-#define BIT_BOOT_FSPI_EN BIT(20)
-#define BIT_FW_INIT_RDY BIT(15)
-#define BIT_FW_DW_RDY BIT(14)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_SHIFT_CPU_CLK_SEL 12
-#define BIT_MASK_CPU_CLK_SEL 0x3
-#define BIT_CPU_CLK_SEL(x) \
- (((x) & BIT_MASK_CPU_CLK_SEL) << BIT_SHIFT_CPU_CLK_SEL)
-#define BIT_GET_CPU_CLK_SEL(x) \
- (((x) >> BIT_SHIFT_CPU_CLK_SEL) & BIT_MASK_CPU_CLK_SEL)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_CCLK_CHG_MASK BIT(11)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_EMEM__TXBUF_CHKSUM_OK BIT(10)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_EMEM_TXBUF_DW_RDY BIT(9)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_EMEM_CHKSUM_OK BIT(8)
-#define BIT_EMEM_DW_OK BIT(7)
-#define BIT_TOGGLING BIT(7)
-#define BIT_DMEM_CHKSUM_OK BIT(6)
-#define BIT_ACK BIT(6)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_DMEM_DW_OK BIT(5)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_IMEM_CHKSUM_OK BIT(4)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_IMEM_DW_OK BIT(3)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_IMEM_BOOT_LOAD_CHKSUM_OK BIT(2)
-
-/* 2 REG_MCUFW_CTRL (Offset 0x0080) */
-
-#define BIT_IMEM_BOOT_LOAD_DW_OK BIT(1)
-
-/* 2 REG_SDIO_HRPWM1 (Offset 0x10250080) */
-
-#define BIT_32K_PERMISSION BIT(0)
-
-/* 2 REG_MCU_TST_CFG (Offset 0x0084) */
-
-#define BIT_SHIFT_LBKTST 0
-#define BIT_MASK_LBKTST 0xffff
-#define BIT_LBKTST(x) (((x) & BIT_MASK_LBKTST) << BIT_SHIFT_LBKTST)
-#define BIT_GET_LBKTST(x) (((x) >> BIT_SHIFT_LBKTST) & BIT_MASK_LBKTST)
-
-/* 2 REG_SDIO_BUS_CTRL (Offset 0x10250085) */
-
-#define BIT_PAD_CLK_XHGE_EN BIT(3)
-#define BIT_INTER_CLK_EN BIT(2)
-#define BIT_EN_RPT_TXCRC BIT(1)
-#define BIT_DIS_RXDMA_STS BIT(0)
-
-/* 2 REG_SDIO_HSUS_CTRL (Offset 0x10250086) */
-
-#define BIT_INTR_CTRL BIT(4)
-#define BIT_SDIO_VOLTAGE BIT(3)
-#define BIT_BYPASS_INIT BIT(2)
-
-/* 2 REG_SDIO_HSUS_CTRL (Offset 0x10250086) */
-
-#define BIT_HCI_RESUME_RDY BIT(1)
-#define BIT_HCI_SUS_REQ BIT(0)
-
-/* 2 REG_HMEBOX_E0_E1 (Offset 0x0088) */
-
-#define BIT_SHIFT_HOST_MSG_E1 16
-#define BIT_MASK_HOST_MSG_E1 0xffff
-#define BIT_HOST_MSG_E1(x) \
- (((x) & BIT_MASK_HOST_MSG_E1) << BIT_SHIFT_HOST_MSG_E1)
-#define BIT_GET_HOST_MSG_E1(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_E1) & BIT_MASK_HOST_MSG_E1)
-
-#define BIT_SHIFT_HOST_MSG_E0 0
-#define BIT_MASK_HOST_MSG_E0 0xffff
-#define BIT_HOST_MSG_E0(x) \
- (((x) & BIT_MASK_HOST_MSG_E0) << BIT_SHIFT_HOST_MSG_E0)
-#define BIT_GET_HOST_MSG_E0(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_E0) & BIT_MASK_HOST_MSG_E0)
-
-/* 2 REG_SDIO_RESPONSE_TIMER (Offset 0x10250088) */
-
-#define BIT_SHIFT_CMDIN_2RESP_TIMER 0
-#define BIT_MASK_CMDIN_2RESP_TIMER 0xffff
-#define BIT_CMDIN_2RESP_TIMER(x) \
- (((x) & BIT_MASK_CMDIN_2RESP_TIMER) << BIT_SHIFT_CMDIN_2RESP_TIMER)
-#define BIT_GET_CMDIN_2RESP_TIMER(x) \
- (((x) >> BIT_SHIFT_CMDIN_2RESP_TIMER) & BIT_MASK_CMDIN_2RESP_TIMER)
-
-/* 2 REG_SDIO_CMD_CRC (Offset 0x1025008A) */
-
-#define BIT_SHIFT_SDIO_CMD_CRC_V1 0
-#define BIT_MASK_SDIO_CMD_CRC_V1 0xff
-#define BIT_SDIO_CMD_CRC_V1(x) \
- (((x) & BIT_MASK_SDIO_CMD_CRC_V1) << BIT_SHIFT_SDIO_CMD_CRC_V1)
-#define BIT_GET_SDIO_CMD_CRC_V1(x) \
- (((x) >> BIT_SHIFT_SDIO_CMD_CRC_V1) & BIT_MASK_SDIO_CMD_CRC_V1)
-
-/* 2 REG_HMEBOX_E2_E3 (Offset 0x008C) */
-
-#define BIT_SHIFT_HOST_MSG_E3 16
-#define BIT_MASK_HOST_MSG_E3 0xffff
-#define BIT_HOST_MSG_E3(x) \
- (((x) & BIT_MASK_HOST_MSG_E3) << BIT_SHIFT_HOST_MSG_E3)
-#define BIT_GET_HOST_MSG_E3(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_E3) & BIT_MASK_HOST_MSG_E3)
-
-#define BIT_SHIFT_HOST_MSG_E2 0
-#define BIT_MASK_HOST_MSG_E2 0xffff
-#define BIT_HOST_MSG_E2(x) \
- (((x) & BIT_MASK_HOST_MSG_E2) << BIT_SHIFT_HOST_MSG_E2)
-#define BIT_GET_HOST_MSG_E2(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_E2) & BIT_MASK_HOST_MSG_E2)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_EABM BIT(31)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_ACKF BIT(30)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_DLDM BIT(29)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_ESWR BIT(28)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_PWMM BIT(27)
-#define BIT_WLLPSOP_EECK BIT(26)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_WLMACOFF BIT(25)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_EXTAL BIT(24)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WL_SYNPON_VOLTSPDN BIT(23)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_WLBBOFF BIT(22)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WLLPSOP_WLMEM_DS BIT(21)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_SHIFT_LPLDH12_VADJ_STEP_DN 12
-#define BIT_MASK_LPLDH12_VADJ_STEP_DN 0xf
-#define BIT_LPLDH12_VADJ_STEP_DN(x) \
- (((x) & BIT_MASK_LPLDH12_VADJ_STEP_DN) \
- << BIT_SHIFT_LPLDH12_VADJ_STEP_DN)
-#define BIT_GET_LPLDH12_VADJ_STEP_DN(x) \
- (((x) >> BIT_SHIFT_LPLDH12_VADJ_STEP_DN) & \
- BIT_MASK_LPLDH12_VADJ_STEP_DN)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_SHIFT_V15ADJ_L1_STEP_DN 8
-#define BIT_MASK_V15ADJ_L1_STEP_DN 0x7
-#define BIT_V15ADJ_L1_STEP_DN(x) \
- (((x) & BIT_MASK_V15ADJ_L1_STEP_DN) << BIT_SHIFT_V15ADJ_L1_STEP_DN)
-#define BIT_GET_V15ADJ_L1_STEP_DN(x) \
- (((x) >> BIT_SHIFT_V15ADJ_L1_STEP_DN) & BIT_MASK_V15ADJ_L1_STEP_DN)
-
-#define BIT_REGU_32K_CLK_EN BIT(1)
-#define BIT_DRV_WLAN_INT_CLR BIT(1)
-
-/* 2 REG_WLLPS_CTRL (Offset 0x0090) */
-
-#define BIT_WL_LPS_EN BIT(0)
-
-/* 2 REG_SDIO_HSISR (Offset 0x10250090) */
-
-#define BIT_DRV_WLAN_INT BIT(0)
-
-/* 2 REG_SDIO_HSIMR (Offset 0x10250091) */
-
-#define BIT_HISR_MASK BIT(0)
-
-/* 2 REG_AFE_CTRL5 (Offset 0x0094) */
-
-#define BIT_BB_DBG_SEL_AFE_SDM_BIT0 BIT(31)
-
-/* 2 REG_AFE_CTRL5 (Offset 0x0094) */
-
-#define BIT_ORDER_SDM BIT(30)
-#define BIT_RFE_SEL_SDM BIT(29)
-
-#define BIT_SHIFT_REF_SEL 25
-#define BIT_MASK_REF_SEL 0xf
-#define BIT_REF_SEL(x) (((x) & BIT_MASK_REF_SEL) << BIT_SHIFT_REF_SEL)
-#define BIT_GET_REF_SEL(x) (((x) >> BIT_SHIFT_REF_SEL) & BIT_MASK_REF_SEL)
-
-/* 2 REG_AFE_CTRL5 (Offset 0x0094) */
-
-#define BIT_SHIFT_F0F_SDM 12
-#define BIT_MASK_F0F_SDM 0x1fff
-#define BIT_F0F_SDM(x) (((x) & BIT_MASK_F0F_SDM) << BIT_SHIFT_F0F_SDM)
-#define BIT_GET_F0F_SDM(x) (((x) >> BIT_SHIFT_F0F_SDM) & BIT_MASK_F0F_SDM)
-
-/* 2 REG_AFE_CTRL5 (Offset 0x0094) */
-
-#define BIT_SHIFT_F0N_SDM 9
-#define BIT_MASK_F0N_SDM 0x7
-#define BIT_F0N_SDM(x) (((x) & BIT_MASK_F0N_SDM) << BIT_SHIFT_F0N_SDM)
-#define BIT_GET_F0N_SDM(x) (((x) >> BIT_SHIFT_F0N_SDM) & BIT_MASK_F0N_SDM)
-
-/* 2 REG_AFE_CTRL5 (Offset 0x0094) */
-
-#define BIT_SHIFT_DIVN_SDM 3
-#define BIT_MASK_DIVN_SDM 0x3f
-#define BIT_DIVN_SDM(x) (((x) & BIT_MASK_DIVN_SDM) << BIT_SHIFT_DIVN_SDM)
-#define BIT_GET_DIVN_SDM(x) (((x) >> BIT_SHIFT_DIVN_SDM) & BIT_MASK_DIVN_SDM)
-
-/* 2 REG_GPIO_DEBOUNCE_CTRL (Offset 0x0098) */
-
-#define BIT_WLGP_DBC1EN BIT(15)
-
-#define BIT_SHIFT_WLGP_DBC1 8
-#define BIT_MASK_WLGP_DBC1 0xf
-#define BIT_WLGP_DBC1(x) (((x) & BIT_MASK_WLGP_DBC1) << BIT_SHIFT_WLGP_DBC1)
-#define BIT_GET_WLGP_DBC1(x) (((x) >> BIT_SHIFT_WLGP_DBC1) & BIT_MASK_WLGP_DBC1)
-
-#define BIT_WLGP_DBC0EN BIT(7)
-
-#define BIT_SHIFT_WLGP_DBC0 0
-#define BIT_MASK_WLGP_DBC0 0xf
-#define BIT_WLGP_DBC0(x) (((x) & BIT_MASK_WLGP_DBC0) << BIT_SHIFT_WLGP_DBC0)
-#define BIT_GET_WLGP_DBC0(x) (((x) >> BIT_SHIFT_WLGP_DBC0) & BIT_MASK_WLGP_DBC0)
-
-/* 2 REG_RPWM2 (Offset 0x009C) */
-
-#define BIT_SHIFT_RPWM2 16
-#define BIT_MASK_RPWM2 0xffff
-#define BIT_RPWM2(x) (((x) & BIT_MASK_RPWM2) << BIT_SHIFT_RPWM2)
-#define BIT_GET_RPWM2(x) (((x) >> BIT_SHIFT_RPWM2) & BIT_MASK_RPWM2)
-
-/* 2 REG_SYSON_FSM_MON (Offset 0x00A0) */
-
-#define BIT_SHIFT_FSM_MON_SEL 24
-#define BIT_MASK_FSM_MON_SEL 0x7
-#define BIT_FSM_MON_SEL(x) \
- (((x) & BIT_MASK_FSM_MON_SEL) << BIT_SHIFT_FSM_MON_SEL)
-#define BIT_GET_FSM_MON_SEL(x) \
- (((x) >> BIT_SHIFT_FSM_MON_SEL) & BIT_MASK_FSM_MON_SEL)
-
-#define BIT_DOP_ELDO BIT(23)
-#define BIT_FSM_MON_UPD BIT(15)
-
-#define BIT_SHIFT_FSM_PAR 0
-#define BIT_MASK_FSM_PAR 0x7fff
-#define BIT_FSM_PAR(x) (((x) & BIT_MASK_FSM_PAR) << BIT_SHIFT_FSM_PAR)
-#define BIT_GET_FSM_PAR(x) (((x) >> BIT_SHIFT_FSM_PAR) & BIT_MASK_FSM_PAR)
-
-/* 2 REG_AFE_CTRL6 (Offset 0x00A4) */
-
-#define BIT_SHIFT_BB_DBG_SEL_AFE_SDM_BIT3_1 0
-#define BIT_MASK_BB_DBG_SEL_AFE_SDM_BIT3_1 0x7
-#define BIT_BB_DBG_SEL_AFE_SDM_BIT3_1(x) \
- (((x) & BIT_MASK_BB_DBG_SEL_AFE_SDM_BIT3_1) \
- << BIT_SHIFT_BB_DBG_SEL_AFE_SDM_BIT3_1)
-#define BIT_GET_BB_DBG_SEL_AFE_SDM_BIT3_1(x) \
- (((x) >> BIT_SHIFT_BB_DBG_SEL_AFE_SDM_BIT3_1) & \
- BIT_MASK_BB_DBG_SEL_AFE_SDM_BIT3_1)
-
-/* 2 REG_PMC_DBG_CTRL1 (Offset 0x00A8) */
-
-#define BIT_BT_INT_EN BIT(31)
-
-#define BIT_SHIFT_RD_WR_WIFI_BT_INFO 16
-#define BIT_MASK_RD_WR_WIFI_BT_INFO 0x7fff
-#define BIT_RD_WR_WIFI_BT_INFO(x) \
- (((x) & BIT_MASK_RD_WR_WIFI_BT_INFO) << BIT_SHIFT_RD_WR_WIFI_BT_INFO)
-#define BIT_GET_RD_WR_WIFI_BT_INFO(x) \
- (((x) >> BIT_SHIFT_RD_WR_WIFI_BT_INFO) & BIT_MASK_RD_WR_WIFI_BT_INFO)
-
-/* 2 REG_PMC_DBG_CTRL1 (Offset 0x00A8) */
-
-#define BIT_PMC_WR_OVF BIT(8)
-
-#define BIT_SHIFT_WLPMC_ERRINT 0
-#define BIT_MASK_WLPMC_ERRINT 0xff
-#define BIT_WLPMC_ERRINT(x) \
- (((x) & BIT_MASK_WLPMC_ERRINT) << BIT_SHIFT_WLPMC_ERRINT)
-#define BIT_GET_WLPMC_ERRINT(x) \
- (((x) >> BIT_SHIFT_WLPMC_ERRINT) & BIT_MASK_WLPMC_ERRINT)
-
-/* 2 REG_AFE_CTRL7 (Offset 0x00AC) */
-
-#define BIT_SHIFT_SEL_V 30
-#define BIT_MASK_SEL_V 0x3
-#define BIT_SEL_V(x) (((x) & BIT_MASK_SEL_V) << BIT_SHIFT_SEL_V)
-#define BIT_GET_SEL_V(x) (((x) >> BIT_SHIFT_SEL_V) & BIT_MASK_SEL_V)
-
-/* 2 REG_AFE_CTRL7 (Offset 0x00AC) */
-
-#define BIT_TXFIFO_TH_INT BIT(30)
-
-/* 2 REG_AFE_CTRL7 (Offset 0x00AC) */
-
-#define BIT_SEL_LDO_PC BIT(29)
-
-/* 2 REG_AFE_CTRL7 (Offset 0x00AC) */
-
-#define BIT_SHIFT_CK_MON_SEL 26
-#define BIT_MASK_CK_MON_SEL 0x7
-#define BIT_CK_MON_SEL(x) (((x) & BIT_MASK_CK_MON_SEL) << BIT_SHIFT_CK_MON_SEL)
-#define BIT_GET_CK_MON_SEL(x) \
- (((x) >> BIT_SHIFT_CK_MON_SEL) & BIT_MASK_CK_MON_SEL)
-
-/* 2 REG_AFE_CTRL7 (Offset 0x00AC) */
-
-#define BIT_CK_MON_EN BIT(25)
-#define BIT_FREF_EDGE BIT(24)
-#define BIT_CK320M_EN BIT(23)
-#define BIT_CK_5M_EN BIT(22)
-#define BIT_TESTEN BIT(21)
-
-/* 2 REG_HIMR0 (Offset 0x00B0) */
-
-#define BIT_TIMEOUT_INTERRUPT2_MASK BIT(31)
-#define BIT_TIMEOUT_INTERRUTP1_MASK BIT(30)
-#define BIT_PSTIMEOUT_MSK BIT(29)
-#define BIT_GTINT4_MSK BIT(28)
-#define BIT_GTINT3_MSK BIT(27)
-#define BIT_TXBCN0ERR_MSK BIT(26)
-#define BIT_TXBCN0OK_MSK BIT(25)
-#define BIT_TSF_BIT32_TOGGLE_MSK BIT(24)
-#define BIT_BCNDMAINT0_MSK BIT(20)
-#define BIT_BCNDERR0_MSK BIT(16)
-#define BIT_HSISR_IND_ON_INT_MSK BIT(15)
-
-/* 2 REG_HIMR0 (Offset 0x00B0) */
-
-#define BIT_BCNDMAINT_E_MSK BIT(14)
-
-/* 2 REG_HIMR0 (Offset 0x00B0) */
-
-#define BIT_CTWEND_MSK BIT(12)
-#define BIT_HISR1_IND_MSK BIT(11)
-
-/* 2 REG_HIMR0 (Offset 0x00B0) */
-
-#define BIT_C2HCMD_MSK BIT(10)
-#define BIT_CPWM2_MSK BIT(9)
-#define BIT_CPWM_MSK BIT(8)
-#define BIT_HIGHDOK_MSK BIT(7)
-#define BIT_MGTDOK_MSK BIT(6)
-#define BIT_BKDOK_MSK BIT(5)
-#define BIT_BEDOK_MSK BIT(4)
-#define BIT_VIDOK_MSK BIT(3)
-#define BIT_VODOK_MSK BIT(2)
-#define BIT_RDU_MSK BIT(1)
-#define BIT_RXOK_MSK BIT(0)
-
-/* 2 REG_HISR0 (Offset 0x00B4) */
-
-#define BIT_TIMEOUT_INTERRUPT2 BIT(31)
-
-/* 2 REG_HISR0 (Offset 0x00B4) */
-
-#define BIT_TIMEOUT_INTERRUTP1 BIT(30)
-
-/* 2 REG_HISR0 (Offset 0x00B4) */
-
-#define BIT_PSTIMEOUT BIT(29)
-#define BIT_GTINT4 BIT(28)
-#define BIT_GTINT3 BIT(27)
-#define BIT_TXBCN0ERR BIT(26)
-#define BIT_TXBCN0OK BIT(25)
-#define BIT_TSF_BIT32_TOGGLE BIT(24)
-#define BIT_BCNDMAINT0 BIT(20)
-#define BIT_BCNDERR0 BIT(16)
-#define BIT_HSISR_IND_ON_INT BIT(15)
-
-/* 2 REG_HISR0 (Offset 0x00B4) */
-
-#define BIT_BCNDMAINT_E BIT(14)
-
-/* 2 REG_HISR0 (Offset 0x00B4) */
-
-#define BIT_CTWEND BIT(12)
-
-/* 2 REG_HISR0 (Offset 0x00B4) */
-
-#define BIT_HISR1_IND_INT BIT(11)
-#define BIT_C2HCMD BIT(10)
-#define BIT_CPWM2 BIT(9)
-#define BIT_CPWM BIT(8)
-#define BIT_HIGHDOK BIT(7)
-#define BIT_MGTDOK BIT(6)
-#define BIT_BKDOK BIT(5)
-#define BIT_BEDOK BIT(4)
-#define BIT_VIDOK BIT(3)
-#define BIT_VODOK BIT(2)
-#define BIT_RDU BIT(1)
-#define BIT_RXOK BIT(0)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_BTON_STS_UPDATE_MASK BIT(29)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_MCU_ERR_MASK BIT(28)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_BCNDMAINT7__MSK BIT(27)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_BCNDMAINT6__MSK BIT(26)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_BCNDMAINT5__MSK BIT(25)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_BCNDMAINT4__MSK BIT(24)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_BCNDMAINT3_MSK BIT(23)
-#define BIT_BCNDMAINT2_MSK BIT(22)
-#define BIT_BCNDMAINT1_MSK BIT(21)
-#define BIT_BCNDERR7_MSK BIT(20)
-#define BIT_BCNDERR6_MSK BIT(19)
-#define BIT_BCNDERR5_MSK BIT(18)
-#define BIT_BCNDERR4_MSK BIT(17)
-#define BIT_BCNDERR3_MSK BIT(16)
-#define BIT_BCNDERR2_MSK BIT(15)
-#define BIT_BCNDERR1_MSK BIT(14)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_ATIMEND_E_MSK BIT(13)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_ATIMEND__MSK BIT(12)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_TXERR_MSK BIT(11)
-#define BIT_RXERR_MSK BIT(10)
-#define BIT_TXFOVW_MSK BIT(9)
-#define BIT_FOVW_MSK BIT(8)
-
-/* 2 REG_HIMR1 (Offset 0x00B8) */
-
-#define BIT_CPU_MGQ_TXDONE_MSK BIT(5)
-#define BIT_PS_TIMER_C_MSK BIT(4)
-#define BIT_PS_TIMER_B_MSK BIT(3)
-#define BIT_PS_TIMER_A_MSK BIT(2)
-#define BIT_CPUMGQ_TX_TIMER_MSK BIT(1)
-
-/* 2 REG_HISR1 (Offset 0x00BC) */
-
-#define BIT_BTON_STS_UPDATE_INT BIT(29)
-
-/* 2 REG_HISR1 (Offset 0x00BC) */
-
-#define BIT_MCU_ERR BIT(28)
-
-/* 2 REG_HISR1 (Offset 0x00BC) */
-
-#define BIT_BCNDMAINT7 BIT(27)
-#define BIT_BCNDMAINT6 BIT(26)
-#define BIT_BCNDMAINT5 BIT(25)
-#define BIT_BCNDMAINT4 BIT(24)
-#define BIT_BCNDMAINT3 BIT(23)
-#define BIT_BCNDMAINT2 BIT(22)
-#define BIT_BCNDMAINT1 BIT(21)
-#define BIT_BCNDERR7 BIT(20)
-#define BIT_BCNDERR6 BIT(19)
-#define BIT_BCNDERR5 BIT(18)
-#define BIT_BCNDERR4 BIT(17)
-#define BIT_BCNDERR3 BIT(16)
-#define BIT_BCNDERR2 BIT(15)
-#define BIT_BCNDERR1 BIT(14)
-
-/* 2 REG_HISR1 (Offset 0x00BC) */
-
-#define BIT_ATIMEND_E BIT(13)
-
-/* 2 REG_HISR1 (Offset 0x00BC) */
-
-#define BIT_ATIMEND BIT(12)
-#define BIT_TXERR_INT BIT(11)
-#define BIT_RXERR_INT BIT(10)
-#define BIT_TXFOVW BIT(9)
-#define BIT_FOVW BIT(8)
-
-/* 2 REG_HISR1 (Offset 0x00BC) */
-
-#define BIT_CPU_MGQ_TXDONE BIT(5)
-#define BIT_PS_TIMER_C BIT(4)
-#define BIT_PS_TIMER_B BIT(3)
-#define BIT_PS_TIMER_A BIT(2)
-#define BIT_CPUMGQ_TX_TIMER BIT(1)
-
-/* 2 REG_SDIO_ERR_RPT (Offset 0x102500C0) */
-
-#define BIT_HR_FF_OVF BIT(6)
-#define BIT_HR_FF_UDN BIT(5)
-#define BIT_TXDMA_BUSY_ERR BIT(4)
-#define BIT_TXDMA_VLD_ERR BIT(3)
-#define BIT_QSEL_UNKNOWN_ERR BIT(2)
-#define BIT_QSEL_MIS_ERR BIT(1)
-
-/* 2 REG_DBG_PORT_SEL (Offset 0x00C0) */
-
-#define BIT_SHIFT_DEBUG_ST 0
-#define BIT_MASK_DEBUG_ST 0xffffffffL
-#define BIT_DEBUG_ST(x) (((x) & BIT_MASK_DEBUG_ST) << BIT_SHIFT_DEBUG_ST)
-#define BIT_GET_DEBUG_ST(x) (((x) >> BIT_SHIFT_DEBUG_ST) & BIT_MASK_DEBUG_ST)
-
-/* 2 REG_SDIO_ERR_RPT (Offset 0x102500C0) */
-
-#define BIT_SDIO_OVERRD_ERR BIT(0)
-
-/* 2 REG_SDIO_CMD_ERRCNT (Offset 0x102500C1) */
-
-#define BIT_SHIFT_CMD_CRC_ERR_CNT 0
-#define BIT_MASK_CMD_CRC_ERR_CNT 0xff
-#define BIT_CMD_CRC_ERR_CNT(x) \
- (((x) & BIT_MASK_CMD_CRC_ERR_CNT) << BIT_SHIFT_CMD_CRC_ERR_CNT)
-#define BIT_GET_CMD_CRC_ERR_CNT(x) \
- (((x) >> BIT_SHIFT_CMD_CRC_ERR_CNT) & BIT_MASK_CMD_CRC_ERR_CNT)
-
-/* 2 REG_SDIO_DATA_ERRCNT (Offset 0x102500C2) */
-
-#define BIT_SHIFT_DATA_CRC_ERR_CNT 0
-#define BIT_MASK_DATA_CRC_ERR_CNT 0xff
-#define BIT_DATA_CRC_ERR_CNT(x) \
- (((x) & BIT_MASK_DATA_CRC_ERR_CNT) << BIT_SHIFT_DATA_CRC_ERR_CNT)
-#define BIT_GET_DATA_CRC_ERR_CNT(x) \
- (((x) >> BIT_SHIFT_DATA_CRC_ERR_CNT) & BIT_MASK_DATA_CRC_ERR_CNT)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_USB3_USB2_TRANSITION BIT(20)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_SHIFT_USB23_SW_MODE_V1 18
-#define BIT_MASK_USB23_SW_MODE_V1 0x3
-#define BIT_USB23_SW_MODE_V1(x) \
- (((x) & BIT_MASK_USB23_SW_MODE_V1) << BIT_SHIFT_USB23_SW_MODE_V1)
-#define BIT_GET_USB23_SW_MODE_V1(x) \
- (((x) >> BIT_SHIFT_USB23_SW_MODE_V1) & BIT_MASK_USB23_SW_MODE_V1)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_NO_PDN_CHIPOFF_V1 BIT(17)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_RSM_EN_V1 BIT(16)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_LD_B12V_EN BIT(7)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_EECS_IOSEL_V1 BIT(6)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_EECS_DATA_O_V1 BIT(5)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_EECS_DATA_I_V1 BIT(4)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_EESK_IOSEL_V1 BIT(2)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_EESK_DATA_O_V1 BIT(1)
-
-/* 2 REG_PAD_CTRL2 (Offset 0x00C4) */
-
-#define BIT_EESK_DATA_I_V1 BIT(0)
-
-/* 2 REG_SDIO_CMD_ERR_CONTENT (Offset 0x102500C4) */
-
-#define BIT_SHIFT_SDIO_CMD_ERR_CONTENT 0
-#define BIT_MASK_SDIO_CMD_ERR_CONTENT 0xffffffffffL
-#define BIT_SDIO_CMD_ERR_CONTENT(x) \
- (((x) & BIT_MASK_SDIO_CMD_ERR_CONTENT) \
- << BIT_SHIFT_SDIO_CMD_ERR_CONTENT)
-#define BIT_GET_SDIO_CMD_ERR_CONTENT(x) \
- (((x) >> BIT_SHIFT_SDIO_CMD_ERR_CONTENT) & \
- BIT_MASK_SDIO_CMD_ERR_CONTENT)
-
-/* 2 REG_SDIO_CRC_ERR_IDX (Offset 0x102500C9) */
-
-#define BIT_D3_CRC_ERR BIT(4)
-#define BIT_D2_CRC_ERR BIT(3)
-#define BIT_D1_CRC_ERR BIT(2)
-#define BIT_D0_CRC_ERR BIT(1)
-#define BIT_CMD_CRC_ERR BIT(0)
-
-/* 2 REG_SDIO_DATA_CRC (Offset 0x102500CA) */
-
-#define BIT_SHIFT_SDIO_DATA_CRC 0
-#define BIT_MASK_SDIO_DATA_CRC 0xff
-#define BIT_SDIO_DATA_CRC(x) \
- (((x) & BIT_MASK_SDIO_DATA_CRC) << BIT_SHIFT_SDIO_DATA_CRC)
-#define BIT_GET_SDIO_DATA_CRC(x) \
- (((x) >> BIT_SHIFT_SDIO_DATA_CRC) & BIT_MASK_SDIO_DATA_CRC)
-
-/* 2 REG_SDIO_DATA_REPLY_TIME (Offset 0x102500CB) */
-
-#define BIT_SHIFT_SDIO_DATA_REPLY_TIME 0
-#define BIT_MASK_SDIO_DATA_REPLY_TIME 0x7
-#define BIT_SDIO_DATA_REPLY_TIME(x) \
- (((x) & BIT_MASK_SDIO_DATA_REPLY_TIME) \
- << BIT_SHIFT_SDIO_DATA_REPLY_TIME)
-#define BIT_GET_SDIO_DATA_REPLY_TIME(x) \
- (((x) >> BIT_SHIFT_SDIO_DATA_REPLY_TIME) & \
- BIT_MASK_SDIO_DATA_REPLY_TIME)
-
-/* 2 REG_PMC_DBG_CTRL2 (Offset 0x00CC) */
-
-#define BIT_SHIFT_EFUSE_BURN_GNT 24
-#define BIT_MASK_EFUSE_BURN_GNT 0xff
-#define BIT_EFUSE_BURN_GNT(x) \
- (((x) & BIT_MASK_EFUSE_BURN_GNT) << BIT_SHIFT_EFUSE_BURN_GNT)
-#define BIT_GET_EFUSE_BURN_GNT(x) \
- (((x) >> BIT_SHIFT_EFUSE_BURN_GNT) & BIT_MASK_EFUSE_BURN_GNT)
-
-/* 2 REG_PMC_DBG_CTRL2 (Offset 0x00CC) */
-
-#define BIT_STOP_WL_PMC BIT(9)
-#define BIT_STOP_SYM_PMC BIT(8)
-
-/* 2 REG_PMC_DBG_CTRL2 (Offset 0x00CC) */
-
-#define BIT_REG_RST_WLPMC BIT(5)
-#define BIT_REG_RST_PD12N BIT(4)
-#define BIT_SYSON_DIS_WLREG_WRMSK BIT(3)
-#define BIT_SYSON_DIS_PMCREG_WRMSK BIT(2)
-
-#define BIT_SHIFT_SYSON_REG_ARB 0
-#define BIT_MASK_SYSON_REG_ARB 0x3
-#define BIT_SYSON_REG_ARB(x) \
- (((x) & BIT_MASK_SYSON_REG_ARB) << BIT_SHIFT_SYSON_REG_ARB)
-#define BIT_GET_SYSON_REG_ARB(x) \
- (((x) >> BIT_SHIFT_SYSON_REG_ARB) & BIT_MASK_SYSON_REG_ARB)
-
-/* 2 REG_BIST_CTRL (Offset 0x00D0) */
-
-#define BIT_BIST_USB_DIS BIT(27)
-
-/* 2 REG_BIST_CTRL (Offset 0x00D0) */
-
-#define BIT_BIST_PCI_DIS BIT(26)
-
-/* 2 REG_BIST_CTRL (Offset 0x00D0) */
-
-#define BIT_BIST_BT_DIS BIT(25)
-
-/* 2 REG_BIST_CTRL (Offset 0x00D0) */
-
-#define BIT_BIST_WL_DIS BIT(24)
-
-/* 2 REG_BIST_CTRL (Offset 0x00D0) */
-
-#define BIT_SHIFT_BIST_RPT_SEL 16
-#define BIT_MASK_BIST_RPT_SEL 0xf
-#define BIT_BIST_RPT_SEL(x) \
- (((x) & BIT_MASK_BIST_RPT_SEL) << BIT_SHIFT_BIST_RPT_SEL)
-#define BIT_GET_BIST_RPT_SEL(x) \
- (((x) >> BIT_SHIFT_BIST_RPT_SEL) & BIT_MASK_BIST_RPT_SEL)
-
-/* 2 REG_BIST_CTRL (Offset 0x00D0) */
-
-#define BIT_BIST_RESUME_PS BIT(4)
-
-/* 2 REG_BIST_CTRL (Offset 0x00D0) */
-
-#define BIT_BIST_RESUME BIT(3)
-#define BIT_BIST_NORMAL BIT(2)
-
-/* 2 REG_BIST_CTRL (Offset 0x00D0) */
-
-#define BIT_BIST_RSTN BIT(1)
-#define BIT_BIST_CLK_EN BIT(0)
-
-/* 2 REG_BIST_RPT (Offset 0x00D4) */
-
-#define BIT_SHIFT_MBIST_REPORT 0
-#define BIT_MASK_MBIST_REPORT 0xffffffffL
-#define BIT_MBIST_REPORT(x) \
- (((x) & BIT_MASK_MBIST_REPORT) << BIT_SHIFT_MBIST_REPORT)
-#define BIT_GET_MBIST_REPORT(x) \
- (((x) >> BIT_SHIFT_MBIST_REPORT) & BIT_MASK_MBIST_REPORT)
-
-/* 2 REG_MEM_CTRL (Offset 0x00D8) */
-
-#define BIT_UMEM_RME BIT(31)
-
-/* 2 REG_MEM_CTRL (Offset 0x00D8) */
-
-#define BIT_SHIFT_BT_SPRAM 28
-#define BIT_MASK_BT_SPRAM 0x3
-#define BIT_BT_SPRAM(x) (((x) & BIT_MASK_BT_SPRAM) << BIT_SHIFT_BT_SPRAM)
-#define BIT_GET_BT_SPRAM(x) (((x) >> BIT_SHIFT_BT_SPRAM) & BIT_MASK_BT_SPRAM)
-
-/* 2 REG_MEM_CTRL (Offset 0x00D8) */
-
-#define BIT_SHIFT_BT_ROM 24
-#define BIT_MASK_BT_ROM 0xf
-#define BIT_BT_ROM(x) (((x) & BIT_MASK_BT_ROM) << BIT_SHIFT_BT_ROM)
-#define BIT_GET_BT_ROM(x) (((x) >> BIT_SHIFT_BT_ROM) & BIT_MASK_BT_ROM)
-
-#define BIT_SHIFT_PCI_DPRAM 10
-#define BIT_MASK_PCI_DPRAM 0x3
-#define BIT_PCI_DPRAM(x) (((x) & BIT_MASK_PCI_DPRAM) << BIT_SHIFT_PCI_DPRAM)
-#define BIT_GET_PCI_DPRAM(x) (((x) >> BIT_SHIFT_PCI_DPRAM) & BIT_MASK_PCI_DPRAM)
-
-/* 2 REG_MEM_CTRL (Offset 0x00D8) */
-
-#define BIT_SHIFT_PCI_SPRAM 8
-#define BIT_MASK_PCI_SPRAM 0x3
-#define BIT_PCI_SPRAM(x) (((x) & BIT_MASK_PCI_SPRAM) << BIT_SHIFT_PCI_SPRAM)
-#define BIT_GET_PCI_SPRAM(x) (((x) >> BIT_SHIFT_PCI_SPRAM) & BIT_MASK_PCI_SPRAM)
-
-#define BIT_SHIFT_USB_SPRAM 6
-#define BIT_MASK_USB_SPRAM 0x3
-#define BIT_USB_SPRAM(x) (((x) & BIT_MASK_USB_SPRAM) << BIT_SHIFT_USB_SPRAM)
-#define BIT_GET_USB_SPRAM(x) (((x) >> BIT_SHIFT_USB_SPRAM) & BIT_MASK_USB_SPRAM)
-
-/* 2 REG_MEM_CTRL (Offset 0x00D8) */
-
-#define BIT_SHIFT_USB_SPRF 4
-#define BIT_MASK_USB_SPRF 0x3
-#define BIT_USB_SPRF(x) (((x) & BIT_MASK_USB_SPRF) << BIT_SHIFT_USB_SPRF)
-#define BIT_GET_USB_SPRF(x) (((x) >> BIT_SHIFT_USB_SPRF) & BIT_MASK_USB_SPRF)
-
-/* 2 REG_MEM_CTRL (Offset 0x00D8) */
-
-#define BIT_SHIFT_MCU_ROM 0
-#define BIT_MASK_MCU_ROM 0xf
-#define BIT_MCU_ROM(x) (((x) & BIT_MASK_MCU_ROM) << BIT_SHIFT_MCU_ROM)
-#define BIT_GET_MCU_ROM(x) (((x) >> BIT_SHIFT_MCU_ROM) & BIT_MASK_MCU_ROM)
-
-/* 2 REG_AFE_CTRL8 (Offset 0x00DC) */
-
-#define BIT_SYN_AGPIO BIT(20)
-
-/* 2 REG_AFE_CTRL8 (Offset 0x00DC) */
-
-#define BIT_XTAL_LP BIT(4)
-#define BIT_XTAL_GM_SEP BIT(3)
-
-/* 2 REG_AFE_CTRL8 (Offset 0x00DC) */
-
-#define BIT_SHIFT_XTAL_SEL_TOK 0
-#define BIT_MASK_XTAL_SEL_TOK 0x7
-#define BIT_XTAL_SEL_TOK(x) \
- (((x) & BIT_MASK_XTAL_SEL_TOK) << BIT_SHIFT_XTAL_SEL_TOK)
-#define BIT_GET_XTAL_SEL_TOK(x) \
- (((x) >> BIT_SHIFT_XTAL_SEL_TOK) & BIT_MASK_XTAL_SEL_TOK)
-
-/* 2 REG_USB_SIE_INTF (Offset 0x00E0) */
-
-#define BIT_RD_SEL BIT(31)
-
-/* 2 REG_USB_SIE_INTF (Offset 0x00E0) */
-
-#define BIT_USB_SIE_INTF_WE_V1 BIT(30)
-#define BIT_USB_SIE_INTF_BYIOREG_V1 BIT(29)
-#define BIT_USB_SIE_SELECT BIT(28)
-
-/* 2 REG_USB_SIE_INTF (Offset 0x00E0) */
-
-#define BIT_SHIFT_USB_SIE_INTF_ADDR_V1 16
-#define BIT_MASK_USB_SIE_INTF_ADDR_V1 0x1ff
-#define BIT_USB_SIE_INTF_ADDR_V1(x) \
- (((x) & BIT_MASK_USB_SIE_INTF_ADDR_V1) \
- << BIT_SHIFT_USB_SIE_INTF_ADDR_V1)
-#define BIT_GET_USB_SIE_INTF_ADDR_V1(x) \
- (((x) >> BIT_SHIFT_USB_SIE_INTF_ADDR_V1) & \
- BIT_MASK_USB_SIE_INTF_ADDR_V1)
-
-/* 2 REG_USB_SIE_INTF (Offset 0x00E0) */
-
-#define BIT_SHIFT_USB_SIE_INTF_RD 8
-#define BIT_MASK_USB_SIE_INTF_RD 0xff
-#define BIT_USB_SIE_INTF_RD(x) \
- (((x) & BIT_MASK_USB_SIE_INTF_RD) << BIT_SHIFT_USB_SIE_INTF_RD)
-#define BIT_GET_USB_SIE_INTF_RD(x) \
- (((x) >> BIT_SHIFT_USB_SIE_INTF_RD) & BIT_MASK_USB_SIE_INTF_RD)
-
-#define BIT_SHIFT_USB_SIE_INTF_WD 0
-#define BIT_MASK_USB_SIE_INTF_WD 0xff
-#define BIT_USB_SIE_INTF_WD(x) \
- (((x) & BIT_MASK_USB_SIE_INTF_WD) << BIT_SHIFT_USB_SIE_INTF_WD)
-#define BIT_GET_USB_SIE_INTF_WD(x) \
- (((x) >> BIT_SHIFT_USB_SIE_INTF_WD) & BIT_MASK_USB_SIE_INTF_WD)
-
-/* 2 REG_PCIE_MIO_INTF (Offset 0x00E4) */
-
-#define BIT_PCIE_MIO_BYIOREG BIT(13)
-#define BIT_PCIE_MIO_RE BIT(12)
-
-#define BIT_SHIFT_PCIE_MIO_WE 8
-#define BIT_MASK_PCIE_MIO_WE 0xf
-#define BIT_PCIE_MIO_WE(x) \
- (((x) & BIT_MASK_PCIE_MIO_WE) << BIT_SHIFT_PCIE_MIO_WE)
-#define BIT_GET_PCIE_MIO_WE(x) \
- (((x) >> BIT_SHIFT_PCIE_MIO_WE) & BIT_MASK_PCIE_MIO_WE)
-
-#define BIT_SHIFT_PCIE_MIO_ADDR 0
-#define BIT_MASK_PCIE_MIO_ADDR 0xff
-#define BIT_PCIE_MIO_ADDR(x) \
- (((x) & BIT_MASK_PCIE_MIO_ADDR) << BIT_SHIFT_PCIE_MIO_ADDR)
-#define BIT_GET_PCIE_MIO_ADDR(x) \
- (((x) >> BIT_SHIFT_PCIE_MIO_ADDR) & BIT_MASK_PCIE_MIO_ADDR)
-
-/* 2 REG_PCIE_MIO_INTD (Offset 0x00E8) */
-
-#define BIT_SHIFT_PCIE_MIO_DATA 0
-#define BIT_MASK_PCIE_MIO_DATA 0xffffffffL
-#define BIT_PCIE_MIO_DATA(x) \
- (((x) & BIT_MASK_PCIE_MIO_DATA) << BIT_SHIFT_PCIE_MIO_DATA)
-#define BIT_GET_PCIE_MIO_DATA(x) \
- (((x) >> BIT_SHIFT_PCIE_MIO_DATA) & BIT_MASK_PCIE_MIO_DATA)
-
-/* 2 REG_WLRF1 (Offset 0x00EC) */
-
-#define BIT_SHIFT_WLRF1_CTRL 24
-#define BIT_MASK_WLRF1_CTRL 0xff
-#define BIT_WLRF1_CTRL(x) (((x) & BIT_MASK_WLRF1_CTRL) << BIT_SHIFT_WLRF1_CTRL)
-#define BIT_GET_WLRF1_CTRL(x) \
- (((x) >> BIT_SHIFT_WLRF1_CTRL) & BIT_MASK_WLRF1_CTRL)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_SHIFT_TRP_ICFG 28
-#define BIT_MASK_TRP_ICFG 0xf
-#define BIT_TRP_ICFG(x) (((x) & BIT_MASK_TRP_ICFG) << BIT_SHIFT_TRP_ICFG)
-#define BIT_GET_TRP_ICFG(x) (((x) >> BIT_SHIFT_TRP_ICFG) & BIT_MASK_TRP_ICFG)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_RF_TYPE_ID BIT(27)
-#define BIT_BD_HCI_SEL BIT(26)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_BD_PKG_SEL BIT(25)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_SPSLDO_SEL BIT(24)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_RTL_ID BIT(23)
-#define BIT_PAD_HWPD_IDN BIT(22)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_TESTMODE BIT(20)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_SHIFT_VENDOR_ID 16
-#define BIT_MASK_VENDOR_ID 0xf
-#define BIT_VENDOR_ID(x) (((x) & BIT_MASK_VENDOR_ID) << BIT_SHIFT_VENDOR_ID)
-#define BIT_GET_VENDOR_ID(x) (((x) >> BIT_SHIFT_VENDOR_ID) & BIT_MASK_VENDOR_ID)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_SHIFT_CHIP_VER 12
-#define BIT_MASK_CHIP_VER 0xf
-#define BIT_CHIP_VER(x) (((x) & BIT_MASK_CHIP_VER) << BIT_SHIFT_CHIP_VER)
-#define BIT_GET_CHIP_VER(x) (((x) >> BIT_SHIFT_CHIP_VER) & BIT_MASK_CHIP_VER)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_BD_MAC3 BIT(11)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_BD_MAC1 BIT(10)
-#define BIT_BD_MAC2 BIT(9)
-#define BIT_SIC_IDLE BIT(8)
-#define BIT_SW_OFFLOAD_EN BIT(7)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_OCP_SHUTDN BIT(6)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_V15_VLD BIT(5)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_PCIRSTB BIT(4)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_PCLK_VLD BIT(3)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_UCLK_VLD BIT(2)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_ACLK_VLD BIT(1)
-
-/* 2 REG_SYS_CFG1 (Offset 0x00F0) */
-
-#define BIT_XCLK_VLD BIT(0)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_SHIFT_RF_RL_ID 28
-#define BIT_MASK_RF_RL_ID 0xf
-#define BIT_RF_RL_ID(x) (((x) & BIT_MASK_RF_RL_ID) << BIT_SHIFT_RF_RL_ID)
-#define BIT_GET_RF_RL_ID(x) (((x) >> BIT_SHIFT_RF_RL_ID) & BIT_MASK_RF_RL_ID)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_HPHY_ICFG BIT(19)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_SHIFT_SEL_0XC0 16
-#define BIT_MASK_SEL_0XC0 0x3
-#define BIT_SEL_0XC0(x) (((x) & BIT_MASK_SEL_0XC0) << BIT_SHIFT_SEL_0XC0)
-#define BIT_GET_SEL_0XC0(x) (((x) >> BIT_SHIFT_SEL_0XC0) & BIT_MASK_SEL_0XC0)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_SHIFT_HCI_SEL_V3 12
-#define BIT_MASK_HCI_SEL_V3 0x7
-#define BIT_HCI_SEL_V3(x) (((x) & BIT_MASK_HCI_SEL_V3) << BIT_SHIFT_HCI_SEL_V3)
-#define BIT_GET_HCI_SEL_V3(x) \
- (((x) >> BIT_SHIFT_HCI_SEL_V3) & BIT_MASK_HCI_SEL_V3)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_USB_OPERATION_MODE BIT(10)
-#define BIT_BT_PDN BIT(9)
-#define BIT_AUTO_WLPON BIT(8)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_WL_MODE BIT(7)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_PKG_SEL_HCI BIT(6)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_SHIFT_PAD_HCI_SEL_V1 3
-#define BIT_MASK_PAD_HCI_SEL_V1 0x7
-#define BIT_PAD_HCI_SEL_V1(x) \
- (((x) & BIT_MASK_PAD_HCI_SEL_V1) << BIT_SHIFT_PAD_HCI_SEL_V1)
-#define BIT_GET_PAD_HCI_SEL_V1(x) \
- (((x) >> BIT_SHIFT_PAD_HCI_SEL_V1) & BIT_MASK_PAD_HCI_SEL_V1)
-
-/* 2 REG_SYS_STATUS1 (Offset 0x00F4) */
-
-#define BIT_SHIFT_EFS_HCI_SEL_V1 0
-#define BIT_MASK_EFS_HCI_SEL_V1 0x7
-#define BIT_EFS_HCI_SEL_V1(x) \
- (((x) & BIT_MASK_EFS_HCI_SEL_V1) << BIT_SHIFT_EFS_HCI_SEL_V1)
-#define BIT_GET_EFS_HCI_SEL_V1(x) \
- (((x) >> BIT_SHIFT_EFS_HCI_SEL_V1) & BIT_MASK_EFS_HCI_SEL_V1)
-
-/* 2 REG_SYS_STATUS2 (Offset 0x00F8) */
-
-#define BIT_SIO_ALDN BIT(19)
-#define BIT_USB_ALDN BIT(18)
-#define BIT_PCI_ALDN BIT(17)
-#define BIT_SYS_ALDN BIT(16)
-
-#define BIT_SHIFT_EPVID1 8
-#define BIT_MASK_EPVID1 0xff
-#define BIT_EPVID1(x) (((x) & BIT_MASK_EPVID1) << BIT_SHIFT_EPVID1)
-#define BIT_GET_EPVID1(x) (((x) >> BIT_SHIFT_EPVID1) & BIT_MASK_EPVID1)
-
-#define BIT_SHIFT_EPVID0 0
-#define BIT_MASK_EPVID0 0xff
-#define BIT_EPVID0(x) (((x) & BIT_MASK_EPVID0) << BIT_SHIFT_EPVID0)
-#define BIT_GET_EPVID0(x) (((x) >> BIT_SHIFT_EPVID0) & BIT_MASK_EPVID0)
-
-/* 2 REG_SYS_CFG2 (Offset 0x00FC) */
-
-#define BIT_HCI_SEL_EMBEDDED BIT(8)
-
-/* 2 REG_SYS_CFG2 (Offset 0x00FC) */
-
-#define BIT_SHIFT_HW_ID 0
-#define BIT_MASK_HW_ID 0xff
-#define BIT_HW_ID(x) (((x) & BIT_MASK_HW_ID) << BIT_SHIFT_HW_ID)
-#define BIT_GET_HW_ID(x) (((x) >> BIT_SHIFT_HW_ID) & BIT_MASK_HW_ID)
-
-/* 2 REG_CR (Offset 0x0100) */
-
-#define BIT_SHIFT_LBMODE 24
-#define BIT_MASK_LBMODE 0x1f
-#define BIT_LBMODE(x) (((x) & BIT_MASK_LBMODE) << BIT_SHIFT_LBMODE)
-#define BIT_GET_LBMODE(x) (((x) >> BIT_SHIFT_LBMODE) & BIT_MASK_LBMODE)
-
-#define BIT_SHIFT_NETYPE1 18
-#define BIT_MASK_NETYPE1 0x3
-#define BIT_NETYPE1(x) (((x) & BIT_MASK_NETYPE1) << BIT_SHIFT_NETYPE1)
-#define BIT_GET_NETYPE1(x) (((x) >> BIT_SHIFT_NETYPE1) & BIT_MASK_NETYPE1)
-
-#define BIT_SHIFT_NETYPE0 16
-#define BIT_MASK_NETYPE0 0x3
-#define BIT_NETYPE0(x) (((x) & BIT_MASK_NETYPE0) << BIT_SHIFT_NETYPE0)
-#define BIT_GET_NETYPE0(x) (((x) >> BIT_SHIFT_NETYPE0) & BIT_MASK_NETYPE0)
-
-/* 2 REG_CR (Offset 0x0100) */
-
-#define BIT_I2C_MAILBOX_EN BIT(12)
-#define BIT_SHCUT_EN BIT(11)
-
-/* 2 REG_CR (Offset 0x0100) */
-
-#define BIT_32K_CAL_TMR_EN BIT(10)
-#define BIT_MAC_SEC_EN BIT(9)
-#define BIT_ENSWBCN BIT(8)
-#define BIT_MACRXEN BIT(7)
-#define BIT_MACTXEN BIT(6)
-#define BIT_SCHEDULE_EN BIT(5)
-#define BIT_PROTOCOL_EN BIT(4)
-#define BIT_RXDMA_EN BIT(3)
-#define BIT_TXDMA_EN BIT(2)
-#define BIT_HCI_RXDMA_EN BIT(1)
-#define BIT_HCI_TXDMA_EN BIT(0)
-
-/* 2 REG_PKT_BUFF_ACCESS_CTRL (Offset 0x0106) */
-
-#define BIT_SHIFT_PKT_BUFF_ACCESS_CTRL 0
-#define BIT_MASK_PKT_BUFF_ACCESS_CTRL 0xff
-#define BIT_PKT_BUFF_ACCESS_CTRL(x) \
- (((x) & BIT_MASK_PKT_BUFF_ACCESS_CTRL) \
- << BIT_SHIFT_PKT_BUFF_ACCESS_CTRL)
-#define BIT_GET_PKT_BUFF_ACCESS_CTRL(x) \
- (((x) >> BIT_SHIFT_PKT_BUFF_ACCESS_CTRL) & \
- BIT_MASK_PKT_BUFF_ACCESS_CTRL)
-
-/* 2 REG_TSF_CLK_STATE (Offset 0x0108) */
-
-#define BIT_TSF_CLK_STABLE BIT(15)
-
-#define BIT_SHIFT_I2C_M_BUS_GNT_FW 4
-#define BIT_MASK_I2C_M_BUS_GNT_FW 0x7
-#define BIT_I2C_M_BUS_GNT_FW(x) \
- (((x) & BIT_MASK_I2C_M_BUS_GNT_FW) << BIT_SHIFT_I2C_M_BUS_GNT_FW)
-#define BIT_GET_I2C_M_BUS_GNT_FW(x) \
- (((x) >> BIT_SHIFT_I2C_M_BUS_GNT_FW) & BIT_MASK_I2C_M_BUS_GNT_FW)
-
-#define BIT_I2C_M_GNT_FW BIT(3)
-
-#define BIT_SHIFT_I2C_M_SPEED 1
-#define BIT_MASK_I2C_M_SPEED 0x3
-#define BIT_I2C_M_SPEED(x) \
- (((x) & BIT_MASK_I2C_M_SPEED) << BIT_SHIFT_I2C_M_SPEED)
-#define BIT_GET_I2C_M_SPEED(x) \
- (((x) >> BIT_SHIFT_I2C_M_SPEED) & BIT_MASK_I2C_M_SPEED)
-
-#define BIT_I2C_M_UNLOCK BIT(0)
-
-/* 2 REG_TXDMA_PQ_MAP (Offset 0x010C) */
-
-#define BIT_SHIFT_TXDMA_HIQ_MAP 14
-#define BIT_MASK_TXDMA_HIQ_MAP 0x3
-#define BIT_TXDMA_HIQ_MAP(x) \
- (((x) & BIT_MASK_TXDMA_HIQ_MAP) << BIT_SHIFT_TXDMA_HIQ_MAP)
-#define BIT_GET_TXDMA_HIQ_MAP(x) \
- (((x) >> BIT_SHIFT_TXDMA_HIQ_MAP) & BIT_MASK_TXDMA_HIQ_MAP)
-
-#define BIT_SHIFT_TXDMA_MGQ_MAP 12
-#define BIT_MASK_TXDMA_MGQ_MAP 0x3
-#define BIT_TXDMA_MGQ_MAP(x) \
- (((x) & BIT_MASK_TXDMA_MGQ_MAP) << BIT_SHIFT_TXDMA_MGQ_MAP)
-#define BIT_GET_TXDMA_MGQ_MAP(x) \
- (((x) >> BIT_SHIFT_TXDMA_MGQ_MAP) & BIT_MASK_TXDMA_MGQ_MAP)
-
-#define BIT_SHIFT_TXDMA_BKQ_MAP 10
-#define BIT_MASK_TXDMA_BKQ_MAP 0x3
-#define BIT_TXDMA_BKQ_MAP(x) \
- (((x) & BIT_MASK_TXDMA_BKQ_MAP) << BIT_SHIFT_TXDMA_BKQ_MAP)
-#define BIT_GET_TXDMA_BKQ_MAP(x) \
- (((x) >> BIT_SHIFT_TXDMA_BKQ_MAP) & BIT_MASK_TXDMA_BKQ_MAP)
-
-#define BIT_SHIFT_TXDMA_BEQ_MAP 8
-#define BIT_MASK_TXDMA_BEQ_MAP 0x3
-#define BIT_TXDMA_BEQ_MAP(x) \
- (((x) & BIT_MASK_TXDMA_BEQ_MAP) << BIT_SHIFT_TXDMA_BEQ_MAP)
-#define BIT_GET_TXDMA_BEQ_MAP(x) \
- (((x) >> BIT_SHIFT_TXDMA_BEQ_MAP) & BIT_MASK_TXDMA_BEQ_MAP)
-
-#define BIT_SHIFT_TXDMA_VIQ_MAP 6
-#define BIT_MASK_TXDMA_VIQ_MAP 0x3
-#define BIT_TXDMA_VIQ_MAP(x) \
- (((x) & BIT_MASK_TXDMA_VIQ_MAP) << BIT_SHIFT_TXDMA_VIQ_MAP)
-#define BIT_GET_TXDMA_VIQ_MAP(x) \
- (((x) >> BIT_SHIFT_TXDMA_VIQ_MAP) & BIT_MASK_TXDMA_VIQ_MAP)
-
-#define BIT_SHIFT_TXDMA_VOQ_MAP 4
-#define BIT_MASK_TXDMA_VOQ_MAP 0x3
-#define BIT_TXDMA_VOQ_MAP(x) \
- (((x) & BIT_MASK_TXDMA_VOQ_MAP) << BIT_SHIFT_TXDMA_VOQ_MAP)
-#define BIT_GET_TXDMA_VOQ_MAP(x) \
- (((x) >> BIT_SHIFT_TXDMA_VOQ_MAP) & BIT_MASK_TXDMA_VOQ_MAP)
-
-#define BIT_RXDMA_AGG_EN BIT(2)
-#define BIT_RXSHFT_EN BIT(1)
-#define BIT_RXDMA_ARBBW_EN BIT(0)
-
-/* 2 REG_TRXFF_BNDY (Offset 0x0114) */
-
-#define BIT_SHIFT_RXFFOVFL_RSV_V2 8
-#define BIT_MASK_RXFFOVFL_RSV_V2 0xf
-#define BIT_RXFFOVFL_RSV_V2(x) \
- (((x) & BIT_MASK_RXFFOVFL_RSV_V2) << BIT_SHIFT_RXFFOVFL_RSV_V2)
-#define BIT_GET_RXFFOVFL_RSV_V2(x) \
- (((x) >> BIT_SHIFT_RXFFOVFL_RSV_V2) & BIT_MASK_RXFFOVFL_RSV_V2)
-
-/* 2 REG_TRXFF_BNDY (Offset 0x0114) */
-
-#define BIT_SHIFT_TXPKTBUF_PGBNDY 0
-#define BIT_MASK_TXPKTBUF_PGBNDY 0xff
-#define BIT_TXPKTBUF_PGBNDY(x) \
- (((x) & BIT_MASK_TXPKTBUF_PGBNDY) << BIT_SHIFT_TXPKTBUF_PGBNDY)
-#define BIT_GET_TXPKTBUF_PGBNDY(x) \
- (((x) >> BIT_SHIFT_TXPKTBUF_PGBNDY) & BIT_MASK_TXPKTBUF_PGBNDY)
-
-/* 2 REG_TRXFF_BNDY (Offset 0x0114) */
-
-#define BIT_SHIFT_RXFF0_BNDY_V2 0
-#define BIT_MASK_RXFF0_BNDY_V2 0x3ffff
-#define BIT_RXFF0_BNDY_V2(x) \
- (((x) & BIT_MASK_RXFF0_BNDY_V2) << BIT_SHIFT_RXFF0_BNDY_V2)
-#define BIT_GET_RXFF0_BNDY_V2(x) \
- (((x) >> BIT_SHIFT_RXFF0_BNDY_V2) & BIT_MASK_RXFF0_BNDY_V2)
-
-#define BIT_SHIFT_RXFF0_RDPTR_V2 0
-#define BIT_MASK_RXFF0_RDPTR_V2 0x3ffff
-#define BIT_RXFF0_RDPTR_V2(x) \
- (((x) & BIT_MASK_RXFF0_RDPTR_V2) << BIT_SHIFT_RXFF0_RDPTR_V2)
-#define BIT_GET_RXFF0_RDPTR_V2(x) \
- (((x) >> BIT_SHIFT_RXFF0_RDPTR_V2) & BIT_MASK_RXFF0_RDPTR_V2)
-
-#define BIT_SHIFT_RXFF0_WTPTR_V2 0
-#define BIT_MASK_RXFF0_WTPTR_V2 0x3ffff
-#define BIT_RXFF0_WTPTR_V2(x) \
- (((x) & BIT_MASK_RXFF0_WTPTR_V2) << BIT_SHIFT_RXFF0_WTPTR_V2)
-#define BIT_GET_RXFF0_WTPTR_V2(x) \
- (((x) >> BIT_SHIFT_RXFF0_WTPTR_V2) & BIT_MASK_RXFF0_WTPTR_V2)
-
-/* 2 REG_PTA_I2C_MBOX (Offset 0x0118) */
-
-#define BIT_SHIFT_I2C_M_STATUS 8
-#define BIT_MASK_I2C_M_STATUS 0xf
-#define BIT_I2C_M_STATUS(x) \
- (((x) & BIT_MASK_I2C_M_STATUS) << BIT_SHIFT_I2C_M_STATUS)
-#define BIT_GET_I2C_M_STATUS(x) \
- (((x) >> BIT_SHIFT_I2C_M_STATUS) & BIT_MASK_I2C_M_STATUS)
-
-/* 2 REG_FE1IMR (Offset 0x0120) */
-
-#define BIT_FS_RXDMA2_DONE_INT_EN BIT(28)
-#define BIT_FS_RXDONE3_INT_EN BIT(27)
-#define BIT_FS_RXDONE2_INT_EN BIT(26)
-#define BIT_FS_RX_BCN_P4_INT_EN BIT(25)
-#define BIT_FS_RX_BCN_P3_INT_EN BIT(24)
-#define BIT_FS_RX_BCN_P2_INT_EN BIT(23)
-#define BIT_FS_RX_BCN_P1_INT_EN BIT(22)
-#define BIT_FS_RX_BCN_P0_INT_EN BIT(21)
-#define BIT_FS_RX_UMD0_INT_EN BIT(20)
-#define BIT_FS_RX_UMD1_INT_EN BIT(19)
-#define BIT_FS_RX_BMD0_INT_EN BIT(18)
-#define BIT_FS_RX_BMD1_INT_EN BIT(17)
-#define BIT_FS_RXDONE_INT_EN BIT(16)
-#define BIT_FS_WWLAN_INT_EN BIT(15)
-#define BIT_FS_SOUND_DONE_INT_EN BIT(14)
-#define BIT_FS_LP_STBY_INT_EN BIT(13)
-#define BIT_FS_TRL_MTR_INT_EN BIT(12)
-#define BIT_FS_BF1_PRETO_INT_EN BIT(11)
-#define BIT_FS_BF0_PRETO_INT_EN BIT(10)
-#define BIT_FS_PTCL_RELEASE_MACID_INT_EN BIT(9)
-
-/* 2 REG_FE1IMR (Offset 0x0120) */
-
-#define BIT_FS_LTE_COEX_EN BIT(6)
-
-/* 2 REG_FE1IMR (Offset 0x0120) */
-
-#define BIT_FS_WLACTOFF_INT_EN BIT(5)
-#define BIT_FS_WLACTON_INT_EN BIT(4)
-#define BIT_FS_BTCMD_INT_EN BIT(3)
-
-/* 2 REG_FE1IMR (Offset 0x0120) */
-
-#define BIT_FS_REG_MAILBOX_TO_I2C_INT_EN BIT(2)
-
-/* 2 REG_FE1IMR (Offset 0x0120) */
-
-#define BIT_FS_TRPC_TO_INT_EN_V1 BIT(1)
-
-/* 2 REG_FE1IMR (Offset 0x0120) */
-
-#define BIT_FS_RPC_O_T_INT_EN_V1 BIT(0)
-
-/* 2 REG_FE1ISR (Offset 0x0124) */
-
-#define BIT_FS_RXDMA2_DONE_INT BIT(28)
-#define BIT_FS_RXDONE3_INT BIT(27)
-#define BIT_FS_RXDONE2_INT BIT(26)
-#define BIT_FS_RX_BCN_P4_INT BIT(25)
-#define BIT_FS_RX_BCN_P3_INT BIT(24)
-#define BIT_FS_RX_BCN_P2_INT BIT(23)
-#define BIT_FS_RX_BCN_P1_INT BIT(22)
-#define BIT_FS_RX_BCN_P0_INT BIT(21)
-#define BIT_FS_RX_UMD0_INT BIT(20)
-#define BIT_FS_RX_UMD1_INT BIT(19)
-#define BIT_FS_RX_BMD0_INT BIT(18)
-#define BIT_FS_RX_BMD1_INT BIT(17)
-#define BIT_FS_RXDONE_INT BIT(16)
-#define BIT_FS_WWLAN_INT BIT(15)
-#define BIT_FS_SOUND_DONE_INT BIT(14)
-#define BIT_FS_LP_STBY_INT BIT(13)
-#define BIT_FS_TRL_MTR_INT BIT(12)
-#define BIT_FS_BF1_PRETO_INT BIT(11)
-#define BIT_FS_BF0_PRETO_INT BIT(10)
-#define BIT_FS_PTCL_RELEASE_MACID_INT BIT(9)
-
-/* 2 REG_FE1ISR (Offset 0x0124) */
-
-#define BIT_FS_LTE_COEX_INT BIT(6)
-
-/* 2 REG_FE1ISR (Offset 0x0124) */
-
-#define BIT_FS_WLACTOFF_INT BIT(5)
-#define BIT_FS_WLACTON_INT BIT(4)
-#define BIT_FS_BCN_RX_INT_INT BIT(3)
-
-/* 2 REG_FE1ISR (Offset 0x0124) */
-
-#define BIT_FS_MAILBOX_TO_I2C_INT BIT(2)
-
-/* 2 REG_FE1ISR (Offset 0x0124) */
-
-#define BIT_FS_TRPC_TO_INT BIT(1)
-
-/* 2 REG_FE1ISR (Offset 0x0124) */
-
-#define BIT_FS_RPC_O_T_INT BIT(0)
-
-/* 2 REG_CPWM (Offset 0x012C) */
-
-#define BIT_CPWM_TOGGLING BIT(31)
-
-#define BIT_SHIFT_CPWM_MOD 24
-#define BIT_MASK_CPWM_MOD 0x7f
-#define BIT_CPWM_MOD(x) (((x) & BIT_MASK_CPWM_MOD) << BIT_SHIFT_CPWM_MOD)
-#define BIT_GET_CPWM_MOD(x) (((x) >> BIT_SHIFT_CPWM_MOD) & BIT_MASK_CPWM_MOD)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNOK_MB7_INT_EN BIT(31)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNOK_MB6_INT_EN BIT(30)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNOK_MB5_INT_EN BIT(29)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNOK_MB4_INT_EN BIT(28)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNOK_MB3_INT_EN BIT(27)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNOK_MB2_INT_EN BIT(26)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNOK_MB1_INT_EN BIT(25)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNOK_MB0_INT_EN BIT(24)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNERR_MB7_INT_EN BIT(23)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNERR_MB6_INT_EN BIT(22)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNERR_MB5_INT_EN BIT(21)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNERR_MB4_INT_EN BIT(20)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNERR_MB3_INT_EN BIT(19)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNERR_MB2_INT_EN BIT(18)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNERR_MB1_INT_EN BIT(17)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXBCNERR_MB0_INT_EN BIT(16)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_CPU_MGQ_TXDONE_INT_EN BIT(15)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_SIFS_OVERSPEC_INT_EN BIT(14)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_MGNTQ_RPTR_RELEASE_INT_EN BIT(13)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_MGNTQFF_TO_INT_EN BIT(12)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_DDMA1_LP_INT_EN BIT(11)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_DDMA1_HP_INT_EN BIT(10)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_DDMA0_LP_INT_EN BIT(9)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_DDMA0_HP_INT_EN BIT(8)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TRXRPT_INT_EN BIT(7)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_C2H_W_READY_INT_EN BIT(6)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_HRCV_INT_EN BIT(5)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_H2CCMD_INT_EN BIT(4)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXPKTIN_INT_EN BIT(3)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_ERRORHDL_INT_EN BIT(2)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXCCX_INT_EN BIT(1)
-
-/* 2 REG_FWIMR (Offset 0x0130) */
-
-#define BIT_FS_TXCLOSE_INT_EN BIT(0)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNOK_MB7_INT BIT(31)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNOK_MB6_INT BIT(30)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNOK_MB5_INT BIT(29)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNOK_MB4_INT BIT(28)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNOK_MB3_INT BIT(27)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNOK_MB2_INT BIT(26)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNOK_MB1_INT BIT(25)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNOK_MB0_INT BIT(24)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNERR_MB7_INT BIT(23)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNERR_MB6_INT BIT(22)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNERR_MB5_INT BIT(21)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNERR_MB4_INT BIT(20)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNERR_MB3_INT BIT(19)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNERR_MB2_INT BIT(18)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNERR_MB1_INT BIT(17)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXBCNERR_MB0_INT BIT(16)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_CPU_MGQ_TXDONE_INT BIT(15)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_SIFS_OVERSPEC_INT BIT(14)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_MGNTQ_RPTR_RELEASE_INT BIT(13)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_MGNTQFF_TO_INT BIT(12)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_DDMA1_LP_INT BIT(11)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_DDMA1_HP_INT BIT(10)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_DDMA0_LP_INT BIT(9)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_DDMA0_HP_INT BIT(8)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TRXRPT_INT BIT(7)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_C2H_W_READY_INT BIT(6)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_HRCV_INT BIT(5)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_H2CCMD_INT BIT(4)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXPKTIN_INT BIT(3)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_ERRORHDL_INT BIT(2)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXCCX_INT BIT(1)
-
-/* 2 REG_FWISR (Offset 0x0134) */
-
-#define BIT_FS_TXCLOSE_INT BIT(0)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_PS_TIMER_C_EARLY_INT_EN BIT(23)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_PS_TIMER_B_EARLY_INT_EN BIT(22)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_PS_TIMER_A_EARLY_INT_EN BIT(21)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_CPUMGQ_TX_TIMER_EARLY_INT_EN BIT(20)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_PS_TIMER_C_INT_EN BIT(19)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_PS_TIMER_B_INT_EN BIT(18)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_PS_TIMER_A_INT_EN BIT(17)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_CPUMGQ_TX_TIMER_INT_EN BIT(16)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_PS_TIMEOUT2_EN BIT(15)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_PS_TIMEOUT1_EN BIT(14)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_PS_TIMEOUT0_EN BIT(13)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT8_EN BIT(8)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT7_EN BIT(7)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT6_EN BIT(6)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT5_EN BIT(5)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT4_EN BIT(4)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT3_EN BIT(3)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT2_EN BIT(2)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT1_EN BIT(1)
-
-/* 2 REG_FTIMR (Offset 0x0138) */
-
-#define BIT_FS_GTINT0_EN BIT(0)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_PS_TIMER_C_EARLY__INT BIT(23)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_PS_TIMER_B_EARLY__INT BIT(22)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_PS_TIMER_A_EARLY__INT BIT(21)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_CPUMGQ_TX_TIMER_EARLY_INT BIT(20)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_PS_TIMER_C_INT BIT(19)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_PS_TIMER_B_INT BIT(18)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_PS_TIMER_A_INT BIT(17)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_CPUMGQ_TX_TIMER_INT BIT(16)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_PS_TIMEOUT2_INT BIT(15)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_PS_TIMEOUT1_INT BIT(14)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_PS_TIMEOUT0_INT BIT(13)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT8_INT BIT(8)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT7_INT BIT(7)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT6_INT BIT(6)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT5_INT BIT(5)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT4_INT BIT(4)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT3_INT BIT(3)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT2_INT BIT(2)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT1_INT BIT(1)
-
-/* 2 REG_FTISR (Offset 0x013C) */
-
-#define BIT_FS_GTINT0_INT BIT(0)
-
-/* 2 REG_PKTBUF_DBG_CTRL (Offset 0x0140) */
-
-#define BIT_SHIFT_PKTBUF_WRITE_EN 24
-#define BIT_MASK_PKTBUF_WRITE_EN 0xff
-#define BIT_PKTBUF_WRITE_EN(x) \
- (((x) & BIT_MASK_PKTBUF_WRITE_EN) << BIT_SHIFT_PKTBUF_WRITE_EN)
-#define BIT_GET_PKTBUF_WRITE_EN(x) \
- (((x) >> BIT_SHIFT_PKTBUF_WRITE_EN) & BIT_MASK_PKTBUF_WRITE_EN)
-
-/* 2 REG_PKTBUF_DBG_CTRL (Offset 0x0140) */
-
-#define BIT_TXRPTBUF_DBG BIT(23)
-
-/* 2 REG_PKTBUF_DBG_CTRL (Offset 0x0140) */
-
-#define BIT_TXPKTBUF_DBG_V2 BIT(20)
-
-/* 2 REG_PKTBUF_DBG_CTRL (Offset 0x0140) */
-
-#define BIT_RXPKTBUF_DBG BIT(16)
-
-/* 2 REG_PKTBUF_DBG_CTRL (Offset 0x0140) */
-
-#define BIT_SHIFT_PKTBUF_DBG_ADDR 0
-#define BIT_MASK_PKTBUF_DBG_ADDR 0x1fff
-#define BIT_PKTBUF_DBG_ADDR(x) \
- (((x) & BIT_MASK_PKTBUF_DBG_ADDR) << BIT_SHIFT_PKTBUF_DBG_ADDR)
-#define BIT_GET_PKTBUF_DBG_ADDR(x) \
- (((x) >> BIT_SHIFT_PKTBUF_DBG_ADDR) & BIT_MASK_PKTBUF_DBG_ADDR)
-
-/* 2 REG_PKTBUF_DBG_DATA_L (Offset 0x0144) */
-
-#define BIT_SHIFT_PKTBUF_DBG_DATA_L 0
-#define BIT_MASK_PKTBUF_DBG_DATA_L 0xffffffffL
-#define BIT_PKTBUF_DBG_DATA_L(x) \
- (((x) & BIT_MASK_PKTBUF_DBG_DATA_L) << BIT_SHIFT_PKTBUF_DBG_DATA_L)
-#define BIT_GET_PKTBUF_DBG_DATA_L(x) \
- (((x) >> BIT_SHIFT_PKTBUF_DBG_DATA_L) & BIT_MASK_PKTBUF_DBG_DATA_L)
-
-/* 2 REG_PKTBUF_DBG_DATA_H (Offset 0x0148) */
-
-#define BIT_SHIFT_PKTBUF_DBG_DATA_H 0
-#define BIT_MASK_PKTBUF_DBG_DATA_H 0xffffffffL
-#define BIT_PKTBUF_DBG_DATA_H(x) \
- (((x) & BIT_MASK_PKTBUF_DBG_DATA_H) << BIT_SHIFT_PKTBUF_DBG_DATA_H)
-#define BIT_GET_PKTBUF_DBG_DATA_H(x) \
- (((x) >> BIT_SHIFT_PKTBUF_DBG_DATA_H) & BIT_MASK_PKTBUF_DBG_DATA_H)
-
-/* 2 REG_CPWM2 (Offset 0x014C) */
-
-#define BIT_SHIFT_L0S_TO_RCVY_NUM 16
-#define BIT_MASK_L0S_TO_RCVY_NUM 0xff
-#define BIT_L0S_TO_RCVY_NUM(x) \
- (((x) & BIT_MASK_L0S_TO_RCVY_NUM) << BIT_SHIFT_L0S_TO_RCVY_NUM)
-#define BIT_GET_L0S_TO_RCVY_NUM(x) \
- (((x) >> BIT_SHIFT_L0S_TO_RCVY_NUM) & BIT_MASK_L0S_TO_RCVY_NUM)
-
-#define BIT_CPWM2_TOGGLING BIT(15)
-
-#define BIT_SHIFT_CPWM2_MOD 0
-#define BIT_MASK_CPWM2_MOD 0x7fff
-#define BIT_CPWM2_MOD(x) (((x) & BIT_MASK_CPWM2_MOD) << BIT_SHIFT_CPWM2_MOD)
-#define BIT_GET_CPWM2_MOD(x) (((x) >> BIT_SHIFT_CPWM2_MOD) & BIT_MASK_CPWM2_MOD)
-
-/* 2 REG_TC0_CTRL (Offset 0x0150) */
-
-#define BIT_TC0INT_EN BIT(26)
-#define BIT_TC0MODE BIT(25)
-#define BIT_TC0EN BIT(24)
-
-#define BIT_SHIFT_TC0DATA 0
-#define BIT_MASK_TC0DATA 0xffffff
-#define BIT_TC0DATA(x) (((x) & BIT_MASK_TC0DATA) << BIT_SHIFT_TC0DATA)
-#define BIT_GET_TC0DATA(x) (((x) >> BIT_SHIFT_TC0DATA) & BIT_MASK_TC0DATA)
-
-/* 2 REG_TC1_CTRL (Offset 0x0154) */
-
-#define BIT_TC1INT_EN BIT(26)
-#define BIT_TC1MODE BIT(25)
-#define BIT_TC1EN BIT(24)
-
-#define BIT_SHIFT_TC1DATA 0
-#define BIT_MASK_TC1DATA 0xffffff
-#define BIT_TC1DATA(x) (((x) & BIT_MASK_TC1DATA) << BIT_SHIFT_TC1DATA)
-#define BIT_GET_TC1DATA(x) (((x) >> BIT_SHIFT_TC1DATA) & BIT_MASK_TC1DATA)
-
-/* 2 REG_TC2_CTRL (Offset 0x0158) */
-
-#define BIT_TC2INT_EN BIT(26)
-#define BIT_TC2MODE BIT(25)
-#define BIT_TC2EN BIT(24)
-
-#define BIT_SHIFT_TC2DATA 0
-#define BIT_MASK_TC2DATA 0xffffff
-#define BIT_TC2DATA(x) (((x) & BIT_MASK_TC2DATA) << BIT_SHIFT_TC2DATA)
-#define BIT_GET_TC2DATA(x) (((x) >> BIT_SHIFT_TC2DATA) & BIT_MASK_TC2DATA)
-
-/* 2 REG_TC3_CTRL (Offset 0x015C) */
-
-#define BIT_TC3INT_EN BIT(26)
-#define BIT_TC3MODE BIT(25)
-#define BIT_TC3EN BIT(24)
-
-#define BIT_SHIFT_TC3DATA 0
-#define BIT_MASK_TC3DATA 0xffffff
-#define BIT_TC3DATA(x) (((x) & BIT_MASK_TC3DATA) << BIT_SHIFT_TC3DATA)
-#define BIT_GET_TC3DATA(x) (((x) >> BIT_SHIFT_TC3DATA) & BIT_MASK_TC3DATA)
-
-/* 2 REG_TC4_CTRL (Offset 0x0160) */
-
-#define BIT_TC4INT_EN BIT(26)
-#define BIT_TC4MODE BIT(25)
-#define BIT_TC4EN BIT(24)
-
-#define BIT_SHIFT_TC4DATA 0
-#define BIT_MASK_TC4DATA 0xffffff
-#define BIT_TC4DATA(x) (((x) & BIT_MASK_TC4DATA) << BIT_SHIFT_TC4DATA)
-#define BIT_GET_TC4DATA(x) (((x) >> BIT_SHIFT_TC4DATA) & BIT_MASK_TC4DATA)
-
-/* 2 REG_TCUNIT_BASE (Offset 0x0164) */
-
-#define BIT_SHIFT_TCUNIT_BASE 0
-#define BIT_MASK_TCUNIT_BASE 0x3fff
-#define BIT_TCUNIT_BASE(x) \
- (((x) & BIT_MASK_TCUNIT_BASE) << BIT_SHIFT_TCUNIT_BASE)
-#define BIT_GET_TCUNIT_BASE(x) \
- (((x) >> BIT_SHIFT_TCUNIT_BASE) & BIT_MASK_TCUNIT_BASE)
-
-/* 2 REG_TC5_CTRL (Offset 0x0168) */
-
-#define BIT_TC5INT_EN BIT(26)
-
-/* 2 REG_TC5_CTRL (Offset 0x0168) */
-
-#define BIT_TC5MODE BIT(25)
-#define BIT_TC5EN BIT(24)
-
-#define BIT_SHIFT_TC5DATA 0
-#define BIT_MASK_TC5DATA 0xffffff
-#define BIT_TC5DATA(x) (((x) & BIT_MASK_TC5DATA) << BIT_SHIFT_TC5DATA)
-#define BIT_GET_TC5DATA(x) (((x) >> BIT_SHIFT_TC5DATA) & BIT_MASK_TC5DATA)
-
-/* 2 REG_TC6_CTRL (Offset 0x016C) */
-
-#define BIT_TC6INT_EN BIT(26)
-
-/* 2 REG_TC6_CTRL (Offset 0x016C) */
-
-#define BIT_TC6MODE BIT(25)
-#define BIT_TC6EN BIT(24)
-
-#define BIT_SHIFT_TC6DATA 0
-#define BIT_MASK_TC6DATA 0xffffff
-#define BIT_TC6DATA(x) (((x) & BIT_MASK_TC6DATA) << BIT_SHIFT_TC6DATA)
-#define BIT_GET_TC6DATA(x) (((x) >> BIT_SHIFT_TC6DATA) & BIT_MASK_TC6DATA)
-
-/* 2 REG_MBIST_FAIL (Offset 0x0170) */
-
-#define BIT_SHIFT_8051_MBIST_FAIL 26
-#define BIT_MASK_8051_MBIST_FAIL 0x7
-#define BIT_8051_MBIST_FAIL(x) \
- (((x) & BIT_MASK_8051_MBIST_FAIL) << BIT_SHIFT_8051_MBIST_FAIL)
-#define BIT_GET_8051_MBIST_FAIL(x) \
- (((x) >> BIT_SHIFT_8051_MBIST_FAIL) & BIT_MASK_8051_MBIST_FAIL)
-
-#define BIT_SHIFT_USB_MBIST_FAIL 24
-#define BIT_MASK_USB_MBIST_FAIL 0x3
-#define BIT_USB_MBIST_FAIL(x) \
- (((x) & BIT_MASK_USB_MBIST_FAIL) << BIT_SHIFT_USB_MBIST_FAIL)
-#define BIT_GET_USB_MBIST_FAIL(x) \
- (((x) >> BIT_SHIFT_USB_MBIST_FAIL) & BIT_MASK_USB_MBIST_FAIL)
-
-#define BIT_SHIFT_PCIE_MBIST_FAIL 16
-#define BIT_MASK_PCIE_MBIST_FAIL 0x3f
-#define BIT_PCIE_MBIST_FAIL(x) \
- (((x) & BIT_MASK_PCIE_MBIST_FAIL) << BIT_SHIFT_PCIE_MBIST_FAIL)
-#define BIT_GET_PCIE_MBIST_FAIL(x) \
- (((x) >> BIT_SHIFT_PCIE_MBIST_FAIL) & BIT_MASK_PCIE_MBIST_FAIL)
-
-/* 2 REG_MBIST_FAIL (Offset 0x0170) */
-
-#define BIT_SHIFT_MAC_MBIST_FAIL 0
-#define BIT_MASK_MAC_MBIST_FAIL 0xfff
-#define BIT_MAC_MBIST_FAIL(x) \
- (((x) & BIT_MASK_MAC_MBIST_FAIL) << BIT_SHIFT_MAC_MBIST_FAIL)
-#define BIT_GET_MAC_MBIST_FAIL(x) \
- (((x) >> BIT_SHIFT_MAC_MBIST_FAIL) & BIT_MASK_MAC_MBIST_FAIL)
-
-/* 2 REG_MBIST_START_PAUSE (Offset 0x0174) */
-
-#define BIT_SHIFT_8051_MBIST_START_PAUSE 26
-#define BIT_MASK_8051_MBIST_START_PAUSE 0x7
-#define BIT_8051_MBIST_START_PAUSE(x) \
- (((x) & BIT_MASK_8051_MBIST_START_PAUSE) \
- << BIT_SHIFT_8051_MBIST_START_PAUSE)
-#define BIT_GET_8051_MBIST_START_PAUSE(x) \
- (((x) >> BIT_SHIFT_8051_MBIST_START_PAUSE) & \
- BIT_MASK_8051_MBIST_START_PAUSE)
-
-#define BIT_SHIFT_USB_MBIST_START_PAUSE 24
-#define BIT_MASK_USB_MBIST_START_PAUSE 0x3
-#define BIT_USB_MBIST_START_PAUSE(x) \
- (((x) & BIT_MASK_USB_MBIST_START_PAUSE) \
- << BIT_SHIFT_USB_MBIST_START_PAUSE)
-#define BIT_GET_USB_MBIST_START_PAUSE(x) \
- (((x) >> BIT_SHIFT_USB_MBIST_START_PAUSE) & \
- BIT_MASK_USB_MBIST_START_PAUSE)
-
-#define BIT_SHIFT_PCIE_MBIST_START_PAUSE 16
-#define BIT_MASK_PCIE_MBIST_START_PAUSE 0x3f
-#define BIT_PCIE_MBIST_START_PAUSE(x) \
- (((x) & BIT_MASK_PCIE_MBIST_START_PAUSE) \
- << BIT_SHIFT_PCIE_MBIST_START_PAUSE)
-#define BIT_GET_PCIE_MBIST_START_PAUSE(x) \
- (((x) >> BIT_SHIFT_PCIE_MBIST_START_PAUSE) & \
- BIT_MASK_PCIE_MBIST_START_PAUSE)
-
-/* 2 REG_MBIST_START_PAUSE (Offset 0x0174) */
-
-#define BIT_SHIFT_MAC_MBIST_START_PAUSE 0
-#define BIT_MASK_MAC_MBIST_START_PAUSE 0xfff
-#define BIT_MAC_MBIST_START_PAUSE(x) \
- (((x) & BIT_MASK_MAC_MBIST_START_PAUSE) \
- << BIT_SHIFT_MAC_MBIST_START_PAUSE)
-#define BIT_GET_MAC_MBIST_START_PAUSE(x) \
- (((x) >> BIT_SHIFT_MAC_MBIST_START_PAUSE) & \
- BIT_MASK_MAC_MBIST_START_PAUSE)
-
-/* 2 REG_MBIST_DONE (Offset 0x0178) */
-
-#define BIT_SHIFT_8051_MBIST_DONE 26
-#define BIT_MASK_8051_MBIST_DONE 0x7
-#define BIT_8051_MBIST_DONE(x) \
- (((x) & BIT_MASK_8051_MBIST_DONE) << BIT_SHIFT_8051_MBIST_DONE)
-#define BIT_GET_8051_MBIST_DONE(x) \
- (((x) >> BIT_SHIFT_8051_MBIST_DONE) & BIT_MASK_8051_MBIST_DONE)
-
-#define BIT_SHIFT_USB_MBIST_DONE 24
-#define BIT_MASK_USB_MBIST_DONE 0x3
-#define BIT_USB_MBIST_DONE(x) \
- (((x) & BIT_MASK_USB_MBIST_DONE) << BIT_SHIFT_USB_MBIST_DONE)
-#define BIT_GET_USB_MBIST_DONE(x) \
- (((x) >> BIT_SHIFT_USB_MBIST_DONE) & BIT_MASK_USB_MBIST_DONE)
-
-#define BIT_SHIFT_PCIE_MBIST_DONE 16
-#define BIT_MASK_PCIE_MBIST_DONE 0x3f
-#define BIT_PCIE_MBIST_DONE(x) \
- (((x) & BIT_MASK_PCIE_MBIST_DONE) << BIT_SHIFT_PCIE_MBIST_DONE)
-#define BIT_GET_PCIE_MBIST_DONE(x) \
- (((x) >> BIT_SHIFT_PCIE_MBIST_DONE) & BIT_MASK_PCIE_MBIST_DONE)
-
-/* 2 REG_MBIST_DONE (Offset 0x0178) */
-
-#define BIT_SHIFT_MAC_MBIST_DONE 0
-#define BIT_MASK_MAC_MBIST_DONE 0xfff
-#define BIT_MAC_MBIST_DONE(x) \
- (((x) & BIT_MASK_MAC_MBIST_DONE) << BIT_SHIFT_MAC_MBIST_DONE)
-#define BIT_GET_MAC_MBIST_DONE(x) \
- (((x) >> BIT_SHIFT_MAC_MBIST_DONE) & BIT_MASK_MAC_MBIST_DONE)
-
-/* 2 REG_MBIST_FAIL_NRML (Offset 0x017C) */
-
-#define BIT_SHIFT_MBIST_FAIL_NRML 0
-#define BIT_MASK_MBIST_FAIL_NRML 0xffffffffL
-#define BIT_MBIST_FAIL_NRML(x) \
- (((x) & BIT_MASK_MBIST_FAIL_NRML) << BIT_SHIFT_MBIST_FAIL_NRML)
-#define BIT_GET_MBIST_FAIL_NRML(x) \
- (((x) >> BIT_SHIFT_MBIST_FAIL_NRML) & BIT_MASK_MBIST_FAIL_NRML)
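
The MBIST registers above report per-block memory built-in-self-test results as packed fields. A hedged sketch of decoding the MAC portion, assuming a hypothetical reg_read32() helper and taking an all-ones DONE field to mean every engine has finished (that interpretation is an assumption, not something the header states):

#include <linux/types.h>

#define REG_MBIST_FAIL	0x0170	/* offsets from the comments above */
#define REG_MBIST_DONE	0x0178

extern u32 reg_read32(u32 addr);	/* hypothetical accessor */

/* True once every MAC MBIST engine reports done and none reports a failure. */
static bool mac_mbist_passed(void)
{
	u32 done = BIT_GET_MAC_MBIST_DONE(reg_read32(REG_MBIST_DONE));
	u32 fail = BIT_GET_MAC_MBIST_FAIL(reg_read32(REG_MBIST_FAIL));

	return done == BIT_MASK_MAC_MBIST_DONE && fail == 0;
}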
-
-/* 2 REG_AES_DECRPT_DATA (Offset 0x0180) */
-
-#define BIT_SHIFT_IPS_CFG_ADDR 0
-#define BIT_MASK_IPS_CFG_ADDR 0xff
-#define BIT_IPS_CFG_ADDR(x) \
- (((x) & BIT_MASK_IPS_CFG_ADDR) << BIT_SHIFT_IPS_CFG_ADDR)
-#define BIT_GET_IPS_CFG_ADDR(x) \
- (((x) >> BIT_SHIFT_IPS_CFG_ADDR) & BIT_MASK_IPS_CFG_ADDR)
-
-/* 2 REG_AES_DECRPT_CFG (Offset 0x0184) */
-
-#define BIT_SHIFT_IPS_CFG_DATA 0
-#define BIT_MASK_IPS_CFG_DATA 0xffffffffL
-#define BIT_IPS_CFG_DATA(x) \
- (((x) & BIT_MASK_IPS_CFG_DATA) << BIT_SHIFT_IPS_CFG_DATA)
-#define BIT_GET_IPS_CFG_DATA(x) \
- (((x) >> BIT_SHIFT_IPS_CFG_DATA) & BIT_MASK_IPS_CFG_DATA)
-
-/* 2 REG_TMETER (Offset 0x0190) */
-
-#define BIT_TEMP_VALID BIT(31)
-
-#define BIT_SHIFT_TEMP_VALUE 24
-#define BIT_MASK_TEMP_VALUE 0x3f
-#define BIT_TEMP_VALUE(x) (((x) & BIT_MASK_TEMP_VALUE) << BIT_SHIFT_TEMP_VALUE)
-#define BIT_GET_TEMP_VALUE(x) \
- (((x) >> BIT_SHIFT_TEMP_VALUE) & BIT_MASK_TEMP_VALUE)
-
-#define BIT_SHIFT_REG_TMETER_TIMER 8
-#define BIT_MASK_REG_TMETER_TIMER 0xfff
-#define BIT_REG_TMETER_TIMER(x) \
- (((x) & BIT_MASK_REG_TMETER_TIMER) << BIT_SHIFT_REG_TMETER_TIMER)
-#define BIT_GET_REG_TMETER_TIMER(x) \
- (((x) >> BIT_SHIFT_REG_TMETER_TIMER) & BIT_MASK_REG_TMETER_TIMER)
-
-#define BIT_SHIFT_REG_TEMP_DELTA 2
-#define BIT_MASK_REG_TEMP_DELTA 0x3f
-#define BIT_REG_TEMP_DELTA(x) \
- (((x) & BIT_MASK_REG_TEMP_DELTA) << BIT_SHIFT_REG_TEMP_DELTA)
-#define BIT_GET_REG_TEMP_DELTA(x) \
- (((x) >> BIT_SHIFT_REG_TEMP_DELTA) & BIT_MASK_REG_TEMP_DELTA)
-
-#define BIT_REG_TMETER_EN BIT(0)
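
REG_TMETER combines an enable bit, a valid flag and a 6-bit temperature reading. A minimal sketch of sampling it, again with hypothetical reg_read32()/reg_write32() accessors:

#include <linux/errno.h>
#include <linux/types.h>

#define REG_TMETER	0x0190	/* offset from the comment above */

extern u32 reg_read32(u32 addr);		/* hypothetical accessor */
extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Make sure the thermal meter runs, then fetch the latest reading if any. */
static int tmeter_read(u8 *value)
{
	u32 val = reg_read32(REG_TMETER);

	if (!(val & BIT_REG_TMETER_EN))
		reg_write32(REG_TMETER, val | BIT_REG_TMETER_EN);

	val = reg_read32(REG_TMETER);
	if (!(val & BIT_TEMP_VALID))
		return -EAGAIN;			/* no sample latched yet */

	*value = BIT_GET_TEMP_VALUE(val);
	return 0;
}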
-
-/* 2 REG_OSC_32K_CTRL (Offset 0x0194) */
-
-#define BIT_SHIFT_OSC_32K_CLKGEN_0 16
-#define BIT_MASK_OSC_32K_CLKGEN_0 0xffff
-#define BIT_OSC_32K_CLKGEN_0(x) \
- (((x) & BIT_MASK_OSC_32K_CLKGEN_0) << BIT_SHIFT_OSC_32K_CLKGEN_0)
-#define BIT_GET_OSC_32K_CLKGEN_0(x) \
- (((x) >> BIT_SHIFT_OSC_32K_CLKGEN_0) & BIT_MASK_OSC_32K_CLKGEN_0)
-
-/* 2 REG_OSC_32K_CTRL (Offset 0x0194) */
-
-#define BIT_SHIFT_OSC_32K_RES_COMP 4
-#define BIT_MASK_OSC_32K_RES_COMP 0x3
-#define BIT_OSC_32K_RES_COMP(x) \
- (((x) & BIT_MASK_OSC_32K_RES_COMP) << BIT_SHIFT_OSC_32K_RES_COMP)
-#define BIT_GET_OSC_32K_RES_COMP(x) \
- (((x) >> BIT_SHIFT_OSC_32K_RES_COMP) & BIT_MASK_OSC_32K_RES_COMP)
-
-#define BIT_OSC_32K_OUT_SEL BIT(3)
-
-/* 2 REG_OSC_32K_CTRL (Offset 0x0194) */
-
-#define BIT_ISO_WL_2_OSC_32K BIT(1)
-
-/* 2 REG_OSC_32K_CTRL (Offset 0x0194) */
-
-#define BIT_POW_CKGEN BIT(0)
-
-/* 2 REG_32K_CAL_REG1 (Offset 0x0198) */
-
-#define BIT_CAL_32K_REG_WR BIT(31)
-#define BIT_CAL_32K_DBG_SEL BIT(22)
-
-#define BIT_SHIFT_CAL_32K_REG_ADDR 16
-#define BIT_MASK_CAL_32K_REG_ADDR 0x3f
-#define BIT_CAL_32K_REG_ADDR(x) \
- (((x) & BIT_MASK_CAL_32K_REG_ADDR) << BIT_SHIFT_CAL_32K_REG_ADDR)
-#define BIT_GET_CAL_32K_REG_ADDR(x) \
- (((x) >> BIT_SHIFT_CAL_32K_REG_ADDR) & BIT_MASK_CAL_32K_REG_ADDR)
-
-/* 2 REG_32K_CAL_REG1 (Offset 0x0198) */
-
-#define BIT_SHIFT_CAL_32K_REG_DATA 0
-#define BIT_MASK_CAL_32K_REG_DATA 0xffff
-#define BIT_CAL_32K_REG_DATA(x) \
- (((x) & BIT_MASK_CAL_32K_REG_DATA) << BIT_SHIFT_CAL_32K_REG_DATA)
-#define BIT_GET_CAL_32K_REG_DATA(x) \
- (((x) >> BIT_SHIFT_CAL_32K_REG_DATA) & BIT_MASK_CAL_32K_REG_DATA)
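
REG_32K_CAL_REG1 looks like an indirect address/data window into the 32 kHz calibration block: CAL_32K_REG_ADDR selects an internal register, CAL_32K_REG_DATA carries the value and BIT_CAL_32K_REG_WR strobes the write. That reading of the fields is an assumption; under it, a write is a single composed register access:

#include <linux/types.h>

#define REG_32K_CAL_REG1	0x0198	/* offset from the comment above */

extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Write one 16-bit value into an internal 32K-calibration register. */
static void cal32k_write(u8 cal_addr, u16 data)
{
	u32 val = BIT_CAL_32K_REG_ADDR(cal_addr) |
		  BIT_CAL_32K_REG_DATA(data) |
		  BIT_CAL_32K_REG_WR;		/* latch the write */

	reg_write32(REG_32K_CAL_REG1, val);
}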
-
-/* 2 REG_C2HEVT (Offset 0x01A0) */
-
-#define BIT_SHIFT_C2HEVT_MSG 0
-#define BIT_MASK_C2HEVT_MSG 0xffffffffffffffffffffffffffffffffL
-#define BIT_C2HEVT_MSG(x) (((x) & BIT_MASK_C2HEVT_MSG) << BIT_SHIFT_C2HEVT_MSG)
-#define BIT_GET_C2HEVT_MSG(x) \
- (((x) >> BIT_SHIFT_C2HEVT_MSG) & BIT_MASK_C2HEVT_MSG)
-
-/* 2 REG_SW_DEFINED_PAGE1 (Offset 0x01B8) */
-
-#define BIT_SHIFT_SW_DEFINED_PAGE1 0
-#define BIT_MASK_SW_DEFINED_PAGE1 0xffffffffffffffffL
-#define BIT_SW_DEFINED_PAGE1(x) \
- (((x) & BIT_MASK_SW_DEFINED_PAGE1) << BIT_SHIFT_SW_DEFINED_PAGE1)
-#define BIT_GET_SW_DEFINED_PAGE1(x) \
- (((x) >> BIT_SHIFT_SW_DEFINED_PAGE1) & BIT_MASK_SW_DEFINED_PAGE1)
-
-/* 2 REG_MCUTST_I (Offset 0x01C0) */
-
-#define BIT_SHIFT_MCUDMSG_I 0
-#define BIT_MASK_MCUDMSG_I 0xffffffffL
-#define BIT_MCUDMSG_I(x) (((x) & BIT_MASK_MCUDMSG_I) << BIT_SHIFT_MCUDMSG_I)
-#define BIT_GET_MCUDMSG_I(x) (((x) >> BIT_SHIFT_MCUDMSG_I) & BIT_MASK_MCUDMSG_I)
-
-/* 2 REG_MCUTST_II (Offset 0x01C4) */
-
-#define BIT_SHIFT_MCUDMSG_II 0
-#define BIT_MASK_MCUDMSG_II 0xffffffffL
-#define BIT_MCUDMSG_II(x) (((x) & BIT_MASK_MCUDMSG_II) << BIT_SHIFT_MCUDMSG_II)
-#define BIT_GET_MCUDMSG_II(x) \
- (((x) >> BIT_SHIFT_MCUDMSG_II) & BIT_MASK_MCUDMSG_II)
-
-/* 2 REG_FMETHR (Offset 0x01C8) */
-
-#define BIT_FMSG_INT BIT(31)
-
-#define BIT_SHIFT_FW_MSG 0
-#define BIT_MASK_FW_MSG 0xffffffffL
-#define BIT_FW_MSG(x) (((x) & BIT_MASK_FW_MSG) << BIT_SHIFT_FW_MSG)
-#define BIT_GET_FW_MSG(x) (((x) >> BIT_SHIFT_FW_MSG) & BIT_MASK_FW_MSG)
-
-/* 2 REG_HMETFR (Offset 0x01CC) */
-
-#define BIT_SHIFT_HRCV_MSG 24
-#define BIT_MASK_HRCV_MSG 0xff
-#define BIT_HRCV_MSG(x) (((x) & BIT_MASK_HRCV_MSG) << BIT_SHIFT_HRCV_MSG)
-#define BIT_GET_HRCV_MSG(x) (((x) >> BIT_SHIFT_HRCV_MSG) & BIT_MASK_HRCV_MSG)
-
-#define BIT_INT_BOX3 BIT(3)
-#define BIT_INT_BOX2 BIT(2)
-#define BIT_INT_BOX1 BIT(1)
-#define BIT_INT_BOX0 BIT(0)
-
-/* 2 REG_HMEBOX0 (Offset 0x01D0) */
-
-#define BIT_SHIFT_HOST_MSG_0 0
-#define BIT_MASK_HOST_MSG_0 0xffffffffL
-#define BIT_HOST_MSG_0(x) (((x) & BIT_MASK_HOST_MSG_0) << BIT_SHIFT_HOST_MSG_0)
-#define BIT_GET_HOST_MSG_0(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_0) & BIT_MASK_HOST_MSG_0)
-
-/* 2 REG_HMEBOX1 (Offset 0x01D4) */
-
-#define BIT_SHIFT_HOST_MSG_1 0
-#define BIT_MASK_HOST_MSG_1 0xffffffffL
-#define BIT_HOST_MSG_1(x) (((x) & BIT_MASK_HOST_MSG_1) << BIT_SHIFT_HOST_MSG_1)
-#define BIT_GET_HOST_MSG_1(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_1) & BIT_MASK_HOST_MSG_1)
-
-/* 2 REG_HMEBOX2 (Offset 0x01D8) */
-
-#define BIT_SHIFT_HOST_MSG_2 0
-#define BIT_MASK_HOST_MSG_2 0xffffffffL
-#define BIT_HOST_MSG_2(x) (((x) & BIT_MASK_HOST_MSG_2) << BIT_SHIFT_HOST_MSG_2)
-#define BIT_GET_HOST_MSG_2(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_2) & BIT_MASK_HOST_MSG_2)
-
-/* 2 REG_HMEBOX3 (Offset 0x01DC) */
-
-#define BIT_SHIFT_HOST_MSG_3 0
-#define BIT_MASK_HOST_MSG_3 0xffffffffL
-#define BIT_HOST_MSG_3(x) (((x) & BIT_MASK_HOST_MSG_3) << BIT_SHIFT_HOST_MSG_3)
-#define BIT_GET_HOST_MSG_3(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_3) & BIT_MASK_HOST_MSG_3)
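
REG_HMETFR exposes per-mailbox busy flags (BIT_INT_BOX0..3) and REG_HMEBOX0..3 carry the 32-bit host-to-firmware payloads. The sketch below posts a message through the first free box; the consecutive-offset layout (0x01D0 + 4*box) follows from the comments above, but the overall handshake is an assumption rather than something this header spells out:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define REG_HMETFR	0x01CC	/* offsets from the comments above */
#define REG_HMEBOX0	0x01D0

extern u32 reg_read32(u32 addr);		/* hypothetical accessor */
extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Post a 32-bit host message through the first mailbox that is not busy. */
static int h2c_post(u32 msg)
{
	u32 busy = reg_read32(REG_HMETFR);
	int box;

	for (box = 0; box < 4; box++) {
		if (busy & BIT(box))	/* BIT_INT_BOX0..3: still occupied */
			continue;
		/* each box carries an identical 32-bit HOST_MSG field */
		reg_write32(REG_HMEBOX0 + 4 * box, BIT_HOST_MSG_0(msg));
		return 0;
	}
	return -EBUSY;
}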
-
-/* 2 REG_LLT_INIT (Offset 0x01E0) */
-
-#define BIT_SHIFT_LLTE_RWM 30
-#define BIT_MASK_LLTE_RWM 0x3
-#define BIT_LLTE_RWM(x) (((x) & BIT_MASK_LLTE_RWM) << BIT_SHIFT_LLTE_RWM)
-#define BIT_GET_LLTE_RWM(x) (((x) >> BIT_SHIFT_LLTE_RWM) & BIT_MASK_LLTE_RWM)
-
-/* 2 REG_LLT_INIT (Offset 0x01E0) */
-
-#define BIT_SHIFT_LLTINI_PDATA_V1 16
-#define BIT_MASK_LLTINI_PDATA_V1 0xfff
-#define BIT_LLTINI_PDATA_V1(x) \
- (((x) & BIT_MASK_LLTINI_PDATA_V1) << BIT_SHIFT_LLTINI_PDATA_V1)
-#define BIT_GET_LLTINI_PDATA_V1(x) \
- (((x) >> BIT_SHIFT_LLTINI_PDATA_V1) & BIT_MASK_LLTINI_PDATA_V1)
-
-/* 2 REG_LLT_INIT (Offset 0x01E0) */
-
-#define BIT_SHIFT_LLTINI_HDATA_V1 0
-#define BIT_MASK_LLTINI_HDATA_V1 0xfff
-#define BIT_LLTINI_HDATA_V1(x) \
- (((x) & BIT_MASK_LLTINI_HDATA_V1) << BIT_SHIFT_LLTINI_HDATA_V1)
-#define BIT_GET_LLTINI_HDATA_V1(x) \
- (((x) >> BIT_SHIFT_LLTINI_HDATA_V1) & BIT_MASK_LLTINI_HDATA_V1)
-
-/* 2 REG_LLT_INIT_ADDR (Offset 0x01E4) */
-
-#define BIT_SHIFT_LLTINI_ADDR_V1 0
-#define BIT_MASK_LLTINI_ADDR_V1 0xfff
-#define BIT_LLTINI_ADDR_V1(x) \
- (((x) & BIT_MASK_LLTINI_ADDR_V1) << BIT_SHIFT_LLTINI_ADDR_V1)
-#define BIT_GET_LLTINI_ADDR_V1(x) \
- (((x) >> BIT_SHIFT_LLTINI_ADDR_V1) & BIT_MASK_LLTINI_ADDR_V1)
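
REG_LLT_INIT and REG_LLT_INIT_ADDR drive link-list-table programming: LLTE_RWM selects the operation, the HDATA/PDATA fields carry page pointers and LLTINI_ADDR names the entry. The sketch below writes one entry and polls for completion; the opcode value, the choice of HDATA for the next-page pointer and the poll-until-idle convention are all assumptions here:

#include <linux/errno.h>
#include <linux/types.h>

#define REG_LLT_INIT		0x01E0	/* offsets from the comments above */
#define REG_LLT_INIT_ADDR	0x01E4

#define LLT_OP_WRITE	0x1	/* assumed encoding for BIT_LLTE_RWM */
#define LLT_OP_IDLE	0x0	/* assumed "no operation pending" value */

extern u32 reg_read32(u32 addr);		/* hypothetical accessor */
extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Point LLT entry 'addr' at 'next', then wait for the engine to go idle. */
static int llt_write_entry(u16 addr, u16 next)
{
	int retry = 100;

	reg_write32(REG_LLT_INIT_ADDR, BIT_LLTINI_ADDR_V1(addr));
	reg_write32(REG_LLT_INIT,
		    BIT_LLTE_RWM(LLT_OP_WRITE) | BIT_LLTINI_HDATA_V1(next));

	while (retry--) {
		if (BIT_GET_LLTE_RWM(reg_read32(REG_LLT_INIT)) == LLT_OP_IDLE)
			return 0;
	}
	return -ETIMEDOUT;
}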
-
-/* 2 REG_BB_ACCESS_CTRL (Offset 0x01E8) */
-
-#define BIT_SHIFT_BB_WRITE_READ 30
-#define BIT_MASK_BB_WRITE_READ 0x3
-#define BIT_BB_WRITE_READ(x) \
- (((x) & BIT_MASK_BB_WRITE_READ) << BIT_SHIFT_BB_WRITE_READ)
-#define BIT_GET_BB_WRITE_READ(x) \
- (((x) >> BIT_SHIFT_BB_WRITE_READ) & BIT_MASK_BB_WRITE_READ)
-
-/* 2 REG_BB_ACCESS_CTRL (Offset 0x01E8) */
-
-#define BIT_SHIFT_BB_WRITE_EN 12
-#define BIT_MASK_BB_WRITE_EN 0xf
-#define BIT_BB_WRITE_EN(x) \
- (((x) & BIT_MASK_BB_WRITE_EN) << BIT_SHIFT_BB_WRITE_EN)
-#define BIT_GET_BB_WRITE_EN(x) \
- (((x) >> BIT_SHIFT_BB_WRITE_EN) & BIT_MASK_BB_WRITE_EN)
-
-#define BIT_SHIFT_BB_ADDR 2
-#define BIT_MASK_BB_ADDR 0x1ff
-#define BIT_BB_ADDR(x) (((x) & BIT_MASK_BB_ADDR) << BIT_SHIFT_BB_ADDR)
-#define BIT_GET_BB_ADDR(x) (((x) >> BIT_SHIFT_BB_ADDR) & BIT_MASK_BB_ADDR)
-
-/* 2 REG_BB_ACCESS_CTRL (Offset 0x01E8) */
-
-#define BIT_BB_ERRACC BIT(0)
-
-/* 2 REG_BB_ACCESS_DATA (Offset 0x01EC) */
-
-#define BIT_SHIFT_BB_DATA 0
-#define BIT_MASK_BB_DATA 0xffffffffL
-#define BIT_BB_DATA(x) (((x) & BIT_MASK_BB_DATA) << BIT_SHIFT_BB_DATA)
-#define BIT_GET_BB_DATA(x) (((x) >> BIT_SHIFT_BB_DATA) & BIT_MASK_BB_DATA)
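
REG_BB_ACCESS_CTRL and REG_BB_ACCESS_DATA form another indirect window, this time into baseband registers: BB_ADDR selects the target, BB_WRITE_EN looks like a per-byte write enable and BB_WRITE_READ picks the direction. A hedged sketch of a full-word write under that interpretation, with the direction field left at zero on the assumption that zero means "write":

#include <linux/types.h>

#define REG_BB_ACCESS_CTRL	0x01E8	/* offsets from the comments above */
#define REG_BB_ACCESS_DATA	0x01EC

extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Write all four bytes of one baseband register through the window. */
static void bb_indirect_write(u16 bb_addr, u32 data)
{
	u32 ctrl = BIT_BB_ADDR(bb_addr) |
		   BIT_BB_WRITE_EN(0xf);	/* enable all byte lanes */

	reg_write32(REG_BB_ACCESS_DATA, BIT_BB_DATA(data));
	reg_write32(REG_BB_ACCESS_CTRL, ctrl);
}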
-
-/* 2 REG_HMEBOX_E0 (Offset 0x01F0) */
-
-#define BIT_SHIFT_HMEBOX_E0 0
-#define BIT_MASK_HMEBOX_E0 0xffffffffL
-#define BIT_HMEBOX_E0(x) (((x) & BIT_MASK_HMEBOX_E0) << BIT_SHIFT_HMEBOX_E0)
-#define BIT_GET_HMEBOX_E0(x) (((x) >> BIT_SHIFT_HMEBOX_E0) & BIT_MASK_HMEBOX_E0)
-
-/* 2 REG_HMEBOX_E1 (Offset 0x01F4) */
-
-#define BIT_SHIFT_HMEBOX_E1 0
-#define BIT_MASK_HMEBOX_E1 0xffffffffL
-#define BIT_HMEBOX_E1(x) (((x) & BIT_MASK_HMEBOX_E1) << BIT_SHIFT_HMEBOX_E1)
-#define BIT_GET_HMEBOX_E1(x) (((x) >> BIT_SHIFT_HMEBOX_E1) & BIT_MASK_HMEBOX_E1)
-
-/* 2 REG_HMEBOX_E2 (Offset 0x01F8) */
-
-#define BIT_SHIFT_HMEBOX_E2 0
-#define BIT_MASK_HMEBOX_E2 0xffffffffL
-#define BIT_HMEBOX_E2(x) (((x) & BIT_MASK_HMEBOX_E2) << BIT_SHIFT_HMEBOX_E2)
-#define BIT_GET_HMEBOX_E2(x) (((x) >> BIT_SHIFT_HMEBOX_E2) & BIT_MASK_HMEBOX_E2)
-
-/* 2 REG_HMEBOX_E3 (Offset 0x01FC) */
-
-#define BIT_LD_RQPN BIT(31)
-
-#define BIT_SHIFT_HMEBOX_E3 0
-#define BIT_MASK_HMEBOX_E3 0xffffffffL
-#define BIT_HMEBOX_E3(x) (((x) & BIT_MASK_HMEBOX_E3) << BIT_SHIFT_HMEBOX_E3)
-#define BIT_GET_HMEBOX_E3(x) (((x) >> BIT_SHIFT_HMEBOX_E3) & BIT_MASK_HMEBOX_E3)
-
-/* 2 REG_FIFOPAGE_CTRL_1 (Offset 0x0200) */
-
-#define BIT_SHIFT_TX_OQT_HE_FREE_SPACE_V1 16
-#define BIT_MASK_TX_OQT_HE_FREE_SPACE_V1 0xff
-#define BIT_TX_OQT_HE_FREE_SPACE_V1(x) \
- (((x) & BIT_MASK_TX_OQT_HE_FREE_SPACE_V1) \
- << BIT_SHIFT_TX_OQT_HE_FREE_SPACE_V1)
-#define BIT_GET_TX_OQT_HE_FREE_SPACE_V1(x) \
- (((x) >> BIT_SHIFT_TX_OQT_HE_FREE_SPACE_V1) & \
- BIT_MASK_TX_OQT_HE_FREE_SPACE_V1)
-
-/* 2 REG_FIFOPAGE_CTRL_1 (Offset 0x0200) */
-
-#define BIT_SHIFT_TX_OQT_NL_FREE_SPACE_V1 0
-#define BIT_MASK_TX_OQT_NL_FREE_SPACE_V1 0xff
-#define BIT_TX_OQT_NL_FREE_SPACE_V1(x) \
- (((x) & BIT_MASK_TX_OQT_NL_FREE_SPACE_V1) \
- << BIT_SHIFT_TX_OQT_NL_FREE_SPACE_V1)
-#define BIT_GET_TX_OQT_NL_FREE_SPACE_V1(x) \
- (((x) >> BIT_SHIFT_TX_OQT_NL_FREE_SPACE_V1) & \
- BIT_MASK_TX_OQT_NL_FREE_SPACE_V1)
-
-/* 2 REG_FIFOPAGE_CTRL_2 (Offset 0x0204) */
-
-#define BIT_BCN_VALID_1_V1 BIT(31)
-
-/* 2 REG_FIFOPAGE_CTRL_2 (Offset 0x0204) */
-
-#define BIT_SHIFT_BCN_HEAD_1_V1 16
-#define BIT_MASK_BCN_HEAD_1_V1 0xfff
-#define BIT_BCN_HEAD_1_V1(x) \
- (((x) & BIT_MASK_BCN_HEAD_1_V1) << BIT_SHIFT_BCN_HEAD_1_V1)
-#define BIT_GET_BCN_HEAD_1_V1(x) \
- (((x) >> BIT_SHIFT_BCN_HEAD_1_V1) & BIT_MASK_BCN_HEAD_1_V1)
-
-#define BIT_BCN_VALID_V1 BIT(15)
-
-/* 2 REG_FIFOPAGE_CTRL_2 (Offset 0x0204) */
-
-#define BIT_SHIFT_BCN_HEAD_V1 0
-#define BIT_MASK_BCN_HEAD_V1 0xfff
-#define BIT_BCN_HEAD_V1(x) \
- (((x) & BIT_MASK_BCN_HEAD_V1) << BIT_SHIFT_BCN_HEAD_V1)
-#define BIT_GET_BCN_HEAD_V1(x) \
- (((x) >> BIT_SHIFT_BCN_HEAD_V1) & BIT_MASK_BCN_HEAD_V1)
-
-/* 2 REG_AUTO_LLT_V1 (Offset 0x0208) */
-
-#define BIT_SHIFT_MAX_TX_PKT_FOR_USB_AND_SDIO_V1 24
-#define BIT_MASK_MAX_TX_PKT_FOR_USB_AND_SDIO_V1 0xff
-#define BIT_MAX_TX_PKT_FOR_USB_AND_SDIO_V1(x) \
- (((x) & BIT_MASK_MAX_TX_PKT_FOR_USB_AND_SDIO_V1) \
- << BIT_SHIFT_MAX_TX_PKT_FOR_USB_AND_SDIO_V1)
-#define BIT_GET_MAX_TX_PKT_FOR_USB_AND_SDIO_V1(x) \
- (((x) >> BIT_SHIFT_MAX_TX_PKT_FOR_USB_AND_SDIO_V1) & \
- BIT_MASK_MAX_TX_PKT_FOR_USB_AND_SDIO_V1)
-
-/* 2 REG_AUTO_LLT_V1 (Offset 0x0208) */
-
-#define BIT_SHIFT_LLT_FREE_PAGE_V1 8
-#define BIT_MASK_LLT_FREE_PAGE_V1 0xffff
-#define BIT_LLT_FREE_PAGE_V1(x) \
- (((x) & BIT_MASK_LLT_FREE_PAGE_V1) << BIT_SHIFT_LLT_FREE_PAGE_V1)
-#define BIT_GET_LLT_FREE_PAGE_V1(x) \
- (((x) >> BIT_SHIFT_LLT_FREE_PAGE_V1) & BIT_MASK_LLT_FREE_PAGE_V1)
-
-/* 2 REG_DWBCN0_CTRL (Offset 0x0208) */
-
-#define BIT_SHIFT_BLK_DESC_NUM 4
-#define BIT_MASK_BLK_DESC_NUM 0xf
-#define BIT_BLK_DESC_NUM(x) \
- (((x) & BIT_MASK_BLK_DESC_NUM) << BIT_SHIFT_BLK_DESC_NUM)
-#define BIT_GET_BLK_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_BLK_DESC_NUM) & BIT_MASK_BLK_DESC_NUM)
-
-/* 2 REG_AUTO_LLT_V1 (Offset 0x0208) */
-
-#define BIT_R_BCN_HEAD_SEL BIT(3)
-#define BIT_R_EN_BCN_SW_HEAD_SEL BIT(2)
-#define BIT_LLT_DBG_SEL BIT(1)
-#define BIT_AUTO_INIT_LLT_V1 BIT(0)
-
-/* 2 REG_TXDMA_OFFSET_CHK (Offset 0x020C) */
-
-#define BIT_EM_CHKSUM_FIN BIT(31)
-#define BIT_EMN_PCIE_DMA_MOD BIT(30)
-
-/* 2 REG_TXDMA_OFFSET_CHK (Offset 0x020C) */
-
-#define BIT_EN_TXQUE_CLR BIT(29)
-#define BIT_EN_PCIE_FIFO_MODE BIT(28)
-
-/* 2 REG_TXDMA_OFFSET_CHK (Offset 0x020C) */
-
-#define BIT_SHIFT_PG_UNDER_TH_V1 16
-#define BIT_MASK_PG_UNDER_TH_V1 0xfff
-#define BIT_PG_UNDER_TH_V1(x) \
- (((x) & BIT_MASK_PG_UNDER_TH_V1) << BIT_SHIFT_PG_UNDER_TH_V1)
-#define BIT_GET_PG_UNDER_TH_V1(x) \
- (((x) >> BIT_SHIFT_PG_UNDER_TH_V1) & BIT_MASK_PG_UNDER_TH_V1)
-
-/* 2 REG_TXDMA_OFFSET_CHK (Offset 0x020C) */
-
-#define BIT_RESTORE_H2C_ADDRESS BIT(15)
-
-/* 2 REG_TXDMA_OFFSET_CHK (Offset 0x020C) */
-
-#define BIT_SDIO_TXDESC_CHKSUM_EN BIT(13)
-#define BIT_RST_RDPTR BIT(12)
-#define BIT_RST_WRPTR BIT(11)
-#define BIT_CHK_PG_TH_EN BIT(10)
-#define BIT_DROP_DATA_EN BIT(9)
-#define BIT_CHECK_OFFSET_EN BIT(8)
-
-#define BIT_SHIFT_CHECK_OFFSET 0
-#define BIT_MASK_CHECK_OFFSET 0xff
-#define BIT_CHECK_OFFSET(x) \
- (((x) & BIT_MASK_CHECK_OFFSET) << BIT_SHIFT_CHECK_OFFSET)
-#define BIT_GET_CHECK_OFFSET(x) \
- (((x) >> BIT_SHIFT_CHECK_OFFSET) & BIT_MASK_CHECK_OFFSET)
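
REG_TXDMA_OFFSET_CHK mixes single-bit switches with a 12-bit page-underflow threshold, so configuring it is a read-modify-write that clears the field with its mask and ORs in the new value plus the wanted enable bits. A short sketch; the particular bits enabled and the threshold are purely illustrative:

#include <linux/types.h>

#define REG_TXDMA_OFFSET_CHK	0x020C	/* offset from the comment above */

extern u32 reg_read32(u32 addr);		/* hypothetical accessor */
extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Enable page-threshold and offset checking with an illustrative threshold. */
static void txdma_chk_setup(u16 pg_under_th)
{
	u32 val = reg_read32(REG_TXDMA_OFFSET_CHK);

	val &= ~BIT_PG_UNDER_TH_V1(BIT_MASK_PG_UNDER_TH_V1);
	val |= BIT_PG_UNDER_TH_V1(pg_under_th);
	val |= BIT_CHK_PG_TH_EN | BIT_CHECK_OFFSET_EN | BIT_DROP_DATA_EN;

	reg_write32(REG_TXDMA_OFFSET_CHK, val);
}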
-
-/* 2 REG_TXDMA_STATUS (Offset 0x0210) */
-
-#define BIT_HI_OQT_UDN BIT(17)
-#define BIT_HI_OQT_OVF BIT(16)
-#define BIT_PAYLOAD_CHKSUM_ERR BIT(15)
-#define BIT_PAYLOAD_UDN BIT(14)
-#define BIT_PAYLOAD_OVF BIT(13)
-#define BIT_DSC_CHKSUM_FAIL BIT(12)
-#define BIT_UNKNOWN_QSEL BIT(11)
-#define BIT_EP_QSEL_DIFF BIT(10)
-#define BIT_TX_OFFS_UNMATCH BIT(9)
-#define BIT_TXOQT_UDN BIT(8)
-#define BIT_TXOQT_OVF BIT(7)
-#define BIT_TXDMA_SFF_UDN BIT(6)
-#define BIT_TXDMA_SFF_OVF BIT(5)
-#define BIT_LLT_NULL_PG BIT(4)
-#define BIT_PAGE_UDN BIT(3)
-#define BIT_PAGE_OVF BIT(2)
-#define BIT_TXFF_PG_UDN BIT(1)
-#define BIT_TXFF_PG_OVF BIT(0)
-
-/* 2 REG_TQPNT1 (Offset 0x0218) */
-
-#define BIT_SHIFT_HPQ_HIGH_TH_V1 16
-#define BIT_MASK_HPQ_HIGH_TH_V1 0xfff
-#define BIT_HPQ_HIGH_TH_V1(x) \
- (((x) & BIT_MASK_HPQ_HIGH_TH_V1) << BIT_SHIFT_HPQ_HIGH_TH_V1)
-#define BIT_GET_HPQ_HIGH_TH_V1(x) \
- (((x) >> BIT_SHIFT_HPQ_HIGH_TH_V1) & BIT_MASK_HPQ_HIGH_TH_V1)
-
-/* 2 REG_TQPNT1 (Offset 0x0218) */
-
-#define BIT_SHIFT_HPQ_LOW_TH_V1 0
-#define BIT_MASK_HPQ_LOW_TH_V1 0xfff
-#define BIT_HPQ_LOW_TH_V1(x) \
- (((x) & BIT_MASK_HPQ_LOW_TH_V1) << BIT_SHIFT_HPQ_LOW_TH_V1)
-#define BIT_GET_HPQ_LOW_TH_V1(x) \
- (((x) >> BIT_SHIFT_HPQ_LOW_TH_V1) & BIT_MASK_HPQ_LOW_TH_V1)
-
-/* 2 REG_TQPNT2 (Offset 0x021C) */
-
-#define BIT_SHIFT_NPQ_HIGH_TH_V1 16
-#define BIT_MASK_NPQ_HIGH_TH_V1 0xfff
-#define BIT_NPQ_HIGH_TH_V1(x) \
- (((x) & BIT_MASK_NPQ_HIGH_TH_V1) << BIT_SHIFT_NPQ_HIGH_TH_V1)
-#define BIT_GET_NPQ_HIGH_TH_V1(x) \
- (((x) >> BIT_SHIFT_NPQ_HIGH_TH_V1) & BIT_MASK_NPQ_HIGH_TH_V1)
-
-/* 2 REG_TQPNT2 (Offset 0x021C) */
-
-#define BIT_SHIFT_NPQ_LOW_TH_V1 0
-#define BIT_MASK_NPQ_LOW_TH_V1 0xfff
-#define BIT_NPQ_LOW_TH_V1(x) \
- (((x) & BIT_MASK_NPQ_LOW_TH_V1) << BIT_SHIFT_NPQ_LOW_TH_V1)
-#define BIT_GET_NPQ_LOW_TH_V1(x) \
- (((x) >> BIT_SHIFT_NPQ_LOW_TH_V1) & BIT_MASK_NPQ_LOW_TH_V1)
-
-/* 2 REG_TQPNT3 (Offset 0x0220) */
-
-#define BIT_SHIFT_LPQ_HIGH_TH_V1 16
-#define BIT_MASK_LPQ_HIGH_TH_V1 0xfff
-#define BIT_LPQ_HIGH_TH_V1(x) \
- (((x) & BIT_MASK_LPQ_HIGH_TH_V1) << BIT_SHIFT_LPQ_HIGH_TH_V1)
-#define BIT_GET_LPQ_HIGH_TH_V1(x) \
- (((x) >> BIT_SHIFT_LPQ_HIGH_TH_V1) & BIT_MASK_LPQ_HIGH_TH_V1)
-
-/* 2 REG_TQPNT3 (Offset 0x0220) */
-
-#define BIT_SHIFT_LPQ_LOW_TH_V1 0
-#define BIT_MASK_LPQ_LOW_TH_V1 0xfff
-#define BIT_LPQ_LOW_TH_V1(x) \
- (((x) & BIT_MASK_LPQ_LOW_TH_V1) << BIT_SHIFT_LPQ_LOW_TH_V1)
-#define BIT_GET_LPQ_LOW_TH_V1(x) \
- (((x) >> BIT_SHIFT_LPQ_LOW_TH_V1) & BIT_MASK_LPQ_LOW_TH_V1)
-
-/* 2 REG_TQPNT4 (Offset 0x0224) */
-
-#define BIT_SHIFT_EXQ_HIGH_TH_V1 16
-#define BIT_MASK_EXQ_HIGH_TH_V1 0xfff
-#define BIT_EXQ_HIGH_TH_V1(x) \
- (((x) & BIT_MASK_EXQ_HIGH_TH_V1) << BIT_SHIFT_EXQ_HIGH_TH_V1)
-#define BIT_GET_EXQ_HIGH_TH_V1(x) \
- (((x) >> BIT_SHIFT_EXQ_HIGH_TH_V1) & BIT_MASK_EXQ_HIGH_TH_V1)
-
-/* 2 REG_TQPNT4 (Offset 0x0224) */
-
-#define BIT_SHIFT_EXQ_LOW_TH_V1 0
-#define BIT_MASK_EXQ_LOW_TH_V1 0xfff
-#define BIT_EXQ_LOW_TH_V1(x) \
- (((x) & BIT_MASK_EXQ_LOW_TH_V1) << BIT_SHIFT_EXQ_LOW_TH_V1)
-#define BIT_GET_EXQ_LOW_TH_V1(x) \
- (((x) >> BIT_SHIFT_EXQ_LOW_TH_V1) & BIT_MASK_EXQ_LOW_TH_V1)
-
-/* 2 REG_RQPN_CTRL_1 (Offset 0x0228) */
-
-#define BIT_SHIFT_TXPKTNUM_H 16
-#define BIT_MASK_TXPKTNUM_H 0xffff
-#define BIT_TXPKTNUM_H(x) (((x) & BIT_MASK_TXPKTNUM_H) << BIT_SHIFT_TXPKTNUM_H)
-#define BIT_GET_TXPKTNUM_H(x) \
- (((x) >> BIT_SHIFT_TXPKTNUM_H) & BIT_MASK_TXPKTNUM_H)
-
-/* 2 REG_RQPN_CTRL_1 (Offset 0x0228) */
-
-#define BIT_SHIFT_TXPKTNUM_V2 0
-#define BIT_MASK_TXPKTNUM_V2 0xffff
-#define BIT_TXPKTNUM_V2(x) \
- (((x) & BIT_MASK_TXPKTNUM_V2) << BIT_SHIFT_TXPKTNUM_V2)
-#define BIT_GET_TXPKTNUM_V2(x) \
- (((x) >> BIT_SHIFT_TXPKTNUM_V2) & BIT_MASK_TXPKTNUM_V2)
-
-/* 2 REG_RQPN_CTRL_2 (Offset 0x022C) */
-
-#define BIT_EXQ_PUBLIC_DIS_V1 BIT(19)
-#define BIT_NPQ_PUBLIC_DIS_V1 BIT(18)
-#define BIT_LPQ_PUBLIC_DIS_V1 BIT(17)
-#define BIT_HPQ_PUBLIC_DIS_V1 BIT(16)
-
-/* 2 REG_FIFOPAGE_INFO_1 (Offset 0x0230) */
-
-#define BIT_SHIFT_HPQ_AVAL_PG_V1 16
-#define BIT_MASK_HPQ_AVAL_PG_V1 0xfff
-#define BIT_HPQ_AVAL_PG_V1(x) \
- (((x) & BIT_MASK_HPQ_AVAL_PG_V1) << BIT_SHIFT_HPQ_AVAL_PG_V1)
-#define BIT_GET_HPQ_AVAL_PG_V1(x) \
- (((x) >> BIT_SHIFT_HPQ_AVAL_PG_V1) & BIT_MASK_HPQ_AVAL_PG_V1)
-
-#define BIT_SHIFT_HPQ_V1 0
-#define BIT_MASK_HPQ_V1 0xfff
-#define BIT_HPQ_V1(x) (((x) & BIT_MASK_HPQ_V1) << BIT_SHIFT_HPQ_V1)
-#define BIT_GET_HPQ_V1(x) (((x) >> BIT_SHIFT_HPQ_V1) & BIT_MASK_HPQ_V1)
-
-/* 2 REG_FIFOPAGE_INFO_2 (Offset 0x0234) */
-
-#define BIT_SHIFT_LPQ_AVAL_PG_V1 16
-#define BIT_MASK_LPQ_AVAL_PG_V1 0xfff
-#define BIT_LPQ_AVAL_PG_V1(x) \
- (((x) & BIT_MASK_LPQ_AVAL_PG_V1) << BIT_SHIFT_LPQ_AVAL_PG_V1)
-#define BIT_GET_LPQ_AVAL_PG_V1(x) \
- (((x) >> BIT_SHIFT_LPQ_AVAL_PG_V1) & BIT_MASK_LPQ_AVAL_PG_V1)
-
-#define BIT_SHIFT_LPQ_V1 0
-#define BIT_MASK_LPQ_V1 0xfff
-#define BIT_LPQ_V1(x) (((x) & BIT_MASK_LPQ_V1) << BIT_SHIFT_LPQ_V1)
-#define BIT_GET_LPQ_V1(x) (((x) >> BIT_SHIFT_LPQ_V1) & BIT_MASK_LPQ_V1)
-
-/* 2 REG_FIFOPAGE_INFO_3 (Offset 0x0238) */
-
-#define BIT_SHIFT_NPQ_AVAL_PG_V1 16
-#define BIT_MASK_NPQ_AVAL_PG_V1 0xfff
-#define BIT_NPQ_AVAL_PG_V1(x) \
- (((x) & BIT_MASK_NPQ_AVAL_PG_V1) << BIT_SHIFT_NPQ_AVAL_PG_V1)
-#define BIT_GET_NPQ_AVAL_PG_V1(x) \
- (((x) >> BIT_SHIFT_NPQ_AVAL_PG_V1) & BIT_MASK_NPQ_AVAL_PG_V1)
-
-/* 2 REG_FIFOPAGE_INFO_3 (Offset 0x0238) */
-
-#define BIT_SHIFT_NPQ_V1 0
-#define BIT_MASK_NPQ_V1 0xfff
-#define BIT_NPQ_V1(x) (((x) & BIT_MASK_NPQ_V1) << BIT_SHIFT_NPQ_V1)
-#define BIT_GET_NPQ_V1(x) (((x) >> BIT_SHIFT_NPQ_V1) & BIT_MASK_NPQ_V1)
-
-/* 2 REG_FIFOPAGE_INFO_4 (Offset 0x023C) */
-
-#define BIT_SHIFT_EXQ_AVAL_PG_V1 16
-#define BIT_MASK_EXQ_AVAL_PG_V1 0xfff
-#define BIT_EXQ_AVAL_PG_V1(x) \
- (((x) & BIT_MASK_EXQ_AVAL_PG_V1) << BIT_SHIFT_EXQ_AVAL_PG_V1)
-#define BIT_GET_EXQ_AVAL_PG_V1(x) \
- (((x) >> BIT_SHIFT_EXQ_AVAL_PG_V1) & BIT_MASK_EXQ_AVAL_PG_V1)
-
-#define BIT_SHIFT_EXQ_V1 0
-#define BIT_MASK_EXQ_V1 0xfff
-#define BIT_EXQ_V1(x) (((x) & BIT_MASK_EXQ_V1) << BIT_SHIFT_EXQ_V1)
-#define BIT_GET_EXQ_V1(x) (((x) >> BIT_SHIFT_EXQ_V1) & BIT_MASK_EXQ_V1)
-
-/* 2 REG_FIFOPAGE_INFO_5 (Offset 0x0240) */
-
-#define BIT_SHIFT_PUBQ_AVAL_PG_V1 16
-#define BIT_MASK_PUBQ_AVAL_PG_V1 0xfff
-#define BIT_PUBQ_AVAL_PG_V1(x) \
- (((x) & BIT_MASK_PUBQ_AVAL_PG_V1) << BIT_SHIFT_PUBQ_AVAL_PG_V1)
-#define BIT_GET_PUBQ_AVAL_PG_V1(x) \
- (((x) >> BIT_SHIFT_PUBQ_AVAL_PG_V1) & BIT_MASK_PUBQ_AVAL_PG_V1)
-
-#define BIT_SHIFT_PUBQ_V1 0
-#define BIT_MASK_PUBQ_V1 0xfff
-#define BIT_PUBQ_V1(x) (((x) & BIT_MASK_PUBQ_V1) << BIT_SHIFT_PUBQ_V1)
-#define BIT_GET_PUBQ_V1(x) (((x) >> BIT_SHIFT_PUBQ_V1) & BIT_MASK_PUBQ_V1)
-
-/* 2 REG_H2C_HEAD (Offset 0x0244) */
-
-#define BIT_SHIFT_H2C_HEAD 0
-#define BIT_MASK_H2C_HEAD 0x3ffff
-#define BIT_H2C_HEAD(x) (((x) & BIT_MASK_H2C_HEAD) << BIT_SHIFT_H2C_HEAD)
-#define BIT_GET_H2C_HEAD(x) (((x) >> BIT_SHIFT_H2C_HEAD) & BIT_MASK_H2C_HEAD)
-
-/* 2 REG_H2C_TAIL (Offset 0x0248) */
-
-#define BIT_SHIFT_H2C_TAIL 0
-#define BIT_MASK_H2C_TAIL 0x3ffff
-#define BIT_H2C_TAIL(x) (((x) & BIT_MASK_H2C_TAIL) << BIT_SHIFT_H2C_TAIL)
-#define BIT_GET_H2C_TAIL(x) (((x) >> BIT_SHIFT_H2C_TAIL) & BIT_MASK_H2C_TAIL)
-
-/* 2 REG_H2C_READ_ADDR (Offset 0x024C) */
-
-#define BIT_SHIFT_H2C_READ_ADDR 0
-#define BIT_MASK_H2C_READ_ADDR 0x3ffff
-#define BIT_H2C_READ_ADDR(x) \
- (((x) & BIT_MASK_H2C_READ_ADDR) << BIT_SHIFT_H2C_READ_ADDR)
-#define BIT_GET_H2C_READ_ADDR(x) \
- (((x) >> BIT_SHIFT_H2C_READ_ADDR) & BIT_MASK_H2C_READ_ADDR)
-
-/* 2 REG_H2C_WR_ADDR (Offset 0x0250) */
-
-#define BIT_SHIFT_H2C_WR_ADDR 0
-#define BIT_MASK_H2C_WR_ADDR 0x3ffff
-#define BIT_H2C_WR_ADDR(x) \
- (((x) & BIT_MASK_H2C_WR_ADDR) << BIT_SHIFT_H2C_WR_ADDR)
-#define BIT_GET_H2C_WR_ADDR(x) \
- (((x) >> BIT_SHIFT_H2C_WR_ADDR) & BIT_MASK_H2C_WR_ADDR)
-
-/* 2 REG_H2C_INFO (Offset 0x0254) */
-
-#define BIT_H2C_SPACE_VLD BIT(3)
-#define BIT_H2C_WR_ADDR_RST BIT(2)
-
-#define BIT_SHIFT_H2C_LEN_SEL 0
-#define BIT_MASK_H2C_LEN_SEL 0x3
-#define BIT_H2C_LEN_SEL(x) \
- (((x) & BIT_MASK_H2C_LEN_SEL) << BIT_SHIFT_H2C_LEN_SEL)
-#define BIT_GET_H2C_LEN_SEL(x) \
- (((x) >> BIT_SHIFT_H2C_LEN_SEL) & BIT_MASK_H2C_LEN_SEL)
-
-/* 2 REG_RXDMA_AGG_PG_TH (Offset 0x0280) */
-
-#define BIT_SHIFT_RXDMA_AGG_OLD_MOD 24
-#define BIT_MASK_RXDMA_AGG_OLD_MOD 0xff
-#define BIT_RXDMA_AGG_OLD_MOD(x) \
- (((x) & BIT_MASK_RXDMA_AGG_OLD_MOD) << BIT_SHIFT_RXDMA_AGG_OLD_MOD)
-#define BIT_GET_RXDMA_AGG_OLD_MOD(x) \
- (((x) >> BIT_SHIFT_RXDMA_AGG_OLD_MOD) & BIT_MASK_RXDMA_AGG_OLD_MOD)
-
-/* 2 REG_RXDMA_AGG_PG_TH (Offset 0x0280) */
-
-#define BIT_SHIFT_PKT_NUM_WOL 16
-#define BIT_MASK_PKT_NUM_WOL 0xff
-#define BIT_PKT_NUM_WOL(x) \
- (((x) & BIT_MASK_PKT_NUM_WOL) << BIT_SHIFT_PKT_NUM_WOL)
-#define BIT_GET_PKT_NUM_WOL(x) \
- (((x) >> BIT_SHIFT_PKT_NUM_WOL) & BIT_MASK_PKT_NUM_WOL)
-
-/* 2 REG_RXDMA_AGG_PG_TH (Offset 0x0280) */
-
-#define BIT_SHIFT_DMA_AGG_TO 8
-#define BIT_MASK_DMA_AGG_TO 0xf
-#define BIT_DMA_AGG_TO(x) (((x) & BIT_MASK_DMA_AGG_TO) << BIT_SHIFT_DMA_AGG_TO)
-#define BIT_GET_DMA_AGG_TO(x) \
- (((x) >> BIT_SHIFT_DMA_AGG_TO) & BIT_MASK_DMA_AGG_TO)
-
-/* 2 REG_RXDMA_AGG_PG_TH (Offset 0x0280) */
-
-#define BIT_SHIFT_RXDMA_AGG_PG_TH_V1 0
-#define BIT_MASK_RXDMA_AGG_PG_TH_V1 0xf
-#define BIT_RXDMA_AGG_PG_TH_V1(x) \
- (((x) & BIT_MASK_RXDMA_AGG_PG_TH_V1) << BIT_SHIFT_RXDMA_AGG_PG_TH_V1)
-#define BIT_GET_RXDMA_AGG_PG_TH_V1(x) \
- (((x) >> BIT_SHIFT_RXDMA_AGG_PG_TH_V1) & BIT_MASK_RXDMA_AGG_PG_TH_V1)
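
REG_RXDMA_AGG_PG_TH packs the RX-DMA aggregation knobs (page threshold, DMA timeout, wake-packet count) into one word, so a configuration is just the OR of the composed fields. Sketch with illustrative parameters and the usual hypothetical accessor:

#include <linux/types.h>

#define REG_RXDMA_AGG_PG_TH	0x0280	/* offset from the comment above */

extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Program RX-DMA aggregation with an illustrative threshold and timeout. */
static void rxdma_agg_setup(u8 page_th, u8 timeout)
{
	u32 val = BIT_RXDMA_AGG_PG_TH_V1(page_th) |
		  BIT_DMA_AGG_TO(timeout);

	reg_write32(REG_RXDMA_AGG_PG_TH, val);
}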
-
-/* 2 REG_RXPKT_NUM (Offset 0x0284) */
-
-#define BIT_SHIFT_RXPKT_NUM 24
-#define BIT_MASK_RXPKT_NUM 0xff
-#define BIT_RXPKT_NUM(x) (((x) & BIT_MASK_RXPKT_NUM) << BIT_SHIFT_RXPKT_NUM)
-#define BIT_GET_RXPKT_NUM(x) (((x) >> BIT_SHIFT_RXPKT_NUM) & BIT_MASK_RXPKT_NUM)
-
-/* 2 REG_RXPKT_NUM (Offset 0x0284) */
-
-#define BIT_SHIFT_FW_UPD_RDPTR19_TO_16 20
-#define BIT_MASK_FW_UPD_RDPTR19_TO_16 0xf
-#define BIT_FW_UPD_RDPTR19_TO_16(x) \
- (((x) & BIT_MASK_FW_UPD_RDPTR19_TO_16) \
- << BIT_SHIFT_FW_UPD_RDPTR19_TO_16)
-#define BIT_GET_FW_UPD_RDPTR19_TO_16(x) \
- (((x) >> BIT_SHIFT_FW_UPD_RDPTR19_TO_16) & \
- BIT_MASK_FW_UPD_RDPTR19_TO_16)
-
-/* 2 REG_RXPKT_NUM (Offset 0x0284) */
-
-#define BIT_RXDMA_REQ BIT(19)
-#define BIT_RW_RELEASE_EN BIT(18)
-#define BIT_RXDMA_IDLE BIT(17)
-#define BIT_RXPKT_RELEASE_POLL BIT(16)
-
-#define BIT_SHIFT_FW_UPD_RDPTR 0
-#define BIT_MASK_FW_UPD_RDPTR 0xffff
-#define BIT_FW_UPD_RDPTR(x) \
- (((x) & BIT_MASK_FW_UPD_RDPTR) << BIT_SHIFT_FW_UPD_RDPTR)
-#define BIT_GET_FW_UPD_RDPTR(x) \
- (((x) >> BIT_SHIFT_FW_UPD_RDPTR) & BIT_MASK_FW_UPD_RDPTR)
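
Besides the RX packet counter, REG_RXPKT_NUM carries BIT_RXPKT_RELEASE_POLL, which hands one consumed RX buffer back to hardware. A hedged sketch of that handshake; the poll-until-the-bit-clears convention is an assumption, not stated in the header:

#include <linux/errno.h>
#include <linux/types.h>

#define REG_RXPKT_NUM	0x0284	/* offset from the comment above */

extern u32 reg_read32(u32 addr);		/* hypothetical accessor */
extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Release one consumed RX packet and wait for the poll bit to clear. */
static int rxpkt_release_one(void)
{
	int retry = 100;
	u32 val = reg_read32(REG_RXPKT_NUM);

	reg_write32(REG_RXPKT_NUM, val | BIT_RXPKT_RELEASE_POLL);

	while (retry--) {
		if (!(reg_read32(REG_RXPKT_NUM) & BIT_RXPKT_RELEASE_POLL))
			return 0;
	}
	return -ETIMEDOUT;
}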
-
-/* 2 REG_RXDMA_STATUS (Offset 0x0288) */
-
-#define BIT_C2H_PKT_OVF BIT(7)
-
-/* 2 REG_RXDMA_STATUS (Offset 0x0288) */
-
-#define BIT_AGG_CONFGI_ISSUE BIT(6)
-
-/* 2 REG_RXDMA_STATUS (Offset 0x0288) */
-
-#define BIT_FW_POLL_ISSUE BIT(5)
-#define BIT_RX_DATA_UDN BIT(4)
-#define BIT_RX_SFF_UDN BIT(3)
-#define BIT_RX_SFF_OVF BIT(2)
-
-/* 2 REG_RXDMA_STATUS (Offset 0x0288) */
-
-#define BIT_RXPKT_OVF BIT(0)
-
-/* 2 REG_RXDMA_DPR (Offset 0x028C) */
-
-#define BIT_SHIFT_RDE_DEBUG 0
-#define BIT_MASK_RDE_DEBUG 0xffffffffL
-#define BIT_RDE_DEBUG(x) (((x) & BIT_MASK_RDE_DEBUG) << BIT_SHIFT_RDE_DEBUG)
-#define BIT_GET_RDE_DEBUG(x) (((x) >> BIT_SHIFT_RDE_DEBUG) & BIT_MASK_RDE_DEBUG)
-
-/* 2 REG_RXDMA_MODE (Offset 0x0290) */
-
-#define BIT_SHIFT_PKTNUM_TH_V2 24
-#define BIT_MASK_PKTNUM_TH_V2 0x1f
-#define BIT_PKTNUM_TH_V2(x) \
- (((x) & BIT_MASK_PKTNUM_TH_V2) << BIT_SHIFT_PKTNUM_TH_V2)
-#define BIT_GET_PKTNUM_TH_V2(x) \
- (((x) >> BIT_SHIFT_PKTNUM_TH_V2) & BIT_MASK_PKTNUM_TH_V2)
-
-#define BIT_TXBA_BREAK_USBAGG BIT(23)
-
-#define BIT_SHIFT_PKTLEN_PARA 16
-#define BIT_MASK_PKTLEN_PARA 0x7
-#define BIT_PKTLEN_PARA(x) \
- (((x) & BIT_MASK_PKTLEN_PARA) << BIT_SHIFT_PKTLEN_PARA)
-#define BIT_GET_PKTLEN_PARA(x) \
- (((x) >> BIT_SHIFT_PKTLEN_PARA) & BIT_MASK_PKTLEN_PARA)
-
-/* 2 REG_RXDMA_MODE (Offset 0x0290) */
-
-#define BIT_SHIFT_BURST_SIZE 4
-#define BIT_MASK_BURST_SIZE 0x3
-#define BIT_BURST_SIZE(x) (((x) & BIT_MASK_BURST_SIZE) << BIT_SHIFT_BURST_SIZE)
-#define BIT_GET_BURST_SIZE(x) \
- (((x) >> BIT_SHIFT_BURST_SIZE) & BIT_MASK_BURST_SIZE)
-
-#define BIT_SHIFT_BURST_CNT 2
-#define BIT_MASK_BURST_CNT 0x3
-#define BIT_BURST_CNT(x) (((x) & BIT_MASK_BURST_CNT) << BIT_SHIFT_BURST_CNT)
-#define BIT_GET_BURST_CNT(x) (((x) >> BIT_SHIFT_BURST_CNT) & BIT_MASK_BURST_CNT)
-
-/* 2 REG_RXDMA_MODE (Offset 0x0290) */
-
-#define BIT_DMA_MODE BIT(1)
-
-/* 2 REG_C2H_PKT (Offset 0x0294) */
-
-#define BIT_SHIFT_R_C2H_STR_ADDR_16_TO_19 24
-#define BIT_MASK_R_C2H_STR_ADDR_16_TO_19 0xf
-#define BIT_R_C2H_STR_ADDR_16_TO_19(x) \
- (((x) & BIT_MASK_R_C2H_STR_ADDR_16_TO_19) \
- << BIT_SHIFT_R_C2H_STR_ADDR_16_TO_19)
-#define BIT_GET_R_C2H_STR_ADDR_16_TO_19(x) \
- (((x) >> BIT_SHIFT_R_C2H_STR_ADDR_16_TO_19) & \
- BIT_MASK_R_C2H_STR_ADDR_16_TO_19)
-
-#define BIT_SHIFT_MDIO_PHY_ADDR 24
-#define BIT_MASK_MDIO_PHY_ADDR 0x1f
-#define BIT_MDIO_PHY_ADDR(x) \
- (((x) & BIT_MASK_MDIO_PHY_ADDR) << BIT_SHIFT_MDIO_PHY_ADDR)
-#define BIT_GET_MDIO_PHY_ADDR(x) \
- (((x) >> BIT_SHIFT_MDIO_PHY_ADDR) & BIT_MASK_MDIO_PHY_ADDR)
-
-/* 2 REG_C2H_PKT (Offset 0x0294) */
-
-#define BIT_R_C2H_PKT_REQ BIT(16)
-#define BIT_RX_CLOSE_EN BIT(15)
-#define BIT_STOP_BCNQ BIT(14)
-#define BIT_STOP_MGQ BIT(13)
-#define BIT_STOP_VOQ BIT(12)
-#define BIT_STOP_VIQ BIT(11)
-#define BIT_STOP_BEQ BIT(10)
-#define BIT_STOP_BKQ BIT(9)
-#define BIT_STOP_RXQ BIT(8)
-#define BIT_STOP_HI7Q BIT(7)
-#define BIT_STOP_HI6Q BIT(6)
-#define BIT_STOP_HI5Q BIT(5)
-#define BIT_STOP_HI4Q BIT(4)
-#define BIT_STOP_HI3Q BIT(3)
-#define BIT_STOP_HI2Q BIT(2)
-#define BIT_STOP_HI1Q BIT(1)
-
-#define BIT_SHIFT_R_C2H_STR_ADDR 0
-#define BIT_MASK_R_C2H_STR_ADDR 0xffff
-#define BIT_R_C2H_STR_ADDR(x) \
- (((x) & BIT_MASK_R_C2H_STR_ADDR) << BIT_SHIFT_R_C2H_STR_ADDR)
-#define BIT_GET_R_C2H_STR_ADDR(x) \
- (((x) >> BIT_SHIFT_R_C2H_STR_ADDR) & BIT_MASK_R_C2H_STR_ADDR)
-
-#define BIT_STOP_HI0Q BIT(0)
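
The BIT_STOP_* flags above gate individual TX/RX DMA queues, so pausing or resuming a queue is a plain read-modify-write of the corresponding bit in REG_C2H_PKT. Minimal sketch with the usual hypothetical accessors:

#include <linux/types.h>

#define REG_C2H_PKT	0x0294	/* offset from the comment above */

extern u32 reg_read32(u32 addr);		/* hypothetical accessor */
extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Pause or resume one DMA queue identified by its BIT_STOP_* flag. */
static void queue_set_paused(u32 stop_bit, bool pause)
{
	u32 val = reg_read32(REG_C2H_PKT);

	if (pause)
		val |= stop_bit;	/* e.g. BIT_STOP_BEQ */
	else
		val &= ~stop_bit;

	reg_write32(REG_C2H_PKT, val);
}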
-
-/* 2 REG_FWFF_C2H (Offset 0x0298) */
-
-#define BIT_SHIFT_C2H_DMA_ADDR 0
-#define BIT_MASK_C2H_DMA_ADDR 0x3ffff
-#define BIT_C2H_DMA_ADDR(x) \
- (((x) & BIT_MASK_C2H_DMA_ADDR) << BIT_SHIFT_C2H_DMA_ADDR)
-#define BIT_GET_C2H_DMA_ADDR(x) \
- (((x) >> BIT_SHIFT_C2H_DMA_ADDR) & BIT_MASK_C2H_DMA_ADDR)
-
-/* 2 REG_FWFF_CTRL (Offset 0x029C) */
-
-#define BIT_FWFF_DMAPKT_REQ BIT(31)
-
-#define BIT_SHIFT_FWFF_DMA_PKT_NUM 16
-#define BIT_MASK_FWFF_DMA_PKT_NUM 0xff
-#define BIT_FWFF_DMA_PKT_NUM(x) \
- (((x) & BIT_MASK_FWFF_DMA_PKT_NUM) << BIT_SHIFT_FWFF_DMA_PKT_NUM)
-#define BIT_GET_FWFF_DMA_PKT_NUM(x) \
- (((x) >> BIT_SHIFT_FWFF_DMA_PKT_NUM) & BIT_MASK_FWFF_DMA_PKT_NUM)
-
-#define BIT_SHIFT_FWFF_STR_ADDR 0
-#define BIT_MASK_FWFF_STR_ADDR 0xffff
-#define BIT_FWFF_STR_ADDR(x) \
- (((x) & BIT_MASK_FWFF_STR_ADDR) << BIT_SHIFT_FWFF_STR_ADDR)
-#define BIT_GET_FWFF_STR_ADDR(x) \
- (((x) >> BIT_SHIFT_FWFF_STR_ADDR) & BIT_MASK_FWFF_STR_ADDR)
-
-/* 2 REG_FWFF_PKT_INFO (Offset 0x02A0) */
-
-#define BIT_SHIFT_FWFF_PKT_QUEUED 16
-#define BIT_MASK_FWFF_PKT_QUEUED 0xff
-#define BIT_FWFF_PKT_QUEUED(x) \
- (((x) & BIT_MASK_FWFF_PKT_QUEUED) << BIT_SHIFT_FWFF_PKT_QUEUED)
-#define BIT_GET_FWFF_PKT_QUEUED(x) \
- (((x) >> BIT_SHIFT_FWFF_PKT_QUEUED) & BIT_MASK_FWFF_PKT_QUEUED)
-
-/* 2 REG_FWFF_PKT_INFO (Offset 0x02A0) */
-
-#define BIT_SHIFT_FWFF_PKT_STR_ADDR 0
-#define BIT_MASK_FWFF_PKT_STR_ADDR 0xffff
-#define BIT_FWFF_PKT_STR_ADDR(x) \
- (((x) & BIT_MASK_FWFF_PKT_STR_ADDR) << BIT_SHIFT_FWFF_PKT_STR_ADDR)
-#define BIT_GET_FWFF_PKT_STR_ADDR(x) \
- (((x) >> BIT_SHIFT_FWFF_PKT_STR_ADDR) & BIT_MASK_FWFF_PKT_STR_ADDR)
-
-/* 2 REG_PCIE_CTRL (Offset 0x0300) */
-
-#define BIT_PCIEIO_PERSTB_SEL BIT(31)
-
-/* 2 REG_PCIE_CTRL (Offset 0x0300) */
-
-#define BIT_SHIFT_PCIE_MAX_RXDMA 28
-#define BIT_MASK_PCIE_MAX_RXDMA 0x7
-#define BIT_PCIE_MAX_RXDMA(x) \
- (((x) & BIT_MASK_PCIE_MAX_RXDMA) << BIT_SHIFT_PCIE_MAX_RXDMA)
-#define BIT_GET_PCIE_MAX_RXDMA(x) \
- (((x) >> BIT_SHIFT_PCIE_MAX_RXDMA) & BIT_MASK_PCIE_MAX_RXDMA)
-
-/* 2 REG_PCIE_CTRL (Offset 0x0300) */
-
-#define BIT_SHIFT_PCIE_MAX_TXDMA 24
-#define BIT_MASK_PCIE_MAX_TXDMA 0x7
-#define BIT_PCIE_MAX_TXDMA(x) \
- (((x) & BIT_MASK_PCIE_MAX_TXDMA) << BIT_SHIFT_PCIE_MAX_TXDMA)
-#define BIT_GET_PCIE_MAX_TXDMA(x) \
- (((x) >> BIT_SHIFT_PCIE_MAX_TXDMA) & BIT_MASK_PCIE_MAX_TXDMA)
-
-/* 2 REG_PCIE_CTRL (Offset 0x0300) */
-
-#define BIT_PCIE_RST_TRXDMA_INTF BIT(20)
-
-/* 2 REG_PCIE_CTRL (Offset 0x0300) */
-
-#define BIT_PCIE_EN_SWENT_L23 BIT(17)
-
-/* 2 REG_PCIE_CTRL (Offset 0x0300) */
-
-#define BIT_PCIE_EN_HWEXT_L1 BIT(16)
-
-/* 2 REG_INT_MIG (Offset 0x0304) */
-
-#define BIT_SHIFT_TXTTIMER_MATCH_NUM 28
-#define BIT_MASK_TXTTIMER_MATCH_NUM 0xf
-#define BIT_TXTTIMER_MATCH_NUM(x) \
- (((x) & BIT_MASK_TXTTIMER_MATCH_NUM) << BIT_SHIFT_TXTTIMER_MATCH_NUM)
-#define BIT_GET_TXTTIMER_MATCH_NUM(x) \
- (((x) >> BIT_SHIFT_TXTTIMER_MATCH_NUM) & BIT_MASK_TXTTIMER_MATCH_NUM)
-
-#define BIT_SHIFT_TXPKT_NUM_MATCH 24
-#define BIT_MASK_TXPKT_NUM_MATCH 0xf
-#define BIT_TXPKT_NUM_MATCH(x) \
- (((x) & BIT_MASK_TXPKT_NUM_MATCH) << BIT_SHIFT_TXPKT_NUM_MATCH)
-#define BIT_GET_TXPKT_NUM_MATCH(x) \
- (((x) >> BIT_SHIFT_TXPKT_NUM_MATCH) & BIT_MASK_TXPKT_NUM_MATCH)
-
-#define BIT_SHIFT_RXTTIMER_MATCH_NUM 20
-#define BIT_MASK_RXTTIMER_MATCH_NUM 0xf
-#define BIT_RXTTIMER_MATCH_NUM(x) \
- (((x) & BIT_MASK_RXTTIMER_MATCH_NUM) << BIT_SHIFT_RXTTIMER_MATCH_NUM)
-#define BIT_GET_RXTTIMER_MATCH_NUM(x) \
- (((x) >> BIT_SHIFT_RXTTIMER_MATCH_NUM) & BIT_MASK_RXTTIMER_MATCH_NUM)
-
-#define BIT_SHIFT_RXPKT_NUM_MATCH 16
-#define BIT_MASK_RXPKT_NUM_MATCH 0xf
-#define BIT_RXPKT_NUM_MATCH(x) \
- (((x) & BIT_MASK_RXPKT_NUM_MATCH) << BIT_SHIFT_RXPKT_NUM_MATCH)
-#define BIT_GET_RXPKT_NUM_MATCH(x) \
- (((x) >> BIT_SHIFT_RXPKT_NUM_MATCH) & BIT_MASK_RXPKT_NUM_MATCH)
-
-#define BIT_SHIFT_MIGRATE_TIMER 0
-#define BIT_MASK_MIGRATE_TIMER 0xffff
-#define BIT_MIGRATE_TIMER(x) \
- (((x) & BIT_MASK_MIGRATE_TIMER) << BIT_SHIFT_MIGRATE_TIMER)
-#define BIT_GET_MIGRATE_TIMER(x) \
- (((x) >> BIT_SHIFT_MIGRATE_TIMER) & BIT_MASK_MIGRATE_TIMER)
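
REG_INT_MIG holds the interrupt-mitigation settings: the *_MATCH fields set how many packets or timer ticks must accumulate before an interrupt is raised, and MIGRATE_TIMER sets the time window. A sketch that composes the register from scratch; the coalescing values are illustrative only:

#include <linux/types.h>

#define REG_INT_MIG	0x0304	/* offset from the comment above */

extern void reg_write32(u32 addr, u32 val);	/* hypothetical accessor */

/* Coalesce RX interrupts: fire after 'pkts' packets or 'timer' ticks. */
static void int_mig_setup(u8 pkts, u16 timer)
{
	u32 val = BIT_RXPKT_NUM_MATCH(pkts) |
		  BIT_MIGRATE_TIMER(timer);

	reg_write32(REG_INT_MIG, val);
}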
-
-/* 2 REG_BCNQ_TXBD_DESA (Offset 0x0308) */
-
-#define BIT_SHIFT_BCNQ_TXBD_DESA 0
-#define BIT_MASK_BCNQ_TXBD_DESA 0xffffffffffffffffL
-#define BIT_BCNQ_TXBD_DESA(x) \
- (((x) & BIT_MASK_BCNQ_TXBD_DESA) << BIT_SHIFT_BCNQ_TXBD_DESA)
-#define BIT_GET_BCNQ_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_BCNQ_TXBD_DESA) & BIT_MASK_BCNQ_TXBD_DESA)
-
-/* 2 REG_MGQ_TXBD_DESA (Offset 0x0310) */
-
-#define BIT_SHIFT_MGQ_TXBD_DESA 0
-#define BIT_MASK_MGQ_TXBD_DESA 0xffffffffffffffffL
-#define BIT_MGQ_TXBD_DESA(x) \
- (((x) & BIT_MASK_MGQ_TXBD_DESA) << BIT_SHIFT_MGQ_TXBD_DESA)
-#define BIT_GET_MGQ_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_MGQ_TXBD_DESA) & BIT_MASK_MGQ_TXBD_DESA)
-
-/* 2 REG_VOQ_TXBD_DESA (Offset 0x0318) */
-
-#define BIT_SHIFT_VOQ_TXBD_DESA 0
-#define BIT_MASK_VOQ_TXBD_DESA 0xffffffffffffffffL
-#define BIT_VOQ_TXBD_DESA(x) \
- (((x) & BIT_MASK_VOQ_TXBD_DESA) << BIT_SHIFT_VOQ_TXBD_DESA)
-#define BIT_GET_VOQ_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_VOQ_TXBD_DESA) & BIT_MASK_VOQ_TXBD_DESA)
-
-/* 2 REG_VIQ_TXBD_DESA (Offset 0x0320) */
-
-#define BIT_SHIFT_VIQ_TXBD_DESA 0
-#define BIT_MASK_VIQ_TXBD_DESA 0xffffffffffffffffL
-#define BIT_VIQ_TXBD_DESA(x) \
- (((x) & BIT_MASK_VIQ_TXBD_DESA) << BIT_SHIFT_VIQ_TXBD_DESA)
-#define BIT_GET_VIQ_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_VIQ_TXBD_DESA) & BIT_MASK_VIQ_TXBD_DESA)
-
-/* 2 REG_BEQ_TXBD_DESA (Offset 0x0328) */
-
-#define BIT_SHIFT_BEQ_TXBD_DESA 0
-#define BIT_MASK_BEQ_TXBD_DESA 0xffffffffffffffffL
-#define BIT_BEQ_TXBD_DESA(x) \
- (((x) & BIT_MASK_BEQ_TXBD_DESA) << BIT_SHIFT_BEQ_TXBD_DESA)
-#define BIT_GET_BEQ_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_BEQ_TXBD_DESA) & BIT_MASK_BEQ_TXBD_DESA)
-
-/* 2 REG_BKQ_TXBD_DESA (Offset 0x0330) */
-
-#define BIT_SHIFT_BKQ_TXBD_DESA 0
-#define BIT_MASK_BKQ_TXBD_DESA 0xffffffffffffffffL
-#define BIT_BKQ_TXBD_DESA(x) \
- (((x) & BIT_MASK_BKQ_TXBD_DESA) << BIT_SHIFT_BKQ_TXBD_DESA)
-#define BIT_GET_BKQ_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_BKQ_TXBD_DESA) & BIT_MASK_BKQ_TXBD_DESA)
-
-/* 2 REG_RXQ_RXBD_DESA (Offset 0x0338) */
-
-#define BIT_SHIFT_RXQ_RXBD_DESA 0
-#define BIT_MASK_RXQ_RXBD_DESA 0xffffffffffffffffL
-#define BIT_RXQ_RXBD_DESA(x) \
- (((x) & BIT_MASK_RXQ_RXBD_DESA) << BIT_SHIFT_RXQ_RXBD_DESA)
-#define BIT_GET_RXQ_RXBD_DESA(x) \
- (((x) >> BIT_SHIFT_RXQ_RXBD_DESA) & BIT_MASK_RXQ_RXBD_DESA)
-
-/* 2 REG_HI0Q_TXBD_DESA (Offset 0x0340) */
-
-#define BIT_SHIFT_HI0Q_TXBD_DESA 0
-#define BIT_MASK_HI0Q_TXBD_DESA 0xffffffffffffffffL
-#define BIT_HI0Q_TXBD_DESA(x) \
- (((x) & BIT_MASK_HI0Q_TXBD_DESA) << BIT_SHIFT_HI0Q_TXBD_DESA)
-#define BIT_GET_HI0Q_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_HI0Q_TXBD_DESA) & BIT_MASK_HI0Q_TXBD_DESA)
-
-/* 2 REG_HI1Q_TXBD_DESA (Offset 0x0348) */
-
-#define BIT_SHIFT_HI1Q_TXBD_DESA 0
-#define BIT_MASK_HI1Q_TXBD_DESA 0xffffffffffffffffL
-#define BIT_HI1Q_TXBD_DESA(x) \
- (((x) & BIT_MASK_HI1Q_TXBD_DESA) << BIT_SHIFT_HI1Q_TXBD_DESA)
-#define BIT_GET_HI1Q_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_HI1Q_TXBD_DESA) & BIT_MASK_HI1Q_TXBD_DESA)
-
-/* 2 REG_HI2Q_TXBD_DESA (Offset 0x0350) */
-
-#define BIT_SHIFT_HI2Q_TXBD_DESA 0
-#define BIT_MASK_HI2Q_TXBD_DESA 0xffffffffffffffffL
-#define BIT_HI2Q_TXBD_DESA(x) \
- (((x) & BIT_MASK_HI2Q_TXBD_DESA) << BIT_SHIFT_HI2Q_TXBD_DESA)
-#define BIT_GET_HI2Q_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_HI2Q_TXBD_DESA) & BIT_MASK_HI2Q_TXBD_DESA)
-
-/* 2 REG_HI3Q_TXBD_DESA (Offset 0x0358) */
-
-#define BIT_SHIFT_HI3Q_TXBD_DESA 0
-#define BIT_MASK_HI3Q_TXBD_DESA 0xffffffffffffffffL
-#define BIT_HI3Q_TXBD_DESA(x) \
- (((x) & BIT_MASK_HI3Q_TXBD_DESA) << BIT_SHIFT_HI3Q_TXBD_DESA)
-#define BIT_GET_HI3Q_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_HI3Q_TXBD_DESA) & BIT_MASK_HI3Q_TXBD_DESA)
-
-/* 2 REG_HI4Q_TXBD_DESA (Offset 0x0360) */
-
-#define BIT_SHIFT_HI4Q_TXBD_DESA 0
-#define BIT_MASK_HI4Q_TXBD_DESA 0xffffffffffffffffL
-#define BIT_HI4Q_TXBD_DESA(x) \
- (((x) & BIT_MASK_HI4Q_TXBD_DESA) << BIT_SHIFT_HI4Q_TXBD_DESA)
-#define BIT_GET_HI4Q_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_HI4Q_TXBD_DESA) & BIT_MASK_HI4Q_TXBD_DESA)
-
-/* 2 REG_HI5Q_TXBD_DESA (Offset 0x0368) */
-
-#define BIT_SHIFT_HI5Q_TXBD_DESA 0
-#define BIT_MASK_HI5Q_TXBD_DESA 0xffffffffffffffffL
-#define BIT_HI5Q_TXBD_DESA(x) \
- (((x) & BIT_MASK_HI5Q_TXBD_DESA) << BIT_SHIFT_HI5Q_TXBD_DESA)
-#define BIT_GET_HI5Q_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_HI5Q_TXBD_DESA) & BIT_MASK_HI5Q_TXBD_DESA)
-
-/* 2 REG_HI6Q_TXBD_DESA (Offset 0x0370) */
-
-#define BIT_SHIFT_HI6Q_TXBD_DESA 0
-#define BIT_MASK_HI6Q_TXBD_DESA 0xffffffffffffffffL
-#define BIT_HI6Q_TXBD_DESA(x) \
- (((x) & BIT_MASK_HI6Q_TXBD_DESA) << BIT_SHIFT_HI6Q_TXBD_DESA)
-#define BIT_GET_HI6Q_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_HI6Q_TXBD_DESA) & BIT_MASK_HI6Q_TXBD_DESA)
-
-/* 2 REG_HI7Q_TXBD_DESA (Offset 0x0378) */
-
-#define BIT_SHIFT_HI7Q_TXBD_DESA 0
-#define BIT_MASK_HI7Q_TXBD_DESA 0xffffffffffffffffL
-#define BIT_HI7Q_TXBD_DESA(x) \
- (((x) & BIT_MASK_HI7Q_TXBD_DESA) << BIT_SHIFT_HI7Q_TXBD_DESA)
-#define BIT_GET_HI7Q_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_HI7Q_TXBD_DESA) & BIT_MASK_HI7Q_TXBD_DESA)
-
-/* 2 REG_MGQ_TXBD_NUM (Offset 0x0380) */
-
-#define BIT_PCIE_MGQ_FLAG BIT(14)
-
-/* 2 REG_MGQ_TXBD_NUM (Offset 0x0380) */
-
-#define BIT_SHIFT_MGQ_DESC_MODE 12
-#define BIT_MASK_MGQ_DESC_MODE 0x3
-#define BIT_MGQ_DESC_MODE(x) \
- (((x) & BIT_MASK_MGQ_DESC_MODE) << BIT_SHIFT_MGQ_DESC_MODE)
-#define BIT_GET_MGQ_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_MGQ_DESC_MODE) & BIT_MASK_MGQ_DESC_MODE)
-
-#define BIT_SHIFT_MGQ_DESC_NUM 0
-#define BIT_MASK_MGQ_DESC_NUM 0xfff
-#define BIT_MGQ_DESC_NUM(x) \
- (((x) & BIT_MASK_MGQ_DESC_NUM) << BIT_SHIFT_MGQ_DESC_NUM)
-#define BIT_GET_MGQ_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_MGQ_DESC_NUM) & BIT_MASK_MGQ_DESC_NUM)
-
-/* 2 REG_RX_RXBD_NUM (Offset 0x0382) */
-
-#define BIT_SYS_32_64 BIT(15)
-
-#define BIT_SHIFT_BCNQ_DESC_MODE 13
-#define BIT_MASK_BCNQ_DESC_MODE 0x3
-#define BIT_BCNQ_DESC_MODE(x) \
- (((x) & BIT_MASK_BCNQ_DESC_MODE) << BIT_SHIFT_BCNQ_DESC_MODE)
-#define BIT_GET_BCNQ_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_BCNQ_DESC_MODE) & BIT_MASK_BCNQ_DESC_MODE)
-
-/* 2 REG_RX_RXBD_NUM (Offset 0x0382) */
-
-#define BIT_PCIE_BCNQ_FLAG BIT(12)
-
-/* 2 REG_RX_RXBD_NUM (Offset 0x0382) */
-
-#define BIT_SHIFT_RXQ_DESC_NUM 0
-#define BIT_MASK_RXQ_DESC_NUM 0xfff
-#define BIT_RXQ_DESC_NUM(x) \
- (((x) & BIT_MASK_RXQ_DESC_NUM) << BIT_SHIFT_RXQ_DESC_NUM)
-#define BIT_GET_RXQ_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_RXQ_DESC_NUM) & BIT_MASK_RXQ_DESC_NUM)
-
-/* 2 REG_VOQ_TXBD_NUM (Offset 0x0384) */
-
-#define BIT_PCIE_VOQ_FLAG BIT(14)
-
-/* 2 REG_VOQ_TXBD_NUM (Offset 0x0384) */
-
-#define BIT_SHIFT_VOQ_DESC_MODE 12
-#define BIT_MASK_VOQ_DESC_MODE 0x3
-#define BIT_VOQ_DESC_MODE(x) \
- (((x) & BIT_MASK_VOQ_DESC_MODE) << BIT_SHIFT_VOQ_DESC_MODE)
-#define BIT_GET_VOQ_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_VOQ_DESC_MODE) & BIT_MASK_VOQ_DESC_MODE)
-
-#define BIT_SHIFT_VOQ_DESC_NUM 0
-#define BIT_MASK_VOQ_DESC_NUM 0xfff
-#define BIT_VOQ_DESC_NUM(x) \
- (((x) & BIT_MASK_VOQ_DESC_NUM) << BIT_SHIFT_VOQ_DESC_NUM)
-#define BIT_GET_VOQ_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_VOQ_DESC_NUM) & BIT_MASK_VOQ_DESC_NUM)
-
-/* 2 REG_VIQ_TXBD_NUM (Offset 0x0386) */
-
-#define BIT_PCIE_VIQ_FLAG BIT(14)
-
-/* 2 REG_VIQ_TXBD_NUM (Offset 0x0386) */
-
-#define BIT_SHIFT_VIQ_DESC_MODE 12
-#define BIT_MASK_VIQ_DESC_MODE 0x3
-#define BIT_VIQ_DESC_MODE(x) \
- (((x) & BIT_MASK_VIQ_DESC_MODE) << BIT_SHIFT_VIQ_DESC_MODE)
-#define BIT_GET_VIQ_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_VIQ_DESC_MODE) & BIT_MASK_VIQ_DESC_MODE)
-
-#define BIT_SHIFT_VIQ_DESC_NUM 0
-#define BIT_MASK_VIQ_DESC_NUM 0xfff
-#define BIT_VIQ_DESC_NUM(x) \
- (((x) & BIT_MASK_VIQ_DESC_NUM) << BIT_SHIFT_VIQ_DESC_NUM)
-#define BIT_GET_VIQ_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_VIQ_DESC_NUM) & BIT_MASK_VIQ_DESC_NUM)
-
-/* 2 REG_BEQ_TXBD_NUM (Offset 0x0388) */
-
-#define BIT_PCIE_BEQ_FLAG BIT(14)
-
-/* 2 REG_BEQ_TXBD_NUM (Offset 0x0388) */
-
-#define BIT_SHIFT_BEQ_DESC_MODE 12
-#define BIT_MASK_BEQ_DESC_MODE 0x3
-#define BIT_BEQ_DESC_MODE(x) \
- (((x) & BIT_MASK_BEQ_DESC_MODE) << BIT_SHIFT_BEQ_DESC_MODE)
-#define BIT_GET_BEQ_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_BEQ_DESC_MODE) & BIT_MASK_BEQ_DESC_MODE)
-
-#define BIT_SHIFT_BEQ_DESC_NUM 0
-#define BIT_MASK_BEQ_DESC_NUM 0xfff
-#define BIT_BEQ_DESC_NUM(x) \
- (((x) & BIT_MASK_BEQ_DESC_NUM) << BIT_SHIFT_BEQ_DESC_NUM)
-#define BIT_GET_BEQ_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_BEQ_DESC_NUM) & BIT_MASK_BEQ_DESC_NUM)
-
-/* 2 REG_BKQ_TXBD_NUM (Offset 0x038A) */
-
-#define BIT_PCIE_BKQ_FLAG BIT(14)
-
-/* 2 REG_BKQ_TXBD_NUM (Offset 0x038A) */
-
-#define BIT_SHIFT_BKQ_DESC_MODE 12
-#define BIT_MASK_BKQ_DESC_MODE 0x3
-#define BIT_BKQ_DESC_MODE(x) \
- (((x) & BIT_MASK_BKQ_DESC_MODE) << BIT_SHIFT_BKQ_DESC_MODE)
-#define BIT_GET_BKQ_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_BKQ_DESC_MODE) & BIT_MASK_BKQ_DESC_MODE)
-
-#define BIT_SHIFT_BKQ_DESC_NUM 0
-#define BIT_MASK_BKQ_DESC_NUM 0xfff
-#define BIT_BKQ_DESC_NUM(x) \
- (((x) & BIT_MASK_BKQ_DESC_NUM) << BIT_SHIFT_BKQ_DESC_NUM)
-#define BIT_GET_BKQ_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_BKQ_DESC_NUM) & BIT_MASK_BKQ_DESC_NUM)
-
-/* 2 REG_HI0Q_TXBD_NUM (Offset 0x038C) */
-
-#define BIT_HI0Q_FLAG BIT(14)
-
-#define BIT_SHIFT_HI0Q_DESC_MODE 12
-#define BIT_MASK_HI0Q_DESC_MODE 0x3
-#define BIT_HI0Q_DESC_MODE(x) \
- (((x) & BIT_MASK_HI0Q_DESC_MODE) << BIT_SHIFT_HI0Q_DESC_MODE)
-#define BIT_GET_HI0Q_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_HI0Q_DESC_MODE) & BIT_MASK_HI0Q_DESC_MODE)
-
-#define BIT_SHIFT_HI0Q_DESC_NUM 0
-#define BIT_MASK_HI0Q_DESC_NUM 0xfff
-#define BIT_HI0Q_DESC_NUM(x) \
- (((x) & BIT_MASK_HI0Q_DESC_NUM) << BIT_SHIFT_HI0Q_DESC_NUM)
-#define BIT_GET_HI0Q_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_HI0Q_DESC_NUM) & BIT_MASK_HI0Q_DESC_NUM)
-
-/* 2 REG_HI1Q_TXBD_NUM (Offset 0x038E) */
-
-#define BIT_HI1Q_FLAG BIT(14)
-
-#define BIT_SHIFT_HI1Q_DESC_MODE 12
-#define BIT_MASK_HI1Q_DESC_MODE 0x3
-#define BIT_HI1Q_DESC_MODE(x) \
- (((x) & BIT_MASK_HI1Q_DESC_MODE) << BIT_SHIFT_HI1Q_DESC_MODE)
-#define BIT_GET_HI1Q_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_HI1Q_DESC_MODE) & BIT_MASK_HI1Q_DESC_MODE)
-
-#define BIT_SHIFT_HI1Q_DESC_NUM 0
-#define BIT_MASK_HI1Q_DESC_NUM 0xfff
-#define BIT_HI1Q_DESC_NUM(x) \
- (((x) & BIT_MASK_HI1Q_DESC_NUM) << BIT_SHIFT_HI1Q_DESC_NUM)
-#define BIT_GET_HI1Q_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_HI1Q_DESC_NUM) & BIT_MASK_HI1Q_DESC_NUM)
-
-/* 2 REG_HI2Q_TXBD_NUM (Offset 0x0390) */
-
-#define BIT_HI2Q_FLAG BIT(14)
-
-#define BIT_SHIFT_HI2Q_DESC_MODE 12
-#define BIT_MASK_HI2Q_DESC_MODE 0x3
-#define BIT_HI2Q_DESC_MODE(x) \
- (((x) & BIT_MASK_HI2Q_DESC_MODE) << BIT_SHIFT_HI2Q_DESC_MODE)
-#define BIT_GET_HI2Q_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_HI2Q_DESC_MODE) & BIT_MASK_HI2Q_DESC_MODE)
-
-#define BIT_SHIFT_HI2Q_DESC_NUM 0
-#define BIT_MASK_HI2Q_DESC_NUM 0xfff
-#define BIT_HI2Q_DESC_NUM(x) \
- (((x) & BIT_MASK_HI2Q_DESC_NUM) << BIT_SHIFT_HI2Q_DESC_NUM)
-#define BIT_GET_HI2Q_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_HI2Q_DESC_NUM) & BIT_MASK_HI2Q_DESC_NUM)
-
-/* 2 REG_HI3Q_TXBD_NUM (Offset 0x0392) */
-
-#define BIT_HI3Q_FLAG BIT(14)
-
-#define BIT_SHIFT_HI3Q_DESC_MODE 12
-#define BIT_MASK_HI3Q_DESC_MODE 0x3
-#define BIT_HI3Q_DESC_MODE(x) \
- (((x) & BIT_MASK_HI3Q_DESC_MODE) << BIT_SHIFT_HI3Q_DESC_MODE)
-#define BIT_GET_HI3Q_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_HI3Q_DESC_MODE) & BIT_MASK_HI3Q_DESC_MODE)
-
-#define BIT_SHIFT_HI3Q_DESC_NUM 0
-#define BIT_MASK_HI3Q_DESC_NUM 0xfff
-#define BIT_HI3Q_DESC_NUM(x) \
- (((x) & BIT_MASK_HI3Q_DESC_NUM) << BIT_SHIFT_HI3Q_DESC_NUM)
-#define BIT_GET_HI3Q_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_HI3Q_DESC_NUM) & BIT_MASK_HI3Q_DESC_NUM)
-
-/* 2 REG_HI4Q_TXBD_NUM (Offset 0x0394) */
-
-#define BIT_HI4Q_FLAG BIT(14)
-
-#define BIT_SHIFT_HI4Q_DESC_MODE 12
-#define BIT_MASK_HI4Q_DESC_MODE 0x3
-#define BIT_HI4Q_DESC_MODE(x) \
- (((x) & BIT_MASK_HI4Q_DESC_MODE) << BIT_SHIFT_HI4Q_DESC_MODE)
-#define BIT_GET_HI4Q_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_HI4Q_DESC_MODE) & BIT_MASK_HI4Q_DESC_MODE)
-
-#define BIT_SHIFT_HI4Q_DESC_NUM 0
-#define BIT_MASK_HI4Q_DESC_NUM 0xfff
-#define BIT_HI4Q_DESC_NUM(x) \
- (((x) & BIT_MASK_HI4Q_DESC_NUM) << BIT_SHIFT_HI4Q_DESC_NUM)
-#define BIT_GET_HI4Q_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_HI4Q_DESC_NUM) & BIT_MASK_HI4Q_DESC_NUM)
-
-/* 2 REG_HI5Q_TXBD_NUM (Offset 0x0396) */
-
-#define BIT_HI5Q_FLAG BIT(14)
-
-#define BIT_SHIFT_HI5Q_DESC_MODE 12
-#define BIT_MASK_HI5Q_DESC_MODE 0x3
-#define BIT_HI5Q_DESC_MODE(x) \
- (((x) & BIT_MASK_HI5Q_DESC_MODE) << BIT_SHIFT_HI5Q_DESC_MODE)
-#define BIT_GET_HI5Q_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_HI5Q_DESC_MODE) & BIT_MASK_HI5Q_DESC_MODE)
-
-#define BIT_SHIFT_HI5Q_DESC_NUM 0
-#define BIT_MASK_HI5Q_DESC_NUM 0xfff
-#define BIT_HI5Q_DESC_NUM(x) \
- (((x) & BIT_MASK_HI5Q_DESC_NUM) << BIT_SHIFT_HI5Q_DESC_NUM)
-#define BIT_GET_HI5Q_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_HI5Q_DESC_NUM) & BIT_MASK_HI5Q_DESC_NUM)
-
-/* 2 REG_HI6Q_TXBD_NUM (Offset 0x0398) */
-
-#define BIT_HI6Q_FLAG BIT(14)
-
-#define BIT_SHIFT_HI6Q_DESC_MODE 12
-#define BIT_MASK_HI6Q_DESC_MODE 0x3
-#define BIT_HI6Q_DESC_MODE(x) \
- (((x) & BIT_MASK_HI6Q_DESC_MODE) << BIT_SHIFT_HI6Q_DESC_MODE)
-#define BIT_GET_HI6Q_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_HI6Q_DESC_MODE) & BIT_MASK_HI6Q_DESC_MODE)
-
-#define BIT_SHIFT_HI6Q_DESC_NUM 0
-#define BIT_MASK_HI6Q_DESC_NUM 0xfff
-#define BIT_HI6Q_DESC_NUM(x) \
- (((x) & BIT_MASK_HI6Q_DESC_NUM) << BIT_SHIFT_HI6Q_DESC_NUM)
-#define BIT_GET_HI6Q_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_HI6Q_DESC_NUM) & BIT_MASK_HI6Q_DESC_NUM)
-
-/* 2 REG_HI7Q_TXBD_NUM (Offset 0x039A) */
-
-#define BIT_HI7Q_FLAG BIT(14)
-
-#define BIT_SHIFT_HI7Q_DESC_MODE 12
-#define BIT_MASK_HI7Q_DESC_MODE 0x3
-#define BIT_HI7Q_DESC_MODE(x) \
- (((x) & BIT_MASK_HI7Q_DESC_MODE) << BIT_SHIFT_HI7Q_DESC_MODE)
-#define BIT_GET_HI7Q_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_HI7Q_DESC_MODE) & BIT_MASK_HI7Q_DESC_MODE)
-
-#define BIT_SHIFT_HI7Q_DESC_NUM 0
-#define BIT_MASK_HI7Q_DESC_NUM 0xfff
-#define BIT_HI7Q_DESC_NUM(x) \
- (((x) & BIT_MASK_HI7Q_DESC_NUM) << BIT_SHIFT_HI7Q_DESC_NUM)
-#define BIT_GET_HI7Q_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_HI7Q_DESC_NUM) & BIT_MASK_HI7Q_DESC_NUM)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI7Q_HW_IDX BIT(29)
-#define BIT_CLR_HI6Q_HW_IDX BIT(28)
-#define BIT_CLR_HI5Q_HW_IDX BIT(27)
-#define BIT_CLR_HI4Q_HW_IDX BIT(26)
-#define BIT_CLR_HI3Q_HW_IDX BIT(25)
-#define BIT_CLR_HI2Q_HW_IDX BIT(24)
-#define BIT_CLR_HI1Q_HW_IDX BIT(23)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI0Q_HW_IDX BIT(22)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_BKQ_HW_IDX BIT(21)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_BEQ_HW_IDX BIT(20)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_VIQ_HW_IDX BIT(19)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_VOQ_HW_IDX BIT(18)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_MGQ_HW_IDX BIT(17)
-
-/* 2 REG_TSFTIMER_HCI (Offset 0x039C) */
-
-#define BIT_SHIFT_TSFT2_HCI 16
-#define BIT_MASK_TSFT2_HCI 0xffff
-#define BIT_TSFT2_HCI(x) (((x) & BIT_MASK_TSFT2_HCI) << BIT_SHIFT_TSFT2_HCI)
-#define BIT_GET_TSFT2_HCI(x) (((x) >> BIT_SHIFT_TSFT2_HCI) & BIT_MASK_TSFT2_HCI)
-
-#define BIT_CLR_RXQ_HW_IDX BIT(16)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI7Q_HOST_IDX BIT(13)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI6Q_HOST_IDX BIT(12)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI5Q_HOST_IDX BIT(11)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI4Q_HOST_IDX BIT(10)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI3Q_HOST_IDX BIT(9)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI2Q_HOST_IDX BIT(8)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_HI1Q_HOST_IDX BIT(7)
-#define BIT_CLR_HI0Q_HOST_IDX BIT(6)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_BKQ_HOST_IDX BIT(5)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_BEQ_HOST_IDX BIT(4)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_VIQ_HOST_IDX BIT(3)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_VOQ_HOST_IDX BIT(2)
-
-/* 2 REG_BD_RWPTR_CLR (Offset 0x039C) */
-
-#define BIT_CLR_MGQ_HOST_IDX BIT(1)
-
-/* 2 REG_TSFTIMER_HCI (Offset 0x039C) */
-
-#define BIT_SHIFT_TSFT1_HCI 0
-#define BIT_MASK_TSFT1_HCI 0xffff
-#define BIT_TSFT1_HCI(x) (((x) & BIT_MASK_TSFT1_HCI) << BIT_SHIFT_TSFT1_HCI)
-#define BIT_GET_TSFT1_HCI(x) (((x) >> BIT_SHIFT_TSFT1_HCI) & BIT_MASK_TSFT1_HCI)
-
-#define BIT_CLR_RXQ_HOST_IDX BIT(0)
-
-/* 2 REG_VOQ_TXBD_IDX (Offset 0x03A0) */
-
-#define BIT_SHIFT_VOQ_HW_IDX 16
-#define BIT_MASK_VOQ_HW_IDX 0xfff
-#define BIT_VOQ_HW_IDX(x) (((x) & BIT_MASK_VOQ_HW_IDX) << BIT_SHIFT_VOQ_HW_IDX)
-#define BIT_GET_VOQ_HW_IDX(x) \
- (((x) >> BIT_SHIFT_VOQ_HW_IDX) & BIT_MASK_VOQ_HW_IDX)
-
-#define BIT_SHIFT_VOQ_HOST_IDX 0
-#define BIT_MASK_VOQ_HOST_IDX 0xfff
-#define BIT_VOQ_HOST_IDX(x) \
- (((x) & BIT_MASK_VOQ_HOST_IDX) << BIT_SHIFT_VOQ_HOST_IDX)
-#define BIT_GET_VOQ_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_VOQ_HOST_IDX) & BIT_MASK_VOQ_HOST_IDX)
-
-/* 2 REG_VIQ_TXBD_IDX (Offset 0x03A4) */
-
-#define BIT_SHIFT_VIQ_HW_IDX 16
-#define BIT_MASK_VIQ_HW_IDX 0xfff
-#define BIT_VIQ_HW_IDX(x) (((x) & BIT_MASK_VIQ_HW_IDX) << BIT_SHIFT_VIQ_HW_IDX)
-#define BIT_GET_VIQ_HW_IDX(x) \
- (((x) >> BIT_SHIFT_VIQ_HW_IDX) & BIT_MASK_VIQ_HW_IDX)
-
-#define BIT_SHIFT_VIQ_HOST_IDX 0
-#define BIT_MASK_VIQ_HOST_IDX 0xfff
-#define BIT_VIQ_HOST_IDX(x) \
- (((x) & BIT_MASK_VIQ_HOST_IDX) << BIT_SHIFT_VIQ_HOST_IDX)
-#define BIT_GET_VIQ_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_VIQ_HOST_IDX) & BIT_MASK_VIQ_HOST_IDX)
-
-/* 2 REG_BEQ_TXBD_IDX (Offset 0x03A8) */
-
-#define BIT_SHIFT_BEQ_HW_IDX 16
-#define BIT_MASK_BEQ_HW_IDX 0xfff
-#define BIT_BEQ_HW_IDX(x) (((x) & BIT_MASK_BEQ_HW_IDX) << BIT_SHIFT_BEQ_HW_IDX)
-#define BIT_GET_BEQ_HW_IDX(x) \
- (((x) >> BIT_SHIFT_BEQ_HW_IDX) & BIT_MASK_BEQ_HW_IDX)
-
-#define BIT_SHIFT_BEQ_HOST_IDX 0
-#define BIT_MASK_BEQ_HOST_IDX 0xfff
-#define BIT_BEQ_HOST_IDX(x) \
- (((x) & BIT_MASK_BEQ_HOST_IDX) << BIT_SHIFT_BEQ_HOST_IDX)
-#define BIT_GET_BEQ_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_BEQ_HOST_IDX) & BIT_MASK_BEQ_HOST_IDX)
-
-/* 2 REG_BKQ_TXBD_IDX (Offset 0x03AC) */
-
-#define BIT_SHIFT_BKQ_HW_IDX 16
-#define BIT_MASK_BKQ_HW_IDX 0xfff
-#define BIT_BKQ_HW_IDX(x) (((x) & BIT_MASK_BKQ_HW_IDX) << BIT_SHIFT_BKQ_HW_IDX)
-#define BIT_GET_BKQ_HW_IDX(x) \
- (((x) >> BIT_SHIFT_BKQ_HW_IDX) & BIT_MASK_BKQ_HW_IDX)
-
-#define BIT_SHIFT_BKQ_HOST_IDX 0
-#define BIT_MASK_BKQ_HOST_IDX 0xfff
-#define BIT_BKQ_HOST_IDX(x) \
- (((x) & BIT_MASK_BKQ_HOST_IDX) << BIT_SHIFT_BKQ_HOST_IDX)
-#define BIT_GET_BKQ_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_BKQ_HOST_IDX) & BIT_MASK_BKQ_HOST_IDX)
-
-/* 2 REG_MGQ_TXBD_IDX (Offset 0x03B0) */
-
-#define BIT_SHIFT_MGQ_HW_IDX 16
-#define BIT_MASK_MGQ_HW_IDX 0xfff
-#define BIT_MGQ_HW_IDX(x) (((x) & BIT_MASK_MGQ_HW_IDX) << BIT_SHIFT_MGQ_HW_IDX)
-#define BIT_GET_MGQ_HW_IDX(x) \
- (((x) >> BIT_SHIFT_MGQ_HW_IDX) & BIT_MASK_MGQ_HW_IDX)
-
-#define BIT_SHIFT_MGQ_HOST_IDX 0
-#define BIT_MASK_MGQ_HOST_IDX 0xfff
-#define BIT_MGQ_HOST_IDX(x) \
- (((x) & BIT_MASK_MGQ_HOST_IDX) << BIT_SHIFT_MGQ_HOST_IDX)
-#define BIT_GET_MGQ_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_MGQ_HOST_IDX) & BIT_MASK_MGQ_HOST_IDX)
-
-/* 2 REG_RXQ_RXBD_IDX (Offset 0x03B4) */
-
-#define BIT_SHIFT_RXQ_HW_IDX 16
-#define BIT_MASK_RXQ_HW_IDX 0xfff
-#define BIT_RXQ_HW_IDX(x) (((x) & BIT_MASK_RXQ_HW_IDX) << BIT_SHIFT_RXQ_HW_IDX)
-#define BIT_GET_RXQ_HW_IDX(x) \
- (((x) >> BIT_SHIFT_RXQ_HW_IDX) & BIT_MASK_RXQ_HW_IDX)
-
-#define BIT_SHIFT_RXQ_HOST_IDX 0
-#define BIT_MASK_RXQ_HOST_IDX 0xfff
-#define BIT_RXQ_HOST_IDX(x) \
- (((x) & BIT_MASK_RXQ_HOST_IDX) << BIT_SHIFT_RXQ_HOST_IDX)
-#define BIT_GET_RXQ_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_RXQ_HOST_IDX) & BIT_MASK_RXQ_HOST_IDX)
-
-/* 2 REG_HI0Q_TXBD_IDX (Offset 0x03B8) */
-
-#define BIT_SHIFT_HI0Q_HW_IDX 16
-#define BIT_MASK_HI0Q_HW_IDX 0xfff
-#define BIT_HI0Q_HW_IDX(x) \
- (((x) & BIT_MASK_HI0Q_HW_IDX) << BIT_SHIFT_HI0Q_HW_IDX)
-#define BIT_GET_HI0Q_HW_IDX(x) \
- (((x) >> BIT_SHIFT_HI0Q_HW_IDX) & BIT_MASK_HI0Q_HW_IDX)
-
-#define BIT_SHIFT_HI0Q_HOST_IDX 0
-#define BIT_MASK_HI0Q_HOST_IDX 0xfff
-#define BIT_HI0Q_HOST_IDX(x) \
- (((x) & BIT_MASK_HI0Q_HOST_IDX) << BIT_SHIFT_HI0Q_HOST_IDX)
-#define BIT_GET_HI0Q_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_HI0Q_HOST_IDX) & BIT_MASK_HI0Q_HOST_IDX)
-
-/* 2 REG_HI1Q_TXBD_IDX (Offset 0x03BC) */
-
-#define BIT_SHIFT_HI1Q_HW_IDX 16
-#define BIT_MASK_HI1Q_HW_IDX 0xfff
-#define BIT_HI1Q_HW_IDX(x) \
- (((x) & BIT_MASK_HI1Q_HW_IDX) << BIT_SHIFT_HI1Q_HW_IDX)
-#define BIT_GET_HI1Q_HW_IDX(x) \
- (((x) >> BIT_SHIFT_HI1Q_HW_IDX) & BIT_MASK_HI1Q_HW_IDX)
-
-#define BIT_SHIFT_HI1Q_HOST_IDX 0
-#define BIT_MASK_HI1Q_HOST_IDX 0xfff
-#define BIT_HI1Q_HOST_IDX(x) \
- (((x) & BIT_MASK_HI1Q_HOST_IDX) << BIT_SHIFT_HI1Q_HOST_IDX)
-#define BIT_GET_HI1Q_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_HI1Q_HOST_IDX) & BIT_MASK_HI1Q_HOST_IDX)
-
-/* 2 REG_HI2Q_TXBD_IDX (Offset 0x03C0) */
-
-#define BIT_SHIFT_HI2Q_HW_IDX 16
-#define BIT_MASK_HI2Q_HW_IDX 0xfff
-#define BIT_HI2Q_HW_IDX(x) \
- (((x) & BIT_MASK_HI2Q_HW_IDX) << BIT_SHIFT_HI2Q_HW_IDX)
-#define BIT_GET_HI2Q_HW_IDX(x) \
- (((x) >> BIT_SHIFT_HI2Q_HW_IDX) & BIT_MASK_HI2Q_HW_IDX)
-
-#define BIT_SHIFT_HI2Q_HOST_IDX 0
-#define BIT_MASK_HI2Q_HOST_IDX 0xfff
-#define BIT_HI2Q_HOST_IDX(x) \
- (((x) & BIT_MASK_HI2Q_HOST_IDX) << BIT_SHIFT_HI2Q_HOST_IDX)
-#define BIT_GET_HI2Q_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_HI2Q_HOST_IDX) & BIT_MASK_HI2Q_HOST_IDX)
-
-/* 2 REG_HI3Q_TXBD_IDX (Offset 0x03C4) */
-
-#define BIT_SHIFT_HI3Q_HW_IDX 16
-#define BIT_MASK_HI3Q_HW_IDX 0xfff
-#define BIT_HI3Q_HW_IDX(x) \
- (((x) & BIT_MASK_HI3Q_HW_IDX) << BIT_SHIFT_HI3Q_HW_IDX)
-#define BIT_GET_HI3Q_HW_IDX(x) \
- (((x) >> BIT_SHIFT_HI3Q_HW_IDX) & BIT_MASK_HI3Q_HW_IDX)
-
-#define BIT_SHIFT_HI3Q_HOST_IDX 0
-#define BIT_MASK_HI3Q_HOST_IDX 0xfff
-#define BIT_HI3Q_HOST_IDX(x) \
- (((x) & BIT_MASK_HI3Q_HOST_IDX) << BIT_SHIFT_HI3Q_HOST_IDX)
-#define BIT_GET_HI3Q_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_HI3Q_HOST_IDX) & BIT_MASK_HI3Q_HOST_IDX)
-
-/* 2 REG_HI4Q_TXBD_IDX (Offset 0x03C8) */
-
-#define BIT_SHIFT_HI4Q_HW_IDX 16
-#define BIT_MASK_HI4Q_HW_IDX 0xfff
-#define BIT_HI4Q_HW_IDX(x) \
- (((x) & BIT_MASK_HI4Q_HW_IDX) << BIT_SHIFT_HI4Q_HW_IDX)
-#define BIT_GET_HI4Q_HW_IDX(x) \
- (((x) >> BIT_SHIFT_HI4Q_HW_IDX) & BIT_MASK_HI4Q_HW_IDX)
-
-#define BIT_SHIFT_HI4Q_HOST_IDX 0
-#define BIT_MASK_HI4Q_HOST_IDX 0xfff
-#define BIT_HI4Q_HOST_IDX(x) \
- (((x) & BIT_MASK_HI4Q_HOST_IDX) << BIT_SHIFT_HI4Q_HOST_IDX)
-#define BIT_GET_HI4Q_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_HI4Q_HOST_IDX) & BIT_MASK_HI4Q_HOST_IDX)
-
-/* 2 REG_HI5Q_TXBD_IDX (Offset 0x03CC) */
-
-#define BIT_SHIFT_HI5Q_HW_IDX 16
-#define BIT_MASK_HI5Q_HW_IDX 0xfff
-#define BIT_HI5Q_HW_IDX(x) \
- (((x) & BIT_MASK_HI5Q_HW_IDX) << BIT_SHIFT_HI5Q_HW_IDX)
-#define BIT_GET_HI5Q_HW_IDX(x) \
- (((x) >> BIT_SHIFT_HI5Q_HW_IDX) & BIT_MASK_HI5Q_HW_IDX)
-
-#define BIT_SHIFT_HI5Q_HOST_IDX 0
-#define BIT_MASK_HI5Q_HOST_IDX 0xfff
-#define BIT_HI5Q_HOST_IDX(x) \
- (((x) & BIT_MASK_HI5Q_HOST_IDX) << BIT_SHIFT_HI5Q_HOST_IDX)
-#define BIT_GET_HI5Q_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_HI5Q_HOST_IDX) & BIT_MASK_HI5Q_HOST_IDX)
-
-/* 2 REG_HI6Q_TXBD_IDX (Offset 0x03D0) */
-
-#define BIT_SHIFT_HI6Q_HW_IDX 16
-#define BIT_MASK_HI6Q_HW_IDX 0xfff
-#define BIT_HI6Q_HW_IDX(x) \
- (((x) & BIT_MASK_HI6Q_HW_IDX) << BIT_SHIFT_HI6Q_HW_IDX)
-#define BIT_GET_HI6Q_HW_IDX(x) \
- (((x) >> BIT_SHIFT_HI6Q_HW_IDX) & BIT_MASK_HI6Q_HW_IDX)
-
-#define BIT_SHIFT_HI6Q_HOST_IDX 0
-#define BIT_MASK_HI6Q_HOST_IDX 0xfff
-#define BIT_HI6Q_HOST_IDX(x) \
- (((x) & BIT_MASK_HI6Q_HOST_IDX) << BIT_SHIFT_HI6Q_HOST_IDX)
-#define BIT_GET_HI6Q_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_HI6Q_HOST_IDX) & BIT_MASK_HI6Q_HOST_IDX)
-
-/* 2 REG_HI7Q_TXBD_IDX (Offset 0x03D4) */
-
-#define BIT_SHIFT_HI7Q_HW_IDX 16
-#define BIT_MASK_HI7Q_HW_IDX 0xfff
-#define BIT_HI7Q_HW_IDX(x) \
- (((x) & BIT_MASK_HI7Q_HW_IDX) << BIT_SHIFT_HI7Q_HW_IDX)
-#define BIT_GET_HI7Q_HW_IDX(x) \
- (((x) >> BIT_SHIFT_HI7Q_HW_IDX) & BIT_MASK_HI7Q_HW_IDX)
-
-#define BIT_SHIFT_HI7Q_HOST_IDX 0
-#define BIT_MASK_HI7Q_HOST_IDX 0xfff
-#define BIT_HI7Q_HOST_IDX(x) \
- (((x) & BIT_MASK_HI7Q_HOST_IDX) << BIT_SHIFT_HI7Q_HOST_IDX)
-#define BIT_GET_HI7Q_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_HI7Q_HOST_IDX) & BIT_MASK_HI7Q_HOST_IDX)
-
-/* 2 REG_DBG_SEL_V1 (Offset 0x03D8) */
-
-#define BIT_DIS_TXDMA_PRE BIT(7)
-#define BIT_DIS_RXDMA_PRE BIT(6)
-#define BIT_TXFLAG_EXIT_L1_EN BIT(2)
-
-#define BIT_SHIFT_DBG_SEL 0
-#define BIT_MASK_DBG_SEL 0xff
-#define BIT_DBG_SEL(x) (((x) & BIT_MASK_DBG_SEL) << BIT_SHIFT_DBG_SEL)
-#define BIT_GET_DBG_SEL(x) (((x) >> BIT_SHIFT_DBG_SEL) & BIT_MASK_DBG_SEL)
-
-/* 2 REG_PCIE_HRPWM1_V1 (Offset 0x03D9) */
-
-#define BIT_SHIFT_PCIE_HRPWM 0
-#define BIT_MASK_PCIE_HRPWM 0xff
-#define BIT_PCIE_HRPWM(x) (((x) & BIT_MASK_PCIE_HRPWM) << BIT_SHIFT_PCIE_HRPWM)
-#define BIT_GET_PCIE_HRPWM(x) \
- (((x) >> BIT_SHIFT_PCIE_HRPWM) & BIT_MASK_PCIE_HRPWM)
-
-/* 2 REG_PCIE_HCPWM1_V1 (Offset 0x03DA) */
-
-#define BIT_SHIFT_PCIE_HCPWM 0
-#define BIT_MASK_PCIE_HCPWM 0xff
-#define BIT_PCIE_HCPWM(x) (((x) & BIT_MASK_PCIE_HCPWM) << BIT_SHIFT_PCIE_HCPWM)
-#define BIT_GET_PCIE_HCPWM(x) \
- (((x) >> BIT_SHIFT_PCIE_HCPWM) & BIT_MASK_PCIE_HCPWM)
-
-/* 2 REG_PCIE_CTRL2 (Offset 0x03DB) */
-
-#define BIT_SHIFT_HPS_CLKR_PCIE 4
-#define BIT_MASK_HPS_CLKR_PCIE 0x3
-#define BIT_HPS_CLKR_PCIE(x) \
- (((x) & BIT_MASK_HPS_CLKR_PCIE) << BIT_SHIFT_HPS_CLKR_PCIE)
-#define BIT_GET_HPS_CLKR_PCIE(x) \
- (((x) >> BIT_SHIFT_HPS_CLKR_PCIE) & BIT_MASK_HPS_CLKR_PCIE)
-
-/* 2 REG_PCIE_CTRL2 (Offset 0x03DB) */
-
-#define BIT_PCIE_INT BIT(3)
-
-/* 2 REG_PCIE_CTRL2 (Offset 0x03DB) */
-
-#define BIT_EN_RXDMA_ALIGN BIT(1)
-#define BIT_EN_TXDMA_ALIGN BIT(0)
-
-/* 2 REG_PCIE_HRPWM2_V1 (Offset 0x03DC) */
-
-#define BIT_SHIFT_PCIE_HRPWM2 0
-#define BIT_MASK_PCIE_HRPWM2 0xffff
-#define BIT_PCIE_HRPWM2(x) \
- (((x) & BIT_MASK_PCIE_HRPWM2) << BIT_SHIFT_PCIE_HRPWM2)
-#define BIT_GET_PCIE_HRPWM2(x) \
- (((x) >> BIT_SHIFT_PCIE_HRPWM2) & BIT_MASK_PCIE_HRPWM2)
-
-/* 2 REG_PCIE_HCPWM2_V1 (Offset 0x03DE) */
-
-#define BIT_SHIFT_PCIE_HCPWM2 0
-#define BIT_MASK_PCIE_HCPWM2 0xffff
-#define BIT_PCIE_HCPWM2(x) \
- (((x) & BIT_MASK_PCIE_HCPWM2) << BIT_SHIFT_PCIE_HCPWM2)
-#define BIT_GET_PCIE_HCPWM2(x) \
- (((x) >> BIT_SHIFT_PCIE_HCPWM2) & BIT_MASK_PCIE_HCPWM2)
-
-/* 2 REG_PCIE_H2C_MSG_V1 (Offset 0x03E0) */
-
-#define BIT_SHIFT_DRV2FW_INFO 0
-#define BIT_MASK_DRV2FW_INFO 0xffffffffL
-#define BIT_DRV2FW_INFO(x) \
- (((x) & BIT_MASK_DRV2FW_INFO) << BIT_SHIFT_DRV2FW_INFO)
-#define BIT_GET_DRV2FW_INFO(x) \
- (((x) >> BIT_SHIFT_DRV2FW_INFO) & BIT_MASK_DRV2FW_INFO)
-
-/* 2 REG_PCIE_C2H_MSG_V1 (Offset 0x03E4) */
-
-#define BIT_SHIFT_HCI_PCIE_C2H_MSG 0
-#define BIT_MASK_HCI_PCIE_C2H_MSG 0xffffffffL
-#define BIT_HCI_PCIE_C2H_MSG(x) \
- (((x) & BIT_MASK_HCI_PCIE_C2H_MSG) << BIT_SHIFT_HCI_PCIE_C2H_MSG)
-#define BIT_GET_HCI_PCIE_C2H_MSG(x) \
- (((x) >> BIT_SHIFT_HCI_PCIE_C2H_MSG) & BIT_MASK_HCI_PCIE_C2H_MSG)
-
-/* 2 REG_DBI_WDATA_V1 (Offset 0x03E8) */
-
-#define BIT_SHIFT_DBI_WDATA 0
-#define BIT_MASK_DBI_WDATA 0xffffffffL
-#define BIT_DBI_WDATA(x) (((x) & BIT_MASK_DBI_WDATA) << BIT_SHIFT_DBI_WDATA)
-#define BIT_GET_DBI_WDATA(x) (((x) >> BIT_SHIFT_DBI_WDATA) & BIT_MASK_DBI_WDATA)
-
-/* 2 REG_DBI_RDATA_V1 (Offset 0x03EC) */
-
-#define BIT_SHIFT_DBI_RDATA 0
-#define BIT_MASK_DBI_RDATA 0xffffffffL
-#define BIT_DBI_RDATA(x) (((x) & BIT_MASK_DBI_RDATA) << BIT_SHIFT_DBI_RDATA)
-#define BIT_GET_DBI_RDATA(x) (((x) >> BIT_SHIFT_DBI_RDATA) & BIT_MASK_DBI_RDATA)
-
-/* 2 REG_DBI_FLAG_V1 (Offset 0x03F0) */
-
-#define BIT_EN_STUCK_DBG BIT(26)
-#define BIT_RX_STUCK BIT(25)
-#define BIT_TX_STUCK BIT(24)
-#define BIT_DBI_RFLAG BIT(17)
-#define BIT_DBI_WFLAG BIT(16)
-
-#define BIT_SHIFT_DBI_WREN 12
-#define BIT_MASK_DBI_WREN 0xf
-#define BIT_DBI_WREN(x) (((x) & BIT_MASK_DBI_WREN) << BIT_SHIFT_DBI_WREN)
-#define BIT_GET_DBI_WREN(x) (((x) >> BIT_SHIFT_DBI_WREN) & BIT_MASK_DBI_WREN)
-
-#define BIT_SHIFT_DBI_ADDR 0
-#define BIT_MASK_DBI_ADDR 0xfff
-#define BIT_DBI_ADDR(x) (((x) & BIT_MASK_DBI_ADDR) << BIT_SHIFT_DBI_ADDR)
-#define BIT_GET_DBI_ADDR(x) (((x) >> BIT_SHIFT_DBI_ADDR) & BIT_MASK_DBI_ADDR)
-
-/* 2 REG_MDIO_V1 (Offset 0x03F4) */
-
-#define BIT_SHIFT_MDIO_RDATA 16
-#define BIT_MASK_MDIO_RDATA 0xffff
-#define BIT_MDIO_RDATA(x) (((x) & BIT_MASK_MDIO_RDATA) << BIT_SHIFT_MDIO_RDATA)
-#define BIT_GET_MDIO_RDATA(x) \
- (((x) >> BIT_SHIFT_MDIO_RDATA) & BIT_MASK_MDIO_RDATA)
-
-#define BIT_SHIFT_MDIO_WDATA 0
-#define BIT_MASK_MDIO_WDATA 0xffff
-#define BIT_MDIO_WDATA(x) (((x) & BIT_MASK_MDIO_WDATA) << BIT_SHIFT_MDIO_WDATA)
-#define BIT_GET_MDIO_WDATA(x) \
- (((x) >> BIT_SHIFT_MDIO_WDATA) & BIT_MASK_MDIO_WDATA)
-
-/* 2 REG_PCIE_MIX_CFG (Offset 0x03F8) */
-
-#define BIT_EN_WATCH_DOG BIT(8)
-
-/* 2 REG_PCIE_MIX_CFG (Offset 0x03F8) */
-
-#define BIT_SHIFT_MDIO_REG_ADDR_V1 0
-#define BIT_MASK_MDIO_REG_ADDR_V1 0x1f
-#define BIT_MDIO_REG_ADDR_V1(x) \
- (((x) & BIT_MASK_MDIO_REG_ADDR_V1) << BIT_SHIFT_MDIO_REG_ADDR_V1)
-#define BIT_GET_MDIO_REG_ADDR_V1(x) \
- (((x) >> BIT_SHIFT_MDIO_REG_ADDR_V1) & BIT_MASK_MDIO_REG_ADDR_V1)
-
-/* 2 REG_HCI_MIX_CFG (Offset 0x03FC) */
-
-#define BIT_HOST_GEN2_SUPPORT BIT(20)
-
-#define BIT_SHIFT_TXDMA_ERR_FLAG 16
-#define BIT_MASK_TXDMA_ERR_FLAG 0xf
-#define BIT_TXDMA_ERR_FLAG(x) \
- (((x) & BIT_MASK_TXDMA_ERR_FLAG) << BIT_SHIFT_TXDMA_ERR_FLAG)
-#define BIT_GET_TXDMA_ERR_FLAG(x) \
- (((x) >> BIT_SHIFT_TXDMA_ERR_FLAG) & BIT_MASK_TXDMA_ERR_FLAG)
-
-#define BIT_SHIFT_EARLY_MODE_SEL 12
-#define BIT_MASK_EARLY_MODE_SEL 0xf
-#define BIT_EARLY_MODE_SEL(x) \
- (((x) & BIT_MASK_EARLY_MODE_SEL) << BIT_SHIFT_EARLY_MODE_SEL)
-#define BIT_GET_EARLY_MODE_SEL(x) \
- (((x) >> BIT_SHIFT_EARLY_MODE_SEL) & BIT_MASK_EARLY_MODE_SEL)
-
-#define BIT_EPHY_RX50_EN BIT(11)
-
-#define BIT_SHIFT_MSI_TIMEOUT_ID_V1 8
-#define BIT_MASK_MSI_TIMEOUT_ID_V1 0x7
-#define BIT_MSI_TIMEOUT_ID_V1(x) \
- (((x) & BIT_MASK_MSI_TIMEOUT_ID_V1) << BIT_SHIFT_MSI_TIMEOUT_ID_V1)
-#define BIT_GET_MSI_TIMEOUT_ID_V1(x) \
- (((x) >> BIT_SHIFT_MSI_TIMEOUT_ID_V1) & BIT_MASK_MSI_TIMEOUT_ID_V1)
-
-#define BIT_RADDR_RD BIT(7)
-#define BIT_EN_MUL_TAG BIT(6)
-#define BIT_EN_EARLY_MODE BIT(5)
-#define BIT_L0S_LINK_OFF BIT(4)
-#define BIT_ACT_LINK_OFF BIT(3)
-
-/* 2 REG_HCI_MIX_CFG (Offset 0x03FC) */
-
-#define BIT_EN_SLOW_MAC_TX BIT(2)
-#define BIT_EN_SLOW_MAC_RX BIT(1)
-
-/* 2 REG_Q0_INFO (Offset 0x0400) */
-
-#define BIT_SHIFT_QUEUEMACID_Q0_V1 25
-#define BIT_MASK_QUEUEMACID_Q0_V1 0x7f
-#define BIT_QUEUEMACID_Q0_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q0_V1) << BIT_SHIFT_QUEUEMACID_Q0_V1)
-#define BIT_GET_QUEUEMACID_Q0_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q0_V1) & BIT_MASK_QUEUEMACID_Q0_V1)
-
-#define BIT_SHIFT_QUEUEAC_Q0_V1 23
-#define BIT_MASK_QUEUEAC_Q0_V1 0x3
-#define BIT_QUEUEAC_Q0_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_Q0_V1) << BIT_SHIFT_QUEUEAC_Q0_V1)
-#define BIT_GET_QUEUEAC_Q0_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q0_V1) & BIT_MASK_QUEUEAC_Q0_V1)
-
-/* 2 REG_Q0_INFO (Offset 0x0400) */
-
-#define BIT_TIDEMPTY_Q0_V1 BIT(22)
-
-/* 2 REG_Q0_INFO (Offset 0x0400) */
-
-#define BIT_SHIFT_TAIL_PKT_Q0_V2 11
-#define BIT_MASK_TAIL_PKT_Q0_V2 0x7ff
-#define BIT_TAIL_PKT_Q0_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q0_V2) << BIT_SHIFT_TAIL_PKT_Q0_V2)
-#define BIT_GET_TAIL_PKT_Q0_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q0_V2) & BIT_MASK_TAIL_PKT_Q0_V2)
-
-/* 2 REG_Q0_INFO (Offset 0x0400) */
-
-#define BIT_SHIFT_HEAD_PKT_Q0_V1 0
-#define BIT_MASK_HEAD_PKT_Q0_V1 0x7ff
-#define BIT_HEAD_PKT_Q0_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q0_V1) << BIT_SHIFT_HEAD_PKT_Q0_V1)
-#define BIT_GET_HEAD_PKT_Q0_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q0_V1) & BIT_MASK_HEAD_PKT_Q0_V1)
-
-/* 2 REG_Q1_INFO (Offset 0x0404) */
-
-#define BIT_SHIFT_QUEUEMACID_Q1_V1 25
-#define BIT_MASK_QUEUEMACID_Q1_V1 0x7f
-#define BIT_QUEUEMACID_Q1_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q1_V1) << BIT_SHIFT_QUEUEMACID_Q1_V1)
-#define BIT_GET_QUEUEMACID_Q1_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q1_V1) & BIT_MASK_QUEUEMACID_Q1_V1)
-
-#define BIT_SHIFT_QUEUEAC_Q1_V1 23
-#define BIT_MASK_QUEUEAC_Q1_V1 0x3
-#define BIT_QUEUEAC_Q1_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_Q1_V1) << BIT_SHIFT_QUEUEAC_Q1_V1)
-#define BIT_GET_QUEUEAC_Q1_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q1_V1) & BIT_MASK_QUEUEAC_Q1_V1)
-
-/* 2 REG_Q1_INFO (Offset 0x0404) */
-
-#define BIT_TIDEMPTY_Q1_V1 BIT(22)
-
-/* 2 REG_Q1_INFO (Offset 0x0404) */
-
-#define BIT_SHIFT_TAIL_PKT_Q1_V2 11
-#define BIT_MASK_TAIL_PKT_Q1_V2 0x7ff
-#define BIT_TAIL_PKT_Q1_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q1_V2) << BIT_SHIFT_TAIL_PKT_Q1_V2)
-#define BIT_GET_TAIL_PKT_Q1_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q1_V2) & BIT_MASK_TAIL_PKT_Q1_V2)
-
-/* 2 REG_Q1_INFO (Offset 0x0404) */
-
-#define BIT_SHIFT_HEAD_PKT_Q1_V1 0
-#define BIT_MASK_HEAD_PKT_Q1_V1 0x7ff
-#define BIT_HEAD_PKT_Q1_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q1_V1) << BIT_SHIFT_HEAD_PKT_Q1_V1)
-#define BIT_GET_HEAD_PKT_Q1_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q1_V1) & BIT_MASK_HEAD_PKT_Q1_V1)
-
-/* 2 REG_Q2_INFO (Offset 0x0408) */
-
-#define BIT_SHIFT_QUEUEMACID_Q2_V1 25
-#define BIT_MASK_QUEUEMACID_Q2_V1 0x7f
-#define BIT_QUEUEMACID_Q2_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q2_V1) << BIT_SHIFT_QUEUEMACID_Q2_V1)
-#define BIT_GET_QUEUEMACID_Q2_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q2_V1) & BIT_MASK_QUEUEMACID_Q2_V1)
-
-#define BIT_SHIFT_QUEUEAC_Q2_V1 23
-#define BIT_MASK_QUEUEAC_Q2_V1 0x3
-#define BIT_QUEUEAC_Q2_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_Q2_V1) << BIT_SHIFT_QUEUEAC_Q2_V1)
-#define BIT_GET_QUEUEAC_Q2_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q2_V1) & BIT_MASK_QUEUEAC_Q2_V1)
-
-/* 2 REG_Q2_INFO (Offset 0x0408) */
-
-#define BIT_TIDEMPTY_Q2_V1 BIT(22)
-
-/* 2 REG_Q2_INFO (Offset 0x0408) */
-
-#define BIT_SHIFT_TAIL_PKT_Q2_V2 11
-#define BIT_MASK_TAIL_PKT_Q2_V2 0x7ff
-#define BIT_TAIL_PKT_Q2_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q2_V2) << BIT_SHIFT_TAIL_PKT_Q2_V2)
-#define BIT_GET_TAIL_PKT_Q2_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q2_V2) & BIT_MASK_TAIL_PKT_Q2_V2)
-
-/* 2 REG_Q2_INFO (Offset 0x0408) */
-
-#define BIT_SHIFT_HEAD_PKT_Q2_V1 0
-#define BIT_MASK_HEAD_PKT_Q2_V1 0x7ff
-#define BIT_HEAD_PKT_Q2_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q2_V1) << BIT_SHIFT_HEAD_PKT_Q2_V1)
-#define BIT_GET_HEAD_PKT_Q2_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q2_V1) & BIT_MASK_HEAD_PKT_Q2_V1)
-
-/* 2 REG_Q3_INFO (Offset 0x040C) */
-
-#define BIT_SHIFT_QUEUEMACID_Q3_V1 25
-#define BIT_MASK_QUEUEMACID_Q3_V1 0x7f
-#define BIT_QUEUEMACID_Q3_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q3_V1) << BIT_SHIFT_QUEUEMACID_Q3_V1)
-#define BIT_GET_QUEUEMACID_Q3_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q3_V1) & BIT_MASK_QUEUEMACID_Q3_V1)
-
-#define BIT_SHIFT_QUEUEAC_Q3_V1 23
-#define BIT_MASK_QUEUEAC_Q3_V1 0x3
-#define BIT_QUEUEAC_Q3_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_Q3_V1) << BIT_SHIFT_QUEUEAC_Q3_V1)
-#define BIT_GET_QUEUEAC_Q3_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q3_V1) & BIT_MASK_QUEUEAC_Q3_V1)
-
-/* 2 REG_Q3_INFO (Offset 0x040C) */
-
-#define BIT_TIDEMPTY_Q3_V1 BIT(22)
-
-/* 2 REG_Q3_INFO (Offset 0x040C) */
-
-#define BIT_SHIFT_TAIL_PKT_Q3_V2 11
-#define BIT_MASK_TAIL_PKT_Q3_V2 0x7ff
-#define BIT_TAIL_PKT_Q3_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q3_V2) << BIT_SHIFT_TAIL_PKT_Q3_V2)
-#define BIT_GET_TAIL_PKT_Q3_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q3_V2) & BIT_MASK_TAIL_PKT_Q3_V2)
-
-/* 2 REG_Q3_INFO (Offset 0x040C) */
-
-#define BIT_SHIFT_HEAD_PKT_Q3_V1 0
-#define BIT_MASK_HEAD_PKT_Q3_V1 0x7ff
-#define BIT_HEAD_PKT_Q3_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q3_V1) << BIT_SHIFT_HEAD_PKT_Q3_V1)
-#define BIT_GET_HEAD_PKT_Q3_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q3_V1) & BIT_MASK_HEAD_PKT_Q3_V1)
-
-/* 2 REG_MGQ_INFO (Offset 0x0410) */
-
-#define BIT_SHIFT_QUEUEMACID_MGQ_V1 25
-#define BIT_MASK_QUEUEMACID_MGQ_V1 0x7f
-#define BIT_QUEUEMACID_MGQ_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_MGQ_V1) << BIT_SHIFT_QUEUEMACID_MGQ_V1)
-#define BIT_GET_QUEUEMACID_MGQ_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_MGQ_V1) & BIT_MASK_QUEUEMACID_MGQ_V1)
-
-#define BIT_SHIFT_QUEUEAC_MGQ_V1 23
-#define BIT_MASK_QUEUEAC_MGQ_V1 0x3
-#define BIT_QUEUEAC_MGQ_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_MGQ_V1) << BIT_SHIFT_QUEUEAC_MGQ_V1)
-#define BIT_GET_QUEUEAC_MGQ_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_MGQ_V1) & BIT_MASK_QUEUEAC_MGQ_V1)
-
-/* 2 REG_MGQ_INFO (Offset 0x0410) */
-
-#define BIT_TIDEMPTY_MGQ_V1 BIT(22)
-
-/* 2 REG_MGQ_INFO (Offset 0x0410) */
-
-#define BIT_SHIFT_TAIL_PKT_MGQ_V2 11
-#define BIT_MASK_TAIL_PKT_MGQ_V2 0x7ff
-#define BIT_TAIL_PKT_MGQ_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_MGQ_V2) << BIT_SHIFT_TAIL_PKT_MGQ_V2)
-#define BIT_GET_TAIL_PKT_MGQ_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_MGQ_V2) & BIT_MASK_TAIL_PKT_MGQ_V2)
-
-/* 2 REG_MGQ_INFO (Offset 0x0410) */
-
-#define BIT_SHIFT_HEAD_PKT_MGQ_V1 0
-#define BIT_MASK_HEAD_PKT_MGQ_V1 0x7ff
-#define BIT_HEAD_PKT_MGQ_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_MGQ_V1) << BIT_SHIFT_HEAD_PKT_MGQ_V1)
-#define BIT_GET_HEAD_PKT_MGQ_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_MGQ_V1) & BIT_MASK_HEAD_PKT_MGQ_V1)
-
-/* 2 REG_HIQ_INFO (Offset 0x0414) */
-
-#define BIT_SHIFT_QUEUEMACID_HIQ_V1 25
-#define BIT_MASK_QUEUEMACID_HIQ_V1 0x7f
-#define BIT_QUEUEMACID_HIQ_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_HIQ_V1) << BIT_SHIFT_QUEUEMACID_HIQ_V1)
-#define BIT_GET_QUEUEMACID_HIQ_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_HIQ_V1) & BIT_MASK_QUEUEMACID_HIQ_V1)
-
-#define BIT_SHIFT_QUEUEAC_HIQ_V1 23
-#define BIT_MASK_QUEUEAC_HIQ_V1 0x3
-#define BIT_QUEUEAC_HIQ_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_HIQ_V1) << BIT_SHIFT_QUEUEAC_HIQ_V1)
-#define BIT_GET_QUEUEAC_HIQ_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_HIQ_V1) & BIT_MASK_QUEUEAC_HIQ_V1)
-
-/* 2 REG_HIQ_INFO (Offset 0x0414) */
-
-#define BIT_TIDEMPTY_HIQ_V1 BIT(22)
-
-/* 2 REG_HIQ_INFO (Offset 0x0414) */
-
-#define BIT_SHIFT_TAIL_PKT_HIQ_V2 11
-#define BIT_MASK_TAIL_PKT_HIQ_V2 0x7ff
-#define BIT_TAIL_PKT_HIQ_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_HIQ_V2) << BIT_SHIFT_TAIL_PKT_HIQ_V2)
-#define BIT_GET_TAIL_PKT_HIQ_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_HIQ_V2) & BIT_MASK_TAIL_PKT_HIQ_V2)
-
-/* 2 REG_HIQ_INFO (Offset 0x0414) */
-
-#define BIT_SHIFT_HEAD_PKT_HIQ_V1 0
-#define BIT_MASK_HEAD_PKT_HIQ_V1 0x7ff
-#define BIT_HEAD_PKT_HIQ_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_HIQ_V1) << BIT_SHIFT_HEAD_PKT_HIQ_V1)
-#define BIT_GET_HEAD_PKT_HIQ_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_HIQ_V1) & BIT_MASK_HEAD_PKT_HIQ_V1)
-
-/* 2 REG_BCNQ_INFO (Offset 0x0418) */
-
-#define BIT_SHIFT_BCNQ_HEAD_PG_V1 0
-#define BIT_MASK_BCNQ_HEAD_PG_V1 0xfff
-#define BIT_BCNQ_HEAD_PG_V1(x) \
- (((x) & BIT_MASK_BCNQ_HEAD_PG_V1) << BIT_SHIFT_BCNQ_HEAD_PG_V1)
-#define BIT_GET_BCNQ_HEAD_PG_V1(x) \
- (((x) >> BIT_SHIFT_BCNQ_HEAD_PG_V1) & BIT_MASK_BCNQ_HEAD_PG_V1)
-
-/* 2 REG_TXPKT_EMPTY (Offset 0x041A) */
-
-#define BIT_BCNQ_EMPTY BIT(11)
-#define BIT_HQQ_EMPTY BIT(10)
-#define BIT_MQQ_EMPTY BIT(9)
-#define BIT_MGQ_CPU_EMPTY BIT(8)
-#define BIT_AC7Q_EMPTY BIT(7)
-#define BIT_AC6Q_EMPTY BIT(6)
-#define BIT_AC5Q_EMPTY BIT(5)
-#define BIT_AC4Q_EMPTY BIT(4)
-#define BIT_AC3Q_EMPTY BIT(3)
-#define BIT_AC2Q_EMPTY BIT(2)
-#define BIT_AC1Q_EMPTY BIT(1)
-#define BIT_AC0Q_EMPTY BIT(0)
-
-/* 2 REG_CPU_MGQ_INFO (Offset 0x041C) */
-
-#define BIT_BCN1_POLL BIT(30)
-
-/* 2 REG_CPU_MGQ_INFO (Offset 0x041C) */
-
-#define BIT_CPUMGT_POLL BIT(29)
-#define BIT_BCN_POLL BIT(28)
-
-/* 2 REG_CPU_MGQ_INFO (Offset 0x041C) */
-
-#define BIT_CPUMGQ_FW_NUM_V1 BIT(12)
-
-/* 2 REG_CPU_MGQ_INFO (Offset 0x041C) */
-
-#define BIT_SHIFT_FW_FREE_TAIL_V1 0
-#define BIT_MASK_FW_FREE_TAIL_V1 0xfff
-#define BIT_FW_FREE_TAIL_V1(x) \
- (((x) & BIT_MASK_FW_FREE_TAIL_V1) << BIT_SHIFT_FW_FREE_TAIL_V1)
-#define BIT_GET_FW_FREE_TAIL_V1(x) \
- (((x) >> BIT_SHIFT_FW_FREE_TAIL_V1) & BIT_MASK_FW_FREE_TAIL_V1)
-
-/* 2 REG_FWHW_TXQ_CTRL (Offset 0x0420) */
-
-#define BIT_RTS_LIMIT_IN_OFDM BIT(23)
-#define BIT_EN_BCNQ_DL BIT(22)
-#define BIT_EN_RD_RESP_NAV_BK BIT(21)
-#define BIT_EN_WR_FREE_TAIL BIT(20)
-
-#define BIT_SHIFT_EN_QUEUE_RPT 8
-#define BIT_MASK_EN_QUEUE_RPT 0xff
-#define BIT_EN_QUEUE_RPT(x) \
- (((x) & BIT_MASK_EN_QUEUE_RPT) << BIT_SHIFT_EN_QUEUE_RPT)
-#define BIT_GET_EN_QUEUE_RPT(x) \
- (((x) >> BIT_SHIFT_EN_QUEUE_RPT) & BIT_MASK_EN_QUEUE_RPT)
-
-#define BIT_EN_RTY_BK BIT(7)
-#define BIT_EN_USE_INI_RAT BIT(6)
-#define BIT_EN_RTS_NAV_BK BIT(5)
-#define BIT_DIS_SSN_CHECK BIT(4)
-#define BIT_MACID_MATCH_RTS BIT(3)
-
-/* 2 REG_FWHW_TXQ_CTRL (Offset 0x0420) */
-
-#define BIT_EN_BCN_TRXRPT_V1 BIT(2)
-
-/* 2 REG_FWHW_TXQ_CTRL (Offset 0x0420) */
-
-#define BIT_EN_FTMACKRPT BIT(1)
-
-/* 2 REG_FWHW_TXQ_CTRL (Offset 0x0420) */
-
-#define BIT_EN_FTMRPT BIT(0)
-
-/* 2 REG_DATAFB_SEL (Offset 0x0423) */
-
-#define BIT__R_EN_RTY_BK_COD BIT(2)
-
-/* 2 REG_DATAFB_SEL (Offset 0x0423) */
-
-#define BIT_SHIFT__R_DATA_FALLBACK_SEL 0
-#define BIT_MASK__R_DATA_FALLBACK_SEL 0x3
-#define BIT__R_DATA_FALLBACK_SEL(x) \
- (((x) & BIT_MASK__R_DATA_FALLBACK_SEL) \
- << BIT_SHIFT__R_DATA_FALLBACK_SEL)
-#define BIT_GET__R_DATA_FALLBACK_SEL(x) \
- (((x) >> BIT_SHIFT__R_DATA_FALLBACK_SEL) & \
- BIT_MASK__R_DATA_FALLBACK_SEL)
-
-/* 2 REG_BCNQ_BDNY_V1 (Offset 0x0424) */
-
-#define BIT_SHIFT_BCNQ_PGBNDY_V1 0
-#define BIT_MASK_BCNQ_PGBNDY_V1 0xfff
-#define BIT_BCNQ_PGBNDY_V1(x) \
- (((x) & BIT_MASK_BCNQ_PGBNDY_V1) << BIT_SHIFT_BCNQ_PGBNDY_V1)
-#define BIT_GET_BCNQ_PGBNDY_V1(x) \
- (((x) >> BIT_SHIFT_BCNQ_PGBNDY_V1) & BIT_MASK_BCNQ_PGBNDY_V1)
-
-/* 2 REG_LIFETIME_EN (Offset 0x0426) */
-
-#define BIT_BT_INT_CPU BIT(7)
-#define BIT_BT_INT_PTA BIT(6)
-
-/* 2 REG_LIFETIME_EN (Offset 0x0426) */
-
-#define BIT_EN_CTRL_RTYBIT BIT(4)
-
-/* 2 REG_LIFETIME_EN (Offset 0x0426) */
-
-#define BIT_LIFETIME_BK_EN BIT(3)
-#define BIT_LIFETIME_BE_EN BIT(2)
-#define BIT_LIFETIME_VI_EN BIT(1)
-#define BIT_LIFETIME_VO_EN BIT(0)
-
-/* 2 REG_SPEC_SIFS (Offset 0x0428) */
-
-#define BIT_SHIFT_SPEC_SIFS_OFDM_PTCL 8
-#define BIT_MASK_SPEC_SIFS_OFDM_PTCL 0xff
-#define BIT_SPEC_SIFS_OFDM_PTCL(x) \
- (((x) & BIT_MASK_SPEC_SIFS_OFDM_PTCL) << BIT_SHIFT_SPEC_SIFS_OFDM_PTCL)
-#define BIT_GET_SPEC_SIFS_OFDM_PTCL(x) \
- (((x) >> BIT_SHIFT_SPEC_SIFS_OFDM_PTCL) & BIT_MASK_SPEC_SIFS_OFDM_PTCL)
-
-#define BIT_SHIFT_SPEC_SIFS_CCK_PTCL 0
-#define BIT_MASK_SPEC_SIFS_CCK_PTCL 0xff
-#define BIT_SPEC_SIFS_CCK_PTCL(x) \
- (((x) & BIT_MASK_SPEC_SIFS_CCK_PTCL) << BIT_SHIFT_SPEC_SIFS_CCK_PTCL)
-#define BIT_GET_SPEC_SIFS_CCK_PTCL(x) \
- (((x) >> BIT_SHIFT_SPEC_SIFS_CCK_PTCL) & BIT_MASK_SPEC_SIFS_CCK_PTCL)
-
-/* 2 REG_RETRY_LIMIT (Offset 0x042A) */
-
-#define BIT_SHIFT_SRL 8
-#define BIT_MASK_SRL 0x3f
-#define BIT_SRL(x) (((x) & BIT_MASK_SRL) << BIT_SHIFT_SRL)
-#define BIT_GET_SRL(x) (((x) >> BIT_SHIFT_SRL) & BIT_MASK_SRL)
-
-#define BIT_SHIFT_LRL 0
-#define BIT_MASK_LRL 0x3f
-#define BIT_LRL(x) (((x) & BIT_MASK_LRL) << BIT_SHIFT_LRL)
-#define BIT_GET_LRL(x) (((x) >> BIT_SHIFT_LRL) & BIT_MASK_LRL)
-
-/* 2 REG_TXBF_CTRL (Offset 0x042C) */
-
-#define BIT_R_ENABLE_NDPA BIT(31)
-#define BIT_USE_NDPA_PARAMETER BIT(30)
-#define BIT_R_PROP_TXBF BIT(29)
-#define BIT_R_EN_NDPA_INT BIT(28)
-#define BIT_R_TXBF1_80M BIT(27)
-#define BIT_R_TXBF1_40M BIT(26)
-#define BIT_R_TXBF1_20M BIT(25)
-
-#define BIT_SHIFT_R_TXBF1_AID 16
-#define BIT_MASK_R_TXBF1_AID 0x1ff
-#define BIT_R_TXBF1_AID(x) \
- (((x) & BIT_MASK_R_TXBF1_AID) << BIT_SHIFT_R_TXBF1_AID)
-#define BIT_GET_R_TXBF1_AID(x) \
- (((x) >> BIT_SHIFT_R_TXBF1_AID) & BIT_MASK_R_TXBF1_AID)
-
-/* 2 REG_TXBF_CTRL (Offset 0x042C) */
-
-#define BIT_DIS_NDP_BFEN BIT(15)
-
-/* 2 REG_TXBF_CTRL (Offset 0x042C) */
-
-#define BIT_R_TXBCN_NOBLOCK_NDP BIT(14)
-
-/* 2 REG_TXBF_CTRL (Offset 0x042C) */
-
-#define BIT_R_TXBF0_80M BIT(11)
-#define BIT_R_TXBF0_40M BIT(10)
-#define BIT_R_TXBF0_20M BIT(9)
-
-#define BIT_SHIFT_R_TXBF0_AID 0
-#define BIT_MASK_R_TXBF0_AID 0x1ff
-#define BIT_R_TXBF0_AID(x) \
- (((x) & BIT_MASK_R_TXBF0_AID) << BIT_SHIFT_R_TXBF0_AID)
-#define BIT_GET_R_TXBF0_AID(x) \
- (((x) >> BIT_SHIFT_R_TXBF0_AID) & BIT_MASK_R_TXBF0_AID)
-
-/* 2 REG_DARFRC (Offset 0x0430) */
-
-#define BIT_SHIFT_DARF_RC8 (56 & CPU_OPT_WIDTH)
-#define BIT_MASK_DARF_RC8 0x1f
-#define BIT_DARF_RC8(x) (((x) & BIT_MASK_DARF_RC8) << BIT_SHIFT_DARF_RC8)
-#define BIT_GET_DARF_RC8(x) (((x) >> BIT_SHIFT_DARF_RC8) & BIT_MASK_DARF_RC8)
-
-#define BIT_SHIFT_DARF_RC7 (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_DARF_RC7 0x1f
-#define BIT_DARF_RC7(x) (((x) & BIT_MASK_DARF_RC7) << BIT_SHIFT_DARF_RC7)
-#define BIT_GET_DARF_RC7(x) (((x) >> BIT_SHIFT_DARF_RC7) & BIT_MASK_DARF_RC7)
-
-#define BIT_SHIFT_DARF_RC6 (40 & CPU_OPT_WIDTH)
-#define BIT_MASK_DARF_RC6 0x1f
-#define BIT_DARF_RC6(x) (((x) & BIT_MASK_DARF_RC6) << BIT_SHIFT_DARF_RC6)
-#define BIT_GET_DARF_RC6(x) (((x) >> BIT_SHIFT_DARF_RC6) & BIT_MASK_DARF_RC6)
-
-#define BIT_SHIFT_DARF_RC5 (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_DARF_RC5 0x1f
-#define BIT_DARF_RC5(x) (((x) & BIT_MASK_DARF_RC5) << BIT_SHIFT_DARF_RC5)
-#define BIT_GET_DARF_RC5(x) (((x) >> BIT_SHIFT_DARF_RC5) & BIT_MASK_DARF_RC5)
-
-#define BIT_SHIFT_DARF_RC4 24
-#define BIT_MASK_DARF_RC4 0x1f
-#define BIT_DARF_RC4(x) (((x) & BIT_MASK_DARF_RC4) << BIT_SHIFT_DARF_RC4)
-#define BIT_GET_DARF_RC4(x) (((x) >> BIT_SHIFT_DARF_RC4) & BIT_MASK_DARF_RC4)
-
-#define BIT_SHIFT_DARF_RC3 16
-#define BIT_MASK_DARF_RC3 0x1f
-#define BIT_DARF_RC3(x) (((x) & BIT_MASK_DARF_RC3) << BIT_SHIFT_DARF_RC3)
-#define BIT_GET_DARF_RC3(x) (((x) >> BIT_SHIFT_DARF_RC3) & BIT_MASK_DARF_RC3)
-
-#define BIT_SHIFT_DARF_RC2 8
-#define BIT_MASK_DARF_RC2 0x1f
-#define BIT_DARF_RC2(x) (((x) & BIT_MASK_DARF_RC2) << BIT_SHIFT_DARF_RC2)
-#define BIT_GET_DARF_RC2(x) (((x) >> BIT_SHIFT_DARF_RC2) & BIT_MASK_DARF_RC2)
-
-#define BIT_SHIFT_DARF_RC1 0
-#define BIT_MASK_DARF_RC1 0x1f
-#define BIT_DARF_RC1(x) (((x) & BIT_MASK_DARF_RC1) << BIT_SHIFT_DARF_RC1)
-#define BIT_GET_DARF_RC1(x) (((x) >> BIT_SHIFT_DARF_RC1) & BIT_MASK_DARF_RC1)
-
-/* 2 REG_RARFRC (Offset 0x0438) */
-
-#define BIT_SHIFT_RARF_RC8 (56 & CPU_OPT_WIDTH)
-#define BIT_MASK_RARF_RC8 0x1f
-#define BIT_RARF_RC8(x) (((x) & BIT_MASK_RARF_RC8) << BIT_SHIFT_RARF_RC8)
-#define BIT_GET_RARF_RC8(x) (((x) >> BIT_SHIFT_RARF_RC8) & BIT_MASK_RARF_RC8)
-
-#define BIT_SHIFT_RARF_RC7 (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_RARF_RC7 0x1f
-#define BIT_RARF_RC7(x) (((x) & BIT_MASK_RARF_RC7) << BIT_SHIFT_RARF_RC7)
-#define BIT_GET_RARF_RC7(x) (((x) >> BIT_SHIFT_RARF_RC7) & BIT_MASK_RARF_RC7)
-
-#define BIT_SHIFT_RARF_RC6 (40 & CPU_OPT_WIDTH)
-#define BIT_MASK_RARF_RC6 0x1f
-#define BIT_RARF_RC6(x) (((x) & BIT_MASK_RARF_RC6) << BIT_SHIFT_RARF_RC6)
-#define BIT_GET_RARF_RC6(x) (((x) >> BIT_SHIFT_RARF_RC6) & BIT_MASK_RARF_RC6)
-
-#define BIT_SHIFT_RARF_RC5 (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_RARF_RC5 0x1f
-#define BIT_RARF_RC5(x) (((x) & BIT_MASK_RARF_RC5) << BIT_SHIFT_RARF_RC5)
-#define BIT_GET_RARF_RC5(x) (((x) >> BIT_SHIFT_RARF_RC5) & BIT_MASK_RARF_RC5)
-
-#define BIT_SHIFT_RARF_RC4 24
-#define BIT_MASK_RARF_RC4 0x1f
-#define BIT_RARF_RC4(x) (((x) & BIT_MASK_RARF_RC4) << BIT_SHIFT_RARF_RC4)
-#define BIT_GET_RARF_RC4(x) (((x) >> BIT_SHIFT_RARF_RC4) & BIT_MASK_RARF_RC4)
-
-#define BIT_SHIFT_RARF_RC3 16
-#define BIT_MASK_RARF_RC3 0x1f
-#define BIT_RARF_RC3(x) (((x) & BIT_MASK_RARF_RC3) << BIT_SHIFT_RARF_RC3)
-#define BIT_GET_RARF_RC3(x) (((x) >> BIT_SHIFT_RARF_RC3) & BIT_MASK_RARF_RC3)
-
-#define BIT_SHIFT_RARF_RC2 8
-#define BIT_MASK_RARF_RC2 0x1f
-#define BIT_RARF_RC2(x) (((x) & BIT_MASK_RARF_RC2) << BIT_SHIFT_RARF_RC2)
-#define BIT_GET_RARF_RC2(x) (((x) >> BIT_SHIFT_RARF_RC2) & BIT_MASK_RARF_RC2)
-
-#define BIT_SHIFT_RARF_RC1 0
-#define BIT_MASK_RARF_RC1 0x1f
-#define BIT_RARF_RC1(x) (((x) & BIT_MASK_RARF_RC1) << BIT_SHIFT_RARF_RC1)
-#define BIT_GET_RARF_RC1(x) (((x) >> BIT_SHIFT_RARF_RC1) & BIT_MASK_RARF_RC1)
-
-/* 2 REG_RRSR (Offset 0x0440) */
-
-#define BIT_SHIFT_RRSR_RSC 21
-#define BIT_MASK_RRSR_RSC 0x3
-#define BIT_RRSR_RSC(x) (((x) & BIT_MASK_RRSR_RSC) << BIT_SHIFT_RRSR_RSC)
-#define BIT_GET_RRSR_RSC(x) (((x) >> BIT_SHIFT_RRSR_RSC) & BIT_MASK_RRSR_RSC)
-
-#define BIT_RRSR_BW BIT(20)
-
-#define BIT_SHIFT_RRSC_BITMAP 0
-#define BIT_MASK_RRSC_BITMAP 0xfffff
-#define BIT_RRSC_BITMAP(x) \
- (((x) & BIT_MASK_RRSC_BITMAP) << BIT_SHIFT_RRSC_BITMAP)
-#define BIT_GET_RRSC_BITMAP(x) \
- (((x) >> BIT_SHIFT_RRSC_BITMAP) & BIT_MASK_RRSC_BITMAP)
-
-/* 2 REG_ARFR0 (Offset 0x0444) */
-
-#define BIT_SHIFT_ARFR0_V1 0
-#define BIT_MASK_ARFR0_V1 0xffffffffffffffffL
-#define BIT_ARFR0_V1(x) (((x) & BIT_MASK_ARFR0_V1) << BIT_SHIFT_ARFR0_V1)
-#define BIT_GET_ARFR0_V1(x) (((x) >> BIT_SHIFT_ARFR0_V1) & BIT_MASK_ARFR0_V1)
-
-/* 2 REG_ARFR1_V1 (Offset 0x044C) */
-
-#define BIT_SHIFT_ARFR1_V1 0
-#define BIT_MASK_ARFR1_V1 0xffffffffffffffffL
-#define BIT_ARFR1_V1(x) (((x) & BIT_MASK_ARFR1_V1) << BIT_SHIFT_ARFR1_V1)
-#define BIT_GET_ARFR1_V1(x) (((x) >> BIT_SHIFT_ARFR1_V1) & BIT_MASK_ARFR1_V1)
-
-/* 2 REG_CCK_CHECK (Offset 0x0454) */
-
-#define BIT_CHECK_CCK_EN BIT(7)
-#define BIT_EN_BCN_PKT_REL BIT(6)
-#define BIT_BCN_PORT_SEL BIT(5)
-#define BIT_MOREDATA_BYPASS BIT(4)
-#define BIT_EN_CLR_CMD_REL_BCN_PKT BIT(3)
-
-/* 2 REG_CCK_CHECK (Offset 0x0454) */
-
-#define BIT_R_EN_SET_MOREDATA BIT(2)
-#define BIT__R_DIS_CLEAR_MACID_RELEASE BIT(1)
-#define BIT__R_MACID_RELEASE_EN BIT(0)
-
-/* 2 REG_AMPDU_MAX_TIME (Offset 0x0456) */
-
-#define BIT_SHIFT_AMPDU_MAX_TIME 0
-#define BIT_MASK_AMPDU_MAX_TIME 0xff
-#define BIT_AMPDU_MAX_TIME(x) \
- (((x) & BIT_MASK_AMPDU_MAX_TIME) << BIT_SHIFT_AMPDU_MAX_TIME)
-#define BIT_GET_AMPDU_MAX_TIME(x) \
- (((x) >> BIT_SHIFT_AMPDU_MAX_TIME) & BIT_MASK_AMPDU_MAX_TIME)
-
-/* 2 REG_BCNQ1_BDNY_V1 (Offset 0x0456) */
-
-#define BIT_SHIFT_BCNQ1_PGBNDY_V1 0
-#define BIT_MASK_BCNQ1_PGBNDY_V1 0xfff
-#define BIT_BCNQ1_PGBNDY_V1(x) \
- (((x) & BIT_MASK_BCNQ1_PGBNDY_V1) << BIT_SHIFT_BCNQ1_PGBNDY_V1)
-#define BIT_GET_BCNQ1_PGBNDY_V1(x) \
- (((x) >> BIT_SHIFT_BCNQ1_PGBNDY_V1) & BIT_MASK_BCNQ1_PGBNDY_V1)
-
-/* 2 REG_AMPDU_MAX_LENGTH (Offset 0x0458) */
-
-#define BIT_SHIFT_AMPDU_MAX_LENGTH 0
-#define BIT_MASK_AMPDU_MAX_LENGTH 0xffffffffL
-#define BIT_AMPDU_MAX_LENGTH(x) \
- (((x) & BIT_MASK_AMPDU_MAX_LENGTH) << BIT_SHIFT_AMPDU_MAX_LENGTH)
-#define BIT_GET_AMPDU_MAX_LENGTH(x) \
- (((x) >> BIT_SHIFT_AMPDU_MAX_LENGTH) & BIT_MASK_AMPDU_MAX_LENGTH)
-
-/* 2 REG_ACQ_STOP (Offset 0x045C) */
-
-#define BIT_AC7Q_STOP BIT(7)
-#define BIT_AC6Q_STOP BIT(6)
-#define BIT_AC5Q_STOP BIT(5)
-#define BIT_AC4Q_STOP BIT(4)
-#define BIT_AC3Q_STOP BIT(3)
-#define BIT_AC2Q_STOP BIT(2)
-#define BIT_AC1Q_STOP BIT(1)
-#define BIT_AC0Q_STOP BIT(0)
-
-/* 2 REG_NDPA_RATE (Offset 0x045D) */
-
-#define BIT_SHIFT_R_NDPA_RATE_V1 0
-#define BIT_MASK_R_NDPA_RATE_V1 0xff
-#define BIT_R_NDPA_RATE_V1(x) \
- (((x) & BIT_MASK_R_NDPA_RATE_V1) << BIT_SHIFT_R_NDPA_RATE_V1)
-#define BIT_GET_R_NDPA_RATE_V1(x) \
- (((x) >> BIT_SHIFT_R_NDPA_RATE_V1) & BIT_MASK_R_NDPA_RATE_V1)
-
-/* 2 REG_TX_HANG_CTRL (Offset 0x045E) */
-
-#define BIT_R_EN_GNT_BT_AWAKE BIT(3)
-
-/* 2 REG_TX_HANG_CTRL (Offset 0x045E) */
-
-#define BIT_EN_EOF_V1 BIT(2)
-
-/* 2 REG_TX_HANG_CTRL (Offset 0x045E) */
-
-#define BIT_DIS_OQT_BLOCK BIT(1)
-#define BIT_SEARCH_QUEUE_EN BIT(0)
-
-/* 2 REG_NDPA_OPT_CTRL (Offset 0x045F) */
-
-#define BIT_R_DIS_MACID_RELEASE_RTY BIT(5)
-
-/* 2 REG_NDPA_OPT_CTRL (Offset 0x045F) */
-
-#define BIT_SHIFT_BW_SIGTA 3
-#define BIT_MASK_BW_SIGTA 0x3
-#define BIT_BW_SIGTA(x) (((x) & BIT_MASK_BW_SIGTA) << BIT_SHIFT_BW_SIGTA)
-#define BIT_GET_BW_SIGTA(x) (((x) >> BIT_SHIFT_BW_SIGTA) & BIT_MASK_BW_SIGTA)
-
-/* 2 REG_NDPA_OPT_CTRL (Offset 0x045F) */
-
-#define BIT_EN_BAR_SIGTA BIT(2)
-
-/* 2 REG_NDPA_OPT_CTRL (Offset 0x045F) */
-
-#define BIT_SHIFT_R_NDPA_BW 0
-#define BIT_MASK_R_NDPA_BW 0x3
-#define BIT_R_NDPA_BW(x) (((x) & BIT_MASK_R_NDPA_BW) << BIT_SHIFT_R_NDPA_BW)
-#define BIT_GET_R_NDPA_BW(x) (((x) >> BIT_SHIFT_R_NDPA_BW) & BIT_MASK_R_NDPA_BW)
-
-/* 2 REG_RD_RESP_PKT_TH (Offset 0x0463) */
-
-#define BIT_SHIFT_RD_RESP_PKT_TH_V1 0
-#define BIT_MASK_RD_RESP_PKT_TH_V1 0x3f
-#define BIT_RD_RESP_PKT_TH_V1(x) \
- (((x) & BIT_MASK_RD_RESP_PKT_TH_V1) << BIT_SHIFT_RD_RESP_PKT_TH_V1)
-#define BIT_GET_RD_RESP_PKT_TH_V1(x) \
- (((x) >> BIT_SHIFT_RD_RESP_PKT_TH_V1) & BIT_MASK_RD_RESP_PKT_TH_V1)
-
-/* 2 REG_CMDQ_INFO (Offset 0x0464) */
-
-#define BIT_SHIFT_QUEUEMACID_CMDQ_V1 25
-#define BIT_MASK_QUEUEMACID_CMDQ_V1 0x7f
-#define BIT_QUEUEMACID_CMDQ_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_CMDQ_V1) << BIT_SHIFT_QUEUEMACID_CMDQ_V1)
-#define BIT_GET_QUEUEMACID_CMDQ_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_CMDQ_V1) & BIT_MASK_QUEUEMACID_CMDQ_V1)
-
-/* 2 REG_CMDQ_INFO (Offset 0x0464) */
-
-#define BIT_SHIFT_QUEUEAC_CMDQ_V1 23
-#define BIT_MASK_QUEUEAC_CMDQ_V1 0x3
-#define BIT_QUEUEAC_CMDQ_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_CMDQ_V1) << BIT_SHIFT_QUEUEAC_CMDQ_V1)
-#define BIT_GET_QUEUEAC_CMDQ_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_CMDQ_V1) & BIT_MASK_QUEUEAC_CMDQ_V1)
-
-/* 2 REG_CMDQ_INFO (Offset 0x0464) */
-
-#define BIT_TIDEMPTY_CMDQ_V1 BIT(22)
-
-/* 2 REG_CMDQ_INFO (Offset 0x0464) */
-
-#define BIT_SHIFT_TAIL_PKT_CMDQ_V2 11
-#define BIT_MASK_TAIL_PKT_CMDQ_V2 0x7ff
-#define BIT_TAIL_PKT_CMDQ_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_CMDQ_V2) << BIT_SHIFT_TAIL_PKT_CMDQ_V2)
-#define BIT_GET_TAIL_PKT_CMDQ_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_CMDQ_V2) & BIT_MASK_TAIL_PKT_CMDQ_V2)
-
-/* 2 REG_CMDQ_INFO (Offset 0x0464) */
-
-#define BIT_SHIFT_HEAD_PKT_CMDQ_V1 0
-#define BIT_MASK_HEAD_PKT_CMDQ_V1 0x7ff
-#define BIT_HEAD_PKT_CMDQ_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_CMDQ_V1) << BIT_SHIFT_HEAD_PKT_CMDQ_V1)
-#define BIT_GET_HEAD_PKT_CMDQ_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_CMDQ_V1) & BIT_MASK_HEAD_PKT_CMDQ_V1)
-
-/* 2 REG_Q4_INFO (Offset 0x0468) */
-
-#define BIT_SHIFT_QUEUEMACID_Q4_V1 25
-#define BIT_MASK_QUEUEMACID_Q4_V1 0x7f
-#define BIT_QUEUEMACID_Q4_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q4_V1) << BIT_SHIFT_QUEUEMACID_Q4_V1)
-#define BIT_GET_QUEUEMACID_Q4_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q4_V1) & BIT_MASK_QUEUEMACID_Q4_V1)
-
-#define BIT_SHIFT_QUEUEAC_Q4_V1 23
-#define BIT_MASK_QUEUEAC_Q4_V1 0x3
-#define BIT_QUEUEAC_Q4_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_Q4_V1) << BIT_SHIFT_QUEUEAC_Q4_V1)
-#define BIT_GET_QUEUEAC_Q4_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q4_V1) & BIT_MASK_QUEUEAC_Q4_V1)
-
-/* 2 REG_Q4_INFO (Offset 0x0468) */
-
-#define BIT_TIDEMPTY_Q4_V1 BIT(22)
-
-/* 2 REG_Q4_INFO (Offset 0x0468) */
-
-#define BIT_SHIFT_TAIL_PKT_Q4_V2 11
-#define BIT_MASK_TAIL_PKT_Q4_V2 0x7ff
-#define BIT_TAIL_PKT_Q4_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q4_V2) << BIT_SHIFT_TAIL_PKT_Q4_V2)
-#define BIT_GET_TAIL_PKT_Q4_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q4_V2) & BIT_MASK_TAIL_PKT_Q4_V2)
-
-/* 2 REG_Q4_INFO (Offset 0x0468) */
-
-#define BIT_SHIFT_HEAD_PKT_Q4_V1 0
-#define BIT_MASK_HEAD_PKT_Q4_V1 0x7ff
-#define BIT_HEAD_PKT_Q4_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q4_V1) << BIT_SHIFT_HEAD_PKT_Q4_V1)
-#define BIT_GET_HEAD_PKT_Q4_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q4_V1) & BIT_MASK_HEAD_PKT_Q4_V1)
-
-/* 2 REG_Q5_INFO (Offset 0x046C) */
-
-#define BIT_SHIFT_QUEUEMACID_Q5_V1 25
-#define BIT_MASK_QUEUEMACID_Q5_V1 0x7f
-#define BIT_QUEUEMACID_Q5_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q5_V1) << BIT_SHIFT_QUEUEMACID_Q5_V1)
-#define BIT_GET_QUEUEMACID_Q5_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q5_V1) & BIT_MASK_QUEUEMACID_Q5_V1)
-
-#define BIT_SHIFT_QUEUEAC_Q5_V1 23
-#define BIT_MASK_QUEUEAC_Q5_V1 0x3
-#define BIT_QUEUEAC_Q5_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_Q5_V1) << BIT_SHIFT_QUEUEAC_Q5_V1)
-#define BIT_GET_QUEUEAC_Q5_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q5_V1) & BIT_MASK_QUEUEAC_Q5_V1)
-
-/* 2 REG_Q5_INFO (Offset 0x046C) */
-
-#define BIT_TIDEMPTY_Q5_V1 BIT(22)
-
-/* 2 REG_Q5_INFO (Offset 0x046C) */
-
-#define BIT_SHIFT_TAIL_PKT_Q5_V2 11
-#define BIT_MASK_TAIL_PKT_Q5_V2 0x7ff
-#define BIT_TAIL_PKT_Q5_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q5_V2) << BIT_SHIFT_TAIL_PKT_Q5_V2)
-#define BIT_GET_TAIL_PKT_Q5_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q5_V2) & BIT_MASK_TAIL_PKT_Q5_V2)
-
-/* 2 REG_Q5_INFO (Offset 0x046C) */
-
-#define BIT_SHIFT_HEAD_PKT_Q5_V1 0
-#define BIT_MASK_HEAD_PKT_Q5_V1 0x7ff
-#define BIT_HEAD_PKT_Q5_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q5_V1) << BIT_SHIFT_HEAD_PKT_Q5_V1)
-#define BIT_GET_HEAD_PKT_Q5_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q5_V1) & BIT_MASK_HEAD_PKT_Q5_V1)
-
-/* 2 REG_Q6_INFO (Offset 0x0470) */
-
-#define BIT_SHIFT_QUEUEMACID_Q6_V1 25
-#define BIT_MASK_QUEUEMACID_Q6_V1 0x7f
-#define BIT_QUEUEMACID_Q6_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q6_V1) << BIT_SHIFT_QUEUEMACID_Q6_V1)
-#define BIT_GET_QUEUEMACID_Q6_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q6_V1) & BIT_MASK_QUEUEMACID_Q6_V1)
-
-#define BIT_SHIFT_QUEUEAC_Q6_V1 23
-#define BIT_MASK_QUEUEAC_Q6_V1 0x3
-#define BIT_QUEUEAC_Q6_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_Q6_V1) << BIT_SHIFT_QUEUEAC_Q6_V1)
-#define BIT_GET_QUEUEAC_Q6_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q6_V1) & BIT_MASK_QUEUEAC_Q6_V1)
-
-/* 2 REG_Q6_INFO (Offset 0x0470) */
-
-#define BIT_TIDEMPTY_Q6_V1 BIT(22)
-
-/* 2 REG_Q6_INFO (Offset 0x0470) */
-
-#define BIT_SHIFT_TAIL_PKT_Q6_V2 11
-#define BIT_MASK_TAIL_PKT_Q6_V2 0x7ff
-#define BIT_TAIL_PKT_Q6_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q6_V2) << BIT_SHIFT_TAIL_PKT_Q6_V2)
-#define BIT_GET_TAIL_PKT_Q6_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q6_V2) & BIT_MASK_TAIL_PKT_Q6_V2)
-
-/* 2 REG_Q6_INFO (Offset 0x0470) */
-
-#define BIT_SHIFT_HEAD_PKT_Q6_V1 0
-#define BIT_MASK_HEAD_PKT_Q6_V1 0x7ff
-#define BIT_HEAD_PKT_Q6_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q6_V1) << BIT_SHIFT_HEAD_PKT_Q6_V1)
-#define BIT_GET_HEAD_PKT_Q6_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q6_V1) & BIT_MASK_HEAD_PKT_Q6_V1)
-
-/* 2 REG_Q7_INFO (Offset 0x0474) */
-
-#define BIT_SHIFT_QUEUEMACID_Q7_V1 25
-#define BIT_MASK_QUEUEMACID_Q7_V1 0x7f
-#define BIT_QUEUEMACID_Q7_V1(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q7_V1) << BIT_SHIFT_QUEUEMACID_Q7_V1)
-#define BIT_GET_QUEUEMACID_Q7_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q7_V1) & BIT_MASK_QUEUEMACID_Q7_V1)
-
-#define BIT_SHIFT_QUEUEAC_Q7_V1 23
-#define BIT_MASK_QUEUEAC_Q7_V1 0x3
-#define BIT_QUEUEAC_Q7_V1(x) \
- (((x) & BIT_MASK_QUEUEAC_Q7_V1) << BIT_SHIFT_QUEUEAC_Q7_V1)
-#define BIT_GET_QUEUEAC_Q7_V1(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q7_V1) & BIT_MASK_QUEUEAC_Q7_V1)
-
-/* 2 REG_Q7_INFO (Offset 0x0474) */
-
-#define BIT_TIDEMPTY_Q7_V1 BIT(22)
-
-/* 2 REG_Q7_INFO (Offset 0x0474) */
-
-#define BIT_SHIFT_TAIL_PKT_Q7_V2 11
-#define BIT_MASK_TAIL_PKT_Q7_V2 0x7ff
-#define BIT_TAIL_PKT_Q7_V2(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q7_V2) << BIT_SHIFT_TAIL_PKT_Q7_V2)
-#define BIT_GET_TAIL_PKT_Q7_V2(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q7_V2) & BIT_MASK_TAIL_PKT_Q7_V2)
-
-/* 2 REG_Q7_INFO (Offset 0x0474) */
-
-#define BIT_SHIFT_HEAD_PKT_Q7_V1 0
-#define BIT_MASK_HEAD_PKT_Q7_V1 0x7ff
-#define BIT_HEAD_PKT_Q7_V1(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q7_V1) << BIT_SHIFT_HEAD_PKT_Q7_V1)
-#define BIT_GET_HEAD_PKT_Q7_V1(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q7_V1) & BIT_MASK_HEAD_PKT_Q7_V1)
-
-/* 2 REG_WMAC_LBK_BUF_HD_V1 (Offset 0x0478) */
-
-#define BIT_SHIFT_WMAC_LBK_BUF_HEAD_V1 0
-#define BIT_MASK_WMAC_LBK_BUF_HEAD_V1 0xfff
-#define BIT_WMAC_LBK_BUF_HEAD_V1(x) \
- (((x) & BIT_MASK_WMAC_LBK_BUF_HEAD_V1) \
- << BIT_SHIFT_WMAC_LBK_BUF_HEAD_V1)
-#define BIT_GET_WMAC_LBK_BUF_HEAD_V1(x) \
- (((x) >> BIT_SHIFT_WMAC_LBK_BUF_HEAD_V1) & \
- BIT_MASK_WMAC_LBK_BUF_HEAD_V1)
-
-/* 2 REG_MGQ_BDNY_V1 (Offset 0x047A) */
-
-#define BIT_SHIFT_MGQ_PGBNDY_V1 0
-#define BIT_MASK_MGQ_PGBNDY_V1 0xfff
-#define BIT_MGQ_PGBNDY_V1(x) \
- (((x) & BIT_MASK_MGQ_PGBNDY_V1) << BIT_SHIFT_MGQ_PGBNDY_V1)
-#define BIT_GET_MGQ_PGBNDY_V1(x) \
- (((x) >> BIT_SHIFT_MGQ_PGBNDY_V1) & BIT_MASK_MGQ_PGBNDY_V1)
-
-/* 2 REG_TXRPT_CTRL (Offset 0x047C) */
-
-#define BIT_SHIFT_TRXRPT_TIMER_TH 24
-#define BIT_MASK_TRXRPT_TIMER_TH 0xff
-#define BIT_TRXRPT_TIMER_TH(x) \
- (((x) & BIT_MASK_TRXRPT_TIMER_TH) << BIT_SHIFT_TRXRPT_TIMER_TH)
-#define BIT_GET_TRXRPT_TIMER_TH(x) \
- (((x) >> BIT_SHIFT_TRXRPT_TIMER_TH) & BIT_MASK_TRXRPT_TIMER_TH)
-
-/* 2 REG_TXRPT_CTRL (Offset 0x047C) */
-
-#define BIT_SHIFT_TRXRPT_LEN_TH 16
-#define BIT_MASK_TRXRPT_LEN_TH 0xff
-#define BIT_TRXRPT_LEN_TH(x) \
- (((x) & BIT_MASK_TRXRPT_LEN_TH) << BIT_SHIFT_TRXRPT_LEN_TH)
-#define BIT_GET_TRXRPT_LEN_TH(x) \
- (((x) >> BIT_SHIFT_TRXRPT_LEN_TH) & BIT_MASK_TRXRPT_LEN_TH)
-
-/* 2 REG_TXRPT_CTRL (Offset 0x047C) */
-
-#define BIT_SHIFT_TRXRPT_READ_PTR 8
-#define BIT_MASK_TRXRPT_READ_PTR 0xff
-#define BIT_TRXRPT_READ_PTR(x) \
- (((x) & BIT_MASK_TRXRPT_READ_PTR) << BIT_SHIFT_TRXRPT_READ_PTR)
-#define BIT_GET_TRXRPT_READ_PTR(x) \
- (((x) >> BIT_SHIFT_TRXRPT_READ_PTR) & BIT_MASK_TRXRPT_READ_PTR)
-
-/* 2 REG_TXRPT_CTRL (Offset 0x047C) */
-
-#define BIT_SHIFT_TRXRPT_WRITE_PTR 0
-#define BIT_MASK_TRXRPT_WRITE_PTR 0xff
-#define BIT_TRXRPT_WRITE_PTR(x) \
- (((x) & BIT_MASK_TRXRPT_WRITE_PTR) << BIT_SHIFT_TRXRPT_WRITE_PTR)
-#define BIT_GET_TRXRPT_WRITE_PTR(x) \
- (((x) >> BIT_SHIFT_TRXRPT_WRITE_PTR) & BIT_MASK_TRXRPT_WRITE_PTR)
-
-/* 2 REG_INIRTS_RATE_SEL (Offset 0x0480) */
-
-#define BIT_LEAG_RTS_BW_DUP BIT(5)
-
-/* 2 REG_BASIC_CFEND_RATE (Offset 0x0481) */
-
-#define BIT_SHIFT_BASIC_CFEND_RATE 0
-#define BIT_MASK_BASIC_CFEND_RATE 0x1f
-#define BIT_BASIC_CFEND_RATE(x) \
- (((x) & BIT_MASK_BASIC_CFEND_RATE) << BIT_SHIFT_BASIC_CFEND_RATE)
-#define BIT_GET_BASIC_CFEND_RATE(x) \
- (((x) >> BIT_SHIFT_BASIC_CFEND_RATE) & BIT_MASK_BASIC_CFEND_RATE)
-
-/* 2 REG_STBC_CFEND_RATE (Offset 0x0482) */
-
-#define BIT_SHIFT_STBC_CFEND_RATE 0
-#define BIT_MASK_STBC_CFEND_RATE 0x1f
-#define BIT_STBC_CFEND_RATE(x) \
- (((x) & BIT_MASK_STBC_CFEND_RATE) << BIT_SHIFT_STBC_CFEND_RATE)
-#define BIT_GET_STBC_CFEND_RATE(x) \
- (((x) >> BIT_SHIFT_STBC_CFEND_RATE) & BIT_MASK_STBC_CFEND_RATE)
-
-/* 2 REG_DATA_SC (Offset 0x0483) */
-
-#define BIT_SHIFT_TXSC_40M 4
-#define BIT_MASK_TXSC_40M 0xf
-#define BIT_TXSC_40M(x) (((x) & BIT_MASK_TXSC_40M) << BIT_SHIFT_TXSC_40M)
-#define BIT_GET_TXSC_40M(x) (((x) >> BIT_SHIFT_TXSC_40M) & BIT_MASK_TXSC_40M)
-
-#define BIT_SHIFT_TXSC_20M 0
-#define BIT_MASK_TXSC_20M 0xf
-#define BIT_TXSC_20M(x) (((x) & BIT_MASK_TXSC_20M) << BIT_SHIFT_TXSC_20M)
-#define BIT_GET_TXSC_20M(x) (((x) >> BIT_SHIFT_TXSC_20M) & BIT_MASK_TXSC_20M)
-
-/* 2 REG_MACID_SLEEP3 (Offset 0x0484) */
-
-#define BIT_SHIFT_MACID127_96_PKTSLEEP 0
-#define BIT_MASK_MACID127_96_PKTSLEEP 0xffffffffL
-#define BIT_MACID127_96_PKTSLEEP(x) \
- (((x) & BIT_MASK_MACID127_96_PKTSLEEP) \
- << BIT_SHIFT_MACID127_96_PKTSLEEP)
-#define BIT_GET_MACID127_96_PKTSLEEP(x) \
- (((x) >> BIT_SHIFT_MACID127_96_PKTSLEEP) & \
- BIT_MASK_MACID127_96_PKTSLEEP)
-
-/* 2 REG_MACID_SLEEP1 (Offset 0x0488) */
-
-#define BIT_SHIFT_MACID63_32_PKTSLEEP 0
-#define BIT_MASK_MACID63_32_PKTSLEEP 0xffffffffL
-#define BIT_MACID63_32_PKTSLEEP(x) \
- (((x) & BIT_MASK_MACID63_32_PKTSLEEP) << BIT_SHIFT_MACID63_32_PKTSLEEP)
-#define BIT_GET_MACID63_32_PKTSLEEP(x) \
- (((x) >> BIT_SHIFT_MACID63_32_PKTSLEEP) & BIT_MASK_MACID63_32_PKTSLEEP)
-
-/* 2 REG_ARFR2_V1 (Offset 0x048C) */
-
-#define BIT_SHIFT_ARFR2_V1 0
-#define BIT_MASK_ARFR2_V1 0xffffffffffffffffL
-#define BIT_ARFR2_V1(x) (((x) & BIT_MASK_ARFR2_V1) << BIT_SHIFT_ARFR2_V1)
-#define BIT_GET_ARFR2_V1(x) (((x) >> BIT_SHIFT_ARFR2_V1) & BIT_MASK_ARFR2_V1)
-
-/* 2 REG_ARFR3_V1 (Offset 0x0494) */
-
-#define BIT_SHIFT_ARFR3_V1 0
-#define BIT_MASK_ARFR3_V1 0xffffffffffffffffL
-#define BIT_ARFR3_V1(x) (((x) & BIT_MASK_ARFR3_V1) << BIT_SHIFT_ARFR3_V1)
-#define BIT_GET_ARFR3_V1(x) (((x) >> BIT_SHIFT_ARFR3_V1) & BIT_MASK_ARFR3_V1)
-
-/* 2 REG_ARFR4 (Offset 0x049C) */
-
-#define BIT_SHIFT_ARFR4 0
-#define BIT_MASK_ARFR4 0xffffffffffffffffL
-#define BIT_ARFR4(x) (((x) & BIT_MASK_ARFR4) << BIT_SHIFT_ARFR4)
-#define BIT_GET_ARFR4(x) (((x) >> BIT_SHIFT_ARFR4) & BIT_MASK_ARFR4)
-
-/* 2 REG_ARFR5 (Offset 0x04A4) */
-
-#define BIT_SHIFT_ARFR5 0
-#define BIT_MASK_ARFR5 0xffffffffffffffffL
-#define BIT_ARFR5(x) (((x) & BIT_MASK_ARFR5) << BIT_SHIFT_ARFR5)
-#define BIT_GET_ARFR5(x) (((x) >> BIT_SHIFT_ARFR5) & BIT_MASK_ARFR5)
-
-/* 2 REG_TXRPT_START_OFFSET (Offset 0x04AC) */
-
-#define BIT_SHIFT_MACID_MURATE_OFFSET 24
-#define BIT_MASK_MACID_MURATE_OFFSET 0xff
-#define BIT_MACID_MURATE_OFFSET(x) \
- (((x) & BIT_MASK_MACID_MURATE_OFFSET) << BIT_SHIFT_MACID_MURATE_OFFSET)
-#define BIT_GET_MACID_MURATE_OFFSET(x) \
- (((x) >> BIT_SHIFT_MACID_MURATE_OFFSET) & BIT_MASK_MACID_MURATE_OFFSET)
-
-/* 2 REG_TXRPT_START_OFFSET (Offset 0x04AC) */
-
-#define BIT_RPTFIFO_SIZE_OPT BIT(16)
-
-/* 2 REG_TXRPT_START_OFFSET (Offset 0x04AC) */
-
-#define BIT_SHIFT_MACID_CTRL_OFFSET 8
-#define BIT_MASK_MACID_CTRL_OFFSET 0xff
-#define BIT_MACID_CTRL_OFFSET(x) \
- (((x) & BIT_MASK_MACID_CTRL_OFFSET) << BIT_SHIFT_MACID_CTRL_OFFSET)
-#define BIT_GET_MACID_CTRL_OFFSET(x) \
- (((x) >> BIT_SHIFT_MACID_CTRL_OFFSET) & BIT_MASK_MACID_CTRL_OFFSET)
-
-/* 2 REG_TXRPT_START_OFFSET (Offset 0x04AC) */
-
-#define BIT_SHIFT_AMPDU_TXRPT_OFFSET 0
-#define BIT_MASK_AMPDU_TXRPT_OFFSET 0xff
-#define BIT_AMPDU_TXRPT_OFFSET(x) \
- (((x) & BIT_MASK_AMPDU_TXRPT_OFFSET) << BIT_SHIFT_AMPDU_TXRPT_OFFSET)
-#define BIT_GET_AMPDU_TXRPT_OFFSET(x) \
- (((x) >> BIT_SHIFT_AMPDU_TXRPT_OFFSET) & BIT_MASK_AMPDU_TXRPT_OFFSET)
-
-/* 2 REG_POWER_STAGE1 (Offset 0x04B4) */
-
-#define BIT_PTA_WL_PRI_MASK_CPU_MGQ BIT(31)
-#define BIT_PTA_WL_PRI_MASK_BCNQ BIT(30)
-#define BIT_PTA_WL_PRI_MASK_HIQ BIT(29)
-#define BIT_PTA_WL_PRI_MASK_MGQ BIT(28)
-#define BIT_PTA_WL_PRI_MASK_BK BIT(27)
-#define BIT_PTA_WL_PRI_MASK_BE BIT(26)
-#define BIT_PTA_WL_PRI_MASK_VI BIT(25)
-#define BIT_PTA_WL_PRI_MASK_VO BIT(24)
-
-/* 2 REG_POWER_STAGE1 (Offset 0x04B4) */
-
-#define BIT_SHIFT_POWER_STAGE1 0
-#define BIT_MASK_POWER_STAGE1 0xffffff
-#define BIT_POWER_STAGE1(x) \
- (((x) & BIT_MASK_POWER_STAGE1) << BIT_SHIFT_POWER_STAGE1)
-#define BIT_GET_POWER_STAGE1(x) \
- (((x) >> BIT_SHIFT_POWER_STAGE1) & BIT_MASK_POWER_STAGE1)
-
-/* 2 REG_POWER_STAGE2 (Offset 0x04B8) */
-
-#define BIT__R_CTRL_PKT_POW_ADJ BIT(24)
-
-/* 2 REG_POWER_STAGE2 (Offset 0x04B8) */
-
-#define BIT_SHIFT_POWER_STAGE2 0
-#define BIT_MASK_POWER_STAGE2 0xffffff
-#define BIT_POWER_STAGE2(x) \
- (((x) & BIT_MASK_POWER_STAGE2) << BIT_SHIFT_POWER_STAGE2)
-#define BIT_GET_POWER_STAGE2(x) \
- (((x) >> BIT_SHIFT_POWER_STAGE2) & BIT_MASK_POWER_STAGE2)
-
-/* 2 REG_SW_AMPDU_BURST_MODE_CTRL (Offset 0x04BC) */
-
-#define BIT_SHIFT_PAD_NUM_THRES 24
-#define BIT_MASK_PAD_NUM_THRES 0x3f
-#define BIT_PAD_NUM_THRES(x) \
- (((x) & BIT_MASK_PAD_NUM_THRES) << BIT_SHIFT_PAD_NUM_THRES)
-#define BIT_GET_PAD_NUM_THRES(x) \
- (((x) >> BIT_SHIFT_PAD_NUM_THRES) & BIT_MASK_PAD_NUM_THRES)
-
-/* 2 REG_SW_AMPDU_BURST_MODE_CTRL (Offset 0x04BC) */
-
-#define BIT_R_DMA_THIS_QUEUE_BK BIT(23)
-#define BIT_R_DMA_THIS_QUEUE_BE BIT(22)
-#define BIT_R_DMA_THIS_QUEUE_VI BIT(21)
-#define BIT_R_DMA_THIS_QUEUE_VO BIT(20)
-
-#define BIT_SHIFT_R_TOTAL_LEN_TH 8
-#define BIT_MASK_R_TOTAL_LEN_TH 0xfff
-#define BIT_R_TOTAL_LEN_TH(x) \
- (((x) & BIT_MASK_R_TOTAL_LEN_TH) << BIT_SHIFT_R_TOTAL_LEN_TH)
-#define BIT_GET_R_TOTAL_LEN_TH(x) \
- (((x) >> BIT_SHIFT_R_TOTAL_LEN_TH) & BIT_MASK_R_TOTAL_LEN_TH)
-
-/* 2 REG_SW_AMPDU_BURST_MODE_CTRL (Offset 0x04BC) */
-
-#define BIT_EN_NEW_EARLY BIT(7)
-
-/* 2 REG_SW_AMPDU_BURST_MODE_CTRL (Offset 0x04BC) */
-
-#define BIT_PRE_TX_CMD BIT(6)
-
-#define BIT_SHIFT_NUM_SCL_EN 4
-#define BIT_MASK_NUM_SCL_EN 0x3
-#define BIT_NUM_SCL_EN(x) (((x) & BIT_MASK_NUM_SCL_EN) << BIT_SHIFT_NUM_SCL_EN)
-#define BIT_GET_NUM_SCL_EN(x) \
- (((x) >> BIT_SHIFT_NUM_SCL_EN) & BIT_MASK_NUM_SCL_EN)
-
-#define BIT_BK_EN BIT(3)
-#define BIT_BE_EN BIT(2)
-#define BIT_VI_EN BIT(1)
-#define BIT_VO_EN BIT(0)
-
-/* 2 REG_PKT_LIFE_TIME (Offset 0x04C0) */
-
-#define BIT_SHIFT_PKT_LIFTIME_BEBK 16
-#define BIT_MASK_PKT_LIFTIME_BEBK 0xffff
-#define BIT_PKT_LIFTIME_BEBK(x) \
- (((x) & BIT_MASK_PKT_LIFTIME_BEBK) << BIT_SHIFT_PKT_LIFTIME_BEBK)
-#define BIT_GET_PKT_LIFTIME_BEBK(x) \
- (((x) >> BIT_SHIFT_PKT_LIFTIME_BEBK) & BIT_MASK_PKT_LIFTIME_BEBK)
-
-#define BIT_SHIFT_PKT_LIFTIME_VOVI 0
-#define BIT_MASK_PKT_LIFTIME_VOVI 0xffff
-#define BIT_PKT_LIFTIME_VOVI(x) \
- (((x) & BIT_MASK_PKT_LIFTIME_VOVI) << BIT_SHIFT_PKT_LIFTIME_VOVI)
-#define BIT_GET_PKT_LIFTIME_VOVI(x) \
- (((x) >> BIT_SHIFT_PKT_LIFTIME_VOVI) & BIT_MASK_PKT_LIFTIME_VOVI)
-
-/* 2 REG_STBC_SETTING (Offset 0x04C4) */
-
-#define BIT_SHIFT_CDEND_TXTIME_L 4
-#define BIT_MASK_CDEND_TXTIME_L 0xf
-#define BIT_CDEND_TXTIME_L(x) \
- (((x) & BIT_MASK_CDEND_TXTIME_L) << BIT_SHIFT_CDEND_TXTIME_L)
-#define BIT_GET_CDEND_TXTIME_L(x) \
- (((x) >> BIT_SHIFT_CDEND_TXTIME_L) & BIT_MASK_CDEND_TXTIME_L)
-
-#define BIT_SHIFT_NESS 2
-#define BIT_MASK_NESS 0x3
-#define BIT_NESS(x) (((x) & BIT_MASK_NESS) << BIT_SHIFT_NESS)
-#define BIT_GET_NESS(x) (((x) >> BIT_SHIFT_NESS) & BIT_MASK_NESS)
-
-#define BIT_SHIFT_STBC_CFEND 0
-#define BIT_MASK_STBC_CFEND 0x3
-#define BIT_STBC_CFEND(x) (((x) & BIT_MASK_STBC_CFEND) << BIT_SHIFT_STBC_CFEND)
-#define BIT_GET_STBC_CFEND(x) \
- (((x) >> BIT_SHIFT_STBC_CFEND) & BIT_MASK_STBC_CFEND)
-
-/* 2 REG_STBC_SETTING2 (Offset 0x04C5) */
-
-#define BIT_SHIFT_CDEND_TXTIME_H 0
-#define BIT_MASK_CDEND_TXTIME_H 0x1f
-#define BIT_CDEND_TXTIME_H(x) \
- (((x) & BIT_MASK_CDEND_TXTIME_H) << BIT_SHIFT_CDEND_TXTIME_H)
-#define BIT_GET_CDEND_TXTIME_H(x) \
- (((x) >> BIT_SHIFT_CDEND_TXTIME_H) & BIT_MASK_CDEND_TXTIME_H)
-
-/* 2 REG_QUEUE_CTRL (Offset 0x04C6) */
-
-#define BIT_PTA_EDCCA_EN BIT(5)
-#define BIT_PTA_WL_TX_EN BIT(4)
-
-/* 2 REG_QUEUE_CTRL (Offset 0x04C6) */
-
-#define BIT_R_USE_DATA_BW BIT(3)
-#define BIT_TRI_PKT_INT_MODE1 BIT(2)
-#define BIT_TRI_PKT_INT_MODE0 BIT(1)
-#define BIT_ACQ_MODE_SEL BIT(0)
-
-/* 2 REG_SINGLE_AMPDU_CTRL (Offset 0x04C7) */
-
-#define BIT_EN_SINGLE_APMDU BIT(7)
-
-/* 2 REG_PROT_MODE_CTRL (Offset 0x04C8) */
-
-#define BIT_SHIFT_RTS_MAX_AGG_NUM 24
-#define BIT_MASK_RTS_MAX_AGG_NUM 0x3f
-#define BIT_RTS_MAX_AGG_NUM(x) \
- (((x) & BIT_MASK_RTS_MAX_AGG_NUM) << BIT_SHIFT_RTS_MAX_AGG_NUM)
-#define BIT_GET_RTS_MAX_AGG_NUM(x) \
- (((x) >> BIT_SHIFT_RTS_MAX_AGG_NUM) & BIT_MASK_RTS_MAX_AGG_NUM)
-
-#define BIT_SHIFT_MAX_AGG_NUM 16
-#define BIT_MASK_MAX_AGG_NUM 0x3f
-#define BIT_MAX_AGG_NUM(x) \
- (((x) & BIT_MASK_MAX_AGG_NUM) << BIT_SHIFT_MAX_AGG_NUM)
-#define BIT_GET_MAX_AGG_NUM(x) \
- (((x) >> BIT_SHIFT_MAX_AGG_NUM) & BIT_MASK_MAX_AGG_NUM)
-
-#define BIT_SHIFT_RTS_TXTIME_TH 8
-#define BIT_MASK_RTS_TXTIME_TH 0xff
-#define BIT_RTS_TXTIME_TH(x) \
- (((x) & BIT_MASK_RTS_TXTIME_TH) << BIT_SHIFT_RTS_TXTIME_TH)
-#define BIT_GET_RTS_TXTIME_TH(x) \
- (((x) >> BIT_SHIFT_RTS_TXTIME_TH) & BIT_MASK_RTS_TXTIME_TH)
-
-#define BIT_SHIFT_RTS_LEN_TH 0
-#define BIT_MASK_RTS_LEN_TH 0xff
-#define BIT_RTS_LEN_TH(x) (((x) & BIT_MASK_RTS_LEN_TH) << BIT_SHIFT_RTS_LEN_TH)
-#define BIT_GET_RTS_LEN_TH(x) \
- (((x) >> BIT_SHIFT_RTS_LEN_TH) & BIT_MASK_RTS_LEN_TH)
-
-/* 2 REG_BAR_MODE_CTRL (Offset 0x04CC) */
-
-#define BIT_SHIFT_BAR_RTY_LMT 16
-#define BIT_MASK_BAR_RTY_LMT 0x3
-#define BIT_BAR_RTY_LMT(x) \
- (((x) & BIT_MASK_BAR_RTY_LMT) << BIT_SHIFT_BAR_RTY_LMT)
-#define BIT_GET_BAR_RTY_LMT(x) \
- (((x) >> BIT_SHIFT_BAR_RTY_LMT) & BIT_MASK_BAR_RTY_LMT)
-
-#define BIT_SHIFT_BAR_PKT_TXTIME_TH 8
-#define BIT_MASK_BAR_PKT_TXTIME_TH 0xff
-#define BIT_BAR_PKT_TXTIME_TH(x) \
- (((x) & BIT_MASK_BAR_PKT_TXTIME_TH) << BIT_SHIFT_BAR_PKT_TXTIME_TH)
-#define BIT_GET_BAR_PKT_TXTIME_TH(x) \
- (((x) >> BIT_SHIFT_BAR_PKT_TXTIME_TH) & BIT_MASK_BAR_PKT_TXTIME_TH)
-
-#define BIT_BAR_EN_V1 BIT(6)
-
-#define BIT_SHIFT_BAR_PKTNUM_TH_V1 0
-#define BIT_MASK_BAR_PKTNUM_TH_V1 0x3f
-#define BIT_BAR_PKTNUM_TH_V1(x) \
- (((x) & BIT_MASK_BAR_PKTNUM_TH_V1) << BIT_SHIFT_BAR_PKTNUM_TH_V1)
-#define BIT_GET_BAR_PKTNUM_TH_V1(x) \
- (((x) >> BIT_SHIFT_BAR_PKTNUM_TH_V1) & BIT_MASK_BAR_PKTNUM_TH_V1)
-
-/* 2 REG_RA_TRY_RATE_AGG_LMT (Offset 0x04CF) */
-
-#define BIT_SHIFT_RA_TRY_RATE_AGG_LMT_V1 0
-#define BIT_MASK_RA_TRY_RATE_AGG_LMT_V1 0x3f
-#define BIT_RA_TRY_RATE_AGG_LMT_V1(x) \
- (((x) & BIT_MASK_RA_TRY_RATE_AGG_LMT_V1) \
- << BIT_SHIFT_RA_TRY_RATE_AGG_LMT_V1)
-#define BIT_GET_RA_TRY_RATE_AGG_LMT_V1(x) \
- (((x) >> BIT_SHIFT_RA_TRY_RATE_AGG_LMT_V1) & \
- BIT_MASK_RA_TRY_RATE_AGG_LMT_V1)
-
-/* 2 REG_MACID_SLEEP2 (Offset 0x04D0) */
-
-#define BIT_SHIFT_MACID95_64PKTSLEEP 0
-#define BIT_MASK_MACID95_64PKTSLEEP 0xffffffffL
-#define BIT_MACID95_64PKTSLEEP(x) \
- (((x) & BIT_MASK_MACID95_64PKTSLEEP) << BIT_SHIFT_MACID95_64PKTSLEEP)
-#define BIT_GET_MACID95_64PKTSLEEP(x) \
- (((x) >> BIT_SHIFT_MACID95_64PKTSLEEP) & BIT_MASK_MACID95_64PKTSLEEP)
-
-/* 2 REG_MACID_SLEEP (Offset 0x04D4) */
-
-#define BIT_SHIFT_MACID31_0_PKTSLEEP 0
-#define BIT_MASK_MACID31_0_PKTSLEEP 0xffffffffL
-#define BIT_MACID31_0_PKTSLEEP(x) \
- (((x) & BIT_MASK_MACID31_0_PKTSLEEP) << BIT_SHIFT_MACID31_0_PKTSLEEP)
-#define BIT_GET_MACID31_0_PKTSLEEP(x) \
- (((x) >> BIT_SHIFT_MACID31_0_PKTSLEEP) & BIT_MASK_MACID31_0_PKTSLEEP)
-
-/* 2 REG_HW_SEQ0 (Offset 0x04D8) */
-
-#define BIT_SHIFT_HW_SSN_SEQ0 0
-#define BIT_MASK_HW_SSN_SEQ0 0xfff
-#define BIT_HW_SSN_SEQ0(x) \
- (((x) & BIT_MASK_HW_SSN_SEQ0) << BIT_SHIFT_HW_SSN_SEQ0)
-#define BIT_GET_HW_SSN_SEQ0(x) \
- (((x) >> BIT_SHIFT_HW_SSN_SEQ0) & BIT_MASK_HW_SSN_SEQ0)
-
-/* 2 REG_HW_SEQ1 (Offset 0x04DA) */
-
-#define BIT_SHIFT_HW_SSN_SEQ1 0
-#define BIT_MASK_HW_SSN_SEQ1 0xfff
-#define BIT_HW_SSN_SEQ1(x) \
- (((x) & BIT_MASK_HW_SSN_SEQ1) << BIT_SHIFT_HW_SSN_SEQ1)
-#define BIT_GET_HW_SSN_SEQ1(x) \
- (((x) >> BIT_SHIFT_HW_SSN_SEQ1) & BIT_MASK_HW_SSN_SEQ1)
-
-/* 2 REG_HW_SEQ2 (Offset 0x04DC) */
-
-#define BIT_SHIFT_HW_SSN_SEQ2 0
-#define BIT_MASK_HW_SSN_SEQ2 0xfff
-#define BIT_HW_SSN_SEQ2(x) \
- (((x) & BIT_MASK_HW_SSN_SEQ2) << BIT_SHIFT_HW_SSN_SEQ2)
-#define BIT_GET_HW_SSN_SEQ2(x) \
- (((x) >> BIT_SHIFT_HW_SSN_SEQ2) & BIT_MASK_HW_SSN_SEQ2)
-
-/* 2 REG_HW_SEQ3 (Offset 0x04DE) */
-
-#define BIT_SHIFT_HW_SSN_SEQ3 0
-#define BIT_MASK_HW_SSN_SEQ3 0xfff
-#define BIT_HW_SSN_SEQ3(x) \
- (((x) & BIT_MASK_HW_SSN_SEQ3) << BIT_SHIFT_HW_SSN_SEQ3)
-#define BIT_GET_HW_SSN_SEQ3(x) \
- (((x) >> BIT_SHIFT_HW_SSN_SEQ3) & BIT_MASK_HW_SSN_SEQ3)
-
-/* 2 REG_NULL_PKT_STATUS_V1 (Offset 0x04E0) */
-
-#define BIT_SHIFT_PTCL_TOTAL_PG_V2 2
-#define BIT_MASK_PTCL_TOTAL_PG_V2 0x3fff
-#define BIT_PTCL_TOTAL_PG_V2(x) \
- (((x) & BIT_MASK_PTCL_TOTAL_PG_V2) << BIT_SHIFT_PTCL_TOTAL_PG_V2)
-#define BIT_GET_PTCL_TOTAL_PG_V2(x) \
- (((x) >> BIT_SHIFT_PTCL_TOTAL_PG_V2) & BIT_MASK_PTCL_TOTAL_PG_V2)
-
-/* 2 REG_NULL_PKT_STATUS (Offset 0x04E0) */
-
-#define BIT_TX_NULL_1 BIT(1)
-#define BIT_TX_NULL_0 BIT(0)
-
-/* 2 REG_PTCL_ERR_STATUS (Offset 0x04E2) */
-
-#define BIT_PTCL_RATE_TABLE_INVALID BIT(7)
-#define BIT_FTM_T2R_ERROR BIT(6)
-
-/* 2 REG_PTCL_ERR_STATUS (Offset 0x04E2) */
-
-#define BIT_PTCL_ERR0 BIT(5)
-#define BIT_PTCL_ERR1 BIT(4)
-#define BIT_PTCL_ERR2 BIT(3)
-#define BIT_PTCL_ERR3 BIT(2)
-#define BIT_PTCL_ERR4 BIT(1)
-#define BIT_PTCL_ERR5 BIT(0)
-
-/* 2 REG_NULL_PKT_STATUS_EXTEND (Offset 0x04E3) */
-
-#define BIT_CLI3_TX_NULL_1 BIT(7)
-#define BIT_CLI3_TX_NULL_0 BIT(6)
-#define BIT_CLI2_TX_NULL_1 BIT(5)
-#define BIT_CLI2_TX_NULL_0 BIT(4)
-#define BIT_CLI1_TX_NULL_1 BIT(3)
-#define BIT_CLI1_TX_NULL_0 BIT(2)
-#define BIT_CLI0_TX_NULL_1 BIT(1)
-
-/* 2 REG_NULL_PKT_STATUS_EXTEND (Offset 0x04E3) */
-
-#define BIT_CLI0_TX_NULL_0 BIT(0)
-
-/* 2 REG_VIDEO_ENHANCEMENT_FUN (Offset 0x04E4) */
-
-#define BIT_VIDEO_JUST_DROP BIT(1)
-#define BIT_VIDEO_ENHANCEMENT_FUN_EN BIT(0)
-
-/* 2 REG_BT_POLLUTE_PKT_CNT (Offset 0x04E8) */
-
-#define BIT_SHIFT_BT_POLLUTE_PKT_CNT 0
-#define BIT_MASK_BT_POLLUTE_PKT_CNT 0xffff
-#define BIT_BT_POLLUTE_PKT_CNT(x) \
- (((x) & BIT_MASK_BT_POLLUTE_PKT_CNT) << BIT_SHIFT_BT_POLLUTE_PKT_CNT)
-#define BIT_GET_BT_POLLUTE_PKT_CNT(x) \
- (((x) >> BIT_SHIFT_BT_POLLUTE_PKT_CNT) & BIT_MASK_BT_POLLUTE_PKT_CNT)
-
-/* 2 REG_PTCL_DBG (Offset 0x04EC) */
-
-#define BIT_SHIFT_PTCL_DBG 0
-#define BIT_MASK_PTCL_DBG 0xffffffffL
-#define BIT_PTCL_DBG(x) (((x) & BIT_MASK_PTCL_DBG) << BIT_SHIFT_PTCL_DBG)
-#define BIT_GET_PTCL_DBG(x) (((x) >> BIT_SHIFT_PTCL_DBG) & BIT_MASK_PTCL_DBG)
-
-/* 2 REG_CPUMGQ_TIMER_CTRL2 (Offset 0x04F4) */
-
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME BIT(31)
-
-#define BIT_SHIFT_GTAB_ID 28
-#define BIT_MASK_GTAB_ID 0x7
-#define BIT_GTAB_ID(x) (((x) & BIT_MASK_GTAB_ID) << BIT_SHIFT_GTAB_ID)
-#define BIT_GET_GTAB_ID(x) (((x) >> BIT_SHIFT_GTAB_ID) & BIT_MASK_GTAB_ID)
-
-#define BIT_SHIFT_TRI_HEAD_ADDR 16
-#define BIT_MASK_TRI_HEAD_ADDR 0xfff
-#define BIT_TRI_HEAD_ADDR(x) \
- (((x) & BIT_MASK_TRI_HEAD_ADDR) << BIT_SHIFT_TRI_HEAD_ADDR)
-#define BIT_GET_TRI_HEAD_ADDR(x) \
- (((x) >> BIT_SHIFT_TRI_HEAD_ADDR) & BIT_MASK_TRI_HEAD_ADDR)
-
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_V1 BIT(15)
-
-#define BIT_SHIFT_GTAB_ID_V1 12
-#define BIT_MASK_GTAB_ID_V1 0x7
-#define BIT_GTAB_ID_V1(x) (((x) & BIT_MASK_GTAB_ID_V1) << BIT_SHIFT_GTAB_ID_V1)
-#define BIT_GET_GTAB_ID_V1(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_V1) & BIT_MASK_GTAB_ID_V1)
-
-#define BIT_DROP_TH_EN BIT(8)
-
-#define BIT_SHIFT_DROP_TH 0
-#define BIT_MASK_DROP_TH 0xff
-#define BIT_DROP_TH(x) (((x) & BIT_MASK_DROP_TH) << BIT_SHIFT_DROP_TH)
-#define BIT_GET_DROP_TH(x) (((x) >> BIT_SHIFT_DROP_TH) & BIT_MASK_DROP_TH)
-
-/* 2 REG_DUMMY_PAGE4_V1 (Offset 0x04FC) */
-
-#define BIT_BCN_EN_EXTHWSEQ BIT(1)
-#define BIT_BCN_EN_HWSEQ BIT(0)
-
-/* 2 REG_MOREDATA (Offset 0x04FE) */
-
-#define BIT_MOREDATA_CTRL2_EN_V1 BIT(3)
-#define BIT_MOREDATA_CTRL1_EN_V1 BIT(2)
-#define BIT_PKTIN_MOREDATA_REPLACE_ENABLE_V1 BIT(0)
-
-/* 2 REG_EDCA_VO_PARAM (Offset 0x0500) */
-
-#define BIT_SHIFT_TXOPLIMIT 16
-#define BIT_MASK_TXOPLIMIT 0x7ff
-#define BIT_TXOPLIMIT(x) (((x) & BIT_MASK_TXOPLIMIT) << BIT_SHIFT_TXOPLIMIT)
-#define BIT_GET_TXOPLIMIT(x) (((x) >> BIT_SHIFT_TXOPLIMIT) & BIT_MASK_TXOPLIMIT)
-
-#define BIT_SHIFT_CW 8
-#define BIT_MASK_CW 0xff
-#define BIT_CW(x) (((x) & BIT_MASK_CW) << BIT_SHIFT_CW)
-#define BIT_GET_CW(x) (((x) >> BIT_SHIFT_CW) & BIT_MASK_CW)
-
-#define BIT_SHIFT_AIFS 0
-#define BIT_MASK_AIFS 0xff
-#define BIT_AIFS(x) (((x) & BIT_MASK_AIFS) << BIT_SHIFT_AIFS)
-#define BIT_GET_AIFS(x) (((x) >> BIT_SHIFT_AIFS) & BIT_MASK_AIFS)
-
-/* 2 REG_BCNTCFG (Offset 0x0510) */
-
-#define BIT_SHIFT_BCNCW_MAX 12
-#define BIT_MASK_BCNCW_MAX 0xf
-#define BIT_BCNCW_MAX(x) (((x) & BIT_MASK_BCNCW_MAX) << BIT_SHIFT_BCNCW_MAX)
-#define BIT_GET_BCNCW_MAX(x) (((x) >> BIT_SHIFT_BCNCW_MAX) & BIT_MASK_BCNCW_MAX)
-
-#define BIT_SHIFT_BCNCW_MIN 8
-#define BIT_MASK_BCNCW_MIN 0xf
-#define BIT_BCNCW_MIN(x) (((x) & BIT_MASK_BCNCW_MIN) << BIT_SHIFT_BCNCW_MIN)
-#define BIT_GET_BCNCW_MIN(x) (((x) >> BIT_SHIFT_BCNCW_MIN) & BIT_MASK_BCNCW_MIN)
-
-#define BIT_SHIFT_BCNIFS 0
-#define BIT_MASK_BCNIFS 0xff
-#define BIT_BCNIFS(x) (((x) & BIT_MASK_BCNIFS) << BIT_SHIFT_BCNIFS)
-#define BIT_GET_BCNIFS(x) (((x) >> BIT_SHIFT_BCNIFS) & BIT_MASK_BCNIFS)
-
-/* 2 REG_PIFS (Offset 0x0512) */
-
-#define BIT_SHIFT_PIFS 0
-#define BIT_MASK_PIFS 0xff
-#define BIT_PIFS(x) (((x) & BIT_MASK_PIFS) << BIT_SHIFT_PIFS)
-#define BIT_GET_PIFS(x) (((x) >> BIT_SHIFT_PIFS) & BIT_MASK_PIFS)
-
-/* 2 REG_RDG_PIFS (Offset 0x0513) */
-
-#define BIT_SHIFT_RDG_PIFS 0
-#define BIT_MASK_RDG_PIFS 0xff
-#define BIT_RDG_PIFS(x) (((x) & BIT_MASK_RDG_PIFS) << BIT_SHIFT_RDG_PIFS)
-#define BIT_GET_RDG_PIFS(x) (((x) >> BIT_SHIFT_RDG_PIFS) & BIT_MASK_RDG_PIFS)
-
-/* 2 REG_SIFS (Offset 0x0514) */
-
-#define BIT_SHIFT_SIFS_OFDM_TRX 24
-#define BIT_MASK_SIFS_OFDM_TRX 0xff
-#define BIT_SIFS_OFDM_TRX(x) \
- (((x) & BIT_MASK_SIFS_OFDM_TRX) << BIT_SHIFT_SIFS_OFDM_TRX)
-#define BIT_GET_SIFS_OFDM_TRX(x) \
- (((x) >> BIT_SHIFT_SIFS_OFDM_TRX) & BIT_MASK_SIFS_OFDM_TRX)
-
-#define BIT_SHIFT_SIFS_CCK_TRX 16
-#define BIT_MASK_SIFS_CCK_TRX 0xff
-#define BIT_SIFS_CCK_TRX(x) \
- (((x) & BIT_MASK_SIFS_CCK_TRX) << BIT_SHIFT_SIFS_CCK_TRX)
-#define BIT_GET_SIFS_CCK_TRX(x) \
- (((x) >> BIT_SHIFT_SIFS_CCK_TRX) & BIT_MASK_SIFS_CCK_TRX)
-
-#define BIT_SHIFT_SIFS_OFDM_CTX 8
-#define BIT_MASK_SIFS_OFDM_CTX 0xff
-#define BIT_SIFS_OFDM_CTX(x) \
- (((x) & BIT_MASK_SIFS_OFDM_CTX) << BIT_SHIFT_SIFS_OFDM_CTX)
-#define BIT_GET_SIFS_OFDM_CTX(x) \
- (((x) >> BIT_SHIFT_SIFS_OFDM_CTX) & BIT_MASK_SIFS_OFDM_CTX)
-
-#define BIT_SHIFT_SIFS_CCK_CTX 0
-#define BIT_MASK_SIFS_CCK_CTX 0xff
-#define BIT_SIFS_CCK_CTX(x) \
- (((x) & BIT_MASK_SIFS_CCK_CTX) << BIT_SHIFT_SIFS_CCK_CTX)
-#define BIT_GET_SIFS_CCK_CTX(x) \
- (((x) >> BIT_SHIFT_SIFS_CCK_CTX) & BIT_MASK_SIFS_CCK_CTX)
-
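REG_SIFS above packs four independent one-byte SIFS fields into a single dword, so updating one field is normally a clear-then-OR on the value read back from the register. A hedged sketch follows, using the macros copied from the block above; the initial register contents and the new value of 10 are made-up sample numbers, not values taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Field helpers copied from the REG_SIFS block above. */
#define BIT_SHIFT_SIFS_CCK_TRX 16
#define BIT_MASK_SIFS_CCK_TRX 0xff
#define BIT_SIFS_CCK_TRX(x) \
	(((x) & BIT_MASK_SIFS_CCK_TRX) << BIT_SHIFT_SIFS_CCK_TRX)
#define BIT_GET_SIFS_CCK_TRX(x) \
	(((x) >> BIT_SHIFT_SIFS_CCK_TRX) & BIT_MASK_SIFS_CCK_TRX)

int main(void)
{
	uint32_t sifs = 0x0e0e0a0a;	/* stand-in for a register read */

	/* Clear the CCK TRX byte, then insert the new value. */
	sifs &= ~((uint32_t)BIT_MASK_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX);
	sifs |= BIT_SIFS_CCK_TRX(10);

	printf("sifs=0x%08x cck_trx=%u\n", sifs, BIT_GET_SIFS_CCK_TRX(sifs));
	return 0;
}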
-/* 2 REG_TSFTR_SYN_OFFSET (Offset 0x0518) */
-
-#define BIT_SHIFT_TSFTR_SNC_OFFSET 0
-#define BIT_MASK_TSFTR_SNC_OFFSET 0xffff
-#define BIT_TSFTR_SNC_OFFSET(x) \
- (((x) & BIT_MASK_TSFTR_SNC_OFFSET) << BIT_SHIFT_TSFTR_SNC_OFFSET)
-#define BIT_GET_TSFTR_SNC_OFFSET(x) \
- (((x) >> BIT_SHIFT_TSFTR_SNC_OFFSET) & BIT_MASK_TSFTR_SNC_OFFSET)
-
-/* 2 REG_AGGR_BREAK_TIME (Offset 0x051A) */
-
-#define BIT_SHIFT_AGGR_BK_TIME 0
-#define BIT_MASK_AGGR_BK_TIME 0xff
-#define BIT_AGGR_BK_TIME(x) \
- (((x) & BIT_MASK_AGGR_BK_TIME) << BIT_SHIFT_AGGR_BK_TIME)
-#define BIT_GET_AGGR_BK_TIME(x) \
- (((x) >> BIT_SHIFT_AGGR_BK_TIME) & BIT_MASK_AGGR_BK_TIME)
-
-/* 2 REG_SLOT (Offset 0x051B) */
-
-#define BIT_SHIFT_SLOT 0
-#define BIT_MASK_SLOT 0xff
-#define BIT_SLOT(x) (((x) & BIT_MASK_SLOT) << BIT_SHIFT_SLOT)
-#define BIT_GET_SLOT(x) (((x) >> BIT_SHIFT_SLOT) & BIT_MASK_SLOT)
-
-/* 2 REG_TX_PTCL_CTRL (Offset 0x0520) */
-
-#define BIT_DIS_EDCCA BIT(15)
-#define BIT_DIS_CCA BIT(14)
-#define BIT_LSIG_TXOP_TXCMD_NAV BIT(13)
-#define BIT_SIFS_BK_EN BIT(12)
-
-#define BIT_SHIFT_TXQ_NAV_MSK 8
-#define BIT_MASK_TXQ_NAV_MSK 0xf
-#define BIT_TXQ_NAV_MSK(x) \
- (((x) & BIT_MASK_TXQ_NAV_MSK) << BIT_SHIFT_TXQ_NAV_MSK)
-#define BIT_GET_TXQ_NAV_MSK(x) \
- (((x) >> BIT_SHIFT_TXQ_NAV_MSK) & BIT_MASK_TXQ_NAV_MSK)
-
-#define BIT_DIS_CW BIT(7)
-#define BIT_NAV_END_TXOP BIT(6)
-#define BIT_RDG_END_TXOP BIT(5)
-#define BIT_AC_INBCN_HOLD BIT(4)
-#define BIT_MGTQ_TXOP_EN BIT(3)
-#define BIT_MGTQ_RTSMF_EN BIT(2)
-#define BIT_HIQ_RTSMF_EN BIT(1)
-#define BIT_BCN_RTSMF_EN BIT(0)
-
-/* 2 REG_TXPAUSE (Offset 0x0522) */
-
-#define BIT_STOP_BCN_HI_MGT BIT(7)
-#define BIT_MAC_STOPBCNQ BIT(6)
-#define BIT_MAC_STOPHIQ BIT(5)
-#define BIT_MAC_STOPMGQ BIT(4)
-#define BIT_MAC_STOPBK BIT(3)
-#define BIT_MAC_STOPBE BIT(2)
-#define BIT_MAC_STOPVI BIT(1)
-#define BIT_MAC_STOPVO BIT(0)
-
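REG_TXPAUSE above is a byte of independent queue-stop flags rather than a multi-bit field, so it is driven with plain BIT() masks and a read-modify-write. In the sketch below the register byte is modelled as a local variable, and BIT() is re-defined only to keep the example self-contained; the real driver would go through its own register accessors, which this hunk does not show.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define BIT_MAC_STOPBE	BIT(2)	/* pause the BE (best effort) queue */
#define BIT_MAC_STOPVI	BIT(1)	/* pause the VI (video) queue */

int main(void)
{
	uint8_t txpause = 0x00;		/* stand-in for the register byte */

	txpause |= BIT_MAC_STOPBE | BIT_MAC_STOPVI;	/* pause BE and VI */
	printf("paused:  0x%02x\n", txpause);

	txpause &= ~BIT_MAC_STOPVI;			/* resume VI only */
	printf("resumed: 0x%02x\n", txpause);
	return 0;
}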
-/* 2 REG_DIS_TXREQ_CLR (Offset 0x0523) */
-
-#define BIT_DIS_BT_CCA BIT(7)
-
-/* 2 REG_DIS_TXREQ_CLR (Offset 0x0523) */
-
-#define BIT_DIS_TXREQ_CLR_HI BIT(5)
-#define BIT_DIS_TXREQ_CLR_MGQ BIT(4)
-#define BIT_DIS_TXREQ_CLR_VO BIT(3)
-#define BIT_DIS_TXREQ_CLR_VI BIT(2)
-#define BIT_DIS_TXREQ_CLR_BE BIT(1)
-#define BIT_DIS_TXREQ_CLR_BK BIT(0)
-
-/* 2 REG_RD_CTRL (Offset 0x0524) */
-
-#define BIT_EN_CLR_TXREQ_INCCA BIT(15)
-#define BIT_DIS_TX_OVER_BCNQ BIT(14)
-
-/* 2 REG_RD_CTRL (Offset 0x0524) */
-
-#define BIT_EN_BCNERR_INCCCA BIT(13)
-
-/* 2 REG_RD_CTRL (Offset 0x0524) */
-
-#define BIT_EDCCA_MSK_CNTDOWN_EN BIT(11)
-#define BIT_DIS_TXOP_CFE BIT(10)
-#define BIT_DIS_LSIG_CFE BIT(9)
-#define BIT_DIS_STBC_CFE BIT(8)
-#define BIT_BKQ_RD_INIT_EN BIT(7)
-#define BIT_BEQ_RD_INIT_EN BIT(6)
-#define BIT_VIQ_RD_INIT_EN BIT(5)
-#define BIT_VOQ_RD_INIT_EN BIT(4)
-#define BIT_BKQ_RD_RESP_EN BIT(3)
-#define BIT_BEQ_RD_RESP_EN BIT(2)
-#define BIT_VIQ_RD_RESP_EN BIT(1)
-#define BIT_VOQ_RD_RESP_EN BIT(0)
-
-/* 2 REG_MBSSID_CTRL (Offset 0x0526) */
-
-#define BIT_MBID_BCNQ7_EN BIT(7)
-#define BIT_MBID_BCNQ6_EN BIT(6)
-#define BIT_MBID_BCNQ5_EN BIT(5)
-#define BIT_MBID_BCNQ4_EN BIT(4)
-#define BIT_MBID_BCNQ3_EN BIT(3)
-#define BIT_MBID_BCNQ2_EN BIT(2)
-#define BIT_MBID_BCNQ1_EN BIT(1)
-#define BIT_MBID_BCNQ0_EN BIT(0)
-
-/* 2 REG_P2PPS_CTRL (Offset 0x0527) */
-
-#define BIT_P2P_CTW_ALLSTASLEEP BIT(7)
-#define BIT_P2P_OFF_DISTX_EN BIT(6)
-#define BIT_PWR_MGT_EN BIT(5)
-
-/* 2 REG_P2PPS_CTRL (Offset 0x0527) */
-
-#define BIT_P2P_NOA1_EN BIT(2)
-#define BIT_P2P_NOA0_EN BIT(1)
-
-/* 2 REG_PKT_LIFETIME_CTRL (Offset 0x0528) */
-
-#define BIT_EN_P2P_CTWND1 BIT(23)
-
-/* 2 REG_PKT_LIFETIME_CTRL (Offset 0x0528) */
-
-#define BIT_EN_BKF_CLR_TXREQ BIT(22)
-#define BIT_EN_TSFBIT32_RST_P2P BIT(21)
-#define BIT_EN_BCN_TX_BTCCA BIT(20)
-#define BIT_DIS_PKT_TX_ATIM BIT(19)
-#define BIT_DIS_BCN_DIS_CTN BIT(18)
-#define BIT_EN_NAVEND_RST_TXOP BIT(17)
-#define BIT_EN_FILTER_CCA BIT(16)
-
-#define BIT_SHIFT_CCA_FILTER_THRS 8
-#define BIT_MASK_CCA_FILTER_THRS 0xff
-#define BIT_CCA_FILTER_THRS(x) \
- (((x) & BIT_MASK_CCA_FILTER_THRS) << BIT_SHIFT_CCA_FILTER_THRS)
-#define BIT_GET_CCA_FILTER_THRS(x) \
- (((x) >> BIT_SHIFT_CCA_FILTER_THRS) & BIT_MASK_CCA_FILTER_THRS)
-
-#define BIT_SHIFT_EDCCA_THRS 0
-#define BIT_MASK_EDCCA_THRS 0xff
-#define BIT_EDCCA_THRS(x) (((x) & BIT_MASK_EDCCA_THRS) << BIT_SHIFT_EDCCA_THRS)
-#define BIT_GET_EDCCA_THRS(x) \
- (((x) >> BIT_SHIFT_EDCCA_THRS) & BIT_MASK_EDCCA_THRS)
-
-/* 2 REG_P2PPS_SPEC_STATE (Offset 0x052B) */
-
-#define BIT_SPEC_POWER_STATE BIT(7)
-#define BIT_SPEC_CTWINDOW_ON BIT(6)
-#define BIT_SPEC_BEACON_AREA_ON BIT(5)
-#define BIT_SPEC_CTWIN_EARLY_DISTX BIT(4)
-#define BIT_SPEC_NOA1_OFF_PERIOD BIT(3)
-#define BIT_SPEC_FORCE_DOZE1 BIT(2)
-#define BIT_SPEC_NOA0_OFF_PERIOD BIT(1)
-#define BIT_SPEC_FORCE_DOZE0 BIT(0)
-
-/* 2 REG_QUEUE_INCOL_THR (Offset 0x0538) */
-
-#define BIT_SHIFT_BK_QUEUE_THR 24
-#define BIT_MASK_BK_QUEUE_THR 0xff
-#define BIT_BK_QUEUE_THR(x) \
- (((x) & BIT_MASK_BK_QUEUE_THR) << BIT_SHIFT_BK_QUEUE_THR)
-#define BIT_GET_BK_QUEUE_THR(x) \
- (((x) >> BIT_SHIFT_BK_QUEUE_THR) & BIT_MASK_BK_QUEUE_THR)
-
-#define BIT_SHIFT_BE_QUEUE_THR 16
-#define BIT_MASK_BE_QUEUE_THR 0xff
-#define BIT_BE_QUEUE_THR(x) \
- (((x) & BIT_MASK_BE_QUEUE_THR) << BIT_SHIFT_BE_QUEUE_THR)
-#define BIT_GET_BE_QUEUE_THR(x) \
- (((x) >> BIT_SHIFT_BE_QUEUE_THR) & BIT_MASK_BE_QUEUE_THR)
-
-#define BIT_SHIFT_VI_QUEUE_THR 8
-#define BIT_MASK_VI_QUEUE_THR 0xff
-#define BIT_VI_QUEUE_THR(x) \
- (((x) & BIT_MASK_VI_QUEUE_THR) << BIT_SHIFT_VI_QUEUE_THR)
-#define BIT_GET_VI_QUEUE_THR(x) \
- (((x) >> BIT_SHIFT_VI_QUEUE_THR) & BIT_MASK_VI_QUEUE_THR)
-
-#define BIT_SHIFT_VO_QUEUE_THR 0
-#define BIT_MASK_VO_QUEUE_THR 0xff
-#define BIT_VO_QUEUE_THR(x) \
- (((x) & BIT_MASK_VO_QUEUE_THR) << BIT_SHIFT_VO_QUEUE_THR)
-#define BIT_GET_VO_QUEUE_THR(x) \
- (((x) >> BIT_SHIFT_VO_QUEUE_THR) & BIT_MASK_VO_QUEUE_THR)
-
-/* 2 REG_QUEUE_INCOL_EN (Offset 0x053C) */
-
-#define BIT_QUEUE_INCOL_EN BIT(16)
-
-/* 2 REG_QUEUE_INCOL_EN (Offset 0x053C) */
-
-#define BIT_SHIFT_BE_TRIGGER_NUM 12
-#define BIT_MASK_BE_TRIGGER_NUM 0xf
-#define BIT_BE_TRIGGER_NUM(x) \
- (((x) & BIT_MASK_BE_TRIGGER_NUM) << BIT_SHIFT_BE_TRIGGER_NUM)
-#define BIT_GET_BE_TRIGGER_NUM(x) \
- (((x) >> BIT_SHIFT_BE_TRIGGER_NUM) & BIT_MASK_BE_TRIGGER_NUM)
-
-/* 2 REG_QUEUE_INCOL_EN (Offset 0x053C) */
-
-#define BIT_SHIFT_BK_TRIGGER_NUM 8
-#define BIT_MASK_BK_TRIGGER_NUM 0xf
-#define BIT_BK_TRIGGER_NUM(x) \
- (((x) & BIT_MASK_BK_TRIGGER_NUM) << BIT_SHIFT_BK_TRIGGER_NUM)
-#define BIT_GET_BK_TRIGGER_NUM(x) \
- (((x) >> BIT_SHIFT_BK_TRIGGER_NUM) & BIT_MASK_BK_TRIGGER_NUM)
-
-/* 2 REG_QUEUE_INCOL_EN (Offset 0x053C) */
-
-#define BIT_SHIFT_VI_TRIGGER_NUM 4
-#define BIT_MASK_VI_TRIGGER_NUM 0xf
-#define BIT_VI_TRIGGER_NUM(x) \
- (((x) & BIT_MASK_VI_TRIGGER_NUM) << BIT_SHIFT_VI_TRIGGER_NUM)
-#define BIT_GET_VI_TRIGGER_NUM(x) \
- (((x) >> BIT_SHIFT_VI_TRIGGER_NUM) & BIT_MASK_VI_TRIGGER_NUM)
-
-#define BIT_SHIFT_VO_TRIGGER_NUM 0
-#define BIT_MASK_VO_TRIGGER_NUM 0xf
-#define BIT_VO_TRIGGER_NUM(x) \
- (((x) & BIT_MASK_VO_TRIGGER_NUM) << BIT_SHIFT_VO_TRIGGER_NUM)
-#define BIT_GET_VO_TRIGGER_NUM(x) \
- (((x) >> BIT_SHIFT_VO_TRIGGER_NUM) & BIT_MASK_VO_TRIGGER_NUM)
-
-/* 2 REG_TBTT_PROHIBIT (Offset 0x0540) */
-
-#define BIT_SHIFT_TBTT_HOLD_TIME_AP 8
-#define BIT_MASK_TBTT_HOLD_TIME_AP 0xfff
-#define BIT_TBTT_HOLD_TIME_AP(x) \
- (((x) & BIT_MASK_TBTT_HOLD_TIME_AP) << BIT_SHIFT_TBTT_HOLD_TIME_AP)
-#define BIT_GET_TBTT_HOLD_TIME_AP(x) \
- (((x) >> BIT_SHIFT_TBTT_HOLD_TIME_AP) & BIT_MASK_TBTT_HOLD_TIME_AP)
-
-/* 2 REG_TBTT_PROHIBIT (Offset 0x0540) */
-
-#define BIT_SHIFT_TBTT_PROHIBIT_SETUP 0
-#define BIT_MASK_TBTT_PROHIBIT_SETUP 0xf
-#define BIT_TBTT_PROHIBIT_SETUP(x) \
- (((x) & BIT_MASK_TBTT_PROHIBIT_SETUP) << BIT_SHIFT_TBTT_PROHIBIT_SETUP)
-#define BIT_GET_TBTT_PROHIBIT_SETUP(x) \
- (((x) >> BIT_SHIFT_TBTT_PROHIBIT_SETUP) & BIT_MASK_TBTT_PROHIBIT_SETUP)
-
-/* 2 REG_P2PPS_STATE (Offset 0x0543) */
-
-#define BIT_POWER_STATE BIT(7)
-#define BIT_CTWINDOW_ON BIT(6)
-#define BIT_BEACON_AREA_ON BIT(5)
-#define BIT_CTWIN_EARLY_DISTX BIT(4)
-#define BIT_NOA1_OFF_PERIOD BIT(3)
-#define BIT_FORCE_DOZE1 BIT(2)
-#define BIT_NOA0_OFF_PERIOD BIT(1)
-#define BIT_FORCE_DOZE0 BIT(0)
-
-/* 2 REG_RD_NAV_NXT (Offset 0x0544) */
-
-#define BIT_SHIFT_RD_NAV_PROT_NXT 0
-#define BIT_MASK_RD_NAV_PROT_NXT 0xffff
-#define BIT_RD_NAV_PROT_NXT(x) \
- (((x) & BIT_MASK_RD_NAV_PROT_NXT) << BIT_SHIFT_RD_NAV_PROT_NXT)
-#define BIT_GET_RD_NAV_PROT_NXT(x) \
- (((x) >> BIT_SHIFT_RD_NAV_PROT_NXT) & BIT_MASK_RD_NAV_PROT_NXT)
-
-/* 2 REG_NAV_PROT_LEN (Offset 0x0546) */
-
-#define BIT_SHIFT_NAV_PROT_LEN 0
-#define BIT_MASK_NAV_PROT_LEN 0xffff
-#define BIT_NAV_PROT_LEN(x) \
- (((x) & BIT_MASK_NAV_PROT_LEN) << BIT_SHIFT_NAV_PROT_LEN)
-#define BIT_GET_NAV_PROT_LEN(x) \
- (((x) >> BIT_SHIFT_NAV_PROT_LEN) & BIT_MASK_NAV_PROT_LEN)
-
-/* 2 REG_BCN_CTRL (Offset 0x0550) */
-
-#define BIT_DIS_RX_BSSID_FIT BIT(6)
-
-/* 2 REG_BCN_CTRL (Offset 0x0550) */
-
-#define BIT_P0_EN_TXBCN_RPT BIT(5)
-
-/* 2 REG_BCN_CTRL (Offset 0x0550) */
-
-#define BIT_DIS_TSF_UDT BIT(4)
-#define BIT_EN_BCN_FUNCTION BIT(3)
-
-/* 2 REG_BCN_CTRL (Offset 0x0550) */
-
-#define BIT_P0_EN_RXBCN_RPT BIT(2)
-
-/* 2 REG_BCN_CTRL (Offset 0x0550) */
-
-#define BIT_EN_P2P_CTWINDOW BIT(1)
-#define BIT_EN_P2P_BCNQ_AREA BIT(0)
-
-/* 2 REG_BCN_CTRL_CLINT0 (Offset 0x0551) */
-
-#define BIT_CLI0_DIS_RX_BSSID_FIT BIT(6)
-
-/* 2 REG_BCN_CTRL_CLINT0 (Offset 0x0551) */
-
-#define BIT_CLI0_DIS_TSF_UDT BIT(4)
-
-/* 2 REG_BCN_CTRL_CLINT0 (Offset 0x0551) */
-
-#define BIT_CLI0_EN_BCN_FUNCTION BIT(3)
-
-/* 2 REG_BCN_CTRL_CLINT0 (Offset 0x0551) */
-
-#define BIT_CLI0_EN_RXBCN_RPT BIT(2)
-
-/* 2 REG_BCN_CTRL_CLINT0 (Offset 0x0551) */
-
-#define BIT_CLI0_ENP2P_CTWINDOW BIT(1)
-#define BIT_CLI0_ENP2P_BCNQ_AREA BIT(0)
-
-/* 2 REG_MBID_NUM (Offset 0x0552) */
-
-#define BIT_EN_PRE_DL_BEACON BIT(3)
-
-#define BIT_SHIFT_MBID_BCN_NUM 0
-#define BIT_MASK_MBID_BCN_NUM 0x7
-#define BIT_MBID_BCN_NUM(x) \
- (((x) & BIT_MASK_MBID_BCN_NUM) << BIT_SHIFT_MBID_BCN_NUM)
-#define BIT_GET_MBID_BCN_NUM(x) \
- (((x) >> BIT_SHIFT_MBID_BCN_NUM) & BIT_MASK_MBID_BCN_NUM)
-
-/* 2 REG_DUAL_TSF_RST (Offset 0x0553) */
-
-#define BIT_FREECNT_RST BIT(5)
-
-/* 2 REG_DUAL_TSF_RST (Offset 0x0553) */
-
-#define BIT_TSFTR_CLI3_RST BIT(4)
-
-/* 2 REG_DUAL_TSF_RST (Offset 0x0553) */
-
-#define BIT_TSFTR_CLI2_RST BIT(3)
-
-/* 2 REG_DUAL_TSF_RST (Offset 0x0553) */
-
-#define BIT_TSFTR_CLI1_RST BIT(2)
-
-/* 2 REG_DUAL_TSF_RST (Offset 0x0553) */
-
-#define BIT_TSFTR_CLI0_RST BIT(1)
-
-/* 2 REG_DUAL_TSF_RST (Offset 0x0553) */
-
-#define BIT_TSFTR_RST BIT(0)
-
-/* 2 REG_MBSSID_BCN_SPACE (Offset 0x0554) */
-
-#define BIT_SHIFT_BCN_TIMER_SEL_FWRD 28
-#define BIT_MASK_BCN_TIMER_SEL_FWRD 0x7
-#define BIT_BCN_TIMER_SEL_FWRD(x) \
- (((x) & BIT_MASK_BCN_TIMER_SEL_FWRD) << BIT_SHIFT_BCN_TIMER_SEL_FWRD)
-#define BIT_GET_BCN_TIMER_SEL_FWRD(x) \
- (((x) >> BIT_SHIFT_BCN_TIMER_SEL_FWRD) & BIT_MASK_BCN_TIMER_SEL_FWRD)
-
-/* 2 REG_MBSSID_BCN_SPACE (Offset 0x0554) */
-
-#define BIT_SHIFT_BCN_SPACE_CLINT0 16
-#define BIT_MASK_BCN_SPACE_CLINT0 0xfff
-#define BIT_BCN_SPACE_CLINT0(x) \
- (((x) & BIT_MASK_BCN_SPACE_CLINT0) << BIT_SHIFT_BCN_SPACE_CLINT0)
-#define BIT_GET_BCN_SPACE_CLINT0(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE_CLINT0) & BIT_MASK_BCN_SPACE_CLINT0)
-
-/* 2 REG_MBSSID_BCN_SPACE (Offset 0x0554) */
-
-#define BIT_SHIFT_BCN_SPACE0 0
-#define BIT_MASK_BCN_SPACE0 0xffff
-#define BIT_BCN_SPACE0(x) (((x) & BIT_MASK_BCN_SPACE0) << BIT_SHIFT_BCN_SPACE0)
-#define BIT_GET_BCN_SPACE0(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE0) & BIT_MASK_BCN_SPACE0)
-
-/* 2 REG_DRVERLYINT (Offset 0x0558) */
-
-#define BIT_SHIFT_DRVERLYITV 0
-#define BIT_MASK_DRVERLYITV 0xff
-#define BIT_DRVERLYITV(x) (((x) & BIT_MASK_DRVERLYITV) << BIT_SHIFT_DRVERLYITV)
-#define BIT_GET_DRVERLYITV(x) \
- (((x) >> BIT_SHIFT_DRVERLYITV) & BIT_MASK_DRVERLYITV)
-
-/* 2 REG_BCNDMATIM (Offset 0x0559) */
-
-#define BIT_SHIFT_BCNDMATIM 0
-#define BIT_MASK_BCNDMATIM 0xff
-#define BIT_BCNDMATIM(x) (((x) & BIT_MASK_BCNDMATIM) << BIT_SHIFT_BCNDMATIM)
-#define BIT_GET_BCNDMATIM(x) (((x) >> BIT_SHIFT_BCNDMATIM) & BIT_MASK_BCNDMATIM)
-
-/* 2 REG_ATIMWND (Offset 0x055A) */
-
-#define BIT_SHIFT_ATIMWND0 0
-#define BIT_MASK_ATIMWND0 0xffff
-#define BIT_ATIMWND0(x) (((x) & BIT_MASK_ATIMWND0) << BIT_SHIFT_ATIMWND0)
-#define BIT_GET_ATIMWND0(x) (((x) >> BIT_SHIFT_ATIMWND0) & BIT_MASK_ATIMWND0)
-
-/* 2 REG_USTIME_TSF (Offset 0x055C) */
-
-#define BIT_SHIFT_USTIME_TSF_V1 0
-#define BIT_MASK_USTIME_TSF_V1 0xff
-#define BIT_USTIME_TSF_V1(x) \
- (((x) & BIT_MASK_USTIME_TSF_V1) << BIT_SHIFT_USTIME_TSF_V1)
-#define BIT_GET_USTIME_TSF_V1(x) \
- (((x) >> BIT_SHIFT_USTIME_TSF_V1) & BIT_MASK_USTIME_TSF_V1)
-
-/* 2 REG_BCN_MAX_ERR (Offset 0x055D) */
-
-#define BIT_SHIFT_BCN_MAX_ERR 0
-#define BIT_MASK_BCN_MAX_ERR 0xff
-#define BIT_BCN_MAX_ERR(x) \
- (((x) & BIT_MASK_BCN_MAX_ERR) << BIT_SHIFT_BCN_MAX_ERR)
-#define BIT_GET_BCN_MAX_ERR(x) \
- (((x) >> BIT_SHIFT_BCN_MAX_ERR) & BIT_MASK_BCN_MAX_ERR)
-
-/* 2 REG_RXTSF_OFFSET_CCK (Offset 0x055E) */
-
-#define BIT_SHIFT_CCK_RXTSF_OFFSET 0
-#define BIT_MASK_CCK_RXTSF_OFFSET 0xff
-#define BIT_CCK_RXTSF_OFFSET(x) \
- (((x) & BIT_MASK_CCK_RXTSF_OFFSET) << BIT_SHIFT_CCK_RXTSF_OFFSET)
-#define BIT_GET_CCK_RXTSF_OFFSET(x) \
- (((x) >> BIT_SHIFT_CCK_RXTSF_OFFSET) & BIT_MASK_CCK_RXTSF_OFFSET)
-
-/* 2 REG_RXTSF_OFFSET_OFDM (Offset 0x055F) */
-
-#define BIT_SHIFT_OFDM_RXTSF_OFFSET 0
-#define BIT_MASK_OFDM_RXTSF_OFFSET 0xff
-#define BIT_OFDM_RXTSF_OFFSET(x) \
- (((x) & BIT_MASK_OFDM_RXTSF_OFFSET) << BIT_SHIFT_OFDM_RXTSF_OFFSET)
-#define BIT_GET_OFDM_RXTSF_OFFSET(x) \
- (((x) >> BIT_SHIFT_OFDM_RXTSF_OFFSET) & BIT_MASK_OFDM_RXTSF_OFFSET)
-
-/* 2 REG_TSFTR (Offset 0x0560) */
-
-#define BIT_SHIFT_TSF_TIMER 0
-#define BIT_MASK_TSF_TIMER 0xffffffffffffffffL
-#define BIT_TSF_TIMER(x) (((x) & BIT_MASK_TSF_TIMER) << BIT_SHIFT_TSF_TIMER)
-#define BIT_GET_TSF_TIMER(x) (((x) >> BIT_SHIFT_TSF_TIMER) & BIT_MASK_TSF_TIMER)
-
-/* 2 REG_FREERUN_CNT (Offset 0x0568) */
-
-#define BIT_SHIFT_FREERUN_CNT 0
-#define BIT_MASK_FREERUN_CNT 0xffffffffffffffffL
-#define BIT_FREERUN_CNT(x) \
- (((x) & BIT_MASK_FREERUN_CNT) << BIT_SHIFT_FREERUN_CNT)
-#define BIT_GET_FREERUN_CNT(x) \
- (((x) >> BIT_SHIFT_FREERUN_CNT) & BIT_MASK_FREERUN_CNT)
-
-/* 2 REG_ATIMWND1_V1 (Offset 0x0570) */
-
-#define BIT_SHIFT_ATIMWND1_V1 0
-#define BIT_MASK_ATIMWND1_V1 0xff
-#define BIT_ATIMWND1_V1(x) \
- (((x) & BIT_MASK_ATIMWND1_V1) << BIT_SHIFT_ATIMWND1_V1)
-#define BIT_GET_ATIMWND1_V1(x) \
- (((x) >> BIT_SHIFT_ATIMWND1_V1) & BIT_MASK_ATIMWND1_V1)
-
-/* 2 REG_TBTT_PROHIBIT_INFRA (Offset 0x0571) */
-
-#define BIT_SHIFT_TBTT_PROHIBIT_INFRA 0
-#define BIT_MASK_TBTT_PROHIBIT_INFRA 0xff
-#define BIT_TBTT_PROHIBIT_INFRA(x) \
- (((x) & BIT_MASK_TBTT_PROHIBIT_INFRA) << BIT_SHIFT_TBTT_PROHIBIT_INFRA)
-#define BIT_GET_TBTT_PROHIBIT_INFRA(x) \
- (((x) >> BIT_SHIFT_TBTT_PROHIBIT_INFRA) & BIT_MASK_TBTT_PROHIBIT_INFRA)
-
-/* 2 REG_CTWND (Offset 0x0572) */
-
-#define BIT_SHIFT_CTWND 0
-#define BIT_MASK_CTWND 0xff
-#define BIT_CTWND(x) (((x) & BIT_MASK_CTWND) << BIT_SHIFT_CTWND)
-#define BIT_GET_CTWND(x) (((x) >> BIT_SHIFT_CTWND) & BIT_MASK_CTWND)
-
-/* 2 REG_BCNIVLCUNT (Offset 0x0573) */
-
-#define BIT_SHIFT_BCNIVLCUNT 0
-#define BIT_MASK_BCNIVLCUNT 0x7f
-#define BIT_BCNIVLCUNT(x) (((x) & BIT_MASK_BCNIVLCUNT) << BIT_SHIFT_BCNIVLCUNT)
-#define BIT_GET_BCNIVLCUNT(x) \
- (((x) >> BIT_SHIFT_BCNIVLCUNT) & BIT_MASK_BCNIVLCUNT)
-
-/* 2 REG_BCNDROPCTRL (Offset 0x0574) */
-
-#define BIT_BEACON_DROP_EN BIT(7)
-
-#define BIT_SHIFT_BEACON_DROP_IVL 0
-#define BIT_MASK_BEACON_DROP_IVL 0x7f
-#define BIT_BEACON_DROP_IVL(x) \
- (((x) & BIT_MASK_BEACON_DROP_IVL) << BIT_SHIFT_BEACON_DROP_IVL)
-#define BIT_GET_BEACON_DROP_IVL(x) \
- (((x) >> BIT_SHIFT_BEACON_DROP_IVL) & BIT_MASK_BEACON_DROP_IVL)
-
-/* 2 REG_HGQ_TIMEOUT_PERIOD (Offset 0x0575) */
-
-#define BIT_SHIFT_HGQ_TIMEOUT_PERIOD 0
-#define BIT_MASK_HGQ_TIMEOUT_PERIOD 0xff
-#define BIT_HGQ_TIMEOUT_PERIOD(x) \
- (((x) & BIT_MASK_HGQ_TIMEOUT_PERIOD) << BIT_SHIFT_HGQ_TIMEOUT_PERIOD)
-#define BIT_GET_HGQ_TIMEOUT_PERIOD(x) \
- (((x) >> BIT_SHIFT_HGQ_TIMEOUT_PERIOD) & BIT_MASK_HGQ_TIMEOUT_PERIOD)
-
-/* 2 REG_TXCMD_TIMEOUT_PERIOD (Offset 0x0576) */
-
-#define BIT_SHIFT_TXCMD_TIMEOUT_PERIOD 0
-#define BIT_MASK_TXCMD_TIMEOUT_PERIOD 0xff
-#define BIT_TXCMD_TIMEOUT_PERIOD(x) \
- (((x) & BIT_MASK_TXCMD_TIMEOUT_PERIOD) \
- << BIT_SHIFT_TXCMD_TIMEOUT_PERIOD)
-#define BIT_GET_TXCMD_TIMEOUT_PERIOD(x) \
- (((x) >> BIT_SHIFT_TXCMD_TIMEOUT_PERIOD) & \
- BIT_MASK_TXCMD_TIMEOUT_PERIOD)
-
-/* 2 REG_MISC_CTRL (Offset 0x0577) */
-
-#define BIT_DIS_TRX_CAL_BCN BIT(5)
-#define BIT_DIS_TX_CAL_TBTT BIT(4)
-#define BIT_EN_FREECNT BIT(3)
-#define BIT_BCN_AGGRESSION BIT(2)
-
-#define BIT_SHIFT_DIS_SECONDARY_CCA 0
-#define BIT_MASK_DIS_SECONDARY_CCA 0x3
-#define BIT_DIS_SECONDARY_CCA(x) \
- (((x) & BIT_MASK_DIS_SECONDARY_CCA) << BIT_SHIFT_DIS_SECONDARY_CCA)
-#define BIT_GET_DIS_SECONDARY_CCA(x) \
- (((x) >> BIT_SHIFT_DIS_SECONDARY_CCA) & BIT_MASK_DIS_SECONDARY_CCA)
-
-/* 2 REG_BCN_CTRL_CLINT1 (Offset 0x0578) */
-
-#define BIT_CLI1_DIS_RX_BSSID_FIT BIT(6)
-#define BIT_CLI1_DIS_TSF_UDT BIT(4)
-#define BIT_CLI1_EN_BCN_FUNCTION BIT(3)
-
-/* 2 REG_BCN_CTRL_CLINT1 (Offset 0x0578) */
-
-#define BIT_CLI1_EN_RXBCN_RPT BIT(2)
-
-/* 2 REG_BCN_CTRL_CLINT1 (Offset 0x0578) */
-
-#define BIT_CLI1_ENP2P_CTWINDOW BIT(1)
-#define BIT_CLI1_ENP2P_BCNQ_AREA BIT(0)
-
-/* 2 REG_BCN_CTRL_CLINT2 (Offset 0x0579) */
-
-#define BIT_CLI2_DIS_RX_BSSID_FIT BIT(6)
-#define BIT_CLI2_DIS_TSF_UDT BIT(4)
-#define BIT_CLI2_EN_BCN_FUNCTION BIT(3)
-
-/* 2 REG_BCN_CTRL_CLINT2 (Offset 0x0579) */
-
-#define BIT_CLI2_EN_RXBCN_RPT BIT(2)
-
-/* 2 REG_BCN_CTRL_CLINT2 (Offset 0x0579) */
-
-#define BIT_CLI2_ENP2P_CTWINDOW BIT(1)
-#define BIT_CLI2_ENP2P_BCNQ_AREA BIT(0)
-
-/* 2 REG_BCN_CTRL_CLINT3 (Offset 0x057A) */
-
-#define BIT_CLI3_DIS_RX_BSSID_FIT BIT(6)
-#define BIT_CLI3_DIS_TSF_UDT BIT(4)
-#define BIT_CLI3_EN_BCN_FUNCTION BIT(3)
-
-/* 2 REG_BCN_CTRL_CLINT3 (Offset 0x057A) */
-
-#define BIT_CLI3_EN_RXBCN_RPT BIT(2)
-
-/* 2 REG_BCN_CTRL_CLINT3 (Offset 0x057A) */
-
-#define BIT_CLI3_ENP2P_CTWINDOW BIT(1)
-#define BIT_CLI3_ENP2P_BCNQ_AREA BIT(0)
-
-/* 2 REG_EXTEND_CTRL (Offset 0x057B) */
-
-#define BIT_EN_TSFBIT32_RST_P2P2 BIT(5)
-#define BIT_EN_TSFBIT32_RST_P2P1 BIT(4)
-
-#define BIT_SHIFT_PORT_SEL 0
-#define BIT_MASK_PORT_SEL 0x7
-#define BIT_PORT_SEL(x) (((x) & BIT_MASK_PORT_SEL) << BIT_SHIFT_PORT_SEL)
-#define BIT_GET_PORT_SEL(x) (((x) >> BIT_SHIFT_PORT_SEL) & BIT_MASK_PORT_SEL)
-
-/* 2 REG_P2PPS1_SPEC_STATE (Offset 0x057C) */
-
-#define BIT_P2P1_SPEC_POWER_STATE BIT(7)
-#define BIT_P2P1_SPEC_CTWINDOW_ON BIT(6)
-#define BIT_P2P1_SPEC_BCN_AREA_ON BIT(5)
-#define BIT_P2P1_SPEC_CTWIN_EARLY_DISTX BIT(4)
-#define BIT_P2P1_SPEC_NOA1_OFF_PERIOD BIT(3)
-#define BIT_P2P1_SPEC_FORCE_DOZE1 BIT(2)
-#define BIT_P2P1_SPEC_NOA0_OFF_PERIOD BIT(1)
-#define BIT_P2P1_SPEC_FORCE_DOZE0 BIT(0)
-
-/* 2 REG_P2PPS1_STATE (Offset 0x057D) */
-
-#define BIT_P2P1_POWER_STATE BIT(7)
-#define BIT_P2P1_CTWINDOW_ON BIT(6)
-#define BIT_P2P1_BEACON_AREA_ON BIT(5)
-#define BIT_P2P1_CTWIN_EARLY_DISTX BIT(4)
-#define BIT_P2P1_NOA1_OFF_PERIOD BIT(3)
-#define BIT_P2P1_FORCE_DOZE1 BIT(2)
-#define BIT_P2P1_NOA0_OFF_PERIOD BIT(1)
-#define BIT_P2P1_FORCE_DOZE0 BIT(0)
-
-/* 2 REG_P2PPS2_SPEC_STATE (Offset 0x057E) */
-
-#define BIT_P2P2_SPEC_POWER_STATE BIT(7)
-#define BIT_P2P2_SPEC_CTWINDOW_ON BIT(6)
-#define BIT_P2P2_SPEC_BCN_AREA_ON BIT(5)
-#define BIT_P2P2_SPEC_CTWIN_EARLY_DISTX BIT(4)
-#define BIT_P2P2_SPEC_NOA1_OFF_PERIOD BIT(3)
-#define BIT_P2P2_SPEC_FORCE_DOZE1 BIT(2)
-#define BIT_P2P2_SPEC_NOA0_OFF_PERIOD BIT(1)
-#define BIT_P2P2_SPEC_FORCE_DOZE0 BIT(0)
-
-/* 2 REG_P2PPS2_STATE (Offset 0x057F) */
-
-#define BIT_P2P2_POWER_STATE BIT(7)
-#define BIT_P2P2_CTWINDOW_ON BIT(6)
-#define BIT_P2P2_BEACON_AREA_ON BIT(5)
-#define BIT_P2P2_CTWIN_EARLY_DISTX BIT(4)
-#define BIT_P2P2_NOA1_OFF_PERIOD BIT(3)
-#define BIT_P2P2_FORCE_DOZE1 BIT(2)
-#define BIT_P2P2_NOA0_OFF_PERIOD BIT(1)
-#define BIT_P2P2_FORCE_DOZE0 BIT(0)
-
-/* 2 REG_PS_TIMER0 (Offset 0x0580) */
-
-#define BIT_SHIFT_PSTIMER0_INT 5
-#define BIT_MASK_PSTIMER0_INT 0x7ffffff
-#define BIT_PSTIMER0_INT(x) \
- (((x) & BIT_MASK_PSTIMER0_INT) << BIT_SHIFT_PSTIMER0_INT)
-#define BIT_GET_PSTIMER0_INT(x) \
- (((x) >> BIT_SHIFT_PSTIMER0_INT) & BIT_MASK_PSTIMER0_INT)
-
-/* 2 REG_PS_TIMER1 (Offset 0x0584) */
-
-#define BIT_SHIFT_PSTIMER1_INT 5
-#define BIT_MASK_PSTIMER1_INT 0x7ffffff
-#define BIT_PSTIMER1_INT(x) \
- (((x) & BIT_MASK_PSTIMER1_INT) << BIT_SHIFT_PSTIMER1_INT)
-#define BIT_GET_PSTIMER1_INT(x) \
- (((x) >> BIT_SHIFT_PSTIMER1_INT) & BIT_MASK_PSTIMER1_INT)
-
-/* 2 REG_PS_TIMER2 (Offset 0x0588) */
-
-#define BIT_SHIFT_PSTIMER2_INT 5
-#define BIT_MASK_PSTIMER2_INT 0x7ffffff
-#define BIT_PSTIMER2_INT(x) \
- (((x) & BIT_MASK_PSTIMER2_INT) << BIT_SHIFT_PSTIMER2_INT)
-#define BIT_GET_PSTIMER2_INT(x) \
- (((x) >> BIT_SHIFT_PSTIMER2_INT) & BIT_MASK_PSTIMER2_INT)
-
-/* 2 REG_TBTT_CTN_AREA (Offset 0x058C) */
-
-#define BIT_SHIFT_TBTT_CTN_AREA 0
-#define BIT_MASK_TBTT_CTN_AREA 0xff
-#define BIT_TBTT_CTN_AREA(x) \
- (((x) & BIT_MASK_TBTT_CTN_AREA) << BIT_SHIFT_TBTT_CTN_AREA)
-#define BIT_GET_TBTT_CTN_AREA(x) \
- (((x) >> BIT_SHIFT_TBTT_CTN_AREA) & BIT_MASK_TBTT_CTN_AREA)
-
-/* 2 REG_FORCE_BCN_IFS (Offset 0x058E) */
-
-#define BIT_SHIFT_FORCE_BCN_IFS 0
-#define BIT_MASK_FORCE_BCN_IFS 0xff
-#define BIT_FORCE_BCN_IFS(x) \
- (((x) & BIT_MASK_FORCE_BCN_IFS) << BIT_SHIFT_FORCE_BCN_IFS)
-#define BIT_GET_FORCE_BCN_IFS(x) \
- (((x) >> BIT_SHIFT_FORCE_BCN_IFS) & BIT_MASK_FORCE_BCN_IFS)
-
-/* 2 REG_TXOP_MIN (Offset 0x0590) */
-
-#define BIT_SHIFT_TXOP_MIN 0
-#define BIT_MASK_TXOP_MIN 0x3fff
-#define BIT_TXOP_MIN(x) (((x) & BIT_MASK_TXOP_MIN) << BIT_SHIFT_TXOP_MIN)
-#define BIT_GET_TXOP_MIN(x) (((x) >> BIT_SHIFT_TXOP_MIN) & BIT_MASK_TXOP_MIN)
-
-/* 2 REG_PRE_BKF_TIME (Offset 0x0592) */
-
-#define BIT_SHIFT_PRE_BKF_TIME 0
-#define BIT_MASK_PRE_BKF_TIME 0xff
-#define BIT_PRE_BKF_TIME(x) \
- (((x) & BIT_MASK_PRE_BKF_TIME) << BIT_SHIFT_PRE_BKF_TIME)
-#define BIT_GET_PRE_BKF_TIME(x) \
- (((x) >> BIT_SHIFT_PRE_BKF_TIME) & BIT_MASK_PRE_BKF_TIME)
-
-/* 2 REG_CROSS_TXOP_CTRL (Offset 0x0593) */
-
-#define BIT_DTIM_BYPASS BIT(2)
-#define BIT_RTS_NAV_TXOP BIT(1)
-#define BIT_NOT_CROSS_TXOP BIT(0)
-
-/* 2 REG_ATIMWND2 (Offset 0x05A0) */
-
-#define BIT_SHIFT_ATIMWND2 0
-#define BIT_MASK_ATIMWND2 0xff
-#define BIT_ATIMWND2(x) (((x) & BIT_MASK_ATIMWND2) << BIT_SHIFT_ATIMWND2)
-#define BIT_GET_ATIMWND2(x) (((x) >> BIT_SHIFT_ATIMWND2) & BIT_MASK_ATIMWND2)
-
-/* 2 REG_ATIMWND3 (Offset 0x05A1) */
-
-#define BIT_SHIFT_ATIMWND3 0
-#define BIT_MASK_ATIMWND3 0xff
-#define BIT_ATIMWND3(x) (((x) & BIT_MASK_ATIMWND3) << BIT_SHIFT_ATIMWND3)
-#define BIT_GET_ATIMWND3(x) (((x) >> BIT_SHIFT_ATIMWND3) & BIT_MASK_ATIMWND3)
-
-/* 2 REG_ATIMWND4 (Offset 0x05A2) */
-
-#define BIT_SHIFT_ATIMWND4 0
-#define BIT_MASK_ATIMWND4 0xff
-#define BIT_ATIMWND4(x) (((x) & BIT_MASK_ATIMWND4) << BIT_SHIFT_ATIMWND4)
-#define BIT_GET_ATIMWND4(x) (((x) >> BIT_SHIFT_ATIMWND4) & BIT_MASK_ATIMWND4)
-
-/* 2 REG_ATIMWND5 (Offset 0x05A3) */
-
-#define BIT_SHIFT_ATIMWND5 0
-#define BIT_MASK_ATIMWND5 0xff
-#define BIT_ATIMWND5(x) (((x) & BIT_MASK_ATIMWND5) << BIT_SHIFT_ATIMWND5)
-#define BIT_GET_ATIMWND5(x) (((x) >> BIT_SHIFT_ATIMWND5) & BIT_MASK_ATIMWND5)
-
-/* 2 REG_ATIMWND6 (Offset 0x05A4) */
-
-#define BIT_SHIFT_ATIMWND6 0
-#define BIT_MASK_ATIMWND6 0xff
-#define BIT_ATIMWND6(x) (((x) & BIT_MASK_ATIMWND6) << BIT_SHIFT_ATIMWND6)
-#define BIT_GET_ATIMWND6(x) (((x) >> BIT_SHIFT_ATIMWND6) & BIT_MASK_ATIMWND6)
-
-/* 2 REG_ATIMWND7 (Offset 0x05A5) */
-
-#define BIT_SHIFT_ATIMWND7 0
-#define BIT_MASK_ATIMWND7 0xff
-#define BIT_ATIMWND7(x) (((x) & BIT_MASK_ATIMWND7) << BIT_SHIFT_ATIMWND7)
-#define BIT_GET_ATIMWND7(x) (((x) >> BIT_SHIFT_ATIMWND7) & BIT_MASK_ATIMWND7)
-
-/* 2 REG_ATIMUGT (Offset 0x05A6) */
-
-#define BIT_SHIFT_ATIM_URGENT 0
-#define BIT_MASK_ATIM_URGENT 0xff
-#define BIT_ATIM_URGENT(x) \
- (((x) & BIT_MASK_ATIM_URGENT) << BIT_SHIFT_ATIM_URGENT)
-#define BIT_GET_ATIM_URGENT(x) \
- (((x) >> BIT_SHIFT_ATIM_URGENT) & BIT_MASK_ATIM_URGENT)
-
-/* 2 REG_HIQ_NO_LMT_EN (Offset 0x05A7) */
-
-#define BIT_HIQ_NO_LMT_EN_VAP7 BIT(7)
-#define BIT_HIQ_NO_LMT_EN_VAP6 BIT(6)
-#define BIT_HIQ_NO_LMT_EN_VAP5 BIT(5)
-#define BIT_HIQ_NO_LMT_EN_VAP4 BIT(4)
-#define BIT_HIQ_NO_LMT_EN_VAP3 BIT(3)
-#define BIT_HIQ_NO_LMT_EN_VAP2 BIT(2)
-#define BIT_HIQ_NO_LMT_EN_VAP1 BIT(1)
-#define BIT_HIQ_NO_LMT_EN_ROOT BIT(0)
-
-/* 2 REG_DTIM_COUNTER_ROOT (Offset 0x05A8) */
-
-#define BIT_SHIFT_DTIM_COUNT_ROOT 0
-#define BIT_MASK_DTIM_COUNT_ROOT 0xff
-#define BIT_DTIM_COUNT_ROOT(x) \
- (((x) & BIT_MASK_DTIM_COUNT_ROOT) << BIT_SHIFT_DTIM_COUNT_ROOT)
-#define BIT_GET_DTIM_COUNT_ROOT(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_ROOT) & BIT_MASK_DTIM_COUNT_ROOT)
-
-/* 2 REG_DTIM_COUNTER_VAP1 (Offset 0x05A9) */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP1 0
-#define BIT_MASK_DTIM_COUNT_VAP1 0xff
-#define BIT_DTIM_COUNT_VAP1(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP1) << BIT_SHIFT_DTIM_COUNT_VAP1)
-#define BIT_GET_DTIM_COUNT_VAP1(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP1) & BIT_MASK_DTIM_COUNT_VAP1)
-
-/* 2 REG_DTIM_COUNTER_VAP2 (Offset 0x05AA) */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP2 0
-#define BIT_MASK_DTIM_COUNT_VAP2 0xff
-#define BIT_DTIM_COUNT_VAP2(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP2) << BIT_SHIFT_DTIM_COUNT_VAP2)
-#define BIT_GET_DTIM_COUNT_VAP2(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP2) & BIT_MASK_DTIM_COUNT_VAP2)
-
-/* 2 REG_DTIM_COUNTER_VAP3 (Offset 0x05AB) */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP3 0
-#define BIT_MASK_DTIM_COUNT_VAP3 0xff
-#define BIT_DTIM_COUNT_VAP3(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP3) << BIT_SHIFT_DTIM_COUNT_VAP3)
-#define BIT_GET_DTIM_COUNT_VAP3(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP3) & BIT_MASK_DTIM_COUNT_VAP3)
-
-/* 2 REG_DTIM_COUNTER_VAP4 (Offset 0x05AC) */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP4 0
-#define BIT_MASK_DTIM_COUNT_VAP4 0xff
-#define BIT_DTIM_COUNT_VAP4(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP4) << BIT_SHIFT_DTIM_COUNT_VAP4)
-#define BIT_GET_DTIM_COUNT_VAP4(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP4) & BIT_MASK_DTIM_COUNT_VAP4)
-
-/* 2 REG_DTIM_COUNTER_VAP5 (Offset 0x05AD) */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP5 0
-#define BIT_MASK_DTIM_COUNT_VAP5 0xff
-#define BIT_DTIM_COUNT_VAP5(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP5) << BIT_SHIFT_DTIM_COUNT_VAP5)
-#define BIT_GET_DTIM_COUNT_VAP5(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP5) & BIT_MASK_DTIM_COUNT_VAP5)
-
-/* 2 REG_DTIM_COUNTER_VAP6 (Offset 0x05AE) */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP6 0
-#define BIT_MASK_DTIM_COUNT_VAP6 0xff
-#define BIT_DTIM_COUNT_VAP6(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP6) << BIT_SHIFT_DTIM_COUNT_VAP6)
-#define BIT_GET_DTIM_COUNT_VAP6(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP6) & BIT_MASK_DTIM_COUNT_VAP6)
-
-/* 2 REG_DTIM_COUNTER_VAP7 (Offset 0x05AF) */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP7 0
-#define BIT_MASK_DTIM_COUNT_VAP7 0xff
-#define BIT_DTIM_COUNT_VAP7(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP7) << BIT_SHIFT_DTIM_COUNT_VAP7)
-#define BIT_GET_DTIM_COUNT_VAP7(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP7) & BIT_MASK_DTIM_COUNT_VAP7)
-
-/* 2 REG_DIS_ATIM (Offset 0x05B0) */
-
-#define BIT_DIS_ATIM_VAP7 BIT(7)
-#define BIT_DIS_ATIM_VAP6 BIT(6)
-#define BIT_DIS_ATIM_VAP5 BIT(5)
-#define BIT_DIS_ATIM_VAP4 BIT(4)
-#define BIT_DIS_ATIM_VAP3 BIT(3)
-#define BIT_DIS_ATIM_VAP2 BIT(2)
-#define BIT_DIS_ATIM_VAP1 BIT(1)
-#define BIT_DIS_ATIM_ROOT BIT(0)
-
-/* 2 REG_EARLY_128US (Offset 0x05B1) */
-
-#define BIT_SHIFT_TSFT_SEL_TIMER1 3
-#define BIT_MASK_TSFT_SEL_TIMER1 0x7
-#define BIT_TSFT_SEL_TIMER1(x) \
- (((x) & BIT_MASK_TSFT_SEL_TIMER1) << BIT_SHIFT_TSFT_SEL_TIMER1)
-#define BIT_GET_TSFT_SEL_TIMER1(x) \
- (((x) >> BIT_SHIFT_TSFT_SEL_TIMER1) & BIT_MASK_TSFT_SEL_TIMER1)
-
-#define BIT_SHIFT_EARLY_128US 0
-#define BIT_MASK_EARLY_128US 0x7
-#define BIT_EARLY_128US(x) \
- (((x) & BIT_MASK_EARLY_128US) << BIT_SHIFT_EARLY_128US)
-#define BIT_GET_EARLY_128US(x) \
- (((x) >> BIT_SHIFT_EARLY_128US) & BIT_MASK_EARLY_128US)
-
-/* 2 REG_P2PPS1_CTRL (Offset 0x05B2) */
-
-#define BIT_P2P1_CTW_ALLSTASLEEP BIT(7)
-#define BIT_P2P1_OFF_DISTX_EN BIT(6)
-#define BIT_P2P1_PWR_MGT_EN BIT(5)
-#define BIT_P2P1_NOA1_EN BIT(2)
-#define BIT_P2P1_NOA0_EN BIT(1)
-
-/* 2 REG_P2PPS2_CTRL (Offset 0x05B3) */
-
-#define BIT_P2P2_CTW_ALLSTASLEEP BIT(7)
-#define BIT_P2P2_OFF_DISTX_EN BIT(6)
-#define BIT_P2P2_PWR_MGT_EN BIT(5)
-#define BIT_P2P2_NOA1_EN BIT(2)
-#define BIT_P2P2_NOA0_EN BIT(1)
-
-/* 2 REG_TIMER0_SRC_SEL (Offset 0x05B4) */
-
-#define BIT_SHIFT_SYNC_CLI_SEL 4
-#define BIT_MASK_SYNC_CLI_SEL 0x7
-#define BIT_SYNC_CLI_SEL(x) \
- (((x) & BIT_MASK_SYNC_CLI_SEL) << BIT_SHIFT_SYNC_CLI_SEL)
-#define BIT_GET_SYNC_CLI_SEL(x) \
- (((x) >> BIT_SHIFT_SYNC_CLI_SEL) & BIT_MASK_SYNC_CLI_SEL)
-
-#define BIT_SHIFT_TSFT_SEL_TIMER0 0
-#define BIT_MASK_TSFT_SEL_TIMER0 0x7
-#define BIT_TSFT_SEL_TIMER0(x) \
- (((x) & BIT_MASK_TSFT_SEL_TIMER0) << BIT_SHIFT_TSFT_SEL_TIMER0)
-#define BIT_GET_TSFT_SEL_TIMER0(x) \
- (((x) >> BIT_SHIFT_TSFT_SEL_TIMER0) & BIT_MASK_TSFT_SEL_TIMER0)
-
-/* 2 REG_NOA_UNIT_SEL (Offset 0x05B5) */
-
-#define BIT_SHIFT_NOA_UNIT2_SEL 8
-#define BIT_MASK_NOA_UNIT2_SEL 0x7
-#define BIT_NOA_UNIT2_SEL(x) \
- (((x) & BIT_MASK_NOA_UNIT2_SEL) << BIT_SHIFT_NOA_UNIT2_SEL)
-#define BIT_GET_NOA_UNIT2_SEL(x) \
- (((x) >> BIT_SHIFT_NOA_UNIT2_SEL) & BIT_MASK_NOA_UNIT2_SEL)
-
-#define BIT_SHIFT_NOA_UNIT1_SEL 4
-#define BIT_MASK_NOA_UNIT1_SEL 0x7
-#define BIT_NOA_UNIT1_SEL(x) \
- (((x) & BIT_MASK_NOA_UNIT1_SEL) << BIT_SHIFT_NOA_UNIT1_SEL)
-#define BIT_GET_NOA_UNIT1_SEL(x) \
- (((x) >> BIT_SHIFT_NOA_UNIT1_SEL) & BIT_MASK_NOA_UNIT1_SEL)
-
-#define BIT_SHIFT_NOA_UNIT0_SEL 0
-#define BIT_MASK_NOA_UNIT0_SEL 0x7
-#define BIT_NOA_UNIT0_SEL(x) \
- (((x) & BIT_MASK_NOA_UNIT0_SEL) << BIT_SHIFT_NOA_UNIT0_SEL)
-#define BIT_GET_NOA_UNIT0_SEL(x) \
- (((x) >> BIT_SHIFT_NOA_UNIT0_SEL) & BIT_MASK_NOA_UNIT0_SEL)
-
-/* 2 REG_P2POFF_DIS_TXTIME (Offset 0x05B7) */
-
-#define BIT_SHIFT_P2POFF_DIS_TXTIME 0
-#define BIT_MASK_P2POFF_DIS_TXTIME 0xff
-#define BIT_P2POFF_DIS_TXTIME(x) \
- (((x) & BIT_MASK_P2POFF_DIS_TXTIME) << BIT_SHIFT_P2POFF_DIS_TXTIME)
-#define BIT_GET_P2POFF_DIS_TXTIME(x) \
- (((x) >> BIT_SHIFT_P2POFF_DIS_TXTIME) & BIT_MASK_P2POFF_DIS_TXTIME)
-
-/* 2 REG_MBSSID_BCN_SPACE2 (Offset 0x05B8) */
-
-#define BIT_SHIFT_BCN_SPACE_CLINT2 16
-#define BIT_MASK_BCN_SPACE_CLINT2 0xfff
-#define BIT_BCN_SPACE_CLINT2(x) \
- (((x) & BIT_MASK_BCN_SPACE_CLINT2) << BIT_SHIFT_BCN_SPACE_CLINT2)
-#define BIT_GET_BCN_SPACE_CLINT2(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE_CLINT2) & BIT_MASK_BCN_SPACE_CLINT2)
-
-#define BIT_SHIFT_BCN_SPACE_CLINT1 0
-#define BIT_MASK_BCN_SPACE_CLINT1 0xfff
-#define BIT_BCN_SPACE_CLINT1(x) \
- (((x) & BIT_MASK_BCN_SPACE_CLINT1) << BIT_SHIFT_BCN_SPACE_CLINT1)
-#define BIT_GET_BCN_SPACE_CLINT1(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE_CLINT1) & BIT_MASK_BCN_SPACE_CLINT1)
-
-/* 2 REG_MBSSID_BCN_SPACE3 (Offset 0x05BC) */
-
-#define BIT_SHIFT_SUB_BCN_SPACE 16
-#define BIT_MASK_SUB_BCN_SPACE 0xff
-#define BIT_SUB_BCN_SPACE(x) \
- (((x) & BIT_MASK_SUB_BCN_SPACE) << BIT_SHIFT_SUB_BCN_SPACE)
-#define BIT_GET_SUB_BCN_SPACE(x) \
- (((x) >> BIT_SHIFT_SUB_BCN_SPACE) & BIT_MASK_SUB_BCN_SPACE)
-
-/* 2 REG_MBSSID_BCN_SPACE3 (Offset 0x05BC) */
-
-#define BIT_SHIFT_BCN_SPACE_CLINT3 0
-#define BIT_MASK_BCN_SPACE_CLINT3 0xfff
-#define BIT_BCN_SPACE_CLINT3(x) \
- (((x) & BIT_MASK_BCN_SPACE_CLINT3) << BIT_SHIFT_BCN_SPACE_CLINT3)
-#define BIT_GET_BCN_SPACE_CLINT3(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE_CLINT3) & BIT_MASK_BCN_SPACE_CLINT3)
-
-/* 2 REG_ACMHWCTRL (Offset 0x05C0) */
-
-#define BIT_BEQ_ACM_STATUS BIT(7)
-#define BIT_VIQ_ACM_STATUS BIT(6)
-#define BIT_VOQ_ACM_STATUS BIT(5)
-#define BIT_BEQ_ACM_EN BIT(3)
-#define BIT_VIQ_ACM_EN BIT(2)
-#define BIT_VOQ_ACM_EN BIT(1)
-#define BIT_ACMHWEN BIT(0)
-
-/* 2 REG_ACMRSTCTRL (Offset 0x05C1) */
-
-#define BIT_BE_ACM_RESET_USED_TIME BIT(2)
-#define BIT_VI_ACM_RESET_USED_TIME BIT(1)
-#define BIT_VO_ACM_RESET_USED_TIME BIT(0)
-
-/* 2 REG_ACMAVG (Offset 0x05C2) */
-
-#define BIT_SHIFT_AVGPERIOD 0
-#define BIT_MASK_AVGPERIOD 0xffff
-#define BIT_AVGPERIOD(x) (((x) & BIT_MASK_AVGPERIOD) << BIT_SHIFT_AVGPERIOD)
-#define BIT_GET_AVGPERIOD(x) (((x) >> BIT_SHIFT_AVGPERIOD) & BIT_MASK_AVGPERIOD)
-
-/* 2 REG_VO_ADMTIME (Offset 0x05C4) */
-
-#define BIT_SHIFT_VO_ADMITTED_TIME 0
-#define BIT_MASK_VO_ADMITTED_TIME 0xffff
-#define BIT_VO_ADMITTED_TIME(x) \
- (((x) & BIT_MASK_VO_ADMITTED_TIME) << BIT_SHIFT_VO_ADMITTED_TIME)
-#define BIT_GET_VO_ADMITTED_TIME(x) \
- (((x) >> BIT_SHIFT_VO_ADMITTED_TIME) & BIT_MASK_VO_ADMITTED_TIME)
-
-/* 2 REG_VI_ADMTIME (Offset 0x05C6) */
-
-#define BIT_SHIFT_VI_ADMITTED_TIME 0
-#define BIT_MASK_VI_ADMITTED_TIME 0xffff
-#define BIT_VI_ADMITTED_TIME(x) \
- (((x) & BIT_MASK_VI_ADMITTED_TIME) << BIT_SHIFT_VI_ADMITTED_TIME)
-#define BIT_GET_VI_ADMITTED_TIME(x) \
- (((x) >> BIT_SHIFT_VI_ADMITTED_TIME) & BIT_MASK_VI_ADMITTED_TIME)
-
-/* 2 REG_BE_ADMTIME (Offset 0x05C8) */
-
-#define BIT_SHIFT_BE_ADMITTED_TIME 0
-#define BIT_MASK_BE_ADMITTED_TIME 0xffff
-#define BIT_BE_ADMITTED_TIME(x) \
- (((x) & BIT_MASK_BE_ADMITTED_TIME) << BIT_SHIFT_BE_ADMITTED_TIME)
-#define BIT_GET_BE_ADMITTED_TIME(x) \
- (((x) >> BIT_SHIFT_BE_ADMITTED_TIME) & BIT_MASK_BE_ADMITTED_TIME)
-
-/* 2 REG_EDCA_RANDOM_GEN (Offset 0x05CC) */
-
-#define BIT_SHIFT_RANDOM_GEN 0
-#define BIT_MASK_RANDOM_GEN 0xffffff
-#define BIT_RANDOM_GEN(x) (((x) & BIT_MASK_RANDOM_GEN) << BIT_SHIFT_RANDOM_GEN)
-#define BIT_GET_RANDOM_GEN(x) \
- (((x) >> BIT_SHIFT_RANDOM_GEN) & BIT_MASK_RANDOM_GEN)
-
-/* 2 REG_TXCMD_NOA_SEL (Offset 0x05CF) */
-
-#define BIT_SHIFT_NOA_SEL 4
-#define BIT_MASK_NOA_SEL 0x7
-#define BIT_NOA_SEL(x) (((x) & BIT_MASK_NOA_SEL) << BIT_SHIFT_NOA_SEL)
-#define BIT_GET_NOA_SEL(x) (((x) >> BIT_SHIFT_NOA_SEL) & BIT_MASK_NOA_SEL)
-
-/* 2 REG_TXCMD_NOA_SEL (Offset 0x05CF) */
-
-#define BIT_SHIFT_TXCMD_SEG_SEL 0
-#define BIT_MASK_TXCMD_SEG_SEL 0xf
-#define BIT_TXCMD_SEG_SEL(x) \
- (((x) & BIT_MASK_TXCMD_SEG_SEL) << BIT_SHIFT_TXCMD_SEG_SEL)
-#define BIT_GET_TXCMD_SEG_SEL(x) \
- (((x) >> BIT_SHIFT_TXCMD_SEG_SEL) & BIT_MASK_TXCMD_SEG_SEL)
-
-/* 2 REG_NOA_PARAM (Offset 0x05E0) */
-
-#define BIT_SHIFT_NOA_COUNT (96 & CPU_OPT_WIDTH)
-#define BIT_MASK_NOA_COUNT 0xff
-#define BIT_NOA_COUNT(x) (((x) & BIT_MASK_NOA_COUNT) << BIT_SHIFT_NOA_COUNT)
-#define BIT_GET_NOA_COUNT(x) (((x) >> BIT_SHIFT_NOA_COUNT) & BIT_MASK_NOA_COUNT)
-
-#define BIT_SHIFT_NOA_START_TIME (64 & CPU_OPT_WIDTH)
-#define BIT_MASK_NOA_START_TIME 0xffffffffL
-#define BIT_NOA_START_TIME(x) \
- (((x) & BIT_MASK_NOA_START_TIME) << BIT_SHIFT_NOA_START_TIME)
-#define BIT_GET_NOA_START_TIME(x) \
- (((x) >> BIT_SHIFT_NOA_START_TIME) & BIT_MASK_NOA_START_TIME)
-
-#define BIT_SHIFT_NOA_INTERVAL (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_NOA_INTERVAL 0xffffffffL
-#define BIT_NOA_INTERVAL(x) \
- (((x) & BIT_MASK_NOA_INTERVAL) << BIT_SHIFT_NOA_INTERVAL)
-#define BIT_GET_NOA_INTERVAL(x) \
- (((x) >> BIT_SHIFT_NOA_INTERVAL) & BIT_MASK_NOA_INTERVAL)
-
-#define BIT_SHIFT_NOA_DURATION 0
-#define BIT_MASK_NOA_DURATION 0xffffffffL
-#define BIT_NOA_DURATION(x) \
- (((x) & BIT_MASK_NOA_DURATION) << BIT_SHIFT_NOA_DURATION)
-#define BIT_GET_NOA_DURATION(x) \
- (((x) >> BIT_SHIFT_NOA_DURATION) & BIT_MASK_NOA_DURATION)
-
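A few of the wider descriptors above, such as the NOA parameters at 0x05E0, write their shift amounts as (96 & CPU_OPT_WIDTH), (64 & CPU_OPT_WIDTH), and so on. CPU_OPT_WIDTH itself is defined elsewhere in the driver and does not appear in this hunk; the sketch below assumes a mask value of 0x1f purely to show what such a clamp would do, namely fold an absolute bit offset within the descriptor back into an offset within a single 32-bit access.

#include <stdio.h>

#define CPU_OPT_WIDTH 0x1f	/* assumed value, for illustration only */

/* Shift expressions copied from the REG_NOA_PARAM block above. */
#define BIT_SHIFT_NOA_COUNT (96 & CPU_OPT_WIDTH)
#define BIT_SHIFT_NOA_START_TIME (64 & CPU_OPT_WIDTH)
#define BIT_SHIFT_NOA_INTERVAL (32 & CPU_OPT_WIDTH)

int main(void)
{
	/* With a 0x1f clamp, each field lands at bit 0 of its own dword. */
	printf("count=%d start=%d interval=%d\n",
	       BIT_SHIFT_NOA_COUNT, BIT_SHIFT_NOA_START_TIME,
	       BIT_SHIFT_NOA_INTERVAL);
	return 0;
}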
-/* 2 REG_P2P_RST (Offset 0x05F0) */
-
-#define BIT_P2P2_PWR_RST1 BIT(5)
-#define BIT_P2P2_PWR_RST0 BIT(4)
-#define BIT_P2P1_PWR_RST1 BIT(3)
-#define BIT_P2P1_PWR_RST0 BIT(2)
-#define BIT_P2P_PWR_RST1_V1 BIT(1)
-#define BIT_P2P_PWR_RST0_V1 BIT(0)
-
-/* 2 REG_SCHEDULER_RST (Offset 0x05F1) */
-
-#define BIT_SYNC_CLI BIT(1)
-#define BIT_SCHEDULER_RST_V1 BIT(0)
-
-/* 2 REG_SCH_TXCMD (Offset 0x05F8) */
-
-#define BIT_SHIFT_SCH_TXCMD 0
-#define BIT_MASK_SCH_TXCMD 0xffffffffL
-#define BIT_SCH_TXCMD(x) (((x) & BIT_MASK_SCH_TXCMD) << BIT_SHIFT_SCH_TXCMD)
-#define BIT_GET_SCH_TXCMD(x) (((x) >> BIT_SHIFT_SCH_TXCMD) & BIT_MASK_SCH_TXCMD)
-
-/* 2 REG_WMAC_CR (Offset 0x0600) */
-
-#define BIT_IC_MACPHY_M BIT(0)
-
-/* 2 REG_WMAC_FWPKT_CR (Offset 0x0601) */
-
-#define BIT_FWEN BIT(7)
-
-/* 2 REG_WMAC_FWPKT_CR (Offset 0x0601) */
-
-#define BIT_PHYSTS_PKT_CTRL BIT(6)
-
-/* 2 REG_WMAC_FWPKT_CR (Offset 0x0601) */
-
-#define BIT_APPHDR_MIDSRCH_FAIL BIT(4)
-#define BIT_FWPARSING_EN BIT(3)
-
-#define BIT_SHIFT_APPEND_MHDR_LEN 0
-#define BIT_MASK_APPEND_MHDR_LEN 0x7
-#define BIT_APPEND_MHDR_LEN(x) \
- (((x) & BIT_MASK_APPEND_MHDR_LEN) << BIT_SHIFT_APPEND_MHDR_LEN)
-#define BIT_GET_APPEND_MHDR_LEN(x) \
- (((x) >> BIT_SHIFT_APPEND_MHDR_LEN) & BIT_MASK_APPEND_MHDR_LEN)
-
-/* 2 REG_TCR (Offset 0x0604) */
-
-#define BIT_WMAC_EN_RTS_ADDR BIT(31)
-#define BIT_WMAC_DISABLE_CCK BIT(30)
-#define BIT_WMAC_RAW_LEN BIT(29)
-#define BIT_WMAC_NOTX_IN_RXNDP BIT(28)
-#define BIT_WMAC_EN_EOF BIT(27)
-#define BIT_WMAC_BF_SEL BIT(26)
-#define BIT_WMAC_ANTMODE_SEL BIT(25)
-
-/* 2 REG_TCR (Offset 0x0604) */
-
-#define BIT_WMAC_TCRPWRMGT_HWCTL BIT(24)
-
-/* 2 REG_TCR (Offset 0x0604) */
-
-#define BIT_WMAC_SMOOTH_VAL BIT(23)
-
-/* 2 REG_TCR (Offset 0x0604) */
-
-#define BIT_FETCH_MPDU_AFTER_WSEC_RDY BIT(20)
-
-/* 2 REG_TCR (Offset 0x0604) */
-
-#define BIT_WMAC_TCR_EN_20MST BIT(19)
-#define BIT_WMAC_DIS_SIGTA BIT(18)
-#define BIT_WMAC_DIS_A2B0 BIT(17)
-#define BIT_WMAC_MSK_SIGBCRC BIT(16)
-
-/* 2 REG_TCR (Offset 0x0604) */
-
-#define BIT_WMAC_TCR_ERRSTEN_3 BIT(15)
-#define BIT_WMAC_TCR_ERRSTEN_2 BIT(14)
-#define BIT_WMAC_TCR_ERRSTEN_1 BIT(13)
-#define BIT_WMAC_TCR_ERRSTEN_0 BIT(12)
-#define BIT_WMAC_TCR_TXSK_PERPKT BIT(11)
-#define BIT_ICV BIT(10)
-#define BIT_CFEND_FORMAT BIT(9)
-#define BIT_CRC BIT(8)
-#define BIT_PWRBIT_OW_EN BIT(7)
-#define BIT_PWR_ST BIT(6)
-#define BIT_WMAC_TCR_UPD_TIMIE BIT(5)
-#define BIT_WMAC_TCR_UPD_HGQMD BIT(4)
-
-/* 2 REG_TCR (Offset 0x0604) */
-
-#define BIT_VHTSIGA1_TXPS BIT(3)
-
-/* 2 REG_TCR (Offset 0x0604) */
-
-#define BIT_PAD_SEL BIT(2)
-#define BIT_DIS_GCLK BIT(1)
-
-/* 2 REG_RCR (Offset 0x0608) */
-
-#define BIT_APP_FCS BIT(31)
-#define BIT_APP_MIC BIT(30)
-#define BIT_APP_ICV BIT(29)
-#define BIT_APP_PHYSTS BIT(28)
-#define BIT_APP_BASSN BIT(27)
-
-/* 2 REG_RCR (Offset 0x0608) */
-
-#define BIT_VHT_DACK BIT(26)
-
-/* 2 REG_RCR (Offset 0x0608) */
-
-#define BIT_TCPOFLD_EN BIT(25)
-#define BIT_ENMBID BIT(24)
-#define BIT_LSIGEN BIT(23)
-#define BIT_MFBEN BIT(22)
-#define BIT_DISCHKPPDLLEN BIT(21)
-#define BIT_PKTCTL_DLEN BIT(20)
-#define BIT_TIM_PARSER_EN BIT(18)
-#define BIT_BC_MD_EN BIT(17)
-#define BIT_UC_MD_EN BIT(16)
-#define BIT_RXSK_PERPKT BIT(15)
-#define BIT_HTC_LOC_CTRL BIT(14)
-
-/* 2 REG_RCR (Offset 0x0608) */
-
-#define BIT_RPFM_CAM_ENABLE BIT(12)
-
-/* 2 REG_RCR (Offset 0x0608) */
-
-#define BIT_TA_BCN BIT(11)
-
-/* 2 REG_RCR (Offset 0x0608) */
-
-#define BIT_DISDECMYPKT BIT(10)
-#define BIT_AICV BIT(9)
-#define BIT_ACRC32 BIT(8)
-#define BIT_CBSSID_BCN BIT(7)
-#define BIT_CBSSID_DATA BIT(6)
-#define BIT_APWRMGT BIT(5)
-#define BIT_ADD3 BIT(4)
-#define BIT_AB BIT(3)
-#define BIT_AM BIT(2)
-#define BIT_APM BIT(1)
-#define BIT_AAP BIT(0)
-
-/* 2 REG_RX_PKT_LIMIT (Offset 0x060C) */
-
-#define BIT_SHIFT_RXPKTLMT 0
-#define BIT_MASK_RXPKTLMT 0x3f
-#define BIT_RXPKTLMT(x) (((x) & BIT_MASK_RXPKTLMT) << BIT_SHIFT_RXPKTLMT)
-#define BIT_GET_RXPKTLMT(x) (((x) >> BIT_SHIFT_RXPKTLMT) & BIT_MASK_RXPKTLMT)
-
-/* 2 REG_RX_DLK_TIME (Offset 0x060D) */
-
-#define BIT_SHIFT_RX_DLK_TIME 0
-#define BIT_MASK_RX_DLK_TIME 0xff
-#define BIT_RX_DLK_TIME(x) \
- (((x) & BIT_MASK_RX_DLK_TIME) << BIT_SHIFT_RX_DLK_TIME)
-#define BIT_GET_RX_DLK_TIME(x) \
- (((x) >> BIT_SHIFT_RX_DLK_TIME) & BIT_MASK_RX_DLK_TIME)
-
-/* 2 REG_RX_DRVINFO_SZ (Offset 0x060F) */
-
-#define BIT_DATA_RPFM15EN BIT(15)
-#define BIT_DATA_RPFM14EN BIT(14)
-#define BIT_DATA_RPFM13EN BIT(13)
-#define BIT_DATA_RPFM12EN BIT(12)
-#define BIT_DATA_RPFM11EN BIT(11)
-#define BIT_DATA_RPFM10EN BIT(10)
-#define BIT_DATA_RPFM9EN BIT(9)
-#define BIT_DATA_RPFM8EN BIT(8)
-
-/* 2 REG_RX_DRVINFO_SZ (Offset 0x060F) */
-
-#define BIT_PHYSTS_PER_PKT_MODE BIT(7)
-#define BIT_DATA_RPFM7EN BIT(7)
-
-/* 2 REG_RX_DRVINFO_SZ (Offset 0x060F) */
-
-#define BIT_DATA_RPFM6EN BIT(6)
-
-/* 2 REG_RX_DRVINFO_SZ (Offset 0x060F) */
-
-#define BIT_DATA_RPFM5EN BIT(5)
-#define BIT_DATA_RPFM4EN BIT(4)
-#define BIT_DATA_RPFM3EN BIT(3)
-#define BIT_DATA_RPFM2EN BIT(2)
-#define BIT_DATA_RPFM1EN BIT(1)
-
-/* 2 REG_RX_DRVINFO_SZ (Offset 0x060F) */
-
-#define BIT_SHIFT_DRVINFO_SZ_V1 0
-#define BIT_MASK_DRVINFO_SZ_V1 0xf
-#define BIT_DRVINFO_SZ_V1(x) \
- (((x) & BIT_MASK_DRVINFO_SZ_V1) << BIT_SHIFT_DRVINFO_SZ_V1)
-#define BIT_GET_DRVINFO_SZ_V1(x) \
- (((x) >> BIT_SHIFT_DRVINFO_SZ_V1) & BIT_MASK_DRVINFO_SZ_V1)
-
-/* 2 REG_RX_DRVINFO_SZ (Offset 0x060F) */
-
-#define BIT_DATA_RPFM0EN BIT(0)
-
-/* 2 REG_MACID (Offset 0x0610) */
-
-#define BIT_SHIFT_MACID 0
-#define BIT_MASK_MACID 0xffffffffffffL
-#define BIT_MACID(x) (((x) & BIT_MASK_MACID) << BIT_SHIFT_MACID)
-#define BIT_GET_MACID(x) (((x) >> BIT_SHIFT_MACID) & BIT_MASK_MACID)
-
-/* 2 REG_BSSID (Offset 0x0618) */
-
-#define BIT_SHIFT_BSSID 0
-#define BIT_MASK_BSSID 0xffffffffffffL
-#define BIT_BSSID(x) (((x) & BIT_MASK_BSSID) << BIT_SHIFT_BSSID)
-#define BIT_GET_BSSID(x) (((x) >> BIT_SHIFT_BSSID) & BIT_MASK_BSSID)
-
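REG_MACID and REG_BSSID above carry 48-bit addresses, which is why their masks use the L suffix and their helpers expect a 64-bit value. The sketch below packs a six-byte address into that field and reads it back; the little-endian byte order and the sample address are assumptions for illustration, not something this hunk establishes.

#include <stdint.h>
#include <stdio.h>

/* Field helpers copied from the REG_MACID block above. */
#define BIT_SHIFT_MACID 0
#define BIT_MASK_MACID 0xffffffffffffL
#define BIT_MACID(x) (((x) & BIT_MASK_MACID) << BIT_SHIFT_MACID)
#define BIT_GET_MACID(x) (((x) >> BIT_SHIFT_MACID) & BIT_MASK_MACID)

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t val = 0;
	int i;

	/* Pack the six address bytes into the 48-bit field (LE assumed). */
	for (i = 0; i < 6; i++)
		val |= (uint64_t)addr[i] << (8 * i);
	val = BIT_MACID(val);

	/* Extract it again; only the low 48 bits survive the mask. */
	printf("macid = 0x%012llx\n",
	       (unsigned long long)BIT_GET_MACID(val));
	return 0;
}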
-/* 2 REG_MAR (Offset 0x0620) */
-
-#define BIT_SHIFT_MAR 0
-#define BIT_MASK_MAR 0xffffffffffffffffL
-#define BIT_MAR(x) (((x) & BIT_MASK_MAR) << BIT_SHIFT_MAR)
-#define BIT_GET_MAR(x) (((x) >> BIT_SHIFT_MAR) & BIT_MASK_MAR)
-
-/* 2 REG_MBIDCAMCFG_1 (Offset 0x0628) */
-
-#define BIT_SHIFT_MBIDCAM_RWDATA_L 0
-#define BIT_MASK_MBIDCAM_RWDATA_L 0xffffffffL
-#define BIT_MBIDCAM_RWDATA_L(x) \
- (((x) & BIT_MASK_MBIDCAM_RWDATA_L) << BIT_SHIFT_MBIDCAM_RWDATA_L)
-#define BIT_GET_MBIDCAM_RWDATA_L(x) \
- (((x) >> BIT_SHIFT_MBIDCAM_RWDATA_L) & BIT_MASK_MBIDCAM_RWDATA_L)
-
-/* 2 REG_MBIDCAMCFG_2 (Offset 0x062C) */
-
-#define BIT_MBIDCAM_POLL BIT(31)
-#define BIT_MBIDCAM_WT_EN BIT(30)
-
-#define BIT_SHIFT_MBIDCAM_ADDR 24
-#define BIT_MASK_MBIDCAM_ADDR 0x1f
-#define BIT_MBIDCAM_ADDR(x) \
- (((x) & BIT_MASK_MBIDCAM_ADDR) << BIT_SHIFT_MBIDCAM_ADDR)
-#define BIT_GET_MBIDCAM_ADDR(x) \
- (((x) >> BIT_SHIFT_MBIDCAM_ADDR) & BIT_MASK_MBIDCAM_ADDR)
-
-#define BIT_MBIDCAM_VALID BIT(23)
-#define BIT_LSIC_TXOP_EN BIT(17)
-
-/* 2 REG_MBIDCAMCFG_2 (Offset 0x062C) */
-
-#define BIT_CTS_EN BIT(16)
-
-/* 2 REG_MBIDCAMCFG_2 (Offset 0x062C) */
-
-#define BIT_SHIFT_MBIDCAM_RWDATA_H 0
-#define BIT_MASK_MBIDCAM_RWDATA_H 0xffff
-#define BIT_MBIDCAM_RWDATA_H(x) \
- (((x) & BIT_MASK_MBIDCAM_RWDATA_H) << BIT_SHIFT_MBIDCAM_RWDATA_H)
-#define BIT_GET_MBIDCAM_RWDATA_H(x) \
- (((x) >> BIT_SHIFT_MBIDCAM_RWDATA_H) & BIT_MASK_MBIDCAM_RWDATA_H)
-
-/* 2 REG_WMAC_TCR_TSFT_OFS (Offset 0x0630) */
-
-#define BIT_SHIFT_WMAC_TCR_TSFT_OFS 0
-#define BIT_MASK_WMAC_TCR_TSFT_OFS 0xffff
-#define BIT_WMAC_TCR_TSFT_OFS(x) \
- (((x) & BIT_MASK_WMAC_TCR_TSFT_OFS) << BIT_SHIFT_WMAC_TCR_TSFT_OFS)
-#define BIT_GET_WMAC_TCR_TSFT_OFS(x) \
- (((x) >> BIT_SHIFT_WMAC_TCR_TSFT_OFS) & BIT_MASK_WMAC_TCR_TSFT_OFS)
-
-/* 2 REG_UDF_THSD (Offset 0x0632) */
-
-#define BIT_SHIFT_UDF_THSD 0
-#define BIT_MASK_UDF_THSD 0xff
-#define BIT_UDF_THSD(x) (((x) & BIT_MASK_UDF_THSD) << BIT_SHIFT_UDF_THSD)
-#define BIT_GET_UDF_THSD(x) (((x) >> BIT_SHIFT_UDF_THSD) & BIT_MASK_UDF_THSD)
-
-/* 2 REG_ZLD_NUM (Offset 0x0633) */
-
-#define BIT_SHIFT_ZLD_NUM 0
-#define BIT_MASK_ZLD_NUM 0xff
-#define BIT_ZLD_NUM(x) (((x) & BIT_MASK_ZLD_NUM) << BIT_SHIFT_ZLD_NUM)
-#define BIT_GET_ZLD_NUM(x) (((x) >> BIT_SHIFT_ZLD_NUM) & BIT_MASK_ZLD_NUM)
-
-/* 2 REG_STMP_THSD (Offset 0x0634) */
-
-#define BIT_SHIFT_STMP_THSD 0
-#define BIT_MASK_STMP_THSD 0xff
-#define BIT_STMP_THSD(x) (((x) & BIT_MASK_STMP_THSD) << BIT_SHIFT_STMP_THSD)
-#define BIT_GET_STMP_THSD(x) (((x) >> BIT_SHIFT_STMP_THSD) & BIT_MASK_STMP_THSD)
-
-/* 2 REG_WMAC_TXTIMEOUT (Offset 0x0635) */
-
-#define BIT_SHIFT_WMAC_TXTIMEOUT 0
-#define BIT_MASK_WMAC_TXTIMEOUT 0xff
-#define BIT_WMAC_TXTIMEOUT(x) \
- (((x) & BIT_MASK_WMAC_TXTIMEOUT) << BIT_SHIFT_WMAC_TXTIMEOUT)
-#define BIT_GET_WMAC_TXTIMEOUT(x) \
- (((x) >> BIT_SHIFT_WMAC_TXTIMEOUT) & BIT_MASK_WMAC_TXTIMEOUT)
-
-/* 2 REG_MCU_TEST_2_V1 (Offset 0x0636) */
-
-#define BIT_SHIFT_MCU_RSVD_2_V1 0
-#define BIT_MASK_MCU_RSVD_2_V1 0xffff
-#define BIT_MCU_RSVD_2_V1(x) \
- (((x) & BIT_MASK_MCU_RSVD_2_V1) << BIT_SHIFT_MCU_RSVD_2_V1)
-#define BIT_GET_MCU_RSVD_2_V1(x) \
- (((x) >> BIT_SHIFT_MCU_RSVD_2_V1) & BIT_MASK_MCU_RSVD_2_V1)
-
-/* 2 REG_USTIME_EDCA (Offset 0x0638) */
-
-#define BIT_SHIFT_USTIME_EDCA_V1 0
-#define BIT_MASK_USTIME_EDCA_V1 0x1ff
-#define BIT_USTIME_EDCA_V1(x) \
- (((x) & BIT_MASK_USTIME_EDCA_V1) << BIT_SHIFT_USTIME_EDCA_V1)
-#define BIT_GET_USTIME_EDCA_V1(x) \
- (((x) >> BIT_SHIFT_USTIME_EDCA_V1) & BIT_MASK_USTIME_EDCA_V1)
-
-/* 2 REG_MAC_SPEC_SIFS (Offset 0x063A) */
-
-#define BIT_SHIFT_SPEC_SIFS_OFDM 8
-#define BIT_MASK_SPEC_SIFS_OFDM 0xff
-#define BIT_SPEC_SIFS_OFDM(x) \
- (((x) & BIT_MASK_SPEC_SIFS_OFDM) << BIT_SHIFT_SPEC_SIFS_OFDM)
-#define BIT_GET_SPEC_SIFS_OFDM(x) \
- (((x) >> BIT_SHIFT_SPEC_SIFS_OFDM) & BIT_MASK_SPEC_SIFS_OFDM)
-
-#define BIT_SHIFT_SPEC_SIFS_CCK 0
-#define BIT_MASK_SPEC_SIFS_CCK 0xff
-#define BIT_SPEC_SIFS_CCK(x) \
- (((x) & BIT_MASK_SPEC_SIFS_CCK) << BIT_SHIFT_SPEC_SIFS_CCK)
-#define BIT_GET_SPEC_SIFS_CCK(x) \
- (((x) >> BIT_SHIFT_SPEC_SIFS_CCK) & BIT_MASK_SPEC_SIFS_CCK)
-
-/* 2 REG_RESP_SIFS_CCK (Offset 0x063C) */
-
-#define BIT_SHIFT_SIFS_R2T_CCK 8
-#define BIT_MASK_SIFS_R2T_CCK 0xff
-#define BIT_SIFS_R2T_CCK(x) \
- (((x) & BIT_MASK_SIFS_R2T_CCK) << BIT_SHIFT_SIFS_R2T_CCK)
-#define BIT_GET_SIFS_R2T_CCK(x) \
- (((x) >> BIT_SHIFT_SIFS_R2T_CCK) & BIT_MASK_SIFS_R2T_CCK)
-
-#define BIT_SHIFT_SIFS_T2T_CCK 0
-#define BIT_MASK_SIFS_T2T_CCK 0xff
-#define BIT_SIFS_T2T_CCK(x) \
- (((x) & BIT_MASK_SIFS_T2T_CCK) << BIT_SHIFT_SIFS_T2T_CCK)
-#define BIT_GET_SIFS_T2T_CCK(x) \
- (((x) >> BIT_SHIFT_SIFS_T2T_CCK) & BIT_MASK_SIFS_T2T_CCK)
-
-/* 2 REG_RESP_SIFS_OFDM (Offset 0x063E) */
-
-#define BIT_SHIFT_SIFS_R2T_OFDM 8
-#define BIT_MASK_SIFS_R2T_OFDM 0xff
-#define BIT_SIFS_R2T_OFDM(x) \
- (((x) & BIT_MASK_SIFS_R2T_OFDM) << BIT_SHIFT_SIFS_R2T_OFDM)
-#define BIT_GET_SIFS_R2T_OFDM(x) \
- (((x) >> BIT_SHIFT_SIFS_R2T_OFDM) & BIT_MASK_SIFS_R2T_OFDM)
-
-#define BIT_SHIFT_SIFS_T2T_OFDM 0
-#define BIT_MASK_SIFS_T2T_OFDM 0xff
-#define BIT_SIFS_T2T_OFDM(x) \
- (((x) & BIT_MASK_SIFS_T2T_OFDM) << BIT_SHIFT_SIFS_T2T_OFDM)
-#define BIT_GET_SIFS_T2T_OFDM(x) \
- (((x) >> BIT_SHIFT_SIFS_T2T_OFDM) & BIT_MASK_SIFS_T2T_OFDM)
-
-/* 2 REG_ACKTO (Offset 0x0640) */
-
-#define BIT_SHIFT_ACKTO 0
-#define BIT_MASK_ACKTO 0xff
-#define BIT_ACKTO(x) (((x) & BIT_MASK_ACKTO) << BIT_SHIFT_ACKTO)
-#define BIT_GET_ACKTO(x) (((x) >> BIT_SHIFT_ACKTO) & BIT_MASK_ACKTO)
-
-/* 2 REG_CTS2TO (Offset 0x0641) */
-
-#define BIT_SHIFT_CTS2TO 0
-#define BIT_MASK_CTS2TO 0xff
-#define BIT_CTS2TO(x) (((x) & BIT_MASK_CTS2TO) << BIT_SHIFT_CTS2TO)
-#define BIT_GET_CTS2TO(x) (((x) >> BIT_SHIFT_CTS2TO) & BIT_MASK_CTS2TO)
-
-/* 2 REG_EIFS (Offset 0x0642) */
-
-#define BIT_SHIFT_EIFS 0
-#define BIT_MASK_EIFS 0xffff
-#define BIT_EIFS(x) (((x) & BIT_MASK_EIFS) << BIT_SHIFT_EIFS)
-#define BIT_GET_EIFS(x) (((x) >> BIT_SHIFT_EIFS) & BIT_MASK_EIFS)
-
-/* 2 REG_NAV_CTRL (Offset 0x0650) */
-
-#define BIT_SHIFT_NAV_UPPER 16
-#define BIT_MASK_NAV_UPPER 0xff
-#define BIT_NAV_UPPER(x) (((x) & BIT_MASK_NAV_UPPER) << BIT_SHIFT_NAV_UPPER)
-#define BIT_GET_NAV_UPPER(x) (((x) >> BIT_SHIFT_NAV_UPPER) & BIT_MASK_NAV_UPPER)
-
-#define BIT_SHIFT_RXMYRTS_NAV 8
-#define BIT_MASK_RXMYRTS_NAV 0xf
-#define BIT_RXMYRTS_NAV(x) \
- (((x) & BIT_MASK_RXMYRTS_NAV) << BIT_SHIFT_RXMYRTS_NAV)
-#define BIT_GET_RXMYRTS_NAV(x) \
- (((x) >> BIT_SHIFT_RXMYRTS_NAV) & BIT_MASK_RXMYRTS_NAV)
-
-#define BIT_SHIFT_RTSRST 0
-#define BIT_MASK_RTSRST 0xff
-#define BIT_RTSRST(x) (((x) & BIT_MASK_RTSRST) << BIT_SHIFT_RTSRST)
-#define BIT_GET_RTSRST(x) (((x) >> BIT_SHIFT_RTSRST) & BIT_MASK_RTSRST)
-
-/* 2 REG_BACAMCMD (Offset 0x0654) */
-
-#define BIT_BACAM_POLL BIT(31)
-#define BIT_BACAM_RST BIT(17)
-#define BIT_BACAM_RW BIT(16)
-
-#define BIT_SHIFT_TXSBM 14
-#define BIT_MASK_TXSBM 0x3
-#define BIT_TXSBM(x) (((x) & BIT_MASK_TXSBM) << BIT_SHIFT_TXSBM)
-#define BIT_GET_TXSBM(x) (((x) >> BIT_SHIFT_TXSBM) & BIT_MASK_TXSBM)
-
-#define BIT_SHIFT_BACAM_ADDR 0
-#define BIT_MASK_BACAM_ADDR 0x3f
-#define BIT_BACAM_ADDR(x) (((x) & BIT_MASK_BACAM_ADDR) << BIT_SHIFT_BACAM_ADDR)
-#define BIT_GET_BACAM_ADDR(x) \
- (((x) >> BIT_SHIFT_BACAM_ADDR) & BIT_MASK_BACAM_ADDR)
-
-/* 2 REG_BACAMCONTENT (Offset 0x0658) */
-
-#define BIT_SHIFT_BA_CONTENT_H (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_BA_CONTENT_H 0xffffffffL
-#define BIT_BA_CONTENT_H(x) \
- (((x) & BIT_MASK_BA_CONTENT_H) << BIT_SHIFT_BA_CONTENT_H)
-#define BIT_GET_BA_CONTENT_H(x) \
- (((x) >> BIT_SHIFT_BA_CONTENT_H) & BIT_MASK_BA_CONTENT_H)
-
-#define BIT_SHIFT_BA_CONTENT_L 0
-#define BIT_MASK_BA_CONTENT_L 0xffffffffL
-#define BIT_BA_CONTENT_L(x) \
- (((x) & BIT_MASK_BA_CONTENT_L) << BIT_SHIFT_BA_CONTENT_L)
-#define BIT_GET_BA_CONTENT_L(x) \
- (((x) >> BIT_SHIFT_BA_CONTENT_L) & BIT_MASK_BA_CONTENT_L)
-
-/* 2 REG_LBDLY (Offset 0x0660) */
-
-#define BIT_SHIFT_LBDLY 0
-#define BIT_MASK_LBDLY 0x1f
-#define BIT_LBDLY(x) (((x) & BIT_MASK_LBDLY) << BIT_SHIFT_LBDLY)
-#define BIT_GET_LBDLY(x) (((x) >> BIT_SHIFT_LBDLY) & BIT_MASK_LBDLY)
-
-/* 2 REG_WMAC_BACAM_RPMEN (Offset 0x0661) */
-
-#define BIT_SHIFT_BITMAP_SSNBK_COUNTER 2
-#define BIT_MASK_BITMAP_SSNBK_COUNTER 0x3f
-#define BIT_BITMAP_SSNBK_COUNTER(x) \
- (((x) & BIT_MASK_BITMAP_SSNBK_COUNTER) \
- << BIT_SHIFT_BITMAP_SSNBK_COUNTER)
-#define BIT_GET_BITMAP_SSNBK_COUNTER(x) \
- (((x) >> BIT_SHIFT_BITMAP_SSNBK_COUNTER) & \
- BIT_MASK_BITMAP_SSNBK_COUNTER)
-
-#define BIT_BITMAP_EN BIT(1)
-
-/* 2 REG_WMAC_BACAM_RPMEN (Offset 0x0661) */
-
-#define BIT_WMAC_BACAM_RPMEN BIT(0)
-
-/* 2 REG_TX_RX (Offset 0x0662) */
-
-#define BIT_SHIFT_RXPKT_TYPE 2
-#define BIT_MASK_RXPKT_TYPE 0x3f
-#define BIT_RXPKT_TYPE(x) (((x) & BIT_MASK_RXPKT_TYPE) << BIT_SHIFT_RXPKT_TYPE)
-#define BIT_GET_RXPKT_TYPE(x) \
- (((x) >> BIT_SHIFT_RXPKT_TYPE) & BIT_MASK_RXPKT_TYPE)
-
-#define BIT_TXACT_IND BIT(1)
-#define BIT_RXACT_IND BIT(0)
-
-/* 2 REG_WMAC_BITMAP_CTL (Offset 0x0663) */
-
-#define BIT_BITMAP_VO BIT(7)
-#define BIT_BITMAP_VI BIT(6)
-#define BIT_BITMAP_BE BIT(5)
-#define BIT_BITMAP_BK BIT(4)
-
-#define BIT_SHIFT_BITMAP_CONDITION 2
-#define BIT_MASK_BITMAP_CONDITION 0x3
-#define BIT_BITMAP_CONDITION(x) \
- (((x) & BIT_MASK_BITMAP_CONDITION) << BIT_SHIFT_BITMAP_CONDITION)
-#define BIT_GET_BITMAP_CONDITION(x) \
- (((x) >> BIT_SHIFT_BITMAP_CONDITION) & BIT_MASK_BITMAP_CONDITION)
-
-#define BIT_BITMAP_SSNBK_COUNTER_CLR BIT(1)
-#define BIT_BITMAP_FORCE BIT(0)
-
-/* 2 REG_RXERR_RPT (Offset 0x0664) */
-
-#define BIT_SHIFT_RXERR_RPT_SEL_V1_3_0 28
-#define BIT_MASK_RXERR_RPT_SEL_V1_3_0 0xf
-#define BIT_RXERR_RPT_SEL_V1_3_0(x) \
- (((x) & BIT_MASK_RXERR_RPT_SEL_V1_3_0) \
- << BIT_SHIFT_RXERR_RPT_SEL_V1_3_0)
-#define BIT_GET_RXERR_RPT_SEL_V1_3_0(x) \
- (((x) >> BIT_SHIFT_RXERR_RPT_SEL_V1_3_0) & \
- BIT_MASK_RXERR_RPT_SEL_V1_3_0)
-
-/* 2 REG_RXERR_RPT (Offset 0x0664) */
-
-#define BIT_RXERR_RPT_RST BIT(27)
-
-/* 2 REG_RXERR_RPT (Offset 0x0664) */
-
-#define BIT_RXERR_RPT_SEL_V1_4 BIT(26)
-
-/* 2 REG_RXERR_RPT (Offset 0x0664) */
-
-#define BIT_W1S BIT(23)
-
-/* 2 REG_RXERR_RPT (Offset 0x0664) */
-
-#define BIT_UD_SELECT_BSSID BIT(22)
-
-/* 2 REG_RXERR_RPT (Offset 0x0664) */
-
-#define BIT_SHIFT_UD_SUB_TYPE 18
-#define BIT_MASK_UD_SUB_TYPE 0xf
-#define BIT_UD_SUB_TYPE(x) \
- (((x) & BIT_MASK_UD_SUB_TYPE) << BIT_SHIFT_UD_SUB_TYPE)
-#define BIT_GET_UD_SUB_TYPE(x) \
- (((x) >> BIT_SHIFT_UD_SUB_TYPE) & BIT_MASK_UD_SUB_TYPE)
-
-#define BIT_SHIFT_UD_TYPE 16
-#define BIT_MASK_UD_TYPE 0x3
-#define BIT_UD_TYPE(x) (((x) & BIT_MASK_UD_TYPE) << BIT_SHIFT_UD_TYPE)
-#define BIT_GET_UD_TYPE(x) (((x) >> BIT_SHIFT_UD_TYPE) & BIT_MASK_UD_TYPE)
-
-#define BIT_SHIFT_RPT_COUNTER 0
-#define BIT_MASK_RPT_COUNTER 0xffff
-#define BIT_RPT_COUNTER(x) \
- (((x) & BIT_MASK_RPT_COUNTER) << BIT_SHIFT_RPT_COUNTER)
-#define BIT_GET_RPT_COUNTER(x) \
- (((x) >> BIT_SHIFT_RPT_COUNTER) & BIT_MASK_RPT_COUNTER)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_SHIFT_ACKBA_TYPSEL (60 & CPU_OPT_WIDTH)
-#define BIT_MASK_ACKBA_TYPSEL 0xf
-#define BIT_ACKBA_TYPSEL(x) \
- (((x) & BIT_MASK_ACKBA_TYPSEL) << BIT_SHIFT_ACKBA_TYPSEL)
-#define BIT_GET_ACKBA_TYPSEL(x) \
- (((x) >> BIT_SHIFT_ACKBA_TYPSEL) & BIT_MASK_ACKBA_TYPSEL)
-
-#define BIT_SHIFT_ACKBA_ACKPCHK (56 & CPU_OPT_WIDTH)
-#define BIT_MASK_ACKBA_ACKPCHK 0xf
-#define BIT_ACKBA_ACKPCHK(x) \
- (((x) & BIT_MASK_ACKBA_ACKPCHK) << BIT_SHIFT_ACKBA_ACKPCHK)
-#define BIT_GET_ACKBA_ACKPCHK(x) \
- (((x) >> BIT_SHIFT_ACKBA_ACKPCHK) & BIT_MASK_ACKBA_ACKPCHK)
-
-#define BIT_SHIFT_ACKBAR_TYPESEL (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_ACKBAR_TYPESEL 0xff
-#define BIT_ACKBAR_TYPESEL(x) \
- (((x) & BIT_MASK_ACKBAR_TYPESEL) << BIT_SHIFT_ACKBAR_TYPESEL)
-#define BIT_GET_ACKBAR_TYPESEL(x) \
- (((x) >> BIT_SHIFT_ACKBAR_TYPESEL) & BIT_MASK_ACKBAR_TYPESEL)
-
-#define BIT_SHIFT_ACKBAR_ACKPCHK (44 & CPU_OPT_WIDTH)
-#define BIT_MASK_ACKBAR_ACKPCHK 0xf
-#define BIT_ACKBAR_ACKPCHK(x) \
- (((x) & BIT_MASK_ACKBAR_ACKPCHK) << BIT_SHIFT_ACKBAR_ACKPCHK)
-#define BIT_GET_ACKBAR_ACKPCHK(x) \
- (((x) >> BIT_SHIFT_ACKBAR_ACKPCHK) & BIT_MASK_ACKBAR_ACKPCHK)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_RXBA_IGNOREA2 BIT(42)
-#define BIT_EN_SAVE_ALL_TXOPADDR BIT(41)
-#define BIT_EN_TXCTS_TO_TXOPOWNER_INRXNAV BIT(40)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_DIS_TXBA_AMPDUFCSERR BIT(39)
-#define BIT_DIS_TXBA_RXBARINFULL BIT(38)
-#define BIT_DIS_TXCFE_INFULL BIT(37)
-#define BIT_DIS_TXCTS_INFULL BIT(36)
-#define BIT_EN_TXACKBA_IN_TX_RDG BIT(35)
-#define BIT_EN_TXACKBA_IN_TXOP BIT(34)
-#define BIT_EN_TXCTS_IN_RXNAV BIT(33)
-#define BIT_EN_TXCTS_INTXOP BIT(32)
-#define BIT_BLK_EDCA_BBSLP BIT(31)
-#define BIT_BLK_EDCA_BBSBY BIT(30)
-#define BIT_ACKTO_BLOCK_SCH_EN BIT(27)
-#define BIT_EIFS_BLOCK_SCH_EN BIT(26)
-#define BIT_PLCPCHK_RST_EIFS BIT(25)
-#define BIT_CCA_RST_EIFS BIT(24)
-#define BIT_DIS_UPD_MYRXPKTNAV BIT(23)
-#define BIT_EARLY_TXBA BIT(22)
-
-#define BIT_SHIFT_RESP_CHNBUSY 20
-#define BIT_MASK_RESP_CHNBUSY 0x3
-#define BIT_RESP_CHNBUSY(x) \
- (((x) & BIT_MASK_RESP_CHNBUSY) << BIT_SHIFT_RESP_CHNBUSY)
-#define BIT_GET_RESP_CHNBUSY(x) \
- (((x) >> BIT_SHIFT_RESP_CHNBUSY) & BIT_MASK_RESP_CHNBUSY)
-
-#define BIT_RESP_DCTS_EN BIT(19)
-#define BIT_RESP_DCFE_EN BIT(18)
-#define BIT_RESP_SPLCPEN BIT(17)
-#define BIT_RESP_SGIEN BIT(16)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_RESP_LDPC_EN BIT(15)
-#define BIT_DIS_RESP_ACKINCCA BIT(14)
-#define BIT_DIS_RESP_CTSINCCA BIT(13)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_SHIFT_R_WMAC_SECOND_CCA_TIMER 10
-#define BIT_MASK_R_WMAC_SECOND_CCA_TIMER 0x7
-#define BIT_R_WMAC_SECOND_CCA_TIMER(x) \
- (((x) & BIT_MASK_R_WMAC_SECOND_CCA_TIMER) \
- << BIT_SHIFT_R_WMAC_SECOND_CCA_TIMER)
-#define BIT_GET_R_WMAC_SECOND_CCA_TIMER(x) \
- (((x) >> BIT_SHIFT_R_WMAC_SECOND_CCA_TIMER) & \
- BIT_MASK_R_WMAC_SECOND_CCA_TIMER)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_SHIFT_RFMOD 7
-#define BIT_MASK_RFMOD 0x3
-#define BIT_RFMOD(x) (((x) & BIT_MASK_RFMOD) << BIT_SHIFT_RFMOD)
-#define BIT_GET_RFMOD(x) (((x) >> BIT_SHIFT_RFMOD) & BIT_MASK_RFMOD)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_SHIFT_RESP_CTS_DYNBW_SEL 5
-#define BIT_MASK_RESP_CTS_DYNBW_SEL 0x3
-#define BIT_RESP_CTS_DYNBW_SEL(x) \
- (((x) & BIT_MASK_RESP_CTS_DYNBW_SEL) << BIT_SHIFT_RESP_CTS_DYNBW_SEL)
-#define BIT_GET_RESP_CTS_DYNBW_SEL(x) \
- (((x) >> BIT_SHIFT_RESP_CTS_DYNBW_SEL) & BIT_MASK_RESP_CTS_DYNBW_SEL)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_DLY_TX_WAIT_RXANTSEL BIT(4)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_TXRESP_BY_RXANTSEL BIT(3)
-
-/* 2 REG_WMAC_TRXPTCL_CTL (Offset 0x0668) */
-
-#define BIT_SHIFT_ORIG_DCTS_CHK 0
-#define BIT_MASK_ORIG_DCTS_CHK 0x3
-#define BIT_ORIG_DCTS_CHK(x) \
- (((x) & BIT_MASK_ORIG_DCTS_CHK) << BIT_SHIFT_ORIG_DCTS_CHK)
-#define BIT_GET_ORIG_DCTS_CHK(x) \
- (((x) >> BIT_SHIFT_ORIG_DCTS_CHK) & BIT_MASK_ORIG_DCTS_CHK)
-
-/* 2 REG_CAMCMD (Offset 0x0670) */
-
-#define BIT_SECCAM_POLLING BIT(31)
-#define BIT_SECCAM_CLR BIT(30)
-#define BIT_MFBCAM_CLR BIT(29)
-
-/* 2 REG_CAMCMD (Offset 0x0670) */
-
-#define BIT_SECCAM_WE BIT(16)
-
-/* 2 REG_CAMCMD (Offset 0x0670) */
-
-#define BIT_SHIFT_SECCAM_ADDR_V2 0
-#define BIT_MASK_SECCAM_ADDR_V2 0x3ff
-#define BIT_SECCAM_ADDR_V2(x) \
- (((x) & BIT_MASK_SECCAM_ADDR_V2) << BIT_SHIFT_SECCAM_ADDR_V2)
-#define BIT_GET_SECCAM_ADDR_V2(x) \
- (((x) >> BIT_SHIFT_SECCAM_ADDR_V2) & BIT_MASK_SECCAM_ADDR_V2)
-
-/* 2 REG_CAMWRITE (Offset 0x0674) */
-
-#define BIT_SHIFT_CAMW_DATA 0
-#define BIT_MASK_CAMW_DATA 0xffffffffL
-#define BIT_CAMW_DATA(x) (((x) & BIT_MASK_CAMW_DATA) << BIT_SHIFT_CAMW_DATA)
-#define BIT_GET_CAMW_DATA(x) (((x) >> BIT_SHIFT_CAMW_DATA) & BIT_MASK_CAMW_DATA)
-
-/* 2 REG_CAMREAD (Offset 0x0678) */
-
-#define BIT_SHIFT_CAMR_DATA 0
-#define BIT_MASK_CAMR_DATA 0xffffffffL
-#define BIT_CAMR_DATA(x) (((x) & BIT_MASK_CAMR_DATA) << BIT_SHIFT_CAMR_DATA)
-#define BIT_GET_CAMR_DATA(x) (((x) >> BIT_SHIFT_CAMR_DATA) & BIT_MASK_CAMR_DATA)
-
-/* 2 REG_CAMDBG (Offset 0x067C) */
-
-#define BIT_SECCAM_INFO BIT(31)
-#define BIT_SEC_KEYFOUND BIT(15)
-
-#define BIT_SHIFT_CAMDBG_SEC_TYPE 12
-#define BIT_MASK_CAMDBG_SEC_TYPE 0x7
-#define BIT_CAMDBG_SEC_TYPE(x) \
- (((x) & BIT_MASK_CAMDBG_SEC_TYPE) << BIT_SHIFT_CAMDBG_SEC_TYPE)
-#define BIT_GET_CAMDBG_SEC_TYPE(x) \
- (((x) >> BIT_SHIFT_CAMDBG_SEC_TYPE) & BIT_MASK_CAMDBG_SEC_TYPE)
-
-/* 2 REG_CAMDBG (Offset 0x067C) */
-
-#define BIT_CAMDBG_EXT_SECTYPE BIT(11)
-
-/* 2 REG_CAMDBG (Offset 0x067C) */
-
-#define BIT_SHIFT_CAMDBG_MIC_KEY_IDX 5
-#define BIT_MASK_CAMDBG_MIC_KEY_IDX 0x1f
-#define BIT_CAMDBG_MIC_KEY_IDX(x) \
- (((x) & BIT_MASK_CAMDBG_MIC_KEY_IDX) << BIT_SHIFT_CAMDBG_MIC_KEY_IDX)
-#define BIT_GET_CAMDBG_MIC_KEY_IDX(x) \
- (((x) >> BIT_SHIFT_CAMDBG_MIC_KEY_IDX) & BIT_MASK_CAMDBG_MIC_KEY_IDX)
-
-#define BIT_SHIFT_CAMDBG_SEC_KEY_IDX 0
-#define BIT_MASK_CAMDBG_SEC_KEY_IDX 0x1f
-#define BIT_CAMDBG_SEC_KEY_IDX(x) \
- (((x) & BIT_MASK_CAMDBG_SEC_KEY_IDX) << BIT_SHIFT_CAMDBG_SEC_KEY_IDX)
-#define BIT_GET_CAMDBG_SEC_KEY_IDX(x) \
- (((x) >> BIT_SHIFT_CAMDBG_SEC_KEY_IDX) & BIT_MASK_CAMDBG_SEC_KEY_IDX)
-
-/* 2 REG_SECCFG (Offset 0x0680) */
-
-#define BIT_DIS_GCLK_WAPI BIT(15)
-#define BIT_DIS_GCLK_AES BIT(14)
-#define BIT_DIS_GCLK_TKIP BIT(13)
-
-/* 2 REG_SECCFG (Offset 0x0680) */
-
-#define BIT_AES_SEL_QC_1 BIT(12)
-#define BIT_AES_SEL_QC_0 BIT(11)
-
-/* 2 REG_SECCFG (Offset 0x0680) */
-
-#define BIT_CHK_BMC BIT(9)
-
-/* 2 REG_SECCFG (Offset 0x0680) */
-
-#define BIT_CHK_KEYID BIT(8)
-#define BIT_RXBCUSEDK BIT(7)
-#define BIT_TXBCUSEDK BIT(6)
-#define BIT_NOSKMC BIT(5)
-#define BIT_SKBYA2 BIT(4)
-#define BIT_RXDEC BIT(3)
-#define BIT_TXENC BIT(2)
-#define BIT_RXUHUSEDK BIT(1)
-#define BIT_TXUHUSEDK BIT(0)
-
-/* 2 REG_RXFILTER_CATEGORY_1 (Offset 0x0682) */
-
-#define BIT_SHIFT_RXFILTER_CATEGORY_1 0
-#define BIT_MASK_RXFILTER_CATEGORY_1 0xff
-#define BIT_RXFILTER_CATEGORY_1(x) \
- (((x) & BIT_MASK_RXFILTER_CATEGORY_1) << BIT_SHIFT_RXFILTER_CATEGORY_1)
-#define BIT_GET_RXFILTER_CATEGORY_1(x) \
- (((x) >> BIT_SHIFT_RXFILTER_CATEGORY_1) & BIT_MASK_RXFILTER_CATEGORY_1)
-
-/* 2 REG_RXFILTER_ACTION_1 (Offset 0x0683) */
-
-#define BIT_SHIFT_RXFILTER_ACTION_1 0
-#define BIT_MASK_RXFILTER_ACTION_1 0xff
-#define BIT_RXFILTER_ACTION_1(x) \
- (((x) & BIT_MASK_RXFILTER_ACTION_1) << BIT_SHIFT_RXFILTER_ACTION_1)
-#define BIT_GET_RXFILTER_ACTION_1(x) \
- (((x) >> BIT_SHIFT_RXFILTER_ACTION_1) & BIT_MASK_RXFILTER_ACTION_1)
-
-/* 2 REG_RXFILTER_CATEGORY_2 (Offset 0x0684) */
-
-#define BIT_SHIFT_RXFILTER_CATEGORY_2 0
-#define BIT_MASK_RXFILTER_CATEGORY_2 0xff
-#define BIT_RXFILTER_CATEGORY_2(x) \
- (((x) & BIT_MASK_RXFILTER_CATEGORY_2) << BIT_SHIFT_RXFILTER_CATEGORY_2)
-#define BIT_GET_RXFILTER_CATEGORY_2(x) \
- (((x) >> BIT_SHIFT_RXFILTER_CATEGORY_2) & BIT_MASK_RXFILTER_CATEGORY_2)
-
-/* 2 REG_RXFILTER_ACTION_2 (Offset 0x0685) */
-
-#define BIT_SHIFT_RXFILTER_ACTION_2 0
-#define BIT_MASK_RXFILTER_ACTION_2 0xff
-#define BIT_RXFILTER_ACTION_2(x) \
- (((x) & BIT_MASK_RXFILTER_ACTION_2) << BIT_SHIFT_RXFILTER_ACTION_2)
-#define BIT_GET_RXFILTER_ACTION_2(x) \
- (((x) >> BIT_SHIFT_RXFILTER_ACTION_2) & BIT_MASK_RXFILTER_ACTION_2)
-
-/* 2 REG_RXFILTER_CATEGORY_3 (Offset 0x0686) */
-
-#define BIT_SHIFT_RXFILTER_CATEGORY_3 0
-#define BIT_MASK_RXFILTER_CATEGORY_3 0xff
-#define BIT_RXFILTER_CATEGORY_3(x) \
- (((x) & BIT_MASK_RXFILTER_CATEGORY_3) << BIT_SHIFT_RXFILTER_CATEGORY_3)
-#define BIT_GET_RXFILTER_CATEGORY_3(x) \
- (((x) >> BIT_SHIFT_RXFILTER_CATEGORY_3) & BIT_MASK_RXFILTER_CATEGORY_3)
-
-/* 2 REG_RXFILTER_ACTION_3 (Offset 0x0687) */
-
-#define BIT_SHIFT_RXFILTER_ACTION_3 0
-#define BIT_MASK_RXFILTER_ACTION_3 0xff
-#define BIT_RXFILTER_ACTION_3(x) \
- (((x) & BIT_MASK_RXFILTER_ACTION_3) << BIT_SHIFT_RXFILTER_ACTION_3)
-#define BIT_GET_RXFILTER_ACTION_3(x) \
- (((x) >> BIT_SHIFT_RXFILTER_ACTION_3) & BIT_MASK_RXFILTER_ACTION_3)
-
-/* 2 REG_RXFLTMAP3 (Offset 0x0688) */
-
-#define BIT_MGTFLT15EN_FW BIT(15)
-#define BIT_MGTFLT14EN_FW BIT(14)
-#define BIT_MGTFLT13EN_FW BIT(13)
-#define BIT_MGTFLT12EN_FW BIT(12)
-#define BIT_MGTFLT11EN_FW BIT(11)
-#define BIT_MGTFLT10EN_FW BIT(10)
-#define BIT_MGTFLT9EN_FW BIT(9)
-#define BIT_MGTFLT8EN_FW BIT(8)
-#define BIT_MGTFLT7EN_FW BIT(7)
-#define BIT_MGTFLT6EN_FW BIT(6)
-#define BIT_MGTFLT5EN_FW BIT(5)
-#define BIT_MGTFLT4EN_FW BIT(4)
-#define BIT_MGTFLT3EN_FW BIT(3)
-#define BIT_MGTFLT2EN_FW BIT(2)
-#define BIT_MGTFLT1EN_FW BIT(1)
-#define BIT_MGTFLT0EN_FW BIT(0)
-
-/* 2 REG_RXFLTMAP4 (Offset 0x068A) */
-
-#define BIT_CTRLFLT15EN_FW BIT(15)
-#define BIT_CTRLFLT14EN_FW BIT(14)
-#define BIT_CTRLFLT13EN_FW BIT(13)
-#define BIT_CTRLFLT12EN_FW BIT(12)
-#define BIT_CTRLFLT11EN_FW BIT(11)
-#define BIT_CTRLFLT10EN_FW BIT(10)
-#define BIT_CTRLFLT9EN_FW BIT(9)
-#define BIT_CTRLFLT8EN_FW BIT(8)
-#define BIT_CTRLFLT7EN_FW BIT(7)
-#define BIT_CTRLFLT6EN_FW BIT(6)
-#define BIT_CTRLFLT5EN_FW BIT(5)
-#define BIT_CTRLFLT4EN_FW BIT(4)
-#define BIT_CTRLFLT3EN_FW BIT(3)
-#define BIT_CTRLFLT2EN_FW BIT(2)
-#define BIT_CTRLFLT1EN_FW BIT(1)
-#define BIT_CTRLFLT0EN_FW BIT(0)
-
-/* 2 REG_RXFLTMAP5 (Offset 0x068C) */
-
-#define BIT_DATAFLT15EN_FW BIT(15)
-#define BIT_DATAFLT14EN_FW BIT(14)
-#define BIT_DATAFLT13EN_FW BIT(13)
-#define BIT_DATAFLT12EN_FW BIT(12)
-#define BIT_DATAFLT11EN_FW BIT(11)
-#define BIT_DATAFLT10EN_FW BIT(10)
-#define BIT_DATAFLT9EN_FW BIT(9)
-#define BIT_DATAFLT8EN_FW BIT(8)
-#define BIT_DATAFLT7EN_FW BIT(7)
-#define BIT_DATAFLT6EN_FW BIT(6)
-#define BIT_DATAFLT5EN_FW BIT(5)
-#define BIT_DATAFLT4EN_FW BIT(4)
-#define BIT_DATAFLT3EN_FW BIT(3)
-#define BIT_DATAFLT2EN_FW BIT(2)
-#define BIT_DATAFLT1EN_FW BIT(1)
-#define BIT_DATAFLT0EN_FW BIT(0)
-
-/* 2 REG_RXFLTMAP6 (Offset 0x068E) */
-
-#define BIT_ACTIONFLT15EN_FW BIT(15)
-#define BIT_ACTIONFLT14EN_FW BIT(14)
-#define BIT_ACTIONFLT13EN_FW BIT(13)
-#define BIT_ACTIONFLT12EN_FW BIT(12)
-#define BIT_ACTIONFLT11EN_FW BIT(11)
-#define BIT_ACTIONFLT10EN_FW BIT(10)
-#define BIT_ACTIONFLT9EN_FW BIT(9)
-#define BIT_ACTIONFLT8EN_FW BIT(8)
-#define BIT_ACTIONFLT7EN_FW BIT(7)
-#define BIT_ACTIONFLT6EN_FW BIT(6)
-#define BIT_ACTIONFLT5EN_FW BIT(5)
-#define BIT_ACTIONFLT4EN_FW BIT(4)
-#define BIT_ACTIONFLT3EN_FW BIT(3)
-#define BIT_ACTIONFLT2EN_FW BIT(2)
-#define BIT_ACTIONFLT1EN_FW BIT(1)
-#define BIT_ACTIONFLT0EN_FW BIT(0)
-
-/* 2 REG_WOW_CTRL (Offset 0x0690) */
-
-#define BIT_SHIFT_PSF_BSSIDSEL_B2B1 6
-#define BIT_MASK_PSF_BSSIDSEL_B2B1 0x3
-#define BIT_PSF_BSSIDSEL_B2B1(x) \
- (((x) & BIT_MASK_PSF_BSSIDSEL_B2B1) << BIT_SHIFT_PSF_BSSIDSEL_B2B1)
-#define BIT_GET_PSF_BSSIDSEL_B2B1(x) \
- (((x) >> BIT_SHIFT_PSF_BSSIDSEL_B2B1) & BIT_MASK_PSF_BSSIDSEL_B2B1)
-
-/* 2 REG_WOW_CTRL (Offset 0x0690) */
-
-#define BIT_WOWHCI BIT(5)
-
-/* 2 REG_WOW_CTRL (Offset 0x0690) */
-
-#define BIT_PSF_BSSIDSEL_B0 BIT(4)
-
-/* 2 REG_WOW_CTRL (Offset 0x0690) */
-
-#define BIT_UWF BIT(3)
-#define BIT_MAGIC BIT(2)
-#define BIT_WOWEN BIT(1)
-#define BIT_FORCE_WAKEUP BIT(0)
-
-/* 2 REG_NAN_RX_TSF_FILTER (Offset 0x0691) */
-
-#define BIT_CHK_TSF_TA BIT(2)
-#define BIT_CHK_TSF_CBSSID BIT(1)
-#define BIT_CHK_TSF_EN BIT(0)
-
-/* 2 REG_PS_RX_INFO (Offset 0x0692) */
-
-#define BIT_SHIFT_PORTSEL__PS_RX_INFO 5
-#define BIT_MASK_PORTSEL__PS_RX_INFO 0x7
-#define BIT_PORTSEL__PS_RX_INFO(x) \
- (((x) & BIT_MASK_PORTSEL__PS_RX_INFO) << BIT_SHIFT_PORTSEL__PS_RX_INFO)
-#define BIT_GET_PORTSEL__PS_RX_INFO(x) \
- (((x) >> BIT_SHIFT_PORTSEL__PS_RX_INFO) & BIT_MASK_PORTSEL__PS_RX_INFO)
-
-/* 2 REG_PS_RX_INFO (Offset 0x0692) */
-
-#define BIT_RXCTRLIN0 BIT(4)
-#define BIT_RXMGTIN0 BIT(3)
-#define BIT_RXDATAIN2 BIT(2)
-#define BIT_RXDATAIN1 BIT(1)
-#define BIT_RXDATAIN0 BIT(0)
-
-/* 2 REG_WMMPS_UAPSD_TID (Offset 0x0693) */
-
-#define BIT_WMMPS_UAPSD_TID7 BIT(7)
-#define BIT_WMMPS_UAPSD_TID6 BIT(6)
-#define BIT_WMMPS_UAPSD_TID5 BIT(5)
-#define BIT_WMMPS_UAPSD_TID4 BIT(4)
-#define BIT_WMMPS_UAPSD_TID3 BIT(3)
-#define BIT_WMMPS_UAPSD_TID2 BIT(2)
-#define BIT_WMMPS_UAPSD_TID1 BIT(1)
-#define BIT_WMMPS_UAPSD_TID0 BIT(0)
-
-/* 2 REG_LPNAV_CTRL (Offset 0x0694) */
-
-#define BIT_LPNAV_EN BIT(31)
-
-#define BIT_SHIFT_LPNAV_EARLY 16
-#define BIT_MASK_LPNAV_EARLY 0x7fff
-#define BIT_LPNAV_EARLY(x) \
- (((x) & BIT_MASK_LPNAV_EARLY) << BIT_SHIFT_LPNAV_EARLY)
-#define BIT_GET_LPNAV_EARLY(x) \
- (((x) >> BIT_SHIFT_LPNAV_EARLY) & BIT_MASK_LPNAV_EARLY)
-
-#define BIT_SHIFT_LPNAV_TH 0
-#define BIT_MASK_LPNAV_TH 0xffff
-#define BIT_LPNAV_TH(x) (((x) & BIT_MASK_LPNAV_TH) << BIT_SHIFT_LPNAV_TH)
-#define BIT_GET_LPNAV_TH(x) (((x) >> BIT_SHIFT_LPNAV_TH) & BIT_MASK_LPNAV_TH)
-
-/* 2 REG_WKFMCAM_CMD (Offset 0x0698) */
-
-#define BIT_WKFCAM_POLLING_V1 BIT(31)
-#define BIT_WKFCAM_CLR_V1 BIT(30)
-
-/* 2 REG_WKFMCAM_CMD (Offset 0x0698) */
-
-#define BIT_WKFCAM_WE BIT(16)
-
-/* 2 REG_WKFMCAM_CMD (Offset 0x0698) */
-
-#define BIT_SHIFT_WKFCAM_ADDR_V2 8
-#define BIT_MASK_WKFCAM_ADDR_V2 0xff
-#define BIT_WKFCAM_ADDR_V2(x) \
- (((x) & BIT_MASK_WKFCAM_ADDR_V2) << BIT_SHIFT_WKFCAM_ADDR_V2)
-#define BIT_GET_WKFCAM_ADDR_V2(x) \
- (((x) >> BIT_SHIFT_WKFCAM_ADDR_V2) & BIT_MASK_WKFCAM_ADDR_V2)
-
-#define BIT_SHIFT_WKFCAM_CAM_NUM_V1 0
-#define BIT_MASK_WKFCAM_CAM_NUM_V1 0xff
-#define BIT_WKFCAM_CAM_NUM_V1(x) \
- (((x) & BIT_MASK_WKFCAM_CAM_NUM_V1) << BIT_SHIFT_WKFCAM_CAM_NUM_V1)
-#define BIT_GET_WKFCAM_CAM_NUM_V1(x) \
- (((x) >> BIT_SHIFT_WKFCAM_CAM_NUM_V1) & BIT_MASK_WKFCAM_CAM_NUM_V1)
-
-/* 2 REG_WKFMCAM_RWD (Offset 0x069C) */
-
-#define BIT_SHIFT_WKFMCAM_RWD 0
-#define BIT_MASK_WKFMCAM_RWD 0xffffffffL
-#define BIT_WKFMCAM_RWD(x) \
- (((x) & BIT_MASK_WKFMCAM_RWD) << BIT_SHIFT_WKFMCAM_RWD)
-#define BIT_GET_WKFMCAM_RWD(x) \
- (((x) >> BIT_SHIFT_WKFMCAM_RWD) & BIT_MASK_WKFMCAM_RWD)
-
-/* 2 REG_RXFLTMAP0 (Offset 0x06A0) */
-
-#define BIT_MGTFLT15EN BIT(15)
-#define BIT_MGTFLT14EN BIT(14)
-
-/* 2 REG_RXFLTMAP0 (Offset 0x06A0) */
-
-#define BIT_MGTFLT13EN BIT(13)
-#define BIT_MGTFLT12EN BIT(12)
-#define BIT_MGTFLT11EN BIT(11)
-#define BIT_MGTFLT10EN BIT(10)
-#define BIT_MGTFLT9EN BIT(9)
-#define BIT_MGTFLT8EN BIT(8)
-
-/* 2 REG_RXFLTMAP0 (Offset 0x06A0) */
-
-#define BIT_MGTFLT7EN BIT(7)
-#define BIT_MGTFLT6EN BIT(6)
-
-/* 2 REG_RXFLTMAP0 (Offset 0x06A0) */
-
-#define BIT_MGTFLT5EN BIT(5)
-#define BIT_MGTFLT4EN BIT(4)
-#define BIT_MGTFLT3EN BIT(3)
-#define BIT_MGTFLT2EN BIT(2)
-#define BIT_MGTFLT1EN BIT(1)
-#define BIT_MGTFLT0EN BIT(0)
-
-/* 2 REG_RXFLTMAP1 (Offset 0x06A2) */
-
-#define BIT_CTRLFLT15EN BIT(15)
-#define BIT_CTRLFLT14EN BIT(14)
-#define BIT_CTRLFLT13EN BIT(13)
-#define BIT_CTRLFLT12EN BIT(12)
-#define BIT_CTRLFLT11EN BIT(11)
-#define BIT_CTRLFLT10EN BIT(10)
-#define BIT_CTRLFLT9EN BIT(9)
-#define BIT_CTRLFLT8EN BIT(8)
-#define BIT_CTRLFLT7EN BIT(7)
-#define BIT_CTRLFLT6EN BIT(6)
-
-/* 2 REG_RXFLTMAP1 (Offset 0x06A2) */
-
-#define BIT_CTRLFLT5EN BIT(5)
-#define BIT_CTRLFLT4EN BIT(4)
-#define BIT_CTRLFLT3EN BIT(3)
-#define BIT_CTRLFLT2EN BIT(2)
-#define BIT_CTRLFLT1EN BIT(1)
-#define BIT_CTRLFLT0EN BIT(0)
-
-/* 2 REG_RXFLTMAP (Offset 0x06A4) */
-
-#define BIT_DATAFLT15EN BIT(15)
-#define BIT_DATAFLT14EN BIT(14)
-#define BIT_DATAFLT13EN BIT(13)
-#define BIT_DATAFLT12EN BIT(12)
-#define BIT_DATAFLT11EN BIT(11)
-#define BIT_DATAFLT10EN BIT(10)
-#define BIT_DATAFLT9EN BIT(9)
-#define BIT_DATAFLT8EN BIT(8)
-#define BIT_DATAFLT7EN BIT(7)
-#define BIT_DATAFLT6EN BIT(6)
-#define BIT_DATAFLT5EN BIT(5)
-#define BIT_DATAFLT4EN BIT(4)
-#define BIT_DATAFLT3EN BIT(3)
-#define BIT_DATAFLT2EN BIT(2)
-#define BIT_DATAFLT1EN BIT(1)
-#define BIT_DATAFLT0EN BIT(0)
-
-/* 2 REG_BCN_PSR_RPT (Offset 0x06A8) */
-
-#define BIT_SHIFT_DTIM_CNT 24
-#define BIT_MASK_DTIM_CNT 0xff
-#define BIT_DTIM_CNT(x) (((x) & BIT_MASK_DTIM_CNT) << BIT_SHIFT_DTIM_CNT)
-#define BIT_GET_DTIM_CNT(x) (((x) >> BIT_SHIFT_DTIM_CNT) & BIT_MASK_DTIM_CNT)
-
-#define BIT_SHIFT_DTIM_PERIOD 16
-#define BIT_MASK_DTIM_PERIOD 0xff
-#define BIT_DTIM_PERIOD(x) \
- (((x) & BIT_MASK_DTIM_PERIOD) << BIT_SHIFT_DTIM_PERIOD)
-#define BIT_GET_DTIM_PERIOD(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD) & BIT_MASK_DTIM_PERIOD)
-
-#define BIT_DTIM BIT(15)
-#define BIT_TIM BIT(14)
-
-#define BIT_SHIFT_PS_AID_0 0
-#define BIT_MASK_PS_AID_0 0x7ff
-#define BIT_PS_AID_0(x) (((x) & BIT_MASK_PS_AID_0) << BIT_SHIFT_PS_AID_0)
-#define BIT_GET_PS_AID_0(x) (((x) >> BIT_SHIFT_PS_AID_0) & BIT_MASK_PS_AID_0)
-
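Report-style registers such as REG_BCN_PSR_RPT (0x06A8) pack several fields into one 32-bit word, and the same extract macros are the natural way to parse them. A short sketch, assuming the register is read back as a single 32-bit status word and that reg_read32() is again a placeholder accessor:

u32 rpt = reg_read32(0x06A8);                  /* hypothetical 32-bit read */
u8 dtim_cnt = BIT_GET_DTIM_CNT(rpt);           /* bits [31:24] */
u8 dtim_period = BIT_GET_DTIM_PERIOD(rpt);     /* bits [23:16] */
bool dtim = !!(rpt & BIT_DTIM);                /* bit 15 */
u16 aid = BIT_GET_PS_AID_0(rpt);               /* bits [10:0] */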
-/* 2 REG_FLC_RPC (Offset 0x06AC) */
-
-#define BIT_SHIFT_FLC_RPC 0
-#define BIT_MASK_FLC_RPC 0xff
-#define BIT_FLC_RPC(x) (((x) & BIT_MASK_FLC_RPC) << BIT_SHIFT_FLC_RPC)
-#define BIT_GET_FLC_RPC(x) (((x) >> BIT_SHIFT_FLC_RPC) & BIT_MASK_FLC_RPC)
-
-/* 2 REG_FLC_RPCT (Offset 0x06AD) */
-
-#define BIT_SHIFT_FLC_RPCT 0
-#define BIT_MASK_FLC_RPCT 0xff
-#define BIT_FLC_RPCT(x) (((x) & BIT_MASK_FLC_RPCT) << BIT_SHIFT_FLC_RPCT)
-#define BIT_GET_FLC_RPCT(x) (((x) >> BIT_SHIFT_FLC_RPCT) & BIT_MASK_FLC_RPCT)
-
-/* 2 REG_FLC_PTS (Offset 0x06AE) */
-
-#define BIT_CMF BIT(2)
-#define BIT_CCF BIT(1)
-#define BIT_CDF BIT(0)
-
-/* 2 REG_FLC_TRPC (Offset 0x06AF) */
-
-#define BIT_FLC_RPCT_V1 BIT(7)
-#define BIT_MODE BIT(6)
-
-#define BIT_SHIFT_TRPCD 0
-#define BIT_MASK_TRPCD 0x3f
-#define BIT_TRPCD(x) (((x) & BIT_MASK_TRPCD) << BIT_SHIFT_TRPCD)
-#define BIT_GET_TRPCD(x) (((x) >> BIT_SHIFT_TRPCD) & BIT_MASK_TRPCD)
-
-/* 2 REG_RXPKTMON_CTRL (Offset 0x06B0) */
-
-#define BIT_SHIFT_RXBKQPKT_SEQ 20
-#define BIT_MASK_RXBKQPKT_SEQ 0xf
-#define BIT_RXBKQPKT_SEQ(x) \
- (((x) & BIT_MASK_RXBKQPKT_SEQ) << BIT_SHIFT_RXBKQPKT_SEQ)
-#define BIT_GET_RXBKQPKT_SEQ(x) \
- (((x) >> BIT_SHIFT_RXBKQPKT_SEQ) & BIT_MASK_RXBKQPKT_SEQ)
-
-#define BIT_SHIFT_RXBEQPKT_SEQ 16
-#define BIT_MASK_RXBEQPKT_SEQ 0xf
-#define BIT_RXBEQPKT_SEQ(x) \
- (((x) & BIT_MASK_RXBEQPKT_SEQ) << BIT_SHIFT_RXBEQPKT_SEQ)
-#define BIT_GET_RXBEQPKT_SEQ(x) \
- (((x) >> BIT_SHIFT_RXBEQPKT_SEQ) & BIT_MASK_RXBEQPKT_SEQ)
-
-#define BIT_SHIFT_RXVIQPKT_SEQ 12
-#define BIT_MASK_RXVIQPKT_SEQ 0xf
-#define BIT_RXVIQPKT_SEQ(x) \
- (((x) & BIT_MASK_RXVIQPKT_SEQ) << BIT_SHIFT_RXVIQPKT_SEQ)
-#define BIT_GET_RXVIQPKT_SEQ(x) \
- (((x) >> BIT_SHIFT_RXVIQPKT_SEQ) & BIT_MASK_RXVIQPKT_SEQ)
-
-#define BIT_SHIFT_RXVOQPKT_SEQ 8
-#define BIT_MASK_RXVOQPKT_SEQ 0xf
-#define BIT_RXVOQPKT_SEQ(x) \
- (((x) & BIT_MASK_RXVOQPKT_SEQ) << BIT_SHIFT_RXVOQPKT_SEQ)
-#define BIT_GET_RXVOQPKT_SEQ(x) \
- (((x) >> BIT_SHIFT_RXVOQPKT_SEQ) & BIT_MASK_RXVOQPKT_SEQ)
-
-#define BIT_RXBKQPKT_ERR BIT(7)
-#define BIT_RXBEQPKT_ERR BIT(6)
-#define BIT_RXVIQPKT_ERR BIT(5)
-#define BIT_RXVOQPKT_ERR BIT(4)
-#define BIT_RXDMA_MON_EN BIT(2)
-#define BIT_RXPKT_MON_RST BIT(1)
-#define BIT_RXPKT_MON_EN BIT(0)
-
-/* 2 REG_STATE_MON (Offset 0x06B4) */
-
-#define BIT_SHIFT_STATE_SEL 24
-#define BIT_MASK_STATE_SEL 0x1f
-#define BIT_STATE_SEL(x) (((x) & BIT_MASK_STATE_SEL) << BIT_SHIFT_STATE_SEL)
-#define BIT_GET_STATE_SEL(x) (((x) >> BIT_SHIFT_STATE_SEL) & BIT_MASK_STATE_SEL)
-
-#define BIT_SHIFT_STATE_INFO 8
-#define BIT_MASK_STATE_INFO 0xff
-#define BIT_STATE_INFO(x) (((x) & BIT_MASK_STATE_INFO) << BIT_SHIFT_STATE_INFO)
-#define BIT_GET_STATE_INFO(x) \
- (((x) >> BIT_SHIFT_STATE_INFO) & BIT_MASK_STATE_INFO)
-
-#define BIT_UPD_NXT_STATE BIT(7)
-
-/* 2 REG_STATE_MON (Offset 0x06B4) */
-
-#define BIT_SHIFT_CUR_STATE 0
-#define BIT_MASK_CUR_STATE 0x7f
-#define BIT_CUR_STATE(x) (((x) & BIT_MASK_CUR_STATE) << BIT_SHIFT_CUR_STATE)
-#define BIT_GET_CUR_STATE(x) (((x) >> BIT_SHIFT_CUR_STATE) & BIT_MASK_CUR_STATE)
-
-/* 2 REG_ERROR_MON (Offset 0x06B8) */
-
-#define BIT_MACRX_ERR_1 BIT(17)
-#define BIT_MACRX_ERR_0 BIT(16)
-#define BIT_MACTX_ERR_3 BIT(3)
-#define BIT_MACTX_ERR_2 BIT(2)
-#define BIT_MACTX_ERR_1 BIT(1)
-#define BIT_MACTX_ERR_0 BIT(0)
-
-/* 2 REG_SEARCH_MACID (Offset 0x06BC) */
-
-#define BIT_EN_TXRPTBUF_CLK BIT(31)
-
-#define BIT_SHIFT_INFO_INDEX_OFFSET 16
-#define BIT_MASK_INFO_INDEX_OFFSET 0x1fff
-#define BIT_INFO_INDEX_OFFSET(x) \
- (((x) & BIT_MASK_INFO_INDEX_OFFSET) << BIT_SHIFT_INFO_INDEX_OFFSET)
-#define BIT_GET_INFO_INDEX_OFFSET(x) \
- (((x) >> BIT_SHIFT_INFO_INDEX_OFFSET) & BIT_MASK_INFO_INDEX_OFFSET)
-
-/* 2 REG_SEARCH_MACID (Offset 0x06BC) */
-
-#define BIT_WMAC_SRCH_FIFOFULL BIT(15)
-
-/* 2 REG_SEARCH_MACID (Offset 0x06BC) */
-
-#define BIT_DIS_INFOSRCH BIT(14)
-#define BIT_DISABLE_B0 BIT(13)
-
-#define BIT_SHIFT_INFO_ADDR_OFFSET 0
-#define BIT_MASK_INFO_ADDR_OFFSET 0x1fff
-#define BIT_INFO_ADDR_OFFSET(x) \
- (((x) & BIT_MASK_INFO_ADDR_OFFSET) << BIT_SHIFT_INFO_ADDR_OFFSET)
-#define BIT_GET_INFO_ADDR_OFFSET(x) \
- (((x) >> BIT_SHIFT_INFO_ADDR_OFFSET) & BIT_MASK_INFO_ADDR_OFFSET)
-
-/* 2 REG_BT_COEX_TABLE (Offset 0x06C0) */
-
-#define BIT_PRI_MASK_RX_RESP BIT(126)
-#define BIT_PRI_MASK_RXOFDM BIT(125)
-#define BIT_PRI_MASK_RXCCK BIT(124)
-
-#define BIT_SHIFT_PRI_MASK_TXAC (117 & CPU_OPT_WIDTH)
-#define BIT_MASK_PRI_MASK_TXAC 0x7f
-#define BIT_PRI_MASK_TXAC(x) \
- (((x) & BIT_MASK_PRI_MASK_TXAC) << BIT_SHIFT_PRI_MASK_TXAC)
-#define BIT_GET_PRI_MASK_TXAC(x) \
- (((x) >> BIT_SHIFT_PRI_MASK_TXAC) & BIT_MASK_PRI_MASK_TXAC)
-
-#define BIT_SHIFT_PRI_MASK_NAV (109 & CPU_OPT_WIDTH)
-#define BIT_MASK_PRI_MASK_NAV 0xff
-#define BIT_PRI_MASK_NAV(x) \
- (((x) & BIT_MASK_PRI_MASK_NAV) << BIT_SHIFT_PRI_MASK_NAV)
-#define BIT_GET_PRI_MASK_NAV(x) \
- (((x) >> BIT_SHIFT_PRI_MASK_NAV) & BIT_MASK_PRI_MASK_NAV)
-
-#define BIT_PRI_MASK_CCK BIT(108)
-#define BIT_PRI_MASK_OFDM BIT(107)
-#define BIT_PRI_MASK_RTY BIT(106)
-
-#define BIT_SHIFT_PRI_MASK_NUM (102 & CPU_OPT_WIDTH)
-#define BIT_MASK_PRI_MASK_NUM 0xf
-#define BIT_PRI_MASK_NUM(x) \
- (((x) & BIT_MASK_PRI_MASK_NUM) << BIT_SHIFT_PRI_MASK_NUM)
-#define BIT_GET_PRI_MASK_NUM(x) \
- (((x) >> BIT_SHIFT_PRI_MASK_NUM) & BIT_MASK_PRI_MASK_NUM)
-
-#define BIT_SHIFT_PRI_MASK_TYPE (98 & CPU_OPT_WIDTH)
-#define BIT_MASK_PRI_MASK_TYPE 0xf
-#define BIT_PRI_MASK_TYPE(x) \
- (((x) & BIT_MASK_PRI_MASK_TYPE) << BIT_SHIFT_PRI_MASK_TYPE)
-#define BIT_GET_PRI_MASK_TYPE(x) \
- (((x) >> BIT_SHIFT_PRI_MASK_TYPE) & BIT_MASK_PRI_MASK_TYPE)
-
-#define BIT_OOB BIT(97)
-#define BIT_ANT_SEL BIT(96)
-
-#define BIT_SHIFT_BREAK_TABLE_2 (80 & CPU_OPT_WIDTH)
-#define BIT_MASK_BREAK_TABLE_2 0xffff
-#define BIT_BREAK_TABLE_2(x) \
- (((x) & BIT_MASK_BREAK_TABLE_2) << BIT_SHIFT_BREAK_TABLE_2)
-#define BIT_GET_BREAK_TABLE_2(x) \
- (((x) >> BIT_SHIFT_BREAK_TABLE_2) & BIT_MASK_BREAK_TABLE_2)
-
-#define BIT_SHIFT_BREAK_TABLE_1 (64 & CPU_OPT_WIDTH)
-#define BIT_MASK_BREAK_TABLE_1 0xffff
-#define BIT_BREAK_TABLE_1(x) \
- (((x) & BIT_MASK_BREAK_TABLE_1) << BIT_SHIFT_BREAK_TABLE_1)
-#define BIT_GET_BREAK_TABLE_1(x) \
- (((x) >> BIT_SHIFT_BREAK_TABLE_1) & BIT_MASK_BREAK_TABLE_1)
-
-#define BIT_SHIFT_COEX_TABLE_2 (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_COEX_TABLE_2 0xffffffffL
-#define BIT_COEX_TABLE_2(x) \
- (((x) & BIT_MASK_COEX_TABLE_2) << BIT_SHIFT_COEX_TABLE_2)
-#define BIT_GET_COEX_TABLE_2(x) \
- (((x) >> BIT_SHIFT_COEX_TABLE_2) & BIT_MASK_COEX_TABLE_2)
-
-#define BIT_SHIFT_COEX_TABLE_1 0
-#define BIT_MASK_COEX_TABLE_1 0xffffffffL
-#define BIT_COEX_TABLE_1(x) \
- (((x) & BIT_MASK_COEX_TABLE_1) << BIT_SHIFT_COEX_TABLE_1)
-#define BIT_GET_COEX_TABLE_1(x) \
- (((x) >> BIT_SHIFT_COEX_TABLE_1) & BIT_MASK_COEX_TABLE_1)
-
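Fields documented above bit 31, such as the BT coexistence table entries, describe registers that span several 32-bit words; the (N & CPU_OPT_WIDTH) expressions fold the documented bit position into a word-local shift, with CPU_OPT_WIDTH defined elsewhere in this header. The sketch below assumes CPU_OPT_WIDTH is the 0x1f word-width mask, so for example PRI_MASK_NAV at documented bits [116:109] lands at bits [20:13] of the fourth 32-bit word of REG_BT_COEX_TABLE (0x06C0); reg_read32() remains a placeholder accessor.

u32 word3 = reg_read32(0x06C0 + 0xC);          /* bits [127:96] of the coex table */
u8 pri_mask_nav = BIT_GET_PRI_MASK_NAV(word3); /* documented [116:109] -> word bits [20:13] */
u8 pri_mask_num = BIT_GET_PRI_MASK_NUM(word3); /* documented [105:102] -> word bits [9:6] */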
-/* 2 REG_RXCMD_0 (Offset 0x06D0) */
-
-#define BIT_RXCMD_EN BIT(31)
-
-#define BIT_SHIFT_RXCMD_INFO 0
-#define BIT_MASK_RXCMD_INFO 0x7fffffffL
-#define BIT_RXCMD_INFO(x) (((x) & BIT_MASK_RXCMD_INFO) << BIT_SHIFT_RXCMD_INFO)
-#define BIT_GET_RXCMD_INFO(x) \
- (((x) >> BIT_SHIFT_RXCMD_INFO) & BIT_MASK_RXCMD_INFO)
-
-/* 2 REG_RXCMD_1 (Offset 0x06D4) */
-
-#define BIT_SHIFT_RXCMD_PRD 0
-#define BIT_MASK_RXCMD_PRD 0xffff
-#define BIT_RXCMD_PRD(x) (((x) & BIT_MASK_RXCMD_PRD) << BIT_SHIFT_RXCMD_PRD)
-#define BIT_GET_RXCMD_PRD(x) (((x) >> BIT_SHIFT_RXCMD_PRD) & BIT_MASK_RXCMD_PRD)
-
-/* 2 REG_WMAC_RESP_TXINFO (Offset 0x06D8) */
-
-#define BIT_SHIFT_WMAC_RESP_MFB 25
-#define BIT_MASK_WMAC_RESP_MFB 0x7f
-#define BIT_WMAC_RESP_MFB(x) \
- (((x) & BIT_MASK_WMAC_RESP_MFB) << BIT_SHIFT_WMAC_RESP_MFB)
-#define BIT_GET_WMAC_RESP_MFB(x) \
- (((x) >> BIT_SHIFT_WMAC_RESP_MFB) & BIT_MASK_WMAC_RESP_MFB)
-
-#define BIT_SHIFT_WMAC_ANTINF_SEL 23
-#define BIT_MASK_WMAC_ANTINF_SEL 0x3
-#define BIT_WMAC_ANTINF_SEL(x) \
- (((x) & BIT_MASK_WMAC_ANTINF_SEL) << BIT_SHIFT_WMAC_ANTINF_SEL)
-#define BIT_GET_WMAC_ANTINF_SEL(x) \
- (((x) >> BIT_SHIFT_WMAC_ANTINF_SEL) & BIT_MASK_WMAC_ANTINF_SEL)
-
-#define BIT_SHIFT_WMAC_ANTSEL_SEL 21
-#define BIT_MASK_WMAC_ANTSEL_SEL 0x3
-#define BIT_WMAC_ANTSEL_SEL(x) \
- (((x) & BIT_MASK_WMAC_ANTSEL_SEL) << BIT_SHIFT_WMAC_ANTSEL_SEL)
-#define BIT_GET_WMAC_ANTSEL_SEL(x) \
- (((x) >> BIT_SHIFT_WMAC_ANTSEL_SEL) & BIT_MASK_WMAC_ANTSEL_SEL)
-
-/* 2 REG_WMAC_RESP_TXINFO (Offset 0x06D8) */
-
-#define BIT_SHIFT_R_WMAC_RESP_TXPOWER 18
-#define BIT_MASK_R_WMAC_RESP_TXPOWER 0x7
-#define BIT_R_WMAC_RESP_TXPOWER(x) \
- (((x) & BIT_MASK_R_WMAC_RESP_TXPOWER) << BIT_SHIFT_R_WMAC_RESP_TXPOWER)
-#define BIT_GET_R_WMAC_RESP_TXPOWER(x) \
- (((x) >> BIT_SHIFT_R_WMAC_RESP_TXPOWER) & BIT_MASK_R_WMAC_RESP_TXPOWER)
-
-/* 2 REG_WMAC_RESP_TXINFO (Offset 0x06D8) */
-
-#define BIT_SHIFT_WMAC_RESP_TXANT 0
-#define BIT_MASK_WMAC_RESP_TXANT 0x3ffff
-#define BIT_WMAC_RESP_TXANT(x) \
- (((x) & BIT_MASK_WMAC_RESP_TXANT) << BIT_SHIFT_WMAC_RESP_TXANT)
-#define BIT_GET_WMAC_RESP_TXANT(x) \
- (((x) >> BIT_SHIFT_WMAC_RESP_TXANT) & BIT_MASK_WMAC_RESP_TXANT)
-
-/* 2 REG_BBPSF_CTRL (Offset 0x06DC) */
-
-#define BIT_CTL_IDLE_CLR_CSI_RPT BIT(31)
-
-/* 2 REG_BBPSF_CTRL (Offset 0x06DC) */
-
-#define BIT_WMAC_USE_NDPARATE BIT(30)
-
-#define BIT_SHIFT_WMAC_CSI_RATE 24
-#define BIT_MASK_WMAC_CSI_RATE 0x3f
-#define BIT_WMAC_CSI_RATE(x) \
- (((x) & BIT_MASK_WMAC_CSI_RATE) << BIT_SHIFT_WMAC_CSI_RATE)
-#define BIT_GET_WMAC_CSI_RATE(x) \
- (((x) >> BIT_SHIFT_WMAC_CSI_RATE) & BIT_MASK_WMAC_CSI_RATE)
-
-#define BIT_SHIFT_WMAC_RESP_TXRATE 16
-#define BIT_MASK_WMAC_RESP_TXRATE 0xff
-#define BIT_WMAC_RESP_TXRATE(x) \
- (((x) & BIT_MASK_WMAC_RESP_TXRATE) << BIT_SHIFT_WMAC_RESP_TXRATE)
-#define BIT_GET_WMAC_RESP_TXRATE(x) \
- (((x) >> BIT_SHIFT_WMAC_RESP_TXRATE) & BIT_MASK_WMAC_RESP_TXRATE)
-
-/* 2 REG_BBPSF_CTRL (Offset 0x06DC) */
-
-#define BIT_BBPSF_MPDUCHKEN BIT(5)
-
-/* 2 REG_BBPSF_CTRL (Offset 0x06DC) */
-
-#define BIT_BBPSF_MHCHKEN BIT(4)
-#define BIT_BBPSF_ERRCHKEN BIT(3)
-
-#define BIT_SHIFT_BBPSF_ERRTHR 0
-#define BIT_MASK_BBPSF_ERRTHR 0x7
-#define BIT_BBPSF_ERRTHR(x) \
- (((x) & BIT_MASK_BBPSF_ERRTHR) << BIT_SHIFT_BBPSF_ERRTHR)
-#define BIT_GET_BBPSF_ERRTHR(x) \
- (((x) >> BIT_SHIFT_BBPSF_ERRTHR) & BIT_MASK_BBPSF_ERRTHR)
-
-/* 2 REG_P2P_RX_BCN_NOA (Offset 0x06E0) */
-
-#define BIT_NOA_PARSER_EN BIT(15)
-
-/* 2 REG_P2P_RX_BCN_NOA (Offset 0x06E0) */
-
-#define BIT_BSSID_SEL BIT(14)
-
-/* 2 REG_P2P_RX_BCN_NOA (Offset 0x06E0) */
-
-#define BIT_SHIFT_P2P_OUI_TYPE 0
-#define BIT_MASK_P2P_OUI_TYPE 0xff
-#define BIT_P2P_OUI_TYPE(x) \
- (((x) & BIT_MASK_P2P_OUI_TYPE) << BIT_SHIFT_P2P_OUI_TYPE)
-#define BIT_GET_P2P_OUI_TYPE(x) \
- (((x) >> BIT_SHIFT_P2P_OUI_TYPE) & BIT_MASK_P2P_OUI_TYPE)
-
-/* 2 REG_ASSOCIATED_BFMER0_INFO (Offset 0x06E4) */
-
-#define BIT_SHIFT_R_WMAC_TXCSI_AID0 (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_TXCSI_AID0 0x1ff
-#define BIT_R_WMAC_TXCSI_AID0(x) \
- (((x) & BIT_MASK_R_WMAC_TXCSI_AID0) << BIT_SHIFT_R_WMAC_TXCSI_AID0)
-#define BIT_GET_R_WMAC_TXCSI_AID0(x) \
- (((x) >> BIT_SHIFT_R_WMAC_TXCSI_AID0) & BIT_MASK_R_WMAC_TXCSI_AID0)
-
-#define BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R0 0
-#define BIT_MASK_R_WMAC_SOUNDING_RXADD_R0 0xffffffffffffL
-#define BIT_R_WMAC_SOUNDING_RXADD_R0(x) \
- (((x) & BIT_MASK_R_WMAC_SOUNDING_RXADD_R0) \
- << BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R0)
-#define BIT_GET_R_WMAC_SOUNDING_RXADD_R0(x) \
- (((x) >> BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R0) & \
- BIT_MASK_R_WMAC_SOUNDING_RXADD_R0)
-
-/* 2 REG_ASSOCIATED_BFMER1_INFO (Offset 0x06EC) */
-
-#define BIT_SHIFT_R_WMAC_TXCSI_AID1 (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_TXCSI_AID1 0x1ff
-#define BIT_R_WMAC_TXCSI_AID1(x) \
- (((x) & BIT_MASK_R_WMAC_TXCSI_AID1) << BIT_SHIFT_R_WMAC_TXCSI_AID1)
-#define BIT_GET_R_WMAC_TXCSI_AID1(x) \
- (((x) >> BIT_SHIFT_R_WMAC_TXCSI_AID1) & BIT_MASK_R_WMAC_TXCSI_AID1)
-
-#define BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R1 0
-#define BIT_MASK_R_WMAC_SOUNDING_RXADD_R1 0xffffffffffffL
-#define BIT_R_WMAC_SOUNDING_RXADD_R1(x) \
- (((x) & BIT_MASK_R_WMAC_SOUNDING_RXADD_R1) \
- << BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R1)
-#define BIT_GET_R_WMAC_SOUNDING_RXADD_R1(x) \
- (((x) >> BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R1) & \
- BIT_MASK_R_WMAC_SOUNDING_RXADD_R1)
-
-/* 2 REG_TX_CSI_RPT_PARAM_BW20 (Offset 0x06F4) */
-
-#define BIT_SHIFT_R_WMAC_BFINFO_20M_1 16
-#define BIT_MASK_R_WMAC_BFINFO_20M_1 0xfff
-#define BIT_R_WMAC_BFINFO_20M_1(x) \
- (((x) & BIT_MASK_R_WMAC_BFINFO_20M_1) << BIT_SHIFT_R_WMAC_BFINFO_20M_1)
-#define BIT_GET_R_WMAC_BFINFO_20M_1(x) \
- (((x) >> BIT_SHIFT_R_WMAC_BFINFO_20M_1) & BIT_MASK_R_WMAC_BFINFO_20M_1)
-
-#define BIT_SHIFT_R_WMAC_BFINFO_20M_0 0
-#define BIT_MASK_R_WMAC_BFINFO_20M_0 0xfff
-#define BIT_R_WMAC_BFINFO_20M_0(x) \
- (((x) & BIT_MASK_R_WMAC_BFINFO_20M_0) << BIT_SHIFT_R_WMAC_BFINFO_20M_0)
-#define BIT_GET_R_WMAC_BFINFO_20M_0(x) \
- (((x) >> BIT_SHIFT_R_WMAC_BFINFO_20M_0) & BIT_MASK_R_WMAC_BFINFO_20M_0)
-
-/* 2 REG_TX_CSI_RPT_PARAM_BW40 (Offset 0x06F8) */
-
-#define BIT_SHIFT_WMAC_RESP_ANTCD 0
-#define BIT_MASK_WMAC_RESP_ANTCD 0xf
-#define BIT_WMAC_RESP_ANTCD(x) \
- (((x) & BIT_MASK_WMAC_RESP_ANTCD) << BIT_SHIFT_WMAC_RESP_ANTCD)
-#define BIT_GET_WMAC_RESP_ANTCD(x) \
- (((x) >> BIT_SHIFT_WMAC_RESP_ANTCD) & BIT_MASK_WMAC_RESP_ANTCD)
-
-/* 2 REG_MACID1 (Offset 0x0700) */
-
-#define BIT_SHIFT_MACID1 0
-#define BIT_MASK_MACID1 0xffffffffffffL
-#define BIT_MACID1(x) (((x) & BIT_MASK_MACID1) << BIT_SHIFT_MACID1)
-#define BIT_GET_MACID1(x) (((x) >> BIT_SHIFT_MACID1) & BIT_MASK_MACID1)
-
-/* 2 REG_BSSID1 (Offset 0x0708) */
-
-#define BIT_SHIFT_BSSID1 0
-#define BIT_MASK_BSSID1 0xffffffffffffL
-#define BIT_BSSID1(x) (((x) & BIT_MASK_BSSID1) << BIT_SHIFT_BSSID1)
-#define BIT_GET_BSSID1(x) (((x) >> BIT_SHIFT_BSSID1) & BIT_MASK_BSSID1)
-
-/* 2 REG_BCN_PSR_RPT1 (Offset 0x0710) */
-
-#define BIT_SHIFT_DTIM_CNT1 24
-#define BIT_MASK_DTIM_CNT1 0xff
-#define BIT_DTIM_CNT1(x) (((x) & BIT_MASK_DTIM_CNT1) << BIT_SHIFT_DTIM_CNT1)
-#define BIT_GET_DTIM_CNT1(x) (((x) >> BIT_SHIFT_DTIM_CNT1) & BIT_MASK_DTIM_CNT1)
-
-#define BIT_SHIFT_DTIM_PERIOD1 16
-#define BIT_MASK_DTIM_PERIOD1 0xff
-#define BIT_DTIM_PERIOD1(x) \
- (((x) & BIT_MASK_DTIM_PERIOD1) << BIT_SHIFT_DTIM_PERIOD1)
-#define BIT_GET_DTIM_PERIOD1(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD1) & BIT_MASK_DTIM_PERIOD1)
-
-#define BIT_DTIM1 BIT(15)
-#define BIT_TIM1 BIT(14)
-
-#define BIT_SHIFT_PS_AID_1 0
-#define BIT_MASK_PS_AID_1 0x7ff
-#define BIT_PS_AID_1(x) (((x) & BIT_MASK_PS_AID_1) << BIT_SHIFT_PS_AID_1)
-#define BIT_GET_PS_AID_1(x) (((x) >> BIT_SHIFT_PS_AID_1) & BIT_MASK_PS_AID_1)
-
-/* 2 REG_ASSOCIATED_BFMEE_SEL (Offset 0x0714) */
-
-#define BIT_TXUSER_ID1 BIT(25)
-
-#define BIT_SHIFT_AID1 16
-#define BIT_MASK_AID1 0x1ff
-#define BIT_AID1(x) (((x) & BIT_MASK_AID1) << BIT_SHIFT_AID1)
-#define BIT_GET_AID1(x) (((x) >> BIT_SHIFT_AID1) & BIT_MASK_AID1)
-
-#define BIT_TXUSER_ID0 BIT(9)
-
-#define BIT_SHIFT_AID0 0
-#define BIT_MASK_AID0 0x1ff
-#define BIT_AID0(x) (((x) & BIT_MASK_AID0) << BIT_SHIFT_AID0)
-#define BIT_GET_AID0(x) (((x) >> BIT_SHIFT_AID0) & BIT_MASK_AID0)
-
-/* 2 REG_SND_PTCL_CTRL (Offset 0x0718) */
-
-#define BIT_SHIFT_NDP_RX_STANDBY_TIMER 24
-#define BIT_MASK_NDP_RX_STANDBY_TIMER 0xff
-#define BIT_NDP_RX_STANDBY_TIMER(x) \
- (((x) & BIT_MASK_NDP_RX_STANDBY_TIMER) \
- << BIT_SHIFT_NDP_RX_STANDBY_TIMER)
-#define BIT_GET_NDP_RX_STANDBY_TIMER(x) \
- (((x) >> BIT_SHIFT_NDP_RX_STANDBY_TIMER) & \
- BIT_MASK_NDP_RX_STANDBY_TIMER)
-
-#define BIT_SHIFT_CSI_RPT_OFFSET_HT 16
-#define BIT_MASK_CSI_RPT_OFFSET_HT 0xff
-#define BIT_CSI_RPT_OFFSET_HT(x) \
- (((x) & BIT_MASK_CSI_RPT_OFFSET_HT) << BIT_SHIFT_CSI_RPT_OFFSET_HT)
-#define BIT_GET_CSI_RPT_OFFSET_HT(x) \
- (((x) >> BIT_SHIFT_CSI_RPT_OFFSET_HT) & BIT_MASK_CSI_RPT_OFFSET_HT)
-
-/* 2 REG_SND_PTCL_CTRL (Offset 0x0718) */
-
-#define BIT_SHIFT_R_WMAC_VHT_CATEGORY 8
-#define BIT_MASK_R_WMAC_VHT_CATEGORY 0xff
-#define BIT_R_WMAC_VHT_CATEGORY(x) \
- (((x) & BIT_MASK_R_WMAC_VHT_CATEGORY) << BIT_SHIFT_R_WMAC_VHT_CATEGORY)
-#define BIT_GET_R_WMAC_VHT_CATEGORY(x) \
- (((x) >> BIT_SHIFT_R_WMAC_VHT_CATEGORY) & BIT_MASK_R_WMAC_VHT_CATEGORY)
-
-/* 2 REG_SND_PTCL_CTRL (Offset 0x0718) */
-
-#define BIT_R_WMAC_USE_NSTS BIT(7)
-#define BIT_R_DISABLE_CHECK_VHTSIGB_CRC BIT(6)
-#define BIT_R_DISABLE_CHECK_VHTSIGA_CRC BIT(5)
-#define BIT_R_WMAC_BFPARAM_SEL BIT(4)
-#define BIT_R_WMAC_CSISEQ_SEL BIT(3)
-#define BIT_R_WMAC_CSI_WITHHTC_EN BIT(2)
-#define BIT_R_WMAC_HT_NDPA_EN BIT(1)
-#define BIT_R_WMAC_VHT_NDPA_EN BIT(0)
-
-/* 2 REG_NS_ARP_CTRL (Offset 0x0720) */
-
-#define BIT_R_WMAC_NSARP_RSPEN BIT(15)
-#define BIT_R_WMAC_NSARP_RARP BIT(9)
-#define BIT_R_WMAC_NSARP_RIPV6 BIT(8)
-
-#define BIT_SHIFT_R_WMAC_NSARP_MODEN 6
-#define BIT_MASK_R_WMAC_NSARP_MODEN 0x3
-#define BIT_R_WMAC_NSARP_MODEN(x) \
- (((x) & BIT_MASK_R_WMAC_NSARP_MODEN) << BIT_SHIFT_R_WMAC_NSARP_MODEN)
-#define BIT_GET_R_WMAC_NSARP_MODEN(x) \
- (((x) >> BIT_SHIFT_R_WMAC_NSARP_MODEN) & BIT_MASK_R_WMAC_NSARP_MODEN)
-
-#define BIT_SHIFT_R_WMAC_NSARP_RSPFTP 4
-#define BIT_MASK_R_WMAC_NSARP_RSPFTP 0x3
-#define BIT_R_WMAC_NSARP_RSPFTP(x) \
- (((x) & BIT_MASK_R_WMAC_NSARP_RSPFTP) << BIT_SHIFT_R_WMAC_NSARP_RSPFTP)
-#define BIT_GET_R_WMAC_NSARP_RSPFTP(x) \
- (((x) >> BIT_SHIFT_R_WMAC_NSARP_RSPFTP) & BIT_MASK_R_WMAC_NSARP_RSPFTP)
-
-#define BIT_SHIFT_R_WMAC_NSARP_RSPSEC 0
-#define BIT_MASK_R_WMAC_NSARP_RSPSEC 0xf
-#define BIT_R_WMAC_NSARP_RSPSEC(x) \
- (((x) & BIT_MASK_R_WMAC_NSARP_RSPSEC) << BIT_SHIFT_R_WMAC_NSARP_RSPSEC)
-#define BIT_GET_R_WMAC_NSARP_RSPSEC(x) \
- (((x) >> BIT_SHIFT_R_WMAC_NSARP_RSPSEC) & BIT_MASK_R_WMAC_NSARP_RSPSEC)
-
-/* 2 REG_NS_ARP_INFO (Offset 0x0724) */
-
-#define BIT_REQ_IS_MCNS BIT(23)
-#define BIT_REQ_IS_UCNS BIT(22)
-#define BIT_REQ_IS_USNS BIT(21)
-#define BIT_REQ_IS_ARP BIT(20)
-#define BIT_EXPRSP_MH_WITHQC BIT(19)
-
-#define BIT_SHIFT_EXPRSP_SECTYPE 16
-#define BIT_MASK_EXPRSP_SECTYPE 0x7
-#define BIT_EXPRSP_SECTYPE(x) \
- (((x) & BIT_MASK_EXPRSP_SECTYPE) << BIT_SHIFT_EXPRSP_SECTYPE)
-#define BIT_GET_EXPRSP_SECTYPE(x) \
- (((x) >> BIT_SHIFT_EXPRSP_SECTYPE) & BIT_MASK_EXPRSP_SECTYPE)
-
-#define BIT_SHIFT_EXPRSP_CHKSM_7_TO_0 8
-#define BIT_MASK_EXPRSP_CHKSM_7_TO_0 0xff
-#define BIT_EXPRSP_CHKSM_7_TO_0(x) \
- (((x) & BIT_MASK_EXPRSP_CHKSM_7_TO_0) << BIT_SHIFT_EXPRSP_CHKSM_7_TO_0)
-#define BIT_GET_EXPRSP_CHKSM_7_TO_0(x) \
- (((x) >> BIT_SHIFT_EXPRSP_CHKSM_7_TO_0) & BIT_MASK_EXPRSP_CHKSM_7_TO_0)
-
-#define BIT_SHIFT_EXPRSP_CHKSM_15_TO_8 0
-#define BIT_MASK_EXPRSP_CHKSM_15_TO_8 0xff
-#define BIT_EXPRSP_CHKSM_15_TO_8(x) \
- (((x) & BIT_MASK_EXPRSP_CHKSM_15_TO_8) \
- << BIT_SHIFT_EXPRSP_CHKSM_15_TO_8)
-#define BIT_GET_EXPRSP_CHKSM_15_TO_8(x) \
- (((x) >> BIT_SHIFT_EXPRSP_CHKSM_15_TO_8) & \
- BIT_MASK_EXPRSP_CHKSM_15_TO_8)
-
-/* 2 REG_BEAMFORMING_INFO_NSARP_V1 (Offset 0x0728) */
-
-#define BIT_SHIFT_WMAC_ARPIP 0
-#define BIT_MASK_WMAC_ARPIP 0xffffffffL
-#define BIT_WMAC_ARPIP(x) (((x) & BIT_MASK_WMAC_ARPIP) << BIT_SHIFT_WMAC_ARPIP)
-#define BIT_GET_WMAC_ARPIP(x) \
- (((x) >> BIT_SHIFT_WMAC_ARPIP) & BIT_MASK_WMAC_ARPIP)
-
-/* 2 REG_BEAMFORMING_INFO_NSARP (Offset 0x072C) */
-
-#define BIT_SHIFT_BEAMFORMING_INFO 0
-#define BIT_MASK_BEAMFORMING_INFO 0xffffffffL
-#define BIT_BEAMFORMING_INFO(x) \
- (((x) & BIT_MASK_BEAMFORMING_INFO) << BIT_SHIFT_BEAMFORMING_INFO)
-#define BIT_GET_BEAMFORMING_INFO(x) \
- (((x) >> BIT_SHIFT_BEAMFORMING_INFO) & BIT_MASK_BEAMFORMING_INFO)
-
-/* 2 REG_WMAC_RTX_CTX_SUBTYPE_CFG (Offset 0x0750) */
-
-#define BIT_SHIFT_R_WMAC_CTX_SUBTYPE 4
-#define BIT_MASK_R_WMAC_CTX_SUBTYPE 0xf
-#define BIT_R_WMAC_CTX_SUBTYPE(x) \
- (((x) & BIT_MASK_R_WMAC_CTX_SUBTYPE) << BIT_SHIFT_R_WMAC_CTX_SUBTYPE)
-#define BIT_GET_R_WMAC_CTX_SUBTYPE(x) \
- (((x) >> BIT_SHIFT_R_WMAC_CTX_SUBTYPE) & BIT_MASK_R_WMAC_CTX_SUBTYPE)
-
-#define BIT_SHIFT_R_WMAC_RTX_SUBTYPE 0
-#define BIT_MASK_R_WMAC_RTX_SUBTYPE 0xf
-#define BIT_R_WMAC_RTX_SUBTYPE(x) \
- (((x) & BIT_MASK_R_WMAC_RTX_SUBTYPE) << BIT_SHIFT_R_WMAC_RTX_SUBTYPE)
-#define BIT_GET_R_WMAC_RTX_SUBTYPE(x) \
- (((x) >> BIT_SHIFT_R_WMAC_RTX_SUBTYPE) & BIT_MASK_R_WMAC_RTX_SUBTYPE)
-
-/* 2 REG_BT_COEX_V2 (Offset 0x0762) */
-
-#define BIT_GNT_BT_POLARITY BIT(12)
-#define BIT_GNT_BT_BYPASS_PRIORITY BIT(8)
-
-#define BIT_SHIFT_TIMER 0
-#define BIT_MASK_TIMER 0xff
-#define BIT_TIMER(x) (((x) & BIT_MASK_TIMER) << BIT_SHIFT_TIMER)
-#define BIT_GET_TIMER(x) (((x) >> BIT_SHIFT_TIMER) & BIT_MASK_TIMER)
-
-/* 2 REG_BT_COEX (Offset 0x0764) */
-
-#define BIT_R_GNT_BT_RFC_SW BIT(12)
-#define BIT_R_GNT_BT_RFC_SW_EN BIT(11)
-#define BIT_R_GNT_BT_BB_SW BIT(10)
-#define BIT_R_GNT_BT_BB_SW_EN BIT(9)
-#define BIT_R_BT_CNT_THREN BIT(8)
-
-#define BIT_SHIFT_R_BT_CNT_THR 0
-#define BIT_MASK_R_BT_CNT_THR 0xff
-#define BIT_R_BT_CNT_THR(x) \
- (((x) & BIT_MASK_R_BT_CNT_THR) << BIT_SHIFT_R_BT_CNT_THR)
-#define BIT_GET_R_BT_CNT_THR(x) \
- (((x) >> BIT_SHIFT_R_BT_CNT_THR) & BIT_MASK_R_BT_CNT_THR)
-
-/* 2 REG_WLAN_ACT_MASK_CTRL (Offset 0x0768) */
-
-#define BIT_WLRX_TER_BY_CTL BIT(43)
-#define BIT_WLRX_TER_BY_AD BIT(42)
-#define BIT_ANT_DIVERSITY_SEL BIT(41)
-#define BIT_ANTSEL_FOR_BT_CTRL_EN BIT(40)
-#define BIT_WLACT_LOW_GNTWL_EN BIT(34)
-#define BIT_WLACT_HIGH_GNTBT_EN BIT(33)
-
-/* 2 REG_WLAN_ACT_MASK_CTRL (Offset 0x0768) */
-
-#define BIT_NAV_UPPER_V1 BIT(32)
-
-/* 2 REG_WLAN_ACT_MASK_CTRL (Offset 0x0768) */
-
-#define BIT_SHIFT_RXMYRTS_NAV_V1 8
-#define BIT_MASK_RXMYRTS_NAV_V1 0xff
-#define BIT_RXMYRTS_NAV_V1(x) \
- (((x) & BIT_MASK_RXMYRTS_NAV_V1) << BIT_SHIFT_RXMYRTS_NAV_V1)
-#define BIT_GET_RXMYRTS_NAV_V1(x) \
- (((x) >> BIT_SHIFT_RXMYRTS_NAV_V1) & BIT_MASK_RXMYRTS_NAV_V1)
-
-#define BIT_SHIFT_RTSRST_V1 0
-#define BIT_MASK_RTSRST_V1 0xff
-#define BIT_RTSRST_V1(x) (((x) & BIT_MASK_RTSRST_V1) << BIT_SHIFT_RTSRST_V1)
-#define BIT_GET_RTSRST_V1(x) (((x) >> BIT_SHIFT_RTSRST_V1) & BIT_MASK_RTSRST_V1)
-
-/* 2 REG_BT_COEX_ENHANCED_INTR_CTRL (Offset 0x076E) */
-
-#define BIT_SHIFT_BT_STAT_DELAY 12
-#define BIT_MASK_BT_STAT_DELAY 0xf
-#define BIT_BT_STAT_DELAY(x) \
- (((x) & BIT_MASK_BT_STAT_DELAY) << BIT_SHIFT_BT_STAT_DELAY)
-#define BIT_GET_BT_STAT_DELAY(x) \
- (((x) >> BIT_SHIFT_BT_STAT_DELAY) & BIT_MASK_BT_STAT_DELAY)
-
-#define BIT_SHIFT_BT_TRX_INIT_DETECT 8
-#define BIT_MASK_BT_TRX_INIT_DETECT 0xf
-#define BIT_BT_TRX_INIT_DETECT(x) \
- (((x) & BIT_MASK_BT_TRX_INIT_DETECT) << BIT_SHIFT_BT_TRX_INIT_DETECT)
-#define BIT_GET_BT_TRX_INIT_DETECT(x) \
- (((x) >> BIT_SHIFT_BT_TRX_INIT_DETECT) & BIT_MASK_BT_TRX_INIT_DETECT)
-
-#define BIT_SHIFT_BT_PRI_DETECT_TO 4
-#define BIT_MASK_BT_PRI_DETECT_TO 0xf
-#define BIT_BT_PRI_DETECT_TO(x) \
- (((x) & BIT_MASK_BT_PRI_DETECT_TO) << BIT_SHIFT_BT_PRI_DETECT_TO)
-#define BIT_GET_BT_PRI_DETECT_TO(x) \
- (((x) >> BIT_SHIFT_BT_PRI_DETECT_TO) & BIT_MASK_BT_PRI_DETECT_TO)
-
-#define BIT_R_GRANTALL_WLMASK BIT(3)
-#define BIT_STATIS_BT_EN BIT(2)
-#define BIT_WL_ACT_MASK_ENABLE BIT(1)
-#define BIT_ENHANCED_BT BIT(0)
-
-/* 2 REG_BT_ACT_STATISTICS (Offset 0x0770) */
-
-#define BIT_SHIFT_STATIS_BT_LO_RX (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_STATIS_BT_LO_RX 0xffff
-#define BIT_STATIS_BT_LO_RX(x) \
- (((x) & BIT_MASK_STATIS_BT_LO_RX) << BIT_SHIFT_STATIS_BT_LO_RX)
-#define BIT_GET_STATIS_BT_LO_RX(x) \
- (((x) >> BIT_SHIFT_STATIS_BT_LO_RX) & BIT_MASK_STATIS_BT_LO_RX)
-
-#define BIT_SHIFT_STATIS_BT_LO_TX (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_STATIS_BT_LO_TX 0xffff
-#define BIT_STATIS_BT_LO_TX(x) \
- (((x) & BIT_MASK_STATIS_BT_LO_TX) << BIT_SHIFT_STATIS_BT_LO_TX)
-#define BIT_GET_STATIS_BT_LO_TX(x) \
- (((x) >> BIT_SHIFT_STATIS_BT_LO_TX) & BIT_MASK_STATIS_BT_LO_TX)
-
-/* 2 REG_BT_ACT_STATISTICS (Offset 0x0770) */
-
-#define BIT_SHIFT_STATIS_BT_HI_RX 16
-#define BIT_MASK_STATIS_BT_HI_RX 0xffff
-#define BIT_STATIS_BT_HI_RX(x) \
- (((x) & BIT_MASK_STATIS_BT_HI_RX) << BIT_SHIFT_STATIS_BT_HI_RX)
-#define BIT_GET_STATIS_BT_HI_RX(x) \
- (((x) >> BIT_SHIFT_STATIS_BT_HI_RX) & BIT_MASK_STATIS_BT_HI_RX)
-
-#define BIT_SHIFT_STATIS_BT_HI_TX 0
-#define BIT_MASK_STATIS_BT_HI_TX 0xffff
-#define BIT_STATIS_BT_HI_TX(x) \
- (((x) & BIT_MASK_STATIS_BT_HI_TX) << BIT_SHIFT_STATIS_BT_HI_TX)
-#define BIT_GET_STATIS_BT_HI_TX(x) \
- (((x) >> BIT_SHIFT_STATIS_BT_HI_TX) & BIT_MASK_STATIS_BT_HI_TX)
-
-/* 2 REG_BT_STATISTICS_CONTROL_REGISTER (Offset 0x0778) */
-
-#define BIT_SHIFT_R_BT_CMD_RPT 16
-#define BIT_MASK_R_BT_CMD_RPT 0xffff
-#define BIT_R_BT_CMD_RPT(x) \
- (((x) & BIT_MASK_R_BT_CMD_RPT) << BIT_SHIFT_R_BT_CMD_RPT)
-#define BIT_GET_R_BT_CMD_RPT(x) \
- (((x) >> BIT_SHIFT_R_BT_CMD_RPT) & BIT_MASK_R_BT_CMD_RPT)
-
-#define BIT_SHIFT_R_RPT_FROM_BT 8
-#define BIT_MASK_R_RPT_FROM_BT 0xff
-#define BIT_R_RPT_FROM_BT(x) \
- (((x) & BIT_MASK_R_RPT_FROM_BT) << BIT_SHIFT_R_RPT_FROM_BT)
-#define BIT_GET_R_RPT_FROM_BT(x) \
- (((x) >> BIT_SHIFT_R_RPT_FROM_BT) & BIT_MASK_R_RPT_FROM_BT)
-
-#define BIT_SHIFT_BT_HID_ISR_SET 6
-#define BIT_MASK_BT_HID_ISR_SET 0x3
-#define BIT_BT_HID_ISR_SET(x) \
- (((x) & BIT_MASK_BT_HID_ISR_SET) << BIT_SHIFT_BT_HID_ISR_SET)
-#define BIT_GET_BT_HID_ISR_SET(x) \
- (((x) >> BIT_SHIFT_BT_HID_ISR_SET) & BIT_MASK_BT_HID_ISR_SET)
-
-#define BIT_TDMA_BT_START_NOTIFY BIT(5)
-#define BIT_ENABLE_TDMA_FW_MODE BIT(4)
-#define BIT_ENABLE_PTA_TDMA_MODE BIT(3)
-#define BIT_ENABLE_COEXIST_TAB_IN_TDMA BIT(2)
-#define BIT_GPIO2_GPIO3_EXANGE_OR_NO_BT_CCA BIT(1)
-#define BIT_RTK_BT_ENABLE BIT(0)
-
-/* 2 REG_BT_STATUS_REPORT_REGISTER (Offset 0x077C) */
-
-#define BIT_SHIFT_BT_PROFILE 24
-#define BIT_MASK_BT_PROFILE 0xff
-#define BIT_BT_PROFILE(x) (((x) & BIT_MASK_BT_PROFILE) << BIT_SHIFT_BT_PROFILE)
-#define BIT_GET_BT_PROFILE(x) \
- (((x) >> BIT_SHIFT_BT_PROFILE) & BIT_MASK_BT_PROFILE)
-
-#define BIT_SHIFT_BT_POWER 16
-#define BIT_MASK_BT_POWER 0xff
-#define BIT_BT_POWER(x) (((x) & BIT_MASK_BT_POWER) << BIT_SHIFT_BT_POWER)
-#define BIT_GET_BT_POWER(x) (((x) >> BIT_SHIFT_BT_POWER) & BIT_MASK_BT_POWER)
-
-#define BIT_SHIFT_BT_PREDECT_STATUS 8
-#define BIT_MASK_BT_PREDECT_STATUS 0xff
-#define BIT_BT_PREDECT_STATUS(x) \
- (((x) & BIT_MASK_BT_PREDECT_STATUS) << BIT_SHIFT_BT_PREDECT_STATUS)
-#define BIT_GET_BT_PREDECT_STATUS(x) \
- (((x) >> BIT_SHIFT_BT_PREDECT_STATUS) & BIT_MASK_BT_PREDECT_STATUS)
-
-#define BIT_SHIFT_BT_CMD_INFO 0
-#define BIT_MASK_BT_CMD_INFO 0xff
-#define BIT_BT_CMD_INFO(x) \
- (((x) & BIT_MASK_BT_CMD_INFO) << BIT_SHIFT_BT_CMD_INFO)
-#define BIT_GET_BT_CMD_INFO(x) \
- (((x) >> BIT_SHIFT_BT_CMD_INFO) & BIT_MASK_BT_CMD_INFO)
-
-/* 2 REG_BT_INTERRUPT_CONTROL_REGISTER (Offset 0x0780) */
-
-#define BIT_EN_MAC_NULL_PKT_NOTIFY BIT(31)
-#define BIT_EN_WLAN_RPT_AND_BT_QUERY BIT(30)
-#define BIT_EN_BT_STSTUS_RPT BIT(29)
-#define BIT_EN_BT_POWER BIT(28)
-#define BIT_EN_BT_CHANNEL BIT(27)
-#define BIT_EN_BT_SLOT_CHANGE BIT(26)
-#define BIT_EN_BT_PROFILE_OR_HID BIT(25)
-#define BIT_WLAN_RPT_NOTIFY BIT(24)
-
-#define BIT_SHIFT_WLAN_RPT_DATA 16
-#define BIT_MASK_WLAN_RPT_DATA 0xff
-#define BIT_WLAN_RPT_DATA(x) \
- (((x) & BIT_MASK_WLAN_RPT_DATA) << BIT_SHIFT_WLAN_RPT_DATA)
-#define BIT_GET_WLAN_RPT_DATA(x) \
- (((x) >> BIT_SHIFT_WLAN_RPT_DATA) & BIT_MASK_WLAN_RPT_DATA)
-
-#define BIT_SHIFT_CMD_ID 8
-#define BIT_MASK_CMD_ID 0xff
-#define BIT_CMD_ID(x) (((x) & BIT_MASK_CMD_ID) << BIT_SHIFT_CMD_ID)
-#define BIT_GET_CMD_ID(x) (((x) >> BIT_SHIFT_CMD_ID) & BIT_MASK_CMD_ID)
-
-#define BIT_SHIFT_BT_DATA 0
-#define BIT_MASK_BT_DATA 0xff
-#define BIT_BT_DATA(x) (((x) & BIT_MASK_BT_DATA) << BIT_SHIFT_BT_DATA)
-#define BIT_GET_BT_DATA(x) (((x) >> BIT_SHIFT_BT_DATA) & BIT_MASK_BT_DATA)
-
-/* 2 REG_WLAN_REPORT_TIME_OUT_CONTROL_REGISTER (Offset 0x0784) */
-
-#define BIT_SHIFT_WLAN_RPT_TO 0
-#define BIT_MASK_WLAN_RPT_TO 0xff
-#define BIT_WLAN_RPT_TO(x) \
- (((x) & BIT_MASK_WLAN_RPT_TO) << BIT_SHIFT_WLAN_RPT_TO)
-#define BIT_GET_WLAN_RPT_TO(x) \
- (((x) >> BIT_SHIFT_WLAN_RPT_TO) & BIT_MASK_WLAN_RPT_TO)
-
-/* 2 REG_BT_ISOLATION_TABLE_REGISTER_REGISTER (Offset 0x0785) */
-
-#define BIT_SHIFT_ISOLATION_CHK 1
-#define BIT_MASK_ISOLATION_CHK 0x7fffffffffffffffffffL
-#define BIT_ISOLATION_CHK(x) \
- (((x) & BIT_MASK_ISOLATION_CHK) << BIT_SHIFT_ISOLATION_CHK)
-#define BIT_GET_ISOLATION_CHK(x) \
- (((x) >> BIT_SHIFT_ISOLATION_CHK) & BIT_MASK_ISOLATION_CHK)
-
-/* 2 REG_BT_ISOLATION_TABLE_REGISTER_REGISTER (Offset 0x0785) */
-
-#define BIT_ISOLATION_EN BIT(0)
-
-/* 2 REG_BT_INTERRUPT_STATUS_REGISTER (Offset 0x078F) */
-
-#define BIT_BT_HID_ISR BIT(7)
-#define BIT_BT_QUERY_ISR BIT(6)
-#define BIT_MAC_NULL_PKT_NOTIFY_ISR BIT(5)
-#define BIT_WLAN_RPT_ISR BIT(4)
-#define BIT_BT_POWER_ISR BIT(3)
-#define BIT_BT_CHANNEL_ISR BIT(2)
-#define BIT_BT_SLOT_CHANGE_ISR BIT(1)
-#define BIT_BT_PROFILE_ISR BIT(0)
-
-/* 2 REG_BT_TDMA_TIME_REGISTER (Offset 0x0790) */
-
-#define BIT_SHIFT_BT_TIME 6
-#define BIT_MASK_BT_TIME 0x3ffffff
-#define BIT_BT_TIME(x) (((x) & BIT_MASK_BT_TIME) << BIT_SHIFT_BT_TIME)
-#define BIT_GET_BT_TIME(x) (((x) >> BIT_SHIFT_BT_TIME) & BIT_MASK_BT_TIME)
-
-#define BIT_SHIFT_BT_RPT_SAMPLE_RATE 0
-#define BIT_MASK_BT_RPT_SAMPLE_RATE 0x3f
-#define BIT_BT_RPT_SAMPLE_RATE(x) \
- (((x) & BIT_MASK_BT_RPT_SAMPLE_RATE) << BIT_SHIFT_BT_RPT_SAMPLE_RATE)
-#define BIT_GET_BT_RPT_SAMPLE_RATE(x) \
- (((x) >> BIT_SHIFT_BT_RPT_SAMPLE_RATE) & BIT_MASK_BT_RPT_SAMPLE_RATE)
-
-/* 2 REG_BT_ACT_REGISTER (Offset 0x0794) */
-
-#define BIT_SHIFT_BT_EISR_EN 16
-#define BIT_MASK_BT_EISR_EN 0xff
-#define BIT_BT_EISR_EN(x) (((x) & BIT_MASK_BT_EISR_EN) << BIT_SHIFT_BT_EISR_EN)
-#define BIT_GET_BT_EISR_EN(x) \
- (((x) >> BIT_SHIFT_BT_EISR_EN) & BIT_MASK_BT_EISR_EN)
-
-#define BIT_BT_ACT_FALLING_ISR BIT(10)
-#define BIT_BT_ACT_RISING_ISR BIT(9)
-#define BIT_TDMA_TO_ISR BIT(8)
-
-#define BIT_SHIFT_BT_CH 0
-#define BIT_MASK_BT_CH 0xff
-#define BIT_BT_CH(x) (((x) & BIT_MASK_BT_CH) << BIT_SHIFT_BT_CH)
-#define BIT_GET_BT_CH(x) (((x) >> BIT_SHIFT_BT_CH) & BIT_MASK_BT_CH)
-
-/* 2 REG_OBFF_CTRL_BASIC (Offset 0x0798) */
-
-#define BIT_OBFF_EN_V1 BIT(31)
-
-#define BIT_SHIFT_OBFF_STATE_V1 28
-#define BIT_MASK_OBFF_STATE_V1 0x3
-#define BIT_OBFF_STATE_V1(x) \
- (((x) & BIT_MASK_OBFF_STATE_V1) << BIT_SHIFT_OBFF_STATE_V1)
-#define BIT_GET_OBFF_STATE_V1(x) \
- (((x) >> BIT_SHIFT_OBFF_STATE_V1) & BIT_MASK_OBFF_STATE_V1)
-
-#define BIT_OBFF_ACT_RXDMA_EN BIT(27)
-#define BIT_OBFF_BLOCK_INT_EN BIT(26)
-#define BIT_OBFF_AUTOACT_EN BIT(25)
-#define BIT_OBFF_AUTOIDLE_EN BIT(24)
-
-#define BIT_SHIFT_WAKE_MAX_PLS 20
-#define BIT_MASK_WAKE_MAX_PLS 0x7
-#define BIT_WAKE_MAX_PLS(x) \
- (((x) & BIT_MASK_WAKE_MAX_PLS) << BIT_SHIFT_WAKE_MAX_PLS)
-#define BIT_GET_WAKE_MAX_PLS(x) \
- (((x) >> BIT_SHIFT_WAKE_MAX_PLS) & BIT_MASK_WAKE_MAX_PLS)
-
-#define BIT_SHIFT_WAKE_MIN_PLS 16
-#define BIT_MASK_WAKE_MIN_PLS 0x7
-#define BIT_WAKE_MIN_PLS(x) \
- (((x) & BIT_MASK_WAKE_MIN_PLS) << BIT_SHIFT_WAKE_MIN_PLS)
-#define BIT_GET_WAKE_MIN_PLS(x) \
- (((x) >> BIT_SHIFT_WAKE_MIN_PLS) & BIT_MASK_WAKE_MIN_PLS)
-
-#define BIT_SHIFT_WAKE_MAX_F2F 12
-#define BIT_MASK_WAKE_MAX_F2F 0x7
-#define BIT_WAKE_MAX_F2F(x) \
- (((x) & BIT_MASK_WAKE_MAX_F2F) << BIT_SHIFT_WAKE_MAX_F2F)
-#define BIT_GET_WAKE_MAX_F2F(x) \
- (((x) >> BIT_SHIFT_WAKE_MAX_F2F) & BIT_MASK_WAKE_MAX_F2F)
-
-#define BIT_SHIFT_WAKE_MIN_F2F 8
-#define BIT_MASK_WAKE_MIN_F2F 0x7
-#define BIT_WAKE_MIN_F2F(x) \
- (((x) & BIT_MASK_WAKE_MIN_F2F) << BIT_SHIFT_WAKE_MIN_F2F)
-#define BIT_GET_WAKE_MIN_F2F(x) \
- (((x) >> BIT_SHIFT_WAKE_MIN_F2F) & BIT_MASK_WAKE_MIN_F2F)
-
-#define BIT_APP_CPU_ACT_V1 BIT(3)
-#define BIT_APP_OBFF_V1 BIT(2)
-#define BIT_APP_IDLE_V1 BIT(1)
-#define BIT_APP_INIT_V1 BIT(0)
-
-/* 2 REG_OBFF_CTRL2_TIMER (Offset 0x079C) */
-
-#define BIT_SHIFT_RX_HIGH_TIMER_IDX 24
-#define BIT_MASK_RX_HIGH_TIMER_IDX 0x7
-#define BIT_RX_HIGH_TIMER_IDX(x) \
- (((x) & BIT_MASK_RX_HIGH_TIMER_IDX) << BIT_SHIFT_RX_HIGH_TIMER_IDX)
-#define BIT_GET_RX_HIGH_TIMER_IDX(x) \
- (((x) >> BIT_SHIFT_RX_HIGH_TIMER_IDX) & BIT_MASK_RX_HIGH_TIMER_IDX)
-
-#define BIT_SHIFT_RX_MED_TIMER_IDX 16
-#define BIT_MASK_RX_MED_TIMER_IDX 0x7
-#define BIT_RX_MED_TIMER_IDX(x) \
- (((x) & BIT_MASK_RX_MED_TIMER_IDX) << BIT_SHIFT_RX_MED_TIMER_IDX)
-#define BIT_GET_RX_MED_TIMER_IDX(x) \
- (((x) >> BIT_SHIFT_RX_MED_TIMER_IDX) & BIT_MASK_RX_MED_TIMER_IDX)
-
-#define BIT_SHIFT_RX_LOW_TIMER_IDX 8
-#define BIT_MASK_RX_LOW_TIMER_IDX 0x7
-#define BIT_RX_LOW_TIMER_IDX(x) \
- (((x) & BIT_MASK_RX_LOW_TIMER_IDX) << BIT_SHIFT_RX_LOW_TIMER_IDX)
-#define BIT_GET_RX_LOW_TIMER_IDX(x) \
- (((x) >> BIT_SHIFT_RX_LOW_TIMER_IDX) & BIT_MASK_RX_LOW_TIMER_IDX)
-
-#define BIT_SHIFT_OBFF_INT_TIMER_IDX 0
-#define BIT_MASK_OBFF_INT_TIMER_IDX 0x7
-#define BIT_OBFF_INT_TIMER_IDX(x) \
- (((x) & BIT_MASK_OBFF_INT_TIMER_IDX) << BIT_SHIFT_OBFF_INT_TIMER_IDX)
-#define BIT_GET_OBFF_INT_TIMER_IDX(x) \
- (((x) >> BIT_SHIFT_OBFF_INT_TIMER_IDX) & BIT_MASK_OBFF_INT_TIMER_IDX)
-
-/* 2 REG_LTR_CTRL_BASIC (Offset 0x07A0) */
-
-#define BIT_LTR_EN_V1 BIT(31)
-#define BIT_LTR_HW_EN_V1 BIT(30)
-#define BIT_LRT_ACT_CTS_EN BIT(29)
-#define BIT_LTR_ACT_RXPKT_EN BIT(28)
-#define BIT_LTR_ACT_RXDMA_EN BIT(27)
-#define BIT_LTR_IDLE_NO_SNOOP BIT(26)
-#define BIT_SPDUP_MGTPKT BIT(25)
-#define BIT_RX_AGG_EN BIT(24)
-#define BIT_APP_LTR_ACT BIT(23)
-#define BIT_APP_LTR_IDLE BIT(22)
-
-#define BIT_SHIFT_HIGH_RATE_TRIG_SEL 20
-#define BIT_MASK_HIGH_RATE_TRIG_SEL 0x3
-#define BIT_HIGH_RATE_TRIG_SEL(x) \
- (((x) & BIT_MASK_HIGH_RATE_TRIG_SEL) << BIT_SHIFT_HIGH_RATE_TRIG_SEL)
-#define BIT_GET_HIGH_RATE_TRIG_SEL(x) \
- (((x) >> BIT_SHIFT_HIGH_RATE_TRIG_SEL) & BIT_MASK_HIGH_RATE_TRIG_SEL)
-
-#define BIT_SHIFT_MED_RATE_TRIG_SEL 18
-#define BIT_MASK_MED_RATE_TRIG_SEL 0x3
-#define BIT_MED_RATE_TRIG_SEL(x) \
- (((x) & BIT_MASK_MED_RATE_TRIG_SEL) << BIT_SHIFT_MED_RATE_TRIG_SEL)
-#define BIT_GET_MED_RATE_TRIG_SEL(x) \
- (((x) >> BIT_SHIFT_MED_RATE_TRIG_SEL) & BIT_MASK_MED_RATE_TRIG_SEL)
-
-#define BIT_SHIFT_LOW_RATE_TRIG_SEL 16
-#define BIT_MASK_LOW_RATE_TRIG_SEL 0x3
-#define BIT_LOW_RATE_TRIG_SEL(x) \
- (((x) & BIT_MASK_LOW_RATE_TRIG_SEL) << BIT_SHIFT_LOW_RATE_TRIG_SEL)
-#define BIT_GET_LOW_RATE_TRIG_SEL(x) \
- (((x) >> BIT_SHIFT_LOW_RATE_TRIG_SEL) & BIT_MASK_LOW_RATE_TRIG_SEL)
-
-#define BIT_SHIFT_HIGH_RATE_BD_IDX 8
-#define BIT_MASK_HIGH_RATE_BD_IDX 0x7f
-#define BIT_HIGH_RATE_BD_IDX(x) \
- (((x) & BIT_MASK_HIGH_RATE_BD_IDX) << BIT_SHIFT_HIGH_RATE_BD_IDX)
-#define BIT_GET_HIGH_RATE_BD_IDX(x) \
- (((x) >> BIT_SHIFT_HIGH_RATE_BD_IDX) & BIT_MASK_HIGH_RATE_BD_IDX)
-
-#define BIT_SHIFT_LOW_RATE_BD_IDX 0
-#define BIT_MASK_LOW_RATE_BD_IDX 0x7f
-#define BIT_LOW_RATE_BD_IDX(x) \
- (((x) & BIT_MASK_LOW_RATE_BD_IDX) << BIT_SHIFT_LOW_RATE_BD_IDX)
-#define BIT_GET_LOW_RATE_BD_IDX(x) \
- (((x) >> BIT_SHIFT_LOW_RATE_BD_IDX) & BIT_MASK_LOW_RATE_BD_IDX)
-
-/* 2 REG_LTR_CTRL2_TIMER_THRESHOLD (Offset 0x07A4) */
-
-#define BIT_SHIFT_RX_EMPTY_TIMER_IDX 24
-#define BIT_MASK_RX_EMPTY_TIMER_IDX 0x7
-#define BIT_RX_EMPTY_TIMER_IDX(x) \
- (((x) & BIT_MASK_RX_EMPTY_TIMER_IDX) << BIT_SHIFT_RX_EMPTY_TIMER_IDX)
-#define BIT_GET_RX_EMPTY_TIMER_IDX(x) \
- (((x) >> BIT_SHIFT_RX_EMPTY_TIMER_IDX) & BIT_MASK_RX_EMPTY_TIMER_IDX)
-
-#define BIT_SHIFT_RX_AFULL_TH_IDX 20
-#define BIT_MASK_RX_AFULL_TH_IDX 0x7
-#define BIT_RX_AFULL_TH_IDX(x) \
- (((x) & BIT_MASK_RX_AFULL_TH_IDX) << BIT_SHIFT_RX_AFULL_TH_IDX)
-#define BIT_GET_RX_AFULL_TH_IDX(x) \
- (((x) >> BIT_SHIFT_RX_AFULL_TH_IDX) & BIT_MASK_RX_AFULL_TH_IDX)
-
-#define BIT_SHIFT_RX_HIGH_TH_IDX 16
-#define BIT_MASK_RX_HIGH_TH_IDX 0x7
-#define BIT_RX_HIGH_TH_IDX(x) \
- (((x) & BIT_MASK_RX_HIGH_TH_IDX) << BIT_SHIFT_RX_HIGH_TH_IDX)
-#define BIT_GET_RX_HIGH_TH_IDX(x) \
- (((x) >> BIT_SHIFT_RX_HIGH_TH_IDX) & BIT_MASK_RX_HIGH_TH_IDX)
-
-#define BIT_SHIFT_RX_MED_TH_IDX 12
-#define BIT_MASK_RX_MED_TH_IDX 0x7
-#define BIT_RX_MED_TH_IDX(x) \
- (((x) & BIT_MASK_RX_MED_TH_IDX) << BIT_SHIFT_RX_MED_TH_IDX)
-#define BIT_GET_RX_MED_TH_IDX(x) \
- (((x) >> BIT_SHIFT_RX_MED_TH_IDX) & BIT_MASK_RX_MED_TH_IDX)
-
-#define BIT_SHIFT_RX_LOW_TH_IDX 8
-#define BIT_MASK_RX_LOW_TH_IDX 0x7
-#define BIT_RX_LOW_TH_IDX(x) \
- (((x) & BIT_MASK_RX_LOW_TH_IDX) << BIT_SHIFT_RX_LOW_TH_IDX)
-#define BIT_GET_RX_LOW_TH_IDX(x) \
- (((x) >> BIT_SHIFT_RX_LOW_TH_IDX) & BIT_MASK_RX_LOW_TH_IDX)
-
-#define BIT_SHIFT_LTR_SPACE_IDX 4
-#define BIT_MASK_LTR_SPACE_IDX 0x3
-#define BIT_LTR_SPACE_IDX(x) \
- (((x) & BIT_MASK_LTR_SPACE_IDX) << BIT_SHIFT_LTR_SPACE_IDX)
-#define BIT_GET_LTR_SPACE_IDX(x) \
- (((x) >> BIT_SHIFT_LTR_SPACE_IDX) & BIT_MASK_LTR_SPACE_IDX)
-
-#define BIT_SHIFT_LTR_IDLE_TIMER_IDX 0
-#define BIT_MASK_LTR_IDLE_TIMER_IDX 0x7
-#define BIT_LTR_IDLE_TIMER_IDX(x) \
- (((x) & BIT_MASK_LTR_IDLE_TIMER_IDX) << BIT_SHIFT_LTR_IDLE_TIMER_IDX)
-#define BIT_GET_LTR_IDLE_TIMER_IDX(x) \
- (((x) >> BIT_SHIFT_LTR_IDLE_TIMER_IDX) & BIT_MASK_LTR_IDLE_TIMER_IDX)
-
-/* 2 REG_LTR_IDLE_LATENCY_V1 (Offset 0x07A8) */
-
-#define BIT_SHIFT_LTR_IDLE_L 0
-#define BIT_MASK_LTR_IDLE_L 0xffffffffL
-#define BIT_LTR_IDLE_L(x) (((x) & BIT_MASK_LTR_IDLE_L) << BIT_SHIFT_LTR_IDLE_L)
-#define BIT_GET_LTR_IDLE_L(x) \
- (((x) >> BIT_SHIFT_LTR_IDLE_L) & BIT_MASK_LTR_IDLE_L)
-
-/* 2 REG_LTR_ACTIVE_LATENCY_V1 (Offset 0x07AC) */
-
-#define BIT_SHIFT_LTR_ACT_L 0
-#define BIT_MASK_LTR_ACT_L 0xffffffffL
-#define BIT_LTR_ACT_L(x) (((x) & BIT_MASK_LTR_ACT_L) << BIT_SHIFT_LTR_ACT_L)
-#define BIT_GET_LTR_ACT_L(x) (((x) >> BIT_SHIFT_LTR_ACT_L) & BIT_MASK_LTR_ACT_L)
-
-/* 2 REG_ANTENNA_TRAINING_CONTROL_REGISTER (Offset 0x07B0) */
-
-#define BIT_APPEND_MACID_IN_RESP_EN BIT(50)
-#define BIT_ADDR2_MATCH_EN BIT(49)
-#define BIT_ANTTRN_EN BIT(48)
-
-#define BIT_SHIFT_TRAIN_STA_ADDR 0
-#define BIT_MASK_TRAIN_STA_ADDR 0xffffffffffffL
-#define BIT_TRAIN_STA_ADDR(x) \
- (((x) & BIT_MASK_TRAIN_STA_ADDR) << BIT_SHIFT_TRAIN_STA_ADDR)
-#define BIT_GET_TRAIN_STA_ADDR(x) \
- (((x) >> BIT_SHIFT_TRAIN_STA_ADDR) & BIT_MASK_TRAIN_STA_ADDR)
-
-/* 2 REG_WMAC_PKTCNT_RWD (Offset 0x07B8) */
-
-#define BIT_SHIFT_PKTCNT_BSSIDMAP 4
-#define BIT_MASK_PKTCNT_BSSIDMAP 0xf
-#define BIT_PKTCNT_BSSIDMAP(x) \
- (((x) & BIT_MASK_PKTCNT_BSSIDMAP) << BIT_SHIFT_PKTCNT_BSSIDMAP)
-#define BIT_GET_PKTCNT_BSSIDMAP(x) \
- (((x) >> BIT_SHIFT_PKTCNT_BSSIDMAP) & BIT_MASK_PKTCNT_BSSIDMAP)
-
-#define BIT_PKTCNT_CNTRST BIT(1)
-#define BIT_PKTCNT_CNTEN BIT(0)
-
-/* 2 REG_WMAC_PKTCNT_CTRL (Offset 0x07BC) */
-
-#define BIT_WMAC_PKTCNT_TRST BIT(9)
-#define BIT_WMAC_PKTCNT_FEN BIT(8)
-
-#define BIT_SHIFT_WMAC_PKTCNT_CFGAD 0
-#define BIT_MASK_WMAC_PKTCNT_CFGAD 0xff
-#define BIT_WMAC_PKTCNT_CFGAD(x) \
- (((x) & BIT_MASK_WMAC_PKTCNT_CFGAD) << BIT_SHIFT_WMAC_PKTCNT_CFGAD)
-#define BIT_GET_WMAC_PKTCNT_CFGAD(x) \
- (((x) >> BIT_SHIFT_WMAC_PKTCNT_CFGAD) & BIT_MASK_WMAC_PKTCNT_CFGAD)
-
-/* 2 REG_IQ_DUMP (Offset 0x07C0) */
-
-#define BIT_SHIFT_R_WMAC_MATCH_REF_MAC (64 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_MATCH_REF_MAC 0xffffffffL
-#define BIT_R_WMAC_MATCH_REF_MAC(x) \
- (((x) & BIT_MASK_R_WMAC_MATCH_REF_MAC) \
- << BIT_SHIFT_R_WMAC_MATCH_REF_MAC)
-#define BIT_GET_R_WMAC_MATCH_REF_MAC(x) \
- (((x) >> BIT_SHIFT_R_WMAC_MATCH_REF_MAC) & \
- BIT_MASK_R_WMAC_MATCH_REF_MAC)
-
-#define BIT_SHIFT_R_WMAC_RX_FIL_LEN (64 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_RX_FIL_LEN 0xffff
-#define BIT_R_WMAC_RX_FIL_LEN(x) \
- (((x) & BIT_MASK_R_WMAC_RX_FIL_LEN) << BIT_SHIFT_R_WMAC_RX_FIL_LEN)
-#define BIT_GET_R_WMAC_RX_FIL_LEN(x) \
- (((x) >> BIT_SHIFT_R_WMAC_RX_FIL_LEN) & BIT_MASK_R_WMAC_RX_FIL_LEN)
-
-#define BIT_SHIFT_R_WMAC_RXFIFO_FULL_TH (56 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_RXFIFO_FULL_TH 0xff
-#define BIT_R_WMAC_RXFIFO_FULL_TH(x) \
- (((x) & BIT_MASK_R_WMAC_RXFIFO_FULL_TH) \
- << BIT_SHIFT_R_WMAC_RXFIFO_FULL_TH)
-#define BIT_GET_R_WMAC_RXFIFO_FULL_TH(x) \
- (((x) >> BIT_SHIFT_R_WMAC_RXFIFO_FULL_TH) & \
- BIT_MASK_R_WMAC_RXFIFO_FULL_TH)
-
-#define BIT_R_WMAC_SRCH_TXRPT_TYPE BIT(51)
-#define BIT_R_WMAC_NDP_RST BIT(50)
-#define BIT_R_WMAC_POWINT_EN BIT(49)
-#define BIT_R_WMAC_SRCH_TXRPT_PERPKT BIT(48)
-#define BIT_R_WMAC_SRCH_TXRPT_MID BIT(47)
-#define BIT_R_WMAC_PFIN_TOEN BIT(46)
-#define BIT_R_WMAC_FIL_SECERR BIT(45)
-#define BIT_R_WMAC_FIL_CTLPKTLEN BIT(44)
-#define BIT_R_WMAC_FIL_FCTYPE BIT(43)
-#define BIT_R_WMAC_FIL_FCPROVER BIT(42)
-#define BIT_R_WMAC_PHYSTS_SNIF BIT(41)
-#define BIT_R_WMAC_PHYSTS_PLCP BIT(40)
-#define BIT_R_MAC_TCR_VBONF_RD BIT(39)
-#define BIT_R_WMAC_TCR_MPAR_NDP BIT(38)
-#define BIT_R_WMAC_NDP_FILTER BIT(37)
-#define BIT_R_WMAC_RXLEN_SEL BIT(36)
-#define BIT_R_WMAC_RXLEN_SEL1 BIT(35)
-#define BIT_R_OFDM_FILTER BIT(34)
-#define BIT_R_WMAC_CHK_OFDM_LEN BIT(33)
-
-#define BIT_SHIFT_R_WMAC_MASK_LA_MAC (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_MASK_LA_MAC 0xffffffffL
-#define BIT_R_WMAC_MASK_LA_MAC(x) \
- (((x) & BIT_MASK_R_WMAC_MASK_LA_MAC) << BIT_SHIFT_R_WMAC_MASK_LA_MAC)
-#define BIT_GET_R_WMAC_MASK_LA_MAC(x) \
- (((x) >> BIT_SHIFT_R_WMAC_MASK_LA_MAC) & BIT_MASK_R_WMAC_MASK_LA_MAC)
-
-#define BIT_R_WMAC_CHK_CCK_LEN BIT(32)
-
-/* 2 REG_IQ_DUMP (Offset 0x07C0) */
-
-#define BIT_SHIFT_R_OFDM_LEN 26
-#define BIT_MASK_R_OFDM_LEN 0x3f
-#define BIT_R_OFDM_LEN(x) (((x) & BIT_MASK_R_OFDM_LEN) << BIT_SHIFT_R_OFDM_LEN)
-#define BIT_GET_R_OFDM_LEN(x) \
- (((x) >> BIT_SHIFT_R_OFDM_LEN) & BIT_MASK_R_OFDM_LEN)
-
-#define BIT_SHIFT_DUMP_OK_ADDR 15
-#define BIT_MASK_DUMP_OK_ADDR 0x1ffff
-#define BIT_DUMP_OK_ADDR(x) \
- (((x) & BIT_MASK_DUMP_OK_ADDR) << BIT_SHIFT_DUMP_OK_ADDR)
-#define BIT_GET_DUMP_OK_ADDR(x) \
- (((x) >> BIT_SHIFT_DUMP_OK_ADDR) & BIT_MASK_DUMP_OK_ADDR)
-
-#define BIT_SHIFT_R_TRIG_TIME_SEL 8
-#define BIT_MASK_R_TRIG_TIME_SEL 0x7f
-#define BIT_R_TRIG_TIME_SEL(x) \
- (((x) & BIT_MASK_R_TRIG_TIME_SEL) << BIT_SHIFT_R_TRIG_TIME_SEL)
-#define BIT_GET_R_TRIG_TIME_SEL(x) \
- (((x) >> BIT_SHIFT_R_TRIG_TIME_SEL) & BIT_MASK_R_TRIG_TIME_SEL)
-
-#define BIT_SHIFT_R_MAC_TRIG_SEL 6
-#define BIT_MASK_R_MAC_TRIG_SEL 0x3
-#define BIT_R_MAC_TRIG_SEL(x) \
- (((x) & BIT_MASK_R_MAC_TRIG_SEL) << BIT_SHIFT_R_MAC_TRIG_SEL)
-#define BIT_GET_R_MAC_TRIG_SEL(x) \
- (((x) >> BIT_SHIFT_R_MAC_TRIG_SEL) & BIT_MASK_R_MAC_TRIG_SEL)
-
-#define BIT_MAC_TRIG_REG BIT(5)
-
-#define BIT_SHIFT_R_LEVEL_PULSE_SEL 3
-#define BIT_MASK_R_LEVEL_PULSE_SEL 0x3
-#define BIT_R_LEVEL_PULSE_SEL(x) \
- (((x) & BIT_MASK_R_LEVEL_PULSE_SEL) << BIT_SHIFT_R_LEVEL_PULSE_SEL)
-#define BIT_GET_R_LEVEL_PULSE_SEL(x) \
- (((x) >> BIT_SHIFT_R_LEVEL_PULSE_SEL) & BIT_MASK_R_LEVEL_PULSE_SEL)
-
-#define BIT_EN_LA_MAC BIT(2)
-#define BIT_R_EN_IQDUMP BIT(1)
-#define BIT_R_IQDATA_DUMP BIT(0)
-
-#define BIT_SHIFT_R_CCK_LEN 0
-#define BIT_MASK_R_CCK_LEN 0xffff
-#define BIT_R_CCK_LEN(x) (((x) & BIT_MASK_R_CCK_LEN) << BIT_SHIFT_R_CCK_LEN)
-#define BIT_GET_R_CCK_LEN(x) (((x) >> BIT_SHIFT_R_CCK_LEN) & BIT_MASK_R_CCK_LEN)
-
-/* 2 REG_WMAC_FTM_CTL (Offset 0x07CC) */
-
-#define BIT_RXFTM_TXACK_SC BIT(6)
-#define BIT_RXFTM_TXACK_BW BIT(5)
-#define BIT_RXFTM_EN BIT(3)
-#define BIT_RXFTMREQ_BYDRV BIT(2)
-#define BIT_RXFTMREQ_EN BIT(1)
-#define BIT_FTM_EN BIT(0)
-
-/* 2 REG_RX_FILTER_FUNCTION (Offset 0x07DA) */
-
-#define BIT_R_WMAC_MHRDDY_LATCH BIT(14)
-
-/* 2 REG_RX_FILTER_FUNCTION (Offset 0x07DA) */
-
-#define BIT_R_WMAC_MHRDDY_CLR BIT(13)
-
-/* 2 REG_RX_FILTER_FUNCTION (Offset 0x07DA) */
-
-#define BIT_R_RXPKTCTL_FSM_BASED_MPDURDY1 BIT(12)
-
-/* 2 REG_RX_FILTER_FUNCTION (Offset 0x07DA) */
-
-#define BIT_WMAC_DIS_VHT_PLCP_CHK_MU BIT(11)
-
-/* 2 REG_RX_FILTER_FUNCTION (Offset 0x07DA) */
-
-#define BIT_R_CHK_DELIMIT_LEN BIT(10)
-#define BIT_R_REAPTER_ADDR_MATCH BIT(9)
-#define BIT_R_RXPKTCTL_FSM_BASED_MPDURDY BIT(8)
-#define BIT_R_LATCH_MACHRDY BIT(7)
-#define BIT_R_WMAC_RXFIL_REND BIT(6)
-#define BIT_R_WMAC_MPDURDY_CLR BIT(5)
-#define BIT_R_WMAC_CLRRXSEC BIT(4)
-#define BIT_R_WMAC_RXFIL_RDEL BIT(3)
-#define BIT_R_WMAC_RXFIL_FCSE BIT(2)
-#define BIT_R_WMAC_RXFIL_MESH_DEL BIT(1)
-#define BIT_R_WMAC_RXFIL_MASKM BIT(0)
-
-/* 2 REG_NDP_SIG (Offset 0x07E0) */
-
-#define BIT_SHIFT_R_WMAC_TXNDP_SIGB 0
-#define BIT_MASK_R_WMAC_TXNDP_SIGB 0x1fffff
-#define BIT_R_WMAC_TXNDP_SIGB(x) \
- (((x) & BIT_MASK_R_WMAC_TXNDP_SIGB) << BIT_SHIFT_R_WMAC_TXNDP_SIGB)
-#define BIT_GET_R_WMAC_TXNDP_SIGB(x) \
- (((x) >> BIT_SHIFT_R_WMAC_TXNDP_SIGB) & BIT_MASK_R_WMAC_TXNDP_SIGB)
-
-/* 2 REG_TXCMD_INFO_FOR_RSP_PKT (Offset 0x07E4) */
-
-#define BIT_SHIFT_R_MAC_DEBUG (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_MAC_DEBUG 0xffffffffL
-#define BIT_R_MAC_DEBUG(x) \
- (((x) & BIT_MASK_R_MAC_DEBUG) << BIT_SHIFT_R_MAC_DEBUG)
-#define BIT_GET_R_MAC_DEBUG(x) \
- (((x) >> BIT_SHIFT_R_MAC_DEBUG) & BIT_MASK_R_MAC_DEBUG)
-
-/* 2 REG_TXCMD_INFO_FOR_RSP_PKT (Offset 0x07E4) */
-
-#define BIT_SHIFT_R_MAC_DBG_SHIFT 8
-#define BIT_MASK_R_MAC_DBG_SHIFT 0x7
-#define BIT_R_MAC_DBG_SHIFT(x) \
- (((x) & BIT_MASK_R_MAC_DBG_SHIFT) << BIT_SHIFT_R_MAC_DBG_SHIFT)
-#define BIT_GET_R_MAC_DBG_SHIFT(x) \
- (((x) >> BIT_SHIFT_R_MAC_DBG_SHIFT) & BIT_MASK_R_MAC_DBG_SHIFT)
-
-#define BIT_SHIFT_R_MAC_DBG_SEL 0
-#define BIT_MASK_R_MAC_DBG_SEL 0x3
-#define BIT_R_MAC_DBG_SEL(x) \
- (((x) & BIT_MASK_R_MAC_DBG_SEL) << BIT_SHIFT_R_MAC_DBG_SEL)
-#define BIT_GET_R_MAC_DBG_SEL(x) \
- (((x) >> BIT_SHIFT_R_MAC_DBG_SEL) & BIT_MASK_R_MAC_DBG_SEL)
-
-/* 2 REG_SYS_CFG3 (Offset 0x1000) */
-
-#define BIT_PWC_MA33V BIT(15)
-
-/* 2 REG_SYS_CFG3 (Offset 0x1000) */
-
-#define BIT_PWC_MA12V BIT(14)
-#define BIT_PWC_MD12V BIT(13)
-#define BIT_PWC_PD12V BIT(12)
-#define BIT_PWC_UD12V BIT(11)
-#define BIT_ISO_MA2MD BIT(1)
-
-/* 2 REG_SYS_CFG5 (Offset 0x1070) */
-
-#define BIT_LPS_STATUS BIT(3)
-#define BIT_HCI_TXDMA_BUSY BIT(2)
-#define BIT_HCI_TXDMA_ALLOW BIT(1)
-#define BIT_FW_CTRL_HCI_TXDMA_EN BIT(0)
-
-/* 2 REG_CPU_DMEM_CON (Offset 0x1080) */
-
-#define BIT_WDT_OPT_IOWRAPPER BIT(19)
-
-/* 2 REG_CPU_DMEM_CON (Offset 0x1080) */
-
-#define BIT_ANA_PORT_IDLE BIT(18)
-#define BIT_MAC_PORT_IDLE BIT(17)
-#define BIT_WL_PLATFORM_RST BIT(16)
-#define BIT_WL_SECURITY_CLK BIT(15)
-
-/* 2 REG_CPU_DMEM_CON (Offset 0x1080) */
-
-#define BIT_SHIFT_CPU_DMEM_CON 0
-#define BIT_MASK_CPU_DMEM_CON 0xff
-#define BIT_CPU_DMEM_CON(x) \
- (((x) & BIT_MASK_CPU_DMEM_CON) << BIT_SHIFT_CPU_DMEM_CON)
-#define BIT_GET_CPU_DMEM_CON(x) \
- (((x) >> BIT_SHIFT_CPU_DMEM_CON) & BIT_MASK_CPU_DMEM_CON)
-
-/* 2 REG_BOOT_REASON (Offset 0x1088) */
-
-#define BIT_SHIFT_BOOT_REASON 0
-#define BIT_MASK_BOOT_REASON 0x7
-#define BIT_BOOT_REASON(x) \
- (((x) & BIT_MASK_BOOT_REASON) << BIT_SHIFT_BOOT_REASON)
-#define BIT_GET_BOOT_REASON(x) \
- (((x) >> BIT_SHIFT_BOOT_REASON) & BIT_MASK_BOOT_REASON)
-
-/* 2 REG_NFCPAD_CTRL (Offset 0x10A8) */
-
-#define BIT_PAD_SHUTDW BIT(18)
-#define BIT_SYSON_NFC_PAD BIT(17)
-#define BIT_NFC_INT_PAD_CTRL BIT(16)
-#define BIT_NFC_RFDIS_PAD_CTRL BIT(15)
-#define BIT_NFC_CLK_PAD_CTRL BIT(14)
-#define BIT_NFC_DATA_PAD_CTRL BIT(13)
-#define BIT_NFC_PAD_PULL_CTRL BIT(12)
-
-#define BIT_SHIFT_NFCPAD_IO_SEL 8
-#define BIT_MASK_NFCPAD_IO_SEL 0xf
-#define BIT_NFCPAD_IO_SEL(x) \
- (((x) & BIT_MASK_NFCPAD_IO_SEL) << BIT_SHIFT_NFCPAD_IO_SEL)
-#define BIT_GET_NFCPAD_IO_SEL(x) \
- (((x) >> BIT_SHIFT_NFCPAD_IO_SEL) & BIT_MASK_NFCPAD_IO_SEL)
-
-#define BIT_SHIFT_NFCPAD_OUT 4
-#define BIT_MASK_NFCPAD_OUT 0xf
-#define BIT_NFCPAD_OUT(x) (((x) & BIT_MASK_NFCPAD_OUT) << BIT_SHIFT_NFCPAD_OUT)
-#define BIT_GET_NFCPAD_OUT(x) \
- (((x) >> BIT_SHIFT_NFCPAD_OUT) & BIT_MASK_NFCPAD_OUT)
-
-#define BIT_SHIFT_NFCPAD_IN 0
-#define BIT_MASK_NFCPAD_IN 0xf
-#define BIT_NFCPAD_IN(x) (((x) & BIT_MASK_NFCPAD_IN) << BIT_SHIFT_NFCPAD_IN)
-#define BIT_GET_NFCPAD_IN(x) (((x) >> BIT_SHIFT_NFCPAD_IN) & BIT_MASK_NFCPAD_IN)
-
-/* 2 REG_HIMR2 (Offset 0x10B0) */
-
-#define BIT_BCNDMAINT_P4_MSK BIT(31)
-#define BIT_BCNDMAINT_P3_MSK BIT(30)
-#define BIT_BCNDMAINT_P2_MSK BIT(29)
-#define BIT_BCNDMAINT_P1_MSK BIT(28)
-#define BIT_ATIMEND7_MSK BIT(22)
-#define BIT_ATIMEND6_MSK BIT(21)
-#define BIT_ATIMEND5_MSK BIT(20)
-#define BIT_ATIMEND4_MSK BIT(19)
-#define BIT_ATIMEND3_MSK BIT(18)
-#define BIT_ATIMEND2_MSK BIT(17)
-#define BIT_ATIMEND1_MSK BIT(16)
-#define BIT_TXBCN7OK_MSK BIT(14)
-#define BIT_TXBCN6OK_MSK BIT(13)
-#define BIT_TXBCN5OK_MSK BIT(12)
-#define BIT_TXBCN4OK_MSK BIT(11)
-#define BIT_TXBCN3OK_MSK BIT(10)
-#define BIT_TXBCN2OK_MSK BIT(9)
-#define BIT_TXBCN1OK_MSK_V1 BIT(8)
-#define BIT_TXBCN7ERR_MSK BIT(6)
-#define BIT_TXBCN6ERR_MSK BIT(5)
-#define BIT_TXBCN5ERR_MSK BIT(4)
-#define BIT_TXBCN4ERR_MSK BIT(3)
-#define BIT_TXBCN3ERR_MSK BIT(2)
-#define BIT_TXBCN2ERR_MSK BIT(1)
-#define BIT_TXBCN1ERR_MSK_V1 BIT(0)
-
-/* 2 REG_HISR2 (Offset 0x10B4) */
-
-#define BIT_BCNDMAINT_P4 BIT(31)
-#define BIT_BCNDMAINT_P3 BIT(30)
-#define BIT_BCNDMAINT_P2 BIT(29)
-#define BIT_BCNDMAINT_P1 BIT(28)
-#define BIT_ATIMEND7 BIT(22)
-#define BIT_ATIMEND6 BIT(21)
-#define BIT_ATIMEND5 BIT(20)
-#define BIT_ATIMEND4 BIT(19)
-#define BIT_ATIMEND3 BIT(18)
-#define BIT_ATIMEND2 BIT(17)
-#define BIT_ATIMEND1 BIT(16)
-#define BIT_TXBCN7OK BIT(14)
-#define BIT_TXBCN6OK BIT(13)
-#define BIT_TXBCN5OK BIT(12)
-#define BIT_TXBCN4OK BIT(11)
-#define BIT_TXBCN3OK BIT(10)
-#define BIT_TXBCN2OK BIT(9)
-#define BIT_TXBCN1OK BIT(8)
-#define BIT_TXBCN7ERR BIT(6)
-#define BIT_TXBCN6ERR BIT(5)
-#define BIT_TXBCN5ERR BIT(4)
-#define BIT_TXBCN4ERR BIT(3)
-#define BIT_TXBCN3ERR BIT(2)
-#define BIT_TXBCN2ERR BIT(1)
-#define BIT_TXBCN1ERR BIT(0)
-
-/* 2 REG_HIMR3 (Offset 0x10B8) */
-
-#define BIT_WDT_PLATFORM_INT_MSK BIT(18)
-#define BIT_WDT_CPU_INT_MSK BIT(17)
-
-/* 2 REG_HIMR3 (Offset 0x10B8) */
-
-#define BIT_SETH2CDOK_MASK BIT(16)
-#define BIT_H2C_CMD_FULL_MASK BIT(15)
-#define BIT_PWR_INT_127_MASK BIT(14)
-#define BIT_TXSHORTCUT_TXDESUPDATEOK_MASK BIT(13)
-#define BIT_TXSHORTCUT_BKUPDATEOK_MASK BIT(12)
-#define BIT_TXSHORTCUT_BEUPDATEOK_MASK BIT(11)
-#define BIT_TXSHORTCUT_VIUPDATEOK_MAS BIT(10)
-#define BIT_TXSHORTCUT_VOUPDATEOK_MASK BIT(9)
-#define BIT_PWR_INT_127_MASK_V1 BIT(8)
-#define BIT_PWR_INT_126TO96_MASK BIT(7)
-#define BIT_PWR_INT_95TO64_MASK BIT(6)
-#define BIT_PWR_INT_63TO32_MASK BIT(5)
-#define BIT_PWR_INT_31TO0_MASK BIT(4)
-#define BIT_DDMA0_LP_INT_MSK BIT(1)
-#define BIT_DDMA0_HP_INT_MSK BIT(0)
-
-/* 2 REG_HISR3 (Offset 0x10BC) */
-
-#define BIT_WDT_PLATFORM_INT BIT(18)
-#define BIT_WDT_CPU_INT BIT(17)
-
-/* 2 REG_HISR3 (Offset 0x10BC) */
-
-#define BIT_SETH2CDOK BIT(16)
-#define BIT_H2C_CMD_FULL BIT(15)
-#define BIT_PWR_INT_127 BIT(14)
-#define BIT_TXSHORTCUT_TXDESUPDATEOK BIT(13)
-#define BIT_TXSHORTCUT_BKUPDATEOK BIT(12)
-#define BIT_TXSHORTCUT_BEUPDATEOK BIT(11)
-#define BIT_TXSHORTCUT_VIUPDATEOK BIT(10)
-#define BIT_TXSHORTCUT_VOUPDATEOK BIT(9)
-#define BIT_PWR_INT_127_V1 BIT(8)
-#define BIT_PWR_INT_126TO96 BIT(7)
-#define BIT_PWR_INT_95TO64 BIT(6)
-#define BIT_PWR_INT_63TO32 BIT(5)
-#define BIT_PWR_INT_31TO0 BIT(4)
-#define BIT_DDMA0_LP_INT BIT(1)
-#define BIT_DDMA0_HP_INT BIT(0)
-
-/* 2 REG_SW_MDIO (Offset 0x10C0) */
-
-#define BIT_DIS_TIMEOUT_IO BIT(24)
-
-/* 2 REG_SW_FLUSH (Offset 0x10C4) */
-
-#define BIT_FLUSH_HOLDN_EN BIT(25)
-#define BIT_FLUSH_WR_EN BIT(24)
-#define BIT_SW_FLASH_CONTROL BIT(23)
-#define BIT_SW_FLASH_WEN_E BIT(19)
-#define BIT_SW_FLASH_HOLDN_E BIT(18)
-#define BIT_SW_FLASH_SO_E BIT(17)
-#define BIT_SW_FLASH_SI_E BIT(16)
-#define BIT_SW_FLASH_SK_O BIT(13)
-#define BIT_SW_FLASH_CEN_O BIT(12)
-#define BIT_SW_FLASH_WEN_O BIT(11)
-#define BIT_SW_FLASH_HOLDN_O BIT(10)
-#define BIT_SW_FLASH_SO_O BIT(9)
-#define BIT_SW_FLASH_SI_O BIT(8)
-#define BIT_SW_FLASH_WEN_I BIT(3)
-#define BIT_SW_FLASH_HOLDN_I BIT(2)
-#define BIT_SW_FLASH_SO_I BIT(1)
-#define BIT_SW_FLASH_SI_I BIT(0)
-
-/* 2 REG_H2C_PKT_READADDR (Offset 0x10D0) */
-
-#define BIT_SHIFT_H2C_PKT_READADDR 0
-#define BIT_MASK_H2C_PKT_READADDR 0x3ffff
-#define BIT_H2C_PKT_READADDR(x) \
- (((x) & BIT_MASK_H2C_PKT_READADDR) << BIT_SHIFT_H2C_PKT_READADDR)
-#define BIT_GET_H2C_PKT_READADDR(x) \
- (((x) >> BIT_SHIFT_H2C_PKT_READADDR) & BIT_MASK_H2C_PKT_READADDR)
-
-/* 2 REG_H2C_PKT_WRITEADDR (Offset 0x10D4) */
-
-#define BIT_SHIFT_H2C_PKT_WRITEADDR 0
-#define BIT_MASK_H2C_PKT_WRITEADDR 0x3ffff
-#define BIT_H2C_PKT_WRITEADDR(x) \
- (((x) & BIT_MASK_H2C_PKT_WRITEADDR) << BIT_SHIFT_H2C_PKT_WRITEADDR)
-#define BIT_GET_H2C_PKT_WRITEADDR(x) \
- (((x) >> BIT_SHIFT_H2C_PKT_WRITEADDR) & BIT_MASK_H2C_PKT_WRITEADDR)
-
-/* 2 REG_MEM_PWR_CRTL (Offset 0x10D8) */
-
-#define BIT_MEM_BB_SD BIT(17)
-#define BIT_MEM_BB_DS BIT(16)
-#define BIT_MEM_BT_DS BIT(10)
-#define BIT_MEM_SDIO_LS BIT(9)
-#define BIT_MEM_SDIO_DS BIT(8)
-#define BIT_MEM_USB_LS BIT(7)
-#define BIT_MEM_USB_DS BIT(6)
-#define BIT_MEM_PCI_LS BIT(5)
-#define BIT_MEM_PCI_DS BIT(4)
-#define BIT_MEM_WLMAC_LS BIT(3)
-#define BIT_MEM_WLMAC_DS BIT(2)
-#define BIT_MEM_WLMCU_LS BIT(1)
-
-/* 2 REG_MEM_PWR_CRTL (Offset 0x10D8) */
-
-#define BIT_MEM_WLMCU_DS BIT(0)
-
-/* 2 REG_FW_DBG0 (Offset 0x10E0) */
-
-#define BIT_SHIFT_FW_DBG0 0
-#define BIT_MASK_FW_DBG0 0xffffffffL
-#define BIT_FW_DBG0(x) (((x) & BIT_MASK_FW_DBG0) << BIT_SHIFT_FW_DBG0)
-#define BIT_GET_FW_DBG0(x) (((x) >> BIT_SHIFT_FW_DBG0) & BIT_MASK_FW_DBG0)
-
-/* 2 REG_FW_DBG1 (Offset 0x10E4) */
-
-#define BIT_SHIFT_FW_DBG1 0
-#define BIT_MASK_FW_DBG1 0xffffffffL
-#define BIT_FW_DBG1(x) (((x) & BIT_MASK_FW_DBG1) << BIT_SHIFT_FW_DBG1)
-#define BIT_GET_FW_DBG1(x) (((x) >> BIT_SHIFT_FW_DBG1) & BIT_MASK_FW_DBG1)
-
-/* 2 REG_FW_DBG2 (Offset 0x10E8) */
-
-#define BIT_SHIFT_FW_DBG2 0
-#define BIT_MASK_FW_DBG2 0xffffffffL
-#define BIT_FW_DBG2(x) (((x) & BIT_MASK_FW_DBG2) << BIT_SHIFT_FW_DBG2)
-#define BIT_GET_FW_DBG2(x) (((x) >> BIT_SHIFT_FW_DBG2) & BIT_MASK_FW_DBG2)
-
-/* 2 REG_FW_DBG3 (Offset 0x10EC) */
-
-#define BIT_SHIFT_FW_DBG3 0
-#define BIT_MASK_FW_DBG3 0xffffffffL
-#define BIT_FW_DBG3(x) (((x) & BIT_MASK_FW_DBG3) << BIT_SHIFT_FW_DBG3)
-#define BIT_GET_FW_DBG3(x) (((x) >> BIT_SHIFT_FW_DBG3) & BIT_MASK_FW_DBG3)
-
-/* 2 REG_FW_DBG4 (Offset 0x10F0) */
-
-#define BIT_SHIFT_FW_DBG4 0
-#define BIT_MASK_FW_DBG4 0xffffffffL
-#define BIT_FW_DBG4(x) (((x) & BIT_MASK_FW_DBG4) << BIT_SHIFT_FW_DBG4)
-#define BIT_GET_FW_DBG4(x) (((x) >> BIT_SHIFT_FW_DBG4) & BIT_MASK_FW_DBG4)
-
-/* 2 REG_FW_DBG5 (Offset 0x10F4) */
-
-#define BIT_SHIFT_FW_DBG5 0
-#define BIT_MASK_FW_DBG5 0xffffffffL
-#define BIT_FW_DBG5(x) (((x) & BIT_MASK_FW_DBG5) << BIT_SHIFT_FW_DBG5)
-#define BIT_GET_FW_DBG5(x) (((x) >> BIT_SHIFT_FW_DBG5) & BIT_MASK_FW_DBG5)
-
-/* 2 REG_FW_DBG6 (Offset 0x10F8) */
-
-#define BIT_SHIFT_FW_DBG6 0
-#define BIT_MASK_FW_DBG6 0xffffffffL
-#define BIT_FW_DBG6(x) (((x) & BIT_MASK_FW_DBG6) << BIT_SHIFT_FW_DBG6)
-#define BIT_GET_FW_DBG6(x) (((x) >> BIT_SHIFT_FW_DBG6) & BIT_MASK_FW_DBG6)
-
-/* 2 REG_FW_DBG7 (Offset 0x10FC) */
-
-#define BIT_SHIFT_FW_DBG7 0
-#define BIT_MASK_FW_DBG7 0xffffffffL
-#define BIT_FW_DBG7(x) (((x) & BIT_MASK_FW_DBG7) << BIT_SHIFT_FW_DBG7)
-#define BIT_GET_FW_DBG7(x) (((x) >> BIT_SHIFT_FW_DBG7) & BIT_MASK_FW_DBG7)
-
-/* 2 REG_CR_EXT (Offset 0x1100) */
-
-#define BIT_SHIFT_PHY_REQ_DELAY 24
-#define BIT_MASK_PHY_REQ_DELAY 0xf
-#define BIT_PHY_REQ_DELAY(x) \
- (((x) & BIT_MASK_PHY_REQ_DELAY) << BIT_SHIFT_PHY_REQ_DELAY)
-#define BIT_GET_PHY_REQ_DELAY(x) \
- (((x) >> BIT_SHIFT_PHY_REQ_DELAY) & BIT_MASK_PHY_REQ_DELAY)
-
-#define BIT_SPD_DOWN BIT(16)
-
-#define BIT_SHIFT_NETYPE4 4
-#define BIT_MASK_NETYPE4 0x3
-#define BIT_NETYPE4(x) (((x) & BIT_MASK_NETYPE4) << BIT_SHIFT_NETYPE4)
-#define BIT_GET_NETYPE4(x) (((x) >> BIT_SHIFT_NETYPE4) & BIT_MASK_NETYPE4)
-
-#define BIT_SHIFT_NETYPE3 2
-#define BIT_MASK_NETYPE3 0x3
-#define BIT_NETYPE3(x) (((x) & BIT_MASK_NETYPE3) << BIT_SHIFT_NETYPE3)
-#define BIT_GET_NETYPE3(x) (((x) >> BIT_SHIFT_NETYPE3) & BIT_MASK_NETYPE3)
-
-#define BIT_SHIFT_NETYPE2 0
-#define BIT_MASK_NETYPE2 0x3
-#define BIT_NETYPE2(x) (((x) & BIT_MASK_NETYPE2) << BIT_SHIFT_NETYPE2)
-#define BIT_GET_NETYPE2(x) (((x) >> BIT_SHIFT_NETYPE2) & BIT_MASK_NETYPE2)
-
-/* 2 REG_FWFF (Offset 0x1114) */
-
-#define BIT_SHIFT_PKTNUM_TH_V1 24
-#define BIT_MASK_PKTNUM_TH_V1 0xff
-#define BIT_PKTNUM_TH_V1(x) \
- (((x) & BIT_MASK_PKTNUM_TH_V1) << BIT_SHIFT_PKTNUM_TH_V1)
-#define BIT_GET_PKTNUM_TH_V1(x) \
- (((x) >> BIT_SHIFT_PKTNUM_TH_V1) & BIT_MASK_PKTNUM_TH_V1)
-
-/* 2 REG_FWFF (Offset 0x1114) */
-
-#define BIT_SHIFT_TIMER_TH 16
-#define BIT_MASK_TIMER_TH 0xff
-#define BIT_TIMER_TH(x) (((x) & BIT_MASK_TIMER_TH) << BIT_SHIFT_TIMER_TH)
-#define BIT_GET_TIMER_TH(x) (((x) >> BIT_SHIFT_TIMER_TH) & BIT_MASK_TIMER_TH)
-
-/* 2 REG_FWFF (Offset 0x1114) */
-
-#define BIT_SHIFT_RXPKT1ENADDR 0
-#define BIT_MASK_RXPKT1ENADDR 0xffff
-#define BIT_RXPKT1ENADDR(x) \
- (((x) & BIT_MASK_RXPKT1ENADDR) << BIT_SHIFT_RXPKT1ENADDR)
-#define BIT_GET_RXPKT1ENADDR(x) \
- (((x) >> BIT_SHIFT_RXPKT1ENADDR) & BIT_MASK_RXPKT1ENADDR)
-
-/* 2 REG_FE2IMR (Offset 0x1120) */
-
-#define BIT__FE4ISR__IND_MSK BIT(29)
-
-/* 2 REG_FE2IMR (Offset 0x1120) */
-
-#define BIT_FS_TXSC_DESC_DONE_INT_EN BIT(28)
-#define BIT_FS_TXSC_BKDONE_INT_EN BIT(27)
-#define BIT_FS_TXSC_BEDONE_INT_EN BIT(26)
-#define BIT_FS_TXSC_VIDONE_INT_EN BIT(25)
-#define BIT_FS_TXSC_VODONE_INT_EN BIT(24)
-
-/* 2 REG_FE2IMR (Offset 0x1120) */
-
-#define BIT_FS_ATIM_MB7_INT_EN BIT(23)
-#define BIT_FS_ATIM_MB6_INT_EN BIT(22)
-#define BIT_FS_ATIM_MB5_INT_EN BIT(21)
-#define BIT_FS_ATIM_MB4_INT_EN BIT(20)
-#define BIT_FS_ATIM_MB3_INT_EN BIT(19)
-#define BIT_FS_ATIM_MB2_INT_EN BIT(18)
-#define BIT_FS_ATIM_MB1_INT_EN BIT(17)
-#define BIT_FS_ATIM_MB0_INT_EN BIT(16)
-#define BIT_FS_TBTT4INT_EN BIT(11)
-#define BIT_FS_TBTT3INT_EN BIT(10)
-#define BIT_FS_TBTT2INT_EN BIT(9)
-#define BIT_FS_TBTT1INT_EN BIT(8)
-#define BIT_FS_TBTT0_MB7INT_EN BIT(7)
-#define BIT_FS_TBTT0_MB6INT_EN BIT(6)
-#define BIT_FS_TBTT0_MB5INT_EN BIT(5)
-#define BIT_FS_TBTT0_MB4INT_EN BIT(4)
-#define BIT_FS_TBTT0_MB3INT_EN BIT(3)
-#define BIT_FS_TBTT0_MB2INT_EN BIT(2)
-#define BIT_FS_TBTT0_MB1INT_EN BIT(1)
-#define BIT_FS_TBTT0_INT_EN BIT(0)
-
-/* 2 REG_FE2ISR (Offset 0x1124) */
-
-#define BIT__FE4ISR__IND_INT BIT(29)
-
-/* 2 REG_FE2ISR (Offset 0x1124) */
-
-#define BIT_FS_TXSC_DESC_DONE_INT BIT(28)
-#define BIT_FS_TXSC_BKDONE_INT BIT(27)
-#define BIT_FS_TXSC_BEDONE_INT BIT(26)
-#define BIT_FS_TXSC_VIDONE_INT BIT(25)
-#define BIT_FS_TXSC_VODONE_INT BIT(24)
-
-/* 2 REG_FE2ISR (Offset 0x1124) */
-
-#define BIT_FS_ATIM_MB7_INT BIT(23)
-#define BIT_FS_ATIM_MB6_INT BIT(22)
-#define BIT_FS_ATIM_MB5_INT BIT(21)
-#define BIT_FS_ATIM_MB4_INT BIT(20)
-#define BIT_FS_ATIM_MB3_INT BIT(19)
-#define BIT_FS_ATIM_MB2_INT BIT(18)
-#define BIT_FS_ATIM_MB1_INT BIT(17)
-#define BIT_FS_ATIM_MB0_INT BIT(16)
-#define BIT_FS_TBTT4INT BIT(11)
-#define BIT_FS_TBTT3INT BIT(10)
-#define BIT_FS_TBTT2INT BIT(9)
-#define BIT_FS_TBTT1INT BIT(8)
-#define BIT_FS_TBTT0_MB7INT BIT(7)
-#define BIT_FS_TBTT0_MB6INT BIT(6)
-#define BIT_FS_TBTT0_MB5INT BIT(5)
-#define BIT_FS_TBTT0_MB4INT BIT(4)
-#define BIT_FS_TBTT0_MB3INT BIT(3)
-#define BIT_FS_TBTT0_MB2INT BIT(2)
-#define BIT_FS_TBTT0_MB1INT BIT(1)
-#define BIT_FS_TBTT0_INT BIT(0)
-
-/* 2 REG_FE3IMR (Offset 0x1128) */
-
-#define BIT_FS_CLI3_MTI_BCNIVLEAR_INT__EN BIT(31)
-
-/* 2 REG_FE3IMR (Offset 0x1128) */
-
-#define BIT_FS_CLI2_MTI_BCNIVLEAR_INT__EN BIT(30)
-
-/* 2 REG_FE3IMR (Offset 0x1128) */
-
-#define BIT_FS_CLI1_MTI_BCNIVLEAR_INT__EN BIT(29)
-
-/* 2 REG_FE3IMR (Offset 0x1128) */
-
-#define BIT_FS_CLI0_MTI_BCNIVLEAR_INT__EN BIT(28)
-
-/* 2 REG_FE3IMR (Offset 0x1128) */
-
-#define BIT_FS_BCNDMA4_INT_EN BIT(27)
-#define BIT_FS_BCNDMA3_INT_EN BIT(26)
-#define BIT_FS_BCNDMA2_INT_EN BIT(25)
-#define BIT_FS_BCNDMA1_INT_EN BIT(24)
-#define BIT_FS_BCNDMA0_MB7_INT_EN BIT(23)
-#define BIT_FS_BCNDMA0_MB6_INT_EN BIT(22)
-#define BIT_FS_BCNDMA0_MB5_INT_EN BIT(21)
-#define BIT_FS_BCNDMA0_MB4_INT_EN BIT(20)
-#define BIT_FS_BCNDMA0_MB3_INT_EN BIT(19)
-#define BIT_FS_BCNDMA0_MB2_INT_EN BIT(18)
-#define BIT_FS_BCNDMA0_MB1_INT_EN BIT(17)
-#define BIT_FS_BCNDMA0_INT_EN BIT(16)
-#define BIT_FS_MTI_BCNIVLEAR_INT__EN BIT(15)
-#define BIT_FS_BCNERLY4_INT_EN BIT(11)
-#define BIT_FS_BCNERLY3_INT_EN BIT(10)
-#define BIT_FS_BCNERLY2_INT_EN BIT(9)
-#define BIT_FS_BCNERLY1_INT_EN BIT(8)
-#define BIT_FS_BCNERLY0_MB7INT_EN BIT(7)
-#define BIT_FS_BCNERLY0_MB6INT_EN BIT(6)
-#define BIT_FS_BCNERLY0_MB5INT_EN BIT(5)
-#define BIT_FS_BCNERLY0_MB4INT_EN BIT(4)
-#define BIT_FS_BCNERLY0_MB3INT_EN BIT(3)
-#define BIT_FS_BCNERLY0_MB2INT_EN BIT(2)
-#define BIT_FS_BCNERLY0_MB1INT_EN BIT(1)
-#define BIT_FS_BCNERLY0_INT_EN BIT(0)
-
-/* 2 REG_FE3ISR (Offset 0x112C) */
-
-#define BIT_FS_CLI3_MTI_BCNIVLEAR_INT BIT(31)
-
-/* 2 REG_FE3ISR (Offset 0x112C) */
-
-#define BIT_FS_CLI2_MTI_BCNIVLEAR_INT BIT(30)
-
-/* 2 REG_FE3ISR (Offset 0x112C) */
-
-#define BIT_FS_CLI1_MTI_BCNIVLEAR_INT BIT(29)
-
-/* 2 REG_FE3ISR (Offset 0x112C) */
-
-#define BIT_FS_CLI0_MTI_BCNIVLEAR_INT BIT(28)
-
-/* 2 REG_FE3ISR (Offset 0x112C) */
-
-#define BIT_FS_BCNDMA4_INT BIT(27)
-#define BIT_FS_BCNDMA3_INT BIT(26)
-#define BIT_FS_BCNDMA2_INT BIT(25)
-#define BIT_FS_BCNDMA1_INT BIT(24)
-#define BIT_FS_BCNDMA0_MB7_INT BIT(23)
-#define BIT_FS_BCNDMA0_MB6_INT BIT(22)
-#define BIT_FS_BCNDMA0_MB5_INT BIT(21)
-#define BIT_FS_BCNDMA0_MB4_INT BIT(20)
-#define BIT_FS_BCNDMA0_MB3_INT BIT(19)
-#define BIT_FS_BCNDMA0_MB2_INT BIT(18)
-#define BIT_FS_BCNDMA0_MB1_INT BIT(17)
-#define BIT_FS_BCNDMA0_INT BIT(16)
-#define BIT_FS_MTI_BCNIVLEAR_INT BIT(15)
-#define BIT_FS_BCNERLY4_INT BIT(11)
-#define BIT_FS_BCNERLY3_INT BIT(10)
-#define BIT_FS_BCNERLY2_INT BIT(9)
-#define BIT_FS_BCNERLY1_INT BIT(8)
-#define BIT_FS_BCNERLY0_MB7INT BIT(7)
-#define BIT_FS_BCNERLY0_MB6INT BIT(6)
-#define BIT_FS_BCNERLY0_MB5INT BIT(5)
-#define BIT_FS_BCNERLY0_MB4INT BIT(4)
-#define BIT_FS_BCNERLY0_MB3INT BIT(3)
-#define BIT_FS_BCNERLY0_MB2INT BIT(2)
-#define BIT_FS_BCNERLY0_MB1INT BIT(1)
-#define BIT_FS_BCNERLY0_INT BIT(0)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI3_TXPKTIN_INT_EN BIT(19)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI2_TXPKTIN_INT_EN BIT(18)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI1_TXPKTIN_INT_EN BIT(17)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI0_TXPKTIN_INT_EN BIT(16)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI3_RX_UMD0_INT_EN BIT(15)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI3_RX_UMD1_INT_EN BIT(14)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI3_RX_BMD0_INT_EN BIT(13)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI3_RX_BMD1_INT_EN BIT(12)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI2_RX_UMD0_INT_EN BIT(11)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI2_RX_UMD1_INT_EN BIT(10)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI2_RX_BMD0_INT_EN BIT(9)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI2_RX_BMD1_INT_EN BIT(8)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI1_RX_UMD0_INT_EN BIT(7)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI1_RX_UMD1_INT_EN BIT(6)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI1_RX_BMD0_INT_EN BIT(5)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI1_RX_BMD1_INT_EN BIT(4)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI0_RX_UMD0_INT_EN BIT(3)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI0_RX_UMD1_INT_EN BIT(2)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI0_RX_BMD0_INT_EN BIT(1)
-
-/* 2 REG_FE4IMR (Offset 0x1130) */
-
-#define BIT_FS_CLI0_RX_BMD1_INT_EN BIT(0)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI3_TXPKTIN_INT BIT(19)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI2_TXPKTIN_INT BIT(18)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI1_TXPKTIN_INT BIT(17)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI0_TXPKTIN_INT BIT(16)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI3_RX_UMD0_INT BIT(15)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI3_RX_UMD1_INT BIT(14)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI3_RX_BMD0_INT BIT(13)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI3_RX_BMD1_INT BIT(12)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI2_RX_UMD0_INT BIT(11)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI2_RX_UMD1_INT BIT(10)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI2_RX_BMD0_INT BIT(9)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI2_RX_BMD1_INT BIT(8)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI1_RX_UMD0_INT BIT(7)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI1_RX_UMD1_INT BIT(6)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI1_RX_BMD0_INT BIT(5)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI1_RX_BMD1_INT BIT(4)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI0_RX_UMD0_INT BIT(3)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI0_RX_UMD1_INT BIT(2)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI0_RX_BMD0_INT BIT(1)
-
-/* 2 REG_FE4ISR (Offset 0x1134) */
-
-#define BIT_FS_CLI0_RX_BMD1_INT BIT(0)
-
-/* 2 REG_FT1IMR (Offset 0x1138) */
-
-#define BIT__FT2ISR__IND_MSK BIT(30)
-#define BIT_FTM_PTT_INT_EN BIT(29)
-#define BIT_RXFTMREQ_INT_EN BIT(28)
-#define BIT_RXFTM_INT_EN BIT(27)
-#define BIT_TXFTM_INT_EN BIT(26)
-
-/* 2 REG_FT1IMR (Offset 0x1138) */
-
-#define BIT_FS_H2C_CMD_OK_INT_EN BIT(25)
-#define BIT_FS_H2C_CMD_FULL_INT_EN BIT(24)
-
-/* 2 REG_FT1IMR (Offset 0x1138) */
-
-#define BIT_FS_MACID_PWRCHANGE5_INT_EN BIT(23)
-#define BIT_FS_MACID_PWRCHANGE4_INT_EN BIT(22)
-#define BIT_FS_MACID_PWRCHANGE3_INT_EN BIT(21)
-#define BIT_FS_MACID_PWRCHANGE2_INT_EN BIT(20)
-#define BIT_FS_MACID_PWRCHANGE1_INT_EN BIT(19)
-#define BIT_FS_MACID_PWRCHANGE0_INT_EN BIT(18)
-#define BIT_FS_CTWEND2_INT_EN BIT(17)
-#define BIT_FS_CTWEND1_INT_EN BIT(16)
-#define BIT_FS_CTWEND0_INT_EN BIT(15)
-#define BIT_FS_TX_NULL1_INT_EN BIT(14)
-#define BIT_FS_TX_NULL0_INT_EN BIT(13)
-#define BIT_FS_TSF_BIT32_TOGGLE_EN BIT(12)
-#define BIT_FS_P2P_RFON2_INT_EN BIT(11)
-#define BIT_FS_P2P_RFOFF2_INT_EN BIT(10)
-#define BIT_FS_P2P_RFON1_INT_EN BIT(9)
-#define BIT_FS_P2P_RFOFF1_INT_EN BIT(8)
-#define BIT_FS_P2P_RFON0_INT_EN BIT(7)
-#define BIT_FS_P2P_RFOFF0_INT_EN BIT(6)
-#define BIT_FS_RX_UAPSDMD1_EN BIT(5)
-#define BIT_FS_RX_UAPSDMD0_EN BIT(4)
-#define BIT_FS_TRIGGER_PKT_EN BIT(3)
-#define BIT_FS_EOSP_INT_EN BIT(2)
-#define BIT_FS_RPWM2_INT_EN BIT(1)
-#define BIT_FS_RPWM_INT_EN BIT(0)
-
-/* 2 REG_FT1ISR (Offset 0x113C) */
-
-#define BIT__FT2ISR__IND_INT BIT(30)
-#define BIT_FTM_PTT_INT BIT(29)
-#define BIT_RXFTMREQ_INT BIT(28)
-#define BIT_RXFTM_INT BIT(27)
-#define BIT_TXFTM_INT BIT(26)
-
-/* 2 REG_FT1ISR (Offset 0x113C) */
-
-#define BIT_FS_H2C_CMD_OK_INT BIT(25)
-#define BIT_FS_H2C_CMD_FULL_INT BIT(24)
-
-/* 2 REG_FT1ISR (Offset 0x113C) */
-
-#define BIT_FS_MACID_PWRCHANGE5_INT BIT(23)
-#define BIT_FS_MACID_PWRCHANGE4_INT BIT(22)
-#define BIT_FS_MACID_PWRCHANGE3_INT BIT(21)
-#define BIT_FS_MACID_PWRCHANGE2_INT BIT(20)
-#define BIT_FS_MACID_PWRCHANGE1_INT BIT(19)
-#define BIT_FS_MACID_PWRCHANGE0_INT BIT(18)
-#define BIT_FS_CTWEND2_INT BIT(17)
-#define BIT_FS_CTWEND1_INT BIT(16)
-#define BIT_FS_CTWEND0_INT BIT(15)
-#define BIT_FS_TX_NULL1_INT BIT(14)
-#define BIT_FS_TX_NULL0_INT BIT(13)
-#define BIT_FS_TSF_BIT32_TOGGLE_INT BIT(12)
-#define BIT_FS_P2P_RFON2_INT BIT(11)
-#define BIT_FS_P2P_RFOFF2_INT BIT(10)
-#define BIT_FS_P2P_RFON1_INT BIT(9)
-#define BIT_FS_P2P_RFOFF1_INT BIT(8)
-#define BIT_FS_P2P_RFON0_INT BIT(7)
-#define BIT_FS_P2P_RFOFF0_INT BIT(6)
-#define BIT_FS_RX_UAPSDMD1_INT BIT(5)
-#define BIT_FS_RX_UAPSDMD0_INT BIT(4)
-#define BIT_FS_TRIGGER_PKT_INT BIT(3)
-#define BIT_FS_EOSP_INT BIT(2)
-#define BIT_FS_RPWM2_INT BIT(1)
-#define BIT_FS_RPWM_INT BIT(0)
-
-/* 2 REG_SPWR0 (Offset 0x1140) */
-
-#define BIT_SHIFT_MID_31TO0 0
-#define BIT_MASK_MID_31TO0 0xffffffffL
-#define BIT_MID_31TO0(x) (((x) & BIT_MASK_MID_31TO0) << BIT_SHIFT_MID_31TO0)
-#define BIT_GET_MID_31TO0(x) (((x) >> BIT_SHIFT_MID_31TO0) & BIT_MASK_MID_31TO0)
-
-/* 2 REG_SPWR1 (Offset 0x1144) */
-
-#define BIT_SHIFT_MID_63TO32 0
-#define BIT_MASK_MID_63TO32 0xffffffffL
-#define BIT_MID_63TO32(x) (((x) & BIT_MASK_MID_63TO32) << BIT_SHIFT_MID_63TO32)
-#define BIT_GET_MID_63TO32(x) \
- (((x) >> BIT_SHIFT_MID_63TO32) & BIT_MASK_MID_63TO32)
-
-/* 2 REG_SPWR2 (Offset 0x1148) */
-
-#define BIT_SHIFT_MID_95O64 0
-#define BIT_MASK_MID_95O64 0xffffffffL
-#define BIT_MID_95O64(x) (((x) & BIT_MASK_MID_95O64) << BIT_SHIFT_MID_95O64)
-#define BIT_GET_MID_95O64(x) (((x) >> BIT_SHIFT_MID_95O64) & BIT_MASK_MID_95O64)
-
-/* 2 REG_SPWR3 (Offset 0x114C) */
-
-#define BIT_SHIFT_MID_127TO96 0
-#define BIT_MASK_MID_127TO96 0xffffffffL
-#define BIT_MID_127TO96(x) \
- (((x) & BIT_MASK_MID_127TO96) << BIT_SHIFT_MID_127TO96)
-#define BIT_GET_MID_127TO96(x) \
- (((x) >> BIT_SHIFT_MID_127TO96) & BIT_MASK_MID_127TO96)
-
-/* 2 REG_POWSEQ (Offset 0x1150) */
-
-#define BIT_SHIFT_SEQNUM_MID 16
-#define BIT_MASK_SEQNUM_MID 0xffff
-#define BIT_SEQNUM_MID(x) (((x) & BIT_MASK_SEQNUM_MID) << BIT_SHIFT_SEQNUM_MID)
-#define BIT_GET_SEQNUM_MID(x) \
- (((x) >> BIT_SHIFT_SEQNUM_MID) & BIT_MASK_SEQNUM_MID)
-
-#define BIT_SHIFT_REF_MID 0
-#define BIT_MASK_REF_MID 0x7f
-#define BIT_REF_MID(x) (((x) & BIT_MASK_REF_MID) << BIT_SHIFT_REF_MID)
-#define BIT_GET_REF_MID(x) (((x) >> BIT_SHIFT_REF_MID) & BIT_MASK_REF_MID)
-
-/* 2 REG_TC7_CTRL_V1 (Offset 0x1158) */
-
-#define BIT_TC7INT_EN BIT(26)
-#define BIT_TC7MODE BIT(25)
-#define BIT_TC7EN BIT(24)
-
-#define BIT_SHIFT_TC7DATA 0
-#define BIT_MASK_TC7DATA 0xffffff
-#define BIT_TC7DATA(x) (((x) & BIT_MASK_TC7DATA) << BIT_SHIFT_TC7DATA)
-#define BIT_GET_TC7DATA(x) (((x) >> BIT_SHIFT_TC7DATA) & BIT_MASK_TC7DATA)
-
-/* 2 REG_TC8_CTRL_V1 (Offset 0x115C) */
-
-#define BIT_TC8INT_EN BIT(26)
-#define BIT_TC8MODE BIT(25)
-#define BIT_TC8EN BIT(24)
-
-#define BIT_SHIFT_TC8DATA 0
-#define BIT_MASK_TC8DATA 0xffffff
-#define BIT_TC8DATA(x) (((x) & BIT_MASK_TC8DATA) << BIT_SHIFT_TC8DATA)
-#define BIT_GET_TC8DATA(x) (((x) >> BIT_SHIFT_TC8DATA) & BIT_MASK_TC8DATA)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI3_RX_UAPSDMD1_EN BIT(31)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI3_RX_UAPSDMD0_EN BIT(30)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI3_TRIGGER_PKT_EN BIT(29)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI3_EOSP_INT_EN BIT(28)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI2_RX_UAPSDMD1_EN BIT(27)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI2_RX_UAPSDMD0_EN BIT(26)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI2_TRIGGER_PKT_EN BIT(25)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI2_EOSP_INT_EN BIT(24)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI1_RX_UAPSDMD1_EN BIT(23)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI1_RX_UAPSDMD0_EN BIT(22)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI1_TRIGGER_PKT_EN BIT(21)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI1_EOSP_INT_EN BIT(20)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI0_RX_UAPSDMD1_EN BIT(19)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI0_RX_UAPSDMD0_EN BIT(18)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI0_TRIGGER_PKT_EN BIT(17)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI0_EOSP_INT_EN BIT(16)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_TSF_BIT32_TOGGLE_P2P2_EN BIT(9)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_TSF_BIT32_TOGGLE_P2P1_EN BIT(8)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI3_TX_NULL1_INT_EN BIT(7)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI3_TX_NULL0_INT_EN BIT(6)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI2_TX_NULL1_INT_EN BIT(5)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI2_TX_NULL0_INT_EN BIT(4)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI1_TX_NULL1_INT_EN BIT(3)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI1_TX_NULL0_INT_EN BIT(2)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI0_TX_NULL1_INT_EN BIT(1)
-
-/* 2 REG_FT2IMR (Offset 0x11E0) */
-
-#define BIT_FS_CLI0_TX_NULL0_INT_EN BIT(0)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI3_RX_UAPSDMD1_INT BIT(31)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI3_RX_UAPSDMD0_INT BIT(30)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI3_TRIGGER_PKT_INT BIT(29)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI3_EOSP_INT BIT(28)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI2_RX_UAPSDMD1_INT BIT(27)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI2_RX_UAPSDMD0_INT BIT(26)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI2_TRIGGER_PKT_INT BIT(25)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI2_EOSP_INT BIT(24)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI1_RX_UAPSDMD1_INT BIT(23)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI1_RX_UAPSDMD0_INT BIT(22)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI1_TRIGGER_PKT_INT BIT(21)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI1_EOSP_INT BIT(20)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI0_RX_UAPSDMD1_INT BIT(19)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI0_RX_UAPSDMD0_INT BIT(18)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI0_TRIGGER_PKT_INT BIT(17)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI0_EOSP_INT BIT(16)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_TSF_BIT32_TOGGLE_P2P2_INT BIT(9)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_TSF_BIT32_TOGGLE_P2P1_INT BIT(8)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI3_TX_NULL1_INT BIT(7)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI3_TX_NULL0_INT BIT(6)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI2_TX_NULL1_INT BIT(5)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI2_TX_NULL0_INT BIT(4)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI1_TX_NULL1_INT BIT(3)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI1_TX_NULL0_INT BIT(2)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI0_TX_NULL1_INT BIT(1)
-
-/* 2 REG_FT2ISR (Offset 0x11E4) */
-
-#define BIT_FS_CLI0_TX_NULL0_INT BIT(0)
-
-/* 2 REG_MSG2 (Offset 0x11F0) */
-
-#define BIT_SHIFT_FW_MSG2 0
-#define BIT_MASK_FW_MSG2 0xffffffffL
-#define BIT_FW_MSG2(x) (((x) & BIT_MASK_FW_MSG2) << BIT_SHIFT_FW_MSG2)
-#define BIT_GET_FW_MSG2(x) (((x) >> BIT_SHIFT_FW_MSG2) & BIT_MASK_FW_MSG2)
-
-/* 2 REG_MSG3 (Offset 0x11F4) */
-
-#define BIT_SHIFT_FW_MSG3 0
-#define BIT_MASK_FW_MSG3 0xffffffffL
-#define BIT_FW_MSG3(x) (((x) & BIT_MASK_FW_MSG3) << BIT_SHIFT_FW_MSG3)
-#define BIT_GET_FW_MSG3(x) (((x) >> BIT_SHIFT_FW_MSG3) & BIT_MASK_FW_MSG3)
-
-/* 2 REG_MSG4 (Offset 0x11F8) */
-
-#define BIT_SHIFT_FW_MSG4 0
-#define BIT_MASK_FW_MSG4 0xffffffffL
-#define BIT_FW_MSG4(x) (((x) & BIT_MASK_FW_MSG4) << BIT_SHIFT_FW_MSG4)
-#define BIT_GET_FW_MSG4(x) (((x) >> BIT_SHIFT_FW_MSG4) & BIT_MASK_FW_MSG4)
-
-/* 2 REG_MSG5 (Offset 0x11FC) */
-
-#define BIT_SHIFT_FW_MSG5 0
-#define BIT_MASK_FW_MSG5 0xffffffffL
-#define BIT_FW_MSG5(x) (((x) & BIT_MASK_FW_MSG5) << BIT_SHIFT_FW_MSG5)
-#define BIT_GET_FW_MSG5(x) (((x) >> BIT_SHIFT_FW_MSG5) & BIT_MASK_FW_MSG5)
-
-/* 2 REG_DDMA_CH0SA (Offset 0x1200) */
-
-#define BIT_SHIFT_DDMACH0_SA 0
-#define BIT_MASK_DDMACH0_SA 0xffffffffL
-#define BIT_DDMACH0_SA(x) (((x) & BIT_MASK_DDMACH0_SA) << BIT_SHIFT_DDMACH0_SA)
-#define BIT_GET_DDMACH0_SA(x) \
- (((x) >> BIT_SHIFT_DDMACH0_SA) & BIT_MASK_DDMACH0_SA)
-
-/* 2 REG_DDMA_CH0DA (Offset 0x1204) */
-
-#define BIT_SHIFT_DDMACH0_DA 0
-#define BIT_MASK_DDMACH0_DA 0xffffffffL
-#define BIT_DDMACH0_DA(x) (((x) & BIT_MASK_DDMACH0_DA) << BIT_SHIFT_DDMACH0_DA)
-#define BIT_GET_DDMACH0_DA(x) \
- (((x) >> BIT_SHIFT_DDMACH0_DA) & BIT_MASK_DDMACH0_DA)
-
-/* 2 REG_DDMA_CH0CTRL (Offset 0x1208) */
-
-#define BIT_DDMACH0_OWN BIT(31)
-#define BIT_DDMACH0_CHKSUM_EN BIT(29)
-#define BIT_DDMACH0_DA_W_DISABLE BIT(28)
-#define BIT_DDMACH0_CHKSUM_STS BIT(27)
-#define BIT_DDMACH0_DDMA_MODE BIT(26)
-#define BIT_DDMACH0_RESET_CHKSUM_STS BIT(25)
-#define BIT_DDMACH0_CHKSUM_CONT BIT(24)
-
-#define BIT_SHIFT_DDMACH0_DLEN 0
-#define BIT_MASK_DDMACH0_DLEN 0x3ffff
-#define BIT_DDMACH0_DLEN(x) \
- (((x) & BIT_MASK_DDMACH0_DLEN) << BIT_SHIFT_DDMACH0_DLEN)
-#define BIT_GET_DDMACH0_DLEN(x) \
- (((x) >> BIT_SHIFT_DDMACH0_DLEN) & BIT_MASK_DDMACH0_DLEN)
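
The REG_DDMA_CH0CTRL definitions above mix single-bit flags with an 18-bit length field; a control word for a descriptor-DMA transfer could be composed as sketched below. The transfer length and the assumption that setting OWN hands the channel to hardware are illustrative, not taken from the driver:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))	/* local stand-in for the kernel's BIT() */

/* Flag and field macros as defined in the header above. */
#define BIT_DDMACH0_OWN BIT(31)
#define BIT_DDMACH0_CHKSUM_EN BIT(29)
#define BIT_SHIFT_DDMACH0_DLEN 0
#define BIT_MASK_DDMACH0_DLEN 0x3ffff
#define BIT_DDMACH0_DLEN(x) \
	(((x) & BIT_MASK_DDMACH0_DLEN) << BIT_SHIFT_DDMACH0_DLEN)

int main(void)
{
	uint32_t len = 0x1000;	/* illustrative transfer length in bytes */
	uint32_t ctrl;

	/* OWN flag (ownership convention assumed) + checksum enable + length. */
	ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_DLEN(len);
	printf("ch0 ctrl word = 0x%08x\n", (unsigned)ctrl);
	return 0;
}
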
-
-/* 2 REG_DDMA_CH1SA (Offset 0x1210) */
-
-#define BIT_SHIFT_DDMACH1_SA 0
-#define BIT_MASK_DDMACH1_SA 0xffffffffL
-#define BIT_DDMACH1_SA(x) (((x) & BIT_MASK_DDMACH1_SA) << BIT_SHIFT_DDMACH1_SA)
-#define BIT_GET_DDMACH1_SA(x) \
- (((x) >> BIT_SHIFT_DDMACH1_SA) & BIT_MASK_DDMACH1_SA)
-
-/* 2 REG_DDMA_CH1DA (Offset 0x1214) */
-
-#define BIT_SHIFT_DDMACH1_DA 0
-#define BIT_MASK_DDMACH1_DA 0xffffffffL
-#define BIT_DDMACH1_DA(x) (((x) & BIT_MASK_DDMACH1_DA) << BIT_SHIFT_DDMACH1_DA)
-#define BIT_GET_DDMACH1_DA(x) \
- (((x) >> BIT_SHIFT_DDMACH1_DA) & BIT_MASK_DDMACH1_DA)
-
-/* 2 REG_DDMA_CH1CTRL (Offset 0x1218) */
-
-#define BIT_DDMACH1_OWN BIT(31)
-#define BIT_DDMACH1_CHKSUM_EN BIT(29)
-#define BIT_DDMACH1_DA_W_DISABLE BIT(28)
-#define BIT_DDMACH1_CHKSUM_STS BIT(27)
-#define BIT_DDMACH1_DDMA_MODE BIT(26)
-#define BIT_DDMACH1_RESET_CHKSUM_STS BIT(25)
-#define BIT_DDMACH1_CHKSUM_CONT BIT(24)
-
-#define BIT_SHIFT_DDMACH1_DLEN 0
-#define BIT_MASK_DDMACH1_DLEN 0x3ffff
-#define BIT_DDMACH1_DLEN(x) \
- (((x) & BIT_MASK_DDMACH1_DLEN) << BIT_SHIFT_DDMACH1_DLEN)
-#define BIT_GET_DDMACH1_DLEN(x) \
- (((x) >> BIT_SHIFT_DDMACH1_DLEN) & BIT_MASK_DDMACH1_DLEN)
-
-/* 2 REG_DDMA_CH2SA (Offset 0x1220) */
-
-#define BIT_SHIFT_DDMACH2_SA 0
-#define BIT_MASK_DDMACH2_SA 0xffffffffL
-#define BIT_DDMACH2_SA(x) (((x) & BIT_MASK_DDMACH2_SA) << BIT_SHIFT_DDMACH2_SA)
-#define BIT_GET_DDMACH2_SA(x) \
- (((x) >> BIT_SHIFT_DDMACH2_SA) & BIT_MASK_DDMACH2_SA)
-
-/* 2 REG_DDMA_CH2DA (Offset 0x1224) */
-
-#define BIT_SHIFT_DDMACH2_DA 0
-#define BIT_MASK_DDMACH2_DA 0xffffffffL
-#define BIT_DDMACH2_DA(x) (((x) & BIT_MASK_DDMACH2_DA) << BIT_SHIFT_DDMACH2_DA)
-#define BIT_GET_DDMACH2_DA(x) \
- (((x) >> BIT_SHIFT_DDMACH2_DA) & BIT_MASK_DDMACH2_DA)
-
-/* 2 REG_DDMA_CH2CTRL (Offset 0x1228) */
-
-#define BIT_DDMACH2_OWN BIT(31)
-#define BIT_DDMACH2_CHKSUM_EN BIT(29)
-#define BIT_DDMACH2_DA_W_DISABLE BIT(28)
-#define BIT_DDMACH2_CHKSUM_STS BIT(27)
-#define BIT_DDMACH2_DDMA_MODE BIT(26)
-#define BIT_DDMACH2_RESET_CHKSUM_STS BIT(25)
-#define BIT_DDMACH2_CHKSUM_CONT BIT(24)
-
-#define BIT_SHIFT_DDMACH2_DLEN 0
-#define BIT_MASK_DDMACH2_DLEN 0x3ffff
-#define BIT_DDMACH2_DLEN(x) \
- (((x) & BIT_MASK_DDMACH2_DLEN) << BIT_SHIFT_DDMACH2_DLEN)
-#define BIT_GET_DDMACH2_DLEN(x) \
- (((x) >> BIT_SHIFT_DDMACH2_DLEN) & BIT_MASK_DDMACH2_DLEN)
-
-/* 2 REG_DDMA_CH3SA (Offset 0x1230) */
-
-#define BIT_SHIFT_DDMACH3_SA 0
-#define BIT_MASK_DDMACH3_SA 0xffffffffL
-#define BIT_DDMACH3_SA(x) (((x) & BIT_MASK_DDMACH3_SA) << BIT_SHIFT_DDMACH3_SA)
-#define BIT_GET_DDMACH3_SA(x) \
- (((x) >> BIT_SHIFT_DDMACH3_SA) & BIT_MASK_DDMACH3_SA)
-
-/* 2 REG_DDMA_CH3DA (Offset 0x1234) */
-
-#define BIT_SHIFT_DDMACH3_DA 0
-#define BIT_MASK_DDMACH3_DA 0xffffffffL
-#define BIT_DDMACH3_DA(x) (((x) & BIT_MASK_DDMACH3_DA) << BIT_SHIFT_DDMACH3_DA)
-#define BIT_GET_DDMACH3_DA(x) \
- (((x) >> BIT_SHIFT_DDMACH3_DA) & BIT_MASK_DDMACH3_DA)
-
-/* 2 REG_DDMA_CH3CTRL (Offset 0x1238) */
-
-#define BIT_DDMACH3_OWN BIT(31)
-#define BIT_DDMACH3_CHKSUM_EN BIT(29)
-#define BIT_DDMACH3_DA_W_DISABLE BIT(28)
-#define BIT_DDMACH3_CHKSUM_STS BIT(27)
-#define BIT_DDMACH3_DDMA_MODE BIT(26)
-#define BIT_DDMACH3_RESET_CHKSUM_STS BIT(25)
-#define BIT_DDMACH3_CHKSUM_CONT BIT(24)
-
-#define BIT_SHIFT_DDMACH3_DLEN 0
-#define BIT_MASK_DDMACH3_DLEN 0x3ffff
-#define BIT_DDMACH3_DLEN(x) \
- (((x) & BIT_MASK_DDMACH3_DLEN) << BIT_SHIFT_DDMACH3_DLEN)
-#define BIT_GET_DDMACH3_DLEN(x) \
- (((x) >> BIT_SHIFT_DDMACH3_DLEN) & BIT_MASK_DDMACH3_DLEN)
-
-/* 2 REG_DDMA_CH4SA (Offset 0x1240) */
-
-#define BIT_SHIFT_DDMACH4_SA 0
-#define BIT_MASK_DDMACH4_SA 0xffffffffL
-#define BIT_DDMACH4_SA(x) (((x) & BIT_MASK_DDMACH4_SA) << BIT_SHIFT_DDMACH4_SA)
-#define BIT_GET_DDMACH4_SA(x) \
- (((x) >> BIT_SHIFT_DDMACH4_SA) & BIT_MASK_DDMACH4_SA)
-
-/* 2 REG_DDMA_CH4DA (Offset 0x1244) */
-
-#define BIT_SHIFT_DDMACH4_DA 0
-#define BIT_MASK_DDMACH4_DA 0xffffffffL
-#define BIT_DDMACH4_DA(x) (((x) & BIT_MASK_DDMACH4_DA) << BIT_SHIFT_DDMACH4_DA)
-#define BIT_GET_DDMACH4_DA(x) \
- (((x) >> BIT_SHIFT_DDMACH4_DA) & BIT_MASK_DDMACH4_DA)
-
-/* 2 REG_DDMA_CH4CTRL (Offset 0x1248) */
-
-#define BIT_DDMACH4_OWN BIT(31)
-#define BIT_DDMACH4_CHKSUM_EN BIT(29)
-#define BIT_DDMACH4_DA_W_DISABLE BIT(28)
-#define BIT_DDMACH4_CHKSUM_STS BIT(27)
-#define BIT_DDMACH4_DDMA_MODE BIT(26)
-#define BIT_DDMACH4_RESET_CHKSUM_STS BIT(25)
-#define BIT_DDMACH4_CHKSUM_CONT BIT(24)
-
-#define BIT_SHIFT_DDMACH4_DLEN 0
-#define BIT_MASK_DDMACH4_DLEN 0x3ffff
-#define BIT_DDMACH4_DLEN(x) \
- (((x) & BIT_MASK_DDMACH4_DLEN) << BIT_SHIFT_DDMACH4_DLEN)
-#define BIT_GET_DDMACH4_DLEN(x) \
- (((x) >> BIT_SHIFT_DDMACH4_DLEN) & BIT_MASK_DDMACH4_DLEN)
-
-/* 2 REG_DDMA_CH5SA (Offset 0x1250) */
-
-#define BIT_SHIFT_DDMACH5_SA 0
-#define BIT_MASK_DDMACH5_SA 0xffffffffL
-#define BIT_DDMACH5_SA(x) (((x) & BIT_MASK_DDMACH5_SA) << BIT_SHIFT_DDMACH5_SA)
-#define BIT_GET_DDMACH5_SA(x) \
- (((x) >> BIT_SHIFT_DDMACH5_SA) & BIT_MASK_DDMACH5_SA)
-
-/* 2 REG_DDMA_CH5DA (Offset 0x1254) */
-
-#define BIT_DDMACH5_OWN BIT(31)
-#define BIT_DDMACH5_CHKSUM_EN BIT(29)
-#define BIT_DDMACH5_DA_W_DISABLE BIT(28)
-#define BIT_DDMACH5_CHKSUM_STS BIT(27)
-#define BIT_DDMACH5_DDMA_MODE BIT(26)
-#define BIT_DDMACH5_RESET_CHKSUM_STS BIT(25)
-#define BIT_DDMACH5_CHKSUM_CONT BIT(24)
-
-#define BIT_SHIFT_DDMACH5_DA 0
-#define BIT_MASK_DDMACH5_DA 0xffffffffL
-#define BIT_DDMACH5_DA(x) (((x) & BIT_MASK_DDMACH5_DA) << BIT_SHIFT_DDMACH5_DA)
-#define BIT_GET_DDMACH5_DA(x) \
- (((x) >> BIT_SHIFT_DDMACH5_DA) & BIT_MASK_DDMACH5_DA)
-
-#define BIT_SHIFT_DDMACH5_DLEN 0
-#define BIT_MASK_DDMACH5_DLEN 0x3ffff
-#define BIT_DDMACH5_DLEN(x) \
- (((x) & BIT_MASK_DDMACH5_DLEN) << BIT_SHIFT_DDMACH5_DLEN)
-#define BIT_GET_DDMACH5_DLEN(x) \
- (((x) >> BIT_SHIFT_DDMACH5_DLEN) & BIT_MASK_DDMACH5_DLEN)
-
-/* 2 REG_DDMA_INT_MSK (Offset 0x12E0) */
-
-#define BIT_DDMACH5_MSK BIT(5)
-#define BIT_DDMACH4_MSK BIT(4)
-#define BIT_DDMACH3_MSK BIT(3)
-#define BIT_DDMACH2_MSK BIT(2)
-#define BIT_DDMACH1_MSK BIT(1)
-#define BIT_DDMACH0_MSK BIT(0)
-
-/* 2 REG_DDMA_CHSTATUS (Offset 0x12E8) */
-
-#define BIT_DDMACH5_BUSY BIT(5)
-#define BIT_DDMACH4_BUSY BIT(4)
-#define BIT_DDMACH3_BUSY BIT(3)
-#define BIT_DDMACH2_BUSY BIT(2)
-#define BIT_DDMACH1_BUSY BIT(1)
-#define BIT_DDMACH0_BUSY BIT(0)
-
-/* 2 REG_DDMA_CHKSUM (Offset 0x12F0) */
-
-#define BIT_SHIFT_IDDMA0_CHKSUM 0
-#define BIT_MASK_IDDMA0_CHKSUM 0xffff
-#define BIT_IDDMA0_CHKSUM(x) \
- (((x) & BIT_MASK_IDDMA0_CHKSUM) << BIT_SHIFT_IDDMA0_CHKSUM)
-#define BIT_GET_IDDMA0_CHKSUM(x) \
- (((x) >> BIT_SHIFT_IDDMA0_CHKSUM) & BIT_MASK_IDDMA0_CHKSUM)
-
-/* 2 REG_DDMA_MONITOR (Offset 0x12FC) */
-
-#define BIT_IDDMA0_PERMU_UNDERFLOW BIT(14)
-#define BIT_IDDMA0_FIFO_UNDERFLOW BIT(13)
-#define BIT_IDDMA0_FIFO_OVERFLOW BIT(12)
-#define BIT_ECRC_EN_V1 BIT(7)
-#define BIT_MDIO_RFLAG_V1 BIT(6)
-#define BIT_CH5_ERR BIT(5)
-#define BIT_MDIO_WFLAG_V1 BIT(5)
-#define BIT_CH4_ERR BIT(4)
-#define BIT_CH3_ERR BIT(3)
-#define BIT_CH2_ERR BIT(2)
-#define BIT_CH1_ERR BIT(1)
-#define BIT_CH0_ERR BIT(0)
-
-/* 2 REG_STC_INT_CS (Offset 0x1300) */
-
-#define BIT_STC_INT_EN BIT(31)
-
-#define BIT_SHIFT_STC_INT_FLAG 16
-#define BIT_MASK_STC_INT_FLAG 0xff
-#define BIT_STC_INT_FLAG(x) \
- (((x) & BIT_MASK_STC_INT_FLAG) << BIT_SHIFT_STC_INT_FLAG)
-#define BIT_GET_STC_INT_FLAG(x) \
- (((x) >> BIT_SHIFT_STC_INT_FLAG) & BIT_MASK_STC_INT_FLAG)
-
-#define BIT_SHIFT_STC_INT_IDX 8
-#define BIT_MASK_STC_INT_IDX 0x7
-#define BIT_STC_INT_IDX(x) \
- (((x) & BIT_MASK_STC_INT_IDX) << BIT_SHIFT_STC_INT_IDX)
-#define BIT_GET_STC_INT_IDX(x) \
- (((x) >> BIT_SHIFT_STC_INT_IDX) & BIT_MASK_STC_INT_IDX)
-
-#define BIT_SHIFT_STC_INT_REALTIME_CS 0
-#define BIT_MASK_STC_INT_REALTIME_CS 0x3f
-#define BIT_STC_INT_REALTIME_CS(x) \
- (((x) & BIT_MASK_STC_INT_REALTIME_CS) << BIT_SHIFT_STC_INT_REALTIME_CS)
-#define BIT_GET_STC_INT_REALTIME_CS(x) \
- (((x) >> BIT_SHIFT_STC_INT_REALTIME_CS) & BIT_MASK_STC_INT_REALTIME_CS)
-
-/* 2 REG_ST_INT_CFG (Offset 0x1304) */
-
-#define BIT_STC_INT_GRP_EN BIT(31)
-
-#define BIT_SHIFT_STC_INT_EXPECT_LS 8
-#define BIT_MASK_STC_INT_EXPECT_LS 0x3f
-#define BIT_STC_INT_EXPECT_LS(x) \
- (((x) & BIT_MASK_STC_INT_EXPECT_LS) << BIT_SHIFT_STC_INT_EXPECT_LS)
-#define BIT_GET_STC_INT_EXPECT_LS(x) \
- (((x) >> BIT_SHIFT_STC_INT_EXPECT_LS) & BIT_MASK_STC_INT_EXPECT_LS)
-
-#define BIT_SHIFT_STC_INT_EXPECT_CS 0
-#define BIT_MASK_STC_INT_EXPECT_CS 0x3f
-#define BIT_STC_INT_EXPECT_CS(x) \
- (((x) & BIT_MASK_STC_INT_EXPECT_CS) << BIT_SHIFT_STC_INT_EXPECT_CS)
-#define BIT_GET_STC_INT_EXPECT_CS(x) \
- (((x) >> BIT_SHIFT_STC_INT_EXPECT_CS) & BIT_MASK_STC_INT_EXPECT_CS)
-
-/* 2 REG_CMU_DLY_CTRL (Offset 0x1310) */
-
-#define BIT_CMU_DLY_EN BIT(31)
-#define BIT_CMU_DLY_MODE BIT(30)
-
-#define BIT_SHIFT_CMU_DLY_PRE_DIV 0
-#define BIT_MASK_CMU_DLY_PRE_DIV 0xff
-#define BIT_CMU_DLY_PRE_DIV(x) \
- (((x) & BIT_MASK_CMU_DLY_PRE_DIV) << BIT_SHIFT_CMU_DLY_PRE_DIV)
-#define BIT_GET_CMU_DLY_PRE_DIV(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_PRE_DIV) & BIT_MASK_CMU_DLY_PRE_DIV)
-
-/* 2 REG_CMU_DLY_CFG (Offset 0x1314) */
-
-#define BIT_SHIFT_CMU_DLY_LTR_A2I 24
-#define BIT_MASK_CMU_DLY_LTR_A2I 0xff
-#define BIT_CMU_DLY_LTR_A2I(x) \
- (((x) & BIT_MASK_CMU_DLY_LTR_A2I) << BIT_SHIFT_CMU_DLY_LTR_A2I)
-#define BIT_GET_CMU_DLY_LTR_A2I(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_LTR_A2I) & BIT_MASK_CMU_DLY_LTR_A2I)
-
-#define BIT_SHIFT_CMU_DLY_LTR_I2A 16
-#define BIT_MASK_CMU_DLY_LTR_I2A 0xff
-#define BIT_CMU_DLY_LTR_I2A(x) \
- (((x) & BIT_MASK_CMU_DLY_LTR_I2A) << BIT_SHIFT_CMU_DLY_LTR_I2A)
-#define BIT_GET_CMU_DLY_LTR_I2A(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_LTR_I2A) & BIT_MASK_CMU_DLY_LTR_I2A)
-
-#define BIT_SHIFT_CMU_DLY_LTR_IDLE 8
-#define BIT_MASK_CMU_DLY_LTR_IDLE 0xff
-#define BIT_CMU_DLY_LTR_IDLE(x) \
- (((x) & BIT_MASK_CMU_DLY_LTR_IDLE) << BIT_SHIFT_CMU_DLY_LTR_IDLE)
-#define BIT_GET_CMU_DLY_LTR_IDLE(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_LTR_IDLE) & BIT_MASK_CMU_DLY_LTR_IDLE)
-
-#define BIT_SHIFT_CMU_DLY_LTR_ACT 0
-#define BIT_MASK_CMU_DLY_LTR_ACT 0xff
-#define BIT_CMU_DLY_LTR_ACT(x) \
- (((x) & BIT_MASK_CMU_DLY_LTR_ACT) << BIT_SHIFT_CMU_DLY_LTR_ACT)
-#define BIT_GET_CMU_DLY_LTR_ACT(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_LTR_ACT) & BIT_MASK_CMU_DLY_LTR_ACT)
-
-/* 2 REG_H2CQ_TXBD_DESA (Offset 0x1320) */
-
-#define BIT_SHIFT_H2CQ_TXBD_DESA 0
-#define BIT_MASK_H2CQ_TXBD_DESA 0xffffffffffffffffL
-#define BIT_H2CQ_TXBD_DESA(x) \
- (((x) & BIT_MASK_H2CQ_TXBD_DESA) << BIT_SHIFT_H2CQ_TXBD_DESA)
-#define BIT_GET_H2CQ_TXBD_DESA(x) \
- (((x) >> BIT_SHIFT_H2CQ_TXBD_DESA) & BIT_MASK_H2CQ_TXBD_DESA)
-
-/* 2 REG_H2CQ_TXBD_NUM (Offset 0x1328) */
-
-#define BIT_PCIE_H2CQ_FLAG BIT(14)
-
-/* 2 REG_H2CQ_TXBD_NUM (Offset 0x1328) */
-
-#define BIT_SHIFT_H2CQ_DESC_MODE 12
-#define BIT_MASK_H2CQ_DESC_MODE 0x3
-#define BIT_H2CQ_DESC_MODE(x) \
- (((x) & BIT_MASK_H2CQ_DESC_MODE) << BIT_SHIFT_H2CQ_DESC_MODE)
-#define BIT_GET_H2CQ_DESC_MODE(x) \
- (((x) >> BIT_SHIFT_H2CQ_DESC_MODE) & BIT_MASK_H2CQ_DESC_MODE)
-
-#define BIT_SHIFT_H2CQ_DESC_NUM 0
-#define BIT_MASK_H2CQ_DESC_NUM 0xfff
-#define BIT_H2CQ_DESC_NUM(x) \
- (((x) & BIT_MASK_H2CQ_DESC_NUM) << BIT_SHIFT_H2CQ_DESC_NUM)
-#define BIT_GET_H2CQ_DESC_NUM(x) \
- (((x) >> BIT_SHIFT_H2CQ_DESC_NUM) & BIT_MASK_H2CQ_DESC_NUM)
-
-/* 2 REG_H2CQ_TXBD_IDX (Offset 0x132C) */
-
-#define BIT_SHIFT_H2CQ_HW_IDX 16
-#define BIT_MASK_H2CQ_HW_IDX 0xfff
-#define BIT_H2CQ_HW_IDX(x) \
- (((x) & BIT_MASK_H2CQ_HW_IDX) << BIT_SHIFT_H2CQ_HW_IDX)
-#define BIT_GET_H2CQ_HW_IDX(x) \
- (((x) >> BIT_SHIFT_H2CQ_HW_IDX) & BIT_MASK_H2CQ_HW_IDX)
-
-#define BIT_SHIFT_H2CQ_HOST_IDX 0
-#define BIT_MASK_H2CQ_HOST_IDX 0xfff
-#define BIT_H2CQ_HOST_IDX(x) \
- (((x) & BIT_MASK_H2CQ_HOST_IDX) << BIT_SHIFT_H2CQ_HOST_IDX)
-#define BIT_GET_H2CQ_HOST_IDX(x) \
- (((x) >> BIT_SHIFT_H2CQ_HOST_IDX) & BIT_MASK_H2CQ_HOST_IDX)
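
REG_H2CQ_TXBD_IDX packs two 12-bit ring indices into one word, so splitting a read value is just the two BIT_GET_* extractors defined above. The raw register value in this sketch is invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Extractor macros as defined in the header above. */
#define BIT_SHIFT_H2CQ_HW_IDX 16
#define BIT_MASK_H2CQ_HW_IDX 0xfff
#define BIT_GET_H2CQ_HW_IDX(x) \
	(((x) >> BIT_SHIFT_H2CQ_HW_IDX) & BIT_MASK_H2CQ_HW_IDX)
#define BIT_SHIFT_H2CQ_HOST_IDX 0
#define BIT_MASK_H2CQ_HOST_IDX 0xfff
#define BIT_GET_H2CQ_HOST_IDX(x) \
	(((x) >> BIT_SHIFT_H2CQ_HOST_IDX) & BIT_MASK_H2CQ_HOST_IDX)

int main(void)
{
	uint32_t val = 0x002a0017;	/* invented read-back value */

	/* Hardware index in bits 27:16, host index in bits 11:0. */
	printf("hw idx=%u host idx=%u\n",
	       (unsigned)BIT_GET_H2CQ_HW_IDX(val),
	       (unsigned)BIT_GET_H2CQ_HOST_IDX(val));
	return 0;
}
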
-
-/* 2 REG_H2CQ_CSR (Offset 0x1330) */
-
-#define BIT_H2CQ_FULL BIT(31)
-#define BIT_CLR_H2CQ_HOST_IDX BIT(16)
-#define BIT_CLR_H2CQ_HW_IDX BIT(8)
-
-/* 2 REG_CHANGE_PCIE_SPEED (Offset 0x1350) */
-
-#define BIT_CHANGE_PCIE_SPEED BIT(18)
-
-/* 2 REG_CHANGE_PCIE_SPEED (Offset 0x1350) */
-
-#define BIT_SHIFT_GEN1_GEN2 16
-#define BIT_MASK_GEN1_GEN2 0x3
-#define BIT_GEN1_GEN2(x) (((x) & BIT_MASK_GEN1_GEN2) << BIT_SHIFT_GEN1_GEN2)
-#define BIT_GET_GEN1_GEN2(x) (((x) >> BIT_SHIFT_GEN1_GEN2) & BIT_MASK_GEN1_GEN2)
-
-/* 2 REG_CHANGE_PCIE_SPEED (Offset 0x1350) */
-
-#define BIT_SHIFT_AUTO_HANG_RELEASE 0
-#define BIT_MASK_AUTO_HANG_RELEASE 0x7
-#define BIT_AUTO_HANG_RELEASE(x) \
- (((x) & BIT_MASK_AUTO_HANG_RELEASE) << BIT_SHIFT_AUTO_HANG_RELEASE)
-#define BIT_GET_AUTO_HANG_RELEASE(x) \
- (((x) >> BIT_SHIFT_AUTO_HANG_RELEASE) & BIT_MASK_AUTO_HANG_RELEASE)
-
-/* 2 REG_OLD_DEHANG (Offset 0x13F4) */
-
-#define BIT_OLD_DEHANG BIT(1)
-
-/* 2 REG_Q0_Q1_INFO (Offset 0x1400) */
-
-#define BIT_SHIFT_AC1_PKT_INFO 16
-#define BIT_MASK_AC1_PKT_INFO 0xfff
-#define BIT_AC1_PKT_INFO(x) \
- (((x) & BIT_MASK_AC1_PKT_INFO) << BIT_SHIFT_AC1_PKT_INFO)
-#define BIT_GET_AC1_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_AC1_PKT_INFO) & BIT_MASK_AC1_PKT_INFO)
-
-#define BIT_SHIFT_AC0_PKT_INFO 0
-#define BIT_MASK_AC0_PKT_INFO 0xfff
-#define BIT_AC0_PKT_INFO(x) \
- (((x) & BIT_MASK_AC0_PKT_INFO) << BIT_SHIFT_AC0_PKT_INFO)
-#define BIT_GET_AC0_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_AC0_PKT_INFO) & BIT_MASK_AC0_PKT_INFO)
-
-/* 2 REG_Q2_Q3_INFO (Offset 0x1404) */
-
-#define BIT_SHIFT_AC3_PKT_INFO 16
-#define BIT_MASK_AC3_PKT_INFO 0xfff
-#define BIT_AC3_PKT_INFO(x) \
- (((x) & BIT_MASK_AC3_PKT_INFO) << BIT_SHIFT_AC3_PKT_INFO)
-#define BIT_GET_AC3_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_AC3_PKT_INFO) & BIT_MASK_AC3_PKT_INFO)
-
-#define BIT_SHIFT_AC2_PKT_INFO 0
-#define BIT_MASK_AC2_PKT_INFO 0xfff
-#define BIT_AC2_PKT_INFO(x) \
- (((x) & BIT_MASK_AC2_PKT_INFO) << BIT_SHIFT_AC2_PKT_INFO)
-#define BIT_GET_AC2_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_AC2_PKT_INFO) & BIT_MASK_AC2_PKT_INFO)
-
-/* 2 REG_Q4_Q5_INFO (Offset 0x1408) */
-
-#define BIT_SHIFT_AC5_PKT_INFO 16
-#define BIT_MASK_AC5_PKT_INFO 0xfff
-#define BIT_AC5_PKT_INFO(x) \
- (((x) & BIT_MASK_AC5_PKT_INFO) << BIT_SHIFT_AC5_PKT_INFO)
-#define BIT_GET_AC5_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_AC5_PKT_INFO) & BIT_MASK_AC5_PKT_INFO)
-
-#define BIT_SHIFT_AC4_PKT_INFO 0
-#define BIT_MASK_AC4_PKT_INFO 0xfff
-#define BIT_AC4_PKT_INFO(x) \
- (((x) & BIT_MASK_AC4_PKT_INFO) << BIT_SHIFT_AC4_PKT_INFO)
-#define BIT_GET_AC4_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_AC4_PKT_INFO) & BIT_MASK_AC4_PKT_INFO)
-
-/* 2 REG_Q6_Q7_INFO (Offset 0x140C) */
-
-#define BIT_SHIFT_AC7_PKT_INFO 16
-#define BIT_MASK_AC7_PKT_INFO 0xfff
-#define BIT_AC7_PKT_INFO(x) \
- (((x) & BIT_MASK_AC7_PKT_INFO) << BIT_SHIFT_AC7_PKT_INFO)
-#define BIT_GET_AC7_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_AC7_PKT_INFO) & BIT_MASK_AC7_PKT_INFO)
-
-#define BIT_SHIFT_AC6_PKT_INFO 0
-#define BIT_MASK_AC6_PKT_INFO 0xfff
-#define BIT_AC6_PKT_INFO(x) \
- (((x) & BIT_MASK_AC6_PKT_INFO) << BIT_SHIFT_AC6_PKT_INFO)
-#define BIT_GET_AC6_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_AC6_PKT_INFO) & BIT_MASK_AC6_PKT_INFO)
-
-/* 2 REG_MGQ_HIQ_INFO (Offset 0x1410) */
-
-#define BIT_SHIFT_HIQ_PKT_INFO 16
-#define BIT_MASK_HIQ_PKT_INFO 0xfff
-#define BIT_HIQ_PKT_INFO(x) \
- (((x) & BIT_MASK_HIQ_PKT_INFO) << BIT_SHIFT_HIQ_PKT_INFO)
-#define BIT_GET_HIQ_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_HIQ_PKT_INFO) & BIT_MASK_HIQ_PKT_INFO)
-
-#define BIT_SHIFT_MGQ_PKT_INFO 0
-#define BIT_MASK_MGQ_PKT_INFO 0xfff
-#define BIT_MGQ_PKT_INFO(x) \
- (((x) & BIT_MASK_MGQ_PKT_INFO) << BIT_SHIFT_MGQ_PKT_INFO)
-#define BIT_GET_MGQ_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_MGQ_PKT_INFO) & BIT_MASK_MGQ_PKT_INFO)
-
-/* 2 REG_CMDQ_BCNQ_INFO (Offset 0x1414) */
-
-#define BIT_SHIFT_CMDQ_PKT_INFO 16
-#define BIT_MASK_CMDQ_PKT_INFO 0xfff
-#define BIT_CMDQ_PKT_INFO(x) \
- (((x) & BIT_MASK_CMDQ_PKT_INFO) << BIT_SHIFT_CMDQ_PKT_INFO)
-#define BIT_GET_CMDQ_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_CMDQ_PKT_INFO) & BIT_MASK_CMDQ_PKT_INFO)
-
-/* 2 REG_CMDQ_BCNQ_INFO (Offset 0x1414) */
-
-#define BIT_SHIFT_BCNQ_PKT_INFO 0
-#define BIT_MASK_BCNQ_PKT_INFO 0xfff
-#define BIT_BCNQ_PKT_INFO(x) \
- (((x) & BIT_MASK_BCNQ_PKT_INFO) << BIT_SHIFT_BCNQ_PKT_INFO)
-#define BIT_GET_BCNQ_PKT_INFO(x) \
- (((x) >> BIT_SHIFT_BCNQ_PKT_INFO) & BIT_MASK_BCNQ_PKT_INFO)
-
-/* 2 REG_USEREG_SETTING (Offset 0x1420) */
-
-#define BIT_NDPA_USEREG BIT(21)
-
-#define BIT_SHIFT_RETRY_USEREG 19
-#define BIT_MASK_RETRY_USEREG 0x3
-#define BIT_RETRY_USEREG(x) \
- (((x) & BIT_MASK_RETRY_USEREG) << BIT_SHIFT_RETRY_USEREG)
-#define BIT_GET_RETRY_USEREG(x) \
- (((x) >> BIT_SHIFT_RETRY_USEREG) & BIT_MASK_RETRY_USEREG)
-
-#define BIT_SHIFT_TRYPKT_USEREG 17
-#define BIT_MASK_TRYPKT_USEREG 0x3
-#define BIT_TRYPKT_USEREG(x) \
- (((x) & BIT_MASK_TRYPKT_USEREG) << BIT_SHIFT_TRYPKT_USEREG)
-#define BIT_GET_TRYPKT_USEREG(x) \
- (((x) >> BIT_SHIFT_TRYPKT_USEREG) & BIT_MASK_TRYPKT_USEREG)
-
-#define BIT_CTLPKT_USEREG BIT(16)
-
-/* 2 REG_AESIV_SETTING (Offset 0x1424) */
-
-#define BIT_SHIFT_AESIV_OFFSET 0
-#define BIT_MASK_AESIV_OFFSET 0xfff
-#define BIT_AESIV_OFFSET(x) \
- (((x) & BIT_MASK_AESIV_OFFSET) << BIT_SHIFT_AESIV_OFFSET)
-#define BIT_GET_AESIV_OFFSET(x) \
- (((x) >> BIT_SHIFT_AESIV_OFFSET) & BIT_MASK_AESIV_OFFSET)
-
-/* 2 REG_BF0_TIME_SETTING (Offset 0x1428) */
-
-#define BIT_BF0_TIMER_SET BIT(31)
-#define BIT_BF0_TIMER_CLR BIT(30)
-#define BIT_BF0_UPDATE_EN BIT(29)
-#define BIT_BF0_TIMER_EN BIT(28)
-
-#define BIT_SHIFT_BF0_PRETIME_OVER 16
-#define BIT_MASK_BF0_PRETIME_OVER 0xfff
-#define BIT_BF0_PRETIME_OVER(x) \
- (((x) & BIT_MASK_BF0_PRETIME_OVER) << BIT_SHIFT_BF0_PRETIME_OVER)
-#define BIT_GET_BF0_PRETIME_OVER(x) \
- (((x) >> BIT_SHIFT_BF0_PRETIME_OVER) & BIT_MASK_BF0_PRETIME_OVER)
-
-#define BIT_SHIFT_BF0_LIFETIME 0
-#define BIT_MASK_BF0_LIFETIME 0xffff
-#define BIT_BF0_LIFETIME(x) \
- (((x) & BIT_MASK_BF0_LIFETIME) << BIT_SHIFT_BF0_LIFETIME)
-#define BIT_GET_BF0_LIFETIME(x) \
- (((x) >> BIT_SHIFT_BF0_LIFETIME) & BIT_MASK_BF0_LIFETIME)
-
-/* 2 REG_BF1_TIME_SETTING (Offset 0x142C) */
-
-#define BIT_BF1_TIMER_SET BIT(31)
-#define BIT_BF1_TIMER_CLR BIT(30)
-#define BIT_BF1_UPDATE_EN BIT(29)
-#define BIT_BF1_TIMER_EN BIT(28)
-
-#define BIT_SHIFT_BF1_PRETIME_OVER 16
-#define BIT_MASK_BF1_PRETIME_OVER 0xfff
-#define BIT_BF1_PRETIME_OVER(x) \
- (((x) & BIT_MASK_BF1_PRETIME_OVER) << BIT_SHIFT_BF1_PRETIME_OVER)
-#define BIT_GET_BF1_PRETIME_OVER(x) \
- (((x) >> BIT_SHIFT_BF1_PRETIME_OVER) & BIT_MASK_BF1_PRETIME_OVER)
-
-#define BIT_SHIFT_BF1_LIFETIME 0
-#define BIT_MASK_BF1_LIFETIME 0xffff
-#define BIT_BF1_LIFETIME(x) \
- (((x) & BIT_MASK_BF1_LIFETIME) << BIT_SHIFT_BF1_LIFETIME)
-#define BIT_GET_BF1_LIFETIME(x) \
- (((x) >> BIT_SHIFT_BF1_LIFETIME) & BIT_MASK_BF1_LIFETIME)
-
-/* 2 REG_BF_TIMEOUT_EN (Offset 0x1430) */
-
-#define BIT_EN_VHT_LDPC BIT(9)
-#define BIT_EN_HT_LDPC BIT(8)
-#define BIT_BF1_TIMEOUT_EN BIT(1)
-#define BIT_BF0_TIMEOUT_EN BIT(0)
-
-/* 2 REG_MACID_RELEASE0 (Offset 0x1434) */
-
-#define BIT_SHIFT_MACID31_0_RELEASE 0
-#define BIT_MASK_MACID31_0_RELEASE 0xffffffffL
-#define BIT_MACID31_0_RELEASE(x) \
- (((x) & BIT_MASK_MACID31_0_RELEASE) << BIT_SHIFT_MACID31_0_RELEASE)
-#define BIT_GET_MACID31_0_RELEASE(x) \
- (((x) >> BIT_SHIFT_MACID31_0_RELEASE) & BIT_MASK_MACID31_0_RELEASE)
-
-/* 2 REG_MACID_RELEASE1 (Offset 0x1438) */
-
-#define BIT_SHIFT_MACID63_32_RELEASE 0
-#define BIT_MASK_MACID63_32_RELEASE 0xffffffffL
-#define BIT_MACID63_32_RELEASE(x) \
- (((x) & BIT_MASK_MACID63_32_RELEASE) << BIT_SHIFT_MACID63_32_RELEASE)
-#define BIT_GET_MACID63_32_RELEASE(x) \
- (((x) >> BIT_SHIFT_MACID63_32_RELEASE) & BIT_MASK_MACID63_32_RELEASE)
-
-/* 2 REG_MACID_RELEASE2 (Offset 0x143C) */
-
-#define BIT_SHIFT_MACID95_64_RELEASE 0
-#define BIT_MASK_MACID95_64_RELEASE 0xffffffffL
-#define BIT_MACID95_64_RELEASE(x) \
- (((x) & BIT_MASK_MACID95_64_RELEASE) << BIT_SHIFT_MACID95_64_RELEASE)
-#define BIT_GET_MACID95_64_RELEASE(x) \
- (((x) >> BIT_SHIFT_MACID95_64_RELEASE) & BIT_MASK_MACID95_64_RELEASE)
-
-/* 2 REG_MACID_RELEASE3 (Offset 0x1440) */
-
-#define BIT_SHIFT_MACID127_96_RELEASE 0
-#define BIT_MASK_MACID127_96_RELEASE 0xffffffffL
-#define BIT_MACID127_96_RELEASE(x) \
- (((x) & BIT_MASK_MACID127_96_RELEASE) << BIT_SHIFT_MACID127_96_RELEASE)
-#define BIT_GET_MACID127_96_RELEASE(x) \
- (((x) >> BIT_SHIFT_MACID127_96_RELEASE) & BIT_MASK_MACID127_96_RELEASE)
-
-/* 2 REG_MACID_RELEASE_SETTING (Offset 0x1444) */
-
-#define BIT_MACID_VALUE BIT(7)
-
-#define BIT_SHIFT_MACID_OFFSET 0
-#define BIT_MASK_MACID_OFFSET 0x7f
-#define BIT_MACID_OFFSET(x) \
- (((x) & BIT_MASK_MACID_OFFSET) << BIT_SHIFT_MACID_OFFSET)
-#define BIT_GET_MACID_OFFSET(x) \
- (((x) >> BIT_SHIFT_MACID_OFFSET) & BIT_MASK_MACID_OFFSET)
-
-/* 2 REG_FAST_EDCA_VOVI_SETTING (Offset 0x1448) */
-
-#define BIT_SHIFT_VI_FAST_EDCA_TO 24
-#define BIT_MASK_VI_FAST_EDCA_TO 0xff
-#define BIT_VI_FAST_EDCA_TO(x) \
- (((x) & BIT_MASK_VI_FAST_EDCA_TO) << BIT_SHIFT_VI_FAST_EDCA_TO)
-#define BIT_GET_VI_FAST_EDCA_TO(x) \
- (((x) >> BIT_SHIFT_VI_FAST_EDCA_TO) & BIT_MASK_VI_FAST_EDCA_TO)
-
-#define BIT_VI_THRESHOLD_SEL BIT(23)
-
-#define BIT_SHIFT_VI_FAST_EDCA_PKT_TH 16
-#define BIT_MASK_VI_FAST_EDCA_PKT_TH 0x7f
-#define BIT_VI_FAST_EDCA_PKT_TH(x) \
- (((x) & BIT_MASK_VI_FAST_EDCA_PKT_TH) << BIT_SHIFT_VI_FAST_EDCA_PKT_TH)
-#define BIT_GET_VI_FAST_EDCA_PKT_TH(x) \
- (((x) >> BIT_SHIFT_VI_FAST_EDCA_PKT_TH) & BIT_MASK_VI_FAST_EDCA_PKT_TH)
-
-#define BIT_SHIFT_VO_FAST_EDCA_TO 8
-#define BIT_MASK_VO_FAST_EDCA_TO 0xff
-#define BIT_VO_FAST_EDCA_TO(x) \
- (((x) & BIT_MASK_VO_FAST_EDCA_TO) << BIT_SHIFT_VO_FAST_EDCA_TO)
-#define BIT_GET_VO_FAST_EDCA_TO(x) \
- (((x) >> BIT_SHIFT_VO_FAST_EDCA_TO) & BIT_MASK_VO_FAST_EDCA_TO)
-
-#define BIT_VO_THRESHOLD_SEL BIT(7)
-
-#define BIT_SHIFT_VO_FAST_EDCA_PKT_TH 0
-#define BIT_MASK_VO_FAST_EDCA_PKT_TH 0x7f
-#define BIT_VO_FAST_EDCA_PKT_TH(x) \
- (((x) & BIT_MASK_VO_FAST_EDCA_PKT_TH) << BIT_SHIFT_VO_FAST_EDCA_PKT_TH)
-#define BIT_GET_VO_FAST_EDCA_PKT_TH(x) \
- (((x) >> BIT_SHIFT_VO_FAST_EDCA_PKT_TH) & BIT_MASK_VO_FAST_EDCA_PKT_TH)
-
-/* 2 REG_FAST_EDCA_BEBK_SETTING (Offset 0x144C) */
-
-#define BIT_SHIFT_BK_FAST_EDCA_TO 24
-#define BIT_MASK_BK_FAST_EDCA_TO 0xff
-#define BIT_BK_FAST_EDCA_TO(x) \
- (((x) & BIT_MASK_BK_FAST_EDCA_TO) << BIT_SHIFT_BK_FAST_EDCA_TO)
-#define BIT_GET_BK_FAST_EDCA_TO(x) \
- (((x) >> BIT_SHIFT_BK_FAST_EDCA_TO) & BIT_MASK_BK_FAST_EDCA_TO)
-
-#define BIT_BK_THRESHOLD_SEL BIT(23)
-
-#define BIT_SHIFT_BK_FAST_EDCA_PKT_TH 16
-#define BIT_MASK_BK_FAST_EDCA_PKT_TH 0x7f
-#define BIT_BK_FAST_EDCA_PKT_TH(x) \
- (((x) & BIT_MASK_BK_FAST_EDCA_PKT_TH) << BIT_SHIFT_BK_FAST_EDCA_PKT_TH)
-#define BIT_GET_BK_FAST_EDCA_PKT_TH(x) \
- (((x) >> BIT_SHIFT_BK_FAST_EDCA_PKT_TH) & BIT_MASK_BK_FAST_EDCA_PKT_TH)
-
-#define BIT_SHIFT_BE_FAST_EDCA_TO 8
-#define BIT_MASK_BE_FAST_EDCA_TO 0xff
-#define BIT_BE_FAST_EDCA_TO(x) \
- (((x) & BIT_MASK_BE_FAST_EDCA_TO) << BIT_SHIFT_BE_FAST_EDCA_TO)
-#define BIT_GET_BE_FAST_EDCA_TO(x) \
- (((x) >> BIT_SHIFT_BE_FAST_EDCA_TO) & BIT_MASK_BE_FAST_EDCA_TO)
-
-#define BIT_BE_THRESHOLD_SEL BIT(7)
-
-#define BIT_SHIFT_BE_FAST_EDCA_PKT_TH 0
-#define BIT_MASK_BE_FAST_EDCA_PKT_TH 0x7f
-#define BIT_BE_FAST_EDCA_PKT_TH(x) \
- (((x) & BIT_MASK_BE_FAST_EDCA_PKT_TH) << BIT_SHIFT_BE_FAST_EDCA_PKT_TH)
-#define BIT_GET_BE_FAST_EDCA_PKT_TH(x) \
- (((x) >> BIT_SHIFT_BE_FAST_EDCA_PKT_TH) & BIT_MASK_BE_FAST_EDCA_PKT_TH)
-
-/* 2 REG_MACID_DROP0 (Offset 0x1450) */
-
-#define BIT_SHIFT_MACID31_0_DROP 0
-#define BIT_MASK_MACID31_0_DROP 0xffffffffL
-#define BIT_MACID31_0_DROP(x) \
- (((x) & BIT_MASK_MACID31_0_DROP) << BIT_SHIFT_MACID31_0_DROP)
-#define BIT_GET_MACID31_0_DROP(x) \
- (((x) >> BIT_SHIFT_MACID31_0_DROP) & BIT_MASK_MACID31_0_DROP)
-
-/* 2 REG_MACID_DROP1 (Offset 0x1454) */
-
-#define BIT_SHIFT_MACID63_32_DROP 0
-#define BIT_MASK_MACID63_32_DROP 0xffffffffL
-#define BIT_MACID63_32_DROP(x) \
- (((x) & BIT_MASK_MACID63_32_DROP) << BIT_SHIFT_MACID63_32_DROP)
-#define BIT_GET_MACID63_32_DROP(x) \
- (((x) >> BIT_SHIFT_MACID63_32_DROP) & BIT_MASK_MACID63_32_DROP)
-
-/* 2 REG_MACID_DROP2 (Offset 0x1458) */
-
-#define BIT_SHIFT_MACID95_64_DROP 0
-#define BIT_MASK_MACID95_64_DROP 0xffffffffL
-#define BIT_MACID95_64_DROP(x) \
- (((x) & BIT_MASK_MACID95_64_DROP) << BIT_SHIFT_MACID95_64_DROP)
-#define BIT_GET_MACID95_64_DROP(x) \
- (((x) >> BIT_SHIFT_MACID95_64_DROP) & BIT_MASK_MACID95_64_DROP)
-
-/* 2 REG_MACID_DROP3 (Offset 0x145C) */
-
-#define BIT_SHIFT_MACID127_96_DROP 0
-#define BIT_MASK_MACID127_96_DROP 0xffffffffL
-#define BIT_MACID127_96_DROP(x) \
- (((x) & BIT_MASK_MACID127_96_DROP) << BIT_SHIFT_MACID127_96_DROP)
-#define BIT_GET_MACID127_96_DROP(x) \
- (((x) >> BIT_SHIFT_MACID127_96_DROP) & BIT_MASK_MACID127_96_DROP)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_0 (Offset 0x1460) */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_0 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_0 0xffffffffL
-#define BIT_R_MACID_RELEASE_SUCCESS_0(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_0) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_0)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_0(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_0) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_0)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_1 (Offset 0x1464) */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_1 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_1 0xffffffffL
-#define BIT_R_MACID_RELEASE_SUCCESS_1(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_1) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_1)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_1(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_1) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_1)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_2 (Offset 0x1468) */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_2 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_2 0xffffffffL
-#define BIT_R_MACID_RELEASE_SUCCESS_2(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_2) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_2)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_2(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_2) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_2)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_3 (Offset 0x146C) */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_3 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_3 0xffffffffL
-#define BIT_R_MACID_RELEASE_SUCCESS_3(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_3) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_3)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_3(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_3) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_3)
-
-/* 2 REG_MGG_FIFO_CRTL (Offset 0x1470) */
-
-#define BIT_R_MGG_FIFO_EN BIT(31)
-
-#define BIT_SHIFT_R_MGG_FIFO_PG_SIZE 28
-#define BIT_MASK_R_MGG_FIFO_PG_SIZE 0x7
-#define BIT_R_MGG_FIFO_PG_SIZE(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_PG_SIZE) << BIT_SHIFT_R_MGG_FIFO_PG_SIZE)
-#define BIT_GET_R_MGG_FIFO_PG_SIZE(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_PG_SIZE) & BIT_MASK_R_MGG_FIFO_PG_SIZE)
-
-#define BIT_SHIFT_R_MGG_FIFO_START_PG 16
-#define BIT_MASK_R_MGG_FIFO_START_PG 0xfff
-#define BIT_R_MGG_FIFO_START_PG(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_START_PG) << BIT_SHIFT_R_MGG_FIFO_START_PG)
-#define BIT_GET_R_MGG_FIFO_START_PG(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_START_PG) & BIT_MASK_R_MGG_FIFO_START_PG)
-
-#define BIT_SHIFT_R_MGG_FIFO_SIZE 14
-#define BIT_MASK_R_MGG_FIFO_SIZE 0x3
-#define BIT_R_MGG_FIFO_SIZE(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_SIZE) << BIT_SHIFT_R_MGG_FIFO_SIZE)
-#define BIT_GET_R_MGG_FIFO_SIZE(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_SIZE) & BIT_MASK_R_MGG_FIFO_SIZE)
-
-#define BIT_R_MGG_FIFO_PAUSE BIT(13)
-
-#define BIT_SHIFT_R_MGG_FIFO_RPTR 8
-#define BIT_MASK_R_MGG_FIFO_RPTR 0x1f
-#define BIT_R_MGG_FIFO_RPTR(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_RPTR) << BIT_SHIFT_R_MGG_FIFO_RPTR)
-#define BIT_GET_R_MGG_FIFO_RPTR(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_RPTR) & BIT_MASK_R_MGG_FIFO_RPTR)
-
-#define BIT_R_MGG_FIFO_OV BIT(7)
-#define BIT_R_MGG_FIFO_WPTR_ERROR BIT(6)
-#define BIT_R_EN_CPU_LIFETIME BIT(5)
-
-#define BIT_SHIFT_R_MGG_FIFO_WPTR 0
-#define BIT_MASK_R_MGG_FIFO_WPTR 0x1f
-#define BIT_R_MGG_FIFO_WPTR(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_WPTR) << BIT_SHIFT_R_MGG_FIFO_WPTR)
-#define BIT_GET_R_MGG_FIFO_WPTR(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_WPTR) & BIT_MASK_R_MGG_FIFO_WPTR)
-
-/* 2 REG_MGG_FIFO_INT (Offset 0x1474) */
-
-#define BIT_SHIFT_R_MGG_FIFO_INT_FLAG 16
-#define BIT_MASK_R_MGG_FIFO_INT_FLAG 0xffff
-#define BIT_R_MGG_FIFO_INT_FLAG(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_INT_FLAG) << BIT_SHIFT_R_MGG_FIFO_INT_FLAG)
-#define BIT_GET_R_MGG_FIFO_INT_FLAG(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_INT_FLAG) & BIT_MASK_R_MGG_FIFO_INT_FLAG)
-
-#define BIT_SHIFT_R_MGG_FIFO_INT_MASK 0
-#define BIT_MASK_R_MGG_FIFO_INT_MASK 0xffff
-#define BIT_R_MGG_FIFO_INT_MASK(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_INT_MASK) << BIT_SHIFT_R_MGG_FIFO_INT_MASK)
-#define BIT_GET_R_MGG_FIFO_INT_MASK(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_INT_MASK) & BIT_MASK_R_MGG_FIFO_INT_MASK)
-
-/* 2 REG_MGG_FIFO_LIFETIME (Offset 0x1478) */
-
-#define BIT_SHIFT_R_MGG_FIFO_LIFETIME 16
-#define BIT_MASK_R_MGG_FIFO_LIFETIME 0xffff
-#define BIT_R_MGG_FIFO_LIFETIME(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_LIFETIME) << BIT_SHIFT_R_MGG_FIFO_LIFETIME)
-#define BIT_GET_R_MGG_FIFO_LIFETIME(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_LIFETIME) & BIT_MASK_R_MGG_FIFO_LIFETIME)
-
-#define BIT_SHIFT_R_MGG_FIFO_VALID_MAP 0
-#define BIT_MASK_R_MGG_FIFO_VALID_MAP 0xffff
-#define BIT_R_MGG_FIFO_VALID_MAP(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_VALID_MAP) \
- << BIT_SHIFT_R_MGG_FIFO_VALID_MAP)
-#define BIT_GET_R_MGG_FIFO_VALID_MAP(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_VALID_MAP) & \
- BIT_MASK_R_MGG_FIFO_VALID_MAP)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET (Offset 0x147C) */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET 0x7f
-#define BIT_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET)
-
-#define BIT_SHIFT_P2PON_DIS_TXTIME 0
-#define BIT_MASK_P2PON_DIS_TXTIME 0xff
-#define BIT_P2PON_DIS_TXTIME(x) \
- (((x) & BIT_MASK_P2PON_DIS_TXTIME) << BIT_SHIFT_P2PON_DIS_TXTIME)
-#define BIT_GET_P2PON_DIS_TXTIME(x) \
- (((x) >> BIT_SHIFT_P2PON_DIS_TXTIME) & BIT_MASK_P2PON_DIS_TXTIME)
-
-/* 2 REG_MACID_SHCUT_OFFSET (Offset 0x1480) */
-
-#define BIT_SHIFT_MACID_SHCUT_OFFSET_V1 0
-#define BIT_MASK_MACID_SHCUT_OFFSET_V1 0xff
-#define BIT_MACID_SHCUT_OFFSET_V1(x) \
- (((x) & BIT_MASK_MACID_SHCUT_OFFSET_V1) \
- << BIT_SHIFT_MACID_SHCUT_OFFSET_V1)
-#define BIT_GET_MACID_SHCUT_OFFSET_V1(x) \
- (((x) >> BIT_SHIFT_MACID_SHCUT_OFFSET_V1) & \
- BIT_MASK_MACID_SHCUT_OFFSET_V1)
-
-/* 2 REG_MU_TX_CTL (Offset 0x14C0) */
-
-#define BIT_R_EN_REVERS_GTAB BIT(6)
-
-#define BIT_SHIFT_R_MU_TABLE_VALID 0
-#define BIT_MASK_R_MU_TABLE_VALID 0x3f
-#define BIT_R_MU_TABLE_VALID(x) \
- (((x) & BIT_MASK_R_MU_TABLE_VALID) << BIT_SHIFT_R_MU_TABLE_VALID)
-#define BIT_GET_R_MU_TABLE_VALID(x) \
- (((x) >> BIT_SHIFT_R_MU_TABLE_VALID) & BIT_MASK_R_MU_TABLE_VALID)
-
-#define BIT_SHIFT_R_MU_STA_GTAB_VALID 0
-#define BIT_MASK_R_MU_STA_GTAB_VALID 0xffffffffL
-#define BIT_R_MU_STA_GTAB_VALID(x) \
- (((x) & BIT_MASK_R_MU_STA_GTAB_VALID) << BIT_SHIFT_R_MU_STA_GTAB_VALID)
-#define BIT_GET_R_MU_STA_GTAB_VALID(x) \
- (((x) >> BIT_SHIFT_R_MU_STA_GTAB_VALID) & BIT_MASK_R_MU_STA_GTAB_VALID)
-
-#define BIT_SHIFT_R_MU_STA_GTAB_POSITION 0
-#define BIT_MASK_R_MU_STA_GTAB_POSITION 0xffffffffffffffffL
-#define BIT_R_MU_STA_GTAB_POSITION(x) \
- (((x) & BIT_MASK_R_MU_STA_GTAB_POSITION) \
- << BIT_SHIFT_R_MU_STA_GTAB_POSITION)
-#define BIT_GET_R_MU_STA_GTAB_POSITION(x) \
- (((x) >> BIT_SHIFT_R_MU_STA_GTAB_POSITION) & \
- BIT_MASK_R_MU_STA_GTAB_POSITION)
-
-/* 2 REG_MU_TRX_DBG_CNT (Offset 0x14D0) */
-
-#define BIT_MU_DNGCNT_RST BIT(20)
-
-#define BIT_SHIFT_MU_DBGCNT_SEL 16
-#define BIT_MASK_MU_DBGCNT_SEL 0xf
-#define BIT_MU_DBGCNT_SEL(x) \
- (((x) & BIT_MASK_MU_DBGCNT_SEL) << BIT_SHIFT_MU_DBGCNT_SEL)
-#define BIT_GET_MU_DBGCNT_SEL(x) \
- (((x) >> BIT_SHIFT_MU_DBGCNT_SEL) & BIT_MASK_MU_DBGCNT_SEL)
-
-#define BIT_SHIFT_MU_DNGCNT 0
-#define BIT_MASK_MU_DNGCNT 0xffff
-#define BIT_MU_DNGCNT(x) (((x) & BIT_MASK_MU_DNGCNT) << BIT_SHIFT_MU_DNGCNT)
-#define BIT_GET_MU_DNGCNT(x) (((x) >> BIT_SHIFT_MU_DNGCNT) & BIT_MASK_MU_DNGCNT)
-
-/* 2 REG_CPUMGQ_TX_TIMER (Offset 0x1500) */
-
-#define BIT_SHIFT_CPUMGQ_TX_TIMER_V1 0
-#define BIT_MASK_CPUMGQ_TX_TIMER_V1 0xffffffffL
-#define BIT_CPUMGQ_TX_TIMER_V1(x) \
- (((x) & BIT_MASK_CPUMGQ_TX_TIMER_V1) << BIT_SHIFT_CPUMGQ_TX_TIMER_V1)
-#define BIT_GET_CPUMGQ_TX_TIMER_V1(x) \
- (((x) >> BIT_SHIFT_CPUMGQ_TX_TIMER_V1) & BIT_MASK_CPUMGQ_TX_TIMER_V1)
-
-/* 2 REG_PS_TIMER_A (Offset 0x1504) */
-
-#define BIT_SHIFT_PS_TIMER_A_V1 0
-#define BIT_MASK_PS_TIMER_A_V1 0xffffffffL
-#define BIT_PS_TIMER_A_V1(x) \
- (((x) & BIT_MASK_PS_TIMER_A_V1) << BIT_SHIFT_PS_TIMER_A_V1)
-#define BIT_GET_PS_TIMER_A_V1(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_A_V1) & BIT_MASK_PS_TIMER_A_V1)
-
-/* 2 REG_PS_TIMER_B (Offset 0x1508) */
-
-#define BIT_SHIFT_PS_TIMER_B_V1 0
-#define BIT_MASK_PS_TIMER_B_V1 0xffffffffL
-#define BIT_PS_TIMER_B_V1(x) \
- (((x) & BIT_MASK_PS_TIMER_B_V1) << BIT_SHIFT_PS_TIMER_B_V1)
-#define BIT_GET_PS_TIMER_B_V1(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_B_V1) & BIT_MASK_PS_TIMER_B_V1)
-
-/* 2 REG_PS_TIMER_C (Offset 0x150C) */
-
-#define BIT_SHIFT_PS_TIMER_C_V1 0
-#define BIT_MASK_PS_TIMER_C_V1 0xffffffffL
-#define BIT_PS_TIMER_C_V1(x) \
- (((x) & BIT_MASK_PS_TIMER_C_V1) << BIT_SHIFT_PS_TIMER_C_V1)
-#define BIT_GET_PS_TIMER_C_V1(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_C_V1) & BIT_MASK_PS_TIMER_C_V1)
-
-/* 2 REG_PS_TIMER_ABC_CPUMGQ_TIMER_CRTL (Offset 0x1510) */
-
-#define BIT_CPUMGQ_TIMER_EN BIT(31)
-#define BIT_CPUMGQ_TX_EN BIT(28)
-
-#define BIT_SHIFT_CPUMGQ_TIMER_TSF_SEL 24
-#define BIT_MASK_CPUMGQ_TIMER_TSF_SEL 0x7
-#define BIT_CPUMGQ_TIMER_TSF_SEL(x) \
- (((x) & BIT_MASK_CPUMGQ_TIMER_TSF_SEL) \
- << BIT_SHIFT_CPUMGQ_TIMER_TSF_SEL)
-#define BIT_GET_CPUMGQ_TIMER_TSF_SEL(x) \
- (((x) >> BIT_SHIFT_CPUMGQ_TIMER_TSF_SEL) & \
- BIT_MASK_CPUMGQ_TIMER_TSF_SEL)
-
-#define BIT_PS_TIMER_C_EN BIT(23)
-
-#define BIT_SHIFT_PS_TIMER_C_TSF_SEL 16
-#define BIT_MASK_PS_TIMER_C_TSF_SEL 0x7
-#define BIT_PS_TIMER_C_TSF_SEL(x) \
- (((x) & BIT_MASK_PS_TIMER_C_TSF_SEL) << BIT_SHIFT_PS_TIMER_C_TSF_SEL)
-#define BIT_GET_PS_TIMER_C_TSF_SEL(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_C_TSF_SEL) & BIT_MASK_PS_TIMER_C_TSF_SEL)
-
-#define BIT_PS_TIMER_B_EN BIT(15)
-
-#define BIT_SHIFT_PS_TIMER_B_TSF_SEL 8
-#define BIT_MASK_PS_TIMER_B_TSF_SEL 0x7
-#define BIT_PS_TIMER_B_TSF_SEL(x) \
- (((x) & BIT_MASK_PS_TIMER_B_TSF_SEL) << BIT_SHIFT_PS_TIMER_B_TSF_SEL)
-#define BIT_GET_PS_TIMER_B_TSF_SEL(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_B_TSF_SEL) & BIT_MASK_PS_TIMER_B_TSF_SEL)
-
-#define BIT_PS_TIMER_A_EN BIT(7)
-
-#define BIT_SHIFT_PS_TIMER_A_TSF_SEL 0
-#define BIT_MASK_PS_TIMER_A_TSF_SEL 0x7
-#define BIT_PS_TIMER_A_TSF_SEL(x) \
- (((x) & BIT_MASK_PS_TIMER_A_TSF_SEL) << BIT_SHIFT_PS_TIMER_A_TSF_SEL)
-#define BIT_GET_PS_TIMER_A_TSF_SEL(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_A_TSF_SEL) & BIT_MASK_PS_TIMER_A_TSF_SEL)
-
-/* 2 REG_CPUMGQ_TX_TIMER_EARLY (Offset 0x1514) */
-
-#define BIT_SHIFT_CPUMGQ_TX_TIMER_EARLY 0
-#define BIT_MASK_CPUMGQ_TX_TIMER_EARLY 0xff
-#define BIT_CPUMGQ_TX_TIMER_EARLY(x) \
- (((x) & BIT_MASK_CPUMGQ_TX_TIMER_EARLY) \
- << BIT_SHIFT_CPUMGQ_TX_TIMER_EARLY)
-#define BIT_GET_CPUMGQ_TX_TIMER_EARLY(x) \
- (((x) >> BIT_SHIFT_CPUMGQ_TX_TIMER_EARLY) & \
- BIT_MASK_CPUMGQ_TX_TIMER_EARLY)
-
-/* 2 REG_PS_TIMER_A_EARLY (Offset 0x1515) */
-
-#define BIT_SHIFT_PS_TIMER_A_EARLY 0
-#define BIT_MASK_PS_TIMER_A_EARLY 0xff
-#define BIT_PS_TIMER_A_EARLY(x) \
- (((x) & BIT_MASK_PS_TIMER_A_EARLY) << BIT_SHIFT_PS_TIMER_A_EARLY)
-#define BIT_GET_PS_TIMER_A_EARLY(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_A_EARLY) & BIT_MASK_PS_TIMER_A_EARLY)
-
-/* 2 REG_PS_TIMER_B_EARLY (Offset 0x1516) */
-
-#define BIT_SHIFT_PS_TIMER_B_EARLY 0
-#define BIT_MASK_PS_TIMER_B_EARLY 0xff
-#define BIT_PS_TIMER_B_EARLY(x) \
- (((x) & BIT_MASK_PS_TIMER_B_EARLY) << BIT_SHIFT_PS_TIMER_B_EARLY)
-#define BIT_GET_PS_TIMER_B_EARLY(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_B_EARLY) & BIT_MASK_PS_TIMER_B_EARLY)
-
-/* 2 REG_PS_TIMER_C_EARLY (Offset 0x1517) */
-
-#define BIT_SHIFT_PS_TIMER_C_EARLY 0
-#define BIT_MASK_PS_TIMER_C_EARLY 0xff
-#define BIT_PS_TIMER_C_EARLY(x) \
- (((x) & BIT_MASK_PS_TIMER_C_EARLY) << BIT_SHIFT_PS_TIMER_C_EARLY)
-#define BIT_GET_PS_TIMER_C_EARLY(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_C_EARLY) & BIT_MASK_PS_TIMER_C_EARLY)
-
-/* 2 REG_BCN_PSR_RPT2 (Offset 0x1600) */
-
-#define BIT_SHIFT_DTIM_CNT2 24
-#define BIT_MASK_DTIM_CNT2 0xff
-#define BIT_DTIM_CNT2(x) (((x) & BIT_MASK_DTIM_CNT2) << BIT_SHIFT_DTIM_CNT2)
-#define BIT_GET_DTIM_CNT2(x) (((x) >> BIT_SHIFT_DTIM_CNT2) & BIT_MASK_DTIM_CNT2)
-
-#define BIT_SHIFT_DTIM_PERIOD2 16
-#define BIT_MASK_DTIM_PERIOD2 0xff
-#define BIT_DTIM_PERIOD2(x) \
- (((x) & BIT_MASK_DTIM_PERIOD2) << BIT_SHIFT_DTIM_PERIOD2)
-#define BIT_GET_DTIM_PERIOD2(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD2) & BIT_MASK_DTIM_PERIOD2)
-
-#define BIT_DTIM2 BIT(15)
-#define BIT_TIM2 BIT(14)
-
-#define BIT_SHIFT_PS_AID_2 0
-#define BIT_MASK_PS_AID_2 0x7ff
-#define BIT_PS_AID_2(x) (((x) & BIT_MASK_PS_AID_2) << BIT_SHIFT_PS_AID_2)
-#define BIT_GET_PS_AID_2(x) (((x) >> BIT_SHIFT_PS_AID_2) & BIT_MASK_PS_AID_2)
-
-/* 2 REG_BCN_PSR_RPT3 (Offset 0x1604) */
-
-#define BIT_SHIFT_DTIM_CNT3 24
-#define BIT_MASK_DTIM_CNT3 0xff
-#define BIT_DTIM_CNT3(x) (((x) & BIT_MASK_DTIM_CNT3) << BIT_SHIFT_DTIM_CNT3)
-#define BIT_GET_DTIM_CNT3(x) (((x) >> BIT_SHIFT_DTIM_CNT3) & BIT_MASK_DTIM_CNT3)
-
-#define BIT_SHIFT_DTIM_PERIOD3 16
-#define BIT_MASK_DTIM_PERIOD3 0xff
-#define BIT_DTIM_PERIOD3(x) \
- (((x) & BIT_MASK_DTIM_PERIOD3) << BIT_SHIFT_DTIM_PERIOD3)
-#define BIT_GET_DTIM_PERIOD3(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD3) & BIT_MASK_DTIM_PERIOD3)
-
-#define BIT_DTIM3 BIT(15)
-#define BIT_TIM3 BIT(14)
-
-#define BIT_SHIFT_PS_AID_3 0
-#define BIT_MASK_PS_AID_3 0x7ff
-#define BIT_PS_AID_3(x) (((x) & BIT_MASK_PS_AID_3) << BIT_SHIFT_PS_AID_3)
-#define BIT_GET_PS_AID_3(x) (((x) >> BIT_SHIFT_PS_AID_3) & BIT_MASK_PS_AID_3)
-
-/* 2 REG_BCN_PSR_RPT4 (Offset 0x1608) */
-
-#define BIT_SHIFT_DTIM_CNT4 24
-#define BIT_MASK_DTIM_CNT4 0xff
-#define BIT_DTIM_CNT4(x) (((x) & BIT_MASK_DTIM_CNT4) << BIT_SHIFT_DTIM_CNT4)
-#define BIT_GET_DTIM_CNT4(x) (((x) >> BIT_SHIFT_DTIM_CNT4) & BIT_MASK_DTIM_CNT4)
-
-#define BIT_SHIFT_DTIM_PERIOD4 16
-#define BIT_MASK_DTIM_PERIOD4 0xff
-#define BIT_DTIM_PERIOD4(x) \
- (((x) & BIT_MASK_DTIM_PERIOD4) << BIT_SHIFT_DTIM_PERIOD4)
-#define BIT_GET_DTIM_PERIOD4(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD4) & BIT_MASK_DTIM_PERIOD4)
-
-#define BIT_DTIM4 BIT(15)
-#define BIT_TIM4 BIT(14)
-
-#define BIT_SHIFT_PS_AID_4 0
-#define BIT_MASK_PS_AID_4 0x7ff
-#define BIT_PS_AID_4(x) (((x) & BIT_MASK_PS_AID_4) << BIT_SHIFT_PS_AID_4)
-#define BIT_GET_PS_AID_4(x) (((x) >> BIT_SHIFT_PS_AID_4) & BIT_MASK_PS_AID_4)
-
-/* 2 REG_A1_ADDR_MASK (Offset 0x160C) */
-
-#define BIT_SHIFT_A1_ADDR_MASK 0
-#define BIT_MASK_A1_ADDR_MASK 0xffffffffL
-#define BIT_A1_ADDR_MASK(x) \
- (((x) & BIT_MASK_A1_ADDR_MASK) << BIT_SHIFT_A1_ADDR_MASK)
-#define BIT_GET_A1_ADDR_MASK(x) \
- (((x) >> BIT_SHIFT_A1_ADDR_MASK) & BIT_MASK_A1_ADDR_MASK)
-
-/* 2 REG_MACID2 (Offset 0x1620) */
-
-#define BIT_SHIFT_MACID2 0
-#define BIT_MASK_MACID2 0xffffffffffffL
-#define BIT_MACID2(x) (((x) & BIT_MASK_MACID2) << BIT_SHIFT_MACID2)
-#define BIT_GET_MACID2(x) (((x) >> BIT_SHIFT_MACID2) & BIT_MASK_MACID2)
-
-/* 2 REG_BSSID2 (Offset 0x1628) */
-
-#define BIT_SHIFT_BSSID2 0
-#define BIT_MASK_BSSID2 0xffffffffffffL
-#define BIT_BSSID2(x) (((x) & BIT_MASK_BSSID2) << BIT_SHIFT_BSSID2)
-#define BIT_GET_BSSID2(x) (((x) >> BIT_SHIFT_BSSID2) & BIT_MASK_BSSID2)
-
-/* 2 REG_MACID3 (Offset 0x1630) */
-
-#define BIT_SHIFT_MACID3 0
-#define BIT_MASK_MACID3 0xffffffffffffL
-#define BIT_MACID3(x) (((x) & BIT_MASK_MACID3) << BIT_SHIFT_MACID3)
-#define BIT_GET_MACID3(x) (((x) >> BIT_SHIFT_MACID3) & BIT_MASK_MACID3)
-
-/* 2 REG_BSSID3 (Offset 0x1638) */
-
-#define BIT_SHIFT_BSSID3 0
-#define BIT_MASK_BSSID3 0xffffffffffffL
-#define BIT_BSSID3(x) (((x) & BIT_MASK_BSSID3) << BIT_SHIFT_BSSID3)
-#define BIT_GET_BSSID3(x) (((x) >> BIT_SHIFT_BSSID3) & BIT_MASK_BSSID3)
-
-/* 2 REG_MACID4 (Offset 0x1640) */
-
-#define BIT_SHIFT_MACID4 0
-#define BIT_MASK_MACID4 0xffffffffffffL
-#define BIT_MACID4(x) (((x) & BIT_MASK_MACID4) << BIT_SHIFT_MACID4)
-#define BIT_GET_MACID4(x) (((x) >> BIT_SHIFT_MACID4) & BIT_MASK_MACID4)
-
-/* 2 REG_BSSID4 (Offset 0x1648) */
-
-#define BIT_SHIFT_BSSID4 0
-#define BIT_MASK_BSSID4 0xffffffffffffL
-#define BIT_BSSID4(x) (((x) & BIT_MASK_BSSID4) << BIT_SHIFT_BSSID4)
-#define BIT_GET_BSSID4(x) (((x) >> BIT_SHIFT_BSSID4) & BIT_MASK_BSSID4)
-
-/* 2 REG_PWRBIT_SETTING (Offset 0x1660) */
-
-#define BIT_CLI3_PWRBIT_OW_EN BIT(7)
-#define BIT_CLI3_PWR_ST BIT(6)
-#define BIT_CLI2_PWRBIT_OW_EN BIT(5)
-#define BIT_CLI2_PWR_ST BIT(4)
-#define BIT_CLI1_PWRBIT_OW_EN BIT(3)
-#define BIT_CLI1_PWR_ST BIT(2)
-#define BIT_CLI0_PWRBIT_OW_EN BIT(1)
-#define BIT_CLI0_PWR_ST BIT(0)
-
-/* 2 REG_WMAC_MU_BF_OPTION (Offset 0x167C) */
-
-#define BIT_WMAC_RESP_NONSTA1_DIS BIT(7)
-
-/* 2 REG_WMAC_MU_BF_OPTION (Offset 0x167C) */
-
-#define BIT_BIT_WMAC_TXMU_ACKPOLICY_EN BIT(6)
-
-/* 2 REG_WMAC_MU_BF_OPTION (Offset 0x167C) */
-
-#define BIT_SHIFT_WMAC_TXMU_ACKPOLICY 4
-#define BIT_MASK_WMAC_TXMU_ACKPOLICY 0x3
-#define BIT_WMAC_TXMU_ACKPOLICY(x) \
- (((x) & BIT_MASK_WMAC_TXMU_ACKPOLICY) << BIT_SHIFT_WMAC_TXMU_ACKPOLICY)
-#define BIT_GET_WMAC_TXMU_ACKPOLICY(x) \
- (((x) >> BIT_SHIFT_WMAC_TXMU_ACKPOLICY) & BIT_MASK_WMAC_TXMU_ACKPOLICY)
-
-#define BIT_SHIFT_WMAC_MU_BFEE_PORT_SEL 1
-#define BIT_MASK_WMAC_MU_BFEE_PORT_SEL 0x7
-#define BIT_WMAC_MU_BFEE_PORT_SEL(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE_PORT_SEL) \
- << BIT_SHIFT_WMAC_MU_BFEE_PORT_SEL)
-#define BIT_GET_WMAC_MU_BFEE_PORT_SEL(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE_PORT_SEL) & \
- BIT_MASK_WMAC_MU_BFEE_PORT_SEL)
-
-#define BIT_WMAC_MU_BFEE_DIS BIT(0)
-
-/* 2 REG_WMAC_PAUSE_BB_CLR_TH (Offset 0x167D) */
-
-#define BIT_SHIFT_WMAC_PAUSE_BB_CLR_TH 0
-#define BIT_MASK_WMAC_PAUSE_BB_CLR_TH 0xff
-#define BIT_WMAC_PAUSE_BB_CLR_TH(x) \
- (((x) & BIT_MASK_WMAC_PAUSE_BB_CLR_TH) \
- << BIT_SHIFT_WMAC_PAUSE_BB_CLR_TH)
-#define BIT_GET_WMAC_PAUSE_BB_CLR_TH(x) \
- (((x) >> BIT_SHIFT_WMAC_PAUSE_BB_CLR_TH) & \
- BIT_MASK_WMAC_PAUSE_BB_CLR_TH)
-
-/* 2 REG_WMAC_MU_ARB (Offset 0x167E) */
-
-#define BIT_WMAC_ARB_HW_ADAPT_EN BIT(7)
-#define BIT_WMAC_ARB_SW_EN BIT(6)
-
-#define BIT_SHIFT_WMAC_ARB_SW_STATE 0
-#define BIT_MASK_WMAC_ARB_SW_STATE 0x3f
-#define BIT_WMAC_ARB_SW_STATE(x) \
- (((x) & BIT_MASK_WMAC_ARB_SW_STATE) << BIT_SHIFT_WMAC_ARB_SW_STATE)
-#define BIT_GET_WMAC_ARB_SW_STATE(x) \
- (((x) >> BIT_SHIFT_WMAC_ARB_SW_STATE) & BIT_MASK_WMAC_ARB_SW_STATE)
-
-/* 2 REG_WMAC_MU_OPTION (Offset 0x167F) */
-
-#define BIT_SHIFT_WMAC_MU_DBGSEL 5
-#define BIT_MASK_WMAC_MU_DBGSEL 0x3
-#define BIT_WMAC_MU_DBGSEL(x) \
- (((x) & BIT_MASK_WMAC_MU_DBGSEL) << BIT_SHIFT_WMAC_MU_DBGSEL)
-#define BIT_GET_WMAC_MU_DBGSEL(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_DBGSEL) & BIT_MASK_WMAC_MU_DBGSEL)
-
-#define BIT_SHIFT_WMAC_MU_CPRD_TIMEOUT 0
-#define BIT_MASK_WMAC_MU_CPRD_TIMEOUT 0x1f
-#define BIT_WMAC_MU_CPRD_TIMEOUT(x) \
- (((x) & BIT_MASK_WMAC_MU_CPRD_TIMEOUT) \
- << BIT_SHIFT_WMAC_MU_CPRD_TIMEOUT)
-#define BIT_GET_WMAC_MU_CPRD_TIMEOUT(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_CPRD_TIMEOUT) & \
- BIT_MASK_WMAC_MU_CPRD_TIMEOUT)
-
-/* 2 REG_WMAC_MU_BF_CTL (Offset 0x1680) */
-
-#define BIT_WMAC_INVLD_BFPRT_CHK BIT(15)
-#define BIT_WMAC_RETXBFRPTSEQ_UPD BIT(14)
-
-#define BIT_SHIFT_WMAC_MU_BFRPTSEG_SEL 12
-#define BIT_MASK_WMAC_MU_BFRPTSEG_SEL 0x3
-#define BIT_WMAC_MU_BFRPTSEG_SEL(x) \
- (((x) & BIT_MASK_WMAC_MU_BFRPTSEG_SEL) \
- << BIT_SHIFT_WMAC_MU_BFRPTSEG_SEL)
-#define BIT_GET_WMAC_MU_BFRPTSEG_SEL(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFRPTSEG_SEL) & \
- BIT_MASK_WMAC_MU_BFRPTSEG_SEL)
-
-#define BIT_SHIFT_WMAC_MU_BF_MYAID 0
-#define BIT_MASK_WMAC_MU_BF_MYAID 0xfff
-#define BIT_WMAC_MU_BF_MYAID(x) \
- (((x) & BIT_MASK_WMAC_MU_BF_MYAID) << BIT_SHIFT_WMAC_MU_BF_MYAID)
-#define BIT_GET_WMAC_MU_BF_MYAID(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BF_MYAID) & BIT_MASK_WMAC_MU_BF_MYAID)
-
-#define BIT_SHIFT_BFRPT_PARA 0
-#define BIT_MASK_BFRPT_PARA 0xfff
-#define BIT_BFRPT_PARA(x) (((x) & BIT_MASK_BFRPT_PARA) << BIT_SHIFT_BFRPT_PARA)
-#define BIT_GET_BFRPT_PARA(x) \
- (((x) >> BIT_SHIFT_BFRPT_PARA) & BIT_MASK_BFRPT_PARA)
-
-/* 2 REG_WMAC_MU_BFRPT_PARA (Offset 0x1682) */
-
-#define BIT_SHIFT_BIT_BFRPT_PARA_USERID_SEL 12
-#define BIT_MASK_BIT_BFRPT_PARA_USERID_SEL 0x7
-#define BIT_BIT_BFRPT_PARA_USERID_SEL(x) \
- (((x) & BIT_MASK_BIT_BFRPT_PARA_USERID_SEL) \
- << BIT_SHIFT_BIT_BFRPT_PARA_USERID_SEL)
-#define BIT_GET_BIT_BFRPT_PARA_USERID_SEL(x) \
- (((x) >> BIT_SHIFT_BIT_BFRPT_PARA_USERID_SEL) & \
- BIT_MASK_BIT_BFRPT_PARA_USERID_SEL)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE2 (Offset 0x1684) */
-
-#define BIT_STATUS_BFEE2 BIT(10)
-#define BIT_WMAC_MU_BFEE2_EN BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE2_AID 0
-#define BIT_MASK_WMAC_MU_BFEE2_AID 0x1ff
-#define BIT_WMAC_MU_BFEE2_AID(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE2_AID) << BIT_SHIFT_WMAC_MU_BFEE2_AID)
-#define BIT_GET_WMAC_MU_BFEE2_AID(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE2_AID) & BIT_MASK_WMAC_MU_BFEE2_AID)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE3 (Offset 0x1686) */
-
-#define BIT_STATUS_BFEE3 BIT(10)
-#define BIT_WMAC_MU_BFEE3_EN BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE3_AID 0
-#define BIT_MASK_WMAC_MU_BFEE3_AID 0x1ff
-#define BIT_WMAC_MU_BFEE3_AID(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE3_AID) << BIT_SHIFT_WMAC_MU_BFEE3_AID)
-#define BIT_GET_WMAC_MU_BFEE3_AID(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE3_AID) & BIT_MASK_WMAC_MU_BFEE3_AID)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE4 (Offset 0x1688) */
-
-#define BIT_STATUS_BFEE4 BIT(10)
-#define BIT_WMAC_MU_BFEE4_EN BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE4_AID 0
-#define BIT_MASK_WMAC_MU_BFEE4_AID 0x1ff
-#define BIT_WMAC_MU_BFEE4_AID(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE4_AID) << BIT_SHIFT_WMAC_MU_BFEE4_AID)
-#define BIT_GET_WMAC_MU_BFEE4_AID(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE4_AID) & BIT_MASK_WMAC_MU_BFEE4_AID)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE5 (Offset 0x168A) */
-
-#define BIT_R_WMAC_RX_SYNCFIFO_SYNC BIT(55)
-#define BIT_R_WMAC_RXRST_DLY BIT(54)
-#define BIT_R_WMAC_SRCH_TXRPT_REF_DROP BIT(53)
-#define BIT_R_WMAC_SRCH_TXRPT_UA1 BIT(52)
-#define BIT_STATUS_BFEE5 BIT(10)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE5 (Offset 0x168A) */
-
-#define BIT_WMAC_MU_BFEE5_EN BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE5_AID 0
-#define BIT_MASK_WMAC_MU_BFEE5_AID 0x1ff
-#define BIT_WMAC_MU_BFEE5_AID(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE5_AID) << BIT_SHIFT_WMAC_MU_BFEE5_AID)
-#define BIT_GET_WMAC_MU_BFEE5_AID(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE5_AID) & BIT_MASK_WMAC_MU_BFEE5_AID)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE6 (Offset 0x168C) */
-
-#define BIT_STATUS_BFEE6 BIT(10)
-#define BIT_WMAC_MU_BFEE6_EN BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE6_AID 0
-#define BIT_MASK_WMAC_MU_BFEE6_AID 0x1ff
-#define BIT_WMAC_MU_BFEE6_AID(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE6_AID) << BIT_SHIFT_WMAC_MU_BFEE6_AID)
-#define BIT_GET_WMAC_MU_BFEE6_AID(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE6_AID) & BIT_MASK_WMAC_MU_BFEE6_AID)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE7 (Offset 0x168E) */
-
-#define BIT_BIT_STATUS_BFEE4 BIT(10)
-#define BIT_WMAC_MU_BFEE7_EN BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE7_AID 0
-#define BIT_MASK_WMAC_MU_BFEE7_AID 0x1ff
-#define BIT_WMAC_MU_BFEE7_AID(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE7_AID) << BIT_SHIFT_WMAC_MU_BFEE7_AID)
-#define BIT_GET_WMAC_MU_BFEE7_AID(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE7_AID) & BIT_MASK_WMAC_MU_BFEE7_AID)
-
-/* 2 REG_WMAC_BB_STOP_RX_COUNTER (Offset 0x1690) */
-
-#define BIT_RST_ALL_COUNTER BIT(31)
-
-#define BIT_SHIFT_ABORT_RX_VBON_COUNTER 16
-#define BIT_MASK_ABORT_RX_VBON_COUNTER 0xff
-#define BIT_ABORT_RX_VBON_COUNTER(x) \
- (((x) & BIT_MASK_ABORT_RX_VBON_COUNTER) \
- << BIT_SHIFT_ABORT_RX_VBON_COUNTER)
-#define BIT_GET_ABORT_RX_VBON_COUNTER(x) \
- (((x) >> BIT_SHIFT_ABORT_RX_VBON_COUNTER) & \
- BIT_MASK_ABORT_RX_VBON_COUNTER)
-
-#define BIT_SHIFT_ABORT_RX_RDRDY_COUNTER 8
-#define BIT_MASK_ABORT_RX_RDRDY_COUNTER 0xff
-#define BIT_ABORT_RX_RDRDY_COUNTER(x) \
- (((x) & BIT_MASK_ABORT_RX_RDRDY_COUNTER) \
- << BIT_SHIFT_ABORT_RX_RDRDY_COUNTER)
-#define BIT_GET_ABORT_RX_RDRDY_COUNTER(x) \
- (((x) >> BIT_SHIFT_ABORT_RX_RDRDY_COUNTER) & \
- BIT_MASK_ABORT_RX_RDRDY_COUNTER)
-
-#define BIT_SHIFT_VBON_EARLY_FALLING_COUNTER 0
-#define BIT_MASK_VBON_EARLY_FALLING_COUNTER 0xff
-#define BIT_VBON_EARLY_FALLING_COUNTER(x) \
- (((x) & BIT_MASK_VBON_EARLY_FALLING_COUNTER) \
- << BIT_SHIFT_VBON_EARLY_FALLING_COUNTER)
-#define BIT_GET_VBON_EARLY_FALLING_COUNTER(x) \
- (((x) >> BIT_SHIFT_VBON_EARLY_FALLING_COUNTER) & \
- BIT_MASK_VBON_EARLY_FALLING_COUNTER)
-
-/* 2 REG_WMAC_PLCP_MONITOR (Offset 0x1694) */
-
-#define BIT_WMAC_PLCP_TRX_SEL BIT(31)
-
-#define BIT_SHIFT_WMAC_PLCP_RDSIG_SEL 28
-#define BIT_MASK_WMAC_PLCP_RDSIG_SEL 0x7
-#define BIT_WMAC_PLCP_RDSIG_SEL(x) \
- (((x) & BIT_MASK_WMAC_PLCP_RDSIG_SEL) << BIT_SHIFT_WMAC_PLCP_RDSIG_SEL)
-#define BIT_GET_WMAC_PLCP_RDSIG_SEL(x) \
- (((x) >> BIT_SHIFT_WMAC_PLCP_RDSIG_SEL) & BIT_MASK_WMAC_PLCP_RDSIG_SEL)
-
-#define BIT_SHIFT_WMAC_RATE_IDX 24
-#define BIT_MASK_WMAC_RATE_IDX 0xf
-#define BIT_WMAC_RATE_IDX(x) \
- (((x) & BIT_MASK_WMAC_RATE_IDX) << BIT_SHIFT_WMAC_RATE_IDX)
-#define BIT_GET_WMAC_RATE_IDX(x) \
- (((x) >> BIT_SHIFT_WMAC_RATE_IDX) & BIT_MASK_WMAC_RATE_IDX)
-
-#define BIT_SHIFT_WMAC_PLCP_RDSIG 0
-#define BIT_MASK_WMAC_PLCP_RDSIG 0xffffff
-#define BIT_WMAC_PLCP_RDSIG(x) \
- (((x) & BIT_MASK_WMAC_PLCP_RDSIG) << BIT_SHIFT_WMAC_PLCP_RDSIG)
-#define BIT_GET_WMAC_PLCP_RDSIG(x) \
- (((x) >> BIT_SHIFT_WMAC_PLCP_RDSIG) & BIT_MASK_WMAC_PLCP_RDSIG)
-
-/* 2 REG_WMAC_PLCP_MONITOR_MUTX (Offset 0x1698) */
-
-#define BIT_WMAC_MUTX_IDX BIT(24)
-
-/* 2 REG_TRANSMIT_ADDRSS_0 (Offset 0x16A0) */
-
-#define BIT_SHIFT_TA0 0
-#define BIT_MASK_TA0 0xffffffffffffL
-#define BIT_TA0(x) (((x) & BIT_MASK_TA0) << BIT_SHIFT_TA0)
-#define BIT_GET_TA0(x) (((x) >> BIT_SHIFT_TA0) & BIT_MASK_TA0)
-
-/* 2 REG_TRANSMIT_ADDRSS_1 (Offset 0x16A8) */
-
-#define BIT_SHIFT_TA1 0
-#define BIT_MASK_TA1 0xffffffffffffL
-#define BIT_TA1(x) (((x) & BIT_MASK_TA1) << BIT_SHIFT_TA1)
-#define BIT_GET_TA1(x) (((x) >> BIT_SHIFT_TA1) & BIT_MASK_TA1)
-
-/* 2 REG_TRANSMIT_ADDRSS_2 (Offset 0x16B0) */
-
-#define BIT_SHIFT_TA2 0
-#define BIT_MASK_TA2 0xffffffffffffL
-#define BIT_TA2(x) (((x) & BIT_MASK_TA2) << BIT_SHIFT_TA2)
-#define BIT_GET_TA2(x) (((x) >> BIT_SHIFT_TA2) & BIT_MASK_TA2)
-
-/* 2 REG_TRANSMIT_ADDRSS_3 (Offset 0x16B8) */
-
-#define BIT_SHIFT_TA3 0
-#define BIT_MASK_TA3 0xffffffffffffL
-#define BIT_TA3(x) (((x) & BIT_MASK_TA3) << BIT_SHIFT_TA3)
-#define BIT_GET_TA3(x) (((x) >> BIT_SHIFT_TA3) & BIT_MASK_TA3)
-
-/* 2 REG_TRANSMIT_ADDRSS_4 (Offset 0x16C0) */
-
-#define BIT_SHIFT_TA4 0
-#define BIT_MASK_TA4 0xffffffffffffL
-#define BIT_TA4(x) (((x) & BIT_MASK_TA4) << BIT_SHIFT_TA4)
-#define BIT_GET_TA4(x) (((x) >> BIT_SHIFT_TA4) & BIT_MASK_TA4)
-
-/* 2 REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1 (Offset 0x1700) */
-
-#define BIT_LTECOEX_ACCESS_START_V1 BIT(31)
-#define BIT_LTECOEX_WRITE_MODE_V1 BIT(30)
-#define BIT_LTECOEX_READY_BIT_V1 BIT(29)
-
-#define BIT_SHIFT_WRITE_BYTE_EN_V1 16
-#define BIT_MASK_WRITE_BYTE_EN_V1 0xf
-#define BIT_WRITE_BYTE_EN_V1(x) \
- (((x) & BIT_MASK_WRITE_BYTE_EN_V1) << BIT_SHIFT_WRITE_BYTE_EN_V1)
-#define BIT_GET_WRITE_BYTE_EN_V1(x) \
- (((x) >> BIT_SHIFT_WRITE_BYTE_EN_V1) & BIT_MASK_WRITE_BYTE_EN_V1)
-
-#define BIT_SHIFT_LTECOEX_REG_ADDR_V1 0
-#define BIT_MASK_LTECOEX_REG_ADDR_V1 0xffff
-#define BIT_LTECOEX_REG_ADDR_V1(x) \
- (((x) & BIT_MASK_LTECOEX_REG_ADDR_V1) << BIT_SHIFT_LTECOEX_REG_ADDR_V1)
-#define BIT_GET_LTECOEX_REG_ADDR_V1(x) \
- (((x) >> BIT_SHIFT_LTECOEX_REG_ADDR_V1) & BIT_MASK_LTECOEX_REG_ADDR_V1)
-
-/* 2 REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1 (Offset 0x1704) */
-
-#define BIT_SHIFT_LTECOEX_W_DATA_V1 0
-#define BIT_MASK_LTECOEX_W_DATA_V1 0xffffffffL
-#define BIT_LTECOEX_W_DATA_V1(x) \
- (((x) & BIT_MASK_LTECOEX_W_DATA_V1) << BIT_SHIFT_LTECOEX_W_DATA_V1)
-#define BIT_GET_LTECOEX_W_DATA_V1(x) \
- (((x) >> BIT_SHIFT_LTECOEX_W_DATA_V1) & BIT_MASK_LTECOEX_W_DATA_V1)
-
-/* 2 REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1 (Offset 0x1708) */
-
-#define BIT_SHIFT_LTECOEX_R_DATA_V1 0
-#define BIT_MASK_LTECOEX_R_DATA_V1 0xffffffffL
-#define BIT_LTECOEX_R_DATA_V1(x) \
- (((x) & BIT_MASK_LTECOEX_R_DATA_V1) << BIT_SHIFT_LTECOEX_R_DATA_V1)
-#define BIT_GET_LTECOEX_R_DATA_V1(x) \
- (((x) >> BIT_SHIFT_LTECOEX_R_DATA_V1) & BIT_MASK_LTECOEX_R_DATA_V1)
-
-#endif /* __RTL_WLAN_BITDEF_H__ */
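
Editor's note: every removed header above and below follows the same generated accessor convention: a BIT_SHIFT_*/BIT_MASK_* pair per register field, a BIT_*(x) packer and a BIT_GET_*(x) extractor. The standalone sketch below (field name, register value and helpers are purely illustrative, not taken from the removed driver) shows how such macros are typically used for a read-modify-write of one field; it compiles on its own with a hosted C toolchain.

/*
 * Illustrative only: mirrors the BIT_SHIFT_/BIT_MASK_ accessor pattern
 * used throughout the deleted halmac/rtlwifi headers. "EXAMPLE_FIELD"
 * is a made-up 8-bit field at bits [15:8] of a 32-bit register.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_SHIFT_EXAMPLE_FIELD 8
#define BIT_MASK_EXAMPLE_FIELD 0xff
#define BIT_EXAMPLE_FIELD(x) \
	(((x) & BIT_MASK_EXAMPLE_FIELD) << BIT_SHIFT_EXAMPLE_FIELD)
#define BIT_GET_EXAMPLE_FIELD(x) \
	(((x) >> BIT_SHIFT_EXAMPLE_FIELD) & BIT_MASK_EXAMPLE_FIELD)

int main(void)
{
	uint32_t reg = 0x12345678; /* stands in for a register read */

	/* Extract the current field value (here 0x56). */
	uint32_t field = BIT_GET_EXAMPLE_FIELD(reg);

	/* Read-modify-write: clear the field, then insert a new value. */
	reg &= ~((uint32_t)BIT_MASK_EXAMPLE_FIELD << BIT_SHIFT_EXAMPLE_FIELD);
	reg |= BIT_EXAMPLE_FIELD(0xAB);

	printf("old field=0x%02x, new reg=0x%08x\n",
	       (unsigned)field, (unsigned)reg);
	return 0;
}

In the removed driver the same pattern is applied to MMIO register values read and written through the HAL's I/O helpers; the macros only pack and extract bit fields and never touch hardware themselves.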
diff --git a/drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h b/drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h
deleted file mode 100644
index 481ea6d01ca5..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h
+++ /dev/null
@@ -1,12092 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __INC_HALMAC_BIT_8822B_H
-#define __INC_HALMAC_BIT_8822B_H
-
-#define CPU_OPT_WIDTH 0x1F
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_SYS_ISO_CTRL_8822B */
-#define BIT_PWC_EV12V_8822B BIT(15)
-#define BIT_PWC_EV25V_8822B BIT(14)
-#define BIT_PA33V_EN_8822B BIT(13)
-#define BIT_PA12V_EN_8822B BIT(12)
-#define BIT_UA33V_EN_8822B BIT(11)
-#define BIT_UA12V_EN_8822B BIT(10)
-#define BIT_ISO_RFDIO_8822B BIT(9)
-#define BIT_ISO_EB2CORE_8822B BIT(8)
-#define BIT_ISO_DIOE_8822B BIT(7)
-#define BIT_ISO_WLPON2PP_8822B BIT(6)
-#define BIT_ISO_IP2MAC_WA2PP_8822B BIT(5)
-#define BIT_ISO_PD2CORE_8822B BIT(4)
-#define BIT_ISO_PA2PCIE_8822B BIT(3)
-#define BIT_ISO_UD2CORE_8822B BIT(2)
-#define BIT_ISO_UA2USB_8822B BIT(1)
-#define BIT_ISO_WD2PP_8822B BIT(0)
-
-/* 2 REG_SYS_FUNC_EN_8822B */
-#define BIT_FEN_MREGEN_8822B BIT(15)
-#define BIT_FEN_HWPDN_8822B BIT(14)
-#define BIT_EN_25_1_8822B BIT(13)
-#define BIT_FEN_ELDR_8822B BIT(12)
-#define BIT_FEN_DCORE_8822B BIT(11)
-#define BIT_FEN_CPUEN_8822B BIT(10)
-#define BIT_FEN_DIOE_8822B BIT(9)
-#define BIT_FEN_PCIED_8822B BIT(8)
-#define BIT_FEN_PPLL_8822B BIT(7)
-#define BIT_FEN_PCIEA_8822B BIT(6)
-#define BIT_FEN_DIO_PCIE_8822B BIT(5)
-#define BIT_FEN_USBD_8822B BIT(4)
-#define BIT_FEN_UPLL_8822B BIT(3)
-#define BIT_FEN_USBA_8822B BIT(2)
-#define BIT_FEN_BB_GLB_RSTN_8822B BIT(1)
-#define BIT_FEN_BBRSTB_8822B BIT(0)
-
-/* 2 REG_SYS_PW_CTRL_8822B */
-#define BIT_SOP_EABM_8822B BIT(31)
-#define BIT_SOP_ACKF_8822B BIT(30)
-#define BIT_SOP_ERCK_8822B BIT(29)
-#define BIT_SOP_ESWR_8822B BIT(28)
-#define BIT_SOP_PWMM_8822B BIT(27)
-#define BIT_SOP_EECK_8822B BIT(26)
-#define BIT_SOP_EXTL_8822B BIT(24)
-#define BIT_SYM_OP_RING_12M_8822B BIT(22)
-#define BIT_ROP_SWPR_8822B BIT(21)
-#define BIT_DIS_HW_LPLDM_8822B BIT(20)
-#define BIT_OPT_SWRST_WLMCU_8822B BIT(19)
-#define BIT_RDY_SYSPWR_8822B BIT(17)
-#define BIT_EN_WLON_8822B BIT(16)
-#define BIT_APDM_HPDN_8822B BIT(15)
-#define BIT_AFSM_PCIE_SUS_EN_8822B BIT(12)
-#define BIT_AFSM_WLSUS_EN_8822B BIT(11)
-#define BIT_APFM_SWLPS_8822B BIT(10)
-#define BIT_APFM_OFFMAC_8822B BIT(9)
-#define BIT_APFN_ONMAC_8822B BIT(8)
-#define BIT_CHIP_PDN_EN_8822B BIT(7)
-#define BIT_RDY_MACDIS_8822B BIT(6)
-#define BIT_RING_CLK_12M_EN_8822B BIT(4)
-#define BIT_PFM_WOWL_8822B BIT(3)
-#define BIT_PFM_LDKP_8822B BIT(2)
-#define BIT_WL_HCI_ALD_8822B BIT(1)
-#define BIT_PFM_LDALL_8822B BIT(0)
-
-/* 2 REG_SYS_CLK_CTRL_8822B */
-#define BIT_LDO_DUMMY_8822B BIT(15)
-#define BIT_CPU_CLK_EN_8822B BIT(14)
-#define BIT_SYMREG_CLK_EN_8822B BIT(13)
-#define BIT_HCI_CLK_EN_8822B BIT(12)
-#define BIT_MAC_CLK_EN_8822B BIT(11)
-#define BIT_SEC_CLK_EN_8822B BIT(10)
-#define BIT_PHY_SSC_RSTB_8822B BIT(9)
-#define BIT_EXT_32K_EN_8822B BIT(8)
-#define BIT_WL_CLK_TEST_8822B BIT(7)
-#define BIT_OP_SPS_PWM_EN_8822B BIT(6)
-#define BIT_LOADER_CLK_EN_8822B BIT(5)
-#define BIT_MACSLP_8822B BIT(4)
-#define BIT_WAKEPAD_EN_8822B BIT(3)
-#define BIT_ROMD16V_EN_8822B BIT(2)
-#define BIT_CKANA12M_EN_8822B BIT(1)
-#define BIT_CNTD16V_EN_8822B BIT(0)
-
-/* 2 REG_SYS_EEPROM_CTRL_8822B */
-
-#define BIT_SHIFT_VPDIDX_8822B 8
-#define BIT_MASK_VPDIDX_8822B 0xff
-#define BIT_VPDIDX_8822B(x) \
- (((x) & BIT_MASK_VPDIDX_8822B) << BIT_SHIFT_VPDIDX_8822B)
-#define BIT_GET_VPDIDX_8822B(x) \
- (((x) >> BIT_SHIFT_VPDIDX_8822B) & BIT_MASK_VPDIDX_8822B)
-
-#define BIT_SHIFT_EEM1_0_8822B 6
-#define BIT_MASK_EEM1_0_8822B 0x3
-#define BIT_EEM1_0_8822B(x) \
- (((x) & BIT_MASK_EEM1_0_8822B) << BIT_SHIFT_EEM1_0_8822B)
-#define BIT_GET_EEM1_0_8822B(x) \
- (((x) >> BIT_SHIFT_EEM1_0_8822B) & BIT_MASK_EEM1_0_8822B)
-
-#define BIT_AUTOLOAD_SUS_8822B BIT(5)
-#define BIT_EERPOMSEL_8822B BIT(4)
-#define BIT_EECS_V1_8822B BIT(3)
-#define BIT_EESK_V1_8822B BIT(2)
-#define BIT_EEDI_V1_8822B BIT(1)
-#define BIT_EEDO_V1_8822B BIT(0)
-
-/* 2 REG_EE_VPD_8822B */
-
-#define BIT_SHIFT_VPD_DATA_8822B 0
-#define BIT_MASK_VPD_DATA_8822B 0xffffffffL
-#define BIT_VPD_DATA_8822B(x) \
- (((x) & BIT_MASK_VPD_DATA_8822B) << BIT_SHIFT_VPD_DATA_8822B)
-#define BIT_GET_VPD_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_VPD_DATA_8822B) & BIT_MASK_VPD_DATA_8822B)
-
-/* 2 REG_SYS_SWR_CTRL1_8822B */
-#define BIT_C2_L_BIT0_8822B BIT(31)
-
-#define BIT_SHIFT_C1_L_8822B 29
-#define BIT_MASK_C1_L_8822B 0x3
-#define BIT_C1_L_8822B(x) (((x) & BIT_MASK_C1_L_8822B) << BIT_SHIFT_C1_L_8822B)
-#define BIT_GET_C1_L_8822B(x) \
- (((x) >> BIT_SHIFT_C1_L_8822B) & BIT_MASK_C1_L_8822B)
-
-#define BIT_SHIFT_REG_FREQ_L_8822B 25
-#define BIT_MASK_REG_FREQ_L_8822B 0x7
-#define BIT_REG_FREQ_L_8822B(x) \
- (((x) & BIT_MASK_REG_FREQ_L_8822B) << BIT_SHIFT_REG_FREQ_L_8822B)
-#define BIT_GET_REG_FREQ_L_8822B(x) \
- (((x) >> BIT_SHIFT_REG_FREQ_L_8822B) & BIT_MASK_REG_FREQ_L_8822B)
-
-#define BIT_REG_EN_DUTY_8822B BIT(24)
-
-#define BIT_SHIFT_REG_MODE_8822B 22
-#define BIT_MASK_REG_MODE_8822B 0x3
-#define BIT_REG_MODE_8822B(x) \
- (((x) & BIT_MASK_REG_MODE_8822B) << BIT_SHIFT_REG_MODE_8822B)
-#define BIT_GET_REG_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_REG_MODE_8822B) & BIT_MASK_REG_MODE_8822B)
-
-#define BIT_REG_EN_SP_8822B BIT(21)
-#define BIT_REG_AUTO_L_8822B BIT(20)
-#define BIT_SW18_SELD_BIT0_8822B BIT(19)
-#define BIT_SW18_POWOCP_8822B BIT(18)
-
-#define BIT_SHIFT_OCP_L1_8822B 15
-#define BIT_MASK_OCP_L1_8822B 0x7
-#define BIT_OCP_L1_8822B(x) \
- (((x) & BIT_MASK_OCP_L1_8822B) << BIT_SHIFT_OCP_L1_8822B)
-#define BIT_GET_OCP_L1_8822B(x) \
- (((x) >> BIT_SHIFT_OCP_L1_8822B) & BIT_MASK_OCP_L1_8822B)
-
-#define BIT_SHIFT_CF_L_8822B 13
-#define BIT_MASK_CF_L_8822B 0x3
-#define BIT_CF_L_8822B(x) (((x) & BIT_MASK_CF_L_8822B) << BIT_SHIFT_CF_L_8822B)
-#define BIT_GET_CF_L_8822B(x) \
- (((x) >> BIT_SHIFT_CF_L_8822B) & BIT_MASK_CF_L_8822B)
-
-#define BIT_SW18_FPWM_8822B BIT(11)
-#define BIT_SW18_SWEN_8822B BIT(9)
-#define BIT_SW18_LDEN_8822B BIT(8)
-#define BIT_MAC_ID_EN_8822B BIT(7)
-#define BIT_AFE_BGEN_8822B BIT(0)
-
-/* 2 REG_SYS_SWR_CTRL2_8822B */
-#define BIT_POW_ZCD_L_8822B BIT(31)
-#define BIT_AUTOZCD_L_8822B BIT(30)
-
-#define BIT_SHIFT_REG_DELAY_8822B 28
-#define BIT_MASK_REG_DELAY_8822B 0x3
-#define BIT_REG_DELAY_8822B(x) \
- (((x) & BIT_MASK_REG_DELAY_8822B) << BIT_SHIFT_REG_DELAY_8822B)
-#define BIT_GET_REG_DELAY_8822B(x) \
- (((x) >> BIT_SHIFT_REG_DELAY_8822B) & BIT_MASK_REG_DELAY_8822B)
-
-#define BIT_SHIFT_V15ADJ_L1_V1_8822B 24
-#define BIT_MASK_V15ADJ_L1_V1_8822B 0x7
-#define BIT_V15ADJ_L1_V1_8822B(x) \
- (((x) & BIT_MASK_V15ADJ_L1_V1_8822B) << BIT_SHIFT_V15ADJ_L1_V1_8822B)
-#define BIT_GET_V15ADJ_L1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_V15ADJ_L1_V1_8822B) & BIT_MASK_V15ADJ_L1_V1_8822B)
-
-#define BIT_SHIFT_VOL_L1_V1_8822B 20
-#define BIT_MASK_VOL_L1_V1_8822B 0xf
-#define BIT_VOL_L1_V1_8822B(x) \
- (((x) & BIT_MASK_VOL_L1_V1_8822B) << BIT_SHIFT_VOL_L1_V1_8822B)
-#define BIT_GET_VOL_L1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_VOL_L1_V1_8822B) & BIT_MASK_VOL_L1_V1_8822B)
-
-#define BIT_SHIFT_IN_L1_V1_8822B 17
-#define BIT_MASK_IN_L1_V1_8822B 0x7
-#define BIT_IN_L1_V1_8822B(x) \
- (((x) & BIT_MASK_IN_L1_V1_8822B) << BIT_SHIFT_IN_L1_V1_8822B)
-#define BIT_GET_IN_L1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_IN_L1_V1_8822B) & BIT_MASK_IN_L1_V1_8822B)
-
-#define BIT_SHIFT_TBOX_L1_8822B 15
-#define BIT_MASK_TBOX_L1_8822B 0x3
-#define BIT_TBOX_L1_8822B(x) \
- (((x) & BIT_MASK_TBOX_L1_8822B) << BIT_SHIFT_TBOX_L1_8822B)
-#define BIT_GET_TBOX_L1_8822B(x) \
- (((x) >> BIT_SHIFT_TBOX_L1_8822B) & BIT_MASK_TBOX_L1_8822B)
-
-#define BIT_SW18_SEL_8822B BIT(13)
-
-/* 2 REG_NOT_VALID_8822B */
-#define BIT_SW18_SD_8822B BIT(10)
-
-#define BIT_SHIFT_R3_L_8822B 7
-#define BIT_MASK_R3_L_8822B 0x3
-#define BIT_R3_L_8822B(x) (((x) & BIT_MASK_R3_L_8822B) << BIT_SHIFT_R3_L_8822B)
-#define BIT_GET_R3_L_8822B(x) \
- (((x) >> BIT_SHIFT_R3_L_8822B) & BIT_MASK_R3_L_8822B)
-
-#define BIT_SHIFT_SW18_R2_8822B 5
-#define BIT_MASK_SW18_R2_8822B 0x3
-#define BIT_SW18_R2_8822B(x) \
- (((x) & BIT_MASK_SW18_R2_8822B) << BIT_SHIFT_SW18_R2_8822B)
-#define BIT_GET_SW18_R2_8822B(x) \
- (((x) >> BIT_SHIFT_SW18_R2_8822B) & BIT_MASK_SW18_R2_8822B)
-
-#define BIT_SHIFT_SW18_R1_8822B 3
-#define BIT_MASK_SW18_R1_8822B 0x3
-#define BIT_SW18_R1_8822B(x) \
- (((x) & BIT_MASK_SW18_R1_8822B) << BIT_SHIFT_SW18_R1_8822B)
-#define BIT_GET_SW18_R1_8822B(x) \
- (((x) >> BIT_SHIFT_SW18_R1_8822B) & BIT_MASK_SW18_R1_8822B)
-
-#define BIT_SHIFT_C3_L_C3_8822B 1
-#define BIT_MASK_C3_L_C3_8822B 0x3
-#define BIT_C3_L_C3_8822B(x) \
- (((x) & BIT_MASK_C3_L_C3_8822B) << BIT_SHIFT_C3_L_C3_8822B)
-#define BIT_GET_C3_L_C3_8822B(x) \
- (((x) >> BIT_SHIFT_C3_L_C3_8822B) & BIT_MASK_C3_L_C3_8822B)
-
-#define BIT_C2_L_BIT1_8822B BIT(0)
-
-/* 2 REG_SYS_SWR_CTRL3_8822B */
-#define BIT_SPS18_OCP_DIS_8822B BIT(31)
-
-#define BIT_SHIFT_SPS18_OCP_TH_8822B 16
-#define BIT_MASK_SPS18_OCP_TH_8822B 0x7fff
-#define BIT_SPS18_OCP_TH_8822B(x) \
- (((x) & BIT_MASK_SPS18_OCP_TH_8822B) << BIT_SHIFT_SPS18_OCP_TH_8822B)
-#define BIT_GET_SPS18_OCP_TH_8822B(x) \
- (((x) >> BIT_SHIFT_SPS18_OCP_TH_8822B) & BIT_MASK_SPS18_OCP_TH_8822B)
-
-#define BIT_SHIFT_OCP_WINDOW_8822B 0
-#define BIT_MASK_OCP_WINDOW_8822B 0xffff
-#define BIT_OCP_WINDOW_8822B(x) \
- (((x) & BIT_MASK_OCP_WINDOW_8822B) << BIT_SHIFT_OCP_WINDOW_8822B)
-#define BIT_GET_OCP_WINDOW_8822B(x) \
- (((x) >> BIT_SHIFT_OCP_WINDOW_8822B) & BIT_MASK_OCP_WINDOW_8822B)
-
-/* 2 REG_RSV_CTRL_8822B */
-#define BIT_HREG_DBG_8822B BIT(23)
-#define BIT_WLMCUIOIF_8822B BIT(8)
-#define BIT_LOCK_ALL_EN_8822B BIT(7)
-#define BIT_R_DIS_PRST_8822B BIT(6)
-#define BIT_WLOCK_1C_B6_8822B BIT(5)
-#define BIT_WLOCK_40_8822B BIT(4)
-#define BIT_WLOCK_08_8822B BIT(3)
-#define BIT_WLOCK_04_8822B BIT(2)
-#define BIT_WLOCK_00_8822B BIT(1)
-#define BIT_WLOCK_ALL_8822B BIT(0)
-
-/* 2 REG_RF_CTRL_8822B */
-#define BIT_RF_SDMRSTB_8822B BIT(2)
-#define BIT_RF_RSTB_8822B BIT(1)
-#define BIT_RF_EN_8822B BIT(0)
-
-/* 2 REG_AFE_LDO_CTRL_8822B */
-
-#define BIT_SHIFT_LPLDH12_RSV_8822B 29
-#define BIT_MASK_LPLDH12_RSV_8822B 0x7
-#define BIT_LPLDH12_RSV_8822B(x) \
- (((x) & BIT_MASK_LPLDH12_RSV_8822B) << BIT_SHIFT_LPLDH12_RSV_8822B)
-#define BIT_GET_LPLDH12_RSV_8822B(x) \
- (((x) >> BIT_SHIFT_LPLDH12_RSV_8822B) & BIT_MASK_LPLDH12_RSV_8822B)
-
-#define BIT_LPLDH12_SLP_8822B BIT(28)
-
-#define BIT_SHIFT_LPLDH12_VADJ_8822B 24
-#define BIT_MASK_LPLDH12_VADJ_8822B 0xf
-#define BIT_LPLDH12_VADJ_8822B(x) \
- (((x) & BIT_MASK_LPLDH12_VADJ_8822B) << BIT_SHIFT_LPLDH12_VADJ_8822B)
-#define BIT_GET_LPLDH12_VADJ_8822B(x) \
- (((x) >> BIT_SHIFT_LPLDH12_VADJ_8822B) & BIT_MASK_LPLDH12_VADJ_8822B)
-
-#define BIT_LDH12_EN_8822B BIT(16)
-#define BIT_WLBBOFF_BIG_PWC_EN_8822B BIT(14)
-#define BIT_WLBBOFF_SMALL_PWC_EN_8822B BIT(13)
-#define BIT_WLMACOFF_BIG_PWC_EN_8822B BIT(12)
-#define BIT_WLPON_PWC_EN_8822B BIT(11)
-#define BIT_POW_REGU_P1_8822B BIT(10)
-#define BIT_LDOV12W_EN_8822B BIT(8)
-#define BIT_EX_XTAL_DRV_DIGI_8822B BIT(7)
-#define BIT_EX_XTAL_DRV_USB_8822B BIT(6)
-#define BIT_EX_XTAL_DRV_AFE_8822B BIT(5)
-#define BIT_EX_XTAL_DRV_RF2_8822B BIT(4)
-#define BIT_EX_XTAL_DRV_RF1_8822B BIT(3)
-#define BIT_POW_REGU_P0_8822B BIT(2)
-
-/* 2 REG_NOT_VALID_8822B */
-#define BIT_POW_PLL_LDO_8822B BIT(0)
-
-/* 2 REG_AFE_CTRL1_8822B */
-#define BIT_AGPIO_GPE_8822B BIT(31)
-
-#define BIT_SHIFT_XTAL_CAP_XI_8822B 25
-#define BIT_MASK_XTAL_CAP_XI_8822B 0x3f
-#define BIT_XTAL_CAP_XI_8822B(x) \
- (((x) & BIT_MASK_XTAL_CAP_XI_8822B) << BIT_SHIFT_XTAL_CAP_XI_8822B)
-#define BIT_GET_XTAL_CAP_XI_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_CAP_XI_8822B) & BIT_MASK_XTAL_CAP_XI_8822B)
-
-#define BIT_SHIFT_XTAL_DRV_DIGI_8822B 23
-#define BIT_MASK_XTAL_DRV_DIGI_8822B 0x3
-#define BIT_XTAL_DRV_DIGI_8822B(x) \
- (((x) & BIT_MASK_XTAL_DRV_DIGI_8822B) << BIT_SHIFT_XTAL_DRV_DIGI_8822B)
-#define BIT_GET_XTAL_DRV_DIGI_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_DRV_DIGI_8822B) & BIT_MASK_XTAL_DRV_DIGI_8822B)
-
-#define BIT_XTAL_DRV_USB_BIT1_8822B BIT(22)
-
-#define BIT_SHIFT_MAC_CLK_SEL_8822B 20
-#define BIT_MASK_MAC_CLK_SEL_8822B 0x3
-#define BIT_MAC_CLK_SEL_8822B(x) \
- (((x) & BIT_MASK_MAC_CLK_SEL_8822B) << BIT_SHIFT_MAC_CLK_SEL_8822B)
-#define BIT_GET_MAC_CLK_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_MAC_CLK_SEL_8822B) & BIT_MASK_MAC_CLK_SEL_8822B)
-
-#define BIT_XTAL_DRV_USB_BIT0_8822B BIT(19)
-
-#define BIT_SHIFT_XTAL_DRV_AFE_8822B 17
-#define BIT_MASK_XTAL_DRV_AFE_8822B 0x3
-#define BIT_XTAL_DRV_AFE_8822B(x) \
- (((x) & BIT_MASK_XTAL_DRV_AFE_8822B) << BIT_SHIFT_XTAL_DRV_AFE_8822B)
-#define BIT_GET_XTAL_DRV_AFE_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_DRV_AFE_8822B) & BIT_MASK_XTAL_DRV_AFE_8822B)
-
-#define BIT_SHIFT_XTAL_DRV_RF2_8822B 15
-#define BIT_MASK_XTAL_DRV_RF2_8822B 0x3
-#define BIT_XTAL_DRV_RF2_8822B(x) \
- (((x) & BIT_MASK_XTAL_DRV_RF2_8822B) << BIT_SHIFT_XTAL_DRV_RF2_8822B)
-#define BIT_GET_XTAL_DRV_RF2_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_DRV_RF2_8822B) & BIT_MASK_XTAL_DRV_RF2_8822B)
-
-#define BIT_SHIFT_XTAL_DRV_RF1_8822B 13
-#define BIT_MASK_XTAL_DRV_RF1_8822B 0x3
-#define BIT_XTAL_DRV_RF1_8822B(x) \
- (((x) & BIT_MASK_XTAL_DRV_RF1_8822B) << BIT_SHIFT_XTAL_DRV_RF1_8822B)
-#define BIT_GET_XTAL_DRV_RF1_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_DRV_RF1_8822B) & BIT_MASK_XTAL_DRV_RF1_8822B)
-
-#define BIT_XTAL_DELAY_DIGI_8822B BIT(12)
-#define BIT_XTAL_DELAY_USB_8822B BIT(11)
-#define BIT_XTAL_DELAY_AFE_8822B BIT(10)
-
-#define BIT_SHIFT_XTAL_LDO_VREF_8822B 7
-#define BIT_MASK_XTAL_LDO_VREF_8822B 0x7
-#define BIT_XTAL_LDO_VREF_8822B(x) \
- (((x) & BIT_MASK_XTAL_LDO_VREF_8822B) << BIT_SHIFT_XTAL_LDO_VREF_8822B)
-#define BIT_GET_XTAL_LDO_VREF_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_LDO_VREF_8822B) & BIT_MASK_XTAL_LDO_VREF_8822B)
-
-#define BIT_XTAL_XQSEL_RF_8822B BIT(6)
-#define BIT_XTAL_XQSEL_8822B BIT(5)
-
-#define BIT_SHIFT_XTAL_GMN_V2_8822B 3
-#define BIT_MASK_XTAL_GMN_V2_8822B 0x3
-#define BIT_XTAL_GMN_V2_8822B(x) \
- (((x) & BIT_MASK_XTAL_GMN_V2_8822B) << BIT_SHIFT_XTAL_GMN_V2_8822B)
-#define BIT_GET_XTAL_GMN_V2_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_GMN_V2_8822B) & BIT_MASK_XTAL_GMN_V2_8822B)
-
-#define BIT_SHIFT_XTAL_GMP_V2_8822B 1
-#define BIT_MASK_XTAL_GMP_V2_8822B 0x3
-#define BIT_XTAL_GMP_V2_8822B(x) \
- (((x) & BIT_MASK_XTAL_GMP_V2_8822B) << BIT_SHIFT_XTAL_GMP_V2_8822B)
-#define BIT_GET_XTAL_GMP_V2_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_GMP_V2_8822B) & BIT_MASK_XTAL_GMP_V2_8822B)
-
-#define BIT_XTAL_EN_8822B BIT(0)
-
-/* 2 REG_AFE_CTRL2_8822B */
-
-#define BIT_SHIFT_REG_C3_V4_8822B 30
-#define BIT_MASK_REG_C3_V4_8822B 0x3
-#define BIT_REG_C3_V4_8822B(x) \
- (((x) & BIT_MASK_REG_C3_V4_8822B) << BIT_SHIFT_REG_C3_V4_8822B)
-#define BIT_GET_REG_C3_V4_8822B(x) \
- (((x) >> BIT_SHIFT_REG_C3_V4_8822B) & BIT_MASK_REG_C3_V4_8822B)
-
-#define BIT_REG_CP_BIT1_8822B BIT(29)
-
-#define BIT_SHIFT_REG_RS_V4_8822B 26
-#define BIT_MASK_REG_RS_V4_8822B 0x7
-#define BIT_REG_RS_V4_8822B(x) \
- (((x) & BIT_MASK_REG_RS_V4_8822B) << BIT_SHIFT_REG_RS_V4_8822B)
-#define BIT_GET_REG_RS_V4_8822B(x) \
- (((x) >> BIT_SHIFT_REG_RS_V4_8822B) & BIT_MASK_REG_RS_V4_8822B)
-
-#define BIT_SHIFT_REG__CS_8822B 24
-#define BIT_MASK_REG__CS_8822B 0x3
-#define BIT_REG__CS_8822B(x) \
- (((x) & BIT_MASK_REG__CS_8822B) << BIT_SHIFT_REG__CS_8822B)
-#define BIT_GET_REG__CS_8822B(x) \
- (((x) >> BIT_SHIFT_REG__CS_8822B) & BIT_MASK_REG__CS_8822B)
-
-#define BIT_SHIFT_REG_CP_OFFSET_8822B 21
-#define BIT_MASK_REG_CP_OFFSET_8822B 0x7
-#define BIT_REG_CP_OFFSET_8822B(x) \
- (((x) & BIT_MASK_REG_CP_OFFSET_8822B) << BIT_SHIFT_REG_CP_OFFSET_8822B)
-#define BIT_GET_REG_CP_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_REG_CP_OFFSET_8822B) & BIT_MASK_REG_CP_OFFSET_8822B)
-
-#define BIT_SHIFT_CP_BIAS_8822B 18
-#define BIT_MASK_CP_BIAS_8822B 0x7
-#define BIT_CP_BIAS_8822B(x) \
- (((x) & BIT_MASK_CP_BIAS_8822B) << BIT_SHIFT_CP_BIAS_8822B)
-#define BIT_GET_CP_BIAS_8822B(x) \
- (((x) >> BIT_SHIFT_CP_BIAS_8822B) & BIT_MASK_CP_BIAS_8822B)
-
-#define BIT_REG_IDOUBLE_V2_8822B BIT(17)
-#define BIT_EN_SYN_8822B BIT(16)
-
-#define BIT_SHIFT_MCCO_8822B 14
-#define BIT_MASK_MCCO_8822B 0x3
-#define BIT_MCCO_8822B(x) (((x) & BIT_MASK_MCCO_8822B) << BIT_SHIFT_MCCO_8822B)
-#define BIT_GET_MCCO_8822B(x) \
- (((x) >> BIT_SHIFT_MCCO_8822B) & BIT_MASK_MCCO_8822B)
-
-#define BIT_SHIFT_REG_LDO_SEL_8822B 12
-#define BIT_MASK_REG_LDO_SEL_8822B 0x3
-#define BIT_REG_LDO_SEL_8822B(x) \
- (((x) & BIT_MASK_REG_LDO_SEL_8822B) << BIT_SHIFT_REG_LDO_SEL_8822B)
-#define BIT_GET_REG_LDO_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_REG_LDO_SEL_8822B) & BIT_MASK_REG_LDO_SEL_8822B)
-
-#define BIT_REG_KVCO_V2_8822B BIT(10)
-#define BIT_AGPIO_GPO_8822B BIT(9)
-
-#define BIT_SHIFT_AGPIO_DRV_8822B 7
-#define BIT_MASK_AGPIO_DRV_8822B 0x3
-#define BIT_AGPIO_DRV_8822B(x) \
- (((x) & BIT_MASK_AGPIO_DRV_8822B) << BIT_SHIFT_AGPIO_DRV_8822B)
-#define BIT_GET_AGPIO_DRV_8822B(x) \
- (((x) >> BIT_SHIFT_AGPIO_DRV_8822B) & BIT_MASK_AGPIO_DRV_8822B)
-
-#define BIT_SHIFT_XTAL_CAP_XO_8822B 1
-#define BIT_MASK_XTAL_CAP_XO_8822B 0x3f
-#define BIT_XTAL_CAP_XO_8822B(x) \
- (((x) & BIT_MASK_XTAL_CAP_XO_8822B) << BIT_SHIFT_XTAL_CAP_XO_8822B)
-#define BIT_GET_XTAL_CAP_XO_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_CAP_XO_8822B) & BIT_MASK_XTAL_CAP_XO_8822B)
-
-#define BIT_POW_PLL_8822B BIT(0)
-
-/* 2 REG_AFE_CTRL3_8822B */
-
-#define BIT_SHIFT_PS_8822B 7
-#define BIT_MASK_PS_8822B 0x7
-#define BIT_PS_8822B(x) (((x) & BIT_MASK_PS_8822B) << BIT_SHIFT_PS_8822B)
-#define BIT_GET_PS_8822B(x) (((x) >> BIT_SHIFT_PS_8822B) & BIT_MASK_PS_8822B)
-
-#define BIT_PSEN_8822B BIT(6)
-#define BIT_DOGENB_8822B BIT(5)
-#define BIT_REG_MBIAS_8822B BIT(4)
-
-#define BIT_SHIFT_REG_R3_V4_8822B 1
-#define BIT_MASK_REG_R3_V4_8822B 0x7
-#define BIT_REG_R3_V4_8822B(x) \
- (((x) & BIT_MASK_REG_R3_V4_8822B) << BIT_SHIFT_REG_R3_V4_8822B)
-#define BIT_GET_REG_R3_V4_8822B(x) \
- (((x) >> BIT_SHIFT_REG_R3_V4_8822B) & BIT_MASK_REG_R3_V4_8822B)
-
-#define BIT_REG_CP_BIT0_8822B BIT(0)
-
-/* 2 REG_EFUSE_CTRL_8822B */
-#define BIT_EF_FLAG_8822B BIT(31)
-
-#define BIT_SHIFT_EF_PGPD_8822B 28
-#define BIT_MASK_EF_PGPD_8822B 0x7
-#define BIT_EF_PGPD_8822B(x) \
- (((x) & BIT_MASK_EF_PGPD_8822B) << BIT_SHIFT_EF_PGPD_8822B)
-#define BIT_GET_EF_PGPD_8822B(x) \
- (((x) >> BIT_SHIFT_EF_PGPD_8822B) & BIT_MASK_EF_PGPD_8822B)
-
-#define BIT_SHIFT_EF_RDT_8822B 24
-#define BIT_MASK_EF_RDT_8822B 0xf
-#define BIT_EF_RDT_8822B(x) \
- (((x) & BIT_MASK_EF_RDT_8822B) << BIT_SHIFT_EF_RDT_8822B)
-#define BIT_GET_EF_RDT_8822B(x) \
- (((x) >> BIT_SHIFT_EF_RDT_8822B) & BIT_MASK_EF_RDT_8822B)
-
-#define BIT_SHIFT_EF_PGTS_8822B 20
-#define BIT_MASK_EF_PGTS_8822B 0xf
-#define BIT_EF_PGTS_8822B(x) \
- (((x) & BIT_MASK_EF_PGTS_8822B) << BIT_SHIFT_EF_PGTS_8822B)
-#define BIT_GET_EF_PGTS_8822B(x) \
- (((x) >> BIT_SHIFT_EF_PGTS_8822B) & BIT_MASK_EF_PGTS_8822B)
-
-#define BIT_EF_PDWN_8822B BIT(19)
-#define BIT_EF_ALDEN_8822B BIT(18)
-
-#define BIT_SHIFT_EF_ADDR_8822B 8
-#define BIT_MASK_EF_ADDR_8822B 0x3ff
-#define BIT_EF_ADDR_8822B(x) \
- (((x) & BIT_MASK_EF_ADDR_8822B) << BIT_SHIFT_EF_ADDR_8822B)
-#define BIT_GET_EF_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_EF_ADDR_8822B) & BIT_MASK_EF_ADDR_8822B)
-
-#define BIT_SHIFT_EF_DATA_8822B 0
-#define BIT_MASK_EF_DATA_8822B 0xff
-#define BIT_EF_DATA_8822B(x) \
- (((x) & BIT_MASK_EF_DATA_8822B) << BIT_SHIFT_EF_DATA_8822B)
-#define BIT_GET_EF_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_EF_DATA_8822B) & BIT_MASK_EF_DATA_8822B)
-
-/* 2 REG_LDO_EFUSE_CTRL_8822B */
-#define BIT_LDOE25_EN_8822B BIT(31)
-
-#define BIT_SHIFT_LDOE25_V12ADJ_L_8822B 27
-#define BIT_MASK_LDOE25_V12ADJ_L_8822B 0xf
-#define BIT_LDOE25_V12ADJ_L_8822B(x) \
- (((x) & BIT_MASK_LDOE25_V12ADJ_L_8822B) \
- << BIT_SHIFT_LDOE25_V12ADJ_L_8822B)
-#define BIT_GET_LDOE25_V12ADJ_L_8822B(x) \
- (((x) >> BIT_SHIFT_LDOE25_V12ADJ_L_8822B) & \
- BIT_MASK_LDOE25_V12ADJ_L_8822B)
-
-#define BIT_EF_CRES_SEL_8822B BIT(26)
-
-#define BIT_SHIFT_EF_SCAN_START_V1_8822B 16
-#define BIT_MASK_EF_SCAN_START_V1_8822B 0x3ff
-#define BIT_EF_SCAN_START_V1_8822B(x) \
- (((x) & BIT_MASK_EF_SCAN_START_V1_8822B) \
- << BIT_SHIFT_EF_SCAN_START_V1_8822B)
-#define BIT_GET_EF_SCAN_START_V1_8822B(x) \
- (((x) >> BIT_SHIFT_EF_SCAN_START_V1_8822B) & \
- BIT_MASK_EF_SCAN_START_V1_8822B)
-
-#define BIT_SHIFT_EF_SCAN_END_8822B 12
-#define BIT_MASK_EF_SCAN_END_8822B 0xf
-#define BIT_EF_SCAN_END_8822B(x) \
- (((x) & BIT_MASK_EF_SCAN_END_8822B) << BIT_SHIFT_EF_SCAN_END_8822B)
-#define BIT_GET_EF_SCAN_END_8822B(x) \
- (((x) >> BIT_SHIFT_EF_SCAN_END_8822B) & BIT_MASK_EF_SCAN_END_8822B)
-
-#define BIT_EF_PD_DIS_8822B BIT(11)
-
-#define BIT_SHIFT_EF_CELL_SEL_8822B 8
-#define BIT_MASK_EF_CELL_SEL_8822B 0x3
-#define BIT_EF_CELL_SEL_8822B(x) \
- (((x) & BIT_MASK_EF_CELL_SEL_8822B) << BIT_SHIFT_EF_CELL_SEL_8822B)
-#define BIT_GET_EF_CELL_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_EF_CELL_SEL_8822B) & BIT_MASK_EF_CELL_SEL_8822B)
-
-#define BIT_EF_TRPT_8822B BIT(7)
-
-#define BIT_SHIFT_EF_TTHD_8822B 0
-#define BIT_MASK_EF_TTHD_8822B 0x7f
-#define BIT_EF_TTHD_8822B(x) \
- (((x) & BIT_MASK_EF_TTHD_8822B) << BIT_SHIFT_EF_TTHD_8822B)
-#define BIT_GET_EF_TTHD_8822B(x) \
- (((x) >> BIT_SHIFT_EF_TTHD_8822B) & BIT_MASK_EF_TTHD_8822B)
-
-/* 2 REG_PWR_OPTION_CTRL_8822B */
-
-#define BIT_SHIFT_DBG_SEL_V1_8822B 16
-#define BIT_MASK_DBG_SEL_V1_8822B 0xff
-#define BIT_DBG_SEL_V1_8822B(x) \
- (((x) & BIT_MASK_DBG_SEL_V1_8822B) << BIT_SHIFT_DBG_SEL_V1_8822B)
-#define BIT_GET_DBG_SEL_V1_8822B(x) \
- (((x) >> BIT_SHIFT_DBG_SEL_V1_8822B) & BIT_MASK_DBG_SEL_V1_8822B)
-
-#define BIT_SHIFT_DBG_SEL_BYTE_8822B 14
-#define BIT_MASK_DBG_SEL_BYTE_8822B 0x3
-#define BIT_DBG_SEL_BYTE_8822B(x) \
- (((x) & BIT_MASK_DBG_SEL_BYTE_8822B) << BIT_SHIFT_DBG_SEL_BYTE_8822B)
-#define BIT_GET_DBG_SEL_BYTE_8822B(x) \
- (((x) >> BIT_SHIFT_DBG_SEL_BYTE_8822B) & BIT_MASK_DBG_SEL_BYTE_8822B)
-
-#define BIT_SHIFT_STD_L1_V1_8822B 12
-#define BIT_MASK_STD_L1_V1_8822B 0x3
-#define BIT_STD_L1_V1_8822B(x) \
- (((x) & BIT_MASK_STD_L1_V1_8822B) << BIT_SHIFT_STD_L1_V1_8822B)
-#define BIT_GET_STD_L1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_STD_L1_V1_8822B) & BIT_MASK_STD_L1_V1_8822B)
-
-#define BIT_SYSON_DBG_PAD_E2_8822B BIT(11)
-#define BIT_SYSON_LED_PAD_E2_8822B BIT(10)
-#define BIT_SYSON_GPEE_PAD_E2_8822B BIT(9)
-#define BIT_SYSON_PCI_PAD_E2_8822B BIT(8)
-#define BIT_AUTO_SW_LDO_VOL_EN_8822B BIT(7)
-
-#define BIT_SHIFT_SYSON_SPS0WWV_WT_8822B 4
-#define BIT_MASK_SYSON_SPS0WWV_WT_8822B 0x3
-#define BIT_SYSON_SPS0WWV_WT_8822B(x) \
- (((x) & BIT_MASK_SYSON_SPS0WWV_WT_8822B) \
- << BIT_SHIFT_SYSON_SPS0WWV_WT_8822B)
-#define BIT_GET_SYSON_SPS0WWV_WT_8822B(x) \
- (((x) >> BIT_SHIFT_SYSON_SPS0WWV_WT_8822B) & \
- BIT_MASK_SYSON_SPS0WWV_WT_8822B)
-
-#define BIT_SHIFT_SYSON_SPS0LDO_WT_8822B 2
-#define BIT_MASK_SYSON_SPS0LDO_WT_8822B 0x3
-#define BIT_SYSON_SPS0LDO_WT_8822B(x) \
- (((x) & BIT_MASK_SYSON_SPS0LDO_WT_8822B) \
- << BIT_SHIFT_SYSON_SPS0LDO_WT_8822B)
-#define BIT_GET_SYSON_SPS0LDO_WT_8822B(x) \
- (((x) >> BIT_SHIFT_SYSON_SPS0LDO_WT_8822B) & \
- BIT_MASK_SYSON_SPS0LDO_WT_8822B)
-
-#define BIT_SHIFT_SYSON_RCLK_SCALE_8822B 0
-#define BIT_MASK_SYSON_RCLK_SCALE_8822B 0x3
-#define BIT_SYSON_RCLK_SCALE_8822B(x) \
- (((x) & BIT_MASK_SYSON_RCLK_SCALE_8822B) \
- << BIT_SHIFT_SYSON_RCLK_SCALE_8822B)
-#define BIT_GET_SYSON_RCLK_SCALE_8822B(x) \
- (((x) >> BIT_SHIFT_SYSON_RCLK_SCALE_8822B) & \
- BIT_MASK_SYSON_RCLK_SCALE_8822B)
-
-/* 2 REG_CAL_TIMER_8822B */
-
-#define BIT_SHIFT_MATCH_CNT_8822B 8
-#define BIT_MASK_MATCH_CNT_8822B 0xff
-#define BIT_MATCH_CNT_8822B(x) \
- (((x) & BIT_MASK_MATCH_CNT_8822B) << BIT_SHIFT_MATCH_CNT_8822B)
-#define BIT_GET_MATCH_CNT_8822B(x) \
- (((x) >> BIT_SHIFT_MATCH_CNT_8822B) & BIT_MASK_MATCH_CNT_8822B)
-
-#define BIT_SHIFT_CAL_SCAL_8822B 0
-#define BIT_MASK_CAL_SCAL_8822B 0xff
-#define BIT_CAL_SCAL_8822B(x) \
- (((x) & BIT_MASK_CAL_SCAL_8822B) << BIT_SHIFT_CAL_SCAL_8822B)
-#define BIT_GET_CAL_SCAL_8822B(x) \
- (((x) >> BIT_SHIFT_CAL_SCAL_8822B) & BIT_MASK_CAL_SCAL_8822B)
-
-/* 2 REG_ACLK_MON_8822B */
-
-#define BIT_SHIFT_RCLK_MON_8822B 5
-#define BIT_MASK_RCLK_MON_8822B 0x7ff
-#define BIT_RCLK_MON_8822B(x) \
- (((x) & BIT_MASK_RCLK_MON_8822B) << BIT_SHIFT_RCLK_MON_8822B)
-#define BIT_GET_RCLK_MON_8822B(x) \
- (((x) >> BIT_SHIFT_RCLK_MON_8822B) & BIT_MASK_RCLK_MON_8822B)
-
-#define BIT_CAL_EN_8822B BIT(4)
-
-#define BIT_SHIFT_DPSTU_8822B 2
-#define BIT_MASK_DPSTU_8822B 0x3
-#define BIT_DPSTU_8822B(x) \
- (((x) & BIT_MASK_DPSTU_8822B) << BIT_SHIFT_DPSTU_8822B)
-#define BIT_GET_DPSTU_8822B(x) \
- (((x) >> BIT_SHIFT_DPSTU_8822B) & BIT_MASK_DPSTU_8822B)
-
-#define BIT_SUS_16X_8822B BIT(1)
-
-/* 2 REG_GPIO_MUXCFG_8822B */
-#define BIT_FSPI_EN_8822B BIT(19)
-#define BIT_WL_RTS_EXT_32K_SEL_8822B BIT(18)
-#define BIT_WLGP_SPI_EN_8822B BIT(16)
-#define BIT_SIC_LBK_8822B BIT(15)
-#define BIT_ENHTP_8822B BIT(14)
-#define BIT_ENSIC_8822B BIT(12)
-#define BIT_SIC_SWRST_8822B BIT(11)
-#define BIT_PO_WIFI_PTA_PINS_8822B BIT(10)
-#define BIT_PO_BT_PTA_PINS_8822B BIT(9)
-#define BIT_ENUART_8822B BIT(8)
-
-#define BIT_SHIFT_BTMODE_8822B 6
-#define BIT_MASK_BTMODE_8822B 0x3
-#define BIT_BTMODE_8822B(x) \
- (((x) & BIT_MASK_BTMODE_8822B) << BIT_SHIFT_BTMODE_8822B)
-#define BIT_GET_BTMODE_8822B(x) \
- (((x) >> BIT_SHIFT_BTMODE_8822B) & BIT_MASK_BTMODE_8822B)
-
-#define BIT_ENBT_8822B BIT(5)
-#define BIT_EROM_EN_8822B BIT(4)
-#define BIT_WLRFE_6_7_EN_8822B BIT(3)
-#define BIT_WLRFE_4_5_EN_8822B BIT(2)
-
-#define BIT_SHIFT_GPIOSEL_8822B 0
-#define BIT_MASK_GPIOSEL_8822B 0x3
-#define BIT_GPIOSEL_8822B(x) \
- (((x) & BIT_MASK_GPIOSEL_8822B) << BIT_SHIFT_GPIOSEL_8822B)
-#define BIT_GET_GPIOSEL_8822B(x) \
- (((x) >> BIT_SHIFT_GPIOSEL_8822B) & BIT_MASK_GPIOSEL_8822B)
-
-/* 2 REG_GPIO_PIN_CTRL_8822B */
-
-#define BIT_SHIFT_GPIO_MOD_7_TO_0_8822B 24
-#define BIT_MASK_GPIO_MOD_7_TO_0_8822B 0xff
-#define BIT_GPIO_MOD_7_TO_0_8822B(x) \
- (((x) & BIT_MASK_GPIO_MOD_7_TO_0_8822B) \
- << BIT_SHIFT_GPIO_MOD_7_TO_0_8822B)
-#define BIT_GET_GPIO_MOD_7_TO_0_8822B(x) \
- (((x) >> BIT_SHIFT_GPIO_MOD_7_TO_0_8822B) & \
- BIT_MASK_GPIO_MOD_7_TO_0_8822B)
-
-#define BIT_SHIFT_GPIO_IO_SEL_7_TO_0_8822B 16
-#define BIT_MASK_GPIO_IO_SEL_7_TO_0_8822B 0xff
-#define BIT_GPIO_IO_SEL_7_TO_0_8822B(x) \
- (((x) & BIT_MASK_GPIO_IO_SEL_7_TO_0_8822B) \
- << BIT_SHIFT_GPIO_IO_SEL_7_TO_0_8822B)
-#define BIT_GET_GPIO_IO_SEL_7_TO_0_8822B(x) \
- (((x) >> BIT_SHIFT_GPIO_IO_SEL_7_TO_0_8822B) & \
- BIT_MASK_GPIO_IO_SEL_7_TO_0_8822B)
-
-#define BIT_SHIFT_GPIO_OUT_7_TO_0_8822B 8
-#define BIT_MASK_GPIO_OUT_7_TO_0_8822B 0xff
-#define BIT_GPIO_OUT_7_TO_0_8822B(x) \
- (((x) & BIT_MASK_GPIO_OUT_7_TO_0_8822B) \
- << BIT_SHIFT_GPIO_OUT_7_TO_0_8822B)
-#define BIT_GET_GPIO_OUT_7_TO_0_8822B(x) \
- (((x) >> BIT_SHIFT_GPIO_OUT_7_TO_0_8822B) & \
- BIT_MASK_GPIO_OUT_7_TO_0_8822B)
-
-#define BIT_SHIFT_GPIO_IN_7_TO_0_8822B 0
-#define BIT_MASK_GPIO_IN_7_TO_0_8822B 0xff
-#define BIT_GPIO_IN_7_TO_0_8822B(x) \
- (((x) & BIT_MASK_GPIO_IN_7_TO_0_8822B) \
- << BIT_SHIFT_GPIO_IN_7_TO_0_8822B)
-#define BIT_GET_GPIO_IN_7_TO_0_8822B(x) \
- (((x) >> BIT_SHIFT_GPIO_IN_7_TO_0_8822B) & \
- BIT_MASK_GPIO_IN_7_TO_0_8822B)
-
-/* 2 REG_GPIO_INTM_8822B */
-
-#define BIT_SHIFT_MUXDBG_SEL_8822B 30
-#define BIT_MASK_MUXDBG_SEL_8822B 0x3
-#define BIT_MUXDBG_SEL_8822B(x) \
- (((x) & BIT_MASK_MUXDBG_SEL_8822B) << BIT_SHIFT_MUXDBG_SEL_8822B)
-#define BIT_GET_MUXDBG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_MUXDBG_SEL_8822B) & BIT_MASK_MUXDBG_SEL_8822B)
-
-#define BIT_EXTWOL_SEL_8822B BIT(17)
-#define BIT_EXTWOL_EN_8822B BIT(16)
-#define BIT_GPIOF_INT_MD_8822B BIT(15)
-#define BIT_GPIOE_INT_MD_8822B BIT(14)
-#define BIT_GPIOD_INT_MD_8822B BIT(13)
-#define BIT_GPIOF_INT_MD_8822B BIT(15)
-#define BIT_GPIOE_INT_MD_8822B BIT(14)
-#define BIT_GPIOD_INT_MD_8822B BIT(13)
-#define BIT_GPIOC_INT_MD_8822B BIT(12)
-#define BIT_GPIOB_INT_MD_8822B BIT(11)
-#define BIT_GPIOA_INT_MD_8822B BIT(10)
-#define BIT_GPIO9_INT_MD_8822B BIT(9)
-#define BIT_GPIO8_INT_MD_8822B BIT(8)
-#define BIT_GPIO7_INT_MD_8822B BIT(7)
-#define BIT_GPIO6_INT_MD_8822B BIT(6)
-#define BIT_GPIO5_INT_MD_8822B BIT(5)
-#define BIT_GPIO4_INT_MD_8822B BIT(4)
-#define BIT_GPIO3_INT_MD_8822B BIT(3)
-#define BIT_GPIO2_INT_MD_8822B BIT(2)
-#define BIT_GPIO1_INT_MD_8822B BIT(1)
-#define BIT_GPIO0_INT_MD_8822B BIT(0)
-
-/* 2 REG_LED_CFG_8822B */
-#define BIT_GPIO3_WL_CTRL_EN_8822B BIT(27)
-#define BIT_LNAON_SEL_EN_8822B BIT(26)
-#define BIT_PAPE_SEL_EN_8822B BIT(25)
-#define BIT_DPDT_WLBT_SEL_8822B BIT(24)
-#define BIT_DPDT_SEL_EN_8822B BIT(23)
-#define BIT_GPIO13_14_WL_CTRL_EN_8822B BIT(22)
-#define BIT_GPIO13_14_WL_CTRL_EN_8822B BIT(22)
-#define BIT_LED2DIS_8822B BIT(21)
-#define BIT_LED2PL_8822B BIT(20)
-#define BIT_LED2SV_8822B BIT(19)
-
-#define BIT_SHIFT_LED2CM_8822B 16
-#define BIT_MASK_LED2CM_8822B 0x7
-#define BIT_LED2CM_8822B(x) \
- (((x) & BIT_MASK_LED2CM_8822B) << BIT_SHIFT_LED2CM_8822B)
-#define BIT_GET_LED2CM_8822B(x) \
- (((x) >> BIT_SHIFT_LED2CM_8822B) & BIT_MASK_LED2CM_8822B)
-
-#define BIT_LED1DIS_8822B BIT(15)
-#define BIT_LED1PL_8822B BIT(12)
-#define BIT_LED1SV_8822B BIT(11)
-
-#define BIT_SHIFT_LED1CM_8822B 8
-#define BIT_MASK_LED1CM_8822B 0x7
-#define BIT_LED1CM_8822B(x) \
- (((x) & BIT_MASK_LED1CM_8822B) << BIT_SHIFT_LED1CM_8822B)
-#define BIT_GET_LED1CM_8822B(x) \
- (((x) >> BIT_SHIFT_LED1CM_8822B) & BIT_MASK_LED1CM_8822B)
-
-#define BIT_LED0DIS_8822B BIT(7)
-
-#define BIT_SHIFT_AFE_LDO_SWR_CHECK_8822B 5
-#define BIT_MASK_AFE_LDO_SWR_CHECK_8822B 0x3
-#define BIT_AFE_LDO_SWR_CHECK_8822B(x) \
- (((x) & BIT_MASK_AFE_LDO_SWR_CHECK_8822B) \
- << BIT_SHIFT_AFE_LDO_SWR_CHECK_8822B)
-#define BIT_GET_AFE_LDO_SWR_CHECK_8822B(x) \
- (((x) >> BIT_SHIFT_AFE_LDO_SWR_CHECK_8822B) & \
- BIT_MASK_AFE_LDO_SWR_CHECK_8822B)
-
-#define BIT_LED0PL_8822B BIT(4)
-#define BIT_LED0SV_8822B BIT(3)
-
-#define BIT_SHIFT_LED0CM_8822B 0
-#define BIT_MASK_LED0CM_8822B 0x7
-#define BIT_LED0CM_8822B(x) \
- (((x) & BIT_MASK_LED0CM_8822B) << BIT_SHIFT_LED0CM_8822B)
-#define BIT_GET_LED0CM_8822B(x) \
- (((x) >> BIT_SHIFT_LED0CM_8822B) & BIT_MASK_LED0CM_8822B)
-
-/* 2 REG_FSIMR_8822B */
-#define BIT_FS_PDNINT_EN_8822B BIT(31)
-#define BIT_NFC_INT_PAD_EN_8822B BIT(30)
-#define BIT_FS_SPS_OCP_INT_EN_8822B BIT(29)
-#define BIT_FS_PWMERR_INT_EN_8822B BIT(28)
-#define BIT_FS_GPIOF_INT_EN_8822B BIT(27)
-#define BIT_FS_GPIOE_INT_EN_8822B BIT(26)
-#define BIT_FS_GPIOD_INT_EN_8822B BIT(25)
-#define BIT_FS_GPIOC_INT_EN_8822B BIT(24)
-#define BIT_FS_GPIOB_INT_EN_8822B BIT(23)
-#define BIT_FS_GPIOA_INT_EN_8822B BIT(22)
-#define BIT_FS_GPIO9_INT_EN_8822B BIT(21)
-#define BIT_FS_GPIO8_INT_EN_8822B BIT(20)
-#define BIT_FS_GPIO7_INT_EN_8822B BIT(19)
-#define BIT_FS_GPIO6_INT_EN_8822B BIT(18)
-#define BIT_FS_GPIO5_INT_EN_8822B BIT(17)
-#define BIT_FS_GPIO4_INT_EN_8822B BIT(16)
-#define BIT_FS_GPIO3_INT_EN_8822B BIT(15)
-#define BIT_FS_GPIO2_INT_EN_8822B BIT(14)
-#define BIT_FS_GPIO1_INT_EN_8822B BIT(13)
-#define BIT_FS_GPIO0_INT_EN_8822B BIT(12)
-#define BIT_FS_HCI_SUS_EN_8822B BIT(11)
-#define BIT_FS_HCI_RES_EN_8822B BIT(10)
-#define BIT_FS_HCI_RESET_EN_8822B BIT(9)
-#define BIT_FS_BTON_STS_UPDATE_MSK_EN_8822B BIT(7)
-#define BIT_ACT2RECOVERY_INT_EN_V1_8822B BIT(6)
-#define BIT_GEN1GEN2_SWITCH_8822B BIT(5)
-#define BIT_HCI_TXDMA_REQ_HIMR_8822B BIT(4)
-#define BIT_FS_32K_LEAVE_SETTING_MAK_8822B BIT(3)
-#define BIT_FS_32K_ENTER_SETTING_MAK_8822B BIT(2)
-#define BIT_FS_USB_LPMRSM_MSK_8822B BIT(1)
-#define BIT_FS_USB_LPMINT_MSK_8822B BIT(0)
-
-/* 2 REG_FSISR_8822B */
-#define BIT_FS_PDNINT_8822B BIT(31)
-#define BIT_FS_SPS_OCP_INT_8822B BIT(29)
-#define BIT_FS_PWMERR_INT_8822B BIT(28)
-#define BIT_FS_GPIOF_INT_8822B BIT(27)
-#define BIT_FS_GPIOE_INT_8822B BIT(26)
-#define BIT_FS_GPIOD_INT_8822B BIT(25)
-#define BIT_FS_GPIOC_INT_8822B BIT(24)
-#define BIT_FS_GPIOB_INT_8822B BIT(23)
-#define BIT_FS_GPIOA_INT_8822B BIT(22)
-#define BIT_FS_GPIO9_INT_8822B BIT(21)
-#define BIT_FS_GPIO8_INT_8822B BIT(20)
-#define BIT_FS_GPIO7_INT_8822B BIT(19)
-#define BIT_FS_GPIO6_INT_8822B BIT(18)
-#define BIT_FS_GPIO5_INT_8822B BIT(17)
-#define BIT_FS_GPIO4_INT_8822B BIT(16)
-#define BIT_FS_GPIO3_INT_8822B BIT(15)
-#define BIT_FS_GPIO2_INT_8822B BIT(14)
-#define BIT_FS_GPIO1_INT_8822B BIT(13)
-#define BIT_FS_GPIO0_INT_8822B BIT(12)
-#define BIT_FS_HCI_SUS_INT_8822B BIT(11)
-#define BIT_FS_HCI_RES_INT_8822B BIT(10)
-#define BIT_FS_HCI_RESET_INT_8822B BIT(9)
-#define BIT_ACT2RECOVERY_8822B BIT(6)
-#define BIT_GEN1GEN2_SWITCH_8822B BIT(5)
-#define BIT_HCI_TXDMA_REQ_HISR_8822B BIT(4)
-#define BIT_FS_32K_LEAVE_SETTING_INT_8822B BIT(3)
-#define BIT_FS_32K_ENTER_SETTING_INT_8822B BIT(2)
-#define BIT_FS_USB_LPMRSM_INT_8822B BIT(1)
-#define BIT_FS_USB_LPMINT_INT_8822B BIT(0)
-
-/* 2 REG_HSIMR_8822B */
-#define BIT_GPIOF_INT_EN_8822B BIT(31)
-#define BIT_GPIOE_INT_EN_8822B BIT(30)
-#define BIT_GPIOD_INT_EN_8822B BIT(29)
-#define BIT_GPIOC_INT_EN_8822B BIT(28)
-#define BIT_GPIOB_INT_EN_8822B BIT(27)
-#define BIT_GPIOA_INT_EN_8822B BIT(26)
-#define BIT_GPIO9_INT_EN_8822B BIT(25)
-#define BIT_GPIO8_INT_EN_8822B BIT(24)
-#define BIT_GPIO7_INT_EN_8822B BIT(23)
-#define BIT_GPIO6_INT_EN_8822B BIT(22)
-#define BIT_GPIO5_INT_EN_8822B BIT(21)
-#define BIT_GPIO4_INT_EN_8822B BIT(20)
-#define BIT_GPIO3_INT_EN_8822B BIT(19)
-#define BIT_GPIO2_INT_EN_V1_8822B BIT(16)
-#define BIT_GPIO1_INT_EN_8822B BIT(17)
-#define BIT_GPIO0_INT_EN_8822B BIT(16)
-#define BIT_PDNINT_EN_8822B BIT(7)
-#define BIT_RON_INT_EN_8822B BIT(6)
-#define BIT_SPS_OCP_INT_EN_8822B BIT(5)
-#define BIT_GPIO15_0_INT_EN_8822B BIT(0)
-
-/* 2 REG_HSISR_8822B */
-#define BIT_GPIOF_INT_8822B BIT(31)
-#define BIT_GPIOE_INT_8822B BIT(30)
-#define BIT_GPIOD_INT_8822B BIT(29)
-#define BIT_GPIOC_INT_8822B BIT(28)
-#define BIT_GPIOB_INT_8822B BIT(27)
-#define BIT_GPIOA_INT_8822B BIT(26)
-#define BIT_GPIO9_INT_8822B BIT(25)
-#define BIT_GPIO8_INT_8822B BIT(24)
-#define BIT_GPIO7_INT_8822B BIT(23)
-#define BIT_GPIO6_INT_8822B BIT(22)
-#define BIT_GPIO5_INT_8822B BIT(21)
-#define BIT_GPIO4_INT_8822B BIT(20)
-#define BIT_GPIO3_INT_8822B BIT(19)
-#define BIT_GPIO2_INT_V1_8822B BIT(16)
-#define BIT_GPIO1_INT_8822B BIT(17)
-#define BIT_GPIO0_INT_8822B BIT(16)
-#define BIT_PDNINT_8822B BIT(7)
-#define BIT_RON_INT_8822B BIT(6)
-#define BIT_SPS_OCP_INT_8822B BIT(5)
-#define BIT_GPIO15_0_INT_8822B BIT(0)
-
-/* 2 REG_GPIO_EXT_CTRL_8822B */
-
-#define BIT_SHIFT_GPIO_MOD_15_TO_8_8822B 24
-#define BIT_MASK_GPIO_MOD_15_TO_8_8822B 0xff
-#define BIT_GPIO_MOD_15_TO_8_8822B(x) \
- (((x) & BIT_MASK_GPIO_MOD_15_TO_8_8822B) \
- << BIT_SHIFT_GPIO_MOD_15_TO_8_8822B)
-#define BIT_GET_GPIO_MOD_15_TO_8_8822B(x) \
- (((x) >> BIT_SHIFT_GPIO_MOD_15_TO_8_8822B) & \
- BIT_MASK_GPIO_MOD_15_TO_8_8822B)
-
-#define BIT_SHIFT_GPIO_IO_SEL_15_TO_8_8822B 16
-#define BIT_MASK_GPIO_IO_SEL_15_TO_8_8822B 0xff
-#define BIT_GPIO_IO_SEL_15_TO_8_8822B(x) \
- (((x) & BIT_MASK_GPIO_IO_SEL_15_TO_8_8822B) \
- << BIT_SHIFT_GPIO_IO_SEL_15_TO_8_8822B)
-#define BIT_GET_GPIO_IO_SEL_15_TO_8_8822B(x) \
- (((x) >> BIT_SHIFT_GPIO_IO_SEL_15_TO_8_8822B) & \
- BIT_MASK_GPIO_IO_SEL_15_TO_8_8822B)
-
-#define BIT_SHIFT_GPIO_OUT_15_TO_8_8822B 8
-#define BIT_MASK_GPIO_OUT_15_TO_8_8822B 0xff
-#define BIT_GPIO_OUT_15_TO_8_8822B(x) \
- (((x) & BIT_MASK_GPIO_OUT_15_TO_8_8822B) \
- << BIT_SHIFT_GPIO_OUT_15_TO_8_8822B)
-#define BIT_GET_GPIO_OUT_15_TO_8_8822B(x) \
- (((x) >> BIT_SHIFT_GPIO_OUT_15_TO_8_8822B) & \
- BIT_MASK_GPIO_OUT_15_TO_8_8822B)
-
-#define BIT_SHIFT_GPIO_IN_15_TO_8_8822B 0
-#define BIT_MASK_GPIO_IN_15_TO_8_8822B 0xff
-#define BIT_GPIO_IN_15_TO_8_8822B(x) \
- (((x) & BIT_MASK_GPIO_IN_15_TO_8_8822B) \
- << BIT_SHIFT_GPIO_IN_15_TO_8_8822B)
-#define BIT_GET_GPIO_IN_15_TO_8_8822B(x) \
- (((x) >> BIT_SHIFT_GPIO_IN_15_TO_8_8822B) & \
- BIT_MASK_GPIO_IN_15_TO_8_8822B)
-
-/* 2 REG_PAD_CTRL1_8822B */
-#define BIT_PAPE_WLBT_SEL_8822B BIT(29)
-#define BIT_LNAON_WLBT_SEL_8822B BIT(28)
-#define BIT_BTGP_GPG3_FEN_8822B BIT(26)
-#define BIT_BTGP_GPG2_FEN_8822B BIT(25)
-#define BIT_BTGP_JTAG_EN_8822B BIT(24)
-#define BIT_XTAL_CLK_EXTARNAL_EN_8822B BIT(23)
-#define BIT_BTGP_UART0_EN_8822B BIT(22)
-#define BIT_BTGP_UART1_EN_8822B BIT(21)
-#define BIT_BTGP_SPI_EN_8822B BIT(20)
-#define BIT_BTGP_GPIO_E2_8822B BIT(19)
-#define BIT_BTGP_GPIO_EN_8822B BIT(18)
-
-#define BIT_SHIFT_BTGP_GPIO_SL_8822B 16
-#define BIT_MASK_BTGP_GPIO_SL_8822B 0x3
-#define BIT_BTGP_GPIO_SL_8822B(x) \
- (((x) & BIT_MASK_BTGP_GPIO_SL_8822B) << BIT_SHIFT_BTGP_GPIO_SL_8822B)
-#define BIT_GET_BTGP_GPIO_SL_8822B(x) \
- (((x) >> BIT_SHIFT_BTGP_GPIO_SL_8822B) & BIT_MASK_BTGP_GPIO_SL_8822B)
-
-#define BIT_PAD_SDIO_SR_8822B BIT(14)
-#define BIT_GPIO14_OUTPUT_PL_8822B BIT(13)
-#define BIT_HOST_WAKE_PAD_PULL_EN_8822B BIT(12)
-#define BIT_HOST_WAKE_PAD_SL_8822B BIT(11)
-#define BIT_PAD_LNAON_SR_8822B BIT(10)
-#define BIT_PAD_LNAON_E2_8822B BIT(9)
-#define BIT_SW_LNAON_G_SEL_DATA_8822B BIT(8)
-#define BIT_SW_LNAON_A_SEL_DATA_8822B BIT(7)
-#define BIT_PAD_PAPE_SR_8822B BIT(6)
-#define BIT_PAD_PAPE_E2_8822B BIT(5)
-#define BIT_SW_PAPE_G_SEL_DATA_8822B BIT(4)
-#define BIT_SW_PAPE_A_SEL_DATA_8822B BIT(3)
-#define BIT_PAD_DPDT_SR_8822B BIT(2)
-#define BIT_PAD_DPDT_PAD_E2_8822B BIT(1)
-#define BIT_SW_DPDT_SEL_DATA_8822B BIT(0)
-
-/* 2 REG_WL_BT_PWR_CTRL_8822B */
-#define BIT_ISO_BD2PP_8822B BIT(31)
-#define BIT_LDOV12B_EN_8822B BIT(30)
-#define BIT_CKEN_BTGPS_8822B BIT(29)
-#define BIT_FEN_BTGPS_8822B BIT(28)
-#define BIT_BTCPU_BOOTSEL_8822B BIT(27)
-#define BIT_SPI_SPEEDUP_8822B BIT(26)
-#define BIT_DEVWAKE_PAD_TYPE_SEL_8822B BIT(24)
-#define BIT_CLKREQ_PAD_TYPE_SEL_8822B BIT(23)
-#define BIT_ISO_BTPON2PP_8822B BIT(22)
-#define BIT_BT_HWROF_EN_8822B BIT(19)
-#define BIT_BT_FUNC_EN_8822B BIT(18)
-#define BIT_BT_HWPDN_SL_8822B BIT(17)
-#define BIT_BT_DISN_EN_8822B BIT(16)
-#define BIT_BT_PDN_PULL_EN_8822B BIT(15)
-#define BIT_WL_PDN_PULL_EN_8822B BIT(14)
-#define BIT_EXTERNAL_REQUEST_PL_8822B BIT(13)
-#define BIT_GPIO0_2_3_PULL_LOW_EN_8822B BIT(12)
-#define BIT_ISO_BA2PP_8822B BIT(11)
-#define BIT_BT_AFE_LDO_EN_8822B BIT(10)
-#define BIT_BT_AFE_PLL_EN_8822B BIT(9)
-#define BIT_BT_DIG_CLK_EN_8822B BIT(8)
-#define BIT_WL_DRV_EXIST_IDX_8822B BIT(5)
-#define BIT_DOP_EHPAD_8822B BIT(4)
-#define BIT_WL_HWROF_EN_8822B BIT(3)
-#define BIT_WL_FUNC_EN_8822B BIT(2)
-#define BIT_WL_HWPDN_SL_8822B BIT(1)
-#define BIT_WL_HWPDN_EN_8822B BIT(0)
-
-/* 2 REG_SDM_DEBUG_8822B */
-
-#define BIT_SHIFT_WLCLK_PHASE_8822B 0
-#define BIT_MASK_WLCLK_PHASE_8822B 0x1f
-#define BIT_WLCLK_PHASE_8822B(x) \
- (((x) & BIT_MASK_WLCLK_PHASE_8822B) << BIT_SHIFT_WLCLK_PHASE_8822B)
-#define BIT_GET_WLCLK_PHASE_8822B(x) \
- (((x) >> BIT_SHIFT_WLCLK_PHASE_8822B) & BIT_MASK_WLCLK_PHASE_8822B)
-
-/* 2 REG_SYS_SDIO_CTRL_8822B */
-#define BIT_DBG_GNT_WL_BT_8822B BIT(27)
-#define BIT_LTE_MUX_CTRL_PATH_8822B BIT(26)
-#define BIT_LTE_COEX_UART_8822B BIT(25)
-#define BIT_3W_LTE_WL_GPIO_8822B BIT(24)
-#define BIT_SDIO_INT_POLARITY_8822B BIT(19)
-#define BIT_SDIO_INT_8822B BIT(18)
-#define BIT_SDIO_OFF_EN_8822B BIT(17)
-#define BIT_SDIO_ON_EN_8822B BIT(16)
-#define BIT_PCIE_WAIT_TIMEOUT_EVENT_8822B BIT(10)
-#define BIT_PCIE_WAIT_TIME_8822B BIT(9)
-#define BIT_MPCIE_REFCLK_XTAL_SEL_8822B BIT(8)
-
-/* 2 REG_HCI_OPT_CTRL_8822B */
-
-#define BIT_SHIFT_TSFT_SEL_8822B 29
-#define BIT_MASK_TSFT_SEL_8822B 0x7
-#define BIT_TSFT_SEL_8822B(x) \
- (((x) & BIT_MASK_TSFT_SEL_8822B) << BIT_SHIFT_TSFT_SEL_8822B)
-#define BIT_GET_TSFT_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_TSFT_SEL_8822B) & BIT_MASK_TSFT_SEL_8822B)
-
-#define BIT_USB_HOST_PWR_OFF_EN_8822B BIT(12)
-#define BIT_SYM_LPS_BLOCK_EN_8822B BIT(11)
-#define BIT_USB_LPM_ACT_EN_8822B BIT(10)
-#define BIT_USB_LPM_NY_8822B BIT(9)
-#define BIT_USB_SUS_DIS_8822B BIT(8)
-
-#define BIT_SHIFT_SDIO_PAD_E_8822B 5
-#define BIT_MASK_SDIO_PAD_E_8822B 0x7
-#define BIT_SDIO_PAD_E_8822B(x) \
- (((x) & BIT_MASK_SDIO_PAD_E_8822B) << BIT_SHIFT_SDIO_PAD_E_8822B)
-#define BIT_GET_SDIO_PAD_E_8822B(x) \
- (((x) >> BIT_SHIFT_SDIO_PAD_E_8822B) & BIT_MASK_SDIO_PAD_E_8822B)
-
-#define BIT_USB_LPPLL_EN_8822B BIT(4)
-#define BIT_ROP_SW15_8822B BIT(2)
-#define BIT_PCI_CKRDY_OPT_8822B BIT(1)
-#define BIT_PCI_VAUX_EN_8822B BIT(0)
-
-/* 2 REG_AFE_CTRL4_8822B */
-
-/* 2 REG_LDO_SWR_CTRL_8822B */
-#define BIT_ZCD_HW_AUTO_EN_8822B BIT(27)
-#define BIT_ZCD_REGSEL_8822B BIT(26)
-
-#define BIT_SHIFT_AUTO_ZCD_IN_CODE_8822B 21
-#define BIT_MASK_AUTO_ZCD_IN_CODE_8822B 0x1f
-#define BIT_AUTO_ZCD_IN_CODE_8822B(x) \
- (((x) & BIT_MASK_AUTO_ZCD_IN_CODE_8822B) \
- << BIT_SHIFT_AUTO_ZCD_IN_CODE_8822B)
-#define BIT_GET_AUTO_ZCD_IN_CODE_8822B(x) \
- (((x) >> BIT_SHIFT_AUTO_ZCD_IN_CODE_8822B) & \
- BIT_MASK_AUTO_ZCD_IN_CODE_8822B)
-
-#define BIT_SHIFT_ZCD_CODE_IN_L_8822B 16
-#define BIT_MASK_ZCD_CODE_IN_L_8822B 0x1f
-#define BIT_ZCD_CODE_IN_L_8822B(x) \
- (((x) & BIT_MASK_ZCD_CODE_IN_L_8822B) << BIT_SHIFT_ZCD_CODE_IN_L_8822B)
-#define BIT_GET_ZCD_CODE_IN_L_8822B(x) \
- (((x) >> BIT_SHIFT_ZCD_CODE_IN_L_8822B) & BIT_MASK_ZCD_CODE_IN_L_8822B)
-
-#define BIT_SHIFT_LDO_HV5_DUMMY_8822B 14
-#define BIT_MASK_LDO_HV5_DUMMY_8822B 0x3
-#define BIT_LDO_HV5_DUMMY_8822B(x) \
- (((x) & BIT_MASK_LDO_HV5_DUMMY_8822B) << BIT_SHIFT_LDO_HV5_DUMMY_8822B)
-#define BIT_GET_LDO_HV5_DUMMY_8822B(x) \
- (((x) >> BIT_SHIFT_LDO_HV5_DUMMY_8822B) & BIT_MASK_LDO_HV5_DUMMY_8822B)
-
-#define BIT_SHIFT_REG_VTUNE33_BIT0_TO_BIT1_8822B 12
-#define BIT_MASK_REG_VTUNE33_BIT0_TO_BIT1_8822B 0x3
-#define BIT_REG_VTUNE33_BIT0_TO_BIT1_8822B(x) \
- (((x) & BIT_MASK_REG_VTUNE33_BIT0_TO_BIT1_8822B) \
- << BIT_SHIFT_REG_VTUNE33_BIT0_TO_BIT1_8822B)
-#define BIT_GET_REG_VTUNE33_BIT0_TO_BIT1_8822B(x) \
- (((x) >> BIT_SHIFT_REG_VTUNE33_BIT0_TO_BIT1_8822B) & \
- BIT_MASK_REG_VTUNE33_BIT0_TO_BIT1_8822B)
-
-#define BIT_SHIFT_REG_STANDBY33_BIT0_TO_BIT1_8822B 10
-#define BIT_MASK_REG_STANDBY33_BIT0_TO_BIT1_8822B 0x3
-#define BIT_REG_STANDBY33_BIT0_TO_BIT1_8822B(x) \
- (((x) & BIT_MASK_REG_STANDBY33_BIT0_TO_BIT1_8822B) \
- << BIT_SHIFT_REG_STANDBY33_BIT0_TO_BIT1_8822B)
-#define BIT_GET_REG_STANDBY33_BIT0_TO_BIT1_8822B(x) \
- (((x) >> BIT_SHIFT_REG_STANDBY33_BIT0_TO_BIT1_8822B) & \
- BIT_MASK_REG_STANDBY33_BIT0_TO_BIT1_8822B)
-
-#define BIT_SHIFT_REG_LOAD33_BIT0_TO_BIT1_8822B 8
-#define BIT_MASK_REG_LOAD33_BIT0_TO_BIT1_8822B 0x3
-#define BIT_REG_LOAD33_BIT0_TO_BIT1_8822B(x) \
- (((x) & BIT_MASK_REG_LOAD33_BIT0_TO_BIT1_8822B) \
- << BIT_SHIFT_REG_LOAD33_BIT0_TO_BIT1_8822B)
-#define BIT_GET_REG_LOAD33_BIT0_TO_BIT1_8822B(x) \
- (((x) >> BIT_SHIFT_REG_LOAD33_BIT0_TO_BIT1_8822B) & \
- BIT_MASK_REG_LOAD33_BIT0_TO_BIT1_8822B)
-
-#define BIT_REG_BYPASS_L_8822B BIT(7)
-#define BIT_REG_LDOF_L_8822B BIT(6)
-#define BIT_REG_TYPE_L_V1_8822B BIT(5)
-#define BIT_ARENB_L_8822B BIT(3)
-
-#define BIT_SHIFT_CFC_L_8822B 1
-#define BIT_MASK_CFC_L_8822B 0x3
-#define BIT_CFC_L_8822B(x) \
- (((x) & BIT_MASK_CFC_L_8822B) << BIT_SHIFT_CFC_L_8822B)
-#define BIT_GET_CFC_L_8822B(x) \
- (((x) >> BIT_SHIFT_CFC_L_8822B) & BIT_MASK_CFC_L_8822B)
-
-#define BIT_REG_OCPS_L_V1_8822B BIT(0)
-
-/* 2 REG_MCUFW_CTRL_8822B */
-
-#define BIT_SHIFT_RPWM_8822B 24
-#define BIT_MASK_RPWM_8822B 0xff
-#define BIT_RPWM_8822B(x) (((x) & BIT_MASK_RPWM_8822B) << BIT_SHIFT_RPWM_8822B)
-#define BIT_GET_RPWM_8822B(x) \
- (((x) >> BIT_SHIFT_RPWM_8822B) & BIT_MASK_RPWM_8822B)
-
-#define BIT_ANA_PORT_EN_8822B BIT(22)
-#define BIT_MAC_PORT_EN_8822B BIT(21)
-#define BIT_BOOT_FSPI_EN_8822B BIT(20)
-#define BIT_ROM_DLEN_8822B BIT(19)
-
-#define BIT_SHIFT_ROM_PGE_8822B 16
-#define BIT_MASK_ROM_PGE_8822B 0x7
-#define BIT_ROM_PGE_8822B(x) \
- (((x) & BIT_MASK_ROM_PGE_8822B) << BIT_SHIFT_ROM_PGE_8822B)
-#define BIT_GET_ROM_PGE_8822B(x) \
- (((x) >> BIT_SHIFT_ROM_PGE_8822B) & BIT_MASK_ROM_PGE_8822B)
-
-#define BIT_FW_INIT_RDY_8822B BIT(15)
-#define BIT_FW_DW_RDY_8822B BIT(14)
-
-#define BIT_SHIFT_CPU_CLK_SEL_8822B 12
-#define BIT_MASK_CPU_CLK_SEL_8822B 0x3
-#define BIT_CPU_CLK_SEL_8822B(x) \
- (((x) & BIT_MASK_CPU_CLK_SEL_8822B) << BIT_SHIFT_CPU_CLK_SEL_8822B)
-#define BIT_GET_CPU_CLK_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_CPU_CLK_SEL_8822B) & BIT_MASK_CPU_CLK_SEL_8822B)
-
-#define BIT_CCLK_CHG_MASK_8822B BIT(11)
-#define BIT_EMEM__TXBUF_CHKSUM_OK_8822B BIT(10)
-#define BIT_EMEM_TXBUF_DW_RDY_8822B BIT(9)
-#define BIT_EMEM_CHKSUM_OK_8822B BIT(8)
-#define BIT_EMEM_DW_OK_8822B BIT(7)
-#define BIT_DMEM_CHKSUM_OK_8822B BIT(6)
-#define BIT_DMEM_DW_OK_8822B BIT(5)
-#define BIT_IMEM_CHKSUM_OK_8822B BIT(4)
-#define BIT_IMEM_DW_OK_8822B BIT(3)
-#define BIT_IMEM_BOOT_LOAD_CHKSUM_OK_8822B BIT(2)
-#define BIT_IMEM_BOOT_LOAD_DW_OK_8822B BIT(1)
-#define BIT_MCUFWDL_EN_8822B BIT(0)
-
-/* 2 REG_MCU_TST_CFG_8822B */
-
-#define BIT_SHIFT_LBKTST_8822B 0
-#define BIT_MASK_LBKTST_8822B 0xffff
-#define BIT_LBKTST_8822B(x) \
- (((x) & BIT_MASK_LBKTST_8822B) << BIT_SHIFT_LBKTST_8822B)
-#define BIT_GET_LBKTST_8822B(x) \
- (((x) >> BIT_SHIFT_LBKTST_8822B) & BIT_MASK_LBKTST_8822B)
-
-/* 2 REG_HMEBOX_E0_E1_8822B */
-
-#define BIT_SHIFT_HOST_MSG_E1_8822B 16
-#define BIT_MASK_HOST_MSG_E1_8822B 0xffff
-#define BIT_HOST_MSG_E1_8822B(x) \
- (((x) & BIT_MASK_HOST_MSG_E1_8822B) << BIT_SHIFT_HOST_MSG_E1_8822B)
-#define BIT_GET_HOST_MSG_E1_8822B(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_E1_8822B) & BIT_MASK_HOST_MSG_E1_8822B)
-
-#define BIT_SHIFT_HOST_MSG_E0_8822B 0
-#define BIT_MASK_HOST_MSG_E0_8822B 0xffff
-#define BIT_HOST_MSG_E0_8822B(x) \
- (((x) & BIT_MASK_HOST_MSG_E0_8822B) << BIT_SHIFT_HOST_MSG_E0_8822B)
-#define BIT_GET_HOST_MSG_E0_8822B(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_E0_8822B) & BIT_MASK_HOST_MSG_E0_8822B)
-
-/* 2 REG_HMEBOX_E2_E3_8822B */
-
-#define BIT_SHIFT_HOST_MSG_E3_8822B 16
-#define BIT_MASK_HOST_MSG_E3_8822B 0xffff
-#define BIT_HOST_MSG_E3_8822B(x) \
- (((x) & BIT_MASK_HOST_MSG_E3_8822B) << BIT_SHIFT_HOST_MSG_E3_8822B)
-#define BIT_GET_HOST_MSG_E3_8822B(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_E3_8822B) & BIT_MASK_HOST_MSG_E3_8822B)
-
-#define BIT_SHIFT_HOST_MSG_E2_8822B 0
-#define BIT_MASK_HOST_MSG_E2_8822B 0xffff
-#define BIT_HOST_MSG_E2_8822B(x) \
- (((x) & BIT_MASK_HOST_MSG_E2_8822B) << BIT_SHIFT_HOST_MSG_E2_8822B)
-#define BIT_GET_HOST_MSG_E2_8822B(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_E2_8822B) & BIT_MASK_HOST_MSG_E2_8822B)
-
-/* 2 REG_WLLPS_CTRL_8822B */
-#define BIT_WLLPSOP_EABM_8822B BIT(31)
-#define BIT_WLLPSOP_ACKF_8822B BIT(30)
-#define BIT_WLLPSOP_DLDM_8822B BIT(29)
-#define BIT_WLLPSOP_ESWR_8822B BIT(28)
-#define BIT_WLLPSOP_PWMM_8822B BIT(27)
-#define BIT_WLLPSOP_EECK_8822B BIT(26)
-#define BIT_WLLPSOP_WLMACOFF_8822B BIT(25)
-#define BIT_WLLPSOP_EXTAL_8822B BIT(24)
-#define BIT_WL_SYNPON_VOLTSPDN_8822B BIT(23)
-#define BIT_WLLPSOP_WLBBOFF_8822B BIT(22)
-#define BIT_WLLPSOP_WLMEM_DS_8822B BIT(21)
-
-#define BIT_SHIFT_LPLDH12_VADJ_STEP_DN_8822B 12
-#define BIT_MASK_LPLDH12_VADJ_STEP_DN_8822B 0xf
-#define BIT_LPLDH12_VADJ_STEP_DN_8822B(x) \
- (((x) & BIT_MASK_LPLDH12_VADJ_STEP_DN_8822B) \
- << BIT_SHIFT_LPLDH12_VADJ_STEP_DN_8822B)
-#define BIT_GET_LPLDH12_VADJ_STEP_DN_8822B(x) \
- (((x) >> BIT_SHIFT_LPLDH12_VADJ_STEP_DN_8822B) & \
- BIT_MASK_LPLDH12_VADJ_STEP_DN_8822B)
-
-#define BIT_SHIFT_V15ADJ_L1_STEP_DN_8822B 8
-#define BIT_MASK_V15ADJ_L1_STEP_DN_8822B 0x7
-#define BIT_V15ADJ_L1_STEP_DN_8822B(x) \
- (((x) & BIT_MASK_V15ADJ_L1_STEP_DN_8822B) \
- << BIT_SHIFT_V15ADJ_L1_STEP_DN_8822B)
-#define BIT_GET_V15ADJ_L1_STEP_DN_8822B(x) \
- (((x) >> BIT_SHIFT_V15ADJ_L1_STEP_DN_8822B) & \
- BIT_MASK_V15ADJ_L1_STEP_DN_8822B)
-
-#define BIT_REGU_32K_CLK_EN_8822B BIT(1)
-#define BIT_WL_LPS_EN_8822B BIT(0)
-
-/* 2 REG_AFE_CTRL5_8822B */
-#define BIT_BB_DBG_SEL_AFE_SDM_BIT0_8822B BIT(31)
-#define BIT_ORDER_SDM_8822B BIT(30)
-#define BIT_RFE_SEL_SDM_8822B BIT(29)
-
-#define BIT_SHIFT_REF_SEL_8822B 25
-#define BIT_MASK_REF_SEL_8822B 0xf
-#define BIT_REF_SEL_8822B(x) \
- (((x) & BIT_MASK_REF_SEL_8822B) << BIT_SHIFT_REF_SEL_8822B)
-#define BIT_GET_REF_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_REF_SEL_8822B) & BIT_MASK_REF_SEL_8822B)
-
-#define BIT_SHIFT_F0F_SDM_8822B 12
-#define BIT_MASK_F0F_SDM_8822B 0x1fff
-#define BIT_F0F_SDM_8822B(x) \
- (((x) & BIT_MASK_F0F_SDM_8822B) << BIT_SHIFT_F0F_SDM_8822B)
-#define BIT_GET_F0F_SDM_8822B(x) \
- (((x) >> BIT_SHIFT_F0F_SDM_8822B) & BIT_MASK_F0F_SDM_8822B)
-
-#define BIT_SHIFT_F0N_SDM_8822B 9
-#define BIT_MASK_F0N_SDM_8822B 0x7
-#define BIT_F0N_SDM_8822B(x) \
- (((x) & BIT_MASK_F0N_SDM_8822B) << BIT_SHIFT_F0N_SDM_8822B)
-#define BIT_GET_F0N_SDM_8822B(x) \
- (((x) >> BIT_SHIFT_F0N_SDM_8822B) & BIT_MASK_F0N_SDM_8822B)
-
-#define BIT_SHIFT_DIVN_SDM_8822B 3
-#define BIT_MASK_DIVN_SDM_8822B 0x3f
-#define BIT_DIVN_SDM_8822B(x) \
- (((x) & BIT_MASK_DIVN_SDM_8822B) << BIT_SHIFT_DIVN_SDM_8822B)
-#define BIT_GET_DIVN_SDM_8822B(x) \
- (((x) >> BIT_SHIFT_DIVN_SDM_8822B) & BIT_MASK_DIVN_SDM_8822B)
-
-/* 2 REG_GPIO_DEBOUNCE_CTRL_8822B */
-#define BIT_WLGP_DBC1EN_8822B BIT(15)
-
-#define BIT_SHIFT_WLGP_DBC1_8822B 8
-#define BIT_MASK_WLGP_DBC1_8822B 0xf
-#define BIT_WLGP_DBC1_8822B(x) \
- (((x) & BIT_MASK_WLGP_DBC1_8822B) << BIT_SHIFT_WLGP_DBC1_8822B)
-#define BIT_GET_WLGP_DBC1_8822B(x) \
- (((x) >> BIT_SHIFT_WLGP_DBC1_8822B) & BIT_MASK_WLGP_DBC1_8822B)
-
-#define BIT_WLGP_DBC0EN_8822B BIT(7)
-
-#define BIT_SHIFT_WLGP_DBC0_8822B 0
-#define BIT_MASK_WLGP_DBC0_8822B 0xf
-#define BIT_WLGP_DBC0_8822B(x) \
- (((x) & BIT_MASK_WLGP_DBC0_8822B) << BIT_SHIFT_WLGP_DBC0_8822B)
-#define BIT_GET_WLGP_DBC0_8822B(x) \
- (((x) >> BIT_SHIFT_WLGP_DBC0_8822B) & BIT_MASK_WLGP_DBC0_8822B)
-
-/* 2 REG_RPWM2_8822B */
-
-#define BIT_SHIFT_RPWM2_8822B 16
-#define BIT_MASK_RPWM2_8822B 0xffff
-#define BIT_RPWM2_8822B(x) \
- (((x) & BIT_MASK_RPWM2_8822B) << BIT_SHIFT_RPWM2_8822B)
-#define BIT_GET_RPWM2_8822B(x) \
- (((x) >> BIT_SHIFT_RPWM2_8822B) & BIT_MASK_RPWM2_8822B)
-
-/* 2 REG_SYSON_FSM_MON_8822B */
-
-#define BIT_SHIFT_FSM_MON_SEL_8822B 24
-#define BIT_MASK_FSM_MON_SEL_8822B 0x7
-#define BIT_FSM_MON_SEL_8822B(x) \
- (((x) & BIT_MASK_FSM_MON_SEL_8822B) << BIT_SHIFT_FSM_MON_SEL_8822B)
-#define BIT_GET_FSM_MON_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_FSM_MON_SEL_8822B) & BIT_MASK_FSM_MON_SEL_8822B)
-
-#define BIT_DOP_ELDO_8822B BIT(23)
-#define BIT_FSM_MON_UPD_8822B BIT(15)
-
-#define BIT_SHIFT_FSM_PAR_8822B 0
-#define BIT_MASK_FSM_PAR_8822B 0x7fff
-#define BIT_FSM_PAR_8822B(x) \
- (((x) & BIT_MASK_FSM_PAR_8822B) << BIT_SHIFT_FSM_PAR_8822B)
-#define BIT_GET_FSM_PAR_8822B(x) \
- (((x) >> BIT_SHIFT_FSM_PAR_8822B) & BIT_MASK_FSM_PAR_8822B)
-
-/* 2 REG_AFE_CTRL6_8822B */
-
-#define BIT_SHIFT_BB_DBG_SEL_AFE_SDM_BIT3_1_8822B 0
-#define BIT_MASK_BB_DBG_SEL_AFE_SDM_BIT3_1_8822B 0x7
-#define BIT_BB_DBG_SEL_AFE_SDM_BIT3_1_8822B(x) \
- (((x) & BIT_MASK_BB_DBG_SEL_AFE_SDM_BIT3_1_8822B) \
- << BIT_SHIFT_BB_DBG_SEL_AFE_SDM_BIT3_1_8822B)
-#define BIT_GET_BB_DBG_SEL_AFE_SDM_BIT3_1_8822B(x) \
- (((x) >> BIT_SHIFT_BB_DBG_SEL_AFE_SDM_BIT3_1_8822B) & \
- BIT_MASK_BB_DBG_SEL_AFE_SDM_BIT3_1_8822B)
-
-/* 2 REG_PMC_DBG_CTRL1_8822B */
-#define BIT_BT_INT_EN_8822B BIT(31)
-
-#define BIT_SHIFT_RD_WR_WIFI_BT_INFO_8822B 16
-#define BIT_MASK_RD_WR_WIFI_BT_INFO_8822B 0x7fff
-#define BIT_RD_WR_WIFI_BT_INFO_8822B(x) \
- (((x) & BIT_MASK_RD_WR_WIFI_BT_INFO_8822B) \
- << BIT_SHIFT_RD_WR_WIFI_BT_INFO_8822B)
-#define BIT_GET_RD_WR_WIFI_BT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_RD_WR_WIFI_BT_INFO_8822B) & \
- BIT_MASK_RD_WR_WIFI_BT_INFO_8822B)
-
-#define BIT_PMC_WR_OVF_8822B BIT(8)
-
-#define BIT_SHIFT_WLPMC_ERRINT_8822B 0
-#define BIT_MASK_WLPMC_ERRINT_8822B 0xff
-#define BIT_WLPMC_ERRINT_8822B(x) \
- (((x) & BIT_MASK_WLPMC_ERRINT_8822B) << BIT_SHIFT_WLPMC_ERRINT_8822B)
-#define BIT_GET_WLPMC_ERRINT_8822B(x) \
- (((x) >> BIT_SHIFT_WLPMC_ERRINT_8822B) & BIT_MASK_WLPMC_ERRINT_8822B)
-
-/* 2 REG_AFE_CTRL7_8822B */
-
-#define BIT_SHIFT_SEL_V_8822B 30
-#define BIT_MASK_SEL_V_8822B 0x3
-#define BIT_SEL_V_8822B(x) \
- (((x) & BIT_MASK_SEL_V_8822B) << BIT_SHIFT_SEL_V_8822B)
-#define BIT_GET_SEL_V_8822B(x) \
- (((x) >> BIT_SHIFT_SEL_V_8822B) & BIT_MASK_SEL_V_8822B)
-
-#define BIT_SEL_LDO_PC_8822B BIT(29)
-
-#define BIT_SHIFT_CK_MON_SEL_8822B 26
-#define BIT_MASK_CK_MON_SEL_8822B 0x7
-#define BIT_CK_MON_SEL_8822B(x) \
- (((x) & BIT_MASK_CK_MON_SEL_8822B) << BIT_SHIFT_CK_MON_SEL_8822B)
-#define BIT_GET_CK_MON_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_CK_MON_SEL_8822B) & BIT_MASK_CK_MON_SEL_8822B)
-
-#define BIT_CK_MON_EN_8822B BIT(25)
-#define BIT_FREF_EDGE_8822B BIT(24)
-#define BIT_CK320M_EN_8822B BIT(23)
-#define BIT_CK_5M_EN_8822B BIT(22)
-#define BIT_TESTEN_8822B BIT(21)
-
-/* 2 REG_HIMR0_8822B */
-#define BIT_TIMEOUT_INTERRUPT2_MASK_8822B BIT(31)
-#define BIT_TIMEOUT_INTERRUTP1_MASK_8822B BIT(30)
-#define BIT_PSTIMEOUT_MSK_8822B BIT(29)
-#define BIT_GTINT4_MSK_8822B BIT(28)
-#define BIT_GTINT3_MSK_8822B BIT(27)
-#define BIT_TXBCN0ERR_MSK_8822B BIT(26)
-#define BIT_TXBCN0OK_MSK_8822B BIT(25)
-#define BIT_TSF_BIT32_TOGGLE_MSK_8822B BIT(24)
-#define BIT_BCNDMAINT0_MSK_8822B BIT(20)
-#define BIT_BCNDERR0_MSK_8822B BIT(16)
-#define BIT_HSISR_IND_ON_INT_MSK_8822B BIT(15)
-#define BIT_BCNDMAINT_E_MSK_8822B BIT(14)
-#define BIT_CTWEND_MSK_8822B BIT(12)
-#define BIT_HISR1_IND_MSK_8822B BIT(11)
-#define BIT_C2HCMD_MSK_8822B BIT(10)
-#define BIT_CPWM2_MSK_8822B BIT(9)
-#define BIT_CPWM_MSK_8822B BIT(8)
-#define BIT_HIGHDOK_MSK_8822B BIT(7)
-#define BIT_MGTDOK_MSK_8822B BIT(6)
-#define BIT_BKDOK_MSK_8822B BIT(5)
-#define BIT_BEDOK_MSK_8822B BIT(4)
-#define BIT_VIDOK_MSK_8822B BIT(3)
-#define BIT_VODOK_MSK_8822B BIT(2)
-#define BIT_RDU_MSK_8822B BIT(1)
-#define BIT_RXOK_MSK_8822B BIT(0)
-
-/* 2 REG_HISR0_8822B */
-#define BIT_TIMEOUT_INTERRUPT2_8822B BIT(31)
-#define BIT_TIMEOUT_INTERRUTP1_8822B BIT(30)
-#define BIT_PSTIMEOUT_8822B BIT(29)
-#define BIT_GTINT4_8822B BIT(28)
-#define BIT_GTINT3_8822B BIT(27)
-#define BIT_TXBCN0ERR_8822B BIT(26)
-#define BIT_TXBCN0OK_8822B BIT(25)
-#define BIT_TSF_BIT32_TOGGLE_8822B BIT(24)
-#define BIT_BCNDMAINT0_8822B BIT(20)
-#define BIT_BCNDERR0_8822B BIT(16)
-#define BIT_HSISR_IND_ON_INT_8822B BIT(15)
-#define BIT_BCNDMAINT_E_8822B BIT(14)
-#define BIT_CTWEND_8822B BIT(12)
-#define BIT_HISR1_IND_INT_8822B BIT(11)
-#define BIT_C2HCMD_8822B BIT(10)
-#define BIT_CPWM2_8822B BIT(9)
-#define BIT_CPWM_8822B BIT(8)
-#define BIT_HIGHDOK_8822B BIT(7)
-#define BIT_MGTDOK_8822B BIT(6)
-#define BIT_BKDOK_8822B BIT(5)
-#define BIT_BEDOK_8822B BIT(4)
-#define BIT_VIDOK_8822B BIT(3)
-#define BIT_VODOK_8822B BIT(2)
-#define BIT_RDU_8822B BIT(1)
-#define BIT_RXOK_8822B BIT(0)
-
-/* 2 REG_HIMR1_8822B */
-#define BIT_TXFIFO_TH_INT_8822B BIT(30)
-#define BIT_BTON_STS_UPDATE_MASK_8822B BIT(29)
-#define BIT_MCU_ERR_MASK_8822B BIT(28)
-#define BIT_BCNDMAINT7__MSK_8822B BIT(27)
-#define BIT_BCNDMAINT6__MSK_8822B BIT(26)
-#define BIT_BCNDMAINT5__MSK_8822B BIT(25)
-#define BIT_BCNDMAINT4__MSK_8822B BIT(24)
-#define BIT_BCNDMAINT3_MSK_8822B BIT(23)
-#define BIT_BCNDMAINT2_MSK_8822B BIT(22)
-#define BIT_BCNDMAINT1_MSK_8822B BIT(21)
-#define BIT_BCNDERR7_MSK_8822B BIT(20)
-#define BIT_BCNDERR6_MSK_8822B BIT(19)
-#define BIT_BCNDERR5_MSK_8822B BIT(18)
-#define BIT_BCNDERR4_MSK_8822B BIT(17)
-#define BIT_BCNDERR3_MSK_8822B BIT(16)
-#define BIT_BCNDERR2_MSK_8822B BIT(15)
-#define BIT_BCNDERR1_MSK_8822B BIT(14)
-#define BIT_ATIMEND_E_MSK_8822B BIT(13)
-#define BIT_ATIMEND__MSK_8822B BIT(12)
-#define BIT_TXERR_MSK_8822B BIT(11)
-#define BIT_RXERR_MSK_8822B BIT(10)
-#define BIT_TXFOVW_MSK_8822B BIT(9)
-#define BIT_FOVW_MSK_8822B BIT(8)
-#define BIT_CPU_MGQ_TXDONE_MSK_8822B BIT(5)
-#define BIT_PS_TIMER_C_MSK_8822B BIT(4)
-#define BIT_PS_TIMER_B_MSK_8822B BIT(3)
-#define BIT_PS_TIMER_A_MSK_8822B BIT(2)
-#define BIT_CPUMGQ_TX_TIMER_MSK_8822B BIT(1)
-
-/* 2 REG_HISR1_8822B */
-#define BIT_TXFIFO_TH_INT_8822B BIT(30)
-#define BIT_BTON_STS_UPDATE_INT_8822B BIT(29)
-#define BIT_MCU_ERR_8822B BIT(28)
-#define BIT_BCNDMAINT7_8822B BIT(27)
-#define BIT_BCNDMAINT6_8822B BIT(26)
-#define BIT_BCNDMAINT5_8822B BIT(25)
-#define BIT_BCNDMAINT4_8822B BIT(24)
-#define BIT_BCNDMAINT3_8822B BIT(23)
-#define BIT_BCNDMAINT2_8822B BIT(22)
-#define BIT_BCNDMAINT1_8822B BIT(21)
-#define BIT_BCNDERR7_8822B BIT(20)
-#define BIT_BCNDERR6_8822B BIT(19)
-#define BIT_BCNDERR5_8822B BIT(18)
-#define BIT_BCNDERR4_8822B BIT(17)
-#define BIT_BCNDERR3_8822B BIT(16)
-#define BIT_BCNDERR2_8822B BIT(15)
-#define BIT_BCNDERR1_8822B BIT(14)
-#define BIT_ATIMEND_E_8822B BIT(13)
-#define BIT_ATIMEND_8822B BIT(12)
-#define BIT_TXERR_INT_8822B BIT(11)
-#define BIT_RXERR_INT_8822B BIT(10)
-#define BIT_TXFOVW_8822B BIT(9)
-#define BIT_FOVW_8822B BIT(8)
-#define BIT_CPU_MGQ_TXDONE_8822B BIT(5)
-#define BIT_PS_TIMER_C_8822B BIT(4)
-#define BIT_PS_TIMER_B_8822B BIT(3)
-#define BIT_PS_TIMER_A_8822B BIT(2)
-#define BIT_CPUMGQ_TX_TIMER_8822B BIT(1)
-
-/* 2 REG_DBG_PORT_SEL_8822B */
-
-#define BIT_SHIFT_DEBUG_ST_8822B 0
-#define BIT_MASK_DEBUG_ST_8822B 0xffffffffL
-#define BIT_DEBUG_ST_8822B(x) \
- (((x) & BIT_MASK_DEBUG_ST_8822B) << BIT_SHIFT_DEBUG_ST_8822B)
-#define BIT_GET_DEBUG_ST_8822B(x) \
- (((x) >> BIT_SHIFT_DEBUG_ST_8822B) & BIT_MASK_DEBUG_ST_8822B)
-
-/* 2 REG_PAD_CTRL2_8822B */
-#define BIT_USB3_USB2_TRANSITION_8822B BIT(20)
-
-#define BIT_SHIFT_USB23_SW_MODE_V1_8822B 18
-#define BIT_MASK_USB23_SW_MODE_V1_8822B 0x3
-#define BIT_USB23_SW_MODE_V1_8822B(x) \
- (((x) & BIT_MASK_USB23_SW_MODE_V1_8822B) \
- << BIT_SHIFT_USB23_SW_MODE_V1_8822B)
-#define BIT_GET_USB23_SW_MODE_V1_8822B(x) \
- (((x) >> BIT_SHIFT_USB23_SW_MODE_V1_8822B) & \
- BIT_MASK_USB23_SW_MODE_V1_8822B)
-
-#define BIT_NO_PDN_CHIPOFF_V1_8822B BIT(17)
-#define BIT_RSM_EN_V1_8822B BIT(16)
-
-#define BIT_SHIFT_MATCH_CNT_8822B 8
-#define BIT_MASK_MATCH_CNT_8822B 0xff
-#define BIT_MATCH_CNT_8822B(x) \
- (((x) & BIT_MASK_MATCH_CNT_8822B) << BIT_SHIFT_MATCH_CNT_8822B)
-#define BIT_GET_MATCH_CNT_8822B(x) \
- (((x) >> BIT_SHIFT_MATCH_CNT_8822B) & BIT_MASK_MATCH_CNT_8822B)
-
-#define BIT_LD_B12V_EN_8822B BIT(7)
-#define BIT_EECS_IOSEL_V1_8822B BIT(6)
-#define BIT_EECS_DATA_O_V1_8822B BIT(5)
-#define BIT_EECS_DATA_I_V1_8822B BIT(4)
-#define BIT_EESK_IOSEL_V1_8822B BIT(2)
-#define BIT_EESK_DATA_O_V1_8822B BIT(1)
-#define BIT_EESK_DATA_I_V1_8822B BIT(0)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_PMC_DBG_CTRL2_8822B */
-
-#define BIT_SHIFT_EFUSE_BURN_GNT_8822B 24
-#define BIT_MASK_EFUSE_BURN_GNT_8822B 0xff
-#define BIT_EFUSE_BURN_GNT_8822B(x) \
- (((x) & BIT_MASK_EFUSE_BURN_GNT_8822B) \
- << BIT_SHIFT_EFUSE_BURN_GNT_8822B)
-#define BIT_GET_EFUSE_BURN_GNT_8822B(x) \
- (((x) >> BIT_SHIFT_EFUSE_BURN_GNT_8822B) & \
- BIT_MASK_EFUSE_BURN_GNT_8822B)
-
-#define BIT_STOP_WL_PMC_8822B BIT(9)
-#define BIT_STOP_SYM_PMC_8822B BIT(8)
-#define BIT_REG_RST_WLPMC_8822B BIT(5)
-#define BIT_REG_RST_PD12N_8822B BIT(4)
-#define BIT_SYSON_DIS_WLREG_WRMSK_8822B BIT(3)
-#define BIT_SYSON_DIS_PMCREG_WRMSK_8822B BIT(2)
-
-#define BIT_SHIFT_SYSON_REG_ARB_8822B 0
-#define BIT_MASK_SYSON_REG_ARB_8822B 0x3
-#define BIT_SYSON_REG_ARB_8822B(x) \
- (((x) & BIT_MASK_SYSON_REG_ARB_8822B) << BIT_SHIFT_SYSON_REG_ARB_8822B)
-#define BIT_GET_SYSON_REG_ARB_8822B(x) \
- (((x) >> BIT_SHIFT_SYSON_REG_ARB_8822B) & BIT_MASK_SYSON_REG_ARB_8822B)
-
-/* 2 REG_BIST_CTRL_8822B */
-#define BIT_BIST_USB_DIS_8822B BIT(27)
-#define BIT_BIST_PCI_DIS_8822B BIT(26)
-#define BIT_BIST_BT_DIS_8822B BIT(25)
-#define BIT_BIST_WL_DIS_8822B BIT(24)
-
-#define BIT_SHIFT_BIST_RPT_SEL_8822B 16
-#define BIT_MASK_BIST_RPT_SEL_8822B 0xf
-#define BIT_BIST_RPT_SEL_8822B(x) \
- (((x) & BIT_MASK_BIST_RPT_SEL_8822B) << BIT_SHIFT_BIST_RPT_SEL_8822B)
-#define BIT_GET_BIST_RPT_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_BIST_RPT_SEL_8822B) & BIT_MASK_BIST_RPT_SEL_8822B)
-
-#define BIT_BIST_RESUME_PS_8822B BIT(4)
-#define BIT_BIST_RESUME_8822B BIT(3)
-#define BIT_BIST_NORMAL_8822B BIT(2)
-#define BIT_BIST_RSTN_8822B BIT(1)
-#define BIT_BIST_CLK_EN_8822B BIT(0)
-
-/* 2 REG_BIST_RPT_8822B */
-
-#define BIT_SHIFT_MBIST_REPORT_8822B 0
-#define BIT_MASK_MBIST_REPORT_8822B 0xffffffffL
-#define BIT_MBIST_REPORT_8822B(x) \
- (((x) & BIT_MASK_MBIST_REPORT_8822B) << BIT_SHIFT_MBIST_REPORT_8822B)
-#define BIT_GET_MBIST_REPORT_8822B(x) \
- (((x) >> BIT_SHIFT_MBIST_REPORT_8822B) & BIT_MASK_MBIST_REPORT_8822B)
-
-/* 2 REG_MEM_CTRL_8822B */
-#define BIT_UMEM_RME_8822B BIT(31)
-
-#define BIT_SHIFT_BT_SPRAM_8822B 28
-#define BIT_MASK_BT_SPRAM_8822B 0x3
-#define BIT_BT_SPRAM_8822B(x) \
- (((x) & BIT_MASK_BT_SPRAM_8822B) << BIT_SHIFT_BT_SPRAM_8822B)
-#define BIT_GET_BT_SPRAM_8822B(x) \
- (((x) >> BIT_SHIFT_BT_SPRAM_8822B) & BIT_MASK_BT_SPRAM_8822B)
-
-#define BIT_SHIFT_BT_ROM_8822B 24
-#define BIT_MASK_BT_ROM_8822B 0xf
-#define BIT_BT_ROM_8822B(x) \
- (((x) & BIT_MASK_BT_ROM_8822B) << BIT_SHIFT_BT_ROM_8822B)
-#define BIT_GET_BT_ROM_8822B(x) \
- (((x) >> BIT_SHIFT_BT_ROM_8822B) & BIT_MASK_BT_ROM_8822B)
-
-#define BIT_SHIFT_PCI_DPRAM_8822B 10
-#define BIT_MASK_PCI_DPRAM_8822B 0x3
-#define BIT_PCI_DPRAM_8822B(x) \
- (((x) & BIT_MASK_PCI_DPRAM_8822B) << BIT_SHIFT_PCI_DPRAM_8822B)
-#define BIT_GET_PCI_DPRAM_8822B(x) \
- (((x) >> BIT_SHIFT_PCI_DPRAM_8822B) & BIT_MASK_PCI_DPRAM_8822B)
-
-#define BIT_SHIFT_PCI_SPRAM_8822B 8
-#define BIT_MASK_PCI_SPRAM_8822B 0x3
-#define BIT_PCI_SPRAM_8822B(x) \
- (((x) & BIT_MASK_PCI_SPRAM_8822B) << BIT_SHIFT_PCI_SPRAM_8822B)
-#define BIT_GET_PCI_SPRAM_8822B(x) \
- (((x) >> BIT_SHIFT_PCI_SPRAM_8822B) & BIT_MASK_PCI_SPRAM_8822B)
-
-#define BIT_SHIFT_USB_SPRAM_8822B 6
-#define BIT_MASK_USB_SPRAM_8822B 0x3
-#define BIT_USB_SPRAM_8822B(x) \
- (((x) & BIT_MASK_USB_SPRAM_8822B) << BIT_SHIFT_USB_SPRAM_8822B)
-#define BIT_GET_USB_SPRAM_8822B(x) \
- (((x) >> BIT_SHIFT_USB_SPRAM_8822B) & BIT_MASK_USB_SPRAM_8822B)
-
-#define BIT_SHIFT_USB_SPRF_8822B 4
-#define BIT_MASK_USB_SPRF_8822B 0x3
-#define BIT_USB_SPRF_8822B(x) \
- (((x) & BIT_MASK_USB_SPRF_8822B) << BIT_SHIFT_USB_SPRF_8822B)
-#define BIT_GET_USB_SPRF_8822B(x) \
- (((x) >> BIT_SHIFT_USB_SPRF_8822B) & BIT_MASK_USB_SPRF_8822B)
-
-#define BIT_SHIFT_MCU_ROM_8822B 0
-#define BIT_MASK_MCU_ROM_8822B 0xf
-#define BIT_MCU_ROM_8822B(x) \
- (((x) & BIT_MASK_MCU_ROM_8822B) << BIT_SHIFT_MCU_ROM_8822B)
-#define BIT_GET_MCU_ROM_8822B(x) \
- (((x) >> BIT_SHIFT_MCU_ROM_8822B) & BIT_MASK_MCU_ROM_8822B)
-
-/* 2 REG_AFE_CTRL8_8822B */
-#define BIT_SYN_AGPIO_8822B BIT(20)
-#define BIT_XTAL_LP_8822B BIT(4)
-#define BIT_XTAL_GM_SEP_8822B BIT(3)
-
-#define BIT_SHIFT_XTAL_SEL_TOK_8822B 0
-#define BIT_MASK_XTAL_SEL_TOK_8822B 0x7
-#define BIT_XTAL_SEL_TOK_8822B(x) \
- (((x) & BIT_MASK_XTAL_SEL_TOK_8822B) << BIT_SHIFT_XTAL_SEL_TOK_8822B)
-#define BIT_GET_XTAL_SEL_TOK_8822B(x) \
- (((x) >> BIT_SHIFT_XTAL_SEL_TOK_8822B) & BIT_MASK_XTAL_SEL_TOK_8822B)
-
-/* 2 REG_USB_SIE_INTF_8822B */
-#define BIT_RD_SEL_8822B BIT(31)
-#define BIT_USB_SIE_INTF_WE_V1_8822B BIT(30)
-#define BIT_USB_SIE_INTF_BYIOREG_V1_8822B BIT(29)
-#define BIT_USB_SIE_SELECT_8822B BIT(28)
-
-#define BIT_SHIFT_USB_SIE_INTF_ADDR_V1_8822B 16
-#define BIT_MASK_USB_SIE_INTF_ADDR_V1_8822B 0x1ff
-#define BIT_USB_SIE_INTF_ADDR_V1_8822B(x) \
- (((x) & BIT_MASK_USB_SIE_INTF_ADDR_V1_8822B) \
- << BIT_SHIFT_USB_SIE_INTF_ADDR_V1_8822B)
-#define BIT_GET_USB_SIE_INTF_ADDR_V1_8822B(x) \
- (((x) >> BIT_SHIFT_USB_SIE_INTF_ADDR_V1_8822B) & \
- BIT_MASK_USB_SIE_INTF_ADDR_V1_8822B)
-
-#define BIT_SHIFT_USB_SIE_INTF_RD_8822B 8
-#define BIT_MASK_USB_SIE_INTF_RD_8822B 0xff
-#define BIT_USB_SIE_INTF_RD_8822B(x) \
- (((x) & BIT_MASK_USB_SIE_INTF_RD_8822B) \
- << BIT_SHIFT_USB_SIE_INTF_RD_8822B)
-#define BIT_GET_USB_SIE_INTF_RD_8822B(x) \
- (((x) >> BIT_SHIFT_USB_SIE_INTF_RD_8822B) & \
- BIT_MASK_USB_SIE_INTF_RD_8822B)
-
-#define BIT_SHIFT_USB_SIE_INTF_WD_8822B 0
-#define BIT_MASK_USB_SIE_INTF_WD_8822B 0xff
-#define BIT_USB_SIE_INTF_WD_8822B(x) \
- (((x) & BIT_MASK_USB_SIE_INTF_WD_8822B) \
- << BIT_SHIFT_USB_SIE_INTF_WD_8822B)
-#define BIT_GET_USB_SIE_INTF_WD_8822B(x) \
- (((x) >> BIT_SHIFT_USB_SIE_INTF_WD_8822B) & \
- BIT_MASK_USB_SIE_INTF_WD_8822B)
-
-/* 2 REG_PCIE_MIO_INTF_8822B */
-#define BIT_PCIE_MIO_BYIOREG_8822B BIT(13)
-#define BIT_PCIE_MIO_RE_8822B BIT(12)
-
-#define BIT_SHIFT_PCIE_MIO_WE_8822B 8
-#define BIT_MASK_PCIE_MIO_WE_8822B 0xf
-#define BIT_PCIE_MIO_WE_8822B(x) \
- (((x) & BIT_MASK_PCIE_MIO_WE_8822B) << BIT_SHIFT_PCIE_MIO_WE_8822B)
-#define BIT_GET_PCIE_MIO_WE_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_MIO_WE_8822B) & BIT_MASK_PCIE_MIO_WE_8822B)
-
-#define BIT_SHIFT_PCIE_MIO_ADDR_8822B 0
-#define BIT_MASK_PCIE_MIO_ADDR_8822B 0xff
-#define BIT_PCIE_MIO_ADDR_8822B(x) \
- (((x) & BIT_MASK_PCIE_MIO_ADDR_8822B) << BIT_SHIFT_PCIE_MIO_ADDR_8822B)
-#define BIT_GET_PCIE_MIO_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_MIO_ADDR_8822B) & BIT_MASK_PCIE_MIO_ADDR_8822B)
-
-/* 2 REG_PCIE_MIO_INTD_8822B */
-
-#define BIT_SHIFT_PCIE_MIO_DATA_8822B 0
-#define BIT_MASK_PCIE_MIO_DATA_8822B 0xffffffffL
-#define BIT_PCIE_MIO_DATA_8822B(x) \
- (((x) & BIT_MASK_PCIE_MIO_DATA_8822B) << BIT_SHIFT_PCIE_MIO_DATA_8822B)
-#define BIT_GET_PCIE_MIO_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_MIO_DATA_8822B) & BIT_MASK_PCIE_MIO_DATA_8822B)
-
-/* 2 REG_WLRF1_8822B */
-
-#define BIT_SHIFT_WLRF1_CTRL_8822B 24
-#define BIT_MASK_WLRF1_CTRL_8822B 0xff
-#define BIT_WLRF1_CTRL_8822B(x) \
- (((x) & BIT_MASK_WLRF1_CTRL_8822B) << BIT_SHIFT_WLRF1_CTRL_8822B)
-#define BIT_GET_WLRF1_CTRL_8822B(x) \
- (((x) >> BIT_SHIFT_WLRF1_CTRL_8822B) & BIT_MASK_WLRF1_CTRL_8822B)
-
-/* 2 REG_SYS_CFG1_8822B */
-
-#define BIT_SHIFT_TRP_ICFG_8822B 28
-#define BIT_MASK_TRP_ICFG_8822B 0xf
-#define BIT_TRP_ICFG_8822B(x) \
- (((x) & BIT_MASK_TRP_ICFG_8822B) << BIT_SHIFT_TRP_ICFG_8822B)
-#define BIT_GET_TRP_ICFG_8822B(x) \
- (((x) >> BIT_SHIFT_TRP_ICFG_8822B) & BIT_MASK_TRP_ICFG_8822B)
-
-#define BIT_RF_TYPE_ID_8822B BIT(27)
-#define BIT_BD_HCI_SEL_8822B BIT(26)
-#define BIT_BD_PKG_SEL_8822B BIT(25)
-#define BIT_SPSLDO_SEL_8822B BIT(24)
-#define BIT_RTL_ID_8822B BIT(23)
-#define BIT_PAD_HWPD_IDN_8822B BIT(22)
-#define BIT_TESTMODE_8822B BIT(20)
-
-#define BIT_SHIFT_VENDOR_ID_8822B 16
-#define BIT_MASK_VENDOR_ID_8822B 0xf
-#define BIT_VENDOR_ID_8822B(x) \
- (((x) & BIT_MASK_VENDOR_ID_8822B) << BIT_SHIFT_VENDOR_ID_8822B)
-#define BIT_GET_VENDOR_ID_8822B(x) \
- (((x) >> BIT_SHIFT_VENDOR_ID_8822B) & BIT_MASK_VENDOR_ID_8822B)
-
-#define BIT_SHIFT_CHIP_VER_8822B 12
-#define BIT_MASK_CHIP_VER_8822B 0xf
-#define BIT_CHIP_VER_8822B(x) \
- (((x) & BIT_MASK_CHIP_VER_8822B) << BIT_SHIFT_CHIP_VER_8822B)
-#define BIT_GET_CHIP_VER_8822B(x) \
- (((x) >> BIT_SHIFT_CHIP_VER_8822B) & BIT_MASK_CHIP_VER_8822B)
-
-#define BIT_BD_MAC3_8822B BIT(11)
-#define BIT_BD_MAC1_8822B BIT(10)
-#define BIT_BD_MAC2_8822B BIT(9)
-#define BIT_SIC_IDLE_8822B BIT(8)
-#define BIT_SW_OFFLOAD_EN_8822B BIT(7)
-#define BIT_OCP_SHUTDN_8822B BIT(6)
-#define BIT_V15_VLD_8822B BIT(5)
-#define BIT_PCIRSTB_8822B BIT(4)
-#define BIT_PCLK_VLD_8822B BIT(3)
-#define BIT_UCLK_VLD_8822B BIT(2)
-#define BIT_ACLK_VLD_8822B BIT(1)
-#define BIT_XCLK_VLD_8822B BIT(0)
-
-/* 2 REG_SYS_STATUS1_8822B */
-
-#define BIT_SHIFT_RF_RL_ID_8822B 28
-#define BIT_MASK_RF_RL_ID_8822B 0xf
-#define BIT_RF_RL_ID_8822B(x) \
- (((x) & BIT_MASK_RF_RL_ID_8822B) << BIT_SHIFT_RF_RL_ID_8822B)
-#define BIT_GET_RF_RL_ID_8822B(x) \
- (((x) >> BIT_SHIFT_RF_RL_ID_8822B) & BIT_MASK_RF_RL_ID_8822B)
-
-#define BIT_HPHY_ICFG_8822B BIT(19)
-
-#define BIT_SHIFT_SEL_0XC0_8822B 16
-#define BIT_MASK_SEL_0XC0_8822B 0x3
-#define BIT_SEL_0XC0_8822B(x) \
- (((x) & BIT_MASK_SEL_0XC0_8822B) << BIT_SHIFT_SEL_0XC0_8822B)
-#define BIT_GET_SEL_0XC0_8822B(x) \
- (((x) >> BIT_SHIFT_SEL_0XC0_8822B) & BIT_MASK_SEL_0XC0_8822B)
-
-#define BIT_SHIFT_HCI_SEL_V3_8822B 12
-#define BIT_MASK_HCI_SEL_V3_8822B 0x7
-#define BIT_HCI_SEL_V3_8822B(x) \
- (((x) & BIT_MASK_HCI_SEL_V3_8822B) << BIT_SHIFT_HCI_SEL_V3_8822B)
-#define BIT_GET_HCI_SEL_V3_8822B(x) \
- (((x) >> BIT_SHIFT_HCI_SEL_V3_8822B) & BIT_MASK_HCI_SEL_V3_8822B)
-
-#define BIT_USB_OPERATION_MODE_8822B BIT(10)
-#define BIT_BT_PDN_8822B BIT(9)
-#define BIT_AUTO_WLPON_8822B BIT(8)
-#define BIT_WL_MODE_8822B BIT(7)
-#define BIT_PKG_SEL_HCI_8822B BIT(6)
-
-#define BIT_SHIFT_PAD_HCI_SEL_V1_8822B 3
-#define BIT_MASK_PAD_HCI_SEL_V1_8822B 0x7
-#define BIT_PAD_HCI_SEL_V1_8822B(x) \
- (((x) & BIT_MASK_PAD_HCI_SEL_V1_8822B) \
- << BIT_SHIFT_PAD_HCI_SEL_V1_8822B)
-#define BIT_GET_PAD_HCI_SEL_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PAD_HCI_SEL_V1_8822B) & \
- BIT_MASK_PAD_HCI_SEL_V1_8822B)
-
-#define BIT_SHIFT_EFS_HCI_SEL_V1_8822B 0
-#define BIT_MASK_EFS_HCI_SEL_V1_8822B 0x7
-#define BIT_EFS_HCI_SEL_V1_8822B(x) \
- (((x) & BIT_MASK_EFS_HCI_SEL_V1_8822B) \
- << BIT_SHIFT_EFS_HCI_SEL_V1_8822B)
-#define BIT_GET_EFS_HCI_SEL_V1_8822B(x) \
- (((x) >> BIT_SHIFT_EFS_HCI_SEL_V1_8822B) & \
- BIT_MASK_EFS_HCI_SEL_V1_8822B)
-
-/* 2 REG_SYS_STATUS2_8822B */
-#define BIT_SIO_ALDN_8822B BIT(19)
-#define BIT_USB_ALDN_8822B BIT(18)
-#define BIT_PCI_ALDN_8822B BIT(17)
-#define BIT_SYS_ALDN_8822B BIT(16)
-
-#define BIT_SHIFT_EPVID1_8822B 8
-#define BIT_MASK_EPVID1_8822B 0xff
-#define BIT_EPVID1_8822B(x) \
- (((x) & BIT_MASK_EPVID1_8822B) << BIT_SHIFT_EPVID1_8822B)
-#define BIT_GET_EPVID1_8822B(x) \
- (((x) >> BIT_SHIFT_EPVID1_8822B) & BIT_MASK_EPVID1_8822B)
-
-#define BIT_SHIFT_EPVID0_8822B 0
-#define BIT_MASK_EPVID0_8822B 0xff
-#define BIT_EPVID0_8822B(x) \
- (((x) & BIT_MASK_EPVID0_8822B) << BIT_SHIFT_EPVID0_8822B)
-#define BIT_GET_EPVID0_8822B(x) \
- (((x) >> BIT_SHIFT_EPVID0_8822B) & BIT_MASK_EPVID0_8822B)
-
-/* 2 REG_SYS_CFG2_8822B */
-#define BIT_HCI_SEL_EMBEDDED_8822B BIT(8)
-
-#define BIT_SHIFT_HW_ID_8822B 0
-#define BIT_MASK_HW_ID_8822B 0xff
-#define BIT_HW_ID_8822B(x) \
- (((x) & BIT_MASK_HW_ID_8822B) << BIT_SHIFT_HW_ID_8822B)
-#define BIT_GET_HW_ID_8822B(x) \
- (((x) >> BIT_SHIFT_HW_ID_8822B) & BIT_MASK_HW_ID_8822B)
-
-/* 2 REG_SYS_CFG3_8822B */
-#define BIT_PWC_MA33V_8822B BIT(15)
-#define BIT_PWC_MA12V_8822B BIT(14)
-#define BIT_PWC_MD12V_8822B BIT(13)
-#define BIT_PWC_PD12V_8822B BIT(12)
-#define BIT_PWC_UD12V_8822B BIT(11)
-#define BIT_ISO_MA2MD_8822B BIT(1)
-#define BIT_ISO_MD2PP_8822B BIT(0)
-
-/* 2 REG_SYS_CFG4_8822B */
-
-/* 2 REG_SYS_CFG5_8822B */
-#define BIT_LPS_STATUS_8822B BIT(3)
-#define BIT_HCI_TXDMA_BUSY_8822B BIT(2)
-#define BIT_HCI_TXDMA_ALLOW_8822B BIT(1)
-#define BIT_FW_CTRL_HCI_TXDMA_EN_8822B BIT(0)
-
-/* 2 REG_CPU_DMEM_CON_8822B */
-#define BIT_WDT_OPT_IOWRAPPER_8822B BIT(19)
-#define BIT_ANA_PORT_IDLE_8822B BIT(18)
-#define BIT_MAC_PORT_IDLE_8822B BIT(17)
-#define BIT_WL_PLATFORM_RST_8822B BIT(16)
-#define BIT_WL_SECURITY_CLK_8822B BIT(15)
-
-#define BIT_SHIFT_CPU_DMEM_CON_8822B 0
-#define BIT_MASK_CPU_DMEM_CON_8822B 0xff
-#define BIT_CPU_DMEM_CON_8822B(x) \
- (((x) & BIT_MASK_CPU_DMEM_CON_8822B) << BIT_SHIFT_CPU_DMEM_CON_8822B)
-#define BIT_GET_CPU_DMEM_CON_8822B(x) \
- (((x) >> BIT_SHIFT_CPU_DMEM_CON_8822B) & BIT_MASK_CPU_DMEM_CON_8822B)
-
-/* 2 REG_BOOT_REASON_8822B */
-
-#define BIT_SHIFT_BOOT_REASON_8822B 0
-#define BIT_MASK_BOOT_REASON_8822B 0x7
-#define BIT_BOOT_REASON_8822B(x) \
- (((x) & BIT_MASK_BOOT_REASON_8822B) << BIT_SHIFT_BOOT_REASON_8822B)
-#define BIT_GET_BOOT_REASON_8822B(x) \
- (((x) >> BIT_SHIFT_BOOT_REASON_8822B) & BIT_MASK_BOOT_REASON_8822B)
-
-/* 2 REG_NFCPAD_CTRL_8822B */
-#define BIT_PAD_SHUTDW_8822B BIT(18)
-#define BIT_SYSON_NFC_PAD_8822B BIT(17)
-#define BIT_NFC_INT_PAD_CTRL_8822B BIT(16)
-#define BIT_NFC_RFDIS_PAD_CTRL_8822B BIT(15)
-#define BIT_NFC_CLK_PAD_CTRL_8822B BIT(14)
-#define BIT_NFC_DATA_PAD_CTRL_8822B BIT(13)
-#define BIT_NFC_PAD_PULL_CTRL_8822B BIT(12)
-
-#define BIT_SHIFT_NFCPAD_IO_SEL_8822B 8
-#define BIT_MASK_NFCPAD_IO_SEL_8822B 0xf
-#define BIT_NFCPAD_IO_SEL_8822B(x) \
- (((x) & BIT_MASK_NFCPAD_IO_SEL_8822B) << BIT_SHIFT_NFCPAD_IO_SEL_8822B)
-#define BIT_GET_NFCPAD_IO_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_NFCPAD_IO_SEL_8822B) & BIT_MASK_NFCPAD_IO_SEL_8822B)
-
-#define BIT_SHIFT_NFCPAD_OUT_8822B 4
-#define BIT_MASK_NFCPAD_OUT_8822B 0xf
-#define BIT_NFCPAD_OUT_8822B(x) \
- (((x) & BIT_MASK_NFCPAD_OUT_8822B) << BIT_SHIFT_NFCPAD_OUT_8822B)
-#define BIT_GET_NFCPAD_OUT_8822B(x) \
- (((x) >> BIT_SHIFT_NFCPAD_OUT_8822B) & BIT_MASK_NFCPAD_OUT_8822B)
-
-#define BIT_SHIFT_NFCPAD_IN_8822B 0
-#define BIT_MASK_NFCPAD_IN_8822B 0xf
-#define BIT_NFCPAD_IN_8822B(x) \
- (((x) & BIT_MASK_NFCPAD_IN_8822B) << BIT_SHIFT_NFCPAD_IN_8822B)
-#define BIT_GET_NFCPAD_IN_8822B(x) \
- (((x) >> BIT_SHIFT_NFCPAD_IN_8822B) & BIT_MASK_NFCPAD_IN_8822B)
-
-/* 2 REG_HIMR2_8822B */
-#define BIT_BCNDMAINT_P4_MSK_8822B BIT(31)
-#define BIT_BCNDMAINT_P3_MSK_8822B BIT(30)
-#define BIT_BCNDMAINT_P2_MSK_8822B BIT(29)
-#define BIT_BCNDMAINT_P1_MSK_8822B BIT(28)
-#define BIT_ATIMEND7_MSK_8822B BIT(22)
-#define BIT_ATIMEND6_MSK_8822B BIT(21)
-#define BIT_ATIMEND5_MSK_8822B BIT(20)
-#define BIT_ATIMEND4_MSK_8822B BIT(19)
-#define BIT_ATIMEND3_MSK_8822B BIT(18)
-#define BIT_ATIMEND2_MSK_8822B BIT(17)
-#define BIT_ATIMEND1_MSK_8822B BIT(16)
-#define BIT_TXBCN7OK_MSK_8822B BIT(14)
-#define BIT_TXBCN6OK_MSK_8822B BIT(13)
-#define BIT_TXBCN5OK_MSK_8822B BIT(12)
-#define BIT_TXBCN4OK_MSK_8822B BIT(11)
-#define BIT_TXBCN3OK_MSK_8822B BIT(10)
-#define BIT_TXBCN2OK_MSK_8822B BIT(9)
-#define BIT_TXBCN1OK_MSK_V1_8822B BIT(8)
-#define BIT_TXBCN7ERR_MSK_8822B BIT(6)
-#define BIT_TXBCN6ERR_MSK_8822B BIT(5)
-#define BIT_TXBCN5ERR_MSK_8822B BIT(4)
-#define BIT_TXBCN4ERR_MSK_8822B BIT(3)
-#define BIT_TXBCN3ERR_MSK_8822B BIT(2)
-#define BIT_TXBCN2ERR_MSK_8822B BIT(1)
-#define BIT_TXBCN1ERR_MSK_V1_8822B BIT(0)
-
-/* 2 REG_HISR2_8822B */
-#define BIT_BCNDMAINT_P4_8822B BIT(31)
-#define BIT_BCNDMAINT_P3_8822B BIT(30)
-#define BIT_BCNDMAINT_P2_8822B BIT(29)
-#define BIT_BCNDMAINT_P1_8822B BIT(28)
-#define BIT_ATIMEND7_8822B BIT(22)
-#define BIT_ATIMEND6_8822B BIT(21)
-#define BIT_ATIMEND5_8822B BIT(20)
-#define BIT_ATIMEND4_8822B BIT(19)
-#define BIT_ATIMEND3_8822B BIT(18)
-#define BIT_ATIMEND2_8822B BIT(17)
-#define BIT_ATIMEND1_8822B BIT(16)
-#define BIT_TXBCN7OK_8822B BIT(14)
-#define BIT_TXBCN6OK_8822B BIT(13)
-#define BIT_TXBCN5OK_8822B BIT(12)
-#define BIT_TXBCN4OK_8822B BIT(11)
-#define BIT_TXBCN3OK_8822B BIT(10)
-#define BIT_TXBCN2OK_8822B BIT(9)
-#define BIT_TXBCN1OK_8822B BIT(8)
-#define BIT_TXBCN7ERR_8822B BIT(6)
-#define BIT_TXBCN6ERR_8822B BIT(5)
-#define BIT_TXBCN5ERR_8822B BIT(4)
-#define BIT_TXBCN4ERR_8822B BIT(3)
-#define BIT_TXBCN3ERR_8822B BIT(2)
-#define BIT_TXBCN2ERR_8822B BIT(1)
-#define BIT_TXBCN1ERR_8822B BIT(0)
-
-/* 2 REG_HIMR3_8822B */
-#define BIT_WDT_PLATFORM_INT_MSK_8822B BIT(18)
-#define BIT_WDT_CPU_INT_MSK_8822B BIT(17)
-#define BIT_SETH2CDOK_MASK_8822B BIT(16)
-#define BIT_H2C_CMD_FULL_MASK_8822B BIT(15)
-#define BIT_PWR_INT_127_MASK_8822B BIT(14)
-#define BIT_TXSHORTCUT_TXDESUPDATEOK_MASK_8822B BIT(13)
-#define BIT_TXSHORTCUT_BKUPDATEOK_MASK_8822B BIT(12)
-#define BIT_TXSHORTCUT_BEUPDATEOK_MASK_8822B BIT(11)
-#define BIT_TXSHORTCUT_VIUPDATEOK_MAS_8822B BIT(10)
-#define BIT_TXSHORTCUT_VOUPDATEOK_MASK_8822B BIT(9)
-#define BIT_PWR_INT_127_MASK_V1_8822B BIT(8)
-#define BIT_PWR_INT_126TO96_MASK_8822B BIT(7)
-#define BIT_PWR_INT_95TO64_MASK_8822B BIT(6)
-#define BIT_PWR_INT_63TO32_MASK_8822B BIT(5)
-#define BIT_PWR_INT_31TO0_MASK_8822B BIT(4)
-#define BIT_DDMA0_LP_INT_MSK_8822B BIT(1)
-#define BIT_DDMA0_HP_INT_MSK_8822B BIT(0)
-
-/* 2 REG_HISR3_8822B */
-#define BIT_WDT_PLATFORM_INT_8822B BIT(18)
-#define BIT_WDT_CPU_INT_8822B BIT(17)
-#define BIT_SETH2CDOK_8822B BIT(16)
-#define BIT_H2C_CMD_FULL_8822B BIT(15)
-#define BIT_PWR_INT_127_8822B BIT(14)
-#define BIT_TXSHORTCUT_TXDESUPDATEOK_8822B BIT(13)
-#define BIT_TXSHORTCUT_BKUPDATEOK_8822B BIT(12)
-#define BIT_TXSHORTCUT_BEUPDATEOK_8822B BIT(11)
-#define BIT_TXSHORTCUT_VIUPDATEOK_8822B BIT(10)
-#define BIT_TXSHORTCUT_VOUPDATEOK_8822B BIT(9)
-#define BIT_PWR_INT_127_V1_8822B BIT(8)
-#define BIT_PWR_INT_126TO96_8822B BIT(7)
-#define BIT_PWR_INT_95TO64_8822B BIT(6)
-#define BIT_PWR_INT_63TO32_8822B BIT(5)
-#define BIT_PWR_INT_31TO0_8822B BIT(4)
-#define BIT_DDMA0_LP_INT_8822B BIT(1)
-#define BIT_DDMA0_HP_INT_8822B BIT(0)
-
-/* 2 REG_SW_MDIO_8822B */
-#define BIT_DIS_TIMEOUT_IO_8822B BIT(24)
-
-/* 2 REG_SW_FLUSH_8822B */
-#define BIT_FLUSH_HOLDN_EN_8822B BIT(25)
-#define BIT_FLUSH_WR_EN_8822B BIT(24)
-#define BIT_SW_FLASH_CONTROL_8822B BIT(23)
-#define BIT_SW_FLASH_WEN_E_8822B BIT(19)
-#define BIT_SW_FLASH_HOLDN_E_8822B BIT(18)
-#define BIT_SW_FLASH_SO_E_8822B BIT(17)
-#define BIT_SW_FLASH_SI_E_8822B BIT(16)
-#define BIT_SW_FLASH_SK_O_8822B BIT(13)
-#define BIT_SW_FLASH_CEN_O_8822B BIT(12)
-#define BIT_SW_FLASH_WEN_O_8822B BIT(11)
-#define BIT_SW_FLASH_HOLDN_O_8822B BIT(10)
-#define BIT_SW_FLASH_SO_O_8822B BIT(9)
-#define BIT_SW_FLASH_SI_O_8822B BIT(8)
-#define BIT_SW_FLASH_WEN_I_8822B BIT(3)
-#define BIT_SW_FLASH_HOLDN_I_8822B BIT(2)
-#define BIT_SW_FLASH_SO_I_8822B BIT(1)
-#define BIT_SW_FLASH_SI_I_8822B BIT(0)
-
-/* 2 REG_H2C_PKT_READADDR_8822B */
-
-#define BIT_SHIFT_H2C_PKT_READADDR_8822B 0
-#define BIT_MASK_H2C_PKT_READADDR_8822B 0x3ffff
-#define BIT_H2C_PKT_READADDR_8822B(x) \
- (((x) & BIT_MASK_H2C_PKT_READADDR_8822B) \
- << BIT_SHIFT_H2C_PKT_READADDR_8822B)
-#define BIT_GET_H2C_PKT_READADDR_8822B(x) \
- (((x) >> BIT_SHIFT_H2C_PKT_READADDR_8822B) & \
- BIT_MASK_H2C_PKT_READADDR_8822B)
-
-/* 2 REG_H2C_PKT_WRITEADDR_8822B */
-
-#define BIT_SHIFT_H2C_PKT_WRITEADDR_8822B 0
-#define BIT_MASK_H2C_PKT_WRITEADDR_8822B 0x3ffff
-#define BIT_H2C_PKT_WRITEADDR_8822B(x) \
- (((x) & BIT_MASK_H2C_PKT_WRITEADDR_8822B) \
- << BIT_SHIFT_H2C_PKT_WRITEADDR_8822B)
-#define BIT_GET_H2C_PKT_WRITEADDR_8822B(x) \
- (((x) >> BIT_SHIFT_H2C_PKT_WRITEADDR_8822B) & \
- BIT_MASK_H2C_PKT_WRITEADDR_8822B)
-
-/* 2 REG_MEM_PWR_CRTL_8822B */
-#define BIT_MEM_BB_SD_8822B BIT(17)
-#define BIT_MEM_BB_DS_8822B BIT(16)
-#define BIT_MEM_BT_DS_8822B BIT(10)
-#define BIT_MEM_SDIO_LS_8822B BIT(9)
-#define BIT_MEM_SDIO_DS_8822B BIT(8)
-#define BIT_MEM_USB_LS_8822B BIT(7)
-#define BIT_MEM_USB_DS_8822B BIT(6)
-#define BIT_MEM_PCI_LS_8822B BIT(5)
-#define BIT_MEM_PCI_DS_8822B BIT(4)
-#define BIT_MEM_WLMAC_LS_8822B BIT(3)
-#define BIT_MEM_WLMAC_DS_8822B BIT(2)
-#define BIT_MEM_WLMCU_LS_8822B BIT(1)
-#define BIT_MEM_WLMCU_DS_8822B BIT(0)
-
-/* 2 REG_FW_DBG0_8822B */
-
-#define BIT_SHIFT_FW_DBG0_8822B 0
-#define BIT_MASK_FW_DBG0_8822B 0xffffffffL
-#define BIT_FW_DBG0_8822B(x) \
- (((x) & BIT_MASK_FW_DBG0_8822B) << BIT_SHIFT_FW_DBG0_8822B)
-#define BIT_GET_FW_DBG0_8822B(x) \
- (((x) >> BIT_SHIFT_FW_DBG0_8822B) & BIT_MASK_FW_DBG0_8822B)
-
-/* 2 REG_FW_DBG1_8822B */
-
-#define BIT_SHIFT_FW_DBG1_8822B 0
-#define BIT_MASK_FW_DBG1_8822B 0xffffffffL
-#define BIT_FW_DBG1_8822B(x) \
- (((x) & BIT_MASK_FW_DBG1_8822B) << BIT_SHIFT_FW_DBG1_8822B)
-#define BIT_GET_FW_DBG1_8822B(x) \
- (((x) >> BIT_SHIFT_FW_DBG1_8822B) & BIT_MASK_FW_DBG1_8822B)
-
-/* 2 REG_FW_DBG2_8822B */
-
-#define BIT_SHIFT_FW_DBG2_8822B 0
-#define BIT_MASK_FW_DBG2_8822B 0xffffffffL
-#define BIT_FW_DBG2_8822B(x) \
- (((x) & BIT_MASK_FW_DBG2_8822B) << BIT_SHIFT_FW_DBG2_8822B)
-#define BIT_GET_FW_DBG2_8822B(x) \
- (((x) >> BIT_SHIFT_FW_DBG2_8822B) & BIT_MASK_FW_DBG2_8822B)
-
-/* 2 REG_FW_DBG3_8822B */
-
-#define BIT_SHIFT_FW_DBG3_8822B 0
-#define BIT_MASK_FW_DBG3_8822B 0xffffffffL
-#define BIT_FW_DBG3_8822B(x) \
- (((x) & BIT_MASK_FW_DBG3_8822B) << BIT_SHIFT_FW_DBG3_8822B)
-#define BIT_GET_FW_DBG3_8822B(x) \
- (((x) >> BIT_SHIFT_FW_DBG3_8822B) & BIT_MASK_FW_DBG3_8822B)
-
-/* 2 REG_FW_DBG4_8822B */
-
-#define BIT_SHIFT_FW_DBG4_8822B 0
-#define BIT_MASK_FW_DBG4_8822B 0xffffffffL
-#define BIT_FW_DBG4_8822B(x) \
- (((x) & BIT_MASK_FW_DBG4_8822B) << BIT_SHIFT_FW_DBG4_8822B)
-#define BIT_GET_FW_DBG4_8822B(x) \
- (((x) >> BIT_SHIFT_FW_DBG4_8822B) & BIT_MASK_FW_DBG4_8822B)
-
-/* 2 REG_FW_DBG5_8822B */
-
-#define BIT_SHIFT_FW_DBG5_8822B 0
-#define BIT_MASK_FW_DBG5_8822B 0xffffffffL
-#define BIT_FW_DBG5_8822B(x) \
- (((x) & BIT_MASK_FW_DBG5_8822B) << BIT_SHIFT_FW_DBG5_8822B)
-#define BIT_GET_FW_DBG5_8822B(x) \
- (((x) >> BIT_SHIFT_FW_DBG5_8822B) & BIT_MASK_FW_DBG5_8822B)
-
-/* 2 REG_FW_DBG6_8822B */
-
-#define BIT_SHIFT_FW_DBG6_8822B 0
-#define BIT_MASK_FW_DBG6_8822B 0xffffffffL
-#define BIT_FW_DBG6_8822B(x) \
- (((x) & BIT_MASK_FW_DBG6_8822B) << BIT_SHIFT_FW_DBG6_8822B)
-#define BIT_GET_FW_DBG6_8822B(x) \
- (((x) >> BIT_SHIFT_FW_DBG6_8822B) & BIT_MASK_FW_DBG6_8822B)
-
-/* 2 REG_FW_DBG7_8822B */
-
-#define BIT_SHIFT_FW_DBG7_8822B 0
-#define BIT_MASK_FW_DBG7_8822B 0xffffffffL
-#define BIT_FW_DBG7_8822B(x) \
- (((x) & BIT_MASK_FW_DBG7_8822B) << BIT_SHIFT_FW_DBG7_8822B)
-#define BIT_GET_FW_DBG7_8822B(x) \
- (((x) >> BIT_SHIFT_FW_DBG7_8822B) & BIT_MASK_FW_DBG7_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_CR_8822B */
-
-#define BIT_SHIFT_LBMODE_8822B 24
-#define BIT_MASK_LBMODE_8822B 0x1f
-#define BIT_LBMODE_8822B(x) \
- (((x) & BIT_MASK_LBMODE_8822B) << BIT_SHIFT_LBMODE_8822B)
-#define BIT_GET_LBMODE_8822B(x) \
- (((x) >> BIT_SHIFT_LBMODE_8822B) & BIT_MASK_LBMODE_8822B)
-
-#define BIT_SHIFT_NETYPE1_8822B 18
-#define BIT_MASK_NETYPE1_8822B 0x3
-#define BIT_NETYPE1_8822B(x) \
- (((x) & BIT_MASK_NETYPE1_8822B) << BIT_SHIFT_NETYPE1_8822B)
-#define BIT_GET_NETYPE1_8822B(x) \
- (((x) >> BIT_SHIFT_NETYPE1_8822B) & BIT_MASK_NETYPE1_8822B)
-
-#define BIT_SHIFT_NETYPE0_8822B 16
-#define BIT_MASK_NETYPE0_8822B 0x3
-#define BIT_NETYPE0_8822B(x) \
- (((x) & BIT_MASK_NETYPE0_8822B) << BIT_SHIFT_NETYPE0_8822B)
-#define BIT_GET_NETYPE0_8822B(x) \
- (((x) >> BIT_SHIFT_NETYPE0_8822B) & BIT_MASK_NETYPE0_8822B)
-
-#define BIT_I2C_MAILBOX_EN_8822B BIT(12)
-#define BIT_SHCUT_EN_8822B BIT(11)
-#define BIT_32K_CAL_TMR_EN_8822B BIT(10)
-#define BIT_MAC_SEC_EN_8822B BIT(9)
-#define BIT_ENSWBCN_8822B BIT(8)
-#define BIT_MACRXEN_8822B BIT(7)
-#define BIT_MACTXEN_8822B BIT(6)
-#define BIT_SCHEDULE_EN_8822B BIT(5)
-#define BIT_PROTOCOL_EN_8822B BIT(4)
-#define BIT_RXDMA_EN_8822B BIT(3)
-#define BIT_TXDMA_EN_8822B BIT(2)
-#define BIT_HCI_RXDMA_EN_8822B BIT(1)
-#define BIT_HCI_TXDMA_EN_8822B BIT(0)
-
-/* 2 REG_PKT_BUFF_ACCESS_CTRL_8822B */
-
-#define BIT_SHIFT_PKT_BUFF_ACCESS_CTRL_8822B 0
-#define BIT_MASK_PKT_BUFF_ACCESS_CTRL_8822B 0xff
-#define BIT_PKT_BUFF_ACCESS_CTRL_8822B(x) \
- (((x) & BIT_MASK_PKT_BUFF_ACCESS_CTRL_8822B) \
- << BIT_SHIFT_PKT_BUFF_ACCESS_CTRL_8822B)
-#define BIT_GET_PKT_BUFF_ACCESS_CTRL_8822B(x) \
- (((x) >> BIT_SHIFT_PKT_BUFF_ACCESS_CTRL_8822B) & \
- BIT_MASK_PKT_BUFF_ACCESS_CTRL_8822B)
-
-/* 2 REG_TSF_CLK_STATE_8822B */
-#define BIT_TSF_CLK_STABLE_8822B BIT(15)
-
-/* 2 REG_TXDMA_PQ_MAP_8822B */
-
-#define BIT_SHIFT_TXDMA_HIQ_MAP_8822B 14
-#define BIT_MASK_TXDMA_HIQ_MAP_8822B 0x3
-#define BIT_TXDMA_HIQ_MAP_8822B(x) \
- (((x) & BIT_MASK_TXDMA_HIQ_MAP_8822B) << BIT_SHIFT_TXDMA_HIQ_MAP_8822B)
-#define BIT_GET_TXDMA_HIQ_MAP_8822B(x) \
- (((x) >> BIT_SHIFT_TXDMA_HIQ_MAP_8822B) & BIT_MASK_TXDMA_HIQ_MAP_8822B)
-
-#define BIT_SHIFT_TXDMA_MGQ_MAP_8822B 12
-#define BIT_MASK_TXDMA_MGQ_MAP_8822B 0x3
-#define BIT_TXDMA_MGQ_MAP_8822B(x) \
- (((x) & BIT_MASK_TXDMA_MGQ_MAP_8822B) << BIT_SHIFT_TXDMA_MGQ_MAP_8822B)
-#define BIT_GET_TXDMA_MGQ_MAP_8822B(x) \
- (((x) >> BIT_SHIFT_TXDMA_MGQ_MAP_8822B) & BIT_MASK_TXDMA_MGQ_MAP_8822B)
-
-#define BIT_SHIFT_TXDMA_BKQ_MAP_8822B 10
-#define BIT_MASK_TXDMA_BKQ_MAP_8822B 0x3
-#define BIT_TXDMA_BKQ_MAP_8822B(x) \
- (((x) & BIT_MASK_TXDMA_BKQ_MAP_8822B) << BIT_SHIFT_TXDMA_BKQ_MAP_8822B)
-#define BIT_GET_TXDMA_BKQ_MAP_8822B(x) \
- (((x) >> BIT_SHIFT_TXDMA_BKQ_MAP_8822B) & BIT_MASK_TXDMA_BKQ_MAP_8822B)
-
-#define BIT_SHIFT_TXDMA_BEQ_MAP_8822B 8
-#define BIT_MASK_TXDMA_BEQ_MAP_8822B 0x3
-#define BIT_TXDMA_BEQ_MAP_8822B(x) \
- (((x) & BIT_MASK_TXDMA_BEQ_MAP_8822B) << BIT_SHIFT_TXDMA_BEQ_MAP_8822B)
-#define BIT_GET_TXDMA_BEQ_MAP_8822B(x) \
- (((x) >> BIT_SHIFT_TXDMA_BEQ_MAP_8822B) & BIT_MASK_TXDMA_BEQ_MAP_8822B)
-
-#define BIT_SHIFT_TXDMA_VIQ_MAP_8822B 6
-#define BIT_MASK_TXDMA_VIQ_MAP_8822B 0x3
-#define BIT_TXDMA_VIQ_MAP_8822B(x) \
- (((x) & BIT_MASK_TXDMA_VIQ_MAP_8822B) << BIT_SHIFT_TXDMA_VIQ_MAP_8822B)
-#define BIT_GET_TXDMA_VIQ_MAP_8822B(x) \
- (((x) >> BIT_SHIFT_TXDMA_VIQ_MAP_8822B) & BIT_MASK_TXDMA_VIQ_MAP_8822B)
-
-#define BIT_SHIFT_TXDMA_VOQ_MAP_8822B 4
-#define BIT_MASK_TXDMA_VOQ_MAP_8822B 0x3
-#define BIT_TXDMA_VOQ_MAP_8822B(x) \
- (((x) & BIT_MASK_TXDMA_VOQ_MAP_8822B) << BIT_SHIFT_TXDMA_VOQ_MAP_8822B)
-#define BIT_GET_TXDMA_VOQ_MAP_8822B(x) \
- (((x) >> BIT_SHIFT_TXDMA_VOQ_MAP_8822B) & BIT_MASK_TXDMA_VOQ_MAP_8822B)
-
-#define BIT_RXDMA_AGG_EN_8822B BIT(2)
-#define BIT_RXSHFT_EN_8822B BIT(1)
-#define BIT_RXDMA_ARBBW_EN_8822B BIT(0)
-
-/* 2 REG_TRXFF_BNDY_8822B */
-
-#define BIT_SHIFT_RXFFOVFL_RSV_V2_8822B 8
-#define BIT_MASK_RXFFOVFL_RSV_V2_8822B 0xf
-#define BIT_RXFFOVFL_RSV_V2_8822B(x) \
- (((x) & BIT_MASK_RXFFOVFL_RSV_V2_8822B) \
- << BIT_SHIFT_RXFFOVFL_RSV_V2_8822B)
-#define BIT_GET_RXFFOVFL_RSV_V2_8822B(x) \
- (((x) >> BIT_SHIFT_RXFFOVFL_RSV_V2_8822B) & \
- BIT_MASK_RXFFOVFL_RSV_V2_8822B)
-
-#define BIT_SHIFT_TXPKTBUF_PGBNDY_8822B 0
-#define BIT_MASK_TXPKTBUF_PGBNDY_8822B 0xff
-#define BIT_TXPKTBUF_PGBNDY_8822B(x) \
- (((x) & BIT_MASK_TXPKTBUF_PGBNDY_8822B) \
- << BIT_SHIFT_TXPKTBUF_PGBNDY_8822B)
-#define BIT_GET_TXPKTBUF_PGBNDY_8822B(x) \
- (((x) >> BIT_SHIFT_TXPKTBUF_PGBNDY_8822B) & \
- BIT_MASK_TXPKTBUF_PGBNDY_8822B)
-
-/* 2 REG_PTA_I2C_MBOX_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_I2C_M_STATUS_8822B 8
-#define BIT_MASK_I2C_M_STATUS_8822B 0xf
-#define BIT_I2C_M_STATUS_8822B(x) \
- (((x) & BIT_MASK_I2C_M_STATUS_8822B) << BIT_SHIFT_I2C_M_STATUS_8822B)
-#define BIT_GET_I2C_M_STATUS_8822B(x) \
- (((x) >> BIT_SHIFT_I2C_M_STATUS_8822B) & BIT_MASK_I2C_M_STATUS_8822B)
-
-#define BIT_SHIFT_I2C_M_BUS_GNT_FW_8822B 4
-#define BIT_MASK_I2C_M_BUS_GNT_FW_8822B 0x7
-#define BIT_I2C_M_BUS_GNT_FW_8822B(x) \
- (((x) & BIT_MASK_I2C_M_BUS_GNT_FW_8822B) \
- << BIT_SHIFT_I2C_M_BUS_GNT_FW_8822B)
-#define BIT_GET_I2C_M_BUS_GNT_FW_8822B(x) \
- (((x) >> BIT_SHIFT_I2C_M_BUS_GNT_FW_8822B) & \
- BIT_MASK_I2C_M_BUS_GNT_FW_8822B)
-
-#define BIT_I2C_M_GNT_FW_8822B BIT(3)
-
-#define BIT_SHIFT_I2C_M_SPEED_8822B 1
-#define BIT_MASK_I2C_M_SPEED_8822B 0x3
-#define BIT_I2C_M_SPEED_8822B(x) \
- (((x) & BIT_MASK_I2C_M_SPEED_8822B) << BIT_SHIFT_I2C_M_SPEED_8822B)
-#define BIT_GET_I2C_M_SPEED_8822B(x) \
- (((x) >> BIT_SHIFT_I2C_M_SPEED_8822B) & BIT_MASK_I2C_M_SPEED_8822B)
-
-#define BIT_I2C_M_UNLOCK_8822B BIT(0)
-
-/* 2 REG_RXFF_BNDY_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_RXFF0_BNDY_V2_8822B 0
-#define BIT_MASK_RXFF0_BNDY_V2_8822B 0x3ffff
-#define BIT_RXFF0_BNDY_V2_8822B(x) \
- (((x) & BIT_MASK_RXFF0_BNDY_V2_8822B) << BIT_SHIFT_RXFF0_BNDY_V2_8822B)
-#define BIT_GET_RXFF0_BNDY_V2_8822B(x) \
- (((x) >> BIT_SHIFT_RXFF0_BNDY_V2_8822B) & BIT_MASK_RXFF0_BNDY_V2_8822B)
-
-/* 2 REG_FE1IMR_8822B */
-#define BIT_FS_RXDMA2_DONE_INT_EN_8822B BIT(28)
-#define BIT_FS_RXDONE3_INT_EN_8822B BIT(27)
-#define BIT_FS_RXDONE2_INT_EN_8822B BIT(26)
-#define BIT_FS_RX_BCN_P4_INT_EN_8822B BIT(25)
-#define BIT_FS_RX_BCN_P3_INT_EN_8822B BIT(24)
-#define BIT_FS_RX_BCN_P2_INT_EN_8822B BIT(23)
-#define BIT_FS_RX_BCN_P1_INT_EN_8822B BIT(22)
-#define BIT_FS_RX_BCN_P0_INT_EN_8822B BIT(21)
-#define BIT_FS_RX_UMD0_INT_EN_8822B BIT(20)
-#define BIT_FS_RX_UMD1_INT_EN_8822B BIT(19)
-#define BIT_FS_RX_BMD0_INT_EN_8822B BIT(18)
-#define BIT_FS_RX_BMD1_INT_EN_8822B BIT(17)
-#define BIT_FS_RXDONE_INT_EN_8822B BIT(16)
-#define BIT_FS_WWLAN_INT_EN_8822B BIT(15)
-#define BIT_FS_SOUND_DONE_INT_EN_8822B BIT(14)
-#define BIT_FS_LP_STBY_INT_EN_8822B BIT(13)
-#define BIT_FS_TRL_MTR_INT_EN_8822B BIT(12)
-#define BIT_FS_BF1_PRETO_INT_EN_8822B BIT(11)
-#define BIT_FS_BF0_PRETO_INT_EN_8822B BIT(10)
-#define BIT_FS_PTCL_RELEASE_MACID_INT_EN_8822B BIT(9)
-#define BIT_FS_LTE_COEX_EN_8822B BIT(6)
-#define BIT_FS_WLACTOFF_INT_EN_8822B BIT(5)
-#define BIT_FS_WLACTON_INT_EN_8822B BIT(4)
-#define BIT_FS_BTCMD_INT_EN_8822B BIT(3)
-#define BIT_FS_REG_MAILBOX_TO_I2C_INT_EN_8822B BIT(2)
-#define BIT_FS_TRPC_TO_INT_EN_V1_8822B BIT(1)
-#define BIT_FS_RPC_O_T_INT_EN_V1_8822B BIT(0)
-
-/* 2 REG_FE1ISR_8822B */
-#define BIT_FS_RXDMA2_DONE_INT_8822B BIT(28)
-#define BIT_FS_RXDONE3_INT_8822B BIT(27)
-#define BIT_FS_RXDONE2_INT_8822B BIT(26)
-#define BIT_FS_RX_BCN_P4_INT_8822B BIT(25)
-#define BIT_FS_RX_BCN_P3_INT_8822B BIT(24)
-#define BIT_FS_RX_BCN_P2_INT_8822B BIT(23)
-#define BIT_FS_RX_BCN_P1_INT_8822B BIT(22)
-#define BIT_FS_RX_BCN_P0_INT_8822B BIT(21)
-#define BIT_FS_RX_UMD0_INT_8822B BIT(20)
-#define BIT_FS_RX_UMD1_INT_8822B BIT(19)
-#define BIT_FS_RX_BMD0_INT_8822B BIT(18)
-#define BIT_FS_RX_BMD1_INT_8822B BIT(17)
-#define BIT_FS_RXDONE_INT_8822B BIT(16)
-#define BIT_FS_WWLAN_INT_8822B BIT(15)
-#define BIT_FS_SOUND_DONE_INT_8822B BIT(14)
-#define BIT_FS_LP_STBY_INT_8822B BIT(13)
-#define BIT_FS_TRL_MTR_INT_8822B BIT(12)
-#define BIT_FS_BF1_PRETO_INT_8822B BIT(11)
-#define BIT_FS_BF0_PRETO_INT_8822B BIT(10)
-#define BIT_FS_PTCL_RELEASE_MACID_INT_8822B BIT(9)
-#define BIT_FS_LTE_COEX_INT_8822B BIT(6)
-#define BIT_FS_WLACTOFF_INT_8822B BIT(5)
-#define BIT_FS_WLACTON_INT_8822B BIT(4)
-#define BIT_FS_BCN_RX_INT_INT_8822B BIT(3)
-#define BIT_FS_MAILBOX_TO_I2C_INT_8822B BIT(2)
-#define BIT_FS_TRPC_TO_INT_8822B BIT(1)
-#define BIT_FS_RPC_O_T_INT_8822B BIT(0)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_CPWM_8822B */
-#define BIT_CPWM_TOGGLING_8822B BIT(31)
-
-#define BIT_SHIFT_CPWM_MOD_8822B 24
-#define BIT_MASK_CPWM_MOD_8822B 0x7f
-#define BIT_CPWM_MOD_8822B(x) \
- (((x) & BIT_MASK_CPWM_MOD_8822B) << BIT_SHIFT_CPWM_MOD_8822B)
-#define BIT_GET_CPWM_MOD_8822B(x) \
- (((x) >> BIT_SHIFT_CPWM_MOD_8822B) & BIT_MASK_CPWM_MOD_8822B)
-
-/* 2 REG_FWIMR_8822B */
-#define BIT_FS_TXBCNOK_MB7_INT_EN_8822B BIT(31)
-#define BIT_FS_TXBCNOK_MB6_INT_EN_8822B BIT(30)
-#define BIT_FS_TXBCNOK_MB5_INT_EN_8822B BIT(29)
-#define BIT_FS_TXBCNOK_MB4_INT_EN_8822B BIT(28)
-#define BIT_FS_TXBCNOK_MB3_INT_EN_8822B BIT(27)
-#define BIT_FS_TXBCNOK_MB2_INT_EN_8822B BIT(26)
-#define BIT_FS_TXBCNOK_MB1_INT_EN_8822B BIT(25)
-#define BIT_FS_TXBCNOK_MB0_INT_EN_8822B BIT(24)
-#define BIT_FS_TXBCNERR_MB7_INT_EN_8822B BIT(23)
-#define BIT_FS_TXBCNERR_MB6_INT_EN_8822B BIT(22)
-#define BIT_FS_TXBCNERR_MB5_INT_EN_8822B BIT(21)
-#define BIT_FS_TXBCNERR_MB4_INT_EN_8822B BIT(20)
-#define BIT_FS_TXBCNERR_MB3_INT_EN_8822B BIT(19)
-#define BIT_FS_TXBCNERR_MB2_INT_EN_8822B BIT(18)
-#define BIT_FS_TXBCNERR_MB1_INT_EN_8822B BIT(17)
-#define BIT_FS_TXBCNERR_MB0_INT_EN_8822B BIT(16)
-#define BIT_CPU_MGQ_TXDONE_INT_EN_8822B BIT(15)
-#define BIT_SIFS_OVERSPEC_INT_EN_8822B BIT(14)
-#define BIT_FS_MGNTQ_RPTR_RELEASE_INT_EN_8822B BIT(13)
-#define BIT_FS_MGNTQFF_TO_INT_EN_8822B BIT(12)
-#define BIT_FS_DDMA1_LP_INT_EN_8822B BIT(11)
-#define BIT_FS_DDMA1_HP_INT_EN_8822B BIT(10)
-#define BIT_FS_DDMA0_LP_INT_EN_8822B BIT(9)
-#define BIT_FS_DDMA0_HP_INT_EN_8822B BIT(8)
-#define BIT_FS_TRXRPT_INT_EN_8822B BIT(7)
-#define BIT_FS_C2H_W_READY_INT_EN_8822B BIT(6)
-#define BIT_FS_HRCV_INT_EN_8822B BIT(5)
-#define BIT_FS_H2CCMD_INT_EN_8822B BIT(4)
-#define BIT_FS_TXPKTIN_INT_EN_8822B BIT(3)
-#define BIT_FS_ERRORHDL_INT_EN_8822B BIT(2)
-#define BIT_FS_TXCCX_INT_EN_8822B BIT(1)
-#define BIT_FS_TXCLOSE_INT_EN_8822B BIT(0)
-
-/* 2 REG_FWISR_8822B */
-#define BIT_FS_TXBCNOK_MB7_INT_8822B BIT(31)
-#define BIT_FS_TXBCNOK_MB6_INT_8822B BIT(30)
-#define BIT_FS_TXBCNOK_MB5_INT_8822B BIT(29)
-#define BIT_FS_TXBCNOK_MB4_INT_8822B BIT(28)
-#define BIT_FS_TXBCNOK_MB3_INT_8822B BIT(27)
-#define BIT_FS_TXBCNOK_MB2_INT_8822B BIT(26)
-#define BIT_FS_TXBCNOK_MB1_INT_8822B BIT(25)
-#define BIT_FS_TXBCNOK_MB0_INT_8822B BIT(24)
-#define BIT_FS_TXBCNERR_MB7_INT_8822B BIT(23)
-#define BIT_FS_TXBCNERR_MB6_INT_8822B BIT(22)
-#define BIT_FS_TXBCNERR_MB5_INT_8822B BIT(21)
-#define BIT_FS_TXBCNERR_MB4_INT_8822B BIT(20)
-#define BIT_FS_TXBCNERR_MB3_INT_8822B BIT(19)
-#define BIT_FS_TXBCNERR_MB2_INT_8822B BIT(18)
-#define BIT_FS_TXBCNERR_MB1_INT_8822B BIT(17)
-#define BIT_FS_TXBCNERR_MB0_INT_8822B BIT(16)
-#define BIT_CPU_MGQ_TXDONE_INT_8822B BIT(15)
-#define BIT_SIFS_OVERSPEC_INT_8822B BIT(14)
-#define BIT_FS_MGNTQ_RPTR_RELEASE_INT_8822B BIT(13)
-#define BIT_FS_MGNTQFF_TO_INT_8822B BIT(12)
-#define BIT_FS_DDMA1_LP_INT_8822B BIT(11)
-#define BIT_FS_DDMA1_HP_INT_8822B BIT(10)
-#define BIT_FS_DDMA0_LP_INT_8822B BIT(9)
-#define BIT_FS_DDMA0_HP_INT_8822B BIT(8)
-#define BIT_FS_TRXRPT_INT_8822B BIT(7)
-#define BIT_FS_C2H_W_READY_INT_8822B BIT(6)
-#define BIT_FS_HRCV_INT_8822B BIT(5)
-#define BIT_FS_H2CCMD_INT_8822B BIT(4)
-#define BIT_FS_TXPKTIN_INT_8822B BIT(3)
-#define BIT_FS_ERRORHDL_INT_8822B BIT(2)
-#define BIT_FS_TXCCX_INT_8822B BIT(1)
-#define BIT_FS_TXCLOSE_INT_8822B BIT(0)
-
-/* 2 REG_FTIMR_8822B */
-#define BIT_PS_TIMER_C_EARLY_INT_EN_8822B BIT(23)
-#define BIT_PS_TIMER_B_EARLY_INT_EN_8822B BIT(22)
-#define BIT_PS_TIMER_A_EARLY_INT_EN_8822B BIT(21)
-#define BIT_CPUMGQ_TX_TIMER_EARLY_INT_EN_8822B BIT(20)
-#define BIT_PS_TIMER_C_INT_EN_8822B BIT(19)
-#define BIT_PS_TIMER_B_INT_EN_8822B BIT(18)
-#define BIT_PS_TIMER_A_INT_EN_8822B BIT(17)
-#define BIT_CPUMGQ_TX_TIMER_INT_EN_8822B BIT(16)
-#define BIT_FS_PS_TIMEOUT2_EN_8822B BIT(15)
-#define BIT_FS_PS_TIMEOUT1_EN_8822B BIT(14)
-#define BIT_FS_PS_TIMEOUT0_EN_8822B BIT(13)
-#define BIT_FS_GTINT8_EN_8822B BIT(8)
-#define BIT_FS_GTINT7_EN_8822B BIT(7)
-#define BIT_FS_GTINT6_EN_8822B BIT(6)
-#define BIT_FS_GTINT5_EN_8822B BIT(5)
-#define BIT_FS_GTINT4_EN_8822B BIT(4)
-#define BIT_FS_GTINT3_EN_8822B BIT(3)
-#define BIT_FS_GTINT2_EN_8822B BIT(2)
-#define BIT_FS_GTINT1_EN_8822B BIT(1)
-#define BIT_FS_GTINT0_EN_8822B BIT(0)
-
-/* 2 REG_FTISR_8822B */
-#define BIT_PS_TIMER_C_EARLY__INT_8822B BIT(23)
-#define BIT_PS_TIMER_B_EARLY__INT_8822B BIT(22)
-#define BIT_PS_TIMER_A_EARLY__INT_8822B BIT(21)
-#define BIT_CPUMGQ_TX_TIMER_EARLY_INT_8822B BIT(20)
-#define BIT_PS_TIMER_C_INT_8822B BIT(19)
-#define BIT_PS_TIMER_B_INT_8822B BIT(18)
-#define BIT_PS_TIMER_A_INT_8822B BIT(17)
-#define BIT_CPUMGQ_TX_TIMER_INT_8822B BIT(16)
-#define BIT_FS_PS_TIMEOUT2_INT_8822B BIT(15)
-#define BIT_FS_PS_TIMEOUT1_INT_8822B BIT(14)
-#define BIT_FS_PS_TIMEOUT0_INT_8822B BIT(13)
-#define BIT_FS_GTINT8_INT_8822B BIT(8)
-#define BIT_FS_GTINT7_INT_8822B BIT(7)
-#define BIT_FS_GTINT6_INT_8822B BIT(6)
-#define BIT_FS_GTINT5_INT_8822B BIT(5)
-#define BIT_FS_GTINT4_INT_8822B BIT(4)
-#define BIT_FS_GTINT3_INT_8822B BIT(3)
-#define BIT_FS_GTINT2_INT_8822B BIT(2)
-#define BIT_FS_GTINT1_INT_8822B BIT(1)
-#define BIT_FS_GTINT0_INT_8822B BIT(0)
-
-/* 2 REG_PKTBUF_DBG_CTRL_8822B */
-
-#define BIT_SHIFT_PKTBUF_WRITE_EN_8822B 24
-#define BIT_MASK_PKTBUF_WRITE_EN_8822B 0xff
-#define BIT_PKTBUF_WRITE_EN_8822B(x) \
- (((x) & BIT_MASK_PKTBUF_WRITE_EN_8822B) \
- << BIT_SHIFT_PKTBUF_WRITE_EN_8822B)
-#define BIT_GET_PKTBUF_WRITE_EN_8822B(x) \
- (((x) >> BIT_SHIFT_PKTBUF_WRITE_EN_8822B) & \
- BIT_MASK_PKTBUF_WRITE_EN_8822B)
-
-#define BIT_TXRPTBUF_DBG_8822B BIT(23)
-
-/* 2 REG_NOT_VALID_8822B */
-#define BIT_TXPKTBUF_DBG_V2_8822B BIT(20)
-#define BIT_RXPKTBUF_DBG_8822B BIT(16)
-
-#define BIT_SHIFT_PKTBUF_DBG_ADDR_8822B 0
-#define BIT_MASK_PKTBUF_DBG_ADDR_8822B 0x1fff
-#define BIT_PKTBUF_DBG_ADDR_8822B(x) \
- (((x) & BIT_MASK_PKTBUF_DBG_ADDR_8822B) \
- << BIT_SHIFT_PKTBUF_DBG_ADDR_8822B)
-#define BIT_GET_PKTBUF_DBG_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_PKTBUF_DBG_ADDR_8822B) & \
- BIT_MASK_PKTBUF_DBG_ADDR_8822B)
-
-/* 2 REG_PKTBUF_DBG_DATA_L_8822B */
-
-#define BIT_SHIFT_PKTBUF_DBG_DATA_L_8822B 0
-#define BIT_MASK_PKTBUF_DBG_DATA_L_8822B 0xffffffffL
-#define BIT_PKTBUF_DBG_DATA_L_8822B(x) \
- (((x) & BIT_MASK_PKTBUF_DBG_DATA_L_8822B) \
- << BIT_SHIFT_PKTBUF_DBG_DATA_L_8822B)
-#define BIT_GET_PKTBUF_DBG_DATA_L_8822B(x) \
- (((x) >> BIT_SHIFT_PKTBUF_DBG_DATA_L_8822B) & \
- BIT_MASK_PKTBUF_DBG_DATA_L_8822B)
-
-/* 2 REG_PKTBUF_DBG_DATA_H_8822B */
-
-#define BIT_SHIFT_PKTBUF_DBG_DATA_H_8822B 0
-#define BIT_MASK_PKTBUF_DBG_DATA_H_8822B 0xffffffffL
-#define BIT_PKTBUF_DBG_DATA_H_8822B(x) \
- (((x) & BIT_MASK_PKTBUF_DBG_DATA_H_8822B) \
- << BIT_SHIFT_PKTBUF_DBG_DATA_H_8822B)
-#define BIT_GET_PKTBUF_DBG_DATA_H_8822B(x) \
- (((x) >> BIT_SHIFT_PKTBUF_DBG_DATA_H_8822B) & \
- BIT_MASK_PKTBUF_DBG_DATA_H_8822B)
-
-/* 2 REG_CPWM2_8822B */
-
-#define BIT_SHIFT_L0S_TO_RCVY_NUM_8822B 16
-#define BIT_MASK_L0S_TO_RCVY_NUM_8822B 0xff
-#define BIT_L0S_TO_RCVY_NUM_8822B(x) \
- (((x) & BIT_MASK_L0S_TO_RCVY_NUM_8822B) \
- << BIT_SHIFT_L0S_TO_RCVY_NUM_8822B)
-#define BIT_GET_L0S_TO_RCVY_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_L0S_TO_RCVY_NUM_8822B) & \
- BIT_MASK_L0S_TO_RCVY_NUM_8822B)
-
-#define BIT_CPWM2_TOGGLING_8822B BIT(15)
-
-#define BIT_SHIFT_CPWM2_MOD_8822B 0
-#define BIT_MASK_CPWM2_MOD_8822B 0x7fff
-#define BIT_CPWM2_MOD_8822B(x) \
- (((x) & BIT_MASK_CPWM2_MOD_8822B) << BIT_SHIFT_CPWM2_MOD_8822B)
-#define BIT_GET_CPWM2_MOD_8822B(x) \
- (((x) >> BIT_SHIFT_CPWM2_MOD_8822B) & BIT_MASK_CPWM2_MOD_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_TC0_CTRL_8822B */
-#define BIT_TC0INT_EN_8822B BIT(26)
-#define BIT_TC0MODE_8822B BIT(25)
-#define BIT_TC0EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC0DATA_8822B 0
-#define BIT_MASK_TC0DATA_8822B 0xffffff
-#define BIT_TC0DATA_8822B(x) \
- (((x) & BIT_MASK_TC0DATA_8822B) << BIT_SHIFT_TC0DATA_8822B)
-#define BIT_GET_TC0DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC0DATA_8822B) & BIT_MASK_TC0DATA_8822B)
-
-/* 2 REG_TC1_CTRL_8822B */
-#define BIT_TC1INT_EN_8822B BIT(26)
-#define BIT_TC1MODE_8822B BIT(25)
-#define BIT_TC1EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC1DATA_8822B 0
-#define BIT_MASK_TC1DATA_8822B 0xffffff
-#define BIT_TC1DATA_8822B(x) \
- (((x) & BIT_MASK_TC1DATA_8822B) << BIT_SHIFT_TC1DATA_8822B)
-#define BIT_GET_TC1DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC1DATA_8822B) & BIT_MASK_TC1DATA_8822B)
-
-/* 2 REG_TC2_CTRL_8822B */
-#define BIT_TC2INT_EN_8822B BIT(26)
-#define BIT_TC2MODE_8822B BIT(25)
-#define BIT_TC2EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC2DATA_8822B 0
-#define BIT_MASK_TC2DATA_8822B 0xffffff
-#define BIT_TC2DATA_8822B(x) \
- (((x) & BIT_MASK_TC2DATA_8822B) << BIT_SHIFT_TC2DATA_8822B)
-#define BIT_GET_TC2DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC2DATA_8822B) & BIT_MASK_TC2DATA_8822B)
-
-/* 2 REG_TC3_CTRL_8822B */
-#define BIT_TC3INT_EN_8822B BIT(26)
-#define BIT_TC3MODE_8822B BIT(25)
-#define BIT_TC3EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC3DATA_8822B 0
-#define BIT_MASK_TC3DATA_8822B 0xffffff
-#define BIT_TC3DATA_8822B(x) \
- (((x) & BIT_MASK_TC3DATA_8822B) << BIT_SHIFT_TC3DATA_8822B)
-#define BIT_GET_TC3DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC3DATA_8822B) & BIT_MASK_TC3DATA_8822B)
-
-/* 2 REG_TC4_CTRL_8822B */
-#define BIT_TC4INT_EN_8822B BIT(26)
-#define BIT_TC4MODE_8822B BIT(25)
-#define BIT_TC4EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC4DATA_8822B 0
-#define BIT_MASK_TC4DATA_8822B 0xffffff
-#define BIT_TC4DATA_8822B(x) \
- (((x) & BIT_MASK_TC4DATA_8822B) << BIT_SHIFT_TC4DATA_8822B)
-#define BIT_GET_TC4DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC4DATA_8822B) & BIT_MASK_TC4DATA_8822B)
-
-/* 2 REG_TCUNIT_BASE_8822B */
-
-#define BIT_SHIFT_TCUNIT_BASE_8822B 0
-#define BIT_MASK_TCUNIT_BASE_8822B 0x3fff
-#define BIT_TCUNIT_BASE_8822B(x) \
- (((x) & BIT_MASK_TCUNIT_BASE_8822B) << BIT_SHIFT_TCUNIT_BASE_8822B)
-#define BIT_GET_TCUNIT_BASE_8822B(x) \
- (((x) >> BIT_SHIFT_TCUNIT_BASE_8822B) & BIT_MASK_TCUNIT_BASE_8822B)
-
-/* 2 REG_TC5_CTRL_8822B */
-#define BIT_TC5INT_EN_8822B BIT(26)
-#define BIT_TC5MODE_8822B BIT(25)
-#define BIT_TC5EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC5DATA_8822B 0
-#define BIT_MASK_TC5DATA_8822B 0xffffff
-#define BIT_TC5DATA_8822B(x) \
- (((x) & BIT_MASK_TC5DATA_8822B) << BIT_SHIFT_TC5DATA_8822B)
-#define BIT_GET_TC5DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC5DATA_8822B) & BIT_MASK_TC5DATA_8822B)
-
-/* 2 REG_TC6_CTRL_8822B */
-#define BIT_TC6INT_EN_8822B BIT(26)
-#define BIT_TC6MODE_8822B BIT(25)
-#define BIT_TC6EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC6DATA_8822B 0
-#define BIT_MASK_TC6DATA_8822B 0xffffff
-#define BIT_TC6DATA_8822B(x) \
- (((x) & BIT_MASK_TC6DATA_8822B) << BIT_SHIFT_TC6DATA_8822B)
-#define BIT_GET_TC6DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC6DATA_8822B) & BIT_MASK_TC6DATA_8822B)
-
-/* 2 REG_MBIST_FAIL_8822B */
-
-#define BIT_SHIFT_8051_MBIST_FAIL_8822B 26
-#define BIT_MASK_8051_MBIST_FAIL_8822B 0x7
-#define BIT_8051_MBIST_FAIL_8822B(x) \
- (((x) & BIT_MASK_8051_MBIST_FAIL_8822B) \
- << BIT_SHIFT_8051_MBIST_FAIL_8822B)
-#define BIT_GET_8051_MBIST_FAIL_8822B(x) \
- (((x) >> BIT_SHIFT_8051_MBIST_FAIL_8822B) & \
- BIT_MASK_8051_MBIST_FAIL_8822B)
-
-#define BIT_SHIFT_USB_MBIST_FAIL_8822B 24
-#define BIT_MASK_USB_MBIST_FAIL_8822B 0x3
-#define BIT_USB_MBIST_FAIL_8822B(x) \
- (((x) & BIT_MASK_USB_MBIST_FAIL_8822B) \
- << BIT_SHIFT_USB_MBIST_FAIL_8822B)
-#define BIT_GET_USB_MBIST_FAIL_8822B(x) \
- (((x) >> BIT_SHIFT_USB_MBIST_FAIL_8822B) & \
- BIT_MASK_USB_MBIST_FAIL_8822B)
-
-#define BIT_SHIFT_PCIE_MBIST_FAIL_8822B 16
-#define BIT_MASK_PCIE_MBIST_FAIL_8822B 0x3f
-#define BIT_PCIE_MBIST_FAIL_8822B(x) \
- (((x) & BIT_MASK_PCIE_MBIST_FAIL_8822B) \
- << BIT_SHIFT_PCIE_MBIST_FAIL_8822B)
-#define BIT_GET_PCIE_MBIST_FAIL_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_MBIST_FAIL_8822B) & \
- BIT_MASK_PCIE_MBIST_FAIL_8822B)
-
-#define BIT_SHIFT_MAC_MBIST_FAIL_8822B 0
-#define BIT_MASK_MAC_MBIST_FAIL_8822B 0xfff
-#define BIT_MAC_MBIST_FAIL_8822B(x) \
- (((x) & BIT_MASK_MAC_MBIST_FAIL_8822B) \
- << BIT_SHIFT_MAC_MBIST_FAIL_8822B)
-#define BIT_GET_MAC_MBIST_FAIL_8822B(x) \
- (((x) >> BIT_SHIFT_MAC_MBIST_FAIL_8822B) & \
- BIT_MASK_MAC_MBIST_FAIL_8822B)
-
-/* 2 REG_MBIST_START_PAUSE_8822B */
-
-#define BIT_SHIFT_8051_MBIST_START_PAUSE_8822B 26
-#define BIT_MASK_8051_MBIST_START_PAUSE_8822B 0x7
-#define BIT_8051_MBIST_START_PAUSE_8822B(x) \
- (((x) & BIT_MASK_8051_MBIST_START_PAUSE_8822B) \
- << BIT_SHIFT_8051_MBIST_START_PAUSE_8822B)
-#define BIT_GET_8051_MBIST_START_PAUSE_8822B(x) \
- (((x) >> BIT_SHIFT_8051_MBIST_START_PAUSE_8822B) & \
- BIT_MASK_8051_MBIST_START_PAUSE_8822B)
-
-#define BIT_SHIFT_USB_MBIST_START_PAUSE_8822B 24
-#define BIT_MASK_USB_MBIST_START_PAUSE_8822B 0x3
-#define BIT_USB_MBIST_START_PAUSE_8822B(x) \
- (((x) & BIT_MASK_USB_MBIST_START_PAUSE_8822B) \
- << BIT_SHIFT_USB_MBIST_START_PAUSE_8822B)
-#define BIT_GET_USB_MBIST_START_PAUSE_8822B(x) \
- (((x) >> BIT_SHIFT_USB_MBIST_START_PAUSE_8822B) & \
- BIT_MASK_USB_MBIST_START_PAUSE_8822B)
-
-#define BIT_SHIFT_PCIE_MBIST_START_PAUSE_8822B 16
-#define BIT_MASK_PCIE_MBIST_START_PAUSE_8822B 0x3f
-#define BIT_PCIE_MBIST_START_PAUSE_8822B(x) \
- (((x) & BIT_MASK_PCIE_MBIST_START_PAUSE_8822B) \
- << BIT_SHIFT_PCIE_MBIST_START_PAUSE_8822B)
-#define BIT_GET_PCIE_MBIST_START_PAUSE_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_MBIST_START_PAUSE_8822B) & \
- BIT_MASK_PCIE_MBIST_START_PAUSE_8822B)
-
-#define BIT_SHIFT_MAC_MBIST_START_PAUSE_8822B 0
-#define BIT_MASK_MAC_MBIST_START_PAUSE_8822B 0xfff
-#define BIT_MAC_MBIST_START_PAUSE_8822B(x) \
- (((x) & BIT_MASK_MAC_MBIST_START_PAUSE_8822B) \
- << BIT_SHIFT_MAC_MBIST_START_PAUSE_8822B)
-#define BIT_GET_MAC_MBIST_START_PAUSE_8822B(x) \
- (((x) >> BIT_SHIFT_MAC_MBIST_START_PAUSE_8822B) & \
- BIT_MASK_MAC_MBIST_START_PAUSE_8822B)
-
-/* 2 REG_MBIST_DONE_8822B */
-
-#define BIT_SHIFT_8051_MBIST_DONE_8822B 26
-#define BIT_MASK_8051_MBIST_DONE_8822B 0x7
-#define BIT_8051_MBIST_DONE_8822B(x) \
- (((x) & BIT_MASK_8051_MBIST_DONE_8822B) \
- << BIT_SHIFT_8051_MBIST_DONE_8822B)
-#define BIT_GET_8051_MBIST_DONE_8822B(x) \
- (((x) >> BIT_SHIFT_8051_MBIST_DONE_8822B) & \
- BIT_MASK_8051_MBIST_DONE_8822B)
-
-#define BIT_SHIFT_USB_MBIST_DONE_8822B 24
-#define BIT_MASK_USB_MBIST_DONE_8822B 0x3
-#define BIT_USB_MBIST_DONE_8822B(x) \
- (((x) & BIT_MASK_USB_MBIST_DONE_8822B) \
- << BIT_SHIFT_USB_MBIST_DONE_8822B)
-#define BIT_GET_USB_MBIST_DONE_8822B(x) \
- (((x) >> BIT_SHIFT_USB_MBIST_DONE_8822B) & \
- BIT_MASK_USB_MBIST_DONE_8822B)
-
-#define BIT_SHIFT_PCIE_MBIST_DONE_8822B 16
-#define BIT_MASK_PCIE_MBIST_DONE_8822B 0x3f
-#define BIT_PCIE_MBIST_DONE_8822B(x) \
- (((x) & BIT_MASK_PCIE_MBIST_DONE_8822B) \
- << BIT_SHIFT_PCIE_MBIST_DONE_8822B)
-#define BIT_GET_PCIE_MBIST_DONE_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_MBIST_DONE_8822B) & \
- BIT_MASK_PCIE_MBIST_DONE_8822B)
-
-#define BIT_SHIFT_MAC_MBIST_DONE_8822B 0
-#define BIT_MASK_MAC_MBIST_DONE_8822B 0xfff
-#define BIT_MAC_MBIST_DONE_8822B(x) \
- (((x) & BIT_MASK_MAC_MBIST_DONE_8822B) \
- << BIT_SHIFT_MAC_MBIST_DONE_8822B)
-#define BIT_GET_MAC_MBIST_DONE_8822B(x) \
- (((x) >> BIT_SHIFT_MAC_MBIST_DONE_8822B) & \
- BIT_MASK_MAC_MBIST_DONE_8822B)
-
-/* 2 REG_MBIST_FAIL_NRML_8822B */
-
-#define BIT_SHIFT_MBIST_FAIL_NRML_8822B 0
-#define BIT_MASK_MBIST_FAIL_NRML_8822B 0xffffffffL
-#define BIT_MBIST_FAIL_NRML_8822B(x) \
- (((x) & BIT_MASK_MBIST_FAIL_NRML_8822B) \
- << BIT_SHIFT_MBIST_FAIL_NRML_8822B)
-#define BIT_GET_MBIST_FAIL_NRML_8822B(x) \
- (((x) >> BIT_SHIFT_MBIST_FAIL_NRML_8822B) & \
- BIT_MASK_MBIST_FAIL_NRML_8822B)
-
-/* 2 REG_AES_DECRPT_DATA_8822B */
-
-#define BIT_SHIFT_IPS_CFG_ADDR_8822B 0
-#define BIT_MASK_IPS_CFG_ADDR_8822B 0xff
-#define BIT_IPS_CFG_ADDR_8822B(x) \
- (((x) & BIT_MASK_IPS_CFG_ADDR_8822B) << BIT_SHIFT_IPS_CFG_ADDR_8822B)
-#define BIT_GET_IPS_CFG_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_IPS_CFG_ADDR_8822B) & BIT_MASK_IPS_CFG_ADDR_8822B)
-
-/* 2 REG_AES_DECRPT_CFG_8822B */
-
-#define BIT_SHIFT_IPS_CFG_DATA_8822B 0
-#define BIT_MASK_IPS_CFG_DATA_8822B 0xffffffffL
-#define BIT_IPS_CFG_DATA_8822B(x) \
- (((x) & BIT_MASK_IPS_CFG_DATA_8822B) << BIT_SHIFT_IPS_CFG_DATA_8822B)
-#define BIT_GET_IPS_CFG_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_IPS_CFG_DATA_8822B) & BIT_MASK_IPS_CFG_DATA_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_TMETER_8822B */
-#define BIT_TEMP_VALID_8822B BIT(31)
-
-#define BIT_SHIFT_TEMP_VALUE_8822B 24
-#define BIT_MASK_TEMP_VALUE_8822B 0x3f
-#define BIT_TEMP_VALUE_8822B(x) \
- (((x) & BIT_MASK_TEMP_VALUE_8822B) << BIT_SHIFT_TEMP_VALUE_8822B)
-#define BIT_GET_TEMP_VALUE_8822B(x) \
- (((x) >> BIT_SHIFT_TEMP_VALUE_8822B) & BIT_MASK_TEMP_VALUE_8822B)
-
-#define BIT_SHIFT_REG_TMETER_TIMER_8822B 8
-#define BIT_MASK_REG_TMETER_TIMER_8822B 0xfff
-#define BIT_REG_TMETER_TIMER_8822B(x) \
- (((x) & BIT_MASK_REG_TMETER_TIMER_8822B) \
- << BIT_SHIFT_REG_TMETER_TIMER_8822B)
-#define BIT_GET_REG_TMETER_TIMER_8822B(x) \
- (((x) >> BIT_SHIFT_REG_TMETER_TIMER_8822B) & \
- BIT_MASK_REG_TMETER_TIMER_8822B)
-
-#define BIT_SHIFT_REG_TEMP_DELTA_8822B 2
-#define BIT_MASK_REG_TEMP_DELTA_8822B 0x3f
-#define BIT_REG_TEMP_DELTA_8822B(x) \
- (((x) & BIT_MASK_REG_TEMP_DELTA_8822B) \
- << BIT_SHIFT_REG_TEMP_DELTA_8822B)
-#define BIT_GET_REG_TEMP_DELTA_8822B(x) \
- (((x) >> BIT_SHIFT_REG_TEMP_DELTA_8822B) & \
- BIT_MASK_REG_TEMP_DELTA_8822B)
-
-#define BIT_REG_TMETER_EN_8822B BIT(0)
-
-/* 2 REG_OSC_32K_CTRL_8822B */
-
-#define BIT_SHIFT_OSC_32K_CLKGEN_0_8822B 16
-#define BIT_MASK_OSC_32K_CLKGEN_0_8822B 0xffff
-#define BIT_OSC_32K_CLKGEN_0_8822B(x) \
- (((x) & BIT_MASK_OSC_32K_CLKGEN_0_8822B) \
- << BIT_SHIFT_OSC_32K_CLKGEN_0_8822B)
-#define BIT_GET_OSC_32K_CLKGEN_0_8822B(x) \
- (((x) >> BIT_SHIFT_OSC_32K_CLKGEN_0_8822B) & \
- BIT_MASK_OSC_32K_CLKGEN_0_8822B)
-
-#define BIT_SHIFT_OSC_32K_RES_COMP_8822B 4
-#define BIT_MASK_OSC_32K_RES_COMP_8822B 0x3
-#define BIT_OSC_32K_RES_COMP_8822B(x) \
- (((x) & BIT_MASK_OSC_32K_RES_COMP_8822B) \
- << BIT_SHIFT_OSC_32K_RES_COMP_8822B)
-#define BIT_GET_OSC_32K_RES_COMP_8822B(x) \
- (((x) >> BIT_SHIFT_OSC_32K_RES_COMP_8822B) & \
- BIT_MASK_OSC_32K_RES_COMP_8822B)
-
-#define BIT_OSC_32K_OUT_SEL_8822B BIT(3)
-#define BIT_ISO_WL_2_OSC_32K_8822B BIT(1)
-#define BIT_POW_CKGEN_8822B BIT(0)
-
-/* 2 REG_32K_CAL_REG1_8822B */
-#define BIT_CAL_32K_REG_WR_8822B BIT(31)
-#define BIT_CAL_32K_DBG_SEL_8822B BIT(22)
-
-#define BIT_SHIFT_CAL_32K_REG_ADDR_8822B 16
-#define BIT_MASK_CAL_32K_REG_ADDR_8822B 0x3f
-#define BIT_CAL_32K_REG_ADDR_8822B(x) \
- (((x) & BIT_MASK_CAL_32K_REG_ADDR_8822B) \
- << BIT_SHIFT_CAL_32K_REG_ADDR_8822B)
-#define BIT_GET_CAL_32K_REG_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_CAL_32K_REG_ADDR_8822B) & \
- BIT_MASK_CAL_32K_REG_ADDR_8822B)
-
-#define BIT_SHIFT_CAL_32K_REG_DATA_8822B 0
-#define BIT_MASK_CAL_32K_REG_DATA_8822B 0xffff
-#define BIT_CAL_32K_REG_DATA_8822B(x) \
- (((x) & BIT_MASK_CAL_32K_REG_DATA_8822B) \
- << BIT_SHIFT_CAL_32K_REG_DATA_8822B)
-#define BIT_GET_CAL_32K_REG_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_CAL_32K_REG_DATA_8822B) & \
- BIT_MASK_CAL_32K_REG_DATA_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_C2HEVT_8822B */
-
-#define BIT_SHIFT_C2HEVT_MSG_8822B 0
-#define BIT_MASK_C2HEVT_MSG_8822B 0xffffffffffffffffffffffffffffffffL
-#define BIT_C2HEVT_MSG_8822B(x) \
- (((x) & BIT_MASK_C2HEVT_MSG_8822B) << BIT_SHIFT_C2HEVT_MSG_8822B)
-#define BIT_GET_C2HEVT_MSG_8822B(x) \
- (((x) >> BIT_SHIFT_C2HEVT_MSG_8822B) & BIT_MASK_C2HEVT_MSG_8822B)
-
-/* 2 REG_SW_DEFINED_PAGE1_8822B */
-
-#define BIT_SHIFT_SW_DEFINED_PAGE1_8822B 0
-#define BIT_MASK_SW_DEFINED_PAGE1_8822B 0xffffffffffffffffL
-#define BIT_SW_DEFINED_PAGE1_8822B(x) \
- (((x) & BIT_MASK_SW_DEFINED_PAGE1_8822B) \
- << BIT_SHIFT_SW_DEFINED_PAGE1_8822B)
-#define BIT_GET_SW_DEFINED_PAGE1_8822B(x) \
- (((x) >> BIT_SHIFT_SW_DEFINED_PAGE1_8822B) & \
- BIT_MASK_SW_DEFINED_PAGE1_8822B)
-
-/* 2 REG_MCUTST_I_8822B */
-
-#define BIT_SHIFT_MCUDMSG_I_8822B 0
-#define BIT_MASK_MCUDMSG_I_8822B 0xffffffffL
-#define BIT_MCUDMSG_I_8822B(x) \
- (((x) & BIT_MASK_MCUDMSG_I_8822B) << BIT_SHIFT_MCUDMSG_I_8822B)
-#define BIT_GET_MCUDMSG_I_8822B(x) \
- (((x) >> BIT_SHIFT_MCUDMSG_I_8822B) & BIT_MASK_MCUDMSG_I_8822B)
-
-/* 2 REG_MCUTST_II_8822B */
-
-#define BIT_SHIFT_MCUDMSG_II_8822B 0
-#define BIT_MASK_MCUDMSG_II_8822B 0xffffffffL
-#define BIT_MCUDMSG_II_8822B(x) \
- (((x) & BIT_MASK_MCUDMSG_II_8822B) << BIT_SHIFT_MCUDMSG_II_8822B)
-#define BIT_GET_MCUDMSG_II_8822B(x) \
- (((x) >> BIT_SHIFT_MCUDMSG_II_8822B) & BIT_MASK_MCUDMSG_II_8822B)
-
-/* 2 REG_FMETHR_8822B */
-#define BIT_FMSG_INT_8822B BIT(31)
-
-#define BIT_SHIFT_FW_MSG_8822B 0
-#define BIT_MASK_FW_MSG_8822B 0xffffffffL
-#define BIT_FW_MSG_8822B(x) \
- (((x) & BIT_MASK_FW_MSG_8822B) << BIT_SHIFT_FW_MSG_8822B)
-#define BIT_GET_FW_MSG_8822B(x) \
- (((x) >> BIT_SHIFT_FW_MSG_8822B) & BIT_MASK_FW_MSG_8822B)
-
-/* 2 REG_HMETFR_8822B */
-
-#define BIT_SHIFT_HRCV_MSG_8822B 24
-#define BIT_MASK_HRCV_MSG_8822B 0xff
-#define BIT_HRCV_MSG_8822B(x) \
- (((x) & BIT_MASK_HRCV_MSG_8822B) << BIT_SHIFT_HRCV_MSG_8822B)
-#define BIT_GET_HRCV_MSG_8822B(x) \
- (((x) >> BIT_SHIFT_HRCV_MSG_8822B) & BIT_MASK_HRCV_MSG_8822B)
-
-#define BIT_INT_BOX3_8822B BIT(3)
-#define BIT_INT_BOX2_8822B BIT(2)
-#define BIT_INT_BOX1_8822B BIT(1)
-#define BIT_INT_BOX0_8822B BIT(0)
-
-/* 2 REG_HMEBOX0_8822B */
-
-#define BIT_SHIFT_HOST_MSG_0_8822B 0
-#define BIT_MASK_HOST_MSG_0_8822B 0xffffffffL
-#define BIT_HOST_MSG_0_8822B(x) \
- (((x) & BIT_MASK_HOST_MSG_0_8822B) << BIT_SHIFT_HOST_MSG_0_8822B)
-#define BIT_GET_HOST_MSG_0_8822B(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_0_8822B) & BIT_MASK_HOST_MSG_0_8822B)
-
-/* 2 REG_HMEBOX1_8822B */
-
-#define BIT_SHIFT_HOST_MSG_1_8822B 0
-#define BIT_MASK_HOST_MSG_1_8822B 0xffffffffL
-#define BIT_HOST_MSG_1_8822B(x) \
- (((x) & BIT_MASK_HOST_MSG_1_8822B) << BIT_SHIFT_HOST_MSG_1_8822B)
-#define BIT_GET_HOST_MSG_1_8822B(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_1_8822B) & BIT_MASK_HOST_MSG_1_8822B)
-
-/* 2 REG_HMEBOX2_8822B */
-
-#define BIT_SHIFT_HOST_MSG_2_8822B 0
-#define BIT_MASK_HOST_MSG_2_8822B 0xffffffffL
-#define BIT_HOST_MSG_2_8822B(x) \
- (((x) & BIT_MASK_HOST_MSG_2_8822B) << BIT_SHIFT_HOST_MSG_2_8822B)
-#define BIT_GET_HOST_MSG_2_8822B(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_2_8822B) & BIT_MASK_HOST_MSG_2_8822B)
-
-/* 2 REG_HMEBOX3_8822B */
-
-#define BIT_SHIFT_HOST_MSG_3_8822B 0
-#define BIT_MASK_HOST_MSG_3_8822B 0xffffffffL
-#define BIT_HOST_MSG_3_8822B(x) \
- (((x) & BIT_MASK_HOST_MSG_3_8822B) << BIT_SHIFT_HOST_MSG_3_8822B)
-#define BIT_GET_HOST_MSG_3_8822B(x) \
- (((x) >> BIT_SHIFT_HOST_MSG_3_8822B) & BIT_MASK_HOST_MSG_3_8822B)
-
-/* 2 REG_LLT_INIT_8822B */
-
-#define BIT_SHIFT_LLTE_RWM_8822B 30
-#define BIT_MASK_LLTE_RWM_8822B 0x3
-#define BIT_LLTE_RWM_8822B(x) \
- (((x) & BIT_MASK_LLTE_RWM_8822B) << BIT_SHIFT_LLTE_RWM_8822B)
-#define BIT_GET_LLTE_RWM_8822B(x) \
- (((x) >> BIT_SHIFT_LLTE_RWM_8822B) & BIT_MASK_LLTE_RWM_8822B)
-
-#define BIT_SHIFT_LLTINI_PDATA_V1_8822B 16
-#define BIT_MASK_LLTINI_PDATA_V1_8822B 0xfff
-#define BIT_LLTINI_PDATA_V1_8822B(x) \
- (((x) & BIT_MASK_LLTINI_PDATA_V1_8822B) \
- << BIT_SHIFT_LLTINI_PDATA_V1_8822B)
-#define BIT_GET_LLTINI_PDATA_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LLTINI_PDATA_V1_8822B) & \
- BIT_MASK_LLTINI_PDATA_V1_8822B)
-
-#define BIT_SHIFT_LLTINI_HDATA_V1_8822B 0
-#define BIT_MASK_LLTINI_HDATA_V1_8822B 0xfff
-#define BIT_LLTINI_HDATA_V1_8822B(x) \
- (((x) & BIT_MASK_LLTINI_HDATA_V1_8822B) \
- << BIT_SHIFT_LLTINI_HDATA_V1_8822B)
-#define BIT_GET_LLTINI_HDATA_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LLTINI_HDATA_V1_8822B) & \
- BIT_MASK_LLTINI_HDATA_V1_8822B)
-
-/* 2 REG_LLT_INIT_ADDR_8822B */
-
-#define BIT_SHIFT_LLTINI_ADDR_V1_8822B 0
-#define BIT_MASK_LLTINI_ADDR_V1_8822B 0xfff
-#define BIT_LLTINI_ADDR_V1_8822B(x) \
- (((x) & BIT_MASK_LLTINI_ADDR_V1_8822B) \
- << BIT_SHIFT_LLTINI_ADDR_V1_8822B)
-#define BIT_GET_LLTINI_ADDR_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LLTINI_ADDR_V1_8822B) & \
- BIT_MASK_LLTINI_ADDR_V1_8822B)
-
-/* 2 REG_BB_ACCESS_CTRL_8822B */
-
-#define BIT_SHIFT_BB_WRITE_READ_8822B 30
-#define BIT_MASK_BB_WRITE_READ_8822B 0x3
-#define BIT_BB_WRITE_READ_8822B(x) \
- (((x) & BIT_MASK_BB_WRITE_READ_8822B) << BIT_SHIFT_BB_WRITE_READ_8822B)
-#define BIT_GET_BB_WRITE_READ_8822B(x) \
- (((x) >> BIT_SHIFT_BB_WRITE_READ_8822B) & BIT_MASK_BB_WRITE_READ_8822B)
-
-#define BIT_SHIFT_BB_WRITE_EN_8822B 12
-#define BIT_MASK_BB_WRITE_EN_8822B 0xf
-#define BIT_BB_WRITE_EN_8822B(x) \
- (((x) & BIT_MASK_BB_WRITE_EN_8822B) << BIT_SHIFT_BB_WRITE_EN_8822B)
-#define BIT_GET_BB_WRITE_EN_8822B(x) \
- (((x) >> BIT_SHIFT_BB_WRITE_EN_8822B) & BIT_MASK_BB_WRITE_EN_8822B)
-
-#define BIT_SHIFT_BB_ADDR_8822B 2
-#define BIT_MASK_BB_ADDR_8822B 0x1ff
-#define BIT_BB_ADDR_8822B(x) \
- (((x) & BIT_MASK_BB_ADDR_8822B) << BIT_SHIFT_BB_ADDR_8822B)
-#define BIT_GET_BB_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_BB_ADDR_8822B) & BIT_MASK_BB_ADDR_8822B)
-
-#define BIT_BB_ERRACC_8822B BIT(0)
-
-/* 2 REG_BB_ACCESS_DATA_8822B */
-
-#define BIT_SHIFT_BB_DATA_8822B 0
-#define BIT_MASK_BB_DATA_8822B 0xffffffffL
-#define BIT_BB_DATA_8822B(x) \
- (((x) & BIT_MASK_BB_DATA_8822B) << BIT_SHIFT_BB_DATA_8822B)
-#define BIT_GET_BB_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_BB_DATA_8822B) & BIT_MASK_BB_DATA_8822B)
-
-/* 2 REG_HMEBOX_E0_8822B */
-
-#define BIT_SHIFT_HMEBOX_E0_8822B 0
-#define BIT_MASK_HMEBOX_E0_8822B 0xffffffffL
-#define BIT_HMEBOX_E0_8822B(x) \
- (((x) & BIT_MASK_HMEBOX_E0_8822B) << BIT_SHIFT_HMEBOX_E0_8822B)
-#define BIT_GET_HMEBOX_E0_8822B(x) \
- (((x) >> BIT_SHIFT_HMEBOX_E0_8822B) & BIT_MASK_HMEBOX_E0_8822B)
-
-/* 2 REG_HMEBOX_E1_8822B */
-
-#define BIT_SHIFT_HMEBOX_E1_8822B 0
-#define BIT_MASK_HMEBOX_E1_8822B 0xffffffffL
-#define BIT_HMEBOX_E1_8822B(x) \
- (((x) & BIT_MASK_HMEBOX_E1_8822B) << BIT_SHIFT_HMEBOX_E1_8822B)
-#define BIT_GET_HMEBOX_E1_8822B(x) \
- (((x) >> BIT_SHIFT_HMEBOX_E1_8822B) & BIT_MASK_HMEBOX_E1_8822B)
-
-/* 2 REG_HMEBOX_E2_8822B */
-
-#define BIT_SHIFT_HMEBOX_E2_8822B 0
-#define BIT_MASK_HMEBOX_E2_8822B 0xffffffffL
-#define BIT_HMEBOX_E2_8822B(x) \
- (((x) & BIT_MASK_HMEBOX_E2_8822B) << BIT_SHIFT_HMEBOX_E2_8822B)
-#define BIT_GET_HMEBOX_E2_8822B(x) \
- (((x) >> BIT_SHIFT_HMEBOX_E2_8822B) & BIT_MASK_HMEBOX_E2_8822B)
-
-/* 2 REG_HMEBOX_E3_8822B */
-
-#define BIT_SHIFT_HMEBOX_E3_8822B 0
-#define BIT_MASK_HMEBOX_E3_8822B 0xffffffffL
-#define BIT_HMEBOX_E3_8822B(x) \
- (((x) & BIT_MASK_HMEBOX_E3_8822B) << BIT_SHIFT_HMEBOX_E3_8822B)
-#define BIT_GET_HMEBOX_E3_8822B(x) \
- (((x) >> BIT_SHIFT_HMEBOX_E3_8822B) & BIT_MASK_HMEBOX_E3_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_CR_EXT_8822B */
-
-#define BIT_SHIFT_PHY_REQ_DELAY_8822B 24
-#define BIT_MASK_PHY_REQ_DELAY_8822B 0xf
-#define BIT_PHY_REQ_DELAY_8822B(x) \
- (((x) & BIT_MASK_PHY_REQ_DELAY_8822B) << BIT_SHIFT_PHY_REQ_DELAY_8822B)
-#define BIT_GET_PHY_REQ_DELAY_8822B(x) \
- (((x) >> BIT_SHIFT_PHY_REQ_DELAY_8822B) & BIT_MASK_PHY_REQ_DELAY_8822B)
-
-#define BIT_SPD_DOWN_8822B BIT(16)
-
-#define BIT_SHIFT_NETYPE4_8822B 4
-#define BIT_MASK_NETYPE4_8822B 0x3
-#define BIT_NETYPE4_8822B(x) \
- (((x) & BIT_MASK_NETYPE4_8822B) << BIT_SHIFT_NETYPE4_8822B)
-#define BIT_GET_NETYPE4_8822B(x) \
- (((x) >> BIT_SHIFT_NETYPE4_8822B) & BIT_MASK_NETYPE4_8822B)
-
-#define BIT_SHIFT_NETYPE3_8822B 2
-#define BIT_MASK_NETYPE3_8822B 0x3
-#define BIT_NETYPE3_8822B(x) \
- (((x) & BIT_MASK_NETYPE3_8822B) << BIT_SHIFT_NETYPE3_8822B)
-#define BIT_GET_NETYPE3_8822B(x) \
- (((x) >> BIT_SHIFT_NETYPE3_8822B) & BIT_MASK_NETYPE3_8822B)
-
-#define BIT_SHIFT_NETYPE2_8822B 0
-#define BIT_MASK_NETYPE2_8822B 0x3
-#define BIT_NETYPE2_8822B(x) \
- (((x) & BIT_MASK_NETYPE2_8822B) << BIT_SHIFT_NETYPE2_8822B)
-#define BIT_GET_NETYPE2_8822B(x) \
- (((x) >> BIT_SHIFT_NETYPE2_8822B) & BIT_MASK_NETYPE2_8822B)
-
-/* 2 REG_FWFF_8822B */
-
-#define BIT_SHIFT_PKTNUM_TH_V1_8822B 24
-#define BIT_MASK_PKTNUM_TH_V1_8822B 0xff
-#define BIT_PKTNUM_TH_V1_8822B(x) \
- (((x) & BIT_MASK_PKTNUM_TH_V1_8822B) << BIT_SHIFT_PKTNUM_TH_V1_8822B)
-#define BIT_GET_PKTNUM_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PKTNUM_TH_V1_8822B) & BIT_MASK_PKTNUM_TH_V1_8822B)
-
-#define BIT_SHIFT_TIMER_TH_8822B 16
-#define BIT_MASK_TIMER_TH_8822B 0xff
-#define BIT_TIMER_TH_8822B(x) \
- (((x) & BIT_MASK_TIMER_TH_8822B) << BIT_SHIFT_TIMER_TH_8822B)
-#define BIT_GET_TIMER_TH_8822B(x) \
- (((x) >> BIT_SHIFT_TIMER_TH_8822B) & BIT_MASK_TIMER_TH_8822B)
-
-#define BIT_SHIFT_RXPKT1ENADDR_8822B 0
-#define BIT_MASK_RXPKT1ENADDR_8822B 0xffff
-#define BIT_RXPKT1ENADDR_8822B(x) \
- (((x) & BIT_MASK_RXPKT1ENADDR_8822B) << BIT_SHIFT_RXPKT1ENADDR_8822B)
-#define BIT_GET_RXPKT1ENADDR_8822B(x) \
- (((x) >> BIT_SHIFT_RXPKT1ENADDR_8822B) & BIT_MASK_RXPKT1ENADDR_8822B)
-
-/* 2 REG_RXFF_PTR_V1_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_RXFF0_RDPTR_V2_8822B 0
-#define BIT_MASK_RXFF0_RDPTR_V2_8822B 0x3ffff
-#define BIT_RXFF0_RDPTR_V2_8822B(x) \
- (((x) & BIT_MASK_RXFF0_RDPTR_V2_8822B) \
- << BIT_SHIFT_RXFF0_RDPTR_V2_8822B)
-#define BIT_GET_RXFF0_RDPTR_V2_8822B(x) \
- (((x) >> BIT_SHIFT_RXFF0_RDPTR_V2_8822B) & \
- BIT_MASK_RXFF0_RDPTR_V2_8822B)
-
-/* 2 REG_RXFF_WTR_V1_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_RXFF0_WTPTR_V2_8822B 0
-#define BIT_MASK_RXFF0_WTPTR_V2_8822B 0x3ffff
-#define BIT_RXFF0_WTPTR_V2_8822B(x) \
- (((x) & BIT_MASK_RXFF0_WTPTR_V2_8822B) \
- << BIT_SHIFT_RXFF0_WTPTR_V2_8822B)
-#define BIT_GET_RXFF0_WTPTR_V2_8822B(x) \
- (((x) >> BIT_SHIFT_RXFF0_WTPTR_V2_8822B) & \
- BIT_MASK_RXFF0_WTPTR_V2_8822B)
-
-/* 2 REG_FE2IMR_8822B */
-#define BIT__FE4ISR__IND_MSK_8822B BIT(29)
-#define BIT_FS_TXSC_DESC_DONE_INT_EN_8822B BIT(28)
-#define BIT_FS_TXSC_BKDONE_INT_EN_8822B BIT(27)
-#define BIT_FS_TXSC_BEDONE_INT_EN_8822B BIT(26)
-#define BIT_FS_TXSC_VIDONE_INT_EN_8822B BIT(25)
-#define BIT_FS_TXSC_VODONE_INT_EN_8822B BIT(24)
-#define BIT_FS_ATIM_MB7_INT_EN_8822B BIT(23)
-#define BIT_FS_ATIM_MB6_INT_EN_8822B BIT(22)
-#define BIT_FS_ATIM_MB5_INT_EN_8822B BIT(21)
-#define BIT_FS_ATIM_MB4_INT_EN_8822B BIT(20)
-#define BIT_FS_ATIM_MB3_INT_EN_8822B BIT(19)
-#define BIT_FS_ATIM_MB2_INT_EN_8822B BIT(18)
-#define BIT_FS_ATIM_MB1_INT_EN_8822B BIT(17)
-#define BIT_FS_ATIM_MB0_INT_EN_8822B BIT(16)
-#define BIT_FS_TBTT4INT_EN_8822B BIT(11)
-#define BIT_FS_TBTT3INT_EN_8822B BIT(10)
-#define BIT_FS_TBTT2INT_EN_8822B BIT(9)
-#define BIT_FS_TBTT1INT_EN_8822B BIT(8)
-#define BIT_FS_TBTT0_MB7INT_EN_8822B BIT(7)
-#define BIT_FS_TBTT0_MB6INT_EN_8822B BIT(6)
-#define BIT_FS_TBTT0_MB5INT_EN_8822B BIT(5)
-#define BIT_FS_TBTT0_MB4INT_EN_8822B BIT(4)
-#define BIT_FS_TBTT0_MB3INT_EN_8822B BIT(3)
-#define BIT_FS_TBTT0_MB2INT_EN_8822B BIT(2)
-#define BIT_FS_TBTT0_MB1INT_EN_8822B BIT(1)
-#define BIT_FS_TBTT0_INT_EN_8822B BIT(0)
-
-/* 2 REG_FE2ISR_8822B */
-#define BIT__FE4ISR__IND_INT_8822B BIT(29)
-#define BIT_FS_TXSC_DESC_DONE_INT_8822B BIT(28)
-#define BIT_FS_TXSC_BKDONE_INT_8822B BIT(27)
-#define BIT_FS_TXSC_BEDONE_INT_8822B BIT(26)
-#define BIT_FS_TXSC_VIDONE_INT_8822B BIT(25)
-#define BIT_FS_TXSC_VODONE_INT_8822B BIT(24)
-#define BIT_FS_ATIM_MB7_INT_8822B BIT(23)
-#define BIT_FS_ATIM_MB6_INT_8822B BIT(22)
-#define BIT_FS_ATIM_MB5_INT_8822B BIT(21)
-#define BIT_FS_ATIM_MB4_INT_8822B BIT(20)
-#define BIT_FS_ATIM_MB3_INT_8822B BIT(19)
-#define BIT_FS_ATIM_MB2_INT_8822B BIT(18)
-#define BIT_FS_ATIM_MB1_INT_8822B BIT(17)
-#define BIT_FS_ATIM_MB0_INT_8822B BIT(16)
-#define BIT_FS_TBTT4INT_8822B BIT(11)
-#define BIT_FS_TBTT3INT_8822B BIT(10)
-#define BIT_FS_TBTT2INT_8822B BIT(9)
-#define BIT_FS_TBTT1INT_8822B BIT(8)
-#define BIT_FS_TBTT0_MB7INT_8822B BIT(7)
-#define BIT_FS_TBTT0_MB6INT_8822B BIT(6)
-#define BIT_FS_TBTT0_MB5INT_8822B BIT(5)
-#define BIT_FS_TBTT0_MB4INT_8822B BIT(4)
-#define BIT_FS_TBTT0_MB3INT_8822B BIT(3)
-#define BIT_FS_TBTT0_MB2INT_8822B BIT(2)
-#define BIT_FS_TBTT0_MB1INT_8822B BIT(1)
-#define BIT_FS_TBTT0_INT_8822B BIT(0)
-
-/* 2 REG_FE3IMR_8822B */
-#define BIT_FS_CLI3_MTI_BCNIVLEAR_INT__EN_8822B BIT(31)
-#define BIT_FS_CLI2_MTI_BCNIVLEAR_INT__EN_8822B BIT(30)
-#define BIT_FS_CLI1_MTI_BCNIVLEAR_INT__EN_8822B BIT(29)
-#define BIT_FS_CLI0_MTI_BCNIVLEAR_INT__EN_8822B BIT(28)
-#define BIT_FS_BCNDMA4_INT_EN_8822B BIT(27)
-#define BIT_FS_BCNDMA3_INT_EN_8822B BIT(26)
-#define BIT_FS_BCNDMA2_INT_EN_8822B BIT(25)
-#define BIT_FS_BCNDMA1_INT_EN_8822B BIT(24)
-#define BIT_FS_BCNDMA0_MB7_INT_EN_8822B BIT(23)
-#define BIT_FS_BCNDMA0_MB6_INT_EN_8822B BIT(22)
-#define BIT_FS_BCNDMA0_MB5_INT_EN_8822B BIT(21)
-#define BIT_FS_BCNDMA0_MB4_INT_EN_8822B BIT(20)
-#define BIT_FS_BCNDMA0_MB3_INT_EN_8822B BIT(19)
-#define BIT_FS_BCNDMA0_MB2_INT_EN_8822B BIT(18)
-#define BIT_FS_BCNDMA0_MB1_INT_EN_8822B BIT(17)
-#define BIT_FS_BCNDMA0_INT_EN_8822B BIT(16)
-#define BIT_FS_MTI_BCNIVLEAR_INT__EN_8822B BIT(15)
-#define BIT_FS_BCNERLY4_INT_EN_8822B BIT(11)
-#define BIT_FS_BCNERLY3_INT_EN_8822B BIT(10)
-#define BIT_FS_BCNERLY2_INT_EN_8822B BIT(9)
-#define BIT_FS_BCNERLY1_INT_EN_8822B BIT(8)
-#define BIT_FS_BCNERLY0_MB7INT_EN_8822B BIT(7)
-#define BIT_FS_BCNERLY0_MB6INT_EN_8822B BIT(6)
-#define BIT_FS_BCNERLY0_MB5INT_EN_8822B BIT(5)
-#define BIT_FS_BCNERLY0_MB4INT_EN_8822B BIT(4)
-#define BIT_FS_BCNERLY0_MB3INT_EN_8822B BIT(3)
-#define BIT_FS_BCNERLY0_MB2INT_EN_8822B BIT(2)
-#define BIT_FS_BCNERLY0_MB1INT_EN_8822B BIT(1)
-#define BIT_FS_BCNERLY0_INT_EN_8822B BIT(0)
-
-/* 2 REG_FE3ISR_8822B */
-#define BIT_FS_CLI3_MTI_BCNIVLEAR_INT_8822B BIT(31)
-#define BIT_FS_CLI2_MTI_BCNIVLEAR_INT_8822B BIT(30)
-#define BIT_FS_CLI1_MTI_BCNIVLEAR_INT_8822B BIT(29)
-#define BIT_FS_CLI0_MTI_BCNIVLEAR_INT_8822B BIT(28)
-#define BIT_FS_BCNDMA4_INT_8822B BIT(27)
-#define BIT_FS_BCNDMA3_INT_8822B BIT(26)
-#define BIT_FS_BCNDMA2_INT_8822B BIT(25)
-#define BIT_FS_BCNDMA1_INT_8822B BIT(24)
-#define BIT_FS_BCNDMA0_MB7_INT_8822B BIT(23)
-#define BIT_FS_BCNDMA0_MB6_INT_8822B BIT(22)
-#define BIT_FS_BCNDMA0_MB5_INT_8822B BIT(21)
-#define BIT_FS_BCNDMA0_MB4_INT_8822B BIT(20)
-#define BIT_FS_BCNDMA0_MB3_INT_8822B BIT(19)
-#define BIT_FS_BCNDMA0_MB2_INT_8822B BIT(18)
-#define BIT_FS_BCNDMA0_MB1_INT_8822B BIT(17)
-#define BIT_FS_BCNDMA0_INT_8822B BIT(16)
-#define BIT_FS_MTI_BCNIVLEAR_INT_8822B BIT(15)
-#define BIT_FS_BCNERLY4_INT_8822B BIT(11)
-#define BIT_FS_BCNERLY3_INT_8822B BIT(10)
-#define BIT_FS_BCNERLY2_INT_8822B BIT(9)
-#define BIT_FS_BCNERLY1_INT_8822B BIT(8)
-#define BIT_FS_BCNERLY0_MB7INT_8822B BIT(7)
-#define BIT_FS_BCNERLY0_MB6INT_8822B BIT(6)
-#define BIT_FS_BCNERLY0_MB5INT_8822B BIT(5)
-#define BIT_FS_BCNERLY0_MB4INT_8822B BIT(4)
-#define BIT_FS_BCNERLY0_MB3INT_8822B BIT(3)
-#define BIT_FS_BCNERLY0_MB2INT_8822B BIT(2)
-#define BIT_FS_BCNERLY0_MB1INT_8822B BIT(1)
-#define BIT_FS_BCNERLY0_INT_8822B BIT(0)
-
-/* 2 REG_FE4IMR_8822B */
-#define BIT_FS_CLI3_TXPKTIN_INT_EN_8822B BIT(19)
-#define BIT_FS_CLI2_TXPKTIN_INT_EN_8822B BIT(18)
-#define BIT_FS_CLI1_TXPKTIN_INT_EN_8822B BIT(17)
-#define BIT_FS_CLI0_TXPKTIN_INT_EN_8822B BIT(16)
-#define BIT_FS_CLI3_RX_UMD0_INT_EN_8822B BIT(15)
-#define BIT_FS_CLI3_RX_UMD1_INT_EN_8822B BIT(14)
-#define BIT_FS_CLI3_RX_BMD0_INT_EN_8822B BIT(13)
-#define BIT_FS_CLI3_RX_BMD1_INT_EN_8822B BIT(12)
-#define BIT_FS_CLI2_RX_UMD0_INT_EN_8822B BIT(11)
-#define BIT_FS_CLI2_RX_UMD1_INT_EN_8822B BIT(10)
-#define BIT_FS_CLI2_RX_BMD0_INT_EN_8822B BIT(9)
-#define BIT_FS_CLI2_RX_BMD1_INT_EN_8822B BIT(8)
-#define BIT_FS_CLI1_RX_UMD0_INT_EN_8822B BIT(7)
-#define BIT_FS_CLI1_RX_UMD1_INT_EN_8822B BIT(6)
-#define BIT_FS_CLI1_RX_BMD0_INT_EN_8822B BIT(5)
-#define BIT_FS_CLI1_RX_BMD1_INT_EN_8822B BIT(4)
-#define BIT_FS_CLI0_RX_UMD0_INT_EN_8822B BIT(3)
-#define BIT_FS_CLI0_RX_UMD1_INT_EN_8822B BIT(2)
-#define BIT_FS_CLI0_RX_BMD0_INT_EN_8822B BIT(1)
-#define BIT_FS_CLI0_RX_BMD1_INT_EN_8822B BIT(0)
-
-/* 2 REG_FE4ISR_8822B */
-#define BIT_FS_CLI3_TXPKTIN_INT_8822B BIT(19)
-#define BIT_FS_CLI2_TXPKTIN_INT_8822B BIT(18)
-#define BIT_FS_CLI1_TXPKTIN_INT_8822B BIT(17)
-#define BIT_FS_CLI0_TXPKTIN_INT_8822B BIT(16)
-#define BIT_FS_CLI3_RX_UMD0_INT_8822B BIT(15)
-#define BIT_FS_CLI3_RX_UMD1_INT_8822B BIT(14)
-#define BIT_FS_CLI3_RX_BMD0_INT_8822B BIT(13)
-#define BIT_FS_CLI3_RX_BMD1_INT_8822B BIT(12)
-#define BIT_FS_CLI2_RX_UMD0_INT_8822B BIT(11)
-#define BIT_FS_CLI2_RX_UMD1_INT_8822B BIT(10)
-#define BIT_FS_CLI2_RX_BMD0_INT_8822B BIT(9)
-#define BIT_FS_CLI2_RX_BMD1_INT_8822B BIT(8)
-#define BIT_FS_CLI1_RX_UMD0_INT_8822B BIT(7)
-#define BIT_FS_CLI1_RX_UMD1_INT_8822B BIT(6)
-#define BIT_FS_CLI1_RX_BMD0_INT_8822B BIT(5)
-#define BIT_FS_CLI1_RX_BMD1_INT_8822B BIT(4)
-#define BIT_FS_CLI0_RX_UMD0_INT_8822B BIT(3)
-#define BIT_FS_CLI0_RX_UMD1_INT_8822B BIT(2)
-#define BIT_FS_CLI0_RX_BMD0_INT_8822B BIT(1)
-#define BIT_FS_CLI0_RX_BMD1_INT_8822B BIT(0)
-
-/* 2 REG_FT1IMR_8822B */
-#define BIT__FT2ISR__IND_MSK_8822B BIT(30)
-#define BIT_FTM_PTT_INT_EN_8822B BIT(29)
-#define BIT_RXFTMREQ_INT_EN_8822B BIT(28)
-#define BIT_RXFTM_INT_EN_8822B BIT(27)
-#define BIT_TXFTM_INT_EN_8822B BIT(26)
-#define BIT_FS_H2C_CMD_OK_INT_EN_8822B BIT(25)
-#define BIT_FS_H2C_CMD_FULL_INT_EN_8822B BIT(24)
-#define BIT_FS_MACID_PWRCHANGE5_INT_EN_8822B BIT(23)
-#define BIT_FS_MACID_PWRCHANGE4_INT_EN_8822B BIT(22)
-#define BIT_FS_MACID_PWRCHANGE3_INT_EN_8822B BIT(21)
-#define BIT_FS_MACID_PWRCHANGE2_INT_EN_8822B BIT(20)
-#define BIT_FS_MACID_PWRCHANGE1_INT_EN_8822B BIT(19)
-#define BIT_FS_MACID_PWRCHANGE0_INT_EN_8822B BIT(18)
-#define BIT_FS_CTWEND2_INT_EN_8822B BIT(17)
-#define BIT_FS_CTWEND1_INT_EN_8822B BIT(16)
-#define BIT_FS_CTWEND0_INT_EN_8822B BIT(15)
-#define BIT_FS_TX_NULL1_INT_EN_8822B BIT(14)
-#define BIT_FS_TX_NULL0_INT_EN_8822B BIT(13)
-#define BIT_FS_TSF_BIT32_TOGGLE_EN_8822B BIT(12)
-#define BIT_FS_P2P_RFON2_INT_EN_8822B BIT(11)
-#define BIT_FS_P2P_RFOFF2_INT_EN_8822B BIT(10)
-#define BIT_FS_P2P_RFON1_INT_EN_8822B BIT(9)
-#define BIT_FS_P2P_RFOFF1_INT_EN_8822B BIT(8)
-#define BIT_FS_P2P_RFON0_INT_EN_8822B BIT(7)
-#define BIT_FS_P2P_RFOFF0_INT_EN_8822B BIT(6)
-#define BIT_FS_RX_UAPSDMD1_EN_8822B BIT(5)
-#define BIT_FS_RX_UAPSDMD0_EN_8822B BIT(4)
-#define BIT_FS_TRIGGER_PKT_EN_8822B BIT(3)
-#define BIT_FS_EOSP_INT_EN_8822B BIT(2)
-#define BIT_FS_RPWM2_INT_EN_8822B BIT(1)
-#define BIT_FS_RPWM_INT_EN_8822B BIT(0)
-
-/* 2 REG_FT1ISR_8822B */
-#define BIT__FT2ISR__IND_INT_8822B BIT(30)
-#define BIT_FTM_PTT_INT_8822B BIT(29)
-#define BIT_RXFTMREQ_INT_8822B BIT(28)
-#define BIT_RXFTM_INT_8822B BIT(27)
-#define BIT_TXFTM_INT_8822B BIT(26)
-#define BIT_FS_H2C_CMD_OK_INT_8822B BIT(25)
-#define BIT_FS_H2C_CMD_FULL_INT_8822B BIT(24)
-#define BIT_FS_MACID_PWRCHANGE5_INT_8822B BIT(23)
-#define BIT_FS_MACID_PWRCHANGE4_INT_8822B BIT(22)
-#define BIT_FS_MACID_PWRCHANGE3_INT_8822B BIT(21)
-#define BIT_FS_MACID_PWRCHANGE2_INT_8822B BIT(20)
-#define BIT_FS_MACID_PWRCHANGE1_INT_8822B BIT(19)
-#define BIT_FS_MACID_PWRCHANGE0_INT_8822B BIT(18)
-#define BIT_FS_CTWEND2_INT_8822B BIT(17)
-#define BIT_FS_CTWEND1_INT_8822B BIT(16)
-#define BIT_FS_CTWEND0_INT_8822B BIT(15)
-#define BIT_FS_TX_NULL1_INT_8822B BIT(14)
-#define BIT_FS_TX_NULL0_INT_8822B BIT(13)
-#define BIT_FS_TSF_BIT32_TOGGLE_INT_8822B BIT(12)
-#define BIT_FS_P2P_RFON2_INT_8822B BIT(11)
-#define BIT_FS_P2P_RFOFF2_INT_8822B BIT(10)
-#define BIT_FS_P2P_RFON1_INT_8822B BIT(9)
-#define BIT_FS_P2P_RFOFF1_INT_8822B BIT(8)
-#define BIT_FS_P2P_RFON0_INT_8822B BIT(7)
-#define BIT_FS_P2P_RFOFF0_INT_8822B BIT(6)
-#define BIT_FS_RX_UAPSDMD1_INT_8822B BIT(5)
-#define BIT_FS_RX_UAPSDMD0_INT_8822B BIT(4)
-#define BIT_FS_TRIGGER_PKT_INT_8822B BIT(3)
-#define BIT_FS_EOSP_INT_8822B BIT(2)
-#define BIT_FS_RPWM2_INT_8822B BIT(1)
-#define BIT_FS_RPWM_INT_8822B BIT(0)
-
-/* 2 REG_SPWR0_8822B */
-
-#define BIT_SHIFT_MID_31TO0_8822B 0
-#define BIT_MASK_MID_31TO0_8822B 0xffffffffL
-#define BIT_MID_31TO0_8822B(x) \
- (((x) & BIT_MASK_MID_31TO0_8822B) << BIT_SHIFT_MID_31TO0_8822B)
-#define BIT_GET_MID_31TO0_8822B(x) \
- (((x) >> BIT_SHIFT_MID_31TO0_8822B) & BIT_MASK_MID_31TO0_8822B)
-
-/* 2 REG_SPWR1_8822B */
-
-#define BIT_SHIFT_MID_63TO32_8822B 0
-#define BIT_MASK_MID_63TO32_8822B 0xffffffffL
-#define BIT_MID_63TO32_8822B(x) \
- (((x) & BIT_MASK_MID_63TO32_8822B) << BIT_SHIFT_MID_63TO32_8822B)
-#define BIT_GET_MID_63TO32_8822B(x) \
- (((x) >> BIT_SHIFT_MID_63TO32_8822B) & BIT_MASK_MID_63TO32_8822B)
-
-/* 2 REG_SPWR2_8822B */
-
-#define BIT_SHIFT_MID_95O64_8822B 0
-#define BIT_MASK_MID_95O64_8822B 0xffffffffL
-#define BIT_MID_95O64_8822B(x) \
- (((x) & BIT_MASK_MID_95O64_8822B) << BIT_SHIFT_MID_95O64_8822B)
-#define BIT_GET_MID_95O64_8822B(x) \
- (((x) >> BIT_SHIFT_MID_95O64_8822B) & BIT_MASK_MID_95O64_8822B)
-
-/* 2 REG_SPWR3_8822B */
-
-#define BIT_SHIFT_MID_127TO96_8822B 0
-#define BIT_MASK_MID_127TO96_8822B 0xffffffffL
-#define BIT_MID_127TO96_8822B(x) \
- (((x) & BIT_MASK_MID_127TO96_8822B) << BIT_SHIFT_MID_127TO96_8822B)
-#define BIT_GET_MID_127TO96_8822B(x) \
- (((x) >> BIT_SHIFT_MID_127TO96_8822B) & BIT_MASK_MID_127TO96_8822B)
-
-/* 2 REG_POWSEQ_8822B */
-
-#define BIT_SHIFT_SEQNUM_MID_8822B 16
-#define BIT_MASK_SEQNUM_MID_8822B 0xffff
-#define BIT_SEQNUM_MID_8822B(x) \
- (((x) & BIT_MASK_SEQNUM_MID_8822B) << BIT_SHIFT_SEQNUM_MID_8822B)
-#define BIT_GET_SEQNUM_MID_8822B(x) \
- (((x) >> BIT_SHIFT_SEQNUM_MID_8822B) & BIT_MASK_SEQNUM_MID_8822B)
-
-#define BIT_SHIFT_REF_MID_8822B 0
-#define BIT_MASK_REF_MID_8822B 0x7f
-#define BIT_REF_MID_8822B(x) \
- (((x) & BIT_MASK_REF_MID_8822B) << BIT_SHIFT_REF_MID_8822B)
-#define BIT_GET_REF_MID_8822B(x) \
- (((x) >> BIT_SHIFT_REF_MID_8822B) & BIT_MASK_REF_MID_8822B)
-
-/* 2 REG_TC7_CTRL_V1_8822B */
-#define BIT_TC7INT_EN_8822B BIT(26)
-#define BIT_TC7MODE_8822B BIT(25)
-#define BIT_TC7EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC7DATA_8822B 0
-#define BIT_MASK_TC7DATA_8822B 0xffffff
-#define BIT_TC7DATA_8822B(x) \
- (((x) & BIT_MASK_TC7DATA_8822B) << BIT_SHIFT_TC7DATA_8822B)
-#define BIT_GET_TC7DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC7DATA_8822B) & BIT_MASK_TC7DATA_8822B)
-
-/* 2 REG_TC8_CTRL_V1_8822B */
-#define BIT_TC8INT_EN_8822B BIT(26)
-#define BIT_TC8MODE_8822B BIT(25)
-#define BIT_TC8EN_8822B BIT(24)
-
-#define BIT_SHIFT_TC8DATA_8822B 0
-#define BIT_MASK_TC8DATA_8822B 0xffffff
-#define BIT_TC8DATA_8822B(x) \
- (((x) & BIT_MASK_TC8DATA_8822B) << BIT_SHIFT_TC8DATA_8822B)
-#define BIT_GET_TC8DATA_8822B(x) \
- (((x) >> BIT_SHIFT_TC8DATA_8822B) & BIT_MASK_TC8DATA_8822B)
-
-/* 2 REG_FT2IMR_8822B */
-#define BIT_FS_CLI3_RX_UAPSDMD1_EN_8822B BIT(31)
-#define BIT_FS_CLI3_RX_UAPSDMD0_EN_8822B BIT(30)
-#define BIT_FS_CLI3_TRIGGER_PKT_EN_8822B BIT(29)
-#define BIT_FS_CLI3_EOSP_INT_EN_8822B BIT(28)
-#define BIT_FS_CLI2_RX_UAPSDMD1_EN_8822B BIT(27)
-#define BIT_FS_CLI2_RX_UAPSDMD0_EN_8822B BIT(26)
-#define BIT_FS_CLI2_TRIGGER_PKT_EN_8822B BIT(25)
-#define BIT_FS_CLI2_EOSP_INT_EN_8822B BIT(24)
-#define BIT_FS_CLI1_RX_UAPSDMD1_EN_8822B BIT(23)
-#define BIT_FS_CLI1_RX_UAPSDMD0_EN_8822B BIT(22)
-#define BIT_FS_CLI1_TRIGGER_PKT_EN_8822B BIT(21)
-#define BIT_FS_CLI1_EOSP_INT_EN_8822B BIT(20)
-#define BIT_FS_CLI0_RX_UAPSDMD1_EN_8822B BIT(19)
-#define BIT_FS_CLI0_RX_UAPSDMD0_EN_8822B BIT(18)
-#define BIT_FS_CLI0_TRIGGER_PKT_EN_8822B BIT(17)
-#define BIT_FS_CLI0_EOSP_INT_EN_8822B BIT(16)
-#define BIT_FS_TSF_BIT32_TOGGLE_P2P2_EN_8822B BIT(9)
-#define BIT_FS_TSF_BIT32_TOGGLE_P2P1_EN_8822B BIT(8)
-#define BIT_FS_CLI3_TX_NULL1_INT_EN_8822B BIT(7)
-#define BIT_FS_CLI3_TX_NULL0_INT_EN_8822B BIT(6)
-#define BIT_FS_CLI2_TX_NULL1_INT_EN_8822B BIT(5)
-#define BIT_FS_CLI2_TX_NULL0_INT_EN_8822B BIT(4)
-#define BIT_FS_CLI1_TX_NULL1_INT_EN_8822B BIT(3)
-#define BIT_FS_CLI1_TX_NULL0_INT_EN_8822B BIT(2)
-#define BIT_FS_CLI0_TX_NULL1_INT_EN_8822B BIT(1)
-#define BIT_FS_CLI0_TX_NULL0_INT_EN_8822B BIT(0)
-
-/* 2 REG_FT2ISR_8822B */
-#define BIT_FS_CLI3_RX_UAPSDMD1_INT_8822B BIT(31)
-#define BIT_FS_CLI3_RX_UAPSDMD0_INT_8822B BIT(30)
-#define BIT_FS_CLI3_TRIGGER_PKT_INT_8822B BIT(29)
-#define BIT_FS_CLI3_EOSP_INT_8822B BIT(28)
-#define BIT_FS_CLI2_RX_UAPSDMD1_INT_8822B BIT(27)
-#define BIT_FS_CLI2_RX_UAPSDMD0_INT_8822B BIT(26)
-#define BIT_FS_CLI2_TRIGGER_PKT_INT_8822B BIT(25)
-#define BIT_FS_CLI2_EOSP_INT_8822B BIT(24)
-#define BIT_FS_CLI1_RX_UAPSDMD1_INT_8822B BIT(23)
-#define BIT_FS_CLI1_RX_UAPSDMD0_INT_8822B BIT(22)
-#define BIT_FS_CLI1_TRIGGER_PKT_INT_8822B BIT(21)
-#define BIT_FS_CLI1_EOSP_INT_8822B BIT(20)
-#define BIT_FS_CLI0_RX_UAPSDMD1_INT_8822B BIT(19)
-#define BIT_FS_CLI0_RX_UAPSDMD0_INT_8822B BIT(18)
-#define BIT_FS_CLI0_TRIGGER_PKT_INT_8822B BIT(17)
-#define BIT_FS_CLI0_EOSP_INT_8822B BIT(16)
-#define BIT_FS_TSF_BIT32_TOGGLE_P2P2_INT_8822B BIT(9)
-#define BIT_FS_TSF_BIT32_TOGGLE_P2P1_INT_8822B BIT(8)
-#define BIT_FS_CLI3_TX_NULL1_INT_8822B BIT(7)
-#define BIT_FS_CLI3_TX_NULL0_INT_8822B BIT(6)
-#define BIT_FS_CLI2_TX_NULL1_INT_8822B BIT(5)
-#define BIT_FS_CLI2_TX_NULL0_INT_8822B BIT(4)
-#define BIT_FS_CLI1_TX_NULL1_INT_8822B BIT(3)
-#define BIT_FS_CLI1_TX_NULL0_INT_8822B BIT(2)
-#define BIT_FS_CLI0_TX_NULL1_INT_8822B BIT(1)
-#define BIT_FS_CLI0_TX_NULL0_INT_8822B BIT(0)
-
-/* 2 REG_MSG2_8822B */
-
-#define BIT_SHIFT_FW_MSG2_8822B 0
-#define BIT_MASK_FW_MSG2_8822B 0xffffffffL
-#define BIT_FW_MSG2_8822B(x) \
- (((x) & BIT_MASK_FW_MSG2_8822B) << BIT_SHIFT_FW_MSG2_8822B)
-#define BIT_GET_FW_MSG2_8822B(x) \
- (((x) >> BIT_SHIFT_FW_MSG2_8822B) & BIT_MASK_FW_MSG2_8822B)
-
-/* 2 REG_MSG3_8822B */
-
-#define BIT_SHIFT_FW_MSG3_8822B 0
-#define BIT_MASK_FW_MSG3_8822B 0xffffffffL
-#define BIT_FW_MSG3_8822B(x) \
- (((x) & BIT_MASK_FW_MSG3_8822B) << BIT_SHIFT_FW_MSG3_8822B)
-#define BIT_GET_FW_MSG3_8822B(x) \
- (((x) >> BIT_SHIFT_FW_MSG3_8822B) & BIT_MASK_FW_MSG3_8822B)
-
-/* 2 REG_MSG4_8822B */
-
-#define BIT_SHIFT_FW_MSG4_8822B 0
-#define BIT_MASK_FW_MSG4_8822B 0xffffffffL
-#define BIT_FW_MSG4_8822B(x) \
- (((x) & BIT_MASK_FW_MSG4_8822B) << BIT_SHIFT_FW_MSG4_8822B)
-#define BIT_GET_FW_MSG4_8822B(x) \
- (((x) >> BIT_SHIFT_FW_MSG4_8822B) & BIT_MASK_FW_MSG4_8822B)
-
-/* 2 REG_MSG5_8822B */
-
-#define BIT_SHIFT_FW_MSG5_8822B 0
-#define BIT_MASK_FW_MSG5_8822B 0xffffffffL
-#define BIT_FW_MSG5_8822B(x) \
- (((x) & BIT_MASK_FW_MSG5_8822B) << BIT_SHIFT_FW_MSG5_8822B)
-#define BIT_GET_FW_MSG5_8822B(x) \
- (((x) >> BIT_SHIFT_FW_MSG5_8822B) & BIT_MASK_FW_MSG5_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_FIFOPAGE_CTRL_1_8822B */
-
-#define BIT_SHIFT_TX_OQT_HE_FREE_SPACE_V1_8822B 16
-#define BIT_MASK_TX_OQT_HE_FREE_SPACE_V1_8822B 0xff
-#define BIT_TX_OQT_HE_FREE_SPACE_V1_8822B(x) \
- (((x) & BIT_MASK_TX_OQT_HE_FREE_SPACE_V1_8822B) \
- << BIT_SHIFT_TX_OQT_HE_FREE_SPACE_V1_8822B)
-#define BIT_GET_TX_OQT_HE_FREE_SPACE_V1_8822B(x) \
- (((x) >> BIT_SHIFT_TX_OQT_HE_FREE_SPACE_V1_8822B) & \
- BIT_MASK_TX_OQT_HE_FREE_SPACE_V1_8822B)
-
-#define BIT_SHIFT_TX_OQT_NL_FREE_SPACE_V1_8822B 0
-#define BIT_MASK_TX_OQT_NL_FREE_SPACE_V1_8822B 0xff
-#define BIT_TX_OQT_NL_FREE_SPACE_V1_8822B(x) \
- (((x) & BIT_MASK_TX_OQT_NL_FREE_SPACE_V1_8822B) \
- << BIT_SHIFT_TX_OQT_NL_FREE_SPACE_V1_8822B)
-#define BIT_GET_TX_OQT_NL_FREE_SPACE_V1_8822B(x) \
- (((x) >> BIT_SHIFT_TX_OQT_NL_FREE_SPACE_V1_8822B) & \
- BIT_MASK_TX_OQT_NL_FREE_SPACE_V1_8822B)
-
-/* 2 REG_FIFOPAGE_CTRL_2_8822B */
-#define BIT_BCN_VALID_1_V1_8822B BIT(31)
-
-#define BIT_SHIFT_BCN_HEAD_1_V1_8822B 16
-#define BIT_MASK_BCN_HEAD_1_V1_8822B 0xfff
-#define BIT_BCN_HEAD_1_V1_8822B(x) \
- (((x) & BIT_MASK_BCN_HEAD_1_V1_8822B) << BIT_SHIFT_BCN_HEAD_1_V1_8822B)
-#define BIT_GET_BCN_HEAD_1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_HEAD_1_V1_8822B) & BIT_MASK_BCN_HEAD_1_V1_8822B)
-
-#define BIT_BCN_VALID_V1_8822B BIT(15)
-
-#define BIT_SHIFT_BCN_HEAD_V1_8822B 0
-#define BIT_MASK_BCN_HEAD_V1_8822B 0xfff
-#define BIT_BCN_HEAD_V1_8822B(x) \
- (((x) & BIT_MASK_BCN_HEAD_V1_8822B) << BIT_SHIFT_BCN_HEAD_V1_8822B)
-#define BIT_GET_BCN_HEAD_V1_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_HEAD_V1_8822B) & BIT_MASK_BCN_HEAD_V1_8822B)
-
-/* 2 REG_AUTO_LLT_V1_8822B */
-
-#define BIT_SHIFT_MAX_TX_PKT_FOR_USB_AND_SDIO_V1_8822B 24
-#define BIT_MASK_MAX_TX_PKT_FOR_USB_AND_SDIO_V1_8822B 0xff
-#define BIT_MAX_TX_PKT_FOR_USB_AND_SDIO_V1_8822B(x) \
- (((x) & BIT_MASK_MAX_TX_PKT_FOR_USB_AND_SDIO_V1_8822B) \
- << BIT_SHIFT_MAX_TX_PKT_FOR_USB_AND_SDIO_V1_8822B)
-#define BIT_GET_MAX_TX_PKT_FOR_USB_AND_SDIO_V1_8822B(x) \
- (((x) >> BIT_SHIFT_MAX_TX_PKT_FOR_USB_AND_SDIO_V1_8822B) & \
- BIT_MASK_MAX_TX_PKT_FOR_USB_AND_SDIO_V1_8822B)
-
-#define BIT_SHIFT_LLT_FREE_PAGE_V1_8822B 8
-#define BIT_MASK_LLT_FREE_PAGE_V1_8822B 0xffff
-#define BIT_LLT_FREE_PAGE_V1_8822B(x) \
- (((x) & BIT_MASK_LLT_FREE_PAGE_V1_8822B) \
- << BIT_SHIFT_LLT_FREE_PAGE_V1_8822B)
-#define BIT_GET_LLT_FREE_PAGE_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LLT_FREE_PAGE_V1_8822B) & \
- BIT_MASK_LLT_FREE_PAGE_V1_8822B)
-
-#define BIT_SHIFT_BLK_DESC_NUM_8822B 4
-#define BIT_MASK_BLK_DESC_NUM_8822B 0xf
-#define BIT_BLK_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_BLK_DESC_NUM_8822B) << BIT_SHIFT_BLK_DESC_NUM_8822B)
-#define BIT_GET_BLK_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_BLK_DESC_NUM_8822B) & BIT_MASK_BLK_DESC_NUM_8822B)
-
-#define BIT_R_BCN_HEAD_SEL_8822B BIT(3)
-#define BIT_R_EN_BCN_SW_HEAD_SEL_8822B BIT(2)
-#define BIT_LLT_DBG_SEL_8822B BIT(1)
-#define BIT_AUTO_INIT_LLT_V1_8822B BIT(0)
-
-/* 2 REG_TXDMA_OFFSET_CHK_8822B */
-#define BIT_EM_CHKSUM_FIN_8822B BIT(31)
-#define BIT_EMN_PCIE_DMA_MOD_8822B BIT(30)
-#define BIT_EN_TXQUE_CLR_8822B BIT(29)
-#define BIT_EN_PCIE_FIFO_MODE_8822B BIT(28)
-
-#define BIT_SHIFT_PG_UNDER_TH_V1_8822B 16
-#define BIT_MASK_PG_UNDER_TH_V1_8822B 0xfff
-#define BIT_PG_UNDER_TH_V1_8822B(x) \
- (((x) & BIT_MASK_PG_UNDER_TH_V1_8822B) \
- << BIT_SHIFT_PG_UNDER_TH_V1_8822B)
-#define BIT_GET_PG_UNDER_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PG_UNDER_TH_V1_8822B) & \
- BIT_MASK_PG_UNDER_TH_V1_8822B)
-
-#define BIT_RESTORE_H2C_ADDRESS_8822B BIT(15)
-#define BIT_SDIO_TXDESC_CHKSUM_EN_8822B BIT(13)
-#define BIT_RST_RDPTR_8822B BIT(12)
-#define BIT_RST_WRPTR_8822B BIT(11)
-#define BIT_CHK_PG_TH_EN_8822B BIT(10)
-#define BIT_DROP_DATA_EN_8822B BIT(9)
-#define BIT_CHECK_OFFSET_EN_8822B BIT(8)
-
-#define BIT_SHIFT_CHECK_OFFSET_8822B 0
-#define BIT_MASK_CHECK_OFFSET_8822B 0xff
-#define BIT_CHECK_OFFSET_8822B(x) \
- (((x) & BIT_MASK_CHECK_OFFSET_8822B) << BIT_SHIFT_CHECK_OFFSET_8822B)
-#define BIT_GET_CHECK_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_CHECK_OFFSET_8822B) & BIT_MASK_CHECK_OFFSET_8822B)
-
-/* 2 REG_TXDMA_STATUS_8822B */
-#define BIT_HI_OQT_UDN_8822B BIT(17)
-#define BIT_HI_OQT_OVF_8822B BIT(16)
-#define BIT_PAYLOAD_CHKSUM_ERR_8822B BIT(15)
-#define BIT_PAYLOAD_UDN_8822B BIT(14)
-#define BIT_PAYLOAD_OVF_8822B BIT(13)
-#define BIT_DSC_CHKSUM_FAIL_8822B BIT(12)
-#define BIT_UNKNOWN_QSEL_8822B BIT(11)
-#define BIT_EP_QSEL_DIFF_8822B BIT(10)
-#define BIT_TX_OFFS_UNMATCH_8822B BIT(9)
-#define BIT_TXOQT_UDN_8822B BIT(8)
-#define BIT_TXOQT_OVF_8822B BIT(7)
-#define BIT_TXDMA_SFF_UDN_8822B BIT(6)
-#define BIT_TXDMA_SFF_OVF_8822B BIT(5)
-#define BIT_LLT_NULL_PG_8822B BIT(4)
-#define BIT_PAGE_UDN_8822B BIT(3)
-#define BIT_PAGE_OVF_8822B BIT(2)
-#define BIT_TXFF_PG_UDN_8822B BIT(1)
-#define BIT_TXFF_PG_OVF_8822B BIT(0)
-
-/* 2 REG_TX_DMA_DBG_8822B */
-
-/* 2 REG_TQPNT1_8822B */
-
-#define BIT_SHIFT_HPQ_HIGH_TH_V1_8822B 16
-#define BIT_MASK_HPQ_HIGH_TH_V1_8822B 0xfff
-#define BIT_HPQ_HIGH_TH_V1_8822B(x) \
- (((x) & BIT_MASK_HPQ_HIGH_TH_V1_8822B) \
- << BIT_SHIFT_HPQ_HIGH_TH_V1_8822B)
-#define BIT_GET_HPQ_HIGH_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HPQ_HIGH_TH_V1_8822B) & \
- BIT_MASK_HPQ_HIGH_TH_V1_8822B)
-
-#define BIT_SHIFT_HPQ_LOW_TH_V1_8822B 0
-#define BIT_MASK_HPQ_LOW_TH_V1_8822B 0xfff
-#define BIT_HPQ_LOW_TH_V1_8822B(x) \
- (((x) & BIT_MASK_HPQ_LOW_TH_V1_8822B) << BIT_SHIFT_HPQ_LOW_TH_V1_8822B)
-#define BIT_GET_HPQ_LOW_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HPQ_LOW_TH_V1_8822B) & BIT_MASK_HPQ_LOW_TH_V1_8822B)
-
-/* 2 REG_TQPNT2_8822B */
-
-#define BIT_SHIFT_NPQ_HIGH_TH_V1_8822B 16
-#define BIT_MASK_NPQ_HIGH_TH_V1_8822B 0xfff
-#define BIT_NPQ_HIGH_TH_V1_8822B(x) \
- (((x) & BIT_MASK_NPQ_HIGH_TH_V1_8822B) \
- << BIT_SHIFT_NPQ_HIGH_TH_V1_8822B)
-#define BIT_GET_NPQ_HIGH_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_NPQ_HIGH_TH_V1_8822B) & \
- BIT_MASK_NPQ_HIGH_TH_V1_8822B)
-
-#define BIT_SHIFT_NPQ_LOW_TH_V1_8822B 0
-#define BIT_MASK_NPQ_LOW_TH_V1_8822B 0xfff
-#define BIT_NPQ_LOW_TH_V1_8822B(x) \
- (((x) & BIT_MASK_NPQ_LOW_TH_V1_8822B) << BIT_SHIFT_NPQ_LOW_TH_V1_8822B)
-#define BIT_GET_NPQ_LOW_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_NPQ_LOW_TH_V1_8822B) & BIT_MASK_NPQ_LOW_TH_V1_8822B)
-
-/* 2 REG_TQPNT3_8822B */
-
-#define BIT_SHIFT_LPQ_HIGH_TH_V1_8822B 16
-#define BIT_MASK_LPQ_HIGH_TH_V1_8822B 0xfff
-#define BIT_LPQ_HIGH_TH_V1_8822B(x) \
- (((x) & BIT_MASK_LPQ_HIGH_TH_V1_8822B) \
- << BIT_SHIFT_LPQ_HIGH_TH_V1_8822B)
-#define BIT_GET_LPQ_HIGH_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LPQ_HIGH_TH_V1_8822B) & \
- BIT_MASK_LPQ_HIGH_TH_V1_8822B)
-
-#define BIT_SHIFT_LPQ_LOW_TH_V1_8822B 0
-#define BIT_MASK_LPQ_LOW_TH_V1_8822B 0xfff
-#define BIT_LPQ_LOW_TH_V1_8822B(x) \
- (((x) & BIT_MASK_LPQ_LOW_TH_V1_8822B) << BIT_SHIFT_LPQ_LOW_TH_V1_8822B)
-#define BIT_GET_LPQ_LOW_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LPQ_LOW_TH_V1_8822B) & BIT_MASK_LPQ_LOW_TH_V1_8822B)
-
-/* 2 REG_TQPNT4_8822B */
-
-#define BIT_SHIFT_EXQ_HIGH_TH_V1_8822B 16
-#define BIT_MASK_EXQ_HIGH_TH_V1_8822B 0xfff
-#define BIT_EXQ_HIGH_TH_V1_8822B(x) \
- (((x) & BIT_MASK_EXQ_HIGH_TH_V1_8822B) \
- << BIT_SHIFT_EXQ_HIGH_TH_V1_8822B)
-#define BIT_GET_EXQ_HIGH_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_EXQ_HIGH_TH_V1_8822B) & \
- BIT_MASK_EXQ_HIGH_TH_V1_8822B)
-
-#define BIT_SHIFT_EXQ_LOW_TH_V1_8822B 0
-#define BIT_MASK_EXQ_LOW_TH_V1_8822B 0xfff
-#define BIT_EXQ_LOW_TH_V1_8822B(x) \
- (((x) & BIT_MASK_EXQ_LOW_TH_V1_8822B) << BIT_SHIFT_EXQ_LOW_TH_V1_8822B)
-#define BIT_GET_EXQ_LOW_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_EXQ_LOW_TH_V1_8822B) & BIT_MASK_EXQ_LOW_TH_V1_8822B)
-
-/* 2 REG_RQPN_CTRL_1_8822B */
-
-#define BIT_SHIFT_TXPKTNUM_H_8822B 16
-#define BIT_MASK_TXPKTNUM_H_8822B 0xffff
-#define BIT_TXPKTNUM_H_8822B(x) \
- (((x) & BIT_MASK_TXPKTNUM_H_8822B) << BIT_SHIFT_TXPKTNUM_H_8822B)
-#define BIT_GET_TXPKTNUM_H_8822B(x) \
- (((x) >> BIT_SHIFT_TXPKTNUM_H_8822B) & BIT_MASK_TXPKTNUM_H_8822B)
-
-#define BIT_SHIFT_TXPKTNUM_V2_8822B 0
-#define BIT_MASK_TXPKTNUM_V2_8822B 0xffff
-#define BIT_TXPKTNUM_V2_8822B(x) \
- (((x) & BIT_MASK_TXPKTNUM_V2_8822B) << BIT_SHIFT_TXPKTNUM_V2_8822B)
-#define BIT_GET_TXPKTNUM_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TXPKTNUM_V2_8822B) & BIT_MASK_TXPKTNUM_V2_8822B)
-
-/* 2 REG_RQPN_CTRL_2_8822B */
-#define BIT_LD_RQPN_8822B BIT(31)
-#define BIT_EXQ_PUBLIC_DIS_V1_8822B BIT(19)
-#define BIT_NPQ_PUBLIC_DIS_V1_8822B BIT(18)
-#define BIT_LPQ_PUBLIC_DIS_V1_8822B BIT(17)
-#define BIT_HPQ_PUBLIC_DIS_V1_8822B BIT(16)
-
-/* 2 REG_FIFOPAGE_INFO_1_8822B */
-
-#define BIT_SHIFT_HPQ_AVAL_PG_V1_8822B 16
-#define BIT_MASK_HPQ_AVAL_PG_V1_8822B 0xfff
-#define BIT_HPQ_AVAL_PG_V1_8822B(x) \
- (((x) & BIT_MASK_HPQ_AVAL_PG_V1_8822B) \
- << BIT_SHIFT_HPQ_AVAL_PG_V1_8822B)
-#define BIT_GET_HPQ_AVAL_PG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HPQ_AVAL_PG_V1_8822B) & \
- BIT_MASK_HPQ_AVAL_PG_V1_8822B)
-
-#define BIT_SHIFT_HPQ_V1_8822B 0
-#define BIT_MASK_HPQ_V1_8822B 0xfff
-#define BIT_HPQ_V1_8822B(x) \
- (((x) & BIT_MASK_HPQ_V1_8822B) << BIT_SHIFT_HPQ_V1_8822B)
-#define BIT_GET_HPQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HPQ_V1_8822B) & BIT_MASK_HPQ_V1_8822B)
-
-/* 2 REG_FIFOPAGE_INFO_2_8822B */
-
-#define BIT_SHIFT_LPQ_AVAL_PG_V1_8822B 16
-#define BIT_MASK_LPQ_AVAL_PG_V1_8822B 0xfff
-#define BIT_LPQ_AVAL_PG_V1_8822B(x) \
- (((x) & BIT_MASK_LPQ_AVAL_PG_V1_8822B) \
- << BIT_SHIFT_LPQ_AVAL_PG_V1_8822B)
-#define BIT_GET_LPQ_AVAL_PG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LPQ_AVAL_PG_V1_8822B) & \
- BIT_MASK_LPQ_AVAL_PG_V1_8822B)
-
-#define BIT_SHIFT_LPQ_V1_8822B 0
-#define BIT_MASK_LPQ_V1_8822B 0xfff
-#define BIT_LPQ_V1_8822B(x) \
- (((x) & BIT_MASK_LPQ_V1_8822B) << BIT_SHIFT_LPQ_V1_8822B)
-#define BIT_GET_LPQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LPQ_V1_8822B) & BIT_MASK_LPQ_V1_8822B)
-
-/* 2 REG_FIFOPAGE_INFO_3_8822B */
-
-#define BIT_SHIFT_NPQ_AVAL_PG_V1_8822B 16
-#define BIT_MASK_NPQ_AVAL_PG_V1_8822B 0xfff
-#define BIT_NPQ_AVAL_PG_V1_8822B(x) \
- (((x) & BIT_MASK_NPQ_AVAL_PG_V1_8822B) \
- << BIT_SHIFT_NPQ_AVAL_PG_V1_8822B)
-#define BIT_GET_NPQ_AVAL_PG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_NPQ_AVAL_PG_V1_8822B) & \
- BIT_MASK_NPQ_AVAL_PG_V1_8822B)
-
-#define BIT_SHIFT_NPQ_V1_8822B 0
-#define BIT_MASK_NPQ_V1_8822B 0xfff
-#define BIT_NPQ_V1_8822B(x) \
- (((x) & BIT_MASK_NPQ_V1_8822B) << BIT_SHIFT_NPQ_V1_8822B)
-#define BIT_GET_NPQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_NPQ_V1_8822B) & BIT_MASK_NPQ_V1_8822B)
-
-/* 2 REG_FIFOPAGE_INFO_4_8822B */
-
-#define BIT_SHIFT_EXQ_AVAL_PG_V1_8822B 16
-#define BIT_MASK_EXQ_AVAL_PG_V1_8822B 0xfff
-#define BIT_EXQ_AVAL_PG_V1_8822B(x) \
- (((x) & BIT_MASK_EXQ_AVAL_PG_V1_8822B) \
- << BIT_SHIFT_EXQ_AVAL_PG_V1_8822B)
-#define BIT_GET_EXQ_AVAL_PG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_EXQ_AVAL_PG_V1_8822B) & \
- BIT_MASK_EXQ_AVAL_PG_V1_8822B)
-
-#define BIT_SHIFT_EXQ_V1_8822B 0
-#define BIT_MASK_EXQ_V1_8822B 0xfff
-#define BIT_EXQ_V1_8822B(x) \
- (((x) & BIT_MASK_EXQ_V1_8822B) << BIT_SHIFT_EXQ_V1_8822B)
-#define BIT_GET_EXQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_EXQ_V1_8822B) & BIT_MASK_EXQ_V1_8822B)
-
-/* 2 REG_FIFOPAGE_INFO_5_8822B */
-
-#define BIT_SHIFT_PUBQ_AVAL_PG_V1_8822B 16
-#define BIT_MASK_PUBQ_AVAL_PG_V1_8822B 0xfff
-#define BIT_PUBQ_AVAL_PG_V1_8822B(x) \
- (((x) & BIT_MASK_PUBQ_AVAL_PG_V1_8822B) \
- << BIT_SHIFT_PUBQ_AVAL_PG_V1_8822B)
-#define BIT_GET_PUBQ_AVAL_PG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PUBQ_AVAL_PG_V1_8822B) & \
- BIT_MASK_PUBQ_AVAL_PG_V1_8822B)
-
-#define BIT_SHIFT_PUBQ_V1_8822B 0
-#define BIT_MASK_PUBQ_V1_8822B 0xfff
-#define BIT_PUBQ_V1_8822B(x) \
- (((x) & BIT_MASK_PUBQ_V1_8822B) << BIT_SHIFT_PUBQ_V1_8822B)
-#define BIT_GET_PUBQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PUBQ_V1_8822B) & BIT_MASK_PUBQ_V1_8822B)
-
-/* 2 REG_H2C_HEAD_8822B */
-
-#define BIT_SHIFT_H2C_HEAD_8822B 0
-#define BIT_MASK_H2C_HEAD_8822B 0x3ffff
-#define BIT_H2C_HEAD_8822B(x) \
- (((x) & BIT_MASK_H2C_HEAD_8822B) << BIT_SHIFT_H2C_HEAD_8822B)
-#define BIT_GET_H2C_HEAD_8822B(x) \
- (((x) >> BIT_SHIFT_H2C_HEAD_8822B) & BIT_MASK_H2C_HEAD_8822B)
-
-/* 2 REG_H2C_TAIL_8822B */
-
-#define BIT_SHIFT_H2C_TAIL_8822B 0
-#define BIT_MASK_H2C_TAIL_8822B 0x3ffff
-#define BIT_H2C_TAIL_8822B(x) \
- (((x) & BIT_MASK_H2C_TAIL_8822B) << BIT_SHIFT_H2C_TAIL_8822B)
-#define BIT_GET_H2C_TAIL_8822B(x) \
- (((x) >> BIT_SHIFT_H2C_TAIL_8822B) & BIT_MASK_H2C_TAIL_8822B)
-
-/* 2 REG_H2C_READ_ADDR_8822B */
-
-#define BIT_SHIFT_H2C_READ_ADDR_8822B 0
-#define BIT_MASK_H2C_READ_ADDR_8822B 0x3ffff
-#define BIT_H2C_READ_ADDR_8822B(x) \
- (((x) & BIT_MASK_H2C_READ_ADDR_8822B) << BIT_SHIFT_H2C_READ_ADDR_8822B)
-#define BIT_GET_H2C_READ_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_H2C_READ_ADDR_8822B) & BIT_MASK_H2C_READ_ADDR_8822B)
-
-/* 2 REG_H2C_WR_ADDR_8822B */
-
-#define BIT_SHIFT_H2C_WR_ADDR_8822B 0
-#define BIT_MASK_H2C_WR_ADDR_8822B 0x3ffff
-#define BIT_H2C_WR_ADDR_8822B(x) \
- (((x) & BIT_MASK_H2C_WR_ADDR_8822B) << BIT_SHIFT_H2C_WR_ADDR_8822B)
-#define BIT_GET_H2C_WR_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_H2C_WR_ADDR_8822B) & BIT_MASK_H2C_WR_ADDR_8822B)
-
-/* 2 REG_H2C_INFO_8822B */
-#define BIT_H2C_SPACE_VLD_8822B BIT(3)
-#define BIT_H2C_WR_ADDR_RST_8822B BIT(2)
-
-#define BIT_SHIFT_H2C_LEN_SEL_8822B 0
-#define BIT_MASK_H2C_LEN_SEL_8822B 0x3
-#define BIT_H2C_LEN_SEL_8822B(x) \
- (((x) & BIT_MASK_H2C_LEN_SEL_8822B) << BIT_SHIFT_H2C_LEN_SEL_8822B)
-#define BIT_GET_H2C_LEN_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_H2C_LEN_SEL_8822B) & BIT_MASK_H2C_LEN_SEL_8822B)
-
-/* 2 REG_RXDMA_AGG_PG_TH_8822B */
-
-#define BIT_SHIFT_RXDMA_AGG_OLD_MOD_8822B 24
-#define BIT_MASK_RXDMA_AGG_OLD_MOD_8822B 0xff
-#define BIT_RXDMA_AGG_OLD_MOD_8822B(x) \
- (((x) & BIT_MASK_RXDMA_AGG_OLD_MOD_8822B) \
- << BIT_SHIFT_RXDMA_AGG_OLD_MOD_8822B)
-#define BIT_GET_RXDMA_AGG_OLD_MOD_8822B(x) \
- (((x) >> BIT_SHIFT_RXDMA_AGG_OLD_MOD_8822B) & \
- BIT_MASK_RXDMA_AGG_OLD_MOD_8822B)
-
-#define BIT_SHIFT_PKT_NUM_WOL_8822B 16
-#define BIT_MASK_PKT_NUM_WOL_8822B 0xff
-#define BIT_PKT_NUM_WOL_8822B(x) \
- (((x) & BIT_MASK_PKT_NUM_WOL_8822B) << BIT_SHIFT_PKT_NUM_WOL_8822B)
-#define BIT_GET_PKT_NUM_WOL_8822B(x) \
- (((x) >> BIT_SHIFT_PKT_NUM_WOL_8822B) & BIT_MASK_PKT_NUM_WOL_8822B)
-
-#define BIT_SHIFT_DMA_AGG_TO_8822B 8
-#define BIT_MASK_DMA_AGG_TO_8822B 0xf
-#define BIT_DMA_AGG_TO_8822B(x) \
- (((x) & BIT_MASK_DMA_AGG_TO_8822B) << BIT_SHIFT_DMA_AGG_TO_8822B)
-#define BIT_GET_DMA_AGG_TO_8822B(x) \
- (((x) >> BIT_SHIFT_DMA_AGG_TO_8822B) & BIT_MASK_DMA_AGG_TO_8822B)
-
-#define BIT_SHIFT_RXDMA_AGG_PG_TH_V1_8822B 0
-#define BIT_MASK_RXDMA_AGG_PG_TH_V1_8822B 0xf
-#define BIT_RXDMA_AGG_PG_TH_V1_8822B(x) \
- (((x) & BIT_MASK_RXDMA_AGG_PG_TH_V1_8822B) \
- << BIT_SHIFT_RXDMA_AGG_PG_TH_V1_8822B)
-#define BIT_GET_RXDMA_AGG_PG_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_RXDMA_AGG_PG_TH_V1_8822B) & \
- BIT_MASK_RXDMA_AGG_PG_TH_V1_8822B)
-
-/* 2 REG_RXPKT_NUM_8822B */
-
-#define BIT_SHIFT_RXPKT_NUM_8822B 24
-#define BIT_MASK_RXPKT_NUM_8822B 0xff
-#define BIT_RXPKT_NUM_8822B(x) \
- (((x) & BIT_MASK_RXPKT_NUM_8822B) << BIT_SHIFT_RXPKT_NUM_8822B)
-#define BIT_GET_RXPKT_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_RXPKT_NUM_8822B) & BIT_MASK_RXPKT_NUM_8822B)
-
-#define BIT_SHIFT_FW_UPD_RDPTR19_TO_16_8822B 20
-#define BIT_MASK_FW_UPD_RDPTR19_TO_16_8822B 0xf
-#define BIT_FW_UPD_RDPTR19_TO_16_8822B(x) \
- (((x) & BIT_MASK_FW_UPD_RDPTR19_TO_16_8822B) \
- << BIT_SHIFT_FW_UPD_RDPTR19_TO_16_8822B)
-#define BIT_GET_FW_UPD_RDPTR19_TO_16_8822B(x) \
- (((x) >> BIT_SHIFT_FW_UPD_RDPTR19_TO_16_8822B) & \
- BIT_MASK_FW_UPD_RDPTR19_TO_16_8822B)
-
-#define BIT_RXDMA_REQ_8822B BIT(19)
-#define BIT_RW_RELEASE_EN_8822B BIT(18)
-#define BIT_RXDMA_IDLE_8822B BIT(17)
-#define BIT_RXPKT_RELEASE_POLL_8822B BIT(16)
-
-#define BIT_SHIFT_FW_UPD_RDPTR_8822B 0
-#define BIT_MASK_FW_UPD_RDPTR_8822B 0xffff
-#define BIT_FW_UPD_RDPTR_8822B(x) \
- (((x) & BIT_MASK_FW_UPD_RDPTR_8822B) << BIT_SHIFT_FW_UPD_RDPTR_8822B)
-#define BIT_GET_FW_UPD_RDPTR_8822B(x) \
- (((x) >> BIT_SHIFT_FW_UPD_RDPTR_8822B) & BIT_MASK_FW_UPD_RDPTR_8822B)
-
-/* 2 REG_RXDMA_STATUS_8822B */
-#define BIT_C2H_PKT_OVF_8822B BIT(7)
-#define BIT_AGG_CONFGI_ISSUE_8822B BIT(6)
-#define BIT_FW_POLL_ISSUE_8822B BIT(5)
-#define BIT_RX_DATA_UDN_8822B BIT(4)
-#define BIT_RX_SFF_UDN_8822B BIT(3)
-#define BIT_RX_SFF_OVF_8822B BIT(2)
-#define BIT_RXPKT_OVF_8822B BIT(0)
-
-/* 2 REG_RXDMA_DPR_8822B */
-
-#define BIT_SHIFT_RDE_DEBUG_8822B 0
-#define BIT_MASK_RDE_DEBUG_8822B 0xffffffffL
-#define BIT_RDE_DEBUG_8822B(x) \
- (((x) & BIT_MASK_RDE_DEBUG_8822B) << BIT_SHIFT_RDE_DEBUG_8822B)
-#define BIT_GET_RDE_DEBUG_8822B(x) \
- (((x) >> BIT_SHIFT_RDE_DEBUG_8822B) & BIT_MASK_RDE_DEBUG_8822B)
-
-/* 2 REG_RXDMA_MODE_8822B */
-
-#define BIT_SHIFT_PKTNUM_TH_V2_8822B 24
-#define BIT_MASK_PKTNUM_TH_V2_8822B 0x1f
-#define BIT_PKTNUM_TH_V2_8822B(x) \
- (((x) & BIT_MASK_PKTNUM_TH_V2_8822B) << BIT_SHIFT_PKTNUM_TH_V2_8822B)
-#define BIT_GET_PKTNUM_TH_V2_8822B(x) \
- (((x) >> BIT_SHIFT_PKTNUM_TH_V2_8822B) & BIT_MASK_PKTNUM_TH_V2_8822B)
-
-#define BIT_TXBA_BREAK_USBAGG_8822B BIT(23)
-
-#define BIT_SHIFT_PKTLEN_PARA_8822B 16
-#define BIT_MASK_PKTLEN_PARA_8822B 0x7
-#define BIT_PKTLEN_PARA_8822B(x) \
- (((x) & BIT_MASK_PKTLEN_PARA_8822B) << BIT_SHIFT_PKTLEN_PARA_8822B)
-#define BIT_GET_PKTLEN_PARA_8822B(x) \
- (((x) >> BIT_SHIFT_PKTLEN_PARA_8822B) & BIT_MASK_PKTLEN_PARA_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_BURST_SIZE_8822B 4
-#define BIT_MASK_BURST_SIZE_8822B 0x3
-#define BIT_BURST_SIZE_8822B(x) \
- (((x) & BIT_MASK_BURST_SIZE_8822B) << BIT_SHIFT_BURST_SIZE_8822B)
-#define BIT_GET_BURST_SIZE_8822B(x) \
- (((x) >> BIT_SHIFT_BURST_SIZE_8822B) & BIT_MASK_BURST_SIZE_8822B)
-
-#define BIT_SHIFT_BURST_CNT_8822B 2
-#define BIT_MASK_BURST_CNT_8822B 0x3
-#define BIT_BURST_CNT_8822B(x) \
- (((x) & BIT_MASK_BURST_CNT_8822B) << BIT_SHIFT_BURST_CNT_8822B)
-#define BIT_GET_BURST_CNT_8822B(x) \
- (((x) >> BIT_SHIFT_BURST_CNT_8822B) & BIT_MASK_BURST_CNT_8822B)
-
-#define BIT_DMA_MODE_8822B BIT(1)
-
-/* 2 REG_C2H_PKT_8822B */
-
-#define BIT_SHIFT_R_C2H_STR_ADDR_16_TO_19_8822B 24
-#define BIT_MASK_R_C2H_STR_ADDR_16_TO_19_8822B 0xf
-#define BIT_R_C2H_STR_ADDR_16_TO_19_8822B(x) \
- (((x) & BIT_MASK_R_C2H_STR_ADDR_16_TO_19_8822B) \
- << BIT_SHIFT_R_C2H_STR_ADDR_16_TO_19_8822B)
-#define BIT_GET_R_C2H_STR_ADDR_16_TO_19_8822B(x) \
- (((x) >> BIT_SHIFT_R_C2H_STR_ADDR_16_TO_19_8822B) & \
- BIT_MASK_R_C2H_STR_ADDR_16_TO_19_8822B)
-
-#define BIT_R_C2H_PKT_REQ_8822B BIT(16)
-
-#define BIT_SHIFT_R_C2H_STR_ADDR_8822B 0
-#define BIT_MASK_R_C2H_STR_ADDR_8822B 0xffff
-#define BIT_R_C2H_STR_ADDR_8822B(x) \
- (((x) & BIT_MASK_R_C2H_STR_ADDR_8822B) \
- << BIT_SHIFT_R_C2H_STR_ADDR_8822B)
-#define BIT_GET_R_C2H_STR_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_R_C2H_STR_ADDR_8822B) & \
- BIT_MASK_R_C2H_STR_ADDR_8822B)
-
-/* 2 REG_FWFF_C2H_8822B */
-
-#define BIT_SHIFT_C2H_DMA_ADDR_8822B 0
-#define BIT_MASK_C2H_DMA_ADDR_8822B 0x3ffff
-#define BIT_C2H_DMA_ADDR_8822B(x) \
- (((x) & BIT_MASK_C2H_DMA_ADDR_8822B) << BIT_SHIFT_C2H_DMA_ADDR_8822B)
-#define BIT_GET_C2H_DMA_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_C2H_DMA_ADDR_8822B) & BIT_MASK_C2H_DMA_ADDR_8822B)
-
-/* 2 REG_FWFF_CTRL_8822B */
-#define BIT_FWFF_DMAPKT_REQ_8822B BIT(31)
-
-#define BIT_SHIFT_FWFF_DMA_PKT_NUM_8822B 16
-#define BIT_MASK_FWFF_DMA_PKT_NUM_8822B 0xff
-#define BIT_FWFF_DMA_PKT_NUM_8822B(x) \
- (((x) & BIT_MASK_FWFF_DMA_PKT_NUM_8822B) \
- << BIT_SHIFT_FWFF_DMA_PKT_NUM_8822B)
-#define BIT_GET_FWFF_DMA_PKT_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_FWFF_DMA_PKT_NUM_8822B) & \
- BIT_MASK_FWFF_DMA_PKT_NUM_8822B)
-
-#define BIT_SHIFT_FWFF_STR_ADDR_8822B 0
-#define BIT_MASK_FWFF_STR_ADDR_8822B 0xffff
-#define BIT_FWFF_STR_ADDR_8822B(x) \
- (((x) & BIT_MASK_FWFF_STR_ADDR_8822B) << BIT_SHIFT_FWFF_STR_ADDR_8822B)
-#define BIT_GET_FWFF_STR_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_FWFF_STR_ADDR_8822B) & BIT_MASK_FWFF_STR_ADDR_8822B)
-
-/* 2 REG_FWFF_PKT_INFO_8822B */
-
-#define BIT_SHIFT_FWFF_PKT_QUEUED_8822B 16
-#define BIT_MASK_FWFF_PKT_QUEUED_8822B 0xff
-#define BIT_FWFF_PKT_QUEUED_8822B(x) \
- (((x) & BIT_MASK_FWFF_PKT_QUEUED_8822B) \
- << BIT_SHIFT_FWFF_PKT_QUEUED_8822B)
-#define BIT_GET_FWFF_PKT_QUEUED_8822B(x) \
- (((x) >> BIT_SHIFT_FWFF_PKT_QUEUED_8822B) & \
- BIT_MASK_FWFF_PKT_QUEUED_8822B)
-
-#define BIT_SHIFT_FWFF_PKT_STR_ADDR_8822B 0
-#define BIT_MASK_FWFF_PKT_STR_ADDR_8822B 0xffff
-#define BIT_FWFF_PKT_STR_ADDR_8822B(x) \
- (((x) & BIT_MASK_FWFF_PKT_STR_ADDR_8822B) \
- << BIT_SHIFT_FWFF_PKT_STR_ADDR_8822B)
-#define BIT_GET_FWFF_PKT_STR_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_FWFF_PKT_STR_ADDR_8822B) & \
- BIT_MASK_FWFF_PKT_STR_ADDR_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_DDMA_CH0SA_8822B */
-
-#define BIT_SHIFT_DDMACH0_SA_8822B 0
-#define BIT_MASK_DDMACH0_SA_8822B 0xffffffffL
-#define BIT_DDMACH0_SA_8822B(x) \
- (((x) & BIT_MASK_DDMACH0_SA_8822B) << BIT_SHIFT_DDMACH0_SA_8822B)
-#define BIT_GET_DDMACH0_SA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH0_SA_8822B) & BIT_MASK_DDMACH0_SA_8822B)
-
-/* 2 REG_DDMA_CH0DA_8822B */
-
-#define BIT_SHIFT_DDMACH0_DA_8822B 0
-#define BIT_MASK_DDMACH0_DA_8822B 0xffffffffL
-#define BIT_DDMACH0_DA_8822B(x) \
- (((x) & BIT_MASK_DDMACH0_DA_8822B) << BIT_SHIFT_DDMACH0_DA_8822B)
-#define BIT_GET_DDMACH0_DA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH0_DA_8822B) & BIT_MASK_DDMACH0_DA_8822B)
-
-/* 2 REG_DDMA_CH0CTRL_8822B */
-#define BIT_DDMACH0_OWN_8822B BIT(31)
-#define BIT_DDMACH0_CHKSUM_EN_8822B BIT(29)
-#define BIT_DDMACH0_DA_W_DISABLE_8822B BIT(28)
-#define BIT_DDMACH0_CHKSUM_STS_8822B BIT(27)
-#define BIT_DDMACH0_DDMA_MODE_8822B BIT(26)
-#define BIT_DDMACH0_RESET_CHKSUM_STS_8822B BIT(25)
-#define BIT_DDMACH0_CHKSUM_CONT_8822B BIT(24)
-
-#define BIT_SHIFT_DDMACH0_DLEN_8822B 0
-#define BIT_MASK_DDMACH0_DLEN_8822B 0x3ffff
-#define BIT_DDMACH0_DLEN_8822B(x) \
- (((x) & BIT_MASK_DDMACH0_DLEN_8822B) << BIT_SHIFT_DDMACH0_DLEN_8822B)
-#define BIT_GET_DDMACH0_DLEN_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH0_DLEN_8822B) & BIT_MASK_DDMACH0_DLEN_8822B)
-
-/* 2 REG_DDMA_CH1SA_8822B */
-
-#define BIT_SHIFT_DDMACH1_SA_8822B 0
-#define BIT_MASK_DDMACH1_SA_8822B 0xffffffffL
-#define BIT_DDMACH1_SA_8822B(x) \
- (((x) & BIT_MASK_DDMACH1_SA_8822B) << BIT_SHIFT_DDMACH1_SA_8822B)
-#define BIT_GET_DDMACH1_SA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH1_SA_8822B) & BIT_MASK_DDMACH1_SA_8822B)
-
-/* 2 REG_DDMA_CH1DA_8822B */
-
-#define BIT_SHIFT_DDMACH1_DA_8822B 0
-#define BIT_MASK_DDMACH1_DA_8822B 0xffffffffL
-#define BIT_DDMACH1_DA_8822B(x) \
- (((x) & BIT_MASK_DDMACH1_DA_8822B) << BIT_SHIFT_DDMACH1_DA_8822B)
-#define BIT_GET_DDMACH1_DA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH1_DA_8822B) & BIT_MASK_DDMACH1_DA_8822B)
-
-/* 2 REG_DDMA_CH1CTRL_8822B */
-#define BIT_DDMACH1_OWN_8822B BIT(31)
-#define BIT_DDMACH1_CHKSUM_EN_8822B BIT(29)
-#define BIT_DDMACH1_DA_W_DISABLE_8822B BIT(28)
-#define BIT_DDMACH1_CHKSUM_STS_8822B BIT(27)
-#define BIT_DDMACH1_DDMA_MODE_8822B BIT(26)
-#define BIT_DDMACH1_RESET_CHKSUM_STS_8822B BIT(25)
-#define BIT_DDMACH1_CHKSUM_CONT_8822B BIT(24)
-
-#define BIT_SHIFT_DDMACH1_DLEN_8822B 0
-#define BIT_MASK_DDMACH1_DLEN_8822B 0x3ffff
-#define BIT_DDMACH1_DLEN_8822B(x) \
- (((x) & BIT_MASK_DDMACH1_DLEN_8822B) << BIT_SHIFT_DDMACH1_DLEN_8822B)
-#define BIT_GET_DDMACH1_DLEN_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH1_DLEN_8822B) & BIT_MASK_DDMACH1_DLEN_8822B)
-
-/* 2 REG_DDMA_CH2SA_8822B */
-
-#define BIT_SHIFT_DDMACH2_SA_8822B 0
-#define BIT_MASK_DDMACH2_SA_8822B 0xffffffffL
-#define BIT_DDMACH2_SA_8822B(x) \
- (((x) & BIT_MASK_DDMACH2_SA_8822B) << BIT_SHIFT_DDMACH2_SA_8822B)
-#define BIT_GET_DDMACH2_SA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH2_SA_8822B) & BIT_MASK_DDMACH2_SA_8822B)
-
-/* 2 REG_DDMA_CH2DA_8822B */
-
-#define BIT_SHIFT_DDMACH2_DA_8822B 0
-#define BIT_MASK_DDMACH2_DA_8822B 0xffffffffL
-#define BIT_DDMACH2_DA_8822B(x) \
- (((x) & BIT_MASK_DDMACH2_DA_8822B) << BIT_SHIFT_DDMACH2_DA_8822B)
-#define BIT_GET_DDMACH2_DA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH2_DA_8822B) & BIT_MASK_DDMACH2_DA_8822B)
-
-/* 2 REG_DDMA_CH2CTRL_8822B */
-#define BIT_DDMACH2_OWN_8822B BIT(31)
-#define BIT_DDMACH2_CHKSUM_EN_8822B BIT(29)
-#define BIT_DDMACH2_DA_W_DISABLE_8822B BIT(28)
-#define BIT_DDMACH2_CHKSUM_STS_8822B BIT(27)
-#define BIT_DDMACH2_DDMA_MODE_8822B BIT(26)
-#define BIT_DDMACH2_RESET_CHKSUM_STS_8822B BIT(25)
-#define BIT_DDMACH2_CHKSUM_CONT_8822B BIT(24)
-
-#define BIT_SHIFT_DDMACH2_DLEN_8822B 0
-#define BIT_MASK_DDMACH2_DLEN_8822B 0x3ffff
-#define BIT_DDMACH2_DLEN_8822B(x) \
- (((x) & BIT_MASK_DDMACH2_DLEN_8822B) << BIT_SHIFT_DDMACH2_DLEN_8822B)
-#define BIT_GET_DDMACH2_DLEN_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH2_DLEN_8822B) & BIT_MASK_DDMACH2_DLEN_8822B)
-
-/* 2 REG_DDMA_CH3SA_8822B */
-
-#define BIT_SHIFT_DDMACH3_SA_8822B 0
-#define BIT_MASK_DDMACH3_SA_8822B 0xffffffffL
-#define BIT_DDMACH3_SA_8822B(x) \
- (((x) & BIT_MASK_DDMACH3_SA_8822B) << BIT_SHIFT_DDMACH3_SA_8822B)
-#define BIT_GET_DDMACH3_SA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH3_SA_8822B) & BIT_MASK_DDMACH3_SA_8822B)
-
-/* 2 REG_DDMA_CH3DA_8822B */
-
-#define BIT_SHIFT_DDMACH3_DA_8822B 0
-#define BIT_MASK_DDMACH3_DA_8822B 0xffffffffL
-#define BIT_DDMACH3_DA_8822B(x) \
- (((x) & BIT_MASK_DDMACH3_DA_8822B) << BIT_SHIFT_DDMACH3_DA_8822B)
-#define BIT_GET_DDMACH3_DA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH3_DA_8822B) & BIT_MASK_DDMACH3_DA_8822B)
-
-/* 2 REG_DDMA_CH3CTRL_8822B */
-#define BIT_DDMACH3_OWN_8822B BIT(31)
-#define BIT_DDMACH3_CHKSUM_EN_8822B BIT(29)
-#define BIT_DDMACH3_DA_W_DISABLE_8822B BIT(28)
-#define BIT_DDMACH3_CHKSUM_STS_8822B BIT(27)
-#define BIT_DDMACH3_DDMA_MODE_8822B BIT(26)
-#define BIT_DDMACH3_RESET_CHKSUM_STS_8822B BIT(25)
-#define BIT_DDMACH3_CHKSUM_CONT_8822B BIT(24)
-
-#define BIT_SHIFT_DDMACH3_DLEN_8822B 0
-#define BIT_MASK_DDMACH3_DLEN_8822B 0x3ffff
-#define BIT_DDMACH3_DLEN_8822B(x) \
- (((x) & BIT_MASK_DDMACH3_DLEN_8822B) << BIT_SHIFT_DDMACH3_DLEN_8822B)
-#define BIT_GET_DDMACH3_DLEN_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH3_DLEN_8822B) & BIT_MASK_DDMACH3_DLEN_8822B)
-
-/* 2 REG_DDMA_CH4SA_8822B */
-
-#define BIT_SHIFT_DDMACH4_SA_8822B 0
-#define BIT_MASK_DDMACH4_SA_8822B 0xffffffffL
-#define BIT_DDMACH4_SA_8822B(x) \
- (((x) & BIT_MASK_DDMACH4_SA_8822B) << BIT_SHIFT_DDMACH4_SA_8822B)
-#define BIT_GET_DDMACH4_SA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH4_SA_8822B) & BIT_MASK_DDMACH4_SA_8822B)
-
-/* 2 REG_DDMA_CH4DA_8822B */
-
-#define BIT_SHIFT_DDMACH4_DA_8822B 0
-#define BIT_MASK_DDMACH4_DA_8822B 0xffffffffL
-#define BIT_DDMACH4_DA_8822B(x) \
- (((x) & BIT_MASK_DDMACH4_DA_8822B) << BIT_SHIFT_DDMACH4_DA_8822B)
-#define BIT_GET_DDMACH4_DA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH4_DA_8822B) & BIT_MASK_DDMACH4_DA_8822B)
-
-/* 2 REG_DDMA_CH4CTRL_8822B */
-#define BIT_DDMACH4_OWN_8822B BIT(31)
-#define BIT_DDMACH4_CHKSUM_EN_8822B BIT(29)
-#define BIT_DDMACH4_DA_W_DISABLE_8822B BIT(28)
-#define BIT_DDMACH4_CHKSUM_STS_8822B BIT(27)
-#define BIT_DDMACH4_DDMA_MODE_8822B BIT(26)
-#define BIT_DDMACH4_RESET_CHKSUM_STS_8822B BIT(25)
-#define BIT_DDMACH4_CHKSUM_CONT_8822B BIT(24)
-
-#define BIT_SHIFT_DDMACH4_DLEN_8822B 0
-#define BIT_MASK_DDMACH4_DLEN_8822B 0x3ffff
-#define BIT_DDMACH4_DLEN_8822B(x) \
- (((x) & BIT_MASK_DDMACH4_DLEN_8822B) << BIT_SHIFT_DDMACH4_DLEN_8822B)
-#define BIT_GET_DDMACH4_DLEN_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH4_DLEN_8822B) & BIT_MASK_DDMACH4_DLEN_8822B)
-
-/* 2 REG_DDMA_CH5SA_8822B */
-
-#define BIT_SHIFT_DDMACH5_SA_8822B 0
-#define BIT_MASK_DDMACH5_SA_8822B 0xffffffffL
-#define BIT_DDMACH5_SA_8822B(x) \
- (((x) & BIT_MASK_DDMACH5_SA_8822B) << BIT_SHIFT_DDMACH5_SA_8822B)
-#define BIT_GET_DDMACH5_SA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH5_SA_8822B) & BIT_MASK_DDMACH5_SA_8822B)
-
-/* 2 REG_DDMA_CH5DA_8822B */
-
-#define BIT_SHIFT_DDMACH5_DA_8822B 0
-#define BIT_MASK_DDMACH5_DA_8822B 0xffffffffL
-#define BIT_DDMACH5_DA_8822B(x) \
- (((x) & BIT_MASK_DDMACH5_DA_8822B) << BIT_SHIFT_DDMACH5_DA_8822B)
-#define BIT_GET_DDMACH5_DA_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH5_DA_8822B) & BIT_MASK_DDMACH5_DA_8822B)
-
-/* 2 REG_REG_DDMA_CH5CTRL_8822B */
-#define BIT_DDMACH5_OWN_8822B BIT(31)
-#define BIT_DDMACH5_CHKSUM_EN_8822B BIT(29)
-#define BIT_DDMACH5_DA_W_DISABLE_8822B BIT(28)
-#define BIT_DDMACH5_CHKSUM_STS_8822B BIT(27)
-#define BIT_DDMACH5_DDMA_MODE_8822B BIT(26)
-#define BIT_DDMACH5_RESET_CHKSUM_STS_8822B BIT(25)
-#define BIT_DDMACH5_CHKSUM_CONT_8822B BIT(24)
-
-#define BIT_SHIFT_DDMACH5_DLEN_8822B 0
-#define BIT_MASK_DDMACH5_DLEN_8822B 0x3ffff
-#define BIT_DDMACH5_DLEN_8822B(x) \
- (((x) & BIT_MASK_DDMACH5_DLEN_8822B) << BIT_SHIFT_DDMACH5_DLEN_8822B)
-#define BIT_GET_DDMACH5_DLEN_8822B(x) \
- (((x) >> BIT_SHIFT_DDMACH5_DLEN_8822B) & BIT_MASK_DDMACH5_DLEN_8822B)
-
-/* 2 REG_DDMA_INT_MSK_8822B */
-#define BIT_DDMACH5_MSK_8822B BIT(5)
-#define BIT_DDMACH4_MSK_8822B BIT(4)
-#define BIT_DDMACH3_MSK_8822B BIT(3)
-#define BIT_DDMACH2_MSK_8822B BIT(2)
-#define BIT_DDMACH1_MSK_8822B BIT(1)
-#define BIT_DDMACH0_MSK_8822B BIT(0)
-
-/* 2 REG_DDMA_CHSTATUS_8822B */
-#define BIT_DDMACH5_BUSY_8822B BIT(5)
-#define BIT_DDMACH4_BUSY_8822B BIT(4)
-#define BIT_DDMACH3_BUSY_8822B BIT(3)
-#define BIT_DDMACH2_BUSY_8822B BIT(2)
-#define BIT_DDMACH1_BUSY_8822B BIT(1)
-#define BIT_DDMACH0_BUSY_8822B BIT(0)
-
-/* 2 REG_DDMA_CHKSUM_8822B */
-
-#define BIT_SHIFT_IDDMA0_CHKSUM_8822B 0
-#define BIT_MASK_IDDMA0_CHKSUM_8822B 0xffff
-#define BIT_IDDMA0_CHKSUM_8822B(x) \
- (((x) & BIT_MASK_IDDMA0_CHKSUM_8822B) << BIT_SHIFT_IDDMA0_CHKSUM_8822B)
-#define BIT_GET_IDDMA0_CHKSUM_8822B(x) \
- (((x) >> BIT_SHIFT_IDDMA0_CHKSUM_8822B) & BIT_MASK_IDDMA0_CHKSUM_8822B)
-
-/* 2 REG_DDMA_MONITOR_8822B */
-#define BIT_IDDMA0_PERMU_UNDERFLOW_8822B BIT(14)
-#define BIT_IDDMA0_FIFO_UNDERFLOW_8822B BIT(13)
-#define BIT_IDDMA0_FIFO_OVERFLOW_8822B BIT(12)
-#define BIT_CH5_ERR_8822B BIT(5)
-#define BIT_CH4_ERR_8822B BIT(4)
-#define BIT_CH3_ERR_8822B BIT(3)
-#define BIT_CH2_ERR_8822B BIT(2)
-#define BIT_CH1_ERR_8822B BIT(1)
-#define BIT_CH0_ERR_8822B BIT(0)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_PCIE_CTRL_8822B */
-#define BIT_PCIEIO_PERSTB_SEL_8822B BIT(31)
-
-#define BIT_SHIFT_PCIE_MAX_RXDMA_8822B 28
-#define BIT_MASK_PCIE_MAX_RXDMA_8822B 0x7
-#define BIT_PCIE_MAX_RXDMA_8822B(x) \
- (((x) & BIT_MASK_PCIE_MAX_RXDMA_8822B) \
- << BIT_SHIFT_PCIE_MAX_RXDMA_8822B)
-#define BIT_GET_PCIE_MAX_RXDMA_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_MAX_RXDMA_8822B) & \
- BIT_MASK_PCIE_MAX_RXDMA_8822B)
-
-#define BIT_MULRW_8822B BIT(27)
-
-#define BIT_SHIFT_PCIE_MAX_TXDMA_8822B 24
-#define BIT_MASK_PCIE_MAX_TXDMA_8822B 0x7
-#define BIT_PCIE_MAX_TXDMA_8822B(x) \
- (((x) & BIT_MASK_PCIE_MAX_TXDMA_8822B) \
- << BIT_SHIFT_PCIE_MAX_TXDMA_8822B)
-#define BIT_GET_PCIE_MAX_TXDMA_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_MAX_TXDMA_8822B) & \
- BIT_MASK_PCIE_MAX_TXDMA_8822B)
-
-#define BIT_EN_CPL_TIMEOUT_PS_8822B BIT(22)
-#define BIT_REG_TXDMA_FAIL_PS_8822B BIT(21)
-#define BIT_PCIE_RST_TRXDMA_INTF_8822B BIT(20)
-#define BIT_EN_HWENTR_L1_8822B BIT(19)
-#define BIT_EN_ADV_CLKGATE_8822B BIT(18)
-#define BIT_PCIE_EN_SWENT_L23_8822B BIT(17)
-#define BIT_PCIE_EN_HWEXT_L1_8822B BIT(16)
-#define BIT_RX_CLOSE_EN_8822B BIT(15)
-#define BIT_STOP_BCNQ_8822B BIT(14)
-#define BIT_STOP_MGQ_8822B BIT(13)
-#define BIT_STOP_VOQ_8822B BIT(12)
-#define BIT_STOP_VIQ_8822B BIT(11)
-#define BIT_STOP_BEQ_8822B BIT(10)
-#define BIT_STOP_BKQ_8822B BIT(9)
-#define BIT_STOP_RXQ_8822B BIT(8)
-#define BIT_STOP_HI7Q_8822B BIT(7)
-#define BIT_STOP_HI6Q_8822B BIT(6)
-#define BIT_STOP_HI5Q_8822B BIT(5)
-#define BIT_STOP_HI4Q_8822B BIT(4)
-#define BIT_STOP_HI3Q_8822B BIT(3)
-#define BIT_STOP_HI2Q_8822B BIT(2)
-#define BIT_STOP_HI1Q_8822B BIT(1)
-#define BIT_STOP_HI0Q_8822B BIT(0)
-
-/* 2 REG_INT_MIG_8822B */
-
-#define BIT_SHIFT_TXTTIMER_MATCH_NUM_8822B 28
-#define BIT_MASK_TXTTIMER_MATCH_NUM_8822B 0xf
-#define BIT_TXTTIMER_MATCH_NUM_8822B(x) \
- (((x) & BIT_MASK_TXTTIMER_MATCH_NUM_8822B) \
- << BIT_SHIFT_TXTTIMER_MATCH_NUM_8822B)
-#define BIT_GET_TXTTIMER_MATCH_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_TXTTIMER_MATCH_NUM_8822B) & \
- BIT_MASK_TXTTIMER_MATCH_NUM_8822B)
-
-#define BIT_SHIFT_TXPKT_NUM_MATCH_8822B 24
-#define BIT_MASK_TXPKT_NUM_MATCH_8822B 0xf
-#define BIT_TXPKT_NUM_MATCH_8822B(x) \
- (((x) & BIT_MASK_TXPKT_NUM_MATCH_8822B) \
- << BIT_SHIFT_TXPKT_NUM_MATCH_8822B)
-#define BIT_GET_TXPKT_NUM_MATCH_8822B(x) \
- (((x) >> BIT_SHIFT_TXPKT_NUM_MATCH_8822B) & \
- BIT_MASK_TXPKT_NUM_MATCH_8822B)
-
-#define BIT_SHIFT_RXTTIMER_MATCH_NUM_8822B 20
-#define BIT_MASK_RXTTIMER_MATCH_NUM_8822B 0xf
-#define BIT_RXTTIMER_MATCH_NUM_8822B(x) \
- (((x) & BIT_MASK_RXTTIMER_MATCH_NUM_8822B) \
- << BIT_SHIFT_RXTTIMER_MATCH_NUM_8822B)
-#define BIT_GET_RXTTIMER_MATCH_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_RXTTIMER_MATCH_NUM_8822B) & \
- BIT_MASK_RXTTIMER_MATCH_NUM_8822B)
-
-#define BIT_SHIFT_RXPKT_NUM_MATCH_8822B 16
-#define BIT_MASK_RXPKT_NUM_MATCH_8822B 0xf
-#define BIT_RXPKT_NUM_MATCH_8822B(x) \
- (((x) & BIT_MASK_RXPKT_NUM_MATCH_8822B) \
- << BIT_SHIFT_RXPKT_NUM_MATCH_8822B)
-#define BIT_GET_RXPKT_NUM_MATCH_8822B(x) \
- (((x) >> BIT_SHIFT_RXPKT_NUM_MATCH_8822B) & \
- BIT_MASK_RXPKT_NUM_MATCH_8822B)
-
-#define BIT_SHIFT_MIGRATE_TIMER_8822B 0
-#define BIT_MASK_MIGRATE_TIMER_8822B 0xffff
-#define BIT_MIGRATE_TIMER_8822B(x) \
- (((x) & BIT_MASK_MIGRATE_TIMER_8822B) << BIT_SHIFT_MIGRATE_TIMER_8822B)
-#define BIT_GET_MIGRATE_TIMER_8822B(x) \
- (((x) >> BIT_SHIFT_MIGRATE_TIMER_8822B) & BIT_MASK_MIGRATE_TIMER_8822B)
-
-/* 2 REG_BCNQ_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_BCNQ_TXBD_DESA_8822B 0
-#define BIT_MASK_BCNQ_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_BCNQ_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_BCNQ_TXBD_DESA_8822B) \
- << BIT_SHIFT_BCNQ_TXBD_DESA_8822B)
-#define BIT_GET_BCNQ_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_BCNQ_TXBD_DESA_8822B) & \
- BIT_MASK_BCNQ_TXBD_DESA_8822B)
-
-/* 2 REG_MGQ_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_MGQ_TXBD_DESA_8822B 0
-#define BIT_MASK_MGQ_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_MGQ_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_MGQ_TXBD_DESA_8822B) << BIT_SHIFT_MGQ_TXBD_DESA_8822B)
-#define BIT_GET_MGQ_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_MGQ_TXBD_DESA_8822B) & BIT_MASK_MGQ_TXBD_DESA_8822B)
-
-/* 2 REG_VOQ_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_VOQ_TXBD_DESA_8822B 0
-#define BIT_MASK_VOQ_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_VOQ_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_VOQ_TXBD_DESA_8822B) << BIT_SHIFT_VOQ_TXBD_DESA_8822B)
-#define BIT_GET_VOQ_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_VOQ_TXBD_DESA_8822B) & BIT_MASK_VOQ_TXBD_DESA_8822B)
-
-/* 2 REG_VIQ_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_VIQ_TXBD_DESA_8822B 0
-#define BIT_MASK_VIQ_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_VIQ_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_VIQ_TXBD_DESA_8822B) << BIT_SHIFT_VIQ_TXBD_DESA_8822B)
-#define BIT_GET_VIQ_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_VIQ_TXBD_DESA_8822B) & BIT_MASK_VIQ_TXBD_DESA_8822B)
-
-/* 2 REG_BEQ_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_BEQ_TXBD_DESA_8822B 0
-#define BIT_MASK_BEQ_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_BEQ_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_BEQ_TXBD_DESA_8822B) << BIT_SHIFT_BEQ_TXBD_DESA_8822B)
-#define BIT_GET_BEQ_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_BEQ_TXBD_DESA_8822B) & BIT_MASK_BEQ_TXBD_DESA_8822B)
-
-/* 2 REG_BKQ_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_BKQ_TXBD_DESA_8822B 0
-#define BIT_MASK_BKQ_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_BKQ_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_BKQ_TXBD_DESA_8822B) << BIT_SHIFT_BKQ_TXBD_DESA_8822B)
-#define BIT_GET_BKQ_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_BKQ_TXBD_DESA_8822B) & BIT_MASK_BKQ_TXBD_DESA_8822B)
-
-/* 2 REG_RXQ_RXBD_DESA_8822B */
-
-#define BIT_SHIFT_RXQ_RXBD_DESA_8822B 0
-#define BIT_MASK_RXQ_RXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_RXQ_RXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_RXQ_RXBD_DESA_8822B) << BIT_SHIFT_RXQ_RXBD_DESA_8822B)
-#define BIT_GET_RXQ_RXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_RXQ_RXBD_DESA_8822B) & BIT_MASK_RXQ_RXBD_DESA_8822B)
-
-/* 2 REG_HI0Q_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_HI0Q_TXBD_DESA_8822B 0
-#define BIT_MASK_HI0Q_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_HI0Q_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_HI0Q_TXBD_DESA_8822B) \
- << BIT_SHIFT_HI0Q_TXBD_DESA_8822B)
-#define BIT_GET_HI0Q_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_HI0Q_TXBD_DESA_8822B) & \
- BIT_MASK_HI0Q_TXBD_DESA_8822B)
-
-/* 2 REG_HI1Q_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_HI1Q_TXBD_DESA_8822B 0
-#define BIT_MASK_HI1Q_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_HI1Q_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_HI1Q_TXBD_DESA_8822B) \
- << BIT_SHIFT_HI1Q_TXBD_DESA_8822B)
-#define BIT_GET_HI1Q_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_HI1Q_TXBD_DESA_8822B) & \
- BIT_MASK_HI1Q_TXBD_DESA_8822B)
-
-/* 2 REG_HI2Q_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_HI2Q_TXBD_DESA_8822B 0
-#define BIT_MASK_HI2Q_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_HI2Q_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_HI2Q_TXBD_DESA_8822B) \
- << BIT_SHIFT_HI2Q_TXBD_DESA_8822B)
-#define BIT_GET_HI2Q_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_HI2Q_TXBD_DESA_8822B) & \
- BIT_MASK_HI2Q_TXBD_DESA_8822B)
-
-/* 2 REG_HI3Q_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_HI3Q_TXBD_DESA_8822B 0
-#define BIT_MASK_HI3Q_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_HI3Q_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_HI3Q_TXBD_DESA_8822B) \
- << BIT_SHIFT_HI3Q_TXBD_DESA_8822B)
-#define BIT_GET_HI3Q_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_HI3Q_TXBD_DESA_8822B) & \
- BIT_MASK_HI3Q_TXBD_DESA_8822B)
-
-/* 2 REG_HI4Q_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_HI4Q_TXBD_DESA_8822B 0
-#define BIT_MASK_HI4Q_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_HI4Q_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_HI4Q_TXBD_DESA_8822B) \
- << BIT_SHIFT_HI4Q_TXBD_DESA_8822B)
-#define BIT_GET_HI4Q_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_HI4Q_TXBD_DESA_8822B) & \
- BIT_MASK_HI4Q_TXBD_DESA_8822B)
-
-/* 2 REG_HI5Q_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_HI5Q_TXBD_DESA_8822B 0
-#define BIT_MASK_HI5Q_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_HI5Q_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_HI5Q_TXBD_DESA_8822B) \
- << BIT_SHIFT_HI5Q_TXBD_DESA_8822B)
-#define BIT_GET_HI5Q_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_HI5Q_TXBD_DESA_8822B) & \
- BIT_MASK_HI5Q_TXBD_DESA_8822B)
-
-/* 2 REG_HI6Q_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_HI6Q_TXBD_DESA_8822B 0
-#define BIT_MASK_HI6Q_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_HI6Q_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_HI6Q_TXBD_DESA_8822B) \
- << BIT_SHIFT_HI6Q_TXBD_DESA_8822B)
-#define BIT_GET_HI6Q_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_HI6Q_TXBD_DESA_8822B) & \
- BIT_MASK_HI6Q_TXBD_DESA_8822B)
-
-/* 2 REG_HI7Q_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_HI7Q_TXBD_DESA_8822B 0
-#define BIT_MASK_HI7Q_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_HI7Q_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_HI7Q_TXBD_DESA_8822B) \
- << BIT_SHIFT_HI7Q_TXBD_DESA_8822B)
-#define BIT_GET_HI7Q_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_HI7Q_TXBD_DESA_8822B) & \
- BIT_MASK_HI7Q_TXBD_DESA_8822B)
-
-/* 2 REG_MGQ_TXBD_NUM_8822B */
-#define BIT_PCIE_MGQ_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_MGQ_DESC_MODE_8822B 12
-#define BIT_MASK_MGQ_DESC_MODE_8822B 0x3
-#define BIT_MGQ_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_MGQ_DESC_MODE_8822B) << BIT_SHIFT_MGQ_DESC_MODE_8822B)
-#define BIT_GET_MGQ_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_MGQ_DESC_MODE_8822B) & BIT_MASK_MGQ_DESC_MODE_8822B)
-
-#define BIT_SHIFT_MGQ_DESC_NUM_8822B 0
-#define BIT_MASK_MGQ_DESC_NUM_8822B 0xfff
-#define BIT_MGQ_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_MGQ_DESC_NUM_8822B) << BIT_SHIFT_MGQ_DESC_NUM_8822B)
-#define BIT_GET_MGQ_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_MGQ_DESC_NUM_8822B) & BIT_MASK_MGQ_DESC_NUM_8822B)
-
-/* 2 REG_RX_RXBD_NUM_8822B */
-#define BIT_SYS_32_64_8822B BIT(15)
-
-#define BIT_SHIFT_BCNQ_DESC_MODE_8822B 13
-#define BIT_MASK_BCNQ_DESC_MODE_8822B 0x3
-#define BIT_BCNQ_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_BCNQ_DESC_MODE_8822B) \
- << BIT_SHIFT_BCNQ_DESC_MODE_8822B)
-#define BIT_GET_BCNQ_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_BCNQ_DESC_MODE_8822B) & \
- BIT_MASK_BCNQ_DESC_MODE_8822B)
-
-#define BIT_PCIE_BCNQ_FLAG_8822B BIT(12)
-
-#define BIT_SHIFT_RXQ_DESC_NUM_8822B 0
-#define BIT_MASK_RXQ_DESC_NUM_8822B 0xfff
-#define BIT_RXQ_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_RXQ_DESC_NUM_8822B) << BIT_SHIFT_RXQ_DESC_NUM_8822B)
-#define BIT_GET_RXQ_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_RXQ_DESC_NUM_8822B) & BIT_MASK_RXQ_DESC_NUM_8822B)
-
-/* 2 REG_VOQ_TXBD_NUM_8822B */
-#define BIT_PCIE_VOQ_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_VOQ_DESC_MODE_8822B 12
-#define BIT_MASK_VOQ_DESC_MODE_8822B 0x3
-#define BIT_VOQ_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_VOQ_DESC_MODE_8822B) << BIT_SHIFT_VOQ_DESC_MODE_8822B)
-#define BIT_GET_VOQ_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_VOQ_DESC_MODE_8822B) & BIT_MASK_VOQ_DESC_MODE_8822B)
-
-#define BIT_SHIFT_VOQ_DESC_NUM_8822B 0
-#define BIT_MASK_VOQ_DESC_NUM_8822B 0xfff
-#define BIT_VOQ_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_VOQ_DESC_NUM_8822B) << BIT_SHIFT_VOQ_DESC_NUM_8822B)
-#define BIT_GET_VOQ_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_VOQ_DESC_NUM_8822B) & BIT_MASK_VOQ_DESC_NUM_8822B)
-
-/* 2 REG_VIQ_TXBD_NUM_8822B */
-#define BIT_PCIE_VIQ_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_VIQ_DESC_MODE_8822B 12
-#define BIT_MASK_VIQ_DESC_MODE_8822B 0x3
-#define BIT_VIQ_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_VIQ_DESC_MODE_8822B) << BIT_SHIFT_VIQ_DESC_MODE_8822B)
-#define BIT_GET_VIQ_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_VIQ_DESC_MODE_8822B) & BIT_MASK_VIQ_DESC_MODE_8822B)
-
-#define BIT_SHIFT_VIQ_DESC_NUM_8822B 0
-#define BIT_MASK_VIQ_DESC_NUM_8822B 0xfff
-#define BIT_VIQ_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_VIQ_DESC_NUM_8822B) << BIT_SHIFT_VIQ_DESC_NUM_8822B)
-#define BIT_GET_VIQ_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_VIQ_DESC_NUM_8822B) & BIT_MASK_VIQ_DESC_NUM_8822B)
-
-/* 2 REG_BEQ_TXBD_NUM_8822B */
-#define BIT_PCIE_BEQ_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_BEQ_DESC_MODE_8822B 12
-#define BIT_MASK_BEQ_DESC_MODE_8822B 0x3
-#define BIT_BEQ_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_BEQ_DESC_MODE_8822B) << BIT_SHIFT_BEQ_DESC_MODE_8822B)
-#define BIT_GET_BEQ_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_BEQ_DESC_MODE_8822B) & BIT_MASK_BEQ_DESC_MODE_8822B)
-
-#define BIT_SHIFT_BEQ_DESC_NUM_8822B 0
-#define BIT_MASK_BEQ_DESC_NUM_8822B 0xfff
-#define BIT_BEQ_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_BEQ_DESC_NUM_8822B) << BIT_SHIFT_BEQ_DESC_NUM_8822B)
-#define BIT_GET_BEQ_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_BEQ_DESC_NUM_8822B) & BIT_MASK_BEQ_DESC_NUM_8822B)
-
-/* 2 REG_BKQ_TXBD_NUM_8822B */
-#define BIT_PCIE_BKQ_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_BKQ_DESC_MODE_8822B 12
-#define BIT_MASK_BKQ_DESC_MODE_8822B 0x3
-#define BIT_BKQ_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_BKQ_DESC_MODE_8822B) << BIT_SHIFT_BKQ_DESC_MODE_8822B)
-#define BIT_GET_BKQ_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_BKQ_DESC_MODE_8822B) & BIT_MASK_BKQ_DESC_MODE_8822B)
-
-#define BIT_SHIFT_BKQ_DESC_NUM_8822B 0
-#define BIT_MASK_BKQ_DESC_NUM_8822B 0xfff
-#define BIT_BKQ_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_BKQ_DESC_NUM_8822B) << BIT_SHIFT_BKQ_DESC_NUM_8822B)
-#define BIT_GET_BKQ_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_BKQ_DESC_NUM_8822B) & BIT_MASK_BKQ_DESC_NUM_8822B)
-
-/* 2 REG_HI0Q_TXBD_NUM_8822B */
-#define BIT_HI0Q_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_HI0Q_DESC_MODE_8822B 12
-#define BIT_MASK_HI0Q_DESC_MODE_8822B 0x3
-#define BIT_HI0Q_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_HI0Q_DESC_MODE_8822B) \
- << BIT_SHIFT_HI0Q_DESC_MODE_8822B)
-#define BIT_GET_HI0Q_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_HI0Q_DESC_MODE_8822B) & \
- BIT_MASK_HI0Q_DESC_MODE_8822B)
-
-#define BIT_SHIFT_HI0Q_DESC_NUM_8822B 0
-#define BIT_MASK_HI0Q_DESC_NUM_8822B 0xfff
-#define BIT_HI0Q_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_HI0Q_DESC_NUM_8822B) << BIT_SHIFT_HI0Q_DESC_NUM_8822B)
-#define BIT_GET_HI0Q_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_HI0Q_DESC_NUM_8822B) & BIT_MASK_HI0Q_DESC_NUM_8822B)
-
-/* 2 REG_HI1Q_TXBD_NUM_8822B */
-#define BIT_HI1Q_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_HI1Q_DESC_MODE_8822B 12
-#define BIT_MASK_HI1Q_DESC_MODE_8822B 0x3
-#define BIT_HI1Q_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_HI1Q_DESC_MODE_8822B) \
- << BIT_SHIFT_HI1Q_DESC_MODE_8822B)
-#define BIT_GET_HI1Q_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_HI1Q_DESC_MODE_8822B) & \
- BIT_MASK_HI1Q_DESC_MODE_8822B)
-
-#define BIT_SHIFT_HI1Q_DESC_NUM_8822B 0
-#define BIT_MASK_HI1Q_DESC_NUM_8822B 0xfff
-#define BIT_HI1Q_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_HI1Q_DESC_NUM_8822B) << BIT_SHIFT_HI1Q_DESC_NUM_8822B)
-#define BIT_GET_HI1Q_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_HI1Q_DESC_NUM_8822B) & BIT_MASK_HI1Q_DESC_NUM_8822B)
-
-/* 2 REG_HI2Q_TXBD_NUM_8822B */
-#define BIT_HI2Q_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_HI2Q_DESC_MODE_8822B 12
-#define BIT_MASK_HI2Q_DESC_MODE_8822B 0x3
-#define BIT_HI2Q_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_HI2Q_DESC_MODE_8822B) \
- << BIT_SHIFT_HI2Q_DESC_MODE_8822B)
-#define BIT_GET_HI2Q_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_HI2Q_DESC_MODE_8822B) & \
- BIT_MASK_HI2Q_DESC_MODE_8822B)
-
-#define BIT_SHIFT_HI2Q_DESC_NUM_8822B 0
-#define BIT_MASK_HI2Q_DESC_NUM_8822B 0xfff
-#define BIT_HI2Q_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_HI2Q_DESC_NUM_8822B) << BIT_SHIFT_HI2Q_DESC_NUM_8822B)
-#define BIT_GET_HI2Q_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_HI2Q_DESC_NUM_8822B) & BIT_MASK_HI2Q_DESC_NUM_8822B)
-
-/* 2 REG_HI3Q_TXBD_NUM_8822B */
-#define BIT_HI3Q_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_HI3Q_DESC_MODE_8822B 12
-#define BIT_MASK_HI3Q_DESC_MODE_8822B 0x3
-#define BIT_HI3Q_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_HI3Q_DESC_MODE_8822B) \
- << BIT_SHIFT_HI3Q_DESC_MODE_8822B)
-#define BIT_GET_HI3Q_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_HI3Q_DESC_MODE_8822B) & \
- BIT_MASK_HI3Q_DESC_MODE_8822B)
-
-#define BIT_SHIFT_HI3Q_DESC_NUM_8822B 0
-#define BIT_MASK_HI3Q_DESC_NUM_8822B 0xfff
-#define BIT_HI3Q_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_HI3Q_DESC_NUM_8822B) << BIT_SHIFT_HI3Q_DESC_NUM_8822B)
-#define BIT_GET_HI3Q_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_HI3Q_DESC_NUM_8822B) & BIT_MASK_HI3Q_DESC_NUM_8822B)
-
-/* 2 REG_HI4Q_TXBD_NUM_8822B */
-#define BIT_HI4Q_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_HI4Q_DESC_MODE_8822B 12
-#define BIT_MASK_HI4Q_DESC_MODE_8822B 0x3
-#define BIT_HI4Q_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_HI4Q_DESC_MODE_8822B) \
- << BIT_SHIFT_HI4Q_DESC_MODE_8822B)
-#define BIT_GET_HI4Q_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_HI4Q_DESC_MODE_8822B) & \
- BIT_MASK_HI4Q_DESC_MODE_8822B)
-
-#define BIT_SHIFT_HI4Q_DESC_NUM_8822B 0
-#define BIT_MASK_HI4Q_DESC_NUM_8822B 0xfff
-#define BIT_HI4Q_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_HI4Q_DESC_NUM_8822B) << BIT_SHIFT_HI4Q_DESC_NUM_8822B)
-#define BIT_GET_HI4Q_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_HI4Q_DESC_NUM_8822B) & BIT_MASK_HI4Q_DESC_NUM_8822B)
-
-/* 2 REG_HI5Q_TXBD_NUM_8822B */
-#define BIT_HI5Q_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_HI5Q_DESC_MODE_8822B 12
-#define BIT_MASK_HI5Q_DESC_MODE_8822B 0x3
-#define BIT_HI5Q_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_HI5Q_DESC_MODE_8822B) \
- << BIT_SHIFT_HI5Q_DESC_MODE_8822B)
-#define BIT_GET_HI5Q_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_HI5Q_DESC_MODE_8822B) & \
- BIT_MASK_HI5Q_DESC_MODE_8822B)
-
-#define BIT_SHIFT_HI5Q_DESC_NUM_8822B 0
-#define BIT_MASK_HI5Q_DESC_NUM_8822B 0xfff
-#define BIT_HI5Q_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_HI5Q_DESC_NUM_8822B) << BIT_SHIFT_HI5Q_DESC_NUM_8822B)
-#define BIT_GET_HI5Q_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_HI5Q_DESC_NUM_8822B) & BIT_MASK_HI5Q_DESC_NUM_8822B)
-
-/* 2 REG_HI6Q_TXBD_NUM_8822B */
-#define BIT_HI6Q_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_HI6Q_DESC_MODE_8822B 12
-#define BIT_MASK_HI6Q_DESC_MODE_8822B 0x3
-#define BIT_HI6Q_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_HI6Q_DESC_MODE_8822B) \
- << BIT_SHIFT_HI6Q_DESC_MODE_8822B)
-#define BIT_GET_HI6Q_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_HI6Q_DESC_MODE_8822B) & \
- BIT_MASK_HI6Q_DESC_MODE_8822B)
-
-#define BIT_SHIFT_HI6Q_DESC_NUM_8822B 0
-#define BIT_MASK_HI6Q_DESC_NUM_8822B 0xfff
-#define BIT_HI6Q_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_HI6Q_DESC_NUM_8822B) << BIT_SHIFT_HI6Q_DESC_NUM_8822B)
-#define BIT_GET_HI6Q_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_HI6Q_DESC_NUM_8822B) & BIT_MASK_HI6Q_DESC_NUM_8822B)
-
-/* 2 REG_HI7Q_TXBD_NUM_8822B */
-#define BIT_HI7Q_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_HI7Q_DESC_MODE_8822B 12
-#define BIT_MASK_HI7Q_DESC_MODE_8822B 0x3
-#define BIT_HI7Q_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_HI7Q_DESC_MODE_8822B) \
- << BIT_SHIFT_HI7Q_DESC_MODE_8822B)
-#define BIT_GET_HI7Q_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_HI7Q_DESC_MODE_8822B) & \
- BIT_MASK_HI7Q_DESC_MODE_8822B)
-
-#define BIT_SHIFT_HI7Q_DESC_NUM_8822B 0
-#define BIT_MASK_HI7Q_DESC_NUM_8822B 0xfff
-#define BIT_HI7Q_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_HI7Q_DESC_NUM_8822B) << BIT_SHIFT_HI7Q_DESC_NUM_8822B)
-#define BIT_GET_HI7Q_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_HI7Q_DESC_NUM_8822B) & BIT_MASK_HI7Q_DESC_NUM_8822B)
-
-/* 2 REG_TSFTIMER_HCI_8822B */
-
-#define BIT_SHIFT_TSFT2_HCI_8822B 16
-#define BIT_MASK_TSFT2_HCI_8822B 0xffff
-#define BIT_TSFT2_HCI_8822B(x) \
- (((x) & BIT_MASK_TSFT2_HCI_8822B) << BIT_SHIFT_TSFT2_HCI_8822B)
-#define BIT_GET_TSFT2_HCI_8822B(x) \
- (((x) >> BIT_SHIFT_TSFT2_HCI_8822B) & BIT_MASK_TSFT2_HCI_8822B)
-
-#define BIT_SHIFT_TSFT1_HCI_8822B 0
-#define BIT_MASK_TSFT1_HCI_8822B 0xffff
-#define BIT_TSFT1_HCI_8822B(x) \
- (((x) & BIT_MASK_TSFT1_HCI_8822B) << BIT_SHIFT_TSFT1_HCI_8822B)
-#define BIT_GET_TSFT1_HCI_8822B(x) \
- (((x) >> BIT_SHIFT_TSFT1_HCI_8822B) & BIT_MASK_TSFT1_HCI_8822B)
-
-/* 2 REG_BD_RWPTR_CLR_8822B */
-#define BIT_CLR_HI7Q_HW_IDX_8822B BIT(29)
-#define BIT_CLR_HI6Q_HW_IDX_8822B BIT(28)
-#define BIT_CLR_HI5Q_HW_IDX_8822B BIT(27)
-#define BIT_CLR_HI4Q_HW_IDX_8822B BIT(26)
-#define BIT_CLR_HI3Q_HW_IDX_8822B BIT(25)
-#define BIT_CLR_HI2Q_HW_IDX_8822B BIT(24)
-#define BIT_CLR_HI1Q_HW_IDX_8822B BIT(23)
-#define BIT_CLR_HI0Q_HW_IDX_8822B BIT(22)
-#define BIT_CLR_BKQ_HW_IDX_8822B BIT(21)
-#define BIT_CLR_BEQ_HW_IDX_8822B BIT(20)
-#define BIT_CLR_VIQ_HW_IDX_8822B BIT(19)
-#define BIT_CLR_VOQ_HW_IDX_8822B BIT(18)
-#define BIT_CLR_MGQ_HW_IDX_8822B BIT(17)
-#define BIT_CLR_RXQ_HW_IDX_8822B BIT(16)
-#define BIT_CLR_HI7Q_HOST_IDX_8822B BIT(13)
-#define BIT_CLR_HI6Q_HOST_IDX_8822B BIT(12)
-#define BIT_CLR_HI5Q_HOST_IDX_8822B BIT(11)
-#define BIT_CLR_HI4Q_HOST_IDX_8822B BIT(10)
-#define BIT_CLR_HI3Q_HOST_IDX_8822B BIT(9)
-#define BIT_CLR_HI2Q_HOST_IDX_8822B BIT(8)
-#define BIT_CLR_HI1Q_HOST_IDX_8822B BIT(7)
-#define BIT_CLR_HI0Q_HOST_IDX_8822B BIT(6)
-#define BIT_CLR_BKQ_HOST_IDX_8822B BIT(5)
-#define BIT_CLR_BEQ_HOST_IDX_8822B BIT(4)
-#define BIT_CLR_VIQ_HOST_IDX_8822B BIT(3)
-#define BIT_CLR_VOQ_HOST_IDX_8822B BIT(2)
-#define BIT_CLR_MGQ_HOST_IDX_8822B BIT(1)
-#define BIT_CLR_RXQ_HOST_IDX_8822B BIT(0)
-
-/* 2 REG_VOQ_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_VOQ_HW_IDX_8822B 16
-#define BIT_MASK_VOQ_HW_IDX_8822B 0xfff
-#define BIT_VOQ_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_VOQ_HW_IDX_8822B) << BIT_SHIFT_VOQ_HW_IDX_8822B)
-#define BIT_GET_VOQ_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_VOQ_HW_IDX_8822B) & BIT_MASK_VOQ_HW_IDX_8822B)
-
-#define BIT_SHIFT_VOQ_HOST_IDX_8822B 0
-#define BIT_MASK_VOQ_HOST_IDX_8822B 0xfff
-#define BIT_VOQ_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_VOQ_HOST_IDX_8822B) << BIT_SHIFT_VOQ_HOST_IDX_8822B)
-#define BIT_GET_VOQ_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_VOQ_HOST_IDX_8822B) & BIT_MASK_VOQ_HOST_IDX_8822B)
-
-/* 2 REG_VIQ_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_VIQ_HW_IDX_8822B 16
-#define BIT_MASK_VIQ_HW_IDX_8822B 0xfff
-#define BIT_VIQ_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_VIQ_HW_IDX_8822B) << BIT_SHIFT_VIQ_HW_IDX_8822B)
-#define BIT_GET_VIQ_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_VIQ_HW_IDX_8822B) & BIT_MASK_VIQ_HW_IDX_8822B)
-
-#define BIT_SHIFT_VIQ_HOST_IDX_8822B 0
-#define BIT_MASK_VIQ_HOST_IDX_8822B 0xfff
-#define BIT_VIQ_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_VIQ_HOST_IDX_8822B) << BIT_SHIFT_VIQ_HOST_IDX_8822B)
-#define BIT_GET_VIQ_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_VIQ_HOST_IDX_8822B) & BIT_MASK_VIQ_HOST_IDX_8822B)
-
-/* 2 REG_BEQ_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_BEQ_HW_IDX_8822B 16
-#define BIT_MASK_BEQ_HW_IDX_8822B 0xfff
-#define BIT_BEQ_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_BEQ_HW_IDX_8822B) << BIT_SHIFT_BEQ_HW_IDX_8822B)
-#define BIT_GET_BEQ_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_BEQ_HW_IDX_8822B) & BIT_MASK_BEQ_HW_IDX_8822B)
-
-#define BIT_SHIFT_BEQ_HOST_IDX_8822B 0
-#define BIT_MASK_BEQ_HOST_IDX_8822B 0xfff
-#define BIT_BEQ_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_BEQ_HOST_IDX_8822B) << BIT_SHIFT_BEQ_HOST_IDX_8822B)
-#define BIT_GET_BEQ_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_BEQ_HOST_IDX_8822B) & BIT_MASK_BEQ_HOST_IDX_8822B)
-
-/* 2 REG_BKQ_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_BKQ_HW_IDX_8822B 16
-#define BIT_MASK_BKQ_HW_IDX_8822B 0xfff
-#define BIT_BKQ_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_BKQ_HW_IDX_8822B) << BIT_SHIFT_BKQ_HW_IDX_8822B)
-#define BIT_GET_BKQ_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_BKQ_HW_IDX_8822B) & BIT_MASK_BKQ_HW_IDX_8822B)
-
-#define BIT_SHIFT_BKQ_HOST_IDX_8822B 0
-#define BIT_MASK_BKQ_HOST_IDX_8822B 0xfff
-#define BIT_BKQ_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_BKQ_HOST_IDX_8822B) << BIT_SHIFT_BKQ_HOST_IDX_8822B)
-#define BIT_GET_BKQ_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_BKQ_HOST_IDX_8822B) & BIT_MASK_BKQ_HOST_IDX_8822B)
-
-/* 2 REG_MGQ_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_MGQ_HW_IDX_8822B 16
-#define BIT_MASK_MGQ_HW_IDX_8822B 0xfff
-#define BIT_MGQ_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_MGQ_HW_IDX_8822B) << BIT_SHIFT_MGQ_HW_IDX_8822B)
-#define BIT_GET_MGQ_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_MGQ_HW_IDX_8822B) & BIT_MASK_MGQ_HW_IDX_8822B)
-
-#define BIT_SHIFT_MGQ_HOST_IDX_8822B 0
-#define BIT_MASK_MGQ_HOST_IDX_8822B 0xfff
-#define BIT_MGQ_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_MGQ_HOST_IDX_8822B) << BIT_SHIFT_MGQ_HOST_IDX_8822B)
-#define BIT_GET_MGQ_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_MGQ_HOST_IDX_8822B) & BIT_MASK_MGQ_HOST_IDX_8822B)
-
-/* 2 REG_RXQ_RXBD_IDX_8822B */
-
-#define BIT_SHIFT_RXQ_HW_IDX_8822B 16
-#define BIT_MASK_RXQ_HW_IDX_8822B 0xfff
-#define BIT_RXQ_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_RXQ_HW_IDX_8822B) << BIT_SHIFT_RXQ_HW_IDX_8822B)
-#define BIT_GET_RXQ_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RXQ_HW_IDX_8822B) & BIT_MASK_RXQ_HW_IDX_8822B)
-
-#define BIT_SHIFT_RXQ_HOST_IDX_8822B 0
-#define BIT_MASK_RXQ_HOST_IDX_8822B 0xfff
-#define BIT_RXQ_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_RXQ_HOST_IDX_8822B) << BIT_SHIFT_RXQ_HOST_IDX_8822B)
-#define BIT_GET_RXQ_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RXQ_HOST_IDX_8822B) & BIT_MASK_RXQ_HOST_IDX_8822B)
-
-/* 2 REG_HI0Q_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_HI0Q_HW_IDX_8822B 16
-#define BIT_MASK_HI0Q_HW_IDX_8822B 0xfff
-#define BIT_HI0Q_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_HI0Q_HW_IDX_8822B) << BIT_SHIFT_HI0Q_HW_IDX_8822B)
-#define BIT_GET_HI0Q_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI0Q_HW_IDX_8822B) & BIT_MASK_HI0Q_HW_IDX_8822B)
-
-#define BIT_SHIFT_HI0Q_HOST_IDX_8822B 0
-#define BIT_MASK_HI0Q_HOST_IDX_8822B 0xfff
-#define BIT_HI0Q_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_HI0Q_HOST_IDX_8822B) << BIT_SHIFT_HI0Q_HOST_IDX_8822B)
-#define BIT_GET_HI0Q_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI0Q_HOST_IDX_8822B) & BIT_MASK_HI0Q_HOST_IDX_8822B)
-
-/* 2 REG_HI1Q_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_HI1Q_HW_IDX_8822B 16
-#define BIT_MASK_HI1Q_HW_IDX_8822B 0xfff
-#define BIT_HI1Q_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_HI1Q_HW_IDX_8822B) << BIT_SHIFT_HI1Q_HW_IDX_8822B)
-#define BIT_GET_HI1Q_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI1Q_HW_IDX_8822B) & BIT_MASK_HI1Q_HW_IDX_8822B)
-
-#define BIT_SHIFT_HI1Q_HOST_IDX_8822B 0
-#define BIT_MASK_HI1Q_HOST_IDX_8822B 0xfff
-#define BIT_HI1Q_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_HI1Q_HOST_IDX_8822B) << BIT_SHIFT_HI1Q_HOST_IDX_8822B)
-#define BIT_GET_HI1Q_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI1Q_HOST_IDX_8822B) & BIT_MASK_HI1Q_HOST_IDX_8822B)
-
-/* 2 REG_HI2Q_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_HI2Q_HW_IDX_8822B 16
-#define BIT_MASK_HI2Q_HW_IDX_8822B 0xfff
-#define BIT_HI2Q_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_HI2Q_HW_IDX_8822B) << BIT_SHIFT_HI2Q_HW_IDX_8822B)
-#define BIT_GET_HI2Q_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI2Q_HW_IDX_8822B) & BIT_MASK_HI2Q_HW_IDX_8822B)
-
-#define BIT_SHIFT_HI2Q_HOST_IDX_8822B 0
-#define BIT_MASK_HI2Q_HOST_IDX_8822B 0xfff
-#define BIT_HI2Q_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_HI2Q_HOST_IDX_8822B) << BIT_SHIFT_HI2Q_HOST_IDX_8822B)
-#define BIT_GET_HI2Q_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI2Q_HOST_IDX_8822B) & BIT_MASK_HI2Q_HOST_IDX_8822B)
-
-/* 2 REG_HI3Q_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_HI3Q_HW_IDX_8822B 16
-#define BIT_MASK_HI3Q_HW_IDX_8822B 0xfff
-#define BIT_HI3Q_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_HI3Q_HW_IDX_8822B) << BIT_SHIFT_HI3Q_HW_IDX_8822B)
-#define BIT_GET_HI3Q_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI3Q_HW_IDX_8822B) & BIT_MASK_HI3Q_HW_IDX_8822B)
-
-#define BIT_SHIFT_HI3Q_HOST_IDX_8822B 0
-#define BIT_MASK_HI3Q_HOST_IDX_8822B 0xfff
-#define BIT_HI3Q_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_HI3Q_HOST_IDX_8822B) << BIT_SHIFT_HI3Q_HOST_IDX_8822B)
-#define BIT_GET_HI3Q_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI3Q_HOST_IDX_8822B) & BIT_MASK_HI3Q_HOST_IDX_8822B)
-
-/* 2 REG_HI4Q_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_HI4Q_HW_IDX_8822B 16
-#define BIT_MASK_HI4Q_HW_IDX_8822B 0xfff
-#define BIT_HI4Q_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_HI4Q_HW_IDX_8822B) << BIT_SHIFT_HI4Q_HW_IDX_8822B)
-#define BIT_GET_HI4Q_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI4Q_HW_IDX_8822B) & BIT_MASK_HI4Q_HW_IDX_8822B)
-
-#define BIT_SHIFT_HI4Q_HOST_IDX_8822B 0
-#define BIT_MASK_HI4Q_HOST_IDX_8822B 0xfff
-#define BIT_HI4Q_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_HI4Q_HOST_IDX_8822B) << BIT_SHIFT_HI4Q_HOST_IDX_8822B)
-#define BIT_GET_HI4Q_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI4Q_HOST_IDX_8822B) & BIT_MASK_HI4Q_HOST_IDX_8822B)
-
-/* 2 REG_HI5Q_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_HI5Q_HW_IDX_8822B 16
-#define BIT_MASK_HI5Q_HW_IDX_8822B 0xfff
-#define BIT_HI5Q_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_HI5Q_HW_IDX_8822B) << BIT_SHIFT_HI5Q_HW_IDX_8822B)
-#define BIT_GET_HI5Q_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI5Q_HW_IDX_8822B) & BIT_MASK_HI5Q_HW_IDX_8822B)
-
-#define BIT_SHIFT_HI5Q_HOST_IDX_8822B 0
-#define BIT_MASK_HI5Q_HOST_IDX_8822B 0xfff
-#define BIT_HI5Q_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_HI5Q_HOST_IDX_8822B) << BIT_SHIFT_HI5Q_HOST_IDX_8822B)
-#define BIT_GET_HI5Q_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI5Q_HOST_IDX_8822B) & BIT_MASK_HI5Q_HOST_IDX_8822B)
-
-/* 2 REG_HI6Q_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_HI6Q_HW_IDX_8822B 16
-#define BIT_MASK_HI6Q_HW_IDX_8822B 0xfff
-#define BIT_HI6Q_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_HI6Q_HW_IDX_8822B) << BIT_SHIFT_HI6Q_HW_IDX_8822B)
-#define BIT_GET_HI6Q_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI6Q_HW_IDX_8822B) & BIT_MASK_HI6Q_HW_IDX_8822B)
-
-#define BIT_SHIFT_HI6Q_HOST_IDX_8822B 0
-#define BIT_MASK_HI6Q_HOST_IDX_8822B 0xfff
-#define BIT_HI6Q_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_HI6Q_HOST_IDX_8822B) << BIT_SHIFT_HI6Q_HOST_IDX_8822B)
-#define BIT_GET_HI6Q_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI6Q_HOST_IDX_8822B) & BIT_MASK_HI6Q_HOST_IDX_8822B)
-
-/* 2 REG_HI7Q_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_HI7Q_HW_IDX_8822B 16
-#define BIT_MASK_HI7Q_HW_IDX_8822B 0xfff
-#define BIT_HI7Q_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_HI7Q_HW_IDX_8822B) << BIT_SHIFT_HI7Q_HW_IDX_8822B)
-#define BIT_GET_HI7Q_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI7Q_HW_IDX_8822B) & BIT_MASK_HI7Q_HW_IDX_8822B)
-
-#define BIT_SHIFT_HI7Q_HOST_IDX_8822B 0
-#define BIT_MASK_HI7Q_HOST_IDX_8822B 0xfff
-#define BIT_HI7Q_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_HI7Q_HOST_IDX_8822B) << BIT_SHIFT_HI7Q_HOST_IDX_8822B)
-#define BIT_GET_HI7Q_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HI7Q_HOST_IDX_8822B) & BIT_MASK_HI7Q_HOST_IDX_8822B)
-
-/* 2 REG_DBG_SEL_V1_8822B */
-
-#define BIT_SHIFT_DBG_SEL_8822B 0
-#define BIT_MASK_DBG_SEL_8822B 0xff
-#define BIT_DBG_SEL_8822B(x) \
- (((x) & BIT_MASK_DBG_SEL_8822B) << BIT_SHIFT_DBG_SEL_8822B)
-#define BIT_GET_DBG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_DBG_SEL_8822B) & BIT_MASK_DBG_SEL_8822B)
-
-/* 2 REG_PCIE_HRPWM1_V1_8822B */
-
-#define BIT_SHIFT_PCIE_HRPWM_8822B 0
-#define BIT_MASK_PCIE_HRPWM_8822B 0xff
-#define BIT_PCIE_HRPWM_8822B(x) \
- (((x) & BIT_MASK_PCIE_HRPWM_8822B) << BIT_SHIFT_PCIE_HRPWM_8822B)
-#define BIT_GET_PCIE_HRPWM_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_HRPWM_8822B) & BIT_MASK_PCIE_HRPWM_8822B)
-
-/* 2 REG_PCIE_HCPWM1_V1_8822B */
-
-#define BIT_SHIFT_PCIE_HCPWM_8822B 0
-#define BIT_MASK_PCIE_HCPWM_8822B 0xff
-#define BIT_PCIE_HCPWM_8822B(x) \
- (((x) & BIT_MASK_PCIE_HCPWM_8822B) << BIT_SHIFT_PCIE_HCPWM_8822B)
-#define BIT_GET_PCIE_HCPWM_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_HCPWM_8822B) & BIT_MASK_PCIE_HCPWM_8822B)
-
-/* 2 REG_PCIE_CTRL2_8822B */
-#define BIT_DIS_TXDMA_PRE_8822B BIT(7)
-#define BIT_DIS_RXDMA_PRE_8822B BIT(6)
-
-#define BIT_SHIFT_HPS_CLKR_PCIE_8822B 4
-#define BIT_MASK_HPS_CLKR_PCIE_8822B 0x3
-#define BIT_HPS_CLKR_PCIE_8822B(x) \
- (((x) & BIT_MASK_HPS_CLKR_PCIE_8822B) << BIT_SHIFT_HPS_CLKR_PCIE_8822B)
-#define BIT_GET_HPS_CLKR_PCIE_8822B(x) \
- (((x) >> BIT_SHIFT_HPS_CLKR_PCIE_8822B) & BIT_MASK_HPS_CLKR_PCIE_8822B)
-
-#define BIT_PCIE_INT_8822B BIT(3)
-#define BIT_TXFLAG_EXIT_L1_EN_8822B BIT(2)
-#define BIT_EN_RXDMA_ALIGN_8822B BIT(1)
-#define BIT_EN_TXDMA_ALIGN_8822B BIT(0)
-
-/* 2 REG_PCIE_HRPWM2_V1_8822B */
-
-#define BIT_SHIFT_PCIE_HRPWM2_8822B 0
-#define BIT_MASK_PCIE_HRPWM2_8822B 0xffff
-#define BIT_PCIE_HRPWM2_8822B(x) \
- (((x) & BIT_MASK_PCIE_HRPWM2_8822B) << BIT_SHIFT_PCIE_HRPWM2_8822B)
-#define BIT_GET_PCIE_HRPWM2_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_HRPWM2_8822B) & BIT_MASK_PCIE_HRPWM2_8822B)
-
-/* 2 REG_PCIE_HCPWM2_V1_8822B */
-
-#define BIT_SHIFT_PCIE_HCPWM2_8822B 0
-#define BIT_MASK_PCIE_HCPWM2_8822B 0xffff
-#define BIT_PCIE_HCPWM2_8822B(x) \
- (((x) & BIT_MASK_PCIE_HCPWM2_8822B) << BIT_SHIFT_PCIE_HCPWM2_8822B)
-#define BIT_GET_PCIE_HCPWM2_8822B(x) \
- (((x) >> BIT_SHIFT_PCIE_HCPWM2_8822B) & BIT_MASK_PCIE_HCPWM2_8822B)
-
-/* 2 REG_PCIE_H2C_MSG_V1_8822B */
-
-#define BIT_SHIFT_DRV2FW_INFO_8822B 0
-#define BIT_MASK_DRV2FW_INFO_8822B 0xffffffffL
-#define BIT_DRV2FW_INFO_8822B(x) \
- (((x) & BIT_MASK_DRV2FW_INFO_8822B) << BIT_SHIFT_DRV2FW_INFO_8822B)
-#define BIT_GET_DRV2FW_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_DRV2FW_INFO_8822B) & BIT_MASK_DRV2FW_INFO_8822B)
-
-/* 2 REG_PCIE_C2H_MSG_V1_8822B */
-
-#define BIT_SHIFT_HCI_PCIE_C2H_MSG_8822B 0
-#define BIT_MASK_HCI_PCIE_C2H_MSG_8822B 0xffffffffL
-#define BIT_HCI_PCIE_C2H_MSG_8822B(x) \
- (((x) & BIT_MASK_HCI_PCIE_C2H_MSG_8822B) \
- << BIT_SHIFT_HCI_PCIE_C2H_MSG_8822B)
-#define BIT_GET_HCI_PCIE_C2H_MSG_8822B(x) \
- (((x) >> BIT_SHIFT_HCI_PCIE_C2H_MSG_8822B) & \
- BIT_MASK_HCI_PCIE_C2H_MSG_8822B)
-
-/* 2 REG_DBI_WDATA_V1_8822B */
-
-#define BIT_SHIFT_DBI_WDATA_8822B 0
-#define BIT_MASK_DBI_WDATA_8822B 0xffffffffL
-#define BIT_DBI_WDATA_8822B(x) \
- (((x) & BIT_MASK_DBI_WDATA_8822B) << BIT_SHIFT_DBI_WDATA_8822B)
-#define BIT_GET_DBI_WDATA_8822B(x) \
- (((x) >> BIT_SHIFT_DBI_WDATA_8822B) & BIT_MASK_DBI_WDATA_8822B)
-
-/* 2 REG_DBI_RDATA_V1_8822B */
-
-#define BIT_SHIFT_DBI_RDATA_8822B 0
-#define BIT_MASK_DBI_RDATA_8822B 0xffffffffL
-#define BIT_DBI_RDATA_8822B(x) \
- (((x) & BIT_MASK_DBI_RDATA_8822B) << BIT_SHIFT_DBI_RDATA_8822B)
-#define BIT_GET_DBI_RDATA_8822B(x) \
- (((x) >> BIT_SHIFT_DBI_RDATA_8822B) & BIT_MASK_DBI_RDATA_8822B)
-
-/* 2 REG_DBI_FLAG_V1_8822B */
-#define BIT_EN_STUCK_DBG_8822B BIT(26)
-#define BIT_RX_STUCK_8822B BIT(25)
-#define BIT_TX_STUCK_8822B BIT(24)
-#define BIT_DBI_RFLAG_8822B BIT(17)
-#define BIT_DBI_WFLAG_8822B BIT(16)
-
-#define BIT_SHIFT_DBI_WREN_8822B 12
-#define BIT_MASK_DBI_WREN_8822B 0xf
-#define BIT_DBI_WREN_8822B(x) \
- (((x) & BIT_MASK_DBI_WREN_8822B) << BIT_SHIFT_DBI_WREN_8822B)
-#define BIT_GET_DBI_WREN_8822B(x) \
- (((x) >> BIT_SHIFT_DBI_WREN_8822B) & BIT_MASK_DBI_WREN_8822B)
-
-#define BIT_SHIFT_DBI_ADDR_8822B 0
-#define BIT_MASK_DBI_ADDR_8822B 0xfff
-#define BIT_DBI_ADDR_8822B(x) \
- (((x) & BIT_MASK_DBI_ADDR_8822B) << BIT_SHIFT_DBI_ADDR_8822B)
-#define BIT_GET_DBI_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_DBI_ADDR_8822B) & BIT_MASK_DBI_ADDR_8822B)
-
-/* 2 REG_MDIO_V1_8822B */
-
-#define BIT_SHIFT_MDIO_RDATA_8822B 16
-#define BIT_MASK_MDIO_RDATA_8822B 0xffff
-#define BIT_MDIO_RDATA_8822B(x) \
- (((x) & BIT_MASK_MDIO_RDATA_8822B) << BIT_SHIFT_MDIO_RDATA_8822B)
-#define BIT_GET_MDIO_RDATA_8822B(x) \
- (((x) >> BIT_SHIFT_MDIO_RDATA_8822B) & BIT_MASK_MDIO_RDATA_8822B)
-
-#define BIT_SHIFT_MDIO_WDATA_8822B 0
-#define BIT_MASK_MDIO_WDATA_8822B 0xffff
-#define BIT_MDIO_WDATA_8822B(x) \
- (((x) & BIT_MASK_MDIO_WDATA_8822B) << BIT_SHIFT_MDIO_WDATA_8822B)
-#define BIT_GET_MDIO_WDATA_8822B(x) \
- (((x) >> BIT_SHIFT_MDIO_WDATA_8822B) & BIT_MASK_MDIO_WDATA_8822B)
-
-/* 2 REG_PCIE_MIX_CFG_8822B */
-
-#define BIT_SHIFT_MDIO_PHY_ADDR_8822B 24
-#define BIT_MASK_MDIO_PHY_ADDR_8822B 0x1f
-#define BIT_MDIO_PHY_ADDR_8822B(x) \
- (((x) & BIT_MASK_MDIO_PHY_ADDR_8822B) << BIT_SHIFT_MDIO_PHY_ADDR_8822B)
-#define BIT_GET_MDIO_PHY_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_MDIO_PHY_ADDR_8822B) & BIT_MASK_MDIO_PHY_ADDR_8822B)
-
-#define BIT_SHIFT_WATCH_DOG_RECORD_V1_8822B 10
-#define BIT_MASK_WATCH_DOG_RECORD_V1_8822B 0x3fff
-#define BIT_WATCH_DOG_RECORD_V1_8822B(x) \
- (((x) & BIT_MASK_WATCH_DOG_RECORD_V1_8822B) \
- << BIT_SHIFT_WATCH_DOG_RECORD_V1_8822B)
-#define BIT_GET_WATCH_DOG_RECORD_V1_8822B(x) \
- (((x) >> BIT_SHIFT_WATCH_DOG_RECORD_V1_8822B) & \
- BIT_MASK_WATCH_DOG_RECORD_V1_8822B)
-
-#define BIT_R_IO_TIMEOUT_FLAG_V1_8822B BIT(9)
-#define BIT_EN_WATCH_DOG_8822B BIT(8)
-#define BIT_ECRC_EN_V1_8822B BIT(7)
-#define BIT_MDIO_RFLAG_V1_8822B BIT(6)
-#define BIT_MDIO_WFLAG_V1_8822B BIT(5)
-
-#define BIT_SHIFT_MDIO_REG_ADDR_V1_8822B 0
-#define BIT_MASK_MDIO_REG_ADDR_V1_8822B 0x1f
-#define BIT_MDIO_REG_ADDR_V1_8822B(x) \
- (((x) & BIT_MASK_MDIO_REG_ADDR_V1_8822B) \
- << BIT_SHIFT_MDIO_REG_ADDR_V1_8822B)
-#define BIT_GET_MDIO_REG_ADDR_V1_8822B(x) \
- (((x) >> BIT_SHIFT_MDIO_REG_ADDR_V1_8822B) & \
- BIT_MASK_MDIO_REG_ADDR_V1_8822B)
-
-/* 2 REG_HCI_MIX_CFG_8822B */
-#define BIT_HOST_GEN2_SUPPORT_8822B BIT(20)
-
-#define BIT_SHIFT_TXDMA_ERR_FLAG_8822B 16
-#define BIT_MASK_TXDMA_ERR_FLAG_8822B 0xf
-#define BIT_TXDMA_ERR_FLAG_8822B(x) \
- (((x) & BIT_MASK_TXDMA_ERR_FLAG_8822B) \
- << BIT_SHIFT_TXDMA_ERR_FLAG_8822B)
-#define BIT_GET_TXDMA_ERR_FLAG_8822B(x) \
- (((x) >> BIT_SHIFT_TXDMA_ERR_FLAG_8822B) & \
- BIT_MASK_TXDMA_ERR_FLAG_8822B)
-
-#define BIT_SHIFT_EARLY_MODE_SEL_8822B 12
-#define BIT_MASK_EARLY_MODE_SEL_8822B 0xf
-#define BIT_EARLY_MODE_SEL_8822B(x) \
- (((x) & BIT_MASK_EARLY_MODE_SEL_8822B) \
- << BIT_SHIFT_EARLY_MODE_SEL_8822B)
-#define BIT_GET_EARLY_MODE_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_EARLY_MODE_SEL_8822B) & \
- BIT_MASK_EARLY_MODE_SEL_8822B)
-
-#define BIT_EPHY_RX50_EN_8822B BIT(11)
-
-#define BIT_SHIFT_MSI_TIMEOUT_ID_V1_8822B 8
-#define BIT_MASK_MSI_TIMEOUT_ID_V1_8822B 0x7
-#define BIT_MSI_TIMEOUT_ID_V1_8822B(x) \
- (((x) & BIT_MASK_MSI_TIMEOUT_ID_V1_8822B) \
- << BIT_SHIFT_MSI_TIMEOUT_ID_V1_8822B)
-#define BIT_GET_MSI_TIMEOUT_ID_V1_8822B(x) \
- (((x) >> BIT_SHIFT_MSI_TIMEOUT_ID_V1_8822B) & \
- BIT_MASK_MSI_TIMEOUT_ID_V1_8822B)
-
-#define BIT_RADDR_RD_8822B BIT(7)
-#define BIT_EN_MUL_TAG_8822B BIT(6)
-#define BIT_EN_EARLY_MODE_8822B BIT(5)
-#define BIT_L0S_LINK_OFF_8822B BIT(4)
-#define BIT_ACT_LINK_OFF_8822B BIT(3)
-#define BIT_EN_SLOW_MAC_TX_8822B BIT(2)
-#define BIT_EN_SLOW_MAC_RX_8822B BIT(1)
-
-/* 2 REG_STC_INT_CS_8822B(PCIE STATE CHANGE INTERRUPT CONTROL AND STATUS) */
-#define BIT_STC_INT_EN_8822B BIT(31)
-
-#define BIT_SHIFT_STC_INT_FLAG_8822B 16
-#define BIT_MASK_STC_INT_FLAG_8822B 0xff
-#define BIT_STC_INT_FLAG_8822B(x) \
- (((x) & BIT_MASK_STC_INT_FLAG_8822B) << BIT_SHIFT_STC_INT_FLAG_8822B)
-#define BIT_GET_STC_INT_FLAG_8822B(x) \
- (((x) >> BIT_SHIFT_STC_INT_FLAG_8822B) & BIT_MASK_STC_INT_FLAG_8822B)
-
-#define BIT_SHIFT_STC_INT_IDX_8822B 8
-#define BIT_MASK_STC_INT_IDX_8822B 0x7
-#define BIT_STC_INT_IDX_8822B(x) \
- (((x) & BIT_MASK_STC_INT_IDX_8822B) << BIT_SHIFT_STC_INT_IDX_8822B)
-#define BIT_GET_STC_INT_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_STC_INT_IDX_8822B) & BIT_MASK_STC_INT_IDX_8822B)
-
-#define BIT_SHIFT_STC_INT_REALTIME_CS_8822B 0
-#define BIT_MASK_STC_INT_REALTIME_CS_8822B 0x3f
-#define BIT_STC_INT_REALTIME_CS_8822B(x) \
- (((x) & BIT_MASK_STC_INT_REALTIME_CS_8822B) \
- << BIT_SHIFT_STC_INT_REALTIME_CS_8822B)
-#define BIT_GET_STC_INT_REALTIME_CS_8822B(x) \
- (((x) >> BIT_SHIFT_STC_INT_REALTIME_CS_8822B) & \
- BIT_MASK_STC_INT_REALTIME_CS_8822B)
-
-/* 2 REG_ST_INT_CFG_8822B(PCIE STATE CHANGE INTERRUPT CONFIGURATION) */
-#define BIT_STC_INT_GRP_EN_8822B BIT(31)
-
-#define BIT_SHIFT_STC_INT_EXPECT_LS_8822B 8
-#define BIT_MASK_STC_INT_EXPECT_LS_8822B 0x3f
-#define BIT_STC_INT_EXPECT_LS_8822B(x) \
- (((x) & BIT_MASK_STC_INT_EXPECT_LS_8822B) \
- << BIT_SHIFT_STC_INT_EXPECT_LS_8822B)
-#define BIT_GET_STC_INT_EXPECT_LS_8822B(x) \
- (((x) >> BIT_SHIFT_STC_INT_EXPECT_LS_8822B) & \
- BIT_MASK_STC_INT_EXPECT_LS_8822B)
-
-#define BIT_SHIFT_STC_INT_EXPECT_CS_8822B 0
-#define BIT_MASK_STC_INT_EXPECT_CS_8822B 0x3f
-#define BIT_STC_INT_EXPECT_CS_8822B(x) \
- (((x) & BIT_MASK_STC_INT_EXPECT_CS_8822B) \
- << BIT_SHIFT_STC_INT_EXPECT_CS_8822B)
-#define BIT_GET_STC_INT_EXPECT_CS_8822B(x) \
- (((x) >> BIT_SHIFT_STC_INT_EXPECT_CS_8822B) & \
- BIT_MASK_STC_INT_EXPECT_CS_8822B)
-
-/* 2 REG_CMU_DLY_CTRL_8822B(PCIE PHY CLOCK MGT UNIT DELAY CONTROL ) */
-#define BIT_CMU_DLY_EN_8822B BIT(31)
-#define BIT_CMU_DLY_MODE_8822B BIT(30)
-
-#define BIT_SHIFT_CMU_DLY_PRE_DIV_8822B 0
-#define BIT_MASK_CMU_DLY_PRE_DIV_8822B 0xff
-#define BIT_CMU_DLY_PRE_DIV_8822B(x) \
- (((x) & BIT_MASK_CMU_DLY_PRE_DIV_8822B) \
- << BIT_SHIFT_CMU_DLY_PRE_DIV_8822B)
-#define BIT_GET_CMU_DLY_PRE_DIV_8822B(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_PRE_DIV_8822B) & \
- BIT_MASK_CMU_DLY_PRE_DIV_8822B)
-
-/* 2 REG_CMU_DLY_CFG_8822B(PCIE PHY CLOCK MGT UNIT DELAY CONFIGURATION ) */
-
-#define BIT_SHIFT_CMU_DLY_LTR_A2I_8822B 24
-#define BIT_MASK_CMU_DLY_LTR_A2I_8822B 0xff
-#define BIT_CMU_DLY_LTR_A2I_8822B(x) \
- (((x) & BIT_MASK_CMU_DLY_LTR_A2I_8822B) \
- << BIT_SHIFT_CMU_DLY_LTR_A2I_8822B)
-#define BIT_GET_CMU_DLY_LTR_A2I_8822B(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_LTR_A2I_8822B) & \
- BIT_MASK_CMU_DLY_LTR_A2I_8822B)
-
-#define BIT_SHIFT_CMU_DLY_LTR_I2A_8822B 16
-#define BIT_MASK_CMU_DLY_LTR_I2A_8822B 0xff
-#define BIT_CMU_DLY_LTR_I2A_8822B(x) \
- (((x) & BIT_MASK_CMU_DLY_LTR_I2A_8822B) \
- << BIT_SHIFT_CMU_DLY_LTR_I2A_8822B)
-#define BIT_GET_CMU_DLY_LTR_I2A_8822B(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_LTR_I2A_8822B) & \
- BIT_MASK_CMU_DLY_LTR_I2A_8822B)
-
-#define BIT_SHIFT_CMU_DLY_LTR_IDLE_8822B 8
-#define BIT_MASK_CMU_DLY_LTR_IDLE_8822B 0xff
-#define BIT_CMU_DLY_LTR_IDLE_8822B(x) \
- (((x) & BIT_MASK_CMU_DLY_LTR_IDLE_8822B) \
- << BIT_SHIFT_CMU_DLY_LTR_IDLE_8822B)
-#define BIT_GET_CMU_DLY_LTR_IDLE_8822B(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_LTR_IDLE_8822B) & \
- BIT_MASK_CMU_DLY_LTR_IDLE_8822B)
-
-#define BIT_SHIFT_CMU_DLY_LTR_ACT_8822B 0
-#define BIT_MASK_CMU_DLY_LTR_ACT_8822B 0xff
-#define BIT_CMU_DLY_LTR_ACT_8822B(x) \
- (((x) & BIT_MASK_CMU_DLY_LTR_ACT_8822B) \
- << BIT_SHIFT_CMU_DLY_LTR_ACT_8822B)
-#define BIT_GET_CMU_DLY_LTR_ACT_8822B(x) \
- (((x) >> BIT_SHIFT_CMU_DLY_LTR_ACT_8822B) & \
- BIT_MASK_CMU_DLY_LTR_ACT_8822B)
-
-/* 2 REG_H2CQ_TXBD_DESA_8822B */
-
-#define BIT_SHIFT_H2CQ_TXBD_DESA_8822B 0
-#define BIT_MASK_H2CQ_TXBD_DESA_8822B 0xffffffffffffffffL
-#define BIT_H2CQ_TXBD_DESA_8822B(x) \
- (((x) & BIT_MASK_H2CQ_TXBD_DESA_8822B) \
- << BIT_SHIFT_H2CQ_TXBD_DESA_8822B)
-#define BIT_GET_H2CQ_TXBD_DESA_8822B(x) \
- (((x) >> BIT_SHIFT_H2CQ_TXBD_DESA_8822B) & \
- BIT_MASK_H2CQ_TXBD_DESA_8822B)
-
-/* 2 REG_H2CQ_TXBD_NUM_8822B */
-#define BIT_PCIE_H2CQ_FLAG_8822B BIT(14)
-
-#define BIT_SHIFT_H2CQ_DESC_MODE_8822B 12
-#define BIT_MASK_H2CQ_DESC_MODE_8822B 0x3
-#define BIT_H2CQ_DESC_MODE_8822B(x) \
- (((x) & BIT_MASK_H2CQ_DESC_MODE_8822B) \
- << BIT_SHIFT_H2CQ_DESC_MODE_8822B)
-#define BIT_GET_H2CQ_DESC_MODE_8822B(x) \
- (((x) >> BIT_SHIFT_H2CQ_DESC_MODE_8822B) & \
- BIT_MASK_H2CQ_DESC_MODE_8822B)
-
-#define BIT_SHIFT_H2CQ_DESC_NUM_8822B 0
-#define BIT_MASK_H2CQ_DESC_NUM_8822B 0xfff
-#define BIT_H2CQ_DESC_NUM_8822B(x) \
- (((x) & BIT_MASK_H2CQ_DESC_NUM_8822B) << BIT_SHIFT_H2CQ_DESC_NUM_8822B)
-#define BIT_GET_H2CQ_DESC_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_H2CQ_DESC_NUM_8822B) & BIT_MASK_H2CQ_DESC_NUM_8822B)
-
-/* 2 REG_H2CQ_TXBD_IDX_8822B */
-
-#define BIT_SHIFT_H2CQ_HW_IDX_8822B 16
-#define BIT_MASK_H2CQ_HW_IDX_8822B 0xfff
-#define BIT_H2CQ_HW_IDX_8822B(x) \
- (((x) & BIT_MASK_H2CQ_HW_IDX_8822B) << BIT_SHIFT_H2CQ_HW_IDX_8822B)
-#define BIT_GET_H2CQ_HW_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_H2CQ_HW_IDX_8822B) & BIT_MASK_H2CQ_HW_IDX_8822B)
-
-#define BIT_SHIFT_H2CQ_HOST_IDX_8822B 0
-#define BIT_MASK_H2CQ_HOST_IDX_8822B 0xfff
-#define BIT_H2CQ_HOST_IDX_8822B(x) \
- (((x) & BIT_MASK_H2CQ_HOST_IDX_8822B) << BIT_SHIFT_H2CQ_HOST_IDX_8822B)
-#define BIT_GET_H2CQ_HOST_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_H2CQ_HOST_IDX_8822B) & BIT_MASK_H2CQ_HOST_IDX_8822B)
-
-/* 2 REG_H2CQ_CSR_8822B[31:0] (H2CQ CONTROL AND STATUS) */
-#define BIT_H2CQ_FULL_8822B BIT(31)
-#define BIT_CLR_H2CQ_HOST_IDX_8822B BIT(16)
-#define BIT_CLR_H2CQ_HW_IDX_8822B BIT(8)
-
-/* 2 REG_CHANGE_PCIE_SPEED_8822B */
-#define BIT_CHANGE_PCIE_SPEED_8822B BIT(18)
-
-#define BIT_SHIFT_GEN1_GEN2_8822B 16
-#define BIT_MASK_GEN1_GEN2_8822B 0x3
-#define BIT_GEN1_GEN2_8822B(x) \
- (((x) & BIT_MASK_GEN1_GEN2_8822B) << BIT_SHIFT_GEN1_GEN2_8822B)
-#define BIT_GET_GEN1_GEN2_8822B(x) \
- (((x) >> BIT_SHIFT_GEN1_GEN2_8822B) & BIT_MASK_GEN1_GEN2_8822B)
-
-#define BIT_SHIFT_AUTO_HANG_RELEASE_8822B 0
-#define BIT_MASK_AUTO_HANG_RELEASE_8822B 0x7
-#define BIT_AUTO_HANG_RELEASE_8822B(x) \
- (((x) & BIT_MASK_AUTO_HANG_RELEASE_8822B) \
- << BIT_SHIFT_AUTO_HANG_RELEASE_8822B)
-#define BIT_GET_AUTO_HANG_RELEASE_8822B(x) \
- (((x) >> BIT_SHIFT_AUTO_HANG_RELEASE_8822B) & \
- BIT_MASK_AUTO_HANG_RELEASE_8822B)
-
-/* 2 REG_OLD_DEHANG_8822B */
-#define BIT_OLD_DEHANG_8822B BIT(1)
-
-/* 2 REG_Q0_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_Q0_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_Q0_V1_8822B 0x7f
-#define BIT_QUEUEMACID_Q0_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q0_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_Q0_V1_8822B)
-#define BIT_GET_QUEUEMACID_Q0_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q0_V1_8822B) & \
- BIT_MASK_QUEUEMACID_Q0_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_Q0_V1_8822B 23
-#define BIT_MASK_QUEUEAC_Q0_V1_8822B 0x3
-#define BIT_QUEUEAC_Q0_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_Q0_V1_8822B) << BIT_SHIFT_QUEUEAC_Q0_V1_8822B)
-#define BIT_GET_QUEUEAC_Q0_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q0_V1_8822B) & BIT_MASK_QUEUEAC_Q0_V1_8822B)
-
-#define BIT_TIDEMPTY_Q0_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_Q0_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_Q0_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_Q0_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q0_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_Q0_V2_8822B)
-#define BIT_GET_TAIL_PKT_Q0_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q0_V2_8822B) & \
- BIT_MASK_TAIL_PKT_Q0_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_Q0_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_Q0_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_Q0_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q0_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_Q0_V1_8822B)
-#define BIT_GET_HEAD_PKT_Q0_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q0_V1_8822B) & \
- BIT_MASK_HEAD_PKT_Q0_V1_8822B)
-
-/* 2 REG_Q1_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_Q1_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_Q1_V1_8822B 0x7f
-#define BIT_QUEUEMACID_Q1_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q1_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_Q1_V1_8822B)
-#define BIT_GET_QUEUEMACID_Q1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q1_V1_8822B) & \
- BIT_MASK_QUEUEMACID_Q1_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_Q1_V1_8822B 23
-#define BIT_MASK_QUEUEAC_Q1_V1_8822B 0x3
-#define BIT_QUEUEAC_Q1_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_Q1_V1_8822B) << BIT_SHIFT_QUEUEAC_Q1_V1_8822B)
-#define BIT_GET_QUEUEAC_Q1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q1_V1_8822B) & BIT_MASK_QUEUEAC_Q1_V1_8822B)
-
-#define BIT_TIDEMPTY_Q1_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_Q1_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_Q1_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_Q1_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q1_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_Q1_V2_8822B)
-#define BIT_GET_TAIL_PKT_Q1_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q1_V2_8822B) & \
- BIT_MASK_TAIL_PKT_Q1_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_Q1_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_Q1_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_Q1_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q1_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_Q1_V1_8822B)
-#define BIT_GET_HEAD_PKT_Q1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q1_V1_8822B) & \
- BIT_MASK_HEAD_PKT_Q1_V1_8822B)
-
-/* 2 REG_Q2_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_Q2_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_Q2_V1_8822B 0x7f
-#define BIT_QUEUEMACID_Q2_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q2_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_Q2_V1_8822B)
-#define BIT_GET_QUEUEMACID_Q2_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q2_V1_8822B) & \
- BIT_MASK_QUEUEMACID_Q2_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_Q2_V1_8822B 23
-#define BIT_MASK_QUEUEAC_Q2_V1_8822B 0x3
-#define BIT_QUEUEAC_Q2_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_Q2_V1_8822B) << BIT_SHIFT_QUEUEAC_Q2_V1_8822B)
-#define BIT_GET_QUEUEAC_Q2_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q2_V1_8822B) & BIT_MASK_QUEUEAC_Q2_V1_8822B)
-
-#define BIT_TIDEMPTY_Q2_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_Q2_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_Q2_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_Q2_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q2_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_Q2_V2_8822B)
-#define BIT_GET_TAIL_PKT_Q2_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q2_V2_8822B) & \
- BIT_MASK_TAIL_PKT_Q2_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_Q2_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_Q2_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_Q2_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q2_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_Q2_V1_8822B)
-#define BIT_GET_HEAD_PKT_Q2_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q2_V1_8822B) & \
- BIT_MASK_HEAD_PKT_Q2_V1_8822B)
-
-/* 2 REG_Q3_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_Q3_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_Q3_V1_8822B 0x7f
-#define BIT_QUEUEMACID_Q3_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q3_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_Q3_V1_8822B)
-#define BIT_GET_QUEUEMACID_Q3_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q3_V1_8822B) & \
- BIT_MASK_QUEUEMACID_Q3_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_Q3_V1_8822B 23
-#define BIT_MASK_QUEUEAC_Q3_V1_8822B 0x3
-#define BIT_QUEUEAC_Q3_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_Q3_V1_8822B) << BIT_SHIFT_QUEUEAC_Q3_V1_8822B)
-#define BIT_GET_QUEUEAC_Q3_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q3_V1_8822B) & BIT_MASK_QUEUEAC_Q3_V1_8822B)
-
-#define BIT_TIDEMPTY_Q3_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_Q3_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_Q3_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_Q3_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q3_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_Q3_V2_8822B)
-#define BIT_GET_TAIL_PKT_Q3_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q3_V2_8822B) & \
- BIT_MASK_TAIL_PKT_Q3_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_Q3_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_Q3_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_Q3_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q3_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_Q3_V1_8822B)
-#define BIT_GET_HEAD_PKT_Q3_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q3_V1_8822B) & \
- BIT_MASK_HEAD_PKT_Q3_V1_8822B)
-
-/* 2 REG_MGQ_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_MGQ_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_MGQ_V1_8822B 0x7f
-#define BIT_QUEUEMACID_MGQ_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_MGQ_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_MGQ_V1_8822B)
-#define BIT_GET_QUEUEMACID_MGQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_MGQ_V1_8822B) & \
- BIT_MASK_QUEUEMACID_MGQ_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_MGQ_V1_8822B 23
-#define BIT_MASK_QUEUEAC_MGQ_V1_8822B 0x3
-#define BIT_QUEUEAC_MGQ_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_MGQ_V1_8822B) \
- << BIT_SHIFT_QUEUEAC_MGQ_V1_8822B)
-#define BIT_GET_QUEUEAC_MGQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_MGQ_V1_8822B) & \
- BIT_MASK_QUEUEAC_MGQ_V1_8822B)
-
-#define BIT_TIDEMPTY_MGQ_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_MGQ_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_MGQ_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_MGQ_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_MGQ_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_MGQ_V2_8822B)
-#define BIT_GET_TAIL_PKT_MGQ_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_MGQ_V2_8822B) & \
- BIT_MASK_TAIL_PKT_MGQ_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_MGQ_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_MGQ_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_MGQ_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_MGQ_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_MGQ_V1_8822B)
-#define BIT_GET_HEAD_PKT_MGQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_MGQ_V1_8822B) & \
- BIT_MASK_HEAD_PKT_MGQ_V1_8822B)
-
-/* 2 REG_HIQ_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_HIQ_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_HIQ_V1_8822B 0x7f
-#define BIT_QUEUEMACID_HIQ_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_HIQ_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_HIQ_V1_8822B)
-#define BIT_GET_QUEUEMACID_HIQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_HIQ_V1_8822B) & \
- BIT_MASK_QUEUEMACID_HIQ_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_HIQ_V1_8822B 23
-#define BIT_MASK_QUEUEAC_HIQ_V1_8822B 0x3
-#define BIT_QUEUEAC_HIQ_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_HIQ_V1_8822B) \
- << BIT_SHIFT_QUEUEAC_HIQ_V1_8822B)
-#define BIT_GET_QUEUEAC_HIQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_HIQ_V1_8822B) & \
- BIT_MASK_QUEUEAC_HIQ_V1_8822B)
-
-#define BIT_TIDEMPTY_HIQ_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_HIQ_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_HIQ_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_HIQ_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_HIQ_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_HIQ_V2_8822B)
-#define BIT_GET_TAIL_PKT_HIQ_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_HIQ_V2_8822B) & \
- BIT_MASK_TAIL_PKT_HIQ_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_HIQ_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_HIQ_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_HIQ_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_HIQ_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_HIQ_V1_8822B)
-#define BIT_GET_HEAD_PKT_HIQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_HIQ_V1_8822B) & \
- BIT_MASK_HEAD_PKT_HIQ_V1_8822B)
-
-/* 2 REG_BCNQ_INFO_8822B */
-
-#define BIT_SHIFT_BCNQ_HEAD_PG_V1_8822B 0
-#define BIT_MASK_BCNQ_HEAD_PG_V1_8822B 0xfff
-#define BIT_BCNQ_HEAD_PG_V1_8822B(x) \
- (((x) & BIT_MASK_BCNQ_HEAD_PG_V1_8822B) \
- << BIT_SHIFT_BCNQ_HEAD_PG_V1_8822B)
-#define BIT_GET_BCNQ_HEAD_PG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_BCNQ_HEAD_PG_V1_8822B) & \
- BIT_MASK_BCNQ_HEAD_PG_V1_8822B)
-
-/* 2 REG_TXPKT_EMPTY_8822B */
-#define BIT_BCNQ_EMPTY_8822B BIT(11)
-#define BIT_HQQ_EMPTY_8822B BIT(10)
-#define BIT_MQQ_EMPTY_8822B BIT(9)
-#define BIT_MGQ_CPU_EMPTY_8822B BIT(8)
-#define BIT_AC7Q_EMPTY_8822B BIT(7)
-#define BIT_AC6Q_EMPTY_8822B BIT(6)
-#define BIT_AC5Q_EMPTY_8822B BIT(5)
-#define BIT_AC4Q_EMPTY_8822B BIT(4)
-#define BIT_AC3Q_EMPTY_8822B BIT(3)
-#define BIT_AC2Q_EMPTY_8822B BIT(2)
-#define BIT_AC1Q_EMPTY_8822B BIT(1)
-#define BIT_AC0Q_EMPTY_8822B BIT(0)
-
-/* 2 REG_CPU_MGQ_INFO_8822B */
-#define BIT_BCN1_POLL_8822B BIT(30)
-#define BIT_CPUMGT_POLL_8822B BIT(29)
-#define BIT_BCN_POLL_8822B BIT(28)
-#define BIT_CPUMGQ_FW_NUM_V1_8822B BIT(12)
-
-#define BIT_SHIFT_FW_FREE_TAIL_V1_8822B 0
-#define BIT_MASK_FW_FREE_TAIL_V1_8822B 0xfff
-#define BIT_FW_FREE_TAIL_V1_8822B(x) \
- (((x) & BIT_MASK_FW_FREE_TAIL_V1_8822B) \
- << BIT_SHIFT_FW_FREE_TAIL_V1_8822B)
-#define BIT_GET_FW_FREE_TAIL_V1_8822B(x) \
- (((x) >> BIT_SHIFT_FW_FREE_TAIL_V1_8822B) & \
- BIT_MASK_FW_FREE_TAIL_V1_8822B)
-
-/* 2 REG_FWHW_TXQ_CTRL_8822B */
-#define BIT_RTS_LIMIT_IN_OFDM_8822B BIT(23)
-#define BIT_EN_BCNQ_DL_8822B BIT(22)
-#define BIT_EN_RD_RESP_NAV_BK_8822B BIT(21)
-#define BIT_EN_WR_FREE_TAIL_8822B BIT(20)
-
-#define BIT_SHIFT_EN_QUEUE_RPT_8822B 8
-#define BIT_MASK_EN_QUEUE_RPT_8822B 0xff
-#define BIT_EN_QUEUE_RPT_8822B(x) \
- (((x) & BIT_MASK_EN_QUEUE_RPT_8822B) << BIT_SHIFT_EN_QUEUE_RPT_8822B)
-#define BIT_GET_EN_QUEUE_RPT_8822B(x) \
- (((x) >> BIT_SHIFT_EN_QUEUE_RPT_8822B) & BIT_MASK_EN_QUEUE_RPT_8822B)
-
-#define BIT_EN_RTY_BK_8822B BIT(7)
-#define BIT_EN_USE_INI_RAT_8822B BIT(6)
-#define BIT_EN_RTS_NAV_BK_8822B BIT(5)
-#define BIT_DIS_SSN_CHECK_8822B BIT(4)
-#define BIT_MACID_MATCH_RTS_8822B BIT(3)
-#define BIT_EN_BCN_TRXRPT_V1_8822B BIT(2)
-#define BIT_EN_FTMACKRPT_8822B BIT(1)
-#define BIT_EN_FTMRPT_8822B BIT(0)
-
-/* 2 REG_DATAFB_SEL_8822B */
-#define BIT__R_EN_RTY_BK_COD_8822B BIT(2)
-
-#define BIT_SHIFT__R_DATA_FALLBACK_SEL_8822B 0
-#define BIT_MASK__R_DATA_FALLBACK_SEL_8822B 0x3
-#define BIT__R_DATA_FALLBACK_SEL_8822B(x) \
- (((x) & BIT_MASK__R_DATA_FALLBACK_SEL_8822B) \
- << BIT_SHIFT__R_DATA_FALLBACK_SEL_8822B)
-#define BIT_GET__R_DATA_FALLBACK_SEL_8822B(x) \
- (((x) >> BIT_SHIFT__R_DATA_FALLBACK_SEL_8822B) & \
- BIT_MASK__R_DATA_FALLBACK_SEL_8822B)
-
-/* 2 REG_BCNQ_BDNY_V1_8822B */
-
-#define BIT_SHIFT_BCNQ_PGBNDY_V1_8822B 0
-#define BIT_MASK_BCNQ_PGBNDY_V1_8822B 0xfff
-#define BIT_BCNQ_PGBNDY_V1_8822B(x) \
- (((x) & BIT_MASK_BCNQ_PGBNDY_V1_8822B) \
- << BIT_SHIFT_BCNQ_PGBNDY_V1_8822B)
-#define BIT_GET_BCNQ_PGBNDY_V1_8822B(x) \
- (((x) >> BIT_SHIFT_BCNQ_PGBNDY_V1_8822B) & \
- BIT_MASK_BCNQ_PGBNDY_V1_8822B)
-
-/* 2 REG_LIFETIME_EN_8822B */
-#define BIT_BT_INT_CPU_8822B BIT(7)
-#define BIT_BT_INT_PTA_8822B BIT(6)
-#define BIT_EN_CTRL_RTYBIT_8822B BIT(4)
-#define BIT_LIFETIME_BK_EN_8822B BIT(3)
-#define BIT_LIFETIME_BE_EN_8822B BIT(2)
-#define BIT_LIFETIME_VI_EN_8822B BIT(1)
-#define BIT_LIFETIME_VO_EN_8822B BIT(0)
-
-/* 2 REG_SPEC_SIFS_8822B */
-
-#define BIT_SHIFT_SPEC_SIFS_OFDM_PTCL_8822B 8
-#define BIT_MASK_SPEC_SIFS_OFDM_PTCL_8822B 0xff
-#define BIT_SPEC_SIFS_OFDM_PTCL_8822B(x) \
- (((x) & BIT_MASK_SPEC_SIFS_OFDM_PTCL_8822B) \
- << BIT_SHIFT_SPEC_SIFS_OFDM_PTCL_8822B)
-#define BIT_GET_SPEC_SIFS_OFDM_PTCL_8822B(x) \
- (((x) >> BIT_SHIFT_SPEC_SIFS_OFDM_PTCL_8822B) & \
- BIT_MASK_SPEC_SIFS_OFDM_PTCL_8822B)
-
-#define BIT_SHIFT_SPEC_SIFS_CCK_PTCL_8822B 0
-#define BIT_MASK_SPEC_SIFS_CCK_PTCL_8822B 0xff
-#define BIT_SPEC_SIFS_CCK_PTCL_8822B(x) \
- (((x) & BIT_MASK_SPEC_SIFS_CCK_PTCL_8822B) \
- << BIT_SHIFT_SPEC_SIFS_CCK_PTCL_8822B)
-#define BIT_GET_SPEC_SIFS_CCK_PTCL_8822B(x) \
- (((x) >> BIT_SHIFT_SPEC_SIFS_CCK_PTCL_8822B) & \
- BIT_MASK_SPEC_SIFS_CCK_PTCL_8822B)
-
-/* 2 REG_RETRY_LIMIT_8822B */
-
-#define BIT_SHIFT_SRL_8822B 8
-#define BIT_MASK_SRL_8822B 0x3f
-#define BIT_SRL_8822B(x) (((x) & BIT_MASK_SRL_8822B) << BIT_SHIFT_SRL_8822B)
-#define BIT_GET_SRL_8822B(x) (((x) >> BIT_SHIFT_SRL_8822B) & BIT_MASK_SRL_8822B)
-
-#define BIT_SHIFT_LRL_8822B 0
-#define BIT_MASK_LRL_8822B 0x3f
-#define BIT_LRL_8822B(x) (((x) & BIT_MASK_LRL_8822B) << BIT_SHIFT_LRL_8822B)
-#define BIT_GET_LRL_8822B(x) (((x) >> BIT_SHIFT_LRL_8822B) & BIT_MASK_LRL_8822B)
-
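
All of the multi-bit fields in this header follow one pack/extract idiom: BIT_<FIELD>(x) masks a caller value and shifts it into its register position, while BIT_GET_<FIELD>(x) shifts a register word back down and masks it out. A minimal, self-contained sketch of that idiom using the two REG_RETRY_LIMIT_8822B fields above; the limit values and the standalone main() are illustrative only, and a real driver would of course go through its own register I/O helpers rather than a local variable:

#include <stdio.h>

/* Copied from the hunk above: short/long retry limit fields. */
#define BIT_SHIFT_SRL_8822B 8
#define BIT_MASK_SRL_8822B 0x3f
#define BIT_SRL_8822B(x) (((x) & BIT_MASK_SRL_8822B) << BIT_SHIFT_SRL_8822B)
#define BIT_GET_SRL_8822B(x) (((x) >> BIT_SHIFT_SRL_8822B) & BIT_MASK_SRL_8822B)
#define BIT_SHIFT_LRL_8822B 0
#define BIT_MASK_LRL_8822B 0x3f
#define BIT_LRL_8822B(x) (((x) & BIT_MASK_LRL_8822B) << BIT_SHIFT_LRL_8822B)
#define BIT_GET_LRL_8822B(x) (((x) >> BIT_SHIFT_LRL_8822B) & BIT_MASK_LRL_8822B)

int main(void)
{
	/* Pack a short retry limit of 48 and a long retry limit of 8. */
	unsigned int reg = BIT_SRL_8822B(48) | BIT_LRL_8822B(8);

	/* Extract them again; prints "reg=0x3008 SRL=48 LRL=8". */
	printf("reg=0x%04x SRL=%u LRL=%u\n", reg,
	       BIT_GET_SRL_8822B(reg), BIT_GET_LRL_8822B(reg));
	return 0;
}
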
-/* 2 REG_TXBF_CTRL_8822B */
-#define BIT_R_ENABLE_NDPA_8822B BIT(31)
-#define BIT_USE_NDPA_PARAMETER_8822B BIT(30)
-#define BIT_R_PROP_TXBF_8822B BIT(29)
-#define BIT_R_EN_NDPA_INT_8822B BIT(28)
-#define BIT_R_TXBF1_80M_8822B BIT(27)
-#define BIT_R_TXBF1_40M_8822B BIT(26)
-#define BIT_R_TXBF1_20M_8822B BIT(25)
-
-#define BIT_SHIFT_R_TXBF1_AID_8822B 16
-#define BIT_MASK_R_TXBF1_AID_8822B 0x1ff
-#define BIT_R_TXBF1_AID_8822B(x) \
- (((x) & BIT_MASK_R_TXBF1_AID_8822B) << BIT_SHIFT_R_TXBF1_AID_8822B)
-#define BIT_GET_R_TXBF1_AID_8822B(x) \
- (((x) >> BIT_SHIFT_R_TXBF1_AID_8822B) & BIT_MASK_R_TXBF1_AID_8822B)
-
-#define BIT_DIS_NDP_BFEN_8822B BIT(15)
-#define BIT_R_TXBCN_NOBLOCK_NDP_8822B BIT(14)
-#define BIT_R_TXBF0_80M_8822B BIT(11)
-#define BIT_R_TXBF0_40M_8822B BIT(10)
-#define BIT_R_TXBF0_20M_8822B BIT(9)
-
-#define BIT_SHIFT_R_TXBF0_AID_8822B 0
-#define BIT_MASK_R_TXBF0_AID_8822B 0x1ff
-#define BIT_R_TXBF0_AID_8822B(x) \
- (((x) & BIT_MASK_R_TXBF0_AID_8822B) << BIT_SHIFT_R_TXBF0_AID_8822B)
-#define BIT_GET_R_TXBF0_AID_8822B(x) \
- (((x) >> BIT_SHIFT_R_TXBF0_AID_8822B) & BIT_MASK_R_TXBF0_AID_8822B)
-
-/* 2 REG_DARFRC_8822B */
-
-#define BIT_SHIFT_DARF_RC8_8822B (56 & CPU_OPT_WIDTH)
-#define BIT_MASK_DARF_RC8_8822B 0x1f
-#define BIT_DARF_RC8_8822B(x) \
- (((x) & BIT_MASK_DARF_RC8_8822B) << BIT_SHIFT_DARF_RC8_8822B)
-#define BIT_GET_DARF_RC8_8822B(x) \
- (((x) >> BIT_SHIFT_DARF_RC8_8822B) & BIT_MASK_DARF_RC8_8822B)
-
-#define BIT_SHIFT_DARF_RC7_8822B (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_DARF_RC7_8822B 0x1f
-#define BIT_DARF_RC7_8822B(x) \
- (((x) & BIT_MASK_DARF_RC7_8822B) << BIT_SHIFT_DARF_RC7_8822B)
-#define BIT_GET_DARF_RC7_8822B(x) \
- (((x) >> BIT_SHIFT_DARF_RC7_8822B) & BIT_MASK_DARF_RC7_8822B)
-
-#define BIT_SHIFT_DARF_RC6_8822B (40 & CPU_OPT_WIDTH)
-#define BIT_MASK_DARF_RC6_8822B 0x1f
-#define BIT_DARF_RC6_8822B(x) \
- (((x) & BIT_MASK_DARF_RC6_8822B) << BIT_SHIFT_DARF_RC6_8822B)
-#define BIT_GET_DARF_RC6_8822B(x) \
- (((x) >> BIT_SHIFT_DARF_RC6_8822B) & BIT_MASK_DARF_RC6_8822B)
-
-#define BIT_SHIFT_DARF_RC5_8822B (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_DARF_RC5_8822B 0x1f
-#define BIT_DARF_RC5_8822B(x) \
- (((x) & BIT_MASK_DARF_RC5_8822B) << BIT_SHIFT_DARF_RC5_8822B)
-#define BIT_GET_DARF_RC5_8822B(x) \
- (((x) >> BIT_SHIFT_DARF_RC5_8822B) & BIT_MASK_DARF_RC5_8822B)
-
-#define BIT_SHIFT_DARF_RC4_8822B 24
-#define BIT_MASK_DARF_RC4_8822B 0x1f
-#define BIT_DARF_RC4_8822B(x) \
- (((x) & BIT_MASK_DARF_RC4_8822B) << BIT_SHIFT_DARF_RC4_8822B)
-#define BIT_GET_DARF_RC4_8822B(x) \
- (((x) >> BIT_SHIFT_DARF_RC4_8822B) & BIT_MASK_DARF_RC4_8822B)
-
-#define BIT_SHIFT_DARF_RC3_8822B 16
-#define BIT_MASK_DARF_RC3_8822B 0x1f
-#define BIT_DARF_RC3_8822B(x) \
- (((x) & BIT_MASK_DARF_RC3_8822B) << BIT_SHIFT_DARF_RC3_8822B)
-#define BIT_GET_DARF_RC3_8822B(x) \
- (((x) >> BIT_SHIFT_DARF_RC3_8822B) & BIT_MASK_DARF_RC3_8822B)
-
-#define BIT_SHIFT_DARF_RC2_8822B 8
-#define BIT_MASK_DARF_RC2_8822B 0x1f
-#define BIT_DARF_RC2_8822B(x) \
- (((x) & BIT_MASK_DARF_RC2_8822B) << BIT_SHIFT_DARF_RC2_8822B)
-#define BIT_GET_DARF_RC2_8822B(x) \
- (((x) >> BIT_SHIFT_DARF_RC2_8822B) & BIT_MASK_DARF_RC2_8822B)
-
-#define BIT_SHIFT_DARF_RC1_8822B 0
-#define BIT_MASK_DARF_RC1_8822B 0x1f
-#define BIT_DARF_RC1_8822B(x) \
- (((x) & BIT_MASK_DARF_RC1_8822B) << BIT_SHIFT_DARF_RC1_8822B)
-#define BIT_GET_DARF_RC1_8822B(x) \
- (((x) >> BIT_SHIFT_DARF_RC1_8822B) & BIT_MASK_DARF_RC1_8822B)
-
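
REG_DARFRC_8822B here (and REG_RARFRC_8822B just below) describes a 64-bit register, so the four upper byte-fields spell their shifts as (32 & CPU_OPT_WIDTH) through (56 & CPU_OPT_WIDTH). CPU_OPT_WIDTH is defined elsewhere in halmac, not in this hunk; the masking appears intended to keep the shift below the machine word width, since shifting a 32-bit value by 32 or more is undefined behaviour in C. A hedged sketch of the arithmetic, assuming (not confirmed by this hunk) that CPU_OPT_WIDTH is 0x1f on a 32-bit build:

#include <stdio.h>

/* Assumed value -- CPU_OPT_WIDTH is defined elsewhere in halmac, not here. */
#define CPU_OPT_WIDTH 0x1f

int main(void)
{
	/* The upper-dword field shifts collapse onto the lower-dword ones:
	 * prints "24 16 8 0", i.e. the same positions as DARF_RC4..RC1 above.
	 */
	printf("%d %d %d %d\n",
	       56 & CPU_OPT_WIDTH, 48 & CPU_OPT_WIDTH,
	       40 & CPU_OPT_WIDTH, 32 & CPU_OPT_WIDTH);
	return 0;
}

In other words, on a build where CPU_OPT_WIDTH caps shifts at 31, the RC5..RC8 accessors would presumably be applied to the high 32-bit word of the register, read separately, rather than to a full 64-bit value.
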
-/* 2 REG_RARFRC_8822B */
-
-#define BIT_SHIFT_RARF_RC8_8822B (56 & CPU_OPT_WIDTH)
-#define BIT_MASK_RARF_RC8_8822B 0x1f
-#define BIT_RARF_RC8_8822B(x) \
- (((x) & BIT_MASK_RARF_RC8_8822B) << BIT_SHIFT_RARF_RC8_8822B)
-#define BIT_GET_RARF_RC8_8822B(x) \
- (((x) >> BIT_SHIFT_RARF_RC8_8822B) & BIT_MASK_RARF_RC8_8822B)
-
-#define BIT_SHIFT_RARF_RC7_8822B (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_RARF_RC7_8822B 0x1f
-#define BIT_RARF_RC7_8822B(x) \
- (((x) & BIT_MASK_RARF_RC7_8822B) << BIT_SHIFT_RARF_RC7_8822B)
-#define BIT_GET_RARF_RC7_8822B(x) \
- (((x) >> BIT_SHIFT_RARF_RC7_8822B) & BIT_MASK_RARF_RC7_8822B)
-
-#define BIT_SHIFT_RARF_RC6_8822B (40 & CPU_OPT_WIDTH)
-#define BIT_MASK_RARF_RC6_8822B 0x1f
-#define BIT_RARF_RC6_8822B(x) \
- (((x) & BIT_MASK_RARF_RC6_8822B) << BIT_SHIFT_RARF_RC6_8822B)
-#define BIT_GET_RARF_RC6_8822B(x) \
- (((x) >> BIT_SHIFT_RARF_RC6_8822B) & BIT_MASK_RARF_RC6_8822B)
-
-#define BIT_SHIFT_RARF_RC5_8822B (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_RARF_RC5_8822B 0x1f
-#define BIT_RARF_RC5_8822B(x) \
- (((x) & BIT_MASK_RARF_RC5_8822B) << BIT_SHIFT_RARF_RC5_8822B)
-#define BIT_GET_RARF_RC5_8822B(x) \
- (((x) >> BIT_SHIFT_RARF_RC5_8822B) & BIT_MASK_RARF_RC5_8822B)
-
-#define BIT_SHIFT_RARF_RC4_8822B 24
-#define BIT_MASK_RARF_RC4_8822B 0x1f
-#define BIT_RARF_RC4_8822B(x) \
- (((x) & BIT_MASK_RARF_RC4_8822B) << BIT_SHIFT_RARF_RC4_8822B)
-#define BIT_GET_RARF_RC4_8822B(x) \
- (((x) >> BIT_SHIFT_RARF_RC4_8822B) & BIT_MASK_RARF_RC4_8822B)
-
-#define BIT_SHIFT_RARF_RC3_8822B 16
-#define BIT_MASK_RARF_RC3_8822B 0x1f
-#define BIT_RARF_RC3_8822B(x) \
- (((x) & BIT_MASK_RARF_RC3_8822B) << BIT_SHIFT_RARF_RC3_8822B)
-#define BIT_GET_RARF_RC3_8822B(x) \
- (((x) >> BIT_SHIFT_RARF_RC3_8822B) & BIT_MASK_RARF_RC3_8822B)
-
-#define BIT_SHIFT_RARF_RC2_8822B 8
-#define BIT_MASK_RARF_RC2_8822B 0x1f
-#define BIT_RARF_RC2_8822B(x) \
- (((x) & BIT_MASK_RARF_RC2_8822B) << BIT_SHIFT_RARF_RC2_8822B)
-#define BIT_GET_RARF_RC2_8822B(x) \
- (((x) >> BIT_SHIFT_RARF_RC2_8822B) & BIT_MASK_RARF_RC2_8822B)
-
-#define BIT_SHIFT_RARF_RC1_8822B 0
-#define BIT_MASK_RARF_RC1_8822B 0x1f
-#define BIT_RARF_RC1_8822B(x) \
- (((x) & BIT_MASK_RARF_RC1_8822B) << BIT_SHIFT_RARF_RC1_8822B)
-#define BIT_GET_RARF_RC1_8822B(x) \
- (((x) >> BIT_SHIFT_RARF_RC1_8822B) & BIT_MASK_RARF_RC1_8822B)
-
-/* 2 REG_RRSR_8822B */
-
-#define BIT_SHIFT_RRSR_RSC_8822B 21
-#define BIT_MASK_RRSR_RSC_8822B 0x3
-#define BIT_RRSR_RSC_8822B(x) \
- (((x) & BIT_MASK_RRSR_RSC_8822B) << BIT_SHIFT_RRSR_RSC_8822B)
-#define BIT_GET_RRSR_RSC_8822B(x) \
- (((x) >> BIT_SHIFT_RRSR_RSC_8822B) & BIT_MASK_RRSR_RSC_8822B)
-
-#define BIT_RRSR_BW_8822B BIT(20)
-
-#define BIT_SHIFT_RRSC_BITMAP_8822B 0
-#define BIT_MASK_RRSC_BITMAP_8822B 0xfffff
-#define BIT_RRSC_BITMAP_8822B(x) \
- (((x) & BIT_MASK_RRSC_BITMAP_8822B) << BIT_SHIFT_RRSC_BITMAP_8822B)
-#define BIT_GET_RRSC_BITMAP_8822B(x) \
- (((x) >> BIT_SHIFT_RRSC_BITMAP_8822B) & BIT_MASK_RRSC_BITMAP_8822B)
-
-/* 2 REG_ARFR0_8822B */
-
-#define BIT_SHIFT_ARFR0_V1_8822B 0
-#define BIT_MASK_ARFR0_V1_8822B 0xffffffffffffffffL
-#define BIT_ARFR0_V1_8822B(x) \
- (((x) & BIT_MASK_ARFR0_V1_8822B) << BIT_SHIFT_ARFR0_V1_8822B)
-#define BIT_GET_ARFR0_V1_8822B(x) \
- (((x) >> BIT_SHIFT_ARFR0_V1_8822B) & BIT_MASK_ARFR0_V1_8822B)
-
-/* 2 REG_ARFR1_V1_8822B */
-
-#define BIT_SHIFT_ARFR1_V1_8822B 0
-#define BIT_MASK_ARFR1_V1_8822B 0xffffffffffffffffL
-#define BIT_ARFR1_V1_8822B(x) \
- (((x) & BIT_MASK_ARFR1_V1_8822B) << BIT_SHIFT_ARFR1_V1_8822B)
-#define BIT_GET_ARFR1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_ARFR1_V1_8822B) & BIT_MASK_ARFR1_V1_8822B)
-
-/* 2 REG_CCK_CHECK_8822B */
-#define BIT_CHECK_CCK_EN_8822B BIT(7)
-#define BIT_EN_BCN_PKT_REL_8822B BIT(6)
-#define BIT_BCN_PORT_SEL_8822B BIT(5)
-#define BIT_MOREDATA_BYPASS_8822B BIT(4)
-#define BIT_EN_CLR_CMD_REL_BCN_PKT_8822B BIT(3)
-#define BIT_R_EN_SET_MOREDATA_8822B BIT(2)
-#define BIT__R_DIS_CLEAR_MACID_RELEASE_8822B BIT(1)
-#define BIT__R_MACID_RELEASE_EN_8822B BIT(0)
-
-/* 2 REG_AMPDU_MAX_TIME_V1_8822B */
-
-#define BIT_SHIFT_AMPDU_MAX_TIME_8822B 0
-#define BIT_MASK_AMPDU_MAX_TIME_8822B 0xff
-#define BIT_AMPDU_MAX_TIME_8822B(x) \
- (((x) & BIT_MASK_AMPDU_MAX_TIME_8822B) \
- << BIT_SHIFT_AMPDU_MAX_TIME_8822B)
-#define BIT_GET_AMPDU_MAX_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_AMPDU_MAX_TIME_8822B) & \
- BIT_MASK_AMPDU_MAX_TIME_8822B)
-
-/* 2 REG_BCNQ1_BDNY_V1_8822B */
-
-#define BIT_SHIFT_BCNQ1_PGBNDY_V1_8822B 0
-#define BIT_MASK_BCNQ1_PGBNDY_V1_8822B 0xfff
-#define BIT_BCNQ1_PGBNDY_V1_8822B(x) \
- (((x) & BIT_MASK_BCNQ1_PGBNDY_V1_8822B) \
- << BIT_SHIFT_BCNQ1_PGBNDY_V1_8822B)
-#define BIT_GET_BCNQ1_PGBNDY_V1_8822B(x) \
- (((x) >> BIT_SHIFT_BCNQ1_PGBNDY_V1_8822B) & \
- BIT_MASK_BCNQ1_PGBNDY_V1_8822B)
-
-/* 2 REG_AMPDU_MAX_LENGTH_8822B */
-
-#define BIT_SHIFT_AMPDU_MAX_LENGTH_8822B 0
-#define BIT_MASK_AMPDU_MAX_LENGTH_8822B 0xffffffffL
-#define BIT_AMPDU_MAX_LENGTH_8822B(x) \
- (((x) & BIT_MASK_AMPDU_MAX_LENGTH_8822B) \
- << BIT_SHIFT_AMPDU_MAX_LENGTH_8822B)
-#define BIT_GET_AMPDU_MAX_LENGTH_8822B(x) \
- (((x) >> BIT_SHIFT_AMPDU_MAX_LENGTH_8822B) & \
- BIT_MASK_AMPDU_MAX_LENGTH_8822B)
-
-/* 2 REG_ACQ_STOP_8822B */
-#define BIT_AC7Q_STOP_8822B BIT(7)
-#define BIT_AC6Q_STOP_8822B BIT(6)
-#define BIT_AC5Q_STOP_8822B BIT(5)
-#define BIT_AC4Q_STOP_8822B BIT(4)
-#define BIT_AC3Q_STOP_8822B BIT(3)
-#define BIT_AC2Q_STOP_8822B BIT(2)
-#define BIT_AC1Q_STOP_8822B BIT(1)
-#define BIT_AC0Q_STOP_8822B BIT(0)
-
-/* 2 REG_NDPA_RATE_8822B */
-
-#define BIT_SHIFT_R_NDPA_RATE_V1_8822B 0
-#define BIT_MASK_R_NDPA_RATE_V1_8822B 0xff
-#define BIT_R_NDPA_RATE_V1_8822B(x) \
- (((x) & BIT_MASK_R_NDPA_RATE_V1_8822B) \
- << BIT_SHIFT_R_NDPA_RATE_V1_8822B)
-#define BIT_GET_R_NDPA_RATE_V1_8822B(x) \
- (((x) >> BIT_SHIFT_R_NDPA_RATE_V1_8822B) & \
- BIT_MASK_R_NDPA_RATE_V1_8822B)
-
-/* 2 REG_TX_HANG_CTRL_8822B */
-#define BIT_R_EN_GNT_BT_AWAKE_8822B BIT(3)
-#define BIT_EN_EOF_V1_8822B BIT(2)
-#define BIT_DIS_OQT_BLOCK_8822B BIT(1)
-#define BIT_SEARCH_QUEUE_EN_8822B BIT(0)
-
-/* 2 REG_NDPA_OPT_CTRL_8822B */
-#define BIT_R_DIS_MACID_RELEASE_RTY_8822B BIT(5)
-
-#define BIT_SHIFT_BW_SIGTA_8822B 3
-#define BIT_MASK_BW_SIGTA_8822B 0x3
-#define BIT_BW_SIGTA_8822B(x) \
- (((x) & BIT_MASK_BW_SIGTA_8822B) << BIT_SHIFT_BW_SIGTA_8822B)
-#define BIT_GET_BW_SIGTA_8822B(x) \
- (((x) >> BIT_SHIFT_BW_SIGTA_8822B) & BIT_MASK_BW_SIGTA_8822B)
-
-#define BIT_EN_BAR_SIGTA_8822B BIT(2)
-
-#define BIT_SHIFT_R_NDPA_BW_8822B 0
-#define BIT_MASK_R_NDPA_BW_8822B 0x3
-#define BIT_R_NDPA_BW_8822B(x) \
- (((x) & BIT_MASK_R_NDPA_BW_8822B) << BIT_SHIFT_R_NDPA_BW_8822B)
-#define BIT_GET_R_NDPA_BW_8822B(x) \
- (((x) >> BIT_SHIFT_R_NDPA_BW_8822B) & BIT_MASK_R_NDPA_BW_8822B)
-
-/* 2 REG_RD_RESP_PKT_TH_8822B */
-
-#define BIT_SHIFT_RD_RESP_PKT_TH_V1_8822B 0
-#define BIT_MASK_RD_RESP_PKT_TH_V1_8822B 0x3f
-#define BIT_RD_RESP_PKT_TH_V1_8822B(x) \
- (((x) & BIT_MASK_RD_RESP_PKT_TH_V1_8822B) \
- << BIT_SHIFT_RD_RESP_PKT_TH_V1_8822B)
-#define BIT_GET_RD_RESP_PKT_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_RD_RESP_PKT_TH_V1_8822B) & \
- BIT_MASK_RD_RESP_PKT_TH_V1_8822B)
-
-/* 2 REG_CMDQ_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_CMDQ_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_CMDQ_V1_8822B 0x7f
-#define BIT_QUEUEMACID_CMDQ_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_CMDQ_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_CMDQ_V1_8822B)
-#define BIT_GET_QUEUEMACID_CMDQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_CMDQ_V1_8822B) & \
- BIT_MASK_QUEUEMACID_CMDQ_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_CMDQ_V1_8822B 23
-#define BIT_MASK_QUEUEAC_CMDQ_V1_8822B 0x3
-#define BIT_QUEUEAC_CMDQ_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_CMDQ_V1_8822B) \
- << BIT_SHIFT_QUEUEAC_CMDQ_V1_8822B)
-#define BIT_GET_QUEUEAC_CMDQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_CMDQ_V1_8822B) & \
- BIT_MASK_QUEUEAC_CMDQ_V1_8822B)
-
-#define BIT_TIDEMPTY_CMDQ_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_CMDQ_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_CMDQ_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_CMDQ_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_CMDQ_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_CMDQ_V2_8822B)
-#define BIT_GET_TAIL_PKT_CMDQ_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_CMDQ_V2_8822B) & \
- BIT_MASK_TAIL_PKT_CMDQ_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_CMDQ_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_CMDQ_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_CMDQ_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_CMDQ_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_CMDQ_V1_8822B)
-#define BIT_GET_HEAD_PKT_CMDQ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_CMDQ_V1_8822B) & \
- BIT_MASK_HEAD_PKT_CMDQ_V1_8822B)
-
-/* 2 REG_Q4_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_Q4_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_Q4_V1_8822B 0x7f
-#define BIT_QUEUEMACID_Q4_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q4_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_Q4_V1_8822B)
-#define BIT_GET_QUEUEMACID_Q4_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q4_V1_8822B) & \
- BIT_MASK_QUEUEMACID_Q4_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_Q4_V1_8822B 23
-#define BIT_MASK_QUEUEAC_Q4_V1_8822B 0x3
-#define BIT_QUEUEAC_Q4_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_Q4_V1_8822B) << BIT_SHIFT_QUEUEAC_Q4_V1_8822B)
-#define BIT_GET_QUEUEAC_Q4_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q4_V1_8822B) & BIT_MASK_QUEUEAC_Q4_V1_8822B)
-
-#define BIT_TIDEMPTY_Q4_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_Q4_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_Q4_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_Q4_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q4_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_Q4_V2_8822B)
-#define BIT_GET_TAIL_PKT_Q4_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q4_V2_8822B) & \
- BIT_MASK_TAIL_PKT_Q4_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_Q4_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_Q4_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_Q4_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q4_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_Q4_V1_8822B)
-#define BIT_GET_HEAD_PKT_Q4_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q4_V1_8822B) & \
- BIT_MASK_HEAD_PKT_Q4_V1_8822B)
-
-/* 2 REG_Q5_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_Q5_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_Q5_V1_8822B 0x7f
-#define BIT_QUEUEMACID_Q5_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q5_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_Q5_V1_8822B)
-#define BIT_GET_QUEUEMACID_Q5_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q5_V1_8822B) & \
- BIT_MASK_QUEUEMACID_Q5_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_Q5_V1_8822B 23
-#define BIT_MASK_QUEUEAC_Q5_V1_8822B 0x3
-#define BIT_QUEUEAC_Q5_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_Q5_V1_8822B) << BIT_SHIFT_QUEUEAC_Q5_V1_8822B)
-#define BIT_GET_QUEUEAC_Q5_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q5_V1_8822B) & BIT_MASK_QUEUEAC_Q5_V1_8822B)
-
-#define BIT_TIDEMPTY_Q5_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_Q5_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_Q5_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_Q5_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q5_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_Q5_V2_8822B)
-#define BIT_GET_TAIL_PKT_Q5_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q5_V2_8822B) & \
- BIT_MASK_TAIL_PKT_Q5_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_Q5_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_Q5_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_Q5_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q5_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_Q5_V1_8822B)
-#define BIT_GET_HEAD_PKT_Q5_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q5_V1_8822B) & \
- BIT_MASK_HEAD_PKT_Q5_V1_8822B)
-
-/* 2 REG_Q6_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_Q6_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_Q6_V1_8822B 0x7f
-#define BIT_QUEUEMACID_Q6_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q6_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_Q6_V1_8822B)
-#define BIT_GET_QUEUEMACID_Q6_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q6_V1_8822B) & \
- BIT_MASK_QUEUEMACID_Q6_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_Q6_V1_8822B 23
-#define BIT_MASK_QUEUEAC_Q6_V1_8822B 0x3
-#define BIT_QUEUEAC_Q6_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_Q6_V1_8822B) << BIT_SHIFT_QUEUEAC_Q6_V1_8822B)
-#define BIT_GET_QUEUEAC_Q6_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q6_V1_8822B) & BIT_MASK_QUEUEAC_Q6_V1_8822B)
-
-#define BIT_TIDEMPTY_Q6_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_Q6_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_Q6_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_Q6_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q6_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_Q6_V2_8822B)
-#define BIT_GET_TAIL_PKT_Q6_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q6_V2_8822B) & \
- BIT_MASK_TAIL_PKT_Q6_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_Q6_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_Q6_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_Q6_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q6_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_Q6_V1_8822B)
-#define BIT_GET_HEAD_PKT_Q6_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q6_V1_8822B) & \
- BIT_MASK_HEAD_PKT_Q6_V1_8822B)
-
-/* 2 REG_Q7_INFO_8822B */
-
-#define BIT_SHIFT_QUEUEMACID_Q7_V1_8822B 25
-#define BIT_MASK_QUEUEMACID_Q7_V1_8822B 0x7f
-#define BIT_QUEUEMACID_Q7_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEMACID_Q7_V1_8822B) \
- << BIT_SHIFT_QUEUEMACID_Q7_V1_8822B)
-#define BIT_GET_QUEUEMACID_Q7_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEMACID_Q7_V1_8822B) & \
- BIT_MASK_QUEUEMACID_Q7_V1_8822B)
-
-#define BIT_SHIFT_QUEUEAC_Q7_V1_8822B 23
-#define BIT_MASK_QUEUEAC_Q7_V1_8822B 0x3
-#define BIT_QUEUEAC_Q7_V1_8822B(x) \
- (((x) & BIT_MASK_QUEUEAC_Q7_V1_8822B) << BIT_SHIFT_QUEUEAC_Q7_V1_8822B)
-#define BIT_GET_QUEUEAC_Q7_V1_8822B(x) \
- (((x) >> BIT_SHIFT_QUEUEAC_Q7_V1_8822B) & BIT_MASK_QUEUEAC_Q7_V1_8822B)
-
-#define BIT_TIDEMPTY_Q7_V1_8822B BIT(22)
-
-#define BIT_SHIFT_TAIL_PKT_Q7_V2_8822B 11
-#define BIT_MASK_TAIL_PKT_Q7_V2_8822B 0x7ff
-#define BIT_TAIL_PKT_Q7_V2_8822B(x) \
- (((x) & BIT_MASK_TAIL_PKT_Q7_V2_8822B) \
- << BIT_SHIFT_TAIL_PKT_Q7_V2_8822B)
-#define BIT_GET_TAIL_PKT_Q7_V2_8822B(x) \
- (((x) >> BIT_SHIFT_TAIL_PKT_Q7_V2_8822B) & \
- BIT_MASK_TAIL_PKT_Q7_V2_8822B)
-
-#define BIT_SHIFT_HEAD_PKT_Q7_V1_8822B 0
-#define BIT_MASK_HEAD_PKT_Q7_V1_8822B 0x7ff
-#define BIT_HEAD_PKT_Q7_V1_8822B(x) \
- (((x) & BIT_MASK_HEAD_PKT_Q7_V1_8822B) \
- << BIT_SHIFT_HEAD_PKT_Q7_V1_8822B)
-#define BIT_GET_HEAD_PKT_Q7_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HEAD_PKT_Q7_V1_8822B) & \
- BIT_MASK_HEAD_PKT_Q7_V1_8822B)
-
-/* 2 REG_WMAC_LBK_BUF_HD_V1_8822B */
-
-#define BIT_SHIFT_WMAC_LBK_BUF_HEAD_V1_8822B 0
-#define BIT_MASK_WMAC_LBK_BUF_HEAD_V1_8822B 0xfff
-#define BIT_WMAC_LBK_BUF_HEAD_V1_8822B(x) \
- (((x) & BIT_MASK_WMAC_LBK_BUF_HEAD_V1_8822B) \
- << BIT_SHIFT_WMAC_LBK_BUF_HEAD_V1_8822B)
-#define BIT_GET_WMAC_LBK_BUF_HEAD_V1_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_LBK_BUF_HEAD_V1_8822B) & \
- BIT_MASK_WMAC_LBK_BUF_HEAD_V1_8822B)
-
-/* 2 REG_MGQ_BDNY_V1_8822B */
-
-#define BIT_SHIFT_MGQ_PGBNDY_V1_8822B 0
-#define BIT_MASK_MGQ_PGBNDY_V1_8822B 0xfff
-#define BIT_MGQ_PGBNDY_V1_8822B(x) \
- (((x) & BIT_MASK_MGQ_PGBNDY_V1_8822B) << BIT_SHIFT_MGQ_PGBNDY_V1_8822B)
-#define BIT_GET_MGQ_PGBNDY_V1_8822B(x) \
- (((x) >> BIT_SHIFT_MGQ_PGBNDY_V1_8822B) & BIT_MASK_MGQ_PGBNDY_V1_8822B)
-
-/* 2 REG_TXRPT_CTRL_8822B */
-
-#define BIT_SHIFT_TRXRPT_TIMER_TH_8822B 24
-#define BIT_MASK_TRXRPT_TIMER_TH_8822B 0xff
-#define BIT_TRXRPT_TIMER_TH_8822B(x) \
- (((x) & BIT_MASK_TRXRPT_TIMER_TH_8822B) \
- << BIT_SHIFT_TRXRPT_TIMER_TH_8822B)
-#define BIT_GET_TRXRPT_TIMER_TH_8822B(x) \
- (((x) >> BIT_SHIFT_TRXRPT_TIMER_TH_8822B) & \
- BIT_MASK_TRXRPT_TIMER_TH_8822B)
-
-#define BIT_SHIFT_TRXRPT_LEN_TH_8822B 16
-#define BIT_MASK_TRXRPT_LEN_TH_8822B 0xff
-#define BIT_TRXRPT_LEN_TH_8822B(x) \
- (((x) & BIT_MASK_TRXRPT_LEN_TH_8822B) << BIT_SHIFT_TRXRPT_LEN_TH_8822B)
-#define BIT_GET_TRXRPT_LEN_TH_8822B(x) \
- (((x) >> BIT_SHIFT_TRXRPT_LEN_TH_8822B) & BIT_MASK_TRXRPT_LEN_TH_8822B)
-
-#define BIT_SHIFT_TRXRPT_READ_PTR_8822B 8
-#define BIT_MASK_TRXRPT_READ_PTR_8822B 0xff
-#define BIT_TRXRPT_READ_PTR_8822B(x) \
- (((x) & BIT_MASK_TRXRPT_READ_PTR_8822B) \
- << BIT_SHIFT_TRXRPT_READ_PTR_8822B)
-#define BIT_GET_TRXRPT_READ_PTR_8822B(x) \
- (((x) >> BIT_SHIFT_TRXRPT_READ_PTR_8822B) & \
- BIT_MASK_TRXRPT_READ_PTR_8822B)
-
-#define BIT_SHIFT_TRXRPT_WRITE_PTR_8822B 0
-#define BIT_MASK_TRXRPT_WRITE_PTR_8822B 0xff
-#define BIT_TRXRPT_WRITE_PTR_8822B(x) \
- (((x) & BIT_MASK_TRXRPT_WRITE_PTR_8822B) \
- << BIT_SHIFT_TRXRPT_WRITE_PTR_8822B)
-#define BIT_GET_TRXRPT_WRITE_PTR_8822B(x) \
- (((x) >> BIT_SHIFT_TRXRPT_WRITE_PTR_8822B) & \
- BIT_MASK_TRXRPT_WRITE_PTR_8822B)
-
-/* 2 REG_INIRTS_RATE_SEL_8822B */
-#define BIT_LEAG_RTS_BW_DUP_8822B BIT(5)
-
-/* 2 REG_BASIC_CFEND_RATE_8822B */
-
-#define BIT_SHIFT_BASIC_CFEND_RATE_8822B 0
-#define BIT_MASK_BASIC_CFEND_RATE_8822B 0x1f
-#define BIT_BASIC_CFEND_RATE_8822B(x) \
- (((x) & BIT_MASK_BASIC_CFEND_RATE_8822B) \
- << BIT_SHIFT_BASIC_CFEND_RATE_8822B)
-#define BIT_GET_BASIC_CFEND_RATE_8822B(x) \
- (((x) >> BIT_SHIFT_BASIC_CFEND_RATE_8822B) & \
- BIT_MASK_BASIC_CFEND_RATE_8822B)
-
-/* 2 REG_STBC_CFEND_RATE_8822B */
-
-#define BIT_SHIFT_STBC_CFEND_RATE_8822B 0
-#define BIT_MASK_STBC_CFEND_RATE_8822B 0x1f
-#define BIT_STBC_CFEND_RATE_8822B(x) \
- (((x) & BIT_MASK_STBC_CFEND_RATE_8822B) \
- << BIT_SHIFT_STBC_CFEND_RATE_8822B)
-#define BIT_GET_STBC_CFEND_RATE_8822B(x) \
- (((x) >> BIT_SHIFT_STBC_CFEND_RATE_8822B) & \
- BIT_MASK_STBC_CFEND_RATE_8822B)
-
-/* 2 REG_DATA_SC_8822B */
-
-#define BIT_SHIFT_TXSC_40M_8822B 4
-#define BIT_MASK_TXSC_40M_8822B 0xf
-#define BIT_TXSC_40M_8822B(x) \
- (((x) & BIT_MASK_TXSC_40M_8822B) << BIT_SHIFT_TXSC_40M_8822B)
-#define BIT_GET_TXSC_40M_8822B(x) \
- (((x) >> BIT_SHIFT_TXSC_40M_8822B) & BIT_MASK_TXSC_40M_8822B)
-
-#define BIT_SHIFT_TXSC_20M_8822B 0
-#define BIT_MASK_TXSC_20M_8822B 0xf
-#define BIT_TXSC_20M_8822B(x) \
- (((x) & BIT_MASK_TXSC_20M_8822B) << BIT_SHIFT_TXSC_20M_8822B)
-#define BIT_GET_TXSC_20M_8822B(x) \
- (((x) >> BIT_SHIFT_TXSC_20M_8822B) & BIT_MASK_TXSC_20M_8822B)
-
-/* 2 REG_MACID_SLEEP3_8822B */
-
-#define BIT_SHIFT_MACID127_96_PKTSLEEP_8822B 0
-#define BIT_MASK_MACID127_96_PKTSLEEP_8822B 0xffffffffL
-#define BIT_MACID127_96_PKTSLEEP_8822B(x) \
- (((x) & BIT_MASK_MACID127_96_PKTSLEEP_8822B) \
- << BIT_SHIFT_MACID127_96_PKTSLEEP_8822B)
-#define BIT_GET_MACID127_96_PKTSLEEP_8822B(x) \
- (((x) >> BIT_SHIFT_MACID127_96_PKTSLEEP_8822B) & \
- BIT_MASK_MACID127_96_PKTSLEEP_8822B)
-
-/* 2 REG_MACID_SLEEP1_8822B */
-
-#define BIT_SHIFT_MACID63_32_PKTSLEEP_8822B 0
-#define BIT_MASK_MACID63_32_PKTSLEEP_8822B 0xffffffffL
-#define BIT_MACID63_32_PKTSLEEP_8822B(x) \
- (((x) & BIT_MASK_MACID63_32_PKTSLEEP_8822B) \
- << BIT_SHIFT_MACID63_32_PKTSLEEP_8822B)
-#define BIT_GET_MACID63_32_PKTSLEEP_8822B(x) \
- (((x) >> BIT_SHIFT_MACID63_32_PKTSLEEP_8822B) & \
- BIT_MASK_MACID63_32_PKTSLEEP_8822B)
-
-/* 2 REG_ARFR2_V1_8822B */
-
-#define BIT_SHIFT_ARFR2_V1_8822B 0
-#define BIT_MASK_ARFR2_V1_8822B 0xffffffffffffffffL
-#define BIT_ARFR2_V1_8822B(x) \
- (((x) & BIT_MASK_ARFR2_V1_8822B) << BIT_SHIFT_ARFR2_V1_8822B)
-#define BIT_GET_ARFR2_V1_8822B(x) \
- (((x) >> BIT_SHIFT_ARFR2_V1_8822B) & BIT_MASK_ARFR2_V1_8822B)
-
-/* 2 REG_ARFR3_V1_8822B */
-
-#define BIT_SHIFT_ARFR3_V1_8822B 0
-#define BIT_MASK_ARFR3_V1_8822B 0xffffffffffffffffL
-#define BIT_ARFR3_V1_8822B(x) \
- (((x) & BIT_MASK_ARFR3_V1_8822B) << BIT_SHIFT_ARFR3_V1_8822B)
-#define BIT_GET_ARFR3_V1_8822B(x) \
- (((x) >> BIT_SHIFT_ARFR3_V1_8822B) & BIT_MASK_ARFR3_V1_8822B)
-
-/* 2 REG_ARFR4_8822B */
-
-#define BIT_SHIFT_ARFR4_8822B 0
-#define BIT_MASK_ARFR4_8822B 0xffffffffffffffffL
-#define BIT_ARFR4_8822B(x) \
- (((x) & BIT_MASK_ARFR4_8822B) << BIT_SHIFT_ARFR4_8822B)
-#define BIT_GET_ARFR4_8822B(x) \
- (((x) >> BIT_SHIFT_ARFR4_8822B) & BIT_MASK_ARFR4_8822B)
-
-/* 2 REG_ARFR5_8822B */
-
-#define BIT_SHIFT_ARFR5_8822B 0
-#define BIT_MASK_ARFR5_8822B 0xffffffffffffffffL
-#define BIT_ARFR5_8822B(x) \
- (((x) & BIT_MASK_ARFR5_8822B) << BIT_SHIFT_ARFR5_8822B)
-#define BIT_GET_ARFR5_8822B(x) \
- (((x) >> BIT_SHIFT_ARFR5_8822B) & BIT_MASK_ARFR5_8822B)
-
-/* 2 REG_TXRPT_START_OFFSET_8822B */
-
-#define BIT_SHIFT_MACID_MURATE_OFFSET_8822B 24
-#define BIT_MASK_MACID_MURATE_OFFSET_8822B 0xff
-#define BIT_MACID_MURATE_OFFSET_8822B(x) \
- (((x) & BIT_MASK_MACID_MURATE_OFFSET_8822B) \
- << BIT_SHIFT_MACID_MURATE_OFFSET_8822B)
-#define BIT_GET_MACID_MURATE_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_MACID_MURATE_OFFSET_8822B) & \
- BIT_MASK_MACID_MURATE_OFFSET_8822B)
-
-#define BIT_RPTFIFO_SIZE_OPT_8822B BIT(16)
-
-#define BIT_SHIFT_MACID_CTRL_OFFSET_8822B 8
-#define BIT_MASK_MACID_CTRL_OFFSET_8822B 0xff
-#define BIT_MACID_CTRL_OFFSET_8822B(x) \
- (((x) & BIT_MASK_MACID_CTRL_OFFSET_8822B) \
- << BIT_SHIFT_MACID_CTRL_OFFSET_8822B)
-#define BIT_GET_MACID_CTRL_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_MACID_CTRL_OFFSET_8822B) & \
- BIT_MASK_MACID_CTRL_OFFSET_8822B)
-
-#define BIT_SHIFT_AMPDU_TXRPT_OFFSET_8822B 0
-#define BIT_MASK_AMPDU_TXRPT_OFFSET_8822B 0xff
-#define BIT_AMPDU_TXRPT_OFFSET_8822B(x) \
- (((x) & BIT_MASK_AMPDU_TXRPT_OFFSET_8822B) \
- << BIT_SHIFT_AMPDU_TXRPT_OFFSET_8822B)
-#define BIT_GET_AMPDU_TXRPT_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_AMPDU_TXRPT_OFFSET_8822B) & \
- BIT_MASK_AMPDU_TXRPT_OFFSET_8822B)
-
-/* 2 REG_POWER_STAGE1_8822B */
-#define BIT_PTA_WL_PRI_MASK_CPU_MGQ_8822B BIT(31)
-#define BIT_PTA_WL_PRI_MASK_BCNQ_8822B BIT(30)
-#define BIT_PTA_WL_PRI_MASK_HIQ_8822B BIT(29)
-#define BIT_PTA_WL_PRI_MASK_MGQ_8822B BIT(28)
-#define BIT_PTA_WL_PRI_MASK_BK_8822B BIT(27)
-#define BIT_PTA_WL_PRI_MASK_BE_8822B BIT(26)
-#define BIT_PTA_WL_PRI_MASK_VI_8822B BIT(25)
-#define BIT_PTA_WL_PRI_MASK_VO_8822B BIT(24)
-
-#define BIT_SHIFT_POWER_STAGE1_8822B 0
-#define BIT_MASK_POWER_STAGE1_8822B 0xffffff
-#define BIT_POWER_STAGE1_8822B(x) \
- (((x) & BIT_MASK_POWER_STAGE1_8822B) << BIT_SHIFT_POWER_STAGE1_8822B)
-#define BIT_GET_POWER_STAGE1_8822B(x) \
- (((x) >> BIT_SHIFT_POWER_STAGE1_8822B) & BIT_MASK_POWER_STAGE1_8822B)
-
-/* 2 REG_POWER_STAGE2_8822B */
-#define BIT__R_CTRL_PKT_POW_ADJ_8822B BIT(24)
-
-#define BIT_SHIFT_POWER_STAGE2_8822B 0
-#define BIT_MASK_POWER_STAGE2_8822B 0xffffff
-#define BIT_POWER_STAGE2_8822B(x) \
- (((x) & BIT_MASK_POWER_STAGE2_8822B) << BIT_SHIFT_POWER_STAGE2_8822B)
-#define BIT_GET_POWER_STAGE2_8822B(x) \
- (((x) >> BIT_SHIFT_POWER_STAGE2_8822B) & BIT_MASK_POWER_STAGE2_8822B)
-
-/* 2 REG_SW_AMPDU_BURST_MODE_CTRL_8822B */
-
-#define BIT_SHIFT_PAD_NUM_THRES_8822B 24
-#define BIT_MASK_PAD_NUM_THRES_8822B 0x3f
-#define BIT_PAD_NUM_THRES_8822B(x) \
- (((x) & BIT_MASK_PAD_NUM_THRES_8822B) << BIT_SHIFT_PAD_NUM_THRES_8822B)
-#define BIT_GET_PAD_NUM_THRES_8822B(x) \
- (((x) >> BIT_SHIFT_PAD_NUM_THRES_8822B) & BIT_MASK_PAD_NUM_THRES_8822B)
-
-#define BIT_R_DMA_THIS_QUEUE_BK_8822B BIT(23)
-#define BIT_R_DMA_THIS_QUEUE_BE_8822B BIT(22)
-#define BIT_R_DMA_THIS_QUEUE_VI_8822B BIT(21)
-#define BIT_R_DMA_THIS_QUEUE_VO_8822B BIT(20)
-
-#define BIT_SHIFT_R_TOTAL_LEN_TH_8822B 8
-#define BIT_MASK_R_TOTAL_LEN_TH_8822B 0xfff
-#define BIT_R_TOTAL_LEN_TH_8822B(x) \
- (((x) & BIT_MASK_R_TOTAL_LEN_TH_8822B) \
- << BIT_SHIFT_R_TOTAL_LEN_TH_8822B)
-#define BIT_GET_R_TOTAL_LEN_TH_8822B(x) \
- (((x) >> BIT_SHIFT_R_TOTAL_LEN_TH_8822B) & \
- BIT_MASK_R_TOTAL_LEN_TH_8822B)
-
-#define BIT_EN_NEW_EARLY_8822B BIT(7)
-#define BIT_PRE_TX_CMD_8822B BIT(6)
-
-#define BIT_SHIFT_NUM_SCL_EN_8822B 4
-#define BIT_MASK_NUM_SCL_EN_8822B 0x3
-#define BIT_NUM_SCL_EN_8822B(x) \
- (((x) & BIT_MASK_NUM_SCL_EN_8822B) << BIT_SHIFT_NUM_SCL_EN_8822B)
-#define BIT_GET_NUM_SCL_EN_8822B(x) \
- (((x) >> BIT_SHIFT_NUM_SCL_EN_8822B) & BIT_MASK_NUM_SCL_EN_8822B)
-
-#define BIT_BK_EN_8822B BIT(3)
-#define BIT_BE_EN_8822B BIT(2)
-#define BIT_VI_EN_8822B BIT(1)
-#define BIT_VO_EN_8822B BIT(0)
-
-/* 2 REG_PKT_LIFE_TIME_8822B */
-
-#define BIT_SHIFT_PKT_LIFTIME_BEBK_8822B 16
-#define BIT_MASK_PKT_LIFTIME_BEBK_8822B 0xffff
-#define BIT_PKT_LIFTIME_BEBK_8822B(x) \
- (((x) & BIT_MASK_PKT_LIFTIME_BEBK_8822B) \
- << BIT_SHIFT_PKT_LIFTIME_BEBK_8822B)
-#define BIT_GET_PKT_LIFTIME_BEBK_8822B(x) \
- (((x) >> BIT_SHIFT_PKT_LIFTIME_BEBK_8822B) & \
- BIT_MASK_PKT_LIFTIME_BEBK_8822B)
-
-#define BIT_SHIFT_PKT_LIFTIME_VOVI_8822B 0
-#define BIT_MASK_PKT_LIFTIME_VOVI_8822B 0xffff
-#define BIT_PKT_LIFTIME_VOVI_8822B(x) \
- (((x) & BIT_MASK_PKT_LIFTIME_VOVI_8822B) \
- << BIT_SHIFT_PKT_LIFTIME_VOVI_8822B)
-#define BIT_GET_PKT_LIFTIME_VOVI_8822B(x) \
- (((x) >> BIT_SHIFT_PKT_LIFTIME_VOVI_8822B) & \
- BIT_MASK_PKT_LIFTIME_VOVI_8822B)
-
-/* 2 REG_STBC_SETTING_8822B */
-
-#define BIT_SHIFT_CDEND_TXTIME_L_8822B 4
-#define BIT_MASK_CDEND_TXTIME_L_8822B 0xf
-#define BIT_CDEND_TXTIME_L_8822B(x) \
- (((x) & BIT_MASK_CDEND_TXTIME_L_8822B) \
- << BIT_SHIFT_CDEND_TXTIME_L_8822B)
-#define BIT_GET_CDEND_TXTIME_L_8822B(x) \
- (((x) >> BIT_SHIFT_CDEND_TXTIME_L_8822B) & \
- BIT_MASK_CDEND_TXTIME_L_8822B)
-
-#define BIT_SHIFT_NESS_8822B 2
-#define BIT_MASK_NESS_8822B 0x3
-#define BIT_NESS_8822B(x) (((x) & BIT_MASK_NESS_8822B) << BIT_SHIFT_NESS_8822B)
-#define BIT_GET_NESS_8822B(x) \
- (((x) >> BIT_SHIFT_NESS_8822B) & BIT_MASK_NESS_8822B)
-
-#define BIT_SHIFT_STBC_CFEND_8822B 0
-#define BIT_MASK_STBC_CFEND_8822B 0x3
-#define BIT_STBC_CFEND_8822B(x) \
- (((x) & BIT_MASK_STBC_CFEND_8822B) << BIT_SHIFT_STBC_CFEND_8822B)
-#define BIT_GET_STBC_CFEND_8822B(x) \
- (((x) >> BIT_SHIFT_STBC_CFEND_8822B) & BIT_MASK_STBC_CFEND_8822B)
-
-/* 2 REG_STBC_SETTING2_8822B */
-
-#define BIT_SHIFT_CDEND_TXTIME_H_8822B 0
-#define BIT_MASK_CDEND_TXTIME_H_8822B 0x1f
-#define BIT_CDEND_TXTIME_H_8822B(x) \
- (((x) & BIT_MASK_CDEND_TXTIME_H_8822B) \
- << BIT_SHIFT_CDEND_TXTIME_H_8822B)
-#define BIT_GET_CDEND_TXTIME_H_8822B(x) \
- (((x) >> BIT_SHIFT_CDEND_TXTIME_H_8822B) & \
- BIT_MASK_CDEND_TXTIME_H_8822B)
-
-/* 2 REG_QUEUE_CTRL_8822B */
-#define BIT_PTA_EDCCA_EN_8822B BIT(5)
-#define BIT_PTA_WL_TX_EN_8822B BIT(4)
-#define BIT_R_USE_DATA_BW_8822B BIT(3)
-#define BIT_TRI_PKT_INT_MODE1_8822B BIT(2)
-#define BIT_TRI_PKT_INT_MODE0_8822B BIT(1)
-#define BIT_ACQ_MODE_SEL_8822B BIT(0)
-
-/* 2 REG_SINGLE_AMPDU_CTRL_8822B */
-#define BIT_EN_SINGLE_APMDU_8822B BIT(7)
-
-/* 2 REG_PROT_MODE_CTRL_8822B */
-
-#define BIT_SHIFT_RTS_MAX_AGG_NUM_8822B 24
-#define BIT_MASK_RTS_MAX_AGG_NUM_8822B 0x3f
-#define BIT_RTS_MAX_AGG_NUM_8822B(x) \
- (((x) & BIT_MASK_RTS_MAX_AGG_NUM_8822B) \
- << BIT_SHIFT_RTS_MAX_AGG_NUM_8822B)
-#define BIT_GET_RTS_MAX_AGG_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_RTS_MAX_AGG_NUM_8822B) & \
- BIT_MASK_RTS_MAX_AGG_NUM_8822B)
-
-#define BIT_SHIFT_MAX_AGG_NUM_8822B 16
-#define BIT_MASK_MAX_AGG_NUM_8822B 0x3f
-#define BIT_MAX_AGG_NUM_8822B(x) \
- (((x) & BIT_MASK_MAX_AGG_NUM_8822B) << BIT_SHIFT_MAX_AGG_NUM_8822B)
-#define BIT_GET_MAX_AGG_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_MAX_AGG_NUM_8822B) & BIT_MASK_MAX_AGG_NUM_8822B)
-
-#define BIT_SHIFT_RTS_TXTIME_TH_8822B 8
-#define BIT_MASK_RTS_TXTIME_TH_8822B 0xff
-#define BIT_RTS_TXTIME_TH_8822B(x) \
- (((x) & BIT_MASK_RTS_TXTIME_TH_8822B) << BIT_SHIFT_RTS_TXTIME_TH_8822B)
-#define BIT_GET_RTS_TXTIME_TH_8822B(x) \
- (((x) >> BIT_SHIFT_RTS_TXTIME_TH_8822B) & BIT_MASK_RTS_TXTIME_TH_8822B)
-
-#define BIT_SHIFT_RTS_LEN_TH_8822B 0
-#define BIT_MASK_RTS_LEN_TH_8822B 0xff
-#define BIT_RTS_LEN_TH_8822B(x) \
- (((x) & BIT_MASK_RTS_LEN_TH_8822B) << BIT_SHIFT_RTS_LEN_TH_8822B)
-#define BIT_GET_RTS_LEN_TH_8822B(x) \
- (((x) >> BIT_SHIFT_RTS_LEN_TH_8822B) & BIT_MASK_RTS_LEN_TH_8822B)
-
-/* 2 REG_BAR_MODE_CTRL_8822B */
-
-#define BIT_SHIFT_BAR_RTY_LMT_8822B 16
-#define BIT_MASK_BAR_RTY_LMT_8822B 0x3
-#define BIT_BAR_RTY_LMT_8822B(x) \
- (((x) & BIT_MASK_BAR_RTY_LMT_8822B) << BIT_SHIFT_BAR_RTY_LMT_8822B)
-#define BIT_GET_BAR_RTY_LMT_8822B(x) \
- (((x) >> BIT_SHIFT_BAR_RTY_LMT_8822B) & BIT_MASK_BAR_RTY_LMT_8822B)
-
-#define BIT_SHIFT_BAR_PKT_TXTIME_TH_8822B 8
-#define BIT_MASK_BAR_PKT_TXTIME_TH_8822B 0xff
-#define BIT_BAR_PKT_TXTIME_TH_8822B(x) \
- (((x) & BIT_MASK_BAR_PKT_TXTIME_TH_8822B) \
- << BIT_SHIFT_BAR_PKT_TXTIME_TH_8822B)
-#define BIT_GET_BAR_PKT_TXTIME_TH_8822B(x) \
- (((x) >> BIT_SHIFT_BAR_PKT_TXTIME_TH_8822B) & \
- BIT_MASK_BAR_PKT_TXTIME_TH_8822B)
-
-#define BIT_BAR_EN_V1_8822B BIT(6)
-
-#define BIT_SHIFT_BAR_PKTNUM_TH_V1_8822B 0
-#define BIT_MASK_BAR_PKTNUM_TH_V1_8822B 0x3f
-#define BIT_BAR_PKTNUM_TH_V1_8822B(x) \
- (((x) & BIT_MASK_BAR_PKTNUM_TH_V1_8822B) \
- << BIT_SHIFT_BAR_PKTNUM_TH_V1_8822B)
-#define BIT_GET_BAR_PKTNUM_TH_V1_8822B(x) \
- (((x) >> BIT_SHIFT_BAR_PKTNUM_TH_V1_8822B) & \
- BIT_MASK_BAR_PKTNUM_TH_V1_8822B)
-
-/* 2 REG_RA_TRY_RATE_AGG_LMT_8822B */
-
-#define BIT_SHIFT_RA_TRY_RATE_AGG_LMT_V1_8822B 0
-#define BIT_MASK_RA_TRY_RATE_AGG_LMT_V1_8822B 0x3f
-#define BIT_RA_TRY_RATE_AGG_LMT_V1_8822B(x) \
- (((x) & BIT_MASK_RA_TRY_RATE_AGG_LMT_V1_8822B) \
- << BIT_SHIFT_RA_TRY_RATE_AGG_LMT_V1_8822B)
-#define BIT_GET_RA_TRY_RATE_AGG_LMT_V1_8822B(x) \
- (((x) >> BIT_SHIFT_RA_TRY_RATE_AGG_LMT_V1_8822B) & \
- BIT_MASK_RA_TRY_RATE_AGG_LMT_V1_8822B)
-
-/* 2 REG_MACID_SLEEP2_8822B */
-
-#define BIT_SHIFT_MACID95_64PKTSLEEP_8822B 0
-#define BIT_MASK_MACID95_64PKTSLEEP_8822B 0xffffffffL
-#define BIT_MACID95_64PKTSLEEP_8822B(x) \
- (((x) & BIT_MASK_MACID95_64PKTSLEEP_8822B) \
- << BIT_SHIFT_MACID95_64PKTSLEEP_8822B)
-#define BIT_GET_MACID95_64PKTSLEEP_8822B(x) \
- (((x) >> BIT_SHIFT_MACID95_64PKTSLEEP_8822B) & \
- BIT_MASK_MACID95_64PKTSLEEP_8822B)
-
-/* 2 REG_MACID_SLEEP_8822B */
-
-#define BIT_SHIFT_MACID31_0_PKTSLEEP_8822B 0
-#define BIT_MASK_MACID31_0_PKTSLEEP_8822B 0xffffffffL
-#define BIT_MACID31_0_PKTSLEEP_8822B(x) \
- (((x) & BIT_MASK_MACID31_0_PKTSLEEP_8822B) \
- << BIT_SHIFT_MACID31_0_PKTSLEEP_8822B)
-#define BIT_GET_MACID31_0_PKTSLEEP_8822B(x) \
- (((x) >> BIT_SHIFT_MACID31_0_PKTSLEEP_8822B) & \
- BIT_MASK_MACID31_0_PKTSLEEP_8822B)
-
-/* 2 REG_HW_SEQ0_8822B */
-
-#define BIT_SHIFT_HW_SSN_SEQ0_8822B 0
-#define BIT_MASK_HW_SSN_SEQ0_8822B 0xfff
-#define BIT_HW_SSN_SEQ0_8822B(x) \
- (((x) & BIT_MASK_HW_SSN_SEQ0_8822B) << BIT_SHIFT_HW_SSN_SEQ0_8822B)
-#define BIT_GET_HW_SSN_SEQ0_8822B(x) \
- (((x) >> BIT_SHIFT_HW_SSN_SEQ0_8822B) & BIT_MASK_HW_SSN_SEQ0_8822B)
-
-/* 2 REG_HW_SEQ1_8822B */
-
-#define BIT_SHIFT_HW_SSN_SEQ1_8822B 0
-#define BIT_MASK_HW_SSN_SEQ1_8822B 0xfff
-#define BIT_HW_SSN_SEQ1_8822B(x) \
- (((x) & BIT_MASK_HW_SSN_SEQ1_8822B) << BIT_SHIFT_HW_SSN_SEQ1_8822B)
-#define BIT_GET_HW_SSN_SEQ1_8822B(x) \
- (((x) >> BIT_SHIFT_HW_SSN_SEQ1_8822B) & BIT_MASK_HW_SSN_SEQ1_8822B)
-
-/* 2 REG_HW_SEQ2_8822B */
-
-#define BIT_SHIFT_HW_SSN_SEQ2_8822B 0
-#define BIT_MASK_HW_SSN_SEQ2_8822B 0xfff
-#define BIT_HW_SSN_SEQ2_8822B(x) \
- (((x) & BIT_MASK_HW_SSN_SEQ2_8822B) << BIT_SHIFT_HW_SSN_SEQ2_8822B)
-#define BIT_GET_HW_SSN_SEQ2_8822B(x) \
- (((x) >> BIT_SHIFT_HW_SSN_SEQ2_8822B) & BIT_MASK_HW_SSN_SEQ2_8822B)
-
-/* 2 REG_HW_SEQ3_8822B */
-
-#define BIT_SHIFT_HW_SSN_SEQ3_8822B 0
-#define BIT_MASK_HW_SSN_SEQ3_8822B 0xfff
-#define BIT_HW_SSN_SEQ3_8822B(x) \
- (((x) & BIT_MASK_HW_SSN_SEQ3_8822B) << BIT_SHIFT_HW_SSN_SEQ3_8822B)
-#define BIT_GET_HW_SSN_SEQ3_8822B(x) \
- (((x) >> BIT_SHIFT_HW_SSN_SEQ3_8822B) & BIT_MASK_HW_SSN_SEQ3_8822B)
-
-/* 2 REG_NULL_PKT_STATUS_V1_8822B */
-
-#define BIT_SHIFT_PTCL_TOTAL_PG_V2_8822B 2
-#define BIT_MASK_PTCL_TOTAL_PG_V2_8822B 0x3fff
-#define BIT_PTCL_TOTAL_PG_V2_8822B(x) \
- (((x) & BIT_MASK_PTCL_TOTAL_PG_V2_8822B) \
- << BIT_SHIFT_PTCL_TOTAL_PG_V2_8822B)
-#define BIT_GET_PTCL_TOTAL_PG_V2_8822B(x) \
- (((x) >> BIT_SHIFT_PTCL_TOTAL_PG_V2_8822B) & \
- BIT_MASK_PTCL_TOTAL_PG_V2_8822B)
-
-#define BIT_TX_NULL_1_8822B BIT(1)
-#define BIT_TX_NULL_0_8822B BIT(0)
-
-/* 2 REG_PTCL_ERR_STATUS_8822B */
-#define BIT_PTCL_RATE_TABLE_INVALID_8822B BIT(7)
-#define BIT_FTM_T2R_ERROR_8822B BIT(6)
-#define BIT_PTCL_ERR0_8822B BIT(5)
-#define BIT_PTCL_ERR1_8822B BIT(4)
-#define BIT_PTCL_ERR2_8822B BIT(3)
-#define BIT_PTCL_ERR3_8822B BIT(2)
-#define BIT_PTCL_ERR4_8822B BIT(1)
-#define BIT_PTCL_ERR5_8822B BIT(0)
-
-/* 2 REG_NULL_PKT_STATUS_EXTEND_8822B */
-#define BIT_CLI3_TX_NULL_1_8822B BIT(7)
-#define BIT_CLI3_TX_NULL_0_8822B BIT(6)
-#define BIT_CLI2_TX_NULL_1_8822B BIT(5)
-#define BIT_CLI2_TX_NULL_0_8822B BIT(4)
-#define BIT_CLI1_TX_NULL_1_8822B BIT(3)
-#define BIT_CLI1_TX_NULL_0_8822B BIT(2)
-#define BIT_CLI0_TX_NULL_1_8822B BIT(1)
-#define BIT_CLI0_TX_NULL_0_8822B BIT(0)
-
-/* 2 REG_VIDEO_ENHANCEMENT_FUN_8822B */
-#define BIT_VIDEO_JUST_DROP_8822B BIT(1)
-#define BIT_VIDEO_ENHANCEMENT_FUN_EN_8822B BIT(0)
-
-/* 2 REG_BT_POLLUTE_PKT_CNT_8822B */
-
-#define BIT_SHIFT_BT_POLLUTE_PKT_CNT_8822B 0
-#define BIT_MASK_BT_POLLUTE_PKT_CNT_8822B 0xffff
-#define BIT_BT_POLLUTE_PKT_CNT_8822B(x) \
- (((x) & BIT_MASK_BT_POLLUTE_PKT_CNT_8822B) \
- << BIT_SHIFT_BT_POLLUTE_PKT_CNT_8822B)
-#define BIT_GET_BT_POLLUTE_PKT_CNT_8822B(x) \
- (((x) >> BIT_SHIFT_BT_POLLUTE_PKT_CNT_8822B) & \
- BIT_MASK_BT_POLLUTE_PKT_CNT_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_PTCL_DBG_8822B */
-
-#define BIT_SHIFT_PTCL_DBG_8822B 0
-#define BIT_MASK_PTCL_DBG_8822B 0xffffffffL
-#define BIT_PTCL_DBG_8822B(x) \
- (((x) & BIT_MASK_PTCL_DBG_8822B) << BIT_SHIFT_PTCL_DBG_8822B)
-#define BIT_GET_PTCL_DBG_8822B(x) \
- (((x) >> BIT_SHIFT_PTCL_DBG_8822B) & BIT_MASK_PTCL_DBG_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_CPUMGQ_TIMER_CTRL2_8822B */
-
-#define BIT_SHIFT_TRI_HEAD_ADDR_8822B 16
-#define BIT_MASK_TRI_HEAD_ADDR_8822B 0xfff
-#define BIT_TRI_HEAD_ADDR_8822B(x) \
- (((x) & BIT_MASK_TRI_HEAD_ADDR_8822B) << BIT_SHIFT_TRI_HEAD_ADDR_8822B)
-#define BIT_GET_TRI_HEAD_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_TRI_HEAD_ADDR_8822B) & BIT_MASK_TRI_HEAD_ADDR_8822B)
-
-#define BIT_DROP_TH_EN_8822B BIT(8)
-
-#define BIT_SHIFT_DROP_TH_8822B 0
-#define BIT_MASK_DROP_TH_8822B 0xff
-#define BIT_DROP_TH_8822B(x) \
- (((x) & BIT_MASK_DROP_TH_8822B) << BIT_SHIFT_DROP_TH_8822B)
-#define BIT_GET_DROP_TH_8822B(x) \
- (((x) >> BIT_SHIFT_DROP_TH_8822B) & BIT_MASK_DROP_TH_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_DUMMY_PAGE4_V1_8822B */
-#define BIT_BCN_EN_EXTHWSEQ_8822B BIT(1)
-#define BIT_BCN_EN_HWSEQ_8822B BIT(0)
-
-/* 2 REG_MOREDATA_8822B */
-#define BIT_MOREDATA_CTRL2_EN_V1_8822B BIT(3)
-#define BIT_MOREDATA_CTRL1_EN_V1_8822B BIT(2)
-#define BIT_PKTIN_MOREDATA_REPLACE_ENABLE_V1_8822B BIT(0)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_Q0_Q1_INFO_8822B */
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_8822B BIT(31)
-
-#define BIT_SHIFT_GTAB_ID_8822B 28
-#define BIT_MASK_GTAB_ID_8822B 0x7
-#define BIT_GTAB_ID_8822B(x) \
- (((x) & BIT_MASK_GTAB_ID_8822B) << BIT_SHIFT_GTAB_ID_8822B)
-#define BIT_GET_GTAB_ID_8822B(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_8822B) & BIT_MASK_GTAB_ID_8822B)
-
-#define BIT_SHIFT_AC1_PKT_INFO_8822B 16
-#define BIT_MASK_AC1_PKT_INFO_8822B 0xfff
-#define BIT_AC1_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_AC1_PKT_INFO_8822B) << BIT_SHIFT_AC1_PKT_INFO_8822B)
-#define BIT_GET_AC1_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_AC1_PKT_INFO_8822B) & BIT_MASK_AC1_PKT_INFO_8822B)
-
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_V1_8822B BIT(15)
-
-#define BIT_SHIFT_GTAB_ID_V1_8822B 12
-#define BIT_MASK_GTAB_ID_V1_8822B 0x7
-#define BIT_GTAB_ID_V1_8822B(x) \
- (((x) & BIT_MASK_GTAB_ID_V1_8822B) << BIT_SHIFT_GTAB_ID_V1_8822B)
-#define BIT_GET_GTAB_ID_V1_8822B(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_V1_8822B) & BIT_MASK_GTAB_ID_V1_8822B)
-
-#define BIT_SHIFT_AC0_PKT_INFO_8822B 0
-#define BIT_MASK_AC0_PKT_INFO_8822B 0xfff
-#define BIT_AC0_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_AC0_PKT_INFO_8822B) << BIT_SHIFT_AC0_PKT_INFO_8822B)
-#define BIT_GET_AC0_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_AC0_PKT_INFO_8822B) & BIT_MASK_AC0_PKT_INFO_8822B)
-
-/* 2 REG_Q2_Q3_INFO_8822B */
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_8822B BIT(31)
-
-#define BIT_SHIFT_GTAB_ID_8822B 28
-#define BIT_MASK_GTAB_ID_8822B 0x7
-#define BIT_GTAB_ID_8822B(x) \
- (((x) & BIT_MASK_GTAB_ID_8822B) << BIT_SHIFT_GTAB_ID_8822B)
-#define BIT_GET_GTAB_ID_8822B(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_8822B) & BIT_MASK_GTAB_ID_8822B)
-
-#define BIT_SHIFT_AC3_PKT_INFO_8822B 16
-#define BIT_MASK_AC3_PKT_INFO_8822B 0xfff
-#define BIT_AC3_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_AC3_PKT_INFO_8822B) << BIT_SHIFT_AC3_PKT_INFO_8822B)
-#define BIT_GET_AC3_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_AC3_PKT_INFO_8822B) & BIT_MASK_AC3_PKT_INFO_8822B)
-
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_V1_8822B BIT(15)
-
-#define BIT_SHIFT_GTAB_ID_V1_8822B 12
-#define BIT_MASK_GTAB_ID_V1_8822B 0x7
-#define BIT_GTAB_ID_V1_8822B(x) \
- (((x) & BIT_MASK_GTAB_ID_V1_8822B) << BIT_SHIFT_GTAB_ID_V1_8822B)
-#define BIT_GET_GTAB_ID_V1_8822B(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_V1_8822B) & BIT_MASK_GTAB_ID_V1_8822B)
-
-#define BIT_SHIFT_AC2_PKT_INFO_8822B 0
-#define BIT_MASK_AC2_PKT_INFO_8822B 0xfff
-#define BIT_AC2_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_AC2_PKT_INFO_8822B) << BIT_SHIFT_AC2_PKT_INFO_8822B)
-#define BIT_GET_AC2_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_AC2_PKT_INFO_8822B) & BIT_MASK_AC2_PKT_INFO_8822B)
-
-/* 2 REG_Q4_Q5_INFO_8822B */
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_8822B BIT(31)
-
-#define BIT_SHIFT_GTAB_ID_8822B 28
-#define BIT_MASK_GTAB_ID_8822B 0x7
-#define BIT_GTAB_ID_8822B(x) \
- (((x) & BIT_MASK_GTAB_ID_8822B) << BIT_SHIFT_GTAB_ID_8822B)
-#define BIT_GET_GTAB_ID_8822B(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_8822B) & BIT_MASK_GTAB_ID_8822B)
-
-#define BIT_SHIFT_AC5_PKT_INFO_8822B 16
-#define BIT_MASK_AC5_PKT_INFO_8822B 0xfff
-#define BIT_AC5_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_AC5_PKT_INFO_8822B) << BIT_SHIFT_AC5_PKT_INFO_8822B)
-#define BIT_GET_AC5_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_AC5_PKT_INFO_8822B) & BIT_MASK_AC5_PKT_INFO_8822B)
-
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_V1_8822B BIT(15)
-
-#define BIT_SHIFT_GTAB_ID_V1_8822B 12
-#define BIT_MASK_GTAB_ID_V1_8822B 0x7
-#define BIT_GTAB_ID_V1_8822B(x) \
- (((x) & BIT_MASK_GTAB_ID_V1_8822B) << BIT_SHIFT_GTAB_ID_V1_8822B)
-#define BIT_GET_GTAB_ID_V1_8822B(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_V1_8822B) & BIT_MASK_GTAB_ID_V1_8822B)
-
-#define BIT_SHIFT_AC4_PKT_INFO_8822B 0
-#define BIT_MASK_AC4_PKT_INFO_8822B 0xfff
-#define BIT_AC4_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_AC4_PKT_INFO_8822B) << BIT_SHIFT_AC4_PKT_INFO_8822B)
-#define BIT_GET_AC4_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_AC4_PKT_INFO_8822B) & BIT_MASK_AC4_PKT_INFO_8822B)
-
-/* 2 REG_Q6_Q7_INFO_8822B */
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_8822B BIT(31)
-
-#define BIT_SHIFT_GTAB_ID_8822B 28
-#define BIT_MASK_GTAB_ID_8822B 0x7
-#define BIT_GTAB_ID_8822B(x) \
- (((x) & BIT_MASK_GTAB_ID_8822B) << BIT_SHIFT_GTAB_ID_8822B)
-#define BIT_GET_GTAB_ID_8822B(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_8822B) & BIT_MASK_GTAB_ID_8822B)
-
-#define BIT_SHIFT_AC7_PKT_INFO_8822B 16
-#define BIT_MASK_AC7_PKT_INFO_8822B 0xfff
-#define BIT_AC7_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_AC7_PKT_INFO_8822B) << BIT_SHIFT_AC7_PKT_INFO_8822B)
-#define BIT_GET_AC7_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_AC7_PKT_INFO_8822B) & BIT_MASK_AC7_PKT_INFO_8822B)
-
-#define BIT_QUEUE_MACID_AC_NOT_THE_SAME_V1_8822B BIT(15)
-
-#define BIT_SHIFT_GTAB_ID_V1_8822B 12
-#define BIT_MASK_GTAB_ID_V1_8822B 0x7
-#define BIT_GTAB_ID_V1_8822B(x) \
- (((x) & BIT_MASK_GTAB_ID_V1_8822B) << BIT_SHIFT_GTAB_ID_V1_8822B)
-#define BIT_GET_GTAB_ID_V1_8822B(x) \
- (((x) >> BIT_SHIFT_GTAB_ID_V1_8822B) & BIT_MASK_GTAB_ID_V1_8822B)
-
-#define BIT_SHIFT_AC6_PKT_INFO_8822B 0
-#define BIT_MASK_AC6_PKT_INFO_8822B 0xfff
-#define BIT_AC6_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_AC6_PKT_INFO_8822B) << BIT_SHIFT_AC6_PKT_INFO_8822B)
-#define BIT_GET_AC6_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_AC6_PKT_INFO_8822B) & BIT_MASK_AC6_PKT_INFO_8822B)
-
-/* 2 REG_MGQ_HIQ_INFO_8822B */
-
-#define BIT_SHIFT_HIQ_PKT_INFO_8822B 16
-#define BIT_MASK_HIQ_PKT_INFO_8822B 0xfff
-#define BIT_HIQ_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_HIQ_PKT_INFO_8822B) << BIT_SHIFT_HIQ_PKT_INFO_8822B)
-#define BIT_GET_HIQ_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_HIQ_PKT_INFO_8822B) & BIT_MASK_HIQ_PKT_INFO_8822B)
-
-#define BIT_SHIFT_MGQ_PKT_INFO_8822B 0
-#define BIT_MASK_MGQ_PKT_INFO_8822B 0xfff
-#define BIT_MGQ_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_MGQ_PKT_INFO_8822B) << BIT_SHIFT_MGQ_PKT_INFO_8822B)
-#define BIT_GET_MGQ_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_MGQ_PKT_INFO_8822B) & BIT_MASK_MGQ_PKT_INFO_8822B)
-
-/* 2 REG_CMDQ_BCNQ_INFO_8822B */
-
-#define BIT_SHIFT_CMDQ_PKT_INFO_8822B 16
-#define BIT_MASK_CMDQ_PKT_INFO_8822B 0xfff
-#define BIT_CMDQ_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_CMDQ_PKT_INFO_8822B) << BIT_SHIFT_CMDQ_PKT_INFO_8822B)
-#define BIT_GET_CMDQ_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_CMDQ_PKT_INFO_8822B) & BIT_MASK_CMDQ_PKT_INFO_8822B)
-
-#define BIT_SHIFT_BCNQ_PKT_INFO_8822B 0
-#define BIT_MASK_BCNQ_PKT_INFO_8822B 0xfff
-#define BIT_BCNQ_PKT_INFO_8822B(x) \
- (((x) & BIT_MASK_BCNQ_PKT_INFO_8822B) << BIT_SHIFT_BCNQ_PKT_INFO_8822B)
-#define BIT_GET_BCNQ_PKT_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_BCNQ_PKT_INFO_8822B) & BIT_MASK_BCNQ_PKT_INFO_8822B)
-
-/* 2 REG_USEREG_SETTING_8822B */
-#define BIT_NDPA_USEREG_8822B BIT(21)
-
-#define BIT_SHIFT_RETRY_USEREG_8822B 19
-#define BIT_MASK_RETRY_USEREG_8822B 0x3
-#define BIT_RETRY_USEREG_8822B(x) \
- (((x) & BIT_MASK_RETRY_USEREG_8822B) << BIT_SHIFT_RETRY_USEREG_8822B)
-#define BIT_GET_RETRY_USEREG_8822B(x) \
- (((x) >> BIT_SHIFT_RETRY_USEREG_8822B) & BIT_MASK_RETRY_USEREG_8822B)
-
-#define BIT_SHIFT_TRYPKT_USEREG_8822B 17
-#define BIT_MASK_TRYPKT_USEREG_8822B 0x3
-#define BIT_TRYPKT_USEREG_8822B(x) \
- (((x) & BIT_MASK_TRYPKT_USEREG_8822B) << BIT_SHIFT_TRYPKT_USEREG_8822B)
-#define BIT_GET_TRYPKT_USEREG_8822B(x) \
- (((x) >> BIT_SHIFT_TRYPKT_USEREG_8822B) & BIT_MASK_TRYPKT_USEREG_8822B)
-
-#define BIT_CTLPKT_USEREG_8822B BIT(16)
-
-/* 2 REG_AESIV_SETTING_8822B */
-
-#define BIT_SHIFT_AESIV_OFFSET_8822B 0
-#define BIT_MASK_AESIV_OFFSET_8822B 0xfff
-#define BIT_AESIV_OFFSET_8822B(x) \
- (((x) & BIT_MASK_AESIV_OFFSET_8822B) << BIT_SHIFT_AESIV_OFFSET_8822B)
-#define BIT_GET_AESIV_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_AESIV_OFFSET_8822B) & BIT_MASK_AESIV_OFFSET_8822B)
-
-/* 2 REG_BF0_TIME_SETTING_8822B */
-#define BIT_BF0_TIMER_SET_8822B BIT(31)
-#define BIT_BF0_TIMER_CLR_8822B BIT(30)
-#define BIT_BF0_UPDATE_EN_8822B BIT(29)
-#define BIT_BF0_TIMER_EN_8822B BIT(28)
-
-#define BIT_SHIFT_BF0_PRETIME_OVER_8822B 16
-#define BIT_MASK_BF0_PRETIME_OVER_8822B 0xfff
-#define BIT_BF0_PRETIME_OVER_8822B(x) \
- (((x) & BIT_MASK_BF0_PRETIME_OVER_8822B) \
- << BIT_SHIFT_BF0_PRETIME_OVER_8822B)
-#define BIT_GET_BF0_PRETIME_OVER_8822B(x) \
- (((x) >> BIT_SHIFT_BF0_PRETIME_OVER_8822B) & \
- BIT_MASK_BF0_PRETIME_OVER_8822B)
-
-#define BIT_SHIFT_BF0_LIFETIME_8822B 0
-#define BIT_MASK_BF0_LIFETIME_8822B 0xffff
-#define BIT_BF0_LIFETIME_8822B(x) \
- (((x) & BIT_MASK_BF0_LIFETIME_8822B) << BIT_SHIFT_BF0_LIFETIME_8822B)
-#define BIT_GET_BF0_LIFETIME_8822B(x) \
- (((x) >> BIT_SHIFT_BF0_LIFETIME_8822B) & BIT_MASK_BF0_LIFETIME_8822B)
-
-/* 2 REG_BF1_TIME_SETTING_8822B */
-#define BIT_BF1_TIMER_SET_8822B BIT(31)
-#define BIT_BF1_TIMER_CLR_8822B BIT(30)
-#define BIT_BF1_UPDATE_EN_8822B BIT(29)
-#define BIT_BF1_TIMER_EN_8822B BIT(28)
-
-#define BIT_SHIFT_BF1_PRETIME_OVER_8822B 16
-#define BIT_MASK_BF1_PRETIME_OVER_8822B 0xfff
-#define BIT_BF1_PRETIME_OVER_8822B(x) \
- (((x) & BIT_MASK_BF1_PRETIME_OVER_8822B) \
- << BIT_SHIFT_BF1_PRETIME_OVER_8822B)
-#define BIT_GET_BF1_PRETIME_OVER_8822B(x) \
- (((x) >> BIT_SHIFT_BF1_PRETIME_OVER_8822B) & \
- BIT_MASK_BF1_PRETIME_OVER_8822B)
-
-#define BIT_SHIFT_BF1_LIFETIME_8822B 0
-#define BIT_MASK_BF1_LIFETIME_8822B 0xffff
-#define BIT_BF1_LIFETIME_8822B(x) \
- (((x) & BIT_MASK_BF1_LIFETIME_8822B) << BIT_SHIFT_BF1_LIFETIME_8822B)
-#define BIT_GET_BF1_LIFETIME_8822B(x) \
- (((x) >> BIT_SHIFT_BF1_LIFETIME_8822B) & BIT_MASK_BF1_LIFETIME_8822B)
-
-/* 2 REG_BF_TIMEOUT_EN_8822B */
-#define BIT_EN_VHT_LDPC_8822B BIT(9)
-#define BIT_EN_HT_LDPC_8822B BIT(8)
-#define BIT_BF1_TIMEOUT_EN_8822B BIT(1)
-#define BIT_BF0_TIMEOUT_EN_8822B BIT(0)
-
-/* 2 REG_MACID_RELEASE0_8822B */
-
-#define BIT_SHIFT_MACID31_0_RELEASE_8822B 0
-#define BIT_MASK_MACID31_0_RELEASE_8822B 0xffffffffL
-#define BIT_MACID31_0_RELEASE_8822B(x) \
- (((x) & BIT_MASK_MACID31_0_RELEASE_8822B) \
- << BIT_SHIFT_MACID31_0_RELEASE_8822B)
-#define BIT_GET_MACID31_0_RELEASE_8822B(x) \
- (((x) >> BIT_SHIFT_MACID31_0_RELEASE_8822B) & \
- BIT_MASK_MACID31_0_RELEASE_8822B)
-
-/* 2 REG_MACID_RELEASE1_8822B */
-
-#define BIT_SHIFT_MACID63_32_RELEASE_8822B 0
-#define BIT_MASK_MACID63_32_RELEASE_8822B 0xffffffffL
-#define BIT_MACID63_32_RELEASE_8822B(x) \
- (((x) & BIT_MASK_MACID63_32_RELEASE_8822B) \
- << BIT_SHIFT_MACID63_32_RELEASE_8822B)
-#define BIT_GET_MACID63_32_RELEASE_8822B(x) \
- (((x) >> BIT_SHIFT_MACID63_32_RELEASE_8822B) & \
- BIT_MASK_MACID63_32_RELEASE_8822B)
-
-/* 2 REG_MACID_RELEASE2_8822B */
-
-#define BIT_SHIFT_MACID95_64_RELEASE_8822B 0
-#define BIT_MASK_MACID95_64_RELEASE_8822B 0xffffffffL
-#define BIT_MACID95_64_RELEASE_8822B(x) \
- (((x) & BIT_MASK_MACID95_64_RELEASE_8822B) \
- << BIT_SHIFT_MACID95_64_RELEASE_8822B)
-#define BIT_GET_MACID95_64_RELEASE_8822B(x) \
- (((x) >> BIT_SHIFT_MACID95_64_RELEASE_8822B) & \
- BIT_MASK_MACID95_64_RELEASE_8822B)
-
-/* 2 REG_MACID_RELEASE3_8822B */
-
-#define BIT_SHIFT_MACID127_96_RELEASE_8822B 0
-#define BIT_MASK_MACID127_96_RELEASE_8822B 0xffffffffL
-#define BIT_MACID127_96_RELEASE_8822B(x) \
- (((x) & BIT_MASK_MACID127_96_RELEASE_8822B) \
- << BIT_SHIFT_MACID127_96_RELEASE_8822B)
-#define BIT_GET_MACID127_96_RELEASE_8822B(x) \
- (((x) >> BIT_SHIFT_MACID127_96_RELEASE_8822B) & \
- BIT_MASK_MACID127_96_RELEASE_8822B)
-
-/* 2 REG_MACID_RELEASE_SETTING_8822B */
-#define BIT_MACID_VALUE_8822B BIT(7)
-
-#define BIT_SHIFT_MACID_OFFSET_8822B 0
-#define BIT_MASK_MACID_OFFSET_8822B 0x7f
-#define BIT_MACID_OFFSET_8822B(x) \
- (((x) & BIT_MASK_MACID_OFFSET_8822B) << BIT_SHIFT_MACID_OFFSET_8822B)
-#define BIT_GET_MACID_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_MACID_OFFSET_8822B) & BIT_MASK_MACID_OFFSET_8822B)
-
-/* 2 REG_FAST_EDCA_VOVI_SETTING_8822B */
-
-#define BIT_SHIFT_VI_FAST_EDCA_TO_8822B 24
-#define BIT_MASK_VI_FAST_EDCA_TO_8822B 0xff
-#define BIT_VI_FAST_EDCA_TO_8822B(x) \
- (((x) & BIT_MASK_VI_FAST_EDCA_TO_8822B) \
- << BIT_SHIFT_VI_FAST_EDCA_TO_8822B)
-#define BIT_GET_VI_FAST_EDCA_TO_8822B(x) \
- (((x) >> BIT_SHIFT_VI_FAST_EDCA_TO_8822B) & \
- BIT_MASK_VI_FAST_EDCA_TO_8822B)
-
-#define BIT_VI_THRESHOLD_SEL_8822B BIT(23)
-
-#define BIT_SHIFT_VI_FAST_EDCA_PKT_TH_8822B 16
-#define BIT_MASK_VI_FAST_EDCA_PKT_TH_8822B 0x7f
-#define BIT_VI_FAST_EDCA_PKT_TH_8822B(x) \
- (((x) & BIT_MASK_VI_FAST_EDCA_PKT_TH_8822B) \
- << BIT_SHIFT_VI_FAST_EDCA_PKT_TH_8822B)
-#define BIT_GET_VI_FAST_EDCA_PKT_TH_8822B(x) \
- (((x) >> BIT_SHIFT_VI_FAST_EDCA_PKT_TH_8822B) & \
- BIT_MASK_VI_FAST_EDCA_PKT_TH_8822B)
-
-#define BIT_SHIFT_VO_FAST_EDCA_TO_8822B 8
-#define BIT_MASK_VO_FAST_EDCA_TO_8822B 0xff
-#define BIT_VO_FAST_EDCA_TO_8822B(x) \
- (((x) & BIT_MASK_VO_FAST_EDCA_TO_8822B) \
- << BIT_SHIFT_VO_FAST_EDCA_TO_8822B)
-#define BIT_GET_VO_FAST_EDCA_TO_8822B(x) \
- (((x) >> BIT_SHIFT_VO_FAST_EDCA_TO_8822B) & \
- BIT_MASK_VO_FAST_EDCA_TO_8822B)
-
-#define BIT_VO_THRESHOLD_SEL_8822B BIT(7)
-
-#define BIT_SHIFT_VO_FAST_EDCA_PKT_TH_8822B 0
-#define BIT_MASK_VO_FAST_EDCA_PKT_TH_8822B 0x7f
-#define BIT_VO_FAST_EDCA_PKT_TH_8822B(x) \
- (((x) & BIT_MASK_VO_FAST_EDCA_PKT_TH_8822B) \
- << BIT_SHIFT_VO_FAST_EDCA_PKT_TH_8822B)
-#define BIT_GET_VO_FAST_EDCA_PKT_TH_8822B(x) \
- (((x) >> BIT_SHIFT_VO_FAST_EDCA_PKT_TH_8822B) & \
- BIT_MASK_VO_FAST_EDCA_PKT_TH_8822B)
-
-/* 2 REG_FAST_EDCA_BEBK_SETTING_8822B */
-
-#define BIT_SHIFT_BK_FAST_EDCA_TO_8822B 24
-#define BIT_MASK_BK_FAST_EDCA_TO_8822B 0xff
-#define BIT_BK_FAST_EDCA_TO_8822B(x) \
- (((x) & BIT_MASK_BK_FAST_EDCA_TO_8822B) \
- << BIT_SHIFT_BK_FAST_EDCA_TO_8822B)
-#define BIT_GET_BK_FAST_EDCA_TO_8822B(x) \
- (((x) >> BIT_SHIFT_BK_FAST_EDCA_TO_8822B) & \
- BIT_MASK_BK_FAST_EDCA_TO_8822B)
-
-#define BIT_BK_THRESHOLD_SEL_8822B BIT(23)
-
-#define BIT_SHIFT_BK_FAST_EDCA_PKT_TH_8822B 16
-#define BIT_MASK_BK_FAST_EDCA_PKT_TH_8822B 0x7f
-#define BIT_BK_FAST_EDCA_PKT_TH_8822B(x) \
- (((x) & BIT_MASK_BK_FAST_EDCA_PKT_TH_8822B) \
- << BIT_SHIFT_BK_FAST_EDCA_PKT_TH_8822B)
-#define BIT_GET_BK_FAST_EDCA_PKT_TH_8822B(x) \
- (((x) >> BIT_SHIFT_BK_FAST_EDCA_PKT_TH_8822B) & \
- BIT_MASK_BK_FAST_EDCA_PKT_TH_8822B)
-
-#define BIT_SHIFT_BE_FAST_EDCA_TO_8822B 8
-#define BIT_MASK_BE_FAST_EDCA_TO_8822B 0xff
-#define BIT_BE_FAST_EDCA_TO_8822B(x) \
- (((x) & BIT_MASK_BE_FAST_EDCA_TO_8822B) \
- << BIT_SHIFT_BE_FAST_EDCA_TO_8822B)
-#define BIT_GET_BE_FAST_EDCA_TO_8822B(x) \
- (((x) >> BIT_SHIFT_BE_FAST_EDCA_TO_8822B) & \
- BIT_MASK_BE_FAST_EDCA_TO_8822B)
-
-#define BIT_BE_THRESHOLD_SEL_8822B BIT(7)
-
-#define BIT_SHIFT_BE_FAST_EDCA_PKT_TH_8822B 0
-#define BIT_MASK_BE_FAST_EDCA_PKT_TH_8822B 0x7f
-#define BIT_BE_FAST_EDCA_PKT_TH_8822B(x) \
- (((x) & BIT_MASK_BE_FAST_EDCA_PKT_TH_8822B) \
- << BIT_SHIFT_BE_FAST_EDCA_PKT_TH_8822B)
-#define BIT_GET_BE_FAST_EDCA_PKT_TH_8822B(x) \
- (((x) >> BIT_SHIFT_BE_FAST_EDCA_PKT_TH_8822B) & \
- BIT_MASK_BE_FAST_EDCA_PKT_TH_8822B)
-
-/* 2 REG_MACID_DROP0_8822B */
-
-#define BIT_SHIFT_MACID31_0_DROP_8822B 0
-#define BIT_MASK_MACID31_0_DROP_8822B 0xffffffffL
-#define BIT_MACID31_0_DROP_8822B(x) \
- (((x) & BIT_MASK_MACID31_0_DROP_8822B) \
- << BIT_SHIFT_MACID31_0_DROP_8822B)
-#define BIT_GET_MACID31_0_DROP_8822B(x) \
- (((x) >> BIT_SHIFT_MACID31_0_DROP_8822B) & \
- BIT_MASK_MACID31_0_DROP_8822B)
-
-/* 2 REG_MACID_DROP1_8822B */
-
-#define BIT_SHIFT_MACID63_32_DROP_8822B 0
-#define BIT_MASK_MACID63_32_DROP_8822B 0xffffffffL
-#define BIT_MACID63_32_DROP_8822B(x) \
- (((x) & BIT_MASK_MACID63_32_DROP_8822B) \
- << BIT_SHIFT_MACID63_32_DROP_8822B)
-#define BIT_GET_MACID63_32_DROP_8822B(x) \
- (((x) >> BIT_SHIFT_MACID63_32_DROP_8822B) & \
- BIT_MASK_MACID63_32_DROP_8822B)
-
-/* 2 REG_MACID_DROP2_8822B */
-
-#define BIT_SHIFT_MACID95_64_DROP_8822B 0
-#define BIT_MASK_MACID95_64_DROP_8822B 0xffffffffL
-#define BIT_MACID95_64_DROP_8822B(x) \
- (((x) & BIT_MASK_MACID95_64_DROP_8822B) \
- << BIT_SHIFT_MACID95_64_DROP_8822B)
-#define BIT_GET_MACID95_64_DROP_8822B(x) \
- (((x) >> BIT_SHIFT_MACID95_64_DROP_8822B) & \
- BIT_MASK_MACID95_64_DROP_8822B)
-
-/* 2 REG_MACID_DROP3_8822B */
-
-#define BIT_SHIFT_MACID127_96_DROP_8822B 0
-#define BIT_MASK_MACID127_96_DROP_8822B 0xffffffffL
-#define BIT_MACID127_96_DROP_8822B(x) \
- (((x) & BIT_MASK_MACID127_96_DROP_8822B) \
- << BIT_SHIFT_MACID127_96_DROP_8822B)
-#define BIT_GET_MACID127_96_DROP_8822B(x) \
- (((x) >> BIT_SHIFT_MACID127_96_DROP_8822B) & \
- BIT_MASK_MACID127_96_DROP_8822B)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_0_8822B */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_0_8822B 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_0_8822B 0xffffffffL
-#define BIT_R_MACID_RELEASE_SUCCESS_0_8822B(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_0_8822B) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_0_8822B)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_0_8822B(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_0_8822B) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_0_8822B)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_1_8822B */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_1_8822B 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_1_8822B 0xffffffffL
-#define BIT_R_MACID_RELEASE_SUCCESS_1_8822B(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_1_8822B) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_1_8822B)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_1_8822B(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_1_8822B) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_1_8822B)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_2_8822B */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_2_8822B 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_2_8822B 0xffffffffL
-#define BIT_R_MACID_RELEASE_SUCCESS_2_8822B(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_2_8822B) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_2_8822B)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_2_8822B(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_2_8822B) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_2_8822B)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_3_8822B */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_3_8822B 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_3_8822B 0xffffffffL
-#define BIT_R_MACID_RELEASE_SUCCESS_3_8822B(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_3_8822B) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_3_8822B)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_3_8822B(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_3_8822B) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_3_8822B)
-
-/* 2 REG_MGG_FIFO_CRTL_8822B */
-#define BIT_R_MGG_FIFO_EN_8822B BIT(31)
-
-#define BIT_SHIFT_R_MGG_FIFO_PG_SIZE_8822B 28
-#define BIT_MASK_R_MGG_FIFO_PG_SIZE_8822B 0x7
-#define BIT_R_MGG_FIFO_PG_SIZE_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_PG_SIZE_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_PG_SIZE_8822B)
-#define BIT_GET_R_MGG_FIFO_PG_SIZE_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_PG_SIZE_8822B) & \
- BIT_MASK_R_MGG_FIFO_PG_SIZE_8822B)
-
-#define BIT_SHIFT_R_MGG_FIFO_START_PG_8822B 16
-#define BIT_MASK_R_MGG_FIFO_START_PG_8822B 0xfff
-#define BIT_R_MGG_FIFO_START_PG_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_START_PG_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_START_PG_8822B)
-#define BIT_GET_R_MGG_FIFO_START_PG_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_START_PG_8822B) & \
- BIT_MASK_R_MGG_FIFO_START_PG_8822B)
-
-#define BIT_SHIFT_R_MGG_FIFO_SIZE_8822B 14
-#define BIT_MASK_R_MGG_FIFO_SIZE_8822B 0x3
-#define BIT_R_MGG_FIFO_SIZE_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_SIZE_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_SIZE_8822B)
-#define BIT_GET_R_MGG_FIFO_SIZE_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_SIZE_8822B) & \
- BIT_MASK_R_MGG_FIFO_SIZE_8822B)
-
-#define BIT_R_MGG_FIFO_PAUSE_8822B BIT(13)
-
-#define BIT_SHIFT_R_MGG_FIFO_RPTR_8822B 8
-#define BIT_MASK_R_MGG_FIFO_RPTR_8822B 0x1f
-#define BIT_R_MGG_FIFO_RPTR_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_RPTR_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_RPTR_8822B)
-#define BIT_GET_R_MGG_FIFO_RPTR_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_RPTR_8822B) & \
- BIT_MASK_R_MGG_FIFO_RPTR_8822B)
-
-#define BIT_R_MGG_FIFO_OV_8822B BIT(7)
-#define BIT_R_MGG_FIFO_WPTR_ERROR_8822B BIT(6)
-#define BIT_R_EN_CPU_LIFETIME_8822B BIT(5)
-
-#define BIT_SHIFT_R_MGG_FIFO_WPTR_8822B 0
-#define BIT_MASK_R_MGG_FIFO_WPTR_8822B 0x1f
-#define BIT_R_MGG_FIFO_WPTR_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_WPTR_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_WPTR_8822B)
-#define BIT_GET_R_MGG_FIFO_WPTR_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_WPTR_8822B) & \
- BIT_MASK_R_MGG_FIFO_WPTR_8822B)
-
-/* 2 REG_MGG_FIFO_INT_8822B */
-
-#define BIT_SHIFT_R_MGG_FIFO_INT_FLAG_8822B 16
-#define BIT_MASK_R_MGG_FIFO_INT_FLAG_8822B 0xffff
-#define BIT_R_MGG_FIFO_INT_FLAG_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_INT_FLAG_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_INT_FLAG_8822B)
-#define BIT_GET_R_MGG_FIFO_INT_FLAG_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_INT_FLAG_8822B) & \
- BIT_MASK_R_MGG_FIFO_INT_FLAG_8822B)
-
-#define BIT_SHIFT_R_MGG_FIFO_INT_MASK_8822B 0
-#define BIT_MASK_R_MGG_FIFO_INT_MASK_8822B 0xffff
-#define BIT_R_MGG_FIFO_INT_MASK_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_INT_MASK_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_INT_MASK_8822B)
-#define BIT_GET_R_MGG_FIFO_INT_MASK_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_INT_MASK_8822B) & \
- BIT_MASK_R_MGG_FIFO_INT_MASK_8822B)
-
-/* 2 REG_MGG_FIFO_LIFETIME_8822B */
-
-#define BIT_SHIFT_R_MGG_FIFO_LIFETIME_8822B 16
-#define BIT_MASK_R_MGG_FIFO_LIFETIME_8822B 0xffff
-#define BIT_R_MGG_FIFO_LIFETIME_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_LIFETIME_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_LIFETIME_8822B)
-#define BIT_GET_R_MGG_FIFO_LIFETIME_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_LIFETIME_8822B) & \
- BIT_MASK_R_MGG_FIFO_LIFETIME_8822B)
-
-#define BIT_SHIFT_R_MGG_FIFO_VALID_MAP_8822B 0
-#define BIT_MASK_R_MGG_FIFO_VALID_MAP_8822B 0xffff
-#define BIT_R_MGG_FIFO_VALID_MAP_8822B(x) \
- (((x) & BIT_MASK_R_MGG_FIFO_VALID_MAP_8822B) \
- << BIT_SHIFT_R_MGG_FIFO_VALID_MAP_8822B)
-#define BIT_GET_R_MGG_FIFO_VALID_MAP_8822B(x) \
- (((x) >> BIT_SHIFT_R_MGG_FIFO_VALID_MAP_8822B) & \
- BIT_MASK_R_MGG_FIFO_VALID_MAP_8822B)
-
-/* 2 REG_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B */
-
-#define BIT_SHIFT_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B 0
-#define BIT_MASK_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B 0x7f
-#define BIT_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B(x) \
- (((x) & BIT_MASK_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B) \
- << BIT_SHIFT_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B)
-#define BIT_GET_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B) & \
- BIT_MASK_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B)
-
-/* 2 REG_MACID_SHCUT_OFFSET_8822B */
-
-#define BIT_SHIFT_MACID_SHCUT_OFFSET_V1_8822B 0
-#define BIT_MASK_MACID_SHCUT_OFFSET_V1_8822B 0xff
-#define BIT_MACID_SHCUT_OFFSET_V1_8822B(x) \
- (((x) & BIT_MASK_MACID_SHCUT_OFFSET_V1_8822B) \
- << BIT_SHIFT_MACID_SHCUT_OFFSET_V1_8822B)
-#define BIT_GET_MACID_SHCUT_OFFSET_V1_8822B(x) \
- (((x) >> BIT_SHIFT_MACID_SHCUT_OFFSET_V1_8822B) & \
- BIT_MASK_MACID_SHCUT_OFFSET_V1_8822B)
-
-/* 2 REG_MU_TX_CTL_8822B */
-#define BIT_R_EN_REVERS_GTAB_8822B BIT(6)
-
-#define BIT_SHIFT_R_MU_TABLE_VALID_8822B 0
-#define BIT_MASK_R_MU_TABLE_VALID_8822B 0x3f
-#define BIT_R_MU_TABLE_VALID_8822B(x) \
- (((x) & BIT_MASK_R_MU_TABLE_VALID_8822B) \
- << BIT_SHIFT_R_MU_TABLE_VALID_8822B)
-#define BIT_GET_R_MU_TABLE_VALID_8822B(x) \
- (((x) >> BIT_SHIFT_R_MU_TABLE_VALID_8822B) & \
- BIT_MASK_R_MU_TABLE_VALID_8822B)
-
-/* 2 REG_MU_STA_GID_VLD_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_R_MU_STA_GTAB_VALID_8822B 0
-#define BIT_MASK_R_MU_STA_GTAB_VALID_8822B 0xffffffffL
-#define BIT_R_MU_STA_GTAB_VALID_8822B(x) \
- (((x) & BIT_MASK_R_MU_STA_GTAB_VALID_8822B) \
- << BIT_SHIFT_R_MU_STA_GTAB_VALID_8822B)
-#define BIT_GET_R_MU_STA_GTAB_VALID_8822B(x) \
- (((x) >> BIT_SHIFT_R_MU_STA_GTAB_VALID_8822B) & \
- BIT_MASK_R_MU_STA_GTAB_VALID_8822B)
-
-#define BIT_SHIFT_R_MU_STA_GTAB_VALID_8822B 0
-#define BIT_MASK_R_MU_STA_GTAB_VALID_8822B 0xffffffffL
-#define BIT_R_MU_STA_GTAB_VALID_8822B(x) \
- (((x) & BIT_MASK_R_MU_STA_GTAB_VALID_8822B) \
- << BIT_SHIFT_R_MU_STA_GTAB_VALID_8822B)
-#define BIT_GET_R_MU_STA_GTAB_VALID_8822B(x) \
- (((x) >> BIT_SHIFT_R_MU_STA_GTAB_VALID_8822B) & \
- BIT_MASK_R_MU_STA_GTAB_VALID_8822B)
-
-/* 2 REG_MU_STA_USER_POS_INFO_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_R_MU_STA_GTAB_POSITION_8822B 0
-#define BIT_MASK_R_MU_STA_GTAB_POSITION_8822B 0xffffffffffffffffL
-#define BIT_R_MU_STA_GTAB_POSITION_8822B(x) \
- (((x) & BIT_MASK_R_MU_STA_GTAB_POSITION_8822B) \
- << BIT_SHIFT_R_MU_STA_GTAB_POSITION_8822B)
-#define BIT_GET_R_MU_STA_GTAB_POSITION_8822B(x) \
- (((x) >> BIT_SHIFT_R_MU_STA_GTAB_POSITION_8822B) & \
- BIT_MASK_R_MU_STA_GTAB_POSITION_8822B)
-
-#define BIT_SHIFT_R_MU_STA_GTAB_POSITION_8822B 0
-#define BIT_MASK_R_MU_STA_GTAB_POSITION_8822B 0xffffffffffffffffL
-#define BIT_R_MU_STA_GTAB_POSITION_8822B(x) \
- (((x) & BIT_MASK_R_MU_STA_GTAB_POSITION_8822B) \
- << BIT_SHIFT_R_MU_STA_GTAB_POSITION_8822B)
-#define BIT_GET_R_MU_STA_GTAB_POSITION_8822B(x) \
- (((x) >> BIT_SHIFT_R_MU_STA_GTAB_POSITION_8822B) & \
- BIT_MASK_R_MU_STA_GTAB_POSITION_8822B)
-
-/* 2 REG_MU_TRX_DBG_CNT_8822B */
-#define BIT_MU_DNGCNT_RST_8822B BIT(20)
-
-#define BIT_SHIFT_MU_DBGCNT_SEL_8822B 16
-#define BIT_MASK_MU_DBGCNT_SEL_8822B 0xf
-#define BIT_MU_DBGCNT_SEL_8822B(x) \
- (((x) & BIT_MASK_MU_DBGCNT_SEL_8822B) << BIT_SHIFT_MU_DBGCNT_SEL_8822B)
-#define BIT_GET_MU_DBGCNT_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_MU_DBGCNT_SEL_8822B) & BIT_MASK_MU_DBGCNT_SEL_8822B)
-
-#define BIT_SHIFT_MU_DNGCNT_8822B 0
-#define BIT_MASK_MU_DNGCNT_8822B 0xffff
-#define BIT_MU_DNGCNT_8822B(x) \
- (((x) & BIT_MASK_MU_DNGCNT_8822B) << BIT_SHIFT_MU_DNGCNT_8822B)
-#define BIT_GET_MU_DNGCNT_8822B(x) \
- (((x) >> BIT_SHIFT_MU_DNGCNT_8822B) & BIT_MASK_MU_DNGCNT_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_EDCA_VO_PARAM_8822B */
-
-#define BIT_SHIFT_TXOPLIMIT_8822B 16
-#define BIT_MASK_TXOPLIMIT_8822B 0x7ff
-#define BIT_TXOPLIMIT_8822B(x) \
- (((x) & BIT_MASK_TXOPLIMIT_8822B) << BIT_SHIFT_TXOPLIMIT_8822B)
-#define BIT_GET_TXOPLIMIT_8822B(x) \
- (((x) >> BIT_SHIFT_TXOPLIMIT_8822B) & BIT_MASK_TXOPLIMIT_8822B)
-
-#define BIT_SHIFT_CW_8822B 8
-#define BIT_MASK_CW_8822B 0xff
-#define BIT_CW_8822B(x) (((x) & BIT_MASK_CW_8822B) << BIT_SHIFT_CW_8822B)
-#define BIT_GET_CW_8822B(x) (((x) >> BIT_SHIFT_CW_8822B) & BIT_MASK_CW_8822B)
-
-#define BIT_SHIFT_AIFS_8822B 0
-#define BIT_MASK_AIFS_8822B 0xff
-#define BIT_AIFS_8822B(x) (((x) & BIT_MASK_AIFS_8822B) << BIT_SHIFT_AIFS_8822B)
-#define BIT_GET_AIFS_8822B(x) \
- (((x) >> BIT_SHIFT_AIFS_8822B) & BIT_MASK_AIFS_8822B)
-
-/* 2 REG_EDCA_VI_PARAM_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_TXOPLIMIT_8822B 16
-#define BIT_MASK_TXOPLIMIT_8822B 0x7ff
-#define BIT_TXOPLIMIT_8822B(x) \
- (((x) & BIT_MASK_TXOPLIMIT_8822B) << BIT_SHIFT_TXOPLIMIT_8822B)
-#define BIT_GET_TXOPLIMIT_8822B(x) \
- (((x) >> BIT_SHIFT_TXOPLIMIT_8822B) & BIT_MASK_TXOPLIMIT_8822B)
-
-#define BIT_SHIFT_CW_8822B 8
-#define BIT_MASK_CW_8822B 0xff
-#define BIT_CW_8822B(x) (((x) & BIT_MASK_CW_8822B) << BIT_SHIFT_CW_8822B)
-#define BIT_GET_CW_8822B(x) (((x) >> BIT_SHIFT_CW_8822B) & BIT_MASK_CW_8822B)
-
-#define BIT_SHIFT_AIFS_8822B 0
-#define BIT_MASK_AIFS_8822B 0xff
-#define BIT_AIFS_8822B(x) (((x) & BIT_MASK_AIFS_8822B) << BIT_SHIFT_AIFS_8822B)
-#define BIT_GET_AIFS_8822B(x) \
- (((x) >> BIT_SHIFT_AIFS_8822B) & BIT_MASK_AIFS_8822B)
-
-/* 2 REG_EDCA_BE_PARAM_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_TXOPLIMIT_8822B 16
-#define BIT_MASK_TXOPLIMIT_8822B 0x7ff
-#define BIT_TXOPLIMIT_8822B(x) \
- (((x) & BIT_MASK_TXOPLIMIT_8822B) << BIT_SHIFT_TXOPLIMIT_8822B)
-#define BIT_GET_TXOPLIMIT_8822B(x) \
- (((x) >> BIT_SHIFT_TXOPLIMIT_8822B) & BIT_MASK_TXOPLIMIT_8822B)
-
-#define BIT_SHIFT_CW_8822B 8
-#define BIT_MASK_CW_8822B 0xff
-#define BIT_CW_8822B(x) (((x) & BIT_MASK_CW_8822B) << BIT_SHIFT_CW_8822B)
-#define BIT_GET_CW_8822B(x) (((x) >> BIT_SHIFT_CW_8822B) & BIT_MASK_CW_8822B)
-
-#define BIT_SHIFT_AIFS_8822B 0
-#define BIT_MASK_AIFS_8822B 0xff
-#define BIT_AIFS_8822B(x) (((x) & BIT_MASK_AIFS_8822B) << BIT_SHIFT_AIFS_8822B)
-#define BIT_GET_AIFS_8822B(x) \
- (((x) >> BIT_SHIFT_AIFS_8822B) & BIT_MASK_AIFS_8822B)
-
-/* 2 REG_EDCA_BK_PARAM_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_TXOPLIMIT_8822B 16
-#define BIT_MASK_TXOPLIMIT_8822B 0x7ff
-#define BIT_TXOPLIMIT_8822B(x) \
- (((x) & BIT_MASK_TXOPLIMIT_8822B) << BIT_SHIFT_TXOPLIMIT_8822B)
-#define BIT_GET_TXOPLIMIT_8822B(x) \
- (((x) >> BIT_SHIFT_TXOPLIMIT_8822B) & BIT_MASK_TXOPLIMIT_8822B)
-
-#define BIT_SHIFT_CW_8822B 8
-#define BIT_MASK_CW_8822B 0xff
-#define BIT_CW_8822B(x) (((x) & BIT_MASK_CW_8822B) << BIT_SHIFT_CW_8822B)
-#define BIT_GET_CW_8822B(x) (((x) >> BIT_SHIFT_CW_8822B) & BIT_MASK_CW_8822B)
-
-#define BIT_SHIFT_AIFS_8822B 0
-#define BIT_MASK_AIFS_8822B 0xff
-#define BIT_AIFS_8822B(x) (((x) & BIT_MASK_AIFS_8822B) << BIT_SHIFT_AIFS_8822B)
-#define BIT_GET_AIFS_8822B(x) \
- (((x) >> BIT_SHIFT_AIFS_8822B) & BIT_MASK_AIFS_8822B)
-
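
The EDCA parameter registers above (VO, VI, BE, BK) all pack a TXOP limit, a contention-window byte and an AIFS byte into one word through the shared TXOPLIMIT/CW/AIFS macros. A minimal sketch of composing and decoding such a word, with the macro definitions copied from the blocks above and arbitrary example field values (not recommended settings, and not part of the removed file):

    /* Compose one EDCA parameter word from its three fields, then decode it. */
    #include <stdint.h>
    #include <stdio.h>

    #define BIT_SHIFT_TXOPLIMIT_8822B 16
    #define BIT_MASK_TXOPLIMIT_8822B 0x7ff
    #define BIT_TXOPLIMIT_8822B(x) \
            (((x) & BIT_MASK_TXOPLIMIT_8822B) << BIT_SHIFT_TXOPLIMIT_8822B)
    #define BIT_GET_TXOPLIMIT_8822B(x) \
            (((x) >> BIT_SHIFT_TXOPLIMIT_8822B) & BIT_MASK_TXOPLIMIT_8822B)

    #define BIT_SHIFT_CW_8822B 8
    #define BIT_MASK_CW_8822B 0xff
    #define BIT_CW_8822B(x) (((x) & BIT_MASK_CW_8822B) << BIT_SHIFT_CW_8822B)
    #define BIT_GET_CW_8822B(x) (((x) >> BIT_SHIFT_CW_8822B) & BIT_MASK_CW_8822B)

    #define BIT_SHIFT_AIFS_8822B 0
    #define BIT_MASK_AIFS_8822B 0xff
    #define BIT_AIFS_8822B(x) (((x) & BIT_MASK_AIFS_8822B) << BIT_SHIFT_AIFS_8822B)
    #define BIT_GET_AIFS_8822B(x) \
            (((x) >> BIT_SHIFT_AIFS_8822B) & BIT_MASK_AIFS_8822B)

    int main(void)
    {
            /* Pack TXOP limit, contention-window byte and AIFS into one word. */
            uint32_t edca = BIT_TXOPLIMIT_8822B(0x5e) |
                            BIT_CW_8822B(0xa4) |
                            BIT_AIFS_8822B(0x2b);

            printf("edca=0x%08x txop=0x%03x cw=0x%02x aifs=0x%02x\n",
                   (unsigned int)edca,
                   (unsigned int)BIT_GET_TXOPLIMIT_8822B(edca),
                   (unsigned int)BIT_GET_CW_8822B(edca),
                   (unsigned int)BIT_GET_AIFS_8822B(edca));
            return 0;
    }
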
-/* 2 REG_BCNTCFG_8822B */
-
-#define BIT_SHIFT_BCNCW_MAX_8822B 12
-#define BIT_MASK_BCNCW_MAX_8822B 0xf
-#define BIT_BCNCW_MAX_8822B(x) \
- (((x) & BIT_MASK_BCNCW_MAX_8822B) << BIT_SHIFT_BCNCW_MAX_8822B)
-#define BIT_GET_BCNCW_MAX_8822B(x) \
- (((x) >> BIT_SHIFT_BCNCW_MAX_8822B) & BIT_MASK_BCNCW_MAX_8822B)
-
-#define BIT_SHIFT_BCNCW_MIN_8822B 8
-#define BIT_MASK_BCNCW_MIN_8822B 0xf
-#define BIT_BCNCW_MIN_8822B(x) \
- (((x) & BIT_MASK_BCNCW_MIN_8822B) << BIT_SHIFT_BCNCW_MIN_8822B)
-#define BIT_GET_BCNCW_MIN_8822B(x) \
- (((x) >> BIT_SHIFT_BCNCW_MIN_8822B) & BIT_MASK_BCNCW_MIN_8822B)
-
-#define BIT_SHIFT_BCNIFS_8822B 0
-#define BIT_MASK_BCNIFS_8822B 0xff
-#define BIT_BCNIFS_8822B(x) \
- (((x) & BIT_MASK_BCNIFS_8822B) << BIT_SHIFT_BCNIFS_8822B)
-#define BIT_GET_BCNIFS_8822B(x) \
- (((x) >> BIT_SHIFT_BCNIFS_8822B) & BIT_MASK_BCNIFS_8822B)
-
-/* 2 REG_PIFS_8822B */
-
-#define BIT_SHIFT_PIFS_8822B 0
-#define BIT_MASK_PIFS_8822B 0xff
-#define BIT_PIFS_8822B(x) (((x) & BIT_MASK_PIFS_8822B) << BIT_SHIFT_PIFS_8822B)
-#define BIT_GET_PIFS_8822B(x) \
- (((x) >> BIT_SHIFT_PIFS_8822B) & BIT_MASK_PIFS_8822B)
-
-/* 2 REG_RDG_PIFS_8822B */
-
-#define BIT_SHIFT_RDG_PIFS_8822B 0
-#define BIT_MASK_RDG_PIFS_8822B 0xff
-#define BIT_RDG_PIFS_8822B(x) \
- (((x) & BIT_MASK_RDG_PIFS_8822B) << BIT_SHIFT_RDG_PIFS_8822B)
-#define BIT_GET_RDG_PIFS_8822B(x) \
- (((x) >> BIT_SHIFT_RDG_PIFS_8822B) & BIT_MASK_RDG_PIFS_8822B)
-
-/* 2 REG_SIFS_8822B */
-
-#define BIT_SHIFT_SIFS_OFDM_TRX_8822B 24
-#define BIT_MASK_SIFS_OFDM_TRX_8822B 0xff
-#define BIT_SIFS_OFDM_TRX_8822B(x) \
- (((x) & BIT_MASK_SIFS_OFDM_TRX_8822B) << BIT_SHIFT_SIFS_OFDM_TRX_8822B)
-#define BIT_GET_SIFS_OFDM_TRX_8822B(x) \
- (((x) >> BIT_SHIFT_SIFS_OFDM_TRX_8822B) & BIT_MASK_SIFS_OFDM_TRX_8822B)
-
-#define BIT_SHIFT_SIFS_CCK_TRX_8822B 16
-#define BIT_MASK_SIFS_CCK_TRX_8822B 0xff
-#define BIT_SIFS_CCK_TRX_8822B(x) \
- (((x) & BIT_MASK_SIFS_CCK_TRX_8822B) << BIT_SHIFT_SIFS_CCK_TRX_8822B)
-#define BIT_GET_SIFS_CCK_TRX_8822B(x) \
- (((x) >> BIT_SHIFT_SIFS_CCK_TRX_8822B) & BIT_MASK_SIFS_CCK_TRX_8822B)
-
-#define BIT_SHIFT_SIFS_OFDM_CTX_8822B 8
-#define BIT_MASK_SIFS_OFDM_CTX_8822B 0xff
-#define BIT_SIFS_OFDM_CTX_8822B(x) \
- (((x) & BIT_MASK_SIFS_OFDM_CTX_8822B) << BIT_SHIFT_SIFS_OFDM_CTX_8822B)
-#define BIT_GET_SIFS_OFDM_CTX_8822B(x) \
- (((x) >> BIT_SHIFT_SIFS_OFDM_CTX_8822B) & BIT_MASK_SIFS_OFDM_CTX_8822B)
-
-#define BIT_SHIFT_SIFS_CCK_CTX_8822B 0
-#define BIT_MASK_SIFS_CCK_CTX_8822B 0xff
-#define BIT_SIFS_CCK_CTX_8822B(x) \
- (((x) & BIT_MASK_SIFS_CCK_CTX_8822B) << BIT_SHIFT_SIFS_CCK_CTX_8822B)
-#define BIT_GET_SIFS_CCK_CTX_8822B(x) \
- (((x) >> BIT_SHIFT_SIFS_CCK_CTX_8822B) & BIT_MASK_SIFS_CCK_CTX_8822B)
-
-/* 2 REG_TSFTR_SYN_OFFSET_8822B */
-
-#define BIT_SHIFT_TSFTR_SNC_OFFSET_8822B 0
-#define BIT_MASK_TSFTR_SNC_OFFSET_8822B 0xffff
-#define BIT_TSFTR_SNC_OFFSET_8822B(x) \
- (((x) & BIT_MASK_TSFTR_SNC_OFFSET_8822B) \
- << BIT_SHIFT_TSFTR_SNC_OFFSET_8822B)
-#define BIT_GET_TSFTR_SNC_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_TSFTR_SNC_OFFSET_8822B) & \
- BIT_MASK_TSFTR_SNC_OFFSET_8822B)
-
-/* 2 REG_AGGR_BREAK_TIME_8822B */
-
-#define BIT_SHIFT_AGGR_BK_TIME_8822B 0
-#define BIT_MASK_AGGR_BK_TIME_8822B 0xff
-#define BIT_AGGR_BK_TIME_8822B(x) \
- (((x) & BIT_MASK_AGGR_BK_TIME_8822B) << BIT_SHIFT_AGGR_BK_TIME_8822B)
-#define BIT_GET_AGGR_BK_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_AGGR_BK_TIME_8822B) & BIT_MASK_AGGR_BK_TIME_8822B)
-
-/* 2 REG_SLOT_8822B */
-
-#define BIT_SHIFT_SLOT_8822B 0
-#define BIT_MASK_SLOT_8822B 0xff
-#define BIT_SLOT_8822B(x) (((x) & BIT_MASK_SLOT_8822B) << BIT_SHIFT_SLOT_8822B)
-#define BIT_GET_SLOT_8822B(x) \
- (((x) >> BIT_SHIFT_SLOT_8822B) & BIT_MASK_SLOT_8822B)
-
-/* 2 REG_TX_PTCL_CTRL_8822B */
-#define BIT_DIS_EDCCA_8822B BIT(15)
-#define BIT_DIS_CCA_8822B BIT(14)
-#define BIT_LSIG_TXOP_TXCMD_NAV_8822B BIT(13)
-#define BIT_SIFS_BK_EN_8822B BIT(12)
-
-#define BIT_SHIFT_TXQ_NAV_MSK_8822B 8
-#define BIT_MASK_TXQ_NAV_MSK_8822B 0xf
-#define BIT_TXQ_NAV_MSK_8822B(x) \
- (((x) & BIT_MASK_TXQ_NAV_MSK_8822B) << BIT_SHIFT_TXQ_NAV_MSK_8822B)
-#define BIT_GET_TXQ_NAV_MSK_8822B(x) \
- (((x) >> BIT_SHIFT_TXQ_NAV_MSK_8822B) & BIT_MASK_TXQ_NAV_MSK_8822B)
-
-#define BIT_DIS_CW_8822B BIT(7)
-#define BIT_NAV_END_TXOP_8822B BIT(6)
-#define BIT_RDG_END_TXOP_8822B BIT(5)
-#define BIT_AC_INBCN_HOLD_8822B BIT(4)
-#define BIT_MGTQ_TXOP_EN_8822B BIT(3)
-#define BIT_MGTQ_RTSMF_EN_8822B BIT(2)
-#define BIT_HIQ_RTSMF_EN_8822B BIT(1)
-#define BIT_BCN_RTSMF_EN_8822B BIT(0)
-
-/* 2 REG_TXPAUSE_8822B */
-#define BIT_STOP_BCN_HI_MGT_8822B BIT(7)
-#define BIT_MAC_STOPBCNQ_8822B BIT(6)
-#define BIT_MAC_STOPHIQ_8822B BIT(5)
-#define BIT_MAC_STOPMGQ_8822B BIT(4)
-#define BIT_MAC_STOPBK_8822B BIT(3)
-#define BIT_MAC_STOPBE_8822B BIT(2)
-#define BIT_MAC_STOPVI_8822B BIT(1)
-#define BIT_MAC_STOPVO_8822B BIT(0)
-
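
The single-bit flags in this header, such as the REG_TXPAUSE_8822B bits above, expand through a BIT() helper. The sketch below assumes the conventional BIT(n) == (1U << (n)) definition; it is illustrative only and not part of the removed file:

    /* Set and clear individual queue-pause flags built from BIT(). */
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))        /* assumed conventional helper */

    /* Copied from the REG_TXPAUSE_8822B block above. */
    #define BIT_MAC_STOPBE_8822B BIT(2)
    #define BIT_MAC_STOPVI_8822B BIT(1)
    #define BIT_MAC_STOPVO_8822B BIT(0)

    int main(void)
    {
            uint8_t txpause = 0;

            /* Pause the BE and VI queues... */
            txpause |= BIT_MAC_STOPBE_8822B | BIT_MAC_STOPVI_8822B;

            /* ...then resume VI by clearing its flag. */
            txpause &= (uint8_t)~BIT_MAC_STOPVI_8822B;

            printf("txpause=0x%02x vo_stopped=%d\n",
                   txpause, !!(txpause & BIT_MAC_STOPVO_8822B));
            return 0;
    }
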
-/* 2 REG_DIS_TXREQ_CLR_8822B */
-#define BIT_DIS_BT_CCA_8822B BIT(7)
-#define BIT_DIS_TXREQ_CLR_HI_8822B BIT(5)
-#define BIT_DIS_TXREQ_CLR_MGQ_8822B BIT(4)
-#define BIT_DIS_TXREQ_CLR_VO_8822B BIT(3)
-#define BIT_DIS_TXREQ_CLR_VI_8822B BIT(2)
-#define BIT_DIS_TXREQ_CLR_BE_8822B BIT(1)
-#define BIT_DIS_TXREQ_CLR_BK_8822B BIT(0)
-
-/* 2 REG_RD_CTRL_8822B */
-#define BIT_EN_CLR_TXREQ_INCCA_8822B BIT(15)
-#define BIT_DIS_TX_OVER_BCNQ_8822B BIT(14)
-#define BIT_EN_BCNERR_INCCCA_8822B BIT(13)
-#define BIT_EDCCA_MSK_CNTDOWN_EN_8822B BIT(11)
-#define BIT_DIS_TXOP_CFE_8822B BIT(10)
-#define BIT_DIS_LSIG_CFE_8822B BIT(9)
-#define BIT_DIS_STBC_CFE_8822B BIT(8)
-#define BIT_BKQ_RD_INIT_EN_8822B BIT(7)
-#define BIT_BEQ_RD_INIT_EN_8822B BIT(6)
-#define BIT_VIQ_RD_INIT_EN_8822B BIT(5)
-#define BIT_VOQ_RD_INIT_EN_8822B BIT(4)
-#define BIT_BKQ_RD_RESP_EN_8822B BIT(3)
-#define BIT_BEQ_RD_RESP_EN_8822B BIT(2)
-#define BIT_VIQ_RD_RESP_EN_8822B BIT(1)
-#define BIT_VOQ_RD_RESP_EN_8822B BIT(0)
-
-/* 2 REG_MBSSID_CTRL_8822B */
-#define BIT_MBID_BCNQ7_EN_8822B BIT(7)
-#define BIT_MBID_BCNQ6_EN_8822B BIT(6)
-#define BIT_MBID_BCNQ5_EN_8822B BIT(5)
-#define BIT_MBID_BCNQ4_EN_8822B BIT(4)
-#define BIT_MBID_BCNQ3_EN_8822B BIT(3)
-#define BIT_MBID_BCNQ2_EN_8822B BIT(2)
-#define BIT_MBID_BCNQ1_EN_8822B BIT(1)
-#define BIT_MBID_BCNQ0_EN_8822B BIT(0)
-
-/* 2 REG_P2PPS_CTRL_8822B */
-#define BIT_P2P_CTW_ALLSTASLEEP_8822B BIT(7)
-#define BIT_P2P_OFF_DISTX_EN_8822B BIT(6)
-#define BIT_PWR_MGT_EN_8822B BIT(5)
-#define BIT_P2P_NOA1_EN_8822B BIT(2)
-#define BIT_P2P_NOA0_EN_8822B BIT(1)
-
-/* 2 REG_PKT_LIFETIME_CTRL_8822B */
-#define BIT_EN_P2P_CTWND1_8822B BIT(23)
-#define BIT_EN_BKF_CLR_TXREQ_8822B BIT(22)
-#define BIT_EN_TSFBIT32_RST_P2P_8822B BIT(21)
-#define BIT_EN_BCN_TX_BTCCA_8822B BIT(20)
-#define BIT_DIS_PKT_TX_ATIM_8822B BIT(19)
-#define BIT_DIS_BCN_DIS_CTN_8822B BIT(18)
-#define BIT_EN_NAVEND_RST_TXOP_8822B BIT(17)
-#define BIT_EN_FILTER_CCA_8822B BIT(16)
-
-#define BIT_SHIFT_CCA_FILTER_THRS_8822B 8
-#define BIT_MASK_CCA_FILTER_THRS_8822B 0xff
-#define BIT_CCA_FILTER_THRS_8822B(x) \
- (((x) & BIT_MASK_CCA_FILTER_THRS_8822B) \
- << BIT_SHIFT_CCA_FILTER_THRS_8822B)
-#define BIT_GET_CCA_FILTER_THRS_8822B(x) \
- (((x) >> BIT_SHIFT_CCA_FILTER_THRS_8822B) & \
- BIT_MASK_CCA_FILTER_THRS_8822B)
-
-#define BIT_SHIFT_EDCCA_THRS_8822B 0
-#define BIT_MASK_EDCCA_THRS_8822B 0xff
-#define BIT_EDCCA_THRS_8822B(x) \
- (((x) & BIT_MASK_EDCCA_THRS_8822B) << BIT_SHIFT_EDCCA_THRS_8822B)
-#define BIT_GET_EDCCA_THRS_8822B(x) \
- (((x) >> BIT_SHIFT_EDCCA_THRS_8822B) & BIT_MASK_EDCCA_THRS_8822B)
-
-/* 2 REG_P2PPS_SPEC_STATE_8822B */
-#define BIT_SPEC_POWER_STATE_8822B BIT(7)
-#define BIT_SPEC_CTWINDOW_ON_8822B BIT(6)
-#define BIT_SPEC_BEACON_AREA_ON_8822B BIT(5)
-#define BIT_SPEC_CTWIN_EARLY_DISTX_8822B BIT(4)
-#define BIT_SPEC_NOA1_OFF_PERIOD_8822B BIT(3)
-#define BIT_SPEC_FORCE_DOZE1_8822B BIT(2)
-#define BIT_SPEC_NOA0_OFF_PERIOD_8822B BIT(1)
-#define BIT_SPEC_FORCE_DOZE0_8822B BIT(0)
-
-/* 2 REG_BAR_TX_CTRL_8822B */
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_P2PON_DIS_TXTIME_8822B 0
-#define BIT_MASK_P2PON_DIS_TXTIME_8822B 0xff
-#define BIT_P2PON_DIS_TXTIME_8822B(x) \
- (((x) & BIT_MASK_P2PON_DIS_TXTIME_8822B) \
- << BIT_SHIFT_P2PON_DIS_TXTIME_8822B)
-#define BIT_GET_P2PON_DIS_TXTIME_8822B(x) \
- (((x) >> BIT_SHIFT_P2PON_DIS_TXTIME_8822B) & \
- BIT_MASK_P2PON_DIS_TXTIME_8822B)
-
-/* 2 REG_QUEUE_INCOL_THR_8822B */
-
-#define BIT_SHIFT_BK_QUEUE_THR_8822B 24
-#define BIT_MASK_BK_QUEUE_THR_8822B 0xff
-#define BIT_BK_QUEUE_THR_8822B(x) \
- (((x) & BIT_MASK_BK_QUEUE_THR_8822B) << BIT_SHIFT_BK_QUEUE_THR_8822B)
-#define BIT_GET_BK_QUEUE_THR_8822B(x) \
- (((x) >> BIT_SHIFT_BK_QUEUE_THR_8822B) & BIT_MASK_BK_QUEUE_THR_8822B)
-
-#define BIT_SHIFT_BE_QUEUE_THR_8822B 16
-#define BIT_MASK_BE_QUEUE_THR_8822B 0xff
-#define BIT_BE_QUEUE_THR_8822B(x) \
- (((x) & BIT_MASK_BE_QUEUE_THR_8822B) << BIT_SHIFT_BE_QUEUE_THR_8822B)
-#define BIT_GET_BE_QUEUE_THR_8822B(x) \
- (((x) >> BIT_SHIFT_BE_QUEUE_THR_8822B) & BIT_MASK_BE_QUEUE_THR_8822B)
-
-#define BIT_SHIFT_VI_QUEUE_THR_8822B 8
-#define BIT_MASK_VI_QUEUE_THR_8822B 0xff
-#define BIT_VI_QUEUE_THR_8822B(x) \
- (((x) & BIT_MASK_VI_QUEUE_THR_8822B) << BIT_SHIFT_VI_QUEUE_THR_8822B)
-#define BIT_GET_VI_QUEUE_THR_8822B(x) \
- (((x) >> BIT_SHIFT_VI_QUEUE_THR_8822B) & BIT_MASK_VI_QUEUE_THR_8822B)
-
-#define BIT_SHIFT_VO_QUEUE_THR_8822B 0
-#define BIT_MASK_VO_QUEUE_THR_8822B 0xff
-#define BIT_VO_QUEUE_THR_8822B(x) \
- (((x) & BIT_MASK_VO_QUEUE_THR_8822B) << BIT_SHIFT_VO_QUEUE_THR_8822B)
-#define BIT_GET_VO_QUEUE_THR_8822B(x) \
- (((x) >> BIT_SHIFT_VO_QUEUE_THR_8822B) & BIT_MASK_VO_QUEUE_THR_8822B)
-
-/* 2 REG_QUEUE_INCOL_EN_8822B */
-#define BIT_QUEUE_INCOL_EN_8822B BIT(16)
-
-#define BIT_SHIFT_BE_TRIGGER_NUM_8822B 12
-#define BIT_MASK_BE_TRIGGER_NUM_8822B 0xf
-#define BIT_BE_TRIGGER_NUM_8822B(x) \
- (((x) & BIT_MASK_BE_TRIGGER_NUM_8822B) \
- << BIT_SHIFT_BE_TRIGGER_NUM_8822B)
-#define BIT_GET_BE_TRIGGER_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_BE_TRIGGER_NUM_8822B) & \
- BIT_MASK_BE_TRIGGER_NUM_8822B)
-
-#define BIT_SHIFT_BK_TRIGGER_NUM_8822B 8
-#define BIT_MASK_BK_TRIGGER_NUM_8822B 0xf
-#define BIT_BK_TRIGGER_NUM_8822B(x) \
- (((x) & BIT_MASK_BK_TRIGGER_NUM_8822B) \
- << BIT_SHIFT_BK_TRIGGER_NUM_8822B)
-#define BIT_GET_BK_TRIGGER_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_BK_TRIGGER_NUM_8822B) & \
- BIT_MASK_BK_TRIGGER_NUM_8822B)
-
-#define BIT_SHIFT_VI_TRIGGER_NUM_8822B 4
-#define BIT_MASK_VI_TRIGGER_NUM_8822B 0xf
-#define BIT_VI_TRIGGER_NUM_8822B(x) \
- (((x) & BIT_MASK_VI_TRIGGER_NUM_8822B) \
- << BIT_SHIFT_VI_TRIGGER_NUM_8822B)
-#define BIT_GET_VI_TRIGGER_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_VI_TRIGGER_NUM_8822B) & \
- BIT_MASK_VI_TRIGGER_NUM_8822B)
-
-#define BIT_SHIFT_VO_TRIGGER_NUM_8822B 0
-#define BIT_MASK_VO_TRIGGER_NUM_8822B 0xf
-#define BIT_VO_TRIGGER_NUM_8822B(x) \
- (((x) & BIT_MASK_VO_TRIGGER_NUM_8822B) \
- << BIT_SHIFT_VO_TRIGGER_NUM_8822B)
-#define BIT_GET_VO_TRIGGER_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_VO_TRIGGER_NUM_8822B) & \
- BIT_MASK_VO_TRIGGER_NUM_8822B)
-
-/* 2 REG_TBTT_PROHIBIT_8822B */
-
-#define BIT_SHIFT_TBTT_HOLD_TIME_AP_8822B 8
-#define BIT_MASK_TBTT_HOLD_TIME_AP_8822B 0xfff
-#define BIT_TBTT_HOLD_TIME_AP_8822B(x) \
- (((x) & BIT_MASK_TBTT_HOLD_TIME_AP_8822B) \
- << BIT_SHIFT_TBTT_HOLD_TIME_AP_8822B)
-#define BIT_GET_TBTT_HOLD_TIME_AP_8822B(x) \
- (((x) >> BIT_SHIFT_TBTT_HOLD_TIME_AP_8822B) & \
- BIT_MASK_TBTT_HOLD_TIME_AP_8822B)
-
-#define BIT_SHIFT_TBTT_PROHIBIT_SETUP_8822B 0
-#define BIT_MASK_TBTT_PROHIBIT_SETUP_8822B 0xf
-#define BIT_TBTT_PROHIBIT_SETUP_8822B(x) \
- (((x) & BIT_MASK_TBTT_PROHIBIT_SETUP_8822B) \
- << BIT_SHIFT_TBTT_PROHIBIT_SETUP_8822B)
-#define BIT_GET_TBTT_PROHIBIT_SETUP_8822B(x) \
- (((x) >> BIT_SHIFT_TBTT_PROHIBIT_SETUP_8822B) & \
- BIT_MASK_TBTT_PROHIBIT_SETUP_8822B)
-
-/* 2 REG_P2PPS_STATE_8822B */
-#define BIT_POWER_STATE_8822B BIT(7)
-#define BIT_CTWINDOW_ON_8822B BIT(6)
-#define BIT_BEACON_AREA_ON_8822B BIT(5)
-#define BIT_CTWIN_EARLY_DISTX_8822B BIT(4)
-#define BIT_NOA1_OFF_PERIOD_8822B BIT(3)
-#define BIT_FORCE_DOZE1_8822B BIT(2)
-#define BIT_NOA0_OFF_PERIOD_8822B BIT(1)
-#define BIT_FORCE_DOZE0_8822B BIT(0)
-
-/* 2 REG_RD_NAV_NXT_8822B */
-
-#define BIT_SHIFT_RD_NAV_PROT_NXT_8822B 0
-#define BIT_MASK_RD_NAV_PROT_NXT_8822B 0xffff
-#define BIT_RD_NAV_PROT_NXT_8822B(x) \
- (((x) & BIT_MASK_RD_NAV_PROT_NXT_8822B) \
- << BIT_SHIFT_RD_NAV_PROT_NXT_8822B)
-#define BIT_GET_RD_NAV_PROT_NXT_8822B(x) \
- (((x) >> BIT_SHIFT_RD_NAV_PROT_NXT_8822B) & \
- BIT_MASK_RD_NAV_PROT_NXT_8822B)
-
-/* 2 REG_NAV_PROT_LEN_8822B */
-
-#define BIT_SHIFT_NAV_PROT_LEN_8822B 0
-#define BIT_MASK_NAV_PROT_LEN_8822B 0xffff
-#define BIT_NAV_PROT_LEN_8822B(x) \
- (((x) & BIT_MASK_NAV_PROT_LEN_8822B) << BIT_SHIFT_NAV_PROT_LEN_8822B)
-#define BIT_GET_NAV_PROT_LEN_8822B(x) \
- (((x) >> BIT_SHIFT_NAV_PROT_LEN_8822B) & BIT_MASK_NAV_PROT_LEN_8822B)
-
-/* 2 REG_BCN_CTRL_8822B */
-#define BIT_DIS_RX_BSSID_FIT_8822B BIT(6)
-#define BIT_P0_EN_TXBCN_RPT_8822B BIT(5)
-#define BIT_DIS_TSF_UDT_8822B BIT(4)
-#define BIT_EN_BCN_FUNCTION_8822B BIT(3)
-#define BIT_P0_EN_RXBCN_RPT_8822B BIT(2)
-#define BIT_EN_P2P_CTWINDOW_8822B BIT(1)
-#define BIT_EN_P2P_BCNQ_AREA_8822B BIT(0)
-
-/* 2 REG_BCN_CTRL_CLINT0_8822B */
-#define BIT_CLI0_DIS_RX_BSSID_FIT_8822B BIT(6)
-#define BIT_CLI0_DIS_TSF_UDT_8822B BIT(4)
-#define BIT_CLI0_EN_BCN_FUNCTION_8822B BIT(3)
-#define BIT_CLI0_EN_RXBCN_RPT_8822B BIT(2)
-#define BIT_CLI0_ENP2P_CTWINDOW_8822B BIT(1)
-#define BIT_CLI0_ENP2P_BCNQ_AREA_8822B BIT(0)
-
-/* 2 REG_MBID_NUM_8822B */
-#define BIT_EN_PRE_DL_BEACON_8822B BIT(3)
-
-#define BIT_SHIFT_MBID_BCN_NUM_8822B 0
-#define BIT_MASK_MBID_BCN_NUM_8822B 0x7
-#define BIT_MBID_BCN_NUM_8822B(x) \
- (((x) & BIT_MASK_MBID_BCN_NUM_8822B) << BIT_SHIFT_MBID_BCN_NUM_8822B)
-#define BIT_GET_MBID_BCN_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_MBID_BCN_NUM_8822B) & BIT_MASK_MBID_BCN_NUM_8822B)
-
-/* 2 REG_DUAL_TSF_RST_8822B */
-#define BIT_FREECNT_RST_8822B BIT(5)
-#define BIT_TSFTR_CLI3_RST_8822B BIT(4)
-#define BIT_TSFTR_CLI2_RST_8822B BIT(3)
-#define BIT_TSFTR_CLI1_RST_8822B BIT(2)
-#define BIT_TSFTR_CLI0_RST_8822B BIT(1)
-#define BIT_TSFTR_RST_8822B BIT(0)
-
-/* 2 REG_MBSSID_BCN_SPACE_8822B */
-
-#define BIT_SHIFT_BCN_TIMER_SEL_FWRD_8822B 28
-#define BIT_MASK_BCN_TIMER_SEL_FWRD_8822B 0x7
-#define BIT_BCN_TIMER_SEL_FWRD_8822B(x) \
- (((x) & BIT_MASK_BCN_TIMER_SEL_FWRD_8822B) \
- << BIT_SHIFT_BCN_TIMER_SEL_FWRD_8822B)
-#define BIT_GET_BCN_TIMER_SEL_FWRD_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_TIMER_SEL_FWRD_8822B) & \
- BIT_MASK_BCN_TIMER_SEL_FWRD_8822B)
-
-#define BIT_SHIFT_BCN_SPACE_CLINT0_8822B 16
-#define BIT_MASK_BCN_SPACE_CLINT0_8822B 0xfff
-#define BIT_BCN_SPACE_CLINT0_8822B(x) \
- (((x) & BIT_MASK_BCN_SPACE_CLINT0_8822B) \
- << BIT_SHIFT_BCN_SPACE_CLINT0_8822B)
-#define BIT_GET_BCN_SPACE_CLINT0_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE_CLINT0_8822B) & \
- BIT_MASK_BCN_SPACE_CLINT0_8822B)
-
-#define BIT_SHIFT_BCN_SPACE0_8822B 0
-#define BIT_MASK_BCN_SPACE0_8822B 0xffff
-#define BIT_BCN_SPACE0_8822B(x) \
- (((x) & BIT_MASK_BCN_SPACE0_8822B) << BIT_SHIFT_BCN_SPACE0_8822B)
-#define BIT_GET_BCN_SPACE0_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE0_8822B) & BIT_MASK_BCN_SPACE0_8822B)
-
-/* 2 REG_DRVERLYINT_8822B */
-
-#define BIT_SHIFT_DRVERLYITV_8822B 0
-#define BIT_MASK_DRVERLYITV_8822B 0xff
-#define BIT_DRVERLYITV_8822B(x) \
- (((x) & BIT_MASK_DRVERLYITV_8822B) << BIT_SHIFT_DRVERLYITV_8822B)
-#define BIT_GET_DRVERLYITV_8822B(x) \
- (((x) >> BIT_SHIFT_DRVERLYITV_8822B) & BIT_MASK_DRVERLYITV_8822B)
-
-/* 2 REG_BCNDMATIM_8822B */
-
-#define BIT_SHIFT_BCNDMATIM_8822B 0
-#define BIT_MASK_BCNDMATIM_8822B 0xff
-#define BIT_BCNDMATIM_8822B(x) \
- (((x) & BIT_MASK_BCNDMATIM_8822B) << BIT_SHIFT_BCNDMATIM_8822B)
-#define BIT_GET_BCNDMATIM_8822B(x) \
- (((x) >> BIT_SHIFT_BCNDMATIM_8822B) & BIT_MASK_BCNDMATIM_8822B)
-
-/* 2 REG_ATIMWND_8822B */
-
-#define BIT_SHIFT_ATIMWND0_8822B 0
-#define BIT_MASK_ATIMWND0_8822B 0xffff
-#define BIT_ATIMWND0_8822B(x) \
- (((x) & BIT_MASK_ATIMWND0_8822B) << BIT_SHIFT_ATIMWND0_8822B)
-#define BIT_GET_ATIMWND0_8822B(x) \
- (((x) >> BIT_SHIFT_ATIMWND0_8822B) & BIT_MASK_ATIMWND0_8822B)
-
-/* 2 REG_USTIME_TSF_8822B */
-
-#define BIT_SHIFT_USTIME_TSF_V1_8822B 0
-#define BIT_MASK_USTIME_TSF_V1_8822B 0xff
-#define BIT_USTIME_TSF_V1_8822B(x) \
- (((x) & BIT_MASK_USTIME_TSF_V1_8822B) << BIT_SHIFT_USTIME_TSF_V1_8822B)
-#define BIT_GET_USTIME_TSF_V1_8822B(x) \
- (((x) >> BIT_SHIFT_USTIME_TSF_V1_8822B) & BIT_MASK_USTIME_TSF_V1_8822B)
-
-/* 2 REG_BCN_MAX_ERR_8822B */
-
-#define BIT_SHIFT_BCN_MAX_ERR_8822B 0
-#define BIT_MASK_BCN_MAX_ERR_8822B 0xff
-#define BIT_BCN_MAX_ERR_8822B(x) \
- (((x) & BIT_MASK_BCN_MAX_ERR_8822B) << BIT_SHIFT_BCN_MAX_ERR_8822B)
-#define BIT_GET_BCN_MAX_ERR_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_MAX_ERR_8822B) & BIT_MASK_BCN_MAX_ERR_8822B)
-
-/* 2 REG_RXTSF_OFFSET_CCK_8822B */
-
-#define BIT_SHIFT_CCK_RXTSF_OFFSET_8822B 0
-#define BIT_MASK_CCK_RXTSF_OFFSET_8822B 0xff
-#define BIT_CCK_RXTSF_OFFSET_8822B(x) \
- (((x) & BIT_MASK_CCK_RXTSF_OFFSET_8822B) \
- << BIT_SHIFT_CCK_RXTSF_OFFSET_8822B)
-#define BIT_GET_CCK_RXTSF_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_CCK_RXTSF_OFFSET_8822B) & \
- BIT_MASK_CCK_RXTSF_OFFSET_8822B)
-
-/* 2 REG_RXTSF_OFFSET_OFDM_8822B */
-
-#define BIT_SHIFT_OFDM_RXTSF_OFFSET_8822B 0
-#define BIT_MASK_OFDM_RXTSF_OFFSET_8822B 0xff
-#define BIT_OFDM_RXTSF_OFFSET_8822B(x) \
- (((x) & BIT_MASK_OFDM_RXTSF_OFFSET_8822B) \
- << BIT_SHIFT_OFDM_RXTSF_OFFSET_8822B)
-#define BIT_GET_OFDM_RXTSF_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_OFDM_RXTSF_OFFSET_8822B) & \
- BIT_MASK_OFDM_RXTSF_OFFSET_8822B)
-
-/* 2 REG_TSFTR_8822B */
-
-#define BIT_SHIFT_TSF_TIMER_8822B 0
-#define BIT_MASK_TSF_TIMER_8822B 0xffffffffffffffffL
-#define BIT_TSF_TIMER_8822B(x) \
- (((x) & BIT_MASK_TSF_TIMER_8822B) << BIT_SHIFT_TSF_TIMER_8822B)
-#define BIT_GET_TSF_TIMER_8822B(x) \
- (((x) >> BIT_SHIFT_TSF_TIMER_8822B) & BIT_MASK_TSF_TIMER_8822B)
-
-/* 2 REG_FREERUN_CNT_8822B */
-
-#define BIT_SHIFT_FREERUN_CNT_8822B 0
-#define BIT_MASK_FREERUN_CNT_8822B 0xffffffffffffffffL
-#define BIT_FREERUN_CNT_8822B(x) \
- (((x) & BIT_MASK_FREERUN_CNT_8822B) << BIT_SHIFT_FREERUN_CNT_8822B)
-#define BIT_GET_FREERUN_CNT_8822B(x) \
- (((x) >> BIT_SHIFT_FREERUN_CNT_8822B) & BIT_MASK_FREERUN_CNT_8822B)
-
-/* 2 REG_ATIMWND1_V1_8822B */
-
-#define BIT_SHIFT_ATIMWND1_V1_8822B 0
-#define BIT_MASK_ATIMWND1_V1_8822B 0xff
-#define BIT_ATIMWND1_V1_8822B(x) \
- (((x) & BIT_MASK_ATIMWND1_V1_8822B) << BIT_SHIFT_ATIMWND1_V1_8822B)
-#define BIT_GET_ATIMWND1_V1_8822B(x) \
- (((x) >> BIT_SHIFT_ATIMWND1_V1_8822B) & BIT_MASK_ATIMWND1_V1_8822B)
-
-/* 2 REG_TBTT_PROHIBIT_INFRA_8822B */
-
-#define BIT_SHIFT_TBTT_PROHIBIT_INFRA_8822B 0
-#define BIT_MASK_TBTT_PROHIBIT_INFRA_8822B 0xff
-#define BIT_TBTT_PROHIBIT_INFRA_8822B(x) \
- (((x) & BIT_MASK_TBTT_PROHIBIT_INFRA_8822B) \
- << BIT_SHIFT_TBTT_PROHIBIT_INFRA_8822B)
-#define BIT_GET_TBTT_PROHIBIT_INFRA_8822B(x) \
- (((x) >> BIT_SHIFT_TBTT_PROHIBIT_INFRA_8822B) & \
- BIT_MASK_TBTT_PROHIBIT_INFRA_8822B)
-
-/* 2 REG_CTWND_8822B */
-
-#define BIT_SHIFT_CTWND_8822B 0
-#define BIT_MASK_CTWND_8822B 0xff
-#define BIT_CTWND_8822B(x) \
- (((x) & BIT_MASK_CTWND_8822B) << BIT_SHIFT_CTWND_8822B)
-#define BIT_GET_CTWND_8822B(x) \
- (((x) >> BIT_SHIFT_CTWND_8822B) & BIT_MASK_CTWND_8822B)
-
-/* 2 REG_BCNIVLCUNT_8822B */
-
-#define BIT_SHIFT_BCNIVLCUNT_8822B 0
-#define BIT_MASK_BCNIVLCUNT_8822B 0x7f
-#define BIT_BCNIVLCUNT_8822B(x) \
- (((x) & BIT_MASK_BCNIVLCUNT_8822B) << BIT_SHIFT_BCNIVLCUNT_8822B)
-#define BIT_GET_BCNIVLCUNT_8822B(x) \
- (((x) >> BIT_SHIFT_BCNIVLCUNT_8822B) & BIT_MASK_BCNIVLCUNT_8822B)
-
-/* 2 REG_BCNDROPCTRL_8822B */
-#define BIT_BEACON_DROP_EN_8822B BIT(7)
-
-#define BIT_SHIFT_BEACON_DROP_IVL_8822B 0
-#define BIT_MASK_BEACON_DROP_IVL_8822B 0x7f
-#define BIT_BEACON_DROP_IVL_8822B(x) \
- (((x) & BIT_MASK_BEACON_DROP_IVL_8822B) \
- << BIT_SHIFT_BEACON_DROP_IVL_8822B)
-#define BIT_GET_BEACON_DROP_IVL_8822B(x) \
- (((x) >> BIT_SHIFT_BEACON_DROP_IVL_8822B) & \
- BIT_MASK_BEACON_DROP_IVL_8822B)
-
-/* 2 REG_HGQ_TIMEOUT_PERIOD_8822B */
-
-#define BIT_SHIFT_HGQ_TIMEOUT_PERIOD_8822B 0
-#define BIT_MASK_HGQ_TIMEOUT_PERIOD_8822B 0xff
-#define BIT_HGQ_TIMEOUT_PERIOD_8822B(x) \
- (((x) & BIT_MASK_HGQ_TIMEOUT_PERIOD_8822B) \
- << BIT_SHIFT_HGQ_TIMEOUT_PERIOD_8822B)
-#define BIT_GET_HGQ_TIMEOUT_PERIOD_8822B(x) \
- (((x) >> BIT_SHIFT_HGQ_TIMEOUT_PERIOD_8822B) & \
- BIT_MASK_HGQ_TIMEOUT_PERIOD_8822B)
-
-/* 2 REG_TXCMD_TIMEOUT_PERIOD_8822B */
-
-#define BIT_SHIFT_TXCMD_TIMEOUT_PERIOD_8822B 0
-#define BIT_MASK_TXCMD_TIMEOUT_PERIOD_8822B 0xff
-#define BIT_TXCMD_TIMEOUT_PERIOD_8822B(x) \
- (((x) & BIT_MASK_TXCMD_TIMEOUT_PERIOD_8822B) \
- << BIT_SHIFT_TXCMD_TIMEOUT_PERIOD_8822B)
-#define BIT_GET_TXCMD_TIMEOUT_PERIOD_8822B(x) \
- (((x) >> BIT_SHIFT_TXCMD_TIMEOUT_PERIOD_8822B) & \
- BIT_MASK_TXCMD_TIMEOUT_PERIOD_8822B)
-
-/* 2 REG_MISC_CTRL_8822B */
-#define BIT_DIS_TRX_CAL_BCN_8822B BIT(5)
-#define BIT_DIS_TX_CAL_TBTT_8822B BIT(4)
-#define BIT_EN_FREECNT_8822B BIT(3)
-#define BIT_BCN_AGGRESSION_8822B BIT(2)
-
-#define BIT_SHIFT_DIS_SECONDARY_CCA_8822B 0
-#define BIT_MASK_DIS_SECONDARY_CCA_8822B 0x3
-#define BIT_DIS_SECONDARY_CCA_8822B(x) \
- (((x) & BIT_MASK_DIS_SECONDARY_CCA_8822B) \
- << BIT_SHIFT_DIS_SECONDARY_CCA_8822B)
-#define BIT_GET_DIS_SECONDARY_CCA_8822B(x) \
- (((x) >> BIT_SHIFT_DIS_SECONDARY_CCA_8822B) & \
- BIT_MASK_DIS_SECONDARY_CCA_8822B)
-
-/* 2 REG_BCN_CTRL_CLINT1_8822B */
-#define BIT_CLI1_DIS_RX_BSSID_FIT_8822B BIT(6)
-#define BIT_CLI1_DIS_TSF_UDT_8822B BIT(4)
-#define BIT_CLI1_EN_BCN_FUNCTION_8822B BIT(3)
-#define BIT_CLI1_EN_RXBCN_RPT_8822B BIT(2)
-#define BIT_CLI1_ENP2P_CTWINDOW_8822B BIT(1)
-#define BIT_CLI1_ENP2P_BCNQ_AREA_8822B BIT(0)
-
-/* 2 REG_BCN_CTRL_CLINT2_8822B */
-#define BIT_CLI2_DIS_RX_BSSID_FIT_8822B BIT(6)
-#define BIT_CLI2_DIS_TSF_UDT_8822B BIT(4)
-#define BIT_CLI2_EN_BCN_FUNCTION_8822B BIT(3)
-#define BIT_CLI2_EN_RXBCN_RPT_8822B BIT(2)
-#define BIT_CLI2_ENP2P_CTWINDOW_8822B BIT(1)
-#define BIT_CLI2_ENP2P_BCNQ_AREA_8822B BIT(0)
-
-/* 2 REG_BCN_CTRL_CLINT3_8822B */
-#define BIT_CLI3_DIS_RX_BSSID_FIT_8822B BIT(6)
-#define BIT_CLI3_DIS_TSF_UDT_8822B BIT(4)
-#define BIT_CLI3_EN_BCN_FUNCTION_8822B BIT(3)
-#define BIT_CLI3_EN_RXBCN_RPT_8822B BIT(2)
-#define BIT_CLI3_ENP2P_CTWINDOW_8822B BIT(1)
-#define BIT_CLI3_ENP2P_BCNQ_AREA_8822B BIT(0)
-
-/* 2 REG_EXTEND_CTRL_8822B */
-#define BIT_EN_TSFBIT32_RST_P2P2_8822B BIT(5)
-#define BIT_EN_TSFBIT32_RST_P2P1_8822B BIT(4)
-
-#define BIT_SHIFT_PORT_SEL_8822B 0
-#define BIT_MASK_PORT_SEL_8822B 0x7
-#define BIT_PORT_SEL_8822B(x) \
- (((x) & BIT_MASK_PORT_SEL_8822B) << BIT_SHIFT_PORT_SEL_8822B)
-#define BIT_GET_PORT_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_PORT_SEL_8822B) & BIT_MASK_PORT_SEL_8822B)
-
-/* 2 REG_P2PPS1_SPEC_STATE_8822B */
-#define BIT_P2P1_SPEC_POWER_STATE_8822B BIT(7)
-#define BIT_P2P1_SPEC_CTWINDOW_ON_8822B BIT(6)
-#define BIT_P2P1_SPEC_BCN_AREA_ON_8822B BIT(5)
-#define BIT_P2P1_SPEC_CTWIN_EARLY_DISTX_8822B BIT(4)
-#define BIT_P2P1_SPEC_NOA1_OFF_PERIOD_8822B BIT(3)
-#define BIT_P2P1_SPEC_FORCE_DOZE1_8822B BIT(2)
-#define BIT_P2P1_SPEC_NOA0_OFF_PERIOD_8822B BIT(1)
-#define BIT_P2P1_SPEC_FORCE_DOZE0_8822B BIT(0)
-
-/* 2 REG_P2PPS1_STATE_8822B */
-#define BIT_P2P1_POWER_STATE_8822B BIT(7)
-#define BIT_P2P1_CTWINDOW_ON_8822B BIT(6)
-#define BIT_P2P1_BEACON_AREA_ON_8822B BIT(5)
-#define BIT_P2P1_CTWIN_EARLY_DISTX_8822B BIT(4)
-#define BIT_P2P1_NOA1_OFF_PERIOD_8822B BIT(3)
-#define BIT_P2P1_FORCE_DOZE1_8822B BIT(2)
-#define BIT_P2P1_NOA0_OFF_PERIOD_8822B BIT(1)
-#define BIT_P2P1_FORCE_DOZE0_8822B BIT(0)
-
-/* 2 REG_P2PPS2_SPEC_STATE_8822B */
-#define BIT_P2P2_SPEC_POWER_STATE_8822B BIT(7)
-#define BIT_P2P2_SPEC_CTWINDOW_ON_8822B BIT(6)
-#define BIT_P2P2_SPEC_BCN_AREA_ON_8822B BIT(5)
-#define BIT_P2P2_SPEC_CTWIN_EARLY_DISTX_8822B BIT(4)
-#define BIT_P2P2_SPEC_NOA1_OFF_PERIOD_8822B BIT(3)
-#define BIT_P2P2_SPEC_FORCE_DOZE1_8822B BIT(2)
-#define BIT_P2P2_SPEC_NOA0_OFF_PERIOD_8822B BIT(1)
-#define BIT_P2P2_SPEC_FORCE_DOZE0_8822B BIT(0)
-
-/* 2 REG_P2PPS2_STATE_8822B */
-#define BIT_P2P2_POWER_STATE_8822B BIT(7)
-#define BIT_P2P2_CTWINDOW_ON_8822B BIT(6)
-#define BIT_P2P2_BEACON_AREA_ON_8822B BIT(5)
-#define BIT_P2P2_CTWIN_EARLY_DISTX_8822B BIT(4)
-#define BIT_P2P2_NOA1_OFF_PERIOD_8822B BIT(3)
-#define BIT_P2P2_FORCE_DOZE1_8822B BIT(2)
-#define BIT_P2P2_NOA0_OFF_PERIOD_8822B BIT(1)
-#define BIT_P2P2_FORCE_DOZE0_8822B BIT(0)
-
-/* 2 REG_PS_TIMER0_8822B */
-
-#define BIT_SHIFT_PSTIMER0_INT_8822B 5
-#define BIT_MASK_PSTIMER0_INT_8822B 0x7ffffff
-#define BIT_PSTIMER0_INT_8822B(x) \
- (((x) & BIT_MASK_PSTIMER0_INT_8822B) << BIT_SHIFT_PSTIMER0_INT_8822B)
-#define BIT_GET_PSTIMER0_INT_8822B(x) \
- (((x) >> BIT_SHIFT_PSTIMER0_INT_8822B) & BIT_MASK_PSTIMER0_INT_8822B)
-
-/* 2 REG_PS_TIMER1_8822B */
-
-#define BIT_SHIFT_PSTIMER1_INT_8822B 5
-#define BIT_MASK_PSTIMER1_INT_8822B 0x7ffffff
-#define BIT_PSTIMER1_INT_8822B(x) \
- (((x) & BIT_MASK_PSTIMER1_INT_8822B) << BIT_SHIFT_PSTIMER1_INT_8822B)
-#define BIT_GET_PSTIMER1_INT_8822B(x) \
- (((x) >> BIT_SHIFT_PSTIMER1_INT_8822B) & BIT_MASK_PSTIMER1_INT_8822B)
-
-/* 2 REG_PS_TIMER2_8822B */
-
-#define BIT_SHIFT_PSTIMER2_INT_8822B 5
-#define BIT_MASK_PSTIMER2_INT_8822B 0x7ffffff
-#define BIT_PSTIMER2_INT_8822B(x) \
- (((x) & BIT_MASK_PSTIMER2_INT_8822B) << BIT_SHIFT_PSTIMER2_INT_8822B)
-#define BIT_GET_PSTIMER2_INT_8822B(x) \
- (((x) >> BIT_SHIFT_PSTIMER2_INT_8822B) & BIT_MASK_PSTIMER2_INT_8822B)
-
-/* 2 REG_TBTT_CTN_AREA_8822B */
-
-#define BIT_SHIFT_TBTT_CTN_AREA_8822B 0
-#define BIT_MASK_TBTT_CTN_AREA_8822B 0xff
-#define BIT_TBTT_CTN_AREA_8822B(x) \
- (((x) & BIT_MASK_TBTT_CTN_AREA_8822B) << BIT_SHIFT_TBTT_CTN_AREA_8822B)
-#define BIT_GET_TBTT_CTN_AREA_8822B(x) \
- (((x) >> BIT_SHIFT_TBTT_CTN_AREA_8822B) & BIT_MASK_TBTT_CTN_AREA_8822B)
-
-/* 2 REG_FORCE_BCN_IFS_8822B */
-
-#define BIT_SHIFT_FORCE_BCN_IFS_8822B 0
-#define BIT_MASK_FORCE_BCN_IFS_8822B 0xff
-#define BIT_FORCE_BCN_IFS_8822B(x) \
- (((x) & BIT_MASK_FORCE_BCN_IFS_8822B) << BIT_SHIFT_FORCE_BCN_IFS_8822B)
-#define BIT_GET_FORCE_BCN_IFS_8822B(x) \
- (((x) >> BIT_SHIFT_FORCE_BCN_IFS_8822B) & BIT_MASK_FORCE_BCN_IFS_8822B)
-
-/* 2 REG_TXOP_MIN_8822B */
-
-#define BIT_SHIFT_TXOP_MIN_8822B 0
-#define BIT_MASK_TXOP_MIN_8822B 0x3fff
-#define BIT_TXOP_MIN_8822B(x) \
- (((x) & BIT_MASK_TXOP_MIN_8822B) << BIT_SHIFT_TXOP_MIN_8822B)
-#define BIT_GET_TXOP_MIN_8822B(x) \
- (((x) >> BIT_SHIFT_TXOP_MIN_8822B) & BIT_MASK_TXOP_MIN_8822B)
-
-/* 2 REG_PRE_BKF_TIME_8822B */
-
-#define BIT_SHIFT_PRE_BKF_TIME_8822B 0
-#define BIT_MASK_PRE_BKF_TIME_8822B 0xff
-#define BIT_PRE_BKF_TIME_8822B(x) \
- (((x) & BIT_MASK_PRE_BKF_TIME_8822B) << BIT_SHIFT_PRE_BKF_TIME_8822B)
-#define BIT_GET_PRE_BKF_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_PRE_BKF_TIME_8822B) & BIT_MASK_PRE_BKF_TIME_8822B)
-
-/* 2 REG_CROSS_TXOP_CTRL_8822B */
-#define BIT_DTIM_BYPASS_8822B BIT(2)
-#define BIT_RTS_NAV_TXOP_8822B BIT(1)
-#define BIT_NOT_CROSS_TXOP_8822B BIT(0)
-
-/* 2 REG_ATIMWND2_8822B */
-
-#define BIT_SHIFT_ATIMWND2_8822B 0
-#define BIT_MASK_ATIMWND2_8822B 0xff
-#define BIT_ATIMWND2_8822B(x) \
- (((x) & BIT_MASK_ATIMWND2_8822B) << BIT_SHIFT_ATIMWND2_8822B)
-#define BIT_GET_ATIMWND2_8822B(x) \
- (((x) >> BIT_SHIFT_ATIMWND2_8822B) & BIT_MASK_ATIMWND2_8822B)
-
-/* 2 REG_ATIMWND3_8822B */
-
-#define BIT_SHIFT_ATIMWND3_8822B 0
-#define BIT_MASK_ATIMWND3_8822B 0xff
-#define BIT_ATIMWND3_8822B(x) \
- (((x) & BIT_MASK_ATIMWND3_8822B) << BIT_SHIFT_ATIMWND3_8822B)
-#define BIT_GET_ATIMWND3_8822B(x) \
- (((x) >> BIT_SHIFT_ATIMWND3_8822B) & BIT_MASK_ATIMWND3_8822B)
-
-/* 2 REG_ATIMWND4_8822B */
-
-#define BIT_SHIFT_ATIMWND4_8822B 0
-#define BIT_MASK_ATIMWND4_8822B 0xff
-#define BIT_ATIMWND4_8822B(x) \
- (((x) & BIT_MASK_ATIMWND4_8822B) << BIT_SHIFT_ATIMWND4_8822B)
-#define BIT_GET_ATIMWND4_8822B(x) \
- (((x) >> BIT_SHIFT_ATIMWND4_8822B) & BIT_MASK_ATIMWND4_8822B)
-
-/* 2 REG_ATIMWND5_8822B */
-
-#define BIT_SHIFT_ATIMWND5_8822B 0
-#define BIT_MASK_ATIMWND5_8822B 0xff
-#define BIT_ATIMWND5_8822B(x) \
- (((x) & BIT_MASK_ATIMWND5_8822B) << BIT_SHIFT_ATIMWND5_8822B)
-#define BIT_GET_ATIMWND5_8822B(x) \
- (((x) >> BIT_SHIFT_ATIMWND5_8822B) & BIT_MASK_ATIMWND5_8822B)
-
-/* 2 REG_ATIMWND6_8822B */
-
-#define BIT_SHIFT_ATIMWND6_8822B 0
-#define BIT_MASK_ATIMWND6_8822B 0xff
-#define BIT_ATIMWND6_8822B(x) \
- (((x) & BIT_MASK_ATIMWND6_8822B) << BIT_SHIFT_ATIMWND6_8822B)
-#define BIT_GET_ATIMWND6_8822B(x) \
- (((x) >> BIT_SHIFT_ATIMWND6_8822B) & BIT_MASK_ATIMWND6_8822B)
-
-/* 2 REG_ATIMWND7_8822B */
-
-#define BIT_SHIFT_ATIMWND7_8822B 0
-#define BIT_MASK_ATIMWND7_8822B 0xff
-#define BIT_ATIMWND7_8822B(x) \
- (((x) & BIT_MASK_ATIMWND7_8822B) << BIT_SHIFT_ATIMWND7_8822B)
-#define BIT_GET_ATIMWND7_8822B(x) \
- (((x) >> BIT_SHIFT_ATIMWND7_8822B) & BIT_MASK_ATIMWND7_8822B)
-
-/* 2 REG_ATIMUGT_8822B */
-
-#define BIT_SHIFT_ATIM_URGENT_8822B 0
-#define BIT_MASK_ATIM_URGENT_8822B 0xff
-#define BIT_ATIM_URGENT_8822B(x) \
- (((x) & BIT_MASK_ATIM_URGENT_8822B) << BIT_SHIFT_ATIM_URGENT_8822B)
-#define BIT_GET_ATIM_URGENT_8822B(x) \
- (((x) >> BIT_SHIFT_ATIM_URGENT_8822B) & BIT_MASK_ATIM_URGENT_8822B)
-
-/* 2 REG_HIQ_NO_LMT_EN_8822B */
-#define BIT_HIQ_NO_LMT_EN_VAP7_8822B BIT(7)
-#define BIT_HIQ_NO_LMT_EN_VAP6_8822B BIT(6)
-#define BIT_HIQ_NO_LMT_EN_VAP5_8822B BIT(5)
-#define BIT_HIQ_NO_LMT_EN_VAP4_8822B BIT(4)
-#define BIT_HIQ_NO_LMT_EN_VAP3_8822B BIT(3)
-#define BIT_HIQ_NO_LMT_EN_VAP2_8822B BIT(2)
-#define BIT_HIQ_NO_LMT_EN_VAP1_8822B BIT(1)
-#define BIT_HIQ_NO_LMT_EN_ROOT_8822B BIT(0)
-
-/* 2 REG_DTIM_COUNTER_ROOT_8822B */
-
-#define BIT_SHIFT_DTIM_COUNT_ROOT_8822B 0
-#define BIT_MASK_DTIM_COUNT_ROOT_8822B 0xff
-#define BIT_DTIM_COUNT_ROOT_8822B(x) \
- (((x) & BIT_MASK_DTIM_COUNT_ROOT_8822B) \
- << BIT_SHIFT_DTIM_COUNT_ROOT_8822B)
-#define BIT_GET_DTIM_COUNT_ROOT_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_ROOT_8822B) & \
- BIT_MASK_DTIM_COUNT_ROOT_8822B)
-
-/* 2 REG_DTIM_COUNTER_VAP1_8822B */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP1_8822B 0
-#define BIT_MASK_DTIM_COUNT_VAP1_8822B 0xff
-#define BIT_DTIM_COUNT_VAP1_8822B(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP1_8822B) \
- << BIT_SHIFT_DTIM_COUNT_VAP1_8822B)
-#define BIT_GET_DTIM_COUNT_VAP1_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP1_8822B) & \
- BIT_MASK_DTIM_COUNT_VAP1_8822B)
-
-/* 2 REG_DTIM_COUNTER_VAP2_8822B */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP2_8822B 0
-#define BIT_MASK_DTIM_COUNT_VAP2_8822B 0xff
-#define BIT_DTIM_COUNT_VAP2_8822B(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP2_8822B) \
- << BIT_SHIFT_DTIM_COUNT_VAP2_8822B)
-#define BIT_GET_DTIM_COUNT_VAP2_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP2_8822B) & \
- BIT_MASK_DTIM_COUNT_VAP2_8822B)
-
-/* 2 REG_DTIM_COUNTER_VAP3_8822B */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP3_8822B 0
-#define BIT_MASK_DTIM_COUNT_VAP3_8822B 0xff
-#define BIT_DTIM_COUNT_VAP3_8822B(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP3_8822B) \
- << BIT_SHIFT_DTIM_COUNT_VAP3_8822B)
-#define BIT_GET_DTIM_COUNT_VAP3_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP3_8822B) & \
- BIT_MASK_DTIM_COUNT_VAP3_8822B)
-
-/* 2 REG_DTIM_COUNTER_VAP4_8822B */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP4_8822B 0
-#define BIT_MASK_DTIM_COUNT_VAP4_8822B 0xff
-#define BIT_DTIM_COUNT_VAP4_8822B(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP4_8822B) \
- << BIT_SHIFT_DTIM_COUNT_VAP4_8822B)
-#define BIT_GET_DTIM_COUNT_VAP4_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP4_8822B) & \
- BIT_MASK_DTIM_COUNT_VAP4_8822B)
-
-/* 2 REG_DTIM_COUNTER_VAP5_8822B */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP5_8822B 0
-#define BIT_MASK_DTIM_COUNT_VAP5_8822B 0xff
-#define BIT_DTIM_COUNT_VAP5_8822B(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP5_8822B) \
- << BIT_SHIFT_DTIM_COUNT_VAP5_8822B)
-#define BIT_GET_DTIM_COUNT_VAP5_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP5_8822B) & \
- BIT_MASK_DTIM_COUNT_VAP5_8822B)
-
-/* 2 REG_DTIM_COUNTER_VAP6_8822B */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP6_8822B 0
-#define BIT_MASK_DTIM_COUNT_VAP6_8822B 0xff
-#define BIT_DTIM_COUNT_VAP6_8822B(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP6_8822B) \
- << BIT_SHIFT_DTIM_COUNT_VAP6_8822B)
-#define BIT_GET_DTIM_COUNT_VAP6_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP6_8822B) & \
- BIT_MASK_DTIM_COUNT_VAP6_8822B)
-
-/* 2 REG_DTIM_COUNTER_VAP7_8822B */
-
-#define BIT_SHIFT_DTIM_COUNT_VAP7_8822B 0
-#define BIT_MASK_DTIM_COUNT_VAP7_8822B 0xff
-#define BIT_DTIM_COUNT_VAP7_8822B(x) \
- (((x) & BIT_MASK_DTIM_COUNT_VAP7_8822B) \
- << BIT_SHIFT_DTIM_COUNT_VAP7_8822B)
-#define BIT_GET_DTIM_COUNT_VAP7_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_COUNT_VAP7_8822B) & \
- BIT_MASK_DTIM_COUNT_VAP7_8822B)
-
-/* 2 REG_DIS_ATIM_8822B */
-#define BIT_DIS_ATIM_VAP7_8822B BIT(7)
-#define BIT_DIS_ATIM_VAP6_8822B BIT(6)
-#define BIT_DIS_ATIM_VAP5_8822B BIT(5)
-#define BIT_DIS_ATIM_VAP4_8822B BIT(4)
-#define BIT_DIS_ATIM_VAP3_8822B BIT(3)
-#define BIT_DIS_ATIM_VAP2_8822B BIT(2)
-#define BIT_DIS_ATIM_VAP1_8822B BIT(1)
-#define BIT_DIS_ATIM_ROOT_8822B BIT(0)
-
-/* 2 REG_EARLY_128US_8822B */
-
-#define BIT_SHIFT_TSFT_SEL_TIMER1_8822B 3
-#define BIT_MASK_TSFT_SEL_TIMER1_8822B 0x7
-#define BIT_TSFT_SEL_TIMER1_8822B(x) \
- (((x) & BIT_MASK_TSFT_SEL_TIMER1_8822B) \
- << BIT_SHIFT_TSFT_SEL_TIMER1_8822B)
-#define BIT_GET_TSFT_SEL_TIMER1_8822B(x) \
- (((x) >> BIT_SHIFT_TSFT_SEL_TIMER1_8822B) & \
- BIT_MASK_TSFT_SEL_TIMER1_8822B)
-
-#define BIT_SHIFT_EARLY_128US_8822B 0
-#define BIT_MASK_EARLY_128US_8822B 0x7
-#define BIT_EARLY_128US_8822B(x) \
- (((x) & BIT_MASK_EARLY_128US_8822B) << BIT_SHIFT_EARLY_128US_8822B)
-#define BIT_GET_EARLY_128US_8822B(x) \
- (((x) >> BIT_SHIFT_EARLY_128US_8822B) & BIT_MASK_EARLY_128US_8822B)
-
-/* 2 REG_P2PPS1_CTRL_8822B */
-#define BIT_P2P1_CTW_ALLSTASLEEP_8822B BIT(7)
-#define BIT_P2P1_OFF_DISTX_EN_8822B BIT(6)
-#define BIT_P2P1_PWR_MGT_EN_8822B BIT(5)
-#define BIT_P2P1_NOA1_EN_8822B BIT(2)
-#define BIT_P2P1_NOA0_EN_8822B BIT(1)
-
-/* 2 REG_P2PPS2_CTRL_8822B */
-#define BIT_P2P2_CTW_ALLSTASLEEP_8822B BIT(7)
-#define BIT_P2P2_OFF_DISTX_EN_8822B BIT(6)
-#define BIT_P2P2_PWR_MGT_EN_8822B BIT(5)
-#define BIT_P2P2_NOA1_EN_8822B BIT(2)
-#define BIT_P2P2_NOA0_EN_8822B BIT(1)
-
-/* 2 REG_TIMER0_SRC_SEL_8822B */
-
-#define BIT_SHIFT_SYNC_CLI_SEL_8822B 4
-#define BIT_MASK_SYNC_CLI_SEL_8822B 0x7
-#define BIT_SYNC_CLI_SEL_8822B(x) \
- (((x) & BIT_MASK_SYNC_CLI_SEL_8822B) << BIT_SHIFT_SYNC_CLI_SEL_8822B)
-#define BIT_GET_SYNC_CLI_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_SYNC_CLI_SEL_8822B) & BIT_MASK_SYNC_CLI_SEL_8822B)
-
-#define BIT_SHIFT_TSFT_SEL_TIMER0_8822B 0
-#define BIT_MASK_TSFT_SEL_TIMER0_8822B 0x7
-#define BIT_TSFT_SEL_TIMER0_8822B(x) \
- (((x) & BIT_MASK_TSFT_SEL_TIMER0_8822B) \
- << BIT_SHIFT_TSFT_SEL_TIMER0_8822B)
-#define BIT_GET_TSFT_SEL_TIMER0_8822B(x) \
- (((x) >> BIT_SHIFT_TSFT_SEL_TIMER0_8822B) & \
- BIT_MASK_TSFT_SEL_TIMER0_8822B)
-
-/* 2 REG_NOA_UNIT_SEL_8822B */
-
-#define BIT_SHIFT_NOA_UNIT2_SEL_8822B 8
-#define BIT_MASK_NOA_UNIT2_SEL_8822B 0x7
-#define BIT_NOA_UNIT2_SEL_8822B(x) \
- (((x) & BIT_MASK_NOA_UNIT2_SEL_8822B) << BIT_SHIFT_NOA_UNIT2_SEL_8822B)
-#define BIT_GET_NOA_UNIT2_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_NOA_UNIT2_SEL_8822B) & BIT_MASK_NOA_UNIT2_SEL_8822B)
-
-#define BIT_SHIFT_NOA_UNIT1_SEL_8822B 4
-#define BIT_MASK_NOA_UNIT1_SEL_8822B 0x7
-#define BIT_NOA_UNIT1_SEL_8822B(x) \
- (((x) & BIT_MASK_NOA_UNIT1_SEL_8822B) << BIT_SHIFT_NOA_UNIT1_SEL_8822B)
-#define BIT_GET_NOA_UNIT1_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_NOA_UNIT1_SEL_8822B) & BIT_MASK_NOA_UNIT1_SEL_8822B)
-
-#define BIT_SHIFT_NOA_UNIT0_SEL_8822B 0
-#define BIT_MASK_NOA_UNIT0_SEL_8822B 0x7
-#define BIT_NOA_UNIT0_SEL_8822B(x) \
- (((x) & BIT_MASK_NOA_UNIT0_SEL_8822B) << BIT_SHIFT_NOA_UNIT0_SEL_8822B)
-#define BIT_GET_NOA_UNIT0_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_NOA_UNIT0_SEL_8822B) & BIT_MASK_NOA_UNIT0_SEL_8822B)
-
-/* 2 REG_P2POFF_DIS_TXTIME_8822B */
-
-#define BIT_SHIFT_P2POFF_DIS_TXTIME_8822B 0
-#define BIT_MASK_P2POFF_DIS_TXTIME_8822B 0xff
-#define BIT_P2POFF_DIS_TXTIME_8822B(x) \
- (((x) & BIT_MASK_P2POFF_DIS_TXTIME_8822B) \
- << BIT_SHIFT_P2POFF_DIS_TXTIME_8822B)
-#define BIT_GET_P2POFF_DIS_TXTIME_8822B(x) \
- (((x) >> BIT_SHIFT_P2POFF_DIS_TXTIME_8822B) & \
- BIT_MASK_P2POFF_DIS_TXTIME_8822B)
-
-/* 2 REG_MBSSID_BCN_SPACE2_8822B */
-
-#define BIT_SHIFT_BCN_SPACE_CLINT2_8822B 16
-#define BIT_MASK_BCN_SPACE_CLINT2_8822B 0xfff
-#define BIT_BCN_SPACE_CLINT2_8822B(x) \
- (((x) & BIT_MASK_BCN_SPACE_CLINT2_8822B) \
- << BIT_SHIFT_BCN_SPACE_CLINT2_8822B)
-#define BIT_GET_BCN_SPACE_CLINT2_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE_CLINT2_8822B) & \
- BIT_MASK_BCN_SPACE_CLINT2_8822B)
-
-#define BIT_SHIFT_BCN_SPACE_CLINT1_8822B 0
-#define BIT_MASK_BCN_SPACE_CLINT1_8822B 0xfff
-#define BIT_BCN_SPACE_CLINT1_8822B(x) \
- (((x) & BIT_MASK_BCN_SPACE_CLINT1_8822B) \
- << BIT_SHIFT_BCN_SPACE_CLINT1_8822B)
-#define BIT_GET_BCN_SPACE_CLINT1_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE_CLINT1_8822B) & \
- BIT_MASK_BCN_SPACE_CLINT1_8822B)
-
-/* 2 REG_MBSSID_BCN_SPACE3_8822B */
-
-#define BIT_SHIFT_SUB_BCN_SPACE_8822B 16
-#define BIT_MASK_SUB_BCN_SPACE_8822B 0xff
-#define BIT_SUB_BCN_SPACE_8822B(x) \
- (((x) & BIT_MASK_SUB_BCN_SPACE_8822B) << BIT_SHIFT_SUB_BCN_SPACE_8822B)
-#define BIT_GET_SUB_BCN_SPACE_8822B(x) \
- (((x) >> BIT_SHIFT_SUB_BCN_SPACE_8822B) & BIT_MASK_SUB_BCN_SPACE_8822B)
-
-#define BIT_SHIFT_BCN_SPACE_CLINT3_8822B 0
-#define BIT_MASK_BCN_SPACE_CLINT3_8822B 0xfff
-#define BIT_BCN_SPACE_CLINT3_8822B(x) \
- (((x) & BIT_MASK_BCN_SPACE_CLINT3_8822B) \
- << BIT_SHIFT_BCN_SPACE_CLINT3_8822B)
-#define BIT_GET_BCN_SPACE_CLINT3_8822B(x) \
- (((x) >> BIT_SHIFT_BCN_SPACE_CLINT3_8822B) & \
- BIT_MASK_BCN_SPACE_CLINT3_8822B)
-
-/* 2 REG_ACMHWCTRL_8822B */
-#define BIT_BEQ_ACM_STATUS_8822B BIT(7)
-#define BIT_VIQ_ACM_STATUS_8822B BIT(6)
-#define BIT_VOQ_ACM_STATUS_8822B BIT(5)
-#define BIT_BEQ_ACM_EN_8822B BIT(3)
-#define BIT_VIQ_ACM_EN_8822B BIT(2)
-#define BIT_VOQ_ACM_EN_8822B BIT(1)
-#define BIT_ACMHWEN_8822B BIT(0)
-
-/* 2 REG_ACMRSTCTRL_8822B */
-#define BIT_BE_ACM_RESET_USED_TIME_8822B BIT(2)
-#define BIT_VI_ACM_RESET_USED_TIME_8822B BIT(1)
-#define BIT_VO_ACM_RESET_USED_TIME_8822B BIT(0)
-
-/* 2 REG_ACMAVG_8822B */
-
-#define BIT_SHIFT_AVGPERIOD_8822B 0
-#define BIT_MASK_AVGPERIOD_8822B 0xffff
-#define BIT_AVGPERIOD_8822B(x) \
- (((x) & BIT_MASK_AVGPERIOD_8822B) << BIT_SHIFT_AVGPERIOD_8822B)
-#define BIT_GET_AVGPERIOD_8822B(x) \
- (((x) >> BIT_SHIFT_AVGPERIOD_8822B) & BIT_MASK_AVGPERIOD_8822B)
-
-/* 2 REG_VO_ADMTIME_8822B */
-
-#define BIT_SHIFT_VO_ADMITTED_TIME_8822B 0
-#define BIT_MASK_VO_ADMITTED_TIME_8822B 0xffff
-#define BIT_VO_ADMITTED_TIME_8822B(x) \
- (((x) & BIT_MASK_VO_ADMITTED_TIME_8822B) \
- << BIT_SHIFT_VO_ADMITTED_TIME_8822B)
-#define BIT_GET_VO_ADMITTED_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_VO_ADMITTED_TIME_8822B) & \
- BIT_MASK_VO_ADMITTED_TIME_8822B)
-
-/* 2 REG_VI_ADMTIME_8822B */
-
-#define BIT_SHIFT_VI_ADMITTED_TIME_8822B 0
-#define BIT_MASK_VI_ADMITTED_TIME_8822B 0xffff
-#define BIT_VI_ADMITTED_TIME_8822B(x) \
- (((x) & BIT_MASK_VI_ADMITTED_TIME_8822B) \
- << BIT_SHIFT_VI_ADMITTED_TIME_8822B)
-#define BIT_GET_VI_ADMITTED_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_VI_ADMITTED_TIME_8822B) & \
- BIT_MASK_VI_ADMITTED_TIME_8822B)
-
-/* 2 REG_BE_ADMTIME_8822B */
-
-#define BIT_SHIFT_BE_ADMITTED_TIME_8822B 0
-#define BIT_MASK_BE_ADMITTED_TIME_8822B 0xffff
-#define BIT_BE_ADMITTED_TIME_8822B(x) \
- (((x) & BIT_MASK_BE_ADMITTED_TIME_8822B) \
- << BIT_SHIFT_BE_ADMITTED_TIME_8822B)
-#define BIT_GET_BE_ADMITTED_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_BE_ADMITTED_TIME_8822B) & \
- BIT_MASK_BE_ADMITTED_TIME_8822B)
-
-/* 2 REG_EDCA_RANDOM_GEN_8822B */
-
-#define BIT_SHIFT_RANDOM_GEN_8822B 0
-#define BIT_MASK_RANDOM_GEN_8822B 0xffffff
-#define BIT_RANDOM_GEN_8822B(x) \
- (((x) & BIT_MASK_RANDOM_GEN_8822B) << BIT_SHIFT_RANDOM_GEN_8822B)
-#define BIT_GET_RANDOM_GEN_8822B(x) \
- (((x) >> BIT_SHIFT_RANDOM_GEN_8822B) & BIT_MASK_RANDOM_GEN_8822B)
-
-/* 2 REG_TXCMD_NOA_SEL_8822B */
-
-#define BIT_SHIFT_NOA_SEL_8822B 4
-#define BIT_MASK_NOA_SEL_8822B 0x7
-#define BIT_NOA_SEL_8822B(x) \
- (((x) & BIT_MASK_NOA_SEL_8822B) << BIT_SHIFT_NOA_SEL_8822B)
-#define BIT_GET_NOA_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_NOA_SEL_8822B) & BIT_MASK_NOA_SEL_8822B)
-
-#define BIT_SHIFT_TXCMD_SEG_SEL_8822B 0
-#define BIT_MASK_TXCMD_SEG_SEL_8822B 0xf
-#define BIT_TXCMD_SEG_SEL_8822B(x) \
- (((x) & BIT_MASK_TXCMD_SEG_SEL_8822B) << BIT_SHIFT_TXCMD_SEG_SEL_8822B)
-#define BIT_GET_TXCMD_SEG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_TXCMD_SEG_SEL_8822B) & BIT_MASK_TXCMD_SEG_SEL_8822B)
-
-/* 2 REG_NOA_PARAM_8822B */
-
-#define BIT_SHIFT_NOA_COUNT_8822B (96 & CPU_OPT_WIDTH)
-#define BIT_MASK_NOA_COUNT_8822B 0xff
-#define BIT_NOA_COUNT_8822B(x) \
- (((x) & BIT_MASK_NOA_COUNT_8822B) << BIT_SHIFT_NOA_COUNT_8822B)
-#define BIT_GET_NOA_COUNT_8822B(x) \
- (((x) >> BIT_SHIFT_NOA_COUNT_8822B) & BIT_MASK_NOA_COUNT_8822B)
-
-#define BIT_SHIFT_NOA_START_TIME_8822B (64 & CPU_OPT_WIDTH)
-#define BIT_MASK_NOA_START_TIME_8822B 0xffffffffL
-#define BIT_NOA_START_TIME_8822B(x) \
- (((x) & BIT_MASK_NOA_START_TIME_8822B) \
- << BIT_SHIFT_NOA_START_TIME_8822B)
-#define BIT_GET_NOA_START_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_NOA_START_TIME_8822B) & \
- BIT_MASK_NOA_START_TIME_8822B)
-
-#define BIT_SHIFT_NOA_INTERVAL_8822B (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_NOA_INTERVAL_8822B 0xffffffffL
-#define BIT_NOA_INTERVAL_8822B(x) \
- (((x) & BIT_MASK_NOA_INTERVAL_8822B) << BIT_SHIFT_NOA_INTERVAL_8822B)
-#define BIT_GET_NOA_INTERVAL_8822B(x) \
- (((x) >> BIT_SHIFT_NOA_INTERVAL_8822B) & BIT_MASK_NOA_INTERVAL_8822B)
-
-#define BIT_SHIFT_NOA_DURATION_8822B 0
-#define BIT_MASK_NOA_DURATION_8822B 0xffffffffL
-#define BIT_NOA_DURATION_8822B(x) \
- (((x) & BIT_MASK_NOA_DURATION_8822B) << BIT_SHIFT_NOA_DURATION_8822B)
-#define BIT_GET_NOA_DURATION_8822B(x) \
- (((x) >> BIT_SHIFT_NOA_DURATION_8822B) & BIT_MASK_NOA_DURATION_8822B)
-
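The REG_NOA_PARAM fields above sit 0, 32, 64, and 96 bits into a register wider than one dword, and their shift constants are written as (N & CPU_OPT_WIDTH). CPU_OPT_WIDTH is not defined in this hunk; assuming it is 0x1f, as elsewhere in this halmac code, the expression reduces each bit offset modulo 32, so every accessor shifts within the 32-bit dword the caller has already selected. A small standalone sketch of that arithmetic (the 0x1f value is an assumption):

#include <stdio.h>

#define CPU_OPT_WIDTH 0x1f	/* assumed value: bit offsets reduced modulo 32 */

int main(void)
{
	/* Offsets of NOA_COUNT, NOA_START_TIME, NOA_INTERVAL and NOA_DURATION
	 * within the full register, and the shift each macro applies per dword.
	 */
	const int offsets[] = { 96, 64, 32, 0 };

	for (int i = 0; i < 4; i++)
		printf("bit %3d -> dword %d, shift within dword %d\n",
		       offsets[i], offsets[i] / 32, offsets[i] & CPU_OPT_WIDTH);
	return 0;
}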
-/* 2 REG_P2P_RST_8822B */
-#define BIT_P2P2_PWR_RST1_8822B BIT(5)
-#define BIT_P2P2_PWR_RST0_8822B BIT(4)
-#define BIT_P2P1_PWR_RST1_8822B BIT(3)
-#define BIT_P2P1_PWR_RST0_8822B BIT(2)
-#define BIT_P2P_PWR_RST1_V1_8822B BIT(1)
-#define BIT_P2P_PWR_RST0_V1_8822B BIT(0)
-
-/* 2 REG_SCHEDULER_RST_8822B */
-#define BIT_SYNC_CLI_8822B BIT(1)
-#define BIT_SCHEDULER_RST_V1_8822B BIT(0)
-
-/* 2 REG_SCH_TXCMD_8822B */
-
-#define BIT_SHIFT_SCH_TXCMD_8822B 0
-#define BIT_MASK_SCH_TXCMD_8822B 0xffffffffL
-#define BIT_SCH_TXCMD_8822B(x) \
- (((x) & BIT_MASK_SCH_TXCMD_8822B) << BIT_SHIFT_SCH_TXCMD_8822B)
-#define BIT_GET_SCH_TXCMD_8822B(x) \
- (((x) >> BIT_SHIFT_SCH_TXCMD_8822B) & BIT_MASK_SCH_TXCMD_8822B)
-
-/* 2 REG_PAGE5_DUMMY_8822B */
-
-/* 2 REG_CPUMGQ_TX_TIMER_8822B */
-
-#define BIT_SHIFT_CPUMGQ_TX_TIMER_V1_8822B 0
-#define BIT_MASK_CPUMGQ_TX_TIMER_V1_8822B 0xffffffffL
-#define BIT_CPUMGQ_TX_TIMER_V1_8822B(x) \
- (((x) & BIT_MASK_CPUMGQ_TX_TIMER_V1_8822B) \
- << BIT_SHIFT_CPUMGQ_TX_TIMER_V1_8822B)
-#define BIT_GET_CPUMGQ_TX_TIMER_V1_8822B(x) \
- (((x) >> BIT_SHIFT_CPUMGQ_TX_TIMER_V1_8822B) & \
- BIT_MASK_CPUMGQ_TX_TIMER_V1_8822B)
-
-/* 2 REG_PS_TIMER_A_8822B */
-
-#define BIT_SHIFT_PS_TIMER_A_V1_8822B 0
-#define BIT_MASK_PS_TIMER_A_V1_8822B 0xffffffffL
-#define BIT_PS_TIMER_A_V1_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_A_V1_8822B) << BIT_SHIFT_PS_TIMER_A_V1_8822B)
-#define BIT_GET_PS_TIMER_A_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_A_V1_8822B) & BIT_MASK_PS_TIMER_A_V1_8822B)
-
-/* 2 REG_PS_TIMER_B_8822B */
-
-#define BIT_SHIFT_PS_TIMER_B_V1_8822B 0
-#define BIT_MASK_PS_TIMER_B_V1_8822B 0xffffffffL
-#define BIT_PS_TIMER_B_V1_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_B_V1_8822B) << BIT_SHIFT_PS_TIMER_B_V1_8822B)
-#define BIT_GET_PS_TIMER_B_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_B_V1_8822B) & BIT_MASK_PS_TIMER_B_V1_8822B)
-
-/* 2 REG_PS_TIMER_C_8822B */
-
-#define BIT_SHIFT_PS_TIMER_C_V1_8822B 0
-#define BIT_MASK_PS_TIMER_C_V1_8822B 0xffffffffL
-#define BIT_PS_TIMER_C_V1_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_C_V1_8822B) << BIT_SHIFT_PS_TIMER_C_V1_8822B)
-#define BIT_GET_PS_TIMER_C_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_C_V1_8822B) & BIT_MASK_PS_TIMER_C_V1_8822B)
-
-/* 2 REG_PS_TIMER_ABC_CPUMGQ_TIMER_CRTL_8822B */
-#define BIT_CPUMGQ_TIMER_EN_8822B BIT(31)
-#define BIT_CPUMGQ_TX_EN_8822B BIT(28)
-
-#define BIT_SHIFT_CPUMGQ_TIMER_TSF_SEL_8822B 24
-#define BIT_MASK_CPUMGQ_TIMER_TSF_SEL_8822B 0x7
-#define BIT_CPUMGQ_TIMER_TSF_SEL_8822B(x) \
- (((x) & BIT_MASK_CPUMGQ_TIMER_TSF_SEL_8822B) \
- << BIT_SHIFT_CPUMGQ_TIMER_TSF_SEL_8822B)
-#define BIT_GET_CPUMGQ_TIMER_TSF_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_CPUMGQ_TIMER_TSF_SEL_8822B) & \
- BIT_MASK_CPUMGQ_TIMER_TSF_SEL_8822B)
-
-#define BIT_PS_TIMER_C_EN_8822B BIT(23)
-
-#define BIT_SHIFT_PS_TIMER_C_TSF_SEL_8822B 16
-#define BIT_MASK_PS_TIMER_C_TSF_SEL_8822B 0x7
-#define BIT_PS_TIMER_C_TSF_SEL_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_C_TSF_SEL_8822B) \
- << BIT_SHIFT_PS_TIMER_C_TSF_SEL_8822B)
-#define BIT_GET_PS_TIMER_C_TSF_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_C_TSF_SEL_8822B) & \
- BIT_MASK_PS_TIMER_C_TSF_SEL_8822B)
-
-#define BIT_PS_TIMER_B_EN_8822B BIT(15)
-
-#define BIT_SHIFT_PS_TIMER_B_TSF_SEL_8822B 8
-#define BIT_MASK_PS_TIMER_B_TSF_SEL_8822B 0x7
-#define BIT_PS_TIMER_B_TSF_SEL_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_B_TSF_SEL_8822B) \
- << BIT_SHIFT_PS_TIMER_B_TSF_SEL_8822B)
-#define BIT_GET_PS_TIMER_B_TSF_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_B_TSF_SEL_8822B) & \
- BIT_MASK_PS_TIMER_B_TSF_SEL_8822B)
-
-#define BIT_PS_TIMER_A_EN_8822B BIT(7)
-
-#define BIT_SHIFT_PS_TIMER_A_TSF_SEL_8822B 0
-#define BIT_MASK_PS_TIMER_A_TSF_SEL_8822B 0x7
-#define BIT_PS_TIMER_A_TSF_SEL_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_A_TSF_SEL_8822B) \
- << BIT_SHIFT_PS_TIMER_A_TSF_SEL_8822B)
-#define BIT_GET_PS_TIMER_A_TSF_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_A_TSF_SEL_8822B) & \
- BIT_MASK_PS_TIMER_A_TSF_SEL_8822B)
-
-/* 2 REG_CPUMGQ_TX_TIMER_EARLY_8822B */
-
-#define BIT_SHIFT_CPUMGQ_TX_TIMER_EARLY_8822B 0
-#define BIT_MASK_CPUMGQ_TX_TIMER_EARLY_8822B 0xff
-#define BIT_CPUMGQ_TX_TIMER_EARLY_8822B(x) \
- (((x) & BIT_MASK_CPUMGQ_TX_TIMER_EARLY_8822B) \
- << BIT_SHIFT_CPUMGQ_TX_TIMER_EARLY_8822B)
-#define BIT_GET_CPUMGQ_TX_TIMER_EARLY_8822B(x) \
- (((x) >> BIT_SHIFT_CPUMGQ_TX_TIMER_EARLY_8822B) & \
- BIT_MASK_CPUMGQ_TX_TIMER_EARLY_8822B)
-
-/* 2 REG_PS_TIMER_A_EARLY_8822B */
-
-#define BIT_SHIFT_PS_TIMER_A_EARLY_8822B 0
-#define BIT_MASK_PS_TIMER_A_EARLY_8822B 0xff
-#define BIT_PS_TIMER_A_EARLY_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_A_EARLY_8822B) \
- << BIT_SHIFT_PS_TIMER_A_EARLY_8822B)
-#define BIT_GET_PS_TIMER_A_EARLY_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_A_EARLY_8822B) & \
- BIT_MASK_PS_TIMER_A_EARLY_8822B)
-
-/* 2 REG_PS_TIMER_B_EARLY_8822B */
-
-#define BIT_SHIFT_PS_TIMER_B_EARLY_8822B 0
-#define BIT_MASK_PS_TIMER_B_EARLY_8822B 0xff
-#define BIT_PS_TIMER_B_EARLY_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_B_EARLY_8822B) \
- << BIT_SHIFT_PS_TIMER_B_EARLY_8822B)
-#define BIT_GET_PS_TIMER_B_EARLY_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_B_EARLY_8822B) & \
- BIT_MASK_PS_TIMER_B_EARLY_8822B)
-
-/* 2 REG_PS_TIMER_C_EARLY_8822B */
-
-#define BIT_SHIFT_PS_TIMER_C_EARLY_8822B 0
-#define BIT_MASK_PS_TIMER_C_EARLY_8822B 0xff
-#define BIT_PS_TIMER_C_EARLY_8822B(x) \
- (((x) & BIT_MASK_PS_TIMER_C_EARLY_8822B) \
- << BIT_SHIFT_PS_TIMER_C_EARLY_8822B)
-#define BIT_GET_PS_TIMER_C_EARLY_8822B(x) \
- (((x) >> BIT_SHIFT_PS_TIMER_C_EARLY_8822B) & \
- BIT_MASK_PS_TIMER_C_EARLY_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_BWOPMODE_8822B (BW OPERATION MODE REGISTER) */
-
-/* 2 REG_WMAC_FWPKT_CR_8822B */
-#define BIT_FWEN_8822B BIT(7)
-#define BIT_PHYSTS_PKT_CTRL_8822B BIT(6)
-#define BIT_APPHDR_MIDSRCH_FAIL_8822B BIT(4)
-#define BIT_FWPARSING_EN_8822B BIT(3)
-
-#define BIT_SHIFT_APPEND_MHDR_LEN_8822B 0
-#define BIT_MASK_APPEND_MHDR_LEN_8822B 0x7
-#define BIT_APPEND_MHDR_LEN_8822B(x) \
- (((x) & BIT_MASK_APPEND_MHDR_LEN_8822B) \
- << BIT_SHIFT_APPEND_MHDR_LEN_8822B)
-#define BIT_GET_APPEND_MHDR_LEN_8822B(x) \
- (((x) >> BIT_SHIFT_APPEND_MHDR_LEN_8822B) & \
- BIT_MASK_APPEND_MHDR_LEN_8822B)
-
-/* 2 REG_WMAC_CR_8822B (WMAC CR AND APSD CONTROL REGISTER) */
-#define BIT_IC_MACPHY_M_8822B BIT(0)
-
-/* 2 REG_TCR_8822B (TRANSMISSION CONFIGURATION REGISTER) */
-#define BIT_WMAC_EN_RTS_ADDR_8822B BIT(31)
-#define BIT_WMAC_DISABLE_CCK_8822B BIT(30)
-#define BIT_WMAC_RAW_LEN_8822B BIT(29)
-#define BIT_WMAC_NOTX_IN_RXNDP_8822B BIT(28)
-#define BIT_WMAC_EN_EOF_8822B BIT(27)
-#define BIT_WMAC_BF_SEL_8822B BIT(26)
-#define BIT_WMAC_ANTMODE_SEL_8822B BIT(25)
-#define BIT_WMAC_TCRPWRMGT_HWCTL_8822B BIT(24)
-#define BIT_WMAC_SMOOTH_VAL_8822B BIT(23)
-#define BIT_FETCH_MPDU_AFTER_WSEC_RDY_8822B BIT(20)
-#define BIT_WMAC_TCR_EN_20MST_8822B BIT(19)
-#define BIT_WMAC_DIS_SIGTA_8822B BIT(18)
-#define BIT_WMAC_DIS_A2B0_8822B BIT(17)
-#define BIT_WMAC_MSK_SIGBCRC_8822B BIT(16)
-#define BIT_WMAC_TCR_ERRSTEN_3_8822B BIT(15)
-#define BIT_WMAC_TCR_ERRSTEN_2_8822B BIT(14)
-#define BIT_WMAC_TCR_ERRSTEN_1_8822B BIT(13)
-#define BIT_WMAC_TCR_ERRSTEN_0_8822B BIT(12)
-#define BIT_WMAC_TCR_TXSK_PERPKT_8822B BIT(11)
-#define BIT_ICV_8822B BIT(10)
-#define BIT_CFEND_FORMAT_8822B BIT(9)
-#define BIT_CRC_8822B BIT(8)
-#define BIT_PWRBIT_OW_EN_8822B BIT(7)
-#define BIT_PWR_ST_8822B BIT(6)
-#define BIT_WMAC_TCR_UPD_TIMIE_8822B BIT(5)
-#define BIT_WMAC_TCR_UPD_HGQMD_8822B BIT(4)
-#define BIT_VHTSIGA1_TXPS_8822B BIT(3)
-#define BIT_PAD_SEL_8822B BIT(2)
-#define BIT_DIS_GCLK_8822B BIT(1)
-
-/* 2 REG_RCR_8822B (RECEIVE CONFIGURATION REGISTER) */
-#define BIT_APP_FCS_8822B BIT(31)
-#define BIT_APP_MIC_8822B BIT(30)
-#define BIT_APP_ICV_8822B BIT(29)
-#define BIT_APP_PHYSTS_8822B BIT(28)
-#define BIT_APP_BASSN_8822B BIT(27)
-#define BIT_VHT_DACK_8822B BIT(26)
-#define BIT_TCPOFLD_EN_8822B BIT(25)
-#define BIT_ENMBID_8822B BIT(24)
-#define BIT_LSIGEN_8822B BIT(23)
-#define BIT_MFBEN_8822B BIT(22)
-#define BIT_DISCHKPPDLLEN_8822B BIT(21)
-#define BIT_PKTCTL_DLEN_8822B BIT(20)
-#define BIT_TIM_PARSER_EN_8822B BIT(18)
-#define BIT_BC_MD_EN_8822B BIT(17)
-#define BIT_UC_MD_EN_8822B BIT(16)
-#define BIT_RXSK_PERPKT_8822B BIT(15)
-#define BIT_HTC_LOC_CTRL_8822B BIT(14)
-#define BIT_RPFM_CAM_ENABLE_8822B BIT(12)
-#define BIT_TA_BCN_8822B BIT(11)
-#define BIT_DISDECMYPKT_8822B BIT(10)
-#define BIT_AICV_8822B BIT(9)
-#define BIT_ACRC32_8822B BIT(8)
-#define BIT_CBSSID_BCN_8822B BIT(7)
-#define BIT_CBSSID_DATA_8822B BIT(6)
-#define BIT_APWRMGT_8822B BIT(5)
-#define BIT_ADD3_8822B BIT(4)
-#define BIT_AB_8822B BIT(3)
-#define BIT_AM_8822B BIT(2)
-#define BIT_APM_8822B BIT(1)
-#define BIT_AAP_8822B BIT(0)
-
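Unlike the shift/mask fields, the receive-configuration register above is a plain set of single-bit flags, so a driver composes its value by OR-ing BIT_* constants together. A hedged sketch follows; the local BIT() stand-in, the uint32_t variable and the particular flag combination are illustrative only, not a recommended RCR setting, and the flag meanings in the comment are inferred from the names.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))	/* local stand-in for the kernel BIT() macro */

/* A few of the REG_RCR flags from the hunk above. */
#define BIT_APP_FCS_8822B BIT(31)
#define BIT_APP_PHYSTS_8822B BIT(28)
#define BIT_ACRC32_8822B BIT(8)
#define BIT_AB_8822B BIT(3)
#define BIT_AM_8822B BIT(2)
#define BIT_APM_8822B BIT(1)

int main(void)
{
	/* Illustrative combination (meanings inferred from the flag names):
	 * append FCS and PHY status, accept CRC-error, broadcast, multicast
	 * and physical-match frames.
	 */
	uint32_t rcr = BIT_APP_FCS_8822B | BIT_APP_PHYSTS_8822B |
		       BIT_ACRC32_8822B | BIT_AB_8822B | BIT_AM_8822B |
		       BIT_APM_8822B;

	printf("RCR = 0x%08x\n", rcr);
	return 0;
}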
-/* 2 REG_RX_DRVINFO_SZ_8822B (RX DRIVER INFO SIZE REGISTER) */
-#define BIT_PHYSTS_PER_PKT_MODE_8822B BIT(7)
-
-#define BIT_SHIFT_DRVINFO_SZ_V1_8822B 0
-#define BIT_MASK_DRVINFO_SZ_V1_8822B 0xf
-#define BIT_DRVINFO_SZ_V1_8822B(x) \
- (((x) & BIT_MASK_DRVINFO_SZ_V1_8822B) << BIT_SHIFT_DRVINFO_SZ_V1_8822B)
-#define BIT_GET_DRVINFO_SZ_V1_8822B(x) \
- (((x) >> BIT_SHIFT_DRVINFO_SZ_V1_8822B) & BIT_MASK_DRVINFO_SZ_V1_8822B)
-
-/* 2 REG_RX_DLK_TIME_8822B (RX DEADLOCK TIME REGISTER) */
-
-#define BIT_SHIFT_RX_DLK_TIME_8822B 0
-#define BIT_MASK_RX_DLK_TIME_8822B 0xff
-#define BIT_RX_DLK_TIME_8822B(x) \
- (((x) & BIT_MASK_RX_DLK_TIME_8822B) << BIT_SHIFT_RX_DLK_TIME_8822B)
-#define BIT_GET_RX_DLK_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_RX_DLK_TIME_8822B) & BIT_MASK_RX_DLK_TIME_8822B)
-
-/* 2 REG_RX_PKT_LIMIT_8822B (RX PACKET LENGTH LIMIT REGISTER) */
-
-#define BIT_SHIFT_RXPKTLMT_8822B 0
-#define BIT_MASK_RXPKTLMT_8822B 0x3f
-#define BIT_RXPKTLMT_8822B(x) \
- (((x) & BIT_MASK_RXPKTLMT_8822B) << BIT_SHIFT_RXPKTLMT_8822B)
-#define BIT_GET_RXPKTLMT_8822B(x) \
- (((x) >> BIT_SHIFT_RXPKTLMT_8822B) & BIT_MASK_RXPKTLMT_8822B)
-
-/* 2 REG_MACID_8822B (MAC ID REGISTER) */
-
-#define BIT_SHIFT_MACID_8822B 0
-#define BIT_MASK_MACID_8822B 0xffffffffffffL
-#define BIT_MACID_8822B(x) \
- (((x) & BIT_MASK_MACID_8822B) << BIT_SHIFT_MACID_8822B)
-#define BIT_GET_MACID_8822B(x) \
- (((x) >> BIT_SHIFT_MACID_8822B) & BIT_MASK_MACID_8822B)
-
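REG_MACID above (and REG_BSSID just below) describe a 48-bit field through a 64-bit mask, so the pack/extract macros only behave as intended when the argument is a 64-bit type. The sketch below assembles such a value from a six-byte array; the mac_to_u64() helper, the least-significant-octet-first ordering, and the example address are assumptions made for illustration, not taken from this header.

#include <stdint.h>
#include <stdio.h>

/* Macros reproduced from the REG_MACID block above. */
#define BIT_SHIFT_MACID_8822B 0
#define BIT_MASK_MACID_8822B 0xffffffffffffL
#define BIT_MACID_8822B(x) \
	(((x) & BIT_MASK_MACID_8822B) << BIT_SHIFT_MACID_8822B)

/* Hypothetical helper: the first array octet ends up in the lowest byte. */
static uint64_t mac_to_u64(const uint8_t mac[6])
{
	uint64_t v = 0;

	for (int i = 0; i < 6; i++)
		v |= (uint64_t)mac[i] << (8 * i);
	return v;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0xe0, 0x4c, 0x88, 0x22, 0xb0 };
	uint64_t macid = BIT_MACID_8822B(mac_to_u64(mac));

	printf("MACID field = 0x%012llx\n", (unsigned long long)macid);
	return 0;
}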
-/* 2 REG_BSSID_8822B (BSSID REGISTER) */
-
-#define BIT_SHIFT_BSSID_8822B 0
-#define BIT_MASK_BSSID_8822B 0xffffffffffffL
-#define BIT_BSSID_8822B(x) \
- (((x) & BIT_MASK_BSSID_8822B) << BIT_SHIFT_BSSID_8822B)
-#define BIT_GET_BSSID_8822B(x) \
- (((x) >> BIT_SHIFT_BSSID_8822B) & BIT_MASK_BSSID_8822B)
-
-/* 2 REG_MAR_8822B (MULTICAST ADDRESS REGISTER) */
-
-#define BIT_SHIFT_MAR_8822B 0
-#define BIT_MASK_MAR_8822B 0xffffffffffffffffL
-#define BIT_MAR_8822B(x) (((x) & BIT_MASK_MAR_8822B) << BIT_SHIFT_MAR_8822B)
-#define BIT_GET_MAR_8822B(x) (((x) >> BIT_SHIFT_MAR_8822B) & BIT_MASK_MAR_8822B)
-
-/* 2 REG_MBIDCAMCFG_1_8822B (MBSSID CAM CONFIGURATION REGISTER) */
-
-#define BIT_SHIFT_MBIDCAM_RWDATA_L_8822B 0
-#define BIT_MASK_MBIDCAM_RWDATA_L_8822B 0xffffffffL
-#define BIT_MBIDCAM_RWDATA_L_8822B(x) \
- (((x) & BIT_MASK_MBIDCAM_RWDATA_L_8822B) \
- << BIT_SHIFT_MBIDCAM_RWDATA_L_8822B)
-#define BIT_GET_MBIDCAM_RWDATA_L_8822B(x) \
- (((x) >> BIT_SHIFT_MBIDCAM_RWDATA_L_8822B) & \
- BIT_MASK_MBIDCAM_RWDATA_L_8822B)
-
-/* 2 REG_MBIDCAMCFG_2_8822B (MBSSID CAM CONFIGURATION REGISTER) */
-#define BIT_MBIDCAM_POLL_8822B BIT(31)
-#define BIT_MBIDCAM_WT_EN_8822B BIT(30)
-
-#define BIT_SHIFT_MBIDCAM_ADDR_8822B 24
-#define BIT_MASK_MBIDCAM_ADDR_8822B 0x1f
-#define BIT_MBIDCAM_ADDR_8822B(x) \
- (((x) & BIT_MASK_MBIDCAM_ADDR_8822B) << BIT_SHIFT_MBIDCAM_ADDR_8822B)
-#define BIT_GET_MBIDCAM_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_MBIDCAM_ADDR_8822B) & BIT_MASK_MBIDCAM_ADDR_8822B)
-
-#define BIT_MBIDCAM_VALID_8822B BIT(23)
-#define BIT_LSIC_TXOP_EN_8822B BIT(17)
-#define BIT_CTS_EN_8822B BIT(16)
-
-#define BIT_SHIFT_MBIDCAM_RWDATA_H_8822B 0
-#define BIT_MASK_MBIDCAM_RWDATA_H_8822B 0xffff
-#define BIT_MBIDCAM_RWDATA_H_8822B(x) \
- (((x) & BIT_MASK_MBIDCAM_RWDATA_H_8822B) \
- << BIT_SHIFT_MBIDCAM_RWDATA_H_8822B)
-#define BIT_GET_MBIDCAM_RWDATA_H_8822B(x) \
- (((x) >> BIT_SHIFT_MBIDCAM_RWDATA_H_8822B) & \
- BIT_MASK_MBIDCAM_RWDATA_H_8822B)
-
-/* 2 REG_ZLD_NUM_8822B */
-
-#define BIT_SHIFT_ZLD_NUM_8822B 0
-#define BIT_MASK_ZLD_NUM_8822B 0xff
-#define BIT_ZLD_NUM_8822B(x) \
- (((x) & BIT_MASK_ZLD_NUM_8822B) << BIT_SHIFT_ZLD_NUM_8822B)
-#define BIT_GET_ZLD_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_ZLD_NUM_8822B) & BIT_MASK_ZLD_NUM_8822B)
-
-/* 2 REG_UDF_THSD_8822B */
-
-#define BIT_SHIFT_UDF_THSD_8822B 0
-#define BIT_MASK_UDF_THSD_8822B 0xff
-#define BIT_UDF_THSD_8822B(x) \
- (((x) & BIT_MASK_UDF_THSD_8822B) << BIT_SHIFT_UDF_THSD_8822B)
-#define BIT_GET_UDF_THSD_8822B(x) \
- (((x) >> BIT_SHIFT_UDF_THSD_8822B) & BIT_MASK_UDF_THSD_8822B)
-
-/* 2 REG_WMAC_TCR_TSFT_OFS_8822B */
-
-#define BIT_SHIFT_WMAC_TCR_TSFT_OFS_8822B 0
-#define BIT_MASK_WMAC_TCR_TSFT_OFS_8822B 0xffff
-#define BIT_WMAC_TCR_TSFT_OFS_8822B(x) \
- (((x) & BIT_MASK_WMAC_TCR_TSFT_OFS_8822B) \
- << BIT_SHIFT_WMAC_TCR_TSFT_OFS_8822B)
-#define BIT_GET_WMAC_TCR_TSFT_OFS_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_TCR_TSFT_OFS_8822B) & \
- BIT_MASK_WMAC_TCR_TSFT_OFS_8822B)
-
-/* 2 REG_MCU_TEST_2_V1_8822B */
-
-#define BIT_SHIFT_MCU_RSVD_2_V1_8822B 0
-#define BIT_MASK_MCU_RSVD_2_V1_8822B 0xffff
-#define BIT_MCU_RSVD_2_V1_8822B(x) \
- (((x) & BIT_MASK_MCU_RSVD_2_V1_8822B) << BIT_SHIFT_MCU_RSVD_2_V1_8822B)
-#define BIT_GET_MCU_RSVD_2_V1_8822B(x) \
- (((x) >> BIT_SHIFT_MCU_RSVD_2_V1_8822B) & BIT_MASK_MCU_RSVD_2_V1_8822B)
-
-/* 2 REG_WMAC_TXTIMEOUT_8822B */
-
-#define BIT_SHIFT_WMAC_TXTIMEOUT_8822B 0
-#define BIT_MASK_WMAC_TXTIMEOUT_8822B 0xff
-#define BIT_WMAC_TXTIMEOUT_8822B(x) \
- (((x) & BIT_MASK_WMAC_TXTIMEOUT_8822B) \
- << BIT_SHIFT_WMAC_TXTIMEOUT_8822B)
-#define BIT_GET_WMAC_TXTIMEOUT_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_TXTIMEOUT_8822B) & \
- BIT_MASK_WMAC_TXTIMEOUT_8822B)
-
-/* 2 REG_STMP_THSD_8822B */
-
-#define BIT_SHIFT_STMP_THSD_8822B 0
-#define BIT_MASK_STMP_THSD_8822B 0xff
-#define BIT_STMP_THSD_8822B(x) \
- (((x) & BIT_MASK_STMP_THSD_8822B) << BIT_SHIFT_STMP_THSD_8822B)
-#define BIT_GET_STMP_THSD_8822B(x) \
- (((x) >> BIT_SHIFT_STMP_THSD_8822B) & BIT_MASK_STMP_THSD_8822B)
-
-/* 2 REG_MAC_SPEC_SIFS_8822B (SPECIFICATION SIFS REGISTER) */
-
-#define BIT_SHIFT_SPEC_SIFS_OFDM_8822B 8
-#define BIT_MASK_SPEC_SIFS_OFDM_8822B 0xff
-#define BIT_SPEC_SIFS_OFDM_8822B(x) \
- (((x) & BIT_MASK_SPEC_SIFS_OFDM_8822B) \
- << BIT_SHIFT_SPEC_SIFS_OFDM_8822B)
-#define BIT_GET_SPEC_SIFS_OFDM_8822B(x) \
- (((x) >> BIT_SHIFT_SPEC_SIFS_OFDM_8822B) & \
- BIT_MASK_SPEC_SIFS_OFDM_8822B)
-
-#define BIT_SHIFT_SPEC_SIFS_CCK_8822B 0
-#define BIT_MASK_SPEC_SIFS_CCK_8822B 0xff
-#define BIT_SPEC_SIFS_CCK_8822B(x) \
- (((x) & BIT_MASK_SPEC_SIFS_CCK_8822B) << BIT_SHIFT_SPEC_SIFS_CCK_8822B)
-#define BIT_GET_SPEC_SIFS_CCK_8822B(x) \
- (((x) >> BIT_SHIFT_SPEC_SIFS_CCK_8822B) & BIT_MASK_SPEC_SIFS_CCK_8822B)
-
-/* 2 REG_USTIME_EDCA_8822B (US TIME TUNING FOR EDCA REGISTER) */
-
-#define BIT_SHIFT_USTIME_EDCA_V1_8822B 0
-#define BIT_MASK_USTIME_EDCA_V1_8822B 0x1ff
-#define BIT_USTIME_EDCA_V1_8822B(x) \
- (((x) & BIT_MASK_USTIME_EDCA_V1_8822B) \
- << BIT_SHIFT_USTIME_EDCA_V1_8822B)
-#define BIT_GET_USTIME_EDCA_V1_8822B(x) \
- (((x) >> BIT_SHIFT_USTIME_EDCA_V1_8822B) & \
- BIT_MASK_USTIME_EDCA_V1_8822B)
-
-/* 2 REG_RESP_SIFS_OFDM_8822B (RESPONSE SIFS FOR OFDM REGISTER) */
-
-#define BIT_SHIFT_SIFS_R2T_OFDM_8822B 8
-#define BIT_MASK_SIFS_R2T_OFDM_8822B 0xff
-#define BIT_SIFS_R2T_OFDM_8822B(x) \
- (((x) & BIT_MASK_SIFS_R2T_OFDM_8822B) << BIT_SHIFT_SIFS_R2T_OFDM_8822B)
-#define BIT_GET_SIFS_R2T_OFDM_8822B(x) \
- (((x) >> BIT_SHIFT_SIFS_R2T_OFDM_8822B) & BIT_MASK_SIFS_R2T_OFDM_8822B)
-
-#define BIT_SHIFT_SIFS_T2T_OFDM_8822B 0
-#define BIT_MASK_SIFS_T2T_OFDM_8822B 0xff
-#define BIT_SIFS_T2T_OFDM_8822B(x) \
- (((x) & BIT_MASK_SIFS_T2T_OFDM_8822B) << BIT_SHIFT_SIFS_T2T_OFDM_8822B)
-#define BIT_GET_SIFS_T2T_OFDM_8822B(x) \
- (((x) >> BIT_SHIFT_SIFS_T2T_OFDM_8822B) & BIT_MASK_SIFS_T2T_OFDM_8822B)
-
-/* 2 REG_RESP_SIFS_CCK_8822B (RESPONSE SIFS FOR CCK REGISTER) */
-
-#define BIT_SHIFT_SIFS_R2T_CCK_8822B 8
-#define BIT_MASK_SIFS_R2T_CCK_8822B 0xff
-#define BIT_SIFS_R2T_CCK_8822B(x) \
- (((x) & BIT_MASK_SIFS_R2T_CCK_8822B) << BIT_SHIFT_SIFS_R2T_CCK_8822B)
-#define BIT_GET_SIFS_R2T_CCK_8822B(x) \
- (((x) >> BIT_SHIFT_SIFS_R2T_CCK_8822B) & BIT_MASK_SIFS_R2T_CCK_8822B)
-
-#define BIT_SHIFT_SIFS_T2T_CCK_8822B 0
-#define BIT_MASK_SIFS_T2T_CCK_8822B 0xff
-#define BIT_SIFS_T2T_CCK_8822B(x) \
- (((x) & BIT_MASK_SIFS_T2T_CCK_8822B) << BIT_SHIFT_SIFS_T2T_CCK_8822B)
-#define BIT_GET_SIFS_T2T_CCK_8822B(x) \
- (((x) >> BIT_SHIFT_SIFS_T2T_CCK_8822B) & BIT_MASK_SIFS_T2T_CCK_8822B)
-
-/* 2 REG_EIFS_8822B (EIFS REGISTER) */
-
-#define BIT_SHIFT_EIFS_8822B 0
-#define BIT_MASK_EIFS_8822B 0xffff
-#define BIT_EIFS_8822B(x) (((x) & BIT_MASK_EIFS_8822B) << BIT_SHIFT_EIFS_8822B)
-#define BIT_GET_EIFS_8822B(x) \
- (((x) >> BIT_SHIFT_EIFS_8822B) & BIT_MASK_EIFS_8822B)
-
-/* 2 REG_CTS2TO_8822B (CTS2 TIMEOUT REGISTER) */
-
-#define BIT_SHIFT_CTS2TO_8822B 0
-#define BIT_MASK_CTS2TO_8822B 0xff
-#define BIT_CTS2TO_8822B(x) \
- (((x) & BIT_MASK_CTS2TO_8822B) << BIT_SHIFT_CTS2TO_8822B)
-#define BIT_GET_CTS2TO_8822B(x) \
- (((x) >> BIT_SHIFT_CTS2TO_8822B) & BIT_MASK_CTS2TO_8822B)
-
-/* 2 REG_ACKTO_8822B (ACK TIMEOUT REGISTER) */
-
-#define BIT_SHIFT_ACKTO_8822B 0
-#define BIT_MASK_ACKTO_8822B 0xff
-#define BIT_ACKTO_8822B(x) \
- (((x) & BIT_MASK_ACKTO_8822B) << BIT_SHIFT_ACKTO_8822B)
-#define BIT_GET_ACKTO_8822B(x) \
- (((x) >> BIT_SHIFT_ACKTO_8822B) & BIT_MASK_ACKTO_8822B)
-
-/* 2 REG_NAV_CTRL_8822B (NAV CONTROL REGISTER) */
-
-#define BIT_SHIFT_NAV_UPPER_8822B 16
-#define BIT_MASK_NAV_UPPER_8822B 0xff
-#define BIT_NAV_UPPER_8822B(x) \
- (((x) & BIT_MASK_NAV_UPPER_8822B) << BIT_SHIFT_NAV_UPPER_8822B)
-#define BIT_GET_NAV_UPPER_8822B(x) \
- (((x) >> BIT_SHIFT_NAV_UPPER_8822B) & BIT_MASK_NAV_UPPER_8822B)
-
-#define BIT_SHIFT_RXMYRTS_NAV_8822B 8
-#define BIT_MASK_RXMYRTS_NAV_8822B 0xf
-#define BIT_RXMYRTS_NAV_8822B(x) \
- (((x) & BIT_MASK_RXMYRTS_NAV_8822B) << BIT_SHIFT_RXMYRTS_NAV_8822B)
-#define BIT_GET_RXMYRTS_NAV_8822B(x) \
- (((x) >> BIT_SHIFT_RXMYRTS_NAV_8822B) & BIT_MASK_RXMYRTS_NAV_8822B)
-
-#define BIT_SHIFT_RTSRST_8822B 0
-#define BIT_MASK_RTSRST_8822B 0xff
-#define BIT_RTSRST_8822B(x) \
- (((x) & BIT_MASK_RTSRST_8822B) << BIT_SHIFT_RTSRST_8822B)
-#define BIT_GET_RTSRST_8822B(x) \
- (((x) >> BIT_SHIFT_RTSRST_8822B) & BIT_MASK_RTSRST_8822B)
-
-/* 2 REG_BACAMCMD_8822B (BLOCK ACK CAM COMMAND REGISTER) */
-#define BIT_BACAM_POLL_8822B BIT(31)
-#define BIT_BACAM_RST_8822B BIT(17)
-#define BIT_BACAM_RW_8822B BIT(16)
-
-#define BIT_SHIFT_TXSBM_8822B 14
-#define BIT_MASK_TXSBM_8822B 0x3
-#define BIT_TXSBM_8822B(x) \
- (((x) & BIT_MASK_TXSBM_8822B) << BIT_SHIFT_TXSBM_8822B)
-#define BIT_GET_TXSBM_8822B(x) \
- (((x) >> BIT_SHIFT_TXSBM_8822B) & BIT_MASK_TXSBM_8822B)
-
-#define BIT_SHIFT_BACAM_ADDR_8822B 0
-#define BIT_MASK_BACAM_ADDR_8822B 0x3f
-#define BIT_BACAM_ADDR_8822B(x) \
- (((x) & BIT_MASK_BACAM_ADDR_8822B) << BIT_SHIFT_BACAM_ADDR_8822B)
-#define BIT_GET_BACAM_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_BACAM_ADDR_8822B) & BIT_MASK_BACAM_ADDR_8822B)
-
-/* 2 REG_BACAMCONTENT_8822B (BLOCK ACK CAM CONTENT REGISTER) */
-
-#define BIT_SHIFT_BA_CONTENT_H_8822B (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_BA_CONTENT_H_8822B 0xffffffffL
-#define BIT_BA_CONTENT_H_8822B(x) \
- (((x) & BIT_MASK_BA_CONTENT_H_8822B) << BIT_SHIFT_BA_CONTENT_H_8822B)
-#define BIT_GET_BA_CONTENT_H_8822B(x) \
- (((x) >> BIT_SHIFT_BA_CONTENT_H_8822B) & BIT_MASK_BA_CONTENT_H_8822B)
-
-#define BIT_SHIFT_BA_CONTENT_L_8822B 0
-#define BIT_MASK_BA_CONTENT_L_8822B 0xffffffffL
-#define BIT_BA_CONTENT_L_8822B(x) \
- (((x) & BIT_MASK_BA_CONTENT_L_8822B) << BIT_SHIFT_BA_CONTENT_L_8822B)
-#define BIT_GET_BA_CONTENT_L_8822B(x) \
- (((x) >> BIT_SHIFT_BA_CONTENT_L_8822B) & BIT_MASK_BA_CONTENT_L_8822B)
-
-/* 2 REG_WMAC_BITMAP_CTL_8822B */
-#define BIT_BITMAP_VO_8822B BIT(7)
-#define BIT_BITMAP_VI_8822B BIT(6)
-#define BIT_BITMAP_BE_8822B BIT(5)
-#define BIT_BITMAP_BK_8822B BIT(4)
-
-#define BIT_SHIFT_BITMAP_CONDITION_8822B 2
-#define BIT_MASK_BITMAP_CONDITION_8822B 0x3
-#define BIT_BITMAP_CONDITION_8822B(x) \
- (((x) & BIT_MASK_BITMAP_CONDITION_8822B) \
- << BIT_SHIFT_BITMAP_CONDITION_8822B)
-#define BIT_GET_BITMAP_CONDITION_8822B(x) \
- (((x) >> BIT_SHIFT_BITMAP_CONDITION_8822B) & \
- BIT_MASK_BITMAP_CONDITION_8822B)
-
-#define BIT_BITMAP_SSNBK_COUNTER_CLR_8822B BIT(1)
-#define BIT_BITMAP_FORCE_8822B BIT(0)
-
-/* 2 REG_TX_RX_8822B STATUS */
-
-#define BIT_SHIFT_RXPKT_TYPE_8822B 2
-#define BIT_MASK_RXPKT_TYPE_8822B 0x3f
-#define BIT_RXPKT_TYPE_8822B(x) \
- (((x) & BIT_MASK_RXPKT_TYPE_8822B) << BIT_SHIFT_RXPKT_TYPE_8822B)
-#define BIT_GET_RXPKT_TYPE_8822B(x) \
- (((x) >> BIT_SHIFT_RXPKT_TYPE_8822B) & BIT_MASK_RXPKT_TYPE_8822B)
-
-#define BIT_TXACT_IND_8822B BIT(1)
-#define BIT_RXACT_IND_8822B BIT(0)
-
-/* 2 REG_WMAC_BACAM_RPMEN_8822B */
-
-#define BIT_SHIFT_BITMAP_SSNBK_COUNTER_8822B 2
-#define BIT_MASK_BITMAP_SSNBK_COUNTER_8822B 0x3f
-#define BIT_BITMAP_SSNBK_COUNTER_8822B(x) \
- (((x) & BIT_MASK_BITMAP_SSNBK_COUNTER_8822B) \
- << BIT_SHIFT_BITMAP_SSNBK_COUNTER_8822B)
-#define BIT_GET_BITMAP_SSNBK_COUNTER_8822B(x) \
- (((x) >> BIT_SHIFT_BITMAP_SSNBK_COUNTER_8822B) & \
- BIT_MASK_BITMAP_SSNBK_COUNTER_8822B)
-
-#define BIT_BITMAP_EN_8822B BIT(1)
-#define BIT_WMAC_BACAM_RPMEN_8822B BIT(0)
-
-/* 2 REG_LBDLY_8822B (LOOPBACK DELAY REGISTER) */
-
-#define BIT_SHIFT_LBDLY_8822B 0
-#define BIT_MASK_LBDLY_8822B 0x1f
-#define BIT_LBDLY_8822B(x) \
- (((x) & BIT_MASK_LBDLY_8822B) << BIT_SHIFT_LBDLY_8822B)
-#define BIT_GET_LBDLY_8822B(x) \
- (((x) >> BIT_SHIFT_LBDLY_8822B) & BIT_MASK_LBDLY_8822B)
-
-/* 2 REG_RXERR_RPT_8822B (RX ERROR REPORT REGISTER) */
-
-#define BIT_SHIFT_RXERR_RPT_SEL_V1_3_0_8822B 28
-#define BIT_MASK_RXERR_RPT_SEL_V1_3_0_8822B 0xf
-#define BIT_RXERR_RPT_SEL_V1_3_0_8822B(x) \
- (((x) & BIT_MASK_RXERR_RPT_SEL_V1_3_0_8822B) \
- << BIT_SHIFT_RXERR_RPT_SEL_V1_3_0_8822B)
-#define BIT_GET_RXERR_RPT_SEL_V1_3_0_8822B(x) \
- (((x) >> BIT_SHIFT_RXERR_RPT_SEL_V1_3_0_8822B) & \
- BIT_MASK_RXERR_RPT_SEL_V1_3_0_8822B)
-
-#define BIT_RXERR_RPT_RST_8822B BIT(27)
-#define BIT_RXERR_RPT_SEL_V1_4_8822B BIT(26)
-#define BIT_W1S_8822B BIT(23)
-#define BIT_UD_SELECT_BSSID_8822B BIT(22)
-
-#define BIT_SHIFT_UD_SUB_TYPE_8822B 18
-#define BIT_MASK_UD_SUB_TYPE_8822B 0xf
-#define BIT_UD_SUB_TYPE_8822B(x) \
- (((x) & BIT_MASK_UD_SUB_TYPE_8822B) << BIT_SHIFT_UD_SUB_TYPE_8822B)
-#define BIT_GET_UD_SUB_TYPE_8822B(x) \
- (((x) >> BIT_SHIFT_UD_SUB_TYPE_8822B) & BIT_MASK_UD_SUB_TYPE_8822B)
-
-#define BIT_SHIFT_UD_TYPE_8822B 16
-#define BIT_MASK_UD_TYPE_8822B 0x3
-#define BIT_UD_TYPE_8822B(x) \
- (((x) & BIT_MASK_UD_TYPE_8822B) << BIT_SHIFT_UD_TYPE_8822B)
-#define BIT_GET_UD_TYPE_8822B(x) \
- (((x) >> BIT_SHIFT_UD_TYPE_8822B) & BIT_MASK_UD_TYPE_8822B)
-
-#define BIT_SHIFT_RPT_COUNTER_8822B 0
-#define BIT_MASK_RPT_COUNTER_8822B 0xffff
-#define BIT_RPT_COUNTER_8822B(x) \
- (((x) & BIT_MASK_RPT_COUNTER_8822B) << BIT_SHIFT_RPT_COUNTER_8822B)
-#define BIT_GET_RPT_COUNTER_8822B(x) \
- (((x) >> BIT_SHIFT_RPT_COUNTER_8822B) & BIT_MASK_RPT_COUNTER_8822B)
-
-/* 2 REG_WMAC_TRXPTCL_CTL_8822B (WMAC TX/RX PROTOCOL CONTROL REGISTER) */
-
-#define BIT_SHIFT_ACKBA_TYPSEL_8822B (60 & CPU_OPT_WIDTH)
-#define BIT_MASK_ACKBA_TYPSEL_8822B 0xf
-#define BIT_ACKBA_TYPSEL_8822B(x) \
- (((x) & BIT_MASK_ACKBA_TYPSEL_8822B) << BIT_SHIFT_ACKBA_TYPSEL_8822B)
-#define BIT_GET_ACKBA_TYPSEL_8822B(x) \
- (((x) >> BIT_SHIFT_ACKBA_TYPSEL_8822B) & BIT_MASK_ACKBA_TYPSEL_8822B)
-
-#define BIT_SHIFT_ACKBA_ACKPCHK_8822B (56 & CPU_OPT_WIDTH)
-#define BIT_MASK_ACKBA_ACKPCHK_8822B 0xf
-#define BIT_ACKBA_ACKPCHK_8822B(x) \
- (((x) & BIT_MASK_ACKBA_ACKPCHK_8822B) << BIT_SHIFT_ACKBA_ACKPCHK_8822B)
-#define BIT_GET_ACKBA_ACKPCHK_8822B(x) \
- (((x) >> BIT_SHIFT_ACKBA_ACKPCHK_8822B) & BIT_MASK_ACKBA_ACKPCHK_8822B)
-
-#define BIT_SHIFT_ACKBAR_TYPESEL_8822B (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_ACKBAR_TYPESEL_8822B 0xff
-#define BIT_ACKBAR_TYPESEL_8822B(x) \
- (((x) & BIT_MASK_ACKBAR_TYPESEL_8822B) \
- << BIT_SHIFT_ACKBAR_TYPESEL_8822B)
-#define BIT_GET_ACKBAR_TYPESEL_8822B(x) \
- (((x) >> BIT_SHIFT_ACKBAR_TYPESEL_8822B) & \
- BIT_MASK_ACKBAR_TYPESEL_8822B)
-
-#define BIT_SHIFT_ACKBAR_ACKPCHK_8822B (44 & CPU_OPT_WIDTH)
-#define BIT_MASK_ACKBAR_ACKPCHK_8822B 0xf
-#define BIT_ACKBAR_ACKPCHK_8822B(x) \
- (((x) & BIT_MASK_ACKBAR_ACKPCHK_8822B) \
- << BIT_SHIFT_ACKBAR_ACKPCHK_8822B)
-#define BIT_GET_ACKBAR_ACKPCHK_8822B(x) \
- (((x) >> BIT_SHIFT_ACKBAR_ACKPCHK_8822B) & \
- BIT_MASK_ACKBAR_ACKPCHK_8822B)
-
-#define BIT_RXBA_IGNOREA2_8822B BIT(42)
-#define BIT_EN_SAVE_ALL_TXOPADDR_8822B BIT(41)
-#define BIT_EN_TXCTS_TO_TXOPOWNER_INRXNAV_8822B BIT(40)
-#define BIT_DIS_TXBA_AMPDUFCSERR_8822B BIT(39)
-#define BIT_DIS_TXBA_RXBARINFULL_8822B BIT(38)
-#define BIT_DIS_TXCFE_INFULL_8822B BIT(37)
-#define BIT_DIS_TXCTS_INFULL_8822B BIT(36)
-#define BIT_EN_TXACKBA_IN_TX_RDG_8822B BIT(35)
-#define BIT_EN_TXACKBA_IN_TXOP_8822B BIT(34)
-#define BIT_EN_TXCTS_IN_RXNAV_8822B BIT(33)
-#define BIT_EN_TXCTS_INTXOP_8822B BIT(32)
-#define BIT_BLK_EDCA_BBSLP_8822B BIT(31)
-#define BIT_BLK_EDCA_BBSBY_8822B BIT(30)
-#define BIT_ACKTO_BLOCK_SCH_EN_8822B BIT(27)
-#define BIT_EIFS_BLOCK_SCH_EN_8822B BIT(26)
-#define BIT_PLCPCHK_RST_EIFS_8822B BIT(25)
-#define BIT_CCA_RST_EIFS_8822B BIT(24)
-#define BIT_DIS_UPD_MYRXPKTNAV_8822B BIT(23)
-#define BIT_EARLY_TXBA_8822B BIT(22)
-
-#define BIT_SHIFT_RESP_CHNBUSY_8822B 20
-#define BIT_MASK_RESP_CHNBUSY_8822B 0x3
-#define BIT_RESP_CHNBUSY_8822B(x) \
- (((x) & BIT_MASK_RESP_CHNBUSY_8822B) << BIT_SHIFT_RESP_CHNBUSY_8822B)
-#define BIT_GET_RESP_CHNBUSY_8822B(x) \
- (((x) >> BIT_SHIFT_RESP_CHNBUSY_8822B) & BIT_MASK_RESP_CHNBUSY_8822B)
-
-#define BIT_RESP_DCTS_EN_8822B BIT(19)
-#define BIT_RESP_DCFE_EN_8822B BIT(18)
-#define BIT_RESP_SPLCPEN_8822B BIT(17)
-#define BIT_RESP_SGIEN_8822B BIT(16)
-#define BIT_RESP_LDPC_EN_8822B BIT(15)
-#define BIT_DIS_RESP_ACKINCCA_8822B BIT(14)
-#define BIT_DIS_RESP_CTSINCCA_8822B BIT(13)
-
-#define BIT_SHIFT_R_WMAC_SECOND_CCA_TIMER_8822B 10
-#define BIT_MASK_R_WMAC_SECOND_CCA_TIMER_8822B 0x7
-#define BIT_R_WMAC_SECOND_CCA_TIMER_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_SECOND_CCA_TIMER_8822B) \
- << BIT_SHIFT_R_WMAC_SECOND_CCA_TIMER_8822B)
-#define BIT_GET_R_WMAC_SECOND_CCA_TIMER_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_SECOND_CCA_TIMER_8822B) & \
- BIT_MASK_R_WMAC_SECOND_CCA_TIMER_8822B)
-
-#define BIT_SHIFT_RFMOD_8822B 7
-#define BIT_MASK_RFMOD_8822B 0x3
-#define BIT_RFMOD_8822B(x) \
- (((x) & BIT_MASK_RFMOD_8822B) << BIT_SHIFT_RFMOD_8822B)
-#define BIT_GET_RFMOD_8822B(x) \
- (((x) >> BIT_SHIFT_RFMOD_8822B) & BIT_MASK_RFMOD_8822B)
-
-#define BIT_SHIFT_RESP_CTS_DYNBW_SEL_8822B 5
-#define BIT_MASK_RESP_CTS_DYNBW_SEL_8822B 0x3
-#define BIT_RESP_CTS_DYNBW_SEL_8822B(x) \
- (((x) & BIT_MASK_RESP_CTS_DYNBW_SEL_8822B) \
- << BIT_SHIFT_RESP_CTS_DYNBW_SEL_8822B)
-#define BIT_GET_RESP_CTS_DYNBW_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_RESP_CTS_DYNBW_SEL_8822B) & \
- BIT_MASK_RESP_CTS_DYNBW_SEL_8822B)
-
-#define BIT_DLY_TX_WAIT_RXANTSEL_8822B BIT(4)
-#define BIT_TXRESP_BY_RXANTSEL_8822B BIT(3)
-
-#define BIT_SHIFT_ORIG_DCTS_CHK_8822B 0
-#define BIT_MASK_ORIG_DCTS_CHK_8822B 0x3
-#define BIT_ORIG_DCTS_CHK_8822B(x) \
- (((x) & BIT_MASK_ORIG_DCTS_CHK_8822B) << BIT_SHIFT_ORIG_DCTS_CHK_8822B)
-#define BIT_GET_ORIG_DCTS_CHK_8822B(x) \
- (((x) >> BIT_SHIFT_ORIG_DCTS_CHK_8822B) & BIT_MASK_ORIG_DCTS_CHK_8822B)
-
-/* 2 REG_CAMCMD_8822B (CAM COMMAND REGISTER) */
-#define BIT_SECCAM_POLLING_8822B BIT(31)
-#define BIT_SECCAM_CLR_8822B BIT(30)
-#define BIT_MFBCAM_CLR_8822B BIT(29)
-#define BIT_SECCAM_WE_8822B BIT(16)
-
-#define BIT_SHIFT_SECCAM_ADDR_V2_8822B 0
-#define BIT_MASK_SECCAM_ADDR_V2_8822B 0x3ff
-#define BIT_SECCAM_ADDR_V2_8822B(x) \
- (((x) & BIT_MASK_SECCAM_ADDR_V2_8822B) \
- << BIT_SHIFT_SECCAM_ADDR_V2_8822B)
-#define BIT_GET_SECCAM_ADDR_V2_8822B(x) \
- (((x) >> BIT_SHIFT_SECCAM_ADDR_V2_8822B) & \
- BIT_MASK_SECCAM_ADDR_V2_8822B)
-
-/* 2 REG_CAMWRITE_8822B (CAM WRITE REGISTER) */
-
-#define BIT_SHIFT_CAMW_DATA_8822B 0
-#define BIT_MASK_CAMW_DATA_8822B 0xffffffffL
-#define BIT_CAMW_DATA_8822B(x) \
- (((x) & BIT_MASK_CAMW_DATA_8822B) << BIT_SHIFT_CAMW_DATA_8822B)
-#define BIT_GET_CAMW_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_CAMW_DATA_8822B) & BIT_MASK_CAMW_DATA_8822B)
-
-/* 2 REG_CAMREAD_8822B (CAM READ REGISTER) */
-
-#define BIT_SHIFT_CAMR_DATA_8822B 0
-#define BIT_MASK_CAMR_DATA_8822B 0xffffffffL
-#define BIT_CAMR_DATA_8822B(x) \
- (((x) & BIT_MASK_CAMR_DATA_8822B) << BIT_SHIFT_CAMR_DATA_8822B)
-#define BIT_GET_CAMR_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_CAMR_DATA_8822B) & BIT_MASK_CAMR_DATA_8822B)
-
-/* 2 REG_CAMDBG_8822B (CAM DEBUG REGISTER) */
-#define BIT_SECCAM_INFO_8822B BIT(31)
-#define BIT_SEC_KEYFOUND_8822B BIT(15)
-
-#define BIT_SHIFT_CAMDBG_SEC_TYPE_8822B 12
-#define BIT_MASK_CAMDBG_SEC_TYPE_8822B 0x7
-#define BIT_CAMDBG_SEC_TYPE_8822B(x) \
- (((x) & BIT_MASK_CAMDBG_SEC_TYPE_8822B) \
- << BIT_SHIFT_CAMDBG_SEC_TYPE_8822B)
-#define BIT_GET_CAMDBG_SEC_TYPE_8822B(x) \
- (((x) >> BIT_SHIFT_CAMDBG_SEC_TYPE_8822B) & \
- BIT_MASK_CAMDBG_SEC_TYPE_8822B)
-
-#define BIT_CAMDBG_EXT_SECTYPE_8822B BIT(11)
-
-#define BIT_SHIFT_CAMDBG_MIC_KEY_IDX_8822B 5
-#define BIT_MASK_CAMDBG_MIC_KEY_IDX_8822B 0x1f
-#define BIT_CAMDBG_MIC_KEY_IDX_8822B(x) \
- (((x) & BIT_MASK_CAMDBG_MIC_KEY_IDX_8822B) \
- << BIT_SHIFT_CAMDBG_MIC_KEY_IDX_8822B)
-#define BIT_GET_CAMDBG_MIC_KEY_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_CAMDBG_MIC_KEY_IDX_8822B) & \
- BIT_MASK_CAMDBG_MIC_KEY_IDX_8822B)
-
-#define BIT_SHIFT_CAMDBG_SEC_KEY_IDX_8822B 0
-#define BIT_MASK_CAMDBG_SEC_KEY_IDX_8822B 0x1f
-#define BIT_CAMDBG_SEC_KEY_IDX_8822B(x) \
- (((x) & BIT_MASK_CAMDBG_SEC_KEY_IDX_8822B) \
- << BIT_SHIFT_CAMDBG_SEC_KEY_IDX_8822B)
-#define BIT_GET_CAMDBG_SEC_KEY_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_CAMDBG_SEC_KEY_IDX_8822B) & \
- BIT_MASK_CAMDBG_SEC_KEY_IDX_8822B)
-
-/* 2 REG_RXFILTER_ACTION_1_8822B */
-
-#define BIT_SHIFT_RXFILTER_ACTION_1_8822B 0
-#define BIT_MASK_RXFILTER_ACTION_1_8822B 0xff
-#define BIT_RXFILTER_ACTION_1_8822B(x) \
- (((x) & BIT_MASK_RXFILTER_ACTION_1_8822B) \
- << BIT_SHIFT_RXFILTER_ACTION_1_8822B)
-#define BIT_GET_RXFILTER_ACTION_1_8822B(x) \
- (((x) >> BIT_SHIFT_RXFILTER_ACTION_1_8822B) & \
- BIT_MASK_RXFILTER_ACTION_1_8822B)
-
-/* 2 REG_RXFILTER_CATEGORY_1_8822B */
-
-#define BIT_SHIFT_RXFILTER_CATEGORY_1_8822B 0
-#define BIT_MASK_RXFILTER_CATEGORY_1_8822B 0xff
-#define BIT_RXFILTER_CATEGORY_1_8822B(x) \
- (((x) & BIT_MASK_RXFILTER_CATEGORY_1_8822B) \
- << BIT_SHIFT_RXFILTER_CATEGORY_1_8822B)
-#define BIT_GET_RXFILTER_CATEGORY_1_8822B(x) \
- (((x) >> BIT_SHIFT_RXFILTER_CATEGORY_1_8822B) & \
- BIT_MASK_RXFILTER_CATEGORY_1_8822B)
-
-/* 2 REG_SECCFG_8822B (SECURITY CONFIGURATION REGISTER) */
-#define BIT_DIS_GCLK_WAPI_8822B BIT(15)
-#define BIT_DIS_GCLK_AES_8822B BIT(14)
-#define BIT_DIS_GCLK_TKIP_8822B BIT(13)
-#define BIT_AES_SEL_QC_1_8822B BIT(12)
-#define BIT_AES_SEL_QC_0_8822B BIT(11)
-#define BIT_CHK_BMC_8822B BIT(9)
-#define BIT_CHK_KEYID_8822B BIT(8)
-#define BIT_RXBCUSEDK_8822B BIT(7)
-#define BIT_TXBCUSEDK_8822B BIT(6)
-#define BIT_NOSKMC_8822B BIT(5)
-#define BIT_SKBYA2_8822B BIT(4)
-#define BIT_RXDEC_8822B BIT(3)
-#define BIT_TXENC_8822B BIT(2)
-#define BIT_RXUHUSEDK_8822B BIT(1)
-#define BIT_TXUHUSEDK_8822B BIT(0)
-
-/* 2 REG_RXFILTER_ACTION_3_8822B */
-
-#define BIT_SHIFT_RXFILTER_ACTION_3_8822B 0
-#define BIT_MASK_RXFILTER_ACTION_3_8822B 0xff
-#define BIT_RXFILTER_ACTION_3_8822B(x) \
- (((x) & BIT_MASK_RXFILTER_ACTION_3_8822B) \
- << BIT_SHIFT_RXFILTER_ACTION_3_8822B)
-#define BIT_GET_RXFILTER_ACTION_3_8822B(x) \
- (((x) >> BIT_SHIFT_RXFILTER_ACTION_3_8822B) & \
- BIT_MASK_RXFILTER_ACTION_3_8822B)
-
-/* 2 REG_RXFILTER_CATEGORY_3_8822B */
-
-#define BIT_SHIFT_RXFILTER_CATEGORY_3_8822B 0
-#define BIT_MASK_RXFILTER_CATEGORY_3_8822B 0xff
-#define BIT_RXFILTER_CATEGORY_3_8822B(x) \
- (((x) & BIT_MASK_RXFILTER_CATEGORY_3_8822B) \
- << BIT_SHIFT_RXFILTER_CATEGORY_3_8822B)
-#define BIT_GET_RXFILTER_CATEGORY_3_8822B(x) \
- (((x) >> BIT_SHIFT_RXFILTER_CATEGORY_3_8822B) & \
- BIT_MASK_RXFILTER_CATEGORY_3_8822B)
-
-/* 2 REG_RXFILTER_ACTION_2_8822B */
-
-#define BIT_SHIFT_RXFILTER_ACTION_2_8822B 0
-#define BIT_MASK_RXFILTER_ACTION_2_8822B 0xff
-#define BIT_RXFILTER_ACTION_2_8822B(x) \
- (((x) & BIT_MASK_RXFILTER_ACTION_2_8822B) \
- << BIT_SHIFT_RXFILTER_ACTION_2_8822B)
-#define BIT_GET_RXFILTER_ACTION_2_8822B(x) \
- (((x) >> BIT_SHIFT_RXFILTER_ACTION_2_8822B) & \
- BIT_MASK_RXFILTER_ACTION_2_8822B)
-
-/* 2 REG_RXFILTER_CATEGORY_2_8822B */
-
-#define BIT_SHIFT_RXFILTER_CATEGORY_2_8822B 0
-#define BIT_MASK_RXFILTER_CATEGORY_2_8822B 0xff
-#define BIT_RXFILTER_CATEGORY_2_8822B(x) \
- (((x) & BIT_MASK_RXFILTER_CATEGORY_2_8822B) \
- << BIT_SHIFT_RXFILTER_CATEGORY_2_8822B)
-#define BIT_GET_RXFILTER_CATEGORY_2_8822B(x) \
- (((x) >> BIT_SHIFT_RXFILTER_CATEGORY_2_8822B) & \
- BIT_MASK_RXFILTER_CATEGORY_2_8822B)
-
-/* 2 REG_RXFLTMAP4_8822B (RX FILTER MAP GROUP 4) */
-#define BIT_CTRLFLT15EN_FW_8822B BIT(15)
-#define BIT_CTRLFLT14EN_FW_8822B BIT(14)
-#define BIT_CTRLFLT13EN_FW_8822B BIT(13)
-#define BIT_CTRLFLT12EN_FW_8822B BIT(12)
-#define BIT_CTRLFLT11EN_FW_8822B BIT(11)
-#define BIT_CTRLFLT10EN_FW_8822B BIT(10)
-#define BIT_CTRLFLT9EN_FW_8822B BIT(9)
-#define BIT_CTRLFLT8EN_FW_8822B BIT(8)
-#define BIT_CTRLFLT7EN_FW_8822B BIT(7)
-#define BIT_CTRLFLT6EN_FW_8822B BIT(6)
-#define BIT_CTRLFLT5EN_FW_8822B BIT(5)
-#define BIT_CTRLFLT4EN_FW_8822B BIT(4)
-#define BIT_CTRLFLT3EN_FW_8822B BIT(3)
-#define BIT_CTRLFLT2EN_FW_8822B BIT(2)
-#define BIT_CTRLFLT1EN_FW_8822B BIT(1)
-#define BIT_CTRLFLT0EN_FW_8822B BIT(0)
-
-/* 2 REG_RXFLTMAP3_8822B (RX FILTER MAP GROUP 3) */
-#define BIT_MGTFLT15EN_FW_8822B BIT(15)
-#define BIT_MGTFLT14EN_FW_8822B BIT(14)
-#define BIT_MGTFLT13EN_FW_8822B BIT(13)
-#define BIT_MGTFLT12EN_FW_8822B BIT(12)
-#define BIT_MGTFLT11EN_FW_8822B BIT(11)
-#define BIT_MGTFLT10EN_FW_8822B BIT(10)
-#define BIT_MGTFLT9EN_FW_8822B BIT(9)
-#define BIT_MGTFLT8EN_FW_8822B BIT(8)
-#define BIT_MGTFLT7EN_FW_8822B BIT(7)
-#define BIT_MGTFLT6EN_FW_8822B BIT(6)
-#define BIT_MGTFLT5EN_FW_8822B BIT(5)
-#define BIT_MGTFLT4EN_FW_8822B BIT(4)
-#define BIT_MGTFLT3EN_FW_8822B BIT(3)
-#define BIT_MGTFLT2EN_FW_8822B BIT(2)
-#define BIT_MGTFLT1EN_FW_8822B BIT(1)
-#define BIT_MGTFLT0EN_FW_8822B BIT(0)
-
-/* 2 REG_RXFLTMAP6_8822B (RX FILTER MAP GROUP 6) */
-#define BIT_ACTIONFLT15EN_FW_8822B BIT(15)
-#define BIT_ACTIONFLT14EN_FW_8822B BIT(14)
-#define BIT_ACTIONFLT13EN_FW_8822B BIT(13)
-#define BIT_ACTIONFLT12EN_FW_8822B BIT(12)
-#define BIT_ACTIONFLT11EN_FW_8822B BIT(11)
-#define BIT_ACTIONFLT10EN_FW_8822B BIT(10)
-#define BIT_ACTIONFLT9EN_FW_8822B BIT(9)
-#define BIT_ACTIONFLT8EN_FW_8822B BIT(8)
-#define BIT_ACTIONFLT7EN_FW_8822B BIT(7)
-#define BIT_ACTIONFLT6EN_FW_8822B BIT(6)
-#define BIT_ACTIONFLT5EN_FW_8822B BIT(5)
-#define BIT_ACTIONFLT4EN_FW_8822B BIT(4)
-#define BIT_ACTIONFLT3EN_FW_8822B BIT(3)
-#define BIT_ACTIONFLT2EN_FW_8822B BIT(2)
-#define BIT_ACTIONFLT1EN_FW_8822B BIT(1)
-#define BIT_ACTIONFLT0EN_FW_8822B BIT(0)
-
-/* 2 REG_RXFLTMAP5_8822B (RX FILTER MAP GROUP 5) */
-#define BIT_DATAFLT15EN_FW_8822B BIT(15)
-#define BIT_DATAFLT14EN_FW_8822B BIT(14)
-#define BIT_DATAFLT13EN_FW_8822B BIT(13)
-#define BIT_DATAFLT12EN_FW_8822B BIT(12)
-#define BIT_DATAFLT11EN_FW_8822B BIT(11)
-#define BIT_DATAFLT10EN_FW_8822B BIT(10)
-#define BIT_DATAFLT9EN_FW_8822B BIT(9)
-#define BIT_DATAFLT8EN_FW_8822B BIT(8)
-#define BIT_DATAFLT7EN_FW_8822B BIT(7)
-#define BIT_DATAFLT6EN_FW_8822B BIT(6)
-#define BIT_DATAFLT5EN_FW_8822B BIT(5)
-#define BIT_DATAFLT4EN_FW_8822B BIT(4)
-#define BIT_DATAFLT3EN_FW_8822B BIT(3)
-#define BIT_DATAFLT2EN_FW_8822B BIT(2)
-#define BIT_DATAFLT1EN_FW_8822B BIT(1)
-#define BIT_DATAFLT0EN_FW_8822B BIT(0)
-
-/* 2 REG_WMMPS_UAPSD_TID_8822B (WMM POWER SAVE UAPSD TID REGISTER) */
-#define BIT_WMMPS_UAPSD_TID7_8822B BIT(7)
-#define BIT_WMMPS_UAPSD_TID6_8822B BIT(6)
-#define BIT_WMMPS_UAPSD_TID5_8822B BIT(5)
-#define BIT_WMMPS_UAPSD_TID4_8822B BIT(4)
-#define BIT_WMMPS_UAPSD_TID3_8822B BIT(3)
-#define BIT_WMMPS_UAPSD_TID2_8822B BIT(2)
-#define BIT_WMMPS_UAPSD_TID1_8822B BIT(1)
-#define BIT_WMMPS_UAPSD_TID0_8822B BIT(0)
-
-/* 2 REG_PS_RX_INFO_8822B (POWER SAVE RX INFORMATION REGISTER) */
-
-#define BIT_SHIFT_PORTSEL__PS_RX_INFO_8822B 5
-#define BIT_MASK_PORTSEL__PS_RX_INFO_8822B 0x7
-#define BIT_PORTSEL__PS_RX_INFO_8822B(x) \
- (((x) & BIT_MASK_PORTSEL__PS_RX_INFO_8822B) \
- << BIT_SHIFT_PORTSEL__PS_RX_INFO_8822B)
-#define BIT_GET_PORTSEL__PS_RX_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_PORTSEL__PS_RX_INFO_8822B) & \
- BIT_MASK_PORTSEL__PS_RX_INFO_8822B)
-
-#define BIT_RXCTRLIN0_8822B BIT(4)
-#define BIT_RXMGTIN0_8822B BIT(3)
-#define BIT_RXDATAIN2_8822B BIT(2)
-#define BIT_RXDATAIN1_8822B BIT(1)
-#define BIT_RXDATAIN0_8822B BIT(0)
-
-/* 2 REG_NAN_RX_TSF_FILTER_8822B (NAN_RX_TSF_ADDRESS_FILTER) */
-#define BIT_CHK_TSF_TA_8822B BIT(2)
-#define BIT_CHK_TSF_CBSSID_8822B BIT(1)
-#define BIT_CHK_TSF_EN_8822B BIT(0)
-
-/* 2 REG_WOW_CTRL_8822B (WAKE ON WLAN CONTROL REGISTER) */
-
-#define BIT_SHIFT_PSF_BSSIDSEL_B2B1_8822B 6
-#define BIT_MASK_PSF_BSSIDSEL_B2B1_8822B 0x3
-#define BIT_PSF_BSSIDSEL_B2B1_8822B(x) \
- (((x) & BIT_MASK_PSF_BSSIDSEL_B2B1_8822B) \
- << BIT_SHIFT_PSF_BSSIDSEL_B2B1_8822B)
-#define BIT_GET_PSF_BSSIDSEL_B2B1_8822B(x) \
- (((x) >> BIT_SHIFT_PSF_BSSIDSEL_B2B1_8822B) & \
- BIT_MASK_PSF_BSSIDSEL_B2B1_8822B)
-
-#define BIT_WOWHCI_8822B BIT(5)
-#define BIT_PSF_BSSIDSEL_B0_8822B BIT(4)
-#define BIT_UWF_8822B BIT(3)
-#define BIT_MAGIC_8822B BIT(2)
-#define BIT_WOWEN_8822B BIT(1)
-#define BIT_FORCE_WAKEUP_8822B BIT(0)
-
-/* 2 REG_LPNAV_CTRL_8822B (LOW POWER NAV CONTROL REGISTER) */
-#define BIT_LPNAV_EN_8822B BIT(31)
-
-#define BIT_SHIFT_LPNAV_EARLY_8822B 16
-#define BIT_MASK_LPNAV_EARLY_8822B 0x7fff
-#define BIT_LPNAV_EARLY_8822B(x) \
- (((x) & BIT_MASK_LPNAV_EARLY_8822B) << BIT_SHIFT_LPNAV_EARLY_8822B)
-#define BIT_GET_LPNAV_EARLY_8822B(x) \
- (((x) >> BIT_SHIFT_LPNAV_EARLY_8822B) & BIT_MASK_LPNAV_EARLY_8822B)
-
-#define BIT_SHIFT_LPNAV_TH_8822B 0
-#define BIT_MASK_LPNAV_TH_8822B 0xffff
-#define BIT_LPNAV_TH_8822B(x) \
- (((x) & BIT_MASK_LPNAV_TH_8822B) << BIT_SHIFT_LPNAV_TH_8822B)
-#define BIT_GET_LPNAV_TH_8822B(x) \
- (((x) >> BIT_SHIFT_LPNAV_TH_8822B) & BIT_MASK_LPNAV_TH_8822B)
-
-/* 2 REG_WKFMCAM_CMD_8822B (WAKEUP FRAME CAM COMMAND REGISTER) */
-#define BIT_WKFCAM_POLLING_V1_8822B BIT(31)
-#define BIT_WKFCAM_CLR_V1_8822B BIT(30)
-#define BIT_WKFCAM_WE_8822B BIT(16)
-
-#define BIT_SHIFT_WKFCAM_ADDR_V2_8822B 8
-#define BIT_MASK_WKFCAM_ADDR_V2_8822B 0xff
-#define BIT_WKFCAM_ADDR_V2_8822B(x) \
- (((x) & BIT_MASK_WKFCAM_ADDR_V2_8822B) \
- << BIT_SHIFT_WKFCAM_ADDR_V2_8822B)
-#define BIT_GET_WKFCAM_ADDR_V2_8822B(x) \
- (((x) >> BIT_SHIFT_WKFCAM_ADDR_V2_8822B) & \
- BIT_MASK_WKFCAM_ADDR_V2_8822B)
-
-#define BIT_SHIFT_WKFCAM_CAM_NUM_V1_8822B 0
-#define BIT_MASK_WKFCAM_CAM_NUM_V1_8822B 0xff
-#define BIT_WKFCAM_CAM_NUM_V1_8822B(x) \
- (((x) & BIT_MASK_WKFCAM_CAM_NUM_V1_8822B) \
- << BIT_SHIFT_WKFCAM_CAM_NUM_V1_8822B)
-#define BIT_GET_WKFCAM_CAM_NUM_V1_8822B(x) \
- (((x) >> BIT_SHIFT_WKFCAM_CAM_NUM_V1_8822B) & \
- BIT_MASK_WKFCAM_CAM_NUM_V1_8822B)
-
-/* 2 REG_WKFMCAM_RWD_8822B (WAKEUP FRAME READ/WRITE DATA) */
-
-#define BIT_SHIFT_WKFMCAM_RWD_8822B 0
-#define BIT_MASK_WKFMCAM_RWD_8822B 0xffffffffL
-#define BIT_WKFMCAM_RWD_8822B(x) \
- (((x) & BIT_MASK_WKFMCAM_RWD_8822B) << BIT_SHIFT_WKFMCAM_RWD_8822B)
-#define BIT_GET_WKFMCAM_RWD_8822B(x) \
- (((x) >> BIT_SHIFT_WKFMCAM_RWD_8822B) & BIT_MASK_WKFMCAM_RWD_8822B)
-
-/* 2 REG_RXFLTMAP1_8822B (RX FILTER MAP GROUP 1) */
-#define BIT_CTRLFLT15EN_8822B BIT(15)
-#define BIT_CTRLFLT14EN_8822B BIT(14)
-#define BIT_CTRLFLT13EN_8822B BIT(13)
-#define BIT_CTRLFLT12EN_8822B BIT(12)
-#define BIT_CTRLFLT11EN_8822B BIT(11)
-#define BIT_CTRLFLT10EN_8822B BIT(10)
-#define BIT_CTRLFLT9EN_8822B BIT(9)
-#define BIT_CTRLFLT8EN_8822B BIT(8)
-#define BIT_CTRLFLT7EN_8822B BIT(7)
-#define BIT_CTRLFLT6EN_8822B BIT(6)
-#define BIT_CTRLFLT5EN_8822B BIT(5)
-#define BIT_CTRLFLT4EN_8822B BIT(4)
-#define BIT_CTRLFLT3EN_8822B BIT(3)
-#define BIT_CTRLFLT2EN_8822B BIT(2)
-#define BIT_CTRLFLT1EN_8822B BIT(1)
-#define BIT_CTRLFLT0EN_8822B BIT(0)
-
-/* 2 REG_RXFLTMAP0_8822B (RX FILTER MAP GROUP 0) */
-#define BIT_MGTFLT15EN_8822B BIT(15)
-#define BIT_MGTFLT14EN_8822B BIT(14)
-#define BIT_MGTFLT13EN_8822B BIT(13)
-#define BIT_MGTFLT12EN_8822B BIT(12)
-#define BIT_MGTFLT11EN_8822B BIT(11)
-#define BIT_MGTFLT10EN_8822B BIT(10)
-#define BIT_MGTFLT9EN_8822B BIT(9)
-#define BIT_MGTFLT8EN_8822B BIT(8)
-#define BIT_MGTFLT7EN_8822B BIT(7)
-#define BIT_MGTFLT6EN_8822B BIT(6)
-#define BIT_MGTFLT5EN_8822B BIT(5)
-#define BIT_MGTFLT4EN_8822B BIT(4)
-#define BIT_MGTFLT3EN_8822B BIT(3)
-#define BIT_MGTFLT2EN_8822B BIT(2)
-#define BIT_MGTFLT1EN_8822B BIT(1)
-#define BIT_MGTFLT0EN_8822B BIT(0)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_RXFLTMAP_8822B (RX FILTER MAP GROUP 2) */
-#define BIT_DATAFLT15EN_8822B BIT(15)
-#define BIT_DATAFLT14EN_8822B BIT(14)
-#define BIT_DATAFLT13EN_8822B BIT(13)
-#define BIT_DATAFLT12EN_8822B BIT(12)
-#define BIT_DATAFLT11EN_8822B BIT(11)
-#define BIT_DATAFLT10EN_8822B BIT(10)
-#define BIT_DATAFLT9EN_8822B BIT(9)
-#define BIT_DATAFLT8EN_8822B BIT(8)
-#define BIT_DATAFLT7EN_8822B BIT(7)
-#define BIT_DATAFLT6EN_8822B BIT(6)
-#define BIT_DATAFLT5EN_8822B BIT(5)
-#define BIT_DATAFLT4EN_8822B BIT(4)
-#define BIT_DATAFLT3EN_8822B BIT(3)
-#define BIT_DATAFLT2EN_8822B BIT(2)
-#define BIT_DATAFLT1EN_8822B BIT(1)
-#define BIT_DATAFLT0EN_8822B BIT(0)
-
-/* 2 REG_BCN_PSR_RPT_8822B (BEACON PARSER REPORT REGISTER) */
-
-#define BIT_SHIFT_DTIM_CNT_8822B 24
-#define BIT_MASK_DTIM_CNT_8822B 0xff
-#define BIT_DTIM_CNT_8822B(x) \
- (((x) & BIT_MASK_DTIM_CNT_8822B) << BIT_SHIFT_DTIM_CNT_8822B)
-#define BIT_GET_DTIM_CNT_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_CNT_8822B) & BIT_MASK_DTIM_CNT_8822B)
-
-#define BIT_SHIFT_DTIM_PERIOD_8822B 16
-#define BIT_MASK_DTIM_PERIOD_8822B 0xff
-#define BIT_DTIM_PERIOD_8822B(x) \
- (((x) & BIT_MASK_DTIM_PERIOD_8822B) << BIT_SHIFT_DTIM_PERIOD_8822B)
-#define BIT_GET_DTIM_PERIOD_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD_8822B) & BIT_MASK_DTIM_PERIOD_8822B)
-
-#define BIT_DTIM_8822B BIT(15)
-#define BIT_TIM_8822B BIT(14)
-
-#define BIT_SHIFT_PS_AID_0_8822B 0
-#define BIT_MASK_PS_AID_0_8822B 0x7ff
-#define BIT_PS_AID_0_8822B(x) \
- (((x) & BIT_MASK_PS_AID_0_8822B) << BIT_SHIFT_PS_AID_0_8822B)
-#define BIT_GET_PS_AID_0_8822B(x) \
- (((x) >> BIT_SHIFT_PS_AID_0_8822B) & BIT_MASK_PS_AID_0_8822B)
-
-/* 2 REG_FLC_TRPC_8822B (TIMER OF FLC_RPC) */
-#define BIT_FLC_RPCT_V1_8822B BIT(7)
-#define BIT_MODE_8822B BIT(6)
-
-#define BIT_SHIFT_TRPCD_8822B 0
-#define BIT_MASK_TRPCD_8822B 0x3f
-#define BIT_TRPCD_8822B(x) \
- (((x) & BIT_MASK_TRPCD_8822B) << BIT_SHIFT_TRPCD_8822B)
-#define BIT_GET_TRPCD_8822B(x) \
- (((x) >> BIT_SHIFT_TRPCD_8822B) & BIT_MASK_TRPCD_8822B)
-
-/* 2 REG_FLC_PTS_8822B (PKT TYPE SELECTION OF FLC_RPC T) */
-#define BIT_CMF_8822B BIT(2)
-#define BIT_CCF_8822B BIT(1)
-#define BIT_CDF_8822B BIT(0)
-
-/* 2 REG_FLC_RPCT_8822B (FLC_RPC THRESHOLD) */
-
-#define BIT_SHIFT_FLC_RPCT_8822B 0
-#define BIT_MASK_FLC_RPCT_8822B 0xff
-#define BIT_FLC_RPCT_8822B(x) \
- (((x) & BIT_MASK_FLC_RPCT_8822B) << BIT_SHIFT_FLC_RPCT_8822B)
-#define BIT_GET_FLC_RPCT_8822B(x) \
- (((x) >> BIT_SHIFT_FLC_RPCT_8822B) & BIT_MASK_FLC_RPCT_8822B)
-
-/* 2 REG_FLC_RPC_8822B (FW LPS CONDITION -- RX PKT COUNTER) */
-
-#define BIT_SHIFT_FLC_RPC_8822B 0
-#define BIT_MASK_FLC_RPC_8822B 0xff
-#define BIT_FLC_RPC_8822B(x) \
- (((x) & BIT_MASK_FLC_RPC_8822B) << BIT_SHIFT_FLC_RPC_8822B)
-#define BIT_GET_FLC_RPC_8822B(x) \
- (((x) >> BIT_SHIFT_FLC_RPC_8822B) & BIT_MASK_FLC_RPC_8822B)
-
-/* 2 REG_RXPKTMON_CTRL_8822B */
-
-#define BIT_SHIFT_RXBKQPKT_SEQ_8822B 20
-#define BIT_MASK_RXBKQPKT_SEQ_8822B 0xf
-#define BIT_RXBKQPKT_SEQ_8822B(x) \
- (((x) & BIT_MASK_RXBKQPKT_SEQ_8822B) << BIT_SHIFT_RXBKQPKT_SEQ_8822B)
-#define BIT_GET_RXBKQPKT_SEQ_8822B(x) \
- (((x) >> BIT_SHIFT_RXBKQPKT_SEQ_8822B) & BIT_MASK_RXBKQPKT_SEQ_8822B)
-
-#define BIT_SHIFT_RXBEQPKT_SEQ_8822B 16
-#define BIT_MASK_RXBEQPKT_SEQ_8822B 0xf
-#define BIT_RXBEQPKT_SEQ_8822B(x) \
- (((x) & BIT_MASK_RXBEQPKT_SEQ_8822B) << BIT_SHIFT_RXBEQPKT_SEQ_8822B)
-#define BIT_GET_RXBEQPKT_SEQ_8822B(x) \
- (((x) >> BIT_SHIFT_RXBEQPKT_SEQ_8822B) & BIT_MASK_RXBEQPKT_SEQ_8822B)
-
-#define BIT_SHIFT_RXVIQPKT_SEQ_8822B 12
-#define BIT_MASK_RXVIQPKT_SEQ_8822B 0xf
-#define BIT_RXVIQPKT_SEQ_8822B(x) \
- (((x) & BIT_MASK_RXVIQPKT_SEQ_8822B) << BIT_SHIFT_RXVIQPKT_SEQ_8822B)
-#define BIT_GET_RXVIQPKT_SEQ_8822B(x) \
- (((x) >> BIT_SHIFT_RXVIQPKT_SEQ_8822B) & BIT_MASK_RXVIQPKT_SEQ_8822B)
-
-#define BIT_SHIFT_RXVOQPKT_SEQ_8822B 8
-#define BIT_MASK_RXVOQPKT_SEQ_8822B 0xf
-#define BIT_RXVOQPKT_SEQ_8822B(x) \
- (((x) & BIT_MASK_RXVOQPKT_SEQ_8822B) << BIT_SHIFT_RXVOQPKT_SEQ_8822B)
-#define BIT_GET_RXVOQPKT_SEQ_8822B(x) \
- (((x) >> BIT_SHIFT_RXVOQPKT_SEQ_8822B) & BIT_MASK_RXVOQPKT_SEQ_8822B)
-
-#define BIT_RXBKQPKT_ERR_8822B BIT(7)
-#define BIT_RXBEQPKT_ERR_8822B BIT(6)
-#define BIT_RXVIQPKT_ERR_8822B BIT(5)
-#define BIT_RXVOQPKT_ERR_8822B BIT(4)
-#define BIT_RXDMA_MON_EN_8822B BIT(2)
-#define BIT_RXPKT_MON_RST_8822B BIT(1)
-#define BIT_RXPKT_MON_EN_8822B BIT(0)
-
-/* 2 REG_STATE_MON_8822B */
-
-#define BIT_SHIFT_STATE_SEL_8822B 24
-#define BIT_MASK_STATE_SEL_8822B 0x1f
-#define BIT_STATE_SEL_8822B(x) \
- (((x) & BIT_MASK_STATE_SEL_8822B) << BIT_SHIFT_STATE_SEL_8822B)
-#define BIT_GET_STATE_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_STATE_SEL_8822B) & BIT_MASK_STATE_SEL_8822B)
-
-#define BIT_SHIFT_STATE_INFO_8822B 8
-#define BIT_MASK_STATE_INFO_8822B 0xff
-#define BIT_STATE_INFO_8822B(x) \
- (((x) & BIT_MASK_STATE_INFO_8822B) << BIT_SHIFT_STATE_INFO_8822B)
-#define BIT_GET_STATE_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_STATE_INFO_8822B) & BIT_MASK_STATE_INFO_8822B)
-
-#define BIT_UPD_NXT_STATE_8822B BIT(7)
-
-#define BIT_SHIFT_CUR_STATE_8822B 0
-#define BIT_MASK_CUR_STATE_8822B 0x7f
-#define BIT_CUR_STATE_8822B(x) \
- (((x) & BIT_MASK_CUR_STATE_8822B) << BIT_SHIFT_CUR_STATE_8822B)
-#define BIT_GET_CUR_STATE_8822B(x) \
- (((x) >> BIT_SHIFT_CUR_STATE_8822B) & BIT_MASK_CUR_STATE_8822B)
-
-/* 2 REG_ERROR_MON_8822B */
-#define BIT_MACRX_ERR_1_8822B BIT(17)
-#define BIT_MACRX_ERR_0_8822B BIT(16)
-#define BIT_MACTX_ERR_3_8822B BIT(3)
-#define BIT_MACTX_ERR_2_8822B BIT(2)
-#define BIT_MACTX_ERR_1_8822B BIT(1)
-#define BIT_MACTX_ERR_0_8822B BIT(0)
-
-/* 2 REG_SEARCH_MACID_8822B */
-#define BIT_EN_TXRPTBUF_CLK_8822B BIT(31)
-
-#define BIT_SHIFT_INFO_INDEX_OFFSET_8822B 16
-#define BIT_MASK_INFO_INDEX_OFFSET_8822B 0x1fff
-#define BIT_INFO_INDEX_OFFSET_8822B(x) \
- (((x) & BIT_MASK_INFO_INDEX_OFFSET_8822B) \
- << BIT_SHIFT_INFO_INDEX_OFFSET_8822B)
-#define BIT_GET_INFO_INDEX_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_INFO_INDEX_OFFSET_8822B) & \
- BIT_MASK_INFO_INDEX_OFFSET_8822B)
-
-#define BIT_WMAC_SRCH_FIFOFULL_8822B BIT(15)
-#define BIT_DIS_INFOSRCH_8822B BIT(14)
-#define BIT_DISABLE_B0_8822B BIT(13)
-
-#define BIT_SHIFT_INFO_ADDR_OFFSET_8822B 0
-#define BIT_MASK_INFO_ADDR_OFFSET_8822B 0x1fff
-#define BIT_INFO_ADDR_OFFSET_8822B(x) \
- (((x) & BIT_MASK_INFO_ADDR_OFFSET_8822B) \
- << BIT_SHIFT_INFO_ADDR_OFFSET_8822B)
-#define BIT_GET_INFO_ADDR_OFFSET_8822B(x) \
- (((x) >> BIT_SHIFT_INFO_ADDR_OFFSET_8822B) & \
- BIT_MASK_INFO_ADDR_OFFSET_8822B)
-
-/* 2 REG_BT_COEX_TABLE_8822B (BT-COEXISTENCE CONTROL REGISTER) */
-#define BIT_PRI_MASK_RX_RESP_8822B BIT(126)
-#define BIT_PRI_MASK_RXOFDM_8822B BIT(125)
-#define BIT_PRI_MASK_RXCCK_8822B BIT(124)
-
-#define BIT_SHIFT_PRI_MASK_TXAC_8822B (117 & CPU_OPT_WIDTH)
-#define BIT_MASK_PRI_MASK_TXAC_8822B 0x7f
-#define BIT_PRI_MASK_TXAC_8822B(x) \
- (((x) & BIT_MASK_PRI_MASK_TXAC_8822B) << BIT_SHIFT_PRI_MASK_TXAC_8822B)
-#define BIT_GET_PRI_MASK_TXAC_8822B(x) \
- (((x) >> BIT_SHIFT_PRI_MASK_TXAC_8822B) & BIT_MASK_PRI_MASK_TXAC_8822B)
-
-#define BIT_SHIFT_PRI_MASK_NAV_8822B (109 & CPU_OPT_WIDTH)
-#define BIT_MASK_PRI_MASK_NAV_8822B 0xff
-#define BIT_PRI_MASK_NAV_8822B(x) \
- (((x) & BIT_MASK_PRI_MASK_NAV_8822B) << BIT_SHIFT_PRI_MASK_NAV_8822B)
-#define BIT_GET_PRI_MASK_NAV_8822B(x) \
- (((x) >> BIT_SHIFT_PRI_MASK_NAV_8822B) & BIT_MASK_PRI_MASK_NAV_8822B)
-
-#define BIT_PRI_MASK_CCK_8822B BIT(108)
-#define BIT_PRI_MASK_OFDM_8822B BIT(107)
-#define BIT_PRI_MASK_RTY_8822B BIT(106)
-
-#define BIT_SHIFT_PRI_MASK_NUM_8822B (102 & CPU_OPT_WIDTH)
-#define BIT_MASK_PRI_MASK_NUM_8822B 0xf
-#define BIT_PRI_MASK_NUM_8822B(x) \
- (((x) & BIT_MASK_PRI_MASK_NUM_8822B) << BIT_SHIFT_PRI_MASK_NUM_8822B)
-#define BIT_GET_PRI_MASK_NUM_8822B(x) \
- (((x) >> BIT_SHIFT_PRI_MASK_NUM_8822B) & BIT_MASK_PRI_MASK_NUM_8822B)
-
-#define BIT_SHIFT_PRI_MASK_TYPE_8822B (98 & CPU_OPT_WIDTH)
-#define BIT_MASK_PRI_MASK_TYPE_8822B 0xf
-#define BIT_PRI_MASK_TYPE_8822B(x) \
- (((x) & BIT_MASK_PRI_MASK_TYPE_8822B) << BIT_SHIFT_PRI_MASK_TYPE_8822B)
-#define BIT_GET_PRI_MASK_TYPE_8822B(x) \
- (((x) >> BIT_SHIFT_PRI_MASK_TYPE_8822B) & BIT_MASK_PRI_MASK_TYPE_8822B)
-
-#define BIT_OOB_8822B BIT(97)
-#define BIT_ANT_SEL_8822B BIT(96)
-
-#define BIT_SHIFT_BREAK_TABLE_2_8822B (80 & CPU_OPT_WIDTH)
-#define BIT_MASK_BREAK_TABLE_2_8822B 0xffff
-#define BIT_BREAK_TABLE_2_8822B(x) \
- (((x) & BIT_MASK_BREAK_TABLE_2_8822B) << BIT_SHIFT_BREAK_TABLE_2_8822B)
-#define BIT_GET_BREAK_TABLE_2_8822B(x) \
- (((x) >> BIT_SHIFT_BREAK_TABLE_2_8822B) & BIT_MASK_BREAK_TABLE_2_8822B)
-
-#define BIT_SHIFT_BREAK_TABLE_1_8822B (64 & CPU_OPT_WIDTH)
-#define BIT_MASK_BREAK_TABLE_1_8822B 0xffff
-#define BIT_BREAK_TABLE_1_8822B(x) \
- (((x) & BIT_MASK_BREAK_TABLE_1_8822B) << BIT_SHIFT_BREAK_TABLE_1_8822B)
-#define BIT_GET_BREAK_TABLE_1_8822B(x) \
- (((x) >> BIT_SHIFT_BREAK_TABLE_1_8822B) & BIT_MASK_BREAK_TABLE_1_8822B)
-
-#define BIT_SHIFT_COEX_TABLE_2_8822B (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_COEX_TABLE_2_8822B 0xffffffffL
-#define BIT_COEX_TABLE_2_8822B(x) \
- (((x) & BIT_MASK_COEX_TABLE_2_8822B) << BIT_SHIFT_COEX_TABLE_2_8822B)
-#define BIT_GET_COEX_TABLE_2_8822B(x) \
- (((x) >> BIT_SHIFT_COEX_TABLE_2_8822B) & BIT_MASK_COEX_TABLE_2_8822B)
-
-#define BIT_SHIFT_COEX_TABLE_1_8822B 0
-#define BIT_MASK_COEX_TABLE_1_8822B 0xffffffffL
-#define BIT_COEX_TABLE_1_8822B(x) \
- (((x) & BIT_MASK_COEX_TABLE_1_8822B) << BIT_SHIFT_COEX_TABLE_1_8822B)
-#define BIT_GET_COEX_TABLE_1_8822B(x) \
- (((x) >> BIT_SHIFT_COEX_TABLE_1_8822B) & BIT_MASK_COEX_TABLE_1_8822B)
-
-/* 2 REG_RXCMD_0_8822B */
-#define BIT_RXCMD_EN_8822B BIT(31)
-
-#define BIT_SHIFT_RXCMD_INFO_8822B 0
-#define BIT_MASK_RXCMD_INFO_8822B 0x7fffffffL
-#define BIT_RXCMD_INFO_8822B(x) \
- (((x) & BIT_MASK_RXCMD_INFO_8822B) << BIT_SHIFT_RXCMD_INFO_8822B)
-#define BIT_GET_RXCMD_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_RXCMD_INFO_8822B) & BIT_MASK_RXCMD_INFO_8822B)
-
-/* 2 REG_RXCMD_1_8822B */
-
-#define BIT_SHIFT_RXCMD_PRD_8822B 0
-#define BIT_MASK_RXCMD_PRD_8822B 0xffff
-#define BIT_RXCMD_PRD_8822B(x) \
- (((x) & BIT_MASK_RXCMD_PRD_8822B) << BIT_SHIFT_RXCMD_PRD_8822B)
-#define BIT_GET_RXCMD_PRD_8822B(x) \
- (((x) >> BIT_SHIFT_RXCMD_PRD_8822B) & BIT_MASK_RXCMD_PRD_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_WMAC_RESP_TXINFO_8822B (RESPONSE TXINFO REGISTER) */
-
-#define BIT_SHIFT_WMAC_RESP_MFB_8822B 25
-#define BIT_MASK_WMAC_RESP_MFB_8822B 0x7f
-#define BIT_WMAC_RESP_MFB_8822B(x) \
- (((x) & BIT_MASK_WMAC_RESP_MFB_8822B) << BIT_SHIFT_WMAC_RESP_MFB_8822B)
-#define BIT_GET_WMAC_RESP_MFB_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_RESP_MFB_8822B) & BIT_MASK_WMAC_RESP_MFB_8822B)
-
-#define BIT_SHIFT_WMAC_ANTINF_SEL_8822B 23
-#define BIT_MASK_WMAC_ANTINF_SEL_8822B 0x3
-#define BIT_WMAC_ANTINF_SEL_8822B(x) \
- (((x) & BIT_MASK_WMAC_ANTINF_SEL_8822B) \
- << BIT_SHIFT_WMAC_ANTINF_SEL_8822B)
-#define BIT_GET_WMAC_ANTINF_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_ANTINF_SEL_8822B) & \
- BIT_MASK_WMAC_ANTINF_SEL_8822B)
-
-#define BIT_SHIFT_WMAC_ANTSEL_SEL_8822B 21
-#define BIT_MASK_WMAC_ANTSEL_SEL_8822B 0x3
-#define BIT_WMAC_ANTSEL_SEL_8822B(x) \
- (((x) & BIT_MASK_WMAC_ANTSEL_SEL_8822B) \
- << BIT_SHIFT_WMAC_ANTSEL_SEL_8822B)
-#define BIT_GET_WMAC_ANTSEL_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_ANTSEL_SEL_8822B) & \
- BIT_MASK_WMAC_ANTSEL_SEL_8822B)
-
-#define BIT_SHIFT_R_WMAC_RESP_TXPOWER_8822B 18
-#define BIT_MASK_R_WMAC_RESP_TXPOWER_8822B 0x7
-#define BIT_R_WMAC_RESP_TXPOWER_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_RESP_TXPOWER_8822B) \
- << BIT_SHIFT_R_WMAC_RESP_TXPOWER_8822B)
-#define BIT_GET_R_WMAC_RESP_TXPOWER_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_RESP_TXPOWER_8822B) & \
- BIT_MASK_R_WMAC_RESP_TXPOWER_8822B)
-
-#define BIT_SHIFT_WMAC_RESP_TXANT_8822B 0
-#define BIT_MASK_WMAC_RESP_TXANT_8822B 0x3ffff
-#define BIT_WMAC_RESP_TXANT_8822B(x) \
- (((x) & BIT_MASK_WMAC_RESP_TXANT_8822B) \
- << BIT_SHIFT_WMAC_RESP_TXANT_8822B)
-#define BIT_GET_WMAC_RESP_TXANT_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_RESP_TXANT_8822B) & \
- BIT_MASK_WMAC_RESP_TXANT_8822B)
-
-/* 2 REG_BBPSF_CTRL_8822B */
-#define BIT_CTL_IDLE_CLR_CSI_RPT_8822B BIT(31)
-#define BIT_WMAC_USE_NDPARATE_8822B BIT(30)
-
-#define BIT_SHIFT_WMAC_CSI_RATE_8822B 24
-#define BIT_MASK_WMAC_CSI_RATE_8822B 0x3f
-#define BIT_WMAC_CSI_RATE_8822B(x) \
- (((x) & BIT_MASK_WMAC_CSI_RATE_8822B) << BIT_SHIFT_WMAC_CSI_RATE_8822B)
-#define BIT_GET_WMAC_CSI_RATE_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_CSI_RATE_8822B) & BIT_MASK_WMAC_CSI_RATE_8822B)
-
-#define BIT_SHIFT_WMAC_RESP_TXRATE_8822B 16
-#define BIT_MASK_WMAC_RESP_TXRATE_8822B 0xff
-#define BIT_WMAC_RESP_TXRATE_8822B(x) \
- (((x) & BIT_MASK_WMAC_RESP_TXRATE_8822B) \
- << BIT_SHIFT_WMAC_RESP_TXRATE_8822B)
-#define BIT_GET_WMAC_RESP_TXRATE_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_RESP_TXRATE_8822B) & \
- BIT_MASK_WMAC_RESP_TXRATE_8822B)
-
-#define BIT_BBPSF_MPDUCHKEN_8822B BIT(5)
-#define BIT_BBPSF_MHCHKEN_8822B BIT(4)
-#define BIT_BBPSF_ERRCHKEN_8822B BIT(3)
-
-#define BIT_SHIFT_BBPSF_ERRTHR_8822B 0
-#define BIT_MASK_BBPSF_ERRTHR_8822B 0x7
-#define BIT_BBPSF_ERRTHR_8822B(x) \
- (((x) & BIT_MASK_BBPSF_ERRTHR_8822B) << BIT_SHIFT_BBPSF_ERRTHR_8822B)
-#define BIT_GET_BBPSF_ERRTHR_8822B(x) \
- (((x) >> BIT_SHIFT_BBPSF_ERRTHR_8822B) & BIT_MASK_BBPSF_ERRTHR_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_P2P_RX_BCN_NOA_8822B (P2P RX BEACON NOA REGISTER) */
-#define BIT_NOA_PARSER_EN_8822B BIT(15)
-#define BIT_BSSID_SEL_8822B BIT(14)
-
-#define BIT_SHIFT_P2P_OUI_TYPE_8822B 0
-#define BIT_MASK_P2P_OUI_TYPE_8822B 0xff
-#define BIT_P2P_OUI_TYPE_8822B(x) \
- (((x) & BIT_MASK_P2P_OUI_TYPE_8822B) << BIT_SHIFT_P2P_OUI_TYPE_8822B)
-#define BIT_GET_P2P_OUI_TYPE_8822B(x) \
- (((x) >> BIT_SHIFT_P2P_OUI_TYPE_8822B) & BIT_MASK_P2P_OUI_TYPE_8822B)
-
-/* 2 REG_ASSOCIATED_BFMER0_INFO_8822B (ASSOCIATED BEAMFORMER0 INFO REGISTER) */
-
-#define BIT_SHIFT_R_WMAC_TXCSI_AID0_8822B (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_TXCSI_AID0_8822B 0x1ff
-#define BIT_R_WMAC_TXCSI_AID0_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_TXCSI_AID0_8822B) \
- << BIT_SHIFT_R_WMAC_TXCSI_AID0_8822B)
-#define BIT_GET_R_WMAC_TXCSI_AID0_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_TXCSI_AID0_8822B) & \
- BIT_MASK_R_WMAC_TXCSI_AID0_8822B)
-
-#define BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R0_8822B 0
-#define BIT_MASK_R_WMAC_SOUNDING_RXADD_R0_8822B 0xffffffffffffL
-#define BIT_R_WMAC_SOUNDING_RXADD_R0_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_SOUNDING_RXADD_R0_8822B) \
- << BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R0_8822B)
-#define BIT_GET_R_WMAC_SOUNDING_RXADD_R0_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R0_8822B) & \
- BIT_MASK_R_WMAC_SOUNDING_RXADD_R0_8822B)
-
-/* 2 REG_ASSOCIATED_BFMER1_INFO_8822B (ASSOCIATED BEAMFORMER1 INFO REGISTER) */
-
-#define BIT_SHIFT_R_WMAC_TXCSI_AID1_8822B (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_TXCSI_AID1_8822B 0x1ff
-#define BIT_R_WMAC_TXCSI_AID1_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_TXCSI_AID1_8822B) \
- << BIT_SHIFT_R_WMAC_TXCSI_AID1_8822B)
-#define BIT_GET_R_WMAC_TXCSI_AID1_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_TXCSI_AID1_8822B) & \
- BIT_MASK_R_WMAC_TXCSI_AID1_8822B)
-
-#define BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R1_8822B 0
-#define BIT_MASK_R_WMAC_SOUNDING_RXADD_R1_8822B 0xffffffffffffL
-#define BIT_R_WMAC_SOUNDING_RXADD_R1_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_SOUNDING_RXADD_R1_8822B) \
- << BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R1_8822B)
-#define BIT_GET_R_WMAC_SOUNDING_RXADD_R1_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_SOUNDING_RXADD_R1_8822B) & \
- BIT_MASK_R_WMAC_SOUNDING_RXADD_R1_8822B)
-
-/* 2 REG_TX_CSI_RPT_PARAM_BW20_8822B (TX CSI REPORT PARAMETER REGISTER) */
-
-#define BIT_SHIFT_R_WMAC_BFINFO_20M_1_8822B 16
-#define BIT_MASK_R_WMAC_BFINFO_20M_1_8822B 0xfff
-#define BIT_R_WMAC_BFINFO_20M_1_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_BFINFO_20M_1_8822B) \
- << BIT_SHIFT_R_WMAC_BFINFO_20M_1_8822B)
-#define BIT_GET_R_WMAC_BFINFO_20M_1_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_BFINFO_20M_1_8822B) & \
- BIT_MASK_R_WMAC_BFINFO_20M_1_8822B)
-
-#define BIT_SHIFT_R_WMAC_BFINFO_20M_0_8822B 0
-#define BIT_MASK_R_WMAC_BFINFO_20M_0_8822B 0xfff
-#define BIT_R_WMAC_BFINFO_20M_0_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_BFINFO_20M_0_8822B) \
- << BIT_SHIFT_R_WMAC_BFINFO_20M_0_8822B)
-#define BIT_GET_R_WMAC_BFINFO_20M_0_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_BFINFO_20M_0_8822B) & \
- BIT_MASK_R_WMAC_BFINFO_20M_0_8822B)
-
-/* 2 REG_TX_CSI_RPT_PARAM_BW40_8822B (TX CSI REPORT PARAMETER_BW40 REGISTER) */
-
-#define BIT_SHIFT_WMAC_RESP_ANTCD_8822B 0
-#define BIT_MASK_WMAC_RESP_ANTCD_8822B 0xf
-#define BIT_WMAC_RESP_ANTCD_8822B(x) \
- (((x) & BIT_MASK_WMAC_RESP_ANTCD_8822B) \
- << BIT_SHIFT_WMAC_RESP_ANTCD_8822B)
-#define BIT_GET_WMAC_RESP_ANTCD_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_RESP_ANTCD_8822B) & \
- BIT_MASK_WMAC_RESP_ANTCD_8822B)
-
-/* 2 REG_TX_CSI_RPT_PARAM_BW80_8822B (TX CSI REPORT PARAMETER_BW80 REGISTER) */
-
-/* 2 REG_BCN_PSR_RPT2_8822B (BEACON PARSER REPORT REGISTER2) */
-
-#define BIT_SHIFT_DTIM_CNT2_8822B 24
-#define BIT_MASK_DTIM_CNT2_8822B 0xff
-#define BIT_DTIM_CNT2_8822B(x) \
- (((x) & BIT_MASK_DTIM_CNT2_8822B) << BIT_SHIFT_DTIM_CNT2_8822B)
-#define BIT_GET_DTIM_CNT2_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_CNT2_8822B) & BIT_MASK_DTIM_CNT2_8822B)
-
-#define BIT_SHIFT_DTIM_PERIOD2_8822B 16
-#define BIT_MASK_DTIM_PERIOD2_8822B 0xff
-#define BIT_DTIM_PERIOD2_8822B(x) \
- (((x) & BIT_MASK_DTIM_PERIOD2_8822B) << BIT_SHIFT_DTIM_PERIOD2_8822B)
-#define BIT_GET_DTIM_PERIOD2_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD2_8822B) & BIT_MASK_DTIM_PERIOD2_8822B)
-
-#define BIT_DTIM2_8822B BIT(15)
-#define BIT_TIM2_8822B BIT(14)
-
-#define BIT_SHIFT_PS_AID_2_8822B 0
-#define BIT_MASK_PS_AID_2_8822B 0x7ff
-#define BIT_PS_AID_2_8822B(x) \
- (((x) & BIT_MASK_PS_AID_2_8822B) << BIT_SHIFT_PS_AID_2_8822B)
-#define BIT_GET_PS_AID_2_8822B(x) \
- (((x) >> BIT_SHIFT_PS_AID_2_8822B) & BIT_MASK_PS_AID_2_8822B)
-
-/* 2 REG_BCN_PSR_RPT3_8822B (BEACON PARSER REPORT REGISTER3) */
-
-#define BIT_SHIFT_DTIM_CNT3_8822B 24
-#define BIT_MASK_DTIM_CNT3_8822B 0xff
-#define BIT_DTIM_CNT3_8822B(x) \
- (((x) & BIT_MASK_DTIM_CNT3_8822B) << BIT_SHIFT_DTIM_CNT3_8822B)
-#define BIT_GET_DTIM_CNT3_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_CNT3_8822B) & BIT_MASK_DTIM_CNT3_8822B)
-
-#define BIT_SHIFT_DTIM_PERIOD3_8822B 16
-#define BIT_MASK_DTIM_PERIOD3_8822B 0xff
-#define BIT_DTIM_PERIOD3_8822B(x) \
- (((x) & BIT_MASK_DTIM_PERIOD3_8822B) << BIT_SHIFT_DTIM_PERIOD3_8822B)
-#define BIT_GET_DTIM_PERIOD3_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD3_8822B) & BIT_MASK_DTIM_PERIOD3_8822B)
-
-#define BIT_DTIM3_8822B BIT(15)
-#define BIT_TIM3_8822B BIT(14)
-
-#define BIT_SHIFT_PS_AID_3_8822B 0
-#define BIT_MASK_PS_AID_3_8822B 0x7ff
-#define BIT_PS_AID_3_8822B(x) \
- (((x) & BIT_MASK_PS_AID_3_8822B) << BIT_SHIFT_PS_AID_3_8822B)
-#define BIT_GET_PS_AID_3_8822B(x) \
- (((x) >> BIT_SHIFT_PS_AID_3_8822B) & BIT_MASK_PS_AID_3_8822B)
-
-/* 2 REG_BCN_PSR_RPT4_8822B (BEACON PARSER REPORT REGISTER4) */
-
-#define BIT_SHIFT_DTIM_CNT4_8822B 24
-#define BIT_MASK_DTIM_CNT4_8822B 0xff
-#define BIT_DTIM_CNT4_8822B(x) \
- (((x) & BIT_MASK_DTIM_CNT4_8822B) << BIT_SHIFT_DTIM_CNT4_8822B)
-#define BIT_GET_DTIM_CNT4_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_CNT4_8822B) & BIT_MASK_DTIM_CNT4_8822B)
-
-#define BIT_SHIFT_DTIM_PERIOD4_8822B 16
-#define BIT_MASK_DTIM_PERIOD4_8822B 0xff
-#define BIT_DTIM_PERIOD4_8822B(x) \
- (((x) & BIT_MASK_DTIM_PERIOD4_8822B) << BIT_SHIFT_DTIM_PERIOD4_8822B)
-#define BIT_GET_DTIM_PERIOD4_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD4_8822B) & BIT_MASK_DTIM_PERIOD4_8822B)
-
-#define BIT_DTIM4_8822B BIT(15)
-#define BIT_TIM4_8822B BIT(14)
-
-#define BIT_SHIFT_PS_AID_4_8822B 0
-#define BIT_MASK_PS_AID_4_8822B 0x7ff
-#define BIT_PS_AID_4_8822B(x) \
- (((x) & BIT_MASK_PS_AID_4_8822B) << BIT_SHIFT_PS_AID_4_8822B)
-#define BIT_GET_PS_AID_4_8822B(x) \
- (((x) >> BIT_SHIFT_PS_AID_4_8822B) & BIT_MASK_PS_AID_4_8822B)
-
-/* 2 REG_A1_ADDR_MASK_8822B (A1 ADDR MASK REGISTER) */
-
-#define BIT_SHIFT_A1_ADDR_MASK_8822B 0
-#define BIT_MASK_A1_ADDR_MASK_8822B 0xffffffffL
-#define BIT_A1_ADDR_MASK_8822B(x) \
- (((x) & BIT_MASK_A1_ADDR_MASK_8822B) << BIT_SHIFT_A1_ADDR_MASK_8822B)
-#define BIT_GET_A1_ADDR_MASK_8822B(x) \
- (((x) >> BIT_SHIFT_A1_ADDR_MASK_8822B) & BIT_MASK_A1_ADDR_MASK_8822B)
-
-/* 2 REG_MACID2_8822B (MAC ID2 REGISTER) */
-
-#define BIT_SHIFT_MACID2_8822B 0
-#define BIT_MASK_MACID2_8822B 0xffffffffffffL
-#define BIT_MACID2_8822B(x) \
- (((x) & BIT_MASK_MACID2_8822B) << BIT_SHIFT_MACID2_8822B)
-#define BIT_GET_MACID2_8822B(x) \
- (((x) >> BIT_SHIFT_MACID2_8822B) & BIT_MASK_MACID2_8822B)
-
-/* 2 REG_BSSID2_8822B (BSSID2 REGISTER) */
-
-#define BIT_SHIFT_BSSID2_8822B 0
-#define BIT_MASK_BSSID2_8822B 0xffffffffffffL
-#define BIT_BSSID2_8822B(x) \
- (((x) & BIT_MASK_BSSID2_8822B) << BIT_SHIFT_BSSID2_8822B)
-#define BIT_GET_BSSID2_8822B(x) \
- (((x) >> BIT_SHIFT_BSSID2_8822B) & BIT_MASK_BSSID2_8822B)
-
-/* 2 REG_MACID3_8822B (MAC ID3 REGISTER) */
-
-#define BIT_SHIFT_MACID3_8822B 0
-#define BIT_MASK_MACID3_8822B 0xffffffffffffL
-#define BIT_MACID3_8822B(x) \
- (((x) & BIT_MASK_MACID3_8822B) << BIT_SHIFT_MACID3_8822B)
-#define BIT_GET_MACID3_8822B(x) \
- (((x) >> BIT_SHIFT_MACID3_8822B) & BIT_MASK_MACID3_8822B)
-
-/* 2 REG_BSSID3_8822B (BSSID3 REGISTER) */
-
-#define BIT_SHIFT_BSSID3_8822B 0
-#define BIT_MASK_BSSID3_8822B 0xffffffffffffL
-#define BIT_BSSID3_8822B(x) \
- (((x) & BIT_MASK_BSSID3_8822B) << BIT_SHIFT_BSSID3_8822B)
-#define BIT_GET_BSSID3_8822B(x) \
- (((x) >> BIT_SHIFT_BSSID3_8822B) & BIT_MASK_BSSID3_8822B)
-
-/* 2 REG_MACID4_8822B (MAC ID4 REGISTER) */
-
-#define BIT_SHIFT_MACID4_8822B 0
-#define BIT_MASK_MACID4_8822B 0xffffffffffffL
-#define BIT_MACID4_8822B(x) \
- (((x) & BIT_MASK_MACID4_8822B) << BIT_SHIFT_MACID4_8822B)
-#define BIT_GET_MACID4_8822B(x) \
- (((x) >> BIT_SHIFT_MACID4_8822B) & BIT_MASK_MACID4_8822B)
-
-/* 2 REG_BSSID4_8822B (BSSID4 REGISTER) */
-
-#define BIT_SHIFT_BSSID4_8822B 0
-#define BIT_MASK_BSSID4_8822B 0xffffffffffffL
-#define BIT_BSSID4_8822B(x) \
- (((x) & BIT_MASK_BSSID4_8822B) << BIT_SHIFT_BSSID4_8822B)
-#define BIT_GET_BSSID4_8822B(x) \
- (((x) >> BIT_SHIFT_BSSID4_8822B) & BIT_MASK_BSSID4_8822B)
-
-/* 2 REG_NOA_REPORT_8822B */
-
-/* 2 REG_PWRBIT_SETTING_8822B */
-#define BIT_CLI3_PWRBIT_OW_EN_8822B BIT(7)
-#define BIT_CLI3_PWR_ST_8822B BIT(6)
-#define BIT_CLI2_PWRBIT_OW_EN_8822B BIT(5)
-#define BIT_CLI2_PWR_ST_8822B BIT(4)
-#define BIT_CLI1_PWRBIT_OW_EN_8822B BIT(3)
-#define BIT_CLI1_PWR_ST_8822B BIT(2)
-#define BIT_CLI0_PWRBIT_OW_EN_8822B BIT(1)
-#define BIT_CLI0_PWR_ST_8822B BIT(0)
-
-/* 2 REG_WMAC_MU_BF_OPTION_8822B */
-#define BIT_WMAC_RESP_NONSTA1_DIS_8822B BIT(7)
-#define BIT_BIT_WMAC_TXMU_ACKPOLICY_EN_8822B BIT(6)
-
-#define BIT_SHIFT_WMAC_TXMU_ACKPOLICY_8822B 4
-#define BIT_MASK_WMAC_TXMU_ACKPOLICY_8822B 0x3
-#define BIT_WMAC_TXMU_ACKPOLICY_8822B(x) \
- (((x) & BIT_MASK_WMAC_TXMU_ACKPOLICY_8822B) \
- << BIT_SHIFT_WMAC_TXMU_ACKPOLICY_8822B)
-#define BIT_GET_WMAC_TXMU_ACKPOLICY_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_TXMU_ACKPOLICY_8822B) & \
- BIT_MASK_WMAC_TXMU_ACKPOLICY_8822B)
-
-#define BIT_SHIFT_WMAC_MU_BFEE_PORT_SEL_8822B 1
-#define BIT_MASK_WMAC_MU_BFEE_PORT_SEL_8822B 0x7
-#define BIT_WMAC_MU_BFEE_PORT_SEL_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE_PORT_SEL_8822B) \
- << BIT_SHIFT_WMAC_MU_BFEE_PORT_SEL_8822B)
-#define BIT_GET_WMAC_MU_BFEE_PORT_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE_PORT_SEL_8822B) & \
- BIT_MASK_WMAC_MU_BFEE_PORT_SEL_8822B)
-
-#define BIT_WMAC_MU_BFEE_DIS_8822B BIT(0)
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_WMAC_PAUSE_BB_CLR_TH_8822B 0
-#define BIT_MASK_WMAC_PAUSE_BB_CLR_TH_8822B 0xff
-#define BIT_WMAC_PAUSE_BB_CLR_TH_8822B(x) \
- (((x) & BIT_MASK_WMAC_PAUSE_BB_CLR_TH_8822B) \
- << BIT_SHIFT_WMAC_PAUSE_BB_CLR_TH_8822B)
-#define BIT_GET_WMAC_PAUSE_BB_CLR_TH_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_PAUSE_BB_CLR_TH_8822B) & \
- BIT_MASK_WMAC_PAUSE_BB_CLR_TH_8822B)
-
-/* 2 REG_WMAC_MU_ARB_8822B */
-#define BIT_WMAC_ARB_HW_ADAPT_EN_8822B BIT(7)
-#define BIT_WMAC_ARB_SW_EN_8822B BIT(6)
-
-#define BIT_SHIFT_WMAC_ARB_SW_STATE_8822B 0
-#define BIT_MASK_WMAC_ARB_SW_STATE_8822B 0x3f
-#define BIT_WMAC_ARB_SW_STATE_8822B(x) \
- (((x) & BIT_MASK_WMAC_ARB_SW_STATE_8822B) \
- << BIT_SHIFT_WMAC_ARB_SW_STATE_8822B)
-#define BIT_GET_WMAC_ARB_SW_STATE_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_ARB_SW_STATE_8822B) & \
- BIT_MASK_WMAC_ARB_SW_STATE_8822B)
-
-/* 2 REG_WMAC_MU_OPTION_8822B */
-
-#define BIT_SHIFT_WMAC_MU_DBGSEL_8822B 5
-#define BIT_MASK_WMAC_MU_DBGSEL_8822B 0x3
-#define BIT_WMAC_MU_DBGSEL_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_DBGSEL_8822B) \
- << BIT_SHIFT_WMAC_MU_DBGSEL_8822B)
-#define BIT_GET_WMAC_MU_DBGSEL_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_DBGSEL_8822B) & \
- BIT_MASK_WMAC_MU_DBGSEL_8822B)
-
-#define BIT_SHIFT_WMAC_MU_CPRD_TIMEOUT_8822B 0
-#define BIT_MASK_WMAC_MU_CPRD_TIMEOUT_8822B 0x1f
-#define BIT_WMAC_MU_CPRD_TIMEOUT_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_CPRD_TIMEOUT_8822B) \
- << BIT_SHIFT_WMAC_MU_CPRD_TIMEOUT_8822B)
-#define BIT_GET_WMAC_MU_CPRD_TIMEOUT_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_CPRD_TIMEOUT_8822B) & \
- BIT_MASK_WMAC_MU_CPRD_TIMEOUT_8822B)
-
-/* 2 REG_WMAC_MU_BF_CTL_8822B */
-#define BIT_WMAC_INVLD_BFPRT_CHK_8822B BIT(15)
-#define BIT_WMAC_RETXBFRPTSEQ_UPD_8822B BIT(14)
-
-#define BIT_SHIFT_WMAC_MU_BFRPTSEG_SEL_8822B 12
-#define BIT_MASK_WMAC_MU_BFRPTSEG_SEL_8822B 0x3
-#define BIT_WMAC_MU_BFRPTSEG_SEL_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BFRPTSEG_SEL_8822B) \
- << BIT_SHIFT_WMAC_MU_BFRPTSEG_SEL_8822B)
-#define BIT_GET_WMAC_MU_BFRPTSEG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFRPTSEG_SEL_8822B) & \
- BIT_MASK_WMAC_MU_BFRPTSEG_SEL_8822B)
-
-#define BIT_SHIFT_WMAC_MU_BF_MYAID_8822B 0
-#define BIT_MASK_WMAC_MU_BF_MYAID_8822B 0xfff
-#define BIT_WMAC_MU_BF_MYAID_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BF_MYAID_8822B) \
- << BIT_SHIFT_WMAC_MU_BF_MYAID_8822B)
-#define BIT_GET_WMAC_MU_BF_MYAID_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BF_MYAID_8822B) & \
- BIT_MASK_WMAC_MU_BF_MYAID_8822B)
-
-/* 2 REG_WMAC_MU_BFRPT_PARA_8822B */
-
-#define BIT_SHIFT_BIT_BFRPT_PARA_USERID_SEL_8822B 12
-#define BIT_MASK_BIT_BFRPT_PARA_USERID_SEL_8822B 0x7
-#define BIT_BIT_BFRPT_PARA_USERID_SEL_8822B(x) \
- (((x) & BIT_MASK_BIT_BFRPT_PARA_USERID_SEL_8822B) \
- << BIT_SHIFT_BIT_BFRPT_PARA_USERID_SEL_8822B)
-#define BIT_GET_BIT_BFRPT_PARA_USERID_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_BIT_BFRPT_PARA_USERID_SEL_8822B) & \
- BIT_MASK_BIT_BFRPT_PARA_USERID_SEL_8822B)
-
-#define BIT_SHIFT_BFRPT_PARA_8822B 0
-#define BIT_MASK_BFRPT_PARA_8822B 0xfff
-#define BIT_BFRPT_PARA_8822B(x) \
- (((x) & BIT_MASK_BFRPT_PARA_8822B) << BIT_SHIFT_BFRPT_PARA_8822B)
-#define BIT_GET_BFRPT_PARA_8822B(x) \
- (((x) >> BIT_SHIFT_BFRPT_PARA_8822B) & BIT_MASK_BFRPT_PARA_8822B)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE2_8822B */
-#define BIT_STATUS_BFEE2_8822B BIT(10)
-#define BIT_WMAC_MU_BFEE2_EN_8822B BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE2_AID_8822B 0
-#define BIT_MASK_WMAC_MU_BFEE2_AID_8822B 0x1ff
-#define BIT_WMAC_MU_BFEE2_AID_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE2_AID_8822B) \
- << BIT_SHIFT_WMAC_MU_BFEE2_AID_8822B)
-#define BIT_GET_WMAC_MU_BFEE2_AID_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE2_AID_8822B) & \
- BIT_MASK_WMAC_MU_BFEE2_AID_8822B)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE3_8822B */
-#define BIT_STATUS_BFEE3_8822B BIT(10)
-#define BIT_WMAC_MU_BFEE3_EN_8822B BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE3_AID_8822B 0
-#define BIT_MASK_WMAC_MU_BFEE3_AID_8822B 0x1ff
-#define BIT_WMAC_MU_BFEE3_AID_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE3_AID_8822B) \
- << BIT_SHIFT_WMAC_MU_BFEE3_AID_8822B)
-#define BIT_GET_WMAC_MU_BFEE3_AID_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE3_AID_8822B) & \
- BIT_MASK_WMAC_MU_BFEE3_AID_8822B)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE4_8822B */
-#define BIT_STATUS_BFEE4_8822B BIT(10)
-#define BIT_WMAC_MU_BFEE4_EN_8822B BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE4_AID_8822B 0
-#define BIT_MASK_WMAC_MU_BFEE4_AID_8822B 0x1ff
-#define BIT_WMAC_MU_BFEE4_AID_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE4_AID_8822B) \
- << BIT_SHIFT_WMAC_MU_BFEE4_AID_8822B)
-#define BIT_GET_WMAC_MU_BFEE4_AID_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE4_AID_8822B) & \
- BIT_MASK_WMAC_MU_BFEE4_AID_8822B)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE5_8822B */
-#define BIT_STATUS_BFEE5_8822B BIT(10)
-#define BIT_WMAC_MU_BFEE5_EN_8822B BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE5_AID_8822B 0
-#define BIT_MASK_WMAC_MU_BFEE5_AID_8822B 0x1ff
-#define BIT_WMAC_MU_BFEE5_AID_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE5_AID_8822B) \
- << BIT_SHIFT_WMAC_MU_BFEE5_AID_8822B)
-#define BIT_GET_WMAC_MU_BFEE5_AID_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE5_AID_8822B) & \
- BIT_MASK_WMAC_MU_BFEE5_AID_8822B)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE6_8822B */
-#define BIT_STATUS_BFEE6_8822B BIT(10)
-#define BIT_WMAC_MU_BFEE6_EN_8822B BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE6_AID_8822B 0
-#define BIT_MASK_WMAC_MU_BFEE6_AID_8822B 0x1ff
-#define BIT_WMAC_MU_BFEE6_AID_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE6_AID_8822B) \
- << BIT_SHIFT_WMAC_MU_BFEE6_AID_8822B)
-#define BIT_GET_WMAC_MU_BFEE6_AID_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE6_AID_8822B) & \
- BIT_MASK_WMAC_MU_BFEE6_AID_8822B)
-
-/* 2 REG_WMAC_ASSOCIATED_MU_BFMEE7_8822B */
-#define BIT_BIT_STATUS_BFEE4_8822B BIT(10)
-#define BIT_WMAC_MU_BFEE7_EN_8822B BIT(9)
-
-#define BIT_SHIFT_WMAC_MU_BFEE7_AID_8822B 0
-#define BIT_MASK_WMAC_MU_BFEE7_AID_8822B 0x1ff
-#define BIT_WMAC_MU_BFEE7_AID_8822B(x) \
- (((x) & BIT_MASK_WMAC_MU_BFEE7_AID_8822B) \
- << BIT_SHIFT_WMAC_MU_BFEE7_AID_8822B)
-#define BIT_GET_WMAC_MU_BFEE7_AID_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_MU_BFEE7_AID_8822B) & \
- BIT_MASK_WMAC_MU_BFEE7_AID_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-#define BIT_RST_ALL_COUNTER_8822B BIT(31)
-
-#define BIT_SHIFT_ABORT_RX_VBON_COUNTER_8822B 16
-#define BIT_MASK_ABORT_RX_VBON_COUNTER_8822B 0xff
-#define BIT_ABORT_RX_VBON_COUNTER_8822B(x) \
- (((x) & BIT_MASK_ABORT_RX_VBON_COUNTER_8822B) \
- << BIT_SHIFT_ABORT_RX_VBON_COUNTER_8822B)
-#define BIT_GET_ABORT_RX_VBON_COUNTER_8822B(x) \
- (((x) >> BIT_SHIFT_ABORT_RX_VBON_COUNTER_8822B) & \
- BIT_MASK_ABORT_RX_VBON_COUNTER_8822B)
-
-#define BIT_SHIFT_ABORT_RX_RDRDY_COUNTER_8822B 8
-#define BIT_MASK_ABORT_RX_RDRDY_COUNTER_8822B 0xff
-#define BIT_ABORT_RX_RDRDY_COUNTER_8822B(x) \
- (((x) & BIT_MASK_ABORT_RX_RDRDY_COUNTER_8822B) \
- << BIT_SHIFT_ABORT_RX_RDRDY_COUNTER_8822B)
-#define BIT_GET_ABORT_RX_RDRDY_COUNTER_8822B(x) \
- (((x) >> BIT_SHIFT_ABORT_RX_RDRDY_COUNTER_8822B) & \
- BIT_MASK_ABORT_RX_RDRDY_COUNTER_8822B)
-
-#define BIT_SHIFT_VBON_EARLY_FALLING_COUNTER_8822B 0
-#define BIT_MASK_VBON_EARLY_FALLING_COUNTER_8822B 0xff
-#define BIT_VBON_EARLY_FALLING_COUNTER_8822B(x) \
- (((x) & BIT_MASK_VBON_EARLY_FALLING_COUNTER_8822B) \
- << BIT_SHIFT_VBON_EARLY_FALLING_COUNTER_8822B)
-#define BIT_GET_VBON_EARLY_FALLING_COUNTER_8822B(x) \
- (((x) >> BIT_SHIFT_VBON_EARLY_FALLING_COUNTER_8822B) & \
- BIT_MASK_VBON_EARLY_FALLING_COUNTER_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-#define BIT_WMAC_PLCP_TRX_SEL_8822B BIT(31)
-
-#define BIT_SHIFT_WMAC_PLCP_RDSIG_SEL_8822B 28
-#define BIT_MASK_WMAC_PLCP_RDSIG_SEL_8822B 0x7
-#define BIT_WMAC_PLCP_RDSIG_SEL_8822B(x) \
- (((x) & BIT_MASK_WMAC_PLCP_RDSIG_SEL_8822B) \
- << BIT_SHIFT_WMAC_PLCP_RDSIG_SEL_8822B)
-#define BIT_GET_WMAC_PLCP_RDSIG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_PLCP_RDSIG_SEL_8822B) & \
- BIT_MASK_WMAC_PLCP_RDSIG_SEL_8822B)
-
-#define BIT_SHIFT_WMAC_RATE_IDX_8822B 24
-#define BIT_MASK_WMAC_RATE_IDX_8822B 0xf
-#define BIT_WMAC_RATE_IDX_8822B(x) \
- (((x) & BIT_MASK_WMAC_RATE_IDX_8822B) << BIT_SHIFT_WMAC_RATE_IDX_8822B)
-#define BIT_GET_WMAC_RATE_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_RATE_IDX_8822B) & BIT_MASK_WMAC_RATE_IDX_8822B)
-
-#define BIT_SHIFT_WMAC_PLCP_RDSIG_8822B 0
-#define BIT_MASK_WMAC_PLCP_RDSIG_8822B 0xffffff
-#define BIT_WMAC_PLCP_RDSIG_8822B(x) \
- (((x) & BIT_MASK_WMAC_PLCP_RDSIG_8822B) \
- << BIT_SHIFT_WMAC_PLCP_RDSIG_8822B)
-#define BIT_GET_WMAC_PLCP_RDSIG_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_PLCP_RDSIG_8822B) & \
- BIT_MASK_WMAC_PLCP_RDSIG_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-#define BIT_WMAC_MUTX_IDX_8822B BIT(24)
-
-#define BIT_SHIFT_WMAC_PLCP_RDSIG_8822B 0
-#define BIT_MASK_WMAC_PLCP_RDSIG_8822B 0xffffff
-#define BIT_WMAC_PLCP_RDSIG_8822B(x) \
- (((x) & BIT_MASK_WMAC_PLCP_RDSIG_8822B) \
- << BIT_SHIFT_WMAC_PLCP_RDSIG_8822B)
-#define BIT_GET_WMAC_PLCP_RDSIG_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_PLCP_RDSIG_8822B) & \
- BIT_MASK_WMAC_PLCP_RDSIG_8822B)
-
-/* 2 REG_TRANSMIT_ADDRSS_0_8822B (TA0 REGISTER) */
-
-#define BIT_SHIFT_TA0_8822B 0
-#define BIT_MASK_TA0_8822B 0xffffffffffffL
-#define BIT_TA0_8822B(x) (((x) & BIT_MASK_TA0_8822B) << BIT_SHIFT_TA0_8822B)
-#define BIT_GET_TA0_8822B(x) (((x) >> BIT_SHIFT_TA0_8822B) & BIT_MASK_TA0_8822B)
-
-/* 2 REG_TRANSMIT_ADDRSS_1_8822B (TA1 REGISTER) */
-
-#define BIT_SHIFT_TA1_8822B 0
-#define BIT_MASK_TA1_8822B 0xffffffffffffL
-#define BIT_TA1_8822B(x) (((x) & BIT_MASK_TA1_8822B) << BIT_SHIFT_TA1_8822B)
-#define BIT_GET_TA1_8822B(x) (((x) >> BIT_SHIFT_TA1_8822B) & BIT_MASK_TA1_8822B)
-
-/* 2 REG_TRANSMIT_ADDRSS_2_8822B (TA2 REGISTER) */
-
-#define BIT_SHIFT_TA2_8822B 0
-#define BIT_MASK_TA2_8822B 0xffffffffffffL
-#define BIT_TA2_8822B(x) (((x) & BIT_MASK_TA2_8822B) << BIT_SHIFT_TA2_8822B)
-#define BIT_GET_TA2_8822B(x) (((x) >> BIT_SHIFT_TA2_8822B) & BIT_MASK_TA2_8822B)
-
-/* 2 REG_TRANSMIT_ADDRSS_3_8822B (TA3 REGISTER) */
-
-#define BIT_SHIFT_TA3_8822B 0
-#define BIT_MASK_TA3_8822B 0xffffffffffffL
-#define BIT_TA3_8822B(x) (((x) & BIT_MASK_TA3_8822B) << BIT_SHIFT_TA3_8822B)
-#define BIT_GET_TA3_8822B(x) (((x) >> BIT_SHIFT_TA3_8822B) & BIT_MASK_TA3_8822B)
-
-/* 2 REG_TRANSMIT_ADDRSS_4_8822B (TA4 REGISTER) */
-
-#define BIT_SHIFT_TA4_8822B 0
-#define BIT_MASK_TA4_8822B 0xffffffffffffL
-#define BIT_TA4_8822B(x) (((x) & BIT_MASK_TA4_8822B) << BIT_SHIFT_TA4_8822B)
-#define BIT_GET_TA4_8822B(x) (((x) >> BIT_SHIFT_TA4_8822B) & BIT_MASK_TA4_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_MACID1_8822B */
-
-#define BIT_SHIFT_MACID1_8822B 0
-#define BIT_MASK_MACID1_8822B 0xffffffffffffL
-#define BIT_MACID1_8822B(x) \
- (((x) & BIT_MASK_MACID1_8822B) << BIT_SHIFT_MACID1_8822B)
-#define BIT_GET_MACID1_8822B(x) \
- (((x) >> BIT_SHIFT_MACID1_8822B) & BIT_MASK_MACID1_8822B)
-
-/* 2 REG_BSSID1_8822B */
-
-#define BIT_SHIFT_BSSID1_8822B 0
-#define BIT_MASK_BSSID1_8822B 0xffffffffffffL
-#define BIT_BSSID1_8822B(x) \
- (((x) & BIT_MASK_BSSID1_8822B) << BIT_SHIFT_BSSID1_8822B)
-#define BIT_GET_BSSID1_8822B(x) \
- (((x) >> BIT_SHIFT_BSSID1_8822B) & BIT_MASK_BSSID1_8822B)
-
-/* 2 REG_BCN_PSR_RPT1_8822B */
-
-#define BIT_SHIFT_DTIM_CNT1_8822B 24
-#define BIT_MASK_DTIM_CNT1_8822B 0xff
-#define BIT_DTIM_CNT1_8822B(x) \
- (((x) & BIT_MASK_DTIM_CNT1_8822B) << BIT_SHIFT_DTIM_CNT1_8822B)
-#define BIT_GET_DTIM_CNT1_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_CNT1_8822B) & BIT_MASK_DTIM_CNT1_8822B)
-
-#define BIT_SHIFT_DTIM_PERIOD1_8822B 16
-#define BIT_MASK_DTIM_PERIOD1_8822B 0xff
-#define BIT_DTIM_PERIOD1_8822B(x) \
- (((x) & BIT_MASK_DTIM_PERIOD1_8822B) << BIT_SHIFT_DTIM_PERIOD1_8822B)
-#define BIT_GET_DTIM_PERIOD1_8822B(x) \
- (((x) >> BIT_SHIFT_DTIM_PERIOD1_8822B) & BIT_MASK_DTIM_PERIOD1_8822B)
-
-#define BIT_DTIM1_8822B BIT(15)
-#define BIT_TIM1_8822B BIT(14)
-
-#define BIT_SHIFT_PS_AID_1_8822B 0
-#define BIT_MASK_PS_AID_1_8822B 0x7ff
-#define BIT_PS_AID_1_8822B(x) \
- (((x) & BIT_MASK_PS_AID_1_8822B) << BIT_SHIFT_PS_AID_1_8822B)
-#define BIT_GET_PS_AID_1_8822B(x) \
- (((x) >> BIT_SHIFT_PS_AID_1_8822B) & BIT_MASK_PS_AID_1_8822B)
-
-/* 2 REG_ASSOCIATED_BFMEE_SEL_8822B */
-#define BIT_TXUSER_ID1_8822B BIT(25)
-
-#define BIT_SHIFT_AID1_8822B 16
-#define BIT_MASK_AID1_8822B 0x1ff
-#define BIT_AID1_8822B(x) (((x) & BIT_MASK_AID1_8822B) << BIT_SHIFT_AID1_8822B)
-#define BIT_GET_AID1_8822B(x) \
- (((x) >> BIT_SHIFT_AID1_8822B) & BIT_MASK_AID1_8822B)
-
-#define BIT_TXUSER_ID0_8822B BIT(9)
-
-#define BIT_SHIFT_AID0_8822B 0
-#define BIT_MASK_AID0_8822B 0x1ff
-#define BIT_AID0_8822B(x) (((x) & BIT_MASK_AID0_8822B) << BIT_SHIFT_AID0_8822B)
-#define BIT_GET_AID0_8822B(x) \
- (((x) >> BIT_SHIFT_AID0_8822B) & BIT_MASK_AID0_8822B)
-
-/* 2 REG_SND_PTCL_CTRL_8822B */
-
-#define BIT_SHIFT_NDP_RX_STANDBY_TIMER_8822B 24
-#define BIT_MASK_NDP_RX_STANDBY_TIMER_8822B 0xff
-#define BIT_NDP_RX_STANDBY_TIMER_8822B(x) \
- (((x) & BIT_MASK_NDP_RX_STANDBY_TIMER_8822B) \
- << BIT_SHIFT_NDP_RX_STANDBY_TIMER_8822B)
-#define BIT_GET_NDP_RX_STANDBY_TIMER_8822B(x) \
- (((x) >> BIT_SHIFT_NDP_RX_STANDBY_TIMER_8822B) & \
- BIT_MASK_NDP_RX_STANDBY_TIMER_8822B)
-
-#define BIT_SHIFT_CSI_RPT_OFFSET_HT_8822B 16
-#define BIT_MASK_CSI_RPT_OFFSET_HT_8822B 0xff
-#define BIT_CSI_RPT_OFFSET_HT_8822B(x) \
- (((x) & BIT_MASK_CSI_RPT_OFFSET_HT_8822B) \
- << BIT_SHIFT_CSI_RPT_OFFSET_HT_8822B)
-#define BIT_GET_CSI_RPT_OFFSET_HT_8822B(x) \
- (((x) >> BIT_SHIFT_CSI_RPT_OFFSET_HT_8822B) & \
- BIT_MASK_CSI_RPT_OFFSET_HT_8822B)
-
-#define BIT_SHIFT_R_WMAC_VHT_CATEGORY_8822B 8
-#define BIT_MASK_R_WMAC_VHT_CATEGORY_8822B 0xff
-#define BIT_R_WMAC_VHT_CATEGORY_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_VHT_CATEGORY_8822B) \
- << BIT_SHIFT_R_WMAC_VHT_CATEGORY_8822B)
-#define BIT_GET_R_WMAC_VHT_CATEGORY_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_VHT_CATEGORY_8822B) & \
- BIT_MASK_R_WMAC_VHT_CATEGORY_8822B)
-
-#define BIT_R_WMAC_USE_NSTS_8822B BIT(7)
-#define BIT_R_DISABLE_CHECK_VHTSIGB_CRC_8822B BIT(6)
-#define BIT_R_DISABLE_CHECK_VHTSIGA_CRC_8822B BIT(5)
-#define BIT_R_WMAC_BFPARAM_SEL_8822B BIT(4)
-#define BIT_R_WMAC_CSISEQ_SEL_8822B BIT(3)
-#define BIT_R_WMAC_CSI_WITHHTC_EN_8822B BIT(2)
-#define BIT_R_WMAC_HT_NDPA_EN_8822B BIT(1)
-#define BIT_R_WMAC_VHT_NDPA_EN_8822B BIT(0)
-
-/* 2 REG_RX_CSI_RPT_INFO_8822B */
-
-/* 2 REG_NS_ARP_CTRL_8822B */
-#define BIT_R_WMAC_NSARP_RSPEN_8822B BIT(15)
-#define BIT_R_WMAC_NSARP_RARP_8822B BIT(9)
-#define BIT_R_WMAC_NSARP_RIPV6_8822B BIT(8)
-
-#define BIT_SHIFT_R_WMAC_NSARP_MODEN_8822B 6
-#define BIT_MASK_R_WMAC_NSARP_MODEN_8822B 0x3
-#define BIT_R_WMAC_NSARP_MODEN_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_NSARP_MODEN_8822B) \
- << BIT_SHIFT_R_WMAC_NSARP_MODEN_8822B)
-#define BIT_GET_R_WMAC_NSARP_MODEN_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_NSARP_MODEN_8822B) & \
- BIT_MASK_R_WMAC_NSARP_MODEN_8822B)
-
-#define BIT_SHIFT_R_WMAC_NSARP_RSPFTP_8822B 4
-#define BIT_MASK_R_WMAC_NSARP_RSPFTP_8822B 0x3
-#define BIT_R_WMAC_NSARP_RSPFTP_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_NSARP_RSPFTP_8822B) \
- << BIT_SHIFT_R_WMAC_NSARP_RSPFTP_8822B)
-#define BIT_GET_R_WMAC_NSARP_RSPFTP_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_NSARP_RSPFTP_8822B) & \
- BIT_MASK_R_WMAC_NSARP_RSPFTP_8822B)
-
-#define BIT_SHIFT_R_WMAC_NSARP_RSPSEC_8822B 0
-#define BIT_MASK_R_WMAC_NSARP_RSPSEC_8822B 0xf
-#define BIT_R_WMAC_NSARP_RSPSEC_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_NSARP_RSPSEC_8822B) \
- << BIT_SHIFT_R_WMAC_NSARP_RSPSEC_8822B)
-#define BIT_GET_R_WMAC_NSARP_RSPSEC_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_NSARP_RSPSEC_8822B) & \
- BIT_MASK_R_WMAC_NSARP_RSPSEC_8822B)
-
-/* 2 REG_NS_ARP_INFO_8822B */
-#define BIT_REQ_IS_MCNS_8822B BIT(23)
-#define BIT_REQ_IS_UCNS_8822B BIT(22)
-#define BIT_REQ_IS_USNS_8822B BIT(21)
-#define BIT_REQ_IS_ARP_8822B BIT(20)
-#define BIT_EXPRSP_MH_WITHQC_8822B BIT(19)
-
-#define BIT_SHIFT_EXPRSP_SECTYPE_8822B 16
-#define BIT_MASK_EXPRSP_SECTYPE_8822B 0x7
-#define BIT_EXPRSP_SECTYPE_8822B(x) \
- (((x) & BIT_MASK_EXPRSP_SECTYPE_8822B) \
- << BIT_SHIFT_EXPRSP_SECTYPE_8822B)
-#define BIT_GET_EXPRSP_SECTYPE_8822B(x) \
- (((x) >> BIT_SHIFT_EXPRSP_SECTYPE_8822B) & \
- BIT_MASK_EXPRSP_SECTYPE_8822B)
-
-#define BIT_SHIFT_EXPRSP_CHKSM_7_TO_0_8822B 8
-#define BIT_MASK_EXPRSP_CHKSM_7_TO_0_8822B 0xff
-#define BIT_EXPRSP_CHKSM_7_TO_0_8822B(x) \
- (((x) & BIT_MASK_EXPRSP_CHKSM_7_TO_0_8822B) \
- << BIT_SHIFT_EXPRSP_CHKSM_7_TO_0_8822B)
-#define BIT_GET_EXPRSP_CHKSM_7_TO_0_8822B(x) \
- (((x) >> BIT_SHIFT_EXPRSP_CHKSM_7_TO_0_8822B) & \
- BIT_MASK_EXPRSP_CHKSM_7_TO_0_8822B)
-
-#define BIT_SHIFT_EXPRSP_CHKSM_15_TO_8_8822B 0
-#define BIT_MASK_EXPRSP_CHKSM_15_TO_8_8822B 0xff
-#define BIT_EXPRSP_CHKSM_15_TO_8_8822B(x) \
- (((x) & BIT_MASK_EXPRSP_CHKSM_15_TO_8_8822B) \
- << BIT_SHIFT_EXPRSP_CHKSM_15_TO_8_8822B)
-#define BIT_GET_EXPRSP_CHKSM_15_TO_8_8822B(x) \
- (((x) >> BIT_SHIFT_EXPRSP_CHKSM_15_TO_8_8822B) & \
- BIT_MASK_EXPRSP_CHKSM_15_TO_8_8822B)
-
-/* 2 REG_BEAMFORMING_INFO_NSARP_V1_8822B */
-
-#define BIT_SHIFT_WMAC_ARPIP_8822B 0
-#define BIT_MASK_WMAC_ARPIP_8822B 0xffffffffL
-#define BIT_WMAC_ARPIP_8822B(x) \
- (((x) & BIT_MASK_WMAC_ARPIP_8822B) << BIT_SHIFT_WMAC_ARPIP_8822B)
-#define BIT_GET_WMAC_ARPIP_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_ARPIP_8822B) & BIT_MASK_WMAC_ARPIP_8822B)
-
-/* 2 REG_BEAMFORMING_INFO_NSARP_8822B */
-
-#define BIT_SHIFT_BEAMFORMING_INFO_8822B 0
-#define BIT_MASK_BEAMFORMING_INFO_8822B 0xffffffffL
-#define BIT_BEAMFORMING_INFO_8822B(x) \
- (((x) & BIT_MASK_BEAMFORMING_INFO_8822B) \
- << BIT_SHIFT_BEAMFORMING_INFO_8822B)
-#define BIT_GET_BEAMFORMING_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_BEAMFORMING_INFO_8822B) & \
- BIT_MASK_BEAMFORMING_INFO_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-#define BIT_SHIFT_R_WMAC_IPV6_MYIPAD_8822B 0
-#define BIT_MASK_R_WMAC_IPV6_MYIPAD_8822B 0xffffffffffffffffffffffffffffffffL
-#define BIT_R_WMAC_IPV6_MYIPAD_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_IPV6_MYIPAD_8822B) \
- << BIT_SHIFT_R_WMAC_IPV6_MYIPAD_8822B)
-#define BIT_GET_R_WMAC_IPV6_MYIPAD_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_IPV6_MYIPAD_8822B) & \
- BIT_MASK_R_WMAC_IPV6_MYIPAD_8822B)
-
-/* 2 REG_RSVD_0X740_8822B */
-
-/* 2 REG_WMAC_RTX_CTX_SUBTYPE_CFG_8822B */
-
-#define BIT_SHIFT_R_WMAC_CTX_SUBTYPE_8822B 4
-#define BIT_MASK_R_WMAC_CTX_SUBTYPE_8822B 0xf
-#define BIT_R_WMAC_CTX_SUBTYPE_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_CTX_SUBTYPE_8822B) \
- << BIT_SHIFT_R_WMAC_CTX_SUBTYPE_8822B)
-#define BIT_GET_R_WMAC_CTX_SUBTYPE_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_CTX_SUBTYPE_8822B) & \
- BIT_MASK_R_WMAC_CTX_SUBTYPE_8822B)
-
-#define BIT_SHIFT_R_WMAC_RTX_SUBTYPE_8822B 0
-#define BIT_MASK_R_WMAC_RTX_SUBTYPE_8822B 0xf
-#define BIT_R_WMAC_RTX_SUBTYPE_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_RTX_SUBTYPE_8822B) \
- << BIT_SHIFT_R_WMAC_RTX_SUBTYPE_8822B)
-#define BIT_GET_R_WMAC_RTX_SUBTYPE_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_RTX_SUBTYPE_8822B) & \
- BIT_MASK_R_WMAC_RTX_SUBTYPE_8822B)
-
-/* 2 REG_WMAC_SWAES_CFG_8822B */
-
-/* 2 REG_BT_COEX_V2_8822B */
-#define BIT_GNT_BT_POLARITY_8822B BIT(12)
-#define BIT_GNT_BT_BYPASS_PRIORITY_8822B BIT(8)
-
-#define BIT_SHIFT_TIMER_8822B 0
-#define BIT_MASK_TIMER_8822B 0xff
-#define BIT_TIMER_8822B(x) \
- (((x) & BIT_MASK_TIMER_8822B) << BIT_SHIFT_TIMER_8822B)
-#define BIT_GET_TIMER_8822B(x) \
- (((x) >> BIT_SHIFT_TIMER_8822B) & BIT_MASK_TIMER_8822B)
-
-/* 2 REG_BT_COEX_8822B */
-#define BIT_R_GNT_BT_RFC_SW_8822B BIT(12)
-#define BIT_R_GNT_BT_RFC_SW_EN_8822B BIT(11)
-#define BIT_R_GNT_BT_BB_SW_8822B BIT(10)
-#define BIT_R_GNT_BT_BB_SW_EN_8822B BIT(9)
-#define BIT_R_BT_CNT_THREN_8822B BIT(8)
-
-#define BIT_SHIFT_R_BT_CNT_THR_8822B 0
-#define BIT_MASK_R_BT_CNT_THR_8822B 0xff
-#define BIT_R_BT_CNT_THR_8822B(x) \
- (((x) & BIT_MASK_R_BT_CNT_THR_8822B) << BIT_SHIFT_R_BT_CNT_THR_8822B)
-#define BIT_GET_R_BT_CNT_THR_8822B(x) \
- (((x) >> BIT_SHIFT_R_BT_CNT_THR_8822B) & BIT_MASK_R_BT_CNT_THR_8822B)
-
-/* 2 REG_WLAN_ACT_MASK_CTRL_8822B */
-#define BIT_WLRX_TER_BY_CTL_8822B BIT(43)
-#define BIT_WLRX_TER_BY_AD_8822B BIT(42)
-#define BIT_ANT_DIVERSITY_SEL_8822B BIT(41)
-#define BIT_ANTSEL_FOR_BT_CTRL_EN_8822B BIT(40)
-#define BIT_WLACT_LOW_GNTWL_EN_8822B BIT(34)
-#define BIT_WLACT_HIGH_GNTBT_EN_8822B BIT(33)
-#define BIT_NAV_UPPER_V1_8822B BIT(32)
-
-#define BIT_SHIFT_RXMYRTS_NAV_V1_8822B 8
-#define BIT_MASK_RXMYRTS_NAV_V1_8822B 0xff
-#define BIT_RXMYRTS_NAV_V1_8822B(x) \
- (((x) & BIT_MASK_RXMYRTS_NAV_V1_8822B) \
- << BIT_SHIFT_RXMYRTS_NAV_V1_8822B)
-#define BIT_GET_RXMYRTS_NAV_V1_8822B(x) \
- (((x) >> BIT_SHIFT_RXMYRTS_NAV_V1_8822B) & \
- BIT_MASK_RXMYRTS_NAV_V1_8822B)
-
-#define BIT_SHIFT_RTSRST_V1_8822B 0
-#define BIT_MASK_RTSRST_V1_8822B 0xff
-#define BIT_RTSRST_V1_8822B(x) \
- (((x) & BIT_MASK_RTSRST_V1_8822B) << BIT_SHIFT_RTSRST_V1_8822B)
-#define BIT_GET_RTSRST_V1_8822B(x) \
- (((x) >> BIT_SHIFT_RTSRST_V1_8822B) & BIT_MASK_RTSRST_V1_8822B)
-
-/* 2 REG_BT_COEX_ENHANCED_INTR_CTRL_8822B */
-
-#define BIT_SHIFT_BT_STAT_DELAY_8822B 12
-#define BIT_MASK_BT_STAT_DELAY_8822B 0xf
-#define BIT_BT_STAT_DELAY_8822B(x) \
- (((x) & BIT_MASK_BT_STAT_DELAY_8822B) << BIT_SHIFT_BT_STAT_DELAY_8822B)
-#define BIT_GET_BT_STAT_DELAY_8822B(x) \
- (((x) >> BIT_SHIFT_BT_STAT_DELAY_8822B) & BIT_MASK_BT_STAT_DELAY_8822B)
-
-#define BIT_SHIFT_BT_TRX_INIT_DETECT_8822B 8
-#define BIT_MASK_BT_TRX_INIT_DETECT_8822B 0xf
-#define BIT_BT_TRX_INIT_DETECT_8822B(x) \
- (((x) & BIT_MASK_BT_TRX_INIT_DETECT_8822B) \
- << BIT_SHIFT_BT_TRX_INIT_DETECT_8822B)
-#define BIT_GET_BT_TRX_INIT_DETECT_8822B(x) \
- (((x) >> BIT_SHIFT_BT_TRX_INIT_DETECT_8822B) & \
- BIT_MASK_BT_TRX_INIT_DETECT_8822B)
-
-#define BIT_SHIFT_BT_PRI_DETECT_TO_8822B 4
-#define BIT_MASK_BT_PRI_DETECT_TO_8822B 0xf
-#define BIT_BT_PRI_DETECT_TO_8822B(x) \
- (((x) & BIT_MASK_BT_PRI_DETECT_TO_8822B) \
- << BIT_SHIFT_BT_PRI_DETECT_TO_8822B)
-#define BIT_GET_BT_PRI_DETECT_TO_8822B(x) \
- (((x) >> BIT_SHIFT_BT_PRI_DETECT_TO_8822B) & \
- BIT_MASK_BT_PRI_DETECT_TO_8822B)
-
-#define BIT_R_GRANTALL_WLMASK_8822B BIT(3)
-#define BIT_STATIS_BT_EN_8822B BIT(2)
-#define BIT_WL_ACT_MASK_ENABLE_8822B BIT(1)
-#define BIT_ENHANCED_BT_8822B BIT(0)
-
-/* 2 REG_BT_ACT_STATISTICS_8822B */
-
-#define BIT_SHIFT_STATIS_BT_LO_RX_8822B (48 & CPU_OPT_WIDTH)
-#define BIT_MASK_STATIS_BT_LO_RX_8822B 0xffff
-#define BIT_STATIS_BT_LO_RX_8822B(x) \
- (((x) & BIT_MASK_STATIS_BT_LO_RX_8822B) \
- << BIT_SHIFT_STATIS_BT_LO_RX_8822B)
-#define BIT_GET_STATIS_BT_LO_RX_8822B(x) \
- (((x) >> BIT_SHIFT_STATIS_BT_LO_RX_8822B) & \
- BIT_MASK_STATIS_BT_LO_RX_8822B)
-
-#define BIT_SHIFT_STATIS_BT_LO_TX_8822B (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_STATIS_BT_LO_TX_8822B 0xffff
-#define BIT_STATIS_BT_LO_TX_8822B(x) \
- (((x) & BIT_MASK_STATIS_BT_LO_TX_8822B) \
- << BIT_SHIFT_STATIS_BT_LO_TX_8822B)
-#define BIT_GET_STATIS_BT_LO_TX_8822B(x) \
- (((x) >> BIT_SHIFT_STATIS_BT_LO_TX_8822B) & \
- BIT_MASK_STATIS_BT_LO_TX_8822B)
-
-#define BIT_SHIFT_STATIS_BT_HI_RX_8822B 16
-#define BIT_MASK_STATIS_BT_HI_RX_8822B 0xffff
-#define BIT_STATIS_BT_HI_RX_8822B(x) \
- (((x) & BIT_MASK_STATIS_BT_HI_RX_8822B) \
- << BIT_SHIFT_STATIS_BT_HI_RX_8822B)
-#define BIT_GET_STATIS_BT_HI_RX_8822B(x) \
- (((x) >> BIT_SHIFT_STATIS_BT_HI_RX_8822B) & \
- BIT_MASK_STATIS_BT_HI_RX_8822B)
-
-#define BIT_SHIFT_STATIS_BT_HI_TX_8822B 0
-#define BIT_MASK_STATIS_BT_HI_TX_8822B 0xffff
-#define BIT_STATIS_BT_HI_TX_8822B(x) \
- (((x) & BIT_MASK_STATIS_BT_HI_TX_8822B) \
- << BIT_SHIFT_STATIS_BT_HI_TX_8822B)
-#define BIT_GET_STATIS_BT_HI_TX_8822B(x) \
- (((x) >> BIT_SHIFT_STATIS_BT_HI_TX_8822B) & \
- BIT_MASK_STATIS_BT_HI_TX_8822B)
-
-/* 2 REG_BT_STATISTICS_CONTROL_REGISTER_8822B */
-
-#define BIT_SHIFT_R_BT_CMD_RPT_8822B 16
-#define BIT_MASK_R_BT_CMD_RPT_8822B 0xffff
-#define BIT_R_BT_CMD_RPT_8822B(x) \
- (((x) & BIT_MASK_R_BT_CMD_RPT_8822B) << BIT_SHIFT_R_BT_CMD_RPT_8822B)
-#define BIT_GET_R_BT_CMD_RPT_8822B(x) \
- (((x) >> BIT_SHIFT_R_BT_CMD_RPT_8822B) & BIT_MASK_R_BT_CMD_RPT_8822B)
-
-#define BIT_SHIFT_R_RPT_FROM_BT_8822B 8
-#define BIT_MASK_R_RPT_FROM_BT_8822B 0xff
-#define BIT_R_RPT_FROM_BT_8822B(x) \
- (((x) & BIT_MASK_R_RPT_FROM_BT_8822B) << BIT_SHIFT_R_RPT_FROM_BT_8822B)
-#define BIT_GET_R_RPT_FROM_BT_8822B(x) \
- (((x) >> BIT_SHIFT_R_RPT_FROM_BT_8822B) & BIT_MASK_R_RPT_FROM_BT_8822B)
-
-#define BIT_SHIFT_BT_HID_ISR_SET_8822B 6
-#define BIT_MASK_BT_HID_ISR_SET_8822B 0x3
-#define BIT_BT_HID_ISR_SET_8822B(x) \
- (((x) & BIT_MASK_BT_HID_ISR_SET_8822B) \
- << BIT_SHIFT_BT_HID_ISR_SET_8822B)
-#define BIT_GET_BT_HID_ISR_SET_8822B(x) \
- (((x) >> BIT_SHIFT_BT_HID_ISR_SET_8822B) & \
- BIT_MASK_BT_HID_ISR_SET_8822B)
-
-#define BIT_TDMA_BT_START_NOTIFY_8822B BIT(5)
-#define BIT_ENABLE_TDMA_FW_MODE_8822B BIT(4)
-#define BIT_ENABLE_PTA_TDMA_MODE_8822B BIT(3)
-#define BIT_ENABLE_COEXIST_TAB_IN_TDMA_8822B BIT(2)
-#define BIT_GPIO2_GPIO3_EXANGE_OR_NO_BT_CCA_8822B BIT(1)
-#define BIT_RTK_BT_ENABLE_8822B BIT(0)
-
-/* 2 REG_BT_STATUS_REPORT_REGISTER_8822B */
-
-#define BIT_SHIFT_BT_PROFILE_8822B 24
-#define BIT_MASK_BT_PROFILE_8822B 0xff
-#define BIT_BT_PROFILE_8822B(x) \
- (((x) & BIT_MASK_BT_PROFILE_8822B) << BIT_SHIFT_BT_PROFILE_8822B)
-#define BIT_GET_BT_PROFILE_8822B(x) \
- (((x) >> BIT_SHIFT_BT_PROFILE_8822B) & BIT_MASK_BT_PROFILE_8822B)
-
-#define BIT_SHIFT_BT_POWER_8822B 16
-#define BIT_MASK_BT_POWER_8822B 0xff
-#define BIT_BT_POWER_8822B(x) \
- (((x) & BIT_MASK_BT_POWER_8822B) << BIT_SHIFT_BT_POWER_8822B)
-#define BIT_GET_BT_POWER_8822B(x) \
- (((x) >> BIT_SHIFT_BT_POWER_8822B) & BIT_MASK_BT_POWER_8822B)
-
-#define BIT_SHIFT_BT_PREDECT_STATUS_8822B 8
-#define BIT_MASK_BT_PREDECT_STATUS_8822B 0xff
-#define BIT_BT_PREDECT_STATUS_8822B(x) \
- (((x) & BIT_MASK_BT_PREDECT_STATUS_8822B) \
- << BIT_SHIFT_BT_PREDECT_STATUS_8822B)
-#define BIT_GET_BT_PREDECT_STATUS_8822B(x) \
- (((x) >> BIT_SHIFT_BT_PREDECT_STATUS_8822B) & \
- BIT_MASK_BT_PREDECT_STATUS_8822B)
-
-#define BIT_SHIFT_BT_CMD_INFO_8822B 0
-#define BIT_MASK_BT_CMD_INFO_8822B 0xff
-#define BIT_BT_CMD_INFO_8822B(x) \
- (((x) & BIT_MASK_BT_CMD_INFO_8822B) << BIT_SHIFT_BT_CMD_INFO_8822B)
-#define BIT_GET_BT_CMD_INFO_8822B(x) \
- (((x) >> BIT_SHIFT_BT_CMD_INFO_8822B) & BIT_MASK_BT_CMD_INFO_8822B)
-
-/* 2 REG_BT_INTERRUPT_CONTROL_REGISTER_8822B */
-#define BIT_EN_MAC_NULL_PKT_NOTIFY_8822B BIT(31)
-#define BIT_EN_WLAN_RPT_AND_BT_QUERY_8822B BIT(30)
-#define BIT_EN_BT_STSTUS_RPT_8822B BIT(29)
-#define BIT_EN_BT_POWER_8822B BIT(28)
-#define BIT_EN_BT_CHANNEL_8822B BIT(27)
-#define BIT_EN_BT_SLOT_CHANGE_8822B BIT(26)
-#define BIT_EN_BT_PROFILE_OR_HID_8822B BIT(25)
-#define BIT_WLAN_RPT_NOTIFY_8822B BIT(24)
-
-#define BIT_SHIFT_WLAN_RPT_DATA_8822B 16
-#define BIT_MASK_WLAN_RPT_DATA_8822B 0xff
-#define BIT_WLAN_RPT_DATA_8822B(x) \
- (((x) & BIT_MASK_WLAN_RPT_DATA_8822B) << BIT_SHIFT_WLAN_RPT_DATA_8822B)
-#define BIT_GET_WLAN_RPT_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_WLAN_RPT_DATA_8822B) & BIT_MASK_WLAN_RPT_DATA_8822B)
-
-#define BIT_SHIFT_CMD_ID_8822B 8
-#define BIT_MASK_CMD_ID_8822B 0xff
-#define BIT_CMD_ID_8822B(x) \
- (((x) & BIT_MASK_CMD_ID_8822B) << BIT_SHIFT_CMD_ID_8822B)
-#define BIT_GET_CMD_ID_8822B(x) \
- (((x) >> BIT_SHIFT_CMD_ID_8822B) & BIT_MASK_CMD_ID_8822B)
-
-#define BIT_SHIFT_BT_DATA_8822B 0
-#define BIT_MASK_BT_DATA_8822B 0xff
-#define BIT_BT_DATA_8822B(x) \
- (((x) & BIT_MASK_BT_DATA_8822B) << BIT_SHIFT_BT_DATA_8822B)
-#define BIT_GET_BT_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_BT_DATA_8822B) & BIT_MASK_BT_DATA_8822B)
-
-/* 2 REG_WLAN_REPORT_TIME_OUT_CONTROL_REGISTER_8822B */
-
-#define BIT_SHIFT_WLAN_RPT_TO_8822B 0
-#define BIT_MASK_WLAN_RPT_TO_8822B 0xff
-#define BIT_WLAN_RPT_TO_8822B(x) \
- (((x) & BIT_MASK_WLAN_RPT_TO_8822B) << BIT_SHIFT_WLAN_RPT_TO_8822B)
-#define BIT_GET_WLAN_RPT_TO_8822B(x) \
- (((x) >> BIT_SHIFT_WLAN_RPT_TO_8822B) & BIT_MASK_WLAN_RPT_TO_8822B)
-
-/* 2 REG_BT_ISOLATION_TABLE_REGISTER_REGISTER_8822B */
-
-#define BIT_SHIFT_ISOLATION_CHK_8822B 1
-#define BIT_MASK_ISOLATION_CHK_8822B 0x7fffffffffffffffffffL
-#define BIT_ISOLATION_CHK_8822B(x) \
- (((x) & BIT_MASK_ISOLATION_CHK_8822B) << BIT_SHIFT_ISOLATION_CHK_8822B)
-#define BIT_GET_ISOLATION_CHK_8822B(x) \
- (((x) >> BIT_SHIFT_ISOLATION_CHK_8822B) & BIT_MASK_ISOLATION_CHK_8822B)
-
-#define BIT_ISOLATION_EN_8822B BIT(0)
-
-/* 2 REG_BT_INTERRUPT_STATUS_REGISTER_8822B */
-#define BIT_BT_HID_ISR_8822B BIT(7)
-#define BIT_BT_QUERY_ISR_8822B BIT(6)
-#define BIT_MAC_NULL_PKT_NOTIFY_ISR_8822B BIT(5)
-#define BIT_WLAN_RPT_ISR_8822B BIT(4)
-#define BIT_BT_POWER_ISR_8822B BIT(3)
-#define BIT_BT_CHANNEL_ISR_8822B BIT(2)
-#define BIT_BT_SLOT_CHANGE_ISR_8822B BIT(1)
-#define BIT_BT_PROFILE_ISR_8822B BIT(0)
-
-/* 2 REG_BT_TDMA_TIME_REGISTER_8822B */
-
-#define BIT_SHIFT_BT_TIME_8822B 6
-#define BIT_MASK_BT_TIME_8822B 0x3ffffff
-#define BIT_BT_TIME_8822B(x) \
- (((x) & BIT_MASK_BT_TIME_8822B) << BIT_SHIFT_BT_TIME_8822B)
-#define BIT_GET_BT_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_BT_TIME_8822B) & BIT_MASK_BT_TIME_8822B)
-
-#define BIT_SHIFT_BT_RPT_SAMPLE_RATE_8822B 0
-#define BIT_MASK_BT_RPT_SAMPLE_RATE_8822B 0x3f
-#define BIT_BT_RPT_SAMPLE_RATE_8822B(x) \
- (((x) & BIT_MASK_BT_RPT_SAMPLE_RATE_8822B) \
- << BIT_SHIFT_BT_RPT_SAMPLE_RATE_8822B)
-#define BIT_GET_BT_RPT_SAMPLE_RATE_8822B(x) \
- (((x) >> BIT_SHIFT_BT_RPT_SAMPLE_RATE_8822B) & \
- BIT_MASK_BT_RPT_SAMPLE_RATE_8822B)
-
-/* 2 REG_BT_ACT_REGISTER_8822B */
-
-#define BIT_SHIFT_BT_EISR_EN_8822B 16
-#define BIT_MASK_BT_EISR_EN_8822B 0xff
-#define BIT_BT_EISR_EN_8822B(x) \
- (((x) & BIT_MASK_BT_EISR_EN_8822B) << BIT_SHIFT_BT_EISR_EN_8822B)
-#define BIT_GET_BT_EISR_EN_8822B(x) \
- (((x) >> BIT_SHIFT_BT_EISR_EN_8822B) & BIT_MASK_BT_EISR_EN_8822B)
-
-#define BIT_BT_ACT_FALLING_ISR_8822B BIT(10)
-#define BIT_BT_ACT_RISING_ISR_8822B BIT(9)
-#define BIT_TDMA_TO_ISR_8822B BIT(8)
-
-#define BIT_SHIFT_BT_CH_8822B 0
-#define BIT_MASK_BT_CH_8822B 0xff
-#define BIT_BT_CH_8822B(x) \
- (((x) & BIT_MASK_BT_CH_8822B) << BIT_SHIFT_BT_CH_8822B)
-#define BIT_GET_BT_CH_8822B(x) \
- (((x) >> BIT_SHIFT_BT_CH_8822B) & BIT_MASK_BT_CH_8822B)
-
-/* 2 REG_OBFF_CTRL_BASIC_8822B */
-#define BIT_OBFF_EN_V1_8822B BIT(31)
-
-#define BIT_SHIFT_OBFF_STATE_V1_8822B 28
-#define BIT_MASK_OBFF_STATE_V1_8822B 0x3
-#define BIT_OBFF_STATE_V1_8822B(x) \
- (((x) & BIT_MASK_OBFF_STATE_V1_8822B) << BIT_SHIFT_OBFF_STATE_V1_8822B)
-#define BIT_GET_OBFF_STATE_V1_8822B(x) \
- (((x) >> BIT_SHIFT_OBFF_STATE_V1_8822B) & BIT_MASK_OBFF_STATE_V1_8822B)
-
-#define BIT_OBFF_ACT_RXDMA_EN_8822B BIT(27)
-#define BIT_OBFF_BLOCK_INT_EN_8822B BIT(26)
-#define BIT_OBFF_AUTOACT_EN_8822B BIT(25)
-#define BIT_OBFF_AUTOIDLE_EN_8822B BIT(24)
-
-#define BIT_SHIFT_WAKE_MAX_PLS_8822B 20
-#define BIT_MASK_WAKE_MAX_PLS_8822B 0x7
-#define BIT_WAKE_MAX_PLS_8822B(x) \
- (((x) & BIT_MASK_WAKE_MAX_PLS_8822B) << BIT_SHIFT_WAKE_MAX_PLS_8822B)
-#define BIT_GET_WAKE_MAX_PLS_8822B(x) \
- (((x) >> BIT_SHIFT_WAKE_MAX_PLS_8822B) & BIT_MASK_WAKE_MAX_PLS_8822B)
-
-#define BIT_SHIFT_WAKE_MIN_PLS_8822B 16
-#define BIT_MASK_WAKE_MIN_PLS_8822B 0x7
-#define BIT_WAKE_MIN_PLS_8822B(x) \
- (((x) & BIT_MASK_WAKE_MIN_PLS_8822B) << BIT_SHIFT_WAKE_MIN_PLS_8822B)
-#define BIT_GET_WAKE_MIN_PLS_8822B(x) \
- (((x) >> BIT_SHIFT_WAKE_MIN_PLS_8822B) & BIT_MASK_WAKE_MIN_PLS_8822B)
-
-#define BIT_SHIFT_WAKE_MAX_F2F_8822B 12
-#define BIT_MASK_WAKE_MAX_F2F_8822B 0x7
-#define BIT_WAKE_MAX_F2F_8822B(x) \
- (((x) & BIT_MASK_WAKE_MAX_F2F_8822B) << BIT_SHIFT_WAKE_MAX_F2F_8822B)
-#define BIT_GET_WAKE_MAX_F2F_8822B(x) \
- (((x) >> BIT_SHIFT_WAKE_MAX_F2F_8822B) & BIT_MASK_WAKE_MAX_F2F_8822B)
-
-#define BIT_SHIFT_WAKE_MIN_F2F_8822B 8
-#define BIT_MASK_WAKE_MIN_F2F_8822B 0x7
-#define BIT_WAKE_MIN_F2F_8822B(x) \
- (((x) & BIT_MASK_WAKE_MIN_F2F_8822B) << BIT_SHIFT_WAKE_MIN_F2F_8822B)
-#define BIT_GET_WAKE_MIN_F2F_8822B(x) \
- (((x) >> BIT_SHIFT_WAKE_MIN_F2F_8822B) & BIT_MASK_WAKE_MIN_F2F_8822B)
-
-#define BIT_APP_CPU_ACT_V1_8822B BIT(3)
-#define BIT_APP_OBFF_V1_8822B BIT(2)
-#define BIT_APP_IDLE_V1_8822B BIT(1)
-#define BIT_APP_INIT_V1_8822B BIT(0)
-
-/* 2 REG_OBFF_CTRL2_TIMER_8822B */
-
-#define BIT_SHIFT_RX_HIGH_TIMER_IDX_8822B 24
-#define BIT_MASK_RX_HIGH_TIMER_IDX_8822B 0x7
-#define BIT_RX_HIGH_TIMER_IDX_8822B(x) \
- (((x) & BIT_MASK_RX_HIGH_TIMER_IDX_8822B) \
- << BIT_SHIFT_RX_HIGH_TIMER_IDX_8822B)
-#define BIT_GET_RX_HIGH_TIMER_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RX_HIGH_TIMER_IDX_8822B) & \
- BIT_MASK_RX_HIGH_TIMER_IDX_8822B)
-
-#define BIT_SHIFT_RX_MED_TIMER_IDX_8822B 16
-#define BIT_MASK_RX_MED_TIMER_IDX_8822B 0x7
-#define BIT_RX_MED_TIMER_IDX_8822B(x) \
- (((x) & BIT_MASK_RX_MED_TIMER_IDX_8822B) \
- << BIT_SHIFT_RX_MED_TIMER_IDX_8822B)
-#define BIT_GET_RX_MED_TIMER_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RX_MED_TIMER_IDX_8822B) & \
- BIT_MASK_RX_MED_TIMER_IDX_8822B)
-
-#define BIT_SHIFT_RX_LOW_TIMER_IDX_8822B 8
-#define BIT_MASK_RX_LOW_TIMER_IDX_8822B 0x7
-#define BIT_RX_LOW_TIMER_IDX_8822B(x) \
- (((x) & BIT_MASK_RX_LOW_TIMER_IDX_8822B) \
- << BIT_SHIFT_RX_LOW_TIMER_IDX_8822B)
-#define BIT_GET_RX_LOW_TIMER_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RX_LOW_TIMER_IDX_8822B) & \
- BIT_MASK_RX_LOW_TIMER_IDX_8822B)
-
-#define BIT_SHIFT_OBFF_INT_TIMER_IDX_8822B 0
-#define BIT_MASK_OBFF_INT_TIMER_IDX_8822B 0x7
-#define BIT_OBFF_INT_TIMER_IDX_8822B(x) \
- (((x) & BIT_MASK_OBFF_INT_TIMER_IDX_8822B) \
- << BIT_SHIFT_OBFF_INT_TIMER_IDX_8822B)
-#define BIT_GET_OBFF_INT_TIMER_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_OBFF_INT_TIMER_IDX_8822B) & \
- BIT_MASK_OBFF_INT_TIMER_IDX_8822B)
-
-/* 2 REG_LTR_CTRL_BASIC_8822B */
-#define BIT_LTR_EN_V1_8822B BIT(31)
-#define BIT_LTR_HW_EN_V1_8822B BIT(30)
-#define BIT_LRT_ACT_CTS_EN_8822B BIT(29)
-#define BIT_LTR_ACT_RXPKT_EN_8822B BIT(28)
-#define BIT_LTR_ACT_RXDMA_EN_8822B BIT(27)
-#define BIT_LTR_IDLE_NO_SNOOP_8822B BIT(26)
-#define BIT_SPDUP_MGTPKT_8822B BIT(25)
-#define BIT_RX_AGG_EN_8822B BIT(24)
-#define BIT_APP_LTR_ACT_8822B BIT(23)
-#define BIT_APP_LTR_IDLE_8822B BIT(22)
-
-#define BIT_SHIFT_HIGH_RATE_TRIG_SEL_8822B 20
-#define BIT_MASK_HIGH_RATE_TRIG_SEL_8822B 0x3
-#define BIT_HIGH_RATE_TRIG_SEL_8822B(x) \
- (((x) & BIT_MASK_HIGH_RATE_TRIG_SEL_8822B) \
- << BIT_SHIFT_HIGH_RATE_TRIG_SEL_8822B)
-#define BIT_GET_HIGH_RATE_TRIG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_HIGH_RATE_TRIG_SEL_8822B) & \
- BIT_MASK_HIGH_RATE_TRIG_SEL_8822B)
-
-#define BIT_SHIFT_MED_RATE_TRIG_SEL_8822B 18
-#define BIT_MASK_MED_RATE_TRIG_SEL_8822B 0x3
-#define BIT_MED_RATE_TRIG_SEL_8822B(x) \
- (((x) & BIT_MASK_MED_RATE_TRIG_SEL_8822B) \
- << BIT_SHIFT_MED_RATE_TRIG_SEL_8822B)
-#define BIT_GET_MED_RATE_TRIG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_MED_RATE_TRIG_SEL_8822B) & \
- BIT_MASK_MED_RATE_TRIG_SEL_8822B)
-
-#define BIT_SHIFT_LOW_RATE_TRIG_SEL_8822B 16
-#define BIT_MASK_LOW_RATE_TRIG_SEL_8822B 0x3
-#define BIT_LOW_RATE_TRIG_SEL_8822B(x) \
- (((x) & BIT_MASK_LOW_RATE_TRIG_SEL_8822B) \
- << BIT_SHIFT_LOW_RATE_TRIG_SEL_8822B)
-#define BIT_GET_LOW_RATE_TRIG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_LOW_RATE_TRIG_SEL_8822B) & \
- BIT_MASK_LOW_RATE_TRIG_SEL_8822B)
-
-#define BIT_SHIFT_HIGH_RATE_BD_IDX_8822B 8
-#define BIT_MASK_HIGH_RATE_BD_IDX_8822B 0x7f
-#define BIT_HIGH_RATE_BD_IDX_8822B(x) \
- (((x) & BIT_MASK_HIGH_RATE_BD_IDX_8822B) \
- << BIT_SHIFT_HIGH_RATE_BD_IDX_8822B)
-#define BIT_GET_HIGH_RATE_BD_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_HIGH_RATE_BD_IDX_8822B) & \
- BIT_MASK_HIGH_RATE_BD_IDX_8822B)
-
-#define BIT_SHIFT_LOW_RATE_BD_IDX_8822B 0
-#define BIT_MASK_LOW_RATE_BD_IDX_8822B 0x7f
-#define BIT_LOW_RATE_BD_IDX_8822B(x) \
- (((x) & BIT_MASK_LOW_RATE_BD_IDX_8822B) \
- << BIT_SHIFT_LOW_RATE_BD_IDX_8822B)
-#define BIT_GET_LOW_RATE_BD_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_LOW_RATE_BD_IDX_8822B) & \
- BIT_MASK_LOW_RATE_BD_IDX_8822B)
-
-/* 2 REG_LTR_CTRL2_TIMER_THRESHOLD_8822B */
-
-#define BIT_SHIFT_RX_EMPTY_TIMER_IDX_8822B 24
-#define BIT_MASK_RX_EMPTY_TIMER_IDX_8822B 0x7
-#define BIT_RX_EMPTY_TIMER_IDX_8822B(x) \
- (((x) & BIT_MASK_RX_EMPTY_TIMER_IDX_8822B) \
- << BIT_SHIFT_RX_EMPTY_TIMER_IDX_8822B)
-#define BIT_GET_RX_EMPTY_TIMER_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RX_EMPTY_TIMER_IDX_8822B) & \
- BIT_MASK_RX_EMPTY_TIMER_IDX_8822B)
-
-#define BIT_SHIFT_RX_AFULL_TH_IDX_8822B 20
-#define BIT_MASK_RX_AFULL_TH_IDX_8822B 0x7
-#define BIT_RX_AFULL_TH_IDX_8822B(x) \
- (((x) & BIT_MASK_RX_AFULL_TH_IDX_8822B) \
- << BIT_SHIFT_RX_AFULL_TH_IDX_8822B)
-#define BIT_GET_RX_AFULL_TH_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RX_AFULL_TH_IDX_8822B) & \
- BIT_MASK_RX_AFULL_TH_IDX_8822B)
-
-#define BIT_SHIFT_RX_HIGH_TH_IDX_8822B 16
-#define BIT_MASK_RX_HIGH_TH_IDX_8822B 0x7
-#define BIT_RX_HIGH_TH_IDX_8822B(x) \
- (((x) & BIT_MASK_RX_HIGH_TH_IDX_8822B) \
- << BIT_SHIFT_RX_HIGH_TH_IDX_8822B)
-#define BIT_GET_RX_HIGH_TH_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RX_HIGH_TH_IDX_8822B) & \
- BIT_MASK_RX_HIGH_TH_IDX_8822B)
-
-#define BIT_SHIFT_RX_MED_TH_IDX_8822B 12
-#define BIT_MASK_RX_MED_TH_IDX_8822B 0x7
-#define BIT_RX_MED_TH_IDX_8822B(x) \
- (((x) & BIT_MASK_RX_MED_TH_IDX_8822B) << BIT_SHIFT_RX_MED_TH_IDX_8822B)
-#define BIT_GET_RX_MED_TH_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RX_MED_TH_IDX_8822B) & BIT_MASK_RX_MED_TH_IDX_8822B)
-
-#define BIT_SHIFT_RX_LOW_TH_IDX_8822B 8
-#define BIT_MASK_RX_LOW_TH_IDX_8822B 0x7
-#define BIT_RX_LOW_TH_IDX_8822B(x) \
- (((x) & BIT_MASK_RX_LOW_TH_IDX_8822B) << BIT_SHIFT_RX_LOW_TH_IDX_8822B)
-#define BIT_GET_RX_LOW_TH_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_RX_LOW_TH_IDX_8822B) & BIT_MASK_RX_LOW_TH_IDX_8822B)
-
-#define BIT_SHIFT_LTR_SPACE_IDX_8822B 4
-#define BIT_MASK_LTR_SPACE_IDX_8822B 0x3
-#define BIT_LTR_SPACE_IDX_8822B(x) \
- (((x) & BIT_MASK_LTR_SPACE_IDX_8822B) << BIT_SHIFT_LTR_SPACE_IDX_8822B)
-#define BIT_GET_LTR_SPACE_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_LTR_SPACE_IDX_8822B) & BIT_MASK_LTR_SPACE_IDX_8822B)
-
-#define BIT_SHIFT_LTR_IDLE_TIMER_IDX_8822B 0
-#define BIT_MASK_LTR_IDLE_TIMER_IDX_8822B 0x7
-#define BIT_LTR_IDLE_TIMER_IDX_8822B(x) \
- (((x) & BIT_MASK_LTR_IDLE_TIMER_IDX_8822B) \
- << BIT_SHIFT_LTR_IDLE_TIMER_IDX_8822B)
-#define BIT_GET_LTR_IDLE_TIMER_IDX_8822B(x) \
- (((x) >> BIT_SHIFT_LTR_IDLE_TIMER_IDX_8822B) & \
- BIT_MASK_LTR_IDLE_TIMER_IDX_8822B)
-
-/* 2 REG_LTR_IDLE_LATENCY_V1_8822B */
-
-#define BIT_SHIFT_LTR_IDLE_L_8822B 0
-#define BIT_MASK_LTR_IDLE_L_8822B 0xffffffffL
-#define BIT_LTR_IDLE_L_8822B(x) \
- (((x) & BIT_MASK_LTR_IDLE_L_8822B) << BIT_SHIFT_LTR_IDLE_L_8822B)
-#define BIT_GET_LTR_IDLE_L_8822B(x) \
- (((x) >> BIT_SHIFT_LTR_IDLE_L_8822B) & BIT_MASK_LTR_IDLE_L_8822B)
-
-/* 2 REG_LTR_ACTIVE_LATENCY_V1_8822B */
-
-#define BIT_SHIFT_LTR_ACT_L_8822B 0
-#define BIT_MASK_LTR_ACT_L_8822B 0xffffffffL
-#define BIT_LTR_ACT_L_8822B(x) \
- (((x) & BIT_MASK_LTR_ACT_L_8822B) << BIT_SHIFT_LTR_ACT_L_8822B)
-#define BIT_GET_LTR_ACT_L_8822B(x) \
- (((x) >> BIT_SHIFT_LTR_ACT_L_8822B) & BIT_MASK_LTR_ACT_L_8822B)
-
-/* 2 REG_ANTENNA_TRAINING_CONTROL_REGISTER_8822B */
-#define BIT_APPEND_MACID_IN_RESP_EN_8822B BIT(50)
-#define BIT_ADDR2_MATCH_EN_8822B BIT(49)
-#define BIT_ANTTRN_EN_8822B BIT(48)
-
-#define BIT_SHIFT_TRAIN_STA_ADDR_8822B 0
-#define BIT_MASK_TRAIN_STA_ADDR_8822B 0xffffffffffffL
-#define BIT_TRAIN_STA_ADDR_8822B(x) \
- (((x) & BIT_MASK_TRAIN_STA_ADDR_8822B) \
- << BIT_SHIFT_TRAIN_STA_ADDR_8822B)
-#define BIT_GET_TRAIN_STA_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_TRAIN_STA_ADDR_8822B) & \
- BIT_MASK_TRAIN_STA_ADDR_8822B)
-
-/* 2 REG_RSVD_0X7B4_8822B */
-
-/* 2 REG_WMAC_PKTCNT_RWD_8822B */
-
-#define BIT_SHIFT_PKTCNT_BSSIDMAP_8822B 4
-#define BIT_MASK_PKTCNT_BSSIDMAP_8822B 0xf
-#define BIT_PKTCNT_BSSIDMAP_8822B(x) \
- (((x) & BIT_MASK_PKTCNT_BSSIDMAP_8822B) \
- << BIT_SHIFT_PKTCNT_BSSIDMAP_8822B)
-#define BIT_GET_PKTCNT_BSSIDMAP_8822B(x) \
- (((x) >> BIT_SHIFT_PKTCNT_BSSIDMAP_8822B) & \
- BIT_MASK_PKTCNT_BSSIDMAP_8822B)
-
-#define BIT_PKTCNT_CNTRST_8822B BIT(1)
-#define BIT_PKTCNT_CNTEN_8822B BIT(0)
-
-/* 2 REG_WMAC_PKTCNT_CTRL_8822B */
-#define BIT_WMAC_PKTCNT_TRST_8822B BIT(9)
-#define BIT_WMAC_PKTCNT_FEN_8822B BIT(8)
-
-#define BIT_SHIFT_WMAC_PKTCNT_CFGAD_8822B 0
-#define BIT_MASK_WMAC_PKTCNT_CFGAD_8822B 0xff
-#define BIT_WMAC_PKTCNT_CFGAD_8822B(x) \
- (((x) & BIT_MASK_WMAC_PKTCNT_CFGAD_8822B) \
- << BIT_SHIFT_WMAC_PKTCNT_CFGAD_8822B)
-#define BIT_GET_WMAC_PKTCNT_CFGAD_8822B(x) \
- (((x) >> BIT_SHIFT_WMAC_PKTCNT_CFGAD_8822B) & \
- BIT_MASK_WMAC_PKTCNT_CFGAD_8822B)
-
-/* 2 REG_IQ_DUMP_8822B */
-
-#define BIT_SHIFT_R_WMAC_MATCH_REF_MAC_8822B (64 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_MATCH_REF_MAC_8822B 0xffffffffL
-#define BIT_R_WMAC_MATCH_REF_MAC_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_MATCH_REF_MAC_8822B) \
- << BIT_SHIFT_R_WMAC_MATCH_REF_MAC_8822B)
-#define BIT_GET_R_WMAC_MATCH_REF_MAC_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_MATCH_REF_MAC_8822B) & \
- BIT_MASK_R_WMAC_MATCH_REF_MAC_8822B)
-
-#define BIT_SHIFT_R_WMAC_MASK_LA_MAC_8822B (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_MASK_LA_MAC_8822B 0xffffffffL
-#define BIT_R_WMAC_MASK_LA_MAC_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_MASK_LA_MAC_8822B) \
- << BIT_SHIFT_R_WMAC_MASK_LA_MAC_8822B)
-#define BIT_GET_R_WMAC_MASK_LA_MAC_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_MASK_LA_MAC_8822B) & \
- BIT_MASK_R_WMAC_MASK_LA_MAC_8822B)
-
-#define BIT_SHIFT_DUMP_OK_ADDR_8822B 15
-#define BIT_MASK_DUMP_OK_ADDR_8822B 0x1ffff
-#define BIT_DUMP_OK_ADDR_8822B(x) \
- (((x) & BIT_MASK_DUMP_OK_ADDR_8822B) << BIT_SHIFT_DUMP_OK_ADDR_8822B)
-#define BIT_GET_DUMP_OK_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_DUMP_OK_ADDR_8822B) & BIT_MASK_DUMP_OK_ADDR_8822B)
-
-#define BIT_SHIFT_R_TRIG_TIME_SEL_8822B 8
-#define BIT_MASK_R_TRIG_TIME_SEL_8822B 0x7f
-#define BIT_R_TRIG_TIME_SEL_8822B(x) \
- (((x) & BIT_MASK_R_TRIG_TIME_SEL_8822B) \
- << BIT_SHIFT_R_TRIG_TIME_SEL_8822B)
-#define BIT_GET_R_TRIG_TIME_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_R_TRIG_TIME_SEL_8822B) & \
- BIT_MASK_R_TRIG_TIME_SEL_8822B)
-
-#define BIT_SHIFT_R_MAC_TRIG_SEL_8822B 6
-#define BIT_MASK_R_MAC_TRIG_SEL_8822B 0x3
-#define BIT_R_MAC_TRIG_SEL_8822B(x) \
- (((x) & BIT_MASK_R_MAC_TRIG_SEL_8822B) \
- << BIT_SHIFT_R_MAC_TRIG_SEL_8822B)
-#define BIT_GET_R_MAC_TRIG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_R_MAC_TRIG_SEL_8822B) & \
- BIT_MASK_R_MAC_TRIG_SEL_8822B)
-
-#define BIT_MAC_TRIG_REG_8822B BIT(5)
-
-#define BIT_SHIFT_R_LEVEL_PULSE_SEL_8822B 3
-#define BIT_MASK_R_LEVEL_PULSE_SEL_8822B 0x3
-#define BIT_R_LEVEL_PULSE_SEL_8822B(x) \
- (((x) & BIT_MASK_R_LEVEL_PULSE_SEL_8822B) \
- << BIT_SHIFT_R_LEVEL_PULSE_SEL_8822B)
-#define BIT_GET_R_LEVEL_PULSE_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_R_LEVEL_PULSE_SEL_8822B) & \
- BIT_MASK_R_LEVEL_PULSE_SEL_8822B)
-
-#define BIT_EN_LA_MAC_8822B BIT(2)
-#define BIT_R_EN_IQDUMP_8822B BIT(1)
-#define BIT_R_IQDATA_DUMP_8822B BIT(0)
-
-/* 2 REG_WMAC_FTM_CTL_8822B */
-#define BIT_RXFTM_TXACK_SC_8822B BIT(6)
-#define BIT_RXFTM_TXACK_BW_8822B BIT(5)
-#define BIT_RXFTM_EN_8822B BIT(3)
-#define BIT_RXFTMREQ_BYDRV_8822B BIT(2)
-#define BIT_RXFTMREQ_EN_8822B BIT(1)
-#define BIT_FTM_EN_8822B BIT(0)
-
-/* 2 REG_WMAC_IQ_MDPK_FUNC_8822B */
-
-/* 2 REG_WMAC_OPTION_FUNCTION_8822B */
-
-#define BIT_SHIFT_R_WMAC_RX_FIL_LEN_8822B (64 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_RX_FIL_LEN_8822B 0xffff
-#define BIT_R_WMAC_RX_FIL_LEN_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_RX_FIL_LEN_8822B) \
- << BIT_SHIFT_R_WMAC_RX_FIL_LEN_8822B)
-#define BIT_GET_R_WMAC_RX_FIL_LEN_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_RX_FIL_LEN_8822B) & \
- BIT_MASK_R_WMAC_RX_FIL_LEN_8822B)
-
-#define BIT_SHIFT_R_WMAC_RXFIFO_FULL_TH_8822B (56 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_WMAC_RXFIFO_FULL_TH_8822B 0xff
-#define BIT_R_WMAC_RXFIFO_FULL_TH_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_RXFIFO_FULL_TH_8822B) \
- << BIT_SHIFT_R_WMAC_RXFIFO_FULL_TH_8822B)
-#define BIT_GET_R_WMAC_RXFIFO_FULL_TH_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_RXFIFO_FULL_TH_8822B) & \
- BIT_MASK_R_WMAC_RXFIFO_FULL_TH_8822B)
-
-#define BIT_R_WMAC_RX_SYNCFIFO_SYNC_8822B BIT(55)
-#define BIT_R_WMAC_RXRST_DLY_8822B BIT(54)
-#define BIT_R_WMAC_SRCH_TXRPT_REF_DROP_8822B BIT(53)
-#define BIT_R_WMAC_SRCH_TXRPT_UA1_8822B BIT(52)
-#define BIT_R_WMAC_SRCH_TXRPT_TYPE_8822B BIT(51)
-#define BIT_R_WMAC_NDP_RST_8822B BIT(50)
-#define BIT_R_WMAC_POWINT_EN_8822B BIT(49)
-#define BIT_R_WMAC_SRCH_TXRPT_PERPKT_8822B BIT(48)
-#define BIT_R_WMAC_SRCH_TXRPT_MID_8822B BIT(47)
-#define BIT_R_WMAC_PFIN_TOEN_8822B BIT(46)
-#define BIT_R_WMAC_FIL_SECERR_8822B BIT(45)
-#define BIT_R_WMAC_FIL_CTLPKTLEN_8822B BIT(44)
-#define BIT_R_WMAC_FIL_FCTYPE_8822B BIT(43)
-#define BIT_R_WMAC_FIL_FCPROVER_8822B BIT(42)
-#define BIT_R_WMAC_PHYSTS_SNIF_8822B BIT(41)
-#define BIT_R_WMAC_PHYSTS_PLCP_8822B BIT(40)
-#define BIT_R_MAC_TCR_VBONF_RD_8822B BIT(39)
-#define BIT_R_WMAC_TCR_MPAR_NDP_8822B BIT(38)
-#define BIT_R_WMAC_NDP_FILTER_8822B BIT(37)
-#define BIT_R_WMAC_RXLEN_SEL_8822B BIT(36)
-#define BIT_R_WMAC_RXLEN_SEL1_8822B BIT(35)
-#define BIT_R_OFDM_FILTER_8822B BIT(34)
-#define BIT_R_WMAC_CHK_OFDM_LEN_8822B BIT(33)
-#define BIT_R_WMAC_CHK_CCK_LEN_8822B BIT(32)
-
-#define BIT_SHIFT_R_OFDM_LEN_8822B 26
-#define BIT_MASK_R_OFDM_LEN_8822B 0x3f
-#define BIT_R_OFDM_LEN_8822B(x) \
- (((x) & BIT_MASK_R_OFDM_LEN_8822B) << BIT_SHIFT_R_OFDM_LEN_8822B)
-#define BIT_GET_R_OFDM_LEN_8822B(x) \
- (((x) >> BIT_SHIFT_R_OFDM_LEN_8822B) & BIT_MASK_R_OFDM_LEN_8822B)
-
-#define BIT_SHIFT_R_CCK_LEN_8822B 0
-#define BIT_MASK_R_CCK_LEN_8822B 0xffff
-#define BIT_R_CCK_LEN_8822B(x) \
- (((x) & BIT_MASK_R_CCK_LEN_8822B) << BIT_SHIFT_R_CCK_LEN_8822B)
-#define BIT_GET_R_CCK_LEN_8822B(x) \
- (((x) >> BIT_SHIFT_R_CCK_LEN_8822B) & BIT_MASK_R_CCK_LEN_8822B)
-
-/* 2 REG_RX_FILTER_FUNCTION_8822B */
-#define BIT_R_WMAC_MHRDDY_LATCH_8822B BIT(14)
-#define BIT_R_WMAC_MHRDDY_CLR_8822B BIT(13)
-#define BIT_R_RXPKTCTL_FSM_BASED_MPDURDY1_8822B BIT(12)
-#define BIT_WMAC_DIS_VHT_PLCP_CHK_MU_8822B BIT(11)
-#define BIT_R_CHK_DELIMIT_LEN_8822B BIT(10)
-#define BIT_R_REAPTER_ADDR_MATCH_8822B BIT(9)
-#define BIT_R_RXPKTCTL_FSM_BASED_MPDURDY_8822B BIT(8)
-#define BIT_R_LATCH_MACHRDY_8822B BIT(7)
-#define BIT_R_WMAC_RXFIL_REND_8822B BIT(6)
-#define BIT_R_WMAC_MPDURDY_CLR_8822B BIT(5)
-#define BIT_R_WMAC_CLRRXSEC_8822B BIT(4)
-#define BIT_R_WMAC_RXFIL_RDEL_8822B BIT(3)
-#define BIT_R_WMAC_RXFIL_FCSE_8822B BIT(2)
-#define BIT_R_WMAC_RXFIL_MESH_DEL_8822B BIT(1)
-#define BIT_R_WMAC_RXFIL_MASKM_8822B BIT(0)
-
-/* 2 REG_NDP_SIG_8822B */
-
-#define BIT_SHIFT_R_WMAC_TXNDP_SIGB_8822B 0
-#define BIT_MASK_R_WMAC_TXNDP_SIGB_8822B 0x1fffff
-#define BIT_R_WMAC_TXNDP_SIGB_8822B(x) \
- (((x) & BIT_MASK_R_WMAC_TXNDP_SIGB_8822B) \
- << BIT_SHIFT_R_WMAC_TXNDP_SIGB_8822B)
-#define BIT_GET_R_WMAC_TXNDP_SIGB_8822B(x) \
- (((x) >> BIT_SHIFT_R_WMAC_TXNDP_SIGB_8822B) & \
- BIT_MASK_R_WMAC_TXNDP_SIGB_8822B)
-
-/* 2 REG_TXCMD_INFO_FOR_RSP_PKT_8822B */
-
-#define BIT_SHIFT_R_MAC_DEBUG_8822B (32 & CPU_OPT_WIDTH)
-#define BIT_MASK_R_MAC_DEBUG_8822B 0xffffffffL
-#define BIT_R_MAC_DEBUG_8822B(x) \
- (((x) & BIT_MASK_R_MAC_DEBUG_8822B) << BIT_SHIFT_R_MAC_DEBUG_8822B)
-#define BIT_GET_R_MAC_DEBUG_8822B(x) \
- (((x) >> BIT_SHIFT_R_MAC_DEBUG_8822B) & BIT_MASK_R_MAC_DEBUG_8822B)
-
-#define BIT_SHIFT_R_MAC_DBG_SHIFT_8822B 8
-#define BIT_MASK_R_MAC_DBG_SHIFT_8822B 0x7
-#define BIT_R_MAC_DBG_SHIFT_8822B(x) \
- (((x) & BIT_MASK_R_MAC_DBG_SHIFT_8822B) \
- << BIT_SHIFT_R_MAC_DBG_SHIFT_8822B)
-#define BIT_GET_R_MAC_DBG_SHIFT_8822B(x) \
- (((x) >> BIT_SHIFT_R_MAC_DBG_SHIFT_8822B) & \
- BIT_MASK_R_MAC_DBG_SHIFT_8822B)
-
-#define BIT_SHIFT_R_MAC_DBG_SEL_8822B 0
-#define BIT_MASK_R_MAC_DBG_SEL_8822B 0x3
-#define BIT_R_MAC_DBG_SEL_8822B(x) \
- (((x) & BIT_MASK_R_MAC_DBG_SEL_8822B) << BIT_SHIFT_R_MAC_DBG_SEL_8822B)
-#define BIT_GET_R_MAC_DBG_SEL_8822B(x) \
- (((x) >> BIT_SHIFT_R_MAC_DBG_SEL_8822B) & BIT_MASK_R_MAC_DBG_SEL_8822B)
-
-/* 2 REG_RTS_ADDRESS_0_8822B */
-
-/* 2 REG_RTS_ADDRESS_1_8822B */
-
-/* 2 REG__RPFM_MAP1_8822B
- * (RX PAYLOAD FILTER MAP FRAME TYPE CONTROL REGISTER GROUP 1)
- */
-#define BIT_DATA_RPFM15EN_8822B BIT(15)
-#define BIT_DATA_RPFM14EN_8822B BIT(14)
-#define BIT_DATA_RPFM13EN_8822B BIT(13)
-#define BIT_DATA_RPFM12EN_8822B BIT(12)
-#define BIT_DATA_RPFM11EN_8822B BIT(11)
-#define BIT_DATA_RPFM10EN_8822B BIT(10)
-#define BIT_DATA_RPFM9EN_8822B BIT(9)
-#define BIT_DATA_RPFM8EN_8822B BIT(8)
-#define BIT_DATA_RPFM7EN_8822B BIT(7)
-#define BIT_DATA_RPFM6EN_8822B BIT(6)
-#define BIT_DATA_RPFM5EN_8822B BIT(5)
-#define BIT_DATA_RPFM4EN_8822B BIT(4)
-#define BIT_DATA_RPFM3EN_8822B BIT(3)
-#define BIT_DATA_RPFM2EN_8822B BIT(2)
-#define BIT_DATA_RPFM1EN_8822B BIT(1)
-#define BIT_DATA_RPFM0EN_8822B BIT(0)
-
-/* 2 REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1_8822B */
-#define BIT_LTECOEX_ACCESS_START_V1_8822B BIT(31)
-#define BIT_LTECOEX_WRITE_MODE_V1_8822B BIT(30)
-#define BIT_LTECOEX_READY_BIT_V1_8822B BIT(29)
-
-#define BIT_SHIFT_WRITE_BYTE_EN_V1_8822B 16
-#define BIT_MASK_WRITE_BYTE_EN_V1_8822B 0xf
-#define BIT_WRITE_BYTE_EN_V1_8822B(x) \
- (((x) & BIT_MASK_WRITE_BYTE_EN_V1_8822B) \
- << BIT_SHIFT_WRITE_BYTE_EN_V1_8822B)
-#define BIT_GET_WRITE_BYTE_EN_V1_8822B(x) \
- (((x) >> BIT_SHIFT_WRITE_BYTE_EN_V1_8822B) & \
- BIT_MASK_WRITE_BYTE_EN_V1_8822B)
-
-#define BIT_SHIFT_LTECOEX_REG_ADDR_V1_8822B 0
-#define BIT_MASK_LTECOEX_REG_ADDR_V1_8822B 0xffff
-#define BIT_LTECOEX_REG_ADDR_V1_8822B(x) \
- (((x) & BIT_MASK_LTECOEX_REG_ADDR_V1_8822B) \
- << BIT_SHIFT_LTECOEX_REG_ADDR_V1_8822B)
-#define BIT_GET_LTECOEX_REG_ADDR_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LTECOEX_REG_ADDR_V1_8822B) & \
- BIT_MASK_LTECOEX_REG_ADDR_V1_8822B)
-
-/* 2 REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1_8822B */
-
-#define BIT_SHIFT_LTECOEX_W_DATA_V1_8822B 0
-#define BIT_MASK_LTECOEX_W_DATA_V1_8822B 0xffffffffL
-#define BIT_LTECOEX_W_DATA_V1_8822B(x) \
- (((x) & BIT_MASK_LTECOEX_W_DATA_V1_8822B) \
- << BIT_SHIFT_LTECOEX_W_DATA_V1_8822B)
-#define BIT_GET_LTECOEX_W_DATA_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LTECOEX_W_DATA_V1_8822B) & \
- BIT_MASK_LTECOEX_W_DATA_V1_8822B)
-
-/* 2 REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1_8822B */
-
-#define BIT_SHIFT_LTECOEX_R_DATA_V1_8822B 0
-#define BIT_MASK_LTECOEX_R_DATA_V1_8822B 0xffffffffL
-#define BIT_LTECOEX_R_DATA_V1_8822B(x) \
- (((x) & BIT_MASK_LTECOEX_R_DATA_V1_8822B) \
- << BIT_SHIFT_LTECOEX_R_DATA_V1_8822B)
-#define BIT_GET_LTECOEX_R_DATA_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LTECOEX_R_DATA_V1_8822B) & \
- BIT_MASK_LTECOEX_R_DATA_V1_8822B)
-
-/* 2 REG_NOT_VALID_8822B */
-
-/* 2 REG_SDIO_TX_CTRL_8822B */
-
-#define BIT_SHIFT_SDIO_INT_TIMEOUT_8822B 16
-#define BIT_MASK_SDIO_INT_TIMEOUT_8822B 0xffff
-#define BIT_SDIO_INT_TIMEOUT_8822B(x) \
- (((x) & BIT_MASK_SDIO_INT_TIMEOUT_8822B) \
- << BIT_SHIFT_SDIO_INT_TIMEOUT_8822B)
-#define BIT_GET_SDIO_INT_TIMEOUT_8822B(x) \
- (((x) >> BIT_SHIFT_SDIO_INT_TIMEOUT_8822B) & \
- BIT_MASK_SDIO_INT_TIMEOUT_8822B)
-
-#define BIT_IO_ERR_STATUS_8822B BIT(15)
-#define BIT_REPLY_ERRCRC_IN_DATA_8822B BIT(9)
-#define BIT_EN_CMD53_OVERLAP_8822B BIT(8)
-#define BIT_REPLY_ERR_IN_R5_8822B BIT(7)
-#define BIT_R18A_EN_8822B BIT(6)
-#define BIT_INIT_CMD_EN_8822B BIT(5)
-#define BIT_EN_RXDMA_MASK_INT_8822B BIT(2)
-#define BIT_EN_MASK_TIMER_8822B BIT(1)
-#define BIT_CMD_ERR_STOP_INT_EN_8822B BIT(0)
-
-/* 2 REG_SDIO_HIMR_8822B */
-#define BIT_SDIO_CRCERR_MSK_8822B BIT(31)
-#define BIT_SDIO_HSISR3_IND_MSK_8822B BIT(30)
-#define BIT_SDIO_HSISR2_IND_MSK_8822B BIT(29)
-#define BIT_SDIO_HEISR_IND_MSK_8822B BIT(28)
-#define BIT_SDIO_CTWEND_MSK_8822B BIT(27)
-#define BIT_SDIO_ATIMEND_E_MSK_8822B BIT(26)
-#define BIT_SDIIO_ATIMEND_MSK_8822B BIT(25)
-#define BIT_SDIO_OCPINT_MSK_8822B BIT(24)
-#define BIT_SDIO_PSTIMEOUT_MSK_8822B BIT(23)
-#define BIT_SDIO_GTINT4_MSK_8822B BIT(22)
-#define BIT_SDIO_GTINT3_MSK_8822B BIT(21)
-#define BIT_SDIO_HSISR_IND_MSK_8822B BIT(20)
-#define BIT_SDIO_CPWM2_MSK_8822B BIT(19)
-#define BIT_SDIO_CPWM1_MSK_8822B BIT(18)
-#define BIT_SDIO_C2HCMD_INT_MSK_8822B BIT(17)
-#define BIT_SDIO_BCNERLY_INT_MSK_8822B BIT(16)
-#define BIT_SDIO_TXBCNERR_MSK_8822B BIT(7)
-#define BIT_SDIO_TXBCNOK_MSK_8822B BIT(6)
-#define BIT_SDIO_RXFOVW_MSK_8822B BIT(5)
-#define BIT_SDIO_TXFOVW_MSK_8822B BIT(4)
-#define BIT_SDIO_RXERR_MSK_8822B BIT(3)
-#define BIT_SDIO_TXERR_MSK_8822B BIT(2)
-#define BIT_SDIO_AVAL_MSK_8822B BIT(1)
-#define BIT_RX_REQUEST_MSK_8822B BIT(0)
-
-/* 2 REG_SDIO_HISR_8822B */
-#define BIT_SDIO_CRCERR_8822B BIT(31)
-#define BIT_SDIO_HSISR3_IND_8822B BIT(30)
-#define BIT_SDIO_HSISR2_IND_8822B BIT(29)
-#define BIT_SDIO_HEISR_IND_8822B BIT(28)
-#define BIT_SDIO_CTWEND_8822B BIT(27)
-#define BIT_SDIO_ATIMEND_E_8822B BIT(26)
-#define BIT_SDIO_ATIMEND_8822B BIT(25)
-#define BIT_SDIO_OCPINT_8822B BIT(24)
-#define BIT_SDIO_PSTIMEOUT_8822B BIT(23)
-#define BIT_SDIO_GTINT4_8822B BIT(22)
-#define BIT_SDIO_GTINT3_8822B BIT(21)
-#define BIT_SDIO_HSISR_IND_8822B BIT(20)
-#define BIT_SDIO_CPWM2_8822B BIT(19)
-#define BIT_SDIO_CPWM1_8822B BIT(18)
-#define BIT_SDIO_C2HCMD_INT_8822B BIT(17)
-#define BIT_SDIO_BCNERLY_INT_8822B BIT(16)
-#define BIT_SDIO_TXBCNERR_8822B BIT(7)
-#define BIT_SDIO_TXBCNOK_8822B BIT(6)
-#define BIT_SDIO_RXFOVW_8822B BIT(5)
-#define BIT_SDIO_TXFOVW_8822B BIT(4)
-#define BIT_SDIO_RXERR_8822B BIT(3)
-#define BIT_SDIO_TXERR_8822B BIT(2)
-#define BIT_SDIO_AVAL_8822B BIT(1)
-#define BIT_RX_REQUEST_8822B BIT(0)
-
-/* 2 REG_SDIO_RX_REQ_LEN_8822B */
-
-#define BIT_SHIFT_RX_REQ_LEN_V1_8822B 0
-#define BIT_MASK_RX_REQ_LEN_V1_8822B 0x3ffff
-#define BIT_RX_REQ_LEN_V1_8822B(x) \
- (((x) & BIT_MASK_RX_REQ_LEN_V1_8822B) << BIT_SHIFT_RX_REQ_LEN_V1_8822B)
-#define BIT_GET_RX_REQ_LEN_V1_8822B(x) \
- (((x) >> BIT_SHIFT_RX_REQ_LEN_V1_8822B) & BIT_MASK_RX_REQ_LEN_V1_8822B)
-
-/* 2 REG_SDIO_FREE_TXPG_SEQ_V1_8822B */
-
-#define BIT_SHIFT_FREE_TXPG_SEQ_8822B 0
-#define BIT_MASK_FREE_TXPG_SEQ_8822B 0xff
-#define BIT_FREE_TXPG_SEQ_8822B(x) \
- (((x) & BIT_MASK_FREE_TXPG_SEQ_8822B) << BIT_SHIFT_FREE_TXPG_SEQ_8822B)
-#define BIT_GET_FREE_TXPG_SEQ_8822B(x) \
- (((x) >> BIT_SHIFT_FREE_TXPG_SEQ_8822B) & BIT_MASK_FREE_TXPG_SEQ_8822B)
-
-/* 2 REG_SDIO_FREE_TXPG_8822B */
-
-#define BIT_SHIFT_MID_FREEPG_V1_8822B 16
-#define BIT_MASK_MID_FREEPG_V1_8822B 0xfff
-#define BIT_MID_FREEPG_V1_8822B(x) \
- (((x) & BIT_MASK_MID_FREEPG_V1_8822B) << BIT_SHIFT_MID_FREEPG_V1_8822B)
-#define BIT_GET_MID_FREEPG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_MID_FREEPG_V1_8822B) & BIT_MASK_MID_FREEPG_V1_8822B)
-
-#define BIT_SHIFT_HIQ_FREEPG_V1_8822B 0
-#define BIT_MASK_HIQ_FREEPG_V1_8822B 0xfff
-#define BIT_HIQ_FREEPG_V1_8822B(x) \
- (((x) & BIT_MASK_HIQ_FREEPG_V1_8822B) << BIT_SHIFT_HIQ_FREEPG_V1_8822B)
-#define BIT_GET_HIQ_FREEPG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_HIQ_FREEPG_V1_8822B) & BIT_MASK_HIQ_FREEPG_V1_8822B)
-
-/* 2 REG_SDIO_FREE_TXPG2_8822B */
-
-#define BIT_SHIFT_PUB_FREEPG_V1_8822B 16
-#define BIT_MASK_PUB_FREEPG_V1_8822B 0xfff
-#define BIT_PUB_FREEPG_V1_8822B(x) \
- (((x) & BIT_MASK_PUB_FREEPG_V1_8822B) << BIT_SHIFT_PUB_FREEPG_V1_8822B)
-#define BIT_GET_PUB_FREEPG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_PUB_FREEPG_V1_8822B) & BIT_MASK_PUB_FREEPG_V1_8822B)
-
-#define BIT_SHIFT_LOW_FREEPG_V1_8822B 0
-#define BIT_MASK_LOW_FREEPG_V1_8822B 0xfff
-#define BIT_LOW_FREEPG_V1_8822B(x) \
- (((x) & BIT_MASK_LOW_FREEPG_V1_8822B) << BIT_SHIFT_LOW_FREEPG_V1_8822B)
-#define BIT_GET_LOW_FREEPG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_LOW_FREEPG_V1_8822B) & BIT_MASK_LOW_FREEPG_V1_8822B)
-
-/* 2 REG_SDIO_OQT_FREE_TXPG_V1_8822B */
-
-#define BIT_SHIFT_NOAC_OQT_FREEPG_V1_8822B 24
-#define BIT_MASK_NOAC_OQT_FREEPG_V1_8822B 0xff
-#define BIT_NOAC_OQT_FREEPG_V1_8822B(x) \
- (((x) & BIT_MASK_NOAC_OQT_FREEPG_V1_8822B) \
- << BIT_SHIFT_NOAC_OQT_FREEPG_V1_8822B)
-#define BIT_GET_NOAC_OQT_FREEPG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_NOAC_OQT_FREEPG_V1_8822B) & \
- BIT_MASK_NOAC_OQT_FREEPG_V1_8822B)
-
-#define BIT_SHIFT_AC_OQT_FREEPG_V1_8822B 16
-#define BIT_MASK_AC_OQT_FREEPG_V1_8822B 0xff
-#define BIT_AC_OQT_FREEPG_V1_8822B(x) \
- (((x) & BIT_MASK_AC_OQT_FREEPG_V1_8822B) \
- << BIT_SHIFT_AC_OQT_FREEPG_V1_8822B)
-#define BIT_GET_AC_OQT_FREEPG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_AC_OQT_FREEPG_V1_8822B) & \
- BIT_MASK_AC_OQT_FREEPG_V1_8822B)
-
-#define BIT_SHIFT_EXQ_FREEPG_V1_8822B 0
-#define BIT_MASK_EXQ_FREEPG_V1_8822B 0xfff
-#define BIT_EXQ_FREEPG_V1_8822B(x) \
- (((x) & BIT_MASK_EXQ_FREEPG_V1_8822B) << BIT_SHIFT_EXQ_FREEPG_V1_8822B)
-#define BIT_GET_EXQ_FREEPG_V1_8822B(x) \
- (((x) >> BIT_SHIFT_EXQ_FREEPG_V1_8822B) & BIT_MASK_EXQ_FREEPG_V1_8822B)
-
-/* 2 REG_SDIO_HTSFR_INFO_8822B */
-
-#define BIT_SHIFT_HTSFR1_8822B 16
-#define BIT_MASK_HTSFR1_8822B 0xffff
-#define BIT_HTSFR1_8822B(x) \
- (((x) & BIT_MASK_HTSFR1_8822B) << BIT_SHIFT_HTSFR1_8822B)
-#define BIT_GET_HTSFR1_8822B(x) \
- (((x) >> BIT_SHIFT_HTSFR1_8822B) & BIT_MASK_HTSFR1_8822B)
-
-#define BIT_SHIFT_HTSFR0_8822B 0
-#define BIT_MASK_HTSFR0_8822B 0xffff
-#define BIT_HTSFR0_8822B(x) \
- (((x) & BIT_MASK_HTSFR0_8822B) << BIT_SHIFT_HTSFR0_8822B)
-#define BIT_GET_HTSFR0_8822B(x) \
- (((x) >> BIT_SHIFT_HTSFR0_8822B) & BIT_MASK_HTSFR0_8822B)
-
-/* 2 REG_SDIO_HCPWM1_V2_8822B */
-#define BIT_TOGGLING_8822B BIT(7)
-#define BIT_ACK_8822B BIT(6)
-#define BIT_SYS_CLK_8822B BIT(0)
-
-/* 2 REG_SDIO_HCPWM2_V2_8822B */
-
-/* 2 REG_SDIO_INDIRECT_REG_CFG_8822B */
-#define BIT_INDIRECT_REG_RDY_8822B BIT(20)
-#define BIT_INDIRECT_REG_R_8822B BIT(19)
-#define BIT_INDIRECT_REG_W_8822B BIT(18)
-
-#define BIT_SHIFT_INDIRECT_REG_SIZE_8822B 16
-#define BIT_MASK_INDIRECT_REG_SIZE_8822B 0x3
-#define BIT_INDIRECT_REG_SIZE_8822B(x) \
- (((x) & BIT_MASK_INDIRECT_REG_SIZE_8822B) \
- << BIT_SHIFT_INDIRECT_REG_SIZE_8822B)
-#define BIT_GET_INDIRECT_REG_SIZE_8822B(x) \
- (((x) >> BIT_SHIFT_INDIRECT_REG_SIZE_8822B) & \
- BIT_MASK_INDIRECT_REG_SIZE_8822B)
-
-#define BIT_SHIFT_INDIRECT_REG_ADDR_8822B 0
-#define BIT_MASK_INDIRECT_REG_ADDR_8822B 0xffff
-#define BIT_INDIRECT_REG_ADDR_8822B(x) \
- (((x) & BIT_MASK_INDIRECT_REG_ADDR_8822B) \
- << BIT_SHIFT_INDIRECT_REG_ADDR_8822B)
-#define BIT_GET_INDIRECT_REG_ADDR_8822B(x) \
- (((x) >> BIT_SHIFT_INDIRECT_REG_ADDR_8822B) & \
- BIT_MASK_INDIRECT_REG_ADDR_8822B)
-
-/* 2 REG_SDIO_INDIRECT_REG_DATA_8822B */
-
-#define BIT_SHIFT_INDIRECT_REG_DATA_8822B 0
-#define BIT_MASK_INDIRECT_REG_DATA_8822B 0xffffffffL
-#define BIT_INDIRECT_REG_DATA_8822B(x) \
- (((x) & BIT_MASK_INDIRECT_REG_DATA_8822B) \
- << BIT_SHIFT_INDIRECT_REG_DATA_8822B)
-#define BIT_GET_INDIRECT_REG_DATA_8822B(x) \
- (((x) >> BIT_SHIFT_INDIRECT_REG_DATA_8822B) & \
- BIT_MASK_INDIRECT_REG_DATA_8822B)
-
-/* 2 REG_SDIO_H2C_8822B */
-
-#define BIT_SHIFT_SDIO_H2C_MSG_8822B 0
-#define BIT_MASK_SDIO_H2C_MSG_8822B 0xffffffffL
-#define BIT_SDIO_H2C_MSG_8822B(x) \
- (((x) & BIT_MASK_SDIO_H2C_MSG_8822B) << BIT_SHIFT_SDIO_H2C_MSG_8822B)
-#define BIT_GET_SDIO_H2C_MSG_8822B(x) \
- (((x) >> BIT_SHIFT_SDIO_H2C_MSG_8822B) & BIT_MASK_SDIO_H2C_MSG_8822B)
-
-/* 2 REG_SDIO_C2H_8822B */
-
-#define BIT_SHIFT_SDIO_C2H_MSG_8822B 0
-#define BIT_MASK_SDIO_C2H_MSG_8822B 0xffffffffL
-#define BIT_SDIO_C2H_MSG_8822B(x) \
- (((x) & BIT_MASK_SDIO_C2H_MSG_8822B) << BIT_SHIFT_SDIO_C2H_MSG_8822B)
-#define BIT_GET_SDIO_C2H_MSG_8822B(x) \
- (((x) >> BIT_SHIFT_SDIO_C2H_MSG_8822B) & BIT_MASK_SDIO_C2H_MSG_8822B)
-
-/* 2 REG_SDIO_HRPWM1_8822B */
-#define BIT_TOGGLING_8822B BIT(7)
-#define BIT_ACK_8822B BIT(6)
-#define BIT_32K_PERMISSION_8822B BIT(0)
-
-/* 2 REG_SDIO_HRPWM2_8822B */
-
-/* 2 REG_SDIO_HPS_CLKR_8822B */
-
-/* 2 REG_SDIO_BUS_CTRL_8822B */
-#define BIT_PAD_CLK_XHGE_EN_8822B BIT(3)
-#define BIT_INTER_CLK_EN_8822B BIT(2)
-#define BIT_EN_RPT_TXCRC_8822B BIT(1)
-#define BIT_DIS_RXDMA_STS_8822B BIT(0)
-
-/* 2 REG_SDIO_HSUS_CTRL_8822B */
-#define BIT_INTR_CTRL_8822B BIT(4)
-#define BIT_SDIO_VOLTAGE_8822B BIT(3)
-#define BIT_BYPASS_INIT_8822B BIT(2)
-#define BIT_HCI_RESUME_RDY_8822B BIT(1)
-#define BIT_HCI_SUS_REQ_8822B BIT(0)
-
-/* 2 REG_SDIO_RESPONSE_TIMER_8822B */
-
-#define BIT_SHIFT_CMDIN_2RESP_TIMER_8822B 0
-#define BIT_MASK_CMDIN_2RESP_TIMER_8822B 0xffff
-#define BIT_CMDIN_2RESP_TIMER_8822B(x) \
- (((x) & BIT_MASK_CMDIN_2RESP_TIMER_8822B) \
- << BIT_SHIFT_CMDIN_2RESP_TIMER_8822B)
-#define BIT_GET_CMDIN_2RESP_TIMER_8822B(x) \
- (((x) >> BIT_SHIFT_CMDIN_2RESP_TIMER_8822B) & \
- BIT_MASK_CMDIN_2RESP_TIMER_8822B)
-
-/* 2 REG_SDIO_CMD_CRC_8822B */
-
-#define BIT_SHIFT_SDIO_CMD_CRC_V1_8822B 0
-#define BIT_MASK_SDIO_CMD_CRC_V1_8822B 0xff
-#define BIT_SDIO_CMD_CRC_V1_8822B(x) \
- (((x) & BIT_MASK_SDIO_CMD_CRC_V1_8822B) \
- << BIT_SHIFT_SDIO_CMD_CRC_V1_8822B)
-#define BIT_GET_SDIO_CMD_CRC_V1_8822B(x) \
- (((x) >> BIT_SHIFT_SDIO_CMD_CRC_V1_8822B) & \
- BIT_MASK_SDIO_CMD_CRC_V1_8822B)
-
-/* 2 REG_SDIO_HSISR_8822B */
-#define BIT_DRV_WLAN_INT_CLR_8822B BIT(1)
-#define BIT_DRV_WLAN_INT_8822B BIT(0)
-
-/* 2 REG_SDIO_HSIMR_8822B */
-#define BIT_HISR_MASK_8822B BIT(0)
-
-/* 2 REG_SDIO_ERR_RPT_8822B */
-#define BIT_HR_FF_OVF_8822B BIT(6)
-#define BIT_HR_FF_UDN_8822B BIT(5)
-#define BIT_TXDMA_BUSY_ERR_8822B BIT(4)
-#define BIT_TXDMA_VLD_ERR_8822B BIT(3)
-#define BIT_QSEL_UNKNOWN_ERR_8822B BIT(2)
-#define BIT_QSEL_MIS_ERR_8822B BIT(1)
-#define BIT_SDIO_OVERRD_ERR_8822B BIT(0)
-
-/* 2 REG_SDIO_CMD_ERRCNT_8822B */
-
-#define BIT_SHIFT_CMD_CRC_ERR_CNT_8822B 0
-#define BIT_MASK_CMD_CRC_ERR_CNT_8822B 0xff
-#define BIT_CMD_CRC_ERR_CNT_8822B(x) \
- (((x) & BIT_MASK_CMD_CRC_ERR_CNT_8822B) \
- << BIT_SHIFT_CMD_CRC_ERR_CNT_8822B)
-#define BIT_GET_CMD_CRC_ERR_CNT_8822B(x) \
- (((x) >> BIT_SHIFT_CMD_CRC_ERR_CNT_8822B) & \
- BIT_MASK_CMD_CRC_ERR_CNT_8822B)
-
-/* 2 REG_SDIO_DATA_ERRCNT_8822B */
-
-#define BIT_SHIFT_DATA_CRC_ERR_CNT_8822B 0
-#define BIT_MASK_DATA_CRC_ERR_CNT_8822B 0xff
-#define BIT_DATA_CRC_ERR_CNT_8822B(x) \
- (((x) & BIT_MASK_DATA_CRC_ERR_CNT_8822B) \
- << BIT_SHIFT_DATA_CRC_ERR_CNT_8822B)
-#define BIT_GET_DATA_CRC_ERR_CNT_8822B(x) \
- (((x) >> BIT_SHIFT_DATA_CRC_ERR_CNT_8822B) & \
- BIT_MASK_DATA_CRC_ERR_CNT_8822B)
-
-/* 2 REG_SDIO_CMD_ERR_CONTENT_8822B */
-
-#define BIT_SHIFT_SDIO_CMD_ERR_CONTENT_8822B 0
-#define BIT_MASK_SDIO_CMD_ERR_CONTENT_8822B 0xffffffffffL
-#define BIT_SDIO_CMD_ERR_CONTENT_8822B(x) \
- (((x) & BIT_MASK_SDIO_CMD_ERR_CONTENT_8822B) \
- << BIT_SHIFT_SDIO_CMD_ERR_CONTENT_8822B)
-#define BIT_GET_SDIO_CMD_ERR_CONTENT_8822B(x) \
- (((x) >> BIT_SHIFT_SDIO_CMD_ERR_CONTENT_8822B) & \
- BIT_MASK_SDIO_CMD_ERR_CONTENT_8822B)
-
-/* 2 REG_SDIO_CRC_ERR_IDX_8822B */
-#define BIT_D3_CRC_ERR_8822B BIT(4)
-#define BIT_D2_CRC_ERR_8822B BIT(3)
-#define BIT_D1_CRC_ERR_8822B BIT(2)
-#define BIT_D0_CRC_ERR_8822B BIT(1)
-#define BIT_CMD_CRC_ERR_8822B BIT(0)
-
-/* 2 REG_SDIO_DATA_CRC_8822B */
-
-#define BIT_SHIFT_SDIO_DATA_CRC_8822B 0
-#define BIT_MASK_SDIO_DATA_CRC_8822B 0xff
-#define BIT_SDIO_DATA_CRC_8822B(x) \
- (((x) & BIT_MASK_SDIO_DATA_CRC_8822B) << BIT_SHIFT_SDIO_DATA_CRC_8822B)
-#define BIT_GET_SDIO_DATA_CRC_8822B(x) \
- (((x) >> BIT_SHIFT_SDIO_DATA_CRC_8822B) & BIT_MASK_SDIO_DATA_CRC_8822B)
-
-/* 2 REG_SDIO_DATA_REPLY_TIME_8822B */
-
-#define BIT_SHIFT_SDIO_DATA_REPLY_TIME_8822B 0
-#define BIT_MASK_SDIO_DATA_REPLY_TIME_8822B 0x7
-#define BIT_SDIO_DATA_REPLY_TIME_8822B(x) \
- (((x) & BIT_MASK_SDIO_DATA_REPLY_TIME_8822B) \
- << BIT_SHIFT_SDIO_DATA_REPLY_TIME_8822B)
-#define BIT_GET_SDIO_DATA_REPLY_TIME_8822B(x) \
- (((x) >> BIT_SHIFT_SDIO_DATA_REPLY_TIME_8822B) & \
- BIT_MASK_SDIO_DATA_REPLY_TIME_8822B)
-
-#endif
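
Every field in the register header deleted above follows the same mechanical pattern: a BIT_SHIFT_* constant, a BIT_MASK_* constant, a BIT_*(x) packer and a BIT_GET_*(x) extractor. The following stand-alone C sketch only illustrates how that pattern composes; the DEMO_* names, the example shift/mask values and the main() harness are hypothetical and are not part of the removed driver.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Same shape as the deleted 8822B field macros: shift, mask, pack, extract. */
#define DEMO_SHIFT_RX_LOW_TIMER_IDX 8
#define DEMO_MASK_RX_LOW_TIMER_IDX  0x7
#define DEMO_RX_LOW_TIMER_IDX(x) \
	(((x) & DEMO_MASK_RX_LOW_TIMER_IDX) << DEMO_SHIFT_RX_LOW_TIMER_IDX)
#define DEMO_GET_RX_LOW_TIMER_IDX(x) \
	(((x) >> DEMO_SHIFT_RX_LOW_TIMER_IDX) & DEMO_MASK_RX_LOW_TIMER_IDX)

int main(void)
{
	uint32_t reg = 0;

	/* Pack a 3-bit field at bit 8 and set a single-bit flag at bit 0. */
	reg |= DEMO_RX_LOW_TIMER_IDX(5) | BIT(0);
	printf("reg=0x%08x idx=%u\n", (unsigned)reg,
	       (unsigned)DEMO_GET_RX_LOW_TIMER_IDX(reg));
	return 0;
}

In the driver the packers were OR'ed together before a register write and the BIT_GET_* extractors applied to the value read back, exactly as in the toy main() above.
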
diff --git a/drivers/staging/rtlwifi/halmac/halmac_fw_info.h b/drivers/staging/rtlwifi/halmac/halmac_fw_info.h
deleted file mode 100644
index eb4558d0b62c..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_fw_info.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_FW_INFO_H_
-#define _HALMAC_FW_INFO_H_
-
-#define H2C_FORMAT_VERSION 6
-
-#define H2C_ACK_HDR_CONTENT_LENGTH 8
-#define CFG_PARAMETER_ACK_CONTENT_LENGTH 16
-#define SCAN_STATUS_RPT_CONTENT_LENGTH 4
-#define C2H_DBG_HEADER_LENGTH 4
-#define C2H_DBG_CONTENT_MAX_LENGTH 228
-
-#define C2H_DBG_CONTENT_SEQ_OFFSET 1
-
-/* Rename from FW SysHalCom_Debug_RAM.h */
-#define FW_REG_H2CPKT_DONE_SEQ 0x1C8
-#define fw_reg_wow_reason 0x1C7
-
-enum halmac_data_type {
- HALMAC_DATA_TYPE_MAC_REG = 0x00,
- HALMAC_DATA_TYPE_BB_REG = 0x01,
- HALMAC_DATA_TYPE_RADIO_A = 0x02,
- HALMAC_DATA_TYPE_RADIO_B = 0x03,
- HALMAC_DATA_TYPE_RADIO_C = 0x04,
- HALMAC_DATA_TYPE_RADIO_D = 0x05,
-
- HALMAC_DATA_TYPE_DRV_DEFINE_0 = 0x80,
- HALMAC_DATA_TYPE_DRV_DEFINE_1 = 0x81,
- HALMAC_DATA_TYPE_DRV_DEFINE_2 = 0x82,
- HALMAC_DATA_TYPE_DRV_DEFINE_3 = 0x83,
- HALMAC_DATA_TYPE_UNDEFINE = 0x7FFFFFFF,
-};
-
-enum halmac_packet_id {
- HALMAC_PACKET_PROBE_REQ = 0x00,
- HALMAC_PACKET_SYNC_BCN = 0x01,
- HALMAC_PACKET_DISCOVERY_BCN = 0x02,
-
- HALMAC_PACKET_UNDEFINE = 0x7FFFFFFF,
-};
-
-/* Channel Switch Action ID */
-enum halmac_cs_action_id {
- HALMAC_CS_ACTION_NONE = 0x00,
- HALMAC_CS_ACTIVE_SCAN = 0x01,
- HALMAC_CS_NAN_NONMASTER_DW = 0x02,
- HALMAC_CS_NAN_NONMASTER_NONDW = 0x03,
- HALMAC_CS_NAN_MASTER_NONDW = 0x04,
- HALMAC_CS_NAN_MASTER_DW = 0x05,
-
- HALMAC_CS_ACTION_UNDEFINE = 0x7FFFFFFF,
-};
-
-/* Channel Switch Extra Action ID */
-enum halmac_cs_extra_action_id {
- HALMAC_CS_EXTRA_ACTION_NONE = 0x00,
- HALMAC_CS_EXTRA_UPDATE_PROBE = 0x01,
- HALMAC_CS_EXTRA_UPDATE_BEACON = 0x02,
-
- HALMAC_CS_EXTRA_ACTION_UNDEFINE = 0x7FFFFFFF,
-};
-
-enum halmac_h2c_return_code {
- HALMAC_H2C_RETURN_SUCCESS = 0x00,
- HALMAC_H2C_RETURN_CFG_ERR_LEN = 0x01,
- HALMAC_H2C_RETURN_CFG_ERR_CMD = 0x02,
-
- HALMAC_H2C_RETURN_EFUSE_ERR_DUMP = 0x03,
-
- HALMAC_H2C_RETURN_DATAPACK_ERR_FULL = 0x04, /* DMEM buffer full */
- HALMAC_H2C_RETURN_DATAPACK_ERR_ID = 0x05, /* Invalid pack id */
-
- HALMAC_H2C_RETURN_RUN_ERR_EMPTY =
- 0x06, /* No data in dedicated buffer */
- HALMAC_H2C_RETURN_RUN_ERR_LEN = 0x07,
- HALMAC_H2C_RETURN_RUN_ERR_CMD = 0x08,
- HALMAC_H2C_RETURN_RUN_ERR_ID = 0x09, /* Invalid pack id */
-
- HALMAC_H2C_RETURN_PACKET_ERR_FULL = 0x0A, /* DMEM buffer full */
- HALMAC_H2C_RETURN_PACKET_ERR_ID = 0x0B, /* Invalid packet id */
-
- HALMAC_H2C_RETURN_SCAN_ERR_FULL = 0x0C, /* DMEM buffer full */
- HALMAC_H2C_RETURN_SCAN_ERR_PHYDM = 0x0D, /* PHYDM API return fail */
-
- HALMAC_H2C_RETURN_ORIG_ERR_ID = 0x0E, /* Invalid original H2C cmd id */
-
- HALMAC_H2C_RETURN_UNDEFINE = 0x7FFFFFFF,
-};
-
-enum halmac_scan_report_code {
- HALMAC_SCAN_REPORT_DONE = 0x00,
- HALMAC_SCAN_REPORT_ERR_PHYDM = 0x01, /* PHYDM API return fail */
- HALMAC_SCAN_REPORT_ERR_ID = 0x02, /* Invalid ActionID */
- HALMAC_SCAN_REPORT_ERR_TX = 0x03, /* Tx RsvdPage fail */
-
- HALMAC_SCAN_REPORT_UNDEFINE = 0x7FFFFFFF,
-};
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h b/drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h
deleted file mode 100644
index 45b5f8780baf..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HAL_FWOFFLOADC2HFORMAT_H2C_C2H_NIC_H_
-#define _HAL_FWOFFLOADC2HFORMAT_H2C_C2H_NIC_H_
-#define C2H_SUB_CMD_ID_C2H_DBG 0X00
-#define C2H_SUB_CMD_ID_BT_COEX_INFO 0X02
-#define C2H_SUB_CMD_ID_SCAN_STATUS_RPT 0X03
-#define C2H_SUB_CMD_ID_H2C_ACK_HDR 0X01
-#define C2H_SUB_CMD_ID_CFG_PARAMETER_ACK 0X01
-#define C2H_SUB_CMD_ID_BT_COEX_ACK 0X01
-#define C2H_SUB_CMD_ID_DUMP_PHYSICAL_EFUSE_ACK 0X01
-#define C2H_SUB_CMD_ID_UPDATE_PACKET_ACK 0X01
-#define C2H_SUB_CMD_ID_UPDATE_DATAPACK_ACK 0X01
-#define C2H_SUB_CMD_ID_RUN_DATAPACK_ACK 0X01
-#define C2H_SUB_CMD_ID_CHANNEL_SWITCH_ACK 0X01
-#define C2H_SUB_CMD_ID_IQK_ACK 0X01
-#define C2H_SUB_CMD_ID_POWER_TRACKING_ACK 0X01
-#define C2H_SUB_CMD_ID_PSD_ACK 0X01
-#define C2H_SUB_CMD_ID_PSD_DATA 0X04
-#define C2H_SUB_CMD_ID_EFUSE_DATA 0X05
-#define C2H_SUB_CMD_ID_IQK_DATA 0X06
-#define C2H_SUB_CMD_ID_C2H_PKT_FTM_DBG 0X07
-#define C2H_SUB_CMD_ID_C2H_PKT_FTM_2_DBG 0X08
-#define C2H_SUB_CMD_ID_C2H_PKT_FTM_3_DBG 0X09
-#define C2H_SUB_CMD_ID_C2H_PKT_FTM_4_DBG 0X0A
-#define C2H_SUB_CMD_ID_FTMACKRPT_HDL_DBG 0X0B
-#define C2H_SUB_CMD_ID_FTMC2H_RPT 0X0C
-#define C2H_SUB_CMD_ID_DRVFTMC2H_RPT 0X0D
-#define C2H_SUB_CMD_ID_C2H_PKT_FTM_5_DBG 0X0E
-#define C2H_SUB_CMD_ID_CCX_RPT 0X0F
-#define C2H_SUB_CMD_ID_C2H_PKT_NAN_RPT 0X10
-#define H2C_SUB_CMD_ID_CFG_PARAMETER_ACK SUB_CMD_ID_CFG_PARAMETER
-#define H2C_SUB_CMD_ID_BT_COEX_ACK SUB_CMD_ID_BT_COEX
-#define H2C_SUB_CMD_ID_DUMP_PHYSICAL_EFUSE_ACK SUB_CMD_ID_DUMP_PHYSICAL_EFUSE
-#define H2C_SUB_CMD_ID_UPDATE_PACKET_ACK SUB_CMD_ID_UPDATE_PACKET
-#define H2C_SUB_CMD_ID_UPDATE_DATAPACK_ACK SUB_CMD_ID_UPDATE_DATAPACK
-#define H2C_SUB_CMD_ID_RUN_DATAPACK_ACK SUB_CMD_ID_RUN_DATAPACK
-#define H2C_SUB_CMD_ID_CHANNEL_SWITCH_ACK SUB_CMD_ID_CHANNEL_SWITCH
-#define H2C_SUB_CMD_ID_IQK_ACK SUB_CMD_ID_IQK
-#define H2C_SUB_CMD_ID_POWER_TRACKING_ACK SUB_CMD_ID_POWER_TRACKING
-#define H2C_SUB_CMD_ID_PSD_ACK SUB_CMD_ID_PSD
-#define H2C_SUB_CMD_ID_CCX_RPT SUB_CMD_ID_CCX_RPT
-#define H2C_CMD_ID_CFG_PARAMETER_ACK 0XFF
-#define H2C_CMD_ID_BT_COEX_ACK 0XFF
-#define H2C_CMD_ID_DUMP_PHYSICAL_EFUSE_ACK 0XFF
-#define H2C_CMD_ID_UPDATE_PACKET_ACK 0XFF
-#define H2C_CMD_ID_UPDATE_DATAPACK_ACK 0XFF
-#define H2C_CMD_ID_RUN_DATAPACK_ACK 0XFF
-#define H2C_CMD_ID_CHANNEL_SWITCH_ACK 0XFF
-#define H2C_CMD_ID_IQK_ACK 0XFF
-#define H2C_CMD_ID_POWER_TRACKING_ACK 0XFF
-#define H2C_CMD_ID_PSD_ACK 0XFF
-#define H2C_CMD_ID_CCX_RPT 0XFF
-#define C2H_HDR_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_HDR_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_HDR_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_HDR_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_HDR_GET_C2H_SUB_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 8)
-#define C2H_HDR_SET_C2H_SUB_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 8, __value)
-#define C2H_HDR_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 24, 8)
-#define C2H_HDR_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 24, 8, __value)
-#define C2H_DBG_GET_DBG_MSG(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 8)
-#define C2H_DBG_SET_DBG_MSG(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 8, __value)
-#define BT_COEX_INFO_GET_DATA_START(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 8)
-#define BT_COEX_INFO_SET_DATA_START(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 8, __value)
-#define SCAN_STATUS_RPT_GET_H2C_RETURN_CODE(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 8)
-#define SCAN_STATUS_RPT_SET_H2C_RETURN_CODE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 8, __value)
-#define SCAN_STATUS_RPT_GET_H2C_SEQ(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 16)
-#define SCAN_STATUS_RPT_SET_H2C_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 16, __value)
-#define H2C_ACK_HDR_GET_H2C_RETURN_CODE(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 8)
-#define H2C_ACK_HDR_SET_H2C_RETURN_CODE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 8, __value)
-#define H2C_ACK_HDR_GET_H2C_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define H2C_ACK_HDR_SET_H2C_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define H2C_ACK_HDR_GET_H2C_SUB_CMD_ID(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 16)
-#define H2C_ACK_HDR_SET_H2C_SUB_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 16, __value)
-#define H2C_ACK_HDR_GET_H2C_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X08, 0, 16)
-#define H2C_ACK_HDR_SET_H2C_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 0, 16, __value)
-#define CFG_PARAMETER_ACK_GET_OFFSET_ACCUMULATION(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0XC, 0, 32)
-#define CFG_PARAMETER_ACK_SET_OFFSET_ACCUMULATION(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0XC, 0, 32, __value)
-#define CFG_PARAMETER_ACK_GET_VALUE_ACCUMULATION(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X10, 0, 32)
-#define CFG_PARAMETER_ACK_SET_VALUE_ACCUMULATION(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X10, 0, 32, __value)
-#define BT_COEX_ACK_GET_DATA_START(__c2h) LE_BITS_TO_4BYTE(__c2h + 0XC, 0, 8)
-#define BT_COEX_ACK_SET_DATA_START(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0XC, 0, 8, __value)
-#define PSD_DATA_GET_SEGMENT_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 7)
-#define PSD_DATA_SET_SEGMENT_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 7, __value)
-#define PSD_DATA_GET_END_SEGMENT(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 7, 1)
-#define PSD_DATA_SET_END_SEGMENT(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 7, 1, __value)
-#define PSD_DATA_GET_SEGMENT_SIZE(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define PSD_DATA_SET_SEGMENT_SIZE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define PSD_DATA_GET_TOTAL_SIZE(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 16)
-#define PSD_DATA_SET_TOTAL_SIZE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 16, __value)
-#define PSD_DATA_GET_H2C_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X8, 0, 16)
-#define PSD_DATA_SET_H2C_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X8, 0, 16, __value)
-#define PSD_DATA_GET_DATA_START(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X8, 16, 8)
-#define PSD_DATA_SET_DATA_START(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X8, 16, 8, __value)
-#define EFUSE_DATA_GET_SEGMENT_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 7)
-#define EFUSE_DATA_SET_SEGMENT_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 7, __value)
-#define EFUSE_DATA_GET_END_SEGMENT(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 7, 1)
-#define EFUSE_DATA_SET_END_SEGMENT(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 7, 1, __value)
-#define EFUSE_DATA_GET_SEGMENT_SIZE(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define EFUSE_DATA_SET_SEGMENT_SIZE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define EFUSE_DATA_GET_TOTAL_SIZE(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 16)
-#define EFUSE_DATA_SET_TOTAL_SIZE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 16, __value)
-#define EFUSE_DATA_GET_H2C_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X8, 0, 16)
-#define EFUSE_DATA_SET_H2C_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X8, 0, 16, __value)
-#define EFUSE_DATA_GET_DATA_START(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X8, 16, 8)
-#define EFUSE_DATA_SET_DATA_START(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X8, 16, 8, __value)
-#define IQK_DATA_GET_SEGMENT_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 7)
-#define IQK_DATA_SET_SEGMENT_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 7, __value)
-#define IQK_DATA_GET_END_SEGMENT(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 7, 1)
-#define IQK_DATA_SET_END_SEGMENT(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 7, 1, __value)
-#define IQK_DATA_GET_SEGMENT_SIZE(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define IQK_DATA_SET_SEGMENT_SIZE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define IQK_DATA_GET_TOTAL_SIZE(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 16)
-#define IQK_DATA_SET_TOTAL_SIZE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 16, __value)
-#define IQK_DATA_GET_H2C_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X8, 0, 16)
-#define IQK_DATA_SET_H2C_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X8, 0, 16, __value)
-#define IQK_DATA_GET_DATA_START(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X8, 16, 8)
-#define IQK_DATA_SET_DATA_START(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X8, 16, 8, __value)
-#define CCX_RPT_GET_CCX_RPT(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X4, 0, 129)
-#define CCX_RPT_SET_CCX_RPT(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X4, 0, 129, __value)
-#endif
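
The C2H/H2C accessor macros removed above all reduce to LE_BITS_TO_4BYTE() and SET_BITS_TO_LE_4BYTE() applied at a byte offset into the message buffer. The sketch below uses hypothetical stand-ins for those helpers, assuming they read or update a bit span of a little-endian 32-bit word at a byte pointer; the real halmac implementation may differ, and the main() harness and seq value are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics: extract 'len' bits starting at 'start' from a
 * little-endian 32-bit word stored at byte pointer 'p'.
 */
static uint32_t le_bits_to_4byte(const uint8_t *p, unsigned int start,
				 unsigned int len)
{
	uint32_t v = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		     ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	uint32_t mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1);

	return (v >> start) & mask;
}

/* Assumed semantics: replace the same bit span with 'value'. */
static void set_bits_to_le_4byte(uint8_t *p, unsigned int start,
				 unsigned int len, uint32_t value)
{
	uint32_t v = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		     ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	uint32_t mask = ((len >= 32) ? 0xffffffffu : ((1u << len) - 1)) << start;

	v = (v & ~mask) | ((value << start) & mask);
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

int main(void)
{
	uint8_t c2h[16] = { 0 };

	/* Mirrors the shape of C2H_HDR_SET_SEQ/GET_SEQ: offset 0x00, bit 8, width 8. */
	set_bits_to_le_4byte(c2h + 0x00, 8, 8, 0x5a);
	printf("seq=0x%02x\n", (unsigned)le_bits_to_4byte(c2h + 0x00, 8, 8));
	return 0;
}

Each deleted *_GET_*/*_SET_* macro pair simply fixed the byte offset, bit offset and width for one named field, which is why the headers consist of nothing but these one-line wrappers.
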
diff --git a/drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h b/drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h
deleted file mode 100644
index 58e47584cedc..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h
+++ /dev/null
@@ -1,504 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HAL_FWOFFLOADH2CFORMAT_H2C_C2H_NIC_H_
-#define _HAL_FWOFFLOADH2CFORMAT_H2C_C2H_NIC_H_
-#define CMD_ID_FW_OFFLOAD_H2C 0XFF
-#define CMD_ID_CHANNEL_SWITCH 0XFF
-#define CMD_ID_DUMP_PHYSICAL_EFUSE 0XFF
-#define CMD_ID_UPDATE_BEACON_PARSING_INFO 0XFF
-#define CMD_ID_CFG_PARAMETER 0XFF
-#define CMD_ID_UPDATE_DATAPACK 0XFF
-#define CMD_ID_RUN_DATAPACK 0XFF
-#define CMD_ID_DOWNLOAD_FLASH 0XFF
-#define CMD_ID_UPDATE_PACKET 0XFF
-#define CMD_ID_GENERAL_INFO 0XFF
-#define CMD_ID_IQK 0XFF
-#define CMD_ID_POWER_TRACKING 0XFF
-#define CMD_ID_PSD 0XFF
-#define CMD_ID_P2PPS 0XFF
-#define CMD_ID_BT_COEX 0XFF
-#define CMD_ID_NAN_CTRL 0XFF
-#define CMD_ID_NAN_CHANNEL_PLAN_0 0XFF
-#define CMD_ID_NAN_CHANNEL_PLAN_1 0XFF
-#define CATEGORY_H2C_CMD_HEADER 0X00
-#define CATEGORY_FW_OFFLOAD_H2C 0X01
-#define CATEGORY_CHANNEL_SWITCH 0X01
-#define CATEGORY_DUMP_PHYSICAL_EFUSE 0X01
-#define CATEGORY_UPDATE_BEACON_PARSING_INFO 0X01
-#define CATEGORY_CFG_PARAMETER 0X01
-#define CATEGORY_UPDATE_DATAPACK 0X01
-#define CATEGORY_RUN_DATAPACK 0X01
-#define CATEGORY_DOWNLOAD_FLASH 0X01
-#define CATEGORY_UPDATE_PACKET 0X01
-#define CATEGORY_GENERAL_INFO 0X01
-#define CATEGORY_IQK 0X01
-#define CATEGORY_POWER_TRACKING 0X01
-#define CATEGORY_PSD 0X01
-#define CATEGORY_P2PPS 0X01
-#define CATEGORY_BT_COEX 0X01
-#define CATEGORY_NAN_CTRL 0X01
-#define CATEGORY_NAN_CHANNEL_PLAN_0 0X01
-#define CATEGORY_NAN_CHANNEL_PLAN_1 0X01
-#define SUB_CMD_ID_CHANNEL_SWITCH 0X02
-#define SUB_CMD_ID_DUMP_PHYSICAL_EFUSE 0X03
-#define SUB_CMD_ID_UPDATE_BEACON_PARSING_INFO 0X05
-#define SUB_CMD_ID_CFG_PARAMETER 0X08
-#define SUB_CMD_ID_UPDATE_DATAPACK 0X09
-#define SUB_CMD_ID_RUN_DATAPACK 0X0A
-#define SUB_CMD_ID_DOWNLOAD_FLASH 0X0B
-#define SUB_CMD_ID_UPDATE_PACKET 0X0C
-#define SUB_CMD_ID_GENERAL_INFO 0X0D
-#define SUB_CMD_ID_IQK 0X0E
-#define SUB_CMD_ID_POWER_TRACKING 0X0F
-#define SUB_CMD_ID_PSD 0X10
-#define SUB_CMD_ID_P2PPS 0X24
-#define SUB_CMD_ID_BT_COEX 0X60
-#define SUB_CMD_ID_NAN_CTRL 0XB2
-#define SUB_CMD_ID_NAN_CHANNEL_PLAN_0 0XB4
-#define SUB_CMD_ID_NAN_CHANNEL_PLAN_1 0XB5
-#define H2C_CMD_HEADER_GET_CATEGORY(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 7)
-#define H2C_CMD_HEADER_SET_CATEGORY(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 7, __value)
-#define H2C_CMD_HEADER_GET_ACK(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 7, 1)
-#define H2C_CMD_HEADER_SET_ACK(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 7, 1, __value)
-#define H2C_CMD_HEADER_GET_TOTAL_LEN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 16)
-#define H2C_CMD_HEADER_SET_TOTAL_LEN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 16, __value)
-#define H2C_CMD_HEADER_GET_SEQ_NUM(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 16)
-#define H2C_CMD_HEADER_SET_SEQ_NUM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 16, __value)
-#define FW_OFFLOAD_H2C_GET_CATEGORY(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 7)
-#define FW_OFFLOAD_H2C_SET_CATEGORY(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 7, __value)
-#define FW_OFFLOAD_H2C_GET_ACK(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 7, 1)
-#define FW_OFFLOAD_H2C_SET_ACK(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 7, 1, __value)
-#define FW_OFFLOAD_H2C_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define FW_OFFLOAD_H2C_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define FW_OFFLOAD_H2C_GET_SUB_CMD_ID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 16)
-#define FW_OFFLOAD_H2C_SET_SUB_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 16, __value)
-#define FW_OFFLOAD_H2C_GET_TOTAL_LEN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 16)
-#define FW_OFFLOAD_H2C_SET_TOTAL_LEN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 16, __value)
-#define FW_OFFLOAD_H2C_GET_SEQ_NUM(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 16)
-#define FW_OFFLOAD_H2C_SET_SEQ_NUM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 16, __value)
-#define CHANNEL_SWITCH_GET_SWITCH_START(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 1)
-#define CHANNEL_SWITCH_SET_SWITCH_START(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 1, __value)
-#define CHANNEL_SWITCH_GET_DEST_CH_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 1, 1)
-#define CHANNEL_SWITCH_SET_DEST_CH_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 1, 1, __value)
-#define CHANNEL_SWITCH_GET_ABSOLUTE_TIME(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 2, 1)
-#define CHANNEL_SWITCH_SET_ABSOLUTE_TIME(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 2, 1, __value)
-#define CHANNEL_SWITCH_GET_PERIODIC_OPTION(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 3, 2)
-#define CHANNEL_SWITCH_SET_PERIODIC_OPTION(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 3, 2, __value)
-#define CHANNEL_SWITCH_GET_CHANNEL_INFO_LOC(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 8)
-#define CHANNEL_SWITCH_SET_CHANNEL_INFO_LOC(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 8, __value)
-#define CHANNEL_SWITCH_GET_CHANNEL_NUM(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 16, 8)
-#define CHANNEL_SWITCH_SET_CHANNEL_NUM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 16, 8, __value)
-#define CHANNEL_SWITCH_GET_PRI_CH_IDX(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 24, 4)
-#define CHANNEL_SWITCH_SET_PRI_CH_IDX(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 24, 4, __value)
-#define CHANNEL_SWITCH_GET_DEST_BW(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 28, 4)
-#define CHANNEL_SWITCH_SET_DEST_BW(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 28, 4, __value)
-#define CHANNEL_SWITCH_GET_DEST_CH(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 8)
-#define CHANNEL_SWITCH_SET_DEST_CH(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 8, __value)
-#define CHANNEL_SWITCH_GET_NORMAL_PERIOD(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 8, 8)
-#define CHANNEL_SWITCH_SET_NORMAL_PERIOD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 8, 8, __value)
-#define CHANNEL_SWITCH_GET_SLOW_PERIOD(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 16, 8)
-#define CHANNEL_SWITCH_SET_SLOW_PERIOD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 16, 8, __value)
-#define CHANNEL_SWITCH_GET_NORMAL_CYCLE(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 24, 8)
-#define CHANNEL_SWITCH_SET_NORMAL_CYCLE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 24, 8, __value)
-#define CHANNEL_SWITCH_GET_TSF_HIGH(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X10, 0, 32)
-#define CHANNEL_SWITCH_SET_TSF_HIGH(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 0, 32, __value)
-#define CHANNEL_SWITCH_GET_TSF_LOW(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X14, 0, 32)
-#define CHANNEL_SWITCH_SET_TSF_LOW(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 0, 32, __value)
-#define CHANNEL_SWITCH_GET_CHANNEL_INFO_SIZE(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 0, 16)
-#define CHANNEL_SWITCH_SET_CHANNEL_INFO_SIZE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 0, 16, __value)
-#define UPDATE_BEACON_PARSING_INFO_GET_FUNC_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 1)
-#define UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 1, __value)
-#define UPDATE_BEACON_PARSING_INFO_GET_SIZE_TH(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 4)
-#define UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 4, __value)
-#define UPDATE_BEACON_PARSING_INFO_GET_TIMEOUT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 12, 4)
-#define UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 12, 4, __value)
-#define UPDATE_BEACON_PARSING_INFO_GET_IE_ID_BMP_0(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 32)
-#define UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_0(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 32, __value)
-#define UPDATE_BEACON_PARSING_INFO_GET_IE_ID_BMP_1(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 0, 32)
-#define UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 0, 32, __value)
-#define UPDATE_BEACON_PARSING_INFO_GET_IE_ID_BMP_2(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X14, 0, 32)
-#define UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 0, 32, __value)
-#define UPDATE_BEACON_PARSING_INFO_GET_IE_ID_BMP_3(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 0, 32)
-#define UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 0, 32, __value)
-#define UPDATE_BEACON_PARSING_INFO_GET_IE_ID_BMP_4(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X1C, 0, 32)
-#define UPDATE_BEACON_PARSING_INFO_SET_IE_ID_BMP_4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X1C, 0, 32, __value)
-#define CFG_PARAMETER_GET_NUM(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 16)
-#define CFG_PARAMETER_SET_NUM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 16, __value)
-#define CFG_PARAMETER_GET_INIT_CASE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 16, 1)
-#define CFG_PARAMETER_SET_INIT_CASE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 16, 1, __value)
-#define CFG_PARAMETER_GET_PHY_PARAMETER_LOC(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 24, 8)
-#define CFG_PARAMETER_SET_PHY_PARAMETER_LOC(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 24, 8, __value)
-#define UPDATE_DATAPACK_GET_SIZE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 16)
-#define UPDATE_DATAPACK_SET_SIZE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 16, __value)
-#define UPDATE_DATAPACK_GET_DATAPACK_ID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 16, 8)
-#define UPDATE_DATAPACK_SET_DATAPACK_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 16, 8, __value)
-#define UPDATE_DATAPACK_GET_DATAPACK_LOC(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 24, 8)
-#define UPDATE_DATAPACK_SET_DATAPACK_LOC(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 24, 8, __value)
-#define UPDATE_DATAPACK_GET_DATAPACK_SEGMENT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 8)
-#define UPDATE_DATAPACK_SET_DATAPACK_SEGMENT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 8, __value)
-#define UPDATE_DATAPACK_GET_END_SEGMENT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 8, 1)
-#define UPDATE_DATAPACK_SET_END_SEGMENT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 8, 1, __value)
-#define RUN_DATAPACK_GET_DATAPACK_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 8)
-#define RUN_DATAPACK_SET_DATAPACK_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 8, __value)
-#define DOWNLOAD_FLASH_GET_SPI_CMD(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 8)
-#define DOWNLOAD_FLASH_SET_SPI_CMD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 8, __value)
-#define DOWNLOAD_FLASH_GET_LOCATION(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 16)
-#define DOWNLOAD_FLASH_SET_LOCATION(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 16, __value)
-#define DOWNLOAD_FLASH_GET_SIZE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 32)
-#define DOWNLOAD_FLASH_SET_SIZE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 32, __value)
-#define DOWNLOAD_FLASH_GET_START_ADDR(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 0, 32)
-#define DOWNLOAD_FLASH_SET_START_ADDR(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 0, 32, __value)
-#define UPDATE_PACKET_GET_SIZE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 16)
-#define UPDATE_PACKET_SET_SIZE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 16, __value)
-#define UPDATE_PACKET_GET_PACKET_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 16, 8)
-#define UPDATE_PACKET_SET_PACKET_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 16, 8, __value)
-#define UPDATE_PACKET_GET_PACKET_LOC(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 24, 8)
-#define UPDATE_PACKET_SET_PACKET_LOC(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 24, 8, __value)
-#define GENERAL_INFO_GET_REF_TYPE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 8)
-#define GENERAL_INFO_SET_REF_TYPE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 8, __value)
-#define GENERAL_INFO_GET_RF_TYPE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 9)
-#define GENERAL_INFO_SET_RF_TYPE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 9, __value)
-#define GENERAL_INFO_GET_FW_TX_BOUNDARY(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 16, 8)
-#define GENERAL_INFO_SET_FW_TX_BOUNDARY(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 16, 8, __value)
-#define IQK_GET_CLEAR(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 1)
-#define IQK_SET_CLEAR(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 1, __value)
-#define IQK_GET_SEGMENT_IQK(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 1, 1)
-#define IQK_SET_SEGMENT_IQK(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 1, 1, __value)
-#define POWER_TRACKING_GET_ENABLE_A(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 1)
-#define POWER_TRACKING_SET_ENABLE_A(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 1, __value)
-#define POWER_TRACKING_GET_ENABLE_B(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 1, 1)
-#define POWER_TRACKING_SET_ENABLE_B(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 1, 1, __value)
-#define POWER_TRACKING_GET_ENABLE_C(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 2, 1)
-#define POWER_TRACKING_SET_ENABLE_C(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 2, 1, __value)
-#define POWER_TRACKING_GET_ENABLE_D(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 3, 1)
-#define POWER_TRACKING_SET_ENABLE_D(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 3, 1, __value)
-#define POWER_TRACKING_GET_TYPE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 4, 3)
-#define POWER_TRACKING_SET_TYPE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 4, 3, __value)
-#define POWER_TRACKING_GET_BBSWING_INDEX(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 8)
-#define POWER_TRACKING_SET_BBSWING_INDEX(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 8, __value)
-#define POWER_TRACKING_GET_TX_PWR_INDEX_A(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 8)
-#define POWER_TRACKING_SET_TX_PWR_INDEX_A(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 8, __value)
-#define POWER_TRACKING_GET_PWR_TRACKING_OFFSET_VALUE_A(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 8, 8)
-#define POWER_TRACKING_SET_PWR_TRACKING_OFFSET_VALUE_A(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 8, 8, __value)
-#define POWER_TRACKING_GET_TSSI_VALUE_A(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 16, 8)
-#define POWER_TRACKING_SET_TSSI_VALUE_A(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 16, 8, __value)
-#define POWER_TRACKING_GET_TX_PWR_INDEX_B(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 0, 8)
-#define POWER_TRACKING_SET_TX_PWR_INDEX_B(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 0, 8, __value)
-#define POWER_TRACKING_GET_PWR_TRACKING_OFFSET_VALUE_B(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 8, 8)
-#define POWER_TRACKING_SET_PWR_TRACKING_OFFSET_VALUE_B(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 8, 8, __value)
-#define POWER_TRACKING_GET_TSSI_VALUE_B(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 16, 8)
-#define POWER_TRACKING_SET_TSSI_VALUE_B(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 16, 8, __value)
-#define POWER_TRACKING_GET_TX_PWR_INDEX_C(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X14, 0, 8)
-#define POWER_TRACKING_SET_TX_PWR_INDEX_C(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 0, 8, __value)
-#define POWER_TRACKING_GET_PWR_TRACKING_OFFSET_VALUE_C(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X14, 8, 8)
-#define POWER_TRACKING_SET_PWR_TRACKING_OFFSET_VALUE_C(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 8, 8, __value)
-#define POWER_TRACKING_GET_TSSI_VALUE_C(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X14, 16, 8)
-#define POWER_TRACKING_SET_TSSI_VALUE_C(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 16, 8, __value)
-#define POWER_TRACKING_GET_TX_PWR_INDEX_D(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 0, 8)
-#define POWER_TRACKING_SET_TX_PWR_INDEX_D(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 0, 8, __value)
-#define POWER_TRACKING_GET_PWR_TRACKING_OFFSET_VALUE_D(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 8, 8)
-#define POWER_TRACKING_SET_PWR_TRACKING_OFFSET_VALUE_D(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 8, 8, __value)
-#define POWER_TRACKING_GET_TSSI_VALUE_D(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 16, 8)
-#define POWER_TRACKING_SET_TSSI_VALUE_D(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 16, 8, __value)
-#define PSD_GET_START_PSD(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 16)
-#define PSD_SET_START_PSD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 16, __value)
-#define PSD_GET_END_PSD(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 16, 16)
-#define PSD_SET_END_PSD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 16, 16, __value)
-#define P2PPS_GET_OFFLOAD_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 1)
-#define P2PPS_SET_OFFLOAD_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 1, __value)
-#define P2PPS_GET_ROLE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 1, 1)
-#define P2PPS_SET_ROLE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 1, 1, __value)
-#define P2PPS_GET_CTWINDOW_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 2, 1)
-#define P2PPS_SET_CTWINDOW_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 2, 1, __value)
-#define P2PPS_GET_NOA_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 3, 1)
-#define P2PPS_SET_NOA_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 3, 1, __value)
-#define P2PPS_GET_NOA_SEL(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 4, 1)
-#define P2PPS_SET_NOA_SEL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 4, 1, __value)
-#define P2PPS_GET_ALLSTASLEEP(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 5, 1)
-#define P2PPS_SET_ALLSTASLEEP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 5, 1, __value)
-#define P2PPS_GET_DISCOVERY(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 6, 1)
-#define P2PPS_SET_DISCOVERY(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 6, 1, __value)
-#define P2PPS_GET_P2P_PORT_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 8)
-#define P2PPS_SET_P2P_PORT_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 8, __value)
-#define P2PPS_GET_P2P_GROUP(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 16, 8)
-#define P2PPS_SET_P2P_GROUP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 16, 8, __value)
-#define P2PPS_GET_P2P_MACID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 24, 8)
-#define P2PPS_SET_P2P_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 24, 8, __value)
-#define P2PPS_GET_CTWINDOW_LENGTH(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 8)
-#define P2PPS_SET_CTWINDOW_LENGTH(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 8, __value)
-#define P2PPS_GET_NOA_DURATION_PARA(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X10, 0, 32)
-#define P2PPS_SET_NOA_DURATION_PARA(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 0, 32, __value)
-#define P2PPS_GET_NOA_INTERVAL_PARA(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X14, 0, 32)
-#define P2PPS_SET_NOA_INTERVAL_PARA(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 0, 32, __value)
-#define P2PPS_GET_NOA_START_TIME_PARA(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 0, 32)
-#define P2PPS_SET_NOA_START_TIME_PARA(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 0, 32, __value)
-#define P2PPS_GET_NOA_COUNT_PARA(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X1C, 0, 32)
-#define P2PPS_SET_NOA_COUNT_PARA(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X1C, 0, 32, __value)
-#define BT_COEX_GET_DATA_START(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 8)
-#define BT_COEX_SET_DATA_START(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 8, __value)
-#define NAN_CTRL_GET_NAN_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 2)
-#define NAN_CTRL_SET_NAN_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 2, __value)
-#define NAN_CTRL_GET_SUPPORT_BAND(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 2)
-#define NAN_CTRL_SET_SUPPORT_BAND(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 2, __value)
-#define NAN_CTRL_GET_DISABLE_2G_DISC_BCN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 10, 1)
-#define NAN_CTRL_SET_DISABLE_2G_DISC_BCN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 10, 1, __value)
-#define NAN_CTRL_GET_DISABLE_5G_DISC_BCN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 11, 1)
-#define NAN_CTRL_SET_DISABLE_5G_DISC_BCN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 11, 1, __value)
-#define NAN_CTRL_GET_BCN_RSVD_PAGE_OFFSET(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 16, 8)
-#define NAN_CTRL_SET_BCN_RSVD_PAGE_OFFSET(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 16, 8, __value)
-#define NAN_CTRL_GET_CHANNEL_2G(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X08, 24, 8)
-#define NAN_CTRL_SET_CHANNEL_2G(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 24, 8, __value)
-#define NAN_CTRL_GET_CHANNEL_5G(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 8)
-#define NAN_CTRL_SET_CHANNEL_5G(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 8, __value)
-#define NAN_CHANNEL_PLAN_0_GET_CHANNEL_NUMBER_0(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 8)
-#define NAN_CHANNEL_PLAN_0_SET_CHANNEL_NUMBER_0(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 8, __value)
-#define NAN_CHANNEL_PLAN_0_GET_UNPAUSE_MACID_0(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 8)
-#define NAN_CHANNEL_PLAN_0_SET_UNPAUSE_MACID_0(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 8, __value)
-#define NAN_CHANNEL_PLAN_0_GET_START_TIME_SLOT_0(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 16)
-#define NAN_CHANNEL_PLAN_0_SET_START_TIME_SLOT_0(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 16, __value)
-#define NAN_CHANNEL_PLAN_0_GET_DURATION_0(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 16, 16)
-#define NAN_CHANNEL_PLAN_0_SET_DURATION_0(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 16, 16, __value)
-#define NAN_CHANNEL_PLAN_0_GET_CHANNEL_NUMBER_1(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 0, 8)
-#define NAN_CHANNEL_PLAN_0_SET_CHANNEL_NUMBER_1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 0, 8, __value)
-#define NAN_CHANNEL_PLAN_0_GET_UNPAUSE_MACID_1(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 8, 8)
-#define NAN_CHANNEL_PLAN_0_SET_UNPAUSE_MACID_1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 8, 8, __value)
-#define NAN_CHANNEL_PLAN_0_GET_START_TIME_SLOT_1(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X14, 0, 16)
-#define NAN_CHANNEL_PLAN_0_SET_START_TIME_SLOT_1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 0, 16, __value)
-#define NAN_CHANNEL_PLAN_0_GET_DURATION_1(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X14, 16, 16)
-#define NAN_CHANNEL_PLAN_0_SET_DURATION_1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 16, 16, __value)
-#define NAN_CHANNEL_PLAN_0_GET_CHANNEL_NUMBER_2(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 0, 8)
-#define NAN_CHANNEL_PLAN_0_SET_CHANNEL_NUMBER_2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 0, 8, __value)
-#define NAN_CHANNEL_PLAN_0_GET_UNPAUSE_MACID_2(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 8, 8)
-#define NAN_CHANNEL_PLAN_0_SET_UNPAUSE_MACID_2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 8, 8, __value)
-#define NAN_CHANNEL_PLAN_0_GET_START_TIME_SLOT_2(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X1C, 0, 16)
-#define NAN_CHANNEL_PLAN_0_SET_START_TIME_SLOT_2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X1C, 0, 16, __value)
-#define NAN_CHANNEL_PLAN_0_GET_DURATION_2(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X1C, 16, 16)
-#define NAN_CHANNEL_PLAN_0_SET_DURATION_2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X1C, 16, 16, __value)
-#define NAN_CHANNEL_PLAN_1_GET_CHANNEL_NUMBER_3(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 0, 8)
-#define NAN_CHANNEL_PLAN_1_SET_CHANNEL_NUMBER_3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 0, 8, __value)
-#define NAN_CHANNEL_PLAN_1_GET_UNPAUSE_MACID_3(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X08, 8, 8)
-#define NAN_CHANNEL_PLAN_1_SET_UNPAUSE_MACID_3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X08, 8, 8, __value)
-#define NAN_CHANNEL_PLAN_1_GET_START_TIME_SLOT_3(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 0, 16)
-#define NAN_CHANNEL_PLAN_1_SET_START_TIME_SLOT_3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 0, 16, __value)
-#define NAN_CHANNEL_PLAN_1_GET_DURATION_3(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X0C, 16, 16)
-#define NAN_CHANNEL_PLAN_1_SET_DURATION_3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X0C, 16, 16, __value)
-#define NAN_CHANNEL_PLAN_1_GET_CHANNEL_NUMBER_4(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 0, 8)
-#define NAN_CHANNEL_PLAN_1_SET_CHANNEL_NUMBER_4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 0, 8, __value)
-#define NAN_CHANNEL_PLAN_1_GET_UNPAUSE_MACID_4(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X10, 8, 8)
-#define NAN_CHANNEL_PLAN_1_SET_UNPAUSE_MACID_4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X10, 8, 8, __value)
-#define NAN_CHANNEL_PLAN_1_GET_START_TIME_SLOT_4(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X14, 0, 16)
-#define NAN_CHANNEL_PLAN_1_SET_START_TIME_SLOT_4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 0, 16, __value)
-#define NAN_CHANNEL_PLAN_1_GET_DURATION_4(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X14, 16, 16)
-#define NAN_CHANNEL_PLAN_1_SET_DURATION_4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X14, 16, 16, __value)
-#define NAN_CHANNEL_PLAN_1_GET_CHANNEL_NUMBER_5(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 0, 8)
-#define NAN_CHANNEL_PLAN_1_SET_CHANNEL_NUMBER_5(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 0, 8, __value)
-#define NAN_CHANNEL_PLAN_1_GET_UNPAUSE_MACID_5(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X18, 8, 8)
-#define NAN_CHANNEL_PLAN_1_SET_UNPAUSE_MACID_5(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X18, 8, 8, __value)
-#define NAN_CHANNEL_PLAN_1_GET_START_TIME_SLOT_5(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X1C, 0, 16)
-#define NAN_CHANNEL_PLAN_1_SET_START_TIME_SLOT_5(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X1C, 0, 16, __value)
-#define NAN_CHANNEL_PLAN_1_GET_DURATION_5(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X1C, 16, 16)
-#define NAN_CHANNEL_PLAN_1_SET_DURATION_5(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X1C, 16, 16, __value)
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h b/drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h
deleted file mode 100644
index 04cee38c33f9..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HAL_H2CEXTRAINFO_H2C_C2H_NIC_H_
-#define _HAL_H2CEXTRAINFO_H2C_C2H_NIC_H_
-#define PHY_PARAMETER_INFO_GET_LENGTH(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 0, 8)
-#define PHY_PARAMETER_INFO_SET_LENGTH(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 0, 8, __value)
-#define PHY_PARAMETER_INFO_GET_IO_CMD(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 8, 7)
-#define PHY_PARAMETER_INFO_SET_IO_CMD(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 8, 7, __value)
-#define PHY_PARAMETER_INFO_GET_MSK_EN(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 15, 1)
-#define PHY_PARAMETER_INFO_SET_MSK_EN(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 15, 1, __value)
-#define PHY_PARAMETER_INFO_GET_LLT_PG_BNDY(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 16, 8)
-#define PHY_PARAMETER_INFO_SET_LLT_PG_BNDY(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 16, 8, __value)
-#define PHY_PARAMETER_INFO_GET_EFUSE_RSVDPAGE_LOC(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 16, 8)
-#define PHY_PARAMETER_INFO_SET_EFUSE_RSVDPAGE_LOC(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 16, 8, __value)
-#define PHY_PARAMETER_INFO_GET_EFUSE_PATCH_EN(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 16, 8)
-#define PHY_PARAMETER_INFO_SET_EFUSE_PATCH_EN(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 16, 8, __value)
-#define PHY_PARAMETER_INFO_GET_RF_ADDR(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 16, 8)
-#define PHY_PARAMETER_INFO_SET_RF_ADDR(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 16, 8, __value)
-#define PHY_PARAMETER_INFO_GET_IO_ADDR(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 16, 16)
-#define PHY_PARAMETER_INFO_SET_IO_ADDR(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 16, 16, __value)
-#define PHY_PARAMETER_INFO_GET_DELAY_VALUE(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 16, 16)
-#define PHY_PARAMETER_INFO_SET_DELAY_VALUE(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 16, 16, __value)
-#define PHY_PARAMETER_INFO_GET_RF_PATH(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 24, 8)
-#define PHY_PARAMETER_INFO_SET_RF_PATH(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 24, 8, __value)
-#define PHY_PARAMETER_INFO_GET_DATA(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X04, 0, 32)
-#define PHY_PARAMETER_INFO_SET_DATA(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X04, 0, 32, __value)
-#define PHY_PARAMETER_INFO_GET_MASK(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X08, 0, 32)
-#define PHY_PARAMETER_INFO_SET_MASK(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X08, 0, 32, __value)
-#define CHANNEL_INFO_GET_CHANNEL(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 0, 8)
-#define CHANNEL_INFO_SET_CHANNEL(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 0, 8, __value)
-#define CHANNEL_INFO_GET_PRI_CH_IDX(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 8, 4)
-#define CHANNEL_INFO_SET_PRI_CH_IDX(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 8, 4, __value)
-#define CHANNEL_INFO_GET_BANDWIDTH(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 12, 4)
-#define CHANNEL_INFO_SET_BANDWIDTH(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 12, 4, __value)
-#define CHANNEL_INFO_GET_TIMEOUT(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 16, 8)
-#define CHANNEL_INFO_SET_TIMEOUT(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 16, 8, __value)
-#define CHANNEL_INFO_GET_ACTION_ID(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 24, 7)
-#define CHANNEL_INFO_SET_ACTION_ID(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 24, 7, __value)
-#define CHANNEL_INFO_GET_CH_EXTRA_INFO(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 31, 1)
-#define CHANNEL_INFO_SET_CH_EXTRA_INFO(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 31, 1, __value)
-#define CH_EXTRA_INFO_GET_CH_EXTRA_INFO_ID(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 0, 7)
-#define CH_EXTRA_INFO_SET_CH_EXTRA_INFO_ID(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 0, 7, __value)
-#define CH_EXTRA_INFO_GET_CH_EXTRA_INFO(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 7, 1)
-#define CH_EXTRA_INFO_SET_CH_EXTRA_INFO(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 7, 1, __value)
-#define CH_EXTRA_INFO_GET_CH_EXTRA_INFO_SIZE(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 8, 8)
-#define CH_EXTRA_INFO_SET_CH_EXTRA_INFO_SIZE(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 8, 8, __value)
-#define CH_EXTRA_INFO_GET_CH_EXTRA_INFO_DATA(__extra_info) \
- LE_BITS_TO_4BYTE(__extra_info + 0X00, 16, 1)
-#define CH_EXTRA_INFO_SET_CH_EXTRA_INFO_DATA(__extra_info, __value) \
- SET_BITS_TO_LE_4BYTE(__extra_info + 0X00, 16, 1, __value)
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h b/drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h
deleted file mode 100644
index bc9fb8e8bf5a..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef HALMAC_INTF_PHY_CMD
-#define HALMAC_INTF_PHY_CMD
-
-/* Cut mask */
-enum halmac_intf_phy_cut {
- HALMAC_INTF_PHY_CUT_TESTCHIP = BIT(0),
- HALMAC_INTF_PHY_CUT_A = BIT(1),
- HALMAC_INTF_PHY_CUT_B = BIT(2),
- HALMAC_INTF_PHY_CUT_C = BIT(3),
- HALMAC_INTF_PHY_CUT_D = BIT(4),
- HALMAC_INTF_PHY_CUT_E = BIT(5),
- HALMAC_INTF_PHY_CUT_F = BIT(6),
- HALMAC_INTF_PHY_CUT_G = BIT(7),
- HALMAC_INTF_PHY_CUT_ALL = 0x7FFF,
-};
-
-/* IP selection */
-enum halmac_ip_sel {
- HALMAC_IP_SEL_INTF_PHY = 0,
- HALMAC_IP_SEL_MAC = 1,
- HALMAC_IP_SEL_PCIE_DBI = 2,
- HALMAC_IP_SEL_UNDEFINE = 0x7FFF,
-};
-
-/* Platform mask */
-enum halmac_intf_phy_platform {
- HALMAC_INTF_PHY_PLATFORM_ALL = 0x7FFF,
-};
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h b/drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h
deleted file mode 100644
index f58077539e33..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h
+++ /dev/null
@@ -1,392 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HAL_ORIGINALC2HFORMAT_H2C_C2H_NIC_H_
-#define _HAL_ORIGINALC2HFORMAT_H2C_C2H_NIC_H_
-#define CMD_ID_C2H 0X00
-#define CMD_ID_DBG 0X00
-#define CMD_ID_C2H_LB 0X01
-#define CMD_ID_C2H_SND_TXBF 0X02
-#define CMD_ID_C2H_CCX_RPT 0X03
-#define CMD_ID_C2H_AP_REQ_TXRPT 0X04
-#define CMD_ID_C2H_INITIAL_RATE_COLLECTION 0X05
-#define CMD_ID_C2H_RA_RPT 0X0C
-#define CMD_ID_C2H_SPECIAL_STATISTICS 0X0D
-#define CMD_ID_C2H_RA_PARA_RPT 0X0E
-#define CMD_ID_C2H_CUR_CHANNEL 0X10
-#define CMD_ID_C2H_GPIO_WAKEUP 0X14
-#define C2H_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define DBG_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define DBG_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define DBG_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define DBG_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define DBG_GET_DBG_STR1(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 8)
-#define DBG_SET_DBG_STR1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 8, __value)
-#define DBG_GET_DBG_STR2(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 24, 8)
-#define DBG_SET_DBG_STR2(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 24, 8, __value)
-#define DBG_GET_DBG_STR3(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 8)
-#define DBG_SET_DBG_STR3(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 8, __value)
-#define DBG_GET_DBG_STR4(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define DBG_SET_DBG_STR4(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define DBG_GET_DBG_STR5(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 8)
-#define DBG_SET_DBG_STR5(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 8, __value)
-#define DBG_GET_DBG_STR6(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 24, 8)
-#define DBG_SET_DBG_STR6(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 24, 8, __value)
-#define DBG_GET_DBG_STR7(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X08, 0, 8)
-#define DBG_SET_DBG_STR7(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 0, 8, __value)
-#define DBG_GET_DBG_STR8(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X08, 8, 8)
-#define DBG_SET_DBG_STR8(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 8, 8, __value)
-#define DBG_GET_DBG_STR9(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X08, 16, 8)
-#define DBG_SET_DBG_STR9(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 16, 8, __value)
-#define DBG_GET_DBG_STR10(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X08, 24, 8)
-#define DBG_SET_DBG_STR10(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 24, 8, __value)
-#define DBG_GET_DBG_STR11(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 0, 8)
-#define DBG_SET_DBG_STR11(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 0, 8, __value)
-#define DBG_GET_DBG_STR12(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 8, 8)
-#define DBG_SET_DBG_STR12(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 8, 8, __value)
-#define DBG_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define DBG_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define DBG_GET_TRIGGER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define DBG_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_LB_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_LB_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_LB_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_LB_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_LB_GET_PAYLOAD1(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 16)
-#define C2H_LB_SET_PAYLOAD1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 16, __value)
-#define C2H_LB_GET_PAYLOAD2(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 32)
-#define C2H_LB_SET_PAYLOAD2(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 32, __value)
-#define C2H_LB_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_LB_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_LB_GET_TRIGGER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_LB_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_SND_TXBF_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_SND_TXBF_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_SND_TXBF_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_SND_TXBF_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_SND_TXBF_GET_SND_RESULT(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 1)
-#define C2H_SND_TXBF_SET_SND_RESULT(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 1, __value)
-#define C2H_SND_TXBF_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_SND_TXBF_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_SND_TXBF_GET_TRIGGER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_SND_TXBF_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_CCX_RPT_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_CCX_RPT_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_CCX_RPT_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_CCX_RPT_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_CCX_RPT_GET_QSEL(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 5)
-#define C2H_CCX_RPT_SET_QSEL(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 5, __value)
-#define C2H_CCX_RPT_GET_BMC(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 21, 1)
-#define C2H_CCX_RPT_SET_BMC(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 21, 1, __value)
-#define C2H_CCX_RPT_GET_LIFE_TIME_OVER(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 22, 1)
-#define C2H_CCX_RPT_SET_LIFE_TIME_OVER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 22, 1, __value)
-#define C2H_CCX_RPT_GET_RETRY_OVER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 23, 1)
-#define C2H_CCX_RPT_SET_RETRY_OVER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 23, 1, __value)
-#define C2H_CCX_RPT_GET_MACID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 24, 8)
-#define C2H_CCX_RPT_SET_MACID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 24, 8, __value)
-#define C2H_CCX_RPT_GET_DATA_RETRY_CNT(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 6)
-#define C2H_CCX_RPT_SET_DATA_RETRY_CNT(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 6, __value)
-#define C2H_CCX_RPT_GET_QUEUE7_0(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define C2H_CCX_RPT_SET_QUEUE7_0(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define C2H_CCX_RPT_GET_QUEUE15_8(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 8)
-#define C2H_CCX_RPT_SET_QUEUE15_8(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 8, __value)
-#define C2H_CCX_RPT_GET_FINAL_DATA_RATE(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 24, 8)
-#define C2H_CCX_RPT_SET_FINAL_DATA_RATE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 24, 8, __value)
-#define C2H_CCX_RPT_GET_SW_DEFINE_0(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X08, 0, 8)
-#define C2H_CCX_RPT_SET_SW_DEFINE_0(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 0, 8, __value)
-#define C2H_CCX_RPT_GET_SW_DEFINE_1(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X08, 8, 4)
-#define C2H_CCX_RPT_SET_SW_DEFINE_1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 8, 4, __value)
-#define C2H_CCX_RPT_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_CCX_RPT_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_CCX_RPT_GET_TRIGGER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_CCX_RPT_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_AP_REQ_TXRPT_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_AP_REQ_TXRPT_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_STA1_MACID(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 8)
-#define C2H_AP_REQ_TXRPT_SET_STA1_MACID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TX_OK1_0(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 24, 8)
-#define C2H_AP_REQ_TXRPT_SET_TX_OK1_0(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 24, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TX_OK1_1(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 8)
-#define C2H_AP_REQ_TXRPT_SET_TX_OK1_1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TX_FAIL1_0(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define C2H_AP_REQ_TXRPT_SET_TX_FAIL1_0(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TX_FAIL1_1(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 8)
-#define C2H_AP_REQ_TXRPT_SET_TX_FAIL1_1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_INITIAL_RATE1(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 24, 8)
-#define C2H_AP_REQ_TXRPT_SET_INITIAL_RATE1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 24, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_STA2_MACID(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 0, 8)
-#define C2H_AP_REQ_TXRPT_SET_STA2_MACID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 0, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TX_OK2_0(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 8, 8)
-#define C2H_AP_REQ_TXRPT_SET_TX_OK2_0(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 8, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TX_OK2_1(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 16, 8)
-#define C2H_AP_REQ_TXRPT_SET_TX_OK2_1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 16, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TX_FAIL2_0(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 24, 8)
-#define C2H_AP_REQ_TXRPT_SET_TX_FAIL2_0(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 24, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TX_FAIL2_1(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X0C, 0, 8)
-#define C2H_AP_REQ_TXRPT_SET_TX_FAIL2_1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 0, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_INITIAL_RATE2(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X0C, 8, 8)
-#define C2H_AP_REQ_TXRPT_SET_INITIAL_RATE2(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 8, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_AP_REQ_TXRPT_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_AP_REQ_TXRPT_GET_TRIGGER(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_AP_REQ_TXRPT_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_CMD_ID(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_SEQ(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_TRYING_BITMAP(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 7)
-#define C2H_INITIAL_RATE_COLLECTION_SET_TRYING_BITMAP(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 7, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_INITIAL_RATE1(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 24, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_INITIAL_RATE1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 24, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_INITIAL_RATE2(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_INITIAL_RATE2(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_INITIAL_RATE3(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_INITIAL_RATE3(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_INITIAL_RATE4(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_INITIAL_RATE4(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_INITIAL_RATE5(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 24, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_INITIAL_RATE5(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 24, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_INITIAL_RATE6(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 0, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_INITIAL_RATE6(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 0, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_INITIAL_RATE7(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 8, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_INITIAL_RATE7(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 8, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_LEN(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_INITIAL_RATE_COLLECTION_GET_TRIGGER(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_INITIAL_RATE_COLLECTION_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_RA_RPT_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_RA_RPT_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_RA_RPT_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_RA_RPT_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_RA_RPT_GET_RATE(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 8)
-#define C2H_RA_RPT_SET_RATE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 8, __value)
-#define C2H_RA_RPT_GET_MACID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 24, 8)
-#define C2H_RA_RPT_SET_MACID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 24, 8, __value)
-#define C2H_RA_RPT_GET_USE_LDPC(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 1)
-#define C2H_RA_RPT_SET_USE_LDPC(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 1, __value)
-#define C2H_RA_RPT_GET_USE_TXBF(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X04, 1, 1)
-#define C2H_RA_RPT_SET_USE_TXBF(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 1, 1, __value)
-#define C2H_RA_RPT_GET_COLLISION_STATE(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define C2H_RA_RPT_SET_COLLISION_STATE(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define C2H_RA_RPT_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_RA_RPT_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_RA_RPT_GET_TRIGGER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_RA_RPT_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_CMD_ID(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_SPECIAL_STATISTICS_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_SEQ(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_SPECIAL_STATISTICS_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_STATISTICS_IDX(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 8)
-#define C2H_SPECIAL_STATISTICS_SET_STATISTICS_IDX(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_DATA0(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 24, 8)
-#define C2H_SPECIAL_STATISTICS_SET_DATA0(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 24, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_DATA1(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 0, 8)
-#define C2H_SPECIAL_STATISTICS_SET_DATA1(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 0, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_DATA2(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 8, 8)
-#define C2H_SPECIAL_STATISTICS_SET_DATA2(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 8, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_DATA3(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 16, 8)
-#define C2H_SPECIAL_STATISTICS_SET_DATA3(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 16, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_DATA4(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X04, 24, 8)
-#define C2H_SPECIAL_STATISTICS_SET_DATA4(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X04, 24, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_DATA5(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 0, 8)
-#define C2H_SPECIAL_STATISTICS_SET_DATA5(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 0, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_DATA6(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 8, 8)
-#define C2H_SPECIAL_STATISTICS_SET_DATA6(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 8, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_DATA7(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X08, 16, 8)
-#define C2H_SPECIAL_STATISTICS_SET_DATA7(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X08, 16, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_LEN(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_SPECIAL_STATISTICS_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_SPECIAL_STATISTICS_GET_TRIGGER(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_SPECIAL_STATISTICS_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_RA_PARA_RPT_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_RA_PARA_RPT_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_RA_PARA_RPT_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_RA_PARA_RPT_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_RA_PARA_RPT_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_RA_PARA_RPT_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_RA_PARA_RPT_GET_TRIGGER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_RA_PARA_RPT_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_CUR_CHANNEL_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_CUR_CHANNEL_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_CUR_CHANNEL_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_CUR_CHANNEL_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_CUR_CHANNEL_GET_CHANNEL_NUM(__c2h) \
- LE_BITS_TO_4BYTE(__c2h + 0X00, 16, 8)
-#define C2H_CUR_CHANNEL_SET_CHANNEL_NUM(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 16, 8, __value)
-#define C2H_CUR_CHANNEL_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_CUR_CHANNEL_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_CUR_CHANNEL_GET_TRIGGER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_CUR_CHANNEL_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#define C2H_GPIO_WAKEUP_GET_CMD_ID(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 0, 8)
-#define C2H_GPIO_WAKEUP_SET_CMD_ID(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 0, 8, __value)
-#define C2H_GPIO_WAKEUP_GET_SEQ(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X00, 8, 8)
-#define C2H_GPIO_WAKEUP_SET_SEQ(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X00, 8, 8, __value)
-#define C2H_GPIO_WAKEUP_GET_LEN(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 16, 8)
-#define C2H_GPIO_WAKEUP_SET_LEN(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 16, 8, __value)
-#define C2H_GPIO_WAKEUP_GET_TRIGGER(__c2h) LE_BITS_TO_4BYTE(__c2h + 0X0C, 24, 8)
-#define C2H_GPIO_WAKEUP_SET_TRIGGER(__c2h, __value) \
- SET_BITS_TO_LE_4BYTE(__c2h + 0X0C, 24, 8, __value)
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h b/drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h
deleted file mode 100644
index ce39f0868419..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h
+++ /dev/null
@@ -1,1000 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HAL_ORIGINALH2CFORMAT_H2C_C2H_NIC_H_
-#define _HAL_ORIGINALH2CFORMAT_H2C_C2H_NIC_H_
-#define CMD_ID_ORIGINAL_H2C 0X00
-#define CMD_ID_H2C2H_LB 0X0
-#define CMD_ID_D0_SCAN_OFFLOAD_CTRL 0X06
-#define CMD_ID_RSVD_PAGE 0X0
-#define CMD_ID_MEDIA_STATUS_RPT 0X01
-#define CMD_ID_KEEP_ALIVE 0X03
-#define CMD_ID_DISCONNECT_DECISION 0X04
-#define CMD_ID_AP_OFFLOAD 0X08
-#define CMD_ID_BCN_RSVDPAGE 0X09
-#define CMD_ID_PROBE_RSP_RSVDPAGE 0X0A
-#define CMD_ID_SET_PWR_MODE 0X00
-#define CMD_ID_PS_TUNING_PARA 0X01
-#define CMD_ID_PS_TUNING_PARA_II 0X02
-#define CMD_ID_PS_LPS_PARA 0X03
-#define CMD_ID_P2P_PS_OFFLOAD 0X04
-#define CMD_ID_PS_SCAN_EN 0X05
-#define CMD_ID_SAP_PS 0X06
-#define CMD_ID_INACTIVE_PS 0X07
-#define CMD_ID_MACID_CFG 0X00
-#define CMD_ID_TXBF 0X01
-#define CMD_ID_RSSI_SETTING 0X02
-#define CMD_ID_AP_REQ_TXRPT 0X03
-#define CMD_ID_INIT_RATE_COLLECTION 0X04
-#define CMD_ID_IQK_OFFLOAD 0X05
-#define CMD_ID_MACID_CFG_3SS 0X06
-#define CMD_ID_RA_PARA_ADJUST 0X07
-#define CMD_ID_WWLAN 0X00
-#define CMD_ID_REMOTE_WAKE_CTRL 0X01
-#define CMD_ID_AOAC_GLOBAL_INFO 0X02
-#define CMD_ID_AOAC_RSVD_PAGE 0X03
-#define CMD_ID_AOAC_RSVD_PAGE2 0X04
-#define CMD_ID_D0_SCAN_OFFLOAD_INFO 0X05
-#define CMD_ID_CHANNEL_SWITCH_OFFLOAD 0X07
-#define CMD_ID_AOAC_RSVD_PAGE3 0X08
-#define CLASS_ORIGINAL_H2C 0X00
-#define CLASS_H2C2H_LB 0X07
-#define CLASS_D0_SCAN_OFFLOAD_CTRL 0X04
-#define CLASS_RSVD_PAGE 0X0
-#define CLASS_MEDIA_STATUS_RPT 0X0
-#define CLASS_KEEP_ALIVE 0X0
-#define CLASS_DISCONNECT_DECISION 0X0
-#define CLASS_AP_OFFLOAD 0X0
-#define CLASS_BCN_RSVDPAGE 0X0
-#define CLASS_PROBE_RSP_RSVDPAGE 0X0
-#define CLASS_SET_PWR_MODE 0X01
-#define CLASS_PS_TUNING_PARA 0X01
-#define CLASS_PS_TUNING_PARA_II 0X01
-#define CLASS_PS_LPS_PARA 0X01
-#define CLASS_P2P_PS_OFFLOAD 0X01
-#define CLASS_PS_SCAN_EN 0X1
-#define CLASS_SAP_PS 0X1
-#define CLASS_INACTIVE_PS 0X1
-#define CLASS_MACID_CFG 0X2
-#define CLASS_TXBF 0X2
-#define CLASS_RSSI_SETTING 0X2
-#define CLASS_AP_REQ_TXRPT 0X2
-#define CLASS_INIT_RATE_COLLECTION 0X2
-#define CLASS_IQK_OFFLOAD 0X2
-#define CLASS_MACID_CFG_3SS 0X2
-#define CLASS_RA_PARA_ADJUST 0X02
-#define CLASS_WWLAN 0X4
-#define CLASS_REMOTE_WAKE_CTRL 0X4
-#define CLASS_AOAC_GLOBAL_INFO 0X04
-#define CLASS_AOAC_RSVD_PAGE 0X04
-#define CLASS_AOAC_RSVD_PAGE2 0X04
-#define CLASS_D0_SCAN_OFFLOAD_INFO 0X04
-#define CLASS_CHANNEL_SWITCH_OFFLOAD 0X04
-#define CLASS_AOAC_RSVD_PAGE3 0X04
-#define ORIGINAL_H2C_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define ORIGINAL_H2C_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define ORIGINAL_H2C_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define ORIGINAL_H2C_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define H2C2H_LB_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define H2C2H_LB_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define H2C2H_LB_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define H2C2H_LB_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define H2C2H_LB_GET_SEQ(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define H2C2H_LB_SET_SEQ(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define H2C2H_LB_GET_PAYLOAD1(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 16)
-#define H2C2H_LB_SET_PAYLOAD1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 16, __value)
-#define H2C2H_LB_GET_PAYLOAD2(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 32)
-#define H2C2H_LB_SET_PAYLOAD2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 32, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_CMD_ID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define D0_SCAN_OFFLOAD_CTRL_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_CLASS(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define D0_SCAN_OFFLOAD_CTRL_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_D0_SCAN_FUN_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define D0_SCAN_OFFLOAD_CTRL_SET_D0_SCAN_FUN_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_RTD3FUN_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define D0_SCAN_OFFLOAD_CTRL_SET_RTD3FUN_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_U3_SCAN_FUN_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 10, 1)
-#define D0_SCAN_OFFLOAD_CTRL_SET_U3_SCAN_FUN_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 10, 1, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_NLO_FUN_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 11, 1)
-#define D0_SCAN_OFFLOAD_CTRL_SET_NLO_FUN_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 11, 1, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_IPS_DEPENDENT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 12, 1)
-#define D0_SCAN_OFFLOAD_CTRL_SET_IPS_DEPENDENT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 12, 1, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_LOC_PROBE_PACKET(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 17)
-#define D0_SCAN_OFFLOAD_CTRL_SET_LOC_PROBE_PACKET(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 17, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_LOC_SCAN_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define D0_SCAN_OFFLOAD_CTRL_SET_LOC_SCAN_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define D0_SCAN_OFFLOAD_CTRL_GET_LOC_SSID_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define D0_SCAN_OFFLOAD_CTRL_SET_LOC_SSID_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define RSVD_PAGE_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define RSVD_PAGE_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define RSVD_PAGE_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define RSVD_PAGE_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define RSVD_PAGE_GET_LOC_PROBE_RSP(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define RSVD_PAGE_SET_LOC_PROBE_RSP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define RSVD_PAGE_GET_LOC_PS_POLL(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define RSVD_PAGE_SET_LOC_PS_POLL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define RSVD_PAGE_GET_LOC_NULL_DATA(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define RSVD_PAGE_SET_LOC_NULL_DATA(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define RSVD_PAGE_GET_LOC_QOS_NULL(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define RSVD_PAGE_SET_LOC_QOS_NULL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define RSVD_PAGE_GET_LOC_BT_QOS_NULL(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define RSVD_PAGE_SET_LOC_BT_QOS_NULL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define RSVD_PAGE_GET_LOC_CTS2SELF(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 8)
-#define RSVD_PAGE_SET_LOC_CTS2SELF(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 8, __value)
-#define RSVD_PAGE_GET_LOC_LTECOEX_QOSNULL(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 24, 8)
-#define RSVD_PAGE_SET_LOC_LTECOEX_QOSNULL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 24, 8, __value)
-#define MEDIA_STATUS_RPT_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define MEDIA_STATUS_RPT_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define MEDIA_STATUS_RPT_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define MEDIA_STATUS_RPT_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define MEDIA_STATUS_RPT_GET_OP_MODE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define MEDIA_STATUS_RPT_SET_OP_MODE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define MEDIA_STATUS_RPT_GET_MACID_IN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define MEDIA_STATUS_RPT_SET_MACID_IN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define MEDIA_STATUS_RPT_GET_MACID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define MEDIA_STATUS_RPT_SET_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define MEDIA_STATUS_RPT_GET_MACID_END(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define MEDIA_STATUS_RPT_SET_MACID_END(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define KEEP_ALIVE_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define KEEP_ALIVE_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define KEEP_ALIVE_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define KEEP_ALIVE_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define KEEP_ALIVE_GET_ENABLE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define KEEP_ALIVE_SET_ENABLE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define KEEP_ALIVE_GET_ADOPT_USER_SETTING(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define KEEP_ALIVE_SET_ADOPT_USER_SETTING(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define KEEP_ALIVE_GET_PKT_TYPE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 10, 1)
-#define KEEP_ALIVE_SET_PKT_TYPE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 10, 1, __value)
-#define KEEP_ALIVE_GET_KEEP_ALIVE_CHECK_PERIOD(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define KEEP_ALIVE_SET_KEEP_ALIVE_CHECK_PERIOD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define DISCONNECT_DECISION_GET_CMD_ID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define DISCONNECT_DECISION_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define DISCONNECT_DECISION_GET_CLASS(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define DISCONNECT_DECISION_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define DISCONNECT_DECISION_GET_ENABLE(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define DISCONNECT_DECISION_SET_ENABLE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define DISCONNECT_DECISION_GET_ADOPT_USER_SETTING(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define DISCONNECT_DECISION_SET_ADOPT_USER_SETTING(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define DISCONNECT_DECISION_GET_TRY_OK_BCN_FAIL_COUNT_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 10, 1)
-#define DISCONNECT_DECISION_SET_TRY_OK_BCN_FAIL_COUNT_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 10, 1, __value)
-#define DISCONNECT_DECISION_GET_DISCONNECT_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 11, 1)
-#define DISCONNECT_DECISION_SET_DISCONNECT_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 11, 1, __value)
-#define DISCONNECT_DECISION_GET_DISCON_DECISION_CHECK_PERIOD(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define DISCONNECT_DECISION_SET_DISCON_DECISION_CHECK_PERIOD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define DISCONNECT_DECISION_GET_TRY_PKT_NUM(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define DISCONNECT_DECISION_SET_TRY_PKT_NUM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define DISCONNECT_DECISION_GET_TRY_OK_BCN_FAIL_COUNT_LIMIT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define DISCONNECT_DECISION_SET_TRY_OK_BCN_FAIL_COUNT_LIMIT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define AP_OFFLOAD_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define AP_OFFLOAD_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define AP_OFFLOAD_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define AP_OFFLOAD_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define AP_OFFLOAD_GET_ON(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define AP_OFFLOAD_SET_ON(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define AP_OFFLOAD_GET_CFG_MIFI_PLATFORM(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define AP_OFFLOAD_SET_CFG_MIFI_PLATFORM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define AP_OFFLOAD_GET_LINKED(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 10, 1)
-#define AP_OFFLOAD_SET_LINKED(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 10, 1, __value)
-#define AP_OFFLOAD_GET_EN_AUTO_WAKE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 11, 1)
-#define AP_OFFLOAD_SET_EN_AUTO_WAKE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 11, 1, __value)
-#define AP_OFFLOAD_GET_WAKE_FLAG(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 12, 1)
-#define AP_OFFLOAD_SET_WAKE_FLAG(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 12, 1, __value)
-#define AP_OFFLOAD_GET_HIDDEN_ROOT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 1)
-#define AP_OFFLOAD_SET_HIDDEN_ROOT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 1, __value)
-#define AP_OFFLOAD_GET_HIDDEN_VAP1(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 17, 1)
-#define AP_OFFLOAD_SET_HIDDEN_VAP1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 17, 1, __value)
-#define AP_OFFLOAD_GET_HIDDEN_VAP2(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 18, 1)
-#define AP_OFFLOAD_SET_HIDDEN_VAP2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 18, 1, __value)
-#define AP_OFFLOAD_GET_HIDDEN_VAP3(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 19, 1)
-#define AP_OFFLOAD_SET_HIDDEN_VAP3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 19, 1, __value)
-#define AP_OFFLOAD_GET_HIDDEN_VAP4(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 20, 1)
-#define AP_OFFLOAD_SET_HIDDEN_VAP4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 20, 1, __value)
-#define AP_OFFLOAD_GET_DENYANY_ROOT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 1)
-#define AP_OFFLOAD_SET_DENYANY_ROOT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 1, __value)
-#define AP_OFFLOAD_GET_DENYANY_VAP1(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 25, 1)
-#define AP_OFFLOAD_SET_DENYANY_VAP1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 25, 1, __value)
-#define AP_OFFLOAD_GET_DENYANY_VAP2(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 26, 1)
-#define AP_OFFLOAD_SET_DENYANY_VAP2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 26, 1, __value)
-#define AP_OFFLOAD_GET_DENYANY_VAP3(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 27, 1)
-#define AP_OFFLOAD_SET_DENYANY_VAP3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 27, 1, __value)
-#define AP_OFFLOAD_GET_DENYANY_VAP4(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 28, 1)
-#define AP_OFFLOAD_SET_DENYANY_VAP4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 28, 1, __value)
-#define AP_OFFLOAD_GET_WAIT_TBTT_CNT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define AP_OFFLOAD_SET_WAIT_TBTT_CNT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define AP_OFFLOAD_GET_WAKE_TIMEOUT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define AP_OFFLOAD_SET_WAKE_TIMEOUT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define AP_OFFLOAD_GET_LEN_IV_PAIR(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 8)
-#define AP_OFFLOAD_SET_LEN_IV_PAIR(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 8, __value)
-#define AP_OFFLOAD_GET_LEN_IV_GRP(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 24, 8)
-#define AP_OFFLOAD_SET_LEN_IV_GRP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 24, 8, __value)
-#define BCN_RSVDPAGE_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define BCN_RSVDPAGE_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define BCN_RSVDPAGE_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define BCN_RSVDPAGE_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define BCN_RSVDPAGE_GET_LOC_ROOT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define BCN_RSVDPAGE_SET_LOC_ROOT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define BCN_RSVDPAGE_GET_LOC_VAP1(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define BCN_RSVDPAGE_SET_LOC_VAP1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define BCN_RSVDPAGE_GET_LOC_VAP2(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define BCN_RSVDPAGE_SET_LOC_VAP2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define BCN_RSVDPAGE_GET_LOC_VAP3(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define BCN_RSVDPAGE_SET_LOC_VAP3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define BCN_RSVDPAGE_GET_LOC_VAP4(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define BCN_RSVDPAGE_SET_LOC_VAP4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define PROBE_RSP_RSVDPAGE_GET_CMD_ID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define PROBE_RSP_RSVDPAGE_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define PROBE_RSP_RSVDPAGE_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define PROBE_RSP_RSVDPAGE_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define PROBE_RSP_RSVDPAGE_GET_LOC_ROOT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define PROBE_RSP_RSVDPAGE_SET_LOC_ROOT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define PROBE_RSP_RSVDPAGE_GET_LOC_VAP1(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define PROBE_RSP_RSVDPAGE_SET_LOC_VAP1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define PROBE_RSP_RSVDPAGE_GET_LOC_VAP2(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define PROBE_RSP_RSVDPAGE_SET_LOC_VAP2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define PROBE_RSP_RSVDPAGE_GET_LOC_VAP3(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define PROBE_RSP_RSVDPAGE_SET_LOC_VAP3(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define PROBE_RSP_RSVDPAGE_GET_LOC_VAP4(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define PROBE_RSP_RSVDPAGE_SET_LOC_VAP4(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define SET_PWR_MODE_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define SET_PWR_MODE_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define SET_PWR_MODE_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define SET_PWR_MODE_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define SET_PWR_MODE_GET_MODE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 7)
-#define SET_PWR_MODE_SET_MODE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 7, __value)
-#define SET_PWR_MODE_GET_CLK_REQUEST(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 15, 1)
-#define SET_PWR_MODE_SET_CLK_REQUEST(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 15, 1, __value)
-#define SET_PWR_MODE_GET_RLBM(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 4)
-#define SET_PWR_MODE_SET_RLBM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 4, __value)
-#define SET_PWR_MODE_GET_SMART_PS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 20, 4)
-#define SET_PWR_MODE_SET_SMART_PS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 20, 4, __value)
-#define SET_PWR_MODE_GET_AWAKE_INTERVAL(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define SET_PWR_MODE_SET_AWAKE_INTERVAL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define SET_PWR_MODE_GET_B_ALL_QUEUE_UAPSD(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 1)
-#define SET_PWR_MODE_SET_B_ALL_QUEUE_UAPSD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 1, __value)
-#define SET_PWR_MODE_GET_BCN_EARLY_RPT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 2, 1)
-#define SET_PWR_MODE_SET_BCN_EARLY_RPT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 2, 1, __value)
-#define SET_PWR_MODE_GET_PORT_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 5, 3)
-#define SET_PWR_MODE_SET_PORT_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 5, 3, __value)
-#define SET_PWR_MODE_GET_PWR_STATE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define SET_PWR_MODE_SET_PWR_STATE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define SET_PWR_MODE_GET_LOW_POWER_RX_BCN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 1)
-#define SET_PWR_MODE_SET_LOW_POWER_RX_BCN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 1, __value)
-#define SET_PWR_MODE_GET_ANT_AUTO_SWITCH(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 17, 1)
-#define SET_PWR_MODE_SET_ANT_AUTO_SWITCH(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 17, 1, __value)
-#define SET_PWR_MODE_GET_PS_ALLOW_BT_HIGH_PRIORITY(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 18, 1)
-#define SET_PWR_MODE_SET_PS_ALLOW_BT_HIGH_PRIORITY(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 18, 1, __value)
-#define SET_PWR_MODE_GET_PROTECT_BCN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 19, 1)
-#define SET_PWR_MODE_SET_PROTECT_BCN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 19, 1, __value)
-#define SET_PWR_MODE_GET_SILENCE_PERIOD(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 20, 1)
-#define SET_PWR_MODE_SET_SILENCE_PERIOD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 20, 1, __value)
-#define SET_PWR_MODE_GET_FAST_BT_CONNECT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 21, 1)
-#define SET_PWR_MODE_SET_FAST_BT_CONNECT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 21, 1, __value)
-#define SET_PWR_MODE_GET_TWO_ANTENNA_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 22, 1)
-#define SET_PWR_MODE_SET_TWO_ANTENNA_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 22, 1, __value)
-#define SET_PWR_MODE_GET_ADOPT_USER_SETTING(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 24, 1)
-#define SET_PWR_MODE_SET_ADOPT_USER_SETTING(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 24, 1, __value)
-#define SET_PWR_MODE_GET_DRV_BCN_EARLY_SHIFT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 25, 3)
-#define SET_PWR_MODE_SET_DRV_BCN_EARLY_SHIFT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 25, 3, __value)
-#define SET_PWR_MODE_GET_DRV_BCN_EARLY_SHIFT2(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 28, 4)
-#define SET_PWR_MODE_SET_DRV_BCN_EARLY_SHIFT2(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 28, 4, __value)
-#define PS_TUNING_PARA_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define PS_TUNING_PARA_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define PS_TUNING_PARA_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define PS_TUNING_PARA_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define PS_TUNING_PARA_GET_BCN_TO_LIMIT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 7)
-#define PS_TUNING_PARA_SET_BCN_TO_LIMIT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 7, __value)
-#define PS_TUNING_PARA_GET_DTIM_TIME_OUT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 15, 1)
-#define PS_TUNING_PARA_SET_DTIM_TIME_OUT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 15, 1, __value)
-#define PS_TUNING_PARA_GET_PS_TIME_OUT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 4)
-#define PS_TUNING_PARA_SET_PS_TIME_OUT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 4, __value)
-#define PS_TUNING_PARA_GET_ADOPT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define PS_TUNING_PARA_SET_ADOPT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define PS_TUNING_PARA_II_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define PS_TUNING_PARA_II_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define PS_TUNING_PARA_II_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define PS_TUNING_PARA_II_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define PS_TUNING_PARA_II_GET_BCN_TO_PERIOD(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 7)
-#define PS_TUNING_PARA_II_SET_BCN_TO_PERIOD(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 7, __value)
-#define PS_TUNING_PARA_II_GET_ADOPT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 15, 1)
-#define PS_TUNING_PARA_II_SET_ADOPT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 15, 1, __value)
-#define PS_TUNING_PARA_II_GET_DRV_EARLY_IVL(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define PS_TUNING_PARA_II_SET_DRV_EARLY_IVL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define PS_LPS_PARA_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define PS_LPS_PARA_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define PS_LPS_PARA_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define PS_LPS_PARA_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define PS_LPS_PARA_GET_LPS_CONTROL(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define PS_LPS_PARA_SET_LPS_CONTROL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define P2P_PS_OFFLOAD_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define P2P_PS_OFFLOAD_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define P2P_PS_OFFLOAD_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define P2P_PS_OFFLOAD_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define P2P_PS_OFFLOAD_GET_OFFLOAD_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define P2P_PS_OFFLOAD_SET_OFFLOAD_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define P2P_PS_OFFLOAD_GET_ROLE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define P2P_PS_OFFLOAD_SET_ROLE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define P2P_PS_OFFLOAD_GET_CTWINDOW_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 10, 1)
-#define P2P_PS_OFFLOAD_SET_CTWINDOW_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 10, 1, __value)
-#define P2P_PS_OFFLOAD_GET_NOA0_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 11, 1)
-#define P2P_PS_OFFLOAD_SET_NOA0_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 11, 1, __value)
-#define P2P_PS_OFFLOAD_GET_NOA1_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 12, 1)
-#define P2P_PS_OFFLOAD_SET_NOA1_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 12, 1, __value)
-#define P2P_PS_OFFLOAD_GET_ALL_STA_SLEEP(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 13, 1)
-#define P2P_PS_OFFLOAD_SET_ALL_STA_SLEEP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 13, 1, __value)
-#define P2P_PS_OFFLOAD_GET_DISCOVERY(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 14, 1)
-#define P2P_PS_OFFLOAD_SET_DISCOVERY(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 14, 1, __value)
-#define PS_SCAN_EN_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define PS_SCAN_EN_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define PS_SCAN_EN_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define PS_SCAN_EN_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define PS_SCAN_EN_GET_ENABLE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define PS_SCAN_EN_SET_ENABLE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define SAP_PS_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define SAP_PS_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define SAP_PS_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define SAP_PS_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define SAP_PS_GET_ENABLE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define SAP_PS_SET_ENABLE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define SAP_PS_GET_EN_PS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define SAP_PS_SET_EN_PS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define SAP_PS_GET_EN_LP_RX(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 10, 1)
-#define SAP_PS_SET_EN_LP_RX(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 10, 1, __value)
-#define SAP_PS_GET_MANUAL_32K(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 11, 1)
-#define SAP_PS_SET_MANUAL_32K(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 11, 1, __value)
-#define SAP_PS_GET_DURATION(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define SAP_PS_SET_DURATION(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define INACTIVE_PS_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define INACTIVE_PS_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define INACTIVE_PS_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define INACTIVE_PS_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define INACTIVE_PS_GET_ENABLE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define INACTIVE_PS_SET_ENABLE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define INACTIVE_PS_GET_IGNORE_PS_CONDITION(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define INACTIVE_PS_SET_IGNORE_PS_CONDITION(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define INACTIVE_PS_GET_FREQUENCY(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define INACTIVE_PS_SET_FREQUENCY(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define INACTIVE_PS_GET_DURATION(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define INACTIVE_PS_SET_DURATION(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define MACID_CFG_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define MACID_CFG_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define MACID_CFG_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define MACID_CFG_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define MACID_CFG_GET_MAC_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define MACID_CFG_SET_MAC_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define MACID_CFG_GET_RATE_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 5)
-#define MACID_CFG_SET_RATE_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 5, __value)
-#define MACID_CFG_GET_SGI(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 23, 1)
-#define MACID_CFG_SET_SGI(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 23, 1, __value)
-#define MACID_CFG_GET_BW(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 2)
-#define MACID_CFG_SET_BW(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 2, __value)
-#define MACID_CFG_GET_LDPC_CAP(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 26, 1)
-#define MACID_CFG_SET_LDPC_CAP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 26, 1, __value)
-#define MACID_CFG_GET_NO_UPDATE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 27, 1)
-#define MACID_CFG_SET_NO_UPDATE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 27, 1, __value)
-#define MACID_CFG_GET_WHT_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 28, 2)
-#define MACID_CFG_SET_WHT_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 28, 2, __value)
-#define MACID_CFG_GET_DISPT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 30, 1)
-#define MACID_CFG_SET_DISPT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 30, 1, __value)
-#define MACID_CFG_GET_DISRA(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 31, 1)
-#define MACID_CFG_SET_DISRA(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 31, 1, __value)
-#define MACID_CFG_GET_RATE_MASK7_0(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define MACID_CFG_SET_RATE_MASK7_0(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define MACID_CFG_GET_RATE_MASK15_8(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define MACID_CFG_SET_RATE_MASK15_8(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define MACID_CFG_GET_RATE_MASK23_16(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 8)
-#define MACID_CFG_SET_RATE_MASK23_16(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 8, __value)
-#define MACID_CFG_GET_RATE_MASK31_24(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 24, 8)
-#define MACID_CFG_SET_RATE_MASK31_24(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 24, 8, __value)
-#define TXBF_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define TXBF_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define TXBF_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define TXBF_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define TXBF_GET_NDPA0_HEAD_PAGE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define TXBF_SET_NDPA0_HEAD_PAGE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define TXBF_GET_NDPA1_HEAD_PAGE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define TXBF_SET_NDPA1_HEAD_PAGE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define TXBF_GET_PERIOD_0(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define TXBF_SET_PERIOD_0(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define RSSI_SETTING_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define RSSI_SETTING_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define RSSI_SETTING_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define RSSI_SETTING_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define RSSI_SETTING_GET_MAC_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define RSSI_SETTING_SET_MAC_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define RSSI_SETTING_GET_RSSI(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 7)
-#define RSSI_SETTING_SET_RSSI(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 7, __value)
-#define RSSI_SETTING_GET_RA_INFO(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define RSSI_SETTING_SET_RA_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define AP_REQ_TXRPT_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define AP_REQ_TXRPT_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define AP_REQ_TXRPT_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define AP_REQ_TXRPT_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define AP_REQ_TXRPT_GET_STA1_MACID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define AP_REQ_TXRPT_SET_STA1_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define AP_REQ_TXRPT_GET_STA2_MACID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define AP_REQ_TXRPT_SET_STA2_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define AP_REQ_TXRPT_GET_RTY_OK_TOTAL(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 1)
-#define AP_REQ_TXRPT_SET_RTY_OK_TOTAL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 1, __value)
-#define AP_REQ_TXRPT_GET_RTY_CNT_MACID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 25, 1)
-#define AP_REQ_TXRPT_SET_RTY_CNT_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 25, 1, __value)
-#define INIT_RATE_COLLECTION_GET_CMD_ID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define INIT_RATE_COLLECTION_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define INIT_RATE_COLLECTION_GET_CLASS(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define INIT_RATE_COLLECTION_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define INIT_RATE_COLLECTION_GET_STA1_MACID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define INIT_RATE_COLLECTION_SET_STA1_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define INIT_RATE_COLLECTION_GET_STA2_MACID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define INIT_RATE_COLLECTION_SET_STA2_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define INIT_RATE_COLLECTION_GET_STA3_MACID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define INIT_RATE_COLLECTION_SET_STA3_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define INIT_RATE_COLLECTION_GET_STA4_MACID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define INIT_RATE_COLLECTION_SET_STA4_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define INIT_RATE_COLLECTION_GET_STA5_MACID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define INIT_RATE_COLLECTION_SET_STA5_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define INIT_RATE_COLLECTION_GET_STA6_MACID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 8)
-#define INIT_RATE_COLLECTION_SET_STA6_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 8, __value)
-#define INIT_RATE_COLLECTION_GET_STA7_MACID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 24, 8)
-#define INIT_RATE_COLLECTION_SET_STA7_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 24, 8, __value)
-#define IQK_OFFLOAD_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define IQK_OFFLOAD_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define IQK_OFFLOAD_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define IQK_OFFLOAD_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define IQK_OFFLOAD_GET_CHANNEL(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define IQK_OFFLOAD_SET_CHANNEL(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define IQK_OFFLOAD_GET_BWBAND(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define IQK_OFFLOAD_SET_BWBAND(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define IQK_OFFLOAD_GET_EXTPALNA(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define IQK_OFFLOAD_SET_EXTPALNA(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define MACID_CFG_3SS_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define MACID_CFG_3SS_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define MACID_CFG_3SS_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define MACID_CFG_3SS_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define MACID_CFG_3SS_GET_MACID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define MACID_CFG_3SS_SET_MACID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define MACID_CFG_3SS_GET_RATE_MASK_39_32(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define MACID_CFG_3SS_SET_RATE_MASK_39_32(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define MACID_CFG_3SS_GET_RATE_MASK_47_40(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define MACID_CFG_3SS_SET_RATE_MASK_47_40(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define RA_PARA_ADJUST_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define RA_PARA_ADJUST_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define RA_PARA_ADJUST_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define RA_PARA_ADJUST_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define RA_PARA_ADJUST_GET_MAC_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define RA_PARA_ADJUST_SET_MAC_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define RA_PARA_ADJUST_GET_PARAMETER_INDEX(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define RA_PARA_ADJUST_SET_PARAMETER_INDEX(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define RA_PARA_ADJUST_GET_RATE_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define RA_PARA_ADJUST_SET_RATE_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define RA_PARA_ADJUST_GET_VALUE_BYTE0(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define RA_PARA_ADJUST_SET_VALUE_BYTE0(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define RA_PARA_ADJUST_GET_VALUE_BYTE1(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define RA_PARA_ADJUST_SET_VALUE_BYTE1(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define RA_PARA_ADJUST_GET_ASK_FW_FOR_FW_PARA(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 8)
-#define RA_PARA_ADJUST_SET_ASK_FW_FOR_FW_PARA(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 8, __value)
-#define WWLAN_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define WWLAN_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define WWLAN_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define WWLAN_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define WWLAN_GET_FUNC_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define WWLAN_SET_FUNC_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define WWLAN_GET_PATTERM_MAT_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define WWLAN_SET_PATTERM_MAT_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define WWLAN_GET_MAGIC_PKT_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 10, 1)
-#define WWLAN_SET_MAGIC_PKT_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 10, 1, __value)
-#define WWLAN_GET_UNICAST_WAKEUP_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 11, 1)
-#define WWLAN_SET_UNICAST_WAKEUP_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 11, 1, __value)
-#define WWLAN_GET_ALL_PKT_DROP(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 12, 1)
-#define WWLAN_SET_ALL_PKT_DROP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 12, 1, __value)
-#define WWLAN_GET_GPIO_ACTIVE(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 13, 1)
-#define WWLAN_SET_GPIO_ACTIVE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 13, 1, __value)
-#define WWLAN_GET_REKEY_WAKEUP_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 14, 1)
-#define WWLAN_SET_REKEY_WAKEUP_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 14, 1, __value)
-#define WWLAN_GET_DEAUTH_WAKEUP_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 15, 1)
-#define WWLAN_SET_DEAUTH_WAKEUP_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 15, 1, __value)
-#define WWLAN_GET_GPIO_NUM(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 7)
-#define WWLAN_SET_GPIO_NUM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 7, __value)
-#define WWLAN_GET_DATAPIN_WAKEUP_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 23, 1)
-#define WWLAN_SET_DATAPIN_WAKEUP_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 23, 1, __value)
-#define WWLAN_GET_GPIO_DURATION(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define WWLAN_SET_GPIO_DURATION(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define WWLAN_GET_GPIO_PLUS_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 1)
-#define WWLAN_SET_GPIO_PLUS_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 1, __value)
-#define WWLAN_GET_GPIO_PULSE_COUNT(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 1, 7)
-#define WWLAN_SET_GPIO_PULSE_COUNT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 1, 7, __value)
-#define WWLAN_GET_DISABLE_UPHY(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 1)
-#define WWLAN_SET_DISABLE_UPHY(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 1, __value)
-#define WWLAN_GET_HST2DEV_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 9, 1)
-#define WWLAN_SET_HST2DEV_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 9, 1, __value)
-#define WWLAN_GET_GPIO_DURATION_MS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X04, 10, 1)
-#define WWLAN_SET_GPIO_DURATION_MS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 10, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define REMOTE_WAKE_CTRL_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define REMOTE_WAKE_CTRL_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define REMOTE_WAKE_CTRL_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define REMOTE_WAKE_CTRL_GET_REMOTE_WAKE_CTRL_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 1)
-#define REMOTE_WAKE_CTRL_SET_REMOTE_WAKE_CTRL_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_ARP_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 9, 1)
-#define REMOTE_WAKE_CTRL_SET_ARP_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 9, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_NDP_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 10, 1)
-#define REMOTE_WAKE_CTRL_SET_NDP_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 10, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_GTK_EN(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 11, 1)
-#define REMOTE_WAKE_CTRL_SET_GTK_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 11, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_NLO_OFFLOAD_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 12, 1)
-#define REMOTE_WAKE_CTRL_SET_NLO_OFFLOAD_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 12, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_REAL_WOW_V1_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 13, 1)
-#define REMOTE_WAKE_CTRL_SET_REAL_WOW_V1_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 13, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_REAL_WOW_V2_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 14, 1)
-#define REMOTE_WAKE_CTRL_SET_REAL_WOW_V2_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 14, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_FW_UNICAST(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 15, 1)
-#define REMOTE_WAKE_CTRL_SET_FW_UNICAST(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 15, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_P2P_OFFLOAD_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 1)
-#define REMOTE_WAKE_CTRL_SET_P2P_OFFLOAD_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_RUNTIME_PM_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 17, 1)
-#define REMOTE_WAKE_CTRL_SET_RUNTIME_PM_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 17, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_NET_BIOS_DROP_EN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 18, 1)
-#define REMOTE_WAKE_CTRL_SET_NET_BIOS_DROP_EN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 18, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_ARP_ACTION(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 1)
-#define REMOTE_WAKE_CTRL_SET_ARP_ACTION(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_FW_PARSING_UNTIL_WAKEUP(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 28, 1)
-#define REMOTE_WAKE_CTRL_SET_FW_PARSING_UNTIL_WAKEUP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 28, 1, __value)
-#define REMOTE_WAKE_CTRL_GET_FW_PARSING_AFTER_WAKEUP(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 29, 1)
-#define REMOTE_WAKE_CTRL_SET_FW_PARSING_AFTER_WAKEUP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 29, 1, __value)
-#define AOAC_GLOBAL_INFO_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define AOAC_GLOBAL_INFO_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define AOAC_GLOBAL_INFO_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define AOAC_GLOBAL_INFO_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define AOAC_GLOBAL_INFO_GET_PAIR_WISE_ENC_ALG(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define AOAC_GLOBAL_INFO_SET_PAIR_WISE_ENC_ALG(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define AOAC_GLOBAL_INFO_GET_GROUP_ENC_ALG(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define AOAC_GLOBAL_INFO_SET_GROUP_ENC_ALG(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define AOAC_RSVD_PAGE_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define AOAC_RSVD_PAGE_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define AOAC_RSVD_PAGE_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define AOAC_RSVD_PAGE_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define AOAC_RSVD_PAGE_GET_LOC_REMOTE_CTRL_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define AOAC_RSVD_PAGE_SET_LOC_REMOTE_CTRL_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define AOAC_RSVD_PAGE_GET_LOC_ARP_RESPONSE(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define AOAC_RSVD_PAGE_SET_LOC_ARP_RESPONSE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define AOAC_RSVD_PAGE_GET_LOC_NEIGHBOR_ADVERTISEMENT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define AOAC_RSVD_PAGE_SET_LOC_NEIGHBOR_ADVERTISEMENT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define AOAC_RSVD_PAGE_GET_LOC_GTK_RSP(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define AOAC_RSVD_PAGE_SET_LOC_GTK_RSP(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define AOAC_RSVD_PAGE_GET_LOC_GTK_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define AOAC_RSVD_PAGE_SET_LOC_GTK_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define AOAC_RSVD_PAGE_GET_LOC_GTK_EXT_MEM(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 8)
-#define AOAC_RSVD_PAGE_SET_LOC_GTK_EXT_MEM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 8, __value)
-#define AOAC_RSVD_PAGE_GET_LOC_NDP_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 24, 8)
-#define AOAC_RSVD_PAGE_SET_LOC_NDP_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 24, 8, __value)
-#define AOAC_RSVD_PAGE2_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define AOAC_RSVD_PAGE2_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define AOAC_RSVD_PAGE2_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define AOAC_RSVD_PAGE2_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define AOAC_RSVD_PAGE2_GET_LOC_ROUTER_SOLICATION(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define AOAC_RSVD_PAGE2_SET_LOC_ROUTER_SOLICATION(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define AOAC_RSVD_PAGE2_GET_LOC_BUBBLE_PACKET(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define AOAC_RSVD_PAGE2_SET_LOC_BUBBLE_PACKET(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define AOAC_RSVD_PAGE2_GET_LOC_TEREDO_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define AOAC_RSVD_PAGE2_SET_LOC_TEREDO_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define AOAC_RSVD_PAGE2_GET_LOC_REALWOW_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 0, 8)
-#define AOAC_RSVD_PAGE2_SET_LOC_REALWOW_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 0, 8, __value)
-#define AOAC_RSVD_PAGE2_GET_LOC_KEEP_ALIVE_PKT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 8, 8)
-#define AOAC_RSVD_PAGE2_SET_LOC_KEEP_ALIVE_PKT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 8, 8, __value)
-#define AOAC_RSVD_PAGE2_GET_LOC_ACK_PATTERN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 16, 8)
-#define AOAC_RSVD_PAGE2_SET_LOC_ACK_PATTERN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 16, 8, __value)
-#define AOAC_RSVD_PAGE2_GET_LOC_WAKEUP_PATTERN(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X04, 24, 8)
-#define AOAC_RSVD_PAGE2_SET_LOC_WAKEUP_PATTERN(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X04, 24, 8, __value)
-#define D0_SCAN_OFFLOAD_INFO_GET_CMD_ID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define D0_SCAN_OFFLOAD_INFO_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define D0_SCAN_OFFLOAD_INFO_GET_CLASS(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define D0_SCAN_OFFLOAD_INFO_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define D0_SCAN_OFFLOAD_INFO_GET_LOC_CHANNEL_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define D0_SCAN_OFFLOAD_INFO_SET_LOC_CHANNEL_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define CHANNEL_SWITCH_OFFLOAD_GET_CMD_ID(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define CHANNEL_SWITCH_OFFLOAD_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define CHANNEL_SWITCH_OFFLOAD_GET_CLASS(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define CHANNEL_SWITCH_OFFLOAD_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define CHANNEL_SWITCH_OFFLOAD_GET_CHANNEL_NUM(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define CHANNEL_SWITCH_OFFLOAD_SET_CHANNEL_NUM(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define CHANNEL_SWITCH_OFFLOAD_GET_EN_RFE(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define CHANNEL_SWITCH_OFFLOAD_SET_EN_RFE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#define CHANNEL_SWITCH_OFFLOAD_GET_RFE_TYPE(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 24, 8)
-#define CHANNEL_SWITCH_OFFLOAD_SET_RFE_TYPE(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 24, 8, __value)
-#define AOAC_RSVD_PAGE3_GET_CMD_ID(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 0, 5)
-#define AOAC_RSVD_PAGE3_SET_CMD_ID(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 0, 5, __value)
-#define AOAC_RSVD_PAGE3_GET_CLASS(__h2c) LE_BITS_TO_4BYTE(__h2c + 0X00, 5, 3)
-#define AOAC_RSVD_PAGE3_SET_CLASS(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 5, 3, __value)
-#define AOAC_RSVD_PAGE3_GET_LOC_NLO_INFO(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 8, 8)
-#define AOAC_RSVD_PAGE3_SET_LOC_NLO_INFO(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 8, 8, __value)
-#define AOAC_RSVD_PAGE3_GET_LOC_AOAC_REPORT(__h2c) \
- LE_BITS_TO_4BYTE(__h2c + 0X00, 16, 8)
-#define AOAC_RSVD_PAGE3_SET_LOC_AOAC_REPORT(__h2c, __value) \
- SET_BITS_TO_LE_4BYTE(__h2c + 0X00, 16, 8, __value)
-#endif
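All of the accessors in this removed header share one convention: an H2C command is a small little-endian byte buffer, and every field is addressed by a (byte offset, bit offset, bit width) triple. As a rough illustration only, with stand-in helpers that are assumptions rather than the driver's real LE_BITS_TO_4BYTE/SET_BITS_TO_LE_4BYTE macros, packing and reading back such a field could look like this:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the driver's little-endian bit-field helpers. */
static uint32_t le_bits_to_4byte(const uint8_t *p, int off, int len)
{
	uint32_t v = p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) |
		     ((uint32_t)p[3] << 24);
	uint32_t mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1);

	return (v >> off) & mask;
}

static void set_bits_to_le_4byte(uint8_t *p, int off, int len, uint32_t val)
{
	uint32_t mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1);
	uint32_t v = p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) |
		     ((uint32_t)p[3] << 24);

	v = (v & ~(mask << off)) | ((val & mask) << off);
	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
}

int main(void)
{
	uint8_t h2c[8] = { 0 };

	/* Field positions mirror the GET/SET macros above; the values written
	 * here are hypothetical, not real command IDs.
	 */
	set_bits_to_le_4byte(h2c + 0x00, 0, 5, 0x00);  /* cmd id field */
	set_bits_to_le_4byte(h2c + 0x00, 5, 3, 0x01);  /* class field */
	set_bits_to_le_4byte(h2c + 0x00, 16, 4, 2);    /* an RLBM-style field */

	printf("field readback: %u\n", le_bits_to_4byte(h2c + 0x00, 16, 4));
	return 0;
}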
diff --git a/drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h b/drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h
deleted file mode 100644
index 802142be607d..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef HALMAC_POWER_SEQUENCE_CMD
-#define HALMAC_POWER_SEQUENCE_CMD
-
-#include "halmac_2_platform.h"
-#include "halmac_type.h"
-
-#define HALMAC_POLLING_READY_TIMEOUT_COUNT 20000
-
-/* The value of cmd : 4 bits */
-
-/* offset : the read register offset
- * msk : the mask of the read value
- * value : N/A, leave as 0
- * Note : driver shall implement this cmd by read & msk
- */
-#define HALMAC_PWR_CMD_READ 0x00
-/*
- * offset: the read register offset
- * msk: the mask of the write bits
- * value: write value
- * Note: driver shall implement this cmd by read & msk after write
- */
-#define HALMAC_PWR_CMD_WRITE 0x01
-/* offset: the read register offset
- * msk: the mask of the polled value
- * value: the value to be polled, masked by the msk field.
- * Note: driver shall implement this cmd by
- * do{
- * if( (Read(offset) & msk) == (value & msk) )
- * break;
- * } while(not timeout);
- */
-#define HALMAC_PWR_CMD_POLLING 0x02
-/* offset: the number of delay units
- * msk: N/A
- * value: the unit of delay, 0: us, 1: ms
- */
-#define HALMAC_PWR_CMD_DELAY 0x03
-/* offset: N/A
- * msk: N/A
- * value: N/A
- */
-#define HALMAC_PWR_CMD_END 0x04
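The POLLING description above boils down to a bounded read-and-compare loop. A minimal, self-contained sketch of that loop follows; the fake register, read helper and poll interval are illustrative stand-ins, not the driver's real bus accessors:

#include <stdint.h>
#include <stdio.h>

/* A fake one-register "chip" so the sketch runs standalone; a real driver
 * would use its bus-specific register read and a proper delay primitive.
 */
static uint32_t fake_reg = 0x02;

static uint32_t read_reg(uint16_t offset)
{
	(void)offset;
	return fake_reg;
}

static int pwr_cmd_polling(uint16_t offset, uint8_t msk, uint8_t value)
{
	unsigned int cnt = 20000;	/* cf. HALMAC_POLLING_READY_TIMEOUT_COUNT */

	do {
		if ((read_reg(offset) & msk) == (uint32_t)(value & msk))
			return 0;	/* masked value matched: ready */
		/* a real implementation would delay between polls here */
	} while (--cnt);

	return -1;			/* timed out */
}

int main(void)
{
	printf("poll result: %d\n", pwr_cmd_polling(0x0005, 0x02, 0x02));
	return 0;
}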
-
-/* The value of base : 4 bits */
-
-/* define the base address of each block */
-#define HALMAC_PWR_BASEADDR_MAC 0x00
-#define HALMAC_PWR_BASEADDR_USB 0x01
-#define HALMAC_PWR_BASEADDR_PCIE 0x02
-#define HALMAC_PWR_BASEADDR_SDIO 0x03
-
-/* The value of interface_msk : 4 bits */
-#define HALMAC_PWR_INTF_SDIO_MSK BIT(0)
-#define HALMAC_PWR_INTF_USB_MSK BIT(1)
-#define HALMAC_PWR_INTF_PCI_MSK BIT(2)
-#define HALMAC_PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-
-/* The value of fab_msk : 4 bits */
-#define HALMAC_PWR_FAB_TSMC_MSK BIT(0)
-#define HALMAC_PWR_FAB_UMC_MSK BIT(1)
-#define HALMAC_PWR_FAB_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-
-/* The value of cut_msk : 8 bits */
-#define HALMAC_PWR_CUT_TESTCHIP_MSK BIT(0)
-#define HALMAC_PWR_CUT_A_MSK BIT(1)
-#define HALMAC_PWR_CUT_B_MSK BIT(2)
-#define HALMAC_PWR_CUT_C_MSK BIT(3)
-#define HALMAC_PWR_CUT_D_MSK BIT(4)
-#define HALMAC_PWR_CUT_E_MSK BIT(5)
-#define HALMAC_PWR_CUT_F_MSK BIT(6)
-#define HALMAC_PWR_CUT_G_MSK BIT(7)
-#define HALMAC_PWR_CUT_ALL_MSK 0xFF
-
-enum halmac_pwrseq_cmd_delay_unit_ {
- HALMAC_PWRSEQ_DELAY_US,
- HALMAC_PWRSEQ_DELAY_MS,
-};
-
-/* Endianness is not an issue here, because each element of the power sequence vector is at a fixed address */
-struct halmac_wl_pwr_cfg_ {
- u16 offset;
- u8 cut_msk;
- u8 fab_msk : 4;
- u8 interface_msk : 4;
- u8 base : 4;
- u8 cmd : 4;
- u8 msk;
- u8 value;
-};
-
-#endif
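Putting the table format together: an entry applies only when the current cut, fab and interface each intersect the entry's masks, and the vector is walked until a HALMAC_PWR_CMD_END entry. The following is a simplified, self-contained sketch under those assumptions, not the driver's actual power-sequence parser:

#include <stdint.h>
#include <stdio.h>

/* Simplified copy of the table entry layout described above. */
struct wl_pwr_cfg {
	uint16_t offset;
	uint8_t cut_msk;
	uint8_t fab_msk : 4;
	uint8_t interface_msk : 4;
	uint8_t base : 4;
	uint8_t cmd : 4;
	uint8_t msk;
	uint8_t value;
};

enum {
	CMD_READ = 0x00, CMD_WRITE = 0x01, CMD_POLLING = 0x02,
	CMD_DELAY = 0x03, CMD_END = 0x04,
};

/* Hypothetical register array standing in for the driver's bus accessors. */
static uint8_t regs[0x100];
static uint8_t read8(uint16_t off)           { return regs[off]; }
static void write8(uint16_t off, uint8_t v)  { regs[off] = v; }

static void run_pwr_seq(const struct wl_pwr_cfg *seq,
			uint8_t cut_msk, uint8_t fab_msk, uint8_t intf_msk)
{
	for (; seq->cmd != CMD_END; seq++) {
		/* Skip entries that do not apply to this cut/fab/interface. */
		if (!(seq->cut_msk & cut_msk) || !(seq->fab_msk & fab_msk) ||
		    !(seq->interface_msk & intf_msk))
			continue;

		switch (seq->cmd) {
		case CMD_WRITE:
			/* read-modify-write the masked bits, as the comments
			 * in the deleted header describe */
			write8(seq->offset,
			       (read8(seq->offset) & ~seq->msk) |
			       (seq->value & seq->msk));
			break;
		case CMD_POLLING:
			/* see the polling sketch earlier in this file */
			break;
		case CMD_DELAY:
			/* offset is the amount, value selects us vs. ms */
			break;
		default:
			break;
		}
	}
}

int main(void)
{
	static const struct wl_pwr_cfg demo[] = {
		{ 0x0005, 0xFF, 0xF, 0xF, 0x00, CMD_WRITE, 0x80, 0x00 },
		{ 0x0000, 0xFF, 0xF, 0xF, 0x00, CMD_END,   0x00, 0x00 },
	};

	run_pwr_seq(demo, 0x02 /* cut A */, 0x01 /* TSMC */, 0x04 /* PCI */);
	printf("reg 0x0005 = 0x%02x\n", read8(0x0005));
	return 0;
}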
diff --git a/drivers/staging/rtlwifi/halmac/halmac_reg2.h b/drivers/staging/rtlwifi/halmac/halmac_reg2.h
deleted file mode 100644
index 34ab11d8d97b..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_reg2.h
+++ /dev/null
@@ -1,1121 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HALMAC_COM_REG_H__
-#define __HALMAC_COM_REG_H__
-/*-------------------------Modification Log-----------------------------------
- * For Page0, it is based on Combo_And_WL_Only_Page0_Reg.xls SVN524
- * The supported ICs are 8723A, 8881A, 8723B and 8192E
- * 8812A and 8188E are not included in the page0 registers
- *
- * For other pages, it is based on MAC_Register.doc SVN502
- * Most ICs are the same as 8812A
- *-------------------------Modification Log-----------------------------------
- */
-
-/*--------------------------Include File--------------------------------------*/
-
-#define REG_SYS_ISO_CTRL 0x0000
-
-#define REG_SDIO_TX_CTRL 0x10250000
-
-#define REG_SYS_FUNC_EN 0x0002
-#define REG_SYS_PW_CTRL 0x0004
-#define REG_SYS_CLK_CTRL 0x0008
-#define REG_SYS_EEPROM_CTRL 0x000A
-#define REG_EE_VPD 0x000C
-#define REG_SYS_SWR_CTRL1 0x0010
-#define REG_SYS_SWR_CTRL2 0x0014
-
-#define REG_SDIO_HIMR 0x10250014
-
-#define REG_SYS_SWR_CTRL3 0x0018
-
-#define REG_SDIO_HISR 0x10250018
-
-#define REG_RSV_CTRL 0x001C
-
-#define REG_SDIO_RX_REQ_LEN 0x1025001C
-
-#define REG_RF_CTRL 0x001F
-
-#define REG_SDIO_FREE_TXPG_SEQ_V1 0x1025001F
-
-#define REG_AFE_LDO_CTRL 0x0020
-
-#define REG_SDIO_FREE_TXPG 0x10250020
-
-#define REG_AFE_CTRL1 0x0024
-
-#define REG_SDIO_FREE_TXPG2 0x10250024
-
-#define REG_AFE_CTRL2 0x0028
-
-#define REG_SDIO_OQT_FREE_TXPG_V1 0x10250028
-
-#define REG_AFE_CTRL3 0x002C
-#define REG_EFUSE_CTRL 0x0030
-
-#define REG_SDIO_HTSFR_INFO 0x10250030
-
-#define REG_LDO_EFUSE_CTRL 0x0034
-#define REG_PWR_OPTION_CTRL 0x0038
-
-#define REG_SDIO_HCPWM1_V2 0x10250038
-#define REG_SDIO_HCPWM2_V2 0x1025003A
-
-#define REG_CAL_TIMER 0x003C
-#define REG_ACLK_MON 0x003E
-#define REG_GPIO_MUXCFG 0x0040
-
-#define REG_SDIO_INDIRECT_REG_CFG 0x10250040
-
-#define REG_GPIO_PIN_CTRL 0x0044
-
-#define REG_SDIO_INDIRECT_REG_DATA 0x10250044
-
-#define REG_GPIO_INTM 0x0048
-#define REG_LED_CFG 0x004C
-#define REG_FSIMR 0x0050
-#define REG_FSISR 0x0054
-#define REG_HSIMR 0x0058
-#define REG_HSISR 0x005C
-#define REG_GPIO_EXT_CTRL 0x0060
-
-#define REG_SDIO_H2C 0x10250060
-
-#define REG_PAD_CTRL1 0x0064
-
-#define REG_SDIO_C2H 0x10250064
-
-#define REG_WL_BT_PWR_CTRL 0x0068
-
-#define REG_SDM_DEBUG 0x006C
-
-#define REG_SYS_SDIO_CTRL 0x0070
-
-#define REG_HCI_OPT_CTRL 0x0074
-
-#define REG_AFE_CTRL4 0x0078
-
-#define REG_LDO_SWR_CTRL 0x007C
-
-#define REG_MCUFW_CTRL 0x0080
-
-#define REG_SDIO_HRPWM1 0x10250080
-#define REG_SDIO_HRPWM2 0x10250082
-
-#define REG_MCU_TST_CFG 0x0084
-
-#define REG_SDIO_HPS_CLKR 0x10250084
-#define REG_SDIO_BUS_CTRL 0x10250085
-
-#define REG_SDIO_HSUS_CTRL 0x10250086
-
-#define REG_HMEBOX_E0_E1 0x0088
-
-#define REG_SDIO_RESPONSE_TIMER 0x10250088
-
-#define REG_SDIO_CMD_CRC 0x1025008A
-
-#define REG_HMEBOX_E2_E3 0x008C
-#define REG_WLLPS_CTRL 0x0090
-
-#define REG_SDIO_HSISR 0x10250090
-#define REG_SDIO_HSIMR 0x10250091
-
-#define REG_AFE_CTRL5 0x0094
-
-#define REG_GPIO_DEBOUNCE_CTRL 0x0098
-#define REG_RPWM2 0x009C
-#define REG_SYSON_FSM_MON 0x00A0
-
-#define REG_AFE_CTRL6 0x00A4
-
-#define REG_PMC_DBG_CTRL1 0x00A8
-
-#define REG_AFE_CTRL7 0x00AC
-
-#define REG_HIMR0 0x00B0
-#define REG_HISR0 0x00B4
-#define REG_HIMR1 0x00B8
-#define REG_HISR1 0x00BC
-#define REG_DBG_PORT_SEL 0x00C0
-
-#define REG_SDIO_ERR_RPT 0x102500C0
-#define REG_SDIO_CMD_ERRCNT 0x102500C1
-#define REG_SDIO_DATA_ERRCNT 0x102500C2
-
-#define REG_PAD_CTRL2 0x00C4
-
-#define REG_SDIO_CMD_ERR_CONTENT 0x102500C4
-
-#define REG_SDIO_CRC_ERR_IDX 0x102500C9
-#define REG_SDIO_DATA_CRC 0x102500CA
-#define REG_SDIO_DATA_REPLY_TIME 0x102500CB
-
-#define REG_PMC_DBG_CTRL2 0x00CC
-#define REG_BIST_CTRL 0x00D0
-#define REG_BIST_RPT 0x00D4
-#define REG_MEM_CTRL 0x00D8
-
-#define REG_AFE_CTRL8 0x00DC
-
-#define REG_USB_SIE_INTF 0x00E0
-#define REG_PCIE_MIO_INTF 0x00E4
-#define REG_PCIE_MIO_INTD 0x00E8
-
-#define REG_WLRF1 0x00EC
-
-#define REG_SYS_CFG1 0x00F0
-#define REG_SYS_STATUS1 0x00F4
-#define REG_SYS_STATUS2 0x00F8
-#define REG_SYS_CFG2 0x00FC
-#define REG_CR 0x0100
-
-#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
-
-#define REG_TSF_CLK_STATE 0x0108
-#define REG_TXDMA_PQ_MAP 0x010C
-#define REG_TRXFF_BNDY 0x0114
-
-#define REG_PTA_I2C_MBOX 0x0118
-
-#define REG_RXFF_BNDY 0x011C
-
-#define REG_FE1IMR 0x0120
-
-#define REG_FE1ISR 0x0124
-
-#define REG_CPWM 0x012C
-#define REG_FWIMR 0x0130
-#define REG_FWISR 0x0134
-#define REG_FTIMR 0x0138
-#define REG_FTISR 0x013C
-#define REG_PKTBUF_DBG_CTRL 0x0140
-#define REG_PKTBUF_DBG_DATA_L 0x0144
-#define REG_PKTBUF_DBG_DATA_H 0x0148
-#define REG_CPWM2 0x014C
-#define REG_TC0_CTRL 0x0150
-#define REG_TC1_CTRL 0x0154
-#define REG_TC2_CTRL 0x0158
-#define REG_TC3_CTRL 0x015C
-#define REG_TC4_CTRL 0x0160
-#define REG_TCUNIT_BASE 0x0164
-#define REG_TC5_CTRL 0x0168
-#define REG_TC6_CTRL 0x016C
-#define REG_MBIST_FAIL 0x0170
-#define REG_MBIST_START_PAUSE 0x0174
-#define REG_MBIST_DONE 0x0178
-
-#define REG_MBIST_FAIL_NRML 0x017C
-
-#define REG_AES_DECRPT_DATA 0x0180
-#define REG_AES_DECRPT_CFG 0x0184
-
-#define REG_TMETER 0x0190
-#define REG_OSC_32K_CTRL 0x0194
-#define REG_32K_CAL_REG1 0x0198
-#define REG_C2HEVT 0x01A0
-
-#define REG_C2HEVT_1 0x01A4
-#define REG_C2HEVT_2 0x01A8
-#define REG_C2HEVT_3 0x01AC
-
-#define REG_SW_DEFINED_PAGE1 0x01B8
-
-#define REG_MCUTST_I 0x01C0
-#define REG_MCUTST_II 0x01C4
-#define REG_FMETHR 0x01C8
-#define REG_HMETFR 0x01CC
-#define REG_HMEBOX0 0x01D0
-#define REG_HMEBOX1 0x01D4
-#define REG_HMEBOX2 0x01D8
-#define REG_HMEBOX3 0x01DC
-#define REG_LLT_INIT 0x01E0
-
-#define REG_LLT_INIT_ADDR 0x01E4
-
-#define REG_BB_ACCESS_CTRL 0x01E8
-#define REG_BB_ACCESS_DATA 0x01EC
-#define REG_HMEBOX_E0 0x01F0
-#define REG_HMEBOX_E1 0x01F4
-#define REG_HMEBOX_E2 0x01F8
-#define REG_HMEBOX_E3 0x01FC
-
-#define REG_FIFOPAGE_CTRL_1 0x0200
-
-#define REG_FIFOPAGE_CTRL_2 0x0204
-
-#define REG_AUTO_LLT_V1 0x0208
-
-#define REG_TXDMA_OFFSET_CHK 0x020C
-#define REG_TXDMA_STATUS 0x0210
-
-#define REG_TX_DMA_DBG 0x0214
-
-#define REG_TQPNT1 0x0218
-#define REG_TQPNT2 0x021C
-
-#define REG_TQPNT3 0x0220
-
-#define REG_TQPNT4 0x0224
-
-#define REG_RQPN_CTRL_1 0x0228
-#define REG_RQPN_CTRL_2 0x022C
-#define REG_FIFOPAGE_INFO_1 0x0230
-#define REG_FIFOPAGE_INFO_2 0x0234
-#define REG_FIFOPAGE_INFO_3 0x0238
-#define REG_FIFOPAGE_INFO_4 0x023C
-#define REG_FIFOPAGE_INFO_5 0x0240
-
-#define REG_H2C_HEAD 0x0244
-#define REG_H2C_TAIL 0x0248
-#define REG_H2C_READ_ADDR 0x024C
-#define REG_H2C_WR_ADDR 0x0250
-#define REG_H2C_INFO 0x0254
-
-#define REG_RXDMA_AGG_PG_TH 0x0280
-#define REG_RXPKT_NUM 0x0284
-#define REG_RXDMA_STATUS 0x0288
-#define REG_RXDMA_DPR 0x028C
-#define REG_RXDMA_MODE 0x0290
-#define REG_C2H_PKT 0x0294
-
-#define REG_FWFF_C2H 0x0298
-#define REG_FWFF_CTRL 0x029C
-#define REG_FWFF_PKT_INFO 0x02A0
-
-#define REG_PCIE_CTRL 0x0300
-
-#define REG_INT_MIG 0x0304
-#define REG_BCNQ_TXBD_DESA 0x0308
-#define REG_MGQ_TXBD_DESA 0x0310
-#define REG_VOQ_TXBD_DESA 0x0318
-#define REG_VIQ_TXBD_DESA 0x0320
-#define REG_BEQ_TXBD_DESA 0x0328
-#define REG_BKQ_TXBD_DESA 0x0330
-#define REG_RXQ_RXBD_DESA 0x0338
-#define REG_HI0Q_TXBD_DESA 0x0340
-#define REG_HI1Q_TXBD_DESA 0x0348
-#define REG_HI2Q_TXBD_DESA 0x0350
-#define REG_HI3Q_TXBD_DESA 0x0358
-#define REG_HI4Q_TXBD_DESA 0x0360
-#define REG_HI5Q_TXBD_DESA 0x0368
-#define REG_HI6Q_TXBD_DESA 0x0370
-#define REG_HI7Q_TXBD_DESA 0x0378
-#define REG_MGQ_TXBD_NUM 0x0380
-#define REG_RX_RXBD_NUM 0x0382
-#define REG_VOQ_TXBD_NUM 0x0384
-#define REG_VIQ_TXBD_NUM 0x0386
-#define REG_BEQ_TXBD_NUM 0x0388
-#define REG_BKQ_TXBD_NUM 0x038A
-#define REG_HI0Q_TXBD_NUM 0x038C
-#define REG_HI1Q_TXBD_NUM 0x038E
-#define REG_HI2Q_TXBD_NUM 0x0390
-#define REG_HI3Q_TXBD_NUM 0x0392
-#define REG_HI4Q_TXBD_NUM 0x0394
-#define REG_HI5Q_TXBD_NUM 0x0396
-#define REG_HI6Q_TXBD_NUM 0x0398
-#define REG_HI7Q_TXBD_NUM 0x039A
-#define REG_TSFTIMER_HCI 0x039C
-#define REG_BD_RWPTR_CLR 0x039C
-#define REG_VOQ_TXBD_IDX 0x03A0
-#define REG_VIQ_TXBD_IDX 0x03A4
-#define REG_BEQ_TXBD_IDX 0x03A8
-#define REG_BKQ_TXBD_IDX 0x03AC
-#define REG_MGQ_TXBD_IDX 0x03B0
-#define REG_RXQ_RXBD_IDX 0x03B4
-#define REG_HI0Q_TXBD_IDX 0x03B8
-#define REG_HI1Q_TXBD_IDX 0x03BC
-#define REG_HI2Q_TXBD_IDX 0x03C0
-#define REG_HI3Q_TXBD_IDX 0x03C4
-#define REG_HI4Q_TXBD_IDX 0x03C8
-#define REG_HI5Q_TXBD_IDX 0x03CC
-#define REG_HI6Q_TXBD_IDX 0x03D0
-#define REG_HI7Q_TXBD_IDX 0x03D4
-
-#define REG_DBG_SEL_V1 0x03D8
-
-#define REG_PCIE_HRPWM1_V1 0x03D9
-
-#define REG_PCIE_HCPWM1_V1 0x03DA
-
-#define REG_PCIE_CTRL2 0x03DB
-
-#define REG_PCIE_HRPWM2_V1 0x03DC
-
-#define REG_PCIE_HCPWM2_V1 0x03DE
-
-#define REG_PCIE_H2C_MSG_V1 0x03E0
-
-#define REG_PCIE_C2H_MSG_V1 0x03E4
-
-#define REG_DBI_WDATA_V1 0x03E8
-
-#define REG_DBI_RDATA_V1 0x03EC
-
-#define REG_DBI_FLAG_V1 0x03F0
-
-#define REG_MDIO_V1 0x03F4
-
-#define REG_PCIE_MIX_CFG 0x03F8
-
-#define REG_HCI_MIX_CFG 0x03FC
-
-#define REG_Q0_INFO 0x0400
-#define REG_Q1_INFO 0x0404
-#define REG_Q2_INFO 0x0408
-#define REG_Q3_INFO 0x040C
-#define REG_MGQ_INFO 0x0410
-#define REG_HIQ_INFO 0x0414
-#define REG_BCNQ_INFO 0x0418
-#define REG_TXPKT_EMPTY 0x041A
-#define REG_CPU_MGQ_INFO 0x041C
-#define REG_FWHW_TXQ_CTRL 0x0420
-
-#define REG_DATAFB_SEL 0x0423
-
-#define REG_BCNQ_BDNY_V1 0x0424
-
-#define REG_LIFETIME_EN 0x0426
-
-#define REG_SPEC_SIFS 0x0428
-#define REG_RETRY_LIMIT 0x042A
-#define REG_TXBF_CTRL 0x042C
-#define REG_DARFRC 0x0430
-#define REG_RARFRC 0x0438
-#define REG_RRSR 0x0440
-#define REG_ARFR0 0x0444
-#define REG_ARFR1_V1 0x044C
-#define REG_CCK_CHECK 0x0454
-
-#define REG_AMPDU_MAX_TIME_V1 0x0455
-
-#define REG_BCNQ1_BDNY_V1 0x0456
-
-#define REG_AMPDU_MAX_LENGTH 0x0458
-#define REG_ACQ_STOP 0x045C
-
-#define REG_NDPA_RATE 0x045D
-
-#define REG_TX_HANG_CTRL 0x045E
-#define REG_NDPA_OPT_CTRL 0x045F
-
-#define REG_RD_RESP_PKT_TH 0x0463
-#define REG_CMDQ_INFO 0x0464
-#define REG_Q4_INFO 0x0468
-#define REG_Q5_INFO 0x046C
-#define REG_Q6_INFO 0x0470
-#define REG_Q7_INFO 0x0474
-
-#define REG_WMAC_LBK_BUF_HD_V1 0x0478
-#define REG_MGQ_BDNY_V1 0x047A
-
-#define REG_TXRPT_CTRL 0x047C
-#define REG_INIRTS_RATE_SEL 0x0480
-#define REG_BASIC_CFEND_RATE 0x0481
-#define REG_STBC_CFEND_RATE 0x0482
-#define REG_DATA_SC 0x0483
-#define REG_MACID_SLEEP3 0x0484
-#define REG_MACID_SLEEP1 0x0488
-#define REG_ARFR2_V1 0x048C
-#define REG_ARFR3_V1 0x0494
-#define REG_ARFR4 0x049C
-#define REG_ARFR5 0x04A4
-#define REG_TXRPT_START_OFFSET 0x04AC
-
-#define REG_POWER_STAGE1 0x04B4
-
-#define REG_POWER_STAGE2 0x04B8
-
-#define REG_SW_AMPDU_BURST_MODE_CTRL 0x04BC
-#define REG_PKT_LIFE_TIME 0x04C0
-#define REG_STBC_SETTING 0x04C4
-#define REG_STBC_SETTING2 0x04C5
-#define REG_QUEUE_CTRL 0x04C6
-#define REG_SINGLE_AMPDU_CTRL 0x04C7
-#define REG_PROT_MODE_CTRL 0x04C8
-#define REG_BAR_MODE_CTRL 0x04CC
-#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
-#define REG_MACID_SLEEP2 0x04D0
-#define REG_MACID_SLEEP 0x04D4
-
-#define REG_HW_SEQ0 0x04D8
-#define REG_HW_SEQ1 0x04DA
-#define REG_HW_SEQ2 0x04DC
-#define REG_HW_SEQ3 0x04DE
-
-#define REG_NULL_PKT_STATUS_V1 0x04E0
-
-#define REG_PTCL_ERR_STATUS 0x04E2
-
-#define REG_NULL_PKT_STATUS_EXTEND 0x04E3
-
-#define REG_VIDEO_ENHANCEMENT_FUN 0x04E4
-
-#define REG_BT_POLLUTE_PKT_CNT 0x04E8
-#define REG_PTCL_DBG 0x04EC
-
-#define REG_CPUMGQ_TIMER_CTRL2 0x04F4
-
-#define REG_DUMMY_PAGE4_V1 0x04FC
-#define REG_MOREDATA 0x04FE
-
-#define REG_EDCA_VO_PARAM 0x0500
-#define REG_EDCA_VI_PARAM 0x0504
-#define REG_EDCA_BE_PARAM 0x0508
-#define REG_EDCA_BK_PARAM 0x050C
-#define REG_BCNTCFG 0x0510
-#define REG_PIFS 0x0512
-#define REG_RDG_PIFS 0x0513
-#define REG_SIFS 0x0514
-#define REG_TSFTR_SYN_OFFSET 0x0518
-#define REG_AGGR_BREAK_TIME 0x051A
-#define REG_SLOT 0x051B
-#define REG_TX_PTCL_CTRL 0x0520
-#define REG_TXPAUSE 0x0522
-#define REG_DIS_TXREQ_CLR 0x0523
-#define REG_RD_CTRL 0x0524
-#define REG_MBSSID_CTRL 0x0526
-#define REG_P2PPS_CTRL 0x0527
-#define REG_PKT_LIFETIME_CTRL 0x0528
-#define REG_P2PPS_SPEC_STATE 0x052B
-
-#define REG_BAR_TX_CTRL 0x0530
-
-#define REG_QUEUE_INCOL_THR 0x0538
-#define REG_QUEUE_INCOL_EN 0x053C
-
-#define REG_TBTT_PROHIBIT 0x0540
-#define REG_P2PPS_STATE 0x0543
-#define REG_RD_NAV_NXT 0x0544
-#define REG_NAV_PROT_LEN 0x0546
-
-#define REG_BCN_CTRL 0x0550
-
-#define REG_BCN_CTRL_CLINT0 0x0551
-
-#define REG_MBID_NUM 0x0552
-#define REG_DUAL_TSF_RST 0x0553
-#define REG_MBSSID_BCN_SPACE 0x0554
-#define REG_DRVERLYINT 0x0558
-#define REG_BCNDMATIM 0x0559
-#define REG_ATIMWND 0x055A
-#define REG_USTIME_TSF 0x055C
-#define REG_BCN_MAX_ERR 0x055D
-#define REG_RXTSF_OFFSET_CCK 0x055E
-#define REG_RXTSF_OFFSET_OFDM 0x055F
-#define REG_TSFTR 0x0560
-
-#define REG_FREERUN_CNT 0x0568
-
-#define REG_ATIMWND1_V1 0x0570
-
-#define REG_TBTT_PROHIBIT_INFRA 0x0571
-
-#define REG_CTWND 0x0572
-#define REG_BCNIVLCUNT 0x0573
-#define REG_BCNDROPCTRL 0x0574
-#define REG_HGQ_TIMEOUT_PERIOD 0x0575
-
-#define REG_TXCMD_TIMEOUT_PERIOD 0x0576
-#define REG_MISC_CTRL 0x0577
-#define REG_BCN_CTRL_CLINT1 0x0578
-#define REG_BCN_CTRL_CLINT2 0x0579
-#define REG_BCN_CTRL_CLINT3 0x057A
-
-#define REG_EXTEND_CTRL 0x057B
-
-#define REG_P2PPS1_SPEC_STATE 0x057C
-#define REG_P2PPS1_STATE 0x057D
-#define REG_P2PPS2_SPEC_STATE 0x057E
-#define REG_P2PPS2_STATE 0x057F
-
-#define REG_PS_TIMER0 0x0580
-
-#define REG_PS_TIMER1 0x0584
-
-#define REG_PS_TIMER2 0x0588
-
-#define REG_TBTT_CTN_AREA 0x058C
-#define REG_FORCE_BCN_IFS 0x058E
-#define REG_TXOP_MIN 0x0590
-#define REG_PRE_BKF_TIME 0x0592
-#define REG_CROSS_TXOP_CTRL 0x0593
-
-#define REG_ATIMWND2 0x05A0
-#define REG_ATIMWND3 0x05A1
-#define REG_ATIMWND4 0x05A2
-#define REG_ATIMWND5 0x05A3
-#define REG_ATIMWND6 0x05A4
-#define REG_ATIMWND7 0x05A5
-#define REG_ATIMUGT 0x05A6
-#define REG_HIQ_NO_LMT_EN 0x05A7
-#define REG_DTIM_COUNTER_ROOT 0x05A8
-#define REG_DTIM_COUNTER_VAP1 0x05A9
-#define REG_DTIM_COUNTER_VAP2 0x05AA
-#define REG_DTIM_COUNTER_VAP3 0x05AB
-#define REG_DTIM_COUNTER_VAP4 0x05AC
-#define REG_DTIM_COUNTER_VAP5 0x05AD
-#define REG_DTIM_COUNTER_VAP6 0x05AE
-#define REG_DTIM_COUNTER_VAP7 0x05AF
-#define REG_DIS_ATIM 0x05B0
-
-#define REG_EARLY_128US 0x05B1
-#define REG_P2PPS1_CTRL 0x05B2
-#define REG_P2PPS2_CTRL 0x05B3
-#define REG_TIMER0_SRC_SEL 0x05B4
-#define REG_NOA_UNIT_SEL 0x05B5
-#define REG_P2POFF_DIS_TXTIME 0x05B7
-#define REG_MBSSID_BCN_SPACE2 0x05B8
-#define REG_MBSSID_BCN_SPACE3 0x05BC
-
-#define REG_ACMHWCTRL 0x05C0
-#define REG_ACMRSTCTRL 0x05C1
-#define REG_ACMAVG 0x05C2
-#define REG_VO_ADMTIME 0x05C4
-#define REG_VI_ADMTIME 0x05C6
-#define REG_BE_ADMTIME 0x05C8
-#define REG_EDCA_RANDOM_GEN 0x05CC
-#define REG_TXCMD_NOA_SEL 0x05CF
-#define REG_NOA_PARAM 0x05E0
-
-#define REG_P2P_RST 0x05F0
-#define REG_SCHEDULER_RST 0x05F1
-
-#define REG_SCH_TXCMD 0x05F8
-#define REG_PAGE5_DUMMY 0x05FC
-#define REG_WMAC_CR 0x0600
-
-#define REG_WMAC_FWPKT_CR 0x0601
-
-#define REG_BWOPMODE 0x0603
-
-#define REG_TCR 0x0604
-#define REG_RCR 0x0608
-#define REG_RX_PKT_LIMIT 0x060C
-#define REG_RX_DLK_TIME 0x060D
-#define REG_RX_DRVINFO_SZ 0x060F
-#define REG_MACID 0x0610
-#define REG_BSSID 0x0618
-#define REG_MAR 0x0620
-#define REG_MBIDCAMCFG_1 0x0628
-#define REG_MBIDCAMCFG_2 0x062C
-
-#define REG_WMAC_TCR_TSFT_OFS 0x0630
-#define REG_UDF_THSD 0x0632
-#define REG_ZLD_NUM 0x0633
-
-#define REG_STMP_THSD 0x0634
-#define REG_WMAC_TXTIMEOUT 0x0635
-#define REG_MCU_TEST_2_V1 0x0636
-
-#define REG_USTIME_EDCA 0x0638
-
-#define REG_MAC_SPEC_SIFS 0x063A
-#define REG_RESP_SIFS_CCK 0x063C
-#define REG_RESP_SIFS_OFDM 0x063E
-#define REG_ACKTO 0x0640
-#define REG_CTS2TO 0x0641
-#define REG_EIFS 0x0642
-
-#define REG_NAV_CTRL 0x0650
-#define REG_BACAMCMD 0x0654
-#define REG_BACAMCONTENT 0x0658
-#define REG_LBDLY 0x0660
-
-#define REG_WMAC_BACAM_RPMEN 0x0661
-
-#define REG_TX_RX 0x0662
-
-#define REG_WMAC_BITMAP_CTL 0x0663
-
-#define REG_RXERR_RPT 0x0664
-#define REG_WMAC_TRXPTCL_CTL 0x0668
-#define REG_CAMCMD 0x0670
-#define REG_CAMWRITE 0x0674
-#define REG_CAMREAD 0x0678
-#define REG_CAMDBG 0x067C
-#define REG_SECCFG 0x0680
-
-#define REG_RXFILTER_CATEGORY_1 0x0682
-#define REG_RXFILTER_ACTION_1 0x0683
-#define REG_RXFILTER_CATEGORY_2 0x0684
-#define REG_RXFILTER_ACTION_2 0x0685
-#define REG_RXFILTER_CATEGORY_3 0x0686
-#define REG_RXFILTER_ACTION_3 0x0687
-#define REG_RXFLTMAP3 0x0688
-#define REG_RXFLTMAP4 0x068A
-#define REG_RXFLTMAP5 0x068C
-#define REG_RXFLTMAP6 0x068E
-
-#define REG_WOW_CTRL 0x0690
-
-#define REG_NAN_RX_TSF_FILTER 0x0691
-
-#define REG_PS_RX_INFO 0x0692
-#define REG_WMMPS_UAPSD_TID 0x0693
-#define REG_LPNAV_CTRL 0x0694
-
-#define REG_WKFMCAM_CMD 0x0698
-#define REG_WKFMCAM_RWD 0x069C
-
-#define REG_RXFLTMAP0 0x06A0
-#define REG_RXFLTMAP1 0x06A2
-#define REG_RXFLTMAP 0x06A4
-#define REG_BCN_PSR_RPT 0x06A8
-
-#define REG_FLC_RPC 0x06AC
-#define REG_FLC_RPCT 0x06AD
-#define REG_FLC_PTS 0x06AE
-#define REG_FLC_TRPC 0x06AF
-
-#define REG_RXPKTMON_CTRL 0x06B0
-
-#define REG_STATE_MON 0x06B4
-
-#define REG_ERROR_MON 0x06B8
-#define REG_SEARCH_MACID 0x06BC
-
-#define REG_BT_COEX_TABLE 0x06C0
-
-#define REG_RXCMD_0 0x06D0
-#define REG_RXCMD_1 0x06D4
-
-#define REG_WMAC_RESP_TXINFO 0x06D8
-
-#define REG_BBPSF_CTRL 0x06DC
-
-#define REG_P2P_RX_BCN_NOA 0x06E0
-#define REG_ASSOCIATED_BFMER0_INFO 0x06E4
-#define REG_ASSOCIATED_BFMER1_INFO 0x06EC
-#define REG_TX_CSI_RPT_PARAM_BW20 0x06F4
-#define REG_TX_CSI_RPT_PARAM_BW40 0x06F8
-#define REG_TX_CSI_RPT_PARAM_BW80 0x06FC
-#define REG_MACID1 0x0700
-
-#define REG_BSSID1 0x0708
-
-#define REG_BCN_PSR_RPT1 0x0710
-#define REG_ASSOCIATED_BFMEE_SEL 0x0714
-#define REG_SND_PTCL_CTRL 0x0718
-#define REG_RX_CSI_RPT_INFO 0x071C
-#define REG_NS_ARP_CTRL 0x0720
-#define REG_NS_ARP_INFO 0x0724
-
-#define REG_BEAMFORMING_INFO_NSARP_V1 0x0728
-
-#define REG_BEAMFORMING_INFO_NSARP 0x072C
-
-#define REG_WMAC_RTX_CTX_SUBTYPE_CFG 0x0750
-
-#define REG_WMAC_SWAES_CFG 0x0760
-
-#define REG_BT_COEX_V2 0x0762
-
-#define REG_BT_COEX 0x0764
-
-#define REG_WLAN_ACT_MASK_CTRL 0x0768
-
-#define REG_BT_COEX_ENHANCED_INTR_CTRL 0x076E
-
-#define REG_BT_ACT_STATISTICS 0x0770
-
-#define REG_BT_STATISTICS_CONTROL_REGISTER 0x0778
-
-#define REG_BT_STATUS_REPORT_REGISTER 0x077C
-
-#define REG_BT_INTERRUPT_CONTROL_REGISTER 0x0780
-
-#define REG_WLAN_REPORT_TIME_OUT_CONTROL_REGISTER 0x0784
-
-#define REG_BT_ISOLATION_TABLE_REGISTER_REGISTER 0x0785
-
-#define REG_BT_INTERRUPT_STATUS_REGISTER 0x078F
-
-#define REG_BT_TDMA_TIME_REGISTER 0x0790
-
-#define REG_BT_ACT_REGISTER 0x0794
-
-#define REG_OBFF_CTRL_BASIC 0x0798
-
-#define REG_OBFF_CTRL2_TIMER 0x079C
-
-#define REG_LTR_CTRL_BASIC 0x07A0
-
-#define REG_LTR_CTRL2_TIMER_THRESHOLD 0x07A4
-
-#define REG_LTR_IDLE_LATENCY_V1 0x07A8
-#define REG_LTR_ACTIVE_LATENCY_V1 0x07AC
-
-#define REG_ANTENNA_TRAINING_CONTROL_REGISTER 0x07B0
-
-#define REG_WMAC_PKTCNT_RWD 0x07B8
-#define REG_WMAC_PKTCNT_CTRL 0x07BC
-
-#define REG_IQ_DUMP 0x07C0
-
-#define REG_WMAC_FTM_CTL 0x07CC
-
-#define REG_WMAC_IQ_MDPK_FUNC 0x07CE
-
-#define REG_WMAC_OPTION_FUNCTION 0x07D0
-
-#define REG_RX_FILTER_FUNCTION 0x07DA
-
-#define REG_NDP_SIG 0x07E0
-#define REG_TXCMD_INFO_FOR_RSP_PKT 0x07E4
-
-#define REG_RTS_ADDRESS_0 0x07F0
-
-#define REG_RTS_ADDRESS_1 0x07F8
-
-#define REG__RPFM_MAP1 0x07FE
-
-#define REG_SYS_CFG3 0x1000
-#define REG_SYS_CFG4 0x1034
-
-#define REG_SYS_CFG5 0x1070
-
-#define REG_CPU_DMEM_CON 0x1080
-
-#define REG_BOOT_REASON 0x1088
-#define REG_NFCPAD_CTRL 0x10A8
-
-#define REG_HIMR2 0x10B0
-#define REG_HISR2 0x10B4
-#define REG_HIMR3 0x10B8
-#define REG_HISR3 0x10BC
-#define REG_SW_MDIO 0x10C0
-#define REG_SW_FLUSH 0x10C4
-
-#define REG_H2C_PKT_READADDR 0x10D0
-#define REG_H2C_PKT_WRITEADDR 0x10D4
-
-#define REG_MEM_PWR_CRTL 0x10D8
-
-#define REG_FW_DBG0 0x10E0
-#define REG_FW_DBG1 0x10E4
-#define REG_FW_DBG2 0x10E8
-#define REG_FW_DBG3 0x10EC
-#define REG_FW_DBG4 0x10F0
-#define REG_FW_DBG5 0x10F4
-#define REG_FW_DBG6 0x10F8
-#define REG_FW_DBG7 0x10FC
-#define REG_CR_EXT 0x1100
-#define REG_FWFF 0x1114
-
-#define REG_RXFF_PTR_V1 0x1118
-#define REG_RXFF_WTR_V1 0x111C
-
-#define REG_FE2IMR 0x1120
-#define REG_FE2ISR 0x1124
-#define REG_FE3IMR 0x1128
-#define REG_FE3ISR 0x112C
-#define REG_FE4IMR 0x1130
-#define REG_FE4ISR 0x1134
-#define REG_FT1IMR 0x1138
-#define REG_FT1ISR 0x113C
-#define REG_SPWR0 0x1140
-#define REG_SPWR1 0x1144
-#define REG_SPWR2 0x1148
-#define REG_SPWR3 0x114C
-#define REG_POWSEQ 0x1150
-
-#define REG_TC7_CTRL_V1 0x1158
-#define REG_TC8_CTRL_V1 0x115C
-
-#define REG_FT2IMR 0x11E0
-#define REG_FT2ISR 0x11E4
-
-#define REG_MSG2 0x11F0
-#define REG_MSG3 0x11F4
-#define REG_MSG4 0x11F8
-#define REG_MSG5 0x11FC
-#define REG_DDMA_CH0SA 0x1200
-#define REG_DDMA_CH0DA 0x1204
-#define REG_DDMA_CH0CTRL 0x1208
-#define REG_DDMA_CH1SA 0x1210
-#define REG_DDMA_CH1DA 0x1214
-#define REG_DDMA_CH1CTRL 0x1218
-#define REG_DDMA_CH2SA 0x1220
-#define REG_DDMA_CH2DA 0x1224
-#define REG_DDMA_CH2CTRL 0x1228
-#define REG_DDMA_CH3SA 0x1230
-#define REG_DDMA_CH3DA 0x1234
-#define REG_DDMA_CH3CTRL 0x1238
-#define REG_DDMA_CH4SA 0x1240
-#define REG_DDMA_CH4DA 0x1244
-#define REG_DDMA_CH4CTRL 0x1248
-#define REG_DDMA_CH5SA 0x1250
-#define REG_DDMA_CH5DA 0x1254
-
-#define REG_REG_DDMA_CH5CTRL 0x1258
-
-#define REG_DDMA_INT_MSK 0x12E0
-#define REG_DDMA_CHSTATUS 0x12E8
-#define REG_DDMA_CHKSUM 0x12F0
-#define REG_DDMA_MONITOR 0x12FC
-
-#define REG_STC_INT_CS 0x1300
-#define REG_ST_INT_CFG 0x1304
-#define REG_CMU_DLY_CTRL 0x1310
-#define REG_CMU_DLY_CFG 0x1314
-#define REG_H2CQ_TXBD_DESA 0x1320
-#define REG_H2CQ_TXBD_NUM 0x1328
-#define REG_H2CQ_TXBD_IDX 0x132C
-#define REG_H2CQ_CSR 0x1330
-
-#define REG_CHANGE_PCIE_SPEED 0x1350
-
-#define REG_OLD_DEHANG 0x13F4
-
-#define REG_Q0_Q1_INFO 0x1400
-#define REG_Q2_Q3_INFO 0x1404
-#define REG_Q4_Q5_INFO 0x1408
-#define REG_Q6_Q7_INFO 0x140C
-#define REG_MGQ_HIQ_INFO 0x1410
-#define REG_CMDQ_BCNQ_INFO 0x1414
-#define REG_USEREG_SETTING 0x1420
-#define REG_AESIV_SETTING 0x1424
-#define REG_BF0_TIME_SETTING 0x1428
-#define REG_BF1_TIME_SETTING 0x142C
-#define REG_BF_TIMEOUT_EN 0x1430
-#define REG_MACID_RELEASE0 0x1434
-#define REG_MACID_RELEASE1 0x1438
-#define REG_MACID_RELEASE2 0x143C
-#define REG_MACID_RELEASE3 0x1440
-#define REG_MACID_RELEASE_SETTING 0x1444
-#define REG_FAST_EDCA_VOVI_SETTING 0x1448
-#define REG_FAST_EDCA_BEBK_SETTING 0x144C
-#define REG_MACID_DROP0 0x1450
-#define REG_MACID_DROP1 0x1454
-#define REG_MACID_DROP2 0x1458
-#define REG_MACID_DROP3 0x145C
-
-#define REG_R_MACID_RELEASE_SUCCESS_0 0x1460
-#define REG_R_MACID_RELEASE_SUCCESS_1 0x1464
-#define REG_R_MACID_RELEASE_SUCCESS_2 0x1468
-#define REG_R_MACID_RELEASE_SUCCESS_3 0x146C
-#define REG_MGG_FIFO_CRTL 0x1470
-#define REG_MGG_FIFO_INT 0x1474
-#define REG_MGG_FIFO_LIFETIME 0x1478
-#define REG_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET 0x147C
-
-#define REG_MACID_SHCUT_OFFSET 0x1480
-
-#define REG_MU_TX_CTL 0x14C0
-#define REG_MU_STA_GID_VLD 0x14C4
-#define REG_MU_STA_USER_POS_INFO 0x14C8
-#define REG_MU_TRX_DBG_CNT 0x14D0
-
-#define REG_CPUMGQ_TX_TIMER 0x1500
-#define REG_PS_TIMER_A 0x1504
-#define REG_PS_TIMER_B 0x1508
-#define REG_PS_TIMER_C 0x150C
-#define REG_PS_TIMER_ABC_CPUMGQ_TIMER_CRTL 0x1510
-#define REG_CPUMGQ_TX_TIMER_EARLY 0x1514
-#define REG_PS_TIMER_A_EARLY 0x1515
-#define REG_PS_TIMER_B_EARLY 0x1516
-#define REG_PS_TIMER_C_EARLY 0x1517
-
-#define REG_BCN_PSR_RPT2 0x1600
-#define REG_BCN_PSR_RPT3 0x1604
-#define REG_BCN_PSR_RPT4 0x1608
-#define REG_A1_ADDR_MASK 0x160C
-#define REG_MACID2 0x1620
-#define REG_BSSID2 0x1628
-#define REG_MACID3 0x1630
-#define REG_BSSID3 0x1638
-#define REG_MACID4 0x1640
-#define REG_BSSID4 0x1648
-
-#define REG_NOA_REPORT 0x1650
-#define REG_PWRBIT_SETTING 0x1660
-#define REG_WMAC_MU_BF_OPTION 0x167C
-
-#define REG_WMAC_MU_ARB 0x167E
-#define REG_WMAC_MU_OPTION 0x167F
-#define REG_WMAC_MU_BF_CTL 0x1680
-
-#define REG_WMAC_MU_BFRPT_PARA 0x1682
-
-#define REG_WMAC_ASSOCIATED_MU_BFMEE2 0x1684
-#define REG_WMAC_ASSOCIATED_MU_BFMEE3 0x1686
-#define REG_WMAC_ASSOCIATED_MU_BFMEE4 0x1688
-#define REG_WMAC_ASSOCIATED_MU_BFMEE5 0x168A
-#define REG_WMAC_ASSOCIATED_MU_BFMEE6 0x168C
-#define REG_WMAC_ASSOCIATED_MU_BFMEE7 0x168E
-
-#define REG_TRANSMIT_ADDRSS_0 0x16A0
-#define REG_TRANSMIT_ADDRSS_1 0x16A8
-#define REG_TRANSMIT_ADDRSS_2 0x16B0
-#define REG_TRANSMIT_ADDRSS_3 0x16B8
-#define REG_TRANSMIT_ADDRSS_4 0x16C0
-
-#define REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1 0x1700
-#define REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1 0x1704
-#define REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1 0x1708
-
-/* ----------------------------------------------------- */
-/* */
-/* 0xFB00h ~ 0xFCFFh TX/RX packet buffer address */
-/* */
-/* ----------------------------------------------------- */
-#define REG_RXPKTBUF_STARTADDR 0xFB00
-#define REG_TXPKTBUF_STARTADDR 0xFC00
-
-/* ----------------------------------------------------- */
-/* */
-/* 0xFD00h ~ 0xFDFFh 8051 CPU Local REG */
-/* */
-/* ----------------------------------------------------- */
-#define REG_SYS_CTRL 0xFD00
-#define REG_PONSTS_RPT1 0xFD01
-#define REG_PONSTS_RPT2 0xFD02
-#define REG_PONSTS_RPT3 0xFD03
-#define REG_PONSTS_RPT4 0xFD04 /* 0x84 */
-#define REG_PONSTS_RPT5 0xFD05 /* 0x85 */
-#define REG_8051ERRFLAG 0xFD08
-#define REG_8051ERRFLAG_MASK 0xFD09
-#define REG_TXADDRH 0xFD10 /* Tx Packet High address */
-#define REG_RXADDRH 0xFD11 /* Rx Packet High address */
-#define REG_TXADDRH_EXT 0xFD12 /* 0xFD12[0] : for 8051 access txpktbuf
- * high64k as external register
- */
-
-#define REG_U3_STATE 0xFD48 /* (Read only)
- * [7:4] : usb3 changed last state.
- * [3:0] : usb3 state
- */
-
-/* for MAILBOX */
-#define REG_OUTDATA0 0xFD50
-#define REG_OUTDATA1 0xFD54
-#define REG_OUTRDY 0xFD58 /* bit[0] : OutReady,
- * bit[1] : OutEmptyIntEn
- */
-
-#define REG_INDATA0 0xFD60
-#define REG_INDATA1 0xFD64
-#define REG_INRDY 0xFD68 /* bit[0] : InReady,
- * bit[1] : InRdyIntEn
- */
-
-/* MCU ERROR debug REG */
-#define REG_MCUERR_PCLSB 0xFD90 /* PC[7:0] */
-#define REG_MCUERR_PCMSB 0xFD91 /* PC[15:8] */
-#define REG_MCUERR_ACC 0xFD92
-#define REG_MCUERR_B 0xFD93
-#define REG_MCUERR_DPTRLSB 0xFD94 /* DPTR[7:0] */
-#define REG_MCUERR_DPTRMSB 0xFD95 /* DPTR[15:8] */
-#define REG_MCUERR_SP 0xFD96 /* SP[7:0] */
-#define REG_MCUERR_IE 0xFD97 /* IE[7:0] */
-#define REG_MCUERR_EIE 0xFD98 /* EIE[7:0] */
-#define REG_VERA_SIM 0xFD9F
-/* 0xFD99~0xFD9F are reserved.. */
-
-/* ----------------------------------------------------- */
-/* */
-/* 0xFE00h ~ 0xFEFFh USB Configuration */
-/* */
-/* ----------------------------------------------------- */
-
-/* RTS5101 USB Register Definition */
-#define REG_USB_SETUP_DEC_INT 0xFE00
-#define REG_USB_DMACTL 0xFE01
-#define REG_USB_IRQSTAT0 0xFE02
-#define REG_USB_IRQSTAT1 0xFE03
-#define REG_USB_IRQEN0 0xFE04
-#define REG_USB_IRQEN1 0xFE05
-#define REG_USB_AUTOPTRL 0xFE06
-#define REG_USB_AUTOPTRH 0xFE07
-#define REG_USB_AUTODAT 0xFE08
-
-#define REG_USB_SCRATCH0 0xFE09
-#define REG_USB_SCRATCH1 0xFE0A
-#define REG_USB_SEEPROM 0xFE0B
-#define REG_USB_GPIO0 0xFE0C
-#define REG_USB_GPIO0DIR 0xFE0D
-#define REG_USB_CLKSEL 0xFE0E
-#define REG_USB_BOOTCTL 0xFE0F
-
-#define REG_USB_USBCTL 0xFE10
-#define REG_USB_USBSTAT 0xFE11
-#define REG_USB_DEVADDR 0xFE12
-#define REG_USB_USBTEST 0xFE13
-#define REG_USB_FNUM0 0xFE14
-#define REG_USB_FNUM1 0xFE15
-
-#define REG_USB_EP_IDX 0xFE20
-#define REG_USB_EP_CFG 0xFE21
-#define REG_USB_EP_CTL 0xFE22
-#define REG_USB_EP_STAT 0xFE23
-#define REG_USB_EP_IRQ 0xFE24
-#define REG_USB_EP_IRQEN 0xFE25
-#define REG_USB_EP_MAXPKT0 0xFE26
-#define REG_USB_EP_MAXPKT1 0xFE27
-#define REG_USB_EP_DAT 0xFE28
-#define REG_USB_EP_BC0 0xFE29
-#define REG_USB_EP_BC1 0xFE2A
-#define REG_USB_EP_TC0 0xFE2B
-#define REG_USB_EP_TC1 0xFE2C
-#define REG_USB_EP_TC2 0xFE2D
-#define REG_USB_EP_CTL2 0xFE2E
-
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
-#define REG_USB_VID 0xFE60
-#define REG_USB_PID 0xFE62
-#define REG_USB_OPT 0xFE64
-#define REG_USB_CONFIG 0xFE65 /* RX EP setting.
- * 0xFE65 Bit[3:0] : RXQ,
- * Bit[7:4] : INTQ
- */
- /* TX EP setting.
- * 0xFE66 Bit[3:0] : TXQ0,
- * Bit[7:4] : TXQ1,
- * 0xFE67 Bit[3:0] : TXQ2
- */
-#define REG_USB_PHY_PARA1 0xFE68 /* Bit[7:4]: XCVR_SEN (USB PHY 0xE2[7:4]),
- * Bit[3:0]: XCVR_SH (USB PHY 0xE2[3:0])
- */
-#define REG_USB_PHY_PARA2 0xFE69 /* Bit[7:5]: XCVR_BG (USB PHY 0xE3[5:3]),
- * Bit[4:2]: XCVR_DR (USB PHY 0xE3[2:0]),
- * Bit[1]: SE0_LVL (USB PHY 0xE5[7]),
- * Bit[0]: FORCE_XTL_ON (USB PHY 0xE5[1])
- */
-#define REG_USB_PHY_PARA3 0xFE6A /* Bit[7:5]: XCVR_SRC (USB PHY 0xE5[4:2]),
- * Bit[4]: LATE_DLLEN (USB PHY 0xF0[4]),
- * Bit[3]: HS_LP_MODE (USB PHY 0xF0[3]),
- * Bit[2]: UTMI_POS_OUT (USB PHY 0xF1 [7]),
- * Bit[1:0]: TX_DELAY (USB PHY 0xF1 [2:1])
- */
-#define REG_USB_PHY_PARA4 0xFE6B /* (USB PHY 0xE7[7:0]) */
-#define REG_USB_OPT2 0xFE6C
-#define REG_USB_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
-#define REG_USB_MANUFACTURE_SETTING 0xFE80 /* 0xFE80~0xFE90 Max: 32 bytes*/
-#define REG_USB_PRODUCT_STRING 0xFEA0 /* 0xFEA0~0xFECF Max: 48 bytes*/
-#define REG_USB_SERIAL_NUMBER_STRING 0xFED0 /* 0xFED0~0xFEDF Max: 12 bytes*/
-
-#define REG_USB_ALTERNATE_SETTING 0xFE4F
-#define REG_USB_INT_BINTERVAL 0xFE6E
-#define REG_USB_GPS_EP_CONFIG 0xFE6D
-
-#endif /* __HALMAC_COM_REG_H__ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h b/drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h
deleted file mode 100644
index 48d5dc0df858..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h
+++ /dev/null
@@ -1,717 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __INC_HALMAC_REG_8822B_H
-#define __INC_HALMAC_REG_8822B_H
-
-#define REG_SYS_ISO_CTRL_8822B 0x0000
-#define REG_SYS_FUNC_EN_8822B 0x0002
-#define REG_SYS_PW_CTRL_8822B 0x0004
-#define REG_SYS_CLK_CTRL_8822B 0x0008
-#define REG_SYS_EEPROM_CTRL_8822B 0x000A
-#define REG_EE_VPD_8822B 0x000C
-#define REG_SYS_SWR_CTRL1_8822B 0x0010
-#define REG_SYS_SWR_CTRL2_8822B 0x0014
-#define REG_SYS_SWR_CTRL3_8822B 0x0018
-#define REG_RSV_CTRL_8822B 0x001C
-#define REG_RF_CTRL_8822B 0x001F
-#define REG_AFE_LDO_CTRL_8822B 0x0020
-#define REG_AFE_CTRL1_8822B 0x0024
-#define REG_AFE_CTRL2_8822B 0x0028
-#define REG_AFE_CTRL3_8822B 0x002C
-#define REG_EFUSE_CTRL_8822B 0x0030
-#define REG_LDO_EFUSE_CTRL_8822B 0x0034
-#define REG_PWR_OPTION_CTRL_8822B 0x0038
-#define REG_CAL_TIMER_8822B 0x003C
-#define REG_ACLK_MON_8822B 0x003E
-#define REG_GPIO_MUXCFG_8822B 0x0040
-#define REG_GPIO_PIN_CTRL_8822B 0x0044
-#define REG_GPIO_INTM_8822B 0x0048
-#define REG_LED_CFG_8822B 0x004C
-#define REG_FSIMR_8822B 0x0050
-#define REG_FSISR_8822B 0x0054
-#define REG_HSIMR_8822B 0x0058
-#define REG_HSISR_8822B 0x005C
-#define REG_GPIO_EXT_CTRL_8822B 0x0060
-#define REG_PAD_CTRL1_8822B 0x0064
-#define REG_WL_BT_PWR_CTRL_8822B 0x0068
-#define REG_SDM_DEBUG_8822B 0x006C
-#define REG_SYS_SDIO_CTRL_8822B 0x0070
-#define REG_HCI_OPT_CTRL_8822B 0x0074
-#define REG_AFE_CTRL4_8822B 0x0078
-#define REG_LDO_SWR_CTRL_8822B 0x007C
-#define REG_MCUFW_CTRL_8822B 0x0080
-#define REG_MCU_TST_CFG_8822B 0x0084
-#define REG_HMEBOX_E0_E1_8822B 0x0088
-#define REG_HMEBOX_E2_E3_8822B 0x008C
-#define REG_WLLPS_CTRL_8822B 0x0090
-#define REG_AFE_CTRL5_8822B 0x0094
-#define REG_GPIO_DEBOUNCE_CTRL_8822B 0x0098
-#define REG_RPWM2_8822B 0x009C
-#define REG_SYSON_FSM_MON_8822B 0x00A0
-#define REG_AFE_CTRL6_8822B 0x00A4
-#define REG_PMC_DBG_CTRL1_8822B 0x00A8
-#define REG_AFE_CTRL7_8822B 0x00AC
-#define REG_HIMR0_8822B 0x00B0
-#define REG_HISR0_8822B 0x00B4
-#define REG_HIMR1_8822B 0x00B8
-#define REG_HISR1_8822B 0x00BC
-#define REG_DBG_PORT_SEL_8822B 0x00C0
-#define REG_PAD_CTRL2_8822B 0x00C4
-#define REG_PMC_DBG_CTRL2_8822B 0x00CC
-#define REG_BIST_CTRL_8822B 0x00D0
-#define REG_BIST_RPT_8822B 0x00D4
-#define REG_MEM_CTRL_8822B 0x00D8
-#define REG_AFE_CTRL8_8822B 0x00DC
-#define REG_USB_SIE_INTF_8822B 0x00E0
-#define REG_PCIE_MIO_INTF_8822B 0x00E4
-#define REG_PCIE_MIO_INTD_8822B 0x00E8
-#define REG_WLRF1_8822B 0x00EC
-#define REG_SYS_CFG1_8822B 0x00F0
-#define REG_SYS_STATUS1_8822B 0x00F4
-#define REG_SYS_STATUS2_8822B 0x00F8
-#define REG_SYS_CFG2_8822B 0x00FC
-#define REG_SYS_CFG3_8822B 0x1000
-#define REG_SYS_CFG4_8822B 0x1034
-#define REG_SYS_CFG5_8822B 0x1070
-#define REG_CPU_DMEM_CON_8822B 0x1080
-#define REG_BOOT_REASON_8822B 0x1088
-#define REG_NFCPAD_CTRL_8822B 0x10A8
-#define REG_HIMR2_8822B 0x10B0
-#define REG_HISR2_8822B 0x10B4
-#define REG_HIMR3_8822B 0x10B8
-#define REG_HISR3_8822B 0x10BC
-#define REG_SW_MDIO_8822B 0x10C0
-#define REG_SW_FLUSH_8822B 0x10C4
-#define REG_H2C_PKT_READADDR_8822B 0x10D0
-#define REG_H2C_PKT_WRITEADDR_8822B 0x10D4
-#define REG_MEM_PWR_CRTL_8822B 0x10D8
-#define REG_FW_DBG0_8822B 0x10E0
-#define REG_FW_DBG1_8822B 0x10E4
-#define REG_FW_DBG2_8822B 0x10E8
-#define REG_FW_DBG3_8822B 0x10EC
-#define REG_FW_DBG4_8822B 0x10F0
-#define REG_FW_DBG5_8822B 0x10F4
-#define REG_FW_DBG6_8822B 0x10F8
-#define REG_FW_DBG7_8822B 0x10FC
-#define REG_CR_8822B 0x0100
-#define REG_PKT_BUFF_ACCESS_CTRL_8822B 0x0106
-#define REG_TSF_CLK_STATE_8822B 0x0108
-#define REG_TXDMA_PQ_MAP_8822B 0x010C
-#define REG_TRXFF_BNDY_8822B 0x0114
-#define REG_PTA_I2C_MBOX_8822B 0x0118
-#define REG_RXFF_BNDY_8822B 0x011C
-#define REG_FE1IMR_8822B 0x0120
-#define REG_FE1ISR_8822B 0x0124
-#define REG_CPWM_8822B 0x012C
-#define REG_FWIMR_8822B 0x0130
-#define REG_FWISR_8822B 0x0134
-#define REG_FTIMR_8822B 0x0138
-#define REG_FTISR_8822B 0x013C
-#define REG_PKTBUF_DBG_CTRL_8822B 0x0140
-#define REG_PKTBUF_DBG_DATA_L_8822B 0x0144
-#define REG_PKTBUF_DBG_DATA_H_8822B 0x0148
-#define REG_CPWM2_8822B 0x014C
-#define REG_TC0_CTRL_8822B 0x0150
-#define REG_TC1_CTRL_8822B 0x0154
-#define REG_TC2_CTRL_8822B 0x0158
-#define REG_TC3_CTRL_8822B 0x015C
-#define REG_TC4_CTRL_8822B 0x0160
-#define REG_TCUNIT_BASE_8822B 0x0164
-#define REG_TC5_CTRL_8822B 0x0168
-#define REG_TC6_CTRL_8822B 0x016C
-#define REG_MBIST_FAIL_8822B 0x0170
-#define REG_MBIST_START_PAUSE_8822B 0x0174
-#define REG_MBIST_DONE_8822B 0x0178
-#define REG_MBIST_FAIL_NRML_8822B 0x017C
-#define REG_AES_DECRPT_DATA_8822B 0x0180
-#define REG_AES_DECRPT_CFG_8822B 0x0184
-#define REG_TMETER_8822B 0x0190
-#define REG_OSC_32K_CTRL_8822B 0x0194
-#define REG_32K_CAL_REG1_8822B 0x0198
-#define REG_C2HEVT_8822B 0x01A0
-#define REG_SW_DEFINED_PAGE1_8822B 0x01B8
-#define REG_MCUTST_I_8822B 0x01C0
-#define REG_MCUTST_II_8822B 0x01C4
-#define REG_FMETHR_8822B 0x01C8
-#define REG_HMETFR_8822B 0x01CC
-#define REG_HMEBOX0_8822B 0x01D0
-#define REG_HMEBOX1_8822B 0x01D4
-#define REG_HMEBOX2_8822B 0x01D8
-#define REG_HMEBOX3_8822B 0x01DC
-#define REG_LLT_INIT_8822B 0x01E0
-#define REG_LLT_INIT_ADDR_8822B 0x01E4
-#define REG_BB_ACCESS_CTRL_8822B 0x01E8
-#define REG_BB_ACCESS_DATA_8822B 0x01EC
-#define REG_HMEBOX_E0_8822B 0x01F0
-#define REG_HMEBOX_E1_8822B 0x01F4
-#define REG_HMEBOX_E2_8822B 0x01F8
-#define REG_HMEBOX_E3_8822B 0x01FC
-#define REG_CR_EXT_8822B 0x1100
-#define REG_FWFF_8822B 0x1114
-#define REG_RXFF_PTR_V1_8822B 0x1118
-#define REG_RXFF_WTR_V1_8822B 0x111C
-#define REG_FE2IMR_8822B 0x1120
-#define REG_FE2ISR_8822B 0x1124
-#define REG_FE3IMR_8822B 0x1128
-#define REG_FE3ISR_8822B 0x112C
-#define REG_FE4IMR_8822B 0x1130
-#define REG_FE4ISR_8822B 0x1134
-#define REG_FT1IMR_8822B 0x1138
-#define REG_FT1ISR_8822B 0x113C
-#define REG_SPWR0_8822B 0x1140
-#define REG_SPWR1_8822B 0x1144
-#define REG_SPWR2_8822B 0x1148
-#define REG_SPWR3_8822B 0x114C
-#define REG_POWSEQ_8822B 0x1150
-#define REG_TC7_CTRL_V1_8822B 0x1158
-#define REG_TC8_CTRL_V1_8822B 0x115C
-#define REG_FT2IMR_8822B 0x11E0
-#define REG_FT2ISR_8822B 0x11E4
-#define REG_MSG2_8822B 0x11F0
-#define REG_MSG3_8822B 0x11F4
-#define REG_MSG4_8822B 0x11F8
-#define REG_MSG5_8822B 0x11FC
-#define REG_FIFOPAGE_CTRL_1_8822B 0x0200
-#define REG_FIFOPAGE_CTRL_2_8822B 0x0204
-#define REG_AUTO_LLT_V1_8822B 0x0208
-#define REG_TXDMA_OFFSET_CHK_8822B 0x020C
-#define REG_TXDMA_STATUS_8822B 0x0210
-#define REG_TX_DMA_DBG_8822B 0x0214
-#define REG_TQPNT1_8822B 0x0218
-#define REG_TQPNT2_8822B 0x021C
-#define REG_TQPNT3_8822B 0x0220
-#define REG_TQPNT4_8822B 0x0224
-#define REG_RQPN_CTRL_1_8822B 0x0228
-#define REG_RQPN_CTRL_2_8822B 0x022C
-#define REG_FIFOPAGE_INFO_1_8822B 0x0230
-#define REG_FIFOPAGE_INFO_2_8822B 0x0234
-#define REG_FIFOPAGE_INFO_3_8822B 0x0238
-#define REG_FIFOPAGE_INFO_4_8822B 0x023C
-#define REG_FIFOPAGE_INFO_5_8822B 0x0240
-#define REG_H2C_HEAD_8822B 0x0244
-#define REG_H2C_TAIL_8822B 0x0248
-#define REG_H2C_READ_ADDR_8822B 0x024C
-#define REG_H2C_WR_ADDR_8822B 0x0250
-#define REG_H2C_INFO_8822B 0x0254
-#define REG_RXDMA_AGG_PG_TH_8822B 0x0280
-#define REG_RXPKT_NUM_8822B 0x0284
-#define REG_RXDMA_STATUS_8822B 0x0288
-#define REG_RXDMA_DPR_8822B 0x028C
-#define REG_RXDMA_MODE_8822B 0x0290
-#define REG_C2H_PKT_8822B 0x0294
-#define REG_FWFF_C2H_8822B 0x0298
-#define REG_FWFF_CTRL_8822B 0x029C
-#define REG_FWFF_PKT_INFO_8822B 0x02A0
-#define REG_DDMA_CH0SA_8822B 0x1200
-#define REG_DDMA_CH0DA_8822B 0x1204
-#define REG_DDMA_CH0CTRL_8822B 0x1208
-#define REG_DDMA_CH1SA_8822B 0x1210
-#define REG_DDMA_CH1DA_8822B 0x1214
-#define REG_DDMA_CH1CTRL_8822B 0x1218
-#define REG_DDMA_CH2SA_8822B 0x1220
-#define REG_DDMA_CH2DA_8822B 0x1224
-#define REG_DDMA_CH2CTRL_8822B 0x1228
-#define REG_DDMA_CH3SA_8822B 0x1230
-#define REG_DDMA_CH3DA_8822B 0x1234
-#define REG_DDMA_CH3CTRL_8822B 0x1238
-#define REG_DDMA_CH4SA_8822B 0x1240
-#define REG_DDMA_CH4DA_8822B 0x1244
-#define REG_DDMA_CH4CTRL_8822B 0x1248
-#define REG_DDMA_CH5SA_8822B 0x1250
-#define REG_DDMA_CH5DA_8822B 0x1254
-#define REG_REG_DDMA_CH5CTRL_8822B 0x1258
-#define REG_DDMA_INT_MSK_8822B 0x12E0
-#define REG_DDMA_CHSTATUS_8822B 0x12E8
-#define REG_DDMA_CHKSUM_8822B 0x12F0
-#define REG_DDMA_MONITOR_8822B 0x12FC
-#define REG_PCIE_CTRL_8822B 0x0300
-#define REG_INT_MIG_8822B 0x0304
-#define REG_BCNQ_TXBD_DESA_8822B 0x0308
-#define REG_MGQ_TXBD_DESA_8822B 0x0310
-#define REG_VOQ_TXBD_DESA_8822B 0x0318
-#define REG_VIQ_TXBD_DESA_8822B 0x0320
-#define REG_BEQ_TXBD_DESA_8822B 0x0328
-#define REG_BKQ_TXBD_DESA_8822B 0x0330
-#define REG_RXQ_RXBD_DESA_8822B 0x0338
-#define REG_HI0Q_TXBD_DESA_8822B 0x0340
-#define REG_HI1Q_TXBD_DESA_8822B 0x0348
-#define REG_HI2Q_TXBD_DESA_8822B 0x0350
-#define REG_HI3Q_TXBD_DESA_8822B 0x0358
-#define REG_HI4Q_TXBD_DESA_8822B 0x0360
-#define REG_HI5Q_TXBD_DESA_8822B 0x0368
-#define REG_HI6Q_TXBD_DESA_8822B 0x0370
-#define REG_HI7Q_TXBD_DESA_8822B 0x0378
-#define REG_MGQ_TXBD_NUM_8822B 0x0380
-#define REG_RX_RXBD_NUM_8822B 0x0382
-#define REG_VOQ_TXBD_NUM_8822B 0x0384
-#define REG_VIQ_TXBD_NUM_8822B 0x0386
-#define REG_BEQ_TXBD_NUM_8822B 0x0388
-#define REG_BKQ_TXBD_NUM_8822B 0x038A
-#define REG_HI0Q_TXBD_NUM_8822B 0x038C
-#define REG_HI1Q_TXBD_NUM_8822B 0x038E
-#define REG_HI2Q_TXBD_NUM_8822B 0x0390
-#define REG_HI3Q_TXBD_NUM_8822B 0x0392
-#define REG_HI4Q_TXBD_NUM_8822B 0x0394
-#define REG_HI5Q_TXBD_NUM_8822B 0x0396
-#define REG_HI6Q_TXBD_NUM_8822B 0x0398
-#define REG_HI7Q_TXBD_NUM_8822B 0x039A
-#define REG_TSFTIMER_HCI_8822B 0x039C
-#define REG_BD_RWPTR_CLR_8822B 0x039C
-#define REG_VOQ_TXBD_IDX_8822B 0x03A0
-#define REG_VIQ_TXBD_IDX_8822B 0x03A4
-#define REG_BEQ_TXBD_IDX_8822B 0x03A8
-#define REG_BKQ_TXBD_IDX_8822B 0x03AC
-#define REG_MGQ_TXBD_IDX_8822B 0x03B0
-#define REG_RXQ_RXBD_IDX_8822B 0x03B4
-#define REG_HI0Q_TXBD_IDX_8822B 0x03B8
-#define REG_HI1Q_TXBD_IDX_8822B 0x03BC
-#define REG_HI2Q_TXBD_IDX_8822B 0x03C0
-#define REG_HI3Q_TXBD_IDX_8822B 0x03C4
-#define REG_HI4Q_TXBD_IDX_8822B 0x03C8
-#define REG_HI5Q_TXBD_IDX_8822B 0x03CC
-#define REG_HI6Q_TXBD_IDX_8822B 0x03D0
-#define REG_HI7Q_TXBD_IDX_8822B 0x03D4
-#define REG_DBG_SEL_V1_8822B 0x03D8
-#define REG_PCIE_HRPWM1_V1_8822B 0x03D9
-#define REG_PCIE_HCPWM1_V1_8822B 0x03DA
-#define REG_PCIE_CTRL2_8822B 0x03DB
-#define REG_PCIE_HRPWM2_V1_8822B 0x03DC
-#define REG_PCIE_HCPWM2_V1_8822B 0x03DE
-#define REG_PCIE_H2C_MSG_V1_8822B 0x03E0
-#define REG_PCIE_C2H_MSG_V1_8822B 0x03E4
-#define REG_DBI_WDATA_V1_8822B 0x03E8
-#define REG_DBI_RDATA_V1_8822B 0x03EC
-#define REG_DBI_FLAG_V1_8822B 0x03F0
-#define REG_MDIO_V1_8822B 0x03F4
-#define REG_PCIE_MIX_CFG_8822B 0x03F8
-#define REG_HCI_MIX_CFG_8822B 0x03FC
-#define REG_STC_INT_CS_8822B 0x1300
-#define REG_ST_INT_CFG_8822B 0x1304
-#define REG_CMU_DLY_CTRL_8822B 0x1310
-#define REG_CMU_DLY_CFG_8822B 0x1314
-#define REG_H2CQ_TXBD_DESA_8822B 0x1320
-#define REG_H2CQ_TXBD_NUM_8822B 0x1328
-#define REG_H2CQ_TXBD_IDX_8822B 0x132C
-#define REG_H2CQ_CSR_8822B 0x1330
-#define REG_CHANGE_PCIE_SPEED_8822B 0x1350
-#define REG_OLD_DEHANG_8822B 0x13F4
-#define REG_Q0_INFO_8822B 0x0400
-#define REG_Q1_INFO_8822B 0x0404
-#define REG_Q2_INFO_8822B 0x0408
-#define REG_Q3_INFO_8822B 0x040C
-#define REG_MGQ_INFO_8822B 0x0410
-#define REG_HIQ_INFO_8822B 0x0414
-#define REG_BCNQ_INFO_8822B 0x0418
-#define REG_TXPKT_EMPTY_8822B 0x041A
-#define REG_CPU_MGQ_INFO_8822B 0x041C
-#define REG_FWHW_TXQ_CTRL_8822B 0x0420
-#define REG_DATAFB_SEL_8822B 0x0423
-#define REG_BCNQ_BDNY_V1_8822B 0x0424
-#define REG_LIFETIME_EN_8822B 0x0426
-#define REG_SPEC_SIFS_8822B 0x0428
-#define REG_RETRY_LIMIT_8822B 0x042A
-#define REG_TXBF_CTRL_8822B 0x042C
-#define REG_DARFRC_8822B 0x0430
-#define REG_RARFRC_8822B 0x0438
-#define REG_RRSR_8822B 0x0440
-#define REG_ARFR0_8822B 0x0444
-#define REG_ARFR1_V1_8822B 0x044C
-#define REG_CCK_CHECK_8822B 0x0454
-#define REG_AMPDU_MAX_TIME_V1_8822B 0x0455
-#define REG_BCNQ1_BDNY_V1_8822B 0x0456
-#define REG_AMPDU_MAX_LENGTH_8822B 0x0458
-#define REG_ACQ_STOP_8822B 0x045C
-#define REG_NDPA_RATE_8822B 0x045D
-#define REG_TX_HANG_CTRL_8822B 0x045E
-#define REG_NDPA_OPT_CTRL_8822B 0x045F
-#define REG_RD_RESP_PKT_TH_8822B 0x0463
-#define REG_CMDQ_INFO_8822B 0x0464
-#define REG_Q4_INFO_8822B 0x0468
-#define REG_Q5_INFO_8822B 0x046C
-#define REG_Q6_INFO_8822B 0x0470
-#define REG_Q7_INFO_8822B 0x0474
-#define REG_WMAC_LBK_BUF_HD_V1_8822B 0x0478
-#define REG_MGQ_BDNY_V1_8822B 0x047A
-#define REG_TXRPT_CTRL_8822B 0x047C
-#define REG_INIRTS_RATE_SEL_8822B 0x0480
-#define REG_BASIC_CFEND_RATE_8822B 0x0481
-#define REG_STBC_CFEND_RATE_8822B 0x0482
-#define REG_DATA_SC_8822B 0x0483
-#define REG_MACID_SLEEP3_8822B 0x0484
-#define REG_MACID_SLEEP1_8822B 0x0488
-#define REG_ARFR2_V1_8822B 0x048C
-#define REG_ARFR3_V1_8822B 0x0494
-#define REG_ARFR4_8822B 0x049C
-#define REG_ARFR5_8822B 0x04A4
-#define REG_TXRPT_START_OFFSET_8822B 0x04AC
-#define REG_POWER_STAGE1_8822B 0x04B4
-#define REG_POWER_STAGE2_8822B 0x04B8
-#define REG_SW_AMPDU_BURST_MODE_CTRL_8822B 0x04BC
-#define REG_PKT_LIFE_TIME_8822B 0x04C0
-#define REG_STBC_SETTING_8822B 0x04C4
-#define REG_STBC_SETTING2_8822B 0x04C5
-#define REG_QUEUE_CTRL_8822B 0x04C6
-#define REG_SINGLE_AMPDU_CTRL_8822B 0x04C7
-#define REG_PROT_MODE_CTRL_8822B 0x04C8
-#define REG_BAR_MODE_CTRL_8822B 0x04CC
-#define REG_RA_TRY_RATE_AGG_LMT_8822B 0x04CF
-#define REG_MACID_SLEEP2_8822B 0x04D0
-#define REG_MACID_SLEEP_8822B 0x04D4
-#define REG_HW_SEQ0_8822B 0x04D8
-#define REG_HW_SEQ1_8822B 0x04DA
-#define REG_HW_SEQ2_8822B 0x04DC
-#define REG_HW_SEQ3_8822B 0x04DE
-#define REG_NULL_PKT_STATUS_V1_8822B 0x04E0
-#define REG_PTCL_ERR_STATUS_8822B 0x04E2
-#define REG_NULL_PKT_STATUS_EXTEND_8822B 0x04E3
-#define REG_VIDEO_ENHANCEMENT_FUN_8822B 0x04E4
-#define REG_BT_POLLUTE_PKT_CNT_8822B 0x04E8
-#define REG_PTCL_DBG_8822B 0x04EC
-#define REG_CPUMGQ_TIMER_CTRL2_8822B 0x04F4
-#define REG_DUMMY_PAGE4_V1_8822B 0x04FC
-#define REG_MOREDATA_8822B 0x04FE
-#define REG_Q0_Q1_INFO_8822B 0x1400
-#define REG_Q2_Q3_INFO_8822B 0x1404
-#define REG_Q4_Q5_INFO_8822B 0x1408
-#define REG_Q6_Q7_INFO_8822B 0x140C
-#define REG_MGQ_HIQ_INFO_8822B 0x1410
-#define REG_CMDQ_BCNQ_INFO_8822B 0x1414
-#define REG_USEREG_SETTING_8822B 0x1420
-#define REG_AESIV_SETTING_8822B 0x1424
-#define REG_BF0_TIME_SETTING_8822B 0x1428
-#define REG_BF1_TIME_SETTING_8822B 0x142C
-#define REG_BF_TIMEOUT_EN_8822B 0x1430
-#define REG_MACID_RELEASE0_8822B 0x1434
-#define REG_MACID_RELEASE1_8822B 0x1438
-#define REG_MACID_RELEASE2_8822B 0x143C
-#define REG_MACID_RELEASE3_8822B 0x1440
-#define REG_MACID_RELEASE_SETTING_8822B 0x1444
-#define REG_FAST_EDCA_VOVI_SETTING_8822B 0x1448
-#define REG_FAST_EDCA_BEBK_SETTING_8822B 0x144C
-#define REG_MACID_DROP0_8822B 0x1450
-#define REG_MACID_DROP1_8822B 0x1454
-#define REG_MACID_DROP2_8822B 0x1458
-#define REG_MACID_DROP3_8822B 0x145C
-#define REG_R_MACID_RELEASE_SUCCESS_0_8822B 0x1460
-#define REG_R_MACID_RELEASE_SUCCESS_1_8822B 0x1464
-#define REG_R_MACID_RELEASE_SUCCESS_2_8822B 0x1468
-#define REG_R_MACID_RELEASE_SUCCESS_3_8822B 0x146C
-#define REG_MGG_FIFO_CRTL_8822B 0x1470
-#define REG_MGG_FIFO_INT_8822B 0x1474
-#define REG_MGG_FIFO_LIFETIME_8822B 0x1478
-#define REG_R_MACID_RELEASE_SUCCESS_CLEAR_OFFSET_8822B 0x147C
-#define REG_MACID_SHCUT_OFFSET_8822B 0x1480
-#define REG_MU_TX_CTL_8822B 0x14C0
-#define REG_MU_STA_GID_VLD_8822B 0x14C4
-#define REG_MU_STA_USER_POS_INFO_8822B 0x14C8
-#define REG_MU_TRX_DBG_CNT_8822B 0x14D0
-#define REG_EDCA_VO_PARAM_8822B 0x0500
-#define REG_EDCA_VI_PARAM_8822B 0x0504
-#define REG_EDCA_BE_PARAM_8822B 0x0508
-#define REG_EDCA_BK_PARAM_8822B 0x050C
-#define REG_BCNTCFG_8822B 0x0510
-#define REG_PIFS_8822B 0x0512
-#define REG_RDG_PIFS_8822B 0x0513
-#define REG_SIFS_8822B 0x0514
-#define REG_TSFTR_SYN_OFFSET_8822B 0x0518
-#define REG_AGGR_BREAK_TIME_8822B 0x051A
-#define REG_SLOT_8822B 0x051B
-#define REG_TX_PTCL_CTRL_8822B 0x0520
-#define REG_TXPAUSE_8822B 0x0522
-#define REG_DIS_TXREQ_CLR_8822B 0x0523
-#define REG_RD_CTRL_8822B 0x0524
-#define REG_MBSSID_CTRL_8822B 0x0526
-#define REG_P2PPS_CTRL_8822B 0x0527
-#define REG_PKT_LIFETIME_CTRL_8822B 0x0528
-#define REG_P2PPS_SPEC_STATE_8822B 0x052B
-#define REG_BAR_TX_CTRL_8822B 0x0530
-#define REG_QUEUE_INCOL_THR_8822B 0x0538
-#define REG_QUEUE_INCOL_EN_8822B 0x053C
-#define REG_TBTT_PROHIBIT_8822B 0x0540
-#define REG_P2PPS_STATE_8822B 0x0543
-#define REG_RD_NAV_NXT_8822B 0x0544
-#define REG_NAV_PROT_LEN_8822B 0x0546
-#define REG_BCN_CTRL_8822B 0x0550
-#define REG_BCN_CTRL_CLINT0_8822B 0x0551
-#define REG_MBID_NUM_8822B 0x0552
-#define REG_DUAL_TSF_RST_8822B 0x0553
-#define REG_MBSSID_BCN_SPACE_8822B 0x0554
-#define REG_DRVERLYINT_8822B 0x0558
-#define REG_BCNDMATIM_8822B 0x0559
-#define REG_ATIMWND_8822B 0x055A
-#define REG_USTIME_TSF_8822B 0x055C
-#define REG_BCN_MAX_ERR_8822B 0x055D
-#define REG_RXTSF_OFFSET_CCK_8822B 0x055E
-#define REG_RXTSF_OFFSET_OFDM_8822B 0x055F
-#define REG_TSFTR_8822B 0x0560
-#define REG_FREERUN_CNT_8822B 0x0568
-#define REG_ATIMWND1_V1_8822B 0x0570
-#define REG_TBTT_PROHIBIT_INFRA_8822B 0x0571
-#define REG_CTWND_8822B 0x0572
-#define REG_BCNIVLCUNT_8822B 0x0573
-#define REG_BCNDROPCTRL_8822B 0x0574
-#define REG_HGQ_TIMEOUT_PERIOD_8822B 0x0575
-#define REG_TXCMD_TIMEOUT_PERIOD_8822B 0x0576
-#define REG_MISC_CTRL_8822B 0x0577
-#define REG_BCN_CTRL_CLINT1_8822B 0x0578
-#define REG_BCN_CTRL_CLINT2_8822B 0x0579
-#define REG_BCN_CTRL_CLINT3_8822B 0x057A
-#define REG_EXTEND_CTRL_8822B 0x057B
-#define REG_P2PPS1_SPEC_STATE_8822B 0x057C
-#define REG_P2PPS1_STATE_8822B 0x057D
-#define REG_P2PPS2_SPEC_STATE_8822B 0x057E
-#define REG_P2PPS2_STATE_8822B 0x057F
-#define REG_PS_TIMER0_8822B 0x0580
-#define REG_PS_TIMER1_8822B 0x0584
-#define REG_PS_TIMER2_8822B 0x0588
-#define REG_TBTT_CTN_AREA_8822B 0x058C
-#define REG_FORCE_BCN_IFS_8822B 0x058E
-#define REG_TXOP_MIN_8822B 0x0590
-#define REG_PRE_BKF_TIME_8822B 0x0592
-#define REG_CROSS_TXOP_CTRL_8822B 0x0593
-#define REG_ATIMWND2_8822B 0x05A0
-#define REG_ATIMWND3_8822B 0x05A1
-#define REG_ATIMWND4_8822B 0x05A2
-#define REG_ATIMWND5_8822B 0x05A3
-#define REG_ATIMWND6_8822B 0x05A4
-#define REG_ATIMWND7_8822B 0x05A5
-#define REG_ATIMUGT_8822B 0x05A6
-#define REG_HIQ_NO_LMT_EN_8822B 0x05A7
-#define REG_DTIM_COUNTER_ROOT_8822B 0x05A8
-#define REG_DTIM_COUNTER_VAP1_8822B 0x05A9
-#define REG_DTIM_COUNTER_VAP2_8822B 0x05AA
-#define REG_DTIM_COUNTER_VAP3_8822B 0x05AB
-#define REG_DTIM_COUNTER_VAP4_8822B 0x05AC
-#define REG_DTIM_COUNTER_VAP5_8822B 0x05AD
-#define REG_DTIM_COUNTER_VAP6_8822B 0x05AE
-#define REG_DTIM_COUNTER_VAP7_8822B 0x05AF
-#define REG_DIS_ATIM_8822B 0x05B0
-#define REG_EARLY_128US_8822B 0x05B1
-#define REG_P2PPS1_CTRL_8822B 0x05B2
-#define REG_P2PPS2_CTRL_8822B 0x05B3
-#define REG_TIMER0_SRC_SEL_8822B 0x05B4
-#define REG_NOA_UNIT_SEL_8822B 0x05B5
-#define REG_P2POFF_DIS_TXTIME_8822B 0x05B7
-#define REG_MBSSID_BCN_SPACE2_8822B 0x05B8
-#define REG_MBSSID_BCN_SPACE3_8822B 0x05BC
-#define REG_ACMHWCTRL_8822B 0x05C0
-#define REG_ACMRSTCTRL_8822B 0x05C1
-#define REG_ACMAVG_8822B 0x05C2
-#define REG_VO_ADMTIME_8822B 0x05C4
-#define REG_VI_ADMTIME_8822B 0x05C6
-#define REG_BE_ADMTIME_8822B 0x05C8
-#define REG_EDCA_RANDOM_GEN_8822B 0x05CC
-#define REG_TXCMD_NOA_SEL_8822B 0x05CF
-#define REG_NOA_PARAM_8822B 0x05E0
-#define REG_P2P_RST_8822B 0x05F0
-#define REG_SCHEDULER_RST_8822B 0x05F1
-#define REG_SCH_TXCMD_8822B 0x05F8
-#define REG_PAGE5_DUMMY_8822B 0x05FC
-#define REG_CPUMGQ_TX_TIMER_8822B 0x1500
-#define REG_PS_TIMER_A_8822B 0x1504
-#define REG_PS_TIMER_B_8822B 0x1508
-#define REG_PS_TIMER_C_8822B 0x150C
-#define REG_PS_TIMER_ABC_CPUMGQ_TIMER_CRTL_8822B 0x1510
-#define REG_CPUMGQ_TX_TIMER_EARLY_8822B 0x1514
-#define REG_PS_TIMER_A_EARLY_8822B 0x1515
-#define REG_PS_TIMER_B_EARLY_8822B 0x1516
-#define REG_PS_TIMER_C_EARLY_8822B 0x1517
-#define REG_WMAC_CR_8822B 0x0600
-#define REG_WMAC_FWPKT_CR_8822B 0x0601
-#define REG_BWOPMODE_8822B 0x0603
-#define REG_TCR_8822B 0x0604
-#define REG_RCR_8822B 0x0608
-#define REG_RX_PKT_LIMIT_8822B 0x060C
-#define REG_RX_DLK_TIME_8822B 0x060D
-#define REG_RX_DRVINFO_SZ_8822B 0x060F
-#define REG_MACID_8822B 0x0610
-#define REG_BSSID_8822B 0x0618
-#define REG_MAR_8822B 0x0620
-#define REG_MBIDCAMCFG_1_8822B 0x0628
-#define REG_MBIDCAMCFG_2_8822B 0x062C
-#define REG_WMAC_TCR_TSFT_OFS_8822B 0x0630
-#define REG_UDF_THSD_8822B 0x0632
-#define REG_ZLD_NUM_8822B 0x0633
-#define REG_STMP_THSD_8822B 0x0634
-#define REG_WMAC_TXTIMEOUT_8822B 0x0635
-#define REG_MCU_TEST_2_V1_8822B 0x0636
-#define REG_USTIME_EDCA_8822B 0x0638
-#define REG_MAC_SPEC_SIFS_8822B 0x063A
-#define REG_RESP_SIFS_CCK_8822B 0x063C
-#define REG_RESP_SIFS_OFDM_8822B 0x063E
-#define REG_ACKTO_8822B 0x0640
-#define REG_CTS2TO_8822B 0x0641
-#define REG_EIFS_8822B 0x0642
-#define REG_NAV_CTRL_8822B 0x0650
-#define REG_BACAMCMD_8822B 0x0654
-#define REG_BACAMCONTENT_8822B 0x0658
-#define REG_LBDLY_8822B 0x0660
-#define REG_WMAC_BACAM_RPMEN_8822B 0x0661
-#define REG_TX_RX_8822B 0x0662
-#define REG_WMAC_BITMAP_CTL_8822B 0x0663
-#define REG_RXERR_RPT_8822B 0x0664
-#define REG_WMAC_TRXPTCL_CTL_8822B 0x0668
-#define REG_CAMCMD_8822B 0x0670
-#define REG_CAMWRITE_8822B 0x0674
-#define REG_CAMREAD_8822B 0x0678
-#define REG_CAMDBG_8822B 0x067C
-#define REG_SECCFG_8822B 0x0680
-#define REG_RXFILTER_CATEGORY_1_8822B 0x0682
-#define REG_RXFILTER_ACTION_1_8822B 0x0683
-#define REG_RXFILTER_CATEGORY_2_8822B 0x0684
-#define REG_RXFILTER_ACTION_2_8822B 0x0685
-#define REG_RXFILTER_CATEGORY_3_8822B 0x0686
-#define REG_RXFILTER_ACTION_3_8822B 0x0687
-#define REG_RXFLTMAP3_8822B 0x0688
-#define REG_RXFLTMAP4_8822B 0x068A
-#define REG_RXFLTMAP5_8822B 0x068C
-#define REG_RXFLTMAP6_8822B 0x068E
-#define REG_WOW_CTRL_8822B 0x0690
-#define REG_NAN_RX_TSF_FILTER_8822B 0x0691
-#define REG_PS_RX_INFO_8822B 0x0692
-#define REG_WMMPS_UAPSD_TID_8822B 0x0693
-#define REG_LPNAV_CTRL_8822B 0x0694
-#define REG_WKFMCAM_CMD_8822B 0x0698
-#define REG_WKFMCAM_RWD_8822B 0x069C
-#define REG_RXFLTMAP0_8822B 0x06A0
-#define REG_RXFLTMAP1_8822B 0x06A2
-#define REG_RXFLTMAP_8822B 0x06A4
-#define REG_BCN_PSR_RPT_8822B 0x06A8
-#define REG_FLC_RPC_8822B 0x06AC
-#define REG_FLC_RPCT_8822B 0x06AD
-#define REG_FLC_PTS_8822B 0x06AE
-#define REG_FLC_TRPC_8822B 0x06AF
-#define REG_RXPKTMON_CTRL_8822B 0x06B0
-#define REG_STATE_MON_8822B 0x06B4
-#define REG_ERROR_MON_8822B 0x06B8
-#define REG_SEARCH_MACID_8822B 0x06BC
-#define REG_BT_COEX_TABLE_8822B 0x06C0
-#define REG_RXCMD_0_8822B 0x06D0
-#define REG_RXCMD_1_8822B 0x06D4
-#define REG_WMAC_RESP_TXINFO_8822B 0x06D8
-#define REG_BBPSF_CTRL_8822B 0x06DC
-#define REG_P2P_RX_BCN_NOA_8822B 0x06E0
-#define REG_ASSOCIATED_BFMER0_INFO_8822B 0x06E4
-#define REG_ASSOCIATED_BFMER1_INFO_8822B 0x06EC
-#define REG_TX_CSI_RPT_PARAM_BW20_8822B 0x06F4
-#define REG_TX_CSI_RPT_PARAM_BW40_8822B 0x06F8
-#define REG_TX_CSI_RPT_PARAM_BW80_8822B 0x06FC
-#define REG_BCN_PSR_RPT2_8822B 0x1600
-#define REG_BCN_PSR_RPT3_8822B 0x1604
-#define REG_BCN_PSR_RPT4_8822B 0x1608
-#define REG_A1_ADDR_MASK_8822B 0x160C
-#define REG_MACID2_8822B 0x1620
-#define REG_BSSID2_8822B 0x1628
-#define REG_MACID3_8822B 0x1630
-#define REG_BSSID3_8822B 0x1638
-#define REG_MACID4_8822B 0x1640
-#define REG_BSSID4_8822B 0x1648
-#define REG_NOA_REPORT_8822B 0x1650
-#define REG_PWRBIT_SETTING_8822B 0x1660
-#define REG_WMAC_MU_BF_OPTION_8822B 0x167C
-#define REG_WMAC_MU_ARB_8822B 0x167E
-#define REG_WMAC_MU_OPTION_8822B 0x167F
-#define REG_WMAC_MU_BF_CTL_8822B 0x1680
-#define REG_WMAC_MU_BFRPT_PARA_8822B 0x1682
-#define REG_WMAC_ASSOCIATED_MU_BFMEE2_8822B 0x1684
-#define REG_WMAC_ASSOCIATED_MU_BFMEE3_8822B 0x1686
-#define REG_WMAC_ASSOCIATED_MU_BFMEE4_8822B 0x1688
-#define REG_WMAC_ASSOCIATED_MU_BFMEE5_8822B 0x168A
-#define REG_WMAC_ASSOCIATED_MU_BFMEE6_8822B 0x168C
-#define REG_WMAC_ASSOCIATED_MU_BFMEE7_8822B 0x168E
-#define REG_TRANSMIT_ADDRSS_0_8822B 0x16A0
-#define REG_TRANSMIT_ADDRSS_1_8822B 0x16A8
-#define REG_TRANSMIT_ADDRSS_2_8822B 0x16B0
-#define REG_TRANSMIT_ADDRSS_3_8822B 0x16B8
-#define REG_TRANSMIT_ADDRSS_4_8822B 0x16C0
-#define REG_MACID1_8822B 0x0700
-#define REG_BSSID1_8822B 0x0708
-#define REG_BCN_PSR_RPT1_8822B 0x0710
-#define REG_ASSOCIATED_BFMEE_SEL_8822B 0x0714
-#define REG_SND_PTCL_CTRL_8822B 0x0718
-#define REG_RX_CSI_RPT_INFO_8822B 0x071C
-#define REG_NS_ARP_CTRL_8822B 0x0720
-#define REG_NS_ARP_INFO_8822B 0x0724
-#define REG_BEAMFORMING_INFO_NSARP_V1_8822B 0x0728
-#define REG_BEAMFORMING_INFO_NSARP_8822B 0x072C
-#define REG_WMAC_RTX_CTX_SUBTYPE_CFG_8822B 0x0750
-#define REG_WMAC_SWAES_CFG_8822B 0x0760
-#define REG_BT_COEX_V2_8822B 0x0762
-#define REG_BT_COEX_8822B 0x0764
-#define REG_WLAN_ACT_MASK_CTRL_8822B 0x0768
-#define REG_BT_COEX_ENHANCED_INTR_CTRL_8822B 0x076E
-#define REG_BT_ACT_STATISTICS_8822B 0x0770
-#define REG_BT_STATISTICS_CONTROL_REGISTER_8822B 0x0778
-#define REG_BT_STATUS_REPORT_REGISTER_8822B 0x077C
-#define REG_BT_INTERRUPT_CONTROL_REGISTER_8822B 0x0780
-#define REG_WLAN_REPORT_TIME_OUT_CONTROL_REGISTER_8822B 0x0784
-#define REG_BT_ISOLATION_TABLE_REGISTER_REGISTER_8822B 0x0785
-#define REG_BT_INTERRUPT_STATUS_REGISTER_8822B 0x078F
-#define REG_BT_TDMA_TIME_REGISTER_8822B 0x0790
-#define REG_BT_ACT_REGISTER_8822B 0x0794
-#define REG_OBFF_CTRL_BASIC_8822B 0x0798
-#define REG_OBFF_CTRL2_TIMER_8822B 0x079C
-#define REG_LTR_CTRL_BASIC_8822B 0x07A0
-#define REG_LTR_CTRL2_TIMER_THRESHOLD_8822B 0x07A4
-#define REG_LTR_IDLE_LATENCY_V1_8822B 0x07A8
-#define REG_LTR_ACTIVE_LATENCY_V1_8822B 0x07AC
-#define REG_ANTENNA_TRAINING_CONTROL_REGISTER_8822B 0x07B0
-#define REG_WMAC_PKTCNT_RWD_8822B 0x07B8
-#define REG_WMAC_PKTCNT_CTRL_8822B 0x07BC
-#define REG_IQ_DUMP_8822B 0x07C0
-#define REG_WMAC_FTM_CTL_8822B 0x07CC
-#define REG_WMAC_IQ_MDPK_FUNC_8822B 0x07CE
-#define REG_WMAC_OPTION_FUNCTION_8822B 0x07D0
-#define REG_RX_FILTER_FUNCTION_8822B 0x07DA
-#define REG_NDP_SIG_8822B 0x07E0
-#define REG_TXCMD_INFO_FOR_RSP_PKT_8822B 0x07E4
-#define REG_RTS_ADDRESS_0_8822B 0x07F0
-#define REG_RTS_ADDRESS_1_8822B 0x07F8
-#define REG__RPFM_MAP1_8822B 0x07FE
-#define REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1_8822B 0x1700
-#define REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1_8822B 0x1704
-#define REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1_8822B 0x1708
-#define REG_SDIO_TX_CTRL_8822B 0x10250000
-#define REG_SDIO_HIMR_8822B 0x10250014
-#define REG_SDIO_HISR_8822B 0x10250018
-#define REG_SDIO_RX_REQ_LEN_8822B 0x1025001C
-#define REG_SDIO_FREE_TXPG_SEQ_V1_8822B 0x1025001F
-#define REG_SDIO_FREE_TXPG_8822B 0x10250020
-#define REG_SDIO_FREE_TXPG2_8822B 0x10250024
-#define REG_SDIO_OQT_FREE_TXPG_V1_8822B 0x10250028
-#define REG_SDIO_HTSFR_INFO_8822B 0x10250030
-#define REG_SDIO_HCPWM1_V2_8822B 0x10250038
-#define REG_SDIO_HCPWM2_V2_8822B 0x1025003A
-#define REG_SDIO_INDIRECT_REG_CFG_8822B 0x10250040
-#define REG_SDIO_INDIRECT_REG_DATA_8822B 0x10250044
-#define REG_SDIO_H2C_8822B 0x10250060
-#define REG_SDIO_C2H_8822B 0x10250064
-#define REG_SDIO_HRPWM1_8822B 0x10250080
-#define REG_SDIO_HRPWM2_8822B 0x10250082
-#define REG_SDIO_HPS_CLKR_8822B 0x10250084
-#define REG_SDIO_BUS_CTRL_8822B 0x10250085
-#define REG_SDIO_HSUS_CTRL_8822B 0x10250086
-#define REG_SDIO_RESPONSE_TIMER_8822B 0x10250088
-#define REG_SDIO_CMD_CRC_8822B 0x1025008A
-#define REG_SDIO_HSISR_8822B 0x10250090
-#define REG_SDIO_HSIMR_8822B 0x10250091
-#define REG_SDIO_ERR_RPT_8822B 0x102500C0
-#define REG_SDIO_CMD_ERRCNT_8822B 0x102500C1
-#define REG_SDIO_DATA_ERRCNT_8822B 0x102500C2
-#define REG_SDIO_CMD_ERR_CONTENT_8822B 0x102500C4
-#define REG_SDIO_CRC_ERR_IDX_8822B 0x102500C9
-#define REG_SDIO_DATA_CRC_8822B 0x102500CA
-#define REG_SDIO_DATA_REPLY_TIME_8822B 0x102500CB
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h b/drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h
deleted file mode 100644
index d5ff89c91ab3..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_RX_BD_CHIP_H_
-#define _HALMAC_RX_BD_CHIP_H_
-
-/*TXBD_DW0*/
-
-#define GET_RX_BD_RXFAIL_8822B(__rx_bd) GET_RX_BD_RXFAIL(__rx_bd)
-#define GET_RX_BD_TOTALRXPKTSIZE_8822B(__rx_bd) \
- GET_RX_BD_TOTALRXPKTSIZE(__rx_bd)
-#define GET_RX_BD_RXTAG_8822B(__rx_bd) GET_RX_BD_RXTAG(__rx_bd)
-#define GET_RX_BD_FS_8822B(__rx_bd) GET_RX_BD_FS(__rx_bd)
-#define GET_RX_BD_LS_8822B(__rx_bd) GET_RX_BD_LS(__rx_bd)
-#define GET_RX_BD_RXBUFFSIZE_8822B(__rx_bd) GET_RX_BD_RXBUFFSIZE(__rx_bd)
-
-/*TXBD_DW1*/
-
-#define GET_RX_BD_PHYSICAL_ADDR_LOW_8822B(__rx_bd) \
- GET_RX_BD_PHYSICAL_ADDR_LOW(__rx_bd)
-
-/*TXBD_DW2*/
-
-#define GET_RX_BD_PHYSICAL_ADDR_HIGH_8822B(__rx_bd) \
- GET_RX_BD_PHYSICAL_ADDR_HIGH(__rx_bd)
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h b/drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h
deleted file mode 100644
index c030f2597176..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_RX_BD_NIC_H_
-#define _HALMAC_RX_BD_NIC_H_
-
-/*TXBD_DW0*/
-
-#define GET_RX_BD_RXFAIL(__rx_bd) LE_BITS_TO_4BYTE(__rx_bd + 0x00, 31, 1)
-#define GET_RX_BD_TOTALRXPKTSIZE(__rx_bd) \
- LE_BITS_TO_4BYTE(__rx_bd + 0x00, 16, 13)
-#define GET_RX_BD_RXTAG(__rx_bd) LE_BITS_TO_4BYTE(__rx_bd + 0x00, 16, 13)
-#define GET_RX_BD_FS(__rx_bd) LE_BITS_TO_4BYTE(__rx_bd + 0x00, 15, 1)
-#define GET_RX_BD_LS(__rx_bd) LE_BITS_TO_4BYTE(__rx_bd + 0x00, 14, 1)
-#define GET_RX_BD_RXBUFFSIZE(__rx_bd) LE_BITS_TO_4BYTE(__rx_bd + 0x00, 0, 14)
-
-/*TXBD_DW1*/
-
-#define GET_RX_BD_PHYSICAL_ADDR_LOW(__rx_bd) \
- LE_BITS_TO_4BYTE(__rx_bd + 0x04, 0, 32)
-
-/*TXBD_DW2*/
-
-#define GET_RX_BD_PHYSICAL_ADDR_HIGH(__rx_bd) \
- LE_BITS_TO_4BYTE(__rx_bd + 0x08, 0, 32)
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h b/drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h
deleted file mode 100644
index 5f960e2ae8c7..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_RX_DESC_CHIP_H_
-#define _HALMAC_RX_DESC_CHIP_H_
-
-/*RXDESC_WORD0*/
-
-#define GET_RX_DESC_EOR_8822B(__rx_desc) GET_RX_DESC_EOR(__rx_desc)
-#define GET_RX_DESC_PHYPKTIDC_8822B(__rx_desc) GET_RX_DESC_PHYPKTIDC(__rx_desc)
-#define GET_RX_DESC_SWDEC_8822B(__rx_desc) GET_RX_DESC_SWDEC(__rx_desc)
-#define GET_RX_DESC_PHYST_8822B(__rx_desc) GET_RX_DESC_PHYST(__rx_desc)
-#define GET_RX_DESC_SHIFT_8822B(__rx_desc) GET_RX_DESC_SHIFT(__rx_desc)
-#define GET_RX_DESC_QOS_8822B(__rx_desc) GET_RX_DESC_QOS(__rx_desc)
-#define GET_RX_DESC_SECURITY_8822B(__rx_desc) GET_RX_DESC_SECURITY(__rx_desc)
-#define GET_RX_DESC_DRV_INFO_SIZE_8822B(__rx_desc) \
- GET_RX_DESC_DRV_INFO_SIZE(__rx_desc)
-#define GET_RX_DESC_ICV_ERR_8822B(__rx_desc) GET_RX_DESC_ICV_ERR(__rx_desc)
-#define GET_RX_DESC_CRC32_8822B(__rx_desc) GET_RX_DESC_CRC32(__rx_desc)
-#define GET_RX_DESC_PKT_LEN_8822B(__rx_desc) GET_RX_DESC_PKT_LEN(__rx_desc)
-
-/*RXDESC_WORD1*/
-
-#define GET_RX_DESC_BC_8822B(__rx_desc) GET_RX_DESC_BC(__rx_desc)
-#define GET_RX_DESC_MC_8822B(__rx_desc) GET_RX_DESC_MC(__rx_desc)
-#define GET_RX_DESC_TY_PE_8822B(__rx_desc) GET_RX_DESC_TY_PE(__rx_desc)
-#define GET_RX_DESC_MF_8822B(__rx_desc) GET_RX_DESC_MF(__rx_desc)
-#define GET_RX_DESC_MD_8822B(__rx_desc) GET_RX_DESC_MD(__rx_desc)
-#define GET_RX_DESC_PWR_8822B(__rx_desc) GET_RX_DESC_PWR(__rx_desc)
-#define GET_RX_DESC_PAM_8822B(__rx_desc) GET_RX_DESC_PAM(__rx_desc)
-#define GET_RX_DESC_CHK_VLD_8822B(__rx_desc) GET_RX_DESC_CHK_VLD(__rx_desc)
-#define GET_RX_DESC_RX_IS_TCP_UDP_8822B(__rx_desc) \
- GET_RX_DESC_RX_IS_TCP_UDP(__rx_desc)
-#define GET_RX_DESC_RX_IPV_8822B(__rx_desc) GET_RX_DESC_RX_IPV(__rx_desc)
-#define GET_RX_DESC_CHKERR_8822B(__rx_desc) GET_RX_DESC_CHKERR(__rx_desc)
-#define GET_RX_DESC_PAGGR_8822B(__rx_desc) GET_RX_DESC_PAGGR(__rx_desc)
-#define GET_RX_DESC_RXID_MATCH_8822B(__rx_desc) \
- GET_RX_DESC_RXID_MATCH(__rx_desc)
-#define GET_RX_DESC_AMSDU_8822B(__rx_desc) GET_RX_DESC_AMSDU(__rx_desc)
-#define GET_RX_DESC_MACID_VLD_8822B(__rx_desc) GET_RX_DESC_MACID_VLD(__rx_desc)
-#define GET_RX_DESC_TID_8822B(__rx_desc) GET_RX_DESC_TID(__rx_desc)
-#define GET_RX_DESC_EXT_SECTYPE_8822B(__rx_desc) \
- GET_RX_DESC_EXT_SECTYPE(__rx_desc)
-#define GET_RX_DESC_MACID_8822B(__rx_desc) GET_RX_DESC_MACID(__rx_desc)
-
-/*RXDESC_WORD2*/
-
-#define GET_RX_DESC_FCS_OK_8822B(__rx_desc) GET_RX_DESC_FCS_OK(__rx_desc)
-#define GET_RX_DESC_PPDU_CNT_8822B(__rx_desc) GET_RX_DESC_PPDU_CNT(__rx_desc)
-#define GET_RX_DESC_C2H_8822B(__rx_desc) GET_RX_DESC_C2H(__rx_desc)
-#define GET_RX_DESC_HWRSVD_8822B(__rx_desc) GET_RX_DESC_HWRSVD(__rx_desc)
-#define GET_RX_DESC_WLANHD_IV_LEN_8822B(__rx_desc) \
- GET_RX_DESC_WLANHD_IV_LEN(__rx_desc)
-#define GET_RX_DESC_RX_IS_QOS_8822B(__rx_desc) GET_RX_DESC_RX_IS_QOS(__rx_desc)
-#define GET_RX_DESC_FRAG_8822B(__rx_desc) GET_RX_DESC_FRAG(__rx_desc)
-#define GET_RX_DESC_SEQ_8822B(__rx_desc) GET_RX_DESC_SEQ(__rx_desc)
-
-/*RXDESC_WORD3*/
-
-#define GET_RX_DESC_MAGIC_WAKE_8822B(__rx_desc) \
- GET_RX_DESC_MAGIC_WAKE(__rx_desc)
-#define GET_RX_DESC_UNICAST_WAKE_8822B(__rx_desc) \
- GET_RX_DESC_UNICAST_WAKE(__rx_desc)
-#define GET_RX_DESC_PATTERN_MATCH_8822B(__rx_desc) \
- GET_RX_DESC_PATTERN_MATCH(__rx_desc)
-#define GET_RX_DESC_RXPAYLOAD_MATCH_8822B(__rx_desc) \
- GET_RX_DESC_RXPAYLOAD_MATCH(__rx_desc)
-#define GET_RX_DESC_RXPAYLOAD_ID_8822B(__rx_desc) \
- GET_RX_DESC_RXPAYLOAD_ID(__rx_desc)
-#define GET_RX_DESC_DMA_AGG_NUM_8822B(__rx_desc) \
- GET_RX_DESC_DMA_AGG_NUM(__rx_desc)
-#define GET_RX_DESC_BSSID_FIT_1_0_8822B(__rx_desc) \
- GET_RX_DESC_BSSID_FIT_1_0(__rx_desc)
-#define GET_RX_DESC_EOSP_8822B(__rx_desc) GET_RX_DESC_EOSP(__rx_desc)
-#define GET_RX_DESC_HTC_8822B(__rx_desc) GET_RX_DESC_HTC(__rx_desc)
-#define GET_RX_DESC_BSSID_FIT_4_2_8822B(__rx_desc) \
- GET_RX_DESC_BSSID_FIT_4_2(__rx_desc)
-#define GET_RX_DESC_RX_RATE_8822B(__rx_desc) GET_RX_DESC_RX_RATE(__rx_desc)
-
-/*RXDESC_WORD4*/
-
-#define GET_RX_DESC_A1_FIT_8822B(__rx_desc) GET_RX_DESC_A1_FIT(__rx_desc)
-#define GET_RX_DESC_MACID_RPT_BUFF_8822B(__rx_desc) \
- GET_RX_DESC_MACID_RPT_BUFF(__rx_desc)
-#define GET_RX_DESC_RX_PRE_NDP_VLD_8822B(__rx_desc) \
- GET_RX_DESC_RX_PRE_NDP_VLD(__rx_desc)
-#define GET_RX_DESC_RX_SCRAMBLER_8822B(__rx_desc) \
- GET_RX_DESC_RX_SCRAMBLER(__rx_desc)
-#define GET_RX_DESC_RX_EOF_8822B(__rx_desc) GET_RX_DESC_RX_EOF(__rx_desc)
-#define GET_RX_DESC_PATTERN_IDX_8822B(__rx_desc) \
- GET_RX_DESC_PATTERN_IDX(__rx_desc)
-
-/*RXDESC_WORD5*/
-
-#define GET_RX_DESC_TSFL_8822B(__rx_desc) GET_RX_DESC_TSFL(__rx_desc)
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h b/drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h
deleted file mode 100644
index 413004eef0b7..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_RX_DESC_NIC_H_
-#define _HALMAC_RX_DESC_NIC_H_
-
-/*RXDESC_WORD0*/
-
-#define GET_RX_DESC_EOR(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x00, 30, 1)
-#define GET_RX_DESC_PHYPKTIDC(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x00, 28, 1)
-#define GET_RX_DESC_SWDEC(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x00, 27, 1)
-#define GET_RX_DESC_PHYST(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x00, 26, 1)
-#define GET_RX_DESC_SHIFT(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x00, 24, 2)
-#define GET_RX_DESC_QOS(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x00, 23, 1)
-#define GET_RX_DESC_SECURITY(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x00, 20, 3)
-#define GET_RX_DESC_DRV_INFO_SIZE(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x00, 16, 4)
-#define GET_RX_DESC_ICV_ERR(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x00, 15, 1)
-#define GET_RX_DESC_CRC32(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x00, 14, 1)
-#define GET_RX_DESC_PKT_LEN(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x00, 0, 14)
-
-/*RXDESC_WORD1*/
-
-#define GET_RX_DESC_BC(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 31, 1)
-#define GET_RX_DESC_MC(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 30, 1)
-#define GET_RX_DESC_TY_PE(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 28, 2)
-#define GET_RX_DESC_MF(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 27, 1)
-#define GET_RX_DESC_MD(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 26, 1)
-#define GET_RX_DESC_PWR(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 25, 1)
-#define GET_RX_DESC_PAM(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 24, 1)
-#define GET_RX_DESC_CHK_VLD(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 23, 1)
-#define GET_RX_DESC_RX_IS_TCP_UDP(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x04, 22, 1)
-#define GET_RX_DESC_RX_IPV(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 21, 1)
-#define GET_RX_DESC_CHKERR(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 20, 1)
-#define GET_RX_DESC_PAGGR(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 15, 1)
-#define GET_RX_DESC_RXID_MATCH(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x04, 14, 1)
-#define GET_RX_DESC_AMSDU(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 13, 1)
-#define GET_RX_DESC_MACID_VLD(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x04, 12, 1)
-#define GET_RX_DESC_TID(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 8, 4)
-
-#define GET_RX_DESC_EXT_SECTYPE(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x04, 7, 1)
-
-#define GET_RX_DESC_MACID(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x04, 0, 7)
-
-/*RXDESC_WORD2*/
-
-#define GET_RX_DESC_FCS_OK(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x08, 31, 1)
-
-#define GET_RX_DESC_PPDU_CNT(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x08, 29, 2)
-
-#define GET_RX_DESC_C2H(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x08, 28, 1)
-#define GET_RX_DESC_HWRSVD(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x08, 24, 4)
-#define GET_RX_DESC_WLANHD_IV_LEN(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x08, 18, 6)
-#define GET_RX_DESC_RX_IS_QOS(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x08, 16, 1)
-#define GET_RX_DESC_FRAG(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x08, 12, 4)
-#define GET_RX_DESC_SEQ(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x08, 0, 12)
-
-/*RXDESC_WORD3*/
-
-#define GET_RX_DESC_MAGIC_WAKE(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 31, 1)
-#define GET_RX_DESC_UNICAST_WAKE(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 30, 1)
-#define GET_RX_DESC_PATTERN_MATCH(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 29, 1)
-
-#define GET_RX_DESC_RXPAYLOAD_MATCH(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 28, 1)
-#define GET_RX_DESC_RXPAYLOAD_ID(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 24, 4)
-
-#define GET_RX_DESC_DMA_AGG_NUM(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 16, 8)
-#define GET_RX_DESC_BSSID_FIT_1_0(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 12, 2)
-#define GET_RX_DESC_EOSP(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 11, 1)
-#define GET_RX_DESC_HTC(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 10, 1)
-
-#define GET_RX_DESC_BSSID_FIT_4_2(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 7, 3)
-
-#define GET_RX_DESC_RX_RATE(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x0C, 0, 7)
-
-/*RXDESC_WORD4*/
-
-#define GET_RX_DESC_A1_FIT(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x10, 24, 5)
-
-#define GET_RX_DESC_MACID_RPT_BUFF(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x10, 17, 7)
-#define GET_RX_DESC_RX_PRE_NDP_VLD(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x10, 16, 1)
-#define GET_RX_DESC_RX_SCRAMBLER(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x10, 9, 7)
-#define GET_RX_DESC_RX_EOF(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x10, 8, 1)
-
-#define GET_RX_DESC_PATTERN_IDX(__rx_desc) \
- LE_BITS_TO_4BYTE(__rx_desc + 0x10, 0, 8)
-
-/*RXDESC_WORD5*/
-
-#define GET_RX_DESC_TSFL(__rx_desc) LE_BITS_TO_4BYTE(__rx_desc + 0x14, 0, 32)
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h b/drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h
deleted file mode 100644
index 7760a6b42d98..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HALMAC_SDIO_REG_H__
-#define __HALMAC_SDIO_REG_H__
-
-/* SDIO CMD address mapping */
-
-#define HALMAC_SDIO_4BYTE_LEN_MASK 0x1FFF
-#define HALMAC_SDIO_LOCAL_MSK 0x0FFF
-#define HALMAC_WLAN_MAC_REG_MSK 0xFFFF
-#define HALMAC_WLAN_IOREG_MSK 0xFFFF
-
-/* SDIO address for SDIO Local Reg, TRX FIFO, MAC Reg */
-enum halmac_sdio_cmd_addr {
- HALMAC_SDIO_CMD_ADDR_SDIO_REG = 0,
- HALMAC_SDIO_CMD_ADDR_MAC_REG = 8,
- HALMAC_SDIO_CMD_ADDR_TXFF_HIGH = 4,
- HALMAC_SDIO_CMD_ADDR_TXFF_LOW = 6,
- HALMAC_SDIO_CMD_ADDR_TXFF_NORMAL = 5,
- HALMAC_SDIO_CMD_ADDR_TXFF_EXTRA = 7,
- HALMAC_SDIO_CMD_ADDR_RXFF = 7,
-};
-
-/* IO Bus domain address mapping */
-#define SDIO_LOCAL_OFFSET 0x10250000
-#define WLAN_IOREG_OFFSET 0x10260000
-#define FW_FIFO_OFFSET 0x10270000
-#define TX_HIQ_OFFSET 0x10310000
-#define TX_MIQ_OFFSET 0x10320000
-#define TX_LOQ_OFFSET 0x10330000
-#define TX_EXQ_OFFSET 0x10350000
-#define RX_RXOFF_OFFSET 0x10340000
-
-/* Get TX WLAN FIFO information in CMD53 addr */
-#define GET_WLAN_TXFF_DEVICE_ID(__cmd53_addr) \
- LE_BITS_TO_4BYTE((u32 *)__cmd53_addr, 13, 4)
-#define GET_WLAN_TXFF_PKT_SIZE(__cmd53_addr) \
- (LE_BITS_TO_4BYTE((u32 *)__cmd53_addr, 0, 13) << 2)
-
-#endif /* __HALMAC_SDIO_REG_H__ */
diff --git a/drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h b/drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h
deleted file mode 100644
index e1cfcfdf1990..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_TX_BD_CHIP_H_
-#define _HALMAC_TX_BD_CHIP_H_
-
-/*TXBD_DW0*/
-
-#define SET_TX_BD_OWN_8822B(__tx_bd, __value) SET_TX_BD_OWN(__tx_bd, __value)
-#define GET_TX_BD_OWN_8822B(__tx_bd) GET_TX_BD_OWN(__tx_bd)
-#define SET_TX_BD_PSB_8822B(__tx_bd, __value) SET_TX_BD_PSB(__tx_bd, __value)
-#define GET_TX_BD_PSB_8822B(__tx_bd) GET_TX_BD_PSB(__tx_bd)
-#define SET_TX_BD_TX_BUFF_SIZE0_8822B(__tx_bd, __value) \
- SET_TX_BD_TX_BUFF_SIZE0(__tx_bd, __value)
-#define GET_TX_BD_TX_BUFF_SIZE0_8822B(__tx_bd) GET_TX_BD_TX_BUFF_SIZE0(__tx_bd)
-
-/*TXBD_DW1*/
-
-#define SET_TX_BD_PHYSICAL_ADDR0_LOW_8822B(__tx_bd, __value) \
- SET_TX_BD_PHYSICAL_ADDR0_LOW(__tx_bd, __value)
-#define GET_TX_BD_PHYSICAL_ADDR0_LOW_8822B(__tx_bd) \
- GET_TX_BD_PHYSICAL_ADDR0_LOW(__tx_bd)
-
-/*TXBD_DW2*/
-
-#define SET_TX_BD_PHYSICAL_ADDR0_HIGH_8822B(__tx_bd, __value) \
- SET_TX_BD_PHYSICAL_ADDR0_HIGH(__tx_bd, __value)
-#define GET_TX_BD_PHYSICAL_ADDR0_HIGH_8822B(__tx_bd) \
- GET_TX_BD_PHYSICAL_ADDR0_HIGH(__tx_bd)
-
-/*TXBD_DW4*/
-
-#define SET_TX_BD_A1_8822B(__tx_bd, __value) SET_TX_BD_A1(__tx_bd, __value)
-#define GET_TX_BD_A1_8822B(__tx_bd) GET_TX_BD_A1(__tx_bd)
-#define SET_TX_BD_TX_BUFF_SIZE1_8822B(__tx_bd, __value) \
- SET_TX_BD_TX_BUFF_SIZE1(__tx_bd, __value)
-#define GET_TX_BD_TX_BUFF_SIZE1_8822B(__tx_bd) GET_TX_BD_TX_BUFF_SIZE1(__tx_bd)
-
-/*TXBD_DW5*/
-
-#define SET_TX_BD_PHYSICAL_ADDR1_LOW_8822B(__tx_bd, __value) \
- SET_TX_BD_PHYSICAL_ADDR1_LOW(__tx_bd, __value)
-#define GET_TX_BD_PHYSICAL_ADDR1_LOW_8822B(__tx_bd) \
- GET_TX_BD_PHYSICAL_ADDR1_LOW(__tx_bd)
-
-/*TXBD_DW6*/
-
-#define SET_TX_BD_PHYSICAL_ADDR1_HIGH_8822B(__tx_bd, __value) \
- SET_TX_BD_PHYSICAL_ADDR1_HIGH(__tx_bd, __value)
-#define GET_TX_BD_PHYSICAL_ADDR1_HIGH_8822B(__tx_bd) \
- GET_TX_BD_PHYSICAL_ADDR1_HIGH(__tx_bd)
-
-/*TXBD_DW8*/
-
-#define SET_TX_BD_A2_8822B(__tx_bd, __value) SET_TX_BD_A2(__tx_bd, __value)
-#define GET_TX_BD_A2_8822B(__tx_bd) GET_TX_BD_A2(__tx_bd)
-#define SET_TX_BD_TX_BUFF_SIZE2_8822B(__tx_bd, __value) \
- SET_TX_BD_TX_BUFF_SIZE2(__tx_bd, __value)
-#define GET_TX_BD_TX_BUFF_SIZE2_8822B(__tx_bd) GET_TX_BD_TX_BUFF_SIZE2(__tx_bd)
-
-/*TXBD_DW9*/
-
-#define SET_TX_BD_PHYSICAL_ADDR2_LOW_8822B(__tx_bd, __value) \
- SET_TX_BD_PHYSICAL_ADDR2_LOW(__tx_bd, __value)
-#define GET_TX_BD_PHYSICAL_ADDR2_LOW_8822B(__tx_bd) \
- GET_TX_BD_PHYSICAL_ADDR2_LOW(__tx_bd)
-
-/*TXBD_DW10*/
-
-#define SET_TX_BD_PHYSICAL_ADDR2_HIGH_8822B(__tx_bd, __value) \
- SET_TX_BD_PHYSICAL_ADDR2_HIGH(__tx_bd, __value)
-#define GET_TX_BD_PHYSICAL_ADDR2_HIGH_8822B(__tx_bd) \
- GET_TX_BD_PHYSICAL_ADDR2_HIGH(__tx_bd)
-
-/*TXBD_DW12*/
-
-#define SET_TX_BD_A3_8822B(__tx_bd, __value) SET_TX_BD_A3(__tx_bd, __value)
-#define GET_TX_BD_A3_8822B(__tx_bd) GET_TX_BD_A3(__tx_bd)
-#define SET_TX_BD_TX_BUFF_SIZE3_8822B(__tx_bd, __value) \
- SET_TX_BD_TX_BUFF_SIZE3(__tx_bd, __value)
-#define GET_TX_BD_TX_BUFF_SIZE3_8822B(__tx_bd) GET_TX_BD_TX_BUFF_SIZE3(__tx_bd)
-
-/*TXBD_DW13*/
-
-#define SET_TX_BD_PHYSICAL_ADDR3_LOW_8822B(__tx_bd, __value) \
- SET_TX_BD_PHYSICAL_ADDR3_LOW(__tx_bd, __value)
-#define GET_TX_BD_PHYSICAL_ADDR3_LOW_8822B(__tx_bd) \
- GET_TX_BD_PHYSICAL_ADDR3_LOW(__tx_bd)
-
-/*TXBD_DW14*/
-
-#define SET_TX_BD_PHYSICAL_ADDR3_HIGH_8822B(__tx_bd, __value) \
- SET_TX_BD_PHYSICAL_ADDR3_HIGH(__tx_bd, __value)
-#define GET_TX_BD_PHYSICAL_ADDR3_HIGH_8822B(__tx_bd) \
- GET_TX_BD_PHYSICAL_ADDR3_HIGH(__tx_bd)
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h b/drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h
deleted file mode 100644
index fd3f80bd752d..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_TX_BD_NIC_H_
-#define _HALMAC_TX_BD_NIC_H_
-
-/*TXBD_DW0*/
-
-#define SET_TX_BD_OWN(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x00, 31, 1, __value)
-#define GET_TX_BD_OWN(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x00, 31, 1)
-#define SET_TX_BD_PSB(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x00, 16, 8, __value)
-#define GET_TX_BD_PSB(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x00, 16, 8)
-#define SET_TX_BD_TX_BUFF_SIZE0(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x00, 0, 16, __value)
-#define GET_TX_BD_TX_BUFF_SIZE0(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x00, 0, 16)
-
-/*TXBD_DW1*/
-
-#define SET_TX_BD_PHYSICAL_ADDR0_LOW(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x04, 0, 32, __value)
-#define GET_TX_BD_PHYSICAL_ADDR0_LOW(__tx_bd) \
- LE_BITS_TO_4BYTE(__tx_bd + 0x04, 0, 32)
-
-/*TXBD_DW2*/
-
-#define SET_TX_BD_PHYSICAL_ADDR0_HIGH(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x08, 0, 32, __value)
-#define GET_TX_BD_PHYSICAL_ADDR0_HIGH(__tx_bd) \
- LE_BITS_TO_4BYTE(__tx_bd + 0x08, 0, 32)
-
-/*TXBD_DW4*/
-
-#define SET_TX_BD_A1(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x10, 31, 1, __value)
-#define GET_TX_BD_A1(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x10, 31, 1)
-#define SET_TX_BD_TX_BUFF_SIZE1(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x10, 0, 16, __value)
-#define GET_TX_BD_TX_BUFF_SIZE1(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x10, 0, 16)
-
-/*TXBD_DW5*/
-
-#define SET_TX_BD_PHYSICAL_ADDR1_LOW(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x14, 0, 32, __value)
-#define GET_TX_BD_PHYSICAL_ADDR1_LOW(__tx_bd) \
- LE_BITS_TO_4BYTE(__tx_bd + 0x14, 0, 32)
-
-/*TXBD_DW6*/
-
-#define SET_TX_BD_PHYSICAL_ADDR1_HIGH(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x18, 0, 32, __value)
-#define GET_TX_BD_PHYSICAL_ADDR1_HIGH(__tx_bd) \
- LE_BITS_TO_4BYTE(__tx_bd + 0x18, 0, 32)
-
-/*TXBD_DW8*/
-
-#define SET_TX_BD_A2(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x20, 31, 1, __value)
-#define GET_TX_BD_A2(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x20, 31, 1)
-#define SET_TX_BD_TX_BUFF_SIZE2(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x20, 0, 16, __value)
-#define GET_TX_BD_TX_BUFF_SIZE2(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x20, 0, 16)
-
-/*TXBD_DW9*/
-
-#define SET_TX_BD_PHYSICAL_ADDR2_LOW(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x24, 0, 32, __value)
-#define GET_TX_BD_PHYSICAL_ADDR2_LOW(__tx_bd) \
- LE_BITS_TO_4BYTE(__tx_bd + 0x24, 0, 32)
-
-/*TXBD_DW10*/
-
-#define SET_TX_BD_PHYSICAL_ADDR2_HIGH(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x28, 0, 32, __value)
-#define GET_TX_BD_PHYSICAL_ADDR2_HIGH(__tx_bd) \
- LE_BITS_TO_4BYTE(__tx_bd + 0x28, 0, 32)
-
-/*TXBD_DW12*/
-
-#define SET_TX_BD_A3(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x30, 31, 1, __value)
-#define GET_TX_BD_A3(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x30, 31, 1)
-#define SET_TX_BD_TX_BUFF_SIZE3(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x30, 0, 16, __value)
-#define GET_TX_BD_TX_BUFF_SIZE3(__tx_bd) LE_BITS_TO_4BYTE(__tx_bd + 0x30, 0, 16)
-
-/*TXBD_DW13*/
-
-#define SET_TX_BD_PHYSICAL_ADDR3_LOW(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x34, 0, 32, __value)
-#define GET_TX_BD_PHYSICAL_ADDR3_LOW(__tx_bd) \
- LE_BITS_TO_4BYTE(__tx_bd + 0x34, 0, 32)
-
-/*TXBD_DW14*/
-
-#define SET_TX_BD_PHYSICAL_ADDR3_HIGH(__tx_bd, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_bd + 0x38, 0, 32, __value)
-#define GET_TX_BD_PHYSICAL_ADDR3_HIGH(__tx_bd) \
- LE_BITS_TO_4BYTE(__tx_bd + 0x38, 0, 32)
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h b/drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h
deleted file mode 100644
index ca32f1b5f80a..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h
+++ /dev/null
@@ -1,433 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_TX_DESC_CHIP_H_
-#define _HALMAC_TX_DESC_CHIP_H_
-
-/*TXDESC_WORD0*/
-
-#define SET_TX_DESC_DISQSELSEQ_8822B(__tx_desc, __value) \
- SET_TX_DESC_DISQSELSEQ(__tx_desc, __value)
-#define GET_TX_DESC_DISQSELSEQ_8822B(__tx_desc) \
- GET_TX_DESC_DISQSELSEQ(__tx_desc)
-#define SET_TX_DESC_GF_8822B(__tx_desc, __value) \
- SET_TX_DESC_GF(__tx_desc, __value)
-#define GET_TX_DESC_GF_8822B(__tx_desc) GET_TX_DESC_GF(__tx_desc)
-#define SET_TX_DESC_NO_ACM_8822B(__tx_desc, __value) \
- SET_TX_DESC_NO_ACM(__tx_desc, __value)
-#define GET_TX_DESC_NO_ACM_8822B(__tx_desc) GET_TX_DESC_NO_ACM(__tx_desc)
-#define SET_TX_DESC_BCNPKT_TSF_CTRL_8822B(__tx_desc, __value) \
- SET_TX_DESC_BCNPKT_TSF_CTRL(__tx_desc, __value)
-#define GET_TX_DESC_BCNPKT_TSF_CTRL_8822B(__tx_desc) \
- GET_TX_DESC_BCNPKT_TSF_CTRL(__tx_desc)
-#define SET_TX_DESC_AMSDU_PAD_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_AMSDU_PAD_EN(__tx_desc, __value)
-#define GET_TX_DESC_AMSDU_PAD_EN_8822B(__tx_desc) \
- GET_TX_DESC_AMSDU_PAD_EN(__tx_desc)
-#define SET_TX_DESC_LS_8822B(__tx_desc, __value) \
- SET_TX_DESC_LS(__tx_desc, __value)
-#define GET_TX_DESC_LS_8822B(__tx_desc) GET_TX_DESC_LS(__tx_desc)
-#define SET_TX_DESC_HTC_8822B(__tx_desc, __value) \
- SET_TX_DESC_HTC(__tx_desc, __value)
-#define GET_TX_DESC_HTC_8822B(__tx_desc) GET_TX_DESC_HTC(__tx_desc)
-#define SET_TX_DESC_BMC_8822B(__tx_desc, __value) \
- SET_TX_DESC_BMC(__tx_desc, __value)
-#define GET_TX_DESC_BMC_8822B(__tx_desc) GET_TX_DESC_BMC(__tx_desc)
-#define SET_TX_DESC_OFFSET_8822B(__tx_desc, __value) \
- SET_TX_DESC_OFFSET(__tx_desc, __value)
-#define GET_TX_DESC_OFFSET_8822B(__tx_desc) GET_TX_DESC_OFFSET(__tx_desc)
-#define SET_TX_DESC_TXPKTSIZE_8822B(__tx_desc, __value) \
- SET_TX_DESC_TXPKTSIZE(__tx_desc, __value)
-#define GET_TX_DESC_TXPKTSIZE_8822B(__tx_desc) GET_TX_DESC_TXPKTSIZE(__tx_desc)
-
-/*TXDESC_WORD1*/
-
-#define SET_TX_DESC_MOREDATA_8822B(__tx_desc, __value) \
- SET_TX_DESC_MOREDATA(__tx_desc, __value)
-#define GET_TX_DESC_MOREDATA_8822B(__tx_desc) GET_TX_DESC_MOREDATA(__tx_desc)
-#define SET_TX_DESC_PKT_OFFSET_8822B(__tx_desc, __value) \
- SET_TX_DESC_PKT_OFFSET(__tx_desc, __value)
-#define GET_TX_DESC_PKT_OFFSET_8822B(__tx_desc) \
- GET_TX_DESC_PKT_OFFSET(__tx_desc)
-#define SET_TX_DESC_SEC_TYPE_8822B(__tx_desc, __value) \
- SET_TX_DESC_SEC_TYPE(__tx_desc, __value)
-#define GET_TX_DESC_SEC_TYPE_8822B(__tx_desc) GET_TX_DESC_SEC_TYPE(__tx_desc)
-#define SET_TX_DESC_EN_DESC_ID_8822B(__tx_desc, __value) \
- SET_TX_DESC_EN_DESC_ID(__tx_desc, __value)
-#define GET_TX_DESC_EN_DESC_ID_8822B(__tx_desc) \
- GET_TX_DESC_EN_DESC_ID(__tx_desc)
-#define SET_TX_DESC_RATE_ID_8822B(__tx_desc, __value) \
- SET_TX_DESC_RATE_ID(__tx_desc, __value)
-#define GET_TX_DESC_RATE_ID_8822B(__tx_desc) GET_TX_DESC_RATE_ID(__tx_desc)
-#define SET_TX_DESC_PIFS_8822B(__tx_desc, __value) \
- SET_TX_DESC_PIFS(__tx_desc, __value)
-#define GET_TX_DESC_PIFS_8822B(__tx_desc) GET_TX_DESC_PIFS(__tx_desc)
-#define SET_TX_DESC_LSIG_TXOP_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_LSIG_TXOP_EN(__tx_desc, __value)
-#define GET_TX_DESC_LSIG_TXOP_EN_8822B(__tx_desc) \
- GET_TX_DESC_LSIG_TXOP_EN(__tx_desc)
-#define SET_TX_DESC_RD_NAV_EXT_8822B(__tx_desc, __value) \
- SET_TX_DESC_RD_NAV_EXT(__tx_desc, __value)
-#define GET_TX_DESC_RD_NAV_EXT_8822B(__tx_desc) \
- GET_TX_DESC_RD_NAV_EXT(__tx_desc)
-#define SET_TX_DESC_QSEL_8822B(__tx_desc, __value) \
- SET_TX_DESC_QSEL(__tx_desc, __value)
-#define GET_TX_DESC_QSEL_8822B(__tx_desc) GET_TX_DESC_QSEL(__tx_desc)
-#define SET_TX_DESC_MACID_8822B(__tx_desc, __value) \
- SET_TX_DESC_MACID(__tx_desc, __value)
-#define GET_TX_DESC_MACID_8822B(__tx_desc) GET_TX_DESC_MACID(__tx_desc)
-
-/*TXDESC_WORD2*/
-
-#define SET_TX_DESC_HW_AES_IV_8822B(__tx_desc, __value) \
- SET_TX_DESC_HW_AES_IV(__tx_desc, __value)
-#define GET_TX_DESC_HW_AES_IV_8822B(__tx_desc) GET_TX_DESC_HW_AES_IV(__tx_desc)
-#define SET_TX_DESC_FTM_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_FTM_EN(__tx_desc, __value)
-#define GET_TX_DESC_FTM_EN_8822B(__tx_desc) GET_TX_DESC_FTM_EN(__tx_desc)
-#define SET_TX_DESC_G_ID_8822B(__tx_desc, __value) \
- SET_TX_DESC_G_ID(__tx_desc, __value)
-#define GET_TX_DESC_G_ID_8822B(__tx_desc) GET_TX_DESC_G_ID(__tx_desc)
-#define SET_TX_DESC_BT_NULL_8822B(__tx_desc, __value) \
- SET_TX_DESC_BT_NULL(__tx_desc, __value)
-#define GET_TX_DESC_BT_NULL_8822B(__tx_desc) GET_TX_DESC_BT_NULL(__tx_desc)
-#define SET_TX_DESC_AMPDU_DENSITY_8822B(__tx_desc, __value) \
- SET_TX_DESC_AMPDU_DENSITY(__tx_desc, __value)
-#define GET_TX_DESC_AMPDU_DENSITY_8822B(__tx_desc) \
- GET_TX_DESC_AMPDU_DENSITY(__tx_desc)
-#define SET_TX_DESC_SPE_RPT_8822B(__tx_desc, __value) \
- SET_TX_DESC_SPE_RPT(__tx_desc, __value)
-#define GET_TX_DESC_SPE_RPT_8822B(__tx_desc) GET_TX_DESC_SPE_RPT(__tx_desc)
-#define SET_TX_DESC_RAW_8822B(__tx_desc, __value) \
- SET_TX_DESC_RAW(__tx_desc, __value)
-#define GET_TX_DESC_RAW_8822B(__tx_desc) GET_TX_DESC_RAW(__tx_desc)
-#define SET_TX_DESC_MOREFRAG_8822B(__tx_desc, __value) \
- SET_TX_DESC_MOREFRAG(__tx_desc, __value)
-#define GET_TX_DESC_MOREFRAG_8822B(__tx_desc) GET_TX_DESC_MOREFRAG(__tx_desc)
-#define SET_TX_DESC_BK_8822B(__tx_desc, __value) \
- SET_TX_DESC_BK(__tx_desc, __value)
-#define GET_TX_DESC_BK_8822B(__tx_desc) GET_TX_DESC_BK(__tx_desc)
-#define SET_TX_DESC_NULL_1_8822B(__tx_desc, __value) \
- SET_TX_DESC_NULL_1(__tx_desc, __value)
-#define GET_TX_DESC_NULL_1_8822B(__tx_desc) GET_TX_DESC_NULL_1(__tx_desc)
-#define SET_TX_DESC_NULL_0_8822B(__tx_desc, __value) \
- SET_TX_DESC_NULL_0(__tx_desc, __value)
-#define GET_TX_DESC_NULL_0_8822B(__tx_desc) GET_TX_DESC_NULL_0(__tx_desc)
-#define SET_TX_DESC_RDG_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_RDG_EN(__tx_desc, __value)
-#define GET_TX_DESC_RDG_EN_8822B(__tx_desc) GET_TX_DESC_RDG_EN(__tx_desc)
-#define SET_TX_DESC_AGG_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_AGG_EN(__tx_desc, __value)
-#define GET_TX_DESC_AGG_EN_8822B(__tx_desc) GET_TX_DESC_AGG_EN(__tx_desc)
-#define SET_TX_DESC_CCA_RTS_8822B(__tx_desc, __value) \
- SET_TX_DESC_CCA_RTS(__tx_desc, __value)
-#define GET_TX_DESC_CCA_RTS_8822B(__tx_desc) GET_TX_DESC_CCA_RTS(__tx_desc)
-#define SET_TX_DESC_TRI_FRAME_8822B(__tx_desc, __value) \
- SET_TX_DESC_TRI_FRAME(__tx_desc, __value)
-#define GET_TX_DESC_TRI_FRAME_8822B(__tx_desc) GET_TX_DESC_TRI_FRAME(__tx_desc)
-#define SET_TX_DESC_P_AID_8822B(__tx_desc, __value) \
- SET_TX_DESC_P_AID(__tx_desc, __value)
-#define GET_TX_DESC_P_AID_8822B(__tx_desc) GET_TX_DESC_P_AID(__tx_desc)
-
-/*TXDESC_WORD3*/
-
-#define SET_TX_DESC_AMPDU_MAX_TIME_8822B(__tx_desc, __value) \
- SET_TX_DESC_AMPDU_MAX_TIME(__tx_desc, __value)
-#define GET_TX_DESC_AMPDU_MAX_TIME_8822B(__tx_desc) \
- GET_TX_DESC_AMPDU_MAX_TIME(__tx_desc)
-#define SET_TX_DESC_NDPA_8822B(__tx_desc, __value) \
- SET_TX_DESC_NDPA(__tx_desc, __value)
-#define GET_TX_DESC_NDPA_8822B(__tx_desc) GET_TX_DESC_NDPA(__tx_desc)
-#define SET_TX_DESC_MAX_AGG_NUM_8822B(__tx_desc, __value) \
- SET_TX_DESC_MAX_AGG_NUM(__tx_desc, __value)
-#define GET_TX_DESC_MAX_AGG_NUM_8822B(__tx_desc) \
- GET_TX_DESC_MAX_AGG_NUM(__tx_desc)
-#define SET_TX_DESC_USE_MAX_TIME_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_USE_MAX_TIME_EN(__tx_desc, __value)
-#define GET_TX_DESC_USE_MAX_TIME_EN_8822B(__tx_desc) \
- GET_TX_DESC_USE_MAX_TIME_EN(__tx_desc)
-#define SET_TX_DESC_NAVUSEHDR_8822B(__tx_desc, __value) \
- SET_TX_DESC_NAVUSEHDR(__tx_desc, __value)
-#define GET_TX_DESC_NAVUSEHDR_8822B(__tx_desc) GET_TX_DESC_NAVUSEHDR(__tx_desc)
-#define SET_TX_DESC_CHK_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_CHK_EN(__tx_desc, __value)
-#define GET_TX_DESC_CHK_EN_8822B(__tx_desc) GET_TX_DESC_CHK_EN(__tx_desc)
-#define SET_TX_DESC_HW_RTS_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_HW_RTS_EN(__tx_desc, __value)
-#define GET_TX_DESC_HW_RTS_EN_8822B(__tx_desc) GET_TX_DESC_HW_RTS_EN(__tx_desc)
-#define SET_TX_DESC_RTSEN_8822B(__tx_desc, __value) \
- SET_TX_DESC_RTSEN(__tx_desc, __value)
-#define GET_TX_DESC_RTSEN_8822B(__tx_desc) GET_TX_DESC_RTSEN(__tx_desc)
-#define SET_TX_DESC_CTS2SELF_8822B(__tx_desc, __value) \
- SET_TX_DESC_CTS2SELF(__tx_desc, __value)
-#define GET_TX_DESC_CTS2SELF_8822B(__tx_desc) GET_TX_DESC_CTS2SELF(__tx_desc)
-#define SET_TX_DESC_DISDATAFB_8822B(__tx_desc, __value) \
- SET_TX_DESC_DISDATAFB(__tx_desc, __value)
-#define GET_TX_DESC_DISDATAFB_8822B(__tx_desc) GET_TX_DESC_DISDATAFB(__tx_desc)
-#define SET_TX_DESC_DISRTSFB_8822B(__tx_desc, __value) \
- SET_TX_DESC_DISRTSFB(__tx_desc, __value)
-#define GET_TX_DESC_DISRTSFB_8822B(__tx_desc) GET_TX_DESC_DISRTSFB(__tx_desc)
-#define SET_TX_DESC_USE_RATE_8822B(__tx_desc, __value) \
- SET_TX_DESC_USE_RATE(__tx_desc, __value)
-#define GET_TX_DESC_USE_RATE_8822B(__tx_desc) GET_TX_DESC_USE_RATE(__tx_desc)
-#define SET_TX_DESC_HW_SSN_SEL_8822B(__tx_desc, __value) \
- SET_TX_DESC_HW_SSN_SEL(__tx_desc, __value)
-#define GET_TX_DESC_HW_SSN_SEL_8822B(__tx_desc) \
- GET_TX_DESC_HW_SSN_SEL(__tx_desc)
-#define SET_TX_DESC_WHEADER_LEN_8822B(__tx_desc, __value) \
- SET_TX_DESC_WHEADER_LEN(__tx_desc, __value)
-#define GET_TX_DESC_WHEADER_LEN_8822B(__tx_desc) \
- GET_TX_DESC_WHEADER_LEN(__tx_desc)
-
-/*TXDESC_WORD4*/
-
-#define SET_TX_DESC_PCTS_MASK_IDX_8822B(__tx_desc, __value) \
- SET_TX_DESC_PCTS_MASK_IDX(__tx_desc, __value)
-#define GET_TX_DESC_PCTS_MASK_IDX_8822B(__tx_desc) \
- GET_TX_DESC_PCTS_MASK_IDX(__tx_desc)
-#define SET_TX_DESC_PCTS_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_PCTS_EN(__tx_desc, __value)
-#define GET_TX_DESC_PCTS_EN_8822B(__tx_desc) GET_TX_DESC_PCTS_EN(__tx_desc)
-#define SET_TX_DESC_RTSRATE_8822B(__tx_desc, __value) \
- SET_TX_DESC_RTSRATE(__tx_desc, __value)
-#define GET_TX_DESC_RTSRATE_8822B(__tx_desc) GET_TX_DESC_RTSRATE(__tx_desc)
-#define SET_TX_DESC_RTS_DATA_RTY_LMT_8822B(__tx_desc, __value) \
- SET_TX_DESC_RTS_DATA_RTY_LMT(__tx_desc, __value)
-#define GET_TX_DESC_RTS_DATA_RTY_LMT_8822B(__tx_desc) \
- GET_TX_DESC_RTS_DATA_RTY_LMT(__tx_desc)
-#define SET_TX_DESC_RTY_LMT_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_RTY_LMT_EN(__tx_desc, __value)
-#define GET_TX_DESC_RTY_LMT_EN_8822B(__tx_desc) \
- GET_TX_DESC_RTY_LMT_EN(__tx_desc)
-#define SET_TX_DESC_RTS_RTY_LOWEST_RATE_8822B(__tx_desc, __value) \
- SET_TX_DESC_RTS_RTY_LOWEST_RATE(__tx_desc, __value)
-#define GET_TX_DESC_RTS_RTY_LOWEST_RATE_8822B(__tx_desc) \
- GET_TX_DESC_RTS_RTY_LOWEST_RATE(__tx_desc)
-#define SET_TX_DESC_DATA_RTY_LOWEST_RATE_8822B(__tx_desc, __value) \
- SET_TX_DESC_DATA_RTY_LOWEST_RATE(__tx_desc, __value)
-#define GET_TX_DESC_DATA_RTY_LOWEST_RATE_8822B(__tx_desc) \
- GET_TX_DESC_DATA_RTY_LOWEST_RATE(__tx_desc)
-#define SET_TX_DESC_TRY_RATE_8822B(__tx_desc, __value) \
- SET_TX_DESC_TRY_RATE(__tx_desc, __value)
-#define GET_TX_DESC_TRY_RATE_8822B(__tx_desc) GET_TX_DESC_TRY_RATE(__tx_desc)
-#define SET_TX_DESC_DATARATE_8822B(__tx_desc, __value) \
- SET_TX_DESC_DATARATE(__tx_desc, __value)
-#define GET_TX_DESC_DATARATE_8822B(__tx_desc) GET_TX_DESC_DATARATE(__tx_desc)
-
-/*TXDESC_WORD5*/
-
-#define SET_TX_DESC_POLLUTED_8822B(__tx_desc, __value) \
- SET_TX_DESC_POLLUTED(__tx_desc, __value)
-#define GET_TX_DESC_POLLUTED_8822B(__tx_desc) GET_TX_DESC_POLLUTED(__tx_desc)
-#define SET_TX_DESC_TXPWR_OFSET_8822B(__tx_desc, __value) \
- SET_TX_DESC_TXPWR_OFSET(__tx_desc, __value)
-#define GET_TX_DESC_TXPWR_OFSET_8822B(__tx_desc) \
- GET_TX_DESC_TXPWR_OFSET(__tx_desc)
-#define SET_TX_DESC_TX_ANT_8822B(__tx_desc, __value) \
- SET_TX_DESC_TX_ANT(__tx_desc, __value)
-#define GET_TX_DESC_TX_ANT_8822B(__tx_desc) GET_TX_DESC_TX_ANT(__tx_desc)
-#define SET_TX_DESC_PORT_ID_8822B(__tx_desc, __value) \
- SET_TX_DESC_PORT_ID(__tx_desc, __value)
-#define GET_TX_DESC_PORT_ID_8822B(__tx_desc) GET_TX_DESC_PORT_ID(__tx_desc)
-#define SET_TX_DESC_MULTIPLE_PORT_8822B(__tx_desc, __value) \
- SET_TX_DESC_MULTIPLE_PORT(__tx_desc, __value)
-#define GET_TX_DESC_MULTIPLE_PORT_8822B(__tx_desc) \
- GET_TX_DESC_MULTIPLE_PORT(__tx_desc)
-#define SET_TX_DESC_SIGNALING_TAPKT_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_SIGNALING_TAPKT_EN(__tx_desc, __value)
-#define GET_TX_DESC_SIGNALING_TAPKT_EN_8822B(__tx_desc) \
- GET_TX_DESC_SIGNALING_TAPKT_EN(__tx_desc)
-#define SET_TX_DESC_RTS_SC_8822B(__tx_desc, __value) \
- SET_TX_DESC_RTS_SC(__tx_desc, __value)
-#define GET_TX_DESC_RTS_SC_8822B(__tx_desc) GET_TX_DESC_RTS_SC(__tx_desc)
-#define SET_TX_DESC_RTS_SHORT_8822B(__tx_desc, __value) \
- SET_TX_DESC_RTS_SHORT(__tx_desc, __value)
-#define GET_TX_DESC_RTS_SHORT_8822B(__tx_desc) GET_TX_DESC_RTS_SHORT(__tx_desc)
-#define SET_TX_DESC_VCS_STBC_8822B(__tx_desc, __value) \
- SET_TX_DESC_VCS_STBC(__tx_desc, __value)
-#define GET_TX_DESC_VCS_STBC_8822B(__tx_desc) GET_TX_DESC_VCS_STBC(__tx_desc)
-#define SET_TX_DESC_DATA_STBC_8822B(__tx_desc, __value) \
- SET_TX_DESC_DATA_STBC(__tx_desc, __value)
-#define GET_TX_DESC_DATA_STBC_8822B(__tx_desc) GET_TX_DESC_DATA_STBC(__tx_desc)
-#define SET_TX_DESC_DATA_LDPC_8822B(__tx_desc, __value) \
- SET_TX_DESC_DATA_LDPC(__tx_desc, __value)
-#define GET_TX_DESC_DATA_LDPC_8822B(__tx_desc) GET_TX_DESC_DATA_LDPC(__tx_desc)
-#define SET_TX_DESC_DATA_BW_8822B(__tx_desc, __value) \
- SET_TX_DESC_DATA_BW(__tx_desc, __value)
-#define GET_TX_DESC_DATA_BW_8822B(__tx_desc) GET_TX_DESC_DATA_BW(__tx_desc)
-#define SET_TX_DESC_DATA_SHORT_8822B(__tx_desc, __value) \
- SET_TX_DESC_DATA_SHORT(__tx_desc, __value)
-#define GET_TX_DESC_DATA_SHORT_8822B(__tx_desc) \
- GET_TX_DESC_DATA_SHORT(__tx_desc)
-#define SET_TX_DESC_DATA_SC_8822B(__tx_desc, __value) \
- SET_TX_DESC_DATA_SC(__tx_desc, __value)
-#define GET_TX_DESC_DATA_SC_8822B(__tx_desc) GET_TX_DESC_DATA_SC(__tx_desc)
-
-/*TXDESC_WORD6*/
-
-#define SET_TX_DESC_ANTSEL_D_8822B(__tx_desc, __value) \
- SET_TX_DESC_ANTSEL_D(__tx_desc, __value)
-#define GET_TX_DESC_ANTSEL_D_8822B(__tx_desc) GET_TX_DESC_ANTSEL_D(__tx_desc)
-#define SET_TX_DESC_ANT_MAPD_8822B(__tx_desc, __value) \
- SET_TX_DESC_ANT_MAPD(__tx_desc, __value)
-#define GET_TX_DESC_ANT_MAPD_8822B(__tx_desc) GET_TX_DESC_ANT_MAPD(__tx_desc)
-#define SET_TX_DESC_ANT_MAPC_8822B(__tx_desc, __value) \
- SET_TX_DESC_ANT_MAPC(__tx_desc, __value)
-#define GET_TX_DESC_ANT_MAPC_8822B(__tx_desc) GET_TX_DESC_ANT_MAPC(__tx_desc)
-#define SET_TX_DESC_ANT_MAPB_8822B(__tx_desc, __value) \
- SET_TX_DESC_ANT_MAPB(__tx_desc, __value)
-#define GET_TX_DESC_ANT_MAPB_8822B(__tx_desc) GET_TX_DESC_ANT_MAPB(__tx_desc)
-#define SET_TX_DESC_ANT_MAPA_8822B(__tx_desc, __value) \
- SET_TX_DESC_ANT_MAPA(__tx_desc, __value)
-#define GET_TX_DESC_ANT_MAPA_8822B(__tx_desc) GET_TX_DESC_ANT_MAPA(__tx_desc)
-#define SET_TX_DESC_ANTSEL_C_8822B(__tx_desc, __value) \
- SET_TX_DESC_ANTSEL_C(__tx_desc, __value)
-#define GET_TX_DESC_ANTSEL_C_8822B(__tx_desc) GET_TX_DESC_ANTSEL_C(__tx_desc)
-#define SET_TX_DESC_ANTSEL_B_8822B(__tx_desc, __value) \
- SET_TX_DESC_ANTSEL_B(__tx_desc, __value)
-#define GET_TX_DESC_ANTSEL_B_8822B(__tx_desc) GET_TX_DESC_ANTSEL_B(__tx_desc)
-#define SET_TX_DESC_ANTSEL_A_8822B(__tx_desc, __value) \
- SET_TX_DESC_ANTSEL_A(__tx_desc, __value)
-#define GET_TX_DESC_ANTSEL_A_8822B(__tx_desc) GET_TX_DESC_ANTSEL_A(__tx_desc)
-#define SET_TX_DESC_MBSSID_8822B(__tx_desc, __value) \
- SET_TX_DESC_MBSSID(__tx_desc, __value)
-#define GET_TX_DESC_MBSSID_8822B(__tx_desc) GET_TX_DESC_MBSSID(__tx_desc)
-#define SET_TX_DESC_SW_DEFINE_8822B(__tx_desc, __value) \
- SET_TX_DESC_SW_DEFINE(__tx_desc, __value)
-#define GET_TX_DESC_SW_DEFINE_8822B(__tx_desc) GET_TX_DESC_SW_DEFINE(__tx_desc)
-
-/*TXDESC_WORD7*/
-
-#define SET_TX_DESC_DMA_TXAGG_NUM_8822B(__tx_desc, __value) \
- SET_TX_DESC_DMA_TXAGG_NUM(__tx_desc, __value)
-#define GET_TX_DESC_DMA_TXAGG_NUM_8822B(__tx_desc) \
- GET_TX_DESC_DMA_TXAGG_NUM(__tx_desc)
-#define SET_TX_DESC_FINAL_DATA_RATE_8822B(__tx_desc, __value) \
- SET_TX_DESC_FINAL_DATA_RATE(__tx_desc, __value)
-#define GET_TX_DESC_FINAL_DATA_RATE_8822B(__tx_desc) \
- GET_TX_DESC_FINAL_DATA_RATE(__tx_desc)
-#define SET_TX_DESC_NTX_MAP_8822B(__tx_desc, __value) \
- SET_TX_DESC_NTX_MAP(__tx_desc, __value)
-#define GET_TX_DESC_NTX_MAP_8822B(__tx_desc) GET_TX_DESC_NTX_MAP(__tx_desc)
-#define SET_TX_DESC_TX_BUFF_SIZE_8822B(__tx_desc, __value) \
- SET_TX_DESC_TX_BUFF_SIZE(__tx_desc, __value)
-#define GET_TX_DESC_TX_BUFF_SIZE_8822B(__tx_desc) \
- GET_TX_DESC_TX_BUFF_SIZE(__tx_desc)
-#define SET_TX_DESC_TXDESC_CHECKSUM_8822B(__tx_desc, __value) \
- SET_TX_DESC_TXDESC_CHECKSUM(__tx_desc, __value)
-#define GET_TX_DESC_TXDESC_CHECKSUM_8822B(__tx_desc) \
- GET_TX_DESC_TXDESC_CHECKSUM(__tx_desc)
-#define SET_TX_DESC_TIMESTAMP_8822B(__tx_desc, __value) \
- SET_TX_DESC_TIMESTAMP(__tx_desc, __value)
-#define GET_TX_DESC_TIMESTAMP_8822B(__tx_desc) GET_TX_DESC_TIMESTAMP(__tx_desc)
-
-/*TXDESC_WORD8*/
-
-#define SET_TX_DESC_TXWIFI_CP_8822B(__tx_desc, __value) \
- SET_TX_DESC_TXWIFI_CP(__tx_desc, __value)
-#define GET_TX_DESC_TXWIFI_CP_8822B(__tx_desc) GET_TX_DESC_TXWIFI_CP(__tx_desc)
-#define SET_TX_DESC_MAC_CP_8822B(__tx_desc, __value) \
- SET_TX_DESC_MAC_CP(__tx_desc, __value)
-#define GET_TX_DESC_MAC_CP_8822B(__tx_desc) GET_TX_DESC_MAC_CP(__tx_desc)
-#define SET_TX_DESC_STW_PKTRE_DIS_8822B(__tx_desc, __value) \
- SET_TX_DESC_STW_PKTRE_DIS(__tx_desc, __value)
-#define GET_TX_DESC_STW_PKTRE_DIS_8822B(__tx_desc) \
- GET_TX_DESC_STW_PKTRE_DIS(__tx_desc)
-#define SET_TX_DESC_STW_RB_DIS_8822B(__tx_desc, __value) \
- SET_TX_DESC_STW_RB_DIS(__tx_desc, __value)
-#define GET_TX_DESC_STW_RB_DIS_8822B(__tx_desc) \
- GET_TX_DESC_STW_RB_DIS(__tx_desc)
-#define SET_TX_DESC_STW_RATE_DIS_8822B(__tx_desc, __value) \
- SET_TX_DESC_STW_RATE_DIS(__tx_desc, __value)
-#define GET_TX_DESC_STW_RATE_DIS_8822B(__tx_desc) \
- GET_TX_DESC_STW_RATE_DIS(__tx_desc)
-#define SET_TX_DESC_STW_ANT_DIS_8822B(__tx_desc, __value) \
- SET_TX_DESC_STW_ANT_DIS(__tx_desc, __value)
-#define GET_TX_DESC_STW_ANT_DIS_8822B(__tx_desc) \
- GET_TX_DESC_STW_ANT_DIS(__tx_desc)
-#define SET_TX_DESC_STW_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_STW_EN(__tx_desc, __value)
-#define GET_TX_DESC_STW_EN_8822B(__tx_desc) GET_TX_DESC_STW_EN(__tx_desc)
-#define SET_TX_DESC_SMH_EN_8822B(__tx_desc, __value) \
- SET_TX_DESC_SMH_EN(__tx_desc, __value)
-#define GET_TX_DESC_SMH_EN_8822B(__tx_desc) GET_TX_DESC_SMH_EN(__tx_desc)
-#define SET_TX_DESC_TAILPAGE_L_8822B(__tx_desc, __value) \
- SET_TX_DESC_TAILPAGE_L(__tx_desc, __value)
-#define GET_TX_DESC_TAILPAGE_L_8822B(__tx_desc) \
- GET_TX_DESC_TAILPAGE_L(__tx_desc)
-#define SET_TX_DESC_SDIO_DMASEQ_8822B(__tx_desc, __value) \
- SET_TX_DESC_SDIO_DMASEQ(__tx_desc, __value)
-#define GET_TX_DESC_SDIO_DMASEQ_8822B(__tx_desc) \
- GET_TX_DESC_SDIO_DMASEQ(__tx_desc)
-#define SET_TX_DESC_NEXTHEADPAGE_L_8822B(__tx_desc, __value) \
- SET_TX_DESC_NEXTHEADPAGE_L(__tx_desc, __value)
-#define GET_TX_DESC_NEXTHEADPAGE_L_8822B(__tx_desc) \
- GET_TX_DESC_NEXTHEADPAGE_L(__tx_desc)
-#define SET_TX_DESC_EN_HWSEQ_8822B(__tx_desc, __value) \
- SET_TX_DESC_EN_HWSEQ(__tx_desc, __value)
-#define GET_TX_DESC_EN_HWSEQ_8822B(__tx_desc) GET_TX_DESC_EN_HWSEQ(__tx_desc)
-#define SET_TX_DESC_EN_HWEXSEQ_8822B(__tx_desc, __value) \
- SET_TX_DESC_EN_HWEXSEQ(__tx_desc, __value)
-#define GET_TX_DESC_EN_HWEXSEQ_8822B(__tx_desc) \
- GET_TX_DESC_EN_HWEXSEQ(__tx_desc)
-#define SET_TX_DESC_DATA_RC_8822B(__tx_desc, __value) \
- SET_TX_DESC_DATA_RC(__tx_desc, __value)
-#define GET_TX_DESC_DATA_RC_8822B(__tx_desc) GET_TX_DESC_DATA_RC(__tx_desc)
-#define SET_TX_DESC_BAR_RTY_TH_8822B(__tx_desc, __value) \
- SET_TX_DESC_BAR_RTY_TH(__tx_desc, __value)
-#define GET_TX_DESC_BAR_RTY_TH_8822B(__tx_desc) \
- GET_TX_DESC_BAR_RTY_TH(__tx_desc)
-#define SET_TX_DESC_RTS_RC_8822B(__tx_desc, __value) \
- SET_TX_DESC_RTS_RC(__tx_desc, __value)
-#define GET_TX_DESC_RTS_RC_8822B(__tx_desc) GET_TX_DESC_RTS_RC(__tx_desc)
-
-/*TXDESC_WORD9*/
-
-#define SET_TX_DESC_TAILPAGE_H_8822B(__tx_desc, __value) \
- SET_TX_DESC_TAILPAGE_H(__tx_desc, __value)
-#define GET_TX_DESC_TAILPAGE_H_8822B(__tx_desc) \
- GET_TX_DESC_TAILPAGE_H(__tx_desc)
-#define SET_TX_DESC_NEXTHEADPAGE_H_8822B(__tx_desc, __value) \
- SET_TX_DESC_NEXTHEADPAGE_H(__tx_desc, __value)
-#define GET_TX_DESC_NEXTHEADPAGE_H_8822B(__tx_desc) \
- GET_TX_DESC_NEXTHEADPAGE_H(__tx_desc)
-#define SET_TX_DESC_SW_SEQ_8822B(__tx_desc, __value) \
- SET_TX_DESC_SW_SEQ(__tx_desc, __value)
-#define GET_TX_DESC_SW_SEQ_8822B(__tx_desc) GET_TX_DESC_SW_SEQ(__tx_desc)
-#define SET_TX_DESC_TXBF_PATH_8822B(__tx_desc, __value) \
- SET_TX_DESC_TXBF_PATH(__tx_desc, __value)
-#define GET_TX_DESC_TXBF_PATH_8822B(__tx_desc) GET_TX_DESC_TXBF_PATH(__tx_desc)
-#define SET_TX_DESC_PADDING_LEN_8822B(__tx_desc, __value) \
- SET_TX_DESC_PADDING_LEN(__tx_desc, __value)
-#define GET_TX_DESC_PADDING_LEN_8822B(__tx_desc) \
- GET_TX_DESC_PADDING_LEN(__tx_desc)
-#define SET_TX_DESC_GROUP_BIT_IE_OFFSET_8822B(__tx_desc, __value) \
- SET_TX_DESC_GROUP_BIT_IE_OFFSET(__tx_desc, __value)
-#define GET_TX_DESC_GROUP_BIT_IE_OFFSET_8822B(__tx_desc) \
- GET_TX_DESC_GROUP_BIT_IE_OFFSET(__tx_desc)
-
-/*WORD10*/
-
-#define SET_TX_DESC_MU_DATARATE_8822B(__tx_desc, __value) \
- SET_TX_DESC_MU_DATARATE(__tx_desc, __value)
-#define GET_TX_DESC_MU_DATARATE_8822B(__tx_desc) \
- GET_TX_DESC_MU_DATARATE(__tx_desc)
-#define SET_TX_DESC_MU_RC_8822B(__tx_desc, __value) \
- SET_TX_DESC_MU_RC(__tx_desc, __value)
-#define GET_TX_DESC_MU_RC_8822B(__tx_desc) GET_TX_DESC_MU_RC(__tx_desc)
-#define SET_TX_DESC_SND_PKT_SEL_8822B(__tx_desc, __value) \
- SET_TX_DESC_SND_PKT_SEL(__tx_desc, __value)
-#define GET_TX_DESC_SND_PKT_SEL_8822B(__tx_desc) \
- GET_TX_DESC_SND_PKT_SEL(__tx_desc)
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h b/drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h
deleted file mode 100644
index 73b973d90137..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h
+++ /dev/null
@@ -1,495 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_TX_DESC_NIC_H_
-#define _HALMAC_TX_DESC_NIC_H_
-
-/*TXDESC_WORD0*/
-
-#define SET_TX_DESC_DISQSELSEQ(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 31, 1, __value)
-#define GET_TX_DESC_DISQSELSEQ(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x00, 31, 1)
-
-#define SET_TX_DESC_GF(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 30, 1, __value)
-#define GET_TX_DESC_GF(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x00, 30, 1)
-#define SET_TX_DESC_NO_ACM(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 29, 1, __value)
-#define GET_TX_DESC_NO_ACM(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x00, 29, 1)
-
-#define SET_TX_DESC_BCNPKT_TSF_CTRL(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 28, 1, __value)
-#define GET_TX_DESC_BCNPKT_TSF_CTRL(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x00, 28, 1)
-
-#define SET_TX_DESC_AMSDU_PAD_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 27, 1, __value)
-#define GET_TX_DESC_AMSDU_PAD_EN(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x00, 27, 1)
-
-#define SET_TX_DESC_LS(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 26, 1, __value)
-#define GET_TX_DESC_LS(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x00, 26, 1)
-#define SET_TX_DESC_HTC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 25, 1, __value)
-#define GET_TX_DESC_HTC(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x00, 25, 1)
-#define SET_TX_DESC_BMC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 24, 1, __value)
-#define GET_TX_DESC_BMC(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x00, 24, 1)
-#define SET_TX_DESC_OFFSET(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 16, 8, __value)
-#define GET_TX_DESC_OFFSET(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x00, 16, 8)
-#define SET_TX_DESC_TXPKTSIZE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x00, 0, 16, __value)
-#define GET_TX_DESC_TXPKTSIZE(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x00, 0, 16)
-
-/*TXDESC_WORD1*/
-
-#define SET_TX_DESC_MOREDATA(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 29, 1, __value)
-#define GET_TX_DESC_MOREDATA(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x04, 29, 1)
-#define SET_TX_DESC_PKT_OFFSET(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 24, 5, __value)
-#define GET_TX_DESC_PKT_OFFSET(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x04, 24, 5)
-#define SET_TX_DESC_SEC_TYPE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 22, 2, __value)
-#define GET_TX_DESC_SEC_TYPE(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x04, 22, 2)
-#define SET_TX_DESC_EN_DESC_ID(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 21, 1, __value)
-#define GET_TX_DESC_EN_DESC_ID(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x04, 21, 1)
-#define SET_TX_DESC_RATE_ID(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 16, 5, __value)
-#define GET_TX_DESC_RATE_ID(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x04, 16, 5)
-#define SET_TX_DESC_PIFS(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 15, 1, __value)
-#define GET_TX_DESC_PIFS(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x04, 15, 1)
-#define SET_TX_DESC_LSIG_TXOP_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 14, 1, __value)
-#define GET_TX_DESC_LSIG_TXOP_EN(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x04, 14, 1)
-#define SET_TX_DESC_RD_NAV_EXT(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 13, 1, __value)
-#define GET_TX_DESC_RD_NAV_EXT(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x04, 13, 1)
-#define SET_TX_DESC_QSEL(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 8, 5, __value)
-#define GET_TX_DESC_QSEL(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x04, 8, 5)
-#define SET_TX_DESC_MACID(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x04, 0, 7, __value)
-#define GET_TX_DESC_MACID(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x04, 0, 7)
-
-/*TXDESC_WORD2*/
-
-#define SET_TX_DESC_HW_AES_IV(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 31, 1, __value)
-#define GET_TX_DESC_HW_AES_IV(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x08, 31, 1)
-
-#define SET_TX_DESC_FTM_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 30, 1, __value)
-#define GET_TX_DESC_FTM_EN(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 30, 1)
-
-#define SET_TX_DESC_G_ID(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 24, 6, __value)
-#define GET_TX_DESC_G_ID(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 24, 6)
-#define SET_TX_DESC_BT_NULL(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 23, 1, __value)
-#define GET_TX_DESC_BT_NULL(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 23, 1)
-#define SET_TX_DESC_AMPDU_DENSITY(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 20, 3, __value)
-#define GET_TX_DESC_AMPDU_DENSITY(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x08, 20, 3)
-#ifdef SET_TX_DESC_SPE_RPT
-#undef SET_TX_DESC_SPE_RPT
-#endif
-#define SET_TX_DESC_SPE_RPT(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 19, 1, __value)
-#define GET_TX_DESC_SPE_RPT(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 19, 1)
-#define SET_TX_DESC_RAW(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 18, 1, __value)
-#define GET_TX_DESC_RAW(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 18, 1)
-#define SET_TX_DESC_MOREFRAG(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 17, 1, __value)
-#define GET_TX_DESC_MOREFRAG(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x08, 17, 1)
-#define SET_TX_DESC_BK(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 16, 1, __value)
-#define GET_TX_DESC_BK(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 16, 1)
-#define SET_TX_DESC_NULL_1(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 15, 1, __value)
-#define GET_TX_DESC_NULL_1(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 15, 1)
-#define SET_TX_DESC_NULL_0(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 14, 1, __value)
-#define GET_TX_DESC_NULL_0(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 14, 1)
-#define SET_TX_DESC_RDG_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 13, 1, __value)
-#define GET_TX_DESC_RDG_EN(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 13, 1)
-#define SET_TX_DESC_AGG_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 12, 1, __value)
-#define GET_TX_DESC_AGG_EN(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 12, 1)
-#define SET_TX_DESC_CCA_RTS(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 10, 2, __value)
-#define GET_TX_DESC_CCA_RTS(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 10, 2)
-
-#define SET_TX_DESC_TRI_FRAME(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 9, 1, __value)
-#define GET_TX_DESC_TRI_FRAME(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x08, 9, 1)
-
-#define SET_TX_DESC_P_AID(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x08, 0, 9, __value)
-#define GET_TX_DESC_P_AID(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x08, 0, 9)
-
-/*TXDESC_WORD3*/
-
-#define SET_TX_DESC_AMPDU_MAX_TIME(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 24, 8, __value)
-#define GET_TX_DESC_AMPDU_MAX_TIME(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 24, 8)
-#define SET_TX_DESC_NDPA(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 22, 2, __value)
-#define GET_TX_DESC_NDPA(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 22, 2)
-#define SET_TX_DESC_MAX_AGG_NUM(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 17, 5, __value)
-#define GET_TX_DESC_MAX_AGG_NUM(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 17, 5)
-#define SET_TX_DESC_USE_MAX_TIME_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 16, 1, __value)
-#define GET_TX_DESC_USE_MAX_TIME_EN(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 16, 1)
-#define SET_TX_DESC_NAVUSEHDR(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 15, 1, __value)
-#define GET_TX_DESC_NAVUSEHDR(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 15, 1)
-
-#define SET_TX_DESC_CHK_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 14, 1, __value)
-#define GET_TX_DESC_CHK_EN(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 14, 1)
-
-#define SET_TX_DESC_HW_RTS_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 13, 1, __value)
-#define GET_TX_DESC_HW_RTS_EN(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 13, 1)
-#define SET_TX_DESC_RTSEN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 12, 1, __value)
-#define GET_TX_DESC_RTSEN(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 12, 1)
-#define SET_TX_DESC_CTS2SELF(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 11, 1, __value)
-#define GET_TX_DESC_CTS2SELF(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 11, 1)
-#define SET_TX_DESC_DISDATAFB(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 10, 1, __value)
-#define GET_TX_DESC_DISDATAFB(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 10, 1)
-#define SET_TX_DESC_DISRTSFB(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 9, 1, __value)
-#define GET_TX_DESC_DISRTSFB(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 9, 1)
-#define SET_TX_DESC_USE_RATE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 8, 1, __value)
-#define GET_TX_DESC_USE_RATE(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 8, 1)
-#define SET_TX_DESC_HW_SSN_SEL(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 6, 2, __value)
-#define GET_TX_DESC_HW_SSN_SEL(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 6, 2)
-
-#define SET_TX_DESC_WHEADER_LEN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x0C, 0, 5, __value)
-#define GET_TX_DESC_WHEADER_LEN(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x0C, 0, 5)
-
-/*TXDESC_WORD4*/
-
-#define SET_TX_DESC_PCTS_MASK_IDX(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 30, 2, __value)
-#define GET_TX_DESC_PCTS_MASK_IDX(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x10, 30, 2)
-#define SET_TX_DESC_PCTS_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 29, 1, __value)
-#define GET_TX_DESC_PCTS_EN(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x10, 29, 1)
-#define SET_TX_DESC_RTSRATE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 24, 5, __value)
-#define GET_TX_DESC_RTSRATE(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x10, 24, 5)
-#define SET_TX_DESC_RTS_DATA_RTY_LMT(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 18, 6, __value)
-#define GET_TX_DESC_RTS_DATA_RTY_LMT(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x10, 18, 6)
-#define SET_TX_DESC_RTY_LMT_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 17, 1, __value)
-#define GET_TX_DESC_RTY_LMT_EN(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x10, 17, 1)
-#define SET_TX_DESC_RTS_RTY_LOWEST_RATE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 13, 4, __value)
-#define GET_TX_DESC_RTS_RTY_LOWEST_RATE(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x10, 13, 4)
-#define SET_TX_DESC_DATA_RTY_LOWEST_RATE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 8, 5, __value)
-#define GET_TX_DESC_DATA_RTY_LOWEST_RATE(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x10, 8, 5)
-#define SET_TX_DESC_TRY_RATE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 7, 1, __value)
-#define GET_TX_DESC_TRY_RATE(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x10, 7, 1)
-#define SET_TX_DESC_DATARATE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x10, 0, 7, __value)
-#define GET_TX_DESC_DATARATE(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x10, 0, 7)
-
-/*TXDESC_WORD5*/
-
-#define SET_TX_DESC_POLLUTED(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 31, 1, __value)
-#define GET_TX_DESC_POLLUTED(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 31, 1)
-
-#define SET_TX_DESC_TXPWR_OFSET(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 28, 3, __value)
-#define GET_TX_DESC_TXPWR_OFSET(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 28, 3)
-#define SET_TX_DESC_TX_ANT(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 24, 4, __value)
-#define GET_TX_DESC_TX_ANT(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x14, 24, 4)
-#define SET_TX_DESC_PORT_ID(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 21, 3, __value)
-#define GET_TX_DESC_PORT_ID(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x14, 21, 3)
-
-#define SET_TX_DESC_MULTIPLE_PORT(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 18, 3, __value)
-#define GET_TX_DESC_MULTIPLE_PORT(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 18, 3)
-
-#define SET_TX_DESC_SIGNALING_TAPKT_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 17, 1, __value)
-#define GET_TX_DESC_SIGNALING_TAPKT_EN(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 17, 1)
-
-#define SET_TX_DESC_RTS_SC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 13, 4, __value)
-#define GET_TX_DESC_RTS_SC(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x14, 13, 4)
-#define SET_TX_DESC_RTS_SHORT(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 12, 1, __value)
-#define GET_TX_DESC_RTS_SHORT(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 12, 1)
-
-#define SET_TX_DESC_VCS_STBC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 10, 2, __value)
-#define GET_TX_DESC_VCS_STBC(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 10, 2)
-
-#define SET_TX_DESC_DATA_STBC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 8, 2, __value)
-#define GET_TX_DESC_DATA_STBC(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 8, 2)
-
-#define SET_TX_DESC_DATA_LDPC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 7, 1, __value)
-#define GET_TX_DESC_DATA_LDPC(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 7, 1)
-
-#define SET_TX_DESC_DATA_BW(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 5, 2, __value)
-#define GET_TX_DESC_DATA_BW(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x14, 5, 2)
-#define SET_TX_DESC_DATA_SHORT(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 4, 1, __value)
-#define GET_TX_DESC_DATA_SHORT(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x14, 4, 1)
-#define SET_TX_DESC_DATA_SC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x14, 0, 4, __value)
-#define GET_TX_DESC_DATA_SC(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x14, 0, 4)
-
-/*TXDESC_WORD6*/
-
-#define SET_TX_DESC_ANTSEL_D(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 30, 2, __value)
-#define GET_TX_DESC_ANTSEL_D(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 30, 2)
-#define SET_TX_DESC_ANT_MAPD(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 28, 2, __value)
-#define GET_TX_DESC_ANT_MAPD(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 28, 2)
-#define SET_TX_DESC_ANT_MAPC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 26, 2, __value)
-#define GET_TX_DESC_ANT_MAPC(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 26, 2)
-#define SET_TX_DESC_ANT_MAPB(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 24, 2, __value)
-#define GET_TX_DESC_ANT_MAPB(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 24, 2)
-#define SET_TX_DESC_ANT_MAPA(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 22, 2, __value)
-#define GET_TX_DESC_ANT_MAPA(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 22, 2)
-#define SET_TX_DESC_ANTSEL_C(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 20, 2, __value)
-#define GET_TX_DESC_ANTSEL_C(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 20, 2)
-#define SET_TX_DESC_ANTSEL_B(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 18, 2, __value)
-#define GET_TX_DESC_ANTSEL_B(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 18, 2)
-
-#define SET_TX_DESC_ANTSEL_A(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 16, 2, __value)
-#define GET_TX_DESC_ANTSEL_A(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 16, 2)
-#define SET_TX_DESC_MBSSID(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 12, 4, __value)
-#define GET_TX_DESC_MBSSID(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x18, 12, 4)
-#ifdef SET_TX_DESC_SW_DEFINE
-#undef SET_TX_DESC_SW_DEFINE
-#endif
-#define SET_TX_DESC_SW_DEFINE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x18, 0, 12, __value)
-#define GET_TX_DESC_SW_DEFINE(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x18, 0, 12)
-
-/*TXDESC_WORD7*/
-
-#define SET_TX_DESC_DMA_TXAGG_NUM(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x1C, 24, 8, __value)
-#define GET_TX_DESC_DMA_TXAGG_NUM(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x1C, 24, 8)
-
-#define SET_TX_DESC_FINAL_DATA_RATE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x1C, 24, 8, __value)
-#define GET_TX_DESC_FINAL_DATA_RATE(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x1C, 24, 8)
-#define SET_TX_DESC_NTX_MAP(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x1C, 20, 4, __value)
-#define GET_TX_DESC_NTX_MAP(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x1C, 20, 4)
-
-#define SET_TX_DESC_TX_BUFF_SIZE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x1C, 0, 16, __value)
-#define GET_TX_DESC_TX_BUFF_SIZE(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x1C, 0, 16)
-#define SET_TX_DESC_TXDESC_CHECKSUM(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x1C, 0, 16, __value)
-#define GET_TX_DESC_TXDESC_CHECKSUM(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x1C, 0, 16)
-#define SET_TX_DESC_TIMESTAMP(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x1C, 0, 16, __value)
-#define GET_TX_DESC_TIMESTAMP(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x1C, 0, 16)
-
-/*TXDESC_WORD8*/
-
-#define SET_TX_DESC_TXWIFI_CP(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 31, 1, __value)
-#define GET_TX_DESC_TXWIFI_CP(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 31, 1)
-#define SET_TX_DESC_MAC_CP(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 30, 1, __value)
-#define GET_TX_DESC_MAC_CP(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x20, 30, 1)
-#define SET_TX_DESC_STW_PKTRE_DIS(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 29, 1, __value)
-#define GET_TX_DESC_STW_PKTRE_DIS(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 29, 1)
-#define SET_TX_DESC_STW_RB_DIS(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 28, 1, __value)
-#define GET_TX_DESC_STW_RB_DIS(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 28, 1)
-#define SET_TX_DESC_STW_RATE_DIS(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 27, 1, __value)
-#define GET_TX_DESC_STW_RATE_DIS(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 27, 1)
-#define SET_TX_DESC_STW_ANT_DIS(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 26, 1, __value)
-#define GET_TX_DESC_STW_ANT_DIS(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 26, 1)
-#define SET_TX_DESC_STW_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 25, 1, __value)
-#define GET_TX_DESC_STW_EN(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x20, 25, 1)
-#define SET_TX_DESC_SMH_EN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 24, 1, __value)
-#define GET_TX_DESC_SMH_EN(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x20, 24, 1)
-
-#define SET_TX_DESC_TAILPAGE_L(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 24, 8, __value)
-#define GET_TX_DESC_TAILPAGE_L(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 24, 8)
-
-#define SET_TX_DESC_SDIO_DMASEQ(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 16, 8, __value)
-#define GET_TX_DESC_SDIO_DMASEQ(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 16, 8)
-
-#define SET_TX_DESC_NEXTHEADPAGE_L(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 16, 8, __value)
-#define GET_TX_DESC_NEXTHEADPAGE_L(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 16, 8)
-#define SET_TX_DESC_EN_HWSEQ(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 15, 1, __value)
-#define GET_TX_DESC_EN_HWSEQ(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 15, 1)
-
-#define SET_TX_DESC_EN_HWEXSEQ(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 14, 1, __value)
-#define GET_TX_DESC_EN_HWEXSEQ(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 14, 1)
-
-#define SET_TX_DESC_DATA_RC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 8, 6, __value)
-#define GET_TX_DESC_DATA_RC(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x20, 8, 6)
-#define SET_TX_DESC_BAR_RTY_TH(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 6, 2, __value)
-#define GET_TX_DESC_BAR_RTY_TH(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x20, 6, 2)
-#define SET_TX_DESC_RTS_RC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x20, 0, 6, __value)
-#define GET_TX_DESC_RTS_RC(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x20, 0, 6)
-
-/*TXDESC_WORD9*/
-
-#define SET_TX_DESC_TAILPAGE_H(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x24, 28, 4, __value)
-#define GET_TX_DESC_TAILPAGE_H(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x24, 28, 4)
-#define SET_TX_DESC_NEXTHEADPAGE_H(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x24, 24, 4, __value)
-#define GET_TX_DESC_NEXTHEADPAGE_H(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x24, 24, 4)
-
-#define SET_TX_DESC_SW_SEQ(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x24, 12, 12, __value)
-#define GET_TX_DESC_SW_SEQ(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x24, 12, 12)
-#define SET_TX_DESC_TXBF_PATH(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x24, 11, 1, __value)
-#define GET_TX_DESC_TXBF_PATH(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x24, 11, 1)
-#define SET_TX_DESC_PADDING_LEN(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x24, 0, 11, __value)
-#define GET_TX_DESC_PADDING_LEN(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x24, 0, 11)
-#define SET_TX_DESC_GROUP_BIT_IE_OFFSET(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x24, 0, 8, __value)
-#define GET_TX_DESC_GROUP_BIT_IE_OFFSET(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x24, 0, 8)
-
-/*WORD10*/
-
-#define SET_TX_DESC_MU_DATARATE(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x28, 8, 8, __value)
-#define GET_TX_DESC_MU_DATARATE(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x28, 8, 8)
-#define SET_TX_DESC_MU_RC(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x28, 4, 4, __value)
-#define GET_TX_DESC_MU_RC(__tx_desc) LE_BITS_TO_4BYTE(__tx_desc + 0x28, 4, 4)
-#define SET_TX_DESC_SND_PKT_SEL(__tx_desc, __value) \
- SET_BITS_TO_LE_4BYTE(__tx_desc + 0x28, 0, 2, __value)
-#define GET_TX_DESC_SND_PKT_SEL(__tx_desc) \
- LE_BITS_TO_4BYTE(__tx_desc + 0x28, 0, 2)
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_type.h b/drivers/staging/rtlwifi/halmac/halmac_type.h
deleted file mode 100644
index 51d758b6433b..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_type.h
+++ /dev/null
@@ -1,1923 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _HALMAC_TYPE_H_
-#define _HALMAC_TYPE_H_
-
-#include "halmac_2_platform.h"
-#include "halmac_fw_info.h"
-#include "halmac_intf_phy_cmd.h"
-
-#define HALMAC_SCAN_CH_NUM_MAX 28
-#define HALMAC_BCN_IE_BMP_SIZE 24 /* ID0~ID191, 192/8=24 */
-#define HALMAC_PHY_PARAMETER_SIZE 12
-#define HALMAC_PHY_PARAMETER_MAX_NUM 128
-#define HALMAC_MAX_SSID_LEN 32
-#define HALMAC_SUPPORT_NLO_NUM 16
-#define HALMAC_SUPPORT_PROBE_REQ_NUM 8
-#define HALMC_DDMA_POLLING_COUNT 1000
-#define API_ARRAY_SIZE 32
-
-/* platform api */
-#define PLATFORM_SDIO_CMD52_READ \
- halmac_adapter->halmac_platform_api->SDIO_CMD52_READ
-#define PLATFORM_SDIO_CMD53_READ_8 \
- halmac_adapter->halmac_platform_api->SDIO_CMD53_READ_8
-#define PLATFORM_SDIO_CMD53_READ_16 \
- halmac_adapter->halmac_platform_api->SDIO_CMD53_READ_16
-#define PLATFORM_SDIO_CMD53_READ_32 \
- halmac_adapter->halmac_platform_api->SDIO_CMD53_READ_32
-#define PLATFORM_SDIO_CMD53_READ_N \
- halmac_adapter->halmac_platform_api->SDIO_CMD53_READ_N
-#define PLATFORM_SDIO_CMD52_WRITE \
- halmac_adapter->halmac_platform_api->SDIO_CMD52_WRITE
-#define PLATFORM_SDIO_CMD53_WRITE_8 \
- halmac_adapter->halmac_platform_api->SDIO_CMD53_WRITE_8
-#define PLATFORM_SDIO_CMD53_WRITE_16 \
- halmac_adapter->halmac_platform_api->SDIO_CMD53_WRITE_16
-#define PLATFORM_SDIO_CMD53_WRITE_32 \
- halmac_adapter->halmac_platform_api->SDIO_CMD53_WRITE_32
-
-#define PLATFORM_REG_READ_8 halmac_adapter->halmac_platform_api->REG_READ_8
-#define PLATFORM_REG_READ_16 halmac_adapter->halmac_platform_api->REG_READ_16
-#define PLATFORM_REG_READ_32 halmac_adapter->halmac_platform_api->REG_READ_32
-#define PLATFORM_REG_WRITE_8 halmac_adapter->halmac_platform_api->REG_WRITE_8
-#define PLATFORM_REG_WRITE_16 halmac_adapter->halmac_platform_api->REG_WRITE_16
-#define PLATFORM_REG_WRITE_32 halmac_adapter->halmac_platform_api->REG_WRITE_32
-
-#define PLATFORM_SEND_RSVD_PAGE \
- halmac_adapter->halmac_platform_api->SEND_RSVD_PAGE
-#define PLATFORM_SEND_H2C_PKT halmac_adapter->halmac_platform_api->SEND_H2C_PKT
-
-#define PLATFORM_EVENT_INDICATION \
- halmac_adapter->halmac_platform_api->EVENT_INDICATION
-
-#define HALMAC_RT_TRACE(drv_adapter, comp, level, fmt, ...) \
- RT_TRACE(drv_adapter, COMP_HALMAC, level, fmt, ##__VA_ARGS__)
-
-#define HALMAC_REG_READ_8 halmac_api->halmac_reg_read_8
-#define HALMAC_REG_READ_16 halmac_api->halmac_reg_read_16
-#define HALMAC_REG_READ_32 halmac_api->halmac_reg_read_32
-#define HALMAC_REG_WRITE_8 halmac_api->halmac_reg_write_8
-#define HALMAC_REG_WRITE_16 halmac_api->halmac_reg_write_16
-#define HALMAC_REG_WRITE_32 halmac_api->halmac_reg_write_32
-#define HALMAC_REG_SDIO_CMD53_READ_N halmac_api->halmac_reg_sdio_cmd53_read_n
-
-/* Swap Little-endian <-> Big-endian */
-
-/* 1 -> Little endian, 0 -> Big endian */
-#if HALMAC_SYSTEM_ENDIAN
-#else
-#endif
-
-#define HALMAC_ALIGN(x, a) HALMAC_ALIGN_MASK(x, (a) - 1)
-#define HALMAC_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-/* HALMAC API return status*/
-enum halmac_ret_status {
- HALMAC_RET_SUCCESS = 0x00,
- HALMAC_RET_SUCCESS_ENQUEUE = 0x01,
- HALMAC_RET_PLATFORM_API_NULL = 0x02,
- HALMAC_RET_EFUSE_SIZE_INCORRECT = 0x03,
- HALMAC_RET_MALLOC_FAIL = 0x04,
- HALMAC_RET_ADAPTER_INVALID = 0x05,
- HALMAC_RET_ITF_INCORRECT = 0x06,
- HALMAC_RET_DLFW_FAIL = 0x07,
- HALMAC_RET_PORT_NOT_SUPPORT = 0x08,
- HALMAC_RET_TRXMODE_NOT_SUPPORT = 0x09,
- HALMAC_RET_INIT_LLT_FAIL = 0x0A,
- HALMAC_RET_POWER_STATE_INVALID = 0x0B,
- HALMAC_RET_H2C_ACK_NOT_RECEIVED = 0x0C,
- HALMAC_RET_DL_RSVD_PAGE_FAIL = 0x0D,
- HALMAC_RET_EFUSE_R_FAIL = 0x0E,
- HALMAC_RET_EFUSE_W_FAIL = 0x0F,
- HALMAC_RET_H2C_SW_RES_FAIL = 0x10,
- HALMAC_RET_SEND_H2C_FAIL = 0x11,
- HALMAC_RET_PARA_NOT_SUPPORT = 0x12,
- HALMAC_RET_PLATFORM_API_INCORRECT = 0x13,
- HALMAC_RET_ENDIAN_ERR = 0x14,
- HALMAC_RET_FW_SIZE_ERR = 0x15,
- HALMAC_RET_TRX_MODE_NOT_SUPPORT = 0x16,
- HALMAC_RET_FAIL = 0x17,
- HALMAC_RET_CHANGE_PS_FAIL = 0x18,
- HALMAC_RET_CFG_PARA_FAIL = 0x19,
- HALMAC_RET_UPDATE_PROBE_FAIL = 0x1A,
- HALMAC_RET_SCAN_FAIL = 0x1B,
- HALMAC_RET_STOP_SCAN_FAIL = 0x1C,
- HALMAC_RET_BCN_PARSER_CMD_FAIL = 0x1D,
- HALMAC_RET_POWER_ON_FAIL = 0x1E,
- HALMAC_RET_POWER_OFF_FAIL = 0x1F,
- HALMAC_RET_RX_AGG_MODE_FAIL = 0x20,
- HALMAC_RET_DATA_BUF_NULL = 0x21,
- HALMAC_RET_DATA_SIZE_INCORRECT = 0x22,
- HALMAC_RET_QSEL_INCORRECT = 0x23,
- HALMAC_RET_DMA_MAP_INCORRECT = 0x24,
- HALMAC_RET_SEND_ORIGINAL_H2C_FAIL = 0x25,
- HALMAC_RET_DDMA_FAIL = 0x26,
- HALMAC_RET_FW_CHECKSUM_FAIL = 0x27,
- HALMAC_RET_PWRSEQ_POLLING_FAIL = 0x28,
- HALMAC_RET_PWRSEQ_CMD_INCORRECT = 0x29,
- HALMAC_RET_WRITE_DATA_FAIL = 0x2A,
- HALMAC_RET_DUMP_FIFOSIZE_INCORRECT = 0x2B,
- HALMAC_RET_NULL_POINTER = 0x2C,
- HALMAC_RET_PROBE_NOT_FOUND = 0x2D,
- HALMAC_RET_FW_NO_MEMORY = 0x2E,
- HALMAC_RET_H2C_STATUS_ERR = 0x2F,
- HALMAC_RET_GET_H2C_SPACE_ERR = 0x30,
- HALMAC_RET_H2C_SPACE_FULL = 0x31,
- HALMAC_RET_DATAPACK_NO_FOUND = 0x32,
- HALMAC_RET_CANNOT_FIND_H2C_RESOURCE = 0x33,
- HALMAC_RET_TX_DMA_ERR = 0x34,
- HALMAC_RET_RX_DMA_ERR = 0x35,
- HALMAC_RET_CHIP_NOT_SUPPORT = 0x36,
- HALMAC_RET_FREE_SPACE_NOT_ENOUGH = 0x37,
- HALMAC_RET_CH_SW_SEQ_WRONG = 0x38,
- HALMAC_RET_CH_SW_NO_BUF = 0x39,
- HALMAC_RET_SW_CASE_NOT_SUPPORT = 0x3A,
- HALMAC_RET_CONVERT_SDIO_OFFSET_FAIL = 0x3B,
- HALMAC_RET_INVALID_SOUNDING_SETTING = 0x3C,
- HALMAC_RET_GEN_INFO_NOT_SENT = 0x3D,
- HALMAC_RET_STATE_INCORRECT = 0x3E,
- HALMAC_RET_H2C_BUSY = 0x3F,
- HALMAC_RET_INVALID_FEATURE_ID = 0x40,
- HALMAC_RET_BUFFER_TOO_SMALL = 0x41,
- HALMAC_RET_ZERO_LEN_RSVD_PACKET = 0x42,
- HALMAC_RET_BUSY_STATE = 0x43,
- HALMAC_RET_ERROR_STATE = 0x44,
- HALMAC_RET_API_INVALID = 0x45,
- HALMAC_RET_POLLING_BCN_VALID_FAIL = 0x46,
- HALMAC_RET_SDIO_LEAVE_SUSPEND_FAIL = 0x47,
- HALMAC_RET_EEPROM_PARSING_FAIL = 0x48,
- HALMAC_RET_EFUSE_NOT_ENOUGH = 0x49,
- HALMAC_RET_WRONG_ARGUMENT = 0x4A,
- HALMAC_RET_NOT_SUPPORT = 0x4B,
- HALMAC_RET_C2H_NOT_HANDLED = 0x4C,
- HALMAC_RET_PARA_SENDING = 0x4D,
- HALMAC_RET_CFG_DLFW_SIZE_FAIL = 0x4E,
- HALMAC_RET_CFG_TXFIFO_PAGE_FAIL = 0x4F,
- HALMAC_RET_SWITCH_CASE_ERROR = 0x50,
- HALMAC_RET_EFUSE_BANK_INCORRECT = 0x51,
- HALMAC_RET_SWITCH_EFUSE_BANK_FAIL = 0x52,
- HALMAC_RET_USB_MODE_UNCHANGE = 0x53,
- HALMAC_RET_NO_DLFW = 0x54,
- HALMAC_RET_USB2_3_SWITCH_UNSUPPORT = 0x55,
- HALMAC_RET_BIP_NO_SUPPORT = 0x56,
- HALMAC_RET_ENTRY_INDEX_ERROR = 0x57,
- HALMAC_RET_ENTRY_KEY_ID_ERROR = 0x58,
- HALMAC_RET_DRV_DL_ERR = 0x59,
- HALMAC_RET_OQT_NOT_ENOUGH = 0x5A,
- HALMAC_RET_PWR_UNCHANGE = 0x5B,
- HALMAC_RET_FW_NO_SUPPORT = 0x60,
- HALMAC_RET_TXFIFO_NO_EMPTY = 0x61,
-};
-
-enum halmac_mac_clock_hw_def {
- HALMAC_MAC_CLOCK_HW_DEF_80M = 0,
- HALMAC_MAC_CLOCK_HW_DEF_40M = 1,
- HALMAC_MAC_CLOCK_HW_DEF_20M = 2,
-};
-
-/* Rx aggregation parameters */
-enum halmac_normal_rxagg_th_to {
- HALMAC_NORMAL_RXAGG_THRESHOLD = 0xFF,
- HALMAC_NORMAL_RXAGG_TIMEOUT = 0x01,
-};
-
-enum halmac_loopback_rxagg_th_to {
- HALMAC_LOOPBACK_RXAGG_THRESHOLD = 0xFF,
- HALMAC_LOOPBACK_RXAGG_TIMEOUT = 0x01,
-};
-
-/* Chip ID*/
-enum halmac_chip_id {
- HALMAC_CHIP_ID_8822B = 0,
- HALMAC_CHIP_ID_8821C = 1,
- HALMAC_CHIP_ID_8814B = 2,
- HALMAC_CHIP_ID_8197F = 3,
- HALMAC_CHIP_ID_UNDEFINE = 0x7F,
-};
-
-enum halmac_chip_id_hw_def {
- HALMAC_CHIP_ID_HW_DEF_8723A = 0x01,
- HALMAC_CHIP_ID_HW_DEF_8188E = 0x02,
- HALMAC_CHIP_ID_HW_DEF_8881A = 0x03,
- HALMAC_CHIP_ID_HW_DEF_8812A = 0x04,
- HALMAC_CHIP_ID_HW_DEF_8821A = 0x05,
- HALMAC_CHIP_ID_HW_DEF_8723B = 0x06,
- HALMAC_CHIP_ID_HW_DEF_8192E = 0x07,
- HALMAC_CHIP_ID_HW_DEF_8814A = 0x08,
- HALMAC_CHIP_ID_HW_DEF_8821C = 0x09,
- HALMAC_CHIP_ID_HW_DEF_8822B = 0x0A,
- HALMAC_CHIP_ID_HW_DEF_8703B = 0x0B,
- HALMAC_CHIP_ID_HW_DEF_8188F = 0x0C,
- HALMAC_CHIP_ID_HW_DEF_8192F = 0x0D,
- HALMAC_CHIP_ID_HW_DEF_8197F = 0x0E,
- HALMAC_CHIP_ID_HW_DEF_8723D = 0x0F,
- HALMAC_CHIP_ID_HW_DEF_8814B = 0x10,
- HALMAC_CHIP_ID_HW_DEF_UNDEFINE = 0x7F,
- HALMAC_CHIP_ID_HW_DEF_PS = 0xEA,
-};
-
-/* Chip Version*/
-enum halmac_chip_ver {
- HALMAC_CHIP_VER_A_CUT = 0x00,
- HALMAC_CHIP_VER_B_CUT = 0x01,
- HALMAC_CHIP_VER_C_CUT = 0x02,
- HALMAC_CHIP_VER_D_CUT = 0x03,
- HALMAC_CHIP_VER_E_CUT = 0x04,
- HALMAC_CHIP_VER_F_CUT = 0x05,
- HALMAC_CHIP_VER_TEST = 0xFF,
- HALMAC_CHIP_VER_UNDEFINE = 0x7FFF,
-};
-
-/* Network type select */
-enum halmac_network_type_select {
- HALMAC_NETWORK_NO_LINK = 0,
- HALMAC_NETWORK_ADHOC = 1,
- HALMAC_NETWORK_INFRASTRUCTURE = 2,
- HALMAC_NETWORK_AP = 3,
- HALMAC_NETWORK_UNDEFINE = 0x7F,
-};
-
-/* Transfer mode select */
-enum halmac_trnsfer_mode_select {
- HALMAC_TRNSFER_NORMAL = 0x0,
- HALMAC_TRNSFER_LOOPBACK_DIRECT = 0xB,
- HALMAC_TRNSFER_LOOPBACK_DELAY = 0x3,
- HALMAC_TRNSFER_UNDEFINE = 0x7F,
-};
-
-/* Queue select */
-enum halmac_dma_mapping {
- HALMAC_DMA_MAPPING_EXTRA = 0,
- HALMAC_DMA_MAPPING_LOW = 1,
- HALMAC_DMA_MAPPING_NORMAL = 2,
- HALMAC_DMA_MAPPING_HIGH = 3,
- HALMAC_DMA_MAPPING_UNDEFINE = 0x7F,
-};
-
-#define HALMAC_MAP2_HQ HALMAC_DMA_MAPPING_HIGH
-#define HALMAC_MAP2_NQ HALMAC_DMA_MAPPING_NORMAL
-#define HALMAC_MAP2_LQ HALMAC_DMA_MAPPING_LOW
-#define HALMAC_MAP2_EXQ HALMAC_DMA_MAPPING_EXTRA
-#define HALMAC_MAP2_UNDEF HALMAC_DMA_MAPPING_UNDEFINE
-
-/* TXDESC queue select TID */
-enum halmac_txdesc_queue_tid {
- HALMAC_TXDESC_QSEL_TID0 = 0,
- HALMAC_TXDESC_QSEL_TID1 = 1,
- HALMAC_TXDESC_QSEL_TID2 = 2,
- HALMAC_TXDESC_QSEL_TID3 = 3,
- HALMAC_TXDESC_QSEL_TID4 = 4,
- HALMAC_TXDESC_QSEL_TID5 = 5,
- HALMAC_TXDESC_QSEL_TID6 = 6,
- HALMAC_TXDESC_QSEL_TID7 = 7,
- HALMAC_TXDESC_QSEL_TID8 = 8,
- HALMAC_TXDESC_QSEL_TID9 = 9,
- HALMAC_TXDESC_QSEL_TIDA = 10,
- HALMAC_TXDESC_QSEL_TIDB = 11,
- HALMAC_TXDESC_QSEL_TIDC = 12,
- HALMAC_TXDESC_QSEL_TIDD = 13,
- HALMAC_TXDESC_QSEL_TIDE = 14,
- HALMAC_TXDESC_QSEL_TIDF = 15,
-
- HALMAC_TXDESC_QSEL_BEACON = 0x10,
- HALMAC_TXDESC_QSEL_HIGH = 0x11,
- HALMAC_TXDESC_QSEL_MGT = 0x12,
- HALMAC_TXDESC_QSEL_H2C_CMD = 0x13,
-
- HALMAC_TXDESC_QSEL_UNDEFINE = 0x7F,
-};
-
-enum halmac_ptcl_queue {
- HALMAC_PTCL_QUEUE_VO = 0x0,
- HALMAC_PTCL_QUEUE_VI = 0x1,
- HALMAC_PTCL_QUEUE_BE = 0x2,
- HALMAC_PTCL_QUEUE_BK = 0x3,
- HALMAC_PTCL_QUEUE_MG = 0x4,
- HALMAC_PTCL_QUEUE_HI = 0x5,
- HALMAC_PTCL_QUEUE_NUM = 0x6,
- HALMAC_PTCL_QUEUE_UNDEFINE = 0x7F,
-};
-
-enum halmac_queue_select {
- HALMAC_QUEUE_SELECT_VO = HALMAC_TXDESC_QSEL_TID6,
- HALMAC_QUEUE_SELECT_VI = HALMAC_TXDESC_QSEL_TID4,
- HALMAC_QUEUE_SELECT_BE = HALMAC_TXDESC_QSEL_TID0,
- HALMAC_QUEUE_SELECT_BK = HALMAC_TXDESC_QSEL_TID1,
- HALMAC_QUEUE_SELECT_VO_V2 = HALMAC_TXDESC_QSEL_TID7,
- HALMAC_QUEUE_SELECT_VI_V2 = HALMAC_TXDESC_QSEL_TID5,
- HALMAC_QUEUE_SELECT_BE_V2 = HALMAC_TXDESC_QSEL_TID3,
- HALMAC_QUEUE_SELECT_BK_V2 = HALMAC_TXDESC_QSEL_TID2,
- HALMAC_QUEUE_SELECT_BCN = HALMAC_TXDESC_QSEL_BEACON,
- HALMAC_QUEUE_SELECT_HIGH = HALMAC_TXDESC_QSEL_HIGH,
- HALMAC_QUEUE_SELECT_MGNT = HALMAC_TXDESC_QSEL_MGT,
- HALMAC_QUEUE_SELECT_CMD = HALMAC_TXDESC_QSEL_H2C_CMD,
- HALMAC_QUEUE_SELECT_UNDEFINE = 0x7F,
-};
-
-/* USB burst size */
-enum halmac_usb_burst_size {
- HALMAC_USB_BURST_SIZE_3_0 = 0x0,
- HALMAC_USB_BURST_SIZE_2_0_HSPEED = 0x1,
- HALMAC_USB_BURST_SIZE_2_0_FSPEED = 0x2,
- HALMAC_USB_BURST_SIZE_2_0_OTHERS = 0x3,
- HALMAC_USB_BURST_SIZE_UNDEFINE = 0x7F,
-};
-
-/* HAL API function parameters*/
-enum halmac_interface {
- HALMAC_INTERFACE_PCIE = 0x0,
- HALMAC_INTERFACE_USB = 0x1,
- HALMAC_INTERFACE_SDIO = 0x2,
- HALMAC_INTERFACE_AXI = 0x3,
- HALMAC_INTERFACE_UNDEFINE = 0x7F,
-};
-
-enum halmac_rx_agg_mode {
- HALMAC_RX_AGG_MODE_NONE = 0x0,
- HALMAC_RX_AGG_MODE_DMA = 0x1,
- HALMAC_RX_AGG_MODE_USB = 0x2,
- HALMAC_RX_AGG_MODE_UNDEFINE = 0x7F,
-};
-
-struct halmac_rxagg_th {
- u8 drv_define;
- u8 timeout;
- u8 size;
-};
-
-struct halmac_rxagg_cfg {
- enum halmac_rx_agg_mode mode;
- struct halmac_rxagg_th threshold;
-};
-
-enum halmac_mac_power {
- HALMAC_MAC_POWER_OFF = 0x0,
- HALMAC_MAC_POWER_ON = 0x1,
- HALMAC_MAC_POWER_UNDEFINE = 0x7F,
-};
-
-enum halmac_ps_state {
- HALMAC_PS_STATE_ACT = 0x0,
- HALMAC_PS_STATE_LPS = 0x1,
- HALMAC_PS_STATE_IPS = 0x2,
- HALMAC_PS_STATE_UNDEFINE = 0x7F,
-};
-
-enum halmac_trx_mode {
- HALMAC_TRX_MODE_NORMAL = 0x0,
- HALMAC_TRX_MODE_TRXSHARE = 0x1,
- HALMAC_TRX_MODE_WMM = 0x2,
- HALMAC_TRX_MODE_P2P = 0x3,
- HALMAC_TRX_MODE_LOOPBACK = 0x4,
- HALMAC_TRX_MODE_DELAY_LOOPBACK = 0x5,
- HALMAC_TRX_MODE_MAX = 0x6,
- HALMAC_TRX_MODE_WMM_LINUX = 0x7E,
- HALMAC_TRX_MODE_UNDEFINE = 0x7F,
-};
-
-enum halmac_wireless_mode {
- HALMAC_WIRELESS_MODE_B = 0x0,
- HALMAC_WIRELESS_MODE_G = 0x1,
- HALMAC_WIRELESS_MODE_N = 0x2,
- HALMAC_WIRELESS_MODE_AC = 0x3,
- HALMAC_WIRELESS_MODE_UNDEFINE = 0x7F,
-};
-
-enum halmac_bw {
- HALMAC_BW_20 = 0x00,
- HALMAC_BW_40 = 0x01,
- HALMAC_BW_80 = 0x02,
- HALMAC_BW_160 = 0x03,
- HALMAC_BW_5 = 0x04,
- HALMAC_BW_10 = 0x05,
- HALMAC_BW_MAX = 0x06,
- HALMAC_BW_UNDEFINE = 0x7F,
-};
-
-enum halmac_efuse_read_cfg {
- HALMAC_EFUSE_R_AUTO = 0x00,
- HALMAC_EFUSE_R_DRV = 0x01,
- HALMAC_EFUSE_R_FW = 0x02,
- HALMAC_EFUSE_R_UNDEFINE = 0x7F,
-};
-
-enum halmac_dlfw_mem {
- HALMAC_DLFW_MEM_EMEM = 0x00,
- HALMAC_DLFW_MEM_UNDEFINE = 0x7F,
-};
-
-struct halmac_tx_desc {
- u32 dword0;
- u32 dword1;
- u32 dword2;
- u32 dword3;
- u32 dword4;
- u32 dword5;
- u32 dword6;
- u32 dword7;
- u32 dword8;
- u32 dword9;
- u32 dword10;
- u32 dword11;
-};
-
-struct halmac_rx_desc {
- u32 dword0;
- u32 dword1;
- u32 dword2;
- u32 dword3;
- u32 dword4;
- u32 dword5;
-};
-
-struct halmac_fwlps_option {
- u8 mode;
- u8 clk_request;
- u8 rlbm;
- u8 smart_ps;
- u8 awake_interval;
- u8 all_queue_uapsd;
- u8 pwr_state;
- u8 low_pwr_rx_beacon;
- u8 ant_auto_switch;
- u8 ps_allow_bt_high_priority;
- u8 protect_bcn;
- u8 silence_period;
- u8 fast_bt_connect;
- u8 two_antenna_en;
- u8 adopt_user_setting;
- u8 drv_bcn_early_shift;
- bool enter_32K;
-};
-
-struct halmac_fwips_option {
- u8 adopt_user_setting;
-};
-
-struct halmac_wowlan_option {
- u8 adopt_user_setting;
-};
-
-struct halmac_bcn_ie_info {
- u8 func_en;
- u8 size_th;
- u8 timeout;
- u8 ie_bmp[HALMAC_BCN_IE_BMP_SIZE];
-};
-
-enum halmac_reg_type {
- HALMAC_REG_TYPE_MAC = 0x0,
- HALMAC_REG_TYPE_BB = 0x1,
- HALMAC_REG_TYPE_RF = 0x2,
- HALMAC_REG_TYPE_UNDEFINE = 0x7F,
-};
-
-enum halmac_parameter_cmd {
- /* HALMAC_PARAMETER_CMD_LLT = 0x1, */
- /* HALMAC_PARAMETER_CMD_R_EFUSE = 0x2, */
- /* HALMAC_PARAMETER_CMD_EFUSE_PATCH = 0x3, */
- HALMAC_PARAMETER_CMD_MAC_W8 = 0x4,
- HALMAC_PARAMETER_CMD_MAC_W16 = 0x5,
- HALMAC_PARAMETER_CMD_MAC_W32 = 0x6,
- HALMAC_PARAMETER_CMD_RF_W = 0x7,
- HALMAC_PARAMETER_CMD_BB_W8 = 0x8,
- HALMAC_PARAMETER_CMD_BB_W16 = 0x9,
- HALMAC_PARAMETER_CMD_BB_W32 = 0XA,
- HALMAC_PARAMETER_CMD_DELAY_US = 0X10,
- HALMAC_PARAMETER_CMD_DELAY_MS = 0X11,
- HALMAC_PARAMETER_CMD_END = 0XFF,
-};
-
-union halmac_parameter_content {
- struct _MAC_REG_W {
- u32 value;
- u32 msk;
- u16 offset;
- u8 msk_en;
- } MAC_REG_W;
- struct _BB_REG_W {
- u32 value;
- u32 msk;
- u16 offset;
- u8 msk_en;
- } BB_REG_W;
- struct _RF_REG_W {
- u32 value;
- u32 msk;
- u8 offset;
- u8 msk_en;
- u8 rf_path;
- } RF_REG_W;
- struct _DELAY_TIME {
- u32 rsvd1;
- u32 rsvd2;
- u16 delay_time;
- u8 rsvd3;
- } DELAY_TIME;
-};
-
-struct halmac_phy_parameter_info {
- enum halmac_parameter_cmd cmd_id;
- union halmac_parameter_content content;
-};
-
-struct halmac_h2c_info {
- u16 h2c_seq_num; /* H2C sequence number */
- u8 in_use; /* 0 : empty 1 : used */
- enum halmac_h2c_return_code status;
-};
-
-struct halmac_pg_efuse_info {
- u8 *efuse_map;
- u32 efuse_map_size;
- u8 *efuse_mask;
- u32 efuse_mask_size;
-};
-
-struct halmac_txagg_buff_info {
- u8 *tx_agg_buf;
- u8 *curr_pkt_buf;
- u32 avai_buf_size;
- u32 total_pkt_size;
- u8 agg_num;
-};
-
-struct halmac_config_para_info {
- u32 para_buf_size; /* Parameter buffer size */
- u8 *cfg_para_buf; /* Buffer for config parameter */
- u8 *para_buf_w; /* Write pointer of the parameter buffer */
- u32 para_num; /* Parameter numbers in parameter buffer */
- u32 avai_para_buf_size; /* Free size of parameter buffer */
- u32 offset_accumulation;
- u32 value_accumulation;
- enum halmac_data_type data_type; /*DataType which is passed to FW*/
- u8 datapack_segment; /*DataPack Segment, from segment0...*/
- bool full_fifo_mode; /* Used full tx fifo to save cfg parameter */
-};
-
-struct halmac_hw_config_info {
- u32 efuse_size; /* Record efuse size */
- u32 eeprom_size; /* Record eeprom size */
- u32 bt_efuse_size; /* Record BT efuse size */
- u32 tx_fifo_size; /* Record tx fifo size */
- u32 rx_fifo_size; /* Record rx fifo size */
- u8 txdesc_size; /* Record tx desc size */
- u8 rxdesc_size; /* Record rx desc size */
- u32 page_size; /* Record page size */
- u16 tx_align_size;
- u8 page_size_2_power;
- u8 cam_entry_num; /* Record CAM entry number */
-};
-
-struct halmac_sdio_free_space {
- u16 high_queue_number; /* Free space of HIQ */
- u16 normal_queue_number; /* Free space of MIDQ */
- u16 low_queue_number; /* Free space of LOWQ */
- u16 public_queue_number; /* Free space of PUBQ */
- u16 extra_queue_number; /* Free space of EXBQ */
- u8 ac_oqt_number;
- u8 non_ac_oqt_number;
- u8 ac_empty;
-};
-
-enum hal_fifo_sel {
- HAL_FIFO_SEL_TX,
- HAL_FIFO_SEL_RX,
- HAL_FIFO_SEL_RSVD_PAGE,
- HAL_FIFO_SEL_REPORT,
- HAL_FIFO_SEL_LLT,
-};
-
-enum halmac_drv_info {
- HALMAC_DRV_INFO_NONE, /* No information is appended in rx_pkt */
- HALMAC_DRV_INFO_PHY_STATUS, /* PHY status is appended after rx_desc */
- HALMAC_DRV_INFO_PHY_SNIFFER, /* PHY status and sniffer info appended */
- HALMAC_DRV_INFO_PHY_PLCP, /* PHY status and plcp header are appended */
- HALMAC_DRV_INFO_UNDEFINE,
-};
-
-struct halmac_bt_coex_cmd {
- u8 element_id;
- u8 op_code;
- u8 op_code_ver;
- u8 req_num;
- u8 data0;
- u8 data1;
- u8 data2;
- u8 data3;
- u8 data4;
-};
-
-enum halmac_pri_ch_idx {
- HALMAC_CH_IDX_UNDEFINE = 0,
- HALMAC_CH_IDX_1 = 1,
- HALMAC_CH_IDX_2 = 2,
- HALMAC_CH_IDX_3 = 3,
- HALMAC_CH_IDX_4 = 4,
- HALMAC_CH_IDX_MAX = 5,
-};
-
-struct halmac_ch_info {
- enum halmac_cs_action_id action_id;
- enum halmac_bw bw;
- enum halmac_pri_ch_idx pri_ch_idx;
- u8 channel;
- u8 timeout;
- u8 extra_info;
-};
-
-struct halmac_ch_extra_info {
- u8 extra_info;
- enum halmac_cs_extra_action_id extra_action_id;
- u8 extra_info_size;
- u8 *extra_info_data;
-};
-
-enum halmac_cs_periodic_option {
- HALMAC_CS_PERIODIC_NONE,
- HALMAC_CS_PERIODIC_NORMAL,
- HALMAC_CS_PERIODIC_2_PHASE,
- HALMAC_CS_PERIODIC_SEAMLESS,
-};
-
-struct halmac_ch_switch_option {
- enum halmac_bw dest_bw;
- enum halmac_cs_periodic_option periodic_option;
- enum halmac_pri_ch_idx dest_pri_ch_idx;
- /* u32 tsf_high; */
- u32 tsf_low;
- bool switch_en;
- u8 dest_ch_en;
- u8 absolute_time_en;
- u8 dest_ch;
- u8 normal_period;
- u8 normal_cycle;
- u8 phase_2_period;
-};
-
-struct halmac_fw_version {
- u16 version;
- u8 sub_version;
- u8 sub_index;
- u16 h2c_version;
-};
-
-enum halmac_rf_type {
- HALMAC_RF_1T2R = 0,
- HALMAC_RF_2T4R = 1,
- HALMAC_RF_2T2R = 2,
- HALMAC_RF_2T3R = 3,
- HALMAC_RF_1T1R = 4,
- HALMAC_RF_2T2R_GREEN = 5,
- HALMAC_RF_3T3R = 6,
- HALMAC_RF_3T4R = 7,
- HALMAC_RF_4T4R = 8,
- HALMAC_RF_MAX_TYPE = 0xF,
-};
-
-struct halmac_general_info {
- u8 rfe_type;
- enum halmac_rf_type rf_type;
-};
-
-struct halmac_pwr_tracking_para {
- u8 enable;
- u8 tx_pwr_index;
- u8 pwr_tracking_offset_value;
- u8 tssi_value;
-};
-
-struct halmac_pwr_tracking_option {
- u8 type;
- u8 bbswing_index;
- struct halmac_pwr_tracking_para
- pwr_tracking_para[4]; /* pathA, pathB, pathC, pathD */
-};
-
-struct halmac_nlo_cfg {
- u8 num_of_ssid;
- u8 num_of_hidden_ap;
- u8 rsvd[2];
- u32 pattern_check;
- u32 rsvd1;
- u32 rsvd2;
- u8 ssid_len[HALMAC_SUPPORT_NLO_NUM];
- u8 chiper_type[HALMAC_SUPPORT_NLO_NUM];
- u8 rsvd3[HALMAC_SUPPORT_NLO_NUM];
- u8 loc_probe_req[HALMAC_SUPPORT_PROBE_REQ_NUM];
- u8 rsvd4[56];
- u8 ssid[HALMAC_SUPPORT_NLO_NUM][HALMAC_MAX_SSID_LEN];
-};
-
-enum halmac_data_rate {
- HALMAC_CCK1,
- HALMAC_CCK2,
- HALMAC_CCK5_5,
- HALMAC_CCK11,
- HALMAC_OFDM6,
- HALMAC_OFDM9,
- HALMAC_OFDM12,
- HALMAC_OFDM18,
- HALMAC_OFDM24,
- HALMAC_OFDM36,
- HALMAC_OFDM48,
- HALMAC_OFDM54,
- HALMAC_MCS0,
- HALMAC_MCS1,
- HALMAC_MCS2,
- HALMAC_MCS3,
- HALMAC_MCS4,
- HALMAC_MCS5,
- HALMAC_MCS6,
- HALMAC_MCS7,
- HALMAC_MCS8,
- HALMAC_MCS9,
- HALMAC_MCS10,
- HALMAC_MCS11,
- HALMAC_MCS12,
- HALMAC_MCS13,
- HALMAC_MCS14,
- HALMAC_MCS15,
- HALMAC_MCS16,
- HALMAC_MCS17,
- HALMAC_MCS18,
- HALMAC_MCS19,
- HALMAC_MCS20,
- HALMAC_MCS21,
- HALMAC_MCS22,
- HALMAC_MCS23,
- HALMAC_MCS24,
- HALMAC_MCS25,
- HALMAC_MCS26,
- HALMAC_MCS27,
- HALMAC_MCS28,
- HALMAC_MCS29,
- HALMAC_MCS30,
- HALMAC_MCS31,
- HALMAC_VHT_NSS1_MCS0,
- HALMAC_VHT_NSS1_MCS1,
- HALMAC_VHT_NSS1_MCS2,
- HALMAC_VHT_NSS1_MCS3,
- HALMAC_VHT_NSS1_MCS4,
- HALMAC_VHT_NSS1_MCS5,
- HALMAC_VHT_NSS1_MCS6,
- HALMAC_VHT_NSS1_MCS7,
- HALMAC_VHT_NSS1_MCS8,
- HALMAC_VHT_NSS1_MCS9,
- HALMAC_VHT_NSS2_MCS0,
- HALMAC_VHT_NSS2_MCS1,
- HALMAC_VHT_NSS2_MCS2,
- HALMAC_VHT_NSS2_MCS3,
- HALMAC_VHT_NSS2_MCS4,
- HALMAC_VHT_NSS2_MCS5,
- HALMAC_VHT_NSS2_MCS6,
- HALMAC_VHT_NSS2_MCS7,
- HALMAC_VHT_NSS2_MCS8,
- HALMAC_VHT_NSS2_MCS9,
- HALMAC_VHT_NSS3_MCS0,
- HALMAC_VHT_NSS3_MCS1,
- HALMAC_VHT_NSS3_MCS2,
- HALMAC_VHT_NSS3_MCS3,
- HALMAC_VHT_NSS3_MCS4,
- HALMAC_VHT_NSS3_MCS5,
- HALMAC_VHT_NSS3_MCS6,
- HALMAC_VHT_NSS3_MCS7,
- HALMAC_VHT_NSS3_MCS8,
- HALMAC_VHT_NSS3_MCS9,
- HALMAC_VHT_NSS4_MCS0,
- HALMAC_VHT_NSS4_MCS1,
- HALMAC_VHT_NSS4_MCS2,
- HALMAC_VHT_NSS4_MCS3,
- HALMAC_VHT_NSS4_MCS4,
- HALMAC_VHT_NSS4_MCS5,
- HALMAC_VHT_NSS4_MCS6,
- HALMAC_VHT_NSS4_MCS7,
- HALMAC_VHT_NSS4_MCS8,
- HALMAC_VHT_NSS4_MCS9
-};
-
-enum halmac_rf_path {
- HALMAC_RF_PATH_A,
- HALMAC_RF_PATH_B,
- HALMAC_RF_PATH_C,
- HALMAC_RF_PATH_D
-};
-
-enum halmac_snd_pkt_sel {
- HALMAC_UNI_NDPA,
- HALMAC_BMC_NDPA,
- HALMAC_NON_FINAL_BFRPRPOLL,
- HALMAC_FINAL_BFRPTPOLL,
-};
-
-enum hal_security_type {
- HAL_SECURITY_TYPE_NONE = 0,
- HAL_SECURITY_TYPE_WEP40 = 1,
- HAL_SECURITY_TYPE_WEP104 = 2,
- HAL_SECURITY_TYPE_TKIP = 3,
- HAL_SECURITY_TYPE_AES128 = 4,
- HAL_SECURITY_TYPE_WAPI = 5,
- HAL_SECURITY_TYPE_AES256 = 6,
- HAL_SECURITY_TYPE_GCMP128 = 7,
- HAL_SECURITY_TYPE_GCMP256 = 8,
- HAL_SECURITY_TYPE_GCMSMS4 = 9,
- HAL_SECURITY_TYPE_BIP = 10,
- HAL_SECURITY_TYPE_UNDEFINE = 0x7F,
-};
-
-enum hal_intf_phy {
- HAL_INTF_PHY_USB2 = 0,
- HAL_INTF_PHY_USB3 = 1,
- HAL_INTF_PHY_PCIE_GEN1 = 2,
- HAL_INTF_PHY_PCIE_GEN2 = 3,
- HAL_INTF_PHY_UNDEFINE = 0x7F,
-};
-
-enum halmac_dbg_msg_info {
- HALMAC_DBG_ERR,
- HALMAC_DBG_WARN,
- HALMAC_DBG_TRACE,
-};
-
-enum halmac_dbg_msg_type {
- HALMAC_MSG_INIT,
- HALMAC_MSG_EFUSE,
- HALMAC_MSG_FW,
- HALMAC_MSG_H2C,
- HALMAC_MSG_PWR,
- HALMAC_MSG_SND,
- HALMAC_MSG_COMMON,
- HALMAC_MSG_DBI,
- HALMAC_MSG_MDIO,
- HALMAC_MSG_USB
-};
-
-enum halmac_cmd_process_status {
- HALMAC_CMD_PROCESS_IDLE = 0x01, /* Init status */
- HALMAC_CMD_PROCESS_SENDING = 0x02, /* Wait ack */
- HALMAC_CMD_PROCESS_RCVD = 0x03, /* Rcvd ack */
- HALMAC_CMD_PROCESS_DONE = 0x04, /* Event done */
- HALMAC_CMD_PROCESS_ERROR = 0x05, /* Return code error */
- HALMAC_CMD_PROCESS_UNDEFINE = 0x7F,
-};
-
-enum halmac_feature_id {
- HALMAC_FEATURE_CFG_PARA, /* Support */
- HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE, /* Support */
- HALMAC_FEATURE_DUMP_LOGICAL_EFUSE, /* Support */
- HALMAC_FEATURE_UPDATE_PACKET, /* Support */
- HALMAC_FEATURE_UPDATE_DATAPACK,
- HALMAC_FEATURE_RUN_DATAPACK,
- HALMAC_FEATURE_CHANNEL_SWITCH, /* Support */
- HALMAC_FEATURE_IQK, /* Support */
- HALMAC_FEATURE_POWER_TRACKING, /* Support */
- HALMAC_FEATURE_PSD, /* Support */
- HALMAC_FEATURE_ALL, /* Support, only for reset */
-};
-
-enum halmac_drv_rsvd_pg_num {
- HALMAC_RSVD_PG_NUM16, /* 2K */
- HALMAC_RSVD_PG_NUM24, /* 3K */
- HALMAC_RSVD_PG_NUM32, /* 4K */
-};
-
-enum halmac_pcie_cfg {
- HALMAC_PCIE_GEN1,
- HALMAC_PCIE_GEN2,
- HALMAC_PCIE_CFG_UNDEFINE,
-};
-
-enum halmac_portid {
- HALMAC_PORTID0 = 0,
- HALMAC_PORTID1 = 1,
- HALMAC_PORTID2 = 2,
- HALMAC_PORTID3 = 3,
- HALMAC_PORTID4 = 4,
- HALMAC_PORTIDMAX
-};
-
-struct halmac_p2pps {
- /*DW0*/
- u8 offload_en : 1;
- u8 role : 1;
- u8 ctwindow_en : 1;
- u8 noa_en : 1;
- u8 noa_sel : 1;
- u8 all_sta_sleep : 1;
- u8 discovery : 1;
- u8 rsvd2 : 1;
- u8 p2p_port_id;
- u8 p2p_group;
- u8 p2p_macid;
-
- /*DW1*/
- u8 ctwindow_length;
- u8 rsvd3;
- u8 rsvd4;
- u8 rsvd5;
-
- /*DW2*/
- u32 noa_duration_para;
-
- /*DW3*/
- u32 noa_interval_para;
-
- /*DW4*/
- u32 noa_start_time_para;
-
- /*DW5*/
- u32 noa_count_para;
-};
-
-/* Platform API setting */
-struct halmac_platform_api {
- /* R/W register */
- u8 (*SDIO_CMD52_READ)(void *driver_adapter, u32 offset);
- u8 (*SDIO_CMD53_READ_8)(void *driver_adapter, u32 offset);
- u16 (*SDIO_CMD53_READ_16)(void *driver_adapter, u32 offset);
- u32 (*SDIO_CMD53_READ_32)(void *driver_adapter, u32 offset);
- u8 (*SDIO_CMD53_READ_N)(void *driver_adapter, u32 offset, u32 size,
- u8 *data);
- void (*SDIO_CMD52_WRITE)(void *driver_adapter, u32 offset, u8 value);
- void (*SDIO_CMD53_WRITE_8)(void *driver_adapter, u32 offset, u8 value);
- void (*SDIO_CMD53_WRITE_16)(void *driver_adapter, u32 offset,
- u16 value);
- void (*SDIO_CMD53_WRITE_32)(void *driver_adapter, u32 offset,
- u32 value);
- u8 (*REG_READ_8)(void *driver_adapter, u32 offset);
- u16 (*REG_READ_16)(void *driver_adapter, u32 offset);
- u32 (*REG_READ_32)(void *driver_adapter, u32 offset);
- void (*REG_WRITE_8)(void *driver_adapter, u32 offset, u8 value);
- void (*REG_WRITE_16)(void *driver_adapter, u32 offset, u16 value);
- void (*REG_WRITE_32)(void *driver_adapter, u32 offset, u32 value);
-
-	/* send buf to the reserved page; the tx_desc is not included in buf,
-	 * so the driver needs to fill tx_desc with qsel = bcn
- */
- bool (*SEND_RSVD_PAGE)(void *driver_adapter, u8 *buf, u32 size);
-	/* send buf to the h2c queue; the tx_desc is not included in buf,
-	 * so the driver needs to fill tx_desc with qsel = h2c
- */
- bool (*SEND_H2C_PKT)(void *driver_adapter, u8 *buf, u32 size);
-
- bool (*EVENT_INDICATION)(void *driver_adapter,
- enum halmac_feature_id feature_id,
- enum halmac_cmd_process_status process_status,
- u8 *buf, u32 size);
-};
-
-/*1->Little endian 0->Big endian*/
-#if HALMAC_SYSTEM_ENDIAN
-
-#else
-
-#endif
-
-/* Users must not access the members of address_l_h directly; using address[6] is mandatory */
-union halmac_wlan_addr {
-	u8 address[6]; /* WLAN address (MACID, BSSID, Broadcast ID).
- * address[0] is lowest, address[5] is highest
- */
- struct {
- union {
- u32 address_low;
- __le32 le_address_low;
- u8 address_low_b[4];
- };
- union {
- u16 address_high;
- __le16 le_address_high;
- u8 address_high_b[2];
- };
- } address_l_h;
-};
-
-enum halmac_snd_role {
- HAL_BFER = 0,
- HAL_BFEE = 1,
-};
-
-enum halmac_csi_seg_len {
- HAL_CSI_SEG_4K = 0,
- HAL_CSI_SEG_8K = 1,
- HAL_CSI_SEG_11K = 2,
-};
-
-struct halmac_cfg_mumimo_para {
- enum halmac_snd_role role;
- bool sounding_sts[6];
- u16 grouping_bitmap;
- bool mu_tx_en;
- u32 given_gid_tab[2];
- u32 given_user_pos[4];
-};
-
-struct halmac_su_bfer_init_para {
- u8 userid;
- u16 paid;
- u16 csi_para;
- union halmac_wlan_addr bfer_address;
-};
-
-struct halmac_mu_bfee_init_para {
- u8 userid;
- u16 paid;
- u32 user_position_l;
- u32 user_position_h;
-};
-
-struct halmac_mu_bfer_init_para {
- u16 paid;
- u16 csi_para;
- u16 my_aid;
- enum halmac_csi_seg_len csi_length_sel;
- union halmac_wlan_addr bfer_address;
-};
-
-struct halmac_snd_info {
- u16 paid;
- u8 userid;
- enum halmac_data_rate ndpa_rate;
- u16 csi_para;
- u16 my_aid;
- enum halmac_data_rate csi_rate;
- enum halmac_csi_seg_len csi_length_sel;
- enum halmac_snd_role role;
- union halmac_wlan_addr bfer_address;
- enum halmac_bw bw;
- u8 txbf_en;
- struct halmac_su_bfer_init_para *su_bfer_init;
- struct halmac_mu_bfer_init_para *mu_bfer_init;
- struct halmac_mu_bfee_init_para *mu_bfee_init;
-};
-
-struct halmac_cs_info {
- u8 *ch_info_buf;
- u8 *ch_info_buf_w;
- u8 extra_info_en;
- u32 buf_size; /* buffer size */
-	u32 avai_buf_size; /* available buffer size */
- u32 total_size;
- u32 accu_timeout;
- u32 ch_num;
-};
-
-struct halmac_restore_info {
- u32 mac_register;
- u32 value;
- u8 length;
-};
-
-struct halmac_event_trigger {
- u32 physical_efuse_map : 1;
- u32 logical_efuse_map : 1;
- u32 rsvd1 : 28;
-};
-
-struct halmac_h2c_header_info {
- u16 sub_cmd_id;
- u16 content_size;
- bool ack;
-};
-
-enum halmac_dlfw_state {
- HALMAC_DLFW_NONE = 0,
- HALMAC_DLFW_DONE = 1,
- HALMAC_GEN_INFO_SENT = 2,
- HALMAC_DLFW_UNDEFINED = 0x7F,
-};
-
-enum halmac_efuse_cmd_construct_state {
- HALMAC_EFUSE_CMD_CONSTRUCT_IDLE = 0,
- HALMAC_EFUSE_CMD_CONSTRUCT_BUSY = 1,
- HALMAC_EFUSE_CMD_CONSTRUCT_H2C_SENT = 2,
- HALMAC_EFUSE_CMD_CONSTRUCT_STATE_NUM = 3,
- HALMAC_EFUSE_CMD_CONSTRUCT_UNDEFINED = 0x7F,
-};
-
-enum halmac_cfg_para_cmd_construct_state {
- HALMAC_CFG_PARA_CMD_CONSTRUCT_IDLE = 0,
- HALMAC_CFG_PARA_CMD_CONSTRUCT_CONSTRUCTING = 1,
- HALMAC_CFG_PARA_CMD_CONSTRUCT_H2C_SENT = 2,
- HALMAC_CFG_PARA_CMD_CONSTRUCT_NUM = 3,
- HALMAC_CFG_PARA_CMD_CONSTRUCT_UNDEFINED = 0x7F,
-};
-
-enum halmac_scan_cmd_construct_state {
- HALMAC_SCAN_CMD_CONSTRUCT_IDLE = 0,
- HALMAC_SCAN_CMD_CONSTRUCT_BUFFER_CLEARED = 1,
- HALMAC_SCAN_CMD_CONSTRUCT_CONSTRUCTING = 2,
- HALMAC_SCAN_CMD_CONSTRUCT_H2C_SENT = 3,
- HALMAC_SCAN_CMD_CONSTRUCT_STATE_NUM = 4,
- HALMAC_SCAN_CMD_CONSTRUCT_UNDEFINED = 0x7F,
-};
-
-enum halmac_api_state {
- HALMAC_API_STATE_INIT = 0,
- HALMAC_API_STATE_HALT = 1,
- HALMAC_API_STATE_UNDEFINED = 0x7F,
-};
-
-struct halmac_efuse_state_set {
- enum halmac_efuse_cmd_construct_state efuse_cmd_construct_state;
- enum halmac_cmd_process_status process_status;
- u8 fw_return_code;
- u16 seq_num;
-};
-
-struct halmac_cfg_para_state_set {
- enum halmac_cfg_para_cmd_construct_state cfg_para_cmd_construct_state;
- enum halmac_cmd_process_status process_status;
- u8 fw_return_code;
- u16 seq_num;
-};
-
-struct halmac_scan_state_set {
- enum halmac_scan_cmd_construct_state scan_cmd_construct_state;
- enum halmac_cmd_process_status process_status;
- u8 fw_return_code;
- u16 seq_num;
-};
-
-struct halmac_update_packet_state_set {
- enum halmac_cmd_process_status process_status;
- u8 fw_return_code;
- u16 seq_num;
-};
-
-struct halmac_iqk_state_set {
- enum halmac_cmd_process_status process_status;
- u8 fw_return_code;
- u16 seq_num;
-};
-
-struct halmac_power_tracking_state_set {
- enum halmac_cmd_process_status process_status;
- u8 fw_return_code;
- u16 seq_num;
-};
-
-struct halmac_psd_state_set {
- enum halmac_cmd_process_status process_status;
- u16 data_size;
- u16 segment_size;
- u8 *data;
- u8 fw_return_code;
- u16 seq_num;
-};
-
-struct halmac_state {
- struct halmac_efuse_state_set
- efuse_state_set; /* State machine + cmd process status */
- struct halmac_cfg_para_state_set
- cfg_para_state_set; /* State machine + cmd process status */
- struct halmac_scan_state_set
- scan_state_set; /* State machine + cmd process status */
- struct halmac_update_packet_state_set
- update_packet_set; /* cmd process status */
- struct halmac_iqk_state_set iqk_set; /* cmd process status */
- struct halmac_power_tracking_state_set
- power_tracking_set; /* cmd process status */
- struct halmac_psd_state_set psd_set; /* cmd process status */
- enum halmac_api_state api_state; /* Halmac api state */
- enum halmac_mac_power mac_power; /* 0 : power off, 1 : power on*/
- enum halmac_ps_state ps_state; /* power saving state */
- enum halmac_dlfw_state dlfw_state; /* download FW state */
-};
-
-struct halmac_ver {
- u8 major_ver;
- u8 prototype_ver;
- u8 minor_ver;
-};
-
-enum halmac_api_id {
- /*stuff, need to be the 1st*/
- HALMAC_API_STUFF = 0x0,
- /*stuff, need to be the 1st*/
- HALMAC_API_MAC_POWER_SWITCH = 0x1,
- HALMAC_API_DOWNLOAD_FIRMWARE = 0x2,
- HALMAC_API_CFG_MAC_ADDR = 0x3,
- HALMAC_API_CFG_BSSID = 0x4,
- HALMAC_API_CFG_MULTICAST_ADDR = 0x5,
- HALMAC_API_PRE_INIT_SYSTEM_CFG = 0x6,
- HALMAC_API_INIT_SYSTEM_CFG = 0x7,
- HALMAC_API_INIT_TRX_CFG = 0x8,
- HALMAC_API_CFG_RX_AGGREGATION = 0x9,
- HALMAC_API_INIT_PROTOCOL_CFG = 0xA,
- HALMAC_API_INIT_EDCA_CFG = 0xB,
- HALMAC_API_CFG_OPERATION_MODE = 0xC,
- HALMAC_API_CFG_CH_BW = 0xD,
- HALMAC_API_CFG_BW = 0xE,
- HALMAC_API_INIT_WMAC_CFG = 0xF,
- HALMAC_API_INIT_MAC_CFG = 0x10,
- HALMAC_API_INIT_SDIO_CFG = 0x11,
- HALMAC_API_INIT_USB_CFG = 0x12,
- HALMAC_API_INIT_PCIE_CFG = 0x13,
- HALMAC_API_INIT_INTERFACE_CFG = 0x14,
- HALMAC_API_DEINIT_SDIO_CFG = 0x15,
- HALMAC_API_DEINIT_USB_CFG = 0x16,
- HALMAC_API_DEINIT_PCIE_CFG = 0x17,
- HALMAC_API_DEINIT_INTERFACE_CFG = 0x18,
- HALMAC_API_GET_EFUSE_SIZE = 0x19,
- HALMAC_API_DUMP_EFUSE_MAP = 0x1A,
- HALMAC_API_WRITE_EFUSE = 0x1B,
- HALMAC_API_READ_EFUSE = 0x1C,
- HALMAC_API_GET_LOGICAL_EFUSE_SIZE = 0x1D,
- HALMAC_API_DUMP_LOGICAL_EFUSE_MAP = 0x1E,
- HALMAC_API_WRITE_LOGICAL_EFUSE = 0x1F,
- HALMAC_API_READ_LOGICAL_EFUSE = 0x20,
- HALMAC_API_PG_EFUSE_BY_MAP = 0x21,
- HALMAC_API_GET_C2H_INFO = 0x22,
- HALMAC_API_CFG_FWLPS_OPTION = 0x23,
- HALMAC_API_CFG_FWIPS_OPTION = 0x24,
- HALMAC_API_ENTER_WOWLAN = 0x25,
- HALMAC_API_LEAVE_WOWLAN = 0x26,
- HALMAC_API_ENTER_PS = 0x27,
- HALMAC_API_LEAVE_PS = 0x28,
- HALMAC_API_H2C_LB = 0x29,
- HALMAC_API_DEBUG = 0x2A,
- HALMAC_API_CFG_PARAMETER = 0x2B,
- HALMAC_API_UPDATE_PACKET = 0x2C,
- HALMAC_API_BCN_IE_FILTER = 0x2D,
- HALMAC_API_REG_READ_8 = 0x2E,
- HALMAC_API_REG_WRITE_8 = 0x2F,
- HALMAC_API_REG_READ_16 = 0x30,
- HALMAC_API_REG_WRITE_16 = 0x31,
- HALMAC_API_REG_READ_32 = 0x32,
- HALMAC_API_REG_WRITE_32 = 0x33,
- HALMAC_API_TX_ALLOWED_SDIO = 0x34,
- HALMAC_API_SET_BULKOUT_NUM = 0x35,
- HALMAC_API_GET_SDIO_TX_ADDR = 0x36,
- HALMAC_API_GET_USB_BULKOUT_ID = 0x37,
- HALMAC_API_TIMER_2S = 0x38,
- HALMAC_API_FILL_TXDESC_CHECKSUM = 0x39,
- HALMAC_API_SEND_ORIGINAL_H2C = 0x3A,
- HALMAC_API_UPDATE_DATAPACK = 0x3B,
- HALMAC_API_RUN_DATAPACK = 0x3C,
- HALMAC_API_CFG_DRV_INFO = 0x3D,
- HALMAC_API_SEND_BT_COEX = 0x3E,
- HALMAC_API_VERIFY_PLATFORM_API = 0x3F,
- HALMAC_API_GET_FIFO_SIZE = 0x40,
- HALMAC_API_DUMP_FIFO = 0x41,
- HALMAC_API_CFG_TXBF = 0x42,
- HALMAC_API_CFG_MUMIMO = 0x43,
- HALMAC_API_CFG_SOUNDING = 0x44,
- HALMAC_API_DEL_SOUNDING = 0x45,
- HALMAC_API_SU_BFER_ENTRY_INIT = 0x46,
- HALMAC_API_SU_BFEE_ENTRY_INIT = 0x47,
- HALMAC_API_MU_BFER_ENTRY_INIT = 0x48,
- HALMAC_API_MU_BFEE_ENTRY_INIT = 0x49,
- HALMAC_API_SU_BFER_ENTRY_DEL = 0x4A,
- HALMAC_API_SU_BFEE_ENTRY_DEL = 0x4B,
- HALMAC_API_MU_BFER_ENTRY_DEL = 0x4C,
- HALMAC_API_MU_BFEE_ENTRY_DEL = 0x4D,
-
- HALMAC_API_ADD_CH_INFO = 0x4E,
- HALMAC_API_ADD_EXTRA_CH_INFO = 0x4F,
- HALMAC_API_CTRL_CH_SWITCH = 0x50,
- HALMAC_API_CLEAR_CH_INFO = 0x51,
-
- HALMAC_API_SEND_GENERAL_INFO = 0x52,
- HALMAC_API_START_IQK = 0x53,
- HALMAC_API_CTRL_PWR_TRACKING = 0x54,
- HALMAC_API_PSD = 0x55,
- HALMAC_API_CFG_TX_AGG_ALIGN = 0x56,
-
- HALMAC_API_QUERY_STATE = 0x57,
- HALMAC_API_RESET_FEATURE = 0x58,
- HALMAC_API_CHECK_FW_STATUS = 0x59,
- HALMAC_API_DUMP_FW_DMEM = 0x5A,
- HALMAC_API_CFG_MAX_DL_SIZE = 0x5B,
-
- HALMAC_API_INIT_OBJ = 0x5C,
- HALMAC_API_DEINIT_OBJ = 0x5D,
- HALMAC_API_CFG_LA_MODE = 0x5E,
- HALMAC_API_GET_HW_VALUE = 0x5F,
- HALMAC_API_SET_HW_VALUE = 0x60,
- HALMAC_API_CFG_DRV_RSVD_PG_NUM = 0x61,
- HALMAC_API_SWITCH_EFUSE_BANK = 0x62,
- HALMAC_API_WRITE_EFUSE_BT = 0x63,
- HALMAC_API_DUMP_EFUSE_MAP_BT = 0x64,
- HALMAC_API_DL_DRV_RSVD_PG = 0x65,
- HALMAC_API_PCIE_SWITCH = 0x66,
- HALMAC_API_PHY_CFG = 0x67,
- HALMAC_API_CFG_RX_FIFO_EXPANDING_MODE = 0x68,
- HALMAC_API_CFG_CSI_RATE = 0x69,
- HALMAC_API_MAX
-};
-
-struct halmac_api_record {
- enum halmac_api_id api_array[API_ARRAY_SIZE];
- u8 array_wptr;
-};
-
-enum halmac_la_mode {
- HALMAC_LA_MODE_DISABLE = 0,
- HALMAC_LA_MODE_PARTIAL = 1,
- HALMAC_LA_MODE_FULL = 2,
- HALMAC_LA_MODE_UNDEFINE = 0x7F,
-};
-
-enum halmac_rx_fifo_expanding_mode {
- HALMAC_RX_FIFO_EXPANDING_MODE_DISABLE = 0,
- HALMAC_RX_FIFO_EXPANDING_MODE_1_BLOCK = 1,
- HALMAC_RX_FIFO_EXPANDING_MODE_2_BLOCK = 2,
- HALMAC_RX_FIFO_EXPANDING_MODE_3_BLOCK = 3,
- HALMAC_RX_FIFO_EXPANDING_MODE_UNDEFINE = 0x7F,
-};
-
-enum halmac_sdio_cmd53_4byte_mode {
- HALMAC_SDIO_CMD53_4BYTE_MODE_DISABLE = 0,
- HALMAC_SDIO_CMD53_4BYTE_MODE_RW = 1,
- HALMAC_SDIO_CMD53_4BYTE_MODE_R = 2,
- HALMAC_SDIO_CMD53_4BYTE_MODE_W = 3,
- HALMAC_SDIO_CMD53_4BYTE_MODE_UNDEFINE = 0x7F,
-};
-
-enum halmac_usb_mode {
- HALMAC_USB_MODE_U2 = 1,
- HALMAC_USB_MODE_U3 = 2,
-};
-
-enum halmac_hw_id {
- /* Get HW value */
- HALMAC_HW_RQPN_MAPPING = 0x00,
- HALMAC_HW_EFUSE_SIZE = 0x01,
- HALMAC_HW_EEPROM_SIZE = 0x02,
- HALMAC_HW_BT_BANK_EFUSE_SIZE = 0x03,
- HALMAC_HW_BT_BANK1_EFUSE_SIZE = 0x04,
- HALMAC_HW_BT_BANK2_EFUSE_SIZE = 0x05,
- HALMAC_HW_TXFIFO_SIZE = 0x06,
- HALMAC_HW_RSVD_PG_BNDY = 0x07,
- HALMAC_HW_CAM_ENTRY_NUM = 0x08,
- HALMAC_HW_IC_VERSION = 0x09,
- HALMAC_HW_PAGE_SIZE = 0x0A,
- HALMAC_HW_TX_AGG_ALIGN_SIZE = 0x0B,
- HALMAC_HW_RX_AGG_ALIGN_SIZE = 0x0C,
- HALMAC_HW_DRV_INFO_SIZE = 0x0D,
- HALMAC_HW_TXFF_ALLOCATION = 0x0E,
- HALMAC_HW_RSVD_EFUSE_SIZE = 0x0F,
- HALMAC_HW_FW_HDR_SIZE = 0x10,
- HALMAC_HW_TX_DESC_SIZE = 0x11,
- HALMAC_HW_RX_DESC_SIZE = 0x12,
- HALMAC_HW_WLAN_EFUSE_AVAILABLE_SIZE = 0x13,
- /* Set HW value */
- HALMAC_HW_USB_MODE = 0x60,
- HALMAC_HW_SEQ_EN = 0x61,
- HALMAC_HW_BANDWIDTH = 0x62,
- HALMAC_HW_CHANNEL = 0x63,
- HALMAC_HW_PRI_CHANNEL_IDX = 0x64,
- HALMAC_HW_EN_BB_RF = 0x65,
- HALMAC_HW_SDIO_TX_PAGE_THRESHOLD = 0x66,
- HALMAC_HW_AMPDU_CONFIG = 0x67,
-
- HALMAC_HW_ID_UNDEFINE = 0x7F,
-};
-
-enum halmac_efuse_bank {
- HALMAC_EFUSE_BANK_WIFI = 0,
- HALMAC_EFUSE_BANK_BT = 1,
- HALMAC_EFUSE_BANK_BT_1 = 2,
- HALMAC_EFUSE_BANK_BT_2 = 3,
- HALMAC_EFUSE_BANK_MAX,
- HALMAC_EFUSE_BANK_UNDEFINE = 0X7F,
-};
-
-struct halmac_txff_allocation {
- u16 tx_fifo_pg_num;
- u16 rsvd_pg_num;
- u16 rsvd_drv_pg_num;
- u16 ac_q_pg_num;
- u16 high_queue_pg_num;
- u16 low_queue_pg_num;
- u16 normal_queue_pg_num;
- u16 extra_queue_pg_num;
- u16 pub_queue_pg_num;
- u16 rsvd_pg_bndy;
- u16 rsvd_drv_pg_bndy;
- u16 rsvd_h2c_extra_info_pg_bndy;
- u16 rsvd_h2c_queue_pg_bndy;
- u16 rsvd_cpu_instr_pg_bndy;
- u16 rsvd_fw_txbuff_pg_bndy;
- enum halmac_la_mode la_mode;
- enum halmac_rx_fifo_expanding_mode rx_fifo_expanding_mode;
-};
-
-struct halmac_rqpn_map {
- enum halmac_dma_mapping dma_map_vo;
- enum halmac_dma_mapping dma_map_vi;
- enum halmac_dma_mapping dma_map_be;
- enum halmac_dma_mapping dma_map_bk;
- enum halmac_dma_mapping dma_map_mg;
- enum halmac_dma_mapping dma_map_hi;
-};
-
-struct halmac_security_setting {
- u8 tx_encryption;
- u8 rx_decryption;
- u8 bip_enable;
-};
-
-struct halmac_cam_entry_info {
- enum hal_security_type security_type;
- u32 key[4];
- u32 key_ext[4];
- u8 mac_address[6];
- u8 unicast;
- u8 key_id;
- u8 valid;
-};
-
-struct halmac_cam_entry_format {
- u16 key_id : 2;
- u16 type : 3;
- u16 mic : 1;
- u16 grp : 1;
- u16 spp_mode : 1;
- u16 rpt_md : 1;
- u16 ext_sectype : 1;
- u16 mgnt : 1;
- u16 rsvd1 : 4;
- u16 valid : 1;
- u8 mac_address[6];
- u32 key[4];
- u32 rsvd[2];
-};
-
-struct halmac_tx_page_threshold_info {
- u32 threshold;
- enum halmac_dma_mapping dma_queue_sel;
-};
-
-struct halmac_ampdu_config {
- u8 max_agg_num;
-};
-
-struct halmac_port_cfg {
- u8 port0_sync_tsf;
- u8 port1_sync_tsf;
-};
-
-struct halmac_rqpn_ {
- enum halmac_trx_mode mode;
- enum halmac_dma_mapping dma_map_vo;
- enum halmac_dma_mapping dma_map_vi;
- enum halmac_dma_mapping dma_map_be;
- enum halmac_dma_mapping dma_map_bk;
- enum halmac_dma_mapping dma_map_mg;
- enum halmac_dma_mapping dma_map_hi;
-};
-
-struct halmac_pg_num_ {
- enum halmac_trx_mode mode;
- u16 hq_num;
- u16 nq_num;
- u16 lq_num;
- u16 exq_num;
- u16 gap_num; /*used for loopback mode*/
-};
-
-struct halmac_intf_phy_para_ {
- u16 offset;
- u16 value;
- u16 ip_sel;
- u16 cut;
- u16 plaform;
-};
-
-struct halmac_iqk_para_ {
- u8 clear;
- u8 segment_iqk;
-};
-
-/* Hal mac adapter */
-struct halmac_adapter {
- /* Dma mapping of protocol queues */
- enum halmac_dma_mapping halmac_ptcl_queue[HALMAC_PTCL_QUEUE_NUM];
- /* low power state option */
- struct halmac_fwlps_option fwlps_option;
-	/* mac address information, supports 2 ports */
- union halmac_wlan_addr hal_mac_addr[HALMAC_PORTIDMAX];
-	/* bss address information, supports 2 ports */
- union halmac_wlan_addr hal_bss_addr[HALMAC_PORTIDMAX];
-	/* Protect h2c_packet_seq */
- spinlock_t h2c_seq_lock;
- /* Protect Efuse map memory of halmac_adapter */
- spinlock_t efuse_lock;
- struct halmac_config_para_info config_para_info;
- struct halmac_cs_info ch_sw_info;
- struct halmac_event_trigger event_trigger;
- /* HW related information */
- struct halmac_hw_config_info hw_config_info;
- struct halmac_sdio_free_space sdio_free_space;
- struct halmac_snd_info snd_info;
- /* Backup HalAdapter address */
- void *hal_adapter_backup;
-	/* Driver or FW adapter address. Do not write to this memory */
- void *driver_adapter;
- u8 *hal_efuse_map;
- /* Record function pointer of halmac api */
- void *halmac_api;
- /* Record function pointer of platform api */
- struct halmac_platform_api *halmac_platform_api;
- /* Record efuse used memory */
- u32 efuse_end;
- u32 h2c_buf_free_space;
- u32 h2c_buff_size;
- u32 max_download_size;
- /* Chip ID, 8822B, 8821C... */
- enum halmac_chip_id chip_id;
- /* A cut, B cut... */
- enum halmac_chip_ver chip_version;
- struct halmac_fw_version fw_version;
- struct halmac_state halmac_state;
- /* Interface information, get from driver */
- enum halmac_interface halmac_interface;
-	/* Normal, WMM, P2P, LoopBack... */
- enum halmac_trx_mode trx_mode;
- struct halmac_txff_allocation txff_allocation;
- u8 h2c_packet_seq; /* current h2c packet sequence number */
-	u16 ack_h2c_packet_seq; /* the acked h2c packet sequence number */
- bool hal_efuse_map_valid;
- u8 efuse_segment_size;
- u8 rpwm_record; /* record rpwm value */
-	bool low_clk; /* LPS 32K or IPS 32K */
- u8 halmac_bulkout_num; /* USB bulkout num */
- struct halmac_api_record api_record; /* API record */
- bool gen_info_valid;
- struct halmac_general_info general_info;
- u8 drv_info_size;
- enum halmac_sdio_cmd53_4byte_mode sdio_cmd53_4byte;
-};
-
-/* Function pointer of Hal mac API */
-struct halmac_api {
- enum halmac_ret_status (*halmac_mac_power_switch)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_mac_power halmac_power);
- enum halmac_ret_status (*halmac_download_firmware)(
- struct halmac_adapter *halmac_adapter, u8 *hamacl_fw,
- u32 halmac_fw_size);
- enum halmac_ret_status (*halmac_free_download_firmware)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_dlfw_mem dlfw_mem, u8 *hamacl_fw,
- u32 halmac_fw_size);
- enum halmac_ret_status (*halmac_get_fw_version)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_fw_version *fw_version);
- enum halmac_ret_status (*halmac_cfg_mac_addr)(
- struct halmac_adapter *halmac_adapter, u8 halmac_port,
- union halmac_wlan_addr *hal_address);
- enum halmac_ret_status (*halmac_cfg_bssid)(
- struct halmac_adapter *halmac_adapter, u8 halmac_port,
- union halmac_wlan_addr *hal_address);
- enum halmac_ret_status (*halmac_cfg_multicast_addr)(
- struct halmac_adapter *halmac_adapter,
- union halmac_wlan_addr *hal_address);
- enum halmac_ret_status (*halmac_pre_init_system_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_init_system_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_init_trx_cfg)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode mode);
- enum halmac_ret_status (*halmac_init_h2c)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_cfg_rx_aggregation)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_rxagg_cfg *phalmac_rxagg_cfg);
- enum halmac_ret_status (*halmac_init_protocol_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_init_edca_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_cfg_operation_mode)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_wireless_mode wireless_mode);
- enum halmac_ret_status (*halmac_cfg_ch_bw)(
- struct halmac_adapter *halmac_adapter, u8 channel,
- enum halmac_pri_ch_idx pri_ch_idx, enum halmac_bw bw);
- enum halmac_ret_status (*halmac_cfg_bw)(
- struct halmac_adapter *halmac_adapter, enum halmac_bw bw);
- enum halmac_ret_status (*halmac_init_wmac_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_init_mac_cfg)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_trx_mode mode);
- enum halmac_ret_status (*halmac_init_sdio_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_init_usb_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_init_pcie_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_init_interface_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_deinit_sdio_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_deinit_usb_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_deinit_pcie_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_deinit_interface_cfg)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_get_efuse_size)(
- struct halmac_adapter *halmac_adapter, u32 *halmac_size);
- enum halmac_ret_status (*halmac_get_efuse_available_size)(
- struct halmac_adapter *halmac_adapter, u32 *halmac_size);
- enum halmac_ret_status (*halmac_dump_efuse_map)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_read_cfg cfg);
- enum halmac_ret_status (*halmac_dump_efuse_map_bt)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_bank halmac_efues_bank, u32 bt_efuse_map_size,
- u8 *bt_efuse_map);
- enum halmac_ret_status (*halmac_write_efuse)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u8 halmac_value);
- enum halmac_ret_status (*halmac_read_efuse)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u8 *value);
- enum halmac_ret_status (*halmac_switch_efuse_bank)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_bank halmac_efues_bank);
- enum halmac_ret_status (*halmac_write_efuse_bt)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u8 halmac_value, enum halmac_efuse_bank halmac_efues_bank);
- enum halmac_ret_status (*halmac_get_logical_efuse_size)(
- struct halmac_adapter *halmac_adapter, u32 *halmac_size);
- enum halmac_ret_status (*halmac_dump_logical_efuse_map)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_efuse_read_cfg cfg);
- enum halmac_ret_status (*halmac_write_logical_efuse)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u8 halmac_value);
- enum halmac_ret_status (*halmac_read_logical_efuse)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u8 *value);
- enum halmac_ret_status (*halmac_pg_efuse_by_map)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_pg_efuse_info *pg_efuse_info,
- enum halmac_efuse_read_cfg cfg);
- enum halmac_ret_status (*halmac_get_c2h_info)(
- struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size);
- enum halmac_ret_status (*halmac_cfg_fwlps_option)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_fwlps_option *lps_option);
- enum halmac_ret_status (*halmac_cfg_fwips_option)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_fwips_option *ips_option);
- enum halmac_ret_status (*halmac_enter_wowlan)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_wowlan_option *wowlan_option);
- enum halmac_ret_status (*halmac_leave_wowlan)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_enter_ps)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_ps_state ps_state);
- enum halmac_ret_status (*halmac_leave_ps)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_h2c_lb)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_debug)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_cfg_parameter)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_phy_parameter_info *para_info, u8 full_fifo);
- enum halmac_ret_status (*halmac_update_packet)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_packet_id pkt_id, u8 *pkt, u32 pkt_size);
- enum halmac_ret_status (*halmac_bcn_ie_filter)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_bcn_ie_info *bcn_ie_info);
- u8 (*halmac_reg_read_8)(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
- enum halmac_ret_status (*halmac_reg_write_8)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u8 halmac_data);
- u16 (*halmac_reg_read_16)(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
- enum halmac_ret_status (*halmac_reg_write_16)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u16 halmac_data);
- u32 (*halmac_reg_read_32)(struct halmac_adapter *halmac_adapter,
- u32 halmac_offset);
- u32 (*halmac_reg_read_indirect_32)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset);
- u8 (*halmac_reg_sdio_cmd53_read_n)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u32 halmac_size, u8 *halmac_data);
- enum halmac_ret_status (*halmac_reg_write_32)(
- struct halmac_adapter *halmac_adapter, u32 halmac_offset,
- u32 halmac_data);
- enum halmac_ret_status (*halmac_tx_allowed_sdio)(
- struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size);
- enum halmac_ret_status (*halmac_set_bulkout_num)(
- struct halmac_adapter *halmac_adapter, u8 bulkout_num);
- enum halmac_ret_status (*halmac_get_sdio_tx_addr)(
- struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size, u32 *pcmd53_addr);
- enum halmac_ret_status (*halmac_get_usb_bulkout_id)(
- struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size, u8 *bulkout_id);
- enum halmac_ret_status (*halmac_timer_2s)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_fill_txdesc_checksum)(
- struct halmac_adapter *halmac_adapter, u8 *cur_desc);
- enum halmac_ret_status (*halmac_update_datapack)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type,
- struct halmac_phy_parameter_info *para_info);
- enum halmac_ret_status (*halmac_run_datapack)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_data_type halmac_data_type);
- enum halmac_ret_status (*halmac_cfg_drv_info)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_drv_info halmac_drv_info);
- enum halmac_ret_status (*halmac_send_bt_coex)(
- struct halmac_adapter *halmac_adapter, u8 *bt_buf, u32 bt_size,
- u8 ack);
- enum halmac_ret_status (*halmac_verify_platform_api)(
- struct halmac_adapter *halmac_adapte);
- u32 (*halmac_get_fifo_size)(struct halmac_adapter *halmac_adapter,
- enum hal_fifo_sel halmac_fifo_sel);
- enum halmac_ret_status (*halmac_dump_fifo)(
- struct halmac_adapter *halmac_adapter,
- enum hal_fifo_sel halmac_fifo_sel, u32 halmac_start_addr,
- u32 halmac_fifo_dump_size, u8 *fifo_map);
- enum halmac_ret_status (*halmac_cfg_txbf)(
- struct halmac_adapter *halmac_adapter, u8 userid,
- enum halmac_bw bw, u8 txbf_en);
- enum halmac_ret_status (*halmac_cfg_mumimo)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_cfg_mumimo_para *cfgmu);
- enum halmac_ret_status (*halmac_cfg_sounding)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_snd_role role, enum halmac_data_rate datarate);
- enum halmac_ret_status (*halmac_del_sounding)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_snd_role role);
- enum halmac_ret_status (*halmac_su_bfer_entry_init)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_su_bfer_init_para *su_bfer_init);
- enum halmac_ret_status (*halmac_su_bfee_entry_init)(
- struct halmac_adapter *halmac_adapter, u8 userid, u16 paid);
- enum halmac_ret_status (*halmac_mu_bfer_entry_init)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_mu_bfer_init_para *mu_bfer_init);
- enum halmac_ret_status (*halmac_mu_bfee_entry_init)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_mu_bfee_init_para *mu_bfee_init);
- enum halmac_ret_status (*halmac_su_bfer_entry_del)(
- struct halmac_adapter *halmac_adapter, u8 userid);
- enum halmac_ret_status (*halmac_su_bfee_entry_del)(
- struct halmac_adapter *halmac_adapter, u8 userid);
- enum halmac_ret_status (*halmac_mu_bfer_entry_del)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_mu_bfee_entry_del)(
- struct halmac_adapter *halmac_adapter, u8 userid);
- enum halmac_ret_status (*halmac_add_ch_info)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_ch_info *ch_info);
- enum halmac_ret_status (*halmac_add_extra_ch_info)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_ch_extra_info *ch_extra_info);
- enum halmac_ret_status (*halmac_ctrl_ch_switch)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_ch_switch_option *cs_option);
- enum halmac_ret_status (*halmac_p2pps)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_p2pps *p2p_ps);
- enum halmac_ret_status (*halmac_clear_ch_info)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_send_general_info)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_general_info *pg_general_info);
- enum halmac_ret_status (*halmac_start_iqk)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_iqk_para_ *iqk_para);
- enum halmac_ret_status (*halmac_ctrl_pwr_tracking)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_pwr_tracking_option *pwr_tracking_opt);
- enum halmac_ret_status (*halmac_psd)(
- struct halmac_adapter *halmac_adapter, u16 start_psd,
- u16 end_psd);
- enum halmac_ret_status (*halmac_cfg_tx_agg_align)(
- struct halmac_adapter *halmac_adapter, u8 enable,
- u16 align_size);
- enum halmac_ret_status (*halmac_query_status)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_feature_id feature_id,
- enum halmac_cmd_process_status *process_status, u8 *data,
- u32 *size);
- enum halmac_ret_status (*halmac_reset_feature)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_feature_id feature_id);
- enum halmac_ret_status (*halmac_check_fw_status)(
- struct halmac_adapter *halmac_adapter, bool *fw_status);
- enum halmac_ret_status (*halmac_dump_fw_dmem)(
- struct halmac_adapter *halmac_adapter, u8 *dmem, u32 *size);
- enum halmac_ret_status (*halmac_cfg_max_dl_size)(
- struct halmac_adapter *halmac_adapter, u32 size);
- enum halmac_ret_status (*halmac_cfg_la_mode)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_la_mode la_mode);
- enum halmac_ret_status (*halmac_cfg_rx_fifo_expanding_mode)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_rx_fifo_expanding_mode rx_fifo_expanding_mode);
- enum halmac_ret_status (*halmac_config_security)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_security_setting *sec_setting);
- u8 (*halmac_get_used_cam_entry_num)(
- struct halmac_adapter *halmac_adapter,
- enum hal_security_type sec_type);
- enum halmac_ret_status (*halmac_write_cam)(
- struct halmac_adapter *halmac_adapter, u32 entry_index,
- struct halmac_cam_entry_info *cam_entry_info);
- enum halmac_ret_status (*halmac_read_cam_entry)(
- struct halmac_adapter *halmac_adapter, u32 entry_index,
- struct halmac_cam_entry_format *content);
- enum halmac_ret_status (*halmac_clear_cam_entry)(
- struct halmac_adapter *halmac_adapter, u32 entry_index);
- enum halmac_ret_status (*halmac_get_hw_value)(
- struct halmac_adapter *halmac_adapter, enum halmac_hw_id hw_id,
- void *pvalue);
- enum halmac_ret_status (*halmac_set_hw_value)(
- struct halmac_adapter *halmac_adapter, enum halmac_hw_id hw_id,
- void *pvalue);
- enum halmac_ret_status (*halmac_cfg_drv_rsvd_pg_num)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_drv_rsvd_pg_num pg_num);
- enum halmac_ret_status (*halmac_get_chip_version)(
- struct halmac_adapter *halmac_adapter,
- struct halmac_ver *version);
- enum halmac_ret_status (*halmac_chk_txdesc)(
- struct halmac_adapter *halmac_adapter, u8 *halmac_buf,
- u32 halmac_size);
- enum halmac_ret_status (*halmac_dl_drv_rsvd_page)(
- struct halmac_adapter *halmac_adapter, u8 pg_offset,
- u8 *hal_buf, u32 size);
- enum halmac_ret_status (*halmac_pcie_switch)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_pcie_cfg pcie_cfg);
- enum halmac_ret_status (*halmac_phy_cfg)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_intf_phy_platform platform);
- enum halmac_ret_status (*halmac_cfg_csi_rate)(
- struct halmac_adapter *halmac_adapter, u8 rssi, u8 current_rate,
- u8 fixrate_en, u8 *new_rate);
- enum halmac_ret_status (*halmac_sdio_cmd53_4byte)(
- struct halmac_adapter *halmac_adapter,
- enum halmac_sdio_cmd53_4byte_mode cmd53_4byte_mode);
- enum halmac_ret_status (*halmac_interface_integration_tuning)(
- struct halmac_adapter *halmac_adapter);
- enum halmac_ret_status (*halmac_txfifo_is_empty)(
- struct halmac_adapter *halmac_adapter, u32 chk_num);
-};
-
-#define HALMAC_GET_API(phalmac_adapter) \
- ((struct halmac_api *)phalmac_adapter->halmac_api)
-
-static inline enum halmac_ret_status
-halmac_adapter_validate(struct halmac_adapter *halmac_adapter)
-{
- if ((!halmac_adapter) ||
- (halmac_adapter->hal_adapter_backup != halmac_adapter))
- return HALMAC_RET_ADAPTER_INVALID;
-
- return HALMAC_RET_SUCCESS;
-}
-
-static inline enum halmac_ret_status
-halmac_api_validate(struct halmac_adapter *halmac_adapter)
-{
- if (halmac_adapter->halmac_state.api_state != HALMAC_API_STATE_INIT)
- return HALMAC_RET_API_INVALID;
-
- return HALMAC_RET_SUCCESS;
-}
-
-static inline enum halmac_ret_status
-halmac_fw_validate(struct halmac_adapter *halmac_adapter)
-{
- if (halmac_adapter->halmac_state.dlfw_state != HALMAC_DLFW_DONE &&
- halmac_adapter->halmac_state.dlfw_state != HALMAC_GEN_INFO_SENT)
- return HALMAC_RET_NO_DLFW;
-
- return HALMAC_RET_SUCCESS;
-}
-
-#endif
diff --git a/drivers/staging/rtlwifi/halmac/halmac_usb_reg.h b/drivers/staging/rtlwifi/halmac/halmac_usb_reg.h
deleted file mode 100644
index 27910a4adb4e..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_usb_reg.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HALMAC_USB_REG_H__
-#define __HALMAC_USB_REG_H__
-
-#endif /* __HALMAC_USB_REG_H__ */
diff --git a/drivers/staging/rtlwifi/halmac/rtl_halmac.c b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
deleted file mode 100644
index 7bfc9620479a..000000000000
--- a/drivers/staging/rtlwifi/halmac/rtl_halmac.c
+++ /dev/null
@@ -1,1373 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "halmac_api.h"
-#include "rtl_halmac.h"
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-
-#define DEFAULT_INDICATOR_TIMELMT msecs_to_jiffies(1000) /* ms */
-#define FIRMWARE_MAX_SIZE HALMAC_FW_SIZE_MAX_88XX
-
-static struct rtl_halmac_ops rtl_halmac_operation = {
- .halmac_init_adapter = rtl_halmac_init_adapter,
- .halmac_deinit_adapter = rtl_halmac_deinit_adapter,
- .halmac_init_hal = rtl_halmac_init_hal,
- .halmac_deinit_hal = rtl_halmac_deinit_hal,
- .halmac_poweron = rtl_halmac_poweron,
- .halmac_poweroff = rtl_halmac_poweroff,
-
- .halmac_phy_power_switch = rtl_halmac_phy_power_switch,
- .halmac_set_mac_address = rtl_halmac_set_mac_address,
- .halmac_set_bssid = rtl_halmac_set_bssid,
-
- .halmac_get_physical_efuse_size = rtl_halmac_get_physical_efuse_size,
- .halmac_read_physical_efuse_map = rtl_halmac_read_physical_efuse_map,
- .halmac_get_logical_efuse_size = rtl_halmac_get_logical_efuse_size,
- .halmac_read_logical_efuse_map = rtl_halmac_read_logical_efuse_map,
-
- .halmac_set_bandwidth = rtl_halmac_set_bandwidth,
-
- .halmac_c2h_handle = rtl_halmac_c2h_handle,
-
- .halmac_chk_txdesc = rtl_halmac_chk_txdesc,
-};
-
-struct rtl_halmac_ops *rtl_halmac_get_ops_pointer(void)
-{
- return &rtl_halmac_operation;
-}
-EXPORT_SYMBOL(rtl_halmac_get_ops_pointer);
-
-/*
- * Driver API for HALMAC operations
- */
-
-static u8 _halmac_reg_read_8(void *p, u32 offset)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)p;
-
- return rtl_read_byte(rtlpriv, offset);
-}
-
-static u16 _halmac_reg_read_16(void *p, u32 offset)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)p;
-
- return rtl_read_word(rtlpriv, offset);
-}
-
-static u32 _halmac_reg_read_32(void *p, u32 offset)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)p;
-
- return rtl_read_dword(rtlpriv, offset);
-}
-
-static void _halmac_reg_write_8(void *p, u32 offset, u8 val)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)p;
-
- rtl_write_byte(rtlpriv, offset, val);
-}
-
-static void _halmac_reg_write_16(void *p, u32 offset, u16 val)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)p;
-
- rtl_write_word(rtlpriv, offset, val);
-}
-
-static void _halmac_reg_write_32(void *p, u32 offset, u32 val)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)p;
-
- rtl_write_dword(rtlpriv, offset, val);
-}
-
-static bool _halmac_write_data_rsvd_page(void *p, u8 *buf, u32 size)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)p;
-
- if (rtlpriv->cfg->ops->halmac_cb_write_data_rsvd_page &&
- rtlpriv->cfg->ops->halmac_cb_write_data_rsvd_page(rtlpriv, buf,
- size))
- return true;
-
- return false;
-}
-
-static bool _halmac_write_data_h2c(void *p, u8 *buf, u32 size)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)p;
-
- if (rtlpriv->cfg->ops->halmac_cb_write_data_h2c &&
- rtlpriv->cfg->ops->halmac_cb_write_data_h2c(rtlpriv, buf, size))
- return true;
-
- return false;
-}
-
-static const char *const RTL_HALMAC_FEATURE_NAME[] = {
- "HALMAC_FEATURE_CFG_PARA",
- "HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE",
- "HALMAC_FEATURE_DUMP_LOGICAL_EFUSE",
- "HALMAC_FEATURE_UPDATE_PACKET",
- "HALMAC_FEATURE_UPDATE_DATAPACK",
- "HALMAC_FEATURE_RUN_DATAPACK",
- "HALMAC_FEATURE_CHANNEL_SWITCH",
- "HALMAC_FEATURE_IQK",
- "HALMAC_FEATURE_POWER_TRACKING",
- "HALMAC_FEATURE_PSD",
- "HALMAC_FEATURE_ALL"};
-
-static inline bool is_valid_id_status(struct rtl_priv *rtlpriv,
- enum halmac_feature_id id,
- enum halmac_cmd_process_status status)
-{
- switch (id) {
- case HALMAC_FEATURE_CFG_PARA:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- case HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- if (status != HALMAC_CMD_PROCESS_DONE) {
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s: <WARN> id(%d) unspecified status(%d)!\n",
- __func__, id, status);
- }
- break;
- case HALMAC_FEATURE_DUMP_LOGICAL_EFUSE:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- if (status != HALMAC_CMD_PROCESS_DONE) {
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s: <WARN> id(%d) unspecified status(%d)!\n",
- __func__, id, status);
- }
- break;
- case HALMAC_FEATURE_UPDATE_PACKET:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- case HALMAC_FEATURE_UPDATE_DATAPACK:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- case HALMAC_FEATURE_RUN_DATAPACK:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- case HALMAC_FEATURE_CHANNEL_SWITCH:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- case HALMAC_FEATURE_IQK:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- case HALMAC_FEATURE_POWER_TRACKING:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- case HALMAC_FEATURE_PSD:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- case HALMAC_FEATURE_ALL:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: %s\n", __func__,
- RTL_HALMAC_FEATURE_NAME[id]);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s: unknown feature id(%d)\n", __func__, id);
- return false;
- }
-
- return true;
-}
-
-static int init_halmac_event_with_waittime(struct rtl_priv *rtlpriv,
- enum halmac_feature_id id, u8 *buf,
- u32 size, u32 time)
-{
- struct completion *comp;
-
- if (!rtlpriv->halmac.indicator[id].comp) {
- comp = kzalloc(sizeof(*comp), GFP_KERNEL);
- if (!comp)
- return -ENOMEM;
- } else {
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s: <WARN> id(%d) sctx is not NULL!!\n", __func__,
- id);
- comp = rtlpriv->halmac.indicator[id].comp;
- rtlpriv->halmac.indicator[id].comp = NULL;
- }
-
- init_completion(comp);
- rtlpriv->halmac.indicator[id].wait_ms = time;
-
- rtlpriv->halmac.indicator[id].buffer = buf;
- rtlpriv->halmac.indicator[id].buf_size = size;
- rtlpriv->halmac.indicator[id].ret_size = 0;
- rtlpriv->halmac.indicator[id].status = 0;
-	/* fill sctx last to make sure the other variables are all ready! */
- rtlpriv->halmac.indicator[id].comp = comp;
-
- return 0;
-}
-
-static inline int init_halmac_event(struct rtl_priv *rtlpriv,
- enum halmac_feature_id id, u8 *buf,
- u32 size)
-{
- return init_halmac_event_with_waittime(rtlpriv, id, buf, size,
- DEFAULT_INDICATOR_TIMELMT);
-}
-
-static void free_halmac_event(struct rtl_priv *rtlpriv,
- enum halmac_feature_id id)
-{
- struct completion *comp;
-
- if (!rtlpriv->halmac.indicator[id].comp)
- return;
-
- comp = rtlpriv->halmac.indicator[id].comp;
- rtlpriv->halmac.indicator[id].comp = NULL;
- kfree(comp);
-}
-
-static int wait_halmac_event(struct rtl_priv *rtlpriv,
- enum halmac_feature_id id)
-{
- struct completion *comp;
- int ret;
-
- comp = rtlpriv->halmac.indicator[id].comp;
- if (!comp)
- return -1;
-
- ret = wait_for_completion_timeout(
- comp, rtlpriv->halmac.indicator[id].wait_ms);
- free_halmac_event(rtlpriv, id);
- if (ret > 0)
- return 0;
-
- return -1;
-}
-
-/*
- * Return:
- *	Always returns true; HALMAC does not care about the return value.
- */
-static bool
-_halmac_event_indication(void *p, enum halmac_feature_id feature_id,
- enum halmac_cmd_process_status process_status, u8 *buf,
- u32 size)
-{
- struct rtl_priv *rtlpriv;
- struct rtl_halmac_indicator *tbl, *indicator;
- struct completion *comp;
- u32 cpsz;
- bool ret;
-
- rtlpriv = (struct rtl_priv *)p;
- tbl = rtlpriv->halmac.indicator;
-
- ret = is_valid_id_status(rtlpriv, feature_id, process_status);
- if (!ret)
- goto exit;
-
- indicator = &tbl[feature_id];
- indicator->status = process_status;
- indicator->ret_size = size;
- if (!indicator->comp) {
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s: No feature id(%d) waiting!!\n", __func__,
- feature_id);
- goto exit;
- }
- comp = indicator->comp;
-
- if (process_status == HALMAC_CMD_PROCESS_ERROR) {
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s: Something wrong id(%d)!!\n", __func__,
- feature_id);
- complete(comp); /* may provide error code */
- goto exit;
- }
-
- if (size > indicator->buf_size) {
- RT_TRACE(
- rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s: <WARN> id(%d) buffer is not enough(%d<%d), data will be truncated!\n",
- __func__, feature_id, indicator->buf_size, size);
- cpsz = indicator->buf_size;
- } else {
- cpsz = size;
- }
-
- if (cpsz && indicator->buffer)
- memcpy(indicator->buffer, buf, cpsz);
-
- complete(comp);
-
-exit:
- return true;
-}
-
-static struct halmac_platform_api rtl_halmac_platform_api = {
- /* R/W register */
- .REG_READ_8 = _halmac_reg_read_8,
- .REG_READ_16 = _halmac_reg_read_16,
- .REG_READ_32 = _halmac_reg_read_32,
- .REG_WRITE_8 = _halmac_reg_write_8,
- .REG_WRITE_16 = _halmac_reg_write_16,
- .REG_WRITE_32 = _halmac_reg_write_32,
-
- /* Write data */
-	/* implemented at the HAL-IC level */
- .SEND_RSVD_PAGE = _halmac_write_data_rsvd_page,
- .SEND_H2C_PKT = _halmac_write_data_h2c,
-
- .EVENT_INDICATION = _halmac_event_indication,
-};
-
-static int init_priv(struct rtl_halmac *halmac)
-{
- struct rtl_halmac_indicator *indicator;
- u32 count, size;
-
- halmac->send_general_info = 0;
-
- count = HALMAC_FEATURE_ALL + 1;
- size = sizeof(*indicator) * count;
- indicator = kzalloc(size, GFP_KERNEL);
- if (!indicator)
- return -ENOMEM;
- halmac->indicator = indicator;
-
- return 0;
-}
-
-static void deinit_priv(struct rtl_halmac *halmac)
-{
- struct rtl_halmac_indicator *indicator;
-
- indicator = halmac->indicator;
- halmac->indicator = NULL;
- kfree(indicator);
-}
-
-int rtl_halmac_init_adapter(struct rtl_priv *rtlpriv)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- enum halmac_interface intf;
- enum halmac_ret_status status;
- int err = 0;
- struct halmac_platform_api *pf_api = &rtl_halmac_platform_api;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- if (halmac) {
- err = 0;
- goto out;
- }
-
- err = init_priv(&rtlpriv->halmac);
- if (err)
- goto out;
-
- intf = HALMAC_INTERFACE_PCIE;
- status = halmac_init_adapter(rtlpriv, pf_api, intf, &halmac, &api);
- if (status != HALMAC_RET_SUCCESS) {
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s: halmac_init_adapter fail!(status=%d)\n", __func__,
- status);
- err = -1;
- goto out;
- }
-
- rtlpriv->halmac.internal = halmac;
-
-out:
- if (err)
- rtl_halmac_deinit_adapter(rtlpriv);
-
- return err;
-}
-
-int rtl_halmac_deinit_adapter(struct rtl_priv *rtlpriv)
-{
- struct halmac_adapter *halmac;
- enum halmac_ret_status status;
- int err = 0;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- if (!halmac) {
- err = 0;
- goto out;
- }
-
- deinit_priv(&rtlpriv->halmac);
-
- halmac_halt_api(halmac);
-
- status = halmac_deinit_adapter(halmac);
- rtlpriv->halmac.internal = NULL;
- if (status != HALMAC_RET_SUCCESS) {
- err = -1;
- goto out;
- }
-
-out:
- return err;
-}
-
-int rtl_halmac_poweron(struct rtl_priv *rtlpriv)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- int err = -1;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- if (!halmac)
- goto out;
-
- api = HALMAC_GET_API(halmac);
-
- status = api->halmac_pre_init_system_cfg(halmac);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- status = api->halmac_mac_power_switch(halmac, HALMAC_MAC_POWER_ON);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- status = api->halmac_init_system_cfg(halmac);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = 0;
-out:
- return err;
-}
-
-int rtl_halmac_poweroff(struct rtl_priv *rtlpriv)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- int err = -1;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- if (!halmac)
- goto out;
-
- api = HALMAC_GET_API(halmac);
-
- status = api->halmac_mac_power_switch(halmac, HALMAC_MAC_POWER_OFF);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = 0;
-out:
- return err;
-}
-
-/*
- * Note:
- *	When this function returns, the register REG_RCR may have been changed.
- */
-int rtl_halmac_config_rx_info(struct rtl_priv *rtlpriv,
- enum halmac_drv_info info)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- int err = -1;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(halmac);
-
- status = api->halmac_cfg_drv_info(halmac, info);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = 0;
-out:
- return err;
-}
-
-static enum halmac_ret_status init_mac_flow(struct rtl_priv *rtlpriv)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- u8 wifi_test = 0;
- int err;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(halmac);
-
- if (wifi_test)
- status = api->halmac_init_mac_cfg(halmac, HALMAC_TRX_MODE_WMM);
- else
- status = api->halmac_init_mac_cfg(halmac,
- HALMAC_TRX_MODE_NORMAL);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = rtl_halmac_rx_agg_switch(rtlpriv, true);
- if (err)
- goto out;
-
- if (rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS7])
- status = api->halmac_cfg_operation_mode(
- halmac, HALMAC_WIRELESS_MODE_AC);
- else if (rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7])
- status = api->halmac_cfg_operation_mode(halmac,
- HALMAC_WIRELESS_MODE_N);
- else if (rtlpriv->cfg->maps[RTL_RC_OFDM_RATE6M])
- status = api->halmac_cfg_operation_mode(halmac,
- HALMAC_WIRELESS_MODE_G);
- else
- status = api->halmac_cfg_operation_mode(halmac,
- HALMAC_WIRELESS_MODE_B);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
-out:
- return status;
-}
-
-static inline enum halmac_rf_type _rf_type_drv2halmac(enum rf_type rf_drv)
-{
- enum halmac_rf_type rf_mac;
-
- switch (rf_drv) {
- case RF_1T2R:
- rf_mac = HALMAC_RF_1T2R;
- break;
- case RF_2T2R:
- rf_mac = HALMAC_RF_2T2R;
- break;
- case RF_1T1R:
- rf_mac = HALMAC_RF_1T1R;
- break;
- case RF_2T2R_GREEN:
- rf_mac = HALMAC_RF_2T2R_GREEN;
- break;
- default:
- rf_mac = (enum halmac_rf_type)rf_drv;
- break;
- }
-
- return rf_mac;
-}
-
-static int _send_general_info(struct rtl_priv *rtlpriv)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- struct halmac_general_info info;
- enum halmac_ret_status status;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- if (!halmac)
- return -1;
- api = HALMAC_GET_API(halmac);
-
- memset(&info, 0, sizeof(info));
- info.rfe_type = rtlpriv->rtlhal.rfe_type;
- info.rf_type = _rf_type_drv2halmac(rtlpriv->phy.rf_type);
-
- status = api->halmac_send_general_info(halmac, &info);
- switch (status) {
- case HALMAC_RET_SUCCESS:
- break;
- case HALMAC_RET_NO_DLFW:
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_WARNING,
- "%s: halmac_send_general_info() fail because fw not dl!\n",
- __func__);
- /* fall through */
- default:
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Notices:
- * Make sure
- * 1. rtl_hal_get_hwreg(HW_VAR_RF_TYPE)
- * 2. HAL_DATA_TYPE.rfe_type
- *	are ready for use before calling this function.
- */
-static int _halmac_init_hal(struct rtl_priv *rtlpriv, u8 *fw, u32 fwsize)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- bool ok;
- bool fw_ok = false;
- int err, err_ret = -1;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- if (!halmac)
- goto out;
- api = HALMAC_GET_API(halmac);
-
- /* StatePowerOff */
-
- /* SKIP: halmac_init_adapter (Already done before) */
-
- /* halmac_pre_Init_system_cfg */
- /* halmac_mac_power_switch(on) */
- /* halmac_Init_system_cfg */
- err = rtl_halmac_poweron(rtlpriv);
- if (err)
- goto out;
-
- /* StatePowerOn */
-
- /* DownloadFW */
- rtlpriv->halmac.send_general_info = 0;
- if (fw && fwsize) {
- err = rtl_halmac_dlfw(rtlpriv, fw, fwsize);
- if (err)
- goto out;
- fw_ok = true;
- }
-
- /* InitMACFlow */
- status = init_mac_flow(rtlpriv);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- /* halmac_send_general_info */
- if (fw_ok) {
- rtlpriv->halmac.send_general_info = 0;
- err = _send_general_info(rtlpriv);
- if (err)
- goto out;
- } else {
- rtlpriv->halmac.send_general_info = 1;
- }
-
- /* Init Phy parameter-MAC */
- if (rtlpriv->cfg->ops->halmac_cb_init_mac_register)
- ok = rtlpriv->cfg->ops->halmac_cb_init_mac_register(rtlpriv);
- else
- ok = false;
-
- if (!ok)
- goto out;
-
- /* StateMacInitialized */
-
- /* halmac_cfg_drv_info */
- err = rtl_halmac_config_rx_info(rtlpriv, HALMAC_DRV_INFO_PHY_STATUS);
- if (err)
- goto out;
-
- /* halmac_set_hw_value(HALMAC_HW_EN_BB_RF) */
- /* Init BB, RF */
- if (rtlpriv->cfg->ops->halmac_cb_init_bb_rf_register)
- ok = rtlpriv->cfg->ops->halmac_cb_init_bb_rf_register(rtlpriv);
- else
- ok = false;
-
- if (!ok)
- goto out;
-
- status = api->halmac_init_interface_cfg(halmac);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- /* SKIP: halmac_verify_platform_api */
- /* SKIP: halmac_h2c_lb */
-
- /* StateRxIdle */
-
- err_ret = 0;
-out:
- return err_ret;
-}
-
-int rtl_halmac_init_hal(struct rtl_priv *rtlpriv)
-{
- if (!rtlpriv->rtlhal.pfirmware || rtlpriv->rtlhal.fwsize == 0)
- return -1;
-
- return _halmac_init_hal(rtlpriv, rtlpriv->rtlhal.pfirmware,
- rtlpriv->rtlhal.fwsize);
-}
-
-int rtl_halmac_deinit_hal(struct rtl_priv *rtlpriv)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- int err = -1;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- if (!halmac)
- goto out;
- api = HALMAC_GET_API(halmac);
-
- status = api->halmac_deinit_interface_cfg(halmac);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- /* rtw_hal_power_off(adapter); */
- status = api->halmac_mac_power_switch(halmac, HALMAC_MAC_POWER_OFF);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = 0;
-out:
- return err;
-}
-
-int rtl_halmac_self_verify(struct rtl_priv *rtlpriv)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- int err = -1;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- status = api->halmac_verify_platform_api(mac);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- status = api->halmac_h2c_lb(mac);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = 0;
-out:
- return err;
-}
-
-int rtl_halmac_dlfw(struct rtl_priv *rtlpriv, u8 *fw, u32 fwsize)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- struct halmac_fw_version fw_version;
- int err = 0;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- if ((!fw) || (!fwsize))
- return -1;
-
- /* 1. Driver Stop Tx */
- /* ToDo */
-
- /* 2. Driver Check Tx FIFO is empty */
- /* ToDo */
-
- /* 3. Config MAX download size */
- api->halmac_cfg_max_dl_size(mac, 0x1000);
-
- /* 4. Download Firmware */
- mac->h2c_packet_seq = 0;
- status = api->halmac_download_firmware(mac, fw, fwsize);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- status = api->halmac_get_fw_version(mac, &fw_version);
- if (status == HALMAC_RET_SUCCESS) {
- rtlpriv->rtlhal.fw_version = fw_version.version;
- rtlpriv->rtlhal.fw_subversion =
- (fw_version.sub_version << 8) | (fw_version.sub_index);
-
- RT_TRACE(
- rtlpriv, COMP_HALMAC, DBG_DMESG,
- "halmac report firmware version %04X.%04X\n",
- rtlpriv->rtlhal.fw_version,
- rtlpriv->rtlhal.fw_subversion);
- }
-
- if (rtlpriv->halmac.send_general_info) {
- rtlpriv->halmac.send_general_info = 0;
- err = _send_general_info(rtlpriv);
- }
-
- /* 5. Driver resume TX if needed */
- /* ToDo */
-
- /* 6. Reset driver variables if needed */
- /*hal->LastHMEBoxNum = 0;*/
-
- return err;
-}
-
-/*
- * Description:
- * Power on/off BB/RF domain.
- *
- * Parameters:
- * enable true/false for power on/off
- *
- * Return:
- * 0 Success
- * others Fail
- */
-int rtl_halmac_phy_power_switch(struct rtl_priv *rtlpriv, u8 enable)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- enum halmac_ret_status status;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- if (!halmac)
- return -1;
- api = HALMAC_GET_API(halmac);
-
- status = api->halmac_set_hw_value(halmac, HALMAC_HW_EN_BB_RF, &enable);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- return 0;
-}
-
-static bool _is_fw_read_cmd_down(struct rtl_priv *rtlpriv, u8 msgbox_num)
-{
- bool read_down = false;
- int retry_cnts = 100;
- u8 valid;
-
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- "%s, reg_1cc(%x), msg_box(%d)...\n", __func__,
- rtl_read_byte(rtlpriv, REG_HMETFR), msgbox_num);
-
- do {
- valid = rtl_read_byte(rtlpriv, REG_HMETFR) & BIT(msgbox_num);
- if (valid == 0)
- read_down = true;
- else
- mdelay(1);
- } while ((!read_down) && (retry_cnts--));
-
- return read_down;
-}
-
-int rtl_halmac_send_h2c(struct rtl_priv *rtlpriv, u8 *h2c)
-{
- u8 h2c_box_num = 0;
- u32 msgbox_addr = 0;
- u32 msgbox_ex_addr = 0;
- __le32 h2c_cmd = 0;
- __le32 h2c_cmd_ex = 0;
- s32 ret = -1;
- unsigned long flag = 0;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
- if (!h2c) {
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD, "%s: pbuf is NULL\n",
- __func__);
- return ret;
- }
-
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
-
-	/* beware of race conditions when setting up the H2C cmd */
- h2c_box_num = rtlhal->last_hmeboxnum;
-
- if (!_is_fw_read_cmd_down(rtlpriv, h2c_box_num)) {
- RT_TRACE(rtlpriv, COMP_HALMAC, DBG_LOUD,
- " fw read cmd failed...\n");
- goto exit;
- }
-
- /* Write Ext command(byte 4 -7) */
- msgbox_ex_addr = REG_HMEBOX_E0 + (h2c_box_num * EX_MESSAGE_BOX_SIZE);
- memcpy((u8 *)(&h2c_cmd_ex), h2c + 4, EX_MESSAGE_BOX_SIZE);
- rtl_write_dword(rtlpriv, msgbox_ex_addr, le32_to_cpu(h2c_cmd_ex));
-
- /* Write command (byte 0 -3 ) */
- msgbox_addr = REG_HMEBOX0 + (h2c_box_num * MESSAGE_BOX_SIZE);
- memcpy((u8 *)(&h2c_cmd), h2c, 4);
- rtl_write_dword(rtlpriv, msgbox_addr, le32_to_cpu(h2c_cmd));
-
- /* update last msg box number */
- rtlhal->last_hmeboxnum = (h2c_box_num + 1) % MAX_H2C_BOX_NUMS;
- ret = 0;
-
-exit:
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- return ret;
-}
-
-int rtl_halmac_c2h_handle(struct rtl_priv *rtlpriv, u8 *c2h, u32 size)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- status = api->halmac_get_c2h_info(mac, c2h, size);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- return 0;
-}
-
-int rtl_halmac_get_physical_efuse_size(struct rtl_priv *rtlpriv, u32 *size)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- u32 val;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- status = api->halmac_get_efuse_size(mac, &val);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- *size = val;
- return 0;
-}
-
-int rtl_halmac_read_physical_efuse_map(struct rtl_priv *rtlpriv, u8 *map,
- u32 size)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- enum halmac_feature_id id;
- int ret;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
- id = HALMAC_FEATURE_DUMP_PHYSICAL_EFUSE;
-
- ret = init_halmac_event(rtlpriv, id, map, size);
- if (ret)
- return -1;
-
- status = api->halmac_dump_efuse_map(mac, HALMAC_EFUSE_R_DRV);
- if (status != HALMAC_RET_SUCCESS) {
- free_halmac_event(rtlpriv, id);
- return -1;
- }
-
- ret = wait_halmac_event(rtlpriv, id);
- if (ret)
- return -1;
-
- return 0;
-}
-
-int rtl_halmac_read_physical_efuse(struct rtl_priv *rtlpriv, u32 offset,
- u32 cnt, u8 *data)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- u8 v;
- u32 i;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- for (i = 0; i < cnt; i++) {
- status = api->halmac_read_efuse(mac, offset + i, &v);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
- data[i] = v;
- }
-
- return 0;
-}
-
-int rtl_halmac_write_physical_efuse(struct rtl_priv *rtlpriv, u32 offset,
- u32 cnt, u8 *data)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- u32 i;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- for (i = 0; i < cnt; i++) {
- status = api->halmac_write_efuse(mac, offset + i, data[i]);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
- }
-
- return 0;
-}
-
-int rtl_halmac_get_logical_efuse_size(struct rtl_priv *rtlpriv, u32 *size)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- u32 val;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- status = api->halmac_get_logical_efuse_size(mac, &val);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- *size = val;
- return 0;
-}
-
-int rtl_halmac_read_logical_efuse_map(struct rtl_priv *rtlpriv, u8 *map,
- u32 size)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- enum halmac_feature_id id;
- int ret;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
- id = HALMAC_FEATURE_DUMP_LOGICAL_EFUSE;
-
- ret = init_halmac_event(rtlpriv, id, map, size);
- if (ret)
- return -1;
-
- status = api->halmac_dump_logical_efuse_map(mac, HALMAC_EFUSE_R_AUTO);
- if (status != HALMAC_RET_SUCCESS) {
- free_halmac_event(rtlpriv, id);
- return -1;
- }
-
- ret = wait_halmac_event(rtlpriv, id);
- if (ret)
- return -1;
-
- return 0;
-}
-
-int rtl_halmac_write_logical_efuse_map(struct rtl_priv *rtlpriv, u8 *map,
- u32 size, u8 *maskmap, u32 masksize)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- struct halmac_pg_efuse_info pginfo;
- enum halmac_ret_status status;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- pginfo.efuse_map = map;
- pginfo.efuse_map_size = size;
- pginfo.efuse_mask = maskmap;
- pginfo.efuse_mask_size = masksize;
-
- status = api->halmac_pg_efuse_by_map(mac, &pginfo, HALMAC_EFUSE_R_AUTO);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- return 0;
-}
-
-int rtl_halmac_read_logical_efuse(struct rtl_priv *rtlpriv, u32 offset, u32 cnt,
- u8 *data)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- u8 v;
- u32 i;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- for (i = 0; i < cnt; i++) {
- status = api->halmac_read_logical_efuse(mac, offset + i, &v);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
- data[i] = v;
- }
-
- return 0;
-}
-
-int rtl_halmac_write_logical_efuse(struct rtl_priv *rtlpriv, u32 offset,
- u32 cnt, u8 *data)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- u32 i;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- for (i = 0; i < cnt; i++) {
- status = api->halmac_write_logical_efuse(mac, offset + i,
- data[i]);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
- }
-
- return 0;
-}
-
-int rtl_halmac_set_mac_address(struct rtl_priv *rtlpriv, u8 hwport, u8 *addr)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- u8 port;
- union halmac_wlan_addr hwa;
- enum halmac_ret_status status;
- int err = -1;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(halmac);
-
- port = hwport;
- memset(&hwa, 0, sizeof(hwa));
- memcpy(hwa.address, addr, 6);
-
- status = api->halmac_cfg_mac_addr(halmac, port, &hwa);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = 0;
-out:
- return err;
-}
-
-int rtl_halmac_set_bssid(struct rtl_priv *rtlpriv, u8 hwport, u8 *addr)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- u8 port;
- union halmac_wlan_addr hwa;
- enum halmac_ret_status status;
- int err = -1;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(halmac);
- port = hwport;
-
- memset(&hwa, 0, sizeof(union halmac_wlan_addr));
- memcpy(hwa.address, addr, 6);
- status = api->halmac_cfg_bssid(halmac, port, &hwa);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = 0;
-out:
- return err;
-}
-
-int rtl_halmac_set_bandwidth(struct rtl_priv *rtlpriv, u8 channel,
- u8 pri_ch_idx, u8 bw)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- status = api->halmac_cfg_ch_bw(mac, channel, pri_ch_idx, bw);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- return 0;
-}
-
-int rtl_halmac_get_hw_value(struct rtl_priv *rtlpriv, enum halmac_hw_id hw_id,
- void *pvalue)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- status = api->halmac_get_hw_value(mac, hw_id, pvalue);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- return 0;
-}
-
-int rtl_halmac_dump_fifo(struct rtl_priv *rtlpriv,
- enum hal_fifo_sel halmac_fifo_sel)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
- u8 *pfifo_map = NULL;
- u32 fifo_size = 0;
- s8 ret = 0;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- fifo_size = api->halmac_get_fifo_size(mac, halmac_fifo_sel);
- if (fifo_size)
- pfifo_map = vmalloc(fifo_size);
- if (!pfifo_map)
- return -1;
-
- status = api->halmac_dump_fifo(mac, halmac_fifo_sel, 0, fifo_size,
- pfifo_map);
-
- if (status != HALMAC_RET_SUCCESS) {
- ret = -1;
- goto _exit;
- }
-
-_exit:
- if (pfifo_map)
- vfree(pfifo_map);
- return ret;
-}
-
-int rtl_halmac_rx_agg_switch(struct rtl_priv *rtlpriv, bool enable)
-{
- struct halmac_adapter *halmac;
- struct halmac_api *api;
- struct halmac_rxagg_cfg rxaggcfg;
- enum halmac_ret_status status;
- int err = -1;
-
- halmac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(halmac);
- memset((void *)&rxaggcfg, 0, sizeof(rxaggcfg));
-
- if (enable) {
- /* enable RX agg. */
- /* PCIE do nothing */
- } else {
- /* disable RX agg. */
- rxaggcfg.mode = HALMAC_RX_AGG_MODE_NONE;
- }
-
- status = api->halmac_cfg_rx_aggregation(halmac, &rxaggcfg);
- if (status != HALMAC_RET_SUCCESS)
- goto out;
-
- err = 0;
-out:
- return err;
-}
-
-int rtl_halmac_get_wow_reason(struct rtl_priv *rtlpriv, u8 *reason)
-{
- u8 val8;
- int err = -1;
-
- val8 = rtl_read_byte(rtlpriv, 0x1C7);
- if (val8 == 0xEA)
- goto out;
-
- *reason = val8;
- err = 0;
-out:
- return err;
-}
-
-/*
- * Description:
- * Get RX driver info size. RX driver info is a small memory space between
- *	the RX descriptor and the RX payload.
- *
- * +-------------------------+
- * | RX descriptor |
- * | usually 24 bytes |
- * +-------------------------+
- * | RX driver info |
- * | depends on driver cfg |
- * +-------------------------+
- *	| RX payload |
- * | |
- * +-------------------------+
- *
- * Parameter:
- *	rtlpriv	pointer to the driver's struct rtl_priv
- * sz rx driver info size in bytes.
- *
- * Return:
- * 0 Success
- * other Fail
- */
-int rtl_halmac_get_drv_info_sz(struct rtl_priv *rtlpriv, u8 *sz)
-{
- /* enum halmac_ret_status status; */
- u8 dw = 6; /* max number */
-
- *sz = dw * 8;
- return 0;
-}
-
-int rtl_halmac_get_rsvd_drv_pg_bndy(struct rtl_priv *rtlpriv, u16 *drv_pg)
-{
- enum halmac_ret_status status;
- struct halmac_adapter *halmac = rtlpriv_to_halmac(rtlpriv);
- struct halmac_api *api = HALMAC_GET_API(halmac);
-
- status = api->halmac_get_hw_value(halmac, HALMAC_HW_RSVD_PG_BNDY,
- drv_pg);
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- return 0;
-}
-
-int rtl_halmac_chk_txdesc(struct rtl_priv *rtlpriv, u8 *txdesc, u32 size)
-{
- struct halmac_adapter *mac;
- struct halmac_api *api;
- enum halmac_ret_status status;
-
- mac = rtlpriv_to_halmac(rtlpriv);
- api = HALMAC_GET_API(mac);
-
- status = api->halmac_chk_txdesc(mac, txdesc, size);
-
- if (status != HALMAC_RET_SUCCESS)
- return -1;
-
- return 0;
-}
-
-MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
-MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
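Beyond the deletion, the file above is a compact example of the completion-based handshake the driver used for HALMAC's asynchronous "event indication" callback: init_halmac_event() allocates a per-feature indicator, the HALMAC call is issued, wait_halmac_event() blocks with a timeout, and _halmac_event_indication() copies the payload into the indicator buffer and signals the completion. The sketch below rebuilds that handshake with POSIX threads purely for illustration; all names in it are made up, the timeout handling is omitted, and none of it is kernel or HALMAC API.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* One "indicator", loosely modelled on struct rtl_halmac_indicator. */
struct indicator {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;		/* set by the callback side */
	char buffer[64];	/* payload copied by the callback */
};

/* Callback side: copy the payload and wake the waiter (cf. complete()). */
static void event_indication(struct indicator *ind, const char *data)
{
	pthread_mutex_lock(&ind->lock);
	strncpy(ind->buffer, data, sizeof(ind->buffer) - 1);
	ind->done = 1;
	pthread_cond_signal(&ind->cond);
	pthread_mutex_unlock(&ind->lock);
}

/* Caller side: block until the callback fires (cf. wait_for_completion_timeout,
 * with the timeout left out to keep the sketch short).
 */
static void wait_event_done(struct indicator *ind)
{
	pthread_mutex_lock(&ind->lock);
	while (!ind->done)
		pthread_cond_wait(&ind->cond, &ind->lock);
	pthread_mutex_unlock(&ind->lock);
}

static void *worker(void *arg)
{
	event_indication(arg, "efuse map ready");
	return NULL;
}

int main(void)
{
	struct indicator ind = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, worker, &ind);
	wait_event_done(&ind);
	printf("got: %s\n", ind.buffer);
	pthread_join(t, NULL);
	return 0;
}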
diff --git a/drivers/staging/rtlwifi/halmac/rtl_halmac.h b/drivers/staging/rtlwifi/halmac/rtl_halmac.h
deleted file mode 100644
index aa511dad8d16..000000000000
--- a/drivers/staging/rtlwifi/halmac/rtl_halmac.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef _RTL_HALMAC_H_
-#define _RTL_HALMAC_H_
-
-#include "halmac_api.h"
-
-#define rtlpriv_to_halmac(priv) \
- ((struct halmac_adapter *)((priv)->halmac.internal))
-
-/* for H2C cmd */
-#define MAX_H2C_BOX_NUMS 4
-#define MESSAGE_BOX_SIZE 4
-#define EX_MESSAGE_BOX_SIZE 4
-
-/* HALMAC API for Driver(HAL) */
-int rtl_halmac_init_adapter(struct rtl_priv *rtlpriv);
-int rtl_halmac_deinit_adapter(struct rtl_priv *rtlpriv);
-int rtl_halmac_poweron(struct rtl_priv *rtlpriv);
-int rtl_halmac_poweroff(struct rtl_priv *rtlpriv);
-int rtl_halmac_init_hal(struct rtl_priv *rtlpriv);
-int rtl_halmac_init_hal_fw(struct rtl_priv *rtlpriv, u8 *fw, u32 fwsize);
-int rtl_halmac_init_hal_fw_file(struct rtl_priv *rtlpriv, u8 *fwpath);
-int rtl_halmac_deinit_hal(struct rtl_priv *rtlpriv);
-int rtl_halmac_self_verify(struct rtl_priv *rtlpriv);
-int rtl_halmac_dlfw(struct rtl_priv *rtlpriv, u8 *fw, u32 fwsize);
-int rtl_halmac_dlfw_from_file(struct rtl_priv *rtlpriv, u8 *fwpath);
-int rtl_halmac_phy_power_switch(struct rtl_priv *rtlpriv, u8 enable);
-int rtl_halmac_send_h2c(struct rtl_priv *rtlpriv, u8 *h2c);
-int rtl_halmac_c2h_handle(struct rtl_priv *rtlpriv, u8 *c2h, u32 size);
-
-int rtl_halmac_get_physical_efuse_size(struct rtl_priv *rtlpriv, u32 *size);
-int rtl_halmac_read_physical_efuse_map(struct rtl_priv *rtlpriv, u8 *map,
- u32 size);
-int rtl_halmac_read_physical_efuse(struct rtl_priv *rtlpriv, u32 offset,
- u32 cnt, u8 *data);
-int rtl_halmac_write_physical_efuse(struct rtl_priv *rtlpriv, u32 offset,
- u32 cnt, u8 *data);
-int rtl_halmac_get_logical_efuse_size(struct rtl_priv *rtlpriv, u32 *size);
-int rtl_halmac_read_logical_efuse_map(struct rtl_priv *rtlpriv, u8 *map,
- u32 size);
-int rtl_halmac_write_logical_efuse_map(struct rtl_priv *rtlpriv, u8 *map,
- u32 size, u8 *maskmap, u32 masksize);
-int rtl_halmac_read_logical_efuse(struct rtl_priv *rtlpriv, u32 offset, u32 cnt,
- u8 *data);
-int rtl_halmac_write_logical_efuse(struct rtl_priv *rtlpriv, u32 offset,
- u32 cnt, u8 *data);
-
-int rtl_halmac_config_rx_info(struct rtl_priv *rtlpriv, enum halmac_drv_info);
-int rtl_halmac_set_mac_address(struct rtl_priv *rtlpriv, u8 hwport, u8 *addr);
-int rtl_halmac_set_bssid(struct rtl_priv *d, u8 hwport, u8 *addr);
-
-int rtl_halmac_set_bandwidth(struct rtl_priv *rtlpriv, u8 channel,
- u8 pri_ch_idx, u8 bw);
-int rtl_halmac_rx_agg_switch(struct rtl_priv *rtlpriv, bool enable);
-int rtl_halmac_get_hw_value(struct rtl_priv *d, enum halmac_hw_id hw_id,
- void *pvalue);
-int rtl_halmac_dump_fifo(struct rtl_priv *rtlpriv,
- enum hal_fifo_sel halmac_fifo_sel);
-
-int rtl_halmac_get_wow_reason(struct rtl_priv *rtlpriv, u8 *reason);
-int rtl_halmac_get_drv_info_sz(struct rtl_priv *d, u8 *sz);
-
-int rtl_halmac_get_rsvd_drv_pg_bndy(struct rtl_priv *dvobj, u16 *drv_pg);
-int rtl_halmac_download_rsvd_page(struct rtl_priv *dvobj, u8 pg_offset,
- u8 *pbuf, u32 size);
-
-int rtl_halmac_chk_txdesc(struct rtl_priv *rtlpriv, u8 *txdesc, u32 size);
-
-struct rtl_halmac_ops *rtl_halmac_get_ops_pointer(void);
-
-#endif /* _RTL_HALMAC_H_ */
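One more detail worth noting from the removed rtl_halmac.c declared above: rtl_halmac_send_h2c() writes the extension half of an H2C command (bytes 4-7) to the extension box before the main half (bytes 0-3) goes to the primary box, and then advances the box index modulo MAX_H2C_BOX_NUMS; in rtlwifi drivers the usual reason for this order is that the write to the primary box is what hands the command to the firmware, although the removed code does not spell that out. The sketch below condenses that ordering; write_reg32(), the register offsets, and the endianness handling are simplified stand-ins, not driver APIs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_H2C_BOX_NUMS	4
#define MESSAGE_BOX_SIZE	4
#define EX_MESSAGE_BOX_SIZE	4
#define REG_HMEBOX0		0x01d0	/* illustrative offsets only */
#define REG_HMEBOX_E0		0x01f0

/* Stand-in for rtl_write_dword(); it just logs the access. */
static void write_reg32(uint32_t addr, uint32_t val)
{
	printf("write 0x%04x = 0x%08x\n", (unsigned)addr, (unsigned)val);
}

static unsigned int last_box;	/* like rtlhal->last_hmeboxnum */

static void send_h2c(const uint8_t h2c[8])
{
	unsigned int box = last_box;
	uint32_t lo, hi;

	/* Extension half (bytes 4-7) goes out first... */
	memcpy(&hi, h2c + 4, EX_MESSAGE_BOX_SIZE);
	write_reg32(REG_HMEBOX_E0 + box * EX_MESSAGE_BOX_SIZE, hi);

	/* ...then the main half (bytes 0-3), which the firmware picks up. */
	memcpy(&lo, h2c, MESSAGE_BOX_SIZE);
	write_reg32(REG_HMEBOX0 + box * MESSAGE_BOX_SIZE, lo);

	last_box = (box + 1) % MAX_H2C_BOX_NUMS;
}

int main(void)
{
	const uint8_t cmd[8] = { 0x40, 1, 2, 3, 4, 5, 6, 7 };

	send_h2c(cmd);
	send_h2c(cmd);	/* the next call rotates to the next box */
	return 0;
}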
diff --git a/drivers/staging/rtlwifi/pci.c b/drivers/staging/rtlwifi/pci.c
deleted file mode 100644
index 4bb5703bd715..000000000000
--- a/drivers/staging/rtlwifi/pci.c
+++ /dev/null
@@ -1,2496 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "wifi.h"
-#include "core.h"
-#include "pci.h"
-#include "base.h"
-#include "ps.h"
-#include "efuse.h"
-#include <linux/interrupt.h>
-#include <linux/export.h>
-#include <linux/module.h>
-
-MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
-MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
-MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("PCI basic driver for rtlwifi");
-
-static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
- INTEL_VENDOR_ID,
- ATI_VENDOR_ID,
- AMD_VENDOR_ID,
- SIS_VENDOR_ID
-};
-
-static const u8 ac_to_hwq[] = {
- VO_QUEUE,
- VI_QUEUE,
- BE_QUEUE,
- BK_QUEUE
-};
-
-static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
- struct sk_buff *skb)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- __le16 fc = rtl_get_fc(skb);
- u8 queue_index = skb_get_queue_mapping(skb);
- struct ieee80211_hdr *hdr;
-
- if (unlikely(ieee80211_is_beacon(fc)))
- return BEACON_QUEUE;
- if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
- return MGNT_QUEUE;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
- if (ieee80211_is_nullfunc(fc))
- return HIGH_QUEUE;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
- hdr = rtl_get_hdr(skb);
-
- if (is_multicast_ether_addr(hdr->addr1) ||
- is_broadcast_ether_addr(hdr->addr1))
- return HIGH_QUEUE;
- }
-
- return ac_to_hwq[queue_index];
-}
-
-/* Update PCI dependent default settings*/
-static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u8 init_aspm;
-
- ppsc->reg_rfps_level = 0;
- ppsc->support_aspm = false;
-
- /*Update PCI ASPM setting */
- ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
- switch (rtlpci->const_pci_aspm) {
- case 0:
- /*No ASPM */
- break;
-
- case 1:
- /*ASPM dynamically enabled/disable. */
- ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
- break;
-
- case 2:
- /*ASPM with Clock Req dynamically enabled/disable. */
- ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
- RT_RF_OFF_LEVL_CLK_REQ);
- break;
-
- case 3:
- /* Always enable ASPM and Clock Req
- * from initialization to halt.
- */
- ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
- ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
- RT_RF_OFF_LEVL_CLK_REQ);
- break;
-
- case 4:
- /* Always enable ASPM without Clock Req
- * from initialization to halt.
- */
- ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
- RT_RF_OFF_LEVL_CLK_REQ);
- ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
- break;
- }
-
- ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
-
- /*Update Radio OFF setting */
- switch (rtlpci->const_hwsw_rfoff_d3) {
- case 1:
- if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
- ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
- break;
-
- case 2:
- if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
- ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
- ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
- break;
-
- case 3:
- ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
- break;
- }
-
- /*Set HW definition to determine if it supports ASPM. */
- switch (rtlpci->const_support_pciaspm) {
- case 0:{
- /*Not support ASPM. */
- bool support_aspm = false;
-
- ppsc->support_aspm = support_aspm;
- break;
- }
- case 1:{
- /*Support ASPM. */
- bool support_aspm = true;
- bool support_backdoor = true;
-
- ppsc->support_aspm = support_aspm;
-
- /*if (priv->oem_id == RT_CID_TOSHIBA &&
- * !priv->ndis_adapter.amd_l1_patch)
- * support_backdoor = false;
- */
-
- ppsc->support_backdoor = support_backdoor;
-
- break;
- }
- case 2:
- /*ASPM value set by chipset. */
- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
- bool support_aspm = true;
-
- ppsc->support_aspm = support_aspm;
- }
- break;
- default:
- pr_err("switch case %#x not processed\n",
- rtlpci->const_support_pciaspm);
- break;
- }
-
-	/* Toshiba ASPM issue: Toshiba sets ASPM by itself,
-	 * so we should not set ASPM in the driver
-	 */
- pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
- if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
- init_aspm == 0x43)
- ppsc->support_aspm = false;
-}
-
-static bool _rtl_pci_platform_switch_device_pci_aspm(
- struct ieee80211_hw *hw,
- u8 value)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
- value |= 0x40;
-
- pci_write_config_byte(rtlpci->pdev, 0x80, value);
-
- return false;
-}
-
-/* Write 0x01 to enable the clock request, 0x0 to disable it. */
-static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- pci_write_config_byte(rtlpci->pdev, 0x81, value);
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
- udelay(100);
-}
-
-/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
-static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
- /*Retrieve original configuration settings. */
- u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
- u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.pcibridge_linkctrlreg;
- u16 aspmlevel = 0;
- u8 tmp_u1b = 0;
-
- if (!ppsc->support_aspm)
- return;
-
- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
-
- return;
- }
-
- if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
- RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
- _rtl_pci_switch_clk_req(hw, 0x0);
- }
-
-	/* to make sure the device will be in the L0 state after an I/O access */
- pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
-
- /*Set corresponding value. */
- aspmlevel |= BIT(0) | BIT(1);
- linkctrl_reg &= ~aspmlevel;
- pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
-
- _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
- udelay(50);
-
- /*4 Disable Pci Bridge ASPM */
- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
- pcibridge_linkctrlreg);
-
- udelay(50);
-}
-
-/*
- * Enable RTL8192SE ASPM and PCI bridge ASPM for power saving.
- * We must follow the sequence of enabling RTL8192SE ASPM first and
- * then the PCI bridge ASPM, or the system will show a bluescreen.
- */
-static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
- u16 aspmlevel;
- u8 u_pcibridge_aspmsetting;
- u8 u_device_aspmsetting;
-
- if (!ppsc->support_aspm)
- return;
-
- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
- return;
- }
-
- /*4 Enable Pci Bridge ASPM */
-
- u_pcibridge_aspmsetting =
- pcipriv->ndis_adapter.pcibridge_linkctrlreg |
- rtlpci->const_hostpci_aspm_setting;
-
- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
- u_pcibridge_aspmsetting &= ~BIT(0);
-
- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
- u_pcibridge_aspmsetting);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PlatformEnableASPM(): Write reg[%x] = %x\n",
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
- u_pcibridge_aspmsetting);
-
- udelay(50);
-
- /*Get ASPM level (with/without Clock Req) */
- aspmlevel = rtlpci->const_devicepci_aspm_setting;
- u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
-
- /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
- /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
-
- u_device_aspmsetting |= aspmlevel;
-
- _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
-
- if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
- _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
- RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
- RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
- }
- udelay(100);
-}
-
-static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- bool status = false;
- u8 offset_e0;
- unsigned int offset_e4;
-
- pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0);
-
- pci_read_config_byte(rtlpci->pdev, 0xe0, &offset_e0);
-
- if (offset_e0 == 0xA0) {
- pci_read_config_dword(rtlpci->pdev, 0xe4, &offset_e4);
- if (offset_e4 & BIT(23))
- status = true;
- }
-
- return status;
-}
-
-static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- bool find_buddy_priv = false;
- struct rtl_priv *tpriv;
- struct rtl_pci_priv *tpcipriv = NULL;
-
- if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
- list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
- list) {
- tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
- pcipriv->ndis_adapter.funcnumber);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
- tpcipriv->ndis_adapter.funcnumber);
-
- if ((pcipriv->ndis_adapter.busnumber ==
- tpcipriv->ndis_adapter.busnumber) &&
- (pcipriv->ndis_adapter.devnumber ==
- tpcipriv->ndis_adapter.devnumber) &&
- (pcipriv->ndis_adapter.funcnumber !=
- tpcipriv->ndis_adapter.funcnumber)) {
- find_buddy_priv = true;
- break;
- }
- }
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", find_buddy_priv);
-
- if (find_buddy_priv)
- *buddy_priv = tpriv;
-
- return find_buddy_priv;
-}
-
-static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
-{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
- u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
- u8 linkctrl_reg;
- u8 num4bbytes;
-
- num4bbytes = (capabilityoffset + 0x10) / 4;
-
- /*Read Link Control Register */
- pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
-
- pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
-}
-
-static void rtl_pci_parse_configuration(struct pci_dev *pdev,
- struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-
- u8 tmp;
- u16 linkctrl_reg;
-
- /*Link Control Register */
- pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
- pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
- pcipriv->ndis_adapter.linkctrl_reg);
-
- pci_read_config_byte(pdev, 0x98, &tmp);
- tmp |= BIT(4);
- pci_write_config_byte(pdev, 0x98, tmp);
-
- tmp = 0x17;
- pci_write_config_byte(pdev, 0x70f, tmp);
-}
-
-static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
-{
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- _rtl_pci_update_default_setting(hw);
-
- if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
- /*Always enable ASPM & Clock Req. */
- rtl_pci_enable_aspm(hw);
- RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
- }
-}
-
-static void _rtl_pci_io_handler_init(struct device *dev,
- struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->io.dev = dev;
-
- rtlpriv->io.write8_async = pci_write8_async;
- rtlpriv->io.write16_async = pci_write16_async;
- rtlpriv->io.write32_async = pci_write32_async;
-
- rtlpriv->io.read8_sync = pci_read8_sync;
- rtlpriv->io.read16_sync = pci_read16_sync;
- rtlpriv->io.read32_sync = pci_read32_sync;
-}
-
-static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct rtl_tcb_desc *tcb_desc, u8 tid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct sk_buff *next_skb;
- u8 additionlen = FCS_LEN;
-
- /* here open is 4, wep/tkip is 8, aes is 12*/
- if (info->control.hw_key)
- additionlen += info->control.hw_key->icv_len;
-
-	/* The maximum number of skbs is 6 */
- tcb_desc->empkt_num = 0;
- spin_lock_bh(&rtlpriv->locks.waitq_lock);
- skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
- struct ieee80211_tx_info *next_info;
-
- next_info = IEEE80211_SKB_CB(next_skb);
- if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
- tcb_desc->empkt_len[tcb_desc->empkt_num] =
- next_skb->len + additionlen;
- tcb_desc->empkt_num++;
- } else {
- break;
- }
-
- if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
- next_skb))
- break;
-
- if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
- break;
- }
- spin_unlock_bh(&rtlpriv->locks.waitq_lock);
-
- return true;
-}
-
-/* just for early mode now */
-static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct sk_buff *skb = NULL;
- struct ieee80211_tx_info *info = NULL;
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- int tid;
-
- if (!rtlpriv->rtlhal.earlymode_enable)
- return;
-
- if (rtlpriv->dm.supp_phymode_switch &&
- (rtlpriv->easy_concurrent_ctl.switch_in_process ||
- (rtlpriv->buddy_priv &&
- rtlpriv->buddy_priv->easy_concurrent_ctl.switch_in_process)))
- return;
-	/* we only use early mode for BE/BK/VI/VO */
- for (tid = 7; tid >= 0; tid--) {
- u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
-
- while (!mac->act_scanning &&
- rtlpriv->psc.rfpwr_state == ERFON) {
- struct rtl_tcb_desc tcb_desc;
-
- memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
- spin_lock_bh(&rtlpriv->locks.waitq_lock);
- if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
- (ring->entries - skb_queue_len(&ring->queue) >
- rtlhal->max_earlymode_num)) {
- skb = skb_dequeue(&mac->skb_waitq[tid]);
- } else {
- spin_unlock_bh(&rtlpriv->locks.waitq_lock);
- break;
- }
- spin_unlock_bh(&rtlpriv->locks.waitq_lock);
-
-			/* Some destination addresses cannot use early mode, e.g.
-			 * multicast/broadcast/no-QoS data
-			 */
- info = IEEE80211_SKB_CB(skb);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- _rtl_update_earlymode_info(hw, skb,
- &tcb_desc, tid);
-
- rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
- }
- }
-}
-
-static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
-
- while (skb_queue_len(&ring->queue)) {
- struct sk_buff *skb;
- struct ieee80211_tx_info *info;
- __le16 fc;
- u8 tid;
- u8 *entry;
-
- if (rtlpriv->use_new_trx_flow)
- entry = (u8 *)(&ring->buffer_desc[ring->idx]);
- else
- entry = (u8 *)(&ring->desc[ring->idx]);
-
- if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
- return;
- ring->idx = (ring->idx + 1) % ring->entries;
-
- skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry, true,
- HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
-
- /* remove early mode header */
- if (rtlpriv->rtlhal.earlymode_enable)
- skb_pull(skb, EM_HDR_LEN);
-
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
- "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
- ring->idx,
- skb_queue_len(&ring->queue),
- *(u16 *)(skb->data + 22));
-
- if (prio == TXCMD_QUEUE) {
- dev_kfree_skb(skb);
- goto tx_status_ok;
- }
-
-		/* for sw LPS: just after the NULL skb is sent out, we can
-		 * be sure the AP knows we are sleeping, and we should not
-		 * let the rf sleep
-		 */
- fc = rtl_get_fc(skb);
- if (ieee80211_is_nullfunc(fc)) {
- if (ieee80211_has_pm(fc)) {
- rtlpriv->mac80211.offchan_delay = true;
- rtlpriv->psc.state_inap = true;
- } else {
- rtlpriv->psc.state_inap = false;
- }
- }
- if (ieee80211_is_action(fc)) {
- struct ieee80211_mgmt *action_frame =
- (struct ieee80211_mgmt *)skb->data;
- if (action_frame->u.action.u.ht_smps.action ==
- WLAN_HT_ACTION_SMPS) {
- dev_kfree_skb(skb);
- goto tx_status_ok;
- }
- }
-
- /* update tid tx pkt num */
- tid = rtl_get_tid(skb);
- if (tid <= 7)
- rtlpriv->link_info.tidtx_inperiod[tid]++;
-
- info = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(info);
-
- info->flags |= IEEE80211_TX_STAT_ACK;
- /*info->status.rates[0].count = 1; */
-
- ieee80211_tx_status_irqsafe(hw, skb);
-
- if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
- prio, ring->idx,
- skb_queue_len(&ring->queue));
-
- ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
- }
-tx_status_ok:
- skb = NULL;
- }
-
- if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2))
- rtl_lps_leave(hw);
-}
-
-static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
- struct sk_buff *new_skb, u8 *entry,
- int rxring_idx, int desc_idx)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 bufferaddress;
- u8 tmp_one = 1;
- struct sk_buff *skb;
-
- if (likely(new_skb)) {
- skb = new_skb;
- goto remap;
- }
- skb = dev_alloc_skb(rtlpci->rxbuffersize);
- if (!skb)
- return 0;
-
-remap:
- /* just set skb->cb to mapping addr for pci_unmap_single use */
- *((dma_addr_t *)skb->cb) =
- pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
- bufferaddress = *((dma_addr_t *)skb->cb);
- if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
- return 0;
- rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
- if (rtlpriv->use_new_trx_flow) {
- /* skb->cb may be 64 bit address */
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
- HW_DESC_RX_PREPARE,
- (u8 *)(dma_addr_t *)skb->cb);
- } else {
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
- HW_DESC_RXBUFF_ADDR,
- (u8 *)&bufferaddress);
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
- HW_DESC_RXPKT_LEN,
- (u8 *)&rtlpci->rxbuffersize);
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
- HW_DESC_RXOWN,
- (u8 *)&tmp_one);
- }
- return 1;
-}
-
-/* In order to receive 8K AMSDUs we set the skb to
- * 9100 bytes when initializing the rx ring, but if the
- * packet is not an AMSDU, this large packet is handed
- * to TCP/IP directly, which makes large-packet pings
- * (e.g. "ping -s 65507") fail. So we reallocate the skb
- * here based on the true size of the packet; mac80211
- * will probably do this better eventually, but does not yet.
- *
- * Some platforms occasionally fail to allocate the skb.
- * In that case we send the old skb to mac80211 directly;
- * this causes no other issues, but this one packet will
- * be lost as far as TCP/IP is concerned.
- */
-static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct ieee80211_rx_status rx_status)
-{
- if (unlikely(!rtl_action_proc(hw, skb, false))) {
- dev_kfree_skb_any(skb);
- } else {
- struct sk_buff *uskb = NULL;
-
- uskb = dev_alloc_skb(skb->len + 128);
- if (likely(uskb)) {
- memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
- sizeof(rx_status));
- skb_put_data(uskb, skb->data, skb->len);
- dev_kfree_skb_any(skb);
- ieee80211_rx_irqsafe(hw, uskb);
- } else {
- ieee80211_rx_irqsafe(hw, skb);
- }
- }
-}
-
-/*hsisr interrupt handler*/
-static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
- rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
- rtlpci->sys_irq_mask);
-}
-
-static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- int rxring_idx = RTL_PCI_RX_MPDU_QUEUE;
- struct ieee80211_rx_status rx_status = { 0 };
- unsigned int count = rtlpci->rxringcount;
- u8 own;
- u8 tmp_one;
- bool unicast = false;
- u8 hw_queue = 0;
- unsigned int rx_remained_cnt = 0;
- struct rtl_stats stats = {
- .signal = 0,
- .rate = 0,
- };
-
- /*RX NORMAL PKT */
- while (count--) {
- struct ieee80211_hdr *hdr;
- __le16 fc;
- u16 len;
- /*rx buffer descriptor */
- struct rtl_rx_buffer_desc *buffer_desc = NULL;
-		/* with the new trx flow, this holds the wifi info */
- struct rtl_rx_desc *pdesc = NULL;
- /*rx pkt */
- struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
- rtlpci->rx_ring[rxring_idx].idx];
- struct sk_buff *new_skb;
-
- if (rtlpriv->use_new_trx_flow) {
- if (rx_remained_cnt == 0)
- rx_remained_cnt =
- rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
- hw_queue);
- if (rx_remained_cnt == 0)
- return;
- buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
- rtlpci->rx_ring[rxring_idx].idx];
- pdesc = (struct rtl_rx_desc *)skb->data;
- } else { /* rx descriptor */
- pdesc = &rtlpci->rx_ring[rxring_idx].desc[
- rtlpci->rx_ring[rxring_idx].idx];
-
- own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc,
- false,
- HW_DESC_OWN);
-			if (own) /* wait for the data to be filled by hardware */
- return;
- }
-
-		/* Reaching this point means the data has already been filled in.
-		 * Attention: we can NOT access 'skb' before 'pci_unmap_single'.
-		 */
- pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
-
- /* get a new skb - if fail, old one will be reused */
- new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
- if (unlikely(!new_skb))
- goto no_new;
- memset(&rx_status, 0, sizeof(rx_status));
- rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
- &rx_status, (u8 *)pdesc, skb);
-
- if (rtlpriv->use_new_trx_flow)
- rtlpriv->cfg->ops->rx_check_dma_ok(hw,
- (u8 *)buffer_desc,
- hw_queue);
-
- len = rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, false,
- HW_DESC_RXPKT_LEN);
-
- if (skb->end - skb->tail > len) {
- skb_put(skb, len);
- if (rtlpriv->use_new_trx_flow)
- skb_reserve(skb, stats.rx_drvinfo_size +
- stats.rx_bufshift + 24);
- else
- skb_reserve(skb, stats.rx_drvinfo_size +
- stats.rx_bufshift);
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "skb->end - skb->tail = %d, len is %d\n",
- skb->end - skb->tail, len);
- dev_kfree_skb_any(skb);
- goto new_trx_end;
- }
- /* handle command packet here */
- if (rtlpriv->cfg->ops->rx_command_packet &&
- rtlpriv->cfg->ops->rx_command_packet(hw, &stats, skb)) {
- dev_kfree_skb_any(skb);
- goto new_trx_end;
- }
-
-		/*
-		 * NOTICE: do not trim the trailing 4 bytes here with
-		 * skb_trim(skb, skb->len - 4); mac80211 already does this,
-		 * and doing it here makes DHCP fail on secured connections.
-		 */
-
- hdr = rtl_get_hdr(skb);
- fc = rtl_get_fc(skb);
-
- if (!stats.crc && !stats.hwerror) {
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
- sizeof(rx_status));
-
- if (is_broadcast_ether_addr(hdr->addr1)) {
- ;/*TODO*/
- } else if (is_multicast_ether_addr(hdr->addr1)) {
- ;/*TODO*/
- } else {
- unicast = true;
- rtlpriv->stats.rxbytesunicast += skb->len;
- }
- rtl_is_special_data(hw, skb, false, true);
-
- if (ieee80211_is_data(fc)) {
- rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
- if (unicast)
- rtlpriv->link_info.num_rx_inperiod++;
- }
-
- rtl_collect_scan_list(hw, skb);
-
-			/* beacon statistics, used for roaming */
- rtl_beacon_statistic(hw, skb);
- rtl_p2p_info(hw, (void *)skb->data, skb->len);
- /* for sw lps */
- rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
- rtl_recognize_peer(hw, (void *)skb->data, skb->len);
- if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
- (rtlpriv->rtlhal.current_bandtype ==
- BAND_ON_2_4G) &&
- (ieee80211_is_beacon(fc) ||
- ieee80211_is_probe_resp(fc))) {
- dev_kfree_skb_any(skb);
- } else {
- rtl_check_beacon_key(hw, (void *)skb->data,
- skb->len);
- _rtl_pci_rx_to_mac80211(hw, skb, rx_status);
- }
- } else {
- dev_kfree_skb_any(skb);
- }
-new_trx_end:
- if (rtlpriv->use_new_trx_flow) {
- rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
- rtlpci->rx_ring[hw_queue].next_rx_rp %=
- RTL_PCI_MAX_RX_COUNT;
-
- rx_remained_cnt--;
- rtl_write_word(rtlpriv, 0x3B4,
- rtlpci->rx_ring[hw_queue].next_rx_rp);
- }
- if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2))
- rtl_lps_leave(hw);
- skb = new_skb;
-no_new:
- if (rtlpriv->use_new_trx_flow) {
- _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
- rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
- } else {
- _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
- rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
- if (rtlpci->rx_ring[rxring_idx].idx ==
- rtlpci->rxringcount - 1)
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
- false,
- HW_DESC_RXERO,
- (u8 *)&tmp_one);
- }
- rtlpci->rx_ring[rxring_idx].idx =
- (rtlpci->rx_ring[rxring_idx].idx + 1) %
- rtlpci->rxringcount;
- }
-}
-
-static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
-{
- struct ieee80211_hw *hw = dev_id;
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- unsigned long flags;
- u32 inta = 0;
- u32 intb = 0;
- u32 intc = 0;
- u32 intd = 0;
- irqreturn_t ret = IRQ_HANDLED;
-
- if (rtlpci->irq_enabled == 0)
- return ret;
-
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
- rtlpriv->cfg->ops->disable_interrupt(hw);
-
- /*read ISR: 4/8bytes */
- rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb, &intc, &intd);
-
- /*Shared IRQ or HW disappeared */
- if (!inta || inta == 0xffff)
- goto done;
-
- /*<1> beacon related */
- if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon ok interrupt!\n");
- }
-
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon err interrupt!\n");
- }
-
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
-
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "prepare beacon for interrupt!\n");
- tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
- }
-
- /*<2> Tx related */
- if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
-
- if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Manage ok interrupt!\n");
- _rtl_pci_tx_isr(hw, MGNT_QUEUE);
- }
-
- if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "HIGH_QUEUE ok interrupt!\n");
- _rtl_pci_tx_isr(hw, HIGH_QUEUE);
- }
-
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
- rtlpriv->link_info.num_tx_inperiod++;
-
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BK Tx OK interrupt!\n");
- _rtl_pci_tx_isr(hw, BK_QUEUE);
- }
-
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
- rtlpriv->link_info.num_tx_inperiod++;
-
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BE TX OK interrupt!\n");
- _rtl_pci_tx_isr(hw, BE_QUEUE);
- }
-
- if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
- rtlpriv->link_info.num_tx_inperiod++;
-
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "VI TX OK interrupt!\n");
- _rtl_pci_tx_isr(hw, VI_QUEUE);
- }
-
- if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
- rtlpriv->link_info.num_tx_inperiod++;
-
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Vo TX OK interrupt!\n");
- _rtl_pci_tx_isr(hw, VO_QUEUE);
- }
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
- if (intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
- rtlpriv->link_info.num_tx_inperiod++;
-
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "H2C TX OK interrupt!\n");
- _rtl_pci_tx_isr(hw, H2C_QUEUE);
- }
- }
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
- if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
- rtlpriv->link_info.num_tx_inperiod++;
-
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "CMD TX OK interrupt!\n");
- _rtl_pci_tx_isr(hw, TXCMD_QUEUE);
- }
- }
-
- /*<3> Rx related */
- if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
- _rtl_pci_rx_interrupt(hw);
- }
-
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "rx descriptor unavailable!\n");
- _rtl_pci_rx_interrupt(hw);
- }
-
- if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
- _rtl_pci_rx_interrupt(hw);
- }
-
- /*<4> fw related*/
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
- if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "firmware interrupt!\n");
- queue_delayed_work(rtlpriv->works.rtl_wq,
- &rtlpriv->works.fwevt_wq, 0);
- }
- }
-
- /*<5> hsisr related*/
-	/* Only 8188EE and 8723BE are supported here.  If other ICs took
-	 * this path the system would be corrupted, because
-	 * maps[RTL_IMR_HSISR_IND] and maps[MAC_HSISR] are not initialized
-	 * for them.
-	 */
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
- rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "hsisr interrupt!\n");
- _rtl_pci_hs_interrupt(hw);
- }
- }
-
- if (rtlpriv->rtlhal.earlymode_enable)
- tasklet_schedule(&rtlpriv->works.irq_tasklet);
-
-done:
- rtlpriv->cfg->ops->enable_interrupt(hw);
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return ret;
-}
-
-static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
-{
- _rtl_pci_tx_chk_waitq(hw);
-}
-
-static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl8192_tx_ring *ring = NULL;
- struct ieee80211_hdr *hdr = NULL;
- struct ieee80211_tx_info *info = NULL;
- struct sk_buff *pskb = NULL;
- struct rtl_tx_desc *pdesc = NULL;
- struct rtl_tcb_desc tcb_desc;
- /*This is for new trx flow*/
- struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
- u8 temp_one = 1;
- u8 *entry;
-
- memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
- ring = &rtlpci->tx_ring[BEACON_QUEUE];
- pskb = __skb_dequeue(&ring->queue);
- if (rtlpriv->use_new_trx_flow)
- entry = (u8 *)(&ring->buffer_desc[ring->idx]);
- else
- entry = (u8 *)(&ring->desc[ring->idx]);
- if (pskb) {
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw, (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- pskb->len, PCI_DMA_TODEVICE);
- kfree_skb(pskb);
- }
-
- /*NB: the beacon data buffer must be 32-bit aligned. */
- pskb = ieee80211_beacon_get(hw, mac->vif);
- if (!pskb)
- return;
- hdr = rtl_get_hdr(pskb);
- info = IEEE80211_SKB_CB(pskb);
- pdesc = &ring->desc[0];
- if (rtlpriv->use_new_trx_flow)
- pbuffer_desc = &ring->buffer_desc[0];
-
- rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
- (u8 *)pbuffer_desc, info, NULL, pskb,
- BEACON_QUEUE, &tcb_desc);
-
- __skb_queue_tail(&ring->queue, pskb);
-
- if (rtlpriv->use_new_trx_flow) {
- temp_one = 4;
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true,
- HW_DESC_OWN, (u8 *)&temp_one);
- } else {
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
- &temp_one);
- }
-}
-
-static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u8 i;
- u16 desc_num;
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
- desc_num = TX_DESC_NUM_92E;
- else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE)
- desc_num = TX_DESC_NUM_8822B;
- else
- desc_num = RT_TXDESC_NUM;
-
- for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
- rtlpci->txringcount[i] = desc_num;
-
-	/*
-	 * We only allocate 2 descriptors for the beacon queue, because
-	 * the hardware beacon only needs the first descriptor.
-	 */
- rtlpci->txringcount[BEACON_QUEUE] = 2;
-
-	/* The BE queue needs more descriptors for performance reasons;
-	 * otherwise "no more tx desc" conditions can occur and may cause
-	 * a mac80211 memory leak.
-	 */
- if (!rtl_priv(hw)->use_new_trx_flow)
- rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
-
- rtlpci->rxbuffersize = 9100; /*2048/1024; */
- rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
-}
-
-static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
- struct pci_dev *pdev)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- rtlpci->up_first_time = true;
- rtlpci->being_init_adapter = false;
-
- rtlhal->hw = hw;
- rtlpci->pdev = pdev;
-
- /*Tx/Rx related var */
- _rtl_pci_init_trx_var(hw);
-
- /*IBSS*/
- mac->beacon_interval = 100;
-
- /*AMPDU*/
- mac->min_space_cfg = 0;
- mac->max_mss_density = 0;
- /*set sane AMPDU defaults */
- mac->current_ampdu_density = 7;
- mac->current_ampdu_factor = 3;
-
- /*Retry Limit*/
- mac->retry_short = 7;
- mac->retry_long = 7;
-
- /*QOS*/
- rtlpci->acm_method = EACMWAY2_SW;
-
- /*task */
- tasklet_init(&rtlpriv->works.irq_tasklet,
- (void (*)(unsigned long))_rtl_pci_irq_tasklet,
- (unsigned long)hw);
- tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
- (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
- (unsigned long)hw);
- INIT_WORK(&rtlpriv->works.lps_change_work,
- rtl_lps_change_work_callback);
-}
-
-static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
- unsigned int prio, unsigned int entries)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tx_buffer_desc *buffer_desc;
- struct rtl_tx_desc *desc;
- dma_addr_t buffer_desc_dma, desc_dma;
- u32 nextdescaddress;
- int i;
-
- /* alloc tx buffer desc for new trx flow*/
- if (rtlpriv->use_new_trx_flow) {
- buffer_desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*buffer_desc) * entries,
- &buffer_desc_dma);
-
- if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
- pr_err("Cannot allocate TX ring (prio = %d)\n",
- prio);
- return -ENOMEM;
- }
-
- rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
- rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
-
- rtlpci->tx_ring[prio].cur_tx_rp = 0;
- rtlpci->tx_ring[prio].cur_tx_wp = 0;
- }
-
- /* alloc dma for this ring */
- desc = pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*desc) * entries, &desc_dma);
-
- if (!desc || (unsigned long)desc & 0xFF) {
- pr_err("Cannot allocate TX ring (prio = %d)\n", prio);
- return -ENOMEM;
- }
-
- rtlpci->tx_ring[prio].desc = desc;
- rtlpci->tx_ring[prio].dma = desc_dma;
-
- rtlpci->tx_ring[prio].idx = 0;
- rtlpci->tx_ring[prio].entries = entries;
- skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
- prio, desc);
-
- /* init every desc in this ring */
- if (!rtlpriv->use_new_trx_flow) {
- for (i = 0; i < entries; i++) {
- nextdescaddress = (u32)desc_dma +
- ((i + 1) % entries) *
- sizeof(*desc);
-
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)&desc[i],
- true,
- HW_DESC_TX_NEXTDESC_ADDR,
- (u8 *)&nextdescaddress);
- }
- }
- return 0;
-}
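For illustration, the loop above chains the legacy TX descriptors into a ring: each descriptor's next-descriptor field holds the DMA address of the following slot, and the last slot wraps back to the first. A minimal standalone sketch of that address arithmetic follows, using a hypothetical DMA base address and a 4-entry ring; the 64-byte descriptor size matches the 16 dwords of struct rtl_tx_desc.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t desc_dma = 0x10000000u;	/* hypothetical DMA base address */
	const unsigned int entries = 4;		/* hypothetical ring size */
	const unsigned int desc_size = 64;	/* sizeof(struct rtl_tx_desc): 16 dwords */
	unsigned int i;

	for (i = 0; i < entries; i++) {
		/* same ((i + 1) % entries) chaining as in the ring init above */
		uint32_t next = desc_dma + ((i + 1) % entries) * desc_size;

		printf("desc[%u] -> next descriptor at 0x%08x\n", i, next);
	}
	return 0;	/* desc[3] wraps back to 0x10000000 */
}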
-
-static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int i;
-
- if (rtlpriv->use_new_trx_flow) {
- struct rtl_rx_buffer_desc *entry = NULL;
- /* alloc dma for this ring */
- rtlpci->rx_ring[rxring_idx].buffer_desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
- rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
- if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
- (ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
- pr_err("Cannot allocate RX ring\n");
- return -ENOMEM;
- }
-
- /* init every desc in this ring */
- rtlpci->rx_ring[rxring_idx].idx = 0;
- for (i = 0; i < rtlpci->rxringcount; i++) {
- entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
- rxring_idx, i))
- return -ENOMEM;
- }
- } else {
- struct rtl_rx_desc *entry = NULL;
- u8 tmp_one = 1;
- /* alloc dma for this ring */
- rtlpci->rx_ring[rxring_idx].desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
- rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
- if (!rtlpci->rx_ring[rxring_idx].desc ||
- (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
- pr_err("Cannot allocate RX ring\n");
- return -ENOMEM;
- }
-
- /* init every desc in this ring */
- rtlpci->rx_ring[rxring_idx].idx = 0;
-
- for (i = 0; i < rtlpci->rxringcount; i++) {
- entry = &rtlpci->rx_ring[rxring_idx].desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
- rxring_idx, i))
- return -ENOMEM;
- }
-
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
- HW_DESC_RXERO, &tmp_one);
- }
- return 0;
-}
-
-static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
- unsigned int prio)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
-
- /* free every desc in this ring */
- while (skb_queue_len(&ring->queue)) {
- u8 *entry;
- struct sk_buff *skb = __skb_dequeue(&ring->queue);
-
- if (rtlpriv->use_new_trx_flow)
- entry = (u8 *)(&ring->buffer_desc[ring->idx]);
- else
- entry = (u8 *)(&ring->desc[ring->idx]);
-
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
- true,
- HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
- kfree_skb(skb);
- ring->idx = (ring->idx + 1) % ring->entries;
- }
-
- /* free dma of this ring */
- pci_free_consistent(rtlpci->pdev,
- sizeof(*ring->desc) * ring->entries,
- ring->desc, ring->dma);
- ring->desc = NULL;
- if (rtlpriv->use_new_trx_flow) {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*ring->buffer_desc) * ring->entries,
- ring->buffer_desc, ring->buffer_desc_dma);
- ring->buffer_desc = NULL;
- }
-}
-
-static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- int i;
-
- /* free every desc in this ring */
- for (i = 0; i < rtlpci->rxringcount; i++) {
- struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i];
-
- if (!skb)
- continue;
- pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
- kfree_skb(skb);
- }
-
- /* free dma of this ring */
- if (rtlpriv->use_new_trx_flow) {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
- rtlpci->rxringcount,
- rtlpci->rx_ring[rxring_idx].buffer_desc,
- rtlpci->rx_ring[rxring_idx].dma);
- rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
- } else {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
- rtlpci->rxringcount,
- rtlpci->rx_ring[rxring_idx].desc,
- rtlpci->rx_ring[rxring_idx].dma);
- rtlpci->rx_ring[rxring_idx].desc = NULL;
- }
-}
-
-static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- int ret;
- int i, rxring_idx;
-
- /* rxring_idx 0:RX_MPDU_QUEUE
- * rxring_idx 1:RX_CMD_QUEUE
- */
- for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
- ret = _rtl_pci_init_rx_ring(hw, rxring_idx);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
- ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]);
- if (ret)
- goto err_free_rings;
- }
-
- return 0;
-
-err_free_rings:
- for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
- _rtl_pci_free_rx_ring(hw, rxring_idx);
-
- for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
- if (rtlpci->tx_ring[i].desc ||
- rtlpci->tx_ring[i].buffer_desc)
- _rtl_pci_free_tx_ring(hw, i);
-
- return 1;
-}
-
-static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
-{
- u32 i, rxring_idx;
-
- /*free rx rings */
- for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
- _rtl_pci_free_rx_ring(hw, rxring_idx);
-
- /*free tx rings */
- for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
- _rtl_pci_free_tx_ring(hw, i);
-
- return 0;
-}
-
-int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- int i, rxring_idx;
- unsigned long flags;
- u8 tmp_one = 1;
- u32 bufferaddress;
- /* rxring_idx 0:RX_MPDU_QUEUE */
- /* rxring_idx 1:RX_CMD_QUEUE */
- for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
-		/* Force rx_ring[RX_MPDU_QUEUE/RX_CMD_QUEUE].idx back to the
-		 * first entry.  With the new trx flow there is nothing to do
-		 * here.
-		 */
- if (!rtlpriv->use_new_trx_flow &&
- rtlpci->rx_ring[rxring_idx].desc) {
- struct rtl_rx_desc *entry = NULL;
-
- rtlpci->rx_ring[rxring_idx].idx = 0;
- for (i = 0; i < rtlpci->rxringcount; i++) {
- entry = &rtlpci->rx_ring[rxring_idx].desc[i];
- bufferaddress =
- rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
- false, HW_DESC_RXBUFF_ADDR);
- memset((u8 *)entry, 0,
- sizeof(*rtlpci->rx_ring
- [rxring_idx].desc));/*clear one entry*/
- if (rtlpriv->use_new_trx_flow) {
-					/* dead code: use_new_trx_flow is false here */
- rtlpriv->cfg->ops->set_desc(hw,
- (u8 *)entry, false,
- HW_DESC_RX_PREPARE,
- (u8 *)&bufferaddress);
- } else {
- rtlpriv->cfg->ops->set_desc(hw,
- (u8 *)entry, false,
- HW_DESC_RXBUFF_ADDR,
- (u8 *)&bufferaddress);
- rtlpriv->cfg->ops->set_desc(hw,
- (u8 *)entry, false,
- HW_DESC_RXPKT_LEN,
- (u8 *)&rtlpci->rxbuffersize);
- rtlpriv->cfg->ops->set_desc(hw,
- (u8 *)entry, false,
- HW_DESC_RXOWN,
- (u8 *)&tmp_one);
- }
- }
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
- HW_DESC_RXERO, (u8 *)&tmp_one);
- }
- rtlpci->rx_ring[rxring_idx].idx = 0;
- }
-
-	/*
-	 * After reset, release previously pending packets and force the
-	 * tx index back to the first entry.
-	 */
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
- for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
- if (rtlpci->tx_ring[i].desc ||
- rtlpci->tx_ring[i].buffer_desc) {
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
-
- while (skb_queue_len(&ring->queue)) {
- u8 *entry;
- struct sk_buff *skb =
- __skb_dequeue(&ring->queue);
- if (rtlpriv->use_new_trx_flow)
- entry = (u8 *)(&ring->buffer_desc
- [ring->idx]);
- else
- entry = (u8 *)(&ring->desc[ring->idx]);
-
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
- true, HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
- ring->idx = (ring->idx + 1) % ring->entries;
- }
-
- if (rtlpriv->use_new_trx_flow) {
- rtlpci->tx_ring[i].cur_tx_rp = 0;
- rtlpci->tx_ring[i].cur_tx_wp = 0;
- }
-
- ring->idx = 0;
- ring->entries = rtlpci->txringcount[i];
- }
- }
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
- return 0;
-}
-
-static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct sk_buff *skb)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_sta_info *sta_entry = NULL;
- u8 tid = rtl_get_tid(skb);
- __le16 fc = rtl_get_fc(skb);
-
- if (!sta)
- return false;
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
-
- if (!rtlpriv->rtlhal.earlymode_enable)
- return false;
- if (ieee80211_is_nullfunc(fc))
- return false;
- if (ieee80211_is_qos_nullfunc(fc))
- return false;
- if (ieee80211_is_pspoll(fc))
- return false;
- if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
- return false;
- if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
- return false;
- if (tid > 7)
- return false;
-
- /* maybe every tid should be checked */
- if (!rtlpriv->link_info.higher_busytxtraffic[tid])
- return false;
-
- spin_lock_bh(&rtlpriv->locks.waitq_lock);
- skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
- spin_unlock_bh(&rtlpriv->locks.waitq_lock);
-
- return true;
-}
-
-static int rtl_pci_tx(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct sk_buff *skb,
- struct rtl_tcb_desc *ptcb_desc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_sta_info *sta_entry = NULL;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct rtl8192_tx_ring *ring;
- struct rtl_tx_desc *pdesc;
- struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
- u16 idx;
- u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
- unsigned long flags;
- struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
- __le16 fc = rtl_get_fc(skb);
- u8 *pda_addr = hdr->addr1;
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*ssn */
- u8 tid = 0;
- u16 seq_number = 0;
- u8 own;
- u8 temp_one = 1;
-
- if (ieee80211_is_mgmt(fc))
- rtl_tx_mgmt_proc(hw, skb);
-
- if (rtlpriv->psc.sw_ps_enabled) {
- if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
- !ieee80211_has_pm(fc))
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
- }
-
- rtl_action_proc(hw, skb, true);
-
- if (is_multicast_ether_addr(pda_addr))
- rtlpriv->stats.txbytesmulticast += skb->len;
- else if (is_broadcast_ether_addr(pda_addr))
- rtlpriv->stats.txbytesbroadcast += skb->len;
- else
- rtlpriv->stats.txbytesunicast += skb->len;
-
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
- ring = &rtlpci->tx_ring[hw_queue];
- if (hw_queue != BEACON_QUEUE) {
- if (rtlpriv->use_new_trx_flow)
- idx = ring->cur_tx_wp;
- else
- idx = (ring->idx + skb_queue_len(&ring->queue)) %
- ring->entries;
- } else {
- idx = 0;
- }
-
- pdesc = &ring->desc[idx];
- if (rtlpriv->use_new_trx_flow) {
- ptx_bd_desc = &ring->buffer_desc[idx];
- } else {
- own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc,
- true, HW_DESC_OWN);
-
- if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue));
-
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
- flags);
- return skb->len;
- }
- }
-
- if (rtlpriv->cfg->ops->get_available_desc &&
- rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "get_available_desc fail\n");
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return skb->len;
- }
-
- if (ieee80211_is_data_qos(fc)) {
- tid = rtl_get_tid(skb);
- if (sta) {
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- seq_number = (le16_to_cpu(hdr->seq_ctrl) &
- IEEE80211_SCTL_SEQ) >> 4;
- seq_number += 1;
-
- if (!ieee80211_has_morefrags(hdr->frame_control))
- sta_entry->tids[tid].seq_number = seq_number;
- }
- }
-
- if (ieee80211_is_data(fc))
- rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
-
- rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
- (u8 *)ptx_bd_desc, info, sta, skb, hw_queue, ptcb_desc);
-
- __skb_queue_tail(&ring->queue, skb);
-
- if (rtlpriv->use_new_trx_flow) {
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
- HW_DESC_OWN, &hw_queue);
- } else {
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
- HW_DESC_OWN, &temp_one);
- }
-
- if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
- hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue));
-
- ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
- }
-
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
- rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
-
- return 0;
-}
-
-static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u16 i = 0;
- int queue_id;
- struct rtl8192_tx_ring *ring;
-
- if (mac->skip_scan)
- return;
-
- for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
- u32 queue_len;
-
- if (((queues >> queue_id) & 0x1) == 0) {
- queue_id--;
- continue;
- }
- ring = &pcipriv->dev.tx_ring[queue_id];
- queue_len = skb_queue_len(&ring->queue);
- if (queue_len == 0 || queue_id == BEACON_QUEUE ||
- queue_id == TXCMD_QUEUE) {
- queue_id--;
- continue;
- } else {
- msleep(20);
- i++;
- }
-
-		/* give up after at most 200 * 20 ms (about 4 s) of waiting */
- if (rtlpriv->psc.rfpwr_state == ERFOFF ||
- is_hal_stop(rtlhal) || i >= 200)
- return;
- }
-}
-
-static void rtl_pci_deinit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- _rtl_pci_deinit_trx_ring(hw);
-
- synchronize_irq(rtlpci->pdev->irq);
- tasklet_kill(&rtlpriv->works.irq_tasklet);
- cancel_work_sync(&rtlpriv->works.lps_change_work);
-
- flush_workqueue(rtlpriv->works.rtl_wq);
- destroy_workqueue(rtlpriv->works.rtl_wq);
-}
-
-static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
-{
- int err;
-
- _rtl_pci_init_struct(hw, pdev);
-
- err = _rtl_pci_init_trx_ring(hw);
- if (err) {
-		pr_err("tx/rx ring initialization failed\n");
- return err;
- }
-
- return 0;
-}
-
-static int rtl_pci_start(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
-
- int err;
-
- rtl_pci_reset_trx_ring(hw);
-
- rtlpci->driver_is_goingto_unload = false;
- if (rtlpriv->cfg->ops->get_btc_status &&
- rtlpriv->cfg->ops->get_btc_status()) {
- rtlpriv->btcoexist.btc_info.ap_num = 36;
- rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
- rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
- } else if (rtlpriv->btcoexist.btc_ops) {
- rtlpriv->btcoexist.btc_ops->btc_init_variables_wifi_only(
- rtlpriv);
- }
-
- err = rtlpriv->cfg->ops->hw_init(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Failed to config hardware!\n");
- return err;
- }
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
- &rtlmac->retry_long);
-
- rtlpriv->cfg->ops->enable_interrupt(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
-
- rtl_init_rx_config(hw);
-
- /*should be after adapter start and interrupt enable. */
- set_hal_start(rtlhal);
-
- RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
-
- rtlpci->up_first_time = false;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
- return 0;
-}
-
-static void rtl_pci_stop(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- unsigned long flags;
- u8 rf_timeout = 0;
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_halt_notify(rtlpriv);
-
- if (rtlpriv->btcoexist.btc_ops)
- rtlpriv->btcoexist.btc_ops->btc_deinit_variables(rtlpriv);
-
-	/*
-	 * This must be done before disabling the interrupt and the
-	 * adapter, and it takes effect immediately.
-	 */
- set_hal_stop(rtlhal);
-
- rtlpci->driver_is_goingto_unload = true;
- rtlpriv->cfg->ops->disable_interrupt(hw);
- cancel_work_sync(&rtlpriv->works.lps_change_work);
-
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
- while (ppsc->rfchange_inprogress) {
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
- if (rf_timeout > 100) {
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
- break;
- }
- mdelay(1);
- rf_timeout++;
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
- }
- ppsc->rfchange_inprogress = true;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
-
- rtlpriv->cfg->ops->hw_disable(hw);
- /* some things are not needed if firmware not available */
- if (!rtlpriv->max_fw_size)
- return;
- rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
-
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
- ppsc->rfchange_inprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
-
- rtl_pci_enable_aspm(hw);
-}
-
-static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
- struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct pci_dev *bridge_pdev = pdev->bus->self;
- u16 venderid;
- u16 deviceid;
- u8 revisionid;
- u16 irqline;
- u8 tmp;
-
- pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
- venderid = pdev->vendor;
- deviceid = pdev->device;
- pci_read_config_byte(pdev, 0x8, &revisionid);
- pci_read_config_word(pdev, 0x3C, &irqline);
-
- /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
- * r8192e_pci, and RTL8192SE, which uses this driver. If the
- * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
- * the correct driver is r8192e_pci, thus this routine should
- * return false.
- */
- if (deviceid == RTL_PCI_8192SE_DID &&
- revisionid == RTL_PCI_REVISION_ID_8192PCIE)
- return false;
-
- if (deviceid == RTL_PCI_8192_DID ||
- deviceid == RTL_PCI_0044_DID ||
- deviceid == RTL_PCI_0047_DID ||
- deviceid == RTL_PCI_8192SE_DID ||
- deviceid == RTL_PCI_8174_DID ||
- deviceid == RTL_PCI_8173_DID ||
- deviceid == RTL_PCI_8172_DID ||
- deviceid == RTL_PCI_8171_DID) {
- switch (revisionid) {
- case RTL_PCI_REVISION_ID_8192PCIE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192 PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
- rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
- return false;
- case RTL_PCI_REVISION_ID_8192SE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192SE is found - vid/did=%x/%x\n",
- venderid, deviceid);
- rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
- venderid, deviceid);
- rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
- break;
- }
- } else if (deviceid == RTL_PCI_8723AE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8723AE PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
- } else if (deviceid == RTL_PCI_8192CET_DID ||
- deviceid == RTL_PCI_8192CE_DID ||
- deviceid == RTL_PCI_8191CE_DID ||
- deviceid == RTL_PCI_8188CE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192C PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
- } else if (deviceid == RTL_PCI_8192DE_DID ||
- deviceid == RTL_PCI_8192DE_DID2) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192D PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
- } else if (deviceid == RTL_PCI_8188EE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8188EE\n");
- } else if (deviceid == RTL_PCI_8723BE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8723BE\n");
- } else if (deviceid == RTL_PCI_8192EE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8192EE\n");
- } else if (deviceid == RTL_PCI_8821AE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8821AE\n");
- } else if (deviceid == RTL_PCI_8812AE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8812AE\n");
- } else if (deviceid == RTL_PCI_8822BE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8822BE;
- rtlhal->bandset = BAND_ON_BOTH;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8822BE\n");
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
- venderid, deviceid);
-
- rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
- }
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
- if (revisionid == 0 || revisionid == 1) {
- if (revisionid == 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC0\n");
- rtlhal->interfaceindex = 0;
- } else if (revisionid == 1) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC1\n");
- rtlhal->interfaceindex = 1;
- }
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
- venderid, deviceid, revisionid);
- rtlhal->interfaceindex = 0;
- }
- }
-
- switch (rtlhal->hw_type) {
- case HARDWARE_TYPE_RTL8192EE:
- case HARDWARE_TYPE_RTL8822BE:
- /* use new trx flow */
- rtlpriv->use_new_trx_flow = true;
- break;
-
- default:
- rtlpriv->use_new_trx_flow = false;
- break;
- }
-
- /*find bus info */
- pcipriv->ndis_adapter.busnumber = pdev->bus->number;
- pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
- pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
-
- /*find bridge info */
- pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
-	/* Some ARM platforms have no bridge_pdev and would crash here,
-	 * so check whether bridge_pdev is NULL first.
-	 */
- if (bridge_pdev) {
- /*find bridge info if available */
- pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
- for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
- if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
- pcipriv->ndis_adapter.pcibridge_vendor = tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Pci Bridge Vendor is found index: %d\n",
- tmp);
- break;
- }
- }
- }
-
- if (pcipriv->ndis_adapter.pcibridge_vendor !=
- PCI_BRIDGE_VENDOR_UNKNOWN) {
- pcipriv->ndis_adapter.pcibridge_busnum =
- bridge_pdev->bus->number;
- pcipriv->ndis_adapter.pcibridge_devnum =
- PCI_SLOT(bridge_pdev->devfn);
- pcipriv->ndis_adapter.pcibridge_funcnum =
- PCI_FUNC(bridge_pdev->devfn);
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
- pci_pcie_cap(bridge_pdev);
- pcipriv->ndis_adapter.num4bytes =
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
-
- rtl_pci_get_linkcontrol_field(hw);
-
- if (pcipriv->ndis_adapter.pcibridge_vendor ==
- PCI_BRIDGE_VENDOR_AMD) {
- pcipriv->ndis_adapter.amd_l1_patch =
- rtl_pci_get_amd_l1_patch(hw);
- }
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
- pcipriv->ndis_adapter.busnumber,
- pcipriv->ndis_adapter.devnumber,
- pcipriv->ndis_adapter.funcnumber,
- pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
- pcipriv->ndis_adapter.pcibridge_busnum,
- pcipriv->ndis_adapter.pcibridge_devnum,
- pcipriv->ndis_adapter.pcibridge_funcnum,
- pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
- pcipriv->ndis_adapter.amd_l1_patch);
-
- rtl_pci_parse_configuration(pdev, hw);
- list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
-
- return true;
-}
-
-static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
- int ret;
-
- ret = pci_enable_msi(rtlpci->pdev);
- if (ret < 0)
- return ret;
-
- ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, hw);
- if (ret < 0) {
- pci_disable_msi(rtlpci->pdev);
- return ret;
- }
-
- rtlpci->using_msi = true;
-
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "MSI Interrupt Mode!\n");
- return 0;
-}
-
-static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
- int ret;
-
- ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, hw);
- if (ret < 0)
- return ret;
-
- rtlpci->using_msi = false;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "Pin-based Interrupt Mode!\n");
- return 0;
-}
-
-static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
-{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
- int ret;
-
- if (rtlpci->msi_support) {
- ret = rtl_pci_intr_mode_msi(hw);
- if (ret < 0)
- ret = rtl_pci_intr_mode_legacy(hw);
- } else {
- ret = rtl_pci_intr_mode_legacy(hw);
- }
- return ret;
-}
-
-static void platform_enable_dma64(struct pci_dev *pdev, bool dma64)
-{
- u8 value;
-
- pci_read_config_byte(pdev, 0x719, &value);
-
-	/* Bit 5 of config offset 0x719 is the DMA64 fetch bit. */
- if (dma64)
- value |= BIT(5);
- else
- value &= ~BIT(5);
-
- pci_write_config_byte(pdev, 0x719, value);
-}
-
-int rtl_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct ieee80211_hw *hw = NULL;
-
- struct rtl_priv *rtlpriv = NULL;
- struct rtl_pci_priv *pcipriv = NULL;
- struct rtl_pci *rtlpci;
- unsigned long pmem_start, pmem_len, pmem_flags;
- int err;
-
- err = rtl_core_module_init();
- if (err)
- return err;
- err = pci_enable_device(pdev);
- if (err) {
- WARN_ONCE(true, "%s : Cannot enable new PCI device\n",
- pci_name(pdev));
- return err;
- }
-
- if (((struct rtl_hal_cfg *)(id->driver_data))->mod_params->dma64 &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- WARN_ONCE(true,
- "Unable to obtain 64bit DMA for consistent allocations\n");
- err = -ENOMEM;
- goto fail1;
- }
-
- platform_enable_dma64(pdev, true);
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- WARN_ONCE(true,
- "rtlwifi: Unable to obtain 32bit DMA for consistent allocations\n");
- err = -ENOMEM;
- goto fail1;
- }
-
- platform_enable_dma64(pdev, false);
- }
-
- pci_set_master(pdev);
-
- hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
- sizeof(struct rtl_priv), &rtl_ops);
- if (!hw) {
- WARN_ONCE(true,
- "%s : ieee80211 alloc failed\n", pci_name(pdev));
- err = -ENOMEM;
- goto fail1;
- }
-
- SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
-
- rtlpriv = hw->priv;
- rtlpriv->hw = hw;
- pcipriv = (void *)rtlpriv->priv;
- pcipriv->dev.pdev = pdev;
- init_completion(&rtlpriv->firmware_loading_complete);
- /*proximity init here*/
- rtlpriv->proximity.proxim_on = false;
-
- /* init cfg & intf_ops */
- rtlpriv->rtlhal.interface = INTF_PCI;
- rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
- rtlpriv->intf_ops = &rtl_pci_ops;
- rtlpriv->glb_var = &rtl_global_var;
-
- /* MEM map */
- err = pci_request_regions(pdev, KBUILD_MODNAME);
- if (err) {
- WARN_ONCE(true, "rtlwifi: Can't obtain PCI resources\n");
- goto fail1;
- }
-
- pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
- pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
- pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
-
- /*shared mem start */
- rtlpriv->io.pci_mem_start =
- (unsigned long)pci_iomap(pdev,
- rtlpriv->cfg->bar_id, pmem_len);
- if (rtlpriv->io.pci_mem_start == 0) {
- WARN_ONCE(true, "rtlwifi: Can't map PCI mem\n");
- err = -ENOMEM;
- goto fail2;
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
- pmem_start, pmem_len, pmem_flags,
- rtlpriv->io.pci_mem_start);
-
- /* Disable Clk Request */
- pci_write_config_byte(pdev, 0x81, 0);
- /* leave D3 mode */
- pci_write_config_byte(pdev, 0x44, 0);
- pci_write_config_byte(pdev, 0x04, 0x06);
- pci_write_config_byte(pdev, 0x04, 0x07);
-
- /* find adapter */
- if (!_rtl_pci_find_adapter(pdev, hw)) {
- err = -ENODEV;
- goto fail2;
- }
-
- /* Init IO handler */
- _rtl_pci_io_handler_init(&pdev->dev, hw);
-
- /*like read eeprom and so on */
- rtlpriv->cfg->ops->read_eeprom_info(hw);
-
- if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
- pr_err("Can't init_sw_vars\n");
- err = -ENODEV;
- goto fail3;
- }
- rtlpriv->cfg->ops->init_sw_leds(hw);
-
- /*aspm */
- rtl_pci_init_aspm(hw);
-
- /* Init mac80211 sw */
- err = rtl_init_core(hw);
- if (err) {
- pr_err("Can't allocate sw for mac80211\n");
- goto fail3;
- }
-
- /* Init PCI sw */
- err = rtl_pci_init(hw, pdev);
- if (err) {
- pr_err("Failed to init PCI\n");
- goto fail3;
- }
-
- err = ieee80211_register_hw(hw);
- if (err) {
- pr_err("Can't register mac80211 hw.\n");
- err = -ENODEV;
- goto fail3;
- }
- rtlpriv->mac80211.mac80211_registered = 1;
-
- /* add for debug */
- rtl_debug_add_one(hw);
-
- /*init rfkill */
-	rtl_init_rfkill(hw);
-
- rtlpci = rtl_pcidev(pcipriv);
- err = rtl_pci_intr_mode_decide(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "%s: failed to register IRQ handler\n",
- wiphy_name(hw->wiphy));
- goto fail3;
- }
- rtlpci->irq_alloc = 1;
-
- set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
- return 0;
-
-fail3:
- pci_set_drvdata(pdev, NULL);
- rtl_deinit_core(hw);
-
-fail2:
- if (rtlpriv->io.pci_mem_start != 0)
- pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
-
- pci_release_regions(pdev);
- complete(&rtlpriv->firmware_loading_complete);
-
-fail1:
- if (hw)
- ieee80211_free_hw(hw);
- pci_disable_device(pdev);
-
- return err;
-}
-
-void rtl_pci_disconnect(struct pci_dev *pdev)
-{
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
- struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
-
- /* just in case driver is removed before firmware callback */
- wait_for_completion(&rtlpriv->firmware_loading_complete);
- clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
-
-	/* remove from debug */
- rtl_debug_remove_one(hw);
-
- /*ieee80211_unregister_hw will call ops_stop */
- if (rtlmac->mac80211_registered == 1) {
- ieee80211_unregister_hw(hw);
- rtlmac->mac80211_registered = 0;
- } else {
- rtl_deinit_deferred_work(hw);
- rtlpriv->intf_ops->adapter_stop(hw);
- }
- rtlpriv->cfg->ops->disable_interrupt(hw);
-
- /*deinit rfkill */
- rtl_deinit_rfkill(hw);
-
- rtl_pci_deinit(hw);
- rtl_deinit_core(hw);
- rtlpriv->cfg->ops->deinit_sw_vars(hw);
-
- if (rtlpci->irq_alloc) {
- free_irq(rtlpci->pdev->irq, hw);
- rtlpci->irq_alloc = 0;
- }
-
- if (rtlpci->using_msi)
- pci_disable_msi(rtlpci->pdev);
-
- list_del(&rtlpriv->list);
- if (rtlpriv->io.pci_mem_start != 0) {
- pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
- pci_release_regions(pdev);
- }
-
- pci_disable_device(pdev);
-
- rtl_pci_disable_aspm(hw);
-
- pci_set_drvdata(pdev, NULL);
-
- ieee80211_free_hw(hw);
- rtl_core_module_exit();
-}
-
-#ifdef CONFIG_PM_SLEEP
-/***************************************
- * Kernel PCI power state defines:
- * PCI_D0      ((pci_power_t __force) 0)
- * PCI_D1      ((pci_power_t __force) 1)
- * PCI_D2      ((pci_power_t __force) 2)
- * PCI_D3hot   ((pci_power_t __force) 3)
- * PCI_D3cold  ((pci_power_t __force) 4)
- * PCI_UNKNOWN ((pci_power_t __force) 5)
- *
- * This function is called when the system goes into suspend.
- * mac80211 calls rtl_mac_stop() from its own suspend path first,
- * so there is no need to call hw_disable here.
- ****************************************/
-int rtl_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->cfg->ops->hw_suspend(hw);
- rtl_deinit_rfkill(hw);
-
- return 0;
-}
-
-int rtl_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->cfg->ops->hw_resume(hw);
- rtl_init_rfkill(hw);
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
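These suspend/resume callbacks are exported and are typically wired, in the chip-specific modules, into a dev_pm_ops table referenced from the pci_driver. A minimal sketch of that wiring is below; the rtlwifi_example_* names and the device ID are hypothetical and not taken from this tree.

#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"	/* rtl_pci_probe(), rtl_pci_disconnect(), rtl_pci_suspend/resume() */

/* Hypothetical ID table; the real per-chip tables also pass a
 * struct rtl_hal_cfg pointer via driver_data, which rtl_pci_probe()
 * expects.
 */
static const struct pci_device_id rtlwifi_example_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8179) },
	{ }
};
MODULE_DEVICE_TABLE(pci, rtlwifi_example_pci_ids);

static SIMPLE_DEV_PM_OPS(rtlwifi_example_pm_ops,
			 rtl_pci_suspend, rtl_pci_resume);

static struct pci_driver rtlwifi_example_driver = {
	.name = "rtlwifi_example",
	.id_table = rtlwifi_example_pci_ids,
	.probe = rtl_pci_probe,
	.remove = rtl_pci_disconnect,
	.driver.pm = &rtlwifi_example_pm_ops,
};

module_pci_driver(rtlwifi_example_driver);
MODULE_LICENSE("GPL");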
-
-const struct rtl_intf_ops rtl_pci_ops = {
- .read_efuse_byte = read_efuse_byte,
- .adapter_start = rtl_pci_start,
- .adapter_stop = rtl_pci_stop,
- .check_buddy_priv = rtl_pci_check_buddy_priv,
- .adapter_tx = rtl_pci_tx,
- .flush = rtl_pci_flush,
- .reset_trx_ring = rtl_pci_reset_trx_ring,
- .waitq_insert = rtl_pci_tx_chk_waitq_insert,
-
- .disable_aspm = rtl_pci_disable_aspm,
- .enable_aspm = rtl_pci_enable_aspm,
-};
diff --git a/drivers/staging/rtlwifi/pci.h b/drivers/staging/rtlwifi/pci.h
deleted file mode 100644
index 0e55baec95a8..000000000000
--- a/drivers/staging/rtlwifi/pci.h
+++ /dev/null
@@ -1,319 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_PCI_H__
-#define __RTL_PCI_H__
-
-#include <linux/pci.h>
-/* Two rx queues are used:
- * 0: MPDU packet queue
- * 1: Rx command queue
- */
-#define RTL_PCI_RX_MPDU_QUEUE 0
-#define RTL_PCI_RX_CMD_QUEUE 1
-#define RTL_PCI_MAX_RX_QUEUE 2
-
-#define RTL_PCI_MAX_RX_COUNT 512/*64*/
-#define RTL_PCI_MAX_TX_QUEUE_COUNT 9
-
-#define RT_TXDESC_NUM 128
-#define TX_DESC_NUM_92E 512
-#define TX_DESC_NUM_8822B 512
-#define RT_TXDESC_NUM_BE_QUEUE 256
-
-#define BK_QUEUE 0
-#define BE_QUEUE 1
-#define VI_QUEUE 2
-#define VO_QUEUE 3
-#define BEACON_QUEUE 4
-#define TXCMD_QUEUE 5
-#define MGNT_QUEUE 6
-#define HIGH_QUEUE 7
-#define HCCA_QUEUE 8
-#define H2C_QUEUE TXCMD_QUEUE /* In 8822B */
-
-#define RTL_PCI_DEVICE(vend, dev, cfg) \
- .vendor = (vend), \
- .device = (dev), \
- .subvendor = PCI_ANY_ID, \
- .subdevice = PCI_ANY_ID,\
- .driver_data = (kernel_ulong_t)&(cfg)
-
-#define INTEL_VENDOR_ID 0x8086
-#define SIS_VENDOR_ID 0x1039
-#define ATI_VENDOR_ID 0x1002
-#define ATI_DEVICE_ID 0x7914
-#define AMD_VENDOR_ID 0x1022
-
-#define PCI_MAX_BRIDGE_NUMBER 255
-#define PCI_MAX_DEVICES 32
-#define PCI_MAX_FUNCTION 8
-
-#define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */
-#define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */
-
-#define PCI_CLASS_BRIDGE_DEV 0x06
-#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
-#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
-#define PCI_CAP_ID_EXP 0x10
-
-#define U1DONTCARE 0xFF
-#define U2DONTCARE 0xFFFF
-#define U4DONTCARE 0xFFFFFFFF
-
-#define RTL_PCI_8192_DID 0x8192 /*8192 PCI-E */
-#define RTL_PCI_8192SE_DID 0x8192 /*8192 SE */
-#define RTL_PCI_8174_DID 0x8174 /*8192 SE */
-#define RTL_PCI_8173_DID 0x8173 /*8191 SE Crab */
-#define RTL_PCI_8172_DID 0x8172 /*8191 SE RE */
-#define RTL_PCI_8171_DID 0x8171 /*8191 SE Unicron */
-#define RTL_PCI_8723AE_DID 0x8723 /*8723AE */
-#define RTL_PCI_0045_DID 0x0045 /*8190 PCI for Ceraga */
-#define RTL_PCI_0046_DID 0x0046 /*8190 Cardbus for Ceraga */
-#define RTL_PCI_0044_DID 0x0044 /*8192e PCIE for Ceraga */
-#define RTL_PCI_0047_DID 0x0047 /*8192e Express Card for Ceraga */
-#define RTL_PCI_700F_DID 0x700F
-#define RTL_PCI_701F_DID 0x701F
-#define RTL_PCI_DLINK_DID 0x3304
-#define RTL_PCI_8723AE_DID 0x8723 /*8723e */
-#define RTL_PCI_8192CET_DID 0x8191 /*8192ce */
-#define RTL_PCI_8192CE_DID 0x8178 /*8192ce */
-#define RTL_PCI_8191CE_DID 0x8177 /*8192ce */
-#define RTL_PCI_8188CE_DID 0x8176 /*8192ce */
-#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */
-#define RTL_PCI_8192DE_DID 0x8193 /*8192de */
-#define RTL_PCI_8192DE_DID2 0x002B /*92DE*/
-#define RTL_PCI_8188EE_DID 0x8179 /*8188ee*/
-#define RTL_PCI_8723BE_DID 0xB723 /*8723be*/
-#define RTL_PCI_8192EE_DID 0x818B /*8192ee*/
-#define RTL_PCI_8821AE_DID 0x8821 /*8821ae*/
-#define RTL_PCI_8812AE_DID 0x8812 /*8812ae*/
-#define RTL_PCI_8822BE_DID 0xB822 /*8822be*/
-
-/* 8192 supports 16 pages of IO registers */
-#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000
-#define RTL_MEM_MAPPED_IO_RANGE_8192PCIE 0x4000
-#define RTL_MEM_MAPPED_IO_RANGE_8192SE 0x4000
-#define RTL_MEM_MAPPED_IO_RANGE_8192CE 0x4000
-#define RTL_MEM_MAPPED_IO_RANGE_8192DE 0x4000
-
-#define RTL_PCI_REVISION_ID_8190PCI 0x00
-#define RTL_PCI_REVISION_ID_8192PCIE 0x01
-#define RTL_PCI_REVISION_ID_8192SE 0x10
-#define RTL_PCI_REVISION_ID_8192CE 0x1
-#define RTL_PCI_REVISION_ID_8192DE 0x0
-
-#define RTL_DEFAULT_HARDWARE_TYPE HARDWARE_TYPE_RTL8192CE
-
-enum pci_bridge_vendor {
- PCI_BRIDGE_VENDOR_INTEL = 0x0, /*0b'0000,0001 */
- PCI_BRIDGE_VENDOR_ATI, /*0b'0000,0010*/
- PCI_BRIDGE_VENDOR_AMD, /*0b'0000,0100*/
- PCI_BRIDGE_VENDOR_SIS, /*0b'0000,1000*/
- PCI_BRIDGE_VENDOR_UNKNOWN, /*0b'0100,0000*/
- PCI_BRIDGE_VENDOR_MAX,
-};
-
-struct rtl_pci_capabilities_header {
- u8 capability_id;
- u8 next;
-};
-
-/* In the new TRX flow the buffer descriptor is a new concept, while
- * "TX wifi info" corresponds to the TX descriptor of the old flow and
- * "RX wifi info" corresponds to the RX descriptor of the old flow.
- */
-struct rtl_tx_buffer_desc {
- u32 dword[4 * (1 << (BUFDESC_SEG_NUM + 1))];
-} __packed;
-
-struct rtl_tx_desc {
- u32 dword[16];
-} __packed;
-
-struct rtl_rx_buffer_desc { /*rx buffer desc*/
- u32 dword[4];
-} __packed;
-
-struct rtl_rx_desc { /*old: rx desc new: rx wifi info*/
- u32 dword[8];
-} __packed;
-
-struct rtl_tx_cmd_desc {
- u32 dword[16];
-} __packed;
-
-struct rtl8192_tx_ring {
- struct rtl_tx_desc *desc;
- dma_addr_t dma;
- unsigned int idx;
- unsigned int entries;
- struct sk_buff_head queue;
- /*add for new trx flow*/
- struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
-	dma_addr_t buffer_desc_dma; /*tx buffer desc dma memory*/
- u16 cur_tx_wp; /* current_tx_write_point */
- u16 cur_tx_rp; /* current_tx_read_point */
-};
-
-struct rtl8192_rx_ring {
- struct rtl_rx_desc *desc;
- dma_addr_t dma;
- unsigned int idx;
- struct sk_buff *rx_buf[RTL_PCI_MAX_RX_COUNT];
- /*add for new trx flow*/
- struct rtl_rx_buffer_desc *buffer_desc; /*rx buffer descriptor*/
- u16 next_rx_rp; /* next_rx_read_point */
-};
-
-struct rtl_pci {
- struct pci_dev *pdev;
- bool irq_enabled;
-
- bool driver_is_goingto_unload;
- bool up_first_time;
- bool first_init;
- bool being_init_adapter;
- bool init_ready;
-
- /*Tx */
- struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
- int txringcount[RTL_PCI_MAX_TX_QUEUE_COUNT];
- u32 transmit_config;
-
- /*Rx */
- struct rtl8192_rx_ring rx_ring[RTL_PCI_MAX_RX_QUEUE];
- int rxringcount;
- u16 rxbuffersize;
- u32 receive_config;
-
- /*irq */
- u8 irq_alloc;
- u32 irq_mask[4]; /* 0-1: normal, 2: unused, 3: h2c */
- u32 sys_irq_mask;
-
- /*Bcn control register setting */
- u32 reg_bcn_ctrl_val;
-
- /*ASPM*/
- u8 const_pci_aspm;
- u8 const_amdpci_aspm;
- u8 const_hwsw_rfoff_d3;
- u8 const_support_pciaspm;
- /*pci-e bridge */
- u8 const_hostpci_aspm_setting;
- /*pci-e device */
- u8 const_devicepci_aspm_setting;
- /* If it supports ASPM, Offset[560h] = 0x40,
- * otherwise Offset[560h] = 0x00.
- */
- bool support_aspm;
- bool support_backdoor;
-
- /*QOS & EDCA */
- enum acm_method acm_method;
-
- u16 shortretry_limit;
- u16 longretry_limit;
-
- /* MSI support */
- bool msi_support;
- bool using_msi;
- /* interrupt clear before set */
- bool int_clear;
-};
-
-struct mp_adapter {
- u8 linkctrl_reg;
-
- u8 busnumber;
- u8 devnumber;
- u8 funcnumber;
-
- u8 pcibridge_busnum;
- u8 pcibridge_devnum;
- u8 pcibridge_funcnum;
-
- u8 pcibridge_vendor;
- u16 pcibridge_vendorid;
- u16 pcibridge_deviceid;
-
- u8 num4bytes;
-
- u8 pcibridge_pciehdr_offset;
- u8 pcibridge_linkctrlreg;
-
- bool amd_l1_patch;
-};
-
-struct rtl_pci_priv {
- struct bt_coexist_info bt_coexist;
- struct rtl_led_ctl ledctl;
- struct rtl_pci dev;
- struct mp_adapter ndis_adapter;
-};
-
-#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
-#define rtl_pcidev(pcipriv) (&((pcipriv)->dev))
-
-int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
-
-extern const struct rtl_intf_ops rtl_pci_ops;
-
-int rtl_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
-void rtl_pci_disconnect(struct pci_dev *pdev);
-#ifdef CONFIG_PM_SLEEP
-int rtl_pci_suspend(struct device *dev);
-int rtl_pci_resume(struct device *dev);
-#endif /* CONFIG_PM_SLEEP */
-static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
-{
- return readb((u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
-}
-
-static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
-{
- return readw((u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
-}
-
-static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
-{
- return readl((u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
-}
-
-static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
-{
- writeb(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
-}
-
-static inline void pci_write16_async(struct rtl_priv *rtlpriv,
- u32 addr, u16 val)
-{
- writew(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
-}
-
-static inline void pci_write32_async(struct rtl_priv *rtlpriv,
- u32 addr, u32 val)
-{
- writel(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
-}
-
-static inline u16 calc_fifo_space(u16 rp, u16 wp, u16 size)
-{
- if (rp <= wp)
- return size - 1 + rp - wp;
- return rp - wp - 1;
-}
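The helper above computes the number of free slots between a read pointer and a write pointer in a circular ring that keeps one slot unused. A small self-contained check of that formula, using hypothetical pointer values and the 512-entry ring size defined earlier in this header:

#include <assert.h>
#include <stdint.h>

static inline uint16_t fifo_space(uint16_t rp, uint16_t wp, uint16_t size)
{
	/* same arithmetic as calc_fifo_space() above */
	if (rp <= wp)
		return size - 1 + rp - wp;
	return rp - wp - 1;
}

int main(void)
{
	/* empty ring (rp == wp): one slot stays unused, so size - 1 are free */
	assert(fifo_space(0, 0, 512) == 511);
	/* writer one slot ahead of the reader */
	assert(fifo_space(0, 1, 512) == 510);
	/* writer wrapped around behind the reader */
	assert(fifo_space(300, 100, 512) == 199);
	return 0;
}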
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/halphyrf_ce.c b/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
deleted file mode 100644
index f77847c4206a..000000000000
--- a/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
+++ /dev/null
@@ -1,954 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-#define CALCULATE_SWINGTALBE_OFFSET(_offset, _direction, _size, \
- _delta_thermal) \
- do { \
- for (_offset = 0; _offset < _size; _offset++) { \
- if (_delta_thermal < \
- thermal_threshold[_direction][_offset]) { \
- if (_offset != 0) \
- _offset--; \
- break; \
- } \
- } \
- if (_offset >= _size) \
- _offset = _size - 1; \
- } while (0)
-
-static inline void phydm_set_calibrate_info_up(
- struct phy_dm_struct *dm, struct txpwrtrack_cfg *c, u8 delta,
- struct dm_rf_calibration_struct *cali_info,
- u8 *delta_swing_table_idx_tup_a, u8 *delta_swing_table_idx_tup_b,
- u8 *delta_swing_table_idx_tup_c, u8 *delta_swing_table_idx_tup_d)
-{
- u8 p = 0;
-
- for (p = ODM_RF_PATH_A; p < c->rf_path_count; p++) {
- cali_info->delta_power_index_last[p] =
- cali_info->delta_power_index
-				[p]; /*recording power index offset*/
- switch (p) {
- case ODM_RF_PATH_B:
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_swing_table_idx_tup_b[%d] = %d\n",
- delta, delta_swing_table_idx_tup_b[delta]);
-
- cali_info->delta_power_index[p] =
- delta_swing_table_idx_tup_b[delta];
- /*Record delta swing for mix mode pwr tracking*/
- cali_info->absolute_ofdm_swing_idx[p] =
- delta_swing_table_idx_tup_b[delta];
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "******Temp is higher and cali_info->absolute_ofdm_swing_idx[ODM_RF_PATH_B] = %d\n",
- cali_info->absolute_ofdm_swing_idx[p]);
- break;
-
- case ODM_RF_PATH_C:
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_swing_table_idx_tup_c[%d] = %d\n",
- delta, delta_swing_table_idx_tup_c[delta]);
-
- cali_info->delta_power_index[p] =
- delta_swing_table_idx_tup_c[delta];
- /*Record delta swing for mix mode pwr tracking*/
- cali_info->absolute_ofdm_swing_idx[p] =
- delta_swing_table_idx_tup_c[delta];
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "******Temp is higher and cali_info->absolute_ofdm_swing_idx[ODM_RF_PATH_C] = %d\n",
- cali_info->absolute_ofdm_swing_idx[p]);
- break;
-
- case ODM_RF_PATH_D:
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_swing_table_idx_tup_d[%d] = %d\n",
- delta, delta_swing_table_idx_tup_d[delta]);
-
- cali_info->delta_power_index[p] =
- delta_swing_table_idx_tup_d[delta];
- /*Record delta swing for mix mode pwr tracking*/
- cali_info->absolute_ofdm_swing_idx[p] =
- delta_swing_table_idx_tup_d[delta];
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "******Temp is higher and cali_info->absolute_ofdm_swing_idx[ODM_RF_PATH_D] = %d\n",
- cali_info->absolute_ofdm_swing_idx[p]);
- break;
-
- default:
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_swing_table_idx_tup_a[%d] = %d\n",
- delta, delta_swing_table_idx_tup_a[delta]);
-
- cali_info->delta_power_index[p] =
- delta_swing_table_idx_tup_a[delta];
- /*Record delta swing for mix mode pwr tracking*/
- cali_info->absolute_ofdm_swing_idx[p] =
- delta_swing_table_idx_tup_a[delta];
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "******Temp is higher and cali_info->absolute_ofdm_swing_idx[ODM_RF_PATH_A] = %d\n",
- cali_info->absolute_ofdm_swing_idx[p]);
- break;
- }
- }
-}
-
-static inline void phydm_set_calibrate_info_down(
- struct phy_dm_struct *dm, struct txpwrtrack_cfg *c, u8 delta,
- struct dm_rf_calibration_struct *cali_info,
- u8 *delta_swing_table_idx_tdown_a, u8 *delta_swing_table_idx_tdown_b,
- u8 *delta_swing_table_idx_tdown_c, u8 *delta_swing_table_idx_tdown_d)
-{
- u8 p = 0;
-
- for (p = ODM_RF_PATH_A; p < c->rf_path_count; p++) {
- cali_info->delta_power_index_last[p] =
- cali_info->delta_power_index
- [p]; /*recording power index offset*/
-
- switch (p) {
- case ODM_RF_PATH_B:
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_swing_table_idx_tdown_b[%d] = %d\n",
- delta,
- delta_swing_table_idx_tdown_b[delta]);
- cali_info->delta_power_index[p] =
- -1 * delta_swing_table_idx_tdown_b[delta];
- /*Record delta swing for mix mode pwr tracking*/
- cali_info->absolute_ofdm_swing_idx[p] =
- -1 * delta_swing_table_idx_tdown_b[delta];
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "******Temp is lower and cali_info->absolute_ofdm_swing_idx[ODM_RF_PATH_B] = %d\n",
- cali_info->absolute_ofdm_swing_idx[p]);
- break;
-
- case ODM_RF_PATH_C:
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_swing_table_idx_tdown_c[%d] = %d\n",
- delta,
- delta_swing_table_idx_tdown_c[delta]);
- cali_info->delta_power_index[p] =
- -1 * delta_swing_table_idx_tdown_c[delta];
- /*Record delta swing for mix mode pwr tracking*/
- cali_info->absolute_ofdm_swing_idx[p] =
- -1 * delta_swing_table_idx_tdown_c[delta];
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "******Temp is lower and cali_info->absolute_ofdm_swing_idx[ODM_RF_PATH_C] = %d\n",
- cali_info->absolute_ofdm_swing_idx[p]);
- break;
-
- case ODM_RF_PATH_D:
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_swing_table_idx_tdown_d[%d] = %d\n",
- delta,
- delta_swing_table_idx_tdown_d[delta]);
- cali_info->delta_power_index[p] =
- -1 * delta_swing_table_idx_tdown_d[delta];
- /*Record delta swing for mix mode pwr tracking*/
- cali_info->absolute_ofdm_swing_idx[p] =
- -1 * delta_swing_table_idx_tdown_d[delta];
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "******Temp is lower and cali_info->absolute_ofdm_swing_idx[ODM_RF_PATH_D] = %d\n",
- cali_info->absolute_ofdm_swing_idx[p]);
- break;
-
- default:
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_swing_table_idx_tdown_a[%d] = %d\n",
- delta,
- delta_swing_table_idx_tdown_a[delta]);
- cali_info->delta_power_index[p] =
- -1 * delta_swing_table_idx_tdown_a[delta];
- /*Record delta swing for mix mode pwr tracking*/
- cali_info->absolute_ofdm_swing_idx[p] =
- -1 * delta_swing_table_idx_tdown_a[delta];
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "******Temp is lower and cali_info->absolute_ofdm_swing_idx[ODM_RF_PATH_A] = %d\n",
- cali_info->absolute_ofdm_swing_idx[p]);
- break;
- }
- }
-}
-
-static inline void phydm_odm_tx_power_set(struct phy_dm_struct *dm,
- struct txpwrtrack_cfg *c,
- u8 indexforchannel, u8 flag)
-{
- u8 p = 0;
-
- if (dm->support_ic_type == ODM_RTL8188E ||
- dm->support_ic_type == ODM_RTL8192E ||
- dm->support_ic_type == ODM_RTL8821 ||
- dm->support_ic_type == ODM_RTL8812 ||
- dm->support_ic_type == ODM_RTL8723B ||
- dm->support_ic_type == ODM_RTL8814A ||
- dm->support_ic_type == ODM_RTL8703B ||
- dm->support_ic_type == ODM_RTL8188F ||
- dm->support_ic_type == ODM_RTL8822B ||
- dm->support_ic_type == ODM_RTL8723D ||
- dm->support_ic_type == ODM_RTL8821C ||
- dm->support_ic_type == ODM_RTL8710B) { /* JJ ADD 20161014 */
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "**********Enter POWER Tracking MIX_MODE**********\n");
- for (p = ODM_RF_PATH_A; p < c->rf_path_count; p++) {
- if (flag == 0)
- (*c->odm_tx_pwr_track_set_pwr)(dm, MIX_MODE, p,
- 0);
- else
- (*c->odm_tx_pwr_track_set_pwr)(dm, MIX_MODE, p,
- indexforchannel);
- }
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "**********Enter POWER Tracking BBSWING_MODE**********\n");
- for (p = ODM_RF_PATH_A; p < c->rf_path_count; p++)
- (*c->odm_tx_pwr_track_set_pwr)(dm, BBSWING, p,
- indexforchannel);
- }
-}
-
-void configure_txpower_track(void *dm_void, struct txpwrtrack_cfg *config)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- /* JJ ADD 20161014 */
-
- if (dm->support_ic_type == ODM_RTL8822B)
- configure_txpower_track_8822b(config);
-}
-
-/* **********************************************************************
- * <20121113, Kordan> This function should be called when tx_agc changed.
- * Otherwise the previous compensation is gone, because we record the
- * delta of temperature between two TxPowerTracking watch dogs.
- *
- * NOTE: If Tx BB swing or Tx scaling is changed at run-time, this
- * function still needs to be called.
- * ***********************************************************************/
-void odm_clear_txpowertracking_state(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- struct rtl_efuse *rtlefu = rtl_efuse(rtlpriv);
- u8 p = 0;
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- cali_info->bb_swing_idx_cck_base = cali_info->default_cck_index;
- cali_info->bb_swing_idx_cck = cali_info->default_cck_index;
- dm->rf_calibrate_info.CCK_index = 0;
-
- for (p = ODM_RF_PATH_A; p < MAX_RF_PATH; ++p) {
- cali_info->bb_swing_idx_ofdm_base[p] =
- cali_info->default_ofdm_index;
- cali_info->bb_swing_idx_ofdm[p] = cali_info->default_ofdm_index;
- cali_info->OFDM_index[p] = cali_info->default_ofdm_index;
-
- cali_info->power_index_offset[p] = 0;
- cali_info->delta_power_index[p] = 0;
- cali_info->delta_power_index_last[p] = 0;
-
- cali_info->absolute_ofdm_swing_idx[p] =
- 0; /* Initial Mix mode power tracking*/
- cali_info->remnant_ofdm_swing_idx[p] = 0;
- cali_info->kfree_offset[p] = 0;
- }
-
- cali_info->modify_tx_agc_flag_path_a =
- false; /*Initial at Modify Tx Scaling mode*/
- cali_info->modify_tx_agc_flag_path_b =
- false; /*Initial at Modify Tx Scaling mode*/
- cali_info->modify_tx_agc_flag_path_c =
- false; /*Initial at Modify Tx Scaling mode*/
- cali_info->modify_tx_agc_flag_path_d =
- false; /*Initial at Modify Tx Scaling mode*/
- cali_info->remnant_cck_swing_idx = 0;
- cali_info->thermal_value = rtlefu->eeprom_thermalmeter;
-
- cali_info->modify_tx_agc_value_cck = 0; /* modify by Mingzhi.Guo */
- cali_info->modify_tx_agc_value_ofdm = 0; /* modify by Mingzhi.Guo */
-}
-
-void odm_txpowertracking_callback_thermal_meter(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- struct rtl_efuse *rtlefu = rtl_efuse(rtlpriv);
- void *adapter = dm->adapter;
-
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- u8 thermal_value = 0, delta, delta_LCK, delta_IQK, p = 0, i = 0;
- s8 diff_DPK[4]; /* use 'for..loop' to initialize */
- u8 thermal_value_avg_count = 0;
- u32 thermal_value_avg = 0, regc80, regcd0, regcd4, regab4;
-
- /* OFDM BB Swing should be less than +3.0dB (required by Arthur) */
- u8 OFDM_min_index = 0;
- /* get_right_chnl_place_for_iqk(hal_data->current_channel) */
- u8 indexforchannel = 0;
- u8 power_tracking_type = 0; /* no specify type */
- u8 xtal_offset_eanble = 0;
-
- struct txpwrtrack_cfg c;
-
- /* 4 1. The following TWO tables decide the final index of
- * OFDM/CCK swing table.
- */
- u8 *delta_swing_table_idx_tup_a = NULL;
- u8 *delta_swing_table_idx_tdown_a = NULL;
- u8 *delta_swing_table_idx_tup_b = NULL;
- u8 *delta_swing_table_idx_tdown_b = NULL;
- /*for 8814 add by Yu Chen*/
- u8 *delta_swing_table_idx_tup_c = NULL;
- u8 *delta_swing_table_idx_tdown_c = NULL;
- u8 *delta_swing_table_idx_tup_d = NULL;
- u8 *delta_swing_table_idx_tdown_d = NULL;
- /*for Xtal Offset by James.Tung*/
- s8 *delta_swing_table_xtal_up = NULL;
- s8 *delta_swing_table_xtal_down = NULL;
-
- /* 4 2. Initialization ( 7 steps in total ) */
-
- configure_txpower_track(dm, &c);
-
- (*c.get_delta_swing_table)(dm, (u8 **)&delta_swing_table_idx_tup_a,
- (u8 **)&delta_swing_table_idx_tdown_a,
- (u8 **)&delta_swing_table_idx_tup_b,
- (u8 **)&delta_swing_table_idx_tdown_b);
-
- if (dm->support_ic_type & ODM_RTL8814A) /*for 8814 path C & D*/
- (*c.get_delta_swing_table8814only)(
- dm, (u8 **)&delta_swing_table_idx_tup_c,
- (u8 **)&delta_swing_table_idx_tdown_c,
- (u8 **)&delta_swing_table_idx_tup_d,
- (u8 **)&delta_swing_table_idx_tdown_d);
- /* JJ ADD 20161014 */
- if (dm->support_ic_type &
- (ODM_RTL8703B | ODM_RTL8723D | ODM_RTL8710B)) /*for Xtal Offset*/
- (*c.get_delta_swing_xtal_table)(
- dm, (s8 **)&delta_swing_table_xtal_up,
- (s8 **)&delta_swing_table_xtal_down);
-
- cali_info->txpowertracking_callback_cnt++; /*cosa add for debug*/
- cali_info->is_txpowertracking_init = true;
-
- /*cali_info->txpowertrack_control = hal_data->txpowertrack_control;
- *<Kordan> We should keep updating ctrl variable according to HalData.
- *<Kordan> rf_calibrate_info.rega24 will be initialized during
- *ODM HW configuration, but MP mode configures it with parameter files.
- */
- if (dm->mp_mode)
- cali_info->rega24 = 0x090e1317;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "===>%s\n cali_info->bb_swing_idx_cck_base: %d, cali_info->bb_swing_idx_ofdm_base[A]: %d, cali_info->default_ofdm_index: %d\n",
- __func__, cali_info->bb_swing_idx_cck_base,
- cali_info->bb_swing_idx_ofdm_base[ODM_RF_PATH_A],
- cali_info->default_ofdm_index);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "cali_info->txpowertrack_control=%d, rtlefu->eeprom_thermalmeter %d\n",
- cali_info->txpowertrack_control, rtlefu->eeprom_thermalmeter);
-
- thermal_value =
- (u8)odm_get_rf_reg(dm, ODM_RF_PATH_A, c.thermal_reg_addr,
- 0xfc00); /* 0x42: RF Reg[15:10] 88E */
-
- /*add log by zhao he, check c80/c94/c14/ca0 value*/
- if (dm->support_ic_type == ODM_RTL8723D) {
- regc80 = odm_get_bb_reg(dm, 0xc80, MASKDWORD);
- regcd0 = odm_get_bb_reg(dm, 0xcd0, MASKDWORD);
- regcd4 = odm_get_bb_reg(dm, 0xcd4, MASKDWORD);
- regab4 = odm_get_bb_reg(dm, 0xab4, 0x000007FF);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "0xc80 = 0x%x 0xcd0 = 0x%x 0xcd4 = 0x%x 0xab4 = 0x%x\n",
- regc80, regcd0, regcd4, regab4);
- }
- /* JJ ADD 20161014 */
- if (dm->support_ic_type == ODM_RTL8710B) {
- regc80 = odm_get_bb_reg(dm, 0xc80, MASKDWORD);
- regcd0 = odm_get_bb_reg(dm, 0xcd0, MASKDWORD);
- regcd4 = odm_get_bb_reg(dm, 0xcd4, MASKDWORD);
- regab4 = odm_get_bb_reg(dm, 0xab4, 0x000007FF);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "0xc80 = 0x%x 0xcd0 = 0x%x 0xcd4 = 0x%x 0xab4 = 0x%x\n",
- regc80, regcd0, regcd4, regab4);
- }
-
- if (!cali_info->txpowertrack_control)
- return;
-
- /*4 3. Initialize ThermalValues of rf_calibrate_info*/
-
- if (cali_info->is_reloadtxpowerindex)
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "reload ofdm index for band switch\n");
-
- /*4 4. Calculate average thermal meter*/
-
- cali_info->thermal_value_avg[cali_info->thermal_value_avg_index] =
- thermal_value;
- cali_info->thermal_value_avg_index++;
- if (cali_info->thermal_value_avg_index ==
- c.average_thermal_num) /*Average times = c.average_thermal_num*/
- cali_info->thermal_value_avg_index = 0;
-
- for (i = 0; i < c.average_thermal_num; i++) {
- if (cali_info->thermal_value_avg[i]) {
- thermal_value_avg += cali_info->thermal_value_avg[i];
- thermal_value_avg_count++;
- }
- }
-
- if (thermal_value_avg_count) {
- /* Calculate Average thermal_value after average enough times */
- thermal_value =
- (u8)(thermal_value_avg / thermal_value_avg_count);
- cali_info->thermal_value_delta =
- thermal_value - rtlefu->eeprom_thermalmeter;
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "AVG Thermal Meter = 0x%X, EFUSE Thermal base = 0x%X\n",
- thermal_value, rtlefu->eeprom_thermalmeter);
- }
-
- /* 4 5. Calculate delta, delta_LCK, delta_IQK. */
-
- /* "delta" is used to determine whether thermal value changes or not*/
- delta = (thermal_value > cali_info->thermal_value) ?
- (thermal_value - cali_info->thermal_value) :
- (cali_info->thermal_value - thermal_value);
- delta_LCK = (thermal_value > cali_info->thermal_value_lck) ?
- (thermal_value - cali_info->thermal_value_lck) :
- (cali_info->thermal_value_lck - thermal_value);
- delta_IQK = (thermal_value > cali_info->thermal_value_iqk) ?
- (thermal_value - cali_info->thermal_value_iqk) :
- (cali_info->thermal_value_iqk - thermal_value);
-
- if (cali_info->thermal_value_iqk ==
- 0xff) { /*no PG, use thermal value for IQK*/
- cali_info->thermal_value_iqk = thermal_value;
- delta_IQK =
- (thermal_value > cali_info->thermal_value_iqk) ?
- (thermal_value - cali_info->thermal_value_iqk) :
- (cali_info->thermal_value_iqk - thermal_value);
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "no PG, use thermal_value for IQK\n");
- }
-
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++)
- diff_DPK[p] = (s8)thermal_value - (s8)cali_info->dpk_thermal[p];
-
- /*4 6. If necessary, do LCK.*/
-
- if (!(dm->support_ic_type &
- ODM_RTL8821)) { /*no PG, do LCK at initial status*/
- if (cali_info->thermal_value_lck == 0xff) {
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "no PG, do LCK\n");
- cali_info->thermal_value_lck = thermal_value;
-
- /*Use RTLCK, so close power tracking driver LCK*/
- if (!(dm->support_ic_type & ODM_RTL8814A) &&
- c.phy_lc_calibrate)
- (*c.phy_lc_calibrate)(dm);
-
- delta_LCK =
- (thermal_value > cali_info->thermal_value_lck) ?
- (thermal_value -
- cali_info->thermal_value_lck) :
- (cali_info->thermal_value_lck -
- thermal_value);
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
- delta, delta_LCK, delta_IQK);
-
- /*Delta temperature is equal to or larger than 20 centigrade.*/
- if (delta_LCK >= c.threshold_iqk) {
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_LCK(%d) >= threshold_iqk(%d)\n",
- delta_LCK, c.threshold_iqk);
- cali_info->thermal_value_lck = thermal_value;
-
- /*Use RTLCK, so close power tracking driver LCK*/
- if (!(dm->support_ic_type & ODM_RTL8814A) &&
- c.phy_lc_calibrate)
- (*c.phy_lc_calibrate)(dm);
- }
- }
-
- /*3 7. If necessary, move the index of swing table to adjust Tx power.*/
-
- if (delta > 0 && cali_info->txpowertrack_control) {
- /* "delta" here is used to record the abs value of difference.*/
- delta = thermal_value > rtlefu->eeprom_thermalmeter ?
- (thermal_value - rtlefu->eeprom_thermalmeter) :
- (rtlefu->eeprom_thermalmeter - thermal_value);
- if (delta >= TXPWR_TRACK_TABLE_SIZE)
- delta = TXPWR_TRACK_TABLE_SIZE - 1;
-
- /*4 7.1 The Final Power index = BaseIndex + power_index_offset*/
-
- if (thermal_value > rtlefu->eeprom_thermalmeter) {
- phydm_set_calibrate_info_up(
- dm, &c, delta, cali_info,
- delta_swing_table_idx_tup_a,
- delta_swing_table_idx_tup_b,
- delta_swing_table_idx_tup_c,
- delta_swing_table_idx_tup_d);
- /* JJ ADD 20161014 */
- if (dm->support_ic_type &
- (ODM_RTL8703B | ODM_RTL8723D | ODM_RTL8710B)) {
- /*Save xtal_offset from Xtal table*/
-
- /*recording last Xtal offset*/
- cali_info->xtal_offset_last =
- cali_info->xtal_offset;
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "[Xtal] delta_swing_table_xtal_up[%d] = %d\n",
- delta,
- delta_swing_table_xtal_up[delta]);
- cali_info->xtal_offset =
- delta_swing_table_xtal_up[delta];
- xtal_offset_eanble =
- (cali_info->xtal_offset_last ==
- cali_info->xtal_offset) ?
- 0 :
- 1;
- }
-
- } else {
- phydm_set_calibrate_info_down(
- dm, &c, delta, cali_info,
- delta_swing_table_idx_tdown_a,
- delta_swing_table_idx_tdown_b,
- delta_swing_table_idx_tdown_c,
- delta_swing_table_idx_tdown_d);
- /* JJ ADD 20161014 */
- if (dm->support_ic_type &
- (ODM_RTL8703B | ODM_RTL8723D | ODM_RTL8710B)) {
- /*Save xtal_offset from Xtal table*/
-
- /*recording last Xtal offset*/
- cali_info->xtal_offset_last =
- cali_info->xtal_offset;
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "[Xtal] delta_swing_table_xtal_down[%d] = %d\n",
- delta,
- delta_swing_table_xtal_down[delta]);
- cali_info->xtal_offset =
- delta_swing_table_xtal_down[delta];
- xtal_offset_eanble =
- (cali_info->xtal_offset_last ==
- cali_info->xtal_offset) ?
- 0 :
- 1;
- }
- }
-
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++) {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "\n\n=========================== [path-%d] Calculating power_index_offset===========================\n",
- p);
-
- if (cali_info->delta_power_index[p] ==
- cali_info->delta_power_index_last[p]) {
- /* If Thermal value changes but lookup table
- * value still the same
- */
- cali_info->power_index_offset[p] = 0;
- } else {
- /*Power idx diff between 2 times Pwr Tracking*/
- cali_info->power_index_offset[p] =
- cali_info->delta_power_index[p] -
- cali_info->delta_power_index_last[p];
- }
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "[path-%d] power_index_offset(%d) = delta_power_index(%d) - delta_power_index_last(%d)\n",
- p, cali_info->power_index_offset[p],
- cali_info->delta_power_index[p],
- cali_info->delta_power_index_last[p]);
-
- cali_info->OFDM_index[p] =
- cali_info->bb_swing_idx_ofdm_base[p] +
- cali_info->power_index_offset[p];
- cali_info->CCK_index =
- cali_info->bb_swing_idx_cck_base +
- cali_info->power_index_offset[p];
-
- cali_info->bb_swing_idx_cck = cali_info->CCK_index;
- cali_info->bb_swing_idx_ofdm[p] =
- cali_info->OFDM_index[p];
-
- /*******Print BB Swing base and index Offset**********/
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "The 'CCK' final index(%d) = BaseIndex(%d) + power_index_offset(%d)\n",
- cali_info->bb_swing_idx_cck,
- cali_info->bb_swing_idx_cck_base,
- cali_info->power_index_offset[p]);
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "The 'OFDM' final index(%d) = BaseIndex[%d](%d) + power_index_offset(%d)\n",
- cali_info->bb_swing_idx_ofdm[p], p,
- cali_info->bb_swing_idx_ofdm_base[p],
- cali_info->power_index_offset[p]);
-
- /*4 7.1 Handle boundary conditions of index.*/
-
- if (cali_info->OFDM_index[p] >
- c.swing_table_size_ofdm - 1)
- cali_info->OFDM_index[p] =
- c.swing_table_size_ofdm - 1;
- else if (cali_info->OFDM_index[p] <= OFDM_min_index)
- cali_info->OFDM_index[p] = OFDM_min_index;
- }
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "\n\n========================================================================================================\n");
-
- if (cali_info->CCK_index > c.swing_table_size_cck - 1)
- cali_info->CCK_index = c.swing_table_size_cck - 1;
- else if (cali_info->CCK_index <= 0)
- cali_info->CCK_index = 0;
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "The thermal meter is unchanged or TxPowerTracking OFF(%d): thermal_value: %d, cali_info->thermal_value: %d\n",
- cali_info->txpowertrack_control, thermal_value,
- cali_info->thermal_value);
-
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++)
- cali_info->power_index_offset[p] = 0;
- }
-
- /*Print Swing base & current*/
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "TxPowerTracking: [CCK] Swing Current index: %d, Swing base index: %d\n",
- cali_info->CCK_index, cali_info->bb_swing_idx_cck_base);
-
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++)
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "TxPowerTracking: [OFDM] Swing Current index: %d, Swing base index[%d]: %d\n",
- cali_info->OFDM_index[p], p,
- cali_info->bb_swing_idx_ofdm_base[p]);
-
- if ((dm->support_ic_type & ODM_RTL8814A)) {
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "power_tracking_type=%d\n", power_tracking_type);
-
- if (power_tracking_type == 0) {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "**********Enter POWER Tracking MIX_MODE**********\n");
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++)
- (*c.odm_tx_pwr_track_set_pwr)(dm, MIX_MODE, p,
- 0);
- } else if (power_tracking_type == 1) {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "**********Enter POWER Tracking MIX(2G) TSSI(5G) MODE**********\n");
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++)
- (*c.odm_tx_pwr_track_set_pwr)(
- dm, MIX_2G_TSSI_5G_MODE, p, 0);
- } else if (power_tracking_type == 2) {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "**********Enter POWER Tracking MIX(5G) TSSI(2G)MODE**********\n");
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++)
- (*c.odm_tx_pwr_track_set_pwr)(
- dm, MIX_5G_TSSI_2G_MODE, p, 0);
- } else if (power_tracking_type == 3) {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "**********Enter POWER Tracking TSSI MODE**********\n");
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++)
- (*c.odm_tx_pwr_track_set_pwr)(dm, TSSI_MODE, p,
- 0);
- }
- /*Record last Power Tracking Thermal value*/
- cali_info->thermal_value = thermal_value;
-
- } else if ((cali_info->power_index_offset[ODM_RF_PATH_A] != 0 ||
- cali_info->power_index_offset[ODM_RF_PATH_B] != 0 ||
- cali_info->power_index_offset[ODM_RF_PATH_C] != 0 ||
- cali_info->power_index_offset[ODM_RF_PATH_D] != 0) &&
- cali_info->txpowertrack_control &&
- (rtlefu->eeprom_thermalmeter != 0xff)) {
- /* 4 7.2 Configure the Swing Table to adjust Tx Power. */
-
- /*Always true after Tx Power is adjusted by power tracking.*/
- cali_info->is_tx_power_changed = true;
- /* 2012/04/23 MH According to Luke's suggestion, we can not
- * write BB digital to increase TX power. Otherwise, EVM will
- * be bad.
- */
- /* 2012/04/25 MH Add for tx power tracking to set tx power in
- * tx agc for 88E.
- */
- if (thermal_value > cali_info->thermal_value) {
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++) {
- /* print temperature increasing */
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "Temperature Increasing(%d): delta_pi: %d, delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- p, cali_info->power_index_offset[p],
- delta, thermal_value,
- rtlefu->eeprom_thermalmeter,
- cali_info->thermal_value);
- }
- } else if (thermal_value <
- cali_info->thermal_value) { /*Low temperature*/
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++) {
- /* print temperature decreasing */
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "Temperature Decreasing(%d): delta_pi: %d, delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- p, cali_info->power_index_offset[p],
- delta, thermal_value,
- rtlefu->eeprom_thermalmeter,
- cali_info->thermal_value);
- }
- }
-
- if (thermal_value > rtlefu->eeprom_thermalmeter) {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "Temperature(%d) higher than PG value(%d)\n",
- thermal_value, rtlefu->eeprom_thermalmeter);
-
- phydm_odm_tx_power_set(dm, &c, indexforchannel, 0);
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "Temperature(%d) lower than PG value(%d)\n",
- thermal_value, rtlefu->eeprom_thermalmeter);
- phydm_odm_tx_power_set(dm, &c, indexforchannel, 1);
- }
-
- /*Record last time Power Tracking result as base.*/
- cali_info->bb_swing_idx_cck_base = cali_info->bb_swing_idx_cck;
-
- for (p = ODM_RF_PATH_A; p < c.rf_path_count; p++)
- cali_info->bb_swing_idx_ofdm_base[p] =
- cali_info->bb_swing_idx_ofdm[p];
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "cali_info->thermal_value = %d thermal_value= %d\n",
- cali_info->thermal_value, thermal_value);
-
- /*Record last Power Tracking Thermal value*/
- cali_info->thermal_value = thermal_value;
- }
-
- if (dm->support_ic_type == ODM_RTL8703B ||
- dm->support_ic_type == ODM_RTL8723D ||
- dm->support_ic_type == ODM_RTL8710B) { /* JJ ADD 20161014 */
-
- if (xtal_offset_eanble != 0 &&
- cali_info->txpowertrack_control &&
- rtlefu->eeprom_thermalmeter != 0xff) {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "**********Enter Xtal Tracking**********\n");
-
- if (thermal_value > rtlefu->eeprom_thermalmeter) {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "Temperature(%d) higher than PG value(%d)\n",
- thermal_value,
- rtlefu->eeprom_thermalmeter);
- (*c.odm_txxtaltrack_set_xtal)(dm);
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "Temperature(%d) lower than PG value(%d)\n",
- thermal_value,
- rtlefu->eeprom_thermalmeter);
- (*c.odm_txxtaltrack_set_xtal)(dm);
- }
- }
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "**********End Xtal Tracking**********\n");
- }
-
- if (!IS_HARDWARE_TYPE_8723B(adapter)) {
- /* Delta temperature is equal to or larger than 20 centigrade
- * (When threshold is 8).
- */
- if (delta_IQK >= c.threshold_iqk) {
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "delta_IQK(%d) >= threshold_iqk(%d)\n",
- delta_IQK, c.threshold_iqk);
- if (!cali_info->is_iqk_in_progress)
- (*c.do_iqk)(dm, delta_IQK, thermal_value, 8);
- }
- }
- if (cali_info->dpk_thermal[ODM_RF_PATH_A] != 0) {
- if (diff_DPK[ODM_RF_PATH_A] >= c.threshold_dpk) {
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x1);
- odm_set_bb_reg(
- dm, 0xcc4,
- BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10),
- (diff_DPK[ODM_RF_PATH_A] / c.threshold_dpk));
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x0);
- } else if ((diff_DPK[ODM_RF_PATH_A] <= -1 * c.threshold_dpk)) {
- s32 value = 0x20 +
- (diff_DPK[ODM_RF_PATH_A] / c.threshold_dpk);
-
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x1);
- odm_set_bb_reg(dm, 0xcc4, BIT(14) | BIT(13) | BIT(12) |
- BIT(11) | BIT(10),
- value);
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x0);
- } else {
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x1);
- odm_set_bb_reg(dm, 0xcc4, BIT(14) | BIT(13) | BIT(12) |
- BIT(11) | BIT(10),
- 0);
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x0);
- }
- }
- if (cali_info->dpk_thermal[ODM_RF_PATH_B] != 0) {
- if (diff_DPK[ODM_RF_PATH_B] >= c.threshold_dpk) {
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x1);
- odm_set_bb_reg(
- dm, 0xec4,
- BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10),
- (diff_DPK[ODM_RF_PATH_B] / c.threshold_dpk));
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x0);
- } else if ((diff_DPK[ODM_RF_PATH_B] <= -1 * c.threshold_dpk)) {
- s32 value = 0x20 +
- (diff_DPK[ODM_RF_PATH_B] / c.threshold_dpk);
-
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x1);
- odm_set_bb_reg(dm, 0xec4, BIT(14) | BIT(13) | BIT(12) |
- BIT(11) | BIT(10),
- value);
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x0);
- } else {
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x1);
- odm_set_bb_reg(dm, 0xec4, BIT(14) | BIT(13) | BIT(12) |
- BIT(11) | BIT(10),
- 0);
- odm_set_bb_reg(dm, 0x82c, BIT(31), 0x0);
- }
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK, "<===%s\n", __func__);
-
- cali_info->tx_powercount = 0;
-}
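
Step 7 of the callback above boils down to an absolute thermal difference against the efuse calibration value, clamped to the swing-table length and then used to index the up/down delta tables (negated for the cooler direction). A compact standalone sketch of just that step; the table length and contents here are invented, the real ones come from the per-chip get_delta_swing_table() callback:

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 30			/* stands in for TXPWR_TRACK_TABLE_SIZE */

static const uint8_t swing_up[TABLE_SIZE]   = { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4 };
static const uint8_t swing_down[TABLE_SIZE] = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5 };

/* Signed power-index offset for the current thermal reading relative to
 * the calibration value burned into the efuse.
 */
static int8_t power_index_delta(uint8_t thermal_now, uint8_t thermal_efuse)
{
	uint8_t delta = thermal_now > thermal_efuse ?
			thermal_now - thermal_efuse :
			thermal_efuse - thermal_now;

	if (delta >= TABLE_SIZE)	/* clamp to the last table entry */
		delta = TABLE_SIZE - 1;

	return thermal_now > thermal_efuse ?
	       (int8_t)swing_up[delta] : -(int8_t)swing_down[delta];
}

int main(void)
{
	printf("%d\n", power_index_delta(0x20, 0x1c));	/* warmer by 4: +2 */
	printf("%d\n", power_index_delta(0x18, 0x1c));	/* cooler by 4: -2 */
	return 0;
}
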
-
-/* 3============================================================
- * 3 IQ Calibration
- * 3============================================================
- */
-
-void odm_reset_iqk_result(void *dm_void) { return; }
-
-u8 odm_get_right_chnl_place_for_iqk(u8 chnl)
-{
- u8 channel_all[ODM_TARGET_CHNL_NUM_2G_5G] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54,
- 56, 58, 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136,
- 138, 140, 149, 151, 153, 155, 157, 159, 161, 163, 165};
- u8 place = chnl;
-
- if (chnl > 14) {
- for (place = 14; place < sizeof(channel_all); place++) {
- if (channel_all[place] == chnl)
- return place - 13;
- }
- }
- return 0;
-}
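
Derived from the channel_all table above: every 2.4 GHz channel (1-14) falls through to the final return and shares slot 0, while a 5 GHz channel maps to its position in the list minus 13, so channel 36 returns 1 and channel 165 returns 45, the last of the ODM_TARGET_CHNL_NUM_2G_5G - 14 = 45 five-GHz slots.
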
-
-static void odm_iq_calibrate(struct phy_dm_struct *dm)
-{
- void *adapter = dm->adapter;
-
- if (IS_HARDWARE_TYPE_8812AU(adapter))
- return;
-
- if (dm->is_linked) {
- if ((*dm->channel != dm->pre_channel) &&
- (!*dm->is_scan_in_process)) {
- dm->pre_channel = *dm->channel;
- dm->linked_interval = 0;
- }
-
- if (dm->linked_interval < 3)
- dm->linked_interval++;
-
- if (dm->linked_interval == 2) {
- if (IS_HARDWARE_TYPE_8814A(adapter))
- ;
-
- else if (IS_HARDWARE_TYPE_8822B(adapter))
- phy_iq_calibrate_8822b(dm, false);
- }
- } else {
- dm->linked_interval = 0;
- }
-}
-
-void phydm_rf_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- odm_txpowertracking_init(dm);
-
- odm_clear_txpowertracking_state(dm);
-}
-
-void phydm_rf_watchdog(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- odm_txpowertracking_check(dm);
- if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- odm_iq_calibrate(dm);
-}
diff --git a/drivers/staging/rtlwifi/phydm/halphyrf_ce.h b/drivers/staging/rtlwifi/phydm/halphyrf_ce.h
deleted file mode 100644
index c542efc7d0e0..000000000000
--- a/drivers/staging/rtlwifi/phydm/halphyrf_ce.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __HAL_PHY_RF_H__
-#define __HAL_PHY_RF_H__
-
-#include "phydm_kfree.h"
-
-#include "rtl8822b/phydm_iqk_8822b.h"
-
-#include "phydm_powertracking_ce.h"
-
-enum spur_cal_method { PLL_RESET, AFE_PHASE_SEL };
-
-enum pwrtrack_method {
- BBSWING,
- TXAGC,
- MIX_MODE,
- TSSI_MODE,
- MIX_2G_TSSI_5G_MODE,
- MIX_5G_TSSI_2G_MODE
-};
-
-typedef void (*func_set_pwr)(void *, enum pwrtrack_method, u8, u8);
-typedef void (*func_iqk)(void *, u8, u8, u8);
-typedef void (*func_lck)(void *);
-typedef void (*func_swing)(void *, u8 **, u8 **, u8 **, u8 **);
-typedef void (*func_swing8814only)(void *, u8 **, u8 **, u8 **, u8 **);
-typedef void (*func_swing_xtal)(void *, s8 **, s8 **);
-typedef void (*func_set_xtal)(void *);
-
-struct txpwrtrack_cfg {
- u8 swing_table_size_cck;
- u8 swing_table_size_ofdm;
- u8 threshold_iqk;
- u8 threshold_dpk;
- u8 average_thermal_num;
- u8 rf_path_count;
- u32 thermal_reg_addr;
- func_set_pwr odm_tx_pwr_track_set_pwr;
- func_iqk do_iqk;
- func_lck phy_lc_calibrate;
- func_swing get_delta_swing_table;
- func_swing8814only get_delta_swing_table8814only;
- func_swing_xtal get_delta_swing_xtal_table;
- func_set_xtal odm_txxtaltrack_set_xtal;
-};
-
-void configure_txpower_track(void *dm_void, struct txpwrtrack_cfg *config);
-
-void odm_clear_txpowertracking_state(void *dm_void);
-
-void odm_txpowertracking_callback_thermal_meter(void *dm);
-
-#define ODM_TARGET_CHNL_NUM_2G_5G 59
-
-void odm_reset_iqk_result(void *dm_void);
-u8 odm_get_right_chnl_place_for_iqk(u8 chnl);
-
-void phydm_rf_init(void *dm_void);
-void phydm_rf_watchdog(void *dm_void);
-
-#endif /* #ifndef __HAL_PHY_RF_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/mp_precomp.h b/drivers/staging/rtlwifi/phydm/mp_precomp.h
deleted file mode 100644
index 8e9caca695ff..000000000000
--- a/drivers/staging/rtlwifi/phydm/mp_precomp.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
diff --git a/drivers/staging/rtlwifi/phydm/phydm.c b/drivers/staging/rtlwifi/phydm/phydm.c
deleted file mode 100644
index 52b85b3ddb34..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm.c
+++ /dev/null
@@ -1,1975 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-static const u16 db_invert_table[12][8] = {
- {1, 1, 1, 2, 2, 2, 2, 3},
- {3, 3, 4, 4, 4, 5, 6, 6},
- {7, 8, 9, 10, 11, 13, 14, 16},
- {18, 20, 22, 25, 28, 32, 35, 40},
- {45, 50, 56, 63, 71, 79, 89, 100},
- {112, 126, 141, 158, 178, 200, 224, 251},
- {282, 316, 355, 398, 447, 501, 562, 631},
- {708, 794, 891, 1000, 1122, 1259, 1413, 1585},
- {1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
- {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000},
- {11220, 12589, 14125, 15849, 17783, 19953, 22387, 25119},
- {28184, 31623, 35481, 39811, 44668, 50119, 56234, 65535},
-};
-
-/* ************************************************************
- * Local Function predefine.
- * *************************************************************/
-
-/* START------------COMMON INFO RELATED--------------- */
-
-static void odm_update_power_training_state(struct phy_dm_struct *dm);
-
-/* ************************************************************
- * 3 Export Interface
- * *************************************************************/
-
-/*Y = 10*log(X)*/
-s32 odm_pwdb_conversion(s32 X, u32 total_bit, u32 decimal_bit)
-{
- s32 Y, integer = 0, decimal = 0;
- u32 i;
-
- if (X == 0)
- X = 1; /* log2(x), x can't be 0 */
-
- for (i = (total_bit - 1); i > 0; i--) {
- if (X & BIT(i)) {
- integer = i;
- if (i > 0) {
- /* decimal is 0.5dB*3=1.5dB~=2dB */
- decimal = (X & BIT(i - 1)) ? 2 : 0;
- }
- break;
- }
- }
-
- Y = 3 * (integer - decimal_bit) + decimal; /* 10*log(x)=3*log2(x), */
-
- return Y;
-}
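
odm_pwdb_conversion() above approximates Y = 10*log10(X) as 3*log2(X): the integer part is the index of the highest set bit and the next lower bit contributes roughly another 2 dB. A standalone sketch with a quick check against the exact value (the approximation is the driver's, the test harness is not):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* 10*log10(x) ~= 3*log2(x): take the position of the highest set bit,
 * and add ~2 dB if the next lower bit is also set.
 */
static int32_t pwdb_approx_db(uint32_t x)
{
	int32_t integer = 0, decimal = 0;
	int i;

	if (x == 0)
		x = 1;			/* log(0) is undefined */

	for (i = 31; i > 0; i--) {
		if (x & (1u << i)) {
			integer = i;
			decimal = (x & (1u << (i - 1))) ? 2 : 0;
			break;
		}
	}
	return 3 * integer + decimal;
}

int main(void)
{
	const uint32_t samples[] = { 4, 100, 1000, 65535 };
	unsigned int n;

	for (n = 0; n < sizeof(samples) / sizeof(samples[0]); n++)
		printf("x=%5u  approx=%2d dB  exact=%.1f dB\n",
		       (unsigned int)samples[n], (int)pwdb_approx_db(samples[n]),
		       10.0 * log10((double)samples[n]));
	return 0;			/* link with -lm */
}
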
-
-s32 odm_sign_conversion(s32 value, u32 total_bit)
-{
- if (value & BIT(total_bit - 1))
- value -= BIT(total_bit);
- return value;
-}
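
odm_sign_conversion() above simply sign-extends a total_bit-wide field: odm_sign_conversion(0xff, 8) yields -1, while odm_sign_conversion(0x7f, 8) stays 127 because the top bit is clear.
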
-
-void phydm_seq_sorting(void *dm_void, u32 *value, u32 *rank_idx, u32 *idx_out,
- u8 seq_length)
-{
- u8 i = 0, j = 0;
- u32 tmp_a, tmp_b;
- u32 tmp_idx_a, tmp_idx_b;
-
- for (i = 0; i < seq_length; i++) {
- rank_idx[i] = i;
- /**/
- }
-
- for (i = 0; i < (seq_length - 1); i++) {
- for (j = 0; j < (seq_length - 1 - i); j++) {
- tmp_a = value[j];
- tmp_b = value[j + 1];
-
- tmp_idx_a = rank_idx[j];
- tmp_idx_b = rank_idx[j + 1];
-
- if (tmp_a < tmp_b) {
- value[j] = tmp_b;
- value[j + 1] = tmp_a;
-
- rank_idx[j] = tmp_idx_b;
- rank_idx[j + 1] = tmp_idx_a;
- }
- }
- }
-
- for (i = 0; i < seq_length; i++) {
- idx_out[rank_idx[i]] = i + 1;
- /**/
- }
-}
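
phydm_seq_sorting() above is a descending bubble sort that also reports a 1-based rank for every original position. For example, value = {10, 50, 30} is sorted in place to {50, 30, 10}, rank_idx ends up as {1, 2, 0} (the original indices in sorted order), and idx_out ends up as {3, 1, 2}: element 0 is ranked 3rd, element 1 is ranked 1st and element 2 is ranked 2nd.
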
-
-void odm_init_mp_driver_status(struct phy_dm_struct *dm)
-{
- dm->mp_mode = false;
-}
-
-static void odm_update_mp_driver_status(struct phy_dm_struct *dm)
-{
- /* Do nothing. */
-}
-
-static void phydm_init_trx_antenna_setting(struct phy_dm_struct *dm)
-{
- /*#if (RTL8814A_SUPPORT == 1)*/
-
- if (dm->support_ic_type & (ODM_RTL8814A)) {
- u8 rx_ant = 0, tx_ant = 0;
-
- rx_ant = (u8)odm_get_bb_reg(dm, ODM_REG(BB_RX_PATH, dm),
- ODM_BIT(BB_RX_PATH, dm));
- tx_ant = (u8)odm_get_bb_reg(dm, ODM_REG(BB_TX_PATH, dm),
- ODM_BIT(BB_TX_PATH, dm));
- dm->tx_ant_status = (tx_ant & 0xf);
- dm->rx_ant_status = (rx_ant & 0xf);
- } else if (dm->support_ic_type & (ODM_RTL8723D | ODM_RTL8821C |
- ODM_RTL8710B)) { /* JJ ADD 20161014 */
- dm->tx_ant_status = 0x1;
- dm->rx_ant_status = 0x1;
- }
- /*#endif*/
-}
-
-static void phydm_traffic_load_decision(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- /*---TP & Traffic-load calculation---*/
-
- if (dm->last_tx_ok_cnt > *dm->num_tx_bytes_unicast)
- dm->last_tx_ok_cnt = *dm->num_tx_bytes_unicast;
-
- if (dm->last_rx_ok_cnt > *dm->num_rx_bytes_unicast)
- dm->last_rx_ok_cnt = *dm->num_rx_bytes_unicast;
-
- dm->cur_tx_ok_cnt = *dm->num_tx_bytes_unicast - dm->last_tx_ok_cnt;
- dm->cur_rx_ok_cnt = *dm->num_rx_bytes_unicast - dm->last_rx_ok_cnt;
- dm->last_tx_ok_cnt = *dm->num_tx_bytes_unicast;
- dm->last_rx_ok_cnt = *dm->num_rx_bytes_unicast;
-
- dm->tx_tp = ((dm->tx_tp) >> 1) +
- (u32)(((dm->cur_tx_ok_cnt) >> 18) >>
- 1); /* <<3(8bit), >>20(10^6,M), >>1(2sec)*/
- dm->rx_tp = ((dm->rx_tp) >> 1) +
- (u32)(((dm->cur_rx_ok_cnt) >> 18) >>
- 1); /* <<3(8bit), >>20(10^6,M), >>1(2sec)*/
- dm->total_tp = dm->tx_tp + dm->rx_tp;
-
- dm->pre_traffic_load = dm->traffic_load;
-
- if (dm->cur_tx_ok_cnt > 1875000 ||
- dm->cur_rx_ok_cnt >
- 1875000) { /* ( 1.875M * 8bit ) / 2sec= 7.5M bits /sec )*/
-
- dm->traffic_load = TRAFFIC_HIGH;
- /**/
- } else if (
- dm->cur_tx_ok_cnt > 500000 ||
- dm->cur_rx_ok_cnt >
- 500000) { /*( 0.5M * 8bit ) / 2sec = 2M bits /sec )*/
-
- dm->traffic_load = TRAFFIC_MID;
- /**/
- } else if (
- dm->cur_tx_ok_cnt > 100000 ||
- dm->cur_rx_ok_cnt >
- 100000) { /*( 0.1M * 8bit ) / 2sec = 0.4M bits /sec )*/
-
- dm->traffic_load = TRAFFIC_LOW;
- /**/
- } else {
- dm->traffic_load = TRAFFIC_ULTRA_LOW;
- /**/
- }
-}
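
The thresholds above are byte counts accumulated over the two-second watchdog period, which matches the inline comments: 1875000 bytes * 8 bits / 2 s = 7.5 Mbit/s for TRAFFIC_HIGH, 500000 bytes gives 2 Mbit/s for TRAFFIC_MID, 100000 bytes gives 0.4 Mbit/s for TRAFFIC_LOW, and anything below that is classified as TRAFFIC_ULTRA_LOW.
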
-
-static void phydm_config_ofdm_tx_path(struct phy_dm_struct *dm, u32 path) {}
-
-void phydm_config_ofdm_rx_path(struct phy_dm_struct *dm, u32 path)
-{
- u8 ofdm_rx_path = 0;
-
- if (dm->support_ic_type & (ODM_RTL8192E)) {
- } else if (dm->support_ic_type & (ODM_RTL8812 | ODM_RTL8822B)) {
- if (path == PHYDM_A) {
- ofdm_rx_path = 1;
- /**/
- } else if (path == PHYDM_B) {
- ofdm_rx_path = 2;
- /**/
- } else if (path == PHYDM_AB) {
- ofdm_rx_path = 3;
- /**/
- }
-
- odm_set_bb_reg(dm, 0x808, MASKBYTE0,
- ((ofdm_rx_path << 4) | ofdm_rx_path));
- }
-}
-
-static void phydm_config_cck_rx_antenna_init(struct phy_dm_struct *dm) {}
-
-static void phydm_config_cck_rx_path(struct phy_dm_struct *dm, u8 path,
- u8 path_div_en)
-{
-}
-
-void phydm_config_trx_path(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- /* CCK */
- if (dm_value[0] == 0) {
- if (dm_value[1] == 1) { /*TX*/
- if (dm_value[2] == 1)
- odm_set_bb_reg(dm, 0xa04, 0xf0000000, 0x8);
- else if (dm_value[2] == 2)
- odm_set_bb_reg(dm, 0xa04, 0xf0000000, 0x4);
- else if (dm_value[2] == 3)
- odm_set_bb_reg(dm, 0xa04, 0xf0000000, 0xc);
- } else if (dm_value[1] == 2) { /*RX*/
-
- phydm_config_cck_rx_antenna_init(dm);
-
- if (dm_value[2] == 1)
- phydm_config_cck_rx_path(dm, PHYDM_A,
- CCA_PATHDIV_DISABLE);
- else if (dm_value[2] == 2)
- phydm_config_cck_rx_path(dm, PHYDM_B,
- CCA_PATHDIV_DISABLE);
- else if (dm_value[2] == 3 &&
- dm_value[3] == 1) /*enable path diversity*/
- phydm_config_cck_rx_path(dm, PHYDM_AB,
- CCA_PATHDIV_ENABLE);
- else if (dm_value[2] == 3 && dm_value[3] != 1)
- phydm_config_cck_rx_path(dm, PHYDM_B,
- CCA_PATHDIV_DISABLE);
- }
- }
- /* OFDM */
- else if (dm_value[0] == 1) {
- if (dm_value[1] == 1) { /*TX*/
- phydm_config_ofdm_tx_path(dm, dm_value[2]);
- /**/
- } else if (dm_value[1] == 2) { /*RX*/
- phydm_config_ofdm_rx_path(dm, dm_value[2]);
- /**/
- }
- }
-
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "PHYDM Set path [%s] [%s] = [%s%s%s%s]\n",
- (dm_value[0] == 1) ? "OFDM" : "CCK",
- (dm_value[1] == 1) ? "TX" : "RX",
- (dm_value[2] & 0x1) ? "A" : "", (dm_value[2] & 0x2) ? "B" : "",
- (dm_value[2] & 0x4) ? "C" : "", (dm_value[2] & 0x8) ? "D" : "");
-}
-
-static void phydm_init_cck_setting(struct phy_dm_struct *dm)
-{
- dm->is_cck_high_power = (bool)odm_get_bb_reg(
- dm, ODM_REG(CCK_RPT_FORMAT, dm), ODM_BIT(CCK_RPT_FORMAT, dm));
-
- /* JJ ADD 20161014 */
- if (dm->support_ic_type & (ODM_RTL8723D | ODM_RTL8822B | ODM_RTL8197F |
- ODM_RTL8821C | ODM_RTL8710B))
- dm->cck_new_agc = odm_get_bb_reg(dm, 0xa9c, BIT(17)) ?
- true :
- false; /*1: new agc 0: old agc*/
- else
- dm->cck_new_agc = false;
-}
-
-static void phydm_init_soft_ml_setting(struct phy_dm_struct *dm)
-{
- if (!dm->mp_mode) {
- if (dm->support_ic_type & ODM_RTL8822B)
- odm_set_bb_reg(dm, 0x19a8, MASKDWORD, 0xc10a0000);
- }
-}
-
-static void phydm_init_hw_info_by_rfe(struct phy_dm_struct *dm)
-{
- if (dm->support_ic_type & ODM_RTL8822B)
- phydm_init_hw_info_by_rfe_type_8822b(dm);
-}
-
-static void odm_common_info_self_init(struct phy_dm_struct *dm)
-{
- phydm_init_cck_setting(dm);
- dm->rf_path_rx_enable = (u8)odm_get_bb_reg(dm, ODM_REG(BB_RX_PATH, dm),
- ODM_BIT(BB_RX_PATH, dm));
- odm_init_mp_driver_status(dm);
- phydm_init_trx_antenna_setting(dm);
- phydm_init_soft_ml_setting(dm);
-
- dm->phydm_period = PHYDM_WATCH_DOG_PERIOD;
- dm->phydm_sys_up_time = 0;
-
- if (dm->support_ic_type & ODM_IC_1SS)
- dm->num_rf_path = 1;
- else if (dm->support_ic_type & ODM_IC_2SS)
- dm->num_rf_path = 2;
- else if (dm->support_ic_type & ODM_IC_3SS)
- dm->num_rf_path = 3;
- else if (dm->support_ic_type & ODM_IC_4SS)
- dm->num_rf_path = 4;
-
- dm->tx_rate = 0xFF;
-
- dm->number_linked_client = 0;
- dm->pre_number_linked_client = 0;
- dm->number_active_client = 0;
- dm->pre_number_active_client = 0;
-
- dm->last_tx_ok_cnt = 0;
- dm->last_rx_ok_cnt = 0;
- dm->tx_tp = 0;
- dm->rx_tp = 0;
- dm->total_tp = 0;
- dm->traffic_load = TRAFFIC_LOW;
-
- dm->nbi_set_result = 0;
- dm->is_init_hw_info_by_rfe = false;
- dm->pre_dbg_priority = BB_DBGPORT_RELEASE;
-}
-
-static void odm_common_info_self_update(struct phy_dm_struct *dm)
-{
- u8 entry_cnt = 0, num_active_client = 0;
- u32 i, one_entry_macid = 0;
- struct rtl_sta_info *entry;
-
- /* This variable cannot be used because it is wrong */
- if (*dm->band_width == ODM_BW40M) {
- if (*dm->sec_ch_offset == 1)
- dm->control_channel = *dm->channel - 2;
- else if (*dm->sec_ch_offset == 2)
- dm->control_channel = *dm->channel + 2;
- } else {
- dm->control_channel = *dm->channel;
- }
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- entry = dm->odm_sta_info[i];
- if (IS_STA_VALID(entry)) {
- entry_cnt++;
- if (entry_cnt == 1)
- one_entry_macid = i;
- }
- }
-
- if (entry_cnt == 1) {
- dm->is_one_entry_only = true;
- dm->one_entry_macid = one_entry_macid;
- } else {
- dm->is_one_entry_only = false;
- }
-
- dm->pre_number_linked_client = dm->number_linked_client;
- dm->pre_number_active_client = dm->number_active_client;
-
- dm->number_linked_client = entry_cnt;
- dm->number_active_client = num_active_client;
-
- /* Update MP driver status*/
- odm_update_mp_driver_status(dm);
-
- /*Traffic load information update*/
- phydm_traffic_load_decision(dm);
-
- dm->phydm_sys_up_time += dm->phydm_period;
-}
-
-static void odm_common_info_self_reset(struct phy_dm_struct *dm)
-{
- dm->phy_dbg_info.num_qry_beacon_pkt = 0;
-}
-
-void *phydm_get_structure(struct phy_dm_struct *dm, u8 structure_type)
-
-{
- void *p_struct = NULL;
-
- switch (structure_type) {
- case PHYDM_FALSEALMCNT:
- p_struct = &dm->false_alm_cnt;
- break;
-
- case PHYDM_CFOTRACK:
- p_struct = &dm->dm_cfo_track;
- break;
-
- case PHYDM_ADAPTIVITY:
- p_struct = &dm->adaptivity;
- break;
-
- default:
- break;
- }
-
- return p_struct;
-}
-
-static void odm_hw_setting(struct phy_dm_struct *dm)
-{
- if (dm->support_ic_type & ODM_RTL8822B)
- phydm_hwsetting_8822b(dm);
-}
-
-static void phydm_supportability_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 support_ability = 0;
-
- if (dm->support_ic_type != ODM_RTL8821C)
- return;
-
- switch (dm->support_ic_type) {
- /*---------------AC Series-------------------*/
-
- case ODM_RTL8822B:
- support_ability |= ODM_BB_DIG | ODM_BB_FA_CNT | ODM_BB_CCK_PD |
- ODM_BB_CFO_TRACKING | ODM_BB_RATE_ADAPTIVE |
- ODM_BB_RSSI_MONITOR | ODM_BB_RA_MASK |
- ODM_RF_TX_PWR_TRACK;
- break;
-
- default:
- support_ability |= ODM_BB_DIG | ODM_BB_FA_CNT | ODM_BB_CCK_PD |
- ODM_BB_CFO_TRACKING | ODM_BB_RATE_ADAPTIVE |
- ODM_BB_RSSI_MONITOR | ODM_BB_RA_MASK |
- ODM_RF_TX_PWR_TRACK;
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "[Warning] Supportability Init Warning !!!\n");
- break;
- }
-
- if (*dm->enable_antdiv)
- support_ability |= ODM_BB_ANT_DIV;
-
- if (*dm->enable_adaptivity) {
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "ODM adaptivity is set to Enabled!!!\n");
-
- support_ability |= ODM_BB_ADAPTIVITY;
-
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "ODM adaptivity is set to disabled!!!\n");
- /**/
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "PHYDM support_ability = ((0x%x))\n",
- support_ability);
- odm_cmn_info_init(dm, ODM_CMNINFO_ABILITY, support_ability);
-}
-
-/*
- * 2011/09/21 MH Added to describe the resources each team needs to allocate.
- */
-void odm_dm_init(struct phy_dm_struct *dm)
-{
- phydm_supportability_init(dm);
- odm_common_info_self_init(dm);
- odm_dig_init(dm);
- phydm_nhm_counter_statistics_init(dm);
- phydm_adaptivity_init(dm);
- phydm_ra_info_init(dm);
- odm_rate_adaptive_mask_init(dm);
- odm_cfo_tracking_init(dm);
- odm_edca_turbo_init(dm);
- odm_rssi_monitor_init(dm);
- phydm_rf_init(dm);
- odm_txpowertracking_init(dm);
-
- if (dm->support_ic_type & ODM_RTL8822B)
- phydm_txcurrentcalibration(dm);
-
- odm_antenna_diversity_init(dm);
- odm_auto_channel_select_init(dm);
- odm_dynamic_tx_power_init(dm);
- phydm_init_ra_info(dm);
- adc_smp_init(dm);
-
- phydm_beamforming_init(dm);
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- /* 11n series */
- odm_dynamic_bb_power_saving_init(dm);
- }
-
- phydm_psd_init(dm);
-}
-
-void odm_dm_reset(struct phy_dm_struct *dm)
-{
- struct dig_thres *dig_tab = &dm->dm_dig_table;
-
- odm_ant_div_reset(dm);
- phydm_set_edcca_threshold_api(dm, dig_tab->cur_ig_value);
-}
-
-void phydm_support_ability_debug(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 pre_support_ability;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- pre_support_ability = dm->support_ability;
- PHYDM_SNPRINTF(output + used, out_len - used, "\n%s\n",
- "================================");
- if (dm_value[0] == 100) {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "[Supportability] PhyDM Selection\n");
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "================================");
- PHYDM_SNPRINTF(
- output + used, out_len - used, "00. (( %s ))DIG\n",
- ((dm->support_ability & ODM_BB_DIG) ? ("V") : (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "01. (( %s ))RA_MASK\n",
- ((dm->support_ability & ODM_BB_RA_MASK) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "02. (( %s ))DYNAMIC_TXPWR\n",
- ((dm->support_ability & ODM_BB_DYNAMIC_TXPWR) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "03. (( %s ))FA_CNT\n",
- ((dm->support_ability & ODM_BB_FA_CNT) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "04. (( %s ))RSSI_MONITOR\n",
- ((dm->support_ability & ODM_BB_RSSI_MONITOR) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "05. (( %s ))CCK_PD\n",
- ((dm->support_ability & ODM_BB_CCK_PD) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "06. (( %s ))ANT_DIV\n",
- ((dm->support_ability & ODM_BB_ANT_DIV) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "08. (( %s ))PWR_TRAIN\n",
- ((dm->support_ability & ODM_BB_PWR_TRAIN) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "09. (( %s ))RATE_ADAPTIVE\n",
- ((dm->support_ability & ODM_BB_RATE_ADAPTIVE) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "10. (( %s ))PATH_DIV\n",
- ((dm->support_ability & ODM_BB_PATH_DIV) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "13. (( %s ))ADAPTIVITY\n",
- ((dm->support_ability & ODM_BB_ADAPTIVITY) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "14. (( %s ))struct cfo_tracking\n",
- ((dm->support_ability & ODM_BB_CFO_TRACKING) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "15. (( %s ))NHM_CNT\n",
- ((dm->support_ability & ODM_BB_NHM_CNT) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "16. (( %s ))PRIMARY_CCA\n",
- ((dm->support_ability & ODM_BB_PRIMARY_CCA) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "17. (( %s ))TXBF\n",
- ((dm->support_ability & ODM_BB_TXBF) ? ("V") : (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "18. (( %s ))DYNAMIC_ARFR\n",
- ((dm->support_ability & ODM_BB_DYNAMIC_ARFR) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "20. (( %s ))EDCA_TURBO\n",
- ((dm->support_ability & ODM_MAC_EDCA_TURBO) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "21. (( %s ))DYNAMIC_RX_PATH\n",
- ((dm->support_ability & ODM_BB_DYNAMIC_RX_PATH) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "24. (( %s ))TX_PWR_TRACK\n",
- ((dm->support_ability & ODM_RF_TX_PWR_TRACK) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "25. (( %s ))RX_GAIN_TRACK\n",
- ((dm->support_ability & ODM_RF_RX_GAIN_TRACK) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "26. (( %s ))RF_CALIBRATION\n",
- ((dm->support_ability & ODM_RF_CALIBRATION) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "================================");
- } else {
- if (dm_value[1] == 1) { /* enable */
- dm->support_ability |= BIT(dm_value[0]);
- } else if (dm_value[1] == 2) /* disable */
- dm->support_ability &= ~(BIT(dm_value[0]));
- else {
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "[Warning!!!] 1:enable, 2:disable");
- }
- }
- PHYDM_SNPRINTF(output + used, out_len - used,
- "pre-support_ability = 0x%x\n", pre_support_ability);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Curr-support_ability = 0x%x\n", dm->support_ability);
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "================================");
-}
-
-void phydm_watchdog_mp(struct phy_dm_struct *dm) {}
-/*
- * 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM.
- * Do not add any dummy functions here; take care to use only the DM struct
- * to perform any new ODM_DM work.
- */
-void odm_dm_watchdog(struct phy_dm_struct *dm)
-{
- odm_common_info_self_update(dm);
- phydm_basic_dbg_message(dm);
- odm_hw_setting(dm);
-
- odm_false_alarm_counter_statistics(dm);
- phydm_noisy_detection(dm);
-
- odm_rssi_monitor_check(dm);
-
- if (*dm->is_power_saving) {
- odm_dig_by_rssi_lps(dm);
- phydm_adaptivity(dm);
- odm_antenna_diversity(
- dm); /*enable AntDiv in PS mode, request from SD4 Jeff*/
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "DMWatchdog in power saving mode\n");
- return;
- }
-
- phydm_check_adaptivity(dm);
- odm_update_power_training_state(dm);
- odm_DIG(dm);
- phydm_adaptivity(dm);
- odm_cck_packet_detection_thresh(dm);
-
- phydm_ra_info_watchdog(dm);
- odm_edca_turbo_check(dm);
- odm_cfo_tracking(dm);
- odm_dynamic_tx_power(dm);
- odm_antenna_diversity(dm);
-
- phydm_beamforming_watchdog(dm);
-
- phydm_rf_watchdog(dm);
-
- odm_dtc(dm);
-
- odm_common_info_self_reset(dm);
-}
-
-/*
- * Init /.. Fixed HW value. Only init time.
- */
-void odm_cmn_info_init(struct phy_dm_struct *dm, enum odm_cmninfo cmn_info,
- u32 value)
-{
- /* This section is used for init value */
- switch (cmn_info) {
- /* Fixed ODM value. */
- case ODM_CMNINFO_ABILITY:
- dm->support_ability = (u32)value;
- break;
-
- case ODM_CMNINFO_RF_TYPE:
- dm->rf_type = (u8)value;
- break;
-
- case ODM_CMNINFO_PLATFORM:
- dm->support_platform = (u8)value;
- break;
-
- case ODM_CMNINFO_INTERFACE:
- dm->support_interface = (u8)value;
- break;
-
- case ODM_CMNINFO_MP_TEST_CHIP:
- dm->is_mp_chip = (u8)value;
- break;
-
- case ODM_CMNINFO_IC_TYPE:
- dm->support_ic_type = value;
- break;
-
- case ODM_CMNINFO_CUT_VER:
- dm->cut_version = (u8)value;
- break;
-
- case ODM_CMNINFO_FAB_VER:
- dm->fab_version = (u8)value;
- break;
-
- case ODM_CMNINFO_RFE_TYPE:
- dm->rfe_type = (u8)value;
- phydm_init_hw_info_by_rfe(dm);
- break;
-
- case ODM_CMNINFO_RF_ANTENNA_TYPE:
- dm->ant_div_type = (u8)value;
- break;
-
- case ODM_CMNINFO_WITH_EXT_ANTENNA_SWITCH:
- dm->with_extenal_ant_switch = (u8)value;
- break;
-
- case ODM_CMNINFO_BE_FIX_TX_ANT:
- dm->dm_fat_table.b_fix_tx_ant = (u8)value;
- break;
-
- case ODM_CMNINFO_BOARD_TYPE:
- if (!dm->is_init_hw_info_by_rfe)
- dm->board_type = (u8)value;
- break;
-
- case ODM_CMNINFO_PACKAGE_TYPE:
- if (!dm->is_init_hw_info_by_rfe)
- dm->package_type = (u8)value;
- break;
-
- case ODM_CMNINFO_EXT_LNA:
- if (!dm->is_init_hw_info_by_rfe)
- dm->ext_lna = (u8)value;
- break;
-
- case ODM_CMNINFO_5G_EXT_LNA:
- if (!dm->is_init_hw_info_by_rfe)
- dm->ext_lna_5g = (u8)value;
- break;
-
- case ODM_CMNINFO_EXT_PA:
- if (!dm->is_init_hw_info_by_rfe)
- dm->ext_pa = (u8)value;
- break;
-
- case ODM_CMNINFO_5G_EXT_PA:
- if (!dm->is_init_hw_info_by_rfe)
- dm->ext_pa_5g = (u8)value;
- break;
-
- case ODM_CMNINFO_GPA:
- if (!dm->is_init_hw_info_by_rfe)
- dm->type_gpa = (u16)value;
- break;
-
- case ODM_CMNINFO_APA:
- if (!dm->is_init_hw_info_by_rfe)
- dm->type_apa = (u16)value;
- break;
-
- case ODM_CMNINFO_GLNA:
- if (!dm->is_init_hw_info_by_rfe)
- dm->type_glna = (u16)value;
- break;
-
- case ODM_CMNINFO_ALNA:
- if (!dm->is_init_hw_info_by_rfe)
- dm->type_alna = (u16)value;
- break;
-
- case ODM_CMNINFO_EXT_TRSW:
- if (!dm->is_init_hw_info_by_rfe)
- dm->ext_trsw = (u8)value;
- break;
- case ODM_CMNINFO_EXT_LNA_GAIN:
- dm->ext_lna_gain = (u8)value;
- break;
- case ODM_CMNINFO_PATCH_ID:
- dm->patch_id = (u8)value;
- break;
- case ODM_CMNINFO_BINHCT_TEST:
- dm->is_in_hct_test = (bool)value;
- break;
- case ODM_CMNINFO_BWIFI_TEST:
- dm->wifi_test = (u8)value;
- break;
- case ODM_CMNINFO_SMART_CONCURRENT:
- dm->is_dual_mac_smart_concurrent = (bool)value;
- break;
- case ODM_CMNINFO_DOMAIN_CODE_2G:
- dm->odm_regulation_2_4g = (u8)value;
- break;
- case ODM_CMNINFO_DOMAIN_CODE_5G:
- dm->odm_regulation_5g = (u8)value;
- break;
- case ODM_CMNINFO_CONFIG_BB_RF:
- dm->config_bbrf = (bool)value;
- break;
- case ODM_CMNINFO_IQKFWOFFLOAD:
- dm->iqk_fw_offload = (u8)value;
- break;
- case ODM_CMNINFO_IQKPAOFF:
- dm->rf_calibrate_info.is_iqk_pa_off = (bool)value;
- break;
- case ODM_CMNINFO_REGRFKFREEENABLE:
- dm->rf_calibrate_info.reg_rf_kfree_enable = (u8)value;
- break;
- case ODM_CMNINFO_RFKFREEENABLE:
- dm->rf_calibrate_info.rf_kfree_enable = (u8)value;
- break;
- case ODM_CMNINFO_NORMAL_RX_PATH_CHANGE:
- dm->normal_rx_path = (u8)value;
- break;
- case ODM_CMNINFO_EFUSE0X3D8:
- dm->efuse0x3d8 = (u8)value;
- break;
- case ODM_CMNINFO_EFUSE0X3D7:
- dm->efuse0x3d7 = (u8)value;
- break;
- /* To remove the compiler warning, must add an empty default statement
- * to handle the other values.
- */
- default:
- /* do nothing */
- break;
- }
-}
-
-void odm_cmn_info_hook(struct phy_dm_struct *dm, enum odm_cmninfo cmn_info,
- void *value)
-{
- /* */
- /* Hook call by reference pointer. */
- /* */
- switch (cmn_info) {
- /* */
- /* Dynamic call by reference pointer. */
- /* */
- case ODM_CMNINFO_MAC_PHY_MODE:
- dm->mac_phy_mode = (u8 *)value;
- break;
-
- case ODM_CMNINFO_TX_UNI:
- dm->num_tx_bytes_unicast = (u64 *)value;
- break;
-
- case ODM_CMNINFO_RX_UNI:
- dm->num_rx_bytes_unicast = (u64 *)value;
- break;
-
- case ODM_CMNINFO_WM_MODE:
- dm->wireless_mode = (u8 *)value;
- break;
-
- case ODM_CMNINFO_BAND:
- dm->band_type = (u8 *)value;
- break;
-
- case ODM_CMNINFO_SEC_CHNL_OFFSET:
- dm->sec_ch_offset = (u8 *)value;
- break;
-
- case ODM_CMNINFO_SEC_MODE:
- dm->security = (u8 *)value;
- break;
-
- case ODM_CMNINFO_BW:
- dm->band_width = (u8 *)value;
- break;
-
- case ODM_CMNINFO_CHNL:
- dm->channel = (u8 *)value;
- break;
-
- case ODM_CMNINFO_DMSP_GET_VALUE:
- dm->is_get_value_from_other_mac = (bool *)value;
- break;
-
- case ODM_CMNINFO_BUDDY_ADAPTOR:
- dm->buddy_adapter = (void **)value;
- break;
-
- case ODM_CMNINFO_DMSP_IS_MASTER:
- dm->is_master_of_dmsp = (bool *)value;
- break;
-
- case ODM_CMNINFO_SCAN:
- dm->is_scan_in_process = (bool *)value;
- break;
-
- case ODM_CMNINFO_POWER_SAVING:
- dm->is_power_saving = (bool *)value;
- break;
-
- case ODM_CMNINFO_ONE_PATH_CCA:
- dm->one_path_cca = (u8 *)value;
- break;
-
- case ODM_CMNINFO_DRV_STOP:
- dm->is_driver_stopped = (bool *)value;
- break;
-
- case ODM_CMNINFO_PNP_IN:
- dm->is_driver_is_going_to_pnp_set_power_sleep = (bool *)value;
- break;
-
- case ODM_CMNINFO_INIT_ON:
- dm->pinit_adpt_in_progress = (bool *)value;
- break;
-
- case ODM_CMNINFO_ANT_TEST:
- dm->antenna_test = (u8 *)value;
- break;
-
- case ODM_CMNINFO_NET_CLOSED:
- dm->is_net_closed = (bool *)value;
- break;
-
- case ODM_CMNINFO_FORCED_RATE:
- dm->forced_data_rate = (u16 *)value;
- break;
- case ODM_CMNINFO_ANT_DIV:
- dm->enable_antdiv = (u8 *)value;
- break;
- case ODM_CMNINFO_ADAPTIVITY:
- dm->enable_adaptivity = (u8 *)value;
- break;
- case ODM_CMNINFO_FORCED_IGI_LB:
- dm->pu1_forced_igi_lb = (u8 *)value;
- break;
-
- case ODM_CMNINFO_P2P_LINK:
- dm->dm_dig_table.is_p2p_in_process = (u8 *)value;
- break;
-
- case ODM_CMNINFO_IS1ANTENNA:
- dm->is_1_antenna = (bool *)value;
- break;
-
- case ODM_CMNINFO_RFDEFAULTPATH:
- dm->rf_default_path = (u8 *)value;
- break;
-
- case ODM_CMNINFO_FCS_MODE:
- dm->is_fcs_mode_enable = (bool *)value;
- break;
-	/*Added by YuChen for beamforming PhyDM*/
- case ODM_CMNINFO_HUBUSBMODE:
- dm->hub_usb_mode = (u8 *)value;
- break;
- case ODM_CMNINFO_FWDWRSVDPAGEINPROGRESS:
- dm->is_fw_dw_rsvd_page_in_progress = (bool *)value;
- break;
- case ODM_CMNINFO_TX_TP:
- dm->current_tx_tp = (u32 *)value;
- break;
- case ODM_CMNINFO_RX_TP:
- dm->current_rx_tp = (u32 *)value;
- break;
- case ODM_CMNINFO_SOUNDING_SEQ:
- dm->sounding_seq = (u8 *)value;
- break;
- case ODM_CMNINFO_FORCE_TX_ANT_BY_TXDESC:
- dm->dm_fat_table.p_force_tx_ant_by_desc = (u8 *)value;
- break;
- case ODM_CMNINFO_SET_S0S1_DEFAULT_ANTENNA:
- dm->dm_fat_table.p_default_s0_s1 = (u8 *)value;
- break;
-
- default:
- /*do nothing*/
- break;
- }
-}
-
-void odm_cmn_info_ptr_array_hook(struct phy_dm_struct *dm,
- enum odm_cmninfo cmn_info, u16 index,
- void *value)
-{
- /*Hook call by reference pointer.*/
- switch (cmn_info) {
- /*Dynamic call by reference pointer. */
- case ODM_CMNINFO_STA_STATUS:
- dm->odm_sta_info[index] = (struct rtl_sta_info *)value;
-
- if (IS_STA_VALID(dm->odm_sta_info[index]))
- dm->platform2phydm_macid_table[index] = index;
-
- break;
-	/* An empty default case must be added to handle the remaining values
-	 * and avoid a compiler warning.
-	 */
- default:
- /* do nothing */
- break;
- }
-}
-
-/*
- * Update band/channel/etc. These values are dynamic but not per-packet.
- */
-void odm_cmn_info_update(struct phy_dm_struct *dm, u32 cmn_info, u64 value)
-{
-	/* This init value may be changed at run time. */
- switch (cmn_info) {
- case ODM_CMNINFO_LINK_IN_PROGRESS:
- dm->is_link_in_process = (bool)value;
- break;
-
- case ODM_CMNINFO_ABILITY:
- dm->support_ability = (u32)value;
- break;
-
- case ODM_CMNINFO_RF_TYPE:
- dm->rf_type = (u8)value;
- break;
-
- case ODM_CMNINFO_WIFI_DIRECT:
- dm->is_wifi_direct = (bool)value;
- break;
-
- case ODM_CMNINFO_WIFI_DISPLAY:
- dm->is_wifi_display = (bool)value;
- break;
-
- case ODM_CMNINFO_LINK:
- dm->is_linked = (bool)value;
- break;
-
- case ODM_CMNINFO_CMW500LINK:
- dm->is_linkedcmw500 = (bool)value;
- break;
-
- case ODM_CMNINFO_LPSPG:
- dm->is_in_lps_pg = (bool)value;
- break;
-
- case ODM_CMNINFO_STATION_STATE:
- dm->bsta_state = (bool)value;
- break;
-
- case ODM_CMNINFO_RSSI_MIN:
- dm->rssi_min = (u8)value;
- break;
-
- case ODM_CMNINFO_DBG_COMP:
- dm->debug_components = (u32)value;
- break;
-
- case ODM_CMNINFO_DBG_LEVEL:
- dm->debug_level = (u32)value;
- break;
- case ODM_CMNINFO_RA_THRESHOLD_HIGH:
- dm->rate_adaptive.high_rssi_thresh = (u8)value;
- break;
-
- case ODM_CMNINFO_RA_THRESHOLD_LOW:
- dm->rate_adaptive.low_rssi_thresh = (u8)value;
- break;
- /* The following is for BT HS mode and BT coexist mechanism. */
- case ODM_CMNINFO_BT_ENABLED:
- dm->is_bt_enabled = (bool)value;
- break;
-
- case ODM_CMNINFO_BT_HS_CONNECT_PROCESS:
- dm->is_bt_connect_process = (bool)value;
- break;
-
- case ODM_CMNINFO_BT_HS_RSSI:
- dm->bt_hs_rssi = (u8)value;
- break;
-
- case ODM_CMNINFO_BT_OPERATION:
- dm->is_bt_hs_operation = (bool)value;
- break;
-
- case ODM_CMNINFO_BT_LIMITED_DIG:
- dm->is_bt_limited_dig = (bool)value;
- break;
-
- case ODM_CMNINFO_BT_DIG:
- dm->bt_hs_dig_val = (u8)value;
- break;
-
- case ODM_CMNINFO_BT_BUSY:
- dm->is_bt_busy = (bool)value;
- break;
-
- case ODM_CMNINFO_BT_DISABLE_EDCA:
- dm->is_bt_disable_edca_turbo = (bool)value;
- break;
-
- case ODM_CMNINFO_AP_TOTAL_NUM:
- dm->ap_total_num = (u8)value;
- break;
-
- case ODM_CMNINFO_POWER_TRAINING:
- dm->is_disable_power_training = (bool)value;
- break;
-
- default:
- /* do nothing */
- break;
- }
-}
-
-u32 phydm_cmn_info_query(struct phy_dm_struct *dm,
- enum phydm_info_query info_type)
-{
- struct false_alarm_stat *false_alm_cnt =
- (struct false_alarm_stat *)phydm_get_structure(
- dm, PHYDM_FALSEALMCNT);
-
- switch (info_type) {
- case PHYDM_INFO_FA_OFDM:
- return false_alm_cnt->cnt_ofdm_fail;
-
- case PHYDM_INFO_FA_CCK:
- return false_alm_cnt->cnt_cck_fail;
-
- case PHYDM_INFO_FA_TOTAL:
- return false_alm_cnt->cnt_all;
-
- case PHYDM_INFO_CCA_OFDM:
- return false_alm_cnt->cnt_ofdm_cca;
-
- case PHYDM_INFO_CCA_CCK:
- return false_alm_cnt->cnt_cck_cca;
-
- case PHYDM_INFO_CCA_ALL:
- return false_alm_cnt->cnt_cca_all;
-
- case PHYDM_INFO_CRC32_OK_VHT:
- return false_alm_cnt->cnt_vht_crc32_ok;
-
- case PHYDM_INFO_CRC32_OK_HT:
- return false_alm_cnt->cnt_ht_crc32_ok;
-
- case PHYDM_INFO_CRC32_OK_LEGACY:
- return false_alm_cnt->cnt_ofdm_crc32_ok;
-
- case PHYDM_INFO_CRC32_OK_CCK:
- return false_alm_cnt->cnt_cck_crc32_ok;
-
- case PHYDM_INFO_CRC32_ERROR_VHT:
- return false_alm_cnt->cnt_vht_crc32_error;
-
- case PHYDM_INFO_CRC32_ERROR_HT:
- return false_alm_cnt->cnt_ht_crc32_error;
-
- case PHYDM_INFO_CRC32_ERROR_LEGACY:
- return false_alm_cnt->cnt_ofdm_crc32_error;
-
- case PHYDM_INFO_CRC32_ERROR_CCK:
- return false_alm_cnt->cnt_cck_crc32_error;
-
- case PHYDM_INFO_EDCCA_FLAG:
- return false_alm_cnt->edcca_flag;
-
- case PHYDM_INFO_OFDM_ENABLE:
- return false_alm_cnt->ofdm_block_enable;
-
- case PHYDM_INFO_CCK_ENABLE:
- return false_alm_cnt->cck_block_enable;
-
- case PHYDM_INFO_DBG_PORT_0:
- return false_alm_cnt->dbg_port0;
-
- default:
- return 0xffffffff;
- }
-}
-
-void odm_init_all_timers(struct phy_dm_struct *dm) {}
-
-void odm_cancel_all_timers(struct phy_dm_struct *dm) {}
-
-void odm_release_all_timers(struct phy_dm_struct *dm) {}
-
-/* 3============================================================
- * 3 Tx Power Tracking
- * 3============================================================
- */
-
-/* Needed by the ODM CE platform.
- * Moved here for use by the antenna detection mechanism.
- */
-
-u32 odm_convert_to_db(u32 value)
-{
- u8 i;
- u8 j;
- u32 dB;
-
- value = value & 0xFFFF;
-
- for (i = 0; i < 12; i++) {
- if (value <= db_invert_table[i][7])
- break;
- }
-
- if (i >= 12)
- return 96; /* maximum 96 dB */
-
- for (j = 0; j < 8; j++) {
- if (value <= db_invert_table[i][j])
- break;
- }
-
- dB = (i << 3) + j + 1;
-
- return dB;
-}
-
-u32 odm_convert_to_linear(u32 value)
-{
- u8 i;
- u8 j;
- u32 linear;
-
- /* 1dB~96dB */
-
- value = value & 0xFF;
-
- i = (u8)((value - 1) >> 3);
- j = (u8)(value - 1) - (i << 3);
-
- linear = db_invert_table[i][j];
-
- return linear;
-}
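For reference, the index arithmetic above can be checked with a small standalone sketch. It assumes (not shown in this file) that db_invert_table[i][j] holds the linear value for (8 * i + j + 1) dB; the table here is regenerated with pow() purely for illustration.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for db_invert_table: entry (i, j) is assumed to be
 * the linear power for (8 * i + j + 1) dB, i.e. round(10 ^ (dB / 10)).
 */
static uint32_t inv_table[12][8];

static void build_table(void)
{
	for (int i = 0; i < 12; i++)
		for (int j = 0; j < 8; j++)
			inv_table[i][j] =
				(uint32_t)(pow(10.0, (8 * i + j + 1) / 10.0) + 0.5);
}

/* Same search as odm_convert_to_db(): pick the first row whose last entry
 * covers the value, then the first matching column; dB = 8 * i + j + 1.
 */
static uint32_t to_db(uint32_t value)
{
	int i, j;

	value &= 0xFFFF;
	for (i = 0; i < 12 && value > inv_table[i][7]; i++)
		;
	if (i >= 12)
		return 96;	/* clamp at 96 dB */
	for (j = 0; j < 8 && value > inv_table[i][j]; j++)
		;
	return (i << 3) + j + 1;
}

int main(void)
{
	build_table();
	/* 1000 in linear scale is 30 dB; the reverse lookup agrees. */
	printf("to_db(1000) = %u, table[30 dB] = %u\n",
	       to_db(1000), inv_table[(30 - 1) >> 3][(30 - 1) & 7]);
	return 0;
}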
-
-/*
- * ODM multi-port consideration, added by Roger, 2013.10.01.
- */
-void odm_asoc_entry_init(struct phy_dm_struct *dm) {}
-
-/* Justin: Adjust the response frame TX power according to the current RSSI */
-void odm_dtc(struct phy_dm_struct *dm) {}
-
-static void odm_update_power_training_state(struct phy_dm_struct *dm)
-{
- struct false_alarm_stat *false_alm_cnt =
- (struct false_alarm_stat *)phydm_get_structure(
- dm, PHYDM_FALSEALMCNT);
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- u32 score = 0;
-
- if (!(dm->support_ability & ODM_BB_PWR_TRAIN))
- return;
-
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK, "%s()============>\n", __func__);
- dm->is_change_state = false;
-
- /* Debug command */
- if (dm->force_power_training_state) {
- if (dm->force_power_training_state == 1 &&
- !dm->is_disable_power_training) {
- dm->is_change_state = true;
- dm->is_disable_power_training = true;
- } else if (dm->force_power_training_state == 2 &&
- dm->is_disable_power_training) {
- dm->is_change_state = true;
- dm->is_disable_power_training = false;
- }
-
- dm->PT_score = 0;
- dm->phy_dbg_info.num_qry_phy_status_ofdm = 0;
- dm->phy_dbg_info.num_qry_phy_status_cck = 0;
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "%s(): force_power_training_state = %d\n",
- __func__, dm->force_power_training_state);
- return;
- }
-
- if (!dm->is_linked)
- return;
-
- /* First connect */
- if (dm->is_linked && !dig_tab->is_media_connect_0) {
- dm->PT_score = 0;
- dm->is_change_state = true;
- dm->phy_dbg_info.num_qry_phy_status_ofdm = 0;
- dm->phy_dbg_info.num_qry_phy_status_cck = 0;
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK, "%s(): First Connect\n",
- __func__);
- return;
- }
-
- /* Compute score */
- if (dm->nhm_cnt_0 >= 215) {
- score = 2;
- } else if (dm->nhm_cnt_0 >= 190) {
- score = 1; /* unknown state */
- } else {
- u32 rx_pkt_cnt;
-
- rx_pkt_cnt = (u32)(dm->phy_dbg_info.num_qry_phy_status_ofdm) +
- (u32)(dm->phy_dbg_info.num_qry_phy_status_cck);
-
- if ((false_alm_cnt->cnt_cca_all > 31 && rx_pkt_cnt > 31) &&
- false_alm_cnt->cnt_cca_all >= rx_pkt_cnt) {
- if ((rx_pkt_cnt + (rx_pkt_cnt >> 1)) <=
- false_alm_cnt->cnt_cca_all)
- score = 0;
- else if ((rx_pkt_cnt + (rx_pkt_cnt >> 2)) <=
- false_alm_cnt->cnt_cca_all)
- score = 1;
- else
- score = 2;
- }
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "%s(): rx_pkt_cnt = %d, cnt_cca_all = %d\n",
- __func__, rx_pkt_cnt, false_alm_cnt->cnt_cca_all);
- }
- ODM_RT_TRACE(
- dm, ODM_COMP_RA_MASK,
- "%s(): num_qry_phy_status_ofdm = %d, num_qry_phy_status_cck = %d\n",
- __func__, (u32)(dm->phy_dbg_info.num_qry_phy_status_ofdm),
- (u32)(dm->phy_dbg_info.num_qry_phy_status_cck));
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK, "%s(): nhm_cnt_0 = %d, score = %d\n",
- __func__, dm->nhm_cnt_0, score);
-
- /* smoothing */
- dm->PT_score = (score << 4) + (dm->PT_score >> 1) + (dm->PT_score >> 2);
- score = (dm->PT_score + 32) >> 6;
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "%s(): PT_score = %d, score after smoothing = %d\n",
- __func__, dm->PT_score, score);
-
- /* mode decision */
- if (score == 2) {
- if (dm->is_disable_power_training) {
- dm->is_change_state = true;
- dm->is_disable_power_training = false;
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "%s(): Change state\n", __func__);
- }
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "%s(): Enable Power Training\n", __func__);
- } else if (score == 0) {
- if (!dm->is_disable_power_training) {
- dm->is_change_state = true;
- dm->is_disable_power_training = true;
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "%s(): Change state\n", __func__);
- }
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "%s(): Disable Power Training\n", __func__);
- }
-
- dm->phy_dbg_info.num_qry_phy_status_ofdm = 0;
- dm->phy_dbg_info.num_qry_phy_status_cck = 0;
-}
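The PT_score filter above is a simple fixed-point IIR: each new score is weighted by 16 and the previous accumulator decays by 3/4, so a constant input s settles at 64 * s and the (PT_score + 32) >> 6 read-back recovers s with rounding. A standalone sketch (made-up names, no driver state):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pt_score = 0;
	const uint32_t raw_score = 2;	/* e.g. "enable power training" */

	for (int i = 0; i < 12; i++) {
		/* new sample weight 16, previous value decays by 3/4 */
		pt_score = (raw_score << 4) + (pt_score >> 1) + (pt_score >> 2);
		printf("iter %2d: PT_score = %3u, smoothed = %u\n",
		       i, pt_score, (pt_score + 32) >> 6);
	}
	return 0;
}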
-
-/*===========================================================*/
-/* The following is for compile only*/
-/*===========================================================*/
-/*#define TARGET_CHNL_NUM_2G_5G 59*/
-/*===========================================================*/
-
-void phydm_noisy_detection(struct phy_dm_struct *dm)
-{
- u32 total_fa_cnt, total_cca_cnt;
- u32 score = 0, i, score_smooth;
-
- total_cca_cnt = dm->false_alm_cnt.cnt_cca_all;
- total_fa_cnt = dm->false_alm_cnt.cnt_all;
-
- for (i = 0; i <= 16; i++) {
- if (total_fa_cnt * 16 >= total_cca_cnt * (16 - i)) {
- score = 16 - i;
- break;
- }
- }
-
- /* noisy_decision_smooth = noisy_decision_smooth>>1 + (score<<3)>>1; */
- dm->noisy_decision_smooth =
- (dm->noisy_decision_smooth >> 1) + (score << 2);
-
- /* Round the noisy_decision_smooth: +"3" comes from (2^3)/2-1 */
- score_smooth = (total_cca_cnt >= 300) ?
- ((dm->noisy_decision_smooth + 3) >> 3) :
- 0;
-
- dm->noisy_decision = (score_smooth >= 3) ? 1 : 0;
- ODM_RT_TRACE(
- dm, ODM_COMP_NOISY_DETECT,
- "[NoisyDetection] total_cca_cnt=%d, total_fa_cnt=%d, noisy_decision_smooth=%d, score=%d, score_smooth=%d, dm->noisy_decision=%d\n",
- total_cca_cnt, total_fa_cnt, dm->noisy_decision_smooth, score,
- score_smooth, dm->noisy_decision);
-}
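The decision above boils down to a few lines of integer arithmetic; a minimal sketch with made-up counter values (no driver structures) shows one iteration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t total_cca_cnt = 400, total_fa_cnt = 180;	/* illustrative */
	uint32_t noisy_decision_smooth = 0;
	uint32_t score = 0, score_smooth;

	/* score ~= 16 * FA / CCA, capped at 16 */
	for (uint32_t i = 0; i <= 16; i++) {
		if (total_fa_cnt * 16 >= total_cca_cnt * (16 - i)) {
			score = 16 - i;
			break;
		}
	}

	/* IIR smoothing: half of the old value plus 4x the new score */
	noisy_decision_smooth = (noisy_decision_smooth >> 1) + (score << 2);

	/* only trust the result when enough CCA events were observed */
	score_smooth = (total_cca_cnt >= 300) ?
		       ((noisy_decision_smooth + 3) >> 3) : 0;

	printf("score=%u smooth=%u noisy=%d\n",
	       score, score_smooth, score_smooth >= 3);
	return 0;
}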
-
-void phydm_set_ext_switch(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 ext_ant_switch = dm_value[0];
-
- if (dm->support_ic_type & (ODM_RTL8821 | ODM_RTL8881A)) {
- /*Output Pin Settings*/
- odm_set_mac_reg(dm, 0x4C, BIT(23),
- 0); /*select DPDT_P and DPDT_N as output pin*/
- odm_set_mac_reg(dm, 0x4C, BIT(24), 1); /*by WLAN control*/
-
- odm_set_bb_reg(dm, 0xCB4, 0xF, 7); /*DPDT_P = 1b'0*/
- odm_set_bb_reg(dm, 0xCB4, 0xF0, 7); /*DPDT_N = 1b'0*/
-
- if (ext_ant_switch == MAIN_ANT) {
- odm_set_bb_reg(dm, 0xCB4, (BIT(29) | BIT(28)), 1);
- ODM_RT_TRACE(
- dm, ODM_COMP_API,
- "***8821A set ant switch = 2b'01 (Main)\n");
- } else if (ext_ant_switch == AUX_ANT) {
- odm_set_bb_reg(dm, 0xCB4, BIT(29) | BIT(28), 2);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "***8821A set ant switch = 2b'10 (Aux)\n");
- }
- }
-}
-
-static void phydm_csi_mask_enable(void *dm_void, u32 enable)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 reg_value = 0;
-
- reg_value = (enable == CSI_MASK_ENABLE) ? 1 : 0;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, 0xD2C, BIT(28), reg_value);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "Enable CSI Mask: Reg 0xD2C[28] = ((0x%x))\n",
- reg_value);
-
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, 0x874, BIT(0), reg_value);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "Enable CSI Mask: Reg 0x874[0] = ((0x%x))\n",
- reg_value);
- }
-}
-
-static void phydm_clean_all_csi_mask(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, 0xD40, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0xD44, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0xD48, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0xD4c, MASKDWORD, 0);
-
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, 0x880, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0x884, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0x888, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0x88c, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0x890, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0x894, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0x898, MASKDWORD, 0);
- odm_set_bb_reg(dm, 0x89c, MASKDWORD, 0);
- }
-}
-
-static void phydm_set_csi_mask_reg(void *dm_void, u32 tone_idx_tmp,
- u8 tone_direction)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 byte_offset, bit_offset;
- u32 target_reg;
- u8 reg_tmp_value;
- u32 tone_num = 64;
- u32 tone_num_shift = 0;
- u32 csi_mask_reg_p = 0, csi_mask_reg_n = 0;
-
- /* calculate real tone idx*/
- if ((tone_idx_tmp % 10) >= 5)
- tone_idx_tmp += 10;
-
- tone_idx_tmp = (tone_idx_tmp / 10);
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- tone_num = 64;
- csi_mask_reg_p = 0xD40;
- csi_mask_reg_n = 0xD48;
-
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- tone_num = 128;
- csi_mask_reg_p = 0x880;
- csi_mask_reg_n = 0x890;
- }
-
- if (tone_direction == FREQ_POSITIVE) {
- if (tone_idx_tmp >= (tone_num - 1))
- tone_idx_tmp = (tone_num - 1);
-
- byte_offset = (u8)(tone_idx_tmp >> 3);
- bit_offset = (u8)(tone_idx_tmp & 0x7);
- target_reg = csi_mask_reg_p + byte_offset;
-
- } else {
- tone_num_shift = tone_num;
-
- if (tone_idx_tmp >= tone_num)
- tone_idx_tmp = tone_num;
-
- tone_idx_tmp = tone_num - tone_idx_tmp;
-
- byte_offset = (u8)(tone_idx_tmp >> 3);
- bit_offset = (u8)(tone_idx_tmp & 0x7);
- target_reg = csi_mask_reg_n + byte_offset;
- }
-
- reg_tmp_value = odm_read_1byte(dm, target_reg);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "Pre Mask tone idx[%d]: Reg0x%x = ((0x%x))\n",
- (tone_idx_tmp + tone_num_shift), target_reg,
- reg_tmp_value);
- reg_tmp_value |= BIT(bit_offset);
- odm_write_1byte(dm, target_reg, reg_tmp_value);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "New Mask tone idx[%d]: Reg0x%x = ((0x%x))\n",
- (tone_idx_tmp + tone_num_shift), target_reg,
- reg_tmp_value);
-}
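The register write above is effectively a bitmap update: tone index n maps to byte n / 8 and bit n % 8, relative to the positive- or negative-frequency base address. A sketch against an in-memory bitmap (stand-in names, no BB registers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t csi_mask[16] = { 0 };	/* stand-in for the 0xD40../0x880.. range */
	uint32_t tone_idx = 21;

	uint8_t byte_offset = (uint8_t)(tone_idx >> 3);
	uint8_t bit_offset = (uint8_t)(tone_idx & 0x7);

	csi_mask[byte_offset] |= (uint8_t)(1u << bit_offset);

	printf("tone %u -> byte %u, bit %u, value 0x%02x\n",
	       tone_idx, byte_offset, bit_offset, csi_mask[byte_offset]);
	return 0;
}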
-
-static void phydm_set_nbi_reg(void *dm_void, u32 tone_idx_tmp, u32 bw)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 nbi_table_128[NBI_TABLE_SIZE_128] = {
- 25, 55, 85, 115, 135, 155, 185, 205, 225, 245,
- /*1~10*/ /*tone_idx X 10*/
- 265, 285, 305, 335, 355, 375, 395, 415, 435, 455, /*11~20*/
- 485, 505, 525, 555, 585, 615, 635}; /*21~27*/
-
- u32 nbi_table_256[NBI_TABLE_SIZE_256] = {
- 25, 55, 85, 115, 135, 155, 175, 195, 225,
- 245, /*1~10*/
- 265, 285, 305, 325, 345, 365, 385, 405, 425,
- 445, /*11~20*/
- 465, 485, 505, 525, 545, 565, 585, 605, 625,
- 645, /*21~30*/
- 665, 695, 715, 735, 755, 775, 795, 815, 835,
- 855, /*31~40*/
- 875, 895, 915, 935, 955, 975, 995, 1015, 1035,
- 1055, /*41~50*/
- 1085, 1105, 1125, 1145, 1175, 1195, 1225, 1255, 1275}; /*51~59*/
-
- u32 reg_idx = 0;
- u32 i;
- u8 nbi_table_idx = FFT_128_TYPE;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- nbi_table_idx = FFT_128_TYPE;
- } else if (dm->support_ic_type & ODM_IC_11AC_1_SERIES) {
- nbi_table_idx = FFT_256_TYPE;
- } else if (dm->support_ic_type & ODM_IC_11AC_2_SERIES) {
- if (bw == 80)
- nbi_table_idx = FFT_256_TYPE;
- else /*20M, 40M*/
- nbi_table_idx = FFT_128_TYPE;
- }
-
- if (nbi_table_idx == FFT_128_TYPE) {
- for (i = 0; i < NBI_TABLE_SIZE_128; i++) {
- if (tone_idx_tmp < nbi_table_128[i]) {
- reg_idx = i + 1;
- break;
- }
- }
-
- } else if (nbi_table_idx == FFT_256_TYPE) {
- for (i = 0; i < NBI_TABLE_SIZE_256; i++) {
- if (tone_idx_tmp < nbi_table_256[i]) {
- reg_idx = i + 1;
- break;
- }
- }
- }
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, 0xc40, 0x1f000000, reg_idx);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "Set tone idx: Reg0xC40[28:24] = ((0x%x))\n",
- reg_idx);
- /**/
- } else {
- odm_set_bb_reg(dm, 0x87c, 0xfc000, reg_idx);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "Set tone idx: Reg0x87C[19:14] = ((0x%x))\n",
- reg_idx);
- /**/
- }
-}
-
-static void phydm_nbi_enable(void *dm_void, u32 enable)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 reg_value = 0;
-
- reg_value = (enable == NBI_ENABLE) ? 1 : 0;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, 0xc40, BIT(9), reg_value);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "Enable NBI Reg0xC40[9] = ((0x%x))\n", reg_value);
-
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, 0x87c, BIT(13), reg_value);
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "Enable NBI Reg0x87C[13] = ((0x%x))\n", reg_value);
- }
-}
-
-static u8 phydm_calculate_fc(void *dm_void, u32 channel, u32 bw, u32 second_ch,
- u32 *fc_in)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 fc = *fc_in;
- u32 start_ch_per_40m[NUM_START_CH_40M + 1] = {
- 36, 44, 52, 60, 100, 108, 116, 124,
- 132, 140, 149, 157, 165, 173, 173 + 8,
- };
- u32 start_ch_per_80m[NUM_START_CH_80M + 1] = {
- 36, 52, 100, 116, 132, 149, 165, 165 + 16,
- };
- u32 *start_ch = &start_ch_per_40m[0];
- u32 num_start_channel = NUM_START_CH_40M;
- u32 channel_offset = 0;
- u32 i;
-
- /*2.4G*/
- if (channel <= 14 && channel > 0) {
- if (bw == 80)
- return SET_ERROR;
-
- fc = 2412 + (channel - 1) * 5;
-
- if (bw == 40 && second_ch == PHYDM_ABOVE) {
- if (channel >= 10) {
- ODM_RT_TRACE(
- dm, ODM_COMP_API,
- "CH = ((%d)), Scnd_CH = ((%d)) Error setting\n",
- channel, second_ch);
- return SET_ERROR;
- }
- fc += 10;
- } else if (bw == 40 && (second_ch == PHYDM_BELOW)) {
- if (channel <= 2) {
- ODM_RT_TRACE(
- dm, ODM_COMP_API,
- "CH = ((%d)), Scnd_CH = ((%d)) Error setting\n",
- channel, second_ch);
- return SET_ERROR;
- }
- fc -= 10;
- }
- }
- /*5G*/
- else if (channel >= 36 && channel <= 177) {
- if (bw == 20) {
- fc = 5180 + (channel - 36) * 5;
- *fc_in = fc;
- return SET_SUCCESS;
- }
-
- if (bw == 40) {
- num_start_channel = NUM_START_CH_40M;
- start_ch = &start_ch_per_40m[0];
- channel_offset = CH_OFFSET_40M;
- } else if (bw == 80) {
- num_start_channel = NUM_START_CH_80M;
- start_ch = &start_ch_per_80m[0];
- channel_offset = CH_OFFSET_80M;
- }
-
- for (i = 0; i < num_start_channel; i++) {
- if (channel < start_ch[i + 1]) {
- channel = start_ch[i] + channel_offset;
- break;
- }
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_API, "Mod_CH = ((%d))\n", channel);
-
- fc = 5180 + (channel - 36) * 5;
-
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_API, "CH = ((%d)) Error setting\n",
- channel);
- return SET_ERROR;
- }
-
- *fc_in = fc;
-
- return SET_SUCCESS;
-}
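A standalone sketch of the 2.4 GHz branch above (hypothetical helper name; success/error collapsed to 0/-1): primary channels sit 5 MHz apart starting at 2412 MHz, and a 40 MHz pair shifts the centre 10 MHz towards the secondary channel.

#include <stdint.h>
#include <stdio.h>

static int fc_2g(uint32_t channel, uint32_t bw, int secondary_above, uint32_t *fc)
{
	if (channel < 1 || channel > 14 || bw == 80)
		return -1;

	*fc = 2412 + (channel - 1) * 5;

	if (bw == 40) {
		if (secondary_above) {
			if (channel >= 10)
				return -1;	/* no room above */
			*fc += 10;
		} else {
			if (channel <= 2)
				return -1;	/* no room below */
			*fc -= 10;
		}
	}
	return 0;
}

int main(void)
{
	uint32_t fc;

	if (!fc_2g(6, 40, 1, &fc))
		printf("ch6/40MHz/above -> fc = %u MHz\n", fc);	/* 2447 */
	return 0;
}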
-
-static u8 phydm_calculate_intf_distance(void *dm_void, u32 bw, u32 fc,
- u32 f_interference,
- u32 *tone_idx_tmp_in)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 bw_up, bw_low;
- u32 int_distance;
- u32 tone_idx_tmp;
- u8 set_result = SET_NO_NEED;
-
- bw_up = fc + bw / 2;
- bw_low = fc - bw / 2;
-
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "[f_l, fc, fh] = [ %d, %d, %d ], f_int = ((%d))\n", bw_low,
- fc, bw_up, f_interference);
-
- if (f_interference >= bw_low && f_interference <= bw_up) {
- int_distance = (fc >= f_interference) ? (fc - f_interference) :
- (f_interference - fc);
- tone_idx_tmp =
- (int_distance << 5); /* =10*(int_distance /0.3125) */
- ODM_RT_TRACE(
- dm, ODM_COMP_API,
- "int_distance = ((%d MHz)) Mhz, tone_idx_tmp = ((%d.%d))\n",
- int_distance, (tone_idx_tmp / 10), (tone_idx_tmp % 10));
- *tone_idx_tmp_in = tone_idx_tmp;
- set_result = SET_SUCCESS;
- }
-
- return set_result;
-}
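The shift above encodes the subcarrier spacing: OFDM tones are 0.3125 MHz apart, so distance / 0.3125 = distance * 3.2, and keeping one decimal digit gives 10 * 3.2 * distance = distance << 5. A minimal sketch with made-up frequencies:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fc = 2437, f_interference = 2440;	/* MHz, illustrative */
	uint32_t int_distance = (fc >= f_interference) ?
				(fc - f_interference) : (f_interference - fc);
	uint32_t tone_idx_tmp = int_distance << 5;	/* tenths of a tone */

	printf("distance=%u MHz -> tone_idx = %u.%u\n",
	       int_distance, tone_idx_tmp / 10, tone_idx_tmp % 10);
	return 0;
}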
-
-static u8 phydm_csi_mask_setting(void *dm_void, u32 enable, u32 channel, u32 bw,
- u32 f_interference, u32 second_ch)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 fc;
- u8 tone_direction;
- u32 tone_idx_tmp;
- u8 set_result = SET_SUCCESS;
-
- if (enable == CSI_MASK_DISABLE) {
- set_result = SET_SUCCESS;
- phydm_clean_all_csi_mask(dm);
-
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_API,
- "[Set CSI MASK_] CH = ((%d)), BW = ((%d)), f_intf = ((%d)), Scnd_CH = ((%s))\n",
- channel, bw, f_interference,
- (((bw == 20) || (channel > 14)) ?
- "Don't care" :
- (second_ch == PHYDM_ABOVE) ? "H" : "L"));
-
- /*calculate fc*/
- if (phydm_calculate_fc(dm, channel, bw, second_ch, &fc) ==
- SET_ERROR) {
- set_result = SET_ERROR;
- } else {
- /*calculate interference distance*/
- if (phydm_calculate_intf_distance(
- dm, bw, fc, f_interference,
- &tone_idx_tmp) == SET_SUCCESS) {
- tone_direction = (f_interference >= fc) ?
- FREQ_POSITIVE :
- FREQ_NEGATIVE;
- phydm_set_csi_mask_reg(dm, tone_idx_tmp,
- tone_direction);
- set_result = SET_SUCCESS;
- } else {
- set_result = SET_NO_NEED;
- }
- }
- }
-
- if (set_result == SET_SUCCESS)
- phydm_csi_mask_enable(dm, enable);
- else
- phydm_csi_mask_enable(dm, CSI_MASK_DISABLE);
-
- return set_result;
-}
-
-u8 phydm_nbi_setting(void *dm_void, u32 enable, u32 channel, u32 bw,
- u32 f_interference, u32 second_ch)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 fc;
- u32 tone_idx_tmp;
- u8 set_result = SET_SUCCESS;
-
- if (enable == NBI_DISABLE) {
- set_result = SET_SUCCESS;
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_API,
- "[Set NBI] CH = ((%d)), BW = ((%d)), f_intf = ((%d)), Scnd_CH = ((%s))\n",
- channel, bw, f_interference,
- (((second_ch == PHYDM_DONT_CARE) || (bw == 20) ||
- (channel > 14)) ?
- "Don't care" :
- (second_ch == PHYDM_ABOVE) ? "H" : "L"));
-
- /*calculate fc*/
- if (phydm_calculate_fc(dm, channel, bw, second_ch, &fc) ==
- SET_ERROR) {
- set_result = SET_ERROR;
- } else {
- /*calculate interference distance*/
- if (phydm_calculate_intf_distance(
- dm, bw, fc, f_interference,
- &tone_idx_tmp) == SET_SUCCESS) {
- phydm_set_nbi_reg(dm, tone_idx_tmp, bw);
- set_result = SET_SUCCESS;
- } else {
- set_result = SET_NO_NEED;
- }
- }
- }
-
- if (set_result == SET_SUCCESS)
- phydm_nbi_enable(dm, enable);
- else
- phydm_nbi_enable(dm, NBI_DISABLE);
-
- return set_result;
-}
-
-void phydm_api_debug(void *dm_void, u32 function_map, u32 *const dm_value,
- u32 *_used, char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 used = *_used;
- u32 out_len = *_out_len;
- u32 channel = dm_value[1];
- u32 bw = dm_value[2];
- u32 f_interference = dm_value[3];
- u32 second_ch = dm_value[4];
- u8 set_result = 0;
-
- /*PHYDM_API_NBI*/
- /*--------------------------------------------------------------------*/
- if (function_map == PHYDM_API_NBI) {
- if (dm_value[0] == 100) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "[HELP-NBI] EN(on=1, off=2) CH BW(20/40/80) f_intf(Mhz) Scnd_CH(L=1, H=2)\n");
- return;
-
- } else if (dm_value[0] == NBI_ENABLE) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "[Enable NBI] CH = ((%d)), BW = ((%d)), f_intf = ((%d)), Scnd_CH = ((%s))\n",
- channel, bw, f_interference,
- ((second_ch == PHYDM_DONT_CARE) || (bw == 20) ||
- (channel > 14)) ?
- "Don't care" :
- ((second_ch == PHYDM_ABOVE) ? "H" :
- "L"));
- set_result =
- phydm_nbi_setting(dm, NBI_ENABLE, channel, bw,
- f_interference, second_ch);
-
- } else if (dm_value[0] == NBI_DISABLE) {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "[Disable NBI]\n");
- set_result =
- phydm_nbi_setting(dm, NBI_DISABLE, channel, bw,
- f_interference, second_ch);
-
- } else {
- set_result = SET_ERROR;
- }
-
- PHYDM_SNPRINTF(
- output + used, out_len - used, "[NBI set result: %s]\n",
- (set_result == SET_SUCCESS) ?
- "Success" :
- ((set_result == SET_NO_NEED) ? "No need" :
- "Error"));
- }
-
- /*PHYDM_CSI_MASK*/
- /*--------------------------------------------------------------------*/
- else if (function_map == PHYDM_API_CSI_MASK) {
- if (dm_value[0] == 100) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "[HELP-CSI MASK] EN(on=1, off=2) CH BW(20/40/80) f_intf(Mhz) Scnd_CH(L=1, H=2)\n");
- return;
-
- } else if (dm_value[0] == CSI_MASK_ENABLE) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "[Enable CSI MASK] CH = ((%d)), BW = ((%d)), f_intf = ((%d)), Scnd_CH = ((%s))\n",
- channel, bw, f_interference,
- (channel > 14) ?
- "Don't care" :
- (((second_ch == PHYDM_DONT_CARE) ||
- (bw == 20) || (channel > 14)) ?
- "H" :
- "L"));
- set_result = phydm_csi_mask_setting(
- dm, CSI_MASK_ENABLE, channel, bw,
- f_interference, second_ch);
-
- } else if (dm_value[0] == CSI_MASK_DISABLE) {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "[Disable CSI MASK]\n");
- set_result = phydm_csi_mask_setting(
- dm, CSI_MASK_DISABLE, channel, bw,
- f_interference, second_ch);
-
- } else {
- set_result = SET_ERROR;
- }
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "[CSI MASK set result: %s]\n",
- (set_result == SET_SUCCESS) ?
- "Success" :
- ((set_result == SET_NO_NEED) ?
- "No need" :
- "Error"));
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm.h b/drivers/staging/rtlwifi/phydm/phydm.h
deleted file mode 100644
index 8c3ad3f56273..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm.h
+++ /dev/null
@@ -1,935 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __HALDMOUTSRC_H__
-#define __HALDMOUTSRC_H__
-
-/*============================================================*/
-/*include files*/
-/*============================================================*/
-#include "phydm_pre_define.h"
-#include "phydm_dig.h"
-#include "phydm_edcaturbocheck.h"
-#include "phydm_antdiv.h"
-#include "phydm_dynamicbbpowersaving.h"
-#include "phydm_rainfo.h"
-#include "phydm_dynamictxpower.h"
-#include "phydm_cfotracking.h"
-#include "phydm_acs.h"
-#include "phydm_adaptivity.h"
-#include "phydm_iqk.h"
-#include "phydm_dfs.h"
-#include "phydm_ccx.h"
-#include "txbf/phydm_hal_txbf_api.h"
-
-#include "phydm_adc_sampling.h"
-#include "phydm_dynamic_rx_path.h"
-#include "phydm_psd.h"
-
-#include "phydm_beamforming.h"
-
-#include "phydm_noisemonitor.h"
-#include "halphyrf_ce.h"
-
-/*============================================================*/
-/*Definition */
-/*============================================================*/
-
-/* Traffic load decision */
-#define TRAFFIC_ULTRA_LOW 1
-#define TRAFFIC_LOW 2
-#define TRAFFIC_MID 3
-#define TRAFFIC_HIGH 4
-
-#define NONE 0
-
-/*NBI API------------------------------------*/
-#define NBI_ENABLE 1
-#define NBI_DISABLE 2
-
-#define NBI_TABLE_SIZE_128 27
-#define NBI_TABLE_SIZE_256 59
-
-#define NUM_START_CH_80M 7
-#define NUM_START_CH_40M 14
-
-#define CH_OFFSET_40M 2
-#define CH_OFFSET_80M 6
-
-/*CSI MASK API------------------------------------*/
-#define CSI_MASK_ENABLE 1
-#define CSI_MASK_DISABLE 2
-
-/*------------------------------------------------*/
-
-#define FFT_128_TYPE 1
-#define FFT_256_TYPE 2
-
-#define SET_SUCCESS 1
-#define SET_ERROR 2
-#define SET_NO_NEED 3
-
-#define FREQ_POSITIVE 1
-#define FREQ_NEGATIVE 2
-
-#define PHYDM_WATCH_DOG_PERIOD 2
-
-/*============================================================*/
-/*structure and define*/
-/*============================================================*/
-
-/*2011/09/20 MH Added for the AP/ADSL pseudo DM structure requirement.*/
-/*Do we need to move this to another position???*/
-
-struct rtl8192cd_priv {
- u8 temp;
-};
-
-struct dyn_primary_cca {
- u8 pri_cca_flag;
- u8 intf_flag;
- u8 intf_type;
- u8 dup_rts_flag;
- u8 monitor_flag;
- u8 ch_offset;
- u8 mf_state;
-};
-
-#define dm_type_by_fw 0
-#define dm_type_by_driver 1
-
-/*Declare for common info*/
-
-#define IQK_THRESHOLD 8
-#define DPK_THRESHOLD 4
-
-struct dm_phy_status_info {
- /* */
-	/* Be careful: if you want to add any element, please insert it between */
- /* rx_pwdb_all & signal_strength. */
- /* */
- u8 rx_pwdb_all;
- u8 signal_quality; /* in 0-100 index. */
- s8 rx_mimo_signal_quality[4]; /* per-path's EVM translate to 0~100% */
- u8 rx_mimo_evm_dbm[4]; /* per-path's original EVM (dbm) */
- u8 rx_mimo_signal_strength[4]; /* in 0~100 index */
- s16 cfo_short[4]; /* per-path's cfo_short */
- s16 cfo_tail[4]; /* per-path's cfo_tail */
- s8 rx_power; /* in dBm Translate from PWdB */
- s8 recv_signal_power; /* Real power in dBm for this packet,
- * no beautification and aggregation.
- * Keep this raw info to be used for the other
- * procedures.
- */
- u8 bt_rx_rssi_percentage;
- u8 signal_strength; /* in 0-100 index. */
- s8 rx_pwr[4]; /* per-path's pwdb */
- s8 rx_snr[4]; /* per-path's SNR */
- /* s8 BB_Backup[13]; backup reg. */
- u8 rx_count : 2; /* RX path counter---*/
- u8 band_width : 2;
- u8 rxsc : 4; /* sub-channel---*/
- u8 bt_coex_pwr_adjust;
- u8 channel; /* channel number---*/
- bool is_mu_packet; /* is MU packet or not---*/
- bool is_beamformed; /* BF packet---*/
-};
-
-struct dm_per_pkt_info {
- u8 data_rate;
- u8 station_id;
- bool is_packet_match_bssid;
- bool is_packet_to_self;
- bool is_packet_beacon;
- bool is_to_self;
- u8 ppdu_cnt;
-};
-
-struct odm_phy_dbg_info {
- /*ODM Write,debug info*/
- s8 rx_snr_db[4];
- u32 num_qry_phy_status;
- u32 num_qry_phy_status_cck;
- u32 num_qry_phy_status_ofdm;
- u32 num_qry_mu_pkt;
- u32 num_qry_bf_pkt;
- u32 num_qry_mu_vht_pkt[40];
- u32 num_qry_vht_pkt[40];
- bool is_ldpc_pkt;
- bool is_stbc_pkt;
- u8 num_of_ppdu[4];
- u8 gid_num[4];
- u8 num_qry_beacon_pkt;
- /* Others */
- s32 rx_evm[4];
-};
-
-/*2011/20/20 MH For MP driver RT_WLAN_STA = struct rtl_sta_info*/
-/*Please declare the ODM-related info below in your STA info structure.*/
-
-struct odm_sta_info {
- /*Driver Write*/
- bool is_used; /*record the sta status link or not?*/
- u8 iot_peer; /*Enum value. HT_IOT_PEER_E*/
-
- /*ODM Write*/
- /*PHY_STATUS_INFO*/
- u8 rssi_path[4];
- u8 rssi_ave;
- u8 RXEVM[4];
- u8 RXSNR[4];
-};
-
-enum odm_cmninfo {
- /*Fixed value*/
- /*-----------HOOK BEFORE REG INIT-----------*/
- ODM_CMNINFO_PLATFORM = 0,
- ODM_CMNINFO_ABILITY,
- ODM_CMNINFO_INTERFACE,
- ODM_CMNINFO_MP_TEST_CHIP,
- ODM_CMNINFO_IC_TYPE,
- ODM_CMNINFO_CUT_VER,
- ODM_CMNINFO_FAB_VER,
- ODM_CMNINFO_RF_TYPE,
- ODM_CMNINFO_RFE_TYPE,
- ODM_CMNINFO_BOARD_TYPE,
- ODM_CMNINFO_PACKAGE_TYPE,
- ODM_CMNINFO_EXT_LNA,
- ODM_CMNINFO_5G_EXT_LNA,
- ODM_CMNINFO_EXT_PA,
- ODM_CMNINFO_5G_EXT_PA,
- ODM_CMNINFO_GPA,
- ODM_CMNINFO_APA,
- ODM_CMNINFO_GLNA,
- ODM_CMNINFO_ALNA,
- ODM_CMNINFO_EXT_TRSW,
- ODM_CMNINFO_DPK_EN,
- ODM_CMNINFO_EXT_LNA_GAIN,
- ODM_CMNINFO_PATCH_ID,
- ODM_CMNINFO_BINHCT_TEST,
- ODM_CMNINFO_BWIFI_TEST,
- ODM_CMNINFO_SMART_CONCURRENT,
- ODM_CMNINFO_CONFIG_BB_RF,
- ODM_CMNINFO_DOMAIN_CODE_2G,
- ODM_CMNINFO_DOMAIN_CODE_5G,
- ODM_CMNINFO_IQKFWOFFLOAD,
- ODM_CMNINFO_IQKPAOFF,
- ODM_CMNINFO_HUBUSBMODE,
- ODM_CMNINFO_FWDWRSVDPAGEINPROGRESS,
- ODM_CMNINFO_TX_TP,
- ODM_CMNINFO_RX_TP,
- ODM_CMNINFO_SOUNDING_SEQ,
- ODM_CMNINFO_REGRFKFREEENABLE,
- ODM_CMNINFO_RFKFREEENABLE,
- ODM_CMNINFO_NORMAL_RX_PATH_CHANGE,
- ODM_CMNINFO_EFUSE0X3D8,
- ODM_CMNINFO_EFUSE0X3D7,
- /*-----------HOOK BEFORE REG INIT-----------*/
-
- /*Dynamic value:*/
-
- /*--------- POINTER REFERENCE-----------*/
- ODM_CMNINFO_MAC_PHY_MODE,
- ODM_CMNINFO_TX_UNI,
- ODM_CMNINFO_RX_UNI,
- ODM_CMNINFO_WM_MODE,
- ODM_CMNINFO_BAND,
- ODM_CMNINFO_SEC_CHNL_OFFSET,
- ODM_CMNINFO_SEC_MODE,
- ODM_CMNINFO_BW,
- ODM_CMNINFO_CHNL,
- ODM_CMNINFO_FORCED_RATE,
- ODM_CMNINFO_ANT_DIV,
- ODM_CMNINFO_ADAPTIVITY,
- ODM_CMNINFO_DMSP_GET_VALUE,
- ODM_CMNINFO_BUDDY_ADAPTOR,
- ODM_CMNINFO_DMSP_IS_MASTER,
- ODM_CMNINFO_SCAN,
- ODM_CMNINFO_POWER_SAVING,
- ODM_CMNINFO_ONE_PATH_CCA,
- ODM_CMNINFO_DRV_STOP,
- ODM_CMNINFO_PNP_IN,
- ODM_CMNINFO_INIT_ON,
- ODM_CMNINFO_ANT_TEST,
- ODM_CMNINFO_NET_CLOSED,
- ODM_CMNINFO_FORCED_IGI_LB,
- ODM_CMNINFO_P2P_LINK,
- ODM_CMNINFO_FCS_MODE,
- ODM_CMNINFO_IS1ANTENNA,
- ODM_CMNINFO_RFDEFAULTPATH,
- ODM_CMNINFO_DFS_MASTER_ENABLE,
- ODM_CMNINFO_FORCE_TX_ANT_BY_TXDESC,
- ODM_CMNINFO_SET_S0S1_DEFAULT_ANTENNA,
- /*--------- POINTER REFERENCE-----------*/
-
- /*------------CALL BY VALUE-------------*/
- ODM_CMNINFO_WIFI_DIRECT,
- ODM_CMNINFO_WIFI_DISPLAY,
- ODM_CMNINFO_LINK_IN_PROGRESS,
- ODM_CMNINFO_LINK,
- ODM_CMNINFO_CMW500LINK,
- ODM_CMNINFO_LPSPG,
- ODM_CMNINFO_STATION_STATE,
- ODM_CMNINFO_RSSI_MIN,
- ODM_CMNINFO_DBG_COMP,
- ODM_CMNINFO_DBG_LEVEL,
- ODM_CMNINFO_RA_THRESHOLD_HIGH,
- ODM_CMNINFO_RA_THRESHOLD_LOW,
- ODM_CMNINFO_RF_ANTENNA_TYPE,
- ODM_CMNINFO_WITH_EXT_ANTENNA_SWITCH,
- ODM_CMNINFO_BE_FIX_TX_ANT,
- ODM_CMNINFO_BT_ENABLED,
- ODM_CMNINFO_BT_HS_CONNECT_PROCESS,
- ODM_CMNINFO_BT_HS_RSSI,
- ODM_CMNINFO_BT_OPERATION,
- ODM_CMNINFO_BT_LIMITED_DIG,
- ODM_CMNINFO_BT_DIG,
- ODM_CMNINFO_BT_BUSY,
- ODM_CMNINFO_BT_DISABLE_EDCA,
- ODM_CMNINFO_AP_TOTAL_NUM,
- ODM_CMNINFO_POWER_TRAINING,
- ODM_CMNINFO_DFS_REGION_DOMAIN,
- /*------------CALL BY VALUE-------------*/
-
- /*Dynamic ptr array hook itms.*/
- ODM_CMNINFO_STA_STATUS,
- ODM_CMNINFO_MAX,
-
-};
-
-enum phydm_info_query {
- PHYDM_INFO_FA_OFDM,
- PHYDM_INFO_FA_CCK,
- PHYDM_INFO_FA_TOTAL,
- PHYDM_INFO_CCA_OFDM,
- PHYDM_INFO_CCA_CCK,
- PHYDM_INFO_CCA_ALL,
- PHYDM_INFO_CRC32_OK_VHT,
- PHYDM_INFO_CRC32_OK_HT,
- PHYDM_INFO_CRC32_OK_LEGACY,
- PHYDM_INFO_CRC32_OK_CCK,
- PHYDM_INFO_CRC32_ERROR_VHT,
- PHYDM_INFO_CRC32_ERROR_HT,
- PHYDM_INFO_CRC32_ERROR_LEGACY,
- PHYDM_INFO_CRC32_ERROR_CCK,
- PHYDM_INFO_EDCCA_FLAG,
- PHYDM_INFO_OFDM_ENABLE,
- PHYDM_INFO_CCK_ENABLE,
- PHYDM_INFO_DBG_PORT_0
-};
-
-enum phydm_api {
- PHYDM_API_NBI = 1,
- PHYDM_API_CSI_MASK,
-
-};
-
-/*2011/10/20 MH Define ODM support ability. ODM_CMNINFO_ABILITY*/
-enum odm_ability {
- /*BB ODM section BIT 0-19*/
- ODM_BB_DIG = BIT(0),
- ODM_BB_RA_MASK = BIT(1),
- ODM_BB_DYNAMIC_TXPWR = BIT(2),
- ODM_BB_FA_CNT = BIT(3),
- ODM_BB_RSSI_MONITOR = BIT(4),
- ODM_BB_CCK_PD = BIT(5),
- ODM_BB_ANT_DIV = BIT(6),
- ODM_BB_PWR_TRAIN = BIT(8),
- ODM_BB_RATE_ADAPTIVE = BIT(9),
- ODM_BB_PATH_DIV = BIT(10),
- ODM_BB_ADAPTIVITY = BIT(13),
- ODM_BB_CFO_TRACKING = BIT(14),
- ODM_BB_NHM_CNT = BIT(15),
- ODM_BB_PRIMARY_CCA = BIT(16),
- ODM_BB_TXBF = BIT(17),
- ODM_BB_DYNAMIC_ARFR = BIT(18),
-
- ODM_MAC_EDCA_TURBO = BIT(20),
- ODM_BB_DYNAMIC_RX_PATH = BIT(21),
-
- /*RF ODM section BIT 24-31*/
- ODM_RF_TX_PWR_TRACK = BIT(24),
- ODM_RF_RX_GAIN_TRACK = BIT(25),
- ODM_RF_CALIBRATION = BIT(26),
-
-};
-
-/*ODM_CMNINFO_ONE_PATH_CCA*/
-enum odm_cca_path {
- ODM_CCA_2R = 0,
- ODM_CCA_1R_A = 1,
- ODM_CCA_1R_B = 2,
-};
-
-enum cca_pathdiv_en {
- CCA_PATHDIV_DISABLE = 0,
- CCA_PATHDIV_ENABLE = 1,
-
-};
-
-enum phy_reg_pg_type {
- PHY_REG_PG_RELATIVE_VALUE = 0,
- PHY_REG_PG_EXACT_VALUE = 1
-};
-
-/*2011/09/22 MH Copied from the structure defined in SD4.
- *We use it to support PHY DM integration.
- */
-
-struct phy_dm_struct {
- /*Add for different team use temporarily*/
- void *adapter; /*For CE/NIC team*/
- struct rtl8192cd_priv *priv; /*For AP/ADSL team*/
- /*When you use adapter or priv pointer,
- *you must make sure the pointer is ready.
- */
- bool odm_ready;
-
- struct rtl8192cd_priv fake_priv;
-
- enum phy_reg_pg_type phy_reg_pg_value_type;
- u8 phy_reg_pg_version;
-
- u32 debug_components;
- u32 fw_debug_components;
- u32 debug_level;
-
- u32 num_qry_phy_status_all; /*CCK + OFDM*/
- u32 last_num_qry_phy_status_all;
- u32 rx_pwdb_ave;
- bool MPDIG_2G; /*off MPDIG*/
- u8 times_2g;
- bool is_init_hw_info_by_rfe;
-
- /*------ ODM HANDLE, DRIVER NEEDS NOT TO HOOK------*/
- bool is_cck_high_power;
- u8 rf_path_rx_enable;
- u8 control_channel;
- /*------ ODM HANDLE, DRIVER NEEDS NOT TO HOOK------*/
-
- /* 1 COMMON INFORMATION */
-
- /*Init value*/
- /*-----------HOOK BEFORE REG INIT-----------*/
- /*ODM Platform info AP/ADSL/CE/MP = 1/2/3/4*/
- u8 support_platform;
- /* ODM Platform info WIN/AP/CE = 1/2/3 */
- u8 normal_rx_path;
- /*ODM Support Ability DIG/RATR/TX_PWR_TRACK/ ... = 1/2/3/...*/
- u32 support_ability;
- /*ODM PCIE/USB/SDIO = 1/2/3*/
- u8 support_interface;
- /*ODM composite or independent. Bit oriented/ 92C+92D+ .... or
- *any other type = 1/2/3/...
- */
- u32 support_ic_type;
- /*cut version TestChip/A-cut/B-cut... = 0/1/2/3/...*/
- u8 cut_version;
- /*Fab version TSMC/UMC = 0/1*/
- u8 fab_version;
- /*RF type 4T4R/3T3R/2T2R/1T2R/1T1R/...*/
- u8 rf_type;
- u8 rfe_type;
- /*Board type Normal/HighPower/MiniCard/SLIM/Combo/... = 0/1/2/3/4/...*/
- /*Enable Function DPK OFF/ON = 0/1*/
- u8 dpk_en;
- u8 board_type;
- u8 package_type;
- u16 type_glna;
- u16 type_gpa;
- u16 type_alna;
- u16 type_apa;
- /*with external LNA NO/Yes = 0/1*/
- u8 ext_lna; /*2G*/
- u8 ext_lna_5g; /*5G*/
- /*with external PA NO/Yes = 0/1*/
- u8 ext_pa; /*2G*/
- u8 ext_pa_5g; /*5G*/
- /*with Efuse number*/
- u8 efuse0x3d7;
- u8 efuse0x3d8;
- /*with external TRSW NO/Yes = 0/1*/
- u8 ext_trsw;
- u8 ext_lna_gain; /*2G*/
- u8 patch_id; /*Customer ID*/
- bool is_in_hct_test;
- u8 wifi_test;
-
- bool is_dual_mac_smart_concurrent;
- u32 bk_support_ability;
- u8 ant_div_type;
- u8 with_extenal_ant_switch;
- bool config_bbrf;
- u8 odm_regulation_2_4g;
- u8 odm_regulation_5g;
- u8 iqk_fw_offload;
- bool cck_new_agc;
- u8 phydm_period;
- u32 phydm_sys_up_time;
- u8 num_rf_path;
- /*-----------HOOK BEFORE REG INIT-----------*/
-
- /*Dynamic value*/
-
- /*--------- POINTER REFERENCE-----------*/
-
- u8 u1_byte_temp;
- bool BOOLEAN_temp;
- void *PADAPTER_temp;
-
- /*MAC PHY mode SMSP/DMSP/DMDP = 0/1/2*/
- u8 *mac_phy_mode;
- /*TX Unicast byte count*/
- u64 *num_tx_bytes_unicast;
- /*RX Unicast byte count*/
- u64 *num_rx_bytes_unicast;
- /*Wireless mode B/G/A/N = BIT0/BIT1/BIT2/BIT3*/
- u8 *wireless_mode;
-	/*Frequency band 2.4G/5G = 0/1*/
- u8 *band_type;
- /*Secondary channel offset don't_care/below/above = 0/1/2*/
- u8 *sec_ch_offset;
- /*security mode Open/WEP/AES/TKIP = 0/1/2/3*/
- u8 *security;
- /*BW info 20M/40M/80M = 0/1/2*/
- u8 *band_width;
- /*Central channel location Ch1/Ch2/....*/
- u8 *channel; /*central channel number*/
- bool dpk_done;
- /*Common info for 92D DMSP*/
-
- bool *is_get_value_from_other_mac;
- void **buddy_adapter;
- bool *is_master_of_dmsp; /* MAC0: master, MAC1: slave */
- /*Common info for status*/
- bool *is_scan_in_process;
- bool *is_power_saving;
- /*CCA path 2-path/path-A/path-B = 0/1/2; using enum odm_cca_path.*/
- u8 *one_path_cca;
- u8 *antenna_test;
- bool *is_net_closed;
- u8 *pu1_forced_igi_lb;
- bool *is_fcs_mode_enable;
- /*--------- For 8723B IQK-----------*/
- bool *is_1_antenna;
- u8 *rf_default_path;
- /* 0:S1, 1:S0 */
-
- /*--------- POINTER REFERENCE-----------*/
- u16 *forced_data_rate;
- u8 *enable_antdiv;
- u8 *enable_adaptivity;
- u8 *hub_usb_mode;
- bool *is_fw_dw_rsvd_page_in_progress;
- u32 *current_tx_tp;
- u32 *current_rx_tp;
- u8 *sounding_seq;
- /*------------CALL BY VALUE-------------*/
- bool is_link_in_process;
- bool is_wifi_direct;
- bool is_wifi_display;
- bool is_linked;
- bool is_linkedcmw500;
- bool is_in_lps_pg;
- bool bsta_state;
- u8 rssi_min;
- u8 interface_index; /*Add for 92D dual MAC: 0--Mac0 1--Mac1*/
- bool is_mp_chip;
- bool is_one_entry_only;
- bool mp_mode;
- u32 one_entry_macid;
- u8 pre_number_linked_client;
- u8 number_linked_client;
- u8 pre_number_active_client;
- u8 number_active_client;
- /*Common info for BTDM*/
- bool is_bt_enabled; /*BT is enabled*/
-	bool is_bt_connect_process; /*A BT HS connection is in progress.*/
-	u8 bt_hs_rssi; /*Wi-Fi RSSI value in BT HS mode.*/
-	bool is_bt_hs_operation; /*A BT HS operation is in progress.*/
-	u8 bt_hs_dig_val; /*use the BT RSSI to decide the DIG value*/
-	bool is_bt_disable_edca_turbo; /*Under some conditions, EDCA turbo is not enabled.*/
-	bool is_bt_busy; /*BT is busy.*/
-	bool is_bt_limited_dig; /*DIG is limited because of BT.*/
- bool is_disable_phy_api;
- /*------------CALL BY VALUE-------------*/
- u8 rssi_a;
- u8 rssi_b;
- u8 rssi_c;
- u8 rssi_d;
- u64 rssi_trsw;
- u64 rssi_trsw_h;
- u64 rssi_trsw_l;
- u64 rssi_trsw_iso;
- u8 tx_ant_status;
- u8 rx_ant_status;
- u8 cck_lna_idx;
- u8 cck_vga_idx;
- u8 curr_station_id;
- u8 ofdm_agc_idx[4];
-
- u8 rx_rate;
- bool is_noisy_state;
- u8 tx_rate;
- u8 linked_interval;
- u8 pre_channel;
- u32 txagc_offset_value_a;
- bool is_txagc_offset_positive_a;
- u32 txagc_offset_value_b;
- bool is_txagc_offset_positive_b;
- u32 tx_tp;
- u32 rx_tp;
- u32 total_tp;
- u64 cur_tx_ok_cnt;
- u64 cur_rx_ok_cnt;
- u64 last_tx_ok_cnt;
- u64 last_rx_ok_cnt;
- u32 bb_swing_offset_a;
- bool is_bb_swing_offset_positive_a;
- u32 bb_swing_offset_b;
- bool is_bb_swing_offset_positive_b;
- u8 igi_lower_bound;
- u8 igi_upper_bound;
- u8 antdiv_rssi;
- u8 fat_comb_a;
- u8 fat_comb_b;
- u8 antdiv_intvl;
- u8 ant_type;
- u8 pre_ant_type;
- u8 antdiv_period;
- u8 evm_antdiv_period;
- u8 antdiv_select;
- u8 path_select;
- u8 antdiv_evm_en;
- u8 bdc_holdstate;
- u8 ndpa_period;
- bool h2c_rarpt_connect;
- bool cck_agc_report_type;
-
- u8 dm_dig_max_TH;
- u8 dm_dig_min_TH;
- u8 print_agc;
- u8 traffic_load;
- u8 pre_traffic_load;
- /*8821C Antenna BTG/WLG/WLA Select*/
- u8 current_rf_set_8821c;
- u8 default_rf_set_8821c;
-	/*For Adaptivity*/
- u16 nhm_cnt_0;
- u16 nhm_cnt_1;
- s8 TH_L2H_default;
- s8 th_edcca_hl_diff_default;
- s8 th_l2h_ini;
- s8 th_edcca_hl_diff;
- s8 th_l2h_ini_mode2;
- s8 th_edcca_hl_diff_mode2;
- bool carrier_sense_enable;
- u8 adaptivity_igi_upper;
- bool adaptivity_flag;
- u8 dc_backoff;
- bool adaptivity_enable;
- u8 ap_total_num;
- bool edcca_enable;
- u8 pre_dbg_priority;
- struct adaptivity_statistics adaptivity;
-	/*For Adaptivity*/
- u8 last_usb_hub;
- u8 tx_bf_data_rate;
-
- u8 nbi_set_result;
-
- u8 c2h_cmd_start;
- u8 fw_debug_trace[60];
- u8 pre_c2h_seq;
- bool fw_buff_is_enpty;
- u32 data_frame_num;
-
- /*for noise detection*/
- bool noisy_decision; /*b_noisy*/
- bool pre_b_noisy;
- u32 noisy_decision_smooth;
- bool is_disable_dym_ecs;
-
- struct odm_noise_monitor noise_level;
- /*Define STA info.*/
- /*odm_sta_info*/
- /*2012/01/12 MH For MP,
- *we need to reduce one array pointer for default port.??
- */
- struct rtl_sta_info *odm_sta_info[ODM_ASSOCIATE_ENTRY_NUM];
- u16 platform2phydm_macid_table[ODM_ASSOCIATE_ENTRY_NUM];
- /* platform_macid_table[platform_macid] = phydm_macid */
- s32 accumulate_pwdb[ODM_ASSOCIATE_ENTRY_NUM];
-
-	/*2012/02/14 MH Added to share the 88E RA with other SW teams.*/
-	/*We need to collect all support abilities into a proper area.*/
-
- bool ra_support88e;
-
- struct odm_phy_dbg_info phy_dbg_info;
-
- /*ODM Structure*/
- struct fast_antenna_training dm_fat_table;
- struct dig_thres dm_dig_table;
- struct dyn_pwr_saving dm_ps_table;
- struct dyn_primary_cca dm_pri_cca;
- struct ra_table dm_ra_table;
- struct false_alarm_stat false_alm_cnt;
- struct false_alarm_stat flase_alm_cnt_buddy_adapter;
- struct sw_antenna_switch dm_swat_table;
- struct cfo_tracking dm_cfo_track;
- struct acs_info dm_acs;
- struct ccx_info dm_ccx_info;
- struct psd_info dm_psd_table;
-
- struct rt_adcsmp adcsmp;
-
- struct dm_iqk_info IQK_info;
-
- struct edca_turbo dm_edca_table;
- u32 WMMEDCA_BE;
-
- bool *is_driver_stopped;
- bool *is_driver_is_going_to_pnp_set_power_sleep;
- bool *pinit_adpt_in_progress;
-
- /*PSD*/
- bool is_user_assign_level;
- u8 RSSI_BT; /*come from BT*/
- bool is_psd_in_process;
- bool is_psd_active;
- bool is_dm_initial_gain_enable;
-
- /*MPT DIG*/
- struct timer_list mpt_dig_timer;
-
- /*for rate adaptive, in fact, 88c/92c fw will handle this*/
- u8 is_use_ra_mask;
-
- /* for dynamic SoML control */
- bool bsomlenabled;
-
- struct odm_rate_adaptive rate_adaptive;
- struct dm_rf_calibration_struct rf_calibrate_info;
- u32 n_iqk_cnt;
- u32 n_iqk_ok_cnt;
- u32 n_iqk_fail_cnt;
-
- /*Power Training*/
- u8 force_power_training_state;
- bool is_change_state;
- u32 PT_score;
- u64 ofdm_rx_cnt;
- u64 cck_rx_cnt;
- bool is_disable_power_training;
- u8 dynamic_tx_high_power_lvl;
- u8 last_dtp_lvl;
- u32 tx_agc_ofdm_18_6;
- u8 rx_pkt_type;
-
- /*ODM relative time.*/
- struct timer_list path_div_switch_timer;
- /*2011.09.27 add for path Diversity*/
- struct timer_list cck_path_diversity_timer;
- struct timer_list fast_ant_training_timer;
- struct timer_list sbdcnt_timer;
-
- /*ODM relative workitem.*/
-};
-
-enum phydm_structure_type {
- PHYDM_FALSEALMCNT,
- PHYDM_CFOTRACK,
- PHYDM_ADAPTIVITY,
- PHYDM_ROMINFO,
-
-};
-
-enum odm_rf_content {
- odm_radioa_txt = 0x1000,
- odm_radiob_txt = 0x1001,
- odm_radioc_txt = 0x1002,
- odm_radiod_txt = 0x1003
-};
-
-enum odm_bb_config_type {
- CONFIG_BB_PHY_REG,
- CONFIG_BB_AGC_TAB,
- CONFIG_BB_AGC_TAB_2G,
- CONFIG_BB_AGC_TAB_5G,
- CONFIG_BB_PHY_REG_PG,
- CONFIG_BB_PHY_REG_MP,
- CONFIG_BB_AGC_TAB_DIFF,
-};
-
-enum odm_rf_config_type {
- CONFIG_RF_RADIO,
- CONFIG_RF_TXPWR_LMT,
-};
-
-enum odm_fw_config_type {
- CONFIG_FW_NIC,
- CONFIG_FW_NIC_2,
- CONFIG_FW_AP,
- CONFIG_FW_AP_2,
- CONFIG_FW_MP,
- CONFIG_FW_WOWLAN,
- CONFIG_FW_WOWLAN_2,
- CONFIG_FW_AP_WOWLAN,
- CONFIG_FW_BT,
-};
-
-/*status code*/
-enum rt_status {
- RT_STATUS_SUCCESS,
- RT_STATUS_FAILURE,
- RT_STATUS_PENDING,
- RT_STATUS_RESOURCE,
- RT_STATUS_INVALID_CONTEXT,
- RT_STATUS_INVALID_PARAMETER,
- RT_STATUS_NOT_SUPPORT,
- RT_STATUS_OS_API_FAILED,
-};
-
-/*===========================================================*/
-/*AGC RX High Power mode*/
-/*===========================================================*/
-#define lna_low_gain_1 0x64
-#define lna_low_gain_2 0x5A
-#define lna_low_gain_3 0x58
-
-#define FA_RXHP_TH1 5000
-#define FA_RXHP_TH2 1500
-#define FA_RXHP_TH3 800
-#define FA_RXHP_TH4 600
-#define FA_RXHP_TH5 500
-
-enum dm_1r_cca {
- CCA_1R = 0,
- CCA_2R = 1,
- CCA_MAX = 2,
-};
-
-enum dm_rf {
- rf_save = 0,
- rf_normal = 1,
- RF_MAX = 2,
-};
-
-/*check Sta pointer valid or not*/
-
-#define IS_STA_VALID(sta) (sta)
-
-u32 odm_convert_to_db(u32 value);
-
-u32 odm_convert_to_linear(u32 value);
-
-s32 odm_pwdb_conversion(s32 X, u32 total_bit, u32 decimal_bit);
-
-s32 odm_sign_conversion(s32 value, u32 total_bit);
-
-void odm_init_mp_driver_status(struct phy_dm_struct *dm);
-
-void phydm_txcurrentcalibration(struct phy_dm_struct *dm);
-
-void phydm_seq_sorting(void *dm_void, u32 *value, u32 *rank_idx, u32 *idx_out,
- u8 seq_length);
-
-void odm_dm_init(struct phy_dm_struct *dm);
-
-void odm_dm_reset(struct phy_dm_struct *dm);
-
-void phydm_support_ability_debug(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len);
-
-void phydm_config_ofdm_rx_path(struct phy_dm_struct *dm, u32 path);
-
-void phydm_config_trx_path(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len);
-
-void odm_dm_watchdog(struct phy_dm_struct *dm);
-
-void phydm_watchdog_mp(struct phy_dm_struct *dm);
-
-void odm_cmn_info_init(struct phy_dm_struct *dm, enum odm_cmninfo cmn_info,
- u32 value);
-
-void odm_cmn_info_hook(struct phy_dm_struct *dm, enum odm_cmninfo cmn_info,
- void *value);
-
-void odm_cmn_info_ptr_array_hook(struct phy_dm_struct *dm,
- enum odm_cmninfo cmn_info, u16 index,
- void *value);
-
-void odm_cmn_info_update(struct phy_dm_struct *dm, u32 cmn_info, u64 value);
-
-u32 phydm_cmn_info_query(struct phy_dm_struct *dm,
- enum phydm_info_query info_type);
-
-void odm_init_all_timers(struct phy_dm_struct *dm);
-
-void odm_cancel_all_timers(struct phy_dm_struct *dm);
-
-void odm_release_all_timers(struct phy_dm_struct *dm);
-
-void odm_asoc_entry_init(struct phy_dm_struct *dm);
-
-void *phydm_get_structure(struct phy_dm_struct *dm, u8 structure_type);
-
-/*===========================================================*/
-/* The following is for compile only*/
-/*===========================================================*/
-
-#define IS_HARDWARE_TYPE_8188E(_adapter) false
-#define IS_HARDWARE_TYPE_8188F(_adapter) false
-#define IS_HARDWARE_TYPE_8703B(_adapter) false
-#define IS_HARDWARE_TYPE_8723D(_adapter) false
-#define IS_HARDWARE_TYPE_8821C(_adapter) false
-#define IS_HARDWARE_TYPE_8812AU(_adapter) false
-#define IS_HARDWARE_TYPE_8814A(_adapter) false
-#define IS_HARDWARE_TYPE_8814AU(_adapter) false
-#define IS_HARDWARE_TYPE_8814AE(_adapter) false
-#define IS_HARDWARE_TYPE_8814AS(_adapter) false
-#define IS_HARDWARE_TYPE_8723BU(_adapter) false
-#define IS_HARDWARE_TYPE_8822BU(_adapter) false
-#define IS_HARDWARE_TYPE_8822BS(_adapter) false
-#define IS_HARDWARE_TYPE_JAGUAR(_adapter) \
- (IS_HARDWARE_TYPE_8812(_adapter) || IS_HARDWARE_TYPE_8821(_adapter))
-#define IS_HARDWARE_TYPE_8723AE(_adapter) false
-#define IS_HARDWARE_TYPE_8192C(_adapter) false
-#define IS_HARDWARE_TYPE_8192D(_adapter) false
-#define RF_T_METER_92D 0x42
-
-#define GET_RX_STATUS_DESC_RX_MCS(__prx_status_desc) \
- LE_BITS_TO_1BYTE(__prx_status_desc + 12, 0, 6)
-
-#define REG_CONFIG_RAM64X16 0xb2c
-
-#define TARGET_CHNL_NUM_2G_5G 59
-
-/* *********************************************************** */
-
-void odm_dtc(struct phy_dm_struct *dm);
-
-void phydm_noisy_detection(struct phy_dm_struct *dm);
-
-void phydm_set_ext_switch(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len);
-
-void phydm_api_debug(void *dm_void, u32 function_map, u32 *const dm_value,
- u32 *_used, char *output, u32 *_out_len);
-
-u8 phydm_nbi_setting(void *dm_void, u32 enable, u32 channel, u32 bw,
- u32 f_interference, u32 second_ch);
-#endif /* __HALDMOUTSRC_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/phydm_acs.c b/drivers/staging/rtlwifi/phydm/phydm_acs.c
deleted file mode 100644
index f47b245e77e3..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_acs.c
+++ /dev/null
@@ -1,189 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-u8 odm_get_auto_channel_select_result(void *dm_void, u8 band)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct acs_info *acs = &dm->dm_acs;
- u8 result;
-
- if (band == ODM_BAND_2_4G) {
- ODM_RT_TRACE(
- dm, ODM_COMP_ACS,
- "[struct acs_info] %s(): clean_channel_2g(%d)\n",
- __func__, acs->clean_channel_2g);
- result = (u8)acs->clean_channel_2g;
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_ACS,
- "[struct acs_info] %s(): clean_channel_5g(%d)\n",
- __func__, acs->clean_channel_5g);
- result = (u8)acs->clean_channel_5g;
- }
-
- return result;
-}
-
-static void odm_auto_channel_select_setting(void *dm_void, bool is_enable)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u16 period = 0x2710; /* 40ms in default */
- u16 nhm_type = 0x7;
-
- ODM_RT_TRACE(dm, ODM_COMP_ACS, "%s()=========>\n", __func__);
-
- if (is_enable) {
- /* 20 ms */
- period = 0x1388;
- nhm_type = 0x1;
- }
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- /* PHY parameters initialize for ac series */
-
- /* 0x990[31:16]=0x2710
- * Time duration for NHM unit: 4us, 0x2710=40ms
- */
- odm_write_2byte(dm, ODM_REG_CCX_PERIOD_11AC + 2, period);
- } else if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- /* PHY parameters initialize for n series */
-
- /* 0x894[31:16]=0x2710
- * Time duration for NHM unit: 4us, 0x2710=40ms
- */
- odm_write_2byte(dm, ODM_REG_CCX_PERIOD_11N + 2, period);
- }
-}
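For the period values used above, the NHM counter is assumed to tick in 4 us units (per the register comments), so the two settings work out as below; a trivial standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int default_period = 0x2710;	/* 10000 * 4 us = 40 ms */
	unsigned int acs_period = 0x1388;	/*  5000 * 4 us = 20 ms */

	printf("default: %u us, ACS: %u us\n",
	       default_period * 4, acs_period * 4);
	return 0;
}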
-
-void odm_auto_channel_select_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct acs_info *acs = &dm->dm_acs;
- u8 i;
-
- if (!(dm->support_ability & ODM_BB_NHM_CNT))
- return;
-
- if (acs->is_force_acs_result)
- return;
-
- ODM_RT_TRACE(dm, ODM_COMP_ACS, "%s()=========>\n", __func__);
-
- acs->clean_channel_2g = 1;
- acs->clean_channel_5g = 36;
-
- for (i = 0; i < ODM_MAX_CHANNEL_2G; ++i) {
- acs->channel_info_2g[0][i] = 0;
- acs->channel_info_2g[1][i] = 0;
- }
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- for (i = 0; i < ODM_MAX_CHANNEL_5G; ++i) {
- acs->channel_info_5g[0][i] = 0;
- acs->channel_info_5g[1][i] = 0;
- }
- }
-}
-
-void odm_auto_channel_select_reset(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct acs_info *acs = &dm->dm_acs;
-
- if (!(dm->support_ability & ODM_BB_NHM_CNT))
- return;
-
- if (acs->is_force_acs_result)
- return;
-
- ODM_RT_TRACE(dm, ODM_COMP_ACS, "%s()=========>\n", __func__);
-
- odm_auto_channel_select_setting(dm, true); /* for 20ms measurement */
- phydm_nhm_counter_statistics_reset(dm);
-}
-
-void odm_auto_channel_select(void *dm_void, u8 channel)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct acs_info *acs = &dm->dm_acs;
- u8 channel_idx = 0, search_idx = 0;
- u16 max_score = 0;
-
- if (!(dm->support_ability & ODM_BB_NHM_CNT)) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): Return: support_ability ODM_BB_NHM_CNT is disabled\n",
- __func__);
- return;
- }
-
- if (acs->is_force_acs_result) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): Force 2G clean channel = %d, 5G clean channel = %d\n",
- __func__, acs->clean_channel_2g, acs->clean_channel_5g);
- return;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_ACS, "%s(): channel = %d=========>\n",
- __func__, channel);
-
- phydm_get_nhm_counter_statistics(dm);
- odm_auto_channel_select_setting(dm, false);
-
- if (channel >= 1 && channel <= 14) {
- channel_idx = channel - 1;
- acs->channel_info_2g[1][channel_idx]++;
-
- if (acs->channel_info_2g[1][channel_idx] >= 2)
- acs->channel_info_2g[0][channel_idx] =
- (acs->channel_info_2g[0][channel_idx] >> 1) +
- (acs->channel_info_2g[0][channel_idx] >> 2) +
- (dm->nhm_cnt_0 >> 2);
- else
- acs->channel_info_2g[0][channel_idx] = dm->nhm_cnt_0;
-
- ODM_RT_TRACE(dm, ODM_COMP_ACS, "%s(): nhm_cnt_0 = %d\n",
- __func__, dm->nhm_cnt_0);
- ODM_RT_TRACE(
- dm, ODM_COMP_ACS,
- "%s(): Channel_Info[0][%d] = %d, Channel_Info[1][%d] = %d\n",
- __func__, channel_idx,
- acs->channel_info_2g[0][channel_idx], channel_idx,
- acs->channel_info_2g[1][channel_idx]);
-
- for (search_idx = 0; search_idx < ODM_MAX_CHANNEL_2G;
- search_idx++) {
- if (acs->channel_info_2g[1][search_idx] != 0 &&
- acs->channel_info_2g[0][search_idx] >= max_score) {
- max_score = acs->channel_info_2g[0][search_idx];
- acs->clean_channel_2g = search_idx + 1;
- }
- }
- ODM_RT_TRACE(
- dm, ODM_COMP_ACS,
- "(1)%s(): 2G: clean_channel_2g = %d, max_score = %d\n",
- __func__, acs->clean_channel_2g, max_score);
-
- } else if (channel >= 36) {
- /* Need to do */
- acs->clean_channel_5g = channel;
- }
-}
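The per-channel bookkeeping above is a 3/4-old + 1/4-new smoother applied from the second scan onwards, followed by a max-score search. A standalone sketch with made-up NHM samples (no acs_info structure):

#include <stdint.h>
#include <stdio.h>

#define MAX_CH_2G 14

int main(void)
{
	uint16_t score[MAX_CH_2G] = { 0 };	/* channel score */
	uint16_t times[MAX_CH_2G] = { 0 };	/* channel scan count */
	uint16_t nhm_samples[3] = { 200, 180, 210 };	/* illustrative */
	uint8_t channel = 6, idx = channel - 1, clean = 1;
	uint16_t max_score = 0;

	for (int scan = 0; scan < 3; scan++) {
		times[idx]++;
		if (times[idx] >= 2)	/* 3/4 old + 1/4 new smoothing */
			score[idx] = (score[idx] >> 1) + (score[idx] >> 2) +
				     (nhm_samples[scan] >> 2);
		else
			score[idx] = nhm_samples[scan];
	}

	for (uint8_t i = 0; i < MAX_CH_2G; i++) {
		if (times[i] && score[i] >= max_score) {
			max_score = score[i];
			clean = i + 1;
		}
	}

	printf("clean 2G channel = %u (score %u)\n", clean, max_score);
	return 0;
}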
diff --git a/drivers/staging/rtlwifi/phydm/phydm_acs.h b/drivers/staging/rtlwifi/phydm/phydm_acs.h
deleted file mode 100644
index c6516b871fdb..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_acs.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMACS_H__
-#define __PHYDMACS_H__
-
-#define ACS_VERSION "1.1" /*20150729 by YuChen*/
-#define CLM_VERSION "1.0"
-
-#define ODM_MAX_CHANNEL_2G 14
-#define ODM_MAX_CHANNEL_5G 24
-
-/* For phydm_auto_channel_select_setting_ap() */
-#define STORE_DEFAULT_NHM_SETTING 0
-#define RESTORE_DEFAULT_NHM_SETTING 1
-#define ACS_NHM_SETTING 2
-
-struct acs_info {
- bool is_force_acs_result;
- u8 clean_channel_2g;
- u8 clean_channel_5g;
- /* channel_info[0]: channel score, channel_info[1]: channel scan times */
- u16 channel_info_2g[2][ODM_MAX_CHANNEL_2G];
- u16 channel_info_5g[2][ODM_MAX_CHANNEL_5G];
-};
-
-void odm_auto_channel_select_init(void *dm_void);
-
-void odm_auto_channel_select_reset(void *dm_void);
-
-void odm_auto_channel_select(void *dm_void, u8 channel);
-
-u8 odm_get_auto_channel_select_result(void *dm_void, u8 band);
-
-#endif /* #ifndef __PHYDMACS_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
deleted file mode 100644
index 58ec3999391c..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
+++ /dev/null
@@ -1,930 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-void phydm_check_adaptivity(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
-
- if (dm->support_ability & ODM_BB_ADAPTIVITY) {
- if (adaptivity->dynamic_link_adaptivity ||
- adaptivity->acs_for_adaptivity) {
- if (dm->is_linked && !adaptivity->is_check) {
- phydm_nhm_counter_statistics(dm);
- phydm_check_environment(dm);
- } else if (!dm->is_linked) {
- adaptivity->is_check = false;
- }
- } else {
- dm->adaptivity_enable = true;
-
- if (dm->support_ic_type & (ODM_IC_11AC_GAIN_IDX_EDCCA |
- ODM_IC_11N_GAIN_IDX_EDCCA))
- dm->adaptivity_flag = false;
- else
- dm->adaptivity_flag = true;
- }
- } else {
- dm->adaptivity_enable = false;
- dm->adaptivity_flag = false;
- }
-}
-
-void phydm_nhm_counter_statistics_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- /*PHY parameters initialize for n series*/
-
- /*0x894[31:16]=0xC350
- *Time duration for NHM unit: 4us, 0xc350=200ms
- */
- odm_write_2byte(dm, ODM_REG_CCX_PERIOD_11N + 2, 0xC350);
- /*0x890[31:16]=0xffff th_9, th_10*/
- odm_write_2byte(dm, ODM_REG_NHM_TH9_TH10_11N + 2, 0xffff);
- /*0x898=0xffffff50 th_3, th_2, th_1, th_0*/
- odm_write_4byte(dm, ODM_REG_NHM_TH3_TO_TH0_11N, 0xffffff50);
- /*0x89c=0xffffffff th_7, th_6, th_5, th_4*/
- odm_write_4byte(dm, ODM_REG_NHM_TH7_TO_TH4_11N, 0xffffffff);
- /*0xe28[7:0]=0xff th_8*/
- odm_set_bb_reg(dm, ODM_REG_FPGA0_IQK_11N, MASKBYTE0, 0xff);
- /*0x890[10:8]=1 ignoreCCA ignore PHYTXON enable CCX*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N,
- BIT(10) | BIT(9) | BIT(8), 0x1);
- /*0xc0c[7]=1 max power among all RX ants*/
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RSTC_11N, BIT(7), 0x1);
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- /*PHY parameters initialize for ac series*/
-
- odm_write_2byte(dm, ODM_REG_CCX_PERIOD_11AC + 2, 0xC350);
- /*0x994[31:16]=0xffff th_9, th_10*/
- odm_write_2byte(dm, ODM_REG_NHM_TH9_TH10_11AC + 2, 0xffff);
- /*0x998=0xffffff50 th_3, th_2, th_1, th_0*/
- odm_write_4byte(dm, ODM_REG_NHM_TH3_TO_TH0_11AC, 0xffffff50);
- /*0x99c=0xffffffff th_7, th_6, th_5, th_4*/
- odm_write_4byte(dm, ODM_REG_NHM_TH7_TO_TH4_11AC, 0xffffffff);
- /*0x9a0[7:0]=0xff th_8*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH8_11AC, MASKBYTE0, 0xff);
- /*0x994[10:8]=1 ignoreCCA ignore PHYTXON enable CCX*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC,
- BIT(8) | BIT(9) | BIT(10), 0x1);
- /*0x9e8[7]=1 max power among all RX ants*/
- odm_set_bb_reg(dm, ODM_REG_NHM_9E8_11AC, BIT(0), 0x1);
- }
-}
-
-void phydm_nhm_counter_statistics(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (!(dm->support_ability & ODM_BB_NHM_CNT))
- return;
-
- /*Get NHM report*/
- phydm_get_nhm_counter_statistics(dm);
-
- /*Reset NHM counter*/
- phydm_nhm_counter_statistics_reset(dm);
-}
-
-void phydm_get_nhm_counter_statistics(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 value32 = 0;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- value32 = odm_get_bb_reg(dm, ODM_REG_NHM_CNT_11AC, MASKDWORD);
- else if (dm->support_ic_type & ODM_IC_11N_SERIES)
- value32 = odm_get_bb_reg(dm, ODM_REG_NHM_CNT_11N, MASKDWORD);
-
- dm->nhm_cnt_0 = (u8)(value32 & MASKBYTE0);
- dm->nhm_cnt_1 = (u8)((value32 & MASKBYTE1) >> 8);
-}
-
-void phydm_nhm_counter_statistics_reset(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(1), 0);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(1), 1);
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(1), 0);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(1), 1);
- }
-}
-
-void phydm_set_edcca_threshold(void *dm_void, s8 H2L, s8 L2H)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- odm_set_bb_reg(dm, REG_OFDM_0_ECCA_THRESHOLD,
- MASKBYTE2 | MASKBYTE0,
- (u32)((u8)L2H | (u8)H2L << 16));
- else if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- odm_set_bb_reg(dm, REG_FPGA0_XB_LSSI_READ_BACK, MASKLWORD,
- (u16)((u8)L2H | (u8)H2L << 8));
-}
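For reference, the two signed thresholds are packed as raw bytes: on N-series parts L2H goes to bits [7:0] and H2L to bits [23:16] of REG_OFDM_0_ECCA_THRESHOLD, on AC-series parts to the low word ([7:0]/[15:8]) of REG_FPGA0_XB_LSSI_READ_BACK. A minimal sketch of just the packing; the helper names are invented for illustration:

/* Pack the signed H2L/L2H thresholds the same way the deleted helper does:
 * cast to u8 first so negative values become their two's-complement byte.
 */
#include <linux/types.h>

static u32 edcca_pack_11n(s8 h2l, s8 l2h)
{
	/* destined for MASKBYTE2 | MASKBYTE0 of REG_OFDM_0_ECCA_THRESHOLD */
	return (u32)((u8)l2h | ((u8)h2l << 16));
}

static u16 edcca_pack_11ac(s8 h2l, s8 l2h)
{
	/* destined for MASKLWORD of REG_FPGA0_XB_LSSI_READ_BACK */
	return (u16)((u8)l2h | ((u8)h2l << 8));
}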
-
-static void phydm_set_lna(void *dm_void, enum phydm_set_lna type)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & (ODM_RTL8188E | ODM_RTL8192E)) {
- if (type == phydm_disable_lna) {
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff,
- 0x0000f);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
- 0x37f82); /*disable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);
- if (dm->rf_type > ODM_1T1R) {
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, 0x80000,
- 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x30, 0xfffff,
- 0x18000);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x31, 0xfffff,
- 0x0000f);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x32, 0xfffff,
- 0x37f82);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, 0x80000,
- 0x0);
- }
- } else if (type == phydm_enable_lna) {
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff,
- 0x0000f);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
- 0x77f82); /*back to normal*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);
- if (dm->rf_type > ODM_1T1R) {
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, 0x80000,
- 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x30, 0xfffff,
- 0x18000);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x31, 0xfffff,
- 0x0000f);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x32, 0xfffff,
- 0x77f82);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, 0x80000,
- 0x0);
- }
- }
- } else if (dm->support_ic_type & ODM_RTL8723B) {
- if (type == phydm_disable_lna) {
- /*S0*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff,
- 0x0001f);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
- 0xe6137); /*disable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);
- /*S1*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xed, 0x00020, 0x1);
- odm_set_rf_reg(
- dm, ODM_RF_PATH_A, 0x43, 0xfffff,
- 0x3008d); /*select Rx mode and disable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xed, 0x00020, 0x0);
- } else if (type == phydm_enable_lna) {
- /*S0*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff,
- 0x0001f);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
- 0xe6177); /*enable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);
- /*S1*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xed, 0x00020, 0x1);
- odm_set_rf_reg(
- dm, ODM_RF_PATH_A, 0x43, 0xfffff,
- 0x300bd); /*select Rx mode and enable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xed, 0x00020, 0x0);
- }
-
- } else if (dm->support_ic_type & ODM_RTL8812) {
- if (type == phydm_disable_lna) {
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff,
- 0x3f7ff);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
- 0xc22bf); /*disable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);
- if (dm->rf_type > ODM_1T1R) {
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, 0x80000,
- 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x31, 0xfffff,
- 0x3f7ff);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x32, 0xfffff,
- 0xc22bf); /*disable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, 0x80000,
- 0x0);
- }
- } else if (type == phydm_enable_lna) {
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff,
- 0x3f7ff);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
- 0xc26bf); /*enable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);
- if (dm->rf_type > ODM_1T1R) {
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, 0x80000,
- 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x31, 0xfffff,
- 0x3f7ff);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x32, 0xfffff,
- 0xc26bf); /*enable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, 0x80000,
- 0x0);
- }
- }
- } else if (dm->support_ic_type & (ODM_RTL8821 | ODM_RTL8881A)) {
- if (type == phydm_disable_lna) {
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff,
- 0x0002f);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
- 0xfb09b); /*disable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);
- } else if (type == phydm_enable_lna) {
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff,
- 0x18000); /*select Rx mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff,
- 0x0002f);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
- 0xfb0bb); /*enable LNA*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);
- }
- }
-}
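Every branch of phydm_set_lna() follows the same gated sequence: open the RF mode-table gate, program entries 0x30/0x31/0x32 to select Rx with the LNA either bypassed or restored, then close the gate. A sketch of that pattern with the 8188E/8192E path-A values; the wrapper name is invented for illustration and it assumes the phydm accessors declared elsewhere in this driver:

/* Illustrative wrapper for the gate-program-ungate sequence used above.
 * Path A only; the deleted code repeats this for path B on 2T2R parts and
 * uses different table values for other chips.
 */
static void phydm_lna_seq_sketch(struct phy_dm_struct *dm, bool enable)
{
	odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x1);	/* open gate */
	odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x30, 0xfffff, 0x18000); /* Rx mode */
	odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x31, 0xfffff, 0x0000f);
	odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x32, 0xfffff,
		       enable ? 0x77f82 : 0x37f82);	/* LNA back / bypassed */
	odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, 0x80000, 0x0);	/* close gate */
}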
-
-void phydm_set_trx_mux(void *dm_void, enum phydm_trx_mux_type tx_mode,
- enum phydm_trx_mux_type rx_mode)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- /*set TXmod to standby mode to remove outside noise effect*/
- odm_set_bb_reg(dm, ODM_REG_CCK_RPT_FORMAT_11N,
- BIT(3) | BIT(2) | BIT(1), tx_mode);
- /*set RXmod to standby mode to remove outside noise effect*/
- odm_set_bb_reg(dm, ODM_REG_CCK_RPT_FORMAT_11N,
- BIT(22) | BIT(21) | BIT(20), rx_mode);
- if (dm->rf_type > ODM_1T1R) {
- /*set TXmod to standby mode to rm outside noise effect*/
- odm_set_bb_reg(dm, ODM_REG_CCK_RPT_FORMAT_11N_B,
- BIT(3) | BIT(2) | BIT(1), tx_mode);
- /*set RXmod to standby mode to rm outside noise effect*/
- odm_set_bb_reg(dm, ODM_REG_CCK_RPT_FORMAT_11N_B,
- BIT(22) | BIT(21) | BIT(20), rx_mode);
- }
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- /*set TXmod to standby mode to remove outside noise effect*/
- odm_set_bb_reg(dm, ODM_REG_TRMUX_11AC,
- BIT(11) | BIT(10) | BIT(9) | BIT(8), tx_mode);
- /*set RXmod to standby mode to remove outside noise effect*/
- odm_set_bb_reg(dm, ODM_REG_TRMUX_11AC,
- BIT(7) | BIT(6) | BIT(5) | BIT(4), rx_mode);
- if (dm->rf_type > ODM_1T1R) {
- /*set TXmod to standby mode to rm outside noise effect*/
- odm_set_bb_reg(dm, ODM_REG_TRMUX_11AC_B,
- BIT(11) | BIT(10) | BIT(9) | BIT(8),
- tx_mode);
- /*set RXmod to standby mode to rm outside noise effect*/
- odm_set_bb_reg(dm, ODM_REG_TRMUX_11AC_B,
- BIT(7) | BIT(6) | BIT(5) | BIT(4),
- rx_mode);
- }
- }
-}
-
-void phydm_mac_edcca_state(void *dm_void, enum phydm_mac_edcca_type state)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (state == phydm_ignore_edcca) {
- /*ignore EDCCA reg520[15]=1*/
- odm_set_mac_reg(dm, REG_TX_PTCL_CTRL, BIT(15), 1);
- } else { /*don't set MAC ignore EDCCA signal*/
- /*don't ignore EDCCA reg520[15]=0*/
- odm_set_mac_reg(dm, REG_TX_PTCL_CTRL, BIT(15), 0);
- }
- ODM_RT_TRACE(dm, PHYDM_COMP_ADAPTIVITY, "EDCCA enable state = %d\n",
- state);
-}
-
-bool phydm_cal_nhm_cnt(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u16 base = 0;
-
- base = dm->nhm_cnt_0 + dm->nhm_cnt_1;
-
- if (base != 0) {
- dm->nhm_cnt_0 = ((dm->nhm_cnt_0) << 8) / base;
- dm->nhm_cnt_1 = ((dm->nhm_cnt_1) << 8) / base;
- }
- if ((dm->nhm_cnt_0 - dm->nhm_cnt_1) >= 100)
- return true; /*clean environment*/
- else
- return false; /*noisy environment*/
-}
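phydm_cal_nhm_cnt() rescales the two NHM buckets to a 0..256 range and declares the air clean when bucket 0 leads bucket 1 by at least 100, i.e. roughly 39% of the samples. A worked, self-contained sketch of the same arithmetic (u16 locals are used here; the deleted code writes the scaled values back into u8 fields):

/* Worked example: nhm_cnt_0 = 180, nhm_cnt_1 = 60
 *   base = 240, scaled0 = (180 << 8) / 240 = 192, scaled1 = 64
 *   192 - 64 = 128 >= 100  ->  clean environment
 */
#include <linux/types.h>

static bool nhm_env_is_clean(u8 cnt0, u8 cnt1)
{
	u16 base = cnt0 + cnt1;
	u16 s0 = cnt0, s1 = cnt1;

	if (base != 0) {
		s0 = (s0 << 8) / base;
		s1 = (s1 << 8) / base;
	}
	return (s0 - s1) >= 100;	/* negative differences compare false */
}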
-
-void phydm_check_environment(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
- bool is_clean_environment = false;
-
- if (adaptivity->is_first_link) {
- if (dm->support_ic_type &
- (ODM_IC_11AC_GAIN_IDX_EDCCA | ODM_IC_11N_GAIN_IDX_EDCCA))
- dm->adaptivity_flag = false;
- else
- dm->adaptivity_flag = true;
-
- adaptivity->is_first_link = false;
- return;
- }
-
- if (adaptivity->nhm_wait < 3) { /*evaluate NHM only after 4 nhm_wait rounds*/
- adaptivity->nhm_wait++;
- phydm_nhm_counter_statistics(dm);
- return;
- }
-
- phydm_nhm_counter_statistics(dm);
- is_clean_environment = phydm_cal_nhm_cnt(dm);
-
- if (is_clean_environment) {
- dm->th_l2h_ini =
- adaptivity->th_l2h_ini_backup; /*adaptivity mode*/
- dm->th_edcca_hl_diff = adaptivity->th_edcca_hl_diff_backup;
-
- dm->adaptivity_enable = true;
-
- if (dm->support_ic_type &
- (ODM_IC_11AC_GAIN_IDX_EDCCA | ODM_IC_11N_GAIN_IDX_EDCCA))
- dm->adaptivity_flag = false;
- else
- dm->adaptivity_flag = true;
- } else {
- if (!adaptivity->acs_for_adaptivity) {
- dm->th_l2h_ini = dm->th_l2h_ini_mode2; /*mode2*/
- dm->th_edcca_hl_diff = dm->th_edcca_hl_diff_mode2;
-
- dm->adaptivity_flag = false;
- dm->adaptivity_enable = false;
- }
- }
-
- adaptivity->nhm_wait = 0;
- adaptivity->is_first_link = true;
- adaptivity->is_check = true;
-}
-
-void phydm_search_pwdb_lower_bound(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
- u32 value32 = 0, reg_value32 = 0;
- u8 cnt, try_count = 0;
- u8 tx_edcca1 = 0, tx_edcca0 = 0;
- bool is_adjust = true;
- s8 th_l2h_dmc, th_h2l_dmc, igi_target = 0x32;
- s8 diff;
- u8 IGI = adaptivity->igi_base + 30 + (u8)dm->th_l2h_ini -
- (u8)dm->th_edcca_hl_diff;
-
- if (dm->support_ic_type & (ODM_RTL8723B | ODM_RTL8188E | ODM_RTL8192E |
- ODM_RTL8812 | ODM_RTL8821 | ODM_RTL8881A)) {
- phydm_set_lna(dm, phydm_disable_lna);
- } else {
- phydm_set_trx_mux(dm, phydm_standby_mode, phydm_standby_mode);
- odm_pause_dig(dm, PHYDM_PAUSE, PHYDM_PAUSE_LEVEL_0, 0x7e);
- }
-
- diff = igi_target - (s8)IGI;
- th_l2h_dmc = dm->th_l2h_ini + diff;
- if (th_l2h_dmc > 10)
- th_l2h_dmc = 10;
- th_h2l_dmc = th_l2h_dmc - dm->th_edcca_hl_diff;
-
- phydm_set_edcca_threshold(dm, th_h2l_dmc, th_l2h_dmc);
- ODM_delay_ms(30);
-
- while (is_adjust) {
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11N, MASKDWORD, 0x0);
- reg_value32 =
- odm_get_bb_reg(dm, ODM_REG_RPT_11N, MASKDWORD);
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD,
- 0x0);
- reg_value32 =
- odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- }
- while (reg_value32 & BIT(3) && try_count < 3) {
- ODM_delay_ms(3);
- try_count = try_count + 1;
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- reg_value32 = odm_get_bb_reg(
- dm, ODM_REG_RPT_11N, MASKDWORD);
- else if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- reg_value32 = odm_get_bb_reg(
- dm, ODM_REG_RPT_11AC, MASKDWORD);
- }
- try_count = 0;
-
- for (cnt = 0; cnt < 20; cnt++) {
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11N,
- MASKDWORD, 0x208);
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11N,
- MASKDWORD);
- } else if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC,
- MASKDWORD, 0x209);
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC,
- MASKDWORD);
- }
- if (value32 & BIT(30) &&
- (dm->support_ic_type &
- (ODM_RTL8723B | ODM_RTL8188E)))
- tx_edcca1 = tx_edcca1 + 1;
- else if (value32 & BIT(29))
- tx_edcca1 = tx_edcca1 + 1;
- else
- tx_edcca0 = tx_edcca0 + 1;
- }
-
- if (tx_edcca1 > 1) {
- IGI = IGI - 1;
- th_l2h_dmc = th_l2h_dmc + 1;
- if (th_l2h_dmc > 10)
- th_l2h_dmc = 10;
- th_h2l_dmc = th_l2h_dmc - dm->th_edcca_hl_diff;
-
- phydm_set_edcca_threshold(dm, th_h2l_dmc, th_l2h_dmc);
- if (th_l2h_dmc == 10) {
- is_adjust = false;
- adaptivity->h2l_lb = th_h2l_dmc;
- adaptivity->l2h_lb = th_l2h_dmc;
- dm->adaptivity_igi_upper = IGI;
- }
-
- tx_edcca1 = 0;
- tx_edcca0 = 0;
-
- } else {
- is_adjust = false;
- adaptivity->h2l_lb = th_h2l_dmc;
- adaptivity->l2h_lb = th_l2h_dmc;
- dm->adaptivity_igi_upper = IGI;
- tx_edcca1 = 0;
- tx_edcca0 = 0;
- }
- }
-
- dm->adaptivity_igi_upper = dm->adaptivity_igi_upper - dm->dc_backoff;
- adaptivity->h2l_lb = adaptivity->h2l_lb + dm->dc_backoff;
- adaptivity->l2h_lb = adaptivity->l2h_lb + dm->dc_backoff;
-
- if (dm->support_ic_type & (ODM_RTL8723B | ODM_RTL8188E | ODM_RTL8192E |
- ODM_RTL8812 | ODM_RTL8821 | ODM_RTL8881A)) {
- phydm_set_lna(dm, phydm_enable_lna);
- } else {
- phydm_set_trx_mux(dm, phydm_tx_mode, phydm_rx_mode);
- odm_pause_dig(dm, PHYDM_RESUME, PHYDM_PAUSE_LEVEL_0, NONE);
- }
-
- phydm_set_edcca_threshold(dm, 0x7f, 0x7f); /*resume to no link state*/
-}
-
-static bool phydm_re_search_condition(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 adaptivity_igi_upper;
- u8 count = 0;
-
- adaptivity_igi_upper = dm->adaptivity_igi_upper + dm->dc_backoff;
-
- if (adaptivity_igi_upper <= 0x26 && count < 3) {
- count = count + 1;
- return true;
- }
-
- return false;
-}
-
-void phydm_adaptivity_info_init(void *dm_void, enum phydm_adapinfo cmn_info,
- u32 value)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
-
- switch (cmn_info) {
- case PHYDM_ADAPINFO_CARRIER_SENSE_ENABLE:
- dm->carrier_sense_enable = (bool)value;
- break;
-
- case PHYDM_ADAPINFO_DCBACKOFF:
- dm->dc_backoff = (u8)value;
- break;
-
- case PHYDM_ADAPINFO_DYNAMICLINKADAPTIVITY:
- adaptivity->dynamic_link_adaptivity = (bool)value;
- break;
-
- case PHYDM_ADAPINFO_TH_L2H_INI:
- dm->th_l2h_ini = (s8)value;
- break;
-
- case PHYDM_ADAPINFO_TH_EDCCA_HL_DIFF:
- dm->th_edcca_hl_diff = (s8)value;
- break;
-
- case PHYDM_ADAPINFO_AP_NUM_TH:
- adaptivity->ap_num_th = (u8)value;
- break;
-
- default:
- break;
- }
-}
-
-void phydm_adaptivity_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
- s8 igi_target = 0x32;
-
- if (!dm->carrier_sense_enable) {
- if (dm->th_l2h_ini == 0)
- dm->th_l2h_ini = 0xf5;
- } else {
- dm->th_l2h_ini = 0xa;
- }
-
- if (dm->th_edcca_hl_diff == 0)
- dm->th_edcca_hl_diff = 7;
- if (dm->wifi_test || dm->mp_mode) {
- /*even with no adaptivity we still enable EDCCA; the AP controls it via MIB*/
- dm->edcca_enable = false;
- } else {
- dm->edcca_enable = true;
- }
-
- dm->adaptivity_igi_upper = 0;
- dm->adaptivity_enable =
- false; /*use this flag to decide enable or disable*/
-
- dm->th_l2h_ini_mode2 = 20;
- dm->th_edcca_hl_diff_mode2 = 8;
- adaptivity->th_l2h_ini_backup = dm->th_l2h_ini;
- adaptivity->th_edcca_hl_diff_backup = dm->th_edcca_hl_diff;
-
- adaptivity->igi_base = 0x32;
- adaptivity->igi_target = 0x1c;
- adaptivity->h2l_lb = 0;
- adaptivity->l2h_lb = 0;
- adaptivity->nhm_wait = 0;
- adaptivity->is_check = false;
- adaptivity->is_first_link = true;
- adaptivity->adajust_igi_level = 0;
- adaptivity->is_stop_edcca = false;
- adaptivity->backup_h2l = 0;
- adaptivity->backup_l2h = 0;
-
- phydm_mac_edcca_state(dm, phydm_dont_ignore_edcca);
-
- /*Search pwdB lower bound*/
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11N, MASKDWORD, 0x208);
- else if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0x209);
-
- if (dm->support_ic_type & ODM_IC_11N_GAIN_IDX_EDCCA) {
- if (dm->support_ic_type & ODM_RTL8197F) {
- /*set to page B1*/
- odm_set_bb_reg(dm, ODM_REG_PAGE_B1_97F, BIT(30), 0x1);
- /*0:rx_dfir, 1: dcnf_out, 2 :rx_iq, 3: rx_nbi_nf_out*/
- odm_set_bb_reg(dm, ODM_REG_EDCCA_DCNF_97F,
- BIT(27) | BIT(26), 0x1);
- odm_set_bb_reg(dm, ODM_REG_PAGE_B1_97F, BIT(30), 0x0);
- } else {
- /*0:rx_dfir, 1: dcnf_out, 2 :rx_iq, 3: rx_nbi_nf_out*/
- odm_set_bb_reg(dm, ODM_REG_EDCCA_DCNF_11N,
- BIT(21) | BIT(20), 0x1);
- }
- }
- /*8814A may not need the pwdB lower-bound search*/
- if (dm->support_ic_type & ODM_IC_11AC_GAIN_IDX_EDCCA) {
- /*0:rx_dfir, 1: dcnf_out, 2 :rx_iq, 3: rx_nbi_nf_out*/
- odm_set_bb_reg(dm, ODM_REG_ACBB_EDCCA_ENHANCE,
- BIT(29) | BIT(28), 0x1);
- }
-
- if (!(dm->support_ic_type &
- (ODM_IC_11AC_GAIN_IDX_EDCCA | ODM_IC_11N_GAIN_IDX_EDCCA))) {
- phydm_search_pwdb_lower_bound(dm);
- if (phydm_re_search_condition(dm))
- phydm_search_pwdb_lower_bound(dm);
- }
-
- /*we need to consider the PwdB upper bound for 8814 and later ICs*/
- adaptivity->adajust_igi_level =
- (u8)((dm->th_l2h_ini + igi_target) - pwdb_upper_bound +
- dfir_loss); /*IGI = L2H - PwdB - dfir_loss*/
-
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "th_l2h_ini = 0x%x, th_edcca_hl_diff = 0x%x, adaptivity->adajust_igi_level = 0x%x\n",
- dm->th_l2h_ini, dm->th_edcca_hl_diff,
- adaptivity->adajust_igi_level);
-
- /*Check this later on Windows*/
- /*phydm_set_edcca_threshold_api(dm, dig_tab->cur_ig_value);*/
-}
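With the defaults established above (th_l2h_ini = 0xf5, i.e. -11 as s8, and igi_target = 0x32 = 50) and the header constants pwdb_upper_bound = 7 and dfir_loss = 5, the adajust_igi_level computed at the end works out to (-11 + 50) - 7 + 5 = 37 = 0x25. A tiny sketch of that single line with the constants inlined; the helper is illustrative only:

/* adajust_igi_level = (th_l2h_ini + igi_target) - pwdb_upper_bound + dfir_loss
 * e.g. (-11 + 50) - 7 + 5 = 37 = 0x25 for the non-carrier-sense defaults.
 */
#include <linux/types.h>

static u8 adaptivity_adjust_igi_level(s8 th_l2h_ini, s8 igi_target)
{
	return (u8)((th_l2h_ini + igi_target) - 7 /* pwdb_upper_bound */ +
		    5 /* dfir_loss */);
}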
-
-void phydm_adaptivity(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- u8 IGI = dig_tab->cur_ig_value;
- s8 th_l2h_dmc, th_h2l_dmc;
- s8 diff = 0, igi_target;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
-
- if (!dm->edcca_enable || adaptivity->is_stop_edcca) {
- ODM_RT_TRACE(dm, PHYDM_COMP_ADAPTIVITY, "Disable EDCCA!!!\n");
- return;
- }
-
- if (!(dm->support_ability & ODM_BB_ADAPTIVITY)) {
- ODM_RT_TRACE(dm, PHYDM_COMP_ADAPTIVITY,
- "adaptivity disable, enable EDCCA mode!!!\n");
- dm->th_l2h_ini = dm->th_l2h_ini_mode2;
- dm->th_edcca_hl_diff = dm->th_edcca_hl_diff_mode2;
- }
-
- ODM_RT_TRACE(dm, PHYDM_COMP_ADAPTIVITY, "%s() =====>\n", __func__);
- ODM_RT_TRACE(dm, PHYDM_COMP_ADAPTIVITY,
- "igi_base=0x%x, th_l2h_ini = %d, th_edcca_hl_diff = %d\n",
- adaptivity->igi_base, dm->th_l2h_ini,
- dm->th_edcca_hl_diff);
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- /*fix hang issue on AC series when EDCCA is enabled*/
- odm_set_bb_reg(dm, 0x800, BIT(10), 1); /*ADC_mask disable*/
- odm_set_bb_reg(dm, 0x800, BIT(10), 0); /*ADC_mask enable*/
- }
- if (*dm->band_width == ODM_BW20M) /*CHANNEL_WIDTH_20*/
- igi_target = adaptivity->igi_base;
- else if (*dm->band_width == ODM_BW40M)
- igi_target = adaptivity->igi_base + 2;
- else if (*dm->band_width == ODM_BW80M)
- igi_target = adaptivity->igi_base + 2;
- else
- igi_target = adaptivity->igi_base;
- adaptivity->igi_target = (u8)igi_target;
-
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "band_width=%s, igi_target=0x%x, dynamic_link_adaptivity = %d, acs_for_adaptivity = %d\n",
- (*dm->band_width == ODM_BW80M) ?
- "80M" :
- ((*dm->band_width == ODM_BW40M) ? "40M" : "20M"),
- igi_target, adaptivity->dynamic_link_adaptivity,
- adaptivity->acs_for_adaptivity);
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "rssi_min = %d, adaptivity->adajust_igi_level= 0x%x, adaptivity_flag = %d, adaptivity_enable = %d\n",
- dm->rssi_min, adaptivity->adajust_igi_level,
- dm->adaptivity_flag, dm->adaptivity_enable);
-
- if (adaptivity->dynamic_link_adaptivity && !dm->is_linked &&
- !dm->adaptivity_enable) {
- phydm_set_edcca_threshold(dm, 0x7f, 0x7f);
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "In DynamicLink mode(noisy) and No link, Turn off EDCCA!!\n");
- return;
- }
-
- if (dm->support_ic_type &
- (ODM_IC_11AC_GAIN_IDX_EDCCA | ODM_IC_11N_GAIN_IDX_EDCCA)) {
- if (adaptivity->adajust_igi_level > IGI &&
- dm->adaptivity_enable)
- diff = adaptivity->adajust_igi_level - IGI;
-
- th_l2h_dmc = dm->th_l2h_ini - diff + igi_target;
- th_h2l_dmc = th_l2h_dmc - dm->th_edcca_hl_diff;
- } else {
- diff = igi_target - (s8)IGI;
- th_l2h_dmc = dm->th_l2h_ini + diff;
- if (th_l2h_dmc > 10 && dm->adaptivity_enable)
- th_l2h_dmc = 10;
-
- th_h2l_dmc = th_l2h_dmc - dm->th_edcca_hl_diff;
-
- /*clamp to the lower bound to prevent EDCCA from staying asserted*/
- if (th_h2l_dmc < adaptivity->h2l_lb)
- th_h2l_dmc = adaptivity->h2l_lb;
- if (th_l2h_dmc < adaptivity->l2h_lb)
- th_l2h_dmc = adaptivity->l2h_lb;
- }
- ODM_RT_TRACE(dm, PHYDM_COMP_ADAPTIVITY,
- "IGI=0x%x, th_l2h_dmc = %d, th_h2l_dmc = %d\n", IGI,
- th_l2h_dmc, th_h2l_dmc);
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "adaptivity_igi_upper=0x%x, h2l_lb = 0x%x, l2h_lb = 0x%x\n",
- dm->adaptivity_igi_upper, adaptivity->h2l_lb,
- adaptivity->l2h_lb);
-
- phydm_set_edcca_threshold(dm, th_h2l_dmc, th_l2h_dmc);
-
- if (dm->adaptivity_enable)
- odm_set_mac_reg(dm, REG_RD_CTRL, BIT(11), 1);
-}
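The threshold derivation above has two paths: gain-index-EDCCA parts subtract any IGI headroom (adajust_igi_level - IGI) from th_l2h_ini + igi_target, while the other parts add (igi_target - IGI) to th_l2h_ini, cap L2H at 10, and clamp both values to the lower bounds found during init. A compact sketch with the driver state passed in as plain arguments (an illustration, not the driver API):

/* Sketch of the EDCCA threshold derivation above. gain_idx_edcca selects the
 * ODM_IC_*_GAIN_IDX_EDCCA path; the outputs go to phydm_set_edcca_threshold().
 */
#include <linux/types.h>

static void derive_edcca_th(bool gain_idx_edcca, bool adaptivity_enable,
			    s8 th_l2h_ini, s8 th_hl_diff, s8 igi_target,
			    u8 igi, u8 adjust_igi_level,
			    s8 h2l_lb, s8 l2h_lb,
			    s8 *th_h2l, s8 *th_l2h)
{
	s8 diff = 0;

	if (gain_idx_edcca) {
		if (adjust_igi_level > igi && adaptivity_enable)
			diff = adjust_igi_level - igi;
		*th_l2h = th_l2h_ini - diff + igi_target;
	} else {
		diff = igi_target - (s8)igi;
		*th_l2h = th_l2h_ini + diff;
		if (*th_l2h > 10 && adaptivity_enable)
			*th_l2h = 10;
	}
	*th_h2l = *th_l2h - th_hl_diff;

	if (!gain_idx_edcca) {
		/* clamp to the lower bounds found by the pwdB search */
		if (*th_h2l < h2l_lb)
			*th_h2l = h2l_lb;
		if (*th_l2h < l2h_lb)
			*th_l2h = l2h_lb;
	}
}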
-
-/*This solves a USB Tx failure caused by USB 3.0 interference in the 2.4G band*/
-void phydm_pause_edcca(void *dm_void, bool is_pasue_edcca)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- u8 IGI = dig_tab->cur_ig_value;
- s8 diff = 0;
-
- if (is_pasue_edcca) {
- adaptivity->is_stop_edcca = true;
-
- if (dm->support_ic_type &
- (ODM_IC_11AC_GAIN_IDX_EDCCA | ODM_IC_11N_GAIN_IDX_EDCCA)) {
- if (adaptivity->adajust_igi_level > IGI)
- diff = adaptivity->adajust_igi_level - IGI;
-
- adaptivity->backup_l2h =
- dm->th_l2h_ini - diff + adaptivity->igi_target;
- adaptivity->backup_h2l =
- adaptivity->backup_l2h - dm->th_edcca_hl_diff;
- } else {
- diff = adaptivity->igi_target - (s8)IGI;
- adaptivity->backup_l2h = dm->th_l2h_ini + diff;
- if (adaptivity->backup_l2h > 10)
- adaptivity->backup_l2h = 10;
-
- adaptivity->backup_h2l =
- adaptivity->backup_l2h - dm->th_edcca_hl_diff;
-
- /*clamp to the lower bound to prevent EDCCA from staying asserted*/
- if (adaptivity->backup_h2l < adaptivity->h2l_lb)
- adaptivity->backup_h2l = adaptivity->h2l_lb;
- if (adaptivity->backup_l2h < adaptivity->l2h_lb)
- adaptivity->backup_l2h = adaptivity->l2h_lb;
- }
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "pauseEDCCA : L2Hbak = 0x%x, H2Lbak = 0x%x, IGI = 0x%x\n",
- adaptivity->backup_l2h, adaptivity->backup_h2l, IGI);
-
- /*Disable EDCCA*/
- phydm_pause_edcca_work_item_callback(dm);
-
- } else {
- adaptivity->is_stop_edcca = false;
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "resumeEDCCA : L2Hbak = 0x%x, H2Lbak = 0x%x, IGI = 0x%x\n",
- adaptivity->backup_l2h, adaptivity->backup_h2l, IGI);
- /*Resume EDCCA*/
- phydm_resume_edcca_work_item_callback(dm);
- }
-}
-
-void phydm_pause_edcca_work_item_callback(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- odm_set_bb_reg(dm, REG_OFDM_0_ECCA_THRESHOLD,
- MASKBYTE2 | MASKBYTE0, (u32)(0x7f | 0x7f << 16));
- else if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- odm_set_bb_reg(dm, REG_FPGA0_XB_LSSI_READ_BACK, MASKLWORD,
- (u16)(0x7f | 0x7f << 8));
-}
-
-void phydm_resume_edcca_work_item_callback(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- odm_set_bb_reg(dm, REG_OFDM_0_ECCA_THRESHOLD,
- MASKBYTE2 | MASKBYTE0,
- (u32)((u8)adaptivity->backup_l2h |
- (u8)adaptivity->backup_h2l << 16));
- else if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- odm_set_bb_reg(dm, REG_FPGA0_XB_LSSI_READ_BACK, MASKLWORD,
- (u16)((u8)adaptivity->backup_l2h |
- (u8)adaptivity->backup_h2l << 8));
-}
-
-void phydm_set_edcca_threshold_api(void *dm_void, u8 IGI)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct adaptivity_statistics *adaptivity =
- (struct adaptivity_statistics *)phydm_get_structure(
- dm, PHYDM_ADAPTIVITY);
- s8 th_l2h_dmc, th_h2l_dmc;
- s8 diff = 0, igi_target = 0x32;
-
- if (dm->support_ability & ODM_BB_ADAPTIVITY) {
- if (dm->support_ic_type &
- (ODM_IC_11AC_GAIN_IDX_EDCCA | ODM_IC_11N_GAIN_IDX_EDCCA)) {
- if (adaptivity->adajust_igi_level > IGI)
- diff = adaptivity->adajust_igi_level - IGI;
-
- th_l2h_dmc = dm->th_l2h_ini - diff + igi_target;
- th_h2l_dmc = th_l2h_dmc - dm->th_edcca_hl_diff;
- } else {
- diff = igi_target - (s8)IGI;
- th_l2h_dmc = dm->th_l2h_ini + diff;
- if (th_l2h_dmc > 10)
- th_l2h_dmc = 10;
-
- th_h2l_dmc = th_l2h_dmc - dm->th_edcca_hl_diff;
-
- /*clamp to the lower bound to prevent EDCCA from staying asserted*/
- if (th_h2l_dmc < adaptivity->h2l_lb)
- th_h2l_dmc = adaptivity->h2l_lb;
- if (th_l2h_dmc < adaptivity->l2h_lb)
- th_l2h_dmc = adaptivity->l2h_lb;
- }
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "API :IGI=0x%x, th_l2h_dmc = %d, th_h2l_dmc = %d\n",
- IGI, th_l2h_dmc, th_h2l_dmc);
- ODM_RT_TRACE(
- dm, PHYDM_COMP_ADAPTIVITY,
- "API :adaptivity_igi_upper=0x%x, h2l_lb = 0x%x, l2h_lb = 0x%x\n",
- dm->adaptivity_igi_upper, adaptivity->h2l_lb,
- adaptivity->l2h_lb);
-
- phydm_set_edcca_threshold(dm, th_h2l_dmc, th_l2h_dmc);
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.h b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.h
deleted file mode 100644
index a88c34cd30e0..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMADAPTIVITY_H__
-#define __PHYDMADAPTIVITY_H__
-
-/*20160902 changed by Kevin, refine method for searching pwdb lower bound*/
-#define ADAPTIVITY_VERSION "9.3.5"
-
-#define pwdb_upper_bound 7
-#define dfir_loss 5
-
-enum phydm_adapinfo {
- PHYDM_ADAPINFO_CARRIER_SENSE_ENABLE = 0,
- PHYDM_ADAPINFO_DCBACKOFF,
- PHYDM_ADAPINFO_DYNAMICLINKADAPTIVITY,
- PHYDM_ADAPINFO_TH_L2H_INI,
- PHYDM_ADAPINFO_TH_EDCCA_HL_DIFF,
- PHYDM_ADAPINFO_AP_NUM_TH
-
-};
-
-enum phydm_set_lna {
- phydm_disable_lna = 0,
- phydm_enable_lna = 1,
-};
-
-enum phydm_trx_mux_type {
- phydm_shutdown = 0,
- phydm_standby_mode = 1,
- phydm_tx_mode = 2,
- phydm_rx_mode = 3
-};
-
-enum phydm_mac_edcca_type {
- phydm_ignore_edcca = 0,
- phydm_dont_ignore_edcca = 1
-};
-
-struct adaptivity_statistics {
- s8 th_l2h_ini_backup;
- s8 th_edcca_hl_diff_backup;
- s8 igi_base;
- u8 igi_target;
- u8 nhm_wait;
- s8 h2l_lb;
- s8 l2h_lb;
- bool is_first_link;
- bool is_check;
- bool dynamic_link_adaptivity;
- u8 ap_num_th;
- u8 adajust_igi_level;
- bool acs_for_adaptivity;
- s8 backup_l2h;
- s8 backup_h2l;
- bool is_stop_edcca;
-};
-
-void phydm_pause_edcca(void *dm_void, bool is_pasue_edcca);
-
-void phydm_check_adaptivity(void *dm_void);
-
-void phydm_check_environment(void *dm_void);
-
-void phydm_nhm_counter_statistics_init(void *dm_void);
-
-void phydm_nhm_counter_statistics(void *dm_void);
-
-void phydm_nhm_counter_statistics_reset(void *dm_void);
-
-void phydm_get_nhm_counter_statistics(void *dm_void);
-
-void phydm_mac_edcca_state(void *dm_void, enum phydm_mac_edcca_type state);
-
-void phydm_set_edcca_threshold(void *dm_void, s8 H2L, s8 L2H);
-
-void phydm_set_trx_mux(void *dm_void, enum phydm_trx_mux_type tx_mode,
- enum phydm_trx_mux_type rx_mode);
-
-bool phydm_cal_nhm_cnt(void *dm_void);
-
-void phydm_search_pwdb_lower_bound(void *dm_void);
-
-void phydm_adaptivity_info_init(void *dm_void, enum phydm_adapinfo cmn_info,
- u32 value);
-
-void phydm_adaptivity_init(void *dm_void);
-
-void phydm_adaptivity(void *dm_void);
-
-void phydm_set_edcca_threshold_api(void *dm_void, u8 IGI);
-
-void phydm_pause_edcca_work_item_callback(void *dm_void);
-
-void phydm_resume_edcca_work_item_callback(void *dm_void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c b/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c
deleted file mode 100644
index f2468b82f745..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c
+++ /dev/null
@@ -1,616 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-static bool phydm_la_buffer_allocate(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- struct rt_adcsmp_string *adc_smp_buf = &adc_smp->adc_smp_buf;
- bool ret = false;
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, "[LA mode BufferAllocate]\n");
-
- if (adc_smp_buf->length == 0) {
- odm_allocate_memory(dm, (void **)&adc_smp_buf->octet,
- adc_smp_buf->buffer_size);
- if (!adc_smp_buf->octet) {
- ret = false;
- } else {
- adc_smp_buf->length = adc_smp_buf->buffer_size;
- ret = true;
- }
- }
-
- return ret;
-}
-
-static void phydm_la_get_tx_pkt_buf(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- struct rt_adcsmp_string *adc_smp_buf = &adc_smp->adc_smp_buf;
- u32 i = 0, value32, data_l = 0, data_h = 0;
- u32 addr, finish_addr;
- u32 end_addr = (adc_smp_buf->start_pos + adc_smp_buf->buffer_size) -
- 1; /*end_addr = 0x3ffff;*/
- bool is_round_up;
- static u32 page = 0xFF;
- u32 smp_cnt = 0, smp_number = 0, addr_8byte = 0;
-
- odm_memory_set(dm, adc_smp_buf->octet, 0, adc_smp_buf->length);
- odm_write_1byte(dm, 0x0106, 0x69);
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, "GetTxPktBuf\n");
-
- value32 = odm_read_4byte(dm, 0x7c0);
- is_round_up = (bool)((value32 & BIT(31)) >> 31);
- /*Reg7C0[30:16]: finish addr (unit: 8byte)*/
- finish_addr = (value32 & 0x7FFF0000) >> 16;
-
- if (is_round_up) {
- addr = (finish_addr + 1) << 3;
- ODM_RT_TRACE(
- dm, ODM_COMP_UNCOND,
- "is_round_up = ((%d)), finish_addr=((0x%x)), 0x7c0=((0x%x))\n",
- is_round_up, finish_addr, value32);
- /*Byte to 64Byte*/
- smp_number = ((adc_smp_buf->buffer_size) >> 3);
- } else {
- addr = adc_smp_buf->start_pos;
-
- addr_8byte = addr >> 3;
- if (addr_8byte > finish_addr)
- smp_number = addr_8byte - finish_addr;
- else
- smp_number = finish_addr - addr_8byte;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_UNCOND,
- "is_round_up = ((%d)), finish_addr=((0x%x * 8Byte)), Start_Addr = ((0x%x * 8Byte)), smp_number = ((%d))\n",
- is_round_up, finish_addr, addr_8byte, smp_number);
- }
-
- if (dm->support_ic_type & ODM_RTL8197F) {
- /*64K byte*/
- for (addr = 0x0, i = 0; addr < end_addr; addr += 8, i += 2) {
- if ((addr & 0xfff) == 0)
- odm_set_bb_reg(dm, 0x0140, MASKLWORD,
- 0x780 + (addr >> 12));
- data_l = odm_get_bb_reg(dm, 0x8000 + (addr & 0xfff),
- MASKDWORD);
- data_h = odm_get_bb_reg(dm, 0x8000 + (addr & 0xfff) + 4,
- MASKDWORD);
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, "%08x%08x\n", data_h,
- data_l);
- }
- } else {
- while (addr != (finish_addr << 3)) {
- if (page != (addr >> 12)) {
- /*Reg140=0x780+(addr>>12),
- *addr=0x30~0x3F, total 16 pages
- */
- page = (addr >> 12);
- }
- odm_set_bb_reg(dm, 0x0140, MASKLWORD, 0x780 + page);
-
- /*pDataL = 0x8000+(addr&0xfff);*/
- data_l = odm_get_bb_reg(dm, 0x8000 + (addr & 0xfff),
- MASKDWORD);
- data_h = odm_get_bb_reg(dm, 0x8000 + (addr & 0xfff) + 4,
- MASKDWORD);
-
- adc_smp_buf->octet[i] = data_h;
- adc_smp_buf->octet[i + 1] = data_l;
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, "%08x%08x\n", data_h,
- data_l);
-
- i = i + 2;
-
- if ((addr + 8) >= end_addr)
- addr = adc_smp_buf->start_pos;
- else
- addr = addr + 8;
-
- smp_cnt++;
- if (smp_cnt >= (smp_number - 1))
- break;
- }
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, "smp_cnt = ((%d))\n",
- smp_cnt);
- }
-}
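The dump above treats the TX packet buffer as a ring: 0x7c0[31] reports whether the write pointer wrapped, 0x7c0[30:16] is the finish address in 8-byte units, and the read cursor starts either just past the finish address (wrapped) or at start_pos (not wrapped), for smp_number 8-byte entries. A sketch of just that decode; the helper name is invented for illustration:

/* Sketch of the ring-buffer start/count decode performed on reg 0x7c0.
 * Returns the byte address to start reading from and fills *smp_number
 * with the number of 8-byte samples to fetch.
 */
#include <linux/types.h>

static u32 la_dump_start(u32 reg7c0, u32 start_pos, u32 buffer_size,
			 u32 *smp_number)
{
	bool round_up = (reg7c0 >> 31) & 1;
	u32 finish_addr = (reg7c0 & 0x7FFF0000) >> 16;	/* 8-byte units */
	u32 addr;

	if (round_up) {
		addr = (finish_addr + 1) << 3;		/* oldest sample */
		*smp_number = buffer_size >> 3;		/* whole buffer valid */
	} else {
		addr = start_pos;
		*smp_number = (addr >> 3) > finish_addr ?
			      (addr >> 3) - finish_addr :
			      finish_addr - (addr >> 3);
	}
	return addr;
}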
-
-static void phydm_la_mode_set_mac_iq_dump(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- u32 reg_value;
-
- odm_write_1byte(dm, 0x7c0, 0); /*clear all 0x7c0*/
- odm_set_mac_reg(dm, 0x7c0, BIT(0), 1); /*Enable LA mode HW block*/
-
- if (adc_smp->la_trig_mode == PHYDM_MAC_TRIG) {
- adc_smp->is_bb_trigger = 0;
- odm_set_mac_reg(dm, 0x7c0, BIT(2),
- 1); /*polling bit for MAC mode*/
- odm_set_mac_reg(
- dm, 0x7c0, BIT(4) | BIT(3),
- adc_smp->la_trigger_edge); /*trigger mode for MAC*/
-
- ODM_RT_TRACE(
- dm, ODM_COMP_UNCOND,
- "[MAC_trig] ref_mask = ((0x%x)), ref_value = ((0x%x)), dbg_port = ((0x%x))\n",
- adc_smp->la_mac_ref_mask, adc_smp->la_trig_sig_sel,
- adc_smp->la_dbg_port);
- /*[Set MAC Debug Port]*/
- odm_set_mac_reg(dm, 0xF4, BIT(16), 1);
- odm_set_mac_reg(dm, 0x38, 0xff0000, adc_smp->la_dbg_port);
- odm_set_mac_reg(dm, 0x7c4, MASKDWORD, adc_smp->la_mac_ref_mask);
- odm_set_mac_reg(dm, 0x7c8, MASKDWORD, adc_smp->la_trig_sig_sel);
-
- } else {
- adc_smp->is_bb_trigger = 1;
- odm_set_mac_reg(dm, 0x7c0, BIT(1),
- 1); /*polling bit for BB ADC mode*/
-
- if (adc_smp->la_trig_mode == PHYDM_ADC_MAC_TRIG) {
- odm_set_mac_reg(
- dm, 0x7c0, BIT(3),
- 1); /*polling bit for MAC trigger event*/
- odm_set_mac_reg(dm, 0x7c0, BIT(7) | BIT(6),
- adc_smp->la_trig_sig_sel);
-
- if (adc_smp->la_trig_sig_sel == ADCSMP_TRIG_REG)
- odm_set_mac_reg(
- dm, 0x7c0, BIT(5),
- 1); /* manual trigger 0x7C0[5] = 0->1*/
- }
- }
-
- reg_value = odm_get_bb_reg(dm, 0x7c0, 0xff);
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "4. [Set MAC IQ dump] 0x7c0[7:0] = ((0x%x))\n", reg_value);
-}
-
-static void phydm_la_mode_set_dma_type(void *dm_void, u8 la_dma_type)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "2. [LA mode DMA setting] Dma_type = ((%d))\n",
- la_dma_type);
-
- if (dm->support_ic_type & ODM_N_ANTDIV_SUPPORT)
- odm_set_bb_reg(dm, 0x9a0, 0xf00, la_dma_type); /*0x9A0[11:8]*/
- else
- odm_set_bb_reg(dm, odm_adc_trigger_jaguar2, 0xf00,
- la_dma_type); /*0x95C[11:8]*/
-}
-
-static void phydm_adc_smp_start(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- u8 tmp_u1b;
- u8 while_cnt = 0;
- u8 polling_ok = false, target_polling_bit;
-
- phydm_la_mode_bb_setting(dm);
- phydm_la_mode_set_dma_type(dm, adc_smp->la_dma_type);
- phydm_la_mode_set_trigger_time(dm, adc_smp->la_trigger_time);
-
- if (dm->support_ic_type & ODM_RTL8197F) {
- odm_set_bb_reg(dm, 0xd00, BIT(26), 0x1);
- } else { /*for 8814A and 8822B?*/
- odm_write_1byte(dm, 0x198c, 0x7);
- odm_write_1byte(dm, 0x8b4, 0x80);
- /* odm_set_bb_reg(dm, 0x8b4, BIT(7), 1); */
- }
-
- phydm_la_mode_set_mac_iq_dump(dm);
- /* return; */
-
- target_polling_bit = (adc_smp->is_bb_trigger) ? BIT(1) : BIT(2);
- do { /*polling interval is always 100ms; break out of the loop after 2s*/
- tmp_u1b = odm_read_1byte(dm, 0x7c0);
-
- if (adc_smp->adc_smp_state != ADCSMP_STATE_SET) {
- ODM_RT_TRACE(
- dm, ODM_COMP_UNCOND,
- "[state Error] adc_smp_state != ADCSMP_STATE_SET\n");
- break;
-
- } else if (tmp_u1b & target_polling_bit) {
- ODM_delay_ms(100);
- while_cnt = while_cnt + 1;
- continue;
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "[LA Query OK] polling_bit=((0x%x))\n",
- target_polling_bit);
- polling_ok = true;
- if (dm->support_ic_type & ODM_RTL8197F)
- odm_set_bb_reg(dm, 0x7c0, BIT(0), 0x0);
- break;
- }
- } while (while_cnt < 20);
-
- if (adc_smp->adc_smp_state == ADCSMP_STATE_SET) {
- if (polling_ok)
- phydm_la_get_tx_pkt_buf(dm);
- else
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "[Polling timeout]\n");
- }
-
- if (adc_smp->adc_smp_state == ADCSMP_STATE_SET)
- adc_smp->adc_smp_state = ADCSMP_STATE_QUERY;
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "[LA mode] LA_pattern_count = ((%d))\n",
- adc_smp->la_count);
-
- adc_smp_stop(dm);
-
- if (adc_smp->la_count == 0) {
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "LA Dump finished ---------->\n\n\n");
- /**/
- } else {
- adc_smp->la_count--;
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "LA Dump more ---------->\n\n\n");
- adc_smp_set(dm, adc_smp->la_trig_mode, adc_smp->la_trig_sig_sel,
- adc_smp->la_dma_type, adc_smp->la_trigger_time, 0);
- }
-}
-
-void adc_smp_set(void *dm_void, u8 trig_mode, u32 trig_sig_sel,
- u8 dma_data_sig_sel, u32 trigger_time, u16 polling_time)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- bool is_set_success = true;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
-
- adc_smp->la_trig_mode = trig_mode;
- adc_smp->la_trig_sig_sel = trig_sig_sel;
- adc_smp->la_dma_type = dma_data_sig_sel;
- adc_smp->la_trigger_time = trigger_time;
-
- if (adc_smp->adc_smp_state != ADCSMP_STATE_IDLE)
- is_set_success = false;
- else if (adc_smp->adc_smp_buf.length == 0)
- is_set_success = phydm_la_buffer_allocate(dm);
-
- if (is_set_success) {
- adc_smp->adc_smp_state = ADCSMP_STATE_SET;
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "[LA Set Success] LA_State=((%d))\n",
- adc_smp->adc_smp_state);
-
- phydm_adc_smp_start(dm);
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND,
- "[LA Set Fail] LA_State=((%d))\n",
- adc_smp->adc_smp_state);
- }
-}
-
-void adc_smp_query(void *dm_void, void *output, u32 out_len, u32 *pused)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- struct rt_adcsmp_string *adc_smp_buf = &adc_smp->adc_smp_buf;
- u32 used = *pused;
- u32 i;
-
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, "%s adc_smp_state %d", __func__,
- adc_smp->adc_smp_state);
-
- for (i = 0; i < (adc_smp_buf->length >> 2) - 2; i += 2) {
- PHYDM_SNPRINTF(output + used, out_len - used, "%08x%08x\n",
- adc_smp_buf->octet[i],
- adc_smp_buf->octet[i + 1]);
- }
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\n");
- *pused = used;
-}
-
-s32 adc_smp_get_sample_counts(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- struct rt_adcsmp_string *adc_smp_buf = &adc_smp->adc_smp_buf;
-
- return (adc_smp_buf->length >> 2) - 2;
-}
-
-s32 adc_smp_query_single_data(void *dm_void, void *output, u32 out_len,
- u32 index)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- struct rt_adcsmp_string *adc_smp_buf = &adc_smp->adc_smp_buf;
- u32 used = 0;
-
- if (adc_smp->adc_smp_state != ADCSMP_STATE_QUERY) {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Error: la data is not ready yet ...\n");
- return -1;
- }
-
- if (index < ((adc_smp_buf->length >> 2) - 2)) {
- PHYDM_SNPRINTF(output + used, out_len - used, "%08x%08x\n",
- adc_smp_buf->octet[index],
- adc_smp_buf->octet[index + 1]);
- }
- return 0;
-}
-
-void adc_smp_stop(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
-
- adc_smp->adc_smp_state = ADCSMP_STATE_IDLE;
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, "[LA_Stop] LA_state = ((%d))\n",
- adc_smp->adc_smp_state);
-}
-
-void adc_smp_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- struct rt_adcsmp_string *adc_smp_buf = &adc_smp->adc_smp_buf;
-
- adc_smp->adc_smp_state = ADCSMP_STATE_IDLE;
-
- if (dm->support_ic_type & ODM_RTL8814A) {
- adc_smp_buf->start_pos = 0x30000;
- adc_smp_buf->buffer_size = 0x10000;
- } else if (dm->support_ic_type & ODM_RTL8822B) {
- adc_smp_buf->start_pos = 0x20000;
- adc_smp_buf->buffer_size = 0x20000;
- } else if (dm->support_ic_type & ODM_RTL8197F) {
- adc_smp_buf->start_pos = 0x00000;
- adc_smp_buf->buffer_size = 0x10000;
- } else if (dm->support_ic_type & ODM_RTL8821C) {
- adc_smp_buf->start_pos = 0x8000;
- adc_smp_buf->buffer_size = 0x8000;
- }
-}
-
-void adc_smp_de_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- struct rt_adcsmp_string *adc_smp_buf = &adc_smp->adc_smp_buf;
-
- adc_smp_stop(dm);
-
- if (adc_smp_buf->length != 0x0) {
- odm_free_memory(dm, adc_smp_buf->octet, adc_smp_buf->length);
- adc_smp_buf->length = 0x0;
- }
-}
-
-void phydm_la_mode_bb_setting(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
-
- u8 trig_mode = adc_smp->la_trig_mode;
- u32 trig_sig_sel = adc_smp->la_trig_sig_sel;
- u32 dbg_port = adc_smp->la_dbg_port;
- u8 is_trigger_edge = adc_smp->la_trigger_edge;
- u8 sampling_rate = adc_smp->la_smp_rate;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_UNCOND,
- "1. [LA mode bb_setting] trig_mode = ((%d)), dbg_port = ((0x%x)), Trig_Edge = ((%d)), smp_rate = ((%d)), Trig_Sel = ((0x%x))\n",
- trig_mode, dbg_port, is_trigger_edge, sampling_rate,
- trig_sig_sel);
-
- if (trig_mode == PHYDM_MAC_TRIG)
- trig_sig_sel = 0; /*ignore this setting*/
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- if (trig_mode == PHYDM_ADC_RF0_TRIG) {
- /*DBGOUT_RFC_a[31:0]*/
- odm_set_bb_reg(dm, 0x8f8,
- BIT(25) | BIT(24) | BIT(23) | BIT(22),
- 9);
- } else if (trig_mode == PHYDM_ADC_RF1_TRIG) {
- /*DBGOUT_RFC_b[31:0]*/
- odm_set_bb_reg(dm, 0x8f8,
- BIT(25) | BIT(24) | BIT(23) | BIT(22),
- 8);
- } else {
- odm_set_bb_reg(dm, 0x8f8,
- BIT(25) | BIT(24) | BIT(23) | BIT(22),
- 0);
- }
- /*
- * (0:) '{ofdm_dbg[31:0]}'
- * (1:) '{cca,crc32_fail,dbg_ofdm[29:0]}'
- * (2:) '{vbon,crc32_fail,dbg_ofdm[29:0]}'
- * (3:) '{cca,crc32_ok,dbg_ofdm[29:0]}'
- * (4:) '{vbon,crc32_ok,dbg_ofdm[29:0]}'
- * (5:) '{dbg_iqk_anta}'
- * (6:) '{cca,ofdm_crc_ok,dbg_dp_anta[29:0]}'
- * (7:) '{dbg_iqk_antb}'
- * (8:) '{DBGOUT_RFC_b[31:0]}'
- * (9:) '{DBGOUT_RFC_a[31:0]}'
- * (a:) '{dbg_ofdm}'
- * (b:) '{dbg_cck}'
- */
-
- /*disable dbg clk gating*/
- odm_set_bb_reg(dm, 0x198C, BIT(2) | BIT(1) | BIT(0), 7);
-
- /*0x95C[4:0], BB debug port bit*/
- odm_set_bb_reg(dm, 0x95C, 0x1f, trig_sig_sel);
- odm_set_bb_reg(dm, 0x8FC, MASKDWORD, dbg_port);
- /*0: posedge, 1: negedge*/
- odm_set_bb_reg(dm, 0x95C, BIT(31), is_trigger_edge);
- odm_set_bb_reg(dm, 0x95c, 0xe0, sampling_rate);
- /* (0:) '80MHz'
- * (1:) '40MHz'
- * (2:) '20MHz'
- * (3:) '10MHz'
- * (4:) '5MHz'
- * (5:) '2.5MHz'
- * (6:) '1.25MHz'
- * (7:) '160MHz (for BW160 ic)'
- */
- } else {
- /*0x9A0[4:0], BB debug port bit*/
- odm_set_bb_reg(dm, 0x9a0, 0x1f, trig_sig_sel);
- odm_set_bb_reg(dm, 0x908, MASKDWORD, dbg_port);
- /*0: posedge, 1: negedge*/
- odm_set_bb_reg(dm, 0x9A0, BIT(31), is_trigger_edge);
- odm_set_bb_reg(dm, 0x9A0, 0xe0, sampling_rate);
- /* (0:) '80MHz'
- * (1:) '40MHz'
- * (2:) '20MHz'
- * (3:) '10MHz'
- * (4:) '5MHz'
- * (5:) '2.5MHz'
- * (6:) '1.25MHz'
- * (7:) '160MHz (for BW160 ic)'
- */
- }
-}
-
-void phydm_la_mode_set_trigger_time(void *dm_void, u32 trigger_time_mu_sec)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 trigger_time_unit_num;
- u32 time_unit = 0;
-
- if (trigger_time_mu_sec < 128)
- time_unit = 0; /*unit: 1mu sec*/
- else if (trigger_time_mu_sec < 256)
- time_unit = 1; /*unit: 2mu sec*/
- else if (trigger_time_mu_sec < 512)
- time_unit = 2; /*unit: 4mu sec*/
- else if (trigger_time_mu_sec < 1024)
- time_unit = 3; /*unit: 8mu sec*/
- else if (trigger_time_mu_sec < 2048)
- time_unit = 4; /*unit: 16mu sec*/
- else if (trigger_time_mu_sec < 4096)
- time_unit = 5; /*unit: 32mu sec*/
- else if (trigger_time_mu_sec < 8192)
- time_unit = 6; /*unit: 64mu sec*/
-
- trigger_time_unit_num = (u8)(trigger_time_mu_sec >> time_unit);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_UNCOND,
- "3. [Set Trigger Time] Trig_Time = ((%d)) * unit = ((2^%d us))\n",
- trigger_time_unit_num, time_unit);
-
- odm_set_mac_reg(dm, 0x7cc, BIT(20) | BIT(19) | BIT(18), time_unit);
- odm_set_mac_reg(dm, 0x7c0, 0x7f00, (trigger_time_unit_num & 0x7f));
-}
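The trigger time is stored as a 7-bit count of a power-of-two microsecond tick: the tick exponent (0..6, i.e. 1 us up to 64 us) goes to 0x7cc[20:18] and the count to 0x7c0[14:8]. A standalone sketch of the same encoding; note that the deleted code leaves the unit at 1 us for values of 8192 us and above, whereas this sketch saturates at the 64 us tick:

/* Sketch of the trigger-time encoding: pick the smallest 2^unit microsecond
 * tick (unit <= 6) so the count fits in 7 bits.
 */
#include <linux/types.h>

static void la_encode_trigger_time(u32 trigger_time_us, u32 *time_unit,
				   u8 *unit_count)
{
	u32 unit = 0;

	while (unit < 6 && (trigger_time_us >> unit) >= 128)
		unit++;

	*time_unit = unit;				/* -> 0x7cc[20:18] */
	*unit_count = (trigger_time_us >> unit) & 0x7f;	/* -> 0x7c0[14:8] */
}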
-
-void phydm_lamode_trigger_setting(void *dm_void, char input[][16], u32 *_used,
- char *output, u32 *_out_len, u32 input_num)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- u8 trig_mode, dma_data_sig_sel;
- u32 trig_sig_sel;
- bool is_enable_la_mode;
- u32 trigger_time_mu_sec;
- char help[] = "-h";
- u32 var1[10] = {0};
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- if (dm->support_ic_type & PHYDM_IC_SUPPORT_LA_MODE) {
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
- is_enable_la_mode = (bool)var1[0];
- /*dbg_print("echo cmd input_num = %d\n", input_num);*/
-
- if ((strcmp(input[1], help) == 0)) {
- PHYDM_SNPRINTF(output + used,
- out_len - used,
- "{En} {0:BB,1:BB_MAC,2:RF0,3:RF1,4:MAC}\n {BB:dbg_port[bit],BB_MAC:0-ok/1-fail/2-cca,MAC:ref} {DMA type} {TrigTime}\n {polling_time/ref_mask} {dbg_port} {0:P_Edge, 1:N_Edge} {SpRate:0-80M,1-40M,2-20M} {Capture num}\n");
- } else if (is_enable_la_mode) {
- PHYDM_SSCANF(input[2], DCMD_DECIMAL, &var1[1]);
-
- trig_mode = (u8)var1[1];
-
- if (trig_mode == PHYDM_MAC_TRIG)
- PHYDM_SSCANF(input[3], DCMD_HEX, &var1[2]);
- else
- PHYDM_SSCANF(input[3], DCMD_DECIMAL, &var1[2]);
- trig_sig_sel = var1[2];
-
- PHYDM_SSCANF(input[4], DCMD_DECIMAL, &var1[3]);
- PHYDM_SSCANF(input[5], DCMD_DECIMAL, &var1[4]);
- PHYDM_SSCANF(input[6], DCMD_HEX, &var1[5]);
- PHYDM_SSCANF(input[7], DCMD_HEX, &var1[6]);
- PHYDM_SSCANF(input[8], DCMD_DECIMAL, &var1[7]);
- PHYDM_SSCANF(input[9], DCMD_DECIMAL, &var1[8]);
- PHYDM_SSCANF(input[10], DCMD_DECIMAL, &var1[9]);
-
- dma_data_sig_sel = (u8)var1[3];
- trigger_time_mu_sec = var1[4]; /* unit: us */
-
- adc_smp->la_mac_ref_mask = var1[5];
- adc_smp->la_dbg_port = var1[6];
- adc_smp->la_trigger_edge = (u8)var1[7];
- adc_smp->la_smp_rate = (u8)(var1[8] & 0x7);
- adc_smp->la_count = var1[9];
-
- ODM_RT_TRACE(
- dm, ODM_COMP_UNCOND,
- "echo lamode %d %d %d %d %d %d %x %d %d %d\n",
- var1[0], var1[1], var1[2], var1[3], var1[4],
- var1[5], var1[6], var1[7], var1[8], var1[9]);
-
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "a.En= ((1)), b.mode = ((%d)), c.Trig_Sel = ((0x%x)), d.Dma_type = ((%d))\n",
- trig_mode, trig_sig_sel, dma_data_sig_sel);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "e.Trig_Time = ((%dus)), f.mac_ref_mask = ((0x%x)), g.dbg_port = ((0x%x))\n",
- trigger_time_mu_sec, adc_smp->la_mac_ref_mask,
- adc_smp->la_dbg_port);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "h.Trig_edge = ((%d)), i.smp rate = ((%d MHz)), j.Cap_num = ((%d))\n",
- adc_smp->la_trigger_edge,
- (80 >> adc_smp->la_smp_rate),
- adc_smp->la_count);
-
- adc_smp_set(dm, trig_mode, trig_sig_sel,
- dma_data_sig_sel, trigger_time_mu_sec, 0);
-
- } else {
- adc_smp_stop(dm);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Disable LA mode\n");
- }
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h b/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h
deleted file mode 100644
index 300a22b075e0..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __INC_ADCSMP_H
-#define __INC_ADCSMP_H
-
-#define DYNAMIC_LA_MODE "1.0" /*2016.07.15 Dino */
-
-struct rt_adcsmp_string {
- u32 *octet;
- u32 length;
- u32 buffer_size;
- u32 start_pos;
-};
-
-enum rt_adcsmp_trig_sel {
- PHYDM_ADC_BB_TRIG = 0,
- PHYDM_ADC_MAC_TRIG = 1,
- PHYDM_ADC_RF0_TRIG = 2,
- PHYDM_ADC_RF1_TRIG = 3,
- PHYDM_MAC_TRIG = 4
-};
-
-enum rt_adcsmp_trig_sig_sel {
- ADCSMP_TRIG_CRCOK = 0,
- ADCSMP_TRIG_CRCFAIL = 1,
- ADCSMP_TRIG_CCA = 2,
- ADCSMP_TRIG_REG = 3
-};
-
-enum rt_adcsmp_state {
- ADCSMP_STATE_IDLE = 0,
- ADCSMP_STATE_SET = 1,
- ADCSMP_STATE_QUERY = 2
-};
-
-struct rt_adcsmp {
- struct rt_adcsmp_string adc_smp_buf;
- enum rt_adcsmp_state adc_smp_state;
- u8 la_trig_mode;
- u32 la_trig_sig_sel;
- u8 la_dma_type;
- u32 la_trigger_time;
- u32 la_mac_ref_mask;
- u32 la_dbg_port;
- u8 la_trigger_edge;
- u8 la_smp_rate;
- u32 la_count;
- u8 is_bb_trigger;
- u8 la_work_item_index;
-};
-
-void adc_smp_set(void *dm_void, u8 trig_mode, u32 trig_sig_sel,
- u8 dma_data_sig_sel, u32 trigger_time, u16 polling_time);
-
-void adc_smp_query(void *dm_void, void *output, u32 out_len, u32 *pused);
-
-s32 adc_smp_get_sample_counts(void *dm_void);
-
-s32 adc_smp_query_single_data(void *dm_void, void *output, u32 out_len,
- u32 index);
-
-void adc_smp_stop(void *dm_void);
-
-void adc_smp_init(void *dm_void);
-
-void adc_smp_de_init(void *dm_void);
-
-void phydm_la_mode_bb_setting(void *dm_void);
-
-void phydm_la_mode_set_trigger_time(void *dm_void, u32 trigger_time_mu_sec);
-
-void phydm_lamode_trigger_setting(void *dm_void, char input[][16], u32 *_used,
- char *output, u32 *_out_len, u32 input_num);
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_antdiv.c b/drivers/staging/rtlwifi/phydm/phydm_antdiv.c
deleted file mode 100644
index 5a62e6b73a10..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_antdiv.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-/* ******************************************************
- * when antenna test utility is on or some testing need to disable antenna
- * diversity, call this function to disable all ODM related mechanisms which
- * will switch antenna.
- * *******************************************************/
-void odm_stop_antenna_switch_dm(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- /* disable ODM antenna diversity */
- dm->support_ability &= ~ODM_BB_ANT_DIV;
- ODM_RT_TRACE(dm, ODM_COMP_ANT_DIV, "STOP Antenna Diversity\n");
-}
-
-void phydm_enable_antenna_diversity(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- dm->support_ability |= ODM_BB_ANT_DIV;
- ODM_RT_TRACE(dm, ODM_COMP_ANT_DIV,
- "AntDiv is enabled & Re-Init AntDiv\n");
- odm_antenna_diversity_init(dm);
-}
-
-void odm_set_ant_config(void *dm_void, u8 ant_setting /* 0=A, 1=B, 2=C, .... */
- )
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type == ODM_RTL8723B) {
- if (ant_setting == 0) /* ant A*/
- odm_set_bb_reg(dm, 0x948, MASKDWORD, 0x00000000);
- else if (ant_setting == 1)
- odm_set_bb_reg(dm, 0x948, MASKDWORD, 0x00000280);
- } else if (dm->support_ic_type == ODM_RTL8723D) {
- if (ant_setting == 0) /* ant A*/
- odm_set_bb_reg(dm, 0x948, MASKLWORD, 0x0000);
- else if (ant_setting == 1)
- odm_set_bb_reg(dm, 0x948, MASKLWORD, 0x0280);
- }
-}
-
-/* ****************************************************** */
-
-void odm_sw_ant_div_rest_after_link(void *dm_void) {}
-
-void odm_ant_div_reset(void *dm_void) {}
-
-void odm_antenna_diversity_init(void *dm_void) {}
-
-void odm_antenna_diversity(void *dm_void) {}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_antdiv.h b/drivers/staging/rtlwifi/phydm/phydm_antdiv.h
deleted file mode 100644
index 4a58163bda4c..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_antdiv.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMANTDIV_H__
-#define __PHYDMANTDIV_H__
-
-/* 2.0 2014.11.04
- * 2.1 2015.01.13 Dino
- * 2.2 2015.01.16 Dino
- * 3.1 2015.07.29 YuChen, remove 92c 92d 8723a
- * 3.2 2015.08.11 Stanley, disable antenna diversity when BT is enabled for 8723B
- * 3.3 2015.08.12 Stanley. 8723B does not need to check whether the antenna is
- * controlled by BT, because antenna diversity only works when BT is
- * disabled or the radio is off
- * 3.4 2015.08.28 Dino 1.Add 8821A Smart Antenna 2. Add 8188F SW S0S1 Antenna
- * Diversity
- * 3.5 2015.10.07 Stanley Always check antenna detection result from BT-coex.
- * for 8723B, not from PHYDM
- * 3.6 2015.11.16 Stanley
- * 3.7 2015.11.20 Dino Add SmartAnt FAT Patch
- * 3.8 2015.12.21 Dino, Add SmartAnt dynamic training packet num
- * 3.9 2016.01.05 Dino, Add SmartAnt cmd for converting single & two smtant, and
- * add cmd for adjust truth table
- */
-#define ANTDIV_VERSION "3.9"
-
-/* 1 ============================================================
- * 1 Definition
- * 1 ============================================================
- */
-
-#define ANTDIV_INIT 0xff
-#define MAIN_ANT 1 /*ant A or ant Main or S1*/
-#define AUX_ANT 2 /*AntB or ant Aux or S0*/
-#define MAX_ANT 3 /* 3 for AP using*/
-
-#define ANT1_2G 0 /* = ANT2_5G for 8723D BTG S1 RX S0S1 diversity for 8723D,
- * TX fixed at S1
- */
-#define ANT2_2G 1 /* = ANT1_5G for 8723D BTG S0 RX S0S1 diversity for 8723D,
- * TX fixed at S1
- */
-/*smart antenna*/
-#define SUPPORT_RF_PATH_NUM 4
-#define SUPPORT_BEAM_PATTERN_NUM 4
-#define NUM_ANTENNA_8821A 2
-
-#define SUPPORT_BEAM_SET_PATTERN_NUM 8
-
-#define NO_FIX_TX_ANT 0
-#define FIX_TX_AT_MAIN 1
-#define FIX_AUX_AT_MAIN 2
-
-/* Antenna Diversity Control type */
-#define ODM_AUTO_ANT 0
-#define ODM_FIX_MAIN_ANT 1
-#define ODM_FIX_AUX_ANT 2
-
-#define ODM_N_ANTDIV_SUPPORT \
- (ODM_RTL8188E | ODM_RTL8192E | ODM_RTL8723B | ODM_RTL8188F | \
- ODM_RTL8723D | ODM_RTL8195A)
-#define ODM_AC_ANTDIV_SUPPORT \
- (ODM_RTL8821 | ODM_RTL8881A | ODM_RTL8812 | ODM_RTL8821C | \
- ODM_RTL8822B | ODM_RTL8814B)
-#define ODM_ANTDIV_SUPPORT (ODM_N_ANTDIV_SUPPORT | ODM_AC_ANTDIV_SUPPORT)
-#define ODM_SMART_ANT_SUPPORT (ODM_RTL8188E | ODM_RTL8192E)
-#define ODM_HL_SMART_ANT_TYPE1_SUPPORT (ODM_RTL8821 | ODM_RTL8822B)
-
-#define ODM_ANTDIV_2G_SUPPORT_IC \
- (ODM_RTL8188E | ODM_RTL8192E | ODM_RTL8723B | ODM_RTL8881A | \
- ODM_RTL8188F | ODM_RTL8723D)
-#define ODM_ANTDIV_5G_SUPPORT_IC \
- (ODM_RTL8821 | ODM_RTL8881A | ODM_RTL8812 | ODM_RTL8821C)
-
-#define ODM_EVM_ENHANCE_ANTDIV_SUPPORT_IC (ODM_RTL8192E)
-
-#define ODM_ANTDIV_2G BIT(0)
-#define ODM_ANTDIV_5G BIT(1)
-
-#define ANTDIV_ON 1
-#define ANTDIV_OFF 0
-
-#define FAT_ON 1
-#define FAT_OFF 0
-
-#define TX_BY_DESC 1
-#define TX_BY_REG 0
-
-#define RSSI_METHOD 0
-#define EVM_METHOD 1
-#define CRC32_METHOD 2
-
-#define INIT_ANTDIV_TIMMER 0
-#define CANCEL_ANTDIV_TIMMER 1
-#define RELEASE_ANTDIV_TIMMER 2
-
-#define CRC32_FAIL 1
-#define CRC32_OK 0
-
-#define evm_rssi_th_high 25
-#define evm_rssi_th_low 20
-
-#define NORMAL_STATE_MIAN 1
-#define NORMAL_STATE_AUX 2
-#define TRAINING_STATE 3
-
-#define FORCE_RSSI_DIFF 10
-
-#define CSI_ON 1
-#define CSI_OFF 0
-
-#define DIVON_CSIOFF 1
-#define DIVOFF_CSION 2
-
-#define BDC_DIV_TRAIN_STATE 0
-#define bdc_bfer_train_state 1
-#define BDC_DECISION_STATE 2
-#define BDC_BF_HOLD_STATE 3
-#define BDC_DIV_HOLD_STATE 4
-
-#define BDC_MODE_1 1
-#define BDC_MODE_2 2
-#define BDC_MODE_3 3
-#define BDC_MODE_4 4
-#define BDC_MODE_NULL 0xff
-
-/*SW S0S1 antenna diversity*/
-#define SWAW_STEP_INIT 0xff
-#define SWAW_STEP_PEEK 0
-#define SWAW_STEP_DETERMINE 1
-
-#define RSSI_CHECK_RESET_PERIOD 10
-#define RSSI_CHECK_THRESHOLD 50
-
-/*Hong Lin Smart antenna*/
-#define HL_SMTANT_2WIRE_DATA_LEN 24
-
-/* 1 ============================================================
- * 1 structure
- * 1 ============================================================
- */
-
-struct sw_antenna_switch {
- u8 double_chk_flag; /*If current antenna RSSI > "RSSI_CHECK_THRESHOLD",
- *then check this antenna again
- */
- u8 try_flag;
- s32 pre_rssi;
- u8 cur_antenna;
- u8 pre_antenna;
- u8 rssi_trying;
- u8 reset_idx;
- u8 train_time;
- u8 train_time_flag; /*base on RSSI difference between two antennas*/
- struct timer_list phydm_sw_antenna_switch_timer;
- u32 pkt_cnt_sw_ant_div_by_ctrl_frame;
- bool is_sw_ant_div_by_ctrl_frame;
-
- /* AntDect (Before link Antenna Switch check) needs to be moved*/
- u16 single_ant_counter;
- u16 dual_ant_counter;
- u16 aux_fail_detec_counter;
- u16 retry_counter;
- u8 swas_no_link_state;
- u32 swas_no_link_bk_reg948;
- bool ANTA_ON; /*To indicate ant A is on or not*/
- bool ANTB_ON; /*To indicate ant B is on or not*/
- bool pre_aux_fail_detec;
- bool rssi_ant_dect_result;
- u8 ant_5g;
- u8 ant_2g;
-};
-
-struct fast_antenna_training {
- u8 bssid[6];
- u8 antsel_rx_keep_0;
- u8 antsel_rx_keep_1;
- u8 antsel_rx_keep_2;
- u8 antsel_rx_keep_3;
- u32 ant_sum_rssi[7];
- u32 ant_rssi_cnt[7];
- u32 ant_ave_rssi[7];
- u8 fat_state;
- u32 train_idx;
- u8 antsel_a[ODM_ASSOCIATE_ENTRY_NUM];
- u8 antsel_b[ODM_ASSOCIATE_ENTRY_NUM];
- u8 antsel_c[ODM_ASSOCIATE_ENTRY_NUM];
- u16 main_ant_sum[ODM_ASSOCIATE_ENTRY_NUM];
- u16 aux_ant_sum[ODM_ASSOCIATE_ENTRY_NUM];
- u16 main_ant_cnt[ODM_ASSOCIATE_ENTRY_NUM];
- u16 aux_ant_cnt[ODM_ASSOCIATE_ENTRY_NUM];
- u16 main_ant_sum_cck[ODM_ASSOCIATE_ENTRY_NUM];
- u16 aux_ant_sum_cck[ODM_ASSOCIATE_ENTRY_NUM];
- u16 main_ant_cnt_cck[ODM_ASSOCIATE_ENTRY_NUM];
- u16 aux_ant_cnt_cck[ODM_ASSOCIATE_ENTRY_NUM];
- u8 rx_idle_ant;
- u8 ant_div_on_off;
- bool is_become_linked;
- u32 min_max_rssi;
- u8 idx_ant_div_counter_2g;
- u8 idx_ant_div_counter_5g;
- u8 ant_div_2g_5g;
-
- u32 cck_ctrl_frame_cnt_main;
- u32 cck_ctrl_frame_cnt_aux;
- u32 ofdm_ctrl_frame_cnt_main;
- u32 ofdm_ctrl_frame_cnt_aux;
- u32 main_ant_ctrl_frame_sum;
- u32 aux_ant_ctrl_frame_sum;
- u32 main_ant_ctrl_frame_cnt;
- u32 aux_ant_ctrl_frame_cnt;
- u8 b_fix_tx_ant;
- bool fix_ant_bfee;
- bool enable_ctrl_frame_antdiv;
- bool use_ctrl_frame_antdiv;
- u8 hw_antsw_occur;
- u8 *p_force_tx_ant_by_desc;
- u8 force_tx_ant_by_desc; /*A temp value, will hook to driver team's
- *outer parameter later
- */
- u8 *p_default_s0_s1;
- u8 default_s0_s1;
-};
-
-/* 1 ============================================================
- * 1 enumeration
- * 1 ============================================================
- */
-
-/*Fast antenna training*/
-enum fat_state {
- FAT_BEFORE_LINK_STATE = 0,
- FAT_PREPARE_STATE = 1,
- FAT_TRAINING_STATE = 2,
- FAT_DECISION_STATE = 3
-};
-
-enum ant_div_type {
- NO_ANTDIV = 0xFF,
- CG_TRX_HW_ANTDIV = 0x01,
- CGCS_RX_HW_ANTDIV = 0x02,
- FIXED_HW_ANTDIV = 0x03,
- CG_TRX_SMART_ANTDIV = 0x04,
- CGCS_RX_SW_ANTDIV = 0x05,
- /*8723B internal switch S0 S1*/
- S0S1_SW_ANTDIV = 0x06,
- /*TRX S0S1 diversity for 8723D*/
- S0S1_TRX_HW_ANTDIV = 0x07,
- /*Hong-Lin Smart antenna used for 8821AE, which has 2 antenna entities,
- *each equipped with 4 antenna patterns
- */
- HL_SW_SMART_ANT_TYPE1 = 0x10,
- /*Hong-Bo Smart antenna used for 8822B, which has 2 antenna entities*/
- HL_SW_SMART_ANT_TYPE2 = 0x11,
-};
-
-/* 1 ============================================================
- * 1 function prototype
- * 1 ============================================================
- */
-
-void odm_stop_antenna_switch_dm(void *dm_void);
-
-void phydm_enable_antenna_diversity(void *dm_void);
-
-void odm_set_ant_config(void *dm_void, u8 ant_setting /* 0=A, 1=B, 2=C, .... */
- );
-
-#define sw_ant_div_rest_after_link odm_sw_ant_div_rest_after_link
-
-void odm_sw_ant_div_rest_after_link(void *dm_void);
-
-void odm_ant_div_reset(void *dm_void);
-
-void odm_antenna_diversity_init(void *dm_void);
-
-void odm_antenna_diversity(void *dm_void);
-
-#endif /*#ifndef __PHYDMANTDIV_H__*/
diff --git a/drivers/staging/rtlwifi/phydm/phydm_beamforming.h b/drivers/staging/rtlwifi/phydm/phydm_beamforming.h
deleted file mode 100644
index a0bcdb620698..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_beamforming.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __INC_PHYDM_BEAMFORMING_H
-#define __INC_PHYDM_BEAMFORMING_H
-
-/*Beamforming Related*/
-#include "txbf/halcomtxbf.h"
-#include "txbf/haltxbfjaguar.h"
-#include "txbf/haltxbf8822b.h"
-#include "txbf/haltxbfinterface.h"
-
-#define beamforming_gid_paid(adapter, tcb)
-#define phydm_acting_determine(dm, type) false
-#define beamforming_enter(dm, sta_idx)
-#define beamforming_leave(dm, RA)
-#define beamforming_end_fw(dm)
-#define beamforming_control_v1(dm, RA, AID, mode, BW, rate) true
-#define beamforming_control_v2(dm, idx, mode, BW, period) true
-#define phydm_beamforming_end_sw(dm, _status)
-#define beamforming_timer_callback(dm)
-#define phydm_beamforming_init(dm)
-#define phydm_beamforming_control_v2(dm, _idx, _mode, _BW, _period) false
-#define beamforming_watchdog(dm)
-#define phydm_beamforming_watchdog(dm)
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_ccx.c b/drivers/staging/rtlwifi/phydm/phydm_ccx.c
deleted file mode 100644
index 5e01ea8f4b68..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_ccx.c
+++ /dev/null
@@ -1,447 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-/*Set NHM period and thresholds, and select whether CCA and TXON
- *periods are included or excluded
- */
-void phydm_nhm_setting(void *dm_void, u8 nhm_setting)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ccx_info *ccx_info = &dm->dm_ccx_info;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- if (nhm_setting == SET_NHM_SETTING) {
- /*Set inexclude_cca, inexclude_txon*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(9),
- ccx_info->nhm_inexclude_cca);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(10),
- ccx_info->nhm_inexclude_txon);
-
- /*Set NHM period*/
- odm_set_bb_reg(dm, ODM_REG_CCX_PERIOD_11AC, MASKHWORD,
- ccx_info->NHM_period);
-
- /*Set NHM threshold*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11AC,
- MASKBYTE0, ccx_info->NHM_th[0]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11AC,
- MASKBYTE1, ccx_info->NHM_th[1]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11AC,
- MASKBYTE2, ccx_info->NHM_th[2]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11AC,
- MASKBYTE3, ccx_info->NHM_th[3]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11AC,
- MASKBYTE0, ccx_info->NHM_th[4]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11AC,
- MASKBYTE1, ccx_info->NHM_th[5]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11AC,
- MASKBYTE2, ccx_info->NHM_th[6]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11AC,
- MASKBYTE3, ccx_info->NHM_th[7]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH8_11AC, MASKBYTE0,
- ccx_info->NHM_th[8]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, MASKBYTE2,
- ccx_info->NHM_th[9]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, MASKBYTE3,
- ccx_info->NHM_th[10]);
-
- /*CCX EN*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(8),
- CCX_EN);
- } else if (nhm_setting == STORE_NHM_SETTING) {
- /*Store prev. disable_ignore_cca, disable_ignore_txon*/
- ccx_info->NHM_inexclude_cca_restore =
- (enum nhm_inexclude_cca)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(9));
- ccx_info->NHM_inexclude_txon_restore =
- (enum nhm_inexclude_txon)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(10));
-
- /*Store previous NHM period*/
- ccx_info->NHM_period_restore = (u16)odm_get_bb_reg(
- dm, ODM_REG_CCX_PERIOD_11AC, MASKHWORD);
-
- /*Store NHM threshold*/
- ccx_info->NHM_th_restore[0] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH3_TO_TH0_11AC, MASKBYTE0);
- ccx_info->NHM_th_restore[1] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH3_TO_TH0_11AC, MASKBYTE1);
- ccx_info->NHM_th_restore[2] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH3_TO_TH0_11AC, MASKBYTE2);
- ccx_info->NHM_th_restore[3] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH3_TO_TH0_11AC, MASKBYTE3);
- ccx_info->NHM_th_restore[4] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH7_TO_TH4_11AC, MASKBYTE0);
- ccx_info->NHM_th_restore[5] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH7_TO_TH4_11AC, MASKBYTE1);
- ccx_info->NHM_th_restore[6] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH7_TO_TH4_11AC, MASKBYTE2);
- ccx_info->NHM_th_restore[7] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH7_TO_TH4_11AC, MASKBYTE3);
- ccx_info->NHM_th_restore[8] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH8_11AC, MASKBYTE0);
- ccx_info->NHM_th_restore[9] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH9_TH10_11AC, MASKBYTE2);
- ccx_info->NHM_th_restore[10] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH9_TH10_11AC, MASKBYTE3);
- } else if (nhm_setting == RESTORE_NHM_SETTING) {
- /*Set disable_ignore_cca, disable_ignore_txon*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(9),
- ccx_info->NHM_inexclude_cca_restore);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(10),
- ccx_info->NHM_inexclude_txon_restore);
-
- /*Set NHM period*/
- odm_set_bb_reg(dm, ODM_REG_CCX_PERIOD_11AC, MASKHWORD,
- ccx_info->NHM_period_restore);
-
- /*Set NHM threshold*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11AC,
- MASKBYTE0, ccx_info->NHM_th_restore[0]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11AC,
- MASKBYTE1, ccx_info->NHM_th_restore[1]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11AC,
- MASKBYTE2, ccx_info->NHM_th_restore[2]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11AC,
- MASKBYTE3, ccx_info->NHM_th_restore[3]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11AC,
- MASKBYTE0, ccx_info->NHM_th_restore[4]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11AC,
- MASKBYTE1, ccx_info->NHM_th_restore[5]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11AC,
- MASKBYTE2, ccx_info->NHM_th_restore[6]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11AC,
- MASKBYTE3, ccx_info->NHM_th_restore[7]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH8_11AC, MASKBYTE0,
- ccx_info->NHM_th_restore[8]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, MASKBYTE2,
- ccx_info->NHM_th_restore[9]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, MASKBYTE3,
- ccx_info->NHM_th_restore[10]);
- } else {
- return;
- }
- }
-
- else if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- if (nhm_setting == SET_NHM_SETTING) {
- /*Set disable_ignore_cca, disable_ignore_txon*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(9),
- ccx_info->nhm_inexclude_cca);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(10),
- ccx_info->nhm_inexclude_txon);
-
- /*Set NHM period*/
- odm_set_bb_reg(dm, ODM_REG_CCX_PERIOD_11N, MASKHWORD,
- ccx_info->NHM_period);
-
- /*Set NHM threshold*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11N,
- MASKBYTE0, ccx_info->NHM_th[0]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11N,
- MASKBYTE1, ccx_info->NHM_th[1]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11N,
- MASKBYTE2, ccx_info->NHM_th[2]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11N,
- MASKBYTE3, ccx_info->NHM_th[3]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11N,
- MASKBYTE0, ccx_info->NHM_th[4]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11N,
- MASKBYTE1, ccx_info->NHM_th[5]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11N,
- MASKBYTE2, ccx_info->NHM_th[6]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11N,
- MASKBYTE3, ccx_info->NHM_th[7]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH8_11N, MASKBYTE0,
- ccx_info->NHM_th[8]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, MASKBYTE2,
- ccx_info->NHM_th[9]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, MASKBYTE3,
- ccx_info->NHM_th[10]);
-
- /*CCX EN*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(8),
- CCX_EN);
- } else if (nhm_setting == STORE_NHM_SETTING) {
- /*Store prev. disable_ignore_cca, disable_ignore_txon*/
- ccx_info->NHM_inexclude_cca_restore =
- (enum nhm_inexclude_cca)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH9_TH10_11N, BIT(9));
- ccx_info->NHM_inexclude_txon_restore =
- (enum nhm_inexclude_txon)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH9_TH10_11N, BIT(10));
-
- /*Store previous NHM period*/
- ccx_info->NHM_period_restore = (u16)odm_get_bb_reg(
- dm, ODM_REG_CCX_PERIOD_11N, MASKHWORD);
-
- /*Store NHM threshold*/
- ccx_info->NHM_th_restore[0] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH3_TO_TH0_11N, MASKBYTE0);
- ccx_info->NHM_th_restore[1] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH3_TO_TH0_11N, MASKBYTE1);
- ccx_info->NHM_th_restore[2] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH3_TO_TH0_11N, MASKBYTE2);
- ccx_info->NHM_th_restore[3] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH3_TO_TH0_11N, MASKBYTE3);
- ccx_info->NHM_th_restore[4] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH7_TO_TH4_11N, MASKBYTE0);
- ccx_info->NHM_th_restore[5] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH7_TO_TH4_11N, MASKBYTE1);
- ccx_info->NHM_th_restore[6] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH7_TO_TH4_11N, MASKBYTE2);
- ccx_info->NHM_th_restore[7] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH7_TO_TH4_11N, MASKBYTE3);
- ccx_info->NHM_th_restore[8] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH8_11N, MASKBYTE0);
- ccx_info->NHM_th_restore[9] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH9_TH10_11N, MASKBYTE2);
- ccx_info->NHM_th_restore[10] = (u8)odm_get_bb_reg(
- dm, ODM_REG_NHM_TH9_TH10_11N, MASKBYTE3);
- } else if (nhm_setting == RESTORE_NHM_SETTING) {
- /*Set disable_ignore_cca, disable_ignore_txon*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(9),
- ccx_info->NHM_inexclude_cca_restore);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(10),
- ccx_info->NHM_inexclude_txon_restore);
-
- /*Set NHM period*/
- odm_set_bb_reg(dm, ODM_REG_CCX_PERIOD_11N, MASKHWORD,
- ccx_info->NHM_period_restore);
-
- /*Set NHM threshold*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11N,
- MASKBYTE0, ccx_info->NHM_th_restore[0]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11N,
- MASKBYTE1, ccx_info->NHM_th_restore[1]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11N,
- MASKBYTE2, ccx_info->NHM_th_restore[2]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH3_TO_TH0_11N,
- MASKBYTE3, ccx_info->NHM_th_restore[3]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11N,
- MASKBYTE0, ccx_info->NHM_th_restore[4]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11N,
- MASKBYTE1, ccx_info->NHM_th_restore[5]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11N,
- MASKBYTE2, ccx_info->NHM_th_restore[6]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH7_TO_TH4_11N,
- MASKBYTE3, ccx_info->NHM_th_restore[7]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH8_11N, MASKBYTE0,
- ccx_info->NHM_th_restore[8]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, MASKBYTE2,
- ccx_info->NHM_th_restore[9]);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, MASKBYTE3,
- ccx_info->NHM_th_restore[10]);
- } else {
- return;
- }
- }
-}
-
-void phydm_nhm_trigger(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- /*Trigger NHM*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(1), 0);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11AC, BIT(1), 1);
- } else if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- /*Trigger NHM*/
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(1), 0);
- odm_set_bb_reg(dm, ODM_REG_NHM_TH9_TH10_11N, BIT(1), 1);
- }
-}
-
-void phydm_get_nhm_result(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 value32;
- struct ccx_info *ccx_info = &dm->dm_ccx_info;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- value32 = odm_read_4byte(dm, ODM_REG_NHM_CNT_11AC);
- ccx_info->NHM_result[0] = (u8)(value32 & MASKBYTE0);
- ccx_info->NHM_result[1] = (u8)((value32 & MASKBYTE1) >> 8);
- ccx_info->NHM_result[2] = (u8)((value32 & MASKBYTE2) >> 16);
- ccx_info->NHM_result[3] = (u8)((value32 & MASKBYTE3) >> 24);
-
- value32 = odm_read_4byte(dm, ODM_REG_NHM_CNT7_TO_CNT4_11AC);
- ccx_info->NHM_result[4] = (u8)(value32 & MASKBYTE0);
- ccx_info->NHM_result[5] = (u8)((value32 & MASKBYTE1) >> 8);
- ccx_info->NHM_result[6] = (u8)((value32 & MASKBYTE2) >> 16);
- ccx_info->NHM_result[7] = (u8)((value32 & MASKBYTE3) >> 24);
-
- value32 = odm_read_4byte(dm, ODM_REG_NHM_CNT11_TO_CNT8_11AC);
- ccx_info->NHM_result[8] = (u8)(value32 & MASKBYTE0);
- ccx_info->NHM_result[9] = (u8)((value32 & MASKBYTE1) >> 8);
- ccx_info->NHM_result[10] = (u8)((value32 & MASKBYTE2) >> 16);
- ccx_info->NHM_result[11] = (u8)((value32 & MASKBYTE3) >> 24);
-
- /*Get NHM duration*/
- value32 = odm_read_4byte(dm, ODM_REG_NHM_DUR_READY_11AC);
- ccx_info->NHM_duration = (u16)(value32 & MASKLWORD);
- }
-
- else if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- value32 = odm_read_4byte(dm, ODM_REG_NHM_CNT_11N);
- ccx_info->NHM_result[0] = (u8)(value32 & MASKBYTE0);
- ccx_info->NHM_result[1] = (u8)((value32 & MASKBYTE1) >> 8);
- ccx_info->NHM_result[2] = (u8)((value32 & MASKBYTE2) >> 16);
- ccx_info->NHM_result[3] = (u8)((value32 & MASKBYTE3) >> 24);
-
- value32 = odm_read_4byte(dm, ODM_REG_NHM_CNT7_TO_CNT4_11N);
- ccx_info->NHM_result[4] = (u8)(value32 & MASKBYTE0);
- ccx_info->NHM_result[5] = (u8)((value32 & MASKBYTE1) >> 8);
- ccx_info->NHM_result[6] = (u8)((value32 & MASKBYTE2) >> 16);
- ccx_info->NHM_result[7] = (u8)((value32 & MASKBYTE3) >> 24);
-
- value32 = odm_read_4byte(dm, ODM_REG_NHM_CNT9_TO_CNT8_11N);
- ccx_info->NHM_result[8] = (u8)((value32 & MASKBYTE2) >> 16);
- ccx_info->NHM_result[9] = (u8)((value32 & MASKBYTE3) >> 24);
-
- value32 = odm_read_4byte(dm, ODM_REG_NHM_CNT10_TO_CNT11_11N);
- ccx_info->NHM_result[10] = (u8)((value32 & MASKBYTE2) >> 16);
- ccx_info->NHM_result[11] = (u8)((value32 & MASKBYTE3) >> 24);
-
- /* Get NHM duration */
- value32 = odm_read_4byte(dm, ODM_REG_NHM_CNT10_TO_CNT11_11N);
- ccx_info->NHM_duration = (u16)(value32 & MASKLWORD);
- }
-}
-
-bool phydm_check_nhm_ready(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 value32 = 0;
- u8 i;
- bool ret = false;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- value32 = odm_get_bb_reg(dm,
- ODM_REG_CLM_RESULT_11AC,
- MASKDWORD);
-
- for (i = 0; i < 200; i++) {
- ODM_delay_ms(1);
- if (odm_get_bb_reg(dm, ODM_REG_NHM_DUR_READY_11AC,
- BIT(17))) {
- ret = true;
- break;
- }
- }
- }
-
- else if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- value32 = odm_get_bb_reg(dm, ODM_REG_CLM_READY_11N, MASKDWORD);
-
- for (i = 0; i < 200; i++) {
- ODM_delay_ms(1);
- if (odm_get_bb_reg(dm, ODM_REG_NHM_DUR_READY_11AC,
- BIT(17))) {
- ret = true;
- break;
- }
- }
- }
- return ret;
-}
-
-void phydm_clm_setting(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ccx_info *ccx_info = &dm->dm_ccx_info;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_CCX_PERIOD_11AC, MASKLWORD,
- ccx_info->CLM_period); /*4us sample 1 time*/
- odm_set_bb_reg(dm, ODM_REG_CLM_11AC, BIT(8),
- 0x1); /*Enable CCX for CLM*/
-
- } else if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_CCX_PERIOD_11N, MASKLWORD,
- ccx_info->CLM_period); /*4us sample 1 time*/
- odm_set_bb_reg(dm, ODM_REG_CLM_11N, BIT(8),
- 0x1); /*Enable CCX for CLM*/
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_CCX, "[%s] : CLM period = %dus\n", __func__,
- ccx_info->CLM_period * 4);
-}
-
-void phydm_clm_trigger(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_CLM_11AC, BIT(0),
- 0x0); /*Trigger CLM*/
- odm_set_bb_reg(dm, ODM_REG_CLM_11AC, BIT(0), 0x1);
- } else if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- odm_set_bb_reg(dm, ODM_REG_CLM_11N, BIT(0),
- 0x0); /*Trigger CLM*/
- odm_set_bb_reg(dm, ODM_REG_CLM_11N, BIT(0), 0x1);
- }
-}
-
-bool phydm_check_cl_mready(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 value32 = 0;
- bool ret = false;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- value32 = odm_get_bb_reg(
- dm, ODM_REG_CLM_RESULT_11AC,
- MASKDWORD); /*make sure CLM calc is ready*/
- else if (dm->support_ic_type & ODM_IC_11N_SERIES)
- value32 = odm_get_bb_reg(
- dm, ODM_REG_CLM_READY_11N,
- MASKDWORD); /*make sure CLM calc is ready*/
-
- if ((dm->support_ic_type & ODM_IC_11AC_SERIES) && (value32 & BIT(16)))
- ret = true;
- else if ((dm->support_ic_type & ODM_IC_11N_SERIES) &&
- (value32 & BIT(16)))
- ret = true;
- else
- ret = false;
-
- ODM_RT_TRACE(dm, ODM_COMP_CCX, "[%s] : CLM ready = %d\n", __func__,
- ret);
-
- return ret;
-}
-
-void phydm_get_cl_mresult(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ccx_info *ccx_info = &dm->dm_ccx_info;
-
- u32 value32 = 0;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- value32 = odm_get_bb_reg(dm, ODM_REG_CLM_RESULT_11AC,
- MASKDWORD); /*read CLM calc result*/
- else if (dm->support_ic_type & ODM_IC_11N_SERIES)
- value32 = odm_get_bb_reg(dm, ODM_REG_CLM_RESULT_11N,
- MASKDWORD); /*read CLM calc result*/
-
- ccx_info->CLM_result = (u16)(value32 & MASKLWORD);
-
- ODM_RT_TRACE(dm, ODM_COMP_CCX, "[%s] : CLM result = %dus\n", __func__,
- ccx_info->CLM_result * 4);
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_ccx.h b/drivers/staging/rtlwifi/phydm/phydm_ccx.h
deleted file mode 100644
index b3e3e0bae582..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_ccx.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __PHYDMCCX_H__
-#define __PHYDMCCX_H__
-
-#define CCX_EN 1
-
-#define SET_NHM_SETTING 0
-#define STORE_NHM_SETTING 1
-#define RESTORE_NHM_SETTING 2
-
-enum nhm_inexclude_cca { NHM_EXCLUDE_CCA, NHM_INCLUDE_CCA };
-
-enum nhm_inexclude_txon { NHM_EXCLUDE_TXON, NHM_INCLUDE_TXON };
-
-struct ccx_info {
- /*Settings*/
- u8 NHM_th[11];
- u16 NHM_period; /* 4us per unit */
- u16 CLM_period; /* 4us per unit */
- enum nhm_inexclude_txon nhm_inexclude_txon;
- enum nhm_inexclude_cca nhm_inexclude_cca;
-
- /*Previous Settings*/
- u8 NHM_th_restore[11];
- u16 NHM_period_restore; /* 4us per unit */
- u16 CLM_period_restore; /* 4us per unit */
- enum nhm_inexclude_txon NHM_inexclude_txon_restore;
- enum nhm_inexclude_cca NHM_inexclude_cca_restore;
-
- /*Report*/
- u8 NHM_result[12];
- u16 NHM_duration;
- u16 CLM_result;
-
- bool echo_NHM_en;
- bool echo_CLM_en;
- u8 echo_IGI;
-};
-
-/*NHM*/
-
-void phydm_nhm_setting(void *dm_void, u8 nhm_setting);
-
-void phydm_nhm_trigger(void *dm_void);
-
-void phydm_get_nhm_result(void *dm_void);
-
-bool phydm_check_nhm_ready(void *dm_void);
-
-/*CLM*/
-
-void phydm_clm_setting(void *dm_void);
-
-void phydm_clm_trigger(void *dm_void);
-
-bool phydm_check_cl_mready(void *dm_void);
-
-void phydm_get_cl_mresult(void *dm_void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_cfotracking.c b/drivers/staging/rtlwifi/phydm/phydm_cfotracking.c
deleted file mode 100644
index cf35601efe94..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_cfotracking.c
+++ /dev/null
@@ -1,332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-static void odm_set_crystal_cap(void *dm_void, u8 crystal_cap)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct cfo_tracking *cfo_track =
- (struct cfo_tracking *)phydm_get_structure(dm, PHYDM_CFOTRACK);
-
- if (cfo_track->crystal_cap == crystal_cap)
- return;
-
- cfo_track->crystal_cap = crystal_cap;
-
- if (dm->support_ic_type & (ODM_RTL8188E | ODM_RTL8188F)) {
- /* write 0x24[22:17] = 0x24[16:11] = crystal_cap */
- crystal_cap = crystal_cap & 0x3F;
- odm_set_bb_reg(dm, REG_AFE_XTAL_CTRL, 0x007ff800,
- (crystal_cap | (crystal_cap << 6)));
- } else if (dm->support_ic_type & ODM_RTL8812) {
- /* write 0x2C[30:25] = 0x2C[24:19] = crystal_cap */
- crystal_cap = crystal_cap & 0x3F;
- odm_set_bb_reg(dm, REG_MAC_PHY_CTRL, 0x7FF80000,
- (crystal_cap | (crystal_cap << 6)));
- } else if ((dm->support_ic_type & (ODM_RTL8703B | ODM_RTL8723B |
- ODM_RTL8192E | ODM_RTL8821))) {
- /* 0x2C[23:18] = 0x2C[17:12] = crystal_cap */
- crystal_cap = crystal_cap & 0x3F;
- odm_set_bb_reg(dm, REG_MAC_PHY_CTRL, 0x00FFF000,
- (crystal_cap | (crystal_cap << 6)));
- } else if (dm->support_ic_type & ODM_RTL8814A) {
- /* write 0x2C[26:21] = 0x2C[20:15] = crystal_cap */
- crystal_cap = crystal_cap & 0x3F;
- odm_set_bb_reg(dm, REG_MAC_PHY_CTRL, 0x07FF8000,
- (crystal_cap | (crystal_cap << 6)));
- } else if (dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8821C)) {
- /* write 0x24[30:25] = 0x28[6:1] = crystal_cap */
- crystal_cap = crystal_cap & 0x3F;
- odm_set_bb_reg(dm, REG_AFE_XTAL_CTRL, 0x7e000000, crystal_cap);
- odm_set_bb_reg(dm, REG_AFE_PLL_CTRL, 0x7e, crystal_cap);
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): Use default setting.\n", __func__);
- odm_set_bb_reg(dm, REG_MAC_PHY_CTRL, 0xFFF000,
- (crystal_cap | (crystal_cap << 6)));
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING, "%s(): crystal_cap = 0x%x\n",
- __func__, crystal_cap);
-
- /* JJ modified 20161115 */
-}
-
-static u8 odm_get_default_crytaltal_cap(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 crystal_cap = 0x20;
-
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
-
- crystal_cap = rtlefuse->crystalcap;
-
- crystal_cap = crystal_cap & 0x3f;
-
- return crystal_cap;
-}
-
-static void odm_set_atc_status(void *dm_void, bool atc_status)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct cfo_tracking *cfo_track =
- (struct cfo_tracking *)phydm_get_structure(dm, PHYDM_CFOTRACK);
-
- if (cfo_track->is_atc_status == atc_status)
- return;
-
- odm_set_bb_reg(dm, ODM_REG(BB_ATC, dm), ODM_BIT(BB_ATC, dm),
- atc_status);
- cfo_track->is_atc_status = atc_status;
-}
-
-static bool odm_get_atc_status(void *dm_void)
-{
- bool atc_status;
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- atc_status = (bool)odm_get_bb_reg(dm, ODM_REG(BB_ATC, dm),
- ODM_BIT(BB_ATC, dm));
- return atc_status;
-}
-
-void odm_cfo_tracking_reset(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct cfo_tracking *cfo_track =
- (struct cfo_tracking *)phydm_get_structure(dm, PHYDM_CFOTRACK);
-
- cfo_track->def_x_cap = odm_get_default_crytaltal_cap(dm);
- cfo_track->is_adjust = true;
-
- if (cfo_track->crystal_cap > cfo_track->def_x_cap) {
- odm_set_crystal_cap(dm, cfo_track->crystal_cap - 1);
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): approch default value (0x%x)\n", __func__,
- cfo_track->crystal_cap);
- } else if (cfo_track->crystal_cap < cfo_track->def_x_cap) {
- odm_set_crystal_cap(dm, cfo_track->crystal_cap + 1);
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): approch default value (0x%x)\n", __func__,
- cfo_track->crystal_cap);
- }
-
- odm_set_atc_status(dm, true);
-}
-
-void odm_cfo_tracking_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct cfo_tracking *cfo_track =
- (struct cfo_tracking *)phydm_get_structure(dm, PHYDM_CFOTRACK);
-
- cfo_track->crystal_cap = odm_get_default_crytaltal_cap(dm);
- cfo_track->def_x_cap = cfo_track->crystal_cap;
- cfo_track->is_atc_status = odm_get_atc_status(dm);
- cfo_track->is_adjust = true;
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING, "%s()=========>\n", __func__);
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): is_atc_status = %d, crystal_cap = 0x%x\n", __func__,
- cfo_track->is_atc_status, cfo_track->def_x_cap);
-
- /* Crystal cap. control by WiFi */
- if (dm->support_ic_type & ODM_RTL8822B)
- odm_set_bb_reg(dm, 0x10, 0x40, 0x1);
-}
-
-void odm_cfo_tracking(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct cfo_tracking *cfo_track =
- (struct cfo_tracking *)phydm_get_structure(dm, PHYDM_CFOTRACK);
- s32 cfo_ave = 0;
- u32 cfo_rpt_sum, cfo_khz_avg[4] = {0};
- s32 cfo_ave_diff;
- s8 crystal_cap = cfo_track->crystal_cap;
- u8 adjust_xtal = 1, i, valid_path_cnt = 0;
-
- /* 4 Support ability */
- if (!(dm->support_ability & ODM_BB_CFO_TRACKING)) {
- ODM_RT_TRACE(
- dm, ODM_COMP_CFO_TRACKING,
- "%s(): Return: support_ability ODM_BB_CFO_TRACKING is disabled\n",
- __func__);
- return;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING, "%s()=========>\n", __func__);
-
- if (!dm->is_linked || !dm->is_one_entry_only) {
- /* 4 No link or more than one entry */
- odm_cfo_tracking_reset(dm);
- ODM_RT_TRACE(
- dm, ODM_COMP_CFO_TRACKING,
- "%s(): Reset: is_linked = %d, is_one_entry_only = %d\n",
- __func__, dm->is_linked, dm->is_one_entry_only);
- } else {
- /* 3 1. CFO Tracking */
- /* 4 1.1 No new packet */
- if (cfo_track->packet_count == cfo_track->packet_count_pre) {
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): packet counter doesn't change\n",
- __func__);
- return;
- }
- cfo_track->packet_count_pre = cfo_track->packet_count;
-
- /* 4 1.2 Calculate CFO */
- for (i = 0; i < dm->num_rf_path; i++) {
- if (cfo_track->CFO_cnt[i] == 0)
- continue;
-
- valid_path_cnt++;
- cfo_rpt_sum =
- (u32)((cfo_track->CFO_tail[i] < 0) ?
- (0 - cfo_track->CFO_tail[i]) :
- cfo_track->CFO_tail[i]);
- cfo_khz_avg[i] = CFO_HW_RPT_2_MHZ(cfo_rpt_sum) /
- cfo_track->CFO_cnt[i];
-
- ODM_RT_TRACE(
- dm, ODM_COMP_CFO_TRACKING,
- "[path %d] cfo_rpt_sum = (( %d )), CFO_cnt = (( %d )) , CFO_avg= (( %s%d )) kHz\n",
- i, cfo_rpt_sum, cfo_track->CFO_cnt[i],
- ((cfo_track->CFO_tail[i] < 0) ? "-" : " "),
- cfo_khz_avg[i]);
- }
-
- for (i = 0; i < valid_path_cnt; i++) {
- if (cfo_track->CFO_tail[i] < 0) {
- /* */
- cfo_ave += (0 - (s32)cfo_khz_avg[i]);
- } else {
- cfo_ave += (s32)cfo_khz_avg[i];
- }
- }
-
- if (valid_path_cnt >= 2)
- cfo_ave = cfo_ave / valid_path_cnt;
-
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "valid_path_cnt = ((%d)), cfo_ave = ((%d kHz))\n",
- valid_path_cnt, cfo_ave);
-
- /*reset counter*/
- for (i = 0; i < dm->num_rf_path; i++) {
- cfo_track->CFO_tail[i] = 0;
- cfo_track->CFO_cnt[i] = 0;
- }
-
- /* 4 1.3 Avoid abnormal large CFO */
- cfo_ave_diff = (cfo_track->CFO_ave_pre >= cfo_ave) ?
- (cfo_track->CFO_ave_pre - cfo_ave) :
- (cfo_ave - cfo_track->CFO_ave_pre);
- if (cfo_ave_diff > 20 && cfo_track->large_cfo_hit == 0 &&
- !cfo_track->is_adjust) {
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): first large CFO hit\n", __func__);
- cfo_track->large_cfo_hit = 1;
- return;
- }
-
- cfo_track->large_cfo_hit = 0;
- cfo_track->CFO_ave_pre = cfo_ave;
-
- /* 4 1.4 Dynamic Xtal threshold */
- if (!cfo_track->is_adjust) {
- if (cfo_ave > CFO_TH_XTAL_HIGH ||
- cfo_ave < (-CFO_TH_XTAL_HIGH))
- cfo_track->is_adjust = true;
- } else {
- if (cfo_ave < CFO_TH_XTAL_LOW &&
- cfo_ave > (-CFO_TH_XTAL_LOW))
- cfo_track->is_adjust = false;
- }
-
- /* 4 1.5 BT case: Disable CFO tracking */
- if (dm->is_bt_enabled) {
- cfo_track->is_adjust = false;
- odm_set_crystal_cap(dm, cfo_track->def_x_cap);
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): Disable CFO tracking for BT!!\n",
- __func__);
- }
-
- /* 4 1.7 Adjust Crystal Cap. */
- if (cfo_track->is_adjust) {
- if (cfo_ave > CFO_TH_XTAL_LOW)
- crystal_cap = crystal_cap + adjust_xtal;
- else if (cfo_ave < (-CFO_TH_XTAL_LOW))
- crystal_cap = crystal_cap - adjust_xtal;
-
- if (crystal_cap > 0x3f)
- crystal_cap = 0x3f;
- else if (crystal_cap < 0)
- crystal_cap = 0;
-
- odm_set_crystal_cap(dm, (u8)crystal_cap);
- }
- ODM_RT_TRACE(
- dm, ODM_COMP_CFO_TRACKING,
- "%s(): Crystal cap = 0x%x, Default Crystal cap = 0x%x\n",
- __func__, cfo_track->crystal_cap, cfo_track->def_x_cap);
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- return;
-
- /* 3 2. Dynamic ATC switch */
- if (cfo_ave < CFO_TH_ATC && cfo_ave > -CFO_TH_ATC) {
- odm_set_atc_status(dm, false);
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): Disable ATC!!\n", __func__);
- } else {
- odm_set_atc_status(dm, true);
- ODM_RT_TRACE(dm, ODM_COMP_CFO_TRACKING,
- "%s(): Enable ATC!!\n", __func__);
- }
- }
-}
-
-void odm_parsing_cfo(void *dm_void, void *pktinfo_void, s8 *pcfotail, u8 num_ss)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_per_pkt_info *pktinfo =
- (struct dm_per_pkt_info *)pktinfo_void;
- struct cfo_tracking *cfo_track =
- (struct cfo_tracking *)phydm_get_structure(dm, PHYDM_CFOTRACK);
- u8 i;
-
- if (!(dm->support_ability & ODM_BB_CFO_TRACKING))
- return;
-
- if (pktinfo->is_packet_match_bssid) {
- if (num_ss > dm->num_rf_path) /*For fool proof*/
- num_ss = dm->num_rf_path;
-
- /* 3 Update CFO report for path-A & path-B */
- /* Only path-A and path-B have CFO tail and short CFO */
- for (i = 0; i < num_ss; i++) {
- cfo_track->CFO_tail[i] += pcfotail[i];
- cfo_track->CFO_cnt[i]++;
- }
-
- /* 3 Update packet counter */
- if (cfo_track->packet_count == 0xffffffff)
- cfo_track->packet_count = 0;
- else
- cfo_track->packet_count++;
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_cfotracking.h b/drivers/staging/rtlwifi/phydm/phydm_cfotracking.h
deleted file mode 100644
index 1ab015669dea..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_cfotracking.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMCFOTRACK_H__
-#define __PHYDMCFOTRACK_H__
-
-#define CFO_TRACKING_VERSION "1.4" /*2015.10.01 Stanley, Modify for 8822B*/
-
-#define CFO_TH_XTAL_HIGH 20 /* kHz */
-#define CFO_TH_XTAL_LOW 10 /* kHz */
-#define CFO_TH_ATC 80 /* kHz */
-
-struct cfo_tracking {
- bool is_atc_status;
- bool large_cfo_hit;
- bool is_adjust;
- u8 crystal_cap;
- u8 def_x_cap;
- s32 CFO_tail[4];
- u32 CFO_cnt[4];
- s32 CFO_ave_pre;
- u32 packet_count;
- u32 packet_count_pre;
-
- bool is_force_xtal_cap;
- bool is_reset;
-};
-
-void odm_cfo_tracking_reset(void *dm_void);
-
-void odm_cfo_tracking_init(void *dm_void);
-
-void odm_cfo_tracking(void *dm_void);
-
-void odm_parsing_cfo(void *dm_void, void *pktinfo_void, s8 *pcfotail,
- u8 num_ss);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_debug.c b/drivers/staging/rtlwifi/phydm/phydm_debug.c
deleted file mode 100644
index 91f2c054d83b..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_debug.c
+++ /dev/null
@@ -1,2888 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-#include <linux/kernel.h>
-
-bool phydm_api_set_txagc(struct phy_dm_struct *, u32, enum odm_rf_radio_path,
- u8, bool);
-static inline void phydm_check_dmval_txagc(struct phy_dm_struct *dm, u32 used,
- u32 out_len, u32 *const dm_value,
- char *output)
-{
- if ((u8)dm_value[2] != 0xff) {
- if (phydm_api_set_txagc(dm, dm_value[3],
- (enum odm_rf_radio_path)dm_value[1],
- (u8)dm_value[2], true))
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %s%d %s%x%s%x\n", "Write path-",
- dm_value[1], "rate index-0x",
- dm_value[2], " = 0x", dm_value[3]);
- else
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %s%d %s%x%s\n", "Write path-",
- (dm_value[1] & 0x1), "rate index-0x",
- (dm_value[2] & 0x7f), " fail");
- } else {
- u8 i;
- u32 power_index;
- bool status = true;
-
- power_index = (dm_value[3] & 0x3f);
-
- if (dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8821C)) {
- power_index = (power_index << 24) |
- (power_index << 16) | (power_index << 8) |
- (power_index);
- for (i = 0; i < ODM_RATEVHTSS2MCS9; i += 4)
- status = (status &
- phydm_api_set_txagc(
- dm, power_index,
- (enum odm_rf_radio_path)
- dm_value[1],
- i, false));
- } else if (dm->support_ic_type & ODM_RTL8197F) {
- for (i = 0; i <= ODM_RATEMCS15; i++)
- status = (status &
- phydm_api_set_txagc(
- dm, power_index,
- (enum odm_rf_radio_path)
- dm_value[1],
- i, false));
- }
-
- if (status)
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %s%d %s%x\n",
- "Write all TXAGC of path-", dm_value[1],
- " = 0x", dm_value[3]);
- else
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %s%d %s\n",
- "Write all TXAGC of path-", dm_value[1],
- " fail");
- }
-}
-
-static inline void phydm_print_nhm_trigger(char *output, u32 used, u32 out_len,
- struct ccx_info *ccx_info)
-{
- int i;
-
- for (i = 0; i <= 10; i++) {
- if (i == 5)
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "\r\n NHM_th[%d] = 0x%x, echo_IGI = 0x%x", i,
- ccx_info->NHM_th[i], ccx_info->echo_IGI);
- else if (i == 10)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n NHM_th[%d] = 0x%x\n", i,
- ccx_info->NHM_th[i]);
- else
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n NHM_th[%d] = 0x%x", i,
- ccx_info->NHM_th[i]);
- }
-}
-
-static inline void phydm_print_nhm_result(char *output, u32 used, u32 out_len,
- struct ccx_info *ccx_info)
-{
- int i;
-
- for (i = 0; i <= 11; i++) {
- if (i == 5)
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "\r\n nhm_result[%d] = %d, echo_IGI = 0x%x", i,
- ccx_info->NHM_result[i], ccx_info->echo_IGI);
- else if (i == 11)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n nhm_result[%d] = %d\n", i,
- ccx_info->NHM_result[i]);
- else
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n nhm_result[%d] = %d", i,
- ccx_info->NHM_result[i]);
- }
-}
-
-static inline void phydm_print_csi(struct phy_dm_struct *dm, u32 used,
- u32 out_len, char *output)
-{
- int index, ptr;
- u32 dword_h, dword_l;
-
- for (index = 0; index < 80; index++) {
- ptr = index + 256;
-
- if (ptr > 311)
- ptr -= 312;
-
- odm_set_bb_reg(dm, 0x1910, 0x03FF0000, ptr); /*Select Address*/
- dword_h = odm_get_bb_reg(dm, 0xF74, MASKDWORD);
- dword_l = odm_get_bb_reg(dm, 0xF5C, MASKDWORD);
-
- PHYDM_SNPRINTF(output + used,
- out_len - used,
- "%02x %02x %02x %02x %02x %02x %02x %02x\n",
- dword_l & MASKBYTE0,
- (dword_l & MASKBYTE1) >> 8,
- (dword_l & MASKBYTE2) >> 16,
- (dword_l & MASKBYTE3) >> 24,
- dword_h & MASKBYTE0,
- (dword_h & MASKBYTE1) >> 8,
- (dword_h & MASKBYTE2) >> 16,
- (dword_h & MASKBYTE3) >> 24);
- }
-}
-
-void phydm_init_debug_setting(struct phy_dm_struct *dm)
-{
- dm->debug_level = ODM_DBG_TRACE;
-
- dm->fw_debug_components = 0;
- dm->debug_components = 0;
-
- dm->fw_buff_is_enpty = true;
- dm->pre_c2h_seq = 0;
-}
-
-u8 phydm_set_bb_dbg_port(void *dm_void, u8 curr_dbg_priority, u32 debug_port)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 dbg_port_result = false;
-
- if (curr_dbg_priority > dm->pre_dbg_priority) {
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, 0x8fc, MASKDWORD, debug_port);
- /**/
- } else /*if (dm->support_ic_type & ODM_IC_11N_SERIES)*/ {
- odm_set_bb_reg(dm, 0x908, MASKDWORD, debug_port);
- /**/
- }
- ODM_RT_TRACE(
- dm, ODM_COMP_API,
- "DbgPort set success, Reg((0x%x)), Cur_priority=((%d)), Pre_priority=((%d))\n",
- debug_port, curr_dbg_priority, dm->pre_dbg_priority);
- dm->pre_dbg_priority = curr_dbg_priority;
- dbg_port_result = true;
- }
-
- return dbg_port_result;
-}
-
-void phydm_release_bb_dbg_port(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- dm->pre_dbg_priority = BB_DBGPORT_RELEASE;
- ODM_RT_TRACE(dm, ODM_COMP_API, "Release BB dbg_port\n");
-}
-
-u32 phydm_get_bb_dbg_port_value(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 dbg_port_value = 0;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- dbg_port_value = odm_get_bb_reg(dm, 0xfa0, MASKDWORD);
- /**/
- } else /*if (dm->support_ic_type & ODM_IC_11N_SERIES)*/ {
- dbg_port_value = odm_get_bb_reg(dm, 0xdf4, MASKDWORD);
- /**/
- }
- ODM_RT_TRACE(dm, ODM_COMP_API, "dbg_port_value = 0x%x\n",
- dbg_port_value);
- return dbg_port_value;
-}
-
-static void phydm_bb_rx_hang_info(void *dm_void, u32 *_used, char *output,
- u32 *_out_len)
-{
- u32 value32 = 0;
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- return;
-
- value32 = odm_get_bb_reg(dm, 0xF80, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = 0x%x",
- "rptreg of sc/bw/ht/...", value32);
-
- if (dm->support_ic_type & ODM_RTL8822B)
- odm_set_bb_reg(dm, 0x198c, BIT(2) | BIT(1) | BIT(0), 7);
-
- /* dbg_port = basic state machine */
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0x000);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "basic state machine",
- value32);
- }
-
- /* dbg_port = state machine */
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0x007);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "state machine", value32);
- }
-
- /* dbg_port = CCA-related*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0x204);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "CCA-related", value32);
- }
-
- /* dbg_port = edcca/rxd*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0x278);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "edcca/rxd", value32);
- }
-
- /* dbg_port = rx_state/mux_state/ADC_MASK_OFDM*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0x290);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x",
- "rx_state/mux_state/ADC_MASK_OFDM", value32);
- }
-
- /* dbg_port = bf-related*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0x2B2);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "bf-related", value32);
- }
-
- /* dbg_port = bf-related*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0x2B8);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "bf-related", value32);
- }
-
- /* dbg_port = txon/rxd*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xA03);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "txon/rxd", value32);
- }
-
- /* dbg_port = l_rate/l_length*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xA0B);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "l_rate/l_length", value32);
- }
-
- /* dbg_port = rxd/rxd_hit*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xA0D);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "rxd/rxd_hit", value32);
- }
-
- /* dbg_port = dis_cca*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xAA0);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "dis_cca", value32);
- }
-
- /* dbg_port = tx*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xAB0);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "tx", value32);
- }
-
- /* dbg_port = rx plcp*/
- {
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xAD0);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "rx plcp", value32);
-
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xAD1);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "rx plcp", value32);
-
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xAD2);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "rx plcp", value32);
-
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD, 0xAD3);
- value32 = odm_get_bb_reg(dm, ODM_REG_DBG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "0x8fc", value32);
-
- value32 = odm_get_bb_reg(dm, ODM_REG_RPT_11AC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = 0x%x", "rx plcp", value32);
- }
-}
-
-static void phydm_bb_debug_info_n_series(void *dm_void, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- u32 value32 = 0, value32_1 = 0;
- u8 rf_gain_a = 0, rf_gain_b = 0, rf_gain_c = 0, rf_gain_d = 0;
- u8 rx_snr_a = 0, rx_snr_b = 0, rx_snr_c = 0, rx_snr_d = 0;
-
- s8 rxevm_0 = 0, rxevm_1 = 0;
- s32 short_cfo_a = 0, short_cfo_b = 0, long_cfo_a = 0, long_cfo_b = 0;
- s32 scfo_a = 0, scfo_b = 0, avg_cfo_a = 0, avg_cfo_b = 0;
- s32 cfo_end_a = 0, cfo_end_b = 0, acq_cfo_a = 0, acq_cfo_b = 0;
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s\n",
- "BB Report Info");
-
- /*AGC result*/
- value32 = odm_get_bb_reg(dm, 0xdd0, MASKDWORD);
- rf_gain_a = (u8)(value32 & 0x3f);
- rf_gain_a = rf_gain_a << 1;
-
- rf_gain_b = (u8)((value32 >> 8) & 0x3f);
- rf_gain_b = rf_gain_b << 1;
-
- rf_gain_c = (u8)((value32 >> 16) & 0x3f);
- rf_gain_c = rf_gain_c << 1;
-
- rf_gain_d = (u8)((value32 >> 24) & 0x3f);
- rf_gain_d = rf_gain_d << 1;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d / %d",
- "OFDM RX RF Gain(A/B/C/D)", rf_gain_a, rf_gain_b,
- rf_gain_c, rf_gain_d);
-
- /*SNR report*/
- value32 = odm_get_bb_reg(dm, 0xdd4, MASKDWORD);
- rx_snr_a = (u8)(value32 & 0xff);
- rx_snr_a = rx_snr_a >> 1;
-
- rx_snr_b = (u8)((value32 >> 8) & 0xff);
- rx_snr_b = rx_snr_b >> 1;
-
- rx_snr_c = (u8)((value32 >> 16) & 0xff);
- rx_snr_c = rx_snr_c >> 1;
-
- rx_snr_d = (u8)((value32 >> 24) & 0xff);
- rx_snr_d = rx_snr_d >> 1;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d / %d", "RXSNR(A/B/C/D, dB)",
- rx_snr_a, rx_snr_b, rx_snr_c, rx_snr_d);
-
- /* PostFFT related info*/
- value32 = odm_get_bb_reg(dm, 0xdd8, MASKDWORD);
-
- rxevm_0 = (s8)((value32 & MASKBYTE2) >> 16);
- rxevm_0 /= 2;
- if (rxevm_0 < -63)
- rxevm_0 = 0;
-
- rxevm_1 = (s8)((value32 & MASKBYTE3) >> 24);
- rxevm_1 /= 2;
- if (rxevm_1 < -63)
- rxevm_1 = 0;
-
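-	/*
-	 * The RXEVM bytes appear to be signed values in 0.5 dB steps; after
-	 * halving, anything below -63 dB is treated as invalid and reported
-	 * as 0.
-	 */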
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "RXEVM (1ss/2ss)", rxevm_0, rxevm_1);
-
- /*CFO Report Info*/
- odm_set_bb_reg(dm, 0xd00, BIT(26), 1);
-
- /*Short CFO*/
- value32 = odm_get_bb_reg(dm, 0xdac, MASKDWORD);
- value32_1 = odm_get_bb_reg(dm, 0xdb0, MASKDWORD);
-
- short_cfo_b = (s32)(value32 & 0xfff); /*S(12,11)*/
- short_cfo_a = (s32)((value32 & 0x0fff0000) >> 16);
-
- long_cfo_b = (s32)(value32_1 & 0x1fff); /*S(13,12)*/
- long_cfo_a = (s32)((value32_1 & 0x1fff0000) >> 16);
-
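-	/*
-	 * The CFO fields above are 2's-complement fixed-point numbers
-	 * (S(12,11)/S(13,12)). The blocks below sign-extend them and scale by
-	 * 312500 / 2^(fractional bits), i.e. by the 312.5 kHz OFDM subcarrier
-	 * spacing, to report the offset in Hz.
-	 */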
- /*SFO 2's to dec*/
- if (short_cfo_a > 2047)
- short_cfo_a = short_cfo_a - 4096;
- if (short_cfo_b > 2047)
- short_cfo_b = short_cfo_b - 4096;
-
- short_cfo_a = (short_cfo_a * 312500) / 2048;
- short_cfo_b = (short_cfo_b * 312500) / 2048;
-
- /*LFO 2's to dec*/
-
- if (long_cfo_a > 4095)
- long_cfo_a = long_cfo_a - 8192;
-
- if (long_cfo_b > 4095)
- long_cfo_b = long_cfo_b - 8192;
-
- long_cfo_a = long_cfo_a * 312500 / 4096;
- long_cfo_b = long_cfo_b * 312500 / 4096;
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s",
- "CFO Report Info");
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "Short CFO(Hz) <A/B>", short_cfo_a, short_cfo_b);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "Long CFO(Hz) <A/B>", long_cfo_a, long_cfo_b);
-
- /*SCFO*/
- value32 = odm_get_bb_reg(dm, 0xdb8, MASKDWORD);
- value32_1 = odm_get_bb_reg(dm, 0xdb4, MASKDWORD);
-
- scfo_b = (s32)(value32 & 0x7ff); /*S(11,10)*/
- scfo_a = (s32)((value32 & 0x07ff0000) >> 16);
-
- if (scfo_a > 1023)
- scfo_a = scfo_a - 2048;
-
- if (scfo_b > 1023)
- scfo_b = scfo_b - 2048;
-
- scfo_a = scfo_a * 312500 / 1024;
- scfo_b = scfo_b * 312500 / 1024;
-
- avg_cfo_b = (s32)(value32_1 & 0x1fff); /*S(13,12)*/
- avg_cfo_a = (s32)((value32_1 & 0x1fff0000) >> 16);
-
- if (avg_cfo_a > 4095)
- avg_cfo_a = avg_cfo_a - 8192;
-
- if (avg_cfo_b > 4095)
- avg_cfo_b = avg_cfo_b - 8192;
-
- avg_cfo_a = avg_cfo_a * 312500 / 4096;
- avg_cfo_b = avg_cfo_b * 312500 / 4096;
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "value SCFO(Hz) <A/B>", scfo_a, scfo_b);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "Avg CFO(Hz) <A/B>", avg_cfo_a, avg_cfo_b);
-
- value32 = odm_get_bb_reg(dm, 0xdbc, MASKDWORD);
- value32_1 = odm_get_bb_reg(dm, 0xde0, MASKDWORD);
-
- cfo_end_b = (s32)(value32 & 0x1fff); /*S(13,12)*/
- cfo_end_a = (s32)((value32 & 0x1fff0000) >> 16);
-
- if (cfo_end_a > 4095)
- cfo_end_a = cfo_end_a - 8192;
-
- if (cfo_end_b > 4095)
- cfo_end_b = cfo_end_b - 8192;
-
- cfo_end_a = cfo_end_a * 312500 / 4096;
- cfo_end_b = cfo_end_b * 312500 / 4096;
-
- acq_cfo_b = (s32)(value32_1 & 0x1fff); /*S(13,12)*/
- acq_cfo_a = (s32)((value32_1 & 0x1fff0000) >> 16);
-
- if (acq_cfo_a > 4095)
- acq_cfo_a = acq_cfo_a - 8192;
-
- if (acq_cfo_b > 4095)
- acq_cfo_b = acq_cfo_b - 8192;
-
- acq_cfo_a = acq_cfo_a * 312500 / 4096;
- acq_cfo_b = acq_cfo_b * 312500 / 4096;
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "End CFO(Hz) <A/B>", cfo_end_a, cfo_end_b);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "ACQ CFO(Hz) <A/B>", acq_cfo_a, acq_cfo_b);
-}
-
-static void phydm_bb_debug_info(void *dm_void, u32 *_used, char *output,
- u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- char *tmp_string = NULL;
-
- u8 rx_ht_bw, rx_vht_bw, rxsc, rx_ht, rx_bw;
- static u8 v_rx_bw;
- u32 value32, value32_1, value32_2, value32_3;
- s32 sfo_a, sfo_b, sfo_c, sfo_d;
- s32 lfo_a, lfo_b, lfo_c, lfo_d;
- static u8 MCSS, tail, parity, rsv, vrsv, idx, smooth, htsound, agg,
- stbc, vstbc, fec, fecext, sgi, sgiext, htltf, vgid, v_nsts,
- vtxops, vrsv2, vbrsv, bf, vbcrc;
- static u16 h_length, htcrc8, length;
- static u16 vpaid;
- static u16 v_length, vhtcrc8, v_mcss, v_tail, vb_tail;
- static u8 hmcss, hrx_bw;
-
- u8 pwdb;
- s8 rxevm_0, rxevm_1, rxevm_2;
- u8 rf_gain_path_a, rf_gain_path_b, rf_gain_path_c, rf_gain_path_d;
- u8 rx_snr_path_a, rx_snr_path_b, rx_snr_path_c, rx_snr_path_d;
- s32 sig_power;
-
- const char *L_rate[8] = {"6M", "9M", "12M", "18M",
- "24M", "36M", "48M", "54M"};
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- phydm_bb_debug_info_n_series(dm, &used, output, &out_len);
- return;
- }
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s\n",
- "BB Report Info");
-
- /*BW & mode Detection*/
-
- value32 = odm_get_bb_reg(dm, 0xf80, MASKDWORD);
- value32_2 = value32;
- rx_ht_bw = (u8)(value32 & 0x1);
- rx_vht_bw = (u8)((value32 >> 1) & 0x3);
- rxsc = (u8)(value32 & 0x78);
- value32_1 = (value32 & 0x180) >> 7;
- rx_ht = (u8)(value32_1);
-
- rx_bw = 0;
-
- if (rx_ht == 2) {
- if (rx_vht_bw == 0)
- tmp_string = "20M";
- else if (rx_vht_bw == 1)
- tmp_string = "40M";
- else
- tmp_string = "80M";
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s %s %s", "mode", "VHT", tmp_string);
- rx_bw = rx_vht_bw;
- } else if (rx_ht == 1) {
- if (rx_ht_bw == 0)
- tmp_string = "20M";
- else if (rx_ht_bw == 1)
- tmp_string = "40M";
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s %s %s", "mode", "HT", tmp_string);
- rx_bw = rx_ht_bw;
- } else {
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s %s",
- "mode", "Legacy");
- }
- if (rx_ht != 0) {
- if (rxsc == 0)
- tmp_string = "duplicate/full bw";
- else if (rxsc == 1)
- tmp_string = "usc20-1";
- else if (rxsc == 2)
- tmp_string = "lsc20-1";
- else if (rxsc == 3)
- tmp_string = "usc20-2";
- else if (rxsc == 4)
- tmp_string = "lsc20-2";
- else if (rxsc == 9)
- tmp_string = "usc40";
- else if (rxsc == 10)
- tmp_string = "lsc40";
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s",
- tmp_string);
- }
-
- /* RX signal power and AGC related info*/
-
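-	/*
-	 * The raw power report appears to be in 0.5 dB steps (hence the >> 1);
-	 * -110 is presumably the dBm reference floor of this baseband.
-	 */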
- value32 = odm_get_bb_reg(dm, 0xF90, MASKDWORD);
- pwdb = (u8)((value32 & MASKBYTE1) >> 8);
- pwdb = pwdb >> 1;
- sig_power = -110 + pwdb;
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d",
- "OFDM RX Signal Power(dB)", sig_power);
-
- value32 = odm_get_bb_reg(dm, 0xd14, MASKDWORD);
- rx_snr_path_a = (u8)(value32 & 0xFF) >> 1;
- rf_gain_path_a = (s8)((value32 & MASKBYTE1) >> 8);
- rf_gain_path_a *= 2;
- value32 = odm_get_bb_reg(dm, 0xd54, MASKDWORD);
- rx_snr_path_b = (u8)(value32 & 0xFF) >> 1;
- rf_gain_path_b = (s8)((value32 & MASKBYTE1) >> 8);
- rf_gain_path_b *= 2;
- value32 = odm_get_bb_reg(dm, 0xd94, MASKDWORD);
- rx_snr_path_c = (u8)(value32 & 0xFF) >> 1;
- rf_gain_path_c = (s8)((value32 & MASKBYTE1) >> 8);
- rf_gain_path_c *= 2;
- value32 = odm_get_bb_reg(dm, 0xdd4, MASKDWORD);
- rx_snr_path_d = (u8)(value32 & 0xFF) >> 1;
- rf_gain_path_d = (s8)((value32 & MASKBYTE1) >> 8);
- rf_gain_path_d *= 2;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d / %d",
- "OFDM RX RF Gain(A/B/C/D)", rf_gain_path_a,
- rf_gain_path_b, rf_gain_path_c, rf_gain_path_d);
-
- /* RX counter related info*/
-
- value32 = odm_get_bb_reg(dm, 0xF08, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d",
- "OFDM CCA counter", ((value32 & 0xFFFF0000) >> 16));
-
- value32 = odm_get_bb_reg(dm, 0xFD0, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d",
- "OFDM SBD Fail counter", value32 & 0xFFFF);
-
- value32 = odm_get_bb_reg(dm, 0xFC4, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "VHT SIGA/SIGB CRC8 Fail counter", value32 & 0xFFFF,
- ((value32 & 0xFFFF0000) >> 16));
-
- value32 = odm_get_bb_reg(dm, 0xFCC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d",
- "CCK CCA counter", value32 & 0xFFFF);
-
- value32 = odm_get_bb_reg(dm, 0xFBC, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "LSIG (parity Fail/rate Illegal) counter",
- value32 & 0xFFFF, ((value32 & 0xFFFF0000) >> 16));
-
- value32_1 = odm_get_bb_reg(dm, 0xFC8, MASKDWORD);
- value32_2 = odm_get_bb_reg(dm, 0xFC0, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "HT/VHT MCS NOT SUPPORT counter",
- ((value32_2 & 0xFFFF0000) >> 16), value32_1 & 0xFFFF);
-
- /* PostFFT related info*/
- value32 = odm_get_bb_reg(dm, 0xF8c, MASKDWORD);
- rxevm_0 = (s8)((value32 & MASKBYTE2) >> 16);
- rxevm_0 /= 2;
- if (rxevm_0 < -63)
- rxevm_0 = 0;
-
- rxevm_1 = (s8)((value32 & MASKBYTE3) >> 24);
- rxevm_1 /= 2;
- value32 = odm_get_bb_reg(dm, 0xF88, MASKDWORD);
- rxevm_2 = (s8)((value32 & MASKBYTE2) >> 16);
- rxevm_2 /= 2;
-
- if (rxevm_1 < -63)
- rxevm_1 = 0;
- if (rxevm_2 < -63)
- rxevm_2 = 0;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d", "RXEVM (1ss/2ss/3ss)",
- rxevm_0, rxevm_1, rxevm_2);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d / %d", "RXSNR(A/B/C/D, dB)",
- rx_snr_path_a, rx_snr_path_b, rx_snr_path_c,
- rx_snr_path_d);
-
- value32 = odm_get_bb_reg(dm, 0xF8C, MASKDWORD);
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s = %d / %d",
- "CSI_1st /CSI_2nd", value32 & 0xFFFF,
- ((value32 & 0xFFFF0000) >> 16));
-
- /*BW & mode Detection*/
-
- /*Reset Page F counter*/
- odm_set_bb_reg(dm, 0xB58, BIT(0), 1);
- odm_set_bb_reg(dm, 0xB58, BIT(0), 0);
-
- /*CFO Report Info*/
- /*Short CFO*/
- value32 = odm_get_bb_reg(dm, 0xd0c, MASKDWORD);
- value32_1 = odm_get_bb_reg(dm, 0xd4c, MASKDWORD);
- value32_2 = odm_get_bb_reg(dm, 0xd8c, MASKDWORD);
- value32_3 = odm_get_bb_reg(dm, 0xdcc, MASKDWORD);
-
- sfo_a = (s32)(value32 & 0xfff);
- sfo_b = (s32)(value32_1 & 0xfff);
- sfo_c = (s32)(value32_2 & 0xfff);
- sfo_d = (s32)(value32_3 & 0xfff);
-
- lfo_a = (s32)(value32 >> 16);
- lfo_b = (s32)(value32_1 >> 16);
- lfo_c = (s32)(value32_2 >> 16);
- lfo_d = (s32)(value32_3 >> 16);
-
- /*SFO 2's to dec*/
- if (sfo_a > 2047)
- sfo_a = sfo_a - 4096;
- sfo_a = (sfo_a * 312500) / 2048;
- if (sfo_b > 2047)
- sfo_b = sfo_b - 4096;
- sfo_b = (sfo_b * 312500) / 2048;
- if (sfo_c > 2047)
- sfo_c = sfo_c - 4096;
- sfo_c = (sfo_c * 312500) / 2048;
- if (sfo_d > 2047)
- sfo_d = sfo_d - 4096;
- sfo_d = (sfo_d * 312500) / 2048;
-
- /*LFO 2's to dec*/
-
- if (lfo_a > 4095)
- lfo_a = lfo_a - 8192;
-
- if (lfo_b > 4095)
- lfo_b = lfo_b - 8192;
-
- if (lfo_c > 4095)
- lfo_c = lfo_c - 8192;
-
- if (lfo_d > 4095)
- lfo_d = lfo_d - 8192;
- lfo_a = lfo_a * 312500 / 4096;
- lfo_b = lfo_b * 312500 / 4096;
- lfo_c = lfo_c * 312500 / 4096;
- lfo_d = lfo_d * 312500 / 4096;
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s",
- "CFO Report Info");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d /%d",
- "Short CFO(Hz) <A/B/C/D>", sfo_a, sfo_b, sfo_c, sfo_d);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d /%d",
- "Long CFO(Hz) <A/B/C/D>", lfo_a, lfo_b, lfo_c, lfo_d);
-
- /*SCFO*/
- value32 = odm_get_bb_reg(dm, 0xd10, MASKDWORD);
- value32_1 = odm_get_bb_reg(dm, 0xd50, MASKDWORD);
- value32_2 = odm_get_bb_reg(dm, 0xd90, MASKDWORD);
- value32_3 = odm_get_bb_reg(dm, 0xdd0, MASKDWORD);
-
- sfo_a = (s32)(value32 & 0x7ff);
- sfo_b = (s32)(value32_1 & 0x7ff);
- sfo_c = (s32)(value32_2 & 0x7ff);
- sfo_d = (s32)(value32_3 & 0x7ff);
-
- if (sfo_a > 1023)
- sfo_a = sfo_a - 2048;
-
- if (sfo_b > 2047)
- sfo_b = sfo_b - 4096;
-
- if (sfo_c > 2047)
- sfo_c = sfo_c - 4096;
-
- if (sfo_d > 2047)
- sfo_d = sfo_d - 4096;
-
- sfo_a = sfo_a * 312500 / 1024;
- sfo_b = sfo_b * 312500 / 1024;
- sfo_c = sfo_c * 312500 / 1024;
- sfo_d = sfo_d * 312500 / 1024;
-
- lfo_a = (s32)(value32 >> 16);
- lfo_b = (s32)(value32_1 >> 16);
- lfo_c = (s32)(value32_2 >> 16);
- lfo_d = (s32)(value32_3 >> 16);
-
- if (lfo_a > 4095)
- lfo_a = lfo_a - 8192;
-
- if (lfo_b > 4095)
- lfo_b = lfo_b - 8192;
-
- if (lfo_c > 4095)
- lfo_c = lfo_c - 8192;
-
- if (lfo_d > 4095)
- lfo_d = lfo_d - 8192;
- lfo_a = lfo_a * 312500 / 4096;
- lfo_b = lfo_b * 312500 / 4096;
- lfo_c = lfo_c * 312500 / 4096;
- lfo_d = lfo_d * 312500 / 4096;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d /%d",
- "value SCFO(Hz) <A/B/C/D>", sfo_a, sfo_b, sfo_c, sfo_d);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d /%d", "ACQ CFO(Hz) <A/B/C/D>",
- lfo_a, lfo_b, lfo_c, lfo_d);
-
- value32 = odm_get_bb_reg(dm, 0xd14, MASKDWORD);
- value32_1 = odm_get_bb_reg(dm, 0xd54, MASKDWORD);
- value32_2 = odm_get_bb_reg(dm, 0xd94, MASKDWORD);
- value32_3 = odm_get_bb_reg(dm, 0xdd4, MASKDWORD);
-
- lfo_a = (s32)(value32 >> 16);
- lfo_b = (s32)(value32_1 >> 16);
- lfo_c = (s32)(value32_2 >> 16);
- lfo_d = (s32)(value32_3 >> 16);
-
- if (lfo_a > 4095)
- lfo_a = lfo_a - 8192;
-
- if (lfo_b > 4095)
- lfo_b = lfo_b - 8192;
-
- if (lfo_c > 4095)
- lfo_c = lfo_c - 8192;
-
- if (lfo_d > 4095)
- lfo_d = lfo_d - 8192;
-
- lfo_a = lfo_a * 312500 / 4096;
- lfo_b = lfo_b * 312500 / 4096;
- lfo_c = lfo_c * 312500 / 4096;
- lfo_d = lfo_d * 312500 / 4096;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d / %d / %d /%d", "End CFO(Hz) <A/B/C/D>",
- lfo_a, lfo_b, lfo_c, lfo_d);
-
- value32 = odm_get_bb_reg(dm, 0xf20, MASKDWORD); /*L SIG*/
-
- tail = (u8)((value32 & 0xfc0000) >> 16);
- parity = (u8)((value32 & 0x20000) >> 16);
- length = (u16)((value32 & 0x1ffe00) >> 8);
- rsv = (u8)(value32 & 0x10);
- MCSS = (u8)(value32 & 0x0f);
-
- switch (MCSS) {
- case 0x0b:
- idx = 0;
- break;
- case 0x0f:
- idx = 1;
- break;
- case 0x0a:
- idx = 2;
- break;
- case 0x0e:
- idx = 3;
- break;
- case 0x09:
- idx = 4;
- break;
- case 0x08:
- idx = 5;
- break;
- case 0x0c:
- idx = 6;
- break;
- default:
- idx = 6;
- break;
- }
-
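-	/*
-	 * idx selects an entry of L_rate[]; unrecognized L-SIG RATE encodings
-	 * fall back to index 6 ("48M") via the default case above.
-	 */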
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s", "L-SIG");
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s : %s", "rate",
- L_rate[idx]);
-	PHYDM_SNPRINTF(output + used, out_len - used,
-		       "\r\n %-35s = %x / %x / %x", "Rsv/length/parity", rsv,
-		       length, parity);
-
- value32 = odm_get_bb_reg(dm, 0xf2c, MASKDWORD); /*HT SIG*/
- if (rx_ht == 1) {
- hmcss = (u8)(value32 & 0x7F);
- hrx_bw = (u8)(value32 & 0x80);
- h_length = (u16)((value32 >> 8) & 0xffff);
- }
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s", "HT-SIG1");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %x / %x / %x", "MCS/BW/length", hmcss,
- hrx_bw, h_length);
-
- value32 = odm_get_bb_reg(dm, 0xf30, MASKDWORD); /*HT SIG*/
-
- if (rx_ht == 1) {
- smooth = (u8)(value32 & 0x01);
- htsound = (u8)(value32 & 0x02);
- rsv = (u8)(value32 & 0x04);
- agg = (u8)(value32 & 0x08);
- stbc = (u8)(value32 & 0x30);
- fec = (u8)(value32 & 0x40);
- sgi = (u8)(value32 & 0x80);
- htltf = (u8)((value32 & 0x300) >> 8);
- htcrc8 = (u16)((value32 & 0x3fc00) >> 8);
- tail = (u8)((value32 & 0xfc0000) >> 16);
- }
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s", "HT-SIG2");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %x / %x / %x / %x / %x / %x",
- "Smooth/NoSound/Rsv/Aggregate/STBC/LDPC", smooth,
- htsound, rsv, agg, stbc, fec);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %x / %x / %x / %x",
- "SGI/E-HT-LTFs/CRC/tail", sgi, htltf, htcrc8, tail);
-
- value32 = odm_get_bb_reg(dm, 0xf2c, MASKDWORD); /*VHT SIG A1*/
- if (rx_ht == 2) {
- /* value32 = odm_get_bb_reg(dm, 0xf2c,MASKDWORD);*/
- v_rx_bw = (u8)(value32 & 0x03);
- vrsv = (u8)(value32 & 0x04);
- vstbc = (u8)(value32 & 0x08);
- vgid = (u8)((value32 & 0x3f0) >> 4);
- v_nsts = (u8)(((value32 & 0x1c00) >> 8) + 1);
- vpaid = (u16)(value32 & 0x3fe);
- vtxops = (u8)((value32 & 0x400000) >> 20);
- vrsv2 = (u8)((value32 & 0x800000) >> 20);
- }
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s",
- "VHT-SIG-A1");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %x / %x / %x / %x / %x / %x / %x / %x",
- "BW/Rsv1/STBC/GID/Nsts/PAID/TXOPPS/Rsv2", v_rx_bw, vrsv,
- vstbc, vgid, v_nsts, vpaid, vtxops, vrsv2);
-
- value32 = odm_get_bb_reg(dm, 0xf30, MASKDWORD); /*VHT SIG*/
-
- if (rx_ht == 2) {
- /*value32 = odm_get_bb_reg(dm, 0xf30,MASKDWORD); */ /*VHT SIG*/
-
- /* sgi=(u8)(value32&0x01); */
- sgiext = (u8)(value32 & 0x03);
- /* fec = (u8)(value32&0x04); */
- fecext = (u8)(value32 & 0x0C);
-
- v_mcss = (u8)(value32 & 0xf0);
- bf = (u8)((value32 & 0x100) >> 8);
- vrsv = (u8)((value32 & 0x200) >> 8);
- vhtcrc8 = (u16)((value32 & 0x3fc00) >> 8);
- v_tail = (u8)((value32 & 0xfc0000) >> 16);
- }
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s",
- "VHT-SIG-A2");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %x / %x / %x / %x / %x / %x / %x",
- "SGI/FEC/MCS/BF/Rsv/CRC/tail", sgiext, fecext, v_mcss,
- bf, vrsv, vhtcrc8, v_tail);
-
- value32 = odm_get_bb_reg(dm, 0xf34, MASKDWORD); /*VHT SIG*/
- {
- v_length = (u16)(value32 & 0x1fffff);
- vbrsv = (u8)((value32 & 0x600000) >> 20);
- vb_tail = (u16)((value32 & 0x1f800000) >> 20);
- vbcrc = (u8)((value32 & 0x80000000) >> 28);
- }
- PHYDM_SNPRINTF(output + used, out_len - used, "\r\n %-35s",
- "VHT-SIG-B");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %x / %x / %x / %x", "length/Rsv/tail/CRC",
- v_length, vbrsv, vb_tail, vbcrc);
-
- /*for Condition number*/
- if (dm->support_ic_type & ODM_RTL8822B) {
- s32 condition_num = 0;
- char *factor = NULL;
-
- /*enable report condition number*/
- odm_set_bb_reg(dm, 0x1988, BIT(22), 0x1);
-
- condition_num = odm_get_bb_reg(dm, 0xf84, MASKDWORD);
- condition_num = (condition_num & 0x3ffff) >> 4;
-
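-		/*
-		 * The factor printed below is the FFT-size / data-subcarrier
-		 * ratio for the detected bandwidth and mode (e.g. 256/234 for
-		 * VHT80), presumably used when interpreting the raw condition
-		 * number.
-		 */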
- if (*dm->band_width == ODM_BW80M) {
- factor = "256/234";
- } else if (*dm->band_width == ODM_BW40M) {
- factor = "128/108";
- } else if (*dm->band_width == ODM_BW20M) {
- if (rx_ht == 2 || rx_ht == 1)
- factor = "64/52"; /*HT or VHT*/
- else
- factor = "64/48"; /*legacy*/
- }
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n %-35s = %d (factor = %s)",
- "Condition number", condition_num, factor);
- }
-}
-
-void phydm_basic_dbg_message(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct false_alarm_stat *false_alm_cnt =
- (struct false_alarm_stat *)phydm_get_structure(
- dm, PHYDM_FALSEALMCNT);
- struct cfo_tracking *cfo_track =
- (struct cfo_tracking *)phydm_get_structure(dm, PHYDM_CFOTRACK);
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- struct ra_table *ra_tab = &dm->dm_ra_table;
- u16 macid, phydm_macid, client_cnt = 0;
- struct rtl_sta_info *entry;
- s32 tmp_val = 0;
- u8 tmp_val_u1 = 0;
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "[PHYDM Common MSG] System up time: ((%d sec))----->\n",
- dm->phydm_sys_up_time);
-
- if (dm->is_linked) {
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "ID=%d, BW=((%d)), CH=((%d))\n",
- dm->curr_station_id, 20 << *dm->band_width,
- *dm->channel);
-
- /*Print RX rate*/
- if (dm->rx_rate <= ODM_RATE11M)
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "[CCK AGC Report] LNA_idx = 0x%x, VGA_idx = 0x%x\n",
- dm->cck_lna_idx, dm->cck_vga_idx);
- else
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "[OFDM AGC Report] { 0x%x, 0x%x, 0x%x, 0x%x }\n",
- dm->ofdm_agc_idx[0], dm->ofdm_agc_idx[1],
- dm->ofdm_agc_idx[2], dm->ofdm_agc_idx[3]);
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "RSSI: { %d, %d, %d, %d }, rx_rate:",
- (dm->rssi_a == 0xff) ? 0 : dm->rssi_a,
- (dm->rssi_b == 0xff) ? 0 : dm->rssi_b,
- (dm->rssi_c == 0xff) ? 0 : dm->rssi_c,
- (dm->rssi_d == 0xff) ? 0 : dm->rssi_d);
-
- phydm_print_rate(dm, dm->rx_rate, ODM_COMP_COMMON);
-
- /*Print TX rate*/
- for (macid = 0; macid < ODM_ASSOCIATE_ENTRY_NUM; macid++) {
- entry = dm->odm_sta_info[macid];
- if (!IS_STA_VALID(entry))
- continue;
-
- phydm_macid = (dm->platform2phydm_macid_table[macid]);
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "TXRate [%d]:",
- macid);
- phydm_print_rate(dm, ra_tab->link_tx_rate[macid],
- ODM_COMP_COMMON);
-
- client_cnt++;
-
- if (client_cnt == dm->number_linked_client)
- break;
- }
-
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "TP { TX, RX, total} = {%d, %d, %d }Mbps, traffic_load = (%d))\n",
- dm->tx_tp, dm->rx_tp, dm->total_tp, dm->traffic_load);
-
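-		/*
-		 * tmp_val_u1 below is |crystal_cap - def_x_cap|; the sign of
-		 * the tracking offset is printed separately.
-		 */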
- tmp_val_u1 =
- (cfo_track->crystal_cap > cfo_track->def_x_cap) ?
- (cfo_track->crystal_cap -
- cfo_track->def_x_cap) :
- (cfo_track->def_x_cap - cfo_track->crystal_cap);
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "CFO_avg = ((%d kHz)) , CrystalCap_tracking = ((%s%d))\n",
- cfo_track->CFO_ave_pre,
- ((cfo_track->crystal_cap > cfo_track->def_x_cap) ? "+" :
- "-"),
- tmp_val_u1);
-
- /* Condition number */
- if (dm->support_ic_type == ODM_RTL8822B) {
- tmp_val = phydm_get_condition_number_8822B(dm);
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "Condition number = ((%d))\n", tmp_val);
- }
-
- /*STBC or LDPC pkt*/
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "LDPC = %s, STBC = %s\n",
- (dm->phy_dbg_info.is_ldpc_pkt) ? "Y" : "N",
- (dm->phy_dbg_info.is_stbc_pkt) ? "Y" : "N");
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "No Link !!!\n");
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "[CCA Cnt] {CCK, OFDM, Total} = {%d, %d, %d}\n",
- false_alm_cnt->cnt_cck_cca, false_alm_cnt->cnt_ofdm_cca,
- false_alm_cnt->cnt_cca_all);
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "[FA Cnt] {CCK, OFDM, Total} = {%d, %d, %d}\n",
- false_alm_cnt->cnt_cck_fail, false_alm_cnt->cnt_ofdm_fail,
- false_alm_cnt->cnt_all);
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "[OFDM FA Detail] Parity_Fail = (( %d )), Rate_Illegal = (( %d )), CRC8_fail = (( %d )), Mcs_fail = (( %d )), Fast_Fsync = (( %d )), SB_Search_fail = (( %d ))\n",
- false_alm_cnt->cnt_parity_fail,
- false_alm_cnt->cnt_rate_illegal,
- false_alm_cnt->cnt_crc8_fail,
- false_alm_cnt->cnt_mcs_fail,
- false_alm_cnt->cnt_fast_fsync,
- false_alm_cnt->cnt_sb_search_fail);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "is_linked = %d, Num_client = %d, rssi_min = %d, current_igi = 0x%x, bNoisy=%d\n\n",
- dm->is_linked, dm->number_linked_client, dm->rssi_min,
- dig_tab->cur_ig_value, dm->noisy_decision);
-}
-
-void phydm_basic_profile(void *dm_void, u32 *_used, char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- char *cut = NULL;
- char *ic_type = NULL;
- u32 used = *_used;
- u32 out_len = *_out_len;
- u32 date = 0;
- char *commit_by = NULL;
- u32 release_ver = 0;
-
- PHYDM_SNPRINTF(output + used, out_len - used, "%-35s\n",
- "% Basic Profile %");
-
- if (dm->support_ic_type == ODM_RTL8188E) {
- } else if (dm->support_ic_type == ODM_RTL8822B) {
- ic_type = "RTL8822B";
- date = RELEASE_DATE_8822B;
- commit_by = COMMIT_BY_8822B;
- release_ver = RELEASE_VERSION_8822B;
- }
-
- /* JJ ADD 20161014 */
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %-35s: %s (MP Chip: %s)\n", "IC type", ic_type,
- dm->is_mp_chip ? "Yes" : "No");
-
- if (dm->cut_version == ODM_CUT_A)
- cut = "A";
- else if (dm->cut_version == ODM_CUT_B)
- cut = "B";
- else if (dm->cut_version == ODM_CUT_C)
- cut = "C";
- else if (dm->cut_version == ODM_CUT_D)
- cut = "D";
- else if (dm->cut_version == ODM_CUT_E)
- cut = "E";
- else if (dm->cut_version == ODM_CUT_F)
- cut = "F";
- else if (dm->cut_version == ODM_CUT_I)
- cut = "I";
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "cut version", cut);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %d\n",
- "PHY Parameter version", odm_get_hw_img_version(dm));
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %d\n",
- "PHY Parameter Commit date", date);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "PHY Parameter Commit by", commit_by);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %d\n",
- "PHY Parameter Release version", release_ver);
-
- {
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %-35s: %d (Subversion: %d)\n", "FW version",
- rtlhal->fw_version, rtlhal->fw_subversion);
- }
- /* 1 PHY DM version List */
- PHYDM_SNPRINTF(output + used, out_len - used, "%-35s\n",
- "% PHYDM version %");
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "Code base", PHYDM_CODE_BASE);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "Release Date", PHYDM_RELEASE_DATE);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "adaptivity", ADAPTIVITY_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n", "DIG",
- DIG_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "Dynamic BB PowerSaving", DYNAMIC_BBPWRSAV_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "CFO Tracking", CFO_TRACKING_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "Antenna Diversity", ANTDIV_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "Power Tracking", POWRTRACKING_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "Dynamic TxPower", DYNAMIC_TXPWR_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "RA Info", RAINFO_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "Auto channel Selection", ACS_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "EDCA Turbo", EDCATURBO_VERSION);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "LA mode", DYNAMIC_LA_MODE);
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "Dynamic RX path", DYNAMIC_RX_PATH_VERSION);
-
- if (dm->support_ic_type & ODM_RTL8822B)
- PHYDM_SNPRINTF(output + used, out_len - used, " %-35s: %s\n",
- "PHY config 8822B", PHY_CONFIG_VERSION_8822B);
-
- *_used = used;
- *_out_len = out_len;
-}
-
-void phydm_fw_trace_en_h2c(void *dm_void, bool enable, u32 fw_debug_component,
- u32 monitor_mode, u32 macid)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 h2c_parameter[7] = {0};
- u8 cmd_length;
-
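-	/*
-	 * H2C payload layout as built below: 3081-series firmware takes
-	 * {en, fw_debug_component[31:0] (little-endian), monitor_mode, macid}
-	 * in 7 bytes; other ICs take the short {en, monitor_mode, macid} form.
-	 */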
- if (dm->support_ic_type & PHYDM_IC_3081_SERIES) {
- h2c_parameter[0] = enable;
- h2c_parameter[1] = (u8)(fw_debug_component & MASKBYTE0);
- h2c_parameter[2] = (u8)((fw_debug_component & MASKBYTE1) >> 8);
- h2c_parameter[3] = (u8)((fw_debug_component & MASKBYTE2) >> 16);
- h2c_parameter[4] = (u8)((fw_debug_component & MASKBYTE3) >> 24);
- h2c_parameter[5] = (u8)monitor_mode;
- h2c_parameter[6] = (u8)macid;
- cmd_length = 7;
-
- } else {
- h2c_parameter[0] = enable;
- h2c_parameter[1] = (u8)monitor_mode;
- h2c_parameter[2] = (u8)macid;
- cmd_length = 3;
- }
-
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "---->\n");
- if (monitor_mode == 0)
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "[H2C] FW_debug_en: (( %d ))\n", enable);
- else
- ODM_RT_TRACE(
- dm, ODM_FW_DEBUG_TRACE,
- "[H2C] FW_debug_en: (( %d )), mode: (( %d )), macid: (( %d ))\n",
- enable, monitor_mode, macid);
- odm_fill_h2c_cmd(dm, PHYDM_H2C_FW_TRACE_EN, cmd_length, h2c_parameter);
-}
-
-bool phydm_api_set_txagc(struct phy_dm_struct *dm, u32 power_index,
- enum odm_rf_radio_path path, u8 hw_rate,
- bool is_single_rate)
-{
- bool ret = false;
-
- if (dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8821C)) {
- if (is_single_rate) {
- if (dm->support_ic_type == ODM_RTL8822B)
- ret = phydm_write_txagc_1byte_8822b(
- dm, power_index, path, hw_rate);
-
- } else {
- if (dm->support_ic_type == ODM_RTL8822B)
- ret = config_phydm_write_txagc_8822b(
- dm, power_index, path, hw_rate);
- }
- }
-
- return ret;
-}
-
-static u8 phydm_api_get_txagc(struct phy_dm_struct *dm,
- enum odm_rf_radio_path path, u8 hw_rate)
-{
- u8 ret = 0;
-
- if (dm->support_ic_type & ODM_RTL8822B)
- ret = config_phydm_read_txagc_8822b(dm, path, hw_rate);
-
- return ret;
-}
-
-static bool phydm_api_switch_bw_channel(struct phy_dm_struct *dm, u8 central_ch,
- u8 primary_ch_idx,
- enum odm_bw bandwidth)
-{
- bool ret = false;
-
- if (dm->support_ic_type & ODM_RTL8822B)
- ret = config_phydm_switch_channel_bw_8822b(
- dm, central_ch, primary_ch_idx, bandwidth);
-
- return ret;
-}
-
-bool phydm_api_trx_mode(struct phy_dm_struct *dm, enum odm_rf_path tx_path,
- enum odm_rf_path rx_path, bool is_tx2_path)
-{
- bool ret = false;
-
- if (dm->support_ic_type & ODM_RTL8822B)
- ret = config_phydm_trx_mode_8822b(dm, tx_path, rx_path,
- is_tx2_path);
-
- return ret;
-}
-
-static void phydm_get_per_path_txagc(void *dm_void, u8 path, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 rate_idx;
- u8 txagc;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- if (((dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8197F)) &&
- path <= ODM_RF_PATH_B) ||
- ((dm->support_ic_type & (ODM_RTL8821C)) &&
- path <= ODM_RF_PATH_A)) {
- for (rate_idx = 0; rate_idx <= 0x53; rate_idx++) {
- if (rate_idx == ODM_RATE1M)
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %-35s\n", "CCK====>");
- else if (rate_idx == ODM_RATE6M)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "OFDM====>");
- else if (rate_idx == ODM_RATEMCS0)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "HT 1ss====>");
- else if (rate_idx == ODM_RATEMCS8)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "HT 2ss====>");
- else if (rate_idx == ODM_RATEMCS16)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "HT 3ss====>");
- else if (rate_idx == ODM_RATEMCS24)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "HT 4ss====>");
- else if (rate_idx == ODM_RATEVHTSS1MCS0)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "VHT 1ss====>");
- else if (rate_idx == ODM_RATEVHTSS2MCS0)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "VHT 2ss====>");
- else if (rate_idx == ODM_RATEVHTSS3MCS0)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "VHT 3ss====>");
- else if (rate_idx == ODM_RATEVHTSS4MCS0)
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n %-35s\n", "VHT 4ss====>");
-
- txagc = phydm_api_get_txagc(
- dm, (enum odm_rf_radio_path)path, rate_idx);
- if (config_phydm_read_txagc_check(txagc))
- PHYDM_SNPRINTF(output + used, out_len - used,
- " 0x%02x ", txagc);
- else
- PHYDM_SNPRINTF(output + used, out_len - used,
- " 0x%s ", "xx");
- }
- }
-}
-
-static void phydm_get_txagc(void *dm_void, u32 *_used, char *output,
- u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- /* path-A */
- PHYDM_SNPRINTF(output + used, out_len - used, "%-35s\n",
- "path-A====================");
- phydm_get_per_path_txagc(dm, ODM_RF_PATH_A, _used, output, _out_len);
-
- /* path-B */
- PHYDM_SNPRINTF(output + used, out_len - used, "\n%-35s\n",
- "path-B====================");
- phydm_get_per_path_txagc(dm, ODM_RF_PATH_B, _used, output, _out_len);
-
- /* path-C */
- PHYDM_SNPRINTF(output + used, out_len - used, "\n%-35s\n",
- "path-C====================");
- phydm_get_per_path_txagc(dm, ODM_RF_PATH_C, _used, output, _out_len);
-
- /* path-D */
- PHYDM_SNPRINTF(output + used, out_len - used, "\n%-35s\n",
- "path-D====================");
- phydm_get_per_path_txagc(dm, ODM_RF_PATH_D, _used, output, _out_len);
-}
-
-static void phydm_set_txagc(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- /*dm_value[1] = path*/
- /*dm_value[2] = hw_rate*/
- /*dm_value[3] = power_index*/
-
- if (dm->support_ic_type &
- (ODM_RTL8822B | ODM_RTL8197F | ODM_RTL8821C)) {
- if (dm_value[1] <= 1) {
- phydm_check_dmval_txagc(dm, used, out_len, dm_value,
- output);
- } else {
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %s%d %s%x%s\n", "Write path-",
- (dm_value[1] & 0x1), "rate index-0x",
- (dm_value[2] & 0x7f), " fail");
- }
- }
-}
-
-static void phydm_debug_trace(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 pre_debug_components, one = 1;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- pre_debug_components = dm->debug_components;
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\n%s\n",
- "================================");
- if (dm_value[0] == 100) {
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "[Debug Message] PhyDM Selection");
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "================================");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "00. (( %s ))DIG\n",
- ((dm->debug_components & ODM_COMP_DIG) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "01. (( %s ))RA_MASK\n",
- ((dm->debug_components & ODM_COMP_RA_MASK) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "02. (( %s ))DYNAMIC_TXPWR\n",
- ((dm->debug_components & ODM_COMP_DYNAMIC_TXPWR) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "03. (( %s ))FA_CNT\n",
- ((dm->debug_components & ODM_COMP_FA_CNT) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "04. (( %s ))RSSI_MONITOR\n",
- ((dm->debug_components & ODM_COMP_RSSI_MONITOR) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "05. (( %s ))SNIFFER\n",
- ((dm->debug_components & ODM_COMP_SNIFFER) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "06. (( %s ))ANT_DIV\n",
- ((dm->debug_components & ODM_COMP_ANT_DIV) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "07. (( %s ))DFS\n",
- ((dm->debug_components & ODM_COMP_DFS) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "08. (( %s ))NOISY_DETECT\n",
- ((dm->debug_components & ODM_COMP_NOISY_DETECT) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "09. (( %s ))RATE_ADAPTIVE\n",
- ((dm->debug_components & ODM_COMP_RATE_ADAPTIVE) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "10. (( %s ))PATH_DIV\n",
- ((dm->debug_components & ODM_COMP_PATH_DIV) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "12. (( %s ))DYNAMIC_PRICCA\n",
- ((dm->debug_components & ODM_COMP_DYNAMIC_PRICCA) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "14. (( %s ))MP\n",
- ((dm->debug_components & ODM_COMP_MP) ? ("V") : (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "15. (( %s ))struct cfo_tracking\n",
- ((dm->debug_components & ODM_COMP_CFO_TRACKING) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "16. (( %s ))struct acs_info\n",
- ((dm->debug_components & ODM_COMP_ACS) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "17. (( %s ))ADAPTIVITY\n",
- ((dm->debug_components & PHYDM_COMP_ADAPTIVITY) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "18. (( %s ))RA_DBG\n",
- ((dm->debug_components & PHYDM_COMP_RA_DBG) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "19. (( %s ))TXBF\n",
- ((dm->debug_components & PHYDM_COMP_TXBF) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "20. (( %s ))EDCA_TURBO\n",
- ((dm->debug_components & ODM_COMP_EDCA_TURBO) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "22. (( %s ))FW_DEBUG_TRACE\n",
- ((dm->debug_components & ODM_FW_DEBUG_TRACE) ?
- ("V") :
- (".")));
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "24. (( %s ))TX_PWR_TRACK\n",
- ((dm->debug_components & ODM_COMP_TX_PWR_TRACK) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "26. (( %s ))CALIBRATION\n",
- ((dm->debug_components & ODM_COMP_CALIBRATION) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "28. (( %s ))PHY_CONFIG\n",
- ((dm->debug_components & ODM_PHY_CONFIG) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "29. (( %s ))INIT\n",
- ((dm->debug_components & ODM_COMP_INIT) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(
- output + used, out_len - used, "30. (( %s ))COMMON\n",
- ((dm->debug_components & ODM_COMP_COMMON) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "31. (( %s ))API\n",
- ((dm->debug_components & ODM_COMP_API) ? ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "================================");
-
- } else if (dm_value[0] == 101) {
- dm->debug_components = 0;
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "Disable all debug components");
- } else {
- if (dm_value[1] == 1) /*enable*/
- dm->debug_components |= (one << dm_value[0]);
- else if (dm_value[1] == 2) /*disable*/
- dm->debug_components &= ~(one << dm_value[0]);
- else
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "[Warning!!!] 1:enable, 2:disable");
- }
- PHYDM_SNPRINTF(output + used, out_len - used,
- "pre-DbgComponents = 0x%x\n", pre_debug_components);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Curr-DbgComponents = 0x%x\n", dm->debug_components);
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "================================");
-}
-
-static void phydm_fw_debug_trace(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 pre_fw_debug_components, one = 1;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- pre_fw_debug_components = dm->fw_debug_components;
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\n%s\n",
- "================================");
- if (dm_value[0] == 100) {
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "[FW Debug Component]");
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "================================");
- PHYDM_SNPRINTF(
- output + used, out_len - used, "00. (( %s ))RA\n",
- ((dm->fw_debug_components & PHYDM_FW_COMP_RA) ? ("V") :
- (".")));
-
- if (dm->support_ic_type & PHYDM_IC_3081_SERIES) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "01. (( %s ))MU\n",
- ((dm->fw_debug_components & PHYDM_FW_COMP_MU) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "02. (( %s ))path Div\n",
- ((dm->fw_debug_components &
- PHYDM_FW_COMP_PHY_CONFIG) ?
- ("V") :
- (".")));
- PHYDM_SNPRINTF(output + used, out_len - used,
- "03. (( %s ))Phy Config\n",
- ((dm->fw_debug_components &
- PHYDM_FW_COMP_PHY_CONFIG) ?
- ("V") :
- (".")));
- }
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "================================");
-
- } else {
- if (dm_value[0] == 101) {
- dm->fw_debug_components = 0;
- PHYDM_SNPRINTF(output + used, out_len - used, "%s\n",
- "Clear all fw debug components");
- } else {
- if (dm_value[1] == 1) /*enable*/
- dm->fw_debug_components |= (one << dm_value[0]);
- else if (dm_value[1] == 2) /*disable*/
- dm->fw_debug_components &=
- ~(one << dm_value[0]);
- else
- PHYDM_SNPRINTF(
- output + used, out_len - used, "%s\n",
- "[Warning!!!] 1:enable, 2:disable");
- }
-
- if (dm->fw_debug_components == 0) {
- dm->debug_components &= ~ODM_FW_DEBUG_TRACE;
- phydm_fw_trace_en_h2c(
- dm, false, dm->fw_debug_components, dm_value[2],
- dm_value[3]); /*H2C to enable C2H Msg*/
- } else {
- dm->debug_components |= ODM_FW_DEBUG_TRACE;
- phydm_fw_trace_en_h2c(
- dm, true, dm->fw_debug_components, dm_value[2],
- dm_value[3]); /*H2C to enable C2H Msg*/
- }
- }
-}
-
-static void phydm_dump_bb_reg(void *dm_void, u32 *_used, char *output,
- u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 addr = 0;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
-	/* For N-series ICs we only need to dump page 8 to page F, using 3-digit addresses */
- for (addr = 0x800; addr < 0xfff; addr += 4) {
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%03x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
- else
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%04x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
- }
-
- if (dm->support_ic_type &
- (ODM_RTL8822B | ODM_RTL8814A | ODM_RTL8821C)) {
- if (dm->rf_type > ODM_2T2R) {
- for (addr = 0x1800; addr < 0x18ff; addr += 4)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%04x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
- }
-
- if (dm->rf_type > ODM_3T3R) {
- for (addr = 0x1a00; addr < 0x1aff; addr += 4)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%04x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
- }
-
- for (addr = 0x1900; addr < 0x19ff; addr += 4)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%04x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
-
- for (addr = 0x1c00; addr < 0x1cff; addr += 4)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%04x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
-
- for (addr = 0x1f00; addr < 0x1fff; addr += 4)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%04x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
- }
-}
-
-static void phydm_dump_all_reg(void *dm_void, u32 *_used, char *output,
- u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 addr = 0;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
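-	/*
-	 * Note: odm_get_bb_reg() is presumably a plain dword MMIO read, so it
-	 * is reused below for the MAC ranges (0x0000-0x07ff, 0x1000-0x17ff)
-	 * as well as for the BB pages.
-	 */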
- /* dump MAC register */
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "MAC==========\n");
- for (addr = 0; addr < 0x7ff; addr += 4)
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "0x%04x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
-
- for (addr = 0x1000; addr < 0x17ff; addr += 4)
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "0x%04x 0x%08x\n", addr,
- odm_get_bb_reg(dm, addr, MASKDWORD));
-
- /* dump BB register */
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "BB==========\n");
- phydm_dump_bb_reg(dm, &used, output, &out_len);
-
- /* dump RF register */
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "RF-A==========\n");
- for (addr = 0; addr < 0xFF; addr++)
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "0x%02x 0x%05x\n", addr,
- odm_get_rf_reg(dm, ODM_RF_PATH_A, addr,
- RFREGOFFSETMASK));
-
- if (dm->rf_type > ODM_1T1R) {
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "RF-B==========\n");
- for (addr = 0; addr < 0xFF; addr++)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%02x 0x%05x\n", addr,
- odm_get_rf_reg(dm, ODM_RF_PATH_B, addr,
- RFREGOFFSETMASK));
- }
-
- if (dm->rf_type > ODM_2T2R) {
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "RF-C==========\n");
- for (addr = 0; addr < 0xFF; addr++)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%02x 0x%05x\n", addr,
- odm_get_rf_reg(dm, ODM_RF_PATH_C, addr,
- RFREGOFFSETMASK));
- }
-
- if (dm->rf_type > ODM_3T3R) {
- PHYDM_VAST_INFO_SNPRINTF(output + used, out_len - used,
- "RF-D==========\n");
- for (addr = 0; addr < 0xFF; addr++)
- PHYDM_VAST_INFO_SNPRINTF(
- output + used, out_len - used,
- "0x%02x 0x%05x\n", addr,
- odm_get_rf_reg(dm, ODM_RF_PATH_D, addr,
- RFREGOFFSETMASK));
- }
-}
-
-static void phydm_enable_big_jump(struct phy_dm_struct *dm, bool state)
-{
- struct dig_thres *dig_tab = &dm->dm_dig_table;
-
- if (!state) {
- dm->dm_dig_table.enable_adjust_big_jump = false;
- odm_set_bb_reg(dm, 0x8c8, 0xfe,
- ((dig_tab->big_jump_step3 << 5) |
- (dig_tab->big_jump_step2 << 3) |
- dig_tab->big_jump_step1));
- } else {
- dm->dm_dig_table.enable_adjust_big_jump = true;
- }
-}
-
-static void phydm_show_rx_rate(struct phy_dm_struct *dm, u32 *_used,
- char *output, u32 *_out_len)
-{
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "=====Rx SU rate Statistics=====\n");
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "1SS MCS0 = %d, 1SS MCS1 = %d, 1SS MCS2 = %d, 1SS MCS 3 = %d\n",
- dm->phy_dbg_info.num_qry_vht_pkt[0],
- dm->phy_dbg_info.num_qry_vht_pkt[1],
- dm->phy_dbg_info.num_qry_vht_pkt[2],
- dm->phy_dbg_info.num_qry_vht_pkt[3]);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "1SS MCS4 = %d, 1SS MCS5 = %d, 1SS MCS6 = %d, 1SS MCS 7 = %d\n",
- dm->phy_dbg_info.num_qry_vht_pkt[4],
- dm->phy_dbg_info.num_qry_vht_pkt[5],
- dm->phy_dbg_info.num_qry_vht_pkt[6],
- dm->phy_dbg_info.num_qry_vht_pkt[7]);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "1SS MCS8 = %d, 1SS MCS9 = %d\n",
- dm->phy_dbg_info.num_qry_vht_pkt[8],
- dm->phy_dbg_info.num_qry_vht_pkt[9]);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "2SS MCS0 = %d, 2SS MCS1 = %d, 2SS MCS2 = %d, 2SS MCS 3 = %d\n",
- dm->phy_dbg_info.num_qry_vht_pkt[10],
- dm->phy_dbg_info.num_qry_vht_pkt[11],
- dm->phy_dbg_info.num_qry_vht_pkt[12],
- dm->phy_dbg_info.num_qry_vht_pkt[13]);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "2SS MCS4 = %d, 2SS MCS5 = %d, 2SS MCS6 = %d, 2SS MCS 7 = %d\n",
- dm->phy_dbg_info.num_qry_vht_pkt[14],
- dm->phy_dbg_info.num_qry_vht_pkt[15],
- dm->phy_dbg_info.num_qry_vht_pkt[16],
- dm->phy_dbg_info.num_qry_vht_pkt[17]);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "2SS MCS8 = %d, 2SS MCS9 = %d\n",
- dm->phy_dbg_info.num_qry_vht_pkt[18],
- dm->phy_dbg_info.num_qry_vht_pkt[19]);
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "=====Rx MU rate Statistics=====\n");
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "1SS MCS0 = %d, 1SS MCS1 = %d, 1SS MCS2 = %d, 1SS MCS 3 = %d\n",
- dm->phy_dbg_info.num_qry_mu_vht_pkt[0],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[1],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[2],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[3]);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "1SS MCS4 = %d, 1SS MCS5 = %d, 1SS MCS6 = %d, 1SS MCS 7 = %d\n",
- dm->phy_dbg_info.num_qry_mu_vht_pkt[4],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[5],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[6],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[7]);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "1SS MCS8 = %d, 1SS MCS9 = %d\n",
- dm->phy_dbg_info.num_qry_mu_vht_pkt[8],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[9]);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "2SS MCS0 = %d, 2SS MCS1 = %d, 2SS MCS2 = %d, 2SS MCS 3 = %d\n",
- dm->phy_dbg_info.num_qry_mu_vht_pkt[10],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[11],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[12],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[13]);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "2SS MCS4 = %d, 2SS MCS5 = %d, 2SS MCS6 = %d, 2SS MCS 7 = %d\n",
- dm->phy_dbg_info.num_qry_mu_vht_pkt[14],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[15],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[16],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[17]);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "2SS MCS8 = %d, 2SS MCS9 = %d\n",
- dm->phy_dbg_info.num_qry_mu_vht_pkt[18],
- dm->phy_dbg_info.num_qry_mu_vht_pkt[19]);
-}
-
-struct phydm_command {
- char name[16];
- u8 id;
-};
-
-enum PHYDM_CMD_ID {
- PHYDM_HELP,
- PHYDM_DEMO,
- PHYDM_RA,
- PHYDM_PROFILE,
- PHYDM_ANTDIV,
- PHYDM_PATHDIV,
- PHYDM_DEBUG,
- PHYDM_FW_DEBUG,
- PHYDM_SUPPORT_ABILITY,
- PHYDM_GET_TXAGC,
- PHYDM_SET_TXAGC,
- PHYDM_SMART_ANT,
- PHYDM_API,
- PHYDM_TRX_PATH,
- PHYDM_LA_MODE,
- PHYDM_DUMP_REG,
- PHYDM_MU_MIMO,
- PHYDM_HANG,
- PHYDM_BIG_JUMP,
- PHYDM_SHOW_RXRATE,
- PHYDM_NBI_EN,
- PHYDM_CSI_MASK_EN,
- PHYDM_DFS,
- PHYDM_IQK,
- PHYDM_NHM,
- PHYDM_CLM,
- PHYDM_BB_INFO,
- PHYDM_TXBF,
- PHYDM_PAUSE_DIG_EN,
- PHYDM_H2C,
- PHYDM_ANT_SWITCH,
- PHYDM_DYNAMIC_RA_PATH,
- PHYDM_PSD,
- PHYDM_DEBUG_PORT
-};
-
-static struct phydm_command phy_dm_ary[] = {
- {"-h", PHYDM_HELP}, /*do not move this element to other position*/
- {"demo", PHYDM_DEMO}, /*do not move this element to other position*/
- {"ra", PHYDM_RA},
- {"profile", PHYDM_PROFILE},
- {"antdiv", PHYDM_ANTDIV},
- {"pathdiv", PHYDM_PATHDIV},
- {"dbg", PHYDM_DEBUG},
- {"fw_dbg", PHYDM_FW_DEBUG},
- {"ability", PHYDM_SUPPORT_ABILITY},
- {"get_txagc", PHYDM_GET_TXAGC},
- {"set_txagc", PHYDM_SET_TXAGC},
- {"smtant", PHYDM_SMART_ANT},
- {"api", PHYDM_API},
- {"trxpath", PHYDM_TRX_PATH},
- {"lamode", PHYDM_LA_MODE},
- {"dumpreg", PHYDM_DUMP_REG},
- {"mu", PHYDM_MU_MIMO},
- {"hang", PHYDM_HANG},
- {"bigjump", PHYDM_BIG_JUMP},
- {"rxrate", PHYDM_SHOW_RXRATE},
- {"nbi", PHYDM_NBI_EN},
- {"csi_mask", PHYDM_CSI_MASK_EN},
- {"dfs", PHYDM_DFS},
- {"iqk", PHYDM_IQK},
- {"nhm", PHYDM_NHM},
- {"clm", PHYDM_CLM},
- {"bbinfo", PHYDM_BB_INFO},
- {"txbf", PHYDM_TXBF},
- {"pause_dig", PHYDM_PAUSE_DIG_EN},
- {"h2c", PHYDM_H2C},
- {"ant_switch", PHYDM_ANT_SWITCH},
- {"drp", PHYDM_DYNAMIC_RA_PATH},
- {"psd", PHYDM_PSD},
- {"dbgport", PHYDM_DEBUG_PORT},
-};
-
-void phydm_cmd_parser(struct phy_dm_struct *dm, char input[][MAX_ARGV],
- u32 input_num, u8 flag, char *output, u32 out_len)
-{
- u32 used = 0;
- u8 id = 0;
- int var1[10] = {0};
-	int i, input_idx = 0, phydm_ary_size = ARRAY_SIZE(phy_dm_ary);
- char help[] = "-h";
-
- bool is_enable_dbg_mode;
- u8 central_ch, primary_ch_idx, bandwidth;
-
- if (flag == 0) {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "GET, nothing to print\n");
- return;
- }
-
- PHYDM_SNPRINTF(output + used, out_len - used, "\n");
-
- /* Parsing Cmd ID */
- if (input_num) {
- phydm_ary_size = ARRAY_SIZE(phy_dm_ary);
- for (i = 0; i < phydm_ary_size; i++) {
- if (strcmp(phy_dm_ary[i].name, input[0]) == 0) {
- id = phy_dm_ary[i].id;
- break;
- }
- }
- if (i == phydm_ary_size) {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "SET, command not found!\n");
- return;
- }
- }
-
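-	/*
-	 * If no command name was given, id keeps its initial value 0
-	 * (PHYDM_HELP), so the help text is printed by default.
-	 */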
- switch (id) {
- case PHYDM_HELP: {
- PHYDM_SNPRINTF(output + used, out_len - used, "BB cmd ==>\n");
- for (i = 0; i < phydm_ary_size - 2; i++) {
- PHYDM_SNPRINTF(output + used, out_len - used,
- " %-5d: %s\n", i,
- phy_dm_ary[i + 2].name);
- /**/
- }
- } break;
-
- case PHYDM_DEMO: { /*echo demo 10 0x3a z abcde >cmd*/
- u32 directory = 0;
-
- char char_temp;
-
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &directory);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Decimal value = %d\n", directory);
- PHYDM_SSCANF(input[2], DCMD_HEX, &directory);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Hex value = 0x%x\n", directory);
- PHYDM_SSCANF(input[3], DCMD_CHAR, &char_temp);
- PHYDM_SNPRINTF(output + used, out_len - used, "Char = %c\n",
- char_temp);
- PHYDM_SNPRINTF(output + used, out_len - used, "String = %s\n",
- input[4]);
- } break;
-
- case PHYDM_RA:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
-
- input_idx++;
- }
- }
-
- if (input_idx >= 1) {
- phydm_RA_debug_PCR(dm, (u32 *)var1, &used, output,
- &out_len);
- }
-
- break;
-
- case PHYDM_ANTDIV:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_HEX, &var1[i]);
-
- input_idx++;
- }
- }
-
- break;
-
- case PHYDM_PATHDIV:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_HEX, &var1[i]);
-
- input_idx++;
- }
- }
-
- break;
-
- case PHYDM_DEBUG:
-
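-		/*
-		 * e.g. "echo dbg 30 1 > cmd" sets debug-component bit 30
-		 * (COMMON) and "echo dbg 30 2 > cmd" clears it; "echo dbg 100"
-		 * lists the current selection.
-		 */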
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
-
- input_idx++;
- }
- }
-
- if (input_idx >= 1) {
- phydm_debug_trace(dm, (u32 *)var1, &used, output,
- &out_len);
- }
-
- break;
-
- case PHYDM_FW_DEBUG:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
- input_idx++;
- }
- }
-
- if (input_idx >= 1)
- phydm_fw_debug_trace(dm, (u32 *)var1, &used, output,
- &out_len);
-
- break;
-
- case PHYDM_SUPPORT_ABILITY:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
-
- input_idx++;
- }
- }
-
- if (input_idx >= 1) {
- phydm_support_ability_debug(dm, (u32 *)var1, &used,
- output, &out_len);
- }
-
- break;
-
- case PHYDM_SMART_ANT:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_HEX, &var1[i]);
- input_idx++;
- }
- }
-
- break;
-
- case PHYDM_API:
- if (!(dm->support_ic_type &
- (ODM_RTL8822B | ODM_RTL8197F | ODM_RTL8821C))) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "This IC doesn't support PHYDM API function\n");
- }
-
- for (i = 0; i < 4; i++) {
- if (input[i + 1])
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
- }
-
- is_enable_dbg_mode = (bool)var1[0];
- central_ch = (u8)var1[1];
- primary_ch_idx = (u8)var1[2];
- bandwidth = (enum odm_bw)var1[3];
-
- if (is_enable_dbg_mode) {
- dm->is_disable_phy_api = false;
- phydm_api_switch_bw_channel(dm, central_ch,
- primary_ch_idx,
- (enum odm_bw)bandwidth);
- dm->is_disable_phy_api = true;
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "central_ch = %d, primary_ch_idx = %d, bandwidth = %d\n",
- central_ch, primary_ch_idx, bandwidth);
- } else {
- dm->is_disable_phy_api = false;
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Disable API debug mode\n");
- }
- break;
-
- case PHYDM_PROFILE: /*echo profile, >cmd*/
- phydm_basic_profile(dm, &used, output, &out_len);
- break;
-
- case PHYDM_GET_TXAGC:
- phydm_get_txagc(dm, &used, output, &out_len);
- break;
-
- case PHYDM_SET_TXAGC: {
- bool is_enable_dbg_mode;
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_HEX, &var1[i]);
- input_idx++;
- }
- }
-
- if ((strcmp(input[1], help) == 0)) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "{En} {pathA~D(0~3)} {rate_idx(Hex), All_rate:0xff} {txagc_idx (Hex)}\n");
- /**/
-
- } else {
- is_enable_dbg_mode = (bool)var1[0];
- if (is_enable_dbg_mode) {
- dm->is_disable_phy_api = false;
- phydm_set_txagc(dm, (u32 *)var1, &used, output,
- &out_len);
- dm->is_disable_phy_api = true;
- } else {
- dm->is_disable_phy_api = false;
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Disable API debug mode\n");
- }
- }
- } break;
-
- case PHYDM_TRX_PATH:
-
- for (i = 0; i < 4; i++) {
- if (input[i + 1])
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
- }
- if (dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8197F)) {
- u8 tx_path, rx_path;
- bool is_enable_dbg_mode, is_tx2_path;
-
- is_enable_dbg_mode = (bool)var1[0];
- tx_path = (u8)var1[1];
- rx_path = (u8)var1[2];
- is_tx2_path = (bool)var1[3];
-
- if (is_enable_dbg_mode) {
- dm->is_disable_phy_api = false;
- phydm_api_trx_mode(
- dm, (enum odm_rf_path)tx_path,
- (enum odm_rf_path)rx_path, is_tx2_path);
- dm->is_disable_phy_api = true;
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "tx_path = 0x%x, rx_path = 0x%x, is_tx2_path = %d\n",
- tx_path, rx_path, is_tx2_path);
- } else {
- dm->is_disable_phy_api = false;
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Disable API debug mode\n");
- }
- } else {
- phydm_config_trx_path(dm, (u32 *)var1, &used, output,
- &out_len);
- }
- break;
-
- case PHYDM_LA_MODE:
-
- dm->support_ability &= ~(ODM_BB_FA_CNT);
- phydm_lamode_trigger_setting(dm, &input[0], &used, output,
- &out_len, input_num);
- dm->support_ability |= ODM_BB_FA_CNT;
-
- break;
-
- case PHYDM_DUMP_REG: {
- u8 type = 0;
-
- if (input[1]) {
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
- type = (u8)var1[0];
- }
-
- if (type == 0)
- phydm_dump_bb_reg(dm, &used, output, &out_len);
- else if (type == 1)
- phydm_dump_all_reg(dm, &used, output, &out_len);
- } break;
-
- case PHYDM_MU_MIMO:
-
- if (input[1])
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
- else
- var1[0] = 0;
-
- if (var1[0] == 1) {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Get MU BFee CSI\n");
- odm_set_bb_reg(dm, 0x9e8, BIT(17) | BIT(16),
- 2); /*Read BFee*/
- odm_set_bb_reg(dm, 0x1910, BIT(15),
- 1); /*Select BFee's CSI report*/
- odm_set_bb_reg(dm, 0x19b8, BIT(6),
- 1); /*set as CSI report*/
- odm_set_bb_reg(dm, 0x19a8, 0xFFFF,
- 0xFFFF); /*disable gated_clk*/
- phydm_print_csi(dm, used, out_len, output);
-
- } else if (var1[0] == 2) {
- PHYDM_SSCANF(input[2], DCMD_DECIMAL, &var1[1]);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Get MU BFer's STA%d CSI\n", var1[1]);
- odm_set_bb_reg(dm, 0x9e8, BIT(24), 0); /*Read BFer*/
- odm_set_bb_reg(dm, 0x9e8, BIT(25),
- 1); /*enable Read/Write RAM*/
- odm_set_bb_reg(dm, 0x9e8, BIT(30) | BIT(29) | BIT(28),
- var1[1]); /*read which STA's CSI report*/
- odm_set_bb_reg(dm, 0x1910, BIT(15),
- 0); /*select BFer's CSI*/
- odm_set_bb_reg(dm, 0x19e0, 0x00003FC0,
- 0xFF); /*disable gated_clk*/
- phydm_print_csi(dm, used, out_len, output);
- }
- break;
-
- case PHYDM_BIG_JUMP: {
- if (input[1]) {
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
- phydm_enable_big_jump(dm, (bool)(var1[0]));
- } else {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "unknown command!\n");
- }
- break;
- }
-
- case PHYDM_HANG:
- phydm_bb_rx_hang_info(dm, &used, output, &out_len);
- break;
-
- case PHYDM_SHOW_RXRATE: {
- u8 rate_idx;
-
- if (input[1])
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
-
- if (var1[0] == 1) {
- phydm_show_rx_rate(dm, &used, output, &out_len);
- } else {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Reset Rx rate counter\n");
-
- for (rate_idx = 0; rate_idx < 40; rate_idx++) {
- dm->phy_dbg_info.num_qry_vht_pkt[rate_idx] = 0;
- dm->phy_dbg_info.num_qry_mu_vht_pkt[rate_idx] =
- 0;
- }
- }
- } break;
-
- case PHYDM_NBI_EN:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
- input_idx++;
- }
- }
-
- if (input_idx >= 1) {
- phydm_api_debug(dm, PHYDM_API_NBI, (u32 *)var1, &used,
- output, &out_len);
- /**/
- }
-
- break;
-
- case PHYDM_CSI_MASK_EN:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
- input_idx++;
- }
- }
-
- if (input_idx >= 1) {
- phydm_api_debug(dm, PHYDM_API_CSI_MASK, (u32 *)var1,
- &used, output, &out_len);
- /**/
- }
-
- break;
-
- case PHYDM_DFS:
- break;
-
- case PHYDM_IQK:
- break;
-
- case PHYDM_NHM: {
- u8 target_rssi;
- u16 nhm_period = 0xC350; /* 200ms */
- u8 IGI;
- struct ccx_info *ccx_info = &dm->dm_ccx_info;
-
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
-
- if (input_num == 1) {
- ccx_info->echo_NHM_en = false;
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n Trigger NHM: echo nhm 1\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r (Exclude CCA)\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r Trigger NHM: echo nhm 2\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r (Include CCA)\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r Get NHM results: echo nhm 3\n");
-
- return;
- }
-
-		/* NHM trigger */
- if (var1[0] <= 2 && var1[0] != 0) {
- ccx_info->echo_NHM_en = true;
- ccx_info->echo_IGI =
- (u8)odm_get_bb_reg(dm, 0xC50, MASKBYTE0);
-
- target_rssi = ccx_info->echo_IGI - 10;
-
- ccx_info->NHM_th[0] = (target_rssi - 15 + 10) * 2;
-
- for (i = 1; i <= 10; i++)
- ccx_info->NHM_th[i] =
- ccx_info->NHM_th[0] + 6 * i;
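-			/*
-			 * The threshold ladder appears to be in 0.5 dB units:
-			 * NHM_th[0] sits 5 dB below target_rssi and each later
-			 * bin steps up by 3 dB (inferred from the *2 and +6*i
-			 * scaling above).
-			 */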
-
- /* 4 1. store previous NHM setting */
- phydm_nhm_setting(dm, STORE_NHM_SETTING);
-
- /* 4 2. Set NHM period, 0x990[31:16]=0xC350,
- * Time duration for NHM unit: 4us, 0xC350=200ms
- */
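-			/* Worked out: 0xC350 = 50000 units; 50000 * 4 us = 200 ms. */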
- ccx_info->NHM_period = nhm_period;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n Monitor NHM for %d us",
- nhm_period * 4);
-
- /* 4 3. Set NHM inexclude_txon, inexclude_cca, ccx_en */
-
- ccx_info->nhm_inexclude_cca = (var1[0] == 1) ?
- NHM_EXCLUDE_CCA :
- NHM_INCLUDE_CCA;
- ccx_info->nhm_inexclude_txon = NHM_EXCLUDE_TXON;
-
- phydm_nhm_setting(dm, SET_NHM_SETTING);
- phydm_print_nhm_trigger(output, used, out_len,
- ccx_info);
-
- /* 4 4. Trigger NHM */
- phydm_nhm_trigger(dm);
- }
-
- /*Get NHM results*/
- else if (var1[0] == 3) {
- IGI = (u8)odm_get_bb_reg(dm, 0xC50, MASKBYTE0);
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n Cur_IGI = 0x%x", IGI);
-
- phydm_get_nhm_result(dm);
-
-			/* 4 Restore NHM setting */
- phydm_nhm_setting(dm, RESTORE_NHM_SETTING);
- phydm_print_nhm_result(output, used, out_len, ccx_info);
-
- ccx_info->echo_NHM_en = false;
- } else {
- ccx_info->echo_NHM_en = false;
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n Trigger NHM: echo nhm 1\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r (Exclude CCA)\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r Trigger NHM: echo nhm 2\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r (Include CCA)\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r Get NHM results: echo nhm 3\n");
-
- return;
- }
- } break;
-
- case PHYDM_CLM: {
- struct ccx_info *ccx_info = &dm->dm_ccx_info;
-
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
-
- if (input_num == 1) {
- ccx_info->echo_CLM_en = false;
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n Trigger CLM: echo clm 1\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r Get CLM results: echo clm 2\n");
- return;
- }
-
- /* Set & trigger CLM */
- if (var1[0] == 1) {
- ccx_info->echo_CLM_en = true;
-			ccx_info->CLM_period = 0xC350; /*200ms*/
- phydm_clm_setting(dm);
- phydm_clm_trigger(dm);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n Monitor CLM for 200ms\n");
- }
-
- /* Get CLM results */
- else if (var1[0] == 2) {
- ccx_info->echo_CLM_en = false;
- phydm_get_cl_mresult(dm);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n CLM_result = %d us\n",
- ccx_info->CLM_result * 4);
-
- } else {
- ccx_info->echo_CLM_en = false;
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\n\r Error command !\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r Trigger CLM: echo clm 1\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r Get CLM results: echo clm 2\n");
- }
- } break;
-
- case PHYDM_BB_INFO: {
- s32 value32 = 0;
-
- phydm_bb_debug_info(dm, &used, output, &out_len);
-
- if (dm->support_ic_type & ODM_RTL8822B && input[1]) {
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
- odm_set_bb_reg(dm, 0x1988, 0x003fff00, var1[0]);
- value32 = odm_get_bb_reg(dm, 0xf84, MASKDWORD);
- value32 = (value32 & 0xff000000) >> 24;
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "\r\n %-35s = condition num = %d, subcarriers = %d\n",
- "Over condition num subcarrier", var1[0],
- value32);
- odm_set_bb_reg(dm, 0x1988, BIT(22),
- 0x0); /*disable report condition number*/
- }
- } break;
-
- case PHYDM_TXBF: {
- PHYDM_SNPRINTF(output + used, out_len - used,
- "\r\n no TxBF !!\n");
- } break;
-
- case PHYDM_PAUSE_DIG_EN:
-
- for (i = 0; i < 5; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_HEX, &var1[i]);
- input_idx++;
- }
- }
-
- if (input_idx >= 1) {
- if (var1[0] == 0) {
- odm_pause_dig(dm, PHYDM_PAUSE,
- PHYDM_PAUSE_LEVEL_7, (u8)var1[1]);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Set IGI_value = ((%x))\n",
- var1[1]);
- } else if (var1[0] == 1) {
- odm_pause_dig(dm, PHYDM_RESUME,
- PHYDM_PAUSE_LEVEL_7, (u8)var1[1]);
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Resume IGI_value\n");
- } else {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
-					"echo (0:pause, 1:resume) (IGI_value)\n");
- }
- }
- break;
- case PHYDM_H2C:
-
- for (i = 0; i < 8; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_HEX, &var1[i]);
- input_idx++;
- }
- }
-
- if (input_idx >= 1)
- phydm_h2C_debug(dm, (u32 *)var1, &used, output,
- &out_len);
-
- break;
-
- case PHYDM_ANT_SWITCH:
-
- for (i = 0; i < 8; i++) {
- if (input[i + 1]) {
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
- input_idx++;
- }
- }
-
- if (input_idx >= 1) {
- PHYDM_SNPRINTF(output + used, out_len - used,
-			       "Not supported IC");
- }
-
- break;
-
- case PHYDM_DYNAMIC_RA_PATH:
-
-		PHYDM_SNPRINTF(output + used, out_len - used, "Not supported IC");
-
- break;
-
- case PHYDM_PSD:
-
- phydm_psd_debug(dm, &input[0], &used, output, &out_len,
- input_num);
-
- break;
-
- case PHYDM_DEBUG_PORT: {
- u32 dbg_port_value;
-
- PHYDM_SSCANF(input[1], DCMD_HEX, &var1[0]);
-
- if (phydm_set_bb_dbg_port(dm, BB_DBGPORT_PRIORITY_3,
-					  var1[0])) { /*set debug port to var1[0]*/
-
- dbg_port_value = phydm_get_bb_dbg_port_value(dm);
- phydm_release_bb_dbg_port(dm);
-
- PHYDM_SNPRINTF(output + used, out_len - used,
-				       "Debug Port[0x%x] = ((0x%x))\n", var1[0],
- dbg_port_value);
- }
- } break;
-
- default:
- PHYDM_SNPRINTF(output + used, out_len - used,
- "SET, unknown command!\n");
- break;
- }
-}
-
-s32 phydm_cmd(struct phy_dm_struct *dm, char *input, u32 in_len, u8 flag,
- char *output, u32 out_len)
-{
- char *token;
- u32 argc = 0;
- char argv[MAX_ARGC][MAX_ARGV];
-
- do {
- token = strsep(&input, ", ");
- if (token) {
- strcpy(argv[argc], token);
- argc++;
- } else {
- break;
- }
- } while (argc < MAX_ARGC);
-
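-	/* A single-token command still carries its trailing character
-	 * (presumably the '\n' from the echo); strip it before parsing.
-	 */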
- if (argc == 1)
- argv[0][strlen(argv[0]) - 1] = '\0';
-
- phydm_cmd_parser(dm, argv, argc, flag, output, out_len);
-
- return 0;
-}
-
-void phydm_fw_trace_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- /*u8 debug_trace_11byte[60];*/
- u8 freg_num, c2h_seq, buf_0 = 0;
-
- if (!(dm->support_ic_type & PHYDM_IC_3081_SERIES))
- return;
-
- if (cmd_len > 12)
- return;
-
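-	/* cmd_buf[0] packs two nibbles: bits[3:0] = fragment number (0 means
-	 * the message is complete and is flushed below), bits[7:4] = C2H
-	 * sequence number.
-	 */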
- buf_0 = cmd_buf[0];
- freg_num = (buf_0 & 0xf);
- c2h_seq = (buf_0 & 0xf0) >> 4;
-
- if (c2h_seq != dm->pre_c2h_seq && !dm->fw_buff_is_enpty) {
- dm->fw_debug_trace[dm->c2h_cmd_start] = '\0';
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "[FW Dbg Queue Overflow] %s\n",
- dm->fw_debug_trace);
- dm->c2h_cmd_start = 0;
- }
-
- if ((cmd_len - 1) > (60 - dm->c2h_cmd_start)) {
- dm->fw_debug_trace[dm->c2h_cmd_start] = '\0';
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "[FW Dbg Queue error: wrong C2H length] %s\n",
- dm->fw_debug_trace);
- dm->c2h_cmd_start = 0;
- return;
- }
-
- strncpy((char *)&dm->fw_debug_trace[dm->c2h_cmd_start],
- (char *)&cmd_buf[1], (cmd_len - 1));
- dm->c2h_cmd_start += (cmd_len - 1);
- dm->fw_buff_is_enpty = false;
-
- if (freg_num == 0 || dm->c2h_cmd_start >= 60) {
- if (dm->c2h_cmd_start < 60)
- dm->fw_debug_trace[dm->c2h_cmd_start] = '\0';
- else
- dm->fw_debug_trace[59] = '\0';
-
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "[FW DBG Msg] %s\n",
- dm->fw_debug_trace);
- /*dbg_print("[FW DBG Msg] %s\n", dm->fw_debug_trace);*/
- dm->c2h_cmd_start = 0;
- dm->fw_buff_is_enpty = true;
- }
-
- dm->pre_c2h_seq = c2h_seq;
-}
-
-void phydm_fw_trace_handler_code(void *dm_void, u8 *buffer, u8 cmd_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 function = buffer[0];
- u8 dbg_num = buffer[1];
- u16 content_0 = (((u16)buffer[3]) << 8) | ((u16)buffer[2]);
- u16 content_1 = (((u16)buffer[5]) << 8) | ((u16)buffer[4]);
- u16 content_2 = (((u16)buffer[7]) << 8) | ((u16)buffer[6]);
- u16 content_3 = (((u16)buffer[9]) << 8) | ((u16)buffer[8]);
- u16 content_4 = (((u16)buffer[11]) << 8) | ((u16)buffer[10]);
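-	/* buffer[2..11] carries five little-endian u16 debug words (content_0..content_4). */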
-
- if (cmd_len > 12)
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "[FW Msg] Invalid cmd length (( %d )) >12\n",
- cmd_len);
-
- /*--------------------------------------------*/
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "[FW][general][%d, %d, %d] = {%d, %d, %d, %d}\n", function,
- dbg_num, content_0, content_1, content_2, content_3,
- content_4);
- /*--------------------------------------------*/
-}
-
-void phydm_fw_trace_handler_8051(void *dm_void, u8 *buffer, u8 cmd_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- int i = 0;
- u8 extend_c2h_sub_id = 0, extend_c2h_dbg_len = 0,
- extend_c2h_dbg_seq = 0;
- u8 fw_debug_trace[128];
- u8 *extend_c2h_dbg_content = NULL;
-
- if (cmd_len > 127)
- return;
-
- extend_c2h_sub_id = buffer[0];
- extend_c2h_dbg_len = buffer[1];
-	extend_c2h_dbg_content = buffer + 2; /* DbgSeq + DbgContent, shown as hex */
-
-go_backfor_aggre_dbg_pkt:
- i = 0;
- extend_c2h_dbg_seq = buffer[2];
- extend_c2h_dbg_content = buffer + 3;
-
- for (;; i++) {
- fw_debug_trace[i] = extend_c2h_dbg_content[i];
- if (extend_c2h_dbg_content[i + 1] == '\0') {
- fw_debug_trace[i + 1] = '\0';
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "[FW DBG Msg] %s",
- &fw_debug_trace[0]);
- break;
- } else if (extend_c2h_dbg_content[i] == '\n') {
- fw_debug_trace[i + 1] = '\0';
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "[FW DBG Msg] %s",
- &fw_debug_trace[0]);
- buffer = extend_c2h_dbg_content + i + 3;
- goto go_backfor_aggre_dbg_pkt;
- }
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_debug.h b/drivers/staging/rtlwifi/phydm/phydm_debug.h
deleted file mode 100644
index 1010bf61ca3c..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_debug.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __ODM_DBG_H__
-#define __ODM_DBG_H__
-
-/*#define DEBUG_VERSION "1.1"*/ /*2015.07.29 YuChen*/
-/*#define DEBUG_VERSION "1.2"*/ /*2015.08.28 Dino*/
-#define DEBUG_VERSION "1.3" /*2016.04.28 YuChen*/
-#define ODM_DBG_TRACE 5
-
-/*FW DBG MSG*/
-#define RATE_DECISION BIT(0)
-#define INIT_RA_TABLE BIT(1)
-#define RATE_UP BIT(2)
-#define RATE_DOWN BIT(3)
-#define TRY_DONE BIT(4)
-#define RA_H2C BIT(5)
-#define F_RATE_AP_RPT BIT(7)
-
-/* -----------------------------------------------------------------------------
- * Define the tracing components
- *
- * -----------------------------------------------------------------------------
- */
-/*BB FW Functions*/
-#define PHYDM_FW_COMP_RA BIT(0)
-#define PHYDM_FW_COMP_MU BIT(1)
-#define PHYDM_FW_COMP_PATH_DIV BIT(2)
-#define PHYDM_FW_COMP_PHY_CONFIG BIT(3)
-
-/*BB Driver Functions*/
-#define ODM_COMP_DIG BIT(0)
-#define ODM_COMP_RA_MASK BIT(1)
-#define ODM_COMP_DYNAMIC_TXPWR BIT(2)
-#define ODM_COMP_FA_CNT BIT(3)
-#define ODM_COMP_RSSI_MONITOR BIT(4)
-#define ODM_COMP_SNIFFER BIT(5)
-#define ODM_COMP_ANT_DIV BIT(6)
-#define ODM_COMP_DFS BIT(7)
-#define ODM_COMP_NOISY_DETECT BIT(8)
-#define ODM_COMP_RATE_ADAPTIVE BIT(9)
-#define ODM_COMP_PATH_DIV BIT(10)
-#define ODM_COMP_CCX BIT(11)
-
-#define ODM_COMP_DYNAMIC_PRICCA BIT(12)
-/*BIT13 TBD*/
-#define ODM_COMP_MP BIT(14)
-#define ODM_COMP_CFO_TRACKING BIT(15)
-#define ODM_COMP_ACS BIT(16)
-#define PHYDM_COMP_ADAPTIVITY BIT(17)
-#define PHYDM_COMP_RA_DBG BIT(18)
-#define PHYDM_COMP_TXBF BIT(19)
-/* MAC Functions */
-#define ODM_COMP_EDCA_TURBO BIT(20)
-#define ODM_COMP_DYNAMIC_RX_PATH BIT(21)
-#define ODM_FW_DEBUG_TRACE BIT(22)
-/* RF Functions */
-/*BIT23 TBD*/
-#define ODM_COMP_TX_PWR_TRACK BIT(24)
-/*BIT25 TBD*/
-#define ODM_COMP_CALIBRATION BIT(26)
-/* Common Functions */
-/*BIT27 TBD*/
-#define ODM_PHY_CONFIG BIT(28)
-#define ODM_COMP_INIT BIT(29)
-#define ODM_COMP_COMMON BIT(30)
-#define ODM_COMP_API BIT(31)
-
-#define ODM_COMP_UNCOND 0xFFFFFFFF
-
-/*------------------------Export Macro Definition---------------------------*/
-
-#define config_phydm_read_txagc_check(data) (data != INVALID_TXAGC_DATA)
-
-#define ODM_RT_TRACE(dm, comp, fmt, ...) \
- do { \
- if (((comp) & dm->debug_components) || \
- ((comp) == ODM_COMP_UNCOND)) \
- RT_TRACE(dm->adapter, COMP_PHYDM, DBG_DMESG, fmt, \
- ##__VA_ARGS__); \
- } while (0)
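-/*
- * Usage (illustrative): ODM_RT_TRACE(dm, ODM_COMP_DIG, "IGI = 0x%x\n", igi)
- * prints only when ODM_COMP_DIG is set in dm->debug_components;
- * ODM_COMP_UNCOND messages always print.
- */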
-
-#define BB_DBGPORT_PRIORITY_3 3 /*Debug function (the highest priority)*/
-#define BB_DBGPORT_PRIORITY_2 2 /*Check hang function & Strong function*/
-#define BB_DBGPORT_PRIORITY_1 1 /*Watch dog function*/
-#define BB_DBGPORT_RELEASE 0 /*Init value (the lowest priority)*/
-
-void phydm_init_debug_setting(struct phy_dm_struct *dm);
-
-u8 phydm_set_bb_dbg_port(void *dm_void, u8 curr_dbg_priority, u32 debug_port);
-
-void phydm_release_bb_dbg_port(void *dm_void);
-
-u32 phydm_get_bb_dbg_port_value(void *dm_void);
-
-void phydm_basic_dbg_message(void *dm_void);
-
-#define PHYDM_DBGPRINT 0
-#define MAX_ARGC 20
-#define MAX_ARGV 16
-#define DCMD_DECIMAL "%d"
-#define DCMD_CHAR "%c"
-#define DCMD_HEX "%x"
-
-#define PHYDM_SSCANF(x, y, z) \
- do { \
- if (sscanf(x, y, z) != 1) \
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, \
- "%s:%d sscanf fail!", __func__, \
- __LINE__); \
- } while (0)
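-/*
- * Usage (as in phydm_debug.c): PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
- * logs through ODM_RT_TRACE whenever sscanf() does not convert exactly one field.
- */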
-
-#define PHYDM_VAST_INFO_SNPRINTF(msg, ...) \
- do { \
- snprintf(msg, ##__VA_ARGS__); \
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, output); \
- } while (0)
-
-#if (PHYDM_DBGPRINT == 1)
-#define PHYDM_SNPRINTF(msg, ...) \
- do { \
- snprintf(msg, ##__VA_ARGS__); \
- ODM_RT_TRACE(dm, ODM_COMP_UNCOND, output); \
- } while (0)
-#else
-#define PHYDM_SNPRINTF(msg, ...) \
- do { \
- if (out_len > used) \
- used += snprintf(msg, ##__VA_ARGS__); \
- } while (0)
-#endif
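-/*
- * Usage (as in phydm_debug.c): PHYDM_SNPRINTF(output + used, out_len - used, "...\n");
- * with PHYDM_DBGPRINT == 0 it appends at output + used and advances the
- * caller's local 'used' counter while out_len > used.
- */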
-
-void phydm_basic_profile(void *dm_void, u32 *_used, char *output,
- u32 *_out_len);
-s32 phydm_cmd(struct phy_dm_struct *dm, char *input, u32 in_len, u8 flag,
- char *output, u32 out_len);
-void phydm_cmd_parser(struct phy_dm_struct *dm, char input[][16], u32 input_num,
- u8 flag, char *output, u32 out_len);
-
-bool phydm_api_trx_mode(struct phy_dm_struct *dm, enum odm_rf_path tx_path,
- enum odm_rf_path rx_path, bool is_tx2_path);
-
-void phydm_fw_trace_en_h2c(void *dm_void, bool enable, u32 fw_debug_component,
- u32 monitor_mode, u32 macid);
-
-void phydm_fw_trace_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len);
-
-void phydm_fw_trace_handler_code(void *dm_void, u8 *buffer, u8 cmd_len);
-
-void phydm_fw_trace_handler_8051(void *dm_void, u8 *cmd_buf, u8 cmd_len);
-
-#endif /* __ODM_DBG_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dfs.h b/drivers/staging/rtlwifi/phydm/phydm_dfs.h
deleted file mode 100644
index c0358253d79a..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_dfs.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDM_DFS_H__
-#define __PHYDM_DFS_H__
-
-#define DFS_VERSION "0.0"
-
-/* ============================================================
- * Definition
- * ============================================================
- */
-
-/* ============================================================
- * 1 structure
- * ============================================================
- */
-
-/* ============================================================
- * enumeration
- * ============================================================
- */
-
-enum phydm_dfs_region_domain {
- PHYDM_DFS_DOMAIN_UNKNOWN = 0,
- PHYDM_DFS_DOMAIN_FCC = 1,
- PHYDM_DFS_DOMAIN_MKK = 2,
- PHYDM_DFS_DOMAIN_ETSI = 3,
-};
-
-/* ============================================================
- * function prototype
- * ============================================================
- */
-#define phydm_dfs_master_enabled(dm) false
-
-#endif /*#ifndef __PHYDM_DFS_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.c b/drivers/staging/rtlwifi/phydm/phydm_dig.c
deleted file mode 100644
index 99c805cc380b..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_dig.c
+++ /dev/null
@@ -1,1521 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-static int get_igi_for_diff(int);
-
-static inline void phydm_check_ap_write_dig(struct phy_dm_struct *dm,
- u8 current_igi)
-{
- switch (*dm->one_path_cca) {
- case ODM_CCA_2R:
- odm_set_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm),
- current_igi);
-
- if (dm->rf_type > ODM_1T1R)
- odm_set_bb_reg(dm, ODM_REG(IGI_B, dm), ODM_BIT(IGI, dm),
- current_igi);
- break;
- case ODM_CCA_1R_A:
- odm_set_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm),
- current_igi);
- if (dm->rf_type != ODM_1T1R)
- odm_set_bb_reg(dm, ODM_REG(IGI_B, dm), ODM_BIT(IGI, dm),
- get_igi_for_diff(current_igi));
- break;
- case ODM_CCA_1R_B:
- odm_set_bb_reg(dm, ODM_REG(IGI_B, dm), ODM_BIT(IGI, dm),
- get_igi_for_diff(current_igi));
- if (dm->rf_type != ODM_1T1R)
- odm_set_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm),
- current_igi);
- break;
- }
-}
-
-static inline u8 phydm_get_current_igi(u8 dig_max_of_min, u8 rssi_min,
- u8 current_igi)
-{
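-	/* Equivalent to max(current_igi, min(rssi_min, dig_max_of_min)). */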
- if (rssi_min < dig_max_of_min) {
- if (current_igi < rssi_min)
- return rssi_min;
- } else {
- if (current_igi < dig_max_of_min)
- return dig_max_of_min;
- }
- return current_igi;
-}
-
-void odm_change_dynamic_init_gain_thresh(void *dm_void, u32 dm_type,
- u32 dm_value)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
-
- if (dm_type == DIG_TYPE_THRESH_HIGH) {
- dig_tab->rssi_high_thresh = dm_value;
- } else if (dm_type == DIG_TYPE_THRESH_LOW) {
- dig_tab->rssi_low_thresh = dm_value;
- } else if (dm_type == DIG_TYPE_ENABLE) {
- dig_tab->dig_enable_flag = true;
- } else if (dm_type == DIG_TYPE_DISABLE) {
- dig_tab->dig_enable_flag = false;
- } else if (dm_type == DIG_TYPE_BACKOFF) {
- if (dm_value > 30)
- dm_value = 30;
- dig_tab->backoff_val = (u8)dm_value;
- } else if (dm_type == DIG_TYPE_RX_GAIN_MIN) {
- if (dm_value == 0)
- dm_value = 0x1;
- dig_tab->rx_gain_range_min = (u8)dm_value;
- } else if (dm_type == DIG_TYPE_RX_GAIN_MAX) {
- if (dm_value > 0x50)
- dm_value = 0x50;
- dig_tab->rx_gain_range_max = (u8)dm_value;
- }
-} /* dm_change_dynamic_init_gain_thresh */
-
-static int get_igi_for_diff(int value_IGI)
-{
-#define ONERCCA_LOW_TH 0x30
-#define ONERCCA_LOW_DIFF 8
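-	/* For one-path CCA: returns min(value_IGI + ONERCCA_LOW_DIFF, ONERCCA_LOW_TH)
-	 * for IGI values below the threshold; higher values pass through unchanged.
-	 */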
-
- if (value_IGI < ONERCCA_LOW_TH) {
- if ((ONERCCA_LOW_TH - value_IGI) < ONERCCA_LOW_DIFF)
- return ONERCCA_LOW_TH;
- else
- return value_IGI + ONERCCA_LOW_DIFF;
- }
-
- return value_IGI;
-}
-
-static void odm_fa_threshold_check(void *dm_void, bool is_dfs_band,
- bool is_performance, u32 rx_tp, u32 tx_tp,
- u32 *dm_FA_thres)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->is_linked && (is_performance || is_dfs_band)) {
- /*For NIC*/
- dm_FA_thres[0] = DM_DIG_FA_TH0;
- dm_FA_thres[1] = DM_DIG_FA_TH1;
- dm_FA_thres[2] = DM_DIG_FA_TH2;
- } else {
- if (is_dfs_band) {
- /* For DFS band and no link */
- dm_FA_thres[0] = 250;
- dm_FA_thres[1] = 1000;
- dm_FA_thres[2] = 2000;
- } else {
- dm_FA_thres[0] = 2000;
- dm_FA_thres[1] = 4000;
- dm_FA_thres[2] = 5000;
- }
- }
-}
-
-static u8 odm_forbidden_igi_check(void *dm_void, u8 dig_dynamic_min,
- u8 current_igi)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- struct false_alarm_stat *fa_cnt =
- (struct false_alarm_stat *)phydm_get_structure(
- dm, PHYDM_FALSEALMCNT);
- u8 rx_gain_range_min = dig_tab->rx_gain_range_min;
-
- if (dig_tab->large_fa_timeout) {
- if (--dig_tab->large_fa_timeout == 0)
- dig_tab->large_fa_hit = 0;
- }
-
- if (fa_cnt->cnt_all > 10000) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
-			     "%s(): Abnormal false alarm case.\n", __func__);
-
- if (dig_tab->large_fa_hit != 3)
- dig_tab->large_fa_hit++;
-
- if (dig_tab->forbidden_igi < current_igi) {
- dig_tab->forbidden_igi = current_igi;
- dig_tab->large_fa_hit = 1;
- dig_tab->large_fa_timeout = LARGE_FA_TIMEOUT;
- }
-
- if (dig_tab->large_fa_hit >= 3) {
- if ((dig_tab->forbidden_igi + 2) >
- dig_tab->rx_gain_range_max)
- rx_gain_range_min = dig_tab->rx_gain_range_max;
- else
- rx_gain_range_min =
- (dig_tab->forbidden_igi + 2);
- dig_tab->recover_cnt = 1800;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
-				"%s(): Abnormal false alarm case: recover_cnt = %d\n",
- __func__, dig_tab->recover_cnt);
- }
- }
-
- else if (fa_cnt->cnt_all > 2000) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
-			     "Abnormal false alarm case.\n");
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "cnt_all=%d, cnt_all_pre=%d, current_igi=0x%x, pre_ig_value=0x%x\n",
- fa_cnt->cnt_all, fa_cnt->cnt_all_pre, current_igi,
- dig_tab->pre_ig_value);
-
- /* fa_cnt->cnt_all = 1.1875*fa_cnt->cnt_all_pre */
- if ((fa_cnt->cnt_all >
- (fa_cnt->cnt_all_pre + (fa_cnt->cnt_all_pre >> 3) +
- (fa_cnt->cnt_all_pre >> 4))) &&
- current_igi < dig_tab->pre_ig_value) {
- if (dig_tab->large_fa_hit != 3)
- dig_tab->large_fa_hit++;
-
- if (dig_tab->forbidden_igi < current_igi) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "Updating forbidden_igi by current_igi, forbidden_igi=0x%x, current_igi=0x%x\n",
- dig_tab->forbidden_igi, current_igi);
-
- dig_tab->forbidden_igi = current_igi;
- dig_tab->large_fa_hit = 1;
- dig_tab->large_fa_timeout = LARGE_FA_TIMEOUT;
- }
- }
-
- if (dig_tab->large_fa_hit >= 3) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "FaHit is greater than 3, rx_gain_range_max=0x%x, rx_gain_range_min=0x%x, forbidden_igi=0x%x\n",
- dig_tab->rx_gain_range_max, rx_gain_range_min,
- dig_tab->forbidden_igi);
-
- if ((dig_tab->forbidden_igi + 1) >
- dig_tab->rx_gain_range_max)
- rx_gain_range_min = dig_tab->rx_gain_range_max;
- else
- rx_gain_range_min =
- (dig_tab->forbidden_igi + 1);
-
- dig_tab->recover_cnt = 1200;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
-				"Abnormal false alarm case: recover_cnt = %d, rx_gain_range_min = 0x%x\n",
- dig_tab->recover_cnt, rx_gain_range_min);
- }
- } else {
- if (dig_tab->recover_cnt != 0) {
- dig_tab->recover_cnt--;
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Normal Case: recover_cnt = %d\n",
- __func__, dig_tab->recover_cnt);
- return rx_gain_range_min;
- }
-
- if (dig_tab->large_fa_hit >= 3) {
- dig_tab->large_fa_hit = 0;
- return rx_gain_range_min;
- }
-
- if ((dig_tab->forbidden_igi - 2) <
- dig_dynamic_min) { /* DM_DIG_MIN) */
- dig_tab->forbidden_igi =
- dig_dynamic_min; /* DM_DIG_MIN; */
- rx_gain_range_min = dig_dynamic_min; /* DM_DIG_MIN; */
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Normal Case: At Lower Bound\n",
- __func__);
- } else {
- if (dig_tab->large_fa_hit == 0) {
- dig_tab->forbidden_igi -= 2;
- rx_gain_range_min =
- (dig_tab->forbidden_igi + 2);
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): Normal Case: Approach Lower Bound\n",
- __func__);
- }
- }
- }
-
- return rx_gain_range_min;
-}
-
-static void phydm_set_big_jump_step(void *dm_void, u8 current_igi)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- u8 step1[8] = {24, 30, 40, 50, 60, 70, 80, 90};
- u8 i;
-
- if (dig_tab->enable_adjust_big_jump == 0)
- return;
-
- for (i = 0; i <= dig_tab->big_jump_step1; i++) {
- if ((current_igi + step1[i]) >
- dig_tab->big_jump_lmt[dig_tab->agc_table_idx]) {
- if (i != 0)
- i = i - 1;
- break;
- } else if (i == dig_tab->big_jump_step1) {
- break;
- }
- }
- if (dm->support_ic_type & ODM_RTL8822B)
- odm_set_bb_reg(dm, 0x8c8, 0xe, i);
- else if (dm->support_ic_type & ODM_RTL8197F)
- odm_set_bb_reg(dm, ODM_REG_BB_AGC_SET_2_11N, 0xe, i);
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): bigjump = %d (ori = 0x%x), LMT=0x%x\n", __func__, i,
- dig_tab->big_jump_step1,
- dig_tab->big_jump_lmt[dig_tab->agc_table_idx]);
-}
-
-void odm_write_dig(void *dm_void, u8 current_igi)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
-
- if (dig_tab->is_stop_dig) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): Stop Writing IGI\n",
- __func__);
- return;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): ODM_REG(IGI_A,dm)=0x%x, ODM_BIT(IGI,dm)=0x%x\n",
- __func__, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm));
-
- /* 1 Check initial gain by upper bound */
- if (!dig_tab->is_psd_in_progress && dm->is_linked) {
- if (current_igi > dig_tab->rx_gain_range_max) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): current_igi(0x%02x) is larger than upper bound !!\n",
- __func__, current_igi);
- current_igi = dig_tab->rx_gain_range_max;
- }
- if (dm->support_ability & ODM_BB_ADAPTIVITY &&
- dm->adaptivity_flag) {
- if (current_igi > dm->adaptivity_igi_upper)
- current_igi = dm->adaptivity_igi_upper;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): adaptivity case: Force upper bound to 0x%x !!!!!!\n",
- __func__, current_igi);
- }
- }
-
- if (dig_tab->cur_ig_value != current_igi) {
- /* Modify big jump step for 8822B and 8197F */
- if (dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8197F))
- phydm_set_big_jump_step(dm, current_igi);
-
- /* Set IGI value of CCK for new CCK AGC */
- if (dm->cck_new_agc) {
- if (dm->support_ic_type & ODM_IC_PHY_STATUE_NEW_TYPE)
- odm_set_bb_reg(dm, 0xa0c, 0x00003f00,
- (current_igi >> 1));
- }
-
-		/* Added by YuChen for the USB IO too-slow issue */
- if ((dm->support_ability & ODM_BB_ADAPTIVITY) &&
- current_igi > dig_tab->cur_ig_value) {
- dig_tab->cur_ig_value = current_igi;
- phydm_adaptivity(dm);
- }
-
- /* 1 Set IGI value */
- if (dm->support_platform & (ODM_WIN | ODM_CE)) {
- odm_set_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm),
- current_igi);
-
- if (dm->rf_type > ODM_1T1R)
- odm_set_bb_reg(dm, ODM_REG(IGI_B, dm),
- ODM_BIT(IGI, dm), current_igi);
-
- } else if (dm->support_platform & (ODM_AP)) {
- phydm_check_ap_write_dig(dm, current_igi);
- }
-
- dig_tab->cur_ig_value = current_igi;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): current_igi(0x%02x).\n", __func__,
- current_igi);
-}
-
-void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type,
- enum phydm_pause_level pause_level, u8 igi_value)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- s8 max_level;
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s()=========> level = %d\n", __func__,
- pause_level);
-
- if (dig_tab->pause_dig_level == 0 &&
- (!(dm->support_ability & ODM_BB_DIG) ||
- !(dm->support_ability & ODM_BB_FA_CNT))) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): Return: support_ability DIG or FA is disabled !!\n",
- __func__);
- return;
- }
-
- if (pause_level > DM_DIG_MAX_PAUSE_TYPE) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Return: Wrong pause level !!\n", __func__);
- return;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): pause level = 0x%x, Current value = 0x%x\n",
- __func__, dig_tab->pause_dig_level, igi_value);
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): pause value = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- __func__, dig_tab->pause_dig_value[7],
- dig_tab->pause_dig_value[6], dig_tab->pause_dig_value[5],
- dig_tab->pause_dig_value[4], dig_tab->pause_dig_value[3],
- dig_tab->pause_dig_value[2], dig_tab->pause_dig_value[1],
- dig_tab->pause_dig_value[0]);
-
- switch (pause_type) {
- /* Pause DIG */
- case PHYDM_PAUSE: {
- /* Disable DIG */
- odm_cmn_info_update(dm, ODM_CMNINFO_ABILITY,
- dm->support_ability & (~ODM_BB_DIG));
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): Pause DIG !!\n",
- __func__);
-
- /* Backup IGI value */
- if (dig_tab->pause_dig_level == 0) {
- dig_tab->igi_backup = dig_tab->cur_ig_value;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): Backup IGI = 0x%x, new IGI = 0x%x\n",
- __func__, dig_tab->igi_backup, igi_value);
- }
-
- /* Record IGI value */
- dig_tab->pause_dig_value[pause_level] = igi_value;
-
- /* Update pause level */
- dig_tab->pause_dig_level =
- (dig_tab->pause_dig_level | BIT(pause_level));
-
- /* Write new IGI value */
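-		/* Only the highest active pause level owns the register: write
-		 * only when no level above pause_level is currently paused.
-		 */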
- if (BIT(pause_level + 1) > dig_tab->pause_dig_level) {
- odm_write_dig(dm, igi_value);
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): IGI of higher level = 0x%x\n",
- __func__, igi_value);
- }
- break;
- }
- /* Resume DIG */
- case PHYDM_RESUME: {
- /* check if the level is illegal or not */
- if ((dig_tab->pause_dig_level & (BIT(pause_level))) != 0) {
- dig_tab->pause_dig_level = dig_tab->pause_dig_level &
- (~(BIT(pause_level)));
- dig_tab->pause_dig_value[pause_level] = 0;
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): Resume DIG !!\n",
- __func__);
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Wrong resume level !!\n", __func__);
- break;
- }
-
- /* Resume DIG */
- if (dig_tab->pause_dig_level == 0) {
- /* Write backup IGI value */
- odm_write_dig(dm, dig_tab->igi_backup);
- dig_tab->is_ignore_dig = true;
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Write original IGI = 0x%x\n",
- __func__, dig_tab->igi_backup);
-
- /* Enable DIG */
- odm_cmn_info_update(dm, ODM_CMNINFO_ABILITY,
- dm->support_ability | ODM_BB_DIG);
- break;
- }
-
- if (BIT(pause_level) <= dig_tab->pause_dig_level)
- break;
-
- /* Calculate the maximum level now */
- for (max_level = (pause_level - 1); max_level >= 0;
- max_level--) {
- if ((dig_tab->pause_dig_level & BIT(max_level)) > 0)
- break;
- }
-
- /* pin max_level to be >= 0 */
- max_level = max_t(s8, 0, max_level);
- /* write IGI of lower level */
- odm_write_dig(dm, dig_tab->pause_dig_value[max_level]);
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Write IGI (0x%x) of level (%d)\n", __func__,
- dig_tab->pause_dig_value[max_level], max_level);
- break;
- }
- default:
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): Wrong type !!\n",
- __func__);
- break;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): pause level = 0x%x, Current value = 0x%x\n",
- __func__, dig_tab->pause_dig_level, igi_value);
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): pause value = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- __func__, dig_tab->pause_dig_value[7],
- dig_tab->pause_dig_value[6], dig_tab->pause_dig_value[5],
- dig_tab->pause_dig_value[4], dig_tab->pause_dig_value[3],
- dig_tab->pause_dig_value[2], dig_tab->pause_dig_value[1],
- dig_tab->pause_dig_value[0]);
-}
-
-static bool odm_dig_abort(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
-
- /* support_ability */
- if (!(dm->support_ability & ODM_BB_FA_CNT)) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): Return: support_ability ODM_BB_FA_CNT is disabled\n",
- __func__);
- return true;
- }
-
- /* support_ability */
- if (!(dm->support_ability & ODM_BB_DIG)) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): Return: support_ability ODM_BB_DIG is disabled\n",
- __func__);
- return true;
- }
-
- /* ScanInProcess */
- if (*dm->is_scan_in_process) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Return: In Scan Progress\n", __func__);
- return true;
- }
-
- if (dig_tab->is_ignore_dig) {
- dig_tab->is_ignore_dig = false;
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): Return: Ignore DIG\n",
- __func__);
- return true;
- }
-
-	/* added by Neil Chen to avoid running while PSD is in progress */
- if (!dm->is_dm_initial_gain_enable) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Return: PSD is Processing\n", __func__);
- return true;
- }
-
- return false;
-}
-
-void odm_dig_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- u32 ret_value;
- u8 i;
-
- dig_tab->is_stop_dig = false;
- dig_tab->is_ignore_dig = false;
- dig_tab->is_psd_in_progress = false;
- dig_tab->cur_ig_value =
- (u8)odm_get_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm));
- dig_tab->pre_ig_value = 0;
- dig_tab->rssi_low_thresh = DM_DIG_THRESH_LOW;
- dig_tab->rssi_high_thresh = DM_DIG_THRESH_HIGH;
- dig_tab->fa_low_thresh = DM_FALSEALARM_THRESH_LOW;
- dig_tab->fa_high_thresh = DM_FALSEALARM_THRESH_HIGH;
- dig_tab->backoff_val = DM_DIG_BACKOFF_DEFAULT;
- dig_tab->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
- dig_tab->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
- dig_tab->pre_cck_cca_thres = 0xFF;
- dig_tab->cur_cck_cca_thres = 0x83;
- dig_tab->forbidden_igi = DM_DIG_MIN_NIC;
- dig_tab->large_fa_hit = 0;
- dig_tab->large_fa_timeout = 0;
- dig_tab->recover_cnt = 0;
- dig_tab->is_media_connect_0 = false;
- dig_tab->is_media_connect_1 = false;
-
-	/* Initialize is_dm_initial_gain_enable to true; clearing it (e.g. while PSD runs) makes DIG abort */
- dm->is_dm_initial_gain_enable = true;
-
- dig_tab->dig_dynamic_min_0 = DM_DIG_MIN_NIC;
- dig_tab->dig_dynamic_min_1 = DM_DIG_MIN_NIC;
-
-	/* Initialize BT30 IGI */
- dig_tab->bt30_cur_igi = 0x32;
-
- odm_memory_set(dm, dig_tab->pause_dig_value, 0,
- (DM_DIG_MAX_PAUSE_TYPE + 1));
- dig_tab->pause_dig_level = 0;
- odm_memory_set(dm, dig_tab->pause_cckpd_value, 0,
- (DM_DIG_MAX_PAUSE_TYPE + 1));
- dig_tab->pause_cckpd_level = 0;
-
- dig_tab->rx_gain_range_max = DM_DIG_MAX_NIC;
- dig_tab->rx_gain_range_min = DM_DIG_MIN_NIC;
-
- dig_tab->enable_adjust_big_jump = 1;
- if (dm->support_ic_type & ODM_RTL8822B) {
- ret_value = odm_get_bb_reg(dm, 0x8c8, MASKLWORD);
- dig_tab->big_jump_step1 = (u8)(ret_value & 0xe) >> 1;
- dig_tab->big_jump_step2 = (u8)(ret_value & 0x30) >> 4;
- dig_tab->big_jump_step3 = (u8)(ret_value & 0xc0) >> 6;
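-		/* big_jump_step1/2/3 come from bits [3:1], [5:4] and [7:6] of 0x8c8. */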
-
- } else if (dm->support_ic_type & ODM_RTL8197F) {
- ret_value =
- odm_get_bb_reg(dm, ODM_REG_BB_AGC_SET_2_11N, MASKLWORD);
- dig_tab->big_jump_step1 = (u8)(ret_value & 0xe) >> 1;
- dig_tab->big_jump_step2 = (u8)(ret_value & 0x30) >> 4;
- dig_tab->big_jump_step3 = (u8)(ret_value & 0xc0) >> 6;
- }
- if (dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8197F)) {
- for (i = 0; i < sizeof(dig_tab->big_jump_lmt); i++) {
- if (dig_tab->big_jump_lmt[i] == 0)
- dig_tab->big_jump_lmt[i] =
- 0x64; /* Set -10dBm as default value */
- }
- }
-}
-
-void odm_DIG(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- /* Common parameters */
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- struct false_alarm_stat *fa_cnt =
- (struct false_alarm_stat *)phydm_get_structure(
- dm, PHYDM_FALSEALMCNT);
- bool first_connect, first_dis_connect;
- u8 dig_max_of_min, dig_dynamic_min;
- u8 dm_dig_max, dm_dig_min;
- u8 current_igi = dig_tab->cur_ig_value;
- u8 offset;
- u32 dm_FA_thres[3];
- u32 tx_tp = 0, rx_tp = 0;
- bool is_dfs_band = false;
- bool is_performance = true, is_first_tp_target = false,
- is_first_coverage = false;
-
- if (odm_dig_abort(dm))
- return;
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "DIG Start===>\n");
-
- /* 1 Update status */
- {
- dig_dynamic_min = dig_tab->dig_dynamic_min_0;
- first_connect = (dm->is_linked) && !dig_tab->is_media_connect_0;
- first_dis_connect =
- (!dm->is_linked) && dig_tab->is_media_connect_0;
- }
-
- /* 1 Boundary Decision */
- {
-		/* 2 For WIN/CE */
- if (dm->support_ic_type >= ODM_RTL8188E)
- dm_dig_max = 0x5A;
- else
- dm_dig_max = DM_DIG_MAX_NIC;
-
- if (dm->support_ic_type != ODM_RTL8821)
- dm_dig_min = DM_DIG_MIN_NIC;
- else
- dm_dig_min = 0x1C;
-
- dig_max_of_min = DM_DIG_MAX_AP;
-
- /* Modify lower bound for DFS band */
- if ((((*dm->channel >= 52) && (*dm->channel <= 64)) ||
- ((*dm->channel >= 100) && (*dm->channel <= 140))) &&
- phydm_dfs_master_enabled(dm)) {
- is_dfs_band = true;
- if (*dm->band_width == ODM_BW20M)
- dm_dig_min = DM_DIG_MIN_AP_DFS + 2;
- else
- dm_dig_min = DM_DIG_MIN_AP_DFS;
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "DIG: ====== In DFS band ======\n");
- }
- }
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
-		     "DIG: Absolute upper bound = 0x%x, lower bound = 0x%x\n",
- dm_dig_max, dm_dig_min);
-
- if (dm->pu1_forced_igi_lb && (*dm->pu1_forced_igi_lb > 0)) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "DIG: Force IGI lb to: 0x%02x\n",
- *dm->pu1_forced_igi_lb);
- dm_dig_min = *dm->pu1_forced_igi_lb;
- dm_dig_max = (dm_dig_min <= dm_dig_max) ? (dm_dig_max) :
- (dm_dig_min + 1);
- }
-
- /* 1 Adjust boundary by RSSI */
- if (dm->is_linked && is_performance) {
- /* 2 Modify DIG upper bound */
-		/* 4 Modify DIG upper bound for 92E, 8723A/B, 8821 & 8812 BT */
- if ((dm->support_ic_type & (ODM_RTL8192E | ODM_RTL8723B |
- ODM_RTL8812 | ODM_RTL8821)) &&
- dm->is_bt_limited_dig == 1) {
- offset = 10;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "DIG: Coex. case: Force upper bound to RSSI + %d\n",
- offset);
- } else {
- offset = 15;
- }
-
- if ((dm->rssi_min + offset) > dm_dig_max)
- dig_tab->rx_gain_range_max = dm_dig_max;
- else if ((dm->rssi_min + offset) < dm_dig_min)
- dig_tab->rx_gain_range_max = dm_dig_min;
- else
- dig_tab->rx_gain_range_max = dm->rssi_min + offset;
-
- /* 2 Modify DIG lower bound */
- /* if(dm->is_one_entry_only) */
- {
- if (dm->rssi_min < dm_dig_min)
- dig_dynamic_min = dm_dig_min;
- else if (dm->rssi_min > dig_max_of_min)
- dig_dynamic_min = dig_max_of_min;
- else
- dig_dynamic_min = dm->rssi_min;
-
- if (is_dfs_band) {
- dig_dynamic_min = dm_dig_min;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "DIG: DFS band: Force lower bound to 0x%x after link\n",
- dm_dig_min);
- }
- }
- } else {
- if (is_performance && is_dfs_band) {
- dig_tab->rx_gain_range_max = 0x28;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "DIG: DFS band: Force upper bound to 0x%x before link\n",
- dig_tab->rx_gain_range_max);
- } else {
- if (is_performance)
- dig_tab->rx_gain_range_max = DM_DIG_MAX_OF_MIN;
- else
- dig_tab->rx_gain_range_max = dm_dig_max;
- }
- dig_dynamic_min = dm_dig_min;
- }
-
- /* 1 Force Lower Bound for AntDiv */
- if (dm->is_linked && !dm->is_one_entry_only &&
- (dm->support_ic_type & ODM_ANTDIV_SUPPORT) &&
- (dm->support_ability & ODM_BB_ANT_DIV)) {
- if (dm->ant_div_type == CG_TRX_HW_ANTDIV ||
- dm->ant_div_type == CG_TRX_SMART_ANTDIV) {
- if (dig_tab->ant_div_rssi_max > dig_max_of_min)
- dig_dynamic_min = dig_max_of_min;
- else
- dig_dynamic_min = (u8)dig_tab->ant_div_rssi_max;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "DIG: AntDiv case: Force lower bound to 0x%x\n",
- dig_dynamic_min);
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "DIG: AntDiv case: rssi_max = 0x%x\n",
- dig_tab->ant_div_rssi_max);
- }
- }
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "DIG: Adjust boundary by RSSI Upper bound = 0x%x, Lower bound = 0x%x\n",
- dig_tab->rx_gain_range_max, dig_dynamic_min);
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
-		"DIG: Link status: is_linked = %d, RSSI = %d, bFirstConnect = %d, bFirstDisConnect = %d\n",
- dm->is_linked, dm->rssi_min, first_connect, first_dis_connect);
-
- /* 1 Modify DIG lower bound, deal with abnormal case */
- /* 2 Abnormal false alarm case */
- if (is_dfs_band) {
- dig_tab->rx_gain_range_min = dig_dynamic_min;
- } else {
- if (!dm->is_linked) {
- dig_tab->rx_gain_range_min = dig_dynamic_min;
-
- if (first_dis_connect)
- dig_tab->forbidden_igi = dig_dynamic_min;
- } else {
- dig_tab->rx_gain_range_min = odm_forbidden_igi_check(
- dm, dig_dynamic_min, current_igi);
- }
- }
-
- /* 2 Abnormal # beacon case */
- if (dm->is_linked && !first_connect) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "Beacon Num (%d)\n",
- dm->phy_dbg_info.num_qry_beacon_pkt);
- if (dm->phy_dbg_info.num_qry_beacon_pkt < 5 &&
- dm->bsta_state) {
- dig_tab->rx_gain_range_min = 0x1c;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "DIG: Abnormal #beacon (%d) case in STA mode: Force lower bound to 0x%x\n",
- dm->phy_dbg_info.num_qry_beacon_pkt,
- dig_tab->rx_gain_range_min);
- }
- }
-
- /* 2 Abnormal lower bound case */
- if (dig_tab->rx_gain_range_min > dig_tab->rx_gain_range_max) {
- dig_tab->rx_gain_range_min = dig_tab->rx_gain_range_max;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "DIG: Abnormal lower bound case: Force lower bound to 0x%x\n",
- dig_tab->rx_gain_range_min);
- }
-
- /* 1 False alarm threshold decision */
- odm_fa_threshold_check(dm, is_dfs_band, is_performance, rx_tp, tx_tp,
- dm_FA_thres);
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "DIG: False alarm threshold = %d, %d, %d\n",
- dm_FA_thres[0], dm_FA_thres[1], dm_FA_thres[2]);
-
- /* 1 Adjust initial gain by false alarm */
- if (dm->is_linked && is_performance) {
- /* 2 After link */
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "DIG: Adjust IGI after link\n");
-
- if (is_first_tp_target || (first_connect && is_performance)) {
- dig_tab->large_fa_hit = 0;
-
- if (is_dfs_band) {
- u8 rssi = dm->rssi_min;
-
- current_igi =
- (dm->rssi_min > 0x28) ? 0x28 : rssi;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
-					"DIG: DFS band: One-shot to 0x28 at most\n");
- } else {
- current_igi = phydm_get_current_igi(
- dig_max_of_min, dm->rssi_min,
- current_igi);
- }
-
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
-				"DIG: First connect case: IGI does one-shot to 0x%x\n",
- current_igi);
-
- } else {
- if (fa_cnt->cnt_all > dm_FA_thres[2])
- current_igi = current_igi + 4;
- else if (fa_cnt->cnt_all > dm_FA_thres[1])
- current_igi = current_igi + 2;
- else if (fa_cnt->cnt_all < dm_FA_thres[0])
- current_igi = current_igi - 2;
-
- /* 4 Abnormal # beacon case */
- if (dm->phy_dbg_info.num_qry_beacon_pkt < 5 &&
- fa_cnt->cnt_all < DM_DIG_FA_TH1 &&
- dm->bsta_state) {
- current_igi = dig_tab->rx_gain_range_min;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "DIG: Abnormal #beacon (%d) case: IGI does one-shot to 0x%x\n",
- dm->phy_dbg_info.num_qry_beacon_pkt,
- current_igi);
- }
- }
- } else {
- /* 2 Before link */
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "DIG: Adjust IGI before link\n");
-
- if (first_dis_connect || is_first_coverage) {
- current_igi = dm_dig_min;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
-				"DIG: First disconnect case: IGI does one-shot to lower bound\n");
- } else {
- if (fa_cnt->cnt_all > dm_FA_thres[2])
- current_igi = current_igi + 4;
- else if (fa_cnt->cnt_all > dm_FA_thres[1])
- current_igi = current_igi + 2;
- else if (fa_cnt->cnt_all < dm_FA_thres[0])
- current_igi = current_igi - 2;
- }
- }
-
- /* 1 Check initial gain by upper/lower bound */
- if (current_igi < dig_tab->rx_gain_range_min)
- current_igi = dig_tab->rx_gain_range_min;
-
- if (current_igi > dig_tab->rx_gain_range_max)
- current_igi = dig_tab->rx_gain_range_max;
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "DIG: cur_ig_value=0x%x, TotalFA = %d\n",
- current_igi, fa_cnt->cnt_all);
-
- /* 1 Update status */
- if (dm->is_bt_hs_operation) {
- if (dm->is_linked) {
- if (dig_tab->bt30_cur_igi > (current_igi))
- odm_write_dig(dm, current_igi);
- else
- odm_write_dig(dm, dig_tab->bt30_cur_igi);
-
- dig_tab->is_media_connect_0 = dm->is_linked;
- dig_tab->dig_dynamic_min_0 = dig_dynamic_min;
- } else {
- if (dm->is_link_in_process)
- odm_write_dig(dm, 0x1c);
- else if (dm->is_bt_connect_process)
- odm_write_dig(dm, 0x28);
- else
- odm_write_dig(dm, dig_tab->bt30_cur_igi);
- }
-	} else { /* BT is not in use */
- odm_write_dig(dm, current_igi);
- dig_tab->is_media_connect_0 = dm->is_linked;
- dig_tab->dig_dynamic_min_0 = dig_dynamic_min;
- }
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "DIG end\n");
-}
-
-void odm_dig_by_rssi_lps(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct false_alarm_stat *fa_cnt =
- (struct false_alarm_stat *)phydm_get_structure(
- dm, PHYDM_FALSEALMCNT);
-
- u8 rssi_lower = DM_DIG_MIN_NIC; /* 0x1E or 0x1C */
- u8 current_igi = dm->rssi_min;
-
- if (odm_dig_abort(dm))
- return;
-
- current_igi = current_igi + RSSI_OFFSET_DIG;
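-	/* Start from rssi_min + RSSI_OFFSET_DIG, then nudge by the FA count below. */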
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s()==>\n", __func__);
-
-	/* Use FW PS mode to set IGI */
- /* Adjust by FA in LPS MODE */
- if (fa_cnt->cnt_all > DM_DIG_FA_TH2_LPS)
- current_igi = current_igi + 4;
- else if (fa_cnt->cnt_all > DM_DIG_FA_TH1_LPS)
- current_igi = current_igi + 2;
- else if (fa_cnt->cnt_all < DM_DIG_FA_TH0_LPS)
- current_igi = current_igi - 2;
-
- /* Lower bound checking */
-
- /* RSSI Lower bound check */
- if ((dm->rssi_min - 10) > DM_DIG_MIN_NIC)
- rssi_lower = (dm->rssi_min - 10);
- else
- rssi_lower = DM_DIG_MIN_NIC;
-
- /* Upper and Lower Bound checking */
- if (current_igi > DM_DIG_MAX_NIC)
- current_igi = DM_DIG_MAX_NIC;
- else if (current_igi < rssi_lower)
- current_igi = rssi_lower;
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): fa_cnt->cnt_all = %d\n", __func__,
- fa_cnt->cnt_all);
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): dm->rssi_min = %d\n", __func__,
- dm->rssi_min);
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): current_igi = 0x%x\n", __func__,
- current_igi);
-
- odm_write_dig(
- dm,
- current_igi); /* odm_write_dig(dm, dig_tab->cur_ig_value); */
-}
-
-/* 3============================================================
- * 3 FALSE ALARM CHECK
- * 3============================================================
- */
-
-void odm_false_alarm_counter_statistics(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct false_alarm_stat *false_alm_cnt =
- (struct false_alarm_stat *)phydm_get_structure(
- dm, PHYDM_FALSEALMCNT);
- struct rt_adcsmp *adc_smp = &dm->adcsmp;
- u32 ret_value;
-
- if (!(dm->support_ability & ODM_BB_FA_CNT))
- return;
-
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT, "%s()======>\n", __func__);
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES) {
- /* hold ofdm counter */
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31),
- 1); /* hold page C counter */
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RSTD_11N, BIT(31),
- 1); /* hold page D counter */
-
- ret_value = odm_get_bb_reg(dm, ODM_REG_OFDM_FA_TYPE1_11N,
- MASKDWORD);
- false_alm_cnt->cnt_fast_fsync = (ret_value & 0xffff);
- false_alm_cnt->cnt_sb_search_fail =
- ((ret_value & 0xffff0000) >> 16);
-
- ret_value = odm_get_bb_reg(dm, ODM_REG_OFDM_FA_TYPE2_11N,
- MASKDWORD);
- false_alm_cnt->cnt_ofdm_cca = (ret_value & 0xffff);
- false_alm_cnt->cnt_parity_fail =
- ((ret_value & 0xffff0000) >> 16);
-
- ret_value = odm_get_bb_reg(dm, ODM_REG_OFDM_FA_TYPE3_11N,
- MASKDWORD);
- false_alm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
- false_alm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
-
- ret_value = odm_get_bb_reg(dm, ODM_REG_OFDM_FA_TYPE4_11N,
- MASKDWORD);
- false_alm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
-
- false_alm_cnt->cnt_ofdm_fail =
- false_alm_cnt->cnt_parity_fail +
- false_alm_cnt->cnt_rate_illegal +
- false_alm_cnt->cnt_crc8_fail +
- false_alm_cnt->cnt_mcs_fail +
- false_alm_cnt->cnt_fast_fsync +
- false_alm_cnt->cnt_sb_search_fail;
-
- /* read CCK CRC32 counter */
- false_alm_cnt->cnt_cck_crc32_error = odm_get_bb_reg(
- dm, ODM_REG_CCK_CRC32_ERROR_CNT_11N, MASKDWORD);
- false_alm_cnt->cnt_cck_crc32_ok = odm_get_bb_reg(
- dm, ODM_REG_CCK_CRC32_OK_CNT_11N, MASKDWORD);
-
- /* read OFDM CRC32 counter */
- ret_value = odm_get_bb_reg(dm, ODM_REG_OFDM_CRC32_CNT_11N,
- MASKDWORD);
- false_alm_cnt->cnt_ofdm_crc32_error =
- (ret_value & 0xffff0000) >> 16;
- false_alm_cnt->cnt_ofdm_crc32_ok = ret_value & 0xffff;
-
- /* read HT CRC32 counter */
- ret_value =
- odm_get_bb_reg(dm, ODM_REG_HT_CRC32_CNT_11N, MASKDWORD);
- false_alm_cnt->cnt_ht_crc32_error =
- (ret_value & 0xffff0000) >> 16;
- false_alm_cnt->cnt_ht_crc32_ok = ret_value & 0xffff;
-
-		/* 11N ICs have no VHT CRC32 counter; report zero */
- false_alm_cnt->cnt_vht_crc32_error = 0;
- false_alm_cnt->cnt_vht_crc32_ok = 0;
-
- {
- /* hold cck counter */
- odm_set_bb_reg(dm, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
- odm_set_bb_reg(dm, ODM_REG_CCK_FA_RST_11N, BIT(14), 1);
-
- ret_value = odm_get_bb_reg(dm, ODM_REG_CCK_FA_LSB_11N,
- MASKBYTE0);
- false_alm_cnt->cnt_cck_fail = ret_value;
-
- ret_value = odm_get_bb_reg(dm, ODM_REG_CCK_FA_MSB_11N,
- MASKBYTE3);
- false_alm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
-
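-			/* The CCK CCA count appears to be read back byte-swapped;
-			 * the access below reassembles it from the two low bytes.
-			 */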
- ret_value = odm_get_bb_reg(dm, ODM_REG_CCK_CCA_CNT_11N,
- MASKDWORD);
- false_alm_cnt->cnt_cck_cca =
- ((ret_value & 0xFF) << 8) |
- ((ret_value & 0xFF00) >> 8);
- }
-
- false_alm_cnt->cnt_all_pre = false_alm_cnt->cnt_all;
-
- false_alm_cnt->cnt_all = (false_alm_cnt->cnt_fast_fsync +
- false_alm_cnt->cnt_sb_search_fail +
- false_alm_cnt->cnt_parity_fail +
- false_alm_cnt->cnt_rate_illegal +
- false_alm_cnt->cnt_crc8_fail +
- false_alm_cnt->cnt_mcs_fail +
- false_alm_cnt->cnt_cck_fail);
-
- false_alm_cnt->cnt_cca_all = false_alm_cnt->cnt_ofdm_cca +
- false_alm_cnt->cnt_cck_cca;
-
- if (dm->support_ic_type >= ODM_RTL8188E) {
- /*reset false alarm counter registers*/
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RSTC_11N, BIT(31),
- 1);
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RSTC_11N, BIT(31),
- 0);
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RSTD_11N, BIT(27),
- 1);
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RSTD_11N, BIT(27),
- 0);
-
- /*update ofdm counter*/
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31),
- 0); /*update page C counter*/
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RSTD_11N, BIT(31),
- 0); /*update page D counter*/
-
- /*reset CCK CCA counter*/
- odm_set_bb_reg(dm, ODM_REG_CCK_FA_RST_11N,
- BIT(13) | BIT(12), 0);
- odm_set_bb_reg(dm, ODM_REG_CCK_FA_RST_11N,
- BIT(13) | BIT(12), 2);
-
- /*reset CCK FA counter*/
- odm_set_bb_reg(dm, ODM_REG_CCK_FA_RST_11N,
- BIT(15) | BIT(14), 0);
- odm_set_bb_reg(dm, ODM_REG_CCK_FA_RST_11N,
- BIT(15) | BIT(14), 2);
-
- /*reset CRC32 counter*/
- odm_set_bb_reg(dm, ODM_REG_PAGE_F_RST_11N, BIT(16), 1);
- odm_set_bb_reg(dm, ODM_REG_PAGE_F_RST_11N, BIT(16), 0);
- }
-
- /* Get debug port 0 */
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11N, MASKDWORD, 0x0);
- false_alm_cnt->dbg_port0 =
- odm_get_bb_reg(dm, ODM_REG_RPT_11N, MASKDWORD);
-
- /* Get EDCCA flag */
- odm_set_bb_reg(dm, ODM_REG_DBG_RPT_11N, MASKDWORD, 0x208);
- false_alm_cnt->edcca_flag =
- (bool)odm_get_bb_reg(dm, ODM_REG_RPT_11N, BIT(30));
-
- ODM_RT_TRACE(
- dm, ODM_COMP_FA_CNT,
- "[OFDM FA Detail] Parity_Fail = (( %d )), Rate_Illegal = (( %d )), CRC8_fail = (( %d )), Mcs_fail = (( %d )), Fast_Fsync = (( %d )), SB_Search_fail = (( %d ))\n",
- false_alm_cnt->cnt_parity_fail,
- false_alm_cnt->cnt_rate_illegal,
- false_alm_cnt->cnt_crc8_fail,
- false_alm_cnt->cnt_mcs_fail,
- false_alm_cnt->cnt_fast_fsync,
- false_alm_cnt->cnt_sb_search_fail);
- }
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- u32 cck_enable;
-
- /* read OFDM FA counter */
- false_alm_cnt->cnt_ofdm_fail =
- odm_get_bb_reg(dm, ODM_REG_OFDM_FA_11AC, MASKLWORD);
-
- /* Read CCK FA counter */
- false_alm_cnt->cnt_cck_fail =
- odm_get_bb_reg(dm, ODM_REG_CCK_FA_11AC, MASKLWORD);
-
- /* read CCK/OFDM CCA counter */
- ret_value =
- odm_get_bb_reg(dm, ODM_REG_CCK_CCA_CNT_11AC, MASKDWORD);
- false_alm_cnt->cnt_ofdm_cca = (ret_value & 0xffff0000) >> 16;
- false_alm_cnt->cnt_cck_cca = ret_value & 0xffff;
-
- /* read CCK CRC32 counter */
- ret_value = odm_get_bb_reg(dm, ODM_REG_CCK_CRC32_CNT_11AC,
- MASKDWORD);
- false_alm_cnt->cnt_cck_crc32_error =
- (ret_value & 0xffff0000) >> 16;
- false_alm_cnt->cnt_cck_crc32_ok = ret_value & 0xffff;
-
- /* read OFDM CRC32 counter */
- ret_value = odm_get_bb_reg(dm, ODM_REG_OFDM_CRC32_CNT_11AC,
- MASKDWORD);
- false_alm_cnt->cnt_ofdm_crc32_error =
- (ret_value & 0xffff0000) >> 16;
- false_alm_cnt->cnt_ofdm_crc32_ok = ret_value & 0xffff;
-
- /* read HT CRC32 counter */
- ret_value = odm_get_bb_reg(dm, ODM_REG_HT_CRC32_CNT_11AC,
- MASKDWORD);
- false_alm_cnt->cnt_ht_crc32_error =
- (ret_value & 0xffff0000) >> 16;
- false_alm_cnt->cnt_ht_crc32_ok = ret_value & 0xffff;
-
- /* read VHT CRC32 counter */
- ret_value = odm_get_bb_reg(dm, ODM_REG_VHT_CRC32_CNT_11AC,
- MASKDWORD);
- false_alm_cnt->cnt_vht_crc32_error =
- (ret_value & 0xffff0000) >> 16;
- false_alm_cnt->cnt_vht_crc32_ok = ret_value & 0xffff;
-
- /* reset OFDM FA counter */
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
- odm_set_bb_reg(dm, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
-
- /* reset CCK FA counter */
- odm_set_bb_reg(dm, ODM_REG_CCK_FA_RST_11AC, BIT(15), 0);
- odm_set_bb_reg(dm, ODM_REG_CCK_FA_RST_11AC, BIT(15), 1);
-
- /* reset CCA counter */
- odm_set_bb_reg(dm, ODM_REG_RST_RPT_11AC, BIT(0), 1);
- odm_set_bb_reg(dm, ODM_REG_RST_RPT_11AC, BIT(0), 0);
-
- cck_enable =
- odm_get_bb_reg(dm, ODM_REG_BB_RX_PATH_11AC, BIT(28));
- if (cck_enable) { /* if(*dm->band_type == ODM_BAND_2_4G) */
- false_alm_cnt->cnt_all = false_alm_cnt->cnt_ofdm_fail +
- false_alm_cnt->cnt_cck_fail;
- false_alm_cnt->cnt_cca_all =
- false_alm_cnt->cnt_cck_cca +
- false_alm_cnt->cnt_ofdm_cca;
- } else {
- false_alm_cnt->cnt_all = false_alm_cnt->cnt_ofdm_fail;
- false_alm_cnt->cnt_cca_all =
- false_alm_cnt->cnt_ofdm_cca;
- }
-
- if (adc_smp->adc_smp_state == ADCSMP_STATE_IDLE) {
- if (phydm_set_bb_dbg_port(
- dm, BB_DBGPORT_PRIORITY_1,
- 0x0)) { /*set debug port to 0x0*/
- false_alm_cnt->dbg_port0 =
- phydm_get_bb_dbg_port_value(dm);
- phydm_release_bb_dbg_port(dm);
- }
-
- if (phydm_set_bb_dbg_port(
- dm, BB_DBGPORT_PRIORITY_1,
-				    0x209)) { /*set debug port to 0x209*/
- false_alm_cnt->edcca_flag =
- (bool)((phydm_get_bb_dbg_port_value(
- dm) &
- BIT(30)) >>
- 30);
- phydm_release_bb_dbg_port(dm);
- }
- }
- }
-
- false_alm_cnt->cnt_crc32_error_all =
- false_alm_cnt->cnt_vht_crc32_error +
- false_alm_cnt->cnt_ht_crc32_error +
- false_alm_cnt->cnt_ofdm_crc32_error +
- false_alm_cnt->cnt_cck_crc32_error;
- false_alm_cnt->cnt_crc32_ok_all = false_alm_cnt->cnt_vht_crc32_ok +
- false_alm_cnt->cnt_ht_crc32_ok +
- false_alm_cnt->cnt_ofdm_crc32_ok +
- false_alm_cnt->cnt_cck_crc32_ok;
-
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT,
- "[CCA Cnt] {CCK, OFDM, Total} = {%d, %d, %d}\n",
- false_alm_cnt->cnt_cck_cca, false_alm_cnt->cnt_ofdm_cca,
- false_alm_cnt->cnt_cca_all);
-
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT,
- "[FA Cnt] {CCK, OFDM, Total} = {%d, %d, %d}\n",
- false_alm_cnt->cnt_cck_fail, false_alm_cnt->cnt_ofdm_fail,
- false_alm_cnt->cnt_all);
-
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT,
- "[CCK] CRC32 {error, ok}= {%d, %d}\n",
- false_alm_cnt->cnt_cck_crc32_error,
- false_alm_cnt->cnt_cck_crc32_ok);
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT, "[OFDM]CRC32 {error, ok}= {%d, %d}\n",
- false_alm_cnt->cnt_ofdm_crc32_error,
- false_alm_cnt->cnt_ofdm_crc32_ok);
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT,
- "[ HT ] CRC32 {error, ok}= {%d, %d}\n",
- false_alm_cnt->cnt_ht_crc32_error,
- false_alm_cnt->cnt_ht_crc32_ok);
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT,
- "[VHT] CRC32 {error, ok}= {%d, %d}\n",
- false_alm_cnt->cnt_vht_crc32_error,
- false_alm_cnt->cnt_vht_crc32_ok);
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT,
-		     "[TOTAL] CRC32 {error, ok}= {%d, %d}\n",
- false_alm_cnt->cnt_crc32_error_all,
- false_alm_cnt->cnt_crc32_ok_all);
- ODM_RT_TRACE(dm, ODM_COMP_FA_CNT,
- "FA_Cnt: Dbg port 0x0 = 0x%x, EDCCA = %d\n\n",
- false_alm_cnt->dbg_port0, false_alm_cnt->edcca_flag);
-}
-
-/* 3============================================================
- * 3 CCK Packet Detect threshold
- * 3============================================================
- */
-
-void odm_pause_cck_packet_detection(void *dm_void,
- enum phydm_pause_type pause_type,
- enum phydm_pause_level pause_level,
- u8 cck_pd_threshold)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- s8 max_level;
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s()=========> level = %d\n", __func__,
- pause_level);
-
- if (dig_tab->pause_cckpd_level == 0 &&
- (!(dm->support_ability & ODM_BB_CCK_PD) ||
- !(dm->support_ability & ODM_BB_FA_CNT))) {
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "Return: support_ability ODM_BB_CCK_PD or ODM_BB_FA_CNT is disabled\n");
- return;
- }
-
- if (pause_level > DM_DIG_MAX_PAUSE_TYPE) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Return: Wrong pause level !!\n", __func__);
- return;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): pause level = 0x%x, Current value = 0x%x\n",
- __func__, dig_tab->pause_cckpd_level, cck_pd_threshold);
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): pause value = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- __func__, dig_tab->pause_cckpd_value[7],
- dig_tab->pause_cckpd_value[6], dig_tab->pause_cckpd_value[5],
- dig_tab->pause_cckpd_value[4], dig_tab->pause_cckpd_value[3],
- dig_tab->pause_cckpd_value[2], dig_tab->pause_cckpd_value[1],
- dig_tab->pause_cckpd_value[0]);
-
- switch (pause_type) {
- /* Pause CCK Packet Detection threshold */
- case PHYDM_PAUSE: {
- /* Disable CCK PD */
- odm_cmn_info_update(dm, ODM_CMNINFO_ABILITY,
- dm->support_ability & (~ODM_BB_CCK_PD));
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Pause CCK packet detection threshold !!\n",
- __func__);
-
- /*Backup original CCK PD threshold decided by CCK PD mechanism*/
- if (dig_tab->pause_cckpd_level == 0) {
- dig_tab->cck_pd_backup = dig_tab->cur_cck_cca_thres;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): Backup CCKPD = 0x%x, new CCKPD = 0x%x\n",
- __func__, dig_tab->cck_pd_backup,
- cck_pd_threshold);
- }
-
- /* Update pause level */
- dig_tab->pause_cckpd_level =
- (dig_tab->pause_cckpd_level | BIT(pause_level));
-
- /* Record CCK PD threshold */
- dig_tab->pause_cckpd_value[pause_level] = cck_pd_threshold;
-
- /* Write new CCK PD threshold */
- if (BIT(pause_level + 1) > dig_tab->pause_cckpd_level) {
- odm_write_cck_cca_thres(dm, cck_pd_threshold);
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): CCKPD of higher level = 0x%x\n",
- __func__, cck_pd_threshold);
- }
- break;
- }
- /* Resume CCK Packet Detection threshold */
- case PHYDM_RESUME: {
-		/* check whether this level was actually paused */
- if ((dig_tab->pause_cckpd_level & (BIT(pause_level))) != 0) {
- dig_tab->pause_cckpd_level =
- dig_tab->pause_cckpd_level &
- (~(BIT(pause_level)));
- dig_tab->pause_cckpd_value[pause_level] = 0;
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Resume CCK PD !!\n", __func__);
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Wrong resume level !!\n", __func__);
- break;
- }
-
- /* Resume DIG */
- if (dig_tab->pause_cckpd_level == 0) {
- /* Write backup IGI value */
- odm_write_cck_cca_thres(dm, dig_tab->cck_pd_backup);
- /* dig_tab->is_ignore_dig = true; */
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Write original CCKPD = 0x%x\n",
- __func__, dig_tab->cck_pd_backup);
-
- /* Enable DIG */
- odm_cmn_info_update(dm, ODM_CMNINFO_ABILITY,
- dm->support_ability |
- ODM_BB_CCK_PD);
- break;
- }
-
- if (BIT(pause_level) <= dig_tab->pause_cckpd_level)
- break;
-
- /* Calculate the maximum level now */
- for (max_level = (pause_level - 1); max_level >= 0;
- max_level--) {
- if ((dig_tab->pause_cckpd_level & BIT(max_level)) > 0)
- break;
- }
-
- /* write CCKPD of lower level */
- odm_write_cck_cca_thres(dm,
- dig_tab->pause_cckpd_value[max_level]);
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): Write CCKPD (0x%x) of level (%d)\n",
- __func__, dig_tab->pause_cckpd_value[max_level],
- max_level);
- break;
- }
- default:
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s(): Wrong type !!\n",
- __func__);
- break;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG,
- "%s(): pause level = 0x%x, Current value = 0x%x\n",
- __func__, dig_tab->pause_cckpd_level, cck_pd_threshold);
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "%s(): pause value = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- __func__, dig_tab->pause_cckpd_value[7],
- dig_tab->pause_cckpd_value[6], dig_tab->pause_cckpd_value[5],
- dig_tab->pause_cckpd_value[4], dig_tab->pause_cckpd_value[3],
- dig_tab->pause_cckpd_value[2], dig_tab->pause_cckpd_value[1],
- dig_tab->pause_cckpd_value[0]);
-}
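
The pause/resume logic above keeps one stored threshold per pause level plus a bitmask of active levels: the first pause backs up the running value, a pause only reaches the hardware when no higher level already holds it, and a resume either restores the backup (last level released) or falls back to the value of the highest remaining level. A standalone sketch of that bookkeeping, with generic names of my own and a plain uint8_t standing in for the CCK PD threshold:

#include <stdint.h>

#define MAX_PAUSE_LEVEL 7

struct paused_value {
        uint8_t active_mask;                 /* BIT(level) set while paused */
        uint8_t saved[MAX_PAUSE_LEVEL + 1];  /* value requested per level   */
        uint8_t backup;                      /* value before the 1st pause  */
        uint8_t hw_value;                    /* what "hardware" holds now   */
};

static void pv_pause(struct paused_value *s, unsigned int level, uint8_t value)
{
        if (!s->active_mask)
                s->backup = s->hw_value;     /* first pause: keep original */

        s->active_mask |= 1u << level;
        s->saved[level] = value;

        /* Only program the value if no higher level already owns it. */
        if (s->active_mask < (1u << (level + 1)))
                s->hw_value = value;
}

static void pv_resume(struct paused_value *s, unsigned int level)
{
        int max;

        if (!(s->active_mask & (1u << level)))
                return;                      /* this level was not paused */

        s->active_mask &= ~(1u << level);
        s->saved[level] = 0;

        if (!s->active_mask) {               /* last level released */
                s->hw_value = s->backup;
                return;
        }
        if ((1u << level) <= s->active_mask)
                return;                      /* a higher level still owns it */

        for (max = level - 1; max >= 0; max--) {
                if (s->active_mask & (1u << max)) {
                        s->hw_value = s->saved[max]; /* next lower level */
                        break;
                }
        }
}
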
-
-void odm_cck_packet_detection_thresh(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- struct false_alarm_stat *false_alm_cnt =
- (struct false_alarm_stat *)phydm_get_structure(
- dm, PHYDM_FALSEALMCNT);
- u8 cur_cck_cca_thres = dig_tab->cur_cck_cca_thres, rssi_thd = 35;
-
- if ((!(dm->support_ability & ODM_BB_CCK_PD)) ||
- (!(dm->support_ability & ODM_BB_FA_CNT))) {
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "CCK_PD: return==========\n");
- return;
- }
-
- if (dm->ext_lna)
- return;
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "CCK_PD: ==========>\n");
-
- if (dig_tab->cck_fa_ma == 0xffffffff)
- dig_tab->cck_fa_ma = false_alm_cnt->cnt_cck_fail;
- else
- dig_tab->cck_fa_ma =
- ((dig_tab->cck_fa_ma << 1) + dig_tab->cck_fa_ma +
- false_alm_cnt->cnt_cck_fail) >>
- 2;
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "CCK_PD: CCK FA moving average = %d\n",
- dig_tab->cck_fa_ma);
-
- if (dm->is_linked) {
- if (dm->rssi_min > rssi_thd) {
- cur_cck_cca_thres = 0xcd;
- } else if (dm->rssi_min > 20) {
- if (dig_tab->cck_fa_ma >
- ((DM_DIG_FA_TH1 >> 1) + (DM_DIG_FA_TH1 >> 3)))
- cur_cck_cca_thres = 0xcd;
- else if (dig_tab->cck_fa_ma < (DM_DIG_FA_TH0 >> 1))
- cur_cck_cca_thres = 0x83;
- } else if (dm->rssi_min > 7) {
- cur_cck_cca_thres = 0x83;
- } else {
- cur_cck_cca_thres = 0x40;
- }
-
- } else {
- if (dig_tab->cck_fa_ma > 0x400)
- cur_cck_cca_thres = 0x83;
- else if (dig_tab->cck_fa_ma < 0x200)
- cur_cck_cca_thres = 0x40;
- }
-
- {
- odm_write_cck_cca_thres(dm, cur_cck_cca_thres);
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_DIG, "CCK_PD: cck_cca_th=((0x%x))\n\n",
- cur_cck_cca_thres);
-}
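
The CCK false-alarm moving average above is new = (3 * old + sample) / 4, written with shifts, and 0xffffffff marks an uninitialized average that the first sample simply seeds. A small standalone restatement of that update (names are mine):

#include <stdint.h>

#define FA_MA_UNINIT 0xffffffffu

/* 1/4-weight exponential moving average of the CCK false-alarm count:
 * new = (3 * old + sample) / 4, with the first sample seeding the average.
 */
static uint32_t fa_moving_average(uint32_t ma, uint32_t sample)
{
        if (ma == FA_MA_UNINIT)
                return sample;
        return ((ma << 1) + ma + sample) >> 2;
}
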
-
-void odm_write_cck_cca_thres(void *dm_void, u8 cur_cck_cca_thres)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dig_thres *dig_tab = &dm->dm_dig_table;
-
- if (dig_tab->cur_cck_cca_thres !=
-	    cur_cck_cca_thres) { /* modified by Guo.Mingzhi 2012-01-03 */
- odm_write_1byte(dm, ODM_REG(CCK_CCA, dm), cur_cck_cca_thres);
- dig_tab->cck_fa_ma = 0xffffffff;
- }
- dig_tab->pre_cck_cca_thres = dig_tab->cur_cck_cca_thres;
- dig_tab->cur_cck_cca_thres = cur_cck_cca_thres;
-}
-
-bool phydm_dig_go_up_check(void *dm_void)
-{
- bool ret = true;
-
- return ret;
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.h b/drivers/staging/rtlwifi/phydm/phydm_dig.h
deleted file mode 100644
index f618b4dd1fd3..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_dig.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMDIG_H__
-#define __PHYDMDIG_H__
-
-#define DIG_VERSION "1.32" /* 2016.09.02 YuChen. add CCK PD for 8197F*/
-
-/* Pause DIG & CCKPD */
-#define DM_DIG_MAX_PAUSE_TYPE 0x7
-
-enum dig_goupcheck_level {
- DIG_GOUPCHECK_LEVEL_0,
- DIG_GOUPCHECK_LEVEL_1,
- DIG_GOUPCHECK_LEVEL_2
-
-};
-
-struct dig_thres {
- bool is_stop_dig; /* for debug */
- bool is_ignore_dig;
- bool is_psd_in_progress;
-
- u8 dig_enable_flag;
- u8 dig_ext_port_stage;
-
- int rssi_low_thresh;
- int rssi_high_thresh;
-
- u32 fa_low_thresh;
- u32 fa_high_thresh;
-
- u8 cur_sta_connect_state;
- u8 pre_sta_connect_state;
- u8 cur_multi_sta_connect_state;
-
- u8 pre_ig_value;
- u8 cur_ig_value;
- u8 backup_ig_value; /* MP DIG */
- u8 bt30_cur_igi;
- u8 igi_backup;
-
- s8 backoff_val;
- s8 backoff_val_range_max;
- s8 backoff_val_range_min;
- u8 rx_gain_range_max;
- u8 rx_gain_range_min;
- u8 rssi_val_min;
-
- u8 pre_cck_cca_thres;
- u8 cur_cck_cca_thres;
- u8 pre_cck_pd_state;
- u8 cur_cck_pd_state;
- u8 cck_pd_backup;
- u8 pause_cckpd_level;
- u8 pause_cckpd_value[DM_DIG_MAX_PAUSE_TYPE + 1];
-
- u8 large_fa_hit;
- u8 large_fa_timeout; /*if (large_fa_hit), monitor "large_fa_timeout"
- *sec, if timeout, large_fa_hit=0
- */
- u8 forbidden_igi;
- u32 recover_cnt;
-
- u8 dig_dynamic_min_0;
- u8 dig_dynamic_min_1;
- bool is_media_connect_0;
- bool is_media_connect_1;
-
- u32 ant_div_rssi_max;
- u32 rssi_max;
-
- u8 *is_p2p_in_process;
-
- u8 pause_dig_level;
- u8 pause_dig_value[DM_DIG_MAX_PAUSE_TYPE + 1];
-
- u32 cck_fa_ma;
- enum dig_goupcheck_level dig_go_up_check_level;
- u8 aaa_default;
-
- u8 rf_gain_idx;
- u8 agc_table_idx;
- u8 big_jump_lmt[16];
- u8 enable_adjust_big_jump : 1;
- u8 big_jump_step1 : 3;
- u8 big_jump_step2 : 2;
- u8 big_jump_step3 : 2;
-};
-
-struct false_alarm_stat {
- u32 cnt_parity_fail;
- u32 cnt_rate_illegal;
- u32 cnt_crc8_fail;
- u32 cnt_mcs_fail;
- u32 cnt_ofdm_fail;
- u32 cnt_ofdm_fail_pre; /* For RTL8881A */
- u32 cnt_cck_fail;
- u32 cnt_all;
- u32 cnt_all_pre;
- u32 cnt_fast_fsync;
- u32 cnt_sb_search_fail;
- u32 cnt_ofdm_cca;
- u32 cnt_cck_cca;
- u32 cnt_cca_all;
- u32 cnt_bw_usc; /* Gary */
- u32 cnt_bw_lsc; /* Gary */
- u32 cnt_cck_crc32_error;
- u32 cnt_cck_crc32_ok;
- u32 cnt_ofdm_crc32_error;
- u32 cnt_ofdm_crc32_ok;
- u32 cnt_ht_crc32_error;
- u32 cnt_ht_crc32_ok;
- u32 cnt_vht_crc32_error;
- u32 cnt_vht_crc32_ok;
- u32 cnt_crc32_error_all;
- u32 cnt_crc32_ok_all;
- bool cck_block_enable;
- bool ofdm_block_enable;
- u32 dbg_port0;
- bool edcca_flag;
-};
-
-enum dm_dig_op {
- DIG_TYPE_THRESH_HIGH = 0,
- DIG_TYPE_THRESH_LOW = 1,
- DIG_TYPE_BACKOFF = 2,
- DIG_TYPE_RX_GAIN_MIN = 3,
- DIG_TYPE_RX_GAIN_MAX = 4,
- DIG_TYPE_ENABLE = 5,
- DIG_TYPE_DISABLE = 6,
- DIG_OP_TYPE_MAX
-};
-
-enum phydm_pause_type { PHYDM_PAUSE = BIT(0), PHYDM_RESUME = BIT(1) };
-
-enum phydm_pause_level {
- /* number of pause level can't exceed DM_DIG_MAX_PAUSE_TYPE */
- PHYDM_PAUSE_LEVEL_0 = 0,
- PHYDM_PAUSE_LEVEL_1 = 1,
- PHYDM_PAUSE_LEVEL_2 = 2,
- PHYDM_PAUSE_LEVEL_3 = 3,
- PHYDM_PAUSE_LEVEL_4 = 4,
- PHYDM_PAUSE_LEVEL_5 = 5,
- PHYDM_PAUSE_LEVEL_6 = 6,
- PHYDM_PAUSE_LEVEL_7 = DM_DIG_MAX_PAUSE_TYPE /* maximum level */
-};
-
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX_NIC 0x3e
-#define DM_DIG_MIN_NIC 0x20
-#define DM_DIG_MAX_OF_MIN_NIC 0x3e
-
-#define DM_DIG_MAX_AP 0x3e
-#define DM_DIG_MIN_AP 0x20
-#define DM_DIG_MAX_OF_MIN 0x2A /* 0x32 */
-#define DM_DIG_MIN_AP_DFS 0x20
-
-#define DM_DIG_MAX_NIC_HP 0x46
-#define DM_DIG_MIN_NIC_HP 0x2e
-
-#define DM_DIG_MAX_AP_HP 0x42
-#define DM_DIG_MIN_AP_HP 0x30
-
-/* vivi 92c&92d has different definition, 20110504
- * this is for 92c
- */
-#define DM_DIG_FA_TH0 0x200 /* 0x20 */
-
-#define DM_DIG_FA_TH1 0x300
-#define DM_DIG_FA_TH2 0x400
-/* this is for 92d */
-#define DM_DIG_FA_TH0_92D 0x100
-#define DM_DIG_FA_TH1_92D 0x400
-#define DM_DIG_FA_TH2_92D 0x600
-
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
-#define DM_DIG_FA_TH0_LPS 4 /* -> 4 in lps */
-#define DM_DIG_FA_TH1_LPS 15 /* -> 15 lps */
-#define DM_DIG_FA_TH2_LPS 30 /* -> 30 lps */
-#define RSSI_OFFSET_DIG 0x05
-#define LARGE_FA_TIMEOUT 60
-
-void odm_change_dynamic_init_gain_thresh(void *dm_void, u32 dm_type,
- u32 dm_value);
-
-void odm_write_dig(void *dm_void, u8 current_igi);
-
-void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type,
- enum phydm_pause_level pause_level, u8 igi_value);
-
-void odm_dig_init(void *dm_void);
-
-void odm_DIG(void *dm_void);
-
-void odm_dig_by_rssi_lps(void *dm_void);
-
-void odm_false_alarm_counter_statistics(void *dm_void);
-
-void odm_pause_cck_packet_detection(void *dm_void,
- enum phydm_pause_type pause_type,
- enum phydm_pause_level pause_level,
- u8 cck_pd_threshold);
-
-void odm_cck_packet_detection_thresh(void *dm_void);
-
-void odm_write_cck_cca_thres(void *dm_void, u8 cur_cck_cca_thres);
-
-bool phydm_dig_go_up_check(void *dm_void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h b/drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h
deleted file mode 100644
index 61e29df5c3f9..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMDYMICRXPATH_H__
-#define __PHYDMDYMICRXPATH_H__
-
-#define DYNAMIC_RX_PATH_VERSION "1.0" /*2016.07.15 Dino */
-
-#define DRP_RSSI_TH 35
-
-#define INIT_DRP_TIMMER 0
-#define CANCEL_DRP_TIMMER 1
-#define RELEASE_DRP_TIMMER 2
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c b/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c
deleted file mode 100644
index d3f74d1506b4..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-static inline void phydm_update_rf_state(struct phy_dm_struct *dm,
- struct dyn_pwr_saving *dm_ps_table,
- int _rssi_up_bound,
- int _rssi_low_bound,
- int _is_force_in_normal)
-{
- if (_is_force_in_normal) {
- dm_ps_table->cur_rf_state = rf_normal;
- return;
- }
-
- if (dm->rssi_min == 0xFF) {
- dm_ps_table->cur_rf_state = RF_MAX;
- return;
- }
-
- if (dm_ps_table->pre_rf_state == rf_normal) {
- if (dm->rssi_min >= _rssi_up_bound)
- dm_ps_table->cur_rf_state = rf_save;
- else
- dm_ps_table->cur_rf_state = rf_normal;
- } else {
- if (dm->rssi_min <= _rssi_low_bound)
- dm_ps_table->cur_rf_state = rf_normal;
- else
- dm_ps_table->cur_rf_state = rf_save;
- }
-}
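
phydm_update_rf_state() is a two-threshold hysteresis: while in the normal state the RF only drops into the power-save state once rssi_min reaches the upper bound, and it only returns to normal once rssi_min falls to the lower bound (30/25 by default in odm_rf_saving() below, 50/45 for patch_id 40). A standalone sketch of just that state update, omitting the force-normal and RSSI-unknown (0xFF) cases:

#include <stdint.h>

enum rf_ps_state { RF_PS_NORMAL, RF_PS_SAVE };

/* Two-threshold hysteresis: the gap between up_bound and low_bound keeps
 * the RF setting from toggling when the RSSI hovers near one threshold.
 */
static enum rf_ps_state next_rf_ps_state(enum rf_ps_state cur, uint8_t rssi,
                                         uint8_t up_bound, uint8_t low_bound)
{
        if (cur == RF_PS_NORMAL)
                return (rssi >= up_bound) ? RF_PS_SAVE : RF_PS_NORMAL;
        return (rssi <= low_bound) ? RF_PS_NORMAL : RF_PS_SAVE;
}
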
-
-void odm_dynamic_bb_power_saving_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dyn_pwr_saving *dm_ps_table = &dm->dm_ps_table;
-
- dm_ps_table->pre_cca_state = CCA_MAX;
- dm_ps_table->cur_cca_state = CCA_MAX;
- dm_ps_table->pre_rf_state = RF_MAX;
- dm_ps_table->cur_rf_state = RF_MAX;
- dm_ps_table->rssi_val_min = 0;
- dm_ps_table->initialize = 0;
-}
-
-void odm_rf_saving(void *dm_void, u8 is_force_in_normal)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dyn_pwr_saving *dm_ps_table = &dm->dm_ps_table;
- u8 rssi_up_bound = 30;
- u8 rssi_low_bound = 25;
-
- if (dm->patch_id == 40) { /* RT_CID_819x_FUNAI_TV */
- rssi_up_bound = 50;
- rssi_low_bound = 45;
- }
- if (dm_ps_table->initialize == 0) {
- dm_ps_table->reg874 =
- (odm_get_bb_reg(dm, 0x874, MASKDWORD) & 0x1CC000) >> 14;
- dm_ps_table->regc70 =
- (odm_get_bb_reg(dm, 0xc70, MASKDWORD) & BIT(3)) >> 3;
- dm_ps_table->reg85c =
- (odm_get_bb_reg(dm, 0x85c, MASKDWORD) & 0xFF000000) >>
- 24;
- dm_ps_table->rega74 =
- (odm_get_bb_reg(dm, 0xa74, MASKDWORD) & 0xF000) >> 12;
- /* Reg818 = phy_query_bb_reg(adapter, 0x818, MASKDWORD); */
- dm_ps_table->initialize = 1;
- }
-
- phydm_update_rf_state(dm, dm_ps_table, rssi_up_bound, rssi_low_bound,
- is_force_in_normal);
-
- if (dm_ps_table->pre_rf_state != dm_ps_table->cur_rf_state) {
- if (dm_ps_table->cur_rf_state == rf_save) {
- odm_set_bb_reg(dm, 0x874, 0x1C0000,
- 0x2); /* reg874[20:18]=3'b010 */
- odm_set_bb_reg(dm, 0xc70, BIT(3),
- 0); /* regc70[3]=1'b0 */
- odm_set_bb_reg(dm, 0x85c, 0xFF000000,
- 0x63); /* reg85c[31:24]=0x63 */
- odm_set_bb_reg(dm, 0x874, 0xC000,
- 0x2); /* reg874[15:14]=2'b10 */
- odm_set_bb_reg(dm, 0xa74, 0xF000,
- 0x3); /* RegA75[7:4]=0x3 */
- odm_set_bb_reg(dm, 0x818, BIT(28),
- 0x0); /* Reg818[28]=1'b0 */
- odm_set_bb_reg(dm, 0x818, BIT(28),
- 0x1); /* Reg818[28]=1'b1 */
- } else {
- odm_set_bb_reg(dm, 0x874, 0x1CC000,
- dm_ps_table->reg874);
- odm_set_bb_reg(dm, 0xc70, BIT(3), dm_ps_table->regc70);
- odm_set_bb_reg(dm, 0x85c, 0xFF000000,
- dm_ps_table->reg85c);
- odm_set_bb_reg(dm, 0xa74, 0xF000, dm_ps_table->rega74);
- odm_set_bb_reg(dm, 0x818, BIT(28), 0x0);
- }
- dm_ps_table->pre_rf_state = dm_ps_table->cur_rf_state;
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h b/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h
deleted file mode 100644
index 3ea68066ccdb..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMDYNAMICBBPOWERSAVING_H__
-#define __PHYDMDYNAMICBBPOWERSAVING_H__
-
-#define DYNAMIC_BBPWRSAV_VERSION "1.1"
-
-struct dyn_pwr_saving {
- u8 pre_cca_state;
- u8 cur_cca_state;
-
- u8 pre_rf_state;
- u8 cur_rf_state;
-
- int rssi_val_min;
-
- u8 initialize;
- u32 reg874, regc70, reg85c, rega74;
-};
-
-#define dm_rf_saving odm_rf_saving
-
-void odm_rf_saving(void *dm_void, u8 is_force_in_normal);
-
-void odm_dynamic_bb_power_saving_init(void *dm_void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c b/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c
deleted file mode 100644
index afe650e5313f..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c
+++ /dev/null
@@ -1,91 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-void odm_dynamic_tx_power_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- dm->last_dtp_lvl = tx_high_pwr_level_normal;
- dm->dynamic_tx_high_power_lvl = tx_high_pwr_level_normal;
- dm->tx_agc_ofdm_18_6 =
- odm_get_bb_reg(dm, 0xC24, MASKDWORD); /*TXAGC {18M 12M 9M 6M}*/
-}
-
-void odm_dynamic_tx_power_save_power_index(void *dm_void) {}
-
-void odm_dynamic_tx_power_restore_power_index(void *dm_void) {}
-
-void odm_dynamic_tx_power_write_power_index(void *dm_void, u8 value)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 index;
- u32 power_index_reg[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};
-
- for (index = 0; index < 6; index++)
- odm_write_1byte(dm, power_index_reg[index], value);
-}
-
-static void odm_dynamic_tx_power_nic_ce(void *dm_void) {}
-
-void odm_dynamic_tx_power(void *dm_void)
-{
- /* */
- /* For AP/ADSL use struct rtl8192cd_priv* */
-	/* For CE/NIC use void * */
- /* */
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (!(dm->support_ability & ODM_BB_DYNAMIC_TXPWR))
- return;
-	/* 2011/09/29 MH In the first stage of HW integration, we provide 4
-	 * different handles that operate at the same time.
-	 * In stage 2/3, we need to provide a universal interface and merge
-	 * all HW dynamic mechanisms.
-	 */
- switch (dm->support_platform) {
- case ODM_WIN:
- odm_dynamic_tx_power_nic(dm);
- break;
- case ODM_CE:
- odm_dynamic_tx_power_nic_ce(dm);
- break;
- case ODM_AP:
- odm_dynamic_tx_power_ap(dm);
- break;
- default:
- break;
- }
-}
-
-void odm_dynamic_tx_power_nic(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (!(dm->support_ability & ODM_BB_DYNAMIC_TXPWR))
- return;
-}
-
-void odm_dynamic_tx_power_ap(void *dm_void) {}
-
-void odm_dynamic_tx_power_8821(void *dm_void, u8 *desc, u8 mac_id) {}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h b/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h
deleted file mode 100644
index afde69db6ad2..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMDYNAMICTXPOWER_H__
-#define __PHYDMDYNAMICTXPOWER_H__
-
-/*#define DYNAMIC_TXPWR_VERSION "1.0"*/
-/*#define DYNAMIC_TXPWR_VERSION "1.3" */ /*2015.08.26, Add 8814 Dynamic TX pwr*/
-#define DYNAMIC_TXPWR_VERSION "1.4" /*2015.11.06,Add CE 8821A Dynamic TX pwr*/
-
-#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
-#define TX_POWER_NEAR_FIELD_THRESH_LVL1 60
-
-#define tx_high_pwr_level_normal 0
-#define tx_high_pwr_level_level1 1
-#define tx_high_pwr_level_level2 2
-
-#define tx_high_pwr_level_bt1 3
-#define tx_high_pwr_level_bt2 4
-#define tx_high_pwr_level_15 5
-#define tx_high_pwr_level_35 6
-#define tx_high_pwr_level_50 7
-#define tx_high_pwr_level_70 8
-#define tx_high_pwr_level_100 9
-
-void odm_dynamic_tx_power_init(void *dm_void);
-
-void odm_dynamic_tx_power_restore_power_index(void *dm_void);
-
-void odm_dynamic_tx_power_nic(void *dm_void);
-
-void odm_dynamic_tx_power_save_power_index(void *dm_void);
-
-void odm_dynamic_tx_power_write_power_index(void *dm_void, u8 value);
-
-void odm_dynamic_tx_power_8821(void *dm_void, u8 *desc, u8 mac_id);
-
-void odm_dynamic_tx_power(void *dm_void);
-
-void odm_dynamic_tx_power_ap(void *dm_void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c b/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c
deleted file mode 100644
index b5bd4fb20c90..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c
+++ /dev/null
@@ -1,128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-void odm_edca_turbo_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- dm->dm_edca_table.is_current_turbo_edca = false;
- dm->dm_edca_table.is_cur_rdl_state = false;
-
- ODM_RT_TRACE(dm, ODM_COMP_EDCA_TURBO, "Original VO PARAM: 0x%x\n",
- odm_read_4byte(dm, ODM_EDCA_VO_PARAM));
- ODM_RT_TRACE(dm, ODM_COMP_EDCA_TURBO, "Original VI PARAM: 0x%x\n",
- odm_read_4byte(dm, ODM_EDCA_VI_PARAM));
- ODM_RT_TRACE(dm, ODM_COMP_EDCA_TURBO, "Original BE PARAM: 0x%x\n",
- odm_read_4byte(dm, ODM_EDCA_BE_PARAM));
- ODM_RT_TRACE(dm, ODM_COMP_EDCA_TURBO, "Original BK PARAM: 0x%x\n",
- odm_read_4byte(dm, ODM_EDCA_BK_PARAM));
-
-} /* ODM_InitEdcaTurbo */
-
-void odm_edca_turbo_check(void *dm_void)
-{
- /* For AP/ADSL use struct rtl8192cd_priv* */
-	/* For CE/NIC use void * */
-
-	/* 2011/09/29 MH In the first stage of HW integration, we provide 4
-	 * different handles that operate at the same time.
-	 * In stage 2/3, we need to provide a universal interface and merge
-	 * all HW dynamic mechanisms.
-	 */
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- ODM_RT_TRACE(dm, ODM_COMP_EDCA_TURBO,
- "%s========================>\n", __func__);
-
- if (!(dm->support_ability & ODM_MAC_EDCA_TURBO))
- return;
-
- switch (dm->support_platform) {
- case ODM_WIN:
-
- break;
-
- case ODM_CE:
- odm_edca_turbo_check_ce(dm);
- break;
- }
- ODM_RT_TRACE(dm, ODM_COMP_EDCA_TURBO,
- "<========================%s\n", __func__);
-
-} /* odm_CheckEdcaTurbo */
-
-void odm_edca_turbo_check_ce(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- u64 cur_txok_cnt = 0;
- u64 cur_rxok_cnt = 0;
- u32 edca_be_ul = 0x5ea42b;
- u32 edca_be_dl = 0x5ea42b;
- u32 edca_be = 0x5ea42b;
- bool is_cur_rdlstate;
- bool edca_turbo_on = false;
-
- if (dm->wifi_test)
- return;
-
- if (!dm->is_linked) {
- rtlpriv->dm.is_any_nonbepkts = false;
- return;
- }
-
- if (rtlpriv->dm.dbginfo.num_non_be_pkt > 0x100)
- rtlpriv->dm.is_any_nonbepkts = true;
- rtlpriv->dm.dbginfo.num_non_be_pkt = 0;
-
- cur_txok_cnt = rtlpriv->stats.txbytesunicast_inperiod;
- cur_rxok_cnt = rtlpriv->stats.rxbytesunicast_inperiod;
-
- /*b_bias_on_rx = false;*/
- edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) &&
- (!rtlpriv->dm.disable_framebursting)) ?
- true :
- false;
-
- if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
- goto label_exit;
-
- if (edca_turbo_on) {
- is_cur_rdlstate =
- (cur_rxok_cnt > cur_txok_cnt * 4) ? true : false;
-
- edca_be = is_cur_rdlstate ? edca_be_dl : edca_be_ul;
- rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM_8822B, edca_be);
- rtlpriv->dm.is_cur_rdlstate = is_cur_rdlstate;
- rtlpriv->dm.current_turbo_edca = true;
- } else {
- if (rtlpriv->dm.current_turbo_edca) {
- u8 tmp = AC0_BE;
-
- rtlpriv->cfg->ops->set_hw_reg(rtlpriv->hw,
- HW_VAR_AC_PARAM,
- (u8 *)(&tmp));
- rtlpriv->dm.current_turbo_edca = false;
- }
- }
-
-label_exit:
- rtlpriv->dm.is_any_nonbepkts = false;
-}
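
The check above classifies the last measurement period as download-dominant when unicast RX bytes exceed four times the TX bytes, then programs either the downlink or the uplink BE EDCA parameter set (both 0x5ea42b in this copy of the code). A minimal sketch of that decision, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* Download-dominant when RX outweighs TX by more than a factor of four. */
static bool traffic_is_download(uint64_t rx_bytes, uint64_t tx_bytes)
{
        return rx_bytes > tx_bytes * 4;
}

/* Pick the BE EDCA register value for the detected traffic direction. */
static uint32_t pick_edca_be(bool download, uint32_t edca_dl, uint32_t edca_ul)
{
        return download ? edca_dl : edca_ul;
}
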
diff --git a/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h b/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h
deleted file mode 100644
index c10b5fcc6f4e..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMEDCATURBOCHECK_H__
-#define __PHYDMEDCATURBOCHECK_H__
-
-/*#define EDCATURBO_VERSION "2.1"*/
-#define EDCATURBO_VERSION "2.3" /*2015.07.29 by YuChen*/
-
-struct edca_turbo {
- bool is_current_turbo_edca;
- bool is_cur_rdl_state;
-
- u32 prv_traffic_idx; /* edca turbo */
-};
-
-void odm_edca_turbo_check(void *dm_void);
-void odm_edca_turbo_init(void *dm_void);
-
-void odm_edca_turbo_check_ce(void *dm_void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_hwconfig.c b/drivers/staging/rtlwifi/phydm/phydm_hwconfig.c
deleted file mode 100644
index a4ad39ab3ddf..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_hwconfig.c
+++ /dev/null
@@ -1,1848 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-#define READ_AND_CONFIG_MP(ic, txt) (odm_read_and_config_mp_##ic##txt(dm))
-#define READ_AND_CONFIG_TC(ic, txt) (odm_read_and_config_tc_##ic##txt(dm))
-
-#define READ_AND_CONFIG READ_AND_CONFIG_MP
-
-#define READ_FIRMWARE_MP(ic, txt) \
- (odm_read_firmware_mp_##ic##txt(dm, p_firmware, size))
-#define READ_FIRMWARE_TC(ic, txt) \
- (odm_read_firmware_tc_##ic##txt(dm, p_firmware, size))
-
-#define READ_FIRMWARE READ_FIRMWARE_MP
-
-#define GET_VERSION_MP(ic, txt) (odm_get_version_mp_##ic##txt())
-#define GET_VERSION_TC(ic, txt) (odm_get_version_tc_##ic##txt())
-
-#define GET_VERSION(ic, txt) GET_VERSION_MP(ic, txt)
-
-static u32 phydm_process_rssi_pwdb(struct phy_dm_struct *dm,
- struct rtl_sta_info *entry,
- struct dm_per_pkt_info *pktinfo,
- u32 undecorated_smoothed_ofdm,
- u32 undecorated_smoothed_cck)
-{
- u32 weighting = 0, undecorated_smoothed_pwdb;
- /* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */
-
- if (entry->rssi_stat.ofdm_pkt == 64) {
- /* speed up when all packets are OFDM */
- undecorated_smoothed_pwdb = undecorated_smoothed_ofdm;
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR,
- "PWDB_0[%d] = (( %d ))\n", pktinfo->station_id,
- undecorated_smoothed_cck);
- } else {
- if (entry->rssi_stat.valid_bit < 64)
- entry->rssi_stat.valid_bit++;
-
- if (entry->rssi_stat.valid_bit == 64) {
- weighting = ((entry->rssi_stat.ofdm_pkt) > 4) ?
- 64 :
- (entry->rssi_stat.ofdm_pkt << 4);
- undecorated_smoothed_pwdb =
- (weighting * undecorated_smoothed_ofdm +
- (64 - weighting) * undecorated_smoothed_cck) >>
- 6;
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR,
- "PWDB_1[%d] = (( %d )), W = (( %d ))\n",
- pktinfo->station_id,
- undecorated_smoothed_cck, weighting);
- } else {
- if (entry->rssi_stat.valid_bit != 0)
- undecorated_smoothed_pwdb =
- (entry->rssi_stat.ofdm_pkt *
- undecorated_smoothed_ofdm +
- (entry->rssi_stat.valid_bit -
- entry->rssi_stat.ofdm_pkt) *
- undecorated_smoothed_cck) /
- entry->rssi_stat.valid_bit;
- else
- undecorated_smoothed_pwdb = 0;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_RSSI_MONITOR,
- "PWDB_2[%d] = (( %d )), ofdm_pkt = (( %d )), Valid_Bit = (( %d ))\n",
- pktinfo->station_id, undecorated_smoothed_cck,
- entry->rssi_stat.ofdm_pkt,
- entry->rssi_stat.valid_bit);
- }
- }
-
- return undecorated_smoothed_pwdb;
-}
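
Once 64 packets have been sampled (valid_bit == 64), the blend above weights the OFDM average by ofdm_pkt * 16 out of 64, saturating at 64/64 after more than four OFDM packets, so a handful of OFDM frames is enough to dominate the CCK estimate. A standalone restatement of that saturated-weight case (names are mine):

#include <stdint.h>

/* Blend the OFDM and CCK RSSI averages in units of 1/64.  The OFDM weight
 * is ofdm_pkt * 16, capped at 64 once more than four OFDM packets were seen.
 */
static uint32_t blend_pwdb(uint32_t ofdm_pkt, uint32_t ofdm_avg, uint32_t cck_avg)
{
        uint32_t w = (ofdm_pkt > 4) ? 64 : (ofdm_pkt << 4);

        return (w * ofdm_avg + (64 - w) * cck_avg) >> 6;
}
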
-
-static u32 phydm_process_rssi_cck(struct phy_dm_struct *dm,
- struct dm_phy_status_info *phy_info,
- struct rtl_sta_info *entry,
- u32 undecorated_smoothed_cck)
-{
- u32 rssi_ave;
- u8 i;
-
- rssi_ave = phy_info->rx_pwdb_all;
- dm->rssi_a = (u8)phy_info->rx_pwdb_all;
- dm->rssi_b = 0xFF;
- dm->rssi_c = 0xFF;
- dm->rssi_d = 0xFF;
-
- if (entry->rssi_stat.cck_pkt <= 63)
- entry->rssi_stat.cck_pkt++;
-
- /* 1 Process CCK RSSI */
- if (undecorated_smoothed_cck <= 0) { /* initialize */
- undecorated_smoothed_cck = phy_info->rx_pwdb_all;
- entry->rssi_stat.cck_sum_power =
- (u16)phy_info->rx_pwdb_all; /*reset*/
- entry->rssi_stat.cck_pkt = 1; /*reset*/
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR, "CCK_INIT: (( %d ))\n",
- undecorated_smoothed_cck);
- } else if (entry->rssi_stat.cck_pkt <= CCK_RSSI_INIT_COUNT) {
- entry->rssi_stat.cck_sum_power =
- entry->rssi_stat.cck_sum_power +
- (u16)phy_info->rx_pwdb_all;
- undecorated_smoothed_cck = entry->rssi_stat.cck_sum_power /
- entry->rssi_stat.cck_pkt;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_RSSI_MONITOR,
- "CCK_0: (( %d )), SumPow = (( %d )), cck_pkt = (( %d ))\n",
- undecorated_smoothed_cck,
- entry->rssi_stat.cck_sum_power,
- entry->rssi_stat.cck_pkt);
- } else {
- if (phy_info->rx_pwdb_all > (u32)undecorated_smoothed_cck) {
- undecorated_smoothed_cck =
- (((undecorated_smoothed_cck) *
- (RX_SMOOTH_FACTOR - 1)) +
- (phy_info->rx_pwdb_all)) /
- (RX_SMOOTH_FACTOR);
- undecorated_smoothed_cck = undecorated_smoothed_cck + 1;
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR,
- "CCK_1: (( %d ))\n",
- undecorated_smoothed_cck);
- } else {
- undecorated_smoothed_cck =
- (((undecorated_smoothed_cck) *
- (RX_SMOOTH_FACTOR - 1)) +
- (phy_info->rx_pwdb_all)) /
- (RX_SMOOTH_FACTOR);
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR,
- "CCK_2: (( %d ))\n",
- undecorated_smoothed_cck);
- }
- }
-
- i = 63;
- entry->rssi_stat.ofdm_pkt -=
- (u8)((entry->rssi_stat.packet_map >> i) & BIT(0));
- entry->rssi_stat.packet_map = entry->rssi_stat.packet_map << 1;
- return undecorated_smoothed_cck;
-}
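
The CCK (and, below, OFDM) smoothing is the usual IIR filter new = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR, plus one when the sample exceeds the running average so integer truncation cannot keep the average from rising. A standalone sketch; RX_SMOOTH_FACTOR is defined elsewhere in the driver headers and the value 20 here is only an assumption:

#include <stdint.h>

#define RX_SMOOTH_FACTOR 20     /* assumed value; the real one is in the driver headers */

/* IIR smoothing of an RSSI/PWDB sample with an upward rounding correction. */
static uint32_t smooth_rssi(uint32_t avg, uint32_t sample)
{
        uint32_t next = (avg * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR;

        if (sample > avg)
                next++;         /* let the average climb despite truncation */
        return next;
}
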
-
-static u32 phydm_process_rssi_ofdm(struct phy_dm_struct *dm,
- struct dm_phy_status_info *phy_info,
- struct rtl_sta_info *entry,
- u32 undecorated_smoothed_ofdm)
-{
- u32 rssi_ave;
- u8 rssi_max, rssi_min, i;
-
- if (dm->support_ic_type & (ODM_RTL8814A | ODM_RTL8822B)) {
- u8 rx_count = 0;
- u32 rssi_linear = 0;
-
- if (dm->rx_ant_status & ODM_RF_A) {
- dm->rssi_a = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_A];
- rx_count++;
- rssi_linear += odm_convert_to_linear(
- phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_A]);
- } else {
- dm->rssi_a = 0;
- }
-
- if (dm->rx_ant_status & ODM_RF_B) {
- dm->rssi_b = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_B];
- rx_count++;
- rssi_linear += odm_convert_to_linear(
- phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_B]);
- } else {
- dm->rssi_b = 0;
- }
-
- if (dm->rx_ant_status & ODM_RF_C) {
- dm->rssi_c = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_C];
- rx_count++;
- rssi_linear += odm_convert_to_linear(
- phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_C]);
- } else {
- dm->rssi_c = 0;
- }
-
- if (dm->rx_ant_status & ODM_RF_D) {
- dm->rssi_d = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_D];
- rx_count++;
- rssi_linear += odm_convert_to_linear(
- phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_D]);
- } else {
- dm->rssi_d = 0;
- }
-
- /* Calculate average RSSI */
- switch (rx_count) {
- case 2:
- rssi_linear = (rssi_linear >> 1);
- break;
- case 3:
- /* rssi_linear/3 ~ rssi_linear*11/32 */
- rssi_linear = ((rssi_linear) + (rssi_linear << 1) +
- (rssi_linear << 3)) >>
- 5;
- break;
- case 4:
- rssi_linear = (rssi_linear >> 2);
- break;
- }
-
- rssi_ave = odm_convert_to_db(rssi_linear);
- } else {
- if (phy_info->rx_mimo_signal_strength[ODM_RF_PATH_B] == 0) {
- rssi_ave = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_A];
- dm->rssi_a = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_A];
- dm->rssi_b = 0;
- } else {
- dm->rssi_a = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_A];
- dm->rssi_b = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_B];
-
- if (phy_info->rx_mimo_signal_strength[ODM_RF_PATH_A] >
- phy_info->rx_mimo_signal_strength[ODM_RF_PATH_B]) {
- rssi_max = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_A];
- rssi_min = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_B];
- } else {
- rssi_max = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_B];
- rssi_min = phy_info->rx_mimo_signal_strength
- [ODM_RF_PATH_A];
- }
- if ((rssi_max - rssi_min) < 3)
- rssi_ave = rssi_max;
- else if ((rssi_max - rssi_min) < 6)
- rssi_ave = rssi_max - 1;
- else if ((rssi_max - rssi_min) < 10)
- rssi_ave = rssi_max - 2;
- else
- rssi_ave = rssi_max - 3;
- }
- }
-
- /* 1 Process OFDM RSSI */
- if (undecorated_smoothed_ofdm <= 0) { /* initialize */
- undecorated_smoothed_ofdm = phy_info->rx_pwdb_all;
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR, "OFDM_INIT: (( %d ))\n",
- undecorated_smoothed_ofdm);
- } else {
- if (phy_info->rx_pwdb_all > (u32)undecorated_smoothed_ofdm) {
- undecorated_smoothed_ofdm =
- (((undecorated_smoothed_ofdm) *
- (RX_SMOOTH_FACTOR - 1)) +
- (rssi_ave)) /
- (RX_SMOOTH_FACTOR);
- undecorated_smoothed_ofdm =
- undecorated_smoothed_ofdm + 1;
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR,
- "OFDM_1: (( %d ))\n",
- undecorated_smoothed_ofdm);
- } else {
- undecorated_smoothed_ofdm =
- (((undecorated_smoothed_ofdm) *
- (RX_SMOOTH_FACTOR - 1)) +
- (rssi_ave)) /
- (RX_SMOOTH_FACTOR);
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR,
- "OFDM_2: (( %d ))\n",
- undecorated_smoothed_ofdm);
- }
- }
-
- if (entry->rssi_stat.ofdm_pkt != 64) {
- i = 63;
- entry->rssi_stat.ofdm_pkt -=
- (u8)(((entry->rssi_stat.packet_map >> i) & BIT(0)) - 1);
- }
-
- entry->rssi_stat.packet_map =
- (entry->rssi_stat.packet_map << 1) | BIT(0);
- return undecorated_smoothed_ofdm;
-}
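
For the 3- and 4-path ICs the per-path signal strengths are summed in the linear domain and then divided by the number of active paths; the divide-by-three is approximated as multiplication by 11/32 with shifts so no real division is needed. A standalone sketch of that averaging step (names are mine):

#include <stdint.h>

/* Average a linear-domain RSSI sum over the active RX paths.  1, 2 and 4
 * paths are plain shifts; 3 paths uses (x + 2x + 8x) / 32 = 11x/32 ~ x/3.
 */
static uint32_t average_linear_rssi(uint32_t sum, unsigned int paths)
{
        switch (paths) {
        case 2:
                return sum >> 1;
        case 3:
                return (sum + (sum << 1) + (sum << 3)) >> 5;
        case 4:
                return sum >> 2;
        default:
                return sum;     /* zero or one path: nothing to average */
        }
}
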
-
-static u8 odm_evm_db_to_percentage(s8);
-static u8 odm_evm_dbm_jaguar_series(s8);
-
-static inline u32 phydm_get_rssi_average(struct phy_dm_struct *dm,
- struct dm_phy_status_info *phy_info)
-{
- u8 rssi_max = 0, rssi_min = 0;
-
- dm->rssi_a = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_A];
- dm->rssi_b = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_B];
-
- if (phy_info->rx_mimo_signal_strength[ODM_RF_PATH_A] >
- phy_info->rx_mimo_signal_strength[ODM_RF_PATH_B]) {
- rssi_max = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_A];
- rssi_min = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_B];
- } else {
- rssi_max = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_B];
- rssi_min = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_A];
- }
- if ((rssi_max - rssi_min) < 3)
- return rssi_max;
- else if ((rssi_max - rssi_min) < 6)
- return rssi_max - 1;
- else if ((rssi_max - rssi_min) < 10)
- return rssi_max - 2;
- else
- return rssi_max - 3;
-}
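
The two-path combiner above (and the equivalent branch in phydm_process_rssi_ofdm()) reports the stronger path, backed off by up to 3 dB as the gap to the weaker path widens. A standalone restatement (names are mine):

#include <stdint.h>

/* Combine two per-path RSSI values: take the stronger path, minus a
 * 0..3 dB penalty that grows with the spread between the two paths.
 */
static uint8_t combine_two_path_rssi(uint8_t a, uint8_t b)
{
        uint8_t max = (a > b) ? a : b;
        uint8_t gap = max - ((a > b) ? b : a);

        if (gap < 3)
                return max;
        if (gap < 6)
                return max - 1;
        if (gap < 10)
                return max - 2;
        return max - 3;
}
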
-
-static inline u8 phydm_get_evm_dbm(u8 i, u8 EVM,
- struct phy_status_rpt_8812 *phy_sta_rpt,
- struct dm_phy_status_info *phy_info)
-{
- if (i < ODM_RF_PATH_C)
- return odm_evm_dbm_jaguar_series(phy_sta_rpt->rxevm[i]);
- else
- return odm_evm_dbm_jaguar_series(phy_sta_rpt->rxevm_cd[i - 2]);
- /*RT_DISP(FRX, RX_PHY_SQ, ("RXRATE=%x RXEVM=%x EVM=%s%d\n",*/
- /*pktinfo->data_rate, phy_sta_rpt->rxevm[i], "%", EVM));*/
-}
-
-static inline u8 phydm_get_odm_evm(u8 i, struct dm_per_pkt_info *pktinfo,
- struct phy_status_rpt_8812 *phy_sta_rpt)
-{
- u8 evm = 0;
-
- if (pktinfo->data_rate >= ODM_RATE6M &&
- pktinfo->data_rate <= ODM_RATE54M) {
- if (i == ODM_RF_PATH_A) {
- evm = odm_evm_db_to_percentage(
- (phy_sta_rpt->sigevm)); /*dbm*/
- evm += 20;
- if (evm > 100)
- evm = 100;
- }
- } else {
- if (i < ODM_RF_PATH_C) {
- if (phy_sta_rpt->rxevm[i] == -128)
- phy_sta_rpt->rxevm[i] = -25;
- evm = odm_evm_db_to_percentage(
- (phy_sta_rpt->rxevm[i])); /*dbm*/
- } else {
- if (phy_sta_rpt->rxevm_cd[i - 2] == -128)
- phy_sta_rpt->rxevm_cd[i - 2] = -25;
- evm = odm_evm_db_to_percentage(
- (phy_sta_rpt->rxevm_cd[i - 2])); /*dbm*/
- }
- }
-
- return evm;
-}
-
-static inline s8 phydm_get_rx_pwr(u8 LNA_idx, u8 VGA_idx, u8 cck_highpwr)
-{
- switch (LNA_idx) {
- case 7:
- if (VGA_idx <= 27)
- return -100 + 2 * (27 - VGA_idx); /*VGA_idx = 27~2*/
- else
- return -100;
- break;
- case 6:
- return -48 + 2 * (2 - VGA_idx); /*VGA_idx = 2~0*/
- case 5:
- return -42 + 2 * (7 - VGA_idx); /*VGA_idx = 7~5*/
- case 4:
- return -36 + 2 * (7 - VGA_idx); /*VGA_idx = 7~4*/
- case 3:
- return -24 + 2 * (7 - VGA_idx); /*VGA_idx = 7~0*/
- case 2:
- if (cck_highpwr)
- return -12 + 2 * (5 - VGA_idx); /*VGA_idx = 5~0*/
- else
- return -6 + 2 * (5 - VGA_idx);
- break;
- case 1:
- return 8 - 2 * VGA_idx;
- case 0:
- return 14 - 2 * VGA_idx;
- default:
- break;
- }
- return 0;
-}
-
-static inline u8 phydm_adjust_pwdb(u8 cck_highpwr, u8 pwdb_all)
-{
- if (!cck_highpwr) {
- if (pwdb_all >= 80)
- return ((pwdb_all - 80) << 1) + ((pwdb_all - 80) >> 1) +
- 80;
- else if ((pwdb_all <= 78) && (pwdb_all >= 20))
- return pwdb_all + 3;
- if (pwdb_all > 100)
- return 100;
- }
- return pwdb_all;
-}
-
-static inline u8
-phydm_get_signal_quality_8812(struct dm_phy_status_info *phy_info,
- struct phy_dm_struct *dm,
- struct phy_status_rpt_8812 *phy_sta_rpt)
-{
- u8 sq_rpt;
-
- if (phy_info->rx_pwdb_all > 40 && !dm->is_in_hct_test)
- return 100;
-
- sq_rpt = phy_sta_rpt->pwdb_all;
-
- if (sq_rpt > 64)
- return 0;
- else if (sq_rpt < 20)
- return 100;
- else
- return ((64 - sq_rpt) * 100) / 44;
-}
-
-static inline u8
-phydm_get_signal_quality_8192(struct dm_phy_status_info *phy_info,
- struct phy_dm_struct *dm,
- struct phy_status_rpt_8192cd *phy_sta_rpt)
-{
- u8 sq_rpt;
-
- if (phy_info->rx_pwdb_all > 40 && !dm->is_in_hct_test)
- return 100;
-
- sq_rpt = phy_sta_rpt->cck_sig_qual_ofdm_pwdb_all;
-
- if (sq_rpt > 64)
- return 0;
- else if (sq_rpt < 20)
- return 100;
- else
- return ((64 - sq_rpt) * 100) / 44;
-}
-
-static u8 odm_query_rx_pwr_percentage(s8 ant_power)
-{
- if ((ant_power <= -100) || (ant_power >= 20))
- return 0;
- else if (ant_power >= 0)
- return 100;
- else
- return 100 + ant_power;
-}
-
-static u8 odm_evm_db_to_percentage(s8 value)
-{
- /* -33dB~0dB to 0%~99% */
- s8 ret_val;
-
- ret_val = value;
- ret_val /= 2;
-
- if (ret_val >= 0)
- ret_val = 0;
-
- if (ret_val <= -33)
- ret_val = -33;
-
- ret_val = 0 - ret_val;
- ret_val *= 3;
-
- if (ret_val == 99)
- ret_val = 100;
-
- return (u8)ret_val;
-}
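
odm_evm_db_to_percentage() maps an EVM report in the -33 dB..0 dB range onto 0..100 percent: halve the raw value, clamp it, negate, multiply by three, and bump 99 up to 100. A standalone restatement of that mapping:

#include <stdint.h>

/* Map a raw EVM report (roughly -66..0, i.e. -33 dB..0 dB after halving)
 * onto a 0..100 percent scale.
 */
static uint8_t evm_db_to_percent(int8_t value)
{
        int v = value / 2;

        if (v >= 0)
                v = 0;
        if (v <= -33)
                v = -33;
        v = -v * 3;             /* 0..99 */
        if (v == 99)
                v = 100;
        return (uint8_t)v;
}
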
-
-static u8 odm_evm_dbm_jaguar_series(s8 value)
-{
- s8 ret_val = value;
-
- /* -33dB~0dB to 33dB ~ 0dB */
- if (ret_val == -128)
- ret_val = 127;
- else if (ret_val < 0)
- ret_val = 0 - ret_val;
-
- ret_val = ret_val >> 1;
- return (u8)ret_val;
-}
-
-static s16 odm_cfo(s8 value)
-{
- s16 ret_val;
-
- if (value < 0) {
- ret_val = 0 - value;
- ret_val = (ret_val << 1) + (ret_val >> 1); /* *2.5~=312.5/2^7 */
- ret_val =
- ret_val | BIT(12); /* set bit12 as 1 for negative cfo */
- } else {
- ret_val = value;
- ret_val = (ret_val << 1) + (ret_val >> 1); /* *2.5~=312.5/2^7 */
- }
- return ret_val;
-}
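
odm_cfo() packs the CFO report into a sign-magnitude fixed-point word: the magnitude is scaled by 2.5 (each LSB is about 312.5 kHz / 2^7) and bit 12 flags a negative offset. A standalone restatement of the encoding:

#include <stdint.h>

/* Encode a signed CFO sample as sign-magnitude: |value| * 2.5 in the low
 * bits, bit 12 set when the offset is negative.
 */
static int16_t encode_cfo(int8_t value)
{
        int16_t mag = (value < 0) ? -value : value;

        mag = (mag << 1) + (mag >> 1);  /* x2.5 ~= 312.5 kHz / 2^7 per LSB */
        if (value < 0)
                mag |= 1 << 12;         /* negative-CFO flag */
        return mag;
}
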
-
-static u8 phydm_rate_to_num_ss(struct phy_dm_struct *dm, u8 data_rate)
-{
- u8 num_ss = 1;
-
- if (data_rate <= ODM_RATE54M)
- num_ss = 1;
- else if (data_rate <= ODM_RATEMCS31)
- num_ss = ((data_rate - ODM_RATEMCS0) >> 3) + 1;
- else if (data_rate <= ODM_RATEVHTSS1MCS9)
- num_ss = 1;
- else if (data_rate <= ODM_RATEVHTSS2MCS9)
- num_ss = 2;
- else if (data_rate <= ODM_RATEVHTSS3MCS9)
- num_ss = 3;
- else if (data_rate <= ODM_RATEVHTSS4MCS9)
- num_ss = 4;
-
- return num_ss;
-}
-
-static void odm_rx_phy_status92c_series_parsing(
- struct phy_dm_struct *dm, struct dm_phy_status_info *phy_info,
- u8 *phy_status, struct dm_per_pkt_info *pktinfo)
-{
- u8 i, max_spatial_stream;
- s8 rx_pwr[4], rx_pwr_all = 0;
- u8 EVM, pwdb_all = 0, pwdb_all_bt;
- u8 RSSI, total_rssi = 0;
- bool is_cck_rate = false;
- u8 rf_rx_num = 0;
- u8 LNA_idx = 0;
- u8 VGA_idx = 0;
- u8 cck_agc_rpt;
- u8 num_ss;
- struct phy_status_rpt_8192cd *phy_sta_rpt =
- (struct phy_status_rpt_8192cd *)phy_status;
-
- is_cck_rate = (pktinfo->data_rate <= ODM_RATE11M) ? true : false;
-
- if (pktinfo->is_to_self)
- dm->curr_station_id = pktinfo->station_id;
-
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_A] = -1;
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_B] = -1;
-
- if (is_cck_rate) {
- dm->phy_dbg_info.num_qry_phy_status_cck++;
- cck_agc_rpt = phy_sta_rpt->cck_agc_rpt_ofdm_cfosho_a;
-
- if (dm->support_ic_type & (ODM_RTL8703B)) {
- } else { /*3 bit LNA*/
-
- LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
- VGA_idx = (cck_agc_rpt & 0x1F);
- }
-
- ODM_RT_TRACE(
- dm, ODM_COMP_RSSI_MONITOR,
- "ext_lna_gain (( %d )), LNA_idx: (( 0x%x )), VGA_idx: (( 0x%x )), rx_pwr_all: (( %d ))\n",
- dm->ext_lna_gain, LNA_idx, VGA_idx, rx_pwr_all);
-
- if (dm->board_type & ODM_BOARD_EXT_LNA)
- rx_pwr_all -= dm->ext_lna_gain;
-
- pwdb_all = odm_query_rx_pwr_percentage(rx_pwr_all);
-
- if (pktinfo->is_to_self) {
- dm->cck_lna_idx = LNA_idx;
- dm->cck_vga_idx = VGA_idx;
- }
- phy_info->rx_pwdb_all = pwdb_all;
-
- phy_info->bt_rx_rssi_percentage = pwdb_all;
- phy_info->recv_signal_power = rx_pwr_all;
- /* (3) Get Signal Quality (EVM) */
- {
- u8 sq;
-
- sq = phydm_get_signal_quality_8192(phy_info, dm,
- phy_sta_rpt);
- phy_info->signal_quality = sq;
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_A] = sq;
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_B] = -1;
- }
-
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX; i++) {
- if (i == 0)
- phy_info->rx_mimo_signal_strength[0] = pwdb_all;
- else
- phy_info->rx_mimo_signal_strength[1] = 0;
- }
- } else { /* 2 is OFDM rate */
- dm->phy_dbg_info.num_qry_phy_status_ofdm++;
-
- /* */
- /* (1)Get RSSI for HT rate */
- /* */
-
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX; i++) {
- /* 2008/01/30 MH we will judge RF RX path now. */
- if (dm->rf_path_rx_enable & BIT(i))
- rf_rx_num++;
- /* else */
- /* continue; */
-
- rx_pwr[i] =
- ((phy_sta_rpt->path_agc[i].gain & 0x3F) * 2) -
- 110;
-
- if (pktinfo->is_to_self) {
- dm->ofdm_agc_idx[i] =
- (phy_sta_rpt->path_agc[i].gain & 0x3F);
- /**/
- }
-
- phy_info->rx_pwr[i] = rx_pwr[i];
-
- /* Translate DBM to percentage. */
- RSSI = odm_query_rx_pwr_percentage(rx_pwr[i]);
- total_rssi += RSSI;
-
- phy_info->rx_mimo_signal_strength[i] = (u8)RSSI;
-
- /* Get Rx snr value in DB */
- dm->phy_dbg_info.rx_snr_db[i] =
- (s32)(phy_sta_rpt->path_rxsnr[i] / 2);
- phy_info->rx_snr[i] = dm->phy_dbg_info.rx_snr_db[i];
-
- /* Record Signal Strength for next packet */
- /* if(pktinfo->is_packet_match_bssid) */
- {
- }
- }
-
- /* */
-		/* (2)PWDB, Average PWDB calculated by hardware (for RA) */
- /* */
- rx_pwr_all = (((phy_sta_rpt->cck_sig_qual_ofdm_pwdb_all) >> 1) &
- 0x7f) -
- 110;
-
- pwdb_all = odm_query_rx_pwr_percentage(rx_pwr_all);
- pwdb_all_bt = pwdb_all;
-
- phy_info->rx_pwdb_all = pwdb_all;
- phy_info->bt_rx_rssi_percentage = pwdb_all_bt;
- phy_info->rx_power = rx_pwr_all;
- phy_info->recv_signal_power = rx_pwr_all;
-
- if ((dm->support_platform == ODM_WIN) && (dm->patch_id == 19)) {
- /* do nothing */
- } else if ((dm->support_platform == ODM_WIN) &&
- (dm->patch_id == 25)) {
- /* do nothing */
- } else { /* mgnt_info->customer_id != RT_CID_819X_LENOVO */
- /* */
- /* (3)EVM of HT rate */
- /* */
- if (pktinfo->data_rate >= ODM_RATEMCS8 &&
- pktinfo->data_rate <= ODM_RATEMCS15) {
- /* both spatial stream make sense */
- max_spatial_stream = 2;
- } else {
- /* only spatial stream 1 makes sense */
- max_spatial_stream = 1;
- }
-
- for (i = 0; i < max_spatial_stream; i++) {
-				/*Don't use a shift operation like
-				 *"rx_evmX >>= 1", because the compiler of the
-				 *free build environment fills the most
-				 *significant bit with "zero" when shifting,
-				 *which may change a negative value into a
-				 *positive one; the dbm value (which is
-				 *supposed to be negative) would then no
-				 *longer be correct.
-				 */
- EVM = odm_evm_db_to_percentage(
- (phy_sta_rpt
- ->stream_rxevm[i])); /* dbm */
-
- /* Fill value in RFD, Get the first spatial
- * stream only
- */
- if (i == ODM_RF_PATH_A)
- phy_info->signal_quality =
- (u8)(EVM & 0xff);
- phy_info->rx_mimo_signal_quality[i] =
- (u8)(EVM & 0xff);
- }
- }
-
- num_ss = phydm_rate_to_num_ss(dm, pktinfo->data_rate);
- odm_parsing_cfo(dm, pktinfo, phy_sta_rpt->path_cfotail, num_ss);
- }
-	/* UI BSS list signal strength (in percentage): keep it in the 0~100
-	 * range so it displays sensibly.
- */
- /* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
- if (is_cck_rate)
- phy_info->signal_strength = pwdb_all;
- else if (rf_rx_num != 0)
- phy_info->signal_strength = (total_rssi /= rf_rx_num);
-
- /* For 92C/92D HW (Hybrid) Antenna Diversity */
-}
-
-static void
-odm_rx_phy_bw_jaguar_series_parsing(struct dm_phy_status_info *phy_info,
- struct dm_per_pkt_info *pktinfo,
- struct phy_status_rpt_8812 *phy_sta_rpt)
-{
- if (pktinfo->data_rate <= ODM_RATE54M) {
- switch (phy_sta_rpt->r_RFMOD) {
- case 1:
- if (phy_sta_rpt->sub_chnl == 0)
- phy_info->band_width = 1;
- else
- phy_info->band_width = 0;
- break;
-
- case 2:
- if (phy_sta_rpt->sub_chnl == 0)
- phy_info->band_width = 2;
- else if (phy_sta_rpt->sub_chnl == 9 ||
- phy_sta_rpt->sub_chnl == 10)
- phy_info->band_width = 1;
- else
- phy_info->band_width = 0;
- break;
-
- default:
- case 0:
- phy_info->band_width = 0;
- break;
- }
- }
-}
-
-static void odm_rx_phy_status_jaguar_series_parsing(
- struct phy_dm_struct *dm, struct dm_phy_status_info *phy_info,
- u8 *phy_status, struct dm_per_pkt_info *pktinfo)
-{
- u8 i, max_spatial_stream;
- s8 rx_pwr[4], rx_pwr_all = 0;
- u8 EVM = 0, evm_dbm, pwdb_all = 0, pwdb_all_bt;
- u8 RSSI, avg_rssi = 0, best_rssi = 0, second_rssi = 0;
- u8 is_cck_rate = 0;
- u8 rf_rx_num = 0;
- u8 cck_highpwr = 0;
- u8 LNA_idx, VGA_idx;
- struct phy_status_rpt_8812 *phy_sta_rpt =
- (struct phy_status_rpt_8812 *)phy_status;
- struct fast_antenna_training *fat_tab = &dm->dm_fat_table;
- u8 num_ss;
-
- odm_rx_phy_bw_jaguar_series_parsing(phy_info, pktinfo, phy_sta_rpt);
-
- if (pktinfo->data_rate <= ODM_RATE11M)
- is_cck_rate = true;
- else
- is_cck_rate = false;
-
- if (pktinfo->is_to_self)
- dm->curr_station_id = pktinfo->station_id;
- else
- dm->curr_station_id = 0xff;
-
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_A] = -1;
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_B] = -1;
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_C] = -1;
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_D] = -1;
-
- if (is_cck_rate) {
- u8 cck_agc_rpt;
-
- dm->phy_dbg_info.num_qry_phy_status_cck++;
-
- /*(1)Hardware does not provide RSSI for CCK*/
- /*(2)PWDB, Average PWDB calculated by hardware (for RA)*/
-
- cck_highpwr = dm->is_cck_high_power;
-
- cck_agc_rpt = phy_sta_rpt->cfosho[0];
- LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
- VGA_idx = (cck_agc_rpt & 0x1F);
-
- if (dm->support_ic_type == ODM_RTL8812) {
- rx_pwr_all =
- phydm_get_rx_pwr(LNA_idx, VGA_idx, cck_highpwr);
- rx_pwr_all += 6;
- pwdb_all = odm_query_rx_pwr_percentage(rx_pwr_all);
- pwdb_all = phydm_adjust_pwdb(cck_highpwr, pwdb_all);
-
- } else if (dm->support_ic_type & (ODM_RTL8821 | ODM_RTL8881A)) {
- s8 pout = -6;
-
- switch (LNA_idx) {
- case 5:
- rx_pwr_all = pout - 32 - (2 * VGA_idx);
- break;
- case 4:
- rx_pwr_all = pout - 24 - (2 * VGA_idx);
- break;
- case 2:
- rx_pwr_all = pout - 11 - (2 * VGA_idx);
- break;
- case 1:
- rx_pwr_all = pout + 5 - (2 * VGA_idx);
- break;
- case 0:
- rx_pwr_all = pout + 21 - (2 * VGA_idx);
- break;
- }
- pwdb_all = odm_query_rx_pwr_percentage(rx_pwr_all);
- } else if (dm->support_ic_type == ODM_RTL8814A ||
- dm->support_ic_type == ODM_RTL8822B) {
- s8 pout = -6;
-
- switch (LNA_idx) {
- /*CCK only use LNA: 2, 3, 5, 7*/
- case 7:
- rx_pwr_all = pout - 32 - (2 * VGA_idx);
- break;
- case 5:
- rx_pwr_all = pout - 22 - (2 * VGA_idx);
- break;
- case 3:
- rx_pwr_all = pout - 2 - (2 * VGA_idx);
- break;
- case 2:
- rx_pwr_all = pout + 5 - (2 * VGA_idx);
- break;
- default:
- break;
- }
- pwdb_all = odm_query_rx_pwr_percentage(rx_pwr_all);
- }
-
- dm->cck_lna_idx = LNA_idx;
- dm->cck_vga_idx = VGA_idx;
- phy_info->rx_pwdb_all = pwdb_all;
- phy_info->bt_rx_rssi_percentage = pwdb_all;
- phy_info->recv_signal_power = rx_pwr_all;
- /*(3) Get Signal Quality (EVM)*/
- {
- u8 sq = 0;
-
- if (!(dm->support_platform == ODM_WIN &&
- dm->patch_id == RT_CID_819X_LENOVO))
- sq = phydm_get_signal_quality_8812(phy_info, dm,
- phy_sta_rpt);
-
- phy_info->signal_quality = sq;
- phy_info->rx_mimo_signal_quality[ODM_RF_PATH_A] = sq;
- }
-
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX_JAGUAR; i++) {
- if (i == 0)
- phy_info->rx_mimo_signal_strength[0] = pwdb_all;
- else
- phy_info->rx_mimo_signal_strength[i] = 0;
- }
- } else {
- /*is OFDM rate*/
- fat_tab->hw_antsw_occur = phy_sta_rpt->hw_antsw_occur;
-
- dm->phy_dbg_info.num_qry_phy_status_ofdm++;
-
- /*(1)Get RSSI for OFDM rate*/
-
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX_JAGUAR; i++) {
- /*2008/01/30 MH we will judge RF RX path now.*/
- if (dm->rf_path_rx_enable & BIT(i))
- rf_rx_num++;
- /*2012.05.25 LukeLee: Testchip AGC report is wrong,
- *it should be restored back to old formula in MP chip
- */
- if (i < ODM_RF_PATH_C)
- rx_pwr[i] = (phy_sta_rpt->gain_trsw[i] & 0x7F) -
- 110;
- else
- rx_pwr[i] = (phy_sta_rpt->gain_trsw_cd[i - 2] &
- 0x7F) -
- 110;
-
- phy_info->rx_pwr[i] = rx_pwr[i];
-
- /* Translate DBM to percentage. */
- RSSI = odm_query_rx_pwr_percentage(rx_pwr[i]);
-
- /*total_rssi += RSSI;*/
- /*Get the best two RSSI*/
- if (RSSI > best_rssi && RSSI > second_rssi) {
- second_rssi = best_rssi;
- best_rssi = RSSI;
- } else if (RSSI > second_rssi && RSSI <= best_rssi) {
- second_rssi = RSSI;
- }
-
- phy_info->rx_mimo_signal_strength[i] = (u8)RSSI;
-
- /*Get Rx snr value in DB*/
- if (i < ODM_RF_PATH_C)
- phy_info->rx_snr[i] =
- dm->phy_dbg_info.rx_snr_db[i] =
- phy_sta_rpt->rxsnr[i] / 2;
- else if (dm->support_ic_type &
- (ODM_RTL8814A | ODM_RTL8822B))
- phy_info->rx_snr[i] = dm->phy_dbg_info
- .rx_snr_db[i] =
- phy_sta_rpt->csi_current[i - 2] / 2;
-
- /*(2) CFO_short & CFO_tail*/
- if (i < ODM_RF_PATH_C) {
- phy_info->cfo_short[i] =
- odm_cfo((phy_sta_rpt->cfosho[i]));
- phy_info->cfo_tail[i] =
- odm_cfo((phy_sta_rpt->cfotail[i]));
- }
- }
-
- /*(3)PWDB, Average PWDB calculated by hardware (for RA)*/
-
- /*2012.05.25 LukeLee: Testchip AGC report is wrong, it should be
- *restored back to old formula in MP chip
- */
- if ((dm->support_ic_type &
- (ODM_RTL8812 | ODM_RTL8821 | ODM_RTL8881A)) &&
- (!dm->is_mp_chip))
- rx_pwr_all = (phy_sta_rpt->pwdb_all & 0x7f) - 110;
- else
- rx_pwr_all = (((phy_sta_rpt->pwdb_all) >> 1) & 0x7f) -
- 110; /*OLD FORMULA*/
-
- pwdb_all = odm_query_rx_pwr_percentage(rx_pwr_all);
- pwdb_all_bt = pwdb_all;
-
- phy_info->rx_pwdb_all = pwdb_all;
- phy_info->bt_rx_rssi_percentage = pwdb_all_bt;
- phy_info->rx_power = rx_pwr_all;
- phy_info->recv_signal_power = rx_pwr_all;
-
- if ((dm->support_platform == ODM_WIN) && (dm->patch_id == 19)) {
- /*do nothing*/
- } else {
- /*mgnt_info->customer_id != RT_CID_819X_LENOVO*/
-
- /*(4)EVM of OFDM rate*/
-
- if ((pktinfo->data_rate >= ODM_RATEMCS8) &&
- (pktinfo->data_rate <= ODM_RATEMCS15))
- max_spatial_stream = 2;
- else if ((pktinfo->data_rate >= ODM_RATEVHTSS2MCS0) &&
- (pktinfo->data_rate <= ODM_RATEVHTSS2MCS9))
- max_spatial_stream = 2;
- else if ((pktinfo->data_rate >= ODM_RATEMCS16) &&
- (pktinfo->data_rate <= ODM_RATEMCS23))
- max_spatial_stream = 3;
- else if ((pktinfo->data_rate >= ODM_RATEVHTSS3MCS0) &&
- (pktinfo->data_rate <= ODM_RATEVHTSS3MCS9))
- max_spatial_stream = 3;
- else
- max_spatial_stream = 1;
-
- for (i = 0; i < max_spatial_stream; i++) {
-				/*Don't use a shift operation like
-				 *"rx_evmX >>= 1", because the compiler of the
-				 *free build environment fills the most
-				 *significant bit with "zero" when shifting,
-				 *which may change a negative value into a
-				 *positive one; the dbm value (which is
-				 *supposed to be negative) would then no
-				 *longer be correct.
-				 */
-
- EVM = phydm_get_odm_evm(i, pktinfo,
- phy_sta_rpt);
- evm_dbm = phydm_get_evm_dbm(i, EVM, phy_sta_rpt,
- phy_info);
- phy_info->rx_mimo_signal_quality[i] = EVM;
- phy_info->rx_mimo_evm_dbm[i] = evm_dbm;
- }
- }
-
- num_ss = phydm_rate_to_num_ss(dm, pktinfo->data_rate);
- odm_parsing_cfo(dm, pktinfo, phy_sta_rpt->cfotail, num_ss);
- }
-
-	/*UI BSS list signal strength (in percentage): keep it in the 0~100
-	 *range so it displays sensibly.
- */
- /*It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp().*/
- if (is_cck_rate) {
- phy_info->signal_strength = pwdb_all;
- } else if (rf_rx_num != 0) {
- /* 2015/01 Sean, use the best two RSSI only,
- * suggested by Ynlin and ChenYu.
- */
- if (rf_rx_num == 1)
- avg_rssi = best_rssi;
- else
- avg_rssi = (best_rssi + second_rssi) / 2;
-
- phy_info->signal_strength = avg_rssi;
- }
-
- dm->rx_pwdb_ave = dm->rx_pwdb_ave + phy_info->rx_pwdb_all;
-
- dm->dm_fat_table.antsel_rx_keep_0 = phy_sta_rpt->antidx_anta;
- dm->dm_fat_table.antsel_rx_keep_1 = phy_sta_rpt->antidx_antb;
- dm->dm_fat_table.antsel_rx_keep_2 = phy_sta_rpt->antidx_antc;
- dm->dm_fat_table.antsel_rx_keep_3 = phy_sta_rpt->antidx_antd;
-}
-
-void phydm_reset_rssi_for_dm(struct phy_dm_struct *dm, u8 station_id)
-{
- struct rtl_sta_info *entry;
-
- entry = dm->odm_sta_info[station_id];
-
- if (!IS_STA_VALID(entry))
- return;
-
- ODM_RT_TRACE(dm, ODM_COMP_RSSI_MONITOR,
- "Reset RSSI for macid = (( %d ))\n", station_id);
-
- entry->rssi_stat.undecorated_smoothed_cck = -1;
- entry->rssi_stat.undecorated_smoothed_ofdm = -1;
- entry->rssi_stat.undecorated_smoothed_pwdb = -1;
- entry->rssi_stat.ofdm_pkt = 0;
- entry->rssi_stat.cck_pkt = 0;
- entry->rssi_stat.cck_sum_power = 0;
- entry->rssi_stat.is_send_rssi = RA_RSSI_STATE_INIT;
- entry->rssi_stat.packet_map = 0;
- entry->rssi_stat.valid_bit = 0;
-}
-
-void odm_init_rssi_for_dm(struct phy_dm_struct *dm) {}
-
-static void odm_process_rssi_for_dm(struct phy_dm_struct *dm,
- struct dm_phy_status_info *phy_info,
- struct dm_per_pkt_info *pktinfo)
-{
- s32 undecorated_smoothed_pwdb, undecorated_smoothed_cck,
- undecorated_smoothed_ofdm;
- u8 is_cck_rate = 0;
- u8 send_rssi_2_fw = 0;
- struct rtl_sta_info *entry;
-
- if (pktinfo->station_id >= ODM_ASSOCIATE_ENTRY_NUM)
- return;
-
- /* 2012/05/30 MH/Luke.Lee Add some description */
- /* In windows driver: AP/IBSS mode STA */
- entry = dm->odm_sta_info[pktinfo->station_id];
-
- if (!IS_STA_VALID(entry))
- return;
-
- {
- if ((!pktinfo->is_packet_match_bssid)) /*data frame only*/
- return;
- }
-
- if (pktinfo->is_packet_beacon)
- dm->phy_dbg_info.num_qry_beacon_pkt++;
-
- is_cck_rate = (pktinfo->data_rate <= ODM_RATE11M) ? true : false;
- dm->rx_rate = pktinfo->data_rate;
-
- /* --------------Statistic for antenna/path diversity---------------- */
-
- /* -----------------Smart Antenna Debug Message------------------ */
-
- undecorated_smoothed_cck = entry->rssi_stat.undecorated_smoothed_cck;
- undecorated_smoothed_ofdm = entry->rssi_stat.undecorated_smoothed_ofdm;
- undecorated_smoothed_pwdb = entry->rssi_stat.undecorated_smoothed_pwdb;
-
- if (pktinfo->is_packet_to_self || pktinfo->is_packet_beacon) {
- if (!is_cck_rate) /* ofdm rate */
- undecorated_smoothed_ofdm = phydm_process_rssi_ofdm(
- dm, phy_info, entry, undecorated_smoothed_ofdm);
- else
- undecorated_smoothed_cck = phydm_process_rssi_cck(
- dm, phy_info, entry, undecorated_smoothed_cck);
-
- undecorated_smoothed_pwdb = phydm_process_rssi_pwdb(
- dm, entry, pktinfo, undecorated_smoothed_ofdm,
- undecorated_smoothed_cck);
-
- if ((entry->rssi_stat.ofdm_pkt >= 1 ||
- entry->rssi_stat.cck_pkt >= 5) &&
- (entry->rssi_stat.is_send_rssi == RA_RSSI_STATE_INIT)) {
- send_rssi_2_fw = 1;
- entry->rssi_stat.is_send_rssi = RA_RSSI_STATE_SEND;
- }
-
- entry->rssi_stat.undecorated_smoothed_cck =
- undecorated_smoothed_cck;
- entry->rssi_stat.undecorated_smoothed_ofdm =
- undecorated_smoothed_ofdm;
- entry->rssi_stat.undecorated_smoothed_pwdb =
- undecorated_smoothed_pwdb;
-
- if (send_rssi_2_fw) { /* Trigger init rate by RSSI */
-
- if (entry->rssi_stat.ofdm_pkt != 0)
- entry->rssi_stat.undecorated_smoothed_pwdb =
- undecorated_smoothed_ofdm;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_RSSI_MONITOR,
- "[Send to FW] PWDB = (( %d )), ofdm_pkt = (( %d )), cck_pkt = (( %d ))\n",
- undecorated_smoothed_pwdb,
- entry->rssi_stat.ofdm_pkt,
- entry->rssi_stat.cck_pkt);
- }
- }
-}
-
-/*
- * The caller must handle endianness before calling this API.
- */
-static void odm_phy_status_query_92c_series(struct phy_dm_struct *dm,
- struct dm_phy_status_info *phy_info,
- u8 *phy_status,
- struct dm_per_pkt_info *pktinfo)
-{
- odm_rx_phy_status92c_series_parsing(dm, phy_info, phy_status, pktinfo);
- odm_process_rssi_for_dm(dm, phy_info, pktinfo);
-}
-
-/*
- * The caller must handle endianness before calling this API.
- */
-
-static void odm_phy_status_query_jaguar_series(
- struct phy_dm_struct *dm, struct dm_phy_status_info *phy_info,
- u8 *phy_status, struct dm_per_pkt_info *pktinfo)
-{
- odm_rx_phy_status_jaguar_series_parsing(dm, phy_info, phy_status,
- pktinfo);
- odm_process_rssi_for_dm(dm, phy_info, pktinfo);
-}
-
-void odm_phy_status_query(struct phy_dm_struct *dm,
- struct dm_phy_status_info *phy_info, u8 *phy_status,
- struct dm_per_pkt_info *pktinfo)
-{
- if (dm->support_ic_type & ODM_IC_PHY_STATUE_NEW_TYPE) {
- phydm_rx_phy_status_new_type(dm, phy_status, pktinfo, phy_info);
- return;
- }
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- odm_phy_status_query_jaguar_series(dm, phy_info, phy_status,
- pktinfo);
-
- if (dm->support_ic_type & ODM_IC_11N_SERIES)
- odm_phy_status_query_92c_series(dm, phy_info, phy_status,
- pktinfo);
-}
-
-/* For future use. */
-void odm_mac_status_query(struct phy_dm_struct *dm, u8 *mac_status, u8 mac_id,
- bool is_packet_match_bssid, bool is_packet_to_self,
- bool is_packet_beacon)
-{
- /* 2011/10/19 Driver team will handle in the future. */
-}
-
-/*
- * If you want to add a new IC, Please follow below template and generate
- * a new one.
- */
-
-enum hal_status
-odm_config_rf_with_header_file(struct phy_dm_struct *dm,
- enum odm_rf_config_type config_type,
- enum odm_rf_radio_path e_rf_path)
-{
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===>%s (%s)\n", __func__,
- (dm->is_mp_chip) ? "MPChip" : "TestChip");
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "dm->support_platform: 0x%X, dm->support_interface: 0x%X, dm->board_type: 0x%X\n",
- dm->support_platform, dm->support_interface, dm->board_type);
-
- /* 1 AP doesn't use PHYDM power tracking table in these ICs */
- /* JJ ADD 20161014 */
-
- /* 1 All platforms support */
- if (dm->support_ic_type == ODM_RTL8822B) {
- if (config_type == CONFIG_RF_RADIO) {
- if (e_rf_path == ODM_RF_PATH_A)
- READ_AND_CONFIG_MP(8822b, _radioa);
- else if (e_rf_path == ODM_RF_PATH_B)
- READ_AND_CONFIG_MP(8822b, _radiob);
- } else if (config_type == CONFIG_RF_TXPWR_LMT) {
- if (dm->rfe_type == 5)
- READ_AND_CONFIG_MP(8822b, _txpwr_lmt_type5);
- else
- READ_AND_CONFIG_MP(8822b, _txpwr_lmt);
- }
- }
-
- return HAL_STATUS_SUCCESS;
-}
-
-enum hal_status
-odm_config_rf_with_tx_pwr_track_header_file(struct phy_dm_struct *dm)
-{
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===>%s (%s)\n", __func__,
- (dm->is_mp_chip) ? "MPChip" : "TestChip");
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "dm->support_platform: 0x%X, dm->support_interface: 0x%X, dm->board_type: 0x%X\n",
- dm->support_platform, dm->support_interface, dm->board_type);
-
- /* 1 AP doesn't use PHYDM power tracking table in these ICs */
- /* JJ ADD 20161014 */
-
- /* 1 All platforms support */
-
- if (dm->support_ic_type == ODM_RTL8822B) {
- if (dm->rfe_type == 0)
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type0);
- else if (dm->rfe_type == 1)
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type1);
- else if (dm->rfe_type == 2)
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type2);
- else if ((dm->rfe_type == 3) || (dm->rfe_type == 5))
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type3_type5);
- else if (dm->rfe_type == 4)
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type4);
- else if (dm->rfe_type == 6)
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type6);
- else if (dm->rfe_type == 7)
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type7);
- else if (dm->rfe_type == 8)
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type8);
- else if (dm->rfe_type == 9)
- READ_AND_CONFIG_MP(8822b, _txpowertrack_type9);
- else
- READ_AND_CONFIG_MP(8822b, _txpowertrack);
- }
-
- return HAL_STATUS_SUCCESS;
-}
-
-enum hal_status
-odm_config_bb_with_header_file(struct phy_dm_struct *dm,
- enum odm_bb_config_type config_type)
-{
- /* 1 AP doesn't use PHYDM initialization in these ICs */
- /* JJ ADD 20161014 */
-
- /* 1 All platforms support */
- if (dm->support_ic_type == ODM_RTL8822B) {
- if (config_type == CONFIG_BB_PHY_REG)
- READ_AND_CONFIG_MP(8822b, _phy_reg);
- else if (config_type == CONFIG_BB_AGC_TAB)
- READ_AND_CONFIG_MP(8822b, _agc_tab);
- else if (config_type == CONFIG_BB_PHY_REG_PG)
- READ_AND_CONFIG_MP(8822b, _phy_reg_pg);
- /*else if (config_type == CONFIG_BB_PHY_REG_MP)*/
- /*READ_AND_CONFIG_MP(8822b, _phy_reg_mp);*/
- }
-
- return HAL_STATUS_SUCCESS;
-}
-
-enum hal_status odm_config_mac_with_header_file(struct phy_dm_struct *dm)
-{
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===>%s (%s)\n", __func__,
- (dm->is_mp_chip) ? "MPChip" : "TestChip");
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "dm->support_platform: 0x%X, dm->support_interface: 0x%X, dm->board_type: 0x%X\n",
- dm->support_platform, dm->support_interface, dm->board_type);
-
- /* 1 AP doesn't use PHYDM initialization in these ICs */
- /* JJ ADD 20161014 */
-
- /* 1 All platforms support */
- if (dm->support_ic_type == ODM_RTL8822B)
- READ_AND_CONFIG_MP(8822b, _mac_reg);
-
- return HAL_STATUS_SUCCESS;
-}
-
-enum hal_status
-odm_config_fw_with_header_file(struct phy_dm_struct *dm,
- enum odm_fw_config_type config_type,
- u8 *p_firmware, u32 *size)
-{
- return HAL_STATUS_SUCCESS;
-}
-
-u32 odm_get_hw_img_version(struct phy_dm_struct *dm)
-{
- u32 version = 0;
-
- /* 1 AP doesn't use PHYDM initialization in these ICs */
- /* JJ ADD 20161014 */
-
- /*1 All platforms support*/
- if (dm->support_ic_type == ODM_RTL8822B)
- version = GET_VERSION_MP(8822b, _mac_reg);
-
- return version;
-}
-
-/* For 8822B only; this should eventually be moved to FW. */
-/*==============================================*/
-
-bool phydm_query_is_mu_api(struct phy_dm_struct *phydm, u8 ppdu_idx,
- u8 *p_data_rate, u8 *p_gid)
-{
- u8 data_rate = 0, gid = 0;
- bool is_mu = false;
-
- data_rate = phydm->phy_dbg_info.num_of_ppdu[ppdu_idx];
- gid = phydm->phy_dbg_info.gid_num[ppdu_idx];
-
- if (data_rate & BIT(7)) {
- is_mu = true;
- data_rate = data_rate & ~(BIT(7));
- } else {
- is_mu = false;
- }
-
- *p_data_rate = data_rate;
- *p_gid = gid;
-
- return is_mu;
-}
-
-static void phydm_rx_statistic_cal(struct phy_dm_struct *phydm, u8 *phy_status,
- struct dm_per_pkt_info *pktinfo)
-{
- struct phy_status_rpt_jaguar2_type1 *phy_sta_rpt =
- (struct phy_status_rpt_jaguar2_type1 *)phy_status;
-	u8 data_rate = pktinfo->data_rate & ~(BIT(7));
-
-	if ((phy_sta_rpt->gid != 0) && (phy_sta_rpt->gid != 63)) {
-		if (data_rate >= ODM_RATEVHTSS1MCS0) {
-			phydm->phy_dbg_info
-				.num_qry_mu_vht_pkt[data_rate - 0x2C]++;
-			phydm->phy_dbg_info.num_of_ppdu[pktinfo->ppdu_cnt] =
-				data_rate | BIT(7);
-			phydm->phy_dbg_info.gid_num[pktinfo->ppdu_cnt] =
-				phy_sta_rpt->gid;
-		}
-
-	} else {
-		if (data_rate >= ODM_RATEVHTSS1MCS0) {
-			phydm->phy_dbg_info.num_qry_vht_pkt[data_rate - 0x2C]++;
-			phydm->phy_dbg_info.num_of_ppdu[pktinfo->ppdu_cnt] =
-				data_rate;
-			phydm->phy_dbg_info.gid_num[pktinfo->ppdu_cnt] =
-				phy_sta_rpt->gid;
-		}
-	}
-}
-
-static void phydm_reset_phy_info(struct phy_dm_struct *phydm,
- struct dm_phy_status_info *phy_info)
-{
- phy_info->rx_pwdb_all = 0;
- phy_info->signal_quality = 0;
- phy_info->band_width = 0;
- phy_info->rx_count = 0;
- odm_memory_set(phydm, phy_info->rx_mimo_signal_quality, 0, 4);
- odm_memory_set(phydm, phy_info->rx_mimo_signal_strength, 0, 4);
- odm_memory_set(phydm, phy_info->rx_snr, 0, 4);
-
- phy_info->rx_power = -110;
- phy_info->recv_signal_power = -110;
- phy_info->bt_rx_rssi_percentage = 0;
- phy_info->signal_strength = 0;
- phy_info->bt_coex_pwr_adjust = 0;
- phy_info->channel = 0;
- phy_info->is_mu_packet = 0;
- phy_info->is_beamformed = 0;
- phy_info->rxsc = 0;
- odm_memory_set(phydm, phy_info->rx_pwr, -110, 4);
- odm_memory_set(phydm, phy_info->rx_mimo_evm_dbm, 0, 4);
- odm_memory_set(phydm, phy_info->cfo_short, 0, 8);
- odm_memory_set(phydm, phy_info->cfo_tail, 0, 8);
-}
-
-static void phydm_set_per_path_phy_info(u8 rx_path, s8 rx_pwr, s8 rx_evm,
- s8 cfo_tail, s8 rx_snr,
- struct dm_phy_status_info *phy_info)
-{
- u8 evm_dbm = 0;
- u8 evm_percentage = 0;
-
- /* SNR is S(8,1), EVM is S(8,1), CFO is S(8,7) */
-
- if (rx_evm < 0) {
- /* Calculate EVM in dBm */
- evm_dbm = ((u8)(0 - rx_evm) >> 1);
-
- /* Calculate EVM in percentage */
- if (evm_dbm >= 33)
- evm_percentage = 100;
- else
- evm_percentage = (evm_dbm << 1) + (evm_dbm);
- }
-
- phy_info->rx_pwr[rx_path] = rx_pwr;
- phy_info->rx_mimo_evm_dbm[rx_path] = evm_dbm;
-
-	/* CFO = CFO_tail * 312.5 / 2^12 ~= CFO_tail * 39/512 (kHz) */
- phy_info->cfo_tail[rx_path] = cfo_tail;
- phy_info->cfo_tail[rx_path] = ((phy_info->cfo_tail[rx_path] << 5) +
- (phy_info->cfo_tail[rx_path] << 2) +
- (phy_info->cfo_tail[rx_path] << 1) +
- (phy_info->cfo_tail[rx_path])) >>
- 9;
-
- phy_info->rx_mimo_signal_strength[rx_path] =
- odm_query_rx_pwr_percentage(rx_pwr);
- phy_info->rx_mimo_signal_quality[rx_path] = evm_percentage;
- phy_info->rx_snr[rx_path] = rx_snr >> 1;
-}
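The shift-and-add chain in phydm_set_per_path_phy_info() is a fixed-point multiply: (x << 5) + (x << 2) + (x << 1) + x equals 39 * x, and the final >> 9 divides by 512, so the stored CFO is roughly cfo_tail * 39 / 512 (about cfo_tail * 312.5 / 4096). A stand-alone restatement, illustrative only (the helper name is made up, and rounding of negative inputs may differ slightly from the shift form):

/* Illustrative fixed-point CFO scaling, matching the shift-and-add form
 * above up to rounding of negative inputs.
 */
static s32 cfo_tail_scale(s32 cfo_tail)
{
	return (cfo_tail * 39) / 512;	/* ~= cfo_tail * 312.5 / 4096 */
}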
-
-static void phydm_set_common_phy_info(s8 rx_power, u8 channel,
- bool is_beamformed, bool is_mu_packet,
- u8 bandwidth, u8 signal_quality, u8 rxsc,
- struct dm_phy_status_info *phy_info)
-{
- phy_info->rx_power = rx_power; /* RSSI in dB */
- phy_info->recv_signal_power = rx_power; /* RSSI in dB */
- phy_info->channel = channel; /* channel number */
- phy_info->is_beamformed = is_beamformed; /* apply BF */
- phy_info->is_mu_packet = is_mu_packet; /* MU packet */
- phy_info->rxsc = rxsc;
- phy_info->rx_pwdb_all =
- odm_query_rx_pwr_percentage(rx_power); /* RSSI in percentage */
- phy_info->signal_quality = signal_quality; /* signal quality */
- phy_info->band_width = bandwidth; /* bandwidth */
-}
-
-static void phydm_get_rx_phy_status_type0(struct phy_dm_struct *dm,
- u8 *phy_status,
- struct dm_per_pkt_info *pktinfo,
- struct dm_phy_status_info *phy_info)
-{
- /* type 0 is used for cck packet */
-
- struct phy_status_rpt_jaguar2_type0 *phy_sta_rpt =
- (struct phy_status_rpt_jaguar2_type0 *)phy_status;
- u8 sq = 0;
- s8 rx_power = phy_sta_rpt->pwdb - 110;
-
- /* JJ ADD 20161014 */
-
- /* Calculate Signal Quality*/
- if (pktinfo->is_packet_match_bssid) {
- if (phy_sta_rpt->signal_quality >= 64) {
- sq = 0;
- } else if (phy_sta_rpt->signal_quality <= 20) {
- sq = 100;
- } else {
- /* mapping to 2~99% */
- sq = 64 - phy_sta_rpt->signal_quality;
- sq = ((sq << 3) + sq) >> 2;
- }
- }
-
- /* Modify CCK PWDB if old AGC */
- if (!dm->cck_new_agc) {
- u8 lna_idx, vga_idx;
-
- lna_idx = ((phy_sta_rpt->lna_h << 3) | phy_sta_rpt->lna_l);
- vga_idx = phy_sta_rpt->vga;
-
- /* JJ ADD 20161014 */
-
- /* Need to do !! */
- /*if (dm->support_ic_type & ODM_RTL8822B) */
- /*rx_power = odm_CCKRSSI_8822B(LNA_idx, VGA_idx);*/
- }
-
- /* Update CCK packet counter */
- dm->phy_dbg_info.num_qry_phy_status_cck++;
-
- /*CCK no STBC and LDPC*/
- dm->phy_dbg_info.is_ldpc_pkt = false;
- dm->phy_dbg_info.is_stbc_pkt = false;
-
- /* Update Common information */
- phydm_set_common_phy_info(rx_power, phy_sta_rpt->channel, false, false,
- ODM_BW20M, sq, phy_sta_rpt->rxsc, phy_info);
-
- /* Update CCK pwdb */
- /* Update per-path information */
- phydm_set_per_path_phy_info(ODM_RF_PATH_A, rx_power, 0, 0, 0, phy_info);
-
- dm->dm_fat_table.antsel_rx_keep_0 = phy_sta_rpt->antidx_a;
- dm->dm_fat_table.antsel_rx_keep_1 = phy_sta_rpt->antidx_b;
- dm->dm_fat_table.antsel_rx_keep_2 = phy_sta_rpt->antidx_c;
- dm->dm_fat_table.antsel_rx_keep_3 = phy_sta_rpt->antidx_d;
-}
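The CCK signal-quality mapping above inverts the raw report (lower is better, useful range roughly 21..63) and scales the result by 9/4, since ((sq << 3) + sq) >> 2 equals sq * 9 / 4. A compact, purely illustrative helper with a hypothetical name:

/* Illustrative CCK signal-quality mapping: raw 21..63 -> roughly 96..2 %. */
static u8 cck_sq_to_percent(u8 raw_sq)
{
	u8 sq;

	if (raw_sq >= 64)
		return 0;
	if (raw_sq <= 20)
		return 100;

	sq = 64 - raw_sq;	/* invert so that larger means better */
	return (sq * 9) / 4;	/* same as ((sq << 3) + sq) >> 2 */
}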
-
-static void phydm_get_rx_phy_status_type1(struct phy_dm_struct *dm,
- u8 *phy_status,
- struct dm_per_pkt_info *pktinfo,
- struct dm_phy_status_info *phy_info)
-{
- /* type 1 is used for ofdm packet */
-
- struct phy_status_rpt_jaguar2_type1 *phy_sta_rpt =
- (struct phy_status_rpt_jaguar2_type1 *)phy_status;
- s8 rx_pwr_db = -120;
- u8 i, rxsc, bw = ODM_BW20M, rx_count = 0;
- bool is_mu;
- u8 num_ss;
-
- /* Update OFDM packet counter */
- dm->phy_dbg_info.num_qry_phy_status_ofdm++;
-
- /* Update per-path information */
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX_JAGUAR; i++) {
- if (dm->rx_ant_status & BIT(i)) {
- s8 rx_path_pwr_db;
-
- /* RX path counter */
- rx_count++;
-
- /* Update per-path information
- * (RSSI_dB RSSI_percentage EVM SNR CFO sq)
- */
- /* EVM report is reported by stream, not path */
- rx_path_pwr_db = phy_sta_rpt->pwdb[i] -
- 110; /* per-path pwdb in dB domain */
- phydm_set_per_path_phy_info(
- i, rx_path_pwr_db,
- phy_sta_rpt->rxevm[rx_count - 1],
- phy_sta_rpt->cfo_tail[i], phy_sta_rpt->rxsnr[i],
- phy_info);
-
- /* search maximum pwdb */
- if (rx_path_pwr_db > rx_pwr_db)
- rx_pwr_db = rx_path_pwr_db;
- }
- }
-
- /* mapping RX counter from 1~4 to 0~3 */
- if (rx_count > 0)
- phy_info->rx_count = rx_count - 1;
-
- /* Check if MU packet or not */
- if ((phy_sta_rpt->gid != 0) && (phy_sta_rpt->gid != 63)) {
- is_mu = true;
- dm->phy_dbg_info.num_qry_mu_pkt++;
- } else {
- is_mu = false;
- }
-
- /* count BF packet */
- dm->phy_dbg_info.num_qry_bf_pkt =
- dm->phy_dbg_info.num_qry_bf_pkt + phy_sta_rpt->beamformed;
-
- /*STBC or LDPC pkt*/
- dm->phy_dbg_info.is_ldpc_pkt = phy_sta_rpt->ldpc;
- dm->phy_dbg_info.is_stbc_pkt = phy_sta_rpt->stbc;
-
- /* Check sub-channel */
- if ((pktinfo->data_rate > ODM_RATE11M) &&
- (pktinfo->data_rate < ODM_RATEMCS0))
- rxsc = phy_sta_rpt->l_rxsc;
- else
- rxsc = phy_sta_rpt->ht_rxsc;
-
- /* Check RX bandwidth */
- if (dm->support_ic_type & ODM_RTL8822B) {
- if ((rxsc >= 1) && (rxsc <= 8))
- bw = ODM_BW20M;
- else if ((rxsc >= 9) && (rxsc <= 12))
- bw = ODM_BW40M;
- else if (rxsc >= 13)
- bw = ODM_BW80M;
- else
- bw = phy_sta_rpt->rf_mode;
- } else if (dm->support_ic_type & (ODM_RTL8197F | ODM_RTL8723D |
- ODM_RTL8710B)) { /* JJ ADD 20161014 */
- if (phy_sta_rpt->rf_mode == 0)
- bw = ODM_BW20M;
- else if ((rxsc == 1) || (rxsc == 2))
- bw = ODM_BW20M;
- else
- bw = ODM_BW40M;
- }
-
- /* Update packet information */
- phydm_set_common_phy_info(
- rx_pwr_db, phy_sta_rpt->channel, (bool)phy_sta_rpt->beamformed,
- is_mu, bw, odm_evm_db_to_percentage(phy_sta_rpt->rxevm[0]),
- rxsc, phy_info);
-
- num_ss = phydm_rate_to_num_ss(dm, pktinfo->data_rate);
-
- odm_parsing_cfo(dm, pktinfo, phy_sta_rpt->cfo_tail, num_ss);
- dm->dm_fat_table.antsel_rx_keep_0 = phy_sta_rpt->antidx_a;
- dm->dm_fat_table.antsel_rx_keep_1 = phy_sta_rpt->antidx_b;
- dm->dm_fat_table.antsel_rx_keep_2 = phy_sta_rpt->antidx_c;
- dm->dm_fat_table.antsel_rx_keep_3 = phy_sta_rpt->antidx_d;
-
- if (pktinfo->is_packet_match_bssid) {
- /* */
- phydm_rx_statistic_cal(dm, phy_status, pktinfo);
- }
-}
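For the 8822B the receive bandwidth above is inferred from the HT sub-channel index alone: 1..8 means a 20 MHz duplicate, 9..12 a 40 MHz one, 13 and above 80 MHz, and 0 falls back to the reported RF mode. A compact restatement, illustrative only (the ODM_BW* constants come from the driver headers, the helper name is hypothetical):

/* Illustrative only: 8822B RX bandwidth from the HT sub-channel index. */
static u8 rxsc_to_bw_8822b(u8 rxsc, u8 rf_mode)
{
	if (rxsc >= 1 && rxsc <= 8)
		return ODM_BW20M;
	if (rxsc >= 9 && rxsc <= 12)
		return ODM_BW40M;
	if (rxsc >= 13)
		return ODM_BW80M;
	return rf_mode;		/* rxsc == 0: use the reported RF mode */
}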
-
-static void phydm_get_rx_phy_status_type2(struct phy_dm_struct *dm,
- u8 *phy_status,
- struct dm_per_pkt_info *pktinfo,
- struct dm_phy_status_info *phy_info)
-{
- struct phy_status_rpt_jaguar2_type2 *phy_sta_rpt =
- (struct phy_status_rpt_jaguar2_type2 *)phy_status;
- s8 rx_pwr_db = -120;
- u8 i, rxsc, bw = ODM_BW20M, rx_count = 0;
-
- /* Update OFDM packet counter */
- dm->phy_dbg_info.num_qry_phy_status_ofdm++;
-
- /* Update per-path information */
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX_JAGUAR; i++) {
- if (dm->rx_ant_status & BIT(i)) {
- s8 rx_path_pwr_db;
-
- /* RX path counter */
- rx_count++;
-
- /* Update per-path information
- * (RSSI_dB RSSI_percentage EVM SNR CFO sq)
- */
- rx_path_pwr_db = phy_sta_rpt->pwdb[i] -
- 110; /* per-path pwdb in dB domain */
-
- phydm_set_per_path_phy_info(i, rx_path_pwr_db, 0, 0, 0,
- phy_info);
-
- /* search maximum pwdb */
- if (rx_path_pwr_db > rx_pwr_db)
- rx_pwr_db = rx_path_pwr_db;
- }
- }
-
- /* mapping RX counter from 1~4 to 0~3 */
- if (rx_count > 0)
- phy_info->rx_count = rx_count - 1;
-
- /* Check RX sub-channel */
- if ((pktinfo->data_rate > ODM_RATE11M) &&
- (pktinfo->data_rate < ODM_RATEMCS0))
- rxsc = phy_sta_rpt->l_rxsc;
- else
- rxsc = phy_sta_rpt->ht_rxsc;
-
- /*STBC or LDPC pkt*/
- dm->phy_dbg_info.is_ldpc_pkt = phy_sta_rpt->ldpc;
- dm->phy_dbg_info.is_stbc_pkt = phy_sta_rpt->stbc;
-
- /* Check RX bandwidth */
-	/* When rxsc == 0 the sub-channel carries no bandwidth information,
-	 * because this report type has no RF-mode field.
-	 */
-
- if (dm->support_ic_type & ODM_RTL8822B) {
- if ((rxsc >= 1) && (rxsc <= 8))
- bw = ODM_BW20M;
- else if ((rxsc >= 9) && (rxsc <= 12))
- bw = ODM_BW40M;
- else if (rxsc >= 13)
- bw = ODM_BW80M;
- else
- bw = ODM_BW20M;
- } else if (dm->support_ic_type & (ODM_RTL8197F | ODM_RTL8723D |
- ODM_RTL8710B)) { /* JJ ADD 20161014 */
- if (rxsc == 3)
- bw = ODM_BW40M;
- else
- bw = ODM_BW20M;
- }
-
- /* Update packet information */
- phydm_set_common_phy_info(rx_pwr_db, phy_sta_rpt->channel,
- (bool)phy_sta_rpt->beamformed, false, bw, 0,
- rxsc, phy_info);
-}
-
-static void
-phydm_process_rssi_for_dm_new_type(struct phy_dm_struct *dm,
- struct dm_phy_status_info *phy_info,
- struct dm_per_pkt_info *pktinfo)
-{
- s32 undecorated_smoothed_pwdb, accumulate_pwdb;
- u32 rssi_ave;
- u8 i;
- struct rtl_sta_info *entry;
- u8 scaling_factor = 4;
-
- if (pktinfo->station_id >= ODM_ASSOCIATE_ENTRY_NUM)
- return;
-
- entry = dm->odm_sta_info[pktinfo->station_id];
-
- if (!IS_STA_VALID(entry))
- return;
-
- if ((!pktinfo->is_packet_match_bssid)) /*data frame only*/
- return;
-
- if (pktinfo->is_packet_beacon)
- dm->phy_dbg_info.num_qry_beacon_pkt++;
-
- if (pktinfo->is_packet_to_self || pktinfo->is_packet_beacon) {
- u32 rssi_linear = 0;
-
- dm->rx_rate = pktinfo->data_rate;
- undecorated_smoothed_pwdb =
- entry->rssi_stat.undecorated_smoothed_pwdb;
- accumulate_pwdb = dm->accumulate_pwdb[pktinfo->station_id];
- dm->rssi_a = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_A];
- dm->rssi_b = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_B];
- dm->rssi_c = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_C];
- dm->rssi_d = phy_info->rx_mimo_signal_strength[ODM_RF_PATH_D];
-
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX_JAGUAR; i++) {
- if (phy_info->rx_mimo_signal_strength[i] != 0)
- rssi_linear += odm_convert_to_linear(
- phy_info->rx_mimo_signal_strength[i]);
- }
-
- switch (phy_info->rx_count + 1) {
- case 2:
- rssi_linear = (rssi_linear >> 1);
- break;
- case 3:
- /* rssi_linear/3 ~ rssi_linear*11/32 */
- rssi_linear = ((rssi_linear) + (rssi_linear << 1) +
- (rssi_linear << 3)) >>
- 5;
- break;
- case 4:
- rssi_linear = (rssi_linear >> 2);
- break;
- }
- rssi_ave = odm_convert_to_db(rssi_linear);
-
- if (undecorated_smoothed_pwdb <= 0) {
- accumulate_pwdb =
- (phy_info->rx_pwdb_all << scaling_factor);
- undecorated_smoothed_pwdb = phy_info->rx_pwdb_all;
- } else {
- accumulate_pwdb = accumulate_pwdb -
- (accumulate_pwdb >> scaling_factor) +
- rssi_ave;
- undecorated_smoothed_pwdb =
- (accumulate_pwdb +
- (1 << (scaling_factor - 1))) >>
- scaling_factor;
- }
-
- entry->rssi_stat.undecorated_smoothed_pwdb =
- undecorated_smoothed_pwdb;
- dm->accumulate_pwdb[pktinfo->station_id] = accumulate_pwdb;
- }
-}
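The smoothing above is a power-of-two exponential moving average: with scaling_factor = 4 the accumulator keeps 15/16 of its previous value, adds the new averaged RSSI, and the smoothed PWDB is the rounded accumulator divided by 16. A minimal sketch of the same recurrence (names are illustrative, not the driver's):

#define RSSI_EMA_SHIFT	4	/* corresponds to scaling_factor above */

/* Illustrative exponential moving average for a smoothed PWDB value. */
static void rssi_ema_update(s32 *acc, s32 *smoothed, s32 sample)
{
	if (*smoothed <= 0) {
		*acc = sample << RSSI_EMA_SHIFT;	/* first sample seeds the filter */
		*smoothed = sample;
		return;
	}

	*acc = *acc - (*acc >> RSSI_EMA_SHIFT) + sample;
	*smoothed = (*acc + (1 << (RSSI_EMA_SHIFT - 1))) >> RSSI_EMA_SHIFT;
}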
-
-void phydm_rx_phy_status_new_type(struct phy_dm_struct *phydm, u8 *phy_status,
- struct dm_per_pkt_info *pktinfo,
- struct dm_phy_status_info *phy_info)
-{
- u8 phy_status_type = (*phy_status & 0xf);
-
- /* Memory reset */
- phydm_reset_phy_info(phydm, phy_info);
-
- /* Phy status parsing */
- switch (phy_status_type) {
- case 0: {
- phydm_get_rx_phy_status_type0(phydm, phy_status, pktinfo,
- phy_info);
- break;
- }
- case 1: {
- phydm_get_rx_phy_status_type1(phydm, phy_status, pktinfo,
- phy_info);
- break;
- }
- case 2: {
- phydm_get_rx_phy_status_type2(phydm, phy_status, pktinfo,
- phy_info);
- break;
- }
- default:
- return;
- }
-
-	/* Update the signal strength for the UI; phy_info->rx_pwdb_all is
-	 * the maximum RSSI over all paths.
-	 */
- phy_info->signal_strength = phy_info->rx_pwdb_all;
-
- /* Calculate average RSSI and smoothed RSSI */
- phydm_process_rssi_for_dm_new_type(phydm, phy_info, pktinfo);
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_hwconfig.h b/drivers/staging/rtlwifi/phydm/phydm_hwconfig.h
deleted file mode 100644
index ee4b9f0af2a1..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_hwconfig.h
+++ /dev/null
@@ -1,487 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __HALHWOUTSRC_H__
-#define __HALHWOUTSRC_H__
-
-/*--------------------------Define -------------------------------------------*/
-#define CCK_RSSI_INIT_COUNT 5
-
-#define RA_RSSI_STATE_INIT 0
-#define RA_RSSI_STATE_SEND 1
-#define RA_RSSI_STATE_HOLD 2
-
-#define CFO_HW_RPT_2_MHZ(val) ((val << 1) + (val >> 1))
-/* ((X * 3125) / 10) >> 7 = X * 312.5 / 128 ~= X * 2.5 = (X << 1) + (X >> 1) */
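CFO_HW_RPT_2_MHZ() approximates the exact scaling with shifts: ((X * 3125) / 10) >> 7 is X * 312.5 / 128, about 2.44 * X, which the macro rounds to 2.5 * X = (X << 1) + (X >> 1). A side-by-side of the exact and approximate forms, illustrative only (not driver code; the function names are made up):

/* Illustrative comparison of the exact scaling and the macro's shift form. */
static s32 cfo_rpt_scale_exact(s32 x)
{
	return ((x * 3125) / 10) >> 7;	/* x * 312.5 / 128, about 2.44 * x */
}

static s32 cfo_rpt_scale_approx(s32 x)
{
	return (x << 1) + (x >> 1);	/* 2.5 * x, as CFO_HW_RPT_2_MHZ() does */
}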
-
-#define AGC_DIFF_CONFIG_MP(ic, band) \
- (odm_read_and_config_mp_##ic##_agc_tab_diff( \
- dm, array_mp_##ic##_agc_tab_diff_##band, \
- sizeof(array_mp_##ic##_agc_tab_diff_##band) / sizeof(u32)))
-#define AGC_DIFF_CONFIG_TC(ic, band) \
- (odm_read_and_config_tc_##ic##_agc_tab_diff( \
- dm, array_tc_##ic##_agc_tab_diff_##band, \
- sizeof(array_tc_##ic##_agc_tab_diff_##band) / sizeof(u32)))
-
-#define AGC_DIFF_CONFIG(ic, band) \
- do { \
- if (dm->is_mp_chip) \
- AGC_DIFF_CONFIG_MP(ic, band); \
- else \
- AGC_DIFF_CONFIG_TC(ic, band); \
- } while (0)
-
-/* ************************************************************
- * structure and define
- * *************************************************************/
-
-struct phy_rx_agc_info {
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 gain : 7, trsw : 1;
-#else
- u8 trsw : 1, gain : 7;
-#endif
-};
-
-struct phy_status_rpt_8192cd {
- struct phy_rx_agc_info path_agc[2];
- u8 ch_corr[2];
- u8 cck_sig_qual_ofdm_pwdb_all;
- u8 cck_agc_rpt_ofdm_cfosho_a;
- u8 cck_rpt_b_ofdm_cfosho_b;
- u8 rsvd_1; /*ch_corr_msb;*/
- u8 noise_power_db_msb;
- s8 path_cfotail[2];
- u8 pcts_mask[2];
- s8 stream_rxevm[2];
- u8 path_rxsnr[2];
- u8 noise_power_db_lsb;
- u8 rsvd_2[3];
- u8 stream_csi[2];
- u8 stream_target_csi[2];
- s8 sig_evm;
- u8 rsvd_3;
-
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 antsel_rx_keep_2 : 1; /*ex_intf_flg:1;*/
- u8 sgi_en : 1;
- u8 rxsc : 2;
- u8 idle_long : 1;
- u8 r_ant_train_en : 1;
- u8 ant_sel_b : 1;
- u8 ant_sel : 1;
-#else /*_BIG_ENDIAN_ */
- u8 ant_sel : 1;
- u8 ant_sel_b : 1;
- u8 r_ant_train_en : 1;
- u8 idle_long : 1;
- u8 rxsc : 2;
- u8 sgi_en : 1;
- u8 antsel_rx_keep_2 : 1; /*ex_intf_flg:1;*/
-#endif
-};
-
-struct phy_status_rpt_8812 {
- /* DWORD 0*/
- u8 gain_trsw[2]; /*path-A and path-B {TRSW, gain[6:0] }*/
- u8 chl_num_LSB; /*channel number[7:0]*/
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 chl_num_MSB : 2; /*channel number[9:8]*/
- u8 sub_chnl : 4; /*sub-channel location[3:0]*/
- u8 r_RFMOD : 2; /*RF mode[1:0]*/
-#else /*_BIG_ENDIAN_ */
- u8 r_RFMOD : 2;
- u8 sub_chnl : 4;
- u8 chl_num_MSB : 2;
-#endif
-
- /* DWORD 1*/
- u8 pwdb_all; /*CCK signal quality / OFDM pwdb all*/
- s8 cfosho[2]; /*DW1 byte 1 DW1 byte2 */
-/*CCK AGC report and CCK_BB_Power / OFDM path-A and path-B short CFO*/
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
-	/*This should be re-checked, because the 8812 and 8814
-	 *definitions of this field differ.
-	 */
- u8 resvd_0 : 6;
- u8 bt_RF_ch_MSB : 2; /*8812A:2'b0, 8814A: bt rf channel keep[7:6]*/
-#else /*_BIG_ENDIAN_*/
- u8 bt_RF_ch_MSB : 2;
- u8 resvd_0 : 6;
-#endif
-
-/* DWORD 2*/
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 ant_div_sw_a : 1; /*8812A: ant_div_sw_a, 8814A: 1'b0*/
- u8 ant_div_sw_b : 1; /*8812A: ant_div_sw_b, 8814A: 1'b0*/
- u8 bt_RF_ch_LSB : 6; /*8812A: 6'b0, 8814A: bt rf channel keep[5:0]*/
-#else /*_BIG_ENDIAN_ */
- u8 bt_RF_ch_LSB : 6;
- u8 ant_div_sw_b : 1;
- u8 ant_div_sw_a : 1;
-#endif
- s8 cfotail[2]; /*DW2 byte 1 DW2 byte 2 path-A and path-B CFO tail*/
- u8 PCTS_MSK_RPT_0; /*PCTS mask report[7:0]*/
- u8 PCTS_MSK_RPT_1; /*PCTS mask report[15:8]*/
-
- /* DWORD 3*/
- s8 rxevm[2]; /*DW3 byte 1 DW3 byte 2 stream 1 and stream 2 RX EVM*/
- s8 rxsnr[2]; /*DW3 byte 3 DW4 byte 0 path-A and path-B RX SNR*/
-
- /* DWORD 4*/
- u8 PCTS_MSK_RPT_2; /*PCTS mask report[23:16]*/
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 PCTS_MSK_RPT_3 : 6; /*PCTS mask report[29:24]*/
- u8 pcts_rpt_valid : 1; /*pcts_rpt_valid*/
- u8 resvd_1 : 1; /*1'b0*/
-#else /*_BIG_ENDIAN_*/
- u8 resvd_1 : 1;
- u8 pcts_rpt_valid : 1;
- u8 PCTS_MSK_RPT_3 : 6;
-#endif
- s8 rxevm_cd[2]; /*DW 4 byte 3 DW5 byte 0 */
- /* 8812A: 16'b0, 8814A: stream 3 and stream 4 RX EVM*/
-
- /* DWORD 5*/
- u8 csi_current[2]; /*DW5 byte 1 DW5 byte 2 */
- /* 8812A: stream 1 and 2 CSI, 8814A: path-C and path-D RX SNR*/
- u8 gain_trsw_cd[2]; /*DW5 byte 3 DW6 byte 0 */
- /* path-C and path-D {TRSW, gain[6:0] }*/
-
- /* DWORD 6*/
- s8 sigevm; /*signal field EVM*/
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 antidx_antc : 3; /*8812A: 3'b0 8814A: antidx_antc[2:0]*/
- u8 antidx_antd : 3; /*8812A: 3'b0 8814A: antidx_antd[2:0]*/
- u8 dpdt_ctrl_keep : 1; /*8812A: 1'b0 8814A: dpdt_ctrl_keep*/
- u8 GNT_BT_keep : 1; /*8812A: 1'b0 8814A: GNT_BT_keep*/
-#else /*_BIG_ENDIAN_*/
- u8 GNT_BT_keep : 1;
- u8 dpdt_ctrl_keep : 1;
- u8 antidx_antd : 3;
- u8 antidx_antc : 3;
-#endif
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 antidx_anta : 3; /*antidx_anta[2:0]*/
- u8 antidx_antb : 3; /*antidx_antb[2:0]*/
- u8 hw_antsw_occur : 2; /*1'b0*/
-#else /*_BIG_ENDIAN_*/
- u8 hw_antsw_occur : 2;
- u8 antidx_antb : 3;
- u8 antidx_anta : 3;
-#endif
-};
-
-void phydm_reset_rssi_for_dm(struct phy_dm_struct *dm, u8 station_id);
-
-void odm_init_rssi_for_dm(struct phy_dm_struct *dm);
-
-void odm_phy_status_query(struct phy_dm_struct *dm,
- struct dm_phy_status_info *phy_info, u8 *phy_status,
- struct dm_per_pkt_info *pktinfo);
-
-void odm_mac_status_query(struct phy_dm_struct *dm, u8 *mac_status, u8 mac_id,
- bool is_packet_match_bssid, bool is_packet_to_self,
- bool is_packet_beacon);
-
-enum hal_status
-odm_config_rf_with_tx_pwr_track_header_file(struct phy_dm_struct *dm);
-
-enum hal_status
-odm_config_rf_with_header_file(struct phy_dm_struct *dm,
- enum odm_rf_config_type config_type,
- enum odm_rf_radio_path e_rf_path);
-
-enum hal_status
-odm_config_bb_with_header_file(struct phy_dm_struct *dm,
- enum odm_bb_config_type config_type);
-
-enum hal_status odm_config_mac_with_header_file(struct phy_dm_struct *dm);
-
-enum hal_status
-odm_config_fw_with_header_file(struct phy_dm_struct *dm,
- enum odm_fw_config_type config_type,
- u8 *p_firmware, u32 *size);
-
-u32 odm_get_hw_img_version(struct phy_dm_struct *dm);
-
-/*For 8822B only!! need to move to FW finally */
-/*==============================================*/
-void phydm_rx_phy_status_new_type(struct phy_dm_struct *phydm, u8 *phy_status,
- struct dm_per_pkt_info *pktinfo,
- struct dm_phy_status_info *phy_info);
-
-bool phydm_query_is_mu_api(struct phy_dm_struct *phydm, u8 ppdu_idx,
- u8 *p_data_rate, u8 *p_gid);
-
-struct phy_status_rpt_jaguar2_type0 {
- /* DW0 */
- u8 page_num;
- u8 pwdb;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 gain : 6;
- u8 rsvd_0 : 1;
- u8 trsw : 1;
-#else
- u8 trsw : 1;
- u8 rsvd_0 : 1;
- u8 gain : 6;
-#endif
- u8 rsvd_1;
-
- /* DW1 */
- u8 rsvd_2;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 rxsc : 4;
- u8 agc_table : 4;
-#else
- u8 agc_table : 4;
- u8 rxsc : 4;
-#endif
- u8 channel;
- u8 band;
-
- /* DW2 */
- u16 length;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 antidx_a : 3;
- u8 antidx_b : 3;
- u8 rsvd_3 : 2;
- u8 antidx_c : 3;
- u8 antidx_d : 3;
- u8 rsvd_4 : 2;
-#else
- u8 rsvd_3 : 2;
- u8 antidx_b : 3;
- u8 antidx_a : 3;
- u8 rsvd_4 : 2;
- u8 antidx_d : 3;
- u8 antidx_c : 3;
-#endif
-
- /* DW3 */
- u8 signal_quality;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 vga : 5;
- u8 lna_l : 3;
- u8 bb_power : 6;
- u8 rsvd_9 : 1;
- u8 lna_h : 1;
-#else
- u8 lna_l : 3;
- u8 vga : 5;
- u8 lna_h : 1;
- u8 rsvd_9 : 1;
- u8 bb_power : 6;
-#endif
- u8 rsvd_5;
-
- /* DW4 */
- u32 rsvd_6;
-
- /* DW5 */
- u32 rsvd_7;
-
- /* DW6 */
- u32 rsvd_8;
-};
-
-struct phy_status_rpt_jaguar2_type1 {
- /* DW0 and DW1 */
- u8 page_num;
- u8 pwdb[4];
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 l_rxsc : 4;
- u8 ht_rxsc : 4;
-#else
- u8 ht_rxsc : 4;
- u8 l_rxsc : 4;
-#endif
- u8 channel;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 band : 2;
- u8 rsvd_0 : 1;
- u8 hw_antsw_occu : 1;
- u8 gnt_bt : 1;
- u8 ldpc : 1;
- u8 stbc : 1;
- u8 beamformed : 1;
-#else
- u8 beamformed : 1;
- u8 stbc : 1;
- u8 ldpc : 1;
- u8 gnt_bt : 1;
- u8 hw_antsw_occu : 1;
- u8 rsvd_0 : 1;
- u8 band : 2;
-#endif
-
- /* DW2 */
- u16 lsig_length;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 antidx_a : 3;
- u8 antidx_b : 3;
- u8 rsvd_1 : 2;
- u8 antidx_c : 3;
- u8 antidx_d : 3;
- u8 rsvd_2 : 2;
-#else
- u8 rsvd_1 : 2;
- u8 antidx_b : 3;
- u8 antidx_a : 3;
- u8 rsvd_2 : 2;
- u8 antidx_d : 3;
- u8 antidx_c : 3;
-#endif
-
- /* DW3 */
- u8 paid;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 paid_msb : 1;
- u8 gid : 6;
- u8 rsvd_3 : 1;
-#else
- u8 rsvd_3 : 1;
- u8 gid : 6;
- u8 paid_msb : 1;
-#endif
- u8 intf_pos;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 intf_pos_msb : 1;
- u8 rsvd_4 : 2;
- u8 nb_intf_flag : 1;
- u8 rf_mode : 2;
- u8 rsvd_5 : 2;
-#else
- u8 rsvd_5 : 2;
- u8 rf_mode : 2;
- u8 nb_intf_flag : 1;
- u8 rsvd_4 : 2;
- u8 intf_pos_msb : 1;
-#endif
-
- /* DW4 */
- s8 rxevm[4]; /* s(8,1) */
-
- /* DW5 */
- s8 cfo_tail[4]; /* s(8,7) */
-
- /* DW6 */
- s8 rxsnr[4]; /* s(8,1) */
-};
-
-struct phy_status_rpt_jaguar2_type2 {
-	/* DW0 and DW1 */
- u8 page_num;
- u8 pwdb[4];
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 l_rxsc : 4;
- u8 ht_rxsc : 4;
-#else
- u8 ht_rxsc : 4;
- u8 l_rxsc : 4;
-#endif
- u8 channel;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 band : 2;
- u8 rsvd_0 : 1;
- u8 hw_antsw_occu : 1;
- u8 gnt_bt : 1;
- u8 ldpc : 1;
- u8 stbc : 1;
- u8 beamformed : 1;
-#else
- u8 beamformed : 1;
- u8 stbc : 1;
- u8 ldpc : 1;
- u8 gnt_bt : 1;
- u8 hw_antsw_occu : 1;
- u8 rsvd_0 : 1;
- u8 band : 2;
-#endif
-
-/* DW2 */
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 shift_l_map : 6;
- u8 rsvd_1 : 2;
-#else
- u8 rsvd_1 : 2;
- u8 shift_l_map : 6;
-#endif
- u8 cnt_pw2cca;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 agc_table_a : 4;
- u8 agc_table_b : 4;
- u8 agc_table_c : 4;
- u8 agc_table_d : 4;
-#else
- u8 agc_table_b : 4;
- u8 agc_table_a : 4;
- u8 agc_table_d : 4;
- u8 agc_table_c : 4;
-#endif
-
- /* DW3 ~ DW6*/
- u8 cnt_cca2agc_rdy;
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 gain_a : 6;
- u8 rsvd_2 : 1;
- u8 trsw_a : 1;
- u8 gain_b : 6;
- u8 rsvd_3 : 1;
- u8 trsw_b : 1;
- u8 gain_c : 6;
- u8 rsvd_4 : 1;
- u8 trsw_c : 1;
- u8 gain_d : 6;
- u8 rsvd_5 : 1;
- u8 trsw_d : 1;
- u8 aagc_step_a : 2;
- u8 aagc_step_b : 2;
- u8 aagc_step_c : 2;
- u8 aagc_step_d : 2;
-#else
- u8 trsw_a : 1;
- u8 rsvd_2 : 1;
- u8 gain_a : 6;
- u8 trsw_b : 1;
- u8 rsvd_3 : 1;
- u8 gain_b : 6;
- u8 trsw_c : 1;
- u8 rsvd_4 : 1;
- u8 gain_c : 6;
- u8 trsw_d : 1;
- u8 rsvd_5 : 1;
- u8 gain_d : 6;
- u8 aagc_step_d : 2;
- u8 aagc_step_c : 2;
- u8 aagc_step_b : 2;
- u8 aagc_step_a : 2;
-#endif
- u8 ht_aagc_gain[4];
- u8 dagc_gain[4];
-#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
- u8 counter : 6;
- u8 rsvd_6 : 2;
- u8 syn_count : 5;
- u8 rsvd_7 : 3;
-#else
- u8 rsvd_6 : 2;
- u8 counter : 6;
- u8 rsvd_7 : 3;
- u8 syn_count : 5;
-#endif
-};
-
-#endif /*#ifndef __HALHWOUTSRC_H__*/
diff --git a/drivers/staging/rtlwifi/phydm/phydm_interface.c b/drivers/staging/rtlwifi/phydm/phydm_interface.c
deleted file mode 100644
index f5ecde505153..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_interface.c
+++ /dev/null
@@ -1,307 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-/*
- * ODM I/O related API.
- */
-
-u8 odm_read_1byte(struct phy_dm_struct *dm, u32 reg_addr)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- return rtl_read_byte(rtlpriv, reg_addr);
-}
-
-u16 odm_read_2byte(struct phy_dm_struct *dm, u32 reg_addr)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- return rtl_read_word(rtlpriv, reg_addr);
-}
-
-u32 odm_read_4byte(struct phy_dm_struct *dm, u32 reg_addr)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- return rtl_read_dword(rtlpriv, reg_addr);
-}
-
-void odm_write_1byte(struct phy_dm_struct *dm, u32 reg_addr, u8 data)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- rtl_write_byte(rtlpriv, reg_addr, data);
-}
-
-void odm_write_2byte(struct phy_dm_struct *dm, u32 reg_addr, u16 data)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- rtl_write_word(rtlpriv, reg_addr, data);
-}
-
-void odm_write_4byte(struct phy_dm_struct *dm, u32 reg_addr, u32 data)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- rtl_write_dword(rtlpriv, reg_addr, data);
-}
-
-void odm_set_mac_reg(struct phy_dm_struct *dm, u32 reg_addr, u32 bit_mask,
- u32 data)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- rtl_set_bbreg(rtlpriv->hw, reg_addr, bit_mask, data);
-}
-
-u32 odm_get_mac_reg(struct phy_dm_struct *dm, u32 reg_addr, u32 bit_mask)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- return rtl_get_bbreg(rtlpriv->hw, reg_addr, bit_mask);
-}
-
-void odm_set_bb_reg(struct phy_dm_struct *dm, u32 reg_addr, u32 bit_mask,
- u32 data)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- rtl_set_bbreg(rtlpriv->hw, reg_addr, bit_mask, data);
-}
-
-u32 odm_get_bb_reg(struct phy_dm_struct *dm, u32 reg_addr, u32 bit_mask)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- return rtl_get_bbreg(rtlpriv->hw, reg_addr, bit_mask);
-}
-
-void odm_set_rf_reg(struct phy_dm_struct *dm, enum odm_rf_radio_path e_rf_path,
- u32 reg_addr, u32 bit_mask, u32 data)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- rtl_set_rfreg(rtlpriv->hw, (enum radio_path)e_rf_path, reg_addr,
- bit_mask, data);
-}
-
-u32 odm_get_rf_reg(struct phy_dm_struct *dm, enum odm_rf_radio_path e_rf_path,
- u32 reg_addr, u32 bit_mask)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
-
- return rtl_get_rfreg(rtlpriv->hw, (enum radio_path)e_rf_path, reg_addr,
- bit_mask);
-}
-
-/*
- * ODM memory related API.
- */
-void odm_allocate_memory(struct phy_dm_struct *dm, void **ptr, u32 length)
-{
- *ptr = kmalloc(length, GFP_ATOMIC);
-}
-
-/* length can be ignored; it is only used to detect memory leaks. */
-void odm_free_memory(struct phy_dm_struct *dm, void *ptr, u32 length)
-{
- kfree(ptr);
-}
-
-void odm_move_memory(struct phy_dm_struct *dm, void *p_dest, void *src,
- u32 length)
-{
- memcpy(p_dest, src, length);
-}
-
-void odm_memory_set(struct phy_dm_struct *dm, void *pbuf, s8 value, u32 length)
-{
- memset(pbuf, value, length);
-}
-
-s32 odm_compare_memory(struct phy_dm_struct *dm, void *p_buf1, void *buf2,
- u32 length)
-{
- return memcmp(p_buf1, buf2, length);
-}
-
-/*
- * ODM misc related API.
- */
-void odm_acquire_spin_lock(struct phy_dm_struct *dm, enum rt_spinlock_type type)
-{
-}
-
-void odm_release_spin_lock(struct phy_dm_struct *dm, enum rt_spinlock_type type)
-{
-}
-
-/*
- * ODM timer related API.
- */
-void odm_stall_execution(u32 us_delay) { udelay(us_delay); }
-
-void ODM_delay_ms(u32 ms) { mdelay(ms); }
-
-void ODM_delay_us(u32 us) { udelay(us); }
-
-void ODM_sleep_ms(u32 ms) { msleep(ms); }
-
-void ODM_sleep_us(u32 us) { usleep_range(us, us + 1); }
-
-static u8 phydm_trans_h2c_id(struct phy_dm_struct *dm, u8 phydm_h2c_id)
-{
- u8 platform_h2c_id = phydm_h2c_id;
-
- switch (phydm_h2c_id) {
- /* 1 [0] */
- case ODM_H2C_RSSI_REPORT:
-
- break;
-
- /* 1 [3] */
- case ODM_H2C_WIFI_CALIBRATION:
-
- break;
-
- /* 1 [4] */
- case ODM_H2C_IQ_CALIBRATION:
-
- break;
- /* 1 [5] */
- case ODM_H2C_RA_PARA_ADJUST:
-
- break;
-
- /* 1 [6] */
- case PHYDM_H2C_DYNAMIC_TX_PATH:
-
- break;
-
- /* [7]*/
- case PHYDM_H2C_FW_TRACE_EN:
-
- platform_h2c_id = 0x49;
-
- break;
-
- case PHYDM_H2C_TXBF:
- break;
-
- case PHYDM_H2C_MU:
- platform_h2c_id = 0x4a; /*H2C_MU*/
- break;
-
- default:
- platform_h2c_id = phydm_h2c_id;
- break;
- }
-
- return platform_h2c_id;
-}
-
-/*ODM FW related API.*/
-
-void odm_fill_h2c_cmd(struct phy_dm_struct *dm, u8 phydm_h2c_id, u32 cmd_len,
- u8 *cmd_buffer)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- u8 platform_h2c_id;
-
- platform_h2c_id = phydm_trans_h2c_id(dm, phydm_h2c_id);
-
- ODM_RT_TRACE(dm, PHYDM_COMP_RA_DBG,
- "[H2C] platform_h2c_id = ((0x%x))\n", platform_h2c_id);
-
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->hw, platform_h2c_id, cmd_len,
- cmd_buffer);
-}
-
-u8 phydm_c2H_content_parsing(void *dm_void, u8 c2h_cmd_id, u8 c2h_cmd_len,
- u8 *tmp_buf)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 extend_c2h_sub_id = 0;
- u8 find_c2h_cmd = true;
-
- switch (c2h_cmd_id) {
- case PHYDM_C2H_DBG:
- phydm_fw_trace_handler(dm, tmp_buf, c2h_cmd_len);
- break;
-
- case PHYDM_C2H_RA_RPT:
- phydm_c2h_ra_report_handler(dm, tmp_buf, c2h_cmd_len);
- break;
-
- case PHYDM_C2H_RA_PARA_RPT:
- odm_c2h_ra_para_report_handler(dm, tmp_buf, c2h_cmd_len);
- break;
-
- case PHYDM_C2H_DYNAMIC_TX_PATH_RPT:
- break;
-
- case PHYDM_C2H_IQK_FINISH:
- break;
-
- case PHYDM_C2H_DBG_CODE:
- phydm_fw_trace_handler_code(dm, tmp_buf, c2h_cmd_len);
- break;
-
- case PHYDM_C2H_EXTEND:
- extend_c2h_sub_id = tmp_buf[0];
- if (extend_c2h_sub_id == PHYDM_EXTEND_C2H_DBG_PRINT)
- phydm_fw_trace_handler_8051(dm, tmp_buf, c2h_cmd_len);
-
- break;
-
- default:
- find_c2h_cmd = false;
- break;
- }
-
- return find_c2h_cmd;
-}
-
-u64 odm_get_current_time(struct phy_dm_struct *dm) { return jiffies; }
-
-u64 odm_get_progressing_time(struct phy_dm_struct *dm, u64 start_time)
-{
- return jiffies_to_msecs(jiffies - (u32)start_time);
-}
-
-void odm_set_tx_power_index_by_rate_section(struct phy_dm_struct *dm,
- u8 rf_path, u8 channel,
- u8 rate_section)
-{
- void *adapter = dm->adapter;
-
- phy_set_tx_power_index_by_rs(adapter, channel, rf_path, rate_section);
-}
-
-u8 odm_get_tx_power_index(struct phy_dm_struct *dm, u8 rf_path, u8 tx_rate,
- u8 band_width, u8 channel)
-{
- void *adapter = dm->adapter;
-
- return phy_get_tx_power_index(adapter, (enum odm_rf_radio_path)rf_path,
- tx_rate, band_width, channel);
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_interface.h b/drivers/staging/rtlwifi/phydm/phydm_interface.h
deleted file mode 100644
index 6ef289201d9d..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_interface.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __ODM_INTERFACE_H__
-#define __ODM_INTERFACE_H__
-
-#define INTERFACE_VERSION "1.1" /*2015.07.29 YuChen*/
-
-/*
- * =========== Constant/Structure/Enum/... Define
- */
-
-/*
- * =========== Macro Define
- */
-
-#define _reg_all(_name) ODM_##_name
-#define _reg_ic(_name, _ic) ODM_##_name##_ic
-#define _bit_all(_name) BIT_##_name
-#define _bit_ic(_name, _ic) BIT_##_name##_ic
-
-/* _cat: implemented by Token-Pasting Operator. */
-
-/*===================================
- *
- * #define ODM_REG_DIG_11N 0xC50
- * #define ODM_REG_DIG_11AC 0xDDD
- *
- * ODM_REG(DIG,_pdm_odm)
- * ===================================
- */
-
-#define _reg_11N(_name) ODM_REG_##_name##_11N
-#define _reg_11AC(_name) ODM_REG_##_name##_11AC
-#define _bit_11N(_name) ODM_BIT_##_name##_11N
-#define _bit_11AC(_name) ODM_BIT_##_name##_11AC
-
-#define _cat(_name, _ic_type, _func) \
- (((_ic_type) & ODM_IC_11N_SERIES) ? _func##_11N(_name) : \
- _func##_11AC(_name))
-
-/* _name: name of register or bit.
- * Example: "ODM_REG(R_A_AGC_CORE1, dm)"
- * gets "ODM_R_A_AGC_CORE1" or "ODM_R_A_AGC_CORE1_8192C",
- * depends on support_ic_type.
- */
-#define ODM_REG(_name, _pdm_odm) _cat(_name, _pdm_odm->support_ic_type, _reg)
-#define ODM_BIT(_name, _pdm_odm) _cat(_name, _pdm_odm->support_ic_type, _bit)
-enum phydm_h2c_cmd {
- PHYDM_H2C_TXBF = 0x41,
- ODM_H2C_RSSI_REPORT = 0x42,
- ODM_H2C_IQ_CALIBRATION = 0x45,
- ODM_H2C_RA_PARA_ADJUST = 0x47,
- PHYDM_H2C_DYNAMIC_TX_PATH = 0x48,
- PHYDM_H2C_FW_TRACE_EN = 0x49,
- ODM_H2C_WIFI_CALIBRATION = 0x6d,
- PHYDM_H2C_MU = 0x4a,
- ODM_MAX_H2CCMD
-};
-
-enum phydm_c2h_evt {
- PHYDM_C2H_DBG = 0,
- PHYDM_C2H_LB = 1,
- PHYDM_C2H_XBF = 2,
- PHYDM_C2H_TX_REPORT = 3,
- PHYDM_C2H_INFO = 9,
- PHYDM_C2H_BT_MP = 11,
- PHYDM_C2H_RA_RPT = 12,
- PHYDM_C2H_RA_PARA_RPT = 14,
- PHYDM_C2H_DYNAMIC_TX_PATH_RPT = 15,
- PHYDM_C2H_IQK_FINISH = 17, /*0x11*/
- PHYDM_C2H_DBG_CODE = 0xFE,
- PHYDM_C2H_EXTEND = 0xFF,
-};
-
-enum phydm_extend_c2h_evt {
- PHYDM_EXTEND_C2H_DBG_PRINT = 0
-
-};
-
-/*
- * =========== Extern Variables (these should be forbidden)
- */
-
-/*
- * =========== Extern Function Prototypes
- */
-
-u8 odm_read_1byte(struct phy_dm_struct *dm, u32 reg_addr);
-
-u16 odm_read_2byte(struct phy_dm_struct *dm, u32 reg_addr);
-
-u32 odm_read_4byte(struct phy_dm_struct *dm, u32 reg_addr);
-
-void odm_write_1byte(struct phy_dm_struct *dm, u32 reg_addr, u8 data);
-
-void odm_write_2byte(struct phy_dm_struct *dm, u32 reg_addr, u16 data);
-
-void odm_write_4byte(struct phy_dm_struct *dm, u32 reg_addr, u32 data);
-
-void odm_set_mac_reg(struct phy_dm_struct *dm, u32 reg_addr, u32 bit_mask,
- u32 data);
-
-u32 odm_get_mac_reg(struct phy_dm_struct *dm, u32 reg_addr, u32 bit_mask);
-
-void odm_set_bb_reg(struct phy_dm_struct *dm, u32 reg_addr, u32 bit_mask,
- u32 data);
-
-u32 odm_get_bb_reg(struct phy_dm_struct *dm, u32 reg_addr, u32 bit_mask);
-
-void odm_set_rf_reg(struct phy_dm_struct *dm, enum odm_rf_radio_path e_rf_path,
- u32 reg_addr, u32 bit_mask, u32 data);
-
-u32 odm_get_rf_reg(struct phy_dm_struct *dm, enum odm_rf_radio_path e_rf_path,
- u32 reg_addr, u32 bit_mask);
-
-/*
- * Memory related functions.
- */
-void odm_allocate_memory(struct phy_dm_struct *dm, void **ptr, u32 length);
-void odm_free_memory(struct phy_dm_struct *dm, void *ptr, u32 length);
-
-void odm_move_memory(struct phy_dm_struct *dm, void *p_dest, void *src,
- u32 length);
-
-s32 odm_compare_memory(struct phy_dm_struct *dm, void *p_buf1, void *buf2,
- u32 length);
-
-void odm_memory_set(struct phy_dm_struct *dm, void *pbuf, s8 value, u32 length);
-
-/*
- * ODM misc spin-lock related API.
- */
-void odm_acquire_spin_lock(struct phy_dm_struct *dm,
- enum rt_spinlock_type type);
-
-void odm_release_spin_lock(struct phy_dm_struct *dm,
- enum rt_spinlock_type type);
-
-/*
- * ODM timer related API.
- */
-void odm_stall_execution(u32 us_delay);
-
-void ODM_delay_ms(u32 ms);
-
-void ODM_delay_us(u32 us);
-
-void ODM_sleep_ms(u32 ms);
-
-void ODM_sleep_us(u32 us);
-
-/*
- * ODM FW related API.
- */
-void odm_fill_h2c_cmd(struct phy_dm_struct *dm, u8 element_id, u32 cmd_len,
- u8 *cmd_buffer);
-
-u8 phydm_c2H_content_parsing(void *dm_void, u8 c2h_cmd_id, u8 c2h_cmd_len,
- u8 *tmp_buf);
-
-u64 odm_get_current_time(struct phy_dm_struct *dm);
-u64 odm_get_progressing_time(struct phy_dm_struct *dm, u64 start_time);
-
-void odm_set_tx_power_index_by_rate_section(struct phy_dm_struct *dm,
- u8 rf_path, u8 channel,
- u8 rate_section);
-
-u8 odm_get_tx_power_index(struct phy_dm_struct *dm, u8 rf_path, u8 tx_rate,
- u8 band_width, u8 channel);
-
-#endif /* __ODM_INTERFACE_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/phydm_iqk.h b/drivers/staging/rtlwifi/phydm/phydm_iqk.h
deleted file mode 100644
index 0ed21e06fc33..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_iqk.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMIQK_H__
-#define __PHYDMIQK_H__
-
-/*--------------------------Define Parameters-------------------------------*/
-#define LOK_delay 1
-#define WBIQK_delay 10
-#define TX_IQK 0
-#define RX_IQK 1
-#define TXIQK 0
-#define RXIQK1 1
-#define RXIQK2 2
-#define GSRXK1 0
-#define GSRXK2 1
-#define kcount_limit_80m 2
-#define kcount_limit_others 4
-#define rxiqk_gs_limit 4
-
-#define NUM 4
-/*----------------------End Define Parameters-------------------------------*/
-
-struct dm_iqk_info {
- bool lok_fail[NUM];
- bool iqk_fail[2][NUM];
- u32 iqc_matrix[2][NUM];
- u8 iqk_times;
- u32 rf_reg18;
- u32 lna_idx;
- u8 rxiqk_step;
- u8 tmp1bcc;
- u8 kcount;
-
- u32 iqk_channel[2];
- bool iqk_fail_report[2][4][2]; /*channel/path/TRX(TX:0, RX:1) */
- u32 iqk_cfir_real[2][4][2]
- [8]; /*channel / path / TRX(TX:0, RX:1) / CFIR_real*/
- u32 iqk_cfir_imag[2][4][2]
- [8]; /*channel / path / TRX(TX:0, RX:1) / CFIR_imag*/
- u8 retry_count[2][4][3]; /* channel / path / (TXK:0, RXK1:1, RXK2:2) */
- u8 gs_retry_count[2][4][2]; /* channel / path / (GSRXK1:0, GSRXK2:1) */
- u8 rxiqk_fail_code[2][4]; /* channel / path
- * 0:SRXK1 fail, 1:RXK1 fail 2:RXK2 fail
- */
- u32 lok_idac[2][4]; /*channel / path*/
- u16 rxiqk_agc[2][4]; /*channel / path*/
- u32 bypass_iqk[2][4]; /*channel / 0xc94/0xe94*/
- u32 tmp_gntwl;
- bool is_btg;
- bool isbnd;
-};
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_kfree.c b/drivers/staging/rtlwifi/phydm/phydm_kfree.c
deleted file mode 100644
index 1a52500f97a1..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_kfree.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*============================================================*/
-/*include files*/
-/*============================================================*/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-/*<YuChen, 150720> Add for KFree Feature Requested by RF David.*/
-/*This is a phydm API*/
-
-static void phydm_set_kfree_to_rf_8814a(void *dm_void, u8 e_rf_path, u8 data)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
- bool is_odd;
-
- if ((data % 2) != 0) { /*odd->positive*/
- data = data - 1;
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(19),
- 1);
- is_odd = true;
- } else { /*even->negative*/
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(19),
- 0);
- is_odd = false;
- }
- ODM_RT_TRACE(dm, ODM_COMP_MP, "%s(): RF_0x55[19]= %d\n", __func__,
- is_odd);
- switch (data) {
- case 0:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 0);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 0);
- cali_info->kfree_offset[e_rf_path] = 0;
- break;
- case 2:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 1);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 0);
- cali_info->kfree_offset[e_rf_path] = 0;
- break;
- case 4:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 0);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 1);
- cali_info->kfree_offset[e_rf_path] = 1;
- break;
- case 6:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 1);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 1);
- cali_info->kfree_offset[e_rf_path] = 1;
- break;
- case 8:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 0);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 2);
- cali_info->kfree_offset[e_rf_path] = 2;
- break;
- case 10:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 1);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 2);
- cali_info->kfree_offset[e_rf_path] = 2;
- break;
- case 12:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 0);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 3);
- cali_info->kfree_offset[e_rf_path] = 3;
- break;
- case 14:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 1);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 3);
- cali_info->kfree_offset[e_rf_path] = 3;
- break;
- case 16:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 0);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 4);
- cali_info->kfree_offset[e_rf_path] = 4;
- break;
- case 18:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 1);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 4);
- cali_info->kfree_offset[e_rf_path] = 4;
- break;
- case 20:
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET, BIT(14),
- 0);
- odm_set_rf_reg(dm, e_rf_path, REG_RF_TX_GAIN_OFFSET,
- BIT(17) | BIT(16) | BIT(15), 5);
- cali_info->kfree_offset[e_rf_path] = 5;
- break;
-
- default:
- break;
- }
-
- if (!is_odd) {
-		/*An even code means the Kfree offset is negative; record it as such.*/
- cali_info->kfree_offset[e_rf_path] =
- (-1) * cali_info->kfree_offset[e_rf_path];
- ODM_RT_TRACE(dm, ODM_COMP_MP, "%s(): kfree_offset = %d\n",
- __func__, cali_info->kfree_offset[e_rf_path]);
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_MP, "%s(): kfree_offset = %d\n",
- __func__, cali_info->kfree_offset[e_rf_path]);
- }
-}
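The switch above encodes the Kfree calibration word as sign-plus-magnitude: bit 0 selects the sign (odd means positive, even negative) and the remaining bits, divided by four, give the offset magnitude stored in cali_info. A compact, purely illustrative decoder of that convention (the helper name is hypothetical):

/* Illustrative decode of the Kfree word handled above: 0,2 -> 0; 4,6 -> 1;
 * ... 20 -> 5, negated for even codes and kept positive for odd ones.
 */
static s8 kfree_decode_offset(u8 data)
{
	s8 magnitude = data >> 2;

	return (data & 1) ? magnitude : -magnitude;
}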
-
-static void phydm_set_kfree_to_rf(void *dm_void, u8 e_rf_path, u8 data)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_RTL8814A)
- phydm_set_kfree_to_rf_8814a(dm, e_rf_path, data);
-}
-
-void phydm_config_kfree(void *dm_void, u8 channel_to_sw, u8 *kfree_table)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
- u8 rfpath = 0, max_rf_path = 0;
- u8 channel_idx = 0;
-
- if (dm->support_ic_type & ODM_RTL8814A)
- max_rf_path = 4; /*0~3*/
- else if (dm->support_ic_type &
- (ODM_RTL8812 | ODM_RTL8192E | ODM_RTL8822B))
- max_rf_path = 2; /*0~1*/
- else
- max_rf_path = 1;
-
- ODM_RT_TRACE(dm, ODM_COMP_MP, "===>%s()\n", __func__);
-
- if (cali_info->reg_rf_kfree_enable == 2) {
- ODM_RT_TRACE(dm, ODM_COMP_MP,
- "%s(): reg_rf_kfree_enable == 2, Disable\n",
- __func__);
- return;
- }
-
- if (cali_info->reg_rf_kfree_enable != 1 &&
- cali_info->reg_rf_kfree_enable != 0) {
- ODM_RT_TRACE(dm, ODM_COMP_MP, "<===%s()\n", __func__);
- return;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_MP, "%s(): reg_rf_kfree_enable == true\n",
- __func__);
-	/*Make sure the target value is defined*/
- if (((cali_info->reg_rf_kfree_enable == 1) &&
- (kfree_table[0] != 0xFF)) ||
- cali_info->rf_kfree_enable) {
- /*if kfree_table[0] == 0xff, means no Kfree*/
- if (*dm->band_type == ODM_BAND_2_4G) {
- if (channel_to_sw <= 14 && channel_to_sw >= 1)
- channel_idx = PHYDM_2G;
- } else if (*dm->band_type == ODM_BAND_5G) {
- if (channel_to_sw >= 36 && channel_to_sw <= 48)
- channel_idx = PHYDM_5GLB1;
- if (channel_to_sw >= 52 && channel_to_sw <= 64)
- channel_idx = PHYDM_5GLB2;
- if (channel_to_sw >= 100 && channel_to_sw <= 120)
- channel_idx = PHYDM_5GMB1;
- if (channel_to_sw >= 124 && channel_to_sw <= 144)
- channel_idx = PHYDM_5GMB2;
- if (channel_to_sw >= 149 && channel_to_sw <= 177)
- channel_idx = PHYDM_5GHB;
- }
-
- for (rfpath = ODM_RF_PATH_A; rfpath < max_rf_path; rfpath++) {
- ODM_RT_TRACE(dm, ODM_COMP_MP, "%s(): PATH_%d: %#x\n",
- __func__, rfpath,
- kfree_table[channel_idx * max_rf_path +
- rfpath]);
- phydm_set_kfree_to_rf(
- dm, rfpath,
- kfree_table[channel_idx * max_rf_path +
- rfpath]);
- }
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_MP,
- "%s(): targetval not defined, Don't execute KFree Process.\n",
- __func__);
- return;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_MP, "<===%s()\n", __func__);
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_kfree.h b/drivers/staging/rtlwifi/phydm/phydm_kfree.h
deleted file mode 100644
index feeeb69d5202..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_kfree.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMKFREE_H__
-#define __PHYDMKFREE_H__
-
-#define KFREE_VERSION "1.0"
-
-enum phydm_kfree_channeltosw {
- PHYDM_2G = 0,
- PHYDM_5GLB1 = 1,
- PHYDM_5GLB2 = 2,
- PHYDM_5GMB1 = 3,
- PHYDM_5GMB2 = 4,
- PHYDM_5GHB = 5,
-};
-
-void phydm_config_kfree(void *dm_void, u8 channel_to_sw, u8 *kfree_table);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c b/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c
deleted file mode 100644
index 63f52623a8cf..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c
+++ /dev/null
@@ -1,319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-#include "phydm_noisemonitor.h"
-
-/* *************************************************
- * This function is for inband noise test utility only
- * To obtain the inband noise level (dBm), do the following.
- * 1. disable DIG and Power Saving
- * 2. Set initial gain = 0x1a
- * 3. Stop updating idle time power report (for driver read)
- * - 0x80c[25]
- *
- * **************************************************/
-
-#define VALID_MIN -35
-#define VALID_MAX 10
-#define VALID_CNT 5
-
-static inline void phydm_set_noise_data_sum(struct noise_level *noise_data,
- u8 max_rf_path)
-{
- u8 rf_path;
-
- for (rf_path = ODM_RF_PATH_A; rf_path < max_rf_path; rf_path++) {
- if (noise_data->valid_cnt[rf_path])
- noise_data->sum[rf_path] /=
- noise_data->valid_cnt[rf_path];
- else
- noise_data->sum[rf_path] = 0;
- }
-}
-
-static s16 odm_inband_noise_monitor_n_series(struct phy_dm_struct *dm,
- u8 is_pause_dig, u8 igi_value,
- u32 max_time)
-{
- u32 tmp4b;
- u8 max_rf_path = 0, rf_path;
- u8 reg_c50, reg_c58, valid_done = 0;
- struct noise_level noise_data;
- u64 start = 0, func_start = 0, func_end = 0;
-
- func_start = odm_get_current_time(dm);
- dm->noise_level.noise_all = 0;
-
- if ((dm->rf_type == ODM_1T2R) || (dm->rf_type == ODM_2T2R))
- max_rf_path = 2;
- else
- max_rf_path = 1;
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "%s() ==>\n", __func__);
-
- odm_memory_set(dm, &noise_data, 0, sizeof(struct noise_level));
-
- /* */
- /* step 1. Disable DIG && Set initial gain. */
- /* */
-
- if (is_pause_dig)
- odm_pause_dig(dm, PHYDM_PAUSE, PHYDM_PAUSE_LEVEL_1, igi_value);
- /* */
- /* step 2. Disable all power save for read registers */
- /* */
- /* dcmd_DebugControlPowerSave(adapter, PSDisable); */
-
- /* */
- /* step 3. Get noise power level */
- /* */
- start = odm_get_current_time(dm);
- while (1) {
-		/* Stop updating idle time power report (for driver read) */
- odm_set_bb_reg(dm, REG_FPGA0_TX_GAIN_STAGE, BIT(25), 1);
-
- /* Read Noise Floor Report */
- tmp4b = odm_get_bb_reg(dm, 0x8f8, MASKDWORD);
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "Noise Floor Report (0x8f8) = 0x%08x\n", tmp4b);
-
-		/* update idle time power report per 5us */
- odm_set_bb_reg(dm, REG_FPGA0_TX_GAIN_STAGE, BIT(25), 0);
-
- noise_data.value[ODM_RF_PATH_A] = (u8)(tmp4b & 0xff);
- noise_data.value[ODM_RF_PATH_B] = (u8)((tmp4b & 0xff00) >> 8);
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "value_a = 0x%x(%d), value_b = 0x%x(%d)\n",
- noise_data.value[ODM_RF_PATH_A],
- noise_data.value[ODM_RF_PATH_A],
- noise_data.value[ODM_RF_PATH_B],
- noise_data.value[ODM_RF_PATH_B]);
-
- for (rf_path = ODM_RF_PATH_A; rf_path < max_rf_path;
- rf_path++) {
- noise_data.sval[rf_path] =
- (s8)noise_data.value[rf_path];
- noise_data.sval[rf_path] /= 2;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "sval_a = %d, sval_b = %d\n",
- noise_data.sval[ODM_RF_PATH_A],
- noise_data.sval[ODM_RF_PATH_B]);
-
- for (rf_path = ODM_RF_PATH_A; rf_path < max_rf_path;
- rf_path++) {
- if (!(noise_data.valid_cnt[rf_path] < VALID_CNT) ||
- !(noise_data.sval[rf_path] < VALID_MAX &&
- noise_data.sval[rf_path] >= VALID_MIN)) {
- continue;
- }
-
- noise_data.valid_cnt[rf_path]++;
- noise_data.sum[rf_path] += noise_data.sval[rf_path];
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "rf_path:%d Valid sval = %d\n", rf_path,
- noise_data.sval[rf_path]);
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "Sum of sval = %d,\n",
- noise_data.sum[rf_path]);
- if (noise_data.valid_cnt[rf_path] == VALID_CNT) {
- valid_done++;
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "After divided, rf_path:%d,sum = %d\n",
- rf_path, noise_data.sum[rf_path]);
- }
- }
-
- if ((valid_done == max_rf_path) ||
- (odm_get_progressing_time(dm, start) > max_time)) {
- phydm_set_noise_data_sum(&noise_data, max_rf_path);
- break;
- }
- }
- reg_c50 = (u8)odm_get_bb_reg(dm, REG_OFDM_0_XA_AGC_CORE1, MASKBYTE0);
- reg_c50 &= ~BIT(7);
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "0x%x = 0x%02x(%d)\n",
- REG_OFDM_0_XA_AGC_CORE1, reg_c50, reg_c50);
- dm->noise_level.noise[ODM_RF_PATH_A] =
- (u8)(-110 + reg_c50 + noise_data.sum[ODM_RF_PATH_A]);
- dm->noise_level.noise_all += dm->noise_level.noise[ODM_RF_PATH_A];
-
- if (max_rf_path == 2) {
- reg_c58 = (u8)odm_get_bb_reg(dm, REG_OFDM_0_XB_AGC_CORE1,
- MASKBYTE0);
- reg_c58 &= ~BIT(7);
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "0x%x = 0x%02x(%d)\n",
- REG_OFDM_0_XB_AGC_CORE1, reg_c58, reg_c58);
- dm->noise_level.noise[ODM_RF_PATH_B] =
- (u8)(-110 + reg_c58 + noise_data.sum[ODM_RF_PATH_B]);
- dm->noise_level.noise_all +=
- dm->noise_level.noise[ODM_RF_PATH_B];
- }
- dm->noise_level.noise_all /= max_rf_path;
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "noise_a = %d, noise_b = %d\n",
- dm->noise_level.noise[ODM_RF_PATH_A],
- dm->noise_level.noise[ODM_RF_PATH_B]);
-
- /* */
- /* step 4. Recover the Dig */
- /* */
- if (is_pause_dig)
- odm_pause_dig(dm, PHYDM_RESUME, PHYDM_PAUSE_LEVEL_1, igi_value);
- func_end = odm_get_progressing_time(dm, func_start);
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "%s() <==\n", __func__);
- return dm->noise_level.noise_all;
-}
-
-static s16 odm_inband_noise_monitor_ac_series(struct phy_dm_struct *dm,
- u8 is_pause_dig, u8 igi_value,
- u32 max_time)
-{
- s32 rxi_buf_anta, rxq_buf_anta; /*rxi_buf_antb, rxq_buf_antb;*/
- s32 value32, pwdb_A = 0, sval, noise, sum;
- bool pd_flag;
- u8 valid_cnt;
- u64 start = 0, func_start = 0, func_end = 0;
-
- if (!(dm->support_ic_type & (ODM_RTL8812 | ODM_RTL8821 | ODM_RTL8814A)))
- return 0;
-
- func_start = odm_get_current_time(dm);
- dm->noise_level.noise_all = 0;
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "%s() ==>\n", __func__);
-
- /* step 1. Disable DIG && Set initial gain. */
- if (is_pause_dig)
- odm_pause_dig(dm, PHYDM_PAUSE, PHYDM_PAUSE_LEVEL_1, igi_value);
-
- /* step 2. Disable all power save for read registers */
- /*dcmd_DebugControlPowerSave(adapter, PSDisable); */
-
- /* step 3. Get noise power level */
- start = odm_get_current_time(dm);
-
- /* reset counters */
- sum = 0;
- valid_cnt = 0;
-
- /* step 3. Get noise power level */
- while (1) {
- /*Set IGI=0x1C */
- odm_write_dig(dm, 0x1C);
- /*stop CK320&CK88 */
- odm_set_bb_reg(dm, 0x8B4, BIT(6), 1);
- /*Read path-A */
- odm_set_bb_reg(dm, 0x8FC, MASKDWORD, 0x200); /*set debug port*/
- value32 = odm_get_bb_reg(dm, 0xFA0,
- MASKDWORD); /*read debug port*/
-
- rxi_buf_anta = (value32 & 0xFFC00) >>
- 10; /*rxi_buf_anta=RegFA0[19:10]*/
-		rxq_buf_anta = value32 & 0x3FF; /*rxq_buf_anta=RegFA0[9:0]*/
-
- pd_flag = (bool)((value32 & BIT(31)) >> 31);
-
- /*Not in packet detection period or Tx state */
- if ((!pd_flag) || (rxi_buf_anta != 0x200)) {
- /*sign conversion*/
- rxi_buf_anta = odm_sign_conversion(rxi_buf_anta, 10);
- rxq_buf_anta = odm_sign_conversion(rxq_buf_anta, 10);
-
- pwdb_A = odm_pwdb_conversion(
- rxi_buf_anta * rxi_buf_anta +
- rxq_buf_anta * rxq_buf_anta,
- 20, 18); /*S(10,9)*S(10,9)=S(20,18)*/
-
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "pwdb_A= %d dB, rxi_buf_anta= 0x%x, rxq_buf_anta= 0x%x\n",
- pwdb_A, rxi_buf_anta & 0x3FF,
- rxq_buf_anta & 0x3FF);
- }
- /*Start CK320&CK88*/
- odm_set_bb_reg(dm, 0x8B4, BIT(6), 0);
- /*BB Reset*/
- odm_write_1byte(dm, 0x02, odm_read_1byte(dm, 0x02) & (~BIT(0)));
- odm_write_1byte(dm, 0x02, odm_read_1byte(dm, 0x02) | BIT(0));
- /*PMAC Reset*/
- odm_write_1byte(dm, 0xB03,
- odm_read_1byte(dm, 0xB03) & (~BIT(0)));
- odm_write_1byte(dm, 0xB03, odm_read_1byte(dm, 0xB03) | BIT(0));
- /*CCK Reset*/
- if (odm_read_1byte(dm, 0x80B) & BIT(4)) {
- odm_write_1byte(dm, 0x80B,
- odm_read_1byte(dm, 0x80B) & (~BIT(4)));
- odm_write_1byte(dm, 0x80B,
- odm_read_1byte(dm, 0x80B) | BIT(4));
- }
-
- sval = pwdb_A;
-
- if ((sval < 0 && sval >= -27) && (valid_cnt < VALID_CNT)) {
- valid_cnt++;
- sum += sval;
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "Valid sval = %d\n",
- sval);
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "Sum of sval = %d,\n",
- sum);
- if ((valid_cnt >= VALID_CNT) ||
- (odm_get_progressing_time(dm, start) > max_time)) {
- sum /= VALID_CNT;
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "After divided, sum = %d\n", sum);
- break;
- }
- }
- }
-
- /*ADC backoff is 12dB,*/
- /*Ptarget=0x1C-110=-82dBm*/
- noise = sum + 12 + 0x1C - 110;
-
- /*Offset*/
- noise = noise - 3;
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "noise = %d\n", noise);
- dm->noise_level.noise_all = (s16)noise;
-
- /* step 4. Recover the Dig*/
- if (is_pause_dig)
- odm_pause_dig(dm, PHYDM_RESUME, PHYDM_PAUSE_LEVEL_1, igi_value);
-
- func_end = odm_get_progressing_time(dm, func_start);
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "%s() <==\n", __func__);
-
- return dm->noise_level.noise_all;
-}
-
-s16 odm_inband_noise_monitor(void *dm_void, u8 is_pause_dig, u8 igi_value,
- u32 max_time)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES)
- return odm_inband_noise_monitor_ac_series(dm, is_pause_dig,
- igi_value, max_time);
- else
- return odm_inband_noise_monitor_n_series(dm, is_pause_dig,
- igi_value, max_time);
-}
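
The dBm conversion performed by odm_inband_noise_monitor_n_series() above is spread across its sampling loop. A minimal sketch of the same arithmetic, assuming only what is visible above (samples already halved and accepted while in [-35, 10), the AGC byte read with BIT(7) cleared, and the -110 dBm base), is shown below; noise_dbm_n_series() is a hypothetical name, not a driver API.

/* Sketch only, not a driver function: the N-series noise-floor arithmetic
 * above.  'samples' holds up to VALID_CNT accepted readings, each already
 * (s8)raw / 2; 'igi' is the 0xc50/0xc58 byte with BIT(7) cleared.
 */
static int noise_dbm_n_series(const signed char *samples, int count,
			      unsigned char igi)
{
	int sum = 0, i;

	for (i = 0; i < count; i++)
		sum += samples[i];
	if (count)
		sum /= count;	/* average, as phydm_set_noise_data_sum() does */

	return -110 + igi + sum;	/* noise floor in dBm */
}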
diff --git a/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h b/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h
deleted file mode 100644
index 7bce088678d3..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __ODMNOISEMONITOR_H__
-#define __ODMNOISEMONITOR_H__
-
-#define ODM_MAX_CHANNEL_NUM 38 /* 14+24 */
-struct noise_level {
- u8 value[MAX_RF_PATH];
- s8 sval[MAX_RF_PATH];
-
- s32 sum[MAX_RF_PATH];
- u8 valid[MAX_RF_PATH];
- u8 valid_cnt[MAX_RF_PATH];
-};
-
-struct odm_noise_monitor {
- s8 noise[MAX_RF_PATH];
- s16 noise_all;
-};
-
-s16 odm_inband_noise_monitor(void *dm_void, u8 is_pause_dig, u8 igi_value,
- u32 max_time);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c b/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c
deleted file mode 100644
index c98de4bb3c57..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c
+++ /dev/null
@@ -1,633 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*============================================================ */
-/* include files */
-/*============================================================ */
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-/* ************************************************************
- * Global var
- * *************************************************************/
-
-u32 ofdm_swing_table[OFDM_TABLE_SIZE] = {
- 0x7f8001fe, /* 0, +6.0dB */
- 0x788001e2, /* 1, +5.5dB */
- 0x71c001c7, /* 2, +5.0dB*/
- 0x6b8001ae, /* 3, +4.5dB*/
- 0x65400195, /* 4, +4.0dB*/
- 0x5fc0017f, /* 5, +3.5dB*/
- 0x5a400169, /* 6, +3.0dB*/
- 0x55400155, /* 7, +2.5dB*/
- 0x50800142, /* 8, +2.0dB*/
- 0x4c000130, /* 9, +1.5dB*/
- 0x47c0011f, /* 10, +1.0dB*/
- 0x43c0010f, /* 11, +0.5dB*/
- 0x40000100, /* 12, +0dB*/
- 0x3c8000f2, /* 13, -0.5dB*/
- 0x390000e4, /* 14, -1.0dB*/
- 0x35c000d7, /* 15, -1.5dB*/
- 0x32c000cb, /* 16, -2.0dB*/
- 0x300000c0, /* 17, -2.5dB*/
- 0x2d4000b5, /* 18, -3.0dB*/
- 0x2ac000ab, /* 19, -3.5dB*/
- 0x288000a2, /* 20, -4.0dB*/
- 0x26000098, /* 21, -4.5dB*/
- 0x24000090, /* 22, -5.0dB*/
- 0x22000088, /* 23, -5.5dB*/
- 0x20000080, /* 24, -6.0dB*/
- 0x1e400079, /* 25, -6.5dB*/
- 0x1c800072, /* 26, -7.0dB*/
- 0x1b00006c, /* 27. -7.5dB*/
- 0x19800066, /* 28, -8.0dB*/
- 0x18000060, /* 29, -8.5dB*/
- 0x16c0005b, /* 30, -9.0dB*/
- 0x15800056, /* 31, -9.5dB*/
- 0x14400051, /* 32, -10.0dB*/
- 0x1300004c, /* 33, -10.5dB*/
- 0x12000048, /* 34, -11.0dB*/
- 0x11000044, /* 35, -11.5dB*/
- 0x10000040, /* 36, -12.0dB*/
-};
-
-u8 cck_swing_table_ch1_ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB*/
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB*/
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB*/
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB*/
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB*/
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB*/
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB*/
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04,
- 0x02}, /* 12, -6.0dB <== default */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB*/
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB*/
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB*/
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB*/
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB*/
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB*/
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB*/
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB*/
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB*/
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB*/
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB*/
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB*/
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB*/
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB*/
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB*/
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB*/
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB*/
-};
-
-u8 cck_swing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB*/
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB*/
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB*/
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB*/
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00,
- 0x00}, /* 12, -6.0dB <== default*/
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB*/
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB*/
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB*/
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB*/
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB*/
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB*/
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB*/
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB*/
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB*/
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB*/
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB*/
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB*/
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB*/
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB*/
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB*/
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB*/
-};
-
-u32 ofdm_swing_table_new[OFDM_TABLE_SIZE] = {
- 0x0b40002d, /* 0, -15.0dB */
- 0x0c000030, /* 1, -14.5dB*/
- 0x0cc00033, /* 2, -14.0dB*/
- 0x0d800036, /* 3, -13.5dB*/
- 0x0e400039, /* 4, -13.0dB */
- 0x0f00003c, /* 5, -12.5dB*/
- 0x10000040, /* 6, -12.0dB*/
- 0x11000044, /* 7, -11.5dB*/
- 0x12000048, /* 8, -11.0dB*/
- 0x1300004c, /* 9, -10.5dB*/
- 0x14400051, /* 10, -10.0dB*/
- 0x15800056, /* 11, -9.5dB*/
- 0x16c0005b, /* 12, -9.0dB*/
- 0x18000060, /* 13, -8.5dB*/
- 0x19800066, /* 14, -8.0dB*/
- 0x1b00006c, /* 15, -7.5dB*/
- 0x1c800072, /* 16, -7.0dB*/
- 0x1e400079, /* 17, -6.5dB*/
- 0x20000080, /* 18, -6.0dB*/
- 0x22000088, /* 19, -5.5dB*/
- 0x24000090, /* 20, -5.0dB*/
- 0x26000098, /* 21, -4.5dB*/
- 0x288000a2, /* 22, -4.0dB*/
- 0x2ac000ab, /* 23, -3.5dB*/
- 0x2d4000b5, /* 24, -3.0dB*/
- 0x300000c0, /* 25, -2.5dB*/
- 0x32c000cb, /* 26, -2.0dB*/
- 0x35c000d7, /* 27, -1.5dB*/
- 0x390000e4, /* 28, -1.0dB*/
- 0x3c8000f2, /* 29, -0.5dB*/
- 0x40000100, /* 30, +0dB*/
- 0x43c0010f, /* 31, +0.5dB*/
- 0x47c0011f, /* 32, +1.0dB*/
- 0x4c000130, /* 33, +1.5dB*/
- 0x50800142, /* 34, +2.0dB*/
- 0x55400155, /* 35, +2.5dB*/
- 0x5a400169, /* 36, +3.0dB*/
- 0x5fc0017f, /* 37, +3.5dB*/
- 0x65400195, /* 38, +4.0dB*/
- 0x6b8001ae, /* 39, +4.5dB*/
- 0x71c001c7, /* 40, +5.0dB*/
- 0x788001e2, /* 41, +5.5dB*/
- 0x7f8001fe /* 42, +6.0dB*/
-};
-
-u8 cck_swing_table_ch1_ch14_88f[CCK_TABLE_SIZE_88F][16] = {
- {0x44, 0x42, 0x3C, 0x33, 0x28, 0x1C, 0x13, 0x0B, 0x05, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-16dB*/
- {0x48, 0x46, 0x3F, 0x36, 0x2A, 0x1E, 0x14, 0x0B, 0x05, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-15.5dB*/
- {0x4D, 0x4A, 0x43, 0x39, 0x2C, 0x20, 0x15, 0x0C, 0x06, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-15dB*/
- {0x51, 0x4F, 0x47, 0x3C, 0x2F, 0x22, 0x16, 0x0D, 0x06, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-14.5dB*/
- {0x56, 0x53, 0x4B, 0x40, 0x32, 0x24, 0x17, 0x0E, 0x06, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-14dB*/
- {0x5B, 0x58, 0x50, 0x43, 0x35, 0x26, 0x19, 0x0E, 0x07, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-13.5dB*/
- {0x60, 0x5D, 0x54, 0x47, 0x38, 0x28, 0x1A, 0x0F, 0x07, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-13dB*/
- {0x66, 0x63, 0x59, 0x4C, 0x3B, 0x2B, 0x1C, 0x10, 0x08, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-12.5dB*/
- {0x6C, 0x69, 0x5F, 0x50, 0x3F, 0x2D, 0x1E, 0x11, 0x08, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-12dB*/
- {0x73, 0x6F, 0x64, 0x55, 0x42, 0x30, 0x1F, 0x12, 0x08, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-11.5dB*/
- {0x79, 0x76, 0x6A, 0x5A, 0x46, 0x33, 0x21, 0x13, 0x09, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-11dB*/
- {0x81, 0x7C, 0x71, 0x5F, 0x4A, 0x36, 0x23, 0x14, 0x0A, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-10.5dB*/
- {0x88, 0x84, 0x77, 0x65, 0x4F, 0x39, 0x25, 0x15, 0x0A, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-10dB*/
- {0x90, 0x8C, 0x7E, 0x6B, 0x54, 0x3C, 0x27, 0x17, 0x0B, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-9.5dB*/
- {0x99, 0x94, 0x86, 0x71, 0x58, 0x40, 0x2A, 0x18, 0x0B, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-9dB*/
- {0xA2, 0x9D, 0x8E, 0x78, 0x5E, 0x43, 0x2C, 0x19, 0x0C, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-8.5dB*/
- {0xAC, 0xA6, 0x96, 0x7F, 0x63, 0x47, 0x2F, 0x1B, 0x0D, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-8dB*/
- {0xB6, 0xB0, 0x9F, 0x87, 0x69, 0x4C, 0x32, 0x1D, 0x0D, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-7.5dB*/
- {0xC1, 0xBA, 0xA8, 0x8F, 0x6F, 0x50, 0x35, 0x1E, 0x0E, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-7dB*/
- {0xCC, 0xC5, 0xB2, 0x97, 0x76, 0x55, 0x38, 0x20, 0x0F, 0x05, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-6.5dB*/
- {0xD8, 0xD1, 0xBD, 0xA0, 0x7D, 0x5A, 0x3B, 0x22, 0x10, 0x05, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00} /*-6dB*/
-};
-
-u8 cck_swing_table_ch1_ch13_88f[CCK_TABLE_SIZE_88F][16] = {
- {0x44, 0x42, 0x3C, 0x33, 0x28, 0x1C, 0x13, 0x0B, 0x05, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-16dB*/
- {0x48, 0x46, 0x3F, 0x36, 0x2A, 0x1E, 0x14, 0x0B, 0x05, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-15.5dB*/
- {0x4D, 0x4A, 0x43, 0x39, 0x2C, 0x20, 0x15, 0x0C, 0x06, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-15dB*/
- {0x51, 0x4F, 0x47, 0x3C, 0x2F, 0x22, 0x16, 0x0D, 0x06, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-14.5dB*/
- {0x56, 0x53, 0x4B, 0x40, 0x32, 0x24, 0x17, 0x0E, 0x06, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-14dB*/
- {0x5B, 0x58, 0x50, 0x43, 0x35, 0x26, 0x19, 0x0E, 0x07, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-13.5dB*/
- {0x60, 0x5D, 0x54, 0x47, 0x38, 0x28, 0x1A, 0x0F, 0x07, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-13dB*/
- {0x66, 0x63, 0x59, 0x4C, 0x3B, 0x2B, 0x1C, 0x10, 0x08, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-12.5dB*/
- {0x6C, 0x69, 0x5F, 0x50, 0x3F, 0x2D, 0x1E, 0x11, 0x08, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-12dB*/
- {0x73, 0x6F, 0x64, 0x55, 0x42, 0x30, 0x1F, 0x12, 0x08, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-11.5dB*/
- {0x79, 0x76, 0x6A, 0x5A, 0x46, 0x33, 0x21, 0x13, 0x09, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-11dB*/
- {0x81, 0x7C, 0x71, 0x5F, 0x4A, 0x36, 0x23, 0x14, 0x0A, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-10.5dB*/
- {0x88, 0x84, 0x77, 0x65, 0x4F, 0x39, 0x25, 0x15, 0x0A, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-10dB*/
- {0x90, 0x8C, 0x7E, 0x6B, 0x54, 0x3C, 0x27, 0x17, 0x0B, 0x03, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-9.5dB*/
- {0x99, 0x94, 0x86, 0x71, 0x58, 0x40, 0x2A, 0x18, 0x0B, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-9dB*/
- {0xA2, 0x9D, 0x8E, 0x78, 0x5E, 0x43, 0x2C, 0x19, 0x0C, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-8.5dB*/
- {0xAC, 0xA6, 0x96, 0x7F, 0x63, 0x47, 0x2F, 0x1B, 0x0D, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-8dB*/
- {0xB6, 0xB0, 0x9F, 0x87, 0x69, 0x4C, 0x32, 0x1D, 0x0D, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-7.5dB*/
- {0xC1, 0xBA, 0xA8, 0x8F, 0x6F, 0x50, 0x35, 0x1E, 0x0E, 0x04, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-7dB*/
- {0xCC, 0xC5, 0xB2, 0x97, 0x76, 0x55, 0x38, 0x20, 0x0F, 0x05, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-6.5dB*/
- {0xD8, 0xD1, 0xBD, 0xA0, 0x7D, 0x5A, 0x3B, 0x22, 0x10, 0x05, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00} /*-6dB*/
-};
-
-u8 cck_swing_table_ch14_88f[CCK_TABLE_SIZE_88F][16] = {
- {0x44, 0x42, 0x3C, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-16dB*/
- {0x48, 0x46, 0x3F, 0x2A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-15.5dB*/
- {0x4D, 0x4A, 0x43, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-15dB*/
- {0x51, 0x4F, 0x47, 0x2F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-14.5dB*/
- {0x56, 0x53, 0x4B, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-14dB*/
- {0x5B, 0x58, 0x50, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-13.5dB*/
- {0x60, 0x5D, 0x54, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-13dB*/
- {0x66, 0x63, 0x59, 0x3B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-12.5dB*/
- {0x6C, 0x69, 0x5F, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-12dB*/
- {0x73, 0x6F, 0x64, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-11.5dB*/
- {0x79, 0x76, 0x6A, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-11dB*/
- {0x81, 0x7C, 0x71, 0x4A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-10.5dB*/
- {0x88, 0x84, 0x77, 0x4F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-10dB*/
- {0x90, 0x8C, 0x7E, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-9.5dB*/
- {0x99, 0x94, 0x86, 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-9dB*/
- {0xA2, 0x9D, 0x8E, 0x5E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-8.5dB*/
- {0xAC, 0xA6, 0x96, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-8dB*/
- {0xB6, 0xB0, 0x9F, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-7.5dB*/
- {0xC1, 0xBA, 0xA8, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-7dB*/
- {0xCC, 0xC5, 0xB2, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, /*-6.5dB*/
- {0xD8, 0xD1, 0xBD, 0x7D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00} /*-6dB*/
-};
-
-u8 cck_swing_table_ch1_ch13_new[CCK_TABLE_SIZE][8] = {
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB*/
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB*/
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB*/
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB*/
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB*/
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB*/
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB*/
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB*/
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB*/
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB*/
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB*/
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB*/
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB*/
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB*/
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB*/
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB*/
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB*/
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /*20, -6.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB*/
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB*/
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB*/
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB*/
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB*/
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB*/
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB*/
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB*/
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB*/
-};
-
-u8 cck_swing_table_ch14_new[CCK_TABLE_SIZE][8] = {
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB*/
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB*/
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB*/
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB*/
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB*/
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /*5, -13.5dB*/
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB*/
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB*/
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB*/
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB*/
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB*/
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /*11, -10.5dB*/
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB*/
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB*/
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /*14, -9.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB*/
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB*/
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB*/
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /*23, -4.5dB*/
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /*27, -2.5dB*/
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /*29, -1.5dB*/
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */
-};
-
-u32 cck_swing_table_ch1_ch14_8723d[CCK_TABLE_SIZE_8723D] = {
- 0x0CD, /*0 , -20dB*/
- 0x0D9, 0x0E6, 0x0F3, 0x102, 0x111, 0x121, 0x132, 0x144, 0x158, 0x16C,
- 0x182, 0x198, 0x1B1, 0x1CA, 0x1E5, 0x202, 0x221, 0x241, 0x263, 0x287,
- 0x2AE, 0x2D6, 0x301, 0x32F, 0x35F, 0x392, 0x3C9, 0x402, 0x43F, 0x47F,
- 0x4C3, 0x50C, 0x558, 0x5A9, 0x5FF, 0x65A, 0x6BA, 0x720, 0x78C, 0x7FF,
-};
-
-/* JJ ADD 20161014 */
-u32 cck_swing_table_ch1_ch14_8710b[CCK_TABLE_SIZE_8710B] = {
- 0x0CD, /*0 , -20dB*/
- 0x0D9, 0x0E6, 0x0F3, 0x102, 0x111, 0x121, 0x132, 0x144, 0x158, 0x16C,
- 0x182, 0x198, 0x1B1, 0x1CA, 0x1E5, 0x202, 0x221, 0x241, 0x263, 0x287,
- 0x2AE, 0x2D6, 0x301, 0x32F, 0x35F, 0x392, 0x3C9, 0x402, 0x43F, 0x47F,
- 0x4C3, 0x50C, 0x558, 0x5A9, 0x5FF, 0x65A, 0x6BA, 0x720, 0x78C, 0x7FF,
-};
-
-u32 tx_scaling_table_jaguar[TXSCALE_TABLE_SIZE] = {
- 0x081, /* 0, -12.0dB*/
- 0x088, /* 1, -11.5dB*/
- 0x090, /* 2, -11.0dB*/
- 0x099, /* 3, -10.5dB*/
- 0x0A2, /* 4, -10.0dB*/
- 0x0AC, /* 5, -9.5dB*/
- 0x0B6, /* 6, -9.0dB*/
- 0x0C0, /*7, -8.5dB*/
- 0x0CC, /* 8, -8.0dB*/
- 0x0D8, /* 9, -7.5dB*/
- 0x0E5, /* 10, -7.0dB*/
- 0x0F2, /* 11, -6.5dB*/
- 0x101, /* 12, -6.0dB*/
- 0x110, /* 13, -5.5dB*/
- 0x120, /* 14, -5.0dB*/
- 0x131, /* 15, -4.5dB*/
- 0x143, /* 16, -4.0dB*/
- 0x156, /* 17, -3.5dB*/
- 0x16A, /* 18, -3.0dB*/
- 0x180, /* 19, -2.5dB*/
- 0x197, /* 20, -2.0dB*/
- 0x1AF, /* 21, -1.5dB*/
- 0x1C8, /* 22, -1.0dB*/
- 0x1E3, /* 23, -0.5dB*/
- 0x200, /* 24, +0 dB*/
- 0x21E, /* 25, +0.5dB*/
- 0x23E, /* 26, +1.0dB*/
- 0x261, /* 27, +1.5dB*/
- 0x285, /* 28, +2.0dB*/
- 0x2AB, /* 29, +2.5dB*/
- 0x2D3, /*30, +3.0dB*/
- 0x2FE, /* 31, +3.5dB*/
- 0x32B, /* 32, +4.0dB*/
- 0x35C, /* 33, +4.5dB*/
- 0x38E, /* 34, +5.0dB*/
- 0x3C4, /* 35, +5.5dB*/
- 0x3FE /* 36, +6.0dB */
-};
-
-void odm_txpowertracking_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- odm_txpowertracking_thermal_meter_init(dm);
-}
-
-static u8 get_swing_index(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 i = 0;
- u32 bb_swing;
- u32 swing_table_size;
- u32 *swing_table;
-
- if (dm->support_ic_type == ODM_RTL8188E ||
- dm->support_ic_type == ODM_RTL8723B ||
- dm->support_ic_type == ODM_RTL8192E ||
- dm->support_ic_type == ODM_RTL8188F ||
- dm->support_ic_type == ODM_RTL8703B) {
- bb_swing = odm_get_bb_reg(dm, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
- 0xFFC00000);
-
- swing_table = ofdm_swing_table_new;
- swing_table_size = OFDM_TABLE_SIZE;
- } else {
- {
- bb_swing = 0;
- swing_table = ofdm_swing_table;
- swing_table_size = OFDM_TABLE_SIZE;
- }
- }
-
- for (i = 0; i < swing_table_size; ++i) {
- u32 table_value = swing_table[i];
-
- if (table_value >= 0x100000)
- table_value >>= 22;
- if (bb_swing == table_value)
- break;
- }
- return i;
-}
-
-void odm_txpowertracking_thermal_meter_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 default_swing_index = get_swing_index(dm);
- u8 p = 0;
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- struct rtl_efuse *rtlefu = rtl_efuse(rtlpriv);
-
- cali_info->is_txpowertracking = true;
- cali_info->tx_powercount = 0;
- cali_info->is_txpowertracking_init = false;
-
- if (!dm->mp_mode)
- cali_info->txpowertrack_control = true;
- else
- cali_info->txpowertrack_control = false;
-
- if (!dm->mp_mode)
- cali_info->txpowertrack_control = true;
-
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION, "dm txpowertrack_control = %d\n",
- cali_info->txpowertrack_control);
-
- /* dm->rf_calibrate_info.txpowertrack_control = true; */
- cali_info->thermal_value = rtlefu->eeprom_thermalmeter;
- cali_info->thermal_value_iqk = rtlefu->eeprom_thermalmeter;
- cali_info->thermal_value_lck = rtlefu->eeprom_thermalmeter;
-
- if (!cali_info->default_bb_swing_index_flag) {
- /*The index of "0 dB" in SwingTable.*/
- if (dm->support_ic_type == ODM_RTL8188E ||
- dm->support_ic_type == ODM_RTL8723B ||
- dm->support_ic_type == ODM_RTL8192E ||
- dm->support_ic_type == ODM_RTL8703B) {
- cali_info->default_ofdm_index =
- (default_swing_index >= OFDM_TABLE_SIZE) ?
- 30 :
- default_swing_index;
- cali_info->default_cck_index = 20;
- } else if (dm->support_ic_type ==
- ODM_RTL8188F) { /*add by Mingzhi.Guo 2015-03-23*/
- cali_info->default_ofdm_index = 28; /*OFDM: -1dB*/
- cali_info->default_cck_index = 20; /*CCK:-6dB*/
- } else if (dm->support_ic_type ==
- ODM_RTL8723D) { /*add by zhaohe 2015-10-27*/
- cali_info->default_ofdm_index = 28; /*OFDM: -1dB*/
- cali_info->default_cck_index = 28; /*CCK: -6dB*/
- } else if (dm->support_ic_type ==
- ODM_RTL8710B) { /* JJ ADD 20161014 */
- cali_info->default_ofdm_index = 28; /*OFDM: -1dB*/
- cali_info->default_cck_index = 28; /*CCK: -6dB*/
- } else {
- cali_info->default_ofdm_index =
- (default_swing_index >= TXSCALE_TABLE_SIZE) ?
- 24 :
- default_swing_index;
- cali_info->default_cck_index = 24;
- }
- cali_info->default_bb_swing_index_flag = true;
- }
-
- cali_info->bb_swing_idx_cck_base = cali_info->default_cck_index;
- cali_info->CCK_index = cali_info->default_cck_index;
-
- for (p = ODM_RF_PATH_A; p < MAX_RF_PATH; ++p) {
- cali_info->bb_swing_idx_ofdm_base[p] =
- cali_info->default_ofdm_index;
- cali_info->OFDM_index[p] = cali_info->default_ofdm_index;
- cali_info->delta_power_index[p] = 0;
- cali_info->delta_power_index_last[p] = 0;
- cali_info->power_index_offset[p] = 0;
- }
- cali_info->modify_tx_agc_value_ofdm = 0;
- cali_info->modify_tx_agc_value_cck = 0;
-}
-
-void odm_txpowertracking_check(void *dm_void)
-{
- /* 2011/09/29 MH In HW integration first stage, we provide 4 different
-	 * handles to operate at the same time.
-	 * In stages 2/3, we need to provide a universal interface and merge
-	 * all HW dynamic mechanisms.
- */
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- switch (dm->support_platform) {
- case ODM_WIN:
- odm_txpowertracking_check_mp(dm);
- break;
-
- case ODM_CE:
- odm_txpowertracking_check_ce(dm);
- break;
-
- case ODM_AP:
- odm_txpowertracking_check_ap(dm);
- break;
-
- default:
- break;
- }
-}
-
-void odm_txpowertracking_check_ce(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- void *adapter = dm->adapter;
-
- if (!(dm->support_ability & ODM_RF_TX_PWR_TRACK))
- return;
-
- if (!dm->rf_calibrate_info.tm_trigger) {
- if (IS_HARDWARE_TYPE_8188E(adapter) ||
- IS_HARDWARE_TYPE_8188F(adapter) ||
- IS_HARDWARE_TYPE_8192E(adapter) ||
- IS_HARDWARE_TYPE_8723B(adapter) ||
- IS_HARDWARE_TYPE_JAGUAR(adapter) ||
- IS_HARDWARE_TYPE_8814A(adapter) ||
- IS_HARDWARE_TYPE_8703B(adapter) ||
- IS_HARDWARE_TYPE_8723D(adapter) ||
- IS_HARDWARE_TYPE_8822B(adapter) ||
- IS_HARDWARE_TYPE_8821C(adapter) ||
- (dm->support_ic_type == ODM_RTL8710B)) /* JJ ADD 20161014 */
- odm_set_rf_reg(dm, ODM_RF_PATH_A, RF_T_METER_NEW,
- (BIT(17) | BIT(16)), 0x03);
- else
- odm_set_rf_reg(dm, ODM_RF_PATH_A, RF_T_METER_OLD,
- RFREGOFFSETMASK, 0x60);
-
- dm->rf_calibrate_info.tm_trigger = 1;
- return;
- }
-
- odm_txpowertracking_callback_thermal_meter(dm);
- dm->rf_calibrate_info.tm_trigger = 0;
-}
-
-void odm_txpowertracking_check_mp(void *dm_void) {}
-
-void odm_txpowertracking_check_ap(void *dm_void) {}
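
One detail worth noting about the removal above: get_swing_index() matches the 10-bit gain field read back from the BB register (mask 0xFFC00000) against each OFDM swing word shifted down by 22 bits. The sketch below restates that lookup on its own; find_swing_index() and its flat argument list are assumptions for illustration, not the driver's interface.

/* Sketch only: the table lookup that get_swing_index() above performs.
 * Each swing word keeps its gain in the top 10 bits, so entries are
 * shifted down by 22 before being compared with the value read from
 * the 0xFFC00000 register field.
 */
static unsigned int find_swing_index(unsigned int bb_swing,
				     const unsigned int *table,
				     unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		unsigned int entry = table[i];

		if (entry >= 0x100000)
			entry >>= 22;	/* keep the 10-bit gain field */
		if (entry == bb_swing)
			break;
	}

	return i;	/* equals 'size' when nothing matches */
}

This lines up with the fallbacks in odm_txpowertracking_thermal_meter_init() above: out-of-range results are clamped to index 30 of ofdm_swing_table_new or 24 of tx_scaling_table_jaguar, both of which are the 0 dB entries.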
diff --git a/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h b/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h
deleted file mode 100644
index eb635de2d693..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMPOWERTRACKING_H__
-#define __PHYDMPOWERTRACKING_H__
-
-#define POWRTRACKING_VERSION "1.1"
-
-#define DPK_DELTA_MAPPING_NUM 13
-#define index_mapping_HP_NUM 15
-#define OFDM_TABLE_SIZE 43
-#define CCK_TABLE_SIZE 33
-#define CCK_TABLE_SIZE_88F 21
-#define TXSCALE_TABLE_SIZE 37
-#define CCK_TABLE_SIZE_8723D 41
-/* JJ ADD 20161014 */
-#define CCK_TABLE_SIZE_8710B 41
-
-#define TXPWR_TRACK_TABLE_SIZE 30
-#define DELTA_SWINGIDX_SIZE 30
-#define DELTA_SWINTSSI_SIZE 61
-#define BAND_NUM 4
-
-#define AVG_THERMAL_NUM 8
-#define HP_THERMAL_NUM 8
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM_MAX 10
-
-#define IQK_BB_REG_NUM 9
-
-#define iqk_matrix_reg_num 8
-
-extern u32 ofdm_swing_table[OFDM_TABLE_SIZE];
-extern u8 cck_swing_table_ch1_ch13[CCK_TABLE_SIZE][8];
-extern u8 cck_swing_table_ch14[CCK_TABLE_SIZE][8];
-
-extern u32 ofdm_swing_table_new[OFDM_TABLE_SIZE];
-extern u8 cck_swing_table_ch1_ch13_new[CCK_TABLE_SIZE][8];
-extern u8 cck_swing_table_ch14_new[CCK_TABLE_SIZE][8];
-extern u8 cck_swing_table_ch1_ch14_88f[CCK_TABLE_SIZE_88F][16];
-extern u8 cck_swing_table_ch1_ch13_88f[CCK_TABLE_SIZE_88F][16];
-extern u8 cck_swing_table_ch14_88f[CCK_TABLE_SIZE_88F][16];
-extern u32 cck_swing_table_ch1_ch14_8723d[CCK_TABLE_SIZE_8723D];
-/* JJ ADD 20161014 */
-extern u32 cck_swing_table_ch1_ch14_8710b[CCK_TABLE_SIZE_8710B];
-
-extern u32 tx_scaling_table_jaguar[TXSCALE_TABLE_SIZE];
-
-/* <20121018, Kordan> In case we fail to read TxPowerTrack.txt,
- * we use the 88E table as the default table.
- */
-
-#define dm_check_txpowertracking odm_txpowertracking_check
-
-struct iqk_matrix_regs_setting {
- bool is_iqk_done;
- s32 value[3][iqk_matrix_reg_num];
- bool is_bw_iqk_result_saved[3];
-};
-
-struct dm_rf_calibration_struct {
- /* for tx power tracking */
-
- u32 rega24; /* for TempCCK */
- s32 rege94;
- s32 rege9c;
- s32 regeb4;
- s32 regebc;
-
- u8 tx_powercount;
- bool is_txpowertracking_init;
- bool is_txpowertracking;
- /* for mp mode, turn off txpwrtracking as default */
- u8 txpowertrack_control;
- u8 tm_trigger;
- u8 internal_pa_5g[2]; /* pathA / pathB */
-
-	/* thermal_meter: index 0 for RFIC0, 1 for RFIC1 */
-	u8 thermal_meter[2];
- u8 thermal_value;
- u8 thermal_value_lck;
- u8 thermal_value_iqk;
- s8 thermal_value_delta; /* delta of thermal_value and efuse thermal */
- u8 thermal_value_dpk;
- u8 thermal_value_avg[AVG_THERMAL_NUM];
- u8 thermal_value_avg_index;
- u8 thermal_value_rx_gain;
- u8 thermal_value_crystal;
- u8 thermal_value_dpk_store;
- u8 thermal_value_dpk_track;
- bool txpowertracking_in_progress;
-
- bool is_reloadtxpowerindex;
- u8 is_rf_pi_enable;
- u32 txpowertracking_callback_cnt; /* cosa add for debug */
-
- /* ---------------------- Tx power Tracking ------------------------- */
- u8 is_cck_in_ch14;
- u8 CCK_index;
- u8 OFDM_index[MAX_RF_PATH];
- s8 power_index_offset[MAX_RF_PATH];
- s8 delta_power_index[MAX_RF_PATH];
- s8 delta_power_index_last[MAX_RF_PATH];
- bool is_tx_power_changed;
- s8 xtal_offset;
- s8 xtal_offset_last;
-
- u8 thermal_value_hp[HP_THERMAL_NUM];
- u8 thermal_value_hp_index;
- struct iqk_matrix_regs_setting
- iqk_matrix_reg_setting[IQK_MATRIX_SETTINGS_NUM];
- u8 delta_lck;
- s8 bb_swing_diff_2g, bb_swing_diff_5g; /* Unit: dB */
- u8 delta_swing_table_idx_2g_cck_a_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2g_cck_a_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2g_cck_b_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2g_cck_b_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2g_cck_c_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2g_cck_c_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2g_cck_d_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2g_cck_d_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2ga_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2ga_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2gb_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2gb_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2gc_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2gc_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2gd_p[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2gd_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5ga_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5ga_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5gb_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5gb_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5gc_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5gc_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5gd_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_5gd_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 delta_swing_tssi_table_2g_cck_a[DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_2g_cck_b[DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_2g_cck_c[DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_2g_cck_d[DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_2ga[DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_2gb[DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_2gc[DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_2gd[DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_5ga[BAND_NUM][DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_5gb[BAND_NUM][DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_5gc[BAND_NUM][DELTA_SWINTSSI_SIZE];
- u8 delta_swing_tssi_table_5gd[BAND_NUM][DELTA_SWINTSSI_SIZE];
- s8 delta_swing_table_xtal_p[DELTA_SWINGIDX_SIZE];
- s8 delta_swing_table_xtal_n[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2ga_p_8188e[DELTA_SWINGIDX_SIZE];
- u8 delta_swing_table_idx_2ga_n_8188e[DELTA_SWINGIDX_SIZE];
-
- u8 bb_swing_idx_ofdm[MAX_RF_PATH];
- u8 bb_swing_idx_ofdm_current;
- u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
- bool default_bb_swing_index_flag;
- bool bb_swing_flag_ofdm;
- u8 bb_swing_idx_cck;
- u8 bb_swing_idx_cck_current;
- u8 bb_swing_idx_cck_base;
- u8 default_ofdm_index;
- u8 default_cck_index;
- bool bb_swing_flag_cck;
-
- s8 absolute_ofdm_swing_idx[MAX_RF_PATH];
- s8 remnant_ofdm_swing_idx[MAX_RF_PATH];
- s8 absolute_cck_swing_idx[MAX_RF_PATH];
- s8 remnant_cck_swing_idx;
-	s8 modify_tx_agc_value; /*Remnant compensation value at tx_agc */
- bool modify_tx_agc_flag_path_a;
- bool modify_tx_agc_flag_path_b;
- bool modify_tx_agc_flag_path_c;
- bool modify_tx_agc_flag_path_d;
- bool modify_tx_agc_flag_path_a_cck;
-
- s8 kfree_offset[MAX_RF_PATH];
-
- /* ------------------------------------------------------------------ */
-
- /* for IQK */
- u32 regc04;
- u32 reg874;
- u32 regc08;
- u32 regb68;
- u32 regb6c;
- u32 reg870;
- u32 reg860;
- u32 reg864;
-
- bool is_iqk_initialized;
- bool is_lck_in_progress;
- bool is_antenna_detected;
- bool is_need_iqk;
- bool is_iqk_in_progress;
- bool is_iqk_pa_off;
- u8 delta_iqk;
- u32 ADDA_backup[IQK_ADDA_REG_NUM];
- u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
- u32 IQK_BB_backup_recover[9];
- /* { {S1: 0xc94, 0xc80, 0xc4c} , {S0: 0xc9c, 0xc88, 0xc4c}} */
- u32 IQK_BB_backup[IQK_BB_REG_NUM];
- u32 tx_iqc_8723b[2][3][2];
- /* { {S1: 0xc14, 0xca0} , {S0: 0xc14, 0xca0}} */
- u32 rx_iqc_8723b[2][2][2];
- /* { {S1: 0xc94, 0xc80, 0xc4c} , {S0: 0xc9c, 0xc88, 0xc4c}}*/
- u32 tx_iqc_8703b[3][2];
- /* { {S1: 0xc14, 0xca0} , {S0: 0xc14, 0xca0}}*/
- u32 rx_iqc_8703b[2][2];
- /* { {S1: 0xc94, 0xc80, 0xc4c} , {S0: 0xc9c, 0xc88, 0xc4c}}*/
- u32 tx_iqc_8723d[2][3][2];
- /* { {S1: 0xc14, 0xca0} , {S0: 0xc14, 0xca0}}*/
- u32 rx_iqc_8723d[2][2][2];
- /* JJ ADD 20161014 */
- /* { {S1: 0xc94, 0xc80, 0xc4c} , {S0: 0xc9c, 0xc88, 0xc4c}}*/
- u32 tx_iqc_8710b[2][3][2];
- /* { {S1: 0xc14, 0xca0} , {S0: 0xc14, 0xca0}}*/
- u32 rx_iqc_8710b[2][2][2];
-
- u8 iqk_step;
- u8 kcount;
- u8 retry_count[4][2]; /* [4]: path ABCD, [2] TXK, RXK */
- bool is_mp_mode;
-
- /* <James> IQK time measurement */
- u64 iqk_start_time;
- u64 iqk_progressing_time;
- u64 iqk_total_progressing_time;
-
- u32 lok_result;
-
- /* for APK */
- u32 ap_koutput[2][2]; /* path A/B; output1_1a/output1_2a */
- u8 is_ap_kdone;
- u8 is_apk_thermal_meter_ignore;
-
- /* DPK */
- bool is_dpk_fail;
- u8 is_dp_done;
- u8 is_dp_path_aok;
- u8 is_dp_path_bok;
-
- u32 tx_lok[2];
- u32 dpk_tx_agc;
- s32 dpk_gain;
- u32 dpk_thermal[4];
- s8 modify_tx_agc_value_ofdm;
- s8 modify_tx_agc_value_cck;
-
- /*Add by Yuchen for Kfree Phydm*/
- u8 reg_rf_kfree_enable; /*for registry*/
- u8 rf_kfree_enable; /*for efuse enable check*/
-};
-
-void odm_txpowertracking_check(void *dm_void);
-
-void odm_txpowertracking_init(void *dm_void);
-
-void odm_txpowertracking_check_ap(void *dm_void);
-
-void odm_txpowertracking_thermal_meter_init(void *dm_void);
-
-void odm_txpowertracking_init(void *dm_void);
-
-void odm_txpowertracking_check_mp(void *dm_void);
-
-void odm_txpowertracking_check_ce(void *dm_void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_pre_define.h b/drivers/staging/rtlwifi/phydm/phydm_pre_define.h
deleted file mode 100644
index ce9a076b32cb..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_pre_define.h
+++ /dev/null
@@ -1,602 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMPREDEFINE_H__
-#define __PHYDMPREDEFINE_H__
-
-/* 1 ============================================================
- * 1 Definition
- * 1 ============================================================
- */
-
-#define PHYDM_CODE_BASE "PHYDM_TRUNK"
-#define PHYDM_RELEASE_DATE "00000000"
-
-/* Max path of IC */
-#define MAX_PATH_NUM_8188E 1
-#define MAX_PATH_NUM_8192E 2
-#define MAX_PATH_NUM_8723B 1
-#define MAX_PATH_NUM_8812A 2
-#define MAX_PATH_NUM_8821A 1
-#define MAX_PATH_NUM_8814A 4
-#define MAX_PATH_NUM_8822B 2
-#define MAX_PATH_NUM_8821B 2
-#define MAX_PATH_NUM_8703B 1
-#define MAX_PATH_NUM_8188F 1
-#define MAX_PATH_NUM_8723D 1
-#define MAX_PATH_NUM_8197F 2
-#define MAX_PATH_NUM_8821C 1
-/* JJ ADD 20161014 */
-#define MAX_PATH_NUM_8710B 1
-
-/* Max RF path */
-#define ODM_RF_PATH_MAX 2
-#define ODM_RF_PATH_MAX_JAGUAR 4
-
-/*Bit define path*/
-#define PHYDM_A BIT(0)
-#define PHYDM_B BIT(1)
-#define PHYDM_C BIT(2)
-#define PHYDM_D BIT(3)
-#define PHYDM_AB (BIT(0) | BIT(1))
-#define PHYDM_AC (BIT(0) | BIT(2))
-#define PHYDM_AD (BIT(0) | BIT(3))
-#define PHYDM_BC (BIT(1) | BIT(2))
-#define PHYDM_BD (BIT(1) | BIT(3))
-#define PHYDM_CD (BIT(2) | BIT(3))
-#define PHYDM_ABC (BIT(0) | BIT(1) | BIT(2))
-#define PHYDM_ABD (BIT(0) | BIT(1) | BIT(3))
-#define PHYDM_ACD (BIT(0) | BIT(2) | BIT(3))
-#define PHYDM_BCD (BIT(1) | BIT(2) | BIT(3))
-#define PHYDM_ABCD (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-
-/* number of entry */
-/* defined in wifi.h (32+1) */
-#define ODM_ASSOCIATE_ENTRY_NUM ASSOCIATE_ENTRY_NUM
-
-#define RX_SMOOTH_FACTOR 20
-
-/* -----MGN rate--------------------------------- */
-
-enum ODM_MGN_RATE {
- ODM_MGN_1M = 0x02,
- ODM_MGN_2M = 0x04,
- ODM_MGN_5_5M = 0x0B,
- ODM_MGN_6M = 0x0C,
- ODM_MGN_9M = 0x12,
- ODM_MGN_11M = 0x16,
- ODM_MGN_12M = 0x18,
- ODM_MGN_18M = 0x24,
- ODM_MGN_24M = 0x30,
- ODM_MGN_36M = 0x48,
- ODM_MGN_48M = 0x60,
- ODM_MGN_54M = 0x6C,
- ODM_MGN_MCS32 = 0x7F,
- ODM_MGN_MCS0,
- ODM_MGN_MCS1,
- ODM_MGN_MCS2,
- ODM_MGN_MCS3,
- ODM_MGN_MCS4,
- ODM_MGN_MCS5,
- ODM_MGN_MCS6,
- ODM_MGN_MCS7,
- ODM_MGN_MCS8,
- ODM_MGN_MCS9,
- ODM_MGN_MCS10,
- ODM_MGN_MCS11,
- ODM_MGN_MCS12,
- ODM_MGN_MCS13,
- ODM_MGN_MCS14,
- ODM_MGN_MCS15,
- ODM_MGN_MCS16,
- ODM_MGN_MCS17,
- ODM_MGN_MCS18,
- ODM_MGN_MCS19,
- ODM_MGN_MCS20,
- ODM_MGN_MCS21,
- ODM_MGN_MCS22,
- ODM_MGN_MCS23,
- ODM_MGN_MCS24,
- ODM_MGN_MCS25,
- ODM_MGN_MCS26,
- ODM_MGN_MCS27,
- ODM_MGN_MCS28,
- ODM_MGN_MCS29,
- ODM_MGN_MCS30,
- ODM_MGN_MCS31,
- ODM_MGN_VHT1SS_MCS0,
- ODM_MGN_VHT1SS_MCS1,
- ODM_MGN_VHT1SS_MCS2,
- ODM_MGN_VHT1SS_MCS3,
- ODM_MGN_VHT1SS_MCS4,
- ODM_MGN_VHT1SS_MCS5,
- ODM_MGN_VHT1SS_MCS6,
- ODM_MGN_VHT1SS_MCS7,
- ODM_MGN_VHT1SS_MCS8,
- ODM_MGN_VHT1SS_MCS9,
- ODM_MGN_VHT2SS_MCS0,
- ODM_MGN_VHT2SS_MCS1,
- ODM_MGN_VHT2SS_MCS2,
- ODM_MGN_VHT2SS_MCS3,
- ODM_MGN_VHT2SS_MCS4,
- ODM_MGN_VHT2SS_MCS5,
- ODM_MGN_VHT2SS_MCS6,
- ODM_MGN_VHT2SS_MCS7,
- ODM_MGN_VHT2SS_MCS8,
- ODM_MGN_VHT2SS_MCS9,
- ODM_MGN_VHT3SS_MCS0,
- ODM_MGN_VHT3SS_MCS1,
- ODM_MGN_VHT3SS_MCS2,
- ODM_MGN_VHT3SS_MCS3,
- ODM_MGN_VHT3SS_MCS4,
- ODM_MGN_VHT3SS_MCS5,
- ODM_MGN_VHT3SS_MCS6,
- ODM_MGN_VHT3SS_MCS7,
- ODM_MGN_VHT3SS_MCS8,
- ODM_MGN_VHT3SS_MCS9,
- ODM_MGN_VHT4SS_MCS0,
- ODM_MGN_VHT4SS_MCS1,
- ODM_MGN_VHT4SS_MCS2,
- ODM_MGN_VHT4SS_MCS3,
- ODM_MGN_VHT4SS_MCS4,
- ODM_MGN_VHT4SS_MCS5,
- ODM_MGN_VHT4SS_MCS6,
- ODM_MGN_VHT4SS_MCS7,
- ODM_MGN_VHT4SS_MCS8,
- ODM_MGN_VHT4SS_MCS9,
- ODM_MGN_UNKNOWN
-};
-
-#define ODM_MGN_MCS0_SG 0xc0
-#define ODM_MGN_MCS1_SG 0xc1
-#define ODM_MGN_MCS2_SG 0xc2
-#define ODM_MGN_MCS3_SG 0xc3
-#define ODM_MGN_MCS4_SG 0xc4
-#define ODM_MGN_MCS5_SG 0xc5
-#define ODM_MGN_MCS6_SG 0xc6
-#define ODM_MGN_MCS7_SG 0xc7
-#define ODM_MGN_MCS8_SG 0xc8
-#define ODM_MGN_MCS9_SG 0xc9
-#define ODM_MGN_MCS10_SG 0xca
-#define ODM_MGN_MCS11_SG 0xcb
-#define ODM_MGN_MCS12_SG 0xcc
-#define ODM_MGN_MCS13_SG 0xcd
-#define ODM_MGN_MCS14_SG 0xce
-#define ODM_MGN_MCS15_SG 0xcf
-
-/* -----DESC rate--------------------------------- */
-
-#define ODM_RATEMCS15_SG 0x1c
-#define ODM_RATEMCS32 0x20
-
-/* CCK Rates, TxHT = 0 */
-#define ODM_RATE1M 0x00
-#define ODM_RATE2M 0x01
-#define ODM_RATE5_5M 0x02
-#define ODM_RATE11M 0x03
-/* OFDM Rates, TxHT = 0 */
-#define ODM_RATE6M 0x04
-#define ODM_RATE9M 0x05
-#define ODM_RATE12M 0x06
-#define ODM_RATE18M 0x07
-#define ODM_RATE24M 0x08
-#define ODM_RATE36M 0x09
-#define ODM_RATE48M 0x0A
-#define ODM_RATE54M 0x0B
-/* MCS Rates, TxHT = 1 */
-#define ODM_RATEMCS0 0x0C
-#define ODM_RATEMCS1 0x0D
-#define ODM_RATEMCS2 0x0E
-#define ODM_RATEMCS3 0x0F
-#define ODM_RATEMCS4 0x10
-#define ODM_RATEMCS5 0x11
-#define ODM_RATEMCS6 0x12
-#define ODM_RATEMCS7 0x13
-#define ODM_RATEMCS8 0x14
-#define ODM_RATEMCS9 0x15
-#define ODM_RATEMCS10 0x16
-#define ODM_RATEMCS11 0x17
-#define ODM_RATEMCS12 0x18
-#define ODM_RATEMCS13 0x19
-#define ODM_RATEMCS14 0x1A
-#define ODM_RATEMCS15 0x1B
-#define ODM_RATEMCS16 0x1C
-#define ODM_RATEMCS17 0x1D
-#define ODM_RATEMCS18 0x1E
-#define ODM_RATEMCS19 0x1F
-#define ODM_RATEMCS20 0x20
-#define ODM_RATEMCS21 0x21
-#define ODM_RATEMCS22 0x22
-#define ODM_RATEMCS23 0x23
-#define ODM_RATEMCS24 0x24
-#define ODM_RATEMCS25 0x25
-#define ODM_RATEMCS26 0x26
-#define ODM_RATEMCS27 0x27
-#define ODM_RATEMCS28 0x28
-#define ODM_RATEMCS29 0x29
-#define ODM_RATEMCS30 0x2A
-#define ODM_RATEMCS31 0x2B
-#define ODM_RATEVHTSS1MCS0 0x2C
-#define ODM_RATEVHTSS1MCS1 0x2D
-#define ODM_RATEVHTSS1MCS2 0x2E
-#define ODM_RATEVHTSS1MCS3 0x2F
-#define ODM_RATEVHTSS1MCS4 0x30
-#define ODM_RATEVHTSS1MCS5 0x31
-#define ODM_RATEVHTSS1MCS6 0x32
-#define ODM_RATEVHTSS1MCS7 0x33
-#define ODM_RATEVHTSS1MCS8 0x34
-#define ODM_RATEVHTSS1MCS9 0x35
-#define ODM_RATEVHTSS2MCS0 0x36
-#define ODM_RATEVHTSS2MCS1 0x37
-#define ODM_RATEVHTSS2MCS2 0x38
-#define ODM_RATEVHTSS2MCS3 0x39
-#define ODM_RATEVHTSS2MCS4 0x3A
-#define ODM_RATEVHTSS2MCS5 0x3B
-#define ODM_RATEVHTSS2MCS6 0x3C
-#define ODM_RATEVHTSS2MCS7 0x3D
-#define ODM_RATEVHTSS2MCS8 0x3E
-#define ODM_RATEVHTSS2MCS9 0x3F
-#define ODM_RATEVHTSS3MCS0 0x40
-#define ODM_RATEVHTSS3MCS1 0x41
-#define ODM_RATEVHTSS3MCS2 0x42
-#define ODM_RATEVHTSS3MCS3 0x43
-#define ODM_RATEVHTSS3MCS4 0x44
-#define ODM_RATEVHTSS3MCS5 0x45
-#define ODM_RATEVHTSS3MCS6 0x46
-#define ODM_RATEVHTSS3MCS7 0x47
-#define ODM_RATEVHTSS3MCS8 0x48
-#define ODM_RATEVHTSS3MCS9 0x49
-#define ODM_RATEVHTSS4MCS0 0x4A
-#define ODM_RATEVHTSS4MCS1 0x4B
-#define ODM_RATEVHTSS4MCS2 0x4C
-#define ODM_RATEVHTSS4MCS3 0x4D
-#define ODM_RATEVHTSS4MCS4 0x4E
-#define ODM_RATEVHTSS4MCS5 0x4F
-#define ODM_RATEVHTSS4MCS6 0x50
-#define ODM_RATEVHTSS4MCS7 0x51
-#define ODM_RATEVHTSS4MCS8 0x52
-#define ODM_RATEVHTSS4MCS9 0x53
-
-#define ODM_NUM_RATE_IDX (ODM_RATEVHTSS4MCS9 + 1)
-
-/* 1 ============================================================
- * 1 enumeration
- * 1 ============================================================
- */
-
-/* ODM_CMNINFO_INTERFACE */
-enum odm_interface {
- ODM_ITRF_PCIE = 0x1,
- ODM_ITRF_USB = 0x2,
- ODM_ITRF_SDIO = 0x4,
- ODM_ITRF_ALL = 0x7,
-};
-
-/* ODM_CMNINFO_IC_TYPE */
-enum odm_ic_type {
- ODM_RTL8188E = BIT(0),
- ODM_RTL8812 = BIT(1),
- ODM_RTL8821 = BIT(2),
- ODM_RTL8192E = BIT(3),
- ODM_RTL8723B = BIT(4),
- ODM_RTL8814A = BIT(5),
- ODM_RTL8881A = BIT(6),
- ODM_RTL8822B = BIT(7),
- ODM_RTL8703B = BIT(8),
- ODM_RTL8195A = BIT(9),
- ODM_RTL8188F = BIT(10),
- ODM_RTL8723D = BIT(11),
- ODM_RTL8197F = BIT(12),
- ODM_RTL8821C = BIT(13),
- ODM_RTL8814B = BIT(14),
- ODM_RTL8198F = BIT(15),
- /* JJ ADD 20161014 */
- ODM_RTL8710B = BIT(16),
-};
-
-/* JJ ADD 20161014 */
-#define ODM_IC_1SS \
- (ODM_RTL8188E | ODM_RTL8188F | ODM_RTL8723B | ODM_RTL8703B | \
- ODM_RTL8723D | ODM_RTL8881A | ODM_RTL8821 | ODM_RTL8821C | \
- ODM_RTL8195A | ODM_RTL8710B)
-#define ODM_IC_2SS (ODM_RTL8192E | ODM_RTL8197F | ODM_RTL8812 | ODM_RTL8822B)
-#define ODM_IC_3SS (ODM_RTL8814A)
-#define ODM_IC_4SS (ODM_RTL8814B | ODM_RTL8198F)
-
-/* JJ ADD 20161014 */
-#define ODM_IC_11N_SERIES \
- (ODM_RTL8188E | ODM_RTL8192E | ODM_RTL8723B | ODM_RTL8703B | \
- ODM_RTL8188F | ODM_RTL8723D | ODM_RTL8197F | ODM_RTL8710B)
-#define ODM_IC_11AC_SERIES \
- (ODM_RTL8812 | ODM_RTL8821 | ODM_RTL8814A | ODM_RTL8881A | \
- ODM_RTL8822B | ODM_RTL8821C)
-#define ODM_IC_11AC_1_SERIES (ODM_RTL8812 | ODM_RTL8821 | ODM_RTL8881A)
-#define ODM_IC_11AC_2_SERIES (ODM_RTL8814A | ODM_RTL8822B | ODM_RTL8821C)
-#define ODM_IC_TXBF_SUPPORT \
- (ODM_RTL8192E | ODM_RTL8812 | ODM_RTL8821 | ODM_RTL8814A | \
- ODM_RTL8881A | ODM_RTL8822B | ODM_RTL8197F | ODM_RTL8821C)
-#define ODM_IC_11N_GAIN_IDX_EDCCA \
- (ODM_RTL8195A | ODM_RTL8703B | ODM_RTL8188F | ODM_RTL8723D | \
- ODM_RTL8197F | ODM_RTL8710B)
-#define ODM_IC_11AC_GAIN_IDX_EDCCA (ODM_RTL8814A | ODM_RTL8822B | ODM_RTL8821C)
-#define ODM_IC_PHY_STATUE_NEW_TYPE \
- (ODM_RTL8197F | ODM_RTL8822B | ODM_RTL8723D | ODM_RTL8821C | \
- ODM_RTL8710B)
-
-#define PHYDM_IC_8051_SERIES \
- (ODM_RTL8881A | ODM_RTL8812 | ODM_RTL8821 | ODM_RTL8188E | \
- ODM_RTL8192E | ODM_RTL8723B | ODM_RTL8703B | ODM_RTL8188F)
-#define PHYDM_IC_3081_SERIES \
- (ODM_RTL8814A | ODM_RTL8822B | ODM_RTL8197F | ODM_RTL8821C)
-
-#define PHYDM_IC_SUPPORT_LA_MODE \
- (ODM_RTL8814A | ODM_RTL8822B | ODM_RTL8197F | ODM_RTL8821C)
-
-/* JJ ADD 20161014 */
-
-/* ODM_CMNINFO_CUT_VER */
-enum odm_cut_version {
- ODM_CUT_A = 0,
- ODM_CUT_B = 1,
- ODM_CUT_C = 2,
- ODM_CUT_D = 3,
- ODM_CUT_E = 4,
- ODM_CUT_F = 5,
-
- ODM_CUT_I = 8,
- ODM_CUT_J = 9,
- ODM_CUT_K = 10,
- ODM_CUT_TEST = 15,
-};
-
-/* ODM_CMNINFO_FAB_VER */
-enum odm_fab {
- ODM_TSMC = 0,
- ODM_UMC = 1,
-};
-
-/* ODM_CMNINFO_RF_TYPE
- *
- * For example, a 1T2R configuration transmits on path A and receives on
- * paths A and B, i.e. a combination of the path bits below (ODM_RF_A | ODM_RF_B).
- */
-enum odm_rf_path {
- ODM_RF_A = BIT(0),
- ODM_RF_B = BIT(1),
- ODM_RF_C = BIT(2),
- ODM_RF_D = BIT(3),
-};
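As a small illustration of how these per-path bits are meant to be combined (a sketch only; the helper below is hypothetical and not part of the driver):

	/* hypothetical helper: RX-path bitmap for the 1T2R (A + AB) example above */
	static inline u8 example_rx_path_mask_1t2r(void)
	{
		return ODM_RF_A | ODM_RF_B;	/* BIT(0) | BIT(1) == 0x3 */
	}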
-
-enum odm_rf_tx_num {
- ODM_1T = 1,
- ODM_2T = 2,
- ODM_3T = 3,
- ODM_4T = 4,
-};
-
-enum odm_rf_type {
- ODM_1T1R,
- ODM_1T2R,
- ODM_2T2R,
- ODM_2T2R_GREEN,
- ODM_2T3R,
- ODM_2T4R,
- ODM_3T3R,
- ODM_3T4R,
- ODM_4T4R,
- ODM_XTXR
-};
-
-enum odm_mac_phy_mode {
- ODM_SMSP = 0,
- ODM_DMSP = 1,
- ODM_DMDP = 2,
-};
-
-enum odm_bt_coexist {
- ODM_BT_BUSY = 1,
- ODM_BT_ON = 2,
- ODM_BT_OFF = 3,
- ODM_BT_NONE = 4,
-};
-
-/* ODM_CMNINFO_OP_MODE */
-enum odm_operation_mode {
- ODM_NO_LINK = BIT(0),
- ODM_LINK = BIT(1),
- ODM_SCAN = BIT(2),
- ODM_POWERSAVE = BIT(3),
- ODM_AP_MODE = BIT(4),
- ODM_CLIENT_MODE = BIT(5),
- ODM_AD_HOC = BIT(6),
- ODM_WIFI_DIRECT = BIT(7),
- ODM_WIFI_DISPLAY = BIT(8),
-};
-
-/* ODM_CMNINFO_WM_MODE */
-enum odm_wireless_mode {
- ODM_WM_UNKNOWN = 0x0,
- ODM_WM_B = BIT(0),
- ODM_WM_G = BIT(1),
- ODM_WM_A = BIT(2),
- ODM_WM_N24G = BIT(3),
- ODM_WM_N5G = BIT(4),
- ODM_WM_AUTO = BIT(5),
- ODM_WM_AC = BIT(6),
-};
-
-/* ODM_CMNINFO_BAND */
-enum odm_band_type {
- ODM_BAND_2_4G = 0,
- ODM_BAND_5G,
- ODM_BAND_ON_BOTH,
- ODM_BANDMAX
-};
-
-/* ODM_CMNINFO_SEC_CHNL_OFFSET */
-enum phydm_sec_chnl_offset {
- PHYDM_DONT_CARE = 0,
- PHYDM_BELOW = 1,
- PHYDM_ABOVE = 2
-};
-
-/* ODM_CMNINFO_SEC_MODE */
-enum odm_security {
- ODM_SEC_OPEN = 0,
- ODM_SEC_WEP40 = 1,
- ODM_SEC_TKIP = 2,
- ODM_SEC_RESERVE = 3,
- ODM_SEC_AESCCMP = 4,
- ODM_SEC_WEP104 = 5,
- ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
- ODM_SEC_SMS4 = 7,
-};
-
-/* ODM_CMNINFO_BW */
-enum odm_bw {
- ODM_BW20M = 0,
- ODM_BW40M = 1,
- ODM_BW80M = 2,
- ODM_BW160M = 3,
- ODM_BW5M = 4,
- ODM_BW10M = 5,
- ODM_BW_MAX = 6
-};
-
-/* ODM_CMNINFO_CHNL */
-
-/* ODM_CMNINFO_BOARD_TYPE */
-enum odm_board_type {
- ODM_BOARD_DEFAULT = 0, /* The DEFAULT case. */
- ODM_BOARD_MINICARD = BIT(0), /* 0 = non-mini card, 1= mini card. */
- ODM_BOARD_SLIM = BIT(1), /* 0 = non-slim card, 1 = slim card */
- ODM_BOARD_BT = BIT(2), /* 0 = without BT card, 1 = with BT */
- ODM_BOARD_EXT_PA =
- BIT(3), /* 0 = no 2G ext-PA, 1 = existing 2G ext-PA */
- ODM_BOARD_EXT_LNA =
- BIT(4), /* 0 = no 2G ext-LNA, 1 = existing 2G ext-LNA */
- ODM_BOARD_EXT_TRSW =
- BIT(5), /* 0 = no ext-TRSW, 1 = existing ext-TRSW */
- ODM_BOARD_EXT_PA_5G =
- BIT(6), /* 0 = no 5G ext-PA, 1 = existing 5G ext-PA */
- ODM_BOARD_EXT_LNA_5G =
- BIT(7), /* 0 = no 5G ext-LNA, 1 = existing 5G ext-LNA */
-};
-
-enum odm_package_type {
- ODM_PACKAGE_DEFAULT = 0,
- ODM_PACKAGE_QFN68 = BIT(0),
- ODM_PACKAGE_TFBGA90 = BIT(1),
- ODM_PACKAGE_TFBGA79 = BIT(2),
-};
-
-enum odm_type_gpa {
- TYPE_GPA0 = 0x0000,
- TYPE_GPA1 = 0x0055,
- TYPE_GPA2 = 0x00AA,
- TYPE_GPA3 = 0x00FF,
- TYPE_GPA4 = 0x5500,
- TYPE_GPA5 = 0x5555,
- TYPE_GPA6 = 0x55AA,
- TYPE_GPA7 = 0x55FF,
- TYPE_GPA8 = 0xAA00,
- TYPE_GPA9 = 0xAA55,
- TYPE_GPA10 = 0xAAAA,
- TYPE_GPA11 = 0xAAFF,
- TYPE_GPA12 = 0xFF00,
- TYPE_GPA13 = 0xFF55,
- TYPE_GPA14 = 0xFFAA,
- TYPE_GPA15 = 0xFFFF,
-};
-
-enum odm_type_apa {
- TYPE_APA0 = 0x0000,
- TYPE_APA1 = 0x0055,
- TYPE_APA2 = 0x00AA,
- TYPE_APA3 = 0x00FF,
- TYPE_APA4 = 0x5500,
- TYPE_APA5 = 0x5555,
- TYPE_APA6 = 0x55AA,
- TYPE_APA7 = 0x55FF,
- TYPE_APA8 = 0xAA00,
- TYPE_APA9 = 0xAA55,
- TYPE_APA10 = 0xAAAA,
- TYPE_APA11 = 0xAAFF,
- TYPE_APA12 = 0xFF00,
- TYPE_APA13 = 0xFF55,
- TYPE_APA14 = 0xFFAA,
- TYPE_APA15 = 0xFFFF,
-};
-
-enum odm_type_glna {
- TYPE_GLNA0 = 0x0000,
- TYPE_GLNA1 = 0x0055,
- TYPE_GLNA2 = 0x00AA,
- TYPE_GLNA3 = 0x00FF,
- TYPE_GLNA4 = 0x5500,
- TYPE_GLNA5 = 0x5555,
- TYPE_GLNA6 = 0x55AA,
- TYPE_GLNA7 = 0x55FF,
- TYPE_GLNA8 = 0xAA00,
- TYPE_GLNA9 = 0xAA55,
- TYPE_GLNA10 = 0xAAAA,
- TYPE_GLNA11 = 0xAAFF,
- TYPE_GLNA12 = 0xFF00,
- TYPE_GLNA13 = 0xFF55,
- TYPE_GLNA14 = 0xFFAA,
- TYPE_GLNA15 = 0xFFFF,
-};
-
-enum odm_type_alna {
- TYPE_ALNA0 = 0x0000,
- TYPE_ALNA1 = 0x0055,
- TYPE_ALNA2 = 0x00AA,
- TYPE_ALNA3 = 0x00FF,
- TYPE_ALNA4 = 0x5500,
- TYPE_ALNA5 = 0x5555,
- TYPE_ALNA6 = 0x55AA,
- TYPE_ALNA7 = 0x55FF,
- TYPE_ALNA8 = 0xAA00,
- TYPE_ALNA9 = 0xAA55,
- TYPE_ALNA10 = 0xAAAA,
- TYPE_ALNA11 = 0xAAFF,
- TYPE_ALNA12 = 0xFF00,
- TYPE_ALNA13 = 0xFF55,
- TYPE_ALNA14 = 0xFFAA,
- TYPE_ALNA15 = 0xFFFF,
-};
-
-enum odm_rf_radio_path {
- ODM_RF_PATH_A = 0, /* Radio path A */
- ODM_RF_PATH_B = 1, /* Radio path B */
- ODM_RF_PATH_C = 2, /* Radio path C */
- ODM_RF_PATH_D = 3, /* Radio path D */
- ODM_RF_PATH_AB,
- ODM_RF_PATH_AC,
- ODM_RF_PATH_AD,
- ODM_RF_PATH_BC,
- ODM_RF_PATH_BD,
- ODM_RF_PATH_CD,
- ODM_RF_PATH_ABC,
- ODM_RF_PATH_ACD,
- ODM_RF_PATH_BCD,
- ODM_RF_PATH_ABCD,
-	/* ODM_RF_PATH_MAX, */ /* maximum number of RF paths supported */
-};
-
-enum odm_parameter_init {
- ODM_PRE_SETTING = 0,
- ODM_POST_SETTING = 1,
-};
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_precomp.h b/drivers/staging/rtlwifi/phydm/phydm_precomp.h
deleted file mode 100644
index 39988d532340..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_precomp.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __ODM_PRECOMP_H__
-#define __ODM_PRECOMP_H__
-
-#include "phydm_types.h"
-
-/* Config Flags and Structs - defined by each ODM type */
-
-#include "../wifi.h"
-#include "rtl_phydm.h"
-
-/* OutSrc Header Files */
-
-#include "phydm.h"
-#include "phydm_hwconfig.h"
-#include "phydm_debug.h"
-#include "phydm_regdefine11ac.h"
-#include "phydm_regdefine11n.h"
-#include "phydm_interface.h"
-#include "phydm_reg.h"
-
-#include "phydm_adc_sampling.h"
-
-/* JJ ADD 20161014 */
-
-#include "../halmac/halmac_reg2.h"
-
-#define LDPC_HT_ENABLE_RX BIT(0)
-#define LDPC_HT_ENABLE_TX BIT(1)
-#define LDPC_HT_TEST_TX_ENABLE BIT(2)
-#define LDPC_HT_CAP_TX BIT(3)
-
-#define STBC_HT_ENABLE_RX BIT(0)
-#define STBC_HT_ENABLE_TX BIT(1)
-#define STBC_HT_TEST_TX_ENABLE BIT(2)
-#define STBC_HT_CAP_TX BIT(3)
-
-#define LDPC_VHT_ENABLE_RX BIT(0)
-#define LDPC_VHT_ENABLE_TX BIT(1)
-#define LDPC_VHT_TEST_TX_ENABLE BIT(2)
-#define LDPC_VHT_CAP_TX BIT(3)
-
-#define STBC_VHT_ENABLE_RX BIT(0)
-#define STBC_VHT_ENABLE_TX BIT(1)
-#define STBC_VHT_TEST_TX_ENABLE BIT(2)
-#define STBC_VHT_CAP_TX BIT(3)
-
-#include "rtl8822b/halhwimg8822b_mac.h"
-#include "rtl8822b/halhwimg8822b_rf.h"
-#include "rtl8822b/halhwimg8822b_bb.h"
-#include "rtl8822b/phydm_regconfig8822b.h"
-#include "rtl8822b/halphyrf_8822b.h"
-#include "rtl8822b/phydm_rtl8822b.h"
-#include "rtl8822b/phydm_hal_api8822b.h"
-#include "rtl8822b/version_rtl8822b.h"
-
-#include "../halmac/halmac_reg_8822b.h"
-
-/* JJ ADD 20161014 */
-
-#endif /* __ODM_PRECOMP_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/phydm_psd.c b/drivers/staging/rtlwifi/phydm/phydm_psd.c
deleted file mode 100644
index c93d871f1eb6..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_psd.c
+++ /dev/null
@@ -1,406 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*============================================================
- * include files
- *============================================================
- */
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-u32 phydm_get_psd_data(void *dm_void, u32 psd_tone_idx, u32 igi)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct psd_info *dm_psd_table = &dm->dm_psd_table;
- u32 psd_report = 0;
-
- odm_set_bb_reg(dm, dm_psd_table->psd_reg, 0x3ff, psd_tone_idx);
-
- odm_set_bb_reg(dm, dm_psd_table->psd_reg, BIT(22),
- 1); /*PSD trigger start*/
- ODM_delay_us(10);
- odm_set_bb_reg(dm, dm_psd_table->psd_reg, BIT(22),
- 0); /*PSD trigger stop*/
-
- psd_report = odm_get_bb_reg(dm, dm_psd_table->psd_report_reg, 0xffff);
- psd_report = odm_convert_to_db(psd_report) + igi;
-
- return psd_report;
-}
-
-static u8 phydm_psd_stop_trx(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u32 i;
- u8 trx_idle_success = false;
- u32 dbg_port_value = 0;
-
- /*[Stop TRX]----------------------------------------------------------*/
- if (!phydm_set_bb_dbg_port(dm, BB_DBGPORT_PRIORITY_3,
- 0x0)) /*set debug port to 0x0*/
- return STOP_TRX_FAIL;
-
- for (i = 0; i < 10000; i++) {
- dbg_port_value = phydm_get_bb_dbg_port_value(dm);
- if ((dbg_port_value & (BIT(17) | BIT(3))) ==
- 0) /* PHYTXON && CCA_all */ {
- ODM_RT_TRACE(dm, ODM_COMP_API,
- "PSD wait for ((%d)) times\n", i);
-
- trx_idle_success = true;
- break;
- }
- }
-
- if (trx_idle_success) {
- /*pause all TX queue*/
- odm_set_bb_reg(dm, 0x520, 0xff0000, 0xff);
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- /*disable CCK block*/
- odm_set_bb_reg(dm, 0x808, BIT(28), 0);
- /*disable OFDM RX CCA*/
- odm_set_bb_reg(dm, 0x838, BIT(1), 1);
- } else {
- /*TBD*/
- /* disable whole CCK block */
- odm_set_bb_reg(dm, 0x800, BIT(24), 0);
- /*[ Set IQK Matrix = 0 ] equivalent to [ Turn off CCA]*/
- odm_set_bb_reg(dm, 0xC14, MASKDWORD, 0x0);
- }
-
- } else {
- return STOP_TRX_FAIL;
- }
-
- phydm_release_bb_dbg_port(dm);
-
- return STOP_TRX_SUCCESS;
-}
-
-static u8 psd_result_cali_tone_8821[7] = {21, 28, 33, 93, 98, 105, 127};
-static u8 psd_result_cali_val_8821[7] = {67, 69, 71, 72, 71, 69, 67};
-
-void phydm_psd(void *dm_void, u32 igi, u16 start_point, u16 stop_point)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct psd_info *dm_psd_table = &dm->dm_psd_table;
- u32 i = 0, mod_tone_idx;
- u32 t = 0;
- u16 fft_max_half_bw;
- u32 psd_igi_a_reg;
- u32 psd_igi_b_reg;
- u16 psd_fc_channel = dm_psd_table->psd_fc_channel;
- u8 ag_rf_mode_reg = 0;
- u8 rf_reg18_9_8 = 0;
- u32 psd_result_tmp = 0;
- u8 psd_result = 0;
- u8 psd_result_cali_tone[7] = {0};
- u8 psd_result_cali_val[7] = {0};
- u8 noise_table_idx = 0;
-
- if (dm->support_ic_type == ODM_RTL8821) {
- odm_move_memory(dm, psd_result_cali_tone,
- psd_result_cali_tone_8821, 7);
- odm_move_memory(dm, psd_result_cali_val,
- psd_result_cali_val_8821, 7);
- }
-
- dm_psd_table->psd_in_progress = 1;
-
- /*[Stop DIG]*/
- dm->support_ability &= ~(ODM_BB_DIG);
- dm->support_ability &= ~(ODM_BB_FA_CNT);
-
- ODM_RT_TRACE(dm, ODM_COMP_API, "PSD Start =>\n");
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- psd_igi_a_reg = 0xc50;
- psd_igi_b_reg = 0xe50;
- } else {
- psd_igi_a_reg = 0xc50;
- psd_igi_b_reg = 0xc58;
- }
-
- /*[back up IGI]*/
- dm_psd_table->initial_gain_backup =
- odm_get_bb_reg(dm, psd_igi_a_reg, 0xff);
-	odm_set_bb_reg(dm, psd_igi_a_reg, 0xff,
-		       0x6e); /* IGI target at 0 dBm; also prevents CCA from firing */
-	odm_set_bb_reg(dm, psd_igi_b_reg, 0xff,
-		       0x6e); /* IGI target at 0 dBm; also prevents CCA from firing */
- ODM_delay_us(10);
-
- if (phydm_psd_stop_trx(dm) == STOP_TRX_FAIL) {
- ODM_RT_TRACE(dm, ODM_COMP_API, "STOP_TRX_FAIL\n");
- return;
- }
-
- /*[Set IGI]*/
- odm_set_bb_reg(dm, psd_igi_a_reg, 0xff, igi);
- odm_set_bb_reg(dm, psd_igi_b_reg, 0xff, igi);
-
- /*[Backup RF Reg]*/
- dm_psd_table->rf_0x18_bkp =
- odm_get_rf_reg(dm, ODM_RF_PATH_A, 0x18, RFREGOFFSETMASK);
-
- if (psd_fc_channel > 14) {
- rf_reg18_9_8 = 1;
-
- if (psd_fc_channel >= 36 && psd_fc_channel <= 64)
- ag_rf_mode_reg = 0x1;
- else if (psd_fc_channel >= 100 && psd_fc_channel <= 140)
- ag_rf_mode_reg = 0x3;
- else if (psd_fc_channel > 140)
- ag_rf_mode_reg = 0x5;
- }
-
- /* Set RF fc*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x18, 0xff, psd_fc_channel);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x18, 0x300, rf_reg18_9_8);
- /*2b'11: 20MHz, 2b'10: 40MHz, 2b'01: 80MHz */
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x18, 0xc00,
- dm_psd_table->psd_bw_rf_reg);
- /* Set RF ag fc mode*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x18, 0xf0000, ag_rf_mode_reg);
-
- ODM_RT_TRACE(dm, ODM_COMP_API, "0xc50=((0x%x))\n",
- odm_get_bb_reg(dm, 0xc50, MASKDWORD));
- ODM_RT_TRACE(dm, ODM_COMP_API, "RF0x18=((0x%x))\n",
- odm_get_rf_reg(dm, ODM_RF_PATH_A, 0x18, RFREGOFFSETMASK));
-
- /*[Stop 3-wires]*/
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, 0xc00, 0xf, 0x4); /* hardware 3-wire off */
- odm_set_bb_reg(dm, 0xe00, 0xf, 0x4); /* hardware 3-wire off */
- } else {
- odm_set_bb_reg(dm, 0x88c, 0xf00000,
- 0xf); /* 3 wire Disable 88c[23:20]=0xf */
- }
- ODM_delay_us(10);
-
- if (stop_point > (dm_psd_table->fft_smp_point - 1))
- stop_point = (dm_psd_table->fft_smp_point - 1);
-
- if (start_point > (dm_psd_table->fft_smp_point - 1))
- start_point = (dm_psd_table->fft_smp_point - 1);
-
- if (start_point > stop_point)
- stop_point = start_point;
-
- if (stop_point > 127) /* limit of psd_result[128] */
- stop_point = 127;
-
- for (i = start_point; i <= stop_point; i++) {
- fft_max_half_bw = (dm_psd_table->fft_smp_point) >> 1;
-
- if (i < fft_max_half_bw)
- mod_tone_idx = i + fft_max_half_bw;
- else
- mod_tone_idx = i - fft_max_half_bw;
-
- psd_result_tmp = 0;
- for (t = 0; t < dm_psd_table->sw_avg_time; t++)
- psd_result_tmp +=
- phydm_get_psd_data(dm, mod_tone_idx, igi);
- psd_result =
- (u8)((psd_result_tmp / dm_psd_table->sw_avg_time)) -
- dm_psd_table->psd_pwr_common_offset;
-
- if (dm_psd_table->fft_smp_point == 128 &&
- (dm_psd_table->noise_k_en)) {
- if (i > psd_result_cali_tone[noise_table_idx])
- noise_table_idx++;
-
- if (noise_table_idx > 6)
- noise_table_idx = 6;
-
- if (psd_result >= psd_result_cali_val[noise_table_idx])
- psd_result =
- psd_result -
- psd_result_cali_val[noise_table_idx];
- else
- psd_result = 0;
-
- dm_psd_table->psd_result[i] = psd_result;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_API, "[%d] N_cali = %d, PSD = %d\n",
- mod_tone_idx, psd_result_cali_val[noise_table_idx],
- psd_result);
- }
-
- /*[Start 3-wires]*/
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, 0xc00, 0xf, 0x7); /* hardware 3-wire on */
- odm_set_bb_reg(dm, 0xe00, 0xf, 0x7); /* hardware 3-wire on */
- } else {
- odm_set_bb_reg(dm, 0x88c, 0xf00000,
- 0x0); /* 3 wire enable 88c[23:20]=0x0 */
- }
- ODM_delay_us(10);
-
- /*[Revert Reg]*/
- odm_set_bb_reg(dm, 0x520, 0xff0000, 0x0); /*start all TX queue*/
- odm_set_bb_reg(dm, 0x808, BIT(28), 1); /*enable CCK block*/
- odm_set_bb_reg(dm, 0x838, BIT(1), 0); /*enable OFDM RX CCA*/
-
- odm_set_bb_reg(dm, psd_igi_a_reg, 0xff,
- dm_psd_table->initial_gain_backup);
- odm_set_bb_reg(dm, psd_igi_b_reg, 0xff,
- dm_psd_table->initial_gain_backup);
-
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x18, RFREGOFFSETMASK,
- dm_psd_table->rf_0x18_bkp);
-
- ODM_RT_TRACE(dm, ODM_COMP_API, "PSD finished\n\n");
-
- dm->support_ability |= ODM_BB_DIG;
- dm->support_ability |= ODM_BB_FA_CNT;
- dm_psd_table->psd_in_progress = 0;
-}
-
-void phydm_psd_para_setting(void *dm_void, u8 sw_avg_time, u8 hw_avg_time,
- u8 i_q_setting, u16 fft_smp_point, u8 ant_sel,
- u8 psd_input, u8 channel, u8 noise_k_en)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct psd_info *dm_psd_table = &dm->dm_psd_table;
- u8 fft_smp_point_idx = 0;
-
- dm_psd_table->fft_smp_point = fft_smp_point;
-
- if (sw_avg_time == 0)
- sw_avg_time = 1;
-
- dm_psd_table->sw_avg_time = sw_avg_time;
- dm_psd_table->psd_fc_channel = channel;
- dm_psd_table->noise_k_en = noise_k_en;
-
- if (fft_smp_point == 128)
- fft_smp_point_idx = 0;
- else if (fft_smp_point == 256)
- fft_smp_point_idx = 1;
- else if (fft_smp_point == 512)
- fft_smp_point_idx = 2;
- else if (fft_smp_point == 1024)
- fft_smp_point_idx = 3;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- odm_set_bb_reg(dm, 0x910, BIT(11) | BIT(10), i_q_setting);
- odm_set_bb_reg(dm, 0x910, BIT(13) | BIT(12), hw_avg_time);
- odm_set_bb_reg(dm, 0x910, BIT(15) | BIT(14), fft_smp_point_idx);
- odm_set_bb_reg(dm, 0x910, BIT(17) | BIT(16), ant_sel);
- odm_set_bb_reg(dm, 0x910, BIT(23), psd_input);
- }
-
- /*bw = (*dm->band_width); //ODM_BW20M */
- /*channel = *(dm->channel);*/
-}
-
-void phydm_psd_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct psd_info *dm_psd_table = &dm->dm_psd_table;
-
- ODM_RT_TRACE(dm, ODM_COMP_API, "PSD para init\n");
-
- dm_psd_table->psd_in_progress = false;
-
- if (dm->support_ic_type & ODM_IC_11AC_SERIES) {
- dm_psd_table->psd_reg = 0x910;
- dm_psd_table->psd_report_reg = 0xF44;
-
-		if (dm->support_ic_type & ODM_IC_11AC_2_SERIES)
- dm_psd_table->psd_bw_rf_reg =
- 1; /*2b'11: 20MHz, 2b'10: 40MHz, 2b'01: 80MHz */
- else
- dm_psd_table->psd_bw_rf_reg =
- 2; /*2b'11: 20MHz, 2b'10: 40MHz, 2b'01: 80MHz */
-
- } else {
- dm_psd_table->psd_reg = 0x808;
- dm_psd_table->psd_report_reg = 0x8B4;
- dm_psd_table->psd_bw_rf_reg =
- 2; /*2b'11: 20MHz, 2b'10: 40MHz, 2b'01: 80MHz */
- }
-
- dm_psd_table->psd_pwr_common_offset = 0;
-
- phydm_psd_para_setting(dm, 1, 2, 3, 128, 0, 0, 7, 0);
- /*phydm_psd(dm, 0x3c, 0, 127);*/ /* target at -50dBm */
-}
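For reference, a minimal call sequence for a one-shot PSD sweep, based on the defaults programmed above and on the commented-out example call (a sketch only; obtaining the phy_dm_struct pointer and any required locking are assumed to come from the surrounding driver code):

	/* sketch: sweep all 128 FFT tones at IGI 0x3c (the -50 dBm target noted above) */
	static void example_psd_sweep(struct phy_dm_struct *dm)
	{
		phydm_psd_init(dm);		/* 128-point FFT, channel 7 defaults */
		phydm_psd(dm, 0x3c, 0, 127);	/* IGI, start tone, stop tone */
		/* per-tone results: phydm_get_psd_result_table(dm, tone_idx) */
	}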
-
-void phydm_psd_debug(void *dm_void, char input[][16], u32 *_used, char *output,
- u32 *_out_len, u32 input_num)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- char help[] = "-h";
- u32 var1[10] = {0};
- u32 used = *_used;
- u32 out_len = *_out_len;
- u8 i;
-
- if ((strcmp(input[1], help) == 0)) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "{0} {sw_avg} {hw_avg 0:3} {1:I,2:Q,3:IQ} {fft_point: 128*(1:4)} {path_sel 0~3} {0:ADC, 1:RXIQC} {CH} {noise_k}\n");
- PHYDM_SNPRINTF(output + used, out_len - used,
- "{1} {IGI(hex)} {start_point} {stop_point}\n");
- return;
- }
-
- PHYDM_SSCANF(input[1], DCMD_DECIMAL, &var1[0]);
-
- if (var1[0] == 0) {
- for (i = 1; i < 10; i++) {
- if (input[i + 1])
- PHYDM_SSCANF(input[i + 1], DCMD_DECIMAL,
- &var1[i]);
- }
-
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "sw_avg_time=((%d)), hw_avg_time=((%d)), IQ=((%d)), fft=((%d)), path=((%d)), input =((%d)) ch=((%d)), noise_k=((%d))\n",
- var1[1], var1[2], var1[3], var1[4], var1[5], var1[6],
- (u8)var1[7], (u8)var1[8]);
- phydm_psd_para_setting(dm, (u8)var1[1], (u8)var1[2],
- (u8)var1[3], (u16)var1[4], (u8)var1[5],
- (u8)var1[6], (u8)var1[7], (u8)var1[8]);
-
- } else if (var1[0] == 1) {
- PHYDM_SSCANF(input[2], DCMD_HEX, &var1[1]);
- PHYDM_SSCANF(input[3], DCMD_DECIMAL, &var1[2]);
- PHYDM_SSCANF(input[4], DCMD_DECIMAL, &var1[3]);
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "IGI=((0x%x)), start_point=((%d)), stop_point=((%d))\n",
- var1[1], var1[2], var1[3]);
- dm->debug_components |= ODM_COMP_API;
- phydm_psd(dm, var1[1], (u16)var1[2], (u16)var1[3]);
- dm->debug_components &= (~ODM_COMP_API);
- }
-}
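To illustrate how this parser is driven, the two argument strings below match the help text above; the leading command token and the debugfs plumbing that tokenizes input[] live outside this file and are assumed here:

	0 1 2 3 128 0 0 7 0     (configure: sw_avg=1, hw_avg=2, IQ, 128-pt FFT, path 0, ADC input, ch 7, noise_k off)
	1 3c 0 127              (run: IGI=0x3c in hex, sweep tone 0 through 127)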
-
-u8 phydm_get_psd_result_table(void *dm_void, int index)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct psd_info *dm_psd_table = &dm->dm_psd_table;
- u8 temp_result = 0;
-
- if (index < 128)
- temp_result = dm_psd_table->psd_result[index];
-
- return temp_result;
-}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_psd.h b/drivers/staging/rtlwifi/phydm/phydm_psd.h
deleted file mode 100644
index 0fd45c1cb6ec..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_psd.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMPSD_H__
-#define __PHYDMPSD_H__
-
-/*#define PSD_VERSION "1.0"*/ /*2016.09.22 Dino*/
-#define PSD_VERSION "1.1" /*2016.10.07 Dino, Add Option for PSD Tone index
- *Selection
- */
-
-#define STOP_TRX_SUCCESS 1
-#define STOP_TRX_FAIL 0
-
-struct psd_info {
- u8 psd_in_progress;
- u32 psd_reg;
- u32 psd_report_reg;
- u8 psd_pwr_common_offset;
- u16 sw_avg_time;
- u16 fft_smp_point;
- u32 initial_gain_backup;
- u32 rf_0x18_bkp;
- u16 psd_fc_channel;
- u32 psd_bw_rf_reg;
- u8 psd_result[128];
- u8 noise_k_en;
-};
-
-u32 phydm_get_psd_data(void *dm_void, u32 psd_tone_idx, u32 igi);
-
-void phydm_psd_debug(void *dm_void, char input[][16], u32 *_used, char *output,
- u32 *_out_len, u32 input_num);
-
-void phydm_psd(void *dm_void, u32 igi, u16 start_point, u16 stop_point);
-
-void phydm_psd_para_setting(void *dm_void, u8 sw_avg_time, u8 hw_avg_time,
- u8 i_q_setting, u16 fft_smp_point, u8 ant_sel,
- u8 psd_input, u8 channel, u8 noise_k_en);
-
-void phydm_psd_init(void *dm_void);
-
-u8 phydm_get_psd_result_table(void *dm_void, int index);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_rainfo.c b/drivers/staging/rtlwifi/phydm/phydm_rainfo.c
deleted file mode 100644
index ed740a93c8b6..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_rainfo.c
+++ /dev/null
@@ -1,1196 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/* ************************************************************
- * include files
- * *************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-
-void phydm_h2C_debug(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 h2c_parameter[H2C_MAX_LENGTH] = {0};
- u8 phydm_h2c_id = (u8)dm_value[0];
- u8 i;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- PHYDM_SNPRINTF(output + used, out_len - used,
- "Phydm Send H2C_ID (( 0x%x))\n", phydm_h2c_id);
- for (i = 0; i < H2C_MAX_LENGTH; i++) {
- h2c_parameter[i] = (u8)dm_value[i + 1];
- PHYDM_SNPRINTF(output + used, out_len - used,
- "H2C: Byte[%d] = ((0x%x))\n", i,
- h2c_parameter[i]);
- }
-
- odm_fill_h2c_cmd(dm, phydm_h2c_id, H2C_MAX_LENGTH, h2c_parameter);
-}
-
-void phydm_RA_debug_PCR(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
- u32 used = *_used;
- u32 out_len = *_out_len;
-
- if (dm_value[0] == 100) {
- PHYDM_SNPRINTF(
- output + used, out_len - used,
- "[Get] PCR RA_threshold_offset = (( %s%d ))\n",
- ((ra_tab->RA_threshold_offset == 0) ?
- " " :
- ((ra_tab->RA_offset_direction) ? "+" : "-")),
- ra_tab->RA_threshold_offset);
- /**/
- } else if (dm_value[0] == 0) {
- ra_tab->RA_offset_direction = 0;
- ra_tab->RA_threshold_offset = (u8)dm_value[1];
- PHYDM_SNPRINTF(output + used, out_len - used,
- "[Set] PCR RA_threshold_offset = (( -%d ))\n",
- ra_tab->RA_threshold_offset);
- } else if (dm_value[0] == 1) {
- ra_tab->RA_offset_direction = 1;
- ra_tab->RA_threshold_offset = (u8)dm_value[1];
- PHYDM_SNPRINTF(output + used, out_len - used,
- "[Set] PCR RA_threshold_offset = (( +%d ))\n",
- ra_tab->RA_threshold_offset);
- } else {
- PHYDM_SNPRINTF(output + used, out_len - used, "[Set] Error\n");
- /**/
- }
-}
-
-void odm_c2h_ra_para_report_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- u8 para_idx = cmd_buf[0]; /*Retry Penalty, NH, NL*/
- u8 i;
-
- ODM_RT_TRACE(dm, PHYDM_COMP_RA_DBG,
- "[ From FW C2H RA Para ] cmd_buf[0]= (( %d ))\n",
- cmd_buf[0]);
-
- if (para_idx == RADBG_DEBUG_MONITOR1) {
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "-------------------------------\n");
- if (dm->support_ic_type & PHYDM_IC_3081_SERIES) {
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "RSSI =", cmd_buf[1]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "rate =", cmd_buf[2] & 0x7f);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "SGI =", (cmd_buf[2] & 0x80) >> 7);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "BW =", cmd_buf[3]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "BW_max =", cmd_buf[4]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "multi_rate0 =", cmd_buf[5]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "multi_rate1 =", cmd_buf[6]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "DISRA =", cmd_buf[7]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "VHT_EN =", cmd_buf[8]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "SGI_support =", cmd_buf[9]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "try_ness =", cmd_buf[10]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "pre_rate =", cmd_buf[11]);
- } else {
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "RSSI =", cmd_buf[1]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %x\n",
- "BW =", cmd_buf[2]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "DISRA =", cmd_buf[3]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "VHT_EN =", cmd_buf[4]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "Highest rate =", cmd_buf[5]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "Lowest rate =", cmd_buf[6]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "SGI_support =", cmd_buf[7]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "Rate_ID =", cmd_buf[8]);
- }
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "-------------------------------\n");
- } else if (para_idx == RADBG_DEBUG_MONITOR2) {
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "-------------------------------\n");
- if (dm->support_ic_type & PHYDM_IC_3081_SERIES) {
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "rate_id =", cmd_buf[1]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "highest_rate =", cmd_buf[2]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "lowest_rate =", cmd_buf[3]);
-
- for (i = 4; i <= 11; i++)
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "RAMASK = 0x%x\n", cmd_buf[i]);
- } else {
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "%5s %x%x %x%x %x%x %x%x\n",
- "RA Mask:", cmd_buf[8], cmd_buf[7],
- cmd_buf[6], cmd_buf[5], cmd_buf[4],
- cmd_buf[3], cmd_buf[2], cmd_buf[1]);
- }
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "-------------------------------\n");
- } else if (para_idx == RADBG_DEBUG_MONITOR3) {
- for (i = 0; i < (cmd_len - 1); i++)
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
- "content[%d] = %d\n", i, cmd_buf[1 + i]);
- } else if (para_idx == RADBG_DEBUG_MONITOR4) {
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s {%d.%d}\n",
- "RA version =", cmd_buf[1], cmd_buf[2]);
- } else if (para_idx == RADBG_DEBUG_MONITOR5) {
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "Current rate =", cmd_buf[1]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "Retry ratio =", cmd_buf[2]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "rate down ratio =", cmd_buf[3]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
- "highest rate =", cmd_buf[4]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s {0x%x 0x%x}\n",
- "Muti-try =", cmd_buf[5], cmd_buf[6]);
- ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x%x%x%x%x\n",
- "RA mask =", cmd_buf[11], cmd_buf[10], cmd_buf[9],
- cmd_buf[8], cmd_buf[7]);
- }
-}
-
-void phydm_ra_dynamic_retry_count(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (!(dm->support_ability & ODM_BB_DYNAMIC_ARFR))
- return;
-
- if (dm->pre_b_noisy != dm->noisy_decision) {
- if (dm->noisy_decision) {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "->Noisy Env. RA fallback value\n");
- odm_set_mac_reg(dm, 0x430, MASKDWORD, 0x0);
- odm_set_mac_reg(dm, 0x434, MASKDWORD, 0x04030201);
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "->Clean Env. RA fallback value\n");
- odm_set_mac_reg(dm, 0x430, MASKDWORD, 0x01000000);
- odm_set_mac_reg(dm, 0x434, MASKDWORD, 0x06050402);
- }
- dm->pre_b_noisy = dm->noisy_decision;
- }
-}
-
-void phydm_ra_dynamic_retry_limit(void *dm_void) {}
-
-void phydm_print_rate(void *dm_void, u8 rate, u32 dbg_component)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 legacy_table[12] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54};
- u8 rate_idx = rate & 0x7f; /*remove bit7 SGI*/
- u8 vht_en = (rate_idx >= ODM_RATEVHTSS1MCS0) ? 1 : 0;
- u8 b_sgi = (rate & 0x80) >> 7;
-
- ODM_RT_TRACE(dm, dbg_component, "( %s%s%s%s%d%s%s)\n",
- ((rate_idx >= ODM_RATEVHTSS1MCS0) &&
- (rate_idx <= ODM_RATEVHTSS1MCS9)) ?
- "VHT 1ss " :
- "",
- ((rate_idx >= ODM_RATEVHTSS2MCS0) &&
- (rate_idx <= ODM_RATEVHTSS2MCS9)) ?
- "VHT 2ss " :
- "",
- ((rate_idx >= ODM_RATEVHTSS3MCS0) &&
- (rate_idx <= ODM_RATEVHTSS3MCS9)) ?
- "VHT 3ss " :
- "",
- (rate_idx >= ODM_RATEMCS0) ? "MCS " : "",
- (vht_en) ? ((rate_idx - ODM_RATEVHTSS1MCS0) % 10) :
- ((rate_idx >= ODM_RATEMCS0) ?
- (rate_idx - ODM_RATEMCS0) :
- ((rate_idx <= ODM_RATE54M) ?
- legacy_table[rate_idx] :
- 0)),
- (b_sgi) ? "-S" : " ",
- (rate_idx >= ODM_RATEMCS0) ? "" : "M");
-}
-
-void phydm_c2h_ra_report_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
- u8 macid = cmd_buf[1];
- u8 rate = cmd_buf[0];
- u8 rate_idx = rate & 0x7f; /*remove bit7 SGI*/
- u8 rate_order;
-
- if (cmd_len >= 4) {
- if (cmd_buf[3] == 0) {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "TX Init-rate Update[%d]:", macid);
- /**/
- } else if (cmd_buf[3] == 0xff) {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "FW Level: Fix rate[%d]:", macid);
- /**/
- } else if (cmd_buf[3] == 1) {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "Try Success[%d]:", macid);
- /**/
- } else if (cmd_buf[3] == 2) {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "Try Fail & Try Again[%d]:", macid);
- /**/
- } else if (cmd_buf[3] == 3) {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "rate Back[%d]:", macid);
- /**/
- } else if (cmd_buf[3] == 4) {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "start rate by RSSI[%d]:", macid);
- /**/
- } else if (cmd_buf[3] == 5) {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE,
- "Try rate[%d]:", macid);
- /**/
- }
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_RATE_ADAPTIVE, "Tx rate Update[%d]:",
- macid);
- /**/
- }
-
- phydm_print_rate(dm, rate, ODM_COMP_RATE_ADAPTIVE);
-
- ra_tab->link_tx_rate[macid] = rate;
-
- /*trigger power training*/
-
- rate_order = phydm_rate_order_compute(dm, rate_idx);
-
- if ((dm->is_one_entry_only) ||
- ((rate_order > ra_tab->highest_client_tx_order) &&
- (ra_tab->power_tracking_flag == 1))) {
- phydm_update_pwr_track(dm, rate_idx);
- ra_tab->power_tracking_flag = 0;
- }
-
- /*trigger dynamic rate ID*/
-}
-
-void odm_rssi_monitor_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
-
- ra_tab->firstconnect = false;
-}
-
-void odm_ra_post_action_on_assoc(void *dm_void) {}
-
-void phydm_init_ra_info(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (dm->support_ic_type == ODM_RTL8822B) {
- u32 ret_value;
-
- ret_value = odm_get_bb_reg(dm, 0x4c8, MASKBYTE2);
- odm_set_bb_reg(dm, 0x4cc, MASKBYTE3, (ret_value - 1));
- }
-}
-
-void phydm_modify_RA_PCR_threshold(void *dm_void, u8 RA_offset_direction,
-				   u8 RA_threshold_offset)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
-
- ra_tab->RA_offset_direction = RA_offset_direction;
- ra_tab->RA_threshold_offset = RA_threshold_offset;
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "Set RA_threshold_offset = (( %s%d ))\n",
- ((RA_threshold_offset == 0) ?
- " " :
- ((RA_offset_direction) ? "+" : "-")),
- RA_threshold_offset);
-}
-
-static void odm_rssi_monitor_check_mp(void *dm_void) {}
-
-static void odm_rssi_monitor_check_ce(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- struct rtl_sta_info *entry;
- int i;
- int tmp_entry_min_pwdb = 0xff;
- unsigned long cur_tx_ok_cnt = 0, cur_rx_ok_cnt = 0;
- u8 UL_DL_STATE = 0, STBC_TX = 0, tx_bf_en = 0;
- u8 h2c_parameter[H2C_0X42_LENGTH] = {0};
- u8 cmdlen = H2C_0X42_LENGTH;
- u8 macid = 0;
-
- if (!dm->is_linked)
- return;
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- entry = (struct rtl_sta_info *)dm->odm_sta_info[i];
- if (!IS_STA_VALID(entry))
- continue;
-
- if (is_multicast_ether_addr(entry->mac_addr) ||
- is_broadcast_ether_addr(entry->mac_addr))
- continue;
-
- if (entry->rssi_stat.undecorated_smoothed_pwdb == (-1))
- continue;
-
- /* calculate min_pwdb */
- if (entry->rssi_stat.undecorated_smoothed_pwdb <
- tmp_entry_min_pwdb)
- tmp_entry_min_pwdb =
- entry->rssi_stat.undecorated_smoothed_pwdb;
-
- /* report RSSI */
- cur_tx_ok_cnt = rtlpriv->stats.txbytesunicast_inperiod;
- cur_rx_ok_cnt = rtlpriv->stats.rxbytesunicast_inperiod;
-
- if (cur_rx_ok_cnt > (cur_tx_ok_cnt * 6))
- UL_DL_STATE = 1;
- else
- UL_DL_STATE = 0;
-
- if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC) {
- struct ieee80211_sta *sta = container_of(
- (void *)entry, struct ieee80211_sta, drv_priv);
- macid = sta->aid + 1;
- }
-
- h2c_parameter[0] = macid;
- h2c_parameter[2] =
- entry->rssi_stat.undecorated_smoothed_pwdb & 0x7F;
-
- if (UL_DL_STATE)
- h2c_parameter[3] |= RAINFO_BE_RX_STATE;
-
- if (tx_bf_en)
- h2c_parameter[3] |= RAINFO_BF_STATE;
- if (STBC_TX)
- h2c_parameter[3] |= RAINFO_STBC_STATE;
- if (dm->noisy_decision)
- h2c_parameter[3] |= RAINFO_NOISY_STATE;
-
- if (entry->rssi_stat.is_send_rssi == RA_RSSI_STATE_SEND) {
- h2c_parameter[3] |= RAINFO_INIT_RSSI_RATE_STATE;
- entry->rssi_stat.is_send_rssi = RA_RSSI_STATE_HOLD;
- }
-
- h2c_parameter[4] = (ra_tab->RA_threshold_offset & 0x7f) |
- (ra_tab->RA_offset_direction << 7);
-
- odm_fill_h2c_cmd(dm, ODM_H2C_RSSI_REPORT, cmdlen,
- h2c_parameter);
- }
-
- if (tmp_entry_min_pwdb != 0xff)
- dm->rssi_min = tmp_entry_min_pwdb;
-}
-
-static void odm_rssi_monitor_check_ap(void *dm_void) {}
-
-void odm_rssi_monitor_check(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- if (!(dm->support_ability & ODM_BB_RSSI_MONITOR))
- return;
-
- switch (dm->support_platform) {
- case ODM_WIN:
- odm_rssi_monitor_check_mp(dm);
- break;
-
- case ODM_CE:
- odm_rssi_monitor_check_ce(dm);
- break;
-
- case ODM_AP:
- odm_rssi_monitor_check_ap(dm);
- break;
-
- default:
- break;
- }
-}
-
-void odm_rate_adaptive_mask_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct odm_rate_adaptive *odm_ra = &dm->rate_adaptive;
-
- odm_ra->type = dm_type_by_driver;
- if (odm_ra->type == dm_type_by_driver)
- dm->is_use_ra_mask = true;
- else
- dm->is_use_ra_mask = false;
-
- odm_ra->ratr_state = DM_RATR_STA_INIT;
-
- odm_ra->ldpc_thres = 35;
- odm_ra->is_use_ldpc = false;
-
- odm_ra->high_rssi_thresh = 50;
- odm_ra->low_rssi_thresh = 20;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: odm_refresh_rate_adaptive_mask()
- *
- * Overview:	Update the rate adaptive mask according to the current RSSI
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/27/2009 hpfan Create version 0.
- *
- *---------------------------------------------------------------------------
- */
-void odm_refresh_rate_adaptive_mask(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
-
- if (!dm->is_linked)
- return;
-
- if (!(dm->support_ability & ODM_BB_RA_MASK)) {
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "%s(): Return cos not supported\n", __func__);
- return;
- }
-
- ra_tab->force_update_ra_mask_count++;
-	/* 2011/09/29 MH: In the first HW integration stage we provide four
-	 * different handlers that operate at the same time.
-	 * In stages 2/3 we need to provide a universal interface and merge
-	 * all of the HW dynamic mechanisms.
-	 */
- switch (dm->support_platform) {
- case ODM_WIN:
- odm_refresh_rate_adaptive_mask_mp(dm);
- break;
-
- case ODM_CE:
- odm_refresh_rate_adaptive_mask_ce(dm);
- break;
-
- case ODM_AP:
- odm_refresh_rate_adaptive_mask_apadsl(dm);
- break;
- }
-}
-
-static u8 phydm_trans_platform_bw(void *dm_void, u8 BW)
-{
- if (BW == HT_CHANNEL_WIDTH_20)
- BW = PHYDM_BW_20;
-
- else if (BW == HT_CHANNEL_WIDTH_20_40)
- BW = PHYDM_BW_40;
-
- else if (BW == HT_CHANNEL_WIDTH_80)
- BW = PHYDM_BW_80;
-
- return BW;
-}
-
-static u8 phydm_trans_platform_rf_type(void *dm_void, u8 rf_type)
-{
- if (rf_type == RF_1T2R)
- rf_type = PHYDM_RF_1T2R;
-
- else if (rf_type == RF_2T4R)
- rf_type = PHYDM_RF_2T4R;
-
- else if (rf_type == RF_2T2R)
- rf_type = PHYDM_RF_2T2R;
-
- else if (rf_type == RF_1T1R)
- rf_type = PHYDM_RF_1T1R;
-
- else if (rf_type == RF_2T2R_GREEN)
- rf_type = PHYDM_RF_2T2R_GREEN;
-
- else if (rf_type == RF_3T3R)
- rf_type = PHYDM_RF_3T3R;
-
- else if (rf_type == RF_4T4R)
- rf_type = PHYDM_RF_4T4R;
-
- else if (rf_type == RF_2T3R)
- rf_type = PHYDM_RF_1T2R;
-
- else if (rf_type == RF_3T4R)
- rf_type = PHYDM_RF_3T4R;
-
- return rf_type;
-}
-
-static u32 phydm_trans_platform_wireless_mode(void *dm_void, u32 wireless_mode)
-{
- return wireless_mode;
-}
-
-u8 phydm_vht_en_mapping(void *dm_void, u32 wireless_mode)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 vht_en_out = 0;
-
- if ((wireless_mode == PHYDM_WIRELESS_MODE_AC_5G) ||
- (wireless_mode == PHYDM_WIRELESS_MODE_AC_24G) ||
- (wireless_mode == PHYDM_WIRELESS_MODE_AC_ONLY)) {
- vht_en_out = 1;
- /**/
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "wireless_mode= (( 0x%x )), VHT_EN= (( %d ))\n",
- wireless_mode, vht_en_out);
- return vht_en_out;
-}
-
-u8 phydm_rate_id_mapping(void *dm_void, u32 wireless_mode, u8 rf_type, u8 bw)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 rate_id_idx = 0;
- u8 phydm_BW;
- u8 phydm_rf_type;
-
- phydm_BW = phydm_trans_platform_bw(dm, bw);
- phydm_rf_type = phydm_trans_platform_rf_type(dm, rf_type);
- wireless_mode = phydm_trans_platform_wireless_mode(dm, wireless_mode);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_RA_MASK,
- "wireless_mode= (( 0x%x )), rf_type = (( 0x%x )), BW = (( 0x%x ))\n",
- wireless_mode, phydm_rf_type, phydm_BW);
-
- switch (wireless_mode) {
- case PHYDM_WIRELESS_MODE_N_24G: {
- if (phydm_BW == PHYDM_BW_40) {
- if (phydm_rf_type == PHYDM_RF_1T1R)
- rate_id_idx = PHYDM_BGN_40M_1SS;
- else if (phydm_rf_type == PHYDM_RF_2T2R)
- rate_id_idx = PHYDM_BGN_40M_2SS;
- else
- rate_id_idx = PHYDM_ARFR5_N_3SS;
-
- } else {
- if (phydm_rf_type == PHYDM_RF_1T1R)
- rate_id_idx = PHYDM_BGN_20M_1SS;
- else if (phydm_rf_type == PHYDM_RF_2T2R)
- rate_id_idx = PHYDM_BGN_20M_2SS;
- else
- rate_id_idx = PHYDM_ARFR5_N_3SS;
- }
- } break;
-
- case PHYDM_WIRELESS_MODE_N_5G: {
- if (phydm_rf_type == PHYDM_RF_1T1R)
- rate_id_idx = PHYDM_GN_N1SS;
- else if (phydm_rf_type == PHYDM_RF_2T2R)
- rate_id_idx = PHYDM_GN_N2SS;
- else
- rate_id_idx = PHYDM_ARFR5_N_3SS;
- }
-
- break;
-
- case PHYDM_WIRELESS_MODE_G:
- rate_id_idx = PHYDM_BG;
- break;
-
- case PHYDM_WIRELESS_MODE_A:
- rate_id_idx = PHYDM_G;
- break;
-
- case PHYDM_WIRELESS_MODE_B:
- rate_id_idx = PHYDM_B_20M;
- break;
-
- case PHYDM_WIRELESS_MODE_AC_5G:
- case PHYDM_WIRELESS_MODE_AC_ONLY: {
- if (phydm_rf_type == PHYDM_RF_1T1R)
- rate_id_idx = PHYDM_ARFR1_AC_1SS;
- else if (phydm_rf_type == PHYDM_RF_2T2R)
- rate_id_idx = PHYDM_ARFR0_AC_2SS;
- else
- rate_id_idx = PHYDM_ARFR4_AC_3SS;
- } break;
-
- case PHYDM_WIRELESS_MODE_AC_24G: {
-		/* Be careful when setting the "Lowest rate" while using
-		 * PHYDM_ARFR4_AC_3SS in 2.4G/5G
-		 */
- if (phydm_BW >= PHYDM_BW_80) {
- if (phydm_rf_type == PHYDM_RF_1T1R)
- rate_id_idx = PHYDM_ARFR1_AC_1SS;
- else if (phydm_rf_type == PHYDM_RF_2T2R)
- rate_id_idx = PHYDM_ARFR0_AC_2SS;
- else
- rate_id_idx = PHYDM_ARFR4_AC_3SS;
- } else {
- if (phydm_rf_type == PHYDM_RF_1T1R)
- rate_id_idx = PHYDM_ARFR2_AC_2G_1SS;
- else if (phydm_rf_type == PHYDM_RF_2T2R)
- rate_id_idx = PHYDM_ARFR3_AC_2G_2SS;
- else
- rate_id_idx = PHYDM_ARFR4_AC_3SS;
- }
- } break;
-
- default:
- rate_id_idx = 0;
- break;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK, "RA rate ID = (( 0x%x ))\n",
- rate_id_idx);
-
- return rate_id_idx;
-}
-
-void phydm_update_hal_ra_mask(void *dm_void, u32 wireless_mode, u8 rf_type,
- u8 BW, u8 mimo_ps_enable, u8 disable_cck_rate,
- u32 *ratr_bitmap_msb_in, u32 *ratr_bitmap_lsb_in,
- u8 tx_rate_level)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 phydm_rf_type;
- u8 phydm_BW;
- u32 ratr_bitmap = *ratr_bitmap_lsb_in,
- ratr_bitmap_msb = *ratr_bitmap_msb_in;
-
- wireless_mode = phydm_trans_platform_wireless_mode(dm, wireless_mode);
-
- phydm_rf_type = phydm_trans_platform_rf_type(dm, rf_type);
- phydm_BW = phydm_trans_platform_bw(dm, BW);
-
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "Platform original RA Mask = (( 0x %x | %x ))\n",
- ratr_bitmap_msb, ratr_bitmap);
-
- switch (wireless_mode) {
- case PHYDM_WIRELESS_MODE_B: {
- ratr_bitmap &= 0x0000000f;
- } break;
-
- case PHYDM_WIRELESS_MODE_G: {
- ratr_bitmap &= 0x00000ff5;
- } break;
-
- case PHYDM_WIRELESS_MODE_A: {
- ratr_bitmap &= 0x00000ff0;
- } break;
-
- case PHYDM_WIRELESS_MODE_N_24G:
- case PHYDM_WIRELESS_MODE_N_5G: {
- if (mimo_ps_enable)
- phydm_rf_type = PHYDM_RF_1T1R;
-
- if (phydm_rf_type == PHYDM_RF_1T1R) {
- if (phydm_BW == PHYDM_BW_40)
- ratr_bitmap &= 0x000ff015;
- else
- ratr_bitmap &= 0x000ff005;
- } else if (phydm_rf_type == PHYDM_RF_2T2R ||
- phydm_rf_type == PHYDM_RF_2T4R ||
- phydm_rf_type == PHYDM_RF_2T3R) {
- if (phydm_BW == PHYDM_BW_40)
- ratr_bitmap &= 0x0ffff015;
- else
- ratr_bitmap &= 0x0ffff005;
- } else { /*3T*/
-
- ratr_bitmap &= 0xfffff015;
- ratr_bitmap_msb &= 0xf;
- }
- } break;
-
- case PHYDM_WIRELESS_MODE_AC_24G: {
- if (phydm_rf_type == PHYDM_RF_1T1R) {
- ratr_bitmap &= 0x003ff015;
- } else if (phydm_rf_type == PHYDM_RF_2T2R ||
- phydm_rf_type == PHYDM_RF_2T4R ||
- phydm_rf_type == PHYDM_RF_2T3R) {
- ratr_bitmap &= 0xfffff015;
- } else { /*3T*/
-
- ratr_bitmap &= 0xfffff010;
- ratr_bitmap_msb &= 0x3ff;
- }
-
- if (phydm_BW ==
- PHYDM_BW_20) { /* AC 20MHz doesn't support MCS9 */
- ratr_bitmap &= 0x7fdfffff;
- ratr_bitmap_msb &= 0x1ff;
- }
- } break;
-
- case PHYDM_WIRELESS_MODE_AC_5G: {
- if (phydm_rf_type == PHYDM_RF_1T1R) {
- ratr_bitmap &= 0x003ff010;
- } else if (phydm_rf_type == PHYDM_RF_2T2R ||
- phydm_rf_type == PHYDM_RF_2T4R ||
- phydm_rf_type == PHYDM_RF_2T3R) {
- ratr_bitmap &= 0xfffff010;
- } else { /*3T*/
-
- ratr_bitmap &= 0xfffff010;
- ratr_bitmap_msb &= 0x3ff;
- }
-
- if (phydm_BW ==
- PHYDM_BW_20) { /* AC 20MHz doesn't support MCS9 */
- ratr_bitmap &= 0x7fdfffff;
- ratr_bitmap_msb &= 0x1ff;
- }
- } break;
-
- default:
- break;
- }
-
- if (wireless_mode != PHYDM_WIRELESS_MODE_B) {
- if (tx_rate_level == 0)
- ratr_bitmap &= 0xffffffff;
- else if (tx_rate_level == 1)
- ratr_bitmap &= 0xfffffff0;
- else if (tx_rate_level == 2)
- ratr_bitmap &= 0xffffefe0;
- else if (tx_rate_level == 3)
- ratr_bitmap &= 0xffffcfc0;
- else if (tx_rate_level == 4)
- ratr_bitmap &= 0xffff8f80;
- else if (tx_rate_level >= 5)
- ratr_bitmap &= 0xffff0f00;
- }
-
- if (disable_cck_rate)
- ratr_bitmap &= 0xfffffff0;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_RA_MASK,
- "wireless_mode= (( 0x%x )), rf_type = (( 0x%x )), BW = (( 0x%x )), MimoPs_en = (( %d )), tx_rate_level= (( 0x%x ))\n",
- wireless_mode, phydm_rf_type, phydm_BW, mimo_ps_enable,
- tx_rate_level);
-
- *ratr_bitmap_lsb_in = ratr_bitmap;
- *ratr_bitmap_msb_in = ratr_bitmap_msb;
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "Phydm modified RA Mask = (( 0x %x | %x ))\n",
- *ratr_bitmap_msb_in, *ratr_bitmap_lsb_in);
-}
-
-u8 phydm_RA_level_decision(void *dm_void, u32 rssi, u8 ratr_state)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 ra_rate_floor_table[RA_FLOOR_TABLE_SIZE] = {
- 20, 34, 38, 42,
- 46, 50, 100}; /*MCS0 ~ MCS4 , VHT1SS MCS0 ~ MCS4 , G 6M~24M*/
- u8 new_ratr_state = 0;
- u8 i;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_RA_MASK,
- "curr RA level = ((%d)), Rate_floor_table ori [ %d , %d, %d , %d, %d, %d]\n",
- ratr_state, ra_rate_floor_table[0], ra_rate_floor_table[1],
- ra_rate_floor_table[2], ra_rate_floor_table[3],
- ra_rate_floor_table[4], ra_rate_floor_table[5]);
-
- for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
- if (i >= (ratr_state))
- ra_rate_floor_table[i] += RA_FLOOR_UP_GAP;
- }
-
- ODM_RT_TRACE(
- dm, ODM_COMP_RA_MASK,
- "RSSI = ((%d)), Rate_floor_table_mod [ %d , %d, %d , %d, %d, %d]\n",
- rssi, ra_rate_floor_table[0], ra_rate_floor_table[1],
- ra_rate_floor_table[2], ra_rate_floor_table[3],
- ra_rate_floor_table[4], ra_rate_floor_table[5]);
-
- for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
- if (rssi < ra_rate_floor_table[i]) {
- new_ratr_state = i;
- break;
- }
- }
-
- return new_ratr_state;
-}
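A worked example of the floor-table logic above (illustrative numbers only): with ratr_state = 3, entries from index 3 upward get RA_FLOOR_UP_GAP (3) added, so {20, 34, 38, 42, 46, 50, 100} becomes {20, 34, 38, 45, 49, 53, 103}. An RSSI of 43 then still maps to level 3 (43 < 45), whereas the unmodified table would already promote it to level 4 (43 > 42); the station only moves up once its RSSI clears 45, which keeps it from flapping across the 42 boundary.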
-
-void odm_refresh_rate_adaptive_mask_mp(void *dm_void) {}
-
-void odm_refresh_rate_adaptive_mask_ce(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
- void *adapter = dm->adapter;
- u32 i;
- struct rtl_sta_info *entry;
- u8 ratr_state_new;
-
- if (!dm->is_use_ra_mask) {
- ODM_RT_TRACE(
- dm, ODM_COMP_RA_MASK,
- "<---- %s(): driver does not control rate adaptive mask\n",
- __func__);
- return;
- }
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- entry = dm->odm_sta_info[i];
-
- if (!IS_STA_VALID(entry))
- continue;
-
- if (is_multicast_ether_addr(entry->mac_addr))
- continue;
- else if (is_broadcast_ether_addr(entry->mac_addr))
- continue;
-
- ratr_state_new = phydm_RA_level_decision(
- dm, entry->rssi_stat.undecorated_smoothed_pwdb,
- entry->rssi_level);
-
- if ((entry->rssi_level != ratr_state_new) ||
- (ra_tab->force_update_ra_mask_count >=
- FORCED_UPDATE_RAMASK_PERIOD)) {
- ra_tab->force_update_ra_mask_count = 0;
- ODM_RT_TRACE(
- dm, ODM_COMP_RA_MASK,
- "Update Tx RA Level: ((%x)) -> ((%x)), RSSI = ((%d))\n",
- entry->rssi_level, ratr_state_new,
- entry->rssi_stat.undecorated_smoothed_pwdb);
-
- entry->rssi_level = ratr_state_new;
- rtl_hal_update_ra_mask(adapter, entry,
- entry->rssi_level);
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "Stay in RA level = (( %d ))\n\n",
- ratr_state_new);
- /**/
- }
- }
-}
-
-void odm_refresh_rate_adaptive_mask_apadsl(void *dm_void) {}
-
-void odm_refresh_basic_rate_mask(void *dm_void) {}
-
-u8 phydm_rate_order_compute(void *dm_void, u8 rate_idx)
-{
- u8 rate_order = 0;
-
- if (rate_idx >= ODM_RATEVHTSS4MCS0) {
- rate_idx -= ODM_RATEVHTSS4MCS0;
- /**/
- } else if (rate_idx >= ODM_RATEVHTSS3MCS0) {
- rate_idx -= ODM_RATEVHTSS3MCS0;
- /**/
- } else if (rate_idx >= ODM_RATEVHTSS2MCS0) {
- rate_idx -= ODM_RATEVHTSS2MCS0;
- /**/
- } else if (rate_idx >= ODM_RATEVHTSS1MCS0) {
- rate_idx -= ODM_RATEVHTSS1MCS0;
- /**/
- } else if (rate_idx >= ODM_RATEMCS24) {
- rate_idx -= ODM_RATEMCS24;
- /**/
- } else if (rate_idx >= ODM_RATEMCS16) {
- rate_idx -= ODM_RATEMCS16;
- /**/
- } else if (rate_idx >= ODM_RATEMCS8) {
- rate_idx -= ODM_RATEMCS8;
- /**/
- }
- rate_order = rate_idx;
-
- return rate_order;
-}
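In other words, the function reduces a rate index to its MCS order within its own spatial-stream class: for example, ODM_RATEVHTSS2MCS3 (0x39) minus ODM_RATEVHTSS2MCS0 (0x36) gives order 3, the same order that VHT 1SS MCS3 or HT MCS11 (three above ODM_RATEMCS8) would yield, which is what lets phydm_ra_common_info_update() below compare clients running different stream counts on a common scale.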
-
-static void phydm_ra_common_info_update(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
- u16 macid;
- u8 rate_order_tmp;
- u8 cnt = 0;
-
- ra_tab->highest_client_tx_order = 0;
- ra_tab->power_tracking_flag = 1;
-
- if (dm->number_linked_client != 0) {
- for (macid = 0; macid < ODM_ASSOCIATE_ENTRY_NUM; macid++) {
- rate_order_tmp = phydm_rate_order_compute(
- dm, ((ra_tab->link_tx_rate[macid]) & 0x7f));
-
- if (rate_order_tmp >=
- (ra_tab->highest_client_tx_order)) {
- ra_tab->highest_client_tx_order =
- rate_order_tmp;
- ra_tab->highest_client_tx_rate_order = macid;
- }
-
- cnt++;
-
- if (cnt == dm->number_linked_client)
- break;
- }
- ODM_RT_TRACE(
- dm, ODM_COMP_RATE_ADAPTIVE,
- "MACID[%d], Highest Tx order Update for power tracking: %d\n",
- (ra_tab->highest_client_tx_rate_order),
- (ra_tab->highest_client_tx_order));
- }
-}
-
-void phydm_ra_info_watchdog(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- phydm_ra_common_info_update(dm);
- phydm_ra_dynamic_retry_limit(dm);
- phydm_ra_dynamic_retry_count(dm);
- odm_refresh_rate_adaptive_mask(dm);
- odm_refresh_basic_rate_mask(dm);
-}
-
-void phydm_ra_info_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct ra_table *ra_tab = &dm->dm_ra_table;
-
- ra_tab->highest_client_tx_rate_order = 0;
- ra_tab->highest_client_tx_order = 0;
- ra_tab->RA_threshold_offset = 0;
- ra_tab->RA_offset_direction = 0;
-}
-
-u8 odm_find_rts_rate(void *dm_void, u8 tx_rate, bool is_erp_protect)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- u8 rts_ini_rate = ODM_RATE6M;
-
- if (is_erp_protect) { /* use CCK rate as RTS*/
- rts_ini_rate = ODM_RATE1M;
- } else {
- switch (tx_rate) {
- case ODM_RATEVHTSS3MCS9:
- case ODM_RATEVHTSS3MCS8:
- case ODM_RATEVHTSS3MCS7:
- case ODM_RATEVHTSS3MCS6:
- case ODM_RATEVHTSS3MCS5:
- case ODM_RATEVHTSS3MCS4:
- case ODM_RATEVHTSS3MCS3:
- case ODM_RATEVHTSS2MCS9:
- case ODM_RATEVHTSS2MCS8:
- case ODM_RATEVHTSS2MCS7:
- case ODM_RATEVHTSS2MCS6:
- case ODM_RATEVHTSS2MCS5:
- case ODM_RATEVHTSS2MCS4:
- case ODM_RATEVHTSS2MCS3:
- case ODM_RATEVHTSS1MCS9:
- case ODM_RATEVHTSS1MCS8:
- case ODM_RATEVHTSS1MCS7:
- case ODM_RATEVHTSS1MCS6:
- case ODM_RATEVHTSS1MCS5:
- case ODM_RATEVHTSS1MCS4:
- case ODM_RATEVHTSS1MCS3:
- case ODM_RATEMCS15:
- case ODM_RATEMCS14:
- case ODM_RATEMCS13:
- case ODM_RATEMCS12:
- case ODM_RATEMCS11:
- case ODM_RATEMCS7:
- case ODM_RATEMCS6:
- case ODM_RATEMCS5:
- case ODM_RATEMCS4:
- case ODM_RATEMCS3:
- case ODM_RATE54M:
- case ODM_RATE48M:
- case ODM_RATE36M:
- case ODM_RATE24M:
- rts_ini_rate = ODM_RATE24M;
- break;
- case ODM_RATEVHTSS3MCS2:
- case ODM_RATEVHTSS3MCS1:
- case ODM_RATEVHTSS2MCS2:
- case ODM_RATEVHTSS2MCS1:
- case ODM_RATEVHTSS1MCS2:
- case ODM_RATEVHTSS1MCS1:
- case ODM_RATEMCS10:
- case ODM_RATEMCS9:
- case ODM_RATEMCS2:
- case ODM_RATEMCS1:
- case ODM_RATE18M:
- case ODM_RATE12M:
- rts_ini_rate = ODM_RATE12M;
- break;
- case ODM_RATEVHTSS3MCS0:
- case ODM_RATEVHTSS2MCS0:
- case ODM_RATEVHTSS1MCS0:
- case ODM_RATEMCS8:
- case ODM_RATEMCS0:
- case ODM_RATE9M:
- case ODM_RATE6M:
- rts_ini_rate = ODM_RATE6M;
- break;
- case ODM_RATE11M:
- case ODM_RATE5_5M:
- case ODM_RATE2M:
- case ODM_RATE1M:
- rts_ini_rate = ODM_RATE1M;
- break;
- default:
- rts_ini_rate = ODM_RATE6M;
- break;
- }
- }
-
-	/* band_type 1 = ODM_BAND_5G: no CCK rates on 5 GHz, clamp RTS to OFDM 6M */
-	if (*dm->band_type == 1) {
- if (rts_ini_rate < ODM_RATE6M)
- rts_ini_rate = ODM_RATE6M;
- }
- return rts_ini_rate;
-}
-
-static void odm_set_ra_dm_arfb_by_noisy(struct phy_dm_struct *dm) {}
-
-void odm_update_noisy_state(void *dm_void, bool is_noisy_state_from_c2h)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- /* JJ ADD 20161014 */
- if (dm->support_ic_type == ODM_RTL8821 ||
- dm->support_ic_type == ODM_RTL8812 ||
- dm->support_ic_type == ODM_RTL8723B ||
- dm->support_ic_type == ODM_RTL8192E ||
- dm->support_ic_type == ODM_RTL8188E ||
- dm->support_ic_type == ODM_RTL8723D ||
- dm->support_ic_type == ODM_RTL8710B)
- dm->is_noisy_state = is_noisy_state_from_c2h;
- odm_set_ra_dm_arfb_by_noisy(dm);
-};
-
-void phydm_update_pwr_track(void *dm_void, u8 rate)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK, "Pwr Track Get rate=0x%x\n",
- rate);
-
- dm->tx_rate = rate;
-}
-
-/* RA_MASK_PHYDMLIZE, will delete it later*/
-
-bool odm_ra_state_check(void *dm_void, s32 rssi, bool is_force_update,
- u8 *ra_tr_state)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct odm_rate_adaptive *ra = &dm->rate_adaptive;
- const u8 go_up_gap = 5;
- u8 high_rssi_thresh_for_ra = ra->high_rssi_thresh;
- u8 low_rssi_thresh_for_ra = ra->low_rssi_thresh;
- u8 ratr_state;
-
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "RSSI= (( %d )), Current_RSSI_level = (( %d ))\n", rssi,
- *ra_tr_state);
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "[Ori RA RSSI Thresh] High= (( %d )), Low = (( %d ))\n",
- high_rssi_thresh_for_ra, low_rssi_thresh_for_ra);
-	/* Threshold adjustment:
-	 * when the RSSI state is about to move up one or two levels, make sure
-	 * the RSSI really is high enough. go_up_gap is added here to avoid
-	 * oscillation at the level boundaries.
-	 */
-
- switch (*ra_tr_state) {
- case DM_RATR_STA_INIT:
- case DM_RATR_STA_HIGH:
- break;
-
- case DM_RATR_STA_MIDDLE:
- high_rssi_thresh_for_ra += go_up_gap;
- break;
-
- case DM_RATR_STA_LOW:
- high_rssi_thresh_for_ra += go_up_gap;
- low_rssi_thresh_for_ra += go_up_gap;
- break;
-
- default:
- WARN_ONCE(true, "wrong rssi level setting %d !", *ra_tr_state);
- break;
- }
-
- /* Decide ratr_state by RSSI.*/
- if (rssi > high_rssi_thresh_for_ra)
- ratr_state = DM_RATR_STA_HIGH;
- else if (rssi > low_rssi_thresh_for_ra)
- ratr_state = DM_RATR_STA_MIDDLE;
-
- else
- ratr_state = DM_RATR_STA_LOW;
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "[Mod RA RSSI Thresh] High= (( %d )), Low = (( %d ))\n",
- high_rssi_thresh_for_ra, low_rssi_thresh_for_ra);
-
- if (*ra_tr_state != ratr_state || is_force_update) {
- ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "[RSSI Level Update] %d->%d\n", *ra_tr_state,
- ratr_state);
- *ra_tr_state = ratr_state;
- return true;
- }
-
- return false;
-}
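As a concrete example of the hysteresis above: with the defaults from odm_rate_adaptive_mask_init() (high_rssi_thresh = 50, low_rssi_thresh = 20) and go_up_gap = 5, a station currently at DM_RATR_STA_MIDDLE is only promoted to DM_RATR_STA_HIGH once its RSSI exceeds 55, while a station already at DM_RATR_STA_HIGH drops back to MIDDLE as soon as its RSSI falls to 50 or below; the 5 dB gap keeps a link hovering around 50 from toggling between the two rate masks.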
diff --git a/drivers/staging/rtlwifi/phydm/phydm_rainfo.h b/drivers/staging/rtlwifi/phydm/phydm_rainfo.h
deleted file mode 100644
index 6c1f30e758bc..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_rainfo.h
+++ /dev/null
@@ -1,258 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __PHYDMRAINFO_H__
-#define __PHYDMRAINFO_H__
-
-/*#define RAINFO_VERSION "2.0"*/ /*2014.11.04*/
-/*#define RAINFO_VERSION "3.0"*/ /*2015.01.13 Dino*/
-/*#define RAINFO_VERSION "3.1"*/ /*2015.01.14 Dino*/
-/*#define RAINFO_VERSION "3.3"*/ /*2015.07.29 YuChen*/
-/*#define RAINFO_VERSION "3.4"*/ /*2015.12.15 Stanley*/
-/*#define RAINFO_VERSION "4.0"*/ /*2016.03.24 Dino, Add more RA mask
- *state and Phydm-lize partial ra mask
- *function
- */
-/*#define RAINFO_VERSION "4.1"*/ /*2016.04.20 Dino, Add new function to
- *adjust PCR RA threshold
- */
-/*#define RAINFO_VERSION "4.2"*/ /*2016.05.17 Dino, Add H2C debug cmd */
-#define RAINFO_VERSION "4.3" /*2016.07.11 Dino, Fix RA hang in CCK 1M problem*/
-
-#define FORCED_UPDATE_RAMASK_PERIOD 5
-
-#define H2C_0X42_LENGTH 5
-#define H2C_MAX_LENGTH 7
-
-#define RA_FLOOR_UP_GAP 3
-#define RA_FLOOR_TABLE_SIZE 7
-
-#define ACTIVE_TP_THRESHOLD 150
-#define RA_RETRY_DESCEND_NUM 2
-#define RA_RETRY_LIMIT_LOW 4
-#define RA_RETRY_LIMIT_HIGH 32
-
-#define RAINFO_BE_RX_STATE BIT(0) /* 1:RX */ /* ULDL */
-#define RAINFO_STBC_STATE BIT(1)
-/* #define RAINFO_LDPC_STATE BIT2 */
-#define RAINFO_NOISY_STATE BIT(2) /* set by Noisy_Detection */
-#define RAINFO_SHURTCUT_STATE BIT(3)
-#define RAINFO_SHURTCUT_FLAG BIT(4)
-#define RAINFO_INIT_RSSI_RATE_STATE BIT(5)
-#define RAINFO_BF_STATE BIT(6)
-#define RAINFO_BE_TX_STATE BIT(7) /* 1:TX */
-
-#define RA_MASK_CCK 0xf
-#define RA_MASK_OFDM 0xff0
-#define RA_MASK_HT1SS 0xff000
-#define RA_MASK_HT2SS 0xff00000
-/*#define RA_MASK_MCS3SS */
-#define RA_MASK_HT4SS 0xff0
-#define RA_MASK_VHT1SS 0x3ff000
-#define RA_MASK_VHT2SS 0xffc00000
-
-#define RA_FIRST_MACID 0
-
-#define ap_init_rate_adaptive_state odm_rate_adaptive_state_ap_init
-
-#define DM_RATR_STA_INIT 0
-#define DM_RATR_STA_HIGH 1
-#define DM_RATR_STA_MIDDLE 2
-#define DM_RATR_STA_LOW 3
-#define DM_RATR_STA_ULTRA_LOW 4
-
-enum phydm_ra_arfr_num {
- ARFR_0_RATE_ID = 0x9,
- ARFR_1_RATE_ID = 0xa,
- ARFR_2_RATE_ID = 0xb,
- ARFR_3_RATE_ID = 0xc,
- ARFR_4_RATE_ID = 0xd,
- ARFR_5_RATE_ID = 0xe
-};
-
-enum phydm_ra_dbg_para {
- RADBG_PCR_TH_OFFSET = 0,
- RADBG_RTY_PENALTY = 1,
- RADBG_N_HIGH = 2,
- RADBG_N_LOW = 3,
- RADBG_TRATE_UP_TABLE = 4,
- RADBG_TRATE_DOWN_TABLE = 5,
- RADBG_TRYING_NECESSARY = 6,
- RADBG_TDROPING_NECESSARY = 7,
- RADBG_RATE_UP_RTY_RATIO = 8,
- RADBG_RATE_DOWN_RTY_RATIO = 9, /* u8 */
-
- RADBG_DEBUG_MONITOR1 = 0xc,
- RADBG_DEBUG_MONITOR2 = 0xd,
- RADBG_DEBUG_MONITOR3 = 0xe,
- RADBG_DEBUG_MONITOR4 = 0xf,
- RADBG_DEBUG_MONITOR5 = 0x10,
- NUM_RA_PARA
-};
-
-enum phydm_wireless_mode {
- PHYDM_WIRELESS_MODE_UNKNOWN = 0x00,
- PHYDM_WIRELESS_MODE_A = 0x01,
- PHYDM_WIRELESS_MODE_B = 0x02,
- PHYDM_WIRELESS_MODE_G = 0x04,
- PHYDM_WIRELESS_MODE_AUTO = 0x08,
- PHYDM_WIRELESS_MODE_N_24G = 0x10,
- PHYDM_WIRELESS_MODE_N_5G = 0x20,
- PHYDM_WIRELESS_MODE_AC_5G = 0x40,
- PHYDM_WIRELESS_MODE_AC_24G = 0x80,
- PHYDM_WIRELESS_MODE_AC_ONLY = 0x100,
- PHYDM_WIRELESS_MODE_MAX = 0x800,
- PHYDM_WIRELESS_MODE_ALL = 0xFFFF
-};
-
-enum phydm_rateid_idx {
- PHYDM_BGN_40M_2SS = 0,
- PHYDM_BGN_40M_1SS = 1,
- PHYDM_BGN_20M_2SS = 2,
- PHYDM_BGN_20M_1SS = 3,
- PHYDM_GN_N2SS = 4,
- PHYDM_GN_N1SS = 5,
- PHYDM_BG = 6,
- PHYDM_G = 7,
- PHYDM_B_20M = 8,
- PHYDM_ARFR0_AC_2SS = 9,
- PHYDM_ARFR1_AC_1SS = 10,
- PHYDM_ARFR2_AC_2G_1SS = 11,
- PHYDM_ARFR3_AC_2G_2SS = 12,
- PHYDM_ARFR4_AC_3SS = 13,
- PHYDM_ARFR5_N_3SS = 14
-};
-
-enum phydm_rf_type_def {
- PHYDM_RF_1T1R = 0,
- PHYDM_RF_1T2R,
- PHYDM_RF_2T2R,
- PHYDM_RF_2T2R_GREEN,
- PHYDM_RF_2T3R,
- PHYDM_RF_2T4R,
- PHYDM_RF_3T3R,
- PHYDM_RF_3T4R,
- PHYDM_RF_4T4R,
- PHYDM_RF_MAX_TYPE
-};
-
-enum phydm_bw {
- PHYDM_BW_20 = 0,
- PHYDM_BW_40,
- PHYDM_BW_80,
- PHYDM_BW_80_80,
- PHYDM_BW_160,
- PHYDM_BW_10,
- PHYDM_BW_5
-};
-
-struct ra_table {
- u8 firstconnect;
-
- u8 link_tx_rate[ODM_ASSOCIATE_ENTRY_NUM];
- u8 highest_client_tx_order;
- u16 highest_client_tx_rate_order;
- u8 power_tracking_flag;
- u8 RA_threshold_offset;
- u8 RA_offset_direction;
- u8 force_update_ra_mask_count;
-};
-
-struct odm_rate_adaptive {
- /* dm_type_by_fw/dm_type_by_driver */
- u8 type;
- /* if RSSI > high_rssi_thresh => ratr_state is DM_RATR_STA_HIGH */
- u8 high_rssi_thresh;
- /* if RSSI <= low_rssi_thresh => ratr_state is DM_RATR_STA_LOW */
- u8 low_rssi_thresh;
- /* Cur RSSI level, DM_RATR_STA_HIGH/DM_RATR_STA_MIDDLE/DM_RATR_STA_LOW*/
- u8 ratr_state;
-
- /* if RSSI > ldpc_thres => switch from LPDC to BCC */
- u8 ldpc_thres;
- bool is_lower_rts_rate;
-
- bool is_use_ldpc;
-};
-
-void phydm_h2C_debug(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len);
-
-void phydm_RA_debug_PCR(void *dm_void, u32 *const dm_value, u32 *_used,
- char *output, u32 *_out_len);
-
-void odm_c2h_ra_para_report_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len);
-
-void odm_ra_para_adjust(void *dm_void);
-
-void phydm_ra_dynamic_retry_count(void *dm_void);
-
-void phydm_ra_dynamic_retry_limit(void *dm_void);
-
-void phydm_ra_dynamic_rate_id_on_assoc(void *dm_void, u8 wireless_mode,
- u8 init_rate_id);
-
-void phydm_print_rate(void *dm_void, u8 rate, u32 dbg_component);
-
-void phydm_c2h_ra_report_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len);
-
-u8 phydm_rate_order_compute(void *dm_void, u8 rate_idx);
-
-void phydm_ra_info_watchdog(void *dm_void);
-
-void phydm_ra_info_init(void *dm_void);
-
-void odm_rssi_monitor_init(void *dm_void);
-
-void phydm_modify_RA_PCR_threshold(void *dm_void, u8 RA_offset_direction,
- u8 RA_threshold_offset);
-
-void odm_rssi_monitor_check(void *dm_void);
-
-void phydm_init_ra_info(void *dm_void);
-
-u8 phydm_vht_en_mapping(void *dm_void, u32 wireless_mode);
-
-u8 phydm_rate_id_mapping(void *dm_void, u32 wireless_mode, u8 rf_type, u8 bw);
-
-void phydm_update_hal_ra_mask(void *dm_void, u32 wireless_mode, u8 rf_type,
- u8 BW, u8 mimo_ps_enable, u8 disable_cck_rate,
- u32 *ratr_bitmap_msb_in, u32 *ratr_bitmap_in,
- u8 tx_rate_level);
-
-void odm_rate_adaptive_mask_init(void *dm_void);
-
-void odm_refresh_rate_adaptive_mask(void *dm_void);
-
-void odm_refresh_rate_adaptive_mask_mp(void *dm_void);
-
-void odm_refresh_rate_adaptive_mask_ce(void *dm_void);
-
-void odm_refresh_rate_adaptive_mask_apadsl(void *dm_void);
-
-u8 phydm_RA_level_decision(void *dm_void, u32 rssi, u8 ratr_state);
-
-bool odm_ra_state_check(void *dm_void, s32 RSSI, bool is_force_update,
- u8 *ra_tr_state);
-
-void odm_refresh_basic_rate_mask(void *dm_void);
-void odm_ra_post_action_on_assoc(void *dm);
-
-u8 odm_find_rts_rate(void *dm_void, u8 tx_rate, bool is_erp_protect);
-
-void odm_update_noisy_state(void *dm_void, bool is_noisy_state_from_c2h);
-
-void phydm_update_pwr_track(void *dm_void, u8 rate);
-
-#endif /*#ifndef __ODMRAINFO_H__*/
diff --git a/drivers/staging/rtlwifi/phydm/phydm_reg.h b/drivers/staging/rtlwifi/phydm/phydm_reg.h
deleted file mode 100644
index 562c1199d669..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_reg.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-/* ************************************************************
- * File Name: odm_reg.h
- *
- * Description:
- *
- * This file is for general register definition.
- *
- *
- * *************************************************************/
-#ifndef __HAL_ODM_REG_H__
-#define __HAL_ODM_REG_H__
-
-/*
- * Register Definition
- */
-
-/* MAC REG */
-#define ODM_BB_RESET 0x002
-#define ODM_DUMMY 0x4fe
-#define RF_T_METER_OLD 0x24
-#define RF_T_METER_NEW 0x42
-
-#define ODM_EDCA_VO_PARAM 0x500
-#define ODM_EDCA_VI_PARAM 0x504
-#define ODM_EDCA_BE_PARAM 0x508
-#define ODM_EDCA_BK_PARAM 0x50C
-#define ODM_TXPAUSE 0x522
-
-/* LTE_COEX */
-#define REG_LTECOEX_CTRL 0x07C0
-#define REG_LTECOEX_WRITE_DATA 0x07C4
-#define REG_LTECOEX_READ_DATA 0x07C8
-#define REG_LTECOEX_PATH_CONTROL 0x70
-
-/* BB REG */
-#define ODM_FPGA_PHY0_PAGE8 0x800
-#define ODM_PSD_SETTING 0x808
-#define ODM_AFE_SETTING 0x818
-#define ODM_TXAGC_B_6_18 0x830
-#define ODM_TXAGC_B_24_54 0x834
-#define ODM_TXAGC_B_MCS32_5 0x838
-#define ODM_TXAGC_B_MCS0_MCS3 0x83c
-#define ODM_TXAGC_B_MCS4_MCS7 0x848
-#define ODM_TXAGC_B_MCS8_MCS11 0x84c
-#define ODM_ANALOG_REGISTER 0x85c
-#define ODM_RF_INTERFACE_OUTPUT 0x860
-#define ODM_TXAGC_B_MCS12_MCS15 0x868
-#define ODM_TXAGC_B_11_A_2_11 0x86c
-#define ODM_AD_DA_LSB_MASK 0x874
-#define ODM_ENABLE_3_WIRE 0x88c
-#define ODM_PSD_REPORT 0x8b4
-#define ODM_R_ANT_SELECT 0x90c
-#define ODM_CCK_ANT_SELECT 0xa07
-#define ODM_CCK_PD_THRESH 0xa0a
-#define ODM_CCK_RF_REG1 0xa11
-#define ODM_CCK_MATCH_FILTER 0xa20
-#define ODM_CCK_RAKE_MAC 0xa2e
-#define ODM_CCK_CNT_RESET 0xa2d
-#define ODM_CCK_TX_DIVERSITY 0xa2f
-#define ODM_CCK_FA_CNT_MSB 0xa5b
-#define ODM_CCK_FA_CNT_LSB 0xa5c
-#define ODM_CCK_NEW_FUNCTION 0xa75
-#define ODM_OFDM_PHY0_PAGE_C 0xc00
-#define ODM_OFDM_RX_ANT 0xc04
-#define ODM_R_A_RXIQI 0xc14
-#define ODM_R_A_AGC_CORE1 0xc50
-#define ODM_R_A_AGC_CORE2 0xc54
-#define ODM_R_B_AGC_CORE1 0xc58
-#define ODM_R_AGC_PAR 0xc70
-#define ODM_R_HTSTF_AGC_PAR 0xc7c
-#define ODM_TX_PWR_TRAINING_A 0xc90
-#define ODM_TX_PWR_TRAINING_B 0xc98
-#define ODM_OFDM_FA_CNT1 0xcf0
-#define ODM_OFDM_PHY0_PAGE_D 0xd00
-#define ODM_OFDM_FA_CNT2 0xda0
-#define ODM_OFDM_FA_CNT3 0xda4
-#define ODM_OFDM_FA_CNT4 0xda8
-#define ODM_TXAGC_A_6_18 0xe00
-#define ODM_TXAGC_A_24_54 0xe04
-#define ODM_TXAGC_A_1_MCS32 0xe08
-#define ODM_TXAGC_A_MCS0_MCS3 0xe10
-#define ODM_TXAGC_A_MCS4_MCS7 0xe14
-#define ODM_TXAGC_A_MCS8_MCS11 0xe18
-#define ODM_TXAGC_A_MCS12_MCS15 0xe1c
-
-/* RF REG */
-#define ODM_GAIN_SETTING 0x00
-#define ODM_CHANNEL 0x18
-#define ODM_RF_T_METER 0x24
-#define ODM_RF_T_METER_92D 0x42
-#define ODM_RF_T_METER_88E 0x42
-#define ODM_RF_T_METER_92E 0x42
-#define ODM_RF_T_METER_8812 0x42
-#define REG_RF_TX_GAIN_OFFSET 0x55
-
-/* ant Detect Reg */
-#define ODM_DPDT 0x300
-
-/* PSD Init */
-#define ODM_PSDREG 0x808
-
-/* 92D path Div */
-#define PATHDIV_REG 0xB30
-#define PATHDIV_TRI 0xBA0
-
-/*
- * Bitmap Definition
- */
-
-#define BIT_FA_RESET BIT(0)
-
-#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0xC80
-#define REG_OFDM_0_ECCA_THRESHOLD 0xC4C
-#define REG_FPGA0_XB_LSSI_READ_BACK 0x8A4
-#define REG_FPGA0_TX_GAIN_STAGE 0x80C
-#define REG_OFDM_0_XA_AGC_CORE1 0xC50
-#define REG_OFDM_0_XB_AGC_CORE1 0xC58
-#define REG_A_TX_SCALE_JAGUAR 0xC1C
-#define REG_B_TX_SCALE_JAGUAR 0xE1C
-
-#define REG_AFE_XTAL_CTRL 0x0024
-#define REG_AFE_PLL_CTRL 0x0028
-#define REG_MAC_PHY_CTRL 0x002C
-
-#define RF_CHNLBW 0x18
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h b/drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h
deleted file mode 100644
index 5b59dffc72a5..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __ODM_REGDEFINE11AC_H__
-#define __ODM_REGDEFINE11AC_H__
-
-/* 2 RF REG LIST */
-
-/* 2 BB REG LIST */
-/* PAGE 8 */
-#define ODM_REG_CCK_RPT_FORMAT_11AC 0x804
-#define ODM_REG_BB_RX_PATH_11AC 0x808
-#define ODM_REG_BB_TX_PATH_11AC 0x80c
-#define ODM_REG_BB_ATC_11AC 0x860
-#define ODM_REG_EDCCA_POWER_CAL 0x8dc
-#define ODM_REG_DBG_RPT_11AC 0x8fc
-/* PAGE 9 */
-#define ODM_REG_EDCCA_DOWN_OPT 0x900
-#define ODM_REG_ACBB_EDCCA_ENHANCE 0x944
-#define odm_adc_trigger_jaguar2 0x95C /*ADC sample mode*/
-#define ODM_REG_OFDM_FA_RST_11AC 0x9A4
-#define ODM_REG_CCX_PERIOD_11AC 0x990
-#define ODM_REG_NHM_TH9_TH10_11AC 0x994
-#define ODM_REG_CLM_11AC 0x994
-#define ODM_REG_NHM_TH3_TO_TH0_11AC 0x998
-#define ODM_REG_NHM_TH7_TO_TH4_11AC 0x99c
-#define ODM_REG_NHM_TH8_11AC 0x9a0
-#define ODM_REG_NHM_9E8_11AC 0x9e8
-#define ODM_REG_CSI_CONTENT_VALUE 0x9b4
-/* PAGE A */
-#define ODM_REG_CCK_CCA_11AC 0xA0A
-#define ODM_REG_CCK_FA_RST_11AC 0xA2C
-#define ODM_REG_CCK_FA_11AC 0xA5C
-/* PAGE B */
-#define ODM_REG_RST_RPT_11AC 0xB58
-/* PAGE C */
-#define ODM_REG_TRMUX_11AC 0xC08
-#define ODM_REG_IGI_A_11AC 0xC50
-/* PAGE E */
-#define ODM_REG_IGI_B_11AC 0xE50
-#define ODM_REG_TRMUX_11AC_B 0xE08
-/* PAGE F */
-#define ODM_REG_CCK_CRC32_CNT_11AC 0xF04
-#define ODM_REG_CCK_CCA_CNT_11AC 0xF08
-#define ODM_REG_VHT_CRC32_CNT_11AC 0xF0c
-#define ODM_REG_HT_CRC32_CNT_11AC 0xF10
-#define ODM_REG_OFDM_CRC32_CNT_11AC 0xF14
-#define ODM_REG_OFDM_FA_11AC 0xF48
-#define ODM_REG_RPT_11AC 0xfa0
-#define ODM_REG_CLM_RESULT_11AC 0xfa4
-#define ODM_REG_NHM_CNT_11AC 0xfa8
-#define ODM_REG_NHM_DUR_READY_11AC 0xfb4
-
-#define ODM_REG_NHM_CNT7_TO_CNT4_11AC 0xfac
-#define ODM_REG_NHM_CNT11_TO_CNT8_11AC 0xfb0
-#define ODM_REG_OFDM_FA_TYPE2_11AC 0xFD0
-/* PAGE 18 */
-#define ODM_REG_IGI_C_11AC 0x1850
-/* PAGE 1A */
-#define ODM_REG_IGI_D_11AC 0x1A50
-
-/* 2 MAC REG LIST */
-#define ODM_REG_RESP_TX_11AC 0x6D8
-
-/* DIG Related */
-#define ODM_BIT_IGI_11AC 0xFFFFFFFF
-#define ODM_BIT_CCK_RPT_FORMAT_11AC BIT(16)
-#define ODM_BIT_BB_RX_PATH_11AC 0xF
-#define ODM_BIT_BB_TX_PATH_11AC 0xF
-#define ODM_BIT_BB_ATC_11AC BIT(14)
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h b/drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h
deleted file mode 100644
index 765e0a0c8c7b..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __ODM_REGDEFINE11N_H__
-#define __ODM_REGDEFINE11N_H__
-
-/* 2 RF REG LIST */
-#define ODM_REG_RF_MODE_11N 0x00
-#define ODM_REG_RF_0B_11N 0x0B
-#define ODM_REG_CHNBW_11N 0x18
-#define ODM_REG_T_METER_11N 0x24
-#define ODM_REG_RF_25_11N 0x25
-#define ODM_REG_RF_26_11N 0x26
-#define ODM_REG_RF_27_11N 0x27
-#define ODM_REG_RF_2B_11N 0x2B
-#define ODM_REG_RF_2C_11N 0x2C
-#define ODM_REG_RXRF_A3_11N 0x3C
-#define ODM_REG_T_METER_92D_11N 0x42
-#define ODM_REG_T_METER_88E_11N 0x42
-
-/* 2 BB REG LIST */
-/* PAGE 8 */
-#define ODM_REG_BB_CTRL_11N 0x800
-#define ODM_REG_RF_PIN_11N 0x804
-#define ODM_REG_PSD_CTRL_11N 0x808
-#define ODM_REG_TX_ANT_CTRL_11N 0x80C
-#define ODM_REG_BB_PWR_SAV5_11N 0x818
-#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
-#define ODM_REG_CCK_RPT_FORMAT_11N_B 0x82C
-#define ODM_REG_RX_DEFAULT_A_11N 0x858
-#define ODM_REG_RX_DEFAULT_B_11N 0x85A
-#define ODM_REG_BB_PWR_SAV3_11N 0x85C
-#define ODM_REG_ANTSEL_CTRL_11N 0x860
-#define ODM_REG_RX_ANT_CTRL_11N 0x864
-#define ODM_REG_PIN_CTRL_11N 0x870
-#define ODM_REG_BB_PWR_SAV1_11N 0x874
-#define ODM_REG_ANTSEL_PATH_11N 0x878
-#define ODM_REG_BB_3WIRE_11N 0x88C
-#define ODM_REG_SC_CNT_11N 0x8C4
-#define ODM_REG_PSD_DATA_11N 0x8B4
-#define ODM_REG_CCX_PERIOD_11N 0x894
-#define ODM_REG_NHM_TH9_TH10_11N 0x890
-#define ODM_REG_CLM_11N 0x890
-#define ODM_REG_NHM_TH3_TO_TH0_11N 0x898
-#define ODM_REG_NHM_TH7_TO_TH4_11N 0x89c
-#define ODM_REG_NHM_TH8_11N 0xe28
-#define ODM_REG_CLM_READY_11N 0x8b4
-#define ODM_REG_CLM_RESULT_11N 0x8d0
-#define ODM_REG_NHM_CNT_11N 0x8d8
-
-/* For struct acs_info, Jeffery, 2014-12-26 */
-#define ODM_REG_NHM_CNT7_TO_CNT4_11N 0x8dc
-#define ODM_REG_NHM_CNT9_TO_CNT8_11N 0x8d0
-#define ODM_REG_NHM_CNT10_TO_CNT11_11N 0x8d4
-
-/* PAGE 9 */
-#define ODM_REG_BB_CTRL_PAGE9_11N 0x900
-#define ODM_REG_DBG_RPT_11N 0x908
-#define ODM_REG_BB_TX_PATH_11N 0x90c
-#define ODM_REG_ANT_MAPPING1_11N 0x914
-#define ODM_REG_ANT_MAPPING2_11N 0x918
-#define ODM_REG_EDCCA_DOWN_OPT_11N 0x948
-#define ODM_REG_RX_DFIR_MOD_97F 0x948
-
-/* PAGE A */
-#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
-#define ODM_REG_CCK_ANT_SEL_11N 0xA04
-#define ODM_REG_CCK_CCA_11N 0xA0A
-#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
-#define ODM_REG_CCK_ANTDIV_PARA3_11N 0xA10
-#define ODM_REG_CCK_ANTDIV_PARA4_11N 0xA14
-#define ODM_REG_CCK_FILTER_PARA1_11N 0xA22
-#define ODM_REG_CCK_FILTER_PARA2_11N 0xA23
-#define ODM_REG_CCK_FILTER_PARA3_11N 0xA24
-#define ODM_REG_CCK_FILTER_PARA4_11N 0xA25
-#define ODM_REG_CCK_FILTER_PARA5_11N 0xA26
-#define ODM_REG_CCK_FILTER_PARA6_11N 0xA27
-#define ODM_REG_CCK_FILTER_PARA7_11N 0xA28
-#define ODM_REG_CCK_FILTER_PARA8_11N 0xA29
-#define ODM_REG_CCK_FA_RST_11N 0xA2C
-#define ODM_REG_CCK_FA_MSB_11N 0xA58
-#define ODM_REG_CCK_FA_LSB_11N 0xA5C
-#define ODM_REG_CCK_CCA_CNT_11N 0xA60
-#define ODM_REG_BB_PWR_SAV4_11N 0xA74
-/* PAGE B */
-#define ODM_REG_LNA_SWITCH_11N 0xB2C
-#define ODM_REG_PATH_SWITCH_11N 0xB30
-#define ODM_REG_RSSI_CTRL_11N 0xB38
-#define ODM_REG_CONFIG_ANTA_11N 0xB68
-#define ODM_REG_RSSI_BT_11N 0xB9C
-#define ODM_REG_RXCK_RFMOD 0xBB0
-#define ODM_REG_EDCCA_DCNF_97F 0xBC0
-
-/* PAGE C */
-#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
-#define ODM_REG_BB_RX_PATH_11N 0xC04
-#define ODM_REG_TRMUX_11N 0xC08
-#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
-#define ODM_REG_DOWNSAM_FACTOR_11N 0xC10
-#define ODM_REG_RXIQI_MATRIX_11N 0xC14
-#define ODM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
-#define ODM_REG_IGI_A_11N 0xC50
-#define ODM_REG_ANTDIV_PARA2_11N 0xC54
-#define ODM_REG_IGI_B_11N 0xC58
-#define ODM_REG_ANTDIV_PARA3_11N 0xC5C
-#define ODM_REG_L1SBD_PD_CH_11N 0XC6C
-#define ODM_REG_BB_PWR_SAV2_11N 0xC70
-#define ODM_REG_BB_AGC_SET_2_11N 0xc74
-#define ODM_REG_RX_OFF_11N 0xC7C
-#define ODM_REG_TXIQK_MATRIXA_11N 0xC80
-#define ODM_REG_TXIQK_MATRIXB_11N 0xC88
-#define ODM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
-#define ODM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
-#define ODM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
-#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
-#define ODM_REG_SMALL_BANDWIDTH_11N 0xCE4
-#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
-/* PAGE D */
-#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
-#define ODM_REG_BB_RX_ANT_11N 0xD04
-#define ODM_REG_BB_ATC_11N 0xD2C
-#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
-#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
-#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
-#define ODM_REG_RPT_11N 0xDF4
-/* PAGE E */
-#define ODM_REG_TXAGC_A_6_18_11N 0xE00
-#define ODM_REG_TXAGC_A_24_54_11N 0xE04
-#define ODM_REG_TXAGC_A_1_MCS32_11N 0xE08
-#define ODM_REG_TXAGC_A_MCS0_3_11N 0xE10
-#define ODM_REG_TXAGC_A_MCS4_7_11N 0xE14
-#define ODM_REG_TXAGC_A_MCS8_11_11N 0xE18
-#define ODM_REG_TXAGC_A_MCS12_15_11N 0xE1C
-#define ODM_REG_EDCCA_DCNF_11N 0xE24
-#define ODM_REG_TAP_UPD_97F 0xE24
-#define ODM_REG_FPGA0_IQK_11N 0xE28
-#define ODM_REG_PAGE_B1_97F 0xE28
-#define ODM_REG_TXIQK_TONE_A_11N 0xE30
-#define ODM_REG_RXIQK_TONE_A_11N 0xE34
-#define ODM_REG_TXIQK_PI_A_11N 0xE38
-#define ODM_REG_RXIQK_PI_A_11N 0xE3C
-#define ODM_REG_TXIQK_11N 0xE40
-#define ODM_REG_RXIQK_11N 0xE44
-#define ODM_REG_IQK_AGC_PTS_11N 0xE48
-#define ODM_REG_IQK_AGC_RSP_11N 0xE4C
-#define ODM_REG_BLUETOOTH_11N 0xE6C
-#define ODM_REG_RX_WAIT_CCA_11N 0xE70
-#define ODM_REG_TX_CCK_RFON_11N 0xE74
-#define ODM_REG_TX_CCK_BBON_11N 0xE78
-#define ODM_REG_OFDM_RFON_11N 0xE7C
-#define ODM_REG_OFDM_BBON_11N 0xE80
-#define ODM_REG_TX2RX_11N 0xE84
-#define ODM_REG_TX2TX_11N 0xE88
-#define ODM_REG_RX_CCK_11N 0xE8C
-#define ODM_REG_RX_OFDM_11N 0xED0
-#define ODM_REG_RX_WAIT_RIFS_11N 0xED4
-#define ODM_REG_RX2RX_11N 0xED8
-#define ODM_REG_STANDBY_11N 0xEDC
-#define ODM_REG_SLEEP_11N 0xEE0
-#define ODM_REG_PMPD_ANAEN_11N 0xEEC
-/* PAGE F */
-#define ODM_REG_PAGE_F_RST_11N 0xF14
-#define ODM_REG_IGI_C_11N 0xF84
-#define ODM_REG_IGI_D_11N 0xF88
-#define ODM_REG_CCK_CRC32_ERROR_CNT_11N 0xF84
-#define ODM_REG_CCK_CRC32_OK_CNT_11N 0xF88
-#define ODM_REG_HT_CRC32_CNT_11N 0xF90
-#define ODM_REG_OFDM_CRC32_CNT_11N 0xF94
-
-/* 2 MAC REG LIST */
-#define ODM_REG_BB_RST_11N 0x02
-#define ODM_REG_ANTSEL_PIN_11N 0x4C
-#define ODM_REG_EARLY_MODE_11N 0x4D0
-#define ODM_REG_RSSI_MONITOR_11N 0x4FE
-#define ODM_REG_EDCA_VO_11N 0x500
-#define ODM_REG_EDCA_VI_11N 0x504
-#define ODM_REG_EDCA_BE_11N 0x508
-#define ODM_REG_EDCA_BK_11N 0x50C
-#define ODM_REG_TXPAUSE_11N 0x522
-#define ODM_REG_RESP_TX_11N 0x6D8
-#define ODM_REG_ANT_TRAIN_PARA1_11N 0x7b0
-#define ODM_REG_ANT_TRAIN_PARA2_11N 0x7b4
-
-/* DIG Related */
-#define ODM_BIT_IGI_11N 0x0000007F
-#define ODM_BIT_CCK_RPT_FORMAT_11N BIT(9)
-#define ODM_BIT_BB_RX_PATH_11N 0xF
-#define ODM_BIT_BB_TX_PATH_11N 0xF
-#define ODM_BIT_BB_ATC_11N BIT(11)
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/phydm_types.h b/drivers/staging/rtlwifi/phydm/phydm_types.h
deleted file mode 100644
index 082bb03a99d4..000000000000
--- a/drivers/staging/rtlwifi/phydm/phydm_types.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __ODM_TYPES_H__
-#define __ODM_TYPES_H__
-
-/*Define Different SW team support*/
-#define ODM_AP 0x01 /*BIT0*/
-#define ODM_CE 0x04 /*BIT2*/
-#define ODM_WIN 0x08 /*BIT3*/
-#define ODM_ADSL 0x10 /*BIT4*/
-#define ODM_IOT 0x20 /*BIT5*/
-
-/*Deifne HW endian support*/
-#define ODM_ENDIAN_BIG 0
-#define ODM_ENDIAN_LITTLE 1
-
-#define GET_PDM_ODM(__padapter) \
- ((struct phy_dm_struct *)(&(GET_HAL_DATA(__padapter))->odmpriv))
-
-enum hal_status {
- HAL_STATUS_SUCCESS,
- HAL_STATUS_FAILURE,
-};
-
-/*
- * Declare for ODM spin lock definition temporarily fro compile pass.
- */
-enum rt_spinlock_type {
- RT_TX_SPINLOCK = 1,
- RT_RX_SPINLOCK = 2,
- RT_RM_SPINLOCK = 3,
- RT_CAM_SPINLOCK = 4,
- RT_SCAN_SPINLOCK = 5,
- RT_LOG_SPINLOCK = 7,
- RT_BW_SPINLOCK = 8,
- RT_CHNLOP_SPINLOCK = 9,
- RT_RF_OPERATE_SPINLOCK = 10,
- RT_INITIAL_SPINLOCK = 11,
- RT_RF_STATE_SPINLOCK =
- 12, /* For RF state. Added by Bruce, 2007-10-30. */
- /* Shall we define Ndis 6.2 SpinLock Here ? */
- RT_PORT_SPINLOCK = 16,
- RT_VNIC_SPINLOCK = 17,
- RT_HVL_SPINLOCK = 18,
- RT_H2C_SPINLOCK = 20, /* For H2C cmd. Added by tynli. 2009.11.09. */
-
- rt_bt_data_spinlock = 25,
-
- RT_WAPI_OPTION_SPINLOCK = 26,
- RT_WAPI_RX_SPINLOCK = 27,
-
- /* add for 92D CCK control issue */
- RT_CCK_PAGEA_SPINLOCK = 28,
- RT_BUFFER_SPINLOCK = 29,
- RT_CHANNEL_AND_BANDWIDTH_SPINLOCK = 30,
- RT_GEN_TEMP_BUF_SPINLOCK = 31,
- RT_AWB_SPINLOCK = 32,
- RT_FW_PS_SPINLOCK = 33,
- RT_HW_TIMER_SPIN_LOCK = 34,
- RT_MPT_WI_SPINLOCK = 35,
- RT_P2P_SPIN_LOCK = 36, /* Protect P2P context */
- RT_DBG_SPIN_LOCK = 37,
- RT_IQK_SPINLOCK = 38,
- RT_PENDED_OID_SPINLOCK = 39,
- RT_CHNLLIST_SPINLOCK = 40,
- RT_INDIC_SPINLOCK = 41, /* protect indication */
- RT_RFD_SPINLOCK = 42,
- RT_SYNC_IO_CNT_SPINLOCK = 43,
- RT_LAST_SPINLOCK,
-};
-
-#include <asm/byteorder.h>
-
-#if defined(__LITTLE_ENDIAN)
-#define ODM_ENDIAN_TYPE ODM_ENDIAN_LITTLE
-#elif defined(__BIG_ENDIAN)
-#define ODM_ENDIAN_TYPE ODM_ENDIAN_BIG
-#else
-#error
-#endif
-
-#define COND_ELSE 2
-#define COND_ENDIF 3
-
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK7BITS 0x7f
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASK20BITS 0xfffff
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-#define RFREGOFFSETMASK 0xfffff
-#define MASKH3BYTES 0xffffff00
-#define MASKL3BYTES 0x00ffffff
-#define MASKBYTE2HIGHNIBBLE 0x00f00000
-#define MASKBYTE3LOWNIBBLE 0x0f000000
-#define MASKL3BYTES 0x00ffffff
-#define RFREGOFFSETMASK 0xfffff
-
-#include "phydm_features.h"
-
-#endif /* __ODM_TYPES_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
deleted file mode 100644
index 52a113d731d9..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
+++ /dev/null
@@ -1,1956 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*Image2HeaderVersion: 3.2*/
-#include "../mp_precomp.h"
-#include "../phydm_precomp.h"
-#include <linux/kernel.h>
-
-static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
- const u32 condition2, const u32 condition3,
- const u32 condition4)
-{
- u8 _board_type = ((dm->board_type & BIT(4)) >> 4) << 0 | /* _GLNA*/
- ((dm->board_type & BIT(3)) >> 3) << 1 | /* _GPA*/
- ((dm->board_type & BIT(7)) >> 7) << 2 | /* _ALNA*/
- ((dm->board_type & BIT(6)) >> 6) << 3 | /* _APA */
- ((dm->board_type & BIT(2)) >> 2) << 4; /* _BT*/
-
- u32 cond1 = condition1, cond2 = condition2, cond3 = condition3,
- cond4 = condition4;
-
- u8 cut_version_for_para =
- (dm->cut_version == ODM_CUT_A) ? 14 : dm->cut_version;
- u8 pkg_type_for_para = (dm->package_type == 0) ? 14 : dm->package_type;
-
- u32 driver1 = cut_version_for_para << 24 |
- (dm->support_interface & 0xF0) << 16 |
- dm->support_platform << 16 | pkg_type_for_para << 12 |
- (dm->support_interface & 0x0F) << 8 | _board_type;
-
- u32 driver2 = (dm->type_glna & 0xFF) << 0 | (dm->type_gpa & 0xFF) << 8 |
- (dm->type_alna & 0xFF) << 16 |
- (dm->type_apa & 0xFF) << 24;
-
- u32 driver3 = 0;
-
- u32 driver4 = (dm->type_glna & 0xFF00) >> 8 | (dm->type_gpa & 0xFF00) |
- (dm->type_alna & 0xFF00) << 8 |
- (dm->type_apa & 0xFF00) << 16;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> %s (cond1, cond2, cond3, cond4) = (0x%X 0x%X 0x%X 0x%X)\n",
- __func__, cond1, cond2, cond3, cond4);
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> %s (driver1, driver2, driver3, driver4) = (0x%X 0x%X 0x%X 0x%X)\n",
- __func__, driver1, driver2, driver3, driver4);
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- " (Platform, Interface) = (0x%X, 0x%X)\n",
- dm->support_platform, dm->support_interface);
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- " (Board, Package) = (0x%X, 0x%X)\n",
- dm->board_type, dm->package_type);
-
- /*============== value Defined Check ===============*/
- /*QFN type [15:12] and cut version [27:24] need to do value check*/
-
- if (((cond1 & 0x0000F000) != 0) &&
- ((cond1 & 0x0000F000) != (driver1 & 0x0000F000)))
- return false;
- if (((cond1 & 0x0F000000) != 0) &&
- ((cond1 & 0x0F000000) != (driver1 & 0x0F000000)))
- return false;
-
- /*=============== Bit Defined Check ================*/
- /* We don't care [31:28] */
-
- cond1 &= 0x00FF0FFF;
- driver1 &= 0x00FF0FFF;
-
- if ((cond1 & driver1) == cond1) {
- u32 bit_mask = 0;
-
- if ((cond1 & 0x0F) == 0) /* board_type is DONTCARE*/
- return true;
-
- if ((cond1 & BIT(0)) != 0) /*GLNA*/
- bit_mask |= 0x000000FF;
- if ((cond1 & BIT(1)) != 0) /*GPA*/
- bit_mask |= 0x0000FF00;
- if ((cond1 & BIT(2)) != 0) /*ALNA*/
- bit_mask |= 0x00FF0000;
- if ((cond1 & BIT(3)) != 0) /*APA*/
- bit_mask |= 0xFF000000;
-
- if (((cond2 & bit_mask) == (driver2 & bit_mask)) &&
- ((cond4 & bit_mask) ==
- (driver4 &
- bit_mask))) /* board_type of each RF path is matched*/
- return true;
- else
- return false;
- } else {
- return false;
- }
-}
-
-/******************************************************************************
- * agc_tab.TXT
- ******************************************************************************/
-
-static u32 array_mp_8822b_agc_tab[] = {
- 0x8000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x81C, 0xFF000003,
- 0x81C, 0xF5000003, 0x81C, 0xF4020003, 0x81C, 0xF3040003,
- 0x81C, 0xF2060003, 0x81C, 0xF1080003, 0x81C, 0xF00A0003,
- 0x81C, 0xEF0C0003, 0x81C, 0xEE0E0003, 0x81C, 0xED100003,
- 0x81C, 0xEC120003, 0x81C, 0xEB140003, 0x81C, 0xEA160003,
- 0x81C, 0xE9180003, 0x81C, 0xE81A0003, 0x81C, 0xE71C0003,
- 0x81C, 0xE61E0003, 0x81C, 0xE5200003, 0x81C, 0xE4220003,
- 0x81C, 0xE3240003, 0x81C, 0xE2260003, 0x81C, 0xE1280003,
- 0x81C, 0xE02A0003, 0x81C, 0xC32C0003, 0x81C, 0xC22E0003,
- 0x81C, 0xC1300003, 0x81C, 0xC0320003, 0x81C, 0xA4340003,
- 0x81C, 0xA3360003, 0x81C, 0xA2380003, 0x81C, 0xA13A0003,
- 0x81C, 0xA03C0003, 0x81C, 0x823E0003, 0x81C, 0x81400003,
- 0x81C, 0x80420003, 0x81C, 0x64440003, 0x81C, 0x63460003,
- 0x81C, 0x62480003, 0x81C, 0x614A0003, 0x81C, 0x604C0003,
- 0x81C, 0x454E0003, 0x81C, 0x44500003, 0x81C, 0x43520003,
- 0x81C, 0x42540003, 0x81C, 0x41560003, 0x81C, 0x40580003,
- 0x81C, 0x055A0003, 0x81C, 0x045C0003, 0x81C, 0x035E0003,
- 0x81C, 0x02600003, 0x81C, 0x01620003, 0x81C, 0x00640003,
- 0x81C, 0x00660003, 0x81C, 0x00680003, 0x81C, 0x006A0003,
- 0x81C, 0x006C0003, 0x81C, 0x006E0003, 0x81C, 0x00700003,
- 0x81C, 0x00720003, 0x81C, 0x00740003, 0x81C, 0x00760003,
- 0x81C, 0x00780003, 0x81C, 0x007A0003, 0x81C, 0x007C0003,
- 0x81C, 0x007E0003, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x81C, 0xFF000003, 0x81C, 0xF5000003, 0x81C, 0xF4020003,
- 0x81C, 0xF3040003, 0x81C, 0xF2060003, 0x81C, 0xF1080003,
- 0x81C, 0xF00A0003, 0x81C, 0xEF0C0003, 0x81C, 0xEE0E0003,
- 0x81C, 0xED100003, 0x81C, 0xEC120003, 0x81C, 0xEB140003,
- 0x81C, 0xEA160003, 0x81C, 0xE9180003, 0x81C, 0xE81A0003,
- 0x81C, 0xE71C0003, 0x81C, 0xE61E0003, 0x81C, 0xE5200003,
- 0x81C, 0xE4220003, 0x81C, 0xE3240003, 0x81C, 0xE2260003,
- 0x81C, 0xE1280003, 0x81C, 0xE02A0003, 0x81C, 0xC32C0003,
- 0x81C, 0xC22E0003, 0x81C, 0xC1300003, 0x81C, 0xC0320003,
- 0x81C, 0xA4340003, 0x81C, 0xA3360003, 0x81C, 0xA2380003,
- 0x81C, 0xA13A0003, 0x81C, 0xA03C0003, 0x81C, 0x823E0003,
- 0x81C, 0x81400003, 0x81C, 0x80420003, 0x81C, 0x64440003,
- 0x81C, 0x63460003, 0x81C, 0x62480003, 0x81C, 0x614A0003,
- 0x81C, 0x604C0003, 0x81C, 0x454E0003, 0x81C, 0x44500003,
- 0x81C, 0x43520003, 0x81C, 0x42540003, 0x81C, 0x41560003,
- 0x81C, 0x40580003, 0x81C, 0x055A0003, 0x81C, 0x045C0003,
- 0x81C, 0x035E0003, 0x81C, 0x02600003, 0x81C, 0x01620003,
- 0x81C, 0x00640003, 0x81C, 0x00660003, 0x81C, 0x00680003,
- 0x81C, 0x006A0003, 0x81C, 0x006C0003, 0x81C, 0x006E0003,
- 0x81C, 0x00700003, 0x81C, 0x00720003, 0x81C, 0x00740003,
- 0x81C, 0x00760003, 0x81C, 0x00780003, 0x81C, 0x007A0003,
- 0x81C, 0x007C0003, 0x81C, 0x007E0003, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFF000003, 0x81C, 0xF5000003,
- 0x81C, 0xF4020003, 0x81C, 0xF3040003, 0x81C, 0xF2060003,
- 0x81C, 0xF1080003, 0x81C, 0xF00A0003, 0x81C, 0xEF0C0003,
- 0x81C, 0xEE0E0003, 0x81C, 0xED100003, 0x81C, 0xEC120003,
- 0x81C, 0xEB140003, 0x81C, 0xEA160003, 0x81C, 0xE9180003,
- 0x81C, 0xE81A0003, 0x81C, 0xE71C0003, 0x81C, 0xE61E0003,
- 0x81C, 0xE5200003, 0x81C, 0xE4220003, 0x81C, 0xE3240003,
- 0x81C, 0xE2260003, 0x81C, 0xE1280003, 0x81C, 0xE02A0003,
- 0x81C, 0xC32C0003, 0x81C, 0xC22E0003, 0x81C, 0xC1300003,
- 0x81C, 0xC0320003, 0x81C, 0xA4340003, 0x81C, 0xA3360003,
- 0x81C, 0xA2380003, 0x81C, 0xA13A0003, 0x81C, 0xA03C0003,
- 0x81C, 0x823E0003, 0x81C, 0x81400003, 0x81C, 0x80420003,
- 0x81C, 0x64440003, 0x81C, 0x63460003, 0x81C, 0x62480003,
- 0x81C, 0x614A0003, 0x81C, 0x604C0003, 0x81C, 0x454E0003,
- 0x81C, 0x44500003, 0x81C, 0x43520003, 0x81C, 0x42540003,
- 0x81C, 0x41560003, 0x81C, 0x40580003, 0x81C, 0x055A0003,
- 0x81C, 0x045C0003, 0x81C, 0x035E0003, 0x81C, 0x02600003,
- 0x81C, 0x01620003, 0x81C, 0x00640003, 0x81C, 0x00660003,
- 0x81C, 0x00680003, 0x81C, 0x006A0003, 0x81C, 0x006C0003,
- 0x81C, 0x006E0003, 0x81C, 0x00700003, 0x81C, 0x00720003,
- 0x81C, 0x00740003, 0x81C, 0x00760003, 0x81C, 0x00780003,
- 0x81C, 0x007A0003, 0x81C, 0x007C0003, 0x81C, 0x007E0003,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFF000003,
- 0x81C, 0xF5000003, 0x81C, 0xF4020003, 0x81C, 0xF3040003,
- 0x81C, 0xF2060003, 0x81C, 0xF1080003, 0x81C, 0xF00A0003,
- 0x81C, 0xEF0C0003, 0x81C, 0xEE0E0003, 0x81C, 0xED100003,
- 0x81C, 0xEC120003, 0x81C, 0xEB140003, 0x81C, 0xEA160003,
- 0x81C, 0xE9180003, 0x81C, 0xE81A0003, 0x81C, 0xE71C0003,
- 0x81C, 0xE61E0003, 0x81C, 0xE5200003, 0x81C, 0xE4220003,
- 0x81C, 0xE3240003, 0x81C, 0xE2260003, 0x81C, 0xE1280003,
- 0x81C, 0xE02A0003, 0x81C, 0xC32C0003, 0x81C, 0xC22E0003,
- 0x81C, 0xC1300003, 0x81C, 0xC0320003, 0x81C, 0xA4340003,
- 0x81C, 0xA3360003, 0x81C, 0xA2380003, 0x81C, 0xA13A0003,
- 0x81C, 0xA03C0003, 0x81C, 0x823E0003, 0x81C, 0x81400003,
- 0x81C, 0x80420003, 0x81C, 0x64440003, 0x81C, 0x63460003,
- 0x81C, 0x62480003, 0x81C, 0x614A0003, 0x81C, 0x604C0003,
- 0x81C, 0x454E0003, 0x81C, 0x44500003, 0x81C, 0x43520003,
- 0x81C, 0x42540003, 0x81C, 0x41560003, 0x81C, 0x40580003,
- 0x81C, 0x055A0003, 0x81C, 0x045C0003, 0x81C, 0x035E0003,
- 0x81C, 0x02600003, 0x81C, 0x01620003, 0x81C, 0x00640003,
- 0x81C, 0x00660003, 0x81C, 0x00680003, 0x81C, 0x006A0003,
- 0x81C, 0x006C0003, 0x81C, 0x006E0003, 0x81C, 0x00700003,
- 0x81C, 0x00720003, 0x81C, 0x00740003, 0x81C, 0x00760003,
- 0x81C, 0x00780003, 0x81C, 0x007A0003, 0x81C, 0x007C0003,
- 0x81C, 0x007E0003, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x81C, 0xFF000003, 0x81C, 0xFD000003, 0x81C, 0xFC020003,
- 0x81C, 0xFB040003, 0x81C, 0xFA060003, 0x81C, 0xF9080003,
- 0x81C, 0xF80A0003, 0x81C, 0xF70C0003, 0x81C, 0xF60E0003,
- 0x81C, 0xF5100003, 0x81C, 0xF4120003, 0x81C, 0xF3140003,
- 0x81C, 0xF2160003, 0x81C, 0xF1180003, 0x81C, 0xF01A0003,
- 0x81C, 0xEF1C0003, 0x81C, 0xEE1E0003, 0x81C, 0xED200003,
- 0x81C, 0xEC220003, 0x81C, 0xEB240003, 0x81C, 0xEA260003,
- 0x81C, 0xE9280003, 0x81C, 0xE82A0003, 0x81C, 0xE72C0003,
- 0x81C, 0xE62E0003, 0x81C, 0xE5300003, 0x81C, 0xC8320003,
- 0x81C, 0xC7340003, 0x81C, 0xC6360003, 0x81C, 0xC5380003,
- 0x81C, 0xC43A0003, 0x81C, 0xC33C0003, 0x81C, 0xC23E0003,
- 0x81C, 0xC1400003, 0x81C, 0xC0420003, 0x81C, 0xA5440003,
- 0x81C, 0xA4460003, 0x81C, 0xA3480003, 0x81C, 0xA24A0003,
- 0x81C, 0xA14C0003, 0x81C, 0x834E0003, 0x81C, 0x82500003,
- 0x81C, 0x81520003, 0x81C, 0x80540003, 0x81C, 0x65560003,
- 0x81C, 0x64580003, 0x81C, 0x635A0003, 0x81C, 0x625C0003,
- 0x81C, 0x435E0003, 0x81C, 0x42600003, 0x81C, 0x41620003,
- 0x81C, 0x40640003, 0x81C, 0x06660003, 0x81C, 0x05680003,
- 0x81C, 0x046A0003, 0x81C, 0x036C0003, 0x81C, 0x026E0003,
- 0x81C, 0x01700003, 0x81C, 0x00720003, 0x81C, 0x00740003,
- 0x81C, 0x00760003, 0x81C, 0x00780003, 0x81C, 0x007A0003,
- 0x81C, 0x007C0003, 0x81C, 0x007E0003, 0x90012100, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFF000003, 0x81C, 0xFE000003,
- 0x81C, 0xFD020003, 0x81C, 0xFC040003, 0x81C, 0xFB060003,
- 0x81C, 0xFA080003, 0x81C, 0xF90A0003, 0x81C, 0xF80C0003,
- 0x81C, 0xF70E0003, 0x81C, 0xF6100003, 0x81C, 0xF5120003,
- 0x81C, 0xF4140003, 0x81C, 0xF3160003, 0x81C, 0xF2180003,
- 0x81C, 0xF11A0003, 0x81C, 0xF01C0003, 0x81C, 0xEF1E0003,
- 0x81C, 0xEE200003, 0x81C, 0xED220003, 0x81C, 0xEC240003,
- 0x81C, 0xEB260003, 0x81C, 0xEA280003, 0x81C, 0xE92A0003,
- 0x81C, 0xE82C0003, 0x81C, 0xE72E0003, 0x81C, 0xE6300003,
- 0x81C, 0xE5320003, 0x81C, 0xC8340003, 0x81C, 0xC7360003,
- 0x81C, 0xC6380003, 0x81C, 0xC53A0003, 0x81C, 0xC43C0003,
- 0x81C, 0xC33E0003, 0x81C, 0xC2400003, 0x81C, 0xC1420003,
- 0x81C, 0xC0440003, 0x81C, 0xA3460003, 0x81C, 0xA2480003,
- 0x81C, 0xA14A0003, 0x81C, 0xA04C0003, 0x81C, 0x824E0003,
- 0x81C, 0x81500003, 0x81C, 0x80520003, 0x81C, 0x64540003,
- 0x81C, 0x63560003, 0x81C, 0x62580003, 0x81C, 0x445A0003,
- 0x81C, 0x435C0003, 0x81C, 0x425E0003, 0x81C, 0x41600003,
- 0x81C, 0x40620003, 0x81C, 0x05640003, 0x81C, 0x04660003,
- 0x81C, 0x03680003, 0x81C, 0x026A0003, 0x81C, 0x016C0003,
- 0x81C, 0x006E0003, 0x81C, 0x00700003, 0x81C, 0x00720003,
- 0x81C, 0x00740003, 0x81C, 0x00760003, 0x81C, 0x00780003,
- 0x81C, 0x007A0003, 0x81C, 0x007C0003, 0x81C, 0x007E0003,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFF000003,
- 0x81C, 0xF5000003, 0x81C, 0xF4020003, 0x81C, 0xF3040003,
- 0x81C, 0xF2060003, 0x81C, 0xF1080003, 0x81C, 0xF00A0003,
- 0x81C, 0xEF0C0003, 0x81C, 0xEE0E0003, 0x81C, 0xED100003,
- 0x81C, 0xEC120003, 0x81C, 0xEB140003, 0x81C, 0xEA160003,
- 0x81C, 0xE9180003, 0x81C, 0xE81A0003, 0x81C, 0xE71C0003,
- 0x81C, 0xE61E0003, 0x81C, 0xE5200003, 0x81C, 0xE4220003,
- 0x81C, 0xE3240003, 0x81C, 0xE2260003, 0x81C, 0xE1280003,
- 0x81C, 0xE02A0003, 0x81C, 0xC32C0003, 0x81C, 0xC22E0003,
- 0x81C, 0xC1300003, 0x81C, 0xC0320003, 0x81C, 0xA4340003,
- 0x81C, 0xA3360003, 0x81C, 0xA2380003, 0x81C, 0xA13A0003,
- 0x81C, 0xA03C0003, 0x81C, 0x823E0003, 0x81C, 0x81400003,
- 0x81C, 0x80420003, 0x81C, 0x64440003, 0x81C, 0x63460003,
- 0x81C, 0x62480003, 0x81C, 0x614A0003, 0x81C, 0x604C0003,
- 0x81C, 0x454E0003, 0x81C, 0x44500003, 0x81C, 0x43520003,
- 0x81C, 0x42540003, 0x81C, 0x41560003, 0x81C, 0x40580003,
- 0x81C, 0x055A0003, 0x81C, 0x045C0003, 0x81C, 0x035E0003,
- 0x81C, 0x02600003, 0x81C, 0x01620003, 0x81C, 0x00640003,
- 0x81C, 0x00660003, 0x81C, 0x00680003, 0x81C, 0x006A0003,
- 0x81C, 0x006C0003, 0x81C, 0x006E0003, 0x81C, 0x00700003,
- 0x81C, 0x00720003, 0x81C, 0x00740003, 0x81C, 0x00760003,
- 0x81C, 0x00780003, 0x81C, 0x007A0003, 0x81C, 0x007C0003,
- 0x81C, 0x007E0003, 0x90011000, 0x00000000, 0x40000000, 0x00000000,
- 0x81C, 0xFF000003, 0x81C, 0xFE000003, 0x81C, 0xFD020003,
- 0x81C, 0xFC040003, 0x81C, 0xFB060003, 0x81C, 0xFA080003,
- 0x81C, 0xF90A0003, 0x81C, 0xF80C0003, 0x81C, 0xF70E0003,
- 0x81C, 0xF6100003, 0x81C, 0xF5120003, 0x81C, 0xF4140003,
- 0x81C, 0xF3160003, 0x81C, 0xF2180003, 0x81C, 0xF11A0003,
- 0x81C, 0xF01C0003, 0x81C, 0xEF1E0003, 0x81C, 0xEE200003,
- 0x81C, 0xED220003, 0x81C, 0xEC240003, 0x81C, 0xEB260003,
- 0x81C, 0xEA280003, 0x81C, 0xE92A0003, 0x81C, 0xE82C0003,
- 0x81C, 0xE72E0003, 0x81C, 0xE6300003, 0x81C, 0xE5320003,
- 0x81C, 0xC8340003, 0x81C, 0xC7360003, 0x81C, 0xC6380003,
- 0x81C, 0xC53A0003, 0x81C, 0xC43C0003, 0x81C, 0xC33E0003,
- 0x81C, 0xC2400003, 0x81C, 0xC1420003, 0x81C, 0xC0440003,
- 0x81C, 0xA3460003, 0x81C, 0xA2480003, 0x81C, 0xA14A0003,
- 0x81C, 0xA04C0003, 0x81C, 0x824E0003, 0x81C, 0x81500003,
- 0x81C, 0x80520003, 0x81C, 0x64540003, 0x81C, 0x63560003,
- 0x81C, 0x62580003, 0x81C, 0x445A0003, 0x81C, 0x435C0003,
- 0x81C, 0x425E0003, 0x81C, 0x41600003, 0x81C, 0x40620003,
- 0x81C, 0x05640003, 0x81C, 0x04660003, 0x81C, 0x03680003,
- 0x81C, 0x026A0003, 0x81C, 0x016C0003, 0x81C, 0x006E0003,
- 0x81C, 0x00700003, 0x81C, 0x00720003, 0x81C, 0x00740003,
- 0x81C, 0x00760003, 0x81C, 0x00780003, 0x81C, 0x007A0003,
- 0x81C, 0x007C0003, 0x81C, 0x007E0003, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFF000003, 0x81C, 0xFD000003,
- 0x81C, 0xFC020003, 0x81C, 0xFB040003, 0x81C, 0xFA060003,
- 0x81C, 0xF9080003, 0x81C, 0xF80A0003, 0x81C, 0xF70C0003,
- 0x81C, 0xF60E0003, 0x81C, 0xF5100003, 0x81C, 0xF4120003,
- 0x81C, 0xF3140003, 0x81C, 0xF2160003, 0x81C, 0xF1180003,
- 0x81C, 0xF01A0003, 0x81C, 0xEF1C0003, 0x81C, 0xEE1E0003,
- 0x81C, 0xED200003, 0x81C, 0xEC220003, 0x81C, 0xEB240003,
- 0x81C, 0xEA260003, 0x81C, 0xE9280003, 0x81C, 0xE82A0003,
- 0x81C, 0xE72C0003, 0x81C, 0xE62E0003, 0x81C, 0xE5300003,
- 0x81C, 0xC8320003, 0x81C, 0xC7340003, 0x81C, 0xC6360003,
- 0x81C, 0xC5380003, 0x81C, 0xC43A0003, 0x81C, 0xC33C0003,
- 0x81C, 0xC23E0003, 0x81C, 0xC1400003, 0x81C, 0xC0420003,
- 0x81C, 0xA5440003, 0x81C, 0xA4460003, 0x81C, 0xA3480003,
- 0x81C, 0xA24A0003, 0x81C, 0xA14C0003, 0x81C, 0x834E0003,
- 0x81C, 0x82500003, 0x81C, 0x81520003, 0x81C, 0x80540003,
- 0x81C, 0x65560003, 0x81C, 0x64580003, 0x81C, 0x635A0003,
- 0x81C, 0x625C0003, 0x81C, 0x435E0003, 0x81C, 0x42600003,
- 0x81C, 0x41620003, 0x81C, 0x40640003, 0x81C, 0x06660003,
- 0x81C, 0x05680003, 0x81C, 0x046A0003, 0x81C, 0x036C0003,
- 0x81C, 0x026E0003, 0x81C, 0x01700003, 0x81C, 0x00720003,
- 0x81C, 0x00740003, 0x81C, 0x00760003, 0x81C, 0x00780003,
- 0x81C, 0x007A0003, 0x81C, 0x007C0003, 0x81C, 0x007E0003,
- 0x90002000, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFF000003,
- 0x81C, 0xFD000003, 0x81C, 0xFC020003, 0x81C, 0xFB040003,
- 0x81C, 0xFA060003, 0x81C, 0xF9080003, 0x81C, 0xF80A0003,
- 0x81C, 0xF70C0003, 0x81C, 0xF60E0003, 0x81C, 0xF5100003,
- 0x81C, 0xF4120003, 0x81C, 0xF3140003, 0x81C, 0xF2160003,
- 0x81C, 0xF1180003, 0x81C, 0xF01A0003, 0x81C, 0xEF1C0003,
- 0x81C, 0xEE1E0003, 0x81C, 0xED200003, 0x81C, 0xEC220003,
- 0x81C, 0xEB240003, 0x81C, 0xEA260003, 0x81C, 0xE9280003,
- 0x81C, 0xE82A0003, 0x81C, 0xE72C0003, 0x81C, 0xE62E0003,
- 0x81C, 0xE5300003, 0x81C, 0xC8320003, 0x81C, 0xC7340003,
- 0x81C, 0xC6360003, 0x81C, 0xC5380003, 0x81C, 0xC43A0003,
- 0x81C, 0xC33C0003, 0x81C, 0xC23E0003, 0x81C, 0xC1400003,
- 0x81C, 0xC0420003, 0x81C, 0xA5440003, 0x81C, 0xA4460003,
- 0x81C, 0xA3480003, 0x81C, 0xA24A0003, 0x81C, 0xA14C0003,
- 0x81C, 0x834E0003, 0x81C, 0x82500003, 0x81C, 0x81520003,
- 0x81C, 0x80540003, 0x81C, 0x65560003, 0x81C, 0x64580003,
- 0x81C, 0x635A0003, 0x81C, 0x625C0003, 0x81C, 0x435E0003,
- 0x81C, 0x42600003, 0x81C, 0x41620003, 0x81C, 0x40640003,
- 0x81C, 0x06660003, 0x81C, 0x05680003, 0x81C, 0x046A0003,
- 0x81C, 0x036C0003, 0x81C, 0x026E0003, 0x81C, 0x01700003,
- 0x81C, 0x00720003, 0x81C, 0x00740003, 0x81C, 0x00760003,
- 0x81C, 0x00780003, 0x81C, 0x007A0003, 0x81C, 0x007C0003,
- 0x81C, 0x007E0003, 0xA0000000, 0x00000000, 0x81C, 0xFF000003,
- 0x81C, 0xFE000003, 0x81C, 0xFD020003, 0x81C, 0xFC040003,
- 0x81C, 0xFB060003, 0x81C, 0xFA080003, 0x81C, 0xF90A0003,
- 0x81C, 0xF80C0003, 0x81C, 0xF70E0003, 0x81C, 0xF6100003,
- 0x81C, 0xF5120003, 0x81C, 0xF4140003, 0x81C, 0xF3160003,
- 0x81C, 0xF2180003, 0x81C, 0xF11A0003, 0x81C, 0xF01C0003,
- 0x81C, 0xEF1E0003, 0x81C, 0xEE200003, 0x81C, 0xED220003,
- 0x81C, 0xEC240003, 0x81C, 0xEB260003, 0x81C, 0xEA280003,
- 0x81C, 0xE92A0003, 0x81C, 0xE82C0003, 0x81C, 0xE72E0003,
- 0x81C, 0xE6300003, 0x81C, 0xE5320003, 0x81C, 0xC8340003,
- 0x81C, 0xC7360003, 0x81C, 0xC6380003, 0x81C, 0xC53A0003,
- 0x81C, 0xC43C0003, 0x81C, 0xC33E0003, 0x81C, 0xC2400003,
- 0x81C, 0xC1420003, 0x81C, 0xC0440003, 0x81C, 0xA3460003,
- 0x81C, 0xA2480003, 0x81C, 0xA14A0003, 0x81C, 0xA04C0003,
- 0x81C, 0x824E0003, 0x81C, 0x81500003, 0x81C, 0x80520003,
- 0x81C, 0x64540003, 0x81C, 0x63560003, 0x81C, 0x62580003,
- 0x81C, 0x445A0003, 0x81C, 0x435C0003, 0x81C, 0x425E0003,
- 0x81C, 0x41600003, 0x81C, 0x40620003, 0x81C, 0x05640003,
- 0x81C, 0x04660003, 0x81C, 0x03680003, 0x81C, 0x026A0003,
- 0x81C, 0x016C0003, 0x81C, 0x006E0003, 0x81C, 0x00700003,
- 0x81C, 0x00720003, 0x81C, 0x00740003, 0x81C, 0x00760003,
- 0x81C, 0x00780003, 0x81C, 0x007A0003, 0x81C, 0x007C0003,
- 0x81C, 0x007E0003, 0xB0000000, 0x00000000, 0x8000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x81C, 0xF8000103, 0x81C, 0xF7020103,
- 0x81C, 0xF6040103, 0x81C, 0xF5060103, 0x81C, 0xF4080103,
- 0x81C, 0xF30A0103, 0x81C, 0xF20C0103, 0x81C, 0xF10E0103,
- 0x81C, 0xF0100103, 0x81C, 0xEF120103, 0x81C, 0xEE140103,
- 0x81C, 0xED160103, 0x81C, 0xEC180103, 0x81C, 0xEB1A0103,
- 0x81C, 0xEA1C0103, 0x81C, 0xE91E0103, 0x81C, 0xE8200103,
- 0x81C, 0xE7220103, 0x81C, 0xE6240103, 0x81C, 0xE5260103,
- 0x81C, 0xE4280103, 0x81C, 0xE32A0103, 0x81C, 0xE22C0103,
- 0x81C, 0xC32E0103, 0x81C, 0xC2300103, 0x81C, 0xC1320103,
- 0x81C, 0xA3340103, 0x81C, 0xA2360103, 0x81C, 0xA1380103,
- 0x81C, 0xA03A0103, 0x81C, 0x823C0103, 0x81C, 0x813E0103,
- 0x81C, 0x80400103, 0x81C, 0x64420103, 0x81C, 0x63440103,
- 0x81C, 0x62460103, 0x81C, 0x61480103, 0x81C, 0x434A0103,
- 0x81C, 0x424C0103, 0x81C, 0x414E0103, 0x81C, 0x40500103,
- 0x81C, 0x22520103, 0x81C, 0x21540103, 0x81C, 0x20560103,
- 0x81C, 0x04580103, 0x81C, 0x035A0103, 0x81C, 0x025C0103,
- 0x81C, 0x015E0103, 0x81C, 0x00600103, 0x81C, 0x00620103,
- 0x81C, 0x00640103, 0x81C, 0x00660103, 0x81C, 0x00680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x81C, 0xFA000103, 0x81C, 0xF9020103,
- 0x81C, 0xF8040103, 0x81C, 0xF7060103, 0x81C, 0xF6080103,
- 0x81C, 0xF50A0103, 0x81C, 0xF40C0103, 0x81C, 0xF30E0103,
- 0x81C, 0xF2100103, 0x81C, 0xF1120103, 0x81C, 0xF0140103,
- 0x81C, 0xEF160103, 0x81C, 0xEE180103, 0x81C, 0xED1A0103,
- 0x81C, 0xEC1C0103, 0x81C, 0xEB1E0103, 0x81C, 0xEA200103,
- 0x81C, 0xE9220103, 0x81C, 0xE8240103, 0x81C, 0xE7260103,
- 0x81C, 0xE6280103, 0x81C, 0xE52A0103, 0x81C, 0xC42C0103,
- 0x81C, 0xC32E0103, 0x81C, 0xC2300103, 0x81C, 0xC1320103,
- 0x81C, 0xA4340103, 0x81C, 0xA3360103, 0x81C, 0xA2380103,
- 0x81C, 0xA13A0103, 0x81C, 0x833C0103, 0x81C, 0x823E0103,
- 0x81C, 0x81400103, 0x81C, 0x63420103, 0x81C, 0x62440103,
- 0x81C, 0x61460103, 0x81C, 0x60480103, 0x81C, 0x424A0103,
- 0x81C, 0x414C0103, 0x81C, 0x404E0103, 0x81C, 0x22500103,
- 0x81C, 0x21520103, 0x81C, 0x20540103, 0x81C, 0x03560103,
- 0x81C, 0x02580103, 0x81C, 0x015A0103, 0x81C, 0x005C0103,
- 0x81C, 0x005E0103, 0x81C, 0x00600103, 0x81C, 0x00620103,
- 0x81C, 0x00640103, 0x81C, 0x00660103, 0x81C, 0x00680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF8000103, 0x81C, 0xF7020103,
- 0x81C, 0xF6040103, 0x81C, 0xF5060103, 0x81C, 0xF4080103,
- 0x81C, 0xF30A0103, 0x81C, 0xF20C0103, 0x81C, 0xF10E0103,
- 0x81C, 0xF0100103, 0x81C, 0xEF120103, 0x81C, 0xEE140103,
- 0x81C, 0xED160103, 0x81C, 0xEC180103, 0x81C, 0xEB1A0103,
- 0x81C, 0xEA1C0103, 0x81C, 0xE91E0103, 0x81C, 0xE8200103,
- 0x81C, 0xE7220103, 0x81C, 0xE6240103, 0x81C, 0xE5260103,
- 0x81C, 0xE4280103, 0x81C, 0xE32A0103, 0x81C, 0xC32C0103,
- 0x81C, 0xC22E0103, 0x81C, 0xC1300103, 0x81C, 0xC0320103,
- 0x81C, 0xA3340103, 0x81C, 0xA2360103, 0x81C, 0xA1380103,
- 0x81C, 0xA03A0103, 0x81C, 0x823C0103, 0x81C, 0x813E0103,
- 0x81C, 0x80400103, 0x81C, 0x63420103, 0x81C, 0x62440103,
- 0x81C, 0x61460103, 0x81C, 0x60480103, 0x81C, 0x424A0103,
- 0x81C, 0x414C0103, 0x81C, 0x404E0103, 0x81C, 0x06500103,
- 0x81C, 0x05520103, 0x81C, 0x04540103, 0x81C, 0x03560103,
- 0x81C, 0x02580103, 0x81C, 0x015A0103, 0x81C, 0x005C0103,
- 0x81C, 0x005E0103, 0x81C, 0x00600103, 0x81C, 0x00620103,
- 0x81C, 0x00640103, 0x81C, 0x00660103, 0x81C, 0x00680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF8000103, 0x81C, 0xF7020103,
- 0x81C, 0xF6040103, 0x81C, 0xF5060103, 0x81C, 0xF4080103,
- 0x81C, 0xF30A0103, 0x81C, 0xF20C0103, 0x81C, 0xF10E0103,
- 0x81C, 0xF0100103, 0x81C, 0xEF120103, 0x81C, 0xEE140103,
- 0x81C, 0xED160103, 0x81C, 0xEC180103, 0x81C, 0xEB1A0103,
- 0x81C, 0xEA1C0103, 0x81C, 0xE91E0103, 0x81C, 0xE8200103,
- 0x81C, 0xE7220103, 0x81C, 0xE6240103, 0x81C, 0xE5260103,
- 0x81C, 0xE4280103, 0x81C, 0xE32A0103, 0x81C, 0xC32C0103,
- 0x81C, 0xC22E0103, 0x81C, 0xC1300103, 0x81C, 0xC0320103,
- 0x81C, 0xA3340103, 0x81C, 0xA2360103, 0x81C, 0xA1380103,
- 0x81C, 0xA03A0103, 0x81C, 0x823C0103, 0x81C, 0x813E0103,
- 0x81C, 0x80400103, 0x81C, 0x63420103, 0x81C, 0x62440103,
- 0x81C, 0x61460103, 0x81C, 0x60480103, 0x81C, 0x424A0103,
- 0x81C, 0x414C0103, 0x81C, 0x404E0103, 0x81C, 0x22500103,
- 0x81C, 0x21520103, 0x81C, 0x20540103, 0x81C, 0x03560103,
- 0x81C, 0x02580103, 0x81C, 0x015A0103, 0x81C, 0x005C0103,
- 0x81C, 0x005E0103, 0x81C, 0x00600103, 0x81C, 0x00620103,
- 0x81C, 0x00640103, 0x81C, 0x00660103, 0x81C, 0x00680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF8000103, 0x81C, 0xF7020103,
- 0x81C, 0xF6040103, 0x81C, 0xF5060103, 0x81C, 0xF4080103,
- 0x81C, 0xF30A0103, 0x81C, 0xF20C0103, 0x81C, 0xF10E0103,
- 0x81C, 0xF0100103, 0x81C, 0xEF120103, 0x81C, 0xEE140103,
- 0x81C, 0xED160103, 0x81C, 0xEC180103, 0x81C, 0xEB1A0103,
- 0x81C, 0xEA1C0103, 0x81C, 0xE91E0103, 0x81C, 0xE8200103,
- 0x81C, 0xE7220103, 0x81C, 0xE6240103, 0x81C, 0xE5260103,
- 0x81C, 0xE4280103, 0x81C, 0xE32A0103, 0x81C, 0xC32C0103,
- 0x81C, 0xC22E0103, 0x81C, 0xC1300103, 0x81C, 0xC0320103,
- 0x81C, 0xA3340103, 0x81C, 0xA2360103, 0x81C, 0xA1380103,
- 0x81C, 0xA03A0103, 0x81C, 0x823C0103, 0x81C, 0x813E0103,
- 0x81C, 0x80400103, 0x81C, 0x63420103, 0x81C, 0x62440103,
- 0x81C, 0x61460103, 0x81C, 0x60480103, 0x81C, 0x424A0103,
- 0x81C, 0x414C0103, 0x81C, 0x404E0103, 0x81C, 0x22500103,
- 0x81C, 0x21520103, 0x81C, 0x20540103, 0x81C, 0x03560103,
- 0x81C, 0x02580103, 0x81C, 0x015A0103, 0x81C, 0x005C0103,
- 0x81C, 0x005E0103, 0x81C, 0x00600103, 0x81C, 0x00620103,
- 0x81C, 0x00640103, 0x81C, 0x00660103, 0x81C, 0x00680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x90012100, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFD000103, 0x81C, 0xFC020103,
- 0x81C, 0xFB040103, 0x81C, 0xFA060103, 0x81C, 0xF9080103,
- 0x81C, 0xF80A0103, 0x81C, 0xF70C0103, 0x81C, 0xF60E0103,
- 0x81C, 0xF5100103, 0x81C, 0xF4120103, 0x81C, 0xF3140103,
- 0x81C, 0xF2160103, 0x81C, 0xF1180103, 0x81C, 0xF01A0103,
- 0x81C, 0xEF1C0103, 0x81C, 0xEE1E0103, 0x81C, 0xED200103,
- 0x81C, 0xEC220103, 0x81C, 0xEB240103, 0x81C, 0xEA260103,
- 0x81C, 0xE9280103, 0x81C, 0xE82A0103, 0x81C, 0xE72C0103,
- 0x81C, 0xE62E0103, 0x81C, 0xE5300103, 0x81C, 0xE4320103,
- 0x81C, 0xE3340103, 0x81C, 0xC6360103, 0x81C, 0xC5380103,
- 0x81C, 0xC43A0103, 0x81C, 0xC33C0103, 0x81C, 0xC23E0103,
- 0x81C, 0xA5400103, 0x81C, 0xA4420103, 0x81C, 0xA3440103,
- 0x81C, 0xA2460103, 0x81C, 0xA1480103, 0x81C, 0x834A0103,
- 0x81C, 0x824C0103, 0x81C, 0x814E0103, 0x81C, 0x63500103,
- 0x81C, 0x62520103, 0x81C, 0x61540103, 0x81C, 0x43560103,
- 0x81C, 0x42580103, 0x81C, 0x245A0103, 0x81C, 0x235C0103,
- 0x81C, 0x225E0103, 0x81C, 0x21600103, 0x81C, 0x04620103,
- 0x81C, 0x03640103, 0x81C, 0x02660103, 0x81C, 0x01680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF8000103, 0x81C, 0xF7020103,
- 0x81C, 0xF6040103, 0x81C, 0xF5060103, 0x81C, 0xF4080103,
- 0x81C, 0xF30A0103, 0x81C, 0xF20C0103, 0x81C, 0xF10E0103,
- 0x81C, 0xF0100103, 0x81C, 0xEF120103, 0x81C, 0xEE140103,
- 0x81C, 0xED160103, 0x81C, 0xEC180103, 0x81C, 0xEB1A0103,
- 0x81C, 0xEA1C0103, 0x81C, 0xE91E0103, 0x81C, 0xE8200103,
- 0x81C, 0xE7220103, 0x81C, 0xE6240103, 0x81C, 0xE5260103,
- 0x81C, 0xE4280103, 0x81C, 0xE32A0103, 0x81C, 0xE22C0103,
- 0x81C, 0xC32E0103, 0x81C, 0xC2300103, 0x81C, 0xC1320103,
- 0x81C, 0xA3340103, 0x81C, 0xA2360103, 0x81C, 0xA1380103,
- 0x81C, 0xA03A0103, 0x81C, 0x823C0103, 0x81C, 0x813E0103,
- 0x81C, 0x80400103, 0x81C, 0x64420103, 0x81C, 0x63440103,
- 0x81C, 0x62460103, 0x81C, 0x61480103, 0x81C, 0x434A0103,
- 0x81C, 0x424C0103, 0x81C, 0x414E0103, 0x81C, 0x40500103,
- 0x81C, 0x22520103, 0x81C, 0x21540103, 0x81C, 0x20560103,
- 0x81C, 0x04580103, 0x81C, 0x035A0103, 0x81C, 0x025C0103,
- 0x81C, 0x015E0103, 0x81C, 0x00600103, 0x81C, 0x00620103,
- 0x81C, 0x00640103, 0x81C, 0x00660103, 0x81C, 0x00680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x90011000, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFD000103, 0x81C, 0xFC020103,
- 0x81C, 0xFB040103, 0x81C, 0xFA060103, 0x81C, 0xF9080103,
- 0x81C, 0xF80A0103, 0x81C, 0xF70C0103, 0x81C, 0xF60E0103,
- 0x81C, 0xF5100103, 0x81C, 0xF4120103, 0x81C, 0xF3140103,
- 0x81C, 0xF2160103, 0x81C, 0xF1180103, 0x81C, 0xF01A0103,
- 0x81C, 0xEE1C0103, 0x81C, 0xED1E0103, 0x81C, 0xEC200103,
- 0x81C, 0xEB220103, 0x81C, 0xEA240103, 0x81C, 0xE9260103,
- 0x81C, 0xE8280103, 0x81C, 0xE72A0103, 0x81C, 0xE62C0103,
- 0x81C, 0xE52E0103, 0x81C, 0xE4300103, 0x81C, 0xE3320103,
- 0x81C, 0xE2340103, 0x81C, 0xC5360103, 0x81C, 0xC4380103,
- 0x81C, 0xC33A0103, 0x81C, 0xC23C0103, 0x81C, 0xA53E0103,
- 0x81C, 0xA4400103, 0x81C, 0xA3420103, 0x81C, 0xA2440103,
- 0x81C, 0xA1460103, 0x81C, 0x83480103, 0x81C, 0x824A0103,
- 0x81C, 0x814C0103, 0x81C, 0x804E0103, 0x81C, 0x63500103,
- 0x81C, 0x62520103, 0x81C, 0x61540103, 0x81C, 0x43560103,
- 0x81C, 0x42580103, 0x81C, 0x415A0103, 0x81C, 0x405C0103,
- 0x81C, 0x225E0103, 0x81C, 0x21600103, 0x81C, 0x20620103,
- 0x81C, 0x03640103, 0x81C, 0x02660103, 0x81C, 0x01680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFD000103, 0x81C, 0xFC020103,
- 0x81C, 0xFB040103, 0x81C, 0xFA060103, 0x81C, 0xF9080103,
- 0x81C, 0xF80A0103, 0x81C, 0xF70C0103, 0x81C, 0xF60E0103,
- 0x81C, 0xF5100103, 0x81C, 0xF4120103, 0x81C, 0xF3140103,
- 0x81C, 0xF2160103, 0x81C, 0xF1180103, 0x81C, 0xF01A0103,
- 0x81C, 0xEF1C0103, 0x81C, 0xEE1E0103, 0x81C, 0xED200103,
- 0x81C, 0xEC220103, 0x81C, 0xEB240103, 0x81C, 0xEA260103,
- 0x81C, 0xE9280103, 0x81C, 0xE82A0103, 0x81C, 0xE72C0103,
- 0x81C, 0xE62E0103, 0x81C, 0xE5300103, 0x81C, 0xE4320103,
- 0x81C, 0xE3340103, 0x81C, 0xE2360103, 0x81C, 0xC5380103,
- 0x81C, 0xC43A0103, 0x81C, 0xC33C0103, 0x81C, 0xC23E0103,
- 0x81C, 0xA5400103, 0x81C, 0xA4420103, 0x81C, 0xA3440103,
- 0x81C, 0xA2460103, 0x81C, 0xA1480103, 0x81C, 0x834A0103,
- 0x81C, 0x824C0103, 0x81C, 0x814E0103, 0x81C, 0x64500103,
- 0x81C, 0x63520103, 0x81C, 0x62540103, 0x81C, 0x61560103,
- 0x81C, 0x42580103, 0x81C, 0x415A0103, 0x81C, 0x405C0103,
- 0x81C, 0x065E0103, 0x81C, 0x05600103, 0x81C, 0x04620103,
- 0x81C, 0x03640103, 0x81C, 0x02660103, 0x81C, 0x01680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFE000103, 0x81C, 0xFD020103,
- 0x81C, 0xFC040103, 0x81C, 0xFB060103, 0x81C, 0xFA080103,
- 0x81C, 0xF90A0103, 0x81C, 0xF80C0103, 0x81C, 0xF70E0103,
- 0x81C, 0xF6100103, 0x81C, 0xF5120103, 0x81C, 0xF4140103,
- 0x81C, 0xF3160103, 0x81C, 0xF2180103, 0x81C, 0xF11A0103,
- 0x81C, 0xF01C0103, 0x81C, 0xEF1E0103, 0x81C, 0xEE200103,
- 0x81C, 0xED220103, 0x81C, 0xEC240103, 0x81C, 0xEB260103,
- 0x81C, 0xEA280103, 0x81C, 0xE92A0103, 0x81C, 0xE82C0103,
- 0x81C, 0xE72E0103, 0x81C, 0xE6300103, 0x81C, 0xE5320103,
- 0x81C, 0xE4340103, 0x81C, 0xE3360103, 0x81C, 0xC6380103,
- 0x81C, 0xC53A0103, 0x81C, 0xC43C0103, 0x81C, 0xC33E0103,
- 0x81C, 0xA5400103, 0x81C, 0xA4420103, 0x81C, 0xA3440103,
- 0x81C, 0xA2460103, 0x81C, 0xA1480103, 0x81C, 0xA04A0103,
- 0x81C, 0x824C0103, 0x81C, 0x814E0103, 0x81C, 0x80500103,
- 0x81C, 0x64520103, 0x81C, 0x63540103, 0x81C, 0x62560103,
- 0x81C, 0x61580103, 0x81C, 0x605A0103, 0x81C, 0x235C0103,
- 0x81C, 0x225E0103, 0x81C, 0x21600103, 0x81C, 0x20620103,
- 0x81C, 0x03640103, 0x81C, 0x02660103, 0x81C, 0x01680103,
- 0x81C, 0x006A0103, 0x81C, 0x006C0103, 0x81C, 0x006E0103,
- 0x81C, 0x00700103, 0x81C, 0x00720103, 0x81C, 0x00740103,
- 0x81C, 0x00760103, 0x81C, 0x00780103, 0x81C, 0x007A0103,
- 0x81C, 0x007C0103, 0x81C, 0x007E0103, 0xA0000000, 0x00000000,
- 0x81C, 0xFE000103, 0x81C, 0xFD020103, 0x81C, 0xFC040103,
- 0x81C, 0xFB060103, 0x81C, 0xFA080103, 0x81C, 0xF90A0103,
- 0x81C, 0xF80C0103, 0x81C, 0xF70E0103, 0x81C, 0xF6100103,
- 0x81C, 0xF5120103, 0x81C, 0xF4140103, 0x81C, 0xF3160103,
- 0x81C, 0xF2180103, 0x81C, 0xF11A0103, 0x81C, 0xF01C0103,
- 0x81C, 0xEF1E0103, 0x81C, 0xEE200103, 0x81C, 0xED220103,
- 0x81C, 0xEC240103, 0x81C, 0xEB260103, 0x81C, 0xEA280103,
- 0x81C, 0xE92A0103, 0x81C, 0xE82C0103, 0x81C, 0xE72E0103,
- 0x81C, 0xE6300103, 0x81C, 0xE5320103, 0x81C, 0xE4340103,
- 0x81C, 0xE3360103, 0x81C, 0xC6380103, 0x81C, 0xC53A0103,
- 0x81C, 0xC43C0103, 0x81C, 0xC33E0103, 0x81C, 0xA5400103,
- 0x81C, 0xA4420103, 0x81C, 0xA3440103, 0x81C, 0xA2460103,
- 0x81C, 0xA1480103, 0x81C, 0xA04A0103, 0x81C, 0x824C0103,
- 0x81C, 0x814E0103, 0x81C, 0x80500103, 0x81C, 0x64520103,
- 0x81C, 0x63540103, 0x81C, 0x62560103, 0x81C, 0x61580103,
- 0x81C, 0x605A0103, 0x81C, 0x235C0103, 0x81C, 0x225E0103,
- 0x81C, 0x21600103, 0x81C, 0x20620103, 0x81C, 0x03640103,
- 0x81C, 0x02660103, 0x81C, 0x01680103, 0x81C, 0x006A0103,
- 0x81C, 0x006C0103, 0x81C, 0x006E0103, 0x81C, 0x00700103,
- 0x81C, 0x00720103, 0x81C, 0x00740103, 0x81C, 0x00760103,
- 0x81C, 0x00780103, 0x81C, 0x007A0103, 0x81C, 0x007C0103,
- 0x81C, 0x007E0103, 0xB0000000, 0x00000000, 0x8000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x81C, 0xF8000203, 0x81C, 0xF7020203,
- 0x81C, 0xF6040203, 0x81C, 0xF5060203, 0x81C, 0xF4080203,
- 0x81C, 0xF30A0203, 0x81C, 0xF20C0203, 0x81C, 0xF10E0203,
- 0x81C, 0xF0100203, 0x81C, 0xEF120203, 0x81C, 0xEE140203,
- 0x81C, 0xED160203, 0x81C, 0xEC180203, 0x81C, 0xEB1A0203,
- 0x81C, 0xEA1C0203, 0x81C, 0xE91E0203, 0x81C, 0xE8200203,
- 0x81C, 0xE7220203, 0x81C, 0xE6240203, 0x81C, 0xE5260203,
- 0x81C, 0xE4280203, 0x81C, 0xE32A0203, 0x81C, 0xC42C0203,
- 0x81C, 0xC32E0203, 0x81C, 0xC2300203, 0x81C, 0xC1320203,
- 0x81C, 0xA3340203, 0x81C, 0xA2360203, 0x81C, 0xA1380203,
- 0x81C, 0xA03A0203, 0x81C, 0x823C0203, 0x81C, 0x813E0203,
- 0x81C, 0x80400203, 0x81C, 0x65420203, 0x81C, 0x64440203,
- 0x81C, 0x63460203, 0x81C, 0x62480203, 0x81C, 0x614A0203,
- 0x81C, 0x424C0203, 0x81C, 0x414E0203, 0x81C, 0x40500203,
- 0x81C, 0x22520203, 0x81C, 0x21540203, 0x81C, 0x20560203,
- 0x81C, 0x04580203, 0x81C, 0x035A0203, 0x81C, 0x025C0203,
- 0x81C, 0x015E0203, 0x81C, 0x00600203, 0x81C, 0x00620203,
- 0x81C, 0x00640203, 0x81C, 0x00660203, 0x81C, 0x00680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x81C, 0xF9000203, 0x81C, 0xF8020203,
- 0x81C, 0xF7040203, 0x81C, 0xF6060203, 0x81C, 0xF5080203,
- 0x81C, 0xF40A0203, 0x81C, 0xF30C0203, 0x81C, 0xF20E0203,
- 0x81C, 0xF1100203, 0x81C, 0xF0120203, 0x81C, 0xEF140203,
- 0x81C, 0xEE160203, 0x81C, 0xED180203, 0x81C, 0xEC1A0203,
- 0x81C, 0xEB1C0203, 0x81C, 0xEA1E0203, 0x81C, 0xE9200203,
- 0x81C, 0xE8220203, 0x81C, 0xE7240203, 0x81C, 0xE6260203,
- 0x81C, 0xE5280203, 0x81C, 0xC42A0203, 0x81C, 0xC32C0203,
- 0x81C, 0xC22E0203, 0x81C, 0xC1300203, 0x81C, 0xC0320203,
- 0x81C, 0xA3340203, 0x81C, 0xA2360203, 0x81C, 0xA1380203,
- 0x81C, 0xA03A0203, 0x81C, 0x823C0203, 0x81C, 0x813E0203,
- 0x81C, 0x80400203, 0x81C, 0x64420203, 0x81C, 0x63440203,
- 0x81C, 0x62460203, 0x81C, 0x61480203, 0x81C, 0x604A0203,
- 0x81C, 0x414C0203, 0x81C, 0x404E0203, 0x81C, 0x22500203,
- 0x81C, 0x21520203, 0x81C, 0x20540203, 0x81C, 0x03560203,
- 0x81C, 0x02580203, 0x81C, 0x015A0203, 0x81C, 0x005C0203,
- 0x81C, 0x005E0203, 0x81C, 0x00600203, 0x81C, 0x00620203,
- 0x81C, 0x00640203, 0x81C, 0x00660203, 0x81C, 0x00680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF7000203, 0x81C, 0xF6020203,
- 0x81C, 0xF5040203, 0x81C, 0xF4060203, 0x81C, 0xF3080203,
- 0x81C, 0xF20A0203, 0x81C, 0xF10C0203, 0x81C, 0xF00E0203,
- 0x81C, 0xEF100203, 0x81C, 0xEE120203, 0x81C, 0xED140203,
- 0x81C, 0xEC160203, 0x81C, 0xEB180203, 0x81C, 0xEA1A0203,
- 0x81C, 0xE91C0203, 0x81C, 0xE81E0203, 0x81C, 0xE7200203,
- 0x81C, 0xE6220203, 0x81C, 0xE5240203, 0x81C, 0xE4260203,
- 0x81C, 0xE3280203, 0x81C, 0xC42A0203, 0x81C, 0xC32C0203,
- 0x81C, 0xC22E0203, 0x81C, 0xC1300203, 0x81C, 0xC0320203,
- 0x81C, 0xA3340203, 0x81C, 0xA2360203, 0x81C, 0xA1380203,
- 0x81C, 0xA03A0203, 0x81C, 0x823C0203, 0x81C, 0x813E0203,
- 0x81C, 0x80400203, 0x81C, 0x63420203, 0x81C, 0x62440203,
- 0x81C, 0x61460203, 0x81C, 0x60480203, 0x81C, 0x424A0203,
- 0x81C, 0x414C0203, 0x81C, 0x404E0203, 0x81C, 0x06500203,
- 0x81C, 0x05520203, 0x81C, 0x04540203, 0x81C, 0x03560203,
- 0x81C, 0x02580203, 0x81C, 0x015A0203, 0x81C, 0x005C0203,
- 0x81C, 0x005E0203, 0x81C, 0x00600203, 0x81C, 0x00620203,
- 0x81C, 0x00640203, 0x81C, 0x00660203, 0x81C, 0x00680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF7000203, 0x81C, 0xF6020203,
- 0x81C, 0xF5040203, 0x81C, 0xF4060203, 0x81C, 0xF3080203,
- 0x81C, 0xF20A0203, 0x81C, 0xF10C0203, 0x81C, 0xF00E0203,
- 0x81C, 0xEF100203, 0x81C, 0xEE120203, 0x81C, 0xED140203,
- 0x81C, 0xEC160203, 0x81C, 0xEB180203, 0x81C, 0xEA1A0203,
- 0x81C, 0xE91C0203, 0x81C, 0xE81E0203, 0x81C, 0xE7200203,
- 0x81C, 0xE6220203, 0x81C, 0xE5240203, 0x81C, 0xE4260203,
- 0x81C, 0xE3280203, 0x81C, 0xC42A0203, 0x81C, 0xC32C0203,
- 0x81C, 0xC22E0203, 0x81C, 0xC1300203, 0x81C, 0xC0320203,
- 0x81C, 0xA3340203, 0x81C, 0xA2360203, 0x81C, 0xA1380203,
- 0x81C, 0xA03A0203, 0x81C, 0x823C0203, 0x81C, 0x813E0203,
- 0x81C, 0x80400203, 0x81C, 0x64420203, 0x81C, 0x63440203,
- 0x81C, 0x62460203, 0x81C, 0x61480203, 0x81C, 0x604A0203,
- 0x81C, 0x414C0203, 0x81C, 0x404E0203, 0x81C, 0x22500203,
- 0x81C, 0x21520203, 0x81C, 0x20540203, 0x81C, 0x03560203,
- 0x81C, 0x02580203, 0x81C, 0x015A0203, 0x81C, 0x005C0203,
- 0x81C, 0x005E0203, 0x81C, 0x00600203, 0x81C, 0x00620203,
- 0x81C, 0x00640203, 0x81C, 0x00660203, 0x81C, 0x00680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF7000203, 0x81C, 0xF6020203,
- 0x81C, 0xF5040203, 0x81C, 0xF4060203, 0x81C, 0xF3080203,
- 0x81C, 0xF20A0203, 0x81C, 0xF10C0203, 0x81C, 0xF00E0203,
- 0x81C, 0xEF100203, 0x81C, 0xEE120203, 0x81C, 0xED140203,
- 0x81C, 0xEC160203, 0x81C, 0xEB180203, 0x81C, 0xEA1A0203,
- 0x81C, 0xE91C0203, 0x81C, 0xE81E0203, 0x81C, 0xE7200203,
- 0x81C, 0xE6220203, 0x81C, 0xE5240203, 0x81C, 0xE4260203,
- 0x81C, 0xE3280203, 0x81C, 0xC42A0203, 0x81C, 0xC32C0203,
- 0x81C, 0xC22E0203, 0x81C, 0xC1300203, 0x81C, 0xC0320203,
- 0x81C, 0xA3340203, 0x81C, 0xA2360203, 0x81C, 0xA1380203,
- 0x81C, 0xA03A0203, 0x81C, 0x823C0203, 0x81C, 0x813E0203,
- 0x81C, 0x80400203, 0x81C, 0x64420203, 0x81C, 0x63440203,
- 0x81C, 0x62460203, 0x81C, 0x61480203, 0x81C, 0x604A0203,
- 0x81C, 0x414C0203, 0x81C, 0x404E0203, 0x81C, 0x22500203,
- 0x81C, 0x21520203, 0x81C, 0x20540203, 0x81C, 0x03560203,
- 0x81C, 0x02580203, 0x81C, 0x015A0203, 0x81C, 0x005C0203,
- 0x81C, 0x005E0203, 0x81C, 0x00600203, 0x81C, 0x00620203,
- 0x81C, 0x00640203, 0x81C, 0x00660203, 0x81C, 0x00680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x90012100, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFB000203, 0x81C, 0xFA020203,
- 0x81C, 0xF9040203, 0x81C, 0xF8060203, 0x81C, 0xF7080203,
- 0x81C, 0xF60A0203, 0x81C, 0xF50C0203, 0x81C, 0xF40E0203,
- 0x81C, 0xF3100203, 0x81C, 0xF2120203, 0x81C, 0xF1140203,
- 0x81C, 0xF0160203, 0x81C, 0xEF180203, 0x81C, 0xEE1A0203,
- 0x81C, 0xED1C0203, 0x81C, 0xEC1E0203, 0x81C, 0xEB200203,
- 0x81C, 0xEA220203, 0x81C, 0xE9240203, 0x81C, 0xE8260203,
- 0x81C, 0xE7280203, 0x81C, 0xE62A0203, 0x81C, 0xE52C0203,
- 0x81C, 0xE42E0203, 0x81C, 0xE3300203, 0x81C, 0xE2320203,
- 0x81C, 0xC6340203, 0x81C, 0xC5360203, 0x81C, 0xC4380203,
- 0x81C, 0xC33A0203, 0x81C, 0xC23C0203, 0x81C, 0xC13E0203,
- 0x81C, 0xC0400203, 0x81C, 0xA3420203, 0x81C, 0xA2440203,
- 0x81C, 0xA1460203, 0x81C, 0xA0480203, 0x81C, 0x824A0203,
- 0x81C, 0x814C0203, 0x81C, 0x804E0203, 0x81C, 0x63500203,
- 0x81C, 0x62520203, 0x81C, 0x61540203, 0x81C, 0x60560203,
- 0x81C, 0x24580203, 0x81C, 0x235A0203, 0x81C, 0x225C0203,
- 0x81C, 0x215E0203, 0x81C, 0x20600203, 0x81C, 0x03620203,
- 0x81C, 0x02640203, 0x81C, 0x01660203, 0x81C, 0x00680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF8000203, 0x81C, 0xF7020203,
- 0x81C, 0xF6040203, 0x81C, 0xF5060203, 0x81C, 0xF4080203,
- 0x81C, 0xF30A0203, 0x81C, 0xF20C0203, 0x81C, 0xF10E0203,
- 0x81C, 0xF0100203, 0x81C, 0xEF120203, 0x81C, 0xEE140203,
- 0x81C, 0xED160203, 0x81C, 0xEC180203, 0x81C, 0xEB1A0203,
- 0x81C, 0xEA1C0203, 0x81C, 0xE91E0203, 0x81C, 0xE8200203,
- 0x81C, 0xE7220203, 0x81C, 0xE6240203, 0x81C, 0xE5260203,
- 0x81C, 0xE4280203, 0x81C, 0xE32A0203, 0x81C, 0xC42C0203,
- 0x81C, 0xC32E0203, 0x81C, 0xC2300203, 0x81C, 0xC1320203,
- 0x81C, 0xA3340203, 0x81C, 0xA2360203, 0x81C, 0xA1380203,
- 0x81C, 0xA03A0203, 0x81C, 0x823C0203, 0x81C, 0x813E0203,
- 0x81C, 0x80400203, 0x81C, 0x65420203, 0x81C, 0x64440203,
- 0x81C, 0x63460203, 0x81C, 0x62480203, 0x81C, 0x614A0203,
- 0x81C, 0x424C0203, 0x81C, 0x414E0203, 0x81C, 0x40500203,
- 0x81C, 0x22520203, 0x81C, 0x21540203, 0x81C, 0x20560203,
- 0x81C, 0x04580203, 0x81C, 0x035A0203, 0x81C, 0x025C0203,
- 0x81C, 0x015E0203, 0x81C, 0x00600203, 0x81C, 0x00620203,
- 0x81C, 0x00640203, 0x81C, 0x00660203, 0x81C, 0x00680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x90011000, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFC000203, 0x81C, 0xFB020203,
- 0x81C, 0xFA040203, 0x81C, 0xF9060203, 0x81C, 0xF8080203,
- 0x81C, 0xF70A0203, 0x81C, 0xF60C0203, 0x81C, 0xF50E0203,
- 0x81C, 0xF4100203, 0x81C, 0xF3120203, 0x81C, 0xF2140203,
- 0x81C, 0xF1160203, 0x81C, 0xF0180203, 0x81C, 0xEE1A0203,
- 0x81C, 0xED1C0203, 0x81C, 0xEC1E0203, 0x81C, 0xEB200203,
- 0x81C, 0xEA220203, 0x81C, 0xE9240203, 0x81C, 0xE8260203,
- 0x81C, 0xE7280203, 0x81C, 0xE62A0203, 0x81C, 0xE52C0203,
- 0x81C, 0xE42E0203, 0x81C, 0xE3300203, 0x81C, 0xE2320203,
- 0x81C, 0xC6340203, 0x81C, 0xC5360203, 0x81C, 0xC4380203,
- 0x81C, 0xC33A0203, 0x81C, 0xA63C0203, 0x81C, 0xA53E0203,
- 0x81C, 0xA4400203, 0x81C, 0xA3420203, 0x81C, 0xA2440203,
- 0x81C, 0xA1460203, 0x81C, 0x83480203, 0x81C, 0x824A0203,
- 0x81C, 0x814C0203, 0x81C, 0x804E0203, 0x81C, 0x63500203,
- 0x81C, 0x62520203, 0x81C, 0x61540203, 0x81C, 0x42560203,
- 0x81C, 0x41580203, 0x81C, 0x405A0203, 0x81C, 0x225C0203,
- 0x81C, 0x215E0203, 0x81C, 0x20600203, 0x81C, 0x04620203,
- 0x81C, 0x03640203, 0x81C, 0x02660203, 0x81C, 0x01680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFC000203, 0x81C, 0xFB020203,
- 0x81C, 0xFA040203, 0x81C, 0xF9060203, 0x81C, 0xF8080203,
- 0x81C, 0xF70A0203, 0x81C, 0xF60C0203, 0x81C, 0xF50E0203,
- 0x81C, 0xF4100203, 0x81C, 0xF3120203, 0x81C, 0xF2140203,
- 0x81C, 0xF1160203, 0x81C, 0xF0180203, 0x81C, 0xEF1A0203,
- 0x81C, 0xEE1C0203, 0x81C, 0xED1E0203, 0x81C, 0xEC200203,
- 0x81C, 0xEB220203, 0x81C, 0xEA240203, 0x81C, 0xE9260203,
- 0x81C, 0xE8280203, 0x81C, 0xE72A0203, 0x81C, 0xE62C0203,
- 0x81C, 0xE52E0203, 0x81C, 0xE4300203, 0x81C, 0xE3320203,
- 0x81C, 0xE2340203, 0x81C, 0xE1360203, 0x81C, 0xC5380203,
- 0x81C, 0xC43A0203, 0x81C, 0xC33C0203, 0x81C, 0xC23E0203,
- 0x81C, 0xC1400203, 0x81C, 0xA3420203, 0x81C, 0xA2440203,
- 0x81C, 0xA1460203, 0x81C, 0xA0480203, 0x81C, 0x834A0203,
- 0x81C, 0x824C0203, 0x81C, 0x814E0203, 0x81C, 0x64500203,
- 0x81C, 0x63520203, 0x81C, 0x62540203, 0x81C, 0x61560203,
- 0x81C, 0x25580203, 0x81C, 0x245A0203, 0x81C, 0x235C0203,
- 0x81C, 0x225E0203, 0x81C, 0x21600203, 0x81C, 0x04620203,
- 0x81C, 0x03640203, 0x81C, 0x02660203, 0x81C, 0x01680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFC000203, 0x81C, 0xFB020203,
- 0x81C, 0xFA040203, 0x81C, 0xF9060203, 0x81C, 0xF8080203,
- 0x81C, 0xF70A0203, 0x81C, 0xF60C0203, 0x81C, 0xF50E0203,
- 0x81C, 0xF4100203, 0x81C, 0xF3120203, 0x81C, 0xF2140203,
- 0x81C, 0xF1160203, 0x81C, 0xF0180203, 0x81C, 0xEF1A0203,
- 0x81C, 0xEE1C0203, 0x81C, 0xED1E0203, 0x81C, 0xEC200203,
- 0x81C, 0xEB220203, 0x81C, 0xEA240203, 0x81C, 0xE9260203,
- 0x81C, 0xE8280203, 0x81C, 0xE72A0203, 0x81C, 0xE62C0203,
- 0x81C, 0xE52E0203, 0x81C, 0xE4300203, 0x81C, 0xE3320203,
- 0x81C, 0xE2340203, 0x81C, 0xC6360203, 0x81C, 0xC5380203,
- 0x81C, 0xC43A0203, 0x81C, 0xC33C0203, 0x81C, 0xA63E0203,
- 0x81C, 0xA5400203, 0x81C, 0xA4420203, 0x81C, 0xA3440203,
- 0x81C, 0xA2460203, 0x81C, 0xA1480203, 0x81C, 0x834A0203,
- 0x81C, 0x824C0203, 0x81C, 0x814E0203, 0x81C, 0x64500203,
- 0x81C, 0x63520203, 0x81C, 0x62540203, 0x81C, 0x61560203,
- 0x81C, 0x60580203, 0x81C, 0x405A0203, 0x81C, 0x215C0203,
- 0x81C, 0x205E0203, 0x81C, 0x03600203, 0x81C, 0x02620203,
- 0x81C, 0x01640203, 0x81C, 0x00660203, 0x81C, 0x00680203,
- 0x81C, 0x006A0203, 0x81C, 0x006C0203, 0x81C, 0x006E0203,
- 0x81C, 0x00700203, 0x81C, 0x00720203, 0x81C, 0x00740203,
- 0x81C, 0x00760203, 0x81C, 0x00780203, 0x81C, 0x007A0203,
- 0x81C, 0x007C0203, 0x81C, 0x007E0203, 0xA0000000, 0x00000000,
- 0x81C, 0xFD000203, 0x81C, 0xFC020203, 0x81C, 0xFB040203,
- 0x81C, 0xFA060203, 0x81C, 0xF9080203, 0x81C, 0xF80A0203,
- 0x81C, 0xF70C0203, 0x81C, 0xF60E0203, 0x81C, 0xF5100203,
- 0x81C, 0xF4120203, 0x81C, 0xF3140203, 0x81C, 0xF2160203,
- 0x81C, 0xF1180203, 0x81C, 0xF01A0203, 0x81C, 0xEF1C0203,
- 0x81C, 0xEE1E0203, 0x81C, 0xED200203, 0x81C, 0xEC220203,
- 0x81C, 0xEB240203, 0x81C, 0xEA260203, 0x81C, 0xE9280203,
- 0x81C, 0xE82A0203, 0x81C, 0xE72C0203, 0x81C, 0xE62E0203,
- 0x81C, 0xE5300203, 0x81C, 0xE4320203, 0x81C, 0xE3340203,
- 0x81C, 0xC6360203, 0x81C, 0xC5380203, 0x81C, 0xC43A0203,
- 0x81C, 0xC33C0203, 0x81C, 0xA63E0203, 0x81C, 0xA5400203,
- 0x81C, 0xA4420203, 0x81C, 0xA3440203, 0x81C, 0xA2460203,
- 0x81C, 0xA1480203, 0x81C, 0x834A0203, 0x81C, 0x824C0203,
- 0x81C, 0x814E0203, 0x81C, 0x64500203, 0x81C, 0x63520203,
- 0x81C, 0x62540203, 0x81C, 0x61560203, 0x81C, 0x60580203,
- 0x81C, 0x235A0203, 0x81C, 0x225C0203, 0x81C, 0x215E0203,
- 0x81C, 0x20600203, 0x81C, 0x03620203, 0x81C, 0x02640203,
- 0x81C, 0x01660203, 0x81C, 0x00680203, 0x81C, 0x006A0203,
- 0x81C, 0x006C0203, 0x81C, 0x006E0203, 0x81C, 0x00700203,
- 0x81C, 0x00720203, 0x81C, 0x00740203, 0x81C, 0x00760203,
- 0x81C, 0x00780203, 0x81C, 0x007A0203, 0x81C, 0x007C0203,
- 0x81C, 0x007E0203, 0xB0000000, 0x00000000, 0x8000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x81C, 0xF8000303, 0x81C, 0xF7020303,
- 0x81C, 0xF6040303, 0x81C, 0xF5060303, 0x81C, 0xF4080303,
- 0x81C, 0xF30A0303, 0x81C, 0xF20C0303, 0x81C, 0xF10E0303,
- 0x81C, 0xF0100303, 0x81C, 0xEF120303, 0x81C, 0xEE140303,
- 0x81C, 0xED160303, 0x81C, 0xEC180303, 0x81C, 0xEB1A0303,
- 0x81C, 0xEA1C0303, 0x81C, 0xE91E0303, 0x81C, 0xCA200303,
- 0x81C, 0xC9220303, 0x81C, 0xC8240303, 0x81C, 0xC7260303,
- 0x81C, 0xC6280303, 0x81C, 0xC52A0303, 0x81C, 0xC42C0303,
- 0x81C, 0xC32E0303, 0x81C, 0xC2300303, 0x81C, 0xC1320303,
- 0x81C, 0xA3340303, 0x81C, 0xA2360303, 0x81C, 0xA1380303,
- 0x81C, 0xA03A0303, 0x81C, 0x823C0303, 0x81C, 0x813E0303,
- 0x81C, 0x80400303, 0x81C, 0x65420303, 0x81C, 0x64440303,
- 0x81C, 0x63460303, 0x81C, 0x62480303, 0x81C, 0x614A0303,
- 0x81C, 0x424C0303, 0x81C, 0x414E0303, 0x81C, 0x40500303,
- 0x81C, 0x22520303, 0x81C, 0x21540303, 0x81C, 0x20560303,
- 0x81C, 0x04580303, 0x81C, 0x035A0303, 0x81C, 0x025C0303,
- 0x81C, 0x015E0303, 0x81C, 0x00600303, 0x81C, 0x00620303,
- 0x81C, 0x00640303, 0x81C, 0x00660303, 0x81C, 0x00680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x81C, 0xF9000303, 0x81C, 0xF8020303,
- 0x81C, 0xF7040303, 0x81C, 0xF6060303, 0x81C, 0xF5080303,
- 0x81C, 0xF40A0303, 0x81C, 0xF30C0303, 0x81C, 0xF20E0303,
- 0x81C, 0xF1100303, 0x81C, 0xF0120303, 0x81C, 0xEF140303,
- 0x81C, 0xEE160303, 0x81C, 0xED180303, 0x81C, 0xEC1A0303,
- 0x81C, 0xEB1C0303, 0x81C, 0xEA1E0303, 0x81C, 0xC9200303,
- 0x81C, 0xC8220303, 0x81C, 0xC7240303, 0x81C, 0xC6260303,
- 0x81C, 0xC5280303, 0x81C, 0xC42A0303, 0x81C, 0xC32C0303,
- 0x81C, 0xC22E0303, 0x81C, 0xC1300303, 0x81C, 0xC0320303,
- 0x81C, 0xA3340303, 0x81C, 0xA2360303, 0x81C, 0xA1380303,
- 0x81C, 0xA03A0303, 0x81C, 0x823C0303, 0x81C, 0x813E0303,
- 0x81C, 0x80400303, 0x81C, 0x64420303, 0x81C, 0x63440303,
- 0x81C, 0x62460303, 0x81C, 0x61480303, 0x81C, 0x604A0303,
- 0x81C, 0x414C0303, 0x81C, 0x404E0303, 0x81C, 0x22500303,
- 0x81C, 0x21520303, 0x81C, 0x20540303, 0x81C, 0x03560303,
- 0x81C, 0x02580303, 0x81C, 0x015A0303, 0x81C, 0x005C0303,
- 0x81C, 0x005E0303, 0x81C, 0x00600303, 0x81C, 0x00620303,
- 0x81C, 0x00640303, 0x81C, 0x00660303, 0x81C, 0x00680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF7000303, 0x81C, 0xF6020303,
- 0x81C, 0xF5040303, 0x81C, 0xF4060303, 0x81C, 0xF3080303,
- 0x81C, 0xF20A0303, 0x81C, 0xF10C0303, 0x81C, 0xF00E0303,
- 0x81C, 0xEF100303, 0x81C, 0xEE120303, 0x81C, 0xED140303,
- 0x81C, 0xEC160303, 0x81C, 0xEB180303, 0x81C, 0xEA1A0303,
- 0x81C, 0xE91C0303, 0x81C, 0xCA1E0303, 0x81C, 0xC9200303,
- 0x81C, 0xC8220303, 0x81C, 0xC7240303, 0x81C, 0xC6260303,
- 0x81C, 0xC5280303, 0x81C, 0xC42A0303, 0x81C, 0xC32C0303,
- 0x81C, 0xC22E0303, 0x81C, 0xC1300303, 0x81C, 0xA4320303,
- 0x81C, 0xA3340303, 0x81C, 0xA2360303, 0x81C, 0xA1380303,
- 0x81C, 0xA03A0303, 0x81C, 0x823C0303, 0x81C, 0x813E0303,
- 0x81C, 0x80400303, 0x81C, 0x64420303, 0x81C, 0x63440303,
- 0x81C, 0x62460303, 0x81C, 0x61480303, 0x81C, 0x604A0303,
- 0x81C, 0x414C0303, 0x81C, 0x404E0303, 0x81C, 0x06500303,
- 0x81C, 0x05520303, 0x81C, 0x04540303, 0x81C, 0x03560303,
- 0x81C, 0x02580303, 0x81C, 0x015A0303, 0x81C, 0x005C0303,
- 0x81C, 0x005E0303, 0x81C, 0x00600303, 0x81C, 0x00620303,
- 0x81C, 0x00640303, 0x81C, 0x00660303, 0x81C, 0x00680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF7000303, 0x81C, 0xF6020303,
- 0x81C, 0xF5040303, 0x81C, 0xF4060303, 0x81C, 0xF3080303,
- 0x81C, 0xF20A0303, 0x81C, 0xF10C0303, 0x81C, 0xF00E0303,
- 0x81C, 0xEF100303, 0x81C, 0xEE120303, 0x81C, 0xED140303,
- 0x81C, 0xEC160303, 0x81C, 0xEB180303, 0x81C, 0xEA1A0303,
- 0x81C, 0xE91C0303, 0x81C, 0xCA1E0303, 0x81C, 0xC9200303,
- 0x81C, 0xC8220303, 0x81C, 0xC7240303, 0x81C, 0xC6260303,
- 0x81C, 0xC5280303, 0x81C, 0xC42A0303, 0x81C, 0xC32C0303,
- 0x81C, 0xC22E0303, 0x81C, 0xC1300303, 0x81C, 0xA4320303,
- 0x81C, 0xA3340303, 0x81C, 0xA2360303, 0x81C, 0xA1380303,
- 0x81C, 0xA03A0303, 0x81C, 0x823C0303, 0x81C, 0x813E0303,
- 0x81C, 0x80400303, 0x81C, 0x64420303, 0x81C, 0x63440303,
- 0x81C, 0x62460303, 0x81C, 0x61480303, 0x81C, 0x604A0303,
- 0x81C, 0x414C0303, 0x81C, 0x404E0303, 0x81C, 0x22500303,
- 0x81C, 0x21520303, 0x81C, 0x20540303, 0x81C, 0x03560303,
- 0x81C, 0x02580303, 0x81C, 0x015A0303, 0x81C, 0x005C0303,
- 0x81C, 0x005E0303, 0x81C, 0x00600303, 0x81C, 0x00620303,
- 0x81C, 0x00640303, 0x81C, 0x00660303, 0x81C, 0x00680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF7000303, 0x81C, 0xF6020303,
- 0x81C, 0xF5040303, 0x81C, 0xF4060303, 0x81C, 0xF3080303,
- 0x81C, 0xF20A0303, 0x81C, 0xF10C0303, 0x81C, 0xF00E0303,
- 0x81C, 0xEF100303, 0x81C, 0xEE120303, 0x81C, 0xED140303,
- 0x81C, 0xEC160303, 0x81C, 0xEB180303, 0x81C, 0xEA1A0303,
- 0x81C, 0xE91C0303, 0x81C, 0xCA1E0303, 0x81C, 0xC9200303,
- 0x81C, 0xC8220303, 0x81C, 0xC7240303, 0x81C, 0xC6260303,
- 0x81C, 0xC5280303, 0x81C, 0xC42A0303, 0x81C, 0xC32C0303,
- 0x81C, 0xC22E0303, 0x81C, 0xC1300303, 0x81C, 0xA4320303,
- 0x81C, 0xA3340303, 0x81C, 0xA2360303, 0x81C, 0xA1380303,
- 0x81C, 0xA03A0303, 0x81C, 0x823C0303, 0x81C, 0x813E0303,
- 0x81C, 0x80400303, 0x81C, 0x64420303, 0x81C, 0x63440303,
- 0x81C, 0x62460303, 0x81C, 0x61480303, 0x81C, 0x604A0303,
- 0x81C, 0x414C0303, 0x81C, 0x404E0303, 0x81C, 0x22500303,
- 0x81C, 0x21520303, 0x81C, 0x20540303, 0x81C, 0x03560303,
- 0x81C, 0x02580303, 0x81C, 0x015A0303, 0x81C, 0x005C0303,
- 0x81C, 0x005E0303, 0x81C, 0x00600303, 0x81C, 0x00620303,
- 0x81C, 0x00640303, 0x81C, 0x00660303, 0x81C, 0x00680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x90012100, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFB000303, 0x81C, 0xFA020303,
- 0x81C, 0xF9040303, 0x81C, 0xF8060303, 0x81C, 0xF7080303,
- 0x81C, 0xF60A0303, 0x81C, 0xF50C0303, 0x81C, 0xF40E0303,
- 0x81C, 0xF3100303, 0x81C, 0xF2120303, 0x81C, 0xF1140303,
- 0x81C, 0xF0160303, 0x81C, 0xEF180303, 0x81C, 0xEE1A0303,
- 0x81C, 0xED1C0303, 0x81C, 0xEC1E0303, 0x81C, 0xEB200303,
- 0x81C, 0xEA220303, 0x81C, 0xE9240303, 0x81C, 0xE8260303,
- 0x81C, 0xE7280303, 0x81C, 0xE62A0303, 0x81C, 0xE52C0303,
- 0x81C, 0xE42E0303, 0x81C, 0xE3300303, 0x81C, 0xE2320303,
- 0x81C, 0xC6340303, 0x81C, 0xC5360303, 0x81C, 0xC4380303,
- 0x81C, 0xC33A0303, 0x81C, 0xC23C0303, 0x81C, 0xC13E0303,
- 0x81C, 0xA4400303, 0x81C, 0xA3420303, 0x81C, 0xA2440303,
- 0x81C, 0xA1460303, 0x81C, 0x83480303, 0x81C, 0x824A0303,
- 0x81C, 0x814C0303, 0x81C, 0x804E0303, 0x81C, 0x63500303,
- 0x81C, 0x62520303, 0x81C, 0x43540303, 0x81C, 0x42560303,
- 0x81C, 0x41580303, 0x81C, 0x235A0303, 0x81C, 0x225C0303,
- 0x81C, 0x215E0303, 0x81C, 0x20600303, 0x81C, 0x04620303,
- 0x81C, 0x03640303, 0x81C, 0x02660303, 0x81C, 0x01680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xF8000303, 0x81C, 0xF7020303,
- 0x81C, 0xF6040303, 0x81C, 0xF5060303, 0x81C, 0xF4080303,
- 0x81C, 0xF30A0303, 0x81C, 0xF20C0303, 0x81C, 0xF10E0303,
- 0x81C, 0xF0100303, 0x81C, 0xEF120303, 0x81C, 0xEE140303,
- 0x81C, 0xED160303, 0x81C, 0xEC180303, 0x81C, 0xEB1A0303,
- 0x81C, 0xEA1C0303, 0x81C, 0xE91E0303, 0x81C, 0xCA200303,
- 0x81C, 0xC9220303, 0x81C, 0xC8240303, 0x81C, 0xC7260303,
- 0x81C, 0xC6280303, 0x81C, 0xC52A0303, 0x81C, 0xC42C0303,
- 0x81C, 0xC32E0303, 0x81C, 0xC2300303, 0x81C, 0xC1320303,
- 0x81C, 0xA3340303, 0x81C, 0xA2360303, 0x81C, 0xA1380303,
- 0x81C, 0xA03A0303, 0x81C, 0x823C0303, 0x81C, 0x813E0303,
- 0x81C, 0x80400303, 0x81C, 0x65420303, 0x81C, 0x64440303,
- 0x81C, 0x63460303, 0x81C, 0x62480303, 0x81C, 0x614A0303,
- 0x81C, 0x424C0303, 0x81C, 0x414E0303, 0x81C, 0x40500303,
- 0x81C, 0x22520303, 0x81C, 0x21540303, 0x81C, 0x20560303,
- 0x81C, 0x04580303, 0x81C, 0x035A0303, 0x81C, 0x025C0303,
- 0x81C, 0x015E0303, 0x81C, 0x00600303, 0x81C, 0x00620303,
- 0x81C, 0x00640303, 0x81C, 0x00660303, 0x81C, 0x00680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x90011000, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFB000303, 0x81C, 0xFA020303,
- 0x81C, 0xF9040303, 0x81C, 0xF8060303, 0x81C, 0xF7080303,
- 0x81C, 0xF60A0303, 0x81C, 0xF50C0303, 0x81C, 0xF40E0303,
- 0x81C, 0xF3100303, 0x81C, 0xF2120303, 0x81C, 0xF1140303,
- 0x81C, 0xF0160303, 0x81C, 0xEE180303, 0x81C, 0xED1A0303,
- 0x81C, 0xEC1C0303, 0x81C, 0xEB1E0303, 0x81C, 0xEA200303,
- 0x81C, 0xE9220303, 0x81C, 0xE8240303, 0x81C, 0xE7260303,
- 0x81C, 0xE6280303, 0x81C, 0xE52A0303, 0x81C, 0xE42C0303,
- 0x81C, 0xE32E0303, 0x81C, 0xE2300303, 0x81C, 0xE1320303,
- 0x81C, 0xC6340303, 0x81C, 0xC5360303, 0x81C, 0xC4380303,
- 0x81C, 0xC33A0303, 0x81C, 0xA63C0303, 0x81C, 0xA53E0303,
- 0x81C, 0xA4400303, 0x81C, 0xA3420303, 0x81C, 0xA2440303,
- 0x81C, 0xA1460303, 0x81C, 0x83480303, 0x81C, 0x824A0303,
- 0x81C, 0x814C0303, 0x81C, 0x804E0303, 0x81C, 0x63500303,
- 0x81C, 0x62520303, 0x81C, 0x61540303, 0x81C, 0x42560303,
- 0x81C, 0x41580303, 0x81C, 0x405A0303, 0x81C, 0x225C0303,
- 0x81C, 0x215E0303, 0x81C, 0x20600303, 0x81C, 0x04620303,
- 0x81C, 0x03640303, 0x81C, 0x02660303, 0x81C, 0x01680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFB000303, 0x81C, 0xFA020303,
- 0x81C, 0xF9040303, 0x81C, 0xF8060303, 0x81C, 0xF7080303,
- 0x81C, 0xF60A0303, 0x81C, 0xF50C0303, 0x81C, 0xF40E0303,
- 0x81C, 0xF3100303, 0x81C, 0xF2120303, 0x81C, 0xF1140303,
- 0x81C, 0xF0160303, 0x81C, 0xEF180303, 0x81C, 0xEE1A0303,
- 0x81C, 0xED1C0303, 0x81C, 0xEC1E0303, 0x81C, 0xEB200303,
- 0x81C, 0xEA220303, 0x81C, 0xE9240303, 0x81C, 0xE8260303,
- 0x81C, 0xE7280303, 0x81C, 0xE62A0303, 0x81C, 0xE52C0303,
- 0x81C, 0xE42E0303, 0x81C, 0xE3300303, 0x81C, 0xE2320303,
- 0x81C, 0xE1340303, 0x81C, 0xC5360303, 0x81C, 0xC4380303,
- 0x81C, 0xC33A0303, 0x81C, 0xC23C0303, 0x81C, 0xC13E0303,
- 0x81C, 0xA4400303, 0x81C, 0xA3420303, 0x81C, 0xA2440303,
- 0x81C, 0xA1460303, 0x81C, 0x83480303, 0x81C, 0x824A0303,
- 0x81C, 0x814C0303, 0x81C, 0x804E0303, 0x81C, 0x64500303,
- 0x81C, 0x63520303, 0x81C, 0x62540303, 0x81C, 0x61560303,
- 0x81C, 0x60580303, 0x81C, 0x235A0303, 0x81C, 0x225C0303,
- 0x81C, 0x215E0303, 0x81C, 0x20600303, 0x81C, 0x04620303,
- 0x81C, 0x03640303, 0x81C, 0x02660303, 0x81C, 0x01680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFC000303, 0x81C, 0xFB020303,
- 0x81C, 0xFA040303, 0x81C, 0xF9060303, 0x81C, 0xF8080303,
- 0x81C, 0xF70A0303, 0x81C, 0xF60C0303, 0x81C, 0xF50E0303,
- 0x81C, 0xF4100303, 0x81C, 0xF3120303, 0x81C, 0xF2140303,
- 0x81C, 0xF1160303, 0x81C, 0xF0180303, 0x81C, 0xEF1A0303,
- 0x81C, 0xEE1C0303, 0x81C, 0xED1E0303, 0x81C, 0xEC200303,
- 0x81C, 0xEB220303, 0x81C, 0xEA240303, 0x81C, 0xE9260303,
- 0x81C, 0xE8280303, 0x81C, 0xE72A0303, 0x81C, 0xE62C0303,
- 0x81C, 0xE52E0303, 0x81C, 0xE4300303, 0x81C, 0xE3320303,
- 0x81C, 0xE2340303, 0x81C, 0xC6360303, 0x81C, 0xC5380303,
- 0x81C, 0xC43A0303, 0x81C, 0xC33C0303, 0x81C, 0xA63E0303,
- 0x81C, 0xA5400303, 0x81C, 0xA4420303, 0x81C, 0xA3440303,
- 0x81C, 0xA2460303, 0x81C, 0x84480303, 0x81C, 0x834A0303,
- 0x81C, 0x824C0303, 0x81C, 0x814E0303, 0x81C, 0x80500303,
- 0x81C, 0x63520303, 0x81C, 0x62540303, 0x81C, 0x61560303,
- 0x81C, 0x60580303, 0x81C, 0x225A0303, 0x81C, 0x055C0303,
- 0x81C, 0x045E0303, 0x81C, 0x03600303, 0x81C, 0x02620303,
- 0x81C, 0x01640303, 0x81C, 0x00660303, 0x81C, 0x00680303,
- 0x81C, 0x006A0303, 0x81C, 0x006C0303, 0x81C, 0x006E0303,
- 0x81C, 0x00700303, 0x81C, 0x00720303, 0x81C, 0x00740303,
- 0x81C, 0x00760303, 0x81C, 0x00780303, 0x81C, 0x007A0303,
- 0x81C, 0x007C0303, 0x81C, 0x007E0303, 0xA0000000, 0x00000000,
- 0x81C, 0xFC000303, 0x81C, 0xFB020303, 0x81C, 0xFA040303,
- 0x81C, 0xF9060303, 0x81C, 0xF8080303, 0x81C, 0xF70A0303,
- 0x81C, 0xF60C0303, 0x81C, 0xF50E0303, 0x81C, 0xF4100303,
- 0x81C, 0xF3120303, 0x81C, 0xF2140303, 0x81C, 0xF1160303,
- 0x81C, 0xF0180303, 0x81C, 0xEF1A0303, 0x81C, 0xEE1C0303,
- 0x81C, 0xED1E0303, 0x81C, 0xEC200303, 0x81C, 0xEB220303,
- 0x81C, 0xEA240303, 0x81C, 0xE9260303, 0x81C, 0xE8280303,
- 0x81C, 0xE72A0303, 0x81C, 0xE62C0303, 0x81C, 0xE52E0303,
- 0x81C, 0xE4300303, 0x81C, 0xE3320303, 0x81C, 0xE2340303,
- 0x81C, 0xC6360303, 0x81C, 0xC5380303, 0x81C, 0xC43A0303,
- 0x81C, 0xC33C0303, 0x81C, 0xA63E0303, 0x81C, 0xA5400303,
- 0x81C, 0xA4420303, 0x81C, 0xA3440303, 0x81C, 0xA2460303,
- 0x81C, 0x84480303, 0x81C, 0x834A0303, 0x81C, 0x824C0303,
- 0x81C, 0x814E0303, 0x81C, 0x80500303, 0x81C, 0x63520303,
- 0x81C, 0x62540303, 0x81C, 0x61560303, 0x81C, 0x60580303,
- 0x81C, 0x235A0303, 0x81C, 0x225C0303, 0x81C, 0x215E0303,
- 0x81C, 0x20600303, 0x81C, 0x03620303, 0x81C, 0x02640303,
- 0x81C, 0x01660303, 0x81C, 0x00680303, 0x81C, 0x006A0303,
- 0x81C, 0x006C0303, 0x81C, 0x006E0303, 0x81C, 0x00700303,
- 0x81C, 0x00720303, 0x81C, 0x00740303, 0x81C, 0x00760303,
- 0x81C, 0x00780303, 0x81C, 0x007A0303, 0x81C, 0x007C0303,
- 0x81C, 0x007E0303, 0xB0000000, 0x00000000, 0x8000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x81C, 0xFF000403, 0x81C, 0xF5000403,
- 0x81C, 0xF4020403, 0x81C, 0xF3040403, 0x81C, 0xF2060403,
- 0x81C, 0xF1080403, 0x81C, 0xF00A0403, 0x81C, 0xEF0C0403,
- 0x81C, 0xEE0E0403, 0x81C, 0xED100403, 0x81C, 0xEC120403,
- 0x81C, 0xEB140403, 0x81C, 0xEA160403, 0x81C, 0xE9180403,
- 0x81C, 0xE81A0403, 0x81C, 0xE71C0403, 0x81C, 0xE61E0403,
- 0x81C, 0xE5200403, 0x81C, 0xE4220403, 0x81C, 0xE3240403,
- 0x81C, 0xE2260403, 0x81C, 0xE1280403, 0x81C, 0xE02A0403,
- 0x81C, 0xC32C0403, 0x81C, 0xC22E0403, 0x81C, 0xC1300403,
- 0x81C, 0xC0320403, 0x81C, 0xA4340403, 0x81C, 0xA3360403,
- 0x81C, 0xA2380403, 0x81C, 0xA13A0403, 0x81C, 0xA03C0403,
- 0x81C, 0x823E0403, 0x81C, 0x81400403, 0x81C, 0x80420403,
- 0x81C, 0x64440403, 0x81C, 0x63460403, 0x81C, 0x62480403,
- 0x81C, 0x614A0403, 0x81C, 0x604C0403, 0x81C, 0x454E0403,
- 0x81C, 0x44500403, 0x81C, 0x43520403, 0x81C, 0x42540403,
- 0x81C, 0x41560403, 0x81C, 0x40580403, 0x81C, 0x055A0403,
- 0x81C, 0x045C0403, 0x81C, 0x035E0403, 0x81C, 0x02600403,
- 0x81C, 0x01620403, 0x81C, 0x00640403, 0x81C, 0x00660403,
- 0x81C, 0x00680403, 0x81C, 0x006A0403, 0x81C, 0x006C0403,
- 0x81C, 0x006E0403, 0x81C, 0x00700403, 0x81C, 0x00720403,
- 0x81C, 0x00740403, 0x81C, 0x00760403, 0x81C, 0x00780403,
- 0x81C, 0x007A0403, 0x81C, 0x007C0403, 0x81C, 0x007E0403,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x81C, 0xFF000403,
- 0x81C, 0xF5000403, 0x81C, 0xF4020403, 0x81C, 0xF3040403,
- 0x81C, 0xF2060403, 0x81C, 0xF1080403, 0x81C, 0xF00A0403,
- 0x81C, 0xEF0C0403, 0x81C, 0xEE0E0403, 0x81C, 0xED100403,
- 0x81C, 0xEC120403, 0x81C, 0xEB140403, 0x81C, 0xEA160403,
- 0x81C, 0xE9180403, 0x81C, 0xE81A0403, 0x81C, 0xE71C0403,
- 0x81C, 0xE61E0403, 0x81C, 0xE5200403, 0x81C, 0xE4220403,
- 0x81C, 0xE3240403, 0x81C, 0xE2260403, 0x81C, 0xE1280403,
- 0x81C, 0xE02A0403, 0x81C, 0xC32C0403, 0x81C, 0xC22E0403,
- 0x81C, 0xC1300403, 0x81C, 0xC0320403, 0x81C, 0xA4340403,
- 0x81C, 0xA3360403, 0x81C, 0xA2380403, 0x81C, 0xA13A0403,
- 0x81C, 0xA03C0403, 0x81C, 0x823E0403, 0x81C, 0x81400403,
- 0x81C, 0x80420403, 0x81C, 0x64440403, 0x81C, 0x63460403,
- 0x81C, 0x62480403, 0x81C, 0x614A0403, 0x81C, 0x604C0403,
- 0x81C, 0x454E0403, 0x81C, 0x44500403, 0x81C, 0x43520403,
- 0x81C, 0x42540403, 0x81C, 0x41560403, 0x81C, 0x40580403,
- 0x81C, 0x055A0403, 0x81C, 0x045C0403, 0x81C, 0x035E0403,
- 0x81C, 0x02600403, 0x81C, 0x01620403, 0x81C, 0x00640403,
- 0x81C, 0x00660403, 0x81C, 0x00680403, 0x81C, 0x006A0403,
- 0x81C, 0x006C0403, 0x81C, 0x006E0403, 0x81C, 0x00700403,
- 0x81C, 0x00720403, 0x81C, 0x00740403, 0x81C, 0x00760403,
- 0x81C, 0x00780403, 0x81C, 0x007A0403, 0x81C, 0x007C0403,
- 0x81C, 0x007E0403, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x81C, 0xFF000403, 0x81C, 0xF5000403, 0x81C, 0xF4020403,
- 0x81C, 0xF3040403, 0x81C, 0xF2060403, 0x81C, 0xF1080403,
- 0x81C, 0xF00A0403, 0x81C, 0xEF0C0403, 0x81C, 0xEE0E0403,
- 0x81C, 0xED100403, 0x81C, 0xEC120403, 0x81C, 0xEB140403,
- 0x81C, 0xEA160403, 0x81C, 0xE9180403, 0x81C, 0xE81A0403,
- 0x81C, 0xE71C0403, 0x81C, 0xE61E0403, 0x81C, 0xE5200403,
- 0x81C, 0xE4220403, 0x81C, 0xE3240403, 0x81C, 0xE2260403,
- 0x81C, 0xE1280403, 0x81C, 0xE02A0403, 0x81C, 0xC32C0403,
- 0x81C, 0xC22E0403, 0x81C, 0xC1300403, 0x81C, 0xC0320403,
- 0x81C, 0xA4340403, 0x81C, 0xA3360403, 0x81C, 0xA2380403,
- 0x81C, 0xA13A0403, 0x81C, 0xA03C0403, 0x81C, 0x823E0403,
- 0x81C, 0x81400403, 0x81C, 0x80420403, 0x81C, 0x64440403,
- 0x81C, 0x63460403, 0x81C, 0x62480403, 0x81C, 0x614A0403,
- 0x81C, 0x604C0403, 0x81C, 0x454E0403, 0x81C, 0x44500403,
- 0x81C, 0x43520403, 0x81C, 0x42540403, 0x81C, 0x41560403,
- 0x81C, 0x40580403, 0x81C, 0x055A0403, 0x81C, 0x045C0403,
- 0x81C, 0x035E0403, 0x81C, 0x02600403, 0x81C, 0x01620403,
- 0x81C, 0x00640403, 0x81C, 0x00660403, 0x81C, 0x00680403,
- 0x81C, 0x006A0403, 0x81C, 0x006C0403, 0x81C, 0x006E0403,
- 0x81C, 0x00700403, 0x81C, 0x00720403, 0x81C, 0x00740403,
- 0x81C, 0x00760403, 0x81C, 0x00780403, 0x81C, 0x007A0403,
- 0x81C, 0x007C0403, 0x81C, 0x007E0403, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFF000403, 0x81C, 0xF5000403,
- 0x81C, 0xF4020403, 0x81C, 0xF3040403, 0x81C, 0xF2060403,
- 0x81C, 0xF1080403, 0x81C, 0xF00A0403, 0x81C, 0xEF0C0403,
- 0x81C, 0xEE0E0403, 0x81C, 0xED100403, 0x81C, 0xEC120403,
- 0x81C, 0xEB140403, 0x81C, 0xEA160403, 0x81C, 0xE9180403,
- 0x81C, 0xE81A0403, 0x81C, 0xE71C0403, 0x81C, 0xE61E0403,
- 0x81C, 0xE5200403, 0x81C, 0xE4220403, 0x81C, 0xE3240403,
- 0x81C, 0xE2260403, 0x81C, 0xE1280403, 0x81C, 0xE02A0403,
- 0x81C, 0xC32C0403, 0x81C, 0xC22E0403, 0x81C, 0xC1300403,
- 0x81C, 0xC0320403, 0x81C, 0xA4340403, 0x81C, 0xA3360403,
- 0x81C, 0xA2380403, 0x81C, 0xA13A0403, 0x81C, 0xA03C0403,
- 0x81C, 0x823E0403, 0x81C, 0x81400403, 0x81C, 0x80420403,
- 0x81C, 0x64440403, 0x81C, 0x63460403, 0x81C, 0x62480403,
- 0x81C, 0x614A0403, 0x81C, 0x604C0403, 0x81C, 0x454E0403,
- 0x81C, 0x44500403, 0x81C, 0x43520403, 0x81C, 0x42540403,
- 0x81C, 0x41560403, 0x81C, 0x40580403, 0x81C, 0x055A0403,
- 0x81C, 0x045C0403, 0x81C, 0x035E0403, 0x81C, 0x02600403,
- 0x81C, 0x01620403, 0x81C, 0x00640403, 0x81C, 0x00660403,
- 0x81C, 0x00680403, 0x81C, 0x006A0403, 0x81C, 0x006C0403,
- 0x81C, 0x006E0403, 0x81C, 0x00700403, 0x81C, 0x00720403,
- 0x81C, 0x00740403, 0x81C, 0x00760403, 0x81C, 0x00780403,
- 0x81C, 0x007A0403, 0x81C, 0x007C0403, 0x81C, 0x007E0403,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFF000403,
- 0x81C, 0xFF000403, 0x81C, 0xFF020403, 0x81C, 0xFE040403,
- 0x81C, 0xFD060403, 0x81C, 0xFC080403, 0x81C, 0xFB0A0403,
- 0x81C, 0xFA0C0403, 0x81C, 0xF90E0403, 0x81C, 0xF8100403,
- 0x81C, 0xF7120403, 0x81C, 0xF6140403, 0x81C, 0xF5160403,
- 0x81C, 0xF4180403, 0x81C, 0xF31A0403, 0x81C, 0xF21C0403,
- 0x81C, 0xD51E0403, 0x81C, 0xD4200403, 0x81C, 0xD3220403,
- 0x81C, 0xD2240403, 0x81C, 0xB6260403, 0x81C, 0xB5280403,
- 0x81C, 0xB42A0403, 0x81C, 0xB32C0403, 0x81C, 0xB22E0403,
- 0x81C, 0xB1300403, 0x81C, 0xB0320403, 0x81C, 0xAF340403,
- 0x81C, 0xAE360403, 0x81C, 0xAD380403, 0x81C, 0xAC3A0403,
- 0x81C, 0xAB3C0403, 0x81C, 0xAA3E0403, 0x81C, 0xA9400403,
- 0x81C, 0xA8420403, 0x81C, 0xA7440403, 0x81C, 0xA6460403,
- 0x81C, 0xA5480403, 0x81C, 0xA44A0403, 0x81C, 0xA34C0403,
- 0x81C, 0x854E0403, 0x81C, 0x84500403, 0x81C, 0x83520403,
- 0x81C, 0x82540403, 0x81C, 0x81560403, 0x81C, 0x80580403,
- 0x81C, 0x485A0403, 0x81C, 0x475C0403, 0x81C, 0x465E0403,
- 0x81C, 0x45600403, 0x81C, 0x44620403, 0x81C, 0x0A640403,
- 0x81C, 0x09660403, 0x81C, 0x08680403, 0x81C, 0x076A0403,
- 0x81C, 0x066C0403, 0x81C, 0x056E0403, 0x81C, 0x04700403,
- 0x81C, 0x03720403, 0x81C, 0x02740403, 0x81C, 0x01760403,
- 0x81C, 0x00780403, 0x81C, 0x007A0403, 0x81C, 0x007C0403,
- 0x81C, 0x007E0403, 0x90012100, 0x00000000, 0x40000000, 0x00000000,
- 0x81C, 0xFF000403, 0x81C, 0xFF000403, 0x81C, 0xFF020403,
- 0x81C, 0xFE040403, 0x81C, 0xFD060403, 0x81C, 0xFC080403,
- 0x81C, 0xFB0A0403, 0x81C, 0xFA0C0403, 0x81C, 0xF90E0403,
- 0x81C, 0xF8100403, 0x81C, 0xF7120403, 0x81C, 0xF6140403,
- 0x81C, 0xF5160403, 0x81C, 0xF4180403, 0x81C, 0xF31A0403,
- 0x81C, 0xF21C0403, 0x81C, 0xD51E0403, 0x81C, 0xD4200403,
- 0x81C, 0xD3220403, 0x81C, 0xD2240403, 0x81C, 0xB6260403,
- 0x81C, 0xB5280403, 0x81C, 0xB42A0403, 0x81C, 0xB32C0403,
- 0x81C, 0xB22E0403, 0x81C, 0xB1300403, 0x81C, 0xB0320403,
- 0x81C, 0xAF340403, 0x81C, 0xAE360403, 0x81C, 0xAD380403,
- 0x81C, 0xAC3A0403, 0x81C, 0xAB3C0403, 0x81C, 0xAA3E0403,
- 0x81C, 0xA9400403, 0x81C, 0xA8420403, 0x81C, 0xA7440403,
- 0x81C, 0xA6460403, 0x81C, 0xA5480403, 0x81C, 0xA44A0403,
- 0x81C, 0xA34C0403, 0x81C, 0x854E0403, 0x81C, 0x84500403,
- 0x81C, 0x83520403, 0x81C, 0x82540403, 0x81C, 0x81560403,
- 0x81C, 0x80580403, 0x81C, 0x485A0403, 0x81C, 0x475C0403,
- 0x81C, 0x465E0403, 0x81C, 0x45600403, 0x81C, 0x44620403,
- 0x81C, 0x0A640403, 0x81C, 0x09660403, 0x81C, 0x08680403,
- 0x81C, 0x076A0403, 0x81C, 0x066C0403, 0x81C, 0x056E0403,
- 0x81C, 0x04700403, 0x81C, 0x03720403, 0x81C, 0x02740403,
- 0x81C, 0x01760403, 0x81C, 0x00780403, 0x81C, 0x007A0403,
- 0x81C, 0x007C0403, 0x81C, 0x007E0403, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFF000403, 0x81C, 0xF5000403,
- 0x81C, 0xF4020403, 0x81C, 0xF3040403, 0x81C, 0xF2060403,
- 0x81C, 0xF1080403, 0x81C, 0xF00A0403, 0x81C, 0xEF0C0403,
- 0x81C, 0xEE0E0403, 0x81C, 0xED100403, 0x81C, 0xEC120403,
- 0x81C, 0xEB140403, 0x81C, 0xEA160403, 0x81C, 0xE9180403,
- 0x81C, 0xE81A0403, 0x81C, 0xE71C0403, 0x81C, 0xE61E0403,
- 0x81C, 0xE5200403, 0x81C, 0xE4220403, 0x81C, 0xE3240403,
- 0x81C, 0xE2260403, 0x81C, 0xE1280403, 0x81C, 0xE02A0403,
- 0x81C, 0xC32C0403, 0x81C, 0xC22E0403, 0x81C, 0xC1300403,
- 0x81C, 0xC0320403, 0x81C, 0xA4340403, 0x81C, 0xA3360403,
- 0x81C, 0xA2380403, 0x81C, 0xA13A0403, 0x81C, 0xA03C0403,
- 0x81C, 0x823E0403, 0x81C, 0x81400403, 0x81C, 0x80420403,
- 0x81C, 0x64440403, 0x81C, 0x63460403, 0x81C, 0x62480403,
- 0x81C, 0x614A0403, 0x81C, 0x604C0403, 0x81C, 0x454E0403,
- 0x81C, 0x44500403, 0x81C, 0x43520403, 0x81C, 0x42540403,
- 0x81C, 0x41560403, 0x81C, 0x40580403, 0x81C, 0x055A0403,
- 0x81C, 0x045C0403, 0x81C, 0x035E0403, 0x81C, 0x02600403,
- 0x81C, 0x01620403, 0x81C, 0x00640403, 0x81C, 0x00660403,
- 0x81C, 0x00680403, 0x81C, 0x006A0403, 0x81C, 0x006C0403,
- 0x81C, 0x006E0403, 0x81C, 0x00700403, 0x81C, 0x00720403,
- 0x81C, 0x00740403, 0x81C, 0x00760403, 0x81C, 0x00780403,
- 0x81C, 0x007A0403, 0x81C, 0x007C0403, 0x81C, 0x007E0403,
- 0x90011000, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFF000403,
- 0x81C, 0xFF000403, 0x81C, 0xFF020403, 0x81C, 0xFE040403,
- 0x81C, 0xFD060403, 0x81C, 0xFC080403, 0x81C, 0xFB0A0403,
- 0x81C, 0xFA0C0403, 0x81C, 0xF90E0403, 0x81C, 0xF8100403,
- 0x81C, 0xF7120403, 0x81C, 0xF6140403, 0x81C, 0xF5160403,
- 0x81C, 0xF4180403, 0x81C, 0xF31A0403, 0x81C, 0xF21C0403,
- 0x81C, 0xD51E0403, 0x81C, 0xD4200403, 0x81C, 0xD3220403,
- 0x81C, 0xD2240403, 0x81C, 0xB6260403, 0x81C, 0xB5280403,
- 0x81C, 0xB42A0403, 0x81C, 0xB32C0403, 0x81C, 0xB22E0403,
- 0x81C, 0xB1300403, 0x81C, 0xB0320403, 0x81C, 0xAF340403,
- 0x81C, 0xAE360403, 0x81C, 0xAD380403, 0x81C, 0xAC3A0403,
- 0x81C, 0xAB3C0403, 0x81C, 0xAA3E0403, 0x81C, 0xA9400403,
- 0x81C, 0xA8420403, 0x81C, 0xA7440403, 0x81C, 0xA6460403,
- 0x81C, 0xA5480403, 0x81C, 0xA44A0403, 0x81C, 0xA34C0403,
- 0x81C, 0x854E0403, 0x81C, 0x84500403, 0x81C, 0x83520403,
- 0x81C, 0x82540403, 0x81C, 0x81560403, 0x81C, 0x80580403,
- 0x81C, 0x485A0403, 0x81C, 0x475C0403, 0x81C, 0x465E0403,
- 0x81C, 0x45600403, 0x81C, 0x44620403, 0x81C, 0x0A640403,
- 0x81C, 0x09660403, 0x81C, 0x08680403, 0x81C, 0x076A0403,
- 0x81C, 0x066C0403, 0x81C, 0x056E0403, 0x81C, 0x04700403,
- 0x81C, 0x03720403, 0x81C, 0x02740403, 0x81C, 0x01760403,
- 0x81C, 0x00780403, 0x81C, 0x007A0403, 0x81C, 0x007C0403,
- 0x81C, 0x007E0403, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x81C, 0xFF000403, 0x81C, 0xFF000403, 0x81C, 0xFF020403,
- 0x81C, 0xFE040403, 0x81C, 0xFD060403, 0x81C, 0xFC080403,
- 0x81C, 0xFB0A0403, 0x81C, 0xFA0C0403, 0x81C, 0xF90E0403,
- 0x81C, 0xF8100403, 0x81C, 0xF7120403, 0x81C, 0xF6140403,
- 0x81C, 0xF5160403, 0x81C, 0xF4180403, 0x81C, 0xF31A0403,
- 0x81C, 0xF21C0403, 0x81C, 0xD51E0403, 0x81C, 0xD4200403,
- 0x81C, 0xD3220403, 0x81C, 0xD2240403, 0x81C, 0xB6260403,
- 0x81C, 0xB5280403, 0x81C, 0xB42A0403, 0x81C, 0xB32C0403,
- 0x81C, 0xB22E0403, 0x81C, 0xB1300403, 0x81C, 0xB0320403,
- 0x81C, 0xAF340403, 0x81C, 0xAE360403, 0x81C, 0xAD380403,
- 0x81C, 0xAC3A0403, 0x81C, 0xAB3C0403, 0x81C, 0xAA3E0403,
- 0x81C, 0xA9400403, 0x81C, 0xA8420403, 0x81C, 0xA7440403,
- 0x81C, 0xA6460403, 0x81C, 0xA5480403, 0x81C, 0xA44A0403,
- 0x81C, 0xA34C0403, 0x81C, 0x854E0403, 0x81C, 0x84500403,
- 0x81C, 0x83520403, 0x81C, 0x82540403, 0x81C, 0x81560403,
- 0x81C, 0x80580403, 0x81C, 0x485A0403, 0x81C, 0x475C0403,
- 0x81C, 0x465E0403, 0x81C, 0x45600403, 0x81C, 0x44620403,
- 0x81C, 0x0A640403, 0x81C, 0x09660403, 0x81C, 0x08680403,
- 0x81C, 0x076A0403, 0x81C, 0x066C0403, 0x81C, 0x056E0403,
- 0x81C, 0x04700403, 0x81C, 0x03720403, 0x81C, 0x02740403,
- 0x81C, 0x01760403, 0x81C, 0x00780403, 0x81C, 0x007A0403,
- 0x81C, 0x007C0403, 0x81C, 0x007E0403, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x81C, 0xFF000403, 0x81C, 0xFF000403,
- 0x81C, 0xFF020403, 0x81C, 0xFE040403, 0x81C, 0xFD060403,
- 0x81C, 0xFC080403, 0x81C, 0xFB0A0403, 0x81C, 0xFA0C0403,
- 0x81C, 0xF90E0403, 0x81C, 0xF8100403, 0x81C, 0xF7120403,
- 0x81C, 0xF6140403, 0x81C, 0xF5160403, 0x81C, 0xF4180403,
- 0x81C, 0xF31A0403, 0x81C, 0xF21C0403, 0x81C, 0xD51E0403,
- 0x81C, 0xD4200403, 0x81C, 0xD3220403, 0x81C, 0xD2240403,
- 0x81C, 0xB6260403, 0x81C, 0xB5280403, 0x81C, 0xB42A0403,
- 0x81C, 0xB32C0403, 0x81C, 0xB22E0403, 0x81C, 0xB1300403,
- 0x81C, 0xB0320403, 0x81C, 0xAF340403, 0x81C, 0xAE360403,
- 0x81C, 0xAD380403, 0x81C, 0xAC3A0403, 0x81C, 0xAB3C0403,
- 0x81C, 0xAA3E0403, 0x81C, 0xA9400403, 0x81C, 0xA8420403,
- 0x81C, 0xA7440403, 0x81C, 0xA6460403, 0x81C, 0xA5480403,
- 0x81C, 0xA44A0403, 0x81C, 0xA34C0403, 0x81C, 0x854E0403,
- 0x81C, 0x84500403, 0x81C, 0x83520403, 0x81C, 0x82540403,
- 0x81C, 0x81560403, 0x81C, 0x80580403, 0x81C, 0x485A0403,
- 0x81C, 0x475C0403, 0x81C, 0x465E0403, 0x81C, 0x45600403,
- 0x81C, 0x44620403, 0x81C, 0x0A640403, 0x81C, 0x09660403,
- 0x81C, 0x08680403, 0x81C, 0x076A0403, 0x81C, 0x066C0403,
- 0x81C, 0x056E0403, 0x81C, 0x04700403, 0x81C, 0x03720403,
- 0x81C, 0x02740403, 0x81C, 0x01760403, 0x81C, 0x00780403,
- 0x81C, 0x007A0403, 0x81C, 0x007C0403, 0x81C, 0x007E0403,
- 0xA0000000, 0x00000000, 0x81C, 0xFF000403, 0x81C, 0xFF000403,
- 0x81C, 0xFF020403, 0x81C, 0xFE040403, 0x81C, 0xFD060403,
- 0x81C, 0xFC080403, 0x81C, 0xFB0A0403, 0x81C, 0xFA0C0403,
- 0x81C, 0xF90E0403, 0x81C, 0xF8100403, 0x81C, 0xF7120403,
- 0x81C, 0xF6140403, 0x81C, 0xF5160403, 0x81C, 0xF4180403,
- 0x81C, 0xF31A0403, 0x81C, 0xF21C0403, 0x81C, 0xD51E0403,
- 0x81C, 0xD4200403, 0x81C, 0xD3220403, 0x81C, 0xD2240403,
- 0x81C, 0xB6260403, 0x81C, 0xB5280403, 0x81C, 0xB42A0403,
- 0x81C, 0xB32C0403, 0x81C, 0xB22E0403, 0x81C, 0xB1300403,
- 0x81C, 0xB0320403, 0x81C, 0xAF340403, 0x81C, 0xAE360403,
- 0x81C, 0xAD380403, 0x81C, 0xAC3A0403, 0x81C, 0xAB3C0403,
- 0x81C, 0xAA3E0403, 0x81C, 0xA9400403, 0x81C, 0xA8420403,
- 0x81C, 0xA7440403, 0x81C, 0xA6460403, 0x81C, 0xA5480403,
- 0x81C, 0xA44A0403, 0x81C, 0xA34C0403, 0x81C, 0x854E0403,
- 0x81C, 0x84500403, 0x81C, 0x83520403, 0x81C, 0x82540403,
- 0x81C, 0x81560403, 0x81C, 0x80580403, 0x81C, 0x485A0403,
- 0x81C, 0x475C0403, 0x81C, 0x465E0403, 0x81C, 0x45600403,
- 0x81C, 0x44620403, 0x81C, 0x0A640403, 0x81C, 0x09660403,
- 0x81C, 0x08680403, 0x81C, 0x076A0403, 0x81C, 0x066C0403,
- 0x81C, 0x056E0403, 0x81C, 0x04700403, 0x81C, 0x03720403,
- 0x81C, 0x02740403, 0x81C, 0x01760403, 0x81C, 0x00780403,
- 0x81C, 0x007A0403, 0x81C, 0x007C0403, 0x81C, 0x007E0403,
- 0xB0000000, 0x00000000, 0xC50, 0x00000022, 0xC50, 0x00000020,
- 0xE50, 0x00000022, 0xE50, 0x00000020,
-
-};
-
-void odm_read_and_config_mp_8822b_agc_tab(struct phy_dm_struct *dm)
-{
-	u32 i = 0;
-	u8 c_cond;
-	bool is_matched = true, is_skipped = false;
-	u32 *array = array_mp_8822b_agc_tab;
-
-	u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
-
-	ODM_RT_TRACE(dm, ODM_COMP_INIT,
-		     "===> %s\n", __func__);
-
-	for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_agc_tab); i = i + 2) {
-		v1 = array[i];
-		v2 = array[i + 1];
-
-		if (v1 & BIT(31)) { /* positive condition */
-			c_cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28);
-			if (c_cond == COND_ENDIF) { /* end */
-				is_matched = true;
-				is_skipped = false;
-				ODM_RT_TRACE(dm, ODM_COMP_INIT, "ENDIF\n");
-			} else if (c_cond == COND_ELSE) { /* else */
-				is_matched = is_skipped ? false : true;
-				ODM_RT_TRACE(dm, ODM_COMP_INIT, "ELSE\n");
-			} else { /* if, else if */
-				pre_v1 = v1;
-				pre_v2 = v2;
-				ODM_RT_TRACE(dm, ODM_COMP_INIT,
-					     "IF or ELSE IF\n");
-			}
-		} else if (v1 & BIT(30)) { /* negative condition */
-			if (is_skipped) {
-				is_matched = false;
-				continue;
-			}
-
-			if (check_positive(dm, pre_v1, pre_v2, v1, v2)) {
-				is_matched = true;
-				is_skipped = true;
-			} else {
-				is_matched = false;
-				is_skipped = false;
-			}
-		} else if (is_matched) {
-			odm_config_bb_agc_8822b(dm, v1, MASKDWORD, v2);
-		}
-	}
-}
-
-u32 odm_get_version_mp_8822b_agc_tab(void) { return 67; }
-
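The deleted array above is not a flat list of (address, value) pairs. Entries whose first word has BIT(31) set are branch markers (bits 29:28 distinguish an if/else-if header from the 0xA0000000 "else" and 0xB0000000 "endif" markers that appear in the table), entries with BIT(30) set (the 0x40000000 rows) carry the condition that check_positive() evaluates, and every remaining pair is written to the baseband only while the current branch matches. The stand-alone sketch below walks a table with the same encoding, purely to illustrate how the removed parser interprets it; the COND_* numeric values, the cond_matches() predicate and the demo table are illustrative stand-ins, not the driver's actual definitions.

/*
 * Illustrative, self-contained sketch of the condition-encoded table walk
 * performed by odm_read_and_config_mp_8822b_agc_tab().  COND_* values and
 * the match predicate are simplified stand-ins, not the driver's real
 * definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define COND_ELSE	2	/* assumed encoding of the "else" marker  */
#define COND_ENDIF	3	/* assumed encoding of the "endif" marker */

/* Stand-in for check_positive(): decide whether this branch applies. */
static bool cond_matches(uint32_t hdr_v1, uint32_t hdr_v2,
			 uint32_t chk_v1, uint32_t chk_v2)
{
	(void)hdr_v1; (void)hdr_v2; (void)chk_v1; (void)chk_v2;
	return true;	/* the real driver compares platform properties here */
}

/* Stand-in for odm_config_bb_agc_8822b(): apply one register write. */
static void write_reg(uint32_t addr, uint32_t data)
{
	printf("write 0x%03X = 0x%08X\n", (unsigned)addr, (unsigned)data);
}

static void walk_table(const uint32_t *tbl, size_t len)
{
	bool matched = true, skipped = false;
	uint32_t pre_v1 = 0, pre_v2 = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2) {
		uint32_t v1 = tbl[i], v2 = tbl[i + 1];

		if (v1 & BIT(31)) {		/* branch marker */
			uint8_t c = (v1 & (BIT(29) | BIT(28))) >> 28;

			if (c == COND_ENDIF) {		/* end of if/else chain */
				matched = true;
				skipped = false;
			} else if (c == COND_ELSE) {	/* fallback branch */
				matched = !skipped;
			} else {			/* if / else-if header */
				pre_v1 = v1;
				pre_v2 = v2;
			}
		} else if (v1 & BIT(30)) {	/* condition check entry */
			if (skipped) {
				matched = false;
			} else if (cond_matches(pre_v1, pre_v2, v1, v2)) {
				matched = true;
				skipped = true;	/* later branches are ignored */
			} else {
				matched = false;
			}
		} else if (matched) {		/* plain (address, value) pair */
			write_reg(v1, v2);
		}
	}
}

int main(void)
{
	/* Tiny fabricated table: one unconditional write, then one branch. */
	static const uint32_t demo[] = {
		0x81C, 0xFF000403,	/* always written                  */
		0x80000000, 0x00000000,	/* "if" header (BIT(31), type 0)   */
		0x40000000, 0x00000000,	/* condition check (BIT(30))       */
		0x81C, 0xF5000403,	/* written only if condition holds */
		0xA0000000, 0x00000000,	/* "else" marker                   */
		0x81C, 0xFD000203,	/* written otherwise               */
		0xB0000000, 0x00000000,	/* "endif" marker                  */
	};

	walk_table(demo, sizeof(demo) / sizeof(demo[0]));
	return 0;
}

Built with any C compiler, the demo writes the unconditional pair and the first matching branch and skips the fallback, mirroring how the removed parser selects between alternative AGC settings in the table above.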
-/******************************************************************************
- * phy_reg.TXT
- ******************************************************************************/
-
-static u32 array_mp_8822b_phy_reg[] = {
- 0x800, 0x9020D010, 0x804, 0x800181A0, 0x808, 0x0E028233,
- 0x80C, 0x10000013, 0x810, 0x21101263, 0x814, 0x020C3D10,
- 0x818, 0x84A10385, 0x81C, 0x1E1E081F, 0x820, 0x0001AAAA,
- 0x824, 0x00030FE0, 0x828, 0x0000CCCC, 0x82C, 0x75CB7010,
- 0x830, 0x79A0EA2A, 0x834, 0x072E6986, 0x838, 0x87766441,
- 0x83C, 0x9194B2B6, 0x840, 0x171740E0, 0x844, 0x4D3D7CDB,
- 0x848, 0x4AD0408B, 0x84C, 0x6AFBF7A5, 0x850, 0x28A74706,
- 0x854, 0x0001520C, 0x858, 0x4060C000, 0x85C, 0x74010160,
- 0x860, 0x68A7C321, 0x864, 0x79F27432, 0x868, 0x8CA7A314,
- 0x86C, 0x778C2878, 0x870, 0x77777777, 0x874, 0x27612C2E,
- 0x878, 0xC0003152, 0x87C, 0x5C8FC000, 0x880, 0x00000000,
- 0x884, 0x00000000, 0x888, 0x00000000, 0x88C, 0x00000000,
- 0x890, 0x00000000, 0x894, 0x00000000, 0x898, 0x00000000,
- 0x89C, 0x00000000, 0x8A0, 0x00000013, 0x8A4, 0x7F7F7F7F,
- 0x8A8, 0x2202033E, 0x8AC, 0xF00F000A, 0x8B0, 0x00000600,
- 0x8B4, 0x000FC080, 0x8B8, 0xEC0057F7, 0x8BC, 0xACB520A3,
- 0x8C0, 0xFFE04020, 0x8C4, 0x47C00000, 0x8C8, 0x000251A5,
- 0x8CC, 0x08108000, 0x8D0, 0x0000B800, 0x8D4, 0x860308A0,
- 0x8D8, 0x21095612, 0x8DC, 0x00000000, 0x8E0, 0x32D16777,
- 0x8E4, 0x4C098935, 0x8E8, 0xFFFFC42C, 0x8EC, 0x99999999,
- 0x8F0, 0x00009999, 0x8F4, 0x00D80FA1, 0x8F8, 0x40000080,
- 0x8FC, 0x00000130, 0x900, 0x00800000, 0x904, 0x00000000,
- 0x908, 0x00000000, 0x90C, 0xD3000000, 0x910, 0x0000FC00,
- 0x914, 0xC6380000, 0x918, 0x1C1028C0, 0x91C, 0x64B11A1C,
- 0x920, 0xE0767233, 0x924, 0x855A2500, 0x928, 0x4AB0E4E4,
- 0x92C, 0xFFFEB200, 0x930, 0xFFFFFFFE, 0x934, 0x001FFFFF,
- 0x938, 0x00008480, 0x93C, 0xE41C0642, 0x940, 0x0E470430,
- 0x944, 0x00000000, 0x948, 0xAC000000, 0x94C, 0x10000083,
- 0x950, 0x32010080, 0x954, 0x84510080, 0x958, 0x00000001,
- 0x95C, 0x04248000, 0x960, 0x00000000, 0x964, 0x00000000,
- 0x968, 0x00000000, 0x96C, 0x00000000, 0x970, 0x00001FFF,
- 0x974, 0x44000FFF, 0x978, 0x00000000, 0x97C, 0x00000000,
- 0x980, 0x00000000, 0x984, 0x00000000, 0x988, 0x00000000,
- 0x98C, 0x23440000, 0x990, 0x27100000, 0x994, 0xFFFF0100,
- 0x998, 0xFFFFFF5C, 0x99C, 0xFFFFFFFF, 0x9A0, 0x000000FF,
- 0x9A4, 0x80000088, 0x9A8, 0x0C2F0000, 0x9AC, 0x01560000,
- 0x9B0, 0x70000000, 0x9B4, 0x00000000, 0x9B8, 0x00000000,
- 0x9BC, 0x00000000, 0x9C0, 0x00000000, 0x9C4, 0x00000000,
- 0x9C8, 0x00000000, 0x9CC, 0x00000000, 0x9D0, 0x00000000,
- 0x9D4, 0x00000000, 0x9D8, 0x00000000, 0x9DC, 0x00000000,
- 0x9E0, 0x00000000, 0x9E4, 0x02000402, 0x9E8, 0x000022D4,
- 0x9EC, 0x00000000, 0x9F0, 0x00010080, 0x9F4, 0x00000000,
- 0x9F8, 0x00000000, 0x9FC, 0xEFFFF7F7, 0xA00, 0x00D047C8,
- 0xA04, 0x81FF800C, 0xA08, 0x8C838300, 0xA0C, 0x2E20100F,
- 0xA10, 0x9500BB78, 0xA14, 0x1114D028, 0xA18, 0x00881117,
- 0xA1C, 0x89140F00, 0xA20, 0x84880000, 0xA24, 0x384F6577,
- 0xA28, 0x00001525, 0xA2C, 0x00920000, 0xA70, 0x101FFF00,
- 0xA74, 0x00000148, 0xA78, 0x00000900, 0xA7C, 0x225B0606,
- 0xA80, 0x218675B2, 0xA84, 0x80208C00, 0xA88, 0x040C0000,
- 0xA8C, 0x12345678, 0xA90, 0xABCDEF00, 0xA94, 0x001B1B89,
- 0xA98, 0x030A0000, 0xA9C, 0x00060000, 0xAA0, 0x00000000,
- 0xAA4, 0x0004000F, 0xAA8, 0x00000200, 0xB00, 0xE1000440,
- 0xB04, 0x00800000, 0xB08, 0xFF02030B, 0xB0C, 0x01EAA406,
- 0xB10, 0x00030690, 0xB14, 0x006000FA, 0xB18, 0x00000002,
- 0xB1C, 0x00000002, 0xB20, 0x4B00001F, 0xB24, 0x4E8E3E40,
- 0xB28, 0x03020100, 0xB2C, 0x07060504, 0xB30, 0x0B0A0908,
- 0xB34, 0x0F0E0D0C, 0xB38, 0x13121110, 0xB3C, 0x0000003A,
- 0xB40, 0x00000000, 0xB44, 0x80000000, 0xB48, 0x3F0000FA,
- 0xB4C, 0x88C80020, 0xB50, 0x00000000, 0xB54, 0x00004241,
- 0xB58, 0xE0008208, 0xB5C, 0x41EFFFF9, 0xB60, 0x00000000,
- 0xB64, 0x00200063, 0xB68, 0x0000003A, 0xB6C, 0x00000102,
- 0xB70, 0x4E6D1870, 0xB74, 0x03020100, 0xB78, 0x07060504,
- 0xB7C, 0x0B0A0908, 0xB80, 0x0F0E0D0C, 0xB84, 0x13121110,
- 0xB88, 0x00000000, 0xB8C, 0x00000000, 0xC00, 0x00000007,
- 0xC04, 0x00000020, 0xC08, 0x60403231, 0xC0C, 0x00012345,
- 0xC10, 0x00000100, 0xC14, 0x01000000, 0xC18, 0x00000000,
- 0xC1C, 0x40040053, 0xC20, 0x40020103, 0xC24, 0x00000000,
- 0xC28, 0x00000000, 0xC2C, 0x00000000, 0xC30, 0x00000000,
- 0xC34, 0x00000000, 0xC38, 0x00000000, 0xC3C, 0x00000000,
- 0xC40, 0x00000000, 0xC44, 0x00000000, 0xC48, 0x00000000,
- 0xC4C, 0x00000000, 0xC50, 0x00000020, 0xC54, 0x00000000,
- 0xC58, 0xD8020402, 0xC5C, 0xDE000120, 0xC68, 0x5979993F,
- 0xC6C, 0x0000122A, 0xC70, 0x99795979, 0xC74, 0x99795979,
- 0xC78, 0x99799979, 0xC7C, 0x99791979, 0xC80, 0x19791979,
- 0xC84, 0x19791979, 0xC88, 0x00000000, 0xC8C, 0x07000000,
- 0xC94, 0x01000100, 0xC98, 0x201C8000, 0xC9C, 0x00000000,
- 0xCA0, 0x0000A555, 0xCA4, 0x08040201, 0xCA8, 0x80402010,
- 0xCAC, 0x00000000, 0xCB0, 0x77777777, 0xCB4, 0x00007777,
- 0xCB8, 0x00000000, 0xCBC, 0x00000000, 0xCC0, 0x00000000,
- 0xCC4, 0x00000000, 0xCC8, 0x00000000, 0xCCC, 0x00000000,
- 0xCD0, 0x00000000, 0xCD4, 0x00000000, 0xCD8, 0x00000000,
- 0xCDC, 0x00000000, 0xCE0, 0x00000000, 0xCE4, 0x00000000,
- 0xCE8, 0x00000000, 0xCEC, 0x00000000, 0xE00, 0x00000007,
- 0xE04, 0x00000020, 0xE08, 0x60403231, 0xE0C, 0x00012345,
- 0xE10, 0x00000100, 0xE14, 0x01000000, 0xE18, 0x00000000,
- 0xE1C, 0x40040053, 0xE20, 0x40020103, 0xE24, 0x00000000,
- 0xE28, 0x00000000, 0xE2C, 0x00000000, 0xE30, 0x00000000,
- 0xE34, 0x00000000, 0xE38, 0x00000000, 0xE3C, 0x00000000,
- 0xE40, 0x00000000, 0xE44, 0x00000000, 0xE48, 0x00000000,
- 0xE4C, 0x00000000, 0xE50, 0x00000020, 0xE54, 0x00000000,
- 0xE58, 0xD8020402, 0xE5C, 0xDE000120, 0xE68, 0x5979993F,
- 0xE6C, 0x0000122A, 0xE70, 0x99795979, 0xE74, 0x99795979,
- 0xE78, 0x99799979, 0xE7C, 0x99791979, 0xE80, 0x19791979,
- 0xE84, 0x19791979, 0xE88, 0x00000000, 0xE8C, 0x07000000,
- 0xE94, 0x01000100, 0xE98, 0x201C8000, 0xE9C, 0x00000000,
- 0xEA0, 0x0000A555, 0xEA4, 0x08040201, 0xEA8, 0x80402010,
- 0xEAC, 0x00000000, 0xEB0, 0x77777777, 0xEB4, 0x00007777,
- 0xEB8, 0x00000000, 0xEBC, 0x00000000, 0xEC0, 0x00000000,
- 0xEC4, 0x00000000, 0xEC8, 0x00000000, 0xECC, 0x00000000,
- 0xED0, 0x00000000, 0xED4, 0x00000000, 0xED8, 0x00000000,
- 0xEDC, 0x00000000, 0xEE0, 0x00000000, 0xEE4, 0x00000000,
- 0xEE8, 0x00000000, 0xEEC, 0x00000000, 0x1900, 0x00000000,
- 0x1904, 0x00238000, 0x1908, 0x00000000, 0x190C, 0x00000000,
- 0x1910, 0x00000000, 0x1914, 0x00000000, 0x1918, 0x00000000,
- 0x191C, 0x00000000, 0x1920, 0x00000000, 0x1924, 0x00000000,
- 0x1928, 0x00000000, 0x192C, 0x00000000, 0x1930, 0x00000000,
- 0x1934, 0x00000000, 0x1938, 0x00000000, 0x193C, 0x00000000,
- 0x1940, 0x00000000, 0x1944, 0x00000000, 0x1948, 0x00000000,
- 0x194C, 0x00000000, 0x1950, 0x00000000, 0x1954, 0x00000000,
- 0x1958, 0x00000000, 0x195C, 0x00000000, 0x1960, 0x00000000,
- 0x1964, 0x00000000, 0x1968, 0x00000000, 0x196C, 0x00000000,
- 0x1970, 0x00000000, 0x1974, 0x00000000, 0x1978, 0x00000000,
- 0x197C, 0x00000000, 0x1980, 0x00000000, 0x1984, 0x03000000,
- 0x1988, 0x21401E88, 0x198C, 0x00004000, 0x1990, 0x00000000,
- 0x1994, 0x00000000, 0x1998, 0x00000053, 0x199C, 0x00000000,
- 0x19A0, 0x00000000, 0x19A4, 0x00000000, 0x19A8, 0x00000000,
- 0x19AC, 0x0E47E47F, 0x19B0, 0x00000000, 0x19B4, 0x0E47E47F,
- 0x19B8, 0x00000000, 0x19BC, 0x00000000, 0x19C0, 0x00000000,
- 0x19C4, 0x00000000, 0x19C8, 0x00000000, 0x19CC, 0x00000000,
- 0x19D0, 0x00000000, 0x19D4, 0xAAAAAAAA, 0x19D8, 0x00000AAA,
- 0x19DC, 0x133E0F37, 0x19E0, 0x00000000, 0x19E4, 0x00000000,
- 0x19E8, 0x00000000, 0x19EC, 0x00000000, 0x19F0, 0x00000000,
- 0x19F4, 0x00000000, 0x19F8, 0x01A00000, 0x19FC, 0x00000000,
- 0x1C00, 0x00000100, 0x1C04, 0x01000000, 0x1C08, 0x00000100,
- 0x1C0C, 0x01000000, 0x1C10, 0x00000100, 0x1C14, 0x01000000,
- 0x1C18, 0x00000100, 0x1C1C, 0x01000000, 0x1C20, 0x00000100,
- 0x1C24, 0x01000000, 0x1C28, 0x00000100, 0x1C2C, 0x01000000,
- 0x1C30, 0x00000100, 0x1C34, 0x01000000, 0x1C38, 0x00000000,
- 0x1C3C, 0x00000000, 0x1C40, 0x000C0100, 0x1C44, 0x000000F3,
- 0x1C48, 0x1A8249A8, 0x1C4C, 0x1461C826, 0x1C50, 0x0001469E,
- 0x1C54, 0x58D158D1, 0x1C58, 0x04490088, 0x1C5C, 0x04004400,
- 0x1C60, 0x00000000, 0x1C64, 0x04004400, 0x1C68, 0x00000100,
- 0x1C6C, 0x01000000, 0x1C70, 0x00000100, 0x1C74, 0x01000000,
- 0x1C78, 0x00000000, 0x1C7C, 0x00000010, 0x1C80, 0x5FFF5FFF,
- 0x1C84, 0x5FFF5FFF, 0x1C88, 0x5FFF5FFF, 0x1C8C, 0x5FFF5FFF,
- 0x1C90, 0x5FFF5FFF, 0x1C94, 0x5FFF5FFF, 0x1C98, 0x5FFF5FFF,
- 0x1C9C, 0x5FFF5FFF, 0x1CA0, 0x00000100, 0x1CA4, 0x01000000,
- 0x1CA8, 0x00000100, 0x1CAC, 0x5FFF5FFF, 0x1CB0, 0x00000100,
- 0x1CB4, 0x01000000, 0x1CB8, 0x00000000, 0x1CBC, 0x00000000,
- 0x1CC0, 0x00000100, 0x1CC4, 0x01000000, 0x1CC8, 0x00000100,
- 0x1CCC, 0x01000000, 0x1CD0, 0x00000100, 0x1CD4, 0x01000000,
- 0x1CD8, 0x00000100, 0x1CDC, 0x01000000, 0x1CE0, 0x00000100,
- 0x1CE4, 0x01000000, 0x1CE8, 0x00000100, 0x1CEC, 0x01000000,
- 0x1CF0, 0x00000100, 0x1CF4, 0x01000000, 0x1CF8, 0x00000000,
- 0x1CFC, 0x00000000, 0xC60, 0x70038040, 0xC60, 0x70038040,
- 0xC60, 0x70146040, 0xC60, 0x70246040, 0xC60, 0x70346040,
- 0xC60, 0x70446040, 0xC60, 0x70532040, 0xC60, 0x70646040,
- 0xC60, 0x70738040, 0xC60, 0x70838040, 0xC60, 0x70938040,
- 0xC60, 0x70A38040, 0xC60, 0x70B36040, 0xC60, 0x70C06040,
- 0xC60, 0x70D06040, 0xC60, 0x70E76040, 0xC60, 0x70F06040,
- 0xE60, 0x70038040, 0xE60, 0x70038040, 0xE60, 0x70146040,
- 0xE60, 0x70246040, 0xE60, 0x70346040, 0xE60, 0x70446040,
- 0xE60, 0x70532040, 0xE60, 0x70646040, 0xE60, 0x70738040,
- 0xE60, 0x70838040, 0xE60, 0x70938040, 0xE60, 0x70A38040,
- 0xE60, 0x70B36040, 0xE60, 0x70C06040, 0xE60, 0x70D06040,
- 0xE60, 0x70E76040, 0xE60, 0x70F06040, 0xC64, 0x00800000,
- 0xC64, 0x08800001, 0xC64, 0x00800002, 0xC64, 0x00800003,
- 0xC64, 0x00800004, 0xC64, 0x00800005, 0xC64, 0x00800006,
- 0xC64, 0x08800007, 0xC64, 0x00004000, 0xE64, 0x00800000,
- 0xE64, 0x08800001, 0xE64, 0x00800002, 0xE64, 0x00800003,
- 0xE64, 0x00800004, 0xE64, 0x00800005, 0xE64, 0x00800006,
- 0xE64, 0x08800007, 0xE64, 0x00004000, 0x1B00, 0xF8000008,
- 0x1B00, 0xF80A7008, 0x1B00, 0xF8015008, 0x1B00, 0xF8000008,
- 0x1B04, 0xE24629D2, 0x1B08, 0x00000080, 0x1B0C, 0x00000000,
- 0x1B10, 0x00010C00, 0x1B14, 0x00000000, 0x1B18, 0x00292903,
- 0x1B1C, 0xA2193C32, 0x1B20, 0x01840008, 0x1B24, 0x01860008,
- 0x1B28, 0x80060300, 0x1B2C, 0x00000003, 0x1B30, 0x20000000,
- 0x1B34, 0x00000800, 0x1B3C, 0x20000000, 0x1BC0, 0x01000000,
- 0x1BCC, 0x00000000, 0x1B00, 0xF800000A, 0x1B1C, 0xA2193C32,
- 0x1B20, 0x01840008, 0x1B24, 0x01860008, 0x1B28, 0x80060300,
- 0x1B2C, 0x00000003, 0x1B30, 0x20000000, 0x1B34, 0x00000800,
- 0x1B3C, 0x20000000, 0x1BC0, 0x01000000, 0x1BCC, 0x00000000,
- 0x1B00, 0xF8000000, 0x1B80, 0x00000007, 0x1B80, 0x090A0005,
- 0x1B80, 0x090A0007, 0x1B80, 0x0FFE0015, 0x1B80, 0x0FFE0017,
- 0x1B80, 0x00220025, 0x1B80, 0x00220027, 0x1B80, 0x00040035,
- 0x1B80, 0x00040037, 0x1B80, 0x05C00045, 0x1B80, 0x05C00047,
- 0x1B80, 0x00070055, 0x1B80, 0x00070057, 0x1B80, 0x64000065,
- 0x1B80, 0x64000067, 0x1B80, 0x00020075, 0x1B80, 0x00020077,
- 0x1B80, 0x00080085, 0x1B80, 0x00080087, 0x1B80, 0x80000095,
- 0x1B80, 0x80000097, 0x1B80, 0x090800A5, 0x1B80, 0x090800A7,
- 0x1B80, 0x0F0200B5, 0x1B80, 0x0F0200B7, 0x1B80, 0x002200C5,
- 0x1B80, 0x002200C7, 0x1B80, 0x000400D5, 0x1B80, 0x000400D7,
- 0x1B80, 0x05C000E5, 0x1B80, 0x05C000E7, 0x1B80, 0x000700F5,
- 0x1B80, 0x000700F7, 0x1B80, 0x64020105, 0x1B80, 0x64020107,
- 0x1B80, 0x00020115, 0x1B80, 0x00020117, 0x1B80, 0x00040125,
- 0x1B80, 0x00040127, 0x1B80, 0x4A000135, 0x1B80, 0x4A000137,
- 0x1B80, 0x4B040145, 0x1B80, 0x4B040147, 0x1B80, 0x85030155,
- 0x1B80, 0x85030157, 0x1B80, 0x40090165, 0x1B80, 0x40090167,
- 0x1B80, 0xE0210175, 0x1B80, 0xE0210177, 0x1B80, 0x4B050185,
- 0x1B80, 0x4B050187, 0x1B80, 0x86030195, 0x1B80, 0x86030197,
- 0x1B80, 0x400B01A5, 0x1B80, 0x400B01A7, 0x1B80, 0xE02101B5,
- 0x1B80, 0xE02101B7, 0x1B80, 0x4B0001C5, 0x1B80, 0x4B0001C7,
- 0x1B80, 0x000701D5, 0x1B80, 0x000701D7, 0x1B80, 0x4C0001E5,
- 0x1B80, 0x4C0001E7, 0x1B80, 0x000401F5, 0x1B80, 0x000401F7,
- 0x1B80, 0x30000205, 0x1B80, 0x30000207, 0x1B80, 0xFE000215,
- 0x1B80, 0xFE000217, 0x1B80, 0xFF000225, 0x1B80, 0xFF000227,
- 0x1B80, 0xE1750235, 0x1B80, 0xE1750237, 0x1B80, 0xF00D0245,
- 0x1B80, 0xF00D0247, 0x1B80, 0xF10D0255, 0x1B80, 0xF10D0257,
- 0x1B80, 0xF20D0265, 0x1B80, 0xF20D0267, 0x1B80, 0xF30D0275,
- 0x1B80, 0xF30D0277, 0x1B80, 0xF40D0285, 0x1B80, 0xF40D0287,
- 0x1B80, 0xF50D0295, 0x1B80, 0xF50D0297, 0x1B80, 0xF60D02A5,
- 0x1B80, 0xF60D02A7, 0x1B80, 0xF70D02B5, 0x1B80, 0xF70D02B7,
- 0x1B80, 0xF80D02C5, 0x1B80, 0xF80D02C7, 0x1B80, 0xF90D02D5,
- 0x1B80, 0xF90D02D7, 0x1B80, 0xFA0D02E5, 0x1B80, 0xFA0D02E7,
- 0x1B80, 0xFB0D02F5, 0x1B80, 0xFB0D02F7, 0x1B80, 0x00010305,
- 0x1B80, 0x00010307, 0x1B80, 0x303D0315, 0x1B80, 0x303D0317,
- 0x1B80, 0x30550325, 0x1B80, 0x30550327, 0x1B80, 0x30A00335,
- 0x1B80, 0x30A00337, 0x1B80, 0x30A30345, 0x1B80, 0x30A30347,
- 0x1B80, 0x30570355, 0x1B80, 0x30570357, 0x1B80, 0x30620365,
- 0x1B80, 0x30620367, 0x1B80, 0x306D0375, 0x1B80, 0x306D0377,
- 0x1B80, 0x30AD0385, 0x1B80, 0x30AD0387, 0x1B80, 0x30A70395,
- 0x1B80, 0x30A70397, 0x1B80, 0x30BB03A5, 0x1B80, 0x30BB03A7,
- 0x1B80, 0x30C603B5, 0x1B80, 0x30C603B7, 0x1B80, 0x30D103C5,
- 0x1B80, 0x30D103C7, 0x1B80, 0xE11403D5, 0x1B80, 0xE11403D7,
- 0x1B80, 0x4D0403E5, 0x1B80, 0x4D0403E7, 0x1B80, 0x208003F5,
- 0x1B80, 0x208003F7, 0x1B80, 0x00000405, 0x1B80, 0x00000407,
- 0x1B80, 0x4D000415, 0x1B80, 0x4D000417, 0x1B80, 0x55070425,
- 0x1B80, 0x55070427, 0x1B80, 0xE10C0435, 0x1B80, 0xE10C0437,
- 0x1B80, 0xE10C0445, 0x1B80, 0xE10C0447, 0x1B80, 0x4D040455,
- 0x1B80, 0x4D040457, 0x1B80, 0x20880465, 0x1B80, 0x20880467,
- 0x1B80, 0x02000475, 0x1B80, 0x02000477, 0x1B80, 0x4D000485,
- 0x1B80, 0x4D000487, 0x1B80, 0x550F0495, 0x1B80, 0x550F0497,
- 0x1B80, 0xE10C04A5, 0x1B80, 0xE10C04A7, 0x1B80, 0x4F0204B5,
- 0x1B80, 0x4F0204B7, 0x1B80, 0x4E0004C5, 0x1B80, 0x4E0004C7,
- 0x1B80, 0x530204D5, 0x1B80, 0x530204D7, 0x1B80, 0x520104E5,
- 0x1B80, 0x520104E7, 0x1B80, 0xE11004F5, 0x1B80, 0xE11004F7,
- 0x1B80, 0x4D080505, 0x1B80, 0x4D080507, 0x1B80, 0x57100515,
- 0x1B80, 0x57100517, 0x1B80, 0x57000525, 0x1B80, 0x57000527,
- 0x1B80, 0x4D000535, 0x1B80, 0x4D000537, 0x1B80, 0x00010545,
- 0x1B80, 0x00010547, 0x1B80, 0xE1140555, 0x1B80, 0xE1140557,
- 0x1B80, 0x00010565, 0x1B80, 0x00010567, 0x1B80, 0x30770575,
- 0x1B80, 0x30770577, 0x1B80, 0x00230585, 0x1B80, 0x00230587,
- 0x1B80, 0xE1680595, 0x1B80, 0xE1680597, 0x1B80, 0x000205A5,
- 0x1B80, 0x000205A7, 0x1B80, 0x54E905B5, 0x1B80, 0x54E905B7,
- 0x1B80, 0x0BA605C5, 0x1B80, 0x0BA605C7, 0x1B80, 0x002305D5,
- 0x1B80, 0x002305D7, 0x1B80, 0xE16805E5, 0x1B80, 0xE16805E7,
- 0x1B80, 0x000205F5, 0x1B80, 0x000205F7, 0x1B80, 0x4D300605,
- 0x1B80, 0x4D300607, 0x1B80, 0x30900615, 0x1B80, 0x30900617,
- 0x1B80, 0x30730625, 0x1B80, 0x30730627, 0x1B80, 0x00220635,
- 0x1B80, 0x00220637, 0x1B80, 0xE1680645, 0x1B80, 0xE1680647,
- 0x1B80, 0x00020655, 0x1B80, 0x00020657, 0x1B80, 0x54E80665,
- 0x1B80, 0x54E80667, 0x1B80, 0x0BA60675, 0x1B80, 0x0BA60677,
- 0x1B80, 0x00220685, 0x1B80, 0x00220687, 0x1B80, 0xE1680695,
- 0x1B80, 0xE1680697, 0x1B80, 0x000206A5, 0x1B80, 0x000206A7,
- 0x1B80, 0x4D3006B5, 0x1B80, 0x4D3006B7, 0x1B80, 0x309006C5,
- 0x1B80, 0x309006C7, 0x1B80, 0x63F106D5, 0x1B80, 0x63F106D7,
- 0x1B80, 0xE11406E5, 0x1B80, 0xE11406E7, 0x1B80, 0xE16806F5,
- 0x1B80, 0xE16806F7, 0x1B80, 0x63F40705, 0x1B80, 0x63F40707,
- 0x1B80, 0xE1140715, 0x1B80, 0xE1140717, 0x1B80, 0xE1680725,
- 0x1B80, 0xE1680727, 0x1B80, 0x0BA80735, 0x1B80, 0x0BA80737,
- 0x1B80, 0x63F80745, 0x1B80, 0x63F80747, 0x1B80, 0xE1140755,
- 0x1B80, 0xE1140757, 0x1B80, 0xE1680765, 0x1B80, 0xE1680767,
- 0x1B80, 0x0BA90775, 0x1B80, 0x0BA90777, 0x1B80, 0x63FC0785,
- 0x1B80, 0x63FC0787, 0x1B80, 0xE1140795, 0x1B80, 0xE1140797,
- 0x1B80, 0xE16807A5, 0x1B80, 0xE16807A7, 0x1B80, 0x63FF07B5,
- 0x1B80, 0x63FF07B7, 0x1B80, 0xE11407C5, 0x1B80, 0xE11407C7,
- 0x1B80, 0xE16807D5, 0x1B80, 0xE16807D7, 0x1B80, 0x630007E5,
- 0x1B80, 0x630007E7, 0x1B80, 0xE11407F5, 0x1B80, 0xE11407F7,
- 0x1B80, 0xE1680805, 0x1B80, 0xE1680807, 0x1B80, 0x63030815,
- 0x1B80, 0x63030817, 0x1B80, 0xE1140825, 0x1B80, 0xE1140827,
- 0x1B80, 0xE1680835, 0x1B80, 0xE1680837, 0x1B80, 0xF4D40845,
- 0x1B80, 0xF4D40847, 0x1B80, 0x63070855, 0x1B80, 0x63070857,
- 0x1B80, 0xE1140865, 0x1B80, 0xE1140867, 0x1B80, 0xE1680875,
- 0x1B80, 0xE1680877, 0x1B80, 0xF5DB0885, 0x1B80, 0xF5DB0887,
- 0x1B80, 0x630B0895, 0x1B80, 0x630B0897, 0x1B80, 0xE11408A5,
- 0x1B80, 0xE11408A7, 0x1B80, 0xE16808B5, 0x1B80, 0xE16808B7,
- 0x1B80, 0x630E08C5, 0x1B80, 0x630E08C7, 0x1B80, 0xE11408D5,
- 0x1B80, 0xE11408D7, 0x1B80, 0xE16808E5, 0x1B80, 0xE16808E7,
- 0x1B80, 0x4D3008F5, 0x1B80, 0x4D3008F7, 0x1B80, 0x55010905,
- 0x1B80, 0x55010907, 0x1B80, 0x57040915, 0x1B80, 0x57040917,
- 0x1B80, 0x57000925, 0x1B80, 0x57000927, 0x1B80, 0x96000935,
- 0x1B80, 0x96000937, 0x1B80, 0x57080945, 0x1B80, 0x57080947,
- 0x1B80, 0x57000955, 0x1B80, 0x57000957, 0x1B80, 0x95000965,
- 0x1B80, 0x95000967, 0x1B80, 0x4D000975, 0x1B80, 0x4D000977,
- 0x1B80, 0x6C070985, 0x1B80, 0x6C070987, 0x1B80, 0x7B200995,
- 0x1B80, 0x7B200997, 0x1B80, 0x7A0009A5, 0x1B80, 0x7A0009A7,
- 0x1B80, 0x790009B5, 0x1B80, 0x790009B7, 0x1B80, 0x7F2009C5,
- 0x1B80, 0x7F2009C7, 0x1B80, 0x7E0009D5, 0x1B80, 0x7E0009D7,
- 0x1B80, 0x7D0009E5, 0x1B80, 0x7D0009E7, 0x1B80, 0x000109F5,
- 0x1B80, 0x000109F7, 0x1B80, 0x62850A05, 0x1B80, 0x62850A07,
- 0x1B80, 0xE1140A15, 0x1B80, 0xE1140A17, 0x1B80, 0x00010A25,
- 0x1B80, 0x00010A27, 0x1B80, 0x5C320A35, 0x1B80, 0x5C320A37,
- 0x1B80, 0xE1640A45, 0x1B80, 0xE1640A47, 0x1B80, 0xE1420A55,
- 0x1B80, 0xE1420A57, 0x1B80, 0x00010A65, 0x1B80, 0x00010A67,
- 0x1B80, 0x5C320A75, 0x1B80, 0x5C320A77, 0x1B80, 0x63F40A85,
- 0x1B80, 0x63F40A87, 0x1B80, 0x62850A95, 0x1B80, 0x62850A97,
- 0x1B80, 0x0BB00AA5, 0x1B80, 0x0BB00AA7, 0x1B80, 0xE1140AB5,
- 0x1B80, 0xE1140AB7, 0x1B80, 0xE1680AC5, 0x1B80, 0xE1680AC7,
- 0x1B80, 0x5C320AD5, 0x1B80, 0x5C320AD7, 0x1B80, 0x63FC0AE5,
- 0x1B80, 0x63FC0AE7, 0x1B80, 0x62850AF5, 0x1B80, 0x62850AF7,
- 0x1B80, 0x0BB10B05, 0x1B80, 0x0BB10B07, 0x1B80, 0xE1140B15,
- 0x1B80, 0xE1140B17, 0x1B80, 0xE1680B25, 0x1B80, 0xE1680B27,
- 0x1B80, 0x63030B35, 0x1B80, 0x63030B37, 0x1B80, 0xE1140B45,
- 0x1B80, 0xE1140B47, 0x1B80, 0xE1680B55, 0x1B80, 0xE1680B57,
- 0x1B80, 0xF7040B65, 0x1B80, 0xF7040B67, 0x1B80, 0x630B0B75,
- 0x1B80, 0x630B0B77, 0x1B80, 0xE1140B85, 0x1B80, 0xE1140B87,
- 0x1B80, 0xE1680B95, 0x1B80, 0xE1680B97, 0x1B80, 0x00010BA5,
- 0x1B80, 0x00010BA7, 0x1B80, 0x30DF0BB5, 0x1B80, 0x30DF0BB7,
- 0x1B80, 0x00230BC5, 0x1B80, 0x00230BC7, 0x1B80, 0xE16D0BD5,
- 0x1B80, 0xE16D0BD7, 0x1B80, 0x00020BE5, 0x1B80, 0x00020BE7,
- 0x1B80, 0x54E90BF5, 0x1B80, 0x54E90BF7, 0x1B80, 0x0BA60C05,
- 0x1B80, 0x0BA60C07, 0x1B80, 0x00230C15, 0x1B80, 0x00230C17,
- 0x1B80, 0xE16D0C25, 0x1B80, 0xE16D0C27, 0x1B80, 0x00020C35,
- 0x1B80, 0x00020C37, 0x1B80, 0x4D100C45, 0x1B80, 0x4D100C47,
- 0x1B80, 0x30900C55, 0x1B80, 0x30900C57, 0x1B80, 0x30D90C65,
- 0x1B80, 0x30D90C67, 0x1B80, 0x00220C75, 0x1B80, 0x00220C77,
- 0x1B80, 0xE16D0C85, 0x1B80, 0xE16D0C87, 0x1B80, 0x00020C95,
- 0x1B80, 0x00020C97, 0x1B80, 0x54E80CA5, 0x1B80, 0x54E80CA7,
- 0x1B80, 0x0BA60CB5, 0x1B80, 0x0BA60CB7, 0x1B80, 0x00220CC5,
- 0x1B80, 0x00220CC7, 0x1B80, 0xE16D0CD5, 0x1B80, 0xE16D0CD7,
- 0x1B80, 0x00020CE5, 0x1B80, 0x00020CE7, 0x1B80, 0x4D100CF5,
- 0x1B80, 0x4D100CF7, 0x1B80, 0x30900D05, 0x1B80, 0x30900D07,
- 0x1B80, 0x5C320D15, 0x1B80, 0x5C320D17, 0x1B80, 0x54F00D25,
- 0x1B80, 0x54F00D27, 0x1B80, 0x67F10D35, 0x1B80, 0x67F10D37,
- 0x1B80, 0xE1420D45, 0x1B80, 0xE1420D47, 0x1B80, 0xE16D0D55,
- 0x1B80, 0xE16D0D57, 0x1B80, 0x67F40D65, 0x1B80, 0x67F40D67,
- 0x1B80, 0xE1420D75, 0x1B80, 0xE1420D77, 0x1B80, 0xE16D0D85,
- 0x1B80, 0xE16D0D87, 0x1B80, 0x5C320D95, 0x1B80, 0x5C320D97,
- 0x1B80, 0x54F10DA5, 0x1B80, 0x54F10DA7, 0x1B80, 0x0BA80DB5,
- 0x1B80, 0x0BA80DB7, 0x1B80, 0x67F80DC5, 0x1B80, 0x67F80DC7,
- 0x1B80, 0xE1420DD5, 0x1B80, 0xE1420DD7, 0x1B80, 0xE16D0DE5,
- 0x1B80, 0xE16D0DE7, 0x1B80, 0x5C320DF5, 0x1B80, 0x5C320DF7,
- 0x1B80, 0x54F10E05, 0x1B80, 0x54F10E07, 0x1B80, 0x0BA90E15,
- 0x1B80, 0x0BA90E17, 0x1B80, 0x67FC0E25, 0x1B80, 0x67FC0E27,
- 0x1B80, 0xE1420E35, 0x1B80, 0xE1420E37, 0x1B80, 0xE16D0E45,
- 0x1B80, 0xE16D0E47, 0x1B80, 0x67FF0E55, 0x1B80, 0x67FF0E57,
- 0x1B80, 0xE1420E65, 0x1B80, 0xE1420E67, 0x1B80, 0xE16D0E75,
- 0x1B80, 0xE16D0E77, 0x1B80, 0x5C320E85, 0x1B80, 0x5C320E87,
- 0x1B80, 0x54F20E95, 0x1B80, 0x54F20E97, 0x1B80, 0x67000EA5,
- 0x1B80, 0x67000EA7, 0x1B80, 0xE1420EB5, 0x1B80, 0xE1420EB7,
- 0x1B80, 0xE16D0EC5, 0x1B80, 0xE16D0EC7, 0x1B80, 0x67030ED5,
- 0x1B80, 0x67030ED7, 0x1B80, 0xE1420EE5, 0x1B80, 0xE1420EE7,
- 0x1B80, 0xE16D0EF5, 0x1B80, 0xE16D0EF7, 0x1B80, 0xF9CC0F05,
- 0x1B80, 0xF9CC0F07, 0x1B80, 0x67070F15, 0x1B80, 0x67070F17,
- 0x1B80, 0xE1420F25, 0x1B80, 0xE1420F27, 0x1B80, 0xE16D0F35,
- 0x1B80, 0xE16D0F37, 0x1B80, 0xFAD30F45, 0x1B80, 0xFAD30F47,
- 0x1B80, 0x5C320F55, 0x1B80, 0x5C320F57, 0x1B80, 0x54F30F65,
- 0x1B80, 0x54F30F67, 0x1B80, 0x670B0F75, 0x1B80, 0x670B0F77,
- 0x1B80, 0xE1420F85, 0x1B80, 0xE1420F87, 0x1B80, 0xE16D0F95,
- 0x1B80, 0xE16D0F97, 0x1B80, 0x670E0FA5, 0x1B80, 0x670E0FA7,
- 0x1B80, 0xE1420FB5, 0x1B80, 0xE1420FB7, 0x1B80, 0xE16D0FC5,
- 0x1B80, 0xE16D0FC7, 0x1B80, 0x4D100FD5, 0x1B80, 0x4D100FD7,
- 0x1B80, 0x30900FE5, 0x1B80, 0x30900FE7, 0x1B80, 0x00010FF5,
- 0x1B80, 0x00010FF7, 0x1B80, 0x7B241005, 0x1B80, 0x7B241007,
- 0x1B80, 0x7A401015, 0x1B80, 0x7A401017, 0x1B80, 0x79001025,
- 0x1B80, 0x79001027, 0x1B80, 0x55031035, 0x1B80, 0x55031037,
- 0x1B80, 0x310C1045, 0x1B80, 0x310C1047, 0x1B80, 0x7B1C1055,
- 0x1B80, 0x7B1C1057, 0x1B80, 0x7A401065, 0x1B80, 0x7A401067,
- 0x1B80, 0x550B1075, 0x1B80, 0x550B1077, 0x1B80, 0x310C1085,
- 0x1B80, 0x310C1087, 0x1B80, 0x7B201095, 0x1B80, 0x7B201097,
- 0x1B80, 0x7A0010A5, 0x1B80, 0x7A0010A7, 0x1B80, 0x551310B5,
- 0x1B80, 0x551310B7, 0x1B80, 0x740110C5, 0x1B80, 0x740110C7,
- 0x1B80, 0x740010D5, 0x1B80, 0x740010D7, 0x1B80, 0x8E0010E5,
- 0x1B80, 0x8E0010E7, 0x1B80, 0x000110F5, 0x1B80, 0x000110F7,
- 0x1B80, 0x57021105, 0x1B80, 0x57021107, 0x1B80, 0x57001115,
- 0x1B80, 0x57001117, 0x1B80, 0x97001125, 0x1B80, 0x97001127,
- 0x1B80, 0x00011135, 0x1B80, 0x00011137, 0x1B80, 0x4F781145,
- 0x1B80, 0x4F781147, 0x1B80, 0x53881155, 0x1B80, 0x53881157,
- 0x1B80, 0xE1221165, 0x1B80, 0xE1221167, 0x1B80, 0x54801175,
- 0x1B80, 0x54801177, 0x1B80, 0x54001185, 0x1B80, 0x54001187,
- 0x1B80, 0xE1221195, 0x1B80, 0xE1221197, 0x1B80, 0x548111A5,
- 0x1B80, 0x548111A7, 0x1B80, 0x540011B5, 0x1B80, 0x540011B7,
- 0x1B80, 0xE12211C5, 0x1B80, 0xE12211C7, 0x1B80, 0x548211D5,
- 0x1B80, 0x548211D7, 0x1B80, 0x540011E5, 0x1B80, 0x540011E7,
- 0x1B80, 0xE12D11F5, 0x1B80, 0xE12D11F7, 0x1B80, 0xBF1D1205,
- 0x1B80, 0xBF1D1207, 0x1B80, 0x301D1215, 0x1B80, 0x301D1217,
- 0x1B80, 0xE1001225, 0x1B80, 0xE1001227, 0x1B80, 0xE1051235,
- 0x1B80, 0xE1051237, 0x1B80, 0xE1091245, 0x1B80, 0xE1091247,
- 0x1B80, 0xE1101255, 0x1B80, 0xE1101257, 0x1B80, 0xE1641265,
- 0x1B80, 0xE1641267, 0x1B80, 0x55131275, 0x1B80, 0x55131277,
- 0x1B80, 0xE10C1285, 0x1B80, 0xE10C1287, 0x1B80, 0x55151295,
- 0x1B80, 0x55151297, 0x1B80, 0xE11012A5, 0x1B80, 0xE11012A7,
- 0x1B80, 0xE16412B5, 0x1B80, 0xE16412B7, 0x1B80, 0x000112C5,
- 0x1B80, 0x000112C7, 0x1B80, 0x54BF12D5, 0x1B80, 0x54BF12D7,
- 0x1B80, 0x54C012E5, 0x1B80, 0x54C012E7, 0x1B80, 0x54A312F5,
- 0x1B80, 0x54A312F7, 0x1B80, 0x54C11305, 0x1B80, 0x54C11307,
- 0x1B80, 0x54A41315, 0x1B80, 0x54A41317, 0x1B80, 0x4C181325,
- 0x1B80, 0x4C181327, 0x1B80, 0xBF071335, 0x1B80, 0xBF071337,
- 0x1B80, 0x54C21345, 0x1B80, 0x54C21347, 0x1B80, 0x54A41355,
- 0x1B80, 0x54A41357, 0x1B80, 0xBF041365, 0x1B80, 0xBF041367,
- 0x1B80, 0x54C11375, 0x1B80, 0x54C11377, 0x1B80, 0x54A31385,
- 0x1B80, 0x54A31387, 0x1B80, 0xBF011395, 0x1B80, 0xBF011397,
- 0x1B80, 0xE17213A5, 0x1B80, 0xE17213A7, 0x1B80, 0x54DF13B5,
- 0x1B80, 0x54DF13B7, 0x1B80, 0x000113C5, 0x1B80, 0x000113C7,
- 0x1B80, 0x54BF13D5, 0x1B80, 0x54BF13D7, 0x1B80, 0x54E513E5,
- 0x1B80, 0x54E513E7, 0x1B80, 0x050A13F5, 0x1B80, 0x050A13F7,
- 0x1B80, 0x54DF1405, 0x1B80, 0x54DF1407, 0x1B80, 0x00011415,
- 0x1B80, 0x00011417, 0x1B80, 0x7F201425, 0x1B80, 0x7F201427,
- 0x1B80, 0x7E001435, 0x1B80, 0x7E001437, 0x1B80, 0x7D001445,
- 0x1B80, 0x7D001447, 0x1B80, 0x55011455, 0x1B80, 0x55011457,
- 0x1B80, 0x5C311465, 0x1B80, 0x5C311467, 0x1B80, 0xE10C1475,
- 0x1B80, 0xE10C1477, 0x1B80, 0xE1101485, 0x1B80, 0xE1101487,
- 0x1B80, 0x54801495, 0x1B80, 0x54801497, 0x1B80, 0x540014A5,
- 0x1B80, 0x540014A7, 0x1B80, 0xE10C14B5, 0x1B80, 0xE10C14B7,
- 0x1B80, 0xE11014C5, 0x1B80, 0xE11014C7, 0x1B80, 0x548114D5,
- 0x1B80, 0x548114D7, 0x1B80, 0x540014E5, 0x1B80, 0x540014E7,
- 0x1B80, 0xE10C14F5, 0x1B80, 0xE10C14F7, 0x1B80, 0xE1101505,
- 0x1B80, 0xE1101507, 0x1B80, 0x54821515, 0x1B80, 0x54821517,
- 0x1B80, 0x54001525, 0x1B80, 0x54001527, 0x1B80, 0xE12D1535,
- 0x1B80, 0xE12D1537, 0x1B80, 0xBFE91545, 0x1B80, 0xBFE91547,
- 0x1B80, 0x301D1555, 0x1B80, 0x301D1557, 0x1B80, 0x00231565,
- 0x1B80, 0x00231567, 0x1B80, 0x7B201575, 0x1B80, 0x7B201577,
- 0x1B80, 0x7A001585, 0x1B80, 0x7A001587, 0x1B80, 0x79001595,
- 0x1B80, 0x79001597, 0x1B80, 0xE16815A5, 0x1B80, 0xE16815A7,
- 0x1B80, 0x000215B5, 0x1B80, 0x000215B7, 0x1B80, 0x000115C5,
- 0x1B80, 0x000115C7, 0x1B80, 0x002215D5, 0x1B80, 0x002215D7,
- 0x1B80, 0x7B2015E5, 0x1B80, 0x7B2015E7, 0x1B80, 0x7A0015F5,
- 0x1B80, 0x7A0015F7, 0x1B80, 0x79001605, 0x1B80, 0x79001607,
- 0x1B80, 0xE1681615, 0x1B80, 0xE1681617, 0x1B80, 0x00021625,
- 0x1B80, 0x00021627, 0x1B80, 0x00011635, 0x1B80, 0x00011637,
- 0x1B80, 0x549F1645, 0x1B80, 0x549F1647, 0x1B80, 0x54FF1655,
- 0x1B80, 0x54FF1657, 0x1B80, 0x54001665, 0x1B80, 0x54001667,
- 0x1B80, 0x00011675, 0x1B80, 0x00011677, 0x1B80, 0x5C311685,
- 0x1B80, 0x5C311687, 0x1B80, 0x07141695, 0x1B80, 0x07141697,
- 0x1B80, 0x540016A5, 0x1B80, 0x540016A7, 0x1B80, 0x5C3216B5,
- 0x1B80, 0x5C3216B7, 0x1B80, 0x000116C5, 0x1B80, 0x000116C7,
- 0x1B80, 0x5C3216D5, 0x1B80, 0x5C3216D7, 0x1B80, 0x071416E5,
- 0x1B80, 0x071416E7, 0x1B80, 0x540016F5, 0x1B80, 0x540016F7,
- 0x1B80, 0x5C311705, 0x1B80, 0x5C311707, 0x1B80, 0x00011715,
- 0x1B80, 0x00011717, 0x1B80, 0x4C981725, 0x1B80, 0x4C981727,
- 0x1B80, 0x4C181735, 0x1B80, 0x4C181737, 0x1B80, 0x00011745,
- 0x1B80, 0x00011747, 0x1B80, 0x5C321755, 0x1B80, 0x5C321757,
- 0x1B80, 0x62841765, 0x1B80, 0x62841767, 0x1B80, 0x66861775,
- 0x1B80, 0x66861777, 0x1B80, 0x6C031785, 0x1B80, 0x6C031787,
- 0x1B80, 0x7B201795, 0x1B80, 0x7B201797, 0x1B80, 0x7A0017A5,
- 0x1B80, 0x7A0017A7, 0x1B80, 0x790017B5, 0x1B80, 0x790017B7,
- 0x1B80, 0x7F2017C5, 0x1B80, 0x7F2017C7, 0x1B80, 0x7E0017D5,
- 0x1B80, 0x7E0017D7, 0x1B80, 0x7D0017E5, 0x1B80, 0x7D0017E7,
- 0x1B80, 0x090117F5, 0x1B80, 0x090117F7, 0x1B80, 0x0C011805,
- 0x1B80, 0x0C011807, 0x1B80, 0x0BA61815, 0x1B80, 0x0BA61817,
- 0x1B80, 0x00011825, 0x1B80, 0x00011827, 0x1B80, 0x00000006,
- 0x1B80, 0x00000002,
-
-};
-
-void odm_read_and_config_mp_8822b_phy_reg(struct phy_dm_struct *dm)
-{
- u32 i = 0;
- u8 c_cond;
- bool is_matched = true, is_skipped = false;
- u32 *array = array_mp_8822b_phy_reg;
-
- u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===> %s\n", __func__);
-
- for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_phy_reg); i = i + 2) {
- v1 = array[i];
- v2 = array[i + 1];
-
- if (v1 & BIT(31)) { /* positive condition*/
- c_cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28);
- if (c_cond == COND_ENDIF) { /*end*/
- is_matched = true;
- is_skipped = false;
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "ENDIF\n");
- } else if (c_cond == COND_ELSE) { /*else*/
- is_matched = is_skipped ? false : true;
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "ELSE\n");
- } else { /*if , else if*/
- pre_v1 = v1;
- pre_v2 = v2;
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "IF or ELSE IF\n");
- }
- } else if (v1 & BIT(30)) { /*negative condition*/
- if (is_skipped) {
- is_matched = false;
- continue;
- }
-
- if (check_positive(dm, pre_v1, pre_v2, v1, v2)) {
- is_matched = true;
- is_skipped = true;
- } else {
- is_matched = false;
- is_skipped = false;
- }
- } else if (is_matched) {
- odm_config_bb_phy_8822b(dm, v1, MASKDWORD, v2);
- }
- }
-}
-
-u32 odm_get_version_mp_8822b_phy_reg(void) { return 67; }
-
-/******************************************************************************
- * phy_reg_pg.TXT
- ******************************************************************************/
-
-static u32 array_mp_8822b_phy_reg_pg[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426,
-};
-
-void odm_read_and_config_mp_8822b_phy_reg_pg(struct phy_dm_struct *dm)
-{
- u32 i = 0;
- u32 *array = array_mp_8822b_phy_reg_pg;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===> %s\n", __func__);
-
- dm->phy_reg_pg_version = 1;
- dm->phy_reg_pg_value_type = PHY_REG_PG_EXACT_VALUE;
-
- for (i = 0; i < ARRAY_SIZE(array_mp_8822b_phy_reg_pg); i += 6) {
- u32 v1 = array[i];
- u32 v2 = array[i + 1];
- u32 v3 = array[i + 2];
- u32 v4 = array[i + 3];
- u32 v5 = array[i + 4];
- u32 v6 = array[i + 5];
-
- odm_config_bb_phy_reg_pg_8822b(dm, v1, v2, v3, v4, v5, v6);
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h
deleted file mode 100644
index a12745051678..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*Image2HeaderVersion: 3.2*/
-#ifndef __INC_MP_BB_HW_IMG_8822B_H
-#define __INC_MP_BB_HW_IMG_8822B_H
-
-/******************************************************************************
- * agc_tab.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_agc_tab(/* tc: Test Chip, mp: mp Chip*/
- struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_agc_tab(void);
-
-/******************************************************************************
- * phy_reg.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_phy_reg(/* tc: Test Chip, mp: mp Chip*/
- struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_phy_reg(void);
-
-/******************************************************************************
- * phy_reg_pg.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_phy_reg_pg(/* tc: Test Chip, mp: mp Chip*/
- struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_phy_reg_pg(void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
deleted file mode 100644
index aed97e437e76..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
+++ /dev/null
@@ -1,211 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*Image2HeaderVersion: 3.2*/
-#include "../mp_precomp.h"
-#include "../phydm_precomp.h"
-#include <linux/kernel.h>
-
-static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
- const u32 condition2, const u32 condition3,
- const u32 condition4)
-{
- u8 _board_type = ((dm->board_type & BIT(4)) >> 4) << 0 | /* _GLNA*/
- ((dm->board_type & BIT(3)) >> 3) << 1 | /* _GPA*/
- ((dm->board_type & BIT(7)) >> 7) << 2 | /* _ALNA*/
- ((dm->board_type & BIT(6)) >> 6) << 3 | /* _APA */
- ((dm->board_type & BIT(2)) >> 2) << 4; /* _BT*/
-
- u32 cond1 = condition1, cond2 = condition2, cond3 = condition3,
- cond4 = condition4;
-
- u8 cut_version_for_para =
- (dm->cut_version == ODM_CUT_A) ? 14 : dm->cut_version;
- u8 pkg_type_for_para = (dm->package_type == 0) ? 14 : dm->package_type;
-
- u32 driver1 = cut_version_for_para << 24 |
- (dm->support_interface & 0xF0) << 16 |
- dm->support_platform << 16 | pkg_type_for_para << 12 |
- (dm->support_interface & 0x0F) << 8 | _board_type;
-
- u32 driver2 = (dm->type_glna & 0xFF) << 0 | (dm->type_gpa & 0xFF) << 8 |
- (dm->type_alna & 0xFF) << 16 |
- (dm->type_apa & 0xFF) << 24;
-
- u32 driver3 = 0;
-
- u32 driver4 = (dm->type_glna & 0xFF00) >> 8 | (dm->type_gpa & 0xFF00) |
- (dm->type_alna & 0xFF00) << 8 |
- (dm->type_apa & 0xFF00) << 16;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> %s (cond1, cond2, cond3, cond4) = (0x%X 0x%X 0x%X 0x%X)\n",
- __func__, cond1, cond2, cond3, cond4);
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> %s (driver1, driver2, driver3, driver4) = (0x%X 0x%X 0x%X 0x%X)\n",
- __func__, driver1, driver2, driver3, driver4);
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- " (Platform, Interface) = (0x%X, 0x%X)\n",
- dm->support_platform, dm->support_interface);
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- " (Board, Package) = (0x%X, 0x%X)\n",
- dm->board_type, dm->package_type);
-
- /*============== value Defined Check ===============*/
- /*QFN type [15:12] and cut version [27:24] need to do value check*/
-
- if (((cond1 & 0x0000F000) != 0) &&
- ((cond1 & 0x0000F000) != (driver1 & 0x0000F000)))
- return false;
- if (((cond1 & 0x0F000000) != 0) &&
- ((cond1 & 0x0F000000) != (driver1 & 0x0F000000)))
- return false;
-
- /*=============== Bit Defined Check ================*/
- /* We don't care [31:28] */
-
- cond1 &= 0x00FF0FFF;
- driver1 &= 0x00FF0FFF;
-
- if ((cond1 & driver1) == cond1) {
- u32 bit_mask = 0;
-
- if ((cond1 & 0x0F) == 0) /* board_type is DONTCARE*/
- return true;
-
- if ((cond1 & BIT(0)) != 0) /*GLNA*/
- bit_mask |= 0x000000FF;
- if ((cond1 & BIT(1)) != 0) /*GPA*/
- bit_mask |= 0x0000FF00;
- if ((cond1 & BIT(2)) != 0) /*ALNA*/
- bit_mask |= 0x00FF0000;
- if ((cond1 & BIT(3)) != 0) /*APA*/
- bit_mask |= 0xFF000000;
-
- if (((cond2 & bit_mask) == (driver2 & bit_mask)) &&
- ((cond4 & bit_mask) ==
- (driver4 &
- bit_mask))) /* board_type of each RF path is matched*/
- return true;
- else
- return false;
- } else {
- return false;
- }
-}
-
-/******************************************************************************
- * mac_reg.TXT
- ******************************************************************************/
-
-static u32 array_mp_8822b_mac_reg[] = {
- 0x029, 0x000000F9, 0x420, 0x00000080, 0x421, 0x0000000F,
- 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000,
- 0x431, 0x00000000, 0x432, 0x00000000, 0x433, 0x00000001,
- 0x434, 0x00000004, 0x435, 0x00000005, 0x436, 0x00000007,
- 0x437, 0x00000008, 0x43C, 0x00000004, 0x43D, 0x00000005,
- 0x43E, 0x00000007, 0x43F, 0x00000008, 0x440, 0x0000005D,
- 0x441, 0x00000001, 0x442, 0x00000000, 0x444, 0x00000010,
- 0x445, 0x000000F0, 0x446, 0x00000001, 0x447, 0x000000FE,
- 0x448, 0x00000000, 0x449, 0x00000000, 0x44A, 0x00000000,
- 0x44B, 0x00000040, 0x44C, 0x00000010, 0x44D, 0x000000F0,
- 0x44E, 0x0000003F, 0x44F, 0x00000000, 0x450, 0x00000000,
- 0x451, 0x00000000, 0x452, 0x00000000, 0x453, 0x00000040,
- 0x455, 0x00000070, 0x45E, 0x00000004, 0x49C, 0x00000010,
- 0x49D, 0x000000F0, 0x49E, 0x00000000, 0x49F, 0x00000006,
- 0x4A0, 0x000000E0, 0x4A1, 0x00000003, 0x4A2, 0x00000000,
- 0x4A3, 0x00000040, 0x4A4, 0x00000015, 0x4A5, 0x000000F0,
- 0x4A6, 0x00000000, 0x4A7, 0x00000006, 0x4A8, 0x000000E0,
- 0x4A9, 0x00000000, 0x4AA, 0x00000000, 0x4AB, 0x00000000,
- 0x7DA, 0x00000008, 0x1448, 0x00000006, 0x144A, 0x00000006,
- 0x144C, 0x00000006, 0x144E, 0x00000006, 0x4C8, 0x000000FF,
- 0x4C9, 0x00000008, 0x4CA, 0x00000020, 0x4CB, 0x00000020,
- 0x4CC, 0x000000FF, 0x4CD, 0x000000FF, 0x4CE, 0x00000001,
- 0x4CF, 0x00000008, 0x500, 0x00000026, 0x501, 0x000000A2,
- 0x502, 0x0000002F, 0x503, 0x00000000, 0x504, 0x00000028,
- 0x505, 0x000000A3, 0x506, 0x0000005E, 0x507, 0x00000000,
- 0x508, 0x0000002B, 0x509, 0x000000A4, 0x50A, 0x0000005E,
- 0x50B, 0x00000000, 0x50C, 0x0000004F, 0x50D, 0x000000A4,
- 0x50E, 0x00000000, 0x50F, 0x00000000, 0x512, 0x0000001C,
- 0x514, 0x0000000A, 0x516, 0x0000000A, 0x521, 0x0000002F,
- 0x525, 0x0000004F, 0x551, 0x00000010, 0x559, 0x00000002,
- 0x55C, 0x00000050, 0x55D, 0x000000FF, 0x577, 0x0000000B,
- 0x5BE, 0x00000064, 0x605, 0x00000030, 0x608, 0x0000000E,
- 0x609, 0x00000022, 0x60C, 0x00000018, 0x6A0, 0x000000FF,
- 0x6A1, 0x000000FF, 0x6A2, 0x000000FF, 0x6A3, 0x000000FF,
- 0x6A4, 0x000000FF, 0x6A5, 0x000000FF, 0x6DE, 0x00000084,
- 0x620, 0x000000FF, 0x621, 0x000000FF, 0x622, 0x000000FF,
- 0x623, 0x000000FF, 0x624, 0x000000FF, 0x625, 0x000000FF,
- 0x626, 0x000000FF, 0x627, 0x000000FF, 0x638, 0x00000050,
- 0x63C, 0x0000000A, 0x63D, 0x0000000A, 0x63E, 0x0000000E,
- 0x63F, 0x0000000E, 0x640, 0x00000040, 0x642, 0x00000040,
- 0x643, 0x00000000, 0x652, 0x000000C8, 0x66E, 0x00000005,
- 0x718, 0x00000040, 0x7D4, 0x00000098,
-
-};
-
-void odm_read_and_config_mp_8822b_mac_reg(struct phy_dm_struct *dm)
-{
- u32 i = 0;
- u8 c_cond;
- bool is_matched = true, is_skipped = false;
- u32 *array = array_mp_8822b_mac_reg;
-
- u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===> %s\n", __func__);
-
- for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_mac_reg); i = i + 2) {
- v1 = array[i];
- v2 = array[i + 1];
-
- if (v1 & BIT(31)) { /* positive condition*/
- c_cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28);
- if (c_cond == COND_ENDIF) { /*end*/
- is_matched = true;
- is_skipped = false;
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "ENDIF\n");
- } else if (c_cond == COND_ELSE) { /*else*/
- is_matched = is_skipped ? false : true;
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "ELSE\n");
- } else { /*if , else if*/
- pre_v1 = v1;
- pre_v2 = v2;
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "IF or ELSE IF\n");
- }
- } else if (v1 & BIT(30)) { /*negative condition*/
- if (is_skipped) {
- is_matched = false;
- continue;
- }
-
- if (check_positive(dm, pre_v1, pre_v2, v1, v2)) {
- is_matched = true;
- is_skipped = true;
- } else {
- is_matched = false;
- is_skipped = false;
- }
- } else if (is_matched) {
- odm_config_mac_8822b(dm, v1, (u8)v2);
- }
- }
-}
-
-u32 odm_get_version_mp_8822b_mac_reg(void) { return 67; }
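check_positive() in the deleted halhwimg8822b_mac.c decides whether a conditional block in these tables applies to the running hardware: it packs the cut version (bits 27:24), platform and interface, package type (bits 15:12) and a five-bit board-type bitmap into driver1, requires an exact match on the package and cut-version nibbles whenever the condition word specifies them, then a subset check on the remaining bits, and finally compares the per-RF-path type bytes in cond2/cond4 against driver2/driver4. Below is a compact sketch of only the driver1 packing and the nibble/bit checks (the per-path comparison is omitted), using made-up field values rather than a real struct phy_dm_struct.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field values; the driver reads these from struct phy_dm_struct. */
struct dm_fields {
	uint8_t  cut_version;       /* 14 is substituted for ODM_CUT_A in the table format */
	uint8_t  package_type;      /* 14 is substituted for an unknown (0) package type */
	uint8_t  support_interface;
	uint32_t support_platform;
	uint8_t  board_type_bits;   /* GLNA|GPA|ALNA|APA|BT bitmap in bits 0..4 */
};

static uint32_t pack_driver1(const struct dm_fields *f)
{
	return (uint32_t)f->cut_version << 24 |
	       (uint32_t)(f->support_interface & 0xF0) << 16 |
	       f->support_platform << 16 |
	       (uint32_t)f->package_type << 12 |
	       (uint32_t)(f->support_interface & 0x0F) << 8 |
	       f->board_type_bits;
}

static bool value_and_bit_check(uint32_t cond1, uint32_t driver1)
{
	/* Value-defined check: package nibble [15:12] and cut nibble [27:24] must
	 * match exactly whenever the condition word specifies them. */
	if ((cond1 & 0x0000F000) && (cond1 & 0x0000F000) != (driver1 & 0x0000F000))
		return false;
	if ((cond1 & 0x0F000000) && (cond1 & 0x0F000000) != (driver1 & 0x0F000000))
		return false;

	/* Bit-defined check: ignore [31:28]; every remaining bit set in cond1
	 * must also be set in driver1.  The cond2/cond4 per-path check that the
	 * real check_positive() does afterwards is left out of this sketch. */
	cond1 &= 0x00FF0FFF;
	driver1 &= 0x00FF0FFF;
	return (cond1 & driver1) == cond1;
}

int main(void)
{
	const struct dm_fields f = { 14, 1, 0x01, 0x08, 0x00 };  /* made-up example */
	uint32_t driver1 = pack_driver1(&f);

	/* 0x9300100f is one of the condition words used in array_mp_8822b_radioa. */
	printf("driver1 = 0x%08X, passes 0x9300100f: %d\n",
	       (unsigned)driver1, value_and_bit_check(0x9300100f, driver1));
	return 0;
}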
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h
deleted file mode 100644
index 2f8107bd0205..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*Image2HeaderVersion: 3.2*/
-#ifndef __INC_MP_MAC_HW_IMG_8822B_H
-#define __INC_MP_MAC_HW_IMG_8822B_H
-
-/******************************************************************************
- * mac_reg.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_mac_reg(/* tc: Test Chip, mp: mp Chip*/
- struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_mac_reg(void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
deleted file mode 100644
index b8d33d7637b5..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
+++ /dev/null
@@ -1,4730 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*Image2HeaderVersion: 3.2*/
-#include "../mp_precomp.h"
-#include "../phydm_precomp.h"
-#include <linux/kernel.h>
-
-static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
- const u32 condition2, const u32 condition3,
- const u32 condition4)
-{
- u8 _board_type = ((dm->board_type & BIT(4)) >> 4) << 0 | /* _GLNA*/
- ((dm->board_type & BIT(3)) >> 3) << 1 | /* _GPA*/
- ((dm->board_type & BIT(7)) >> 7) << 2 | /* _ALNA*/
- ((dm->board_type & BIT(6)) >> 6) << 3 | /* _APA */
- ((dm->board_type & BIT(2)) >> 2) << 4; /* _BT*/
-
- u32 cond1 = condition1, cond2 = condition2, cond3 = condition3,
- cond4 = condition4;
-
- u8 cut_version_for_para =
- (dm->cut_version == ODM_CUT_A) ? 14 : dm->cut_version;
- u8 pkg_type_for_para = (dm->package_type == 0) ? 14 : dm->package_type;
-
- u32 driver1 = cut_version_for_para << 24 |
- (dm->support_interface & 0xF0) << 16 |
- dm->support_platform << 16 | pkg_type_for_para << 12 |
- (dm->support_interface & 0x0F) << 8 | _board_type;
-
- u32 driver2 = (dm->type_glna & 0xFF) << 0 | (dm->type_gpa & 0xFF) << 8 |
- (dm->type_alna & 0xFF) << 16 |
- (dm->type_apa & 0xFF) << 24;
-
- u32 driver3 = 0;
-
- u32 driver4 = (dm->type_glna & 0xFF00) >> 8 | (dm->type_gpa & 0xFF00) |
- (dm->type_alna & 0xFF00) << 8 |
- (dm->type_apa & 0xFF00) << 16;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> %s (cond1, cond2, cond3, cond4) = (0x%X 0x%X 0x%X 0x%X)\n",
- __func__, cond1, cond2, cond3, cond4);
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> %s (driver1, driver2, driver3, driver4) = (0x%X 0x%X 0x%X 0x%X)\n",
- __func__, driver1, driver2, driver3, driver4);
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- " (Platform, Interface) = (0x%X, 0x%X)\n",
- dm->support_platform, dm->support_interface);
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- " (Board, Package) = (0x%X, 0x%X)\n",
- dm->board_type, dm->package_type);
-
- /*============== value Defined Check ===============*/
- /*QFN type [15:12] and cut version [27:24] need to do value check*/
-
- if (((cond1 & 0x0000F000) != 0) &&
- ((cond1 & 0x0000F000) != (driver1 & 0x0000F000)))
- return false;
- if (((cond1 & 0x0F000000) != 0) &&
- ((cond1 & 0x0F000000) != (driver1 & 0x0F000000)))
- return false;
-
- /*=============== Bit Defined Check ================*/
- /* We don't care [31:28] */
-
- cond1 &= 0x00FF0FFF;
- driver1 &= 0x00FF0FFF;
-
- if ((cond1 & driver1) == cond1) {
- u32 bit_mask = 0;
-
- if ((cond1 & 0x0F) == 0) /* board_type is DONTCARE*/
- return true;
-
- if ((cond1 & BIT(0)) != 0) /*GLNA*/
- bit_mask |= 0x000000FF;
- if ((cond1 & BIT(1)) != 0) /*GPA*/
- bit_mask |= 0x0000FF00;
- if ((cond1 & BIT(2)) != 0) /*ALNA*/
- bit_mask |= 0x00FF0000;
- if ((cond1 & BIT(3)) != 0) /*APA*/
- bit_mask |= 0xFF000000;
-
- if (((cond2 & bit_mask) == (driver2 & bit_mask)) &&
- ((cond4 & bit_mask) ==
- (driver4 &
- bit_mask))) /* board_type of each RF path is matched*/
- return true;
- else
- return false;
- } else {
- return false;
- }
-}
-
-/******************************************************************************
- * radioa.TXT
- ******************************************************************************/
-
-static u32 array_mp_8822b_radioa[] = {
- 0x000, 0x00030000, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x90002000, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0xA0000000, 0x00000000, 0x001, 0x00040029,
- 0xB0000000, 0x00000000, 0x018, 0x00010D24, 0x0EF, 0x00080000,
- 0x033, 0x00000002, 0x03E, 0x0000003F, 0x03F, 0x000C0F4E,
- 0x033, 0x00000001, 0x03E, 0x00000034, 0x03F, 0x0004080E,
- 0x0EF, 0x00080000, 0x0DF, 0x00002449, 0x033, 0x00000024,
- 0x03E, 0x0000003F, 0x03F, 0x00060FDE, 0x0EF, 0x00000000,
- 0x0EF, 0x00080000, 0x033, 0x00000025, 0x03E, 0x00000037,
- 0x03F, 0x0007EFCE, 0x0EF, 0x00000000, 0x0EF, 0x00080000,
- 0x033, 0x00000026, 0x03E, 0x00000037, 0x03F, 0x000DEFCE,
- 0x0EF, 0x00000000, 0x07F, 0x00000000, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0x93001000, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FF0F8, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x0B0, 0x000FB0F8, 0xA0000000, 0x00000000,
- 0x0B0, 0x000FF0F8, 0xB0000000, 0x00000000, 0x0B1, 0x0007DBE4,
- 0x0B2, 0x000225D1, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x0B3, 0x000FC760, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x0B3, 0x0007C330, 0xA0000000, 0x00000000, 0x0B3, 0x000FC760,
- 0xB0000000, 0x00000000, 0x0B4, 0x00099DD0, 0x0B5, 0x000400FC,
- 0x0B6, 0x000187F0, 0x0B7, 0x00030018, 0x0B8, 0x00080800,
- 0x0B9, 0x00000000, 0x0BA, 0x00008000, 0x0BB, 0x00000000,
- 0x0BC, 0x00040030, 0x0BD, 0x00000000, 0x0BE, 0x00000000,
- 0x0BF, 0x00000000, 0x0C0, 0x00000000, 0x0C1, 0x00000000,
- 0x0C2, 0x00000000, 0x0C3, 0x00000000, 0x0C4, 0x00002402,
- 0x0C5, 0x00000009, 0x0C6, 0x00040299, 0x0C7, 0x00055555,
- 0x0C8, 0x0000C16C, 0x0C9, 0x0001C140, 0x0CA, 0x00000000,
- 0x0CB, 0x00000000, 0x0CC, 0x00000000, 0x0CD, 0x00000000,
- 0x0CE, 0x00090C00, 0x0CF, 0x0006D200, 0x0DF, 0x00000009,
- 0x018, 0x00010524, 0x089, 0x00000207, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x08A, 0x000FF186, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x08A, 0x000FE186, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x08A, 0x000FF186, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x08A, 0x000FF186, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x08A, 0x000FF186, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x08A, 0x000FE186, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x08A, 0x000FF186, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x08A, 0x000FF186, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x08A, 0x000FF186, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x08A, 0x000FF186, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x08A, 0x000FE186, 0xA0000000, 0x00000000,
- 0x08A, 0x000FF186, 0xB0000000, 0x00000000, 0x08B, 0x00061E3C,
- 0x08C, 0x000112C7, 0x08D, 0x000F4988, 0x08E, 0x00064D40,
- 0x0EF, 0x00020000, 0x033, 0x00000007, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00004080, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0xA0000000, 0x00000000,
- 0x03E, 0x00004000, 0xB0000000, 0x00000000, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000DFF86, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C0006, 0x93001000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0xA0000000, 0x00000000,
- 0x03F, 0x000C3186, 0xB0000000, 0x00000000, 0x033, 0x00000006,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0xA0000000, 0x00000000, 0x03E, 0x00004080, 0xB0000000, 0x00000000,
- 0x03F, 0x000C3186, 0x033, 0x00000005, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x000040C8, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x000040C8, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x000040C8, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x000040C8, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x000040C8, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00004084, 0xA0000000, 0x00000000,
- 0x03E, 0x000040C8, 0xB0000000, 0x00000000, 0x03F, 0x000C3186,
- 0x033, 0x00000004, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x03E, 0x00004190, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x03E, 0x00004190, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004190, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004190, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x03E, 0x00004190, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x03E, 0x00004108, 0xA0000000, 0x00000000, 0x03E, 0x00004190,
- 0xB0000000, 0x00000000, 0x03F, 0x000C3186, 0x033, 0x00000003,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00004998,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00004998,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004998,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004998,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00004998,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x0000490C,
- 0xA0000000, 0x00000000, 0x03E, 0x00004998, 0xB0000000, 0x00000000,
- 0x03F, 0x000C3186, 0x033, 0x00000002, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x00005840, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00005840, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00005840, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00005840, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x00005840, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00005E00, 0xA0000000, 0x00000000,
- 0x03E, 0x00005840, 0xB0000000, 0x00000000, 0x03F, 0x000C3186,
- 0x033, 0x00000001, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x03E, 0x000058C2, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x03E, 0x000058C2, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x000058C2, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x000058C2, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x03E, 0x000058C2, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x03E, 0x00005862, 0xA0000000, 0x00000000, 0x03E, 0x000058C2,
- 0xB0000000, 0x00000000, 0x03F, 0x000C3186, 0x033, 0x00000000,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00005930,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00005930,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00005930,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00005930,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00005930,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00005948,
- 0xA0000000, 0x00000000, 0x03E, 0x00005930, 0xB0000000, 0x00000000,
- 0x03F, 0x000C3186, 0x033, 0x0000000F, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00004080, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00004080, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0xA0000000, 0x00000000,
- 0x03E, 0x00004000, 0xB0000000, 0x00000000, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000DFF86, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000DFF86, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C0006, 0x93001000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0xA0000000, 0x00000000,
- 0x03F, 0x000C3186, 0xB0000000, 0x00000000, 0x033, 0x0000000E,
- 0x03E, 0x00004080, 0x03F, 0x000C3186, 0x033, 0x0000000D,
- 0x03E, 0x000040C8, 0x03F, 0x000C3186, 0x033, 0x0000000C,
- 0x03E, 0x00004190, 0x03F, 0x000C3186, 0x033, 0x0000000B,
- 0x03E, 0x00004998, 0x03F, 0x000C3186, 0x033, 0x0000000A,
- 0x03E, 0x00005840, 0x03F, 0x000C3186, 0x033, 0x00000009,
- 0x03E, 0x000058C2, 0x03F, 0x000C3186, 0x033, 0x00000008,
- 0x03E, 0x00005930, 0x03F, 0x000C3186, 0x033, 0x00000017,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9300200c, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004000,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004000,
- 0x93011000, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004000,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0xA0000000, 0x00000000, 0x03E, 0x00004000, 0xB0000000, 0x00000000,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9300200c, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x93011000, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x93002000, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C0006,
- 0x93001000, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0xA0000000, 0x00000000, 0x03F, 0x000C3186, 0xB0000000, 0x00000000,
- 0x033, 0x00000016, 0x03E, 0x00004080, 0x03F, 0x000C3186,
- 0x033, 0x00000015, 0x03E, 0x000040C8, 0x03F, 0x000C3186,
- 0x033, 0x00000014, 0x03E, 0x00004190, 0x03F, 0x000C3186,
- 0x033, 0x00000013, 0x03E, 0x00004998, 0x03F, 0x000C3186,
- 0x033, 0x00000012, 0x03E, 0x00005840, 0x03F, 0x000C3186,
- 0x033, 0x00000011, 0x03E, 0x000058C2, 0x03F, 0x000C3186,
- 0x033, 0x00000010, 0x03E, 0x00005930, 0x03F, 0x000C3186,
- 0x0EF, 0x00000000, 0x0EF, 0x00004000, 0x033, 0x00000000,
- 0x03F, 0x0000000A, 0x033, 0x00000001, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000005, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000006, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000005, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x93001000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000005, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0xA0000000, 0x00000000,
- 0x03F, 0x00000005, 0xB0000000, 0x00000000, 0x033, 0x00000002,
- 0x03F, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x00000401,
- 0x084, 0x00001209, 0x086, 0x000001A0, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0xA0000000, 0x00000000,
- 0x087, 0x000E8180, 0xB0000000, 0x00000000, 0x088, 0x00070020,
- 0x0DE, 0x00000010, 0x0EF, 0x00008000, 0x033, 0x0000000F,
- 0x03F, 0x0000003C, 0x033, 0x0000000E, 0x03F, 0x00000038,
- 0x033, 0x0000000D, 0x03F, 0x00000030, 0x033, 0x0000000C,
- 0x03F, 0x00000028, 0x033, 0x0000000B, 0x03F, 0x00000020,
- 0x033, 0x0000000A, 0x03F, 0x00000018, 0x033, 0x00000009,
- 0x03F, 0x00000010, 0x033, 0x00000008, 0x03F, 0x00000008,
- 0x033, 0x00000007, 0x03F, 0x0000003C, 0x033, 0x00000006,
- 0x03F, 0x00000038, 0x033, 0x00000005, 0x03F, 0x00000030,
- 0x033, 0x00000004, 0x03F, 0x00000028, 0x033, 0x00000003,
- 0x03F, 0x00000020, 0x033, 0x00000002, 0x03F, 0x00000018,
- 0x033, 0x00000001, 0x03F, 0x00000010, 0x033, 0x00000000,
- 0x03F, 0x00000008, 0x0EF, 0x00000000, 0x0B8, 0x00080A00,
- 0x0B0, 0x000FF0FA, 0x0FE, 0x00000000, 0x0CA, 0x00080000,
- 0x0C9, 0x0001C141, 0x0FE, 0x00000000, 0x0B0, 0x000FF0F8,
- 0x018, 0x00018D24, 0xFFE, 0x00000000, 0xFFE, 0x00000000,
- 0xFFE, 0x00000000, 0xFFE, 0x00000000, 0x018, 0x00010D24,
- 0x01B, 0x00075A40, 0x0EE, 0x00000002, 0x033, 0x00000000,
- 0x03F, 0x00000004, 0x033, 0x00000001, 0x03F, 0x00000004,
- 0x033, 0x00000002, 0x03F, 0x00000004, 0x033, 0x00000003,
- 0x03F, 0x00000004, 0x033, 0x00000004, 0x03F, 0x00000004,
- 0x033, 0x00000005, 0x03F, 0x00000006, 0x033, 0x00000006,
- 0x03F, 0x00000002, 0x033, 0x00000007, 0x03F, 0x00000000,
- 0x0EE, 0x00000000, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x061, 0x0005D4A0, 0x062, 0x0000D203, 0x063, 0x00000062,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x061, 0x0005D4A0,
- 0x062, 0x0000D203, 0x063, 0x00000062, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D4A0, 0x062, 0x0000D203,
- 0x063, 0x00000062, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D2A1, 0x062, 0x0000D3A2, 0x063, 0x00000062,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x061, 0x0005D4A0,
- 0x062, 0x0000D203, 0x063, 0x00000062, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x061, 0x0005D4A0, 0x062, 0x0000D203,
- 0x063, 0x00000062, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D4A0, 0x062, 0x0000D203, 0x063, 0x00000062,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D2A1,
- 0x062, 0x0000D3A2, 0x063, 0x00000062, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D2A1, 0x062, 0x0000D3A2,
- 0x063, 0x00000062, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D301, 0x062, 0x0000D303, 0x063, 0x00000002,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D301,
- 0x062, 0x0000D303, 0x063, 0x00000002, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D3D1, 0x062, 0x0000D3A2,
- 0x063, 0x00000002, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D2A1, 0x062, 0x0000D3A2, 0x063, 0x00000062,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D3D1,
- 0x062, 0x0000D3A2, 0x063, 0x00000002, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D301, 0x062, 0x0000D303,
- 0x063, 0x00000002, 0x93001000, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D3D1, 0x062, 0x0000D3A2, 0x063, 0x00000002,
- 0x90002100, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D301,
- 0x062, 0x0000D303, 0x063, 0x00000002, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D301, 0x062, 0x0000D303,
- 0x063, 0x00000002, 0xA0000000, 0x00000000, 0x061, 0x0005D3D0,
- 0x062, 0x0000D303, 0x063, 0x00000002, 0xB0000000, 0x00000000,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x0EF, 0x00000200,
- 0x030, 0x000004A3, 0x030, 0x000014A3, 0x030, 0x000024A3,
- 0x030, 0x000034A3, 0x030, 0x000044A3, 0x030, 0x000054A3,
- 0x030, 0x000064A3, 0x030, 0x000074A3, 0x030, 0x000084A3,
- 0x030, 0x000094A3, 0x030, 0x0000A4A3, 0x030, 0x0000B4A3,
- 0x0EF, 0x00000000, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x0EF, 0x00000200, 0x030, 0x000004A3, 0x030, 0x000014A3,
- 0x030, 0x000024A3, 0x030, 0x000034A3, 0x030, 0x000044A3,
- 0x030, 0x000054A3, 0x030, 0x000064A3, 0x030, 0x000074A3,
- 0x030, 0x000084A3, 0x030, 0x000094A3, 0x030, 0x0000A4A3,
- 0x030, 0x0000B4A3, 0x0EF, 0x00000000, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000200, 0x030, 0x000004A3,
- 0x030, 0x000014A3, 0x030, 0x000024A3, 0x030, 0x000034A3,
- 0x030, 0x000044A3, 0x030, 0x000054A3, 0x030, 0x000064A3,
- 0x030, 0x000074A3, 0x030, 0x000084A3, 0x030, 0x000094A3,
- 0x030, 0x0000A4A3, 0x030, 0x0000B4A3, 0x0EF, 0x00000000,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x0EF, 0x00000200,
- 0x030, 0x000002A6, 0x030, 0x000012A6, 0x030, 0x000022A6,
- 0x030, 0x000032A6, 0x030, 0x000042A6, 0x030, 0x000052A6,
- 0x030, 0x000062A6, 0x030, 0x000072A6, 0x030, 0x000082A6,
- 0x030, 0x000092A6, 0x030, 0x0000A2A6, 0x030, 0x0000B2A6,
- 0x0EF, 0x00000000, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x0EF, 0x00000200, 0x030, 0x000004A0, 0x030, 0x000014A0,
- 0x030, 0x000024A0, 0x030, 0x000034A0, 0x030, 0x000044A0,
- 0x030, 0x000054A0, 0x030, 0x000064A0, 0x030, 0x000074A0,
- 0x030, 0x000084A0, 0x030, 0x000094A0, 0x030, 0x0000A4A0,
- 0x030, 0x0000B4A0, 0x0EF, 0x00000000, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x0EF, 0x00000200, 0x030, 0x000004A0,
- 0x030, 0x000014A0, 0x030, 0x000024A0, 0x030, 0x000034A0,
- 0x030, 0x000044A0, 0x030, 0x000054A0, 0x030, 0x000064A0,
- 0x030, 0x000074A0, 0x030, 0x000084A0, 0x030, 0x000094A0,
- 0x030, 0x0000A4A0, 0x030, 0x0000B4A0, 0x0EF, 0x00000000,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x0EF, 0x00000200,
- 0x030, 0x000004A0, 0x030, 0x000014A0, 0x030, 0x000024A0,
- 0x030, 0x000034A0, 0x030, 0x000044A0, 0x030, 0x000054A0,
- 0x030, 0x000064A0, 0x030, 0x000074A0, 0x030, 0x000084A0,
- 0x030, 0x000094A0, 0x030, 0x0000A4A0, 0x030, 0x0000B4A0,
- 0x0EF, 0x00000000, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000200, 0x030, 0x000002A1, 0x030, 0x000012A1,
- 0x030, 0x000022A1, 0x030, 0x000032A1, 0x030, 0x000042A1,
- 0x030, 0x000052A1, 0x030, 0x000062A1, 0x030, 0x000072A1,
- 0x030, 0x000082A1, 0x030, 0x000092A1, 0x030, 0x0000A2A1,
- 0x030, 0x0000B2A1, 0x0EF, 0x00000000, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000200, 0x030, 0x000002A6,
- 0x030, 0x000012A6, 0x030, 0x000022A6, 0x030, 0x000032A6,
- 0x030, 0x000042A6, 0x030, 0x000052A6, 0x030, 0x000062A6,
- 0x030, 0x000072A6, 0x030, 0x000082A6, 0x030, 0x000092A6,
- 0x030, 0x0000A2A6, 0x030, 0x0000B2A6, 0x0EF, 0x00000000,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x0EF, 0x00000200,
- 0x030, 0x00000384, 0x030, 0x00001384, 0x030, 0x00002384,
- 0x030, 0x00003384, 0x030, 0x00004425, 0x030, 0x00005425,
- 0x030, 0x00006425, 0x030, 0x00007425, 0x030, 0x000083A4,
- 0x030, 0x000093A4, 0x030, 0x0000A3A4, 0x030, 0x0000B3A4,
- 0x0EF, 0x00000000, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000200, 0x030, 0x000003A3, 0x030, 0x000013A3,
- 0x030, 0x000023A3, 0x030, 0x000033A3, 0x030, 0x00004355,
- 0x030, 0x00005355, 0x030, 0x00006355, 0x030, 0x00007355,
- 0x030, 0x00008314, 0x030, 0x00009314, 0x030, 0x0000A314,
- 0x030, 0x0000B314, 0x0EF, 0x00000000, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000200, 0x030, 0x000003A1,
- 0x030, 0x000013A1, 0x030, 0x000023A1, 0x030, 0x000033A1,
- 0x030, 0x000043A3, 0x030, 0x000053A3, 0x030, 0x000063A3,
- 0x030, 0x000073A3, 0x030, 0x000083A5, 0x030, 0x000093A5,
- 0x030, 0x0000A3A5, 0x030, 0x0000B3A5, 0x0EF, 0x00000000,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x0EF, 0x00000200,
- 0x030, 0x000002A1, 0x030, 0x000012A1, 0x030, 0x000022A1,
- 0x030, 0x000032A1, 0x030, 0x000042A1, 0x030, 0x000052A1,
- 0x030, 0x000062A1, 0x030, 0x000072A1, 0x030, 0x000082A1,
- 0x030, 0x000092A1, 0x030, 0x0000A2A1, 0x030, 0x0000B2A1,
- 0x0EF, 0x00000000, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000200, 0x030, 0x00000463, 0x030, 0x00001463,
- 0x030, 0x00002463, 0x030, 0x00003463, 0x030, 0x00004545,
- 0x030, 0x00005545, 0x030, 0x00006545, 0x030, 0x00007545,
- 0x030, 0x00008565, 0x030, 0x00009565, 0x030, 0x0000A565,
- 0x030, 0x0000B565, 0x0EF, 0x00000000, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000200, 0x030, 0x00000303,
- 0x030, 0x00001303, 0x030, 0x00002303, 0x030, 0x00003303,
- 0x030, 0x000043A4, 0x030, 0x000053A4, 0x030, 0x000063A4,
- 0x030, 0x000073A4, 0x030, 0x00008365, 0x030, 0x00009365,
- 0x030, 0x0000A365, 0x030, 0x0000B365, 0x0EF, 0x00000000,
- 0x93001000, 0x00000000, 0x40000000, 0x00000000, 0x0EF, 0x00000200,
- 0x030, 0x000003A2, 0x030, 0x000013A2, 0x030, 0x000023A2,
- 0x030, 0x000033A2, 0x030, 0x00004343, 0x030, 0x00005343,
- 0x030, 0x00006343, 0x030, 0x00007343, 0x030, 0x00008364,
- 0x030, 0x00009364, 0x030, 0x0000A364, 0x030, 0x0000B364,
- 0x0EF, 0x00000000, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000200, 0x030, 0x000003A0, 0x030, 0x000013A0,
- 0x030, 0x000023A0, 0x030, 0x000033A0, 0x030, 0x00004430,
- 0x030, 0x00005430, 0x030, 0x00006430, 0x030, 0x00007430,
- 0x030, 0x00008372, 0x030, 0x00009372, 0x030, 0x0000A372,
- 0x030, 0x0000B372, 0x0EF, 0x00000000, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000200, 0x030, 0x000003A0,
- 0x030, 0x000013A0, 0x030, 0x000023A0, 0x030, 0x000033A0,
- 0x030, 0x000043A1, 0x030, 0x000053A1, 0x030, 0x000063A1,
- 0x030, 0x000073A1, 0x030, 0x000083A2, 0x030, 0x000093A2,
- 0x030, 0x0000A3A2, 0x030, 0x0000B3A2, 0x0EF, 0x00000000,
- 0xA0000000, 0x00000000, 0x0EF, 0x00000200, 0x030, 0x000003D0,
- 0x030, 0x000013D0, 0x030, 0x000023D0, 0x030, 0x000033D0,
- 0x030, 0x000043D0, 0x030, 0x000053D0, 0x030, 0x000063D0,
- 0x030, 0x000073D0, 0x030, 0x000083D0, 0x030, 0x000093D0,
- 0x030, 0x0000A3D0, 0x030, 0x0000B3D0, 0x0EF, 0x00000000,
- 0xB0000000, 0x00000000, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x00000203, 0x030, 0x00001203,
- 0x030, 0x00002203, 0x030, 0x00003203, 0x030, 0x00004203,
- 0x030, 0x00005203, 0x030, 0x00006203, 0x030, 0x00007203,
- 0x030, 0x00008203, 0x030, 0x00009203, 0x030, 0x0000A203,
- 0x030, 0x0000B203, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x00000203, 0x030, 0x00001203,
- 0x030, 0x00002203, 0x030, 0x00003203, 0x030, 0x00004203,
- 0x030, 0x00005203, 0x030, 0x00006203, 0x030, 0x00007203,
- 0x030, 0x00008203, 0x030, 0x00009203, 0x030, 0x0000A203,
- 0x030, 0x0000B203, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x00000203, 0x030, 0x00001203,
- 0x030, 0x00002203, 0x030, 0x00003203, 0x030, 0x00004203,
- 0x030, 0x00005203, 0x030, 0x00006203, 0x030, 0x00007203,
- 0x030, 0x00008203, 0x030, 0x00009203, 0x030, 0x0000A203,
- 0x030, 0x0000B203, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x00000203, 0x030, 0x00001203,
- 0x030, 0x00002203, 0x030, 0x00003203, 0x030, 0x00004203,
- 0x030, 0x00005203, 0x030, 0x00006203, 0x030, 0x00007203,
- 0x030, 0x00008203, 0x030, 0x00009203, 0x030, 0x0000A203,
- 0x030, 0x0000B203, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x00000203, 0x030, 0x00001203,
- 0x030, 0x00002203, 0x030, 0x00003203, 0x030, 0x00004203,
- 0x030, 0x00005203, 0x030, 0x00006203, 0x030, 0x00007203,
- 0x030, 0x00008203, 0x030, 0x00009203, 0x030, 0x0000A203,
- 0x030, 0x0000B203, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x00000203, 0x030, 0x00001203,
- 0x030, 0x00002203, 0x030, 0x00003203, 0x030, 0x00004203,
- 0x030, 0x00005203, 0x030, 0x00006203, 0x030, 0x00007203,
- 0x030, 0x00008203, 0x030, 0x00009203, 0x030, 0x0000A203,
- 0x030, 0x0000B203, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A3, 0x030, 0x000013A3,
- 0x030, 0x000023A3, 0x030, 0x000033A3, 0x030, 0x000043A3,
- 0x030, 0x000053A3, 0x030, 0x000063A3, 0x030, 0x000073A3,
- 0x030, 0x000083A3, 0x030, 0x000093A3, 0x030, 0x0000A3A3,
- 0x030, 0x0000B3A3, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x93011000, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x93001000, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x90002000, 0x00000000, 0x40000000, 0x00000000,
- 0x0EF, 0x00000080, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0xA0000000, 0x00000000, 0x0EF, 0x00000080,
- 0x030, 0x000003A2, 0x030, 0x000013A2, 0x030, 0x000023A2,
- 0x030, 0x000033A2, 0x030, 0x000043A2, 0x030, 0x000053A2,
- 0x030, 0x000063A2, 0x030, 0x000073A2, 0x030, 0x000083A2,
- 0x030, 0x000093A2, 0x030, 0x0000A3A2, 0x030, 0x0000B3A2,
- 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004777,
- 0x030, 0x00005777, 0x030, 0x00006777, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004777,
- 0x030, 0x00005777, 0x030, 0x00006777, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000660,
- 0x030, 0x00001443, 0x030, 0x00002221, 0x030, 0x00004777,
- 0x030, 0x00005777, 0x030, 0x00006777, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000776,
- 0x030, 0x00001455, 0x030, 0x00002325, 0x030, 0x00004777,
- 0x030, 0x00005777, 0x030, 0x00006777, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000764,
- 0x030, 0x00001632, 0x030, 0x00002421, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000764,
- 0x030, 0x00001632, 0x030, 0x00002421, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000777,
- 0x030, 0x00001442, 0x030, 0x00002222, 0x030, 0x00004777,
- 0x030, 0x00005777, 0x030, 0x00006777, 0x93001000, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000764,
- 0x030, 0x00001632, 0x030, 0x00002421, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000775,
- 0x030, 0x00001343, 0x030, 0x00002210, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x0EF, 0x00000040, 0x030, 0x00000775,
- 0x030, 0x00001422, 0x030, 0x00002210, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0xA0000000, 0x00000000,
- 0x0EF, 0x00000040, 0x030, 0x00000764, 0x030, 0x00001632,
- 0x030, 0x00002421, 0x030, 0x00004000, 0x030, 0x00005000,
- 0x030, 0x00006000, 0xB0000000, 0x00000000, 0x0EF, 0x00000000,
- 0x0EF, 0x00000800, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000007, 0x033, 0x00000021,
- 0x03F, 0x0000000A, 0x033, 0x00000022, 0x03F, 0x0000000D,
- 0x033, 0x00000023, 0x03F, 0x0000002A, 0x033, 0x00000024,
- 0x03F, 0x0000002D, 0x033, 0x00000025, 0x03F, 0x00000030,
- 0x033, 0x00000026, 0x03F, 0x0000006D, 0x033, 0x00000027,
- 0x03F, 0x00000070, 0x033, 0x00000028, 0x03F, 0x000000ED,
- 0x033, 0x00000029, 0x03F, 0x000000F0, 0x033, 0x0000002A,
- 0x03F, 0x000000F3, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000007, 0x033, 0x00000021,
- 0x03F, 0x0000000A, 0x033, 0x00000022, 0x03F, 0x0000000D,
- 0x033, 0x00000023, 0x03F, 0x0000002A, 0x033, 0x00000024,
- 0x03F, 0x0000002D, 0x033, 0x00000025, 0x03F, 0x00000030,
- 0x033, 0x00000026, 0x03F, 0x0000006D, 0x033, 0x00000027,
- 0x03F, 0x00000070, 0x033, 0x00000028, 0x03F, 0x000000ED,
- 0x033, 0x00000029, 0x03F, 0x000000F0, 0x033, 0x0000002A,
- 0x03F, 0x000000F3, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000007, 0x033, 0x00000021,
- 0x03F, 0x0000000A, 0x033, 0x00000022, 0x03F, 0x0000000D,
- 0x033, 0x00000023, 0x03F, 0x0000002A, 0x033, 0x00000024,
- 0x03F, 0x0000002D, 0x033, 0x00000025, 0x03F, 0x00000030,
- 0x033, 0x00000026, 0x03F, 0x0000006D, 0x033, 0x00000027,
- 0x03F, 0x00000070, 0x033, 0x00000028, 0x03F, 0x000000ED,
- 0x033, 0x00000029, 0x03F, 0x000000F0, 0x033, 0x0000002A,
- 0x03F, 0x000000F3, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000005, 0x033, 0x00000021,
- 0x03F, 0x00000008, 0x033, 0x00000022, 0x03F, 0x0000000B,
- 0x033, 0x00000023, 0x03F, 0x0000000E, 0x033, 0x00000024,
- 0x03F, 0x0000002B, 0x033, 0x00000025, 0x03F, 0x00000068,
- 0x033, 0x00000026, 0x03F, 0x0000006B, 0x033, 0x00000027,
- 0x03F, 0x0000006E, 0x033, 0x00000028, 0x03F, 0x00000071,
- 0x033, 0x00000029, 0x03F, 0x00000074, 0x033, 0x0000002A,
- 0x03F, 0x00000077, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000007, 0x033, 0x00000021,
- 0x03F, 0x0000000A, 0x033, 0x00000022, 0x03F, 0x0000000D,
- 0x033, 0x00000023, 0x03F, 0x0000002A, 0x033, 0x00000024,
- 0x03F, 0x0000002D, 0x033, 0x00000025, 0x03F, 0x00000030,
- 0x033, 0x00000026, 0x03F, 0x0000006D, 0x033, 0x00000027,
- 0x03F, 0x00000070, 0x033, 0x00000028, 0x03F, 0x000000ED,
- 0x033, 0x00000029, 0x03F, 0x000000F0, 0x033, 0x0000002A,
- 0x03F, 0x000000F3, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000007, 0x033, 0x00000021,
- 0x03F, 0x0000000A, 0x033, 0x00000022, 0x03F, 0x0000000D,
- 0x033, 0x00000023, 0x03F, 0x0000002A, 0x033, 0x00000024,
- 0x03F, 0x0000002D, 0x033, 0x00000025, 0x03F, 0x00000030,
- 0x033, 0x00000026, 0x03F, 0x0000006D, 0x033, 0x00000027,
- 0x03F, 0x00000070, 0x033, 0x00000028, 0x03F, 0x000000ED,
- 0x033, 0x00000029, 0x03F, 0x000000F0, 0x033, 0x0000002A,
- 0x03F, 0x000000F3, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000007, 0x033, 0x00000021,
- 0x03F, 0x0000000A, 0x033, 0x00000022, 0x03F, 0x0000000D,
- 0x033, 0x00000023, 0x03F, 0x0000002A, 0x033, 0x00000024,
- 0x03F, 0x0000002D, 0x033, 0x00000025, 0x03F, 0x00000030,
- 0x033, 0x00000026, 0x03F, 0x0000006D, 0x033, 0x00000027,
- 0x03F, 0x00000070, 0x033, 0x00000028, 0x03F, 0x000000ED,
- 0x033, 0x00000029, 0x03F, 0x000000F0, 0x033, 0x0000002A,
- 0x03F, 0x000000F3, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000005, 0x033, 0x00000021,
- 0x03F, 0x00000008, 0x033, 0x00000022, 0x03F, 0x0000000B,
- 0x033, 0x00000023, 0x03F, 0x0000000E, 0x033, 0x00000024,
- 0x03F, 0x0000002B, 0x033, 0x00000025, 0x03F, 0x00000068,
- 0x033, 0x00000026, 0x03F, 0x0000006B, 0x033, 0x00000027,
- 0x03F, 0x0000006E, 0x033, 0x00000028, 0x03F, 0x00000071,
- 0x033, 0x00000029, 0x03F, 0x00000074, 0x033, 0x0000002A,
- 0x03F, 0x00000077, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000005, 0x033, 0x00000021,
- 0x03F, 0x00000008, 0x033, 0x00000022, 0x03F, 0x0000000B,
- 0x033, 0x00000023, 0x03F, 0x0000000E, 0x033, 0x00000024,
- 0x03F, 0x0000002B, 0x033, 0x00000025, 0x03F, 0x00000068,
- 0x033, 0x00000026, 0x03F, 0x0000006B, 0x033, 0x00000027,
- 0x03F, 0x0000006E, 0x033, 0x00000028, 0x03F, 0x00000071,
- 0x033, 0x00000029, 0x03F, 0x00000074, 0x033, 0x0000002A,
- 0x03F, 0x00000077, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000C0C, 0x033, 0x00000021,
- 0x03F, 0x00000C29, 0x033, 0x00000022, 0x03F, 0x00000C2C,
- 0x033, 0x00000023, 0x03F, 0x00000C69, 0x033, 0x00000024,
- 0x03F, 0x00000CA8, 0x033, 0x00000025, 0x03F, 0x00000CE8,
- 0x033, 0x00000026, 0x03F, 0x00000CEB, 0x033, 0x00000027,
- 0x03F, 0x00000CEE, 0x033, 0x00000028, 0x03F, 0x00000CF1,
- 0x033, 0x00000029, 0x03F, 0x00000CF4, 0x033, 0x0000002A,
- 0x03F, 0x00000CF7, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x0000042B, 0x033, 0x00000021,
- 0x03F, 0x0000082A, 0x033, 0x00000022, 0x03F, 0x00000849,
- 0x033, 0x00000023, 0x03F, 0x0000084C, 0x033, 0x00000024,
- 0x03F, 0x00000C4C, 0x033, 0x00000025, 0x03F, 0x00000CA9,
- 0x033, 0x00000026, 0x03F, 0x00000CEA, 0x033, 0x00000027,
- 0x03F, 0x00000CED, 0x033, 0x00000028, 0x03F, 0x00000CF0,
- 0x033, 0x00000029, 0x03F, 0x00000CF3, 0x033, 0x0000002A,
- 0x03F, 0x00000CF6, 0x93011000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000C09, 0x033, 0x00000021,
- 0x03F, 0x00000C0C, 0x033, 0x00000022, 0x03F, 0x00000C0F,
- 0x033, 0x00000023, 0x03F, 0x00000C2C, 0x033, 0x00000024,
- 0x03F, 0x00000C2F, 0x033, 0x00000025, 0x03F, 0x00000C8A,
- 0x033, 0x00000026, 0x03F, 0x00000C8D, 0x033, 0x00000027,
- 0x03F, 0x00000C90, 0x033, 0x00000028, 0x03F, 0x00000CD0,
- 0x033, 0x00000029, 0x03F, 0x00000CF2, 0x033, 0x0000002A,
- 0x03F, 0x00000CF5, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000005, 0x033, 0x00000021,
- 0x03F, 0x00000008, 0x033, 0x00000022, 0x03F, 0x0000000B,
- 0x033, 0x00000023, 0x03F, 0x0000000E, 0x033, 0x00000024,
- 0x03F, 0x0000002B, 0x033, 0x00000025, 0x03F, 0x00000068,
- 0x033, 0x00000026, 0x03F, 0x0000006B, 0x033, 0x00000027,
- 0x03F, 0x0000006E, 0x033, 0x00000028, 0x03F, 0x00000071,
- 0x033, 0x00000029, 0x03F, 0x00000074, 0x033, 0x0000002A,
- 0x03F, 0x00000077, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000C09, 0x033, 0x00000021,
- 0x03F, 0x00000C0C, 0x033, 0x00000022, 0x03F, 0x00000C0F,
- 0x033, 0x00000023, 0x03F, 0x00000C2C, 0x033, 0x00000024,
- 0x03F, 0x00000C2F, 0x033, 0x00000025, 0x03F, 0x00000C8A,
- 0x033, 0x00000026, 0x03F, 0x00000C8D, 0x033, 0x00000027,
- 0x03F, 0x00000C90, 0x033, 0x00000028, 0x03F, 0x00000CD0,
- 0x033, 0x00000029, 0x03F, 0x00000CF2, 0x033, 0x0000002A,
- 0x03F, 0x00000CF5, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000429, 0x033, 0x00000021,
- 0x03F, 0x00000828, 0x033, 0x00000022, 0x03F, 0x00000847,
- 0x033, 0x00000023, 0x03F, 0x0000084A, 0x033, 0x00000024,
- 0x03F, 0x00000C4B, 0x033, 0x00000025, 0x03F, 0x00000C8A,
- 0x033, 0x00000026, 0x03F, 0x00000CEA, 0x033, 0x00000027,
- 0x03F, 0x00000CED, 0x033, 0x00000028, 0x03F, 0x00000CF0,
- 0x033, 0x00000029, 0x03F, 0x00000CF3, 0x033, 0x0000002A,
- 0x03F, 0x00000CF6, 0x93001000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x00000C09, 0x033, 0x00000021,
- 0x03F, 0x00000C0C, 0x033, 0x00000022, 0x03F, 0x00000C0F,
- 0x033, 0x00000023, 0x03F, 0x00000C2C, 0x033, 0x00000024,
- 0x03F, 0x00000C2F, 0x033, 0x00000025, 0x03F, 0x00000C8A,
- 0x033, 0x00000026, 0x03F, 0x00000C8D, 0x033, 0x00000027,
- 0x03F, 0x00000C90, 0x033, 0x00000028, 0x03F, 0x00000CD0,
- 0x033, 0x00000029, 0x03F, 0x00000CF2, 0x033, 0x0000002A,
- 0x03F, 0x00000CF5, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x0000042B, 0x033, 0x00000021,
- 0x03F, 0x0000082A, 0x033, 0x00000022, 0x03F, 0x00000849,
- 0x033, 0x00000023, 0x03F, 0x0000084C, 0x033, 0x00000024,
- 0x03F, 0x00000C4C, 0x033, 0x00000025, 0x03F, 0x00000C8A,
- 0x033, 0x00000026, 0x03F, 0x00000C8D, 0x033, 0x00000027,
- 0x03F, 0x00000CEB, 0x033, 0x00000028, 0x03F, 0x00000CEE,
- 0x033, 0x00000029, 0x03F, 0x00000CF1, 0x033, 0x0000002A,
- 0x03F, 0x00000CF4, 0x90002000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000020, 0x03F, 0x0000042B, 0x033, 0x00000021,
- 0x03F, 0x0000082A, 0x033, 0x00000022, 0x03F, 0x00000849,
- 0x033, 0x00000023, 0x03F, 0x0000084C, 0x033, 0x00000024,
- 0x03F, 0x00000C4C, 0x033, 0x00000025, 0x03F, 0x00000C8A,
- 0x033, 0x00000026, 0x03F, 0x00000C8D, 0x033, 0x00000027,
- 0x03F, 0x00000CEB, 0x033, 0x00000028, 0x03F, 0x00000CEE,
- 0x033, 0x00000029, 0x03F, 0x00000CF1, 0x033, 0x0000002A,
- 0x03F, 0x00000CF4, 0xA0000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000C09, 0x033, 0x00000021, 0x03F, 0x00000C0C,
- 0x033, 0x00000022, 0x03F, 0x00000C0F, 0x033, 0x00000023,
- 0x03F, 0x00000C2C, 0x033, 0x00000024, 0x03F, 0x00000C2F,
- 0x033, 0x00000025, 0x03F, 0x00000C8A, 0x033, 0x00000026,
- 0x03F, 0x00000C8D, 0x033, 0x00000027, 0x03F, 0x00000C90,
- 0x033, 0x00000028, 0x03F, 0x00000CD0, 0x033, 0x00000029,
- 0x03F, 0x00000CF2, 0x033, 0x0000002A, 0x03F, 0x00000CF5,
- 0xB0000000, 0x00000000, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000007, 0x033, 0x00000061,
- 0x03F, 0x0000000A, 0x033, 0x00000062, 0x03F, 0x0000000D,
- 0x033, 0x00000063, 0x03F, 0x0000002A, 0x033, 0x00000064,
- 0x03F, 0x0000002D, 0x033, 0x00000065, 0x03F, 0x00000030,
- 0x033, 0x00000066, 0x03F, 0x0000006D, 0x033, 0x00000067,
- 0x03F, 0x00000070, 0x033, 0x00000068, 0x03F, 0x000000ED,
- 0x033, 0x00000069, 0x03F, 0x000000F0, 0x033, 0x0000006A,
- 0x03F, 0x000000F3, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000007, 0x033, 0x00000061,
- 0x03F, 0x0000000A, 0x033, 0x00000062, 0x03F, 0x0000000D,
- 0x033, 0x00000063, 0x03F, 0x0000002A, 0x033, 0x00000064,
- 0x03F, 0x0000002D, 0x033, 0x00000065, 0x03F, 0x00000030,
- 0x033, 0x00000066, 0x03F, 0x0000006D, 0x033, 0x00000067,
- 0x03F, 0x00000070, 0x033, 0x00000068, 0x03F, 0x000000ED,
- 0x033, 0x00000069, 0x03F, 0x000000F0, 0x033, 0x0000006A,
- 0x03F, 0x000000F3, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000007, 0x033, 0x00000061,
- 0x03F, 0x0000000A, 0x033, 0x00000062, 0x03F, 0x0000000D,
- 0x033, 0x00000063, 0x03F, 0x0000002A, 0x033, 0x00000064,
- 0x03F, 0x0000002D, 0x033, 0x00000065, 0x03F, 0x00000030,
- 0x033, 0x00000066, 0x03F, 0x0000006D, 0x033, 0x00000067,
- 0x03F, 0x00000070, 0x033, 0x00000068, 0x03F, 0x000000ED,
- 0x033, 0x00000069, 0x03F, 0x000000F0, 0x033, 0x0000006A,
- 0x03F, 0x000000F3, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000005, 0x033, 0x00000061,
- 0x03F, 0x00000008, 0x033, 0x00000062, 0x03F, 0x0000000B,
- 0x033, 0x00000063, 0x03F, 0x0000000E, 0x033, 0x00000064,
- 0x03F, 0x0000002B, 0x033, 0x00000065, 0x03F, 0x00000068,
- 0x033, 0x00000066, 0x03F, 0x0000006B, 0x033, 0x00000067,
- 0x03F, 0x0000006E, 0x033, 0x00000068, 0x03F, 0x00000071,
- 0x033, 0x00000069, 0x03F, 0x00000074, 0x033, 0x0000006A,
- 0x03F, 0x00000077, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000007, 0x033, 0x00000061,
- 0x03F, 0x0000000A, 0x033, 0x00000062, 0x03F, 0x0000000D,
- 0x033, 0x00000063, 0x03F, 0x0000002A, 0x033, 0x00000064,
- 0x03F, 0x0000002D, 0x033, 0x00000065, 0x03F, 0x00000030,
- 0x033, 0x00000066, 0x03F, 0x0000006D, 0x033, 0x00000067,
- 0x03F, 0x00000070, 0x033, 0x00000068, 0x03F, 0x000000ED,
- 0x033, 0x00000069, 0x03F, 0x000000F0, 0x033, 0x0000006A,
- 0x03F, 0x000000F3, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000007, 0x033, 0x00000061,
- 0x03F, 0x0000000A, 0x033, 0x00000062, 0x03F, 0x0000000D,
- 0x033, 0x00000063, 0x03F, 0x0000002A, 0x033, 0x00000064,
- 0x03F, 0x0000002D, 0x033, 0x00000065, 0x03F, 0x00000030,
- 0x033, 0x00000066, 0x03F, 0x0000006D, 0x033, 0x00000067,
- 0x03F, 0x00000070, 0x033, 0x00000068, 0x03F, 0x000000ED,
- 0x033, 0x00000069, 0x03F, 0x000000F0, 0x033, 0x0000006A,
- 0x03F, 0x000000F3, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000007, 0x033, 0x00000061,
- 0x03F, 0x0000000A, 0x033, 0x00000062, 0x03F, 0x0000000D,
- 0x033, 0x00000063, 0x03F, 0x0000002A, 0x033, 0x00000064,
- 0x03F, 0x0000002D, 0x033, 0x00000065, 0x03F, 0x00000030,
- 0x033, 0x00000066, 0x03F, 0x0000006D, 0x033, 0x00000067,
- 0x03F, 0x00000070, 0x033, 0x00000068, 0x03F, 0x000000ED,
- 0x033, 0x00000069, 0x03F, 0x000000F0, 0x033, 0x0000006A,
- 0x03F, 0x000000F3, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000005, 0x033, 0x00000061,
- 0x03F, 0x00000008, 0x033, 0x00000062, 0x03F, 0x0000000B,
- 0x033, 0x00000063, 0x03F, 0x0000000E, 0x033, 0x00000064,
- 0x03F, 0x0000002B, 0x033, 0x00000065, 0x03F, 0x00000068,
- 0x033, 0x00000066, 0x03F, 0x0000006B, 0x033, 0x00000067,
- 0x03F, 0x0000006E, 0x033, 0x00000068, 0x03F, 0x00000071,
- 0x033, 0x00000069, 0x03F, 0x00000074, 0x033, 0x0000006A,
- 0x03F, 0x00000077, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000005, 0x033, 0x00000061,
- 0x03F, 0x00000008, 0x033, 0x00000062, 0x03F, 0x0000000B,
- 0x033, 0x00000063, 0x03F, 0x0000000E, 0x033, 0x00000064,
- 0x03F, 0x0000002B, 0x033, 0x00000065, 0x03F, 0x00000068,
- 0x033, 0x00000066, 0x03F, 0x0000006B, 0x033, 0x00000067,
- 0x03F, 0x0000006E, 0x033, 0x00000068, 0x03F, 0x00000071,
- 0x033, 0x00000069, 0x03F, 0x00000074, 0x033, 0x0000006A,
- 0x03F, 0x00000077, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x0000080B, 0x033, 0x00000061,
- 0x03F, 0x0000080E, 0x033, 0x00000062, 0x03F, 0x00000848,
- 0x033, 0x00000063, 0x03F, 0x00000869, 0x033, 0x00000064,
- 0x03F, 0x000008A9, 0x033, 0x00000065, 0x03F, 0x00000CE8,
- 0x033, 0x00000066, 0x03F, 0x00000CEB, 0x033, 0x00000067,
- 0x03F, 0x00000CEE, 0x033, 0x00000068, 0x03F, 0x00000CF1,
- 0x033, 0x00000069, 0x03F, 0x00000CF4, 0x033, 0x0000006A,
- 0x03F, 0x00000CF7, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x0000042B, 0x033, 0x00000061,
- 0x03F, 0x0000082A, 0x033, 0x00000062, 0x03F, 0x00000849,
- 0x033, 0x00000063, 0x03F, 0x0000084C, 0x033, 0x00000064,
- 0x03F, 0x00000C4C, 0x033, 0x00000065, 0x03F, 0x00000CA9,
- 0x033, 0x00000066, 0x03F, 0x00000CEA, 0x033, 0x00000067,
- 0x03F, 0x00000CED, 0x033, 0x00000068, 0x03F, 0x00000CF0,
- 0x033, 0x00000069, 0x03F, 0x00000CF3, 0x033, 0x0000006A,
- 0x03F, 0x00000CF6, 0x93011000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000C0A, 0x033, 0x00000061,
- 0x03F, 0x00000C0D, 0x033, 0x00000062, 0x03F, 0x00000C2A,
- 0x033, 0x00000063, 0x03F, 0x00000C2D, 0x033, 0x00000064,
- 0x03F, 0x00000C6A, 0x033, 0x00000065, 0x03F, 0x00000CAA,
- 0x033, 0x00000066, 0x03F, 0x00000CAD, 0x033, 0x00000067,
- 0x03F, 0x00000CB0, 0x033, 0x00000068, 0x03F, 0x00000CF1,
- 0x033, 0x00000069, 0x03F, 0x00000CF4, 0x033, 0x0000006A,
- 0x03F, 0x00000CF7, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000005, 0x033, 0x00000061,
- 0x03F, 0x00000008, 0x033, 0x00000062, 0x03F, 0x0000000B,
- 0x033, 0x00000063, 0x03F, 0x0000000E, 0x033, 0x00000064,
- 0x03F, 0x0000002B, 0x033, 0x00000065, 0x03F, 0x00000068,
- 0x033, 0x00000066, 0x03F, 0x0000006B, 0x033, 0x00000067,
- 0x03F, 0x0000006E, 0x033, 0x00000068, 0x03F, 0x00000071,
- 0x033, 0x00000069, 0x03F, 0x00000074, 0x033, 0x0000006A,
- 0x03F, 0x00000077, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000C0A, 0x033, 0x00000061,
- 0x03F, 0x00000C0D, 0x033, 0x00000062, 0x03F, 0x00000C2A,
- 0x033, 0x00000063, 0x03F, 0x00000C2D, 0x033, 0x00000064,
- 0x03F, 0x00000C6A, 0x033, 0x00000065, 0x03F, 0x00000CAA,
- 0x033, 0x00000066, 0x03F, 0x00000CAD, 0x033, 0x00000067,
- 0x03F, 0x00000CB0, 0x033, 0x00000068, 0x03F, 0x00000CF1,
- 0x033, 0x00000069, 0x03F, 0x00000CF4, 0x033, 0x0000006A,
- 0x03F, 0x00000CF7, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000429, 0x033, 0x00000061,
- 0x03F, 0x00000828, 0x033, 0x00000062, 0x03F, 0x00000847,
- 0x033, 0x00000063, 0x03F, 0x0000084A, 0x033, 0x00000064,
- 0x03F, 0x00000C4B, 0x033, 0x00000065, 0x03F, 0x00000C8A,
- 0x033, 0x00000066, 0x03F, 0x00000CEA, 0x033, 0x00000067,
- 0x03F, 0x00000CED, 0x033, 0x00000068, 0x03F, 0x00000CF0,
- 0x033, 0x00000069, 0x03F, 0x00000CF3, 0x033, 0x0000006A,
- 0x03F, 0x00000CF6, 0x93001000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x00000C0A, 0x033, 0x00000061,
- 0x03F, 0x00000C0D, 0x033, 0x00000062, 0x03F, 0x00000C2A,
- 0x033, 0x00000063, 0x03F, 0x00000C2D, 0x033, 0x00000064,
- 0x03F, 0x00000C6A, 0x033, 0x00000065, 0x03F, 0x00000CAA,
- 0x033, 0x00000066, 0x03F, 0x00000CAD, 0x033, 0x00000067,
- 0x03F, 0x00000CB0, 0x033, 0x00000068, 0x03F, 0x00000CF1,
- 0x033, 0x00000069, 0x03F, 0x00000CF4, 0x033, 0x0000006A,
- 0x03F, 0x00000CF7, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x0000042C, 0x033, 0x00000061,
- 0x03F, 0x0000082B, 0x033, 0x00000062, 0x03F, 0x0000084A,
- 0x033, 0x00000063, 0x03F, 0x0000084D, 0x033, 0x00000064,
- 0x03F, 0x00000C4D, 0x033, 0x00000065, 0x03F, 0x00000C8B,
- 0x033, 0x00000066, 0x03F, 0x00000C8E, 0x033, 0x00000067,
- 0x03F, 0x00000CEC, 0x033, 0x00000068, 0x03F, 0x00000CEF,
- 0x033, 0x00000069, 0x03F, 0x00000CF2, 0x033, 0x0000006A,
- 0x03F, 0x00000CF5, 0x90002000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000060, 0x03F, 0x0000042C, 0x033, 0x00000061,
- 0x03F, 0x0000082B, 0x033, 0x00000062, 0x03F, 0x0000084A,
- 0x033, 0x00000063, 0x03F, 0x0000084D, 0x033, 0x00000064,
- 0x03F, 0x00000C4D, 0x033, 0x00000065, 0x03F, 0x00000C8B,
- 0x033, 0x00000066, 0x03F, 0x00000C8E, 0x033, 0x00000067,
- 0x03F, 0x00000CEC, 0x033, 0x00000068, 0x03F, 0x00000CEF,
- 0x033, 0x00000069, 0x03F, 0x00000CF2, 0x033, 0x0000006A,
- 0x03F, 0x00000CF5, 0xA0000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000C0A, 0x033, 0x00000061, 0x03F, 0x00000C0D,
- 0x033, 0x00000062, 0x03F, 0x00000C2A, 0x033, 0x00000063,
- 0x03F, 0x00000C2D, 0x033, 0x00000064, 0x03F, 0x00000C6A,
- 0x033, 0x00000065, 0x03F, 0x00000CAA, 0x033, 0x00000066,
- 0x03F, 0x00000CAD, 0x033, 0x00000067, 0x03F, 0x00000CB0,
- 0x033, 0x00000068, 0x03F, 0x00000CF1, 0x033, 0x00000069,
- 0x03F, 0x00000CF4, 0x033, 0x0000006A, 0x03F, 0x00000CF7,
- 0xB0000000, 0x00000000, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000007, 0x033, 0x000000A1,
- 0x03F, 0x0000000A, 0x033, 0x000000A2, 0x03F, 0x0000000D,
- 0x033, 0x000000A3, 0x03F, 0x0000002A, 0x033, 0x000000A4,
- 0x03F, 0x0000002D, 0x033, 0x000000A5, 0x03F, 0x00000030,
- 0x033, 0x000000A6, 0x03F, 0x0000006D, 0x033, 0x000000A7,
- 0x03F, 0x00000070, 0x033, 0x000000A8, 0x03F, 0x000000ED,
- 0x033, 0x000000A9, 0x03F, 0x000000F0, 0x033, 0x000000AA,
- 0x03F, 0x000000F3, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000007, 0x033, 0x000000A1,
- 0x03F, 0x0000000A, 0x033, 0x000000A2, 0x03F, 0x0000000D,
- 0x033, 0x000000A3, 0x03F, 0x0000002A, 0x033, 0x000000A4,
- 0x03F, 0x0000002D, 0x033, 0x000000A5, 0x03F, 0x00000030,
- 0x033, 0x000000A6, 0x03F, 0x0000006D, 0x033, 0x000000A7,
- 0x03F, 0x00000070, 0x033, 0x000000A8, 0x03F, 0x000000ED,
- 0x033, 0x000000A9, 0x03F, 0x000000F0, 0x033, 0x000000AA,
- 0x03F, 0x000000F3, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000007, 0x033, 0x000000A1,
- 0x03F, 0x0000000A, 0x033, 0x000000A2, 0x03F, 0x0000000D,
- 0x033, 0x000000A3, 0x03F, 0x0000002A, 0x033, 0x000000A4,
- 0x03F, 0x0000002D, 0x033, 0x000000A5, 0x03F, 0x00000030,
- 0x033, 0x000000A6, 0x03F, 0x0000006D, 0x033, 0x000000A7,
- 0x03F, 0x00000070, 0x033, 0x000000A8, 0x03F, 0x000000ED,
- 0x033, 0x000000A9, 0x03F, 0x000000F0, 0x033, 0x000000AA,
- 0x03F, 0x000000F3, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000005, 0x033, 0x000000A1,
- 0x03F, 0x00000008, 0x033, 0x000000A2, 0x03F, 0x0000000B,
- 0x033, 0x000000A3, 0x03F, 0x0000000E, 0x033, 0x000000A4,
- 0x03F, 0x00000047, 0x033, 0x000000A5, 0x03F, 0x0000004A,
- 0x033, 0x000000A6, 0x03F, 0x0000004D, 0x033, 0x000000A7,
- 0x03F, 0x00000050, 0x033, 0x000000A8, 0x03F, 0x00000053,
- 0x033, 0x000000A9, 0x03F, 0x00000056, 0x033, 0x000000AA,
- 0x03F, 0x00000094, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000007, 0x033, 0x000000A1,
- 0x03F, 0x0000000A, 0x033, 0x000000A2, 0x03F, 0x0000000D,
- 0x033, 0x000000A3, 0x03F, 0x0000002A, 0x033, 0x000000A4,
- 0x03F, 0x0000002D, 0x033, 0x000000A5, 0x03F, 0x00000030,
- 0x033, 0x000000A6, 0x03F, 0x0000006D, 0x033, 0x000000A7,
- 0x03F, 0x00000070, 0x033, 0x000000A8, 0x03F, 0x000000ED,
- 0x033, 0x000000A9, 0x03F, 0x000000F0, 0x033, 0x000000AA,
- 0x03F, 0x000000F3, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000007, 0x033, 0x000000A1,
- 0x03F, 0x0000000A, 0x033, 0x000000A2, 0x03F, 0x0000000D,
- 0x033, 0x000000A3, 0x03F, 0x0000002A, 0x033, 0x000000A4,
- 0x03F, 0x0000002D, 0x033, 0x000000A5, 0x03F, 0x00000030,
- 0x033, 0x000000A6, 0x03F, 0x0000006D, 0x033, 0x000000A7,
- 0x03F, 0x00000070, 0x033, 0x000000A8, 0x03F, 0x000000ED,
- 0x033, 0x000000A9, 0x03F, 0x000000F0, 0x033, 0x000000AA,
- 0x03F, 0x000000F3, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000007, 0x033, 0x000000A1,
- 0x03F, 0x0000000A, 0x033, 0x000000A2, 0x03F, 0x0000000D,
- 0x033, 0x000000A3, 0x03F, 0x0000002A, 0x033, 0x000000A4,
- 0x03F, 0x0000002D, 0x033, 0x000000A5, 0x03F, 0x00000030,
- 0x033, 0x000000A6, 0x03F, 0x0000006D, 0x033, 0x000000A7,
- 0x03F, 0x00000070, 0x033, 0x000000A8, 0x03F, 0x000000ED,
- 0x033, 0x000000A9, 0x03F, 0x000000F0, 0x033, 0x000000AA,
- 0x03F, 0x000000F3, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000005, 0x033, 0x000000A1,
- 0x03F, 0x00000008, 0x033, 0x000000A2, 0x03F, 0x0000000B,
- 0x033, 0x000000A3, 0x03F, 0x0000000E, 0x033, 0x000000A4,
- 0x03F, 0x00000047, 0x033, 0x000000A5, 0x03F, 0x0000004A,
- 0x033, 0x000000A6, 0x03F, 0x0000004D, 0x033, 0x000000A7,
- 0x03F, 0x00000050, 0x033, 0x000000A8, 0x03F, 0x00000053,
- 0x033, 0x000000A9, 0x03F, 0x00000056, 0x033, 0x000000AA,
- 0x03F, 0x00000094, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000005, 0x033, 0x000000A1,
- 0x03F, 0x00000008, 0x033, 0x000000A2, 0x03F, 0x0000000B,
- 0x033, 0x000000A3, 0x03F, 0x0000000E, 0x033, 0x000000A4,
- 0x03F, 0x00000047, 0x033, 0x000000A5, 0x03F, 0x0000004A,
- 0x033, 0x000000A6, 0x03F, 0x0000004D, 0x033, 0x000000A7,
- 0x03F, 0x00000050, 0x033, 0x000000A8, 0x03F, 0x00000053,
- 0x033, 0x000000A9, 0x03F, 0x00000056, 0x033, 0x000000AA,
- 0x03F, 0x00000094, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000C0A, 0x033, 0x000000A1,
- 0x03F, 0x00000C0D, 0x033, 0x000000A2, 0x03F, 0x00000C2A,
- 0x033, 0x000000A3, 0x03F, 0x00000C2D, 0x033, 0x000000A4,
- 0x03F, 0x00000C6A, 0x033, 0x000000A5, 0x03F, 0x00000CE8,
- 0x033, 0x000000A6, 0x03F, 0x00000CEB, 0x033, 0x000000A7,
- 0x03F, 0x00000CEE, 0x033, 0x000000A8, 0x03F, 0x00000CF1,
- 0x033, 0x000000A9, 0x03F, 0x00000CF4, 0x033, 0x000000AA,
- 0x03F, 0x00000CF7, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x0000042A, 0x033, 0x000000A1,
- 0x03F, 0x00000829, 0x033, 0x000000A2, 0x03F, 0x00000848,
- 0x033, 0x000000A3, 0x03F, 0x0000084B, 0x033, 0x000000A4,
- 0x03F, 0x00000C4C, 0x033, 0x000000A5, 0x03F, 0x00000CA9,
- 0x033, 0x000000A6, 0x03F, 0x00000CEA, 0x033, 0x000000A7,
- 0x03F, 0x00000CED, 0x033, 0x000000A8, 0x03F, 0x00000CF0,
- 0x033, 0x000000A9, 0x03F, 0x00000CF3, 0x033, 0x000000AA,
- 0x03F, 0x00000CF6, 0x93011000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000C09, 0x033, 0x000000A1,
- 0x03F, 0x00000C0C, 0x033, 0x000000A2, 0x03F, 0x00000C0F,
- 0x033, 0x000000A3, 0x03F, 0x00000C2C, 0x033, 0x000000A4,
- 0x03F, 0x00000C2F, 0x033, 0x000000A5, 0x03F, 0x00000C8A,
- 0x033, 0x000000A6, 0x03F, 0x00000C8D, 0x033, 0x000000A7,
- 0x03F, 0x00000C90, 0x033, 0x000000A8, 0x03F, 0x00000CEF,
- 0x033, 0x000000A9, 0x03F, 0x00000CF2, 0x033, 0x000000AA,
- 0x03F, 0x00000CF5, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000005, 0x033, 0x000000A1,
- 0x03F, 0x00000008, 0x033, 0x000000A2, 0x03F, 0x0000000B,
- 0x033, 0x000000A3, 0x03F, 0x0000000E, 0x033, 0x000000A4,
- 0x03F, 0x00000047, 0x033, 0x000000A5, 0x03F, 0x0000004A,
- 0x033, 0x000000A6, 0x03F, 0x0000004D, 0x033, 0x000000A7,
- 0x03F, 0x00000050, 0x033, 0x000000A8, 0x03F, 0x00000053,
- 0x033, 0x000000A9, 0x03F, 0x00000056, 0x033, 0x000000AA,
- 0x03F, 0x00000094, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000C09, 0x033, 0x000000A1,
- 0x03F, 0x00000C0C, 0x033, 0x000000A2, 0x03F, 0x00000C0F,
- 0x033, 0x000000A3, 0x03F, 0x00000C2C, 0x033, 0x000000A4,
- 0x03F, 0x00000C2F, 0x033, 0x000000A5, 0x03F, 0x00000C8A,
- 0x033, 0x000000A6, 0x03F, 0x00000C8D, 0x033, 0x000000A7,
- 0x03F, 0x00000C90, 0x033, 0x000000A8, 0x03F, 0x00000CEF,
- 0x033, 0x000000A9, 0x03F, 0x00000CF2, 0x033, 0x000000AA,
- 0x03F, 0x00000CF5, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000429, 0x033, 0x000000A1,
- 0x03F, 0x00000828, 0x033, 0x000000A2, 0x03F, 0x00000847,
- 0x033, 0x000000A3, 0x03F, 0x0000084A, 0x033, 0x000000A4,
- 0x03F, 0x00000C4B, 0x033, 0x000000A5, 0x03F, 0x00000C8A,
- 0x033, 0x000000A6, 0x03F, 0x00000CEA, 0x033, 0x000000A7,
- 0x03F, 0x00000CED, 0x033, 0x000000A8, 0x03F, 0x00000CF0,
- 0x033, 0x000000A9, 0x03F, 0x00000CF3, 0x033, 0x000000AA,
- 0x03F, 0x00000CF6, 0x93001000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x00000C09, 0x033, 0x000000A1,
- 0x03F, 0x00000C0C, 0x033, 0x000000A2, 0x03F, 0x00000C0F,
- 0x033, 0x000000A3, 0x03F, 0x00000C2C, 0x033, 0x000000A4,
- 0x03F, 0x00000C2F, 0x033, 0x000000A5, 0x03F, 0x00000C8A,
- 0x033, 0x000000A6, 0x03F, 0x00000C8D, 0x033, 0x000000A7,
- 0x03F, 0x00000C90, 0x033, 0x000000A8, 0x03F, 0x00000CEF,
- 0x033, 0x000000A9, 0x03F, 0x00000CF2, 0x033, 0x000000AA,
- 0x03F, 0x00000CF5, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x0000042A, 0x033, 0x000000A1,
- 0x03F, 0x00000829, 0x033, 0x000000A2, 0x03F, 0x00000848,
- 0x033, 0x000000A3, 0x03F, 0x0000084B, 0x033, 0x000000A4,
- 0x03F, 0x00000C4C, 0x033, 0x000000A5, 0x03F, 0x00000C8A,
- 0x033, 0x000000A6, 0x03F, 0x00000C8D, 0x033, 0x000000A7,
- 0x03F, 0x00000CEB, 0x033, 0x000000A8, 0x03F, 0x00000CEE,
- 0x033, 0x000000A9, 0x03F, 0x00000CF1, 0x033, 0x000000AA,
- 0x03F, 0x00000CF4, 0x90002000, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x000000A0, 0x03F, 0x0000042A, 0x033, 0x000000A1,
- 0x03F, 0x00000829, 0x033, 0x000000A2, 0x03F, 0x00000848,
- 0x033, 0x000000A3, 0x03F, 0x0000084B, 0x033, 0x000000A4,
- 0x03F, 0x00000C4C, 0x033, 0x000000A5, 0x03F, 0x00000C8A,
- 0x033, 0x000000A6, 0x03F, 0x00000C8D, 0x033, 0x000000A7,
- 0x03F, 0x00000CEB, 0x033, 0x000000A8, 0x03F, 0x00000CEE,
- 0x033, 0x000000A9, 0x03F, 0x00000CF1, 0x033, 0x000000AA,
- 0x03F, 0x00000CF4, 0xA0000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000C09, 0x033, 0x000000A1, 0x03F, 0x00000C0C,
- 0x033, 0x000000A2, 0x03F, 0x00000C0F, 0x033, 0x000000A3,
- 0x03F, 0x00000C2C, 0x033, 0x000000A4, 0x03F, 0x00000C2F,
- 0x033, 0x000000A5, 0x03F, 0x00000C8A, 0x033, 0x000000A6,
- 0x03F, 0x00000C8D, 0x033, 0x000000A7, 0x03F, 0x00000C90,
- 0x033, 0x000000A8, 0x03F, 0x00000CEF, 0x033, 0x000000A9,
- 0x03F, 0x00000CF2, 0x033, 0x000000AA, 0x03F, 0x00000CF5,
- 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x00000400,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x0000047C, 0x033, 0x00000001, 0x03F, 0x0000047C,
- 0x033, 0x00000002, 0x03F, 0x0000047C, 0x033, 0x00000003,
- 0x03F, 0x0000047C, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x0000047C, 0x033, 0x00000001,
- 0x03F, 0x0000047C, 0x033, 0x00000002, 0x03F, 0x0000047C,
- 0x033, 0x00000003, 0x03F, 0x0000047C, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x0000047C,
- 0x033, 0x00000001, 0x03F, 0x0000047C, 0x033, 0x00000002,
- 0x03F, 0x0000047C, 0x033, 0x00000003, 0x03F, 0x0000047C,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x0000047C, 0x033, 0x00000001, 0x03F, 0x0000047C,
- 0x033, 0x00000002, 0x03F, 0x0000047C, 0x033, 0x00000003,
- 0x03F, 0x0000047C, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x0000047C, 0x033, 0x00000001,
- 0x03F, 0x0000047C, 0x033, 0x00000002, 0x03F, 0x0000047C,
- 0x033, 0x00000003, 0x03F, 0x0000047C, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x0000047C,
- 0x033, 0x00000001, 0x03F, 0x0000047C, 0x033, 0x00000002,
- 0x03F, 0x0000047C, 0x033, 0x00000003, 0x03F, 0x0000047C,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x0000047C, 0x033, 0x00000001, 0x03F, 0x0000047C,
- 0x033, 0x00000002, 0x03F, 0x0000047C, 0x033, 0x00000003,
- 0x03F, 0x0000047C, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x0000047C, 0x033, 0x00000001,
- 0x03F, 0x0000047C, 0x033, 0x00000002, 0x03F, 0x0000047C,
- 0x033, 0x00000003, 0x03F, 0x0000047C, 0xA0000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x000004BB, 0x033, 0x00000001,
- 0x03F, 0x000004BB, 0x033, 0x00000002, 0x03F, 0x000004BB,
- 0x033, 0x00000003, 0x03F, 0x000004BB, 0xB0000000, 0x00000000,
- 0x0EF, 0x00000000, 0x0EF, 0x00000100, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00001726,
- 0x033, 0x00000001, 0x03F, 0x00001726, 0x033, 0x00000002,
- 0x03F, 0x00001726, 0x033, 0x00000003, 0x03F, 0x00001726,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00001726, 0x033, 0x00000001, 0x03F, 0x00001726,
- 0x033, 0x00000002, 0x03F, 0x00001726, 0x033, 0x00000003,
- 0x03F, 0x00001726, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x00001726, 0x033, 0x00000001,
- 0x03F, 0x00001726, 0x033, 0x00000002, 0x03F, 0x00001726,
- 0x033, 0x00000003, 0x03F, 0x00001726, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00001726,
- 0x033, 0x00000001, 0x03F, 0x00001726, 0x033, 0x00000002,
- 0x03F, 0x00001726, 0x033, 0x00000003, 0x03F, 0x00001726,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00001726, 0x033, 0x00000001, 0x03F, 0x00001726,
- 0x033, 0x00000002, 0x03F, 0x00001726, 0x033, 0x00000003,
- 0x03F, 0x00001726, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x00001726, 0x033, 0x00000001,
- 0x03F, 0x00001726, 0x033, 0x00000002, 0x03F, 0x00001726,
- 0x033, 0x00000003, 0x03F, 0x00001726, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00001726,
- 0x033, 0x00000001, 0x03F, 0x00001726, 0x033, 0x00000002,
- 0x03F, 0x00001726, 0x033, 0x00000003, 0x03F, 0x00001726,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00001726, 0x033, 0x00000001, 0x03F, 0x00001726,
- 0x033, 0x00000002, 0x03F, 0x00001726, 0x033, 0x00000003,
- 0x03F, 0x00001726, 0xA0000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000F34, 0x033, 0x00000001, 0x03F, 0x00000F34,
- 0x033, 0x00000002, 0x03F, 0x00000F34, 0x033, 0x00000003,
- 0x03F, 0x00000F34, 0xB0000000, 0x00000000, 0x0EF, 0x00000000,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x081, 0x0000F400,
- 0x087, 0x00016040, 0x051, 0x00000808, 0x052, 0x00098002,
- 0x053, 0x0000FA47, 0x054, 0x00058032, 0x056, 0x00051000,
- 0x057, 0x0000CE0A, 0x058, 0x00082030, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x081, 0x0000F400, 0x087, 0x00016040,
- 0x051, 0x00000808, 0x052, 0x00098002, 0x053, 0x0000FA47,
- 0x054, 0x00058032, 0x056, 0x00051000, 0x057, 0x0000CE0A,
- 0x058, 0x00082030, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x081, 0x0000F400, 0x087, 0x00016040, 0x051, 0x00000808,
- 0x052, 0x00098002, 0x053, 0x0000FA47, 0x054, 0x00058032,
- 0x056, 0x00051000, 0x057, 0x0000CE0A, 0x058, 0x00082030,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x081, 0x0000F400,
- 0x087, 0x00016040, 0x051, 0x00000808, 0x052, 0x00098002,
- 0x053, 0x0000FA47, 0x054, 0x00058032, 0x056, 0x00051000,
- 0x057, 0x0000CE0A, 0x058, 0x00082030, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x081, 0x0000F400, 0x087, 0x00016040,
- 0x051, 0x00000808, 0x052, 0x00098002, 0x053, 0x0000FA47,
- 0x054, 0x00058032, 0x056, 0x00051000, 0x057, 0x0000CE0A,
- 0x058, 0x00082030, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x081, 0x0000F400, 0x087, 0x00016040, 0x051, 0x00000808,
- 0x052, 0x00098002, 0x053, 0x0000FA47, 0x054, 0x00058032,
- 0x056, 0x00051000, 0x057, 0x0000CE0A, 0x058, 0x00082030,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x081, 0x0000F400,
- 0x087, 0x00016040, 0x051, 0x00000808, 0x052, 0x00098002,
- 0x053, 0x0000FA47, 0x054, 0x00058032, 0x056, 0x00051000,
- 0x057, 0x0000CE0A, 0x058, 0x00082030, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x081, 0x0000F400, 0x087, 0x00016040,
- 0x051, 0x00000808, 0x052, 0x00098002, 0x053, 0x0000FA47,
- 0x054, 0x00058032, 0x056, 0x00051000, 0x057, 0x0000CE0A,
- 0x058, 0x00082030, 0xA0000000, 0x00000000, 0x081, 0x0000F000,
- 0x087, 0x00016040, 0x051, 0x00000C00, 0x052, 0x0007C241,
- 0x053, 0x0001C069, 0x054, 0x00078032, 0x057, 0x0000CE0A,
- 0x058, 0x00058750, 0xB0000000, 0x00000000, 0x0EF, 0x00000800,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000003, 0x033, 0x00000001, 0x03F, 0x00000006,
- 0x033, 0x00000002, 0x03F, 0x00000009, 0x033, 0x00000003,
- 0x03F, 0x00000026, 0x033, 0x00000004, 0x03F, 0x00000029,
- 0x033, 0x00000005, 0x03F, 0x0000002C, 0x033, 0x00000006,
- 0x03F, 0x0000002F, 0x033, 0x00000007, 0x03F, 0x00000033,
- 0x033, 0x00000008, 0x03F, 0x00000036, 0x033, 0x00000009,
- 0x03F, 0x00000039, 0x033, 0x0000000A, 0x03F, 0x0000003C,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000003, 0x033, 0x00000001, 0x03F, 0x00000006,
- 0x033, 0x00000002, 0x03F, 0x00000009, 0x033, 0x00000003,
- 0x03F, 0x00000026, 0x033, 0x00000004, 0x03F, 0x00000029,
- 0x033, 0x00000005, 0x03F, 0x0000002C, 0x033, 0x00000006,
- 0x03F, 0x0000002F, 0x033, 0x00000007, 0x03F, 0x00000033,
- 0x033, 0x00000008, 0x03F, 0x00000036, 0x033, 0x00000009,
- 0x03F, 0x00000039, 0x033, 0x0000000A, 0x03F, 0x0000003C,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000003, 0x033, 0x00000001, 0x03F, 0x00000006,
- 0x033, 0x00000002, 0x03F, 0x00000009, 0x033, 0x00000003,
- 0x03F, 0x00000026, 0x033, 0x00000004, 0x03F, 0x00000029,
- 0x033, 0x00000005, 0x03F, 0x0000002C, 0x033, 0x00000006,
- 0x03F, 0x0000002F, 0x033, 0x00000007, 0x03F, 0x00000033,
- 0x033, 0x00000008, 0x03F, 0x00000036, 0x033, 0x00000009,
- 0x03F, 0x00000039, 0x033, 0x0000000A, 0x03F, 0x0000003C,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000003, 0x033, 0x00000001, 0x03F, 0x00000006,
- 0x033, 0x00000002, 0x03F, 0x00000009, 0x033, 0x00000003,
- 0x03F, 0x00000026, 0x033, 0x00000004, 0x03F, 0x00000029,
- 0x033, 0x00000005, 0x03F, 0x0000002C, 0x033, 0x00000006,
- 0x03F, 0x0000002F, 0x033, 0x00000007, 0x03F, 0x00000033,
- 0x033, 0x00000008, 0x03F, 0x00000036, 0x033, 0x00000009,
- 0x03F, 0x00000039, 0x033, 0x0000000A, 0x03F, 0x0000003C,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000003, 0x033, 0x00000001, 0x03F, 0x00000006,
- 0x033, 0x00000002, 0x03F, 0x00000009, 0x033, 0x00000003,
- 0x03F, 0x00000026, 0x033, 0x00000004, 0x03F, 0x00000029,
- 0x033, 0x00000005, 0x03F, 0x0000002C, 0x033, 0x00000006,
- 0x03F, 0x0000002F, 0x033, 0x00000007, 0x03F, 0x00000033,
- 0x033, 0x00000008, 0x03F, 0x00000036, 0x033, 0x00000009,
- 0x03F, 0x00000039, 0x033, 0x0000000A, 0x03F, 0x0000003C,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000003, 0x033, 0x00000001, 0x03F, 0x00000006,
- 0x033, 0x00000002, 0x03F, 0x00000009, 0x033, 0x00000003,
- 0x03F, 0x00000026, 0x033, 0x00000004, 0x03F, 0x00000029,
- 0x033, 0x00000005, 0x03F, 0x0000002C, 0x033, 0x00000006,
- 0x03F, 0x0000002F, 0x033, 0x00000007, 0x03F, 0x00000033,
- 0x033, 0x00000008, 0x03F, 0x00000036, 0x033, 0x00000009,
- 0x03F, 0x00000039, 0x033, 0x0000000A, 0x03F, 0x0000003C,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000003, 0x033, 0x00000001, 0x03F, 0x00000006,
- 0x033, 0x00000002, 0x03F, 0x00000009, 0x033, 0x00000003,
- 0x03F, 0x00000026, 0x033, 0x00000004, 0x03F, 0x00000029,
- 0x033, 0x00000005, 0x03F, 0x0000002C, 0x033, 0x00000006,
- 0x03F, 0x0000002F, 0x033, 0x00000007, 0x03F, 0x00000033,
- 0x033, 0x00000008, 0x03F, 0x00000036, 0x033, 0x00000009,
- 0x03F, 0x00000039, 0x033, 0x0000000A, 0x03F, 0x0000003C,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000003, 0x033, 0x00000001, 0x03F, 0x00000006,
- 0x033, 0x00000002, 0x03F, 0x00000009, 0x033, 0x00000003,
- 0x03F, 0x00000026, 0x033, 0x00000004, 0x03F, 0x00000029,
- 0x033, 0x00000005, 0x03F, 0x0000002C, 0x033, 0x00000006,
- 0x03F, 0x0000002F, 0x033, 0x00000007, 0x03F, 0x00000033,
- 0x033, 0x00000008, 0x03F, 0x00000036, 0x033, 0x00000009,
- 0x03F, 0x00000039, 0x033, 0x0000000A, 0x03F, 0x0000003C,
- 0xA0000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x0005142C,
- 0x033, 0x00000001, 0x03F, 0x0005144B, 0x033, 0x00000002,
- 0x03F, 0x0005144E, 0x033, 0x00000003, 0x03F, 0x00051C69,
- 0x033, 0x00000004, 0x03F, 0x00051C6C, 0x033, 0x00000005,
- 0x03F, 0x00051C6F, 0x033, 0x00000006, 0x03F, 0x00051CEB,
- 0x033, 0x00000007, 0x03F, 0x00051CEE, 0x033, 0x00000008,
- 0x03F, 0x00051CF1, 0x033, 0x00000009, 0x03F, 0x00051CF4,
- 0x033, 0x0000000A, 0x03F, 0x00051CF7, 0xB0000000, 0x00000000,
- 0x0EF, 0x00000000, 0x0EF, 0x00000010, 0x033, 0x00000000,
- 0x008, 0x0009C060, 0x033, 0x00000001, 0x008, 0x0009C060,
- 0x0EF, 0x00000000, 0x033, 0x000000A2, 0x0EF, 0x00080000,
- 0x03E, 0x0000593F, 0x03F, 0x000C0F4F, 0x0EF, 0x00000000,
- 0x033, 0x000000A3, 0x0EF, 0x00080000, 0x03E, 0x00005934,
- 0x03F, 0x0005AFCF, 0x0EF, 0x00000000,
-
-};
-
-void odm_read_and_config_mp_8822b_radioa(struct phy_dm_struct *dm)
-{
- u32 i = 0;
- u8 c_cond;
- bool is_matched = true, is_skipped = false;
- u32 *array = array_mp_8822b_radioa;
-
- u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===> %s\n", __func__);
-
- for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_radioa); i = i + 2) {
- v1 = array[i];
- v2 = array[i + 1];
-
- if (v1 & BIT(31)) { /* positive condition*/
- c_cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28);
- if (c_cond == COND_ENDIF) { /*end*/
- is_matched = true;
- is_skipped = false;
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "ENDIF\n");
- } else if (c_cond == COND_ELSE) { /*else*/
- is_matched = is_skipped ? false : true;
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "ELSE\n");
- } else { /*if , else if*/
- pre_v1 = v1;
- pre_v2 = v2;
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "IF or ELSE IF\n");
- }
- } else if (v1 & BIT(30)) { /*negative condition*/
- if (is_skipped) {
- is_matched = false;
- continue;
- }
-
- if (check_positive(dm, pre_v1, pre_v2, v1, v2)) {
- is_matched = true;
- is_skipped = true;
- } else {
- is_matched = false;
- is_skipped = false;
- }
- } else if (is_matched) {
- odm_config_rf_radio_a_8822b(dm, v1, v2);
- }
- }
-}
-
-u32 odm_get_version_mp_8822b_radioa(void) { return 67; }
-
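- The loop in odm_read_and_config_mp_8822b_radioa() above is the only consumer of these tables, and it also documents their layout: entries with BIT(31) set are IF/ELSE-IF/ELSE/ENDIF headers (bits 29:28 select which), the 0x40000000 word that follows a header is the mask row that actually evaluates the condition, and everything else is a plain register/value pair written out only while the current branch is matched. The standalone sketch below re-implements that walk outside the driver so the table structure is easier to follow; it is illustrative only. The COND_ELSE/COND_ENDIF numeric values and the check_positive() stub are assumptions made so the example compiles and runs on its own; in the driver, check_positive() compares the saved condition words against the board's RFE/cut/package configuration.
-
- /*
-  * Minimal standalone sketch (not driver code) of how a conditional
-  * radio table like array_mp_8822b_radioa is walked. COND_* values and
-  * check_positive() are assumptions for illustration only.
-  */
- #include <inttypes.h>
- #include <stdbool.h>
- #include <stdint.h>
- #include <stdio.h>
-
- #define BIT(n)     (1u << (n))
- #define COND_ELSE  2   /* assumed: 0xA0000000 header */
- #define COND_ENDIF 3   /* assumed: 0xB0000000 header */
-
- /* Hypothetical board match: pretend only the 0x9000200f/0x00000000 branch applies. */
- static bool check_positive(uint32_t pre_v1, uint32_t pre_v2)
- {
- 	return (pre_v1 & 0x0fffffff) == 0x0000200f && pre_v2 == 0x00000000;
- }
-
- static void write_rf_reg(uint32_t addr, uint32_t data)
- {
- 	printf("RF_A[0x%03" PRIX32 "] <= 0x%08" PRIX32 "\n", addr, data);
- }
-
- static void config_radio_table(const uint32_t *array, size_t len)
- {
- 	bool is_matched = true, is_skipped = false;
- 	uint32_t pre_v1 = 0, pre_v2 = 0;
-
- 	for (size_t i = 0; i + 1 < len; i += 2) {
- 		uint32_t v1 = array[i], v2 = array[i + 1];
-
- 		if (v1 & BIT(31)) {            /* IF / ELSE IF / ELSE / ENDIF header */
- 			uint8_t c_cond = (v1 >> 28) & 0x3;
-
- 			if (c_cond == COND_ENDIF) {
- 				is_matched = true;
- 				is_skipped = false;
- 			} else if (c_cond == COND_ELSE) {
- 				is_matched = !is_skipped;
- 			} else {               /* remember condition for the mask row */
- 				pre_v1 = v1;
- 				pre_v2 = v2;
- 			}
- 		} else if (v1 & BIT(30)) {     /* 0x40000000 mask row closes the header */
- 			if (is_skipped)
- 				is_matched = false;
- 			else if (check_positive(pre_v1, pre_v2))
- 				is_matched = is_skipped = true;
- 			else
- 				is_matched = false;
- 		} else if (is_matched) {       /* plain register/value pair */
- 			write_rf_reg(v1, v2);
- 		}
- 	}
- }
-
- int main(void)
- {
- 	/* Tiny made-up table in the same layout as the arrays above. */
- 	static const uint32_t demo[] = {
- 		0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,  /* IF (not matched here) */
- 		0x0EF, 0x00000200,
- 		0x9000200f, 0x00000000, 0x40000000, 0x00000000,  /* ELSE IF (matched) */
- 		0x0EF, 0x00000080,
- 		0xA0000000, 0x00000000,                          /* ELSE */
- 		0x0EF, 0x00000040,
- 		0xB0000000, 0x00000000,                          /* ENDIF */
- 		0x0EF, 0x00000000,
- 	};
-
- 	config_radio_table(demo, sizeof(demo) / sizeof(demo[0]));
- 	return 0;
- }
-
- Running the sketch writes only the 0x0EF values from the matched ELSE-IF branch and from the unconditional tail after ENDIF, which is exactly the selection behaviour the driver relies on when it picks one register set per board variant.
-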
-/******************************************************************************
- * radiob.TXT
- ******************************************************************************/
-
-static u32 array_mp_8822b_radiob[] = {
- 0x000, 0x00030000, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x0004002D, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0x90002000, 0x00000000, 0x40000000, 0x00000000,
- 0x001, 0x00040029, 0xA0000000, 0x00000000, 0x001, 0x00040029,
- 0xB0000000, 0x00000000, 0x018, 0x00010D24, 0x0EF, 0x00080000,
- 0x033, 0x00000002, 0x03E, 0x0000003F, 0x03F, 0x000C0F4E,
- 0x033, 0x00000001, 0x03E, 0x00000034, 0x03F, 0x0004080E,
- 0x0EF, 0x00080000, 0x0DF, 0x00002449, 0x033, 0x00000024,
- 0x03E, 0x0000003F, 0x03F, 0x00060FDE, 0x0EF, 0x00000000,
- 0x0EF, 0x00080000, 0x033, 0x00000025, 0x03E, 0x00000037,
- 0x03F, 0x0007EFCE, 0x0EF, 0x00000000, 0x0EF, 0x00080000,
- 0x033, 0x00000026, 0x03E, 0x00000037, 0x03F, 0x000DEFCE,
- 0x0EF, 0x00000000, 0x0DF, 0x00000009, 0x018, 0x00010524,
- 0x089, 0x00000207, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x08A, 0x000FF186, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x08A, 0x000FE186, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x08A, 0x000FF186, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x08A, 0x000FF186, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x08A, 0x000FF186, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x08A, 0x000FE186, 0xA0000000, 0x00000000, 0x08A, 0x000FF186,
- 0xB0000000, 0x00000000, 0x08B, 0x00061E3C, 0x08C, 0x000112C7,
- 0x08D, 0x000F4988, 0x08E, 0x00064D40, 0x0EF, 0x00020000,
- 0x033, 0x00000007, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x03E, 0x00004080, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x03E, 0x00004080, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004000, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004000, 0x93011000, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004000, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004040, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x03E, 0x00004000, 0xA0000000, 0x00000000, 0x03E, 0x00004000,
- 0xB0000000, 0x00000000, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C0006, 0x93011000, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C0006, 0x93001000, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000C3186, 0xA0000000, 0x00000000, 0x03F, 0x000C3186,
- 0xB0000000, 0x00000000, 0x033, 0x00000006, 0x03E, 0x00004080,
- 0x03F, 0x000C3186, 0x033, 0x00000005, 0x03E, 0x000040C8,
- 0x03F, 0x000C3186, 0x033, 0x00000004, 0x03E, 0x00004190,
- 0x03F, 0x000C3186, 0x033, 0x00000003, 0x03E, 0x00004998,
- 0x03F, 0x000C3186, 0x033, 0x00000002, 0x03E, 0x00005840,
- 0x03F, 0x000C3186, 0x033, 0x00000001, 0x03E, 0x000058C2,
- 0x03F, 0x000C3186, 0x033, 0x00000000, 0x03E, 0x00005930,
- 0x03F, 0x000C3186, 0x033, 0x0000000F, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00004080, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03E, 0x00004080, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004040, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x03E, 0x00004000, 0xA0000000, 0x00000000,
- 0x03E, 0x00004000, 0xB0000000, 0x00000000, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C0006, 0x93001000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x000C3186, 0xA0000000, 0x00000000,
- 0x03F, 0x000C3186, 0xB0000000, 0x00000000, 0x033, 0x0000000E,
- 0x03E, 0x00004080, 0x03F, 0x000C3186, 0x033, 0x0000000D,
- 0x03E, 0x000040C8, 0x03F, 0x000C3186, 0x033, 0x0000000C,
- 0x03E, 0x00004190, 0x03F, 0x000C3186, 0x033, 0x0000000B,
- 0x03E, 0x00004998, 0x03F, 0x000C3186, 0x033, 0x0000000A,
- 0x03E, 0x00005840, 0x03F, 0x000C3186, 0x033, 0x00000009,
- 0x03E, 0x000058C2, 0x03F, 0x000C3186, 0x033, 0x00000008,
- 0x03E, 0x00005930, 0x03F, 0x000C3186, 0x033, 0x00000017,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x03E, 0x00004080,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x9300200c, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004000,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004000,
- 0x93011000, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004000,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004040,
- 0x93002000, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00004000,
- 0xA0000000, 0x00000000, 0x03E, 0x00004000, 0xB0000000, 0x00000000,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9300200c, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000DFF86,
- 0x93011000, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0x93002000, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C0006,
- 0x93001000, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x000C3186,
- 0xA0000000, 0x00000000, 0x03F, 0x000C3186, 0xB0000000, 0x00000000,
- 0x033, 0x00000016, 0x03E, 0x00004080, 0x03F, 0x000C3186,
- 0x033, 0x00000015, 0x03E, 0x000040C8, 0x03F, 0x000C3186,
- 0x033, 0x00000014, 0x03E, 0x00004190, 0x03F, 0x000C3186,
- 0x033, 0x00000013, 0x03E, 0x00004998, 0x03F, 0x000C3186,
- 0x033, 0x00000012, 0x03E, 0x00005840, 0x03F, 0x000C3186,
- 0x033, 0x00000011, 0x03E, 0x000058C2, 0x03F, 0x000C3186,
- 0x033, 0x00000010, 0x03E, 0x00005930, 0x03F, 0x000C3186,
- 0x0EF, 0x00000000, 0x0EF, 0x00004000, 0x033, 0x00000000,
- 0x03F, 0x0000000A, 0x033, 0x00000001, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000002, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000005, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x93001000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000005, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x03F, 0x00000000, 0xA0000000, 0x00000000,
- 0x03F, 0x00000005, 0xB0000000, 0x00000000, 0x033, 0x00000002,
- 0x03F, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x00000401,
- 0x084, 0x00001209, 0x086, 0x000001A0, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x087, 0x00068080, 0xA0000000, 0x00000000,
- 0x087, 0x000E8180, 0xB0000000, 0x00000000, 0x088, 0x00070020,
- 0x0DE, 0x00000010, 0x0EF, 0x00008000, 0x033, 0x0000000F,
- 0x03F, 0x0000003C, 0x033, 0x0000000E, 0x03F, 0x00000038,
- 0x033, 0x0000000D, 0x03F, 0x00000030, 0x033, 0x0000000C,
- 0x03F, 0x00000028, 0x033, 0x0000000B, 0x03F, 0x00000020,
- 0x033, 0x0000000A, 0x03F, 0x00000018, 0x033, 0x00000009,
- 0x03F, 0x00000010, 0x033, 0x00000008, 0x03F, 0x00000008,
- 0x033, 0x00000007, 0x03F, 0x0000003C, 0x033, 0x00000006,
- 0x03F, 0x00000038, 0x033, 0x00000005, 0x03F, 0x00000030,
- 0x033, 0x00000004, 0x03F, 0x00000028, 0x033, 0x00000003,
- 0x03F, 0x00000020, 0x033, 0x00000002, 0x03F, 0x00000018,
- 0x033, 0x00000001, 0x03F, 0x00000010, 0x033, 0x00000000,
- 0x03F, 0x00000008, 0x0EF, 0x00000000, 0x018, 0x00018D24,
- 0xFFE, 0x00000000, 0xFFE, 0x00000000, 0xFFE, 0x00000000,
- 0xFFE, 0x00000000, 0x018, 0x00010D24, 0x01B, 0x00075A40,
- 0x0EE, 0x00000002, 0x033, 0x00000000, 0x03F, 0x00000004,
- 0x033, 0x00000001, 0x03F, 0x00000004, 0x033, 0x00000002,
- 0x03F, 0x00000004, 0x033, 0x00000003, 0x03F, 0x00000004,
- 0x033, 0x00000004, 0x03F, 0x00000004, 0x033, 0x00000005,
- 0x03F, 0x00000006, 0x033, 0x00000006, 0x03F, 0x00000002,
- 0x033, 0x00000007, 0x03F, 0x00000000, 0x0EE, 0x00000000,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x061, 0x0005D4A0,
- 0x062, 0x0000D203, 0x063, 0x00000062, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x061, 0x0005D4A0, 0x062, 0x0000D203,
- 0x063, 0x00000062, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D4A0, 0x062, 0x0000D203, 0x063, 0x00000062,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D2A1,
- 0x062, 0x0000D3A2, 0x063, 0x00000062, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x061, 0x0005D4A0, 0x062, 0x0000D203,
- 0x063, 0x00000062, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x061, 0x0005D4A0, 0x062, 0x0000D203, 0x063, 0x00000062,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D4A0,
- 0x062, 0x0000D203, 0x063, 0x00000062, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D2A1, 0x062, 0x0000D3A2,
- 0x063, 0x00000062, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D2A1, 0x062, 0x0000D3A2, 0x063, 0x00000062,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D2A1,
- 0x062, 0x0000D3A2, 0x063, 0x00000002, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D2A1, 0x062, 0x0000D3A2,
- 0x063, 0x00000002, 0x93011000, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D3D1, 0x062, 0x0000D3A2, 0x063, 0x00000002,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D2A1,
- 0x062, 0x0000D3A2, 0x063, 0x00000062, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D3D1, 0x062, 0x0000D3A2,
- 0x063, 0x00000002, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D2A1, 0x062, 0x0000D3A2, 0x063, 0x00000002,
- 0x93001000, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x0005D3D1,
- 0x062, 0x0000D3A2, 0x063, 0x00000002, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x061, 0x0005D2A1, 0x062, 0x0000D3A2,
- 0x063, 0x00000002, 0x90002000, 0x00000000, 0x40000000, 0x00000000,
- 0x061, 0x0005D2A1, 0x062, 0x0000D3A2, 0x063, 0x00000002,
- 0xA0000000, 0x00000000, 0x061, 0x0005D3D0, 0x062, 0x0000D303,
- 0x063, 0x00000002, 0xB0000000, 0x00000000, 0x0EF, 0x00000200,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x030, 0x000004A3,
- 0x030, 0x000014A3, 0x030, 0x000024A3, 0x030, 0x000034A3,
- 0x030, 0x000044A3, 0x030, 0x000054A3, 0x030, 0x000064A3,
- 0x030, 0x000074A3, 0x030, 0x000084A3, 0x030, 0x000094A3,
- 0x030, 0x0000A4A3, 0x030, 0x0000B4A3, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x030, 0x000004A3, 0x030, 0x000014A3,
- 0x030, 0x000024A3, 0x030, 0x000034A3, 0x030, 0x000044A3,
- 0x030, 0x000054A3, 0x030, 0x000064A3, 0x030, 0x000074A3,
- 0x030, 0x000084A3, 0x030, 0x000094A3, 0x030, 0x0000A4A3,
- 0x030, 0x0000B4A3, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x000004A3, 0x030, 0x000014A3, 0x030, 0x000024A3,
- 0x030, 0x000034A3, 0x030, 0x000044A3, 0x030, 0x000054A3,
- 0x030, 0x000064A3, 0x030, 0x000074A3, 0x030, 0x000084A3,
- 0x030, 0x000094A3, 0x030, 0x0000A4A3, 0x030, 0x0000B4A3,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000002A6,
- 0x030, 0x000012A6, 0x030, 0x000022A6, 0x030, 0x000032A6,
- 0x030, 0x000042A6, 0x030, 0x000052A6, 0x030, 0x000062A6,
- 0x030, 0x000072A6, 0x030, 0x000082A6, 0x030, 0x000092A6,
- 0x030, 0x0000A2A6, 0x030, 0x0000B2A6, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x030, 0x000004A0, 0x030, 0x000014A0,
- 0x030, 0x000024A0, 0x030, 0x000034A0, 0x030, 0x000044A0,
- 0x030, 0x000054A0, 0x030, 0x000064A0, 0x030, 0x000074A0,
- 0x030, 0x000084A0, 0x030, 0x000094A0, 0x030, 0x0000A4A0,
- 0x030, 0x0000B4A0, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x030, 0x000004A0, 0x030, 0x000014A0, 0x030, 0x000024A0,
- 0x030, 0x000034A0, 0x030, 0x000044A0, 0x030, 0x000054A0,
- 0x030, 0x000064A0, 0x030, 0x000074A0, 0x030, 0x000084A0,
- 0x030, 0x000094A0, 0x030, 0x0000A4A0, 0x030, 0x0000B4A0,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000004A0,
- 0x030, 0x000014A0, 0x030, 0x000024A0, 0x030, 0x000034A0,
- 0x030, 0x000044A0, 0x030, 0x000054A0, 0x030, 0x000064A0,
- 0x030, 0x000074A0, 0x030, 0x000084A0, 0x030, 0x000094A0,
- 0x030, 0x0000A4A0, 0x030, 0x0000B4A0, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x000002A1, 0x030, 0x000012A1,
- 0x030, 0x000022A1, 0x030, 0x000032A1, 0x030, 0x000042A1,
- 0x030, 0x000052A1, 0x030, 0x000062A1, 0x030, 0x000072A1,
- 0x030, 0x000082A1, 0x030, 0x000092A1, 0x030, 0x0000A2A1,
- 0x030, 0x0000B2A1, 0x9300200c, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x000002A6, 0x030, 0x000012A6, 0x030, 0x000022A6,
- 0x030, 0x000032A6, 0x030, 0x000042A6, 0x030, 0x000052A6,
- 0x030, 0x000062A6, 0x030, 0x000072A6, 0x030, 0x000082A6,
- 0x030, 0x000092A6, 0x030, 0x0000A2A6, 0x030, 0x0000B2A6,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000002F4,
- 0x030, 0x000012F4, 0x030, 0x000022F4, 0x030, 0x000032F4,
- 0x030, 0x00004365, 0x030, 0x00005365, 0x030, 0x00006365,
- 0x030, 0x00007365, 0x030, 0x000082A4, 0x030, 0x000092A4,
- 0x030, 0x0000A2A4, 0x030, 0x0000B2A4, 0x93002100, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x000004A4, 0x030, 0x000014A4,
- 0x030, 0x000024A4, 0x030, 0x000034A4, 0x030, 0x000043A4,
- 0x030, 0x000053A4, 0x030, 0x000063A4, 0x030, 0x000073A4,
- 0x030, 0x000083A5, 0x030, 0x000093A5, 0x030, 0x0000A3A5,
- 0x030, 0x0000B3A5, 0x93011000, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x000003A1, 0x030, 0x000013A1, 0x030, 0x000023A1,
- 0x030, 0x000033A1, 0x030, 0x000043A4, 0x030, 0x000053A4,
- 0x030, 0x000063A4, 0x030, 0x000073A4, 0x030, 0x000083A6,
- 0x030, 0x000093A6, 0x030, 0x0000A3A6, 0x030, 0x0000B3A6,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000002A1,
- 0x030, 0x000012A1, 0x030, 0x000022A1, 0x030, 0x000032A1,
- 0x030, 0x000042A1, 0x030, 0x000052A1, 0x030, 0x000062A1,
- 0x030, 0x000072A1, 0x030, 0x000082A1, 0x030, 0x000092A1,
- 0x030, 0x0000A2A1, 0x030, 0x0000B2A1, 0x90001004, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x00000382, 0x030, 0x00001382,
- 0x030, 0x00002382, 0x030, 0x00003382, 0x030, 0x00004445,
- 0x030, 0x00005445, 0x030, 0x00006445, 0x030, 0x00007445,
- 0x030, 0x00008425, 0x030, 0x00009425, 0x030, 0x0000A425,
- 0x030, 0x0000B425, 0x93002000, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x00000303, 0x030, 0x00001303, 0x030, 0x00002303,
- 0x030, 0x00003303, 0x030, 0x000043A4, 0x030, 0x000053A4,
- 0x030, 0x000063A4, 0x030, 0x000073A4, 0x030, 0x00008365,
- 0x030, 0x00009365, 0x030, 0x0000A365, 0x030, 0x0000B365,
- 0x93001000, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000003A1,
- 0x030, 0x000013A1, 0x030, 0x000023A1, 0x030, 0x000033A1,
- 0x030, 0x00004364, 0x030, 0x00005364, 0x030, 0x00006364,
- 0x030, 0x00007364, 0x030, 0x00008564, 0x030, 0x00009564,
- 0x030, 0x0000A564, 0x030, 0x0000B564, 0x90002100, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x000004A1, 0x030, 0x000014A1,
- 0x030, 0x000024A1, 0x030, 0x000034A1, 0x030, 0x000043A1,
- 0x030, 0x000053A1, 0x030, 0x000063A1, 0x030, 0x000073A1,
- 0x030, 0x000083A1, 0x030, 0x000093A1, 0x030, 0x0000A3A1,
- 0x030, 0x0000B3A1, 0x90002000, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x000004A0, 0x030, 0x000014A0, 0x030, 0x000024A0,
- 0x030, 0x000034A0, 0x030, 0x000043A1, 0x030, 0x000053A1,
- 0x030, 0x000063A1, 0x030, 0x000073A1, 0x030, 0x000083A2,
- 0x030, 0x000093A2, 0x030, 0x0000A3A2, 0x030, 0x0000B3A2,
- 0xA0000000, 0x00000000, 0x030, 0x000002D0, 0x030, 0x000012D0,
- 0x030, 0x000022D0, 0x030, 0x000032D0, 0x030, 0x000042D0,
- 0x030, 0x000052D0, 0x030, 0x000062D0, 0x030, 0x000072D0,
- 0x030, 0x000082D0, 0x030, 0x000092D0, 0x030, 0x0000A2D0,
- 0x030, 0x0000B2D0, 0xB0000000, 0x00000000, 0x0EF, 0x00000000,
- 0x0EF, 0x00000080, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x030, 0x00000203, 0x030, 0x00001203, 0x030, 0x00002203,
- 0x030, 0x00003203, 0x030, 0x00004203, 0x030, 0x00005203,
- 0x030, 0x00006203, 0x030, 0x00007203, 0x030, 0x00008203,
- 0x030, 0x00009203, 0x030, 0x0000A203, 0x030, 0x0000B203,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x030, 0x00000203,
- 0x030, 0x00001203, 0x030, 0x00002203, 0x030, 0x00003203,
- 0x030, 0x00004203, 0x030, 0x00005203, 0x030, 0x00006203,
- 0x030, 0x00007203, 0x030, 0x00008203, 0x030, 0x00009203,
- 0x030, 0x0000A203, 0x030, 0x0000B203, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x00000203, 0x030, 0x00001203,
- 0x030, 0x00002203, 0x030, 0x00003203, 0x030, 0x00004203,
- 0x030, 0x00005203, 0x030, 0x00006203, 0x030, 0x00007203,
- 0x030, 0x00008203, 0x030, 0x00009203, 0x030, 0x0000A203,
- 0x030, 0x0000B203, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x000003A2, 0x030, 0x000013A2, 0x030, 0x000023A2,
- 0x030, 0x000033A2, 0x030, 0x000043A2, 0x030, 0x000053A2,
- 0x030, 0x000063A2, 0x030, 0x000073A2, 0x030, 0x000083A2,
- 0x030, 0x000093A2, 0x030, 0x0000A3A2, 0x030, 0x0000B3A2,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x030, 0x00000203,
- 0x030, 0x00001203, 0x030, 0x00002203, 0x030, 0x00003203,
- 0x030, 0x00004203, 0x030, 0x00005203, 0x030, 0x00006203,
- 0x030, 0x00007203, 0x030, 0x00008203, 0x030, 0x00009203,
- 0x030, 0x0000A203, 0x030, 0x0000B203, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x030, 0x00000203, 0x030, 0x00001203,
- 0x030, 0x00002203, 0x030, 0x00003203, 0x030, 0x00004203,
- 0x030, 0x00005203, 0x030, 0x00006203, 0x030, 0x00007203,
- 0x030, 0x00008203, 0x030, 0x00009203, 0x030, 0x0000A203,
- 0x030, 0x0000B203, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x00000203, 0x030, 0x00001203, 0x030, 0x00002203,
- 0x030, 0x00003203, 0x030, 0x00004203, 0x030, 0x00005203,
- 0x030, 0x00006203, 0x030, 0x00007203, 0x030, 0x00008203,
- 0x030, 0x00009203, 0x030, 0x0000A203, 0x030, 0x0000B203,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000003A2,
- 0x030, 0x000013A2, 0x030, 0x000023A2, 0x030, 0x000033A2,
- 0x030, 0x000043A2, 0x030, 0x000053A2, 0x030, 0x000063A2,
- 0x030, 0x000073A2, 0x030, 0x000083A2, 0x030, 0x000093A2,
- 0x030, 0x0000A3A2, 0x030, 0x0000B3A2, 0x9300200c, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x93012100, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x000003A3, 0x030, 0x000013A3, 0x030, 0x000023A3,
- 0x030, 0x000033A3, 0x030, 0x000043A4, 0x030, 0x000053A4,
- 0x030, 0x000063A4, 0x030, 0x000073A4, 0x030, 0x000083A3,
- 0x030, 0x000093A3, 0x030, 0x0000A3A3, 0x030, 0x0000B3A3,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000003A2,
- 0x030, 0x000013A2, 0x030, 0x000023A2, 0x030, 0x000033A2,
- 0x030, 0x000043A2, 0x030, 0x000053A2, 0x030, 0x000063A2,
- 0x030, 0x000073A2, 0x030, 0x000083A2, 0x030, 0x000093A2,
- 0x030, 0x0000A3A2, 0x030, 0x0000B3A2, 0x93011000, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x9000200c, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x000003A2, 0x030, 0x000013A2, 0x030, 0x000023A2,
- 0x030, 0x000033A2, 0x030, 0x000043A2, 0x030, 0x000053A2,
- 0x030, 0x000063A2, 0x030, 0x000073A2, 0x030, 0x000083A2,
- 0x030, 0x000093A2, 0x030, 0x0000A3A2, 0x030, 0x0000B3A2,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000003A2,
- 0x030, 0x000013A2, 0x030, 0x000023A2, 0x030, 0x000033A2,
- 0x030, 0x000043A2, 0x030, 0x000053A2, 0x030, 0x000063A2,
- 0x030, 0x000073A2, 0x030, 0x000083A2, 0x030, 0x000093A2,
- 0x030, 0x0000A3A2, 0x030, 0x0000B3A2, 0x93002000, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0x93001000, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x000003A2, 0x030, 0x000013A2, 0x030, 0x000023A2,
- 0x030, 0x000033A2, 0x030, 0x000043A2, 0x030, 0x000053A2,
- 0x030, 0x000063A2, 0x030, 0x000073A2, 0x030, 0x000083A2,
- 0x030, 0x000093A2, 0x030, 0x0000A3A2, 0x030, 0x0000B3A2,
- 0x90002100, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x000003A2,
- 0x030, 0x000013A2, 0x030, 0x000023A2, 0x030, 0x000033A2,
- 0x030, 0x000043A2, 0x030, 0x000053A2, 0x030, 0x000063A2,
- 0x030, 0x000073A2, 0x030, 0x000083A2, 0x030, 0x000093A2,
- 0x030, 0x0000A3A2, 0x030, 0x0000B3A2, 0x90002000, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x000003A2, 0x030, 0x000013A2,
- 0x030, 0x000023A2, 0x030, 0x000033A2, 0x030, 0x000043A2,
- 0x030, 0x000053A2, 0x030, 0x000063A2, 0x030, 0x000073A2,
- 0x030, 0x000083A2, 0x030, 0x000093A2, 0x030, 0x0000A3A2,
- 0x030, 0x0000B3A2, 0xA0000000, 0x00000000, 0x030, 0x000003A2,
- 0x030, 0x000013A2, 0x030, 0x000023A2, 0x030, 0x000033A2,
- 0x030, 0x000043A2, 0x030, 0x000053A2, 0x030, 0x000063A2,
- 0x030, 0x000073A2, 0x030, 0x000083A2, 0x030, 0x000093A2,
- 0x030, 0x0000A3A2, 0x030, 0x0000B3A2, 0xB0000000, 0x00000000,
- 0x0EF, 0x00000000, 0x0EF, 0x00000040, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x030, 0x00000645, 0x030, 0x00001333,
- 0x030, 0x00002011, 0x030, 0x00004000, 0x030, 0x00005000,
- 0x030, 0x00006000, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x030, 0x00000645, 0x030, 0x00001333, 0x030, 0x00002011,
- 0x030, 0x00004000, 0x030, 0x00005000, 0x030, 0x00006000,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x00000645, 0x030, 0x00001333,
- 0x030, 0x00002011, 0x030, 0x00004777, 0x030, 0x00005777,
- 0x030, 0x00006777, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x030, 0x00000645, 0x030, 0x00001333, 0x030, 0x00002011,
- 0x030, 0x00004000, 0x030, 0x00005000, 0x030, 0x00006000,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x00000645, 0x030, 0x00001333,
- 0x030, 0x00002011, 0x030, 0x00004000, 0x030, 0x00005000,
- 0x030, 0x00006000, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x00000645, 0x030, 0x00001333, 0x030, 0x00002011,
- 0x030, 0x00004000, 0x030, 0x00005000, 0x030, 0x00006000,
- 0x9300200c, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x00000645,
- 0x030, 0x00001333, 0x030, 0x00002011, 0x030, 0x00004777,
- 0x030, 0x00005777, 0x030, 0x00006777, 0x93012100, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x00000660, 0x030, 0x00001341,
- 0x030, 0x00002220, 0x030, 0x00004777, 0x030, 0x00005777,
- 0x030, 0x00006777, 0x93002100, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x00000764, 0x030, 0x00001452, 0x030, 0x00002220,
- 0x030, 0x00004777, 0x030, 0x00005777, 0x030, 0x00006777,
- 0x93011000, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x00000764,
- 0x030, 0x00001632, 0x030, 0x00002421, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0x9000200c, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x00000645, 0x030, 0x00001333,
- 0x030, 0x00002011, 0x030, 0x00004000, 0x030, 0x00005000,
- 0x030, 0x00006000, 0x90001004, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x00000764, 0x030, 0x00001632, 0x030, 0x00002421,
- 0x030, 0x00004000, 0x030, 0x00005000, 0x030, 0x00006000,
- 0x93002000, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x00000777,
- 0x030, 0x00001442, 0x030, 0x00002222, 0x030, 0x00004777,
- 0x030, 0x00005777, 0x030, 0x00006777, 0x93001000, 0x00000000,
- 0x40000000, 0x00000000, 0x030, 0x00000764, 0x030, 0x00001632,
- 0x030, 0x00002421, 0x030, 0x00004000, 0x030, 0x00005000,
- 0x030, 0x00006000, 0x90002100, 0x00000000, 0x40000000, 0x00000000,
- 0x030, 0x00000775, 0x030, 0x00001222, 0x030, 0x00002210,
- 0x030, 0x00004000, 0x030, 0x00005000, 0x030, 0x00006000,
- 0x90002000, 0x00000000, 0x40000000, 0x00000000, 0x030, 0x00000775,
- 0x030, 0x00001422, 0x030, 0x00002210, 0x030, 0x00004000,
- 0x030, 0x00005000, 0x030, 0x00006000, 0xA0000000, 0x00000000,
- 0x030, 0x00000764, 0x030, 0x00001632, 0x030, 0x00002421,
- 0x030, 0x00004000, 0x030, 0x00005000, 0x030, 0x00006000,
- 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x00000800,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000007, 0x033, 0x00000021, 0x03F, 0x0000000A,
- 0x033, 0x00000022, 0x03F, 0x0000000D, 0x033, 0x00000023,
- 0x03F, 0x0000002A, 0x033, 0x00000024, 0x03F, 0x0000002D,
- 0x033, 0x00000025, 0x03F, 0x00000030, 0x033, 0x00000026,
- 0x03F, 0x0000006D, 0x033, 0x00000027, 0x03F, 0x00000070,
- 0x033, 0x00000028, 0x03F, 0x000000ED, 0x033, 0x00000029,
- 0x03F, 0x000000F0, 0x033, 0x0000002A, 0x03F, 0x000000F3,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000007, 0x033, 0x00000021, 0x03F, 0x0000000A,
- 0x033, 0x00000022, 0x03F, 0x0000000D, 0x033, 0x00000023,
- 0x03F, 0x0000002A, 0x033, 0x00000024, 0x03F, 0x0000002D,
- 0x033, 0x00000025, 0x03F, 0x00000030, 0x033, 0x00000026,
- 0x03F, 0x0000006D, 0x033, 0x00000027, 0x03F, 0x00000070,
- 0x033, 0x00000028, 0x03F, 0x000000ED, 0x033, 0x00000029,
- 0x03F, 0x000000F0, 0x033, 0x0000002A, 0x03F, 0x000000F3,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000007, 0x033, 0x00000021, 0x03F, 0x0000000A,
- 0x033, 0x00000022, 0x03F, 0x0000000D, 0x033, 0x00000023,
- 0x03F, 0x0000002A, 0x033, 0x00000024, 0x03F, 0x0000002D,
- 0x033, 0x00000025, 0x03F, 0x00000030, 0x033, 0x00000026,
- 0x03F, 0x0000006D, 0x033, 0x00000027, 0x03F, 0x00000070,
- 0x033, 0x00000028, 0x03F, 0x000000ED, 0x033, 0x00000029,
- 0x03F, 0x000000F0, 0x033, 0x0000002A, 0x03F, 0x000000F3,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000005, 0x033, 0x00000021, 0x03F, 0x00000008,
- 0x033, 0x00000022, 0x03F, 0x0000000B, 0x033, 0x00000023,
- 0x03F, 0x0000000E, 0x033, 0x00000024, 0x03F, 0x0000002B,
- 0x033, 0x00000025, 0x03F, 0x00000068, 0x033, 0x00000026,
- 0x03F, 0x0000006B, 0x033, 0x00000027, 0x03F, 0x0000006E,
- 0x033, 0x00000028, 0x03F, 0x00000071, 0x033, 0x00000029,
- 0x03F, 0x00000074, 0x033, 0x0000002A, 0x03F, 0x00000077,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000007, 0x033, 0x00000021, 0x03F, 0x0000000A,
- 0x033, 0x00000022, 0x03F, 0x0000000D, 0x033, 0x00000023,
- 0x03F, 0x0000002A, 0x033, 0x00000024, 0x03F, 0x0000002D,
- 0x033, 0x00000025, 0x03F, 0x00000030, 0x033, 0x00000026,
- 0x03F, 0x0000006D, 0x033, 0x00000027, 0x03F, 0x00000070,
- 0x033, 0x00000028, 0x03F, 0x000000ED, 0x033, 0x00000029,
- 0x03F, 0x000000F0, 0x033, 0x0000002A, 0x03F, 0x000000F3,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000007, 0x033, 0x00000021, 0x03F, 0x0000000A,
- 0x033, 0x00000022, 0x03F, 0x0000000D, 0x033, 0x00000023,
- 0x03F, 0x0000002A, 0x033, 0x00000024, 0x03F, 0x0000002D,
- 0x033, 0x00000025, 0x03F, 0x00000030, 0x033, 0x00000026,
- 0x03F, 0x0000006D, 0x033, 0x00000027, 0x03F, 0x00000070,
- 0x033, 0x00000028, 0x03F, 0x000000ED, 0x033, 0x00000029,
- 0x03F, 0x000000F0, 0x033, 0x0000002A, 0x03F, 0x000000F3,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000007, 0x033, 0x00000021, 0x03F, 0x0000000A,
- 0x033, 0x00000022, 0x03F, 0x0000000D, 0x033, 0x00000023,
- 0x03F, 0x0000002A, 0x033, 0x00000024, 0x03F, 0x0000002D,
- 0x033, 0x00000025, 0x03F, 0x00000030, 0x033, 0x00000026,
- 0x03F, 0x0000006D, 0x033, 0x00000027, 0x03F, 0x00000070,
- 0x033, 0x00000028, 0x03F, 0x000000ED, 0x033, 0x00000029,
- 0x03F, 0x000000F0, 0x033, 0x0000002A, 0x03F, 0x000000F3,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000005, 0x033, 0x00000021, 0x03F, 0x00000008,
- 0x033, 0x00000022, 0x03F, 0x0000000B, 0x033, 0x00000023,
- 0x03F, 0x0000000E, 0x033, 0x00000024, 0x03F, 0x0000002B,
- 0x033, 0x00000025, 0x03F, 0x00000068, 0x033, 0x00000026,
- 0x03F, 0x0000006B, 0x033, 0x00000027, 0x03F, 0x0000006E,
- 0x033, 0x00000028, 0x03F, 0x00000071, 0x033, 0x00000029,
- 0x03F, 0x00000074, 0x033, 0x0000002A, 0x03F, 0x00000077,
- 0x9300200c, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000005, 0x033, 0x00000021, 0x03F, 0x00000008,
- 0x033, 0x00000022, 0x03F, 0x0000000B, 0x033, 0x00000023,
- 0x03F, 0x0000000E, 0x033, 0x00000024, 0x03F, 0x0000002B,
- 0x033, 0x00000025, 0x03F, 0x00000068, 0x033, 0x00000026,
- 0x03F, 0x0000006B, 0x033, 0x00000027, 0x03F, 0x0000006E,
- 0x033, 0x00000028, 0x03F, 0x00000071, 0x033, 0x00000029,
- 0x03F, 0x00000074, 0x033, 0x0000002A, 0x03F, 0x00000077,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000828, 0x033, 0x00000021, 0x03F, 0x0000082B,
- 0x033, 0x00000022, 0x03F, 0x00000868, 0x033, 0x00000023,
- 0x03F, 0x00000889, 0x033, 0x00000024, 0x03F, 0x000008AA,
- 0x033, 0x00000025, 0x03F, 0x00000CE8, 0x033, 0x00000026,
- 0x03F, 0x00000CEB, 0x033, 0x00000027, 0x03F, 0x00000CEE,
- 0x033, 0x00000028, 0x03F, 0x00000CF1, 0x033, 0x00000029,
- 0x03F, 0x00000CF4, 0x033, 0x0000002A, 0x03F, 0x00000CF7,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x0000042A, 0x033, 0x00000021, 0x03F, 0x00000829,
- 0x033, 0x00000022, 0x03F, 0x00000848, 0x033, 0x00000023,
- 0x03F, 0x0000084B, 0x033, 0x00000024, 0x03F, 0x00000C4C,
- 0x033, 0x00000025, 0x03F, 0x00000C8B, 0x033, 0x00000026,
- 0x03F, 0x00000CEA, 0x033, 0x00000027, 0x03F, 0x00000CED,
- 0x033, 0x00000028, 0x03F, 0x00000CF0, 0x033, 0x00000029,
- 0x03F, 0x00000CF3, 0x033, 0x0000002A, 0x03F, 0x00000CF6,
- 0x93011000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000C09, 0x033, 0x00000021, 0x03F, 0x00000C0C,
- 0x033, 0x00000022, 0x03F, 0x00000C0F, 0x033, 0x00000023,
- 0x03F, 0x00000C2C, 0x033, 0x00000024, 0x03F, 0x00000C2F,
- 0x033, 0x00000025, 0x03F, 0x00000C8A, 0x033, 0x00000026,
- 0x03F, 0x00000C8D, 0x033, 0x00000027, 0x03F, 0x00000C90,
- 0x033, 0x00000028, 0x03F, 0x00000CD0, 0x033, 0x00000029,
- 0x03F, 0x00000CF2, 0x033, 0x0000002A, 0x03F, 0x00000CF5,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000005, 0x033, 0x00000021, 0x03F, 0x00000008,
- 0x033, 0x00000022, 0x03F, 0x0000000B, 0x033, 0x00000023,
- 0x03F, 0x0000000E, 0x033, 0x00000024, 0x03F, 0x0000002B,
- 0x033, 0x00000025, 0x03F, 0x00000068, 0x033, 0x00000026,
- 0x03F, 0x0000006B, 0x033, 0x00000027, 0x03F, 0x0000006E,
- 0x033, 0x00000028, 0x03F, 0x00000071, 0x033, 0x00000029,
- 0x03F, 0x00000074, 0x033, 0x0000002A, 0x03F, 0x00000077,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000C09, 0x033, 0x00000021, 0x03F, 0x00000C0C,
- 0x033, 0x00000022, 0x03F, 0x00000C0F, 0x033, 0x00000023,
- 0x03F, 0x00000C2C, 0x033, 0x00000024, 0x03F, 0x00000C2F,
- 0x033, 0x00000025, 0x03F, 0x00000C8A, 0x033, 0x00000026,
- 0x03F, 0x00000C8D, 0x033, 0x00000027, 0x03F, 0x00000C90,
- 0x033, 0x00000028, 0x03F, 0x00000CD0, 0x033, 0x00000029,
- 0x03F, 0x00000CF2, 0x033, 0x0000002A, 0x03F, 0x00000CF5,
- 0x93002000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000429, 0x033, 0x00000021, 0x03F, 0x00000828,
- 0x033, 0x00000022, 0x03F, 0x00000847, 0x033, 0x00000023,
- 0x03F, 0x0000084A, 0x033, 0x00000024, 0x03F, 0x00000C4B,
- 0x033, 0x00000025, 0x03F, 0x00000C8A, 0x033, 0x00000026,
- 0x03F, 0x00000CEA, 0x033, 0x00000027, 0x03F, 0x00000CED,
- 0x033, 0x00000028, 0x03F, 0x00000CF0, 0x033, 0x00000029,
- 0x03F, 0x00000CF3, 0x033, 0x0000002A, 0x03F, 0x00000CF6,
- 0x93001000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x00000C09, 0x033, 0x00000021, 0x03F, 0x00000C0C,
- 0x033, 0x00000022, 0x03F, 0x00000C0F, 0x033, 0x00000023,
- 0x03F, 0x00000C2C, 0x033, 0x00000024, 0x03F, 0x00000C2F,
- 0x033, 0x00000025, 0x03F, 0x00000C8A, 0x033, 0x00000026,
- 0x03F, 0x00000C8D, 0x033, 0x00000027, 0x03F, 0x00000C90,
- 0x033, 0x00000028, 0x03F, 0x00000CD0, 0x033, 0x00000029,
- 0x03F, 0x00000CF2, 0x033, 0x0000002A, 0x03F, 0x00000CF5,
- 0x90002100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x0000042B, 0x033, 0x00000021, 0x03F, 0x0000082A,
- 0x033, 0x00000022, 0x03F, 0x00000849, 0x033, 0x00000023,
- 0x03F, 0x0000084C, 0x033, 0x00000024, 0x03F, 0x00000C4C,
- 0x033, 0x00000025, 0x03F, 0x00000C8A, 0x033, 0x00000026,
- 0x03F, 0x00000C8D, 0x033, 0x00000027, 0x03F, 0x00000CEB,
- 0x033, 0x00000028, 0x03F, 0x00000CEE, 0x033, 0x00000029,
- 0x03F, 0x00000CF1, 0x033, 0x0000002A, 0x03F, 0x00000CF4,
- 0x90002000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020,
- 0x03F, 0x0000042B, 0x033, 0x00000021, 0x03F, 0x0000082A,
- 0x033, 0x00000022, 0x03F, 0x00000849, 0x033, 0x00000023,
- 0x03F, 0x0000084C, 0x033, 0x00000024, 0x03F, 0x00000C4C,
- 0x033, 0x00000025, 0x03F, 0x00000C8A, 0x033, 0x00000026,
- 0x03F, 0x00000C8D, 0x033, 0x00000027, 0x03F, 0x00000CEB,
- 0x033, 0x00000028, 0x03F, 0x00000CEE, 0x033, 0x00000029,
- 0x03F, 0x00000CF1, 0x033, 0x0000002A, 0x03F, 0x00000CF4,
- 0xA0000000, 0x00000000, 0x033, 0x00000020, 0x03F, 0x00000C09,
- 0x033, 0x00000021, 0x03F, 0x00000C0C, 0x033, 0x00000022,
- 0x03F, 0x00000C0F, 0x033, 0x00000023, 0x03F, 0x00000C2C,
- 0x033, 0x00000024, 0x03F, 0x00000C2F, 0x033, 0x00000025,
- 0x03F, 0x00000C8A, 0x033, 0x00000026, 0x03F, 0x00000C8D,
- 0x033, 0x00000027, 0x03F, 0x00000C90, 0x033, 0x00000028,
- 0x03F, 0x00000CD0, 0x033, 0x00000029, 0x03F, 0x00000CF2,
- 0x033, 0x0000002A, 0x03F, 0x00000CF5, 0xB0000000, 0x00000000,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000007, 0x033, 0x00000061, 0x03F, 0x0000000A,
- 0x033, 0x00000062, 0x03F, 0x0000000D, 0x033, 0x00000063,
- 0x03F, 0x0000002A, 0x033, 0x00000064, 0x03F, 0x0000002D,
- 0x033, 0x00000065, 0x03F, 0x00000030, 0x033, 0x00000066,
- 0x03F, 0x0000006D, 0x033, 0x00000067, 0x03F, 0x00000070,
- 0x033, 0x00000068, 0x03F, 0x000000ED, 0x033, 0x00000069,
- 0x03F, 0x000000F0, 0x033, 0x0000006A, 0x03F, 0x000000F3,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000007, 0x033, 0x00000061, 0x03F, 0x0000000A,
- 0x033, 0x00000062, 0x03F, 0x0000000D, 0x033, 0x00000063,
- 0x03F, 0x0000002A, 0x033, 0x00000064, 0x03F, 0x0000002D,
- 0x033, 0x00000065, 0x03F, 0x00000030, 0x033, 0x00000066,
- 0x03F, 0x0000006D, 0x033, 0x00000067, 0x03F, 0x00000070,
- 0x033, 0x00000068, 0x03F, 0x000000ED, 0x033, 0x00000069,
- 0x03F, 0x000000F0, 0x033, 0x0000006A, 0x03F, 0x000000F3,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000007, 0x033, 0x00000061, 0x03F, 0x0000000A,
- 0x033, 0x00000062, 0x03F, 0x0000000D, 0x033, 0x00000063,
- 0x03F, 0x0000002A, 0x033, 0x00000064, 0x03F, 0x0000002D,
- 0x033, 0x00000065, 0x03F, 0x00000030, 0x033, 0x00000066,
- 0x03F, 0x0000006D, 0x033, 0x00000067, 0x03F, 0x00000070,
- 0x033, 0x00000068, 0x03F, 0x000000ED, 0x033, 0x00000069,
- 0x03F, 0x000000F0, 0x033, 0x0000006A, 0x03F, 0x000000F3,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000005, 0x033, 0x00000061, 0x03F, 0x00000008,
- 0x033, 0x00000062, 0x03F, 0x0000000B, 0x033, 0x00000063,
- 0x03F, 0x0000000E, 0x033, 0x00000064, 0x03F, 0x0000002B,
- 0x033, 0x00000065, 0x03F, 0x00000068, 0x033, 0x00000066,
- 0x03F, 0x0000006B, 0x033, 0x00000067, 0x03F, 0x0000006E,
- 0x033, 0x00000068, 0x03F, 0x00000071, 0x033, 0x00000069,
- 0x03F, 0x00000074, 0x033, 0x0000006A, 0x03F, 0x00000077,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000007, 0x033, 0x00000061, 0x03F, 0x0000000A,
- 0x033, 0x00000062, 0x03F, 0x0000000D, 0x033, 0x00000063,
- 0x03F, 0x0000002A, 0x033, 0x00000064, 0x03F, 0x0000002D,
- 0x033, 0x00000065, 0x03F, 0x00000030, 0x033, 0x00000066,
- 0x03F, 0x0000006D, 0x033, 0x00000067, 0x03F, 0x00000070,
- 0x033, 0x00000068, 0x03F, 0x000000ED, 0x033, 0x00000069,
- 0x03F, 0x000000F0, 0x033, 0x0000006A, 0x03F, 0x000000F3,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000007, 0x033, 0x00000061, 0x03F, 0x0000000A,
- 0x033, 0x00000062, 0x03F, 0x0000000D, 0x033, 0x00000063,
- 0x03F, 0x0000002A, 0x033, 0x00000064, 0x03F, 0x0000002D,
- 0x033, 0x00000065, 0x03F, 0x00000030, 0x033, 0x00000066,
- 0x03F, 0x0000006D, 0x033, 0x00000067, 0x03F, 0x00000070,
- 0x033, 0x00000068, 0x03F, 0x000000ED, 0x033, 0x00000069,
- 0x03F, 0x000000F0, 0x033, 0x0000006A, 0x03F, 0x000000F3,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000007, 0x033, 0x00000061, 0x03F, 0x0000000A,
- 0x033, 0x00000062, 0x03F, 0x0000000D, 0x033, 0x00000063,
- 0x03F, 0x0000002A, 0x033, 0x00000064, 0x03F, 0x0000002D,
- 0x033, 0x00000065, 0x03F, 0x00000030, 0x033, 0x00000066,
- 0x03F, 0x0000006D, 0x033, 0x00000067, 0x03F, 0x00000070,
- 0x033, 0x00000068, 0x03F, 0x000000ED, 0x033, 0x00000069,
- 0x03F, 0x000000F0, 0x033, 0x0000006A, 0x03F, 0x000000F3,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000005, 0x033, 0x00000061, 0x03F, 0x00000008,
- 0x033, 0x00000062, 0x03F, 0x0000000B, 0x033, 0x00000063,
- 0x03F, 0x0000000E, 0x033, 0x00000064, 0x03F, 0x0000002B,
- 0x033, 0x00000065, 0x03F, 0x00000068, 0x033, 0x00000066,
- 0x03F, 0x0000006B, 0x033, 0x00000067, 0x03F, 0x0000006E,
- 0x033, 0x00000068, 0x03F, 0x00000071, 0x033, 0x00000069,
- 0x03F, 0x00000074, 0x033, 0x0000006A, 0x03F, 0x00000077,
- 0x9300200c, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000005, 0x033, 0x00000061, 0x03F, 0x00000008,
- 0x033, 0x00000062, 0x03F, 0x0000000B, 0x033, 0x00000063,
- 0x03F, 0x0000000E, 0x033, 0x00000064, 0x03F, 0x0000002B,
- 0x033, 0x00000065, 0x03F, 0x00000068, 0x033, 0x00000066,
- 0x03F, 0x0000006B, 0x033, 0x00000067, 0x03F, 0x0000006E,
- 0x033, 0x00000068, 0x03F, 0x00000071, 0x033, 0x00000069,
- 0x03F, 0x00000074, 0x033, 0x0000006A, 0x03F, 0x00000077,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000842, 0x033, 0x00000061, 0x03F, 0x00000845,
- 0x033, 0x00000062, 0x03F, 0x00000866, 0x033, 0x00000063,
- 0x03F, 0x000008A6, 0x033, 0x00000064, 0x03F, 0x000008C8,
- 0x033, 0x00000065, 0x03F, 0x00000CE8, 0x033, 0x00000066,
- 0x03F, 0x00000CEB, 0x033, 0x00000067, 0x03F, 0x00000CEE,
- 0x033, 0x00000068, 0x03F, 0x00000CF1, 0x033, 0x00000069,
- 0x03F, 0x00000CF4, 0x033, 0x0000006A, 0x03F, 0x00000CF7,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x0000042A, 0x033, 0x00000061, 0x03F, 0x00000829,
- 0x033, 0x00000062, 0x03F, 0x00000848, 0x033, 0x00000063,
- 0x03F, 0x0000084B, 0x033, 0x00000064, 0x03F, 0x00000C69,
- 0x033, 0x00000065, 0x03F, 0x00000CA9, 0x033, 0x00000066,
- 0x03F, 0x00000CEA, 0x033, 0x00000067, 0x03F, 0x00000CED,
- 0x033, 0x00000068, 0x03F, 0x00000CF0, 0x033, 0x00000069,
- 0x03F, 0x00000CF3, 0x033, 0x0000006A, 0x03F, 0x00000CF6,
- 0x93011000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000C0A, 0x033, 0x00000061, 0x03F, 0x00000C0D,
- 0x033, 0x00000062, 0x03F, 0x00000C2A, 0x033, 0x00000063,
- 0x03F, 0x00000C2D, 0x033, 0x00000064, 0x03F, 0x00000C6A,
- 0x033, 0x00000065, 0x03F, 0x00000CAA, 0x033, 0x00000066,
- 0x03F, 0x00000CAD, 0x033, 0x00000067, 0x03F, 0x00000CB0,
- 0x033, 0x00000068, 0x03F, 0x00000CF1, 0x033, 0x00000069,
- 0x03F, 0x00000CF4, 0x033, 0x0000006A, 0x03F, 0x00000CF7,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000005, 0x033, 0x00000061, 0x03F, 0x00000008,
- 0x033, 0x00000062, 0x03F, 0x0000000B, 0x033, 0x00000063,
- 0x03F, 0x0000000E, 0x033, 0x00000064, 0x03F, 0x0000002B,
- 0x033, 0x00000065, 0x03F, 0x00000068, 0x033, 0x00000066,
- 0x03F, 0x0000006B, 0x033, 0x00000067, 0x03F, 0x0000006E,
- 0x033, 0x00000068, 0x03F, 0x00000071, 0x033, 0x00000069,
- 0x03F, 0x00000074, 0x033, 0x0000006A, 0x03F, 0x00000077,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000C0A, 0x033, 0x00000061, 0x03F, 0x00000C0D,
- 0x033, 0x00000062, 0x03F, 0x00000C2A, 0x033, 0x00000063,
- 0x03F, 0x00000C2D, 0x033, 0x00000064, 0x03F, 0x00000C6A,
- 0x033, 0x00000065, 0x03F, 0x00000CAA, 0x033, 0x00000066,
- 0x03F, 0x00000CAD, 0x033, 0x00000067, 0x03F, 0x00000CB0,
- 0x033, 0x00000068, 0x03F, 0x00000CF1, 0x033, 0x00000069,
- 0x03F, 0x00000CF4, 0x033, 0x0000006A, 0x03F, 0x00000CF7,
- 0x93002000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000429, 0x033, 0x00000061, 0x03F, 0x00000828,
- 0x033, 0x00000062, 0x03F, 0x00000847, 0x033, 0x00000063,
- 0x03F, 0x0000084A, 0x033, 0x00000064, 0x03F, 0x00000C4B,
- 0x033, 0x00000065, 0x03F, 0x00000C8A, 0x033, 0x00000066,
- 0x03F, 0x00000CEA, 0x033, 0x00000067, 0x03F, 0x00000CED,
- 0x033, 0x00000068, 0x03F, 0x00000CF0, 0x033, 0x00000069,
- 0x03F, 0x00000CF3, 0x033, 0x0000006A, 0x03F, 0x00000CF6,
- 0x93001000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x00000C0A, 0x033, 0x00000061, 0x03F, 0x00000C0D,
- 0x033, 0x00000062, 0x03F, 0x00000C2A, 0x033, 0x00000063,
- 0x03F, 0x00000C2D, 0x033, 0x00000064, 0x03F, 0x00000C6A,
- 0x033, 0x00000065, 0x03F, 0x00000CAA, 0x033, 0x00000066,
- 0x03F, 0x00000CAD, 0x033, 0x00000067, 0x03F, 0x00000CB0,
- 0x033, 0x00000068, 0x03F, 0x00000CF1, 0x033, 0x00000069,
- 0x03F, 0x00000CF4, 0x033, 0x0000006A, 0x03F, 0x00000CF7,
- 0x90002100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x0000042C, 0x033, 0x00000061, 0x03F, 0x0000082B,
- 0x033, 0x00000062, 0x03F, 0x0000084A, 0x033, 0x00000063,
- 0x03F, 0x0000084D, 0x033, 0x00000064, 0x03F, 0x00000C4E,
- 0x033, 0x00000065, 0x03F, 0x00000C8C, 0x033, 0x00000066,
- 0x03F, 0x00000C8F, 0x033, 0x00000067, 0x03F, 0x00000CEC,
- 0x033, 0x00000068, 0x03F, 0x00000CEF, 0x033, 0x00000069,
- 0x03F, 0x00000CF2, 0x033, 0x0000006A, 0x03F, 0x00000CF5,
- 0x90002000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060,
- 0x03F, 0x0000042C, 0x033, 0x00000061, 0x03F, 0x0000082B,
- 0x033, 0x00000062, 0x03F, 0x0000084A, 0x033, 0x00000063,
- 0x03F, 0x0000084D, 0x033, 0x00000064, 0x03F, 0x00000C4E,
- 0x033, 0x00000065, 0x03F, 0x00000C8C, 0x033, 0x00000066,
- 0x03F, 0x00000C8F, 0x033, 0x00000067, 0x03F, 0x00000CEC,
- 0x033, 0x00000068, 0x03F, 0x00000CEF, 0x033, 0x00000069,
- 0x03F, 0x00000CF2, 0x033, 0x0000006A, 0x03F, 0x00000CF5,
- 0xA0000000, 0x00000000, 0x033, 0x00000060, 0x03F, 0x00000C0A,
- 0x033, 0x00000061, 0x03F, 0x00000C0D, 0x033, 0x00000062,
- 0x03F, 0x00000C2A, 0x033, 0x00000063, 0x03F, 0x00000C2D,
- 0x033, 0x00000064, 0x03F, 0x00000C6A, 0x033, 0x00000065,
- 0x03F, 0x00000CAA, 0x033, 0x00000066, 0x03F, 0x00000CAD,
- 0x033, 0x00000067, 0x03F, 0x00000CB0, 0x033, 0x00000068,
- 0x03F, 0x00000CF1, 0x033, 0x00000069, 0x03F, 0x00000CF4,
- 0x033, 0x0000006A, 0x03F, 0x00000CF7, 0xB0000000, 0x00000000,
- 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000007, 0x033, 0x000000A1, 0x03F, 0x0000000A,
- 0x033, 0x000000A2, 0x03F, 0x0000000D, 0x033, 0x000000A3,
- 0x03F, 0x0000002A, 0x033, 0x000000A4, 0x03F, 0x0000002D,
- 0x033, 0x000000A5, 0x03F, 0x00000030, 0x033, 0x000000A6,
- 0x03F, 0x0000006D, 0x033, 0x000000A7, 0x03F, 0x00000070,
- 0x033, 0x000000A8, 0x03F, 0x000000ED, 0x033, 0x000000A9,
- 0x03F, 0x000000F0, 0x033, 0x000000AA, 0x03F, 0x000000F3,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000007, 0x033, 0x000000A1, 0x03F, 0x0000000A,
- 0x033, 0x000000A2, 0x03F, 0x0000000D, 0x033, 0x000000A3,
- 0x03F, 0x0000002A, 0x033, 0x000000A4, 0x03F, 0x0000002D,
- 0x033, 0x000000A5, 0x03F, 0x00000030, 0x033, 0x000000A6,
- 0x03F, 0x0000006D, 0x033, 0x000000A7, 0x03F, 0x00000070,
- 0x033, 0x000000A8, 0x03F, 0x000000ED, 0x033, 0x000000A9,
- 0x03F, 0x000000F0, 0x033, 0x000000AA, 0x03F, 0x000000F3,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000007, 0x033, 0x000000A1, 0x03F, 0x0000000A,
- 0x033, 0x000000A2, 0x03F, 0x0000000D, 0x033, 0x000000A3,
- 0x03F, 0x0000002A, 0x033, 0x000000A4, 0x03F, 0x0000002D,
- 0x033, 0x000000A5, 0x03F, 0x00000030, 0x033, 0x000000A6,
- 0x03F, 0x0000006D, 0x033, 0x000000A7, 0x03F, 0x00000070,
- 0x033, 0x000000A8, 0x03F, 0x000000ED, 0x033, 0x000000A9,
- 0x03F, 0x000000F0, 0x033, 0x000000AA, 0x03F, 0x000000F3,
- 0x9300200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000005, 0x033, 0x000000A1, 0x03F, 0x00000008,
- 0x033, 0x000000A2, 0x03F, 0x0000000B, 0x033, 0x000000A3,
- 0x03F, 0x0000000E, 0x033, 0x000000A4, 0x03F, 0x00000047,
- 0x033, 0x000000A5, 0x03F, 0x0000004A, 0x033, 0x000000A6,
- 0x03F, 0x0000004D, 0x033, 0x000000A7, 0x03F, 0x00000050,
- 0x033, 0x000000A8, 0x03F, 0x00000053, 0x033, 0x000000A9,
- 0x03F, 0x00000056, 0x033, 0x000000AA, 0x03F, 0x00000094,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000007, 0x033, 0x000000A1, 0x03F, 0x0000000A,
- 0x033, 0x000000A2, 0x03F, 0x0000000D, 0x033, 0x000000A3,
- 0x03F, 0x0000002A, 0x033, 0x000000A4, 0x03F, 0x0000002D,
- 0x033, 0x000000A5, 0x03F, 0x00000030, 0x033, 0x000000A6,
- 0x03F, 0x0000006D, 0x033, 0x000000A7, 0x03F, 0x00000070,
- 0x033, 0x000000A8, 0x03F, 0x000000ED, 0x033, 0x000000A9,
- 0x03F, 0x000000F0, 0x033, 0x000000AA, 0x03F, 0x000000F3,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000007, 0x033, 0x000000A1, 0x03F, 0x0000000A,
- 0x033, 0x000000A2, 0x03F, 0x0000000D, 0x033, 0x000000A3,
- 0x03F, 0x0000002A, 0x033, 0x000000A4, 0x03F, 0x0000002D,
- 0x033, 0x000000A5, 0x03F, 0x00000030, 0x033, 0x000000A6,
- 0x03F, 0x0000006D, 0x033, 0x000000A7, 0x03F, 0x00000070,
- 0x033, 0x000000A8, 0x03F, 0x000000ED, 0x033, 0x000000A9,
- 0x03F, 0x000000F0, 0x033, 0x000000AA, 0x03F, 0x000000F3,
- 0x9000100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000007, 0x033, 0x000000A1, 0x03F, 0x0000000A,
- 0x033, 0x000000A2, 0x03F, 0x0000000D, 0x033, 0x000000A3,
- 0x03F, 0x0000002A, 0x033, 0x000000A4, 0x03F, 0x0000002D,
- 0x033, 0x000000A5, 0x03F, 0x00000030, 0x033, 0x000000A6,
- 0x03F, 0x0000006D, 0x033, 0x000000A7, 0x03F, 0x00000070,
- 0x033, 0x000000A8, 0x03F, 0x000000ED, 0x033, 0x000000A9,
- 0x03F, 0x000000F0, 0x033, 0x000000AA, 0x03F, 0x000000F3,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000005, 0x033, 0x000000A1, 0x03F, 0x00000008,
- 0x033, 0x000000A2, 0x03F, 0x0000000B, 0x033, 0x000000A3,
- 0x03F, 0x0000000E, 0x033, 0x000000A4, 0x03F, 0x00000047,
- 0x033, 0x000000A5, 0x03F, 0x0000004A, 0x033, 0x000000A6,
- 0x03F, 0x0000004D, 0x033, 0x000000A7, 0x03F, 0x00000050,
- 0x033, 0x000000A8, 0x03F, 0x00000053, 0x033, 0x000000A9,
- 0x03F, 0x00000056, 0x033, 0x000000AA, 0x03F, 0x00000094,
- 0x9300200c, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000005, 0x033, 0x000000A1, 0x03F, 0x00000008,
- 0x033, 0x000000A2, 0x03F, 0x0000000B, 0x033, 0x000000A3,
- 0x03F, 0x0000000E, 0x033, 0x000000A4, 0x03F, 0x00000047,
- 0x033, 0x000000A5, 0x03F, 0x0000004A, 0x033, 0x000000A6,
- 0x03F, 0x0000004D, 0x033, 0x000000A7, 0x03F, 0x00000050,
- 0x033, 0x000000A8, 0x03F, 0x00000053, 0x033, 0x000000A9,
- 0x03F, 0x00000056, 0x033, 0x000000AA, 0x03F, 0x00000094,
- 0x93012100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000826, 0x033, 0x000000A1, 0x03F, 0x00000829,
- 0x033, 0x000000A2, 0x03F, 0x0000082C, 0x033, 0x000000A3,
- 0x03F, 0x0000082F, 0x033, 0x000000A4, 0x03F, 0x0000086C,
- 0x033, 0x000000A5, 0x03F, 0x00000CE8, 0x033, 0x000000A6,
- 0x03F, 0x00000CEB, 0x033, 0x000000A7, 0x03F, 0x00000CEE,
- 0x033, 0x000000A8, 0x03F, 0x00000CF1, 0x033, 0x000000A9,
- 0x03F, 0x00000CF4, 0x033, 0x000000AA, 0x03F, 0x00000CF7,
- 0x93002100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x0000042A, 0x033, 0x000000A1, 0x03F, 0x00000829,
- 0x033, 0x000000A2, 0x03F, 0x00000848, 0x033, 0x000000A3,
- 0x03F, 0x0000084B, 0x033, 0x000000A4, 0x03F, 0x00000C4C,
- 0x033, 0x000000A5, 0x03F, 0x00000CA9, 0x033, 0x000000A6,
- 0x03F, 0x00000CEA, 0x033, 0x000000A7, 0x03F, 0x00000CED,
- 0x033, 0x000000A8, 0x03F, 0x00000CF0, 0x033, 0x000000A9,
- 0x03F, 0x00000CF3, 0x033, 0x000000AA, 0x03F, 0x00000CF6,
- 0x93011000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000C09, 0x033, 0x000000A1, 0x03F, 0x00000C0C,
- 0x033, 0x000000A2, 0x03F, 0x00000C0F, 0x033, 0x000000A3,
- 0x03F, 0x00000C2C, 0x033, 0x000000A4, 0x03F, 0x00000C2F,
- 0x033, 0x000000A5, 0x03F, 0x00000C8A, 0x033, 0x000000A6,
- 0x03F, 0x00000C8D, 0x033, 0x000000A7, 0x03F, 0x00000C90,
- 0x033, 0x000000A8, 0x03F, 0x00000CEF, 0x033, 0x000000A9,
- 0x03F, 0x00000CF2, 0x033, 0x000000AA, 0x03F, 0x00000CF5,
- 0x9000200c, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000005, 0x033, 0x000000A1, 0x03F, 0x00000008,
- 0x033, 0x000000A2, 0x03F, 0x0000000B, 0x033, 0x000000A3,
- 0x03F, 0x0000000E, 0x033, 0x000000A4, 0x03F, 0x00000047,
- 0x033, 0x000000A5, 0x03F, 0x0000004A, 0x033, 0x000000A6,
- 0x03F, 0x0000004D, 0x033, 0x000000A7, 0x03F, 0x00000050,
- 0x033, 0x000000A8, 0x03F, 0x00000053, 0x033, 0x000000A9,
- 0x03F, 0x00000056, 0x033, 0x000000AA, 0x03F, 0x00000094,
- 0x90001004, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000C09, 0x033, 0x000000A1, 0x03F, 0x00000C0C,
- 0x033, 0x000000A2, 0x03F, 0x00000C0F, 0x033, 0x000000A3,
- 0x03F, 0x00000C2C, 0x033, 0x000000A4, 0x03F, 0x00000C2F,
- 0x033, 0x000000A5, 0x03F, 0x00000C8A, 0x033, 0x000000A6,
- 0x03F, 0x00000C8D, 0x033, 0x000000A7, 0x03F, 0x00000C90,
- 0x033, 0x000000A8, 0x03F, 0x00000CEF, 0x033, 0x000000A9,
- 0x03F, 0x00000CF2, 0x033, 0x000000AA, 0x03F, 0x00000CF5,
- 0x93002000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000429, 0x033, 0x000000A1, 0x03F, 0x00000828,
- 0x033, 0x000000A2, 0x03F, 0x00000847, 0x033, 0x000000A3,
- 0x03F, 0x0000084A, 0x033, 0x000000A4, 0x03F, 0x00000C4B,
- 0x033, 0x000000A5, 0x03F, 0x00000C8A, 0x033, 0x000000A6,
- 0x03F, 0x00000CEA, 0x033, 0x000000A7, 0x03F, 0x00000CED,
- 0x033, 0x000000A8, 0x03F, 0x00000CF0, 0x033, 0x000000A9,
- 0x03F, 0x00000CF3, 0x033, 0x000000AA, 0x03F, 0x00000CF6,
- 0x93001000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x00000C09, 0x033, 0x000000A1, 0x03F, 0x00000C0C,
- 0x033, 0x000000A2, 0x03F, 0x00000C0F, 0x033, 0x000000A3,
- 0x03F, 0x00000C2C, 0x033, 0x000000A4, 0x03F, 0x00000C2F,
- 0x033, 0x000000A5, 0x03F, 0x00000C8A, 0x033, 0x000000A6,
- 0x03F, 0x00000C8D, 0x033, 0x000000A7, 0x03F, 0x00000C90,
- 0x033, 0x000000A8, 0x03F, 0x00000CEF, 0x033, 0x000000A9,
- 0x03F, 0x00000CF2, 0x033, 0x000000AA, 0x03F, 0x00000CF5,
- 0x90002100, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x0000042A, 0x033, 0x000000A1, 0x03F, 0x00000829,
- 0x033, 0x000000A2, 0x03F, 0x00000848, 0x033, 0x000000A3,
- 0x03F, 0x0000084B, 0x033, 0x000000A4, 0x03F, 0x00000C4C,
- 0x033, 0x000000A5, 0x03F, 0x00000C8A, 0x033, 0x000000A6,
- 0x03F, 0x00000C8D, 0x033, 0x000000A7, 0x03F, 0x00000CEC,
- 0x033, 0x000000A8, 0x03F, 0x00000CEF, 0x033, 0x000000A9,
- 0x03F, 0x00000CF2, 0x033, 0x000000AA, 0x03F, 0x00000CF5,
- 0x90002000, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x000000A0,
- 0x03F, 0x0000042A, 0x033, 0x000000A1, 0x03F, 0x00000829,
- 0x033, 0x000000A2, 0x03F, 0x00000848, 0x033, 0x000000A3,
- 0x03F, 0x0000084B, 0x033, 0x000000A4, 0x03F, 0x00000C4C,
- 0x033, 0x000000A5, 0x03F, 0x00000C8A, 0x033, 0x000000A6,
- 0x03F, 0x00000C8D, 0x033, 0x000000A7, 0x03F, 0x00000CEC,
- 0x033, 0x000000A8, 0x03F, 0x00000CEF, 0x033, 0x000000A9,
- 0x03F, 0x00000CF2, 0x033, 0x000000AA, 0x03F, 0x00000CF5,
- 0xA0000000, 0x00000000, 0x033, 0x000000A0, 0x03F, 0x00000C09,
- 0x033, 0x000000A1, 0x03F, 0x00000C0C, 0x033, 0x000000A2,
- 0x03F, 0x00000C0F, 0x033, 0x000000A3, 0x03F, 0x00000C2C,
- 0x033, 0x000000A4, 0x03F, 0x00000C2F, 0x033, 0x000000A5,
- 0x03F, 0x00000C8A, 0x033, 0x000000A6, 0x03F, 0x00000C8D,
- 0x033, 0x000000A7, 0x03F, 0x00000C90, 0x033, 0x000000A8,
- 0x03F, 0x00000CEF, 0x033, 0x000000A9, 0x03F, 0x00000CF2,
- 0x033, 0x000000AA, 0x03F, 0x00000CF5, 0xB0000000, 0x00000000,
- 0x0EF, 0x00000000, 0x0EF, 0x00000400, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x0000265A,
- 0x033, 0x00000001, 0x03F, 0x0000265A, 0x033, 0x00000002,
- 0x03F, 0x0000265A, 0x033, 0x00000003, 0x03F, 0x0000265A,
- 0x9300100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x0000265A, 0x033, 0x00000001, 0x03F, 0x0000265A,
- 0x033, 0x00000002, 0x03F, 0x0000265A, 0x033, 0x00000003,
- 0x03F, 0x0000265A, 0x9300100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x0000265A, 0x033, 0x00000001,
- 0x03F, 0x0000265A, 0x033, 0x00000002, 0x03F, 0x0000265A,
- 0x033, 0x00000003, 0x03F, 0x0000265A, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x0000265A,
- 0x033, 0x00000001, 0x03F, 0x0000265A, 0x033, 0x00000002,
- 0x03F, 0x0000265A, 0x033, 0x00000003, 0x03F, 0x0000265A,
- 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x0000265A, 0x033, 0x00000001, 0x03F, 0x0000265A,
- 0x033, 0x00000002, 0x03F, 0x0000265A, 0x033, 0x00000003,
- 0x03F, 0x0000265A, 0x9000100f, 0x05050505, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x0000265A, 0x033, 0x00000001,
- 0x03F, 0x0000265A, 0x033, 0x00000002, 0x03F, 0x0000265A,
- 0x033, 0x00000003, 0x03F, 0x0000265A, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x0000265A,
- 0x033, 0x00000001, 0x03F, 0x0000265A, 0x033, 0x00000002,
- 0x03F, 0x0000265A, 0x033, 0x00000003, 0x03F, 0x0000265A,
- 0x9000200f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x0000265A, 0x033, 0x00000001, 0x03F, 0x0000265A,
- 0x033, 0x00000002, 0x03F, 0x0000265A, 0x033, 0x00000003,
- 0x03F, 0x0000265A, 0xA0000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x000004BB, 0x033, 0x00000001, 0x03F, 0x000004BB,
- 0x033, 0x00000002, 0x03F, 0x000004BB, 0x033, 0x00000003,
- 0x03F, 0x000004BB, 0xB0000000, 0x00000000, 0x0EF, 0x00000000,
- 0x0EF, 0x00000100, 0x8300100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x00000745, 0x033, 0x00000001,
- 0x03F, 0x00000745, 0x033, 0x00000002, 0x03F, 0x00000745,
- 0x033, 0x00000003, 0x03F, 0x00000745, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000745,
- 0x033, 0x00000001, 0x03F, 0x00000745, 0x033, 0x00000002,
- 0x03F, 0x00000745, 0x033, 0x00000003, 0x03F, 0x00000745,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000745, 0x033, 0x00000001, 0x03F, 0x00000745,
- 0x033, 0x00000002, 0x03F, 0x00000745, 0x033, 0x00000003,
- 0x03F, 0x00000745, 0x9300200f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x00000745, 0x033, 0x00000001,
- 0x03F, 0x00000745, 0x033, 0x00000002, 0x03F, 0x00000745,
- 0x033, 0x00000003, 0x03F, 0x00000745, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000745,
- 0x033, 0x00000001, 0x03F, 0x00000745, 0x033, 0x00000002,
- 0x03F, 0x00000745, 0x033, 0x00000003, 0x03F, 0x00000745,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x033, 0x00000000,
- 0x03F, 0x00000745, 0x033, 0x00000001, 0x03F, 0x00000745,
- 0x033, 0x00000002, 0x03F, 0x00000745, 0x033, 0x00000003,
- 0x03F, 0x00000745, 0x9000100f, 0x00000000, 0x40000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x00000745, 0x033, 0x00000001,
- 0x03F, 0x00000745, 0x033, 0x00000002, 0x03F, 0x00000745,
- 0x033, 0x00000003, 0x03F, 0x00000745, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000745,
- 0x033, 0x00000001, 0x03F, 0x00000745, 0x033, 0x00000002,
- 0x03F, 0x00000745, 0x033, 0x00000003, 0x03F, 0x00000745,
- 0xA0000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000F34,
- 0x033, 0x00000001, 0x03F, 0x00000F34, 0x033, 0x00000002,
- 0x03F, 0x00000F34, 0x033, 0x00000003, 0x03F, 0x00000F34,
- 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x081, 0x0000F400, 0x087, 0x00016040,
- 0x051, 0x00000808, 0x052, 0x00098002, 0x053, 0x0000FA47,
- 0x054, 0x00058032, 0x056, 0x00051000, 0x057, 0x0000CE0A,
- 0x058, 0x00082030, 0x9300100f, 0x05050505, 0x40000000, 0x00000000,
- 0x081, 0x0000F400, 0x087, 0x00016040, 0x051, 0x00000808,
- 0x052, 0x00098002, 0x053, 0x0000FA47, 0x054, 0x00058032,
- 0x056, 0x00051000, 0x057, 0x0000CE0A, 0x058, 0x00082030,
- 0x9300100f, 0x00000000, 0x40000000, 0x00000000, 0x081, 0x0000F400,
- 0x087, 0x00016040, 0x051, 0x00000808, 0x052, 0x00098002,
- 0x053, 0x0000FA47, 0x054, 0x00058032, 0x056, 0x00051000,
- 0x057, 0x0000CE0A, 0x058, 0x00082030, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x081, 0x0000F400, 0x087, 0x00016040,
- 0x051, 0x00000808, 0x052, 0x00098002, 0x053, 0x0000FA47,
- 0x054, 0x00058032, 0x056, 0x00051000, 0x057, 0x0000CE0A,
- 0x058, 0x00082030, 0x9000100f, 0x0a0a0a0a, 0x40000000, 0x00000000,
- 0x081, 0x0000F400, 0x087, 0x00016040, 0x051, 0x00000808,
- 0x052, 0x00098002, 0x053, 0x0000FA47, 0x054, 0x00058032,
- 0x056, 0x00051000, 0x057, 0x0000CE0A, 0x058, 0x00082030,
- 0x9000100f, 0x05050505, 0x40000000, 0x00000000, 0x081, 0x0000F400,
- 0x087, 0x00016040, 0x051, 0x00000808, 0x052, 0x00098002,
- 0x053, 0x0000FA47, 0x054, 0x00058032, 0x056, 0x00051000,
- 0x057, 0x0000CE0A, 0x058, 0x00082030, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x081, 0x0000F400, 0x087, 0x00016040,
- 0x051, 0x00000808, 0x052, 0x00098002, 0x053, 0x0000FA47,
- 0x054, 0x00058032, 0x056, 0x00051000, 0x057, 0x0000CE0A,
- 0x058, 0x00082030, 0x9000200f, 0x00000000, 0x40000000, 0x00000000,
- 0x081, 0x0000F400, 0x087, 0x00016040, 0x051, 0x00000808,
- 0x052, 0x00098002, 0x053, 0x0000FA47, 0x054, 0x00058032,
- 0x056, 0x00051000, 0x057, 0x0000CE0A, 0x058, 0x00082030,
- 0xA0000000, 0x00000000, 0x081, 0x0000F000, 0x087, 0x00016040,
- 0x051, 0x00000C00, 0x052, 0x0007C241, 0x053, 0x0001C069,
- 0x054, 0x00078032, 0x057, 0x0000CE0A, 0x058, 0x00058750,
- 0xB0000000, 0x00000000, 0x0EF, 0x00000800, 0x8300100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000003,
- 0x033, 0x00000001, 0x03F, 0x00000006, 0x033, 0x00000002,
- 0x03F, 0x00000009, 0x033, 0x00000003, 0x03F, 0x00000026,
- 0x033, 0x00000004, 0x03F, 0x00000029, 0x033, 0x00000005,
- 0x03F, 0x0000002C, 0x033, 0x00000006, 0x03F, 0x0000002F,
- 0x033, 0x00000007, 0x03F, 0x00000033, 0x033, 0x00000008,
- 0x03F, 0x00000036, 0x033, 0x00000009, 0x03F, 0x00000039,
- 0x033, 0x0000000A, 0x03F, 0x0000003C, 0x9300100f, 0x05050505,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000003,
- 0x033, 0x00000001, 0x03F, 0x00000006, 0x033, 0x00000002,
- 0x03F, 0x00000009, 0x033, 0x00000003, 0x03F, 0x00000026,
- 0x033, 0x00000004, 0x03F, 0x00000029, 0x033, 0x00000005,
- 0x03F, 0x0000002C, 0x033, 0x00000006, 0x03F, 0x0000002F,
- 0x033, 0x00000007, 0x03F, 0x00000033, 0x033, 0x00000008,
- 0x03F, 0x00000036, 0x033, 0x00000009, 0x03F, 0x00000039,
- 0x033, 0x0000000A, 0x03F, 0x0000003C, 0x9300100f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000003,
- 0x033, 0x00000001, 0x03F, 0x00000006, 0x033, 0x00000002,
- 0x03F, 0x00000009, 0x033, 0x00000003, 0x03F, 0x00000026,
- 0x033, 0x00000004, 0x03F, 0x00000029, 0x033, 0x00000005,
- 0x03F, 0x0000002C, 0x033, 0x00000006, 0x03F, 0x0000002F,
- 0x033, 0x00000007, 0x03F, 0x00000033, 0x033, 0x00000008,
- 0x03F, 0x00000036, 0x033, 0x00000009, 0x03F, 0x00000039,
- 0x033, 0x0000000A, 0x03F, 0x0000003C, 0x9300200f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000003,
- 0x033, 0x00000001, 0x03F, 0x00000006, 0x033, 0x00000002,
- 0x03F, 0x00000009, 0x033, 0x00000003, 0x03F, 0x00000026,
- 0x033, 0x00000004, 0x03F, 0x00000029, 0x033, 0x00000005,
- 0x03F, 0x0000002C, 0x033, 0x00000006, 0x03F, 0x0000002F,
- 0x033, 0x00000007, 0x03F, 0x00000033, 0x033, 0x00000008,
- 0x03F, 0x00000036, 0x033, 0x00000009, 0x03F, 0x00000039,
- 0x033, 0x0000000A, 0x03F, 0x0000003C, 0x9000100f, 0x0a0a0a0a,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000003,
- 0x033, 0x00000001, 0x03F, 0x00000006, 0x033, 0x00000002,
- 0x03F, 0x00000009, 0x033, 0x00000003, 0x03F, 0x00000026,
- 0x033, 0x00000004, 0x03F, 0x00000029, 0x033, 0x00000005,
- 0x03F, 0x0000002C, 0x033, 0x00000006, 0x03F, 0x0000002F,
- 0x033, 0x00000007, 0x03F, 0x00000033, 0x033, 0x00000008,
- 0x03F, 0x00000036, 0x033, 0x00000009, 0x03F, 0x00000039,
- 0x033, 0x0000000A, 0x03F, 0x0000003C, 0x9000100f, 0x05050505,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000003,
- 0x033, 0x00000001, 0x03F, 0x00000006, 0x033, 0x00000002,
- 0x03F, 0x00000009, 0x033, 0x00000003, 0x03F, 0x00000026,
- 0x033, 0x00000004, 0x03F, 0x00000029, 0x033, 0x00000005,
- 0x03F, 0x0000002C, 0x033, 0x00000006, 0x03F, 0x0000002F,
- 0x033, 0x00000007, 0x03F, 0x00000033, 0x033, 0x00000008,
- 0x03F, 0x00000036, 0x033, 0x00000009, 0x03F, 0x00000039,
- 0x033, 0x0000000A, 0x03F, 0x0000003C, 0x9000100f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000003,
- 0x033, 0x00000001, 0x03F, 0x00000006, 0x033, 0x00000002,
- 0x03F, 0x00000009, 0x033, 0x00000003, 0x03F, 0x00000026,
- 0x033, 0x00000004, 0x03F, 0x00000029, 0x033, 0x00000005,
- 0x03F, 0x0000002C, 0x033, 0x00000006, 0x03F, 0x0000002F,
- 0x033, 0x00000007, 0x03F, 0x00000033, 0x033, 0x00000008,
- 0x03F, 0x00000036, 0x033, 0x00000009, 0x03F, 0x00000039,
- 0x033, 0x0000000A, 0x03F, 0x0000003C, 0x9000200f, 0x00000000,
- 0x40000000, 0x00000000, 0x033, 0x00000000, 0x03F, 0x00000003,
- 0x033, 0x00000001, 0x03F, 0x00000006, 0x033, 0x00000002,
- 0x03F, 0x00000009, 0x033, 0x00000003, 0x03F, 0x00000026,
- 0x033, 0x00000004, 0x03F, 0x00000029, 0x033, 0x00000005,
- 0x03F, 0x0000002C, 0x033, 0x00000006, 0x03F, 0x0000002F,
- 0x033, 0x00000007, 0x03F, 0x00000033, 0x033, 0x00000008,
- 0x03F, 0x00000036, 0x033, 0x00000009, 0x03F, 0x00000039,
- 0x033, 0x0000000A, 0x03F, 0x0000003C, 0xA0000000, 0x00000000,
- 0x033, 0x00000000, 0x03F, 0x0005142C, 0x033, 0x00000001,
- 0x03F, 0x0005142F, 0x033, 0x00000002, 0x03F, 0x00051432,
- 0x033, 0x00000003, 0x03F, 0x00051C87, 0x033, 0x00000004,
- 0x03F, 0x00051C8A, 0x033, 0x00000005, 0x03F, 0x00051C8D,
- 0x033, 0x00000006, 0x03F, 0x00051CEB, 0x033, 0x00000007,
- 0x03F, 0x00051CEE, 0x033, 0x00000008, 0x03F, 0x00051CF1,
- 0x033, 0x00000009, 0x03F, 0x00051CF4, 0x033, 0x0000000A,
- 0x03F, 0x00051CF7, 0xB0000000, 0x00000000, 0x0EF, 0x00000000,
- 0x0EF, 0x00000010, 0x033, 0x00000000, 0x008, 0x0009C060,
- 0x033, 0x00000001, 0x008, 0x0009C060, 0x0EF, 0x00000000,
- 0x033, 0x000000A2, 0x0EF, 0x00080000, 0x03E, 0x0000593F,
- 0x03F, 0x000C0F4F, 0x0EF, 0x00000000, 0x033, 0x000000A3,
- 0x0EF, 0x00080000, 0x03E, 0x00005934, 0x03F, 0x0005AFCF,
- 0x0EF, 0x00000000,
-};
-
-void odm_read_and_config_mp_8822b_radiob(struct phy_dm_struct *dm)
-{
- u32 i = 0;
- u8 c_cond;
- bool is_matched = true, is_skipped = false;
- u32 *array = array_mp_8822b_radiob;
-
- u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> %s\n", __func__);
-
- for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_radiob); i += 2) {
- v1 = array[i];
- v2 = array[i + 1];
-
- if (v1 & BIT(31)) { /* positive condition */
- c_cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28);
- if (c_cond == COND_ENDIF) { /* end */
- is_matched = true;
- is_skipped = false;
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "ENDIF\n");
- } else if (c_cond == COND_ELSE) { /* else */
- is_matched = !is_skipped;
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "ELSE\n");
- } else { /* if, else if */
- pre_v1 = v1;
- pre_v2 = v2;
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "IF or ELSE IF\n");
- }
- } else if (v1 & BIT(30)) { /* negative condition */
- if (is_skipped) {
- is_matched = false;
- continue;
- }
-
- if (check_positive(dm, pre_v1, pre_v2, v1, v2)) {
- is_matched = true;
- is_skipped = true;
- } else {
- is_matched = false;
- is_skipped = false;
- }
- } else if (is_matched) {
- odm_config_rf_radio_b_8822b(dm, v1, v2);
- }
- }
-}
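
The parser above walks the removed table as (v1, v2) pairs: a v1 with BIT(31) set is a condition header whose bits 29:28 select IF/ELSE-IF, ELSE or ENDIF, a v1 with BIT(30) set carries the condition value that check_positive() tests against the saved header, and any other pair is a plain RF register write applied only while the current branch matches. The following is a minimal standalone sketch of that classification, not driver code: the sample array and the numeric COND_ELSE/COND_ENDIF values (2 and 3) are assumptions inferred from the 0xA0000000/0xB0000000 markers in the table above.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Assumed values, matching the 0xA0000000 (ELSE) and 0xB0000000 (ENDIF)
 * markers seen in the table above; the driver uses COND_ELSE/COND_ENDIF. */
#define SKETCH_COND_ELSE 2
#define SKETCH_COND_ENDIF 3

static const char *classify(uint32_t v1)
{
	if (v1 & BIT(31)) {
		uint32_t cond = (v1 & (BIT(29) | BIT(28))) >> 28;

		if (cond == SKETCH_COND_ENDIF)
			return "ENDIF marker";
		if (cond == SKETCH_COND_ELSE)
			return "ELSE marker";
		return "IF / ELSE-IF header (saved as pre_v1/pre_v2)";
	}
	if (v1 & BIT(30))
		return "condition value (tested by check_positive())";
	return "RF register write (address, data)";
}

int main(void)
{
	/* Hypothetical excerpt mirroring the layout of the table above. */
	static const uint32_t sample[] = {
		0x8300100f, 0x0a0a0a0a,	/* IF header */
		0x40000000, 0x00000000,	/* condition value */
		0x033, 0x0000265A,	/* write when branch matches */
		0xA0000000, 0x00000000,	/* ELSE */
		0x033, 0x000004BB,	/* write for the ELSE branch */
		0xB0000000, 0x00000000,	/* ENDIF */
	};
	unsigned int i;

	for (i = 0; i + 1 < sizeof(sample) / sizeof(sample[0]); i += 2)
		printf("0x%08" PRIx32 " 0x%08" PRIx32 ": %s\n",
		       sample[i], sample[i + 1], classify(sample[i]));
	return 0;
}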
-
-u32 odm_get_version_mp_8822b_radiob(void) { return 67; }
-
-/******************************************************************************
- * txpowertrack.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_8822b[][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10,
- 11, 11, 12, 12, 12, 13, 13, 14, 14, 14, 15, 15, 15, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 8,
- 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14},
- {0, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9,
- 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_8822b[][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 11,
- 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 19, 19, 19, 19},
- {0, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10,
- 11, 12, 12, 13, 14, 15, 16, 17, 17, 18, 18, 18, 18, 18, 18},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10,
- 10, 11, 12, 13, 14, 15, 15, 16, 16, 17, 17, 17, 17, 17, 17},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_8822b[][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10,
- 11, 11, 12, 12, 12, 13, 13, 14, 14, 14, 15, 15, 15, 15, 15},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9,
- 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14},
- {0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9,
- 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_8822b[][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 20, 20, 20, 20},
- {0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9,
- 10, 11, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, 18, 18},
- {0, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10,
- 11, 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 18, 18, 18, 18},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-void odm_read_and_config_mp_8822b_txpowertrack(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type0.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type0_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 8,
- 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 15, 15},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type0_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 10, 10, 11, 12, 13, 14, 14, 15, 15, 15, 16, 16},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type0_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 13, 14, 14},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type0_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 15},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type0_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type0_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type0_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type0_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type0_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type0_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type0_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type0_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type0(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type0_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type1.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type1_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10,
- 11, 11, 12, 12, 12, 13, 13, 14, 14, 14, 15, 15, 15, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 8,
- 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14},
- {0, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9,
- 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type1_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 11,
- 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 19, 19, 19, 19},
- {0, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10,
- 11, 12, 12, 13, 14, 15, 16, 17, 17, 18, 18, 18, 18, 18, 18},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10,
- 10, 11, 12, 13, 14, 15, 15, 16, 16, 17, 17, 17, 17, 17, 17},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type1_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10,
- 11, 11, 12, 12, 12, 13, 13, 14, 14, 14, 15, 15, 15, 15, 15},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9,
- 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14},
- {0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9,
- 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type1_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 20, 20, 20, 20},
- {0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9,
- 10, 11, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, 18, 18},
- {0, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10,
- 11, 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 18, 18, 18, 18},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type1_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type1_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type1_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type1_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type1_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type1_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type1_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type1_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type1(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type1_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type2.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type2_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type2_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type2_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type2_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type2_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type2_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type2_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type2_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type2_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type2_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type2_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type2_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type2(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type2_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type3_type5.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type3_type5_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type3_type5_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type3_type5_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type3_type5_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type3_type5_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type3_type5_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type3_type5_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type3_type5_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type3_type5_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type3_type5_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type3_type5_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type3_type5_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type3_type5(
- struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(
- dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(
- dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(
- dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(
- dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type3_type5_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type4.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type4_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type4_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type4_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 11,
- 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type4_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 13, 14, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type4_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type4_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type4_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type4_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type4_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type4_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type4_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type4_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type4(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type4_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type6.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type6_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10,
- 11, 11, 12, 12, 12, 13, 13, 14, 14, 14, 15, 15, 15, 15, 15},
- {0, 1, 2, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 10,
- 11, 12, 12, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16},
- {0, 1, 2, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12,
- 12, 13, 13, 14, 15, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type6_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 11,
- 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 19, 19, 19, 19},
- {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 9, 11, 11, 12,
- 13, 14, 15, 16, 17, 18, 19, 20, 20, 21, 21, 21, 21, 21, 21},
- {0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 8, 9, 10, 11, 12,
- 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 20, 20, 21, 21, 21},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type6_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11,
- 12, 12, 13, 14, 14, 15, 15, 16, 16, 16, 17, 17, 17, 17, 17},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10,
- 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15},
- {0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 9, 9, 10,
- 11, 12, 12, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type6_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12,
- 13, 14, 15, 15, 16, 17, 18, 19, 20, 20, 21, 21, 21, 21, 21},
- {0, 1, 2, 2, 3, 4, 4, 5, 7, 7, 8, 9, 10, 11, 11,
- 12, 13, 13, 14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 21, 21},
- {0, 1, 2, 3, 3, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 14, 15, 16, 17, 17, 18, 19, 19, 20, 20, 20, 20, 20},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type6_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type6_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type6_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type6_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type6_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type6_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type6_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type6_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type6(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type6_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type7.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type7_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10,
- 11, 11, 12, 12, 12, 13, 13, 14, 14, 14, 15, 15, 15, 15, 15},
- {0, 1, 2, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 10,
- 11, 12, 12, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16},
- {0, 1, 2, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12,
- 12, 13, 13, 14, 15, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type7_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 11,
- 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 19, 19, 19, 19},
- {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 9, 11, 11, 12,
- 13, 14, 15, 16, 17, 18, 19, 20, 20, 21, 21, 21, 21, 21, 21},
- {0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 8, 9, 10, 11, 12,
- 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 20, 20, 21, 21, 21},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type7_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11,
- 12, 12, 13, 14, 14, 15, 15, 16, 16, 16, 17, 17, 17, 17, 17},
- {0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10,
- 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15},
- {0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 9, 9, 10,
- 11, 12, 12, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type7_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12,
- 13, 14, 15, 15, 16, 17, 18, 19, 20, 20, 21, 21, 21, 21, 21},
- {0, 1, 2, 2, 3, 4, 4, 5, 7, 7, 8, 9, 10, 11, 11,
- 12, 13, 13, 14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 21, 21},
- {0, 1, 2, 3, 3, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 14, 15, 16, 17, 17, 18, 19, 19, 20, 20, 20, 20, 20},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type7_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type7_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type7_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type7_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type7_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type7_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type7_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type7_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type7(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type7_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type8.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type8_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type8_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type8_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type8_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8,
- 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type8_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type8_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type8_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type8_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type8_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type8_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type8_8822b[] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
- 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type8_8822b[] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type8(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> ODM_ReadAndConfig_MP_mp_8822b\n");
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type8_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpowertrack_type9.TXT
- ******************************************************************************/
-
-static u8 delta_swing_index_mp_5gb_n_txpwrtrack_type9_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8,
- 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14},
- {0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 8,
- 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 15, 15},
-};
-
-static u8 delta_swing_index_mp_5gb_p_txpwrtrack_type9_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 10, 10, 11, 12, 13, 14, 14, 15, 15, 15, 16, 16},
-};
-
-static u8 delta_swing_index_mp_5ga_n_txpwrtrack_type9_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 13, 14, 14},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14},
-};
-
-static u8 delta_swing_index_mp_5ga_p_txpwrtrack_type9_8822b
- [][DELTA_SWINGIDX_SIZE] = {
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 15, 15},
- {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 15},
-};
-
-static u8 delta_swing_index_mp_2gb_n_txpwrtrack_type9_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2gb_p_txpwrtrack_type9_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2ga_n_txpwrtrack_type9_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2ga_p_txpwrtrack_type9_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type9_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17};
-
-static u8 delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type9_8822b[] = {
- 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-static u8 delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type9_8822b[] = {
- 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18};
-
-static u8 delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type9_8822b[] = {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
- 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 22, 22};
-
-void odm_read_and_config_mp_8822b_txpowertrack_type9(struct phy_dm_struct *dm)
-{
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
-	ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> %s\n", __func__);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_p,
- delta_swing_index_mp_2ga_p_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2ga_n,
- delta_swing_index_mp_2ga_n_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_p,
- delta_swing_index_mp_2gb_p_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2gb_n,
- delta_swing_index_mp_2gb_n_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_p,
- delta_swing_index_mp_2g_cck_a_p_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_a_n,
- delta_swing_index_mp_2g_cck_a_n_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_p,
- delta_swing_index_mp_2g_cck_b_p_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_2g_cck_b_n,
- delta_swing_index_mp_2g_cck_b_n_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE);
-
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_p,
- delta_swing_index_mp_5ga_p_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5ga_n,
- delta_swing_index_mp_5ga_n_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_p,
- delta_swing_index_mp_5gb_p_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE * 3);
- odm_move_memory(dm, cali_info->delta_swing_table_idx_5gb_n,
- delta_swing_index_mp_5gb_n_txpwrtrack_type9_8822b,
- DELTA_SWINGIDX_SIZE * 3);
-}
-
-/******************************************************************************
- * txpwr_lmt.TXT
- ******************************************************************************/
-
-static const char *const array_mp_8822b_txpwr_lmt[] = {
- "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", "ETSI", "2.4G",
- "20M", "CCK", "1T", "01", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "01", "30", "FCC", "2.4G", "20M", "CCK", "1T", "02",
- "32", "ETSI", "2.4G", "20M", "CCK", "1T", "02", "28", "MKK",
- "2.4G", "20M", "CCK", "1T", "02", "30", "FCC", "2.4G", "20M",
- "CCK", "1T", "03", "32", "ETSI", "2.4G", "20M", "CCK", "1T",
- "03", "28", "MKK", "2.4G", "20M", "CCK", "1T", "03", "30",
- "FCC", "2.4G", "20M", "CCK", "1T", "04", "32", "ETSI", "2.4G",
- "20M", "CCK", "1T", "04", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "04", "30", "FCC", "2.4G", "20M", "CCK", "1T", "05",
- "32", "ETSI", "2.4G", "20M", "CCK", "1T", "05", "28", "MKK",
- "2.4G", "20M", "CCK", "1T", "05", "30", "FCC", "2.4G", "20M",
- "CCK", "1T", "06", "32", "ETSI", "2.4G", "20M", "CCK", "1T",
- "06", "28", "MKK", "2.4G", "20M", "CCK", "1T", "06", "30",
- "FCC", "2.4G", "20M", "CCK", "1T", "07", "32", "ETSI", "2.4G",
- "20M", "CCK", "1T", "07", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "07", "30", "FCC", "2.4G", "20M", "CCK", "1T", "08",
- "32", "ETSI", "2.4G", "20M", "CCK", "1T", "08", "28", "MKK",
- "2.4G", "20M", "CCK", "1T", "08", "30", "FCC", "2.4G", "20M",
- "CCK", "1T", "09", "32", "ETSI", "2.4G", "20M", "CCK", "1T",
- "09", "28", "MKK", "2.4G", "20M", "CCK", "1T", "09", "30",
- "FCC", "2.4G", "20M", "CCK", "1T", "10", "32", "ETSI", "2.4G",
- "20M", "CCK", "1T", "10", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "10", "30", "FCC", "2.4G", "20M", "CCK", "1T", "11",
- "32", "ETSI", "2.4G", "20M", "CCK", "1T", "11", "28", "MKK",
- "2.4G", "20M", "CCK", "1T", "11", "30", "FCC", "2.4G", "20M",
- "CCK", "1T", "12", "26", "ETSI", "2.4G", "20M", "CCK", "1T",
- "12", "28", "MKK", "2.4G", "20M", "CCK", "1T", "12", "30",
- "FCC", "2.4G", "20M", "CCK", "1T", "13", "20", "ETSI", "2.4G",
- "20M", "CCK", "1T", "13", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "13", "28", "FCC", "2.4G", "20M", "CCK", "1T", "14",
- "63", "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63", "MKK",
- "2.4G", "20M", "CCK", "1T", "14", "32", "FCC", "2.4G", "20M",
- "OFDM", "1T", "01", "26", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "01", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "01", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "02", "30", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "02", "30", "MKK", "2.4G", "20M", "OFDM",
- "1T", "02", "34", "FCC", "2.4G", "20M", "OFDM", "1T", "03",
- "32", "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "30", "MKK",
- "2.4G", "20M", "OFDM", "1T", "03", "34", "FCC", "2.4G", "20M",
- "OFDM", "1T", "04", "34", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "04", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "04", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "05", "34", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "05", "30", "MKK", "2.4G", "20M", "OFDM",
- "1T", "05", "34", "FCC", "2.4G", "20M", "OFDM", "1T", "06",
- "34", "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "30", "MKK",
- "2.4G", "20M", "OFDM", "1T", "06", "34", "FCC", "2.4G", "20M",
- "OFDM", "1T", "07", "34", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "07", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "07", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "08", "34", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "08", "30", "MKK", "2.4G", "20M", "OFDM",
- "1T", "08", "34", "FCC", "2.4G", "20M", "OFDM", "1T", "09",
- "32", "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "30", "MKK",
- "2.4G", "20M", "OFDM", "1T", "09", "34", "FCC", "2.4G", "20M",
- "OFDM", "1T", "10", "30", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "10", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "10", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "11", "28", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "11", "30", "MKK", "2.4G", "20M", "OFDM",
- "1T", "11", "34", "FCC", "2.4G", "20M", "OFDM", "1T", "12",
- "22", "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "30", "MKK",
- "2.4G", "20M", "OFDM", "1T", "12", "34", "FCC", "2.4G", "20M",
- "OFDM", "1T", "13", "14", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "13", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "13", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "14", "63", "MKK", "2.4G", "20M", "OFDM",
- "1T", "14", "63", "FCC", "2.4G", "20M", "HT", "1T", "01",
- "26", "ETSI", "2.4G", "20M", "HT", "1T", "01", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "01", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "02", "30", "ETSI", "2.4G", "20M", "HT", "1T",
- "02", "30", "MKK", "2.4G", "20M", "HT", "1T", "02", "34",
- "FCC", "2.4G", "20M", "HT", "1T", "03", "32", "ETSI", "2.4G",
- "20M", "HT", "1T", "03", "30", "MKK", "2.4G", "20M", "HT",
- "1T", "03", "34", "FCC", "2.4G", "20M", "HT", "1T", "04",
- "34", "ETSI", "2.4G", "20M", "HT", "1T", "04", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "04", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "05", "34", "ETSI", "2.4G", "20M", "HT", "1T",
- "05", "30", "MKK", "2.4G", "20M", "HT", "1T", "05", "34",
- "FCC", "2.4G", "20M", "HT", "1T", "06", "34", "ETSI", "2.4G",
- "20M", "HT", "1T", "06", "30", "MKK", "2.4G", "20M", "HT",
- "1T", "06", "34", "FCC", "2.4G", "20M", "HT", "1T", "07",
- "34", "ETSI", "2.4G", "20M", "HT", "1T", "07", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "07", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "08", "34", "ETSI", "2.4G", "20M", "HT", "1T",
- "08", "30", "MKK", "2.4G", "20M", "HT", "1T", "08", "34",
- "FCC", "2.4G", "20M", "HT", "1T", "09", "32", "ETSI", "2.4G",
- "20M", "HT", "1T", "09", "30", "MKK", "2.4G", "20M", "HT",
- "1T", "09", "34", "FCC", "2.4G", "20M", "HT", "1T", "10",
- "30", "ETSI", "2.4G", "20M", "HT", "1T", "10", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "10", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "11", "26", "ETSI", "2.4G", "20M", "HT", "1T",
- "11", "30", "MKK", "2.4G", "20M", "HT", "1T", "11", "34",
- "FCC", "2.4G", "20M", "HT", "1T", "12", "20", "ETSI", "2.4G",
- "20M", "HT", "1T", "12", "30", "MKK", "2.4G", "20M", "HT",
- "1T", "12", "34", "FCC", "2.4G", "20M", "HT", "1T", "13",
- "14", "ETSI", "2.4G", "20M", "HT", "1T", "13", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "13", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "14", "63", "ETSI", "2.4G", "20M", "HT", "1T",
- "14", "63", "MKK", "2.4G", "20M", "HT", "1T", "14", "63",
- "FCC", "2.4G", "20M", "HT", "2T", "01", "26", "ETSI", "2.4G",
- "20M", "HT", "2T", "01", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "01", "30", "FCC", "2.4G", "20M", "HT", "2T", "02",
- "28", "ETSI", "2.4G", "20M", "HT", "2T", "02", "18", "MKK",
- "2.4G", "20M", "HT", "2T", "02", "30", "FCC", "2.4G", "20M",
- "HT", "2T", "03", "30", "ETSI", "2.4G", "20M", "HT", "2T",
- "03", "18", "MKK", "2.4G", "20M", "HT", "2T", "03", "30",
- "FCC", "2.4G", "20M", "HT", "2T", "04", "30", "ETSI", "2.4G",
- "20M", "HT", "2T", "04", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "04", "30", "FCC", "2.4G", "20M", "HT", "2T", "05",
- "32", "ETSI", "2.4G", "20M", "HT", "2T", "05", "18", "MKK",
- "2.4G", "20M", "HT", "2T", "05", "30", "FCC", "2.4G", "20M",
- "HT", "2T", "06", "32", "ETSI", "2.4G", "20M", "HT", "2T",
- "06", "18", "MKK", "2.4G", "20M", "HT", "2T", "06", "30",
- "FCC", "2.4G", "20M", "HT", "2T", "07", "32", "ETSI", "2.4G",
- "20M", "HT", "2T", "07", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "07", "30", "FCC", "2.4G", "20M", "HT", "2T", "08",
- "30", "ETSI", "2.4G", "20M", "HT", "2T", "08", "18", "MKK",
- "2.4G", "20M", "HT", "2T", "08", "30", "FCC", "2.4G", "20M",
- "HT", "2T", "09", "30", "ETSI", "2.4G", "20M", "HT", "2T",
- "09", "18", "MKK", "2.4G", "20M", "HT", "2T", "09", "30",
- "FCC", "2.4G", "20M", "HT", "2T", "10", "28", "ETSI", "2.4G",
- "20M", "HT", "2T", "10", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "10", "30", "FCC", "2.4G", "20M", "HT", "2T", "11",
- "26", "ETSI", "2.4G", "20M", "HT", "2T", "11", "18", "MKK",
- "2.4G", "20M", "HT", "2T", "11", "30", "FCC", "2.4G", "20M",
- "HT", "2T", "12", "20", "ETSI", "2.4G", "20M", "HT", "2T",
- "12", "18", "MKK", "2.4G", "20M", "HT", "2T", "12", "30",
- "FCC", "2.4G", "20M", "HT", "2T", "13", "14", "ETSI", "2.4G",
- "20M", "HT", "2T", "13", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "13", "30", "FCC", "2.4G", "20M", "HT", "2T", "14",
- "63", "ETSI", "2.4G", "20M", "HT", "2T", "14", "63", "MKK",
- "2.4G", "20M", "HT", "2T", "14", "63", "FCC", "2.4G", "40M",
- "HT", "1T", "01", "63", "ETSI", "2.4G", "40M", "HT", "1T",
- "01", "63", "MKK", "2.4G", "40M", "HT", "1T", "01", "63",
- "FCC", "2.4G", "40M", "HT", "1T", "02", "63", "ETSI", "2.4G",
- "40M", "HT", "1T", "02", "63", "MKK", "2.4G", "40M", "HT",
- "1T", "02", "63", "FCC", "2.4G", "40M", "HT", "1T", "03",
- "26", "ETSI", "2.4G", "40M", "HT", "1T", "03", "30", "MKK",
- "2.4G", "40M", "HT", "1T", "03", "34", "FCC", "2.4G", "40M",
- "HT", "1T", "04", "26", "ETSI", "2.4G", "40M", "HT", "1T",
- "04", "30", "MKK", "2.4G", "40M", "HT", "1T", "04", "34",
- "FCC", "2.4G", "40M", "HT", "1T", "05", "30", "ETSI", "2.4G",
- "40M", "HT", "1T", "05", "30", "MKK", "2.4G", "40M", "HT",
- "1T", "05", "34", "FCC", "2.4G", "40M", "HT", "1T", "06",
- "32", "ETSI", "2.4G", "40M", "HT", "1T", "06", "30", "MKK",
- "2.4G", "40M", "HT", "1T", "06", "34", "FCC", "2.4G", "40M",
- "HT", "1T", "07", "30", "ETSI", "2.4G", "40M", "HT", "1T",
- "07", "30", "MKK", "2.4G", "40M", "HT", "1T", "07", "34",
- "FCC", "2.4G", "40M", "HT", "1T", "08", "26", "ETSI", "2.4G",
- "40M", "HT", "1T", "08", "30", "MKK", "2.4G", "40M", "HT",
- "1T", "08", "34", "FCC", "2.4G", "40M", "HT", "1T", "09",
- "26", "ETSI", "2.4G", "40M", "HT", "1T", "09", "30", "MKK",
- "2.4G", "40M", "HT", "1T", "09", "34", "FCC", "2.4G", "40M",
- "HT", "1T", "10", "20", "ETSI", "2.4G", "40M", "HT", "1T",
- "10", "30", "MKK", "2.4G", "40M", "HT", "1T", "10", "34",
- "FCC", "2.4G", "40M", "HT", "1T", "11", "14", "ETSI", "2.4G",
- "40M", "HT", "1T", "11", "30", "MKK", "2.4G", "40M", "HT",
- "1T", "11", "34", "FCC", "2.4G", "40M", "HT", "1T", "12",
- "63", "ETSI", "2.4G", "40M", "HT", "1T", "12", "63", "MKK",
- "2.4G", "40M", "HT", "1T", "12", "63", "FCC", "2.4G", "40M",
- "HT", "1T", "13", "63", "ETSI", "2.4G", "40M", "HT", "1T",
- "13", "63", "MKK", "2.4G", "40M", "HT", "1T", "13", "63",
- "FCC", "2.4G", "40M", "HT", "1T", "14", "63", "ETSI", "2.4G",
- "40M", "HT", "1T", "14", "63", "MKK", "2.4G", "40M", "HT",
- "1T", "14", "63", "FCC", "2.4G", "40M", "HT", "2T", "01",
- "63", "ETSI", "2.4G", "40M", "HT", "2T", "01", "63", "MKK",
- "2.4G", "40M", "HT", "2T", "01", "63", "FCC", "2.4G", "40M",
- "HT", "2T", "02", "63", "ETSI", "2.4G", "40M", "HT", "2T",
- "02", "63", "MKK", "2.4G", "40M", "HT", "2T", "02", "63",
- "FCC", "2.4G", "40M", "HT", "2T", "03", "24", "ETSI", "2.4G",
- "40M", "HT", "2T", "03", "18", "MKK", "2.4G", "40M", "HT",
- "2T", "03", "30", "FCC", "2.4G", "40M", "HT", "2T", "04",
- "24", "ETSI", "2.4G", "40M", "HT", "2T", "04", "18", "MKK",
- "2.4G", "40M", "HT", "2T", "04", "30", "FCC", "2.4G", "40M",
- "HT", "2T", "05", "26", "ETSI", "2.4G", "40M", "HT", "2T",
- "05", "18", "MKK", "2.4G", "40M", "HT", "2T", "05", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "06", "28", "ETSI", "2.4G",
- "40M", "HT", "2T", "06", "18", "MKK", "2.4G", "40M", "HT",
- "2T", "06", "30", "FCC", "2.4G", "40M", "HT", "2T", "07",
- "26", "ETSI", "2.4G", "40M", "HT", "2T", "07", "18", "MKK",
- "2.4G", "40M", "HT", "2T", "07", "30", "FCC", "2.4G", "40M",
- "HT", "2T", "08", "26", "ETSI", "2.4G", "40M", "HT", "2T",
- "08", "18", "MKK", "2.4G", "40M", "HT", "2T", "08", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "09", "26", "ETSI", "2.4G",
- "40M", "HT", "2T", "09", "18", "MKK", "2.4G", "40M", "HT",
- "2T", "09", "30", "FCC", "2.4G", "40M", "HT", "2T", "10",
- "20", "ETSI", "2.4G", "40M", "HT", "2T", "10", "18", "MKK",
- "2.4G", "40M", "HT", "2T", "10", "30", "FCC", "2.4G", "40M",
- "HT", "2T", "11", "14", "ETSI", "2.4G", "40M", "HT", "2T",
- "11", "18", "MKK", "2.4G", "40M", "HT", "2T", "11", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "12", "63", "ETSI", "2.4G",
- "40M", "HT", "2T", "12", "63", "MKK", "2.4G", "40M", "HT",
- "2T", "12", "63", "FCC", "2.4G", "40M", "HT", "2T", "13",
- "63", "ETSI", "2.4G", "40M", "HT", "2T", "13", "63", "MKK",
- "2.4G", "40M", "HT", "2T", "13", "63", "FCC", "2.4G", "40M",
- "HT", "2T", "14", "63", "ETSI", "2.4G", "40M", "HT", "2T",
- "14", "63", "MKK", "2.4G", "40M", "HT", "2T", "14", "63",
- "FCC", "5G", "20M", "OFDM", "1T", "36", "30", "ETSI", "5G",
- "20M", "OFDM", "1T", "36", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "36", "30", "FCC", "5G", "20M", "OFDM", "1T", "40",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "40", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "40", "30", "FCC", "5G", "20M",
- "OFDM", "1T", "44", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "44", "32", "MKK", "5G", "20M", "OFDM", "1T", "44", "30",
- "FCC", "5G", "20M", "OFDM", "1T", "48", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "48", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "48", "30", "FCC", "5G", "20M", "OFDM", "1T", "52",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "52", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "52", "28", "FCC", "5G", "20M",
- "OFDM", "1T", "56", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "56", "32", "MKK", "5G", "20M", "OFDM", "1T", "56", "28",
- "FCC", "5G", "20M", "OFDM", "1T", "60", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "60", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "60", "28", "FCC", "5G", "20M", "OFDM", "1T", "64",
- "28", "ETSI", "5G", "20M", "OFDM", "1T", "64", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "64", "28", "FCC", "5G", "20M",
- "OFDM", "1T", "100", "26", "ETSI", "5G", "20M", "OFDM", "1T",
- "100", "32", "MKK", "5G", "20M", "OFDM", "1T", "100", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "104", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "104", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "104", "32", "FCC", "5G", "20M", "OFDM", "1T", "108",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "108", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "108", "32", "FCC", "5G", "20M",
- "OFDM", "1T", "112", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "112", "32", "MKK", "5G", "20M", "OFDM", "1T", "112", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "116", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "116", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "116", "32", "FCC", "5G", "20M", "OFDM", "1T", "120",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "120", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "120", "32", "FCC", "5G", "20M",
- "OFDM", "1T", "124", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "124", "32", "MKK", "5G", "20M", "OFDM", "1T", "124", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "128", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "128", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "128", "32", "FCC", "5G", "20M", "OFDM", "1T", "132",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "132", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "132", "32", "FCC", "5G", "20M",
- "OFDM", "1T", "136", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "136", "32", "MKK", "5G", "20M", "OFDM", "1T", "136", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "140", "28", "ETSI", "5G",
- "20M", "OFDM", "1T", "140", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "140", "32", "FCC", "5G", "20M", "OFDM", "1T", "144",
- "28", "ETSI", "5G", "20M", "OFDM", "1T", "144", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "144", "63", "FCC", "5G", "20M",
- "OFDM", "1T", "149", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "149", "63", "MKK", "5G", "20M", "OFDM", "1T", "149", "63",
- "FCC", "5G", "20M", "OFDM", "1T", "153", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "153", "63", "MKK", "5G", "20M", "OFDM",
- "1T", "153", "63", "FCC", "5G", "20M", "OFDM", "1T", "157",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "157", "63", "MKK",
- "5G", "20M", "OFDM", "1T", "157", "63", "FCC", "5G", "20M",
- "OFDM", "1T", "161", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "161", "63", "MKK", "5G", "20M", "OFDM", "1T", "161", "63",
- "FCC", "5G", "20M", "OFDM", "1T", "165", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "165", "63", "MKK", "5G", "20M", "OFDM",
- "1T", "165", "63", "FCC", "5G", "20M", "HT", "1T", "36",
- "30", "ETSI", "5G", "20M", "HT", "1T", "36", "32", "MKK",
- "5G", "20M", "HT", "1T", "36", "28", "FCC", "5G", "20M",
- "HT", "1T", "40", "32", "ETSI", "5G", "20M", "HT", "1T",
- "40", "32", "MKK", "5G", "20M", "HT", "1T", "40", "28",
- "FCC", "5G", "20M", "HT", "1T", "44", "32", "ETSI", "5G",
- "20M", "HT", "1T", "44", "32", "MKK", "5G", "20M", "HT",
- "1T", "44", "28", "FCC", "5G", "20M", "HT", "1T", "48",
- "32", "ETSI", "5G", "20M", "HT", "1T", "48", "32", "MKK",
- "5G", "20M", "HT", "1T", "48", "28", "FCC", "5G", "20M",
- "HT", "1T", "52", "32", "ETSI", "5G", "20M", "HT", "1T",
- "52", "32", "MKK", "5G", "20M", "HT", "1T", "52", "28",
- "FCC", "5G", "20M", "HT", "1T", "56", "32", "ETSI", "5G",
- "20M", "HT", "1T", "56", "32", "MKK", "5G", "20M", "HT",
- "1T", "56", "28", "FCC", "5G", "20M", "HT", "1T", "60",
- "32", "ETSI", "5G", "20M", "HT", "1T", "60", "32", "MKK",
- "5G", "20M", "HT", "1T", "60", "28", "FCC", "5G", "20M",
- "HT", "1T", "64", "28", "ETSI", "5G", "20M", "HT", "1T",
- "64", "32", "MKK", "5G", "20M", "HT", "1T", "64", "28",
- "FCC", "5G", "20M", "HT", "1T", "100", "26", "ETSI", "5G",
- "20M", "HT", "1T", "100", "32", "MKK", "5G", "20M", "HT",
- "1T", "100", "32", "FCC", "5G", "20M", "HT", "1T", "104",
- "32", "ETSI", "5G", "20M", "HT", "1T", "104", "32", "MKK",
- "5G", "20M", "HT", "1T", "104", "32", "FCC", "5G", "20M",
- "HT", "1T", "108", "32", "ETSI", "5G", "20M", "HT", "1T",
- "108", "32", "MKK", "5G", "20M", "HT", "1T", "108", "32",
- "FCC", "5G", "20M", "HT", "1T", "112", "32", "ETSI", "5G",
- "20M", "HT", "1T", "112", "32", "MKK", "5G", "20M", "HT",
- "1T", "112", "32", "FCC", "5G", "20M", "HT", "1T", "116",
- "32", "ETSI", "5G", "20M", "HT", "1T", "116", "32", "MKK",
- "5G", "20M", "HT", "1T", "116", "32", "FCC", "5G", "20M",
- "HT", "1T", "120", "32", "ETSI", "5G", "20M", "HT", "1T",
- "120", "32", "MKK", "5G", "20M", "HT", "1T", "120", "32",
- "FCC", "5G", "20M", "HT", "1T", "124", "32", "ETSI", "5G",
- "20M", "HT", "1T", "124", "32", "MKK", "5G", "20M", "HT",
- "1T", "124", "32", "FCC", "5G", "20M", "HT", "1T", "128",
- "32", "ETSI", "5G", "20M", "HT", "1T", "128", "32", "MKK",
- "5G", "20M", "HT", "1T", "128", "32", "FCC", "5G", "20M",
- "HT", "1T", "132", "32", "ETSI", "5G", "20M", "HT", "1T",
- "132", "32", "MKK", "5G", "20M", "HT", "1T", "132", "32",
- "FCC", "5G", "20M", "HT", "1T", "136", "32", "ETSI", "5G",
- "20M", "HT", "1T", "136", "32", "MKK", "5G", "20M", "HT",
- "1T", "136", "32", "FCC", "5G", "20M", "HT", "1T", "140",
- "26", "ETSI", "5G", "20M", "HT", "1T", "140", "32", "MKK",
- "5G", "20M", "HT", "1T", "140", "32", "FCC", "5G", "20M",
- "HT", "1T", "144", "26", "ETSI", "5G", "20M", "HT", "1T",
- "144", "63", "MKK", "5G", "20M", "HT", "1T", "144", "63",
- "FCC", "5G", "20M", "HT", "1T", "149", "32", "ETSI", "5G",
- "20M", "HT", "1T", "149", "63", "MKK", "5G", "20M", "HT",
- "1T", "149", "63", "FCC", "5G", "20M", "HT", "1T", "153",
- "32", "ETSI", "5G", "20M", "HT", "1T", "153", "63", "MKK",
- "5G", "20M", "HT", "1T", "153", "63", "FCC", "5G", "20M",
- "HT", "1T", "157", "32", "ETSI", "5G", "20M", "HT", "1T",
- "157", "63", "MKK", "5G", "20M", "HT", "1T", "157", "63",
- "FCC", "5G", "20M", "HT", "1T", "161", "32", "ETSI", "5G",
- "20M", "HT", "1T", "161", "63", "MKK", "5G", "20M", "HT",
- "1T", "161", "63", "FCC", "5G", "20M", "HT", "1T", "165",
- "32", "ETSI", "5G", "20M", "HT", "1T", "165", "63", "MKK",
- "5G", "20M", "HT", "1T", "165", "63", "FCC", "5G", "20M",
- "HT", "2T", "36", "28", "ETSI", "5G", "20M", "HT", "2T",
- "36", "20", "MKK", "5G", "20M", "HT", "2T", "36", "22",
- "FCC", "5G", "20M", "HT", "2T", "40", "30", "ETSI", "5G",
- "20M", "HT", "2T", "40", "20", "MKK", "5G", "20M", "HT",
- "2T", "40", "22", "FCC", "5G", "20M", "HT", "2T", "44",
- "30", "ETSI", "5G", "20M", "HT", "2T", "44", "20", "MKK",
- "5G", "20M", "HT", "2T", "44", "22", "FCC", "5G", "20M",
- "HT", "2T", "48", "30", "ETSI", "5G", "20M", "HT", "2T",
- "48", "20", "MKK", "5G", "20M", "HT", "2T", "48", "22",
- "FCC", "5G", "20M", "HT", "2T", "52", "30", "ETSI", "5G",
- "20M", "HT", "2T", "52", "20", "MKK", "5G", "20M", "HT",
- "2T", "52", "22", "FCC", "5G", "20M", "HT", "2T", "56",
- "30", "ETSI", "5G", "20M", "HT", "2T", "56", "20", "MKK",
- "5G", "20M", "HT", "2T", "56", "22", "FCC", "5G", "20M",
- "HT", "2T", "60", "30", "ETSI", "5G", "20M", "HT", "2T",
- "60", "20", "MKK", "5G", "20M", "HT", "2T", "60", "22",
- "FCC", "5G", "20M", "HT", "2T", "64", "28", "ETSI", "5G",
- "20M", "HT", "2T", "64", "20", "MKK", "5G", "20M", "HT",
- "2T", "64", "22", "FCC", "5G", "20M", "HT", "2T", "100",
- "26", "ETSI", "5G", "20M", "HT", "2T", "100", "20", "MKK",
- "5G", "20M", "HT", "2T", "100", "30", "FCC", "5G", "20M",
- "HT", "2T", "104", "30", "ETSI", "5G", "20M", "HT", "2T",
- "104", "20", "MKK", "5G", "20M", "HT", "2T", "104", "30",
- "FCC", "5G", "20M", "HT", "2T", "108", "32", "ETSI", "5G",
- "20M", "HT", "2T", "108", "20", "MKK", "5G", "20M", "HT",
- "2T", "108", "30", "FCC", "5G", "20M", "HT", "2T", "112",
- "32", "ETSI", "5G", "20M", "HT", "2T", "112", "20", "MKK",
- "5G", "20M", "HT", "2T", "112", "30", "FCC", "5G", "20M",
- "HT", "2T", "116", "32", "ETSI", "5G", "20M", "HT", "2T",
- "116", "20", "MKK", "5G", "20M", "HT", "2T", "116", "30",
- "FCC", "5G", "20M", "HT", "2T", "120", "32", "ETSI", "5G",
- "20M", "HT", "2T", "120", "20", "MKK", "5G", "20M", "HT",
- "2T", "120", "30", "FCC", "5G", "20M", "HT", "2T", "124",
- "32", "ETSI", "5G", "20M", "HT", "2T", "124", "20", "MKK",
- "5G", "20M", "HT", "2T", "124", "30", "FCC", "5G", "20M",
- "HT", "2T", "128", "32", "ETSI", "5G", "20M", "HT", "2T",
- "128", "20", "MKK", "5G", "20M", "HT", "2T", "128", "30",
- "FCC", "5G", "20M", "HT", "2T", "132", "32", "ETSI", "5G",
- "20M", "HT", "2T", "132", "20", "MKK", "5G", "20M", "HT",
- "2T", "132", "30", "FCC", "5G", "20M", "HT", "2T", "136",
- "30", "ETSI", "5G", "20M", "HT", "2T", "136", "20", "MKK",
- "5G", "20M", "HT", "2T", "136", "30", "FCC", "5G", "20M",
- "HT", "2T", "140", "26", "ETSI", "5G", "20M", "HT", "2T",
- "140", "20", "MKK", "5G", "20M", "HT", "2T", "140", "30",
- "FCC", "5G", "20M", "HT", "2T", "144", "26", "ETSI", "5G",
- "20M", "HT", "2T", "144", "63", "MKK", "5G", "20M", "HT",
- "2T", "144", "63", "FCC", "5G", "20M", "HT", "2T", "149",
- "32", "ETSI", "5G", "20M", "HT", "2T", "149", "63", "MKK",
- "5G", "20M", "HT", "2T", "149", "63", "FCC", "5G", "20M",
- "HT", "2T", "153", "32", "ETSI", "5G", "20M", "HT", "2T",
- "153", "63", "MKK", "5G", "20M", "HT", "2T", "153", "63",
- "FCC", "5G", "20M", "HT", "2T", "157", "32", "ETSI", "5G",
- "20M", "HT", "2T", "157", "63", "MKK", "5G", "20M", "HT",
- "2T", "157", "63", "FCC", "5G", "20M", "HT", "2T", "161",
- "32", "ETSI", "5G", "20M", "HT", "2T", "161", "63", "MKK",
- "5G", "20M", "HT", "2T", "161", "63", "FCC", "5G", "20M",
- "HT", "2T", "165", "32", "ETSI", "5G", "20M", "HT", "2T",
- "165", "63", "MKK", "5G", "20M", "HT", "2T", "165", "63",
- "FCC", "5G", "40M", "HT", "1T", "38", "22", "ETSI", "5G",
- "40M", "HT", "1T", "38", "30", "MKK", "5G", "40M", "HT",
- "1T", "38", "30", "FCC", "5G", "40M", "HT", "1T", "46",
- "30", "ETSI", "5G", "40M", "HT", "1T", "46", "30", "MKK",
- "5G", "40M", "HT", "1T", "46", "30", "FCC", "5G", "40M",
- "HT", "1T", "54", "30", "ETSI", "5G", "40M", "HT", "1T",
- "54", "30", "MKK", "5G", "40M", "HT", "1T", "54", "30",
- "FCC", "5G", "40M", "HT", "1T", "62", "24", "ETSI", "5G",
- "40M", "HT", "1T", "62", "30", "MKK", "5G", "40M", "HT",
- "1T", "62", "30", "FCC", "5G", "40M", "HT", "1T", "102",
- "24", "ETSI", "5G", "40M", "HT", "1T", "102", "30", "MKK",
- "5G", "40M", "HT", "1T", "102", "30", "FCC", "5G", "40M",
- "HT", "1T", "110", "30", "ETSI", "5G", "40M", "HT", "1T",
- "110", "30", "MKK", "5G", "40M", "HT", "1T", "110", "30",
- "FCC", "5G", "40M", "HT", "1T", "118", "30", "ETSI", "5G",
- "40M", "HT", "1T", "118", "30", "MKK", "5G", "40M", "HT",
- "1T", "118", "30", "FCC", "5G", "40M", "HT", "1T", "126",
- "30", "ETSI", "5G", "40M", "HT", "1T", "126", "30", "MKK",
- "5G", "40M", "HT", "1T", "126", "30", "FCC", "5G", "40M",
- "HT", "1T", "134", "30", "ETSI", "5G", "40M", "HT", "1T",
- "134", "30", "MKK", "5G", "40M", "HT", "1T", "134", "30",
- "FCC", "5G", "40M", "HT", "1T", "142", "30", "ETSI", "5G",
- "40M", "HT", "1T", "142", "63", "MKK", "5G", "40M", "HT",
- "1T", "142", "63", "FCC", "5G", "40M", "HT", "1T", "151",
- "30", "ETSI", "5G", "40M", "HT", "1T", "151", "63", "MKK",
- "5G", "40M", "HT", "1T", "151", "63", "FCC", "5G", "40M",
- "HT", "1T", "159", "30", "ETSI", "5G", "40M", "HT", "1T",
- "159", "63", "MKK", "5G", "40M", "HT", "1T", "159", "63",
- "FCC", "5G", "40M", "HT", "2T", "38", "20", "ETSI", "5G",
- "40M", "HT", "2T", "38", "20", "MKK", "5G", "40M", "HT",
- "2T", "38", "22", "FCC", "5G", "40M", "HT", "2T", "46",
- "30", "ETSI", "5G", "40M", "HT", "2T", "46", "20", "MKK",
- "5G", "40M", "HT", "2T", "46", "22", "FCC", "5G", "40M",
- "HT", "2T", "54", "30", "ETSI", "5G", "40M", "HT", "2T",
- "54", "20", "MKK", "5G", "40M", "HT", "2T", "54", "22",
- "FCC", "5G", "40M", "HT", "2T", "62", "22", "ETSI", "5G",
- "40M", "HT", "2T", "62", "20", "MKK", "5G", "40M", "HT",
- "2T", "62", "22", "FCC", "5G", "40M", "HT", "2T", "102",
- "22", "ETSI", "5G", "40M", "HT", "2T", "102", "20", "MKK",
- "5G", "40M", "HT", "2T", "102", "30", "FCC", "5G", "40M",
- "HT", "2T", "110", "30", "ETSI", "5G", "40M", "HT", "2T",
- "110", "20", "MKK", "5G", "40M", "HT", "2T", "110", "30",
- "FCC", "5G", "40M", "HT", "2T", "118", "30", "ETSI", "5G",
- "40M", "HT", "2T", "118", "20", "MKK", "5G", "40M", "HT",
- "2T", "118", "30", "FCC", "5G", "40M", "HT", "2T", "126",
- "30", "ETSI", "5G", "40M", "HT", "2T", "126", "20", "MKK",
- "5G", "40M", "HT", "2T", "126", "30", "FCC", "5G", "40M",
- "HT", "2T", "134", "30", "ETSI", "5G", "40M", "HT", "2T",
- "134", "20", "MKK", "5G", "40M", "HT", "2T", "134", "30",
- "FCC", "5G", "40M", "HT", "2T", "142", "30", "ETSI", "5G",
- "40M", "HT", "2T", "142", "63", "MKK", "5G", "40M", "HT",
- "2T", "142", "63", "FCC", "5G", "40M", "HT", "2T", "151",
- "30", "ETSI", "5G", "40M", "HT", "2T", "151", "63", "MKK",
- "5G", "40M", "HT", "2T", "151", "63", "FCC", "5G", "40M",
- "HT", "2T", "159", "30", "ETSI", "5G", "40M", "HT", "2T",
- "159", "63", "MKK", "5G", "40M", "HT", "2T", "159", "63",
- "FCC", "5G", "80M", "VHT", "1T", "42", "20", "ETSI", "5G",
- "80M", "VHT", "1T", "42", "30", "MKK", "5G", "80M", "VHT",
- "1T", "42", "28", "FCC", "5G", "80M", "VHT", "1T", "58",
- "20", "ETSI", "5G", "80M", "VHT", "1T", "58", "30", "MKK",
- "5G", "80M", "VHT", "1T", "58", "28", "FCC", "5G", "80M",
- "VHT", "1T", "106", "20", "ETSI", "5G", "80M", "VHT", "1T",
- "106", "30", "MKK", "5G", "80M", "VHT", "1T", "106", "30",
- "FCC", "5G", "80M", "VHT", "1T", "122", "30", "ETSI", "5G",
- "80M", "VHT", "1T", "122", "30", "MKK", "5G", "80M", "VHT",
- "1T", "122", "30", "FCC", "5G", "80M", "VHT", "1T", "138",
- "30", "ETSI", "5G", "80M", "VHT", "1T", "138", "63", "MKK",
- "5G", "80M", "VHT", "1T", "138", "63", "FCC", "5G", "80M",
- "VHT", "1T", "155", "30", "ETSI", "5G", "80M", "VHT", "1T",
- "155", "63", "MKK", "5G", "80M", "VHT", "1T", "155", "63",
- "FCC", "5G", "80M", "VHT", "2T", "42", "18", "ETSI", "5G",
- "80M", "VHT", "2T", "42", "20", "MKK", "5G", "80M", "VHT",
- "2T", "42", "22", "FCC", "5G", "80M", "VHT", "2T", "58",
- "18", "ETSI", "5G", "80M", "VHT", "2T", "58", "20", "MKK",
- "5G", "80M", "VHT", "2T", "58", "22", "FCC", "5G", "80M",
- "VHT", "2T", "106", "20", "ETSI", "5G", "80M", "VHT", "2T",
- "106", "20", "MKK", "5G", "80M", "VHT", "2T", "106", "30",
- "FCC", "5G", "80M", "VHT", "2T", "122", "30", "ETSI", "5G",
- "80M", "VHT", "2T", "122", "20", "MKK", "5G", "80M", "VHT",
- "2T", "122", "30", "FCC", "5G", "80M", "VHT", "2T", "138",
- "30", "ETSI", "5G", "80M", "VHT", "2T", "138", "63", "MKK",
- "5G", "80M", "VHT", "2T", "138", "63", "FCC", "5G", "80M",
- "VHT", "2T", "155", "30", "ETSI", "5G", "80M", "VHT", "2T",
- "155", "63", "MKK", "5G", "80M", "VHT", "2T", "155", "63"};
-
-void odm_read_and_config_mp_8822b_txpwr_lmt(struct phy_dm_struct *dm)
-{
- u32 i = 0;
- u8 **array = (u8 **)array_mp_8822b_txpwr_lmt;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===> %s\n", __func__);
-
- for (i = 0; i < ARRAY_SIZE(array_mp_8822b_txpwr_lmt); i += 7) {
- u8 *regulation = array[i];
- u8 *band = array[i + 1];
- u8 *bandwidth = array[i + 2];
- u8 *rate = array[i + 3];
- u8 *rf_path = array[i + 4];
- u8 *chnl = array[i + 5];
- u8 *val = array[i + 6];
-
- odm_config_bb_txpwr_lmt_8822b(dm, regulation, band, bandwidth,
- rate, rf_path, chnl, val);
- }
-}
-
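array_mp_8822b_txpwr_lmt above is a flat string table consumed seven entries at a time (regulation domain, band, bandwidth, rate section, RF path, channel, and power-limit value), matching the i += 7 stride in odm_read_and_config_mp_8822b_txpwr_lmt(). A minimal stand-alone sketch of that record walk, with a two-record excerpt copied from the start of the table and a printf() standing in for the driver's call to odm_config_bb_txpwr_lmt_8822b():

#include <stdio.h>

/* Two seven-field records taken verbatim from the start of the table above. */
static const char *const tbl[] = {
	"FCC",  "2.4G", "20M", "CCK", "1T", "01", "32",
	"ETSI", "2.4G", "20M", "CCK", "1T", "01", "28",
};

int main(void)
{
	size_t i;

	/* Same stride as the driver's loop: one record per seven strings. */
	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i += 7)
		printf("reg=%s band=%s bw=%s rate=%s path=%s ch=%s val=%s\n",
		       tbl[i], tbl[i + 1], tbl[i + 2], tbl[i + 3],
		       tbl[i + 4], tbl[i + 5], tbl[i + 6]);

	return 0;
}
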
-/******************************************************************************
- * txpwr_lmt_type5.TXT
- ******************************************************************************/
-
-static const char *const array_mp_8822b_txpwr_lmt_type5[] = {
- "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", "ETSI", "2.4G",
- "20M", "CCK", "1T", "01", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "01", "30", "FCC", "2.4G", "20M", "CCK", "1T", "02",
- "32", "ETSI", "2.4G", "20M", "CCK", "1T", "02", "28", "MKK",
- "2.4G", "20M", "CCK", "1T", "02", "30", "FCC", "2.4G", "20M",
- "CCK", "1T", "03", "32", "ETSI", "2.4G", "20M", "CCK", "1T",
- "03", "28", "MKK", "2.4G", "20M", "CCK", "1T", "03", "30",
- "FCC", "2.4G", "20M", "CCK", "1T", "04", "32", "ETSI", "2.4G",
- "20M", "CCK", "1T", "04", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "04", "30", "FCC", "2.4G", "20M", "CCK", "1T", "05",
- "32", "ETSI", "2.4G", "20M", "CCK", "1T", "05", "28", "MKK",
- "2.4G", "20M", "CCK", "1T", "05", "30", "FCC", "2.4G", "20M",
- "CCK", "1T", "06", "32", "ETSI", "2.4G", "20M", "CCK", "1T",
- "06", "28", "MKK", "2.4G", "20M", "CCK", "1T", "06", "30",
- "FCC", "2.4G", "20M", "CCK", "1T", "07", "32", "ETSI", "2.4G",
- "20M", "CCK", "1T", "07", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "07", "30", "FCC", "2.4G", "20M", "CCK", "1T", "08",
- "32", "ETSI", "2.4G", "20M", "CCK", "1T", "08", "28", "MKK",
- "2.4G", "20M", "CCK", "1T", "08", "30", "FCC", "2.4G", "20M",
- "CCK", "1T", "09", "32", "ETSI", "2.4G", "20M", "CCK", "1T",
- "09", "28", "MKK", "2.4G", "20M", "CCK", "1T", "09", "30",
- "FCC", "2.4G", "20M", "CCK", "1T", "10", "32", "ETSI", "2.4G",
- "20M", "CCK", "1T", "10", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "10", "30", "FCC", "2.4G", "20M", "CCK", "1T", "11",
- "32", "ETSI", "2.4G", "20M", "CCK", "1T", "11", "28", "MKK",
- "2.4G", "20M", "CCK", "1T", "11", "30", "FCC", "2.4G", "20M",
- "CCK", "1T", "12", "26", "ETSI", "2.4G", "20M", "CCK", "1T",
- "12", "28", "MKK", "2.4G", "20M", "CCK", "1T", "12", "30",
- "FCC", "2.4G", "20M", "CCK", "1T", "13", "20", "ETSI", "2.4G",
- "20M", "CCK", "1T", "13", "28", "MKK", "2.4G", "20M", "CCK",
- "1T", "13", "28", "FCC", "2.4G", "20M", "CCK", "1T", "14",
- "63", "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63", "MKK",
- "2.4G", "20M", "CCK", "1T", "14", "32", "FCC", "2.4G", "20M",
- "OFDM", "1T", "01", "26", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "01", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "01", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "02", "30", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "02", "30", "MKK", "2.4G", "20M", "OFDM",
- "1T", "02", "34", "FCC", "2.4G", "20M", "OFDM", "1T", "03",
- "32", "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "30", "MKK",
- "2.4G", "20M", "OFDM", "1T", "03", "34", "FCC", "2.4G", "20M",
- "OFDM", "1T", "04", "34", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "04", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "04", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "05", "34", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "05", "30", "MKK", "2.4G", "20M", "OFDM",
- "1T", "05", "34", "FCC", "2.4G", "20M", "OFDM", "1T", "06",
- "34", "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "30", "MKK",
- "2.4G", "20M", "OFDM", "1T", "06", "34", "FCC", "2.4G", "20M",
- "OFDM", "1T", "07", "34", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "07", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "07", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "08", "34", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "08", "30", "MKK", "2.4G", "20M", "OFDM",
- "1T", "08", "34", "FCC", "2.4G", "20M", "OFDM", "1T", "09",
- "32", "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "30", "MKK",
- "2.4G", "20M", "OFDM", "1T", "09", "34", "FCC", "2.4G", "20M",
- "OFDM", "1T", "10", "30", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "10", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "10", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "11", "28", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "11", "30", "MKK", "2.4G", "20M", "OFDM",
- "1T", "11", "34", "FCC", "2.4G", "20M", "OFDM", "1T", "12",
- "22", "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "30", "MKK",
- "2.4G", "20M", "OFDM", "1T", "12", "34", "FCC", "2.4G", "20M",
- "OFDM", "1T", "13", "14", "ETSI", "2.4G", "20M", "OFDM", "1T",
- "13", "30", "MKK", "2.4G", "20M", "OFDM", "1T", "13", "34",
- "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63", "ETSI", "2.4G",
- "20M", "OFDM", "1T", "14", "63", "MKK", "2.4G", "20M", "OFDM",
- "1T", "14", "63", "FCC", "2.4G", "20M", "HT", "1T", "01",
- "26", "ETSI", "2.4G", "20M", "HT", "1T", "01", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "01", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "02", "30", "ETSI", "2.4G", "20M", "HT", "1T",
- "02", "30", "MKK", "2.4G", "20M", "HT", "1T", "02", "34",
- "FCC", "2.4G", "20M", "HT", "1T", "03", "32", "ETSI", "2.4G",
- "20M", "HT", "1T", "03", "30", "MKK", "2.4G", "20M", "HT",
- "1T", "03", "34", "FCC", "2.4G", "20M", "HT", "1T", "04",
- "34", "ETSI", "2.4G", "20M", "HT", "1T", "04", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "04", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "05", "34", "ETSI", "2.4G", "20M", "HT", "1T",
- "05", "30", "MKK", "2.4G", "20M", "HT", "1T", "05", "34",
- "FCC", "2.4G", "20M", "HT", "1T", "06", "34", "ETSI", "2.4G",
- "20M", "HT", "1T", "06", "30", "MKK", "2.4G", "20M", "HT",
- "1T", "06", "34", "FCC", "2.4G", "20M", "HT", "1T", "07",
- "34", "ETSI", "2.4G", "20M", "HT", "1T", "07", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "07", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "08", "34", "ETSI", "2.4G", "20M", "HT", "1T",
- "08", "30", "MKK", "2.4G", "20M", "HT", "1T", "08", "34",
- "FCC", "2.4G", "20M", "HT", "1T", "09", "32", "ETSI", "2.4G",
- "20M", "HT", "1T", "09", "30", "MKK", "2.4G", "20M", "HT",
- "1T", "09", "34", "FCC", "2.4G", "20M", "HT", "1T", "10",
- "30", "ETSI", "2.4G", "20M", "HT", "1T", "10", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "10", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "11", "26", "ETSI", "2.4G", "20M", "HT", "1T",
- "11", "30", "MKK", "2.4G", "20M", "HT", "1T", "11", "34",
- "FCC", "2.4G", "20M", "HT", "1T", "12", "20", "ETSI", "2.4G",
- "20M", "HT", "1T", "12", "30", "MKK", "2.4G", "20M", "HT",
- "1T", "12", "34", "FCC", "2.4G", "20M", "HT", "1T", "13",
- "14", "ETSI", "2.4G", "20M", "HT", "1T", "13", "30", "MKK",
- "2.4G", "20M", "HT", "1T", "13", "34", "FCC", "2.4G", "20M",
- "HT", "1T", "14", "63", "ETSI", "2.4G", "20M", "HT", "1T",
- "14", "63", "MKK", "2.4G", "20M", "HT", "1T", "14", "63",
- "FCC", "2.4G", "20M", "HT", "2T", "01", "26", "ETSI", "2.4G",
- "20M", "HT", "2T", "01", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "01", "30", "FCC", "2.4G", "20M", "HT", "2T", "02",
- "28", "ETSI", "2.4G", "20M", "HT", "2T", "02", "18", "MKK",
- "2.4G", "20M", "HT", "2T", "02", "30", "FCC", "2.4G", "20M",
- "HT", "2T", "03", "30", "ETSI", "2.4G", "20M", "HT", "2T",
- "03", "18", "MKK", "2.4G", "20M", "HT", "2T", "03", "30",
- "FCC", "2.4G", "20M", "HT", "2T", "04", "30", "ETSI", "2.4G",
- "20M", "HT", "2T", "04", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "04", "30", "FCC", "2.4G", "20M", "HT", "2T", "05",
- "32", "ETSI", "2.4G", "20M", "HT", "2T", "05", "18", "MKK",
- "2.4G", "20M", "HT", "2T", "05", "30", "FCC", "2.4G", "20M",
- "HT", "2T", "06", "32", "ETSI", "2.4G", "20M", "HT", "2T",
- "06", "18", "MKK", "2.4G", "20M", "HT", "2T", "06", "30",
- "FCC", "2.4G", "20M", "HT", "2T", "07", "32", "ETSI", "2.4G",
- "20M", "HT", "2T", "07", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "07", "30", "FCC", "2.4G", "20M", "HT", "2T", "08",
- "30", "ETSI", "2.4G", "20M", "HT", "2T", "08", "18", "MKK",
- "2.4G", "20M", "HT", "2T", "08", "30", "FCC", "2.4G", "20M",
- "HT", "2T", "09", "30", "ETSI", "2.4G", "20M", "HT", "2T",
- "09", "18", "MKK", "2.4G", "20M", "HT", "2T", "09", "30",
- "FCC", "2.4G", "20M", "HT", "2T", "10", "28", "ETSI", "2.4G",
- "20M", "HT", "2T", "10", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "10", "30", "FCC", "2.4G", "20M", "HT", "2T", "11",
- "26", "ETSI", "2.4G", "20M", "HT", "2T", "11", "18", "MKK",
- "2.4G", "20M", "HT", "2T", "11", "30", "FCC", "2.4G", "20M",
- "HT", "2T", "12", "20", "ETSI", "2.4G", "20M", "HT", "2T",
- "12", "18", "MKK", "2.4G", "20M", "HT", "2T", "12", "30",
- "FCC", "2.4G", "20M", "HT", "2T", "13", "14", "ETSI", "2.4G",
- "20M", "HT", "2T", "13", "18", "MKK", "2.4G", "20M", "HT",
- "2T", "13", "30", "FCC", "2.4G", "20M", "HT", "2T", "14",
- "63", "ETSI", "2.4G", "20M", "HT", "2T", "14", "63", "MKK",
- "2.4G", "20M", "HT", "2T", "14", "63", "FCC", "2.4G", "40M",
- "HT", "1T", "01", "63", "ETSI", "2.4G", "40M", "HT", "1T",
- "01", "63", "MKK", "2.4G", "40M", "HT", "1T", "01", "63",
- "FCC", "2.4G", "40M", "HT", "1T", "02", "63", "ETSI", "2.4G",
- "40M", "HT", "1T", "02", "63", "MKK", "2.4G", "40M", "HT",
- "1T", "02", "63", "FCC", "2.4G", "40M", "HT", "1T", "03",
- "26", "ETSI", "2.4G", "40M", "HT", "1T", "03", "30", "MKK",
- "2.4G", "40M", "HT", "1T", "03", "34", "FCC", "2.4G", "40M",
- "HT", "1T", "04", "26", "ETSI", "2.4G", "40M", "HT", "1T",
- "04", "30", "MKK", "2.4G", "40M", "HT", "1T", "04", "34",
- "FCC", "2.4G", "40M", "HT", "1T", "05", "30", "ETSI", "2.4G",
- "40M", "HT", "1T", "05", "30", "MKK", "2.4G", "40M", "HT",
- "1T", "05", "34", "FCC", "2.4G", "40M", "HT", "1T", "06",
- "32", "ETSI", "2.4G", "40M", "HT", "1T", "06", "30", "MKK",
- "2.4G", "40M", "HT", "1T", "06", "34", "FCC", "2.4G", "40M",
- "HT", "1T", "07", "30", "ETSI", "2.4G", "40M", "HT", "1T",
- "07", "30", "MKK", "2.4G", "40M", "HT", "1T", "07", "34",
- "FCC", "2.4G", "40M", "HT", "1T", "08", "26", "ETSI", "2.4G",
- "40M", "HT", "1T", "08", "30", "MKK", "2.4G", "40M", "HT",
- "1T", "08", "34", "FCC", "2.4G", "40M", "HT", "1T", "09",
- "26", "ETSI", "2.4G", "40M", "HT", "1T", "09", "30", "MKK",
- "2.4G", "40M", "HT", "1T", "09", "34", "FCC", "2.4G", "40M",
- "HT", "1T", "10", "20", "ETSI", "2.4G", "40M", "HT", "1T",
- "10", "30", "MKK", "2.4G", "40M", "HT", "1T", "10", "34",
- "FCC", "2.4G", "40M", "HT", "1T", "11", "14", "ETSI", "2.4G",
- "40M", "HT", "1T", "11", "30", "MKK", "2.4G", "40M", "HT",
- "1T", "11", "34", "FCC", "2.4G", "40M", "HT", "1T", "12",
- "63", "ETSI", "2.4G", "40M", "HT", "1T", "12", "63", "MKK",
- "2.4G", "40M", "HT", "1T", "12", "63", "FCC", "2.4G", "40M",
- "HT", "1T", "13", "63", "ETSI", "2.4G", "40M", "HT", "1T",
- "13", "63", "MKK", "2.4G", "40M", "HT", "1T", "13", "63",
- "FCC", "2.4G", "40M", "HT", "1T", "14", "63", "ETSI", "2.4G",
- "40M", "HT", "1T", "14", "63", "MKK", "2.4G", "40M", "HT",
- "1T", "14", "63", "FCC", "2.4G", "40M", "HT", "2T", "01",
- "63", "ETSI", "2.4G", "40M", "HT", "2T", "01", "63", "MKK",
- "2.4G", "40M", "HT", "2T", "01", "63", "FCC", "2.4G", "40M",
- "HT", "2T", "02", "63", "ETSI", "2.4G", "40M", "HT", "2T",
- "02", "63", "MKK", "2.4G", "40M", "HT", "2T", "02", "63",
- "FCC", "2.4G", "40M", "HT", "2T", "03", "24", "ETSI", "2.4G",
- "40M", "HT", "2T", "03", "18", "MKK", "2.4G", "40M", "HT",
- "2T", "03", "30", "FCC", "2.4G", "40M", "HT", "2T", "04",
- "24", "ETSI", "2.4G", "40M", "HT", "2T", "04", "18", "MKK",
- "2.4G", "40M", "HT", "2T", "04", "30", "FCC", "2.4G", "40M",
- "HT", "2T", "05", "26", "ETSI", "2.4G", "40M", "HT", "2T",
- "05", "18", "MKK", "2.4G", "40M", "HT", "2T", "05", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "06", "28", "ETSI", "2.4G",
- "40M", "HT", "2T", "06", "18", "MKK", "2.4G", "40M", "HT",
- "2T", "06", "30", "FCC", "2.4G", "40M", "HT", "2T", "07",
- "26", "ETSI", "2.4G", "40M", "HT", "2T", "07", "18", "MKK",
- "2.4G", "40M", "HT", "2T", "07", "30", "FCC", "2.4G", "40M",
- "HT", "2T", "08", "26", "ETSI", "2.4G", "40M", "HT", "2T",
- "08", "18", "MKK", "2.4G", "40M", "HT", "2T", "08", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "09", "26", "ETSI", "2.4G",
- "40M", "HT", "2T", "09", "18", "MKK", "2.4G", "40M", "HT",
- "2T", "09", "30", "FCC", "2.4G", "40M", "HT", "2T", "10",
- "20", "ETSI", "2.4G", "40M", "HT", "2T", "10", "18", "MKK",
- "2.4G", "40M", "HT", "2T", "10", "30", "FCC", "2.4G", "40M",
- "HT", "2T", "11", "14", "ETSI", "2.4G", "40M", "HT", "2T",
- "11", "18", "MKK", "2.4G", "40M", "HT", "2T", "11", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "12", "63", "ETSI", "2.4G",
- "40M", "HT", "2T", "12", "63", "MKK", "2.4G", "40M", "HT",
- "2T", "12", "63", "FCC", "2.4G", "40M", "HT", "2T", "13",
- "63", "ETSI", "2.4G", "40M", "HT", "2T", "13", "63", "MKK",
- "2.4G", "40M", "HT", "2T", "13", "63", "FCC", "2.4G", "40M",
- "HT", "2T", "14", "63", "ETSI", "2.4G", "40M", "HT", "2T",
- "14", "63", "MKK", "2.4G", "40M", "HT", "2T", "14", "63",
- "FCC", "5G", "20M", "OFDM", "1T", "36", "30", "ETSI", "5G",
- "20M", "OFDM", "1T", "36", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "36", "30", "FCC", "5G", "20M", "OFDM", "1T", "40",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "40", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "40", "30", "FCC", "5G", "20M",
- "OFDM", "1T", "44", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "44", "32", "MKK", "5G", "20M", "OFDM", "1T", "44", "30",
- "FCC", "5G", "20M", "OFDM", "1T", "48", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "48", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "48", "30", "FCC", "5G", "20M", "OFDM", "1T", "52",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "52", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "52", "28", "FCC", "5G", "20M",
- "OFDM", "1T", "56", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "56", "32", "MKK", "5G", "20M", "OFDM", "1T", "56", "28",
- "FCC", "5G", "20M", "OFDM", "1T", "60", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "60", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "60", "28", "FCC", "5G", "20M", "OFDM", "1T", "64",
- "28", "ETSI", "5G", "20M", "OFDM", "1T", "64", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "64", "28", "FCC", "5G", "20M",
- "OFDM", "1T", "100", "26", "ETSI", "5G", "20M", "OFDM", "1T",
- "100", "32", "MKK", "5G", "20M", "OFDM", "1T", "100", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "104", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "104", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "104", "32", "FCC", "5G", "20M", "OFDM", "1T", "108",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "108", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "108", "32", "FCC", "5G", "20M",
- "OFDM", "1T", "112", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "112", "32", "MKK", "5G", "20M", "OFDM", "1T", "112", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "116", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "116", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "116", "32", "FCC", "5G", "20M", "OFDM", "1T", "120",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "120", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "120", "32", "FCC", "5G", "20M",
- "OFDM", "1T", "124", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "124", "32", "MKK", "5G", "20M", "OFDM", "1T", "124", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "128", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "128", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "128", "32", "FCC", "5G", "20M", "OFDM", "1T", "132",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "132", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "132", "32", "FCC", "5G", "20M",
- "OFDM", "1T", "136", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "136", "32", "MKK", "5G", "20M", "OFDM", "1T", "136", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "140", "28", "ETSI", "5G",
- "20M", "OFDM", "1T", "140", "32", "MKK", "5G", "20M", "OFDM",
- "1T", "140", "32", "FCC", "5G", "20M", "OFDM", "1T", "144",
- "28", "ETSI", "5G", "20M", "OFDM", "1T", "144", "32", "MKK",
- "5G", "20M", "OFDM", "1T", "144", "63", "FCC", "5G", "20M",
- "OFDM", "1T", "149", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "149", "63", "MKK", "5G", "20M", "OFDM", "1T", "149", "63",
- "FCC", "5G", "20M", "OFDM", "1T", "153", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "153", "63", "MKK", "5G", "20M", "OFDM",
- "1T", "153", "63", "FCC", "5G", "20M", "OFDM", "1T", "157",
- "32", "ETSI", "5G", "20M", "OFDM", "1T", "157", "63", "MKK",
- "5G", "20M", "OFDM", "1T", "157", "63", "FCC", "5G", "20M",
- "OFDM", "1T", "161", "32", "ETSI", "5G", "20M", "OFDM", "1T",
- "161", "63", "MKK", "5G", "20M", "OFDM", "1T", "161", "63",
- "FCC", "5G", "20M", "OFDM", "1T", "165", "32", "ETSI", "5G",
- "20M", "OFDM", "1T", "165", "63", "MKK", "5G", "20M", "OFDM",
- "1T", "165", "63", "FCC", "5G", "20M", "HT", "1T", "36",
- "30", "ETSI", "5G", "20M", "HT", "1T", "36", "32", "MKK",
- "5G", "20M", "HT", "1T", "36", "28", "FCC", "5G", "20M",
- "HT", "1T", "40", "32", "ETSI", "5G", "20M", "HT", "1T",
- "40", "32", "MKK", "5G", "20M", "HT", "1T", "40", "28",
- "FCC", "5G", "20M", "HT", "1T", "44", "32", "ETSI", "5G",
- "20M", "HT", "1T", "44", "32", "MKK", "5G", "20M", "HT",
- "1T", "44", "28", "FCC", "5G", "20M", "HT", "1T", "48",
- "32", "ETSI", "5G", "20M", "HT", "1T", "48", "32", "MKK",
- "5G", "20M", "HT", "1T", "48", "28", "FCC", "5G", "20M",
- "HT", "1T", "52", "32", "ETSI", "5G", "20M", "HT", "1T",
- "52", "32", "MKK", "5G", "20M", "HT", "1T", "52", "28",
- "FCC", "5G", "20M", "HT", "1T", "56", "32", "ETSI", "5G",
- "20M", "HT", "1T", "56", "32", "MKK", "5G", "20M", "HT",
- "1T", "56", "28", "FCC", "5G", "20M", "HT", "1T", "60",
- "32", "ETSI", "5G", "20M", "HT", "1T", "60", "32", "MKK",
- "5G", "20M", "HT", "1T", "60", "28", "FCC", "5G", "20M",
- "HT", "1T", "64", "28", "ETSI", "5G", "20M", "HT", "1T",
- "64", "32", "MKK", "5G", "20M", "HT", "1T", "64", "28",
- "FCC", "5G", "20M", "HT", "1T", "100", "26", "ETSI", "5G",
- "20M", "HT", "1T", "100", "32", "MKK", "5G", "20M", "HT",
- "1T", "100", "32", "FCC", "5G", "20M", "HT", "1T", "104",
- "32", "ETSI", "5G", "20M", "HT", "1T", "104", "32", "MKK",
- "5G", "20M", "HT", "1T", "104", "32", "FCC", "5G", "20M",
- "HT", "1T", "108", "32", "ETSI", "5G", "20M", "HT", "1T",
- "108", "32", "MKK", "5G", "20M", "HT", "1T", "108", "32",
- "FCC", "5G", "20M", "HT", "1T", "112", "32", "ETSI", "5G",
- "20M", "HT", "1T", "112", "32", "MKK", "5G", "20M", "HT",
- "1T", "112", "32", "FCC", "5G", "20M", "HT", "1T", "116",
- "32", "ETSI", "5G", "20M", "HT", "1T", "116", "32", "MKK",
- "5G", "20M", "HT", "1T", "116", "32", "FCC", "5G", "20M",
- "HT", "1T", "120", "32", "ETSI", "5G", "20M", "HT", "1T",
- "120", "32", "MKK", "5G", "20M", "HT", "1T", "120", "32",
- "FCC", "5G", "20M", "HT", "1T", "124", "32", "ETSI", "5G",
- "20M", "HT", "1T", "124", "32", "MKK", "5G", "20M", "HT",
- "1T", "124", "32", "FCC", "5G", "20M", "HT", "1T", "128",
- "32", "ETSI", "5G", "20M", "HT", "1T", "128", "32", "MKK",
- "5G", "20M", "HT", "1T", "128", "32", "FCC", "5G", "20M",
- "HT", "1T", "132", "32", "ETSI", "5G", "20M", "HT", "1T",
- "132", "32", "MKK", "5G", "20M", "HT", "1T", "132", "32",
- "FCC", "5G", "20M", "HT", "1T", "136", "32", "ETSI", "5G",
- "20M", "HT", "1T", "136", "32", "MKK", "5G", "20M", "HT",
- "1T", "136", "32", "FCC", "5G", "20M", "HT", "1T", "140",
- "26", "ETSI", "5G", "20M", "HT", "1T", "140", "32", "MKK",
- "5G", "20M", "HT", "1T", "140", "32", "FCC", "5G", "20M",
- "HT", "1T", "144", "26", "ETSI", "5G", "20M", "HT", "1T",
- "144", "63", "MKK", "5G", "20M", "HT", "1T", "144", "63",
- "FCC", "5G", "20M", "HT", "1T", "149", "32", "ETSI", "5G",
- "20M", "HT", "1T", "149", "63", "MKK", "5G", "20M", "HT",
- "1T", "149", "63", "FCC", "5G", "20M", "HT", "1T", "153",
- "32", "ETSI", "5G", "20M", "HT", "1T", "153", "63", "MKK",
- "5G", "20M", "HT", "1T", "153", "63", "FCC", "5G", "20M",
- "HT", "1T", "157", "32", "ETSI", "5G", "20M", "HT", "1T",
- "157", "63", "MKK", "5G", "20M", "HT", "1T", "157", "63",
- "FCC", "5G", "20M", "HT", "1T", "161", "32", "ETSI", "5G",
- "20M", "HT", "1T", "161", "63", "MKK", "5G", "20M", "HT",
- "1T", "161", "63", "FCC", "5G", "20M", "HT", "1T", "165",
- "32", "ETSI", "5G", "20M", "HT", "1T", "165", "63", "MKK",
- "5G", "20M", "HT", "1T", "165", "63", "FCC", "5G", "20M",
- "HT", "2T", "36", "28", "ETSI", "5G", "20M", "HT", "2T",
- "36", "20", "MKK", "5G", "20M", "HT", "2T", "36", "22",
- "FCC", "5G", "20M", "HT", "2T", "40", "30", "ETSI", "5G",
- "20M", "HT", "2T", "40", "20", "MKK", "5G", "20M", "HT",
- "2T", "40", "22", "FCC", "5G", "20M", "HT", "2T", "44",
- "30", "ETSI", "5G", "20M", "HT", "2T", "44", "20", "MKK",
- "5G", "20M", "HT", "2T", "44", "22", "FCC", "5G", "20M",
- "HT", "2T", "48", "30", "ETSI", "5G", "20M", "HT", "2T",
- "48", "20", "MKK", "5G", "20M", "HT", "2T", "48", "22",
- "FCC", "5G", "20M", "HT", "2T", "52", "30", "ETSI", "5G",
- "20M", "HT", "2T", "52", "20", "MKK", "5G", "20M", "HT",
- "2T", "52", "22", "FCC", "5G", "20M", "HT", "2T", "56",
- "30", "ETSI", "5G", "20M", "HT", "2T", "56", "20", "MKK",
- "5G", "20M", "HT", "2T", "56", "22", "FCC", "5G", "20M",
- "HT", "2T", "60", "30", "ETSI", "5G", "20M", "HT", "2T",
- "60", "20", "MKK", "5G", "20M", "HT", "2T", "60", "22",
- "FCC", "5G", "20M", "HT", "2T", "64", "28", "ETSI", "5G",
- "20M", "HT", "2T", "64", "20", "MKK", "5G", "20M", "HT",
- "2T", "64", "22", "FCC", "5G", "20M", "HT", "2T", "100",
- "26", "ETSI", "5G", "20M", "HT", "2T", "100", "20", "MKK",
- "5G", "20M", "HT", "2T", "100", "30", "FCC", "5G", "20M",
- "HT", "2T", "104", "30", "ETSI", "5G", "20M", "HT", "2T",
- "104", "20", "MKK", "5G", "20M", "HT", "2T", "104", "30",
- "FCC", "5G", "20M", "HT", "2T", "108", "32", "ETSI", "5G",
- "20M", "HT", "2T", "108", "20", "MKK", "5G", "20M", "HT",
- "2T", "108", "30", "FCC", "5G", "20M", "HT", "2T", "112",
- "32", "ETSI", "5G", "20M", "HT", "2T", "112", "20", "MKK",
- "5G", "20M", "HT", "2T", "112", "30", "FCC", "5G", "20M",
- "HT", "2T", "116", "32", "ETSI", "5G", "20M", "HT", "2T",
- "116", "20", "MKK", "5G", "20M", "HT", "2T", "116", "30",
- "FCC", "5G", "20M", "HT", "2T", "120", "32", "ETSI", "5G",
- "20M", "HT", "2T", "120", "20", "MKK", "5G", "20M", "HT",
- "2T", "120", "30", "FCC", "5G", "20M", "HT", "2T", "124",
- "32", "ETSI", "5G", "20M", "HT", "2T", "124", "20", "MKK",
- "5G", "20M", "HT", "2T", "124", "30", "FCC", "5G", "20M",
- "HT", "2T", "128", "32", "ETSI", "5G", "20M", "HT", "2T",
- "128", "20", "MKK", "5G", "20M", "HT", "2T", "128", "30",
- "FCC", "5G", "20M", "HT", "2T", "132", "32", "ETSI", "5G",
- "20M", "HT", "2T", "132", "20", "MKK", "5G", "20M", "HT",
- "2T", "132", "30", "FCC", "5G", "20M", "HT", "2T", "136",
- "30", "ETSI", "5G", "20M", "HT", "2T", "136", "20", "MKK",
- "5G", "20M", "HT", "2T", "136", "30", "FCC", "5G", "20M",
- "HT", "2T", "140", "26", "ETSI", "5G", "20M", "HT", "2T",
- "140", "20", "MKK", "5G", "20M", "HT", "2T", "140", "30",
- "FCC", "5G", "20M", "HT", "2T", "144", "26", "ETSI", "5G",
- "20M", "HT", "2T", "144", "63", "MKK", "5G", "20M", "HT",
- "2T", "144", "63", "FCC", "5G", "20M", "HT", "2T", "149",
- "32", "ETSI", "5G", "20M", "HT", "2T", "149", "63", "MKK",
- "5G", "20M", "HT", "2T", "149", "63", "FCC", "5G", "20M",
- "HT", "2T", "153", "32", "ETSI", "5G", "20M", "HT", "2T",
- "153", "63", "MKK", "5G", "20M", "HT", "2T", "153", "63",
- "FCC", "5G", "20M", "HT", "2T", "157", "32", "ETSI", "5G",
- "20M", "HT", "2T", "157", "63", "MKK", "5G", "20M", "HT",
- "2T", "157", "63", "FCC", "5G", "20M", "HT", "2T", "161",
- "32", "ETSI", "5G", "20M", "HT", "2T", "161", "63", "MKK",
- "5G", "20M", "HT", "2T", "161", "63", "FCC", "5G", "20M",
- "HT", "2T", "165", "32", "ETSI", "5G", "20M", "HT", "2T",
- "165", "63", "MKK", "5G", "20M", "HT", "2T", "165", "63",
- "FCC", "5G", "40M", "HT", "1T", "38", "22", "ETSI", "5G",
- "40M", "HT", "1T", "38", "30", "MKK", "5G", "40M", "HT",
- "1T", "38", "30", "FCC", "5G", "40M", "HT", "1T", "46",
- "30", "ETSI", "5G", "40M", "HT", "1T", "46", "30", "MKK",
- "5G", "40M", "HT", "1T", "46", "30", "FCC", "5G", "40M",
- "HT", "1T", "54", "30", "ETSI", "5G", "40M", "HT", "1T",
- "54", "30", "MKK", "5G", "40M", "HT", "1T", "54", "30",
- "FCC", "5G", "40M", "HT", "1T", "62", "24", "ETSI", "5G",
- "40M", "HT", "1T", "62", "30", "MKK", "5G", "40M", "HT",
- "1T", "62", "30", "FCC", "5G", "40M", "HT", "1T", "102",
- "24", "ETSI", "5G", "40M", "HT", "1T", "102", "30", "MKK",
- "5G", "40M", "HT", "1T", "102", "30", "FCC", "5G", "40M",
- "HT", "1T", "110", "30", "ETSI", "5G", "40M", "HT", "1T",
- "110", "30", "MKK", "5G", "40M", "HT", "1T", "110", "30",
- "FCC", "5G", "40M", "HT", "1T", "118", "30", "ETSI", "5G",
- "40M", "HT", "1T", "118", "30", "MKK", "5G", "40M", "HT",
- "1T", "118", "30", "FCC", "5G", "40M", "HT", "1T", "126",
- "30", "ETSI", "5G", "40M", "HT", "1T", "126", "30", "MKK",
- "5G", "40M", "HT", "1T", "126", "30", "FCC", "5G", "40M",
- "HT", "1T", "134", "30", "ETSI", "5G", "40M", "HT", "1T",
- "134", "30", "MKK", "5G", "40M", "HT", "1T", "134", "30",
- "FCC", "5G", "40M", "HT", "1T", "142", "30", "ETSI", "5G",
- "40M", "HT", "1T", "142", "63", "MKK", "5G", "40M", "HT",
- "1T", "142", "63", "FCC", "5G", "40M", "HT", "1T", "151",
- "30", "ETSI", "5G", "40M", "HT", "1T", "151", "63", "MKK",
- "5G", "40M", "HT", "1T", "151", "63", "FCC", "5G", "40M",
- "HT", "1T", "159", "30", "ETSI", "5G", "40M", "HT", "1T",
- "159", "63", "MKK", "5G", "40M", "HT", "1T", "159", "63",
- "FCC", "5G", "40M", "HT", "2T", "38", "20", "ETSI", "5G",
- "40M", "HT", "2T", "38", "20", "MKK", "5G", "40M", "HT",
- "2T", "38", "22", "FCC", "5G", "40M", "HT", "2T", "46",
- "30", "ETSI", "5G", "40M", "HT", "2T", "46", "20", "MKK",
- "5G", "40M", "HT", "2T", "46", "22", "FCC", "5G", "40M",
- "HT", "2T", "54", "30", "ETSI", "5G", "40M", "HT", "2T",
- "54", "20", "MKK", "5G", "40M", "HT", "2T", "54", "22",
- "FCC", "5G", "40M", "HT", "2T", "62", "22", "ETSI", "5G",
- "40M", "HT", "2T", "62", "20", "MKK", "5G", "40M", "HT",
- "2T", "62", "22", "FCC", "5G", "40M", "HT", "2T", "102",
- "22", "ETSI", "5G", "40M", "HT", "2T", "102", "20", "MKK",
- "5G", "40M", "HT", "2T", "102", "30", "FCC", "5G", "40M",
- "HT", "2T", "110", "30", "ETSI", "5G", "40M", "HT", "2T",
- "110", "20", "MKK", "5G", "40M", "HT", "2T", "110", "30",
- "FCC", "5G", "40M", "HT", "2T", "118", "30", "ETSI", "5G",
- "40M", "HT", "2T", "118", "20", "MKK", "5G", "40M", "HT",
- "2T", "118", "30", "FCC", "5G", "40M", "HT", "2T", "126",
- "30", "ETSI", "5G", "40M", "HT", "2T", "126", "20", "MKK",
- "5G", "40M", "HT", "2T", "126", "30", "FCC", "5G", "40M",
- "HT", "2T", "134", "30", "ETSI", "5G", "40M", "HT", "2T",
- "134", "20", "MKK", "5G", "40M", "HT", "2T", "134", "30",
- "FCC", "5G", "40M", "HT", "2T", "142", "30", "ETSI", "5G",
- "40M", "HT", "2T", "142", "63", "MKK", "5G", "40M", "HT",
- "2T", "142", "63", "FCC", "5G", "40M", "HT", "2T", "151",
- "30", "ETSI", "5G", "40M", "HT", "2T", "151", "63", "MKK",
- "5G", "40M", "HT", "2T", "151", "63", "FCC", "5G", "40M",
- "HT", "2T", "159", "30", "ETSI", "5G", "40M", "HT", "2T",
- "159", "63", "MKK", "5G", "40M", "HT", "2T", "159", "63",
- "FCC", "5G", "80M", "VHT", "1T", "42", "20", "ETSI", "5G",
- "80M", "VHT", "1T", "42", "30", "MKK", "5G", "80M", "VHT",
- "1T", "42", "28", "FCC", "5G", "80M", "VHT", "1T", "58",
- "20", "ETSI", "5G", "80M", "VHT", "1T", "58", "30", "MKK",
- "5G", "80M", "VHT", "1T", "58", "28", "FCC", "5G", "80M",
- "VHT", "1T", "106", "20", "ETSI", "5G", "80M", "VHT", "1T",
- "106", "30", "MKK", "5G", "80M", "VHT", "1T", "106", "30",
- "FCC", "5G", "80M", "VHT", "1T", "122", "30", "ETSI", "5G",
- "80M", "VHT", "1T", "122", "30", "MKK", "5G", "80M", "VHT",
- "1T", "122", "30", "FCC", "5G", "80M", "VHT", "1T", "138",
- "30", "ETSI", "5G", "80M", "VHT", "1T", "138", "63", "MKK",
- "5G", "80M", "VHT", "1T", "138", "63", "FCC", "5G", "80M",
- "VHT", "1T", "155", "30", "ETSI", "5G", "80M", "VHT", "1T",
- "155", "63", "MKK", "5G", "80M", "VHT", "1T", "155", "63",
- "FCC", "5G", "80M", "VHT", "2T", "42", "18", "ETSI", "5G",
- "80M", "VHT", "2T", "42", "20", "MKK", "5G", "80M", "VHT",
- "2T", "42", "22", "FCC", "5G", "80M", "VHT", "2T", "58",
- "18", "ETSI", "5G", "80M", "VHT", "2T", "58", "20", "MKK",
- "5G", "80M", "VHT", "2T", "58", "22", "FCC", "5G", "80M",
- "VHT", "2T", "106", "20", "ETSI", "5G", "80M", "VHT", "2T",
- "106", "20", "MKK", "5G", "80M", "VHT", "2T", "106", "30",
- "FCC", "5G", "80M", "VHT", "2T", "122", "30", "ETSI", "5G",
- "80M", "VHT", "2T", "122", "20", "MKK", "5G", "80M", "VHT",
- "2T", "122", "30", "FCC", "5G", "80M", "VHT", "2T", "138",
- "30", "ETSI", "5G", "80M", "VHT", "2T", "138", "63", "MKK",
- "5G", "80M", "VHT", "2T", "138", "63", "FCC", "5G", "80M",
- "VHT", "2T", "155", "30", "ETSI", "5G", "80M", "VHT", "2T",
- "155", "63", "MKK", "5G", "80M", "VHT", "2T", "155", "63"};
-
-void odm_read_and_config_mp_8822b_txpwr_lmt_type5(struct phy_dm_struct *dm)
-{
- u32 i = 0;
- u8 **array = (u8 **)array_mp_8822b_txpwr_lmt_type5;
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT,
- "===> odm_read_and_config_mp_8822b_txpwr_lmt_type5\n");
-
- for (i = 0; i < ARRAY_SIZE(array_mp_8822b_txpwr_lmt_type5); i += 7) {
- u8 *regulation = array[i];
- u8 *band = array[i + 1];
- u8 *bandwidth = array[i + 2];
- u8 *rate = array[i + 3];
- u8 *rf_path = array[i + 4];
- u8 *chnl = array[i + 5];
- u8 *val = array[i + 6];
-
- odm_config_bb_txpwr_lmt_8822b(dm, regulation, band, bandwidth,
- rate, rf_path, chnl, val);
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h
deleted file mode 100644
index 5e259846c67f..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-/*Image2HeaderVersion: 3.2*/
-#ifndef __INC_MP_RF_HW_IMG_8822B_H
-#define __INC_MP_RF_HW_IMG_8822B_H
-
-/******************************************************************************
- * radioa.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_radioa(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_radioa(void);
-
-/******************************************************************************
- * radiob.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_radiob(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_radiob(void);
-
-/******************************************************************************
- * txpowertrack.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack(void);
-
-/******************************************************************************
- * txpowertrack_type0.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type0(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type0(void);
-
-/******************************************************************************
- * txpowertrack_type1.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type1(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type1(void);
-
-/******************************************************************************
- * txpowertrack_type2.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type2(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type2(void);
-
-/******************************************************************************
- * txpowertrack_type3_type5.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type3_type5(
- struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type3_type5(void);
-
-/******************************************************************************
- * txpowertrack_type4.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type4(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type4(void);
-
-/******************************************************************************
- * txpowertrack_type6.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type6(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type6(void);
-
-/******************************************************************************
- * txpowertrack_type7.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type7(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type7(void);
-
-/******************************************************************************
- * txpowertrack_type8.TXT
- *****************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type8(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type8(void);
-
-/******************************************************************************
- * txpowertrack_type9.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpowertrack_type9(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpowertrack_type9(void);
-
-/******************************************************************************
- * txpwr_lmt.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpwr_lmt(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpwr_lmt(void);
-
-/******************************************************************************
- * txpwr_lmt_type5.TXT
- ******************************************************************************/
-
-void odm_read_and_config_mp_8822b_txpwr_lmt_type5(struct phy_dm_struct *dm);
-u32 odm_get_version_mp_8822b_txpwr_lmt_type5(void);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c
deleted file mode 100644
index 9e92a81dc6d1..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c
+++ /dev/null
@@ -1,340 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../mp_precomp.h"
-#include "../phydm_precomp.h"
-
-static bool
-get_mix_mode_tx_agc_bb_swing_offset_8822b(void *dm_void,
- enum pwrtrack_method method,
- u8 rf_path, u8 tx_power_index_offset)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- u8 bb_swing_upper_bound = cali_info->default_ofdm_index + 10;
- u8 bb_swing_lower_bound = 0;
-
- s8 tx_agc_index = 0;
- u8 tx_bb_swing_index = cali_info->default_ofdm_index;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "Path_%d cali_info->absolute_ofdm_swing_idx[rf_path]=%d, tx_power_index_offset=%d\n",
- rf_path, cali_info->absolute_ofdm_swing_idx[rf_path],
- tx_power_index_offset);
-
- if (tx_power_index_offset > 0XF)
- tx_power_index_offset = 0XF;
-
- if (cali_info->absolute_ofdm_swing_idx[rf_path] >= 0 &&
- cali_info->absolute_ofdm_swing_idx[rf_path] <=
- tx_power_index_offset) {
- tx_agc_index = cali_info->absolute_ofdm_swing_idx[rf_path];
- tx_bb_swing_index = cali_info->default_ofdm_index;
- } else if (cali_info->absolute_ofdm_swing_idx[rf_path] >
- tx_power_index_offset) {
- tx_agc_index = tx_power_index_offset;
- cali_info->remnant_ofdm_swing_idx[rf_path] =
- cali_info->absolute_ofdm_swing_idx[rf_path] -
- tx_power_index_offset;
- tx_bb_swing_index = cali_info->default_ofdm_index +
- cali_info->remnant_ofdm_swing_idx[rf_path];
-
- if (tx_bb_swing_index > bb_swing_upper_bound)
- tx_bb_swing_index = bb_swing_upper_bound;
- } else {
- tx_agc_index = 0;
-
- if (cali_info->default_ofdm_index >
- (cali_info->absolute_ofdm_swing_idx[rf_path] * (-1)))
- tx_bb_swing_index =
- cali_info->default_ofdm_index +
- cali_info->absolute_ofdm_swing_idx[rf_path];
- else
- tx_bb_swing_index = bb_swing_lower_bound;
-
- if (tx_bb_swing_index < bb_swing_lower_bound)
- tx_bb_swing_index = bb_swing_lower_bound;
- }
-
- cali_info->absolute_ofdm_swing_idx[rf_path] = tx_agc_index;
- cali_info->bb_swing_idx_ofdm[rf_path] = tx_bb_swing_index;
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "MixMode Offset Path_%d cali_info->absolute_ofdm_swing_idx[rf_path]=%d cali_info->bb_swing_idx_ofdm[rf_path]=%d tx_power_index_offset=%d\n",
- rf_path, cali_info->absolute_ofdm_swing_idx[rf_path],
- cali_info->bb_swing_idx_ofdm[rf_path], tx_power_index_offset);
-
- return true;
-}
-
-void odm_tx_pwr_track_set_pwr8822b(void *dm_void, enum pwrtrack_method method,
- u8 rf_path, u8 channel_mapped_index)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
- u8 tx_power_index_offset = 0;
- u8 tx_power_index = 0;
-
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 channel = rtlphy->current_channel;
- u8 band_width = rtlphy->current_chan_bw;
- u8 tx_rate = 0xFF;
-
- if (!dm->mp_mode) {
- u16 rate = *dm->forced_data_rate;
-
- if (!rate) /*auto rate*/
- tx_rate = dm->tx_rate;
- else /*force rate*/
- tx_rate = (u8)rate;
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK, "Call:%s tx_rate=0x%X\n",
- __func__, tx_rate);
-
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "pRF->default_ofdm_index=%d pRF->default_cck_index=%d\n",
- cali_info->default_ofdm_index,
- cali_info->default_cck_index);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "pRF->absolute_ofdm_swing_idx=%d pRF->remnant_ofdm_swing_idx=%d pRF->absolute_cck_swing_idx=%d pRF->remnant_cck_swing_idx=%d rf_path=%d\n",
- cali_info->absolute_ofdm_swing_idx[rf_path],
- cali_info->remnant_ofdm_swing_idx[rf_path],
- cali_info->absolute_cck_swing_idx[rf_path],
- cali_info->remnant_cck_swing_idx, rf_path);
-
- if (dm->number_linked_client != 0)
- tx_power_index = odm_get_tx_power_index(
- dm, (enum odm_rf_radio_path)rf_path, tx_rate,
- band_width, channel);
-
- if (tx_power_index >= 63)
- tx_power_index = 63;
-
- tx_power_index_offset = 63 - tx_power_index;
-
- ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "tx_power_index=%d tx_power_index_offset=%d rf_path=%d\n",
- tx_power_index, tx_power_index_offset, rf_path);
-
- if (method ==
-	    BBSWING) { /* used by the MP driver to clear power tracking status */
- switch (rf_path) {
- case ODM_RF_PATH_A:
- odm_set_bb_reg(
- dm, 0xC94, (BIT(29) | BIT(28) | BIT(27) |
- BIT(26) | BIT(25)),
- cali_info->absolute_ofdm_swing_idx[rf_path]);
- odm_set_bb_reg(
- dm, REG_A_TX_SCALE_JAGUAR, 0xFFE00000,
- tx_scaling_table_jaguar
- [cali_info
- ->bb_swing_idx_ofdm[rf_path]]);
- break;
- case ODM_RF_PATH_B:
- odm_set_bb_reg(
- dm, 0xE94, (BIT(29) | BIT(28) | BIT(27) |
- BIT(26) | BIT(25)),
- cali_info->absolute_ofdm_swing_idx[rf_path]);
- odm_set_bb_reg(
- dm, REG_B_TX_SCALE_JAGUAR, 0xFFE00000,
- tx_scaling_table_jaguar
- [cali_info
- ->bb_swing_idx_ofdm[rf_path]]);
- break;
-
- default:
- break;
- }
- } else if (method == MIX_MODE) {
- switch (rf_path) {
- case ODM_RF_PATH_A:
- get_mix_mode_tx_agc_bb_swing_offset_8822b(
- dm, method, rf_path, tx_power_index_offset);
- odm_set_bb_reg(
- dm, 0xC94, (BIT(29) | BIT(28) | BIT(27) |
- BIT(26) | BIT(25)),
- cali_info->absolute_ofdm_swing_idx[rf_path]);
- odm_set_bb_reg(
- dm, REG_A_TX_SCALE_JAGUAR, 0xFFE00000,
- tx_scaling_table_jaguar
- [cali_info
- ->bb_swing_idx_ofdm[rf_path]]);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "TXAGC(0xC94)=0x%x BBSwing(0xc1c)=0x%x BBSwingIndex=%d rf_path=%d\n",
- odm_get_bb_reg(dm, 0xC94,
- (BIT(29) | BIT(28) | BIT(27) |
- BIT(26) | BIT(25))),
- odm_get_bb_reg(dm, 0xc1c, 0xFFE00000),
- cali_info->bb_swing_idx_ofdm[rf_path], rf_path);
- break;
-
- case ODM_RF_PATH_B:
- get_mix_mode_tx_agc_bb_swing_offset_8822b(
- dm, method, rf_path, tx_power_index_offset);
- odm_set_bb_reg(
- dm, 0xE94, (BIT(29) | BIT(28) | BIT(27) |
- BIT(26) | BIT(25)),
- cali_info->absolute_ofdm_swing_idx[rf_path]);
- odm_set_bb_reg(
- dm, REG_B_TX_SCALE_JAGUAR, 0xFFE00000,
- tx_scaling_table_jaguar
- [cali_info
- ->bb_swing_idx_ofdm[rf_path]]);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_TX_PWR_TRACK,
- "TXAGC(0xE94)=0x%x BBSwing(0xe1c)=0x%x BBSwingIndex=%d rf_path=%d\n",
- odm_get_bb_reg(dm, 0xE94,
- (BIT(29) | BIT(28) | BIT(27) |
- BIT(26) | BIT(25))),
- odm_get_bb_reg(dm, 0xe1c, 0xFFE00000),
- cali_info->bb_swing_idx_ofdm[rf_path], rf_path);
- break;
-
- default:
- break;
- }
- }
-}
-
-void get_delta_swing_table_8822b(void *dm_void, u8 **temperature_up_a,
- u8 **temperature_down_a, u8 **temperature_up_b,
- u8 **temperature_down_b)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
-
- struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 channel = rtlphy->current_channel;
-
- *temperature_up_a = cali_info->delta_swing_table_idx_2ga_p;
- *temperature_down_a = cali_info->delta_swing_table_idx_2ga_n;
- *temperature_up_b = cali_info->delta_swing_table_idx_2gb_p;
- *temperature_down_b = cali_info->delta_swing_table_idx_2gb_n;
-
- if (channel >= 36 && channel <= 64) {
- *temperature_up_a = cali_info->delta_swing_table_idx_5ga_p[0];
- *temperature_down_a = cali_info->delta_swing_table_idx_5ga_n[0];
- *temperature_up_b = cali_info->delta_swing_table_idx_5gb_p[0];
- *temperature_down_b = cali_info->delta_swing_table_idx_5gb_n[0];
- } else if (channel >= 100 && channel <= 144) {
- *temperature_up_a = cali_info->delta_swing_table_idx_5ga_p[1];
- *temperature_down_a = cali_info->delta_swing_table_idx_5ga_n[1];
- *temperature_up_b = cali_info->delta_swing_table_idx_5gb_p[1];
- *temperature_down_b = cali_info->delta_swing_table_idx_5gb_n[1];
- } else if (channel >= 149 && channel <= 177) {
- *temperature_up_a = cali_info->delta_swing_table_idx_5ga_p[2];
- *temperature_down_a = cali_info->delta_swing_table_idx_5ga_n[2];
- *temperature_up_b = cali_info->delta_swing_table_idx_5gb_p[2];
- *temperature_down_b = cali_info->delta_swing_table_idx_5gb_n[2];
- }
-}
-
-static void _phy_lc_calibrate_8822b(struct phy_dm_struct *dm)
-{
- u32 lc_cal = 0, cnt = 0;
-
- /*backup RF0x18*/
- lc_cal = odm_get_rf_reg(dm, ODM_RF_PATH_A, RF_CHNLBW, RFREGOFFSETMASK);
-
- /*Start LCK*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, RF_CHNLBW, RFREGOFFSETMASK,
- lc_cal | 0x08000);
-
- ODM_delay_ms(100);
-
- for (cnt = 0; cnt < 100; cnt++) {
- if (odm_get_rf_reg(dm, ODM_RF_PATH_A, RF_CHNLBW, 0x8000) != 0x1)
- break;
- ODM_delay_ms(10);
- }
-
- /*Recover channel number*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, RF_CHNLBW, RFREGOFFSETMASK, lc_cal);
-}
-
-void phy_lc_calibrate_8822b(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- bool is_start_cont_tx = false, is_single_tone = false,
- is_carrier_suppression = false;
- u64 start_time;
- u64 progressing_time;
-
- if (is_start_cont_tx || is_single_tone || is_carrier_suppression) {
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[LCK]continues TX ing !!! LCK return\n");
- return;
- }
-
- start_time = odm_get_current_time(dm);
- _phy_lc_calibrate_8822b(dm);
- progressing_time = odm_get_progressing_time(dm, start_time);
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[LCK]LCK progressing_time = %lld\n", progressing_time);
-}
-
-void configure_txpower_track_8822b(struct txpwrtrack_cfg *config)
-{
- config->swing_table_size_cck = TXSCALE_TABLE_SIZE;
- config->swing_table_size_ofdm = TXSCALE_TABLE_SIZE;
- config->threshold_iqk = IQK_THRESHOLD;
- config->threshold_dpk = DPK_THRESHOLD;
- config->average_thermal_num = AVG_THERMAL_NUM_8822B;
- config->rf_path_count = MAX_PATH_NUM_8822B;
- config->thermal_reg_addr = RF_T_METER_8822B;
-
- config->odm_tx_pwr_track_set_pwr = odm_tx_pwr_track_set_pwr8822b;
- config->do_iqk = do_iqk_8822b;
- config->phy_lc_calibrate = phy_lc_calibrate_8822b;
-
- config->get_delta_swing_table = get_delta_swing_table_8822b;
-}
-
-void phy_set_rf_path_switch_8822b(struct phy_dm_struct *dm, bool is_main)
-{
- /*BY SY Request */
- odm_set_bb_reg(dm, 0x4C, (BIT(24) | BIT(23)), 0x2);
- odm_set_bb_reg(dm, 0x974, 0xff, 0xff);
-
- /*odm_set_bb_reg(dm, 0x1991, 0x3, 0x0);*/
- odm_set_bb_reg(dm, 0x1990, (BIT(9) | BIT(8)), 0x0);
-
- /*odm_set_bb_reg(dm, 0xCBE, 0x8, 0x0);*/
- odm_set_bb_reg(dm, 0xCBC, BIT(19), 0x0);
-
- odm_set_bb_reg(dm, 0xCB4, 0xff, 0x77);
-
- odm_set_bb_reg(dm, 0x70, MASKBYTE3, 0x0e);
- odm_set_bb_reg(dm, 0x1704, MASKDWORD, 0x0000ff00);
- odm_set_bb_reg(dm, 0x1700, MASKDWORD, 0xc00f0038);
-
- if (is_main) {
- /*odm_set_bb_reg(dm, 0xCBD, 0x3, 0x2); WiFi */
- odm_set_bb_reg(dm, 0xCBC, (BIT(9) | BIT(8)), 0x2); /*WiFi */
- } else {
- /*odm_set_bb_reg(dm, 0xCBD, 0x3, 0x1); BT*/
- odm_set_bb_reg(dm, 0xCBC, (BIT(9) | BIT(8)), 0x1); /*BT*/
- }
-}
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h
deleted file mode 100644
index 794ee33ea7df..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __HAL_PHY_RF_8822B_H__
-#define __HAL_PHY_RF_8822B_H__
-
-#define AVG_THERMAL_NUM_8822B 4
-#define RF_T_METER_8822B 0x42
-
-void configure_txpower_track_8822b(struct txpwrtrack_cfg *config);
-
-void odm_tx_pwr_track_set_pwr8822b(void *dm_void, enum pwrtrack_method method,
- u8 rf_path, u8 channel_mapped_index);
-
-void get_delta_swing_table_8822b(void *dm_void, u8 **temperature_up_a,
- u8 **temperature_down_a, u8 **temperature_up_b,
- u8 **temperature_down_b);
-
-void phy_lc_calibrate_8822b(void *dm_void);
-
-void phy_set_rf_path_switch_8822b(struct phy_dm_struct *dm, bool is_main);
-
-#endif /* #ifndef __HAL_PHY_RF_8822B_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c
deleted file mode 100644
index 776096164b80..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c
+++ /dev/null
@@ -1,1804 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../mp_precomp.h"
-#include "../phydm_precomp.h"
-
-/* ======================================================================== */
-/* The following functions can be used for PHY DM only */
-
-static u32 reg82c_8822b;
-static u32 reg838_8822b;
-static u32 reg830_8822b;
-static u32 reg83c_8822b;
-static u32 rega20_8822b;
-static u32 rega24_8822b;
-static u32 rega28_8822b;
-static enum odm_bw bw_8822b;
-static u8 central_ch_8822b;
-
-static u32 cca_ifem_ccut[12][4] = {
- /*20M*/
- {0x75D97010, 0x75D97010, 0x75D97010, 0x75D97010}, /*Reg82C*/
- {0x00000000, 0x79a0ea2c, 0x00000000, 0x00000000}, /*Reg830*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg83C*/
- /*40M*/
- {0x75D97010, 0x75D97010, 0x75D97010, 0x75D97010}, /*Reg82C*/
- {0x00000000, 0x79a0ea2c, 0x00000000, 0x79a0ea28}, /*Reg830*/
- {0x87765541, 0x87766341, 0x87765541, 0x87766341}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg83C*/
- /*80M*/
- {0x75C97010, 0x75C97010, 0x75C97010, 0x75C97010}, /*Reg82C*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg830*/
- {0x00000000, 0x87746641, 0x00000000, 0x87746641}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000},
-}; /*Reg83C*/
-static u32 cca_efem_ccut[12][4] = {
- /*20M*/
- {0x75A76010, 0x75A76010, 0x75A76010, 0x75A75010}, /*Reg82C*/
- {0x00000000, 0x79a0ea2c, 0x00000000, 0x00000000}, /*Reg830*/
- {0x87766651, 0x87766431, 0x87766451, 0x87766431}, /*Reg838*/
- {0x9194b2b9, 0x9194b2b9, 0x9194b2b9, 0x9194b2b9}, /*Reg83C*/
- /*40M*/
- {0x75A85010, 0x75A75010, 0x75A85010, 0x75A75010}, /*Reg82C*/
- {0x00000000, 0x79a0ea2c, 0x00000000, 0x00000000}, /*Reg830*/
- {0x87766431, 0x87766431, 0x87766431, 0x87766431}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg83C*/
- /*80M*/
- {0x76BA7010, 0x75BA7010, 0x76BA7010, 0x75BA7010}, /*Reg82C*/
- {0x79a0ea28, 0x00000000, 0x79a0ea28, 0x00000000}, /*Reg830*/
- {0x87766431, 0x87766431, 0x87766431, 0x87766431}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000},
-}; /*Reg83C*/
-static u32 cca_ifem_ccut_rfetype5[12][4] = {
- /*20M*/
- {0x75D97010, 0x75D97010, 0x75D97010, 0x75D97010}, /*Reg82C*/
- {0x00000000, 0x79a0ea2c, 0x00000000, 0x00000000}, /*Reg830*/
- {0x00000000, 0x00000000, 0x87766461, 0x87766461}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg83C*/
- /*40M*/
- {0x75D97010, 0x75D97010, 0x75D97010, 0x75D97010}, /*Reg82C*/
- {0x00000000, 0x79a0ea2c, 0x00000000, 0x79a0ea28}, /*Reg830*/
- {0x87765541, 0x87766341, 0x87765541, 0x87766341}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg83C*/
- /*80M*/
- {0x75C97010, 0x75C97010, 0x75C97010, 0x75C97010}, /*Reg82C*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg830*/
- {0x00000000, 0x76666641, 0x00000000, 0x76666641}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000},
-}; /*Reg83C*/
-static u32 cca_ifem_ccut_rfetype3[12][4] = {
- /*20M*/
- {0x75D97010, 0x75D97010, 0x75D97010, 0x75D97010}, /*Reg82C*/
- {0x00000000, 0x79a0ea2c, 0x00000000, 0x00000000}, /*Reg830*/
- {0x00000000, 0x00000000, 0x87766461, 0x87766461}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg83C*/
- /*40M*/
- {0x75D97010, 0x75D97010, 0x75D97010, 0x75D97010}, /*Reg82C*/
- {0x00000000, 0x79a0ea2c, 0x00000000, 0x79a0ea28}, /*Reg830*/
- {0x87765541, 0x87766341, 0x87765541, 0x87766341}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg83C*/
- /*80M*/
- {0x75C97010, 0x75C97010, 0x75C97010, 0x75C97010}, /*Reg82C*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000}, /*Reg830*/
- {0x00000000, 0x76666641, 0x00000000, 0x76666641}, /*Reg838*/
- {0x00000000, 0x00000000, 0x00000000, 0x00000000},
-}; /*Reg83C*/
-
-static inline u32 phydm_check_bit_mask(u32 bit_mask, u32 data_original,
- u32 data)
-{
- u8 bit_shift;
-
- if (bit_mask != 0xfffff) {
- for (bit_shift = 0; bit_shift <= 19; bit_shift++) {
- if (((bit_mask >> bit_shift) & 0x1) == 1)
- break;
- }
- return ((data_original) & (~bit_mask)) | (data << bit_shift);
- }
- return data;
-}
-
-static bool phydm_rfe_8822b(struct phy_dm_struct *dm, u8 channel)
-{
- if (dm->rfe_type == 4) {
- /* Default setting is in PHY parameters */
-
- if (channel <= 14) {
- /* signal source */
- odm_set_bb_reg(dm, 0xcb0, (MASKBYTE2 | MASKLWORD),
- 0x745774);
- odm_set_bb_reg(dm, 0xeb0, (MASKBYTE2 | MASKLWORD),
- 0x745774);
- odm_set_bb_reg(dm, 0xcb4, MASKBYTE1, 0x57);
- odm_set_bb_reg(dm, 0xeb4, MASKBYTE1, 0x57);
-
- /* inverse or not */
- odm_set_bb_reg(dm, 0xcbc, (BIT(5) | BIT(4) | BIT(3) |
- BIT(2) | BIT(1) | BIT(0)),
- 0x8);
- odm_set_bb_reg(dm, 0xcbc, (BIT(11) | BIT(10)), 0x2);
- odm_set_bb_reg(dm, 0xebc, (BIT(5) | BIT(4) | BIT(3) |
- BIT(2) | BIT(1) | BIT(0)),
- 0x8);
- odm_set_bb_reg(dm, 0xebc, (BIT(11) | BIT(10)), 0x2);
-
- /* antenna switch table */
- if ((dm->rx_ant_status == (ODM_RF_A | ODM_RF_B)) ||
- (dm->tx_ant_status == (ODM_RF_A | ODM_RF_B))) {
- /* 2TX or 2RX */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xf050);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xf050);
- } else if (dm->rx_ant_status == dm->tx_ant_status) {
- /* TXA+RXA or TXB+RXB */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xf055);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xf055);
- } else {
- /* TXB+RXA or TXA+RXB */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xf550);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xf550);
- }
-
- } else if (channel > 35) {
- /* signal source */
- odm_set_bb_reg(dm, 0xcb0, (MASKBYTE2 | MASKLWORD),
- 0x477547);
- odm_set_bb_reg(dm, 0xeb0, (MASKBYTE2 | MASKLWORD),
- 0x477547);
- odm_set_bb_reg(dm, 0xcb4, MASKBYTE1, 0x75);
- odm_set_bb_reg(dm, 0xeb4, MASKBYTE1, 0x75);
-
- /* inverse or not */
- odm_set_bb_reg(dm, 0xcbc, (BIT(5) | BIT(4) | BIT(3) |
- BIT(2) | BIT(1) | BIT(0)),
- 0x0);
- odm_set_bb_reg(dm, 0xcbc, (BIT(11) | BIT(10)), 0x0);
- odm_set_bb_reg(dm, 0xebc, (BIT(5) | BIT(4) | BIT(3) |
- BIT(2) | BIT(1) | BIT(0)),
- 0x0);
- odm_set_bb_reg(dm, 0xebc, (BIT(11) | BIT(10)), 0x0);
-
- /* antenna switch table */
- if ((dm->rx_ant_status == (ODM_RF_A | ODM_RF_B)) ||
- (dm->tx_ant_status == (ODM_RF_A | ODM_RF_B))) {
- /* 2TX or 2RX */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa501);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa501);
- } else if (dm->rx_ant_status == dm->tx_ant_status) {
- /* TXA+RXA or TXB+RXB */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa500);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa500);
- } else {
- /* TXB+RXA or TXA+RXB */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa005);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa005);
- }
- } else {
- return false;
- }
-
- } else if ((dm->rfe_type == 1) || (dm->rfe_type == 2) ||
- (dm->rfe_type == 7) || (dm->rfe_type == 9)) {
- /* eFem */
- if (((dm->cut_version == ODM_CUT_A) ||
- (dm->cut_version == ODM_CUT_B)) &&
- (dm->rfe_type < 2)) {
- if (channel <= 14) {
- /* signal source */
- odm_set_bb_reg(dm, 0xcb0,
- (MASKBYTE2 | MASKLWORD),
- 0x704570);
- odm_set_bb_reg(dm, 0xeb0,
- (MASKBYTE2 | MASKLWORD),
- 0x704570);
- odm_set_bb_reg(dm, 0xcb4, MASKBYTE1, 0x45);
- odm_set_bb_reg(dm, 0xeb4, MASKBYTE1, 0x45);
- } else if (channel > 35) {
- odm_set_bb_reg(dm, 0xcb0,
- (MASKBYTE2 | MASKLWORD),
- 0x174517);
- odm_set_bb_reg(dm, 0xeb0,
- (MASKBYTE2 | MASKLWORD),
- 0x174517);
- odm_set_bb_reg(dm, 0xcb4, MASKBYTE1, 0x45);
- odm_set_bb_reg(dm, 0xeb4, MASKBYTE1, 0x45);
- } else {
- return false;
- }
-
- /* delay 400ns for PAPE */
- odm_set_bb_reg(dm, 0x810,
- MASKBYTE3 | BIT(20) | BIT(21) | BIT(22) |
- BIT(23),
- 0x211);
-
- /* antenna switch table */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa555);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa555);
-
- /* inverse or not */
- odm_set_bb_reg(dm, 0xcbc, (BIT(5) | BIT(4) | BIT(3) |
- BIT(2) | BIT(1) | BIT(0)),
- 0x0);
- odm_set_bb_reg(dm, 0xcbc, (BIT(11) | BIT(10)), 0x0);
- odm_set_bb_reg(dm, 0xebc, (BIT(5) | BIT(4) | BIT(3) |
- BIT(2) | BIT(1) | BIT(0)),
- 0x0);
- odm_set_bb_reg(dm, 0xebc, (BIT(11) | BIT(10)), 0x0);
-
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s: Using old RFE control pin setting for A-cut and B-cut\n",
- __func__);
- } else {
- if (channel <= 14) {
- /* signal source */
- odm_set_bb_reg(dm, 0xcb0,
- (MASKBYTE2 | MASKLWORD),
- 0x705770);
- odm_set_bb_reg(dm, 0xeb0,
- (MASKBYTE2 | MASKLWORD),
- 0x705770);
- odm_set_bb_reg(dm, 0xcb4, MASKBYTE1, 0x57);
- odm_set_bb_reg(dm, 0xeb4, MASKBYTE1, 0x57);
- odm_set_bb_reg(dm, 0xcb8, BIT(4), 0);
- odm_set_bb_reg(dm, 0xeb8, BIT(4), 0);
- } else if (channel > 35) {
- /* signal source */
- odm_set_bb_reg(dm, 0xcb0,
- (MASKBYTE2 | MASKLWORD),
- 0x177517);
- odm_set_bb_reg(dm, 0xeb0,
- (MASKBYTE2 | MASKLWORD),
- 0x177517);
- odm_set_bb_reg(dm, 0xcb4, MASKBYTE1, 0x75);
- odm_set_bb_reg(dm, 0xeb4, MASKBYTE1, 0x75);
- odm_set_bb_reg(dm, 0xcb8, BIT(5), 0);
- odm_set_bb_reg(dm, 0xeb8, BIT(5), 0);
- } else {
- return false;
- }
-
- /* inverse or not */
- odm_set_bb_reg(dm, 0xcbc, (BIT(5) | BIT(4) | BIT(3) |
- BIT(2) | BIT(1) | BIT(0)),
- 0x0);
- odm_set_bb_reg(dm, 0xcbc, (BIT(11) | BIT(10)), 0x0);
- odm_set_bb_reg(dm, 0xebc, (BIT(5) | BIT(4) | BIT(3) |
- BIT(2) | BIT(1) | BIT(0)),
- 0x0);
- odm_set_bb_reg(dm, 0xebc, (BIT(11) | BIT(10)), 0x0);
-
- /* antenna switch table */
- if ((dm->rx_ant_status == (ODM_RF_A | ODM_RF_B)) ||
- (dm->tx_ant_status == (ODM_RF_A | ODM_RF_B))) {
- /* 2TX or 2RX */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa501);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa501);
- } else if (dm->rx_ant_status == dm->tx_ant_status) {
- /* TXA+RXA or TXB+RXB */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa500);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa500);
- } else {
- /* TXB+RXA or TXA+RXB */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa005);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa005);
- }
- }
- } else if ((dm->rfe_type == 0) || (dm->rfe_type == 3) ||
- (dm->rfe_type == 5) || (dm->rfe_type == 6) ||
- (dm->rfe_type == 8) || (dm->rfe_type == 10)) {
- /* iFEM */
- if (channel <= 14) {
- /* signal source */
-
- odm_set_bb_reg(dm, 0xcb0, (MASKBYTE2 | MASKLWORD),
- 0x745774);
- odm_set_bb_reg(dm, 0xeb0, (MASKBYTE2 | MASKLWORD),
- 0x745774);
- odm_set_bb_reg(dm, 0xcb4, MASKBYTE1, 0x57);
- odm_set_bb_reg(dm, 0xeb4, MASKBYTE1, 0x57);
-
- } else if (channel > 35) {
- /* signal source */
-
- odm_set_bb_reg(dm, 0xcb0, (MASKBYTE2 | MASKLWORD),
- 0x477547);
- odm_set_bb_reg(dm, 0xeb0, (MASKBYTE2 | MASKLWORD),
- 0x477547);
- odm_set_bb_reg(dm, 0xcb4, MASKBYTE1, 0x75);
- odm_set_bb_reg(dm, 0xeb4, MASKBYTE1, 0x75);
-
- } else {
- return false;
- }
-
- /* inverse or not */
- odm_set_bb_reg(dm, 0xcbc, (BIT(5) | BIT(4) | BIT(3) | BIT(2) |
- BIT(1) | BIT(0)),
- 0x0);
- odm_set_bb_reg(dm, 0xcbc, (BIT(11) | BIT(10)), 0x0);
- odm_set_bb_reg(dm, 0xebc, (BIT(5) | BIT(4) | BIT(3) | BIT(2) |
- BIT(1) | BIT(0)),
- 0x0);
- odm_set_bb_reg(dm, 0xebc, (BIT(11) | BIT(10)), 0x0);
-
- /* antenna switch table */
- if (channel <= 14) {
- if ((dm->rx_ant_status == (ODM_RF_A | ODM_RF_B)) ||
- (dm->tx_ant_status == (ODM_RF_A | ODM_RF_B))) {
- /* 2TX or 2RX */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa501);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa501);
- } else if (dm->rx_ant_status == dm->tx_ant_status) {
- /* TXA+RXA or TXB+RXB */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa500);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa500);
- } else {
- /* TXB+RXA or TXA+RXB */
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa005);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa005);
- }
- } else if (channel > 35) {
- odm_set_bb_reg(dm, 0xca0, MASKLWORD, 0xa5a5);
- odm_set_bb_reg(dm, 0xea0, MASKLWORD, 0xa5a5);
- }
- }
-
- /* chip top mux */
- odm_set_bb_reg(dm, 0x64, BIT(29) | BIT(28), 0x3);
- odm_set_bb_reg(dm, 0x4c, BIT(26) | BIT(25), 0x0);
- odm_set_bb_reg(dm, 0x40, BIT(2), 0x1);
-
- /* from s0 or s1 */
- odm_set_bb_reg(dm, 0x1990,
- (BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)),
- 0x30);
- odm_set_bb_reg(dm, 0x1990, (BIT(11) | BIT(10)), 0x3);
-
- /* input or output */
- odm_set_bb_reg(dm, 0x974,
- (BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)),
- 0x3f);
- odm_set_bb_reg(dm, 0x974, (BIT(11) | BIT(10)), 0x3);
-
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s: Update RFE control pin setting (ch%d, tx_path 0x%x, rx_path 0x%x)\n",
- __func__, channel, dm->tx_ant_status, dm->rx_ant_status);
-
- return true;
-}
-
-static void phydm_ccapar_by_rfe_8822b(struct phy_dm_struct *dm)
-{
- u32 cca_ifem[12][4], cca_efem[12][4];
- u8 row, col;
- u32 reg82c, reg830, reg838, reg83c;
-
- if (dm->cut_version == ODM_CUT_A)
- return;
- {
- odm_move_memory(dm, cca_efem, cca_efem_ccut, 48 * 4);
- if (dm->rfe_type == 5)
- odm_move_memory(dm, cca_ifem, cca_ifem_ccut_rfetype5,
- 48 * 4);
- else if (dm->rfe_type == 3)
- odm_move_memory(dm, cca_ifem, cca_ifem_ccut_rfetype3,
- 48 * 4);
- else
- odm_move_memory(dm, cca_ifem, cca_ifem_ccut, 48 * 4);
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s: Update CCA parameters for Ccut\n", __func__);
- }
-
- if (bw_8822b == ODM_BW20M)
- row = 0;
- else if (bw_8822b == ODM_BW40M)
- row = 4;
- else
- row = 8;
-
- if (central_ch_8822b <= 14) {
- if ((dm->rx_ant_status == ODM_RF_A) ||
- (dm->rx_ant_status == ODM_RF_B))
- col = 0;
- else
- col = 1;
- } else {
- if ((dm->rx_ant_status == ODM_RF_A) ||
- (dm->rx_ant_status == ODM_RF_B))
- col = 2;
- else
- col = 3;
- }
-
- if ((dm->rfe_type == 1) || (dm->rfe_type == 4) || (dm->rfe_type == 6) ||
- (dm->rfe_type == 7)) {
- /*eFEM => RFE type 1 & RFE type 4 & RFE type 6 & RFE type 7*/
- reg82c = (cca_efem[row][col] != 0) ? cca_efem[row][col] :
- reg82c_8822b;
- reg830 = (cca_efem[row + 1][col] != 0) ?
- cca_efem[row + 1][col] :
- reg830_8822b;
- reg838 = (cca_efem[row + 2][col] != 0) ?
- cca_efem[row + 2][col] :
- reg838_8822b;
- reg83c = (cca_efem[row + 3][col] != 0) ?
- cca_efem[row + 3][col] :
- reg83c_8822b;
- } else if ((dm->rfe_type == 2) || (dm->rfe_type == 9)) {
- /*5G eFEM, 2G iFEM => RFE type 2, 5G eFEM => RFE type 9 */
- if (central_ch_8822b <= 14) {
- reg82c = (cca_ifem[row][col] != 0) ?
- cca_ifem[row][col] :
- reg82c_8822b;
- reg830 = (cca_ifem[row + 1][col] != 0) ?
- cca_ifem[row + 1][col] :
- reg830_8822b;
- reg838 = (cca_ifem[row + 2][col] != 0) ?
- cca_ifem[row + 2][col] :
- reg838_8822b;
- reg83c = (cca_ifem[row + 3][col] != 0) ?
- cca_ifem[row + 3][col] :
- reg83c_8822b;
- } else {
- reg82c = (cca_efem[row][col] != 0) ?
- cca_efem[row][col] :
- reg82c_8822b;
- reg830 = (cca_efem[row + 1][col] != 0) ?
- cca_efem[row + 1][col] :
- reg830_8822b;
- reg838 = (cca_efem[row + 2][col] != 0) ?
- cca_efem[row + 2][col] :
- reg838_8822b;
- reg83c = (cca_efem[row + 3][col] != 0) ?
- cca_efem[row + 3][col] :
- reg83c_8822b;
- }
- } else {
- /* iFEM =>RFE type 3 & RFE type 5 & RFE type 0 & RFE type 8 &
- * RFE type 10
- */
- reg82c = (cca_ifem[row][col] != 0) ? cca_ifem[row][col] :
- reg82c_8822b;
- reg830 = (cca_ifem[row + 1][col] != 0) ?
- cca_ifem[row + 1][col] :
- reg830_8822b;
- reg838 = (cca_ifem[row + 2][col] != 0) ?
- cca_ifem[row + 2][col] :
- reg838_8822b;
- reg83c = (cca_ifem[row + 3][col] != 0) ?
- cca_ifem[row + 3][col] :
- reg83c_8822b;
- }
-
- odm_set_bb_reg(dm, 0x82c, MASKDWORD, reg82c);
- odm_set_bb_reg(dm, 0x830, MASKDWORD, reg830);
- odm_set_bb_reg(dm, 0x838, MASKDWORD, reg838);
- odm_set_bb_reg(dm, 0x83c, MASKDWORD, reg83c);
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s: (Pkt%d, Intf%d, RFE%d), row = %d, col = %d\n",
- __func__, dm->package_type, dm->support_interface,
- dm->rfe_type, row, col);
-}
-
-static void phydm_ccapar_by_bw_8822b(struct phy_dm_struct *dm,
- enum odm_bw bandwidth)
-{
- u32 reg82c;
-
- if (dm->cut_version != ODM_CUT_A)
- return;
-
- /* A-cut */
- reg82c = odm_get_bb_reg(dm, 0x82c, MASKDWORD);
-
- if (bandwidth == ODM_BW20M) {
- /* 82c[15:12] = 4 */
- /* 82c[27:24] = 6 */
-
- reg82c &= (~(0x0f00f000));
- reg82c |= ((0x4) << 12);
- reg82c |= ((0x6) << 24);
- } else if (bandwidth == ODM_BW40M) {
- /* 82c[19:16] = 9 */
- /* 82c[27:24] = 6 */
-
- reg82c &= (~(0x0f0f0000));
- reg82c |= ((0x9) << 16);
- reg82c |= ((0x6) << 24);
- } else if (bandwidth == ODM_BW80M) {
- /* 82c[15:12] 7 */
- /* 82c[19:16] b */
- /* 82c[23:20] d */
- /* 82c[27:24] 3 */
-
- reg82c &= (~(0x0ffff000));
- reg82c |= ((0xdb7) << 12);
- reg82c |= ((0x3) << 24);
- }
-
- odm_set_bb_reg(dm, 0x82c, MASKDWORD, reg82c);
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Update CCA parameters for Acut\n", __func__);
-}
-
-static void phydm_ccapar_by_rxpath_8822b(struct phy_dm_struct *dm)
-{
- if (dm->cut_version != ODM_CUT_A)
- return;
-
- if ((dm->rx_ant_status == ODM_RF_A) ||
- (dm->rx_ant_status == ODM_RF_B)) {
- /* 838[7:4] = 8 */
- /* 838[11:8] = 7 */
- /* 838[15:12] = 6 */
- /* 838[19:16] = 7 */
- /* 838[23:20] = 7 */
- /* 838[27:24] = 7 */
- odm_set_bb_reg(dm, 0x838, 0x0ffffff0, 0x777678);
- } else {
- /* 838[7:4] = 3 */
- /* 838[11:8] = 3 */
- /* 838[15:12] = 6 */
- /* 838[19:16] = 6 */
- /* 838[23:20] = 7 */
- /* 838[27:24] = 7 */
- odm_set_bb_reg(dm, 0x838, 0x0ffffff0, 0x776633);
- }
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Update CCA parameters for Acut\n", __func__);
-}
-
-static void phydm_rxdfirpar_by_bw_8822b(struct phy_dm_struct *dm,
- enum odm_bw bandwidth)
-{
- if (bandwidth == ODM_BW40M) {
- /* RX DFIR for BW40 */
- odm_set_bb_reg(dm, 0x948, BIT(29) | BIT(28), 0x1);
- odm_set_bb_reg(dm, 0x94c, BIT(29) | BIT(28), 0x0);
- odm_set_bb_reg(dm, 0xc20, BIT(31), 0x0);
- odm_set_bb_reg(dm, 0xe20, BIT(31), 0x0);
- } else if (bandwidth == ODM_BW80M) {
- /* RX DFIR for BW80 */
- odm_set_bb_reg(dm, 0x948, BIT(29) | BIT(28), 0x2);
- odm_set_bb_reg(dm, 0x94c, BIT(29) | BIT(28), 0x1);
- odm_set_bb_reg(dm, 0xc20, BIT(31), 0x0);
- odm_set_bb_reg(dm, 0xe20, BIT(31), 0x0);
- } else {
- /* RX DFIR for BW20, BW10 and BW5*/
- odm_set_bb_reg(dm, 0x948, BIT(29) | BIT(28), 0x2);
- odm_set_bb_reg(dm, 0x94c, BIT(29) | BIT(28), 0x2);
- odm_set_bb_reg(dm, 0xc20, BIT(31), 0x1);
- odm_set_bb_reg(dm, 0xe20, BIT(31), 0x1);
- }
-}
-
-bool phydm_write_txagc_1byte_8822b(struct phy_dm_struct *dm, u32 power_index,
- enum odm_rf_radio_path path, u8 hw_rate)
-{
- u32 offset_txagc[2] = {0x1d00, 0x1d80};
- u8 rate_idx = (hw_rate & 0xfc), i;
- u8 rate_offset = (hw_rate & 0x3);
- u32 txagc_content = 0x0;
-
- /* For debug command only!!!! */
-
- /* Error handling */
- if ((path > ODM_RF_PATH_B) || (hw_rate > 0x53)) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): unsupported path (%d)\n", __func__, path);
- return false;
- }
-
-	/* Due to a HW limitation, we can't write TXAGC one byte at a time. */
- for (i = 0; i < 4; i++) {
- if (i != rate_offset)
- txagc_content =
- txagc_content | (config_phydm_read_txagc_8822b(
- dm, path, rate_idx + i)
- << (i << 3));
- else
- txagc_content = txagc_content |
- ((power_index & 0x3f) << (i << 3));
- }
- odm_set_bb_reg(dm, (offset_txagc[path] + rate_idx), MASKDWORD,
- txagc_content);
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): path-%d rate index 0x%x (0x%x) = 0x%x\n", __func__,
- path, hw_rate, (offset_txagc[path] + hw_rate),
- power_index);
- return true;
-}
-
-void phydm_init_hw_info_by_rfe_type_8822b(struct phy_dm_struct *dm)
-{
- u16 mask_path_a = 0x0303;
- u16 mask_path_b = 0x0c0c;
- /*u16 mask_path_c = 0x3030;*/
- /*u16 mask_path_d = 0xc0c0;*/
-
- dm->is_init_hw_info_by_rfe = false;
-
- if ((dm->rfe_type == 1) || (dm->rfe_type == 6) || (dm->rfe_type == 7)) {
- odm_cmn_info_init(dm, ODM_CMNINFO_BOARD_TYPE,
- (ODM_BOARD_EXT_LNA | ODM_BOARD_EXT_LNA_5G |
- ODM_BOARD_EXT_PA | ODM_BOARD_EXT_PA_5G));
-
- if (dm->rfe_type == 6) {
- odm_cmn_info_init(
- dm, ODM_CMNINFO_GPA,
- (TYPE_GPA1 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_APA,
- (TYPE_APA1 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_GLNA,
- (TYPE_GLNA1 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_ALNA,
- (TYPE_ALNA1 & (mask_path_a | mask_path_b)));
- } else if (dm->rfe_type == 7) {
- odm_cmn_info_init(
- dm, ODM_CMNINFO_GPA,
- (TYPE_GPA2 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_APA,
- (TYPE_APA2 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_GLNA,
- (TYPE_GLNA2 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_ALNA,
- (TYPE_ALNA2 & (mask_path_a | mask_path_b)));
- } else {
- odm_cmn_info_init(
- dm, ODM_CMNINFO_GPA,
- (TYPE_GPA0 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_APA,
- (TYPE_APA0 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_GLNA,
- (TYPE_GLNA0 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(
- dm, ODM_CMNINFO_ALNA,
- (TYPE_ALNA0 & (mask_path_a | mask_path_b)));
- }
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PACKAGE_TYPE, 1);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_LNA, true);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_LNA, true);
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_PA, true);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_PA, true);
- } else if (dm->rfe_type == 2) {
- odm_cmn_info_init(dm, ODM_CMNINFO_BOARD_TYPE,
- (ODM_BOARD_EXT_LNA_5G | ODM_BOARD_EXT_PA_5G));
- odm_cmn_info_init(dm, ODM_CMNINFO_APA,
- (TYPE_APA0 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(dm, ODM_CMNINFO_ALNA,
- (TYPE_ALNA0 & (mask_path_a | mask_path_b)));
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PACKAGE_TYPE, 2);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_LNA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_LNA, true);
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_PA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_PA, true);
- } else if (dm->rfe_type == 9) {
- odm_cmn_info_init(dm, ODM_CMNINFO_BOARD_TYPE,
- (ODM_BOARD_EXT_LNA_5G));
- odm_cmn_info_init(dm, ODM_CMNINFO_ALNA,
- (TYPE_ALNA0 & (mask_path_a | mask_path_b)));
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PACKAGE_TYPE, 1);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_LNA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_LNA, true);
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_PA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_PA, false);
- } else if ((dm->rfe_type == 3) || (dm->rfe_type == 5)) {
- /* RFE type 3: 8822BS\8822BU TFBGA iFEM */
- /* RFE type 5: 8822BE TFBGA iFEM */
- odm_cmn_info_init(dm, ODM_CMNINFO_BOARD_TYPE, 0);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PACKAGE_TYPE, 2);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_LNA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_LNA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_PA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_PA, false);
- } else if (dm->rfe_type == 4) {
- odm_cmn_info_init(dm, ODM_CMNINFO_BOARD_TYPE,
- (ODM_BOARD_EXT_LNA | ODM_BOARD_EXT_LNA_5G |
- ODM_BOARD_EXT_PA | ODM_BOARD_EXT_PA_5G));
- odm_cmn_info_init(dm, ODM_CMNINFO_GPA,
- (TYPE_GPA0 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(dm, ODM_CMNINFO_APA,
- (TYPE_APA0 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(dm, ODM_CMNINFO_GLNA,
- (TYPE_GLNA0 & (mask_path_a | mask_path_b)));
- odm_cmn_info_init(dm, ODM_CMNINFO_ALNA,
- (TYPE_ALNA0 & (mask_path_a | mask_path_b)));
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PACKAGE_TYPE, 2);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_LNA, true);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_LNA, true);
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_PA, true);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_PA, true);
- } else if (dm->rfe_type == 8) {
- /* RFE type 8: TFBGA iFEM AP */
- odm_cmn_info_init(dm, ODM_CMNINFO_BOARD_TYPE, 0);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PACKAGE_TYPE, 2);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_LNA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_LNA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_PA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_PA, false);
- } else {
- /* RFE Type 0 & 9 & 10: QFN iFEM */
- odm_cmn_info_init(dm, ODM_CMNINFO_BOARD_TYPE, 0);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PACKAGE_TYPE, 1);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_LNA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_LNA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_PA, false);
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_PA, false);
- }
-
- dm->is_init_hw_info_by_rfe = true;
-
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): RFE type (%d), Board type (0x%x), Package type (%d)\n",
- __func__, dm->rfe_type, dm->board_type, dm->package_type);
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): 5G ePA (%d), 5G eLNA (%d), 2G ePA (%d), 2G eLNA (%d)\n",
- __func__, dm->ext_pa_5g, dm->ext_lna_5g, dm->ext_pa,
- dm->ext_lna);
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): 5G PA type (%d), 5G LNA type (%d), 2G PA type (%d), 2G LNA type (%d)\n",
- __func__, dm->type_apa, dm->type_alna, dm->type_gpa,
- dm->type_glna);
-}
-
-s32 phydm_get_condition_number_8822B(struct phy_dm_struct *dm)
-{
- s32 ret_val;
-
- odm_set_bb_reg(dm, 0x1988, BIT(22), 0x1);
- ret_val =
- (s32)odm_get_bb_reg(dm, 0xf84, (BIT(17) | BIT(16) | MASKLWORD));
-
- if (bw_8822b == 0) {
- ret_val = ret_val << (8 - 4);
- ret_val = ret_val / 234;
- } else if (bw_8822b == 1) {
- ret_val = ret_val << (7 - 4);
- ret_val = ret_val / 108;
- } else if (bw_8822b == 2) {
- ret_val = ret_val << (6 - 4);
- ret_val = ret_val / 52;
- }
-
- return ret_val;
-}
-
-/* ======================================================================== */
-
-/* ======================================================================== */
-/* The following functions can be used by the driver */
-
-u32 config_phydm_read_rf_reg_8822b(struct phy_dm_struct *dm,
- enum odm_rf_radio_path rf_path, u32 reg_addr,
- u32 bit_mask)
-{
- u32 readback_value, direct_addr;
- u32 offset_read_rf[2] = {0x2800, 0x2c00};
- u32 power_RF[2] = {0x1c, 0xec};
-
- /* Error handling.*/
- if (rf_path > ODM_RF_PATH_B) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): unsupported path (%d)\n", __func__,
- rf_path);
- return INVALID_RF_DATA;
- }
-
-	/* Error handling. Check if RF power is enabled or not */
-	/* 0xffffffff means RF power is disabled */
- if (odm_get_mac_reg(dm, power_RF[rf_path], MASKBYTE3) != 0x7) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Read fail, RF is disabled\n", __func__);
- return INVALID_RF_DATA;
- }
-
- /* Calculate offset */
- reg_addr &= 0xff;
- direct_addr = offset_read_rf[rf_path] + (reg_addr << 2);
-
-	/* RF registers only have 20 bits */
- bit_mask &= RFREGOFFSETMASK;
-
- /* Read RF register directly */
- readback_value = odm_get_bb_reg(dm, direct_addr, bit_mask);
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): RF-%d 0x%x = 0x%x, bit mask = 0x%x\n", __func__,
- rf_path, reg_addr, readback_value, bit_mask);
- return readback_value;
-}
-
-bool config_phydm_write_rf_reg_8822b(struct phy_dm_struct *dm,
- enum odm_rf_radio_path rf_path,
- u32 reg_addr, u32 bit_mask, u32 data)
-{
- u32 data_and_addr = 0, data_original = 0;
- u32 offset_write_rf[2] = {0xc90, 0xe90};
- u32 power_RF[2] = {0x1c, 0xec};
-
- /* Error handling.*/
- if (rf_path > ODM_RF_PATH_B) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): unsupported path (%d)\n", __func__,
- rf_path);
- return false;
- }
-
- /* Read RF register content first */
- reg_addr &= 0xff;
- bit_mask = bit_mask & RFREGOFFSETMASK;
-
- if (bit_mask != RFREGOFFSETMASK) {
- data_original = config_phydm_read_rf_reg_8822b(
- dm, rf_path, reg_addr, RFREGOFFSETMASK);
-
- /* Error handling. RF is disabled */
- if (!config_phydm_read_rf_check_8822b(data_original)) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Write fail, RF is disable\n",
- __func__);
- return false;
- }
-
- /* check bit mask */
- data = phydm_check_bit_mask(bit_mask, data_original, data);
- } else if (odm_get_mac_reg(dm, power_RF[rf_path], MASKBYTE3) != 0x7) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Write fail, RF is disabled\n", __func__);
- return false;
- }
-
- /* Put write addr in [27:20] and write data in [19:00] */
- data_and_addr = ((reg_addr << 20) | (data & 0x000fffff)) & 0x0fffffff;
-
- /* Write operation */
- odm_set_bb_reg(dm, offset_write_rf[rf_path], MASKDWORD, data_and_addr);
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): RF-%d 0x%x = 0x%x (original: 0x%x), bit mask = 0x%x\n",
- __func__, rf_path, reg_addr, data, data_original, bit_mask);
- return true;
-}
-
-bool config_phydm_write_txagc_8822b(struct phy_dm_struct *dm, u32 power_index,
- enum odm_rf_radio_path path, u8 hw_rate)
-{
- u32 offset_txagc[2] = {0x1d00, 0x1d80};
- u8 rate_idx = (hw_rate & 0xfc);
-
-	/* Input needs to be the HW rate index, not the driver rate index!!!! */
-
- if (dm->is_disable_phy_api) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): disable PHY API for debug!!\n", __func__);
- return true;
- }
-
- /* Error handling */
- if ((path > ODM_RF_PATH_B) || (hw_rate > 0x53)) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): unsupported path (%d)\n", __func__, path);
- return false;
- }
-
-	/* The driver needs to construct a 4-byte power index */
- odm_set_bb_reg(dm, (offset_txagc[path] + rate_idx), MASKDWORD,
- power_index);
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): path-%d rate index 0x%x (0x%x) = 0x%x\n", __func__,
- path, hw_rate, (offset_txagc[path] + hw_rate),
- power_index);
- return true;
-}
-
-u8 config_phydm_read_txagc_8822b(struct phy_dm_struct *dm,
- enum odm_rf_radio_path path, u8 hw_rate)
-{
- u8 read_back_data;
-
-	/* Input needs to be the HW rate index, not the driver rate index!!!! */
-
- /* Error handling */
- if ((path > ODM_RF_PATH_B) || (hw_rate > 0x53)) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): unsupported path (%d)\n", __func__, path);
- return INVALID_TXAGC_DATA;
- }
-
- /* Disable TX AGC report */
- odm_set_bb_reg(dm, 0x1998, BIT(16), 0x0); /* need to check */
-
- /* Set data rate index (bit0~6) and path index (bit7) */
- odm_set_bb_reg(dm, 0x1998, MASKBYTE0, (hw_rate | (path << 7)));
-
- /* Enable TXAGC report */
- odm_set_bb_reg(dm, 0x1998, BIT(16), 0x1);
-
- /* Read TX AGC report */
- read_back_data = (u8)odm_get_bb_reg(dm, 0xd30, 0x7f0000);
-
-	/* The driver has to disable the TXAGC report after reading TXAGC
- * (ref. user guide v11)
- */
- odm_set_bb_reg(dm, 0x1998, BIT(16), 0x0);
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): path-%d rate index 0x%x = 0x%x\n", __func__, path,
- hw_rate, read_back_data);
- return read_back_data;
-}
-
-bool config_phydm_switch_band_8822b(struct phy_dm_struct *dm, u8 central_ch)
-{
- u32 rf_reg18;
- bool rf_reg_status = true;
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG, "%s()======================>\n",
- __func__);
-
- if (dm->is_disable_phy_api) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): disable PHY API for debug!!\n", __func__);
- return true;
- }
-
- rf_reg18 = config_phydm_read_rf_reg_8822b(dm, ODM_RF_PATH_A, 0x18,
- RFREGOFFSETMASK);
- rf_reg_status =
- rf_reg_status & config_phydm_read_rf_check_8822b(rf_reg18);
-
- if (central_ch <= 14) {
- /* 2.4G */
-
- /* Enable CCK block */
- odm_set_bb_reg(dm, 0x808, BIT(28), 0x1);
-
- /* Disable MAC CCK check */
- odm_set_bb_reg(dm, 0x454, BIT(7), 0x0);
-
- /* Disable BB CCK check */
- odm_set_bb_reg(dm, 0xa80, BIT(18), 0x0);
-
- /*CCA Mask*/
- odm_set_bb_reg(dm, 0x814, 0x0000FC00, 15); /*default value*/
-
- /* RF band */
- rf_reg18 = (rf_reg18 & (~(BIT(16) | BIT(9) | BIT(8))));
-
- /* RxHP dynamic control */
- if ((dm->rfe_type == 2) || (dm->rfe_type == 3) ||
- (dm->rfe_type == 5)) {
- odm_set_bb_reg(dm, 0x8cc, MASKDWORD, 0x08108492);
- odm_set_bb_reg(dm, 0x8d8, MASKDWORD, 0x29095612);
- }
-
- } else if (central_ch > 35) {
- /* 5G */
-
- /* Enable BB CCK check */
- odm_set_bb_reg(dm, 0xa80, BIT(18), 0x1);
-
- /* Enable CCK check */
- odm_set_bb_reg(dm, 0x454, BIT(7), 0x1);
-
- /* Disable CCK block */
- odm_set_bb_reg(dm, 0x808, BIT(28), 0x0);
-
- /*CCA Mask*/
- odm_set_bb_reg(dm, 0x814, 0x0000FC00, 15); /*default value*/
-
- /* RF band */
- rf_reg18 = (rf_reg18 & (~(BIT(16) | BIT(9) | BIT(8))));
- rf_reg18 = (rf_reg18 | BIT(8) | BIT(16));
-
- /* RxHP dynamic control */
- if ((dm->rfe_type == 2) || (dm->rfe_type == 3) ||
- (dm->rfe_type == 5)) {
- odm_set_bb_reg(dm, 0x8cc, MASKDWORD, 0x08100000);
- odm_set_bb_reg(dm, 0x8d8, MASKDWORD, 0x21095612);
- }
-
- } else {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch band (ch: %d)\n", __func__,
- central_ch);
- return false;
- }
-
- rf_reg_status = rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x18,
- RFREGOFFSETMASK, rf_reg18);
-
- if (dm->rf_type > ODM_1T1R)
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_B, 0x18,
- RFREGOFFSETMASK, rf_reg18);
-
- if (!phydm_rfe_8822b(dm, central_ch))
- return false;
-
- if (!rf_reg_status) {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch band (ch: %d), because writing RF register is fail\n",
- __func__, central_ch);
- return false;
- }
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Success to switch band (ch: %d)\n", __func__,
- central_ch);
- return true;
-}
-
-bool config_phydm_switch_channel_8822b(struct phy_dm_struct *dm, u8 central_ch)
-{
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- u32 rf_reg18 = 0, rf_reg_b8 = 0, rf_reg_be = 0xff;
- bool rf_reg_status = true;
- u8 low_band[15] = {0x7, 0x6, 0x6, 0x5, 0x0, 0x0, 0x7, 0xff,
- 0x6, 0x5, 0x0, 0x0, 0x7, 0x6, 0x6};
- u8 middle_band[23] = {0x6, 0x5, 0x0, 0x0, 0x7, 0x6, 0x6, 0xff,
- 0x0, 0x0, 0x7, 0x6, 0x6, 0x5, 0x0, 0xff,
- 0x7, 0x6, 0x6, 0x5, 0x0, 0x0, 0x7};
- u8 high_band[15] = {0x5, 0x5, 0x0, 0x7, 0x7, 0x6, 0x5, 0xff,
- 0x0, 0x7, 0x7, 0x6, 0x5, 0x5, 0x0};
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG, "%s()====================>\n",
- __func__);
-
- if (dm->is_disable_phy_api) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): disable PHY API for debug!!\n", __func__);
- return true;
- }
-
- central_ch_8822b = central_ch;
- rf_reg18 = config_phydm_read_rf_reg_8822b(dm, ODM_RF_PATH_A, 0x18,
- RFREGOFFSETMASK);
- rf_reg_status =
- rf_reg_status & config_phydm_read_rf_check_8822b(rf_reg18);
- rf_reg18 = (rf_reg18 & (~(BIT(18) | BIT(17) | MASKBYTE0)));
-
- if (dm->cut_version == ODM_CUT_A) {
- rf_reg_b8 = config_phydm_read_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xb8, RFREGOFFSETMASK);
- rf_reg_status = rf_reg_status &
- config_phydm_read_rf_check_8822b(rf_reg_b8);
- }
-
- /* Switch band and channel */
- if (central_ch <= 14) {
- /* 2.4G */
-
- /* 1. RF band and channel*/
- rf_reg18 = (rf_reg18 | central_ch);
-
- /* 2. AGC table selection */
- odm_set_bb_reg(dm, 0x958, 0x1f, 0x0);
- dig_tab->agc_table_idx = 0x0;
-
- /* 3. Set central frequency for clock offset tracking */
- odm_set_bb_reg(dm, 0x860, 0x1ffe0000, 0x96a);
-
- /* Fix A-cut LCK fail issue @ 5285MHz~5375MHz, 0xb8[19]=0x0 */
- if (dm->cut_version == ODM_CUT_A)
- rf_reg_b8 = rf_reg_b8 | BIT(19);
-
- /* CCK TX filter parameters */
- if (central_ch == 14) {
- odm_set_bb_reg(dm, 0xa20, MASKHWORD, 0x8488);
- odm_set_bb_reg(dm, 0xa24, MASKDWORD, 0x00006577);
- odm_set_bb_reg(dm, 0xa28, MASKLWORD, 0x0000);
- } else {
- odm_set_bb_reg(dm, 0xa20, MASKHWORD,
- (rega20_8822b >> 16));
- odm_set_bb_reg(dm, 0xa24, MASKDWORD, rega24_8822b);
- odm_set_bb_reg(dm, 0xa28, MASKLWORD,
- (rega28_8822b & MASKLWORD));
- }
-
- } else if (central_ch > 35) {
- /* 5G */
-
- /* 1. RF band and channel*/
- rf_reg18 = (rf_reg18 | central_ch);
-
- /* 2. AGC table selection */
- if ((central_ch >= 36) && (central_ch <= 64)) {
- odm_set_bb_reg(dm, 0x958, 0x1f, 0x1);
- dig_tab->agc_table_idx = 0x1;
- } else if ((central_ch >= 100) && (central_ch <= 144)) {
- odm_set_bb_reg(dm, 0x958, 0x1f, 0x2);
- dig_tab->agc_table_idx = 0x2;
- } else if (central_ch >= 149) {
- odm_set_bb_reg(dm, 0x958, 0x1f, 0x3);
- dig_tab->agc_table_idx = 0x3;
- } else {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch channel (AGC) (ch: %d)\n",
- __func__, central_ch);
- return false;
- }
-
- /* 3. Set central frequency for clock offset tracking */
- if ((central_ch >= 36) && (central_ch <= 48)) {
- odm_set_bb_reg(dm, 0x860, 0x1ffe0000, 0x494);
- } else if ((central_ch >= 52) && (central_ch <= 64)) {
- odm_set_bb_reg(dm, 0x860, 0x1ffe0000, 0x453);
- } else if ((central_ch >= 100) && (central_ch <= 116)) {
- odm_set_bb_reg(dm, 0x860, 0x1ffe0000, 0x452);
- } else if ((central_ch >= 118) && (central_ch <= 177)) {
- odm_set_bb_reg(dm, 0x860, 0x1ffe0000, 0x412);
- } else {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch channel (fc_area) (ch: %d)\n",
- __func__, central_ch);
- return false;
- }
-
- /* Fix A-cut LCK fail issue @ 5285MHz~5375MHz, 0xb8[19]=0x0 */
- if (dm->cut_version == ODM_CUT_A) {
- if ((central_ch >= 57) && (central_ch <= 75))
- rf_reg_b8 = rf_reg_b8 & (~BIT(19));
- else
- rf_reg_b8 = rf_reg_b8 | BIT(19);
- }
- } else {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch channel (ch: %d)\n",
- __func__, central_ch);
- return false;
- }
-
-	/* Modify IGI for MP driver to avoid PCIE interference */
- if (dm->mp_mode && ((dm->rfe_type == 3) || (dm->rfe_type == 5))) {
- if (central_ch == 14)
- odm_write_dig(dm, 0x26);
- else
- odm_write_dig(dm, 0x20);
- }
-
- /* Modify the setting of register 0xBE to reduce phase noise */
- if (central_ch <= 14)
- rf_reg_be = 0x0;
- else if ((central_ch >= 36) && (central_ch <= 64))
- rf_reg_be = low_band[(central_ch - 36) >> 1];
- else if ((central_ch >= 100) && (central_ch <= 144))
- rf_reg_be = middle_band[(central_ch - 100) >> 1];
- else if ((central_ch >= 149) && (central_ch <= 177))
- rf_reg_be = high_band[(central_ch - 149) >> 1];
- else
- rf_reg_be = 0xff;
-
- if (rf_reg_be != 0xff) {
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xbe,
- (BIT(17) | BIT(16) | BIT(15)),
- rf_reg_be);
- } else {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch channel (ch: %d, Phase noise)\n",
- __func__, central_ch);
- return false;
- }
-
-	/* Fix channel 144 issue, asked by RFSI Alvin */
- /* 00 when freq < 5400; 01 when 5400<=freq<=5720; 10 when freq > 5720;
- * 2G don't care
- */
- /* need to set 0xdf[18]=1 before writing RF18 when channel 144 */
- if (central_ch == 144) {
- rf_reg_status = rf_reg_status &
- config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xdf, BIT(18), 0x1);
- rf_reg18 = (rf_reg18 | BIT(17));
- } else {
- rf_reg_status = rf_reg_status &
- config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xdf, BIT(18), 0x0);
-
- if (central_ch > 144)
- rf_reg18 = (rf_reg18 | BIT(18));
- else if (central_ch >= 80)
- rf_reg18 = (rf_reg18 | BIT(17));
- }
-
- rf_reg_status = rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x18,
- RFREGOFFSETMASK, rf_reg18);
-
- if (dm->cut_version == ODM_CUT_A)
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xb8,
- RFREGOFFSETMASK, rf_reg_b8);
-
- if (dm->rf_type > ODM_1T1R) {
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_B, 0x18,
- RFREGOFFSETMASK, rf_reg18);
-
- if (dm->cut_version == ODM_CUT_A)
- rf_reg_status = rf_reg_status &
- config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_B, 0xb8,
- RFREGOFFSETMASK, rf_reg_b8);
- }
-
- if (!rf_reg_status) {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch channel (ch: %d), because writing RF register is fail\n",
- __func__, central_ch);
- return false;
- }
-
- phydm_ccapar_by_rfe_8822b(dm);
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Success to switch channel (ch: %d)\n", __func__,
- central_ch);
- return true;
-}
-
-bool config_phydm_switch_bandwidth_8822b(struct phy_dm_struct *dm,
- u8 primary_ch_idx,
- enum odm_bw bandwidth)
-{
- u32 rf_reg18;
- bool rf_reg_status = true;
- u8 IGI = 0;
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG, "%s()===================>\n",
- __func__);
-
- if (dm->is_disable_phy_api) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): disable PHY API for debug!!\n", __func__);
- return true;
- }
-
- /* Error handling */
- if ((bandwidth >= ODM_BW_MAX) ||
- ((bandwidth == ODM_BW40M) && (primary_ch_idx > 2)) ||
- ((bandwidth == ODM_BW80M) && (primary_ch_idx > 4))) {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch bandwidth (bw: %d, primary ch: %d)\n",
- __func__, bandwidth, primary_ch_idx);
- return false;
- }
-
- bw_8822b = bandwidth;
- rf_reg18 = config_phydm_read_rf_reg_8822b(dm, ODM_RF_PATH_A, 0x18,
- RFREGOFFSETMASK);
- rf_reg_status =
- rf_reg_status & config_phydm_read_rf_check_8822b(rf_reg18);
-
- /* Switch bandwidth */
- switch (bandwidth) {
- case ODM_BW20M: {
- /* Small BW([7:6]) = 0, primary channel ([5:2]) = 0,
- * rf mode([1:0]) = 20M
- */
- odm_set_bb_reg(dm, 0x8ac, MASKBYTE0, ODM_BW20M);
-
- /* ADC clock = 160M clock for BW20 */
- odm_set_bb_reg(dm, 0x8ac, (BIT(9) | BIT(8)), 0x0);
- odm_set_bb_reg(dm, 0x8ac, BIT(16), 0x1);
-
- /* DAC clock = 160M clock for BW20 */
- odm_set_bb_reg(dm, 0x8ac, (BIT(21) | BIT(20)), 0x0);
- odm_set_bb_reg(dm, 0x8ac, BIT(28), 0x1);
-
- /* ADC buffer clock */
- odm_set_bb_reg(dm, 0x8c4, BIT(30), 0x1);
-
- /* RF bandwidth */
- rf_reg18 = (rf_reg18 | BIT(11) | BIT(10));
-
- break;
- }
- case ODM_BW40M: {
- /* Small BW([7:6]) = 0, primary channel ([5:2]) = sub-channel,
- * rf mode([1:0]) = 40M
- */
- odm_set_bb_reg(dm, 0x8ac, MASKBYTE0,
- (((primary_ch_idx & 0xf) << 2) | ODM_BW40M));
-
- /* CCK primary channel */
- if (primary_ch_idx == 1)
- odm_set_bb_reg(dm, 0xa00, BIT(4), primary_ch_idx);
- else
- odm_set_bb_reg(dm, 0xa00, BIT(4), 0);
-
- /* ADC clock = 160M clock for BW40 */
- odm_set_bb_reg(dm, 0x8ac, (BIT(11) | BIT(10)), 0x0);
- odm_set_bb_reg(dm, 0x8ac, BIT(17), 0x1);
-
-		/* DAC clock = 160M clock for BW40 */
- odm_set_bb_reg(dm, 0x8ac, (BIT(23) | BIT(22)), 0x0);
- odm_set_bb_reg(dm, 0x8ac, BIT(29), 0x1);
-
- /* ADC buffer clock */
- odm_set_bb_reg(dm, 0x8c4, BIT(30), 0x1);
-
- /* RF bandwidth */
- rf_reg18 = (rf_reg18 & (~(BIT(11) | BIT(10))));
- rf_reg18 = (rf_reg18 | BIT(11));
-
- break;
- }
- case ODM_BW80M: {
- /* Small BW([7:6]) = 0, primary channel ([5:2]) = sub-channel,
- * rf mode([1:0]) = 80M
- */
- odm_set_bb_reg(dm, 0x8ac, MASKBYTE0,
- (((primary_ch_idx & 0xf) << 2) | ODM_BW80M));
-
- /* ADC clock = 160M clock for BW80 */
- odm_set_bb_reg(dm, 0x8ac, (BIT(13) | BIT(12)), 0x0);
- odm_set_bb_reg(dm, 0x8ac, BIT(18), 0x1);
-
-		/* DAC clock = 160M clock for BW80 */
- odm_set_bb_reg(dm, 0x8ac, (BIT(25) | BIT(24)), 0x0);
- odm_set_bb_reg(dm, 0x8ac, BIT(30), 0x1);
-
- /* ADC buffer clock */
- odm_set_bb_reg(dm, 0x8c4, BIT(30), 0x1);
-
- /* RF bandwidth */
- rf_reg18 = (rf_reg18 & (~(BIT(11) | BIT(10))));
- rf_reg18 = (rf_reg18 | BIT(10));
-
- break;
- }
- case ODM_BW5M: {
- /* Small BW([7:6]) = 1, primary channel ([5:2]) = 0,
- * rf mode([1:0]) = 20M
- */
- odm_set_bb_reg(dm, 0x8ac, MASKBYTE0, (BIT(6) | ODM_BW20M));
-
- /* ADC clock = 40M clock */
- odm_set_bb_reg(dm, 0x8ac, (BIT(9) | BIT(8)), 0x2);
- odm_set_bb_reg(dm, 0x8ac, BIT(16), 0x0);
-
- /* DAC clock = 160M clock for BW20 */
- odm_set_bb_reg(dm, 0x8ac, (BIT(21) | BIT(20)), 0x2);
- odm_set_bb_reg(dm, 0x8ac, BIT(28), 0x0);
-
- /* ADC buffer clock */
- odm_set_bb_reg(dm, 0x8c4, BIT(30), 0x0);
- odm_set_bb_reg(dm, 0x8c8, BIT(31), 0x1);
-
- /* RF bandwidth */
- rf_reg18 = (rf_reg18 | BIT(11) | BIT(10));
-
- break;
- }
- case ODM_BW10M: {
- /* Small BW([7:6]) = 1, primary channel ([5:2]) = 0,
- * rf mode([1:0]) = 20M
- */
- odm_set_bb_reg(dm, 0x8ac, MASKBYTE0, (BIT(7) | ODM_BW20M));
-
- /* ADC clock = 80M clock */
- odm_set_bb_reg(dm, 0x8ac, (BIT(9) | BIT(8)), 0x3);
- odm_set_bb_reg(dm, 0x8ac, BIT(16), 0x0);
-
- /* DAC clock = 160M clock for BW20 */
- odm_set_bb_reg(dm, 0x8ac, (BIT(21) | BIT(20)), 0x3);
- odm_set_bb_reg(dm, 0x8ac, BIT(28), 0x0);
-
- /* ADC buffer clock */
- odm_set_bb_reg(dm, 0x8c4, BIT(30), 0x0);
- odm_set_bb_reg(dm, 0x8c8, BIT(31), 0x1);
-
- /* RF bandwidth */
- rf_reg18 = (rf_reg18 | BIT(11) | BIT(10));
-
- break;
- }
- default:
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch bandwidth (bw: %d, primary ch: %d)\n",
- __func__, bandwidth, primary_ch_idx);
- }
-
- /* Write RF register */
- rf_reg_status = rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x18,
- RFREGOFFSETMASK, rf_reg18);
-
- if (dm->rf_type > ODM_1T1R)
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_B, 0x18,
- RFREGOFFSETMASK, rf_reg18);
-
- if (!rf_reg_status) {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to switch bandwidth (bw: %d, primary ch: %d), because writing RF register is fail\n",
- __func__, bandwidth, primary_ch_idx);
- return false;
- }
-
- /* Modify RX DFIR parameters */
- phydm_rxdfirpar_by_bw_8822b(dm, bandwidth);
-
- /* Modify CCA parameters */
- phydm_ccapar_by_bw_8822b(dm, bandwidth);
- phydm_ccapar_by_rfe_8822b(dm);
-
- /* Toggle RX path to avoid RX dead zone issue */
- odm_set_bb_reg(dm, 0x808, MASKBYTE0, 0x0);
- odm_set_bb_reg(dm, 0x808, MASKBYTE0,
- (dm->rx_ant_status | (dm->rx_ant_status << 4)));
-
- /* Toggle IGI to let RF enter RX mode */
- IGI = (u8)odm_get_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm));
- odm_set_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm), IGI - 2);
- odm_set_bb_reg(dm, ODM_REG(IGI_B, dm), ODM_BIT(IGI, dm), IGI - 2);
- odm_set_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm), IGI);
- odm_set_bb_reg(dm, ODM_REG(IGI_B, dm), ODM_BIT(IGI, dm), IGI);
-
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Success to switch bandwidth (bw: %d, primary ch: %d)\n",
- __func__, bandwidth, primary_ch_idx);
- return true;
-}
-
-bool config_phydm_switch_channel_bw_8822b(struct phy_dm_struct *dm,
- u8 central_ch, u8 primary_ch_idx,
- enum odm_bw bandwidth)
-{
- /* Switch band */
- if (!config_phydm_switch_band_8822b(dm, central_ch))
- return false;
-
- /* Switch channel */
- if (!config_phydm_switch_channel_8822b(dm, central_ch))
- return false;
-
- /* Switch bandwidth */
- if (!config_phydm_switch_bandwidth_8822b(dm, primary_ch_idx, bandwidth))
- return false;
-
- return true;
-}
-
-bool config_phydm_trx_mode_8822b(struct phy_dm_struct *dm,
- enum odm_rf_path tx_path,
- enum odm_rf_path rx_path, bool is_tx2_path)
-{
- bool rf_reg_status = true;
- u8 IGI;
- u32 rf_reg33 = 0;
- u16 counter = 0;
-
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG, "%s()=====================>\n",
- __func__);
-
- if (dm->is_disable_phy_api) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): disable PHY API for debug!!\n", __func__);
- return true;
- }
-
- if ((tx_path & (~(ODM_RF_A | ODM_RF_B))) != 0) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Wrong TX setting (TX: 0x%x)\n", __func__,
- tx_path);
- return false;
- }
-
- if ((rx_path & (~(ODM_RF_A | ODM_RF_B))) != 0) {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Wrong RX setting (RX: 0x%x)\n", __func__,
- rx_path);
- return false;
- }
-
- /* RF mode of path-A and path-B */
-	/* Cannot shut down path-A, because the synthesizer will be shut down
-	 * when path-A is in shutdown mode
-	 */
- if ((tx_path | rx_path) & ODM_RF_A)
- odm_set_bb_reg(dm, 0xc08, MASKLWORD, 0x3231);
- else
- odm_set_bb_reg(dm, 0xc08, MASKLWORD, 0x1111);
-
- if ((tx_path | rx_path) & ODM_RF_B)
- odm_set_bb_reg(dm, 0xe08, MASKLWORD, 0x3231);
- else
- odm_set_bb_reg(dm, 0xe08, MASKLWORD, 0x1111);
-
- /* Set TX antenna by Nsts */
- odm_set_bb_reg(dm, 0x93c, (BIT(19) | BIT(18)), 0x3);
- odm_set_bb_reg(dm, 0x80c, (BIT(29) | BIT(28)), 0x1);
-
- /* Control CCK TX path by 0xa07[7] */
- odm_set_bb_reg(dm, 0x80c, BIT(30), 0x1);
-
- /* TX logic map and TX path en for Nsts = 1, and CCK TX path*/
- if (tx_path & ODM_RF_A) {
- odm_set_bb_reg(dm, 0x93c, 0xfff00000, 0x001);
- odm_set_bb_reg(dm, 0xa04, 0xf0000000, 0x8);
- } else if (tx_path & ODM_RF_B) {
- odm_set_bb_reg(dm, 0x93c, 0xfff00000, 0x002);
- odm_set_bb_reg(dm, 0xa04, 0xf0000000, 0x4);
- }
-
- /* TX logic map and TX path en for Nsts = 2*/
- if ((tx_path == ODM_RF_A) || (tx_path == ODM_RF_B))
- odm_set_bb_reg(dm, 0x940, 0xfff0, 0x01);
- else
- odm_set_bb_reg(dm, 0x940, 0xfff0, 0x43);
-
- /* TX path enable */
- odm_set_bb_reg(dm, 0x80c, MASKBYTE0, ((tx_path << 4) | tx_path));
-
- /* Tx2path for 1ss */
- if (!((tx_path == ODM_RF_A) || (tx_path == ODM_RF_B))) {
- if (is_tx2_path || dm->mp_mode) {
- /* 2Tx for OFDM */
- odm_set_bb_reg(dm, 0x93c, 0xfff00000, 0x043);
-
- /* 2Tx for CCK */
- odm_set_bb_reg(dm, 0xa04, 0xf0000000, 0xc);
- }
- }
-
- /* Always disable MRC for CCK CCA */
- odm_set_bb_reg(dm, 0xa2c, BIT(22), 0x0);
-
- /* Always disable MRC for CCK barker */
- odm_set_bb_reg(dm, 0xa2c, BIT(18), 0x0);
-
- /* CCK RX 1st and 2nd path setting*/
- if (rx_path & ODM_RF_A)
- odm_set_bb_reg(dm, 0xa04, 0x0f000000, 0x0);
- else if (rx_path & ODM_RF_B)
- odm_set_bb_reg(dm, 0xa04, 0x0f000000, 0x5);
-
- /* RX path enable */
- odm_set_bb_reg(dm, 0x808, MASKBYTE0, ((rx_path << 4) | rx_path));
-
- if ((rx_path == ODM_RF_A) || (rx_path == ODM_RF_B)) {
- /* 1R */
-
- /* Disable MRC for CCA */
- /* odm_set_bb_reg(dm, 0xa2c, BIT22, 0x0); */
-
- /* Disable MRC for barker */
- /* odm_set_bb_reg(dm, 0xa2c, BIT18, 0x0); */
-
- /* Disable CCK antenna diversity */
- /* odm_set_bb_reg(dm, 0xa00, BIT15, 0x0); */
-
- /* Disable Antenna weighting */
- odm_set_bb_reg(dm, 0x1904, BIT(16), 0x0);
- odm_set_bb_reg(dm, 0x800, BIT(28), 0x0);
- odm_set_bb_reg(dm, 0x850, BIT(23), 0x0);
- } else {
- /* 2R */
-
- /* Enable MRC for CCA */
- /* odm_set_bb_reg(dm, 0xa2c, BIT22, 0x1); */
-
- /* Enable MRC for barker */
- /* odm_set_bb_reg(dm, 0xa2c, BIT18, 0x1); */
-
- /* Disable CCK antenna diversity */
- /* odm_set_bb_reg(dm, 0xa00, BIT15, 0x0); */
-
- /* Enable Antenna weighting */
- odm_set_bb_reg(dm, 0x1904, BIT(16), 0x1);
- odm_set_bb_reg(dm, 0x800, BIT(28), 0x1);
- odm_set_bb_reg(dm, 0x850, BIT(23), 0x1);
- }
-
- /* Update TXRX antenna status for PHYDM */
- dm->tx_ant_status = (tx_path & 0x3);
- dm->rx_ant_status = (rx_path & 0x3);
-
-	/* MP driver needs to support path-B TX/RX */
-
- while (1) {
- counter++;
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xef,
- RFREGOFFSETMASK, 0x80000);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x33,
- RFREGOFFSETMASK, 0x00001);
-
- ODM_delay_us(2);
- rf_reg33 = config_phydm_read_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x33, RFREGOFFSETMASK);
-
- if ((rf_reg33 == 0x00001) &&
- (config_phydm_read_rf_check_8822b(rf_reg33)))
- break;
- else if (counter == 100) {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to set TRx mode setting, because writing RF mode table is fail\n",
- __func__);
- return false;
- }
- }
-
- if ((dm->mp_mode) || *dm->antenna_test || (dm->normal_rx_path)) {
- /* 0xef 0x80000 0x33 0x00001 0x3e 0x00034 0x3f 0x4080e
- * 0xef 0x00000 suggested by Lucas
- */
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xef,
- RFREGOFFSETMASK, 0x80000);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x33,
- RFREGOFFSETMASK, 0x00001);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x3e,
- RFREGOFFSETMASK, 0x00034);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x3f,
- RFREGOFFSETMASK, 0x4080e);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xef,
- RFREGOFFSETMASK, 0x00000);
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): MP mode or Antenna test mode!! support path-B TX and RX\n",
- __func__);
- } else {
- /* 0xef 0x80000 0x33 0x00001 0x3e 0x00034 0x3f 0x4080c
- * 0xef 0x00000
- */
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xef,
- RFREGOFFSETMASK, 0x80000);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x33,
- RFREGOFFSETMASK, 0x00001);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x3e,
- RFREGOFFSETMASK, 0x00034);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0x3f,
- RFREGOFFSETMASK, 0x4080c);
- rf_reg_status =
- rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xef,
- RFREGOFFSETMASK, 0x00000);
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Normal mode!! Do not support path-B TX and RX\n",
- __func__);
- }
-
- rf_reg_status = rf_reg_status & config_phydm_write_rf_reg_8822b(
- dm, ODM_RF_PATH_A, 0xef,
- RFREGOFFSETMASK, 0x00000);
-
- if (!rf_reg_status) {
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Fail to set TRx mode setting (TX: 0x%x, RX: 0x%x), because writing RF register is fail\n",
- __func__, tx_path, rx_path);
- return false;
- }
-
- /* Toggle IGI to let RF enter RX mode,
-	 * because the BB doesn't send 3-wire commands when the RX path is enabled
- */
- IGI = (u8)odm_get_bb_reg(dm, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm));
- odm_write_dig(dm, IGI - 2);
- odm_write_dig(dm, IGI);
-
- /* Modify CCA parameters */
- phydm_ccapar_by_rxpath_8822b(dm);
- phydm_ccapar_by_rfe_8822b(dm);
- phydm_rfe_8822b(dm, central_ch_8822b);
-
- ODM_RT_TRACE(
- dm, ODM_PHY_CONFIG,
- "%s(): Success to set TRx mode setting (TX: 0x%x, RX: 0x%x)\n",
- __func__, tx_path, rx_path);
- return true;
-}
-
-bool config_phydm_parameter_init(struct phy_dm_struct *dm,
- enum odm_parameter_init type)
-{
- if (type == ODM_PRE_SETTING) {
- odm_set_bb_reg(dm, 0x808, (BIT(28) | BIT(29)), 0x0);
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Pre setting: disable OFDM and CCK block\n",
- __func__);
- } else if (type == ODM_POST_SETTING) {
- odm_set_bb_reg(dm, 0x808, (BIT(28) | BIT(29)), 0x3);
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG,
- "%s(): Post setting: enable OFDM and CCK block\n",
- __func__);
- reg82c_8822b = odm_get_bb_reg(dm, 0x82c, MASKDWORD);
- reg838_8822b = odm_get_bb_reg(dm, 0x838, MASKDWORD);
- reg830_8822b = odm_get_bb_reg(dm, 0x830, MASKDWORD);
- reg83c_8822b = odm_get_bb_reg(dm, 0x83c, MASKDWORD);
- rega20_8822b = odm_get_bb_reg(dm, 0xa20, MASKDWORD);
- rega24_8822b = odm_get_bb_reg(dm, 0xa24, MASKDWORD);
- rega28_8822b = odm_get_bb_reg(dm, 0xa28, MASKDWORD);
- } else {
- ODM_RT_TRACE(dm, ODM_PHY_CONFIG, "%s(): Wrong type!!\n",
- __func__);
- return false;
- }
-
- return true;
-}
-
-/* ======================================================================== */
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h
deleted file mode 100644
index 5c5370af6ece..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __INC_PHYDM_API_H_8822B__
-#define __INC_PHYDM_API_H_8822B__
-
-/*2016.08.01 (HW user guide version: R27, SW user guide version: R05,
- * Modification: R31)
- */
-#define PHY_CONFIG_VERSION_8822B "27.5.31"
-
-#define INVALID_RF_DATA 0xffffffff
-#define INVALID_TXAGC_DATA 0xff
-
-#define config_phydm_read_rf_check_8822b(data) (data != INVALID_RF_DATA)
-#define config_phydm_read_txagc_check_8822b(data) (data != INVALID_TXAGC_DATA)
-
-u32 config_phydm_read_rf_reg_8822b(struct phy_dm_struct *dm,
- enum odm_rf_radio_path rf_path, u32 reg_addr,
- u32 bit_mask);
-
-bool config_phydm_write_rf_reg_8822b(struct phy_dm_struct *dm,
- enum odm_rf_radio_path rf_path,
- u32 reg_addr, u32 bit_mask, u32 data);
-
-bool config_phydm_write_txagc_8822b(struct phy_dm_struct *dm, u32 power_index,
- enum odm_rf_radio_path path, u8 hw_rate);
-
-u8 config_phydm_read_txagc_8822b(struct phy_dm_struct *dm,
- enum odm_rf_radio_path path, u8 hw_rate);
-
-bool config_phydm_switch_band_8822b(struct phy_dm_struct *dm, u8 central_ch);
-
-bool config_phydm_switch_channel_8822b(struct phy_dm_struct *dm, u8 central_ch);
-
-bool config_phydm_switch_bandwidth_8822b(struct phy_dm_struct *dm,
- u8 primary_ch_idx,
- enum odm_bw bandwidth);
-
-bool config_phydm_switch_channel_bw_8822b(struct phy_dm_struct *dm,
- u8 central_ch, u8 primary_ch_idx,
- enum odm_bw bandwidth);
-
-bool config_phydm_trx_mode_8822b(struct phy_dm_struct *dm,
- enum odm_rf_path tx_path,
- enum odm_rf_path rx_path, bool is_tx2_path);
-
-bool config_phydm_parameter_init(struct phy_dm_struct *dm,
- enum odm_parameter_init type);
-
-/* ======================================================================== */
-/* These following functions can be used for PHY DM only*/
-
-bool phydm_write_txagc_1byte_8822b(struct phy_dm_struct *dm, u32 power_index,
- enum odm_rf_radio_path path, u8 hw_rate);
-
-void phydm_init_hw_info_by_rfe_type_8822b(struct phy_dm_struct *dm);
-
-s32 phydm_get_condition_number_8822B(struct phy_dm_struct *dm);
-
-/* ======================================================================== */
-
-#endif /* __INC_PHYDM_API_H_8822B__ */
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c
deleted file mode 100644
index 3ce49322b686..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c
+++ /dev/null
@@ -1,1399 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../mp_precomp.h"
-#include "../phydm_precomp.h"
-
-/*---------------------------Define Local Constant---------------------------*/
-
-static bool _iqk_rx_iqk_by_path_8822b(void *, u8);
-
-static inline void phydm_set_iqk_info(struct phy_dm_struct *dm,
- struct dm_iqk_info *iqk_info, u8 status)
-{
- bool KFAIL = true;
-
- while (1) {
- KFAIL = _iqk_rx_iqk_by_path_8822b(dm, ODM_RF_PATH_A);
- if (status == 0)
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S0RXK KFail = 0x%x\n", KFAIL);
- else if (status == 1)
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S1RXK KFail = 0x%x\n", KFAIL);
- if (iqk_info->rxiqk_step == 5) {
- dm->rf_calibrate_info.iqk_step++;
- iqk_info->rxiqk_step = 1;
- if (KFAIL && status == 0)
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S0RXK fail code: %d!!!\n",
- iqk_info->rxiqk_fail_code
- [0][ODM_RF_PATH_A]);
- else if (KFAIL && status == 1)
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S1RXK fail code: %d!!!\n",
- iqk_info->rxiqk_fail_code
- [0][ODM_RF_PATH_A]);
- break;
- }
- }
-
- iqk_info->kcount++;
-}
-
-static inline void phydm_init_iqk_information(struct dm_iqk_info *iqk_info)
-{
- u8 i, j, k, m;
-
- for (i = 0; i < 2; i++) {
- iqk_info->iqk_channel[i] = 0x0;
-
- for (j = 0; j < SS_8822B; j++) {
- iqk_info->lok_idac[i][j] = 0x0;
- iqk_info->rxiqk_agc[i][j] = 0x0;
- iqk_info->bypass_iqk[i][j] = 0x0;
-
- for (k = 0; k < 2; k++) {
- iqk_info->iqk_fail_report[i][j][k] = true;
- for (m = 0; m < 8; m++) {
- iqk_info->iqk_cfir_real[i][j][k][m] =
- 0x0;
- iqk_info->iqk_cfir_imag[i][j][k][m] =
- 0x0;
- }
- }
-
- for (k = 0; k < 3; k++)
- iqk_info->retry_count[i][j][k] = 0x0;
- }
- }
-}
-
-static inline void phydm_backup_iqk_information(struct dm_iqk_info *iqk_info)
-{
- u8 i, j, k;
-
- iqk_info->iqk_channel[1] = iqk_info->iqk_channel[0];
- for (i = 0; i < 2; i++) {
- iqk_info->lok_idac[1][i] = iqk_info->lok_idac[0][i];
- iqk_info->rxiqk_agc[1][i] = iqk_info->rxiqk_agc[0][i];
- iqk_info->bypass_iqk[1][i] = iqk_info->bypass_iqk[0][i];
- iqk_info->rxiqk_fail_code[1][i] =
- iqk_info->rxiqk_fail_code[0][i];
- for (j = 0; j < 2; j++) {
- iqk_info->iqk_fail_report[1][i][j] =
- iqk_info->iqk_fail_report[0][i][j];
- for (k = 0; k < 8; k++) {
- iqk_info->iqk_cfir_real[1][i][j][k] =
- iqk_info->iqk_cfir_real[0][i][j][k];
- iqk_info->iqk_cfir_imag[1][i][j][k] =
- iqk_info->iqk_cfir_imag[0][i][j][k];
- }
- }
- }
-
- for (i = 0; i < 4; i++) {
- iqk_info->rxiqk_fail_code[0][i] = 0x0;
- iqk_info->rxiqk_agc[0][i] = 0x0;
- for (j = 0; j < 2; j++) {
- iqk_info->iqk_fail_report[0][i][j] = true;
- iqk_info->gs_retry_count[0][i][j] = 0x0;
- }
- for (j = 0; j < 3; j++)
- iqk_info->retry_count[0][i][j] = 0x0;
- }
-}
-
-static inline void phydm_set_iqk_cfir(struct phy_dm_struct *dm,
- struct dm_iqk_info *iqk_info, u8 path)
-{
- u8 idx, i;
- u32 tmp;
-
- for (idx = 0; idx < 2; idx++) {
- odm_set_bb_reg(dm, 0x1b00, MASKDWORD, 0xf8000008 | path << 1);
-
- if (idx == 0)
- odm_set_bb_reg(dm, 0x1b0c, BIT(13) | BIT(12), 0x3);
- else
- odm_set_bb_reg(dm, 0x1b0c, BIT(13) | BIT(12), 0x1);
-
- odm_set_bb_reg(dm, 0x1bd4,
- BIT(20) | BIT(19) | BIT(18) | BIT(17) | BIT(16),
- 0x10);
-
- for (i = 0; i < 8; i++) {
- odm_set_bb_reg(dm, 0x1bd8, MASKDWORD,
- 0xe0000001 + (i * 4));
- tmp = odm_get_bb_reg(dm, 0x1bfc, MASKDWORD);
- iqk_info->iqk_cfir_real[0][path][idx][i] =
- (tmp & 0x0fff0000) >> 16;
- iqk_info->iqk_cfir_imag[0][path][idx][i] = tmp & 0xfff;
- }
- }
-}
-
-static inline void phydm_get_read_counter(struct phy_dm_struct *dm)
-{
- u32 counter = 0x0;
-
- while (1) {
- if (((odm_read_4byte(dm, 0x1bf0) >> 24) == 0x7f) ||
- (counter > 300))
- break;
-
- counter++;
- ODM_delay_ms(1);
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION, "[IQK]counter = %d\n", counter);
-}
-
-/*---------------------------Define Local Constant---------------------------*/
-
-void do_iqk_8822b(void *dm_void, u8 delta_thermal_index, u8 thermal_value,
- u8 threshold)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- odm_reset_iqk_result(dm);
-
- dm->rf_calibrate_info.thermal_value_iqk = thermal_value;
-
- phy_iq_calibrate_8822b(dm, true);
-}
-
-static void _iqk_fill_iqk_report_8822b(void *dm_void, u8 channel)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- u32 tmp1 = 0x0, tmp2 = 0x0, tmp3 = 0x0;
- u8 i;
-
- for (i = 0; i < SS_8822B; i++) {
- tmp1 = tmp1 +
- ((iqk_info->iqk_fail_report[channel][i][TX_IQK] & 0x1)
- << i);
- tmp2 = tmp2 +
- ((iqk_info->iqk_fail_report[channel][i][RX_IQK] & 0x1)
- << (i + 4));
- tmp3 = tmp3 + ((iqk_info->rxiqk_fail_code[channel][i] & 0x3)
- << (i * 2 + 8));
- }
- odm_write_4byte(dm, 0x1b00, 0xf8000008);
- odm_set_bb_reg(dm, 0x1bf0, 0x0000ffff, tmp1 | tmp2 | tmp3);
-
- for (i = 0; i < 2; i++)
- odm_write_4byte(
- dm, 0x1be8 + (i * 4),
- (iqk_info->rxiqk_agc[channel][(i * 2) + 1] << 16) |
- iqk_info->rxiqk_agc[channel][i * 2]);
-}
-
-static void _iqk_backup_mac_bb_8822b(struct phy_dm_struct *dm, u32 *MAC_backup,
- u32 *BB_backup, u32 *backup_mac_reg,
- u32 *backup_bb_reg)
-{
- u32 i;
-
- for (i = 0; i < MAC_REG_NUM_8822B; i++)
- MAC_backup[i] = odm_read_4byte(dm, backup_mac_reg[i]);
-
- for (i = 0; i < BB_REG_NUM_8822B; i++)
- BB_backup[i] = odm_read_4byte(dm, backup_bb_reg[i]);
-}
-
-static void _iqk_backup_rf_8822b(struct phy_dm_struct *dm, u32 RF_backup[][2],
- u32 *backup_rf_reg)
-{
- u32 i;
-
- for (i = 0; i < RF_REG_NUM_8822B; i++) {
- RF_backup[i][ODM_RF_PATH_A] = odm_get_rf_reg(
- dm, ODM_RF_PATH_A, backup_rf_reg[i], RFREGOFFSETMASK);
- RF_backup[i][ODM_RF_PATH_B] = odm_get_rf_reg(
- dm, ODM_RF_PATH_B, backup_rf_reg[i], RFREGOFFSETMASK);
- }
-}
-
-static void _iqk_agc_bnd_int_8822b(struct phy_dm_struct *dm)
-{
-	/* Initialize RX AGC bnd; it must be done after BB reset */
- odm_write_4byte(dm, 0x1b00, 0xf8000008);
- odm_write_4byte(dm, 0x1b00, 0xf80a7008);
- odm_write_4byte(dm, 0x1b00, 0xf8015008);
- odm_write_4byte(dm, 0x1b00, 0xf8000008);
-}
-
-static void _iqk_bb_reset_8822b(struct phy_dm_struct *dm)
-{
- bool cca_ing = false;
- u32 count = 0;
-
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x0, RFREGOFFSETMASK, 0x10000);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x0, RFREGOFFSETMASK, 0x10000);
-
- while (1) {
- odm_write_4byte(dm, 0x8fc, 0x0);
- odm_set_bb_reg(dm, 0x198c, 0x7, 0x7);
- cca_ing = (bool)odm_get_bb_reg(dm, 0xfa0, BIT(3));
-
- if (count > 30)
- cca_ing = false;
-
- if (cca_ing) {
- ODM_delay_ms(1);
- count++;
- } else {
- odm_write_1byte(dm, 0x808, 0x0); /*RX ant off*/
- odm_set_bb_reg(dm, 0xa04,
- BIT(27) | BIT(26) | BIT(25) | BIT(24),
- 0x0); /*CCK RX path off*/
-
- /*BBreset*/
- odm_set_bb_reg(dm, 0x0, BIT(16), 0x0);
- odm_set_bb_reg(dm, 0x0, BIT(16), 0x1);
-
- if (odm_get_bb_reg(dm, 0x660, BIT(16)))
- odm_write_4byte(dm, 0x6b4, 0x89000006);
- break;
- }
- }
-}
-
-static void _iqk_afe_setting_8822b(struct phy_dm_struct *dm, bool do_iqk)
-{
- if (do_iqk) {
- odm_write_4byte(dm, 0xc60, 0x50000000);
- odm_write_4byte(dm, 0xc60, 0x70070040);
- odm_write_4byte(dm, 0xe60, 0x50000000);
- odm_write_4byte(dm, 0xe60, 0x70070040);
-
- odm_write_4byte(dm, 0xc58, 0xd8000402);
- odm_write_4byte(dm, 0xc5c, 0xd1000120);
- odm_write_4byte(dm, 0xc6c, 0x00000a15);
- odm_write_4byte(dm, 0xe58, 0xd8000402);
- odm_write_4byte(dm, 0xe5c, 0xd1000120);
- odm_write_4byte(dm, 0xe6c, 0x00000a15);
- _iqk_bb_reset_8822b(dm);
- } else {
- odm_write_4byte(dm, 0xc60, 0x50000000);
- odm_write_4byte(dm, 0xc60, 0x70038040);
- odm_write_4byte(dm, 0xe60, 0x50000000);
- odm_write_4byte(dm, 0xe60, 0x70038040);
-
- odm_write_4byte(dm, 0xc58, 0xd8020402);
- odm_write_4byte(dm, 0xc5c, 0xde000120);
- odm_write_4byte(dm, 0xc6c, 0x0000122a);
- odm_write_4byte(dm, 0xe58, 0xd8020402);
- odm_write_4byte(dm, 0xe5c, 0xde000120);
- odm_write_4byte(dm, 0xe6c, 0x0000122a);
- }
-}
-
-static void _iqk_restore_mac_bb_8822b(struct phy_dm_struct *dm, u32 *MAC_backup,
- u32 *BB_backup, u32 *backup_mac_reg,
- u32 *backup_bb_reg)
-{
- u32 i;
-
- for (i = 0; i < MAC_REG_NUM_8822B; i++)
- odm_write_4byte(dm, backup_mac_reg[i], MAC_backup[i]);
- for (i = 0; i < BB_REG_NUM_8822B; i++)
- odm_write_4byte(dm, backup_bb_reg[i], BB_backup[i]);
-}
-
-static void _iqk_restore_rf_8822b(struct phy_dm_struct *dm, u32 *backup_rf_reg,
- u32 RF_backup[][2])
-{
- u32 i;
-
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xef, RFREGOFFSETMASK, 0x0);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xef, RFREGOFFSETMASK, 0x0);
- /*0xdf[4]=0*/
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0xdf, RFREGOFFSETMASK,
- RF_backup[0][ODM_RF_PATH_A] & (~BIT(4)));
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0xdf, RFREGOFFSETMASK,
- RF_backup[0][ODM_RF_PATH_B] & (~BIT(4)));
-
- for (i = 1; i < RF_REG_NUM_8822B; i++) {
- odm_set_rf_reg(dm, ODM_RF_PATH_A, backup_rf_reg[i],
- RFREGOFFSETMASK, RF_backup[i][ODM_RF_PATH_A]);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, backup_rf_reg[i],
- RFREGOFFSETMASK, RF_backup[i][ODM_RF_PATH_B]);
- }
-}
-
-static void _iqk_backup_iqk_8822b(struct phy_dm_struct *dm, u8 step)
-{
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- u8 path;
- u16 iqk_apply[2] = {0xc94, 0xe94};
-
- if (step == 0x0) {
- phydm_backup_iqk_information(iqk_info);
- } else {
- iqk_info->iqk_channel[0] = iqk_info->rf_reg18;
- for (path = 0; path < 2; path++) {
- iqk_info->lok_idac[0][path] =
- odm_get_rf_reg(dm, (enum odm_rf_radio_path)path,
- 0x58, RFREGOFFSETMASK);
- iqk_info->bypass_iqk[0][path] =
- odm_get_bb_reg(dm, iqk_apply[path], MASKDWORD);
-
- phydm_set_iqk_cfir(dm, iqk_info, path);
- odm_set_bb_reg(dm, 0x1bd8, MASKDWORD, 0x0);
- odm_set_bb_reg(dm, 0x1b0c, BIT(13) | BIT(12), 0x0);
- }
- }
-}
-
-static void _iqk_reload_iqk_setting_8822b(
- struct phy_dm_struct *dm, u8 channel,
- u8 reload_idx /*1: reload TX, 2: reload LO, TX, RX*/
- )
-{
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- u8 i, path, idx;
- u16 iqk_apply[2] = {0xc94, 0xe94};
-
- for (path = 0; path < 2; path++) {
- if (reload_idx == 2) {
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0xdf,
- BIT(4), 0x1);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x58,
- RFREGOFFSETMASK,
- iqk_info->lok_idac[channel][path]);
- }
-
- for (idx = 0; idx < reload_idx; idx++) {
- odm_set_bb_reg(dm, 0x1b00, MASKDWORD,
- 0xf8000008 | path << 1);
- odm_set_bb_reg(dm, 0x1b2c, MASKDWORD, 0x7);
- odm_set_bb_reg(dm, 0x1b38, MASKDWORD, 0x20000000);
- odm_set_bb_reg(dm, 0x1b3c, MASKDWORD, 0x20000000);
- odm_set_bb_reg(dm, 0x1bcc, MASKDWORD, 0x00000000);
-
- if (idx == 0)
- odm_set_bb_reg(dm, 0x1b0c, BIT(13) | BIT(12),
- 0x3);
- else
- odm_set_bb_reg(dm, 0x1b0c, BIT(13) | BIT(12),
- 0x1);
-
- odm_set_bb_reg(dm, 0x1bd4, BIT(20) | BIT(19) | BIT(18) |
- BIT(17) | BIT(16),
- 0x10);
-
- for (i = 0; i < 8; i++) {
- odm_write_4byte(
- dm, 0x1bd8,
- ((0xc0000000 >> idx) + 0x3) + (i * 4) +
- (iqk_info->iqk_cfir_real
- [channel][path][idx][i]
- << 9));
- odm_write_4byte(
- dm, 0x1bd8,
- ((0xc0000000 >> idx) + 0x1) + (i * 4) +
- (iqk_info->iqk_cfir_imag
- [channel][path][idx][i]
- << 9));
- }
- }
- odm_set_bb_reg(dm, iqk_apply[path], MASKDWORD,
- iqk_info->bypass_iqk[channel][path]);
-
- odm_set_bb_reg(dm, 0x1bd8, MASKDWORD, 0x0);
- odm_set_bb_reg(dm, 0x1b0c, BIT(13) | BIT(12), 0x0);
- }
-}
-
-static bool _iqk_reload_iqk_8822b(struct phy_dm_struct *dm, bool reset)
-{
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- u8 i;
- bool reload = false;
-
- if (reset) {
- for (i = 0; i < 2; i++)
- iqk_info->iqk_channel[i] = 0x0;
- } else {
- iqk_info->rf_reg18 = odm_get_rf_reg(dm, ODM_RF_PATH_A, 0x18,
- RFREGOFFSETMASK);
-
- for (i = 0; i < 2; i++) {
- if (iqk_info->rf_reg18 == iqk_info->iqk_channel[i]) {
- _iqk_reload_iqk_setting_8822b(dm, i, 2);
- _iqk_fill_iqk_report_8822b(dm, i);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]reload IQK result before!!!!\n");
- reload = true;
- }
- }
- }
- return reload;
-}
-
-static void _iqk_rfe_setting_8822b(struct phy_dm_struct *dm, bool ext_pa_on)
-{
- if (ext_pa_on) {
- /*RFE setting*/
- odm_write_4byte(dm, 0xcb0, 0x77777777);
- odm_write_4byte(dm, 0xcb4, 0x00007777);
- odm_write_4byte(dm, 0xcbc, 0x0000083B);
- odm_write_4byte(dm, 0xeb0, 0x77777777);
- odm_write_4byte(dm, 0xeb4, 0x00007777);
- odm_write_4byte(dm, 0xebc, 0x0000083B);
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]external PA on!!!!\n");
- } else {
- /*RFE setting*/
- odm_write_4byte(dm, 0xcb0, 0x77777777);
- odm_write_4byte(dm, 0xcb4, 0x00007777);
- odm_write_4byte(dm, 0xcbc, 0x00000100);
- odm_write_4byte(dm, 0xeb0, 0x77777777);
- odm_write_4byte(dm, 0xeb4, 0x00007777);
- odm_write_4byte(dm, 0xebc, 0x00000100);
- }
-}
-
-static void _iqk_rf_setting_8822b(struct phy_dm_struct *dm)
-{
- u8 path;
- u32 tmp;
-
- odm_write_4byte(dm, 0x1b00, 0xf8000008);
- odm_write_4byte(dm, 0x1bb8, 0x00000000);
-
- for (path = 0; path < 2; path++) {
- /*0xdf:B11 = 1,B4 = 0, B1 = 1*/
- tmp = odm_get_rf_reg(dm, (enum odm_rf_radio_path)path, 0xdf,
- RFREGOFFSETMASK);
- tmp = (tmp & (~BIT(4))) | BIT(1) | BIT(11);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0xdf,
- RFREGOFFSETMASK, tmp);
-
- /*release 0x56 TXBB*/
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x65,
- RFREGOFFSETMASK, 0x09000);
-
- if (*dm->band_type == ODM_BAND_5G) {
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0xef,
- BIT(19), 0x1);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x33,
- RFREGOFFSETMASK, 0x00026);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x3e,
- RFREGOFFSETMASK, 0x00037);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x3f,
- RFREGOFFSETMASK, 0xdefce);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0xef,
- BIT(19), 0x0);
- } else {
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0xef,
- BIT(19), 0x1);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x33,
- RFREGOFFSETMASK, 0x00026);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x3e,
- RFREGOFFSETMASK, 0x00037);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x3f,
- RFREGOFFSETMASK, 0x5efce);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0xef,
- BIT(19), 0x0);
- }
- }
-}
-
-static void _iqk_configure_macbb_8822b(struct phy_dm_struct *dm)
-{
- /*MACBB register setting*/
- odm_write_1byte(dm, 0x522, 0x7f);
- odm_set_bb_reg(dm, 0x550, BIT(11) | BIT(3), 0x0);
- odm_set_bb_reg(dm, 0x90c, BIT(15),
- 0x1); /*0x90c[15]=1: dac_buf reset selection*/
- odm_set_bb_reg(dm, 0x9a4, BIT(31),
- 0x0); /*0x9a4[31]=0: Select da clock*/
- /*0xc94[0]=1, 0xe94[0]=1: let tx through iqk*/
- odm_set_bb_reg(dm, 0xc94, BIT(0), 0x1);
- odm_set_bb_reg(dm, 0xe94, BIT(0), 0x1);
- /* 3-wire off*/
- odm_write_4byte(dm, 0xc00, 0x00000004);
- odm_write_4byte(dm, 0xe00, 0x00000004);
-}
-
-static void _iqk_lok_setting_8822b(struct phy_dm_struct *dm, u8 path)
-{
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
- odm_write_4byte(dm, 0x1bcc, 0x9);
- odm_write_1byte(dm, 0x1b23, 0x00);
-
- switch (*dm->band_type) {
- case ODM_BAND_2_4G:
- odm_write_1byte(dm, 0x1b2b, 0x00);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x56,
- RFREGOFFSETMASK, 0x50df2);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x8f,
- RFREGOFFSETMASK, 0xadc00);
- /* WE_LUT_TX_LOK*/
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0xef, BIT(4),
- 0x1);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x33,
- BIT(1) | BIT(0), 0x0);
- break;
- case ODM_BAND_5G:
- odm_write_1byte(dm, 0x1b2b, 0x80);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x56,
- RFREGOFFSETMASK, 0x5086c);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x8f,
- RFREGOFFSETMASK, 0xa9c00);
- /* WE_LUT_TX_LOK*/
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0xef, BIT(4),
- 0x1);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x33,
- BIT(1) | BIT(0), 0x1);
- break;
- }
-}
-
-static void _iqk_txk_setting_8822b(struct phy_dm_struct *dm, u8 path)
-{
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
- odm_write_4byte(dm, 0x1bcc, 0x9);
- odm_write_4byte(dm, 0x1b20, 0x01440008);
-
- if (path == 0x0)
- odm_write_4byte(dm, 0x1b00, 0xf800000a);
- else
- odm_write_4byte(dm, 0x1b00, 0xf8000008);
- odm_write_4byte(dm, 0x1bcc, 0x3f);
-
- switch (*dm->band_type) {
- case ODM_BAND_2_4G:
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x56,
- RFREGOFFSETMASK, 0x50df2);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x8f,
- RFREGOFFSETMASK, 0xadc00);
- odm_write_1byte(dm, 0x1b2b, 0x00);
- break;
- case ODM_BAND_5G:
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x56,
- RFREGOFFSETMASK, 0x500ef);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x8f,
- RFREGOFFSETMASK, 0xa9c00);
- odm_write_1byte(dm, 0x1b2b, 0x80);
- break;
- }
-}
-
-static void _iqk_rxk1_setting_8822b(struct phy_dm_struct *dm, u8 path)
-{
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
-
- switch (*dm->band_type) {
- case ODM_BAND_2_4G:
- odm_write_1byte(dm, 0x1bcc, 0x9);
- odm_write_1byte(dm, 0x1b2b, 0x00);
- odm_write_4byte(dm, 0x1b20, 0x01450008);
- odm_write_4byte(dm, 0x1b24, 0x01460c88);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x56,
- RFREGOFFSETMASK, 0x510e0);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x8f,
- RFREGOFFSETMASK, 0xacc00);
- break;
- case ODM_BAND_5G:
- odm_write_1byte(dm, 0x1bcc, 0x09);
- odm_write_1byte(dm, 0x1b2b, 0x80);
- odm_write_4byte(dm, 0x1b20, 0x00850008);
- odm_write_4byte(dm, 0x1b24, 0x00460048);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x56,
- RFREGOFFSETMASK, 0x510e0);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x8f,
- RFREGOFFSETMASK, 0xadc00);
- break;
- }
-}
-
-static void _iqk_rxk2_setting_8822b(struct phy_dm_struct *dm, u8 path,
- bool is_gs)
-{
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
-
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
-
- switch (*dm->band_type) {
- case ODM_BAND_2_4G:
- if (is_gs)
- iqk_info->tmp1bcc = 0x12;
- odm_write_1byte(dm, 0x1bcc, iqk_info->tmp1bcc);
- odm_write_1byte(dm, 0x1b2b, 0x00);
- odm_write_4byte(dm, 0x1b20, 0x01450008);
- odm_write_4byte(dm, 0x1b24, 0x01460848);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x56,
- RFREGOFFSETMASK, 0x510e0);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x8f,
- RFREGOFFSETMASK, 0xa9c00);
- break;
- case ODM_BAND_5G:
- if (is_gs) {
- if (path == ODM_RF_PATH_A)
- iqk_info->tmp1bcc = 0x12;
- else
- iqk_info->tmp1bcc = 0x09;
- }
- odm_write_1byte(dm, 0x1bcc, iqk_info->tmp1bcc);
- odm_write_1byte(dm, 0x1b2b, 0x80);
- odm_write_4byte(dm, 0x1b20, 0x00850008);
- odm_write_4byte(dm, 0x1b24, 0x00460848);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x56,
- RFREGOFFSETMASK, 0x51060);
- odm_set_rf_reg(dm, (enum odm_rf_radio_path)path, 0x8f,
- RFREGOFFSETMASK, 0xa9c00);
- break;
- }
-}
-
-static bool _iqk_check_cal_8822b(struct phy_dm_struct *dm, u32 IQK_CMD)
-{
- bool notready = true, fail = true;
- u32 delay_count = 0x0;
-
- while (notready) {
- if (odm_read_4byte(dm, 0x1b00) == (IQK_CMD & 0xffffff0f)) {
- fail = (bool)odm_get_bb_reg(dm, 0x1b08, BIT(26));
- notready = false;
- } else {
- ODM_delay_ms(1);
- delay_count++;
- }
-
- if (delay_count >= 50) {
- fail = true;
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]IQK timeout!!!\n");
- break;
- }
- }
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION, "[IQK]delay count = 0x%x!!!\n",
- delay_count);
- return fail;
-}
-
-static bool _iqk_rx_iqk_gain_search_fail_8822b(struct phy_dm_struct *dm,
- u8 path, u8 step)
-{
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- bool fail = true;
- u32 IQK_CMD = 0x0, rf_reg0, tmp, bb_idx;
- u8 IQMUX[4] = {0x9, 0x12, 0x1b, 0x24};
- u8 idx;
-
- for (idx = 0; idx < 4; idx++)
- if (iqk_info->tmp1bcc == IQMUX[idx])
- break;
-
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
- odm_write_4byte(dm, 0x1bcc, iqk_info->tmp1bcc);
-
- if (step == RXIQK1)
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]============ S%d RXIQK GainSearch ============\n",
- path);
-
- if (step == RXIQK1)
- IQK_CMD = 0xf8000208 | (1 << (path + 4));
- else
- IQK_CMD = 0xf8000308 | (1 << (path + 4));
-
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION, "[IQK]S%d GS%d_Trigger = 0x%x\n",
- path, step, IQK_CMD);
-
- odm_write_4byte(dm, 0x1b00, IQK_CMD);
- odm_write_4byte(dm, 0x1b00, IQK_CMD + 0x1);
- ODM_delay_ms(GS_delay_8822B);
- fail = _iqk_check_cal_8822b(dm, IQK_CMD);
-
- if (step == RXIQK2) {
- rf_reg0 = odm_get_rf_reg(dm, (enum odm_rf_radio_path)path, 0x0,
- RFREGOFFSETMASK);
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]S%d ==> RF0x0 = 0x%x, tmp1bcc = 0x%x, idx = %d, 0x1b3c = 0x%x\n",
- path, rf_reg0, iqk_info->tmp1bcc, idx,
- odm_read_4byte(dm, 0x1b3c));
- tmp = (rf_reg0 & 0x1fe0) >> 5;
- iqk_info->lna_idx = tmp >> 5;
- bb_idx = tmp & 0x1f;
- if (bb_idx == 0x1) {
- if (iqk_info->lna_idx != 0x0)
- iqk_info->lna_idx--;
- else if (idx != 3)
- idx++;
- else
- iqk_info->isbnd = true;
- fail = true;
- } else if (bb_idx == 0xa) {
- if (idx != 0)
- idx--;
- else if (iqk_info->lna_idx != 0x7)
- iqk_info->lna_idx++;
- else
- iqk_info->isbnd = true;
- fail = true;
- } else {
- fail = false;
- }
-
- if (iqk_info->isbnd)
- fail = false;
-
- iqk_info->tmp1bcc = IQMUX[idx];
-
- if (fail) {
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
- odm_write_4byte(
- dm, 0x1b24,
- (odm_read_4byte(dm, 0x1b24) & 0xffffe3ff) |
- (iqk_info->lna_idx << 10));
- }
- }
-
- return fail;
-}
-
-static bool _lok_one_shot_8822b(void *dm_void, u8 path)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- u8 delay_count = 0;
- bool LOK_notready = false;
- u32 LOK_temp = 0;
- u32 IQK_CMD = 0x0;
-
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]==========S%d LOK ==========\n", path);
-
- IQK_CMD = 0xf8000008 | (1 << (4 + path));
-
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION, "[IQK]LOK_Trigger = 0x%x\n",
- IQK_CMD);
-
- odm_write_4byte(dm, 0x1b00, IQK_CMD);
- odm_write_4byte(dm, 0x1b00, IQK_CMD + 1);
- /*LOK: CMD ID = 0 {0xf8000018, 0xf8000028}*/
- /*LOK: CMD ID = 0 {0xf8000019, 0xf8000029}*/
- ODM_delay_ms(LOK_delay_8822B);
-
- delay_count = 0;
- LOK_notready = true;
-
- while (LOK_notready) {
- if (odm_read_4byte(dm, 0x1b00) == (IQK_CMD & 0xffffff0f))
- LOK_notready = false;
- else
- LOK_notready = true;
-
- if (LOK_notready) {
- ODM_delay_ms(1);
- delay_count++;
- }
-
- if (delay_count >= 50) {
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S%d LOK timeout!!!\n", path);
- break;
- }
- }
-
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S%d ==> delay_count = 0x%x\n", path, delay_count);
- if (ODM_COMP_CALIBRATION) {
- if (!LOK_notready) {
- LOK_temp =
- odm_get_rf_reg(dm, (enum odm_rf_radio_path)path,
- 0x58, RFREGOFFSETMASK);
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]0x58 = 0x%x\n", LOK_temp);
- } else {
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]==>S%d LOK Fail!!!\n", path);
- }
- }
- iqk_info->lok_fail[path] = LOK_notready;
- return LOK_notready;
-}
-
-static bool _iqk_one_shot_8822b(void *dm_void, u8 path, u8 idx)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- u8 delay_count = 0;
- bool notready = true, fail = true;
- u32 IQK_CMD = 0x0;
- u16 iqk_apply[2] = {0xc94, 0xe94};
-
- if (idx == TXIQK)
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]============ S%d WBTXIQK ============\n",
- path);
- else if (idx == RXIQK1)
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]============ S%d WBRXIQK STEP1============\n",
- path);
- else
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]============ S%d WBRXIQK STEP2============\n",
- path);
-
- if (idx == TXIQK) {
- IQK_CMD = 0xf8000008 | ((*dm->band_width + 4) << 8) |
- (1 << (path + 4));
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]TXK_Trigger = 0x%x\n", IQK_CMD);
- /*{0xf8000418, 0xf800042a} ==> 20 WBTXK (CMD = 4)*/
- /*{0xf8000518, 0xf800052a} ==> 40 WBTXK (CMD = 5)*/
- /*{0xf8000618, 0xf800062a} ==> 80 WBTXK (CMD = 6)*/
- } else if (idx == RXIQK1) {
- if (*dm->band_width == 2)
- IQK_CMD = 0xf8000808 | (1 << (path + 4));
- else
- IQK_CMD = 0xf8000708 | (1 << (path + 4));
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]RXK1_Trigger = 0x%x\n", IQK_CMD);
- /*{0xf8000718, 0xf800072a} ==> 20 WBTXK (CMD = 7)*/
- /*{0xf8000718, 0xf800072a} ==> 40 WBTXK (CMD = 7)*/
- /*{0xf8000818, 0xf800082a} ==> 80 WBTXK (CMD = 8)*/
- } else if (idx == RXIQK2) {
- IQK_CMD = 0xf8000008 | ((*dm->band_width + 9) << 8) |
- (1 << (path + 4));
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]RXK2_Trigger = 0x%x\n", IQK_CMD);
- /*{0xf8000918, 0xf800092a} ==> 20 WBRXK (CMD = 9)*/
- /*{0xf8000a18, 0xf8000a2a} ==> 40 WBRXK (CMD = 10)*/
- /*{0xf8000b18, 0xf8000b2a} ==> 80 WBRXK (CMD = 11)*/
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
- odm_write_4byte(dm, 0x1b24,
- (odm_read_4byte(dm, 0x1b24) & 0xffffe3ff) |
- ((iqk_info->lna_idx & 0x7) << 10));
- }
- odm_write_4byte(dm, 0x1b00, IQK_CMD);
- odm_write_4byte(dm, 0x1b00, IQK_CMD + 0x1);
- ODM_delay_ms(WBIQK_delay_8822B);
-
- while (notready) {
- if (odm_read_4byte(dm, 0x1b00) == (IQK_CMD & 0xffffff0f))
- notready = false;
- else
- notready = true;
-
- if (notready) {
- ODM_delay_ms(1);
- delay_count++;
- } else {
- fail = (bool)odm_get_bb_reg(dm, 0x1b08, BIT(26));
- break;
- }
-
- if (delay_count >= 50) {
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S%d IQK timeout!!!\n", path);
- break;
- }
- }
-
- if (dm->debug_components & ODM_COMP_CALIBRATION) {
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S%d ==> 0x1b00 = 0x%x, 0x1b08 = 0x%x\n",
- path, odm_read_4byte(dm, 0x1b00),
- odm_read_4byte(dm, 0x1b08));
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S%d ==> delay_count = 0x%x\n", path,
- delay_count);
- if (idx != TXIQK)
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]S%d ==> RF0x0 = 0x%x, RF0x56 = 0x%x\n",
- path,
- odm_get_rf_reg(dm, (enum odm_rf_radio_path)path,
- 0x0, RFREGOFFSETMASK),
- odm_get_rf_reg(dm, (enum odm_rf_radio_path)path,
- 0x56, RFREGOFFSETMASK));
- }
-
- odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
-
- if (idx == TXIQK)
- if (fail)
- odm_set_bb_reg(dm, iqk_apply[path], BIT(0), 0x0);
-
- if (idx == RXIQK2) {
- iqk_info->rxiqk_agc[0][path] =
- (u16)(((odm_get_rf_reg(dm, (enum odm_rf_radio_path)path,
- 0x0, RFREGOFFSETMASK) >>
- 5) &
- 0xff) |
- (iqk_info->tmp1bcc << 8));
-
- odm_write_4byte(dm, 0x1b38, 0x20000000);
-
- if (!fail)
- odm_set_bb_reg(dm, iqk_apply[path], (BIT(11) | BIT(10)),
- 0x1);
- else
- odm_set_bb_reg(dm, iqk_apply[path], (BIT(11) | BIT(10)),
- 0x0);
- }
-
- if (idx == TXIQK)
- iqk_info->iqk_fail_report[0][path][TXIQK] = fail;
- else
- iqk_info->iqk_fail_report[0][path][RXIQK] = fail;
-
- return fail;
-}
-
-static bool _iqk_rx_iqk_by_path_8822b(void *dm_void, u8 path)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- bool KFAIL = true, gonext;
-
- switch (iqk_info->rxiqk_step) {
- case 1: /*gain search_RXK1*/
- _iqk_rxk1_setting_8822b(dm, path);
- gonext = false;
- while (1) {
- KFAIL = _iqk_rx_iqk_gain_search_fail_8822b(dm, path,
- RXIQK1);
- if (KFAIL &&
- (iqk_info->gs_retry_count[0][path][GSRXK1] < 2))
- iqk_info->gs_retry_count[0][path][GSRXK1]++;
- else if (KFAIL) {
- iqk_info->rxiqk_fail_code[0][path] = 0;
- iqk_info->rxiqk_step = 5;
- gonext = true;
- } else {
- iqk_info->rxiqk_step++;
- gonext = true;
- }
- if (gonext)
- break;
- }
- break;
- case 2: /*gain search_RXK2*/
- _iqk_rxk2_setting_8822b(dm, path, true);
- iqk_info->isbnd = false;
- while (1) {
- KFAIL = _iqk_rx_iqk_gain_search_fail_8822b(dm, path,
- RXIQK2);
- if (KFAIL &&
- (iqk_info->gs_retry_count[0][path][GSRXK2] <
- rxiqk_gs_limit)) {
- iqk_info->gs_retry_count[0][path][GSRXK2]++;
- } else {
- iqk_info->rxiqk_step++;
- break;
- }
- }
- break;
- case 3: /*RXK1*/
- _iqk_rxk1_setting_8822b(dm, path);
- gonext = false;
- while (1) {
- KFAIL = _iqk_one_shot_8822b(dm, path, RXIQK1);
- if (KFAIL &&
- (iqk_info->retry_count[0][path][RXIQK1] < 2))
- iqk_info->retry_count[0][path][RXIQK1]++;
- else if (KFAIL) {
- iqk_info->rxiqk_fail_code[0][path] = 1;
- iqk_info->rxiqk_step = 5;
- gonext = true;
- } else {
- iqk_info->rxiqk_step++;
- gonext = true;
- }
- if (gonext)
- break;
- }
- break;
- case 4: /*RXK2*/
- _iqk_rxk2_setting_8822b(dm, path, false);
- gonext = false;
- while (1) {
- KFAIL = _iqk_one_shot_8822b(dm, path, RXIQK2);
- if (KFAIL &&
- (iqk_info->retry_count[0][path][RXIQK2] < 2))
- iqk_info->retry_count[0][path][RXIQK2]++;
- else if (KFAIL) {
- iqk_info->rxiqk_fail_code[0][path] = 2;
- iqk_info->rxiqk_step = 5;
- gonext = true;
- } else {
- iqk_info->rxiqk_step++;
- gonext = true;
- }
- if (gonext)
- break;
- }
- break;
- }
- return KFAIL;
-}
-
-static void _iqk_iqk_by_path_8822b(void *dm_void, bool segment_iqk)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- bool KFAIL = true;
- u8 i, kcount_limit;
-
- if (*dm->band_width == 2)
- kcount_limit = kcount_limit_80m;
- else
- kcount_limit = kcount_limit_others;
-
- while (1) {
- switch (dm->rf_calibrate_info.iqk_step) {
- case 1: /*S0 LOK*/
- _iqk_lok_setting_8822b(dm, ODM_RF_PATH_A);
- _lok_one_shot_8822b(dm, ODM_RF_PATH_A);
- dm->rf_calibrate_info.iqk_step++;
- break;
- case 2: /*S1 LOK*/
- _iqk_lok_setting_8822b(dm, ODM_RF_PATH_B);
- _lok_one_shot_8822b(dm, ODM_RF_PATH_B);
- dm->rf_calibrate_info.iqk_step++;
- break;
- case 3: /*S0 TXIQK*/
- _iqk_txk_setting_8822b(dm, ODM_RF_PATH_A);
- KFAIL = _iqk_one_shot_8822b(dm, ODM_RF_PATH_A, TXIQK);
- iqk_info->kcount++;
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S0TXK KFail = 0x%x\n", KFAIL);
-
- if (KFAIL &&
- (iqk_info->retry_count[0][ODM_RF_PATH_A][TXIQK] <
- 3))
- iqk_info->retry_count[0][ODM_RF_PATH_A]
- [TXIQK]++;
- else
- dm->rf_calibrate_info.iqk_step++;
- break;
- case 4: /*S1 TXIQK*/
- _iqk_txk_setting_8822b(dm, ODM_RF_PATH_B);
- KFAIL = _iqk_one_shot_8822b(dm, ODM_RF_PATH_B, TXIQK);
- iqk_info->kcount++;
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]S1TXK KFail = 0x%x\n", KFAIL);
- if (KFAIL &&
- iqk_info->retry_count[0][ODM_RF_PATH_B][TXIQK] < 3)
- iqk_info->retry_count[0][ODM_RF_PATH_B]
- [TXIQK]++;
- else
- dm->rf_calibrate_info.iqk_step++;
- break;
- case 5: /*S0 RXIQK*/
- phydm_set_iqk_info(dm, iqk_info, 0);
- break;
- case 6: /*S1 RXIQK*/
- phydm_set_iqk_info(dm, iqk_info, 1);
- break;
- }
-
- if (dm->rf_calibrate_info.iqk_step == 7) {
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]==========LOK summary ==========\n");
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]PathA_LOK_notready = %d, PathB_LOK1_notready = %d\n",
- iqk_info->lok_fail[ODM_RF_PATH_A],
- iqk_info->lok_fail[ODM_RF_PATH_B]);
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]==========IQK summary ==========\n");
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]PathA_TXIQK_fail = %d, PathB_TXIQK_fail = %d\n",
- iqk_info->iqk_fail_report[0][ODM_RF_PATH_A]
- [TXIQK],
- iqk_info->iqk_fail_report[0][ODM_RF_PATH_B]
- [TXIQK]);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]PathA_RXIQK_fail = %d, PathB_RXIQK_fail = %d\n",
- iqk_info->iqk_fail_report[0][ODM_RF_PATH_A]
- [RXIQK],
- iqk_info->iqk_fail_report[0][ODM_RF_PATH_B]
- [RXIQK]);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]PathA_TXIQK_retry = %d, PathB_TXIQK_retry = %d\n",
- iqk_info->retry_count[0][ODM_RF_PATH_A][TXIQK],
- iqk_info->retry_count[0][ODM_RF_PATH_B][TXIQK]);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]PathA_RXK1_retry = %d, PathA_RXK2_retry = %d, PathB_RXK1_retry = %d, PathB_RXK2_retry = %d\n",
- iqk_info->retry_count[0][ODM_RF_PATH_A][RXIQK1],
- iqk_info->retry_count[0][ODM_RF_PATH_A][RXIQK2],
- iqk_info->retry_count[0][ODM_RF_PATH_B][RXIQK1],
- iqk_info->retry_count[0][ODM_RF_PATH_B]
- [RXIQK2]);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]PathA_GS1_retry = %d, PathA_GS2_retry = %d, PathB_GS1_retry = %d, PathB_GS2_retry = %d\n",
- iqk_info->gs_retry_count[0][ODM_RF_PATH_A]
- [GSRXK1],
- iqk_info->gs_retry_count[0][ODM_RF_PATH_A]
- [GSRXK2],
- iqk_info->gs_retry_count[0][ODM_RF_PATH_B]
- [GSRXK1],
- iqk_info->gs_retry_count[0][ODM_RF_PATH_B]
- [GSRXK2]);
- for (i = 0; i < 2; i++) {
- odm_write_4byte(dm, 0x1b00,
- 0xf8000008 | i << 1);
- odm_write_4byte(dm, 0x1b2c, 0x7);
- odm_write_4byte(dm, 0x1bcc, 0x0);
- }
- break;
- }
-
- if (segment_iqk && (iqk_info->kcount == kcount_limit))
- break;
- }
-}
-
-static void _iqk_start_iqk_8822b(struct phy_dm_struct *dm, bool segment_iqk)
-{
- u32 tmp;
-
- /*GNT_WL = 1*/
- tmp = odm_get_rf_reg(dm, ODM_RF_PATH_A, 0x1, RFREGOFFSETMASK);
- tmp = tmp | BIT(5) | BIT(0);
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x1, RFREGOFFSETMASK, tmp);
-
- tmp = odm_get_rf_reg(dm, ODM_RF_PATH_B, 0x1, RFREGOFFSETMASK);
- tmp = tmp | BIT(5) | BIT(0);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x1, RFREGOFFSETMASK, tmp);
-
- _iqk_iqk_by_path_8822b(dm, segment_iqk);
-}
-
-static void _iq_calibrate_8822b_init(void *dm_void)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
- u8 i, j;
-
- if (iqk_info->iqk_times == 0) {
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]=====>PHY_IQCalibrate_8822B_Init\n");
-
- for (i = 0; i < SS_8822B; i++) {
- for (j = 0; j < 2; j++) {
- iqk_info->lok_fail[i] = true;
- iqk_info->iqk_fail[j][i] = true;
- iqk_info->iqc_matrix[j][i] = 0x20000000;
- }
- }
-
- phydm_init_iqk_information(iqk_info);
- }
-}
-
-static void _phy_iq_calibrate_8822b(struct phy_dm_struct *dm, bool reset)
-{
- u32 MAC_backup[MAC_REG_NUM_8822B], BB_backup[BB_REG_NUM_8822B],
- RF_backup[RF_REG_NUM_8822B][SS_8822B];
- u32 backup_mac_reg[MAC_REG_NUM_8822B] = {0x520, 0x550};
- u32 backup_bb_reg[BB_REG_NUM_8822B] = {
- 0x808, 0x90c, 0xc00, 0xcb0, 0xcb4, 0xcbc, 0xe00,
- 0xeb0, 0xeb4, 0xebc, 0x1990, 0x9a4, 0xa04};
- u32 backup_rf_reg[RF_REG_NUM_8822B] = {0xdf, 0x8f, 0x65, 0x0, 0x1};
- bool segment_iqk = false, is_mp = false;
-
- struct dm_iqk_info *iqk_info = &dm->IQK_info;
-
- if (dm->mp_mode)
- is_mp = true;
- else if (dm->is_linked)
- segment_iqk = true;
-
- if (!is_mp)
- if (_iqk_reload_iqk_8822b(dm, reset))
- return;
-
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
-		     "[IQK]==========IQK start!!!!!==========\n");
-
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]band_type = %s, band_width = %d, ExtPA2G = %d, ext_pa_5g = %d\n",
- (*dm->band_type == ODM_BAND_5G) ? "5G" : "2G", *dm->band_width,
- dm->ext_pa, dm->ext_pa_5g);
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]Interface = %d, cut_version = %x\n",
- dm->support_interface, dm->cut_version);
-
- iqk_info->iqk_times++;
-
- iqk_info->kcount = 0;
- dm->rf_calibrate_info.iqk_total_progressing_time = 0;
- dm->rf_calibrate_info.iqk_step = 1;
- iqk_info->rxiqk_step = 1;
-
- _iqk_backup_iqk_8822b(dm, 0);
- _iqk_backup_mac_bb_8822b(dm, MAC_backup, BB_backup, backup_mac_reg,
- backup_bb_reg);
- _iqk_backup_rf_8822b(dm, RF_backup, backup_rf_reg);
-
- while (1) {
- if (!is_mp)
- dm->rf_calibrate_info.iqk_start_time =
- odm_get_current_time(dm);
-
- _iqk_configure_macbb_8822b(dm);
- _iqk_afe_setting_8822b(dm, true);
- _iqk_rfe_setting_8822b(dm, false);
- _iqk_agc_bnd_int_8822b(dm);
- _iqk_rf_setting_8822b(dm);
-
- _iqk_start_iqk_8822b(dm, segment_iqk);
-
- _iqk_afe_setting_8822b(dm, false);
- _iqk_restore_mac_bb_8822b(dm, MAC_backup, BB_backup,
- backup_mac_reg, backup_bb_reg);
- _iqk_restore_rf_8822b(dm, backup_rf_reg, RF_backup);
-
- if (!is_mp) {
- dm->rf_calibrate_info.iqk_progressing_time =
- odm_get_progressing_time(
- dm,
- dm->rf_calibrate_info.iqk_start_time);
- dm->rf_calibrate_info.iqk_total_progressing_time +=
- odm_get_progressing_time(
- dm,
- dm->rf_calibrate_info.iqk_start_time);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]IQK progressing_time = %lld ms\n",
- dm->rf_calibrate_info.iqk_progressing_time);
- }
-
- if (dm->rf_calibrate_info.iqk_step == 7)
- break;
-
- iqk_info->kcount = 0;
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION, "[IQK]delay 50ms!!!\n");
- ODM_delay_ms(50);
- }
-
- _iqk_backup_iqk_8822b(dm, 1);
- _iqk_fill_iqk_report_8822b(dm, 0);
-
- if (!is_mp)
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]Total IQK progressing_time = %lld ms\n",
- dm->rf_calibrate_info.iqk_total_progressing_time);
-
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]==========IQK end!!!!!==========\n");
-}
-
-static void _phy_iq_calibrate_by_fw_8822b(void *dm_void, u8 clear) {}
-
-/*IQK version: v3.3, NCTL v0.6*/
-/*1. The new gain-search method for RXIQK*/
-/*2. The new format of the IQK report registers: 0x1be8/0x1bec*/
-/*3. Add the option of segment IQK*/
-void phy_iq_calibrate_8822b(void *dm_void, bool clear)
-{
- struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
-
- dm->iqk_fw_offload = 0;
-
- /*FW IQK*/
- if (dm->iqk_fw_offload) {
- if (!dm->rf_calibrate_info.is_iqk_in_progress) {
- odm_acquire_spin_lock(dm, RT_IQK_SPINLOCK);
- dm->rf_calibrate_info.is_iqk_in_progress = true;
- odm_release_spin_lock(dm, RT_IQK_SPINLOCK);
-
- dm->rf_calibrate_info.iqk_start_time =
- odm_get_current_time(dm);
-
- odm_write_4byte(dm, 0x1b00, 0xf8000008);
- odm_set_bb_reg(dm, 0x1bf0, 0xff000000, 0xff);
- ODM_RT_TRACE(dm, ODM_COMP_CALIBRATION,
- "[IQK]0x1bf0 = 0x%x\n",
- odm_read_4byte(dm, 0x1bf0));
-
- _phy_iq_calibrate_by_fw_8822b(dm, clear);
- phydm_get_read_counter(dm);
-
- dm->rf_calibrate_info.iqk_progressing_time =
- odm_get_progressing_time(
- dm,
- dm->rf_calibrate_info.iqk_start_time);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]IQK progressing_time = %lld ms\n",
- dm->rf_calibrate_info.iqk_progressing_time);
-
- odm_acquire_spin_lock(dm, RT_IQK_SPINLOCK);
- dm->rf_calibrate_info.is_iqk_in_progress = false;
- odm_release_spin_lock(dm, RT_IQK_SPINLOCK);
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
-				"== Return the IQK CMD, because IQK is in progress ==\n");
- }
-
- } else {
- _iq_calibrate_8822b_init(dm_void);
-
- if (!dm->rf_calibrate_info.is_iqk_in_progress) {
- odm_acquire_spin_lock(dm, RT_IQK_SPINLOCK);
- dm->rf_calibrate_info.is_iqk_in_progress = true;
- odm_release_spin_lock(dm, RT_IQK_SPINLOCK);
- if (dm->mp_mode)
- dm->rf_calibrate_info.iqk_start_time =
- odm_get_current_time(dm);
-
- _phy_iq_calibrate_8822b(dm, clear);
- if (dm->mp_mode) {
- dm->rf_calibrate_info.iqk_progressing_time =
- odm_get_progressing_time(
- dm, dm->rf_calibrate_info
- .iqk_start_time);
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
- "[IQK]IQK progressing_time = %lld ms\n",
- dm->rf_calibrate_info
- .iqk_progressing_time);
- }
- odm_acquire_spin_lock(dm, RT_IQK_SPINLOCK);
- dm->rf_calibrate_info.is_iqk_in_progress = false;
- odm_release_spin_lock(dm, RT_IQK_SPINLOCK);
- } else {
- ODM_RT_TRACE(
- dm, ODM_COMP_CALIBRATION,
-				"[IQK]== Return the IQK CMD, because IQK is in progress ==\n");
- }
- }
-}
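
The entry point above serializes concurrent triggers through the is_iqk_in_progress flag, which is tested and set under RT_IQK_SPINLOCK before calibration starts and cleared under the same lock when it completes; a second request arriving mid-run is simply rejected. The minimal userspace sketch below illustrates that guard pattern; the pthread mutex and helper names are hypothetical stand-ins, not the driver's own lock wrappers.

/*
 * Illustrative userspace analogue of the is_iqk_in_progress guard used
 * above: a caller only starts calibration if no other run is active.
 * The mutex and helper names are hypothetical, not the driver's
 * RT_IQK_SPINLOCK wrappers.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cal_lock = PTHREAD_MUTEX_INITIALIZER;
static bool cal_in_progress;

static bool try_start_calibration(void)
{
	bool started = false;

	pthread_mutex_lock(&cal_lock);
	if (!cal_in_progress) {
		cal_in_progress = true;
		started = true;
	}
	pthread_mutex_unlock(&cal_lock);
	return started;
}

static void finish_calibration(void)
{
	pthread_mutex_lock(&cal_lock);
	cal_in_progress = false;
	pthread_mutex_unlock(&cal_lock);
}

int main(void)
{
	if (try_start_calibration()) {
		/* ...run the calibration body here... */
		finish_calibration();
		puts("calibration done");
	} else {
		puts("calibration already in progress, request dropped");
	}
	return 0;
}
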
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h
deleted file mode 100644
index 246518e8bf8f..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __PHYDM_IQK_8822B_H__
-#define __PHYDM_IQK_8822B_H__
-
-/*--------------------------Define Parameters-------------------------------*/
-#define MAC_REG_NUM_8822B 2
-#define BB_REG_NUM_8822B 13
-#define RF_REG_NUM_8822B 5
-
-#define LOK_delay_8822B 2
-#define GS_delay_8822B 2
-#define WBIQK_delay_8822B 2
-
-#define TXIQK 0
-#define RXIQK 1
-#define SS_8822B 2
-
-/*------------------------End Define Parameters-------------------------------*/
-
-void do_iqk_8822b(void *dm_void, u8 delta_thermal_index, u8 thermal_value,
- u8 threshold);
-
-void phy_iq_calibrate_8822b(void *dm_void, bool clear);
-
-#endif /* #ifndef __PHYDM_IQK_8822B_H__*/
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c
deleted file mode 100644
index 8f96c77974cc..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../mp_precomp.h"
-#include "../phydm_precomp.h"
-
-void odm_config_rf_reg_8822b(struct phy_dm_struct *dm, u32 addr, u32 data,
- enum odm_rf_radio_path RF_PATH, u32 reg_addr)
-{
- if (addr == 0xffe) {
- ODM_sleep_ms(50);
- } else if (addr == 0xfe) {
- ODM_delay_us(100);
- } else {
- odm_set_rf_reg(dm, RF_PATH, reg_addr, RFREGOFFSETMASK, data);
-
-		/* Add a 1 us delay between BB/RF register settings. */
- ODM_delay_us(1);
- }
-}
-
-void odm_config_rf_radio_a_8822b(struct phy_dm_struct *dm, u32 addr, u32 data)
-{
- u32 content = 0x1000; /* RF_Content: radioa_txt */
- u32 maskfor_phy_set = (u32)(content & 0xE000);
-
- odm_config_rf_reg_8822b(dm, addr, data, ODM_RF_PATH_A,
- addr | maskfor_phy_set);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> odm_config_rf_with_header_file: [RadioA] %08X %08X\n",
- addr, data);
-}
-
-void odm_config_rf_radio_b_8822b(struct phy_dm_struct *dm, u32 addr, u32 data)
-{
- u32 content = 0x1001; /* RF_Content: radiob_txt */
- u32 maskfor_phy_set = (u32)(content & 0xE000);
-
- odm_config_rf_reg_8822b(dm, addr, data, ODM_RF_PATH_B,
- addr | maskfor_phy_set);
-
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> odm_config_rf_with_header_file: [RadioB] %08X %08X\n",
- addr, data);
-}
-
-void odm_config_mac_8822b(struct phy_dm_struct *dm, u32 addr, u8 data)
-{
- odm_write_1byte(dm, addr, data);
- ODM_RT_TRACE(
- dm, ODM_COMP_INIT,
- "===> odm_config_mac_with_header_file: [MAC_REG] %08X %08X\n",
- addr, data);
-}
-
-void odm_update_agc_big_jump_lmt_8822b(struct phy_dm_struct *dm, u32 addr,
- u32 data)
-{
- struct dig_thres *dig_tab = &dm->dm_dig_table;
- u8 rf_gain_idx = (u8)((data & 0xFF000000) >> 24);
- u8 bb_gain_idx = (u8)((data & 0x00ff0000) >> 16);
- u8 agc_table_idx = (u8)((data & 0x00000f00) >> 8);
- static bool is_limit;
-
- if (addr != 0x81c)
- return;
-
- if (bb_gain_idx > 0x3c) {
- if ((rf_gain_idx == dig_tab->rf_gain_idx) && !is_limit) {
- is_limit = true;
- dig_tab->big_jump_lmt[agc_table_idx] = bb_gain_idx - 2;
- ODM_RT_TRACE(
- dm, ODM_COMP_DIG,
- "===> [AGC_TAB] big_jump_lmt [%d] = 0x%x\n",
- agc_table_idx,
- dig_tab->big_jump_lmt[agc_table_idx]);
- }
- } else {
- is_limit = false;
- }
-
- dig_tab->rf_gain_idx = rf_gain_idx;
-}
-
-void odm_config_bb_agc_8822b(struct phy_dm_struct *dm, u32 addr, u32 bitmask,
- u32 data)
-{
- odm_update_agc_big_jump_lmt_8822b(dm, addr, data);
-
- odm_set_bb_reg(dm, addr, bitmask, data);
-
-	/* Add a 1 us delay between BB/RF register settings. */
- ODM_delay_us(1);
-
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> %s: [AGC_TAB] %08X %08X\n",
- __func__, addr, data);
-}
-
-void odm_config_bb_phy_reg_pg_8822b(struct phy_dm_struct *dm, u32 band,
- u32 rf_path, u32 tx_num, u32 addr,
- u32 bitmask, u32 data)
-{
- if (addr == 0xfe || addr == 0xffe) {
- ODM_sleep_ms(50);
- } else {
- phy_store_tx_power_by_rate(dm->adapter, band, rf_path, tx_num,
- addr, bitmask, data);
- }
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> %s: [PHY_REG] %08X %08X %08X\n",
- __func__, addr, bitmask, data);
-}
-
-void odm_config_bb_phy_8822b(struct phy_dm_struct *dm, u32 addr, u32 bitmask,
- u32 data)
-{
- if (addr == 0xfe)
- ODM_sleep_ms(50);
- else if (addr == 0xfd)
- ODM_delay_ms(5);
- else if (addr == 0xfc)
- ODM_delay_ms(1);
- else if (addr == 0xfb)
- ODM_delay_us(50);
- else if (addr == 0xfa)
- ODM_delay_us(5);
- else if (addr == 0xf9)
- ODM_delay_us(1);
- else
- odm_set_bb_reg(dm, addr, bitmask, data);
-
-	/* Add a 1 us delay between BB/RF register settings. */
- ODM_delay_us(1);
- ODM_RT_TRACE(dm, ODM_COMP_INIT, "===> %s: [PHY_REG] %08X %08X\n",
- __func__, addr, data);
-}
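
odm_config_bb_phy_8822b above interprets a small set of pseudo-addresses (0xfe down to 0xf9) embedded in the parameter tables as delay commands rather than real baseband registers. The standalone sketch below restates that mapping as a lookup table instead of an else-if chain; only the address-to-delay pairs come from the code, and the type and function names are made up for illustration.

/*
 * Standalone sketch: the delay pseudo-addresses from the else-if chain
 * above expressed as a lookup table. Only the address-to-delay pairs
 * are taken from the code; the names here are invented.
 */
#include <stdio.h>

struct delay_sentinel {
	unsigned int addr;
	unsigned int usecs;
};

static const struct delay_sentinel sentinels[] = {
	{ 0xfe, 50000 }, { 0xfd, 5000 }, { 0xfc, 1000 },
	{ 0xfb, 50 },    { 0xfa, 5 },    { 0xf9, 1 },
};

static int sentinel_delay_us(unsigned int addr, unsigned int *usecs)
{
	size_t i;

	for (i = 0; i < sizeof(sentinels) / sizeof(sentinels[0]); i++) {
		if (sentinels[i].addr == addr) {
			*usecs = sentinels[i].usecs;
			return 1;
		}
	}
	return 0; /* a real BB register, not a delay command */
}

int main(void)
{
	unsigned int us;

	if (sentinel_delay_us(0xfd, &us))
		printf("0xfd -> delay %u us\n", us); /* prints 5000 */
	return 0;
}
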
-
-void odm_config_bb_txpwr_lmt_8822b(struct phy_dm_struct *dm, u8 *regulation,
- u8 *band, u8 *bandwidth, u8 *rate_section,
- u8 *rf_path, u8 *channel, u8 *power_limit)
-{
- phy_set_tx_power_limit(dm, regulation, band, bandwidth, rate_section,
- rf_path, channel, power_limit);
-}
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h
deleted file mode 100644
index 4606427bd273..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __INC_ODM_REGCONFIG_H_8822B
-#define __INC_ODM_REGCONFIG_H_8822B
-
-void odm_config_rf_reg_8822b(struct phy_dm_struct *dm, u32 addr, u32 data,
- enum odm_rf_radio_path RF_PATH, u32 reg_addr);
-
-void odm_config_rf_radio_a_8822b(struct phy_dm_struct *dm, u32 addr, u32 data);
-
-void odm_config_rf_radio_b_8822b(struct phy_dm_struct *dm, u32 addr, u32 data);
-
-void odm_config_mac_8822b(struct phy_dm_struct *dm, u32 addr, u8 data);
-
-void odm_update_agc_big_jump_lmt_8822b(struct phy_dm_struct *dm, u32 addr,
- u32 data);
-
-void odm_config_bb_agc_8822b(struct phy_dm_struct *dm, u32 addr, u32 bitmask,
- u32 data);
-
-void odm_config_bb_phy_reg_pg_8822b(struct phy_dm_struct *dm, u32 band,
- u32 rf_path, u32 tx_num, u32 addr,
- u32 bitmask, u32 data);
-
-void odm_config_bb_phy_8822b(struct phy_dm_struct *dm, u32 addr, u32 bitmask,
- u32 data);
-
-void odm_config_bb_txpwr_lmt_8822b(struct phy_dm_struct *dm, u8 *regulation,
- u8 *band, u8 *bandwidth, u8 *rate_section,
- u8 *rf_path, u8 *channel, u8 *power_limit);
-
-#endif /* RTL8822B_SUPPORT == 1*/
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c
deleted file mode 100644
index a05c8aa53b0e..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c
+++ /dev/null
@@ -1,214 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../mp_precomp.h"
-#include "../phydm_precomp.h"
-
-static void phydm_dynamic_switch_htstf_mumimo_8822b(struct phy_dm_struct *dm)
-{
-	/*if rssi_min >= 40, enable the HT-STF gain controller;
-	 *if rssi_min < 35, disable it (hysteresis between 35 and 40)
-	 */
- /*add by Chun-Hung Ho 20160711 */
- if (dm->rssi_min >= 40)
- odm_set_bb_reg(dm, 0x8d8, BIT(17), 0x1);
- else if (dm->rssi_min < 35)
- odm_set_bb_reg(dm, 0x8d8, BIT(17), 0x0);
-
- ODM_RT_TRACE(dm, ODM_COMP_COMMON, "%s, rssi_min = %d\n", __func__,
- dm->rssi_min);
-}
-
-static void _set_tx_a_cali_value(struct phy_dm_struct *dm, u8 rf_path,
- u8 offset, u8 tx_a_bias_offset)
-{
- u32 modi_tx_a_value = 0;
- u8 tmp1_byte = 0;
- bool is_minus = false;
- u8 comp_value = 0;
-
- switch (offset) {
- case 0x0:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X10124);
- break;
- case 0x1:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X10524);
- break;
- case 0x2:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X10924);
- break;
- case 0x3:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X10D24);
- break;
- case 0x4:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X30164);
- break;
- case 0x5:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X30564);
- break;
- case 0x6:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X30964);
- break;
- case 0x7:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X30D64);
- break;
- case 0x8:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X50195);
- break;
- case 0x9:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X50595);
- break;
- case 0xa:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X50995);
- break;
- case 0xb:
- odm_set_rf_reg(dm, rf_path, 0x18, 0xFFFFF, 0X50D95);
- break;
- default:
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "Invalid TxA band offset...\n");
- return;
- }
-
- /* Get TxA value */
- modi_tx_a_value = odm_get_rf_reg(dm, rf_path, 0x61, 0xFFFFF);
- tmp1_byte = (u8)modi_tx_a_value & (BIT(3) | BIT(2) | BIT(1) | BIT(0));
-
-	/* check how much calibration is needed */
- switch (tx_a_bias_offset) {
- case 0xF6:
- is_minus = true;
- comp_value = 3;
- break;
-
- case 0xF4:
- is_minus = true;
- comp_value = 2;
- break;
-
- case 0xF2:
- is_minus = true;
- comp_value = 1;
- break;
-
- case 0xF3:
- is_minus = false;
- comp_value = 1;
- break;
-
- case 0xF5:
- is_minus = false;
- comp_value = 2;
- break;
-
- case 0xF7:
- is_minus = false;
- comp_value = 3;
- break;
-
- case 0xF9:
- is_minus = false;
- comp_value = 4;
- break;
-
- /* do nothing case */
- case 0xF0:
- default:
- ODM_RT_TRACE(dm, ODM_COMP_COMMON,
- "No need to do TxA bias current calibration\n");
- return;
- }
-
-	/* calculate the corrected calibration value */
- if (is_minus) {
- if (tmp1_byte >= comp_value) {
- tmp1_byte -= comp_value;
- /*modi_tx_a_value += tmp1_byte;*/
- } else {
- tmp1_byte = 0;
- }
- } else {
- tmp1_byte += comp_value;
- if (tmp1_byte >= 7)
- tmp1_byte = 7;
- }
-
- /* Write back to RF reg */
- odm_set_rf_reg(dm, rf_path, 0x30, 0xFFFF,
- (offset << 12 | (modi_tx_a_value & 0xFF0) | tmp1_byte));
-}
-
-static void _txa_bias_cali_4_each_path(struct phy_dm_struct *dm, u8 rf_path,
- u8 efuse_value)
-{
- /* switch on set TxA bias */
- odm_set_rf_reg(dm, rf_path, 0xEF, 0xFFFFF, 0x200);
-
- /* Set 12 sets of TxA value */
- _set_tx_a_cali_value(dm, rf_path, 0x0, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x1, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x2, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x3, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x4, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x5, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x6, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x7, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x8, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0x9, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0xa, efuse_value);
- _set_tx_a_cali_value(dm, rf_path, 0xb, efuse_value);
-
- /* switch off set TxA bias */
- odm_set_rf_reg(dm, rf_path, 0xEF, 0xFFFFF, 0x0);
-}
-
-/*
- * for 8822B PCIE D-cut patch only
- * Normal driver and MP driver need this patch
- */
-
-void phydm_txcurrentcalibration(struct phy_dm_struct *dm)
-{
- u8 efuse0x3D8, efuse0x3D7;
- u32 orig_rf0x18_path_a = 0, orig_rf0x18_path_b = 0;
-
- /* save original 0x18 value */
- orig_rf0x18_path_a = odm_get_rf_reg(dm, ODM_RF_PATH_A, 0x18, 0xFFFFF);
- orig_rf0x18_path_b = odm_get_rf_reg(dm, ODM_RF_PATH_B, 0x18, 0xFFFFF);
-
- /* define efuse content */
- efuse0x3D8 = dm->efuse0x3d8;
- efuse0x3D7 = dm->efuse0x3d7;
-
-	/* check efuse content to decide whether calibration is needed */
- if (efuse0x3D7 == 0xFF) {
- ODM_RT_TRACE(
- dm, ODM_COMP_COMMON,
- "efuse content 0x3D7 == 0xFF, No need to do TxA cali\n");
- return;
- }
-
- /* write RF register for calibration */
- _txa_bias_cali_4_each_path(dm, ODM_RF_PATH_A, efuse0x3D7);
- _txa_bias_cali_4_each_path(dm, ODM_RF_PATH_B, efuse0x3D8);
-
- /* restore original 0x18 value */
- odm_set_rf_reg(dm, ODM_RF_PATH_A, 0x18, 0xFFFFF, orig_rf0x18_path_a);
- odm_set_rf_reg(dm, ODM_RF_PATH_B, 0x18, 0xFFFFF, orig_rf0x18_path_b);
-}
-
-void phydm_hwsetting_8822b(struct phy_dm_struct *dm)
-{
- phydm_dynamic_switch_htstf_mumimo_8822b(dm);
-}
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h
deleted file mode 100644
index 788258e8c3d1..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __ODM_RTL8822B_H__
-#define __ODM_RTL8822B_H__
-
-void phydm_hwsetting_8822b(struct phy_dm_struct *dm);
-
-#endif /* #define __ODM_RTL8822B_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h
deleted file mode 100644
index 53fd51aacdf2..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-/*RTL8822B PHY Parameters*/
-/*
- * [Caution]
- * Since 01/Aug/2015, the commit rules will be simplified.
- * You do not need to fill up the version.h anymore,
- * only the maintenance supervisor fills it before formal release.
- */
-#define RELEASE_DATE_8822B 20161103
-#define COMMIT_BY_8822B "BB_JOE"
-#define RELEASE_VERSION_8822B 67
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
deleted file mode 100644
index 4cc77b2016e1..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ /dev/null
@@ -1,865 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "mp_precomp.h"
-#include "phydm_precomp.h"
-#include <linux/module.h>
-
-static int _rtl_phydm_init_com_info(struct rtl_priv *rtlpriv,
- enum odm_ic_type ic_type,
- struct rtl_phydm_params *params)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
- u8 odm_board_type = ODM_BOARD_DEFAULT;
- u32 support_ability;
- int i;
-
- dm->adapter = (void *)rtlpriv;
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PLATFORM, ODM_CE);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_IC_TYPE, ic_type);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_INTERFACE, ODM_ITRF_PCIE);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_MP_TEST_CHIP, params->mp_chip);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_PATCH_ID, rtlhal->oem_id);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_BWIFI_TEST, 1);
-
- if (rtlphy->rf_type == RF_1T1R)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_1T1R);
- else if (rtlphy->rf_type == RF_1T2R)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_1T2R);
- else if (rtlphy->rf_type == RF_2T2R)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_2T2R);
- else if (rtlphy->rf_type == RF_2T2R_GREEN)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_2T2R_GREEN);
- else if (rtlphy->rf_type == RF_2T3R)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_2T3R);
- else if (rtlphy->rf_type == RF_2T4R)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_2T4R);
- else if (rtlphy->rf_type == RF_3T3R)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_3T3R);
- else if (rtlphy->rf_type == RF_3T4R)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_3T4R);
- else if (rtlphy->rf_type == RF_4T4R)
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_4T4R);
- else
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_TYPE, ODM_XTXR);
-
- /* 1 ======= BoardType: ODM_CMNINFO_BOARD_TYPE ======= */
- if (rtlhal->external_lna_2g != 0) {
- odm_board_type |= ODM_BOARD_EXT_LNA;
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_LNA, 1);
- }
- if (rtlhal->external_lna_5g != 0) {
- odm_board_type |= ODM_BOARD_EXT_LNA_5G;
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_LNA, 1);
- }
- if (rtlhal->external_pa_2g != 0) {
- odm_board_type |= ODM_BOARD_EXT_PA;
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_PA, 1);
- }
- if (rtlhal->external_pa_5g != 0) {
- odm_board_type |= ODM_BOARD_EXT_PA_5G;
- odm_cmn_info_init(dm, ODM_CMNINFO_5G_EXT_PA, 1);
- }
- if (rtlpriv->cfg->ops->get_btc_status())
- odm_board_type |= ODM_BOARD_BT;
-
- odm_cmn_info_init(dm, ODM_CMNINFO_BOARD_TYPE, odm_board_type);
- /* 1 ============== End of BoardType ============== */
-
- odm_cmn_info_init(dm, ODM_CMNINFO_GPA, rtlhal->type_gpa);
- odm_cmn_info_init(dm, ODM_CMNINFO_APA, rtlhal->type_apa);
- odm_cmn_info_init(dm, ODM_CMNINFO_GLNA, rtlhal->type_glna);
- odm_cmn_info_init(dm, ODM_CMNINFO_ALNA, rtlhal->type_alna);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_RFE_TYPE, rtlhal->rfe_type);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_EXT_TRSW, 0);
-
- /*Add by YuChen for kfree init*/
- odm_cmn_info_init(dm, ODM_CMNINFO_REGRFKFREEENABLE, 2);
- odm_cmn_info_init(dm, ODM_CMNINFO_RFKFREEENABLE, 0);
-
- /*Antenna diversity relative parameters*/
- odm_cmn_info_hook(dm, ODM_CMNINFO_ANT_DIV,
- &rtlefuse->antenna_div_cfg);
- odm_cmn_info_init(dm, ODM_CMNINFO_RF_ANTENNA_TYPE,
- rtlefuse->antenna_div_type);
- odm_cmn_info_init(dm, ODM_CMNINFO_BE_FIX_TX_ANT, 0);
- odm_cmn_info_init(dm, ODM_CMNINFO_WITH_EXT_ANTENNA_SWITCH, 0);
-
- /* (8822B) efuse 0x3D7 & 0x3D8 for TX PA bias */
- odm_cmn_info_init(dm, ODM_CMNINFO_EFUSE0X3D7, params->efuse0x3d7);
- odm_cmn_info_init(dm, ODM_CMNINFO_EFUSE0X3D8, params->efuse0x3d8);
-
- /*Add by YuChen for adaptivity init*/
- odm_cmn_info_hook(dm, ODM_CMNINFO_ADAPTIVITY,
- &rtlpriv->phydm.adaptivity_en);
- phydm_adaptivity_info_init(dm, PHYDM_ADAPINFO_CARRIER_SENSE_ENABLE,
- false);
- phydm_adaptivity_info_init(dm, PHYDM_ADAPINFO_DCBACKOFF, 0);
- phydm_adaptivity_info_init(dm, PHYDM_ADAPINFO_DYNAMICLINKADAPTIVITY,
- false);
- phydm_adaptivity_info_init(dm, PHYDM_ADAPINFO_TH_L2H_INI, 0);
- phydm_adaptivity_info_init(dm, PHYDM_ADAPINFO_TH_EDCCA_HL_DIFF, 0);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_IQKFWOFFLOAD, 0);
-
- /* Pointer reference */
- odm_cmn_info_hook(dm, ODM_CMNINFO_TX_UNI,
- &rtlpriv->stats.txbytesunicast);
- odm_cmn_info_hook(dm, ODM_CMNINFO_RX_UNI,
- &rtlpriv->stats.rxbytesunicast);
- odm_cmn_info_hook(dm, ODM_CMNINFO_BAND, &rtlhal->current_bandtype);
- odm_cmn_info_hook(dm, ODM_CMNINFO_FORCED_RATE,
- &rtlpriv->phydm.forced_data_rate);
- odm_cmn_info_hook(dm, ODM_CMNINFO_FORCED_IGI_LB,
- &rtlpriv->phydm.forced_igi_lb);
-
- odm_cmn_info_hook(dm, ODM_CMNINFO_SEC_CHNL_OFFSET,
- &mac->cur_40_prime_sc);
- odm_cmn_info_hook(dm, ODM_CMNINFO_BW, &rtlphy->current_chan_bw);
- odm_cmn_info_hook(dm, ODM_CMNINFO_CHNL, &rtlphy->current_channel);
-
- odm_cmn_info_hook(dm, ODM_CMNINFO_SCAN, &mac->act_scanning);
- odm_cmn_info_hook(dm, ODM_CMNINFO_POWER_SAVING,
- &ppsc->dot11_psmode); /* may add new boolean flag */
- /*Add by Yuchen for phydm beamforming*/
- odm_cmn_info_hook(dm, ODM_CMNINFO_TX_TP,
- &rtlpriv->stats.txbytesunicast_inperiod_tp);
- odm_cmn_info_hook(dm, ODM_CMNINFO_RX_TP,
- &rtlpriv->stats.rxbytesunicast_inperiod_tp);
- odm_cmn_info_hook(dm, ODM_CMNINFO_ANT_TEST,
- &rtlpriv->phydm.antenna_test);
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++)
- odm_cmn_info_ptr_array_hook(dm, ODM_CMNINFO_STA_STATUS, i,
- NULL);
-
- phydm_init_debug_setting(dm);
-
- odm_cmn_info_init(dm, ODM_CMNINFO_FAB_VER, params->fab_ver);
- odm_cmn_info_init(dm, ODM_CMNINFO_CUT_VER, params->cut_ver);
-
- /* after ifup, ability is updated again */
- support_ability = ODM_RF_CALIBRATION | ODM_RF_TX_PWR_TRACK;
- odm_cmn_info_update(dm, ODM_CMNINFO_ABILITY, support_ability);
-
- return 0;
-}
-
-static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
- struct rtl_phydm_params *params)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum odm_ic_type ic;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- ic = ODM_RTL8822B;
- else
- return 0;
-
- rtlpriv->phydm.internal =
- kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
- if (!rtlpriv->phydm.internal)
- return 0;
-
- _rtl_phydm_init_com_info(rtlpriv, ic, params);
-
- odm_init_all_timers(dm);
-
- return 1;
-}
-
-static int rtl_phydm_deinit_priv(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- odm_cancel_all_timers(dm);
-
- kfree(rtlpriv->phydm.internal);
- rtlpriv->phydm.internal = NULL;
-
- return 0;
-}
-
-static bool rtl_phydm_load_txpower_by_rate(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum hal_status status;
-
- status = odm_config_bb_with_header_file(dm, CONFIG_BB_PHY_REG_PG);
- if (status != HAL_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool rtl_phydm_load_txpower_limit(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum hal_status status;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv)) {
- odm_read_and_config_mp_8822b_txpwr_lmt(dm);
- } else {
- status = odm_config_rf_with_header_file(dm, CONFIG_RF_TXPWR_LMT,
- 0);
- if (status != HAL_STATUS_SUCCESS)
- return false;
- }
-
- return true;
-}
-
-static int rtl_phydm_init_dm(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- u32 support_ability = 0;
-
- /* clang-format off */
- support_ability = 0
- | ODM_BB_DIG
- | ODM_BB_RA_MASK
- | ODM_BB_DYNAMIC_TXPWR
- | ODM_BB_FA_CNT
- | ODM_BB_RSSI_MONITOR
- | ODM_BB_CCK_PD
- /* | ODM_BB_PWR_SAVE*/
- | ODM_BB_CFO_TRACKING
- | ODM_MAC_EDCA_TURBO
- | ODM_RF_TX_PWR_TRACK
- | ODM_RF_CALIBRATION
- | ODM_BB_NHM_CNT
- /* | ODM_BB_PWR_TRAIN*/
- ;
- /* clang-format on */
-
- odm_cmn_info_update(dm, ODM_CMNINFO_ABILITY, support_ability);
-
- odm_dm_init(dm);
-
- return 0;
-}
-
-static int rtl_phydm_deinit_dm(struct rtl_priv *rtlpriv)
-{
- return 0;
-}
-
-static int rtl_phydm_reset_dm(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- odm_dm_reset(dm);
-
- return 0;
-}
-
-static bool rtl_phydm_parameter_init(struct rtl_priv *rtlpriv, bool post)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_parameter_init(dm, post ? ODM_POST_SETTING :
- ODM_PRE_SETTING);
-
- return false;
-}
-
-static bool rtl_phydm_phy_bb_config(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum hal_status status;
-
- status = odm_config_bb_with_header_file(dm, CONFIG_BB_PHY_REG);
- if (status != HAL_STATUS_SUCCESS)
- return false;
-
- status = odm_config_bb_with_header_file(dm, CONFIG_BB_AGC_TAB);
- if (status != HAL_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool rtl_phydm_phy_rf_config(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- enum hal_status status;
- enum odm_rf_radio_path rfpath;
-
- for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
- status = odm_config_rf_with_header_file(dm, CONFIG_RF_RADIO,
- rfpath);
- if (status != HAL_STATUS_SUCCESS)
- return false;
- }
-
- status = odm_config_rf_with_tx_pwr_track_header_file(dm);
- if (status != HAL_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool rtl_phydm_phy_mac_config(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum hal_status status;
-
- status = odm_config_mac_with_header_file(dm);
- if (status != HAL_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool rtl_phydm_trx_mode(struct rtl_priv *rtlpriv,
- enum radio_mask tx_path, enum radio_mask rx_path,
- bool is_tx2_path)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_trx_mode_8822b(dm,
- (enum odm_rf_path)tx_path,
- (enum odm_rf_path)rx_path,
- is_tx2_path);
-
- return false;
-}
-
-static bool rtl_phydm_watchdog(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
- bool fw_current_inpsmode = false;
- bool fw_ps_awake = true;
- u8 is_linked = false;
- u8 bsta_state = false;
- u8 is_bt_enabled = false;
-
-	/* check whether to run the watchdog */
- rtlpriv->cfg->ops->get_hw_reg(rtlpriv->hw, HW_VAR_FW_PSMODE_STATUS,
- (u8 *)(&fw_current_inpsmode));
- rtlpriv->cfg->ops->get_hw_reg(rtlpriv->hw, HW_VAR_FWLPS_RF_ON,
- (u8 *)(&fw_ps_awake));
- if (ppsc->p2p_ps_info.p2p_ps_mode)
- fw_ps_awake = false;
-
-	if (ppsc->rfpwr_state != ERFON || fw_current_inpsmode ||
-	    !fw_ps_awake || ppsc->rfchange_inprogress)
-		return false;
-
- /* update common info before doing watchdog */
- if (mac->link_state >= MAC80211_LINKED) {
- is_linked = true;
- if (mac->vif && mac->vif->type == NL80211_IFTYPE_STATION)
- bsta_state = true;
- }
-
- if (rtlpriv->cfg->ops->get_btc_status())
- is_bt_enabled = !rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(
- rtlpriv);
-
- odm_cmn_info_update(dm, ODM_CMNINFO_LINK, is_linked);
- odm_cmn_info_update(dm, ODM_CMNINFO_STATION_STATE, bsta_state);
- odm_cmn_info_update(dm, ODM_CMNINFO_BT_ENABLED, is_bt_enabled);
-
- /* do watchdog */
- odm_dm_watchdog(dm);
-
- return true;
-}
-
-static bool rtl_phydm_switch_band(struct rtl_priv *rtlpriv, u8 central_ch)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_switch_band_8822b(dm, central_ch);
-
- return false;
-}
-
-static bool rtl_phydm_switch_channel(struct rtl_priv *rtlpriv, u8 central_ch)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_switch_channel_8822b(dm, central_ch);
-
- return false;
-}
-
-static bool rtl_phydm_switch_bandwidth(struct rtl_priv *rtlpriv,
- u8 primary_ch_idx,
- enum ht_channel_width bandwidth)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum odm_bw odm_bw = (enum odm_bw)bandwidth;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_switch_bandwidth_8822b(dm, primary_ch_idx,
- odm_bw);
-
- return false;
-}
-
-static bool rtl_phydm_iq_calibrate(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- phy_iq_calibrate_8822b(dm, false);
- else
- return false;
-
- return true;
-}
-
-static bool rtl_phydm_clear_txpowertracking_state(struct rtl_priv *rtlpriv)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- odm_clear_txpowertracking_state(dm);
-
- return true;
-}
-
-static bool rtl_phydm_pause_dig(struct rtl_priv *rtlpriv, bool pause)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- if (pause)
- odm_pause_dig(dm, PHYDM_PAUSE, PHYDM_PAUSE_LEVEL_0, 0x1e);
- else /* resume */
- odm_pause_dig(dm, PHYDM_RESUME, PHYDM_PAUSE_LEVEL_0, 0xff);
-
- return true;
-}
-
-static u32 rtl_phydm_read_rf_reg(struct rtl_priv *rtlpriv,
- enum radio_path rfpath, u32 addr, u32 mask)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum odm_rf_radio_path odm_rfpath = (enum odm_rf_radio_path)rfpath;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_read_rf_reg_8822b(dm, odm_rfpath, addr,
- mask);
-
- return -1;
-}
-
-static bool rtl_phydm_write_rf_reg(struct rtl_priv *rtlpriv,
- enum radio_path rfpath, u32 addr, u32 mask,
- u32 data)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum odm_rf_radio_path odm_rfpath = (enum odm_rf_radio_path)rfpath;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_write_rf_reg_8822b(dm, odm_rfpath, addr,
- mask, data);
-
- return false;
-}
-
-static u8 rtl_phydm_read_txagc(struct rtl_priv *rtlpriv, enum radio_path rfpath,
- u8 hw_rate)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum odm_rf_radio_path odm_rfpath = (enum odm_rf_radio_path)rfpath;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_read_txagc_8822b(dm, odm_rfpath, hw_rate);
-
- return -1;
-}
-
-static bool rtl_phydm_write_txagc(struct rtl_priv *rtlpriv, u32 power_index,
- enum radio_path rfpath, u8 hw_rate)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- enum odm_rf_radio_path odm_rfpath = (enum odm_rf_radio_path)rfpath;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- return config_phydm_write_txagc_8822b(dm, power_index,
- odm_rfpath, hw_rate);
-
- return false;
-}
-
-static bool rtl_phydm_c2h_content_parsing(struct rtl_priv *rtlpriv, u8 cmd_id,
- u8 cmd_len, u8 *content)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- if (phydm_c2H_content_parsing(dm, cmd_id, cmd_len, content))
- return true;
-
- return false;
-}
-
-static bool rtl_phydm_query_phy_status(struct rtl_priv *rtlpriv, u8 *phystrpt,
- struct ieee80211_hdr *hdr,
- struct rtl_stats *pstatus)
-{
-	/* NOTE: phystrpt may be NULL; fill default values in that case */
-
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
- struct rtl_mac *mac = rtl_mac(rtlpriv);
-	struct dm_per_pkt_info pktinfo; /* input of phydm */
- struct dm_phy_status_info phy_info; /* output of phydm */
- __le16 fc = hdr->frame_control;
-
- /* fill driver pstatus */
- ether_addr_copy(pstatus->psaddr, ieee80211_get_SA(hdr));
-
- /* fill pktinfo */
- memset(&pktinfo, 0, sizeof(pktinfo));
-
- pktinfo.data_rate = pstatus->rate;
-
- if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION) {
- pktinfo.station_id = 0;
- } else {
- /* TODO: use rtl_find_sta() to find ID */
- pktinfo.station_id = 0xFF;
- }
-
- pktinfo.is_packet_match_bssid =
- (!ieee80211_is_ctl(fc) &&
- (ether_addr_equal(mac->bssid,
- ieee80211_has_tods(fc) ?
- hdr->addr1 :
- ieee80211_has_fromds(fc) ?
- hdr->addr2 :
- hdr->addr3)) &&
- (!pstatus->hwerror) && (!pstatus->crc) && (!pstatus->icv));
- pktinfo.is_packet_to_self =
- pktinfo.is_packet_match_bssid &&
- (ether_addr_equal(hdr->addr1, rtlefuse->dev_addr));
- pktinfo.is_to_self = (!pstatus->icv) && (!pstatus->crc) &&
- (ether_addr_equal(hdr->addr1, rtlefuse->dev_addr));
- pktinfo.is_packet_beacon = (ieee80211_is_beacon(fc) ? true : false);
-
- /* query phy status */
- if (phystrpt)
- odm_phy_status_query(dm, &phy_info, phystrpt, &pktinfo);
- else
- memset(&phy_info, 0, sizeof(phy_info));
-
- /* copy phy_info from phydm to driver */
- pstatus->rx_pwdb_all = phy_info.rx_pwdb_all;
- pstatus->bt_rx_rssi_percentage = phy_info.bt_rx_rssi_percentage;
- pstatus->recvsignalpower = phy_info.recv_signal_power;
- pstatus->signalquality = phy_info.signal_quality;
- pstatus->rx_mimo_signalquality[0] = phy_info.rx_mimo_signal_quality[0];
- pstatus->rx_mimo_signalquality[1] = phy_info.rx_mimo_signal_quality[1];
- pstatus->rx_packet_bw =
- phy_info.band_width; /* HT_CHANNEL_WIDTH_20 <- ODM_BW20M */
-
- /* fill driver pstatus */
- pstatus->packet_matchbssid = pktinfo.is_packet_match_bssid;
- pstatus->packet_toself = pktinfo.is_packet_to_self;
- pstatus->packet_beacon = pktinfo.is_packet_beacon;
-
- return true;
-}
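
The is_packet_match_bssid computation above chooses which 802.11 address field to compare against the current BSSID based on the frame's ToDS/FromDS bits. The short sketch below models just that selection; the struct and function names are hypothetical, and it is not the mac80211 helper the driver actually calls.

/*
 * Simplified model of the address selection above: which 802.11 header
 * field is compared against the BSSID depends on the ToDS/FromDS bits.
 * Struct and function names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hdr80211 {
	uint8_t addr1[6]; /* receiver address */
	uint8_t addr2[6]; /* transmitter address */
	uint8_t addr3[6];
};

static const uint8_t *bssid_field(const struct hdr80211 *hdr,
				  bool to_ds, bool from_ds)
{
	if (to_ds)
		return hdr->addr1; /* station -> AP: BSSID is the receiver */
	if (from_ds)
		return hdr->addr2; /* AP -> station: BSSID is the transmitter */
	return hdr->addr3;         /* neither: e.g. IBSS or management */
}

int main(void)
{
	struct hdr80211 hdr = { {1}, {2}, {3} };

	/* From-DS data frame: the BSSID sits in addr2. */
	printf("%d\n", bssid_field(&hdr, false, true)[0]); /* prints 2 */
	return 0;
}
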
-
-static u8 rtl_phydm_rate_id_mapping(struct rtl_priv *rtlpriv,
- enum wireless_mode wireless_mode,
- enum rf_type rf_type,
- enum ht_channel_width bw)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- return phydm_rate_id_mapping(dm, wireless_mode, rf_type, bw);
-}
-
-static bool rtl_phydm_get_ra_bitmap(struct rtl_priv *rtlpriv,
- enum wireless_mode wireless_mode,
- enum rf_type rf_type,
- enum ht_channel_width bw,
- u8 tx_rate_level, /* 0~6 */
- u32 *tx_bitmap_msb,
- u32 *tx_bitmap_lsb)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- const u8 mimo_ps_enable = 0;
- const u8 disable_cck_rate = 0;
-
- phydm_update_hal_ra_mask(dm, wireless_mode, rf_type, bw, mimo_ps_enable,
- disable_cck_rate, tx_bitmap_msb, tx_bitmap_lsb,
- tx_rate_level);
-
- return true;
-}
-
-static u8 _rtl_phydm_get_macid(struct rtl_priv *rtlpriv,
- struct ieee80211_sta *sta)
-{
- struct rtl_mac *mac = rtl_mac(rtlpriv);
-
- if (mac->opmode == NL80211_IFTYPE_STATION ||
- mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- return 0;
- } else if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC)
- return sta->aid + 1;
-
- return 0;
-}
-
-static bool rtl_phydm_add_sta(struct rtl_priv *rtlpriv,
- struct ieee80211_sta *sta)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- struct rtl_sta_info *sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- u8 mac_id = _rtl_phydm_get_macid(rtlpriv, sta);
-
- odm_cmn_info_ptr_array_hook(dm, ODM_CMNINFO_STA_STATUS, mac_id,
- sta_entry);
-
- return true;
-}
-
-static bool rtl_phydm_del_sta(struct rtl_priv *rtlpriv,
- struct ieee80211_sta *sta)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- u8 mac_id = _rtl_phydm_get_macid(rtlpriv, sta);
-
- odm_cmn_info_ptr_array_hook(dm, ODM_CMNINFO_STA_STATUS, mac_id, NULL);
-
- return true;
-}
-
-static u32 rtl_phydm_get_version(struct rtl_priv *rtlpriv)
-{
- u32 ver = 0;
-
- if (IS_HARDWARE_TYPE_8822B(rtlpriv))
- ver = RELEASE_VERSION_8822B;
-
- return ver;
-}
-
-static bool rtl_phydm_modify_ra_pcr_threshold(struct rtl_priv *rtlpriv,
- u8 ra_offset_direction,
- u8 ra_threshold_offset)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- phydm_modify_RA_PCR_threshold(dm, ra_offset_direction,
- ra_threshold_offset);
-
- return true;
-}
-
-static u32 rtl_phydm_query_counter(struct rtl_priv *rtlpriv,
- const char *info_type)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
- static const struct query_entry {
- const char *query_name;
- enum phydm_info_query query_id;
- } query_table[] = {
-#define QUERY_ENTRY(name) {#name, name}
- QUERY_ENTRY(PHYDM_INFO_FA_OFDM),
- QUERY_ENTRY(PHYDM_INFO_FA_CCK),
- QUERY_ENTRY(PHYDM_INFO_CCA_OFDM),
- QUERY_ENTRY(PHYDM_INFO_CCA_CCK),
- QUERY_ENTRY(PHYDM_INFO_CRC32_OK_CCK),
- QUERY_ENTRY(PHYDM_INFO_CRC32_OK_LEGACY),
- QUERY_ENTRY(PHYDM_INFO_CRC32_OK_HT),
- QUERY_ENTRY(PHYDM_INFO_CRC32_OK_VHT),
- QUERY_ENTRY(PHYDM_INFO_CRC32_ERROR_CCK),
- QUERY_ENTRY(PHYDM_INFO_CRC32_ERROR_LEGACY),
- QUERY_ENTRY(PHYDM_INFO_CRC32_ERROR_HT),
- QUERY_ENTRY(PHYDM_INFO_CRC32_ERROR_VHT),
- };
-#define QUERY_TABLE_SIZE ARRAY_SIZE(query_table)
-
- int i;
- const struct query_entry *entry;
-
- if (!strcmp(info_type, "IQK_TOTAL"))
- return dm->n_iqk_cnt;
-
- if (!strcmp(info_type, "IQK_OK"))
- return dm->n_iqk_ok_cnt;
-
- if (!strcmp(info_type, "IQK_FAIL"))
- return dm->n_iqk_fail_cnt;
-
- for (i = 0; i < QUERY_TABLE_SIZE; i++) {
- entry = &query_table[i];
-
- if (!strcmp(info_type, entry->query_name))
- return phydm_cmn_info_query(dm, entry->query_id);
- }
-
- pr_err("Unrecognized info_type:%s!!!!:\n", info_type);
-
- return 0xDEADDEAD;
-}
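
rtl_phydm_query_counter above resolves counter-name strings to phydm query ids through a small static table, after handling a few IQK special cases up front. The self-contained sketch below shows the same table-driven lookup shape with invented counter names; it is not the phydm enum or API.

/*
 * Self-contained sketch of the table-driven lookup above: resolve a
 * counter name with one strcmp loop over a static table. The names and
 * ids below are invented for illustration.
 */
#include <stdio.h>
#include <string.h>

enum counter_id { CNT_FA_OFDM, CNT_FA_CCK, CNT_UNKNOWN };

static enum counter_id lookup_counter(const char *name)
{
	static const struct {
		const char *name;
		enum counter_id id;
	} table[] = {
		{ "FA_OFDM", CNT_FA_OFDM },
		{ "FA_CCK",  CNT_FA_CCK },
	};
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(name, table[i].name))
			return table[i].id;
	return CNT_UNKNOWN;
}

int main(void)
{
	printf("%d\n", lookup_counter("FA_CCK")); /* prints 1 */
	return 0;
}
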
-
-static bool rtl_phydm_debug_cmd(struct rtl_priv *rtlpriv, char *in, u32 in_len,
- char *out, u32 out_len)
-{
- struct phy_dm_struct *dm = rtlpriv_to_phydm(rtlpriv);
-
- phydm_cmd(dm, in, in_len, 1, out, out_len);
-
- return true;
-}
-
-static struct rtl_phydm_ops rtl_phydm_operation = {
- /* init/deinit priv */
- .phydm_init_priv = rtl_phydm_init_priv,
- .phydm_deinit_priv = rtl_phydm_deinit_priv,
- .phydm_load_txpower_by_rate = rtl_phydm_load_txpower_by_rate,
- .phydm_load_txpower_limit = rtl_phydm_load_txpower_limit,
-
- /* init hw */
- .phydm_init_dm = rtl_phydm_init_dm,
- .phydm_deinit_dm = rtl_phydm_deinit_dm,
- .phydm_reset_dm = rtl_phydm_reset_dm,
- .phydm_parameter_init = rtl_phydm_parameter_init,
- .phydm_phy_bb_config = rtl_phydm_phy_bb_config,
- .phydm_phy_rf_config = rtl_phydm_phy_rf_config,
- .phydm_phy_mac_config = rtl_phydm_phy_mac_config,
- .phydm_trx_mode = rtl_phydm_trx_mode,
-
- /* watchdog */
- .phydm_watchdog = rtl_phydm_watchdog,
-
- /* channel */
- .phydm_switch_band = rtl_phydm_switch_band,
- .phydm_switch_channel = rtl_phydm_switch_channel,
- .phydm_switch_bandwidth = rtl_phydm_switch_bandwidth,
- .phydm_iq_calibrate = rtl_phydm_iq_calibrate,
- .phydm_clear_txpowertracking_state =
- rtl_phydm_clear_txpowertracking_state,
- .phydm_pause_dig = rtl_phydm_pause_dig,
-
- /* read/write reg */
- .phydm_read_rf_reg = rtl_phydm_read_rf_reg,
- .phydm_write_rf_reg = rtl_phydm_write_rf_reg,
- .phydm_read_txagc = rtl_phydm_read_txagc,
- .phydm_write_txagc = rtl_phydm_write_txagc,
-
- /* RX */
- .phydm_c2h_content_parsing = rtl_phydm_c2h_content_parsing,
- .phydm_query_phy_status = rtl_phydm_query_phy_status,
-
- /* TX */
- .phydm_rate_id_mapping = rtl_phydm_rate_id_mapping,
- .phydm_get_ra_bitmap = rtl_phydm_get_ra_bitmap,
-
- /* STA */
- .phydm_add_sta = rtl_phydm_add_sta,
- .phydm_del_sta = rtl_phydm_del_sta,
-
- /* BTC */
- .phydm_get_version = rtl_phydm_get_version,
- .phydm_modify_ra_pcr_threshold = rtl_phydm_modify_ra_pcr_threshold,
- .phydm_query_counter = rtl_phydm_query_counter,
-
- /* debug */
- .phydm_debug_cmd = rtl_phydm_debug_cmd,
-};
-
-struct rtl_phydm_ops *rtl_phydm_get_ops_pointer(void)
-{
- return &rtl_phydm_operation;
-}
-EXPORT_SYMBOL(rtl_phydm_get_ops_pointer);
-
-/* ********************************************************
- * Define phydm callout functions below
- * ********************************************************
- */
-
-u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
- enum ht_channel_width bandwidth, u8 channel)
-{
- /* rate: DESC_RATE1M */
- struct rtl_priv *rtlpriv = (struct rtl_priv *)adapter;
-
- return rtlpriv->cfg->ops->get_txpower_index(rtlpriv->hw, rf_path, rate,
- bandwidth, channel);
-}
-
-void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)adapter;
-
- return rtlpriv->cfg->ops->set_tx_power_index_by_rs(rtlpriv->hw, ch,
- path, rs);
-}
-
-void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
- u32 regaddr, u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)adapter;
-
- rtlpriv->cfg->ops->store_tx_power_by_rate(
- rtlpriv->hw, band, rfpath, txnum, regaddr, bitmask, data);
-}
-
-void phy_set_tx_power_limit(void *dm, u8 *regulation, u8 *band, u8 *bandwidth,
- u8 *rate_section, u8 *rf_path, u8 *channel,
- u8 *power_limit)
-{
- struct rtl_priv *rtlpriv =
- (struct rtl_priv *)((struct phy_dm_struct *)dm)->adapter;
-
- rtlpriv->cfg->ops->phy_set_txpower_limit(rtlpriv->hw, regulation, band,
- bandwidth, rate_section,
- rf_path, channel, power_limit);
-}
-
-void rtl_hal_update_ra_mask(void *adapter, struct rtl_sta_info *psta,
- u8 rssi_level)
-{
- struct rtl_priv *rtlpriv = (struct rtl_priv *)adapter;
- struct ieee80211_sta *sta =
- container_of((void *)psta, struct ieee80211_sta, drv_priv);
-
- rtlpriv->cfg->ops->update_rate_tbl(rtlpriv->hw, sta, rssi_level, false);
-}
-
-MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
-MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.h b/drivers/staging/rtlwifi/phydm/rtl_phydm.h
deleted file mode 100644
index b98d502ef196..000000000000
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __RTL_PHYDM_H__
-#define __RTL_PHYDM_H__
-
-struct rtl_phydm_ops *rtl_phydm_get_ops_pointer(void);
-
-#define rtlpriv_to_phydm(priv) \
- ((struct phy_dm_struct *)((priv)->phydm.internal))
-
-u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
- enum ht_channel_width bandwidth, u8 channel);
-void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs);
-void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
- u32 regaddr, u32 bitmask, u32 data);
-void phy_set_tx_power_limit(void *dm, u8 *regulation, u8 *band, u8 *bandwidth,
- u8 *rate_section, u8 *rf_path, u8 *channel,
- u8 *power_limit);
-
-void rtl_hal_update_ra_mask(void *adapter, struct rtl_sta_info *psta,
- u8 rssi_level);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h b/drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h
deleted file mode 100644
index b85c5e17efdf..000000000000
--- a/drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HAL_COM_TXBF_H__
-#define __HAL_COM_TXBF_H__
-
-enum txbf_set_type {
- TXBF_SET_SOUNDING_ENTER,
- TXBF_SET_SOUNDING_LEAVE,
- TXBF_SET_SOUNDING_RATE,
- TXBF_SET_SOUNDING_STATUS,
- TXBF_SET_SOUNDING_FW_NDPA,
- TXBF_SET_SOUNDING_CLK,
- TXBF_SET_TX_PATH_RESET,
- TXBF_SET_GET_TX_RATE
-};
-
-enum txbf_get_type {
- TXBF_GET_EXPLICIT_BEAMFORMEE,
- TXBF_GET_EXPLICIT_BEAMFORMER,
- TXBF_GET_MU_MIMO_STA,
- TXBF_GET_MU_MIMO_AP
-};
-
-/* 2 HAL TXBF related */
-struct _HAL_TXBF_INFO {
- u8 txbf_idx;
- u8 ndpa_idx;
- u8 BW;
- u8 rate;
-
- struct timer_list txbf_fw_ndpa_timer;
-};
-
-#define hal_com_txbf_beamform_init(dm_void) NULL
-#define hal_com_txbf_config_gtab(dm_void) NULL
-#define hal_com_txbf_enter_work_item_callback(_adapter) NULL
-#define hal_com_txbf_leave_work_item_callback(_adapter) NULL
-#define hal_com_txbf_fw_ndpa_work_item_callback(_adapter) NULL
-#define hal_com_txbf_clk_work_item_callback(_adapter) NULL
-#define hal_com_txbf_rate_work_item_callback(_adapter) NULL
-#define hal_com_txbf_fw_ndpa_timer_callback(_adapter) NULL
-#define hal_com_txbf_status_work_item_callback(_adapter) NULL
-#define hal_com_txbf_get(_adapter, _get_type, _pout_buf)
-
-#endif /* #ifndef __HAL_COM_TXBF_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h b/drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h
deleted file mode 100644
index 2554fcc991de..000000000000
--- a/drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HAL_TXBF_8822B_H__
-#define __HAL_TXBF_8822B_H__
-
-#define hal_txbf_8822b_enter(dm_void, idx)
-#define hal_txbf_8822b_leave(dm_void, idx)
-#define hal_txbf_8822b_status(dm_void, idx)
-#define hal_txbf_8822b_fw_txbf(dm_void, idx)
-#define hal_txbf_8822b_config_gtab(dm_void)
-
-void phydm_8822btxbf_rfmode(void *dm_void, u8 su_bfee_cnt, u8 mu_bfee_cnt);
-
-void phydm_8822b_sutxbfer_workaroud(void *dm_void, bool enable_su_bfer, u8 nc,
- u8 nr, u8 ng, u8 CB, u8 BW, bool is_vht);
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h b/drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h
deleted file mode 100644
index cf1ced07e138..000000000000
--- a/drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HAL_TXBF_INTERFACE_H__
-#define __HAL_TXBF_INTERFACE_H__
-
-#define beamforming_get_ndpa_frame(dm, _pdu_os)
-#define beamforming_get_report_frame(adapter, precv_frame) RT_STATUS_FAILURE
-#define send_fw_ht_ndpa_packet(dm_void, RA, BW)
-#define send_sw_ht_ndpa_packet(dm_void, RA, BW)
-#define send_fw_vht_ndpa_packet(dm_void, RA, AID, BW)
-#define send_sw_vht_ndpa_packet(dm_void, RA, AID, BW)
-#define send_sw_vht_gid_mgnt_frame(dm_void, RA, idx)
-#define send_sw_vht_bf_report_poll(dm_void, RA, is_final_poll)
-#define send_sw_vht_mu_ndpa_packet(dm_void, BW)
-
-#endif
diff --git a/drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h b/drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h
deleted file mode 100644
index 4b30f062b7f7..000000000000
--- a/drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HAL_TXBF_JAGUAR_H__
-#define __HAL_TXBF_JAGUAR_H__
-
-#define hal_txbf_8812a_set_ndpa_rate(dm_void, BW, rate)
-#define hal_txbf_jaguar_enter(dm_void, idx)
-#define hal_txbf_jaguar_leave(dm_void, idx)
-#define hal_txbf_jaguar_status(dm_void, idx)
-#define hal_txbf_jaguar_fw_txbf(dm_void, idx)
-#define hal_txbf_jaguar_patch(dm_void, operation)
-#define hal_txbf_jaguar_clk_8812a(dm_void)
-
-#endif /* #ifndef __HAL_TXBF_JAGUAR_H__ */
diff --git a/drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h b/drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h
deleted file mode 100644
index 278eb5d3bd4a..000000000000
--- a/drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __PHYDM_HAL_TXBF_API_H__
-#define __PHYDM_HAL_TXBF_API_H__
-
-#define tx_bf_nr(a, b) ((a > b) ? (b) : (a))
-
-u8 beamforming_get_htndp_tx_rate(void *dm_void, u8 comp_steering_num_of_bfer);
-
-u8 beamforming_get_vht_ndp_tx_rate(void *dm_void, u8 comp_steering_num_of_bfer);
-
-u8 phydm_get_beamforming_sounding_info(void *dm_void, u16 *troughput,
- u8 total_bfee_num, u8 *tx_rate);
-
-u8 phydm_get_ndpa_rate(void *dm_void);
-
-u8 phydm_get_mu_bfee_snding_decision(void *dm_void, u16 throughput);
-
-#endif
diff --git a/drivers/staging/rtlwifi/ps.c b/drivers/staging/rtlwifi/ps.c
deleted file mode 100644
index 5118773ea6f7..000000000000
--- a/drivers/staging/rtlwifi/ps.c
+++ /dev/null
@@ -1,996 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "wifi.h"
-#include "base.h"
-#include "ps.h"
-#include <linux/export.h>
-#include "btcoexist/rtl_btc.h"
-
-bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
-
- /*<1> reset trx ring */
- if (rtlhal->interface == INTF_PCI)
- rtlpriv->intf_ops->reset_trx_ring(hw);
-
- if (is_hal_stop(rtlhal))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Driver is already down!\n");
-
- /*<2> Enable Adapter */
- if (rtlpriv->cfg->ops->hw_init(hw))
- return false;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
- &rtlmac->retry_long);
- RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
-
- /*<2.1> Switch Channel & Bandwidth to last rtl_op_config setting*/
- rtlpriv->cfg->ops->switch_channel(hw);
- rtlpriv->cfg->ops->set_channel_access(hw);
- rtlpriv->cfg->ops->set_bw_mode(hw,
- cfg80211_get_chandef_type(&hw->conf.chandef));
-
- /*<3> Enable Interrupt */
- rtlpriv->cfg->ops->enable_interrupt(hw);
-
- /*<enable timer> */
- rtl_watch_dog_timer_callback(&rtlpriv->works.watchdog_timer);
-
- return true;
-}
-
-bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- /*<1> Stop all timer */
- rtl_deinit_deferred_work(hw);
-
- /*<2> Disable Interrupt */
- rtlpriv->cfg->ops->disable_interrupt(hw);
- tasklet_kill(&rtlpriv->works.irq_tasklet);
-
- /*<3> Disable Adapter */
- rtlpriv->cfg->ops->hw_disable(hw);
-
- return true;
-}
-
-static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
- enum rf_pwrstate state_toset,
- u32 changesource)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum rf_pwrstate rtstate;
- bool actionallowed = false;
- u16 rfwait_cnt = 0;
-
- /*Only one thread can change
- *the RF state at one time, and others
- *should wait to be executed.
- */
- while (true) {
- spin_lock(&rtlpriv->locks.rf_ps_lock);
- if (ppsc->rfchange_inprogress) {
- spin_unlock(&rtlpriv->locks.rf_ps_lock);
-
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "RF Change in progress! Wait to set..state_toset(%d).\n",
- state_toset);
-
- /* Set RF after the previous action is done. */
- while (ppsc->rfchange_inprogress) {
- rfwait_cnt++;
- mdelay(1);
- /*Wait too long, return false to avoid
- *to be stuck here.
- */
- if (rfwait_cnt > 100)
- return false;
- }
- } else {
- ppsc->rfchange_inprogress = true;
- spin_unlock(&rtlpriv->locks.rf_ps_lock);
- break;
- }
- }
-
- rtstate = ppsc->rfpwr_state;
-
- switch (state_toset) {
- case ERFON:
- ppsc->rfoff_reason &= (~changesource);
-
- if ((changesource == RF_CHANGE_BY_HW) &&
- (ppsc->hwradiooff)) {
- ppsc->hwradiooff = false;
- }
-
- if (!ppsc->rfoff_reason) {
- ppsc->rfoff_reason = 0;
- actionallowed = true;
- }
- break;
- case ERFOFF:
- if ((changesource == RF_CHANGE_BY_HW) && !ppsc->hwradiooff)
- ppsc->hwradiooff = true;
-
- ppsc->rfoff_reason |= changesource;
- actionallowed = true;
- break;
- case ERFSLEEP:
- ppsc->rfoff_reason |= changesource;
- actionallowed = true;
- break;
- default:
- pr_err("switch case %#x not processed\n", state_toset);
- break;
- }
-
- if (actionallowed)
- rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
-
- spin_lock(&rtlpriv->locks.rf_ps_lock);
- ppsc->rfchange_inprogress = false;
- spin_unlock(&rtlpriv->locks.rf_ps_lock);
-
- return actionallowed;
-}
-
-static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- ppsc->swrf_processing = true;
-
- if (ppsc->inactive_pwrstate == ERFON &&
- rtlhal->interface == INTF_PCI) {
- if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
- RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
- rtlhal->interface == INTF_PCI) {
- rtlpriv->intf_ops->disable_aspm(hw);
- RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
- }
- }
-
- rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate,
- RF_CHANGE_BY_IPS);
-
- if (ppsc->inactive_pwrstate == ERFOFF &&
- rtlhal->interface == INTF_PCI) {
- if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
- !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
- rtlpriv->intf_ops->enable_aspm(hw);
- RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
- }
- }
-
- ppsc->swrf_processing = false;
-}
-
-void rtl_ips_nic_off_wq_callback(void *data)
-{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum rf_pwrstate rtstate;
-
- if (mac->opmode != NL80211_IFTYPE_STATION) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not station return\n");
- return;
- }
-
- if (mac->p2p_in_use)
- return;
-
- if (mac->link_state > MAC80211_NOLINK)
- return;
-
- if (is_hal_stop(rtlhal))
- return;
-
- if (rtlpriv->sec.being_setkey)
- return;
-
- if (rtlpriv->cfg->ops->bt_coex_off_before_lps)
- rtlpriv->cfg->ops->bt_coex_off_before_lps(hw);
-
- if (ppsc->inactiveps) {
- rtstate = ppsc->rfpwr_state;
-
- /*
- *Do not enter IPS in the following conditions:
- *(1) RF is already OFF or Sleep
- *(2) swrf_processing (indicates the IPS is still under going)
- *(3) Connectted (only disconnected can trigger IPS)
- *(4) IBSS (send Beacon)
- *(5) AP mode (send Beacon)
- *(6) monitor mode (rcv packet)
- */
-
- if (rtstate == ERFON &&
- !ppsc->swrf_processing &&
- (mac->link_state == MAC80211_NOLINK) &&
- !mac->act_scanning) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "IPSEnter(): Turn off RF\n");
-
- ppsc->inactive_pwrstate = ERFOFF;
- ppsc->in_powersavemode = true;
-
- /* call before RF off */
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_ips_notify(rtlpriv,
- ppsc->inactive_pwrstate);
-
- /*rtl_pci_reset_trx_ring(hw); */
- _rtl_ps_inactive_ps(hw);
- }
- }
-}
-
-void rtl_ips_nic_off(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- /* because when link with ap, mac80211 will ask us
- * to disable nic quickly after scan before linking,
- * this will cause link failed, so we delay 100ms here
- */
- queue_delayed_work(rtlpriv->works.rtl_wq,
- &rtlpriv->works.ips_nic_off_wq, MSECS(100));
-}
-
-/* NOTICE: any opmode should exc nic_on, or disable without
- * nic_on may something wrong, like adhoc TP
- */
-void rtl_ips_nic_on(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum rf_pwrstate rtstate;
-
- cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
-
- mutex_lock(&rtlpriv->locks.ips_mutex);
- if (ppsc->inactiveps) {
- rtstate = ppsc->rfpwr_state;
-
- if (rtstate != ERFON &&
- !ppsc->swrf_processing &&
- ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
- ppsc->inactive_pwrstate = ERFON;
- ppsc->in_powersavemode = false;
- _rtl_ps_inactive_ps(hw);
- /* call after RF on */
- if (rtlpriv->phydm.ops)
- rtlpriv->phydm.ops->phydm_reset_dm(rtlpriv);
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_ips_notify(rtlpriv,
- ppsc->inactive_pwrstate);
- }
- }
- mutex_unlock(&rtlpriv->locks.ips_mutex);
-}
-
-/*for FW LPS*/
-
-/*
- *Determine if we can set Fw into PS mode
- *in current condition.Return TRUE if it
- *can enter PS mode.
- */
-static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u32 ps_timediff;
-
- ps_timediff = jiffies_to_msecs(jiffies -
- ppsc->last_delaylps_stamp_jiffies);
-
- if (ps_timediff < 2000) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
- return false;
- }
-
- if (mac->link_state != MAC80211_LINKED)
- return false;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC)
- return false;
-
- return true;
-}
-
-/* Change current and default preamble mode.*/
-void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool enter_fwlps;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC)
- return;
-
- if (mac->link_state != MAC80211_LINKED)
- return;
-
- if (ppsc->dot11_psmode == rt_psmode && rt_psmode == EACTIVE)
- return;
-
- /* Update power save mode configured. */
- ppsc->dot11_psmode = rt_psmode;
-
- /*
- *<FW control LPS>
- *1. Enter PS mode
- * Set RPWM to Fw to turn RF off and send H2C fw_pwrmode
- * cmd to set Fw into PS mode.
- *2. Leave PS mode
- * Send H2C fw_pwrmode cmd to Fw to set Fw into Active
- * mode and set RPWM to turn RF on.
- */
-
- if ((ppsc->fwctrl_lps) && ppsc->report_linked) {
- if (ppsc->dot11_psmode == EACTIVE) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS leave ps_mode:%x\n",
- FW_PS_ACTIVE_MODE);
- enter_fwlps = false;
- ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
- ppsc->smart_ps = 0;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_LPS_ACTION,
- (u8 *)(&enter_fwlps));
- if (ppsc->p2p_ps_info.opp_ps)
- rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
- } else {
- if (rtl_get_fwlps_doze(hw)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS enter ps_mode:%x\n",
- ppsc->fwctrl_psmode);
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
- enter_fwlps = true;
- ppsc->pwr_mode = ppsc->fwctrl_psmode;
- ppsc->smart_ps = 2;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_FW_LPS_ACTION,
- (u8 *)(&enter_fwlps));
-
- } else {
- /* Reset the power save related parameters. */
- ppsc->dot11_psmode = EACTIVE;
- }
- }
- }
-}
-
-/* Interrupt safe routine to enter the leisure power save mode.*/
-static void rtl_lps_enter_core(struct ieee80211_hw *hw)
-{
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (!ppsc->fwctrl_lps)
- return;
-
- if (rtlpriv->sec.being_setkey)
- return;
-
- if (rtlpriv->link_info.busytraffic)
- return;
-
- /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */
- if (mac->cnt_after_linked < 5)
- return;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC)
- return;
-
- if (mac->link_state != MAC80211_LINKED)
- return;
-
- mutex_lock(&rtlpriv->locks.lps_mutex);
-
- /* Don't need to check (ppsc->dot11_psmode == EACTIVE), because
- * bt_ccoexist may ask to enter lps.
- * In normal case, this constraint move to rtl_lps_set_psmode().
- */
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Enter 802.11 power save mode...\n");
- rtl_lps_set_psmode(hw, EAUTOPS);
-
- mutex_unlock(&rtlpriv->locks.lps_mutex);
-}
-
-/* Interrupt safe routine to leave the leisure power save mode.*/
-static void rtl_lps_leave_core(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- mutex_lock(&rtlpriv->locks.lps_mutex);
-
- if (ppsc->fwctrl_lps) {
- if (ppsc->dot11_psmode != EACTIVE) {
- /*FIX ME */
- /*rtlpriv->cfg->ops->enable_interrupt(hw); */
-
- if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
- RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
- rtlhal->interface == INTF_PCI) {
- rtlpriv->intf_ops->disable_aspm(hw);
- RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
- }
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Busy Traffic,Leave 802.11 power save..\n");
-
- rtl_lps_set_psmode(hw, EACTIVE);
- }
- }
- mutex_unlock(&rtlpriv->locks.lps_mutex);
-}
-
-/* For sw LPS*/
-void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = data;
- struct ieee80211_tim_ie *tim_ie;
- u8 *tim;
- u8 tim_len;
- bool u_buffed;
- bool m_buffed;
-
- if (mac->opmode != NL80211_IFTYPE_STATION)
- return;
-
- if (!rtlpriv->psc.swctrl_lps)
- return;
-
- if (rtlpriv->mac80211.link_state != MAC80211_LINKED)
- return;
-
- if (!rtlpriv->psc.sw_ps_enabled)
- return;
-
- if (rtlpriv->psc.fwctrl_lps)
- return;
-
- if (likely(!(hw->conf.flags & IEEE80211_CONF_PS)))
- return;
-
- /* check if this really is a beacon */
- if (!ieee80211_is_beacon(hdr->frame_control))
- return;
-
- /* min. beacon length + FCS_LEN */
- if (len <= 40 + FCS_LEN)
- return;
-
- /* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
- return;
-
- rtlpriv->psc.last_beacon = jiffies;
-
- tim = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
- if (!tim)
- return;
-
- if (tim[1] < sizeof(*tim_ie))
- return;
-
- tim_len = tim[1];
- tim_ie = (struct ieee80211_tim_ie *)&tim[2];
-
- if (!WARN_ON_ONCE(!hw->conf.ps_dtim_period))
- rtlpriv->psc.dtim_counter = tim_ie->dtim_count;
-
- /* Check whenever the PHY can be turned off again. */
-
- /* 1. What about buffered unicast traffic for our AID? */
- u_buffed = ieee80211_check_tim(tim_ie, tim_len,
- rtlpriv->mac80211.assoc_id);
-
- /* 2. Maybe the AP wants to send multicast/broadcast data? */
- m_buffed = tim_ie->bitmap_ctrl & 0x01;
- rtlpriv->psc.multi_buffered = m_buffed;
-
- /* unicast will process by mac80211 through
- * set ~IEEE80211_CONF_PS, So we just check
- * multicast frames here
- */
- if (!m_buffed) {
- /* back to low-power land. and delay is
- * prevent null power save frame tx fail
- */
- queue_delayed_work(rtlpriv->works.rtl_wq,
- &rtlpriv->works.ps_work, MSECS(5));
- } else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
- }
-}
-
-void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- if (!rtlpriv->psc.swctrl_lps)
- return;
- if (mac->link_state != MAC80211_LINKED)
- return;
-
- if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
- RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
- rtlpriv->intf_ops->disable_aspm(hw);
- RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
- }
-
- mutex_lock(&rtlpriv->locks.lps_mutex);
- rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- mutex_unlock(&rtlpriv->locks.lps_mutex);
-}
-
-void rtl_swlps_rfon_wq_callback(void *data)
-{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq);
- struct ieee80211_hw *hw = rtlworks->hw;
-
- rtl_swlps_rf_awake(hw);
-}
-
-void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 sleep_intv;
-
- if (!rtlpriv->psc.sw_ps_enabled)
- return;
-
- if ((rtlpriv->sec.being_setkey) ||
- (mac->opmode == NL80211_IFTYPE_ADHOC))
- return;
-
- /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */
- if ((mac->link_state != MAC80211_LINKED) || (mac->cnt_after_linked < 5))
- return;
-
- if (rtlpriv->link_info.busytraffic)
- return;
-
- spin_lock(&rtlpriv->locks.rf_ps_lock);
- if (rtlpriv->psc.rfchange_inprogress) {
- spin_unlock(&rtlpriv->locks.rf_ps_lock);
- return;
- }
- spin_unlock(&rtlpriv->locks.rf_ps_lock);
-
- mutex_lock(&rtlpriv->locks.lps_mutex);
- rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- mutex_unlock(&rtlpriv->locks.lps_mutex);
-
- if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
- !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
- rtlpriv->intf_ops->enable_aspm(hw);
- RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
- }
-
- /* here is power save alg, when this beacon is DTIM
- * we will set sleep time to dtim_period * n;
- * when this beacon is not DTIM, we will set sleep
- * time to sleep_intv = rtlpriv->psc.dtim_counter or
- * MAX_SW_LPS_SLEEP_INTV(default set to 5)
- */
-
- if (rtlpriv->psc.dtim_counter == 0) {
- if (hw->conf.ps_dtim_period == 1)
- sleep_intv = hw->conf.ps_dtim_period * 2;
- else
- sleep_intv = hw->conf.ps_dtim_period;
- } else {
- sleep_intv = rtlpriv->psc.dtim_counter;
- }
-
- if (sleep_intv > MAX_SW_LPS_SLEEP_INTV)
- sleep_intv = MAX_SW_LPS_SLEEP_INTV;
-
- /* this print should always be dtim_conter = 0 &
- * sleep = dtim_period, that meaons, we should
- * awake before every dtim
- */
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "dtim_counter:%x will sleep :%d beacon_intv\n",
- rtlpriv->psc.dtim_counter, sleep_intv);
-
- /* we tested that 40ms is enough for sw & hw sw delay */
- queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
- MSECS(sleep_intv *
- mac->vif->bss_conf.beacon_int - 40));
-}
-
-void rtl_lps_change_work_callback(struct work_struct *work)
-{
- struct rtl_works *rtlworks =
- container_of(work, struct rtl_works, lps_change_work);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->enter_ps)
- rtl_lps_enter_core(hw);
- else
- rtl_lps_leave_core(hw);
-}
-
-void rtl_lps_enter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (!in_interrupt())
- return rtl_lps_enter_core(hw);
- rtlpriv->enter_ps = true;
- schedule_work(&rtlpriv->works.lps_change_work);
-}
-
-void rtl_lps_leave(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (!in_interrupt())
- return rtl_lps_leave_core(hw);
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
-}
-
-void rtl_swlps_wq_callback(void *data)
-{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- ps_work);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool ps = false;
-
- ps = (hw->conf.flags & IEEE80211_CONF_PS);
-
- /* we can sleep after ps null send ok */
- if (rtlpriv->psc.state_inap) {
- rtl_swlps_rf_sleep(hw);
-
- if (rtlpriv->psc.state && !ps) {
- rtlpriv->psc.sleep_ms = jiffies_to_msecs(jiffies -
- rtlpriv->psc.last_action);
- }
-
- if (ps)
- rtlpriv->psc.last_slept = jiffies;
-
- rtlpriv->psc.last_action = jiffies;
- rtlpriv->psc.state = ps;
- }
-}
-
-static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
- unsigned int len)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_mgmt *mgmt = data;
- struct rtl_p2p_ps_info *p2pinfo = &rtlpriv->psc.p2p_ps_info;
- u8 *pos, *end, *ie;
- u16 noa_len;
- static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
- u8 noa_num, index, i, noa_index = 0;
- bool find_p2p_ie = false, find_p2p_ps_ie = false;
-
- pos = (u8 *)mgmt->u.beacon.variable;
- end = data + len;
- ie = NULL;
-
- while (pos + 1 < end) {
- if (pos + 2 + pos[1] > end)
- return;
-
- if (pos[0] == 221 && pos[1] > 4) {
- if (memcmp(&pos[2], p2p_oui_ie_type, 4) == 0) {
- ie = pos + 2 + 4;
- break;
- }
- }
- pos += 2 + pos[1];
- }
-
- if (!ie)
- return;
- find_p2p_ie = true;
- /*to find noa ie*/
- while (ie + 1 < end) {
- noa_len = READEF2BYTE((__le16 *)&ie[1]);
- if (ie + 3 + ie[1] > end)
- return;
-
- if (ie[0] == 12) {
- find_p2p_ps_ie = true;
- if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
- return;
- }
- noa_num = (noa_len - 2) / 13;
- noa_index = ie[3];
- if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
- P2P_PS_NONE || noa_index != p2pinfo->noa_index) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "update NOA ie.\n");
- p2pinfo->noa_index = noa_index;
- p2pinfo->opp_ps = (ie[4] >> 7);
- p2pinfo->ctwindow = ie[4] & 0x7F;
- p2pinfo->noa_num = noa_num;
- index = 5;
- for (i = 0; i < noa_num; i++) {
- p2pinfo->noa_count_type[i] =
- READEF1BYTE(ie + index);
- index += 1;
- p2pinfo->noa_duration[i] =
- READEF4BYTE((__le32 *)ie + index);
- index += 4;
- p2pinfo->noa_interval[i] =
- READEF4BYTE((__le32 *)ie + index);
- index += 4;
- p2pinfo->noa_start_time[i] =
- READEF4BYTE((__le32 *)ie + index);
- index += 4;
- }
-
- if (p2pinfo->opp_ps == 1) {
- p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
- /* Driver should wait LPS entering
- * CTWindow
- */
- if (rtlpriv->psc.fw_current_inpsmode)
- rtl_p2p_ps_cmd(hw,
- P2P_PS_ENABLE);
- } else if (p2pinfo->noa_num > 0) {
- p2pinfo->p2p_ps_mode = P2P_PS_NOA;
- rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
- } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
- rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
- }
- }
- break;
- }
- ie += 3 + noa_len;
- }
-
- if (find_p2p_ie) {
- if ((p2pinfo->p2p_ps_mode > P2P_PS_NONE) &&
- (!find_p2p_ps_ie))
- rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
- }
-}
-
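/*
 * Illustrative sketch, not from the deleted sources: the loop in
 * rtl_p2p_noa_ie() above walks one 13-byte P2P Notice-of-Absence
 * descriptor per iteration (hence the "(noa_len - 2) % 13" sanity check),
 * reading a count/type byte followed by three little-endian 32-bit fields.
 * The same layout, written as a struct for clarity; the struct and field
 * names are made up here and do not appear in the driver.
 */
#include <linux/types.h>

struct p2p_noa_desc_sketch {
	u8     count_type;   /* NoA count / CTWindow indicator */
	__le32 duration;
	__le32 interval;
	__le32 start_time;
} __packed;                  /* 1 + 4 + 4 + 4 = 13 bytes per descriptor */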
-static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
- unsigned int len)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_mgmt *mgmt = data;
- struct rtl_p2p_ps_info *p2pinfo = &rtlpriv->psc.p2p_ps_info;
- u8 noa_num, index, i, noa_index = 0;
- u8 *pos, *end, *ie;
- u16 noa_len;
- static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
-
- pos = (u8 *)&mgmt->u.action.category;
- end = data + len;
- ie = NULL;
-
- if (pos[0] == 0x7f) {
- if (memcmp(&pos[1], p2p_oui_ie_type, 4) == 0)
- ie = pos + 3 + 4;
- }
-
- if (!ie)
- return;
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
- /*to find noa ie*/
- while (ie + 1 < end) {
- noa_len = READEF2BYTE((__le16 *)&ie[1]);
- if (ie + 3 + ie[1] > end)
- return;
-
- if (ie[0] == 12) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
- RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, "noa ie ",
- ie, noa_len);
- if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
- return;
- }
- noa_num = (noa_len - 2) / 13;
- noa_index = ie[3];
- if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
- P2P_PS_NONE || noa_index != p2pinfo->noa_index) {
- p2pinfo->noa_index = noa_index;
- p2pinfo->opp_ps = (ie[4] >> 7);
- p2pinfo->ctwindow = ie[4] & 0x7F;
- p2pinfo->noa_num = noa_num;
- index = 5;
- for (i = 0; i < noa_num; i++) {
- p2pinfo->noa_count_type[i] =
- READEF1BYTE(ie + index);
- index += 1;
- p2pinfo->noa_duration[i] =
- READEF4BYTE((__le32 *)ie + index);
- index += 4;
- p2pinfo->noa_interval[i] =
- READEF4BYTE((__le32 *)ie + index);
- index += 4;
- p2pinfo->noa_start_time[i] =
- READEF4BYTE((__le32 *)ie + index);
- index += 4;
- }
-
- if (p2pinfo->opp_ps == 1) {
- p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
- /* Driver should wait LPS entering
- * CTWindow
- */
- if (rtlpriv->psc.fw_current_inpsmode)
- rtl_p2p_ps_cmd(hw,
- P2P_PS_ENABLE);
- } else if (p2pinfo->noa_num > 0) {
- p2pinfo->p2p_ps_mode = P2P_PS_NOA;
- rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
- } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
- rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
- }
- }
- break;
- }
- ie += 3 + noa_len;
- }
-}
-
-void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
- struct rtl_p2p_ps_info *p2pinfo = &rtlpriv->psc.p2p_ps_info;
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "p2p state %x\n", p2p_ps_state);
- switch (p2p_ps_state) {
- case P2P_PS_DISABLE:
- p2pinfo->p2p_ps_state = p2p_ps_state;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- &p2p_ps_state);
- p2pinfo->noa_index = 0;
- p2pinfo->ctwindow = 0;
- p2pinfo->opp_ps = 0;
- p2pinfo->noa_num = 0;
- p2pinfo->p2p_ps_mode = P2P_PS_NONE;
- if (rtlps->fw_current_inpsmode) {
- if (rtlps->smart_ps == 0) {
- rtlps->smart_ps = 2;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_PWRMODE,
- &rtlps->pwr_mode);
- }
- }
- break;
- case P2P_PS_ENABLE:
- if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
- p2pinfo->p2p_ps_state = p2p_ps_state;
-
- if (p2pinfo->ctwindow > 0) {
- if (rtlps->smart_ps != 0) {
- rtlps->smart_ps = 0;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_PWRMODE,
- &rtlps->pwr_mode);
- }
- }
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- &p2p_ps_state);
- }
- break;
- case P2P_PS_SCAN:
- case P2P_PS_SCAN_DONE:
- case P2P_PS_ALLSTASLEEP:
- if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
- p2pinfo->p2p_ps_state = p2p_ps_state;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- &p2p_ps_state);
- }
- break;
- default:
- break;
- }
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "ctwindow %x oppps %x\n",
- p2pinfo->ctwindow, p2pinfo->opp_ps);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "count %x duration %x index %x interval %x start time %x noa num %x\n",
- p2pinfo->noa_count_type[0],
- p2pinfo->noa_duration[0],
- p2pinfo->noa_index,
- p2pinfo->noa_interval[0],
- p2pinfo->noa_start_time[0],
- p2pinfo->noa_num);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
-}
-
-void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = data;
-
- if (!mac->p2p)
- return;
- if (mac->link_state != MAC80211_LINKED)
- return;
- /* min. beacon length + FCS_LEN */
- if (len <= 40 + FCS_LEN)
- return;
-
- /* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
- return;
-
- /* check if this really is a beacon */
- if (!(ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_action(hdr->frame_control)))
- return;
-
- if (ieee80211_is_action(hdr->frame_control))
- rtl_p2p_action_ie(hw, data, len - FCS_LEN);
- else
- rtl_p2p_noa_ie(hw, data, len - FCS_LEN);
-}
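/*
 * Illustrative sketch, not from the deleted sources: the sleep-interval
 * selection that the comment in rtl_swlps_rf_sleep() above describes,
 * pulled out as a standalone helper. MAX_SW_LPS_SLEEP_INTV (5) comes from
 * the deleted ps.h; the helper name is made up. The caller above then
 * sleeps for sleep_intv * beacon_interval - 40 ms before re-waking the RF.
 */
#include <linux/types.h>

#define MAX_SW_LPS_SLEEP_INTV	5	/* as in the deleted ps.h */

static u8 swlps_sleep_beacons(u8 dtim_counter, u8 dtim_period)
{
	u8 sleep_intv;

	if (dtim_counter == 0)
		/* this beacon carried the DTIM: sleep one full DTIM period,
		 * doubled when the period is a single beacon
		 */
		sleep_intv = (dtim_period == 1) ? dtim_period * 2 : dtim_period;
	else
		/* otherwise sleep only until the next expected DTIM */
		sleep_intv = dtim_counter;

	if (sleep_intv > MAX_SW_LPS_SLEEP_INTV)
		sleep_intv = MAX_SW_LPS_SLEEP_INTV;

	return sleep_intv;	/* in beacon intervals */
}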
diff --git a/drivers/staging/rtlwifi/ps.h b/drivers/staging/rtlwifi/ps.h
deleted file mode 100644
index badd0fa7ece6..000000000000
--- a/drivers/staging/rtlwifi/ps.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __REALTEK_RTL_PCI_PS_H__
-#define __REALTEK_RTL_PCI_PS_H__
-
-#define MAX_SW_LPS_SLEEP_INTV 5
-
-bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
-bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
-void rtl_ips_nic_off(struct ieee80211_hw *hw);
-void rtl_ips_nic_on(struct ieee80211_hw *hw);
-void rtl_ips_nic_off_wq_callback(void *data);
-void rtl_lps_enter(struct ieee80211_hw *hw);
-void rtl_lps_leave(struct ieee80211_hw *hw);
-
-void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode);
-
-void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len);
-void rtl_swlps_wq_callback(void *data);
-void rtl_swlps_rfon_wq_callback(void *data);
-void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
-void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
-void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
-void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
-void rtl_lps_change_work_callback(struct work_struct *work);
-
-#endif
diff --git a/drivers/staging/rtlwifi/pwrseqcmd.h b/drivers/staging/rtlwifi/pwrseqcmd.h
deleted file mode 100644
index bd8ae84aca4f..000000000000
--- a/drivers/staging/rtlwifi/pwrseqcmd.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8723E_PWRSEQCMD_H__
-#define __RTL8723E_PWRSEQCMD_H__
-
-#include "wifi.h"
-/*---------------------------------------------
- * 3 The value of cmd: 4 bits
- *---------------------------------------------
- */
-#define PWR_CMD_READ 0x00
-#define PWR_CMD_WRITE 0x01
-#define PWR_CMD_POLLING 0x02
-#define PWR_CMD_DELAY 0x03
-#define PWR_CMD_END 0x04
-
-/* define the base address of each block */
-#define PWR_BASEADDR_MAC 0x00
-#define PWR_BASEADDR_USB 0x01
-#define PWR_BASEADDR_PCIE 0x02
-#define PWR_BASEADDR_SDIO 0x03
-
-#define PWR_INTF_SDIO_MSK BIT(0)
-#define PWR_INTF_USB_MSK BIT(1)
-#define PWR_INTF_PCI_MSK BIT(2)
-#define PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-
-#define PWR_FAB_TSMC_MSK BIT(0)
-#define PWR_FAB_UMC_MSK BIT(1)
-#define PWR_FAB_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-
-#define PWR_CUT_TESTCHIP_MSK BIT(0)
-#define PWR_CUT_A_MSK BIT(1)
-#define PWR_CUT_B_MSK BIT(2)
-#define PWR_CUT_C_MSK BIT(3)
-#define PWR_CUT_D_MSK BIT(4)
-#define PWR_CUT_E_MSK BIT(5)
-#define PWR_CUT_F_MSK BIT(6)
-#define PWR_CUT_G_MSK BIT(7)
-#define PWR_CUT_ALL_MSK 0xFF
-
-enum pwrseq_delay_unit {
- PWRSEQ_DELAY_US,
- PWRSEQ_DELAY_MS,
-};
-
-struct wlan_pwr_cfg {
- u16 offset;
- u8 cut_msk;
- u8 fab_msk:4;
- u8 interface_msk:4;
- u8 base:4;
- u8 cmd:4;
- u8 msk;
- u8 value;
-};
-
-#define GET_PWR_CFG_OFFSET(__PWR_CMD) (__PWR_CMD.offset)
-#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) (__PWR_CMD.cut_msk)
-#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) (__PWR_CMD.fab_msk)
-#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) (__PWR_CMD.interface_msk)
-#define GET_PWR_CFG_BASE(__PWR_CMD) (__PWR_CMD.base)
-#define GET_PWR_CFG_CMD(__PWR_CMD) (__PWR_CMD.cmd)
-#define GET_PWR_CFG_MASK(__PWR_CMD) (__PWR_CMD.msk)
-#define GET_PWR_CFG_VALUE(__PWR_CMD) (__PWR_CMD.value)
-
-bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
- u8 fab_version, u8 interface_type,
- struct wlan_pwr_cfg pwrcfgcmd[]);
-
-#endif
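/*
 * Illustrative sketch, not from the deleted sources: how a table of
 * struct wlan_pwr_cfg entries like the one declared above is typically
 * consumed. The real parser is rtl_hal_pwrseqcmdparsing(), declared above
 * and removed elsewhere in this series; reg_read8()/reg_write8() below are
 * placeholders for the driver's register accessors, and the timeout and
 * delay-unit handling of the real code is omitted.
 */
#include <linux/delay.h>

static u8 reg_read8(struct rtl_priv *rtlpriv, u32 offset);		/* placeholder */
static void reg_write8(struct rtl_priv *rtlpriv, u32 offset, u8 val);	/* placeholder */

static bool pwrseq_run_sketch(struct rtl_priv *rtlpriv, u8 cut_msk,
			      u8 fab_msk, u8 intf_msk,
			      struct wlan_pwr_cfg table[])
{
	u32 i = 0;

	for (;;) {
		struct wlan_pwr_cfg cfg = table[i++];
		u8 val;

		/* skip entries that do not match this cut/fab/interface */
		if (!(GET_PWR_CFG_CUT_MASK(cfg) & cut_msk) ||
		    !(GET_PWR_CFG_FAB_MASK(cfg) & fab_msk) ||
		    !(GET_PWR_CFG_INTF_MASK(cfg) & intf_msk))
			continue;

		switch (GET_PWR_CFG_CMD(cfg)) {
		case PWR_CMD_WRITE:
			/* read-modify-write of the masked bits */
			val = reg_read8(rtlpriv, GET_PWR_CFG_OFFSET(cfg));
			val &= ~GET_PWR_CFG_MASK(cfg);
			val |= GET_PWR_CFG_VALUE(cfg) & GET_PWR_CFG_MASK(cfg);
			reg_write8(rtlpriv, GET_PWR_CFG_OFFSET(cfg), val);
			break;
		case PWR_CMD_POLLING:
			/* spin until the masked bits match (no timeout here) */
			while ((reg_read8(rtlpriv, GET_PWR_CFG_OFFSET(cfg)) &
				GET_PWR_CFG_MASK(cfg)) !=
			       (GET_PWR_CFG_VALUE(cfg) & GET_PWR_CFG_MASK(cfg)))
				udelay(10);
			break;
		case PWR_CMD_DELAY:
			mdelay(GET_PWR_CFG_OFFSET(cfg));	/* unit simplified */
			break;
		case PWR_CMD_END:
			return true;	/* every table ends with PWR_CMD_END */
		default:		/* PWR_CMD_READ: nothing to do */
			break;
		}
	}
}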
diff --git a/drivers/staging/rtlwifi/rc.c b/drivers/staging/rtlwifi/rc.c
deleted file mode 100644
index 3ebfc67ee345..000000000000
--- a/drivers/staging/rtlwifi/rc.c
+++ /dev/null
@@ -1,309 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "wifi.h"
-#include "base.h"
-#include "rc.h"
-
-/*
- *Finds the highest rate index we can use
- *if skb is special data like DHCP/EAPOL, we set should
- *it to lowest rate CCK_1M, otherwise we set rate to
- *highest rate based on wireless mode used for iwconfig
- *show Tx rate.
- */
-static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
- struct ieee80211_sta *sta,
- struct sk_buff *skb, bool not_data)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_sta_info *sta_entry = NULL;
- u16 wireless_mode = 0;
- u8 nss; /* NSS -1 */
-
- if (get_rf_type(rtlphy) >= RF_4T4R)
- nss = 3;
- else if (get_rf_type(rtlphy) >= RF_3T3R)
- nss = 2;
- else if (get_rf_type(rtlphy) >= RF_2T2R)
- nss = 1;
- else
- nss = 0;
-
- /*
- *this rate is no use for true rate, firmware
- *will control rate at all it just used for
- *1.show in iwconfig in B/G mode
- *2.in rtl_get_tcb_desc when we check rate is
- * 1M we will not use FW rate but user rate.
- */
-
- if (sta) {
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- wireless_mode = sta_entry->wireless_mode;
- }
-
- if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true, false) ||
- not_data) {
- return 0;
- }
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- if (wireless_mode == WIRELESS_MODE_B) {
- return B_MODE_MAX_RIX;
- } else if (wireless_mode == WIRELESS_MODE_G) {
- return G_MODE_MAX_RIX;
- } else if (wireless_mode == WIRELESS_MODE_N_24G) {
- if (nss == 0)
- return N_MODE_MCS7_RIX;
- else
- return N_MODE_MCS15_RIX;
- } else if (wireless_mode == WIRELESS_MODE_AC_24G) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
- return AC_MODE_MCS8_RIX | (nss << 4);
- else
- return AC_MODE_MCS9_RIX | (nss << 4);
- }
- return 0;
- }
- if (wireless_mode == WIRELESS_MODE_A) {
- return A_MODE_MAX_RIX;
- } else if (wireless_mode == WIRELESS_MODE_N_5G) {
- if (nss == 0)
- return N_MODE_MCS7_RIX;
- else
- return N_MODE_MCS15_RIX;
- } else if (wireless_mode == WIRELESS_MODE_AC_5G) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
- return AC_MODE_MCS8_RIX | (nss << 4);
- else
- return AC_MODE_MCS9_RIX | (nss << 4);
- }
- return 0;
-}
-
-static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
- struct ieee80211_sta *sta,
- struct ieee80211_tx_rate *rate,
- struct ieee80211_tx_rate_control *txrc,
- u8 tries, s8 rix, int rtsctsenable,
- bool not_data)
-{
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- struct rtl_sta_info *sta_entry = NULL;
- u16 wireless_mode = 0;
- u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
-
- if (sta) {
- sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
- sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
- sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- wireless_mode = sta_entry->wireless_mode;
- }
- rate->count = tries;
- rate->idx = rix >= 0x00 ? rix : 0x00;
- if ((rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE ||
- rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8822BE) &&
- wireless_mode == WIRELESS_MODE_AC_5G)
- rate->idx |= 0x10;/*2NSS for 8812AE, 8822BE*/
-
- if (!not_data) {
- if (txrc->short_preamble)
- rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
- if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (sta && (sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40))
- rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (sta && sta->vht_cap.vht_supported)
- rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- } else {
- if (mac->bw_80)
- rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- else if (mac->bw_40)
- rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- }
-
- if (sgi_20 || sgi_40 || sgi_80)
- rate->flags |= IEEE80211_TX_RC_SHORT_GI;
- if (sta && sta->ht_cap.ht_supported &&
- (wireless_mode == WIRELESS_MODE_N_5G ||
- wireless_mode == WIRELESS_MODE_N_24G))
- rate->flags |= IEEE80211_TX_RC_MCS;
- if (sta && sta->vht_cap.vht_supported &&
- (wireless_mode == WIRELESS_MODE_AC_5G ||
- wireless_mode == WIRELESS_MODE_AC_24G ||
- wireless_mode == WIRELESS_MODE_AC_ONLY))
- rate->flags |= IEEE80211_TX_RC_VHT_MCS;
- }
-}
-
-static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta,
- void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
- struct rtl_priv *rtlpriv = ppriv;
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->control.rates;
- __le16 fc = rtl_get_fc(skb);
- u8 try_per_rate, i, rix;
- bool not_data = !ieee80211_is_data(fc);
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- rix = _rtl_rc_get_highest_rix(rtlpriv, sta, skb, not_data);
- try_per_rate = 1;
- _rtl_rc_rate_set_series(rtlpriv, sta, &rates[0], txrc,
- try_per_rate, rix, 1, not_data);
-
- if (!not_data) {
- for (i = 1; i < 4; i++)
- _rtl_rc_rate_set_series(rtlpriv, sta, &rates[i],
- txrc, i, (rix - i), 1,
- not_data);
- }
-}
-
-static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv,
- struct rtl_sta_info *sta_entry, u16 tid)
-{
- struct rtl_mac *mac = rtl_mac(rtlpriv);
-
- if (mac->act_scanning)
- return false;
-
- if (mac->opmode == NL80211_IFTYPE_STATION &&
- mac->cnt_after_linked < 3)
- return false;
-
- if (sta_entry->tids[tid].agg.agg_state == RTL_AGG_STOP)
- return true;
-
- return false;
-}
-
-/*mac80211 Rate Control callbacks*/
-static void rtl_tx_status(void *ppriv,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- struct rtl_priv *rtlpriv = ppriv;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
- __le16 fc = rtl_get_fc(skb);
- struct rtl_sta_info *sta_entry;
-
- if (!priv_sta || !ieee80211_is_data(fc))
- return;
-
- if (rtl_is_special_data(mac->hw, skb, true, true))
- return;
-
- if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
- is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
- return;
-
- if (sta) {
- /* Check if aggregation has to be enabled for this tid */
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if (sta->ht_cap.ht_supported &&
- !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- if (ieee80211_is_data_qos(fc)) {
- u8 tid = rtl_get_tid(skb);
-
- if (_rtl_tx_aggr_check(rtlpriv, sta_entry,
- tid)) {
- sta_entry->tids[tid].agg.agg_state =
- RTL_AGG_PROGRESS;
- ieee80211_start_tx_ba_session(sta, tid,
- 5000);
- }
- }
- }
- }
-}
-
-static void rtl_rate_init(void *ppriv,
- struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-
-static void rtl_rate_update(void *ppriv,
- struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
-{
-}
-
-static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- return rtlpriv;
-}
-
-static void rtl_rate_free(void *rtlpriv)
-{
-}
-
-static void *rtl_rate_alloc_sta(void *ppriv,
- struct ieee80211_sta *sta, gfp_t gfp)
-{
- struct rtl_priv *rtlpriv = ppriv;
- struct rtl_rate_priv *rate_priv;
-
- rate_priv = kzalloc(sizeof(*rate_priv), gfp);
- if (!rate_priv)
- return NULL;
-
- rtlpriv->rate_priv = rate_priv;
-
- return rate_priv;
-}
-
-static void rtl_rate_free_sta(void *rtlpriv,
- struct ieee80211_sta *sta, void *priv_sta)
-{
- struct rtl_rate_priv *rate_priv = priv_sta;
-
- kfree(rate_priv);
-}
-
-static const struct rate_control_ops rtl_rate_ops = {
- .name = "rtl_rc",
- .alloc = rtl_rate_alloc,
- .free = rtl_rate_free,
- .alloc_sta = rtl_rate_alloc_sta,
- .free_sta = rtl_rate_free_sta,
- .rate_init = rtl_rate_init,
- .rate_update = rtl_rate_update,
- .tx_status = rtl_tx_status,
- .get_rate = rtl_get_rate,
-};
-
-int rtl_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rtl_rate_ops);
-}
-
-void rtl_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rtl_rate_ops);
-}
diff --git a/drivers/staging/rtlwifi/rc.h b/drivers/staging/rtlwifi/rc.h
deleted file mode 100644
index 7f631175fac3..000000000000
--- a/drivers/staging/rtlwifi/rc.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_RC_H__
-#define __RTL_RC_H__
-
-#define B_MODE_MAX_RIX 3
-#define G_MODE_MAX_RIX 11
-#define A_MODE_MAX_RIX 7
-
-/* in mac80211 mcs0-mcs15 is idx0-idx15*/
-#define N_MODE_MCS7_RIX 7
-#define N_MODE_MCS15_RIX 15
-
-/* in mac80211 vht mcs0-9 is in [3:0], nss is in [:4] */
-#define AC_MODE_MCS7_RIX 7
-#define AC_MODE_MCS8_RIX 8
-#define AC_MODE_MCS9_RIX 9
-
-struct rtl_rate_priv {
- u8 ht_cap;
-};
-
-int rtl_rate_control_register(void);
-void rtl_rate_control_unregister(void);
-
-#endif
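/*
 * Illustrative sketch, not from the deleted sources: the VHT rate-index
 * packing the comment above refers to, and which rc.c builds with
 * "AC_MODE_MCS9_RIX | (nss << 4)": MCS in bits [3:0], spatial-stream count
 * minus one in the upper nibble. Helper names are made up for this sketch.
 */
#include <linux/types.h>

static inline u8 rtl_vht_rix_pack(u8 mcs, u8 nss)
{
	return (u8)(((nss - 1) << 4) | (mcs & 0x0f));
}

static inline u8 rtl_vht_rix_mcs(u8 rix)
{
	return rix & 0x0f;
}

static inline u8 rtl_vht_rix_nss(u8 rix)
{
	return (rix >> 4) + 1;
}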
diff --git a/drivers/staging/rtlwifi/regd.c b/drivers/staging/rtlwifi/regd.c
deleted file mode 100644
index 5213ca771175..000000000000
--- a/drivers/staging/rtlwifi/regd.c
+++ /dev/null
@@ -1,458 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "wifi.h"
-#include "regd.h"
-
-static struct country_code_to_enum_rd allcountries[] = {
- {COUNTRY_CODE_FCC, "US"},
- {COUNTRY_CODE_IC, "US"},
- {COUNTRY_CODE_ETSI, "EC"},
- {COUNTRY_CODE_SPAIN, "EC"},
- {COUNTRY_CODE_FRANCE, "EC"},
- {COUNTRY_CODE_MKK, "JP"},
- {COUNTRY_CODE_MKK1, "JP"},
- {COUNTRY_CODE_ISRAEL, "EC"},
- {COUNTRY_CODE_TELEC, "JP"},
- {COUNTRY_CODE_MIC, "JP"},
- {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
- {COUNTRY_CODE_WORLD_WIDE_13, "EC"},
- {COUNTRY_CODE_TELEC_NETGEAR, "EC"},
- {COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"},
-};
-
-/*Only these channels all allow active
- *scan on all world regulatory domains
- */
-#define RTL819x_2GHZ_CH01_11 \
- REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
-
-/*We enable active scan on these a case
- *by case basis by regulatory domain
- */
-#define RTL819x_2GHZ_CH12_13 \
- REG_RULE(2467 - 10, 2472 + 10, 40, 0, 20,\
- NL80211_RRF_PASSIVE_SCAN)
-
-#define RTL819x_2GHZ_CH14 \
- REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_OFDM)
-
-/* 5G chan 36 - chan 64*/
-#define RTL819x_5GHZ_5150_5350 \
- REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30, 0)
-/* 5G chan 100 - chan 165*/
-#define RTL819x_5GHZ_5470_5850 \
- REG_RULE(5470 - 10, 5850 + 10, 80, 0, 30, 0)
-/* 5G chan 149 - chan 165*/
-#define RTL819x_5GHZ_5725_5850 \
- REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30, 0)
-
-#define RTL819x_5GHZ_ALL \
- (RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850)
-
-static const struct ieee80211_regdomain rtl_regdom_11 = {
- .n_reg_rules = 1,
- .alpha2 = "99",
- .reg_rules = {
- RTL819x_2GHZ_CH01_11,
- }
-};
-
-static const struct ieee80211_regdomain rtl_regdom_12_13 = {
- .n_reg_rules = 2,
- .alpha2 = "99",
- .reg_rules = {
- RTL819x_2GHZ_CH01_11,
- RTL819x_2GHZ_CH12_13,
- }
-};
-
-static const struct ieee80211_regdomain rtl_regdom_no_midband = {
- .n_reg_rules = 3,
- .alpha2 = "99",
- .reg_rules = {
- RTL819x_2GHZ_CH01_11,
- RTL819x_5GHZ_5150_5350,
- RTL819x_5GHZ_5725_5850,
- }
-};
-
-static const struct ieee80211_regdomain rtl_regdom_60_64 = {
- .n_reg_rules = 3,
- .alpha2 = "99",
- .reg_rules = {
- RTL819x_2GHZ_CH01_11,
- RTL819x_2GHZ_CH12_13,
- RTL819x_5GHZ_5725_5850,
- }
-};
-
-static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
- .n_reg_rules = 4,
- .alpha2 = "99",
- .reg_rules = {
- RTL819x_2GHZ_CH01_11,
- RTL819x_2GHZ_CH12_13,
- RTL819x_2GHZ_CH14,
- RTL819x_5GHZ_5725_5850,
- }
-};
-
-static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = {
- .n_reg_rules = 4,
- .alpha2 = "99",
- .reg_rules = {
- RTL819x_2GHZ_CH01_11,
- RTL819x_2GHZ_CH12_13,
- RTL819x_5GHZ_5150_5350,
- RTL819x_5GHZ_5470_5850,
- }
-};
-
-static const struct ieee80211_regdomain rtl_regdom_14 = {
- .n_reg_rules = 3,
- .alpha2 = "99",
- .reg_rules = {
- RTL819x_2GHZ_CH01_11,
- RTL819x_2GHZ_CH12_13,
- RTL819x_2GHZ_CH14,
- }
-};
-
-static bool _rtl_is_radar_freq(u16 center_freq)
-{
- return center_freq >= 5260 && center_freq <= 5700;
-}
-
-static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
-{
- enum nl80211_band band;
- struct ieee80211_supported_band *sband;
- const struct ieee80211_reg_rule *reg_rule;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!wiphy->bands[band])
- continue;
-
- sband = wiphy->bands[band];
-
- for (i = 0; i < sband->n_channels; i++) {
- ch = &sband->channels[i];
- if (_rtl_is_radar_freq(ch->center_freq) ||
- (ch->flags & IEEE80211_CHAN_RADAR))
- continue;
- if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- reg_rule = freq_reg_info(wiphy,
- ch->center_freq);
- if (IS_ERR(reg_rule))
- continue;
- /*
- *If 11d had a rule for this channel ensure
- *we enable adhoc/beaconing if it allows us to
- *use it. Note that we would have disabled it
- *by applying our static world regdomain by
- *default during init, prior to calling our
- *regulatory_hint().
- */
-
- if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
- ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
- if (!(reg_rule->flags &
- NL80211_RRF_PASSIVE_SCAN))
- ch->flags &=
- ~IEEE80211_CHAN_PASSIVE_SCAN;
- } else {
- if (ch->beacon_found)
- ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN);
- }
- }
- }
-}
-
-/* Allows active scan scan on Ch 12 and 13 */
-static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator
- initiator)
-{
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- const struct ieee80211_reg_rule *reg_rule;
-
- if (!wiphy->bands[NL80211_BAND_2GHZ])
- return;
- sband = wiphy->bands[NL80211_BAND_2GHZ];
-
- /*
- *If no country IE has been received always enable active scan
- *on these channels. This is only done for specific regulatory SKUs
- */
- if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- ch = &sband->channels[11]; /* CH 12 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- ch = &sband->channels[12]; /* CH 13 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- return;
- }
-
- /*If a country IE has been received check its rule for this
- *channel first before enabling active scan. The passive scan
- *would have been enforced by the initial processing of our
- *custom regulatory domain.
- */
-
- ch = &sband->channels[11]; /* CH 12 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
- if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- }
-
- ch = &sband->channels[12]; /* CH 13 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
- if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- }
-}
-
-/*
- *Always apply Radar/DFS rules on
- *freq range 5260 MHz - 5700 MHz
- */
-static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
-{
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- if (!wiphy->bands[NL80211_BAND_5GHZ])
- return;
-
- sband = wiphy->bands[NL80211_BAND_5GHZ];
-
- for (i = 0; i < sband->n_channels; i++) {
- ch = &sband->channels[i];
- if (!_rtl_is_radar_freq(ch->center_freq))
- continue;
-
- /*
- *We always enable radar detection/DFS on this
- *frequency range. Additionally we also apply on
- *this frequency range:
- *- If STA mode does not yet have DFS supports disable
- * active scanning
- *- If adhoc mode does not support DFS yet then disable
- * adhoc in the frequency.
- *- If AP mode does not yet support radar detection/DFS
- *do not allow AP mode
- */
- if (!(ch->flags & IEEE80211_CHAN_DISABLED))
- ch->flags |= IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
- }
-}
-
-static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator,
- struct rtl_regulatory *reg)
-{
- _rtl_reg_apply_beaconing_flags(wiphy, initiator);
- _rtl_reg_apply_active_scan_flags(wiphy, initiator);
-}
-
-static void _rtl_dump_channel_map(struct wiphy *wiphy)
-{
- enum nl80211_band band;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!wiphy->bands[band])
- continue;
- sband = wiphy->bands[band];
- for (i = 0; i < sband->n_channels; i++)
- ch = &sband->channels[i];
- }
-}
-
-static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
- struct regulatory_request *request,
- struct rtl_regulatory *reg)
-{
- /* We always apply this */
- _rtl_reg_apply_radar_flags(wiphy);
-
- switch (request->initiator) {
- case NL80211_REGDOM_SET_BY_DRIVER:
- case NL80211_REGDOM_SET_BY_CORE:
- case NL80211_REGDOM_SET_BY_USER:
- break;
- case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- _rtl_reg_apply_world_flags(wiphy, request->initiator, reg);
- break;
- }
-
- _rtl_dump_channel_map(wiphy);
-
- return 0;
-}
-
-static const struct ieee80211_regdomain *_rtl_regdomain_select(
- struct rtl_regulatory *reg)
-{
- switch (reg->country_code) {
- case COUNTRY_CODE_FCC:
- return &rtl_regdom_no_midband;
- case COUNTRY_CODE_IC:
- return &rtl_regdom_11;
- case COUNTRY_CODE_TELEC_NETGEAR:
- return &rtl_regdom_60_64;
- case COUNTRY_CODE_ETSI:
- case COUNTRY_CODE_SPAIN:
- case COUNTRY_CODE_FRANCE:
- case COUNTRY_CODE_ISRAEL:
- return &rtl_regdom_12_13;
- case COUNTRY_CODE_MKK:
- case COUNTRY_CODE_MKK1:
- case COUNTRY_CODE_TELEC:
- case COUNTRY_CODE_MIC:
- return &rtl_regdom_14_60_64;
- case COUNTRY_CODE_GLOBAL_DOMAIN:
- return &rtl_regdom_14;
- case COUNTRY_CODE_WORLD_WIDE_13:
- case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
- return &rtl_regdom_12_13_5g_all;
- default:
- return &rtl_regdom_no_midband;
- }
-}
-
-static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
- struct wiphy *wiphy,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *
- request))
-{
- const struct ieee80211_regdomain *regd;
-
- wiphy->reg_notifier = reg_notifier;
-
- wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
- wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
- wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
- regd = _rtl_regdomain_select(reg);
- wiphy_apply_custom_regulatory(wiphy, regd);
- _rtl_reg_apply_radar_flags(wiphy);
- _rtl_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
- return 0;
-}
-
-static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(allcountries); i++) {
- if (allcountries[i].countrycode == countrycode)
- return &allcountries[i];
- }
- return NULL;
-}
-
-static u8 channel_plan_to_country_code(u8 channelplan)
-{
- switch (channelplan) {
- case 0x20:
- case 0x21:
- return COUNTRY_CODE_WORLD_WIDE_13;
- case 0x22:
- return COUNTRY_CODE_IC;
- case 0x25:
- return COUNTRY_CODE_ETSI;
- case 0x32:
- return COUNTRY_CODE_TELEC_NETGEAR;
- case 0x41:
- return COUNTRY_CODE_GLOBAL_DOMAIN;
- case 0x7f:
- return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL;
- default:
- return COUNTRY_CODE_MAX; /*Error*/
- }
-}
-
-int rtl_regd_init(struct ieee80211_hw *hw,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct wiphy *wiphy = hw->wiphy;
- struct country_code_to_enum_rd *country = NULL;
-
- if (!wiphy)
- return -EINVAL;
-
- /* init country_code from efuse channel plan */
- rtlpriv->regd.country_code =
- channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
-
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
- rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
-
- if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
-
- rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
- }
-
- country = _rtl_regd_find_country(rtlpriv->regd.country_code);
-
- if (country) {
- rtlpriv->regd.alpha2[0] = country->iso_name[0];
- rtlpriv->regd.alpha2[1] = country->iso_name[1];
- } else {
- rtlpriv->regd.alpha2[0] = '0';
- rtlpriv->regd.alpha2[1] = '0';
- }
-
- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- "rtl: Country alpha2 being used: %c%c\n",
- rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
-
- _rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
-
- return 0;
-}
-
-void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
-
- _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
-}
diff --git a/drivers/staging/rtlwifi/regd.h b/drivers/staging/rtlwifi/regd.h
deleted file mode 100644
index c19e87936ad3..000000000000
--- a/drivers/staging/rtlwifi/regd.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_REGD_H__
-#define __RTL_REGD_H__
-
-/* for kernel 3.14 , both value are changed to IEEE80211_CHAN_NO_IR*/
-#define IEEE80211_CHAN_NO_IBSS IEEE80211_CHAN_NO_IR
-#define IEEE80211_CHAN_PASSIVE_SCAN IEEE80211_CHAN_NO_IR
-
-struct country_code_to_enum_rd {
- u16 countrycode;
- const char *iso_name;
-};
-
-enum country_code_type_t {
- COUNTRY_CODE_FCC = 0,
- COUNTRY_CODE_IC = 1,
- COUNTRY_CODE_ETSI = 2,
- COUNTRY_CODE_SPAIN = 3,
- COUNTRY_CODE_FRANCE = 4,
- COUNTRY_CODE_MKK = 5,
- COUNTRY_CODE_MKK1 = 6,
- COUNTRY_CODE_ISRAEL = 7,
- COUNTRY_CODE_TELEC = 8,
- COUNTRY_CODE_MIC = 9,
- COUNTRY_CODE_GLOBAL_DOMAIN = 10,
- COUNTRY_CODE_WORLD_WIDE_13 = 11,
- COUNTRY_CODE_TELEC_NETGEAR = 12,
- COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
-
- /*add new channel plan above this line */
- COUNTRY_CODE_MAX
-};
-
-int rtl_regd_init(struct ieee80211_hw *hw,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
-void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
-
-#endif
diff --git a/drivers/staging/rtlwifi/rtl8822be/Makefile b/drivers/staging/rtlwifi/rtl8822be/Makefile
deleted file mode 100644
index d535ff8febf1..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-rtl8822be-objs := \
- fw.o \
- hw.o \
- led.o \
- phy.o \
- sw.o \
- trx.o
diff --git a/drivers/staging/rtlwifi/rtl8822be/def.h b/drivers/staging/rtlwifi/rtl8822be/def.h
deleted file mode 100644
index 596f73691d55..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/def.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8822B_DEF_H__
-#define __RTL8822B_DEF_H__
-
-#define RX_DESC_NUM_8822BE 512
-
-#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
-#define HAL_PRIME_CHNL_OFFSET_LOWER 1
-#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-
-#define RX_MPDU_QUEUE 0
-
-#define IS_HT_RATE(_rate) (_rate >= DESC_RATEMCS0)
-#define IS_CCK_RATE(_rate) (_rate >= DESC_RATE1M && _rate <= DESC_RATE11M)
-#define IS_OFDM_RATE(_rate) (_rate >= DESC_RATE6M && _rate <= DESC_RATE54M)
-#define IS_1T_RATE(_rate) \
- ((_rate >= DESC_RATE1M && _rate <= DESC_RATEMCS7) || \
- (_rate >= DESC_RATEVHT1SS_MCS0 && _rate <= DESC_RATEVHT1SS_MCS9))
-#define IS_2T_RATE(_rate) \
- ((_rate >= DESC_RATEMCS8 && _rate <= DESC_RATEMCS15) || \
- (_rate >= DESC_RATEVHT2SS_MCS0 && _rate <= DESC_RATEVHT2SS_MCS9))
-
-#define IS_1T_RATESEC(_rs) \
- ((_rs == CCK) || (_rs == OFDM) || (_rs == HT_MCS0_MCS7) || \
- (_rs == VHT_1SSMCS0_1SSMCS9))
-#define IS_2T_RATESEC(_rs) \
- ((_rs == HT_MCS8_MCS15) || (_rs == VHT_2SSMCS0_2SSMCS9))
-
-enum rx_packet_type {
- NORMAL_RX,
- C2H_PACKET,
-};
-
-enum rtl_desc_qsel {
- QSLT_BK = 0x2,
- QSLT_BE = 0x0,
- QSLT_VI = 0x5,
- QSLT_VO = 0x7,
- QSLT_BEACON = 0x10,
- QSLT_HIGH = 0x11,
- QSLT_MGNT = 0x12,
- QSLT_CMD = 0x13,
-};
-
-enum vht_data_sc {
- VHT_DATA_SC_DONOT_CARE = 0,
- VHT_DATA_SC_20_UPPER_OF_80MHZ = 1,
- VHT_DATA_SC_20_LOWER_OF_80MHZ = 2,
- VHT_DATA_SC_20_UPPERST_OF_80MHZ = 3,
- VHT_DATA_SC_20_LOWEST_OF_80MHZ = 4,
- VHT_DATA_SC_20_RECV1 = 5,
- VHT_DATA_SC_20_RECV2 = 6,
- VHT_DATA_SC_20_RECV3 = 7,
- VHT_DATA_SC_20_RECV4 = 8,
- VHT_DATA_SC_40_UPPER_OF_80MHZ = 9,
- VHT_DATA_SC_40_LOWER_OF_80MHZ = 10,
-};
-#endif
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
deleted file mode 100644
index cf6b7a80b753..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ /dev/null
@@ -1,964 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../wifi.h"
-#include "../pci.h"
-#include "../base.h"
-#include "reg.h"
-#include "def.h"
-#include "fw.h"
-
-static bool _rtl8822be_check_fw_read_last_h2c(struct ieee80211_hw *hw,
- u8 boxnum)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 val_hmetfr;
- bool result = false;
-
- val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR_8822B);
- if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
- result = true;
- return result;
-}
-
-static void _rtl8822be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
- u32 cmd_len, u8 *cmdbuffer)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 boxnum;
- u16 box_reg = 0, box_extreg = 0;
- u8 u1b_tmp;
- bool isfw_read;
- u8 buf_index = 0;
- bool bwrite_success = false;
- u8 wait_h2c_limmit = 100;
- u8 boxcontent[4], boxextcontent[4];
- u32 h2c_waitcounter = 0;
- unsigned long flag;
- u8 idx;
-
- /* 1. Prevent race condition in setting H2C cmd.
- * (copy from MgntActSet_RF_State().)
- */
- while (true) {
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
- if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! wait..H2C_ID=%d.\n",
- element_id);
-
- while (rtlhal->h2c_setinprogress) {
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
- flag);
- h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
- udelay(100);
-
- if (h2c_waitcounter > 1000)
- return;
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
- flag);
- }
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- } else {
- rtlhal->h2c_setinprogress = true;
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- break;
- }
- }
-
- while (!bwrite_success) {
- /* 2. Find the last BOX number which has been written. */
- boxnum = rtlhal->last_hmeboxnum;
- switch (boxnum) {
- case 0:
- box_reg = REG_HMEBOX0_8822B;
- box_extreg = REG_HMEBOX_E0_8822B;
- break;
- case 1:
- box_reg = REG_HMEBOX1_8822B;
- box_extreg = REG_HMEBOX_E1_8822B;
- break;
- case 2:
- box_reg = REG_HMEBOX2_8822B;
- box_extreg = REG_HMEBOX_E2_8822B;
- break;
- case 3:
- box_reg = REG_HMEBOX3_8822B;
- box_extreg = REG_HMEBOX_E3_8822B;
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
- break;
- }
-
- /* 3. Check if the box content is empty. */
- u1b_tmp = rtl_read_byte(rtlpriv, REG_CR_8822B);
-
- if (u1b_tmp == 0xea) {
- if (rtl_read_byte(rtlpriv, REG_TXDMA_STATUS_8822B) ==
- 0xea ||
- rtl_read_byte(rtlpriv, REG_TXPKT_EMPTY_8822B) ==
- 0xea)
- rtl_write_byte(rtlpriv, REG_SYS_CFG1_8822B + 3,
- 0xff);
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "REG_CR is unavaliable\n");
- break;
- }
-
- wait_h2c_limmit = 100;
- isfw_read = _rtl8822be_check_fw_read_last_h2c(hw, boxnum);
- while (!isfw_read) {
- wait_h2c_limmit--;
- if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_WARNING,
- "Wait too long for FW clear MB%d!!!\n",
- boxnum);
- break;
- }
- udelay(10);
- isfw_read =
- _rtl8822be_check_fw_read_last_h2c(hw, boxnum);
- u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW clear MB%d!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
- }
-
- /* If Fw has not read the last H2C cmd,
- * break and give up this H2C.
- */
- if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C reg BOX[%d] fail,Fw don't read.\n",
- boxnum);
- break;
- }
- /* 4. Fill the H2C cmd into box */
- memset(boxcontent, 0, sizeof(boxcontent));
- memset(boxextcontent, 0, sizeof(boxextcontent));
- boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n", box_reg,
- element_id);
-
- switch (cmd_len) {
- case 1:
- case 2:
- case 3:
- /*boxcontent[0] &= ~(BIT(7));*/
- memcpy((u8 *)(boxcontent) + 1, cmdbuffer + buf_index,
- cmd_len);
-
- for (idx = 0; idx < 4; idx++) {
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- }
- break;
- case 4:
- case 5:
- case 6:
- case 7:
- /*boxcontent[0] |= (BIT(7));*/
- memcpy((u8 *)(boxextcontent), cmdbuffer + buf_index + 3,
- cmd_len - 3);
- memcpy((u8 *)(boxcontent) + 1, cmdbuffer + buf_index,
- 3);
-
- for (idx = 0; idx < 4; idx++) {
- rtl_write_byte(rtlpriv, box_extreg + idx,
- boxextcontent[idx]);
- }
-
- for (idx = 0; idx < 4; idx++) {
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- }
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
- break;
- }
-
- bwrite_success = true;
-
- rtlhal->last_hmeboxnum = boxnum + 1;
- if (rtlhal->last_hmeboxnum == 4)
- rtlhal->last_hmeboxnum = 0;
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
- }
-
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
- rtlhal->h2c_setinprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
-}
-
-void rtl8822be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len,
- u8 *cmdbuffer)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp_cmdbuf[8];
-
- if (!rtlhal->fw_ready) {
- WARN_ONCE(true,
- "return H2C cmd because of Fw download fail!!!\n");
- return;
- }
-
- memset(tmp_cmdbuf, 0, 8);
- memcpy(tmp_cmdbuf, cmdbuffer, cmd_len);
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "h2c cmd: len=%d %02X%02X%02X%02X %02X%02X%02X%02X\n", cmd_len,
- tmp_cmdbuf[2], tmp_cmdbuf[1], tmp_cmdbuf[0], element_id,
- tmp_cmdbuf[6], tmp_cmdbuf[5], tmp_cmdbuf[4], tmp_cmdbuf[3]);
-
- _rtl8822be_fill_h2c_command(hw, element_id, cmd_len, tmp_cmdbuf);
-}
-
-void rtl8822be_set_default_port_id_cmd(struct ieee80211_hw *hw)
-{
- u8 h2c_set_default_port_id[H2C_DEFAULT_PORT_ID_LEN];
-
- SET_H2CCMD_DFTPID_PORT_ID(h2c_set_default_port_id, 0);
- SET_H2CCMD_DFTPID_MAC_ID(h2c_set_default_port_id, 0);
-
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_DEFAULT_PORT_ID,
- H2C_DEFAULT_PORT_ID_LEN,
- h2c_set_default_port_id);
-}
-
-void rtl8822be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 u1_h2c_set_pwrmode[H2C_8822B_PWEMODE_LENGTH] = {0};
- static u8 prev_h2c[H2C_8822B_PWEMODE_LENGTH] = {0};
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 rlbm, power_state = 0, byte5 = 0;
- u8 awake_intvl; /* DTIM = (awake_intvl - 1) */
- u8 smart_ps = 0;
- struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
- bool bt_ctrl_lps = (rtlpriv->cfg->ops->get_btc_status() ?
- btc_ops->btc_is_bt_ctrl_lps(rtlpriv) : false);
- bool bt_lps_on = (rtlpriv->cfg->ops->get_btc_status() ?
- btc_ops->btc_is_bt_lps_on(rtlpriv) : false);
-
- memset(u1_h2c_set_pwrmode, 0, H2C_8822B_PWEMODE_LENGTH);
-
- if (bt_ctrl_lps)
- mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
- mode, bt_ctrl_lps);
-
- switch (mode) {
- case FW_PS_MIN_MODE:
- rlbm = 0;
- awake_intvl = 2;
- smart_ps = ppsc->smart_ps;
- break;
- case FW_PS_MAX_MODE:
- rlbm = 1;
- awake_intvl = 2;
- smart_ps = ppsc->smart_ps;
- break;
- case FW_PS_DTIM_MODE:
- rlbm = 2;
- awake_intvl = ppsc->reg_max_lps_awakeintvl;
- /*
- * hw->conf.ps_dtim_period or mac->vif->bss_conf.dtim_period
- * is only used in swlps.
- */
- smart_ps = ppsc->smart_ps;
- break;
- case FW_PS_ACTIVE_MODE:
- rlbm = 0;
- awake_intvl = 1;
- break;
- default:
- rlbm = 2;
- awake_intvl = 4;
- smart_ps = ppsc->smart_ps;
- break;
- }
-
- if (rtlpriv->mac80211.p2p) {
- awake_intvl = 2;
- rlbm = 1;
- }
-
- if (mode == FW_PS_ACTIVE_MODE) {
- byte5 = 0x40;
- power_state = FW_PWR_STATE_ACTIVE;
- } else {
- if (bt_ctrl_lps) {
- byte5 = btc_ops->btc_get_lps_val(rtlpriv);
- power_state = btc_ops->btc_get_rpwm_val(rtlpriv);
-
- if (rlbm == 2 && (byte5 & BIT(4))) {
- /* Keep the awake interval at 1 to prevent
- * degrading coex performance
- */
- awake_intvl = 2;
- rlbm = 2;
- }
- smart_ps = 0;
- } else {
- byte5 = 0x40;
- power_state = FW_PWR_STATE_RF_OFF;
- }
- }
-
- SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
- SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
- SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, smart_ps);
- SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, awake_intvl);
- SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
- SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
- SET_H2CCMD_PWRMODE_PARM_BYTE5(u1_h2c_set_pwrmode, byte5);
-
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl8822be_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
- u1_h2c_set_pwrmode, H2C_8822B_PWEMODE_LENGTH);
- if (rtlpriv->cfg->ops->get_btc_status())
- btc_ops->btc_record_pwr_mode(rtlpriv, u1_h2c_set_pwrmode,
- H2C_8822B_PWEMODE_LENGTH);
-
- if (!memcmp(prev_h2c, u1_h2c_set_pwrmode, H2C_8822B_PWEMODE_LENGTH))
- return;
- memcpy(prev_h2c, u1_h2c_set_pwrmode, H2C_8822B_PWEMODE_LENGTH);
-
- rtl8822be_set_default_port_id_cmd(hw);
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_SETPWRMODE,
- H2C_8822B_PWEMODE_LENGTH, u1_h2c_set_pwrmode);
-}
-
-void rtl8822be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus)
-{
- u8 parm[4] = {0, 0, 0, 0};
- /* parm[0]: bit0=0-->Disconnect, bit0=1-->Connect
- * bit1=0-->update Media Status to MACID
- * bit1=1-->update Media Status from MACID to MACID_End
- * parm[1]: MACID, if this is INFRA_STA, MacID = 0
- * parm[2]: MACID_End
- * parm[3]: bit2-0: port ID
- */
-
- SET_H2CCMD_MSRRPT_PARM_OPMODE(parm, mstatus);
- SET_H2CCMD_MSRRPT_PARM_MACID_IND(parm, 0);
-
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_MSRRPT, 4, parm);
-}
-
-static bool _rtl8822be_send_bcn_or_cmd_packet(struct ieee80211_hw *hw,
- struct sk_buff *skb, u8 hw_queue)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl8192_tx_ring *ring;
- struct rtl_tx_desc *pdesc;
- struct rtl_tx_buffer_desc *pbd_desc;
- unsigned long flags;
- struct sk_buff *pskb = NULL;
- u8 *pdesc_or_bddesc;
- dma_addr_t dma_addr;
-
- if (hw_queue != BEACON_QUEUE && hw_queue != H2C_QUEUE)
- return false;
-
- ring = &rtlpci->tx_ring[hw_queue];
-
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
-
- if (hw_queue == BEACON_QUEUE) {
- pdesc = &ring->desc[0];
- pbd_desc = &ring->buffer_desc[0];
- pdesc_or_bddesc = (u8 *)pbd_desc;
-
- /* free previous beacon queue */
- pskb = __skb_dequeue(&ring->queue);
-
- if (!pskb)
- goto free_prev_skb_done;
-
- dma_addr = rtlpriv->cfg->ops->get_desc(
- hw, (u8 *)pbd_desc, true, HW_DESC_TXBUFF_ADDR);
-
- pci_unmap_single(rtlpci->pdev, dma_addr, pskb->len,
- PCI_DMA_TODEVICE);
- kfree_skb(pskb);
-
-free_prev_skb_done:
- ;
-
- } else { /* hw_queue == TXCMD_QUEUE */
- if (rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "get_available_desc fail hw_queue=%d\n",
- hw_queue);
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
- flags);
- return false;
- }
-
- pdesc = &ring->desc[ring->cur_tx_wp];
- pbd_desc = &ring->buffer_desc[ring->cur_tx_wp];
- pdesc_or_bddesc = (u8 *)pdesc;
- }
-
- rtlpriv->cfg->ops->fill_tx_special_desc(hw, (u8 *)pdesc, (u8 *)pbd_desc,
- skb, hw_queue);
-
- __skb_queue_tail(&ring->queue, skb);
-
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc_or_bddesc, true,
- HW_DESC_OWN, (u8 *)&hw_queue);
-
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
- rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
-
- return true;
-}
-
-bool rtl8822b_halmac_cb_write_data_rsvd_page(struct rtl_priv *rtlpriv, u8 *buf,
- u32 size)
-{
- struct sk_buff *skb = NULL;
- u8 u1b_tmp;
- int count;
-
- skb = dev_alloc_skb(size);
- if (!skb)
- return false;
- memcpy((u8 *)skb_put(skb, size), buf, size);
-
- if (!_rtl8822be_send_bcn_or_cmd_packet(rtlpriv->hw, skb, BEACON_QUEUE))
- return false;
-
- /* This code isn't actually needed, because halmac will check
- * BCN_VALID
- */
-
- /* Polling Beacon Queue to send Beacon */
- u1b_tmp = rtl_read_byte(rtlpriv, REG_RX_RXBD_NUM_8822B + 1);
- count = 0;
- while ((count < 20) && (u1b_tmp & BIT(4))) {
- count++;
- udelay(10);
- u1b_tmp = rtl_read_byte(rtlpriv, REG_RX_RXBD_NUM_8822B + 1);
- }
-
- if (count >= 20)
- pr_err("%s polling beacon fail\n", __func__);
-
- return true;
-}
-
-bool rtl8822b_halmac_cb_write_data_h2c(struct rtl_priv *rtlpriv, u8 *buf,
- u32 size)
-{
- struct sk_buff *skb = NULL;
-
- /* without GFP_DMA, pci_map_single() may not work */
- skb = __netdev_alloc_skb(NULL, size, GFP_ATOMIC | GFP_DMA);
- if (!skb)
- return false;
- memcpy((u8 *)skb_put(skb, size), buf, size);
-
- return _rtl8822be_send_bcn_or_cmd_packet(rtlpriv->hw, skb, H2C_QUEUE);
-}
-
-/* Rsvd page HALMAC_RSVD_DRV_PGNUM_8822B occupies 16 pages (2048 bytes) */
-#define BEACON_PG 0 /* ->1 */
-#define PSPOLL_PG 2
-#define NULL_PG 3
-#define PROBERSP_PG 4 /* ->5 */
-#define QOS_NULL_PG 6
-#define BT_QOS_NULL_PG 7
-
-#define TOTAL_RESERVED_PKT_LEN 1024
-
-static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {/* page size = 128 */
- /* page 0 beacon */
- 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
- 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x20, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x64, 0x00, 0x10, 0x04, 0x00, 0x05, 0x54, 0x65,
- 0x73, 0x74, 0x32, 0x01, 0x08, 0x82, 0x84, 0x0B,
- 0x16, 0x24, 0x30, 0x48, 0x6C, 0x03, 0x01, 0x06,
- 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x02, 0x32,
- 0x04, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C,
- 0x09, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x3D, 0x00, 0xDD, 0x07, 0x00, 0xE0, 0x4C,
- 0x02, 0x02, 0x00, 0x00, 0xDD, 0x18, 0x00, 0x50,
- 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04,
- 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, 0x01, 0x00,
-
- /* page 1 beacon */
- 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x10, 0x00, 0x30, 0x84, 0x00, 0x12, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
- /* page 2 ps-poll */
- 0xA4, 0x10, 0x01, 0xC0, 0xEC, 0x1A, 0x59, 0x0B,
- 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x18, 0x00, 0x30, 0x84, 0x00, 0x12, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
- /* page 3 null */
- 0x48, 0x01, 0x00, 0x00, 0xEC, 0x1A, 0x59, 0x0B,
- 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
- 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x72, 0x00, 0x30, 0x84, 0x00, 0x12, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
- /* page 4 probe_resp */
- 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
- 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
- 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
- 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
- 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
- 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
- 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
- 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
- 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
- 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
- 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
- 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
- /* page 5 probe_resp */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x1A, 0x00, 0x30, 0x84, 0x00, 0x12, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
- /* page 6 qos null data */
- 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x1A, 0x00, 0x30, 0x84, 0x00, 0x12, 0x00, 0x00,
- 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
- /* page 7 BT-qos null data */
- 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct sk_buff *skb = NULL;
-
- u32 totalpacketlen;
- bool rtstatus;
- u8 u1_rsvd_page_loc[7] = {0};
- bool b_dlok = false;
-
- u8 *beacon;
- u8 *p_pspoll;
- u8 *nullfunc;
- u8 *p_probersp;
- u8 *qosnull;
- u8 *btqosnull;
-
- memset(u1_rsvd_page_loc, 0, sizeof(u1_rsvd_page_loc));
-
- /*---------------------------------------------------------
- * (1) beacon
- *---------------------------------------------------------
- */
- beacon = &reserved_page_packet[BEACON_PG * 128];
- SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
- SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
-
- /*-------------------------------------------------------
- * (2) ps-poll
- *--------------------------------------------------------
- */
- p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
- SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
- SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
- SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
-
- SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1_rsvd_page_loc, PSPOLL_PG);
-
- /*--------------------------------------------------------
- * (3) null data
- *---------------------------------------------------------
- */
- nullfunc = &reserved_page_packet[NULL_PG * 128];
- SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
- SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
- SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
-
- SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1_rsvd_page_loc, NULL_PG);
-
- /*---------------------------------------------------------
- * (4) probe response
- *----------------------------------------------------------
- */
- p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
- SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
- SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
- SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
-
- SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1_rsvd_page_loc, PROBERSP_PG);
-
- /*---------------------------------------------------------
- * (5) QoS null data
- *----------------------------------------------------------
- */
- qosnull = &reserved_page_packet[QOS_NULL_PG * 128];
- SET_80211_HDR_ADDRESS1(qosnull, mac->bssid);
- SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr);
- SET_80211_HDR_ADDRESS3(qosnull, mac->bssid);
-
- SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1_rsvd_page_loc, QOS_NULL_PG);
-
- /*---------------------------------------------------------
- * (6) BT QoS null data
- *----------------------------------------------------------
- */
- btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128];
- SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
- SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
- SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
-
- SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1_rsvd_page_loc,
- BT_QOS_NULL_PG);
-
- totalpacketlen = TOTAL_RESERVED_PKT_LEN;
-
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
- "rtl8822be_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
- &reserved_page_packet[0], totalpacketlen);
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
- "rtl8822be_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
- u1_rsvd_page_loc, 3);
-
- skb = dev_alloc_skb(totalpacketlen);
- if (!skb)
- return;
- memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
- totalpacketlen);
-
- rtstatus = _rtl8822be_send_bcn_or_cmd_packet(hw, skb, BEACON_QUEUE);
-
- if (rtstatus)
- b_dlok = true;
-
- if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C_RSVDPAGE:\n",
- u1_rsvd_page_loc, 3);
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_RSVDPAGE,
- sizeof(u1_rsvd_page_loc),
- u1_rsvd_page_loc);
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
- }
-}
-
-/* Should check whether the FW supports P2P or not. */
-static void rtl8822be_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw,
- u8 ctwindow)
-{
- u8 u1_ctwindow_period[1] = {ctwindow};
-
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_P2P_PS_CTW_CMD, 1,
- u1_ctwindow_period);
-}
-
-void rtl8822be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_p2p_ps_info *p2pinfo = &rtlps->p2p_ps_info;
- struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
- u8 i;
- u16 ctwindow;
- u32 start_time, tsf_low;
-
- switch (p2p_ps_state) {
- case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
- memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
- break;
- case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
- /* update CTWindow value. */
- if (p2pinfo->ctwindow > 0) {
- p2p_ps_offload->ctwindow_en = 1;
- ctwindow = p2pinfo->ctwindow;
- rtl8822be_set_p2p_ctw_period_cmd(hw, ctwindow);
- }
- /* hw only supports 2 sets of NoA */
- for (i = 0; i < p2pinfo->noa_num; i++) {
- /* To control the register setting for which NoA */
- rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
- if (i == 0)
- p2p_ps_offload->noa0_en = 1;
- else
- p2p_ps_offload->noa1_en = 1;
- /* config P2P NoA Descriptor Register */
- rtl_write_dword(rtlpriv, 0x5E0,
- p2pinfo->noa_duration[i]);
- rtl_write_dword(rtlpriv, 0x5E4,
- p2pinfo->noa_interval[i]);
-
- /* Get current TSF value */
- tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR_8822B);
-
- start_time = p2pinfo->noa_start_time[i];
- if (p2pinfo->noa_count_type[i] != 1) {
- while (start_time <= (tsf_low + (50 * 1024))) {
- start_time += p2pinfo->noa_interval[i];
- if (p2pinfo->noa_count_type[i] != 255)
- p2pinfo->noa_count_type[i]--;
- }
- }
- rtl_write_dword(rtlpriv, 0x5E8, start_time);
- rtl_write_dword(rtlpriv, 0x5EC,
- p2pinfo->noa_count_type[i]);
- }
- if (p2pinfo->opp_ps == 1 || p2pinfo->noa_num > 0) {
- /* rst p2p circuit */
- rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST_8822B, BIT(4));
- p2p_ps_offload->offload_en = 1;
-
- if (rtlpriv->mac80211.p2p == P2P_ROLE_GO) {
- p2p_ps_offload->role = 1;
- p2p_ps_offload->allstasleep = 0;
- } else {
- p2p_ps_offload->role = 0;
- }
- p2p_ps_offload->discovery = 0;
- }
- break;
- case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
- p2p_ps_offload->discovery = 1;
- break;
- case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
- p2p_ps_offload->discovery = 0;
- p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
- break;
- default:
- break;
- }
-
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_P2P_PS_OFFLOAD, 1,
- (u8 *)p2p_ps_offload);
-}
-
-static
-void rtl8822be_c2h_content_parsing_ext(struct ieee80211_hw *hw,
- u8 c2h_sub_cmd_id,
- u8 c2h_cmd_len,
- u8 *c2h_content_buf)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_halmac_ops *halmac_ops;
-
- switch (c2h_sub_cmd_id) {
- case 0x0F:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_8822BE_TX_REPORT!\n");
- rtl_tx_report_handler(hw, c2h_content_buf, c2h_cmd_len);
- break;
- default:
- /* indicate c2h pkt + rx desc to halmac */
- halmac_ops = rtlpriv->halmac.ops;
- halmac_ops->halmac_c2h_handle(rtlpriv,
- c2h_content_buf - 24 - 2 - 2,
- c2h_cmd_len + 24 + 2 + 2);
- break;
- }
-}
-
-void rtl8822be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
- u8 c2h_cmd_len, u8 *tmp_buf)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
-
- if (c2h_cmd_id == 0xFF) {
- rtl8822be_c2h_content_parsing_ext(hw, tmp_buf[0],
- c2h_cmd_len - 2,
- tmp_buf + 2);
- return;
- }
-
- switch (c2h_cmd_id) {
- case C2H_8822B_DBG:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_8822BE_DBG!!\n");
- break;
- case C2H_8822B_TXBF:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_8822B_TXBF!!\n");
- break;
- case C2H_8822B_BT_INFO:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_8822BE_BT_INFO!!\n");
- if (rtlpriv->cfg->ops->get_btc_status())
- btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf,
- c2h_cmd_len);
- break;
- case C2H_8822B_BT_MP:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_8822BE_BT_MP!!\n");
- if (rtlpriv->cfg->ops->get_btc_status())
- btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf,
- c2h_cmd_len);
- break;
- default:
- if (!rtlpriv->phydm.ops->phydm_c2h_content_parsing(
- rtlpriv, c2h_cmd_id, c2h_cmd_len, tmp_buf))
- break;
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
- break;
- }
-}
-
-void rtl8822be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0;
- u8 *tmp_buf = NULL;
-
- c2h_cmd_id = buffer[0];
- c2h_cmd_seq = buffer[1];
- c2h_cmd_len = len - 2;
- tmp_buf = buffer + 2;
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n",
- c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len);
-
- RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
-
- switch (c2h_cmd_id) {
- case C2H_8822B_BT_INFO:
- case C2H_8822B_BT_MP:
- rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
- break;
- default:
- rtl8822be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len,
- tmp_buf);
- break;
- }
-}
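The deleted _rtl8822be_fill_h2c_command() above implements the host-to-card (H2C) mailbox handshake: after confirming via REG_HMETFR that the firmware has consumed the previous box, byte 0 of the 4-byte box carries the element ID, payload bytes 0-2 fill the rest of the box, and payload bytes 3-6 go to the extension box, which is written before the main box. A minimal sketch of that split follows; write_box() and write_ext_box() are hypothetical stand-ins for the real register writes.

/* Sketch of the H2C box split: byte 0 is the element ID, the first 3
 * payload bytes stay in the box, the remainder goes to the extension
 * box, and the extension is written first.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void write_ext_box(const uint8_t e[4])
{
        printf("ext: %02x %02x %02x %02x\n", e[0], e[1], e[2], e[3]);
}

static void write_box(const uint8_t b[4])
{
        printf("box: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
}

static void fill_h2c(uint8_t element_id, const uint8_t *cmd, uint32_t len)
{
        uint8_t box[4] = { 0 }, ext[4] = { 0 };

        if (len == 0 || len > 7)
                return; /* one box pair holds at most 7 payload bytes */

        box[0] = element_id;
        if (len <= 3) {
                memcpy(&box[1], cmd, len);
        } else {
                memcpy(&box[1], cmd, 3);        /* bytes 0-2 stay in the box */
                memcpy(ext, cmd + 3, len - 3);  /* bytes 3-6 go to the ext box */
                write_ext_box(ext);             /* extension written first */
        }
        write_box(box);
}

int main(void)
{
        const uint8_t pwrmode[7] = { 0x01, 0x02, 0x21, 0x00, 0x0c, 0x40, 0x00 };

        fill_h2c(0x20 /* SETPWRMODE */, pwrmode, sizeof(pwrmode));
        return 0;
}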
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.h b/drivers/staging/rtlwifi/rtl8822be/fw.h
deleted file mode 100644
index 6e7eb52dba2d..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/fw.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8822B__FW__H__
-#define __RTL8822B__FW__H__
-
-#define USE_OLD_WOWLAN_DEBUG_FW 0
-
-#define H2C_8822B_RSVDPAGE_LOC_LEN 5
-#define H2C_8822B_PWEMODE_LENGTH 7
-#define H2C_8822B_JOINBSSRPT_LENGTH 1
-#define H2C_8822B_AP_OFFLOAD_LENGTH 3
-#define H2C_8822B_WOWLAN_LENGTH 3
-#define H2C_8822B_KEEP_ALIVE_CTRL_LENGTH 3
-#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
-#define H2C_8822B_REMOTE_WAKE_CTRL_LEN 1
-#else
-#define H2C_8822B_REMOTE_WAKE_CTRL_LEN 3
-#endif
-#define H2C_8822B_AOAC_GLOBAL_INFO_LEN 2
-#define H2C_8822B_AOAC_RSVDPAGE_LOC_LEN 7
-#define H2C_DEFAULT_PORT_ID_LEN 2
-
-/* FW PS state for RPWM.
- * BIT[2:0] = HW state
- * BIT[3] = Protocol PS state, 1: register active state, 0: register sleep state
- * BIT[4] = sub-state
- */
-#define FW_PS_RF_ON BIT(2)
-#define FW_PS_REGISTER_ACTIVE BIT(3)
-
-#define FW_PS_ACK BIT(6)
-#define FW_PS_TOGGLE BIT(7)
-
-/* 8822B RPWM value */
-/* BIT[0] = 1: 32k, 0: 40M */
-#define FW_PS_CLOCK_OFF BIT(0) /* 32k */
-#define FW_PS_CLOCK_ON 0 /* 40M */
-
-#define FW_PS_STATE_MASK (0x0F)
-#define FW_PS_STATE_HW_MASK (0x07)
-#define FW_PS_STATE_INT_MASK (0x3F)
-
-#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x))
-
-#define FW_PS_STATE_ALL_ON_8822B (FW_PS_CLOCK_ON)
-#define FW_PS_STATE_RF_ON_8822B (FW_PS_CLOCK_ON)
-#define FW_PS_STATE_RF_OFF_8822B (FW_PS_CLOCK_ON)
-#define FW_PS_STATE_RF_OFF_LOW_PWR (FW_PS_CLOCK_OFF)
-
-/* For 8822B H2C PwrMode Cmd ID 5.*/
-#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
-#define FW_PWR_STATE_RF_OFF 0
-
-#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK)
-
-#define IS_IN_LOW_POWER_STATE_8822B(fw_ps_state) \
- (FW_PS_STATE(fw_ps_state) == FW_PS_CLOCK_OFF)
-
-#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
-#define FW_PWR_STATE_RF_OFF 0
-
-enum rtl8822b_h2c_cmd {
- H2C_8822B_RSVDPAGE = 0,
- H2C_8822B_MSRRPT = 1,
- H2C_8822B_SCAN = 2,
- H2C_8822B_KEEP_ALIVE_CTRL = 3,
- H2C_8822B_DISCONNECT_DECISION = 4,
-#if (USE_OLD_WOWLAN_DEBUG_FW == 1)
- H2C_8822B_WO_WLAN = 5,
-#endif
- H2C_8822B_INIT_OFFLOAD = 6,
-#if (USE_OLD_WOWLAN_DEBUG_FW == 1)
- H2C_8822B_REMOTE_WAKE_CTRL = 7,
-#endif
- H2C_8822B_AP_OFFLOAD = 8,
- H2C_8822B_BCN_RSVDPAGE = 9,
- H2C_8822B_PROBERSP_RSVDPAGE = 10,
-
- H2C_8822B_SETPWRMODE = 0x20,
- H2C_8822B_PS_TUNING_PARA = 0x21,
- H2C_8822B_PS_TUNING_PARA2 = 0x22,
- H2C_8822B_PS_LPS_PARA = 0x23,
- H2C_8822B_P2P_PS_OFFLOAD = 024,
- H2C_8822B_DEFAULT_PORT_ID = 0x2C,
-
-#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
- H2C_8822B_WO_WLAN = 0x80,
- H2C_8822B_REMOTE_WAKE_CTRL = 0x81,
- H2C_8822B_AOAC_GLOBAL_INFO = 0x82,
- H2C_8822B_AOAC_RSVDPAGE = 0x83,
-#endif
- H2C_8822B_MACID_CFG = 0x40,
- H2C_8822B_RSSI_REPORT = 0x42,
- H2C_8822B_MACID_CFG_3SS = 0x46,
- /* CTW CMD for P2P is not defined yet */
- H2C_8822B_P2P_PS_CTW_CMD = 0x99,
- MAX_8822B_H2CCMD
-};
-
-enum rtl8822b_c2h_evt {
- C2H_8822B_DBG = 0x00,
- C2H_8822B_LB = 0x01,
- C2H_8822B_TXBF = 0x02,
- C2H_8822B_TX_REPORT = 0x03,
- C2H_8822B_BT_INFO = 0x09,
- C2H_8822B_BT_MP = 0x0B,
- C2H_8822B_RA_RPT = 0x0C,
- MAX_8822B_C2HEVENT
-};
-
-/* H2C: 0x20 */
-#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 7, __val)
-#define SET_H2CCMD_PWRMODE_PARM_CLK_REQ(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 7, 1, __val)
-#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 4, __val)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 4, 4, __val)
-#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 1, __val)
-#define SET_H2CCMD_PWRMODE_PARM_BCN_EARLY_RPT(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 2, 1, __val)
-#define SET_H2CCMD_PWRMODE_PARM_PORT_ID(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 5, 3, __val)
-#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 5, 0, 8, __val)
-
-/* H2C: 0x00 */
-#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
-#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
-#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
-#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
-
-/* H2C: 0x01 */
-#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
-#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
-#define SET_H2CCMD_MSRRPT_PARM_MACID(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd + 1, 0, 8, __val)
-#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd + 2, 0, 8, __val)
-
-/* H2C: 0x2C */
-#define SET_H2CCMD_DFTPID_PORT_ID(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(((u8 *)(__ph2ccmd)), 0, 8, (__val))
-#define SET_H2CCMD_DFTPID_MAC_ID(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(((u8 *)(__ph2ccmd)) + 1, 0, 8, (__val))
-
-void rtl8822be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len,
- u8 *cmdbuffer);
-void rtl8822be_set_default_port_id_cmd(struct ieee80211_hw *hw);
-void rtl8822be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
-void rtl8822be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus);
-void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
-void rtl8822be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
-void rtl8822be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len);
-void rtl8822be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
- u8 c2h_cmd_len, u8 *tmp_buf);
-bool rtl8822b_halmac_cb_write_data_rsvd_page(struct rtl_priv *rtlpriv, u8 *buf,
- u32 size);
-bool rtl8822b_halmac_cb_write_data_h2c(struct rtl_priv *rtlpriv, u8 *buf,
- u32 size);
-#endif
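All of the SET_H2CCMD_* accessors above reduce to SET_BITS_TO_LE_1BYTE(buffer + byte_offset, bit_offset, width, value), i.e. a masked read-modify-write of one byte in the little-endian command buffer. The following is a minimal stand-in for that helper, packing a few of the power-mode fields the same way; the helper is an illustration, not the driver's actual macro.

/* Stand-in for SET_BITS_TO_LE_1BYTE-style packing: write 'width' bits of
 * 'val' at bit offset 'bit' inside one byte of the command buffer.
 */
#include <stdint.h>
#include <stdio.h>

static void set_bits_le_1byte(uint8_t *byte, unsigned int bit,
                              unsigned int width, uint8_t val)
{
        uint8_t mask = (uint8_t)(((1u << width) - 1u) << bit);

        *byte = (uint8_t)((*byte & ~mask) | ((val << bit) & mask));
}

int main(void)
{
        uint8_t pwrmode[7] = { 0 };

        set_bits_le_1byte(&pwrmode[0], 0, 7, 1); /* MODE           */
        set_bits_le_1byte(&pwrmode[1], 0, 4, 2); /* RLBM           */
        set_bits_le_1byte(&pwrmode[1], 4, 4, 1); /* SMART_PS       */
        set_bits_le_1byte(&pwrmode[2], 0, 8, 2); /* AWAKE_INTERVAL */
        printf("%02x %02x %02x\n", pwrmode[0], pwrmode[1], pwrmode[2]);
        return 0;
}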
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c
deleted file mode 100644
index 88ba5b2fea6a..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/hw.c
+++ /dev/null
@@ -1,2430 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../wifi.h"
-#include "../efuse.h"
-#include "../base.h"
-#include "../regd.h"
-#include "../cam.h"
-#include "../ps.h"
-#include "../pci.h"
-#include "reg.h"
-#include "def.h"
-#include "phy.h"
-#include "fw.h"
-#include "led.h"
-#include "hw.h"
-
-#define LLT_CONFIG 5
-
-u8 rtl_channel5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
- 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
- 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
- 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
- 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
- 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
- 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
-u8 rtl_channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122,
- 138, 155, 171};
-
-static void _rtl8822be_set_bcn_ctrl_reg(struct ieee80211_hw *hw, u8 set_bits,
- u8 clear_bits)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpci->reg_bcn_ctrl_val |= set_bits;
- rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
-
- rtl_write_byte(rtlpriv, REG_BCN_CTRL_8822B,
- (u8)rtlpci->reg_bcn_ctrl_val);
-}
-
-static void _rtl8822be_stop_tx_beacon(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp;
-
- tmp = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL_8822B + 2);
- rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL_8822B + 2, tmp & (~BIT(6)));
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT_8822B + 1, 0x64);
- tmp = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT_8822B + 2);
- tmp &= ~(BIT(0));
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT_8822B + 2, tmp);
-}
-
-static void _rtl8822be_resume_tx_beacon(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp;
-
- tmp = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL_8822B + 2);
- rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL_8822B + 2, tmp | BIT(6));
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT_8822B + 1, 0xff);
- tmp = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT_8822B + 2);
- tmp |= BIT(0);
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT_8822B + 2, tmp);
-}
-
-static void _rtl8822be_enable_bcn_sub_func(struct ieee80211_hw *hw)
-{
- _rtl8822be_set_bcn_ctrl_reg(hw, 0, BIT(1));
-}
-
-static void _rtl8822be_disable_bcn_sub_func(struct ieee80211_hw *hw)
-{
- _rtl8822be_set_bcn_ctrl_reg(hw, BIT(1), 0);
-}
-
-static void _rtl8822be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val,
- bool b_need_turn_off_ckk)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u32 count = 0, isr_regaddr, content;
- bool b_schedule_timer = b_need_turn_off_ckk;
-
- if (!rtlhal->fw_ready)
- return;
- if (!rtlpriv->psc.fw_current_inpsmode)
- return;
-
- while (1) {
- spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
- if (rtlhal->fw_clk_change_in_progress) {
- while (rtlhal->fw_clk_change_in_progress) {
- spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
- count++;
- udelay(100);
- if (count > 1000)
- return;
- spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
- }
- spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
- } else {
- rtlhal->fw_clk_change_in_progress = false;
- spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
- break;
- }
- }
-
- if (IS_IN_LOW_POWER_STATE_8822B(rtlhal->fw_ps_state)) {
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
- if (FW_PS_IS_ACK(rpwm_val)) {
- isr_regaddr = REG_HISR0_8822B;
- content = rtl_read_dword(rtlpriv, isr_regaddr);
- while (!(content & IMR_CPWM) && (count < 500)) {
- udelay(50);
- count++;
- content = rtl_read_dword(rtlpriv, isr_regaddr);
- }
-
- if (content & IMR_CPWM) {
- rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
- rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_8822B;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! PSState = %X\n",
- rtlhal->fw_ps_state);
- }
- }
-
- spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
- rtlhal->fw_clk_change_in_progress = false;
- spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
- if (b_schedule_timer) {
- mod_timer(&rtlpriv->works.fw_clockoff_timer,
- jiffies + MSECS(10));
- }
-
- } else {
- spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
- rtlhal->fw_clk_change_in_progress = false;
- spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
- }
-}
-
-static void _rtl8822be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl8192_tx_ring *ring;
- enum rf_pwrstate rtstate;
- bool b_schedule_timer = false;
- u8 queue;
-
- if (!rtlhal->fw_ready)
- return;
- if (!rtlpriv->psc.fw_current_inpsmode)
- return;
- if (!rtlhal->allow_sw_to_change_hwclc)
- return;
-
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate));
- if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF)
- return;
-
- for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
- ring = &rtlpci->tx_ring[queue];
- if (skb_queue_len(&ring->queue)) {
- b_schedule_timer = true;
- break;
- }
- }
-
- if (b_schedule_timer) {
- mod_timer(&rtlpriv->works.fw_clockoff_timer,
- jiffies + MSECS(10));
- return;
- }
-
- if (FW_PS_STATE(rtlhal->fw_ps_state) != FW_PS_STATE_RF_OFF_LOW_PWR) {
- spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
- if (!rtlhal->fw_clk_change_in_progress) {
- rtlhal->fw_clk_change_in_progress = true;
- spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
- rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
- rtl_write_word(rtlpriv, REG_HISR0_8822B, 0x0100);
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
- spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
- rtlhal->fw_clk_change_in_progress = false;
- spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
- } else {
- spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
- mod_timer(&rtlpriv->works.fw_clockoff_timer,
- jiffies + MSECS(10));
- }
- }
-}
-
-static void _rtl8822be_set_fw_ps_rf_on(struct ieee80211_hw *hw)
-{
- u8 rpwm_val = 0;
-
- rpwm_val |= (FW_PS_STATE_RF_OFF_8822B | FW_PS_ACK);
- _rtl8822be_set_fw_clock_on(hw, rpwm_val, true);
-}
-
-static void _rtl8822be_set_fw_ps_rf_off_low_power(struct ieee80211_hw *hw)
-{
- u8 rpwm_val = 0;
-
- rpwm_val |= FW_PS_STATE_RF_OFF_LOW_PWR;
- _rtl8822be_set_fw_clock_off(hw, rpwm_val);
-}
-
-void rtl8822be_fw_clk_off_timer_callback(unsigned long data)
-{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
-
- _rtl8822be_set_fw_ps_rf_off_low_power(hw);
-}
-
-static void _rtl8822be_fwlps_leave(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- bool fw_current_inps = false;
- u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE;
-
- if (ppsc->low_power_enable) {
- rpwm_val = (FW_PS_STATE_ALL_ON_8822B | FW_PS_ACK); /* RF on */
- _rtl8822be_set_fw_clock_on(hw, rpwm_val, false);
- rtlhal->allow_sw_to_change_hwclc = false;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
- (u8 *)(&fw_current_inps));
- } else {
- rpwm_val = FW_PS_STATE_ALL_ON_8822B; /* RF on */
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
- (u8 *)(&fw_current_inps));
- }
-}
-
-static void _rtl8822be_fwlps_enter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- bool fw_current_inps = true;
- u8 rpwm_val;
-
- if (ppsc->low_power_enable) {
- rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR; /* RF off */
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
- (u8 *)(&fw_current_inps));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
- rtlhal->allow_sw_to_change_hwclc = true;
- _rtl8822be_set_fw_clock_off(hw, rpwm_val);
- } else {
- rpwm_val = FW_PS_STATE_RF_OFF_8822B; /* RF off */
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
- (u8 *)(&fw_current_inps));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
- }
-}
-
-void rtl8822be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- switch (variable) {
- case HW_VAR_RCR:
- *((u32 *)(val)) = rtlpci->receive_config;
- break;
- case HW_VAR_RF_STATE:
- *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
- break;
- case HW_VAR_FWLPS_RF_ON: {
- enum rf_pwrstate rf_state;
- u32 val_rcr;
-
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
- (u8 *)(&rf_state));
- if (rf_state == ERFOFF) {
- *((bool *)(val)) = true;
- } else {
- val_rcr = rtl_read_dword(rtlpriv, REG_RCR_8822B);
- val_rcr &= 0x00070000;
- if (val_rcr)
- *((bool *)(val)) = false;
- else
- *((bool *)(val)) = true;
- }
- } break;
- case HW_VAR_FW_PSMODE_STATUS:
- *((bool *)(val)) = ppsc->fw_current_inpsmode;
- break;
- case HW_VAR_CORRECT_TSF: {
- u64 tsf;
- u32 *ptsf_low = (u32 *)&tsf;
- u32 *ptsf_high = ((u32 *)&tsf) + 1;
-
- *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR_8822B + 4));
- *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR_8822B);
-
- *((u64 *)(val)) = tsf;
-
- } break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process %x\n", variable);
- break;
- }
-}
-
-static void _rtl8822be_download_rsvd_page(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp_regcr, tmp_reg422;
- u8 bcnvalid_reg /*, txbc_reg*/;
- u8 count = 0, dlbcn_count = 0;
- bool b_recover = false;
-
- /* Set REG_CR_8822B bit 8. DMA beacon by SW. */
- tmp_regcr = rtl_read_byte(rtlpriv, REG_CR_8822B + 1);
- rtl_write_byte(rtlpriv, REG_CR_8822B + 1, tmp_regcr | BIT(0));
-
- /* Disable HW protection for the time reserved for HW beacon transmission.
- * This fixes reserved page download failures caused by access
- * collisions with the protection time.
- * 2010.05.11. Added by tynli.
- */
- _rtl8822be_set_bcn_ctrl_reg(hw, 0, BIT(3));
- _rtl8822be_set_bcn_ctrl_reg(hw, BIT(4), 0);
-
- /* Set FWHW_TXQ_CTRL 0x422[6]=0 to
- * tell Hw the packet is not a real beacon frame.
- */
- tmp_reg422 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL_8822B + 2);
- rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL_8822B + 2,
- tmp_reg422 & (~BIT(6)));
-
- if (tmp_reg422 & BIT(6))
- b_recover = true;
-
- do {
- /* Clear beacon valid check bit */
- bcnvalid_reg =
- rtl_read_byte(rtlpriv, REG_FIFOPAGE_CTRL_2_8822B + 1);
- bcnvalid_reg = bcnvalid_reg | BIT(7);
- rtl_write_byte(rtlpriv, REG_FIFOPAGE_CTRL_2_8822B + 1,
- bcnvalid_reg);
-
- /* download rsvd page */
- rtl8822be_set_fw_rsvdpagepkt(hw, false);
-
- /* check rsvd page download OK. */
- bcnvalid_reg =
- rtl_read_byte(rtlpriv, REG_FIFOPAGE_CTRL_2_8822B + 1);
-
- count = 0;
- while (!(BIT(7) & bcnvalid_reg) && count < 20) {
- count++;
- udelay(50);
- bcnvalid_reg = rtl_read_byte(
- rtlpriv, REG_FIFOPAGE_CTRL_2_8822B + 1);
- }
-
- dlbcn_count++;
- } while (!(BIT(7) & bcnvalid_reg) && dlbcn_count < 5);
-
- if (!(BIT(7) & bcnvalid_reg))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING,
- "Download RSVD page failed!\n");
-
- /* Enable Bcn */
- _rtl8822be_set_bcn_ctrl_reg(hw, BIT(3), 0);
- _rtl8822be_set_bcn_ctrl_reg(hw, 0, BIT(4));
-
- if (b_recover)
- rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL_8822B + 2,
- tmp_reg422);
-}
-
-void rtl8822be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *efuse = rtl_efuse(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- switch (variable) {
- case HW_VAR_ETHER_ADDR:
- rtlpriv->halmac.ops->halmac_set_mac_address(rtlpriv, 0, val);
- break;
- case HW_VAR_BASIC_RATE: {
- u16 b_rate_cfg = ((u16 *)val)[0];
-
- b_rate_cfg = b_rate_cfg & 0x15f;
- b_rate_cfg |= 0x01;
- b_rate_cfg = (b_rate_cfg | 0xd) & (~BIT(1));
- rtl_write_byte(rtlpriv, REG_RRSR_8822B, b_rate_cfg & 0xff);
- rtl_write_byte(rtlpriv, REG_RRSR_8822B + 1,
- (b_rate_cfg >> 8) & 0xff);
- } break;
- case HW_VAR_BSSID:
- rtlpriv->halmac.ops->halmac_set_bssid(rtlpriv, 0, val);
- break;
- case HW_VAR_SIFS:
- rtl_write_byte(rtlpriv, REG_SIFS_8822B + 1, val[0]);
- rtl_write_byte(rtlpriv, REG_SIFS_TRX_8822B + 1, val[1]);
-
- rtl_write_byte(rtlpriv, REG_SPEC_SIFS_8822B + 1, val[0]);
- rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS_8822B + 1, val[0]);
-
- if (!mac->ht_enable)
- rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM_8822B,
- 0x0e0e);
- else
- rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM_8822B,
- *((u16 *)val));
- break;
- case HW_VAR_SLOT_TIME: {
- u8 e_aci;
-
- RT_TRACE(rtlpriv, COMP_MLME, DBG_TRACE, "HW_VAR_SLOT_TIME %x\n",
- val[0]);
-
- rtl_write_byte(rtlpriv, REG_SLOT_8822B, val[0]);
-
- for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
- }
- } break;
- case HW_VAR_ACK_PREAMBLE: {
- u8 reg_tmp;
- u8 short_preamble = (bool)(*(u8 *)val);
-
- reg_tmp = (rtlpriv->mac80211.cur_40_prime_sc) << 5;
- if (short_preamble)
- reg_tmp |= 0x80;
- rtl_write_byte(rtlpriv, REG_RRSR_8822B + 2, reg_tmp);
- rtlpriv->mac80211.short_preamble = short_preamble;
- } break;
- case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG_8822B, *((u8 *)val));
- break;
- case HW_VAR_AMPDU_FACTOR: {
- u32 ampdu_len = (*((u8 *)val));
-
- ampdu_len = (0x2000 << ampdu_len) - 1;
- rtl_write_dword(rtlpriv, REG_AMPDU_MAX_LENGTH_8822B, ampdu_len);
- } break;
- case HW_VAR_AC_PARAM: {
- u8 e_aci = *((u8 *)val);
-
- if (mac->vif && mac->vif->bss_conf.assoc && !mac->act_scanning)
- rtl8822be_set_qos(hw, e_aci);
-
- if (rtlpci->acm_method != EACMWAY2_SW)
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
- (u8 *)(&e_aci));
- } break;
- case HW_VAR_ACM_CTRL: {
- u8 e_aci = *((u8 *)val);
- union aci_aifsn *aifs = (union aci_aifsn *)&mac->ac[0].aifs;
-
- u8 acm = aifs->f.acm;
- u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL_8822B);
-
- acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
-
- if (acm) {
- switch (e_aci) {
- case AC0_BE:
- acm_ctrl |= ACMHW_BEQ_EN;
- break;
- case AC2_VI:
- acm_ctrl |= ACMHW_VIQ_EN;
- break;
- case AC3_VO:
- acm_ctrl |= ACMHW_VOQ_EN;
- break;
- default:
- RT_TRACE(
- rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
- break;
- }
- } else {
- switch (e_aci) {
- case AC0_BE:
- acm_ctrl &= (~ACMHW_BEQ_EN);
- break;
- case AC2_VI:
- acm_ctrl &= (~ACMHW_VIQ_EN);
- break;
- case AC3_VO:
- acm_ctrl &= (~ACMHW_VOQ_EN);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process\n");
- break;
- }
- }
-
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
- rtl_write_byte(rtlpriv, REG_ACMHWCTRL_8822B, acm_ctrl);
- } break;
- case HW_VAR_RCR: {
- rtl_write_dword(rtlpriv, REG_RCR_8822B, ((u32 *)(val))[0]);
- rtlpci->receive_config = ((u32 *)(val))[0];
- } break;
- case HW_VAR_RETRY_LIMIT: {
- u8 retry_limit = ((u8 *)(val))[0];
-
- rtl_write_word(rtlpriv, REG_RETRY_LIMIT_8822B,
- retry_limit << RETRY_LIMIT_SHORT_SHIFT |
- retry_limit << RETRY_LIMIT_LONG_SHIFT);
- } break;
- case HW_VAR_DUAL_TSF_RST:
- rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST_8822B,
- (BIT(0) | BIT(1)));
- break;
- case HW_VAR_EFUSE_BYTES:
- efuse->efuse_usedbytes = *((u16 *)val);
- break;
- case HW_VAR_EFUSE_USAGE:
- efuse->efuse_usedpercentage = *((u8 *)val);
- break;
- case HW_VAR_IO_CMD:
- rtl8822be_phy_set_io_cmd(hw, (*(enum io_type *)val));
- break;
- case HW_VAR_SET_RPWM:
- break;
- case HW_VAR_H2C_FW_PWRMODE:
- rtl8822be_set_fw_pwrmode_cmd(hw, (*(u8 *)val));
- break;
- case HW_VAR_FW_PSMODE_STATUS:
- ppsc->fw_current_inpsmode = *((bool *)val);
- break;
- case HW_VAR_RESUME_CLK_ON:
- _rtl8822be_set_fw_ps_rf_on(hw);
- break;
- case HW_VAR_FW_LPS_ACTION: {
- bool b_enter_fwlps = *((bool *)val);
-
- if (b_enter_fwlps)
- _rtl8822be_fwlps_enter(hw);
- else
- _rtl8822be_fwlps_leave(hw);
- } break;
- case HW_VAR_H2C_FW_JOINBSSRPT: {
- u8 mstatus = (*(u8 *)val);
-
- if (mstatus == RT_MEDIA_CONNECT) {
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL);
- _rtl8822be_download_rsvd_page(hw);
- }
- rtl8822be_set_default_port_id_cmd(hw);
- rtl8822be_set_fw_media_status_rpt_cmd(hw, mstatus);
- } break;
- case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- rtl8822be_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
- break;
- case HW_VAR_AID: {
- u16 u2btmp;
-
- u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT_8822B);
- u2btmp &= 0xC000;
- rtl_write_word(rtlpriv, REG_BCN_PSR_RPT_8822B,
- (u2btmp | mac->assoc_id));
- } break;
- case HW_VAR_CORRECT_TSF: {
- u8 btype_ibss = ((u8 *)(val))[0];
-
- if (btype_ibss)
- _rtl8822be_stop_tx_beacon(hw);
-
- _rtl8822be_set_bcn_ctrl_reg(hw, 0, BIT(3));
-
- rtl_write_dword(rtlpriv, REG_TSFTR_8822B,
- (u32)(mac->tsf & 0xffffffff));
- rtl_write_dword(rtlpriv, REG_TSFTR_8822B + 4,
- (u32)((mac->tsf >> 32) & 0xffffffff));
-
- _rtl8822be_set_bcn_ctrl_reg(hw, BIT(3), 0);
-
- if (btype_ibss)
- _rtl8822be_resume_tx_beacon(hw);
- } break;
- case HW_VAR_KEEP_ALIVE: {
- u8 array[2];
-
- array[0] = 0xff;
- array[1] = *((u8 *)val);
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_KEEP_ALIVE_CTRL, 2, array);
- } break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process %x\n", variable);
- break;
- }
-}
-
-static void _rtl8822be_gen_refresh_led_state(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_led *led0 = &pcipriv->ledctl.sw_led0;
-
- if (rtlpriv->rtlhal.up_first_time)
- return;
-
- if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
- rtl8822be_sw_led_on(hw, led0);
- else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
- rtl8822be_sw_led_on(hw, led0);
- else
- rtl8822be_sw_led_off(hw, led0);
-}
-
-static bool _rtl8822be_init_trxbd(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));*/
-
- u8 bytetmp;
- /*u16 wordtmp;*/
- u32 dwordtmp;
-
- /* Set TX/RX descriptor physical address -- HI part */
- if (!rtlpriv->cfg->mod_params->dma64)
- goto dma64_end;
-
- rtl_write_dword(rtlpriv, REG_H2CQ_TXBD_DESA_8822B + 4,
- ((u64)rtlpci->tx_ring[H2C_QUEUE].buffer_desc_dma) >>
- 32);
- rtl_write_dword(rtlpriv, REG_BCNQ_TXBD_DESA_8822B + 4,
- ((u64)rtlpci->tx_ring[BEACON_QUEUE].buffer_desc_dma) >>
- 32);
- rtl_write_dword(rtlpriv, REG_MGQ_TXBD_DESA_8822B + 4,
- (u64)rtlpci->tx_ring[MGNT_QUEUE].buffer_desc_dma >> 32);
- rtl_write_dword(rtlpriv, REG_VOQ_TXBD_DESA_8822B + 4,
- (u64)rtlpci->tx_ring[VO_QUEUE].buffer_desc_dma >> 32);
- rtl_write_dword(rtlpriv, REG_VIQ_TXBD_DESA_8822B + 4,
- (u64)rtlpci->tx_ring[VI_QUEUE].buffer_desc_dma >> 32);
- rtl_write_dword(rtlpriv, REG_BEQ_TXBD_DESA_8822B + 4,
- (u64)rtlpci->tx_ring[BE_QUEUE].buffer_desc_dma >> 32);
- rtl_write_dword(rtlpriv, REG_BKQ_TXBD_DESA_8822B + 4,
- (u64)rtlpci->tx_ring[BK_QUEUE].buffer_desc_dma >> 32);
- rtl_write_dword(rtlpriv, REG_HI0Q_TXBD_DESA_8822B + 4,
- (u64)rtlpci->tx_ring[HIGH_QUEUE].buffer_desc_dma >> 32);
-
- rtl_write_dword(rtlpriv, REG_RXQ_RXBD_DESA_8822B + 4,
- (u64)rtlpci->rx_ring[RX_MPDU_QUEUE].dma >> 32);
-
-dma64_end:
- /* Set TX/RX descriptor physical address(from OS API). */
- rtl_write_dword(rtlpriv, REG_H2CQ_TXBD_DESA_8822B,
- ((u64)rtlpci->tx_ring[H2C_QUEUE].buffer_desc_dma) &
- DMA_BIT_MASK(32));
- rtl_write_dword(rtlpriv, REG_BCNQ_TXBD_DESA_8822B,
- ((u64)rtlpci->tx_ring[BEACON_QUEUE].buffer_desc_dma) &
- DMA_BIT_MASK(32));
- rtl_write_dword(rtlpriv, REG_MGQ_TXBD_DESA_8822B,
- (u64)rtlpci->tx_ring[MGNT_QUEUE].buffer_desc_dma &
- DMA_BIT_MASK(32));
- rtl_write_dword(rtlpriv, REG_VOQ_TXBD_DESA_8822B,
- (u64)rtlpci->tx_ring[VO_QUEUE].buffer_desc_dma &
- DMA_BIT_MASK(32));
- rtl_write_dword(rtlpriv, REG_VIQ_TXBD_DESA_8822B,
- (u64)rtlpci->tx_ring[VI_QUEUE].buffer_desc_dma &
- DMA_BIT_MASK(32));
- rtl_write_dword(rtlpriv, REG_BEQ_TXBD_DESA_8822B,
- (u64)rtlpci->tx_ring[BE_QUEUE].buffer_desc_dma &
- DMA_BIT_MASK(32));
- dwordtmp = rtl_read_dword(rtlpriv, REG_BEQ_TXBD_DESA_8822B); /* need? */
- rtl_write_dword(rtlpriv, REG_BKQ_TXBD_DESA_8822B,
- (u64)rtlpci->tx_ring[BK_QUEUE].buffer_desc_dma &
- DMA_BIT_MASK(32));
- rtl_write_dword(rtlpriv, REG_HI0Q_TXBD_DESA_8822B,
- (u64)rtlpci->tx_ring[HIGH_QUEUE].buffer_desc_dma &
- DMA_BIT_MASK(32));
-
- rtl_write_dword(rtlpriv, REG_RXQ_RXBD_DESA_8822B,
- (u64)rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
- DMA_BIT_MASK(32));
-
- /* Reset R/W point */
- rtl_write_dword(rtlpriv, REG_BD_RWPTR_CLR_8822B, 0x3fffffff);
-
- /* Reset the H2CQ R/W point index to 0 */
- dwordtmp = rtl_read_dword(rtlpriv, REG_H2CQ_CSR_8822B);
- rtl_write_dword(rtlpriv, REG_H2CQ_CSR_8822B,
- (dwordtmp | BIT(8) | BIT(16)));
-
- bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_8822B + 3);
- rtl_write_byte(rtlpriv, REG_PCIE_CTRL_8822B + 3, bytetmp | 0xF7);
-
- rtl_write_dword(rtlpriv, REG_INT_MIG_8822B, 0);
-
- rtl_write_dword(rtlpriv, REG_MCUTST_I_8822B, 0x0);
-
- rtl_write_word(rtlpriv, REG_H2CQ_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_MGQ_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_VOQ_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_VIQ_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_BEQ_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_VOQ_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_BKQ_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_HI0Q_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_HI1Q_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_HI2Q_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_HI3Q_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_HI4Q_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_HI5Q_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_HI6Q_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- rtl_write_word(rtlpriv, REG_HI7Q_TXBD_NUM_8822B,
- TX_DESC_NUM_8822B |
- ((RTL8822BE_SEG_NUM << 12) & 0x3000));
- /*Rx*/
- rtl_write_word(rtlpriv, REG_RX_RXBD_NUM_8822B,
- RX_DESC_NUM_8822BE |
- ((RTL8822BE_SEG_NUM << 13) & 0x6000) | 0x8000);
-
-	rtl_write_dword(rtlpriv, REG_BD_RWPTR_CLR_8822B, 0xFFFFFFFF);
-
- _rtl8822be_gen_refresh_led_state(hw);
-
- return true;
-}
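
Editorial aside (not part of the original patch): every REG_*_TXBD_NUM_8822B write above packs the descriptor count into the low bits and the segment selector into bits [13:12] (the RX variant shifts by 13, masks with 0x6000 and sets bit 15). A minimal standalone sketch of the TX packing, using placeholder constant values rather than the real (now deleted) header definitions:

#include <stdint.h>

/* Placeholder values for illustration only; the real constants were
 * defined in the rtl8822be headers removed by this patch.
 */
#define TX_DESC_NUM_8822B	128
#define RTL8822BE_SEG_NUM	1

/* Mirrors the expression used for the REG_*_TXBD_NUM_8822B writes. */
static uint16_t txbd_num_word(void)
{
	return TX_DESC_NUM_8822B | ((RTL8822BE_SEG_NUM << 12) & 0x3000);
}
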
-
-static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
-{
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u8 tmp;
-
- if (!ppsc->support_backdoor)
- return;
-
- pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
- pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
-
- pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
- pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
-}
-
-void rtl8822be_enable_hw_security_config(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 sec_reg_value;
- u8 tmp;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
-
- if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
- return;
- }
-
- sec_reg_value = SCR_TX_ENC_ENABLE | SRC_RX_DEC_ENABLE;
-
- if (rtlpriv->sec.use_defaultkey) {
- sec_reg_value |= SCR_TX_USE_DK;
- sec_reg_value |= SCR_RX_USE_DK;
- }
-
- sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
-
- tmp = rtl_read_byte(rtlpriv, REG_CR_8822B + 1);
- rtl_write_byte(rtlpriv, REG_CR_8822B + 1, tmp | BIT(1));
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "The SECR-value %x\n",
- sec_reg_value);
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
-}
-
-static bool _rtl8822be_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
-{
- u8 tmp;
-
- /* write reg 0x350 Bit[26]=1. Enable debug port. */
- tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG_V1_8822B + 3);
- if (!(tmp & BIT(2))) {
- rtl_write_byte(rtlpriv, REG_DBI_FLAG_V1_8822B + 3,
- (tmp | BIT(2)));
- mdelay(100); /* Suggested by DD Justin_tsai. */
- }
-
- /* read reg 0x350 Bit[25] if 1 : RX hang
- * read reg 0x350 Bit[24] if 1 : TX hang
- */
- tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG_V1_8822B + 3);
- if ((tmp & BIT(0)) || (tmp & BIT(1))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "CheckPcieDMAHang8822BE(): true!!\n");
- return true;
- } else {
- return false;
- }
-}
-
-static void _rtl8822be_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
- bool mac_power_on)
-{
- u8 tmp;
- bool release_mac_rx_pause;
- u8 backup_pcie_dma_pause;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "ResetPcieInterfaceDMA8822BE()\n");
-
- /* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
- * released by SD1 Alan.
- * 2013.05.07, by tynli.
- */
-
- /* 1. disable register write lock
- * write 0x1C bit[1:0] = 2'h0
- * write 0xCC bit[2] = 1'b1
- */
- tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL_8822B);
- tmp &= ~(BIT(1) | BIT(0));
- rtl_write_byte(rtlpriv, REG_RSV_CTRL_8822B, tmp);
- tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2_8822B);
- tmp |= BIT(2);
- rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2_8822B, tmp);
-
- /* 2. Check and pause TRX DMA
- * write 0x284 bit[18] = 1'b1
- * write 0x301 = 0xFF
- */
- tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL_8822B);
- if (tmp & BIT(2)) {
-		/* Already paused before this function for another purpose. */
- release_mac_rx_pause = false;
- } else {
- rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL_8822B,
- (tmp | BIT(2)));
- release_mac_rx_pause = true;
- }
-
- backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_8822B + 1);
- if (backup_pcie_dma_pause != 0xFF)
- rtl_write_byte(rtlpriv, REG_PCIE_CTRL_8822B + 1, 0xFF);
-
- if (mac_power_on) {
- /* 3. reset TRX function
- * write 0x100 = 0x00
- */
- rtl_write_byte(rtlpriv, REG_CR_8822B, 0);
- }
-
- /* 4. Reset PCIe DMA
- * write 0x003 bit[0] = 0
- */
- tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN_8822B + 1);
- tmp &= ~(BIT(0));
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN_8822B + 1, tmp);
-
- /* 5. Enable PCIe DMA
- * write 0x003 bit[0] = 1
- */
- tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN_8822B + 1);
- tmp |= BIT(0);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN_8822B + 1, tmp);
-
- if (mac_power_on) {
- /* 6. enable TRX function
- * write 0x100 = 0xFF
- */
- rtl_write_byte(rtlpriv, REG_CR_8822B, 0xFF);
-
-		/* We should init LLT & RQPN and
-		 * prepare the Tx/Rx descriptor addresses later
-		 * because the MAC function was reset.
-		 */
- }
-
- /* 7. Restore PCIe autoload down bit
- * write 0xF8 bit[17] = 1'b1
- */
- tmp = rtl_read_byte(rtlpriv, REG_SYS_STATUS2_8822B + 2);
- tmp |= BIT(1);
- rtl_write_byte(rtlpriv, REG_SYS_STATUS2_8822B + 2, tmp);
-
-	/* In the MAC power-on state, BB and RF may also be on; releasing
-	 * TRX DMA here could start Tx/Rx traffic immediately, so the
-	 * Tx/Rx DMA is released later instead.
-	 */
- if (!mac_power_on) {
- /* 8. release TRX DMA
- * write 0x284 bit[18] = 1'b0
- * write 0x301 = 0x00
- */
- if (release_mac_rx_pause) {
- tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL_8822B);
- rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL_8822B,
- (tmp & (~BIT(2))));
- }
- rtl_write_byte(rtlpriv, REG_PCIE_CTRL_8822B + 1,
- backup_pcie_dma_pause);
- }
-
- /* 9. lock system register
- * write 0xCC bit[2] = 1'b0
- */
- tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2_8822B);
- tmp &= ~(BIT(2));
- rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2_8822B, tmp);
-}
-
-int rtl8822be_hw_init(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- int err = 0;
- u8 tmp_u1b;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, " Rtl8822BE hw init\n");
- rtlpriv->rtlhal.being_init_adapter = true;
- rtlpriv->intf_ops->disable_aspm(hw);
-
- if (_rtl8822be_check_pcie_dma_hang(rtlpriv)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "8822be dma hang!\n");
- _rtl8822be_reset_pcie_interface_dma(rtlpriv,
- rtlhal->mac_func_enable);
- rtlhal->mac_func_enable = false;
- }
-
- /* init TRX BD */
- _rtl8822be_init_trxbd(hw);
-
- /* use halmac to init */
- err = rtlpriv->halmac.ops->halmac_init_hal(rtlpriv);
- if (err) {
- pr_err("halmac_init_hal failed\n");
- rtlhal->fw_ready = false;
- return err;
- }
-
- rtlhal->fw_ready = true;
-
- /* have to init after halmac init */
- tmp_u1b = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_8822B + 2);
- rtl_write_byte(rtlpriv, REG_PCIE_CTRL_8822B + 2, (tmp_u1b | BIT(4)));
-
- /*rtl_write_word(rtlpriv, REG_PCIE_CTRL_8822B, 0x8000);*/
- rtlhal->rx_tag = 0;
-
- rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ_8822B, 0x4);
-
- /*fw related variable initialize */
- ppsc->fw_current_inpsmode = false;
- rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8822B;
- rtlhal->fw_clk_change_in_progress = false;
- rtlhal->allow_sw_to_change_hwclc = false;
- rtlhal->last_hmeboxnum = 0;
-
- rtlphy->rfreg_chnlval[0] =
- rtl_get_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK);
- rtlphy->rfreg_chnlval[1] =
- rtl_get_rfreg(hw, RF90_PATH_B, RF_CHNLBW, RFREG_OFFSET_MASK);
- rtlphy->backup_rf_0x1a = (u32)rtl_get_rfreg(hw, RF90_PATH_A, RF_RX_G1,
- RFREG_OFFSET_MASK);
- rtlphy->rfreg_chnlval[0] =
- (rtlphy->rfreg_chnlval[0] & 0xfffff3ff) | BIT(10) | BIT(11);
-
- rtlhal->mac_func_enable = true;
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
-
- /* reset cam / set security */
- rtl_cam_reset_all_entry(hw);
- rtl8822be_enable_hw_security_config(hw);
-
- /* check RCR/ICV bit */
- rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
- rtl_write_dword(rtlpriv, REG_RCR_8822B, rtlpci->receive_config);
-
- /* clear rx ctrl frame */
- rtl_write_word(rtlpriv, REG_RXFLTMAP1_8822B, 0);
-
- ppsc->rfpwr_state = ERFON;
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
- _rtl8822be_enable_aspm_back_door(hw);
- rtlpriv->intf_ops->enable_aspm(hw);
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
- else
- rtlpriv->btcoexist.btc_ops->btc_init_hw_config_wifi_only(
- rtlpriv);
-
- rtlpriv->rtlhal.being_init_adapter = false;
-
- rtlpriv->phydm.ops->phydm_init_dm(rtlpriv);
-
- /* clear ISR, and IMR will be on later */
- rtl_write_dword(rtlpriv, REG_HISR0_8822B,
- rtl_read_dword(rtlpriv, REG_HISR0_8822B));
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8822BE hw init %x\n",
- err);
- return 0;
-}
-
-static u32 _rtl8822be_read_chip_version(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- /*enum version_8822b version = VERSION_UNKNOWN;*/
- u32 version;
- u32 value32;
-
- rtlphy->rf_type = RF_2T2R;
-
- value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1_8822B);
-
- version = value32;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
- (rtlphy->rf_type == RF_2T2R) ? "RF_2T2R" : "RF_1T1R");
-
- return version;
-}
-
-static int _rtl8822be_set_media_status(struct ieee80211_hw *hw,
- enum nl80211_iftype type)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
- enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
- u8 mode = MSR_NOLINK;
-
- bt_msr &= 0xfc;
-
- switch (type) {
- case NL80211_IFTYPE_UNSPECIFIED:
- mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
- break;
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
- mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
- break;
- case NL80211_IFTYPE_STATION:
- mode = MSR_INFRA;
- ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
- break;
- case NL80211_IFTYPE_AP:
- mode = MSR_AP;
- ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
- break;
- default:
-		pr_err("Network type %d not supported!\n", type);
- return 1;
- }
-
-	/* MSR_INFRA == link in an infrastructure network;
-	 * MSR_ADHOC == link in an ad hoc network;
-	 * therefore, checking the link state is necessary.
-	 *
-	 * MSR_AP == AP mode; the link state does not matter here.
-	 */
- if (mode != MSR_AP && rtlpriv->mac80211.link_state < MAC80211_LINKED) {
- mode = MSR_NOLINK;
- ledaction = LED_CTL_NO_LINK;
- }
-
- if (mode == MSR_NOLINK || mode == MSR_INFRA) {
- _rtl8822be_stop_tx_beacon(hw);
- _rtl8822be_enable_bcn_sub_func(hw);
- } else if (mode == MSR_ADHOC || mode == MSR_AP) {
- _rtl8822be_resume_tx_beacon(hw);
- _rtl8822be_disable_bcn_sub_func(hw);
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
- }
-
- rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
- rtlpriv->cfg->ops->led_control(hw, ledaction);
- if (mode == MSR_AP)
- rtl_write_byte(rtlpriv, REG_BCNTCFG_8822B + 1, 0x00);
- else
- rtl_write_byte(rtlpriv, REG_BCNTCFG_8822B + 1, 0x66);
- return 0;
-}
-
-void rtl8822be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
-
- if (rtlpriv->psc.rfpwr_state != ERFON)
- return;
-
- if (check_bssid) {
- reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
- _rtl8822be_set_bcn_ctrl_reg(hw, 0, BIT(4));
- } else if (!check_bssid) {
- reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
- _rtl8822be_set_bcn_ctrl_reg(hw, BIT(4), 0);
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
- }
-}
-
-int rtl8822be_set_network_type(struct ieee80211_hw *hw,
- enum nl80211_iftype type)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (_rtl8822be_set_media_status(hw, type))
- return -EOPNOTSUPP;
-
- if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
- if (type != NL80211_IFTYPE_AP &&
- type != NL80211_IFTYPE_MESH_POINT)
- rtl8822be_set_check_bssid(hw, true);
- } else {
- rtl8822be_set_check_bssid(hw, false);
- }
-
- return 0;
-}
-
-void rtl8822be_set_qos(struct ieee80211_hw *hw, int aci)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- u32 ac_param;
-
- ac_param = rtl_get_hal_edca_param(hw, mac->vif, mac->mode,
- &mac->edca_param[aci]);
-
- switch (aci) {
- case AC1_BK:
- rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM_8822B, ac_param);
- break;
- case AC0_BE:
- rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM_8822B, ac_param);
- break;
- case AC2_VI:
- rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM_8822B, ac_param);
- break;
- case AC3_VO:
- rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM_8822B, ac_param);
- break;
- default:
- WARN_ONCE(true, "invalid aci: %d !\n", aci);
- break;
- }
-}
-
-void rtl8822be_enable_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- rtl_write_dword(rtlpriv, REG_HIMR0_8822B,
- rtlpci->irq_mask[0] & 0xFFFFFFFF);
- rtl_write_dword(rtlpriv, REG_HIMR1_8822B,
- rtlpci->irq_mask[1] & 0xFFFFFFFF);
- rtl_write_dword(rtlpriv, REG_HIMR3_8822B,
- rtlpci->irq_mask[3] & 0xFFFFFFFF);
- rtlpci->irq_enabled = true;
-}
-
-void rtl8822be_disable_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- rtl_write_dword(rtlpriv, REG_HIMR0_8822B, IMR_DISABLED);
- rtl_write_dword(rtlpriv, REG_HIMR1_8822B, IMR_DISABLED);
- rtl_write_dword(rtlpriv, REG_HIMR3_8822B, IMR_DISABLED);
- rtlpci->irq_enabled = false;
- /*synchronize_irq(rtlpci->pdev->irq);*/
-}
-
-void rtl8822be_card_disable(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- enum nl80211_iftype opmode;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8822be card disable\n");
-
- RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
-
- mac->link_state = MAC80211_NOLINK;
- opmode = NL80211_IFTYPE_UNSPECIFIED;
-
- _rtl8822be_set_media_status(hw, opmode);
-
- if (rtlpriv->rtlhal.driver_is_goingto_unload ||
- ppsc->rfoff_reason > RF_CHANGE_BY_PS)
- rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
-
- rtlpriv->phydm.ops->phydm_deinit_dm(rtlpriv);
-
- rtlpriv->halmac.ops->halmac_deinit_hal(rtlpriv);
-
- /* after power off we should do iqk again */
- if (!rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->phy.iqk_initialized = false;
-}
-
-void rtl8822be_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
- u32 *p_intb, u32 *p_intc, u32 *p_intd)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- *p_inta =
- rtl_read_dword(rtlpriv, REG_HISR0_8822B) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, REG_HISR0_8822B, *p_inta);
-
- *p_intb =
- rtl_read_dword(rtlpriv, REG_HISR1_8822B) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISR1_8822B, *p_intb);
-
- *p_intd =
- rtl_read_dword(rtlpriv, REG_HISR3_8822B) & rtlpci->irq_mask[3];
- rtl_write_dword(rtlpriv, REG_HISR3_8822B, *p_intd);
-}
-
-void rtl8822be_set_beacon_related_registers(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u16 bcn_interval, atim_window;
-
- bcn_interval = mac->beacon_interval;
- atim_window = 2; /*FIX MERGE */
- rtl8822be_disable_interrupt(hw);
- rtl_write_word(rtlpriv, REG_ATIMWND_8822B, atim_window);
- rtl_write_word(rtlpriv, REG_MBSSID_BCN_SPACE_8822B, bcn_interval);
- rtl_write_word(rtlpriv, REG_BCNTCFG_8822B, 0x660f);
- rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK_8822B, 0x18);
- rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM_8822B, 0x18);
- rtl_write_byte(rtlpriv, 0x606, 0x30);
- rtlpci->reg_bcn_ctrl_val |= BIT(3);
- rtl_write_byte(rtlpriv, REG_BCN_CTRL_8822B,
- (u8)rtlpci->reg_bcn_ctrl_val);
-}
-
-void rtl8822be_set_beacon_interval(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u16 bcn_interval = mac->beacon_interval;
-
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n",
- bcn_interval);
- rtl_write_word(rtlpriv, REG_MBSSID_BCN_SPACE_8822B, bcn_interval);
-}
-
-void rtl8822be_update_interrupt_mask(struct ieee80211_hw *hw, u32 add_msr,
- u32 rm_msr)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
- add_msr, rm_msr);
-
- if (add_msr)
- rtlpci->irq_mask[0] |= add_msr;
- if (rm_msr)
- rtlpci->irq_mask[0] &= (~rm_msr);
- rtl8822be_disable_interrupt(hw);
- rtl8822be_enable_interrupt(hw);
-}
-
-static bool _rtl8822be_get_chnl_group(u8 chnl, u8 *group)
-{
- bool in_24g;
-
- if (chnl <= 14) {
- in_24g = true;
-
- if (chnl >= 1 && chnl <= 2)
- *group = 0;
- else if (chnl >= 3 && chnl <= 5)
- *group = 1;
- else if (chnl >= 6 && chnl <= 8)
- *group = 2;
- else if (chnl >= 9 && chnl <= 11)
- *group = 3;
- else if (chnl >= 12 && chnl <= 14)
- *group = 4;
- } else {
- in_24g = false;
-
- if (chnl >= 36 && chnl <= 42)
- *group = 0;
- else if (chnl >= 44 && chnl <= 48)
- *group = 1;
- else if (chnl >= 50 && chnl <= 58)
- *group = 2;
- else if (chnl >= 60 && chnl <= 64)
- *group = 3;
- else if (chnl >= 100 && chnl <= 106)
- *group = 4;
- else if (chnl >= 108 && chnl <= 114)
- *group = 5;
- else if (chnl >= 116 && chnl <= 122)
- *group = 6;
- else if (chnl >= 124 && chnl <= 130)
- *group = 7;
- else if (chnl >= 132 && chnl <= 138)
- *group = 8;
- else if (chnl >= 140 && chnl <= 144)
- *group = 9;
- else if (chnl >= 149 && chnl <= 155)
- *group = 10;
- else if (chnl >= 157 && chnl <= 161)
- *group = 11;
- else if (chnl >= 165 && chnl <= 171)
- *group = 12;
- else if (chnl >= 173 && chnl <= 177)
- *group = 13;
- }
- return in_24g;
-}
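
Editorial aside: a quick sanity check of the channel-to-group mapping above. This is only a sketch; the function name sanity_check_chnl_groups() is hypothetical and it assumes the static helper is visible in the same translation unit.

/* Expected results, derived from the ranges above:
 *   channel   7 -> 2.4 GHz, group 2
 *   channel 149 -> 5 GHz,   group 10
 */
static void sanity_check_chnl_groups(void)
{
	u8 group = 0xff;

	WARN_ON(!_rtl8822be_get_chnl_group(7, &group) || group != 2);
	WARN_ON(_rtl8822be_get_chnl_group(149, &group) || group != 10);
}
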
-
-static inline bool power_valid(u8 power)
-{
- if (power <= 63)
- return true;
-
- return false;
-}
-
-static inline s8 power_diff(s8 diff)
-{
-	/* sign-extend a 4-bit signed number to an 8-bit signed number */
- if (diff & BIT(3))
- diff |= 0xF0;
-
- return diff;
-}
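
Editorial aside: power_diff() sign-extends a 4-bit two's-complement nibble into an s8, so a raw nibble of 0xE becomes -2 while 0x3 stays +3. A standalone re-check of that arithmetic (the helper name below is hypothetical, not from the driver):

#include <assert.h>
#include <stdint.h>

/* Same idea as power_diff() above: bit 3 of the nibble is the sign bit. */
static int8_t sign_extend_nibble(uint8_t nibble)
{
	if (nibble & 0x08)
		nibble |= 0xF0;
	return (int8_t)nibble;
}

int main(void)
{
	assert(sign_extend_nibble(0x0E) == -2);	/* 0b1110 -> -2 */
	assert(sign_extend_nibble(0x03) == 3);	/* 0b0011 -> +3 */
	return 0;
}
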
-
-static void _rtl8822be_read_power_value_fromprom(struct ieee80211_hw *hw,
- struct txpower_info_2g *pwr2g,
- struct txpower_info_5g *pwr5g,
- bool autoload_fail, u8 *hwinfo)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 rf, addr = EEPROM_TX_PWR_INX_8822B, group, i = 0;
- u8 power;
- s8 diff;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM8822B(): PROMContent[0x%x]=0x%x\n",
- (addr + 1), hwinfo[addr + 1]);
- if (hwinfo[addr + 1] == 0xFF) /*YJ,add,120316*/
- autoload_fail = true;
-
- memset(pwr2g, 0, sizeof(struct txpower_info_2g));
- memset(pwr5g, 0, sizeof(struct txpower_info_5g));
-
- if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
- for (rf = 0; rf < MAX_RF_PATH; rf++) {
- /* 2.4G default value */
- for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
- pwr2g->index_cck_base[rf][group] = 0x2D;
- pwr2g->index_bw40_base[rf][group] = 0x2D;
- }
- for (i = 0; i < MAX_TX_COUNT; i++) {
- if (i == 0) {
- pwr2g->bw20_diff[rf][0] = 0x02;
- pwr2g->ofdm_diff[rf][0] = 0x04;
- } else {
- pwr2g->bw20_diff[rf][i] = 0xFE;
- pwr2g->bw40_diff[rf][i] = 0xFE;
- pwr2g->cck_diff[rf][i] = 0xFE;
- pwr2g->ofdm_diff[rf][i] = 0xFE;
- }
- }
-
- /*5G default value*/
- for (group = 0; group < MAX_CHNL_GROUP_5G; group++)
- pwr5g->index_bw40_base[rf][group] = 0x2A;
-
- for (i = 0; i < MAX_TX_COUNT; i++) {
- if (i == 0) {
- pwr5g->ofdm_diff[rf][0] = 0x04;
- pwr5g->bw20_diff[rf][0] = 0x00;
- pwr5g->bw80_diff[rf][0] = 0xFE;
- pwr5g->bw160_diff[rf][0] = 0xFE;
- } else {
- pwr5g->ofdm_diff[rf][i] = 0xFE;
- pwr5g->bw20_diff[rf][i] = 0xFE;
- pwr5g->bw40_diff[rf][i] = 0xFE;
- pwr5g->bw80_diff[rf][i] = 0xFE;
- pwr5g->bw160_diff[rf][i] = 0xFE;
- }
- }
- }
- return;
- }
-
- rtl_priv(hw)->efuse.txpwr_fromeprom = true;
-
- for (rf = 0; rf < 2 /*MAX_RF_PATH*/; rf++) {
-		/* 2.4G base values read from efuse */
- for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
- power = hwinfo[addr++];
- if (power_valid(power))
- pwr2g->index_cck_base[rf][group] = power;
- }
- for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) {
- power = hwinfo[addr++];
- if (power_valid(power))
- pwr2g->index_bw40_base[rf][group] = power;
- }
- for (i = 0; i < MAX_TX_COUNT; i++) {
- if (i == 0) {
- pwr2g->bw40_diff[rf][i] = 0;
-
- diff = (hwinfo[addr] & 0xF0) >> 4;
- pwr2g->bw20_diff[rf][i] = power_diff(diff);
-
- diff = hwinfo[addr] & 0x0F;
- pwr2g->ofdm_diff[rf][i] = power_diff(diff);
-
- pwr2g->cck_diff[rf][i] = 0;
-
- addr++;
- } else {
- diff = (hwinfo[addr] & 0xF0) >> 4;
- pwr2g->bw40_diff[rf][i] = power_diff(diff);
-
- diff = hwinfo[addr] & 0x0F;
- pwr2g->bw20_diff[rf][i] = power_diff(diff);
-
- addr++;
-
- diff = (hwinfo[addr] & 0xF0) >> 4;
- pwr2g->ofdm_diff[rf][i] = power_diff(diff);
-
- diff = hwinfo[addr] & 0x0F;
- pwr2g->cck_diff[rf][i] = power_diff(diff);
-
- addr++;
- }
- }
-
-		/* 5G base values read from efuse */
- for (group = 0; group < MAX_CHNL_GROUP_5G; group++) {
- power = hwinfo[addr++];
- if (power_valid(power))
- pwr5g->index_bw40_base[rf][group] = power;
- }
-
- for (i = 0; i < MAX_TX_COUNT; i++) {
- if (i == 0) {
- pwr5g->bw40_diff[rf][i] = 0;
-
- diff = (hwinfo[addr] & 0xF0) >> 4;
- pwr5g->bw20_diff[rf][i] = power_diff(diff);
-
- diff = hwinfo[addr] & 0x0F;
- pwr5g->ofdm_diff[rf][i] = power_diff(diff);
-
- addr++;
- } else {
- diff = (hwinfo[addr] & 0xF0) >> 4;
- pwr5g->bw40_diff[rf][i] = power_diff(diff);
-
- diff = hwinfo[addr] & 0x0F;
- pwr5g->bw20_diff[rf][i] = power_diff(diff);
-
- addr++;
- }
- }
-
- diff = (hwinfo[addr] & 0xF0) >> 4;
- pwr5g->ofdm_diff[rf][1] = power_diff(diff);
-
- diff = hwinfo[addr] & 0x0F;
- pwr5g->ofdm_diff[rf][2] = power_diff(diff);
-
- addr++;
-
- diff = hwinfo[addr] & 0x0F;
- pwr5g->ofdm_diff[rf][3] = power_diff(diff);
-
- addr++;
-
- for (i = 0; i < MAX_TX_COUNT; i++) {
- diff = (hwinfo[addr] & 0xF0) >> 4;
- pwr5g->bw80_diff[rf][i] = power_diff(diff);
-
- diff = hwinfo[addr] & 0x0F;
- pwr5g->bw160_diff[rf][i] = power_diff(diff);
-
- addr++;
- }
- }
-}
-
-static void _rtl8822be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail,
- u8 *hwinfo)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *efu = rtl_efuse(rtl_priv(hw));
- struct txpower_info_2g pwr2g;
- struct txpower_info_5g pwr5g;
- u8 channel5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
- 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
- 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
- 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
- 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
- 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
- 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
- u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122,
- 138, 155, 171};
- u8 rf, group;
- u8 i;
-
- _rtl8822be_read_power_value_fromprom(hw, &pwr2g, &pwr5g, autoload_fail,
- hwinfo);
-
- for (rf = 0; rf < MAX_RF_PATH; rf++) {
- for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) {
- _rtl8822be_get_chnl_group(i + 1, &group);
-
- if (i == CHANNEL_MAX_NUMBER_2G - 1) {
- efu->txpwrlevel_cck[rf][i] =
- pwr2g.index_cck_base[rf][5];
- efu->txpwrlevel_ht40_1s[rf][i] =
- pwr2g.index_bw40_base[rf][group];
- } else {
- efu->txpwrlevel_cck[rf][i] =
- pwr2g.index_cck_base[rf][group];
- efu->txpwrlevel_ht40_1s[rf][i] =
- pwr2g.index_bw40_base[rf][group];
- }
- }
- for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) {
- _rtl8822be_get_chnl_group(channel5g[i], &group);
- efu->txpwr_5g_bw40base[rf][i] =
- pwr5g.index_bw40_base[rf][group];
- }
- for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) {
- u8 upper, lower;
-
- _rtl8822be_get_chnl_group(channel5g_80m[i], &group);
- upper = pwr5g.index_bw40_base[rf][group];
- lower = pwr5g.index_bw40_base[rf][group + 1];
-
- efu->txpwr_5g_bw80base[rf][i] = (upper + lower) / 2;
- }
- for (i = 0; i < MAX_TX_COUNT; i++) {
- efu->txpwr_cckdiff[rf][i] = pwr2g.cck_diff[rf][i];
- efu->txpwr_legacyhtdiff[rf][i] = pwr2g.ofdm_diff[rf][i];
- efu->txpwr_ht20diff[rf][i] = pwr2g.bw20_diff[rf][i];
- efu->txpwr_ht40diff[rf][i] = pwr2g.bw40_diff[rf][i];
-
- efu->txpwr_5g_ofdmdiff[rf][i] = pwr5g.ofdm_diff[rf][i];
- efu->txpwr_5g_bw20diff[rf][i] = pwr5g.bw20_diff[rf][i];
- efu->txpwr_5g_bw40diff[rf][i] = pwr5g.bw40_diff[rf][i];
- efu->txpwr_5g_bw80diff[rf][i] = pwr5g.bw80_diff[rf][i];
- }
- }
-
- if (!autoload_fail)
- efu->eeprom_thermalmeter = hwinfo[EEPROM_THERMAL_METER_8822B];
- else
- efu->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
-
- if (efu->eeprom_thermalmeter == 0xff || autoload_fail) {
- efu->apk_thermalmeterignore = true;
- efu->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
- }
-
- efu->thermalmeter[0] = efu->eeprom_thermalmeter;
- RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "thermalmeter = 0x%x\n",
- efu->eeprom_thermalmeter);
-
- if (!autoload_fail) {
- efu->eeprom_regulatory =
- hwinfo[EEPROM_RF_BOARD_OPTION_8822B] & 0x07;
- if (hwinfo[EEPROM_RF_BOARD_OPTION_8822B] == 0xFF)
- efu->eeprom_regulatory = 0;
- } else {
- efu->eeprom_regulatory = 0;
- }
- RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "eeprom_regulatory = 0x%x\n",
- efu->eeprom_regulatory);
-}
-
-static void _rtl8822be_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo,
- bool autoload_fail)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
- if (!autoload_fail) {
- rtlhal->pa_type_2g = hwinfo[EEPROM_2G_5G_PA_TYPE_8822B];
- rtlhal->lna_type_2g =
- hwinfo[EEPROM_2G_LNA_TYPE_GAIN_SEL_AB_8822B];
- if (rtlhal->pa_type_2g == 0xFF)
- rtlhal->pa_type_2g = 0;
- if (rtlhal->lna_type_2g == 0xFF)
- rtlhal->lna_type_2g = 0;
-
- rtlhal->external_pa_2g = (rtlhal->pa_type_2g & BIT(4)) ? 1 : 0;
- rtlhal->external_lna_2g =
- (rtlhal->lna_type_2g & BIT(3)) ? 1 : 0;
-
- rtlhal->pa_type_5g = hwinfo[EEPROM_2G_5G_PA_TYPE_8822B];
- rtlhal->lna_type_5g =
- hwinfo[EEPROM_5G_LNA_TYPE_GAIN_SEL_AB_8822B];
- if (rtlhal->pa_type_5g == 0xFF)
- rtlhal->pa_type_5g = 0;
- if (rtlhal->lna_type_5g == 0xFF)
- rtlhal->lna_type_5g = 0;
-
- rtlhal->external_pa_5g = (rtlhal->pa_type_5g & BIT(0)) ? 1 : 0;
- rtlhal->external_lna_5g =
- (rtlhal->lna_type_5g & BIT(3)) ? 1 : 0;
- } else {
- rtlhal->external_pa_2g = 0;
- rtlhal->external_lna_2g = 0;
- rtlhal->external_pa_5g = 0;
- rtlhal->external_lna_5g = 0;
- }
-}
-
-static void _rtl8822be_read_amplifier_type(struct ieee80211_hw *hw, u8 *hwinfo,
- bool autoload_fail)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
- u8 ext_type_pa_2g_a =
- (hwinfo[EEPROM_2G_LNA_TYPE_GAIN_SEL_AB_8822B] & BIT(2)) >>
- 2; /* 0xBD[2] */
- u8 ext_type_pa_2g_b =
- (hwinfo[EEPROM_2G_LNA_TYPE_GAIN_SEL_AB_8822B] & BIT(6)) >>
- 6; /* 0xBD[6] */
- u8 ext_type_pa_5g_a =
- (hwinfo[EEPROM_5G_LNA_TYPE_GAIN_SEL_AB_8822B] & BIT(2)) >>
- 2; /* 0xBF[2] */
- u8 ext_type_pa_5g_b =
- (hwinfo[EEPROM_5G_LNA_TYPE_GAIN_SEL_AB_8822B] & BIT(6)) >>
- 6; /* 0xBF[6] */
- u8 ext_type_lna_2g_a = (hwinfo[EEPROM_2G_LNA_TYPE_GAIN_SEL_AB_8822B] &
- (BIT(1) | BIT(0))) >>
- 0; /* 0xBD[1:0] */
- u8 ext_type_lna_2g_b = (hwinfo[EEPROM_2G_LNA_TYPE_GAIN_SEL_AB_8822B] &
- (BIT(5) | BIT(4))) >>
- 4; /* 0xBD[5:4] */
- u8 ext_type_lna_5g_a = (hwinfo[EEPROM_5G_LNA_TYPE_GAIN_SEL_AB_8822B] &
- (BIT(1) | BIT(0))) >>
- 0; /* 0xBF[1:0] */
- u8 ext_type_lna_5g_b = (hwinfo[EEPROM_5G_LNA_TYPE_GAIN_SEL_AB_8822B] &
- (BIT(5) | BIT(4))) >>
- 4; /* 0xBF[5:4] */
-
- _rtl8822be_read_pa_type(hw, hwinfo, autoload_fail);
-
- /* [2.4G] Path A and B are both extPA */
- if ((rtlhal->pa_type_2g & (BIT(5) | BIT(4))) == (BIT(5) | BIT(4)))
- rtlhal->type_gpa = ext_type_pa_2g_b << 2 | ext_type_pa_2g_a;
-
- /* [5G] Path A and B are both extPA */
- if ((rtlhal->pa_type_5g & (BIT(1) | BIT(0))) == (BIT(1) | BIT(0)))
- rtlhal->type_apa = ext_type_pa_5g_b << 2 | ext_type_pa_5g_a;
-
- /* [2.4G] Path A and B are both extLNA */
- if ((rtlhal->lna_type_2g & (BIT(7) | BIT(3))) == (BIT(7) | BIT(3)))
- rtlhal->type_glna = ext_type_lna_2g_b << 2 | ext_type_lna_2g_a;
-
- /* [5G] Path A and B are both extLNA */
- if ((rtlhal->lna_type_5g & (BIT(7) | BIT(3))) == (BIT(7) | BIT(3)))
- rtlhal->type_alna = ext_type_lna_5g_b << 2 | ext_type_lna_5g_a;
-}
-
-static void _rtl8822be_read_rfe_type(struct ieee80211_hw *hw, u8 *hwinfo,
- bool autoload_fail)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
- if (!autoload_fail)
- rtlhal->rfe_type = hwinfo[EEPROM_RFE_OPTION_8822B];
- else
- rtlhal->rfe_type = 0;
-
- if (rtlhal->rfe_type == 0xFF)
- rtlhal->rfe_type = 0;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RFE Type: 0x%2x\n",
- rtlhal->rfe_type);
-}
-
-static void _rtl8822be_read_adapter_info(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_halmac_ops *halmac_ops = rtlpriv->halmac.ops;
- u16 i, usvalue;
- u8 *hwinfo;
- u16 eeprom_id;
- u32 efuse_size;
- int err;
-
- if (rtlefuse->epromtype != EEPROM_BOOT_EFUSE) {
-		pr_err("RTL8822B not booted from efuse!\n");
- return;
- }
-
-	/* read logical efuse size (normally 0x0300) */
- err = halmac_ops->halmac_get_logical_efuse_size(rtlpriv, &efuse_size);
-
- if (err || !efuse_size) {
- pr_err("halmac_get_logical_efuse_size err=%d efuse_size=0x%X",
- err, efuse_size);
- efuse_size = HWSET_MAX_SIZE;
- }
-
- if (efuse_size > HWSET_MAX_SIZE) {
- pr_err("halmac_get_logical_efuse_size efuse_size=0x%X > 0x%X",
- efuse_size, HWSET_MAX_SIZE);
- efuse_size = HWSET_MAX_SIZE;
- }
-
- /* read efuse */
- hwinfo = kzalloc(efuse_size, GFP_KERNEL);
-
- err = halmac_ops->halmac_read_logical_efuse_map(rtlpriv, hwinfo,
- efuse_size);
- if (err) {
- pr_err("%s: <ERROR> fail to get efuse map!\n", __func__);
- goto label_end;
- }
-
-	/* copy to efuse_map (possibly redundant) */
- memcpy(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], hwinfo,
- EFUSE_MAX_LOGICAL_SIZE);
- memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], hwinfo,
- EFUSE_MAX_LOGICAL_SIZE);
-
- /* parse content */
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", hwinfo,
- HWSET_MAX_SIZE);
-
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != RTL8822B_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
-
- if (rtlefuse->autoload_failflag)
- goto label_end;
-
- /*VID DID SVID SDID*/
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM VID = 0x%4x\n",
- rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM DID = 0x%4x\n",
- rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM SVID = 0x%4x\n",
- rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM SMID = 0x%4x\n",
- rtlefuse->eeprom_smid);
- /*customer ID*/
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOM_ID_8822B];
- if (rtlefuse->eeprom_oemid == 0xFF)
- rtlefuse->eeprom_oemid = 0;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
- rtlefuse->eeprom_oemid);
- /*EEPROM version*/
- rtlefuse->eeprom_version = *(u8 *)&hwinfo[EEPROM_VERSION_8822B];
- /*mac address*/
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_8822BE + i];
- *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n",
- rtlefuse->dev_addr);
-
- /* channel plan */
- rtlefuse->eeprom_channelplan =
- *(u8 *)&hwinfo[EEPROM_CHANNEL_PLAN_8822B];
-
- /* set channel plan from efuse */
- rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
- if (rtlefuse->channel_plan == 0xFF)
- rtlefuse->channel_plan = 0x7f; /* use 2G + 5G as default */
-
- /*tx power*/
- _rtl8822be_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
- hwinfo);
-
- rtl8822be_read_bt_coexist_info_from_hwpg(
- hw, rtlefuse->autoload_failflag, hwinfo);
-
- /*amplifier type*/
- _rtl8822be_read_amplifier_type(hw, hwinfo, rtlefuse->autoload_failflag);
-
- /*rfe type*/
- _rtl8822be_read_rfe_type(hw, hwinfo, rtlefuse->autoload_failflag);
-
- /*board type*/
- rtlefuse->board_type =
- (((*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION_8822B]) & 0xE0) >> 5);
- if ((*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION_8822B]) == 0xFF)
- rtlefuse->board_type = 0;
-
- if (rtlpriv->btcoexist.btc_info.btcoexist == 1)
- rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */
-
- /* phydm maintain rtlhal->board_type and rtlhal->package_type */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "board_type = 0x%x\n",
- rtlefuse->board_type);
- /*parse xtal*/
- rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8822B];
- if (hwinfo[EEPROM_XTAL_8822B] == 0xFF)
- rtlefuse->crystalcap = 0; /*0x20;*/
-
- /*antenna diversity*/
- rtlefuse->antenna_div_type = 0;
- rtlefuse->antenna_div_cfg = 0;
-
-label_end:
- kfree(hwinfo);
-}
-
-static void _rtl8822be_hal_customized_behavior(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- pcipriv->ledctl.led_opendrain = true;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RT Customized ID: 0x%02X\n",
- rtlhal->oem_id);
-}
-
-static void _rtl8822be_read_pa_bias(struct ieee80211_hw *hw,
- struct rtl_phydm_params *params)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_halmac_ops *halmac_ops = rtlpriv->halmac.ops;
- u32 size;
- u8 *map = NULL;
-
- /* fill default values */
- params->efuse0x3d7 = 0xFF;
- params->efuse0x3d8 = 0xFF;
-
- if (halmac_ops->halmac_get_physical_efuse_size(rtlpriv, &size))
- goto err;
-
- map = kmalloc(size, GFP_KERNEL);
- if (!map)
- goto err;
-
- if (halmac_ops->halmac_read_physical_efuse_map(rtlpriv, map, size))
- goto err;
-
- params->efuse0x3d7 = map[0x3d7];
- params->efuse0x3d8 = map[0x3d8];
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "efuse0x3d7 = 0x%2x, efuse0x3d8 = 0x%2x\n",
- params->efuse0x3d7, params->efuse0x3d8);
-
-err:
- kfree(map);
-}
-
-void rtl8822be_read_eeprom_info(struct ieee80211_hw *hw,
- struct rtl_phydm_params *params)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 tmp_u1b;
-
- rtlhal->version = _rtl8822be_read_chip_version(hw);
-
- params->mp_chip = (rtlhal->version & BIT_RTL_ID_8822B) ? 0 : 1;
- params->fab_ver = BIT_GET_VENDOR_ID_8822B(rtlhal->version) >> 2;
- params->cut_ver = BIT_GET_CHIP_VER_8822B(rtlhal->version);
-
- /* fab_ver mapping */
- if (params->fab_ver == 2)
- params->fab_ver = 1;
- else if (params->fab_ver == 1)
- params->fab_ver = 2;
-
- /* read PA bias: params->efuse0x3d7/efuse0x3d8 */
- _rtl8822be_read_pa_bias(hw, params);
-
- if (get_rf_type(rtlphy) == RF_1T1R)
- rtlpriv->dm.rfpath_rxenable[0] = true;
- else
- rtlpriv->dm.rfpath_rxenable[0] =
- rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
- tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_EEPROM_CTRL_8822B);
- if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
- rtlefuse->epromtype = EEPROM_93C46;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
- rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
- }
- if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- _rtl8822be_read_adapter_info(hw);
- } else {
- pr_err("Autoload ERR!!\n");
- }
- _rtl8822be_hal_customized_behavior(hw);
-
- rtlphy->rfpath_rx_enable[0] = true;
- if (rtlphy->rf_type == RF_2T2R)
- rtlphy->rfpath_rx_enable[1] = true;
-}
-
-void rtl8822be_read_eeprom_info_dummy(struct ieee80211_hw *hw)
-{
-	/*
-	 * The 8822b uses halmac, so rtl8822be_read_eeprom_info() is called
-	 * from rtl8822be_init_sw_vars(), after halmac_init_adapter().
-	 */
-}
-
-static u32 _rtl8822be_rate_to_bitmap_2ssvht(__le16 vht_rate)
-{
- u8 i, j, tmp_rate;
- u32 rate_bitmap = 0;
-
- for (i = j = 0; i < 4; i += 2, j += 10) {
- tmp_rate = (le16_to_cpu(vht_rate) >> i) & 3;
-
- switch (tmp_rate) {
- case 2:
- rate_bitmap = rate_bitmap | (0x03ff << j);
- break;
-
- case 1:
- rate_bitmap = rate_bitmap | (0x01ff << j);
- break;
-
- case 0:
- rate_bitmap = rate_bitmap | (0x00ff << j);
- break;
-
- default:
- break;
- }
- }
-
- return rate_bitmap;
-}
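
Editorial worked example: a 2x2 VHT peer commonly advertises an MCS map of 0xfffa (MCS 0-9 on the first two spatial streams, "not supported" on the rest); fed through the helper above this yields 0x3ff for 1SS plus 0x3ff << 10 for 2SS, i.e. a rate bitmap of 0x000fffff. Sketch only, assuming the static helper is in scope:

	__le16 mcs_map = cpu_to_le16(0xfffa);	/* MCS 0-9 on SS1 and SS2 */
	u32 bitmap = _rtl8822be_rate_to_bitmap_2ssvht(mcs_map);
	/* bitmap == 0x000fffff: bits 0-9 = 1SS MCS0-9, bits 10-19 = 2SS MCS0-9 */
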
-
-static u8 _rtl8822be_get_vht_en(enum wireless_mode wirelessmode,
- u32 ratr_bitmap)
-{
- u8 ret = 0;
-
- if (wirelessmode < WIRELESS_MODE_N_24G) {
- ret = 0;
- } else if (wirelessmode == WIRELESS_MODE_AC_24G) {
-		if (ratr_bitmap & 0xfff00000) /* Mix, 2SS */
- ret = 3;
- else /* Mix, 1SS */
- ret = 2;
- } else if (wirelessmode == WIRELESS_MODE_AC_5G) {
- ret = 1;
- } /* VHT */
-
- return ret << 4;
-}
-
-static u8 _rtl8822be_get_ra_ldpc(struct ieee80211_hw *hw, u8 mac_id,
- struct rtl_sta_info *sta_entry,
- enum wireless_mode wirelessmode)
-{
- u8 b_ldpc = 0;
-	/* LDPC is not supported, so keep it disabled */
- return b_ldpc << 2;
-}
-
-static u8 _rtl8822be_get_ra_rftype(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode,
- u32 ratr_bitmap)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 rf_type = RF_1T1R;
-
- if (rtlphy->rf_type == RF_1T1R) {
- rf_type = RF_1T1R;
- } else if (wirelessmode == WIRELESS_MODE_AC_5G ||
- wirelessmode == WIRELESS_MODE_AC_24G ||
- wirelessmode == WIRELESS_MODE_AC_ONLY) {
- if (ratr_bitmap & 0xffc00000)
- rf_type = RF_2T2R;
- } else if (wirelessmode == WIRELESS_MODE_N_5G ||
- wirelessmode == WIRELESS_MODE_N_24G) {
- if (ratr_bitmap & 0xfff00000)
- rf_type = RF_2T2R;
- }
-
- return rf_type;
-}
-
-static bool _rtl8822be_get_ra_shortgi(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 mac_id)
-{
- bool b_short_gi = false;
- u8 b_curshortgi_40mhz =
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
- u8 b_curshortgi_20mhz =
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
- u8 b_curshortgi_80mhz = 0;
-
- b_curshortgi_80mhz =
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) ? 1 : 0;
-
- if (mac_id == 99 /*MAC_ID_STATIC_FOR_BROADCAST_MULTICAST*/)
- b_short_gi = false;
-
- if (b_curshortgi_40mhz || b_curshortgi_80mhz || b_curshortgi_20mhz)
- b_short_gi = true;
-
- return b_short_gi;
-}
-
-static void rtl8822be_update_hal_rate_mask(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level, bool update_bw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_sta_info *sta_entry = NULL;
- u32 ratr_bitmap, ratr_bitmap_msb = 0;
- u8 ratr_index;
- enum wireless_mode wirelessmode = 0;
- u8 curtxbw_40mhz =
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
- bool b_shortgi = false;
- u8 rate_mask[7];
- u8 macid = 0;
- u8 rf_type;
-
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- wirelessmode = sta_entry->wireless_mode;
-
- RT_TRACE(rtlpriv, COMP_RATR, DBG_LOUD, "wireless mode = 0x%x\n",
- wirelessmode);
- if (mac->opmode == NL80211_IFTYPE_STATION ||
- mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- curtxbw_40mhz = mac->bw_40;
- } else if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC)
- macid = sta->aid + 1;
- if (wirelessmode == WIRELESS_MODE_N_5G ||
- wirelessmode == WIRELESS_MODE_AC_5G ||
- wirelessmode == WIRELESS_MODE_A)
- ratr_bitmap = (sta->supp_rates[NL80211_BAND_5GHZ]) << 4;
- else
- ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC)
- ratr_bitmap = 0xfff;
-
- if (wirelessmode == WIRELESS_MODE_N_24G ||
- wirelessmode == WIRELESS_MODE_N_5G)
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
- else if (wirelessmode == WIRELESS_MODE_AC_24G ||
- wirelessmode == WIRELESS_MODE_AC_5G ||
- wirelessmode == WIRELESS_MODE_AC_ONLY)
- ratr_bitmap |= _rtl8822be_rate_to_bitmap_2ssvht(
- sta->vht_cap.vht_mcs.rx_mcs_map)
- << 12;
-
- b_shortgi = _rtl8822be_get_ra_shortgi(hw, sta, macid);
- rf_type = _rtl8822be_get_ra_rftype(hw, wirelessmode, ratr_bitmap);
-
- ratr_index = rtlpriv->phydm.ops->phydm_rate_id_mapping(
- rtlpriv, wirelessmode, rf_type, rtlphy->current_chan_bw);
- sta_entry->ratr_index = ratr_index;
-
- rtlpriv->phydm.ops->phydm_get_ra_bitmap(
- rtlpriv, wirelessmode, rf_type, rtlphy->current_chan_bw,
- rssi_level, &ratr_bitmap_msb, &ratr_bitmap);
-
- RT_TRACE(rtlpriv, COMP_RATR, DBG_LOUD, "ratr_bitmap :%x\n",
- ratr_bitmap);
-
- rate_mask[0] = macid;
- rate_mask[1] = ratr_index | (b_shortgi ? 0x80 : 0x00);
- rate_mask[2] =
- rtlphy->current_chan_bw | ((!update_bw) << 3) |
- _rtl8822be_get_vht_en(wirelessmode, ratr_bitmap) |
- _rtl8822be_get_ra_ldpc(hw, macid, sta_entry, wirelessmode);
-
- rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
- rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
- rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
- rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
-
- RT_TRACE(
- rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%08x, %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- ratr_index, ratr_bitmap, rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3], rate_mask[4], rate_mask[5],
- rate_mask[6]);
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_MACID_CFG, 7, rate_mask);
-
- /* for h2c cmd 0x46, only modify cmd id & ra mask */
- /* Keep rate_mask0~2 of cmd 0x40, but clear byte3 and later */
- /* 8822B has no 3SS, so keep it zeros. */
- memset(rate_mask + 3, 0, 4);
-
- rtl8822be_fill_h2c_cmd(hw, H2C_8822B_MACID_CFG_3SS, 7, rate_mask);
-
- _rtl8822be_set_bcn_ctrl_reg(hw, BIT(3), 0);
-}
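
Editorial aside: rate_mask[3..6] is simply the RA bitmap split into little-endian bytes for the H2C payload; for example, a bitmap of 0x000ff015 produces the bytes 0x15, 0xf0, 0x0f, 0x00. Sketch of just that packing (variable names are placeholders):

	u32 ra_bitmap = 0x000ff015;	/* example RA mask */
	u8 ra_bytes[4] = {
		ra_bitmap & 0xff,		/* 0x15 */
		(ra_bitmap >> 8) & 0xff,	/* 0xf0 */
		(ra_bitmap >> 16) & 0xff,	/* 0x0f */
		(ra_bitmap >> 24) & 0xff,	/* 0x00 */
	};
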
-
-void rtl8822be_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level,
- bool update_bw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->dm.useramask)
- rtl8822be_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
-}
-
-void rtl8822be_update_channel_access_setting(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u16 sifs_timer;
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
- if (!mac->ht_enable)
- sifs_timer = 0x0a0a;
- else
- sifs_timer = 0x0e0e;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
-}
-
-bool rtl8822be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
-{
- *valid = 1;
- return true;
-}
-
-void rtl8822be_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
- bool is_group, u8 enc_algo, bool is_wepkey,
- bool clear_all)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 *macaddr = p_macaddr;
- u32 entry_id = 0;
- bool is_pairwise = false;
-
- static u8 cam_const_addr[4][6] = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x03},
- };
- static u8 cam_const_broad[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- if (clear_all) {
- u8 idx = 0;
- u8 cam_offset = 0;
- u8 clear_number = 5;
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
-
- for (idx = 0; idx < clear_number; idx++) {
- rtl_cam_mark_invalid(hw, cam_offset + idx);
- rtl_cam_empty_entry(hw, cam_offset + idx);
-
- if (idx < 5) {
- memset(rtlpriv->sec.key_buf[idx], 0,
- MAX_KEY_LEN);
- rtlpriv->sec.key_len[idx] = 0;
- }
- }
-
- return;
- }
-
- switch (enc_algo) {
- case WEP40_ENCRYPTION:
- enc_algo = CAM_WEP40;
- break;
- case WEP104_ENCRYPTION:
- enc_algo = CAM_WEP104;
- break;
- case TKIP_ENCRYPTION:
- enc_algo = CAM_TKIP;
- break;
- case AESCCMP_ENCRYPTION:
- enc_algo = CAM_AES;
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", enc_algo);
- enc_algo = CAM_TKIP;
- break;
- }
-
- if (is_wepkey || rtlpriv->sec.use_defaultkey) {
- macaddr = cam_const_addr[key_index];
- entry_id = key_index;
- } else {
- if (is_group) {
- macaddr = cam_const_broad;
- entry_id = key_index;
- } else {
- if (mac->opmode == NL80211_IFTYPE_AP) {
- entry_id =
- rtl_cam_get_free_entry(hw, p_macaddr);
- if (entry_id >= TOTAL_CAM_ENTRY) {
- pr_err("Can not find free hwsecurity cam entry\n");
- return;
- }
- } else {
- entry_id = CAM_PAIRWISE_KEY_POSITION;
- }
-
- key_index = PAIRWISE_KEYIDX;
- is_pairwise = true;
- }
- }
-
- if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n", entry_id);
- if (mac->opmode == NL80211_IFTYPE_AP)
- rtl_cam_del_entry(hw, p_macaddr);
- rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
- } else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "add one entry\n");
- if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
-
- rtl_cam_add_one_entry(hw, macaddr, key_index, entry_id,
- enc_algo, CAM_CONFIG_NO_USEDK,
- rtlpriv->sec.key_buf[key_index]);
- } else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- rtl_cam_add_one_entry(
- hw, rtlefuse->dev_addr, PAIRWISE_KEYIDX,
- CAM_PAIRWISE_KEY_POSITION, enc_algo,
- CAM_CONFIG_NO_USEDK,
- rtlpriv->sec.key_buf[entry_id]);
- }
-
- rtl_cam_add_one_entry(hw, macaddr, key_index, entry_id,
- enc_algo, CAM_CONFIG_NO_USEDK,
- rtlpriv->sec.key_buf[entry_id]);
- }
- }
-}
-
-void rtl8822be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
- bool auto_load_fail, u8 *hwinfo)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value;
- u32 val32;
-
- val32 = rtl_read_dword(rtlpriv, REG_WL_BT_PWR_CTRL_8822B);
- if (val32 & BIT_BT_FUNC_EN_8822B)
- rtlpriv->btcoexist.btc_info.btcoexist = 1;
- else
- rtlpriv->btcoexist.btc_info.btcoexist = 0;
-
- if (!auto_load_fail) {
- value = hwinfo[EEPROM_RF_BT_SETTING_8822B];
-
- rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8822B;
- rtlpriv->btcoexist.btc_info.ant_num =
- (value & BIT(0) ? ANT_TOTAL_X1 : ANT_TOTAL_X2);
- } else {
- rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8822B;
- rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X2;
- }
-}
-
-void rtl8822be_bt_reg_init(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- /* 0:Low, 1:High, 2:From Efuse. */
- rtlpriv->btcoexist.reg_bt_iso = 2;
- /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
- rtlpriv->btcoexist.reg_bt_sco = 3;
- /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
- rtlpriv->btcoexist.reg_bt_sco = 0;
-}
-
-void rtl8822be_suspend(struct ieee80211_hw *hw) {}
-
-void rtl8822be_resume(struct ieee80211_hw *hw) {}
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.h b/drivers/staging/rtlwifi/rtl8822be/hw.h
deleted file mode 100644
index cf3536113f06..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/hw.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8822B_HW_H__
-#define __RTL8822B_HW_H__
-
-extern u8 rtl_channel5g[CHANNEL_MAX_NUMBER_5G];
-extern u8 rtl_channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M];
-
-void rtl8822be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-void rtl8822be_read_eeprom_info(struct ieee80211_hw *hw,
- struct rtl_phydm_params *params);
-void rtl8822be_read_eeprom_info_dummy(struct ieee80211_hw *hw);
-void rtl8822be_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
- u32 *p_intb, u32 *p_intc, u32 *p_intd);
-int rtl8822be_hw_init(struct ieee80211_hw *hw);
-void rtl8822be_card_disable(struct ieee80211_hw *hw);
-void rtl8822be_enable_interrupt(struct ieee80211_hw *hw);
-void rtl8822be_disable_interrupt(struct ieee80211_hw *hw);
-int rtl8822be_set_network_type(struct ieee80211_hw *hw,
- enum nl80211_iftype type);
-void rtl8822be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
-void rtl8822be_set_qos(struct ieee80211_hw *hw, int aci);
-void rtl8822be_set_beacon_related_registers(struct ieee80211_hw *hw);
-void rtl8822be_set_beacon_interval(struct ieee80211_hw *hw);
-void rtl8822be_update_interrupt_mask(struct ieee80211_hw *hw, u32 add_msr,
- u32 rm_msr);
-void rtl8822be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-void rtl8822be_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level,
- bool update_bw);
-void rtl8822be_update_channel_access_setting(struct ieee80211_hw *hw);
-bool rtl8822be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
-void rtl8822be_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
- bool is_group, u8 enc_algo, bool is_wepkey,
- bool clear_all);
-void rtl8822be_enable_hw_security_config(struct ieee80211_hw *hw);
-void rtl8822be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail, u8 *hwinfo);
-void rtl8822be_bt_reg_init(struct ieee80211_hw *hw);
-void rtl8822be_suspend(struct ieee80211_hw *hw);
-void rtl8822be_resume(struct ieee80211_hw *hw);
-void rtl8822be_fw_clk_off_timer_callback(unsigned long data);
-#endif
diff --git a/drivers/staging/rtlwifi/rtl8822be/led.c b/drivers/staging/rtlwifi/rtl8822be/led.c
deleted file mode 100644
index 6d6e1f271e18..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/led.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../wifi.h"
-#include "../pci.h"
-#include "reg.h"
-#include "led.h"
-
-static void _rtl8822be_init_led(struct ieee80211_hw *hw, struct rtl_led *pled,
- enum rtl_led_pin ledpin)
-{
- pled->hw = hw;
- pled->ledpin = ledpin;
- pled->ledon = false;
-}
-
-void rtl8822be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2_8822B, pled->ledpin);
-
- switch (pled->ledpin) {
- case LED_PIN_GPIO0:
- break;
- case LED_PIN_LED0:
- break;
- case LED_PIN_LED1:
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
-			 "switch case not processed\n");
- break;
- }
- pled->ledon = true;
-}
-
-void rtl8822be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2_8822B, pled->ledpin);
-
- switch (pled->ledpin) {
- case LED_PIN_GPIO0:
- break;
- case LED_PIN_LED0:
- break;
- case LED_PIN_LED1:
-
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
-			 "switch case not processed\n");
- break;
- }
- pled->ledon = false;
-}
-
-void rtl8822be_init_sw_leds(struct ieee80211_hw *hw)
-{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-
- _rtl8822be_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
- _rtl8822be_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
-}
-
-static void _rtl8822be_sw_led_control(struct ieee80211_hw *hw,
- enum led_ctl_mode ledaction)
-{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *led0 = &pcipriv->ledctl.sw_led0;
-
- switch (ledaction) {
- case LED_CTL_POWER_ON:
- case LED_CTL_LINK:
- case LED_CTL_NO_LINK:
- rtl8822be_sw_led_on(hw, led0);
- break;
- case LED_CTL_POWER_OFF:
- rtl8822be_sw_led_off(hw, led0);
- break;
- default:
- break;
- }
-}
-
-void rtl8822be_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- if (ppsc->rfoff_reason > RF_CHANGE_BY_PS &&
- (ledaction == LED_CTL_TX || ledaction == LED_CTL_RX ||
- ledaction == LED_CTL_SITE_SURVEY || ledaction == LED_CTL_LINK ||
- ledaction == LED_CTL_NO_LINK ||
- ledaction == LED_CTL_START_TO_LINK ||
- ledaction == LED_CTL_POWER_ON)) {
- return;
- }
- RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", ledaction);
- _rtl8822be_sw_led_control(hw, ledaction);
-}
diff --git a/drivers/staging/rtlwifi/rtl8822be/led.h b/drivers/staging/rtlwifi/rtl8822be/led.h
deleted file mode 100644
index 9a19e17cf3a4..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/led.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8822B_LED_H__
-#define __RTL8822B_LED_H__
-
-void rtl8822be_init_sw_leds(struct ieee80211_hw *hw);
-void rtl8822be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
-void rtl8822be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
-void rtl8822be_led_control(struct ieee80211_hw *hw,
- enum led_ctl_mode ledaction);
-#endif
diff --git a/drivers/staging/rtlwifi/rtl8822be/phy.c b/drivers/staging/rtlwifi/rtl8822be/phy.c
deleted file mode 100644
index 048904d783fc..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/phy.c
+++ /dev/null
@@ -1,2223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../wifi.h"
-#include "../pci.h"
-#include "../ps.h"
-#include "../base.h"
-#include "reg.h"
-#include "def.h"
-#include "phy.h"
-#include "trx.h"
-#include "../btcoexist/halbt_precomp.h"
-#include "hw.h"
-#include "../efuse.h"
-
-static u32 _rtl8822be_phy_calculate_bit_shift(u32 bitmask);
-static void
-_rtl8822be_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
-
-static long _rtl8822be_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode,
- u8 txpwridx);
-static void rtl8822be_phy_set_rf_on(struct ieee80211_hw *hw);
-static void rtl8822be_phy_set_io(struct ieee80211_hw *hw);
-
-static u8 cck_rates[] = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M};
-static u8 sizes_of_cck_retes = 4;
-static u8 ofdm_rates[] = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
- DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
- DESC_RATE48M, DESC_RATE54M};
-static u8 sizes_of_ofdm_retes = 8;
-static u8 ht_rates_1t[] = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
- DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
- DESC_RATEMCS6, DESC_RATEMCS7};
-static u8 sizes_of_ht_retes_1t = 8;
-static u8 ht_rates_2t[] = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
- DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
- DESC_RATEMCS14, DESC_RATEMCS15};
-static u8 sizes_of_ht_retes_2t = 8;
-static u8 vht_rates_1t[] = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
- DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
- DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
- DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
- DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9};
-static u8 vht_rates_2t[] = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
- DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
- DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
- DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
- DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9};
-static u8 sizes_of_vht_retes = 10;
-
-u32 rtl8822be_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
- u32 bitmask)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 returnvalue, originalvalue, bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
- originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl8822be_phy_calculate_bit_shift(bitmask);
- returnvalue = (originalvalue & bitmask) >> bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
-
- return returnvalue;
-}
-
-void rtl8822be_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
- u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 originalvalue, bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask,
- data);
-
- if (bitmask != MASKDWORD) {
- originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl8822be_phy_calculate_bit_shift(bitmask);
- data = ((originalvalue & (~bitmask)) |
- ((data << bitshift) & bitmask));
- }
-
- rtl_write_dword(rtlpriv, regaddr, data);
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask,
- data);
-}
-
-u32 rtl8822be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 /*original_value,*/ readback_value /*, bitshift*/;
- unsigned long flags;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n", regaddr, rfpath,
- bitmask);
-
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
-
- readback_value = rtlpriv->phydm.ops->phydm_read_rf_reg(
- rtlpriv, rfpath, regaddr, bitmask);
-
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
-
- return readback_value;
-}
-
-void rtl8822be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- unsigned long flags;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
-
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
-
- rtlpriv->phydm.ops->phydm_write_rf_reg(rtlpriv, rfpath, regaddr,
- bitmask, data);
-
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
-}
-
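-/* Return the bit position of the least significant set bit in bitmask,
- * so callers can shift masked register fields down to bit 0.
- */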
-static u32 _rtl8822be_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
-}
-
-bool rtl8822be_halmac_cb_init_mac_register(struct rtl_priv *rtlpriv)
-{
- return rtlpriv->phydm.ops->phydm_phy_mac_config(rtlpriv);
-}
-
-bool rtl8822be_phy_bb_config(struct ieee80211_hw *hw)
-{
- bool rtstatus = true;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 crystal_cap;
- /* u32 tmp; */
-
- rtstatus = rtlpriv->phydm.ops->phydm_phy_bb_config(rtlpriv);
-
- /* write 0x28[6:1] = 0x24[30:25] = CrystalCap */
- crystal_cap = rtlefuse->crystalcap & 0x3F;
- rtl_set_bbreg(hw, REG_AFE_XTAL_CTRL_8822B, 0x7E000000, crystal_cap);
- rtl_set_bbreg(hw, REG_AFE_PLL_CTRL_8822B, 0x7E, crystal_cap);
-
- /*rtlphy->reg_837 = rtl_read_byte(rtlpriv, 0x837);*/ /*unused*/
-
- return rtstatus;
-}
-
-bool rtl8822be_phy_rf_config(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
-
- if (rtlphy->rf_type == RF_1T1R)
- rtlphy->num_total_rfpath = 1;
- else
- rtlphy->num_total_rfpath = 2;
-
- return rtlpriv->phydm.ops->phydm_phy_rf_config(rtlpriv);
-}
-
-bool rtl8822be_halmac_cb_init_bb_rf_register(struct rtl_priv *rtlpriv)
-{
- struct ieee80211_hw *hw = rtlpriv->hw;
- enum radio_mask txpath, rxpath;
- bool tx2path;
- bool ret = false;
-
- _rtl8822be_phy_init_bb_rf_register_definition(hw);
-
- rtlpriv->halmac.ops->halmac_phy_power_switch(rtlpriv, 1);
-
- /* before bb/rf config */
- rtlpriv->phydm.ops->phydm_parameter_init(rtlpriv, 0);
-
- /* do bb/rf config */
- if (rtl8822be_phy_bb_config(hw) && rtl8822be_phy_rf_config(hw))
- ret = true;
-
- /* after bb/rf config */
- rtlpriv->phydm.ops->phydm_parameter_init(rtlpriv, 1);
-
- /* set trx mode (keep it to be last, r17376) */
- txpath = RF_MASK_A | RF_MASK_B;
- rxpath = RF_MASK_A | RF_MASK_B;
- tx2path = false;
- ret = rtlpriv->phydm.ops->phydm_trx_mode(rtlpriv, txpath, rxpath,
- tx2path);
-
- return ret;
-}
-
-static void _rtl8822be_phy_init_tx_power_by_rate(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
-
- u8 band, rfpath, txnum, rate;
-
- for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
- for (rfpath = 0; rfpath < TX_PWR_BY_RATE_NUM_RF; ++rfpath)
- for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum)
- for (rate = 0; rate < TX_PWR_BY_RATE_NUM_RATE;
- ++rate)
- rtlphy->tx_power_by_rate_offset
- [band][rfpath][txnum][rate] = 0;
-}
-
-static void _rtl8822be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
- u8 band, u8 path,
- u8 rate_section, u8 txnum,
- u8 value)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
-
- if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n",
- path);
- return;
- }
-
- if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid band %d in phy_SetTxPowerByRatBase()\n",
- band);
- return;
- }
-
- if (rate_section >= MAX_RATE_SECTION ||
- (band == BAND_ON_5G && rate_section == CCK)) {
- RT_TRACE(
- rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid rate_section %d in phy_SetTxPowerByRatBase()\n",
- rate_section);
- return;
- }
-
- if (band == BAND_ON_2_4G)
- rtlphy->txpwr_by_rate_base_24g[path][txnum][rate_section] =
- value;
- else /* BAND_ON_5G */
- rtlphy->txpwr_by_rate_base_5g[path][txnum][rate_section - 1] =
- value;
-}
-
-static u8 _rtl8822be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
- u8 band, u8 path, u8 txnum,
- u8 rate_section)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 value;
-
- if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d in phy_GetTxPowerByRatBase()\n",
- path);
- return 0;
- }
-
- if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid band %d in phy_GetTxPowerByRatBase()\n",
- band);
- return 0;
- }
-
- if (rate_section >= MAX_RATE_SECTION ||
- (band == BAND_ON_5G && rate_section == CCK)) {
- RT_TRACE(
- rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid rate_section %d in phy_GetTxPowerByRatBase()\n",
- rate_section);
- return 0;
- }
-
- if (band == BAND_ON_2_4G)
- value = rtlphy->txpwr_by_rate_base_24g[path][txnum]
- [rate_section];
- else /* BAND_ON_5G */
- value = rtlphy->txpwr_by_rate_base_5g[path][txnum]
- [rate_section - 1];
-
- return value;
-}
-
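-/* Record, per band/path/rate section, the raw power of the section's
- * reference rate (11M, 54M, MCS7, MCS15, VHT 1SS/2SS MCS7) as that
- * section's base value.
- */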
-static void _rtl8822be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
-
- struct {
- enum rtl_desc_rate rate;
- enum rate_section section;
- } rate_sec_base[] = {
- {DESC_RATE11M, CCK},
- {DESC_RATE54M, OFDM},
- {DESC_RATEMCS7, HT_MCS0_MCS7},
- {DESC_RATEMCS15, HT_MCS8_MCS15},
- {DESC_RATEVHT1SS_MCS7, VHT_1SSMCS0_1SSMCS9},
- {DESC_RATEVHT2SS_MCS7, VHT_2SSMCS0_2SSMCS9},
- };
-
- u8 band, path, rs, tx_num, base;
- u8 rate, section;
-
- for (band = BAND_ON_2_4G; band <= BAND_ON_5G; band++) {
- for (path = RF90_PATH_A; path <= RF90_PATH_B; path++) {
- for (rs = 0; rs < MAX_RATE_SECTION; rs++) {
- rate = rate_sec_base[rs].rate;
- section = rate_sec_base[rs].section;
-
- if (IS_1T_RATE(rate))
- tx_num = RF_1TX;
- else
- tx_num = RF_2TX;
-
- if (band == BAND_ON_5G &&
- RX_HAL_IS_CCK_RATE(rate))
- continue;
-
- base = rtlphy->tx_power_by_rate_offset
- [band][path][tx_num][rate];
- _rtl8822be_phy_set_txpower_by_rate_base(
- hw, band, path, section, tx_num, base);
- }
- }
- }
-}
-
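-/* For 5G 20M/40M, if an HT rate section has no limit (MAX_POWER_INDEX)
- * while its VHT counterpart does (or vice versa), copy the counterpart's
- * limit so the two stay consistent.
- */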
-static void __rtl8822be_phy_cross_reference_core(struct ieee80211_hw *hw,
- u8 regulation, u8 bw,
- u8 channel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 rs, ref_rs;
- s8 pwrlmt, ref_pwrlmt;
-
- for (rs = 0; rs < MAX_RATE_SECTION_NUM; ++rs) {
- /*5G 20M 40M VHT and HT can cross reference*/
- if (bw != HT_CHANNEL_WIDTH_20 && bw != HT_CHANNEL_WIDTH_20_40)
- continue;
-
- if (rs == HT_MCS0_MCS7)
- ref_rs = VHT_1SSMCS0_1SSMCS9;
- else if (rs == HT_MCS8_MCS15)
- ref_rs = VHT_2SSMCS0_2SSMCS9;
- else if (rs == VHT_1SSMCS0_1SSMCS9)
- ref_rs = HT_MCS0_MCS7;
- else if (rs == VHT_2SSMCS0_2SSMCS9)
- ref_rs = HT_MCS8_MCS15;
- else
- continue;
-
- ref_pwrlmt = rtlphy->txpwr_limit_5g[regulation][bw][ref_rs]
- [channel][RF90_PATH_A];
- if (ref_pwrlmt == MAX_POWER_INDEX)
- continue;
-
- pwrlmt = rtlphy->txpwr_limit_5g[regulation][bw][rs][channel]
- [RF90_PATH_A];
- if (pwrlmt != MAX_POWER_INDEX)
- continue;
-
- rtlphy->txpwr_limit_5g[regulation][bw][rs][channel]
- [RF90_PATH_A] = ref_pwrlmt;
- }
-}
-
-static void
-_rtl8822be_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee80211_hw *hw)
-{
- u8 regulation, bw, channel;
-
- for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
- for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G;
- ++channel) {
- __rtl8822be_phy_cross_reference_core(
- hw, regulation, bw, channel);
- }
- }
- }
-}
-
-static void __rtl8822be_txpwr_limit_to_index_2g(struct ieee80211_hw *hw,
- u8 regulation, u8 bw,
- u8 channel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 bw40_pwr_base_dbm2_4G;
- u8 rate_section;
- s8 temp_pwrlmt;
- enum rf_tx_num txnum;
- s8 temp_value;
- u8 rf_path;
-
- for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM;
- ++rate_section) {
- /* obtain the base dBm values in 2.4G band
- * CCK => 11M, OFDM => 54M, HT 1T => MCS7, HT 2T => MCS15
- */
-
- temp_pwrlmt =
- rtlphy->txpwr_limit_2_4g[regulation][bw][rate_section]
- [channel][RF90_PATH_A];
- txnum = IS_1T_RATESEC(rate_section) ? RF_1TX : RF_2TX;
-
- if (temp_pwrlmt == MAX_POWER_INDEX)
- continue;
-
- for (rf_path = RF90_PATH_A; rf_path < MAX_RF_PATH_NUM;
- ++rf_path) {
- bw40_pwr_base_dbm2_4G =
- _rtl8822be_phy_get_txpower_by_rate_base(
- hw, BAND_ON_2_4G, rf_path, txnum,
- rate_section);
-
- temp_value = temp_pwrlmt - bw40_pwr_base_dbm2_4G;
- rtlphy->txpwr_limit_2_4g[regulation][bw][rate_section]
- [channel][rf_path] = temp_value;
-
- RT_TRACE(
- rtlpriv, COMP_INIT, DBG_TRACE,
- "TxPwrLimit_2_4G[regulation %d][bw %d][rateSection %d][channel %d] = %d\n(TxPwrLimit in dBm %d - BW40PwrLmt2_4G[channel %d][rfPath %d] %d)\n",
- regulation, bw, rate_section, channel,
- rtlphy->txpwr_limit_2_4g[regulation][bw]
- [rate_section][channel]
- [rf_path],
- (temp_pwrlmt == 63) ? 0 : temp_pwrlmt / 2,
- channel, rf_path, bw40_pwr_base_dbm2_4G);
- }
- }
-}
-
-static void __rtl8822be_txpwr_limit_to_index_5g(struct ieee80211_hw *hw,
- u8 regulation, u8 bw,
- u8 channel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 bw40_pwr_base_dbm5G;
- u8 rate_section;
- s8 temp_pwrlmt;
- enum rf_tx_num txnum;
- s8 temp_value;
- u8 rf_path;
-
- for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM;
- ++rate_section) {
- /* obtain the base dBm values in 5G band
- * OFDM => 54M, HT 1T => MCS7, HT 2T => MCS15,
- * VHT => 1SSMCS7, VHT 2T => 2SSMCS7
- */
-
- temp_pwrlmt =
- rtlphy->txpwr_limit_5g[regulation][bw][rate_section]
- [channel][RF90_PATH_A];
- txnum = IS_1T_RATESEC(rate_section) ? RF_1TX : RF_2TX;
-
- if (temp_pwrlmt == MAX_POWER_INDEX)
- continue;
-
- for (rf_path = RF90_PATH_A; rf_path < MAX_RF_PATH_NUM;
- ++rf_path) {
- bw40_pwr_base_dbm5G =
- _rtl8822be_phy_get_txpower_by_rate_base(
- hw, BAND_ON_5G, rf_path, txnum,
- rate_section);
-
- temp_value = temp_pwrlmt - bw40_pwr_base_dbm5G;
- rtlphy->txpwr_limit_5g[regulation][bw][rate_section]
- [channel][rf_path] = temp_value;
-
- RT_TRACE(
- rtlpriv, COMP_INIT, DBG_TRACE,
- "TxPwrLimit_5G[regulation %d][bw %d][rateSection %d][channel %d] =%d\n(TxPwrLimit in dBm %d - BW40PwrLmt5G[chnl group %d][rfPath %d] %d)\n",
- regulation, bw, rate_section, channel,
- rtlphy->txpwr_limit_5g[regulation][bw]
- [rate_section][channel]
- [rf_path],
- temp_pwrlmt, channel, rf_path,
- bw40_pwr_base_dbm5G);
- }
- }
-}
-
-static void
-_rtl8822be_phy_convert_txpower_limit_to_power_index(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 regulation, bw, channel;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "=====> %s()\n", __func__);
-
- _rtl8822be_phy_cross_reference_ht_and_vht_txpower_limit(hw);
-
- for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_2_4G_BANDWIDTH_NUM; ++bw) {
- for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G;
- ++channel) {
- __rtl8822be_txpwr_limit_to_index_2g(
- hw, regulation, bw, channel);
- }
- }
- }
-
- for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
- for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G;
- ++channel) {
- __rtl8822be_txpwr_limit_to_index_5g(
- hw, regulation, bw, channel);
- }
- }
- }
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<===== %s()\n", __func__);
-}
-
-static void _rtl8822be_phy_init_txpower_limit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 i, j, k, l, m;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "=====> %s()!\n", __func__);
-
- for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_2_4G_BANDWIDTH_NUM; ++j)
- for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
- for (m = 0; m < CHANNEL_MAX_NUMBER_2G; ++m)
- for (l = 0; l < MAX_RF_PATH_NUM; ++l)
- rtlphy->txpwr_limit_2_4g[i][j]
- [k][m]
- [l] =
- MAX_POWER_INDEX;
- }
- for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_5G_BANDWIDTH_NUM; ++j)
- for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
- for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m)
- for (l = 0; l < MAX_RF_PATH_NUM; ++l)
- rtlphy->txpwr_limit_5g[i][j][k]
- [m][l] =
- MAX_POWER_INDEX;
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<===== %s()!\n", __func__);
-}
-
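-/* Rewrite each per-rate power value as an offset relative to its rate
- * section's reference rate (11M, 54M, MCS7, MCS15, VHT 1SS/2SS MCS7).
- */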
-static void
-_rtl8822be_phy_convert_txpower_dbm_to_relative_value(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
-
- u8 base = 0, i = 0, value = 0, band = 0, path = 0, txnum = 0;
-
- for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band) {
- for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
- for (txnum = RF_1TX; txnum <= RF_2TX; ++txnum) {
- /* CCK */
- base = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [DESC_RATE11M];
- for (i = 0; i < sizeof(cck_rates); ++i) {
- value = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [cck_rates[i]];
- rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [cck_rates[i]] = value - base;
- }
-
- /* OFDM */
- base = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [DESC_RATE54M];
- for (i = 0; i < sizeof(ofdm_rates); ++i) {
- value = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [ofdm_rates[i]];
- rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [ofdm_rates[i]] = value - base;
- }
-
- /* HT MCS0~7 */
- base = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [DESC_RATEMCS7];
- for (i = 0; i < sizeof(ht_rates_1t); ++i) {
- value = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [ht_rates_1t[i]];
- rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [ht_rates_1t[i]] = value - base;
- }
-
- /* HT MCS8~15 */
- base = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [DESC_RATEMCS15];
- for (i = 0; i < sizeof(ht_rates_2t); ++i) {
- value = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [ht_rates_2t[i]];
- rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [ht_rates_2t[i]] = value - base;
- }
-
- /* VHT 1SS */
- base = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [DESC_RATEVHT1SS_MCS7];
- for (i = 0; i < sizeof(vht_rates_1t); ++i) {
- value = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [vht_rates_1t[i]];
- rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [vht_rates_1t[i]] =
- value - base;
- }
-
- /* VHT 2SS */
- base = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [DESC_RATEVHT2SS_MCS7];
- for (i = 0; i < sizeof(vht_rates_2t); ++i) {
- value = rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [vht_rates_2t[i]];
- rtlphy->tx_power_by_rate_offset
- [band][path][txnum]
- [vht_rates_2t[i]] =
- value - base;
- }
- }
- }
- }
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "<===%s()\n", __func__);
-}
-
-static void
-_rtl8822be_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw)
-{
- /* copy rate_section from
- * tx_power_by_rate_offset[][rate] to txpwr_by_rate_base_24g/_5g[][rs]
- */
- _rtl8822be_phy_store_txpower_by_rate_base(hw);
-
- /* convert tx_power_by_rate_offset[] to relative value */
- _rtl8822be_phy_convert_txpower_dbm_to_relative_value(hw);
-}
-
-/* string is in decimal */
-static bool _rtl8822be_get_integer_from_string(char *str, u8 *pint)
-{
- u16 i = 0;
- *pint = 0;
-
- while (str[i] != '\0') {
- if (str[i] >= '0' && str[i] <= '9') {
- *pint *= 10;
- *pint += (str[i] - '0');
- } else {
- return false;
- }
- ++i;
- }
-
- return true;
-}
-
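-/* Byte-wise comparison of the first 'num' bytes of str1 and str2;
- * a zero-length comparison is treated as a mismatch.
- */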
-static bool _rtl8822be_eq_n_byte(u8 *str1, u8 *str2, u32 num)
-{
- if (num == 0)
- return false;
- while (num > 0) {
- num--;
- if (str1[num] != str2[num])
- return false;
- }
- return true;
-}
-
-static char _rtl8822be_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
- u8 band, u8 channel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- char channel_index = -1;
- u8 i = 0;
-
- if (band == BAND_ON_2_4G) {
- channel_index = channel - 1;
- } else if (band == BAND_ON_5G) {
- for (i = 0; i < sizeof(rtl_channel5g) / sizeof(u8); ++i) {
- if (rtl_channel5g[i] == channel)
- channel_index = i;
- }
- } else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s",
- band, __func__);
- }
-
- if (channel_index == -1)
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Channel %d of Band %d in %s", channel, band,
- __func__);
-
- return channel_index;
-}
-
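-/* Parse one power-limit table entry given as strings (regulation, band,
- * bandwidth, rate section, RF path, channel, limit) and keep the tighter
- * of the parsed and currently stored limits.
- */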
-void rtl8822be_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregulation,
- u8 *pband, u8 *pbandwidth,
- u8 *prate_section, u8 *prf_path,
- u8 *pchannel, u8 *ppower_limit)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 regulation = 0, bandwidth = 0, rate_section = 0, channel;
- u8 channel_index;
- char power_limit = 0, prev_power_limit, ret;
-
- if (!_rtl8822be_get_integer_from_string((char *)pchannel, &channel) ||
- !_rtl8822be_get_integer_from_string((char *)ppower_limit,
- &power_limit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Illegal index of pwr_lmt table [chnl %d][val %d]\n",
- channel, power_limit);
- }
-
- power_limit =
- power_limit > MAX_POWER_INDEX ? MAX_POWER_INDEX : power_limit;
-
- if (_rtl8822be_eq_n_byte(pregulation, (u8 *)("FCC"), 3))
- regulation = 0;
- else if (_rtl8822be_eq_n_byte(pregulation, (u8 *)("MKK"), 3))
- regulation = 1;
- else if (_rtl8822be_eq_n_byte(pregulation, (u8 *)("ETSI"), 4))
- regulation = 2;
- else if (_rtl8822be_eq_n_byte(pregulation, (u8 *)("WW13"), 4))
- regulation = 3;
-
- if (_rtl8822be_eq_n_byte(prate_section, (u8 *)("CCK"), 3))
- rate_section = CCK;
- else if (_rtl8822be_eq_n_byte(prate_section, (u8 *)("OFDM"), 4))
- rate_section = OFDM;
- else if (_rtl8822be_eq_n_byte(prate_section, (u8 *)("HT"), 2) &&
- _rtl8822be_eq_n_byte(prf_path, (u8 *)("1T"), 2))
- rate_section = HT_MCS0_MCS7;
- else if (_rtl8822be_eq_n_byte(prate_section, (u8 *)("HT"), 2) &&
- _rtl8822be_eq_n_byte(prf_path, (u8 *)("2T"), 2))
- rate_section = HT_MCS8_MCS15;
- else if (_rtl8822be_eq_n_byte(prate_section, (u8 *)("VHT"), 3) &&
- _rtl8822be_eq_n_byte(prf_path, (u8 *)("1T"), 2))
- rate_section = VHT_1SSMCS0_1SSMCS9;
- else if (_rtl8822be_eq_n_byte(prate_section, (u8 *)("VHT"), 3) &&
- _rtl8822be_eq_n_byte(prf_path, (u8 *)("2T"), 2))
- rate_section = VHT_2SSMCS0_2SSMCS9;
-
- if (_rtl8822be_eq_n_byte(pbandwidth, (u8 *)("20M"), 3))
- bandwidth = HT_CHANNEL_WIDTH_20;
- else if (_rtl8822be_eq_n_byte(pbandwidth, (u8 *)("40M"), 3))
- bandwidth = HT_CHANNEL_WIDTH_20_40;
- else if (_rtl8822be_eq_n_byte(pbandwidth, (u8 *)("80M"), 3))
- bandwidth = HT_CHANNEL_WIDTH_80;
- else if (_rtl8822be_eq_n_byte(pbandwidth, (u8 *)("160M"), 4))
- bandwidth = 3;
-
- if (_rtl8822be_eq_n_byte(pband, (u8 *)("2.4G"), 4)) {
- ret = _rtl8822be_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_2_4G,
- channel);
-
- if (ret == -1)
- return;
-
- channel_index = ret;
-
- prev_power_limit =
- rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
- [rate_section][channel_index]
- [RF90_PATH_A];
-
- if (power_limit < prev_power_limit)
- rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
- [rate_section][channel_index]
- [RF90_PATH_A] = power_limit;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "2.4G [regula %d][bw %d][sec %d][chnl %d][val %d]\n",
- regulation, bandwidth, rate_section, channel_index,
- rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
- [rate_section][channel_index]
- [RF90_PATH_A]);
- } else if (_rtl8822be_eq_n_byte(pband, (u8 *)("5G"), 2)) {
- ret = _rtl8822be_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_5G,
- channel);
-
- if (ret == -1)
- return;
-
- channel_index = ret;
-
- prev_power_limit =
- rtlphy->txpwr_limit_5g[regulation][bandwidth]
- [rate_section][channel_index]
- [RF90_PATH_A];
-
- if (power_limit < prev_power_limit)
- rtlphy->txpwr_limit_5g[regulation][bandwidth]
- [rate_section][channel_index]
- [RF90_PATH_A] = power_limit;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "5G: [regul %d][bw %d][sec %d][chnl %d][val %d]\n",
- regulation, bandwidth, rate_section, channel,
- rtlphy->txpwr_limit_5g[regulation][bandwidth]
- [rate_section][channel_index]
- [RF90_PATH_A]);
-
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Cannot recognize the band info in %s\n", pband);
- return;
- }
-}
-
-bool rtl8822be_load_txpower_by_rate(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool rtstatus = true;
-
- _rtl8822be_phy_init_tx_power_by_rate(hw);
-
- rtstatus = rtlpriv->phydm.ops->phydm_load_txpower_by_rate(rtlpriv);
-
- if (!rtstatus) {
- pr_err("BB_PG Reg Fail!!\n");
- return false;
- }
-
- _rtl8822be_phy_txpower_by_rate_configuration(hw);
-
- return true;
-}
-
-bool rtl8822be_load_txpower_limit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
- bool rtstatus = true;
-
- _rtl8822be_phy_init_txpower_limit(hw);
-
- if (rtlefuse->eeprom_regulatory != 1)
- return true;
-
- rtstatus = rtlpriv->phydm.ops->phydm_load_txpower_limit(rtlpriv);
-
- if (!rtstatus) {
- pr_err("RF TxPwr Limit Fail!!\n");
- return false;
- }
-
- _rtl8822be_phy_convert_txpower_limit_to_power_index(hw);
-
- return true;
-}
-
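-/* Each byte of 'value' encodes one per-rate power value as two decimal
- * digits: the high nibble holds the tens digit, the low nibble the units.
- */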
-static void _rtl8822be_get_rate_values_of_tx_power_by_rate(
- struct ieee80211_hw *hw, u32 reg_addr, u32 bit_mask, u32 value,
- u8 *rate, s8 *pwr_by_rate_val, u8 *rate_num)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 /*index = 0,*/ i = 0;
-
- switch (reg_addr) {
- case 0xE00: /*rTxAGC_A_Rate18_06:*/
- case 0x830: /*rTxAGC_B_Rate18_06:*/
- rate[0] = DESC_RATE6M;
- rate[1] = DESC_RATE9M;
- rate[2] = DESC_RATE12M;
- rate[3] = DESC_RATE18M;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xE04: /*rTxAGC_A_Rate54_24:*/
- case 0x834: /*rTxAGC_B_Rate54_24:*/
- rate[0] = DESC_RATE24M;
- rate[1] = DESC_RATE36M;
- rate[2] = DESC_RATE48M;
- rate[3] = DESC_RATE54M;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xE08: /*rTxAGC_A_CCK1_Mcs32:*/
- rate[0] = DESC_RATE1M;
- pwr_by_rate_val[0] = (s8)((((value >> (8 + 4)) & 0xF)) * 10 +
- ((value >> 8) & 0xF));
- *rate_num = 1;
- break;
-
- case 0x86C: /*rTxAGC_B_CCK11_A_CCK2_11:*/
- if (bit_mask == 0xffffff00) {
- rate[0] = DESC_RATE2M;
- rate[1] = DESC_RATE5_5M;
- rate[2] = DESC_RATE11M;
- for (i = 1; i < 4; ++i) {
- pwr_by_rate_val[i - 1] = (s8)(
- (((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 3;
- } else if (bit_mask == 0x000000ff) {
- rate[0] = DESC_RATE11M;
- pwr_by_rate_val[0] = (s8)((((value >> 4) & 0xF)) * 10 +
- (value & 0xF));
- *rate_num = 1;
- }
- break;
-
- case 0xE10: /*rTxAGC_A_Mcs03_Mcs00:*/
- case 0x83C: /*rTxAGC_B_Mcs03_Mcs00:*/
- rate[0] = DESC_RATEMCS0;
- rate[1] = DESC_RATEMCS1;
- rate[2] = DESC_RATEMCS2;
- rate[3] = DESC_RATEMCS3;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xE14: /*rTxAGC_A_Mcs07_Mcs04:*/
- case 0x848: /*rTxAGC_B_Mcs07_Mcs04:*/
- rate[0] = DESC_RATEMCS4;
- rate[1] = DESC_RATEMCS5;
- rate[2] = DESC_RATEMCS6;
- rate[3] = DESC_RATEMCS7;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xE18: /*rTxAGC_A_Mcs11_Mcs08:*/
- case 0x84C: /*rTxAGC_B_Mcs11_Mcs08:*/
- rate[0] = DESC_RATEMCS8;
- rate[1] = DESC_RATEMCS9;
- rate[2] = DESC_RATEMCS10;
- rate[3] = DESC_RATEMCS11;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xE1C: /*rTxAGC_A_Mcs15_Mcs12:*/
- case 0x868: /*rTxAGC_B_Mcs15_Mcs12:*/
- rate[0] = DESC_RATEMCS12;
- rate[1] = DESC_RATEMCS13;
- rate[2] = DESC_RATEMCS14;
- rate[3] = DESC_RATEMCS15;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
-
- break;
-
- case 0x838: /*rTxAGC_B_CCK1_55_Mcs32:*/
- rate[0] = DESC_RATE1M;
- rate[1] = DESC_RATE2M;
- rate[2] = DESC_RATE5_5M;
- for (i = 1; i < 4; ++i) {
- pwr_by_rate_val[i - 1] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 3;
- break;
-
- case 0xC20:
- case 0xE20:
- case 0x1820:
- case 0x1a20:
- rate[0] = DESC_RATE1M;
- rate[1] = DESC_RATE2M;
- rate[2] = DESC_RATE5_5M;
- rate[3] = DESC_RATE11M;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC24:
- case 0xE24:
- case 0x1824:
- case 0x1a24:
- rate[0] = DESC_RATE6M;
- rate[1] = DESC_RATE9M;
- rate[2] = DESC_RATE12M;
- rate[3] = DESC_RATE18M;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC28:
- case 0xE28:
- case 0x1828:
- case 0x1a28:
- rate[0] = DESC_RATE24M;
- rate[1] = DESC_RATE36M;
- rate[2] = DESC_RATE48M;
- rate[3] = DESC_RATE54M;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC2C:
- case 0xE2C:
- case 0x182C:
- case 0x1a2C:
- rate[0] = DESC_RATEMCS0;
- rate[1] = DESC_RATEMCS1;
- rate[2] = DESC_RATEMCS2;
- rate[3] = DESC_RATEMCS3;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC30:
- case 0xE30:
- case 0x1830:
- case 0x1a30:
- rate[0] = DESC_RATEMCS4;
- rate[1] = DESC_RATEMCS5;
- rate[2] = DESC_RATEMCS6;
- rate[3] = DESC_RATEMCS7;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC34:
- case 0xE34:
- case 0x1834:
- case 0x1a34:
- rate[0] = DESC_RATEMCS8;
- rate[1] = DESC_RATEMCS9;
- rate[2] = DESC_RATEMCS10;
- rate[3] = DESC_RATEMCS11;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC38:
- case 0xE38:
- case 0x1838:
- case 0x1a38:
- rate[0] = DESC_RATEMCS12;
- rate[1] = DESC_RATEMCS13;
- rate[2] = DESC_RATEMCS14;
- rate[3] = DESC_RATEMCS15;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC3C:
- case 0xE3C:
- case 0x183C:
- case 0x1a3C:
- rate[0] = DESC_RATEVHT1SS_MCS0;
- rate[1] = DESC_RATEVHT1SS_MCS1;
- rate[2] = DESC_RATEVHT1SS_MCS2;
- rate[3] = DESC_RATEVHT1SS_MCS3;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC40:
- case 0xE40:
- case 0x1840:
- case 0x1a40:
- rate[0] = DESC_RATEVHT1SS_MCS4;
- rate[1] = DESC_RATEVHT1SS_MCS5;
- rate[2] = DESC_RATEVHT1SS_MCS6;
- rate[3] = DESC_RATEVHT1SS_MCS7;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC44:
- case 0xE44:
- case 0x1844:
- case 0x1a44:
- rate[0] = DESC_RATEVHT1SS_MCS8;
- rate[1] = DESC_RATEVHT1SS_MCS9;
- rate[2] = DESC_RATEVHT2SS_MCS0;
- rate[3] = DESC_RATEVHT2SS_MCS1;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC48:
- case 0xE48:
- case 0x1848:
- case 0x1a48:
- rate[0] = DESC_RATEVHT2SS_MCS2;
- rate[1] = DESC_RATEVHT2SS_MCS3;
- rate[2] = DESC_RATEVHT2SS_MCS4;
- rate[3] = DESC_RATEVHT2SS_MCS5;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- case 0xC4C:
- case 0xE4C:
- case 0x184C:
- case 0x1a4C:
- rate[0] = DESC_RATEVHT2SS_MCS6;
- rate[1] = DESC_RATEVHT2SS_MCS7;
- rate[2] = DESC_RATEVHT2SS_MCS8;
- rate[3] = DESC_RATEVHT2SS_MCS9;
- for (i = 0; i < 4; ++i) {
- pwr_by_rate_val[i] =
- (s8)((((value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((value >> (i * 8)) & 0xF));
- }
- *rate_num = 4;
- break;
-
- default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Invalid reg_addr 0x%x in %s()\n", reg_addr, __func__);
- break;
- }
-}
-
-void rtl8822be_store_tx_power_by_rate(struct ieee80211_hw *hw, u32 band,
- u32 rfpath, u32 txnum, u32 regaddr,
- u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 i = 0, rates[4] = {0}, rate_num = 0;
- s8 pwr_by_rate_val[4] = {0};
-
- _rtl8822be_get_rate_values_of_tx_power_by_rate(
- hw, regaddr, bitmask, data, rates, pwr_by_rate_val, &rate_num);
-
- if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n",
- band);
- band = BAND_ON_2_4G;
- }
- if (rfpath >= MAX_RF_PATH) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n",
- rfpath);
- rfpath = MAX_RF_PATH - 1;
- }
- if (txnum >= MAX_RF_PATH) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n",
- txnum);
- txnum = MAX_RF_PATH - 1;
- }
-
- for (i = 0; i < rate_num; ++i) {
- u8 rate_idx = rates[i];
-
- if (IS_1T_RATE(rates[i]))
- txnum = RF_1TX;
- else if (IS_2T_RATE(rates[i]))
- txnum = RF_2TX;
- else
- WARN_ON(1);
-
- rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_idx] =
- pwr_by_rate_val[i];
-
- RT_TRACE(
- rtlpriv, COMP_INIT, DBG_LOUD,
- "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][rate_idx %d] = 0x%x\n",
- band, rfpath, txnum, rate_idx,
- rtlphy->tx_power_by_rate_offset[band][rfpath][txnum]
- [rate_idx]);
- }
-}
-
-static void
-_rtl8822be_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = RA_LSSIWRITE_8822B;
- rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = RB_LSSIWRITE_8822B;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RHSSIREAD_8822BE;
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RHSSIREAD_8822BE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RA_SIREAD_8822B;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RB_SIREAD_8822B;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = RA_PIREAD_8822B;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = RB_PIREAD_8822B;
-}
-
-void rtl8822be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 txpwr_level;
- long txpwr_dbm;
-
- txpwr_level = rtlphy->cur_cck_txpwridx;
- txpwr_dbm = _rtl8822be_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
- txpwr_level);
- txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (_rtl8822be_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
- txpwr_dbm)
- txpwr_dbm = _rtl8822be_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
- txpwr_level);
- txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (_rtl8822be_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
- txpwr_level) > txpwr_dbm)
- txpwr_dbm = _rtl8822be_phy_txpwr_idx_to_dbm(
- hw, WIRELESS_MODE_N_24G, txpwr_level);
- *powerlevel = txpwr_dbm;
-}
-
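-/* Map a channel number to its table index (2.4G: channel - 1, 5G: index
- * into rtl_channel5g[]); returns true when the channel is in 2.4G.
- */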
-static bool _rtl8822be_phy_get_chnl_index(u8 channel, u8 *chnl_index)
-{
- u8 rtl_channel5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
- 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
- 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
- 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
- 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
- 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
- 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
- u8 i = 0;
- bool in_24g = true;
-
- if (channel <= 14) {
- in_24g = true;
- *chnl_index = channel - 1;
- } else {
- in_24g = false;
-
- for (i = 0; i < CHANNEL_MAX_NUMBER_5G; ++i) {
- if (rtl_channel5g[i] == channel) {
- *chnl_index = i;
- return in_24g;
- }
- }
- }
- return in_24g;
-}
-
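-/* The world-wide limit is the most restrictive (minimum) limit across
- * all regulatory domains in the table.
- */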
-static char _rtl8822be_phy_get_world_wide_limit(char *limit_table)
-{
- char min = limit_table[0];
- u8 i = 0;
-
- for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- if (limit_table[i] < min)
- min = limit_table[i];
- }
- return min;
-}
-
-static char _rtl8822be_phy_get_txpower_limit(struct ieee80211_hw *hw, u8 band,
- enum ht_channel_width bandwidth,
- enum radio_path rf_path, u8 rate,
- u8 channel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- short regulation = -1, rate_section = -1, channel_index = -1;
- char power_limit = MAX_POWER_INDEX;
-
- if (rtlefuse->eeprom_regulatory == 2)
- return MAX_POWER_INDEX;
-
- regulation = TXPWR_LMT_WW;
-
- switch (rate) {
- case DESC_RATE1M:
- case DESC_RATE2M:
- case DESC_RATE5_5M:
- case DESC_RATE11M:
- rate_section = CCK;
- break;
-
- case DESC_RATE6M:
- case DESC_RATE9M:
- case DESC_RATE12M:
- case DESC_RATE18M:
- case DESC_RATE24M:
- case DESC_RATE36M:
- case DESC_RATE48M:
- case DESC_RATE54M:
- rate_section = OFDM;
- break;
-
- case DESC_RATEMCS0:
- case DESC_RATEMCS1:
- case DESC_RATEMCS2:
- case DESC_RATEMCS3:
- case DESC_RATEMCS4:
- case DESC_RATEMCS5:
- case DESC_RATEMCS6:
- case DESC_RATEMCS7:
- rate_section = HT_MCS0_MCS7;
- break;
-
- case DESC_RATEMCS8:
- case DESC_RATEMCS9:
- case DESC_RATEMCS10:
- case DESC_RATEMCS11:
- case DESC_RATEMCS12:
- case DESC_RATEMCS13:
- case DESC_RATEMCS14:
- case DESC_RATEMCS15:
- rate_section = HT_MCS8_MCS15;
- break;
-
- case DESC_RATEVHT1SS_MCS0:
- case DESC_RATEVHT1SS_MCS1:
- case DESC_RATEVHT1SS_MCS2:
- case DESC_RATEVHT1SS_MCS3:
- case DESC_RATEVHT1SS_MCS4:
- case DESC_RATEVHT1SS_MCS5:
- case DESC_RATEVHT1SS_MCS6:
- case DESC_RATEVHT1SS_MCS7:
- case DESC_RATEVHT1SS_MCS8:
- case DESC_RATEVHT1SS_MCS9:
- rate_section = VHT_1SSMCS0_1SSMCS9;
- break;
-
- case DESC_RATEVHT2SS_MCS0:
- case DESC_RATEVHT2SS_MCS1:
- case DESC_RATEVHT2SS_MCS2:
- case DESC_RATEVHT2SS_MCS3:
- case DESC_RATEVHT2SS_MCS4:
- case DESC_RATEVHT2SS_MCS5:
- case DESC_RATEVHT2SS_MCS6:
- case DESC_RATEVHT2SS_MCS7:
- case DESC_RATEVHT2SS_MCS8:
- case DESC_RATEVHT2SS_MCS9:
- rate_section = VHT_2SSMCS0_2SSMCS9;
- break;
-
- default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Wrong rate 0x%x\n",
- rate);
- break;
- }
-
- if (band == BAND_ON_5G && rate_section == 0)
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Wrong rate 0x%x: No CCK in 5G Band\n", rate);
-
- /* workaround for wrong index combination to obtain tx power limit,
- * OFDM only exists in BW 20M
- */
- if (rate_section == 1)
- bandwidth = 0;
-
- /* workaround for wrong index combination to obtain tx power limit,
- * CCK table will only be given in BW 20M
- */
- if (rate_section == 0)
- bandwidth = 0;
-
- /* workaround for wrong index combination to obtain tx power limit,
- * HT on 80M will reference to HT on 40M
- */
- if ((rate_section == 2 || rate_section == 3) && band == BAND_ON_5G &&
- bandwidth == 2)
- bandwidth = 1;
-
- if (band == BAND_ON_2_4G)
- channel_index = _rtl8822be_phy_get_chnl_idx_of_txpwr_lmt(
- hw, BAND_ON_2_4G, channel);
- else if (band == BAND_ON_5G)
- channel_index = _rtl8822be_phy_get_chnl_idx_of_txpwr_lmt(
- hw, BAND_ON_5G, channel);
- else if (band == BAND_ON_BOTH)
- ; /* BAND_ON_BOTH don't care temporarily */
-
- if (band >= BANDMAX || regulation == -1 || bandwidth == -1 ||
- rate_section == -1 || channel_index == -1) {
- RT_TRACE(
- rtlpriv, COMP_POWER, DBG_LOUD,
- "Wrong index value to access power limit table [band %d][regulation %d][bandwidth %d][rf_path %d][rate_section %d][chnl %d]\n",
- band, regulation, bandwidth, rf_path, rate_section,
- channel_index);
- return MAX_POWER_INDEX;
- }
-
- if (band == BAND_ON_2_4G) {
- char limits[10] = {0};
- u8 i = 0;
-
- for (i = 0; i < 4; ++i)
- limits[i] = rtlphy->txpwr_limit_2_4g[i][bandwidth]
- [rate_section]
- [channel_index]
- [rf_path];
-
- power_limit =
- (regulation == TXPWR_LMT_WW) ?
- _rtl8822be_phy_get_world_wide_limit(limits) :
- rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
- [rate_section]
- [channel_index]
- [rf_path];
-
- } else if (band == BAND_ON_5G) {
- char limits[10] = {0};
- u8 i = 0;
-
- for (i = 0; i < MAX_REGULATION_NUM; ++i)
- limits[i] =
- rtlphy->txpwr_limit_5g[i][bandwidth]
- [rate_section]
- [channel_index][rf_path];
-
- power_limit =
- (regulation == TXPWR_LMT_WW) ?
- _rtl8822be_phy_get_world_wide_limit(limits) :
- rtlphy->txpwr_limit_5g[regulation]
- [bandwidth]
- [rate_section]
- [channel_index][rf_path];
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "No power limit table of the specified band\n");
- }
-
- return power_limit;
-}
-
-static char
-_rtl8822be_phy_get_txpower_by_rate(struct ieee80211_hw *hw, u8 band, u8 path,
- u8 rate /* enum rtl_desc8822b_rate */)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 tx_num;
- char tx_pwr_diff = 0;
-
- if (band != BAND_ON_2_4G && band != BAND_ON_5G)
- return tx_pwr_diff;
-
- if (path > RF90_PATH_B)
- return tx_pwr_diff;
-
- if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9))
- tx_num = RF_2TX;
- else
- tx_num = RF_1TX;
-
- tx_pwr_diff = (char)(rtlphy->tx_power_by_rate_offset[band][path][tx_num]
- [rate] &
- 0xff);
-
- return tx_pwr_diff;
-}
-
-u8 rtl8822be_get_txpower_index(struct ieee80211_hw *hw, u8 path, u8 rate,
- u8 bandwidth, u8 channel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 index = (channel - 1);
- u8 txpower = 0;
- bool in_24g = false;
- char limit;
- char powerdiff_byrate = 0;
-
- if ((rtlhal->current_bandtype == BAND_ON_2_4G &&
- (channel > 14 || channel < 1)) ||
- (rtlhal->current_bandtype == BAND_ON_5G && channel <= 14)) {
- index = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Illegal channel!!\n");
- }
-
- /* 1. base tx power */
- in_24g = _rtl8822be_phy_get_chnl_index(channel, &index);
- if (in_24g) {
- if (RX_HAL_IS_CCK_RATE(rate))
- txpower = rtlefuse->txpwrlevel_cck[path][index];
- else if (rate >= DESC_RATE6M)
- txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
- else
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "invalid rate\n");
-
- if (rate >= DESC_RATE6M && rate <= DESC_RATE54M &&
- !RX_HAL_IS_CCK_RATE(rate))
- txpower += rtlefuse->txpwr_legacyhtdiff[path][TX_1S];
-
- if (bandwidth == HT_CHANNEL_WIDTH_20) {
- if ((rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower +=
- rtlefuse->txpwr_ht20diff[path][TX_1S];
- if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT2SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower +=
- rtlefuse->txpwr_ht20diff[path][TX_2S];
- } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
- if ((rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower +=
- rtlefuse->txpwr_ht40diff[path][TX_1S];
- if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT2SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower +=
- rtlefuse->txpwr_ht40diff[path][TX_2S];
- } else if (bandwidth == HT_CHANNEL_WIDTH_80) {
- if ((rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower +=
- rtlefuse->txpwr_ht40diff[path][TX_1S];
- if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT2SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower +=
- rtlefuse->txpwr_ht40diff[path][TX_2S];
- }
-
- } else {
- if (rate >= DESC_RATE6M)
- txpower = rtlefuse->txpwr_5g_bw40base[path][index];
- else
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_WARNING,
- "INVALID Rate.\n");
-
- if (rate >= DESC_RATE6M && rate <= DESC_RATE54M &&
- !RX_HAL_IS_CCK_RATE(rate))
- txpower += rtlefuse->txpwr_5g_ofdmdiff[path][TX_1S];
-
- if (bandwidth == HT_CHANNEL_WIDTH_20) {
- if ((rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower += rtlefuse->txpwr_5g_bw20diff[path]
- [TX_1S];
- if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT2SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower += rtlefuse->txpwr_5g_bw20diff[path]
- [TX_2S];
- } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
- if ((rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower += rtlefuse->txpwr_5g_bw40diff[path]
- [TX_1S];
- if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT2SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower += rtlefuse->txpwr_5g_bw40diff[path]
- [TX_2S];
- } else if (bandwidth == HT_CHANNEL_WIDTH_80) {
- u8 i = 0;
-
- for (i = 0; i < sizeof(rtl_channel5g_80m) / sizeof(u8);
- ++i)
- if (rtl_channel5g_80m[i] == channel)
- index = i;
-
- txpower = rtlefuse->txpwr_5g_bw80base[path][index];
-
- if ((rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower += rtlefuse->txpwr_5g_bw80diff[path]
- [TX_1S];
- if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
- (rate >= DESC_RATEVHT2SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9))
- txpower += rtlefuse->txpwr_5g_bw80diff[path]
- [TX_2S];
- }
- }
-
- /* 2. tx power by rate */
- if (rtlefuse->eeprom_regulatory != 2)
- powerdiff_byrate = _rtl8822be_phy_get_txpower_by_rate(
- hw, (u8)(!in_24g), path, rate);
-
- /* 3. tx power limit */
- if (rtlefuse->eeprom_regulatory == 1)
- limit = _rtl8822be_phy_get_txpower_limit(
- hw, (u8)(!in_24g), bandwidth, path, rate,
- channel);
- else
- limit = MAX_POWER_INDEX;
-
- /* ----- */
- powerdiff_byrate = powerdiff_byrate > limit ? limit : powerdiff_byrate;
-
- txpower += powerdiff_byrate;
-
- if (txpower > MAX_POWER_INDEX)
- txpower = MAX_POWER_INDEX;
-
- return txpower;
-}
-
-static void _rtl8822be_phy_set_txpower_index(struct ieee80211_hw *hw,
- u8 power_index, u8 path, u8 rate)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 shift = 0;
- static u32 index;
-
- /*
- * For 8822B, the phydm API takes a 4-byte TXAGC value, so the
- * driver must pack every four 1-byte power indexes into one
- * 4-byte value before handing it to phydm.
- */
- shift = rate & 0x03;
- index |= ((u32)power_index << (shift * 8));
-
- if (shift == 3) {
- rate = rate - 3;
-
- if (!rtlpriv->phydm.ops->phydm_write_txagc(rtlpriv, index, path,
- rate)) {
- RT_TRACE(rtlpriv, COMP_TXAGC, DBG_LOUD,
- "%s(index:%d, rfpath:%d, rate:0x%02x) fail\n",
- __func__, index, path, rate);
-
- WARN_ON(1);
- }
- index = 0;
- }
-}
-
-static void _rtl8822be_phy_set_txpower_level_by_path(struct ieee80211_hw *hw,
- u8 *array, u8 path,
- u8 channel, u8 size)
-{
- struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
- u8 i;
- u8 power_index;
-
- for (i = 0; i < size; i++) {
- power_index = rtl8822be_get_txpower_index(
- hw, path, array[i], rtlphy->current_chan_bw, channel);
- _rtl8822be_phy_set_txpower_index(hw, power_index, path,
- array[i]);
- }
-}
-
-void rtl8822be_phy_set_txpower_level_by_path(struct ieee80211_hw *hw,
- u8 channel, u8 path)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- /*
- * The order below is *VERY* important, because
- * _rtl8822be_phy_set_txpower_index() only performs the actual
- * write once every four power values.
- */
- if (rtlhal->current_bandtype == BAND_ON_2_4G)
- _rtl8822be_phy_set_txpower_level_by_path(
- hw, cck_rates, path, channel, sizes_of_cck_retes);
- _rtl8822be_phy_set_txpower_level_by_path(hw, ofdm_rates, path, channel,
- sizes_of_ofdm_retes);
- _rtl8822be_phy_set_txpower_level_by_path(hw, ht_rates_1t, path, channel,
- sizes_of_ht_retes_1t);
- _rtl8822be_phy_set_txpower_level_by_path(hw, ht_rates_2t, path, channel,
- sizes_of_ht_retes_2t);
- _rtl8822be_phy_set_txpower_level_by_path(hw, vht_rates_1t, path,
- channel, sizes_of_vht_retes);
- _rtl8822be_phy_set_txpower_level_by_path(hw, vht_rates_2t, path,
- channel, sizes_of_vht_retes);
-}
-
-void rtl8822be_phy_set_tx_power_index_by_rs(struct ieee80211_hw *hw, u8 channel,
- u8 path, enum rate_section rs)
-{
- struct {
- u8 *array;
- u8 size;
- } rs_ref[MAX_RATE_SECTION] = {
- {cck_rates, sizes_of_cck_retes},
- {ofdm_rates, sizes_of_ofdm_retes},
- {ht_rates_1t, sizes_of_ht_retes_1t},
- {ht_rates_2t, sizes_of_ht_retes_2t},
- {vht_rates_1t, sizes_of_vht_retes},
- {vht_rates_2t, sizes_of_vht_retes},
- };
-
- if (rs >= MAX_RATE_SECTION)
- return;
-
- _rtl8822be_phy_set_txpower_level_by_path(hw, rs_ref[rs].array, path,
- channel, rs_ref[rs].size);
-}
-
-void rtl8822be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 path = 0;
-
- for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; ++path)
- rtl8822be_phy_set_txpower_level_by_path(hw, channel, path);
-}
-
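-/* Convert a TX power index to dBm: each index step is 0.5 dB, with a
- * wireless-mode dependent offset (-7 for CCK/B mode, -8 otherwise).
- */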
-static long _rtl8822be_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode,
- u8 txpwridx)
-{
- long offset;
- long pwrout_dbm;
-
- switch (wirelessmode) {
- case WIRELESS_MODE_B:
- offset = -7;
- break;
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- offset = -8;
- break;
- default:
- offset = -8;
- break;
- }
- pwrout_dbm = txpwridx / 2 + offset;
- return pwrout_dbm;
-}
-
-void rtl8822be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP_BAND0:
- iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_BACKUP_BAND1:
- iotype = IO_CMD_PAUSE_BAND1_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- pr_err("Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
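-/* Derive the VHT_DATA_SC_* primary sub-channel index from the current
- * 40MHz/80MHz primary channel offsets.
- */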
-static u8 _rtl8822be_phy_get_pri_ch_id(struct rtl_priv *rtlpriv)
-{
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- u8 pri_ch_idx = 0;
-
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
- /* primary channel is at lower subband of 80MHz & 40MHz */
- if (mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER &&
- mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) {
- pri_ch_idx = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
- /* primary channel is at
- * lower subband of 80MHz & upper subband of 40MHz
- */
- } else if ((mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER) &&
- (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER)) {
- pri_ch_idx = VHT_DATA_SC_20_LOWER_OF_80MHZ;
- /* primary channel is at
- * upper subband of 80MHz & lower subband of 40MHz
- */
- } else if ((mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER) &&
- (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER)) {
- pri_ch_idx = VHT_DATA_SC_20_UPPER_OF_80MHZ;
- /* primary channel is at
- * upper subband of 80MHz & upper subband of 40MHz
- */
- } else if ((mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER) &&
- (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER)) {
- pri_ch_idx = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
- } else {
- if (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)
- pri_ch_idx = VHT_DATA_SC_40_LOWER_OF_80MHZ;
- else if (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER)
- pri_ch_idx = VHT_DATA_SC_40_UPPER_OF_80MHZ;
- }
- } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- /* primary channel is at upper subband of 40MHz */
- if (mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER)
- pri_ch_idx = VHT_DATA_SC_20_UPPER_OF_80MHZ;
- /* primary channel is at lower subband of 40MHz */
- else if (mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)
- pri_ch_idx = VHT_DATA_SC_20_LOWER_OF_80MHZ;
- }
-
- return pri_ch_idx;
-}
-
-void rtl8822be_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u8 tmp_bw = rtlphy->current_chan_bw;
-
- if (rtlphy->set_bwmode_inprogress)
- return;
- rtlphy->set_bwmode_inprogress = true;
- if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
- /* get primary channel index */
- u8 pri_ch_idx = _rtl8822be_phy_get_pri_ch_id(rtlpriv);
-
- /* 3.1 set MAC register */
- rtlpriv->halmac.ops->halmac_set_bandwidth(
- rtlpriv, rtlphy->current_channel, pri_ch_idx,
- rtlphy->current_chan_bw);
-
- /* 3.2 set BB/RF register */
- rtlpriv->phydm.ops->phydm_switch_bandwidth(
- rtlpriv, pri_ch_idx, rtlphy->current_chan_bw);
-
- if (!mac->act_scanning)
- rtlpriv->phydm.ops->phydm_iq_calibrate(rtlpriv);
-
- rtlphy->set_bwmode_inprogress = false;
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "FALSE driver sleep or unload\n");
- rtlphy->set_bwmode_inprogress = false;
- rtlphy->current_chan_bw = tmp_bw;
- }
-}
-
-u8 rtl8822be_phy_sw_chnl(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u32 timeout = 1000, timecount = 0;
- u8 channel = rtlphy->current_channel;
-
- if (rtlphy->sw_chnl_inprogress)
- return 0;
- if (rtlphy->set_bwmode_inprogress)
- return 0;
-
- if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
- return 0;
- }
- while (rtlphy->lck_inprogress && timecount < timeout) {
- mdelay(50);
- timecount += 50;
- }
-
- if (rtlphy->current_channel > 14)
- rtlhal->current_bandtype = BAND_ON_5G;
- else if (rtlphy->current_channel <= 14)
- rtlhal->current_bandtype = BAND_ON_2_4G;
-
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_switch_band_notify(
- rtlpriv, rtlhal->current_bandtype, mac->act_scanning);
- else
- rtlpriv->btcoexist.btc_ops->btc_switch_band_notify_wifi_only(
- rtlpriv, rtlhal->current_bandtype, mac->act_scanning);
-
- rtlpriv->phydm.ops->phydm_switch_band(rtlpriv, rtlphy->current_channel);
-
- rtlphy->sw_chnl_inprogress = true;
- if (channel == 0)
- channel = 1;
-
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d, band type is %d\n",
- rtlphy->current_channel, rtlhal->current_bandtype);
-
- rtlpriv->phydm.ops->phydm_switch_channel(rtlpriv,
- rtlphy->current_channel);
-
- rtlpriv->phydm.ops->phydm_clear_txpowertracking_state(rtlpriv);
-
- rtl8822be_phy_set_txpower_level(hw, rtlphy->current_channel);
-
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
- rtlphy->sw_chnl_inprogress = false;
- return 1;
-}
-
-bool rtl8822be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- bool postprocessing = false;
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n", iotype,
- rtlphy->set_io_inprogress);
- do {
- switch (iotype) {
- case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
- postprocessing = true;
- break;
- case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
- postprocessing = true;
- break;
- default:
- pr_err("switch case not process\n");
- break;
- }
- } while (false);
- if (postprocessing && !rtlphy->set_io_inprogress) {
- rtlphy->set_io_inprogress = true;
- rtlphy->current_io_type = iotype;
- } else {
- return false;
- }
- rtl8822be_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
- return true;
-}
-
-static void rtl8822be_phy_set_io(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
- switch (rtlphy->current_io_type) {
- case IO_CMD_RESUME_DM_BY_SCAN:
- break;
- case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- break;
- case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
- break;
- default:
- pr_err("switch case not process\n");
- break;
- }
- rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "(%#x)\n",
- rtlphy->current_io_type);
-}
-
-static void rtl8822be_phy_set_rf_on(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, REG_SPS0_CTRL_8822B, 0x2b);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN_8822B, 0xE3);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN_8822B, 0xE2);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN_8822B, 0xE3);
- rtl_write_byte(rtlpriv, REG_TXPAUSE_8822B, 0x00);
-}
-
-static bool _rtl8822be_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool bresult = true;
- u8 i, queue_id;
- struct rtl8192_tx_ring *ring = NULL;
-
- switch (rfpwr_state) {
- case ERFON:
- if (ppsc->rfpwr_state == ERFOFF &&
- RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
- bool rtstatus = false;
- u32 initialize_count = 0;
-
- do {
- initialize_count++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
- rtstatus = rtl_ps_enable_nic(hw);
- } while ((!rtstatus) && (initialize_count < 10));
- RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
- } else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON slept:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies));
- ppsc->last_awake_jiffies = jiffies;
- rtl8822be_phy_set_rf_on(hw);
- }
- if (mac->link_state == MAC80211_LINKED)
- rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK);
- else
- rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
- break;
- case ERFOFF:
- for (queue_id = 0, i = 0;
- queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
- ring = &pcipriv->dev.tx_ring[queue_id];
- if (queue_id == BEACON_QUEUE ||
- skb_queue_len(&ring->queue) == 0) {
- queue_id++;
- continue;
- } else {
- RT_TRACE(
- rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
-
- udelay(10);
- i++;
- }
- if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(
- rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x, queue_id,
- skb_queue_len(&ring->queue));
- break;
- }
- }
-
- if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
- rtl_ps_disable_nic(hw);
- RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
- } else {
- if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
- rtlpriv->cfg->ops->led_control(hw,
- LED_CTL_NO_LINK);
- } else {
- rtlpriv->cfg->ops->led_control(
- hw, LED_CTL_POWER_OFF);
- }
- }
- break;
- default:
-		pr_err("switch case not processed\n");
- bresult = false;
- break;
- }
- if (bresult)
- ppsc->rfpwr_state = rfpwr_state;
- return bresult;
-}
-
-bool rtl8822be_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state)
-{
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- bool bresult = false;
-
- if (rfpwr_state == ppsc->rfpwr_state)
- return bresult;
- bresult = _rtl8822be_phy_set_rf_power_state(hw, rfpwr_state);
- return bresult;
-}
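For readers skimming the removed phy.c: the final wrapper above forwards a power-state request only when it would actually change state, and the ERFOFF branch bounds how long it waits for busy TX queues before dozing. The fragment below is a minimal, self-contained sketch of that pattern only; every identifier in it is made up and it is not part of the deleted driver.

/*
 * Illustrative sketch: skip redundant RF state transitions and bound the
 * wait for pending TX work, mirroring the shape of the code above.
 * All names (fake_radio, queue_depth, ...) are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum rf_state { RF_ON, RF_OFF };

struct fake_radio {
	enum rf_state state;
	int queue_depth;	/* pending frames in a pretend TX ring */
};

#define MAX_DRAIN_POLLS 64	/* plays the role of MAX_DOZE_WAITING_TIMES_9x */

static bool radio_set_state(struct fake_radio *r, enum rf_state new_state)
{
	int polls = 0;

	if (new_state == r->state)	/* same early-out as the wrapper above */
		return false;

	if (new_state == RF_OFF) {
		/* bounded busy-wait until the queue drains, as in the ERFOFF case */
		while (r->queue_depth > 0 && polls < MAX_DRAIN_POLLS) {
			r->queue_depth--;	/* stand-in for udelay(10) + HW progress */
			polls++;
		}
		if (r->queue_depth)
			printf("gave up after %d polls, %d frames left\n",
			       polls, r->queue_depth);
	}

	r->state = new_state;
	return true;
}

int main(void)
{
	struct fake_radio r = { .state = RF_ON, .queue_depth = 3 };

	radio_set_state(&r, RF_OFF);	/* drains, then powers off */
	radio_set_state(&r, RF_OFF);	/* no-op: already off */
	return 0;
}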
diff --git a/drivers/staging/rtlwifi/rtl8822be/phy.h b/drivers/staging/rtlwifi/rtl8822be/phy.h
deleted file mode 100644
index f33b086a0167..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/phy.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8822BE_PHY_H__
-#define __RTL8822BE_PHY_H__
-
-/* It must always be set to 4; otherwise, the efuse
- * table read sequence will be wrong.
- */
-#define MAX_TX_COUNT 4
-#define TX_1S 0
-#define TX_2S 1
-#define TX_3S 2
-#define TX_4S 3
-
-#define MAX_POWER_INDEX 0x3F
-
-#define MAX_PRECMD_CNT 16
-#define MAX_RFDEPENDCMD_CNT 16
-#define MAX_POSTCMD_CNT 16
-
-#define MAX_DOZE_WAITING_TIMES_9x 64
-
-#define RT_CANNOT_IO(hw) false
-#define HIGHPOWER_RADIOA_ARRAYLEN 22
-
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 10
-#define index_mapping_NUM 15
-
-#define APK_BB_REG_NUM 5
-#define APK_AFE_REG_NUM 16
-#define APK_CURVE_REG_NUM 4
-#define PATH_NUM 2
-
-#define LOOP_LIMIT 5
-#define MAX_STALL_TIME 50
-#define ANTENNA_DIVERSITY_VALUE 0x80
-#define MAX_TXPWR_IDX_NMODE_92S 63
-#define RESET_CNT_LIMIT 3
-
-#define IQK_MAC_REG_NUM 4
-
-#define RF6052_MAX_PATH 2
-
-#define CT_OFFSET_MAC_ADDR 0X16
-
-#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
-#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
-#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
-#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
-#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
-
-#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
-#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
-
-#define CT_OFFSET_CHANNEL_PLAH 0x75
-#define CT_OFFSET_THERMAL_METER 0x78
-#define CT_OFFSET_RF_OPTION 0x79
-#define CT_OFFSET_VERSION 0x7E
-#define CT_OFFSET_CUSTOMER_ID 0x7F
-
-#define RTL8822BE_MAX_PATH_NUM 2
-
-#define TARGET_CHNL_NUM_2G_5G_8822B 59
-
-u32 rtl8822be_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
- u32 bitmask);
-void rtl8822be_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
- u32 data);
-u32 rtl8822be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask);
-void rtl8822be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data);
-bool rtl8822be_phy_bb_config(struct ieee80211_hw *hw);
-bool rtl8822be_phy_rf_config(struct ieee80211_hw *hw);
-bool rtl8822be_halmac_cb_init_mac_register(struct rtl_priv *rtlpriv);
-bool rtl8822be_halmac_cb_init_bb_rf_register(struct rtl_priv *rtlpriv);
-void rtl8822be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
-void rtl8822be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-void rtl8822be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
-void rtl8822be_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-void rtl8822be_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-u8 rtl8822be_phy_sw_chnl(struct ieee80211_hw *hw);
-void rtl8822be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl8822be_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
-void rtl8822be_phy_lc_calibrate(struct ieee80211_hw *hw);
-void rtl8822be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
-bool rtl8822be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
-					     enum radio_path rfpath);
-bool rtl8822be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-bool rtl8822be_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
-void rtl8822be_phy_set_txpower_level_by_path(struct ieee80211_hw *hw,
- u8 channel, u8 path);
-void rtl8822be_do_iqk(struct ieee80211_hw *hw, u8 delta_thermal_index,
-		      u8 thermal_value, u8 threshold);
-void rtl8822be_reset_iqk_result(struct ieee80211_hw *hw);
-
-u8 rtl8822be_get_txpower_index(struct ieee80211_hw *hw, u8 path, u8 rate,
- u8 bandwidth, u8 channel);
-void rtl8822be_phy_set_tx_power_index_by_rs(struct ieee80211_hw *hw, u8 channel,
- u8 path, enum rate_section rs);
-void rtl8822be_store_tx_power_by_rate(struct ieee80211_hw *hw, u32 band,
- u32 rfpath, u32 txnum, u32 regaddr,
- u32 bitmask, u32 data);
-void rtl8822be_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregulation,
- u8 *pband, u8 *pbandwidth,
- u8 *prate_section, u8 *prf_path,
- u8 *pchannel, u8 *ppower_limit);
-bool rtl8822be_load_txpower_by_rate(struct ieee80211_hw *hw);
-bool rtl8822be_load_txpower_limit(struct ieee80211_hw *hw);
-
-#endif
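The header above exposes a bitmask-oriented register API: rtl8822be_phy_set_bb_reg()/_query_bb_reg() take a register address, a mask selecting a field, and a field value. As a rough, self-contained illustration of that convention only (none of these names exist in the driver, and the real accessors go through MMIO rather than an array), a masked write clears the field and shifts the new value up to the mask's lowest set bit:

/*
 * Toy register file plus mask-based field accessors, for illustration only.
 * reg_space[], bb_write_field() and bb_read_field() are all hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t reg_space[0x1000];	/* pretend BB register space */

static unsigned int mask_shift(uint32_t mask)
{
	unsigned int shift = 0;

	while (mask && !(mask & 1)) {	/* position of the lowest set bit */
		mask >>= 1;
		shift++;
	}
	return shift;
}

static void bb_write_field(uint32_t regaddr, uint32_t mask, uint32_t data)
{
	uint32_t val = reg_space[regaddr];

	val &= ~mask;					/* clear the field ... */
	val |= (data << mask_shift(mask)) & mask;	/* ... then insert the value */
	reg_space[regaddr] = val;
}

static uint32_t bb_read_field(uint32_t regaddr, uint32_t mask)
{
	return (reg_space[regaddr] & mask) >> mask_shift(mask);
}

int main(void)
{
	bb_write_field(0x820, 0x0000ff00, 0x3c);	/* write an 8-bit field */
	printf("field = 0x%x\n", (unsigned int)bb_read_field(0x820, 0x0000ff00));
	return 0;
}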
diff --git a/drivers/staging/rtlwifi/rtl8822be/reg.h b/drivers/staging/rtlwifi/rtl8822be/reg.h
deleted file mode 100644
index 8f0ec5b18c33..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/reg.h
+++ /dev/null
@@ -1,1642 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8822B_REG_H__
-#define __RTL8822B_REG_H__
-
-#include "../halmac/halmac_reg_8822b.h"
-#include "../halmac/halmac_bit_8822b.h"
-
-#define TXPKT_BUF_SELECT 0x69
-#define RXPKT_BUF_SELECT 0xA5
-#define DISABLE_TRXPKT_BUF_ACCESS 0x0
-
-/* Page 0 */
-#define REG_LEDCFG2_8822B 0x004E /* need review */
-#define REG_SPS0_CTRL_8822B 0x0011 /* need review: swlps */
-
-#define REG_EFUSE_ACCESS_8822B (REG_PMC_DBG_CTRL2_8822B + 3) /*0x00CF*/
-#define REG_AFE_XTAL_CTRL_8822B REG_AFE_CTRL1_8822B
-#define REG_AFE_PLL_CTRL_8822B REG_AFE_CTRL2_8822B
-
-/* Page 1 */
-
-#define MSR (REG_CR_8822B + 2)
-
-/* for MSR 0x102 */
-#define MSR_NOLINK 0x00
-#define MSR_ADHOC 0x01
-#define MSR_INFRA 0x02
-#define MSR_AP 0x03
-
-/*-----------------------------------------------------
- *
- * 0x0200h ~ 0x027Fh TXDMA Configuration
- *
- *-----------------------------------------------------
- */
-
-/*-----------------------------------------------------
- *
- * 0x0280h ~ 0x02FFh RXDMA Configuration
- *
- *-----------------------------------------------------
- */
-#define REG_RXDMA_CONTROL_8822B (REG_RXPKT_NUM_8822B + 2) /* 0x0286 */
-
-/*-----------------------------------------------------
- *
- * 0x0300h ~ 0x03FFh PCIe
- *
- *-----------------------------------------------------
- */
-
-/* REG_HIMR3_8822B */
-#define IMR_H2CDOK BIT_SETH2CDOK_MASK_8822B
-
-/* spec version 11
- *-----------------------------------------------------
- *
- * 0x0400h ~ 0x047Fh Protocol Configuration
- *
- *-----------------------------------------------------
- */
-
-#define REG_MAX_AGGR_NUM_8822B (REG_PROT_MODE_CTRL_8822B + 2) /*0x04CA*/
-
-/* for RRSR 0x440 */
-#define RRSR_RSC_OFFSET 21
-#define RRSR_SHORT_OFFSET 23
-#define RRSR_RSC_BW_40M 0x600000
-#define RRSR_RSC_UPSUBCHNL 0x400000
-#define RRSR_RSC_LOWSUBCHNL 0x200000
-#define RRSR_1M BIT(0)
-#define RRSR_2M BIT(1)
-#define RRSR_5_5M BIT(2)
-#define RRSR_11M BIT(3)
-#define RRSR_6M BIT(4)
-#define RRSR_9M BIT(5)
-#define RRSR_12M BIT(6)
-#define RRSR_18M BIT(7)
-#define RRSR_24M BIT(8)
-#define RRSR_36M BIT(9)
-#define RRSR_48M BIT(10)
-#define RRSR_54M BIT(11)
-#define RRSR_MCS0 BIT(12)
-#define RRSR_MCS1 BIT(13)
-#define RRSR_MCS2 BIT(14)
-#define RRSR_MCS3 BIT(15)
-#define RRSR_MCS4 BIT(16)
-#define RRSR_MCS5 BIT(17)
-#define RRSR_MCS6 BIT(18)
-#define RRSR_MCS7 BIT(19)
-
-#define RRSR_ALL_CCK (RRSR_1M | RRSR_2M | RRSR_5_5M | RRSR_11M)
-#define RRSR_ALL_OFDM_AG \
- (RRSR_6M | RRSR_9M | RRSR_12M | RRSR_18M | RRSR_24M | RRSR_36M | \
- RRSR_48M | RRSR_54M)
-
-/*-----------------------------------------------------
- *
- * 0x0500h ~ 0x05FFh EDCA Configuration
- *
- *-----------------------------------------------------
- */
-
-#define REG_SIFS_TRX_8822B (REG_SIFS_8822B + 2) /*0x0516*/
-
-/*-----------------------------------------------------
- *
- * 0x0600h ~ 0x07FFh WMAC Configuration
- *
- *-----------------------------------------------------
- */
-
-#define RATR_1M 0x00000001
-#define RATR_2M 0x00000002
-#define RATR_55M 0x00000004
-#define RATR_11M 0x00000008
-#define RATR_6M 0x00000010
-#define RATR_9M 0x00000020
-#define RATR_12M 0x00000040
-#define RATR_18M 0x00000080
-#define RATR_24M 0x00000100
-#define RATR_36M 0x00000200
-#define RATR_48M 0x00000400
-#define RATR_54M 0x00000800
-#define RATR_MCS0 0x00001000
-#define RATR_MCS1 0x00002000
-#define RATR_MCS2 0x00004000
-#define RATR_MCS3 0x00008000
-#define RATR_MCS4 0x00010000
-#define RATR_MCS5 0x00020000
-#define RATR_MCS6 0x00040000
-#define RATR_MCS7 0x00080000
-#define RATR_MCS8 0x00100000
-#define RATR_MCS9 0x00200000
-#define RATR_MCS10 0x00400000
-#define RATR_MCS11 0x00800000
-#define RATR_MCS12 0x01000000
-#define RATR_MCS13 0x02000000
-#define RATR_MCS14 0x04000000
-#define RATR_MCS15 0x08000000
-
-#define RATE_1M BIT(0)
-#define RATE_2M BIT(1)
-#define RATE_5_5M BIT(2)
-#define RATE_11M BIT(3)
-#define RATE_6M BIT(4)
-#define RATE_9M BIT(5)
-#define RATE_12M BIT(6)
-#define RATE_18M BIT(7)
-#define RATE_24M BIT(8)
-#define RATE_36M BIT(9)
-#define RATE_48M BIT(10)
-#define RATE_54M BIT(11)
-#define RATE_MCS0 BIT(12)
-#define RATE_MCS1 BIT(13)
-#define RATE_MCS2 BIT(14)
-#define RATE_MCS3 BIT(15)
-#define RATE_MCS4 BIT(16)
-#define RATE_MCS5 BIT(17)
-#define RATE_MCS6 BIT(18)
-#define RATE_MCS7 BIT(19)
-#define RATE_MCS8 BIT(20)
-#define RATE_MCS9 BIT(21)
-#define RATE_MCS10 BIT(22)
-#define RATE_MCS11 BIT(23)
-#define RATE_MCS12 BIT(24)
-#define RATE_MCS13 BIT(25)
-#define RATE_MCS14 BIT(26)
-#define RATE_MCS15 BIT(27)
-
-/* CAM definition */
-
-#define CAM_NONE 0x0
-#define CAM_WEP40 0x01
-#define CAM_TKIP 0x02
-#define CAM_AES 0x04
-#define CAM_WEP104 0x05
-
-/*#define TOTAL_CAM_ENTRY 64*/
-/*#define HALF_CAM_ENTRY 32*/
-
-#define CAM_WRITE BIT(16)
-#define CAM_READ 0x00000000
-#define CAM_POLLINIG BIT(31)
-
-/*********************************************
- * 8822BE IMR/ISR bits
- *********************************************
- */
-#define IMR_DISABLED 0x0
-/* IMR DW0(0x0060-0063) Bit 0-31 */
-#define IMR_TIMER2 BIT(31)
-#define IMR_TIMER1 BIT(30)
-#define IMR_PSTIMEOUT BIT(29)
-#define IMR_GTINT4 BIT(28)
-#define IMR_GTINT3 BIT(27)
-#define IMR_TBDER BIT(26)
-#define IMR_TBDOK BIT(25)
-#define IMR_TSF_BIT32_TOGGLE BIT(24)
-#define IMR_BCNDMAINT0 BIT(20)
-#define IMR_BCNDOK0 BIT(16)
-#define IMR_HSISR_IND_ON_INT BIT(15)
-#define IMR_BCNDMAINT_E BIT(14)
-#define IMR_ATIMEND BIT(12)
-#define IMR_HISR1_IND_INT BIT(11)
-#define IMR_C2HCMD BIT(10)
-#define IMR_CPWM2 BIT(9)
-#define IMR_CPWM BIT(8)
-#define IMR_HIGHDOK BIT(7)
-#define IMR_MGNTDOK BIT(6)
-#define IMR_BKDOK BIT(5)
-#define IMR_BEDOK BIT(4)
-#define IMR_VIDOK BIT(3)
-#define IMR_VODOK BIT(2)
-#define IMR_RDU BIT(1)
-#define IMR_ROK BIT(0)
-
-/* IMR DW1(0x00B4-00B7) Bit 0-31 */
-#define IMR_TXFIFO_TH_INT_8822B BIT_TXFIFO_TH_INT_8822B
-#define IMR_BTON_STS_UPDATE_MASK_8822B BIT_BTON_STS_UPDATE_MASK_8822B
-#define IMR_MCUERR BIT(28)
-#define IMR_BCNDMAINT7 BIT(27)
-#define IMR_BCNDMAINT6 BIT(26)
-#define IMR_BCNDMAINT5 BIT(25)
-#define IMR_BCNDMAINT4 BIT(24)
-#define IMR_BCNDMAINT3 BIT(23)
-#define IMR_BCNDMAINT2 BIT(22)
-#define IMR_BCNDMAINT1 BIT(21)
-#define IMR_BCNDOK7 BIT(20)
-#define IMR_BCNDOK6 BIT(19)
-#define IMR_BCNDOK5 BIT(18)
-#define IMR_BCNDOK4 BIT(17)
-#define IMR_BCNDOK3 BIT(16)
-#define IMR_BCNDOK2 BIT(15)
-#define IMR_BCNDOK1 BIT(14)
-#define IMR_ATIMEND_E BIT(13)
-#define IMR_ATIMEND BIT(12)
-#define IMR_TXERR BIT(11)
-#define IMR_RXERR BIT(10)
-#define IMR_TXFOVW BIT(9)
-#define IMR_RXFOVW BIT(8)
-#define IMR_CPU_MGQ_TXDONE_MSK_8822B BIT_CPU_MGQ_TXDONE_MSK_8822B
-#define IMR_PS_TIMER_C_MSK_8822B BIT_PS_TIMER_C_MSK_8822B
-#define IMR_PS_TIMER_B_MSK_8822B BIT_PS_TIMER_B_MSK_8822B
-#define IMR_PS_TIMER_A_MSK_8822B BIT_PS_TIMER_A_MSK_8822B
-#define IMR_CPUMGQ_TX_TIMER_MSK_8822B BIT_CPUMGQ_TX_TIMER_MSK_8822B
-
-/*********************************************
- * 8822BE EFUSE definition
- *********************************************
- */
-#define HWSET_MAX_SIZE 1024
-#define EFUSE_MAX_SECTION 64
-#define EFUSE_REAL_CONTENT_LEN 1024
-#define EFUSE_OOB_PROTECT_BYTES 18
-
-#define EEPROM_DEFAULT_THERMALMETER 0x12
-
-#define RTL8822B_EEPROM_ID 0x8129
-
-#define PPG_BB_GAIN_2G_TXA_OFFSET_8822B 0xEE
-#define PPG_THERMAL_OFFSET_8822B 0xEF
-
-#define EEPROM_TX_PWR_INX_8822B 0x10
-
-#define EEPROM_CHANNEL_PLAN_8822B 0xB8
-#define EEPROM_XTAL_8822B 0xB9
-#define EEPROM_THERMAL_METER_8822B 0xBA
-#define EEPROM_IQK_LCK_8822B 0xBB
-#define EEPROM_2G_5G_PA_TYPE_8822B 0xBC
-/* PATH A & PATH B */
-#define EEPROM_2G_LNA_TYPE_GAIN_SEL_AB_8822B 0xBD
-/* PATH C & PATH D */
-#define EEPROM_2G_LNA_TYPE_GAIN_SEL_CD_8822B 0xBE
-/* PATH A & PATH B */
-#define EEPROM_5G_LNA_TYPE_GAIN_SEL_AB_8822B 0xBF
-/* PATH C & PATH D */
-#define EEPROM_5G_LNA_TYPE_GAIN_SEL_CD_8822B 0xC0
-
-#define EEPROM_RF_BOARD_OPTION_8822B 0xC1
-#define EEPROM_FEATURE_OPTION_8822B 0xC2
-#define EEPROM_RF_BT_SETTING_8822B 0xC3
-#define EEPROM_VERSION_8822B 0xC4
-#define EEPROM_CUSTOM_ID_8822B 0xC5
-#define EEPROM_TX_BBSWING_2G_8822B 0xC6
-#define EEPROM_TX_PWR_CALIBRATE_RATE_8822B 0xC8
-#define EEPROM_RF_ANTENNA_OPT_8822B 0xC9
-#define EEPROM_RFE_OPTION_8822B 0xCA
-#define EEPROM_COUNTRY_CODE_8822B 0xCB
-
-#define EEPROM_VID 0xD6
-#define EEPROM_DID 0xD8
-#define EEPROM_SVID 0xDA
-#define EEPROM_SMID 0xDC
-
-/* RTL8822BU */
-#define EEPROM_MAC_ADDR_8822BU 0x107
-#define EEPROM_VID_8822BU 0x100
-#define EEPROM_PID_8822BU 0x102
-#define EEPROM_USB_OPTIONAL_FUNCTION0_8822BU 0x104
-#define EEPROM_USB_MODE_8822BU 0x06
-
-/* RTL8822BS */
-#define EEPROM_MAC_ADDR_8822BS 0x11A
-
-/* RTL8822BE */
-#define EEPROM_MAC_ADDR_8822BE 0xD0
-
-/* ------------------------- */
-
-#define STOPBECON BIT(6)
-#define STOPHIGHT BIT(5)
-#define STOPMGT BIT(4)
-#define STOPVO BIT(3)
-#define STOPVI BIT(2)
-#define STOPBE BIT(1)
-#define STOPBK BIT(0)
-
-#define RCR_APPFCS BIT(31)
-#define RCR_APP_MIC BIT(30)
-#define RCR_APP_ICV BIT(29)
-#define RCR_APP_PHYST_RXFF BIT(28)
-#define RCR_APP_BA_SSN BIT(27)
-#define RCR_VHT_DACK BIT(26)
-#define RCR_ENMBID BIT(24)
-#define RCR_LSIGEN BIT(23)
-#define RCR_MFBEN BIT(22)
-#define RCR_HTC_LOC_CTRL BIT(14)
-#define RCR_AMF BIT(13)
-#define RCR_ACF BIT(12)
-#define RCR_ADF BIT(11)
-#define RCR_AICV BIT(9)
-#define RCR_ACRC32 BIT(8)
-#define RCR_CBSSID_BCN BIT(7)
-#define RCR_CBSSID_DATA BIT(6)
-#define RCR_CBSSID RCR_CBSSID_DATA
-#define RCR_APWRMGT BIT(5)
-#define RCR_ADD3 BIT(4)
-#define RCR_AB BIT(3)
-#define RCR_AM BIT(2)
-#define RCR_APM BIT(1)
-#define RCR_AAP BIT(0)
-#define RCR_MXDMA_OFFSET 8
-#define RCR_FIFO_OFFSET 13
-
-#define RSV_CTRL 0x001C
-#define RD_CTRL 0x0524
-
-#define REG_USB_INFO_8822B 0xFE17
-#define REG_USB_SPECIAL_OPTION_8822B 0xFE55
-#define REG_USB_DMA_AGG_TO_8822B 0xFE5B
-#define REG_USB_AGG_TO_8822B 0xFE5C
-#define REG_USB_AGG_TH_8822B 0xFE5D
-
-#define REG_USB_VID_8822B 0xFE60
-#define REG_USB_PID_8822B 0xFE62
-#define REG_USB_OPTIONAL_8822B 0xFE64
-#define REG_USB_CHIRP_K_8822B 0xFE65
-#define REG_USB_PHY_8822B 0xFE66
-#define REG_USB_MAC_ADDR_8822B 0xFE70
-#define REG_USB_HRPWM_8822B 0xFE58
-#define REG_USB_HCPWM_8822B 0xFE57
-
-#define SW18_FPWM BIT(3)
-
-#define ISO_MD2PP BIT(0)
-#define ISO_UA2USB BIT(1)
-#define ISO_UD2CORE BIT(2)
-#define ISO_PA2PCIE BIT(3)
-#define ISO_PD2CORE BIT(4)
-#define ISO_IP2MAC BIT(5)
-#define ISO_DIOP BIT(6)
-#define ISO_DIOE BIT(7)
-#define ISO_EB2CORE BIT(8)
-#define ISO_DIOR BIT(9)
-
-#define PWC_EV25V BIT(14)
-#define PWC_EV12V BIT(15)
-
-#define FEN_BBRSTB BIT(0)
-#define FEN_BB_GLB_RSTN BIT(1)
-#define FEN_USBA BIT(2)
-#define FEN_UPLL BIT(3)
-#define FEN_USBD BIT(4)
-#define FEN_DIO_PCIE BIT(5)
-#define FEN_PCIEA BIT(6)
-#define FEN_PPLL BIT(7)
-#define FEN_PCIED BIT(8)
-#define FEN_DIOE BIT(9)
-#define FEN_CPUEN BIT(10)
-#define FEN_DCORE BIT(11)
-#define FEN_ELDR BIT(12)
-#define FEN_DIO_RF BIT(13)
-#define FEN_HWPDN BIT(14)
-#define FEN_MREGEN BIT(15)
-
-#define PFM_LDALL BIT(0)
-#define PFM_ALDN BIT(1)
-#define PFM_LDKP BIT(2)
-#define PFM_WOWL BIT(3)
-#define EN_PDN BIT(4)
-#define PDN_PL BIT(5)
-#define APFM_ONMAC BIT(8)
-#define APFM_OFF BIT(9)
-#define APFM_RSM BIT(10)
-#define AFSM_HSUS BIT(11)
-#define AFSM_PCIE BIT(12)
-#define APDM_MAC BIT(13)
-#define APDM_HOST BIT(14)
-#define APDM_HPDN BIT(15)
-#define RDY_MACON BIT(16)
-#define SUS_HOST BIT(17)
-#define ROP_ALD BIT(20)
-#define ROP_PWR BIT(21)
-#define ROP_SPS BIT(22)
-#define SOP_MRST BIT(25)
-#define SOP_FUSE BIT(26)
-#define SOP_ABG BIT(27)
-#define SOP_AMB BIT(28)
-#define SOP_RCK BIT(29)
-#define SOP_A8M BIT(30)
-#define XOP_BTCK BIT(31)
-
-#define ANAD16V_EN BIT(0)
-#define ANA8M BIT(1)
-#define MACSLP BIT(4)
-#define LOADER_CLK_EN BIT(5)
-#define _80M_SSC_DIS BIT(7)
-#define _80M_SSC_EN_HO BIT(8)
-#define PHY_SSC_RSTB BIT(9)
-#define SEC_CLK_EN BIT(10)
-#define MAC_CLK_EN BIT(11)
-#define SYS_CLK_EN BIT(12)
-#define RING_CLK_EN BIT(13)
-
-#define BOOT_FROM_EEPROM BIT(4)
-#define EEPROM_EN BIT(5)
-
-#define AFE_BGEN BIT(0)
-#define AFE_MBEN BIT(1)
-#define MAC_ID_EN BIT(7)
-
-#define WLOCK_ALL BIT(0)
-#define WLOCK_00 BIT(1)
-#define WLOCK_04 BIT(2)
-#define WLOCK_08 BIT(3)
-#define WLOCK_40 BIT(4)
-#define R_DIS_PRST_0 BIT(5)
-#define R_DIS_PRST_1 BIT(6)
-#define LOCK_ALL_EN BIT(7)
-
-#define RF_EN BIT(0)
-#define RF_RSTB BIT(1)
-#define RF_SDMRSTB BIT(2)
-
-#define LDA15_EN BIT(0)
-#define LDA15_STBY BIT(1)
-#define LDA15_OBUF BIT(2)
-#define LDA15_REG_VOS BIT(3)
-#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
-
-#define LDV12_EN BIT(0)
-#define LDV12_SDBY BIT(1)
-#define LPLDO_HSM BIT(2)
-#define LPLDO_LSM_DIS BIT(3)
-#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
-
-#define XTAL_EN BIT(0)
-#define XTAL_BSEL BIT(1)
-#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
-#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
-#define XTAL_GATE_USB BIT(8)
-#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
-#define XTAL_GATE_AFE BIT(11)
-#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
-#define XTAL_RF_GATE BIT(14)
-#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
-#define XTAL_GATE_DIG BIT(17)
-#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
-#define XTAL_BT_GATE BIT(20)
-#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
-#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
-
-#define CKDLY_AFE BIT(26)
-#define CKDLY_USB BIT(27)
-#define CKDLY_DIG BIT(28)
-#define CKDLY_BT BIT(29)
-
-#define APLL_EN BIT(0)
-#define APLL_320_EN BIT(1)
-#define APLL_FREF_SEL BIT(2)
-#define APLL_EDGE_SEL BIT(3)
-#define APLL_WDOGB BIT(4)
-#define APLL_LPFEN BIT(5)
-
-#define APLL_REF_CLK_13MHZ 0x1
-#define APLL_REF_CLK_19_2MHZ 0x2
-#define APLL_REF_CLK_20MHZ 0x3
-#define APLL_REF_CLK_25MHZ 0x4
-#define APLL_REF_CLK_26MHZ 0x5
-#define APLL_REF_CLK_38_4MHZ 0x6
-#define APLL_REF_CLK_40MHZ 0x7
-
-#define APLL_320EN BIT(14)
-#define APLL_80EN BIT(15)
-#define APLL_1MEN BIT(24)
-
-#define ALD_EN BIT(18)
-#define EF_PD BIT(19)
-#define EF_FLAG BIT(31)
-
-#define EF_TRPT BIT(7)
-#define LDOE25_EN BIT(31)
-
-#define RSM_EN BIT(0)
-#define TIMER_EN BIT(4)
-
-#define TRSW0EN BIT(2)
-#define TRSW1EN BIT(3)
-#define EROM_EN BIT(4)
-#define EN_BT BIT(5)
-#define EN_UART BIT(8)
-#define UART_910 BIT(9)
-#define EN_PMAC BIT(10)
-#define SIC_SWRST BIT(11)
-#define EN_SIC BIT(12)
-#define SIC_23 BIT(13)
-#define EN_HDP BIT(14)
-#define SIC_LBK BIT(15)
-
-#define LED0PL BIT(4)
-#define LED1PL BIT(12)
-#define LED0DIS BIT(7)
-
-#define MCUFWDL_EN BIT(0)
-#define MCUFWDL_RDY BIT(1)
-#define FWDL_CHKSUM_RPT BIT(2)
-#define MACINI_RDY BIT(3)
-#define BBINI_RDY BIT(4)
-#define RFINI_RDY BIT(5)
-#define WINTINI_RDY BIT(6)
-#define CPRST BIT(23)
-
-#define XCLK_VLD BIT(0)
-#define ACLK_VLD BIT(1)
-#define UCLK_VLD BIT(2)
-#define PCLK_VLD BIT(3)
-#define PCIRSTB BIT(4)
-#define V15_VLD BIT(5)
-#define TRP_B15V_EN BIT(7)
-#define SIC_IDLE BIT(8)
-#define BD_MAC2 BIT(9)
-#define BD_MAC1 BIT(10)
-#define IC_MACPHY_MODE BIT(11)
-#define VENDOR_ID BIT(19)
-#define PAD_HWPD_IDN BIT(22)
-#define TRP_VAUX_EN BIT(23)
-#define TRP_BT_EN BIT(24)
-#define BD_PKG_SEL BIT(25)
-#define BD_HCI_SEL BIT(26)
-#define TYPE_ID BIT(27)
-
-#define CHIP_VER_RTL_MASK 0xF000
-#define CHIP_VER_RTL_SHIFT 12
-
-#define REG_LBMODE_8822B (REG_CR_8822B + 3)
-
-#define HCI_TXDMA_EN BIT(0)
-#define HCI_RXDMA_EN BIT(1)
-#define TXDMA_EN BIT(2)
-#define RXDMA_EN BIT(3)
-#define PROTOCOL_EN BIT(4)
-#define SCHEDULE_EN BIT(5)
-#define MACTXEN BIT(6)
-#define MACRXEN BIT(7)
-#define ENSWBCN BIT(8)
-#define ENSEC BIT(9)
-
-#define _NETTYPE(x) (((x) & 0x3) << 16)
-#define MASK_NETTYPE 0x30000
-#define NT_NO_LINK 0x0
-#define NT_LINK_AD_HOC 0x1
-#define NT_LINK_AP 0x2
-#define NT_AS_AP 0x3
-
-#define _LBMODE(x) (((x) & 0xF) << 24)
-#define MASK_LBMODE 0xF000000
-#define LOOPBACK_NORMAL 0x0
-#define LOOPBACK_IMMEDIATELY 0xB
-#define LOOPBACK_MAC_DELAY 0x3
-#define LOOPBACK_PHY 0x1
-#define LOOPBACK_DMA 0x7
-
-#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
-#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
-#define _PSRX_MASK 0xF
-#define _PSTX_MASK 0xF0
-#define _PSRX(x) (x)
-#define _PSTX(x) ((x) << 4)
-
-#define PBP_64 0x0
-#define PBP_128 0x1
-#define PBP_256 0x2
-#define PBP_512 0x3
-#define PBP_1024 0x4
-
-#define RXDMA_ARBBW_EN BIT(0)
-#define RXSHFT_EN BIT(1)
-#define RXDMA_AGG_EN BIT(2)
-#define QS_VO_QUEUE BIT(8)
-#define QS_VI_QUEUE BIT(9)
-#define QS_BE_QUEUE BIT(10)
-#define QS_BK_QUEUE BIT(11)
-#define QS_MANAGER_QUEUE BIT(12)
-#define QS_HIGH_QUEUE BIT(13)
-
-#define HQSEL_VOQ BIT(0)
-#define HQSEL_VIQ BIT(1)
-#define HQSEL_BEQ BIT(2)
-#define HQSEL_BKQ BIT(3)
-#define HQSEL_MGTQ BIT(4)
-#define HQSEL_HIQ BIT(5)
-
-#define _TXDMA_HIQ_MAP(x) (((x) & 0x3) << 14)
-#define _TXDMA_MGQ_MAP(x) (((x) & 0x3) << 12)
-#define _TXDMA_BKQ_MAP(x) (((x) & 0x3) << 10)
-#define _TXDMA_BEQ_MAP(x) (((x) & 0x3) << 8)
-#define _TXDMA_VIQ_MAP(x) (((x) & 0x3) << 6)
-#define _TXDMA_VOQ_MAP(x) (((x) & 0x3) << 4)
-
-#define QUEUE_LOW 1
-#define QUEUE_NORMAL 2
-#define QUEUE_HIGH 3
-
-#define _LLT_NO_ACTIVE 0x0
-#define _LLT_WRITE_ACCESS 0x1
-#define _LLT_READ_ACCESS 0x2
-
-#define _LLT_INIT_DATA(x) ((x) & 0xFF)
-#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
-#define _LLT_OP(x) (((x) & 0x3) << 30)
-#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
-
-#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
-#define BB_WRITE_EN BIT(30)
-#define BB_READ_EN BIT(31)
-
-#define _HPQ(x) ((x) & 0xFF)
-#define _LPQ(x) (((x) & 0xFF) << 8)
-#define _PUBQ(x) (((x) & 0xFF) << 16)
-#define _NPQ(x) ((x) & 0xFF)
-
-#define HPQ_PUBLIC_DIS BIT(24)
-#define LPQ_PUBLIC_DIS BIT(25)
-#define LD_RQPN BIT(31)
-
-#define BCN_VALID BIT(16)
-#define BCN_HEAD(x) (((x) & 0xFF) << 8)
-#define BCN_HEAD_MASK 0xFF00
-
-#define BLK_DESC_NUM_SHIFT 4
-#define BLK_DESC_NUM_MASK 0xF
-
-#define DROP_DATA_EN BIT(9)
-
-#define EN_AMPDU_RTY_NEW BIT(7)
-
-#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
-
-#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
-#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
-
-#define RATE_REG_BITMAP_ALL 0xFFFFF
-
-#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
-
-#define _RRSR_RSC(x) (((x) & 0x3) << 21)
-#define RRSR_RSC_RESERVED 0x0
-#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
-#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
-#define RRSR_RSC_DUPLICATE_MODE 0x3
-
-#define USE_SHORT_G1 BIT(20)
-
-#define _AGGLMT_MCS0(x) ((x) & 0xF)
-#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
-#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
-#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
-#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
-#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
-#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
-#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
-
-#define RETRY_LIMIT_SHORT_SHIFT 8
-#define RETRY_LIMIT_LONG_SHIFT 0
-
-#define _DARF_RC1(x) ((x) & 0x1F)
-#define _DARF_RC2(x) (((x) & 0x1F) << 8)
-#define _DARF_RC3(x) (((x) & 0x1F) << 16)
-#define _DARF_RC4(x) (((x) & 0x1F) << 24)
-#define _DARF_RC5(x) ((x) & 0x1F)
-#define _DARF_RC6(x) (((x) & 0x1F) << 8)
-#define _DARF_RC7(x) (((x) & 0x1F) << 16)
-#define _DARF_RC8(x) (((x) & 0x1F) << 24)
-
-#define _RARF_RC1(x) ((x) & 0x1F)
-#define _RARF_RC2(x) (((x) & 0x1F) << 8)
-#define _RARF_RC3(x) (((x) & 0x1F) << 16)
-#define _RARF_RC4(x) (((x) & 0x1F) << 24)
-#define _RARF_RC5(x) ((x) & 0x1F)
-#define _RARF_RC6(x) (((x) & 0x1F) << 8)
-#define _RARF_RC7(x) (((x) & 0x1F) << 16)
-#define _RARF_RC8(x) (((x) & 0x1F) << 24)
-
-#define AC_PARAM_TXOP_LIMIT_OFFSET 16
-#define AC_PARAM_ECW_MAX_OFFSET 12
-#define AC_PARAM_ECW_MIN_OFFSET 8
-#define AC_PARAM_AIFS_OFFSET 0
-
-#define _AIFS(x) (x)
-#define _ECW_MAX_MIN(x) ((x) << 8)
-#define _TXOP_LIMIT(x) ((x) << 16)
-
-#define _BCNIFS(x) ((x) & 0xFF)
-#define _BCNECW(x) ((((x) & 0xF)) << 8)
-
-#define _LRL(x) ((x) & 0x3F)
-#define _SRL(x) (((x) & 0x3F) << 8)
-
-#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
-#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
-
-#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
-#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
-
-#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
-
-#define DIS_EDCA_CNT_DWN BIT(11)
-
-#define EN_MBSSID BIT(1)
-#define EN_TXBCN_RPT BIT(2)
-#define EN_BCN_FUNCTION BIT(3)
-
-#define TSFTR_RST BIT(0)
-#define TSFTR1_RST BIT(1)
-
-#define STOP_BCNQ BIT(6)
-
-#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
-#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
-
-#define ACMHW_HW_EN BIT(0)
-#define ACMHW_BEQ_EN BIT(1)
-#define ACMHW_VIQ_EN BIT(2)
-#define ACMHW_VOQ_EN BIT(3)
-#define ACMHW_BEQ_STATUS BIT(4)
-#define ACMHW_VIQ_STATUS BIT(5)
-#define ACMHW_VOQ_STATUS BIT(6)
-
-#define APSDOFF BIT(6)
-#define APSDOFF_STATUS BIT(7)
-
-#define BW_20MHZ BIT(2)
-
-#define RATE_BITMAP_ALL 0xFFFFF
-
-#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
-
-#define TSFRST BIT(0)
-#define DIS_GCLK BIT(1)
-#define PAD_SEL BIT(2)
-#define PWR_ST BIT(6)
-#define PWRBIT_OW_EN BIT(7)
-#define ACRC BIT(8)
-#define CFENDFORM BIT(9)
-#define ICV BIT(10)
-
-#define AAP BIT(0)
-#define APM BIT(1)
-#define AM BIT(2)
-#define AB BIT(3)
-#define ADD3 BIT(4)
-#define APWRMGT BIT(5)
-#define CBSSID BIT(6)
-#define CBSSID_DATA BIT(6)
-#define CBSSID_BCN BIT(7)
-#define ACRC32 BIT(8)
-#define AICV BIT(9)
-#define ADF BIT(11)
-#define ACF BIT(12)
-#define AMF BIT(13)
-#define HTC_LOC_CTRL BIT(14)
-#define UC_DATA_EN BIT(16)
-#define BM_DATA_EN BIT(17)
-#define MFBEN BIT(22)
-#define LSIGEN BIT(23)
-#define EN_MBID BIT(24)
-#define APP_BASSN BIT(27)
-#define APP_PHYSTS BIT(28)
-#define APP_ICV BIT(29)
-#define APP_MIC BIT(30)
-#define APP_FCS BIT(31)
-
-#define _MIN_SPACE(x) ((x) & 0x7)
-#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
-
-#define RXERR_TYPE_OFDM_PPDU 0
-#define RXERR_TYPE_OFDM_FALSE_ALARM 1
-#define RXERR_TYPE_OFDM_MPDU_OK 2
-#define RXERR_TYPE_OFDM_MPDU_FAIL 3
-#define RXERR_TYPE_CCK_PPDU 4
-#define RXERR_TYPE_CCK_FALSE_ALARM 5
-#define RXERR_TYPE_CCK_MPDU_OK 6
-#define RXERR_TYPE_CCK_MPDU_FAIL 7
-#define RXERR_TYPE_HT_PPDU 8
-#define RXERR_TYPE_HT_FALSE_ALARM 9
-#define RXERR_TYPE_HT_MPDU_TOTAL 10
-#define RXERR_TYPE_HT_MPDU_OK 11
-#define RXERR_TYPE_HT_MPDU_FAIL 12
-#define RXERR_TYPE_RX_FULL_DROP 15
-
-#define RXERR_COUNTER_MASK 0xFFFFF
-#define RXERR_RPT_RST BIT(27)
-#define _RXERR_RPT_SEL(type) ((type) << 28)
-
-#define SCR_TX_USE_DK BIT(0)
-#define SCR_RX_USE_DK BIT(1)
-#define SCR_TX_ENC_ENABLE BIT(2)
-#define SRC_RX_DEC_ENABLE BIT(3)
-#define SCR_SK_BY_A2 BIT(4)
-#define SCR_NO_SKMC BIT(5)
-#define SCR_TXBCUSEDK BIT(6)
-#define SCR_RXBCUSEDK BIT(7)
-
-#define USB_IS_HIGH_SPEED 0
-#define USB_IS_FULL_SPEED 1
-#define USB_SPEED_MASK BIT(5)
-
-#define USB_NORMAL_SIE_EP_MASK 0xF
-#define USB_NORMAL_SIE_EP_SHIFT 4
-
-#define USB_TEST_EP_MASK 0x30
-#define USB_TEST_EP_SHIFT 4
-
-#define USB_AGG_EN BIT(3)
-
-#define MAC_ADDR_LEN 6
-#define LAST_ENTRY_OF_TX_PKT_BUFFER 175
-
-#define POLLING_LLT_THRESHOLD 20
-#define POLLING_READY_TIMEOUT_COUNT 3000
-
-#define MAX_MSS_DENSITY_2T 0x13
-#define MAX_MSS_DENSITY_1T 0x0A
-
-#define EPROM_CMD_OPERATING_MODE_MASK ((1 << 7) | (1 << 6))
-#define EPROM_CMD_CONFIG 0x3
-#define EPROM_CMD_LOAD 1
-
-#define HAL_8822B_HW_GPIO_WPS_BIT BIT(2)
-
-/*-----------------------------------------------------
- * BB / RF register
- *-----------------------------------------------------
- */
-
-#define RFPGA0_XA_HSSIPARAMETER1 0x820
-#define RFPGA0_XA_HSSIPARAMETER2 0x824
-#define RFPGA0_XB_HSSIPARAMETER1 0x828
-#define RFPGA0_XB_HSSIPARAMETER2 0x82c
-#define RCCAONSEC 0x838
-
-#define RFPGA0_XA_LSSIPARAMETER 0x840
-#define RFPGA0_XB_LSSIPARAMETER 0x844
-#define RL1PEAKTH 0x848
-
-#define RFPGA0_RFWAKEUPPARAMETER 0x850
-#define RFPGA0_RFSLEEPUPPARAMETER 0x854
-
-#define RFPGA0_XAB_SWITCHCONTROL 0x858
-#define RFPGA0_XCD_SWITCHCONTROL 0x85c
-
-#define RFPGA0_XA_RFINTERFACEOE 0x860
-#define RFC_AREA 0x860
-#define RFPGA0_XB_RFINTERFACEOE 0x864
-
-#define RFPGA0_XAB_RFINTERFACESW 0x870
-#define RFPGA0_XCD_RFINTERFACESW 0x874
-
-#define RFPGA0_XAB_RF_PARA_METER 0x878
-#define RFPGA0_XCD_RF_PARA_METER 0x87c
-
-#define RFPGA0_ANALOGPARAMETER1 0x880
-#define RFPGA0_ANALOGPARAMETER2 0x884
-#define RFPGA0_ANALOGPARAMETER3 0x888
-#define RFPGA0_ANALOGPARAMETER4 0x88c
-
-#define RFPGA0_XA_LSSIREADBACK 0x8a0
-#define RFPGA0_XB_LSSIREADBACK 0x8a4
-#define RFPGA0_XC_LSSIREADBACK 0x8a8
-/*#define RFPGA0_XD_LSSIREADBACK 0x8ac*/
-#define RRFMOD 0x8ac
-#define RHSSIREAD_8822BE 0x8b0
-
-#define RFPGA0_PSDREPORT 0x8b4
-#define TRANSCEIVEA_HSPI_READBACK 0x8b8
-#define TRANSCEIVEB_HSPI_READBACK 0x8bc
-/*#define REG_SC_CNT_8822B 0x8c4*/
-#define RADC_BUF_CLK 0x8c4
-#define RFPGA0_XAB_RFINTERFACERB 0x8e0
-#define RFPGA0_XCD_RFINTERFACERB 0x8e4
-
-/* PageB(0xB00) */
-
-/*Page C*/
-
-#define RA_TXPWRTRAING 0xc54
-#define RB_TXPWRTRAING 0xe54
-
-#define RA_LSSIWRITE_8822B 0xc90
-#define RB_LSSIWRITE_8822B 0xe90
-
-#define RA_PIREAD_8822B 0xd04
-#define RB_PIREAD_8822B 0xd44
-#define RA_SIREAD_8822B 0xd08
-#define RB_SIREAD_8822B 0xd48
-
-#define RZEBRA1_HSSIENABLE 0x0
-#define RZEBRA1_TRXENABLE1 0x1
-#define RZEBRA1_TRXENABLE2 0x2
-#define RZEBRA1_AGC 0x4
-#define RZEBRA1_CHARGEPUMP 0x5
-#define RZEBRA1_CHANNEL 0x7
-
-#define RZEBRA1_TXGAIN 0x8
-#define RZEBRA1_TXLPF 0x9
-#define RZEBRA1_RXLPF 0xb
-#define RZEBRA1_RXHPFCORNER 0xc
-
-#define RGLOBALCTRL 0
-#define RRTL8256_TXLPF 19
-#define RRTL8256_RXLPF 11
-#define RRTL8258_TXLPF 0x11
-#define RRTL8258_RXLPF 0x13
-#define RRTL8258_RSSILPF 0xa
-
-#define RF_AC 0x00
-
-#define RF_IQADJ_G1 0x01
-#define RF_IQADJ_G2 0x02
-#define RF_POW_TRSW 0x05
-
-#define RF_GAIN_RX 0x06
-#define RF_GAIN_TX 0x07
-
-#define RF_TXM_IDAC 0x08
-#define RF_BS_IQGEN 0x0F
-
-#define RF_MODE1 0x10
-#define RF_MODE2 0x11
-
-#define RF_RX_AGC_HP 0x12
-#define RF_TX_AGC 0x13
-#define RF_BIAS 0x14
-#define RF_IPA 0x15
-#define RF_POW_ABILITY 0x17
-#define RF_MODE_AG 0x18
-#define RRFCHANNEL 0x18
-#define RF_CHNLBW 0x18
-#define RF_TOP 0x19
-
-#define RF_RX_G1 0x1A
-#define RF_RX_G2 0x1B
-
-#define RF_RX_BB2 0x1C
-#define RF_RX_BB1 0x1D
-
-#define RF_RCK1 0x1E
-#define RF_RCK2 0x1F
-
-#define RF_TX_G1 0x20
-#define RF_TX_G2 0x21
-#define RF_TX_G3 0x22
-
-#define RF_TX_BB1 0x23
-#define RF_T_METER 0x42
-
-#define RF_SYN_G1 0x25
-#define RF_SYN_G2 0x26
-#define RF_SYN_G3 0x27
-#define RF_SYN_G4 0x28
-#define RF_SYN_G5 0x29
-#define RF_SYN_G6 0x2A
-#define RF_SYN_G7 0x2B
-#define RF_SYN_G8 0x2C
-
-#define RF_RCK_OS 0x30
-#define RF_TXPA_G1 0x31
-#define RF_TXPA_G2 0x32
-#define RF_TXPA_G3 0x33
-
-#define RF_TX_BIAS_A 0x35
-#define RF_TX_BIAS_D 0x36
-#define RF_LOBF_9 0x38
-#define RF_RXRF_A3 0x3C
-#define RF_TRSW 0x3F
-
-#define RF_TXRF_A2 0x41
-#define RF_TXPA_G4 0x46
-#define RF_TXPA_A4 0x4B
-
-#define RF_APK 0x63
-
-#define RF_WE_LUT 0xEF
-
-#define BBBRESETB 0x100
-#define BGLOBALRESETB 0x200
-#define BOFDMTXSTART 0x4
-#define BCCKTXSTART 0x8
-#define BCRC32DEBUG 0x100
-#define BPMACLOOPBACK 0x10
-#define BTXLSIG 0xffffff
-#define BOFDMTXRATE 0xf
-#define BOFDMTXRESERVED 0x10
-#define BOFDMTXLENGTH 0x1ffe0
-#define BOFDMTXPARITY 0x20000
-#define BTXHTSIG1 0xffffff
-#define BTXHTMCSRATE 0x7f
-#define BTXHTBW 0x80
-#define BTXHTLENGTH 0xffff00
-#define BTXHTSIG2 0xffffff
-#define BTXHTSMOOTHING 0x1
-#define BTXHTSOUNDING 0x2
-#define BTXHTRESERVED 0x4
-#define BTXHTAGGREATION 0x8
-#define BTXHTSTBC 0x30
-#define BTXHTADVANCECODING 0x40
-#define BTXHTSHORTGI 0x80
-#define BTXHTNUMBERHT_LTF 0x300
-#define BTXHTCRC8 0x3fc00
-#define BCOUNTERRESET 0x10000
-#define BNUMOFOFDMTX 0xffff
-#define BNUMOFCCKTX 0xffff0000
-#define BTXIDLEINTERVAL 0xffff
-#define BOFDMSERVICE 0xffff0000
-#define BTXMACHEADER 0xffffffff
-#define BTXDATAINIT 0xff
-#define BTXHTMODE 0x100
-#define BTXDATATYPE 0x30000
-#define BTXRANDOMSEED 0xffffffff
-#define BCCKTXPREAMBLE 0x1
-#define BCCKTXSFD 0xffff0000
-#define BCCKTXSIG 0xff
-#define BCCKTXSERVICE 0xff00
-#define BCCKLENGTHEXT 0x8000
-#define BCCKTXLENGHT 0xffff0000
-#define BCCKTXCRC16 0xffff
-#define BCCKTXSTATUS 0x1
-#define BOFDMTXSTATUS 0x2
-#define IS_BB_REG_OFFSET_92S(_offset) ((_offset >= 0x800) && (_offset <= 0xfff))
-
-#define BRFMOD 0x1
-#define BJAPANMODE 0x2
-#define BCCKTXSC 0x30
-/* Block & Path enable*/
-#define ROFDMCCKEN 0x808
-#define BCCKEN 0x10000000
-#define BOFDMEN 0x20000000
-/* Rx antenna*/
-#define RRXPATH 0x808
-#define BRXPATH 0xff
-/* Tx antenna*/
-#define RTXPATH 0x80c
-#define BTXPATH 0x0fffffff
-/* for cck rx path selection*/
-#define RCCK_RX 0xa04
-#define BCCK_RX 0x0c000000
-/* Use LSIG for VHT length*/
-#define RVHTLEN_USE_LSIG 0x8c3
-
-#define BOFDMRXADCPHASE 0x10000
-#define BOFDMTXDACPHASE 0x40000
-#define BXATXAGC 0x3f
-
-#define BXBTXAGC 0xf00
-#define BXCTXAGC 0xf000
-#define BXDTXAGC 0xf0000
-
-#define BPASTART 0xf0000000
-#define BTRSTART 0x00f00000
-#define BRFSTART 0x0000f000
-#define BBBSTART 0x000000f0
-#define BBBCCKSTART 0x0000000f
-#define BPAEND 0xf
-#define BTREND 0x0f000000
-#define BRFEND 0x000f0000
-#define BCCAMASK 0x000000f0
-#define BR2RCCAMASK 0x00000f00
-#define BHSSI_R2TDELAY 0xf8000000
-#define BHSSI_T2RDELAY 0xf80000
-#define BCONTXHSSI 0x400
-#define BIGFROMCCK 0x200
-#define BAGCADDRESS 0x3f
-#define BRXHPTX 0x7000
-#define BRXHP2RX 0x38000
-#define BRXHPCCKINI 0xc0000
-#define BAGCTXCODE 0xc00000
-#define BAGCRXCODE 0x300000
-
-#define B3WIREDATALENGTH 0x800
-#define B3WIREADDREAALENGTH 0x400
-
-#define B3WIRERFPOWERDOWN 0x1
-#define B5GPAPEPOLARITY 0x40000000
-#define B2GPAPEPOLARITY 0x80000000
-#define BRFSW_TXDEFAULTANT 0x3
-#define BRFSW_TXOPTIONANT 0x30
-#define BRFSW_RXDEFAULTANT 0x300
-#define BRFSW_RXOPTIONANT 0x3000
-#define BRFSI_3WIREDATA 0x1
-#define BRFSI_3WIRECLOCK 0x2
-#define BRFSI_3WIRELOAD 0x4
-#define BRFSI_3WIRERW 0x8
-#define BRFSI_3WIRE 0xf
-
-#define BRFSI_RFENV 0x10
-
-#define BRFSI_TRSW 0x20
-#define BRFSI_TRSWB 0x40
-#define BRFSI_ANTSW 0x100
-#define BRFSI_ANTSWB 0x200
-#define BRFSI_PAPE 0x400
-#define BRFSI_PAPE5G 0x800
-#define BBANDSELECT 0x1
-#define BHTSIG2_GI 0x80
-#define BHTSIG2_SMOOTHING 0x01
-#define BHTSIG2_SOUNDING 0x02
-#define BHTSIG2_AGGREATON 0x08
-#define BHTSIG2_STBC 0x30
-#define BHTSIG2_ADVCODING 0x40
-#define BHTSIG2_NUMOFHTLTF 0x300
-#define BHTSIG2_CRC8 0x3fc
-#define BHTSIG1_MCS 0x7f
-#define BHTSIG1_BANDWIDTH 0x80
-#define BHTSIG1_HTLENGTH 0xffff
-#define BLSIG_RATE 0xf
-#define BLSIG_RESERVED 0x10
-#define BLSIG_LENGTH 0x1fffe
-#define BLSIG_PARITY 0x20
-#define BCCKRXPHASE 0x4
-
-#define BLSSIREADADDRESS 0x7f800000
-#define BLSSIREADEDGE 0x80000000
-
-#define BLSSIREADBACKDATA 0xfffff
-
-#define BLSSIREADOKFLAG 0x1000
-#define BCCKSAMPLERATE 0x8
-#define BREGULATOR0STANDBY 0x1
-#define BREGULATORPLLSTANDBY 0x2
-#define BREGULATOR1STANDBY 0x4
-#define BPLLPOWERUP 0x8
-#define BDPLLPOWERUP 0x10
-#define BDA10POWERUP 0x20
-#define BAD7POWERUP 0x200
-#define BDA6POWERUP 0x2000
-#define BXTALPOWERUP 0x4000
-#define B40MDCLKPOWERUP 0x8000
-#define BDA6DEBUGMODE 0x20000
-#define BDA6SWING 0x380000
-
-#define BADCLKPHASE 0x4000000
-#define B80MCLKDELAY 0x18000000
-#define BAFEWATCHDOGENABLE 0x20000000
-
-#define BXTALCAP01 0xc0000000
-#define BXTALCAP23 0x3
-#define BXTALCAP92X 0x0f000000
-#define BXTALCAP 0x0f000000
-
-#define BINTDIFCLKENABLE 0x400
-#define BEXTSIGCLKENABLE 0x800
-#define BBANDGAP_MBIAS_POWERUP 0x10000
-#define BAD11SH_GAIN 0xc0000
-#define BAD11NPUT_RANGE 0x700000
-#define BAD110P_CURRENT 0x3800000
-#define BLPATH_LOOPBACK 0x4000000
-#define BQPATH_LOOPBACK 0x8000000
-#define BAFE_LOOPBACK 0x10000000
-#define BDA10_SWING 0x7e0
-#define BDA10_REVERSE 0x800
-#define BDA_CLK_SOURCE 0x1000
-#define BDA7INPUT_RANGE 0x6000
-#define BDA7_GAIN 0x38000
-#define BDA7OUTPUT_CM_MODE 0x40000
-#define BDA7INPUT_CM_MODE 0x380000
-#define BDA7CURRENT 0xc00000
-#define BREGULATOR_ADJUST 0x7000000
-#define BAD11POWERUP_ATTX 0x1
-#define BDA10PS_ATTX 0x10
-#define BAD11POWERUP_ATRX 0x100
-#define BDA10PS_ATRX 0x1000
-#define BCCKRX_AGC_FORMAT 0x200
-#define BPSDFFT_SAMPLE_POINT 0xc000
-#define BPSD_AVERAGE_NUM 0x3000
-#define BIQPATH_CONTROL 0xc00
-#define BPSD_FREQ 0x3ff
-#define BPSD_ANTENNA_PATH 0x30
-#define BPSD_IQ_SWITCH 0x40
-#define BPSD_RX_TRIGGER 0x400000
-#define BPSD_TX_TRIGGER 0x80000000
-#define BPSD_SINE_TONE_SCALE 0x7f000000
-#define BPSD_REPORT 0xffff
-
-#define BOFDM_TXSC 0x30000000
-#define BCCK_TXON 0x1
-#define BOFDM_TXON 0x2
-#define BDEBUG_PAGE 0xfff
-#define BDEBUG_ITEM 0xff
-#define BANTL 0x10
-#define BANT_NONHT 0x100
-#define BANT_HT1 0x1000
-#define BANT_HT2 0x10000
-#define BANT_HT1S1 0x100000
-#define BANT_NONHTS1 0x1000000
-
-#define BCCK_BBMODE 0x3
-#define BCCK_TXPOWERSAVING 0x80
-#define BCCK_RXPOWERSAVING 0x40
-
-#define BCCK_SIDEBAND 0x10
-
-#define BCCK_SCRAMBLE 0x8
-#define BCCK_ANTDIVERSITY 0x8000
-#define BCCK_CARRIER_RECOVERY 0x4000
-#define BCCK_TXRATE 0x3000
-#define BCCK_DCCANCEL 0x0800
-#define BCCK_ISICANCEL 0x0400
-#define BCCK_MATCH_FILTER 0x0200
-#define BCCK_EQUALIZER 0x0100
-#define BCCK_PREAMBLE_DETECT 0x800000
-#define BCCK_FAST_FALSECCA 0x400000
-#define BCCK_CH_ESTSTART 0x300000
-#define BCCK_CCA_COUNT 0x080000
-#define BCCK_CS_LIM 0x070000
-#define BCCK_BIST_MODE 0x80000000
-#define BCCK_CCAMASK 0x40000000
-#define BCCK_TX_DAC_PHASE 0x4
-#define BCCK_RX_ADC_PHASE 0x20000000
-#define BCCKR_CP_MODE 0x0100
-#define BCCK_TXDC_OFFSET 0xf0
-#define BCCK_RXDC_OFFSET 0xf
-#define BCCK_CCA_MODE 0xc000
-#define BCCK_FALSECS_LIM 0x3f00
-#define BCCK_CS_RATIO 0xc00000
-#define BCCK_CORGBIT_SEL 0x300000
-#define BCCK_PD_LIM 0x0f0000
-#define BCCK_NEWCCA 0x80000000
-#define BCCK_RXHP_OF_IG 0x8000
-#define BCCK_RXIG 0x7f00
-#define BCCK_LNA_POLARITY 0x800000
-#define BCCK_RX1ST_BAIN 0x7f0000
-#define BCCK_RF_EXTEND 0x20000000
-#define BCCK_RXAGC_SATLEVEL 0x1f000000
-#define BCCK_RXAGC_SATCOUNT 0xe0
-#define BCCK_RX_RF_SETTLE 0x1f
-#define BCCK_FIXED_RXAGC 0x8000
-#define BCCK_ANTENNA_POLARITY 0x2000
-#define BCCK_TXFILTER_TYPE 0x0c00
-#define BCCK_RXAGC_REPORTTYPE 0x0300
-#define BCCK_RXDAGC_EN 0x80000000
-#define BCCK_RXDAGC_PERIOD 0x20000000
-#define BCCK_RXDAGC_SATLEVEL 0x1f000000
-#define BCCK_TIMING_RECOVERY 0x800000
-#define BCCK_TXC0 0x3f0000
-#define BCCK_TXC1 0x3f000000
-#define BCCK_TXC2 0x3f
-#define BCCK_TXC3 0x3f00
-#define BCCK_TXC4 0x3f0000
-#define BCCK_TXC5 0x3f000000
-#define BCCK_TXC6 0x3f
-#define BCCK_TXC7 0x3f00
-#define BCCK_DEBUGPORT 0xff0000
-#define BCCK_DAC_DEBUG 0x0f000000
-#define BCCK_FALSEALARM_ENABLE 0x8000
-#define BCCK_FALSEALARM_READ 0x4000
-#define BCCK_TRSSI 0x7f
-#define BCCK_RXAGC_REPORT 0xfe
-#define BCCK_RXREPORT_ANTSEL 0x80000000
-#define BCCK_RXREPORT_MFOFF 0x40000000
-#define BCCK_RXREPORT_SQLOSS 0x20000000
-#define BCCK_RXREPORT_PKTLOSS 0x10000000
-#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
-#define BCCK_RXREPORT_RATEERROR 0x04000000
-#define BCCK_RXREPORT_RXRATE 0x03000000
-#define BCCK_RXFA_COUNTER_LOWER 0xff
-#define BCCK_RXFA_COUNTER_UPPER 0xff000000
-#define BCCK_RXHPAGC_START 0xe000
-#define BCCK_RXHPAGC_FINAL 0x1c00
-#define BCCK_RXFALSEALARM_ENABLE 0x8000
-#define BCCK_FACOUNTER_FREEZE 0x4000
-#define BCCK_TXPATH_SEL 0x10000000
-#define BCCK_DEFAULT_RXPATH 0xc000000
-#define BCCK_OPTION_RXPATH 0x3000000
-
-#define BNUM_OFSTF 0x3
-#define BSHIFT_L 0xc0
-#define BGI_TH 0xc
-#define BRXPATH_A 0x1
-#define BRXPATH_B 0x2
-#define BRXPATH_C 0x4
-#define BRXPATH_D 0x8
-#define BTXPATH_A 0x1
-#define BTXPATH_B 0x2
-#define BTXPATH_C 0x4
-#define BTXPATH_D 0x8
-#define BTRSSI_FREQ 0x200
-#define BADC_BACKOFF 0x3000
-#define BDFIR_BACKOFF 0xc000
-#define BTRSSI_LATCH_PHASE 0x10000
-#define BRX_LDC_OFFSET 0xff
-#define BRX_QDC_OFFSET 0xff00
-#define BRX_DFIR_MODE 0x1800000
-#define BRX_DCNF_TYPE 0xe000000
-#define BRXIQIMB_A 0x3ff
-#define BRXIQIMB_B 0xfc00
-#define BRXIQIMB_C 0x3f0000
-#define BRXIQIMB_D 0xffc00000
-#define BDC_DC_NOTCH 0x60000
-#define BRXNB_NOTCH 0x1f000000
-#define BPD_TH 0xf
-#define BPD_TH_OPT2 0xc000
-#define BPWED_TH 0x700
-#define BIFMF_WIN_L 0x800
-#define BPD_OPTION 0x1000
-#define BMF_WIN_L 0xe000
-#define BBW_SEARCH_L 0x30000
-#define BWIN_ENH_L 0xc0000
-#define BBW_TH 0x700000
-#define BED_TH2 0x3800000
-#define BBW_OPTION 0x4000000
-#define BRADIO_TH 0x18000000
-#define BWINDOW_L 0xe0000000
-#define BSBD_OPTION 0x1
-#define BFRAME_TH 0x1c
-#define BFS_OPTION 0x60
-#define BDC_SLOPE_CHECK 0x80
-#define BFGUARD_COUNTER_DC_L 0xe00
-#define BFRAME_WEIGHT_SHORT 0x7000
-#define BSUB_TUNE 0xe00000
-#define BFRAME_DC_LENGTH 0xe000000
-#define BSBD_START_OFFSET 0x30000000
-#define BFRAME_TH_2 0x7
-#define BFRAME_GI2_TH 0x38
-#define BGI2_SYNC_EN 0x40
-#define BSARCH_SHORT_EARLY 0x300
-#define BSARCH_SHORT_LATE 0xc00
-#define BSARCH_GI2_LATE 0x70000
-#define BCFOANTSUM 0x1
-#define BCFOACC 0x2
-#define BCFOSTARTOFFSET 0xc
-#define BCFOLOOPBACK 0x70
-#define BCFOSUMWEIGHT 0x80
-#define BDAGCENABLE 0x10000
-#define BTXIQIMB_A 0x3ff
-#define BTXIQIMB_b 0xfc00
-#define BTXIQIMB_C 0x3f0000
-#define BTXIQIMB_D 0xffc00000
-#define BTXIDCOFFSET 0xff
-#define BTXIQDCOFFSET 0xff00
-#define BTXDFIRMODE 0x10000
-#define BTXPESUDO_NOISEON 0x4000000
-#define BTXPESUDO_NOISE_A 0xff
-#define BTXPESUDO_NOISE_B 0xff00
-#define BTXPESUDO_NOISE_C 0xff0000
-#define BTXPESUDO_NOISE_D 0xff000000
-#define BCCA_DROPOPTION 0x20000
-#define BCCA_DROPTHRES 0xfff00000
-#define BEDCCA_H 0xf
-#define BEDCCA_L 0xf0
-#define BLAMBDA_ED 0x300
-#define BRX_INITIALGAIN 0x7f
-#define BRX_ANTDIV_EN 0x80
-#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
-#define BRX_HIGHPOWER_FLOW 0x8000
-#define BRX_AGC_FREEZE_THRES 0xc0000
-#define BRX_FREEZESTEP_AGC1 0x300000
-#define BRX_FREEZESTEP_AGC2 0xc00000
-#define BRX_FREEZESTEP_AGC3 0x3000000
-#define BRX_FREEZESTEP_AGC0 0xc000000
-#define BRXRSSI_CMP_EN 0x10000000
-#define BRXQUICK_AGCEN 0x20000000
-#define BRXAGC_FREEZE_THRES_MODE 0x40000000
-#define BRX_OVERFLOW_CHECKTYPE 0x80000000
-#define BRX_AGCSHIFT 0x7f
-#define BTRSW_TRI_ONLY 0x80
-#define BPOWER_THRES 0x300
-#define BRXAGC_EN 0x1
-#define BRXAGC_TOGETHER_EN 0x2
-#define BRXAGC_MIN 0x4
-#define BRXHP_INI 0x7
-#define BRXHP_TRLNA 0x70
-#define BRXHP_RSSI 0x700
-#define BRXHP_BBP1 0x7000
-#define BRXHP_BBP2 0x70000
-#define BRXHP_BBP3 0x700000
-#define BRSSI_H 0x7f0000
-#define BRSSI_GEN 0x7f000000
-#define BRXSETTLE_TRSW 0x7
-#define BRXSETTLE_LNA 0x38
-#define BRXSETTLE_RSSI 0x1c0
-#define BRXSETTLE_BBP 0xe00
-#define BRXSETTLE_RXHP 0x7000
-#define BRXSETTLE_ANTSW_RSSI 0x38000
-#define BRXSETTLE_ANTSW 0xc0000
-#define BRXPROCESS_TIME_DAGC 0x300000
-#define BRXSETTLE_HSSI 0x400000
-#define BRXPROCESS_TIME_BBPPW 0x800000
-#define BRXANTENNA_POWER_SHIFT 0x3000000
-#define BRSSI_TABLE_SELECT 0xc000000
-#define BRXHP_FINAL 0x7000000
-#define BRXHPSETTLE_BBP 0x7
-#define BRXHTSETTLE_HSSI 0x8
-#define BRXHTSETTLE_RXHP 0x70
-#define BRXHTSETTLE_BBPPW 0x80
-#define BRXHTSETTLE_IDLE 0x300
-#define BRXHTSETTLE_RESERVED 0x1c00
-#define BRXHT_RXHP_EN 0x8000
-#define BRXAGC_FREEZE_THRES 0x30000
-#define BRXAGC_TOGETHEREN 0x40000
-#define BRXHTAGC_MIN 0x80000
-#define BRXHTAGC_EN 0x100000
-#define BRXHTDAGC_EN 0x200000
-#define BRXHT_RXHP_BBP 0x1c00000
-#define BRXHT_RXHP_FINAL 0xe0000000
-#define BRXPW_RADIO_TH 0x3
-#define BRXPW_RADIO_EN 0x4
-#define BRXMF_HOLD 0x3800
-#define BRXPD_DELAY_TH1 0x38
-#define BRXPD_DELAY_TH2 0x1c0
-#define BRXPD_DC_COUNT_MAX 0x600
-#define BRXPD_DELAY_TH 0x8000
-#define BRXPROCESS_DELAY 0xf0000
-#define BRXSEARCHRANGE_GI2_EARLY 0x700000
-#define BRXFRAME_FUARD_COUNTER_L 0x3800000
-#define BRXSGI_GUARD_L 0xc000000
-#define BRXSGI_SEARCH_L 0x30000000
-#define BRXSGI_TH 0xc0000000
-#define BDFSCNT0 0xff
-#define BDFSCNT1 0xff00
-#define BDFSFLAG 0xf0000
-#define BMF_WEIGHT_SUM 0x300000
-#define BMINIDX_TH 0x7f000000
-#define BDAFORMAT 0x40000
-#define BTXCH_EMU_ENABLE 0x01000000
-#define BTRSW_ISOLATION_A 0x7f
-#define BTRSW_ISOLATION_B 0x7f00
-#define BTRSW_ISOLATION_C 0x7f0000
-#define BTRSW_ISOLATION_D 0x7f000000
-#define BEXT_LNA_GAIN 0x7c00
-
-#define BSTBC_EN 0x4
-#define BANTENNA_MAPPING 0x10
-#define BNSS 0x20
-#define BCFO_ANTSUM_ID 0x200
-#define BPHY_COUNTER_RESET 0x8000000
-#define BCFO_REPORT_GET 0x4000000
-#define BOFDM_CONTINUE_TX 0x10000000
-#define BOFDM_SINGLE_CARRIER 0x20000000
-#define BOFDM_SINGLE_TONE 0x40000000
-#define BHT_DETECT 0x100
-#define BCFOEN 0x10000
-#define BCFOVALUE 0xfff00000
-#define BSIGTONE_RE 0x3f
-#define BSIGTONE_IM 0x7f00
-#define BCOUNTER_CCA 0xffff
-#define BCOUNTER_PARITYFAIL 0xffff0000
-#define BCOUNTER_RATEILLEGAL 0xffff
-#define BCOUNTER_CRC8FAIL 0xffff0000
-#define BCOUNTER_MCSNOSUPPORT 0xffff
-#define BCOUNTER_FASTSYNC 0xffff
-#define BSHORTCFO 0xfff
-#define BSHORTCFOT_LENGTH 12
-#define BSHORTCFOF_LENGTH 11
-#define BLONGCFO 0x7ff
-#define BLONGCFOT_LENGTH 11
-#define BLONGCFOF_LENGTH 11
-#define BTAILCFO 0x1fff
-#define BTAILCFOT_LENGTH 13
-#define BTAILCFOF_LENGTH 12
-#define BNOISE_EN_PWDB 0xffff
-#define BCC_POWER_DB 0xffff0000
-#define BMOISE_PWDB 0xffff
-#define BPOWERMEAST_LENGTH 10
-#define BPOWERMEASF_LENGTH 3
-#define BRX_HT_BW 0x1
-#define BRXSC 0x6
-#define BRX_HT 0x8
-#define BNB_INTF_DET_ON 0x1
-#define BINTF_WIN_LEN_CFG 0x30
-#define BNB_INTF_TH_CFG 0x1c0
-#define BRFGAIN 0x3f
-#define BTABLESEL 0x40
-#define BTRSW 0x80
-#define BRXSNR_A 0xff
-#define BRXSNR_B 0xff00
-#define BRXSNR_C 0xff0000
-#define BRXSNR_D 0xff000000
-#define BSNR_EVMT_LENGTH 8
-#define BSNR_EVMF_LENGTH 1
-#define BCSI1ST 0xff
-#define BCSI2ND 0xff00
-#define BRXEVM1ST 0xff0000
-#define BRXEVM2ND 0xff000000
-#define BSIGEVM 0xff
-#define BPWDB 0xff00
-#define BSGIEN 0x10000
-
-#define BSFACTOR_QMA1 0xf
-#define BSFACTOR_QMA2 0xf0
-#define BSFACTOR_QMA3 0xf00
-#define BSFACTOR_QMA4 0xf000
-#define BSFACTOR_QMA5 0xf0000
-#define BSFACTOR_QMA6 0xf0000
-#define BSFACTOR_QMA7 0xf00000
-#define BSFACTOR_QMA8 0xf000000
-#define BSFACTOR_QMA9 0xf0000000
-#define BCSI_SCHEME 0x100000
-
-#define BNOISE_LVL_TOP_SET 0x3
-#define BCHSMOOTH 0x4
-#define BCHSMOOTH_CFG1 0x38
-#define BCHSMOOTH_CFG2 0x1c0
-#define BCHSMOOTH_CFG3 0xe00
-#define BCHSMOOTH_CFG4 0x7000
-#define BMRCMODE 0x800000
-#define BTHEVMCFG 0x7000000
-
-#define BLOOP_FIT_TYPE 0x1
-#define BUPD_CFO 0x40
-#define BUPD_CFO_OFFDATA 0x80
-#define BADV_UPD_CFO 0x100
-#define BADV_TIME_CTRL 0x800
-#define BUPD_CLKO 0x1000
-#define BFC 0x6000
-#define BTRACKING_MODE 0x8000
-#define BPHCMP_ENABLE 0x10000
-#define BUPD_CLKO_LTF 0x20000
-#define BCOM_CH_CFO 0x40000
-#define BCSI_ESTI_MODE 0x80000
-#define BADV_UPD_EQZ 0x100000
-#define BUCHCFG 0x7000000
-#define BUPDEQZ 0x8000000
-
-#define BRX_PESUDO_NOISE_ON 0x20000000
-#define BRX_PESUDO_NOISE_A 0xff
-#define BRX_PESUDO_NOISE_B 0xff00
-#define BRX_PESUDO_NOISE_C 0xff0000
-#define BRX_PESUDO_NOISE_D 0xff000000
-#define BRX_PESUDO_NOISESTATE_A 0xffff
-#define BRX_PESUDO_NOISESTATE_B 0xffff0000
-#define BRX_PESUDO_NOISESTATE_C 0xffff
-#define BRX_PESUDO_NOISESTATE_D 0xffff0000
-
-#define BZEBRA1_HSSIENABLE 0x8
-#define BZEBRA1_TRXCONTROL 0xc00
-#define BZEBRA1_TRXGAINSETTING 0x07f
-#define BZEBRA1_RXCOUNTER 0xc00
-#define BZEBRA1_TXCHANGEPUMP 0x38
-#define BZEBRA1_RXCHANGEPUMP 0x7
-#define BZEBRA1_CHANNEL_NUM 0xf80
-#define BZEBRA1_TXLPFBW 0x400
-#define BZEBRA1_RXLPFBW 0x600
-
-#define BRTL8256REG_MODE_CTRL1 0x100
-#define BRTL8256REG_MODE_CTRL0 0x40
-#define BRTL8256REG_TXLPFBW 0x18
-#define BRTL8256REG_RXLPFBW 0x600
-
-#define BRTL8258_TXLPFBW 0xc
-#define BRTL8258_RXLPFBW 0xc00
-#define BRTL8258_RSSILPFBW 0xc0
-
-#define BBYTE0 0x1
-#define BBYTE1 0x2
-#define BBYTE2 0x4
-#define BBYTE3 0x8
-#define BWORD0 0x3
-#define BWORD1 0xc
-#define BWORD 0xf
-
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-
-#define MASK4BITS 0x0f
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
-#define BMASKBYTE0 0xff
-#define BMASKBYTE1 0xff00
-#define BMASKBYTE2 0xff0000
-#define BMASKBYTE3 0xff000000
-#define BMASKHWORD 0xffff0000
-#define BMASKLWORD 0x0000ffff
-#define BMASKDWORD 0xffffffff
-#define BMASK12BITS 0xfff
-#define BMASKH4BITS 0xf0000000
-#define BMASKOFDM_D 0xffc00000
-#define BMASKCCK 0x3f3f3f3f
-
-#define BRFREGOFFSETMASK 0xfffff
-
-/* WOL bit information */
-#define WOL_REASON_PTK_UPDATE BIT(0)
-#define WOL_REASON_GTK_UPDATE BIT(1)
-#define WOL_REASON_DISASSOC BIT(2)
-#define WOL_REASON_DEAUTH BIT(3)
-#define WOL_REASON_FW_DISCONNECT BIT(4)
-
-#endif
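Most of the removed reg.h is field-composition macros in the style of _LLT_INIT_DATA()/_LLT_INIT_ADDR()/_LLT_OP(). The stand-alone sketch below uses hypothetical look-alike macros (deliberately renamed, not the driver's) to show how such fields are OR-ed into one command word and decoded again:

/* Build and decode an LLT-style command word; llt_cmd() is made up. */
#include <stdint.h>
#include <stdio.h>

#define LLT_INIT_DATA(x)	((uint32_t)(x) & 0xFF)
#define LLT_INIT_ADDR(x)	(((uint32_t)(x) & 0xFF) << 8)
#define LLT_OP(x)		(((uint32_t)(x) & 0x3) << 30)
#define LLT_OP_VALUE(x)		(((uint32_t)(x) >> 30) & 0x3)

#define LLT_WRITE_ACCESS	0x1

static uint32_t llt_cmd(uint8_t addr, uint8_t data)
{
	/* OR the shifted fields together, as composition macros of this kind intend */
	return LLT_OP(LLT_WRITE_ACCESS) | LLT_INIT_ADDR(addr) |
	       LLT_INIT_DATA(data);
}

int main(void)
{
	uint32_t cmd = llt_cmd(0x12, 0x34);

	printf("cmd = 0x%08x, op = %u\n", (unsigned int)cmd,
	       (unsigned int)LLT_OP_VALUE(cmd));
	return 0;
}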
diff --git a/drivers/staging/rtlwifi/rtl8822be/sw.c b/drivers/staging/rtlwifi/rtl8822be/sw.c
deleted file mode 100644
index a2ab19fa94f2..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/sw.c
+++ /dev/null
@@ -1,470 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../wifi.h"
-#include "../core.h"
-#include "../pci.h"
-#include "../base.h"
-#include "reg.h"
-#include "def.h"
-#include "phy.h"
-#include "hw.h"
-#include "sw.h"
-#include "fw.h"
-#include "trx.h"
-#include "led.h"
-#include "../btcoexist/rtl_btc.h"
-#include "../halmac/rtl_halmac.h"
-#include "../phydm/rtl_phydm.h"
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-
-static void rtl8822be_init_aspm_vars(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
-	/* disable ASPM for AMD by default */
- rtlpci->const_amdpci_aspm = 0;
-
- /*
- * ASPM PS mode.
- * 0 - Disable ASPM,
- * 1 - Enable ASPM without Clock Req,
- * 2 - Enable ASPM with Clock Req,
- * 3 - Always Enable ASPM with Clock Req,
- * 4 - Always Enable ASPM without Clock Req.
- * set default to RTL8822BE:3 RTL8822B:2
- *
- */
- rtlpci->const_pci_aspm = 3;
-
- /*Setting for PCI-E device */
- rtlpci->const_devicepci_aspm_setting = 0x03;
-
- /*Setting for PCI-E bridge */
- rtlpci->const_hostpci_aspm_setting = 0x02;
-
- /*
- * In Hw/Sw Radio Off situation.
- * 0 - Default,
- * 1 - From ASPM setting without low Mac Pwr,
- * 2 - From ASPM setting with low Mac Pwr,
- * 3 - Bus D3
- * set default to RTL8822BE:0 RTL8192SE:2
- */
- rtlpci->const_hwsw_rfoff_d3 = 0;
-
- /*
- * This setting works for those device with
- * backdoor ASPM setting such as EPHY setting.
- * 0 - Not support ASPM,
- * 1 - Support ASPM,
- * 2 - According to chipset.
- */
- rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
-}
-
-int rtl8822be_init_sw_vars(struct ieee80211_hw *hw)
-{
- int err = 0;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- const char *fw_name;
- struct rtl_phydm_params params;
-
- rtl8822be_bt_reg_init(hw);
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->halmac.ops = rtl_halmac_get_ops_pointer();
- rtlpriv->halmac.ops->halmac_init_adapter(rtlpriv);
-
-	/* must be called after halmac_init_adapter() */
- rtl8822be_read_eeprom_info(hw, &params);
-
-	/* phydm init needs the EEPROM info read above */
- rtlpriv->phydm.ops = rtl_phydm_get_ops_pointer();
- rtlpriv->phydm.ops->phydm_init_priv(rtlpriv, &params);
-
- rtlpriv->dm.dm_initialgain_enable = 1;
- rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
- /*rtlpriv->dm.thermalvalue = 0;*/
- rtlpriv->dm.useramask = 1; /* turn on RA */
- rtlpci->transmit_config = CFENDFORM | BIT(15);
-
- rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
-	/* the following two lines register the 5G band; refer to _rtl_init_mac80211() */
- rtlpriv->rtlhal.bandset = BAND_ON_BOTH;
- rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
-
- rtlpci->receive_config = (RCR_APPFCS |
- RCR_APP_MIC |
- RCR_APP_ICV |
- RCR_APP_PHYST_RXFF |
- RCR_VHT_DACK |
- RCR_HTC_LOC_CTRL |
- /*RCR_AMF |*/
- RCR_CBSSID_BCN |
- RCR_CBSSID_DATA |
- /*RCR_ACF |*/
- /*RCR_ADF |*/
- /*RCR_AICV |*/
- /*RCR_ACRC32 |*/
- RCR_AB |
- RCR_AM |
- RCR_APM |
- 0);
-
- rtlpci->irq_mask[0] = (u32)(IMR_PSTIMEOUT |
- /*IMR_TBDER |*/
- /*IMR_TBDOK |*/
- /*IMR_BCNDMAINT0 |*/
- IMR_GTINT3 |
- IMR_HSISR_IND_ON_INT |
- IMR_C2HCMD |
- IMR_HIGHDOK |
- IMR_MGNTDOK |
- IMR_BKDOK |
- IMR_BEDOK |
- IMR_VIDOK |
- IMR_VODOK |
- IMR_RDU |
- IMR_ROK |
- 0);
-
- rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | IMR_TXFOVW | 0);
- rtlpci->irq_mask[3] = (u32)(BIT_SETH2CDOK_MASK | 0);
-
- /* for LPS & IPS */
- rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
- rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
- rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- if (rtlpriv->cfg->mod_params->disable_watchdog)
- pr_info("watchdog disabled\n");
- rtlpriv->psc.reg_fwctrl_lps = 2;
- rtlpriv->psc.reg_max_lps_awakeintvl = 2;
-	/* To disable ASPM, set const_support_pciaspm = 0 */
- rtl8822be_init_aspm_vars(hw);
-
- if (rtlpriv->psc.reg_fwctrl_lps == 1)
- rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
- else if (rtlpriv->psc.reg_fwctrl_lps == 2)
- rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
- else if (rtlpriv->psc.reg_fwctrl_lps == 3)
- rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
-
- /* for early mode */
- rtlpriv->rtlhal.earlymode_enable = false;
-
- /*low power */
- rtlpriv->psc.low_power_enable = false;
-
- /* for firmware buf */
- rtlpriv->rtlhal.pfirmware = vzalloc(0x40000);
- if (!rtlpriv->rtlhal.pfirmware) {
- /*pr_err("Can't alloc buffer for fw\n");*/
- return 1;
- }
-
- /* request fw */
- fw_name = "rtlwifi/rtl8822befw.bin";
-
- rtlpriv->max_fw_size = 0x40000;
- pr_info("Using firmware %s\n", fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev,
- GFP_KERNEL, hw, rtl_fw_cb);
- if (err) {
- pr_err("Failed to request firmware!\n");
- return 1;
- }
-
- /* init table of tx power by rate & limit */
- rtl8822be_load_txpower_by_rate(hw);
- rtl8822be_load_txpower_limit(hw);
-
- return 0;
-}
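
A note on the firmware path above: request_firmware_nowait() returns immediately, and the firmware loader later calls rtl_fw_cb() with the loaded image (or with a NULL pointer on failure). rtl_fw_cb() lives in the shared rtlwifi core rather than in this file, so the sketch below is only a hypothetical illustration, under that assumption, of the kind of work such a completion callback does: validate the size against max_fw_size, copy the image into the buffer allocated above, and release the firmware object. It is not compilable outside a kernel tree and is not the real callback.

/* Hypothetical sketch of a firmware completion callback.  The real
 * rtl_fw_cb() in the shared rtlwifi core also parses the firmware
 * header; treat this purely as an illustration.
 */
static void example_fw_cb(const struct firmware *firmware, void *context)
{
	struct ieee80211_hw *hw = context;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (!firmware) {
		pr_err("firmware request failed\n");
		return;
	}

	if (firmware->size > rtlpriv->max_fw_size) {
		pr_err("firmware too large (%zu bytes)\n", firmware->size);
		release_firmware(firmware);
		return;
	}

	memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
	rtlpriv->rtlhal.fwsize = firmware->size;
	release_firmware(firmware);
}
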
-
-void rtl8822be_deinit_sw_vars(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->halmac.ops->halmac_deinit_adapter(rtlpriv);
- rtlpriv->phydm.ops->phydm_deinit_priv(rtlpriv);
-
- if (rtlpriv->rtlhal.pfirmware) {
- vfree(rtlpriv->rtlhal.pfirmware);
- rtlpriv->rtlhal.pfirmware = NULL;
- }
-}
-
-/* get bt coexist status */
-bool rtl8822be_get_btc_status(void)
-{
- return true;
-}
-
-static void rtl8822be_phydm_watchdog(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, 0xc00);
- if (tmp & 0xFF000000) { /* Recover 0xC00: 0xF800000C --> 0x0000000C */
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "found regaddr_c00=%08X\n", tmp);
- tmp &= ~0xFF000000;
- rtl_write_dword(rtlpriv, 0xc00, tmp);
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "apply regaddr_c00=%08X\n", tmp);
- }
-
- rtlpriv->phydm.ops->phydm_watchdog(rtlpriv);
-}
-
-static struct rtl_hal_ops rtl8822be_hal_ops = {
- .init_sw_vars = rtl8822be_init_sw_vars,
- .deinit_sw_vars = rtl8822be_deinit_sw_vars,
- .read_eeprom_info = rtl8822be_read_eeprom_info_dummy,
- .interrupt_recognized = rtl8822be_interrupt_recognized,
- .hw_init = rtl8822be_hw_init,
- .hw_disable = rtl8822be_card_disable,
- .hw_suspend = rtl8822be_suspend,
- .hw_resume = rtl8822be_resume,
- .enable_interrupt = rtl8822be_enable_interrupt,
- .disable_interrupt = rtl8822be_disable_interrupt,
- .set_network_type = rtl8822be_set_network_type,
- .set_chk_bssid = rtl8822be_set_check_bssid,
- .set_qos = rtl8822be_set_qos,
- .set_bcn_reg = rtl8822be_set_beacon_related_registers,
- .set_bcn_intv = rtl8822be_set_beacon_interval,
- .update_interrupt_mask = rtl8822be_update_interrupt_mask,
- .get_hw_reg = rtl8822be_get_hw_reg,
- .set_hw_reg = rtl8822be_set_hw_reg,
- .update_rate_tbl = rtl8822be_update_hal_rate_tbl,
- .pre_fill_tx_bd_desc = rtl8822be_pre_fill_tx_bd_desc,
- .rx_desc_buff_remained_cnt = rtl8822be_rx_desc_buff_remained_cnt,
- .rx_check_dma_ok = rtl8822be_rx_check_dma_ok,
- .fill_tx_desc = rtl8822be_tx_fill_desc,
- .fill_tx_special_desc = rtl8822be_tx_fill_special_desc,
- .query_rx_desc = rtl8822be_rx_query_desc,
- .radio_onoff_checking = rtl8822be_gpio_radio_on_off_checking,
- .switch_channel = rtl8822be_phy_sw_chnl,
- .set_channel_access = rtl8822be_update_channel_access_setting,
- .set_bw_mode = rtl8822be_phy_set_bw_mode,
- .dm_watchdog = rtl8822be_phydm_watchdog,
- .scan_operation_backup = rtl8822be_phy_scan_operation_backup,
- .set_rf_power_state = rtl8822be_phy_set_rf_power_state,
- .led_control = rtl8822be_led_control,
- .set_desc = rtl8822be_set_desc,
- .get_desc = rtl8822be_get_desc,
- .is_tx_desc_closed = rtl8822be_is_tx_desc_closed,
- .get_available_desc = rtl8822be_get_available_desc,
- .tx_polling = rtl8822be_tx_polling,
- .enable_hw_sec = rtl8822be_enable_hw_security_config,
- .set_key = rtl8822be_set_key,
- .init_sw_leds = rtl8822be_init_sw_leds,
- .get_bbreg = rtl8822be_phy_query_bb_reg,
- .set_bbreg = rtl8822be_phy_set_bb_reg,
- .get_rfreg = rtl8822be_phy_query_rf_reg,
- .set_rfreg = rtl8822be_phy_set_rf_reg,
- .fill_h2c_cmd = rtl8822be_fill_h2c_cmd,
- .set_default_port_id_cmd = rtl8822be_set_default_port_id_cmd,
- .get_btc_status = rtl8822be_get_btc_status,
- .rx_command_packet = rtl8822be_rx_command_packet,
- .c2h_content_parsing = rtl8822be_c2h_content_parsing,
- /* ops for halmac cb */
- .halmac_cb_init_mac_register = rtl8822be_halmac_cb_init_mac_register,
- .halmac_cb_init_bb_rf_register =
- rtl8822be_halmac_cb_init_bb_rf_register,
- .halmac_cb_write_data_rsvd_page =
- rtl8822b_halmac_cb_write_data_rsvd_page,
- .halmac_cb_write_data_h2c = rtl8822b_halmac_cb_write_data_h2c,
- /* ops for phydm cb */
- .get_txpower_index = rtl8822be_get_txpower_index,
- .set_tx_power_index_by_rs = rtl8822be_phy_set_tx_power_index_by_rs,
- .store_tx_power_by_rate = rtl8822be_store_tx_power_by_rate,
- .phy_set_txpower_limit = rtl8822be_phy_set_txpower_limit,
-};
-
-static struct rtl_mod_params rtl8822be_mod_params = {
- .sw_crypto = false,
- .inactiveps = true,
- .swctrl_lps = false,
- .fwctrl_lps = true,
- .msi_support = true,
- .dma64 = false,
- .aspm_support = 1,
- .disable_watchdog = false,
- .debug_level = 0,
- .debug_mask = 0,
-};
-
-static struct rtl_hal_cfg rtl8822be_hal_cfg = {
- .bar_id = 2,
- .write_readback = false,
- .name = "rtl8822be_pci",
- .ops = &rtl8822be_hal_ops,
- .mod_params = &rtl8822be_mod_params,
- .spec_ver = RTL_SPEC_NEW_RATEID | RTL_SPEC_SUPPORT_VHT |
- RTL_SPEC_NEW_FW_C2H,
- .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL_8822B,
- .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN_8822B,
- .maps[SYS_CLK] = REG_SYS_CLK_CTRL_8822B,
- .maps[MAC_RCR_AM] = AM,
- .maps[MAC_RCR_AB] = AB,
- .maps[MAC_RCR_ACRC32] = ACRC32,
- .maps[MAC_RCR_ACF] = ACF,
- .maps[MAC_RCR_AAP] = AAP,
- .maps[MAC_HIMR] = REG_HIMR0_8822B,
- .maps[MAC_HIMRE] = REG_HIMR1_8822B,
-
- .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS_8822B,
-
- .maps[EFUSE_TEST] = REG_LDO_EFUSE_CTRL_8822B,
- .maps[EFUSE_CTRL] = REG_EFUSE_CTRL_8822B,
- .maps[EFUSE_CLK] = 0,
- .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL_8822B,
- .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
- .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
- .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
- .maps[EFUSE_ANA8M] = ANA8M,
- .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
- .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
- .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
- .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
-
- .maps[RWCAM] = REG_CAMCMD_8822B,
- .maps[WCAMI] = REG_CAMWRITE_8822B,
- .maps[RCAMO] = REG_CAMREAD_8822B,
- .maps[CAMDBG] = REG_CAMDBG_8822B,
- .maps[SECR] = REG_SECCFG_8822B,
- .maps[SEC_CAM_NONE] = CAM_NONE,
- .maps[SEC_CAM_WEP40] = CAM_WEP40,
- .maps[SEC_CAM_TKIP] = CAM_TKIP,
- .maps[SEC_CAM_AES] = CAM_AES,
- .maps[SEC_CAM_WEP104] = CAM_WEP104,
-
- .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
- .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
- .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
- .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
- .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
- .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
- /* .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */ /*need check*/
- .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
- .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
- .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
- .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
- .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
- .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
- .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
- /* .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/
- /* .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/
-
- .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
- .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
- .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0,
- .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
- .maps[RTL_IMR_RDU] = IMR_RDU,
- .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
- .maps[RTL_IMR_H2CDOK] = IMR_H2CDOK,
- .maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
- .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
- .maps[RTL_IMR_TBDER] = IMR_TBDER,
- .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
- .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
- .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
- .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
- .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
- .maps[RTL_IMR_VODOK] = IMR_VODOK,
- .maps[RTL_IMR_ROK] = IMR_ROK,
- .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),
-
- .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
-
-	/* VHT highest rate */
- .maps[RTL_RC_VHT_RATE_1SS_MCS7] = DESC_RATEVHT1SS_MCS7,
- .maps[RTL_RC_VHT_RATE_1SS_MCS8] = DESC_RATEVHT1SS_MCS8,
- .maps[RTL_RC_VHT_RATE_1SS_MCS9] = DESC_RATEVHT1SS_MCS9,
- .maps[RTL_RC_VHT_RATE_2SS_MCS7] = DESC_RATEVHT2SS_MCS7,
- .maps[RTL_RC_VHT_RATE_2SS_MCS8] = DESC_RATEVHT2SS_MCS8,
- .maps[RTL_RC_VHT_RATE_2SS_MCS9] = DESC_RATEVHT2SS_MCS9,
-};
-
-static const struct pci_device_id rtl8822be_pci_ids[] = {
- {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtl8822be_hal_cfg)},
- {},
-};
-
-MODULE_DEVICE_TABLE(pci, rtl8822be_pci_ids);
-
-MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
-MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Realtek 8822BE 802.11ac PCI wireless");
-MODULE_FIRMWARE("rtlwifi/rtl8822befw.bin");
-
-module_param_named(swenc, rtl8822be_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug_level, rtl8822be_mod_params.debug_level, int, 0644);
-module_param_named(debug_mask, rtl8822be_mod_params.debug_mask, ullong, 0644);
-module_param_named(ips, rtl8822be_mod_params.inactiveps, bool, 0444);
-module_param_named(swlps, rtl8822be_mod_params.swctrl_lps, bool, 0444);
-module_param_named(fwlps, rtl8822be_mod_params.fwctrl_lps, bool, 0444);
-module_param_named(msi, rtl8822be_mod_params.msi_support, bool, 0444);
-module_param_named(dma64, rtl8822be_mod_params.dma64, bool, 0444);
-module_param_named(aspm, rtl8822be_mod_params.aspm_support, int, 0444);
-module_param_named(disable_watchdog, rtl8822be_mod_params.disable_watchdog,
- bool, 0444);
-MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
-MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
-MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
-MODULE_PARM_DESC(dma64, "Set to 1 to use DMA 64 (default 0)\n");
-MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
-MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
-MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
-MODULE_PARM_DESC(disable_watchdog,
- "Set to 1 to disable the watchdog (default 0)\n");
-
-static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
-
-static struct pci_driver rtl8822be_driver = {
- .name = KBUILD_MODNAME,
- .id_table = rtl8822be_pci_ids,
- .probe = rtl_pci_probe,
- .remove = rtl_pci_disconnect,
- .driver.pm = &rtlwifi_pm_ops,
-};
-
-module_pci_driver(rtl8822be_driver);
diff --git a/drivers/staging/rtlwifi/rtl8822be/sw.h b/drivers/staging/rtlwifi/rtl8822be/sw.h
deleted file mode 100644
index 0983a8e9605b..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/sw.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8822B_SW_H__
-#define __RTL8822B_SW_H__
-
-int rtl8822be_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8822be_deinit_sw_vars(struct ieee80211_hw *hw);
-bool rtl8822be_get_btc_status(void);
-#endif
diff --git a/drivers/staging/rtlwifi/rtl8822be/trx.c b/drivers/staging/rtlwifi/rtl8822be/trx.c
deleted file mode 100644
index 8fff2ea344eb..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/trx.c
+++ /dev/null
@@ -1,1004 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include "../wifi.h"
-#include "../pci.h"
-#include "../base.h"
-#include "../stats.h"
-#include "reg.h"
-#include "def.h"
-#include "phy.h"
-#include "trx.h"
-#include "led.h"
-#include "fw.h"
-
-#include <linux/vermagic.h>
-
-static u8 _rtl8822be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
-{
- switch (hw_queue) {
- case BEACON_QUEUE:
- return QSLT_BEACON;
- case H2C_QUEUE:
- return QSLT_CMD;
- case MGNT_QUEUE:
- return QSLT_MGNT;
- case HIGH_QUEUE:
- return QSLT_HIGH;
- default:
- return skb->priority;
- }
-}
-
-static void _rtl8822be_query_rxphystatus(struct ieee80211_hw *hw, u8 *phystrpt,
- struct ieee80211_hdr *hdr,
- struct rtl_stats *pstatus)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->phydm.ops->phydm_query_phy_status(rtlpriv, phystrpt, hdr,
- pstatus);
-
-	/* UI BSS list signal strength (in percentage),
-	 * scaled into the range 0~100.
-	 */
- pstatus->signalstrength =
- (u8)(rtl_signal_scale_mapping(hw, pstatus->rx_pwdb_all));
-}
-
-static void _rtl8822be_translate_rx_signal_stuff(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct rtl_stats *pstatus,
- u8 *p_phystrpt)
-{
- struct ieee80211_hdr *hdr;
- u8 *tmp_buf;
-
- tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift +
- 24;
-
- hdr = (struct ieee80211_hdr *)tmp_buf;
-
- /* query phy status */
- _rtl8822be_query_rxphystatus(hw, p_phystrpt, hdr, pstatus);
-
- /* packet statistics */
- if (pstatus->packet_beacon && pstatus->packet_matchbssid)
- rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++;
-
- if (pstatus->packet_matchbssid &&
- ieee80211_is_data_qos(hdr->frame_control) &&
- !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
- struct ieee80211_qos_hdr *hdr_qos =
- (struct ieee80211_qos_hdr *)tmp_buf;
- u16 tid = le16_to_cpu(hdr_qos->qos_ctrl) & 0xf;
-
- if (tid != 0 && tid != 3)
- rtl_priv(hw)->dm.dbginfo.num_non_be_pkt++;
- }
-
- /* signal statistics */
- if (p_phystrpt)
- rtl_process_phyinfo(hw, tmp_buf, pstatus);
-}
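
The constant 24 added above is RX_DESC_SIZE from trx.h: the 802.11 header sits behind the 24-byte RX descriptor, the PHY-status/driver-info block and the 0-3 byte shift. A minimal standalone sketch of that offset arithmetic, assuming the buffer layout implied by the code (the helper name is mine, not the driver's):

#include <assert.h>

#define RX_DESC_SIZE		24	/* see trx.h */
#define RX_DRV_INFO_SIZE_UNIT	8

/* Byte offset of the 802.11 header inside the receive buffer, assuming
 * the layout implied by _rtl8822be_translate_rx_signal_stuff():
 * [rx desc][driver info][shift pad][802.11 header ...]
 */
static unsigned int hdr_offset(unsigned int drv_info_units, unsigned int shift)
{
	return RX_DESC_SIZE + drv_info_units * RX_DRV_INFO_SIZE_UNIT + shift;
}

int main(void)
{
	assert(hdr_offset(0, 0) == 24);	/* no PHY status appended */
	assert(hdr_offset(4, 2) == 58);	/* 32 bytes of driver info + 2 */
	return 0;
}
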
-
-static void _rtl8822be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
- u8 *virtualaddress)
-{
- u32 dwtmp = 0;
-
- memset(virtualaddress, 0, 8);
-
- SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
- if (ptcb_desc->empkt_num == 1) {
- dwtmp = ptcb_desc->empkt_len[0];
- } else {
- dwtmp = ptcb_desc->empkt_len[0];
- dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
- dwtmp += ptcb_desc->empkt_len[1];
- }
- SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
-
- if (ptcb_desc->empkt_num <= 3) {
- dwtmp = ptcb_desc->empkt_len[2];
- } else {
- dwtmp = ptcb_desc->empkt_len[2];
- dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
- dwtmp += ptcb_desc->empkt_len[3];
- }
- SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
- if (ptcb_desc->empkt_num <= 5) {
- dwtmp = ptcb_desc->empkt_len[4];
- } else {
- dwtmp = ptcb_desc->empkt_len[4];
- dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
- dwtmp += ptcb_desc->empkt_len[5];
- }
- SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
- SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
- if (ptcb_desc->empkt_num <= 7) {
- dwtmp = ptcb_desc->empkt_len[6];
- } else {
- dwtmp = ptcb_desc->empkt_len[6];
- dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
- dwtmp += ptcb_desc->empkt_len[7];
- }
- SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
- if (ptcb_desc->empkt_num <= 9) {
- dwtmp = ptcb_desc->empkt_len[8];
- } else {
- dwtmp = ptcb_desc->empkt_len[8];
- dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
- dwtmp += ptcb_desc->empkt_len[9];
- }
- SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
-}
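
Each early-mode length slot above holds the length of one queued packet, optionally combined with the next one; when two lengths are packed, the first is padded up to a 4-byte boundary and a further 4 bytes are added before the second length. A small standalone check of that padding arithmetic (the helper name is illustrative, not from the driver):

#include <assert.h>

/* Pad len up to the next 4-byte boundary and add 4 bytes, mirroring the
 * expression used in _rtl8822be_insert_emcontent().
 */
static unsigned int em_pad(unsigned int len)
{
	return len + ((len % 4) ? (4 - len % 4) : 0) + 4;
}

int main(void)
{
	assert(em_pad(100) == 104);	/* already aligned: +4 only */
	assert(em_pad(101) == 108);	/* 3 bytes of padding + 4 */
	assert(em_pad(103) == 108);	/* 1 byte of padding + 4 */
	return 0;
}
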
-
-static bool rtl8822be_get_rxdesc_is_ht(struct ieee80211_hw *hw, u8 *pdesc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 rx_rate = 0;
-
- rx_rate = GET_RX_DESC_RX_RATE(pdesc);
-
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
-
- if (rx_rate >= DESC_RATEMCS0 && rx_rate <= DESC_RATEMCS15)
- return true;
- else
- return false;
-}
-
-static bool rtl8822be_get_rxdesc_is_vht(struct ieee80211_hw *hw, u8 *pdesc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 rx_rate = 0;
-
- rx_rate = GET_RX_DESC_RX_RATE(pdesc);
-
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
-
- if (rx_rate >= DESC_RATEVHT1SS_MCS0)
- return true;
- else
- return false;
-}
-
-static u8 rtl8822be_get_rx_vht_nss(struct ieee80211_hw *hw, u8 *pdesc)
-{
- u8 rx_rate = 0;
- u8 vht_nss = 0;
-
- rx_rate = GET_RX_DESC_RX_RATE(pdesc);
-
- if (rx_rate >= DESC_RATEVHT1SS_MCS0 &&
- rx_rate <= DESC_RATEVHT1SS_MCS9)
- vht_nss = 1;
- else if ((rx_rate >= DESC_RATEVHT2SS_MCS0) &&
- (rx_rate <= DESC_RATEVHT2SS_MCS9))
- vht_nss = 2;
-
- return vht_nss;
-}
-
-bool rtl8822be_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *status,
- struct ieee80211_rx_status *rx_status, u8 *pdesc,
- struct sk_buff *skb)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *p_phystrpt = NULL;
- struct ieee80211_hdr *hdr;
-
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
-
- if (GET_RX_DESC_C2H(pdesc) == 0)
- status->packet_report_type = NORMAL_RX;
- else
- status->packet_report_type = C2H_PACKET;
-
- status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
- status->rx_drvinfo_size =
- (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * RX_DRV_INFO_SIZE_UNIT;
- status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03);
- status->icv = (u16)GET_RX_DESC_ICV_ERR(pdesc);
- status->crc = (u16)GET_RX_DESC_CRC32(pdesc);
- status->hwerror = (status->crc | status->icv);
- status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- status->rate = (u8)GET_RX_DESC_RX_RATE(pdesc);
- status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
- status->isfirst_ampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
- status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- status->is_ht = rtl8822be_get_rxdesc_is_ht(hw, pdesc);
- status->is_vht = rtl8822be_get_rxdesc_is_vht(hw, pdesc);
- status->vht_nss = rtl8822be_get_rx_vht_nss(hw, pdesc);
- status->is_cck = RX_HAL_IS_CCK_RATE(status->rate);
-
- status->macid = GET_RX_DESC_MACID(pdesc);
- if (GET_RX_DESC_PATTERN_MATCH(pdesc))
- status->wake_match = BIT(2);
- else if (GET_RX_DESC_MAGIC_WAKE(pdesc))
- status->wake_match = BIT(1);
- else if (GET_RX_DESC_UNICAST_WAKE(pdesc))
- status->wake_match = BIT(0);
- else
- status->wake_match = 0;
- if (status->wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
-			 "Get Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
-
- if (phystatus)
- p_phystrpt = (skb->data + status->rx_bufshift + 24);
-
- hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size +
- status->rx_bufshift + 24);
-
- if (status->crc)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
- if (status->is_ht)
- rx_status->encoding = RX_ENC_HT;
- if (status->is_vht)
- rx_status->encoding = RX_ENC_VHT;
-
- rx_status->nss = status->vht_nss;
-
- rx_status->flag |= RX_FLAG_MACTIME_START;
-
-	/* The hardware sets status->decrypted to true if it finds that
-	 * the frame is an open data frame or a management frame.
-	 */
-	/* However, the hardware does not decrypt robust management
-	 * frames (IEEE 802.11w) even though it still sets
-	 * status->decrypted to true, so clear the flag here for such
-	 * frames and let the mac80211 software path decrypt them.
-	 */
- if (status->decrypted) {
- if ((!_ieee80211_is_robust_mgmt_frame(hdr)) &&
- (ieee80211_has_protected(hdr->frame_control)))
- rx_status->flag |= RX_FLAG_DECRYPTED;
- else
- rx_status->flag &= ~RX_FLAG_DECRYPTED;
- }
-
-	/* rate_idx: index of the data rate in the band's
-	 * supported rates, or the MCS index if HT rates
-	 * are used (RX_FLAG_HT)
-	 */
-	/* Note: this differs from the Windows definition */
- rx_status->rate_idx = rtlwifi_rate_mapping(
- hw, status->is_ht, status->is_vht, status->rate);
-
- rx_status->mactime = status->timestamp_low;
-
- _rtl8822be_translate_rx_signal_stuff(hw, skb, status, p_phystrpt);
-
-	/* the info below is filled in by _rtl8822be_translate_rx_signal_stuff() */
- if (!p_phystrpt)
- goto label_no_physt;
-
- rx_status->signal = status->recvsignalpower;
-
- if (status->rx_packet_bw == HT_CHANNEL_WIDTH_20_40)
- rx_status->bw = RATE_INFO_BW_40;
- else if (status->rx_packet_bw == HT_CHANNEL_WIDTH_80)
- rx_status->bw = RATE_INFO_BW_80;
-
-label_no_physt:
-
- return true;
-}
-
-void rtl8822be_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
- u8 queue_index)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 first_seg;
- u8 last_seg;
- u16 total_len;
- u16 read_cnt = 0;
-
- if (!header_desc)
- return;
-
- do {
- total_len = (u16)GET_RX_BUFFER_DESC_TOTAL_LENGTH(header_desc);
- first_seg = (u8)GET_RX_BUFFER_DESC_FS(header_desc);
- last_seg = (u8)GET_RX_BUFFER_DESC_LS(header_desc);
-
- if (read_cnt++ > 20) {
- RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "RX chk DMA over %d times\n", read_cnt);
- break;
- }
-
- } while (total_len == 0 && first_seg == 0 && last_seg == 0);
-}
-
-u16 rtl8822be_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 desc_idx_hw = 0, desc_idx_host = 0, remind_cnt = 0;
- u32 tmp_4byte = 0;
-
- u32 rw_mask = 0x1ff;
-
- tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_RXBD_IDX_8822B);
- desc_idx_hw = (u16)((tmp_4byte >> 16) & rw_mask);
- desc_idx_host = (u16)(tmp_4byte & rw_mask);
-
-	/* there may be no data, do not rx */
- if (desc_idx_hw == desc_idx_host)
- return 0;
-
- remind_cnt =
- (desc_idx_hw > desc_idx_host) ?
- (desc_idx_hw - desc_idx_host) :
- (RX_DESC_NUM_8822BE - (desc_idx_host - desc_idx_hw));
-
- rtlpci->rx_ring[queue_index].next_rx_rp = desc_idx_host;
-
- return remind_cnt;
-}
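
The hardware and host indices read above are 9-bit values taken from REG_RXQ_RXBD_IDX_8822B, so the count of descriptors still pending for the host must handle wraparound explicitly. A standalone sketch of the same arithmetic; the ring size of 512 is assumed here purely for illustration (the driver uses RX_DESC_NUM_8822BE):

#include <assert.h>

#define RING_SIZE 512	/* illustrative; the driver uses RX_DESC_NUM_8822BE */

/* Descriptors the hardware has filled but the host has not yet consumed,
 * with wraparound handled as in rtl8822be_rx_desc_buff_remained_cnt().
 */
static unsigned int rx_remained(unsigned int hw_idx, unsigned int host_idx)
{
	if (hw_idx == host_idx)
		return 0;	/* nothing pending */
	return (hw_idx > host_idx) ? (hw_idx - host_idx)
				   : (RING_SIZE - (host_idx - hw_idx));
}

int main(void)
{
	assert(rx_remained(10, 4) == 6);
	assert(rx_remained(3, 508) == 7);	/* wrapped around */
	return 0;
}
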
-
-static u16 get_desc_address_from_queue_index(u16 queue_index)
-{
- /*
-	 * Note: accessing these registers is expensive.
- */
- u16 desc_address = REG_BEQ_TXBD_IDX_8822B;
-
- switch (queue_index) {
- case BK_QUEUE:
- desc_address = REG_BKQ_TXBD_IDX_8822B;
- break;
- case BE_QUEUE:
- desc_address = REG_BEQ_TXBD_IDX_8822B;
- break;
- case VI_QUEUE:
- desc_address = REG_VIQ_TXBD_IDX_8822B;
- break;
- case VO_QUEUE:
- desc_address = REG_VOQ_TXBD_IDX_8822B;
- break;
- case BEACON_QUEUE:
- desc_address = REG_BEQ_TXBD_IDX_8822B;
- break;
- case H2C_QUEUE:
- desc_address = REG_H2CQ_TXBD_IDX_8822B;
- break;
- case MGNT_QUEUE:
- desc_address = REG_MGQ_TXBD_IDX_8822B;
- break;
- case HIGH_QUEUE:
- desc_address = REG_HI0Q_TXBD_IDX_8822B;
- break;
- case HCCA_QUEUE:
- desc_address = REG_BEQ_TXBD_IDX_8822B;
- break;
- default:
- break;
- }
- return desc_address;
-}
-
-/* number of free tx descriptors that can be used */
-u16 rtl8822be_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx];
-
- return calc_fifo_space(ring->cur_tx_rp, ring->cur_tx_wp,
- TX_DESC_NUM_8822B);
-}
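
calc_fifo_space() is provided by the shared rtlwifi PCI code, not by this file. Assuming conventional ring semantics in which one slot stays unused so that rp == wp unambiguously means "empty", the available space can be computed as in this illustrative reimplementation (not necessarily the exact shared helper):

#include <assert.h>

/* Free slots between the read pointer (rp) and write pointer (wp) of a
 * ring with 'size' entries, keeping one slot unused.  Illustrative only;
 * the driver relies on calc_fifo_space() from the shared rtlwifi code.
 */
static unsigned int fifo_space(unsigned int rp, unsigned int wp,
			       unsigned int size)
{
	return (rp + size - wp - 1) % size;
}

int main(void)
{
	assert(fifo_space(0, 0, 8) == 7);	/* empty ring */
	assert(fifo_space(5, 4, 8) == 0);	/* full ring */
	assert(fifo_space(2, 6, 8) == 3);	/* wrapped */
	return 0;
}
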
-
-void rtl8822be_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, u8 *tx_bd_desc,
- u8 *desc, u8 queue_index,
- struct sk_buff *skb, dma_addr_t data_addr)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 pkt_len = skb->len;
- u16 desc_size = 48; /*tx desc size*/
- u32 psblen = 0;
- u32 total_packet_size = 0;
- u16 current_bd_desc;
- u8 i = 0;
- /*u16 real_desc_size = 0x28;*/
- u16 append_early_mode_size = 0;
- u8 segmentnum = 1 << (RTL8822BE_SEG_NUM + 1);
- dma_addr_t desc_dma_addr;
- bool dma64 = rtlpriv->cfg->mod_params->dma64;
-
- current_bd_desc = rtlpci->tx_ring[queue_index].cur_tx_wp;
-
- total_packet_size = desc_size + pkt_len;
-
- if (rtlpriv->rtlhal.earlymode_enable) {
- if (queue_index < BEACON_QUEUE) {
- append_early_mode_size = 8;
- total_packet_size += append_early_mode_size;
- }
- }
-
-	/* number of 128-byte pages (rounded up) */
- psblen = (total_packet_size - 1) / 128 + 1;
-
- /* tx desc addr */
- desc_dma_addr = rtlpci->tx_ring[queue_index].dma +
- (current_bd_desc * TX_DESC_SIZE);
-
- /* Reset */
- SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, 0);
- SET_TX_BUFF_DESC_PSB(tx_bd_desc, 0);
- SET_TX_BUFF_DESC_OWN(tx_bd_desc, 0);
-
- for (i = 1; i < segmentnum; i++) {
- SET_TXBUFFER_DESC_LEN_WITH_OFFSET(tx_bd_desc, i, 0);
- SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(tx_bd_desc, i, 0);
- SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(tx_bd_desc, i, 0);
- SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(tx_bd_desc, i, 0, dma64);
- }
-
- /* Clear all status */
- CLEAR_PCI_TX_DESC_CONTENT(desc, TX_DESC_SIZE);
-
- if (rtlpriv->rtlhal.earlymode_enable) {
- if (queue_index < BEACON_QUEUE)
- SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size + 8);
- else
- SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size);
- } else {
- SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size);
- }
- SET_TX_BUFF_DESC_PSB(tx_bd_desc, psblen);
- SET_TX_BUFF_DESC_ADDR_LOW_0(tx_bd_desc, desc_dma_addr);
- SET_TX_BUFF_DESC_ADDR_HIGH_0(tx_bd_desc, ((u64)desc_dma_addr >> 32),
- dma64);
-
- SET_TXBUFFER_DESC_LEN_WITH_OFFSET(tx_bd_desc, 1, pkt_len);
- SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(tx_bd_desc, 1, 0);
- SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(tx_bd_desc, 1, data_addr);
- SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(tx_bd_desc, 1,
- ((u64)data_addr >> 32), dma64);
-
- SET_TX_DESC_TXPKTSIZE(desc, (u16)(pkt_len));
-}
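
The PSB value filled in above counts the 128-byte pages occupied by the tx descriptor plus the payload (plus the optional 8-byte early-mode header); (total - 1) / 128 + 1 is simply a ceiling division. A quick standalone check:

#include <assert.h>

/* Ceiling division into 128-byte pages, as used for the PSB field in
 * rtl8822be_pre_fill_tx_bd_desc().
 */
static unsigned int tx_pages(unsigned int total_packet_size)
{
	return (total_packet_size - 1) / 128 + 1;
}

int main(void)
{
	assert(tx_pages(48 + 80) == 1);		/* exactly one page */
	assert(tx_pages(48 + 81) == 2);		/* spills into a second page */
	assert(tx_pages(48 + 1500) == 13);	/* typical MTU-sized frame */
	return 0;
}
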
-
-static u8 rtl8822be_bw_mapping(struct ieee80211_hw *hw,
- struct rtl_tcb_desc *ptcb_desc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 bw_setting_of_desc = 0;
-
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "%s, current_chan_bw %d, packet_bw %d\n", __func__,
- rtlphy->current_chan_bw, ptcb_desc->packet_bw);
-
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
- if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80)
- bw_setting_of_desc = 2;
- else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40)
- bw_setting_of_desc = 1;
- else
- bw_setting_of_desc = 0;
- } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40 ||
- ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80)
- bw_setting_of_desc = 1;
- else
- bw_setting_of_desc = 0;
- } else {
- bw_setting_of_desc = 0;
- }
-
- return bw_setting_of_desc;
-}
-
-static u8 rtl8822be_sc_mapping(struct ieee80211_hw *hw,
- struct rtl_tcb_desc *ptcb_desc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
- u8 sc_setting_of_desc = 0;
-
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
- if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80) {
- sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE;
- } else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
- if (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)
- sc_setting_of_desc =
- VHT_DATA_SC_40_LOWER_OF_80MHZ;
- else if (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER)
- sc_setting_of_desc =
- VHT_DATA_SC_40_UPPER_OF_80MHZ;
- else
- RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- "%s: Not Correct Primary40MHz Setting\n",
- __func__);
- } else {
- if (mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER &&
- mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER)
- sc_setting_of_desc =
- VHT_DATA_SC_20_LOWEST_OF_80MHZ;
- else if ((mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER) &&
- (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER))
- sc_setting_of_desc =
- VHT_DATA_SC_20_LOWER_OF_80MHZ;
- else if ((mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER) &&
- (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER))
- sc_setting_of_desc =
- VHT_DATA_SC_20_UPPER_OF_80MHZ;
- else if ((mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER) &&
- (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER))
- sc_setting_of_desc =
- VHT_DATA_SC_20_UPPERST_OF_80MHZ;
- else
- RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- "%s: Not Correct Primary40MHz Setting\n",
- __func__);
- }
- } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
- sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE;
- } else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20) {
- if (mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_UPPER) {
- sc_setting_of_desc =
- VHT_DATA_SC_20_UPPER_OF_80MHZ;
- } else if (mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER) {
- sc_setting_of_desc =
- VHT_DATA_SC_20_LOWER_OF_80MHZ;
- } else {
- sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE;
- }
- }
- } else {
- sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE;
- }
-
- return sc_setting_of_desc;
-}
-
-void rtl8822be_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
- u8 *pdesc_tx, u8 *pbd_desc_tx,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta, struct sk_buff *skb,
- u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 *pdesc = (u8 *)pdesc_tx;
- u16 seq_number;
- __le16 fc = hdr->frame_control;
- u8 fw_qsel = _rtl8822be_map_hwqueue_to_fwqueue(skb, hw_queue);
- bool firstseg =
- ((hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
- bool lastseg = ((hdr->frame_control &
- cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
- dma_addr_t mapping;
- u8 short_gi = 0;
-
- seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
- rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
-	/* reserve 8 bytes for the AMPDU early-mode header */
- if (rtlhal->earlymode_enable) {
- skb_push(skb, EM_HDR_LEN);
- memset(skb->data, 0, EM_HDR_LEN);
- }
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "DMA mapping error");
- return;
- }
-
- if (pbd_desc_tx)
- rtl8822be_pre_fill_tx_bd_desc(hw, pbd_desc_tx, pdesc, hw_queue,
- skb, mapping);
-
- if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
- firstseg = true;
- lastseg = true;
- }
- if (firstseg) {
- if (rtlhal->earlymode_enable) {
- SET_TX_DESC_PKT_OFFSET(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc,
- USB_HWDESC_HEADER_LEN + EM_HDR_LEN);
- if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
- _rtl8822be_insert_emcontent(ptcb_desc,
- (u8 *)(skb->data));
- }
- } else {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
- }
-
- /* tx report */
- rtl_get_tx_report(ptcb_desc, pdesc, hw);
-
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G &&
- ptcb_desc->hw_rate < DESC_RATE6M) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_WARNING,
- "hw_rate=0x%X is invalid in 5G\n",
- ptcb_desc->hw_rate);
- ptcb_desc->hw_rate = DESC_RATE6M;
- }
- SET_TX_DESC_DATARATE(pdesc, ptcb_desc->hw_rate);
-
- if (ptcb_desc->hw_rate > DESC_RATEMCS0)
- short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
- else
- short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_EN(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x1F);
- }
- SET_TX_DESC_SW_SEQ(pdesc, seq_number);
- SET_TX_DESC_RTSEN(pdesc, ((ptcb_desc->rts_enable &&
- !ptcb_desc->cts_enable) ?
- 1 :
- 0));
- SET_TX_DESC_HW_RTS_EN(pdesc, 0);
- SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
-
- SET_TX_DESC_RTSRATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(
- pdesc,
- ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
- (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
- (ptcb_desc->rts_use_shortgi ? 1 : 0)));
-
- if (ptcb_desc->tx_enable_sw_calc_duration)
- SET_TX_DESC_NAVUSEHDR(pdesc, 1);
-
- SET_TX_DESC_DATA_BW(pdesc, rtl8822be_bw_mapping(hw, ptcb_desc));
- SET_TX_DESC_DATA_SC(pdesc, rtl8822be_sc_mapping(hw, ptcb_desc));
-
- if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
-
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
- }
- if (info->control.hw_key) {
- struct ieee80211_key_conf *key = info->control.hw_key;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
- break;
- default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
- break;
- }
- }
-
- SET_TX_DESC_QSEL(pdesc, fw_qsel);
-
- if (rtlphy->current_channel > 14) {
- /* OFDM 6M */
- SET_TX_DESC_DATA_RTY_LOWEST_RATE(pdesc, 4);
- SET_TX_DESC_RTS_RTY_LOWEST_RATE(pdesc, 4);
- } else {
- /* CCK 1M */
- SET_TX_DESC_DATA_RTY_LOWEST_RATE(pdesc, 0);
- SET_TX_DESC_RTS_RTY_LOWEST_RATE(pdesc, 0);
- }
- SET_TX_DESC_DISDATAFB(pdesc,
- ptcb_desc->disable_ratefallback ? 1 : 0);
- SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
-
- /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/
- /* Set TxRate and RTSRate in TxDesc */
-		/* This prevents the initial Tx rate of newly arriving packets */
-		/* from being overwritten by the retried packet rate. */
- if (!ptcb_desc->use_driver_rate) {
- /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */
- /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
- }
- if (ieee80211_is_data_qos(fc)) {
- if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
- SET_TX_DESC_RDG_EN(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
- }
- }
-
- SET_TX_DESC_PORT_ID(pdesc, 0);
- SET_TX_DESC_MULTIPLE_PORT(pdesc, 0);
- }
-
- SET_TX_DESC_LS(pdesc, (lastseg ? 1 : 0));
- if (rtlpriv->dm.useramask) {
- SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
- } else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
- }
-
- SET_TX_DESC_MOREFRAG(pdesc, (lastseg ? 0 : 1));
- if (ptcb_desc->multicast || ptcb_desc->broadcast) {
- SET_TX_DESC_BMC(pdesc, 1);
-		/* BMC frames must not be aggregated */
- SET_TX_DESC_AGG_EN(pdesc, 0);
- }
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
-
-	/* debug purpose: used to check whether the tx desc is correct */
- /*rtlpriv->halmac.ops->halmac_chk_txdesc(rtlpriv, pdesc,
- * skb->len + USB_HWDESC_HEADER_LEN);
- */
-}
-
-void rtl8822be_tx_fill_special_desc(struct ieee80211_hw *hw, u8 *pdesc,
- u8 *pbd_desc, struct sk_buff *skb,
- u8 hw_queue)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u8 fw_queue;
- u8 txdesc_len = 48;
-
- dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
-
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "DMA mapping error");
- return;
- }
-
- rtl8822be_pre_fill_tx_bd_desc(hw, pbd_desc, pdesc, hw_queue, skb,
- mapping);
-
-	/* hw_queue should be BEACON_QUEUE or H2C_QUEUE,
-	 * so passing skb = NULL here is safe
-	 */
- fw_queue = _rtl8822be_map_hwqueue_to_fwqueue(NULL, hw_queue);
-
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len);
-
- /* common part for BEACON and H2C */
- SET_TX_DESC_TXPKTSIZE((u8 *)pdesc, (u16)(skb->len));
-
- SET_TX_DESC_QSEL(pdesc, fw_queue);
-
- if (hw_queue == H2C_QUEUE) {
- /* fill H2C */
- SET_TX_DESC_OFFSET(pdesc, 0);
-
- } else {
- /* fill beacon */
- SET_TX_DESC_OFFSET(pdesc, txdesc_len);
-
- SET_TX_DESC_DATARATE(pdesc, DESC_RATE1M);
-
- SET_TX_DESC_SW_SEQ(pdesc, 0);
-
- SET_TX_DESC_RATE_ID(pdesc, 7);
- SET_TX_DESC_MACID(pdesc, 0);
-
- SET_TX_DESC_LS(pdesc, 1);
-
- SET_TX_DESC_OFFSET(pdesc, 48);
-
- SET_TX_DESC_USE_RATE(pdesc, 1);
- }
-
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
- pdesc, txdesc_len);
-}
-
-void rtl8822be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
- u8 desc_name, u8 *val)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 q_idx = *val;
- bool dma64 = rtlpriv->cfg->mod_params->dma64;
-
- if (istx) {
- switch (desc_name) {
- case HW_DESC_OWN: {
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx];
- u16 max_tx_desc = ring->entries;
-
- if (q_idx == BEACON_QUEUE) {
- /* in case of beacon, pdesc is BD desc. */
- u8 *pbd_desc = pdesc;
-
- ring->cur_tx_wp = 0;
- ring->cur_tx_rp = 0;
- SET_TX_BUFF_DESC_OWN(pbd_desc, 1);
- return;
- }
-
-			/* the caller must ensure a tx desc is available */
- ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc);
-
- rtl_write_word(
- rtlpriv,
- get_desc_address_from_queue_index(
- q_idx),
- ring->cur_tx_wp);
- } break;
- }
- } else {
- switch (desc_name) {
- case HW_DESC_RX_PREPARE:
- SET_RX_BUFFER_DESC_LS(pdesc, 0);
- SET_RX_BUFFER_DESC_FS(pdesc, 0);
- SET_RX_BUFFER_DESC_TOTAL_LENGTH(pdesc, 0);
-
- SET_RX_BUFFER_DESC_DATA_LENGTH(
- pdesc, MAX_RECEIVE_BUFFER_SIZE + RX_DESC_SIZE);
-
- SET_RX_BUFFER_PHYSICAL_LOW(
- pdesc, (*(dma_addr_t *)val) & DMA_BIT_MASK(32));
- SET_RX_BUFFER_PHYSICAL_HIGH(
- pdesc, ((u64)(*(dma_addr_t *)val) >> 32),
- dma64);
- break;
- default:
- WARN_ONCE(true, "ERR rxdesc :%d not process\n",
- desc_name);
- break;
- }
- }
-}
-
-u64 rtl8822be_get_desc(struct ieee80211_hw *hw,
- u8 *pdesc, bool istx, u8 desc_name)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u64 ret = 0;
- u8 *pbd_desc = pdesc;
- bool dma64 = rtlpriv->cfg->mod_params->dma64;
-
- if (istx) {
- switch (desc_name) {
- case HW_DESC_TXBUFF_ADDR:
- ret = GET_TXBUFFER_DESC_ADDR_LOW(pbd_desc, 1);
- ret |= (u64)GET_TXBUFFER_DESC_ADDR_HIGH(pbd_desc, 1,
- dma64) << 32;
- break;
- default:
- WARN_ONCE(true, "ERR txdesc :%d not process\n",
- desc_name);
- break;
- }
- } else {
- switch (desc_name) {
- case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
- break;
- default:
- WARN_ONCE(true, "ERR rxdesc :%d not process\n",
- desc_name);
- break;
- }
- }
- return ret;
-}
-
-bool rtl8822be_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue,
- u16 index)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool ret = false;
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
- u16 cur_tx_rp, cur_tx_wp;
- u16 tmp16;
-
- /*
- * design rule:
- * idx <= cur_tx_rp <= hw_rp <= cur_tx_wp = hw_wp
- */
-
- if (index == ring->cur_tx_rp) {
-		/* update only when sw_rp reaches hw_rp */
- tmp16 = rtl_read_word(
- rtlpriv,
- get_desc_address_from_queue_index(hw_queue) + 2);
-
- cur_tx_rp = tmp16 & 0x01ff;
- cur_tx_wp = ring->cur_tx_wp;
-
- /* don't need to update ring->cur_tx_wp */
- ring->cur_tx_rp = cur_tx_rp;
- }
-
- if (index == ring->cur_tx_rp)
- ret = false; /* no more */
- else
- ret = true; /* more */
-
- if (hw_queue == BEACON_QUEUE)
- ret = true;
-
- if (rtlpriv->rtlhal.driver_is_goingto_unload ||
- rtlpriv->psc.rfoff_reason > RF_CHANGE_BY_PS)
- ret = true;
-
- return ret;
-}
-
-void rtl8822be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (hw_queue == BEACON_QUEUE) {
- /* kick start */
- rtl_write_byte(
- rtlpriv, REG_RX_RXBD_NUM_8822B + 1,
- rtl_read_byte(rtlpriv, REG_RX_RXBD_NUM_8822B + 1) |
- BIT(4));
- }
-}
-
-u32 rtl8822be_rx_command_packet(struct ieee80211_hw *hw,
- const struct rtl_stats *status,
- struct sk_buff *skb)
-{
- u32 result = 0;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- switch (status->packet_report_type) {
- case NORMAL_RX:
- result = 0;
- break;
- case C2H_PACKET:
- rtl8822be_c2h_packet_handler(hw, skb->data, (u8)skb->len);
- result = 1;
- break;
- default:
- RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE,
- "Unknown packet type %d\n",
- status->packet_report_type);
- break;
- }
-
- return result;
-}
diff --git a/drivers/staging/rtlwifi/rtl8822be/trx.h b/drivers/staging/rtlwifi/rtl8822be/trx.h
deleted file mode 100644
index d7ba7f3e58b7..000000000000
--- a/drivers/staging/rtlwifi/rtl8822be/trx.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL8822B_TRX_H__
-#define __RTL8822B_TRX_H__
-
-#include "../halmac/halmac_tx_desc_nic.h"
-#include "../halmac/halmac_rx_desc_nic.h"
-
-#define TX_DESC_SIZE 64
-
-#define RX_DRV_INFO_SIZE_UNIT 8
-
-#define TX_DESC_NEXT_DESC_OFFSET 48
-#define USB_HWDESC_HEADER_LEN 48
-
-#define RX_DESC_SIZE 24
-#define MAX_RECEIVE_BUFFER_SIZE 8192
-
-#define SET_EARLYMODE_PKTNUM(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __val)
-#define SET_EARLYMODE_LEN0(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr, 4, 15, __val)
-#define SET_EARLYMODE_LEN1(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr, 16, 2, __val)
-#define SET_EARLYMODE_LEN1_1(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr, 19, 13, __val)
-#define SET_EARLYMODE_LEN1_2(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr + 4, 0, 2, __val)
-#define SET_EARLYMODE_LEN2(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr + 4, 2, 15, __val)
-#define SET_EARLYMODE_LEN2_1(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr, 2, 4, __val)
-#define SET_EARLYMODE_LEN2_2(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr + 4, 0, 8, __val)
-#define SET_EARLYMODE_LEN3(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr + 4, 17, 15, __val)
-#define SET_EARLYMODE_LEN4(__paddr, __val) \
- SET_BITS_TO_LE_4BYTE(__paddr + 4, 20, 12, __val)
-
-/* TX/RX buffer descriptor */
-
-/* for Txfilldescroptor8822be, fill the desc content. */
-#define SET_TXBUFFER_DESC_LEN_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16), 0, 16, __val)
-#define SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16), 31, 1, __val)
-#define SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16) + 4, 0, 32, __val)
-#define SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(pbd, off, val, dma64) \
- (dma64 ? SET_BITS_TO_LE_4BYTE((pbd) + ((off) * 16) + 8, 0, 32, val) : 0)
-#define GET_TXBUFFER_DESC_ADDR_LOW(__pdesc, __offset) \
- LE_BITS_TO_4BYTE((__pdesc) + ((__offset) * 16) + 4, 0, 32)
-#define GET_TXBUFFER_DESC_ADDR_HIGH(pbd, off, dma64) \
- (dma64 ? LE_BITS_TO_4BYTE((pbd) + ((off) * 16) + 8, 0, 32) : 0)
-
-/* Dword 0 */
-#define SET_TX_BUFF_DESC_LEN_0(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
-#define SET_TX_BUFF_DESC_PSB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 16, 15, __val)
-#define SET_TX_BUFF_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-/* Dword 1 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_0(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 4, 0, 32, __val)
-/* Dword 2 */
-#define SET_TX_BUFF_DESC_ADDR_HIGH_0(bdesc, val, dma64) \
- SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(bdesc, 0, val, dma64)
-/* Dword 3 / RESERVED 0 */
-
-/* RX buffer */
-
-/* DWORD 0 */
-#define SET_RX_BUFFER_DESC_DATA_LENGTH(__rx_status_desc, __val) \
- SET_BITS_TO_LE_4BYTE(__rx_status_desc, 0, 14, __val)
-#define SET_RX_BUFFER_DESC_LS(__rx_status_desc, __val) \
- SET_BITS_TO_LE_4BYTE(__rx_status_desc, 15, 1, __val)
-#define SET_RX_BUFFER_DESC_FS(__rx_status_desc, __val) \
- SET_BITS_TO_LE_4BYTE(__rx_status_desc, 16, 1, __val)
-#define SET_RX_BUFFER_DESC_TOTAL_LENGTH(__rx_status_desc, __val) \
- SET_BITS_TO_LE_4BYTE(__rx_status_desc, 16, 15, __val)
-
-#define GET_RX_BUFFER_DESC_OWN(__rx_status_desc) \
- LE_BITS_TO_4BYTE(__rx_status_desc, 31, 1)
-#define GET_RX_BUFFER_DESC_LS(__rx_status_desc) \
- LE_BITS_TO_4BYTE(__rx_status_desc, 15, 1)
-#define GET_RX_BUFFER_DESC_FS(__rx_status_desc) \
- LE_BITS_TO_4BYTE(__rx_status_desc, 16, 1)
-#define GET_RX_BUFFER_DESC_TOTAL_LENGTH(__rx_status_desc) \
- LE_BITS_TO_4BYTE(__rx_status_desc, 16, 15)
-
-/* DWORD 1 */
-#define SET_RX_BUFFER_PHYSICAL_LOW(__rx_status_desc, __val) \
- SET_BITS_TO_LE_4BYTE(__rx_status_desc + 4, 0, 32, __val)
-
-/* DWORD 2 */
-#define SET_RX_BUFFER_PHYSICAL_HIGH(__rx_status_desc, __val, dma64) \
- (dma64 ? SET_BITS_TO_LE_4BYTE((__rx_status_desc) + 8, 0, 32, __val) : 0)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
- do { \
- if (_size > TX_DESC_NEXT_DESC_OFFSET) \
- memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
- else \
- memset(__pdesc, 0, _size); \
- } while (0)
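
All of the accessors above are thin wrappers around SET_BITS_TO_LE_4BYTE()/LE_BITS_TO_4BYTE() from the shared rtlwifi headers, which read-modify-write a bit field inside a little-endian 32-bit word at a given byte offset. The helpers below are a portable, illustrative reimplementation of that assumed behaviour, not the real macros:

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins for LE_BITS_TO_4BYTE()/SET_BITS_TO_LE_4BYTE(). */
static uint32_t le_bits_get(const uint8_t *p, unsigned int shift,
			    unsigned int len)
{
	uint32_t dw = p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) |
		      ((uint32_t)p[3] << 24);
	uint32_t mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1);

	return (dw >> shift) & mask;
}

static void le_bits_set(uint8_t *p, unsigned int shift, unsigned int len,
			uint32_t val)
{
	uint32_t dw = p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) |
		      ((uint32_t)p[3] << 24);
	uint32_t mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1);

	dw = (dw & ~(mask << shift)) | ((val & mask) << shift);
	p[0] = dw;
	p[1] = dw >> 8;
	p[2] = dw >> 16;
	p[3] = dw >> 24;
}

int main(void)
{
	uint8_t desc[16] = { 0 };

	/* e.g. SET_TX_BUFF_DESC_PSB(desc, 3): bits 16..30 of dword 0 */
	le_bits_set(desc, 16, 15, 3);
	assert(le_bits_get(desc, 16, 15) == 3);
	assert(le_bits_get(desc, 0, 14) == 0);	/* LEN field untouched */
	return 0;
}
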
-
-void rtl8822be_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
- u8 queue_index);
-u16 rtl8822be_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw,
- u8 queue_index);
-u16 rtl8822be_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
-void rtl8822be_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, u8 *tx_bd_desc,
- u8 *desc, u8 queue_index,
- struct sk_buff *skb, dma_addr_t addr);
-
-void rtl8822be_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
- u8 *pdesc_tx, u8 *pbd_desc_tx,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta, struct sk_buff *skb,
- u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
-void rtl8822be_tx_fill_special_desc(struct ieee80211_hw *hw, u8 *pdesc,
- u8 *pbd_desc, struct sk_buff *skb,
- u8 hw_queue);
-bool rtl8822be_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *status,
- struct ieee80211_rx_status *rx_status, u8 *pdesc,
- struct sk_buff *skb);
-void rtl8822be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
- u8 desc_name, u8 *val);
-u64 rtl8822be_get_desc(struct ieee80211_hw *hw,
- u8 *pdesc, bool istx, u8 desc_name);
-bool rtl8822be_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue,
- u16 index);
-void rtl8822be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
-void rtl8822be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
- bool firstseg, bool lastseg,
- struct sk_buff *skb);
-u32 rtl8822be_rx_command_packet(struct ieee80211_hw *hw,
- const struct rtl_stats *status,
- struct sk_buff *skb);
-#endif
diff --git a/drivers/staging/rtlwifi/stats.c b/drivers/staging/rtlwifi/stats.c
deleted file mode 100644
index 149b665a0e5c..000000000000
--- a/drivers/staging/rtlwifi/stats.c
+++ /dev/null
@@ -1,249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#include "wifi.h"
-#include "stats.h"
-#include <linux/export.h>
-
-u8 rtl_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
-u8 rtl_evm_db_to_percentage(s8 value)
-{
- s8 ret_val = clamp(-value, 0, 33) * 3;
-
- if (ret_val == 99)
- ret_val = 100;
-
- return ret_val;
-}
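
Both helpers above fold raw PHY report values onto a 0-100 scale: antenna power in dBm is clamped to the (-100, 0) range and offset by 100, and EVM in dB is negated, clamped to [0, 33] and multiplied by 3. A standalone sanity check of a few sample points, reusing the same arithmetic under local names:

#include <assert.h>

/* Same arithmetic as rtl_query_rxpwrpercentage() and
 * rtl_evm_db_to_percentage(), pulled out for a quick check.
 */
static unsigned int pwr_percentage(signed char antpower)
{
	if (antpower <= -100 || antpower >= 20)
		return 0;
	if (antpower >= 0)
		return 100;
	return 100 + antpower;
}

static unsigned int evm_percentage(signed char evm_db)
{
	int v = -evm_db;

	if (v < 0)
		v = 0;
	else if (v > 33)
		v = 33;
	v *= 3;
	return (v == 99) ? 100 : v;
}

int main(void)
{
	assert(pwr_percentage(-60) == 40);	/* -60 dBm -> 40% */
	assert(pwr_percentage(-5) == 95);
	assert(evm_percentage(-25) == 75);	/* -25 dB EVM -> 75% */
	assert(evm_percentage(-33) == 100);	/* 33 * 3 == 99, rounded to 100 */
	return 0;
}
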
-
-static long rtl_translate_todbm(struct ieee80211_hw *hw,
- u8 signal_strength_index)
-{
- long signal_power;
-
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
- return signal_power;
-}
-
-long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
-
- return retsig;
-}
-
-static void rtl_process_ui_rssi(struct ieee80211_hw *hw,
- struct rtl_stats *pstatus)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 rfpath;
- u32 last_rssi, tmpval;
-
- if (!pstatus->packet_toself && !pstatus->packet_beacon)
- return;
-
- rtlpriv->stats.pwdb_all_cnt += pstatus->rx_pwdb_all;
- rtlpriv->stats.rssi_calculate_cnt++;
-
- if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
- rtlpriv->stats.ui_rssi.total_num = PHY_RSSI_SLID_WIN_MAX;
- last_rssi = rtlpriv->stats.ui_rssi.elements[
- rtlpriv->stats.ui_rssi.index];
- rtlpriv->stats.ui_rssi.total_val -= last_rssi;
- }
- rtlpriv->stats.ui_rssi.total_val += pstatus->signalstrength;
- rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++] =
- pstatus->signalstrength;
- if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
- rtlpriv->stats.ui_rssi.index = 0;
- tmpval = rtlpriv->stats.ui_rssi.total_val /
- rtlpriv->stats.ui_rssi.total_num;
- rtlpriv->stats.signal_strength = rtl_translate_todbm(hw, (u8)tmpval);
- pstatus->rssi = rtlpriv->stats.signal_strength;
-
- if (pstatus->is_cck)
- return;
-
- for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
- rfpath++) {
- if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- pstatus->rx_mimo_signalstrength[rfpath];
- }
- if (pstatus->rx_mimo_signalstrength[rfpath] >
- rtlpriv->stats.rx_rssi_percentage[rfpath]) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstatus->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
- } else {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstatus->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- }
- rtlpriv->stats.rx_snr_db[rfpath] = pstatus->rx_snr[rfpath];
- rtlpriv->stats.rx_evm_dbm[rfpath] =
- pstatus->rx_mimo_evm_dbm[rfpath];
- rtlpriv->stats.rx_cfo_short[rfpath] =
- pstatus->cfo_short[rfpath];
- rtlpriv->stats.rx_cfo_tail[rfpath] = pstatus->cfo_tail[rfpath];
- }
-}
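
Two filters are combined above: a sliding-window average over the most recent PHY_RSSI_SLID_WIN_MAX samples for the UI RSSI, and a per-RF-path exponential filter using RX_SMOOTH_FACTOR (new = (old * 19 + sample) / 20, bumped by one when the incoming sample is higher than the running value). A compact standalone sketch of the exponential step:

#include <assert.h>

#define RX_SMOOTH_FACTOR 20

/* One step of the per-path RSSI smoothing: seed on the first sample,
 * otherwise new = (old * (N - 1) + sample) / N, plus one when the
 * sample exceeds the running value (as the driver does).
 */
static unsigned int smooth_rssi(unsigned int old, unsigned int sample)
{
	unsigned int val;

	if (old == 0)
		return sample;	/* first sample seeds the filter */

	val = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR;
	if (sample > old)
		val += 1;
	return val;
}

int main(void)
{
	unsigned int rssi = 0;

	rssi = smooth_rssi(rssi, 60);	/* seeded at 60 */
	rssi = smooth_rssi(rssi, 80);	/* (60 * 19 + 80) / 20 + 1 = 62 */
	assert(rssi == 62);
	rssi = smooth_rssi(rssi, 40);	/* (62 * 19 + 40) / 20 = 60 */
	assert(rssi == 60);
	return 0;
}
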
-
-static void rtl_update_rxsignalstatistics(struct ieee80211_hw *hw,
- struct rtl_stats *pstatus)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int weighting = 0;
-
- if (rtlpriv->stats.recv_signal_power == 0)
- rtlpriv->stats.recv_signal_power = pstatus->recvsignalpower;
- if (pstatus->recvsignalpower > rtlpriv->stats.recv_signal_power)
- weighting = 5;
- else if (pstatus->recvsignalpower < rtlpriv->stats.recv_signal_power)
- weighting = (-5);
- rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
- 5 + pstatus->recvsignalpower + weighting) / 6;
-}
-
-static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_sta_info *drv_priv = NULL;
- struct ieee80211_sta *sta = NULL;
- long undec_sm_pwdb;
-
- rcu_read_lock();
- if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
- sta = rtl_find_sta(hw, pstatus->psaddr);
-
- /* adhoc or ap mode */
- if (sta) {
- drv_priv = (struct rtl_sta_info *)sta->drv_priv;
- undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
- } else {
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- }
-
- if (undec_sm_pwdb < 0)
- undec_sm_pwdb = pstatus->rx_pwdb_all;
- if (pstatus->rx_pwdb_all > (u32)undec_sm_pwdb) {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undec_sm_pwdb = undec_sm_pwdb + 1;
- } else {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- }
-
- if (sta)
- drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb;
- else
- rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
- rcu_read_unlock();
-
- rtl_update_rxsignalstatistics(hw, pstatus);
-}
-
-static void rtl_process_ui_link_quality(struct ieee80211_hw *hw,
- struct rtl_stats *pstatus)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_evm, n_stream, tmpval;
-
- if (pstatus->signalquality == 0)
- return;
-
- if (rtlpriv->stats.ui_link_quality.total_num++ >=
- PHY_LINKQUALITY_SLID_WIN_MAX) {
- rtlpriv->stats.ui_link_quality.total_num =
- PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm = rtlpriv->stats.ui_link_quality.elements[
- rtlpriv->stats.ui_link_quality.index];
- rtlpriv->stats.ui_link_quality.total_val -= last_evm;
- }
- rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality;
- rtlpriv->stats.ui_link_quality.elements[
- rtlpriv->stats.ui_link_quality.index++] =
- pstatus->signalquality;
- if (rtlpriv->stats.ui_link_quality.index >=
- PHY_LINKQUALITY_SLID_WIN_MAX)
- rtlpriv->stats.ui_link_quality.index = 0;
- tmpval = rtlpriv->stats.ui_link_quality.total_val /
- rtlpriv->stats.ui_link_quality.total_num;
- rtlpriv->stats.signal_quality = tmpval;
- rtlpriv->stats.last_sigstrength_inpercent = tmpval;
- for (n_stream = 0; n_stream < 2; n_stream++) {
- if (pstatus->rx_mimo_sig_qual[n_stream] != -1) {
- if (rtlpriv->stats.rx_evm_percentage[n_stream] == 0) {
- rtlpriv->stats.rx_evm_percentage[n_stream] =
- pstatus->rx_mimo_sig_qual[n_stream];
- }
- rtlpriv->stats.rx_evm_percentage[n_stream] =
- ((rtlpriv->stats.rx_evm_percentage[n_stream]
- * (RX_SMOOTH_FACTOR - 1)) +
- (pstatus->rx_mimo_sig_qual[n_stream] * 1)) /
- (RX_SMOOTH_FACTOR);
- }
- }
-}
-
-void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
- struct rtl_stats *pstatus)
-{
- if (!pstatus->packet_matchbssid)
- return;
-
- rtl_process_ui_rssi(hw, pstatus);
- rtl_process_pwdb(hw, pstatus);
- rtl_process_ui_link_quality(hw, pstatus);
-}
diff --git a/drivers/staging/rtlwifi/stats.h b/drivers/staging/rtlwifi/stats.h
deleted file mode 100644
index aa4f30d40af0..000000000000
--- a/drivers/staging/rtlwifi/stats.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_STATS_H__
-#define __RTL_STATS_H__
-
-#define PHY_RSSI_SLID_WIN_MAX 100
-#define PHY_LINKQUALITY_SLID_WIN_MAX 20
-#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
-
-/* Rx smooth factor */
-#define RX_SMOOTH_FACTOR 20
-
-u8 rtl_query_rxpwrpercentage(s8 antpower);
-u8 rtl_evm_db_to_percentage(s8 value);
-long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
-void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
- struct rtl_stats *pstatus);
-
-#endif
diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h
deleted file mode 100644
index 9cb6c7906213..000000000000
--- a/drivers/staging/rtlwifi/wifi.h
+++ /dev/null
@@ -1,3362 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL_WIFI_H__
-#define __RTL_WIFI_H__
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/sched.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/usb.h>
-#include <net/mac80211.h>
-#include <linux/completion.h>
-#include "debug.h"
-
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-
-#define MASK4BITS 0x0f
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
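/* Editor's note, illustrative sketch only (not part of the removed file):
 * the MASK* constants above are contiguous bitmasks that are typically
 * passed to register accessors such as the get_bbreg()/set_bbreg() ops
 * declared later in this header; the accessor masks the register value
 * and shifts it down by the mask's lowest set bit. The helper name below
 * is an assumption for illustration.
 */
static unsigned int mask_read(unsigned int regval, unsigned int bitmask)
{
	unsigned int shift = 0;

	if (!bitmask)
		return 0;

	/* find the lowest set bit of the mask */
	while (!((bitmask >> shift) & 0x1))
		shift++;

	return (regval & bitmask) >> shift;
}
/* Example: mask_read(reg, MASKBYTE1) yields bits 15:8 of reg. */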
-#define RF_CHANGE_BY_INIT 0
-#define RF_CHANGE_BY_IPS BIT(28)
-#define RF_CHANGE_BY_PS BIT(29)
-#define RF_CHANGE_BY_HW BIT(30)
-#define RF_CHANGE_BY_SW BIT(31)
-
-#define IQK_ADDA_REG_NUM 16
-#define IQK_MAC_REG_NUM 4
-#define IQK_THRESHOLD 8
-
-#define MAX_KEY_LEN 61
-#define KEY_BUF_SIZE 5
-
-/* QoS related. */
-/*aci: 0x00 Best Effort*/
-/*aci: 0x01 Background*/
-/*aci: 0x10 Video*/
-/*aci: 0x11 Voice*/
-/*Max: define total number.*/
-#define AC0_BE 0
-#define AC1_BK 1
-#define AC2_VI 2
-#define AC3_VO 3
-#define AC_MAX 4
-#define QOS_QUEUE_NUM 4
-#define RTL_MAC80211_NUM_QUEUE 5
-#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
-#define RTL_USB_MAX_RX_COUNT 100
-#define QBSS_LOAD_SIZE 5
-#define MAX_WMMELE_LENGTH 64
-#define ASPM_L1_LATENCY 7
-
-#define TOTAL_CAM_ENTRY 32
-
-/*slot time for 11g. */
-#define RTL_SLOT_TIME_9 9
-#define RTL_SLOT_TIME_20 20
-
-/*related to tcp/ip. */
-#define SNAP_SIZE 6
-#define PROTOC_TYPE_SIZE 2
-
-/*related with 802.11 frame*/
-#define MAC80211_3ADDR_LEN 24
-#define MAC80211_4ADDR_LEN 30
-
-#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
-#define CHANNEL_MAX_NUMBER_2G 14
-#define CHANNEL_MAX_NUMBER_5G 49 /* Please refer to
- *"phy_GetChnlGroup8812A" and
- * "Hal_ReadTxPowerInfo8812A"
- */
-#define CHANNEL_MAX_NUMBER_5G_80M 7
-#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
-#define MAX_PG_GROUP 13
-#define CHANNEL_GROUP_MAX_2G 3
-#define CHANNEL_GROUP_IDX_5GL 3
-#define CHANNEL_GROUP_IDX_5GM 6
-#define CHANNEL_GROUP_IDX_5GH 9
-#define CHANNEL_GROUP_MAX_5G 9
-#define AVG_THERMAL_NUM 8
-#define AVG_THERMAL_NUM_88E 4
-#define AVG_THERMAL_NUM_8723BE 4
-#define MAX_TID_COUNT 9
-
-/* for early mode */
-#define FCS_LEN 4
-#define EM_HDR_LEN 8
-
-enum rtl8192c_h2c_cmd {
- H2C_AP_OFFLOAD = 0,
- H2C_SETPWRMODE = 1,
- H2C_JOINBSSRPT = 2,
- H2C_RSVDPAGE = 3,
- H2C_RSSI_REPORT = 5,
- H2C_RA_MASK = 6,
- H2C_MACID_PS_MODE = 7,
- H2C_P2P_PS_OFFLOAD = 8,
- H2C_MAC_MODE_SEL = 9,
- H2C_PWRM = 15,
- H2C_P2P_PS_CTW_CMD = 24,
- MAX_H2CCMD
-};
-
-#define MAX_TX_COUNT 4
-#define MAX_REGULATION_NUM 4
-#define MAX_RF_PATH_NUM 4
-#define MAX_RATE_SECTION_NUM 6 /* = MAX_RATE_SECTION */
-#define MAX_2_4G_BANDWIDTH_NUM 4
-#define MAX_5G_BANDWIDTH_NUM 4
-#define MAX_RF_PATH 4
-#define MAX_CHNL_GROUP_24G 6
-#define MAX_CHNL_GROUP_5G 14
-
-#define TX_PWR_BY_RATE_NUM_BAND 2
-#define TX_PWR_BY_RATE_NUM_RF 4
-#define TX_PWR_BY_RATE_NUM_SECTION 12
-/* compatible with TX_PWR_BY_RATE_NUM_SECTION */
-#define TX_PWR_BY_RATE_NUM_RATE 84
-#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6 /* MAX_RATE_SECTION */
-#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5 /* MAX_RATE_SECTION -1 */
-
-#define BUFDESC_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
-
-#define DEL_SW_IDX_SZ 30
-
-/* For now this applies only to the 8192EE,
- * but it is not working yet, so keep it 0.
- */
-#define RTL8192EE_SEG_NUM BUFDESC_SEG_NUM
-#define RTL8822BE_SEG_NUM BUFDESC_SEG_NUM
-
-enum rf_tx_num {
- RF_1TX = 0,
- RF_2TX,
- RF_MAX_TX_NUM,
- RF_TX_NUM_NONIMPLEMENT,
-};
-
-#define PACKET_NORMAL 0
-#define PACKET_DHCP 1
-#define PACKET_ARP 2
-#define PACKET_EAPOL 3
-
-#define MAX_SUPPORT_WOL_PATTERN_NUM 16
-#define RSVD_WOL_PATTERN_NUM 1
-#define WKFMCAM_ADDR_NUM 6
-#define WKFMCAM_SIZE 24
-
-#define MAX_WOL_BIT_MASK_SIZE 16
-/* Keep the minimum pattern length at 13 here */
-#define MIN_WOL_PATTERN_SIZE 13
-#define MAX_WOL_PATTERN_SIZE 128
-
-#define WAKE_ON_MAGIC_PACKET BIT(0)
-#define WAKE_ON_PATTERN_MATCH BIT(1)
-
-#define WOL_REASON_PTK_UPDATE BIT(0)
-#define WOL_REASON_GTK_UPDATE BIT(1)
-#define WOL_REASON_DISASSOC BIT(2)
-#define WOL_REASON_DEAUTH BIT(3)
-#define WOL_REASON_AP_LOST BIT(4)
-#define WOL_REASON_MAGIC_PKT BIT(5)
-#define WOL_REASON_UNICAST_PKT BIT(6)
-#define WOL_REASON_PATTERN_PKT BIT(7)
-#define WOL_REASON_RTD3_SSID_MATCH BIT(8)
-#define WOL_REASON_REALWOW_V2_WAKEUPPKT BIT(9)
-#define WOL_REASON_REALWOW_V2_ACKLOST BIT(10)
-
-struct rtlwifi_firmware_header {
- __le16 signature;
- u8 category;
- u8 function;
- __le16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- __le16 ramcodesize;
- __le16 rsvd2;
- __le32 svnindex;
- __le32 rsvd3;
- __le32 rsvd4;
- __le32 rsvd5;
-};
-
-struct txpower_info_2g {
- u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
- u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
- /*If only one tx, only BW20 and OFDM are used.*/
- u8 cck_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
-};
-
-struct txpower_info_5g {
- u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_5G];
- /*If only one tx, only BW20, OFDM, BW80 and BW160 are used.*/
- u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
- u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
-};
-
-enum rate_section {
- CCK = 0,
- OFDM,
- HT_MCS0_MCS7,
- HT_MCS8_MCS15,
- VHT_1SSMCS0_1SSMCS9,
- VHT_2SSMCS0_2SSMCS9,
- MAX_RATE_SECTION,
-};
-
-enum intf_type {
- INTF_PCI = 0,
- INTF_USB = 1,
-};
-
-enum radio_path {
- RF90_PATH_A = 0,
- RF90_PATH_B = 1,
- RF90_PATH_C = 2,
- RF90_PATH_D = 3,
-};
-
-enum radio_mask {
- RF_MASK_A = BIT(0),
- RF_MASK_B = BIT(1),
- RF_MASK_C = BIT(2),
- RF_MASK_D = BIT(3),
-};
-
-enum regulation_txpwr_lmt {
- TXPWR_LMT_FCC = 0,
- TXPWR_LMT_MKK = 1,
- TXPWR_LMT_ETSI = 2,
- TXPWR_LMT_WW = 3,
-
- TXPWR_LMT_MAX_REGULATION_NUM = 4
-};
-
-enum rt_eeprom_type {
- EEPROM_93C46,
- EEPROM_93C56,
- EEPROM_BOOT_EFUSE,
-};
-
-enum ttl_status {
- RTL_STATUS_INTERFACE_START = 0,
-};
-
-enum hardware_type {
- HARDWARE_TYPE_RTL8192E,
- HARDWARE_TYPE_RTL8192U,
- HARDWARE_TYPE_RTL8192SE,
- HARDWARE_TYPE_RTL8192SU,
- HARDWARE_TYPE_RTL8192CE,
- HARDWARE_TYPE_RTL8192CU,
- HARDWARE_TYPE_RTL8192DE,
- HARDWARE_TYPE_RTL8192DU,
- HARDWARE_TYPE_RTL8723AE,
- HARDWARE_TYPE_RTL8723U,
- HARDWARE_TYPE_RTL8188EE,
- HARDWARE_TYPE_RTL8723BE,
- HARDWARE_TYPE_RTL8192EE,
- HARDWARE_TYPE_RTL8821AE,
- HARDWARE_TYPE_RTL8812AE,
- HARDWARE_TYPE_RTL8822BE,
-
- /* keep it last */
- HARDWARE_TYPE_NUM
-};
-
-#define RTL_HW_TYPE(rtlpriv) (rtl_hal((struct rtl_priv *)rtlpriv)->hw_type)
-#define IS_NEW_GENERATION_IC(rtlpriv) \
- (RTL_HW_TYPE(rtlpriv) >= HARDWARE_TYPE_RTL8192EE)
-#define IS_HARDWARE_TYPE_8192CE(rtlpriv) \
- (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8192CE)
-#define IS_HARDWARE_TYPE_8812(rtlpriv) \
- (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8812AE)
-#define IS_HARDWARE_TYPE_8821(rtlpriv) \
- (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8821AE)
-#define IS_HARDWARE_TYPE_8723A(rtlpriv) \
- (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8723AE)
-#define IS_HARDWARE_TYPE_8723B(rtlpriv) \
- (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8723BE)
-#define IS_HARDWARE_TYPE_8192E(rtlpriv) \
- (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8192EE)
-#define IS_HARDWARE_TYPE_8822B(rtlpriv) \
- (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8822BE)
-
-#define RX_HAL_IS_CCK_RATE(rxmcs) \
- ((rxmcs) == DESC_RATE1M || \
- (rxmcs) == DESC_RATE2M || \
- (rxmcs) == DESC_RATE5_5M || \
- (rxmcs) == DESC_RATE11M)
-
-enum scan_operation_backup_opt {
- SCAN_OPT_BACKUP = 0,
- SCAN_OPT_BACKUP_BAND0 = 0,
- SCAN_OPT_BACKUP_BAND1,
- SCAN_OPT_RESTORE,
- SCAN_OPT_MAX
-};
-
-/*RF state.*/
-enum rf_pwrstate {
- ERFON,
- ERFSLEEP,
- ERFOFF
-};
-
-struct bb_reg_def {
- u32 rfintfs;
- u32 rfintfi;
- u32 rfintfo;
- u32 rfintfe;
- u32 rf3wire_offset;
- u32 rflssi_select;
- u32 rftxgain_stage;
- u32 rfhssi_para1;
- u32 rfhssi_para2;
- u32 rfsw_ctrl;
- u32 rfagc_control1;
- u32 rfagc_control2;
- u32 rfrxiq_imbal;
- u32 rfrx_afe;
- u32 rftxiq_imbal;
- u32 rftx_afe;
- u32 rf_rb; /* rflssi_readback */
- u32 rf_rbpi; /* rflssi_readbackpi */
-};
-
-enum io_type {
- IO_CMD_PAUSE_DM_BY_SCAN = 0,
- IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0,
- IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1,
- IO_CMD_RESUME_DM_BY_SCAN = 2,
-};
-
-enum hw_variables {
- HW_VAR_ETHER_ADDR = 0x0,
- HW_VAR_MULTICAST_REG = 0x1,
- HW_VAR_BASIC_RATE = 0x2,
- HW_VAR_BSSID = 0x3,
- HW_VAR_MEDIA_STATUS = 0x4,
- HW_VAR_SECURITY_CONF = 0x5,
- HW_VAR_BEACON_INTERVAL = 0x6,
- HW_VAR_ATIM_WINDOW = 0x7,
- HW_VAR_LISTEN_INTERVAL = 0x8,
- HW_VAR_CS_COUNTER = 0x9,
- HW_VAR_DEFAULTKEY0 = 0xa,
- HW_VAR_DEFAULTKEY1 = 0xb,
- HW_VAR_DEFAULTKEY2 = 0xc,
- HW_VAR_DEFAULTKEY3 = 0xd,
- HW_VAR_SIFS = 0xe,
- HW_VAR_R2T_SIFS = 0xf,
- HW_VAR_DIFS = 0x10,
- HW_VAR_EIFS = 0x11,
- HW_VAR_SLOT_TIME = 0x12,
- HW_VAR_ACK_PREAMBLE = 0x13,
- HW_VAR_CW_CONFIG = 0x14,
- HW_VAR_CW_VALUES = 0x15,
- HW_VAR_RATE_FALLBACK_CONTROL = 0x16,
- HW_VAR_CONTENTION_WINDOW = 0x17,
- HW_VAR_RETRY_COUNT = 0x18,
- HW_VAR_TR_SWITCH = 0x19,
- HW_VAR_COMMAND = 0x1a,
- HW_VAR_WPA_CONFIG = 0x1b,
- HW_VAR_AMPDU_MIN_SPACE = 0x1c,
- HW_VAR_SHORTGI_DENSITY = 0x1d,
- HW_VAR_AMPDU_FACTOR = 0x1e,
- HW_VAR_MCS_RATE_AVAILABLE = 0x1f,
- HW_VAR_AC_PARAM = 0x20,
- HW_VAR_ACM_CTRL = 0x21,
- HW_VAR_DIS_REQ_QSIZE = 0x22,
- HW_VAR_CCX_CHNL_LOAD = 0x23,
- HW_VAR_CCX_NOISE_HISTOGRAM = 0x24,
- HW_VAR_CCX_CLM_NHM = 0x25,
- HW_VAR_TXOPLIMIT = 0x26,
- HW_VAR_TURBO_MODE = 0x27,
- HW_VAR_RF_STATE = 0x28,
- HW_VAR_RF_OFF_BY_HW = 0x29,
- HW_VAR_BUS_SPEED = 0x2a,
- HW_VAR_SET_DEV_POWER = 0x2b,
-
- HW_VAR_RCR = 0x2c,
- HW_VAR_RATR_0 = 0x2d,
- HW_VAR_RRSR = 0x2e,
- HW_VAR_CPU_RST = 0x2f,
- HW_VAR_CHECK_BSSID = 0x30,
- HW_VAR_LBK_MODE = 0x31,
- HW_VAR_AES_11N_FIX = 0x32,
- HW_VAR_USB_RX_AGGR = 0x33,
- HW_VAR_USER_CONTROL_TURBO_MODE = 0x34,
- HW_VAR_RETRY_LIMIT = 0x35,
- HW_VAR_INIT_TX_RATE = 0x36,
- HW_VAR_TX_RATE_REG = 0x37,
- HW_VAR_EFUSE_USAGE = 0x38,
- HW_VAR_EFUSE_BYTES = 0x39,
- HW_VAR_AUTOLOAD_STATUS = 0x3a,
- HW_VAR_RF_2R_DISABLE = 0x3b,
- HW_VAR_SET_RPWM = 0x3c,
- HW_VAR_H2C_FW_PWRMODE = 0x3d,
- HW_VAR_H2C_FW_JOINBSSRPT = 0x3e,
- HW_VAR_H2C_FW_MEDIASTATUSRPT = 0x3f,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD = 0x40,
- HW_VAR_FW_PSMODE_STATUS = 0x41,
- HW_VAR_INIT_RTS_RATE = 0x42,
- HW_VAR_RESUME_CLK_ON = 0x43,
- HW_VAR_FW_LPS_ACTION = 0x44,
- HW_VAR_1X1_RECV_COMBINE = 0x45,
- HW_VAR_STOP_SEND_BEACON = 0x46,
- HW_VAR_TSF_TIMER = 0x47,
- HW_VAR_IO_CMD = 0x48,
-
- HW_VAR_RF_RECOVERY = 0x49,
- HW_VAR_H2C_FW_UPDATE_GTK = 0x4a,
- HW_VAR_WF_MASK = 0x4b,
- HW_VAR_WF_CRC = 0x4c,
- HW_VAR_WF_IS_MAC_ADDR = 0x4d,
- HW_VAR_H2C_FW_OFFLOAD = 0x4e,
- HW_VAR_RESET_WFCRC = 0x4f,
-
- HW_VAR_HANDLE_FW_C2H = 0x50,
- HW_VAR_DL_FW_RSVD_PAGE = 0x51,
- HW_VAR_AID = 0x52,
- HW_VAR_HW_SEQ_ENABLE = 0x53,
- HW_VAR_CORRECT_TSF = 0x54,
- HW_VAR_BCN_VALID = 0x55,
- HW_VAR_FWLPS_RF_ON = 0x56,
- HW_VAR_DUAL_TSF_RST = 0x57,
- HW_VAR_SWITCH_EPHY_WOWLAN = 0x58,
- HW_VAR_INT_MIGRATION = 0x59,
- HW_VAR_INT_AC = 0x5a,
- HW_VAR_RF_TIMING = 0x5b,
-
- HAL_DEF_WOWLAN = 0x5c,
- HW_VAR_MRC = 0x5d,
- HW_VAR_KEEP_ALIVE = 0x5e,
- HW_VAR_NAV_UPPER = 0x5f,
-
- HW_VAR_MGT_FILTER = 0x60,
- HW_VAR_CTRL_FILTER = 0x61,
- HW_VAR_DATA_FILTER = 0x62,
-};
-
-enum rt_media_status {
- RT_MEDIA_DISCONNECT = 0,
- RT_MEDIA_CONNECT = 1
-};
-
-enum rt_oem_id {
- RT_CID_DEFAULT = 0,
- RT_CID_8187_ALPHA0 = 1,
- RT_CID_8187_SERCOMM_PS = 2,
- RT_CID_8187_HW_LED = 3,
- RT_CID_8187_NETGEAR = 4,
- RT_CID_WHQL = 5,
- RT_CID_819X_CAMEO = 6,
- RT_CID_819X_RUNTOP = 7,
- RT_CID_819X_SENAO = 8,
- RT_CID_TOSHIBA = 9,
- RT_CID_819X_NETCORE = 10,
- RT_CID_NETTRONIX = 11,
- RT_CID_DLINK = 12,
- RT_CID_PRONET = 13,
- RT_CID_COREGA = 14,
- RT_CID_819X_ALPHA = 15,
- RT_CID_819X_SITECOM = 16,
- RT_CID_CCX = 17,
- RT_CID_819X_LENOVO = 18,
- RT_CID_819X_QMI = 19,
- RT_CID_819X_EDIMAX_BELKIN = 20,
- RT_CID_819X_SERCOMM_BELKIN = 21,
- RT_CID_819X_CAMEO1 = 22,
- RT_CID_819X_MSI = 23,
- RT_CID_819X_ACER = 24,
- RT_CID_819X_HP = 27,
- RT_CID_819X_CLEVO = 28,
- RT_CID_819X_ARCADYAN_BELKIN = 29,
- RT_CID_819X_SAMSUNG = 30,
- RT_CID_819X_WNC_COREGA = 31,
- RT_CID_819X_FOXCOON = 32,
- RT_CID_819X_DELL = 33,
- RT_CID_819X_PRONETS = 34,
- RT_CID_819X_EDIMAX_ASUS = 35,
- RT_CID_NETGEAR = 36,
- RT_CID_PLANEX = 37,
- RT_CID_CC_C = 38,
-};
-
-enum hw_descs {
- HW_DESC_OWN,
- HW_DESC_RXOWN,
- HW_DESC_TX_NEXTDESC_ADDR,
- HW_DESC_TXBUFF_ADDR,
- HW_DESC_RXBUFF_ADDR,
- HW_DESC_RXPKT_LEN,
- HW_DESC_RXERO,
- HW_DESC_RX_PREPARE,
-};
-
-enum prime_sc {
- PRIME_CHNL_OFFSET_DONT_CARE = 0,
- PRIME_CHNL_OFFSET_LOWER = 1,
- PRIME_CHNL_OFFSET_UPPER = 2,
-};
-
-enum rf_type {
- RF_1T1R = 0,
- RF_1T2R = 1,
- RF_2T2R = 2,
- RF_2T2R_GREEN = 3,
- RF_2T3R = 4,
- RF_2T4R = 5,
- RF_3T3R = 6,
- RF_3T4R = 7,
- RF_4T4R = 8,
-};
-
-enum ht_channel_width {
- HT_CHANNEL_WIDTH_20 = 0,
- HT_CHANNEL_WIDTH_20_40 = 1,
- HT_CHANNEL_WIDTH_80 = 2,
- HT_CHANNEL_WIDTH_MAX,
-};
-
-/* Ref: 802.11i spec D10.0 7.3.2.25.1
- * Cipher Suites Encryption Algorithms
- */
-enum rt_enc_alg {
- NO_ENCRYPTION = 0,
- WEP40_ENCRYPTION = 1,
- TKIP_ENCRYPTION = 2,
- RSERVED_ENCRYPTION = 3,
- AESCCMP_ENCRYPTION = 4,
- WEP104_ENCRYPTION = 5,
- AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */
-};
-
-enum rtl_hal_state {
- _HAL_STATE_STOP = 0,
- _HAL_STATE_START = 1,
-};
-
-enum rtl_desc_rate {
- DESC_RATE1M = 0x00,
- DESC_RATE2M = 0x01,
- DESC_RATE5_5M = 0x02,
- DESC_RATE11M = 0x03,
-
- DESC_RATE6M = 0x04,
- DESC_RATE9M = 0x05,
- DESC_RATE12M = 0x06,
- DESC_RATE18M = 0x07,
- DESC_RATE24M = 0x08,
- DESC_RATE36M = 0x09,
- DESC_RATE48M = 0x0a,
- DESC_RATE54M = 0x0b,
-
- DESC_RATEMCS0 = 0x0c,
- DESC_RATEMCS1 = 0x0d,
- DESC_RATEMCS2 = 0x0e,
- DESC_RATEMCS3 = 0x0f,
- DESC_RATEMCS4 = 0x10,
- DESC_RATEMCS5 = 0x11,
- DESC_RATEMCS6 = 0x12,
- DESC_RATEMCS7 = 0x13,
- DESC_RATEMCS8 = 0x14,
- DESC_RATEMCS9 = 0x15,
- DESC_RATEMCS10 = 0x16,
- DESC_RATEMCS11 = 0x17,
- DESC_RATEMCS12 = 0x18,
- DESC_RATEMCS13 = 0x19,
- DESC_RATEMCS14 = 0x1a,
- DESC_RATEMCS15 = 0x1b,
- DESC_RATEMCS15_SG = 0x1c,
- DESC_RATEMCS32 = 0x20,
-
- DESC_RATEVHT1SS_MCS0 = 0x2c,
- DESC_RATEVHT1SS_MCS1 = 0x2d,
- DESC_RATEVHT1SS_MCS2 = 0x2e,
- DESC_RATEVHT1SS_MCS3 = 0x2f,
- DESC_RATEVHT1SS_MCS4 = 0x30,
- DESC_RATEVHT1SS_MCS5 = 0x31,
- DESC_RATEVHT1SS_MCS6 = 0x32,
- DESC_RATEVHT1SS_MCS7 = 0x33,
- DESC_RATEVHT1SS_MCS8 = 0x34,
- DESC_RATEVHT1SS_MCS9 = 0x35,
- DESC_RATEVHT2SS_MCS0 = 0x36,
- DESC_RATEVHT2SS_MCS1 = 0x37,
- DESC_RATEVHT2SS_MCS2 = 0x38,
- DESC_RATEVHT2SS_MCS3 = 0x39,
- DESC_RATEVHT2SS_MCS4 = 0x3a,
- DESC_RATEVHT2SS_MCS5 = 0x3b,
- DESC_RATEVHT2SS_MCS6 = 0x3c,
- DESC_RATEVHT2SS_MCS7 = 0x3d,
- DESC_RATEVHT2SS_MCS8 = 0x3e,
- DESC_RATEVHT2SS_MCS9 = 0x3f,
-};
-
-enum rtl_var_map {
- /*reg map */
- SYS_ISO_CTRL = 0,
- SYS_FUNC_EN,
- SYS_CLK,
- MAC_RCR_AM,
- MAC_RCR_AB,
- MAC_RCR_ACRC32,
- MAC_RCR_ACF,
- MAC_RCR_AAP,
- MAC_HIMR,
- MAC_HIMRE,
- MAC_HSISR,
-
- /*efuse map */
- EFUSE_TEST,
- EFUSE_CTRL,
- EFUSE_CLK,
- EFUSE_CLK_CTRL,
- EFUSE_PWC_EV12V,
- EFUSE_FEN_ELDR,
- EFUSE_LOADER_CLK_EN,
- EFUSE_ANA8M,
- EFUSE_HWSET_MAX_SIZE,
- EFUSE_MAX_SECTION_MAP,
- EFUSE_REAL_CONTENT_SIZE,
- EFUSE_OOB_PROTECT_BYTES_LEN,
- EFUSE_ACCESS,
-
- /*CAM map */
- RWCAM,
- WCAMI,
- RCAMO,
- CAMDBG,
- SECR,
- SEC_CAM_NONE,
- SEC_CAM_WEP40,
- SEC_CAM_TKIP,
- SEC_CAM_AES,
- SEC_CAM_WEP104,
-
- /*IMR map */
- RTL_IMR_BCNDMAINT6, /*Beacon DMA Interrupt 6 */
- RTL_IMR_BCNDMAINT5, /*Beacon DMA Interrupt 5 */
- RTL_IMR_BCNDMAINT4, /*Beacon DMA Interrupt 4 */
- RTL_IMR_BCNDMAINT3, /*Beacon DMA Interrupt 3 */
- RTL_IMR_BCNDMAINT2, /*Beacon DMA Interrupt 2 */
- RTL_IMR_BCNDMAINT1, /*Beacon DMA Interrupt 1 */
- RTL_IMR_BCNDOK8, /*Beacon Queue DMA OK Interrupt 8 */
- RTL_IMR_BCNDOK7, /*Beacon Queue DMA OK Interrupt 7 */
- RTL_IMR_BCNDOK6, /*Beacon Queue DMA OK Interrupt 6 */
- RTL_IMR_BCNDOK5, /*Beacon Queue DMA OK Interrupt 5 */
- RTL_IMR_BCNDOK4, /*Beacon Queue DMA OK Interrupt 4 */
- RTL_IMR_BCNDOK3, /*Beacon Queue DMA OK Interrupt 3 */
- RTL_IMR_BCNDOK2, /*Beacon Queue DMA OK Interrupt 2 */
- RTL_IMR_BCNDOK1, /*Beacon Queue DMA OK Interrupt 1 */
- RTL_IMR_TIMEOUT2, /*Timeout interrupt 2 */
- RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */
- RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */
- RTL_IMR_PSTIMEOUT, /*Power save time out interrupt */
- RTL_IMR_BCNINT, /*Beacon DMA Interrupt 0 */
- RTL_IMR_RXFOVW, /*Receive FIFO Overflow */
- RTL_IMR_RDU, /*Receive Descriptor Unavailable */
- RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
- RTL_IMR_H2CDOK, /*H2C Queue DMA OK Interrupt */
- RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrupt */
- RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
- RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/
- RTL_IMR_TBDOK, /*Transmit Beacon OK interrupt */
- RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
- RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */
- RTL_IMR_BKDOK, /*AC_BK DMA OK Interrupt */
- RTL_IMR_BEDOK, /*AC_BE DMA OK Interrupt */
- RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
- RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
- RTL_IMR_ROK, /*Receive DMA OK Interrupt */
- RTL_IMR_HSISR_IND, /*HSISR Interrupt*/
- RTL_IBSS_INT_MASKS, /*(RTL_IMR_BCNINT | RTL_IMR_TBDOK |
- * RTL_IMR_TBDER)
- */
- RTL_IMR_C2HCMD, /*fw interrupt*/
-
- /*CCK Rates, TxHT = 0 */
- RTL_RC_CCK_RATE1M,
- RTL_RC_CCK_RATE2M,
- RTL_RC_CCK_RATE5_5M,
- RTL_RC_CCK_RATE11M,
-
- /*OFDM Rates, TxHT = 0 */
- RTL_RC_OFDM_RATE6M,
- RTL_RC_OFDM_RATE9M,
- RTL_RC_OFDM_RATE12M,
- RTL_RC_OFDM_RATE18M,
- RTL_RC_OFDM_RATE24M,
- RTL_RC_OFDM_RATE36M,
- RTL_RC_OFDM_RATE48M,
- RTL_RC_OFDM_RATE54M,
-
- RTL_RC_HT_RATEMCS7,
- RTL_RC_HT_RATEMCS15,
-
- RTL_RC_VHT_RATE_1SS_MCS7,
- RTL_RC_VHT_RATE_1SS_MCS8,
- RTL_RC_VHT_RATE_1SS_MCS9,
- RTL_RC_VHT_RATE_2SS_MCS7,
- RTL_RC_VHT_RATE_2SS_MCS8,
- RTL_RC_VHT_RATE_2SS_MCS9,
-
- /*keep it last */
- RTL_VAR_MAP_MAX,
-};
-
-/*Firmware PS mode for control LPS.*/
-enum _fw_ps_mode {
- FW_PS_ACTIVE_MODE = 0,
- FW_PS_MIN_MODE = 1,
- FW_PS_MAX_MODE = 2,
- FW_PS_DTIM_MODE = 3,
- FW_PS_VOIP_MODE = 4,
- FW_PS_UAPSD_WMM_MODE = 5,
- FW_PS_UAPSD_MODE = 6,
- FW_PS_IBSS_MODE = 7,
- FW_PS_WWLAN_MODE = 8,
- FW_PS_PM_RADIO_OFF = 9,
- FW_PS_PM_CARD_DISABLE = 10,
-};
-
-enum rt_psmode {
- EACTIVE, /*Active/Continuous access. */
- EMAXPS, /*Max power save mode. */
- EFASTPS, /*Fast power save mode. */
- EAUTOPS, /*Auto power save mode. */
-};
-
-/*LED related.*/
-enum led_ctl_mode {
- LED_CTL_POWER_ON = 1,
- LED_CTL_LINK = 2,
- LED_CTL_NO_LINK = 3,
- LED_CTL_TX = 4,
- LED_CTL_RX = 5,
- LED_CTL_SITE_SURVEY = 6,
- LED_CTL_POWER_OFF = 7,
- LED_CTL_START_TO_LINK = 8,
- LED_CTL_START_WPS = 9,
- LED_CTL_STOP_WPS = 10,
-};
-
-enum rtl_led_pin {
- LED_PIN_GPIO0,
- LED_PIN_LED0,
- LED_PIN_LED1,
- LED_PIN_LED2
-};
-
-/* QoS related.*/
-/* acm implementation method.*/
-enum acm_method {
- EACMWAY0_SWANDHW = 0,
- EACMWAY1_HW = 1,
- EACMWAY2_SW = 2,
-};
-
-enum macphy_mode {
- SINGLEMAC_SINGLEPHY = 0,
- DUALMAC_DUALPHY,
- DUALMAC_SINGLEPHY,
-};
-
-enum band_type {
- BAND_ON_2_4G = 0,
- BAND_ON_5G,
- BAND_ON_BOTH,
- BANDMAX
-};
-
-/* aci/aifsn Field.
- * Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
- */
-union aci_aifsn {
- u8 char_data;
-
- struct {
- u8 aifsn:4;
- u8 acm:1;
- u8 aci:2;
- u8 reserved:1;
- } f; /* Field */
-};
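/* Editor's note, illustrative sketch only: the aci_aifsn bitfields above
 * follow the WMM AC parameter octet layout (AIFSN in bits 3:0, ACM in
 * bit 4, ACI in bits 6:5, assuming the little-endian bitfield ordering
 * this layout implies). The same fields can be unpacked from a raw byte
 * with masks and shifts; the helper name is an assumption.
 */
static void aci_aifsn_unpack(unsigned char octet, unsigned char *aifsn,
			     unsigned char *acm, unsigned char *aci)
{
	*aifsn = octet & 0x0f;		/* bits 3:0 */
	*acm = (octet >> 4) & 0x01;	/* bit 4 */
	*aci = (octet >> 5) & 0x03;	/* bits 6:5 */
}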
-
-/*mlme related.*/
-enum wireless_mode {
- WIRELESS_MODE_UNKNOWN = 0x00,
- WIRELESS_MODE_A = 0x01,
- WIRELESS_MODE_B = 0x02,
- WIRELESS_MODE_G = 0x04,
- WIRELESS_MODE_AUTO = 0x08,
- WIRELESS_MODE_N_24G = 0x10,
- WIRELESS_MODE_N_5G = 0x20,
- WIRELESS_MODE_AC_5G = 0x40,
- WIRELESS_MODE_AC_24G = 0x80,
- WIRELESS_MODE_AC_ONLY = 0x100,
- WIRELESS_MODE_MAX = 0x800
-};
-
-#define IS_WIRELESS_MODE_A(wirelessmode) \
- (wirelessmode == WIRELESS_MODE_A)
-#define IS_WIRELESS_MODE_B(wirelessmode) \
- (wirelessmode == WIRELESS_MODE_B)
-#define IS_WIRELESS_MODE_G(wirelessmode) \
- (wirelessmode == WIRELESS_MODE_G)
-#define IS_WIRELESS_MODE_N_24G(wirelessmode) \
- (wirelessmode == WIRELESS_MODE_N_24G)
-#define IS_WIRELESS_MODE_N_5G(wirelessmode) \
- (wirelessmode == WIRELESS_MODE_N_5G)
-
-enum ratr_table_mode {
- RATR_INX_WIRELESS_NGB = 0,
- RATR_INX_WIRELESS_NG = 1,
- RATR_INX_WIRELESS_NB = 2,
- RATR_INX_WIRELESS_N = 3,
- RATR_INX_WIRELESS_GB = 4,
- RATR_INX_WIRELESS_G = 5,
- RATR_INX_WIRELESS_B = 6,
- RATR_INX_WIRELESS_MC = 7,
- RATR_INX_WIRELESS_A = 8,
- RATR_INX_WIRELESS_AC_5N = 8,
- RATR_INX_WIRELESS_AC_24N = 9,
-};
-
-enum ratr_table_mode_new {
- RATEID_IDX_BGN_40M_2SS = 0,
- RATEID_IDX_BGN_40M_1SS = 1,
- RATEID_IDX_BGN_20M_2SS_BN = 2,
- RATEID_IDX_BGN_20M_1SS_BN = 3,
- RATEID_IDX_GN_N2SS = 4,
- RATEID_IDX_GN_N1SS = 5,
- RATEID_IDX_BG = 6,
- RATEID_IDX_G = 7,
- RATEID_IDX_B = 8,
- RATEID_IDX_VHT_2SS = 9,
- RATEID_IDX_VHT_1SS = 10,
- RATEID_IDX_MIX1 = 11,
- RATEID_IDX_MIX2 = 12,
- RATEID_IDX_VHT_3SS = 13,
- RATEID_IDX_BGN_3SS = 14,
-};
-
-enum rtl_link_state {
- MAC80211_NOLINK = 0,
- MAC80211_LINKING = 1,
- MAC80211_LINKED = 2,
- MAC80211_LINKED_SCANNING = 3,
-};
-
-enum act_category {
- ACT_CAT_QOS = 1,
- ACT_CAT_DLS = 2,
- ACT_CAT_BA = 3,
- ACT_CAT_HT = 7,
- ACT_CAT_WMM = 17,
-};
-
-enum ba_action {
- ACT_ADDBAREQ = 0,
- ACT_ADDBARSP = 1,
- ACT_DELBA = 2,
-};
-
-enum rt_polarity_ctl {
- RT_POLARITY_LOW_ACT = 0,
- RT_POLARITY_HIGH_ACT = 1,
-};
-
-/* After 8188E, we use V2 reason define. 88C/8723A use V1 reason. */
-enum fw_wow_reason_v2 {
- FW_WOW_V2_PTK_UPDATE_EVENT = 0x01,
- FW_WOW_V2_GTK_UPDATE_EVENT = 0x02,
- FW_WOW_V2_DISASSOC_EVENT = 0x04,
- FW_WOW_V2_DEAUTH_EVENT = 0x08,
- FW_WOW_V2_FW_DISCONNECT_EVENT = 0x10,
- FW_WOW_V2_MAGIC_PKT_EVENT = 0x21,
- FW_WOW_V2_UNICAST_PKT_EVENT = 0x22,
- FW_WOW_V2_PATTERN_PKT_EVENT = 0x23,
- FW_WOW_V2_RTD3_SSID_MATCH_EVENT = 0x24,
- FW_WOW_V2_REALWOW_V2_WAKEUPPKT = 0x30,
- FW_WOW_V2_REALWOW_V2_ACKLOST = 0x31,
- FW_WOW_V2_REASON_MAX = 0xff,
-};
-
-enum wolpattern_type {
- UNICAST_PATTERN = 0,
- MULTICAST_PATTERN = 1,
- BROADCAST_PATTERN = 2,
- DONT_CARE_DA = 3,
- UNKNOWN_TYPE = 4,
-};
-
-enum package_type {
- PACKAGE_DEFAULT,
- PACKAGE_QFN68,
- PACKAGE_TFBGA90,
- PACKAGE_TFBGA80,
- PACKAGE_TFBGA79
-};
-
-enum rtl_spec_ver {
- RTL_SPEC_NEW_RATEID = BIT(0), /* use ratr_table_mode_new */
- RTL_SPEC_SUPPORT_VHT = BIT(1), /* support VHT */
- RTL_SPEC_NEW_FW_C2H = BIT(2), /* new FW C2H (e.g. TX REPORT) */
-};
-
-struct octet_string {
- u8 *octet;
- u16 length;
-};
-
-struct rtl_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[0];
-} __packed;
-
-struct rtl_info_element {
- u8 id;
- u8 len;
- u8 data[0];
-} __packed;
-
-struct rtl_probe_rsp {
- struct rtl_hdr_3addr header;
- u32 time_stamp[2];
- __le16 beacon_interval;
- __le16 capability;
- /* SSID, supported rates, FH params, DS params,
- * CF params, IBSS params, TIM (if beacon), RSN
- */
- struct rtl_info_element info_element[0];
-} __packed;
-
-struct rtl_beacon_keys {
- /*u8 ssid[32];*/
- /*u32 ssid_len;*/
- u8 bcn_channel;
- __le16 ht_cap_info;
- u8 ht_info_infos_0_sco; /* bit0 & bit1 in infos[0] is 2nd ch offset */
- bool valid;
-};
-
-/*LED related.*/
-/*ledpin identifies how this SW LED is implemented.*/
-struct rtl_led {
- void *hw;
- enum rtl_led_pin ledpin;
- bool ledon;
-};
-
-struct rtl_led_ctl {
- bool led_opendrain;
- struct rtl_led sw_led0;
- struct rtl_led sw_led1;
-};
-
-struct rtl_qos_parameters {
- __le16 cw_min;
- __le16 cw_max;
- u8 aifs;
- u8 flag;
- __le16 tx_op;
-} __packed;
-
-struct rt_smooth_data {
- u32 elements[100]; /*array to store values */
- u32 index; /*index to current array to store */
- u32 total_num; /*num of valid elements */
- u32 total_val; /*sum of valid elements */
-};
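/* Editor's note, illustrative sketch only: rt_smooth_data is shaped for a
 * ring-buffer sliding average (elements[] is the window, total_val the
 * running sum). One plausible update step, assuming 'win' is the window
 * size (at most 100, e.g. PHY_LINKQUALITY_SLID_WIN_MAX); the helper name
 * is an assumption, not the removed driver's code.
 */
static unsigned int smooth_update(struct rt_smooth_data *s,
				  unsigned int win, unsigned int val)
{
	if (s->total_num >= win) {
		s->total_val -= s->elements[s->index];	/* drop oldest sample */
		s->total_num = win;
	} else {
		s->total_num++;
	}

	s->total_val += val;
	s->elements[s->index++] = val;
	if (s->index >= win)
		s->index = 0;

	return s->total_val / s->total_num;		/* current average */
}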
-
-struct false_alarm_statistics {
- u32 cnt_parity_fail;
- u32 cnt_rate_illegal;
- u32 cnt_crc8_fail;
- u32 cnt_mcs_fail;
- u32 cnt_fast_fsync_fail;
- u32 cnt_sb_search_fail;
- u32 cnt_ofdm_fail;
- u32 cnt_cck_fail;
- u32 cnt_all;
- u32 cnt_ofdm_cca;
- u32 cnt_cck_cca;
- u32 cnt_cca_all;
- u32 cnt_bw_usc;
- u32 cnt_bw_lsc;
-};
-
-struct init_gain {
- u8 xaagccore1;
- u8 xbagccore1;
- u8 xcagccore1;
- u8 xdagccore1;
- u8 cca;
-
-};
-
-struct wireless_stats {
- u64 txbytesunicast;
- u64 txbytesmulticast;
- u64 txbytesbroadcast;
- u64 rxbytesunicast;
-
- u64 txbytesunicast_inperiod;
- u64 rxbytesunicast_inperiod;
- u32 txbytesunicast_inperiod_tp;
- u32 rxbytesunicast_inperiod_tp;
- u64 txbytesunicast_last;
- u64 rxbytesunicast_last;
-
- long rx_snr_db[4];
- /* Correct smoothed signal strength in dBm, only used
- * in the driver to report the real power now.
- */
- long recv_signal_power;
- long signal_quality;
- long last_sigstrength_inpercent;
-
- u32 rssi_calculate_cnt;
- u32 pwdb_all_cnt;
-
- /* Transformed, in dBm. Beautified signal
- * strength for the UI, not exact.
- */
- long signal_strength;
-
- u8 rx_rssi_percentage[4];
- u8 rx_evm_dbm[4];
- u8 rx_evm_percentage[2];
-
- u16 rx_cfo_short[4];
- u16 rx_cfo_tail[4];
-
- struct rt_smooth_data ui_rssi;
- struct rt_smooth_data ui_link_quality;
-};
-
-struct rate_adaptive {
- u8 rate_adaptive_disabled;
- u8 ratr_state;
- u16 reserve;
-
- u32 high_rssi_thresh_for_ra;
- u32 high2low_rssi_thresh_for_ra;
- u8 low2high_rssi_thresh_for_ra40m;
- u32 low_rssi_thresh_for_ra40m;
- u8 low2high_rssi_thresh_for_ra20m;
- u32 low_rssi_thresh_for_ra20m;
- u32 upper_rssi_threshold_ratr;
- u32 middleupper_rssi_threshold_ratr;
- u32 middle_rssi_threshold_ratr;
- u32 middlelow_rssi_threshold_ratr;
- u32 low_rssi_threshold_ratr;
- u32 ultralow_rssi_threshold_ratr;
- u32 low_rssi_threshold_ratr_40m;
- u32 low_rssi_threshold_ratr_20m;
- u8 ping_rssi_enable;
- u32 ping_rssi_ratr;
- u32 ping_rssi_thresh_for_ra;
- u32 last_ratr;
- u8 pre_ratr_state;
- u8 ldpc_thres;
- bool use_ldpc;
- bool lower_rts_rate;
- bool is_special_data;
-};
-
-struct regd_pair_mapping {
- u16 reg_dmnenum;
- u16 reg_5ghz_ctl;
- u16 reg_2ghz_ctl;
-};
-
-struct dynamic_primary_cca {
- u8 pricca_flag;
- u8 intf_flag;
- u8 intf_type;
- u8 dup_rts_flag;
- u8 monitor_flag;
- u8 ch_offset;
- u8 mf_state;
-};
-
-struct rtl_regulatory {
- s8 alpha2[2];
- u16 country_code;
- u16 max_power_level;
- u32 tp_scale;
- u16 current_rd;
- u16 current_rd_ext;
- s16 power_limit;
- struct regd_pair_mapping *regpair;
-};
-
-struct rtl_rfkill {
- bool rfkill_state; /*0 is off, 1 is on */
-};
-
-/*for P2P PS**/
-#define P2P_MAX_NOA_NUM 2
-
-enum p2p_role {
- P2P_ROLE_DISABLE = 0,
- P2P_ROLE_DEVICE = 1,
- P2P_ROLE_CLIENT = 2,
- P2P_ROLE_GO = 3
-};
-
-enum p2p_ps_state {
- P2P_PS_DISABLE = 0,
- P2P_PS_ENABLE = 1,
- P2P_PS_SCAN = 2,
- P2P_PS_SCAN_DONE = 3,
- P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
-};
-
-enum p2p_ps_mode {
- P2P_PS_NONE = 0,
- P2P_PS_CTWINDOW = 1,
- P2P_PS_NOA = 2,
- P2P_PS_MIX = 3, /* CTWindow and NoA */
-};
-
-struct rtl_p2p_ps_info {
- enum p2p_ps_mode p2p_ps_mode; /* indicate p2p ps mode */
- enum p2p_ps_state p2p_ps_state; /* indicate p2p ps state */
- u8 noa_index; /* Identifies instance of Notice of Absence timing. */
- /* Client traffic window. A period of time in TU after TBTT. */
- u8 ctwindow;
- u8 opp_ps; /* opportunistic power save. */
- u8 noa_num; /* number of NoA descriptor in P2P IE. */
- /* Count for owner, Type of client. */
- u8 noa_count_type[P2P_MAX_NOA_NUM];
- /* Max duration for owner, preferred or min acceptable duration
- * for client.
- */
- u32 noa_duration[P2P_MAX_NOA_NUM];
- /* Length of interval for owner, preferred or max acceptable interval
- * for client.
- */
- u32 noa_interval[P2P_MAX_NOA_NUM];
- /* schedule in terms of the lower 4 bytes of the TSF timer. */
- u32 noa_start_time[P2P_MAX_NOA_NUM];
-};
-
-struct p2p_ps_offload_t {
- u8 offload_en:1;
- u8 role:1; /* 1: Owner, 0: Client */
- u8 ctwindow_en:1;
- u8 noa0_en:1;
- u8 noa1_en:1;
- u8 allstasleep:1;
- u8 discovery:1;
- u8 reserved:1;
-};
-
-#define IQK_MATRIX_REG_NUM 8
-#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21)
-
-struct iqk_matrix_regs {
- bool iqk_done;
- long value[1][IQK_MATRIX_REG_NUM];
-};
-
-struct phy_parameters {
- u16 length;
- u32 *pdata;
-};
-
-enum hw_param_tab_index {
- PHY_REG_2T,
- PHY_REG_1T,
- PHY_REG_PG,
- RADIOA_2T,
- RADIOB_2T,
- RADIOA_1T,
- RADIOB_1T,
- MAC_REG,
- AGCTAB_2T,
- AGCTAB_1T,
- MAX_TAB
-};
-
-struct rtl_phy {
- struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */
- struct init_gain initgain_backup;
- enum io_type current_io_type;
-
- u8 rf_mode;
- u8 rf_type;
- u8 current_chan_bw;
- u8 max_ht_chan_bw;
- u8 max_vht_chan_bw;
- u8 set_bwmode_inprogress;
- u8 sw_chnl_inprogress;
- u8 sw_chnl_stage;
- u8 sw_chnl_step;
- u8 current_channel;
- u8 h2c_box_num;
- u8 set_io_inprogress;
- u8 lck_inprogress;
-
- /* record for power tracking */
- s32 reg_e94;
- s32 reg_e9c;
- s32 reg_ea4;
- s32 reg_eac;
- s32 reg_eb4;
- s32 reg_ebc;
- s32 reg_ec4;
- s32 reg_ecc;
- u8 rfpienable;
- u8 reserve_0;
- u16 reserve_1;
- u32 reg_c04, reg_c08, reg_874;
- u32 adda_backup[16];
- u32 iqk_mac_backup[IQK_MAC_REG_NUM];
- u32 iqk_bb_backup[10];
- bool iqk_initialized;
-
- bool rfpath_rx_enable[MAX_RF_PATH];
- u8 reg_837;
- /* Dual mac */
- bool need_iqk;
- struct iqk_matrix_regs iqk_matrix[IQK_MATRIX_SETTINGS_NUM];
-
- bool rfpi_enable;
- bool iqk_in_progress;
-
- u8 pwrgroup_cnt;
- u8 cck_high_power;
- /* this is for 88E & 8723A */
- u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
- /* MAX_PG_GROUP groups of pwr diff by rates */
- u32 mcs_offset[MAX_PG_GROUP][16];
- u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND]
- [TX_PWR_BY_RATE_NUM_RF]
- [TX_PWR_BY_RATE_NUM_RF]
- [TX_PWR_BY_RATE_NUM_RATE];
- /* compatible with TX_PWR_BY_RATE_NUM_SECTION*/
- u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF]
- [TX_PWR_BY_RATE_NUM_RF]
- [MAX_BASE_NUM_IN_PHY_REG_PG_24G];
- u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF]
- [TX_PWR_BY_RATE_NUM_RF]
- [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
- u8 default_initialgain[4];
-
- /* the current Tx power level */
- u8 cur_cck_txpwridx;
- u8 cur_ofdm24g_txpwridx;
- u8 cur_bw20_txpwridx;
- u8 cur_bw40_txpwridx;
-
- s8 txpwr_limit_2_4g[MAX_REGULATION_NUM]
- [MAX_2_4G_BANDWIDTH_NUM]
- [MAX_RATE_SECTION_NUM]
- [CHANNEL_MAX_NUMBER_2G]
- [MAX_RF_PATH_NUM];
- s8 txpwr_limit_5g[MAX_REGULATION_NUM]
- [MAX_5G_BANDWIDTH_NUM]
- [MAX_RATE_SECTION_NUM]
- [CHANNEL_MAX_NUMBER_5G]
- [MAX_RF_PATH_NUM];
-
- u32 rfreg_chnlval[2];
- bool apk_done;
- u32 reg_rf3c[2]; /* pathA / pathB */
-
- u32 backup_rf_0x1a;/*92ee*/
- /* bfsync */
- u8 framesync;
- u32 framesync_c34;
-
- u8 num_total_rfpath;
- struct phy_parameters hwparam_tables[MAX_TAB];
- u16 rf_pathmap;
-
- u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
- enum rt_polarity_ctl polarity_ctl;
-};
-
-#define MAX_TID_COUNT 9
-#define RTL_AGG_STOP 0
-#define RTL_AGG_PROGRESS 1
-#define RTL_AGG_START 2
-#define RTL_AGG_OPERATIONAL 3
-#define RTL_AGG_OFF 0
-#define RTL_AGG_ON 1
-#define RTL_RX_AGG_START 1
-#define RTL_RX_AGG_STOP 0
-#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
-#define RTL_AGG_EMPTYING_HW_QUEUE_DELBA 3
-
-struct rtl_ht_agg {
- u16 txq_id;
- u16 wait_for_ba;
- u16 start_idx;
- u64 bitmap;
- u32 rate_n_flags;
- u8 agg_state;
- u8 rx_agg_state;
-};
-
-struct rssi_sta {
- /* for old dm */
- long undec_sm_pwdb;
- long undec_sm_cck;
-
- /* for new phydm_mod */
- s32 undecorated_smoothed_pwdb;
- s32 undecorated_smoothed_cck;
- s32 undecorated_smoothed_ofdm;
- u8 ofdm_pkt;
- u8 cck_pkt;
- u16 cck_sum_power;
- u8 is_send_rssi;
- u64 packet_map;
- u8 valid_bit;
-};
-
-struct rtl_tid_data {
- u16 seq_number;
- struct rtl_ht_agg agg;
-};
-
-struct rtl_sta_info {
- struct list_head list;
- struct rtl_tid_data tids[MAX_TID_COUNT];
- /* just used for ap adhoc or mesh*/
- struct rssi_sta rssi_stat;
- u8 rssi_level;
- u16 wireless_mode;
- u8 ratr_index;
- u8 mimo_ps;
- u8 mac_addr[ETH_ALEN];
-} __packed;
-
-struct rtl_priv;
-struct rtl_io {
- struct device *dev;
- struct mutex bb_mutex;
-
- /*PCI MEM map */
- unsigned long pci_mem_end; /*shared mem end */
- unsigned long pci_mem_start; /*shared mem start */
-
- /*PCI IO map */
- unsigned long pci_base_addr; /*device I/O address */
-
- void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
- void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
- void (*writeN_sync)(struct rtl_priv *rtlpriv, u32 addr, void *buf,
- u16 len);
-
- u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr);
-
-};
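/* Editor's note, illustrative sketch only: callers in this driver family
 * usually reach these I/O hooks through thin inline wrappers rather than
 * dereferencing the function pointers in place. The wrapper below is an
 * assumption for illustration (its name and the separate 'io' argument
 * are not taken from this file).
 */
static inline u8 rtl_read_byte_sketch(struct rtl_io *io,
				      struct rtl_priv *rtlpriv, u32 addr)
{
	return io->read8_sync(rtlpriv, addr);
}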
-
-struct rtl_mac {
- u8 mac_addr[ETH_ALEN];
- u8 mac80211_registered;
- u8 beacon_enabled;
-
- u32 tx_ss_num;
- u32 rx_ss_num;
-
- struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
- struct ieee80211_hw *hw;
- struct ieee80211_vif *vif;
- enum nl80211_iftype opmode;
-
- /*Probe Beacon management */
- struct rtl_tid_data tids[MAX_TID_COUNT];
- enum rtl_link_state link_state;
- struct rtl_beacon_keys cur_beacon_keys;
- u8 new_beacon_cnt;
-
- int n_channels;
- int n_bitrates;
-
- bool offchan_delay;
- u8 p2p; /*using p2p role*/
- bool p2p_in_use;
-
- /*filters */
- u32 rx_conf;
- u16 rx_mgt_filter;
- u16 rx_ctrl_filter;
- u16 rx_data_filter;
-
- bool act_scanning;
- u8 cnt_after_linked;
- bool skip_scan;
-
- /* early mode */
- /* skb wait queue */
- struct sk_buff_head skb_waitq[MAX_TID_COUNT];
-
- u8 ht_stbc_cap;
- u8 ht_cur_stbc;
-
- /*vht support*/
- u8 vht_enable;
- u8 bw_80;
- u8 vht_cur_ldpc;
- u8 vht_cur_stbc;
- u8 vht_stbc_cap;
- u8 vht_ldpc_cap;
-
- /*RDG*/
- bool rdg_en;
-
- /*AP*/
- u8 bssid[ETH_ALEN] __aligned(2);
- u32 vendor;
- u8 mcs[16]; /* 16 bytes mcs for HT rates. */
- u32 basic_rates; /* b/g rates */
- u8 ht_enable;
- u8 sgi_40;
- u8 sgi_20;
- u8 bw_40;
- u16 mode; /* wireless mode */
- u8 slot_time;
- u8 short_preamble;
- u8 use_cts_protect;
- u8 cur_40_prime_sc;
- u8 cur_40_prime_sc_bk;
- u8 cur_80_prime_sc;
- u64 tsf;
- u8 retry_short;
- u8 retry_long;
- u16 assoc_id;
- bool hiddenssid;
-
- /*IBSS*/
- int beacon_interval;
-
- /*AMPDU*/
- u8 min_space_cfg; /*For Min spacing configurations */
- u8 max_mss_density;
- u8 current_ampdu_factor;
- u8 current_ampdu_density;
-
- /*QOS & EDCA */
- struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE];
- struct rtl_qos_parameters ac[AC_MAX];
-
- /* counters */
- u64 last_txok_cnt;
- u64 last_rxok_cnt;
- u32 last_bt_edca_ul;
- u32 last_bt_edca_dl;
-};
-
-struct btdm_8723 {
- bool all_off;
- bool agc_table_en;
- bool adc_back_off_on;
- bool b2_ant_hid_en;
- bool low_penalty_rate_adaptive;
- bool rf_rx_lpf_shrink;
- bool reject_aggre_pkt;
- bool tra_tdma_on;
- u8 tra_tdma_nav;
- u8 tra_tdma_ant;
- bool tdma_on;
- u8 tdma_ant;
- u8 tdma_nav;
- u8 tdma_dac_swing;
- u8 fw_dac_swing_lvl;
- bool ps_tdma_on;
- u8 ps_tdma_byte[5];
- bool pta_on;
- u32 val_0x6c0;
- u32 val_0x6c8;
- u32 val_0x6cc;
- bool sw_dac_swing_on;
- u32 sw_dac_swing_lvl;
- u32 wlan_act_hi;
- u32 wlan_act_lo;
- u32 bt_retry_index;
- bool dec_bt_pwr;
- bool ignore_wlan_act;
-};
-
-struct bt_coexist_8723 {
- u32 high_priority_tx;
- u32 high_priority_rx;
- u32 low_priority_tx;
- u32 low_priority_rx;
- u8 c2h_bt_info;
- bool c2h_bt_info_req_sent;
- bool c2h_bt_inquiry_page;
- u32 bt_inq_page_start_time;
- u8 bt_retry_cnt;
- u8 c2h_bt_info_original;
- u8 bt_inquiry_page_cnt;
- struct btdm_8723 btdm;
-};
-
-struct rtl_hal {
- struct ieee80211_hw *hw;
- bool driver_is_goingto_unload;
- bool up_first_time;
- bool first_init;
- bool being_init_adapter;
- bool bbrf_ready;
- bool mac_func_enable;
- bool pre_edcca_enable;
- struct bt_coexist_8723 hal_coex_8723;
-
- enum intf_type interface;
- u16 hw_type; /*92c or 92d or 92s and so on */
- u8 ic_class;
- u8 oem_id;
- u32 version; /*version of chip */
- u8 state; /*stop 0, start 1 */
- u8 board_type;
- u8 package_type;
- u8 external_pa;
-
- u8 pa_mode;
- u8 pa_type_2g;
- u8 pa_type_5g;
- u8 lna_type_2g;
- u8 lna_type_5g;
- u8 external_pa_2g;
- u8 external_lna_2g;
- u8 external_pa_5g;
- u8 external_lna_5g;
- u8 type_glna;
- u8 type_gpa;
- u8 type_alna;
- u8 type_apa;
- u8 rfe_type;
-
- /*firmware */
- u32 fwsize;
- u8 *pfirmware;
- u16 fw_version;
- u16 fw_subversion;
- bool h2c_setinprogress;
- u8 last_hmeboxnum;
- bool fw_ready;
- /*Start offset of reserved pages in TxQ, excluding the beacon. */
- u8 fw_rsvdpage_startoffset;
- u8 h2c_txcmd_seq;
- u8 current_ra_rate;
-
- /* FW Cmd IO related */
- u16 fwcmd_iomap;
- u32 fwcmd_ioparam;
- bool set_fwcmd_inprogress;
- u8 current_fwcmd_io;
-
- struct p2p_ps_offload_t p2p_ps_offload;
- bool fw_clk_change_in_progress;
- bool allow_sw_to_change_hwclc;
- u8 fw_ps_state;
- /**/
- bool driver_going2unload;
-
- /*AMPDU init min space*/
- u8 minspace_cfg; /*For Min spacing configurations */
-
- /* Dual mac */
- enum macphy_mode macphymode;
- enum band_type current_bandtype; /* 0:2.4G, 1:5G */
- enum band_type current_bandtypebackup;
- enum band_type bandset;
- /* dual MAC 0--Mac0 1--Mac1 */
- u32 interfaceindex;
- /* just for DualMac S3S4 */
- u8 macphyctl_reg;
- bool earlymode_enable;
- u8 max_earlymode_num;
- /* Dual mac*/
- bool during_mac0init_radiob;
- bool during_mac1init_radioa;
- bool reloadtxpowerindex;
- /* True if IMR or IQK has been done
- * for 2.4G while a scan is in progress
- */
- bool load_imrandiqk_setting_for2g;
-
- bool disable_amsdu_8k;
- bool master_of_dmsp;
- bool slave_of_dmsp;
-
- u16 rx_tag;/*for 92ee*/
- u8 rts_en;
-
- /*for wowlan*/
- bool wow_enable;
- bool enter_pnp_sleep;
- bool wake_from_pnp_sleep;
- bool wow_enabled;
- time64_t last_suspend_sec;
- u32 wowlan_fwsize;
- u8 *wowlan_firmware;
-
- u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
-
- bool real_wow_v2_enable;
- bool re_init_llt_table;
-};
-
-struct rtl_security {
- /*default 0 */
- bool use_sw_sec;
-
- bool being_setkey;
- bool use_defaultkey;
- /*Encryption Algorithm for Unicast Packet */
- enum rt_enc_alg pairwise_enc_algorithm;
- /*Encryption Algorithm for Broadcast/Multicast */
- enum rt_enc_alg group_enc_algorithm;
- /*Cam Entry Bitmap */
- u32 hwsec_cam_bitmap;
- u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN];
- /* Local key buffer: index 0 is for the
- * pairwise key, 1-4 are for group keys.
- */
- u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN];
- u8 key_len[KEY_BUF_SIZE];
-
- /* Pointer to the pairwise key;
- * it always points to KeyBuf[4]
- */
- u8 *pairwise_key;
-};
-
-#define ASSOCIATE_ENTRY_NUM 33
-
-struct fast_ant_training {
- u8 bssid[6];
- u8 antsel_rx_keep_0;
- u8 antsel_rx_keep_1;
- u8 antsel_rx_keep_2;
- u32 ant_sum[7];
- u32 ant_cnt[7];
- u32 ant_ave[7];
- u8 fat_state;
- u32 train_idx;
- u8 antsel_a[ASSOCIATE_ENTRY_NUM];
- u8 antsel_b[ASSOCIATE_ENTRY_NUM];
- u8 antsel_c[ASSOCIATE_ENTRY_NUM];
- u32 main_ant_sum[ASSOCIATE_ENTRY_NUM];
- u32 aux_ant_sum[ASSOCIATE_ENTRY_NUM];
- u32 main_ant_cnt[ASSOCIATE_ENTRY_NUM];
- u32 aux_ant_cnt[ASSOCIATE_ENTRY_NUM];
- u8 rx_idle_ant;
- bool becomelinked;
-};
-
-struct dm_phy_dbg_info {
- s8 rx_snrdb[4];
- u64 num_qry_phy_status;
- u64 num_qry_phy_status_cck;
- u64 num_qry_phy_status_ofdm;
- u16 num_qry_beacon_pkt;
- u16 num_non_be_pkt;
- s32 rx_evm[4];
-};
-
-struct rtl_dm {
- /*PHY status for Dynamic Management */
- long entry_min_undec_sm_pwdb;
- long undec_sm_cck;
- long undec_sm_pwdb; /*out dm */
- long entry_max_undec_sm_pwdb;
- s32 ofdm_pkt_cnt;
- bool dm_initialgain_enable;
- bool dynamic_txpower_enable;
- bool current_turbo_edca;
- bool is_any_nonbepkts; /*out dm */
- bool is_cur_rdlstate;
- bool txpower_trackinginit;
- bool disable_framebursting;
- bool cck_inch14;
- bool txpower_tracking;
- bool useramask;
- bool rfpath_rxenable[4];
- bool inform_fw_driverctrldm;
- bool current_mrc_switch;
- u8 txpowercount;
- u8 powerindex_backup[6];
-
- u8 thermalvalue_rxgain;
- u8 thermalvalue_iqk;
- u8 thermalvalue_lck;
- u8 thermalvalue;
- u8 last_dtp_lvl;
- u8 thermalvalue_avg[AVG_THERMAL_NUM];
- u8 thermalvalue_avg_index;
- u8 tm_trigger;
- bool done_txpower;
- u8 dynamic_txhighpower_lvl; /*Tx high power level */
- u8 dm_flag; /*Indicate each dynamic mechanism's status. */
- u8 dm_flag_tmp;
- u8 dm_type;
- u8 dm_rssi_sel;
- u8 txpower_track_control;
- bool interrupt_migration;
- bool disable_tx_int;
- s8 ofdm_index[MAX_RF_PATH];
- u8 default_ofdm_index;
- u8 default_cck_index;
- s8 cck_index;
- s8 delta_power_index[MAX_RF_PATH];
- s8 delta_power_index_last[MAX_RF_PATH];
- s8 power_index_offset[MAX_RF_PATH];
- s8 absolute_ofdm_swing_idx[MAX_RF_PATH];
- s8 remnant_ofdm_swing_idx[MAX_RF_PATH];
- s8 remnant_cck_idx;
- bool modify_txagc_flag_path_a;
- bool modify_txagc_flag_path_b;
-
- bool one_entry_only;
- struct dm_phy_dbg_info dbginfo;
-
- /* Dynamic ATC switch */
- bool atc_status;
- bool large_cfo_hit;
- bool is_freeze;
- int cfo_tail[2];
- int cfo_ave_pre;
- int crystal_cap;
- u8 cfo_threshold;
- u32 packet_count;
- u32 packet_count_pre;
- u8 tx_rate;
-
- /*88e tx power tracking*/
- u8 swing_idx_ofdm[MAX_RF_PATH];
- u8 swing_idx_ofdm_cur;
- u8 swing_idx_ofdm_base[MAX_RF_PATH];
- bool swing_flag_ofdm;
- u8 swing_idx_cck;
- u8 swing_idx_cck_cur;
- u8 swing_idx_cck_base;
- bool swing_flag_cck;
-
- s8 swing_diff_2g;
- s8 swing_diff_5g;
-
- /* DMSP */
- bool supp_phymode_switch;
-
- /* DulMac */
- struct fast_ant_training fat_table;
-
- u8 resp_tx_path;
- u8 path_sel;
- u32 patha_sum;
- u32 pathb_sum;
- u32 patha_cnt;
- u32 pathb_cnt;
-
- u8 pre_channel;
- u8 *p_channel;
- u8 linked_interval;
-
- u64 last_tx_ok_cnt;
- u64 last_rx_ok_cnt;
-};
-
-#define EFUSE_MAX_LOGICAL_SIZE 512
-
-struct rtl_efuse {
- bool autoload_ok;
- bool bootfromefuse;
- u16 max_physical_size;
-
- u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
- u16 efuse_usedbytes;
- u8 efuse_usedpercentage;
-
- u8 autoload_failflag;
- u8 autoload_status;
-
- short epromtype;
- u16 eeprom_vid;
- u16 eeprom_did;
- u16 eeprom_svid;
- u16 eeprom_smid;
- u8 eeprom_oemid;
- u16 eeprom_channelplan;
- u8 eeprom_version;
- u8 board_type;
- u8 external_pa;
-
- u8 dev_addr[6];
- u8 wowlan_enable;
- u8 antenna_div_cfg;
- u8 antenna_div_type;
-
- bool txpwr_fromeprom;
- u8 eeprom_crystalcap;
- u8 eeprom_tssi[2];
- u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
- u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
- u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
- u8 eeprom_chnlarea_txpwr_cck[MAX_RF_PATH][CHANNEL_GROUP_MAX_2G];
- u8 eeprom_chnlarea_txpwr_ht40_1s[MAX_RF_PATH][CHANNEL_GROUP_MAX];
- u8 eprom_chnl_txpwr_ht40_2sdf[MAX_RF_PATH][CHANNEL_GROUP_MAX];
-
- u8 internal_pa_5g[2]; /* pathA / pathB */
- u8 eeprom_c9;
- u8 eeprom_cc;
-
- /*For power group */
- u8 eeprom_pwrgroup[2][3];
- u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
- u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
-
- u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G];
- /*For HT 40MHZ pwr */
- u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- /*For HT 40MHZ pwr */
- u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
-
- /*--------------------------------------------------------*
- * 8192CE\8192SE\8192DE\8723AE use the following 4 arrays,
- * other ICs (8188EE\8723BE\8192EE\8812AE...)
- * define new arrays in Windows code.
- * BUT, in linux code, we use the same array for all ICs.
- *
- * The correspondence between the two sets of arrays is:
- * txpwr_cckdiff[][] == CCK_24G_Diff[][]
- * txpwr_ht20diff[][] == BW20_24G_Diff[][]
- * txpwr_ht40diff[][] == BW40_24G_Diff[][]
- * txpwr_legacyhtdiff[][] == OFDM_24G_Diff[][]
- *
- * Sizes of these arrays are decided by the larger ones.
- */
- s8 txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- s8 txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- s8 txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- s8 txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
-
- u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
- s8 txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
- s8 txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
- s8 txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
- s8 txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
-
- u8 txpwr_safetyflag; /* Band edge enable flag */
- u16 eeprom_txpowerdiff;
- u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
- u8 antenna_txpwdiff[3];
-
- u8 eeprom_regulatory;
- u8 eeprom_thermalmeter;
- u8 thermalmeter[2]; /*ThermalMeter, index 0 for RFIC0, 1 for RFIC1 */
- u16 tssi_13dbm;
- u8 crystalcap; /* CrystalCap. */
- u8 delta_iqk;
- u8 delta_lck;
-
- u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */
- bool apk_thermalmeterignore;
-
- bool b1x1_recvcombine;
- bool b1ss_support;
-
- /*channel plan */
- u8 channel_plan;
-};
-
-struct rtl_tx_report {
- atomic_t sn;
- u16 last_sent_sn;
- unsigned long last_sent_time;
- u16 last_recv_sn;
-};
-
-struct rtl_ps_ctl {
- bool pwrdomain_protect;
- bool in_powersavemode;
- bool rfchange_inprogress;
- bool swrf_processing;
- bool hwradiooff;
- /* just for PCIE ASPM
- * If it supports ASPM, Offset[560h] = 0x40,
- * otherwise Offset[560h] = 0x00.
- */
- bool support_aspm;
- bool support_backdoor;
-
- /*for LPS */
- enum rt_psmode dot11_psmode; /*Power save mode configured. */
- bool swctrl_lps;
- bool leisure_ps;
- bool fwctrl_lps;
- u8 fwctrl_psmode;
- /*For Fw control LPS mode */
- u8 reg_fwctrl_lps;
- /*Record Fw PS mode status. */
- bool fw_current_inpsmode;
- u8 reg_max_lps_awakeintvl;
- bool report_linked;
- bool low_power_enable;/*for 32k*/
-
- /*for IPS */
- bool inactiveps;
-
- u32 rfoff_reason;
-
- /*RF OFF Level */
- u32 cur_ps_level;
- u32 reg_rfps_level;
-
- /*just for PCIE ASPM */
- u8 const_amdpci_aspm;
- bool pwrdown_mode;
-
- enum rf_pwrstate inactive_pwrstate;
- enum rf_pwrstate rfpwr_state; /*cur power state */
-
- /* for SW LPS*/
- bool sw_ps_enabled;
- bool state;
- bool state_inap;
- bool multi_buffered;
- u16 nullfunc_seq;
- unsigned int dtim_counter;
- unsigned int sleep_ms;
- unsigned long last_sleep_jiffies;
- unsigned long last_awake_jiffies;
- unsigned long last_delaylps_stamp_jiffies;
- unsigned long last_dtim;
- unsigned long last_beacon;
- unsigned long last_action;
- unsigned long last_slept;
-
- /*For P2P PS */
- struct rtl_p2p_ps_info p2p_ps_info;
- u8 pwr_mode;
- u8 smart_ps;
-
- /* wake up on line */
- u8 wo_wlan_mode;
- u8 arp_offload_enable;
- u8 gtk_offload_enable;
- /* Used for WOL, indicates the reason for waking event.*/
- u32 wakeup_reason;
- /* Record the last wake-up time for comparison when setting a key. */
- u64 last_wakeup_time;
-};
-
-struct rtl_stats {
- u8 psaddr[ETH_ALEN];
- u32 mac_time[2];
- s8 rssi;
- u8 signal;
- u8 noise;
- u8 rate; /* hw desc rate */
- u8 received_channel;
- u8 control;
- u8 mask;
- u8 freq;
- u16 len;
- u64 tsf;
- u32 beacon_time;
- u8 nic_type;
- u16 length;
- u8 signalquality; /*in 0-100 index. */
- /*
- * Real power in dBm for this packet,
- * no beautification and aggregation.
- */
- s32 recvsignalpower;
- s8 rxpower; /*in dBm Translate from PWdB */
- u8 signalstrength; /*in 0-100 index. */
- u16 hwerror:1;
- u16 crc:1;
- u16 icv:1;
- u16 shortpreamble:1;
- u16 antenna:1;
- u16 decrypted:1;
- u16 wakeup:1;
- u32 timestamp_low;
- u32 timestamp_high;
- bool shift;
-
- u8 rx_drvinfo_size;
- u8 rx_bufshift;
- bool isampdu;
- bool isfirst_ampdu;
- bool rx_is40mhzpacket;
- u8 rx_packet_bw;
- u32 rx_pwdb_all;
- u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
- s8 rx_mimo_signalquality[4];
- u8 rx_mimo_evm_dbm[4];
- u16 cfo_short[4]; /* per-path's Cfo_short */
- u16 cfo_tail[4];
-
- s8 rx_mimo_sig_qual[4];
- u8 rx_pwr[4]; /* per-path's pwdb */
- u8 rx_snr[4]; /* per-path's SNR */
- u8 bandwidth;
- u8 bt_coex_pwr_adjust;
- bool packet_matchbssid;
- bool is_cck;
- bool is_ht;
- bool packet_toself;
- bool packet_beacon; /*for rssi */
- s8 cck_adc_pwdb[4]; /*for rx path selection */
-
- bool is_vht;
- bool is_short_gi;
- u8 vht_nss;
-
- u8 packet_report_type;
-
- u32 macid;
- u8 wake_match;
- u32 bt_rx_rssi_percentage;
- u32 macid_valid_entry[2];
-};
-
-struct rt_link_detect {
- /* count for roaming */
- u32 bcn_rx_inperiod;
- u32 roam_times;
-
- u32 num_tx_in4period[4];
- u32 num_rx_in4period[4];
-
- u32 num_tx_inperiod;
- u32 num_rx_inperiod;
-
- bool busytraffic;
- bool tx_busy_traffic;
- bool rx_busy_traffic;
- bool higher_busytraffic;
- bool higher_busyrxtraffic;
-
- u32 tidtx_in4period[MAX_TID_COUNT][4];
- u32 tidtx_inperiod[MAX_TID_COUNT];
- bool higher_busytxtraffic[MAX_TID_COUNT];
-};
-
-struct rtl_tcb_desc {
- u8 packet_bw:2;
- u8 multicast:1;
- u8 broadcast:1;
-
- u8 rts_stbc:1;
- u8 rts_enable:1;
- u8 cts_enable:1;
- u8 rts_use_shortpreamble:1;
- u8 rts_use_shortgi:1;
- u8 rts_sc:1;
- u8 rts_bw:1;
- u8 rts_rate;
-
- u8 use_shortgi:1;
- u8 use_shortpreamble:1;
- u8 use_driver_rate:1;
- u8 disable_ratefallback:1;
-
- u8 use_spe_rpt:1;
-
- u8 ratr_index;
- u8 mac_id;
- u8 hw_rate;
-
- u8 last_inipkt:1;
- u8 cmd_or_init:1;
- u8 queue_index;
-
- /* early mode */
- u8 empkt_num;
- /* The max value by HW */
- u32 empkt_len[10];
- bool tx_enable_sw_calc_duration;
-};
-
-struct rtl_wow_pattern {
- u8 type;
- u16 crc;
- u32 mask[4];
-};
-
-struct rtl_hal_ops {
- int (*init_sw_vars)(struct ieee80211_hw *hw);
- void (*deinit_sw_vars)(struct ieee80211_hw *hw);
- void (*read_chip_version)(struct ieee80211_hw *hw);
- void (*read_eeprom_info)(struct ieee80211_hw *hw);
- void (*interrupt_recognized)(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
- int (*hw_init)(struct ieee80211_hw *hw);
- void (*hw_disable)(struct ieee80211_hw *hw);
- void (*hw_suspend)(struct ieee80211_hw *hw);
- void (*hw_resume)(struct ieee80211_hw *hw);
- void (*enable_interrupt)(struct ieee80211_hw *hw);
- void (*disable_interrupt)(struct ieee80211_hw *hw);
- int (*set_network_type)(struct ieee80211_hw *hw,
- enum nl80211_iftype type);
- void (*set_chk_bssid)(struct ieee80211_hw *hw,
- bool check_bssid);
- void (*set_bw_mode)(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
- u8 (*switch_channel)(struct ieee80211_hw *hw);
- void (*set_qos)(struct ieee80211_hw *hw, int aci);
- void (*set_bcn_reg)(struct ieee80211_hw *hw);
- void (*set_bcn_intv)(struct ieee80211_hw *hw);
- void (*update_interrupt_mask)(struct ieee80211_hw *hw,
- u32 add_msr, u32 rm_msr);
- void (*get_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val);
- void (*set_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val);
- void (*update_rate_tbl)(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_leve,
- bool update_bw);
- void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
- u8 *desc, u8 queue_index,
- struct sk_buff *skb, dma_addr_t addr);
- void (*update_rate_mask)(struct ieee80211_hw *hw, u8 rssi_level);
- u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw,
- u8 queue_index);
- void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc,
- u8 queue_index);
- void (*fill_tx_desc)(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- u8 *pbd_desc_tx,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- struct sk_buff *skb, u8 hw_queue,
- struct rtl_tcb_desc *ptcb_desc);
- void (*fill_fake_txdesc)(struct ieee80211_hw *hw, u8 *pdesc,
- u32 buffer_len, bool bispspoll);
- void (*fill_tx_cmddesc)(struct ieee80211_hw *hw, u8 *pdesc,
- bool firstseg, bool lastseg,
- struct sk_buff *skb);
- void (*fill_tx_special_desc)(struct ieee80211_hw *hw,
- u8 *pdesc, u8 *pbd_desc,
- struct sk_buff *skb, u8 hw_queue);
- bool (*query_rx_desc)(struct ieee80211_hw *hw,
- struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status,
- u8 *pdesc, struct sk_buff *skb);
- void (*set_channel_access)(struct ieee80211_hw *hw);
- bool (*radio_onoff_checking)(struct ieee80211_hw *hw, u8 *valid);
- void (*dm_watchdog)(struct ieee80211_hw *hw);
- void (*scan_operation_backup)(struct ieee80211_hw *hw, u8 operation);
- bool (*set_rf_power_state)(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
- void (*led_control)(struct ieee80211_hw *hw,
- enum led_ctl_mode ledaction);
- void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
- u8 desc_name, u8 *val);
- u64 (*get_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
- u8 desc_name);
- bool (*is_tx_desc_closed)(struct ieee80211_hw *hw,
- u8 hw_queue, u16 index);
- void (*tx_polling)(struct ieee80211_hw *hw, u8 hw_queue);
- void (*enable_hw_sec)(struct ieee80211_hw *hw);
- void (*set_key)(struct ieee80211_hw *hw, u32 key_index,
- u8 *macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all);
- void (*init_sw_leds)(struct ieee80211_hw *hw);
- void (*deinit_sw_leds)(struct ieee80211_hw *hw);
- u32 (*get_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
- void (*set_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
- u32 data);
- u32 (*get_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask);
- void (*set_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data);
- void (*linked_set_reg)(struct ieee80211_hw *hw);
- void (*chk_switch_dmdp)(struct ieee80211_hw *hw);
- void (*dualmac_easy_concurrent)(struct ieee80211_hw *hw);
- void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw);
- bool (*phy_rf6052_config)(struct ieee80211_hw *hw);
- void (*phy_rf6052_set_cck_txpower)(struct ieee80211_hw *hw,
- u8 *powerlevel);
- void (*phy_rf6052_set_ofdm_txpower)(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
- bool (*config_bb_with_headerfile)(struct ieee80211_hw *hw,
- u8 configtype);
- bool (*config_bb_with_pgheaderfile)(struct ieee80211_hw *hw,
- u8 configtype);
- void (*phy_lc_calibrate)(struct ieee80211_hw *hw, bool is2t);
- void (*phy_set_bw_mode_callback)(struct ieee80211_hw *hw);
- void (*dm_dynamic_txpower)(struct ieee80211_hw *hw);
- void (*c2h_command_handle)(struct ieee80211_hw *hw);
- void (*bt_wifi_media_status_notify)(struct ieee80211_hw *hw,
- bool mstate);
- void (*bt_coex_off_before_lps)(struct ieee80211_hw *hw);
- void (*fill_h2c_cmd)(struct ieee80211_hw *hw, u8 element_id,
- u32 cmd_len, u8 *p_cmdbuffer);
- void (*set_default_port_id_cmd)(struct ieee80211_hw *hw);
- bool (*get_btc_status)(void);
- bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
- u32 (*rx_command_packet)(struct ieee80211_hw *hw,
- const struct rtl_stats *status,
- struct sk_buff *skb);
- void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
- struct rtl_wow_pattern *rtl_pattern,
- u8 index);
- u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx);
- void (*c2h_content_parsing)(struct ieee80211_hw *hw, u8 tag, u8 len,
- u8 *val);
- /* ops for halmac cb */
- bool (*halmac_cb_init_mac_register)(struct rtl_priv *rtlpriv);
- bool (*halmac_cb_init_bb_rf_register)(struct rtl_priv *rtlpriv);
- bool (*halmac_cb_write_data_rsvd_page)(struct rtl_priv *rtlpriv,
- u8 *buf, u32 size);
- bool (*halmac_cb_write_data_h2c)(struct rtl_priv *rtlpriv, u8 *buf,
- u32 size);
- /* ops for phydm cb */
- u8 (*get_txpower_index)(struct ieee80211_hw *hw, u8 path,
- u8 rate, u8 bandwidth, u8 channel);
- void (*set_tx_power_index_by_rs)(struct ieee80211_hw *hw,
- u8 channel, u8 path,
- enum rate_section rs);
- void (*store_tx_power_by_rate)(struct ieee80211_hw *hw,
- u32 band, u32 rfpath,
- u32 txnum, u32 regaddr,
- u32 bitmask, u32 data);
- void (*phy_set_txpower_limit)(struct ieee80211_hw *hw, u8 *pregulation,
- u8 *pband, u8 *pbandwidth,
- u8 *prate_section, u8 *prf_path,
- u8 *pchannel, u8 *ppower_limit);
-};
-
-struct rtl_intf_ops {
- /*com */
- void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
- int (*adapter_start)(struct ieee80211_hw *hw);
- void (*adapter_stop)(struct ieee80211_hw *hw);
- bool (*check_buddy_priv)(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv);
-
- int (*adapter_tx)(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct sk_buff *skb,
- struct rtl_tcb_desc *ptcb_desc);
- void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
- int (*reset_trx_ring)(struct ieee80211_hw *hw);
- bool (*waitq_insert)(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct sk_buff *skb);
-
- /*pci */
- void (*disable_aspm)(struct ieee80211_hw *hw);
- void (*enable_aspm)(struct ieee80211_hw *hw);
-
- /*usb */
-};
-
-struct rtl_mod_params {
- /* default: 0,0 */
- u64 debug_mask;
- /* default: 0 = using hardware encryption */
- bool sw_crypto;
-
- /* default: 0 = DBG_EMERG (0)*/
- int debug_level;
-
- /* default: 1 = using no linked power save */
- bool inactiveps;
-
- /* default: 1 = using linked sw power save */
- bool swctrl_lps;
-
- /* default: 1 = using linked fw power save */
- bool fwctrl_lps;
-
- /* default: 0 = not using MSI interrupts mode
- * submodules should set their own default value
- */
- bool msi_support;
-
- /* default: 0 = dma 32 */
- bool dma64;
-
- /* default: 1 = enable aspm */
- int aspm_support;
-
- /* default 0: 1 means disable */
- bool disable_watchdog;
-
- /* default 0: 1 means do not disable interrupts */
- bool int_clear;
-
- /* select antenna */
- int ant_sel;
-};
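/* Editor's note, illustrative sketch only: the chip-specific sw.c normally
 * exposes these fields as module parameters. The parameter names below are
 * assumptions for illustration; the defaults echo the comments above.
 */
#include <linux/module.h>

static struct rtl_mod_params example_mod_params = {
	.sw_crypto = false,	/* default 0: hardware encryption */
	.inactiveps = true,	/* default 1: inactive (no-link) power save */
	.swctrl_lps = true,	/* default 1: linked SW power save */
	.fwctrl_lps = true,	/* default 1: linked FW power save */
};

module_param_named(swenc, example_mod_params.sw_crypto, bool, 0444);
module_param_named(ips, example_mod_params.inactiveps, bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)");
MODULE_PARM_DESC(ips, "Set to 0 to disable inactive power save (default 1)");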
-
-struct rtl_hal_usbint_cfg {
- /* data - rx */
- u32 in_ep_num;
- u32 rx_urb_num;
- u32 rx_max_size;
-
- /* op - rx */
- void (*usb_rx_hdl)(struct ieee80211_hw *hw, struct sk_buff *skb);
- void (*usb_rx_segregate_hdl)(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct sk_buff_head *skbh);
-
- /* tx */
- void (*usb_tx_cleanup)(struct ieee80211_hw *hw, struct sk_buff *skb);
- int (*usb_tx_post_hdl)(struct ieee80211_hw *hw, struct urb *urb,
- struct sk_buff *skb);
- struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *hw,
- struct sk_buff_head *skbh);
-
- /* endpoint mapping */
- int (*usb_endpoint_mapping)(struct ieee80211_hw *hw);
- u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
-};
-
-struct rtl_hal_cfg {
- u8 bar_id;
- bool write_readback;
- char *name;
- char *alt_fw_name;
- struct rtl_hal_ops *ops;
- struct rtl_mod_params *mod_params;
- struct rtl_hal_usbint_cfg *usb_interface_cfg;
- enum rtl_spec_ver spec_ver;
-
-	/* this map is used for some registers or vars
-	 * defined in HAL but used in MAIN
-	 */
- u32 maps[RTL_VAR_MAP_MAX];
-
-};
-
-struct rtl_locks {
- /* mutex */
- struct mutex conf_mutex;
- struct mutex ips_mutex; /* mutex for enter/leave IPS */
- struct mutex lps_mutex; /* mutex for enter/leave LPS */
-
- /*spin lock */
- spinlock_t irq_th_lock;
- spinlock_t h2c_lock;
- spinlock_t rf_ps_lock;
- spinlock_t rf_lock;
- spinlock_t waitq_lock;
- spinlock_t entry_list_lock;
- spinlock_t usb_lock;
- spinlock_t c2hcmd_lock;
- spinlock_t scan_list_lock; /* lock for the scan list */
-
- /*FW clock change */
- spinlock_t fw_ps_lock;
-
- /*Dual mac*/
- spinlock_t cck_and_rw_pagea_lock;
-
- spinlock_t iqk_lock;
-};
-
-struct rtl_works {
- struct ieee80211_hw *hw;
-
- /*timer */
- struct timer_list watchdog_timer;
- struct timer_list dualmac_easyconcurrent_retrytimer;
- struct timer_list fw_clockoff_timer;
- struct timer_list fast_antenna_training_timer;
- /*task */
- struct tasklet_struct irq_tasklet;
- struct tasklet_struct irq_prepare_bcn_tasklet;
-
- /*work queue */
- struct workqueue_struct *rtl_wq;
- struct delayed_work watchdog_wq;
- struct delayed_work ips_nic_off_wq;
- struct delayed_work c2hcmd_wq;
-
- /* For SW LPS */
- struct delayed_work ps_work;
- struct delayed_work ps_rfon_wq;
- struct delayed_work fwevt_wq;
-
- struct work_struct lps_change_work;
- struct work_struct fill_h2c_cmd;
-};
-
-struct rtl_debug {
- /* add for debug */
- struct dentry *debugfs_dir;
- char debugfs_name[20];
-
- char *msg_buf;
-};
-
-#define MIMO_PS_STATIC 0
-#define MIMO_PS_DYNAMIC 1
-#define MIMO_PS_NOLIMIT 3
-
-struct rtl_dualmac_easy_concurrent_ctl {
- enum band_type currentbandtype_backfordmdp;
- bool close_bbandrf_for_dmsp;
- bool change_to_dmdp;
- bool change_to_dmsp;
- bool switch_in_process;
-};
-
-struct rtl_dmsp_ctl {
- bool activescan_for_slaveofdmsp;
- bool scan_for_anothermac_fordmsp;
- bool scan_for_itself_fordmsp;
- bool writedig_for_anothermacofdmsp;
- u32 curdigvalue_for_anothermacofdmsp;
- bool changecckpdstate_for_anothermacofdmsp;
- u8 curcckpdstate_for_anothermacofdmsp;
- bool changetxhighpowerlvl_for_anothermacofdmsp;
- u8 curtxhighlvl_for_anothermacofdmsp;
- long rssivalmin_for_anothermacofdmsp;
-};
-
-struct ps_t {
- u8 pre_ccastate;
- u8 cur_ccasate;
- u8 pre_rfstate;
- u8 cur_rfstate;
- u8 initialize;
- long rssi_val_min;
-};
-
-struct dig_t {
- u32 rssi_lowthresh;
- u32 rssi_highthresh;
- u32 fa_lowthresh;
- u32 fa_highthresh;
- long last_min_undec_pwdb_for_dm;
- long rssi_highpower_lowthresh;
- long rssi_highpower_highthresh;
- u32 recover_cnt;
- u32 pre_igvalue;
- u32 cur_igvalue;
- long rssi_val;
- u8 dig_enable_flag;
- u8 dig_ext_port_stage;
- u8 dig_algorithm;
- u8 dig_twoport_algorithm;
- u8 dig_dbgmode;
- u8 dig_slgorithm_switch;
- u8 cursta_cstate;
- u8 presta_cstate;
- u8 curmultista_cstate;
- u8 stop_dig;
- s8 back_val;
- s8 back_range_max;
- s8 back_range_min;
- u8 rx_gain_max;
- u8 rx_gain_min;
- u8 min_undec_pwdb_for_dm;
- u8 rssi_val_min;
- u8 pre_cck_cca_thres;
- u8 cur_cck_cca_thres;
- u8 pre_cck_pd_state;
- u8 cur_cck_pd_state;
- u8 pre_cck_fa_state;
- u8 cur_cck_fa_state;
- u8 pre_ccastate;
- u8 cur_ccasate;
- u8 large_fa_hit;
- u8 forbidden_igi;
- u8 dig_state;
- u8 dig_highpwrstate;
- u8 cur_sta_cstate;
- u8 pre_sta_cstate;
- u8 cur_ap_cstate;
- u8 pre_ap_cstate;
- u8 cur_pd_thstate;
- u8 pre_pd_thstate;
- u8 cur_cs_ratiostate;
- u8 pre_cs_ratiostate;
- u8 backoff_enable_flag;
- s8 backoffval_range_max;
- s8 backoffval_range_min;
- u8 dig_min_0;
- u8 dig_min_1;
- u8 bt30_cur_igi;
- bool media_connect_0;
- bool media_connect_1;
-
- u32 antdiv_rssi_max;
- u32 rssi_max;
-};
-
-struct rtl_global_var {
- /* from this list we can get
- * other adapter's rtl_priv
- */
- struct list_head glb_priv_list;
- spinlock_t glb_list_lock;
-};
-
-#define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */
-
-struct rtl_btc_info {
- u8 bt_type;
- u8 btcoexist;
- u8 ant_num;
- u8 single_ant_path;
-
- u8 ap_num;
- bool in_4way;
- unsigned long in_4way_ts;
-};
-
-struct bt_coexist_info {
- struct rtl_btc_ops *btc_ops;
- struct rtl_btc_info btc_info;
- /* btc context */
- void *btc_context;
- void *wifi_only_context;
- /* EEPROM BT info. */
- u8 eeprom_bt_coexist;
- u8 eeprom_bt_type;
- u8 eeprom_bt_ant_num;
- u8 eeprom_bt_ant_isol;
- u8 eeprom_bt_radio_shared;
-
- u8 bt_coexistence;
- u8 bt_ant_num;
- u8 bt_coexist_type;
- u8 bt_state;
- u8 bt_cur_state; /* 0:on, 1:off */
- u8 bt_ant_isolation; /* 0:good, 1:bad */
- u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
- u8 bt_service;
- u8 bt_radio_shared_type;
- u8 bt_rfreg_origin_1e;
- u8 bt_rfreg_origin_1f;
- u8 bt_rssi_state;
- u32 ratio_tx;
- u32 ratio_pri;
- u32 bt_edca_ul;
- u32 bt_edca_dl;
-
- bool init_set;
- bool bt_busy_traffic;
- bool bt_traffic_mode_set;
- bool bt_non_traffic_mode_set;
-
- bool fw_coexist_all_off;
- bool sw_coexist_all_off;
- bool hw_coexist_all_off;
- u32 cstate;
- u32 previous_state;
- u32 cstate_h;
- u32 previous_state_h;
-
- u8 bt_pre_rssi_state;
- u8 bt_pre_rssi_state1;
-
- u8 reg_bt_iso;
- u8 reg_bt_sco;
- bool balance_on;
- u8 bt_active_zero_cnt;
- bool cur_bt_disabled;
- bool pre_bt_disabled;
-
- u8 bt_profile_case;
- u8 bt_profile_action;
- bool bt_busy;
- bool hold_for_bt_operation;
- u8 lps_counter;
-};
-
-struct rtl_btc_ops {
- void (*btc_init_variables)(struct rtl_priv *rtlpriv);
- void (*btc_init_variables_wifi_only)(struct rtl_priv *rtlpriv);
- void (*btc_deinit_variables)(struct rtl_priv *rtlpriv);
- void (*btc_init_hal_vars)(struct rtl_priv *rtlpriv);
- void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
- void (*btc_init_hw_config)(struct rtl_priv *rtlpriv);
- void (*btc_init_hw_config_wifi_only)(struct rtl_priv *rtlpriv);
- void (*btc_ips_notify)(struct rtl_priv *rtlpriv, u8 type);
- void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
- void (*btc_scan_notify)(struct rtl_priv *rtlpriv, u8 scantype);
- void (*btc_scan_notify_wifi_only)(struct rtl_priv *rtlpriv,
- u8 scantype);
- void (*btc_connect_notify)(struct rtl_priv *rtlpriv, u8 action);
- void (*btc_mediastatus_notify)(struct rtl_priv *rtlpriv,
- enum rt_media_status mstatus);
- void (*btc_periodical)(struct rtl_priv *rtlpriv);
- void (*btc_halt_notify)(struct rtl_priv *rtlpriv);
- void (*btc_btinfo_notify)(struct rtl_priv *rtlpriv,
- u8 *tmp_buf, u8 length);
- void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv,
- u8 *tmp_buf, u8 length);
- bool (*btc_is_limited_dig)(struct rtl_priv *rtlpriv);
- bool (*btc_is_disable_edca_turbo)(struct rtl_priv *rtlpriv);
- bool (*btc_is_bt_disabled)(struct rtl_priv *rtlpriv);
- void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv,
- u8 pkt_type);
- void (*btc_switch_band_notify)(struct rtl_priv *rtlpriv, u8 type,
- bool scanning);
- void (*btc_switch_band_notify_wifi_only)(struct rtl_priv *rtlpriv,
- u8 type, bool scanning);
- void (*btc_display_bt_coex_info)(struct rtl_priv *rtlpriv,
- struct seq_file *m);
- void (*btc_record_pwr_mode)(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
- u8 (*btc_get_lps_val)(struct rtl_priv *rtlpriv);
- u8 (*btc_get_rpwm_val)(struct rtl_priv *rtlpriv);
- bool (*btc_is_bt_ctrl_lps)(struct rtl_priv *rtlpriv);
- void (*btc_get_ampdu_cfg)(struct rtl_priv *rtlpriv, u8 *reject_agg,
- u8 *ctrl_agg_size, u8 *agg_size);
- bool (*btc_is_bt_lps_on)(struct rtl_priv *rtlpriv);
-};
-
-struct rtl_halmac_ops {
- int (*halmac_init_adapter)(struct rtl_priv *rtlpriv);
- int (*halmac_deinit_adapter)(struct rtl_priv *rtlpriv);
- int (*halmac_init_hal)(struct rtl_priv *rtlpriv);
- int (*halmac_deinit_hal)(struct rtl_priv *rtlpriv);
- int (*halmac_poweron)(struct rtl_priv *rtlpriv);
- int (*halmac_poweroff)(struct rtl_priv *rtlpriv);
-
- int (*halmac_phy_power_switch)(struct rtl_priv *rtlpriv, u8 enable);
- int (*halmac_set_mac_address)(struct rtl_priv *rtlpriv, u8 hwport,
- u8 *addr);
- int (*halmac_set_bssid)(struct rtl_priv *rtlpriv, u8 hwport, u8 *addr);
-
- int (*halmac_get_physical_efuse_size)(struct rtl_priv *rtlpriv,
- u32 *size);
- int (*halmac_read_physical_efuse_map)(struct rtl_priv *rtlpriv,
- u8 *map, u32 size);
- int (*halmac_get_logical_efuse_size)(struct rtl_priv *rtlpriv,
- u32 *size);
- int (*halmac_read_logical_efuse_map)(struct rtl_priv *rtlpriv, u8 *map,
- u32 size);
-
- int (*halmac_set_bandwidth)(struct rtl_priv *rtlpriv, u8 channel,
- u8 pri_ch_idx, u8 bw);
-
- int (*halmac_c2h_handle)(struct rtl_priv *rtlpriv, u8 *c2h, u32 size);
-
- int (*halmac_chk_txdesc)(struct rtl_priv *rtlpriv, u8 *txdesc,
- u32 size);
-};
-
-struct rtl_halmac_indicator {
- struct completion *comp;
- u32 wait_ms;
-
- u8 *buffer;
- u32 buf_size;
- u32 ret_size;
- u32 status;
-};
-
-struct rtl_halmac {
-	struct rtl_halmac_ops *ops;	/* halmac ops (halmac.ko owns this object) */
- void *internal; /* internal context of halmac, i.e. PHALMAC_ADAPTER */
- struct rtl_halmac_indicator *indicator; /* size=10 */
-
- /* flags */
- /*
- * send_general_info
- * 0: no need to call halmac_send_general_info()
- * 1: need to call halmac_send_general_info()
- */
- u8 send_general_info;
-};
-
-struct rtl_phydm_params {
- u8 mp_chip; /* 1: MP chip, 0: test chip */
- u8 fab_ver; /* 0: TSMC, 1: UMC, ...*/
- u8 cut_ver; /* 0: A, 1: B, ..., 10: K */
- u8 efuse0x3d7; /* default: 0xff */
- u8 efuse0x3d8; /* default: 0xff */
-};
-
-struct rtl_phydm_ops {
- /* init/deinit priv */
- int (*phydm_init_priv)(struct rtl_priv *rtlpriv,
- struct rtl_phydm_params *params);
- int (*phydm_deinit_priv)(struct rtl_priv *rtlpriv);
- bool (*phydm_load_txpower_by_rate)(struct rtl_priv *rtlpriv);
- bool (*phydm_load_txpower_limit)(struct rtl_priv *rtlpriv);
-
- /* init hw */
- int (*phydm_init_dm)(struct rtl_priv *rtlpriv);
- int (*phydm_deinit_dm)(struct rtl_priv *rtlpriv);
- int (*phydm_reset_dm)(struct rtl_priv *rtlpriv);
- bool (*phydm_parameter_init)(struct rtl_priv *rtlpriv, bool post);
- bool (*phydm_phy_bb_config)(struct rtl_priv *rtlpriv);
- bool (*phydm_phy_rf_config)(struct rtl_priv *rtlpriv);
- bool (*phydm_phy_mac_config)(struct rtl_priv *rtlpriv);
- bool (*phydm_trx_mode)(struct rtl_priv *rtlpriv,
- enum radio_mask tx_path, enum radio_mask rx_path,
- bool is_tx2_path);
- /* watchdog */
- bool (*phydm_watchdog)(struct rtl_priv *rtlpriv);
-
- /* channel */
- bool (*phydm_switch_band)(struct rtl_priv *rtlpriv, u8 central_ch);
- bool (*phydm_switch_channel)(struct rtl_priv *rtlpriv, u8 central_ch);
- bool (*phydm_switch_bandwidth)(struct rtl_priv *rtlpriv,
- u8 primary_ch_idx,
- enum ht_channel_width width);
- bool (*phydm_iq_calibrate)(struct rtl_priv *rtlpriv);
- bool (*phydm_clear_txpowertracking_state)(struct rtl_priv *rtlpriv);
- bool (*phydm_pause_dig)(struct rtl_priv *rtlpriv, bool pause);
-
- /* read/write reg */
- u32 (*phydm_read_rf_reg)(struct rtl_priv *rtlpriv,
- enum radio_path rfpath,
- u32 addr, u32 mask);
- bool (*phydm_write_rf_reg)(struct rtl_priv *rtlpriv,
- enum radio_path rfpath,
- u32 addr, u32 mask, u32 data);
- u8 (*phydm_read_txagc)(struct rtl_priv *rtlpriv,
- enum radio_path rfpath, u8 hw_rate);
- bool (*phydm_write_txagc)(struct rtl_priv *rtlpriv, u32 power_index,
- enum radio_path rfpath, u8 hw_rate);
-
- /* RX */
- bool (*phydm_c2h_content_parsing)(struct rtl_priv *rtlpriv, u8 cmd_id,
- u8 cmd_len, u8 *content);
- bool (*phydm_query_phy_status)(struct rtl_priv *rtlpriv, u8 *phystrpt,
- struct ieee80211_hdr *hdr,
- struct rtl_stats *pstatus);
-
- /* TX */
- u8 (*phydm_rate_id_mapping)(struct rtl_priv *rtlpriv,
- enum wireless_mode wireless_mode,
- enum rf_type rf_type,
- enum ht_channel_width bw);
- bool (*phydm_get_ra_bitmap)(struct rtl_priv *rtlpriv,
- enum wireless_mode wireless_mode,
- enum rf_type rf_type,
- enum ht_channel_width bw,
- u8 tx_rate_level, /* 0~6 */
- u32 *tx_bitmap_msb,
- u32 *tx_bitmap_lsb);
-
- /* STA */
- bool (*phydm_add_sta)(struct rtl_priv *rtlpriv,
- struct ieee80211_sta *sta);
- bool (*phydm_del_sta)(struct rtl_priv *rtlpriv,
- struct ieee80211_sta *sta);
-
- /* BTC */
- u32 (*phydm_get_version)(struct rtl_priv *rtlpriv);
- bool (*phydm_modify_ra_pcr_threshold)(struct rtl_priv *rtlpriv,
- u8 ra_offset_direction,
- u8 ra_threshold_offset);
- u32 (*phydm_query_counter)(struct rtl_priv *rtlpriv,
- const char *info_type);
-
- /* debug */
- bool (*phydm_debug_cmd)(struct rtl_priv *rtlpriv, char *in, u32 in_len,
- char *out, u32 out_len);
-
-};
-
-struct rtl_phydm {
-	struct rtl_phydm_ops *ops; /* phydm ops (phydm_mod.ko owns this object) */
- void *internal; /* internal context of phydm, i.e. PHY_DM_STRUCT */
-
- u8 adaptivity_en;
- /* debug */
- u16 forced_data_rate;
- u8 forced_igi_lb;
- u8 antenna_test;
-};
-
-struct proxim {
- bool proxim_on;
-
- void *proximity_priv;
- int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
- struct sk_buff *skb);
- u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
-};
-
-struct rtl_c2hcmd {
- struct list_head list;
- u8 tag;
- u8 len;
- u8 *val;
-};
-
-struct rtl_bssid_entry {
- struct list_head list;
- u8 bssid[ETH_ALEN];
- u32 age;
-};
-
-struct rtl_scan_list {
- int num;
- struct list_head list; /* sort by age */
-};
-
-struct rtl_priv {
- struct ieee80211_hw *hw;
- struct completion firmware_loading_complete;
- struct list_head list;
- struct rtl_priv *buddy_priv;
- struct rtl_global_var *glb_var;
- struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
- struct rtl_dmsp_ctl dmsp_ctl;
- struct rtl_locks locks;
- struct rtl_works works;
- struct rtl_mac mac80211;
- struct rtl_hal rtlhal;
- struct rtl_regulatory regd;
- struct rtl_rfkill rfkill;
- struct rtl_io io;
- struct rtl_phy phy;
- struct rtl_dm dm;
- struct rtl_security sec;
- struct rtl_efuse efuse;
- struct rtl_led_ctl ledctl;
- struct rtl_tx_report tx_report;
- struct rtl_scan_list scan_list;
- struct rtl_ps_ctl psc;
- struct rate_adaptive ra;
- struct dynamic_primary_cca primarycca;
- struct wireless_stats stats;
- struct rt_link_detect link_info;
- struct false_alarm_statistics falsealm_cnt;
- struct rtl_rate_priv *rate_priv;
- /* sta entry list for ap adhoc or mesh */
- struct list_head entry_list;
- /* c2hcmd list for kthread level access */
- struct list_head c2hcmd_list;
- struct rtl_debug dbg;
- int max_fw_size;
-
- /*hal_cfg : for diff cards
- *intf_ops : for diff interface usb/pcie
- */
- struct rtl_hal_cfg *cfg;
- const struct rtl_intf_ops *intf_ops;
-
-	/* this var will be set by set_bit
-	 * and is used to indicate the status of
-	 * the interface or hardware
-	 */
- unsigned long status;
-
- /* tables for dm */
- struct dig_t dm_digtable;
- struct ps_t dm_pstable;
-
- u32 reg_874;
- u32 reg_c70;
- u32 reg_85c;
- u32 reg_a74;
- bool reg_init; /* true if regs saved */
- bool bt_operation_on;
- __le32 *usb_data;
- int usb_data_index;
- bool initialized;
- bool enter_ps; /* true when entering PS */
- u8 rate_mask[5];
-
-	/* Intel Proximity: memory should be allocated
-	 * by the Intel Proximity module and can only
-	 * be used in Intel Proximity mode
-	 */
- struct proxim proximity;
-
- /*for bt coexist use*/
- struct bt_coexist_info btcoexist;
-
- /* halmac for newer IC. (e.g. 8822B) */
- struct rtl_halmac halmac;
-
- /* phydm for newer IC. (e.g. 8822B) */
- struct rtl_phydm phydm;
-
-	/* separate 92ee from other ICs,
-	 * 92ee uses the new trx flow.
-	 */
- bool use_new_trx_flow;
-
-#ifdef CONFIG_PM
- struct wiphy_wowlan_support wowlan;
-#endif
- /* This must be the last item so
- * that it points to the data allocated
- * beyond this structure like:
- * rtl_pci_priv or rtl_usb_priv
- */
- u8 priv[0] __aligned(sizeof(void *));
-};
-
-#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
-#define rtl_mac(rtlpriv) (&((rtlpriv)->mac80211))
-#define rtl_hal(rtlpriv) (&((rtlpriv)->rtlhal))
-#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
-#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
-
-/***************************************
- * Bluetooth Co-existence Related
- ***************************************/
-
-enum bt_ant_num {
- ANT_X2 = 0,
- ANT_X1 = 1,
-};
-
-enum bt_co_type {
- BT_2WIRE = 0,
- BT_ISSC_3WIRE = 1,
- BT_ACCEL = 2,
- BT_CSR_BC4 = 3,
- BT_CSR_BC8 = 4,
- BT_RTL8756 = 5,
- BT_RTL8723A = 6,
- BT_RTL8821A = 7,
- BT_RTL8723B = 8,
- BT_RTL8192E = 9,
- BT_RTL8812A = 11,
- BT_RTL8822B = 12,
-};
-
-enum bt_total_ant_num {
- ANT_TOTAL_X2 = 0,
- ANT_TOTAL_X1 = 1
-};
-
-enum bt_cur_state {
- BT_OFF = 0,
- BT_ON = 1,
-};
-
-enum bt_service_type {
- BT_SCO = 0,
- BT_A2DP = 1,
- BT_HID = 2,
- BT_HID_IDLE = 3,
- BT_SCAN = 4,
- BT_IDLE = 5,
- BT_OTHER_ACTION = 6,
- BT_BUSY = 7,
- BT_OTHERBUSY = 8,
- BT_PAN = 9,
-};
-
-enum bt_radio_shared {
- BT_RADIO_SHARED = 0,
- BT_RADIO_INDIVIDUAL = 1,
-};
-
-/****************************************
- * mem access macro define start
- * Call endian free function when
- * 1. Read/write packet content.
- * 2. Before write integer to IO.
- * 3. After read integer from IO.
- ***************************************/
-/* Convert little-endian data to host ordering */
-#define EF1BYTE(_val) \
- ((u8)(_val))
-#define EF2BYTE(_val) \
- (le16_to_cpu(_val))
-#define EF4BYTE(_val) \
- (le32_to_cpu(_val))
-
-/* Read data from memory */
-#define READEF1BYTE(_ptr) \
- EF1BYTE(*((u8 *)(_ptr)))
-/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr) \
- EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr) \
- EF4BYTE(*(_ptr))
-
-/* Create a bit mask
- * Examples:
- * BIT_LEN_MASK_32(0) => 0x00000000
- * BIT_LEN_MASK_32(1) => 0x00000001
- * BIT_LEN_MASK_32(2) => 0x00000003
- * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
- */
-#define BIT_LEN_MASK_32(__bitlen) \
- (0xFFFFFFFF >> (32 - (__bitlen)))
-#define BIT_LEN_MASK_16(__bitlen) \
- (0xFFFF >> (16 - (__bitlen)))
-#define BIT_LEN_MASK_8(__bitlen) \
- (0xFF >> (8 - (__bitlen)))
-
-/* Create an offset bit mask
- * Examples:
- * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
- * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
- */
-#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
-
-/*Description:
- * Return 4-byte value in host byte ordering from
- * 4-byte pointer in little-endian system.
- */
-#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
- (EF4BYTE(*((__le32 *)(__pstart))))
-#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
- (EF2BYTE(*((__le16 *)(__pstart))))
-#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
- (EF1BYTE(*((u8 *)(__pstart))))
-
-/* Description:
- * Translate subfield (continuous bits in little-endian) of 4-byte
- * value to host byte ordering.
- */
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_32(__bitlen) \
- )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_16(__bitlen) \
- )
-#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_8(__bitlen) \
- )
-
-/* Description:
- * Mask subfield (continuous bits in little-endian) of 4-byte value
- * and return the result in 4-byte value in host byte ordering.
- */
-#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
- )
-#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
- )
-#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
- )
-
-/* Description:
- * Set subfield of little-endian 4-byte value to specified value.
- */
-#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
- (*((__le32 *)(__pstart)) = \
- cpu_to_le32( \
- LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
- ))
-#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
- (*((__le16 *)(__pstart)) = \
- cpu_to_le16( \
- LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
- ))
-#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
- (*((u8 *)(__pstart)) = EF1BYTE \
- ( \
- LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
- ))
-
-#define N_BYTE_ALIGNMENT(__value, __alignment) ((__alignment == 1) ? \
- (__value) : (((__value + __alignment - 1) / \
- __alignment) * __alignment))
-
-/****************************************
- * mem access macro define end
- ****************************************/
-
-#define byte(x, n) ((x >> (8 * n)) & 0xff)
-
-#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
-#define RTL_WATCH_DOG_TIME 2000
-#define MSECS(t) msecs_to_jiffies(t)
-#define WLAN_FC_GET_VERS(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_VERS)
-#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
-#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
-#define rtl_dm(rtlpriv) (&((rtlpriv)->dm))
-
-#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */
-#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */
-#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /*PCI D3 mode */
-/*NIC halt, re-initialize hw parameters*/
-#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
-#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /*FW free, re-download the FW */
-#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */
-/*Always enable ASPM and Clock Req in initialization.*/
-#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
-/* no matter RFOFF or SLEEP we set PS_ASPM_LEVL*/
-#define RT_PS_LEVEL_ASPM BIT(7)
-/*When LPS is on, disable 2R if no packet is received or transmitted.*/
-#define RT_RF_LPS_DISALBE_2R BIT(30)
-#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
-#define RT_IN_PS_LEVEL(ppsc, _ps_flg) \
- ((ppsc->cur_ps_level & _ps_flg) ? true : false)
-#define RT_CLEAR_PS_LEVEL(ppsc, _ps_flg) \
- (ppsc->cur_ps_level &= (~(_ps_flg)))
-#define RT_SET_PS_LEVEL(ppsc, _ps_flg) \
- (ppsc->cur_ps_level |= _ps_flg)
-
-#define container_of_dwork_rtl(x, y, z) \
- container_of(to_delayed_work(x), y, z)
-
-#define FILL_OCTET_STRING(_os, _octet, _len) \
- (_os).octet = (u8 *)(_octet); \
- (_os).length = (_len)
-
-#define CP_MACADDR(des, src) \
- ((des)[0] = (src)[0], (des)[1] = (src)[1],\
- (des)[2] = (src)[2], (des)[3] = (src)[3],\
- (des)[4] = (src)[4], (des)[5] = (src)[5])
-
-#define LDPC_HT_ENABLE_RX BIT(0)
-#define LDPC_HT_ENABLE_TX BIT(1)
-#define LDPC_HT_TEST_TX_ENABLE BIT(2)
-#define LDPC_HT_CAP_TX BIT(3)
-
-#define STBC_HT_ENABLE_RX BIT(0)
-#define STBC_HT_ENABLE_TX BIT(1)
-#define STBC_HT_TEST_TX_ENABLE BIT(2)
-#define STBC_HT_CAP_TX BIT(3)
-
-#define LDPC_VHT_ENABLE_RX BIT(0)
-#define LDPC_VHT_ENABLE_TX BIT(1)
-#define LDPC_VHT_TEST_TX_ENABLE BIT(2)
-#define LDPC_VHT_CAP_TX BIT(3)
-
-#define STBC_VHT_ENABLE_RX BIT(0)
-#define STBC_VHT_ENABLE_TX BIT(1)
-#define STBC_VHT_TEST_TX_ENABLE BIT(2)
-#define STBC_VHT_CAP_TX BIT(3)
-
-extern u8 channel5g[CHANNEL_MAX_NUMBER_5G];
-
-extern u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M];
-
-static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
-{
- return rtlpriv->io.read8_sync(rtlpriv, addr);
-}
-
-static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
-{
- return rtlpriv->io.read16_sync(rtlpriv, addr);
-}
-
-static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
-{
- return rtlpriv->io.read32_sync(rtlpriv, addr);
-}
-
-static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
-{
- rtlpriv->io.write8_async(rtlpriv, addr, val8);
-
- if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read8_sync(rtlpriv, addr);
-}
-
-static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
- u32 addr, u32 val8)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, addr, (u8)val8);
-}
-
-static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
-{
- rtlpriv->io.write16_async(rtlpriv, addr, val16);
-
- if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read16_sync(rtlpriv, addr);
-}
-
-static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
- u32 addr, u32 val32)
-{
- rtlpriv->io.write32_async(rtlpriv, addr, val32);
-
- if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read32_sync(rtlpriv, addr);
-}
-
-static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask)
-{
- struct rtl_priv *rtlpriv = hw->priv;
-
- return rtlpriv->cfg->ops->get_bbreg(hw, regaddr, bitmask);
-}
-
-static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
- u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = hw->priv;
-
- rtlpriv->cfg->ops->set_bbreg(hw, regaddr, bitmask, data);
-}
-
-static inline void rtl_set_bbreg_with_dwmask(struct ieee80211_hw *hw,
- u32 regaddr, u32 data)
-{
- rtl_set_bbreg(hw, regaddr, 0xffffffff, data);
-}
-
-static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask)
-{
- struct rtl_priv *rtlpriv = hw->priv;
-
- return rtlpriv->cfg->ops->get_rfreg(hw, rfpath, regaddr, bitmask);
-}
-
-static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = hw->priv;
-
- rtlpriv->cfg->ops->set_rfreg(hw, rfpath, regaddr, bitmask, data);
-}
-
-static inline bool is_hal_stop(struct rtl_hal *rtlhal)
-{
- return (rtlhal->state == _HAL_STATE_STOP);
-}
-
-static inline void set_hal_start(struct rtl_hal *rtlhal)
-{
- rtlhal->state = _HAL_STATE_START;
-}
-
-static inline void set_hal_stop(struct rtl_hal *rtlhal)
-{
- rtlhal->state = _HAL_STATE_STOP;
-}
-
-static inline u8 get_rf_type(struct rtl_phy *rtlphy)
-{
- return rtlphy->rf_type;
-}
-
-static inline struct ieee80211_hdr *rtl_get_hdr(struct sk_buff *skb)
-{
- return (struct ieee80211_hdr *)(skb->data);
-}
-
-static inline __le16 rtl_get_fc(struct sk_buff *skb)
-{
- return rtl_get_hdr(skb)->frame_control;
-}
-
-static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr)
-{
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
-static inline u16 rtl_get_tid(struct sk_buff *skb)
-{
- return rtl_get_tid_h(rtl_get_hdr(skb));
-}
-
-static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid)
-{
- return ieee80211_find_sta(vif, bssid);
-}
-
-static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
- u8 *mac_addr)
-{
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- return ieee80211_find_sta(mac->vif, mac_addr);
-}
-
-#endif
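
The endian-access helpers removed above (EF*BYTE, BIT_LEN_MASK_*, LE_BITS_TO_* and SET_BITS_TO_LE_*) all follow one pattern: load a little-endian word, shift and mask to extract a sub-field, or clear the field and OR in a new value before storing it back. A minimal standalone C sketch of that pattern follows; the helper names and the 4-byte descriptor used in main() are illustrative, not taken from the driver.

/* Sketch of the little-endian bit-field access pattern behind the
 * removed LE_BITS_TO_4BYTE / SET_BITS_TO_LE_4BYTE macros.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void le32_store(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* read a sub-field: analogue of LE_BITS_TO_4BYTE(p, off, len) */
static uint32_t le_bits_get(const uint8_t *p, unsigned int off, unsigned int len)
{
	return (le32_load(p) >> off) & (0xFFFFFFFFu >> (32 - len));
}

/* clear then set a sub-field: analogue of SET_BITS_TO_LE_4BYTE(p, off, len, val) */
static void le_bits_set(uint8_t *p, unsigned int off, unsigned int len, uint32_t val)
{
	uint32_t mask = (0xFFFFFFFFu >> (32 - len)) << off;

	le32_store(p, (le32_load(p) & ~mask) | ((val << off) & mask));
}

int main(void)
{
	uint8_t desc[4] = { 0 };	/* illustrative 4-byte descriptor */

	le_bits_set(desc, 16, 2, 0x3);	/* set bits 17:16 */
	printf("field = %u\n", (unsigned int)le_bits_get(desc, 16, 2)); /* 3 */
	return 0;
}

The clear-then-OR step in le_bits_set() mirrors LE_BITS_CLEARED_TO_4BYTE followed by the masked insert in SET_BITS_TO_LE_4BYTE above.
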
diff --git a/drivers/staging/rts5208/Kconfig b/drivers/staging/rts5208/Kconfig
index 05c990f654a4..b864023d3ccb 100644
--- a/drivers/staging/rts5208/Kconfig
+++ b/drivers/staging/rts5208/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config RTS5208
tristate "Realtek PCI-E Card Reader RTS5208/5288 support"
depends on PCI && SCSI
diff --git a/drivers/staging/rts5208/Makefile b/drivers/staging/rts5208/Makefile
index 17b4471c4d6d..6a934c41c738 100644
--- a/drivers/staging/rts5208/Makefile
+++ b/drivers/staging/rts5208/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RTS5208) := rts5208.o
ccflags-y := -Idrivers/scsi
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index 7325362f835f..bac65784d4a1 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -153,7 +153,7 @@
#define DAT_PRTCT 0x07 /* read/write is disabled */
#define BLNC_CHK 0x08 /* find blank/DOF in read */
/* write to unblank area */
-#define CPY_ABRT 0x0a /* Copy/Compare/Copy&Verify illgal */
+#define CPY_ABRT 0x0a /* Copy/Compare/Copy&Verify illegal */
#define ABRT_CMD 0x0b /* Target make the command in error */
#define EQUAL 0x0c /* Search Data end with Equal */
#define VLM_OVRFLW 0x0d /* Some data are left in buffer */
diff --git a/drivers/staging/sm750fb/Kconfig b/drivers/staging/sm750fb/Kconfig
index ccebc25c2ec1..fb5a086bf9b1 100644
--- a/drivers/staging/sm750fb/Kconfig
+++ b/drivers/staging/sm750fb/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config FB_SM750
tristate "Silicon Motion SM750 framebuffer support"
depends on FB && PCI
diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile
index 4d781f78b95c..1cf3849cef23 100644
--- a/drivers/staging/sm750fb/Makefile
+++ b/drivers/staging/sm750fb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FB_SM750) += sm750fb.o
sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o
diff --git a/drivers/staging/sm750fb/ddk750.h b/drivers/staging/sm750fb/ddk750.h
index 734010324a8f..482c1c6ba422 100644
--- a/drivers/staging/sm750fb/ddk750.h
+++ b/drivers/staging/sm750fb/ddk750.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
*
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 4c1f00f551da..5a317cc98a4b 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -15,14 +15,14 @@ enum logical_chip_type sm750_get_chip_type(void)
return chip;
}
-void sm750_set_chip_type(unsigned short devId, u8 revId)
+void sm750_set_chip_type(unsigned short dev_id, u8 rev_id)
{
- if (devId == 0x718) {
+ if (dev_id == 0x718) {
chip = SM718;
- } else if (devId == 0x750) {
+ } else if (dev_id == 0x750) {
chip = SM750;
/* SM750 and SM750LE are different in their revision ID only. */
- if (revId == SM750LE_REVISION_ID) {
+ if (rev_id == SM750LE_REVISION_ID) {
chip = SM750LE;
pr_info("found sm750le\n");
}
@@ -45,7 +45,7 @@ static unsigned int get_mxclk_freq(void)
OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
- return DEFAULT_INPUT_CLOCK * M / N / (1 << OD) / (1 << POD);
+ return DEFAULT_INPUT_CLOCK * M / N / BIT(OD) / BIT(POD);
}
/*
@@ -56,7 +56,7 @@ static unsigned int get_mxclk_freq(void)
static void set_chip_clock(unsigned int frequency)
{
struct pll_value pll;
- unsigned int ulActualMxClk;
+ unsigned int actual_mx_clk;
/* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
if (sm750_get_chip_type() == SM750LE)
@@ -76,7 +76,7 @@ static void set_chip_clock(unsigned int frequency)
* Return value of sm750_calc_pll_value gives the actual
* possible clock.
*/
- ulActualMxClk = sm750_calc_pll_value(frequency, &pll);
+ actual_mx_clk = sm750_calc_pll_value(frequency, &pll);
/* Master Clock Control: MXCLK_PLL */
poke32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll));
@@ -321,7 +321,7 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
int mini_diff;
unsigned int RN, quo, rem, fl_quo;
unsigned int input, request;
- unsigned int tmpClock, ret;
+ unsigned int tmp_clock, ret;
const int max_OD = 3;
int max_d = 6;
@@ -365,8 +365,8 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
if (M < 256 && M > 0) {
unsigned int diff;
- tmpClock = pll->inputFreq * M / N / X;
- diff = abs(tmpClock - request_orig);
+ tmp_clock = pll->inputFreq * M / N / X;
+ diff = abs(tmp_clock - request_orig);
if (diff < mini_diff) {
pll->M = M;
pll->N = N;
@@ -375,7 +375,7 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
pll->POD = d - max_OD;
pll->OD = d - pll->POD;
mini_diff = diff;
- ret = tmpClock;
+ ret = tmp_clock;
}
}
}
@@ -391,7 +391,6 @@ unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
unsigned int OD = pPLL->OD;
unsigned int M = pPLL->M;
unsigned int N = pPLL->N;
- unsigned int reg = 0;
/*
* Note that all PLL's have the same format. Here, we just use
@@ -399,13 +398,11 @@ unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
* register. On returning a 32 bit number, the value can be
* applied to any PLL in the calling function.
*/
- reg = PLL_CTRL_POWER |
+ return PLL_CTRL_POWER |
#ifndef VALIDATION_CHIP
((POD << PLL_CTRL_POD_SHIFT) & PLL_CTRL_POD_MASK) |
#endif
((OD << PLL_CTRL_OD_SHIFT) & PLL_CTRL_OD_MASK) |
((N << PLL_CTRL_N_SHIFT) & PLL_CTRL_N_MASK) |
((M << PLL_CTRL_M_SHIFT) & PLL_CTRL_M_MASK);
-
- return reg;
}
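
get_mxclk_freq() above reads M, N, OD and POD out of the PLL control register and applies fout = fin * M / N / 2^OD / 2^POD (the change only swaps the 1 << x shifts for BIT()). A small standalone sketch of that relation follows; the reference frequency and field values are illustrative, not taken from the hardware.

/* Sketch of the SM750 PLL output-frequency relation:
 * fout = fin * M / N / 2^OD / 2^POD
 */
#include <stdio.h>

static unsigned long pll_output_hz(unsigned long fin, unsigned int M,
				   unsigned int N, unsigned int OD,
				   unsigned int POD)
{
	return fin * M / N / (1UL << OD) / (1UL << POD);
}

int main(void)
{
	/* e.g. a 14.318 MHz reference with M=96, N=4, OD=1, POD=0 */
	printf("%lu Hz\n", pll_output_hz(14318000UL, 96, 4, 1, 0));
	return 0;
}

sm750_calc_pll_value() runs this relation in reverse: it sweeps N, the OD/POD split and M, keeping the combination whose output lands closest to the requested clock, and sm750_format_pll_reg() then packs those fields into the control register.
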
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index c72aac21b675..3e92b3297160 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -93,7 +93,7 @@ struct initchip_param {
};
enum logical_chip_type sm750_get_chip_type(void);
-void sm750_set_chip_type(unsigned short devId, u8 revId);
+void sm750_set_chip_type(unsigned short dev_id, u8 rev_id);
unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll);
unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
unsigned int ddk750_get_vm_size(void);
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index f38051eedb6c..887ea8aef43f 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -85,7 +85,7 @@ static void primary_wait_vertical_sync(int delay)
}
}
-static void swPanelPowerSequence(int disp, int delay)
+static void sw_panel_power_sequence(int disp, int delay)
{
unsigned int reg;
@@ -111,7 +111,7 @@ static void swPanelPowerSequence(int disp, int delay)
primary_wait_vertical_sync(delay);
}
-void ddk750_setLogicalDispOut(enum disp_output output)
+void ddk750_set_logical_disp_out(enum disp_output output)
{
unsigned int reg;
@@ -147,12 +147,12 @@ void ddk750_setLogicalDispOut(enum disp_output output)
if (output & PNL_SEQ_USAGE) {
/* set panel sequence */
- swPanelPowerSequence((output & PNL_SEQ_MASK) >> PNL_SEQ_OFFSET,
- 4);
+ sw_panel_power_sequence((output & PNL_SEQ_MASK) >> PNL_SEQ_OFFSET,
+ 4);
}
if (output & DAC_USAGE)
- setDAC((output & DAC_MASK) >> DAC_OFFSET);
+ set_DAC((output & DAC_MASK) >> DAC_OFFSET);
if (output & DPMS_USAGE)
ddk750_set_dpms((output & DPMS_MASK) >> DPMS_OFFSET);
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index 7fd101d98199..7f713906dc16 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -102,6 +102,6 @@ enum disp_output {
do_CRT_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON | DAC_ON,
};
-void ddk750_setLogicalDispOut(enum disp_output output);
+void ddk750_set_logical_disp_out(enum disp_output output);
#endif
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index b20d16198c17..cd564ea40779 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -30,31 +30,31 @@ static struct dvi_ctrl_device g_dcftSupportedDviController[] = {
#endif
};
-int dviInit(unsigned char edgeSelect,
- unsigned char busSelect,
- unsigned char dualEdgeClkSelect,
- unsigned char hsyncEnable,
- unsigned char vsyncEnable,
- unsigned char deskewEnable,
- unsigned char deskewSetting,
- unsigned char continuousSyncEnable,
- unsigned char pllFilterEnable,
- unsigned char pllFilterValue)
+int dviInit(unsigned char edge_select,
+ unsigned char bus_select,
+ unsigned char dual_edge_clk_select,
+ unsigned char hsync_enable,
+ unsigned char vsync_enable,
+ unsigned char deskew_enable,
+ unsigned char deskew_setting,
+ unsigned char continuous_sync_enable,
+ unsigned char pll_filter_enable,
+ unsigned char pll_filter_value)
{
struct dvi_ctrl_device *pCurrentDviCtrl;
pCurrentDviCtrl = g_dcftSupportedDviController;
if (pCurrentDviCtrl->pfnInit) {
- return pCurrentDviCtrl->pfnInit(edgeSelect,
- busSelect,
- dualEdgeClkSelect,
- hsyncEnable,
- vsyncEnable,
- deskewEnable,
- deskewSetting,
- continuousSyncEnable,
- pllFilterEnable,
- pllFilterValue);
+ return pCurrentDviCtrl->pfnInit(edge_select,
+ bus_select,
+ dual_edge_clk_select,
+ hsync_enable,
+ vsync_enable,
+ deskew_enable,
+ deskew_setting,
+ continuous_sync_enable,
+ pll_filter_enable,
+ pll_filter_value);
}
return -1; /* error */
}
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index e48c74ecbbd1..7002567a47d2 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -9,7 +9,7 @@ enum dpms {
crtDPMS_OFF = 0x3,
};
-#define setDAC(off) { \
+#define set_DAC(off) { \
poke32(MISC_CTRL, \
(peek32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
}
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 8391f57d5383..c8e856c13912 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -69,58 +69,58 @@ unsigned short sii164GetDeviceID(void)
 * This function initializes and detects the DVI controller chip.
*
* Input:
- * edgeSelect - Edge Select:
- * 0 = Input data is falling edge latched (falling edge
- * latched first in dual edge mode)
- * 1 = Input data is rising edge latched (rising edge
- * latched first in dual edge mode)
- * busSelect - Input Bus Select:
- * 0 = Input data bus is 12-bits wide
- * 1 = Input data bus is 24-bits wide
- * dualEdgeClkSelect - Dual Edge Clock Select
- * 0 = Input data is single edge latched
- * 1 = Input data is dual edge latched
- * hsyncEnable - Horizontal Sync Enable:
- * 0 = HSYNC input is transmitted as fixed LOW
- * 1 = HSYNC input is transmitted as is
- * vsyncEnable - Vertical Sync Enable:
- * 0 = VSYNC input is transmitted as fixed LOW
- * 1 = VSYNC input is transmitted as is
- * deskewEnable - De-skewing Enable:
- * 0 = De-skew disabled
- * 1 = De-skew enabled
- * deskewSetting - De-skewing Setting (increment of 260psec)
- * 0 = 1 step --> minimum setup / maximum hold
- * 1 = 2 step
- * 2 = 3 step
- * 3 = 4 step
- * 4 = 5 step
- * 5 = 6 step
- * 6 = 7 step
- * 7 = 8 step --> maximum setup / minimum hold
- * continuousSyncEnable- SYNC Continuous:
- * 0 = Disable
- * 1 = Enable
- * pllFilterEnable - PLL Filter Enable
- * 0 = Disable PLL Filter
- * 1 = Enable PLL Filter
- * pllFilterValue - PLL Filter characteristics:
- * 0~7 (recommended value is 4)
+ * edge_select - Edge Select:
+ * 0 = Input data is falling edge latched (falling
+ * edge latched first in dual edge mode)
+ * 1 = Input data is rising edge latched (rising
+ * edge latched first in dual edge mode)
+ * bus_select - Input Bus Select:
+ * 0 = Input data bus is 12-bits wide
+ * 1 = Input data bus is 24-bits wide
+ * dual_edge_clk_select - Dual Edge Clock Select
+ * 0 = Input data is single edge latched
+ * 1 = Input data is dual edge latched
+ * hsync_enable - Horizontal Sync Enable:
+ * 0 = HSYNC input is transmitted as fixed LOW
+ * 1 = HSYNC input is transmitted as is
+ * vsync_enable - Vertical Sync Enable:
+ * 0 = VSYNC input is transmitted as fixed LOW
+ * 1 = VSYNC input is transmitted as is
+ * deskew_enable - De-skewing Enable:
+ * 0 = De-skew disabled
+ * 1 = De-skew enabled
+ * deskew_setting - De-skewing Setting (increment of 260psec)
+ * 0 = 1 step --> minimum setup / maximum hold
+ * 1 = 2 step
+ * 2 = 3 step
+ * 3 = 4 step
+ * 4 = 5 step
+ * 5 = 6 step
+ * 6 = 7 step
+ * 7 = 8 step --> maximum setup / minimum hold
+ * continuous_sync_enable- SYNC Continuous:
+ * 0 = Disable
+ * 1 = Enable
+ * pll_filter_enable - PLL Filter Enable
+ * 0 = Disable PLL Filter
+ * 1 = Enable PLL Filter
+ * pll_filter_value - PLL Filter characteristics:
+ * 0~7 (recommended value is 4)
*
* Output:
* 0 - Success
* -1 - Fail.
*/
-long sii164InitChip(unsigned char edgeSelect,
- unsigned char busSelect,
- unsigned char dualEdgeClkSelect,
- unsigned char hsyncEnable,
- unsigned char vsyncEnable,
- unsigned char deskewEnable,
- unsigned char deskewSetting,
- unsigned char continuousSyncEnable,
- unsigned char pllFilterEnable,
- unsigned char pllFilterValue)
+long sii164InitChip(unsigned char edge_select,
+ unsigned char bus_select,
+ unsigned char dual_edge_clk_select,
+ unsigned char hsync_enable,
+ unsigned char vsync_enable,
+ unsigned char deskew_enable,
+ unsigned char deskew_setting,
+ unsigned char continuous_sync_enable,
+ unsigned char pll_filter_enable,
+ unsigned char pll_filter_value)
{
unsigned char config;
@@ -139,31 +139,31 @@ long sii164InitChip(unsigned char edgeSelect,
*/
/* Select the edge */
- if (edgeSelect == 0)
+ if (edge_select == 0)
config = SII164_CONFIGURATION_LATCH_FALLING;
else
config = SII164_CONFIGURATION_LATCH_RISING;
/* Select bus wide */
- if (busSelect == 0)
+ if (bus_select == 0)
config |= SII164_CONFIGURATION_BUS_12BITS;
else
config |= SII164_CONFIGURATION_BUS_24BITS;
/* Select Dual/Single Edge Clock */
- if (dualEdgeClkSelect == 0)
+ if (dual_edge_clk_select == 0)
config |= SII164_CONFIGURATION_CLOCK_SINGLE;
else
config |= SII164_CONFIGURATION_CLOCK_DUAL;
/* Select HSync Enable */
- if (hsyncEnable == 0)
+ if (hsync_enable == 0)
config |= SII164_CONFIGURATION_HSYNC_FORCE_LOW;
else
config |= SII164_CONFIGURATION_HSYNC_AS_IS;
/* Select VSync Enable */
- if (vsyncEnable == 0)
+ if (vsync_enable == 0)
config |= SII164_CONFIGURATION_VSYNC_FORCE_LOW;
else
config |= SII164_CONFIGURATION_VSYNC_AS_IS;
@@ -175,12 +175,12 @@ long sii164InitChip(unsigned char edgeSelect,
* This fixes some artifacts problem in some mode on board 2.2.
* Somehow this fix does not affect board 2.1.
*/
- if (deskewEnable == 0)
+ if (deskew_enable == 0)
config = SII164_DESKEW_DISABLE;
else
config = SII164_DESKEW_ENABLE;
- switch (deskewSetting) {
+ switch (deskew_setting) {
case 0:
config |= SII164_DESKEW_1_STEP;
break;
@@ -209,19 +209,19 @@ long sii164InitChip(unsigned char edgeSelect,
i2cWriteReg(SII164_I2C_ADDRESS, SII164_DESKEW, config);
/* Enable/Disable Continuous Sync. */
- if (continuousSyncEnable == 0)
+ if (continuous_sync_enable == 0)
config = SII164_PLL_FILTER_SYNC_CONTINUOUS_DISABLE;
else
config = SII164_PLL_FILTER_SYNC_CONTINUOUS_ENABLE;
/* Enable/Disable PLL Filter */
- if (pllFilterEnable == 0)
+ if (pll_filter_enable == 0)
config |= SII164_PLL_FILTER_DISABLE;
else
config |= SII164_PLL_FILTER_ENABLE;
/* Set the PLL Filter value */
- config |= ((pllFilterValue & 0x07) << 1);
+ config |= ((pll_filter_value & 0x07) << 1);
i2cWriteReg(SII164_I2C_ADDRESS, SII164_PLL, config);
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c
index 19c5ffc72b16..5c0ac747ea2b 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.c
+++ b/drivers/staging/sm750fb/ddk750_swi2c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
*
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.h b/drivers/staging/sm750fb/ddk750_swi2c.h
index 3b8a96d6d25a..5868feea791b 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.h
+++ b/drivers/staging/sm750fb/ddk750_swi2c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
*
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index e9f10c2669ea..59568d18ce23 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -695,7 +696,7 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
output->paths = sm750_crt;
crtc->channel = sm750_secondary;
/* does not account for oScreen padding, needs fixing */
- crtc->oScreen = (sm750_dev->vidmem_size >> 1);
+ crtc->oScreen = sm750_dev->vidmem_size >> 1;
crtc->vScreen = sm750_dev->pvMem + crtc->oScreen;
}
break;
@@ -709,7 +710,7 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
output->paths = sm750_crt;
crtc->channel = sm750_primary;
/* does not account for oScreen padding, needs fixing */
- crtc->oScreen = (sm750_dev->vidmem_size >> 1);
+ crtc->oScreen = sm750_dev->vidmem_size >> 1;
crtc->vScreen = sm750_dev->pvMem + crtc->oScreen;
}
break;
@@ -747,7 +748,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
lynx750_ext, NULL, vesa_modes,
};
int cdb[] = {ARRAY_SIZE(lynx750_ext), 0, VESA_MODEDB_SIZE};
- static const char *mdb_desc[] = {
+ static const char * const mdb_desc[] = {
"driver prepared modes",
"kernel prepared default modedb",
"kernel HELPERS prepared vesa_modes",
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index eed840b251da..dbcbbd1055da 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -155,26 +155,26 @@ unsigned int rop2) /* ROP value */
if (sBase == dBase && sPitch == dPitch) {
/* Determine direction of operation */
if (sy < dy) {
- /* +----------+
- * |S |
- * | +----------+
- * | | | |
- * | | | |
- * +---|------+ |
- * | D|
- * +----------+
+ /* +----------+
+ * |S |
+ * | +----------+
+ * | | | |
+ * | | | |
+ * +---|------+ |
+ * | D|
+ * +----------+
*/
nDirection = BOTTOM_TO_TOP;
} else if (sy > dy) {
- /* +----------+
- * |D |
- * | +----------+
- * | | | |
- * | | | |
- * +---|------+ |
- * | S|
- * +----------+
+ /* +----------+
+ * |D |
+ * | +----------+
+ * | | | |
+ * | | | |
+ * +---|------+ |
+ * | S|
+ * +----------+
*/
nDirection = TOP_TO_BOTTOM;
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index edeae9d06883..ea1d3d4efbc2 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -207,7 +207,7 @@ int hw_sm750_output_setMode(struct lynxfb_output *output,
if (output->paths & sm750_crt)
disp_set |= do_CRT_SEC;
}
- ddk750_setLogicalDispOut(disp_set);
+ ddk750_set_logical_disp_out(disp_set);
} else {
/* just open DISPLAY_CONTROL_750LE register bit 3:0 */
u32 reg;
diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig
index d8ec780f7741..0803c2013cf4 100644
--- a/drivers/staging/speakup/Kconfig
+++ b/drivers/staging/speakup/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Speakup console speech"
config SPEAKUP
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 11c704b27c3c..41ae24ab5d08 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -99,7 +99,7 @@ static void report_char_chartab_status(int reset, int received, int used,
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
rejected, rejected > 1 ? "s" : "");
- printk(buf);
+ pr_info("%s", buf);
}
}
@@ -154,7 +154,10 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
continue;
}
- /* Do not replace with kstrtoul: here we need temp to be updated */
+ /*
+ * Do not replace with kstrtoul:
+ * here we need temp to be updated
+ */
index = simple_strtoul(cp, &temp, 10);
if (index > 255) {
rejected++;
@@ -741,7 +744,7 @@ static void report_msg_status(int reset, int received, int used,
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
rejected, rejected > 1 ? "s" : "");
- printk(buf);
+ pr_info("%s", buf);
}
}
@@ -788,7 +791,10 @@ static ssize_t message_store_helper(const char *buf, size_t count,
continue;
}
- /* Do not replace with kstrtoul: here we need temp to be updated */
+ /*
+ * Do not replace with kstrtoul:
+ * here we need temp to be updated
+ */
index = simple_strtoul(cp, &temp, 10);
while ((temp < linefeed) && (*temp == ' ' || *temp == '\t'))
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index b6a65b0c1896..488f2539aa9a 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2319,6 +2319,7 @@ static void __exit speakup_exit(void)
unregister_keyboard_notifier(&keyboard_notifier_block);
unregister_vt_notifier(&vt_notifier_block);
speakup_unregister_devsynth();
+ speakup_cancel_selection();
speakup_cancel_paste();
del_timer_sync(&cursor_timer);
kthread_stop(speakup_task);
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 0ed1fefee0e9..a8b4d0c5ab7e 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -9,178 +9,138 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/atomic.h>
+#include <linux/console.h>
#include "speakup.h"
-/* ------ cut and paste ----- */
-/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
-#define ishardspace(c) ((c) == ' ')
-
unsigned short spk_xs, spk_ys, spk_xe, spk_ye; /* our region points */
-
-/* Variables for selection control. */
-/* must not be deallocated */
struct vc_data *spk_sel_cons;
-/* cleared by clear_selection */
-static int sel_start = -1;
-static int sel_end;
-static int sel_buffer_lth;
-static char *sel_buffer;
-static unsigned char sel_pos(int n)
-{
- return inverse_translate(spk_sel_cons,
- screen_glyph(spk_sel_cons, n), 0);
-}
+struct speakup_selection_work {
+ struct work_struct work;
+ struct tiocl_selection sel;
+ struct tty_struct *tty;
+};
void speakup_clear_selection(void)
{
- sel_start = -1;
+ console_lock();
+ clear_selection();
+ console_unlock();
}
-/* does screen address p correspond to character at LH/RH edge of screen? */
-static int atedge(const int p, int size_row)
+static void __speakup_set_selection(struct work_struct *work)
{
- return !(p % size_row) || !((p + 2) % size_row);
-}
+ struct speakup_selection_work *ssw =
+ container_of(work, struct speakup_selection_work, work);
-/* constrain v such that v <= u */
-static unsigned short limit(const unsigned short v, const unsigned short u)
-{
- return (v > u) ? u : v;
-}
+ struct tty_struct *tty;
+ struct tiocl_selection sel;
-int speakup_set_selection(struct tty_struct *tty)
-{
- int new_sel_start, new_sel_end;
- char *bp, *obp;
- int i, ps, pe;
- struct vc_data *vc = vc_cons[fg_console].d;
+ sel = ssw->sel;
- spk_xs = limit(spk_xs, vc->vc_cols - 1);
- spk_ys = limit(spk_ys, vc->vc_rows - 1);
- spk_xe = limit(spk_xe, vc->vc_cols - 1);
- spk_ye = limit(spk_ye, vc->vc_rows - 1);
- ps = spk_ys * vc->vc_size_row + (spk_xs << 1);
- pe = spk_ye * vc->vc_size_row + (spk_xe << 1);
+ /* this ensures we copy sel before releasing the lock below */
+ rmb();
- if (ps > pe) /* make sel_start <= sel_end */
- swap(ps, pe);
+ /* release the lock by setting tty of the struct to NULL */
+ tty = xchg(&ssw->tty, NULL);
if (spk_sel_cons != vc_cons[fg_console].d) {
- speakup_clear_selection();
spk_sel_cons = vc_cons[fg_console].d;
- dev_warn(tty->dev,
- "Selection: mark console not the same as cut\n");
- return -EINVAL;
+ pr_warn("Selection: mark console not the same as cut\n");
+ goto unref;
}
- new_sel_start = ps;
- new_sel_end = pe;
-
- /* select to end of line if on trailing space */
- if (new_sel_end > new_sel_start &&
- !atedge(new_sel_end, vc->vc_size_row) &&
- ishardspace(sel_pos(new_sel_end))) {
- for (pe = new_sel_end + 2; ; pe += 2)
- if (!ishardspace(sel_pos(pe)) ||
- atedge(pe, vc->vc_size_row))
- break;
- if (ishardspace(sel_pos(pe)))
- new_sel_end = pe;
- }
- if ((new_sel_start == sel_start) && (new_sel_end == sel_end))
- return 0; /* no action required */
-
- sel_start = new_sel_start;
- sel_end = new_sel_end;
- /* Allocate a new buffer before freeing the old one ... */
- bp = kmalloc((sel_end - sel_start) / 2 + 1, GFP_ATOMIC);
- if (!bp) {
- speakup_clear_selection();
- return -ENOMEM;
- }
- kfree(sel_buffer);
- sel_buffer = bp;
-
- obp = bp;
- for (i = sel_start; i <= sel_end; i += 2) {
- *bp = sel_pos(i);
- if (!ishardspace(*bp++))
- obp = bp;
- if (!((i + 2) % vc->vc_size_row)) {
- /* strip trailing blanks from line and add newline,
- * unless non-space at end of line.
- */
- if (obp != bp) {
- bp = obp;
- *bp++ = '\r';
- }
- obp = bp;
- }
+ console_lock();
+ set_selection_kernel(&sel, tty);
+ console_unlock();
+
+unref:
+ tty_kref_put(tty);
+}
+
+static struct speakup_selection_work speakup_sel_work = {
+ .work = __WORK_INITIALIZER(speakup_sel_work.work,
+ __speakup_set_selection)
+};
+
+int speakup_set_selection(struct tty_struct *tty)
+{
+ /* we get kref here first in order to avoid a subtle race when
+ * cancelling selection work. getting kref first establishes the
+ * invariant that if speakup_sel_work.tty is not NULL when
+ * speakup_cancel_selection() is called, it must be the case that a put
+ * kref is pending.
+ */
+ tty_kref_get(tty);
+ if (cmpxchg(&speakup_sel_work.tty, NULL, tty)) {
+ tty_kref_put(tty);
+ return -EBUSY;
}
- sel_buffer_lth = bp - sel_buffer;
+ /* now we have the 'lock' by setting tty member of
+ * speakup_selection_work. wmb() ensures that writes to
+ * speakup_sel_work don't happen before cmpxchg() above.
+ */
+ wmb();
+
+ speakup_sel_work.sel.xs = spk_xs + 1;
+ speakup_sel_work.sel.ys = spk_ys + 1;
+ speakup_sel_work.sel.xe = spk_xe + 1;
+ speakup_sel_work.sel.ye = spk_ye + 1;
+ speakup_sel_work.sel.sel_mode = TIOCL_SELCHAR;
+
+ schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);
+
return 0;
}
-struct speakup_paste_work {
- struct work_struct work;
+void speakup_cancel_selection(void)
+{
struct tty_struct *tty;
-};
+
+ cancel_work_sync(&speakup_sel_work.work);
+ /* setting to null so that if work fails to run and we cancel it,
+ * we can run it again without getting EBUSY forever from there on.
+ * we need to use xchg here to avoid race with speakup_set_selection()
+ */
+ tty = xchg(&speakup_sel_work.tty, NULL);
+ if (tty)
+ tty_kref_put(tty);
+}
static void __speakup_paste_selection(struct work_struct *work)
{
- struct speakup_paste_work *spw =
- container_of(work, struct speakup_paste_work, work);
- struct tty_struct *tty = xchg(&spw->tty, NULL);
- struct vc_data *vc = (struct vc_data *)tty->driver_data;
- int pasted = 0, count;
- struct tty_ldisc *ld;
- DECLARE_WAITQUEUE(wait, current);
-
- ld = tty_ldisc_ref(tty);
- if (!ld)
- goto tty_unref;
- tty_buffer_lock_exclusive(&vc->port);
-
- add_wait_queue(&vc->paste_wait, &wait);
- while (sel_buffer && sel_buffer_lth > pasted) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_throttled(tty)) {
- schedule();
- continue;
- }
- count = sel_buffer_lth - pasted;
- count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
- count);
- pasted += count;
- }
- remove_wait_queue(&vc->paste_wait, &wait);
- __set_current_state(TASK_RUNNING);
+ struct speakup_selection_work *ssw =
+ container_of(work, struct speakup_selection_work, work);
+ struct tty_struct *tty = xchg(&ssw->tty, NULL);
- tty_buffer_unlock_exclusive(&vc->port);
- tty_ldisc_deref(ld);
-tty_unref:
+ paste_selection(tty);
tty_kref_put(tty);
}
-static struct speakup_paste_work speakup_paste_work = {
+static struct speakup_selection_work speakup_paste_work = {
.work = __WORK_INITIALIZER(speakup_paste_work.work,
__speakup_paste_selection)
};
int speakup_paste_selection(struct tty_struct *tty)
{
- if (cmpxchg(&speakup_paste_work.tty, NULL, tty))
+ tty_kref_get(tty);
+ if (cmpxchg(&speakup_paste_work.tty, NULL, tty)) {
+ tty_kref_put(tty);
return -EBUSY;
+ }
- tty_kref_get(tty);
schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
return 0;
}
void speakup_cancel_paste(void)
{
+ struct tty_struct *tty;
+
cancel_work_sync(&speakup_paste_work.work);
- tty_kref_put(speakup_paste_work.tty);
+ tty = xchg(&speakup_paste_work.tty, NULL);
+ if (tty)
+ tty_kref_put(tty);
}
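
The reworked selection path above hands the actual work to set_selection_kernel()/paste_selection() and guards each deferred request with a single tty pointer slot: cmpxchg() claims the slot (returning -EBUSY if a request is already pending), and xchg() on the cancel path drains it and drops the reference. A minimal userspace sketch of that claim/release idea follows; the names are purely illustrative and C11 atomics stand in for the kernel primitives.

/* Single-slot "claim with compare-exchange, drain with exchange" pattern. */
#include <stdatomic.h>
#include <stdio.h>

struct request {
	int xs, ys, xe, ye;
};

static _Atomic(struct request *) slot;	/* NULL means "free" */

static int submit(struct request *req)
{
	struct request *expected = NULL;

	/* claim the slot; report busy if another request is still pending */
	if (!atomic_compare_exchange_strong(&slot, &expected, req))
		return -1;
	/* ...here the worker that consumes the slot would be scheduled... */
	return 0;
}

static void cancel(void)
{
	/* drain whatever is pending so a later submit can succeed again */
	struct request *req = atomic_exchange(&slot, (struct request *)NULL);

	if (req)
		printf("dropped pending request (%d,%d)-(%d,%d)\n",
		       req->xs, req->ys, req->xe, req->ye);
}

int main(void)
{
	struct request a = { 1, 1, 10, 2 }, b = { 2, 2, 5, 5 };

	printf("submit a: %d\n", submit(&a));	/* 0: slot was free */
	printf("submit b: %d\n", submit(&b));	/* -1: busy until drained */
	cancel();
	printf("submit b: %d\n", submit(&b));	/* 0 again */
	return 0;
}

The kref-before-cmpxchg ordering in the real code exists so that a non-NULL slot always implies a pending reference drop, which is what lets speakup_cancel_selection() safely put whatever tty it drains.
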
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index e4f4f00be2dc..74fe49c2c511 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -72,6 +72,7 @@ void synth_buffer_add(u16 ch);
void synth_buffer_clear(void);
void speakup_clear_selection(void);
int speakup_set_selection(struct tty_struct *tty);
+void speakup_cancel_selection(void);
int speakup_paste_selection(struct tty_struct *tty);
void speakup_cancel_paste(void);
void speakup_register_devsynth(void);
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 459ee0c0bd57..798c42dfa16c 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -263,7 +263,7 @@ static int dt_wait_dma(void)
if (!dt_waitbit(STAT_dma_ready))
return 0;
while (--timeout > 0) {
- if ((dt_getstatus()&STAT_dma_state) == state)
+ if ((dt_getstatus() & STAT_dma_state) == state)
return 1;
udelay(50);
}
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index b788272da4f9..414827e888fc 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -233,7 +233,7 @@ static void do_catch_up(struct spk_synth *synth)
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies(delay_time_val));
- jiff_max = jiffies+jiffy_delta_val;
+ jiff_max = jiffies + jiffy_delta_val;
}
}
timeout = 1000;
@@ -261,7 +261,7 @@ static int synth_probe(struct spk_synth *synth)
synth_port = port_forced;
pr_info("probe forced to %x by kernel command line\n",
synth_port);
- if (synth_request_region(synth_port-1, SYNTH_IO_EXTENT)) {
+ if (synth_request_region(synth_port - 1, SYNTH_IO_EXTENT)) {
pr_warn("sorry, port already reserved\n");
return -EBUSY;
}
@@ -289,7 +289,7 @@ static int synth_probe(struct spk_synth *synth)
return -ENODEV;
}
pr_info("%s: %03x-%03x, driver version %s,\n", synth->long_name,
- synth_port, synth_port+SYNTH_IO_EXTENT-1,
+ synth_port, synth_port + SYNTH_IO_EXTENT - 1,
synth->version);
synth->alive = 1;
return 0;
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index 0057eb980bec..5a9eff08cb96 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -47,7 +47,7 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
{
struct spk_ldisc_data *ldisc_data;
- if (tty->ops->write == NULL)
+ if (!tty->ops->write)
return -EOPNOTSUPP;
speakup_tty = tty;
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
index c27dab3b610f..dc5e1bddc085 100644
--- a/drivers/staging/unisys/Kconfig
+++ b/drivers/staging/unisys/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Unisys SPAR driver configuration
#
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
index e45f44b64202..c0f76cc196a6 100644
--- a/drivers/staging/unisys/Makefile
+++ b/drivers/staging/unisys/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Unisys SPAR drivers
#
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 45c785d80ce4..9ef812c0bc42 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2010 - 2016 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/staging/unisys/visorhba/Kconfig b/drivers/staging/unisys/visorhba/Kconfig
index 241d8038261c..ed59ac11c322 100644
--- a/drivers/staging/unisys/visorhba/Kconfig
+++ b/drivers/staging/unisys/visorhba/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Unisys visorhba configuration
#
@@ -5,10 +6,10 @@
config UNISYS_VISORHBA
tristate "Unisys visorhba driver"
depends on UNISYSSPAR && UNISYS_VISORBUS && SCSI
- ---help---
- The Unisys visorhba driver provides support for s-Par HBA
- devices exposed on the s-Par visorbus. When a message is sent
- to visorbus to create a HBA device, the probe function of
- visorhba is called to create the scsi device.
- If you say Y here, you will enable the Unisys visorhba driver.
+ help
+ The Unisys visorhba driver provides support for s-Par HBA
+ devices exposed on the s-Par visorbus. When a message is sent
+	  to visorbus to create an HBA device, the probe function of
+ visorhba is called to create the scsi device.
+ If you say Y here, you will enable the Unisys visorhba driver.
diff --git a/drivers/staging/unisys/visorhba/Makefile b/drivers/staging/unisys/visorhba/Makefile
index 97e48757944a..b613a7dcdae9 100644
--- a/drivers/staging/unisys/visorhba/Makefile
+++ b/drivers/staging/unisys/visorhba/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Unisys channel
#
diff --git a/drivers/staging/unisys/visorinput/Kconfig b/drivers/staging/unisys/visorinput/Kconfig
index a3817e0f7e5c..5f036393aee9 100644
--- a/drivers/staging/unisys/visorinput/Kconfig
+++ b/drivers/staging/unisys/visorinput/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Unisys visorinput configuration
#
@@ -5,11 +6,11 @@
config UNISYS_VISORINPUT
tristate "Unisys visorinput driver"
depends on UNISYSSPAR && UNISYS_VISORBUS && INPUT
- ---help---
- The Unisys s-Par visorinput driver provides a virtualized system
- console (keyboard and mouse) that is accessible through the
- s-Par firmware's user interface. s-Par provides video using the EFI
- GOP protocol, so If this driver is not present, the Linux guest should
- still boot with visible output in the partition desktop, but keyboard
- and mouse interaction will not be available.
+ help
+ The Unisys s-Par visorinput driver provides a virtualized system
+ console (keyboard and mouse) that is accessible through the
+ s-Par firmware's user interface. s-Par provides video using the EFI
+	  GOP protocol, so if this driver is not present, the Linux guest should
+ still boot with visible output in the partition desktop, but keyboard
+ and mouse interaction will not be available.
diff --git a/drivers/staging/unisys/visorinput/Makefile b/drivers/staging/unisys/visorinput/Makefile
index 6e4bfa059a1f..68ced7c8a65f 100644
--- a/drivers/staging/unisys/visorinput/Makefile
+++ b/drivers/staging/unisys/visorinput/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Unisys visorinput
#
diff --git a/drivers/staging/unisys/visornic/Kconfig b/drivers/staging/unisys/visornic/Kconfig
index 1676dc7072d5..3f8f5570821b 100644
--- a/drivers/staging/unisys/visornic/Kconfig
+++ b/drivers/staging/unisys/visornic/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Unisys visornic configuration
#
@@ -5,11 +6,11 @@
config UNISYS_VISORNIC
tristate "Unisys visornic driver"
depends on UNISYSSPAR && UNISYS_VISORBUS && NET
- ---help---
- The Unisys Visornic driver provides support for s-Par network
- devices exposed on the s-Par visorbus. When a message is sent
- to visorbus to create a network device, the probe function of
- visornic is called to create the netdev device. Networking on
- s-Par switches will not work if this driver is not selected.
- If you say Y here, you will enable the Unisys visornic driver.
+ help
+ The Unisys Visornic driver provides support for s-Par network
+ devices exposed on the s-Par visorbus. When a message is sent
+ to visorbus to create a network device, the probe function of
+ visornic is called to create the netdev device. Networking on
+ s-Par switches will not work if this driver is not selected.
+ If you say Y here, you will enable the Unisys visornic driver.
diff --git a/drivers/staging/unisys/visornic/Makefile b/drivers/staging/unisys/visornic/Makefile
index 336a746f793b..f2984880c340 100644
--- a/drivers/staging/unisys/visornic/Makefile
+++ b/drivers/staging/unisys/visornic/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Unisys channel
#
diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO
deleted file mode 100644
index 7f97c47a4042..000000000000
--- a/drivers/staging/vboxvideo/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
--Get a full review from the drm-maintainers on dri-devel done on this driver
--Drop all the logic around initial_mode_queried, the master_set and
- master_drop callbacks and everything related to this. kms clients can handle
- hotplugs.
--Extend this TODO with the results of that review
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
-Hans de Goede <hdegoede@redhat.com>, Michael Thayer <michael.thayer@oracle.com>
-and dri-devel@lists.freedesktop.org .
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index 98064ce2c2b4..6baf9dd57f1f 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig BCM_VIDEOCORE
tristate "Broadcom VideoCore support"
depends on OF
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Kconfig b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
index 62c1c8ba4ad4..f66319512faf 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config SND_BCM2835
tristate "BCM2835 Audio"
depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Makefile b/drivers/staging/vc04_services/bcm2835-audio/Makefile
index 536bd0c11ddb..13fa6d7d9745 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Makefile
+++ b/drivers/staging/vc04_services/bcm2835-audio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SND_BCM2835) += snd-bcm2835.o
snd-bcm2835-objs := bcm2835.o bcm2835-ctl.o bcm2835-pcm.o bcm2835-vchiq.o
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
index a6ec72a5f0be..4c2cae99776b 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
@@ -68,7 +68,7 @@ static int snd_bcm2835_ctl_get(struct snd_kcontrol *kcontrol,
}
static int snd_bcm2835_ctl_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
int val, *valp;
@@ -129,7 +129,7 @@ static const struct snd_kcontrol_new snd_bcm2835_ctl[] = {
};
static int snd_bcm2835_spdif_default_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
+ struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
@@ -137,7 +137,7 @@ static int snd_bcm2835_spdif_default_info(struct snd_kcontrol *kcontrol,
}
static int snd_bcm2835_spdif_default_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
int i;
@@ -153,7 +153,7 @@ static int snd_bcm2835_spdif_default_get(struct snd_kcontrol *kcontrol,
}
static int snd_bcm2835_spdif_default_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
unsigned int val = 0;
@@ -172,7 +172,7 @@ static int snd_bcm2835_spdif_default_put(struct snd_kcontrol *kcontrol,
}
static int snd_bcm2835_spdif_mask_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
+ struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
@@ -180,7 +180,7 @@ static int snd_bcm2835_spdif_mask_info(struct snd_kcontrol *kcontrol,
}
static int snd_bcm2835_spdif_mask_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
/*
* bcm2835 supports only consumer mode and sets all other format flags
diff --git a/drivers/staging/vc04_services/bcm2835-camera/Kconfig b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
index b8b01aa4e426..c81baf2c111e 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_BCM2835
tristate "BCM2835 Camera"
depends on MEDIA_SUPPORT
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 7c6cf41645eb..68f08dc18da9 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -312,8 +312,7 @@ static void buffer_cleanup(struct vb2_buffer *vb)
static inline bool is_capturing(struct bm2835_mmal_dev *dev)
{
return dev->capture.camera_port ==
- &dev->
- component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_CAPTURE];
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_CAPTURE];
}
static void buffer_cb(struct vchiq_mmal_instance *instance,
@@ -513,8 +512,8 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
msleep(300);
/* enable the connection from camera to encoder (if applicable) */
- if (dev->capture.camera_port != dev->capture.port
- && dev->capture.camera_port) {
+ if (dev->capture.camera_port != dev->capture.port &&
+ dev->capture.camera_port) {
ret = vchiq_mmal_port_enable(dev->instance,
dev->capture.camera_port, NULL);
if (ret) {
@@ -751,8 +750,7 @@ static int vidioc_overlay(struct file *file, void *f, unsigned int on)
return 0; /* already in requested state */
src =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_PREVIEW];
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_PREVIEW];
if (!on) {
/* disconnect preview ports and disable component */
@@ -807,8 +805,7 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
*/
struct bm2835_mmal_dev *dev = video_drvdata(file);
struct vchiq_mmal_port *preview_port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_PREVIEW];
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_PREVIEW];
a->capability = V4L2_FBUF_CAP_EXTERNOVERLAY |
V4L2_FBUF_CAP_GLOBAL_ALPHA;
@@ -1000,8 +997,7 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
dev->capture.camera_port, NULL);
dev->capture.camera_port = NULL;
ret = vchiq_mmal_component_disable(dev->instance,
- dev->capture.
- encode_component);
+ dev->capture.encode_component);
if (ret)
v4l2_err(&dev->v4l2_dev,
"Failed to disable encode component %d\n",
@@ -1013,29 +1009,25 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
switch (mfmt->mmal_component) {
case MMAL_COMPONENT_CAMERA:
/* Make a further decision on port based on resolution */
- if (f->fmt.pix.width <= max_video_width
- && f->fmt.pix.height <= max_video_height)
+ if (f->fmt.pix.width <= max_video_width &&
+ f->fmt.pix.height <= max_video_height)
camera_port = port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_VIDEO];
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_VIDEO];
else
camera_port = port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_CAPTURE];
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_CAPTURE];
break;
case MMAL_COMPONENT_IMAGE_ENCODE:
encode_component = dev->component[MMAL_COMPONENT_IMAGE_ENCODE];
port = &dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->output[0];
camera_port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_CAPTURE];
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_CAPTURE];
break;
case MMAL_COMPONENT_VIDEO_ENCODE:
encode_component = dev->component[MMAL_COMPONENT_VIDEO_ENCODE];
port = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
camera_port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_VIDEO];
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_VIDEO];
break;
default:
break;
@@ -1075,15 +1067,13 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
- if (!ret
- && camera_port ==
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_VIDEO]) {
+ if (!ret &&
+ camera_port ==
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_VIDEO]) {
bool overlay_enabled =
!!dev->component[MMAL_COMPONENT_PREVIEW]->enabled;
struct vchiq_mmal_port *preview_port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_PREVIEW];
+ &dev->component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_PREVIEW];
/* Preview and encode ports need to match on resolution */
if (overlay_enabled) {
/* Need to disable the overlay before we can update
@@ -1501,7 +1491,6 @@ static int set_camera_parameters(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_component *camera,
struct bm2835_mmal_dev *dev)
{
- int ret;
struct mmal_parameter_camera_config cam_config = {
.max_stills_w = dev->max_width,
.max_stills_h = dev->max_height,
@@ -1517,10 +1506,9 @@ static int set_camera_parameters(struct vchiq_mmal_instance *instance,
.use_stc_timestamp = MMAL_PARAM_TIMESTAMP_MODE_RAW_STC
};
- ret = vchiq_mmal_port_parameter_set(instance, &camera->control,
+ return vchiq_mmal_port_parameter_set(instance, &camera->control,
MMAL_PARAMETER_CAMERA_CONFIG,
&cam_config, sizeof(cam_config));
- return ret;
}
#define MAX_SUPPORTED_ENCODINGS 20
@@ -1673,8 +1661,7 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
/* get the video encoder component ready */
ret = vchiq_mmal_component_init(dev->instance, "ril.video_encode",
- &dev->
- component[MMAL_COMPONENT_VIDEO_ENCODE]);
+ &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]);
if (ret < 0)
goto unreg_image_encoder;
@@ -1797,12 +1784,10 @@ static void bcm2835_cleanup_instance(struct bm2835_mmal_dev *dev)
dev->component[MMAL_COMPONENT_CAMERA]);
vchiq_mmal_component_finalise(dev->instance,
- dev->
- component[MMAL_COMPONENT_VIDEO_ENCODE]);
+ dev->component[MMAL_COMPONENT_VIDEO_ENCODE]);
vchiq_mmal_component_finalise(dev->instance,
- dev->
- component[MMAL_COMPONENT_IMAGE_ENCODE]);
+ dev->component[MMAL_COMPONENT_IMAGE_ENCODE]);
vchiq_mmal_component_finalise(dev->instance,
dev->component[MMAL_COMPONENT_PREVIEW]);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index a2c55cb2192a..9841c30450ce 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -270,11 +270,9 @@ static int ctrl_set_rotate(struct bm2835_mmal_dev *dev,
if (ret < 0)
return ret;
- ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[2],
+ return vchiq_mmal_port_parameter_set(dev->instance, &camera->output[2],
mmal_ctrl->mmal_id,
&u32_value, sizeof(u32_value));
-
- return ret;
}
static int ctrl_set_flip(struct bm2835_mmal_dev *dev,
@@ -313,11 +311,9 @@ static int ctrl_set_flip(struct bm2835_mmal_dev *dev,
if (ret < 0)
return ret;
- ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[2],
+ return vchiq_mmal_port_parameter_set(dev->instance, &camera->output[2],
mmal_ctrl->mmal_id,
&u32_value, sizeof(u32_value));
-
- return ret;
}
static int ctrl_set_exposure(struct bm2835_mmal_dev *dev,
@@ -607,18 +603,15 @@ static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
- int ret;
struct vchiq_mmal_port *encoder_out;
dev->capture.encode_bitrate = ctrl->val;
encoder_out = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
- ret = vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
- mmal_ctrl->mmal_id,
- &ctrl->val, sizeof(ctrl->val));
- ret = 0;
- return ret;
+ return vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
+ mmal_ctrl->mmal_id, &ctrl->val,
+ sizeof(ctrl->val));
}
static int ctrl_set_bitrate_mode(struct bm2835_mmal_dev *dev,
@@ -930,49 +923,49 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
V4L2_CID_SATURATION, MMAL_CONTROL_TYPE_STD,
-100, 100, 0, 1, NULL,
MMAL_PARAMETER_SATURATION,
- &ctrl_set_rational,
+ ctrl_set_rational,
false
},
{
V4L2_CID_SHARPNESS, MMAL_CONTROL_TYPE_STD,
-100, 100, 0, 1, NULL,
MMAL_PARAMETER_SHARPNESS,
- &ctrl_set_rational,
+ ctrl_set_rational,
false
},
{
V4L2_CID_CONTRAST, MMAL_CONTROL_TYPE_STD,
-100, 100, 0, 1, NULL,
MMAL_PARAMETER_CONTRAST,
- &ctrl_set_rational,
+ ctrl_set_rational,
false
},
{
V4L2_CID_BRIGHTNESS, MMAL_CONTROL_TYPE_STD,
0, 100, 50, 1, NULL,
MMAL_PARAMETER_BRIGHTNESS,
- &ctrl_set_rational,
+ ctrl_set_rational,
false
},
{
V4L2_CID_ISO_SENSITIVITY, MMAL_CONTROL_TYPE_INT_MENU,
0, ARRAY_SIZE(iso_qmenu) - 1, 0, 1, iso_qmenu,
MMAL_PARAMETER_ISO,
- &ctrl_set_iso,
+ ctrl_set_iso,
false
},
{
V4L2_CID_ISO_SENSITIVITY_AUTO, MMAL_CONTROL_TYPE_STD_MENU,
0, 1, V4L2_ISO_SENSITIVITY_AUTO, 1, NULL,
MMAL_PARAMETER_ISO,
- &ctrl_set_iso,
+ ctrl_set_iso,
false
},
{
V4L2_CID_IMAGE_STABILIZATION, MMAL_CONTROL_TYPE_STD,
0, 1, 0, 1, NULL,
MMAL_PARAMETER_VIDEO_STABILISATION,
- &ctrl_set_value,
+ ctrl_set_value,
false
},
/* {
@@ -983,7 +976,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
V4L2_CID_EXPOSURE_AUTO, MMAL_CONTROL_TYPE_STD_MENU,
~0x03, 3, V4L2_EXPOSURE_AUTO, 0, NULL,
MMAL_PARAMETER_EXPOSURE_MODE,
- &ctrl_set_exposure,
+ ctrl_set_exposure,
false
},
/* todo this needs mixing in with set exposure
@@ -996,7 +989,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
/* Units of 100usecs */
1, 1 * 1000 * 10, 100 * 10, 1, NULL,
MMAL_PARAMETER_SHUTTER_SPEED,
- &ctrl_set_exposure,
+ ctrl_set_exposure,
false
},
{
@@ -1004,7 +997,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
0, ARRAY_SIZE(ev_bias_qmenu) - 1,
(ARRAY_SIZE(ev_bias_qmenu) + 1) / 2 - 1, 0, ev_bias_qmenu,
MMAL_PARAMETER_EXPOSURE_COMP,
- &ctrl_set_value_ev,
+ ctrl_set_value_ev,
false
},
{
@@ -1012,7 +1005,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
0, 1,
0, 1, NULL,
0, /* Dummy MMAL ID as it gets mapped into FPS range*/
- &ctrl_set_exposure,
+ ctrl_set_exposure,
false
},
{
@@ -1020,7 +1013,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
MMAL_CONTROL_TYPE_STD_MENU,
~0x7, 2, V4L2_EXPOSURE_METERING_AVERAGE, 0, NULL,
MMAL_PARAMETER_EXP_METERING_MODE,
- &ctrl_set_metering_mode,
+ ctrl_set_metering_mode,
false
},
{
@@ -1028,56 +1021,56 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
MMAL_CONTROL_TYPE_STD_MENU,
~0x3ff, 9, V4L2_WHITE_BALANCE_AUTO, 0, NULL,
MMAL_PARAMETER_AWB_MODE,
- &ctrl_set_awb_mode,
+ ctrl_set_awb_mode,
false
},
{
V4L2_CID_RED_BALANCE, MMAL_CONTROL_TYPE_STD,
1, 7999, 1000, 1, NULL,
MMAL_PARAMETER_CUSTOM_AWB_GAINS,
- &ctrl_set_awb_gains,
+ ctrl_set_awb_gains,
false
},
{
V4L2_CID_BLUE_BALANCE, MMAL_CONTROL_TYPE_STD,
1, 7999, 1000, 1, NULL,
MMAL_PARAMETER_CUSTOM_AWB_GAINS,
- &ctrl_set_awb_gains,
+ ctrl_set_awb_gains,
false
},
{
V4L2_CID_COLORFX, MMAL_CONTROL_TYPE_STD_MENU,
0, 15, V4L2_COLORFX_NONE, 0, NULL,
MMAL_PARAMETER_IMAGE_EFFECT,
- &ctrl_set_image_effect,
+ ctrl_set_image_effect,
false
},
{
V4L2_CID_COLORFX_CBCR, MMAL_CONTROL_TYPE_STD,
0, 0xffff, 0x8080, 1, NULL,
MMAL_PARAMETER_COLOUR_EFFECT,
- &ctrl_set_colfx,
+ ctrl_set_colfx,
false
},
{
V4L2_CID_ROTATE, MMAL_CONTROL_TYPE_STD,
0, 360, 0, 90, NULL,
MMAL_PARAMETER_ROTATION,
- &ctrl_set_rotate,
+ ctrl_set_rotate,
false
},
{
V4L2_CID_HFLIP, MMAL_CONTROL_TYPE_STD,
0, 1, 0, 1, NULL,
MMAL_PARAMETER_MIRROR,
- &ctrl_set_flip,
+ ctrl_set_flip,
false
},
{
V4L2_CID_VFLIP, MMAL_CONTROL_TYPE_STD,
0, 1, 0, 1, NULL,
MMAL_PARAMETER_MIRROR,
- &ctrl_set_flip,
+ ctrl_set_flip,
false
},
{
@@ -1085,14 +1078,14 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
0, ARRAY_SIZE(bitrate_mode_qmenu) - 1,
0, 0, bitrate_mode_qmenu,
MMAL_PARAMETER_RATECONTROL,
- &ctrl_set_bitrate_mode,
+ ctrl_set_bitrate_mode,
false
},
{
V4L2_CID_MPEG_VIDEO_BITRATE, MMAL_CONTROL_TYPE_STD,
25 * 1000, 25 * 1000 * 1000, 10 * 1000 * 1000, 25 * 1000, NULL,
MMAL_PARAMETER_VIDEO_BIT_RATE,
- &ctrl_set_bitrate,
+ ctrl_set_bitrate,
false
},
{
@@ -1100,7 +1093,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
1, 100,
30, 1, NULL,
MMAL_PARAMETER_JPEG_Q_FACTOR,
- &ctrl_set_image_encode_output,
+ ctrl_set_image_encode_output,
false
},
{
@@ -1108,7 +1101,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
0, ARRAY_SIZE(mains_freq_qmenu) - 1,
1, 1, mains_freq_qmenu,
MMAL_PARAMETER_FLICKER_AVOID,
- &ctrl_set_flicker_avoidance,
+ ctrl_set_flicker_avoidance,
false
},
{
@@ -1116,7 +1109,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
0, 1,
0, 1, NULL,
MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER,
- &ctrl_set_video_encode_param_output,
+ ctrl_set_video_encode_param_output,
true /* Errors ignored as requires latest firmware to work */
},
{
@@ -1129,7 +1122,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, 1, NULL,
MMAL_PARAMETER_PROFILE,
- &ctrl_set_video_encode_profile_level,
+ ctrl_set_video_encode_profile_level,
false
},
{
@@ -1149,7 +1142,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 1, NULL,
MMAL_PARAMETER_PROFILE,
- &ctrl_set_video_encode_profile_level,
+ ctrl_set_video_encode_profile_level,
false
},
{
@@ -1158,14 +1151,14 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
V4L2_SCENE_MODE_TEXT,
V4L2_SCENE_MODE_NONE, 1, NULL,
MMAL_PARAMETER_PROFILE,
- &ctrl_set_scene_mode,
+ ctrl_set_scene_mode,
false
},
{
V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, MMAL_CONTROL_TYPE_STD,
0, 0x7FFFFFFF, 60, 1, NULL,
MMAL_PARAMETER_INTRAPERIOD,
- &ctrl_set_video_encode_param_output,
+ ctrl_set_video_encode_param_output,
false
},
};
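
The controls.c hunk drops the redundant & from the setter initialisers: in C a function name used in an expression decays to a pointer to that function, so &ctrl_set_rational and ctrl_set_rational yield the same value. A tiny standalone illustration (add_one is invented, not part of the driver):

#include <stdio.h>

static int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	/* Both initialisers produce the same function pointer value. */
	int (*f)(int) = add_one;
	int (*g)(int) = &add_one;

	printf("%d %d %d\n", f(2), g(2), f == g);
	return 0;
}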
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index 0b6fc0d31f4c..f85562b9ba9e 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHI_H_
#define VCHI_H_
@@ -67,8 +37,8 @@ struct opaque_vchi_service_t;
// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
struct vchi_held_msg {
- struct opaque_vchi_service_t *service;
- void *message;
+ struct opaque_vchi_service_t *service;
+ void *message;
};
// structure used to provide the information needed to open a server or a client
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
index 0d3c468c3504..89aa4e6122cd 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHI_CFG_H_
#define VCHI_CFG_H_
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
index 35f331f80812..e7955cbaf26a 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -1,124 +1,94 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHI_COMMON_H_
#define VCHI_COMMON_H_
//flags used when sending messages (must be bitmapped)
typedef enum {
- VCHI_FLAGS_NONE = 0x0,
- VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
- VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
- VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
- VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
- VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
-
- VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
- VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
- VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
- VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
- VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
- VCHI_FLAGS_INTERNAL = 0xFF0000
+ VCHI_FLAGS_NONE = 0x0,
+ VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
+ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
+ VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
+ VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
+ VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
+ VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
+
+ VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
+ VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
+ VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
+ VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
+ VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
+ VCHI_FLAGS_INTERNAL = 0xFF0000
} VCHI_FLAGS_T;
// constants for vchi_crc_control()
typedef enum {
- VCHI_CRC_NOTHING = -1,
- VCHI_CRC_PER_SERVICE = 0,
- VCHI_CRC_EVERYTHING = 1,
+ VCHI_CRC_NOTHING = -1,
+ VCHI_CRC_PER_SERVICE = 0,
+ VCHI_CRC_EVERYTHING = 1,
} VCHI_CRC_CONTROL_T;
//callback reasons when an event occurs on a service
typedef enum {
- VCHI_CALLBACK_REASON_MIN,
-
- //This indicates that there is data available
- //handle is the msg id that was transmitted with the data
- // When a message is received and there was no FULL message available previously, send callback
- // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
- VCHI_CALLBACK_MSG_AVAILABLE,
- VCHI_CALLBACK_MSG_SENT,
- VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
-
- // This indicates that a transfer from the other side has completed
- VCHI_CALLBACK_BULK_RECEIVED,
- //This indicates that data queued up to be sent has now gone
- //handle is the msg id that was used when sending the data
- VCHI_CALLBACK_BULK_SENT,
- VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
- VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
-
- VCHI_CALLBACK_SERVICE_CLOSED,
-
- // this side has sent XOFF to peer due to lack of data consumption by service
- // (suggests the service may need to take some recovery action if it has
- // been deliberately holding off consuming data)
- VCHI_CALLBACK_SENT_XOFF,
- VCHI_CALLBACK_SENT_XON,
-
- // indicates that a bulk transfer has finished reading the source buffer
- VCHI_CALLBACK_BULK_DATA_READ,
-
- // power notification events (currently host side only)
- VCHI_CALLBACK_PEER_OFF,
- VCHI_CALLBACK_PEER_SUSPENDED,
- VCHI_CALLBACK_PEER_ON,
- VCHI_CALLBACK_PEER_RESUMED,
- VCHI_CALLBACK_FORCED_POWER_OFF,
-
- // some extra notifications provided by vchiq_arm
- VCHI_CALLBACK_SERVICE_OPENED,
- VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
- VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
-
- VCHI_CALLBACK_REASON_MAX
+ VCHI_CALLBACK_REASON_MIN,
+
+ //This indicates that there is data available
+ //handle is the msg id that was transmitted with the data
+ // When a message is received and there was no FULL message available previously, send callback
+ // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
+ VCHI_CALLBACK_MSG_AVAILABLE,
+ VCHI_CALLBACK_MSG_SENT,
+ VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
+
+ // This indicates that a transfer from the other side has completed
+ VCHI_CALLBACK_BULK_RECEIVED,
+ //This indicates that data queued up to be sent has now gone
+ //handle is the msg id that was used when sending the data
+ VCHI_CALLBACK_BULK_SENT,
+ VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
+ VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
+
+ VCHI_CALLBACK_SERVICE_CLOSED,
+
+ // this side has sent XOFF to peer due to lack of data consumption by service
+ // (suggests the service may need to take some recovery action if it has
+ // been deliberately holding off consuming data)
+ VCHI_CALLBACK_SENT_XOFF,
+ VCHI_CALLBACK_SENT_XON,
+
+ // indicates that a bulk transfer has finished reading the source buffer
+ VCHI_CALLBACK_BULK_DATA_READ,
+
+ // power notification events (currently host side only)
+ VCHI_CALLBACK_PEER_OFF,
+ VCHI_CALLBACK_PEER_SUSPENDED,
+ VCHI_CALLBACK_PEER_ON,
+ VCHI_CALLBACK_PEER_RESUMED,
+ VCHI_CALLBACK_FORCED_POWER_OFF,
+
+ // some extra notifications provided by vchiq_arm
+ VCHI_CALLBACK_SERVICE_OPENED,
+ VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
+ VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
+
+ VCHI_CALLBACK_REASON_MAX
} VCHI_CALLBACK_REASON_T;
// service control options
typedef enum {
- VCHI_SERVICE_OPTION_MIN,
+ VCHI_SERVICE_OPTION_MIN,
- VCHI_SERVICE_OPTION_TRACE,
- VCHI_SERVICE_OPTION_SYNCHRONOUS,
+ VCHI_SERVICE_OPTION_TRACE,
+ VCHI_SERVICE_OPTION_SYNCHRONOUS,
- VCHI_SERVICE_OPTION_MAX
+ VCHI_SERVICE_OPTION_MAX
} VCHI_SERVICE_OPTION_T;
//Callback used by all services / bulk transfers
typedef void (*VCHI_CALLBACK_T)(void *callback_param, //my service local param
- VCHI_CALLBACK_REASON_T reason,
- void *handle); //for transmitting msg's only
+ VCHI_CALLBACK_REASON_T reason,
+ void *handle); //for transmitting msg's only
/*
* Define vector struct for scatter-gather (vector) operations
@@ -138,8 +108,8 @@ typedef void (*VCHI_CALLBACK_T)(void *callback_param, //my service local param
*
*/
struct vchi_msg_vector {
- const void *vec_base;
- int32_t vec_len;
+ const void *vec_base;
+ int32_t vec_len;
};
// Opaque type for a connection API
@@ -155,10 +125,10 @@ typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
// is used again after messages for that service are removed/dequeued by any
// means other than vchi_msg_iter_... calls on the iterator itself.
struct vchi_msg_iter {
- struct opaque_vchi_service_t *service;
- void *last;
- void *next;
- void *remove;
+ struct opaque_vchi_service_t *service;
+ void *last;
+ void *next;
+ void *remove;
};
#endif // VCHI_COMMON_H_
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
index 21adf89a9065..25af99a0f394 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHIQ_VCHIQ_H
#define VCHIQ_VCHIQ_H
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index dd4898861b83..a9a22917ecdb 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -209,6 +179,9 @@ vchiq_platform_init_state(struct vchiq_state *state)
struct vchiq_2835_state *platform_state;
state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
+ if (!state->platform_state)
+ return VCHIQ_ERROR;
+
platform_state = (struct vchiq_2835_state *)state->platform_state;
platform_state->inited = 1;
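
The vchiq_2835_arm.c hunk adds the allocation-failure check that was missing before platform_state is used. The same defensive pattern, sketched outside the driver with an invented demo_state (an illustration of the check only, not vchiq code):

#include <linux/slab.h>
#include <linux/errno.h>

/* Invented per-device state, standing in for the driver's platform state. */
struct demo_state {
	int inited;
};

static int demo_init_state(struct demo_state **out)
{
	struct demo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	/* Bail out before touching the allocation if kzalloc() failed. */
	if (!st)
		return -ENOMEM;

	st->inited = 1;
	*out = st;
	return 0;
}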
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 064d0db4c51e..ab7d6a0ce94c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1,35 +1,7 @@
-/**
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
@@ -560,7 +532,7 @@ add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
vchiq_log_trace(vchiq_arm_log_level,
"%s - completion queue full", __func__);
DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
- if (wait_for_completion_killable( &instance->remove_event)) {
+ if (wait_for_completion_killable(&instance->remove_event)) {
vchiq_log_info(vchiq_arm_log_level,
"service_callback interrupted");
return VCHIQ_RETRY;
@@ -1486,16 +1458,16 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
(ret != -EWOULDBLOCK))
vchiq_log_info(vchiq_arm_log_level,
- " ioctl instance %lx, cmd %s -> status %d, %ld",
- (unsigned long)instance,
+ " ioctl instance %pK, cmd %s -> status %d, %ld",
+ instance,
(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
ioctl_names[_IOC_NR(cmd)] :
"<invalid>",
status, ret);
else
vchiq_log_trace(vchiq_arm_log_level,
- " ioctl instance %lx, cmd %s -> status %d, %ld",
- (unsigned long)instance,
+ " ioctl instance %pK, cmd %s -> status %d, %ld",
+ instance,
(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
ioctl_names[_IOC_NR(cmd)] :
"<invalid>",
@@ -1540,9 +1512,7 @@ vchiq_compat_ioctl_create_service(
if (!args)
return -EFAULT;
- if (copy_from_user(&args32,
- (struct vchiq_create_service32 __user *)arg,
- sizeof(args32)))
+ if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
return -EFAULT;
if (put_user(args32.params.fourcc, &args->params.fourcc) ||
@@ -1593,7 +1563,7 @@ vchiq_compat_ioctl_queue_message(struct file *file,
unsigned int cmd,
unsigned long arg)
{
- struct vchiq_queue_message *args;
+ struct vchiq_queue_message __user *args;
struct vchiq_element __user *elements;
struct vchiq_queue_message32 args32;
unsigned int count;
@@ -1662,17 +1632,15 @@ vchiq_compat_ioctl_queue_bulk(struct file *file,
{
struct vchiq_queue_bulk_transfer __user *args;
struct vchiq_queue_bulk_transfer32 args32;
- struct vchiq_queue_bulk_transfer32 *ptrargs32 =
- (struct vchiq_queue_bulk_transfer32 *)arg;
+ struct vchiq_queue_bulk_transfer32 __user *ptrargs32 =
+ (struct vchiq_queue_bulk_transfer32 __user *)arg;
long ret;
args = compat_alloc_user_space(sizeof(*args));
if (!args)
return -EFAULT;
- if (copy_from_user(&args32,
- (struct vchiq_queue_bulk_transfer32 __user *)arg,
- sizeof(args32)))
+ if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
return -EFAULT;
if (put_user(args32.handle, &args->handle) ||
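
The vchiq_arm.c compat-ioctl hunks annotate the userspace pointer with __user and reuse it for copy_from_user() instead of re-casting arg at each call site. A hedged, self-contained sketch of that copy-in step (struct demo_req32 and demo_compat_ioctl are invented names, not the driver's):

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Invented 32-bit ioctl argument layout, standing in for the driver's structs. */
struct demo_req32 {
	u32 data;
	u32 size;
};

static long demo_compat_ioctl(unsigned long arg)
{
	/* Cast the user-supplied argument to a __user pointer once... */
	struct demo_req32 __user *uptr = (struct demo_req32 __user *)arg;
	struct demo_req32 req;

	/* ...and copy through that annotated pointer instead of re-casting arg. */
	if (copy_from_user(&req, uptr, sizeof(req)))
		return -EFAULT;

	/* The kernel-side copy in req can now be used safely. */
	return 0;
}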
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index cdb963054975..c1d5a9d17071 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -1,35 +1,7 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef VCHIQ_ARM_H
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h
index d2797db702f9..c275e2e8e678 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2014 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2014 Broadcom. All rights reserved. */
#ifndef VCHIQ_CFG_H
#define VCHIQ_CFG_H
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 7d64e2ed7b42..e87e6619695e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#include "vchiq_connected.h"
#include "vchiq_core.h"
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
index 863b3e335c1a..ec5d2b716e7a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHIQ_CONNECTED_H
#define VCHIQ_CONNECTED_H
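
The hunks above and below all apply the same kernel SPDX convention: the license identifier goes on the first line of the file, C sources carry it as a //-style comment, and headers keep the /* */ block form that Documentation/process/license-rules.rst prescribes for .h files. A minimal sketch of the two placements (file names here are illustrative only):

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef EXAMPLE_H                /* header: block-comment SPDX on line 1 */
#define EXAMPLE_H
#endif /* EXAMPLE_H */

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#include "example.h"             /* C source: line-comment SPDX on line 1 */
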
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 53f5a1cb4636..0c387b6473a5 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#include "vchiq_core.h"
@@ -1877,7 +1847,7 @@ bail_not_ready:
static int
slot_handler_func(void *v)
{
- struct vchiq_state *state = (struct vchiq_state *)v;
+ struct vchiq_state *state = v;
struct vchiq_shared_state *local = state->local;
DEBUG_INITIALISE(local)
@@ -1961,7 +1931,7 @@ slot_handler_func(void *v)
static int
recycle_func(void *v)
{
- struct vchiq_state *state = (struct vchiq_state *)v;
+ struct vchiq_state *state = v;
struct vchiq_shared_state *local = state->local;
BITSET_T *found;
size_t length;
@@ -1985,7 +1955,7 @@ recycle_func(void *v)
static int
sync_func(void *v)
{
- struct vchiq_state *state = (struct vchiq_state *)v;
+ struct vchiq_state *state = v;
struct vchiq_shared_state *local = state->local;
struct vchiq_header *header =
(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
@@ -2111,7 +2081,7 @@ vchiq_init_slots(void *mem_base, int mem_size)
int mem_align =
(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
struct vchiq_slot_zero *slot_zero =
- (struct vchiq_slot_zero *)((char *)mem_base + mem_align);
+ (struct vchiq_slot_zero *)(mem_base + mem_align);
int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
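
The mem_align computation in the hunk above is the standard round-up padding for a power-of-two slot size: it yields the number of bytes to skip so that slot_zero lands on a VCHIQ_SLOT_SIZE boundary, and the remaining space is then divided into whole slots. A self-contained illustration of the same arithmetic, with demo constants rather than the driver's real values:

#include <stdint.h>
#include <stdio.h>

#define SLOT_SIZE 4096UL                /* must be a power of two */
#define SLOT_MASK (SLOT_SIZE - 1)

int main(void)
{
        uintptr_t base = 0x1064;        /* deliberately misaligned base address */
        /* bytes to skip so that (base + pad) is a multiple of SLOT_SIZE */
        uintptr_t pad = (SLOT_SIZE - base) & SLOT_MASK;

        printf("base=%#lx pad=%lu aligned=%#lx\n",
               (unsigned long)base, (unsigned long)pad,
               (unsigned long)(base + pad));
        return 0;
}
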
@@ -2239,6 +2209,8 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
status = vchiq_platform_init_state(state);
+ if (status != VCHIQ_SUCCESS)
+ return VCHIQ_ERROR;
/*
bring up slot handler thread
@@ -3039,13 +3011,13 @@ VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
case VCHIQ_BULK_MODE_CALLBACK:
break;
case VCHIQ_BULK_MODE_BLOCKING:
- bulk_waiter = (struct bulk_waiter *)userdata;
+ bulk_waiter = userdata;
init_completion(&bulk_waiter->event);
bulk_waiter->actual = 0;
bulk_waiter->bulk = NULL;
break;
case VCHIQ_BULK_MODE_WAITING:
- bulk_waiter = (struct bulk_waiter *)userdata;
+ bulk_waiter = userdata;
bulk = bulk_waiter->bulk;
goto waiting;
default:
@@ -3624,7 +3596,7 @@ VCHIQ_STATUS_T vchiq_send_remote_use_active(struct vchiq_state *state)
void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
size_t num_bytes)
{
- const u8 *mem = (const u8 *)void_mem;
+ const u8 *mem = void_mem;
size_t offset;
char line_buf[100];
char *s;
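
Two kinds of cleanup recur in the vchiq_core.c hunks above: explicit casts are dropped where a void * argument is assigned to a typed pointer (the kthread and bulk-waiter arguments), and vchiq_init_state() now returns VCHIQ_ERROR as soon as vchiq_platform_init_state() fails instead of carrying on. The cast removal is just the C rule that void * converts implicitly to any object-pointer type. A minimal userspace analogue of the thread-argument pattern, assuming pthreads (the names are invented for the example, not taken from the driver):

#include <pthread.h>
#include <stdio.h>

struct demo_state {
        int id;
};

static void *worker(void *v)
{
        struct demo_state *state = v;   /* no cast needed: void * converts implicitly */

        printf("worker running for state %d\n", state->id);
        return NULL;
}

int main(void)
{
        struct demo_state st = { .id = 42 };
        pthread_t t;

        if (pthread_create(&t, NULL, worker, &st))
                return 1;               /* bail out early on failure, like the new init path */
        pthread_join(t, NULL);
        return 0;
}
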
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 5f07db519633..aee2d362e88d 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHIQ_CORE_H
#define VCHIQ_CORE_H
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 3928287cf5f7..2bb9120883fd 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -1,35 +1,7 @@
-/**
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/debugfs.h>
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
index 5be1a5663f51..9b563d105fdb 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. */
#ifndef VCHIQ_DEBUGFS_H
#define VCHIQ_DEBUGFS_H
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 13ed23eacfae..5445f201e284 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHIQ_IF_H
#define VCHIQ_IF_H
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
index 56aef490e870..460ccea088bf 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHIQ_IOCTLS_H
#define VCHIQ_IOCTLS_H
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
index 4eaf7398cf2e..ebd12bfabb63 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHIQ_PAGELIST_H
#define VCHIQ_PAGELIST_H
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index ab6ca8fd6f14..13910d205fce 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 55c5fd82b911..6c519d8e48cb 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#include "vchiq_util.h"
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h
index d842194b4023..ee1459468171 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h
@@ -1,35 +1,5 @@
-/**
- * Copyright (c) 2010-2012 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#ifndef VCHIQ_UTIL_H
#define VCHIQ_UTIL_H
diff --git a/drivers/staging/vme/Makefile b/drivers/staging/vme/Makefile
index accdb72e39e2..cf2f686ccffe 100644
--- a/drivers/staging/vme/Makefile
+++ b/drivers/staging/vme/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += devices/
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index c548dd8c91e1..5651bb16b28b 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
comment "VME Device Drivers"
config VME_USER
diff --git a/drivers/staging/vme/devices/Makefile b/drivers/staging/vme/devices/Makefile
index 459742a75283..5380115139b0 100644
--- a/drivers/staging/vme/devices/Makefile
+++ b/drivers/staging/vme/devices/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the VME device drivers.
#
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index 77cfc708c516..e4b224fedf5b 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VT6655
tristate "VIA Technologies VT6655 support"
depends on PCI && MAC80211 && m
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index d71022aa3f86..f422fb3c78bd 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -44,7 +44,8 @@ struct vnt_private;
void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type);
void CARDvUpdateBasicTopRate(struct vnt_private *priv);
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
-void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode);
+void CARDvSetLoopbackMode(struct vnt_private *priv,
+ unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *priv);
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval);
@@ -61,6 +62,7 @@ bool CARDbRadioPowerOn(struct vnt_private *priv);
bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type);
bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
u64 qwBSSTimestamp);
-bool CARDbSetBeaconPeriod(struct vnt_private *priv, unsigned short wBeaconInterval);
+bool CARDbSetBeaconPeriod(struct vnt_private *priv,
+ unsigned short wBeaconInterval);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 53f623a4af65..0d2709607456 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index b4a0037b40c1..d4572847b08a 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 2f9e9219e8c8..29f354ced563 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index 6b41c74f7c2a..04db6a8d3db3 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index 93af4220605f..eac67794cc49 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index 0942d8703f98..9347776fa3a5 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 4750863c1bb7..f5ae7f1f5689 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -12,7 +12,6 @@
* Date: May 21, 1996
*
* Functions:
- * MACbIsRegBitsOn - Test if All test Bits On
* MACbIsRegBitsOff - Test if All test Bits Off
* MACbIsIntDisable - Test if MAC interrupt disable
* MACvSetShortRetryLimit - Set 802.11 Short Retry limit
@@ -44,29 +43,6 @@
/*
* Description:
- * Test if all test bits on
- *
- * Parameters:
- * In:
- * io_base - Base Address for MAC
- * byRegOfs - Offset of MAC Register
- * byTestBits - Test bits
- * Out:
- * none
- *
- * Return Value: true if all test bits On; otherwise false
- *
- */
-bool MACbIsRegBitsOn(struct vnt_private *priv, unsigned char byRegOfs,
- unsigned char byTestBits)
-{
- void __iomem *io_base = priv->PortOffset;
-
- return (ioread8(io_base + byRegOfs) & byTestBits) == byTestBits;
-}
-
-/*
- * Description:
* Test if all test bits off
*
* Parameters:
@@ -593,7 +569,6 @@ void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
iowrite32(curr_desc_addr, io_base + MAC_REG_RXDMAPTR1);
if (org_dma_ctl & DMACTL_RUN)
iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL1);
-
}
/*
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index b8ab09434773..c7888c4e96f2 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
@@ -876,18 +876,15 @@ do { \
#define MACvSetRFLE_LatchBase(iobase) \
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
-bool MACbIsRegBitsOn(struct vnt_private *priv, unsigned char byRegOfs,
- unsigned char byTestBits);
bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
unsigned char byTestBits);
bool MACbIsIntDisable(struct vnt_private *priv);
-void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit);
+void MACvSetShortRetryLimit(struct vnt_private *priv,
+ unsigned char byRetryLimit);
void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit);
-void MACvGetLongRetryLimit(struct vnt_private *priv,
- unsigned char *pbyRetryLimit);
void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode);
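
MACbIsRegBitsOn() is deleted above because nothing calls it any more; its counterpart MACbIsRegBitsOff() stays. Both are thin wrappers around ordinary mask arithmetic: a register value has every bit of a mask set when (val & mask) == mask, and every bit clear when (val & mask) == 0. A small self-contained check of the two predicates (plain C, not driver code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool all_bits_on(uint8_t val, uint8_t mask)
{
        return (val & mask) == mask;    /* every bit in mask is set in val */
}

static bool all_bits_off(uint8_t val, uint8_t mask)
{
        return !(val & mask);           /* no bit in mask is set in val */
}

int main(void)
{
        assert(all_bits_on(0xF3, 0x03));
        assert(!all_bits_on(0xF1, 0x03));
        assert(all_bits_off(0xF0, 0x03));
        assert(!all_bits_off(0xF1, 0x03));
        return 0;
}
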
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index d6c581b31569..9725de3bca6a 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -48,11 +48,8 @@
*
*/
-void
-PSvEnablePowerSaving(
- struct vnt_private *priv,
- unsigned short wListenInterval
-)
+void PSvEnablePowerSaving(struct vnt_private *priv,
+ unsigned short wListenInterval)
{
u16 wAID = priv->current_aid | BIT(14) | BIT(15);
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index 2ec40045fddb..d1736c1cbfa8 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 03b0d56dbe9e..e80fed69bafe 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -163,7 +163,7 @@ static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
0x841FF200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
0x3FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11b/g // Need modify for 11a */
- /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */
+ /* RoberYu:20050113, Rev0.47 Register Setting Guide */
0x802B5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 860207 */
@@ -171,7 +171,7 @@ static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
0xE0000A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: E0600A */
0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
- /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */
+ /* RoberYu:20050113, Rev0.47 Register Setting Guide */
0x000A3C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 00143C */
0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
@@ -702,9 +702,9 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel - 1]);
ii++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel - 1]);
break;
/* Need to check, PLLON need to be low for channel setting */
@@ -723,11 +723,11 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
}
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel - 1]);
ii++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel - 1]);
ii++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel - 1]);
break;
case RF_NOTHING:
@@ -755,11 +755,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
* Return Value: true if succeeded; false if failed.
*
*/
-bool RFbSetPower(
- struct vnt_private *priv,
- unsigned int rate,
- u16 uCH
-)
+bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH)
{
bool ret = true;
unsigned char byPwr = 0;
@@ -792,7 +788,7 @@ bool RFbSetPower(
byDec = byPwr + 10;
if (byDec >= priv->byMaxPwrLevel)
- byDec = priv->byMaxPwrLevel-1;
+ byDec = priv->byMaxPwrLevel - 1;
byPwr = byDec;
break;
@@ -828,11 +824,8 @@ bool RFbSetPower(
*
*/
-bool RFbRawSetPower(
- struct vnt_private *priv,
- unsigned char byPwr,
- unsigned int rate
-)
+bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
+ unsigned int rate)
{
bool ret = true;
unsigned long dwMax7230Pwr = 0;
@@ -894,11 +887,7 @@ bool RFbRawSetPower(
*
*/
void
-RFvRSSITodBm(
- struct vnt_private *priv,
- unsigned char byCurrRSSI,
- long *pldBm
- )
+RFvRSSITodBm(struct vnt_private *priv, unsigned char byCurrRSSI, long *pldBm)
{
unsigned char byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03);
long b = (byCurrRSSI & 0x3F);
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index bfce5a89657d..042ac67a9709 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index a7c1e46a953e..a14908895b9e 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -397,17 +397,17 @@ s_uGetRTSCTSDuration(
switch (byDurType) {
case RTSDUR_BB: /* RTSDuration_bb */
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_BA: /* RTSDuration_ba */
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_AA: /* RTSDuration_aa */
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case CTSDUR_BA: /* CTSDuration_ba */
@@ -426,9 +426,9 @@ s_uGetRTSCTSDuration(
case RTSDUR_AA_F0: /* RTSDuration_aa_f0 */
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
break;
@@ -437,16 +437,16 @@ s_uGetRTSCTSDuration(
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
break;
case RTSDUR_AA_F1: /* RTSDuration_aa_f1 */
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
break;
@@ -1149,7 +1149,7 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
/* Auto Fall Back */
if (bRTS) { /* RTS_need */
pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab));
+ pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab));
pvRTS = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 08db848613f0..464dd89078b2 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index 577f20dc4308..d8aad3ff5f46 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index 6795b5d74cfc..8f4699f0d1f4 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index 61b3e568ff9a..e086ec6e77f7 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
@@ -20,10 +20,8 @@
/*--------------------- Export Definitions -------------------------*/
-
/* For memory mapped IO */
-
#define VNSvInPortB(dwIOAddress, pbyData) \
(*(pbyData) = ioread8(dwIOAddress))
diff --git a/drivers/staging/vt6656/Kconfig b/drivers/staging/vt6656/Kconfig
index b602ef175d55..51e295265ba6 100644
--- a/drivers/staging/vt6656/Kconfig
+++ b/drivers/staging/vt6656/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VT6656
tristate "VIA Technologies VT6656 support"
depends on MAC80211 && USB && WLAN && m
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index a907e3026012..c3b8bbdb3ea1 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index 0a91d9ba4688..75cd340c0cce 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/channel.h b/drivers/staging/vt6656/channel.h
index 6d0d2825d992..cca330f0daf4 100644
--- a/drivers/staging/vt6656/channel.h
+++ b/drivers/staging/vt6656/channel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index ac45ebb71195..3a83a9ea9a2a 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index a2feeb916836..6074ceda78bf 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index ddd0cb710512..e080add823cb 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/firmware.h b/drivers/staging/vt6656/firmware.h
index f30ae90cbb1f..161126faf396 100644
--- a/drivers/staging/vt6656/firmware.h
+++ b/drivers/staging/vt6656/firmware.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 1e6ff925701a..987c454e99e9 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
index 1306ff441b87..918c07cf86cd 100644
--- a/drivers/staging/vt6656/key.h
+++ b/drivers/staging/vt6656/key.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/power.h b/drivers/staging/vt6656/power.h
index d5a3198206da..58755ae16e5a 100644
--- a/drivers/staging/vt6656/power.h
+++ b/drivers/staging/vt6656/power.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/rf.h b/drivers/staging/vt6656/rf.h
index f77866a9c177..6103117d6df5 100644
--- a/drivers/staging/vt6656/rf.h
+++ b/drivers/staging/vt6656/rf.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index 44698f41a234..d528607e02bf 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index 5d7708fcf557..2910ca54886e 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6656/wcmd.h b/drivers/staging/vt6656/wcmd.h
index 4a96f4de980d..a0d98cf74998 100644
--- a/drivers/staging/vt6656/wcmd.h
+++ b/drivers/staging/vt6656/wcmd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig
index f9d3ad41c862..59e58550d139 100644
--- a/drivers/staging/wilc1000/Kconfig
+++ b/drivers/staging/wilc1000/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config WILC1000
tristate
help
@@ -33,7 +34,6 @@ config WILC1000_SPI
config WILC1000_HW_OOB_INTR
bool "WILC1000 out of band interrupt"
depends on WILC1000_SDIO
- default n
help
This option enables out-of-band interrupt support for the WILC1000
chipset. This OOB interrupt is intended to provide a faster interrupt
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 4dd9a20f6a0b..ed15bd1bcd56 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -229,10 +229,10 @@ static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
}
int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
- u8 *ch_freq_list, u8 ch_list_len, const u8 *ies, size_t ies_len,
+ u8 *ch_freq_list, u8 ch_list_len,
void (*scan_result_fn)(enum scan_event,
struct wilc_rcvd_net_info *, void *),
- void *user_arg, struct wilc_probe_ssid *search)
+ void *user_arg, struct cfg80211_scan_request *request)
{
int result = 0;
struct wid wid_list[5];
@@ -258,9 +258,9 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
hif_drv->usr_scan_req.ch_cnt = 0;
- if (search) {
- for (i = 0; i < search->n_ssids; i++)
- valuesize += ((search->ssid_info[i].ssid_len) + 1);
+ if (request->n_ssids) {
+ for (i = 0; i < request->n_ssids; i++)
+ valuesize += ((request->ssids[i].ssid_len) + 1);
search_ssid_vals = kmalloc(valuesize + 1, GFP_KERNEL);
if (search_ssid_vals) {
wid_list[index].id = WID_SSID_PROBE_REQ;
@@ -268,13 +268,13 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
wid_list[index].val = search_ssid_vals;
buffer = wid_list[index].val;
- *buffer++ = search->n_ssids;
+ *buffer++ = request->n_ssids;
- for (i = 0; i < search->n_ssids; i++) {
- *buffer++ = search->ssid_info[i].ssid_len;
- memcpy(buffer, search->ssid_info[i].ssid,
- search->ssid_info[i].ssid_len);
- buffer += search->ssid_info[i].ssid_len;
+ for (i = 0; i < request->n_ssids; i++) {
+ *buffer++ = request->ssids[i].ssid_len;
+ memcpy(buffer, request->ssids[i].ssid,
+ request->ssids[i].ssid_len);
+ buffer += request->ssids[i].ssid_len;
}
wid_list[index].size = (s32)(valuesize + 1);
index++;
@@ -283,8 +283,8 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
wid_list[index].id = WID_INFO_ELEMENT_PROBE;
wid_list[index].type = WID_BIN_DATA;
- wid_list[index].val = (s8 *)ies;
- wid_list[index].size = ies_len;
+ wid_list[index].val = (s8 *)request->ie;
+ wid_list[index].size = request->ie_len;
index++;
wid_list[index].id = WID_SCAN_TYPE;
@@ -313,6 +313,9 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
wid_list[index].val = (s8 *)&scan_source;
index++;
+ hif_drv->usr_scan_req.scan_result = scan_result_fn;
+ hif_drv->usr_scan_req.arg = user_arg;
+
result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
index,
wilc_get_vif_idx(vif));
@@ -321,17 +324,13 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
goto error;
}
- hif_drv->usr_scan_req.scan_result = scan_result_fn;
- hif_drv->usr_scan_req.arg = user_arg;
hif_drv->scan_timer_vif = vif;
mod_timer(&hif_drv->scan_timer,
jiffies + msecs_to_jiffies(WILC_HIF_SCAN_TIMEOUT_MS));
error:
- if (search) {
- kfree(search->ssid_info);
- kfree(search_ssid_vals);
- }
+
+ kfree(search_ssid_vals);
return result;
}
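
With the wilc_probe_ssid middle layer gone, wilc_scan() reads the SSID list straight from the cfg80211_scan_request and serializes it into the WID_SSID_PROBE_REQ buffer itself: one count byte, then for each SSID a length byte followed by the raw SSID bytes. A self-contained sketch of that packing layout (struct and function names are invented for the example; only the byte layout mirrors the hunk above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_ssid {
        uint8_t ssid_len;
        uint8_t ssid[32];
};

/* Pack: [n][len0][ssid0 bytes][len1][ssid1 bytes]...; returns bytes written. */
static size_t pack_ssids(uint8_t *buf, const struct demo_ssid *s, uint8_t n)
{
        uint8_t *p = buf;
        uint8_t i;

        *p++ = n;
        for (i = 0; i < n; i++) {
                *p++ = s[i].ssid_len;
                memcpy(p, s[i].ssid, s[i].ssid_len);
                p += s[i].ssid_len;
        }
        return (size_t)(p - buf);
}

int main(void)
{
        struct demo_ssid req[2] = {
                { 4, "home" },
                { 5, "guest" },
        };
        uint8_t buf[128];

        printf("packed %zu bytes\n", pack_ssids(buf, req, 2));   /* prints 12 */
        return 0;
}
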
@@ -775,7 +774,7 @@ int wilc_disconnect(struct wilc_vif *vif)
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
if (result) {
- netdev_err(vif->ndev, "Failed to send dissconect\n");
+ netdev_err(vif->ndev, "Failed to send disconnect\n");
return result;
}
@@ -1358,17 +1357,14 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
int wilc_set_pmkid_info(struct wilc_vif *vif, struct wilc_pmkid_attr *pmkid)
{
struct wid wid;
- int result;
wid.id = WID_PMKID_INFO;
wid.type = WID_STR;
wid.size = (pmkid->numpmkid * sizeof(struct wilc_pmkid)) + 1;
wid.val = (u8 *)pmkid;
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
-
- return result;
+ return wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
}
int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
@@ -1402,10 +1398,8 @@ int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies,
if (ies) {
conn_info->req_ies_len = ies_len;
conn_info->req_ies = kmemdup(ies, ies_len, GFP_KERNEL);
- if (!conn_info->req_ies) {
- result = -ENOMEM;
- return result;
- }
+ if (!conn_info->req_ies)
+ return -ENOMEM;
}
result = wilc_send_connect_wid(vif);
@@ -1570,7 +1564,6 @@ int wilc_hif_set_cfg(struct wilc_vif *vif, struct cfg_param_attr *param)
{
struct wid wid_list[4];
int i = 0;
- int result;
if (param->flag & WILC_CFG_PARAM_RETRY_SHORT) {
wid_list[i].id = WID_SHORT_RETRY_LIMIT;
@@ -1601,10 +1594,8 @@ int wilc_hif_set_cfg(struct wilc_vif *vif, struct cfg_param_attr *param)
i++;
}
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
- i, wilc_get_vif_idx(vif));
-
- return result;
+ return wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
+ i, wilc_get_vif_idx(vif));
}
static void get_periodic_rssi(struct timer_list *t)
@@ -2121,7 +2112,6 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, u32 enabled, u32 count,
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power)
{
- int ret;
struct wid wid;
wid.id = WID_TX_POWER;
@@ -2129,15 +2119,12 @@ int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power)
wid.val = &tx_power;
wid.size = sizeof(char);
- ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
+ return wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
-
- return ret;
}
int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
{
- int ret;
struct wid wid;
wid.id = WID_TX_POWER;
@@ -2145,8 +2132,6 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
wid.val = tx_power;
wid.size = sizeof(char);
- ret = wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
-
- return ret;
+ return wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
}
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 678e62312215..a907c6d33012 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -115,16 +115,6 @@ struct wilc_rcvd_net_info {
struct ieee80211_mgmt *mgmt;
};
-struct wilc_probe_ssid_info {
- u8 ssid_len;
- u8 *ssid;
-};
-
-struct wilc_probe_ssid {
- struct wilc_probe_ssid_info *ssid_info;
- u8 n_ssids;
- u32 size;
-};
struct wilc_user_scan_req {
void (*scan_result)(enum scan_event evt,
@@ -205,10 +195,10 @@ int wilc_disconnect(struct wilc_vif *vif);
int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel);
int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level);
int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
- u8 *ch_freq_list, u8 ch_list_len, const u8 *ies, size_t ies_len,
+ u8 *ch_freq_list, u8 ch_list_len,
void (*scan_result_fn)(enum scan_event,
struct wilc_rcvd_net_info *, void *),
- void *user_arg, struct wilc_probe_ssid *search);
+ void *user_arg, struct cfg80211_scan_request *request);
int wilc_hif_set_cfg(struct wilc_vif *vif,
struct cfg_param_attr *cfg_param);
int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler);
diff --git a/drivers/staging/wilc1000/wilc_netdev.c b/drivers/staging/wilc1000/wilc_netdev.c
index 1787154ee088..ba78c08a17f1 100644
--- a/drivers/staging/wilc1000/wilc_netdev.c
+++ b/drivers/staging/wilc1000/wilc_netdev.c
@@ -708,7 +708,7 @@ static void wilc_set_multicast_list(struct net_device *dev)
return;
}
- mc_list = kmalloc_array(dev->mc.count, ETH_ALEN, GFP_KERNEL);
+ mc_list = kmalloc_array(dev->mc.count, ETH_ALEN, GFP_ATOMIC);
if (!mc_list)
return;
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 4a1be9e60d74..d8910bf9cb75 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -933,11 +933,9 @@ static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
u32 irq_flags;
int k = IRG_FLAGS_OFFSET + 5;
- if (spi_priv->has_thrpt_enh) {
- ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
- int_status);
- return ret;
- }
+ if (spi_priv->has_thrpt_enh)
+ return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
+ int_status);
ret = wilc_spi_read_reg(wilc, WILC_VMM_TO_HOST_SIZE, &byte_cnt);
if (!ret) {
dev_err(&spi->dev,
@@ -982,9 +980,8 @@ static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val)
u32 tbl_ctl;
if (spi_priv->has_thrpt_enh) {
- ret = spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE,
- val);
- return ret;
+ return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE,
+ val);
}
flags = val & (BIT(MAX_NUM_INT) - 1);
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 5e7a4676324e..f6825727bf77 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -214,50 +214,6 @@ static int set_channel(struct wiphy *wiphy,
return result;
}
-static inline int
-wilc_wfi_cfg_alloc_fill_ssid(struct cfg80211_scan_request *request,
- struct wilc_probe_ssid *search)
-{
- int i;
- int slot_id = 0;
-
- search->ssid_info = kcalloc(request->n_ssids,
- sizeof(*search->ssid_info), GFP_KERNEL);
- if (!search->ssid_info)
- goto out;
-
- search->n_ssids = request->n_ssids;
-
- for (i = 0; i < request->n_ssids; i++) {
- if (request->ssids[i].ssid_len > 0) {
- struct wilc_probe_ssid_info *info;
-
- info = &search->ssid_info[slot_id];
- info->ssid = kmemdup(request->ssids[i].ssid,
- request->ssids[i].ssid_len,
- GFP_KERNEL);
- if (!info->ssid)
- goto out_free;
-
- info->ssid_len = request->ssids[i].ssid_len;
- slot_id++;
- } else {
- search->n_ssids -= 1;
- }
- }
- return 0;
-
-out_free:
-
- for (i = 0; i < slot_id; i++)
- kfree(search->ssid_info[i].ssid);
-
- kfree(search->ssid_info);
-out:
-
- return -ENOMEM;
-}
-
static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct wilc_priv *priv = wiphy_priv(wiphy);
@@ -265,7 +221,6 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
u32 i;
int ret = 0;
u8 scan_ch_list[WILC_MAX_NUM_SCANNED_CH];
- struct wilc_probe_ssid probe_ssid;
if (request->n_channels > WILC_MAX_NUM_SCANNED_CH) {
netdev_err(priv->dev, "Requested scanned channels over\n");
@@ -280,28 +235,10 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
scan_ch_list[i] = ieee80211_frequency_to_channel(freq);
}
- if (request->n_ssids >= 1) {
- if (wilc_wfi_cfg_alloc_fill_ssid(request, &probe_ssid)) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = wilc_scan(vif, WILC_FW_USER_SCAN,
- WILC_FW_ACTIVE_SCAN, scan_ch_list,
- request->n_channels,
- (const u8 *)request->ie,
- request->ie_len, cfg_scan_result,
- (void *)priv, &probe_ssid);
- } else {
- ret = wilc_scan(vif, WILC_FW_USER_SCAN,
- WILC_FW_ACTIVE_SCAN, scan_ch_list,
- request->n_channels,
- (const u8 *)request->ie,
- request->ie_len, cfg_scan_result,
- (void *)priv, NULL);
- }
+ ret = wilc_scan(vif, WILC_FW_USER_SCAN, WILC_FW_ACTIVE_SCAN,
+ scan_ch_list, request->n_channels, cfg_scan_result,
+ (void *)priv, request);
-out:
if (ret) {
priv->scan_req = NULL;
priv->cfg_scanning = false;
@@ -1253,7 +1190,8 @@ static int mgmt_tx(struct wiphy *wiphy,
struct wilc_priv *priv = wiphy_priv(wiphy);
struct host_if_drv *wfi_drv = priv->hif_drv;
struct wilc_vif *vif = netdev_priv(wdev->netdev);
- u32 buf_len = len + sizeof(p2p_vendor_spec) + sizeof(priv->p2p.local_random);
+ u32 buf_len = len + sizeof(p2p_vendor_spec) +
+ sizeof(priv->p2p.local_random);
int ret = 0;
*cookie = prandom_u32();
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index c2389695fe20..0a713409ea98 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -11,7 +11,7 @@
static inline bool is_wilc1000(u32 id)
{
- return ((id & 0xfffff000) == 0x100000 ? true : false);
+ return (id & 0xfffff000) == 0x100000;
}
static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
@@ -316,7 +316,7 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
if (wilc->quit)
return 0;
- tqe = kmalloc(sizeof(*tqe), GFP_KERNEL);
+ tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe)
return 0;
@@ -408,7 +408,7 @@ void chip_wakeup(struct wilc *wilc)
wilc->hif_func->hif_write_reg(wilc, 1, reg & ~BIT(1));
do {
- usleep_range(2 * 1000, 2 * 1000);
+ usleep_range(2000, 2500);
wilc_get_chipid(wilc, true);
} while (wilc_get_chipid(wilc, true) == 0);
} while (wilc_get_chipid(wilc, true) == 0);
@@ -423,7 +423,7 @@ void chip_wakeup(struct wilc *wilc)
&clk_status_reg);
while ((clk_status_reg & 0x1) == 0) {
- usleep_range(2 * 1000, 2 * 1000);
+ usleep_range(2000, 2500);
wilc->hif_func->hif_read_reg(wilc, 0xf1,
&clk_status_reg);
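For reference, the two usleep_range() changes in chip_wakeup() above widen an identical min/max pair (2 * 1000, 2 * 1000) into 2000-2500 us: with min == max the timer subsystem has no slack to coalesce the wakeup with other timers. A minimal polling sketch following the same convention, reusing the hif_read_reg() accessor visible above but with an otherwise hypothetical ready bit and timeout:

#include <linux/delay.h>
#include <linux/jiffies.h>

/* Sketch only: poll bit 0 of register 0xf1, sleeping 2-2.5 ms per pass so
 * the scheduler may batch the wakeup; give up after roughly 100 ms. */
static int example_wait_clk_ready(struct wilc *wilc)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	u32 reg;

	do {
		wilc->hif_func->hif_read_reg(wilc, 0xf1, &reg);
		if (reg & 0x1)
			return 0;
		usleep_range(2000, 2500);	/* min < max leaves coalescing room */
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}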
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index 97238018b315..ac136663fa8e 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -1,9 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
config PRISM2_USB
tristate "Prism2.5/3 USB driver"
depends on WLAN && USB && CFG80211
select WIRELESS_EXT
select WEXT_PRIV
- default n
help
This is the wlan-ng prism 2.5/3 USB driver for a wide range of
old USB wireless devices.
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 992ebaa1071f..5ff740a8837d 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1369,8 +1369,8 @@ struct hfa384x {
void hfa384x_create(struct hfa384x *hw, struct usb_device *usb);
void hfa384x_destroy(struct hfa384x *hw);
-int
-hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int genesis);
+int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime,
+ int genesis);
int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport);
int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport);
int hfa384x_drvr_flashdl_enable(struct hfa384x *hw);
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 6261881e9bcd..6fde75d4f064 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/prism2/driver/hfa384x_usb.c
*
- * Functions that talk to the USB variantof the Intersil hfa384x MAC
+ * Functions that talk to the USB variant of the Intersil hfa384x MAC
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 5ce6e2a40e00..59d32453b891 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -573,7 +573,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
return 0;
}
-static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32);
+static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
+ u32 data_offset, u32 data_length);
static void iscsit_unmap_iovec(struct iscsi_cmd *);
static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
u32, u32, u32, u8 *);
@@ -604,7 +605,8 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
*header_digest);
}
- iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
+ iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
+ cmd->orig_iov_data_count - (iov_count + 2),
datain->offset, datain->length);
if (iov_ret < 0)
return -1;
@@ -886,13 +888,10 @@ EXPORT_SYMBOL(iscsit_reject_cmd);
* Map some portion of the allocated scatterlist to an iovec, suitable for
* kernel sockets to copy data in/out.
*/
-static int iscsit_map_iovec(
- struct iscsi_cmd *cmd,
- struct kvec *iov,
- u32 data_offset,
- u32 data_length)
+static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
+ u32 data_offset, u32 data_length)
{
- u32 i = 0;
+ u32 i = 0, orig_data_length = data_length;
struct scatterlist *sg;
unsigned int page_off;
@@ -901,9 +900,12 @@ static int iscsit_map_iovec(
*/
u32 ent = data_offset / PAGE_SIZE;
+ if (!data_length)
+ return 0;
+
if (ent >= cmd->se_cmd.t_data_nents) {
pr_err("Initial page entry out-of-bounds\n");
- return -1;
+ goto overflow;
}
sg = &cmd->se_cmd.t_data_sg[ent];
@@ -913,7 +915,12 @@ static int iscsit_map_iovec(
cmd->first_data_sg_off = page_off;
while (data_length) {
- u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+ u32 cur_len;
+
+ if (WARN_ON_ONCE(!sg || i >= nvec))
+ goto overflow;
+
+ cur_len = min_t(u32, data_length, sg->length - page_off);
iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
iov[i].iov_len = cur_len;
@@ -927,6 +934,16 @@ static int iscsit_map_iovec(
cmd->kmapped_nents = i;
return i;
+
+overflow:
+ pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
+ data_offset, orig_data_length, i, nvec);
+ for_each_sg(cmd->se_cmd.t_data_sg, sg,
+ cmd->se_cmd.t_data_nents, i) {
+ pr_err("[%d] off %d len %d\n",
+ i, sg->offset, sg->length);
+ }
+ return -1;
}
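The comment above ("Map some portion of the allocated scatterlist to an iovec...") describes what iscsit_map_iovec() does; the new nvec parameter and the overflow label exist so the walk can never write past the preallocated kvec array. A stripped-down sketch of the same sg-to-kvec walk with that bound check, keeping the function's assumption that each scatterlist entry covers one page (illustrative only, error reporting omitted):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/uio.h>

static int example_map_iovec(struct scatterlist *sg_list, unsigned int nents,
			     struct kvec *iov, int nvec, u32 off, u32 len)
{
	unsigned int ent = off / PAGE_SIZE;
	unsigned int page_off = off % PAGE_SIZE;
	struct scatterlist *sg;
	int i = 0;

	if (ent >= nents)
		return -1;
	sg = &sg_list[ent];

	while (len) {
		u32 cur;

		if (!sg || i >= nvec)	/* would overrun the sg list or kvec array */
			return -1;
		cur = min_t(u32, len, sg->length - page_off);
		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
		iov[i].iov_len = cur;
		len -= cur;
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	return i;	/* caller must kunmap() the mapped pages afterwards */
}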
static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
@@ -1268,27 +1285,27 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
bool dump_payload)
{
int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ int rc;
+
/*
* Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
*/
- if (dump_payload)
- goto after_immediate_data;
- /*
- * Check for underflow case where both EDTL and immediate data payload
- * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
- * already been set in target_cmd_size_check() as se_cmd->data_length.
- *
- * For this special case, fail the command and dump the immediate data
- * payload.
- */
- if (cmd->first_burst_len > cmd->se_cmd.data_length) {
- cmd->sense_reason = TCM_INVALID_CDB_FIELD;
- goto after_immediate_data;
+ if (dump_payload) {
+ u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
+ cmd->first_burst_len);
+
+ pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
+ cmd->se_cmd.data_length, cmd->write_data_done,
+ cmd->first_burst_len, length);
+ rc = iscsit_dump_data_payload(cmd->conn, length, 1);
+ pr_debug("Finished dumping immediate data\n");
+ if (rc < 0)
+ immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
+ } else {
+ immed_ret = iscsit_handle_immediate_data(cmd, hdr,
+ cmd->first_burst_len);
}
- immed_ret = iscsit_handle_immediate_data(cmd, hdr,
- cmd->first_burst_len);
-after_immediate_data:
if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
/*
* A PDU/CmdSN carrying Immediate Data passed
@@ -1301,12 +1318,9 @@ after_immediate_data:
return -1;
if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
- int rc;
-
- rc = iscsit_dump_data_payload(cmd->conn,
- cmd->first_burst_len, 1);
target_put_sess_cmd(&cmd->se_cmd);
- return rc;
+
+ return 0;
} else if (cmd->unsolicited_data)
iscsit_set_unsolicited_dataout(cmd);
@@ -1568,14 +1582,16 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
{
struct kvec *iov;
u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
- u32 payload_length = ntoh24(hdr->dlength);
+ u32 payload_length;
int iov_ret, data_crc_failed = 0;
+ payload_length = min_t(u32, cmd->se_cmd.data_length,
+ ntoh24(hdr->dlength));
rx_size += payload_length;
iov = &cmd->iov_data[0];
- iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
- payload_length);
+ iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
+ be32_to_cpu(hdr->offset), payload_length);
if (iov_ret < 0)
return -1;
@@ -1595,6 +1611,7 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
rx_size += ISCSI_CRC_LEN;
}
+ WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
iscsit_unmap_iovec(cmd);
@@ -1860,6 +1877,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
rx_size += ISCSI_CRC_LEN;
}
+ WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
if (rx_got != rx_size) {
ret = -1;
@@ -2265,6 +2283,7 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
rx_size += ISCSI_CRC_LEN;
}
+ WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
rx_got = rx_data(conn, &iov[0], niov, rx_size);
if (rx_got != rx_size)
goto reject;
@@ -2575,14 +2594,34 @@ static int iscsit_handle_immediate_data(
u32 checksum, iov_count = 0, padding = 0;
struct iscsi_conn *conn = cmd->conn;
struct kvec *iov;
+ void *overflow_buf = NULL;
- iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
+ BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
+ rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
+ iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
+ cmd->orig_iov_data_count - 2,
+ cmd->write_data_done, rx_size);
if (iov_ret < 0)
return IMMEDIATE_DATA_CANNOT_RECOVER;
- rx_size = length;
iov_count = iov_ret;
iov = &cmd->iov_data[0];
+ if (rx_size < length) {
+ /*
+ * Special case: length of immediate data exceeds the data
+ * buffer size derived from the CDB.
+ */
+ overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
+ if (!overflow_buf) {
+ iscsit_unmap_iovec(cmd);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ }
+ cmd->overflow_buf = overflow_buf;
+ iov[iov_count].iov_base = overflow_buf;
+ iov[iov_count].iov_len = length - rx_size;
+ iov_count++;
+ rx_size = length;
+ }
padding = ((-length) & 3);
if (padding != 0) {
@@ -2597,6 +2636,7 @@ static int iscsit_handle_immediate_data(
rx_size += ISCSI_CRC_LEN;
}
+ WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
iscsit_unmap_iovec(cmd);
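For reference, the padding computation retained above in iscsit_handle_immediate_data(), padding = ((-length) & 3), rounds the received payload up to the 4-byte word boundary that iSCSI PDUs require. A quick worked check (values chosen purely for illustration):

	length = 45:  (-45) & 3 = 3  ->  45 + 3 = 48 bytes read from the socket
	length = 48:  (-48) & 3 = 0  ->  already aligned, no pad bytes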
@@ -3121,6 +3161,12 @@ int iscsit_build_r2ts_for_cmd(
else
xfer_len = conn->sess->sess_ops->MaxBurstLength;
}
+
+ if ((s32)xfer_len < 0) {
+ cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+ break;
+ }
+
cmd->r2t_offset += xfer_len;
if (cmd->r2t_offset == cmd->se_cmd.data_length)
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 4e680d753941..ca7d7e8aecc0 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -252,7 +252,6 @@ static int chap_server_compute_md5(
}
desc->tfm = tfm;
- desc->flags = 0;
ret = crypto_shash_init(desc);
if (ret < 0) {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index ae3209efd0e0..683d04580eb3 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -883,9 +883,6 @@ int iscsit_setup_np(
return -EINVAL;
}
- np->np_ip_proto = IPPROTO_TCP;
- np->np_sock_type = SOCK_STREAM;
-
ret = sock_create(sockaddr->ss_family, np->np_sock_type,
np->np_ip_proto, &sock);
if (ret < 0) {
@@ -1159,13 +1156,13 @@ static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
pr_err("Unable to allocate conn->conn_cpumask\n");
- goto free_mask;
+ goto free_conn_ops;
}
return conn;
-free_mask:
- free_cpumask_var(conn->conn_cpumask);
+free_conn_ops:
+ kfree(conn->conn_ops);
put_transport:
iscsit_put_transport(conn->conn_transport);
free_conn:
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 3ac494f63a0b..fae85bfd790e 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -67,6 +67,8 @@ int iscsit_add_r2t_to_list(
lockdep_assert_held(&cmd->r2t_lock);
+ WARN_ON_ONCE((s32)xfer_len < 0);
+
r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
if (!r2t) {
pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
@@ -735,6 +737,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kfree(cmd->pdu_list);
kfree(cmd->seq_list);
kfree(cmd->tmr_req);
+ kfree(cmd->overflow_buf);
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
@@ -769,6 +772,8 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
int rc;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
__iscsit_free_cmd(cmd, shutdown);
if (se_cmd) {
rc = transport_generic_free_cmd(se_cmd, shutdown);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index e09f0cf86bed..893f1fe8e373 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -1760,8 +1760,10 @@ void core_alua_free_tg_pt_gp(
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
- list_del(&tg_pt_gp->tg_pt_gp_list);
- dev->t10_alua.alua_tg_pt_gps_counter--;
+ if (tg_pt_gp->tg_pt_gp_valid_id) {
+ list_del(&tg_pt_gp->tg_pt_gp_list);
+ dev->t10_alua.alua_tg_pt_gps_count--;
+ }
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index fc5ef31f5ba8..db2558fe8d46 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1227,6 +1227,29 @@ static struct t10_wwn *to_t10_wwn(struct config_item *item)
return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}
+static ssize_t target_check_inquiry_data(char *buf)
+{
+ size_t len;
+ int i;
+
+ len = strlen(buf);
+
+ /*
+ * SPC 4.3.1:
+ * ASCII data fields shall contain only ASCII printable characters
+ * (i.e., code values 20h to 7Eh) and may be terminated with one or
+ * more ASCII null (00h) characters.
+ */
+ for (i = 0; i < len; i++) {
+ if (buf[i] < 0x20 || buf[i] > 0x7E) {
+ pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
+ return -EINVAL;
+ }
+ }
+
+ return len;
+}
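As a concrete illustration of the SPC-4 rule enforced by target_check_inquiry_data() above (only code values 20h-7Eh are accepted), with example strings that are purely hypothetical:

	"LIO-ORG"       every byte lies in 20h-7Eh   ->  returns strlen() == 7
	"ACME\tdisk"    contains 09h (TAB)           ->  returns -EINVAL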
+
/*
* STANDARD and VPD page 0x83 T10 Vendor Identification
*/
@@ -1245,7 +1268,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
unsigned char buf[INQUIRY_VENDOR_LEN + 2];
char *stripped = NULL;
size_t len;
- int i;
+ ssize_t ret;
len = strlcpy(buf, page, sizeof(buf));
if (len < sizeof(buf)) {
@@ -1260,19 +1283,10 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
return -EOVERFLOW;
}
- /*
- * SPC 4.3.1:
- * ASCII data fields shall contain only ASCII printable characters (i.e.,
- * code values 20h to 7Eh) and may be terminated with one or more ASCII
- * null (00h) characters.
- */
- for (i = 0; i < len; i++) {
- if ((stripped[i] < 0x20) || (stripped[i] > 0x7E)) {
- pr_err("Emulated T10 Vendor Identification contains"
- " non-ASCII-printable characters\n");
- return -EINVAL;
- }
- }
+ ret = target_check_inquiry_data(stripped);
+
+ if (ret < 0)
+ return ret;
/*
* Check to see if any active exports exist. If they do exist, fail
@@ -1295,6 +1309,118 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
return count;
}
+static ssize_t target_wwn_product_id_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
+}
+
+static ssize_t target_wwn_product_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+ unsigned char buf[INQUIRY_MODEL_LEN + 2];
+ char *stripped = NULL;
+ size_t len;
+ ssize_t ret;
+
+ len = strlcpy(buf, page, sizeof(buf));
+ if (len < sizeof(buf)) {
+ /* Strip any newline added from userspace. */
+ stripped = strstrip(buf);
+ len = strlen(stripped);
+ }
+ if (len > INQUIRY_MODEL_LEN) {
+ pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
+ __stringify(INQUIRY_MODEL_LEN)
+ "\n");
+ return -EOVERFLOW;
+ }
+
+ ret = target_check_inquiry_data(stripped);
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set T10 Model while active %d exports exist\n",
+ dev->export_count);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
+ strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
+
+ pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
+ dev->t10_wwn.model);
+
+ return count;
+}
+
+static ssize_t target_wwn_revision_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
+}
+
+static ssize_t target_wwn_revision_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+ unsigned char buf[INQUIRY_REVISION_LEN + 2];
+ char *stripped = NULL;
+ size_t len;
+ ssize_t ret;
+
+ len = strlcpy(buf, page, sizeof(buf));
+ if (len < sizeof(buf)) {
+ /* Strip any newline added from userspace. */
+ stripped = strstrip(buf);
+ len = strlen(stripped);
+ }
+ if (len > INQUIRY_REVISION_LEN) {
+ pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
+ __stringify(INQUIRY_REVISION_LEN)
+ "\n");
+ return -EOVERFLOW;
+ }
+
+ ret = target_check_inquiry_data(stripped);
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set T10 Revision while active %d exports exist\n",
+ dev->export_count);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
+ strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
+
+ pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
+ dev->t10_wwn.revision);
+
+ return count;
+}
+
/*
* VPD page 0x80 Unit serial
*/
@@ -1442,6 +1568,8 @@ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
CONFIGFS_ATTR(target_wwn_, vendor_id);
+CONFIGFS_ATTR(target_wwn_, product_id);
+CONFIGFS_ATTR(target_wwn_, revision);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
@@ -1450,6 +1578,8 @@ CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
&target_wwn_attr_vendor_id,
+ &target_wwn_attr_product_id,
+ &target_wwn_attr_revision,
&target_wwn_attr_vpd_unit_serial,
&target_wwn_attr_vpd_protocol_identifier,
&target_wwn_attr_vpd_assoc_logical_unit,
@@ -1494,11 +1624,12 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
char *page)
{
+ struct se_session *sess = dev->reservation_holder;
struct se_node_acl *se_nacl;
ssize_t len;
- se_nacl = dev->dev_reserved_node_acl;
- if (se_nacl) {
+ if (sess) {
+ se_nacl = sess->se_node_acl;
len = sprintf(page,
"SPC-2 Reservation: %s Initiator: %s\n",
se_nacl->se_tpg->se_tpg_tfo->fabric_name,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 1f8482b6473b..7eae1c823c4e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -85,7 +85,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
goto out_unlock;
}
- se_cmd->se_lun = rcu_dereference(deve->se_lun);
+ se_cmd->se_lun = se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
@@ -176,7 +176,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
goto out_unlock;
}
- se_cmd->se_lun = rcu_dereference(deve->se_lun);
+ se_cmd->se_lun = se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 1597a9ebadca..03767693f580 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -111,10 +111,10 @@ target_scsi2_reservation_check(struct se_cmd *cmd)
break;
}
- if (!dev->dev_reserved_node_acl || !sess)
+ if (!dev->reservation_holder || !sess)
return 0;
- if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ if (dev->reservation_holder->se_node_acl != sess->se_node_acl)
return TCM_RESERVATION_CONFLICT;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
@@ -200,6 +200,16 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
return 0;
}
+void target_release_reservation(struct se_device *dev)
+{
+ dev->reservation_holder = NULL;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+ dev->dev_res_bin_isid = 0;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
+ }
+}
+
sense_reason_t
target_scsi2_reservation_release(struct se_cmd *cmd)
{
@@ -217,21 +227,16 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
return TCM_RESERVATION_CONFLICT;
spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_reserved_node_acl || !sess)
+ if (!dev->reservation_holder || !sess)
goto out_unlock;
- if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ if (dev->reservation_holder->se_node_acl != sess->se_node_acl)
goto out_unlock;
if (dev->dev_res_bin_isid != sess->sess_bin_isid)
goto out_unlock;
- dev->dev_reserved_node_acl = NULL;
- dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
- if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
- dev->dev_res_bin_isid = 0;
- dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
- }
+ target_release_reservation(dev);
tpg = sess->se_tpg;
pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
" MAPPED LUN: %llu for %s\n",
@@ -275,13 +280,13 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
tpg = sess->se_tpg;
spin_lock(&dev->dev_reservation_lock);
- if (dev->dev_reserved_node_acl &&
- (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+ if (dev->reservation_holder &&
+ dev->reservation_holder->se_node_acl != sess->se_node_acl) {
pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
tpg->se_tpg_tfo->fabric_name);
pr_err("Original reserver LUN: %llu %s\n",
cmd->se_lun->unpacked_lun,
- dev->dev_reserved_node_acl->initiatorname);
+ dev->reservation_holder->se_node_acl->initiatorname);
pr_err("Current attempt - LUN: %llu -> MAPPED LUN: %llu"
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->orig_fe_lun,
@@ -290,7 +295,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
goto out_unlock;
}
- dev->dev_reserved_node_acl = sess->se_node_acl;
+ dev->reservation_holder = sess;
dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
if (sess->sess_bin_isid != 0) {
dev->dev_res_bin_isid = sess->sess_bin_isid;
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 198fad5c89dc..a31c93e4e19c 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -58,6 +58,7 @@ extern struct kmem_cache *t10_pr_reg_cache;
extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
+extern void target_release_reservation(struct se_device *dev);
extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 3a1bb799a9ab..344df737f3a3 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -390,7 +390,7 @@ int core_tmr_lun_reset(
if (!preempt_and_abort_list &&
(dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
- dev->dev_reserved_node_acl = NULL;
+ dev->reservation_holder = NULL;
dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9704b135a7bc..40b29ca5a98d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -441,26 +441,26 @@ static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
static const struct genl_ops tcmu_genl_ops[] = {
{
.cmd = TCMU_CMD_SET_FEATURES,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
- .policy = tcmu_attr_policy,
.doit = tcmu_genl_set_features,
},
{
.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
- .policy = tcmu_attr_policy,
.doit = tcmu_genl_add_dev_done,
},
{
.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
- .policy = tcmu_attr_policy,
.doit = tcmu_genl_rm_dev_done,
},
{
.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
- .policy = tcmu_attr_policy,
.doit = tcmu_genl_reconfig_dev_done,
},
};
@@ -472,6 +472,7 @@ static struct genl_family tcmu_genl_family __ro_after_init = {
.name = "TCM-USER",
.version = 2,
.maxattr = TCMU_ATTR_MAX,
+ .policy = tcmu_attr_policy,
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 9be1418e919f..e59a896ac58a 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -389,7 +389,6 @@ out:
*/
struct xcopy_pt_cmd {
- bool remote_port;
struct se_cmd se_cmd;
struct completion xpt_passthrough_sem;
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
@@ -506,72 +505,20 @@ void target_xcopy_release_pt(void)
destroy_workqueue(xcopy_wq);
}
-static void target_xcopy_setup_pt_port(
- struct xcopy_pt_cmd *xpt_cmd,
- struct xcopy_op *xop,
- bool remote_port)
-{
- struct se_cmd *ec_cmd = xop->xop_se_cmd;
- struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;
-
- if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
- /*
- * Honor destination port reservations for X-COPY PUSH emulation
- * when CDB is received on local source port, and READs blocks to
- * WRITE on remote destination port.
- */
- if (remote_port) {
- xpt_cmd->remote_port = remote_port;
- } else {
- pt_cmd->se_lun = ec_cmd->se_lun;
- pt_cmd->se_dev = ec_cmd->se_dev;
-
- pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
- " %p\n", pt_cmd->se_dev);
- pt_cmd->se_lun = ec_cmd->se_lun;
- pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
- pt_cmd->se_lun);
- }
- } else {
- /*
- * Honor source port reservation for X-COPY PULL emulation
- * when CDB is received on local desintation port, and READs
- * blocks from the remote source port to WRITE on local
- * destination port.
- */
- if (remote_port) {
- xpt_cmd->remote_port = remote_port;
- } else {
- pt_cmd->se_lun = ec_cmd->se_lun;
- pt_cmd->se_dev = ec_cmd->se_dev;
-
- pr_debug("Honoring local DST port from ec_cmd->se_dev:"
- " %p\n", pt_cmd->se_dev);
- pt_cmd->se_lun = ec_cmd->se_lun;
- pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
- pt_cmd->se_lun);
- }
- }
-}
-
-static void target_xcopy_init_pt_lun(struct se_device *se_dev,
- struct se_cmd *pt_cmd, bool remote_port)
-{
- /*
- * Don't allocate + init an pt_cmd->se_lun if honoring local port for
- * reservations. The pt_cmd->se_lun pointer will be setup from within
- * target_xcopy_setup_pt_port()
- */
- if (remote_port) {
- pr_debug("Setup emulated se_dev: %p from se_dev\n",
- pt_cmd->se_dev);
- pt_cmd->se_lun = &se_dev->xcopy_lun;
- pt_cmd->se_dev = se_dev;
- }
-
- pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
-}
-
+/*
+ * target_xcopy_setup_pt_cmd - set up a pass-through command
+ * @xpt_cmd: Data structure to initialize.
+ * @xop: Describes the XCOPY operation received from an initiator.
+ * @se_dev: Backend device to associate with @xpt_cmd if
+ * @remote_port == true.
+ * @cdb: SCSI CDB to be copied into @xpt_cmd.
+ * @remote_port: If false, use the LUN through which the XCOPY command has
+ * been received. If true, use @se_dev->xcopy_lun.
+ * @alloc_mem: Whether or not to allocate an SGL list.
+ *
+ * Set up a SCSI command (READ or WRITE) that will be used to execute an
+ * XCOPY command.
+ */
static int target_xcopy_setup_pt_cmd(
struct xcopy_pt_cmd *xpt_cmd,
struct xcopy_op *xop,
@@ -583,12 +530,19 @@ static int target_xcopy_setup_pt_cmd(
struct se_cmd *cmd = &xpt_cmd->se_cmd;
sense_reason_t sense_rc;
int ret = 0, rc;
+
/*
* Setup LUN+port to honor reservations based upon xop->op_origin for
* X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
*/
- target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
- target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
+ if (remote_port) {
+ cmd->se_lun = &se_dev->xcopy_lun;
+ cmd->se_dev = se_dev;
+ } else {
+ cmd->se_lun = xop->xop_se_cmd->se_lun;
+ cmd->se_dev = xop->xop_se_cmd->se_dev;
+ }
+ cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
cmd->tag = 0;
sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
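Following the kernel-doc added above, callers pick the reservation context by toggling @remote_port. A hedged sketch of the WRITE half of an X-COPY PUSH (where, per the removed comment, the destination port is the remote one), using the parameter order listed in the kernel-doc; the variable names and the @alloc_mem choice are assumptions, not taken from this patch:

	/* WRITE to the remote destination: use its backend device and the
	 * per-device xcopy LUN selected by remote_port == true. */
	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, cdb,
				       /*remote_port=*/true,
				       /*alloc_mem=*/false);
	if (rc < 0)
		return rc;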
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 0b9ab1d0dd45..49fd7312e2aa 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -273,7 +273,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
- rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+ rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
if (rc > 0)
shm->num_pages = rc;
if (rc != num_pages) {
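The tee_shm hunk above tracks the mm/gup conversion in which the third argument of get_user_pages_fast() became a FOLL_* flags word rather than a write boolean; FOLL_WRITE asks for writable mappings and further FOLL_* bits can now be OR'ed in. A minimal sketch of the new calling convention, with a hypothetical cleanup policy for a partial pin:

	rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, pages);
	if (rc < 0)
		return rc;			/* fault or bad address range */
	if (rc != num_pages) {
		release_pages(pages, rc);	/* drop the partial pin */
		return -ENOMEM;
	}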
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 653aa27a25a4..66a709d5d6b9 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -200,6 +200,17 @@ config THERMAL_EMULATION
because userland can easily disable the thermal policy by simply
flooding this sysfs node with low temperature values.
+config THERMAL_MMIO
+ tristate "Generic Thermal MMIO driver"
+ depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This option enables the generic thermal MMIO driver, which obtains
+ the temperature through a single memory-mapped read. Any hardware
+ or system that exposes its temperature via one memory-mapped
+ location, be it a register or shared memory, is a potential
+ candidate for this driver.
+
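To make the new help text concrete, the model it describes reduces to one readl() of a fixed location plus a platform-specific scaling into millicelsius for the thermal core. A minimal sketch of such a read path, with an entirely hypothetical register layout and scale factor (the real thermal_mmio.c is not part of this hunk):

#include <linux/io.h>

struct example_mmio_sensor {
	void __iomem *temp_reg;	/* mapped temperature register */
	int factor;		/* millicelsius per LSB, platform specific */
};

static int example_mmio_get_temp(void *data, int *temp)
{
	struct example_mmio_sensor *s = data;

	/* a single memory-mapped read, then convert to millicelsius */
	*temp = readl(s->temp_reg) * s->factor;
	return 0;
}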
config HISI_THERMAL
tristate "Hisilicon thermal driver"
depends on ARCH_HISI || COMPILE_TEST
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 486d682be047..74a37c7f847a 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -29,6 +29,7 @@ thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o
# platform thermal drivers
obj-y += broadcom/
+obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 2284cbecedf3..475ce2900771 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -3,7 +3,6 @@
* Copyright (C) 2018 Broadcom
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -100,18 +99,11 @@ static const struct of_device_id sr_thermal_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sr_thermal_of_match);
-static const struct acpi_device_id sr_thermal_acpi_ids[] = {
- { .id = "BRCM0500" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(acpi, sr_thermal_acpi_ids);
-
static struct platform_driver sr_thermal_driver = {
.probe = sr_thermal_probe,
.driver = {
.name = "sr-thermal",
.of_match_table = sr_thermal_of_match,
- .acpi_match_table = ACPI_PTR(sr_thermal_acpi_ids),
},
};
module_platform_driver(sr_thermal_driver);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index f7c1f49ec87f..4c5db59a619b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -1,26 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/thermal/cpu_cooling.c
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
- * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
- * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
+ * Copyright (C) 2012-2018 Linaro Limited.
*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * Authors: Amit Daniel <amit.kachhap@linaro.org>
+ * Viresh Kumar <viresh.kumar@linaro.org>
*
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/module.h>
#include <linux/thermal.h>
@@ -99,7 +87,6 @@ struct cpufreq_cooling_device {
unsigned int clipped_freq;
unsigned int max_level;
struct freq_table *freq_table; /* In descending order */
- struct thermal_cooling_device *cdev;
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
@@ -207,8 +194,7 @@ static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
dev = get_cpu_device(cpu);
if (unlikely(!dev)) {
- dev_warn(&cpufreq_cdev->cdev->device,
- "No cpu device for cpu %d\n", cpu);
+ pr_warn("No cpu device for cpu %d\n", cpu);
return -ENODEV;
}
@@ -458,7 +444,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
load = 0;
total_load += load;
- if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
+ if (load_cpu)
load_cpu[i] = load;
i++;
@@ -541,7 +527,6 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
- power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
normalised_power = (power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
@@ -692,7 +677,6 @@ __cpufreq_cooling_register(struct device_node *np,
goto remove_ida;
cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
- cpufreq_cdev->cdev = cdev;
mutex_lock(&cooling_list_lock);
/* Register the notifier for first cpufreq cooling device */
@@ -810,7 +794,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- thermal_cooling_device_unregister(cpufreq_cdev->cdev);
+ thermal_cooling_device_unregister(cdev);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table);
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index 45e7e5cbdffb..7c71ffb733a1 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -230,7 +230,7 @@ static void get_single_name(acpi_handle handle, char *name)
if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)))
pr_warn("Failed to get device name from acpi handle\n");
else {
- memcpy(name, buffer.pointer, ACPI_NAME_SIZE);
+ memcpy(name, buffer.pointer, ACPI_NAMESEG_SIZE);
kfree(buffer.pointer);
}
}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 2df059cc07e2..dc5093be553e 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -5,6 +5,9 @@
* Copyright (C) 2013 Texas Instruments
* Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 717a08600bb5..fc6fe50cdde4 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
-qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-v2.o
+
+qcom_tsens-y += tsens.o tsens-common.o tsens-v0_1.o \
+ tsens-8960.o tsens-v2.o tsens-v1.o
obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
deleted file mode 100644
index c6dd620ac029..000000000000
--- a/drivers/thermal/qcom/tsens-8916.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/platform_device.h>
-#include "tsens.h"
-
-/* eeprom layout data for 8916 */
-#define BASE0_MASK 0x0000007f
-#define BASE1_MASK 0xfe000000
-#define BASE0_SHIFT 0
-#define BASE1_SHIFT 25
-
-#define S0_P1_MASK 0x00000f80
-#define S1_P1_MASK 0x003e0000
-#define S2_P1_MASK 0xf8000000
-#define S3_P1_MASK 0x000003e0
-#define S4_P1_MASK 0x000f8000
-
-#define S0_P2_MASK 0x0001f000
-#define S1_P2_MASK 0x07c00000
-#define S2_P2_MASK 0x0000001f
-#define S3_P2_MASK 0x00007c00
-#define S4_P2_MASK 0x01f00000
-
-#define S0_P1_SHIFT 7
-#define S1_P1_SHIFT 17
-#define S2_P1_SHIFT 27
-#define S3_P1_SHIFT 5
-#define S4_P1_SHIFT 15
-
-#define S0_P2_SHIFT 12
-#define S1_P2_SHIFT 22
-#define S2_P2_SHIFT 0
-#define S3_P2_SHIFT 10
-#define S4_P2_SHIFT 20
-
-#define CAL_SEL_MASK 0xe0000000
-#define CAL_SEL_SHIFT 29
-
-static int calibrate_8916(struct tsens_device *tmdev)
-{
- int base0 = 0, base1 = 0, i;
- u32 p1[5], p2[5];
- int mode = 0;
- u32 *qfprom_cdata, *qfprom_csel;
-
- qfprom_cdata = (u32 *)qfprom_read(tmdev->dev, "calib");
- if (IS_ERR(qfprom_cdata))
- return PTR_ERR(qfprom_cdata);
-
- qfprom_csel = (u32 *)qfprom_read(tmdev->dev, "calib_sel");
- if (IS_ERR(qfprom_csel))
- return PTR_ERR(qfprom_csel);
-
- mode = (qfprom_csel[0] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
- dev_dbg(tmdev->dev, "calibration mode is %d\n", mode);
-
- switch (mode) {
- case TWO_PT_CALIB:
- base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
- p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
- p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
- p2[2] = (qfprom_cdata[1] & S2_P2_MASK) >> S2_P2_SHIFT;
- p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
- p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
- for (i = 0; i < tmdev->num_sensors; i++)
- p2[i] = ((base1 + p2[i]) << 3);
- /* Fall through */
- case ONE_PT_CALIB2:
- base0 = (qfprom_cdata[0] & BASE0_MASK);
- p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
- p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
- p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
- p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
- p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
- for (i = 0; i < tmdev->num_sensors; i++)
- p1[i] = (((base0) + p1[i]) << 3);
- break;
- default:
- for (i = 0; i < tmdev->num_sensors; i++) {
- p1[i] = 500;
- p2[i] = 780;
- }
- break;
- }
-
- compute_intercept_slope(tmdev, p1, p2, mode);
-
- return 0;
-}
-
-static const struct tsens_ops ops_8916 = {
- .init = init_common,
- .calibrate = calibrate_8916,
- .get_temp = get_temp_common,
-};
-
-const struct tsens_data data_8916 = {
- .num_sensors = 5,
- .ops = &ops_8916,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
- .hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
-};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 0f0adb302a7b..8d9b721dadb6 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -56,21 +56,21 @@
#define TRDY_MASK BIT(7)
#define TIMEOUT_US 100
-static int suspend_8960(struct tsens_device *tmdev)
+static int suspend_8960(struct tsens_priv *priv)
{
int ret;
unsigned int mask;
- struct regmap *map = tmdev->tm_map;
+ struct regmap *map = priv->tm_map;
- ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
+ ret = regmap_read(map, THRESHOLD_ADDR, &priv->ctx.threshold);
if (ret)
return ret;
- ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control);
+ ret = regmap_read(map, CNTL_ADDR, &priv->ctx.control);
if (ret)
return ret;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
mask = SLP_CLK_ENA | EN;
else
mask = SLP_CLK_ENA_8660 | EN;
@@ -82,10 +82,10 @@ static int suspend_8960(struct tsens_device *tmdev)
return 0;
}
-static int resume_8960(struct tsens_device *tmdev)
+static int resume_8960(struct tsens_priv *priv)
{
int ret;
- struct regmap *map = tmdev->tm_map;
+ struct regmap *map = priv->tm_map;
ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
if (ret)
@@ -95,80 +95,80 @@ static int resume_8960(struct tsens_device *tmdev)
* Separate CONFIG restore is not needed only for 8660 as
* config is part of CTRL Addr and its restored as such
*/
- if (tmdev->num_sensors > 1) {
+ if (priv->num_sensors > 1) {
ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG);
if (ret)
return ret;
}
- ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold);
+ ret = regmap_write(map, THRESHOLD_ADDR, priv->ctx.threshold);
if (ret)
return ret;
- ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control);
+ ret = regmap_write(map, CNTL_ADDR, priv->ctx.control);
if (ret)
return ret;
return 0;
}
-static int enable_8960(struct tsens_device *tmdev, int id)
+static int enable_8960(struct tsens_priv *priv, int id)
{
int ret;
u32 reg, mask;
- ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg);
+ ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg);
if (ret)
return ret;
mask = BIT(id + SENSOR0_SHIFT);
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg | SW_RST);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
reg |= mask | SLP_CLK_ENA | EN;
else
reg |= mask | SLP_CLK_ENA_8660 | EN;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg);
if (ret)
return ret;
return 0;
}
-static void disable_8960(struct tsens_device *tmdev)
+static void disable_8960(struct tsens_priv *priv)
{
int ret;
u32 reg_cntl;
u32 mask;
- mask = GENMASK(tmdev->num_sensors - 1, 0);
+ mask = GENMASK(priv->num_sensors - 1, 0);
mask <<= SENSOR0_SHIFT;
mask |= EN;
- ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg_cntl);
+ ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg_cntl);
if (ret)
return;
reg_cntl &= ~mask;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
reg_cntl &= ~SLP_CLK_ENA;
else
reg_cntl &= ~SLP_CLK_ENA_8660;
- regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
}
-static int init_8960(struct tsens_device *tmdev)
+static int init_8960(struct tsens_priv *priv)
{
int ret, i;
u32 reg_cntl;
- tmdev->tm_map = dev_get_regmap(tmdev->dev, NULL);
- if (!tmdev->tm_map)
+ priv->tm_map = dev_get_regmap(priv->dev, NULL);
+ if (!priv->tm_map)
return -ENODEV;
/*
@@ -177,21 +177,21 @@ static int init_8960(struct tsens_device *tmdev)
* but the control registers stay in the same place, i.e
* directly after the first 5 status registers.
*/
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
if (i >= 5)
- tmdev->sensor[i].status = S0_STATUS_ADDR + 40;
- tmdev->sensor[i].status += i * 4;
+ priv->sensor[i].status = S0_STATUS_ADDR + 40;
+ priv->sensor[i].status += i * 4;
}
reg_cntl = SW_RST;
- ret = regmap_update_bits(tmdev->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
+ ret = regmap_update_bits(priv->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
if (ret)
return ret;
- if (tmdev->num_sensors > 1) {
+ if (priv->num_sensors > 1) {
reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
reg_cntl &= ~SW_RST;
- ret = regmap_update_bits(tmdev->tm_map, CONFIG_ADDR,
+ ret = regmap_update_bits(priv->tm_map, CONFIG_ADDR,
CONFIG_MASK, CONFIG);
} else {
reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
@@ -199,30 +199,30 @@ static int init_8960(struct tsens_device *tmdev)
reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
}
- reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ reg_cntl |= GENMASK(priv->num_sensors - 1, 0) << SENSOR0_SHIFT;
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
reg_cntl |= EN;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
return 0;
}
-static int calibrate_8960(struct tsens_device *tmdev)
+static int calibrate_8960(struct tsens_priv *priv)
{
int i;
char *data;
- ssize_t num_read = tmdev->num_sensors;
- struct tsens_sensor *s = tmdev->sensor;
+ ssize_t num_read = priv->num_sensors;
+ struct tsens_sensor *s = priv->sensor;
- data = qfprom_read(tmdev->dev, "calib");
+ data = qfprom_read(priv->dev, "calib");
if (IS_ERR(data))
- data = qfprom_read(tmdev->dev, "calib_backup");
+ data = qfprom_read(priv->dev, "calib_backup");
if (IS_ERR(data))
return PTR_ERR(data);
@@ -243,21 +243,21 @@ static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
return adc_code * slope + offset;
}
-static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
+static int get_temp_8960(struct tsens_priv *priv, int id, int *temp)
{
int ret;
u32 code, trdy;
- const struct tsens_sensor *s = &tmdev->sensor[id];
+ const struct tsens_sensor *s = &priv->sensor[id];
unsigned long timeout;
timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
do {
- ret = regmap_read(tmdev->tm_map, INT_STATUS_ADDR, &trdy);
+ ret = regmap_read(priv->tm_map, INT_STATUS_ADDR, &trdy);
if (ret)
return ret;
if (!(trdy & TRDY_MASK))
continue;
- ret = regmap_read(tmdev->tm_map, s->status, &code);
+ ret = regmap_read(priv->tm_map, s->status, &code);
if (ret)
return ret;
*temp = code_to_mdegC(code, s);
@@ -277,7 +277,7 @@ static const struct tsens_ops ops_8960 = {
.resume = resume_8960,
};
-const struct tsens_data data_8960 = {
+const struct tsens_plat_data data_8960 = {
.num_sensors = 11,
.ops = &ops_8960,
};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index f80c73f11740..928e8e81ba69 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -12,18 +12,6 @@
#include <linux/regmap.h>
#include "tsens.h"
-/* SROT */
-#define TSENS_EN BIT(0)
-
-/* TM */
-#define STATUS_OFFSET 0x30
-#define SN_ADDR_OFFSET 0x4
-#define SN_ST_TEMP_MASK 0x3ff
-#define CAL_DEGC_PT1 30
-#define CAL_DEGC_PT2 120
-#define SLOPE_FACTOR 1000
-#define SLOPE_DEFAULT 3200
-
char *qfprom_read(struct device *dev, const char *cname)
{
struct nvmem_cell *cell;
@@ -46,18 +34,18 @@ char *qfprom_read(struct device *dev, const char *cname)
* and offset values are derived from tz->tzp->slope and tz->tzp->offset
* resp.
*/
-void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
u32 *p2, u32 mode)
{
int i;
int num, den;
- for (i = 0; i < tmdev->num_sensors; i++) {
- dev_dbg(tmdev->dev,
+ for (i = 0; i < priv->num_sensors; i++) {
+ dev_dbg(priv->dev,
"sensor%d - data_point1:%#x data_point2:%#x\n",
i, p1[i], p2[i]);
- tmdev->sensor[i].slope = SLOPE_DEFAULT;
+ priv->sensor[i].slope = SLOPE_DEFAULT;
if (mode == TWO_PT_CALIB) {
/*
* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
@@ -66,16 +54,30 @@ void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
num = p2[i] - p1[i];
num *= SLOPE_FACTOR;
den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
- tmdev->sensor[i].slope = num / den;
+ priv->sensor[i].slope = num / den;
}
- tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+ priv->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
(CAL_DEGC_PT1 *
- tmdev->sensor[i].slope);
- dev_dbg(tmdev->dev, "offset:%d\n", tmdev->sensor[i].offset);
+ priv->sensor[i].slope);
+ dev_dbg(priv->dev, "offset:%d\n", priv->sensor[i].offset);
}
}
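Spelling out the comment in compute_intercept_slope() above with the constants used by this driver (SLOPE_FACTOR = 1000, CAL_DEGC_PT1 = 30, CAL_DEGC_PT2 = 120, as seen in the #defines removed from this file), the per-sensor two-point calibration works out to:

	slope[i]  = SLOPE_FACTOR * (p2[i] - p1[i]) / (CAL_DEGC_PT2 - CAL_DEGC_PT1)
	          = 1000 * (p2[i] - p1[i]) / 90
	offset[i] = SLOPE_FACTOR * p1[i] - CAL_DEGC_PT1 * slope[i]

code_to_degc() then inverts this, recovering roughly (adc_code * SLOPE_FACTOR - offset) / slope with rounding, and get_temp_common() multiplies the result by 1000 to report millicelsius.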
+bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id)
+{
+ u32 val;
+ int ret;
+
+ if (hw_id > priv->num_sensors - 1)
+ return false;
+ ret = regmap_field_read(priv->rf[SENSOR_EN], &val);
+ if (ret)
+ return false;
+
+ return val & (1 << hw_id);
+}
+
static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
{
int degc, num, den;
@@ -95,18 +97,54 @@ static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
return degc;
}
-int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
+int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp)
{
- struct tsens_sensor *s = &tmdev->sensor[id];
- u32 code;
- unsigned int status_reg;
+ struct tsens_sensor *s = &priv->sensor[i];
+ u32 temp_idx = LAST_TEMP_0 + s->hw_id;
+ u32 valid_idx = VALID_0 + s->hw_id;
+ u32 last_temp = 0, valid, mask;
+ int ret;
+
+ ret = regmap_field_read(priv->rf[valid_idx], &valid);
+ if (ret)
+ return ret;
+ while (!valid) {
+ /* Valid bit is 0 for 6 AHB clock cycles.
+ * At 19.2MHz, 1 AHB clock is ~60ns.
+ * We should enter this loop very, very rarely.
+ */
+ ndelay(400);
+ ret = regmap_field_read(priv->rf[valid_idx], &valid);
+ if (ret)
+ return ret;
+ }
+
+ /* Valid bit is set, OK to read the temperature */
+ ret = regmap_field_read(priv->rf[temp_idx], &last_temp);
+ if (ret)
+ return ret;
+
+ if (priv->feat->adc) {
+ /* Convert temperature from ADC code to milliCelsius */
+ *temp = code_to_degc(last_temp, s) * 1000;
+ } else {
+ mask = GENMASK(priv->fields[LAST_TEMP_0].msb,
+ priv->fields[LAST_TEMP_0].lsb);
+ /* Convert temperature from deciCelsius to milliCelsius */
+ *temp = sign_extend32(last_temp, fls(mask) - 1) * 100;
+ }
+
+ return 0;
+}
+
+int get_temp_common(struct tsens_priv *priv, int i, int *temp)
+{
+ struct tsens_sensor *s = &priv->sensor[i];
int last_temp = 0, ret;
- status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * SN_ADDR_OFFSET;
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
+ ret = regmap_field_read(priv->rf[LAST_TEMP_0 + s->hw_id], &last_temp);
if (ret)
return ret;
- last_temp = code & SN_ST_TEMP_MASK;
*temp = code_to_degc(last_temp, s) * 1000;
@@ -127,21 +165,21 @@ static const struct regmap_config tsens_srot_config = {
.reg_stride = 4,
};
-int __init init_common(struct tsens_device *tmdev)
+int __init init_common(struct tsens_priv *priv)
{
void __iomem *tm_base, *srot_base;
+ struct device *dev = priv->dev;
struct resource *res;
- u32 code;
- int ret;
- struct platform_device *op = of_find_device_by_node(tmdev->dev->of_node);
- u16 ctrl_offset = tmdev->reg_offsets[SROT_CTRL_OFFSET];
+ u32 enabled;
+ int ret, i, j;
+ struct platform_device *op = of_find_device_by_node(priv->dev->of_node);
if (!op)
return -EINVAL;
if (op->num_resources > 1) {
/* DT with separate SROT and TM address space */
- tmdev->tm_offset = 0;
+ priv->tm_offset = 0;
res = platform_get_resource(op, IORESOURCE_MEM, 1);
srot_base = devm_ioremap_resource(&op->dev, res);
if (IS_ERR(srot_base)) {
@@ -149,16 +187,15 @@ int __init init_common(struct tsens_device *tmdev)
goto err_put_device;
}
- tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev, srot_base,
+ priv->srot_map = devm_regmap_init_mmio(dev, srot_base,
&tsens_srot_config);
- if (IS_ERR(tmdev->srot_map)) {
- ret = PTR_ERR(tmdev->srot_map);
+ if (IS_ERR(priv->srot_map)) {
+ ret = PTR_ERR(priv->srot_map);
goto err_put_device;
}
-
} else {
/* old DTs where SROT and TM were in a contiguous 2K block */
- tmdev->tm_offset = 0x1000;
+ priv->tm_offset = 0x1000;
}
res = platform_get_resource(op, IORESOURCE_MEM, 0);
@@ -168,19 +205,47 @@ int __init init_common(struct tsens_device *tmdev)
goto err_put_device;
}
- tmdev->tm_map = devm_regmap_init_mmio(tmdev->dev, tm_base, &tsens_config);
- if (IS_ERR(tmdev->tm_map)) {
- ret = PTR_ERR(tmdev->tm_map);
+ priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config);
+ if (IS_ERR(priv->tm_map)) {
+ ret = PTR_ERR(priv->tm_map);
goto err_put_device;
}
- if (tmdev->srot_map) {
- ret = regmap_read(tmdev->srot_map, ctrl_offset, &code);
- if (ret)
+ priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[TSENS_EN]);
+ if (IS_ERR(priv->rf[TSENS_EN])) {
+ ret = PTR_ERR(priv->rf[TSENS_EN]);
+ goto err_put_device;
+ }
+ ret = regmap_field_read(priv->rf[TSENS_EN], &enabled);
+ if (ret)
+ goto err_put_device;
+ if (!enabled) {
+ dev_err(dev, "tsens device is not enabled\n");
+ ret = -ENODEV;
+ goto err_put_device;
+ }
+
+ priv->rf[SENSOR_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[SENSOR_EN]);
+ if (IS_ERR(priv->rf[SENSOR_EN])) {
+ ret = PTR_ERR(priv->rf[SENSOR_EN]);
+ goto err_put_device;
+ }
+ /* now alloc regmap_fields in tm_map */
+ for (i = 0, j = LAST_TEMP_0; i < priv->feat->max_sensors; i++, j++) {
+ priv->rf[j] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[j]);
+ if (IS_ERR(priv->rf[j])) {
+ ret = PTR_ERR(priv->rf[j]);
goto err_put_device;
- if (!(code & TSENS_EN)) {
- dev_err(tmdev->dev, "tsens device is not enabled\n");
- ret = -ENODEV;
+ }
+ }
+ for (i = 0, j = VALID_0; i < priv->feat->max_sensors; i++, j++) {
+ priv->rf[j] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[j]);
+ if (IS_ERR(priv->rf[j])) {
+ ret = PTR_ERR(priv->rf[j]);
goto err_put_device;
}
}
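
As a side note to the hunk above: the non-ADC branch of get_temp_tsens_valid() sign-extends the raw LAST_TEMP field at the field width and scales deciCelsius to milliCelsius. A minimal standalone sketch of that arithmetic, assuming a 12-bit field and a made-up raw reading of 0xff8; GENMASK() and sign_extend32() are reimplemented here so the snippet builds on its own:

/*
 * Standalone sketch (not driver code) of the deciCelsius branch in
 * get_temp_tsens_valid(): the raw LAST_TEMP field is sign-extended at the
 * field width, then scaled to milliCelsius.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t mask = GENMASK(11, 0);		/* 12-bit LAST_TEMP field */
	uint32_t last_temp = 0xff8;		/* made-up raw reading: -8 deci-Celsius */
	int width = 0;

	while (mask >> width)			/* width == fls(mask) == 12 */
		width++;

	printf("%d mC\n", sign_extend32(last_temp, width - 1) * 100);	/* prints -800 */
	return 0;
}
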
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-v0_1.c
index 3d3fda3d731b..a319283c223f 100644
--- a/drivers/thermal/qcom/tsens-8974.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -6,6 +6,48 @@
#include <linux/platform_device.h>
#include "tsens.h"
+/* ----- SROT ------ */
+#define SROT_CTRL_OFF 0x0000
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0000
+#define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF 0x0004
+#define TM_Sn_STATUS_OFF 0x0030
+#define TM_TRDY_OFF 0x005c
+
+/* eeprom layout data for 8916 */
+#define MSM8916_BASE0_MASK 0x0000007f
+#define MSM8916_BASE1_MASK 0xfe000000
+#define MSM8916_BASE0_SHIFT 0
+#define MSM8916_BASE1_SHIFT 25
+
+#define MSM8916_S0_P1_MASK 0x00000f80
+#define MSM8916_S1_P1_MASK 0x003e0000
+#define MSM8916_S2_P1_MASK 0xf8000000
+#define MSM8916_S3_P1_MASK 0x000003e0
+#define MSM8916_S4_P1_MASK 0x000f8000
+
+#define MSM8916_S0_P2_MASK 0x0001f000
+#define MSM8916_S1_P2_MASK 0x07c00000
+#define MSM8916_S2_P2_MASK 0x0000001f
+#define MSM8916_S3_P2_MASK 0x00007c00
+#define MSM8916_S4_P2_MASK 0x01f00000
+
+#define MSM8916_S0_P1_SHIFT 7
+#define MSM8916_S1_P1_SHIFT 17
+#define MSM8916_S2_P1_SHIFT 27
+#define MSM8916_S3_P1_SHIFT 5
+#define MSM8916_S4_P1_SHIFT 15
+
+#define MSM8916_S0_P2_SHIFT 12
+#define MSM8916_S1_P2_SHIFT 22
+#define MSM8916_S2_P2_SHIFT 0
+#define MSM8916_S3_P2_SHIFT 10
+#define MSM8916_S4_P2_SHIFT 20
+
+#define MSM8916_CAL_SEL_MASK 0xe0000000
+#define MSM8916_CAL_SEL_SHIFT 29
+
/* eeprom layout data for 8974 */
#define BASE1_MASK 0xff
#define S0_P1_MASK 0x3f00
@@ -91,7 +133,59 @@
#define BIT_APPEND 0x3
-static int calibrate_8974(struct tsens_device *tmdev)
+static int calibrate_8916(struct tsens_priv *priv)
+{
+ int base0 = 0, base1 = 0, i;
+ u32 p1[5], p2[5];
+ int mode = 0;
+ u32 *qfprom_cdata, *qfprom_csel;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ qfprom_csel = (u32 *)qfprom_read(priv->dev, "calib_sel");
+ if (IS_ERR(qfprom_csel))
+ return PTR_ERR(qfprom_csel);
+
+ mode = (qfprom_csel[0] & MSM8916_CAL_SEL_MASK) >> MSM8916_CAL_SEL_SHIFT;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[1] & MSM8916_BASE1_MASK) >> MSM8916_BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & MSM8916_S0_P2_MASK) >> MSM8916_S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & MSM8916_S1_P2_MASK) >> MSM8916_S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & MSM8916_S2_P2_MASK) >> MSM8916_S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & MSM8916_S3_P2_MASK) >> MSM8916_S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 3);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[0] & MSM8916_BASE0_MASK);
+ p1[0] = (qfprom_cdata[0] & MSM8916_S0_P1_MASK) >> MSM8916_S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & MSM8916_S1_P1_MASK) >> MSM8916_S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & MSM8916_S2_P1_MASK) >> MSM8916_S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & MSM8916_S3_P1_MASK) >> MSM8916_S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & MSM8916_S4_P1_MASK) >> MSM8916_S4_P1_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 3);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(priv, p1, p2, mode);
+
+ return 0;
+}
+
+static int calibrate_8974(struct tsens_priv *priv)
{
int base1 = 0, base2 = 0, i;
u32 p1[11], p2[11];
@@ -99,11 +193,11 @@ static int calibrate_8974(struct tsens_device *tmdev)
u32 *calib, *bkp;
u32 calib_redun_sel;
- calib = (u32 *)qfprom_read(tmdev->dev, "calib");
+ calib = (u32 *)qfprom_read(priv->dev, "calib");
if (IS_ERR(calib))
return PTR_ERR(calib);
- bkp = (u32 *)qfprom_read(tmdev->dev, "calib_backup");
+ bkp = (u32 *)qfprom_read(priv->dev, "calib_backup");
if (IS_ERR(bkp))
return PTR_ERR(bkp);
@@ -184,25 +278,25 @@ static int calibrate_8974(struct tsens_device *tmdev)
switch (mode) {
case ONE_PT_CALIB:
- for (i = 0; i < tmdev->num_sensors; i++)
+ for (i = 0; i < priv->num_sensors; i++)
p1[i] += (base1 << 2) | BIT_APPEND;
break;
case TWO_PT_CALIB:
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
p2[i] += base2;
p2[i] <<= 2;
p2[i] |= BIT_APPEND;
}
/* Fall through */
case ONE_PT_CALIB2:
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
p1[i] += base1;
p1[i] <<= 2;
p1[i] |= BIT_APPEND;
}
break;
default:
- for (i = 0; i < tmdev->num_sensors; i++)
+ for (i = 0; i < priv->num_sensors; i++)
p2[i] = 780;
p1[0] = 502;
p1[1] = 509;
@@ -218,19 +312,71 @@ static int calibrate_8974(struct tsens_device *tmdev)
break;
}
- compute_intercept_slope(tmdev, p1, p2, mode);
+ compute_intercept_slope(priv, p1, p2, mode);
return 0;
}
+/* v0.1: 8916, 8974 */
+
+static const struct tsens_features tsens_v0_1_feat = {
+ .ver_major = VER_0_1,
+ .crit_int = 0,
+ .adc = 1,
+ .srot_split = 1,
+ .max_sensors = 11,
+};
+
+static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* No VERSION information */
+
+ /* CTRL_OFFSET */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13),
+
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),
+
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
+ /* No VALID field on v0.1 */
+ REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10),
+ REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
+ REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
+ /* No CRITICAL field on v0.1 */
+ REG_FIELD_FOR_EACH_SENSOR11(MAX_STATUS, TM_Sn_STATUS_OFF, 13, 13),
+
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
+
+static const struct tsens_ops ops_8916 = {
+ .init = init_common,
+ .calibrate = calibrate_8916,
+ .get_temp = get_temp_common,
+};
+
+const struct tsens_plat_data data_8916 = {
+ .num_sensors = 5,
+ .ops = &ops_8916,
+ .hw_ids = (unsigned int []){ 0, 1, 2, 4, 5 },
+
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
+};
+
static const struct tsens_ops ops_8974 = {
.init = init_common,
.calibrate = calibrate_8974,
.get_temp = get_temp_common,
};
-const struct tsens_data data_8974 = {
+const struct tsens_plat_data data_8974 = {
.num_sensors = 11,
.ops = &ops_8974,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
};
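
For context on the calibration paths above: compute_intercept_slope() (not shown in this hunk) turns the two fused ADC codes into a straight line between the two calibration temperatures. A rough standalone sketch of that mapping, using CAL_DEGC_PT1/PT2 and SLOPE_FACTOR from tsens.h and the driver's default codes of 500/780; the real driver's rounding may differ, this only illustrates the math:

/*
 * Sketch of the two-point linear calibration used by the ADC-based
 * (v0.1/v1) parts: the fuses give ADC codes at two known temperatures,
 * everything else is a straight line.
 */
#include <stdio.h>

#define CAL_DEGC_PT1	30	/* fuse point 1 taken at 30 degC */
#define CAL_DEGC_PT2	120	/* fuse point 2 taken at 120 degC */
#define SLOPE_FACTOR	1000

int main(void)
{
	int p1 = 500, p2 = 780;		/* default codes from the diff above */
	int slope = ((p2 - p1) * SLOPE_FACTOR) / (CAL_DEGC_PT2 - CAL_DEGC_PT1);
	int offset = p1 * SLOPE_FACTOR - CAL_DEGC_PT1 * slope;
	int code = 640;			/* hypothetical ADC reading */
	int degc = (code * SLOPE_FACTOR - offset) / slope;

	printf("slope=%d offset=%d -> %d degC\n", slope, offset, degc);	/* -> 75 degC */
	return 0;
}
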
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
new file mode 100644
index 000000000000..10b595d4f619
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include "tsens.h"
+
+/* ----- SROT ------ */
+#define SROT_HW_VER_OFF 0x0000
+#define SROT_CTRL_OFF 0x0004
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0000
+#define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF 0x0004
+#define TM_Sn_STATUS_OFF 0x0044
+#define TM_TRDY_OFF 0x0084
+
+/* eeprom layout data for qcs404/405 (v1) */
+#define BASE0_MASK 0x000007f8
+#define BASE1_MASK 0x0007f800
+#define BASE0_SHIFT 3
+#define BASE1_SHIFT 11
+
+#define S0_P1_MASK 0x0000003f
+#define S1_P1_MASK 0x0003f000
+#define S2_P1_MASK 0x3f000000
+#define S3_P1_MASK 0x000003f0
+#define S4_P1_MASK 0x003f0000
+#define S5_P1_MASK 0x0000003f
+#define S6_P1_MASK 0x0003f000
+#define S7_P1_MASK 0x3f000000
+#define S8_P1_MASK 0x000003f0
+#define S9_P1_MASK 0x003f0000
+
+#define S0_P2_MASK 0x00000fc0
+#define S1_P2_MASK 0x00fc0000
+#define S2_P2_MASK_1_0 0xc0000000
+#define S2_P2_MASK_5_2 0x0000000f
+#define S3_P2_MASK 0x0000fc00
+#define S4_P2_MASK 0x0fc00000
+#define S5_P2_MASK 0x00000fc0
+#define S6_P2_MASK 0x00fc0000
+#define S7_P2_MASK_1_0 0xc0000000
+#define S7_P2_MASK_5_2 0x0000000f
+#define S8_P2_MASK 0x0000fc00
+#define S9_P2_MASK 0x0fc00000
+
+#define S0_P1_SHIFT 0
+#define S0_P2_SHIFT 6
+#define S1_P1_SHIFT 12
+#define S1_P2_SHIFT 18
+#define S2_P1_SHIFT 24
+#define S2_P2_SHIFT_1_0 30
+
+#define S2_P2_SHIFT_5_2 0
+#define S3_P1_SHIFT 4
+#define S3_P2_SHIFT 10
+#define S4_P1_SHIFT 16
+#define S4_P2_SHIFT 22
+
+#define S5_P1_SHIFT 0
+#define S5_P2_SHIFT 6
+#define S6_P1_SHIFT 12
+#define S6_P2_SHIFT 18
+#define S7_P1_SHIFT 24
+#define S7_P2_SHIFT_1_0 30
+
+#define S7_P2_SHIFT_5_2 0
+#define S8_P1_SHIFT 4
+#define S8_P2_SHIFT 10
+#define S9_P1_SHIFT 16
+#define S9_P2_SHIFT 22
+
+#define CAL_SEL_MASK 7
+#define CAL_SEL_SHIFT 0
+
+static int calibrate_v1(struct tsens_priv *priv)
+{
+ u32 base0 = 0, base1 = 0;
+ u32 p1[10], p2[10];
+ u32 mode = 0, lsb = 0, msb = 0;
+ u32 *qfprom_cdata;
+ int i;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ mode = (qfprom_cdata[4] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[4] & BASE1_MASK) >> BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
+ /* This value is split over two registers, 2 bits and 4 bits */
+ lsb = (qfprom_cdata[0] & S2_P2_MASK_1_0) >> S2_P2_SHIFT_1_0;
+ msb = (qfprom_cdata[1] & S2_P2_MASK_5_2) >> S2_P2_SHIFT_5_2;
+ p2[2] = msb << 2 | lsb;
+ p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
+ p2[5] = (qfprom_cdata[2] & S5_P2_MASK) >> S5_P2_SHIFT;
+ p2[6] = (qfprom_cdata[2] & S6_P2_MASK) >> S6_P2_SHIFT;
+ /* This value is split over two registers, 2 bits and 4 bits */
+ lsb = (qfprom_cdata[2] & S7_P2_MASK_1_0) >> S7_P2_SHIFT_1_0;
+ msb = (qfprom_cdata[3] & S7_P2_MASK_5_2) >> S7_P2_SHIFT_5_2;
+ p2[7] = msb << 2 | lsb;
+ p2[8] = (qfprom_cdata[3] & S8_P2_MASK) >> S8_P2_SHIFT;
+ p2[9] = (qfprom_cdata[3] & S9_P2_MASK) >> S9_P2_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 2);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[4] & BASE0_MASK) >> BASE0_SHIFT;
+ p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
+ p1[5] = (qfprom_cdata[2] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (qfprom_cdata[2] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (qfprom_cdata[2] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (qfprom_cdata[3] & S8_P1_MASK) >> S8_P1_SHIFT;
+ p1[9] = (qfprom_cdata[3] & S9_P1_MASK) >> S9_P1_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 2);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(priv, p1, p2, mode);
+
+ return 0;
+}
+
+/* v1.x: qcs404,405 */
+
+static const struct tsens_features tsens_v1_feat = {
+ .ver_major = VER_1_X,
+ .crit_int = 0,
+ .adc = 1,
+ .srot_split = 1,
+ .max_sensors = 11,
+};
+
+static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* VERSION */
+ [VER_MAJOR] = REG_FIELD(SROT_HW_VER_OFF, 28, 31),
+ [VER_MINOR] = REG_FIELD(SROT_HW_VER_OFF, 16, 27),
+ [VER_STEP] = REG_FIELD(SROT_HW_VER_OFF, 0, 15),
+ /* CTRL_OFFSET */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13),
+
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),
+
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
+ REG_FIELD_FOR_EACH_SENSOR11(VALID, TM_Sn_STATUS_OFF, 14, 14),
+ REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10),
+ REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
+ REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
+ /* No CRITICAL field on v1.x */
+ REG_FIELD_FOR_EACH_SENSOR11(MAX_STATUS, TM_Sn_STATUS_OFF, 13, 13),
+
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
+
+static const struct tsens_ops ops_generic_v1 = {
+ .init = init_common,
+ .calibrate = calibrate_v1,
+ .get_temp = get_temp_tsens_valid,
+};
+
+const struct tsens_plat_data data_tsens_v1 = {
+ .ops = &ops_generic_v1,
+ .feat = &tsens_v1_feat,
+ .fields = tsens_v1_regfields,
+};
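
One detail of calibrate_v1() worth illustrating: some fuse values straddle a 32-bit qfprom word, so the driver reassembles them from two reads. A standalone sketch with made-up word contents (only the S2_P2 bits are meaningful here):

/*
 * Sketch (not driver code): the low 2 bits of S2_P2 live in bits 31:30 of
 * word 0 and the high 4 bits in bits 3:0 of word 1.
 */
#include <stdint.h>
#include <stdio.h>

#define S2_P2_MASK_1_0	0xc0000000
#define S2_P2_MASK_5_2	0x0000000f
#define S2_P2_SHIFT_1_0	30
#define S2_P2_SHIFT_5_2	0

int main(void)
{
	uint32_t word0 = 0x80000000;	/* hypothetical: bits 31:30 = 0b10 */
	uint32_t word1 = 0x0000000b;	/* hypothetical: bits 3:0 = 0b1011 */
	uint32_t lsb = (word0 & S2_P2_MASK_1_0) >> S2_P2_SHIFT_1_0;
	uint32_t msb = (word1 & S2_P2_MASK_5_2) >> S2_P2_SHIFT_5_2;
	uint32_t p2_2 = msb << 2 | lsb;

	printf("S2_P2 = 0x%x\n", p2_2);	/* 0b101110 = 0x2e */
	return 0;
}
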
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 381a212872bf..1099069f2aa3 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -4,76 +4,81 @@
* Copyright (c) 2018, Linaro Limited
*/
-#include <linux/regmap.h>
#include <linux/bitops.h>
+#include <linux/regmap.h>
#include "tsens.h"
-#define STATUS_OFFSET 0xa0
-#define LAST_TEMP_MASK 0xfff
-#define STATUS_VALID_BIT BIT(21)
+/* ----- SROT ------ */
+#define SROT_HW_VER_OFF 0x0000
+#define SROT_CTRL_OFF 0x0004
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0004
+#define TM_UPPER_LOWER_INT_STATUS_OFF 0x0008
+#define TM_UPPER_LOWER_INT_CLEAR_OFF 0x000c
+#define TM_UPPER_LOWER_INT_MASK_OFF 0x0010
+#define TM_CRITICAL_INT_STATUS_OFF 0x0014
+#define TM_CRITICAL_INT_CLEAR_OFF 0x0018
+#define TM_CRITICAL_INT_MASK_OFF 0x001c
+#define TM_Sn_UPPER_LOWER_THRESHOLD_OFF 0x0020
+#define TM_Sn_CRITICAL_THRESHOLD_OFF 0x0060
+#define TM_Sn_STATUS_OFF 0x00a0
+#define TM_TRDY_OFF 0x00e4
-static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
-{
- struct tsens_sensor *s = &tmdev->sensor[id];
- u32 code;
- unsigned int status_reg;
- u32 last_temp = 0, last_temp2 = 0, last_temp3 = 0;
- int ret;
+/* v2.x: 8996, 8998, sdm845 */
- status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4;
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- last_temp = code & LAST_TEMP_MASK;
- if (code & STATUS_VALID_BIT)
- goto done;
+static const struct tsens_features tsens_v2_feat = {
+ .ver_major = VER_2_X,
+ .crit_int = 1,
+ .adc = 0,
+ .srot_split = 1,
+ .max_sensors = 16,
+};
- /* Try a second time */
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp2 = code & LAST_TEMP_MASK;
- }
+static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* VERSION */
+ [VER_MAJOR] = REG_FIELD(SROT_HW_VER_OFF, 28, 31),
+ [VER_MINOR] = REG_FIELD(SROT_HW_VER_OFF, 16, 27),
+ [VER_STEP] = REG_FIELD(SROT_HW_VER_OFF, 0, 15),
+ /* CTRL_OFF */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 18),
- /* Try a third/last time */
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp3 = code & LAST_TEMP_MASK;
- }
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ /* v2 has separate enables for UPPER/LOWER/CRITICAL interrupts */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 2),
- if (last_temp == last_temp2)
- last_temp = last_temp2;
- else if (last_temp2 == last_temp3)
- last_temp = last_temp3;
-done:
- /* Convert temperature from deciCelsius to milliCelsius */
- *temp = sign_extend32(last_temp, fls(LAST_TEMP_MASK) - 1) * 100;
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR16(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 11),
+ REG_FIELD_FOR_EACH_SENSOR16(VALID, TM_Sn_STATUS_OFF, 21, 21),
+ REG_FIELD_FOR_EACH_SENSOR16(MIN_STATUS, TM_Sn_STATUS_OFF, 16, 16),
+ REG_FIELD_FOR_EACH_SENSOR16(LOWER_STATUS, TM_Sn_STATUS_OFF, 17, 17),
+ REG_FIELD_FOR_EACH_SENSOR16(UPPER_STATUS, TM_Sn_STATUS_OFF, 18, 18),
+ REG_FIELD_FOR_EACH_SENSOR16(CRITICAL_STATUS, TM_Sn_STATUS_OFF, 19, 19),
+ REG_FIELD_FOR_EACH_SENSOR16(MAX_STATUS, TM_Sn_STATUS_OFF, 20, 20),
- return 0;
-}
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
static const struct tsens_ops ops_generic_v2 = {
.init = init_common,
- .get_temp = get_temp_tsens_v2,
+ .get_temp = get_temp_tsens_valid,
};
-const struct tsens_data data_tsens_v2 = {
- .ops = &ops_generic_v2,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
+const struct tsens_plat_data data_tsens_v2 = {
+ .ops = &ops_generic_v2,
+ .feat = &tsens_v2_feat,
+ .fields = tsens_v2_regfields,
};
/* Kept around for backward compatibility with old msm8996.dtsi */
-const struct tsens_data data_8996 = {
+const struct tsens_plat_data data_8996 = {
.num_sensors = 13,
.ops = &ops_generic_v2,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
+ .feat = &tsens_v2_feat,
+ .fields = tsens_v2_regfields,
};
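
The common thread of the v0.1/v1/v2 tables above is the switch from open-coded offset/mask/shift register accesses to regmap_fields. A hedged sketch of that pattern using the regmap API as this series does; EXAMPLE_EN, example_fields and example_is_enabled() are illustrative names, not driver symbols:

/*
 * Sketch of the regmap_field pattern: describe the field once with
 * REG_FIELD(), then let regmap handle the offset/shift/mask on reads.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

enum { EXAMPLE_EN, EXAMPLE_MAX_FIELDS };

static const struct reg_field example_fields[EXAMPLE_MAX_FIELDS] = {
	[EXAMPLE_EN] = REG_FIELD(0x0004, 0, 0),	/* bit 0 of register 0x4 */
};

static int example_is_enabled(struct device *dev, struct regmap *map)
{
	struct regmap_field *rf;
	unsigned int val;
	int ret;

	rf = devm_regmap_field_alloc(dev, map, example_fields[EXAMPLE_EN]);
	if (IS_ERR(rf))
		return PTR_ERR(rf);

	/* regmap applies the register offset and bit field for us */
	ret = regmap_field_read(rf, &val);
	if (ret)
		return ret;

	return val;
}
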
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index f1ec9bbe4717..36b0b52db524 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -15,38 +15,38 @@
static int tsens_get_temp(void *data, int *temp)
{
const struct tsens_sensor *s = data;
- struct tsens_device *tmdev = s->tmdev;
+ struct tsens_priv *priv = s->priv;
- return tmdev->ops->get_temp(tmdev, s->id, temp);
+ return priv->ops->get_temp(priv, s->id, temp);
}
-static int tsens_get_trend(void *p, int trip, enum thermal_trend *trend)
+static int tsens_get_trend(void *data, int trip, enum thermal_trend *trend)
{
- const struct tsens_sensor *s = p;
- struct tsens_device *tmdev = s->tmdev;
+ const struct tsens_sensor *s = data;
+ struct tsens_priv *priv = s->priv;
- if (tmdev->ops->get_trend)
- return tmdev->ops->get_trend(tmdev, s->id, trend);
+ if (priv->ops->get_trend)
+ return priv->ops->get_trend(priv, s->id, trend);
return -ENOTSUPP;
}
static int __maybe_unused tsens_suspend(struct device *dev)
{
- struct tsens_device *tmdev = dev_get_drvdata(dev);
+ struct tsens_priv *priv = dev_get_drvdata(dev);
- if (tmdev->ops && tmdev->ops->suspend)
- return tmdev->ops->suspend(tmdev);
+ if (priv->ops && priv->ops->suspend)
+ return priv->ops->suspend(priv);
return 0;
}
static int __maybe_unused tsens_resume(struct device *dev)
{
- struct tsens_device *tmdev = dev_get_drvdata(dev);
+ struct tsens_priv *priv = dev_get_drvdata(dev);
- if (tmdev->ops && tmdev->ops->resume)
- return tmdev->ops->resume(tmdev);
+ if (priv->ops && priv->ops->resume)
+ return priv->ops->resume(priv);
return 0;
}
@@ -64,6 +64,9 @@ static const struct of_device_id tsens_table[] = {
.compatible = "qcom,msm8996-tsens",
.data = &data_8996,
}, {
+ .compatible = "qcom,tsens-v1",
+ .data = &data_tsens_v1,
+ }, {
.compatible = "qcom,tsens-v2",
.data = &data_tsens_v2,
},
@@ -76,22 +79,27 @@ static const struct thermal_zone_of_device_ops tsens_of_ops = {
.get_trend = tsens_get_trend,
};
-static int tsens_register(struct tsens_device *tmdev)
+static int tsens_register(struct tsens_priv *priv)
{
int i;
struct thermal_zone_device *tzd;
- for (i = 0; i < tmdev->num_sensors; i++) {
- tmdev->sensor[i].tmdev = tmdev;
- tmdev->sensor[i].id = i;
- tzd = devm_thermal_zone_of_sensor_register(tmdev->dev, i,
- &tmdev->sensor[i],
+ for (i = 0; i < priv->num_sensors; i++) {
+ if (!is_sensor_enabled(priv, priv->sensor[i].hw_id)) {
+ dev_err(priv->dev, "sensor %d: disabled\n",
+ priv->sensor[i].hw_id);
+ continue;
+ }
+ priv->sensor[i].priv = priv;
+ priv->sensor[i].id = i;
+ tzd = devm_thermal_zone_of_sensor_register(priv->dev, i,
+ &priv->sensor[i],
&tsens_of_ops);
if (IS_ERR(tzd))
continue;
- tmdev->sensor[i].tzd = tzd;
- if (tmdev->ops->enable)
- tmdev->ops->enable(tmdev, i);
+ priv->sensor[i].tzd = tzd;
+ if (priv->ops->enable)
+ priv->ops->enable(priv, i);
}
return 0;
}
@@ -101,8 +109,8 @@ static int tsens_probe(struct platform_device *pdev)
int ret, i;
struct device *dev;
struct device_node *np;
- struct tsens_device *tmdev;
- const struct tsens_data *data;
+ struct tsens_priv *priv;
+ const struct tsens_plat_data *data;
const struct of_device_id *id;
u32 num_sensors;
@@ -129,55 +137,55 @@ static int tsens_probe(struct platform_device *pdev)
return -EINVAL;
}
- tmdev = devm_kzalloc(dev,
- struct_size(tmdev, sensor, num_sensors),
+ priv = devm_kzalloc(dev,
+ struct_size(priv, sensor, num_sensors),
GFP_KERNEL);
- if (!tmdev)
+ if (!priv)
return -ENOMEM;
- tmdev->dev = dev;
- tmdev->num_sensors = num_sensors;
- tmdev->ops = data->ops;
- for (i = 0; i < tmdev->num_sensors; i++) {
+ priv->dev = dev;
+ priv->num_sensors = num_sensors;
+ priv->ops = data->ops;
+ for (i = 0; i < priv->num_sensors; i++) {
if (data->hw_ids)
- tmdev->sensor[i].hw_id = data->hw_ids[i];
+ priv->sensor[i].hw_id = data->hw_ids[i];
else
- tmdev->sensor[i].hw_id = i;
- }
- for (i = 0; i < REG_ARRAY_SIZE; i++) {
- tmdev->reg_offsets[i] = data->reg_offsets[i];
+ priv->sensor[i].hw_id = i;
}
+ priv->feat = data->feat;
+ priv->fields = data->fields;
- if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
+ if (!priv->ops || !priv->ops->init || !priv->ops->get_temp)
return -EINVAL;
- ret = tmdev->ops->init(tmdev);
+ ret = priv->ops->init(priv);
if (ret < 0) {
dev_err(dev, "tsens init failed\n");
return ret;
}
- if (tmdev->ops->calibrate) {
- ret = tmdev->ops->calibrate(tmdev);
+ if (priv->ops->calibrate) {
+ ret = priv->ops->calibrate(priv);
if (ret < 0) {
- dev_err(dev, "tsens calibration failed\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "tsens calibration failed\n");
return ret;
}
}
- ret = tsens_register(tmdev);
+ ret = tsens_register(priv);
- platform_set_drvdata(pdev, tmdev);
+ platform_set_drvdata(pdev, priv);
return ret;
}
static int tsens_remove(struct platform_device *pdev)
{
- struct tsens_device *tmdev = platform_get_drvdata(pdev);
+ struct tsens_priv *priv = platform_get_drvdata(pdev);
- if (tmdev->ops->disable)
- tmdev->ops->disable(tmdev);
+ if (priv->ops->disable)
+ priv->ops->disable(priv);
return 0;
}
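
tsens_register() above hands each enabled sensor to the OF thermal core. A minimal sketch of that registration pattern; my_sensor, my_get_temp() and the returned temperature are placeholders, the real driver registers &priv->sensor[i] with tsens_get_temp():

/*
 * Sketch of registering a sensor with the OF thermal framework.
 */
#include <linux/err.h>
#include <linux/thermal.h>

struct my_sensor { int id; };

static int my_get_temp(void *data, int *temp)
{
	struct my_sensor *s = data;

	*temp = 45000 + s->id;		/* made-up value, in milliCelsius */
	return 0;
}

static const struct thermal_zone_of_device_ops my_of_ops = {
	.get_temp = my_get_temp,
};

static int my_register(struct device *dev, struct my_sensor *s)
{
	struct thermal_zone_device *tzd;

	tzd = devm_thermal_zone_of_sensor_register(dev, s->id, s, &my_of_ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);	/* the real driver just skips this sensor */

	return 0;
}
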
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 7b7feee5dc46..eefe3844fb4e 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -9,17 +9,39 @@
#define ONE_PT_CALIB 0x1
#define ONE_PT_CALIB2 0x2
#define TWO_PT_CALIB 0x3
+#define CAL_DEGC_PT1 30
+#define CAL_DEGC_PT2 120
+#define SLOPE_FACTOR 1000
+#define SLOPE_DEFAULT 3200
+
#include <linux/thermal.h>
+#include <linux/regmap.h>
+
+struct tsens_priv;
-struct tsens_device;
+enum tsens_ver {
+ VER_0_1 = 0,
+ VER_1_X,
+ VER_2_X,
+};
+/**
+ * struct tsens_sensor - data for each sensor connected to the tsens device
+ * @priv: tsens device instance that this sensor is connected to
+ * @tzd: pointer to the thermal zone that this sensor is in
+ * @offset: offset of temperature adjustment curve
+ * @id: Sensor ID
+ * @hw_id: HW ID, used when the platform provides platform-specific sensor IDs
+ * @slope: slope of temperature adjustment curve
+ * @status: status register offset, used only on the 8960 and 8660 platforms
+ */
struct tsens_sensor {
- struct tsens_device *tmdev;
+ struct tsens_priv *priv;
struct thermal_zone_device *tzd;
int offset;
- int id;
- int hw_id;
+ unsigned int id;
+ unsigned int hw_id;
int slope;
u32 status;
};
@@ -37,63 +59,274 @@ struct tsens_sensor {
*/
struct tsens_ops {
/* mandatory callbacks */
- int (*init)(struct tsens_device *);
- int (*calibrate)(struct tsens_device *);
- int (*get_temp)(struct tsens_device *, int, int *);
+ int (*init)(struct tsens_priv *priv);
+ int (*calibrate)(struct tsens_priv *priv);
+ int (*get_temp)(struct tsens_priv *priv, int i, int *temp);
/* optional callbacks */
- int (*enable)(struct tsens_device *, int);
- void (*disable)(struct tsens_device *);
- int (*suspend)(struct tsens_device *);
- int (*resume)(struct tsens_device *);
- int (*get_trend)(struct tsens_device *, int, enum thermal_trend *);
+ int (*enable)(struct tsens_priv *priv, int i);
+ void (*disable)(struct tsens_priv *priv);
+ int (*suspend)(struct tsens_priv *priv);
+ int (*resume)(struct tsens_priv *priv);
+ int (*get_trend)(struct tsens_priv *priv, int i, enum thermal_trend *trend);
};
-enum reg_list {
- SROT_CTRL_OFFSET,
+#define REG_FIELD_FOR_EACH_SENSOR11(_name, _offset, _startbit, _stopbit) \
+ [_name##_##0] = REG_FIELD(_offset, _startbit, _stopbit), \
+ [_name##_##1] = REG_FIELD(_offset + 4, _startbit, _stopbit), \
+ [_name##_##2] = REG_FIELD(_offset + 8, _startbit, _stopbit), \
+ [_name##_##3] = REG_FIELD(_offset + 12, _startbit, _stopbit), \
+ [_name##_##4] = REG_FIELD(_offset + 16, _startbit, _stopbit), \
+ [_name##_##5] = REG_FIELD(_offset + 20, _startbit, _stopbit), \
+ [_name##_##6] = REG_FIELD(_offset + 24, _startbit, _stopbit), \
+ [_name##_##7] = REG_FIELD(_offset + 28, _startbit, _stopbit), \
+ [_name##_##8] = REG_FIELD(_offset + 32, _startbit, _stopbit), \
+ [_name##_##9] = REG_FIELD(_offset + 36, _startbit, _stopbit), \
+ [_name##_##10] = REG_FIELD(_offset + 40, _startbit, _stopbit)
- REG_ARRAY_SIZE,
+#define REG_FIELD_FOR_EACH_SENSOR16(_name, _offset, _startbit, _stopbit) \
+ [_name##_##0] = REG_FIELD(_offset, _startbit, _stopbit), \
+ [_name##_##1] = REG_FIELD(_offset + 4, _startbit, _stopbit), \
+ [_name##_##2] = REG_FIELD(_offset + 8, _startbit, _stopbit), \
+ [_name##_##3] = REG_FIELD(_offset + 12, _startbit, _stopbit), \
+ [_name##_##4] = REG_FIELD(_offset + 16, _startbit, _stopbit), \
+ [_name##_##5] = REG_FIELD(_offset + 20, _startbit, _stopbit), \
+ [_name##_##6] = REG_FIELD(_offset + 24, _startbit, _stopbit), \
+ [_name##_##7] = REG_FIELD(_offset + 28, _startbit, _stopbit), \
+ [_name##_##8] = REG_FIELD(_offset + 32, _startbit, _stopbit), \
+ [_name##_##9] = REG_FIELD(_offset + 36, _startbit, _stopbit), \
+ [_name##_##10] = REG_FIELD(_offset + 40, _startbit, _stopbit), \
+ [_name##_##11] = REG_FIELD(_offset + 44, _startbit, _stopbit), \
+ [_name##_##12] = REG_FIELD(_offset + 48, _startbit, _stopbit), \
+ [_name##_##13] = REG_FIELD(_offset + 52, _startbit, _stopbit), \
+ [_name##_##14] = REG_FIELD(_offset + 56, _startbit, _stopbit), \
+ [_name##_##15] = REG_FIELD(_offset + 60, _startbit, _stopbit)
+
+/* reg_field IDs to use as an index into an array */
+enum regfield_ids {
+ /* ----- SROT ------ */
+ /* HW_VER */
+ VER_MAJOR = 0,
+ VER_MINOR,
+ VER_STEP,
+ /* CTRL_OFFSET */
+ TSENS_EN = 3,
+ TSENS_SW_RST,
+ SENSOR_EN,
+ CODE_OR_TEMP,
+
+ /* ----- TM ------ */
+ /* STATUS */
+ LAST_TEMP_0 = 7, /* Last temperature reading */
+ LAST_TEMP_1,
+ LAST_TEMP_2,
+ LAST_TEMP_3,
+ LAST_TEMP_4,
+ LAST_TEMP_5,
+ LAST_TEMP_6,
+ LAST_TEMP_7,
+ LAST_TEMP_8,
+ LAST_TEMP_9,
+ LAST_TEMP_10,
+ LAST_TEMP_11,
+ LAST_TEMP_12,
+ LAST_TEMP_13,
+ LAST_TEMP_14,
+ LAST_TEMP_15,
+ VALID_0 = 23, /* VALID reading or not */
+ VALID_1,
+ VALID_2,
+ VALID_3,
+ VALID_4,
+ VALID_5,
+ VALID_6,
+ VALID_7,
+ VALID_8,
+ VALID_9,
+ VALID_10,
+ VALID_11,
+ VALID_12,
+ VALID_13,
+ VALID_14,
+ VALID_15,
+ MIN_STATUS_0, /* MIN threshold violated */
+ MIN_STATUS_1,
+ MIN_STATUS_2,
+ MIN_STATUS_3,
+ MIN_STATUS_4,
+ MIN_STATUS_5,
+ MIN_STATUS_6,
+ MIN_STATUS_7,
+ MIN_STATUS_8,
+ MIN_STATUS_9,
+ MIN_STATUS_10,
+ MIN_STATUS_11,
+ MIN_STATUS_12,
+ MIN_STATUS_13,
+ MIN_STATUS_14,
+ MIN_STATUS_15,
+ MAX_STATUS_0, /* MAX threshold violated */
+ MAX_STATUS_1,
+ MAX_STATUS_2,
+ MAX_STATUS_3,
+ MAX_STATUS_4,
+ MAX_STATUS_5,
+ MAX_STATUS_6,
+ MAX_STATUS_7,
+ MAX_STATUS_8,
+ MAX_STATUS_9,
+ MAX_STATUS_10,
+ MAX_STATUS_11,
+ MAX_STATUS_12,
+ MAX_STATUS_13,
+ MAX_STATUS_14,
+ MAX_STATUS_15,
+ LOWER_STATUS_0, /* LOWER threshold violated */
+ LOWER_STATUS_1,
+ LOWER_STATUS_2,
+ LOWER_STATUS_3,
+ LOWER_STATUS_4,
+ LOWER_STATUS_5,
+ LOWER_STATUS_6,
+ LOWER_STATUS_7,
+ LOWER_STATUS_8,
+ LOWER_STATUS_9,
+ LOWER_STATUS_10,
+ LOWER_STATUS_11,
+ LOWER_STATUS_12,
+ LOWER_STATUS_13,
+ LOWER_STATUS_14,
+ LOWER_STATUS_15,
+ UPPER_STATUS_0, /* UPPER threshold violated */
+ UPPER_STATUS_1,
+ UPPER_STATUS_2,
+ UPPER_STATUS_3,
+ UPPER_STATUS_4,
+ UPPER_STATUS_5,
+ UPPER_STATUS_6,
+ UPPER_STATUS_7,
+ UPPER_STATUS_8,
+ UPPER_STATUS_9,
+ UPPER_STATUS_10,
+ UPPER_STATUS_11,
+ UPPER_STATUS_12,
+ UPPER_STATUS_13,
+ UPPER_STATUS_14,
+ UPPER_STATUS_15,
+ CRITICAL_STATUS_0, /* CRITICAL threshold violated */
+ CRITICAL_STATUS_1,
+ CRITICAL_STATUS_2,
+ CRITICAL_STATUS_3,
+ CRITICAL_STATUS_4,
+ CRITICAL_STATUS_5,
+ CRITICAL_STATUS_6,
+ CRITICAL_STATUS_7,
+ CRITICAL_STATUS_8,
+ CRITICAL_STATUS_9,
+ CRITICAL_STATUS_10,
+ CRITICAL_STATUS_11,
+ CRITICAL_STATUS_12,
+ CRITICAL_STATUS_13,
+ CRITICAL_STATUS_14,
+ CRITICAL_STATUS_15,
+ /* TRDY */
+ TRDY,
+ /* INTERRUPT ENABLE */
+ INT_EN, /* Pre-V1, V1.x */
+ LOW_INT_EN, /* V2.x */
+ UP_INT_EN, /* V2.x */
+ CRIT_INT_EN, /* V2.x */
+
+ /* Keep last */
+ MAX_REGFIELDS
};
/**
- * struct tsens_data - tsens instance specific data
- * @num_sensors: Max number of sensors supported by platform
+ * struct tsens_features - Features supported by the IP
+ * @ver_major: Major number of IP version
+ * @crit_int: does the IP support critical interrupts?
+ * @adc: do the sensors only output adc code (instead of temperature)?
+ * @srot_split: does the IP neatly split the register space into SROT and TM,
+ * with SROT only being available to secure boot firmware?
+ * @max_sensors: maximum sensors supported by this version of the IP
+ */
+struct tsens_features {
+ unsigned int ver_major;
+ unsigned int crit_int:1;
+ unsigned int adc:1;
+ unsigned int srot_split:1;
+ unsigned int max_sensors;
+};
+
+/**
+ * struct tsens_plat_data - tsens compile-time platform data
+ * @num_sensors: Number of sensors supported by platform
* @ops: operations the tsens instance supports
* @hw_ids: Subset of sensors ids supported by platform, if not the first n
- * @reg_offsets: Register offsets for commonly used registers
+ * @feat: features of the IP
+ * @fields: bitfield locations
*/
-struct tsens_data {
+struct tsens_plat_data {
const u32 num_sensors;
const struct tsens_ops *ops;
- const u16 reg_offsets[REG_ARRAY_SIZE];
unsigned int *hw_ids;
+ const struct tsens_features *feat;
+ const struct reg_field *fields;
};
-/* Registers to be saved/restored across a context loss */
+/**
+ * struct tsens_context - Registers to be saved/restored across a context loss
+ */
struct tsens_context {
int threshold;
int control;
};
-struct tsens_device {
+/**
+ * struct tsens_priv - private data for each instance of the tsens IP
+ * @dev: pointer to struct device
+ * @num_sensors: number of sensors enabled on this device
+ * @tm_map: pointer to TM register address space
+ * @srot_map: pointer to SROT register address space
+ * @tm_offset: offset of the TM registers, for old device trees that don't
+ * map the TM and SROT address spaces separately
+ * @rf: array of regmap_fields used to store value of the field
+ * @ctx: registers to be saved and restored during suspend/resume
+ * @feat: features of the IP
+ * @fields: bitfield locations
+ * @ops: pointer to list of callbacks supported by this device
+ * @sensor: list of sensors attached to this device
+ */
+struct tsens_priv {
struct device *dev;
u32 num_sensors;
struct regmap *tm_map;
struct regmap *srot_map;
u32 tm_offset;
- u16 reg_offsets[REG_ARRAY_SIZE];
+ struct regmap_field *rf[MAX_REGFIELDS];
struct tsens_context ctx;
+ const struct tsens_features *feat;
+ const struct reg_field *fields;
const struct tsens_ops *ops;
struct tsens_sensor sensor[0];
};
-char *qfprom_read(struct device *, const char *);
-void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
-int init_common(struct tsens_device *);
-int get_temp_common(struct tsens_device *, int, int *);
+char *qfprom_read(struct device *dev, const char *cname);
+void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mode);
+int init_common(struct tsens_priv *priv);
+int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp);
+int get_temp_common(struct tsens_priv *priv, int i, int *temp);
+bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id);
+
+/* TSENS target */
+extern const struct tsens_plat_data data_8960;
+
+/* TSENS v0.1 targets */
+extern const struct tsens_plat_data data_8916, data_8974;
/* TSENS v1 targets */
-extern const struct tsens_data data_8916, data_8974, data_8960;
+extern const struct tsens_plat_data data_tsens_v1;
+
/* TSENS v2 targets */
-extern const struct tsens_data data_8996, data_tsens_v2;
+extern const struct tsens_plat_data data_8996, data_tsens_v2;
#endif /* __QCOM_TSENS_H__ */
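
To make the REG_FIELD_FOR_EACH_SENSORnn() macros above more concrete, here is roughly what the first few LAST_TEMP entries expand to for the v0.1 layout (TM_Sn_STATUS_OFF is 0x30 and the field occupies bits 9:0); per-sensor status registers are 4 bytes apart:

/*
 * Illustration (not driver code) of one REG_FIELD_FOR_EACH_SENSOR11()
 * expansion: sensor n's field sits at _offset + 4 * n with the same bits.
 */
#include <linux/regmap.h>

static const struct reg_field last_temp_expanded[] = {
	[0] = REG_FIELD(0x30 + 0, 0, 9),	/* LAST_TEMP_0 */
	[1] = REG_FIELD(0x30 + 4, 0, 9),	/* LAST_TEMP_1 */
	[2] = REG_FIELD(0x30 + 8, 0, 9),	/* LAST_TEMP_2 */
	/* ... and so on up to LAST_TEMP_10 */
};
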
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 3b5f5b3fb1bc..7b364933bfb1 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -193,11 +193,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
- if (!np) {
- dev_err(&pdev->dev, "Device OF-Node is NULL");
- return -ENODEV;
- }
-
data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data),
GFP_KERNEL);
if (!data)
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 88fa41cf16e8..83f306265ee1 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -14,7 +14,6 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/thermal.h>
@@ -82,7 +81,6 @@ struct rcar_gen3_thermal_tsc {
struct rcar_gen3_thermal_priv {
struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
unsigned int num_tscs;
- spinlock_t lock; /* Protect interrupts on and off */
void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc);
};
@@ -232,38 +230,16 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
{
struct rcar_gen3_thermal_priv *priv = data;
u32 status;
- int i, ret = IRQ_HANDLED;
+ int i;
- spin_lock(&priv->lock);
for (i = 0; i < priv->num_tscs; i++) {
status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
if (status)
- ret = IRQ_WAKE_THREAD;
+ thermal_zone_device_update(priv->tscs[i]->zone,
+ THERMAL_EVENT_UNSPECIFIED);
}
- if (ret == IRQ_WAKE_THREAD)
- rcar_thermal_irq_set(priv, false);
-
- spin_unlock(&priv->lock);
-
- return ret;
-}
-
-static irqreturn_t rcar_gen3_thermal_irq_thread(int irq, void *data)
-{
- struct rcar_gen3_thermal_priv *priv = data;
- unsigned long flags;
- int i;
-
- for (i = 0; i < priv->num_tscs; i++)
- thermal_zone_device_update(priv->tscs[i]->zone,
- THERMAL_EVENT_UNSPECIFIED);
-
- spin_lock_irqsave(&priv->lock, flags);
- rcar_thermal_irq_set(priv, true);
- spin_unlock_irqrestore(&priv->lock, flags);
-
return IRQ_HANDLED;
}
@@ -307,7 +283,7 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
usleep_range(1000, 2000);
- rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F);
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN, IRQ_TEMPD1 | IRQ_TEMP2);
@@ -331,6 +307,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
static int rcar_gen3_thermal_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev);
+
+ rcar_thermal_irq_set(priv, false);
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -371,8 +350,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (soc_device_match(r8a7795es1))
priv->thermal_init = rcar_gen3_thermal_init_r8a7795es1;
- spin_lock_init(&priv->lock);
-
platform_set_drvdata(pdev, priv);
/*
@@ -390,9 +367,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (!irqname)
return -ENOMEM;
- ret = devm_request_threaded_irq(dev, irq, rcar_gen3_thermal_irq,
- rcar_gen3_thermal_irq_thread,
- IRQF_SHARED, irqname, priv);
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rcar_gen3_thermal_irq,
+ IRQF_ONESHOT, irqname, priv);
if (ret)
return ret;
}
@@ -433,10 +410,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
}
tsc->zone = zone;
- ret = of_thermal_get_ntrips(tsc->zone);
- if (ret < 0)
- goto error_unregister;
-
tsc->zone->tzp->no_hwmon = false;
ret = thermal_add_hwmon_sysfs(tsc->zone);
if (ret)
@@ -448,6 +421,10 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
goto error_unregister;
}
+ ret = of_thermal_get_ntrips(tsc->zone);
+ if (ret < 0)
+ goto error_unregister;
+
dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
}
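
The rcar_gen3 change above collapses the hard handler/thread pair into a single oneshot threaded handler. A sketch of that request pattern; my_priv and my_irq_thread() are placeholders:

/*
 * Sketch: no hard handler (NULL), all work in the thread, IRQF_ONESHOT so
 * the line stays masked until the thread function returns.
 */
#include <linux/device.h>
#include <linux/interrupt.h>

struct my_priv { int dummy; };

static irqreturn_t my_irq_thread(int irq, void *data)
{
	struct my_priv *priv = data;

	/* ack the hardware and notify the thermal core here */
	(void)priv;
	return IRQ_HANDLED;
}

static int my_request(struct device *dev, int irq, struct my_priv *priv)
{
	return devm_request_threaded_irq(dev, irq, NULL, my_irq_thread,
					 IRQF_ONESHOT, dev_name(dev), priv);
}
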
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 97462e9b40d8..d0873de718da 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -52,6 +52,7 @@ struct rcar_thermal_chip {
unsigned int irq_per_ch : 1;
unsigned int needs_suspend_resume : 1;
unsigned int nirqs;
+ unsigned int ctemp_bands;
};
static const struct rcar_thermal_chip rcar_thermal = {
@@ -60,6 +61,7 @@ static const struct rcar_thermal_chip rcar_thermal = {
.irq_per_ch = 0,
.needs_suspend_resume = 0,
.nirqs = 1,
+ .ctemp_bands = 1,
};
static const struct rcar_thermal_chip rcar_gen2_thermal = {
@@ -68,6 +70,7 @@ static const struct rcar_thermal_chip rcar_gen2_thermal = {
.irq_per_ch = 0,
.needs_suspend_resume = 0,
.nirqs = 1,
+ .ctemp_bands = 1,
};
static const struct rcar_thermal_chip rcar_gen3_thermal = {
@@ -80,6 +83,7 @@ static const struct rcar_thermal_chip rcar_gen3_thermal = {
* interrupts to detect a temperature change, rise or fall.
*/
.nirqs = 2,
+ .ctemp_bands = 2,
};
struct rcar_thermal_priv {
@@ -263,7 +267,12 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
return ret;
mutex_lock(&priv->lock);
- tmp = MCELSIUS((priv->ctemp * 5) - 65);
+ if (priv->chip->ctemp_bands == 1)
+ tmp = MCELSIUS((priv->ctemp * 5) - 65);
+ else if (priv->ctemp < 24)
+ tmp = MCELSIUS(((priv->ctemp * 55) - 720) / 10);
+ else
+ tmp = MCELSIUS((priv->ctemp * 5) - 60);
mutex_unlock(&priv->lock);
if ((tmp < MCELSIUS(-45)) || (tmp > MCELSIUS(125))) {
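
The new ctemp_bands == 2 path above uses a finer formula for low readings. A standalone sketch of the two-band conversion with a couple of sample values (MCELSIUS() reproduced from the driver):

/*
 * Sketch of the Gen3 two-band conversion: readings below 24 use the
 * finer-grained formula, the rest keep the 5 degC/step line with -60.
 */
#include <stdio.h>

#define MCELSIUS(temp)	((temp) * 1000)

static int ctemp_to_mc_gen3(int ctemp)
{
	if (ctemp < 24)
		return MCELSIUS(((ctemp * 55) - 720) / 10);
	return MCELSIUS((ctemp * 5) - 60);
}

int main(void)
{
	printf("%d %d\n", ctemp_to_mc_gen3(20), ctemp_to_mc_gen3(30));
	/* 20 -> 38000 mC, 30 -> 90000 mC */
	return 0;
}
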
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 9c7643d62ed7..bda1ca199abd 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -172,6 +172,9 @@ struct rockchip_thermal_data {
int tshut_temp;
enum tshut_mode tshut_mode;
enum tshut_polarity tshut_polarity;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state;
+ struct pinctrl_state *otp_state;
};
/**
@@ -222,11 +225,15 @@ struct rockchip_thermal_data {
#define GRF_TSADC_TESTBIT_L 0x0e648
#define GRF_TSADC_TESTBIT_H 0x0e64c
+#define PX30_GRF_SOC_CON2 0x0408
+
#define GRF_SARADC_TESTBIT_ON (0x10001 << 2)
#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2)
#define GRF_TSADC_VCM_EN_L (0x10001 << 7)
#define GRF_TSADC_VCM_EN_H (0x10001 << 7)
+#define GRF_CON_TSADC_CH_INV (0x10001 << 1)
+
/**
* struct tsadc_table - code to temperature conversion table
* @code: the value of adc channel
@@ -689,6 +696,13 @@ static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
regs + TSADCV2_AUTO_CON);
}
+static void rk_tsadcv4_initialize(struct regmap *grf, void __iomem *regs,
+ enum tshut_polarity tshut_polarity)
+{
+ rk_tsadcv2_initialize(grf, regs, tshut_polarity);
+ regmap_write(grf, PX30_GRF_SOC_CON2, GRF_CON_TSADC_CH_INV);
+}
+
static void rk_tsadcv2_irq_ack(void __iomem *regs)
{
u32 val;
@@ -818,6 +832,30 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
writel_relaxed(val, regs + TSADCV2_INT_EN);
}
+static const struct rockchip_tsadc_chip px30_tsadc_data = {
+ .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+ .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+ .chn_num = 2, /* 2 channels for tsadc */
+
+ .tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv4_initialize,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+ .table = {
+ .id = rk3328_code_table,
+ .length = ARRAY_SIZE(rk3328_code_table),
+ .data_mask = TSADCV2_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
.chn_num = 1, /* one channel for tsadc */
@@ -990,6 +1028,9 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
};
static const struct of_device_id of_rockchip_thermal_match[] = {
+ { .compatible = "rockchip,px30-tsadc",
+ .data = (void *)&px30_tsadc_data,
+ },
{
.compatible = "rockchip,rv1108-tsadc",
.data = (void *)&rv1108_tsadc_data,
@@ -1242,6 +1283,8 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
return error;
}
+ thermal->chip->control(thermal->regs, false);
+
error = clk_prepare_enable(thermal->clk);
if (error) {
dev_err(&pdev->dev, "failed to enable converter clock: %d\n",
@@ -1267,6 +1310,30 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
thermal->chip->initialize(thermal->grf, thermal->regs,
thermal->tshut_polarity);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO) {
+ thermal->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(thermal->pinctrl)) {
+ dev_err(&pdev->dev, "failed to find thermal pinctrl\n");
+ return PTR_ERR(thermal->pinctrl);
+ }
+
+ thermal->gpio_state = pinctrl_lookup_state(thermal->pinctrl,
+ "gpio");
+ if (IS_ERR_OR_NULL(thermal->gpio_state)) {
+ dev_err(&pdev->dev, "failed to find thermal gpio state\n");
+ return -EINVAL;
+ }
+
+ thermal->otp_state = pinctrl_lookup_state(thermal->pinctrl,
+ "otpout");
+ if (IS_ERR_OR_NULL(thermal->otp_state)) {
+ dev_err(&pdev->dev, "failed to find thermal otpout state\n");
+ return -EINVAL;
+ }
+
+ pinctrl_select_state(thermal->pinctrl, thermal->otp_state);
+ }
+
for (i = 0; i < thermal->chip->chn_num; i++) {
error = rockchip_thermal_register_sensor(pdev, thermal,
&thermal->sensors[i],
@@ -1337,8 +1404,8 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
clk_disable(thermal->pclk);
clk_disable(thermal->clk);
-
- pinctrl_pm_select_sleep_state(dev);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO)
+ pinctrl_select_state(thermal->pinctrl, thermal->gpio_state);
return 0;
}
@@ -1383,7 +1450,8 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
for (i = 0; i < thermal->chip->chn_num; i++)
rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
- pinctrl_pm_select_default_state(dev);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO)
+ pinctrl_select_state(thermal->pinctrl, thermal->otp_state);
return 0;
}
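
The rockchip probe path above now fetches named pinctrl states when TSHUT is routed to a GPIO. A trimmed sketch of that lookup/select sequence; my_pinctrl_setup() is an illustrative helper, not a driver function:

/*
 * Sketch: look up the named "gpio"/"otpout" states once at probe, then
 * switch between them on suspend/resume as the diff above does.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int my_pinctrl_setup(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *gpio_state, *otp_state;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	gpio_state = pinctrl_lookup_state(p, "gpio");
	if (IS_ERR_OR_NULL(gpio_state))
		return -EINVAL;

	otp_state = pinctrl_lookup_state(p, "otpout");
	if (IS_ERR_OR_NULL(otp_state))
		return -EINVAL;

	/* run in "otpout" mode by default, as the probe path does */
	return pinctrl_select_state(p, otp_state);
}
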
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
index b80f9a9e4f8f..d8b1a4586d0b 100644
--- a/drivers/thermal/st/Kconfig
+++ b/drivers/thermal/st/Kconfig
@@ -3,9 +3,9 @@
#
config ST_THERMAL
- tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
- help
- Support for thermal sensors on STMicroelectronics STi series of SoCs.
+ tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
+ help
+ Support for thermal sensors on STMicroelectronics STi series of SoCs.
config ST_THERMAL_SYSCFG
select ST_THERMAL
@@ -16,11 +16,11 @@ config ST_THERMAL_MEMMAP
tristate "STi series memory mapped access based thermal sensors"
config STM32_THERMAL
- tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
- depends on MACH_STM32MP157
- default y
- help
- Support for thermal framework on STMicroelectronics STM32 series of
- SoCs. This thermal driver allows to access to general thermal framework
- functionalities and to acces to SoC sensor functionalities. This
- configuration is fully dependent of MACH_STM32MP157.
+ tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
+ depends on MACH_STM32MP157
+ default y
+ help
+ Support for the thermal framework on STMicroelectronics STM32 series of
+ SoCs. This thermal driver allows access to the generic thermal framework
+ functionalities and to the SoC sensor functionalities. This
+ configuration is fully dependent on MACH_STM32MP157.
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index bbd73c5a4a4e..cf9ddc52f30e 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -570,8 +570,7 @@ thermal_unprepare:
static int stm_thermal_suspend(struct device *dev)
{
int ret;
- struct platform_device *pdev = to_platform_device(dev);
- struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+ struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
ret = stm_thermal_sensor_off(sensor);
if (ret)
@@ -585,8 +584,7 @@ static int stm_thermal_suspend(struct device *dev)
static int stm_thermal_resume(struct device *dev)
{
int ret;
- struct platform_device *pdev = to_platform_device(dev);
- struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+ struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
ret = stm_thermal_prepare(sensor);
if (ret)
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
index f8740f7852e3..fc0b33b3f26b 100644
--- a/drivers/thermal/tegra/Kconfig
+++ b/drivers/thermal/tegra/Kconfig
@@ -14,7 +14,7 @@ config TEGRA_BPMP_THERMAL
tristate "Tegra BPMP thermal sensing"
depends on TEGRA_BPMP || COMPILE_TEST
help
- Enable this option for support for sensing system temperature of NVIDIA
- Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
+ Enable this option to support sensing the system temperature of NVIDIA
+ Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
endmenu
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 70043a28eb7a..fcf70a3728b6 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014 - 2018, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Mikko Perttunen <mperttunen@nvidia.com>
@@ -22,6 +23,8 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -85,12 +88,51 @@
#define THERMCTL_LVL0_UP_STATS 0x10
#define THERMCTL_LVL0_DN_STATS 0x14
+#define THERMCTL_INTR_STATUS 0x84
+
+#define TH_INTR_MD0_MASK BIT(25)
+#define TH_INTR_MU0_MASK BIT(24)
+#define TH_INTR_GD0_MASK BIT(17)
+#define TH_INTR_GU0_MASK BIT(16)
+#define TH_INTR_CD0_MASK BIT(9)
+#define TH_INTR_CU0_MASK BIT(8)
+#define TH_INTR_PD0_MASK BIT(1)
+#define TH_INTR_PU0_MASK BIT(0)
+#define TH_INTR_IGNORE_MASK 0xFCFCFCFC
+
#define THERMCTL_STATS_CTL 0x94
#define STATS_CTL_CLR_DN 0x8
#define STATS_CTL_EN_DN 0x4
#define STATS_CTL_CLR_UP 0x2
#define STATS_CTL_EN_UP 0x1
+#define OC1_CFG 0x310
+#define OC1_CFG_LONG_LATENCY_MASK BIT(6)
+#define OC1_CFG_HW_RESTORE_MASK BIT(5)
+#define OC1_CFG_PWR_GOOD_MASK_MASK BIT(4)
+#define OC1_CFG_THROTTLE_MODE_MASK (0x3 << 2)
+#define OC1_CFG_ALARM_POLARITY_MASK BIT(1)
+#define OC1_CFG_EN_THROTTLE_MASK BIT(0)
+
+#define OC1_CNT_THRESHOLD 0x314
+#define OC1_THROTTLE_PERIOD 0x318
+#define OC1_ALARM_COUNT 0x31c
+#define OC1_FILTER 0x320
+#define OC1_STATS 0x3a8
+
+#define OC_INTR_STATUS 0x39c
+#define OC_INTR_ENABLE 0x3a0
+#define OC_INTR_DISABLE 0x3a4
+#define OC_STATS_CTL 0x3c4
+#define OC_STATS_CTL_CLR_ALL 0x2
+#define OC_STATS_CTL_EN_ALL 0x1
+
+#define OC_INTR_OC1_MASK BIT(0)
+#define OC_INTR_OC2_MASK BIT(1)
+#define OC_INTR_OC3_MASK BIT(2)
+#define OC_INTR_OC4_MASK BIT(3)
+#define OC_INTR_OC5_MASK BIT(4)
+
#define THROT_GLOBAL_CFG 0x400
#define THROT_GLOBAL_ENB_MASK BIT(0)
@@ -160,6 +202,15 @@
/* get dividend from the depth */
#define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1)
+/* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-sochterm.h
+ * level vector
+ * NONE 3'b000
+ * LOW 3'b001
+ * MED 3'b011
+ * HIGH 3'b111
+ */
+#define THROT_LEVEL_TO_DEPTH(level) ((0x1 << (level)) - 1)
+
/* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
#define THROT_OFFSET 0x30
#define THROT_PSKIP_CTRL(throt, dev) (THROT_PSKIP_CTRL_LITE_CPU + \
@@ -173,6 +224,25 @@
#define THROT_DELAY_CTRL(throt) (THROT_DELAY_LITE + \
(THROT_OFFSET * throt))
+#define ALARM_OFFSET 0x14
+#define ALARM_CFG(throt) (OC1_CFG + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_CNT_THRESHOLD(throt) (OC1_CNT_THRESHOLD + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_THROTTLE_PERIOD(throt) (OC1_THROTTLE_PERIOD + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_ALARM_COUNT(throt) (OC1_ALARM_COUNT + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_FILTER(throt) (OC1_FILTER + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_STATS(throt) (OC1_STATS + \
+ (4 * (throt - THROTTLE_OC1)))
+
/* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/
#define CCROC_THROT_OFFSET 0x0c
#define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect) (CCROC_THROT_PSKIP_CTRL_CPU + \
@@ -184,15 +254,32 @@
#define THERMCTL_LVL_REGS_SIZE 0x20
#define THERMCTL_LVL_REG(rg, lv) ((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))
+#define OC_THROTTLE_MODE_DISABLED 0
+#define OC_THROTTLE_MODE_BRIEF 2
+
static const int min_low_temp = -127000;
static const int max_high_temp = 127000;
enum soctherm_throttle_id {
THROTTLE_LIGHT = 0,
THROTTLE_HEAVY,
+ THROTTLE_OC1,
+ THROTTLE_OC2,
+ THROTTLE_OC3,
+ THROTTLE_OC4,
+ THROTTLE_OC5, /* OC5 is reserved */
THROTTLE_SIZE,
};
+enum soctherm_oc_irq_id {
+ TEGRA_SOC_OC_IRQ_1,
+ TEGRA_SOC_OC_IRQ_2,
+ TEGRA_SOC_OC_IRQ_3,
+ TEGRA_SOC_OC_IRQ_4,
+ TEGRA_SOC_OC_IRQ_5,
+ TEGRA_SOC_OC_IRQ_MAX,
+};
+
enum soctherm_throttle_dev_id {
THROTTLE_DEV_CPU = 0,
THROTTLE_DEV_GPU,
@@ -202,6 +289,11 @@ enum soctherm_throttle_dev_id {
static const char *const throt_names[] = {
[THROTTLE_LIGHT] = "light",
[THROTTLE_HEAVY] = "heavy",
+ [THROTTLE_OC1] = "oc1",
+ [THROTTLE_OC2] = "oc2",
+ [THROTTLE_OC3] = "oc3",
+ [THROTTLE_OC4] = "oc4",
+ [THROTTLE_OC5] = "oc5",
};
struct tegra_soctherm;
@@ -213,12 +305,23 @@ struct tegra_thermctl_zone {
const struct tegra_tsensor_group *sg;
};
+struct soctherm_oc_cfg {
+ u32 active_low;
+ u32 throt_period;
+ u32 alarm_cnt_thresh;
+ u32 alarm_filter;
+ u32 mode;
+ bool intr_en;
+};
+
struct soctherm_throt_cfg {
const char *name;
unsigned int id;
u8 priority;
u8 cpu_throt_level;
u32 cpu_throt_depth;
+ u32 gpu_throt_level;
+ struct soctherm_oc_cfg oc_cfg;
struct thermal_cooling_device *cdev;
bool init;
};
@@ -231,6 +334,9 @@ struct tegra_soctherm {
void __iomem *clk_regs;
void __iomem *ccroc_regs;
+ int thermal_irq;
+ int edp_irq;
+
u32 *calib;
struct thermal_zone_device **thermctl_tzs;
struct tegra_soctherm_soc *soc;
@@ -238,8 +344,19 @@ struct tegra_soctherm {
struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];
struct dentry *debugfs_dir;
+
+ struct mutex thermctl_lock;
};
+struct soctherm_oc_irq_chip_data {
+ struct mutex irq_lock; /* serialize OC IRQs */
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
+ int irq_enable;
+};
+
+static struct soctherm_oc_irq_chip_data soc_irq_cdata;
+
/**
* ccroc_writel() - writes a value to a CCROC register
* @ts: pointer to a struct tegra_soctherm
@@ -446,6 +563,24 @@ find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
return NULL;
}
+static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
+{
+ int i, temp = min_low_temp;
+ struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
+
+ if (id >= TEGRA124_SOCTHERM_SENSOR_NUM)
+ return temp;
+
+ if (tt) {
+ for (i = 0; i < ts->soc->num_ttgs; i++) {
+ if (tt[i].id == id)
+ return tt[i].temp;
+ }
+ }
+
+ return temp;
+}
+
static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
{
struct tegra_thermctl_zone *zone = data;
@@ -464,7 +599,16 @@ static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
return ret;
if (type == THERMAL_TRIP_CRITICAL) {
- return thermtrip_program(dev, sg, temp);
+ /*
+ * If the thermtrips property is set in DT, the critical
+ * (THERMTRIP) trip does not need to be programmed to HW here;
+ * otherwise, program it.
+ */
+ if (min_low_temp == tsensor_group_thermtrip_get(ts, sg->id))
+ return thermtrip_program(dev, sg, temp);
+ else
+ return 0;
+
} else if (type == THERMAL_TRIP_HOT) {
int i;
@@ -519,10 +663,60 @@ static int tegra_thermctl_get_trend(void *data, int trip,
return 0;
}
+static void thermal_irq_enable(struct tegra_thermctl_zone *zn)
+{
+ u32 r;
+
+ /* multiple zones could be handling and setting trips at once */
+ mutex_lock(&zn->ts->thermctl_lock);
+ r = readl(zn->ts->regs + THERMCTL_INTR_ENABLE);
+ r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, TH_INTR_UP_DN_EN);
+ writel(r, zn->ts->regs + THERMCTL_INTR_ENABLE);
+ mutex_unlock(&zn->ts->thermctl_lock);
+}
+
+static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
+{
+ u32 r;
+
+ /* multiple zones could be handling and setting trips at once */
+ mutex_lock(&zn->ts->thermctl_lock);
+ r = readl(zn->ts->regs + THERMCTL_INTR_DISABLE);
+ r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, 0);
+ writel(r, zn->ts->regs + THERMCTL_INTR_DISABLE);
+ mutex_unlock(&zn->ts->thermctl_lock);
+}
+
+static int tegra_thermctl_set_trips(void *data, int lo, int hi)
+{
+ struct tegra_thermctl_zone *zone = data;
+ u32 r;
+
+ thermal_irq_disable(zone);
+
+ r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 0);
+ writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+
+ lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
+ hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
+ dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);
+
+ r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
+ r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
+ writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+
+ thermal_irq_enable(zone);
+
+ return 0;
+}
+
static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
.get_temp = tegra_thermctl_get_temp,
.set_trip_temp = tegra_thermctl_set_trip_temp,
.get_trend = tegra_thermctl_get_trend,
+ .set_trips = tegra_thermctl_set_trips,
};
static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
@@ -555,7 +749,8 @@ static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
* @dev: struct device * of the SOC_THERM instance
*
* Configure the SOC_THERM HW trip points, setting "THERMTRIP"
- * "THROTTLE" trip points , using "critical" or "hot" type trip_temp
+ * "THROTTLE" trip points , using "thermtrips", "critical" or "hot"
+ * type trip_temp
* from thermal zone.
* After they have been configured, THERMTRIP or THROTTLE will take
* action when the configured SoC thermal sensor group reaches a
@@ -577,28 +772,23 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct soctherm_throt_cfg *stc;
- int i, trip, temperature;
- int ret;
+ int i, trip, temperature, ret;
- ret = tz->ops->get_crit_temp(tz, &temperature);
- if (ret) {
- dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
- sg->name);
- goto set_throttle;
- }
+ /* Get thermtrips. If missing, try to get critical trips. */
+ temperature = tsensor_group_thermtrip_get(ts, sg->id);
+ if (min_low_temp == temperature)
+ if (tz->ops->get_crit_temp(tz, &temperature))
+ temperature = max_high_temp;
ret = thermtrip_program(dev, sg, temperature);
if (ret) {
- dev_err(dev, "thermtrip: %s: error during enable\n",
- sg->name);
+ dev_err(dev, "thermtrip: %s: error during enable\n", sg->name);
return ret;
}
- dev_info(dev,
- "thermtrip: will shut down when %s reaches %d mC\n",
+ dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n",
sg->name, temperature);
-set_throttle:
ret = get_hot_temp(tz, &trip, &temperature);
if (ret) {
dev_info(dev, "throttrip: %s: missing hot temperature\n",
@@ -606,7 +796,7 @@ set_throttle:
return 0;
}
- for (i = 0; i < THROTTLE_SIZE; i++) {
+ for (i = 0; i < THROTTLE_OC1; i++) {
struct thermal_cooling_device *cdev;
if (!ts->throt_cfgs[i].init)
@@ -638,6 +828,461 @@ set_throttle:
return 0;
}
+static irqreturn_t soctherm_thermal_isr(int irq, void *dev_id)
+{
+ struct tegra_soctherm *ts = dev_id;
+ u32 r;
+
+ /* Case for no lock:
+ * Although interrupts are enabled in set_trips, there is still no need
+ * to lock here because the interrupts are disabled before programming
+ * new trip points. Hence there can't be an interrupt on the same sensor.
+ * An interrupt can however occur on a sensor while trips are being
+ * programmed on a different one. This being a LEVEL interrupt won't
+ * cause a new interrupt but this is taken care of by the re-reading of
+ * the STATUS register in the thread function.
+ */
+ r = readl(ts->regs + THERMCTL_INTR_STATUS);
+ writel(r, ts->regs + THERMCTL_INTR_DISABLE);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * soctherm_thermal_isr_thread() - Handles a thermal interrupt request
+ * @irq: The interrupt number being requested; not used
+ * @dev_id: Opaque pointer to tegra_soctherm;
+ *
+ * Clears the interrupt status register if there are expected
+ * interrupt bits set.
+ * The interrupt(s) are then handled by updating the corresponding
+ * thermal zones.
+ *
+ * An error is logged if any unexpected interrupt bits are set.
+ *
+ * Disabled interrupts are re-enabled.
+ *
+ * Return: %IRQ_HANDLED. Interrupt was handled and no further processing
+ * is needed.
+ */
+static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
+{
+ struct tegra_soctherm *ts = dev_id;
+ struct thermal_zone_device *tz;
+ u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0;
+
+ st = readl(ts->regs + THERMCTL_INTR_STATUS);
+
+ /* deliberately clear expected interrupts handled in SW */
+ cp |= st & TH_INTR_CD0_MASK;
+ cp |= st & TH_INTR_CU0_MASK;
+
+ gp |= st & TH_INTR_GD0_MASK;
+ gp |= st & TH_INTR_GU0_MASK;
+
+ pl |= st & TH_INTR_PD0_MASK;
+ pl |= st & TH_INTR_PU0_MASK;
+
+ me |= st & TH_INTR_MD0_MASK;
+ me |= st & TH_INTR_MU0_MASK;
+
+ ex |= cp | gp | pl | me;
+ if (ex) {
+ writel(ex, ts->regs + THERMCTL_INTR_STATUS);
+ st &= ~ex;
+
+ if (cp) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_CPU];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (gp) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_GPU];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (pl) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_PLLX];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (me) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_MEM];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+ }
+
+ /* deliberately ignore expected interrupts NOT handled in SW */
+ ex |= TH_INTR_IGNORE_MASK;
+ st &= ~ex;
+
+ if (st) {
+ /* Whine about any other unexpected INTR bits still set */
+ pr_err("soctherm: Ignored unexpected INTRs 0x%08x\n", st);
+ writel(st, ts->regs + THERMCTL_INTR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
+ * @ts: pointer to the soctherm instance
+ * @alarm: The soctherm throttle id
+ * @enable: Flag indicating whether to enable or disable the soctherm
+ * over-current interrupt
+ *
+ * Enables the specific over-current pin @alarm to raise an interrupt if the
+ * flag is set and the alarm corresponds to OC1, OC2, OC3, or OC4.
+ */
+static void soctherm_oc_intr_enable(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id alarm,
+ bool enable)
+{
+ u32 r;
+
+ if (!enable)
+ return;
+
+ r = readl(ts->regs + OC_INTR_ENABLE);
+ switch (alarm) {
+ case THROTTLE_OC1:
+ r = REG_SET_MASK(r, OC_INTR_OC1_MASK, 1);
+ break;
+ case THROTTLE_OC2:
+ r = REG_SET_MASK(r, OC_INTR_OC2_MASK, 1);
+ break;
+ case THROTTLE_OC3:
+ r = REG_SET_MASK(r, OC_INTR_OC3_MASK, 1);
+ break;
+ case THROTTLE_OC4:
+ r = REG_SET_MASK(r, OC_INTR_OC4_MASK, 1);
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ writel(r, ts->regs + OC_INTR_ENABLE);
+}
+
+/**
+ * soctherm_handle_alarm() - Handles soctherm alarms
+ * @alarm: The soctherm throttle id
+ *
+ * "Handles" over-current alarms (OC1, OC2, OC3, and OC4) by printing
+ * a warning or informative message.
+ *
+ * Return: 0 on success; -EINVAL if @alarm is not a recognized over-current alarm.
+ */
+static int soctherm_handle_alarm(enum soctherm_throttle_id alarm)
+{
+ int rv = -EINVAL;
+
+ switch (alarm) {
+ case THROTTLE_OC1:
+ pr_debug("soctherm: Successfully handled OC1 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC2:
+ pr_debug("soctherm: Successfully handled OC2 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC3:
+ pr_debug("soctherm: Successfully handled OC3 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC4:
+ pr_debug("soctherm: Successfully handled OC4 alarm\n");
+ rv = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ if (rv)
+ pr_err("soctherm: ERROR in handling %s alarm\n",
+ throt_names[alarm]);
+
+ return rv;
+}
+
+/**
+ * soctherm_edp_isr_thread() - log an over-current interrupt request
+ * @irq: OC irq number; currently not used. See description
+ * @arg: opaque pointer to the struct tegra_soctherm instance
+ *
+ * Over-current events are handled in hardware. This function is called to log
+ * and handle any OC events that happened. Additionally, it checks the
+ * over-current interrupt status register for any bits that are set but were
+ * not expected (i.e. any discrepancy in interrupt status) and logs the
+ * discrepancy.
+ *
+ * Return: %IRQ_HANDLED
+ */
+static irqreturn_t soctherm_edp_isr_thread(int irq, void *arg)
+{
+ struct tegra_soctherm *ts = arg;
+ u32 st, ex, oc1, oc2, oc3, oc4;
+
+ st = readl(ts->regs + OC_INTR_STATUS);
+
+ /* deliberately clear expected interrupts handled in SW */
+ oc1 = st & OC_INTR_OC1_MASK;
+ oc2 = st & OC_INTR_OC2_MASK;
+ oc3 = st & OC_INTR_OC3_MASK;
+ oc4 = st & OC_INTR_OC4_MASK;
+ ex = oc1 | oc2 | oc3 | oc4;
+
+ pr_err("soctherm: OC ALARM 0x%08x\n", ex);
+ if (ex) {
+ writel(st, ts->regs + OC_INTR_STATUS);
+ st &= ~ex;
+
+ if (oc1 && !soctherm_handle_alarm(THROTTLE_OC1))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC1, true);
+
+ if (oc2 && !soctherm_handle_alarm(THROTTLE_OC2))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC2, true);
+
+ if (oc3 && !soctherm_handle_alarm(THROTTLE_OC3))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC3, true);
+
+ if (oc4 && !soctherm_handle_alarm(THROTTLE_OC4))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC4, true);
+
+ if (oc1 && soc_irq_cdata.irq_enable & BIT(0))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 0));
+
+ if (oc2 && soc_irq_cdata.irq_enable & BIT(1))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 1));
+
+ if (oc3 && soc_irq_cdata.irq_enable & BIT(2))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 2));
+
+ if (oc4 && soc_irq_cdata.irq_enable & BIT(3))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 3));
+ }
+
+ if (st) {
+ pr_err("soctherm: Ignored unexpected OC ALARM 0x%08x\n", st);
+ writel(st, ts->regs + OC_INTR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * soctherm_edp_isr() - Disables any active interrupts
+ * @irq: The interrupt request number
+ * @arg: Opaque pointer to an argument
+ *
+ * Writes the over-current interrupt status to the OC_INTR_DISABLE register,
+ * masking any asserted interrupts. Doing this prevents the same interrupts
+ * from triggering this isr repeatedly. The thread woken by this isr will
+ * handle asserted interrupts and subsequently unmask/re-enable them.
+ *
+ * The OC_INTR_DISABLE register indicates which OC interrupts
+ * have been disabled.
+ *
+ * Return: %IRQ_WAKE_THREAD, handler requests to wake the handler thread
+ */
+static irqreturn_t soctherm_edp_isr(int irq, void *arg)
+{
+ struct tegra_soctherm *ts = arg;
+ u32 r;
+
+ if (!ts)
+ return IRQ_NONE;
+
+ r = readl(ts->regs + OC_INTR_STATUS);
+ writel(r, ts->regs + OC_INTR_DISABLE);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * soctherm_oc_irq_lock() - locks the over-current interrupt request
+ * @data: Interrupt request data
+ *
+ * Looks up the chip data from @data and locks the mutex associated with
+ * a particular over-current interrupt request.
+ */
+static void soctherm_oc_irq_lock(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->irq_lock);
+}
+
+/**
+ * soctherm_oc_irq_sync_unlock() - Unlocks the OC interrupt request
+ * @data: Interrupt request data
+ *
+ * Looks up the interrupt request data @data and unlocks the mutex associated
+ * with a particular over-current interrupt request.
+ */
+static void soctherm_oc_irq_sync_unlock(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_unlock(&d->irq_lock);
+}
+
+/**
+ * soctherm_oc_irq_enable() - Enables the SOC_THERM over-current interrupt queue
+ * @data: irq_data structure of the chip
+ *
+ * Sets the irq_enable bit of SOC_THERM allowing SOC_THERM
+ * to respond to over-current interrupts.
+ *
+ */
+static void soctherm_oc_irq_enable(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ d->irq_enable |= BIT(data->hwirq);
+}
+
+/**
+ * soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
+ * @data: The interrupt request information
+ *
+ * Clears the interrupt request enable bit of the overcurrent
+ * interrupt request chip data.
+ *
+ * Return: Nothing is returned (void)
+ */
+static void soctherm_oc_irq_disable(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ d->irq_enable &= ~BIT(data->hwirq);
+}
+
+static int soctherm_oc_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ return 0;
+}
+
+/**
+ * soctherm_oc_irq_map() - SOC_THERM interrupt request domain mapper
+ * @h: Interrupt request domain
+ * @virq: Virtual interrupt request number
+ * @hw: Hardware interrupt request number
+ *
+ * Mapping callback function for SOC_THERM's irq_domain. When a SOC_THERM
+ * interrupt is requested, the irq_domain takes the request's virtual
+ * interrupt number (much like a virtual memory address) and maps it to a
+ * physical hardware interrupt number.
+ *
+ * When a mapping doesn't already exist for a virtual request number, the
+ * irq_domain calls this function to associate the virtual request number with
+ * a hardware request number.
+ *
+ * Return: 0
+ */
+static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct soctherm_oc_irq_chip_data *data = h->host_data;
+
+ irq_set_chip_data(virq, data);
+ irq_set_chip(virq, &data->irq_chip);
+ irq_set_nested_thread(virq, 1);
+ return 0;
+}
+
+/**
+ * soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
+ * @d: Interrupt request domain
+ * @ctrlr: Device node of the interrupt controller
+ * @intspec: Array of u32s from the DT "interrupts" property
+ * @intsize: Number of values inside the intspec array
+ * @out_hwirq: HW IRQ value associated with this interrupt
+ * @out_type: The IRQ SENSE type for this interrupt.
+ *
+ * This Device Tree IRQ specifier translation function translates a
+ * specific "interrupts" entry defined by two DT cells, where the cells
+ * encode the hwirq number + 1 and the Linux IRQ flags. Since the output is
+ * the hwirq number, this function subtracts 1 from the value listed in DT.
+ *
+ * Return: 0
+ */
+static int soctherm_irq_domain_xlate_twocell(struct irq_domain *d,
+ struct device_node *ctrlr, const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 2))
+ return -EINVAL;
+
+ /*
+ * The HW value is 1 index less than the DT IRQ values.
+ * i.e. OC4 goes to HW index 3.
+ */
+ *out_hwirq = intspec[0] - 1;
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
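
A quick way to see the two-cell translation above in action is the hedged sketch below. The specifier values are invented, and the domain and controller-node arguments are unused by the callback, so NULL is passed for them here.

/*
 * Illustration: with a two-cell specifier of <4 IRQ_TYPE_LEVEL_LOW>
 * (values chosen for illustration), the xlate callback yields hwirq 3,
 * i.e. the OC4 alarm.
 */
static void example_oc_xlate(void)
{
        const u32 intspec[] = { 4, IRQ_TYPE_LEVEL_LOW };
        irq_hw_number_t hwirq;
        unsigned int type;

        if (!soctherm_irq_domain_xlate_twocell(NULL, NULL, intspec,
                                               ARRAY_SIZE(intspec),
                                               &hwirq, &type))
                pr_debug("OC%u -> hwirq %lu, type 0x%x\n",
                         intspec[0], (unsigned long)hwirq, type);
}
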
+
+static const struct irq_domain_ops soctherm_oc_domain_ops = {
+ .map = soctherm_oc_irq_map,
+ .xlate = soctherm_irq_domain_xlate_twocell,
+};
+
+/**
+ * soctherm_oc_int_init() - Initial enabling of the over
+ * current interrupts
+ * @np: The devicetree node for soctherm
+ * @num_irqs: The number of new interrupt requests
+ *
+ * Sets the over current interrupt request chip data
+ *
+ * Return: 0 on success or if over-current interrupts are not enabled,
+ * -ENOMEM if the IRQ domain could not be created
+ */
+static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
+{
+ if (!num_irqs) {
+ pr_info("%s(): OC interrupts are not enabled\n", __func__);
+ return 0;
+ }
+
+ mutex_init(&soc_irq_cdata.irq_lock);
+ soc_irq_cdata.irq_enable = 0;
+
+ soc_irq_cdata.irq_chip.name = "soc_therm_oc";
+ soc_irq_cdata.irq_chip.irq_bus_lock = soctherm_oc_irq_lock;
+ soc_irq_cdata.irq_chip.irq_bus_sync_unlock =
+ soctherm_oc_irq_sync_unlock;
+ soc_irq_cdata.irq_chip.irq_disable = soctherm_oc_irq_disable;
+ soc_irq_cdata.irq_chip.irq_enable = soctherm_oc_irq_enable;
+ soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
+ soc_irq_cdata.irq_chip.irq_set_wake = NULL;
+
+ soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs,
+ &soctherm_oc_domain_ops,
+ &soc_irq_cdata);
+
+ if (!soc_irq_cdata.domain) {
+ pr_err("%s: Failed to create IRQ domain\n", __func__);
+ return -ENOMEM;
+ }
+
+ pr_debug("%s(): OC interrupts enabled successfully\n", __func__);
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_FS
static int regs_show(struct seq_file *s, void *data)
{
@@ -929,6 +1574,120 @@ static const struct thermal_cooling_device_ops throt_cooling_ops = {
.set_cur_state = throt_set_cdev_state,
};
+static int soctherm_thermtrips_parse(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
+ const int max_num_prop = ts->soc->num_ttgs * 2;
+ u32 *tlb;
+ int i, j, n, ret;
+
+ if (!tt)
+ return -ENOMEM;
+
+ n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips");
+ if (n <= 0) {
+ dev_info(dev,
+ "missing thermtrips, will use critical trips as shut down temp\n");
+ return n;
+ }
+
+ n = min(max_num_prop, n);
+
+ tlb = devm_kcalloc(&pdev->dev, max_num_prop, sizeof(u32), GFP_KERNEL);
+ if (!tlb)
+ return -ENOMEM;
+ ret = of_property_read_u32_array(dev->of_node, "nvidia,thermtrips",
+ tlb, n);
+ if (ret) {
+ dev_err(dev, "invalid num ele: thermtrips:%d\n", ret);
+ return ret;
+ }
+
+ i = 0;
+ for (j = 0; j < n; j = j + 2) {
+ if (tlb[j] >= TEGRA124_SOCTHERM_SENSOR_NUM)
+ continue;
+
+ tt[i].id = tlb[j];
+ tt[i].temp = tlb[j + 1];
+ i++;
+ }
+
+ return 0;
+}
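
The "nvidia,thermtrips" property parsed above is a flat list of <sensor-id temperature-in-millicelsius> pairs. The hypothetical sketch below shows how two such pairs would land in the per-group table; the sensor-id values and temperatures are placeholders, not binding constants taken from the patch.

/* Hypothetical flat property contents: <sensor-id mC> pairs */
static const u32 example_thermtrips_prop[] = {
        0, 102500,      /* e.g. CPU sensor group trips at 102.5 C */
        2, 103000,      /* e.g. GPU sensor group trips at 103.0 C */
};

/* What soctherm_thermtrips_parse() would fill into ts->soc->thermtrips */
static const struct tsensor_group_thermtrips example_thermtrips[] = {
        { .id = 0, .temp = 102500 },
        { .id = 2, .temp = 103000 },
};
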
+
+static void soctherm_oc_cfg_parse(struct device *dev,
+ struct device_node *np_oc,
+ struct soctherm_throt_cfg *stc)
+{
+ u32 val;
+
+ if (of_property_read_bool(np_oc, "nvidia,polarity-active-low"))
+ stc->oc_cfg.active_low = 1;
+ else
+ stc->oc_cfg.active_low = 0;
+
+ if (!of_property_read_u32(np_oc, "nvidia,count-threshold", &val)) {
+ stc->oc_cfg.intr_en = 1;
+ stc->oc_cfg.alarm_cnt_thresh = val;
+ }
+
+ if (!of_property_read_u32(np_oc, "nvidia,throttle-period-us", &val))
+ stc->oc_cfg.throt_period = val;
+
+ if (!of_property_read_u32(np_oc, "nvidia,alarm-filter", &val))
+ stc->oc_cfg.alarm_filter = val;
+
+ /* BRIEF throttling by default, do not support STICKY */
+ stc->oc_cfg.mode = OC_THROTTLE_MODE_BRIEF;
+}
+
+static int soctherm_throt_cfg_parse(struct device *dev,
+ struct device_node *np,
+ struct soctherm_throt_cfg *stc)
+{
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nvidia,priority", &val);
+ if (ret) {
+ dev_err(dev, "throttle-cfg: %s: invalid priority\n", stc->name);
+ return -EINVAL;
+ }
+ stc->priority = val;
+
+ ret = of_property_read_u32(np, ts->soc->use_ccroc ?
+ "nvidia,cpu-throt-level" :
+ "nvidia,cpu-throt-percent", &val);
+ if (!ret) {
+ if (ts->soc->use_ccroc &&
+ val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
+ stc->cpu_throt_level = val;
+ else if (!ts->soc->use_ccroc && val <= 100)
+ stc->cpu_throt_depth = val;
+ else
+ goto err;
+ } else {
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,gpu-throt-level", &val);
+ if (!ret && val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
+ stc->gpu_throt_level = val;
+ else
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(dev, "throttle-cfg: %s: no throt prop or invalid prop\n",
+ stc->name);
+ return -EINVAL;
+}
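
For orientation, the properties read by soctherm_throt_cfg_parse() above map onto the throttle configuration roughly as in the hypothetical example below; the property values are invented and assume a non-CCROC SoC.

/*
 * A hypothetical "heavy" throttle node carrying nvidia,priority = <100>,
 * nvidia,cpu-throt-percent = <85> and nvidia,gpu-throt-level =
 * <TEGRA_SOCTHERM_THROT_LEVEL_HIGH> would be parsed into roughly this:
 */
static const struct soctherm_throt_cfg example_heavy_cfg = {
        .name            = "heavy",
        .priority        = 100,
        .cpu_throt_depth = 85,
        .gpu_throt_level = TEGRA_SOCTHERM_THROT_LEVEL_HIGH,
};
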
+
/**
* soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
* and register them as cooling devices.
@@ -939,8 +1698,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct device_node *np_stc, *np_stcc;
const char *name;
- u32 val;
- int i, r;
+ int i;
for (i = 0; i < THROTTLE_SIZE; i++) {
ts->throt_cfgs[i].name = throt_names[i];
@@ -958,6 +1716,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
for_each_child_of_node(np_stc, np_stcc) {
struct soctherm_throt_cfg *stc;
struct thermal_cooling_device *tcd;
+ int err;
name = np_stcc->name;
stc = find_throttle_cfg_by_name(ts, name);
@@ -967,51 +1726,34 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
continue;
}
- r = of_property_read_u32(np_stcc, "nvidia,priority", &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing priority\n", name);
- continue;
+ if (stc->init) {
+ dev_err(dev, "throttle-cfg: %s: redefined!\n", name);
+ of_node_put(np_stcc);
+ break;
}
- stc->priority = val;
-
- if (ts->soc->use_ccroc) {
- r = of_property_read_u32(np_stcc,
- "nvidia,cpu-throt-level",
- &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing cpu-throt-level\n",
- name);
- continue;
- }
- stc->cpu_throt_level = val;
+
+ err = soctherm_throt_cfg_parse(dev, np_stcc, stc);
+ if (err)
+ continue;
+
+ if (stc->id >= THROTTLE_OC1) {
+ soctherm_oc_cfg_parse(dev, np_stcc, stc);
+ stc->init = true;
} else {
- r = of_property_read_u32(np_stcc,
- "nvidia,cpu-throt-percent",
- &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing cpu-throt-percent\n",
- name);
- continue;
- }
- stc->cpu_throt_depth = val;
- }
- tcd = thermal_of_cooling_device_register(np_stcc,
+ tcd = thermal_of_cooling_device_register(np_stcc,
(char *)name, ts,
&throt_cooling_ops);
- of_node_put(np_stcc);
- if (IS_ERR_OR_NULL(tcd)) {
- dev_err(dev,
- "throttle-cfg: %s: failed to register cooling device\n",
- name);
- continue;
+ if (IS_ERR_OR_NULL(tcd)) {
+ dev_err(dev,
+ "throttle-cfg: %s: failed to register cooling device\n",
+ name);
+ continue;
+ }
+ stc->cdev = tcd;
+ stc->init = true;
}
- stc->cdev = tcd;
- stc->init = true;
}
of_node_put(np_stc);
@@ -1141,6 +1883,50 @@ static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
}
/**
+ * throttlectl_gpu_level_select() - selects throttling level for GPU
+ * @ts: pointer to the soctherm instance
+ * @throt: the LIGHT/HEAVY throttle event id
+ *
+ * This function programs soctherm's interface to GK20a NV_THERM to select
+ * pre-configured "Low", "Medium" or "Heavy" throttle levels.
+ */
+static void throttlectl_gpu_level_select(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r, level, throt_vect;
+
+ level = ts->throt_cfgs[throt].gpu_throt_level;
+ throt_vect = THROT_LEVEL_TO_DEPTH(level);
+ r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_GPU_MASK, throt_vect);
+ writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
+}
+
+static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r;
+ struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg;
+
+ if (oc->mode == OC_THROTTLE_MODE_DISABLED)
+ return -EINVAL;
+
+ r = REG_SET_MASK(0, OC1_CFG_HW_RESTORE_MASK, 1);
+ r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode);
+ r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low);
+ r = REG_SET_MASK(r, OC1_CFG_EN_THROTTLE_MASK, 1);
+ writel(r, ts->regs + ALARM_CFG(throt));
+ writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt));
+ writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt));
+ writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt));
+ soctherm_oc_intr_enable(ts, throt, oc->intr_en);
+
+ return 0;
+}
+
+/**
* soctherm_throttle_program() - programs pulse skippers' configuration
* @throt: the LIGHT/HEAVY of the throttle event id.
*
@@ -1156,12 +1942,17 @@ static void soctherm_throttle_program(struct tegra_soctherm *ts,
if (!stc.init)
return;
+ if ((throt >= THROTTLE_OC1) && (soctherm_oc_cfg_program(ts, throt)))
+ return;
+
/* Setup PSKIP parameters */
if (ts->soc->use_ccroc)
throttlectl_cpu_level_select(ts, throt);
else
throttlectl_cpu_mn(ts, throt);
+ throttlectl_gpu_level_select(ts, throt);
+
r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));
@@ -1215,6 +2006,57 @@ static void tegra_soctherm_throttle(struct device *dev)
writel(v, ts->regs + THERMCTL_STATS_CTL);
}
+static int soctherm_interrupts_init(struct platform_device *pdev,
+ struct tegra_soctherm *tegra)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "soctherm_oc_int_init failed\n");
+ return ret;
+ }
+
+ tegra->thermal_irq = platform_get_irq(pdev, 0);
+ if (tegra->thermal_irq < 0) {
+ dev_dbg(&pdev->dev, "get 'thermal_irq' failed.\n");
+ return 0;
+ }
+
+ tegra->edp_irq = platform_get_irq(pdev, 1);
+ if (tegra->edp_irq < 0) {
+ dev_dbg(&pdev->dev, "get 'edp_irq' failed.\n");
+ return 0;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ tegra->thermal_irq,
+ soctherm_thermal_isr,
+ soctherm_thermal_isr_thread,
+ IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ tegra);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq 'thermal_irq' failed.\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ tegra->edp_irq,
+ soctherm_edp_isr,
+ soctherm_edp_isr_thread,
+ IRQF_ONESHOT,
+ "soctherm_edp",
+ tegra);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq 'edp_irq' failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static void soctherm_init(struct platform_device *pdev)
{
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
@@ -1292,6 +2134,7 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (!tegra)
return -ENOMEM;
+ mutex_init(&tegra->thermctl_lock);
dev_set_drvdata(&pdev->dev, tegra);
tegra->soc = soc;
@@ -1370,6 +2213,8 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (err)
return err;
+ soctherm_thermtrips_parse(pdev);
+
soctherm_init_hw_throt_cdev(pdev);
soctherm_init(pdev);
@@ -1406,6 +2251,8 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
goto disable_clocks;
}
+ err = soctherm_interrupts_init(pdev, tegra);
+
soctherm_debug_init(pdev);
return 0;
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
index e96ca73fd780..70501e73d586 100644
--- a/drivers/thermal/tegra/soctherm.h
+++ b/drivers/thermal/tegra/soctherm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
@@ -29,6 +30,14 @@
#define THERMCTL_THERMTRIP_CTL 0x80
/* BITs are defined in device file */
+#define THERMCTL_INTR_ENABLE 0x88
+#define THERMCTL_INTR_DISABLE 0x8c
+#define TH_INTR_UP_DN_EN 0x3
+#define THERM_IRQ_MEM_MASK (TH_INTR_UP_DN_EN << 24)
+#define THERM_IRQ_GPU_MASK (TH_INTR_UP_DN_EN << 16)
+#define THERM_IRQ_CPU_MASK (TH_INTR_UP_DN_EN << 8)
+#define THERM_IRQ_TSENSE_MASK (TH_INTR_UP_DN_EN << 0)
+
#define SENSOR_PDIV 0x1c0
#define SENSOR_PDIV_CPU_MASK (0xf << 12)
#define SENSOR_PDIV_GPU_MASK (0xf << 8)
@@ -70,6 +79,7 @@ struct tegra_tsensor_group {
u32 thermtrip_enable_mask;
u32 thermtrip_any_en_mask;
u32 thermtrip_threshold_mask;
+ u32 thermctl_isr_mask;
u16 thermctl_lvl0_offset;
u32 thermctl_lvl0_up_thresh_mask;
u32 thermctl_lvl0_dn_thresh_mask;
@@ -92,6 +102,11 @@ struct tegra_tsensor {
const struct tegra_tsensor_group *group;
};
+struct tsensor_group_thermtrips {
+ u8 id;
+ u32 temp;
+};
+
struct tegra_soctherm_fuse {
u32 fuse_base_cp_mask, fuse_base_cp_shift;
u32 fuse_base_ft_mask, fuse_base_ft_shift;
@@ -113,6 +128,7 @@ struct tegra_soctherm_soc {
const int thresh_grain;
const unsigned int bptt;
const bool use_ccroc;
+ struct tsensor_group_thermtrips *thermtrips;
};
int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
index 36768630f78c..20ad27f4d1a1 100644
--- a/drivers/thermal/tegra/tegra124-soctherm.c
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -55,6 +56,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -73,6 +75,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -89,6 +92,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -107,6 +111,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
index 97fa30501eb1..b76308fdad9e 100644
--- a/drivers/thermal/tegra/tegra132-soctherm.c
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -55,6 +56,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -73,6 +75,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -89,6 +92,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -107,6 +111,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
index ad53169a8e95..d31b50050faa 100644
--- a/drivers/thermal/tegra/tegra210-soctherm.c
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -56,6 +57,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -74,6 +76,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -90,6 +93,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -108,6 +112,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -203,6 +208,13 @@ static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
.fuse_spare_realignment = 0,
};
+struct tsensor_group_thermtrips tegra210_tsensor_thermtrips[] = {
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+};
+
const struct tegra_soctherm_soc tegra210_soctherm = {
.tsensors = tegra210_tsensors,
.num_tsensors = ARRAY_SIZE(tegra210_tsensors),
@@ -212,4 +224,5 @@ const struct tegra_soctherm_soc tegra210_soctherm = {
.thresh_grain = TEGRA210_THRESH_GRAIN,
.bptt = TEGRA210_BPTT,
.use_ccroc = false,
+ .thermtrips = tegra210_tsensor_thermtrips,
};
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index e22fc60ad36d..deb244f12de4 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -29,6 +29,9 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
int temp, temp_hi, temp_lo, adc_hi, adc_lo;
int i;
+ if (!gti->lookup_table)
+ return val;
+
for (i = 0; i < gti->nlookup_table; i++) {
if (val >= gti->lookup_table[2 * i + 1])
break;
@@ -81,9 +84,9 @@ static int gadc_thermal_read_linear_lookup_table(struct device *dev,
ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
sizeof(u32));
- if (ntable < 0) {
- dev_err(dev, "Lookup table is not provided\n");
- return ntable;
+ if (ntable <= 0) {
+ dev_notice(dev, "no lookup table, assuming ADC channel returns millicelsius\n");
+ return 0;
}
if (ntable % 2) {
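
With the change above, a missing "temperature-lookup-table" is no longer treated as an error: the raw reading is passed through as millicelsius. When the table is present it is consumed as <temperature adc> pairs; the sketch below shows a hypothetical table shape with invented, board-specific values.

/*
 * Hypothetical "temperature-lookup-table" contents, expressed as a C
 * array of <temperature-in-mC, adc-reading> pairs as consumed by
 * gadc_thermal_adc_to_temp(); real values are board specific.
 */
static const u32 example_lookup_table[] = {
        120000, 18,
        100000, 24,
         80000, 33,
         60000, 47,
         40000, 70,
};
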
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6590bb5cb688..e0b530603db6 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1046,6 +1046,55 @@ thermal_of_cooling_device_register(struct device_node *np,
}
EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
+static void thermal_cooling_device_release(struct device *dev, void *res)
+{
+ thermal_cooling_device_unregister(
+ *(struct thermal_cooling_device **)res);
+}
+
+/**
+ * devm_thermal_of_cooling_device_register() - register an OF thermal cooling
+ * device
+ * @dev: a valid struct device pointer of a sensor device.
+ * @np: a pointer to a device tree node.
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ *
+ * This function will register a cooling device with device tree node reference.
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
+ */
+struct thermal_cooling_device *
+devm_thermal_of_cooling_device_register(struct device *dev,
+ struct device_node *np,
+ char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{
+ struct thermal_cooling_device **ptr, *tcd;
+
+ ptr = devres_alloc(thermal_cooling_device_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ tcd = __thermal_cooling_device_register(np, type, devdata, ops);
+ if (IS_ERR(tcd)) {
+ devres_free(ptr);
+ return tcd;
+ }
+
+ *ptr = tcd;
+ devres_add(dev, ptr);
+
+ return tcd;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_of_cooling_device_register);
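
A plausible usage pattern for the new devres-managed helper is sketched below; the driver name, data structure and cooling ops are invented. The cooling device is unregistered automatically when the device is unbound.

/* Hypothetical probe path using the devres-managed registration */
static int example_fan_probe(struct platform_device *pdev)
{
        struct example_fan *fan;                /* invented driver data */
        struct thermal_cooling_device *cdev;

        fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
        if (!fan)
                return -ENOMEM;

        cdev = devm_thermal_of_cooling_device_register(&pdev->dev,
                                                       pdev->dev.of_node,
                                                       "example-fan", fan,
                                                       &example_fan_cooling_ops);
        if (IS_ERR(cdev))
                return PTR_ERR(cdev);

        /* no explicit thermal_cooling_device_unregister() needed */
        return 0;
}
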
+
static void __unbind(struct thermal_zone_device *tz, int mask,
struct thermal_cooling_device *cdev)
{
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
new file mode 100644
index 000000000000..de3cceea23bc
--- /dev/null
+++ b/drivers/thermal/thermal_mmio.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+struct thermal_mmio {
+ void __iomem *mmio_base;
+ u32 (*read_mmio)(void __iomem *mmio_base);
+ u32 mask;
+ int factor;
+};
+
+static u32 thermal_mmio_readb(void __iomem *mmio_base)
+{
+ return readb(mmio_base);
+}
+
+static int thermal_mmio_get_temperature(void *private, int *temp)
+{
+ int t;
+ struct thermal_mmio *sensor =
+ (struct thermal_mmio *)private;
+
+ t = sensor->read_mmio(sensor->mmio_base) & sensor->mask;
+ t *= sensor->factor;
+
+ *temp = t;
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops thermal_mmio_ops = {
+ .get_temp = thermal_mmio_get_temperature,
+};
+
+static int thermal_mmio_probe(struct platform_device *pdev)
+{
+ struct resource *resource;
+ struct thermal_mmio *sensor;
+ int (*sensor_init_func)(struct platform_device *pdev,
+ struct thermal_mmio *sensor);
+ struct thermal_zone_device *thermal_zone;
+ int ret;
+ int temperature;
+
+ sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource) {
+ dev_err(&pdev->dev,
+ "failed to get platform memory resource\n");
+ return -ENODEV;
+ }
+
+ sensor->mmio_base = devm_ioremap_resource(&pdev->dev, resource);
+ if (IS_ERR(sensor->mmio_base)) {
+ dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
+ PTR_ERR(sensor->mmio_base));
+ return PTR_ERR(sensor->mmio_base);
+ }
+
+ sensor_init_func = device_get_match_data(&pdev->dev);
+ if (sensor_init_func) {
+ ret = sensor_init_func(pdev, sensor);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to initialize sensor (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ thermal_zone = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ 0,
+ sensor,
+ &thermal_mmio_ops);
+ if (IS_ERR(thermal_zone)) {
+ dev_err(&pdev->dev,
+ "failed to register sensor (%ld)\n",
+ PTR_ERR(thermal_zone));
+ return PTR_ERR(thermal_zone);
+ }
+
+ thermal_mmio_get_temperature(sensor, &temperature);
+ dev_info(&pdev->dev,
+ "thermal mmio sensor %s registered, current temperature: %d\n",
+ pdev->name, temperature);
+
+ return 0;
+}
+
+static int al_thermal_init(struct platform_device *pdev,
+ struct thermal_mmio *sensor)
+{
+ sensor->read_mmio = thermal_mmio_readb;
+ sensor->mask = 0xff;
+ sensor->factor = 1000;
+
+ return 0;
+}
+
+static const struct of_device_id thermal_mmio_id_table[] = {
+ { .compatible = "amazon,al-thermal", .data = al_thermal_init},
+ {}
+};
+MODULE_DEVICE_TABLE(of, thermal_mmio_id_table);
+
+static struct platform_driver thermal_mmio_driver = {
+ .probe = thermal_mmio_probe,
+ .driver = {
+ .name = "thermal-mmio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(thermal_mmio_id_table),
+ },
+};
+
+module_platform_driver(thermal_mmio_driver);
+
+MODULE_AUTHOR("Talel Shenhar <talel@amazon.com>");
+MODULE_DESCRIPTION("Thermal MMIO Driver");
+MODULE_LICENSE("GPL v2");
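
The driver above resolves a per-compatible init callback through device_get_match_data(). The hedged sketch below shows how another SoC could plug into that pattern; the compatible string, register width and scale factor are invented.

/* Hypothetical wider-register variant of the per-SoC init callback */
static u32 example_thermal_mmio_readl(void __iomem *mmio_base)
{
        return readl(mmio_base);
}

static int example_soc_thermal_init(struct platform_device *pdev,
                                    struct thermal_mmio *sensor)
{
        sensor->read_mmio = example_thermal_mmio_readl;
        sensor->mask = 0xfff;           /* 12-bit reading */
        sensor->factor = 250;           /* 0.25 degC per LSB -> millicelsius */

        return 0;
}

/*
 * It would then be added to thermal_mmio_id_table:
 *      { .compatible = "vendor,example-thermal", .data = example_soc_thermal_init },
 */
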
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index f2f0de27252b..833bdee3cec7 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
-thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o
+thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 9553305c63ea..8bf8e031f0bc 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -13,6 +13,7 @@
#define CAP_OFFSET_MAX 0xff
#define VSE_CAP_OFFSET_MAX 0xffff
+#define TMU_ACCESS_EN BIT(20)
struct tb_cap_any {
union {
@@ -22,28 +23,53 @@ struct tb_cap_any {
};
} __packed;
-/**
- * tb_port_find_cap() - Find port capability
- * @port: Port to find the capability for
- * @cap: Capability to look
- *
- * Returns offset to start of capability or %-ENOENT if no such
- * capability was found. Negative errno is returned if there was an
- * error.
- */
-int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+static int tb_port_enable_tmu(struct tb_port *port, bool enable)
{
- u32 offset;
+ struct tb_switch *sw = port->sw;
+ u32 value, offset;
+ int ret;
/*
- * DP out adapters claim to implement TMU capability but in
- * reality they do not so we hard code the adapter specific
- * capability offset here.
+ * Legacy devices need to have TMU access enabled before port
+ * space can be fully accessed.
*/
- if (port->config.type == TB_TYPE_DP_HDMI_OUT)
- offset = 0x39;
+ if (tb_switch_is_lr(sw))
+ offset = 0x26;
+ else if (tb_switch_is_er(sw))
+ offset = 0x2a;
else
- offset = 0x1;
+ return 0;
+
+ ret = tb_sw_read(sw, &value, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ value |= TMU_ACCESS_EN;
+ else
+ value &= ~TMU_ACCESS_EN;
+
+ return tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
+}
+
+static void tb_port_dummy_read(struct tb_port *port)
+{
+ /*
+ * When reading from the next capability pointer location in port
+ * config space, the read data is not cleared on LR. To avoid
+ * reading stale data on the next read, perform one dummy read after
+ * the port capabilities are walked.
+ */
+ if (tb_switch_is_lr(port->sw)) {
+ u32 dummy;
+
+ tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
+ }
+}
+
+static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+{
+ u32 offset = 1;
do {
struct tb_cap_any header;
@@ -62,6 +88,31 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
return -ENOENT;
}
+/**
+ * tb_port_find_cap() - Find port capability
+ * @port: Port to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+{
+ int ret;
+
+ ret = tb_port_enable_tmu(port, true);
+ if (ret)
+ return ret;
+
+ ret = __tb_port_find_cap(port, cap);
+
+ tb_port_dummy_read(port);
+ tb_port_enable_tmu(port, false);
+
+ return ret;
+}
+
static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
int offset = sw->config.first_cap_offset;
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 73b386de4d15..2427d73be731 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -720,7 +720,7 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
.port = port,
.error = error,
};
- tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
+ tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 7416bdbd8576..b7980c856898 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -678,7 +678,6 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
}
shash->tfm = tfm;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
memset(hmac, 0, sizeof(hmac));
ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index e3fc920af682..f1c10378fa3e 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -42,7 +42,6 @@
#define ICM_TIMEOUT 5000 /* ms */
#define ICM_APPROVE_TIMEOUT 10000 /* ms */
#define ICM_MAX_LINK 4
-#define ICM_MAX_DEPTH 6
/**
* struct icm - Internal connection manager private data
@@ -469,10 +468,15 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
pm_runtime_get_sync(&parent_sw->dev);
sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
- if (!sw)
+ if (IS_ERR(sw))
goto out;
sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
+ if (!sw->uuid) {
+ tb_sw_warn(sw, "cannot allocate memory for switch\n");
+ tb_switch_put(sw);
+ goto out;
+ }
sw->connection_id = connection_id;
sw->connection_key = connection_key;
sw->link = link;
@@ -709,7 +713,7 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
- if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+ if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
return;
}
@@ -739,7 +743,7 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
- if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+ if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
return;
}
@@ -793,9 +797,11 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
* connected another host to the same port, remove the switch
* first.
*/
- sw = get_switch_at_route(tb->root_switch, route);
- if (sw)
+ sw = tb_switch_find_by_route(tb, route);
+ if (sw) {
remove_switch(sw);
+ tb_switch_put(sw);
+ }
sw = tb_switch_find_by_link_depth(tb, link, depth);
if (!sw) {
@@ -1138,9 +1144,11 @@ icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
* connected another host to the same port, remove the switch
* first.
*/
- sw = get_switch_at_route(tb->root_switch, route);
- if (sw)
+ sw = tb_switch_find_by_route(tb, route);
+ if (sw) {
remove_switch(sw);
+ tb_switch_put(sw);
+ }
sw = tb_switch_find_by_route(tb, get_parent_route(route));
if (!sw) {
@@ -1191,6 +1199,8 @@ static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
return parent;
}
@@ -1560,7 +1570,7 @@ static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
if (val & REG_FW_STS_ICM_EN)
return 0;
- dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
+ dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
ret = icm_firmware_reset(tb, nhi);
if (ret)
@@ -1753,16 +1763,10 @@ static void icm_unplug_children(struct tb_switch *sw)
for (i = 1; i <= sw->config.max_port_number; i++) {
struct tb_port *port = &sw->ports[i];
- if (tb_is_upstream_port(port))
- continue;
- if (port->xdomain) {
+ if (port->xdomain)
port->xdomain->is_unplugged = true;
- continue;
- }
- if (!port->remote)
- continue;
-
- icm_unplug_children(port->remote->sw);
+ else if (tb_port_has_remote(port))
+ icm_unplug_children(port->remote->sw);
}
}
@@ -1773,23 +1777,16 @@ static void icm_free_unplugged_children(struct tb_switch *sw)
for (i = 1; i <= sw->config.max_port_number; i++) {
struct tb_port *port = &sw->ports[i];
- if (tb_is_upstream_port(port))
- continue;
-
if (port->xdomain && port->xdomain->is_unplugged) {
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
- continue;
- }
-
- if (!port->remote)
- continue;
-
- if (port->remote->sw->is_unplugged) {
- tb_switch_remove(port->remote->sw);
- port->remote = NULL;
- } else {
- icm_free_unplugged_children(port->remote->sw);
+ } else if (tb_port_has_remote(port)) {
+ if (port->remote->sw->is_unplugged) {
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ } else {
+ icm_free_unplugged_children(port->remote->sw);
+ }
}
}
}
@@ -1853,8 +1850,8 @@ static int icm_start(struct tb *tb)
tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
else
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
- if (!tb->root_switch)
- return -ENODEV;
+ if (IS_ERR(tb->root_switch))
+ return PTR_ERR(tb->root_switch);
/*
* NVM upgrade has not been tested on Apple systems and they
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
new file mode 100644
index 000000000000..ae1e92611c3e
--- /dev/null
+++ b/drivers/thunderbolt/lc.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt link controller support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include "tb.h"
+
+/**
+ * tb_lc_read_uuid() - Read switch UUID from link controller common register
+ * @sw: Switch whose UUID is read
+ * @uuid: UUID is placed here
+ */
+int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
+{
+ if (!sw->cap_lc)
+ return -EINVAL;
+ return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
+}
+
+static int read_lc_desc(struct tb_switch *sw, u32 *desc)
+{
+ if (!sw->cap_lc)
+ return -EINVAL;
+ return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
+}
+
+static int find_port_lc_cap(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int start, phys, ret, size;
+ u32 desc;
+
+ ret = read_lc_desc(sw, &desc);
+ if (ret)
+ return ret;
+
+ /* Start of port LC registers */
+ start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+ size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+ phys = tb_phy_port_from_link(port->port);
+
+ return sw->cap_lc + start + phys * size;
+}
+
+static int tb_lc_configure_lane(struct tb_port *port, bool configure)
+{
+ bool upstream = tb_is_upstream_port(port);
+ struct tb_switch *sw = port->sw;
+ u32 ctrl, lane;
+ int cap, ret;
+
+ if (sw->generation < 2)
+ return 0;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+
+ /* Resolve correct lane */
+ if (port->port % 2)
+ lane = TB_LC_SX_CTRL_L1C;
+ else
+ lane = TB_LC_SX_CTRL_L2C;
+
+ if (configure) {
+ ctrl |= lane;
+ if (upstream)
+ ctrl |= TB_LC_SX_CTRL_UPSTREAM;
+ } else {
+ ctrl &= ~lane;
+ if (upstream)
+ ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
+ }
+
+ return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_configure_link() - Let LC know about configured link
+ * @sw: Switch that is being added
+ *
+ * Informs the LCs of both the parent switch and @sw that there is an
+ * established link between the two.
+ */
+int tb_lc_configure_link(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!sw->config.enabled || !tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
+
+ /* Configure parent link toward this switch */
+ ret = tb_lc_configure_lane(down, true);
+ if (ret)
+ return ret;
+
+ /* Configure upstream link from this switch to the parent */
+ ret = tb_lc_configure_lane(up, true);
+ if (ret)
+ tb_lc_configure_lane(down, false);
+
+ return ret;
+}
+
+/**
+ * tb_lc_unconfigure_link() - Let LC know about unconfigured link
+ * @sw: Switch to unconfigure
+ *
+ * Informs the LCs of both the parent switch and @sw that the link
+ * between the two no longer exists.
+ */
+void tb_lc_unconfigure_link(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+
+ if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
+
+ tb_lc_configure_lane(up, false);
+ tb_lc_configure_lane(down, false);
+}
+
+/**
+ * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
+ * @sw: Switch to set sleep
+ *
+ * Let the switch link controllers know that the switch is going to
+ * sleep.
+ */
+int tb_lc_set_sleep(struct tb_switch *sw)
+{
+ int start, size, nlc, ret, i;
+ u32 desc;
+
+ if (sw->generation < 2)
+ return 0;
+
+ ret = read_lc_desc(sw, &desc);
+ if (ret)
+ return ret;
+
+ /* Figure out number of link controllers */
+ nlc = desc & TB_LC_DESC_NLC_MASK;
+ start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+ size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+
+ /* For each link controller set sleep bit */
+ for (i = 0; i < nlc; i++) {
+ unsigned int offset = sw->cap_lc + start + i * size;
+ u32 ctrl;
+
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
+ offset + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+
+ ctrl |= TB_LC_SX_CTRL_SLP;
+ ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
+ offset + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
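
The three helpers above are intended to be called from the switch lifecycle paths. A hedged sketch of plausible call sites follows; the wrapper function names are invented and only illustrate where each helper fits.

/* Hypothetical call sites for the link controller helpers */
static int example_switch_added(struct tb_switch *sw)
{
        /* after the link to the parent is established */
        return tb_lc_configure_link(sw);
}

static void example_switch_removed(struct tb_switch *sw)
{
        /* before the link to the parent goes away */
        tb_lc_unconfigure_link(sw);
}

static int example_switch_suspend(struct tb_switch *sw)
{
        /* last step before putting the domain to sleep */
        return tb_lc_set_sleep(sw);
}
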
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 9aa44f9762a3..cac1ead5e302 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -27,8 +27,7 @@
* use this ring for anything else.
*/
#define RING_E2E_UNUSED_HOPID 2
-/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
-#define RING_FIRST_USABLE_HOPID 8
+#define RING_FIRST_USABLE_HOPID TB_PATH_MIN_HOPID
/*
* Minimal number of vectors when we use MSI-X. Two for control channel
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index a11956522bac..afe5f8391ebf 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -1,62 +1,330 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - path/tunnel functionality
+ * Thunderbolt driver - path/tunnel functionality
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
*/
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
#include "tb.h"
-
-static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
+static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
{
- tb_port_dbg(port, " Hop through port %d to hop %d (%s)\n",
- hop->out_port, hop->next_hop,
- hop->enable ? "enabled" : "disabled");
+ const struct tb_port *port = hop->in_port;
+
+ tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
+ hop->in_hop_index, regs->out_port, regs->next_hop);
tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
- hop->weight, hop->priority,
- hop->initial_credits, hop->drop_packages);
+ regs->weight, regs->priority,
+ regs->initial_credits, regs->drop_packages);
tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
- hop->counter_enable, hop->counter);
+ regs->counter_enable, regs->counter);
tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
- hop->ingress_fc, hop->egress_fc,
- hop->ingress_shared_buffer, hop->egress_shared_buffer);
+ regs->ingress_fc, regs->egress_fc,
+ regs->ingress_shared_buffer, regs->egress_shared_buffer);
tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
- hop->unknown1, hop->unknown2, hop->unknown3);
+ regs->unknown1, regs->unknown2, regs->unknown3);
+}
+
+static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
+ int dst_hopid)
+{
+ struct tb_port *port, *out_port = NULL;
+ struct tb_regs_hop hop;
+ struct tb_switch *sw;
+ int i, ret, hopid;
+
+ hopid = src_hopid;
+ port = src;
+
+ for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
+ sw = port->sw;
+
+ ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
+ if (ret) {
+ tb_port_warn(port, "failed to read path at %d\n", hopid);
+ return NULL;
+ }
+
+ if (!hop.enable)
+ return NULL;
+
+ out_port = &sw->ports[hop.out_port];
+ hopid = hop.next_hop;
+ port = out_port->remote;
+ }
+
+ return out_port && hopid == dst_hopid ? out_port : NULL;
+}
+
+static int tb_path_find_src_hopid(struct tb_port *src,
+ const struct tb_port *dst, int dst_hopid)
+{
+ struct tb_port *out;
+ int i;
+
+ for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
+ out = tb_path_find_dst_port(src, i, dst_hopid);
+ if (out == dst)
+ return i;
+ }
+
+ return 0;
+}
+
+/**
+ * tb_path_discover() - Discover a path
+ * @src: First input port of a path
+ * @src_hopid: Starting HopID of a path (%-1 if don't care)
+ * @dst: Expected destination port of the path (%NULL if don't care)
+ * @dst_hopid: HopID to the @dst (%-1 if don't care)
+ * @last: Last port is filled here if not %NULL
+ * @name: Name of the path
+ *
+ * Follows a path starting from @src and @src_hopid to the last output
+ * port of the path. Allocates HopIDs for the visited ports. Call
+ * tb_path_free() to release the path and allocated HopIDs when the path
+ * is not needed anymore.
+ *
+ * Note that the function also discovers incomplete paths so the caller should check
+ * that the @dst port is the expected one. If it is not, the path can be
+ * cleaned up by calling tb_path_deactivate() before tb_path_free().
+ *
+ * Return: Discovered path on success, %NULL in case of failure
+ */
+struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
+ struct tb_port *dst, int dst_hopid,
+ struct tb_port **last, const char *name)
+{
+ struct tb_port *out_port;
+ struct tb_regs_hop hop;
+ struct tb_path *path;
+ struct tb_switch *sw;
+ struct tb_port *p;
+ size_t num_hops;
+ int ret, i, h;
+
+ if (src_hopid < 0 && dst) {
+ /*
+ * For incomplete paths the intermediate HopID can be
+ * different from the one used by the protocol adapter
+ * so in that case find a path that ends on @dst with
+ * matching @dst_hopid. That should give us the correct
+ * HopID for the @src.
+ */
+ src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
+ if (!src_hopid)
+ return NULL;
+ }
+
+ p = src;
+ h = src_hopid;
+ num_hops = 0;
+
+ for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
+ sw = p->sw;
+
+ ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
+ if (ret) {
+ tb_port_warn(p, "failed to read path at %d\n", h);
+ return NULL;
+ }
+
+ /* If the hop is not enabled we got an incomplete path */
+ if (!hop.enable)
+ break;
+
+ out_port = &sw->ports[hop.out_port];
+ if (last)
+ *last = out_port;
+
+ h = hop.next_hop;
+ p = out_port->remote;
+ num_hops++;
+ }
+
+ path = kzalloc(sizeof(*path), GFP_KERNEL);
+ if (!path)
+ return NULL;
+
+ path->name = name;
+ path->tb = src->sw->tb;
+ path->path_length = num_hops;
+ path->activated = true;
+
+ path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
+ if (!path->hops) {
+ kfree(path);
+ return NULL;
+ }
+
+ p = src;
+ h = src_hopid;
+
+ for (i = 0; i < num_hops; i++) {
+ int next_hop;
+
+ sw = p->sw;
+
+ ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
+ if (ret) {
+ tb_port_warn(p, "failed to read path at %d\n", h);
+ goto err;
+ }
+
+ if (tb_port_alloc_in_hopid(p, h, h) < 0)
+ goto err;
+
+ out_port = &sw->ports[hop.out_port];
+ next_hop = hop.next_hop;
+
+ if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
+ tb_port_release_in_hopid(p, h);
+ goto err;
+ }
+
+ path->hops[i].in_port = p;
+ path->hops[i].in_hop_index = h;
+ path->hops[i].in_counter_index = -1;
+ path->hops[i].out_port = out_port;
+ path->hops[i].next_hop_index = next_hop;
+
+ h = next_hop;
+ p = out_port->remote;
+ }
+
+ return path;
+
+err:
+ tb_port_warn(src, "failed to discover path starting at HopID %d\n",
+ src_hopid);
+ tb_path_free(path);
+ return NULL;
}
/**
- * tb_path_alloc() - allocate a thunderbolt path
+ * tb_path_alloc() - allocate a thunderbolt path between two ports
+ * @tb: Domain pointer
+ * @src: Source port of the path
+ * @src_hopid: HopID used for the first ingress port in the path
+ * @dst: Destination port of the path
+ * @dst_hopid: HopID used for the last egress port in the path
+ * @link_nr: Preferred link if there are dual links on the path
+ * @name: Name of the path
+ *
+ * Creates a path between two ports starting with given @src_hopid. Reserves
+ * HopIDs for each port (they can be different from @src_hopid depending on
+ * how many HopIDs each port has already reserved). If there are dual
+ * links on the path, prioritizes using @link_nr.
*
* Return: Returns a tb_path on success or NULL on failure.
*/
-struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)
+struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
+ struct tb_port *dst, int dst_hopid, int link_nr,
+ const char *name)
{
- struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL);
+ struct tb_port *in_port, *out_port;
+ int in_hopid, out_hopid;
+ struct tb_path *path;
+ size_t num_hops;
+ int i, ret;
+
+ path = kzalloc(sizeof(*path), GFP_KERNEL);
if (!path)
return NULL;
+
+ /*
+ * Number of hops on a path is the distance between the two
+ * switches plus the source adapter port.
+ */
+ num_hops = abs(tb_route_length(tb_route(src->sw)) -
+ tb_route_length(tb_route(dst->sw))) + 1;
+
path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
if (!path->hops) {
kfree(path);
return NULL;
}
+
+ in_hopid = src_hopid;
+ out_port = NULL;
+
+ for (i = 0; i < num_hops; i++) {
+ in_port = tb_next_port_on_path(src, dst, out_port);
+ if (!in_port)
+ goto err;
+
+ if (in_port->dual_link_port && in_port->link_nr != link_nr)
+ in_port = in_port->dual_link_port;
+
+ ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
+ if (ret < 0)
+ goto err;
+ in_hopid = ret;
+
+ out_port = tb_next_port_on_path(src, dst, in_port);
+ if (!out_port)
+ goto err;
+
+ if (out_port->dual_link_port && out_port->link_nr != link_nr)
+ out_port = out_port->dual_link_port;
+
+ if (i == num_hops - 1)
+ ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
+ dst_hopid);
+ else
+ ret = tb_port_alloc_out_hopid(out_port, -1, -1);
+
+ if (ret < 0)
+ goto err;
+ out_hopid = ret;
+
+ path->hops[i].in_hop_index = in_hopid;
+ path->hops[i].in_port = in_port;
+ path->hops[i].in_counter_index = -1;
+ path->hops[i].out_port = out_port;
+ path->hops[i].next_hop_index = out_hopid;
+
+ in_hopid = out_hopid;
+ }
+
path->tb = tb;
path->path_length = num_hops;
+ path->name = name;
+
return path;
+
+err:
+ tb_path_free(path);
+ return NULL;
}
/**
- * tb_path_free() - free a deactivated path
+ * tb_path_free() - free a path
+ * @path: Path to free
+ *
+ * Frees a path. The path does not need to be deactivated.
*/
void tb_path_free(struct tb_path *path)
{
- if (path->activated) {
- tb_WARN(path->tb, "trying to free an activated path\n")
- return;
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ const struct tb_path_hop *hop = &path->hops[i];
+
+ if (hop->in_port)
+ tb_port_release_in_hopid(hop->in_port,
+ hop->in_hop_index);
+ if (hop->out_port)
+ tb_port_release_out_hopid(hop->out_port,
+ hop->next_hop_index);
}
+
kfree(path->hops);
kfree(path);
}
@@ -74,14 +342,65 @@ static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
}
}
+static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
+ bool clear_fc)
+{
+ struct tb_regs_hop hop;
+ ktime_t timeout;
+ int ret;
+
+ /* Disable the path */
+ ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+ if (ret)
+ return ret;
+
+ /* Already disabled */
+ if (!hop.enable)
+ return 0;
+
+ hop.enable = 0;
+
+ ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+ if (ret)
+ return ret;
+
+ /* Wait until it is drained */
+ timeout = ktime_add_ms(ktime_get(), 500);
+ do {
+ ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+ if (ret)
+ return ret;
+
+ if (!hop.pending) {
+ if (clear_fc) {
+ /* Clear flow control */
+ hop.ingress_fc = 0;
+ hop.egress_fc = 0;
+ hop.ingress_shared_buffer = 0;
+ hop.egress_shared_buffer = 0;
+
+ return tb_port_write(port, &hop, TB_CFG_HOPS,
+ 2 * hop_index, 2);
+ }
+
+ return 0;
+ }
+
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
- struct tb_regs_hop hop = { };
+
for (i = first_hop; i < path->path_length; i++) {
- res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
- 2 * path->hops[i].in_hop_index, 2);
- if (res)
+ res = __tb_path_deactivate_hop(path->hops[i].in_port,
+ path->hops[i].in_hop_index,
+ path->clear_fc);
+ if (res && res != -ENODEV)
tb_port_warn(path->hops[i].in_port,
"hop deactivation failed for hop %d, index %d\n",
i, path->hops[i].in_hop_index);
@@ -94,12 +413,12 @@ void tb_path_deactivate(struct tb_path *path)
tb_WARN(path->tb, "trying to deactivate an inactive path\n");
return;
}
- tb_info(path->tb,
- "deactivating path from %llx:%x to %llx:%x\n",
- tb_route(path->hops[0].in_port->sw),
- path->hops[0].in_port->port,
- tb_route(path->hops[path->path_length - 1].out_port->sw),
- path->hops[path->path_length - 1].out_port->port);
+ tb_dbg(path->tb,
+ "deactivating %s path from %llx:%x to %llx:%x\n",
+ path->name, tb_route(path->hops[0].in_port->sw),
+ path->hops[0].in_port->port,
+ tb_route(path->hops[path->path_length - 1].out_port->sw),
+ path->hops[path->path_length - 1].out_port->port);
__tb_path_deactivate_hops(path, 0);
__tb_path_deallocate_nfc(path, 0);
path->activated = false;
@@ -122,12 +441,12 @@ int tb_path_activate(struct tb_path *path)
return -EINVAL;
}
- tb_info(path->tb,
- "activating path from %llx:%x to %llx:%x\n",
- tb_route(path->hops[0].in_port->sw),
- path->hops[0].in_port->port,
- tb_route(path->hops[path->path_length - 1].out_port->sw),
- path->hops[path->path_length - 1].out_port->port);
+ tb_dbg(path->tb,
+ "activating %s path from %llx:%x to %llx:%x\n",
+ path->name, tb_route(path->hops[0].in_port->sw),
+ path->hops[0].in_port->port,
+ tb_route(path->hops[path->path_length - 1].out_port->sw),
+ path->hops[path->path_length - 1].out_port->port);
/* Clear counters. */
for (i = path->path_length - 1; i >= 0; i--) {
@@ -153,30 +472,14 @@ int tb_path_activate(struct tb_path *path)
for (i = path->path_length - 1; i >= 0; i--) {
struct tb_regs_hop hop = { 0 };
- /*
- * We do (currently) not tear down paths setup by the firmeware.
- * If a firmware device is unplugged and plugged in again then
- * it can happen that we reuse some of the hops from the (now
- * defunct) firmeware path. This causes the hotplug operation to
- * fail (the pci device does not show up). Clearing the hop
- * before overwriting it fixes the problem.
- *
- * Should be removed once we discover and tear down firmeware
- * paths.
- */
- res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
- 2 * path->hops[i].in_hop_index, 2);
- if (res) {
- __tb_path_deactivate_hops(path, i);
- __tb_path_deallocate_nfc(path, 0);
- goto err;
- }
+ /* If it is left active deactivate it first */
+ __tb_path_deactivate_hop(path->hops[i].in_port,
+ path->hops[i].in_hop_index, path->clear_fc);
/* dword 0 */
hop.next_hop = path->hops[i].next_hop_index;
hop.out_port = path->hops[i].out_port->port;
- /* TODO: figure out why these are good values */
- hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
+ hop.initial_credits = path->hops[i].initial_credits;
hop.unknown1 = 0;
hop.enable = 1;
@@ -198,9 +501,8 @@ int tb_path_activate(struct tb_path *path)
& out_mask;
hop.unknown3 = 0;
- tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d",
- i, path->hops[i].in_hop_index);
- tb_dump_hop(path->hops[i].in_port, &hop);
+ tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
+ tb_dump_hop(&path->hops[i], &hop);
res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
2 * path->hops[i].in_hop_index, 2);
if (res) {
@@ -210,7 +512,7 @@ int tb_path_activate(struct tb_path *path)
}
}
path->activated = true;
- tb_info(path->tb, "path activation complete\n");
+ tb_dbg(path->tb, "path activation complete\n");
return 0;
err:
tb_WARN(path->tb, "path activation failed\n");
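The reworked path API above is easiest to follow with a short usage sketch. The snippet below is illustrative only and is not part of the patch: example_setup_path(), the "down"/"up" port arguments and the choice of TB_PATH_MIN_HOPID are placeholders, and it relies only on the signatures visible in this hunk (tb_path_alloc(), tb_path_activate(), tb_path_deactivate() and tb_path_free()).

	/*
	 * Illustrative sketch (not part of the patch): allocate a path
	 * between two adapter ports, activate it and tear it down again.
	 * A real caller also fills in per-hop parameters such as
	 * initial_credits before activation.
	 */
	static int example_setup_path(struct tb *tb, struct tb_port *down,
				      struct tb_port *up)
	{
		struct tb_path *path;
		int ret;

		path = tb_path_alloc(tb, down, TB_PATH_MIN_HOPID, up,
				     TB_PATH_MIN_HOPID, 0, "Example");
		if (!path)
			return -ENOMEM;

		ret = tb_path_activate(path);
		if (ret) {
			tb_path_free(path);
			return ret;
		}

		/* ... the path is now carrying traffic ... */

		tb_path_deactivate(path);
		tb_path_free(path);
		return 0;
	}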
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index b2f0d6386cee..d5b0cdb8f0b1 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -176,6 +176,10 @@ static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
} else {
dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
GFP_KERNEL);
+ if (!dir->uuid) {
+ tb_property_free_dir(dir);
+ return NULL;
+ }
content_offset = dir_offset + 4;
content_len = dir_len - 4; /* Length includes UUID */
}
@@ -548,6 +552,11 @@ int tb_property_add_data(struct tb_property_dir *parent, const char *key,
property->length = size / 4;
property->value.data = kzalloc(size, GFP_KERNEL);
+ if (!property->value.data) {
+ kfree(property);
+ return -ENOMEM;
+ }
+
memcpy(property->value.data, buf, buflen);
list_add_tail(&property->list, &parent->properties);
@@ -578,7 +587,12 @@ int tb_property_add_text(struct tb_property_dir *parent, const char *key,
return -ENOMEM;
property->length = size / 4;
- property->value.data = kzalloc(size, GFP_KERNEL);
+ property->value.text = kzalloc(size, GFP_KERNEL);
+ if (!property->value.text) {
+ kfree(property);
+ return -ENOMEM;
+ }
+
strcpy(property->value.text, text);
list_add_tail(&property->list, &parent->properties);
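The three property.c hunks above all enforce the same defensive pattern: check every kzalloc()/kmemdup() result and unwind whatever was built so far before returning -ENOMEM. A minimal sketch of that pattern follows; example_add_blob() and its local structure are hypothetical and not taken from the driver.

	/*
	 * Illustrative sketch (not from the driver) of the allocation
	 * error handling added above: check each allocation and release
	 * the partially built object before returning -ENOMEM.
	 */
	struct example_entry {
		struct list_head node;
		void *data;
	};

	static int example_add_blob(struct list_head *list, const void *buf,
				    size_t len)
	{
		struct example_entry *e;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		e->data = kmemdup(buf, len, GFP_KERNEL);
		if (!e->data) {
			kfree(e);	/* unwind the partial allocation */
			return -ENOMEM;
		}

		list_add_tail(&e->node, list);
		return 0;
	}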
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index cd96994dc094..c1b016574fb4 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -10,15 +10,13 @@
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
+#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "tb.h"
-/* Switch authorization from userspace is serialized by this lock */
-static DEFINE_MUTEX(switch_lock);
-
/* Switch NVM support */
#define NVM_DEVID 0x05
@@ -254,8 +252,8 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
struct tb_switch *sw = priv;
int ret = 0;
- if (mutex_lock_interruptible(&switch_lock))
- return -ERESTARTSYS;
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
/*
* Since writing the NVM image might require some special steps,
@@ -275,7 +273,7 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
memcpy(sw->nvm->buf + offset, val, bytes);
unlock:
- mutex_unlock(&switch_lock);
+ mutex_unlock(&sw->tb->lock);
return ret;
}
@@ -364,10 +362,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
}
nvm->non_active = nvm_dev;
- mutex_lock(&switch_lock);
sw->nvm = nvm;
- mutex_unlock(&switch_lock);
-
return 0;
err_nvm_active:
@@ -384,10 +379,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
{
struct tb_switch_nvm *nvm;
- mutex_lock(&switch_lock);
nvm = sw->nvm;
sw->nvm = NULL;
- mutex_unlock(&switch_lock);
if (!nvm)
return;
@@ -500,23 +493,22 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
if (state < 0)
return state;
if (state == TB_PORT_DISABLED) {
- tb_port_info(port, "is disabled (state: 0)\n");
+ tb_port_dbg(port, "is disabled (state: 0)\n");
return 0;
}
if (state == TB_PORT_UNPLUGGED) {
if (wait_if_unplugged) {
/* used during resume */
- tb_port_info(port,
- "is unplugged (state: 7), retrying...\n");
+ tb_port_dbg(port,
+ "is unplugged (state: 7), retrying...\n");
msleep(100);
continue;
}
- tb_port_info(port, "is unplugged (state: 7)\n");
+ tb_port_dbg(port, "is unplugged (state: 7)\n");
return 0;
}
if (state == TB_PORT_UP) {
- tb_port_info(port,
- "is connected, link is up (state: 2)\n");
+ tb_port_dbg(port, "is connected, link is up (state: 2)\n");
return 1;
}
@@ -524,9 +516,9 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
* After plug-in the state is TB_PORT_CONNECTING. Give it some
* time.
*/
- tb_port_info(port,
- "is connected, link is not up (state: %d), retrying...\n",
- state);
+ tb_port_dbg(port,
+ "is connected, link is not up (state: %d), retrying...\n",
+ state);
msleep(100);
}
tb_port_warn(port,
@@ -544,19 +536,47 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
*/
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
- if (credits == 0)
+ u32 nfc_credits;
+
+ if (credits == 0 || port->sw->is_unplugged)
return 0;
- tb_port_info(port,
- "adding %#x NFC credits (%#x -> %#x)",
- credits,
- port->config.nfc_credits,
- port->config.nfc_credits + credits);
- port->config.nfc_credits += credits;
+
+ nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ nfc_credits += credits;
+
+ tb_port_dbg(port, "adding %d NFC credits to %lu",
+ credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
+
+ port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
+ port->config.nfc_credits |= nfc_credits;
+
return tb_port_write(port, &port->config.nfc_credits,
TB_CFG_PORT, 4, 1);
}
/**
+ * tb_port_set_initial_credits() - Set initial port link credits allocated
+ * @port: Port to set the initial credits
+ * @credits: Number of credits to allocate
+ *
+ * Set initial credits value to be used for ingress shared buffering.
+ */
+int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
+ if (ret)
+ return ret;
+
+ data &= ~TB_PORT_LCA_MASK;
+ data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
+
+ return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
+}
+
+/**
* tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
*
* Return: Returns 0 on success or an error code on failure.
@@ -564,7 +584,7 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
int tb_port_clear_counter(struct tb_port *port, int counter)
{
u32 zero[3] = { 0, 0, 0 };
- tb_port_info(port, "clearing counter %d\n", counter);
+ tb_port_dbg(port, "clearing counter %d\n", counter);
return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}
@@ -593,15 +613,304 @@ static int tb_init_port(struct tb_port *port)
port->cap_phy = cap;
else
tb_port_WARN(port, "non switch port without a PHY\n");
+ } else if (port->port != 0) {
+ cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
+ if (cap > 0)
+ port->cap_adap = cap;
}
tb_dump_port(port->sw->tb, &port->config);
- /* TODO: Read dual link port, DP port and more from EEPROM. */
+ /* Control port does not need HopID allocation */
+ if (port->port) {
+ ida_init(&port->in_hopids);
+ ida_init(&port->out_hopids);
+ }
+
return 0;
}
+static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
+ int max_hopid)
+{
+ int port_max_hopid;
+ struct ida *ida;
+
+ if (in) {
+ port_max_hopid = port->config.max_in_hop_id;
+ ida = &port->in_hopids;
+ } else {
+ port_max_hopid = port->config.max_out_hop_id;
+ ida = &port->out_hopids;
+ }
+
+ /* HopIDs 0-7 are reserved */
+ if (min_hopid < TB_PATH_MIN_HOPID)
+ min_hopid = TB_PATH_MIN_HOPID;
+
+ if (max_hopid < 0 || max_hopid > port_max_hopid)
+ max_hopid = port_max_hopid;
+
+ return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+}
+
+/**
+ * tb_port_alloc_in_hopid() - Allocate input HopID from port
+ * @port: Port to allocate HopID for
+ * @min_hopid: Minimum acceptable input HopID
+ * @max_hopid: Maximum acceptable input HopID
+ *
+ * Return: HopID between @min_hopid and @max_hopid or negative errno in
+ * case of error.
+ */
+int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
+{
+ return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
+}
+
+/**
+ * tb_port_alloc_out_hopid() - Allocate output HopID from port
+ * @port: Port to allocate HopID for
+ * @min_hopid: Minimum acceptable output HopID
+ * @max_hopid: Maximum acceptable output HopID
+ *
+ * Return: HopID between @min_hopid and @max_hopid or negative errno in
+ * case of error.
+ */
+int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
+{
+ return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
+}
+
+/**
+ * tb_port_release_in_hopid() - Release allocated input HopID from port
+ * @port: Port whose HopID to release
+ * @hopid: HopID to release
+ */
+void tb_port_release_in_hopid(struct tb_port *port, int hopid)
+{
+ ida_simple_remove(&port->in_hopids, hopid);
+}
+
+/**
+ * tb_port_release_out_hopid() - Release allocated output HopID from port
+ * @port: Port whose HopID to release
+ * @hopid: HopID to release
+ */
+void tb_port_release_out_hopid(struct tb_port *port, int hopid)
+{
+ ida_simple_remove(&port->out_hopids, hopid);
+}
+
+/**
+ * tb_next_port_on_path() - Return next port for given port on a path
+ * @start: Start port of the walk
+ * @end: End port of the walk
+ * @prev: Previous port (%NULL if this is the first)
+ *
+ * This function can be used to walk from one port to another if they
+ * are connected through zero or more switches. If @prev is a dual
+ * link port, the function follows that link and returns the other end
+ * of that same link.
+ *
+ * If the @end port has been reached, return %NULL.
+ *
+ * Domain tb->lock must be held when this function is called.
+ */
+struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
+ struct tb_port *prev)
+{
+ struct tb_port *next;
+
+ if (!prev)
+ return start;
+
+ if (prev->sw == end->sw) {
+ if (prev == end)
+ return NULL;
+ return end;
+ }
+
+ if (start->sw->config.depth < end->sw->config.depth) {
+ if (prev->remote &&
+ prev->remote->sw->config.depth > prev->sw->config.depth)
+ next = prev->remote;
+ else
+ next = tb_port_at(tb_route(end->sw), prev->sw);
+ } else {
+ if (tb_is_upstream_port(prev)) {
+ next = prev->remote;
+ } else {
+ next = tb_upstream_port(prev->sw);
+ /*
+ * Keep the same link if prev and next are both
+ * dual link ports.
+ */
+ if (next->dual_link_port &&
+ next->link_nr != prev->link_nr) {
+ next = next->dual_link_port;
+ }
+ }
+ }
+
+ return next;
+}
+
+/**
+ * tb_port_is_enabled() - Is the adapter port enabled
+ * @port: Port to check
+ */
+bool tb_port_is_enabled(struct tb_port *port)
+{
+ switch (port->config.type) {
+ case TB_TYPE_PCIE_UP:
+ case TB_TYPE_PCIE_DOWN:
+ return tb_pci_port_is_enabled(port);
+
+ case TB_TYPE_DP_HDMI_IN:
+ case TB_TYPE_DP_HDMI_OUT:
+ return tb_dp_port_is_enabled(port);
+
+ default:
+ return false;
+ }
+}
+
+/**
+ * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
+ * @port: PCIe port to check
+ */
+bool tb_pci_port_is_enabled(struct tb_port *port)
+{
+ u32 data;
+
+ if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+ return false;
+
+ return !!(data & TB_PCI_EN);
+}
+
+/**
+ * tb_pci_port_enable() - Enable PCIe adapter port
+ * @port: PCIe port to enable
+ * @enable: Enable/disable the PCIe adapter
+ */
+int tb_pci_port_enable(struct tb_port *port, bool enable)
+{
+ u32 word = enable ? TB_PCI_EN : 0x0;
+ if (!port->cap_adap)
+ return -ENXIO;
+ return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
+}
+
+/**
+ * tb_dp_port_hpd_is_active() - Is HPD already active
+ * @port: DP out port to check
+ *
+ * Checks if the DP OUT adapter port has the HDP bit already set.
+ */
+int tb_dp_port_hpd_is_active(struct tb_port *port)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
+ if (ret)
+ return ret;
+
+ return !!(data & TB_DP_HDP);
+}
+
+/**
+ * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
+ * @port: Port to clear HPD
+ *
+ * If the DP IN port has HDP set, this function can be used to clear it.
+ */
+int tb_dp_port_hpd_clear(struct tb_port *port)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ if (ret)
+ return ret;
+
+ data |= TB_DP_HPDC;
+ return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+}
+
+/**
+ * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
+ * @port: DP IN/OUT port to set hops
+ * @video: Video Hop ID
+ * @aux_tx: AUX TX Hop ID
+ * @aux_rx: AUX RX Hop ID
+ *
+ * Programs specified Hop IDs for DP IN/OUT port.
+ */
+int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+ unsigned int aux_tx, unsigned int aux_rx)
+{
+ u32 data[2];
+ int ret;
+
+ ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+ ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
+ data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
+
+ data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
+ data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
+ data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
+
+ return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
+ ARRAY_SIZE(data));
+}
+
+/**
+ * tb_dp_port_is_enabled() - Is DP adapter port enabled
+ * @port: DP adapter port to check
+ */
+bool tb_dp_port_is_enabled(struct tb_port *port)
+{
+ u32 data;
+
+ if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+ return false;
+
+ return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+}
+
+/**
+ * tb_dp_port_enable() - Enables/disables DP paths of a port
+ * @port: DP IN/OUT port
+ * @enable: Enable/disable DP path
+ *
+ * Once Hop IDs are programmed DP paths can be enabled or disabled by
+ * calling this function.
+ */
+int tb_dp_port_enable(struct tb_port *port, bool enable)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+ else
+ data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+
+ return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1);
+}
+
/* switch utility functions */
static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
@@ -644,24 +953,6 @@ int tb_switch_reset(struct tb *tb, u64 route)
return res.err;
}
-struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
-{
- u8 next_port = route; /*
- * Routes use a stride of 8 bits,
- * eventhough a port index has 6 bits at most.
- * */
- if (route == 0)
- return sw;
- if (next_port > sw->config.max_port_number)
- return NULL;
- if (tb_is_upstream_port(&sw->ports[next_port]))
- return NULL;
- if (!sw->ports[next_port].remote)
- return NULL;
- return get_switch_at_route(sw->ports[next_port].remote->sw,
- route >> TB_ROUTE_SHIFT);
-}
-
/**
* tb_plug_events_active() - enable/disable plug events on a switch
*
@@ -716,8 +1007,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
int ret = -EINVAL;
- if (mutex_lock_interruptible(&switch_lock))
- return -ERESTARTSYS;
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
if (sw->authorized)
goto unlock;
@@ -760,7 +1051,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
}
unlock:
- mutex_unlock(&switch_lock);
+ mutex_unlock(&sw->tb->lock);
return ret;
}
@@ -817,15 +1108,15 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
struct tb_switch *sw = tb_to_switch(dev);
ssize_t ret;
- if (mutex_lock_interruptible(&switch_lock))
- return -ERESTARTSYS;
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
if (sw->key)
ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
else
ret = sprintf(buf, "\n");
- mutex_unlock(&switch_lock);
+ mutex_unlock(&sw->tb->lock);
return ret;
}
@@ -842,8 +1133,8 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
else if (hex2bin(key, buf, sizeof(key)))
return -EINVAL;
- if (mutex_lock_interruptible(&switch_lock))
- return -ERESTARTSYS;
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
if (sw->authorized) {
ret = -EBUSY;
@@ -858,7 +1149,7 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
}
}
- mutex_unlock(&switch_lock);
+ mutex_unlock(&sw->tb->lock);
return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
@@ -904,8 +1195,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
bool val;
int ret;
- if (mutex_lock_interruptible(&switch_lock))
- return -ERESTARTSYS;
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
/* If NVMem devices are not yet added */
if (!sw->nvm) {
@@ -953,7 +1244,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
}
exit_unlock:
- mutex_unlock(&switch_lock);
+ mutex_unlock(&sw->tb->lock);
if (ret)
return ret;
@@ -967,8 +1258,8 @@ static ssize_t nvm_version_show(struct device *dev,
struct tb_switch *sw = tb_to_switch(dev);
int ret;
- if (mutex_lock_interruptible(&switch_lock))
- return -ERESTARTSYS;
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
if (sw->safe_mode)
ret = -ENODATA;
@@ -977,7 +1268,7 @@ static ssize_t nvm_version_show(struct device *dev,
else
ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
- mutex_unlock(&switch_lock);
+ mutex_unlock(&sw->tb->lock);
return ret;
}
@@ -1063,9 +1354,17 @@ static const struct attribute_group *switch_groups[] = {
static void tb_switch_release(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
+ int i;
dma_port_free(sw->dma_port);
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ if (!sw->ports[i].disabled) {
+ ida_destroy(&sw->ports[i].in_hopids);
+ ida_destroy(&sw->ports[i].out_hopids);
+ }
+ }
+
kfree(sw->uuid);
kfree(sw->device_name);
kfree(sw->vendor_name);
@@ -1150,24 +1449,32 @@ static int tb_switch_get_generation(struct tb_switch *sw)
* separately. The returned switch should be released by calling
* tb_switch_put().
*
- * Return: Pointer to the allocated switch or %NULL in case of failure
+ * Return: Pointer to the allocated switch or ERR_PTR() in case of
+ * failure.
*/
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
u64 route)
{
- int i;
- int cap;
struct tb_switch *sw;
- int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
+ int upstream_port;
+ int i, ret, depth;
+
+ /* Make sure we do not exceed maximum topology limit */
+ depth = tb_route_length(route);
+ if (depth > TB_SWITCH_MAX_DEPTH)
+ return ERR_PTR(-EADDRNOTAVAIL);
+
+ upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
if (upstream_port < 0)
- return NULL;
+ return ERR_PTR(upstream_port);
sw = kzalloc(sizeof(*sw), GFP_KERNEL);
if (!sw)
- return NULL;
+ return ERR_PTR(-ENOMEM);
sw->tb = tb;
- if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
+ ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
+ if (ret)
goto err_free_sw_ports;
tb_dbg(tb, "current switch config:\n");
@@ -1175,16 +1482,18 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
/* configure switch */
sw->config.upstream_port_number = upstream_port;
- sw->config.depth = tb_route_length(route);
- sw->config.route_lo = route;
- sw->config.route_hi = route >> 32;
+ sw->config.depth = depth;
+ sw->config.route_hi = upper_32_bits(route);
+ sw->config.route_lo = lower_32_bits(route);
sw->config.enabled = 0;
/* initialize ports */
sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
GFP_KERNEL);
- if (!sw->ports)
+ if (!sw->ports) {
+ ret = -ENOMEM;
goto err_free_sw_ports;
+ }
for (i = 0; i <= sw->config.max_port_number; i++) {
/* minimum setup for tb_find_cap and tb_drom_read to work */
@@ -1194,12 +1503,16 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
sw->generation = tb_switch_get_generation(sw);
- cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
- if (cap < 0) {
+ ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
+ if (ret < 0) {
tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
goto err_free_sw_ports;
}
- sw->cap_plug_events = cap;
+ sw->cap_plug_events = ret;
+
+ ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
+ if (ret > 0)
+ sw->cap_lc = ret;
/* Root switch is always authorized */
if (!route)
@@ -1218,7 +1531,7 @@ err_free_sw_ports:
kfree(sw->ports);
kfree(sw);
- return NULL;
+ return ERR_PTR(ret);
}
/**
@@ -1233,7 +1546,7 @@ err_free_sw_ports:
*
* The returned switch must be released by calling tb_switch_put().
*
- * Return: Pointer to the allocated switch or %NULL in case of failure
+ * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
*/
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
@@ -1242,7 +1555,7 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
sw = kzalloc(sizeof(*sw), GFP_KERNEL);
if (!sw)
- return NULL;
+ return ERR_PTR(-ENOMEM);
sw->tb = tb;
sw->config.depth = tb_route_length(route);
@@ -1291,25 +1604,27 @@ int tb_switch_configure(struct tb_switch *sw)
if (ret)
return ret;
+ ret = tb_lc_configure_link(sw);
+ if (ret)
+ return ret;
+
return tb_plug_events_active(sw, true);
}
-static void tb_switch_set_uuid(struct tb_switch *sw)
+static int tb_switch_set_uuid(struct tb_switch *sw)
{
u32 uuid[4];
- int cap;
+ int ret;
if (sw->uuid)
- return;
+ return 0;
/*
* The newer controllers include fused UUID as part of link
* controller specific registers
*/
- cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
- if (cap > 0) {
- tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
- } else {
+ ret = tb_lc_read_uuid(sw, uuid);
+ if (ret) {
/*
* ICM generates UUID based on UID and fills the upper
* two words with ones. This is not strictly following
@@ -1323,6 +1638,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
}
sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+ if (!sw->uuid)
+ return -ENOMEM;
+ return 0;
}
static int tb_switch_add_dma_port(struct tb_switch *sw)
@@ -1372,7 +1690,9 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
- tb_switch_set_uuid(sw);
+ ret = tb_switch_set_uuid(sw);
+ if (ret)
+ return ret;
nvm_set_auth_status(sw, status);
}
@@ -1422,7 +1742,9 @@ int tb_switch_add(struct tb_switch *sw)
}
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
- tb_switch_set_uuid(sw);
+ ret = tb_switch_set_uuid(sw);
+ if (ret)
+ return ret;
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
@@ -1484,18 +1806,18 @@ void tb_switch_remove(struct tb_switch *sw)
/* port 0 is the switch itself and never has a remote */
for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_is_upstream_port(&sw->ports[i]))
- continue;
- if (sw->ports[i].remote)
+ if (tb_port_has_remote(&sw->ports[i])) {
tb_switch_remove(sw->ports[i].remote->sw);
- sw->ports[i].remote = NULL;
- if (sw->ports[i].xdomain)
+ sw->ports[i].remote = NULL;
+ } else if (sw->ports[i].xdomain) {
tb_xdomain_remove(sw->ports[i].xdomain);
- sw->ports[i].xdomain = NULL;
+ sw->ports[i].xdomain = NULL;
+ }
}
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
+ tb_lc_unconfigure_link(sw);
tb_switch_nvm_remove(sw);
@@ -1520,8 +1842,10 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
}
sw->is_unplugged = true;
for (i = 0; i <= sw->config.max_port_number; i++) {
- if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
+ if (tb_port_has_remote(&sw->ports[i]))
tb_sw_set_unplugged(sw->ports[i].remote->sw);
+ else if (sw->ports[i].xdomain)
+ sw->ports[i].xdomain->is_unplugged = true;
}
}
@@ -1537,6 +1861,17 @@ int tb_switch_resume(struct tb_switch *sw)
if (tb_route(sw)) {
u64 uid;
+ /*
+ * Check first that we can still read the switch config
+ * space. It may be that there is now another domain
+ * connected.
+ */
+ err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
+ if (err < 0) {
+ tb_sw_info(sw, "switch not present anymore\n");
+ return err;
+ }
+
err = tb_drom_read_uid_only(sw, &uid);
if (err) {
tb_sw_warn(sw, "uid read failed\n");
@@ -1555,6 +1890,10 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ err = tb_lc_configure_link(sw);
+ if (err)
+ return err;
+
err = tb_plug_events_active(sw, true);
if (err)
return err;
@@ -1562,15 +1901,23 @@ int tb_switch_resume(struct tb_switch *sw)
/* check for surviving downstream switches */
for (i = 1; i <= sw->config.max_port_number; i++) {
struct tb_port *port = &sw->ports[i];
- if (tb_is_upstream_port(port))
- continue;
- if (!port->remote)
+
+ if (!tb_port_has_remote(port) && !port->xdomain)
continue;
- if (tb_wait_for_port(port, true) <= 0
- || tb_switch_resume(port->remote->sw)) {
+
+ if (tb_wait_for_port(port, true) <= 0) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
- tb_sw_set_unplugged(port->remote->sw);
+ if (tb_port_has_remote(port))
+ tb_sw_set_unplugged(port->remote->sw);
+ else if (port->xdomain)
+ port->xdomain->is_unplugged = true;
+ } else if (tb_port_has_remote(port)) {
+ if (tb_switch_resume(port->remote->sw)) {
+ tb_port_warn(port,
+ "lost during suspend, disconnecting\n");
+ tb_sw_set_unplugged(port->remote->sw);
+ }
}
}
return 0;
@@ -1584,13 +1931,11 @@ void tb_switch_suspend(struct tb_switch *sw)
return;
for (i = 1; i <= sw->config.max_port_number; i++) {
- if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
+ if (tb_port_has_remote(&sw->ports[i]))
tb_switch_suspend(sw->ports[i].remote->sw);
}
- /*
- * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
- * effect?
- */
+
+ tb_lc_set_sleep(sw);
}
struct tb_sw_lookup {
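The new per-port HopID allocator and the tb_next_port_on_path() walker introduced above combine naturally. The sketch below is illustrative only (example_reserve_hopids() is a hypothetical caller) and uses just the helpers documented in the hunks above; a real user such as tb_path_alloc() in path.c alternates between input and output allocations and records each ID so it can later be released with tb_port_release_in_hopid()/tb_port_release_out_hopid().

	/*
	 * Illustrative sketch (not part of the patch): walk every port
	 * between two adapters and reserve an input HopID on each one.
	 * Passing -1 as the maximum lets the allocator fall back to the
	 * port's own max_in_hop_id limit.
	 */
	static int example_reserve_hopids(struct tb_port *src,
					  struct tb_port *dst)
	{
		struct tb_port *p = NULL;
		int hopid;

		while ((p = tb_next_port_on_path(src, dst, p)) != NULL) {
			hopid = tb_port_alloc_in_hopid(p, TB_PATH_MIN_HOPID, -1);
			if (hopid < 0)
				return hopid;
			tb_port_dbg(p, "reserved input HopID %d\n", hopid);
		}

		return 0;
	}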
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 30e02c716f6c..1f7a9e1cc09c 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ * Thunderbolt driver - bus logic (NHI independent)
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
*/
#include <linux/slab.h>
@@ -12,7 +13,7 @@
#include "tb.h"
#include "tb_regs.h"
-#include "tunnel_pci.h"
+#include "tunnel.h"
/**
* struct tb_cm - Simple Thunderbolt connection manager
@@ -27,8 +28,100 @@ struct tb_cm {
bool hotplug_active;
};
+struct tb_hotplug_event {
+ struct work_struct work;
+ struct tb *tb;
+ u64 route;
+ u8 port;
+ bool unplug;
+};
+
+static void tb_handle_hotplug(struct work_struct *work);
+
+static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
+{
+ struct tb_hotplug_event *ev;
+
+ ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return;
+
+ ev->tb = tb;
+ ev->route = route;
+ ev->port = port;
+ ev->unplug = unplug;
+ INIT_WORK(&ev->work, tb_handle_hotplug);
+ queue_work(tb->wq, &ev->work);
+}
+
/* enumeration & hot plug handling */
+static void tb_discover_tunnels(struct tb_switch *sw)
+{
+ struct tb *tb = sw->tb;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port;
+ int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ struct tb_tunnel *tunnel = NULL;
+
+ port = &sw->ports[i];
+ switch (port->config.type) {
+ case TB_TYPE_DP_HDMI_IN:
+ tunnel = tb_tunnel_discover_dp(tb, port);
+ break;
+
+ case TB_TYPE_PCIE_DOWN:
+ tunnel = tb_tunnel_discover_pci(tb, port);
+ break;
+
+ default:
+ break;
+ }
+
+ if (!tunnel)
+ continue;
+
+ if (tb_tunnel_is_pci(tunnel)) {
+ struct tb_switch *parent = tunnel->dst_port->sw;
+
+ while (parent != tunnel->src_port->sw) {
+ parent->boot = true;
+ parent = tb_switch_parent(parent);
+ }
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ }
+
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ if (tb_port_has_remote(&sw->ports[i]))
+ tb_discover_tunnels(sw->ports[i].remote->sw);
+ }
+}
+
+static void tb_scan_xdomain(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ struct tb_xdomain *xd;
+ u64 route;
+
+ route = tb_downstream_route(port);
+ xd = tb_xdomain_find_by_route(tb, route);
+ if (xd) {
+ tb_xdomain_put(xd);
+ return;
+ }
+
+ xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
+ NULL);
+ if (xd) {
+ tb_port_at(route, sw)->xdomain = xd;
+ tb_xdomain_add(xd);
+ }
+}
static void tb_scan_port(struct tb_port *port);
@@ -47,9 +140,21 @@ static void tb_scan_switch(struct tb_switch *sw)
*/
static void tb_scan_port(struct tb_port *port)
{
+ struct tb_cm *tcm = tb_priv(port->sw->tb);
+ struct tb_port *upstream_port;
struct tb_switch *sw;
+
if (tb_is_upstream_port(port))
return;
+
+ if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
+ !tb_dp_port_is_enabled(port)) {
+ tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
+ tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
+ false);
+ return;
+ }
+
if (port->config.type != TB_TYPE_PORT)
return;
if (port->dual_link_port && port->link_nr)
@@ -60,45 +165,95 @@ static void tb_scan_port(struct tb_port *port)
if (tb_wait_for_port(port, false) <= 0)
return;
if (port->remote) {
- tb_port_WARN(port, "port already has a remote!\n");
+ tb_port_dbg(port, "port already has a remote\n");
return;
}
sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb_downstream_route(port));
- if (!sw)
+ if (IS_ERR(sw)) {
+ /*
+ * If there is an error accessing the connected switch
+ * it may be connected to another domain. Also we allow
+ * the other domain to be connected to a max depth switch.
+ */
+ if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
+ tb_scan_xdomain(port);
return;
+ }
if (tb_switch_configure(sw)) {
tb_switch_put(sw);
return;
}
- sw->authorized = true;
+ /*
+ * If there was previously another domain connected remove it
+ * first.
+ */
+ if (port->xdomain) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
+ }
+
+ /*
+ * Do not send uevents until we have discovered all existing
+ * tunnels and know which switches were authorized already by
+ * the boot firmware.
+ */
+ if (!tcm->hotplug_active)
+ dev_set_uevent_suppress(&sw->dev, true);
if (tb_switch_add(sw)) {
tb_switch_put(sw);
return;
}
- port->remote = tb_upstream_port(sw);
- tb_upstream_port(sw)->remote = port;
+ /* Link the switches using both links if available */
+ upstream_port = tb_upstream_port(sw);
+ port->remote = upstream_port;
+ upstream_port->remote = port;
+ if (port->dual_link_port && upstream_port->dual_link_port) {
+ port->dual_link_port->remote = upstream_port->dual_link_port;
+ upstream_port->dual_link_port->remote = port->dual_link_port;
+ }
+
tb_scan_switch(sw);
}
+static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
+ struct tb_port *src_port, struct tb_port *dst_port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tunnel->type == type &&
+ ((src_port && src_port == tunnel->src_port) ||
+ (dst_port && dst_port == tunnel->dst_port))) {
+ tb_tunnel_deactivate(tunnel);
+ list_del(&tunnel->list);
+ tb_tunnel_free(tunnel);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
/**
* tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
*/
static void tb_free_invalid_tunnels(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
- struct tb_pci_tunnel *tunnel;
- struct tb_pci_tunnel *n;
+ struct tb_tunnel *tunnel;
+ struct tb_tunnel *n;
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
- if (tb_pci_is_invalid(tunnel)) {
- tb_pci_deactivate(tunnel);
+ if (tb_tunnel_is_invalid(tunnel)) {
+ tb_tunnel_deactivate(tunnel);
list_del(&tunnel->list);
- tb_pci_free(tunnel);
+ tb_tunnel_free(tunnel);
}
}
}
@@ -111,136 +266,232 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
int i;
for (i = 1; i <= sw->config.max_port_number; i++) {
struct tb_port *port = &sw->ports[i];
- if (tb_is_upstream_port(port))
- continue;
- if (!port->remote)
+
+ if (!tb_port_has_remote(port))
continue;
+
if (port->remote->sw->is_unplugged) {
tb_switch_remove(port->remote->sw);
port->remote = NULL;
+ if (port->dual_link_port)
+ port->dual_link_port->remote = NULL;
} else {
tb_free_unplugged_children(port->remote->sw);
}
}
}
-
/**
- * find_pci_up_port() - return the first PCIe up port on @sw or NULL
+ * tb_find_port() - return the first port of @type on @sw or NULL
+ * @sw: Switch to find the port from
+ * @type: Port type to look for
*/
-static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
+static struct tb_port *tb_find_port(struct tb_switch *sw,
+ enum tb_port_type type)
{
int i;
for (i = 1; i <= sw->config.max_port_number; i++)
- if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
+ if (sw->ports[i].config.type == type)
return &sw->ports[i];
return NULL;
}
/**
- * find_unused_down_port() - return the first inactive PCIe down port on @sw
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
*/
-static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+ enum tb_port_type type)
{
int i;
- int cap;
- int res;
- int data;
+
for (i = 1; i <= sw->config.max_port_number; i++) {
if (tb_is_upstream_port(&sw->ports[i]))
continue;
- if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
- continue;
- cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
- if (cap < 0)
+ if (sw->ports[i].config.type != type)
continue;
- res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
- if (res < 0)
+ if (!sw->ports[i].cap_adap)
continue;
- if (data & 0x80000000)
+ if (tb_port_is_enabled(&sw->ports[i]))
continue;
return &sw->ports[i];
}
return NULL;
}
-/**
- * tb_activate_pcie_devices() - scan for and activate PCIe devices
- *
- * This method is somewhat ad hoc. For now it only supports one device
- * per port and only devices at depth 1.
- */
-static void tb_activate_pcie_devices(struct tb *tb)
+static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ /*
+ * To keep plugging devices consistently in the same PCIe
+ * hierarchy, do mapping here for root switch downstream PCIe
+ * ports.
+ */
+ if (!tb_route(sw)) {
+ int phy_port = tb_phy_port_from_link(port->port);
+ int index;
+
+ /*
+ * Hard-coded Thunderbolt port to PCIe down port mapping
+ * per controller.
+ */
+ if (tb_switch_is_cr(sw))
+ index = !phy_port ? 6 : 7;
+ else if (tb_switch_is_fr(sw))
+ index = !phy_port ? 6 : 8;
+ else
+ goto out;
+
+ /* Validate the hard-coding */
+ if (WARN_ON(index > sw->config.max_port_number))
+ goto out;
+ if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
+ goto out;
+ if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
+ goto out;
+
+ return &sw->ports[index];
+ }
+
+out:
+ return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
+}
+
+static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
{
- int i;
- int cap;
- u32 data;
- struct tb_switch *sw;
- struct tb_port *up_port;
- struct tb_port *down_port;
- struct tb_pci_tunnel *tunnel;
struct tb_cm *tcm = tb_priv(tb);
+ struct tb_switch *sw = out->sw;
+ struct tb_tunnel *tunnel;
+ struct tb_port *in;
+
+ if (tb_port_is_enabled(out))
+ return 0;
+
+ do {
+ sw = tb_to_switch(sw->dev.parent);
+ if (!sw)
+ return 0;
+ in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
+ } while (!in);
+
+ tunnel = tb_tunnel_alloc_dp(tb, in, out);
+ if (!tunnel) {
+ tb_port_dbg(out, "DP tunnel allocation failed\n");
+ return -ENOMEM;
+ }
- /* scan for pcie devices at depth 1*/
- for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
- if (tb_is_upstream_port(&tb->root_switch->ports[i]))
- continue;
- if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
- continue;
- if (!tb->root_switch->ports[i].remote)
- continue;
- sw = tb->root_switch->ports[i].remote->sw;
- up_port = tb_find_pci_up_port(sw);
- if (!up_port) {
- tb_sw_info(sw, "no PCIe devices found, aborting\n");
- continue;
- }
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(out, "DP tunnel activation failed, aborting\n");
+ tb_tunnel_free(tunnel);
+ return -EIO;
+ }
- /* check whether port is already activated */
- cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
- if (cap < 0)
- continue;
- if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
- continue;
- if (data & 0x80000000) {
- tb_port_info(up_port,
- "PCIe port already activated, aborting\n");
- continue;
- }
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ return 0;
+}
- down_port = tb_find_unused_down_port(tb->root_switch);
- if (!down_port) {
- tb_port_info(up_port,
- "All PCIe down ports are occupied, aborting\n");
- continue;
- }
- tunnel = tb_pci_alloc(tb, up_port, down_port);
- if (!tunnel) {
- tb_port_info(up_port,
- "PCIe tunnel allocation failed, aborting\n");
- continue;
- }
+static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
+{
+ tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
+}
- if (tb_pci_activate(tunnel)) {
- tb_port_info(up_port,
- "PCIe tunnel activation failed, aborting\n");
- tb_pci_free(tunnel);
- continue;
- }
+static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_port *up, *down, *port;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_switch *parent_sw;
+ struct tb_tunnel *tunnel;
+
+ up = tb_find_port(sw, TB_TYPE_PCIE_UP);
+ if (!up)
+ return 0;
- list_add(&tunnel->list, &tcm->tunnel_list);
+ /*
+ * Look up available down port. Since we are chaining it should
+ * be found right above this switch.
+ */
+ parent_sw = tb_to_switch(sw->dev.parent);
+ port = tb_port_at(tb_route(sw), parent_sw);
+ down = tb_find_pcie_down(parent_sw, port);
+ if (!down)
+ return 0;
+
+ tunnel = tb_tunnel_alloc_pci(tb, up, down);
+ if (!tunnel)
+ return -ENOMEM;
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(up,
+ "PCIe tunnel activation failed, aborting\n");
+ tb_tunnel_free(tunnel);
+ return -EIO;
}
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ return 0;
}
-/* hotplug handling */
+static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *nhi_port, *dst_port;
+ struct tb_tunnel *tunnel;
+ struct tb_switch *sw;
-struct tb_hotplug_event {
- struct work_struct work;
- struct tb *tb;
- u64 route;
- u8 port;
- bool unplug;
-};
+ sw = tb_to_switch(xd->dev.parent);
+ dst_port = tb_port_at(xd->route, sw);
+ nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
+
+ mutex_lock(&tb->lock);
+ tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
+ xd->transmit_path, xd->receive_ring,
+ xd->receive_path);
+ if (!tunnel) {
+ mutex_unlock(&tb->lock);
+ return -ENOMEM;
+ }
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(nhi_port,
+ "DMA tunnel activation failed, aborting\n");
+ tb_tunnel_free(tunnel);
+ mutex_unlock(&tb->lock);
+ return -EIO;
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+
+static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ struct tb_port *dst_port;
+ struct tb_switch *sw;
+
+ sw = tb_to_switch(xd->dev.parent);
+ dst_port = tb_port_at(xd->route, sw);
+
+ /*
+ * It is possible that the tunnel was already torn down (in
+ * case of cable disconnect) so it is fine if we cannot find it
+ * here anymore.
+ */
+ tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+}
+
+static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ if (!xd->is_unplugged) {
+ mutex_lock(&tb->lock);
+ __tb_disconnect_xdomain_paths(tb, xd);
+ mutex_unlock(&tb->lock);
+ }
+ return 0;
+}
+
+/* hotplug handling */
/**
* tb_handle_hotplug() - handle hotplug event
@@ -258,7 +509,7 @@ static void tb_handle_hotplug(struct work_struct *work)
if (!tcm->hotplug_active)
goto out; /* during init, suspend or shutdown */
- sw = get_switch_at_route(tb->root_switch, ev->route);
+ sw = tb_switch_find_by_route(tb, ev->route);
if (!sw) {
tb_warn(tb,
"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
@@ -269,43 +520,60 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_warn(tb,
"hotplug event from non existent port %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
- goto out;
+ goto put_sw;
}
port = &sw->ports[ev->port];
if (tb_is_upstream_port(port)) {
- tb_warn(tb,
- "hotplug event for upstream port %llx:%x (unplug: %d)\n",
- ev->route, ev->port, ev->unplug);
- goto out;
+ tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
+ ev->route, ev->port, ev->unplug);
+ goto put_sw;
}
if (ev->unplug) {
- if (port->remote) {
- tb_port_info(port, "unplugged\n");
+ if (tb_port_has_remote(port)) {
+ tb_port_dbg(port, "switch unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
+ if (port->dual_link_port)
+ port->dual_link_port->remote = NULL;
+ } else if (port->xdomain) {
+ struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
+
+ tb_port_dbg(port, "xdomain unplugged\n");
+ /*
+ * Service drivers are unbound during
+ * tb_xdomain_remove() so setting XDomain as
+ * unplugged here prevents deadlock if they call
+ * tb_xdomain_disable_paths(). We will tear down
+ * the path below.
+ */
+ xd->is_unplugged = true;
+ tb_xdomain_remove(xd);
+ port->xdomain = NULL;
+ __tb_disconnect_xdomain_paths(tb, xd);
+ tb_xdomain_put(xd);
+ } else if (tb_port_is_dpout(port)) {
+ tb_teardown_dp(tb, port);
} else {
- tb_port_info(port,
- "got unplug event for disconnected port, ignoring\n");
+ tb_port_dbg(port,
+ "got unplug event for disconnected port, ignoring\n");
}
} else if (port->remote) {
- tb_port_info(port,
- "got plug event for connected port, ignoring\n");
+ tb_port_dbg(port, "got plug event for connected port, ignoring\n");
} else {
- tb_port_info(port, "hotplug: scanning\n");
- tb_scan_port(port);
- if (!port->remote) {
- tb_port_info(port, "hotplug: no switch found\n");
- } else if (port->remote->sw->config.depth > 1) {
- tb_sw_warn(port->remote->sw,
- "hotplug: chaining not supported\n");
- } else {
- tb_sw_info(port->remote->sw,
- "hotplug: activating pcie devices\n");
- tb_activate_pcie_devices(tb);
+ if (tb_port_is_null(port)) {
+ tb_port_dbg(port, "hotplug: scanning\n");
+ tb_scan_port(port);
+ if (!port->remote)
+ tb_port_dbg(port, "hotplug: no switch found\n");
+ } else if (tb_port_is_dpout(port)) {
+ tb_tunnel_dp(tb, port);
}
}
+
+put_sw:
+ tb_switch_put(sw);
out:
mutex_unlock(&tb->lock);
kfree(ev);
@@ -320,7 +588,6 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
const struct cfg_event_pkg *pkg = buf;
- struct tb_hotplug_event *ev;
u64 route;
if (type != TB_CFG_PKG_EVENT) {
@@ -336,40 +603,59 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
pkg->port);
}
- ev = kmalloc(sizeof(*ev), GFP_KERNEL);
- if (!ev)
- return;
- INIT_WORK(&ev->work, tb_handle_hotplug);
- ev->tb = tb;
- ev->route = route;
- ev->port = pkg->port;
- ev->unplug = pkg->unplug;
- queue_work(tb->wq, &ev->work);
+ tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
static void tb_stop(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
- struct tb_pci_tunnel *tunnel;
- struct tb_pci_tunnel *n;
+ struct tb_tunnel *tunnel;
+ struct tb_tunnel *n;
/* tunnels are only present after everything has been initialized */
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
- tb_pci_deactivate(tunnel);
- tb_pci_free(tunnel);
+ /*
+ * DMA tunnels require the driver to be functional so we
+ * tear them down. Other protocol tunnels can be left
+ * intact.
+ */
+ if (tb_tunnel_is_dma(tunnel))
+ tb_tunnel_deactivate(tunnel);
+ tb_tunnel_free(tunnel);
}
tb_switch_remove(tb->root_switch);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
+static int tb_scan_finalize_switch(struct device *dev, void *data)
+{
+ if (tb_is_switch(dev)) {
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ /*
+ * If we found that the switch was already setup by the
+ * boot firmware, mark it as authorized now before we
+ * send uevent to userspace.
+ */
+ if (sw->boot)
+ sw->authorized = 1;
+
+ dev_set_uevent_suppress(dev, false);
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+ device_for_each_child(dev, NULL, tb_scan_finalize_switch);
+ }
+
+ return 0;
+}
+
static int tb_start(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
int ret;
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
- if (!tb->root_switch)
- return -ENOMEM;
+ if (IS_ERR(tb->root_switch))
+ return PTR_ERR(tb->root_switch);
/*
* ICM firmware upgrade needs running firmware and in native
@@ -393,7 +679,11 @@ static int tb_start(struct tb *tb)
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
- tb_activate_pcie_devices(tb);
+ /* Find out tunnels created by the boot firmware */
+ tb_discover_tunnels(tb->root_switch);
+ /* Make the discovered switches available to userspace */
+ device_for_each_child(&tb->root_switch->dev, NULL,
+ tb_scan_finalize_switch);
/* Allow tb_handle_hotplug to progress events */
tcm->hotplug_active = true;
@@ -415,7 +705,7 @@ static int tb_suspend_noirq(struct tb *tb)
static int tb_resume_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
- struct tb_pci_tunnel *tunnel, *n;
+ struct tb_tunnel *tunnel, *n;
tb_dbg(tb, "resuming...\n");
@@ -426,7 +716,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
- tb_pci_restart(tunnel);
+ tb_tunnel_restart(tunnel);
if (!list_empty(&tcm->tunnel_list)) {
/*
* the pcie links need some time to get going.
@@ -442,12 +732,50 @@ static int tb_resume_noirq(struct tb *tb)
return 0;
}
+static int tb_free_unplugged_xdomains(struct tb_switch *sw)
+{
+ int i, ret = 0;
+
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ struct tb_port *port = &sw->ports[i];
+
+ if (tb_is_upstream_port(port))
+ continue;
+ if (port->xdomain && port->xdomain->is_unplugged) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
+ ret++;
+ } else if (port->remote) {
+ ret += tb_free_unplugged_xdomains(port->remote->sw);
+ }
+ }
+
+ return ret;
+}
+
+static void tb_complete(struct tb *tb)
+{
+ /*
+ * Release any unplugged XDomains and, if another domain was swapped
+ * in place of an unplugged XDomain, run another rescan.
+ */
+ mutex_lock(&tb->lock);
+ if (tb_free_unplugged_xdomains(tb->root_switch))
+ tb_scan_switch(tb->root_switch);
+ mutex_unlock(&tb->lock);
+}
+
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
+ .complete = tb_complete,
.handle_event = tb_handle_event,
+ .approve_switch = tb_tunnel_pci,
+ .approve_xdomain_paths = tb_approve_xdomain_paths,
+ .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
struct tb *tb_probe(struct tb_nhi *nhi)
@@ -462,7 +790,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
if (!tb)
return NULL;
- tb->security_level = TB_SECURITY_NONE;
+ tb->security_level = TB_SECURITY_USER;
tb->cm_ops = &tb_cm_ops;
tcm = tb_priv(tb);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 52584c4003e3..b12c8f33d89c 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -43,6 +43,7 @@ struct tb_switch_nvm {
};
#define TB_SWITCH_KEY_SIZE 32
+#define TB_SWITCH_MAX_DEPTH 6
/**
* struct tb_switch - a thunderbolt switch
@@ -62,6 +63,7 @@ struct tb_switch_nvm {
* @device_name: Name of the device (or %NULL if not known)
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @cap_lc: Offset to the link controller capability (%0 if not found)
* @is_unplugged: The switch is going away
* @drom: DROM of the switch (%NULL if not found)
* @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
@@ -70,7 +72,6 @@ struct tb_switch_nvm {
* @boot: Whether the switch was already authorized on boot or not
* @rpm: The switch supports runtime PM
* @authorized: Whether the switch is authorized by user or policy
- * @work: Work used to automatically authorize a switch
* @security_level: Switch supported security level
* @key: Contains the key used to challenge the device or %NULL if not
* supported. Size of the key is %TB_SWITCH_KEY_SIZE.
@@ -80,8 +81,7 @@ struct tb_switch_nvm {
* @depth: Depth in the chain this switch is connected (ICM only)
*
* When the switch is being added or removed to the domain (other
- * switches) you need to have domain lock held. For switch authorization
- * internal switch_lock is enough.
+ * switches) you need to have domain lock held.
*/
struct tb_switch {
struct device dev;
@@ -97,6 +97,7 @@ struct tb_switch {
const char *device_name;
unsigned int generation;
int cap_plug_events;
+ int cap_lc;
bool is_unplugged;
u8 *drom;
struct tb_switch_nvm *nvm;
@@ -105,7 +106,6 @@ struct tb_switch {
bool boot;
bool rpm;
unsigned int authorized;
- struct work_struct work;
enum tb_security_level security_level;
u8 *key;
u8 connection_id;
@@ -121,11 +121,14 @@ struct tb_switch {
* @remote: Remote port (%NULL if not connected)
* @xdomain: Remote host (%NULL if not connected)
* @cap_phy: Offset, zero if not found
+ * @cap_adap: Offset of the adapter specific capability (%0 if not present)
* @port: Port number on switch
* @disabled: Disabled by eeprom
* @dual_link_port: If the switch is connected using two ports, points
* to the other port.
* @link_nr: Is this primary or secondary port on the dual_link.
+ * @in_hopids: Currently allocated input HopIDs
+ * @out_hopids: Currently allocated output HopIDs
*/
struct tb_port {
struct tb_regs_port_header config;
@@ -133,19 +136,35 @@ struct tb_port {
struct tb_port *remote;
struct tb_xdomain *xdomain;
int cap_phy;
+ int cap_adap;
u8 port;
bool disabled;
struct tb_port *dual_link_port;
u8 link_nr:1;
+ struct ida in_hopids;
+ struct ida out_hopids;
};
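The @in_hopids and @out_hopids IDAs above back the HopID allocation helpers (tb_port_alloc_in_hopid() and friends) declared further down in this header; their implementations are not part of this hunk. A minimal sketch of what such an allocator can look like, assuming only the IDA fields and the TB_PATH_MIN_HOPID limit defined below; the example_* names are hypothetical:

static int example_alloc_in_hopid(struct tb_port *port, int min_hopid,
				  int max_hopid)
{
	/* HopIDs 0-7 are reserved by the protocol */
	if (min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	/* IDA hands out the lowest free HopID in [min_hopid, max_hopid] */
	return ida_simple_get(&port->in_hopids, min_hopid, max_hopid + 1,
			      GFP_KERNEL);
}

static void example_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}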
/**
* struct tb_path_hop - routing information for a tb_path
+ * @in_port: Ingress port of a switch
+ * @out_port: Egress port of a switch where the packet is routed out
+ * (must be on the same switch as @in_port)
+ * @in_hop_index: HopID where the path configuration entry is placed in
+ * the path config space of @in_port.
+ * @in_counter_index: Used counter index (not used in the driver
+ * currently, %-1 to disable)
+ * @next_hop_index: HopID of the packet when it is routed out from @out_port
+ * @initial_credits: Number of initial flow control credits allocated for
+ * the path
*
* Hop configuration is always done on the IN port of a switch.
* in_port and out_port have to be on the same switch. Packets arriving on
 * in_port with "hop" = in_hop_index will get routed through out_port. The
- * next hop to take (on out_port->remote) is determined by next_hop_index.
+ * next hop to take (on out_port->remote) is determined by
+ * next_hop_index. When routing packet to another switch (out->remote is
+ * set) the @next_hop_index must match the @in_hop_index of that next
+ * hop to make routing possible.
*
* in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
* port.
@@ -154,44 +173,71 @@ struct tb_path_hop {
struct tb_port *in_port;
struct tb_port *out_port;
int in_hop_index;
- int in_counter_index; /* write -1 to disable counters for this hop. */
+ int in_counter_index;
int next_hop_index;
+ unsigned int initial_credits;
};
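To make the chaining rule concrete: when a path crosses two switches, @next_hop_index of the first hop must equal @in_hop_index of the second. The hand-wired example below mirrors the fixed HopID 8 setup in the tunnel_pci.c code removed later in this patch; example_chain_two_hops() itself is illustrative only, since tb_path_alloc() now fills the hops internally:

static void example_chain_two_hops(struct tb_path *path,
				   struct tb_port *down, struct tb_port *up)
{
	/* Hop 0: enter at the downstream adapter, exit towards @up's switch */
	path->hops[0].in_port = down;
	path->hops[0].in_hop_index = 8;
	path->hops[0].in_counter_index = -1;	/* counters disabled */
	path->hops[0].out_port = tb_upstream_port(up->sw)->remote;
	path->hops[0].next_hop_index = 8;

	/* Hop 1: @in_hop_index here must match @next_hop_index above */
	path->hops[1].in_port = tb_upstream_port(up->sw);
	path->hops[1].in_hop_index = 8;
	path->hops[1].in_counter_index = -1;
	path->hops[1].out_port = up;
	path->hops[1].next_hop_index = 8;
}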
/**
* enum tb_path_port - path options mask
+ * @TB_PATH_NONE: Do not activate on any hop on path
+ * @TB_PATH_SOURCE: Activate on the first hop (out of src)
+ * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
+ * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
+ * @TB_PATH_ALL: Activate on all hops on the path
*/
enum tb_path_port {
TB_PATH_NONE = 0,
- TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */
- TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */
- TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */
+ TB_PATH_SOURCE = 1,
+ TB_PATH_INTERNAL = 2,
+ TB_PATH_DESTINATION = 4,
TB_PATH_ALL = 7,
};
/**
* struct tb_path - a unidirectional path between two ports
+ * @tb: Pointer to the domain structure
+ * @name: Name of the path (used for debugging)
+ * @nfc_credits: Number of non flow controlled credits allocated for the path
+ * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
+ * @egress_shared_buffer: Shared buffering used for egress ports on the path
+ * @ingress_fc_enable: Flow control for ingress ports on the path
+ * @egress_fc_enable: Flow control for egress ports on the path
+ * @priority: Priority group of the path
+ * @weight: Weight of the path inside the priority group
+ * @drop_packages: Drop packages from queue tail or head
+ * @activated: Is the path active
+ * @clear_fc: Clear all flow control from the path config space entries
+ * when deactivating this path
+ * @hops: Path hops
+ * @path_length: How many hops the path uses
*
- * A path consists of a number of hops (see tb_path_hop). To establish a PCIe
- * tunnel two paths have to be created between the two PCIe ports.
- *
+ * A path consists of a number of hops (see &struct tb_path_hop). To
+ * establish a PCIe tunnel two paths have to be created between the two
+ * PCIe ports.
*/
struct tb_path {
struct tb *tb;
- int nfc_credits; /* non flow controlled credits */
+ const char *name;
+ int nfc_credits;
enum tb_path_port ingress_shared_buffer;
enum tb_path_port egress_shared_buffer;
enum tb_path_port ingress_fc_enable;
enum tb_path_port egress_fc_enable;
- int priority:3;
+ unsigned int priority:3;
int weight:4;
bool drop_packages;
bool activated;
+ bool clear_fc;
struct tb_path_hop *hops;
- int path_length; /* number of hops */
+ int path_length;
};
+/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
+#define TB_PATH_MIN_HOPID 8
+#define TB_PATH_MAX_HOPS 7
+
/**
* struct tb_cm_ops - Connection manager specific operations vector
* @driver_ready: Called right after control channel is started. Used by
@@ -261,7 +307,20 @@ static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
return &sw->ports[sw->config.upstream_port_number];
}
-static inline u64 tb_route(struct tb_switch *sw)
+/**
+ * tb_is_upstream_port() - Is the port upstream facing
+ * @port: Port to check
+ *
+ * Returns true if @port is an upstream facing port. In case of dual link
+ * ports, both return true.
+ */
+static inline bool tb_is_upstream_port(const struct tb_port *port)
+{
+ const struct tb_port *upstream_port = tb_upstream_port(port->sw);
+ return port == upstream_port || port->dual_link_port == upstream_port;
+}
+
+static inline u64 tb_route(const struct tb_switch *sw)
{
return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
}
@@ -276,9 +335,54 @@ static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
return &sw->ports[port];
}
+/**
+ * tb_port_has_remote() - Does the port have a switch connected downstream
+ * @port: Port to check
+ *
+ * Returns true only when the port is a primary port and has a remote set.
+ */
+static inline bool tb_port_has_remote(const struct tb_port *port)
+{
+ if (tb_is_upstream_port(port))
+ return false;
+ if (!port->remote)
+ return false;
+ if (port->dual_link_port && port->link_nr)
+ return false;
+
+ return true;
+}
+
+static inline bool tb_port_is_null(const struct tb_port *port)
+{
+ return port && port->port && port->config.type == TB_TYPE_PORT;
+}
+
+static inline bool tb_port_is_pcie_down(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_PCIE_DOWN;
+}
+
+static inline bool tb_port_is_pcie_up(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_PCIE_UP;
+}
+
+static inline bool tb_port_is_dpin(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_DP_HDMI_IN;
+}
+
+static inline bool tb_port_is_dpout(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
+}
+
static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
+ if (sw->is_unplugged)
+ return -ENODEV;
return tb_cfg_read(sw->tb->ctl,
buffer,
tb_route(sw),
@@ -291,6 +395,8 @@ static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
+ if (sw->is_unplugged)
+ return -ENODEV;
return tb_cfg_write(sw->tb->ctl,
buffer,
tb_route(sw),
@@ -303,6 +409,8 @@ static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
static inline int tb_port_read(struct tb_port *port, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
+ if (port->sw->is_unplugged)
+ return -ENODEV;
return tb_cfg_read(port->sw->tb->ctl,
buffer,
tb_route(port->sw),
@@ -315,6 +423,8 @@ static inline int tb_port_read(struct tb_port *port, void *buffer,
static inline int tb_port_write(struct tb_port *port, const void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
+ if (port->sw->is_unplugged)
+ return -ENODEV;
return tb_cfg_write(port->sw->tb->ctl,
buffer,
tb_route(port->sw),
@@ -332,7 +442,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
#define __TB_SW_PRINT(level, sw, fmt, arg...) \
do { \
- struct tb_switch *__sw = (sw); \
+ const struct tb_switch *__sw = (sw); \
level(__sw->tb, "%llx: " fmt, \
tb_route(__sw), ## arg); \
} while (0)
@@ -343,7 +453,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
do { \
- struct tb_port *__port = (_port); \
+ const struct tb_port *__port = (_port); \
level(__port->sw->tb, "%llx:%x: " fmt, \
tb_route(__port->sw), __port->port, ## arg); \
} while (0)
@@ -385,6 +495,13 @@ int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
int tb_domain_disconnect_all_paths(struct tb *tb);
+static inline struct tb *tb_domain_get(struct tb *tb)
+{
+ if (tb)
+ get_device(&tb->dev);
+ return tb;
+}
+
static inline void tb_domain_put(struct tb *tb)
{
put_device(&tb->dev);
@@ -401,7 +518,6 @@ void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
-struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
@@ -431,14 +547,74 @@ static inline struct tb_switch *tb_to_switch(struct device *dev)
return NULL;
}
+static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
+{
+ return tb_to_switch(sw->dev.parent);
+}
+
+static inline bool tb_switch_is_lr(const struct tb_switch *sw)
+{
+ return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
+}
+
+static inline bool tb_switch_is_er(const struct tb_switch *sw)
+{
+ return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
+}
+
+static inline bool tb_switch_is_cr(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool tb_switch_is_fr(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
+int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
+int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
+void tb_port_release_in_hopid(struct tb_port *port, int hopid);
+int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
+void tb_port_release_out_hopid(struct tb_port *port, int hopid);
+struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
+ struct tb_port *prev);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
-
-struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
+bool tb_port_is_enabled(struct tb_port *port);
+
+bool tb_pci_port_is_enabled(struct tb_port *port);
+int tb_pci_port_enable(struct tb_port *port, bool enable);
+
+int tb_dp_port_hpd_is_active(struct tb_port *port);
+int tb_dp_port_hpd_clear(struct tb_port *port);
+int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+ unsigned int aux_tx, unsigned int aux_rx);
+bool tb_dp_port_is_enabled(struct tb_port *port);
+int tb_dp_port_enable(struct tb_port *port, bool enable);
+
+struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
+ struct tb_port *dst, int dst_hopid,
+ struct tb_port **last, const char *name);
+struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
+ struct tb_port *dst, int dst_hopid, int link_nr,
+ const char *name);
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
@@ -447,17 +623,16 @@ bool tb_path_is_invalid(struct tb_path *path);
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
+int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_configure_link(struct tb_switch *sw);
+void tb_lc_unconfigure_link(struct tb_switch *sw);
+int tb_lc_set_sleep(struct tb_switch *sw);
static inline int tb_route_length(u64 route)
{
return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}
-static inline bool tb_is_upstream_port(struct tb_port *port)
-{
- return port == tb_upstream_port(port->sw);
-}
-
/**
* tb_downstream_route() - get route to downstream switch
*
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 02c84aa3d018..afbe1d29bb03 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -492,6 +492,17 @@ struct tb_xdp_header {
u32 type;
};
+struct tb_xdp_uuid {
+ struct tb_xdp_header hdr;
+};
+
+struct tb_xdp_uuid_response {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ u32 src_route_hi;
+ u32 src_route_lo;
+};
+
struct tb_xdp_properties {
struct tb_xdp_header hdr;
uuid_t src_uuid;
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6f1ff04ee195..deb9d4a977b9 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -211,6 +211,38 @@ struct tb_regs_port_header {
} __packed;
+/* DWORD 4 */
+#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0)
+#define TB_PORT_MAX_CREDITS_SHIFT 20
+#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20)
+/* DWORD 5 */
+#define TB_PORT_LCA_SHIFT 22
+#define TB_PORT_LCA_MASK GENMASK(28, 22)
+
+/* Display Port adapter registers */
+
+/* DWORD 0 */
+#define TB_DP_VIDEO_HOPID_SHIFT 16
+#define TB_DP_VIDEO_HOPID_MASK GENMASK(26, 16)
+#define TB_DP_AUX_EN BIT(30)
+#define TB_DP_VIDEO_EN BIT(31)
+/* DWORD 1 */
+#define TB_DP_AUX_TX_HOPID_MASK GENMASK(10, 0)
+#define TB_DP_AUX_RX_HOPID_SHIFT 11
+#define TB_DP_AUX_RX_HOPID_MASK GENMASK(21, 11)
+/* DWORD 2 */
+#define TB_DP_HDP BIT(6)
+/* DWORD 3 */
+#define TB_DP_HPDC BIT(9)
+/* DWORD 4 */
+#define TB_DP_LOCAL_CAP 0x4
+/* DWORD 5 */
+#define TB_DP_REMOTE_CAP 0x5
+
+/* PCIe adapter registers */
+
+#define TB_PCI_EN BIT(31)
+
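TB_PCI_EN in DWORD 0 of the PCIe adapter capability is what enables tunneled PCIe traffic on an adapter port. The tb_pci_port_enable() helper declared in tb.h is expected to toggle this bit; its body is outside this hunk, so the following is only a sketch built from the register definition above and the @cap_adap offset added to struct tb_port:

static int example_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? TB_PCI_EN : 0x0;

	if (!port->cap_adap)
		return -ENXIO;

	/* Write DWORD 0 of the PCIe adapter capability */
	return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
}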
/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
/* DWORD 0 */
@@ -234,8 +266,24 @@ struct tb_regs_hop {
bool egress_fc:1;
bool ingress_shared_buffer:1;
bool egress_shared_buffer:1;
- u32 unknown3:4; /* set to zero */
+ bool pending:1;
+ u32 unknown3:3; /* set to zero */
} __packed;
+/* Common link controller registers */
+#define TB_LC_DESC 0x02
+#define TB_LC_DESC_NLC_MASK GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT 8
+#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SIZE_SHIFT 16
+#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
+#define TB_LC_FUSE 0x03
+
+/* Link controller registers */
+#define TB_LC_SX_CTRL 0x96
+#define TB_LC_SX_CTRL_L1C BIT(16)
+#define TB_LC_SX_CTRL_L2C BIT(20)
+#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
+#define TB_LC_SX_CTRL_SLP BIT(31)
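The TB_LC_SX_CTRL bits are consumed by the new link controller helpers declared in tb.h (tb_lc_set_sleep() and friends), whose implementation is not part of this hunk. As a rough sketch, putting a single link controller to sleep amounts to setting TB_LC_SX_CTRL_SLP in its control register; lc_offset here is a hypothetical parameter standing in for the per-controller register base derived from @cap_lc:

static int example_lc_sleep_one(struct tb_switch *sw, int lc_offset)
{
	u32 ctrl;
	int ret;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 lc_offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl |= TB_LC_SX_CTRL_SLP;
	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
			   lc_offset + TB_LC_SX_CTRL, 1);
}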
#endif
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
new file mode 100644
index 000000000000..31d0234837e4
--- /dev/null
+++ b/drivers/thunderbolt/tunnel.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - Tunneling support
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include "tunnel.h"
+#include "tb.h"
+
+/* PCIe adapters always use HopID 8 for both directions */
+#define TB_PCI_HOPID 8
+
+#define TB_PCI_PATH_DOWN 0
+#define TB_PCI_PATH_UP 1
+
+/* DP adapters use HopID 8 for AUX and 9 for Video */
+#define TB_DP_AUX_TX_HOPID 8
+#define TB_DP_AUX_RX_HOPID 8
+#define TB_DP_VIDEO_HOPID 9
+
+#define TB_DP_VIDEO_PATH_OUT 0
+#define TB_DP_AUX_PATH_OUT 1
+#define TB_DP_AUX_PATH_IN 2
+
+#define TB_DMA_PATH_OUT 0
+#define TB_DMA_PATH_IN 1
+
+static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
+
+#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
+ do { \
+ struct tb_tunnel *__tunnel = (tunnel); \
+ level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
+ tb_route(__tunnel->src_port->sw), \
+ __tunnel->src_port->port, \
+ tb_route(__tunnel->dst_port->sw), \
+ __tunnel->dst_port->port, \
+ tb_tunnel_names[__tunnel->type], \
+ ## arg); \
+ } while (0)
+
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+#define tb_tunnel_warn(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+#define tb_tunnel_info(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+#define tb_tunnel_dbg(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
+
+static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
+ enum tb_tunnel_type type)
+{
+ struct tb_tunnel *tunnel;
+
+ tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
+ if (!tunnel->paths) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&tunnel->list);
+ tunnel->tb = tb;
+ tunnel->npaths = npaths;
+ tunnel->type = type;
+
+ return tunnel;
+}
+
+static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
+{
+ int res;
+
+ res = tb_pci_port_enable(tunnel->src_port, activate);
+ if (res)
+ return res;
+
+ if (tb_port_is_pcie_up(tunnel->dst_port))
+ return tb_pci_port_enable(tunnel->dst_port, activate);
+
+ return 0;
+}
+
+static void tb_pci_init_path(struct tb_path *path)
+{
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 3;
+ path->weight = 1;
+ path->drop_packages = 0;
+ path->nfc_credits = 0;
+ path->hops[0].initial_credits = 7;
+ path->hops[1].initial_credits = 16;
+}
+
+/**
+ * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
+ * @tb: Pointer to the domain structure
+ * @down: PCIe downstream adapter
+ *
+ * If the @down adapter is active, follows the tunnel to the PCIe upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ if (!tb_pci_port_is_enabled(down))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_pci_activate;
+ tunnel->src_port = down;
+
+ /*
+ * Discover both paths even if they are not complete. We will
+ * clean them up by calling tb_tunnel_deactivate() below in that
+ * case.
+ */
+ path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
+ &tunnel->dst_port, "PCIe Up");
+ if (!path) {
+ /* Just disable the downstream port */
+ tb_pci_port_enable(down, false);
+ goto err_free;
+ }
+ tunnel->paths[TB_PCI_PATH_UP] = path;
+ tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);
+
+ path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
+ "PCIe Down");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_PCI_PATH_DOWN] = path;
+ tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);
+
+ /* Validate that the tunnel is complete */
+ if (!tb_port_is_pcie_up(tunnel->dst_port)) {
+ tb_port_warn(tunnel->dst_port,
+ "path does not end on a PCIe adapter, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (down != tunnel->src_port) {
+ tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
+ tb_tunnel_warn(tunnel,
+ "tunnel is not fully activated, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ tb_tunnel_dbg(tunnel, "discovered\n");
+ return tunnel;
+
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
+err_free:
+ tb_tunnel_free(tunnel);
+
+ return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_pci() - allocate a pci tunnel
+ * @tb: Pointer to the domain structure
+ * @up: PCIe upstream adapter port
+ * @down: PCIe downstream adapter port
+ *
+ * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
+ * TB_TYPE_PCIE_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+ struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_pci_activate;
+ tunnel->src_port = down;
+ tunnel->dst_port = up;
+
+ path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
+ "PCIe Down");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_pci_init_path(path);
+ tunnel->paths[TB_PCI_PATH_UP] = path;
+
+ path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
+ "PCIe Up");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_pci_init_path(path);
+ tunnel->paths[TB_PCI_PATH_DOWN] = path;
+
+ return tunnel;
+}
+
+static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
+{
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+ u32 in_dp_cap, out_dp_cap;
+ int ret;
+
+ /*
+ * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
+ * newer generation hardware.
+ */
+ if (in->sw->generation < 2 || out->sw->generation < 2)
+ return 0;
+
+ /* Read both DP_LOCAL_CAP registers */
+ ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
+ in->cap_adap + TB_DP_LOCAL_CAP, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
+ out->cap_adap + TB_DP_LOCAL_CAP, 1);
+ if (ret)
+ return ret;
+
+ /* Write IN local caps to OUT remote caps */
+ ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
+ out->cap_adap + TB_DP_REMOTE_CAP, 1);
+ if (ret)
+ return ret;
+
+ return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
+ in->cap_adap + TB_DP_REMOTE_CAP, 1);
+}
+
+static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
+{
+ int ret;
+
+ if (active) {
+ struct tb_path **paths;
+ int last;
+
+ paths = tunnel->paths;
+ last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
+
+ tb_dp_port_set_hops(tunnel->src_port,
+ paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
+ paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
+ paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
+
+ tb_dp_port_set_hops(tunnel->dst_port,
+ paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
+ paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
+ paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
+ } else {
+ tb_dp_port_hpd_clear(tunnel->src_port);
+ tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
+ if (tb_port_is_dpout(tunnel->dst_port))
+ tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
+ }
+
+ ret = tb_dp_port_enable(tunnel->src_port, active);
+ if (ret)
+ return ret;
+
+ if (tb_port_is_dpout(tunnel->dst_port))
+ return tb_dp_port_enable(tunnel->dst_port, active);
+
+ return 0;
+}
+
+static void tb_dp_init_aux_path(struct tb_path *path)
+{
+ int i;
+
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 2;
+ path->weight = 1;
+
+ for (i = 0; i < path->path_length; i++)
+ path->hops[i].initial_credits = 1;
+}
+
+static void tb_dp_init_video_path(struct tb_path *path, bool discover)
+{
+ u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;
+
+ path->egress_fc_enable = TB_PATH_NONE;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_NONE;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 1;
+ path->weight = 1;
+
+ if (discover) {
+ path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ } else {
+ u32 max_credits;
+
+ max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
+ TB_PORT_MAX_CREDITS_SHIFT;
+ /* Leave some credits for AUX path */
+ path->nfc_credits = min(max_credits - 2, 12U);
+ }
+}
+
+/**
+ * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
+ * @tb: Pointer to the domain structure
+ * @in: DP in adapter
+ *
+ * If the @in adapter is active, follows the tunnel to the DP out adapter
+ * and back.
+ *
+ * Return: The discovered tunnel or %NULL if there was no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *port;
+ struct tb_path *path;
+
+ if (!tb_dp_port_is_enabled(in))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->init = tb_dp_xchg_caps;
+ tunnel->activate = tb_dp_activate;
+ tunnel->src_port = in;
+
+ path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
+ &tunnel->dst_port, "Video");
+ if (!path) {
+ /* Just disable the DP IN port */
+ tb_dp_port_enable(in, false);
+ goto err_free;
+ }
+ tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
+ tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);
+
+ path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
+
+ path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
+ &port, "AUX RX");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_DP_AUX_PATH_IN] = path;
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
+
+ /* Validate that the tunnel is complete */
+ if (!tb_port_is_dpout(tunnel->dst_port)) {
+ tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_dp_port_is_enabled(tunnel->dst_port))
+ goto err_deactivate;
+
+ if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
+ goto err_deactivate;
+
+ if (port != tunnel->src_port) {
+ tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ tb_tunnel_dbg(tunnel, "discovered\n");
+ return tunnel;
+
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
+err_free:
+ tb_tunnel_free(tunnel);
+
+ return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
+ * @tb: Pointer to the domain structure
+ * @in: DP in adapter port
+ * @out: DP out adapter port
+ *
+ * Allocates a tunnel between @in and @out that is capable of tunneling
+ * Display Port traffic.
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path **paths;
+ struct tb_path *path;
+
+ if (WARN_ON(!in->cap_adap || !out->cap_adap))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->init = tb_dp_xchg_caps;
+ tunnel->activate = tb_dp_activate;
+ tunnel->src_port = in;
+ tunnel->dst_port = out;
+
+ paths = tunnel->paths;
+
+ path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
+ 1, "Video");
+ if (!path)
+ goto err_free;
+ tb_dp_init_video_path(path, false);
+ paths[TB_DP_VIDEO_PATH_OUT] = path;
+
+ path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
+ TB_DP_AUX_TX_HOPID, 1, "AUX TX");
+ if (!path)
+ goto err_free;
+ tb_dp_init_aux_path(path);
+ paths[TB_DP_AUX_PATH_OUT] = path;
+
+ path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
+ TB_DP_AUX_RX_HOPID, 1, "AUX RX");
+ if (!path)
+ goto err_free;
+ tb_dp_init_aux_path(path);
+ paths[TB_DP_AUX_PATH_IN] = path;
+
+ return tunnel;
+
+err_free:
+ tb_tunnel_free(tunnel);
+ return NULL;
+}
+
+static u32 tb_dma_credits(struct tb_port *nhi)
+{
+ u32 max_credits;
+
+ max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
+ TB_PORT_MAX_CREDITS_SHIFT;
+ return min(max_credits, 13U);
+}
+
+static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
+{
+ struct tb_port *nhi = tunnel->src_port;
+ u32 credits;
+
+ credits = active ? tb_dma_credits(nhi) : 0;
+ return tb_port_set_initial_credits(nhi, credits);
+}
+
+static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
+ unsigned int efc, u32 credits)
+{
+ int i;
+
+ path->egress_fc_enable = efc;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_shared_buffer = isb;
+ path->priority = 5;
+ path->weight = 1;
+ path->clear_fc = true;
+
+ for (i = 0; i < path->path_length; i++)
+ path->hops[i].initial_credits = credits;
+}
+
+/**
+ * tb_tunnel_alloc_dma() - allocate a DMA tunnel
+ * @tb: Pointer to the domain structure
+ * @nhi: Host controller port
+ * @dst: Destination null port which the other domain is connected to
+ * @transmit_ring: NHI ring number used to send packets towards the
+ * other domain
+ * @transmit_path: HopID used for transmitting packets
+ * @receive_ring: NHI ring number used to receive packets from the
+ * other domain
+ * @receive_path: HopID used for receiving packets
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
+ struct tb_port *dst, int transmit_ring,
+ int transmit_path, int receive_ring,
+ int receive_path)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+ u32 credits;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_dma_activate;
+ tunnel->src_port = nhi;
+ tunnel->dst_port = dst;
+
+ credits = tb_dma_credits(nhi);
+
+ path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
+ credits);
+ tunnel->paths[TB_DMA_PATH_IN] = path;
+
+ path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
+ tunnel->paths[TB_DMA_PATH_OUT] = path;
+
+ return tunnel;
+}
+
+/**
+ * tb_tunnel_free() - free a tunnel
+ * @tunnel: Tunnel to be freed
+ *
+ * Frees a tunnel. The tunnel does not need to be deactivated.
+ */
+void tb_tunnel_free(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ if (!tunnel)
+ return;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i])
+ tb_path_free(tunnel->paths[i]);
+ }
+
+ kfree(tunnel->paths);
+ kfree(tunnel);
+}
+
+/**
+ * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
+ * @tunnel: Tunnel to check
+ */
+bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ WARN_ON(!tunnel->paths[i]->activated);
+ if (tb_path_is_invalid(tunnel->paths[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * tb_tunnel_restart() - activate a tunnel after a hardware reset
+ * @tunnel: Tunnel to restart
+ *
+ * Return: 0 on success and negative errno in case of failure
+ */
+int tb_tunnel_restart(struct tb_tunnel *tunnel)
+{
+ int res, i;
+
+ tb_tunnel_dbg(tunnel, "activating\n");
+
+ /*
+ * Make sure all paths are properly disabled before enabling
+ * them again.
+ */
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i]->activated) {
+ tb_path_deactivate(tunnel->paths[i]);
+ tunnel->paths[i]->activated = false;
+ }
+ }
+
+ if (tunnel->init) {
+ res = tunnel->init(tunnel);
+ if (res)
+ return res;
+ }
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ res = tb_path_activate(tunnel->paths[i]);
+ if (res)
+ goto err;
+ }
+
+ if (tunnel->activate) {
+ res = tunnel->activate(tunnel, true);
+ if (res)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ tb_tunnel_warn(tunnel, "activation failed\n");
+ tb_tunnel_deactivate(tunnel);
+ return res;
+}
+
+/**
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i]->activated) {
+ tb_tunnel_WARN(tunnel,
+ "trying to activate an already activated tunnel\n");
+ return -EINVAL;
+ }
+ }
+
+ return tb_tunnel_restart(tunnel);
+}
+
+/**
+ * tb_tunnel_deactivate() - deactivate a tunnel
+ * @tunnel: Tunnel to deactivate
+ */
+void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ tb_tunnel_dbg(tunnel, "deactivating\n");
+
+ if (tunnel->activate)
+ tunnel->activate(tunnel, false);
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i] && tunnel->paths[i]->activated)
+ tb_path_deactivate(tunnel->paths[i]);
+ }
+}
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
new file mode 100644
index 000000000000..c68bbcd3a62c
--- /dev/null
+++ b/drivers/thunderbolt/tunnel.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - Tunneling support
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#ifndef TB_TUNNEL_H_
+#define TB_TUNNEL_H_
+
+#include "tb.h"
+
+enum tb_tunnel_type {
+ TB_TUNNEL_PCI,
+ TB_TUNNEL_DP,
+ TB_TUNNEL_DMA,
+};
+
+/**
+ * struct tb_tunnel - Tunnel between two ports
+ * @tb: Pointer to the domain
+ * @src_port: Source port of the tunnel
+ * @dst_port: Destination port of the tunnel. For discovered incomplete
+ * tunnels this may be %NULL or a null adapter port instead.
+ * @paths: All paths required by the tunnel
+ * @npaths: Number of paths in @paths
+ * @init: Optional tunnel specific initialization
+ * @activate: Optional tunnel specific activation/deactivation
+ * @list: Tunnels are linked using this field
+ * @type: Type of the tunnel
+ */
+struct tb_tunnel {
+ struct tb *tb;
+ struct tb_port *src_port;
+ struct tb_port *dst_port;
+ struct tb_path **paths;
+ size_t npaths;
+ int (*init)(struct tb_tunnel *tunnel);
+ int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ struct list_head list;
+ enum tb_tunnel_type type;
+};
+
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+ struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
+struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out);
+struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
+ struct tb_port *dst, int transmit_ring,
+ int transmit_path, int receive_ring,
+ int receive_path);
+
+void tb_tunnel_free(struct tb_tunnel *tunnel);
+int tb_tunnel_activate(struct tb_tunnel *tunnel);
+int tb_tunnel_restart(struct tb_tunnel *tunnel);
+void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
+bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
+
+static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_PCI;
+}
+
+static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_DP;
+}
+
+static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_DMA;
+}
+
+#endif
+
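To show how the API above fits together, here is a usage sketch of the way a connection manager can drive a PCIe tunnel end to end; example_tunnel_pcie() and the tunnels list head are illustrative, and the error codes are arbitrary:

static int example_tunnel_pcie(struct tb *tb, struct tb_port *up,
			       struct tb_port *down, struct list_head *tunnels)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/* Track the tunnel so it can be restarted or torn down later */
	list_add_tail(&tunnel->list, tunnels);
	return 0;
}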
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
deleted file mode 100644
index 0637537ea53f..000000000000
--- a/drivers/thunderbolt/tunnel_pci.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Thunderbolt Cactus Ridge driver - PCIe tunnel
- *
- * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
- */
-
-#include <linux/slab.h>
-#include <linux/list.h>
-
-#include "tunnel_pci.h"
-#include "tb.h"
-
-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
- do { \
- struct tb_pci_tunnel *__tunnel = (tunnel); \
- level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \
- tb_route(__tunnel->down_port->sw), \
- __tunnel->down_port->port, \
- tb_route(__tunnel->up_port->sw), \
- __tunnel->up_port->port, \
- ## arg); \
- } while (0)
-
-#define tb_tunnel_WARN(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
-#define tb_tunnel_warn(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
-#define tb_tunnel_info(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
-
-static void tb_pci_init_path(struct tb_path *path)
-{
- path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
- path->egress_shared_buffer = TB_PATH_NONE;
- path->ingress_fc_enable = TB_PATH_ALL;
- path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 3;
- path->weight = 1;
- path->drop_packages = 0;
- path->nfc_credits = 0;
-}
-
-/**
- * tb_pci_alloc() - allocate a pci tunnel
- *
- * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
- * TB_TYPE_PCIE_DOWN.
- *
- * Currently only paths consisting of two hops are supported (that is the
- * ports must be on "adjacent" switches).
- *
- * The paths are hard-coded to use hop 8 (the only working hop id available on
- * my thunderbolt devices). Therefore at most ONE path per device may be
- * activated.
- *
- * Return: Returns a tb_pci_tunnel on success or NULL on failure.
- */
-struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
- struct tb_port *down)
-{
- struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
- if (!tunnel)
- goto err;
- tunnel->tb = tb;
- tunnel->down_port = down;
- tunnel->up_port = up;
- INIT_LIST_HEAD(&tunnel->list);
- tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);
- if (!tunnel->path_to_up)
- goto err;
- tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);
- if (!tunnel->path_to_down)
- goto err;
- tb_pci_init_path(tunnel->path_to_up);
- tb_pci_init_path(tunnel->path_to_down);
-
- tunnel->path_to_up->hops[0].in_port = down;
- tunnel->path_to_up->hops[0].in_hop_index = 8;
- tunnel->path_to_up->hops[0].in_counter_index = -1;
- tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
- tunnel->path_to_up->hops[0].next_hop_index = 8;
-
- tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
- tunnel->path_to_up->hops[1].in_hop_index = 8;
- tunnel->path_to_up->hops[1].in_counter_index = -1;
- tunnel->path_to_up->hops[1].out_port = up;
- tunnel->path_to_up->hops[1].next_hop_index = 8;
-
- tunnel->path_to_down->hops[0].in_port = up;
- tunnel->path_to_down->hops[0].in_hop_index = 8;
- tunnel->path_to_down->hops[0].in_counter_index = -1;
- tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
- tunnel->path_to_down->hops[0].next_hop_index = 8;
-
- tunnel->path_to_down->hops[1].in_port =
- tb_upstream_port(up->sw)->remote;
- tunnel->path_to_down->hops[1].in_hop_index = 8;
- tunnel->path_to_down->hops[1].in_counter_index = -1;
- tunnel->path_to_down->hops[1].out_port = down;
- tunnel->path_to_down->hops[1].next_hop_index = 8;
- return tunnel;
-
-err:
- if (tunnel) {
- if (tunnel->path_to_down)
- tb_path_free(tunnel->path_to_down);
- if (tunnel->path_to_up)
- tb_path_free(tunnel->path_to_up);
- kfree(tunnel);
- }
- return NULL;
-}
-
-/**
- * tb_pci_free() - free a tunnel
- *
- * The tunnel must have been deactivated.
- */
-void tb_pci_free(struct tb_pci_tunnel *tunnel)
-{
- if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
- tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n");
- return;
- }
- tb_path_free(tunnel->path_to_up);
- tb_path_free(tunnel->path_to_down);
- kfree(tunnel);
-}
-
-/**
- * tb_pci_is_invalid - check whether an activated path is still valid
- */
-bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
-{
- WARN_ON(!tunnel->path_to_up->activated);
- WARN_ON(!tunnel->path_to_down->activated);
-
- return tb_path_is_invalid(tunnel->path_to_up)
- || tb_path_is_invalid(tunnel->path_to_down);
-}
-
-/**
- * tb_pci_port_active() - activate/deactivate PCI capability
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-static int tb_pci_port_active(struct tb_port *port, bool active)
-{
- u32 word = active ? 0x80000000 : 0x0;
- int cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
- if (cap < 0) {
- tb_port_warn(port, "TB_PORT_CAP_ADAP not found: %d\n", cap);
- return cap;
- }
- return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
-}
-
-/**
- * tb_pci_restart() - activate a tunnel after a hardware reset
- */
-int tb_pci_restart(struct tb_pci_tunnel *tunnel)
-{
- int res;
- tunnel->path_to_up->activated = false;
- tunnel->path_to_down->activated = false;
-
- tb_tunnel_info(tunnel, "activating\n");
-
- res = tb_path_activate(tunnel->path_to_up);
- if (res)
- goto err;
- res = tb_path_activate(tunnel->path_to_down);
- if (res)
- goto err;
-
- res = tb_pci_port_active(tunnel->down_port, true);
- if (res)
- goto err;
-
- res = tb_pci_port_active(tunnel->up_port, true);
- if (res)
- goto err;
- return 0;
-err:
- tb_tunnel_warn(tunnel, "activation failed\n");
- tb_pci_deactivate(tunnel);
- return res;
-}
-
-/**
- * tb_pci_activate() - activate a tunnel
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_pci_activate(struct tb_pci_tunnel *tunnel)
-{
- if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
- tb_tunnel_WARN(tunnel,
- "trying to activate an already activated tunnel\n");
- return -EINVAL;
- }
-
- return tb_pci_restart(tunnel);
-}
-
-
-
-/**
- * tb_pci_deactivate() - deactivate a tunnel
- */
-void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
-{
- tb_tunnel_info(tunnel, "deactivating\n");
- /*
- * TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up
- * port. Seems to have no effect?
- */
- tb_pci_port_active(tunnel->up_port, false);
- tb_pci_port_active(tunnel->down_port, false);
- if (tunnel->path_to_down->activated)
- tb_path_deactivate(tunnel->path_to_down);
- if (tunnel->path_to_up->activated)
- tb_path_deactivate(tunnel->path_to_up);
-}
-
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h
deleted file mode 100644
index f9b65fa1fd4d..000000000000
--- a/drivers/thunderbolt/tunnel_pci.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Thunderbolt Cactus Ridge driver - PCIe tunnel
- *
- * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
- */
-
-#ifndef TB_PCI_H_
-#define TB_PCI_H_
-
-#include "tb.h"
-
-struct tb_pci_tunnel {
- struct tb *tb;
- struct tb_port *up_port;
- struct tb_port *down_port;
- struct tb_path *path_to_up;
- struct tb_path *path_to_down;
- struct list_head list;
-};
-
-struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
- struct tb_port *down);
-void tb_pci_free(struct tb_pci_tunnel *tunnel);
-int tb_pci_activate(struct tb_pci_tunnel *tunnel);
-int tb_pci_restart(struct tb_pci_tunnel *tunnel);
-void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
-bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);
-
-#endif
-
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index e27dd8beb94b..5118d46702d5 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -18,6 +18,7 @@
#include "tb.h"
#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */
+#define XDOMAIN_UUID_RETRIES 10
#define XDOMAIN_PROPERTIES_RETRIES 60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
@@ -222,6 +223,50 @@ static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
return 0;
}
+static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
+ uuid_t *uuid)
+{
+ struct tb_xdp_uuid_response res;
+ struct tb_xdp_uuid req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
+ sizeof(req));
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.hdr);
+ if (ret)
+ return ret;
+
+ uuid_copy(uuid, &res.src_uuid);
+ return 0;
+}
+
+static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+ const uuid_t *uuid)
+{
+ struct tb_xdp_uuid_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
+ sizeof(res));
+
+ uuid_copy(&res.src_uuid, uuid);
+ res.src_route_hi = upper_32_bits(route);
+ res.src_route_lo = lower_32_bits(route);
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
enum tb_xdp_error error)
{
@@ -512,7 +557,14 @@ static void tb_xdp_handle_request(struct work_struct *work)
break;
}
+ case UUID_REQUEST_OLD:
+ case UUID_REQUEST:
+ ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
+ break;
+
default:
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_SUPPORTED);
break;
}
@@ -524,9 +576,11 @@ static void tb_xdp_handle_request(struct work_struct *work)
out:
kfree(xw->pkg);
kfree(xw);
+
+ tb_domain_put(tb);
}
-static void
+static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
size_t size)
{
@@ -534,13 +588,18 @@ tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
xw = kmalloc(sizeof(*xw), GFP_KERNEL);
if (!xw)
- return;
+ return false;
INIT_WORK(&xw->work, tb_xdp_handle_request);
xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
- xw->tb = tb;
+ if (!xw->pkg) {
+ kfree(xw);
+ return false;
+ }
+ xw->tb = tb_domain_get(tb);
- queue_work(tb->wq, &xw->work);
+ schedule_work(&xw->work);
+ return true;
}
/**
@@ -740,6 +799,7 @@ static void enumerate_services(struct tb_xdomain *xd)
struct tb_service *svc;
struct tb_property *p;
struct device *dev;
+ int id;
/*
* First remove all services that are not available anymore in
@@ -768,7 +828,12 @@ static void enumerate_services(struct tb_xdomain *xd)
break;
}
- svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ kfree(svc);
+ break;
+ }
+ svc->id = id;
svc->dev.bus = &tb_bus_type;
svc->dev.type = &tb_service_type;
svc->dev.parent = &xd->dev;
@@ -826,6 +891,55 @@ static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
}
}
+static void tb_xdomain_get_uuid(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ get_uuid_work.work);
+ struct tb *tb = xd->tb;
+ uuid_t uuid;
+ int ret;
+
+ ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+ if (ret < 0) {
+ if (xd->uuid_retries-- > 0) {
+ queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+ msecs_to_jiffies(100));
+ } else {
+ dev_dbg(&xd->dev, "failed to read remote UUID\n");
+ }
+ return;
+ }
+
+ if (uuid_equal(&uuid, xd->local_uuid)) {
+ dev_dbg(&xd->dev, "intra-domain loop detected\n");
+ return;
+ }
+
+ /*
+ * If the UUID is different, there is another domain connected
+ * so mark this one unplugged and wait for the connection
+ * manager to replace it.
+ */
+ if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
+ dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
+ xd->is_unplugged = true;
+ return;
+ }
+
+ /* First time fill in the missing UUID */
+ if (!xd->remote_uuid) {
+ xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->remote_uuid)
+ return;
+ }
+
+ /* Now we can start the normal properties exchange */
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(100));
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+}
+
static void tb_xdomain_get_properties(struct work_struct *work)
{
struct tb_xdomain *xd = container_of(work, typeof(*xd),
@@ -1032,21 +1146,29 @@ static void tb_xdomain_release(struct device *dev)
static void start_handshake(struct tb_xdomain *xd)
{
+ xd->uuid_retries = XDOMAIN_UUID_RETRIES;
xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
- /* Start exchanging properties with the other host */
- queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
- msecs_to_jiffies(100));
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
+ if (xd->needs_uuid) {
+ queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+ msecs_to_jiffies(100));
+ } else {
+ /* Start exchanging properties with the other host */
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(100));
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+ }
}
static void stop_handshake(struct tb_xdomain *xd)
{
+ xd->uuid_retries = 0;
xd->properties_retries = 0;
xd->properties_changed_retries = 0;
+ cancel_delayed_work_sync(&xd->get_uuid_work);
cancel_delayed_work_sync(&xd->get_properties_work);
cancel_delayed_work_sync(&xd->properties_changed_work);
}
@@ -1089,7 +1211,7 @@ EXPORT_SYMBOL_GPL(tb_xdomain_type);
* other domain is reached).
* @route: Route string used to reach the other domain
* @local_uuid: Our local domain UUID
- * @remote_uuid: UUID of the other domain
+ * @remote_uuid: UUID of the other domain (optional)
*
* Allocates new XDomain structure and returns pointer to that. The
* object must be released by calling tb_xdomain_put().
@@ -1108,6 +1230,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
xd->route = route;
ida_init(&xd->service_ids);
mutex_init(&xd->lock);
+ INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
INIT_DELAYED_WORK(&xd->properties_changed_work,
tb_xdomain_properties_changed);
@@ -1116,9 +1239,14 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
if (!xd->local_uuid)
goto err_free;
- xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
- if (!xd->remote_uuid)
- goto err_free_local_uuid;
+ if (remote_uuid) {
+ xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
+ GFP_KERNEL);
+ if (!xd->remote_uuid)
+ goto err_free_local_uuid;
+ } else {
+ xd->needs_uuid = true;
+ }
device_initialize(&xd->dev);
xd->dev.parent = get_device(parent);
@@ -1282,14 +1410,12 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
struct tb_port *port = &sw->ports[i];
struct tb_xdomain *xd;
- if (tb_is_upstream_port(port))
- continue;
-
if (port->xdomain) {
xd = port->xdomain;
if (lookup->uuid) {
- if (uuid_equal(xd->remote_uuid, lookup->uuid))
+ if (xd->remote_uuid &&
+ uuid_equal(xd->remote_uuid, lookup->uuid))
return xd;
} else if (lookup->link &&
lookup->link == xd->link &&
@@ -1299,7 +1425,7 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
lookup->route == xd->route) {
return xd;
}
- } else if (port->remote) {
+ } else if (tb_port_has_remote(port)) {
xd = switch_find_xdomain(port->remote->sw, lookup);
if (xd)
return xd;
@@ -1416,10 +1542,8 @@ bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
* handlers in turn.
*/
if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
- if (type == TB_CFG_PKG_XDOMAIN_REQ) {
- tb_xdp_schedule_request(tb, hdr, size);
- return true;
- }
+ if (type == TB_CFG_PKG_XDOMAIN_REQ)
+ return tb_xdp_schedule_request(tb, hdr, size);
return false;
}
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index e0a04bfc873e..3b1d312bb175 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config TTY
bool "Enable TTY" if EXPERT
default y
@@ -83,7 +84,6 @@ config HW_CONSOLE
config VT_HW_CONSOLE_BINDING
bool "Support for binding and unbinding console drivers"
depends on HW_CONSOLE
- default n
---help---
The virtual terminal is the device that interacts with the physical
terminal through console drivers. On these systems, at least one
@@ -175,7 +175,7 @@ config ROCKETPORT
This driver supports Comtrol RocketPort and RocketModem PCI boards.
These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
modems. For information about the RocketPort/RocketModem boards
- and this driver read <file:Documentation/serial/rocket.txt>.
+ and this driver read <file:Documentation/serial/rocket.rst>.
To compile this driver as a module, choose M here: the
module will be called rocket.
@@ -193,7 +193,7 @@ config CYCLADES
your Linux box, for instance in order to become a dial-in server.
For information about the Cyclades-Z card, read
- <file:Documentation/serial/README.cycladesZ>.
+ <file:Documentation/serial/cyclades_z.rst>.
To compile this driver as a module, choose M here: the
module will be called cyclades.
@@ -312,7 +312,6 @@ config N_GSM
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
- default n
help
The trace router uses the Linux tty line discipline framework to
route trace data coming from a tty port (say UART for example) to
@@ -328,7 +327,6 @@ config TRACE_ROUTER
config TRACE_SINK
tristate "Trace data sink for MIPI P1149.7 cJTAG standard"
- default n
help
The trace sink uses the Linux line discipline framework to receive
trace data coming from the trace router line discipline driver
@@ -376,6 +374,20 @@ config PPC_EARLY_DEBUG_EHV_BC_HANDLE
there simply will be no early console output. This is true also
if you don't boot under a hypervisor at all.
+config NULL_TTY
+ tristate "NULL TTY driver"
+ help
+ Say Y here if you want a NULL TTY which simply discards messages.
+
+ This is useful to allow userspace applications which expect a console
+ device to work without modifications even when no console is
+ available or desired.
+
+ In order to use this driver, you should redirect the console to this
+ TTY, or boot the kernel with console=ttynull.
+
+ If unsure, say N.
+
config GOLDFISH_TTY
tristate "Goldfish TTY Driver"
depends on GOLDFISH
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index c72cafdf32b4..020b1cd9294f 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
obj-$(CONFIG_NOZOMI) += nozomi.o
+obj-$(CONFIG_NULL_TTY) += ttynull.o
obj-$(CONFIG_ROCKETPORT) += rocket.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4293c172e120..4d22b911111f 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
if TTY
config HVC_DRIVER
@@ -24,7 +25,6 @@ config HVC_CONSOLE
config HVC_OLD_HVSI
bool "Old driver for pSeries serial port (/dev/hvsi*)"
depends on HVC_CONSOLE
- default n
config HVC_OPAL
bool "OPAL Console support"
@@ -73,7 +73,6 @@ config HVC_UDBG
bool "udbg based fake hypervisor console"
depends on PPC
select HVC_DRIVER
- default n
help
This is meant to be used during HW bring up or debugging when
no other console mechanism exist but udbg, to get you a quick
diff --git a/drivers/tty/ipwireless/Makefile b/drivers/tty/ipwireless/Makefile
index fe2e1730986b..a665d021e24d 100644
--- a/drivers/tty/ipwireless/Makefile
+++ b/drivers/tty/ipwireless/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the IPWireless driver
#
diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
index 3475e841ef5c..4c18bbfe1a92 100644
--- a/drivers/tty/ipwireless/main.c
+++ b/drivers/tty/ipwireless/main.c
@@ -114,6 +114,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->common_memory = ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
+ if (!ipw->common_memory) {
+ ret = -ENOMEM;
+ goto exit1;
+ }
if (!request_mem_region(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]),
IPWIRELESS_PCCARD_NAME)) {
@@ -134,6 +138,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->attr_memory = ioremap(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]));
+ if (!ipw->attr_memory) {
+ ret = -ENOMEM;
+ goto exit3;
+ }
if (!request_mem_region(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]),
IPWIRELESS_PCCARD_NAME)) {
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 9cdb0fa3c4bf..f9c584244f72 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -550,9 +550,9 @@ static ssize_t process_output_block(struct tty_struct *tty,
mutex_lock(&ldata->output_lock);
space = tty_write_room(tty);
- if (!space) {
+ if (space <= 0) {
mutex_unlock(&ldata->output_lock);
- return 0;
+ return space;
}
if (nr > space)
nr = space;
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index b121d8f8f3d7..5ba6816ebf81 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -266,7 +266,7 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1
module_param_array(pc104_4, ulong, NULL, 0);
MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...");
-static int rp_init(void);
+static int __init rp_init(void);
static void rp_cleanup_module(void);
module_init(rp_init);
@@ -1283,23 +1283,29 @@ static int rp_ioctl(struct tty_struct *tty,
return -ENXIO;
switch (cmd) {
- case RCKP_GET_STRUCT:
- if (copy_to_user(argp, info, sizeof (struct r_port)))
- ret = -EFAULT;
- break;
case RCKP_GET_CONFIG:
+ dev_warn_ratelimited(tty->dev,
+ "RCKP_GET_CONFIG option is deprecated\n");
ret = get_config(info, argp);
break;
case RCKP_SET_CONFIG:
+ dev_warn_ratelimited(tty->dev,
+ "RCKP_SET_CONFIG option is deprecated\n");
ret = set_config(tty, info, argp);
break;
case RCKP_GET_PORTS:
+ dev_warn_ratelimited(tty->dev,
+ "RCKP_GET_PORTS option is deprecated\n");
ret = get_ports(info, argp);
break;
case RCKP_RESET_RM2:
+ dev_warn_ratelimited(tty->dev,
+ "RCKP_RESET_RM2 option is deprecated\n");
ret = reset_rm2(info, argp);
break;
case RCKP_GET_VERSION:
+ dev_warn_ratelimited(tty->dev,
+ "RCKP_GET_VERSION option is deprecated\n");
ret = get_version(info, argp);
break;
default:
diff --git a/drivers/tty/rocket.h b/drivers/tty/rocket.h
index d0560203f215..d62ed6587f32 100644
--- a/drivers/tty/rocket.h
+++ b/drivers/tty/rocket.h
@@ -71,7 +71,6 @@ struct rocket_version {
/*
* Rocketport ioctls -- "RP"
*/
-#define RCKP_GET_STRUCT 0x00525001
#define RCKP_GET_CONFIG 0x00525002
#define RCKP_SET_CONFIG 0x00525003
#define RCKP_GET_PORTS 0x00525004
diff --git a/drivers/tty/serdev/Kconfig b/drivers/tty/serdev/Kconfig
index 1dbc8352e027..46ae732bfc68 100644
--- a/drivers/tty/serdev/Kconfig
+++ b/drivers/tty/serdev/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Serial bus device driver configuration
#
diff --git a/drivers/tty/serdev/Makefile b/drivers/tty/serdev/Makefile
index 0cbdb9444d9d..078417e5b068 100644
--- a/drivers/tty/serdev/Makefile
+++ b/drivers/tty/serdev/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
serdev-objs := core.o
obj-$(CONFIG_SERIAL_DEV_BUS) += serdev.o
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index d31b975dd3fd..284e8d052fc3 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
{
- return param == chan->device->dev->parent;
+ return param == chan->device->dev;
}
/*
@@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
data->uart_16550_compatible = true;
}
- /* Platforms with iDMA */
+ /* Platforms with iDMA 64-bit */
if (platform_get_resource_byname(to_platform_device(p->dev),
IORESOURCE_MEM, "lpss_priv")) {
data->dma.rx_param = p->dev->parent;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 0089aa305ef9..edd6dfe055bf 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -361,12 +361,15 @@ static const struct exar8250_platform iot2040_platform = {
.register_gpio = iot2040_register_gpio,
};
+/*
+ * For SIMATIC IOT2000, only IOT2040 and its variants have the Exar device,
+ * IOT2020 doesn't have. Therefore it is sufficient to match on the common
+ * board name after the device was found.
+ */
static const struct dmi_system_id exar_platforms[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-1YA2"),
},
.driver_data = (void *)&iot2040_platform,
},
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 79a4958b3f5c..31c91c2f8c6e 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -303,8 +303,9 @@ static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
}
}
-void fintek_8250_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+static void fintek_8250_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
{
struct fintek_8250 *pdata = port->private_data;
unsigned int baud = tty_termios_baud_rate(termios);
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index 127017cc41d9..02c5aff58a74 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index c1fdbc0b6840..417c7c810df9 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -21,15 +21,32 @@
#include "8250.h"
-#define UART_MTK_HIGHS 0x09 /* Highspeed register */
-#define UART_MTK_SAMPLE_COUNT 0x0a /* Sample count register */
-#define UART_MTK_SAMPLE_POINT 0x0b /* Sample point register */
+#define MTK_UART_HIGHS 0x09 /* Highspeed register */
+#define MTK_UART_SAMPLE_COUNT 0x0a /* Sample count register */
+#define MTK_UART_SAMPLE_POINT 0x0b /* Sample point register */
#define MTK_UART_RATE_FIX 0x0d /* UART Rate Fix Register */
-
+#define MTK_UART_ESCAPE_DAT 0x10 /* Escape Character register */
+#define MTK_UART_ESCAPE_EN 0x11 /* Escape Enable register */
#define MTK_UART_DMA_EN 0x13 /* DMA Enable register */
+#define MTK_UART_RXTRI_AD 0x14 /* RX Trigger address */
+#define MTK_UART_FRACDIV_L 0x15 /* Fractional divider LSB address */
+#define MTK_UART_FRACDIV_M 0x16 /* Fractional divider MSB address */
+#define MTK_UART_IER_XOFFI 0x20 /* Enable XOFF character interrupt */
+#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
+#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
+
+#define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */
+#define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */
+#define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */
+#define MTK_UART_EFR_NO_SW_FC 0x0 /* no sw flow control */
+#define MTK_UART_EFR_XON1_XOFF1 0xa /* XON1/XOFF1 as sw flow control */
+#define MTK_UART_EFR_XON2_XOFF2 0x5 /* XON2/XOFF2 as sw flow control */
+#define MTK_UART_EFR_SW_FC_MASK	0xf /* Software flow control field mask */
+#define MTK_UART_EFR_HW_FC (MTK_UART_EFR_RTS | MTK_UART_EFR_CTS)
#define MTK_UART_DMA_EN_TX 0x2
#define MTK_UART_DMA_EN_RX 0x5
+#define MTK_UART_ESCAPE_CHAR 0x77 /* Escape char added under sw fc */
#define MTK_UART_TX_SIZE UART_XMIT_SIZE
#define MTK_UART_RX_SIZE 0x8000
#define MTK_UART_TX_TRIGGER 1
@@ -46,6 +63,7 @@ enum dma_rx_status {
struct mtk8250_data {
int line;
unsigned int rx_pos;
+ unsigned int clk_count;
struct clk *uart_clk;
struct clk *bus_clk;
struct uart_8250_dma *dma;
@@ -54,6 +72,13 @@ struct mtk8250_data {
#endif
};
+/* flow control mode */
+enum {
+ MTK_UART_FC_NONE,
+ MTK_UART_FC_SW,
+ MTK_UART_FC_HW,
+};
+
#ifdef CONFIG_SERIAL_8250_DMA
static void mtk8250_rx_dma(struct uart_8250_port *up);
@@ -192,13 +217,89 @@ static void mtk8250_shutdown(struct uart_port *port)
return serial8250_do_shutdown(port);
}
+static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
+{
+ serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask));
+}
+
+static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask)
+{
+ serial_out(up, UART_IER, serial_in(up, UART_IER) | mask);
+}
+
+static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
+{
+ struct uart_port *port = &up->port;
+ int lcr = serial_in(up, UART_LCR);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, lcr);
+ lcr = serial_in(up, UART_LCR);
+
+ switch (mode) {
+ case MTK_UART_FC_NONE:
+ serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
+ serial_out(up, MTK_UART_ESCAPE_EN, 0x00);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, serial_in(up, UART_EFR) &
+ (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)));
+ serial_out(up, UART_LCR, lcr);
+ mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI |
+ MTK_UART_IER_RTSI | MTK_UART_IER_CTSI);
+ break;
+
+ case MTK_UART_FC_HW:
+ serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
+ serial_out(up, MTK_UART_ESCAPE_EN, 0x00);
+ serial_out(up, UART_MCR, UART_MCR_RTS);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ /* enable hw flow control */
+ serial_out(up, UART_EFR, MTK_UART_EFR_HW_FC |
+ (serial_in(up, UART_EFR) &
+ (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
+
+ serial_out(up, UART_LCR, lcr);
+ mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI);
+ mtk8250_enable_intrs(up, MTK_UART_IER_CTSI | MTK_UART_IER_RTSI);
+ break;
+
+ case MTK_UART_FC_SW: /* MTK software flow control */
+ serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
+ serial_out(up, MTK_UART_ESCAPE_EN, 0x01);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ /* enable sw flow control */
+ serial_out(up, UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
+ (serial_in(up, UART_EFR) &
+ (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
+
+ serial_out(up, UART_XON1, START_CHAR(port->state->port.tty));
+ serial_out(up, UART_XOFF1, STOP_CHAR(port->state->port.tty));
+ serial_out(up, UART_LCR, lcr);
+ mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI);
+ mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI);
+ break;
+ default:
+ break;
+ }
+}
+
static void
mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+ unsigned short fraction_L_mapping[] = {
+ 0, 1, 0x5, 0x15, 0x55, 0x57, 0x57, 0x77, 0x7F, 0xFF, 0xFF
+ };
+ unsigned short fraction_M_mapping[] = {
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3
+ };
struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned int baud, quot, fraction;
unsigned long flags;
- unsigned int baud, quot;
+ int mode;
#ifdef CONFIG_SERIAL_8250_DMA
if (up->dma) {
@@ -214,7 +315,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
serial8250_do_set_termios(port, termios, old);
/*
- * Mediatek UARTs use an extra highspeed register (UART_MTK_HIGHS)
+ * Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS)
*
* We need to recalculate the quot register, as the calculation depends
* on the value in the highspeed register.
@@ -230,18 +331,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
port->uartclk / 16 / UART_DIV_MAX,
port->uartclk);
- if (baud <= 115200) {
- serial_port_out(port, UART_MTK_HIGHS, 0x0);
+ if (baud < 115200) {
+ serial_port_out(port, MTK_UART_HIGHS, 0x0);
quot = uart_get_divisor(port, baud);
- } else if (baud <= 576000) {
- serial_port_out(port, UART_MTK_HIGHS, 0x2);
-
- /* Set to next lower baudrate supported */
- if ((baud == 500000) || (baud == 576000))
- baud = 460800;
- quot = DIV_ROUND_UP(port->uartclk, 4 * baud);
} else {
- serial_port_out(port, UART_MTK_HIGHS, 0x3);
+ serial_port_out(port, MTK_UART_HIGHS, 0x3);
quot = DIV_ROUND_UP(port->uartclk, 256 * baud);
}
@@ -258,18 +352,40 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
/* reset DLAB */
serial_port_out(port, UART_LCR, up->lcr);
- if (baud > 460800) {
+ if (baud >= 115200) {
unsigned int tmp;
- tmp = DIV_ROUND_CLOSEST(port->uartclk, quot * baud);
- serial_port_out(port, UART_MTK_SAMPLE_COUNT, tmp - 1);
- serial_port_out(port, UART_MTK_SAMPLE_POINT,
- (tmp - 2) >> 1);
+ tmp = (port->uartclk / (baud * quot)) - 1;
+ serial_port_out(port, MTK_UART_SAMPLE_COUNT, tmp);
+ serial_port_out(port, MTK_UART_SAMPLE_POINT,
+ (tmp >> 1) - 1);
+
+ /* count fraction to set fraction register */
+ fraction = ((port->uartclk * 100) / baud / quot) % 100;
+ fraction = DIV_ROUND_CLOSEST(fraction, 10);
+ serial_port_out(port, MTK_UART_FRACDIV_L,
+ fraction_L_mapping[fraction]);
+ serial_port_out(port, MTK_UART_FRACDIV_M,
+ fraction_M_mapping[fraction]);
} else {
- serial_port_out(port, UART_MTK_SAMPLE_COUNT, 0x00);
- serial_port_out(port, UART_MTK_SAMPLE_POINT, 0xff);
+ serial_port_out(port, MTK_UART_SAMPLE_COUNT, 0x00);
+ serial_port_out(port, MTK_UART_SAMPLE_POINT, 0xff);
+ serial_port_out(port, MTK_UART_FRACDIV_L, 0x00);
+ serial_port_out(port, MTK_UART_FRACDIV_M, 0x00);
}
+ if ((termios->c_cflag & CRTSCTS) && (!(termios->c_iflag & CRTSCTS)))
+ mode = MTK_UART_FC_HW;
+ else if (termios->c_iflag & CRTSCTS)
+ mode = MTK_UART_FC_SW;
+ else
+ mode = MTK_UART_FC_NONE;
+
+ mtk8250_set_flow_ctrl(up, mode);
+
+ if (uart_console(port))
+ up->port.cons->cflag = termios->c_cflag;
+
spin_unlock_irqrestore(&port->lock, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
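
The 8250_mtk.c changes above rework the high-speed baud setup: for rates of 115200 and above the divisor becomes DIV_ROUND_UP(uartclk, 256 * baud), the sample count/point registers are derived from uartclk / (baud * quot), and the residual error goes into the fractional divider via the two lookup tables. The standalone program below is an editorial sketch that replays this arithmetic on the host for an assumed 26 MHz uartclk (the real value comes from the SoC clock tree), so the resulting register values can be checked by hand.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

/* Lookup tables copied from the mtk8250_set_termios() hunk above. */
static const unsigned short fraction_L_mapping[] = {
	0, 1, 0x5, 0x15, 0x55, 0x57, 0x57, 0x77, 0x7F, 0xFF, 0xFF
};
static const unsigned short fraction_M_mapping[] = {
	0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3
};

int main(void)
{
	unsigned int uartclk = 26000000;	/* assumed SoC UART clock */
	unsigned int baud = 921600;		/* example high-speed rate */
	unsigned int quot, tmp, fraction;

	/* baud >= 115200: MTK_UART_HIGHS is set to 0x3, divide-by-256 path */
	quot = DIV_ROUND_UP(uartclk, 256 * baud);
	tmp = (uartclk / (baud * quot)) - 1;
	fraction = ((uartclk * 100) / baud / quot) % 100;
	fraction = DIV_ROUND_CLOSEST(fraction, 10);

	printf("quot=%u sample_count=%u sample_point=%u\n",
	       quot, tmp, (tmp >> 1) - 1);
	printf("fracdiv_l=0x%x fracdiv_m=0x%x\n",
	       fraction_L_mapping[fraction], fraction_M_mapping[fraction]);
	return 0;
}

With these inputs the sketch prints quot=1, sample_count=27, sample_point=12 and fracdiv_l=0x5, fracdiv_m=0x0.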
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 15c2c5463835..296115f6a4d8 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# The 8250/16550 serial drivers. You shouldn't be in this list unless
# you somehow have an implicit or explicit dependency on SERIAL_8250.
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 72966bc0ac76..0d31251e04cc 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Serial device configuration
#
@@ -369,7 +370,6 @@ config SERIAL_MAX310X
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
- default n
help
This selects support for an advanced UART from Maxim (Dallas).
Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830.
@@ -652,7 +652,6 @@ config SERIAL_MUX_CONSOLE
config PDC_CONSOLE
bool "PDC software console support"
depends on PARISC && !SERIAL_MUX && VT
- default n
help
Saying Y here will enable the software based PDC console to be
used as the system console. This is useful for machines in
@@ -1095,6 +1094,30 @@ config SERIAL_OMAP_CONSOLE
your boot loader about how to pass options to the kernel at
boot time.)
+config SERIAL_SIFIVE
+ tristate "SiFive UART support"
+ depends on OF
+ select SERIAL_CORE
+ help
+ Select this option if you are building a kernel for a device that
+ contains a SiFive UART IP block. This type of UART is present on
+ SiFive FU540 SoCs, among others.
+
+config SERIAL_SIFIVE_CONSOLE
+ bool "Console on SiFive UART"
+ depends on SERIAL_SIFIVE=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Select this option if you would like to use a SiFive UART as the
+ system console.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySIFx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
+
config SERIAL_LANTIQ
bool "Lantiq serial driver"
depends on LANTIQ
@@ -1109,7 +1132,6 @@ config SERIAL_QE
depends on QUICC_ENGINE
select SERIAL_CORE
select FW_LOADER
- default n
help
This driver supports the QE serial ports on Freescale embedded
PowerPC that contain a QUICC Engine.
@@ -1582,6 +1604,32 @@ config SERIAL_RDA_CONSOLE
Say 'Y' here if you wish to use the RDA8810PL UART as the system
console. Only earlycon is implemented currently.
+config SERIAL_MILBEAUT_USIO
+ tristate "Milbeaut USIO/UART serial port support"
+ depends on ARCH_MILBEAUT || (COMPILE_TEST && OF)
+ default ARCH_MILBEAUT
+ select SERIAL_CORE
+ help
+ This selects the USIO/UART IP found in Socionext Milbeaut SoCs.
+
+config SERIAL_MILBEAUT_USIO_PORTS
+ int "Maximum number of CSIO/UART ports (1-8)"
+ range 1 8
+ depends on SERIAL_MILBEAUT_USIO
+ default "4"
+
+config SERIAL_MILBEAUT_USIO_CONSOLE
+ bool "Support for console on MILBEAUT USIO/UART serial port"
+ depends on SERIAL_MILBEAUT_USIO=y
+ default y
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ help
+ Say 'Y' here if you wish to use a USIO/UART on Socionext Milbeaut
+ SoCs as the system console (the system console is the device which
+ receives all kernel messages and warnings and which allows logins in
+ single user mode).
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 40b702aaa85e..79c3d513db7e 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -92,6 +92,8 @@ obj-$(CONFIG_SERIAL_PIC32) += pic32_uart.o
obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o
obj-$(CONFIG_SERIAL_OWL) += owl-uart.o
obj-$(CONFIG_SERIAL_RDA) += rda-uart.o
+obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o
+obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
index 896a5d57881c..3f3a6ed02ed4 100644
--- a/drivers/tty/serial/cpm_uart/Makefile
+++ b/drivers/tty/serial/cpm_uart/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Motorola 8xx FEC ethernet controller
#
diff --git a/drivers/tty/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile
index 705d1ff6fdeb..4f2dbada7741 100644
--- a/drivers/tty/serial/jsm/Makefile
+++ b/drivers/tty/serial/jsm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Jasmine adapter
#
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index ef89534dd760..e5d3ebab6dae 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -353,7 +353,6 @@ static void men_z135_handle_tx(struct men_z135_port *uart)
memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n);
xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1);
- mmiowb();
iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL);
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
new file mode 100644
index 000000000000..949ab7efc4fc
--- /dev/null
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Socionext Inc.
+ */
+
+#if defined(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#define USIO_NAME "mlb-usio-uart"
+#define USIO_UART_DEV_NAME "ttyUSI"
+
+static struct uart_port mlb_usio_ports[CONFIG_SERIAL_MILBEAUT_USIO_PORTS];
+
+#define RX 0
+#define TX 1
+static int mlb_usio_irq[CONFIG_SERIAL_MILBEAUT_USIO_PORTS][2];
+
+#define MLB_USIO_REG_SMR 0
+#define MLB_USIO_REG_SCR 1
+#define MLB_USIO_REG_ESCR 2
+#define MLB_USIO_REG_SSR 3
+#define MLB_USIO_REG_DR 4
+#define MLB_USIO_REG_BGR 6
+#define MLB_USIO_REG_FCR 12
+#define MLB_USIO_REG_FBYTE 14
+
+#define MLB_USIO_SMR_SOE BIT(0)
+#define MLB_USIO_SMR_SBL BIT(3)
+#define MLB_USIO_SCR_TXE BIT(0)
+#define MLB_USIO_SCR_RXE BIT(1)
+#define MLB_USIO_SCR_TBIE BIT(2)
+#define MLB_USIO_SCR_TIE BIT(3)
+#define MLB_USIO_SCR_RIE BIT(4)
+#define MLB_USIO_SCR_UPCL BIT(7)
+#define MLB_USIO_ESCR_L_8BIT 0
+#define MLB_USIO_ESCR_L_5BIT 1
+#define MLB_USIO_ESCR_L_6BIT 2
+#define MLB_USIO_ESCR_L_7BIT 3
+#define MLB_USIO_ESCR_P BIT(3)
+#define MLB_USIO_ESCR_PEN BIT(4)
+#define MLB_USIO_ESCR_FLWEN BIT(7)
+#define MLB_USIO_SSR_TBI BIT(0)
+#define MLB_USIO_SSR_TDRE BIT(1)
+#define MLB_USIO_SSR_RDRF BIT(2)
+#define MLB_USIO_SSR_ORE BIT(3)
+#define MLB_USIO_SSR_FRE BIT(4)
+#define MLB_USIO_SSR_PE BIT(5)
+#define MLB_USIO_SSR_REC BIT(7)
+#define MLB_USIO_SSR_BRK BIT(8)
+#define MLB_USIO_FCR_FE1 BIT(0)
+#define MLB_USIO_FCR_FE2 BIT(1)
+#define MLB_USIO_FCR_FCL1 BIT(2)
+#define MLB_USIO_FCR_FCL2 BIT(3)
+#define MLB_USIO_FCR_FSET BIT(4)
+#define MLB_USIO_FCR_FTIE BIT(9)
+#define MLB_USIO_FCR_FDRQ BIT(10)
+#define MLB_USIO_FCR_FRIIE BIT(11)
+
+static void mlb_usio_stop_tx(struct uart_port *port)
+{
+ writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FTIE,
+ port->membase + MLB_USIO_REG_FCR);
+ writeb(readb(port->membase + MLB_USIO_REG_SCR) & ~MLB_USIO_SCR_TBIE,
+ port->membase + MLB_USIO_REG_SCR);
+}
+
+static void mlb_usio_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ int count;
+
+ writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FTIE,
+ port->membase + MLB_USIO_REG_FCR);
+ writeb(readb(port->membase + MLB_USIO_REG_SCR) &
+ ~(MLB_USIO_SCR_TIE | MLB_USIO_SCR_TBIE),
+ port->membase + MLB_USIO_REG_SCR);
+
+ if (port->x_char) {
+ writew(port->x_char, port->membase + MLB_USIO_REG_DR);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ mlb_usio_stop_tx(port);
+ return;
+ }
+
+ count = port->fifosize -
+ (readw(port->membase + MLB_USIO_REG_FBYTE) & 0xff);
+
+ do {
+ writew(xmit->buf[xmit->tail], port->membase + MLB_USIO_REG_DR);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+
+ } while (--count > 0);
+
+ writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FDRQ,
+ port->membase + MLB_USIO_REG_FCR);
+
+ writeb(readb(port->membase + MLB_USIO_REG_SCR) | MLB_USIO_SCR_TBIE,
+ port->membase + MLB_USIO_REG_SCR);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ mlb_usio_stop_tx(port);
+}
+
+static void mlb_usio_start_tx(struct uart_port *port)
+{
+ u16 fcr = readw(port->membase + MLB_USIO_REG_FCR);
+
+ writew(fcr | MLB_USIO_FCR_FTIE, port->membase + MLB_USIO_REG_FCR);
+ if (!(fcr & MLB_USIO_FCR_FDRQ))
+ return;
+
+ writeb(readb(port->membase + MLB_USIO_REG_SCR) | MLB_USIO_SCR_TBIE,
+ port->membase + MLB_USIO_REG_SCR);
+
+ if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
+ mlb_usio_tx_chars(port);
+}
+
+static void mlb_usio_stop_rx(struct uart_port *port)
+{
+ writeb(readb(port->membase + MLB_USIO_REG_SCR) & ~MLB_USIO_SCR_RIE,
+ port->membase + MLB_USIO_REG_SCR);
+}
+
+static void mlb_usio_enable_ms(struct uart_port *port)
+{
+ writeb(readb(port->membase + MLB_USIO_REG_SCR) |
+ MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE,
+ port->membase + MLB_USIO_REG_SCR);
+}
+
+static void mlb_usio_rx_chars(struct uart_port *port)
+{
+ struct tty_port *ttyport = &port->state->port;
+ unsigned long flag = 0;
+ char ch = 0;
+ u8 status;
+ int max_count = 2;
+
+ while (max_count--) {
+ status = readb(port->membase + MLB_USIO_REG_SSR);
+
+ if (!(status & MLB_USIO_SSR_RDRF))
+ break;
+
+ if (!(status & (MLB_USIO_SSR_ORE | MLB_USIO_SSR_FRE |
+ MLB_USIO_SSR_PE))) {
+ ch = readw(port->membase + MLB_USIO_REG_DR);
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+ uart_insert_char(port, status, MLB_USIO_SSR_ORE,
+ ch, flag);
+ continue;
+ }
+ if (status & MLB_USIO_SSR_PE)
+ port->icount.parity++;
+ if (status & MLB_USIO_SSR_ORE)
+ port->icount.overrun++;
+ status &= port->read_status_mask;
+ if (status & MLB_USIO_SSR_BRK) {
+ flag = TTY_BREAK;
+ ch = 0;
+ } else
+ if (status & MLB_USIO_SSR_PE) {
+ flag = TTY_PARITY;
+ ch = 0;
+ } else
+ if (status & MLB_USIO_SSR_FRE) {
+ flag = TTY_FRAME;
+ ch = 0;
+ }
+ if (flag)
+ uart_insert_char(port, status, MLB_USIO_SSR_ORE,
+ ch, flag);
+
+ writeb(readb(port->membase + MLB_USIO_REG_SSR) |
+ MLB_USIO_SSR_REC,
+ port->membase + MLB_USIO_REG_SSR);
+
+ max_count = readw(port->membase + MLB_USIO_REG_FBYTE) >> 8;
+ writew(readw(port->membase + MLB_USIO_REG_FCR) |
+ MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE,
+ port->membase + MLB_USIO_REG_FCR);
+ }
+
+ tty_flip_buffer_push(ttyport);
+}
+
+static irqreturn_t mlb_usio_rx_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+
+ spin_lock(&port->lock);
+ mlb_usio_rx_chars(port);
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mlb_usio_tx_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+
+ spin_lock(&port->lock);
+ if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
+ mlb_usio_tx_chars(port);
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int mlb_usio_tx_empty(struct uart_port *port)
+{
+ return (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI) ?
+ TIOCSER_TEMT : 0;
+}
+
+static void mlb_usio_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static unsigned int mlb_usio_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+
+}
+
+static void mlb_usio_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static int mlb_usio_startup(struct uart_port *port)
+{
+ const char *portname = to_platform_device(port->dev)->name;
+ unsigned long flags;
+ int ret, index = port->line;
+ unsigned char escr;
+
+ ret = request_irq(mlb_usio_irq[index][RX], mlb_usio_rx_irq,
+ 0, portname, port);
+ if (ret)
+ return ret;
+ ret = request_irq(mlb_usio_irq[index][TX], mlb_usio_tx_irq,
+ 0, portname, port);
+ if (ret) {
+ free_irq(mlb_usio_irq[index][RX], port);
+ return ret;
+ }
+
+ escr = readb(port->membase + MLB_USIO_REG_ESCR);
+ if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
+ escr |= MLB_USIO_ESCR_FLWEN;
+ spin_lock_irqsave(&port->lock, flags);
+ writeb(0, port->membase + MLB_USIO_REG_SCR);
+ writeb(escr, port->membase + MLB_USIO_REG_ESCR);
+ writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
+ writeb(MLB_USIO_SSR_REC, port->membase + MLB_USIO_REG_SSR);
+ writew(0, port->membase + MLB_USIO_REG_FCR);
+ writew(MLB_USIO_FCR_FCL1 | MLB_USIO_FCR_FCL2,
+ port->membase + MLB_USIO_REG_FCR);
+ writew(MLB_USIO_FCR_FE1 | MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE,
+ port->membase + MLB_USIO_REG_FCR);
+ writew(0, port->membase + MLB_USIO_REG_FBYTE);
+ writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
+
+ writeb(MLB_USIO_SCR_TXE | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE |
+ MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void mlb_usio_shutdown(struct uart_port *port)
+{
+ int index = port->line;
+
+ free_irq(mlb_usio_irq[index][RX], port);
+ free_irq(mlb_usio_irq[index][TX], port);
+}
+
+static void mlb_usio_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ unsigned int escr, smr = MLB_USIO_SMR_SOE;
+ unsigned long flags, baud, quot;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ escr = MLB_USIO_ESCR_L_5BIT;
+ break;
+ case CS6:
+ escr = MLB_USIO_ESCR_L_6BIT;
+ break;
+ case CS7:
+ escr = MLB_USIO_ESCR_L_7BIT;
+ break;
+ case CS8:
+ default:
+ escr = MLB_USIO_ESCR_L_8BIT;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ smr |= MLB_USIO_SMR_SBL;
+
+ if (termios->c_cflag & PARENB) {
+ escr |= MLB_USIO_ESCR_PEN;
+ if (termios->c_cflag & PARODD)
+ escr |= MLB_USIO_ESCR_P;
+ }
+ /* Set hard flow control */
+ if (of_property_read_bool(port->dev->of_node, "auto-flow-control") ||
+ (termios->c_cflag & CRTSCTS))
+ escr |= MLB_USIO_ESCR_FLWEN;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
+ if (baud > 1)
+ quot = port->uartclk / baud - 1;
+ else
+ quot = 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF |
+ MLB_USIO_SSR_TDRE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= MLB_USIO_SSR_FRE | MLB_USIO_SSR_PE;
+
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= MLB_USIO_SSR_FRE | MLB_USIO_SSR_PE;
+ if ((termios->c_iflag & IGNBRK) && (termios->c_iflag & IGNPAR))
+ port->ignore_status_mask |= MLB_USIO_SSR_ORE;
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= MLB_USIO_SSR_RDRF;
+
+ writeb(0, port->membase + MLB_USIO_REG_SCR);
+ writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
+ writeb(MLB_USIO_SSR_REC, port->membase + MLB_USIO_REG_SSR);
+ writew(0, port->membase + MLB_USIO_REG_FCR);
+ writeb(smr, port->membase + MLB_USIO_REG_SMR);
+ writeb(escr, port->membase + MLB_USIO_REG_ESCR);
+ writew(quot, port->membase + MLB_USIO_REG_BGR);
+ writew(0, port->membase + MLB_USIO_REG_FCR);
+ writew(MLB_USIO_FCR_FCL1 | MLB_USIO_FCR_FCL2 | MLB_USIO_FCR_FE1 |
+ MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE,
+ port->membase + MLB_USIO_REG_FCR);
+ writew(0, port->membase + MLB_USIO_REG_FBYTE);
+ writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
+ writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE |
+ MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *mlb_usio_type(struct uart_port *port)
+{
+ return ((port->type == PORT_MLB_USIO) ? USIO_NAME : NULL);
+}
+
+static void mlb_usio_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_MLB_USIO;
+}
+
+static const struct uart_ops mlb_usio_ops = {
+ .tx_empty = mlb_usio_tx_empty,
+ .set_mctrl = mlb_usio_set_mctrl,
+ .get_mctrl = mlb_usio_get_mctrl,
+ .stop_tx = mlb_usio_stop_tx,
+ .start_tx = mlb_usio_start_tx,
+ .stop_rx = mlb_usio_stop_rx,
+ .enable_ms = mlb_usio_enable_ms,
+ .break_ctl = mlb_usio_break_ctl,
+ .startup = mlb_usio_startup,
+ .shutdown = mlb_usio_shutdown,
+ .set_termios = mlb_usio_set_termios,
+ .type = mlb_usio_type,
+ .config_port = mlb_usio_config_port,
+};
+
+#ifdef CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE
+
+static void mlb_usio_console_putchar(struct uart_port *port, int c)
+{
+ while (!(readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TDRE))
+ cpu_relax();
+
+ writew(c, port->membase + MLB_USIO_REG_DR);
+}
+
+static void mlb_usio_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &mlb_usio_ports[co->index];
+
+ uart_console_write(port, s, count, mlb_usio_console_putchar);
+}
+
+static int __init mlb_usio_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 115200;
+ int parity = 'n';
+ int flow = 'n';
+ int bits = 8;
+
+ if (co->index >= CONFIG_SERIAL_MILBEAUT_USIO_PORTS)
+ return -ENODEV;
+
+ port = &mlb_usio_ports[co->index];
+ if (!port->membase)
+ return -ENODEV;
+
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
+ flow = 'r';
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+
+static struct uart_driver mlb_usio_uart_driver;
+static struct console mlb_usio_console = {
+ .name = USIO_UART_DEV_NAME,
+ .write = mlb_usio_console_write,
+ .device = uart_console_device,
+ .setup = mlb_usio_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &mlb_usio_uart_driver,
+};
+
+static int __init mlb_usio_console_init(void)
+{
+ register_console(&mlb_usio_console);
+ return 0;
+}
+console_initcall(mlb_usio_console_init);
+
+
+static void mlb_usio_early_console_write(struct console *co, const char *s,
+ u_int count)
+{
+ struct earlycon_device *dev = co->data;
+
+ uart_console_write(&dev->port, s, count, mlb_usio_console_putchar);
+}
+
+static int __init mlb_usio_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+ device->con->write = mlb_usio_early_console_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(mlb_usio, "socionext,milbeaut-usio-uart",
+ mlb_usio_early_console_setup);
+
+#define USIO_CONSOLE (&mlb_usio_console)
+#else
+#define USIO_CONSOLE NULL
+#endif
+
+static struct uart_driver mlb_usio_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = USIO_NAME,
+ .dev_name = USIO_UART_DEV_NAME,
+ .cons = USIO_CONSOLE,
+ .nr = CONFIG_SERIAL_MILBEAUT_USIO_PORTS,
+};
+
+static int mlb_usio_probe(struct platform_device *pdev)
+{
+ struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+ struct uart_port *port;
+ struct resource *res;
+ int index = 0;
+ int ret;
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Missing clock\n");
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Clock enable failed: %d\n", ret);
+ return ret;
+ }
+ of_property_read_u32(pdev->dev.of_node, "index", &index);
+ port = &mlb_usio_ports[index];
+
+ port->private_data = (void *)clk;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Missing regs\n");
+ ret = -ENODEV;
+ goto failed;
+ }
+ port->membase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
+ ret = platform_get_irq_byname(pdev, "rx");
+ mlb_usio_irq[index][RX] = ret;
+
+ ret = platform_get_irq_byname(pdev, "tx");
+ mlb_usio_irq[index][TX] = ret;
+
+ port->irq = mlb_usio_irq[index][RX];
+ port->uartclk = clk_get_rate(clk);
+ port->fifosize = 128;
+ port->iotype = UPIO_MEM32;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_SPD_VHI;
+ port->line = index;
+ port->ops = &mlb_usio_ops;
+ port->dev = &pdev->dev;
+
+ ret = uart_add_one_port(&mlb_usio_uart_driver, port);
+ if (ret) {
+ dev_err(&pdev->dev, "Adding port failed: %d\n", ret);
+ goto failed;
+ }
+ return 0;
+
+failed:
+ clk_disable_unprepare(clk);
+
+ return ret;
+}
+
+static int mlb_usio_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = &mlb_usio_ports[pdev->id];
+ struct clk *clk = port->private_data;
+
+ uart_remove_one_port(&mlb_usio_uart_driver, port);
+ clk_disable_unprepare(clk);
+
+ return 0;
+}
+
+static const struct of_device_id mlb_usio_dt_ids[] = {
+ { .compatible = "socionext,milbeaut-usio-uart" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mlb_usio_dt_ids);
+
+static struct platform_driver mlb_usio_driver = {
+ .probe = mlb_usio_probe,
+ .remove = mlb_usio_remove,
+ .driver = {
+ .name = USIO_NAME,
+ .of_match_table = mlb_usio_dt_ids,
+ },
+};
+
+static int __init mlb_usio_init(void)
+{
+ int ret = uart_register_driver(&mlb_usio_uart_driver);
+
+ if (ret) {
+ pr_err("%s: uart registration failed: %d\n", __func__, ret);
+ return ret;
+ }
+ ret = platform_driver_register(&mlb_usio_driver);
+ if (ret) {
+ uart_unregister_driver(&mlb_usio_uart_driver);
+ pr_err("%s: drv registration failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit mlb_usio_exit(void)
+{
+ platform_driver_unregister(&mlb_usio_driver);
+ uart_unregister_driver(&mlb_usio_uart_driver);
+}
+
+module_init(mlb_usio_init);
+module_exit(mlb_usio_exit);
+
+MODULE_AUTHOR("SOCIONEXT");
+MODULE_DESCRIPTION("MILBEAUT_USIO/UART Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 09a183dfc526..7d3ae31cc720 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -14,9 +14,9 @@
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
@@ -1179,7 +1179,8 @@ static int sc16is7xx_probe(struct device *dev,
struct regmap *regmap, int irq, unsigned long flags)
{
struct sched_param sched_param = { .sched_priority = MAX_RT_PRIO / 2 };
- unsigned long freq, *pfreq = dev_get_platdata(dev);
+ unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
+ u32 uartclk = 0;
int i, ret;
struct sc16is7xx_port *s;
@@ -1193,10 +1194,17 @@ static int sc16is7xx_probe(struct device *dev,
return -ENOMEM;
}
+ /* Always ask for fixed clock rate from a property. */
+ device_property_read_u32(dev, "clock-frequency", &uartclk);
+
s->clk = devm_clk_get(dev, NULL);
if (IS_ERR(s->clk)) {
+ if (uartclk)
+ freq = uartclk;
if (pfreq)
freq = *pfreq;
+ if (freq)
+ dev_dbg(dev, "Clock frequency: %luHz\n", freq);
else
return PTR_ERR(s->clk);
} else {
@@ -1384,13 +1392,9 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
return ret;
if (spi->dev.of_node) {
- const struct of_device_id *of_id =
- of_match_device(sc16is7xx_dt_ids, &spi->dev);
-
- if (!of_id)
+ devtype = device_get_match_data(&spi->dev);
+ if (!devtype)
return -ENODEV;
-
- devtype = (struct sc16is7xx_devtype *)of_id->data;
} else {
const struct spi_device_id *id_entry = spi_get_device_id(spi);
@@ -1426,7 +1430,7 @@ MODULE_DEVICE_TABLE(spi, sc16is7xx_spi_id_table);
static struct spi_driver sc16is7xx_spi_uart_driver = {
.driver = {
.name = SC16IS7XX_NAME,
- .of_match_table = of_match_ptr(sc16is7xx_dt_ids),
+ .of_match_table = sc16is7xx_dt_ids,
},
.probe = sc16is7xx_spi_probe,
.remove = sc16is7xx_spi_remove,
@@ -1445,13 +1449,9 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
struct regmap *regmap;
if (i2c->dev.of_node) {
- const struct of_device_id *of_id =
- of_match_device(sc16is7xx_dt_ids, &i2c->dev);
-
- if (!of_id)
+ devtype = device_get_match_data(&i2c->dev);
+ if (!devtype)
return -ENODEV;
-
- devtype = (struct sc16is7xx_devtype *)of_id->data;
} else {
devtype = (struct sc16is7xx_devtype *)id->driver_data;
flags = IRQF_TRIGGER_FALLING;
@@ -1484,7 +1484,7 @@ MODULE_DEVICE_TABLE(i2c, sc16is7xx_i2c_id_table);
static struct i2c_driver sc16is7xx_i2c_uart_driver = {
.driver = {
.name = SC16IS7XX_NAME,
- .of_match_table = of_match_ptr(sc16is7xx_dt_ids),
+ .of_match_table = sc16is7xx_dt_ids,
},
.probe = sc16is7xx_i2c_probe,
.remove = sc16is7xx_i2c_remove,
@@ -1520,11 +1520,13 @@ static int __init sc16is7xx_init(void)
#endif
return ret;
+#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
err_spi:
+#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
i2c_del_driver(&sc16is7xx_i2c_uart_driver);
-#endif
err_i2c:
+#endif
uart_unregister_driver(&sc16is7xx_uart);
return ret;
}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 351843f847c0..83f4dd0bfd74 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -130,9 +130,6 @@ static void uart_start(struct tty_struct *tty)
struct uart_port *port;
unsigned long flags;
- if (!state)
- return;
-
port = uart_port_lock(state, flags);
__uart_start(tty);
uart_port_unlock(port, flags);
@@ -730,9 +727,6 @@ static void uart_unthrottle(struct tty_struct *tty)
upstat_t mask = UPSTAT_SYNC_FIFO;
struct uart_port *port;
- if (!state)
- return;
-
port = uart_port_ref(state);
if (!port)
return;
@@ -1514,7 +1508,7 @@ static void uart_set_termios(struct tty_struct *tty,
}
uart_change_speed(tty, state, old_termios);
- /* reload cflag from termios; port driver may have overriden flags */
+ /* reload cflag from termios; port driver may have overridden flags */
cflag = tty->termios.c_cflag;
/* Handle transition to B0 status */
@@ -1747,6 +1741,16 @@ static void uart_dtr_rts(struct tty_port *port, int raise)
uart_port_deref(uport);
}
+static int uart_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct uart_driver *drv = driver->driver_state;
+ struct uart_state *state = drv->state + tty->index;
+
+ tty->driver_data = state;
+
+ return tty_standard_install(driver, tty);
+}
+
/*
* Calls to uart_open are serialised by the tty_lock in
* drivers/tty/tty_io.c:tty_open()
@@ -1759,11 +1763,8 @@ static void uart_dtr_rts(struct tty_port *port, int raise)
*/
static int uart_open(struct tty_struct *tty, struct file *filp)
{
- struct uart_driver *drv = tty->driver->driver_state;
- int retval, line = tty->index;
- struct uart_state *state = drv->state + line;
-
- tty->driver_data = state;
+ struct uart_state *state = tty->driver_data;
+ int retval;
retval = tty_port_open(&state->port, tty, filp);
if (retval > 0)
@@ -2448,6 +2449,7 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
#endif
static const struct tty_operations uart_ops = {
+ .install = uart_install,
.open = uart_open,
.close = uart_close,
.write = uart_write,
@@ -2505,7 +2507,7 @@ static const struct tty_port_operations uart_port_ops = {
int uart_register_driver(struct uart_driver *drv)
{
struct tty_driver *normal;
- int i, retval;
+ int i, retval = -ENOMEM;
BUG_ON(drv->state);
@@ -2557,7 +2559,7 @@ int uart_register_driver(struct uart_driver *drv)
out_kfree:
kfree(drv->state);
out:
- return -ENOMEM;
+ return retval;
}
/**
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 1b4008d022bf..d22ccb32aa9b 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -248,7 +248,6 @@ static void serial_txx9_initialize(struct uart_port *port)
sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
/* TX4925 BUG WORKAROUND. Accessing SIOC register
* immediately after soft reset causes bus error. */
- mmiowb();
udelay(1);
while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout)
udelay(1);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 2d1c626312cd..3cd139752d3f 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2512,14 +2512,16 @@ done:
* center of the last stop bit in sampling clocks.
*/
int last_stop = bits * 2 - 1;
- int deviation = min_err * srr * last_stop / 2 / baud;
+ int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
+ (int)(srr + 1),
+ 2 * (int)baud);
if (abs(deviation) >= 2) {
/* At least two sampling clocks off at the
* last stop bit; we can increase the error
* margin by shifting the sampling point.
*/
- int shift = min(-8, max(7, deviation / 2));
+ int shift = clamp(deviation / 2, -8, 7);
hssrr |= (shift << HSCIF_SRHP_SHIFT) &
HSCIF_SRHP_MASK;
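
The sh-sci.c hunk above replaces min(-8, max(7, deviation / 2)) with clamp(deviation / 2, -8, 7). The nesting in the old expression was inverted: max(7, x) can never be below 7, so min(-8, ...) always evaluated to -8 regardless of the computed deviation. The small editorial demo below (plain C, using simplified stand-ins for the kernel macros) shows the difference for an example deviation of six sampling clocks.

#include <stdio.h>

#define min(a, b)		((a) < (b) ? (a) : (b))
#define max(a, b)		((a) > (b) ? (a) : (b))
#define clamp(val, lo, hi)	min(max(val, lo), hi)

int main(void)
{
	int deviation = 6;	/* example: six sampling clocks off target */
	int old_shift = min(-8, max(7, deviation / 2));	/* always -8 */
	int new_shift = clamp(deviation / 2, -8, 7);	/* 3, as intended */

	printf("old=%d new=%d\n", old_shift, new_shift);
	return 0;
}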
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
new file mode 100644
index 000000000000..be4687814353
--- /dev/null
+++ b/drivers/tty/serial/sifive.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SiFive UART driver
+ * Copyright (C) 2018 Paul Walmsley <paul@pwsan.com>
+ * Copyright (C) 2018-2019 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based partially on:
+ * - drivers/tty/serial/pxa.c
+ * - drivers/tty/serial/amba-pl011.c
+ * - drivers/tty/serial/uartlite.c
+ * - drivers/tty/serial/omap-serial.c
+ * - drivers/pwm/pwm-sifive.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 19 "Universal Asynchronous Receiver/Transmitter (UART)" of
+ * SiFive FE310-G000 v2p3
+ * - The tree/master/src/main/scala/devices/uart directory of
+ * https://github.com/sifive/sifive-blocks/
+ *
+ * The SiFive UART design is not 8250-compatible. The following common
+ * features are not supported:
+ * - Word lengths other than 8 bits
+ * - Break handling
+ * - Parity
+ * - Flow control
+ * - Modem signals (DSR, RI, etc.)
+ * On the other hand, the design is free from the baggage of the 8250
+ * programming model.
+ */
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+/*
+ * Register offsets
+ */
+
+/* TXDATA */
+#define SIFIVE_SERIAL_TXDATA_OFFS 0x0
+#define SIFIVE_SERIAL_TXDATA_FULL_SHIFT 31
+#define SIFIVE_SERIAL_TXDATA_FULL_MASK (1 << SIFIVE_SERIAL_TXDATA_FULL_SHIFT)
+#define SIFIVE_SERIAL_TXDATA_DATA_SHIFT 0
+#define SIFIVE_SERIAL_TXDATA_DATA_MASK (0xff << SIFIVE_SERIAL_TXDATA_DATA_SHIFT)
+
+/* RXDATA */
+#define SIFIVE_SERIAL_RXDATA_OFFS 0x4
+#define SIFIVE_SERIAL_RXDATA_EMPTY_SHIFT 31
+#define SIFIVE_SERIAL_RXDATA_EMPTY_MASK (1 << SIFIVE_SERIAL_RXDATA_EMPTY_SHIFT)
+#define SIFIVE_SERIAL_RXDATA_DATA_SHIFT 0
+#define SIFIVE_SERIAL_RXDATA_DATA_MASK (0xff << SIFIVE_SERIAL_RXDATA_DATA_SHIFT)
+
+/* TXCTRL */
+#define SIFIVE_SERIAL_TXCTRL_OFFS 0x8
+#define SIFIVE_SERIAL_TXCTRL_TXCNT_SHIFT 16
+#define SIFIVE_SERIAL_TXCTRL_TXCNT_MASK (0x7 << SIFIVE_SERIAL_TXCTRL_TXCNT_SHIFT)
+#define SIFIVE_SERIAL_TXCTRL_NSTOP_SHIFT 1
+#define SIFIVE_SERIAL_TXCTRL_NSTOP_MASK (1 << SIFIVE_SERIAL_TXCTRL_NSTOP_SHIFT)
+#define SIFIVE_SERIAL_TXCTRL_TXEN_SHIFT 0
+#define SIFIVE_SERIAL_TXCTRL_TXEN_MASK (1 << SIFIVE_SERIAL_TXCTRL_TXEN_SHIFT)
+
+/* RXCTRL */
+#define SIFIVE_SERIAL_RXCTRL_OFFS 0xC
+#define SIFIVE_SERIAL_RXCTRL_RXCNT_SHIFT 16
+#define SIFIVE_SERIAL_RXCTRL_RXCNT_MASK (0x7 << SIFIVE_SERIAL_TXCTRL_TXCNT_SHIFT)
+#define SIFIVE_SERIAL_RXCTRL_RXEN_SHIFT 0
+#define SIFIVE_SERIAL_RXCTRL_RXEN_MASK (1 << SIFIVE_SERIAL_RXCTRL_RXEN_SHIFT)
+
+/* IE */
+#define SIFIVE_SERIAL_IE_OFFS 0x10
+#define SIFIVE_SERIAL_IE_RXWM_SHIFT 1
+#define SIFIVE_SERIAL_IE_RXWM_MASK (1 << SIFIVE_SERIAL_IE_RXWM_SHIFT)
+#define SIFIVE_SERIAL_IE_TXWM_SHIFT 0
+#define SIFIVE_SERIAL_IE_TXWM_MASK (1 << SIFIVE_SERIAL_IE_TXWM_SHIFT)
+
+/* IP */
+#define SIFIVE_SERIAL_IP_OFFS 0x14
+#define SIFIVE_SERIAL_IP_RXWM_SHIFT 1
+#define SIFIVE_SERIAL_IP_RXWM_MASK (1 << SIFIVE_SERIAL_IP_RXWM_SHIFT)
+#define SIFIVE_SERIAL_IP_TXWM_SHIFT 0
+#define SIFIVE_SERIAL_IP_TXWM_MASK (1 << SIFIVE_SERIAL_IP_TXWM_SHIFT)
+
+/* DIV */
+#define SIFIVE_SERIAL_DIV_OFFS 0x18
+#define SIFIVE_SERIAL_DIV_DIV_SHIFT 0
+#define SIFIVE_SERIAL_DIV_DIV_MASK (0xffff << SIFIVE_SERIAL_DIV_DIV_SHIFT)
+
+/*
+ * Config macros
+ */
+
+/*
+ * SIFIVE_SERIAL_MAX_PORTS: maximum number of UARTs on a device that can
+ * host a serial console
+ */
+#define SIFIVE_SERIAL_MAX_PORTS 8
+
+/*
+ * SIFIVE_DEFAULT_BAUD_RATE: default baud rate that the driver should
+ * configure itself to use
+ */
+#define SIFIVE_DEFAULT_BAUD_RATE 115200
+
+/* SIFIVE_SERIAL_NAME: our driver's name that we pass to the operating system */
+#define SIFIVE_SERIAL_NAME "sifive-serial"
+
+/* SIFIVE_TTY_PREFIX: tty name prefix for SiFive serial ports */
+#define SIFIVE_TTY_PREFIX "ttySIF"
+
+/* SIFIVE_TX_FIFO_DEPTH: depth of the TX FIFO (in bytes) */
+#define SIFIVE_TX_FIFO_DEPTH 8
+
+/* SIFIVE_RX_FIFO_DEPTH: depth of the RX FIFO (in bytes) */
+#define SIFIVE_RX_FIFO_DEPTH 8
+
+#if (SIFIVE_TX_FIFO_DEPTH != SIFIVE_RX_FIFO_DEPTH)
+#error Driver does not support configurations with different TX, RX FIFO sizes
+#endif
+
+/*
+ *
+ */
+
+/**
+ * struct sifive_serial_port - driver-specific data extension to struct uart_port
+ * @port: struct uart_port embedded in this struct
+ * @dev: struct device *
+ * @ier: shadowed copy of the interrupt enable register
+ * @clkin_rate: input clock rate to the UART IP block
+ * @baud_rate: UART serial line rate (e.g., 115200 baud)
+ * @clk: reference to the UART's input clock
+ * @clk_notifier: clock rate change notifier for upstream clock changes
+ *
+ * Configuration data specific to this SiFive UART.
+ */
+struct sifive_serial_port {
+ struct uart_port port;
+ struct device *dev;
+ unsigned char ier;
+ unsigned long clkin_rate;
+ unsigned long baud_rate;
+ struct clk *clk;
+ struct notifier_block clk_notifier;
+};
+
+/*
+ * Structure container-of macros
+ */
+
+#define port_to_sifive_serial_port(p) (container_of((p), \
+ struct sifive_serial_port, \
+ port))
+
+#define notifier_to_sifive_serial_port(nb) (container_of((nb), \
+ struct sifive_serial_port, \
+ clk_notifier))
+
+/*
+ * Forward declarations
+ */
+static void sifive_serial_stop_tx(struct uart_port *port);
+
+/*
+ * Internal functions
+ */
+
+/**
+ * __ssp_early_writel() - write to a SiFive serial port register (early)
+ * @port: pointer to a struct uart_port record
+ * @offs: register address offset from the IP block base address
+ * @v: value to write to the register
+ *
+ * Given a pointer @port to a struct uart_port record, write the value
+ * @v to the IP block register address offset @offs. This function is
+ * intended for early console use.
+ *
+ * Context: Intended to be used only by the earlyconsole code.
+ */
+static void __ssp_early_writel(u32 v, u16 offs, struct uart_port *port)
+{
+ writel_relaxed(v, port->membase + offs);
+}
+
+/**
+ * __ssp_early_readl() - read from a SiFive serial port register (early)
+ * @port: pointer to a struct uart_port record
+ * @offs: register address offset from the IP block base address
+ *
+ * Given a pointer @port to a struct uart_port record, read the
+ * contents of the IP block register located at offset @offs from the
+ * IP block base and return it. This function is intended for early
+ * console use.
+ *
+ * Context: Intended to be called only by the earlyconsole code or by
+ * __ssp_readl() or __ssp_writel() (in this driver)
+ *
+ * Returns: the register value read from the UART.
+ */
+static u32 __ssp_early_readl(struct uart_port *port, u16 offs)
+{
+ return readl_relaxed(port->membase + offs);
+}
+
+/**
+ * __ssp_writel() - write to a SiFive serial port register
+ * @v: value to write to the register
+ * @offs: register address offset from the IP block base address
+ * @ssp: pointer to a struct sifive_serial_port record
+ *
+ * Write the value @v to the IP block register located at offset @offs from the
+ * IP block base, given a pointer @ssp to a struct sifive_serial_port record.
+ *
+ * Context: Any context.
+ */
+static void __ssp_writel(u32 v, u16 offs, struct sifive_serial_port *ssp)
+{
+ __ssp_early_writel(v, offs, &ssp->port);
+}
+
+/**
+ * __ssp_readl() - read from a SiFive serial port register
+ * @ssp: pointer to a struct sifive_serial_port record
+ * @offs: register address offset from the IP block base address
+ *
+ * Read the contents of the IP block register located at offset @offs from the
+ * IP block base, given a pointer @ssp to a struct sifive_serial_port record.
+ *
+ * Context: Any context.
+ *
+ * Returns: the value of the UART register
+ */
+static u32 __ssp_readl(struct sifive_serial_port *ssp, u16 offs)
+{
+ return __ssp_early_readl(&ssp->port, offs);
+}
+
+/**
+ * sifive_serial_is_txfifo_full() - is the TXFIFO full?
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Read the transmit FIFO "full" bit, returning a non-zero value if the
+ * TX FIFO is full, or zero if space remains. Intended to be used to prevent
+ * writes to the TX FIFO when it's full.
+ *
+ * Returns: SIFIVE_SERIAL_TXDATA_FULL_MASK (non-zero) if the transmit FIFO
+ * is full, or 0 if space remains.
+ */
+static int sifive_serial_is_txfifo_full(struct sifive_serial_port *ssp)
+{
+ return __ssp_readl(ssp, SIFIVE_SERIAL_TXDATA_OFFS) &
+ SIFIVE_SERIAL_TXDATA_FULL_MASK;
+}
+
+/**
+ * __ssp_transmit_char() - enqueue a byte to transmit onto the TX FIFO
+ * @ssp: pointer to a struct sifive_serial_port
+ * @ch: character to transmit
+ *
+ * Enqueue a byte @ch onto the transmit FIFO, given a pointer @ssp to the
+ * struct sifive_serial_port * to transmit on. Caller should first check to
+ * ensure that the TXFIFO has space; see sifive_serial_is_txfifo_full().
+ *
+ * Context: Any context.
+ */
+static void __ssp_transmit_char(struct sifive_serial_port *ssp, int ch)
+{
+ __ssp_writel(ch, SIFIVE_SERIAL_TXDATA_OFFS, ssp);
+}
+
+/**
+ * __ssp_transmit_chars() - enqueue multiple bytes onto the TX FIFO
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Transfer up to a TX FIFO size's worth of characters from the Linux serial
+ * transmit buffer to the SiFive UART TX FIFO.
+ *
+ * Context: Any context. Expects @ssp->port.lock to be held by caller.
+ */
+static void __ssp_transmit_chars(struct sifive_serial_port *ssp)
+{
+ struct circ_buf *xmit = &ssp->port.state->xmit;
+ int count;
+
+ if (ssp->port.x_char) {
+ __ssp_transmit_char(ssp, ssp->port.x_char);
+ ssp->port.icount.tx++;
+ ssp->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&ssp->port)) {
+ sifive_serial_stop_tx(&ssp->port);
+ return;
+ }
+ count = SIFIVE_TX_FIFO_DEPTH;
+ do {
+ __ssp_transmit_char(ssp, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ ssp->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&ssp->port);
+
+ if (uart_circ_empty(xmit))
+ sifive_serial_stop_tx(&ssp->port);
+}
+
+/**
+ * __ssp_enable_txwm() - enable transmit watermark interrupts
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Enable interrupt generation when the transmit FIFO watermark is reached
+ * on the SiFive UART referred to by @ssp.
+ */
+static void __ssp_enable_txwm(struct sifive_serial_port *ssp)
+{
+ if (ssp->ier & SIFIVE_SERIAL_IE_TXWM_MASK)
+ return;
+
+ ssp->ier |= SIFIVE_SERIAL_IE_TXWM_MASK;
+ __ssp_writel(ssp->ier, SIFIVE_SERIAL_IE_OFFS, ssp);
+}
+
+/**
+ * __ssp_enable_rxwm() - enable receive watermark interrupts
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Enable interrupt generation when the receive FIFO watermark is reached
+ * on the SiFive UART referred to by @ssp.
+ */
+static void __ssp_enable_rxwm(struct sifive_serial_port *ssp)
+{
+ if (ssp->ier & SIFIVE_SERIAL_IE_RXWM_MASK)
+ return;
+
+ ssp->ier |= SIFIVE_SERIAL_IE_RXWM_MASK;
+ __ssp_writel(ssp->ier, SIFIVE_SERIAL_IE_OFFS, ssp);
+}
+
+/**
+ * __ssp_disable_txwm() - disable transmit watermark interrupts
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Disable interrupt generation when the transmit FIFO watermark is reached
+ * on the UART referred to by @ssp.
+ */
+static void __ssp_disable_txwm(struct sifive_serial_port *ssp)
+{
+ if (!(ssp->ier & SIFIVE_SERIAL_IE_TXWM_MASK))
+ return;
+
+ ssp->ier &= ~SIFIVE_SERIAL_IE_TXWM_MASK;
+ __ssp_writel(ssp->ier, SIFIVE_SERIAL_IE_OFFS, ssp);
+}
+
+/**
+ * __ssp_disable_rxwm() - disable receive watermark interrupts
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Disable interrupt generation when the receive FIFO watermark is reached
+ * on the UART referred to by @ssp.
+ */
+static void __ssp_disable_rxwm(struct sifive_serial_port *ssp)
+{
+ if (!(ssp->ier & SIFIVE_SERIAL_IE_RXWM_MASK))
+ return;
+
+ ssp->ier &= ~SIFIVE_SERIAL_IE_RXWM_MASK;
+ __ssp_writel(ssp->ier, SIFIVE_SERIAL_IE_OFFS, ssp);
+}
+
+/**
+ * __ssp_receive_char() - receive a byte from the UART
+ * @ssp: pointer to a struct sifive_serial_port
+ * @is_empty: char pointer to return whether the RX FIFO is empty
+ *
+ * Try to read a byte from the SiFive UART RX FIFO, referenced by
+ * @ssp, and to return it. Also returns the RX FIFO empty bit in
+ * the char pointed to by @is_empty. The caller must pass the byte back to the
+ * Linux serial layer if needed.
+ *
+ * Returns: the byte read from the UART RX FIFO.
+ */
+static char __ssp_receive_char(struct sifive_serial_port *ssp, char *is_empty)
+{
+ u32 v;
+ u8 ch;
+
+ v = __ssp_readl(ssp, SIFIVE_SERIAL_RXDATA_OFFS);
+
+ if (!is_empty)
+ WARN_ON(1);
+ else
+ *is_empty = (v & SIFIVE_SERIAL_RXDATA_EMPTY_MASK) >>
+ SIFIVE_SERIAL_RXDATA_EMPTY_SHIFT;
+
+ ch = (v & SIFIVE_SERIAL_RXDATA_DATA_MASK) >>
+ SIFIVE_SERIAL_RXDATA_DATA_SHIFT;
+
+ return ch;
+}
+
+/**
+ * __ssp_receive_chars() - receive multiple bytes from the UART
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Receive up to an RX FIFO's worth of bytes from the SiFive UART referred
+ * to by @ssp and pass them up to the Linux serial layer.
+ *
+ * Context: Expects ssp->port.lock to be held by caller.
+ */
+static void __ssp_receive_chars(struct sifive_serial_port *ssp)
+{
+ unsigned char ch;
+ char is_empty;
+ int c;
+
+ for (c = SIFIVE_RX_FIFO_DEPTH; c > 0; --c) {
+ ch = __ssp_receive_char(ssp, &is_empty);
+ if (is_empty)
+ break;
+
+ ssp->port.icount.rx++;
+ uart_insert_char(&ssp->port, 0, 0, ch, TTY_NORMAL);
+ }
+
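+ /* Push the received data up to the TTY layer with the port lock dropped */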
+ spin_unlock(&ssp->port.lock);
+ tty_flip_buffer_push(&ssp->port.state->port);
+ spin_lock(&ssp->port.lock);
+}
+
+/**
+ * __ssp_update_div() - calculate the clock divisor setting from the target line rate
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Calculate the appropriate value of the clock divisor for the UART
+ * and target line rate referred to by @ssp and write it into the
+ * hardware.
+ */
+static void __ssp_update_div(struct sifive_serial_port *ssp)
+{
+ u16 div;
+
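+ /*
+ * The divisor register is programmed with (clkin_rate / baud_rate) - 1;
+ * round the quotient up so the resulting rate does not exceed the target.
+ */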
+ div = DIV_ROUND_UP(ssp->clkin_rate, ssp->baud_rate) - 1;
+
+ __ssp_writel(div, SIFIVE_SERIAL_DIV_OFFS, ssp);
+}
+
+/**
+ * __ssp_update_baud_rate() - set the UART "baud rate"
+ * @ssp: pointer to a struct sifive_serial_port
+ * @rate: new target bit rate
+ *
+ * Calculate the UART divisor value for the target bit rate @rate for the
+ * SiFive UART described by @ssp and program it into the UART. There may
+ * be some error between the target bit rate and the actual bit rate implemented
+ * by the UART due to clock ratio granularity.
+ */
+static void __ssp_update_baud_rate(struct sifive_serial_port *ssp,
+ unsigned int rate)
+{
+ if (ssp->baud_rate == rate)
+ return;
+
+ ssp->baud_rate = rate;
+ __ssp_update_div(ssp);
+}
+
+/**
+ * __ssp_set_stop_bits() - set the number of stop bits
+ * @ssp: pointer to a struct sifive_serial_port
+ * @nstop: 1 or 2 (stop bits)
+ *
+ * Program the SiFive UART referred to by @ssp to use @nstop stop bits.
+ */
+static void __ssp_set_stop_bits(struct sifive_serial_port *ssp, char nstop)
+{
+ u32 v;
+
+ if (nstop < 1 || nstop > 2) {
+ WARN_ON(1);
+ return;
+ }
+
+ v = __ssp_readl(ssp, SIFIVE_SERIAL_TXCTRL_OFFS);
+ v &= ~SIFIVE_SERIAL_TXCTRL_NSTOP_MASK;
+ v |= (nstop - 1) << SIFIVE_SERIAL_TXCTRL_NSTOP_SHIFT;
+ __ssp_writel(v, SIFIVE_SERIAL_TXCTRL_OFFS, ssp);
+}
+
+/**
+ * __ssp_wait_for_xmitr() - wait for an empty slot on the TX FIFO
+ * @ssp: pointer to a struct sifive_serial_port
+ *
+ * Delay while the UART TX FIFO referred to by @ssp is marked as full.
+ *
+ * Context: Any context.
+ */
+static void __maybe_unused __ssp_wait_for_xmitr(struct sifive_serial_port *ssp)
+{
+ while (sifive_serial_is_txfifo_full(ssp))
+ udelay(1); /* XXX Could probably be more intelligent here */
+}
+
+/*
+ * Linux serial API functions
+ */
+
+static void sifive_serial_stop_tx(struct uart_port *port)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+
+ __ssp_disable_txwm(ssp);
+}
+
+static void sifive_serial_stop_rx(struct uart_port *port)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+
+ __ssp_disable_rxwm(ssp);
+}
+
+static void sifive_serial_start_tx(struct uart_port *port)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+
+ __ssp_enable_txwm(ssp);
+}
+
+static irqreturn_t sifive_serial_irq(int irq, void *dev_id)
+{
+ struct sifive_serial_port *ssp = dev_id;
+ u32 ip;
+
+ spin_lock(&ssp->port.lock);
+
+ ip = __ssp_readl(ssp, SIFIVE_SERIAL_IP_OFFS);
+ if (!ip) {
+ spin_unlock(&ssp->port.lock);
+ return IRQ_NONE;
+ }
+
+ if (ip & SIFIVE_SERIAL_IP_RXWM_MASK)
+ __ssp_receive_chars(ssp);
+ if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
+ __ssp_transmit_chars(ssp);
+
+ spin_unlock(&ssp->port.lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int sifive_serial_tx_empty(struct uart_port *port)
+{
+ return TIOCSER_TEMT;
+}
+
+static unsigned int sifive_serial_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR;
+}
+
+static void sifive_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* IP block does not support these signals */
+}
+
+static void sifive_serial_break_ctl(struct uart_port *port, int break_state)
+{
+ /* IP block does not support sending a break */
+}
+
+static int sifive_serial_startup(struct uart_port *port)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+
+ __ssp_enable_rxwm(ssp);
+
+ return 0;
+}
+
+static void sifive_serial_shutdown(struct uart_port *port)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+
+ __ssp_disable_rxwm(ssp);
+ __ssp_disable_txwm(ssp);
+}
+
+/**
+ * sifive_serial_clk_notifier() - clock post-rate-change notifier
+ * @nb: pointer to the struct notifier_block, from the notifier code
+ * @event: event mask from the notifier code
+ * @data: pointer to the struct clk_notifier_data from the notifier code
+ *
+ * On the V0 SoC, the UART IP block is derived from the CPU clock source
+ * after a synchronous divide-by-two divider, so any CPU clock rate change
+ * requires the UART clock divisor to be updated. This presumably could
+ * corrupt any serial word currently being transmitted or received. It
+ * would probably be better to stop receives and transmits, then complete
+ * the baud rate change, then re-enable them.
+ */
+static int sifive_serial_clk_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+ struct sifive_serial_port *ssp = notifier_to_sifive_serial_port(nb);
+
+ if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) {
+ ssp->clkin_rate = cnd->new_rate;
+ __ssp_update_div(ssp);
+ }
+
+ return NOTIFY_OK;
+}
+
+static void sifive_serial_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+ unsigned long flags;
+ u32 v, old_v;
+ int rate;
+ char nstop;
+
+ if ((termios->c_cflag & CSIZE) != CS8)
+ dev_err_once(ssp->port.dev, "only 8-bit words supported\n");
+ if (termios->c_iflag & (INPCK | PARMRK))
+ dev_err_once(ssp->port.dev, "parity checking not supported\n");
+ if (termios->c_iflag & BRKINT)
+ dev_err_once(ssp->port.dev, "BREAK detection not supported\n");
+
+ /* Set number of stop bits */
+ nstop = (termios->c_cflag & CSTOPB) ? 2 : 1;
+ __ssp_set_stop_bits(ssp, nstop);
+
+ /* Set line rate */
+ rate = uart_get_baud_rate(port, termios, old, 0, ssp->clkin_rate / 16);
+ __ssp_update_baud_rate(ssp, rate);
+
+ spin_lock_irqsave(&ssp->port.lock, flags);
+
+ /* Update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, rate);
+
+ ssp->port.read_status_mask = 0;
+
+ /* Ignore all characters if CREAD is not set */
+ v = __ssp_readl(ssp, SIFIVE_SERIAL_RXCTRL_OFFS);
+ old_v = v;
+ if ((termios->c_cflag & CREAD) == 0)
+ v &= ~SIFIVE_SERIAL_RXCTRL_RXEN_MASK;
+ else
+ v |= SIFIVE_SERIAL_RXCTRL_RXEN_MASK;
+ if (v != old_v)
+ __ssp_writel(v, SIFIVE_SERIAL_RXCTRL_OFFS, ssp);
+
+ spin_unlock_irqrestore(&ssp->port.lock, flags);
+}
+
+static void sifive_serial_release_port(struct uart_port *port)
+{
+}
+
+static int sifive_serial_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void sifive_serial_config_port(struct uart_port *port, int flags)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+
+ ssp->port.type = PORT_SIFIVE_V0;
+}
+
+static int sifive_serial_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static const char *sifive_serial_type(struct uart_port *port)
+{
+ return port->type == PORT_SIFIVE_V0 ? "SiFive UART v0" : NULL;
+}
+
+/*
+ * Early console support
+ */
+
+#ifdef CONFIG_SERIAL_EARLYCON
+static void early_sifive_serial_putc(struct uart_port *port, int c)
+{
+ while (__ssp_early_readl(port, SIFIVE_SERIAL_TXDATA_OFFS) &
+ SIFIVE_SERIAL_TXDATA_FULL_MASK)
+ cpu_relax();
+
+ __ssp_early_writel(c, SIFIVE_SERIAL_TXDATA_OFFS, port);
+}
+
+static void early_sifive_serial_write(struct console *con, const char *s,
+ unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+ struct uart_port *port = &dev->port;
+
+ uart_console_write(port, s, n, early_sifive_serial_putc);
+}
+
+static int __init early_sifive_serial_setup(struct earlycon_device *dev,
+ const char *options)
+{
+ struct uart_port *port = &dev->port;
+
+ if (!port->membase)
+ return -ENODEV;
+
+ dev->con->write = early_sifive_serial_write;
+
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(sifive, "sifive,uart0", early_sifive_serial_setup);
+OF_EARLYCON_DECLARE(sifive, "sifive,fu540-c000-uart0",
+ early_sifive_serial_setup);
+#endif /* CONFIG_SERIAL_EARLYCON */
+
+/*
+ * Linux console interface
+ */
+
+#ifdef CONFIG_SERIAL_SIFIVE_CONSOLE
+
+static struct sifive_serial_port *sifive_serial_console_ports[SIFIVE_SERIAL_MAX_PORTS];
+
+static void sifive_serial_console_putchar(struct uart_port *port, int ch)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+
+ __ssp_wait_for_xmitr(ssp);
+ __ssp_transmit_char(ssp, ch);
+}
+
+static void sifive_serial_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct sifive_serial_port *ssp = sifive_serial_console_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ if (!ssp)
+ return;
+
+ local_irq_save(flags);
+ if (ssp->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&ssp->port.lock);
+ else
+ spin_lock(&ssp->port.lock);
+
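+ /* Mask UART interrupts while console output is polled out, restore below */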
+ ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
+ __ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
+
+ uart_console_write(&ssp->port, s, count, sifive_serial_console_putchar);
+
+ __ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
+
+ if (locked)
+ spin_unlock(&ssp->port.lock);
+ local_irq_restore(flags);
+}
+
+static int __init sifive_serial_console_setup(struct console *co, char *options)
+{
+ struct sifive_serial_port *ssp;
+ int baud = SIFIVE_DEFAULT_BAUD_RATE;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= SIFIVE_SERIAL_MAX_PORTS)
+ return -ENODEV;
+
+ ssp = sifive_serial_console_ports[co->index];
+ if (!ssp)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&ssp->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver sifive_serial_uart_driver;
+
+static struct console sifive_serial_console = {
+ .name = SIFIVE_TTY_PREFIX,
+ .write = sifive_serial_console_write,
+ .device = uart_console_device,
+ .setup = sifive_serial_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sifive_serial_uart_driver,
+};
+
+static int __init sifive_console_init(void)
+{
+ register_console(&sifive_serial_console);
+ return 0;
+}
+
+console_initcall(sifive_console_init);
+
+static void __ssp_add_console_port(struct sifive_serial_port *ssp)
+{
+ sifive_serial_console_ports[ssp->port.line] = ssp;
+}
+
+static void __ssp_remove_console_port(struct sifive_serial_port *ssp)
+{
+ sifive_serial_console_ports[ssp->port.line] = NULL;
+}
+
+#define SIFIVE_SERIAL_CONSOLE (&sifive_serial_console)
+
+#else
+
+#define SIFIVE_SERIAL_CONSOLE NULL
+
+static void __ssp_add_console_port(struct sifive_serial_port *ssp)
+{}
+static void __ssp_remove_console_port(struct sifive_serial_port *ssp)
+{}
+
+#endif
+
+static const struct uart_ops sifive_serial_uops = {
+ .tx_empty = sifive_serial_tx_empty,
+ .set_mctrl = sifive_serial_set_mctrl,
+ .get_mctrl = sifive_serial_get_mctrl,
+ .stop_tx = sifive_serial_stop_tx,
+ .start_tx = sifive_serial_start_tx,
+ .stop_rx = sifive_serial_stop_rx,
+ .break_ctl = sifive_serial_break_ctl,
+ .startup = sifive_serial_startup,
+ .shutdown = sifive_serial_shutdown,
+ .set_termios = sifive_serial_set_termios,
+ .type = sifive_serial_type,
+ .release_port = sifive_serial_release_port,
+ .request_port = sifive_serial_request_port,
+ .config_port = sifive_serial_config_port,
+ .verify_port = sifive_serial_verify_port,
+};
+
+static struct uart_driver sifive_serial_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = SIFIVE_SERIAL_NAME,
+ .dev_name = SIFIVE_TTY_PREFIX,
+ .nr = SIFIVE_SERIAL_MAX_PORTS,
+ .cons = SIFIVE_SERIAL_CONSOLE,
+};
+
+static int sifive_serial_probe(struct platform_device *pdev)
+{
+ struct sifive_serial_port *ssp;
+ struct resource *mem;
+ struct clk *clk;
+ void __iomem *base;
+ int irq, id, r;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "could not acquire interrupt\n");
+ return -EPROBE_DEFER;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev, "could not acquire device memory\n");
+ return PTR_ERR(base);
+ }
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "unable to find controller clock\n");
+ return PTR_ERR(clk);
+ }
+
+ id = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (id < 0) {
+ dev_err(&pdev->dev, "missing aliases entry\n");
+ return id;
+ }
+
+#ifdef CONFIG_SERIAL_SIFIVE_CONSOLE
+ if (id >= SIFIVE_SERIAL_MAX_PORTS) {
+ dev_err(&pdev->dev, "too many UARTs (%d)\n", id);
+ return -EINVAL;
+ }
+#endif
+
+ ssp = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL);
+ if (!ssp)
+ return -ENOMEM;
+
+ ssp->port.dev = &pdev->dev;
+ ssp->port.type = PORT_SIFIVE_V0;
+ ssp->port.iotype = UPIO_MEM;
+ ssp->port.irq = irq;
+ ssp->port.fifosize = SIFIVE_TX_FIFO_DEPTH;
+ ssp->port.ops = &sifive_serial_uops;
+ ssp->port.line = id;
+ ssp->port.mapbase = mem->start;
+ ssp->port.membase = base;
+ ssp->dev = &pdev->dev;
+ ssp->clk = clk;
+ ssp->clk_notifier.notifier_call = sifive_serial_clk_notifier;
+
+ r = clk_notifier_register(ssp->clk, &ssp->clk_notifier);
+ if (r) {
+ dev_err(&pdev->dev, "could not register clock notifier: %d\n",
+ r);
+ goto probe_out1;
+ }
+
+ /* Set up clock divider */
+ ssp->clkin_rate = clk_get_rate(ssp->clk);
+ ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
+ __ssp_update_div(ssp);
+
+ platform_set_drvdata(pdev, ssp);
+
+ /* Enable transmits and set the watermark level to 1 */
+ __ssp_writel((1 << SIFIVE_SERIAL_TXCTRL_TXCNT_SHIFT) |
+ SIFIVE_SERIAL_TXCTRL_TXEN_MASK,
+ SIFIVE_SERIAL_TXCTRL_OFFS, ssp);
+
+ /* Enable receives and set the watermark level to 0 */
+ __ssp_writel((0 << SIFIVE_SERIAL_RXCTRL_RXCNT_SHIFT) |
+ SIFIVE_SERIAL_RXCTRL_RXEN_MASK,
+ SIFIVE_SERIAL_RXCTRL_OFFS, ssp);
+
+ r = request_irq(ssp->port.irq, sifive_serial_irq, ssp->port.irqflags,
+ dev_name(&pdev->dev), ssp);
+ if (r) {
+ dev_err(&pdev->dev, "could not attach interrupt: %d\n", r);
+ goto probe_out2;
+ }
+
+ __ssp_add_console_port(ssp);
+
+ r = uart_add_one_port(&sifive_serial_uart_driver, &ssp->port);
+ if (r != 0) {
+ dev_err(&pdev->dev, "could not add uart: %d\n", r);
+ goto probe_out3;
+ }
+
+ return 0;
+
+probe_out3:
+ __ssp_remove_console_port(ssp);
+ free_irq(ssp->port.irq, ssp);
+probe_out2:
+ clk_notifier_unregister(ssp->clk, &ssp->clk_notifier);
+probe_out1:
+ return r;
+}
+
+static int sifive_serial_remove(struct platform_device *dev)
+{
+ struct sifive_serial_port *ssp = platform_get_drvdata(dev);
+
+ __ssp_remove_console_port(ssp);
+ uart_remove_one_port(&sifive_serial_uart_driver, &ssp->port);
+ free_irq(ssp->port.irq, ssp);
+ clk_notifier_unregister(ssp->clk, &ssp->clk_notifier);
+
+ return 0;
+}
+
+static const struct of_device_id sifive_serial_of_match[] = {
+ { .compatible = "sifive,fu540-c000-uart0" },
+ { .compatible = "sifive,uart0" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sifive_serial_of_match);
+
+static struct platform_driver sifive_serial_platform_driver = {
+ .probe = sifive_serial_probe,
+ .remove = sifive_serial_remove,
+ .driver = {
+ .name = SIFIVE_SERIAL_NAME,
+ .of_match_table = of_match_ptr(sifive_serial_of_match),
+ },
+};
+
+static int __init sifive_serial_init(void)
+{
+ int r;
+
+ r = uart_register_driver(&sifive_serial_uart_driver);
+ if (r)
+ goto init_out1;
+
+ r = platform_driver_register(&sifive_serial_platform_driver);
+ if (r)
+ goto init_out2;
+
+ return 0;
+
+init_out2:
+ uart_unregister_driver(&sifive_serial_uart_driver);
+init_out1:
+ return r;
+}
+
+static void __exit sifive_serial_exit(void)
+{
+ platform_driver_unregister(&sifive_serial_platform_driver);
+ uart_unregister_driver(&sifive_serial_uart_driver);
+}
+
+module_init(sifive_serial_init);
+module_exit(sifive_serial_exit);
+
+MODULE_DESCRIPTION("SiFive UART serial driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul Walmsley <paul@pwsan.com>");
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index fe9170731c16..283493358a62 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* C-Brick Serial Port (and console) driver for SGI Altix machines.
*
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 1891a45ac05d..73d71a4e6c0c 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -10,6 +10,9 @@
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/sprd-dma.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
@@ -75,6 +78,7 @@
/* control register 1 */
#define SPRD_CTL1 0x001C
+#define SPRD_DMA_EN BIT(15)
#define RX_HW_FLOW_CTL_THLD BIT(6)
#define RX_HW_FLOW_CTL_EN BIT(7)
#define TX_HW_FLOW_CTL_EN BIT(8)
@@ -86,6 +90,7 @@
#define THLD_TX_EMPTY 0x40
#define THLD_TX_EMPTY_SHIFT 8
#define THLD_RX_FULL 0x40
+#define THLD_RX_FULL_MASK GENMASK(6, 0)
/* config baud rate register */
#define SPRD_CLKD0 0x0024
@@ -100,15 +105,38 @@
#define SPRD_IMSR_TX_FIFO_EMPTY BIT(1)
#define SPRD_IMSR_BREAK_DETECT BIT(7)
#define SPRD_IMSR_TIMEOUT BIT(13)
+#define SPRD_DEFAULT_SOURCE_CLK 26000000
+
+#define SPRD_RX_DMA_STEP 1
+#define SPRD_RX_FIFO_FULL 1
+#define SPRD_TX_FIFO_FULL 0x20
+#define SPRD_UART_RX_SIZE (UART_XMIT_SIZE / 4)
+
+struct sprd_uart_dma {
+ struct dma_chan *chn;
+ unsigned char *virt;
+ dma_addr_t phys_addr;
+ dma_cookie_t cookie;
+ u32 trans_len;
+ bool enable;
+};
struct sprd_uart_port {
struct uart_port port;
char name[16];
+ struct clk *clk;
+ struct sprd_uart_dma tx_dma;
+ struct sprd_uart_dma rx_dma;
+ dma_addr_t pos;
+ unsigned char *rx_buf_tail;
};
static struct sprd_uart_port *sprd_port[UART_NR_MAX];
static int sprd_ports_num;
+static int sprd_start_dma_rx(struct uart_port *port);
+static int sprd_tx_dma_config(struct uart_port *port);
+
static inline unsigned int serial_in(struct uart_port *port,
unsigned int offset)
{
@@ -139,45 +167,389 @@ static void sprd_set_mctrl(struct uart_port *port, unsigned int mctrl)
/* nothing to do */
}
-static void sprd_stop_tx(struct uart_port *port)
+static void sprd_stop_rx(struct uart_port *port)
{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
unsigned int ien, iclr;
+ if (sp->rx_dma.enable)
+ dmaengine_terminate_all(sp->rx_dma.chn);
+
iclr = serial_in(port, SPRD_ICLR);
ien = serial_in(port, SPRD_IEN);
- iclr |= SPRD_IEN_TX_EMPTY;
- ien &= ~SPRD_IEN_TX_EMPTY;
+ ien &= ~(SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT);
+ iclr |= SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT;
- serial_out(port, SPRD_ICLR, iclr);
serial_out(port, SPRD_IEN, ien);
+ serial_out(port, SPRD_ICLR, iclr);
}
-static void sprd_start_tx(struct uart_port *port)
+static void sprd_uart_dma_enable(struct uart_port *port, bool enable)
{
- unsigned int ien;
+ u32 val = serial_in(port, SPRD_CTL1);
- ien = serial_in(port, SPRD_IEN);
- if (!(ien & SPRD_IEN_TX_EMPTY)) {
- ien |= SPRD_IEN_TX_EMPTY;
- serial_out(port, SPRD_IEN, ien);
+ if (enable)
+ val |= SPRD_DMA_EN;
+ else
+ val &= ~SPRD_DMA_EN;
+
+ serial_out(port, SPRD_CTL1, val);
+}
+
+static void sprd_stop_tx_dma(struct uart_port *port)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ struct circ_buf *xmit = &port->state->xmit;
+ struct dma_tx_state state;
+ u32 trans_len;
+
+ dmaengine_pause(sp->tx_dma.chn);
+
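+ /*
+ * The residue reported by the SPRD DMA engine is treated as the current
+ * transfer address here, so the amount already transmitted is its offset
+ * from the start of the mapped buffer.
+ */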
+ dmaengine_tx_status(sp->tx_dma.chn, sp->tx_dma.cookie, &state);
+ if (state.residue) {
+ trans_len = state.residue - sp->tx_dma.phys_addr;
+ xmit->tail = (xmit->tail + trans_len) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += trans_len;
+ dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
+ sp->tx_dma.trans_len, DMA_TO_DEVICE);
}
+
+ dmaengine_terminate_all(sp->tx_dma.chn);
+ sp->tx_dma.trans_len = 0;
}
-static void sprd_stop_rx(struct uart_port *port)
+static int sprd_tx_buf_remap(struct uart_port *port)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ struct circ_buf *xmit = &port->state->xmit;
+
+ sp->tx_dma.trans_len =
+ CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ sp->tx_dma.phys_addr = dma_map_single(port->dev,
+ (void *)&(xmit->buf[xmit->tail]),
+ sp->tx_dma.trans_len,
+ DMA_TO_DEVICE);
+ return dma_mapping_error(port->dev, sp->tx_dma.phys_addr);
+}
+
+static void sprd_complete_tx_dma(void *data)
+{
+ struct uart_port *port = (struct uart_port *)data;
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
+ sp->tx_dma.trans_len, DMA_TO_DEVICE);
+
+ xmit->tail = (xmit->tail + sp->tx_dma.trans_len) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += sp->tx_dma.trans_len;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit) || sprd_tx_buf_remap(port) ||
+ sprd_tx_dma_config(port))
+ sp->tx_dma.trans_len = 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int sprd_uart_dma_submit(struct uart_port *port,
+ struct sprd_uart_dma *ud, u32 trans_len,
+ enum dma_transfer_direction direction,
+ dma_async_tx_callback callback)
+{
+ struct dma_async_tx_descriptor *dma_des;
+ unsigned long flags;
+
+ flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE,
+ SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_TRANS_INT);
+
+ dma_des = dmaengine_prep_slave_single(ud->chn, ud->phys_addr, trans_len,
+ direction, flags);
+ if (!dma_des)
+ return -ENODEV;
+
+ dma_des->callback = callback;
+ dma_des->callback_param = port;
+
+ ud->cookie = dmaengine_submit(dma_des);
+ if (dma_submit_error(ud->cookie))
+ return dma_submit_error(ud->cookie);
+
+ dma_async_issue_pending(ud->chn);
+
+ return 0;
+}
+
+static int sprd_tx_dma_config(struct uart_port *port)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
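+ /* limit the DMA burst size to the TX FIFO full threshold */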
+ u32 burst = sp->tx_dma.trans_len > SPRD_TX_FIFO_FULL ?
+ SPRD_TX_FIFO_FULL : sp->tx_dma.trans_len;
+ int ret;
+ struct dma_slave_config cfg = {
+ .dst_addr = port->mapbase + SPRD_TXD,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_maxburst = burst,
+ };
+
+ ret = dmaengine_slave_config(sp->tx_dma.chn, &cfg);
+ if (ret < 0)
+ return ret;
+
+ return sprd_uart_dma_submit(port, &sp->tx_dma, sp->tx_dma.trans_len,
+ DMA_MEM_TO_DEV, sprd_complete_tx_dma);
+}
+
+static void sprd_start_tx_dma(struct uart_port *port)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ serial_out(port, SPRD_TXD, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ sprd_stop_tx_dma(port);
+ return;
+ }
+
+ if (sp->tx_dma.trans_len)
+ return;
+
+ if (sprd_tx_buf_remap(port) || sprd_tx_dma_config(port))
+ sp->tx_dma.trans_len = 0;
+}
+
+static void sprd_rx_full_thld(struct uart_port *port, u32 thld)
+{
+ u32 val = serial_in(port, SPRD_CTL2);
+
+ val &= ~THLD_RX_FULL_MASK;
+ val |= thld & THLD_RX_FULL_MASK;
+ serial_out(port, SPRD_CTL2, val);
+}
+
+static int sprd_rx_alloc_buf(struct sprd_uart_port *sp)
+{
+ sp->rx_dma.virt = dma_alloc_coherent(sp->port.dev, SPRD_UART_RX_SIZE,
+ &sp->rx_dma.phys_addr, GFP_KERNEL);
+ if (!sp->rx_dma.virt)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sprd_rx_free_buf(struct sprd_uart_port *sp)
+{
+ if (sp->rx_dma.virt)
+ dma_free_coherent(sp->port.dev, SPRD_UART_RX_SIZE,
+ sp->rx_dma.virt, sp->rx_dma.phys_addr);
+
+}
+
+static int sprd_rx_dma_config(struct uart_port *port, u32 burst)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ struct dma_slave_config cfg = {
+ .src_addr = port->mapbase + SPRD_RXD,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_maxburst = burst,
+ };
+
+ return dmaengine_slave_config(sp->rx_dma.chn, &cfg);
+}
+
+static void sprd_uart_dma_rx(struct uart_port *port)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ struct tty_port *tty = &port->state->port;
+
+ port->icount.rx += sp->rx_dma.trans_len;
+ tty_insert_flip_string(tty, sp->rx_buf_tail, sp->rx_dma.trans_len);
+ tty_flip_buffer_push(tty);
+}
+
+static void sprd_uart_dma_irq(struct uart_port *port)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(sp->rx_dma.chn,
+ sp->rx_dma.cookie, &state);
+ if (status == DMA_ERROR)
+ sprd_stop_rx(port);
+
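+ /*
+ * The residue is used as the current DMA write position within the RX
+ * buffer; if it has not advanced, there is no new data to push.
+ */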
+ if (!state.residue && sp->pos == sp->rx_dma.phys_addr)
+ return;
+
+ if (!state.residue) {
+ sp->rx_dma.trans_len = SPRD_UART_RX_SIZE +
+ sp->rx_dma.phys_addr - sp->pos;
+ sp->pos = sp->rx_dma.phys_addr;
+ } else {
+ sp->rx_dma.trans_len = state.residue - sp->pos;
+ sp->pos = state.residue;
+ }
+
+ sprd_uart_dma_rx(port);
+ sp->rx_buf_tail += sp->rx_dma.trans_len;
+}
+
+static void sprd_complete_rx_dma(void *data)
+{
+ struct uart_port *port = (struct uart_port *)data;
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ status = dmaengine_tx_status(sp->rx_dma.chn,
+ sp->rx_dma.cookie, &state);
+ if (status != DMA_COMPLETE) {
+ sprd_stop_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
+ }
+
+ if (sp->pos != sp->rx_dma.phys_addr) {
+ sp->rx_dma.trans_len = SPRD_UART_RX_SIZE +
+ sp->rx_dma.phys_addr - sp->pos;
+ sprd_uart_dma_rx(port);
+ sp->rx_buf_tail += sp->rx_dma.trans_len;
+ }
+
+ if (sprd_start_dma_rx(port))
+ sprd_stop_rx(port);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int sprd_start_dma_rx(struct uart_port *port)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+ int ret;
+
+ if (!sp->rx_dma.enable)
+ return 0;
+
+ sp->pos = sp->rx_dma.phys_addr;
+ sp->rx_buf_tail = sp->rx_dma.virt;
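+ /* request a DMA transfer for every byte: threshold and step are both one */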
+ sprd_rx_full_thld(port, SPRD_RX_FIFO_FULL);
+ ret = sprd_rx_dma_config(port, SPRD_RX_DMA_STEP);
+ if (ret)
+ return ret;
+
+ return sprd_uart_dma_submit(port, &sp->rx_dma, SPRD_UART_RX_SIZE,
+ DMA_DEV_TO_MEM, sprd_complete_rx_dma);
+}
+
+static void sprd_release_dma(struct uart_port *port)
+{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+
+ sprd_uart_dma_enable(port, false);
+
+ if (sp->rx_dma.enable)
+ dma_release_channel(sp->rx_dma.chn);
+
+ if (sp->tx_dma.enable)
+ dma_release_channel(sp->tx_dma.chn);
+
+ sp->tx_dma.enable = false;
+ sp->rx_dma.enable = false;
+}
+
+static void sprd_request_dma(struct uart_port *port)
{
+ struct sprd_uart_port *sp =
+ container_of(port, struct sprd_uart_port, port);
+
+ sp->tx_dma.enable = true;
+ sp->rx_dma.enable = true;
+
+ sp->tx_dma.chn = dma_request_chan(port->dev, "tx");
+ if (IS_ERR(sp->tx_dma.chn)) {
+ dev_err(port->dev, "request TX DMA channel failed, ret = %ld\n",
+ PTR_ERR(sp->tx_dma.chn));
+ sp->tx_dma.enable = false;
+ }
+
+ sp->rx_dma.chn = dma_request_chan(port->dev, "rx");
+ if (IS_ERR(sp->rx_dma.chn)) {
+ dev_err(port->dev, "request RX DMA channel failed, ret = %ld\n",
+ PTR_ERR(sp->rx_dma.chn));
+ sp->rx_dma.enable = false;
+ }
+}
+
+static void sprd_stop_tx(struct uart_port *port)
+{
+ struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
+ port);
unsigned int ien, iclr;
+ if (sp->tx_dma.enable) {
+ sprd_stop_tx_dma(port);
+ return;
+ }
+
iclr = serial_in(port, SPRD_ICLR);
ien = serial_in(port, SPRD_IEN);
- ien &= ~(SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT);
- iclr |= SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT;
+ iclr |= SPRD_IEN_TX_EMPTY;
+ ien &= ~SPRD_IEN_TX_EMPTY;
serial_out(port, SPRD_IEN, ien);
serial_out(port, SPRD_ICLR, iclr);
}
+static void sprd_start_tx(struct uart_port *port)
+{
+ struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
+ port);
+ unsigned int ien;
+
+ if (sp->tx_dma.enable) {
+ sprd_start_tx_dma(port);
+ return;
+ }
+
+ ien = serial_in(port, SPRD_IEN);
+ if (!(ien & SPRD_IEN_TX_EMPTY)) {
+ ien |= SPRD_IEN_TX_EMPTY;
+ serial_out(port, SPRD_IEN, ien);
+ }
+}
+
/* The Sprd serial does not support this function. */
static void sprd_break_ctl(struct uart_port *port, int break_state)
{
@@ -218,9 +590,16 @@ static int handle_lsr_errors(struct uart_port *port,
static inline void sprd_rx(struct uart_port *port)
{
+ struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
+ port);
struct tty_port *tty = &port->state->port;
unsigned int ch, flag, lsr, max_count = SPRD_TIMEOUT;
+ if (sp->rx_dma.enable) {
+ sprd_uart_dma_irq(port);
+ return;
+ }
+
while ((serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK) &&
max_count--) {
lsr = serial_in(port, SPRD_LSR);
@@ -304,6 +683,25 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void sprd_uart_dma_startup(struct uart_port *port,
+ struct sprd_uart_port *sp)
+{
+ int ret;
+
+ sprd_request_dma(port);
+ if (!(sp->rx_dma.enable || sp->tx_dma.enable))
+ return;
+
+ ret = sprd_start_dma_rx(port);
+ if (ret) {
+ sp->rx_dma.enable = false;
+ dma_release_channel(sp->rx_dma.chn);
+ dev_warn(port->dev, "failed to start RX DMA mode\n");
+ }
+
+ sprd_uart_dma_enable(port, true);
+}
+
static int sprd_startup(struct uart_port *port)
{
int ret = 0;
@@ -332,6 +730,9 @@ static int sprd_startup(struct uart_port *port)
/* allocate irq */
sp = container_of(port, struct sprd_uart_port, port);
snprintf(sp->name, sizeof(sp->name), "sprd_serial%d", port->line);
+
+ sprd_uart_dma_startup(port, sp);
+
ret = devm_request_irq(port->dev, port->irq, sprd_handle_irq,
IRQF_SHARED, sp->name, port);
if (ret) {
@@ -346,7 +747,9 @@ static int sprd_startup(struct uart_port *port)
/* enable interrupt */
spin_lock_irqsave(&port->lock, flags);
ien = serial_in(port, SPRD_IEN);
- ien |= SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
+ ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
+ if (!sp->rx_dma.enable)
+ ien |= SPRD_IEN_RX_FULL;
serial_out(port, SPRD_IEN, ien);
spin_unlock_irqrestore(&port->lock, flags);
@@ -355,6 +758,7 @@ static int sprd_startup(struct uart_port *port)
static void sprd_shutdown(struct uart_port *port)
{
+ sprd_release_dma(port);
serial_out(port, SPRD_IEN, 0);
serial_out(port, SPRD_ICLR, ~0);
devm_free_irq(port->dev, port->irq, port);
@@ -491,6 +895,22 @@ static int sprd_verify_port(struct uart_port *port, struct serial_struct *ser)
return 0;
}
+static void sprd_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct sprd_uart_port *sup =
+ container_of(port, struct sprd_uart_port, port);
+
+ switch (state) {
+ case UART_PM_STATE_ON:
+ clk_prepare_enable(sup->clk);
+ break;
+ case UART_PM_STATE_OFF:
+ clk_disable_unprepare(sup->clk);
+ break;
+ }
+}
+
static const struct uart_ops serial_sprd_ops = {
.tx_empty = sprd_tx_empty,
.get_mctrl = sprd_get_mctrl,
@@ -507,6 +927,7 @@ static const struct uart_ops serial_sprd_ops = {
.request_port = sprd_request_port,
.config_port = sprd_config_port,
.verify_port = sprd_verify_port,
+ .pm = sprd_pm,
};
#ifdef CONFIG_SERIAL_SPRD_CONSOLE
@@ -668,6 +1089,43 @@ static int sprd_remove(struct platform_device *dev)
if (!sprd_ports_num)
uart_unregister_driver(&sprd_uart_driver);
+ sprd_rx_free_buf(sup);
+
+ return 0;
+}
+
+static int sprd_clk_init(struct uart_port *uport)
+{
+ struct clk *clk_uart, *clk_parent;
+ struct sprd_uart_port *u = sprd_port[uport->line];
+
+ clk_uart = devm_clk_get(uport->dev, "uart");
+ if (IS_ERR(clk_uart)) {
+ dev_warn(uport->dev, "uart%d can't get uart clock\n",
+ uport->line);
+ clk_uart = NULL;
+ }
+
+ clk_parent = devm_clk_get(uport->dev, "source");
+ if (IS_ERR(clk_parent)) {
+ dev_warn(uport->dev, "uart%d can't get source clock\n",
+ uport->line);
+ clk_parent = NULL;
+ }
+
+ if (!clk_uart || clk_set_parent(clk_uart, clk_parent))
+ uport->uartclk = SPRD_DEFAULT_SOURCE_CLK;
+ else
+ uport->uartclk = clk_get_rate(clk_uart);
+
+ u->clk = devm_clk_get(uport->dev, "enable");
+ if (IS_ERR(u->clk)) {
+ if (PTR_ERR(u->clk) != -EPROBE_DEFER)
+ dev_err(uport->dev, "uart%d can't get enable clock\n",
+ uport->line);
+ return PTR_ERR(u->clk);
+ }
+
return 0;
}
@@ -675,7 +1133,6 @@ static int sprd_probe(struct platform_device *pdev)
{
struct resource *res;
struct uart_port *up;
- struct clk *clk;
int irq;
int index;
int ret;
@@ -704,9 +1161,9 @@ static int sprd_probe(struct platform_device *pdev)
up->ops = &serial_sprd_ops;
up->flags = UPF_BOOT_AUTOCONF;
- clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR_OR_NULL(clk))
- up->uartclk = clk_get_rate(clk);
+ ret = sprd_clk_init(up);
+ if (ret)
+ return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
up->membase = devm_ioremap_resource(&pdev->dev, res);
@@ -722,6 +1179,14 @@ static int sprd_probe(struct platform_device *pdev)
}
up->irq = irq;
+ /*
+ * Allocate one DMA buffer up front for receive transfers, to avoid
+ * memory allocation failures at runtime.
+ */
+ ret = sprd_rx_alloc_buf(sprd_port[index]);
+ if (ret)
+ return ret;
+
if (!sprd_ports_num) {
ret = uart_register_driver(&sprd_uart_driver);
if (ret < 0) {
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 2b6376e6e5ad..6e3c66ab0e62 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1081,7 +1081,7 @@ static int qe_uart_verify_port(struct uart_port *port,
}
/* UART operations
*
- * Details on these functions can be found in Documentation/serial/driver
+ * Details on these functions can be found in Documentation/serial/driver.rst
*/
static const struct uart_ops qe_uart_pops = {
.tx_empty = qe_uart_tx_empty,
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 74089f5e5b53..605354fd60b1 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -193,6 +193,7 @@ struct cdns_uart {
int id;
struct notifier_block clk_rate_change_nb;
u32 quirks;
+ bool cts_override;
};
struct cdns_platform_data {
u32 quirks;
@@ -1000,6 +1001,11 @@ static void cdns_uart_config_port(struct uart_port *port, int flags)
*/
static unsigned int cdns_uart_get_mctrl(struct uart_port *port)
{
+ struct cdns_uart *cdns_uart_data = port->private_data;
+
+ if (cdns_uart_data->cts_override)
+ return 0;
+
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}
@@ -1007,6 +1013,10 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
u32 val;
u32 mode_reg;
+ struct cdns_uart *cdns_uart_data = port->private_data;
+
+ if (cdns_uart_data->cts_override)
+ return;
val = readl(port->membase + CDNS_UART_MODEMCR);
mode_reg = readl(port->membase + CDNS_UART_MR);
@@ -1665,6 +1675,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
console_port = NULL;
#endif
+ cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
+ "cts-override");
return 0;
err_out_pm_disable:
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index fa0ce7dd9e24..573b2055173c 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -208,7 +208,7 @@ static struct sysrq_key_op sysrq_showlocks_op = {
#endif
#ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(show_lock);
+static DEFINE_RAW_SPINLOCK(show_lock);
static void showacpu(void *dummy)
{
@@ -218,10 +218,10 @@ static void showacpu(void *dummy)
if (idle_cpu(smp_processor_id()))
return;
- spin_lock_irqsave(&show_lock, flags);
+ raw_spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
show_stack(NULL, NULL);
- spin_unlock_irqrestore(&show_lock, flags);
+ raw_spin_unlock_irqrestore(&show_lock, flags);
}
static void sysrq_showregs_othercpus(struct work_struct *dummy)
@@ -527,8 +527,12 @@ void __handle_sysrq(int key, bool check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
+ int orig_suppress_printk;
int i;
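+ /* re-enable printk output for the duration of sysrq handling */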
+ orig_suppress_printk = suppress_printk;
+ suppress_printk = 0;
+
rcu_sysrq_start();
rcu_read_lock();
/*
@@ -574,6 +578,8 @@ void __handle_sysrq(int key, bool check_mask)
}
rcu_read_unlock();
rcu_sysrq_end();
+
+ suppress_printk = orig_suppress_printk;
}
void handle_sysrq(int key)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 5fa250157025..033ac7e6a70d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1173,7 +1173,7 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
* tty_init_termios - helper for termios setup
* @tty: the tty to set up
*
- * Initialise the termios structures for this tty. Thus runs under
+ * Initialise the termios structure for this tty. This runs under
* the tty_mutex currently so we can be relaxed about ordering.
*/
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index c4ecd66fafef..f8ed50a16848 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -44,7 +44,7 @@ int __tty_check_change(struct tty_struct *tty, int sig)
tty_pgrp = tty->pgrp;
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
- if (tty_pgrp && pgrp != tty->pgrp) {
+ if (tty_pgrp && pgrp != tty_pgrp) {
if (is_ignored(sig)) {
if (sig == SIGTTIN)
ret = -EIO;
@@ -313,7 +313,7 @@ void disassociate_ctty(int on_exit)
read_unlock(&tasklist_lock);
}
-/**
+/*
*
* no_tty - Ensure the current process does not have a controlling tty
*/
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index a9e12b3bc31d..044c3cbdcfa4 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
if (tty && C_HUPCL(tty))
tty_port_lower_dtr_rts(port);
- if (port->ops && port->ops->shutdown)
+ if (port->ops->shutdown)
port->ops->shutdown(port);
}
out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
*/
int tty_port_carrier_raised(struct tty_port *port)
{
- if (!port->ops || !port->ops->carrier_raised)
+ if (port->ops->carrier_raised == NULL)
return 1;
return port->ops->carrier_raised(port);
}
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
*/
void tty_port_raise_dtr_rts(struct tty_port *port)
{
- if (port->ops && port->ops->dtr_rts)
+ if (port->ops->dtr_rts)
port->ops->dtr_rts(port, 1);
}
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
*/
void tty_port_lower_dtr_rts(struct tty_port *port)
{
- if (port->ops && port->ops->dtr_rts)
+ if (port->ops->dtr_rts)
port->ops->dtr_rts(port, 0);
}
EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
if (!tty_port_initialized(port)) {
clear_bit(TTY_IO_ERROR, &tty->flags);
- if (port->ops && port->ops->activate) {
+ if (port->ops->activate) {
int retval = port->ops->activate(port, tty);
if (retval) {
mutex_unlock(&port->mutex);
diff --git a/drivers/tty/ttynull.c b/drivers/tty/ttynull.c
new file mode 100644
index 000000000000..17f05b7eb6d3
--- /dev/null
+++ b/drivers/tty/ttynull.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Axis Communications AB
+ *
+ * Based on ttyprintk.c:
+ * Copyright (C) 2010 Samo Pogacnik
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+
+static const struct tty_port_operations ttynull_port_ops;
+static struct tty_driver *ttynull_driver;
+static struct tty_port ttynull_port;
+
+static int ttynull_open(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_open(&ttynull_port, tty, filp);
+}
+
+static void ttynull_close(struct tty_struct *tty, struct file *filp)
+{
+ tty_port_close(&ttynull_port, tty, filp);
+}
+
+static void ttynull_hangup(struct tty_struct *tty)
+{
+ tty_port_hangup(&ttynull_port);
+}
+
+static int ttynull_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ return count;
+}
+
+static int ttynull_write_room(struct tty_struct *tty)
+{
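+ /* all data is discarded, so there is always plenty of room */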
+ return 65536;
+}
+
+static const struct tty_operations ttynull_ops = {
+ .open = ttynull_open,
+ .close = ttynull_close,
+ .hangup = ttynull_hangup,
+ .write = ttynull_write,
+ .write_room = ttynull_write_room,
+};
+
+static struct tty_driver *ttynull_device(struct console *c, int *index)
+{
+ *index = 0;
+ return ttynull_driver;
+}
+
+static struct console ttynull_console = {
+ .name = "ttynull",
+ .device = ttynull_device,
+};
+
+static int __init ttynull_init(void)
+{
+ struct tty_driver *driver;
+ int ret;
+
+ driver = tty_alloc_driver(1,
+ TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_UNNUMBERED_NODE);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
+
+ tty_port_init(&ttynull_port);
+ ttynull_port.ops = &ttynull_port_ops;
+
+ driver->driver_name = "ttynull";
+ driver->name = "ttynull";
+ driver->type = TTY_DRIVER_TYPE_CONSOLE;
+ driver->init_termios = tty_std_termios;
+ driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET;
+ tty_set_operations(driver, &ttynull_ops);
+ tty_port_link_device(&ttynull_port, driver, 0);
+
+ ret = tty_register_driver(driver);
+ if (ret < 0) {
+ put_tty_driver(driver);
+ tty_port_destroy(&ttynull_port);
+ return ret;
+ }
+
+ ttynull_driver = driver;
+ register_console(&ttynull_console);
+
+ return 0;
+}
+
+static void __exit ttynull_exit(void)
+{
+ unregister_console(&ttynull_console);
+ tty_unregister_driver(ttynull_driver);
+ put_tty_driver(ttynull_driver);
+ tty_port_destroy(&ttynull_port);
+}
+
+module_init(ttynull_init);
+module_exit(ttynull_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index 58b454c34560..d2a1e1228c82 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* vcc.c: sun4v virtual channel concentrator
*
* Copyright (C) 2017 Oracle. All rights reserved.
diff --git a/drivers/tty/vt/.gitignore b/drivers/tty/vt/.gitignore
index 83683a2d8e6a..9b38b85f9d9a 100644
--- a/drivers/tty/vt/.gitignore
+++ b/drivers/tty/vt/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
consolemap_deftbl.c
defkeymap.c
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 7c7ada0b3ea0..b28aa0d289f8 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -542,7 +542,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
if (!ct)
return 0;
- unilist = memdup_user(list, ct * sizeof(struct unipair));
+ unilist = vmemdup_user(list, ct * sizeof(struct unipair));
if (IS_ERR(unilist))
return PTR_ERR(unilist);
@@ -641,7 +641,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
out_unlock:
console_unlock();
- kfree(unilist);
+ kvfree(unilist);
return err;
}
@@ -743,7 +743,7 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni
struct uni_pagedir *p;
struct unipair *unilist;
- unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ unilist = kvmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
if (!unilist)
return -ENOMEM;
@@ -775,7 +775,7 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni
if (copy_to_user(list, unilist, min(ect, ct) * sizeof(struct unipair)))
ret = -EFAULT;
put_user(ect, uct);
- kfree(unilist);
+ kvfree(unilist);
return ret ? ret : (ect <= ct) ? 0 : -ENOMEM;
}
diff --git a/drivers/tty/vt/cp437.uni b/drivers/tty/vt/cp437.uni
index bc6163484f62..a1991904c559 100644
--- a/drivers/tty/vt/cp437.uni
+++ b/drivers/tty/vt/cp437.uni
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Unicode table for IBM Codepage 437. Note that there are many more
# substitutions that could be conceived (for example, thick-line
diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
index d2208dfe3f67..c7095fb7d2d1 100644
--- a/drivers/tty/vt/defkeymap.c_shipped
+++ b/drivers/tty/vt/defkeymap.c_shipped
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Do not edit this file! It was automatically generated by */
/* loadkeys --mktable defkeymap.map > defkeymap.c */
diff --git a/drivers/tty/vt/defkeymap.map b/drivers/tty/vt/defkeymap.map
index 50b30cace261..37f1ac6ddfb9 100644
--- a/drivers/tty/vt/defkeymap.map
+++ b/drivers/tty/vt/defkeymap.map
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Default kernel keymap. This uses 7 modifier combinations.
keymaps 0-2,4-5,8,12
# Change the above line into
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 88312c6c92cc..515fc095e3b4 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -123,6 +123,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals);
static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static DEFINE_SPINLOCK(led_lock);
+static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;
@@ -1449,7 +1450,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
KBD_UNICODE, &param);
if (rc != NOTIFY_STOP)
if (down && !raw_mode)
- to_utf8(vc, keysym);
+ k_unicode(vc, keysym, !down);
return;
}
@@ -1990,11 +1991,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
char *p;
u_char *q;
u_char __user *up;
- int sz;
+ int sz, fnw_sz;
int delta;
char *first_free, *fj, *fnw;
int i, j, k;
int ret;
+ unsigned long flags;
if (!capable(CAP_SYS_TTY_CONFIG))
perm = 0;
@@ -2037,7 +2039,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
goto reterr;
}
+ fnw = NULL;
+ fnw_sz = 0;
+ /* race against other writers */
+ again:
+ spin_lock_irqsave(&func_buf_lock, flags);
q = func_table[i];
+
+ /* fj points to the next entry after 'q' */
first_free = funcbufptr + (funcbufsize - funcbufleft);
for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
;
@@ -2045,10 +2054,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
fj = func_table[j];
else
fj = first_free;
-
+ /* change in buffer usage caused by the new entry */
delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
+
if (delta <= funcbufleft) { /* it fits in current buf */
if (j < MAX_NR_FUNC) {
+ /* make enough space for new entry at 'fj' */
memmove(fj + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++)
if (func_table[k])
@@ -2061,20 +2072,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
sz = 256;
while (sz < funcbufsize - funcbufleft + delta)
sz <<= 1;
- fnw = kmalloc(sz, GFP_KERNEL);
- if(!fnw) {
- ret = -ENOMEM;
- goto reterr;
+ if (fnw_sz != sz) {
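+ /* drop the lock to allocate, then retry from the top */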
+ spin_unlock_irqrestore(&func_buf_lock, flags);
+ kfree(fnw);
+ fnw = kmalloc(sz, GFP_KERNEL);
+ fnw_sz = sz;
+ if (!fnw) {
+ ret = -ENOMEM;
+ goto reterr;
+ }
+ goto again;
}
if (!q)
func_table[i] = fj;
+ /* copy data before insertion point to new location */
if (fj > funcbufptr)
memmove(fnw, funcbufptr, fj - funcbufptr);
for (k = 0; k < j; k++)
if (func_table[k])
func_table[k] = fnw + (func_table[k] - funcbufptr);
+ /* copy data after insertion point to new location */
if (first_free > fj) {
memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++)
@@ -2087,7 +2106,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
funcbufleft = funcbufleft - delta + sz - funcbufsize;
funcbufsize = sz;
}
+ /* finally insert item itself */
strcpy(func_table[i], kbs->kb_string);
+ spin_unlock_irqrestore(&func_buf_lock, flags);
break;
}
ret = 0;
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 07496c711d7d..78732feaf65b 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -2,7 +2,9 @@
/*
* This module exports the functions:
*
- * 'int set_selection(struct tiocl_selection __user *, struct tty_struct *)'
+ * 'int set_selection_user(struct tiocl_selection __user *,
+ * struct tty_struct *)'
+ * 'int set_selection_kernel(struct tiocl_selection *, struct tty_struct *)'
* 'void clear_selection(void)'
* 'int paste_selection(struct tty_struct *)'
* 'int sel_loadlut(char __user *)'
@@ -80,6 +82,7 @@ void clear_selection(void)
sel_start = -1;
}
}
+EXPORT_SYMBOL_GPL(clear_selection);
/*
* User settable table: what characters are to be considered alphabetic?
@@ -154,7 +157,7 @@ static int store_utf8(u32 c, char *p)
}
/**
- * set_selection - set the current selection.
+ * set_selection_user - set the current selection.
* @sel: user selection info
* @tty: the console tty
*
@@ -163,35 +166,44 @@ static int store_utf8(u32 c, char *p)
* The entire selection process is managed under the console_lock. It's
* a lot under the lock but it's hardly a performance path
*/
-int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
+int set_selection_user(const struct tiocl_selection __user *sel,
+ struct tty_struct *tty)
+{
+ struct tiocl_selection v;
+
+ if (copy_from_user(&v, sel, sizeof(*sel)))
+ return -EFAULT;
+
+ return set_selection_kernel(&v, tty);
+}
+
+int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
int new_sel_start, new_sel_end, spc;
- struct tiocl_selection v;
char *bp, *obp;
int i, ps, pe, multiplier;
u32 c;
int mode;
poke_blanked_console();
- if (copy_from_user(&v, sel, sizeof(*sel)))
- return -EFAULT;
- v.xs = min_t(u16, v.xs - 1, vc->vc_cols - 1);
- v.ys = min_t(u16, v.ys - 1, vc->vc_rows - 1);
- v.xe = min_t(u16, v.xe - 1, vc->vc_cols - 1);
- v.ye = min_t(u16, v.ye - 1, vc->vc_rows - 1);
- ps = v.ys * vc->vc_size_row + (v.xs << 1);
- pe = v.ye * vc->vc_size_row + (v.xe << 1);
+ v->xs = min_t(u16, v->xs - 1, vc->vc_cols - 1);
+ v->ys = min_t(u16, v->ys - 1, vc->vc_rows - 1);
+ v->xe = min_t(u16, v->xe - 1, vc->vc_cols - 1);
+ v->ye = min_t(u16, v->ye - 1, vc->vc_rows - 1);
+ ps = v->ys * vc->vc_size_row + (v->xs << 1);
+ pe = v->ye * vc->vc_size_row + (v->xe << 1);
- if (v.sel_mode == TIOCL_SELCLEAR) {
+ if (v->sel_mode == TIOCL_SELCLEAR) {
/* useful for screendump without selection highlights */
clear_selection();
return 0;
}
- if (mouse_reporting() && (v.sel_mode & TIOCL_SELMOUSEREPORT)) {
- mouse_report(tty, v.sel_mode & TIOCL_SELBUTTONMASK, v.xs, v.ys);
+ if (mouse_reporting() && (v->sel_mode & TIOCL_SELMOUSEREPORT)) {
+ mouse_report(tty, v->sel_mode & TIOCL_SELBUTTONMASK, v->xs,
+ v->ys);
return 0;
}
@@ -208,7 +220,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
else
use_unicode = 0;
- switch (v.sel_mode)
+ switch (v->sel_mode)
{
case TIOCL_SELCHAR: /* character-by-character selection */
new_sel_start = ps;
@@ -322,6 +334,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
sel_buffer_lth = bp - sel_buffer;
return 0;
}
+EXPORT_SYMBOL_GPL(set_selection_kernel);
/* Insert the contents of the selection buffer into the
* queue of the tty associated with the current console.
@@ -367,3 +380,4 @@ int paste_selection(struct tty_struct *tty)
tty_ldisc_deref(ld);
return 0;
}
+EXPORT_SYMBOL_GPL(paste_selection);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 160f46115aaa..1f042346e722 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Provide access to virtual console memory.
- * /dev/vcs0: the screen as it is being viewed right now (possibly scrolled)
+ * /dev/vcs: the screen as it is being viewed right now (possibly scrolled)
* /dev/vcsN: the screen of /dev/ttyN (1 <= N <= 63)
* [minor: N]
*
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index d34984aa646d..fdd12f8c3deb 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1520,7 +1520,8 @@ static void csi_J(struct vc_data *vc, int vpar)
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
- update_region(vc, (unsigned long) start, count);
+ if (con_should_update(vc))
+ do_update_region(vc, (unsigned long) start, count);
vc->vc_need_wrap = 0;
}
@@ -1804,7 +1805,7 @@ void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry)
respond_string(buf, tty->port);
}
-/* invoked via ioctl(TIOCLINUX) and through set_selection */
+/* invoked via ioctl(TIOCLINUX) and through set_selection_user */
int mouse_reporting(void)
{
return vc_cons[fg_console].d->vc_report_mouse;
@@ -3008,7 +3009,7 @@ static struct console vt_console_driver = {
* There are some functions which can sleep for arbitrary periods
* (paste_selection) but we don't need the lock there anyway.
*
- * set_selection has locking, and definitely needs it
+ * set_selection_user has locking, and definitely needs it
*/
int tioclinux(struct tty_struct *tty, unsigned long arg)
@@ -3028,7 +3029,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
{
case TIOCL_SETSEL:
console_lock();
- ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
+ ret = set_selection_user((struct tiocl_selection
+ __user *)(p+1), tty);
console_unlock();
break;
case TIOCL_PASTESEL:
@@ -4178,8 +4180,6 @@ void do_blank_screen(int entering_gfx)
return;
}
- if (blank_state != blank_normal_wait)
- return;
blank_state = blank_off;
/* don't blank graphics */
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index 0ee3cd3c25ee..450e2f5c9b43 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -68,8 +68,8 @@ static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
-DEVICE_ATTR(reg_br, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store);
-DEVICE_ATTR(reg_or, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store);
+static DEVICE_ATTR(reg_br, 0664, reg_show, reg_store);
+static DEVICE_ATTR(reg_or, 0664, reg_show, reg_store);
static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
char *buf)
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 880009987460..b8b3caad889c 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -205,12 +205,9 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
- ci->fs_clk = clk = devm_clk_get(&pdev->dev, "fs");
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- ci->fs_clk = NULL;
- }
+ ci->fs_clk = clk = devm_clk_get_optional(&pdev->dev, "fs");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ci->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ec666eb4b7b4..183b41753c98 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -468,14 +468,13 @@ static void acm_read_bulk_callback(struct urb *urb)
{
struct acm_rb *rb = urb->context;
struct acm *acm = rb->instance;
- unsigned long flags;
int status = urb->status;
+ bool stopped = false;
+ bool stalled = false;
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
rb->index, urb->actual_length, status);
- set_bit(rb->index, &acm->read_urbs_free);
-
if (!acm->dev) {
dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
return;
@@ -488,15 +487,16 @@ static void acm_read_bulk_callback(struct urb *urb)
break;
case -EPIPE:
set_bit(EVENT_RX_STALL, &acm->flags);
- schedule_work(&acm->work);
- return;
+ stalled = true;
+ break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&acm->data->dev,
"%s - urb shutting down with status: %d\n",
__func__, status);
- return;
+ stopped = true;
+ break;
default:
dev_dbg(&acm->data->dev,
"%s - nonzero urb status received: %d\n",
@@ -505,20 +505,29 @@ static void acm_read_bulk_callback(struct urb *urb)
}
/*
- * Unthrottle may run on another CPU which needs to see events
- * in the same order. Submission has an implict barrier
+ * Make sure URB processing is done before marking as free to avoid
+ * racing with unthrottle() on another CPU. Matches the barriers
+ * implied by the test_and_clear_bit() in acm_submit_read_urb().
*/
smp_mb__before_atomic();
+ set_bit(rb->index, &acm->read_urbs_free);
+ /*
+ * Make sure URB is marked as free before checking the throttled flag
+ * to avoid racing with unthrottle() on another CPU. Matches the
+ * smp_mb() in unthrottle().
+ */
+ smp_mb__after_atomic();
- /* throttle device if requested by tty */
- spin_lock_irqsave(&acm->read_lock, flags);
- acm->throttled = acm->throttle_req;
- if (!acm->throttled) {
- spin_unlock_irqrestore(&acm->read_lock, flags);
- acm_submit_read_urb(acm, rb->index, GFP_ATOMIC);
- } else {
- spin_unlock_irqrestore(&acm->read_lock, flags);
+ if (stopped || stalled) {
+ if (stalled)
+ schedule_work(&acm->work);
+ return;
}
+
+ if (test_bit(ACM_THROTTLED, &acm->flags))
+ return;
+
+ acm_submit_read_urb(acm, rb->index, GFP_ATOMIC);
}
/* data interface wrote those outgoing bytes */
@@ -655,10 +664,7 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
/*
* Unthrottle device in case the TTY was closed while throttled.
*/
- spin_lock_irq(&acm->read_lock);
- acm->throttled = 0;
- acm->throttle_req = 0;
- spin_unlock_irq(&acm->read_lock);
+ clear_bit(ACM_THROTTLED, &acm->flags);
retval = acm_submit_read_urbs(acm, GFP_KERNEL);
if (retval)
@@ -826,24 +832,19 @@ static void acm_tty_throttle(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- spin_lock_irq(&acm->read_lock);
- acm->throttle_req = 1;
- spin_unlock_irq(&acm->read_lock);
+ set_bit(ACM_THROTTLED, &acm->flags);
}
static void acm_tty_unthrottle(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- unsigned int was_throttled;
- spin_lock_irq(&acm->read_lock);
- was_throttled = acm->throttled;
- acm->throttled = 0;
- acm->throttle_req = 0;
- spin_unlock_irq(&acm->read_lock);
+ clear_bit(ACM_THROTTLED, &acm->flags);
+
+ /* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
+ smp_mb();
- if (was_throttled)
- acm_submit_read_urbs(acm, GFP_KERNEL);
+ acm_submit_read_urbs(acm, GFP_KERNEL);
}
static int acm_tty_break_ctl(struct tty_struct *tty, int state)
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 515aad0847ee..ca1c026382c2 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -108,6 +108,7 @@ struct acm {
unsigned long flags;
# define EVENT_TTY_WAKEUP 0
# define EVENT_RX_STALL 1
+# define ACM_THROTTLED 2
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
@@ -122,8 +123,6 @@ struct acm {
unsigned int ctrl_caps; /* control capabilities from the class specific header */
unsigned int susp_count; /* number of suspended interfaces */
unsigned int combined_interfaces:1; /* control and data collapsed */
- unsigned int throttled:1; /* actually throttled */
- unsigned int throttle_req:1; /* throttle requested */
u8 bInterval;
struct usb_anchor delayed; /* writes queued for a device about to be woken */
unsigned long quirks;
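
The cdc-acm changes above drop the read_lock-protected throttled/throttle_req bitfields in favour of a single ACM_THROTTLED bit, and pair memory barriers so that the completion handler's "mark URB free, then test throttled" cannot race with unthrottle's "clear throttled, then resubmit": at least one side always observes the other's update. A minimal C11 sketch of that ordering, using seq_cst fences as a stand-in for smp_mb() and invented names throughout:

    /* Toy model of the cdc-acm throttle ordering; not the driver's code. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool buf_free;
    static atomic_bool throttled;

    /* completion side: runs when a read buffer has been consumed */
    static void complete_read(void)
    {
        atomic_store_explicit(&buf_free, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* like smp_mb__after_atomic() */

        if (atomic_load_explicit(&throttled, memory_order_relaxed))
            return;                /* unthrottle() will resubmit later */
        printf("resubmitting from completion path\n");
        atomic_store_explicit(&buf_free, false, memory_order_relaxed);
    }

    /* tty side: throttle lifted, resubmit anything marked free */
    static void unthrottle(void)
    {
        atomic_store_explicit(&throttled, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* like smp_mb() */

        if (atomic_exchange_explicit(&buf_free, false, memory_order_relaxed))
            printf("resubmitting from unthrottle path\n");
    }

    int main(void)
    {
        atomic_store(&throttled, true);
        complete_read();           /* sees the throttle, leaves the buffer marked free */
        unthrottle();              /* clears the throttle and picks the buffer up */
        return 0;
    }
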
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 73c8e6591746..18f5dcf58b0d 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -16,6 +16,22 @@
#include <linux/usb/otg.h>
#include <linux/of_platform.h>
+static const char *const ep_type_names[] = {
+ [USB_ENDPOINT_XFER_CONTROL] = "ctrl",
+ [USB_ENDPOINT_XFER_ISOC] = "isoc",
+ [USB_ENDPOINT_XFER_BULK] = "bulk",
+ [USB_ENDPOINT_XFER_INT] = "intr",
+};
+
+const char *usb_ep_type_string(int ep_type)
+{
+ if (ep_type < 0 || ep_type >= ARRAY_SIZE(ep_type_names))
+ return "unknown";
+
+ return ep_type_names[ep_type];
+}
+EXPORT_SYMBOL_GPL(usb_ep_type_string);
+
const char *usb_otg_state_string(enum usb_otg_state state)
{
static const char *const names[] = {
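
usb_ep_type_string() added above is a bounds-checked lookup table: a designated-initializer array indexed by the endpoint transfer type, with anything out of range mapped to "unknown" so callers never read past the array. A self-contained sketch of the same pattern, with example enum values:

    /* Sketch of the enum-to-string table pattern; values are examples only. */
    #include <stdio.h>

    #define ARRAY_SIZE(a)  (sizeof(a) / sizeof((a)[0]))

    enum ep_type { EP_CTRL, EP_ISOC, EP_BULK, EP_INT };

    static const char *const ep_type_names[] = {
        [EP_CTRL] = "ctrl",
        [EP_ISOC] = "isoc",
        [EP_BULK] = "bulk",
        [EP_INT]  = "intr",
    };

    static const char *ep_type_string(int ep_type)
    {
        if (ep_type < 0 || (size_t)ep_type >= ARRAY_SIZE(ep_type_names))
            return "unknown";
        return ep_type_names[ep_type];
    }

    int main(void)
    {
        printf("%s %s\n", ep_type_string(EP_BULK), ep_type_string(42));
        return 0;
    }
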
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 8987cec9549d..ebcadaad89d1 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- /* Undo any residual pm_autopm_get_interface_* calls */
- for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
- usb_autopm_put_interface_no_suspend(intf);
- atomic_set(&intf->pm_usage_cnt, 0);
-
if (!error)
usb_autosuspend_device(udev);
@@ -1633,7 +1628,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
int status;
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
status = pm_runtime_put_sync(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1662,7 +1656,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
int status;
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
status = pm_runtime_put(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1684,7 +1677,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
pm_runtime_put_noidle(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@@ -1715,8 +1707,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
status = pm_runtime_get_sync(&intf->dev);
if (status < 0)
pm_runtime_put_sync(&intf->dev);
- else
- atomic_inc(&intf->pm_usage_cnt);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
@@ -1750,8 +1740,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
status = pm_runtime_get(&intf->dev);
if (status < 0 && status != -EINPROGRESS)
pm_runtime_put_noidle(&intf->dev);
- else
- atomic_inc(&intf->pm_usage_cnt);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
@@ -1775,7 +1763,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
- atomic_inc(&intf->pm_usage_cnt);
pm_runtime_get_noresume(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 975d7c1288e3..94d22551fc1b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1878,23 +1878,10 @@ rescan:
/* kick hcd */
unlink1(hcd, urb, -ESHUTDOWN);
dev_dbg (hcd->self.controller,
- "shutdown urb %pK ep%d%s%s\n",
+ "shutdown urb %pK ep%d%s-%s\n",
urb, usb_endpoint_num(&ep->desc),
is_in ? "in" : "out",
- ({ char *s;
-
- switch (usb_endpoint_type(&ep->desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- s = ""; break;
- case USB_ENDPOINT_XFER_BULK:
- s = "-bulk"; break;
- case USB_ENDPOINT_XFER_INT:
- s = "-intr"; break;
- default:
- s = "-iso"; break;
- };
- s;
- }));
+ usb_ep_type_string(usb_endpoint_type(&ep->desc)));
usb_put_urb (urb);
/* list contents may have changed */
@@ -2448,6 +2435,19 @@ EXPORT_SYMBOL_GPL(usb_hcd_irq);
/*-------------------------------------------------------------------------*/
+/* Workqueue routine for when the root-hub has died. */
+static void hcd_died_work(struct work_struct *work)
+{
+ struct usb_hcd *hcd = container_of(work, struct usb_hcd, died_work);
+ static char *env[] = {
+ "ERROR=DEAD",
+ NULL
+ };
+
+ /* Notify user space that the host controller has died */
+ kobject_uevent_env(&hcd->self.root_hub->dev.kobj, KOBJ_OFFLINE, env);
+}
+
/**
* usb_hc_died - report abnormal shutdown of a host controller (bus glue)
* @hcd: pointer to the HCD representing the controller
@@ -2488,6 +2488,13 @@ void usb_hc_died (struct usb_hcd *hcd)
usb_kick_hub_wq(hcd->self.root_hub);
}
}
+
+ /* Handle the case where this function gets called with a shared HCD */
+ if (usb_hcd_is_primary_hcd(hcd))
+ schedule_work(&hcd->died_work);
+ else
+ schedule_work(&hcd->primary_hcd->died_work);
+
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
/* Make sure that the other roothub is also deallocated. */
}
@@ -2555,6 +2562,8 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
+ INIT_WORK(&hcd->died_work, hcd_died_work);
+
hcd->driver = driver;
hcd->speed = driver->flags & HCD_MASK;
hcd->product_desc = (driver->product_desc) ? driver->product_desc :
@@ -2908,6 +2917,7 @@ error_create_attr_group:
#ifdef CONFIG_PM
cancel_work_sync(&hcd->wakeup_work);
#endif
+ cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
@@ -2968,6 +2978,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
#ifdef CONFIG_PM
cancel_work_sync(&hcd->wakeup_work);
#endif
+ cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
@@ -3020,6 +3031,9 @@ usb_hcd_platform_shutdown(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
+ /* No need for pm_runtime_put(), we're shutting down */
+ pm_runtime_get_sync(&dev->dev);
+
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
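
usb_hc_died() runs under hcd_root_hub_lock with interrupts disabled, so the patch defers the KOBJ_OFFLINE uevent, which may sleep, to the new hcd->died_work item and only schedules it from the atomic path. A toy pthread analogue of that defer-to-worker split, with all names invented:

    /* The "interrupt" side only queues a request; a worker thread does the
     * part that may sleep. pthreads stand in for the kernel workqueue.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool died_work_pending;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!died_work_pending)
            pthread_cond_wait(&cond, &lock);
        died_work_pending = false;
        pthread_mutex_unlock(&lock);

        /* the sleepable part, done outside the "atomic" caller */
        printf("notifying userspace: controller died\n");
        return NULL;
    }

    /* called from the non-sleepable path: just schedule the work */
    static void hc_died(void)
    {
        pthread_mutex_lock(&lock);
        died_work_pending = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        hc_died();
        pthread_join(t, NULL);
        return 0;
    }
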
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8d4631c81b9f..2f94568ba385 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3174,13 +3174,14 @@ static int usb_disable_remote_wakeup(struct usb_device *udev)
}
/* Count of wakeup-enabled devices at or below udev */
-static unsigned wakeup_enabled_descendants(struct usb_device *udev)
+unsigned usb_wakeup_enabled_descendants(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
return udev->do_remote_wakeup +
(hub ? hub->wakeup_enabled_descendants : 0);
}
+EXPORT_SYMBOL_GPL(usb_wakeup_enabled_descendants);
/*
* usb_port_suspend - suspend a usb device's upstream port
@@ -3282,7 +3283,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
* Therefore we will turn on the suspend feature if udev or any of its
* descendants is enabled for remote wakeup.
*/
- else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+ else if (PMSG_IS_AUTO(msg) || usb_wakeup_enabled_descendants(udev) > 0)
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
else {
@@ -3668,7 +3669,6 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
struct usb_hub *hub = usb_get_intfdata(intf);
struct usb_device *hdev = hub->hdev;
unsigned port1;
- int status;
/*
* Warn if children aren't already suspended.
@@ -3687,7 +3687,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
}
if (udev)
hub->wakeup_enabled_descendants +=
- wakeup_enabled_descendants(udev);
+ usb_wakeup_enabled_descendants(udev);
}
if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
@@ -3702,12 +3702,12 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
if (hub_is_superspeed(hdev) && hdev->do_remote_wakeup) {
/* Enable hub to send remote wakeup for all ports. */
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
- status = set_port_feature(hdev,
- port1 |
- USB_PORT_FEAT_REMOTE_WAKE_CONNECT |
- USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT |
- USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT,
- USB_PORT_FEAT_REMOTE_WAKE_MASK);
+ set_port_feature(hdev,
+ port1 |
+ USB_PORT_FEAT_REMOTE_WAKE_CONNECT |
+ USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT |
+ USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT,
+ USB_PORT_FEAT_REMOTE_WAKE_MASK);
}
}
@@ -5902,7 +5902,10 @@ int usb_reset_device(struct usb_device *udev)
cintf->needs_binding = 1;
}
}
- usb_unbind_and_rebind_marked_interfaces(udev);
+
+ /* If the reset failed, hub_wq will unbind drivers later */
+ if (ret == 0)
+ usb_unbind_and_rebind_marked_interfaces(udev);
}
usb_autosuspend_device(udev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 82239f27c4cc..e844bb7b5676 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -820,9 +820,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
- if (size <= 0 || !buf || !index)
+ if (size <= 0 || !buf)
return -EINVAL;
buf[0] = 0;
+ if (index <= 0 || index >= 256)
+ return -EINVAL;
tbuf = kmalloc(256, GFP_NOIO);
if (!tbuf)
return -ENOMEM;
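
The usb_string() hunk above clears the caller's buffer before rejecting bad indexes and limits the index to 1..255: the descriptor index is a single byte, and index 0 addresses the language-ID table rather than a string. A small sketch of the same argument checks, with a stand-in for the actual descriptor fetch:

    /* Argument-validation sketch; get_string() is an invented helper. */
    #include <stdio.h>
    #include <string.h>

    static int get_string(int index, char *buf, size_t size)
    {
        if (size == 0 || !buf)
            return -1;
        buf[0] = '\0';                 /* callers always see an empty string on error */
        if (index <= 0 || index >= 256)
            return -1;
        snprintf(buf, size, "string #%d", index);  /* stand-in for the fetch */
        return 0;
    }

    int main(void)
    {
        char buf[32];

        printf("%d\n", get_string(0, buf, sizeof(buf)));  /* rejected */
        printf("%d\n", get_string(3, buf, sizeof(buf)));  /* ok */
        return 0;
    }
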
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 55d5ae2a7ec7..8b499d643461 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -1020,6 +1020,205 @@ int dwc2_hsotg_wait_bit_clear(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
return -ETIMEDOUT;
}
+/*
+ * Initializes the FSLSPClkSel field of the HCFG register depending on the
+ * PHY type
+ */
+void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
+{
+ u32 hcfg, val;
+
+ if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ hsotg->params.ulpi_fs_ls) ||
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* Full speed PHY */
+ val = HCFG_FSLSPCLKSEL_48_MHZ;
+ } else {
+ /* High speed PHY running at full speed or high speed */
+ val = HCFG_FSLSPCLKSEL_30_60_MHZ;
+ }
+
+ dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
+ hcfg = dwc2_readl(hsotg, HCFG);
+ hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+ hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
+ dwc2_writel(hsotg, hcfg, HCFG);
+}
+
+static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg, ggpio, i2cctl;
+ int retval = 0;
+
+ /*
+ * core_init() is now called on every switch so only call the
+ * following for the first time through
+ */
+ if (select_phy) {
+ dev_dbg(hsotg->dev, "FS PHY selected\n");
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ if (!(usbcfg & GUSBCFG_PHYSEL)) {
+ usbcfg |= GUSBCFG_PHYSEL;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ /* Reset after a PHY select */
+ retval = dwc2_core_reset(hsotg, false);
+
+ if (retval) {
+ dev_err(hsotg->dev,
+ "%s: Reset failed, aborting", __func__);
+ return retval;
+ }
+ }
+
+ if (hsotg->params.activate_stm_fs_transceiver) {
+ ggpio = dwc2_readl(hsotg, GGPIO);
+ if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
+ dev_dbg(hsotg->dev, "Activating transceiver\n");
+ /*
+ * STM32F4x9 uses the GGPIO register as general
+ * core configuration register.
+ */
+ ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
+ dwc2_writel(hsotg, ggpio, GGPIO);
+ }
+ }
+ }
+
+ /*
+ * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
+ * do this on HNP Dev/Host mode switches (done in dev_init and
+ * host_init).
+ */
+ if (dwc2_is_host_mode(hsotg))
+ dwc2_init_fs_ls_pclk_sel(hsotg);
+
+ if (hsotg->params.i2c_enable) {
+ dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
+
+ /* Program GUSBCFG.OtgUtmiFsSel to I2C */
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ /* Program GI2CCTL.I2CEn */
+ i2cctl = dwc2_readl(hsotg, GI2CCTL);
+ i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
+ i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
+ i2cctl &= ~GI2CCTL_I2CEN;
+ dwc2_writel(hsotg, i2cctl, GI2CCTL);
+ i2cctl |= GI2CCTL_I2CEN;
+ dwc2_writel(hsotg, i2cctl, GI2CCTL);
+ }
+
+ return retval;
+}
+
+static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg, usbcfg_old;
+ int retval = 0;
+
+ if (!select_phy)
+ return 0;
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg_old = usbcfg;
+
+ /*
+ * HS PHY parameters. These parameters are preserved during soft reset
+ * so only program the first time. Do a soft reset immediately after
+ * setting phyif.
+ */
+ switch (hsotg->params.phy_type) {
+ case DWC2_PHY_TYPE_PARAM_ULPI:
+ /* ULPI interface */
+ dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
+ usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
+ usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
+ if (hsotg->params.phy_ulpi_ddr)
+ usbcfg |= GUSBCFG_DDRSEL;
+
+ /* Set external VBUS indicator as needed. */
+ if (hsotg->params.oc_disable)
+ usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
+ GUSBCFG_INDICATORPASSTHROUGH);
+ break;
+ case DWC2_PHY_TYPE_PARAM_UTMI:
+ /* UTMI+ interface */
+ dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
+ usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
+ if (hsotg->params.phy_utmi_width == 16)
+ usbcfg |= GUSBCFG_PHYIF16;
+
+ /* Set turnaround time */
+ if (dwc2_is_device_mode(hsotg)) {
+ usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
+ if (hsotg->params.phy_utmi_width == 16)
+ usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
+ else
+ usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
+ }
+ break;
+ default:
+ dev_err(hsotg->dev, "FS PHY selected at HS!\n");
+ break;
+ }
+
+ if (usbcfg != usbcfg_old) {
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ /* Reset after setting the PHY parameters */
+ retval = dwc2_core_reset(hsotg, false);
+ if (retval) {
+ dev_err(hsotg->dev,
+ "%s: Reset failed, aborting", __func__);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg;
+ int retval = 0;
+
+ if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* If FS/LS mode with FS/LS PHY */
+ retval = dwc2_fs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
+ } else {
+ /* High speed PHY */
+ retval = dwc2_hs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
+ }
+
+ if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ hsotg->params.ulpi_fs_ls) {
+ dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg |= GUSBCFG_ULPI_FS_LS;
+ usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+ } else {
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg &= ~GUSBCFG_ULPI_FS_LS;
+ usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+ }
+
+ return retval;
+}
+
MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 30bab8463c96..152ac41dfb2d 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -859,6 +859,8 @@ struct dwc2_hregs_backup {
* @gadget_enabled: Peripheral mode sub-driver initialization indicator.
* @ll_hw_enabled: Status of low-level hardware resources.
* @hibernated: True if core is hibernated
+ * @reset_phy_on_wake: Quirk saying that we should assert PHY reset on a
+ * remote wakeup.
* @frame_number: Frame number read from the core. For both device
* and host modes. The value ranges are from 0
* to HFNUM_MAX_FRNUM.
@@ -869,7 +871,6 @@ struct dwc2_hregs_backup {
* removed once all SoCs support usb transceiver.
* @supplies: Definition of USB power supplies
* @vbus_supply: Regulator supplying vbus.
- * @phyif: PHY interface width
* @lock: Spinlock that protects all the driver data structures
* @priv: Stores a pointer to the struct usb_hcd
* @queuing_high_bandwidth: True if multiple packets of a high-bandwidth
@@ -972,6 +973,7 @@ struct dwc2_hregs_backup {
* @status_buf_dma: DMA address for status_buf
* @start_work: Delayed work for handling host A-cable connection
* @reset_work: Delayed work for handling a port reset
+ * @phy_reset_work: Work structure for doing a PHY reset
* @otg_port: OTG port number
* @frame_list: Frame list
* @frame_list_dma: Frame list DMA address
@@ -991,6 +993,7 @@ struct dwc2_hregs_backup {
* @ctrl_buff: Buffer for EP0 control requests.
* @ctrl_req: Request for EP0 control packets.
* @ep0_state: EP0 control transfers state
+ * @delayed_status: true when gadget driver asks for delayed status
* @test_mode: USB test mode requested by the host
* @remote_wakeup_allowed: True if device is allowed to wake-up host by
* remote-wakeup signalling
@@ -1045,6 +1048,7 @@ struct dwc2_hsotg {
unsigned int gadget_enabled:1;
unsigned int ll_hw_enabled:1;
unsigned int hibernated:1;
+ unsigned int reset_phy_on_wake:1;
u16 frame_number;
struct phy *phy;
@@ -1052,7 +1056,6 @@ struct dwc2_hsotg {
struct dwc2_hsotg_plat *plat;
struct regulator_bulk_data supplies[DWC2_NUM_SUPPLIES];
struct regulator *vbus_supply;
- u32 phyif;
spinlock_t lock;
void *priv;
@@ -1147,6 +1150,7 @@ struct dwc2_hsotg {
struct delayed_work start_work;
struct delayed_work reset_work;
+ struct work_struct phy_reset_work;
u8 otg_port;
u32 *frame_list;
dma_addr_t frame_list_dma;
@@ -1172,6 +1176,7 @@ struct dwc2_hsotg {
void *ep0_buff;
void *ctrl_buff;
enum dwc2_ep0_state ep0_state;
+ unsigned delayed_status : 1;
u8 test_mode;
dma_addr_t setup_desc_dma[2];
@@ -1283,6 +1288,8 @@ int dwc2_exit_partial_power_down(struct dwc2_hsotg *hsotg, bool restore);
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg, int is_host);
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
int reset, int is_host);
+void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg);
+int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy);
void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host);
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
@@ -1431,6 +1438,8 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg);
int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg);
int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg,
int rem_wakeup, int reset);
+static inline void dwc2_host_schedule_phy_reset(struct dwc2_hsotg *hsotg)
+{ schedule_work(&hsotg->phy_reset_work); }
#else
static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
{ return 0; }
@@ -1454,6 +1463,7 @@ static inline int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
static inline int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg,
int rem_wakeup, int reset)
{ return 0; }
+static inline void dwc2_host_schedule_phy_reset(struct dwc2_hsotg *hsotg) {}
#endif
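
core.h above follows the usual compile-out convention: when host support is built in, dwc2_host_schedule_phy_reset() queues the new phy_reset_work, and otherwise a static inline stub keeps the call sites free of #ifdefs. A minimal sketch of that pattern (the HAVE_HOST macro and helper name are invented):

    #include <stdio.h>

    #ifdef HAVE_HOST
    static inline void schedule_phy_reset(void)
    {
        printf("queueing PHY reset work\n");   /* real implementation */
    }
    #else
    static inline void schedule_phy_reset(void) { }    /* no host support: no-op */
    #endif

    int main(void)
    {
        schedule_phy_reset();      /* identical call site either way */
        return 0;
    }
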
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 19ae2595f1c3..6af6add3d4c0 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -435,6 +435,18 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
/* Restart the Phy Clock */
pcgcctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
+
+ /*
+ * If we've got this quirk then the PHY is stuck upon
+ * wakeup. Assert reset. This will propagate out and
+ * eventually we'll re-enumerate the device. Not great
+ * but the best we can do. We can't call phy_reset()
+ * at interrupt time but there's no hurry, so we'll
+ * schedule it for later.
+ */
+ if (hsotg->reset_phy_on_wake)
+ dwc2_host_schedule_phy_reset(hsotg);
+
mod_timer(&hsotg->wkp_timer,
jiffies + msecs_to_jiffies(71));
} else {
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 6812a8a3a98b..16ffd9fd9361 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -27,6 +27,8 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
+#include <linux/usb/composite.h>
+
#include "core.h"
#include "hw.h"
@@ -714,13 +716,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
unsigned int maxsize;
if (is_isoc)
- maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
- DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+ maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+ DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
+ MAX_DMA_DESC_NUM_HS_ISOC;
else
- maxsize = DEV_DMA_NBYTES_LIMIT;
-
- /* Above size of one descriptor was chosen, multiple it */
- maxsize *= MAX_DMA_DESC_NUM_GENERIC;
+ maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
return maxsize;
}
@@ -932,7 +932,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
/* Update index of last configured entry in the chain */
hs_ep->next_desc++;
- if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC)
+ if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
hs_ep->next_desc = 0;
return 0;
@@ -964,7 +964,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
}
/* Initialize descriptor chain by Host Busy status */
- for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) {
+ for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
desc = &hs_ep->desc_list[i];
desc->status = 0;
desc->status |= (DEV_DMA_BUFF_STS_HBUSY
@@ -1446,6 +1446,11 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
return 0;
}
+ /* Change EP direction if status phase request is after data out */
+ if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
+ hs->ep0_state == DWC2_EP0_DATA_OUT)
+ hs_ep->dir_in = 1;
+
if (first) {
if (!hs_ep->isochronous) {
dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
@@ -1938,6 +1943,10 @@ static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
}
+ hsotg->delayed_status = false;
+ if (ret == USB_GADGET_DELAYED_STATUS)
+ hsotg->delayed_status = true;
+
/*
* the request is either unhandlable, or is not formatted correctly
* so respond with a STALL for the status stage to indicate failure.
@@ -2157,12 +2166,17 @@ static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
*/
if (!hs_ep->dir_in && ureq->length & 0x3)
ureq->actual += 4 - (ureq->length & 0x3);
+
+ /* Set actual frame number for completed transfers */
+ ureq->frame_number =
+ (desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
+ DEV_DMA_ISOC_FRNUM_SHIFT;
}
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
hs_ep->compl_desc++;
- if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1))
+ if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
hs_ep->compl_desc = 0;
desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
}
@@ -2311,6 +2325,7 @@ static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
if (status & DEV_DMA_STS_MASK)
dev_err(hsotg->dev, "descriptor %d closed with %x\n",
i, status & DEV_DMA_STS_MASK);
+ desc++;
}
return bytes_rem;
@@ -2387,8 +2402,8 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
if (!using_desc_dma(hsotg) && epnum == 0 &&
hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
/* Move to STATUS IN */
- dwc2_hsotg_ep0_zlp(hsotg, true);
- return;
+ if (!hsotg->delayed_status)
+ dwc2_hsotg_ep0_zlp(hsotg, true);
}
/*
@@ -3053,8 +3068,20 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
/* Safety check EP0 state when STSPHSERCVD asserted */
if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
/* Move to STATUS IN for DDMA */
- if (using_desc_dma(hsotg))
- dwc2_hsotg_ep0_zlp(hsotg, true);
+ if (using_desc_dma(hsotg)) {
+ if (!hsotg->delayed_status)
+ dwc2_hsotg_ep0_zlp(hsotg, true);
+ else
+ /* For a 3-stage Control Write with delayed
+ * status, if the Status IN transfer started
+ * before STSPHSERCVD was asserted, NAKSTS was
+ * not cleared by CNAK in
+ * dwc2_hsotg_start_req(). Clear NAKSTS now so
+ * the transfer can complete.
+ */
+ dwc2_set_bit(hsotg, DIEPCTL(0),
+ DXEPCTL_CNAK);
+ }
}
}
@@ -3314,21 +3341,14 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg, GUSBCFG);
- usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
-
- if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
- (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
- hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) {
- /* FS/LS Dedicated Transceiver Interface */
- usbcfg |= GUSBCFG_PHYSEL;
- } else {
- /* set the PLL on, remove the HNP/SRP and set the PHY */
- val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (val << GUSBCFG_USBTRDTIM_SHIFT);
- }
- dwc2_writel(hsotg, usbcfg, GUSBCFG);
+ usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
+ usbcfg |= GUSBCFG_TOUTCAL(7);
+
+ /* remove the HNP/SRP and set the PHY */
+ usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ dwc2_phy_init(hsotg, true);
dwc2_hsotg_init_fifo(hsotg);
@@ -3899,6 +3919,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
unsigned int i, val, size;
int ret = 0;
unsigned char ep_type;
+ int desc_num;
dev_dbg(hsotg->dev,
"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
@@ -3945,11 +3966,15 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
__func__, epctrl, epctrl_reg);
+ if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
+ desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
+ else
+ desc_num = MAX_DMA_DESC_NUM_GENERIC;
+
/* Allocate DMA descriptor chain for non-ctrl endpoints */
if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
- MAX_DMA_DESC_NUM_GENERIC *
- sizeof(struct dwc2_dma_desc),
+ desc_num * sizeof(struct dwc2_dma_desc),
&hs_ep->desc_list_dma, GFP_ATOMIC);
if (!hs_ep->desc_list) {
ret = -ENOMEM;
@@ -4092,7 +4117,7 @@ error1:
error2:
if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
- dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ dmam_free_coherent(hsotg->dev, desc_num *
sizeof(struct dwc2_dma_desc),
hs_ep->desc_list, hs_ep->desc_list_dma);
hs_ep->desc_list = NULL;
@@ -4328,8 +4353,6 @@ static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
*/
static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
{
- u32 trdtim;
- u32 usbcfg;
/* unmask subset of endpoint interrupts */
dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -4353,17 +4376,6 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
dwc2_hsotg_init_fifo(hsotg);
- /* keep other bits untouched (so e.g. forced modes are not lost) */
- usbcfg = dwc2_readl(hsotg, GUSBCFG);
- usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
-
- /* set the PLL on, remove the HNP/SRP and set the PHY */
- trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (trdtim << GUSBCFG_USBTRDTIM_SHIFT);
- dwc2_writel(hsotg, usbcfg, GUSBCFG);
-
if (using_dma(hsotg))
dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
}
@@ -5073,6 +5085,7 @@ void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
+ val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
dwc2_writel(hsotg, val, GLPMCFG);
dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
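
The gadget.c changes above size the DDMA descriptor chain from the endpoint type, using MAX_DMA_DESC_NUM_HS_ISOC for isochronous endpoints and MAX_DMA_DESC_NUM_GENERIC otherwise, and reuse the same desc_num on the error-path free so allocation and release always agree. A simplified sketch of that bookkeeping, with example constants:

    /* Pick the descriptor count once and reuse it for alloc and free. */
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_DESC_GENERIC   4
    #define MAX_DESC_HS_ISOC   256

    struct dma_desc { unsigned int status, buf; };

    static struct dma_desc *alloc_chain(int is_isoc, int *desc_num)
    {
        *desc_num = is_isoc ? MAX_DESC_HS_ISOC : MAX_DESC_GENERIC;
        return calloc(*desc_num, sizeof(struct dma_desc));
    }

    int main(void)
    {
        int desc_num;
        struct dma_desc *chain = alloc_chain(1, &desc_num);

        if (!chain)
            return 1;
        printf("allocated %d descriptors (%zu bytes)\n",
               desc_num, desc_num * sizeof(struct dma_desc));
        free(chain);               /* the same count implicitly sizes the free */
        return 0;
    }
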
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 3f087962f498..b50ec3714fd8 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -97,196 +97,6 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
dwc2_writel(hsotg, intmsk, GINTMSK);
}
-/*
- * Initializes the FSLSPClkSel field of the HCFG register depending on the
- * PHY type
- */
-static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
-{
- u32 hcfg, val;
-
- if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
- hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->params.ulpi_fs_ls) ||
- hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
- /* Full speed PHY */
- val = HCFG_FSLSPCLKSEL_48_MHZ;
- } else {
- /* High speed PHY running at full speed or high speed */
- val = HCFG_FSLSPCLKSEL_30_60_MHZ;
- }
-
- dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
- hcfg = dwc2_readl(hsotg, HCFG);
- hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
- hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
- dwc2_writel(hsotg, hcfg, HCFG);
-}
-
-static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
-{
- u32 usbcfg, ggpio, i2cctl;
- int retval = 0;
-
- /*
- * core_init() is now called on every switch so only call the
- * following for the first time through
- */
- if (select_phy) {
- dev_dbg(hsotg->dev, "FS PHY selected\n");
-
- usbcfg = dwc2_readl(hsotg, GUSBCFG);
- if (!(usbcfg & GUSBCFG_PHYSEL)) {
- usbcfg |= GUSBCFG_PHYSEL;
- dwc2_writel(hsotg, usbcfg, GUSBCFG);
-
- /* Reset after a PHY select */
- retval = dwc2_core_reset(hsotg, false);
-
- if (retval) {
- dev_err(hsotg->dev,
- "%s: Reset failed, aborting", __func__);
- return retval;
- }
- }
-
- if (hsotg->params.activate_stm_fs_transceiver) {
- ggpio = dwc2_readl(hsotg, GGPIO);
- if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
- dev_dbg(hsotg->dev, "Activating transceiver\n");
- /*
- * STM32F4x9 uses the GGPIO register as general
- * core configuration register.
- */
- ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
- dwc2_writel(hsotg, ggpio, GGPIO);
- }
- }
- }
-
- /*
- * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
- * do this on HNP Dev/Host mode switches (done in dev_init and
- * host_init).
- */
- if (dwc2_is_host_mode(hsotg))
- dwc2_init_fs_ls_pclk_sel(hsotg);
-
- if (hsotg->params.i2c_enable) {
- dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
-
- /* Program GUSBCFG.OtgUtmiFsSel to I2C */
- usbcfg = dwc2_readl(hsotg, GUSBCFG);
- usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
- dwc2_writel(hsotg, usbcfg, GUSBCFG);
-
- /* Program GI2CCTL.I2CEn */
- i2cctl = dwc2_readl(hsotg, GI2CCTL);
- i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
- i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
- i2cctl &= ~GI2CCTL_I2CEN;
- dwc2_writel(hsotg, i2cctl, GI2CCTL);
- i2cctl |= GI2CCTL_I2CEN;
- dwc2_writel(hsotg, i2cctl, GI2CCTL);
- }
-
- return retval;
-}
-
-static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
-{
- u32 usbcfg, usbcfg_old;
- int retval = 0;
-
- if (!select_phy)
- return 0;
-
- usbcfg = dwc2_readl(hsotg, GUSBCFG);
- usbcfg_old = usbcfg;
-
- /*
- * HS PHY parameters. These parameters are preserved during soft reset
- * so only program the first time. Do a soft reset immediately after
- * setting phyif.
- */
- switch (hsotg->params.phy_type) {
- case DWC2_PHY_TYPE_PARAM_ULPI:
- /* ULPI interface */
- dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
- usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
- usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
- if (hsotg->params.phy_ulpi_ddr)
- usbcfg |= GUSBCFG_DDRSEL;
-
- /* Set external VBUS indicator as needed. */
- if (hsotg->params.oc_disable)
- usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
- GUSBCFG_INDICATORPASSTHROUGH);
- break;
- case DWC2_PHY_TYPE_PARAM_UTMI:
- /* UTMI+ interface */
- dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
- usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
- if (hsotg->params.phy_utmi_width == 16)
- usbcfg |= GUSBCFG_PHYIF16;
- break;
- default:
- dev_err(hsotg->dev, "FS PHY selected at HS!\n");
- break;
- }
-
- if (usbcfg != usbcfg_old) {
- dwc2_writel(hsotg, usbcfg, GUSBCFG);
-
- /* Reset after setting the PHY parameters */
- retval = dwc2_core_reset(hsotg, false);
- if (retval) {
- dev_err(hsotg->dev,
- "%s: Reset failed, aborting", __func__);
- return retval;
- }
- }
-
- return retval;
-}
-
-static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
-{
- u32 usbcfg;
- int retval = 0;
-
- if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
- hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
- hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
- /* If FS/LS mode with FS/LS PHY */
- retval = dwc2_fs_phy_init(hsotg, select_phy);
- if (retval)
- return retval;
- } else {
- /* High speed PHY */
- retval = dwc2_hs_phy_init(hsotg, select_phy);
- if (retval)
- return retval;
- }
-
- if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
- hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->params.ulpi_fs_ls) {
- dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
- usbcfg = dwc2_readl(hsotg, GUSBCFG);
- usbcfg |= GUSBCFG_ULPI_FS_LS;
- usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
- dwc2_writel(hsotg, usbcfg, GUSBCFG);
- } else {
- usbcfg = dwc2_readl(hsotg, GUSBCFG);
- usbcfg &= ~GUSBCFG_ULPI_FS_LS;
- usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
- dwc2_writel(hsotg, usbcfg, GUSBCFG);
- }
-
- return retval;
-}
-
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
@@ -2437,25 +2247,31 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg, HCCHAR(i));
- hcchar &= ~HCCHAR_CHENA;
- hcchar |= HCCHAR_CHDIS;
- hcchar &= ~HCCHAR_EPDIR;
- dwc2_writel(hsotg, hcchar, HCCHAR(i));
+ if (hcchar & HCCHAR_CHENA) {
+ hcchar &= ~HCCHAR_CHENA;
+ hcchar |= HCCHAR_CHDIS;
+ hcchar &= ~HCCHAR_EPDIR;
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
+ }
}
/* Halt all channels to put them into a known state */
for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg, HCCHAR(i));
- hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
- hcchar &= ~HCCHAR_EPDIR;
- dwc2_writel(hsotg, hcchar, HCCHAR(i));
- dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
- __func__, i);
-
- if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
- HCCHAR_CHENA, 1000)) {
- dev_warn(hsotg->dev, "Unable to clear enable on channel %d\n",
- i);
+ if (hcchar & HCCHAR_CHENA) {
+ hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
+ hcchar &= ~HCCHAR_EPDIR;
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
+ dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
+ __func__, i);
+
+ if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
+ HCCHAR_CHENA,
+ 1000)) {
+ dev_warn(hsotg->dev,
+ "Unable to clear enable on channel %d\n",
+ i);
+ }
}
}
}
@@ -4376,6 +4192,17 @@ static void dwc2_hcd_reset_func(struct work_struct *work)
spin_unlock_irqrestore(&hsotg->lock, flags);
}
+static void dwc2_hcd_phy_reset_func(struct work_struct *work)
+{
+ struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+ phy_reset_work);
+ int ret;
+
+ ret = phy_reset(hsotg->phy);
+ if (ret)
+ dev_warn(hsotg->dev, "PHY reset failed\n");
+}
+
/*
* =========================================================================
* Linux HC Driver Functions
@@ -4471,6 +4298,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
unsigned long flags;
int ret = 0;
u32 hprt0;
+ u32 pcgctl;
spin_lock_irqsave(&hsotg->lock, flags);
@@ -4486,7 +4314,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
goto unlock;
- if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL)
+ if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
goto skip_power_saving;
/*
@@ -4495,21 +4323,35 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
*/
if (!hsotg->bus_suspended) {
hprt0 = dwc2_read_hprt0(hsotg);
- hprt0 |= HPRT0_SUSP;
- hprt0 &= ~HPRT0_PWR;
- dwc2_writel(hsotg, hprt0, HPRT0);
- spin_unlock_irqrestore(&hsotg->lock, flags);
- dwc2_vbus_supply_exit(hsotg);
- spin_lock_irqsave(&hsotg->lock, flags);
+ if (hprt0 & HPRT0_CONNSTS) {
+ hprt0 |= HPRT0_SUSP;
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL)
+ hprt0 &= ~HPRT0_PWR;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ }
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL) {
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ dwc2_vbus_supply_exit(hsotg);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ } else {
+ pcgctl = readl(hsotg->regs + PCGCTL);
+ pcgctl |= PCGCTL_STOPPCLK;
+ writel(pcgctl, hsotg->regs + PCGCTL);
+ }
}
- /* Enter partial_power_down */
- ret = dwc2_enter_partial_power_down(hsotg);
- if (ret) {
- if (ret != -ENOTSUPP)
- dev_err(hsotg->dev,
- "enter partial_power_down failed\n");
- goto skip_power_saving;
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL) {
+ /* Enter partial_power_down */
+ ret = dwc2_enter_partial_power_down(hsotg);
+ if (ret) {
+ if (ret != -ENOTSUPP)
+ dev_err(hsotg->dev,
+ "enter partial_power_down failed\n");
+ goto skip_power_saving;
+ }
+
+ /* After entering partial_power_down, hardware is no longer accessible */
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
}
/* Ask phy to be suspended */
@@ -4519,9 +4361,6 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
spin_lock_irqsave(&hsotg->lock, flags);
}
- /* After entering partial_power_down, hardware is no more accessible */
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
skip_power_saving:
hsotg->lx_state = DWC2_L2;
unlock:
@@ -4534,6 +4373,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
+ u32 pcgctl;
int ret = 0;
spin_lock_irqsave(&hsotg->lock, flags);
@@ -4544,18 +4384,12 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
if (hsotg->lx_state != DWC2_L2)
goto unlock;
- if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL) {
+ if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL) {
hsotg->lx_state = DWC2_L0;
goto unlock;
}
/*
- * Set HW accessible bit before powering on the controller
- * since an interrupt may rise.
- */
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- /*
* Enable power if not already done.
* This must not be spinlocked since duration
* of this call is unknown.
@@ -4566,10 +4400,23 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
spin_lock_irqsave(&hsotg->lock, flags);
}
- /* Exit partial_power_down */
- ret = dwc2_exit_partial_power_down(hsotg, true);
- if (ret && (ret != -ENOTSUPP))
- dev_err(hsotg->dev, "exit partial_power_down failed\n");
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL) {
+ /*
+ * Set HW accessible bit before powering on the controller
+ * since an interrupt may rise.
+ */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ /* Exit partial_power_down */
+ ret = dwc2_exit_partial_power_down(hsotg, true);
+ if (ret && (ret != -ENOTSUPP))
+ dev_err(hsotg->dev, "exit partial_power_down failed\n");
+ } else {
+ pcgctl = readl(hsotg->regs + PCGCTL);
+ pcgctl &= ~PCGCTL_STOPPCLK;
+ writel(pcgctl, hsotg->regs + PCGCTL);
+ }
hsotg->lx_state = DWC2_L0;
@@ -4581,10 +4428,12 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_port_resume(hsotg);
} else {
- dwc2_vbus_supply_init(hsotg);
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL) {
+ dwc2_vbus_supply_init(hsotg);
- /* Wait for controller to correctly update D+/D- level */
- usleep_range(3000, 5000);
+ /* Wait for controller to correctly update D+/D- level */
+ usleep_range(3000, 5000);
+ }
/*
* Clear Port Enable and Port Status changes.
@@ -5130,6 +4979,8 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
destroy_workqueue(hsotg->wq_otg);
}
+ cancel_work_sync(&hsotg->phy_reset_work);
+
del_timer(&hsotg->wkp_timer);
}
@@ -5271,11 +5122,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
hsotg->hc_ptr_array[i] = channel;
}
- /* Initialize hsotg start work */
+ /* Initialize work */
INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
-
- /* Initialize port reset work */
INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func);
+ INIT_WORK(&hsotg->phy_reset_work, dwc2_hcd_phy_reset_func);
/*
* Allocate space for storing data on status transactions. Normally no
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 98af924a9a5c..510e87ec0be8 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -310,12 +310,12 @@
#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
-#define GHWCFG4_ACG_SUPPORTED BIT(12)
-#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
-#define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
+#define GHWCFG4_ACG_SUPPORTED BIT(12)
+#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
+#define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10)
#define GHWCFG4_XHIBER BIT(7)
#define GHWCFG4_HIBER BIT(6)
#define GHWCFG4_MIN_AHB_FREQ BIT(5)
@@ -333,7 +333,7 @@
#define GLPMCFG_SNDLPM BIT(24)
#define GLPMCFG_RETRY_CNT_MASK (0x7 << 21)
#define GLPMCFG_RETRY_CNT_SHIFT 21
-#define GLPMCFG_LPM_ACCEPT_CTRL_CONTROL BIT(21)
+#define GLPMCFG_LPM_REJECT_CTRL_CONTROL BIT(21)
#define GLPMCFG_LPM_ACCEPT_CTRL_ISOC BIT(22)
#define GLPMCFG_LPM_CHNL_INDX_MASK (0xf << 17)
#define GLPMCFG_LPM_CHNL_INDX_SHIFT 17
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 24ff5f21cb25..6900eea57526 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -121,6 +121,16 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
}
+static void dwc2_set_amlogic_g12a_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->lpm = false;
+ p->lpm_clock_gating = false;
+ p->besl = false;
+ p->hird_threshold_en = false;
+}
+
static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
@@ -167,6 +177,8 @@ const struct of_device_id dwc2_of_match_table[] = {
.data = dwc2_set_amlogic_params },
{ .compatible = "amlogic,meson-gxbb-usb",
.data = dwc2_set_amlogic_params },
+ { .compatible = "amlogic,meson-g12a-usb",
+ .data = dwc2_set_amlogic_g12a_params },
{ .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params },
{ .compatible = "st,stm32f4x9-fsotg",
.data = dwc2_set_stm32f4x9_fsotg_params },
@@ -273,6 +285,23 @@ static void dwc2_set_param_power_down(struct dwc2_hsotg *hsotg)
hsotg->params.power_down = val;
}
+static void dwc2_set_param_lpm(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->lpm = hsotg->hw_params.lpm_mode;
+ if (p->lpm) {
+ p->lpm_clock_gating = true;
+ p->besl = true;
+ p->hird_threshold_en = true;
+ p->hird_threshold = 4;
+ } else {
+ p->lpm_clock_gating = false;
+ p->besl = false;
+ p->hird_threshold_en = false;
+ }
+}
+
/**
* dwc2_set_default_params() - Set all core parameters to their
* auto-detected default values.
@@ -291,6 +320,7 @@ static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
dwc2_set_param_speed(hsotg);
dwc2_set_param_phy_utmi_width(hsotg);
dwc2_set_param_power_down(hsotg);
+ dwc2_set_param_lpm(hsotg);
p->phy_ulpi_ddr = false;
p->phy_ulpi_ext_vbus = false;
@@ -303,11 +333,6 @@ static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
p->reload_ctl = (hw->snpsid >= DWC2_CORE_REV_2_92a);
p->uframe_sched = true;
p->external_id_pin_ctl = false;
- p->lpm = true;
- p->lpm_clock_gating = true;
- p->besl = true;
- p->hird_threshold_en = true;
- p->hird_threshold = 4;
p->ipg_isoc_en = false;
p->service_interval = false;
p->max_packet_count = hw->max_packet_count;
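
dwc2_set_param_lpm() above derives the LPM-related defaults from the detected hw_params.lpm_mode capability instead of enabling them unconditionally, so clock gating, BESL and the HIRD threshold only come on when the core can actually use them. A simplified sketch of that dependent-defaults idea (field names mirror the patch, but the structures are invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct params {
        bool lpm, lpm_clock_gating, besl, hird_threshold_en;
        int hird_threshold;
    };

    static void set_param_lpm(struct params *p, bool hw_lpm_mode)
    {
        p->lpm = hw_lpm_mode;              /* follow the capability bit */
        p->lpm_clock_gating = p->lpm;
        p->besl = p->lpm;
        p->hird_threshold_en = p->lpm;
        p->hird_threshold = p->lpm ? 4 : 0;
    }

    int main(void)
    {
        struct params p;

        set_param_lpm(&p, false);          /* core without LPM: everything off */
        printf("lpm=%d besl=%d\n", p.lpm, p.besl);
        return 0;
    }
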
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c0b64d483552..d10a7f8daec3 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -230,9 +230,6 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
reset_control_deassert(hsotg->reset_ecc);
- /* Set default UTMI width */
- hsotg->phyif = GUSBCFG_PHYIF16;
-
/*
* Attempt to find a generic PHY, then look for an old style
* USB PHY and then fall back to pdata
@@ -280,14 +277,14 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
* width is 8-bit and set the phyif appropriately.
*/
if (phy_get_bus_width(hsotg->phy) == 8)
- hsotg->phyif = GUSBCFG_PHYIF8;
+ hsotg->params.phy_utmi_width = 8;
}
/* Clock */
- hsotg->clk = devm_clk_get(hsotg->dev, "otg");
+ hsotg->clk = devm_clk_get_optional(hsotg->dev, "otg");
if (IS_ERR(hsotg->clk)) {
- hsotg->clk = NULL;
- dev_dbg(hsotg->dev, "cannot get otg clock\n");
+ dev_err(hsotg->dev, "cannot get otg clock\n");
+ return PTR_ERR(hsotg->clk);
}
/* Regulators */
@@ -481,6 +478,15 @@ static int dwc2_driver_probe(struct platform_device *dev)
hsotg->gadget_enabled = 1;
}
+ hsotg->reset_phy_on_wake =
+ of_property_read_bool(dev->dev.of_node,
+ "snps,reset-phy-on-wake");
+ if (hsotg->reset_phy_on_wake && !hsotg->phy) {
+ dev_warn(hsotg->dev,
+ "Quirk reset-phy-on-wake only supports generic PHYs\n");
+ hsotg->reset_phy_on_wake = false;
+ }
+
if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
retval = dwc2_hcd_init(hsotg);
if (retval) {
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 2b1494460d0c..4a62045cc812 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -54,7 +54,8 @@ comment "Platform Glue Driver Support"
config USB_DWC3_OMAP
tristate "Texas Instruments OMAP5 and similar Platforms"
- depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST)
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on EXTCON || !EXTCON
depends on OF
default USB_DWC3
help
@@ -95,6 +96,16 @@ config USB_DWC3_KEYSTONE
Support of USB2/3 functionality in TI Keystone2 and AM654 platforms.
Say 'Y' or 'M' here if you have one such device
+config USB_DWC3_MESON_G12A
+ tristate "Amlogic Meson G12A Platforms"
+ depends on OF && COMMON_CLK
+ depends on ARCH_MESON || COMPILE_TEST
+ default USB_DWC3
+ select USB_ROLE_SWITCH
+ help
+ Support USB2/3 functionality in Amlogic G12A platforms.
+ Say 'Y' or 'M' if you have one such device.
+
config USB_DWC3_OF_SIMPLE
tristate "Generic OF Simple Glue Layer"
depends on OF && COMMON_CLK
@@ -115,7 +126,8 @@ config USB_DWC3_ST
config USB_DWC3_QCOM
tristate "Qualcomm Platform"
- depends on EXTCON && (ARCH_QCOM || COMPILE_TEST)
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on EXTCON || !EXTCON
depends on OF
default USB_DWC3
help
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 6e3ef6144e5d..ae86da0dc5bd 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
obj-$(CONFIG_USB_DWC3_HAPS) += dwc3-haps.o
obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o
+obj-$(CONFIG_USB_DWC3_MESON_G12A) += dwc3-meson-g12a.o
obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o
obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index a1b126f90261..4aff1d8dbc4f 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -828,6 +828,7 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
ret = device_property_read_u32_array(dev,
"snps,incr-burst-type-adjustment", vals, ntype);
if (ret) {
+ kfree(vals);
dev_err(dev, "Error to get property\n");
return;
}
@@ -846,6 +847,8 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
incrx_mode = INCRX_BURST_MODE;
}
+ kfree(vals);
+
/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
if (incrx_mode)
@@ -893,12 +896,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
u32 reg;
int ret;
- if (!dwc3_core_is_valid(dwc)) {
- dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
- ret = -ENODEV;
- goto err0;
- }
-
/*
* Write Linux Version Code to our GUID register so it's easy to figure
* out which kernel version a bug was found.
@@ -1218,7 +1215,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 tx_max_burst_prd;
/* default to highest possible threshold */
- lpm_nyet_threshold = 0xff;
+ lpm_nyet_threshold = 0xf;
/* default to -3.5dB de-emphasis */
tx_de_emphasis = 1;
@@ -1426,6 +1423,11 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->regs = regs;
dwc->regs_size = resource_size(&dwc_res);
+ if (!dwc3_core_is_valid(dwc)) {
+ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+ return -ENODEV;
+ }
+
dwc3_get_properties(dwc);
dwc->reset = devm_reset_control_get_optional_shared(dev, NULL);
@@ -1600,6 +1602,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
+ synchronize_irq(dwc->irq_gadget);
dwc3_core_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
@@ -1632,6 +1635,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
+ synchronize_irq(dwc->irq_gadget);
}
dwc3_otg_exit(dwc);
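
Among the dwc3/core.c changes above, dwc3_set_incr_burst_type() now frees the temporary vals array on the property-read error path as well as after the values are consumed, closing a small memory leak. A sketch of the free-on-every-exit-path structure, with a stand-in for the property read:

    /* The allocation is released on every exit path; read_property() is an
     * invented helper standing in for the firmware property read.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static int read_property(unsigned int *vals, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            vals[i] = 1u << i;             /* pretend these came from firmware */
        return 0;                          /* return non-zero to simulate failure */
    }

    static int set_burst_type(size_t ntype)
    {
        unsigned int *vals = calloc(ntype, sizeof(*vals));
        int ret;

        if (!vals)
            return -1;

        ret = read_property(vals, ntype);
        if (ret)
            goto out;                      /* previously this path leaked vals */

        printf("first burst value: %u\n", vals[0]);
    out:
        free(vals);
        return ret;
    }

    int main(void)
    {
        return set_burst_type(3) ? 1 : 0;
    }
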
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1528d395b156..f19cbeb01087 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -406,8 +406,7 @@
#define DWC3_DCTL_TRGTULST_SS_INACT (DWC3_DCTL_TRGTULST(6))
/* These apply for core versions 1.94a and later */
-#define DWC3_DCTL_LPM_ERRATA_MASK DWC3_DCTL_LPM_ERRATA(0xf)
-#define DWC3_DCTL_LPM_ERRATA(n) ((n) << 20)
+#define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20)
#define DWC3_DCTL_KEEP_CONNECT BIT(19)
#define DWC3_DCTL_L1_HIBER_EN BIT(18)
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 6759a7efd8d5..068259fdfb0c 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -250,6 +250,9 @@ static inline void dwc3_decode_get_status(__u8 t, __u16 i, __u16 l, char *str,
size_t size)
{
switch (t & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ snprintf(str, size, "Get Device Status(Length = %d)", l);
+ break;
case USB_RECIP_INTERFACE:
snprintf(str, size, "Get Interface Status(Intf = %d, Length = %d)",
i, l);
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
new file mode 100644
index 000000000000..2aec31a2eacb
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -0,0 +1,604 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB Glue for Amlogic G12A SoCs
+ *
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+/*
+ * The USB support is organized as a glue layer around the DWC3 controller IP:
+ * - control registers for each USB2 port
+ * - control registers for the USB PHY layer
+ * - the SuperSpeed PHY can be enabled only if its port is used
+ *
+ * TOFIX:
+ * - Add dynamic OTG switching with ID change interrupt
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
+#include <linux/regulator/consumer.h>
+
+/* USB2 Ports Control Registers */
+
+#define U2P_REG_SIZE 0x20
+
+#define U2P_R0 0x0
+ #define U2P_R0_HOST_DEVICE BIT(0)
+ #define U2P_R0_POWER_OK BIT(1)
+ #define U2P_R0_HAST_MODE BIT(2)
+ #define U2P_R0_POWER_ON_RESET BIT(3)
+ #define U2P_R0_ID_PULLUP BIT(4)
+ #define U2P_R0_DRV_VBUS BIT(5)
+
+#define U2P_R1 0x4
+ #define U2P_R1_PHY_READY BIT(0)
+ #define U2P_R1_ID_DIG BIT(1)
+ #define U2P_R1_OTG_SESSION_VALID BIT(2)
+ #define U2P_R1_VBUS_VALID BIT(3)
+
+/* USB Glue Control Registers */
+
+#define USB_R0 0x80
+ #define USB_R0_P30_LANE0_TX2RX_LOOPBACK BIT(17)
+ #define USB_R0_P30_LANE0_EXT_PCLK_REQ BIT(18)
+ #define USB_R0_P30_PCS_RX_LOS_MASK_VAL_MASK GENMASK(28, 19)
+ #define USB_R0_U2D_SS_SCALEDOWN_MODE_MASK GENMASK(30, 29)
+ #define USB_R0_U2D_ACT BIT(31)
+
+#define USB_R1 0x84
+ #define USB_R1_U3H_BIGENDIAN_GS BIT(0)
+ #define USB_R1_U3H_PME_ENABLE BIT(1)
+ #define USB_R1_U3H_HUB_PORT_OVERCURRENT_MASK GENMASK(4, 2)
+ #define USB_R1_U3H_HUB_PORT_PERM_ATTACH_MASK GENMASK(9, 7)
+ #define USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK GENMASK(13, 12)
+ #define USB_R1_U3H_HOST_U3_PORT_DISABLE BIT(16)
+ #define USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT BIT(17)
+ #define USB_R1_U3H_HOST_MSI_ENABLE BIT(18)
+ #define USB_R1_U3H_FLADJ_30MHZ_REG_MASK GENMASK(24, 19)
+ #define USB_R1_P30_PCS_TX_SWING_FULL_MASK GENMASK(31, 25)
+
+#define USB_R2 0x88
+ #define USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(25, 20)
+ #define USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK GENMASK(31, 26)
+
+#define USB_R3 0x8c
+ #define USB_R3_P30_SSC_ENABLE BIT(0)
+ #define USB_R3_P30_SSC_RANGE_MASK GENMASK(3, 1)
+ #define USB_R3_P30_SSC_REF_CLK_SEL_MASK GENMASK(12, 4)
+ #define USB_R3_P30_REF_SSP_EN BIT(13)
+
+#define USB_R4 0x90
+ #define USB_R4_P21_PORT_RESET_0 BIT(0)
+ #define USB_R4_P21_SLEEP_M0 BIT(1)
+ #define USB_R4_MEM_PD_MASK GENMASK(3, 2)
+ #define USB_R4_P21_ONLY BIT(4)
+
+#define USB_R5 0x94
+ #define USB_R5_ID_DIG_SYNC BIT(0)
+ #define USB_R5_ID_DIG_REG BIT(1)
+ #define USB_R5_ID_DIG_CFG_MASK GENMASK(3, 2)
+ #define USB_R5_ID_DIG_EN_0 BIT(4)
+ #define USB_R5_ID_DIG_EN_1 BIT(5)
+ #define USB_R5_ID_DIG_CURR BIT(6)
+ #define USB_R5_ID_DIG_IRQ BIT(7)
+ #define USB_R5_ID_DIG_TH_MASK GENMASK(15, 8)
+ #define USB_R5_ID_DIG_CNT_MASK GENMASK(23, 16)
+
+enum {
+ USB2_HOST_PHY = 0,
+ USB2_OTG_PHY,
+ USB3_HOST_PHY,
+ PHY_COUNT,
+};
+
+static const char *phy_names[PHY_COUNT] = {
+ "usb2-phy0", "usb2-phy1", "usb3-phy0",
+};
+
+struct dwc3_meson_g12a {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct reset_control *reset;
+ struct phy *phys[PHY_COUNT];
+ enum usb_dr_mode otg_mode;
+ enum phy_mode otg_phy_mode;
+ unsigned int usb2_ports;
+ unsigned int usb3_ports;
+ struct regulator *vbus;
+ struct usb_role_switch_desc switch_desc;
+ struct usb_role_switch *role_switch;
+};
+
+static void dwc3_meson_g12a_usb2_set_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode)
+{
+ if (mode == PHY_MODE_USB_HOST)
+ regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
+ U2P_R0_HOST_DEVICE,
+ U2P_R0_HOST_DEVICE);
+ else
+ regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
+ U2P_R0_HOST_DEVICE, 0);
+}
+
+static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv)
+{
+ int i;
+
+ if (priv->otg_mode == USB_DR_MODE_PERIPHERAL)
+ priv->otg_phy_mode = PHY_MODE_USB_DEVICE;
+ else
+ priv->otg_phy_mode = PHY_MODE_USB_HOST;
+
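+	/* Cycle each populated port through power-on reset while programming its mode */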
+ for (i = 0 ; i < USB3_HOST_PHY ; ++i) {
+ if (!priv->phys[i])
+ continue;
+
+ regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
+ U2P_R0_POWER_ON_RESET,
+ U2P_R0_POWER_ON_RESET);
+
+ if (i == USB2_OTG_PHY) {
+ regmap_update_bits(priv->regmap,
+ U2P_R0 + (U2P_REG_SIZE * i),
+ U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
+ U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
+
+ dwc3_meson_g12a_usb2_set_mode(priv, i,
+ priv->otg_phy_mode);
+ } else
+ dwc3_meson_g12a_usb2_set_mode(priv, i,
+ PHY_MODE_USB_HOST);
+
+ regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
+ U2P_R0_POWER_ON_RESET, 0);
+ }
+
+ return 0;
+}
+
+static void dwc3_meson_g12a_usb3_init(struct dwc3_meson_g12a *priv)
+{
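+	/* Enable SuperSpeed spread spectrum clocking and the reference SSP */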
+ regmap_update_bits(priv->regmap, USB_R3,
+ USB_R3_P30_SSC_RANGE_MASK |
+ USB_R3_P30_REF_SSP_EN,
+ USB_R3_P30_SSC_ENABLE |
+ FIELD_PREP(USB_R3_P30_SSC_RANGE_MASK, 2) |
+ USB_R3_P30_REF_SSP_EN);
+ udelay(2);
+
+ regmap_update_bits(priv->regmap, USB_R2,
+ USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK,
+ FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK, 0x15));
+
+ regmap_update_bits(priv->regmap, USB_R2,
+ USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK,
+ FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK, 0x20));
+
+ udelay(2);
+
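+	/* Advertise per-port power control to the host and use full TX swing */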
+ regmap_update_bits(priv->regmap, USB_R1,
+ USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT,
+ USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT);
+
+ regmap_update_bits(priv->regmap, USB_R1,
+ USB_R1_P30_PCS_TX_SWING_FULL_MASK,
+ FIELD_PREP(USB_R1_P30_PCS_TX_SWING_FULL_MASK, 127));
+}
+
+static void dwc3_meson_g12a_usb_otg_apply_mode(struct dwc3_meson_g12a *priv)
+{
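+	/* Device mode activates the U2D controller path; host mode releases it */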
+ if (priv->otg_phy_mode == PHY_MODE_USB_DEVICE) {
+ regmap_update_bits(priv->regmap, USB_R0,
+ USB_R0_U2D_ACT, USB_R0_U2D_ACT);
+ regmap_update_bits(priv->regmap, USB_R0,
+ USB_R0_U2D_SS_SCALEDOWN_MODE_MASK, 0);
+ regmap_update_bits(priv->regmap, USB_R4,
+ USB_R4_P21_SLEEP_M0, USB_R4_P21_SLEEP_M0);
+ } else {
+ regmap_update_bits(priv->regmap, USB_R0,
+ USB_R0_U2D_ACT, 0);
+ regmap_update_bits(priv->regmap, USB_R4,
+ USB_R4_P21_SLEEP_M0, 0);
+ }
+}
+
+static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
+{
+ int ret;
+
+ ret = dwc3_meson_g12a_usb2_init(priv);
+ if (ret)
+ return ret;
+
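+	/* Program the 30MHz frame length adjustment value */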
+ regmap_update_bits(priv->regmap, USB_R1,
+ USB_R1_U3H_FLADJ_30MHZ_REG_MASK,
+ FIELD_PREP(USB_R1_U3H_FLADJ_30MHZ_REG_MASK, 0x20));
+
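+	/* Enable ID pin digital detection and set its threshold */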
+ regmap_update_bits(priv->regmap, USB_R5,
+ USB_R5_ID_DIG_EN_0,
+ USB_R5_ID_DIG_EN_0);
+ regmap_update_bits(priv->regmap, USB_R5,
+ USB_R5_ID_DIG_EN_1,
+ USB_R5_ID_DIG_EN_1);
+ regmap_update_bits(priv->regmap, USB_R5,
+ USB_R5_ID_DIG_TH_MASK,
+ FIELD_PREP(USB_R5_ID_DIG_TH_MASK, 0xff));
+
+ /* If we have an actual SuperSpeed port, initialize it */
+ if (priv->usb3_ports)
+ dwc3_meson_g12a_usb3_init(priv);
+
+ dwc3_meson_g12a_usb_otg_apply_mode(priv);
+
+ return 0;
+}
+
+static const struct regmap_config phy_meson_g12a_usb3_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = USB_R5,
+};
+
+static int dwc3_meson_g12a_get_phys(struct dwc3_meson_g12a *priv)
+{
+ int i;
+
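+	/* PHYs are optional; count only the USB2/USB3 ports described in DT */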
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ priv->phys[i] = devm_phy_optional_get(priv->dev, phy_names[i]);
+ if (!priv->phys[i])
+ continue;
+
+ if (IS_ERR(priv->phys[i]))
+ return PTR_ERR(priv->phys[i]);
+
+ if (i == USB3_HOST_PHY)
+ priv->usb3_ports++;
+ else
+ priv->usb2_ports++;
+ }
+
+ dev_info(priv->dev, "USB2 ports: %d\n", priv->usb2_ports);
+ dev_info(priv->dev, "USB3 ports: %d\n", priv->usb3_ports);
+
+ return 0;
+}
+
+static enum phy_mode dwc3_meson_g12a_get_id(struct dwc3_meson_g12a *priv)
+{
+ u32 reg;
+
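+	/* The glue reports the ID pin digital state in USB_R5; a set bit selects Device mode */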
+ regmap_read(priv->regmap, USB_R5, &reg);
+
+ if (reg & (USB_R5_ID_DIG_SYNC | USB_R5_ID_DIG_REG))
+ return PHY_MODE_USB_DEVICE;
+
+ return PHY_MODE_USB_HOST;
+}
+
+static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
+{
+ int ret;
+
+ if (!priv->phys[USB2_OTG_PHY])
+ return -EINVAL;
+
+ if (mode == PHY_MODE_USB_HOST)
+ dev_info(priv->dev, "switching to Host Mode\n");
+ else
+ dev_info(priv->dev, "switching to Device Mode\n");
+
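+	/* Only drive VBUS when switching to Host mode */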
+ if (priv->vbus) {
+ if (mode == PHY_MODE_USB_DEVICE)
+ ret = regulator_disable(priv->vbus);
+ else
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ return ret;
+ }
+
+ priv->otg_phy_mode = mode;
+
+ dwc3_meson_g12a_usb2_set_mode(priv, USB2_OTG_PHY, mode);
+
+ dwc3_meson_g12a_usb_otg_apply_mode(priv);
+
+ return 0;
+}
+
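+/* usb_role_switch callbacks: USB_ROLE_NONE is ignored and the mode is only reprogrammed on an actual change */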
+static int dwc3_meson_g12a_role_set(struct device *dev, enum usb_role role)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+ enum phy_mode mode;
+
+ if (role == USB_ROLE_NONE)
+ return 0;
+
+ mode = (role == USB_ROLE_HOST) ? PHY_MODE_USB_HOST
+ : PHY_MODE_USB_DEVICE;
+
+ if (mode == priv->otg_phy_mode)
+ return 0;
+
+ return dwc3_meson_g12a_otg_mode_set(priv, mode);
+}
+
+static enum usb_role dwc3_meson_g12a_role_get(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+
+ return priv->otg_phy_mode == PHY_MODE_USB_HOST ?
+ USB_ROLE_HOST : USB_ROLE_DEVICE;
+}
+
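+/* Find the child controller device ("snps,dwc3" host or "snps,dwc2" peripheral) for the role switch */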
+static struct device *dwc3_meson_g12_find_child(struct device *dev,
+ const char *compatible)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_get_compatible_child(dev->of_node, compatible);
+ if (!np)
+ return NULL;
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return NULL;
+
+ return &pdev->dev;
+}
+
+static int dwc3_meson_g12a_probe(struct platform_device *pdev)
+{
+ struct dwc3_meson_g12a *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+ struct resource *res;
+ enum phy_mode otg_id;
+ int ret, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, base,
+ &phy_meson_g12a_usb3_regmap_conf);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(priv->vbus);
+ priv->vbus = NULL;
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
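+	/* Ensure the clock is disabled again on unbind or probe failure */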
+ devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ priv->clk);
+
+ platform_set_drvdata(pdev, priv);
+ priv->dev = dev;
+
+ priv->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ dev_err(dev, "failed to get device reset, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
+
+ ret = dwc3_meson_g12a_get_phys(priv);
+ if (ret)
+ return ret;
+
+ if (priv->vbus) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ return ret;
+ }
+
+ /* Get dr_mode */
+ priv->otg_mode = usb_get_dr_mode(dev);
+
+ dwc3_meson_g12a_usb_init(priv);
+
+ /* Init PHYs */
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ ret = phy_init(priv->phys[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Set PHY Power */
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ ret = phy_power_on(priv->phys[i]);
+ if (ret)
+ goto err_phys_exit;
+ }
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ goto err_phys_power;
+ }
+
+	/* Set up the OTG mode according to the ID pin state */
+ if (priv->otg_mode == USB_DR_MODE_OTG) {
+ /* TOFIX Handle ID mode toggling via IRQ */
+ otg_id = dwc3_meson_g12a_get_id(priv);
+ if (otg_id != priv->otg_phy_mode) {
+ if (dwc3_meson_g12a_otg_mode_set(priv, otg_id))
+ dev_warn(dev, "Failed to switch OTG mode\n");
+ }
+ }
+
+	/* Set up the role switcher */
+ priv->switch_desc.usb2_port = dwc3_meson_g12_find_child(dev,
+ "snps,dwc3");
+ priv->switch_desc.udc = dwc3_meson_g12_find_child(dev, "snps,dwc2");
+ priv->switch_desc.allow_userspace_control = true;
+ priv->switch_desc.set = dwc3_meson_g12a_role_set;
+ priv->switch_desc.get = dwc3_meson_g12a_role_get;
+
+ priv->role_switch = usb_role_switch_register(dev, &priv->switch_desc);
+ if (IS_ERR(priv->role_switch))
+ dev_warn(dev, "Unable to register Role Switch\n");
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ return 0;
+
+err_phys_power:
+ for (i = 0 ; i < PHY_COUNT ; ++i)
+ phy_power_off(priv->phys[i]);
+
+err_phys_exit:
+ for (i = 0 ; i < PHY_COUNT ; ++i)
+ phy_exit(priv->phys[i]);
+
+ return ret;
+}
+
+static int dwc3_meson_g12a_remove(struct platform_device *pdev)
+{
+ struct dwc3_meson_g12a *priv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int i;
+
+ usb_role_switch_unregister(priv->role_switch);
+
+ of_platform_depopulate(dev);
+
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ phy_power_off(priv->phys[i]);
+ phy_exit(priv->phys[i]);
+ }
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+
+ return 0;
+}
+
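+/* Runtime PM only gates the glue clock; system sleep also powers down the PHYs and asserts the reset line */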
+static int __maybe_unused dwc3_meson_g12a_runtime_suspend(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_meson_g12a_runtime_resume(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+
+ return clk_enable(priv->clk);
+}
+
+static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ phy_power_off(priv->phys[i]);
+ phy_exit(priv->phys[i]);
+ }
+
+ reset_control_assert(priv->reset);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+ int i, ret;
+
+ reset_control_deassert(priv->reset);
+
+ dwc3_meson_g12a_usb_init(priv);
+
+ /* Init PHYs */
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ ret = phy_init(priv->phys[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Set PHY Power */
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ ret = phy_power_on(priv->phys[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwc3_meson_g12a_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_meson_g12a_suspend, dwc3_meson_g12a_resume)
+ SET_RUNTIME_PM_OPS(dwc3_meson_g12a_runtime_suspend,
+ dwc3_meson_g12a_runtime_resume, NULL)
+};
+
+static const struct of_device_id dwc3_meson_g12a_match[] = {
+ { .compatible = "amlogic,meson-g12a-usb-ctrl" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dwc3_meson_g12a_match);
+
+static struct platform_driver dwc3_meson_g12a_driver = {
+ .probe = dwc3_meson_g12a_probe,
+ .remove = dwc3_meson_g12a_remove,
+ .driver = {
+ .name = "dwc3-meson-g12a",
+ .of_match_table = dwc3_meson_g12a_match,
+ .pm = &dwc3_meson_g12a_dev_pm_ops,
+ },
+};
+
+module_platform_driver(dwc3_meson_g12a_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Amlogic Meson G12A USB Glue Layer");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 67ce2037472d..bdac3e7d7b18 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -24,59 +24,13 @@
struct dwc3_of_simple {
struct device *dev;
- struct clk **clks;
+ struct clk_bulk_data *clks;
int num_clocks;
struct reset_control *resets;
bool pulse_resets;
bool need_reset;
};
-static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
-{
- struct device *dev = simple->dev;
- struct device_node *np = dev->of_node;
- int i;
-
- simple->num_clocks = count;
-
- if (!count)
- return 0;
-
- simple->clks = devm_kcalloc(dev, simple->num_clocks,
- sizeof(struct clk *), GFP_KERNEL);
- if (!simple->clks)
- return -ENOMEM;
-
- for (i = 0; i < simple->num_clocks; i++) {
- struct clk *clk;
- int ret;
-
- clk = of_clk_get(np, i);
- if (IS_ERR(clk)) {
- while (--i >= 0) {
- clk_disable_unprepare(simple->clks[i]);
- clk_put(simple->clks[i]);
- }
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret < 0) {
- while (--i >= 0) {
- clk_disable_unprepare(simple->clks[i]);
- clk_put(simple->clks[i]);
- }
- clk_put(clk);
-
- return ret;
- }
-
- simple->clks[i] = clk;
- }
-
- return 0;
-}
-
static int dwc3_of_simple_probe(struct platform_device *pdev)
{
struct dwc3_of_simple *simple;
@@ -84,7 +38,6 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- int i;
bool shared_resets = false;
simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
@@ -125,20 +78,18 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
goto err_resetc_put;
}
- ret = dwc3_of_simple_clk_init(simple, of_count_phandle_with_args(np,
- "clocks", "#clock-cells"));
+ ret = clk_bulk_get_all(simple->dev, &simple->clks);
+ if (ret < 0)
+ goto err_resetc_assert;
+
+ simple->num_clocks = ret;
+ ret = clk_bulk_prepare_enable(simple->num_clocks, simple->clks);
if (ret)
goto err_resetc_assert;
ret = of_platform_populate(np, NULL, NULL, dev);
- if (ret) {
- for (i = 0; i < simple->num_clocks; i++) {
- clk_disable_unprepare(simple->clks[i]);
- clk_put(simple->clks[i]);
- }
-
- goto err_resetc_assert;
- }
+ if (ret)
+ goto err_clk_put;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -146,6 +97,10 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
return 0;
+err_clk_put:
+ clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
+ clk_bulk_put_all(simple->num_clocks, simple->clks);
+
err_resetc_assert:
if (!simple->pulse_resets)
reset_control_assert(simple->resets);
@@ -159,14 +114,11 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
{
struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- int i;
of_platform_depopulate(dev);
- for (i = 0; i < simple->num_clocks; i++) {
- clk_disable_unprepare(simple->clks[i]);
- clk_put(simple->clks[i]);
- }
+ clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
+ clk_bulk_put_all(simple->num_clocks, simple->clks);
simple->num_clocks = 0;
if (!simple->pulse_resets)
@@ -184,10 +136,8 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
- int i;
- for (i = 0; i < simple->num_clocks; i++)
- clk_disable(simple->clks[i]);
+ clk_bulk_disable(simple->num_clocks, simple->clks);
return 0;
}
@@ -195,19 +145,8 @@ static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
- int ret;
- int i;
-
- for (i = 0; i < simple->num_clocks; i++) {
- ret = clk_enable(simple->clks[i]);
- if (ret < 0) {
- while (--i >= 0)
- clk_disable(simple->clks[i]);
- return ret;
- }
- }
- return 0;
+ return clk_bulk_enable(simple->num_clocks, simple->clks);
}
static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e293400cc6e9..d67655384eb2 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2863,7 +2863,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
"LPM Erratum not available on dwc3 revisions < 2.40a\n");
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
- reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
+ reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
} else {
@@ -3301,6 +3301,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->gadget.sg_supported = true;
dwc->gadget.name = "dwc3-gadget";
dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
+ dwc->gadget.lpm_capable = true;
/*
* FIXME We might be setting max_speed to <SUPER, however versions
@@ -3384,8 +3385,6 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
- synchronize_irq(dwc->irq_gadget);
-
return 0;
}
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index c9cfb100ecdc..cac991173ac0 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -533,8 +533,6 @@ static int xdbc_handle_external_reset(void)
xdbc_mem_init();
- mmiowb();
-
ret = xdbc_start();
if (ret < 0)
goto reset_out;
@@ -587,8 +585,6 @@ static int __init xdbc_early_setup(void)
xdbc_mem_init();
- mmiowb();
-
ret = xdbc_start();
if (ret < 0) {
writel(0, &xdbc.xdbc_reg->control);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 20413c276c61..47be961f1bf3 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1133,7 +1133,8 @@ error_lock:
error_mutex:
mutex_unlock(&epfile->mutex);
error:
- ffs_free_buffer(io_data);
+ if (ret != -EIOCBQUEUED) /* don't free if there is iocb queued */
+ ffs_free_buffer(io_data);
return ret;
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 5780fba620ab..2d6e76e4cffa 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -23,6 +23,7 @@
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_ncm.h"
+#include "configfs.h"
/*
* This function is a "CDC Network Control Model" (CDC NCM) Ethernet link.
@@ -35,9 +36,7 @@
/* to trigger crc/non-crc ndp signature */
-#define NCM_NDP_HDR_CRC_MASK 0x01000000
#define NCM_NDP_HDR_CRC 0x01000000
-#define NCM_NDP_HDR_NOCRC 0x00000000
enum ncm_notify_state {
NCM_NOTIFY_NONE, /* don't notify */
@@ -526,6 +525,7 @@ static inline void ncm_reset_values(struct f_ncm *ncm)
{
ncm->parser_opts = &ndp16_opts;
ncm->is_crc = false;
+ ncm->ndp_sign = ncm->parser_opts->ndp_sign;
ncm->port.cdc_filter = DEFAULT_FILTER;
/* doesn't make sense for ncm, fixed size used */
@@ -805,25 +805,20 @@ static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_SET_CRC_MODE:
{
- int ndp_hdr_crc = 0;
-
if (w_length != 0 || w_index != ncm->ctrl_id)
goto invalid;
switch (w_value) {
case 0x0000:
ncm->is_crc = false;
- ndp_hdr_crc = NCM_NDP_HDR_NOCRC;
DBG(cdev, "non-CRC mode selected\n");
break;
case 0x0001:
ncm->is_crc = true;
- ndp_hdr_crc = NCM_NDP_HDR_CRC;
DBG(cdev, "CRC mode selected\n");
break;
default:
goto invalid;
}
- ncm->ndp_sign = ncm->parser_opts->ndp_sign | ndp_hdr_crc;
value = 0;
break;
}
@@ -840,6 +835,8 @@ invalid:
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
}
+ ncm->ndp_sign = ncm->parser_opts->ndp_sign |
+ (ncm->is_crc ? NCM_NDP_HDR_CRC : 0);
/* respond with data transfer or status phase? */
if (value >= 0) {
@@ -1395,6 +1392,16 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
return -EINVAL;
ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+
+ if (cdev->use_os_string) {
+ f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+ GFP_KERNEL);
+ if (!f->os_desc_table)
+ return -ENOMEM;
+ f->os_desc_n = 1;
+ f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ }
+
/*
* in drivers/usb/gadget/configfs.c:configfs_composite_bind()
* configurations are bound in sequence with list_for_each_entry,
@@ -1408,13 +1415,15 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
status = gether_register_netdev(ncm_opts->net);
mutex_unlock(&ncm_opts->lock);
if (status)
- return status;
+ goto fail;
ncm_opts->bound = true;
}
us = usb_gstrings_attach(cdev, ncm_strings,
ARRAY_SIZE(ncm_string_defs));
- if (IS_ERR(us))
- return PTR_ERR(us);
+ if (IS_ERR(us)) {
+ status = PTR_ERR(us);
+ goto fail;
+ }
ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
@@ -1431,6 +1440,10 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm_control_intf.bInterfaceNumber = status;
ncm_union_desc.bMasterInterface0 = status;
+ if (cdev->use_os_string)
+ f->os_desc_table[0].if_id =
+ ncm_iad_desc.bFirstInterface;
+
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
@@ -1510,6 +1523,9 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail:
+ kfree(f->os_desc_table);
+ f->os_desc_n = 0;
+
if (ncm->notify_req) {
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
@@ -1564,16 +1580,22 @@ static void ncm_free_inst(struct usb_function_instance *f)
gether_cleanup(netdev_priv(opts->net));
else
free_netdev(opts->net);
+ kfree(opts->ncm_interf_group);
kfree(opts);
}
static struct usb_function_instance *ncm_alloc_inst(void)
{
struct f_ncm_opts *opts;
+ struct usb_os_desc *descs[1];
+ char *names[1];
+ struct config_group *ncm_interf_group;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
+ opts->ncm_os_desc.ext_compat_id = opts->ncm_ext_compat_id;
+
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ncm_free_inst;
opts->net = gether_setup_default();
@@ -1582,8 +1604,20 @@ static struct usb_function_instance *ncm_alloc_inst(void)
kfree(opts);
return ERR_CAST(net);
}
+ INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
+
+ descs[0] = &opts->ncm_os_desc;
+ names[0] = "ncm";
config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
+ ncm_interf_group =
+ usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+ names, THIS_MODULE);
+ if (IS_ERR(ncm_interf_group)) {
+ ncm_free_inst(&opts->func_inst);
+ return ERR_CAST(ncm_interf_group);
+ }
+ opts->ncm_interf_group = ncm_interf_group;
return &opts->func_inst;
}
@@ -1609,6 +1643,9 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
hrtimer_cancel(&ncm->task_timer);
+ kfree(f->os_desc_table);
+ f->os_desc_n = 0;
+
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
index 24c086bcdeaa..6677ae932de0 100644
--- a/drivers/usb/gadget/function/f_uac1_legacy.c
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -54,8 +54,8 @@ static struct uac1_ac_header_descriptor_1 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_HEADER,
- .bcdADC = __constant_cpu_to_le16(0x0100),
- .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+ .bcdADC = cpu_to_le16(0x0100),
+ .wTotalLength = cpu_to_le16(UAC_DT_TOTAL_LENGTH),
.bInCollection = F_AUDIO_NUM_INTERFACES,
.baInterfaceNr = {
/* Interface number of the first AudioStream interface */
@@ -183,7 +183,7 @@ static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
.bDescriptorSubtype = UAC_EP_GENERAL,
.bmAttributes = 1,
.bLockDelayUnits = 1,
- .wLockDelay = __constant_cpu_to_le16(1),
+ .wLockDelay = cpu_to_le16(1),
};
static struct usb_descriptor_header *f_audio_desc[] = {
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index d483e45c0f77..70da3201a1d0 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -20,6 +20,9 @@ struct f_ncm_opts {
struct net_device *net;
bool bound;
+ struct config_group *ncm_interf_group;
+ struct usb_os_desc ncm_os_desc;
+ char ncm_ext_compat_id[16];
/*
* Read/write access to configfs attributes is handled by configfs.
*
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 83340f4fdc6e..35941dc125f9 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -593,10 +593,6 @@ static int ast_vhub_epn_disable(struct usb_ep* u_ep)
static int ast_vhub_epn_enable(struct usb_ep* u_ep,
const struct usb_endpoint_descriptor *desc)
{
- static const char *ep_type_string[] __maybe_unused = { "ctrl",
- "isoc",
- "bulk",
- "intr" };
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
struct ast_vhub_dev *dev;
struct ast_vhub *vhub;
@@ -646,7 +642,7 @@ static int ast_vhub_epn_enable(struct usb_ep* u_ep,
ep->epn.wedged = false;
EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
- ep->epn.is_in ? "in" : "out", ep_type_string[type],
+ ep->epn.is_in ? "in" : "out", usb_ep_type_string(type),
usb_endpoint_num(desc), maxpacket);
/* Can we use DMA descriptor mode ? */
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 660712e0bf98..503d275bc4c4 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -358,8 +358,20 @@ static inline u32 usba_int_enb_get(struct usba_udc *udc)
return udc->int_enb_cache;
}
-static inline void usba_int_enb_set(struct usba_udc *udc, u32 val)
+static inline void usba_int_enb_set(struct usba_udc *udc, u32 mask)
{
+ u32 val;
+
+ val = udc->int_enb_cache | mask;
+ usba_writel(udc, INT_ENB, val);
+ udc->int_enb_cache = val;
+}
+
+static inline void usba_int_enb_clear(struct usba_udc *udc, u32 mask)
+{
+ u32 val;
+
+ val = udc->int_enb_cache & ~mask;
usba_writel(udc, INT_ENB, val);
udc->int_enb_cache = val;
}
@@ -629,14 +641,12 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
if (ep->can_dma) {
u32 ctrl;
- usba_int_enb_set(udc, usba_int_enb_get(udc) |
- USBA_BF(EPT_INT, 1 << ep->index) |
+ usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index) |
USBA_BF(DMA_INT, 1 << ep->index));
ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
usba_ep_writel(ep, CTL_ENB, ctrl);
} else {
- usba_int_enb_set(udc, usba_int_enb_get(udc) |
- USBA_BF(EPT_INT, 1 << ep->index));
+ usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index));
}
spin_unlock_irqrestore(&udc->lock, flags);
@@ -680,8 +690,7 @@ static int usba_ep_disable(struct usb_ep *_ep)
usba_dma_readl(ep, STATUS);
}
usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
- usba_int_enb_set(udc, usba_int_enb_get(udc) &
- ~USBA_BF(EPT_INT, 1 << ep->index));
+ usba_int_enb_clear(udc, USBA_BF(EPT_INT, 1 << ep->index));
request_complete_list(ep, &req_list, -ESHUTDOWN);
@@ -1694,6 +1703,9 @@ static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
}
}
+static int start_clock(struct usba_udc *udc);
+static void stop_clock(struct usba_udc *udc);
+
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
struct usba_udc *udc = devid;
@@ -1708,10 +1720,13 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
DBG(DBG_INT, "irq, status=%#08x\n", status);
if (status & USBA_DET_SUSPEND) {
+ usba_writel(udc, INT_CLR, USBA_DET_SUSPEND|USBA_WAKE_UP);
+ usba_int_enb_set(udc, USBA_WAKE_UP);
+ usba_int_enb_clear(udc, USBA_DET_SUSPEND);
+ udc->suspended = true;
toggle_bias(udc, 0);
- usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
- usba_int_enb_set(udc, int_enb | USBA_WAKE_UP);
udc->bias_pulse_needed = true;
+ stop_clock(udc);
DBG(DBG_BUS, "Suspend detected\n");
if (udc->gadget.speed != USB_SPEED_UNKNOWN
&& udc->driver && udc->driver->suspend) {
@@ -1722,14 +1737,17 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
}
if (status & USBA_WAKE_UP) {
+ start_clock(udc);
toggle_bias(udc, 1);
usba_writel(udc, INT_CLR, USBA_WAKE_UP);
- usba_int_enb_set(udc, int_enb & ~USBA_WAKE_UP);
DBG(DBG_BUS, "Wake Up CPU detected\n");
}
if (status & USBA_END_OF_RESUME) {
+ udc->suspended = false;
usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
+ usba_int_enb_clear(udc, USBA_WAKE_UP);
+ usba_int_enb_set(udc, USBA_DET_SUSPEND);
generate_bias_pulse(udc);
DBG(DBG_BUS, "Resume detected\n");
if (udc->gadget.speed != USB_SPEED_UNKNOWN
@@ -1744,6 +1762,8 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
if (dma_status) {
int i;
+ usba_int_enb_set(udc, USBA_DET_SUSPEND);
+
for (i = 1; i <= USBA_NR_DMAS; i++)
if (dma_status & (1 << i))
usba_dma_irq(udc, &udc->usba_ep[i]);
@@ -1753,6 +1773,8 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
if (ep_status) {
int i;
+ usba_int_enb_set(udc, USBA_DET_SUSPEND);
+
for (i = 0; i < udc->num_ep; i++)
if (ep_status & (1 << i)) {
if (ep_is_control(&udc->usba_ep[i]))
@@ -1766,7 +1788,9 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
struct usba_ep *ep0, *ep;
int i, n;
- usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
+ usba_writel(udc, INT_CLR,
+ USBA_END_OF_RESET|USBA_END_OF_RESUME
+ |USBA_DET_SUSPEND|USBA_WAKE_UP);
generate_bias_pulse(udc);
reset_all_endpoints(udc);
@@ -1793,7 +1817,12 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
usba_ep_writel(ep0, CTL_ENB,
USBA_EPT_ENABLE | USBA_RX_SETUP);
- usba_int_enb_set(udc, int_enb | USBA_BF(EPT_INT, 1) |
+
+ /* If we get reset while suspended... */
+ udc->suspended = false;
+ usba_int_enb_clear(udc, USBA_WAKE_UP);
+
+ usba_int_enb_set(udc, USBA_BF(EPT_INT, 1) |
USBA_DET_SUSPEND | USBA_END_OF_RESUME);
/*
@@ -1827,6 +1856,8 @@ static int start_clock(struct usba_udc *udc)
if (udc->clocked)
return 0;
+ pm_stay_awake(&udc->pdev->dev);
+
ret = clk_prepare_enable(udc->pclk);
if (ret)
return ret;
@@ -1849,6 +1880,8 @@ static void stop_clock(struct usba_udc *udc)
clk_disable_unprepare(udc->pclk);
udc->clocked = false;
+
+ pm_relax(&udc->pdev->dev);
}
static int usba_start(struct usba_udc *udc)
@@ -1860,9 +1893,19 @@ static int usba_start(struct usba_udc *udc)
if (ret)
return ret;
+ if (udc->suspended)
+ return 0;
+
spin_lock_irqsave(&udc->lock, flags);
toggle_bias(udc, 1);
usba_writel(udc, CTRL, USBA_ENABLE_MASK);
+ /* Clear all requested and pending interrupts... */
+ usba_writel(udc, INT_ENB, 0);
+ udc->int_enb_cache = 0;
+ usba_writel(udc, INT_CLR,
+ USBA_END_OF_RESET|USBA_END_OF_RESUME
+ |USBA_DET_SUSPEND|USBA_WAKE_UP);
+ /* ...and enable just 'reset' IRQ to get us started */
usba_int_enb_set(udc, USBA_END_OF_RESET);
spin_unlock_irqrestore(&udc->lock, flags);
@@ -1873,6 +1916,9 @@ static void usba_stop(struct usba_udc *udc)
{
unsigned long flags;
+ if (udc->suspended)
+ return;
+
spin_lock_irqsave(&udc->lock, flags);
udc->gadget.speed = USB_SPEED_UNKNOWN;
reset_all_endpoints(udc);
@@ -1900,6 +1946,7 @@ static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
if (vbus) {
usba_start(udc);
} else {
+ udc->suspended = false;
usba_stop(udc);
if (udc->driver->disconnect)
@@ -1963,6 +2010,7 @@ static int atmel_usba_stop(struct usb_gadget *gadget)
if (fifo_mode == 0)
udc->configured_ep = 1;
+ udc->suspended = false;
usba_stop(udc);
udc->driver = NULL;
@@ -2276,6 +2324,7 @@ static int usba_udc_suspend(struct device *dev)
mutex_lock(&udc->vbus_mutex);
if (!device_may_wakeup(dev)) {
+ udc->suspended = false;
usba_stop(udc);
goto out;
}
@@ -2285,10 +2334,13 @@ static int usba_udc_suspend(struct device *dev)
* to request vbus irq, assuming always on.
*/
if (udc->vbus_pin) {
+ /* FIXME: right to stop here...??? */
usba_stop(udc);
enable_irq_wake(gpiod_to_irq(udc->vbus_pin));
}
+ enable_irq_wake(udc->irq);
+
out:
mutex_unlock(&udc->vbus_mutex);
return 0;
@@ -2302,8 +2354,12 @@ static int usba_udc_resume(struct device *dev)
if (!udc->driver)
return 0;
- if (device_may_wakeup(dev) && udc->vbus_pin)
- disable_irq_wake(gpiod_to_irq(udc->vbus_pin));
+ if (device_may_wakeup(dev)) {
+ if (udc->vbus_pin)
+ disable_irq_wake(gpiod_to_irq(udc->vbus_pin));
+
+ disable_irq_wake(udc->irq);
+ }
/* If Vbus is present, enable the controller and wait for reset */
mutex_lock(&udc->vbus_mutex);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 030bf797cd25..a0225e4543d4 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -331,6 +331,7 @@ struct usba_udc {
struct usba_ep *usba_ep;
bool bias_pulse_needed;
bool clocked;
+ bool suspended;
u16 devstatus;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index baf72f95f0f1..8414fac74493 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -617,21 +617,7 @@ static int dummy_enable(struct usb_ep *_ep,
_ep->name,
desc->bEndpointAddress & 0x0f,
(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- ({ char *val;
- switch (usb_endpoint_type(desc)) {
- case USB_ENDPOINT_XFER_BULK:
- val = "bulk";
- break;
- case USB_ENDPOINT_XFER_ISOC:
- val = "iso";
- break;
- case USB_ENDPOINT_XFER_INT:
- val = "intr";
- break;
- default:
- val = "ctrl";
- break;
- } val; }),
+ usb_ep_type_string(usb_endpoint_type(desc)),
max, ep->stream_en ? "enabled" : "disabled");
/* at this point real hardware should be NAKing transfers
@@ -979,8 +965,18 @@ static int dummy_udc_start(struct usb_gadget *g,
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
- if (driver->max_speed == USB_SPEED_UNKNOWN)
+ switch (g->speed) {
+ /* All the speeds we support */
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+ driver->max_speed);
return -EINVAL;
+ }
/*
* SLAVE side init ... the layer above hardware, which
@@ -1784,9 +1780,10 @@ static void dummy_timer(struct timer_list *t)
/* Bus speed is 500000 bytes/ms, so use a little less */
total = 490000;
break;
- default:
+ default: /* Can't happen */
dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
- return;
+ total = 0;
+ break;
}
/* FIXME if HZ != 1000 this will probably misbehave ... */
@@ -1828,7 +1825,7 @@ restart:
/* Used up this frame's bandwidth? */
if (total <= 0)
- break;
+ continue;
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index b0781771704e..d8f1c60793ed 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -115,6 +115,11 @@ struct lpc32xx_ep {
bool wedge;
};
+enum atx_type {
+ ISP1301,
+ STOTG04,
+};
+
/*
* Common UDC structure
*/
@@ -129,8 +134,6 @@ struct lpc32xx_udc {
/* Board and device specific */
struct lpc32xx_usbd_cfg *board;
- u32 io_p_start;
- u32 io_p_size;
void __iomem *udp_baseaddr;
int udp_irq[4];
struct clk *usb_slv_clk;
@@ -151,10 +154,10 @@ struct lpc32xx_udc {
u8 last_vbus;
int pullup;
int poweron;
+ enum atx_type atx;
/* Work queues related to I2C support */
struct work_struct pullup_job;
- struct work_struct vbus_job;
struct work_struct power_job;
/* USB device peripheral - various */
@@ -553,6 +556,15 @@ static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
/* Primary initialization sequence for the ISP1301 transceiver */
static void isp1301_udc_configure(struct lpc32xx_udc *udc)
{
+ u8 value;
+ s32 vendor, product;
+
+ vendor = i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00);
+ product = i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02);
+
+ if (vendor == 0x0483 && product == 0xa0c4)
+ udc->atx = STOTG04;
+
/* LPC32XX only supports DAT_SE0 USB mode */
/* This sequence is important */
@@ -572,8 +584,12 @@ static void isp1301_udc_configure(struct lpc32xx_udc *udc)
*/
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
(ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
+
+ value = MC2_BI_DI;
+ if (udc->atx != STOTG04)
+ value |= MC2_SPD_SUSP_CTRL;
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
- ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_SPD_SUSP_CTRL));
+ ISP1301_I2C_MODE_CONTROL_2, value);
/* Driver VBUS_DRV high or low depending on board setup */
if (udc->board->vbus_drv_pol != 0)
@@ -601,24 +617,19 @@ static void isp1301_udc_configure(struct lpc32xx_udc *udc)
(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
OTG1_VBUS_DISCHRG);
- /* Clear and enable VBUS high edge interrupt */
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
- ISP1301_I2C_INTERRUPT_FALLING, INT_VBUS_VLD);
- i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
- i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
- ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);
- dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
- i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
- dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
- i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02));
+ dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n", vendor);
+ dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n", product);
dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
+
}
/* Enables or disables the USB device pullup via the ISP1301 transceiver */
@@ -661,6 +672,10 @@ static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
/* Powers up or down the ISP1301 transceiver */
static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
{
+ /* There is no "global power down" register for stotg04 */
+ if (udc->atx == STOTG04)
+ return;
+
if (enable != 0)
/* Power up ISP1301 - this ISP1301 will automatically wakeup
when VBUS is detected */
@@ -2830,11 +2845,9 @@ static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
* VBUS detection, pullup handler, and Gadget cable state notification
*
*/
-static void vbus_work(struct work_struct *work)
+static void vbus_work(struct lpc32xx_udc *udc)
{
u8 value;
- struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
- vbus_job);
if (udc->enabled != 0) {
/* Discharge VBUS real quick */
@@ -2870,18 +2883,13 @@ static void vbus_work(struct work_struct *work)
lpc32xx_vbus_session(&udc->gadget, udc->vbus);
}
}
-
- /* Re-enable after completion */
- enable_irq(udc->udp_irq[IRQ_USB_ATX]);
}
static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
{
struct lpc32xx_udc *udc = _udc;
- /* Defer handling of VBUS IRQ to work queue */
- disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]);
- schedule_work(&udc->vbus_job);
+ vbus_work(udc);
return IRQ_HANDLED;
}
@@ -2890,7 +2898,6 @@ static int lpc32xx_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct lpc32xx_udc *udc = to_udc(gadget);
- int i;
if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) {
dev_err(udc->dev, "bad parameter.\n");
@@ -2910,22 +2917,25 @@ static int lpc32xx_start(struct usb_gadget *gadget,
/* Force VBUS process once to check for cable insertion */
udc->last_vbus = udc->vbus = 0;
- schedule_work(&udc->vbus_job);
+ vbus_work(udc);
- /* Do not re-enable ATX IRQ (3) */
- for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++)
- enable_irq(udc->udp_irq[i]);
+ /* enable interrupts */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_FALLING, INT_SESS_VLD | INT_VBUS_VLD);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_RISING, INT_SESS_VLD | INT_VBUS_VLD);
return 0;
}
static int lpc32xx_stop(struct usb_gadget *gadget)
{
- int i;
struct lpc32xx_udc *udc = to_udc(gadget);
- for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
- disable_irq(udc->udp_irq[i]);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
if (udc->clocked) {
spin_lock(&udc->lock);
@@ -2999,7 +3009,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
dma_addr_t dma_handle;
struct device_node *isp1301_node;
- udc = kmemdup(&controller_template, sizeof(*udc), GFP_KERNEL);
+ udc = devm_kmemdup(dev, &controller_template, sizeof(*udc), GFP_KERNEL);
if (!udc)
return -ENOMEM;
@@ -3022,8 +3032,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
if (!udc->isp1301_i2c_client) {
- retval = -EPROBE_DEFER;
- goto phy_fail;
+ return -EPROBE_DEFER;
}
dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
@@ -3032,7 +3041,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (retval)
- goto resource_fail;
+ return retval;
udc->board = &lpc32xx_usbddata;
@@ -3045,10 +3054,8 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
* IORESOURCE_IRQ, USB transceiver interrupt number
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- retval = -ENXIO;
- goto resource_fail;
- }
+ if (!res)
+ return -ENXIO;
spin_lock_init(&udc->lock);
@@ -3058,45 +3065,33 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
if (udc->udp_irq[i] < 0) {
dev_err(udc->dev,
"irq resource %d not available!\n", i);
- retval = udc->udp_irq[i];
- goto irq_fail;
+ return udc->udp_irq[i];
}
}
- udc->io_p_start = res->start;
- udc->io_p_size = resource_size(res);
- if (!request_mem_region(udc->io_p_start, udc->io_p_size, driver_name)) {
- dev_err(udc->dev, "someone's using UDC memory\n");
- retval = -EBUSY;
- goto request_mem_region_fail;
- }
-
- udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size);
+ udc->udp_baseaddr = devm_ioremap_resource(dev, res);
if (!udc->udp_baseaddr) {
- retval = -ENOMEM;
dev_err(udc->dev, "IO map failure\n");
- goto io_map_fail;
+ return -ENOMEM;
}
/* Get USB device clock */
- udc->usb_slv_clk = clk_get(&pdev->dev, NULL);
+ udc->usb_slv_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(udc->usb_slv_clk)) {
dev_err(udc->dev, "failed to acquire USB device clock\n");
- retval = PTR_ERR(udc->usb_slv_clk);
- goto usb_clk_get_fail;
+ return PTR_ERR(udc->usb_slv_clk);
}
/* Enable USB device clock */
retval = clk_prepare_enable(udc->usb_slv_clk);
if (retval < 0) {
dev_err(udc->dev, "failed to start USB device clock\n");
- goto usb_clk_enable_fail;
+ return retval;
}
/* Setup deferred workqueue data */
udc->poweron = udc->pullup = 0;
INIT_WORK(&udc->pullup_job, pullup_work);
- INIT_WORK(&udc->vbus_job, vbus_work);
#ifdef CONFIG_PM
INIT_WORK(&udc->power_job, power_work);
#endif
@@ -3134,47 +3129,44 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
/* Request IRQs - low and high priority USB device IRQs are routed to
* the same handler, while the DMA interrupt is routed elsewhere */
- retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq,
- 0, "udc_lp", udc);
+ retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_LP],
+ lpc32xx_usb_lp_irq, 0, "udc_lp", udc);
if (retval < 0) {
dev_err(udc->dev, "LP request irq %d failed\n",
udc->udp_irq[IRQ_USB_LP]);
- goto irq_lp_fail;
+ goto irq_req_fail;
}
- retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq,
- 0, "udc_hp", udc);
+ retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_HP],
+ lpc32xx_usb_hp_irq, 0, "udc_hp", udc);
if (retval < 0) {
dev_err(udc->dev, "HP request irq %d failed\n",
udc->udp_irq[IRQ_USB_HP]);
- goto irq_hp_fail;
+ goto irq_req_fail;
}
- retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA],
- lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
+ retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_DEVDMA],
+ lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
if (retval < 0) {
dev_err(udc->dev, "DEV request irq %d failed\n",
udc->udp_irq[IRQ_USB_DEVDMA]);
- goto irq_dev_fail;
+ goto irq_req_fail;
}
/* The transceiver interrupt is used for VBUS detection and will
kick off the VBUS handler function */
- retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq,
- 0, "udc_otg", udc);
+ retval = devm_request_threaded_irq(dev, udc->udp_irq[IRQ_USB_ATX], NULL,
+ lpc32xx_usb_vbus_irq, IRQF_ONESHOT,
+ "udc_otg", udc);
if (retval < 0) {
dev_err(udc->dev, "VBUS request irq %d failed\n",
udc->udp_irq[IRQ_USB_ATX]);
- goto irq_xcvr_fail;
+ goto irq_req_fail;
}
/* Initialize wait queue */
init_waitqueue_head(&udc->ep_disable_wait_queue);
atomic_set(&udc->enabled_ep_cnt, 0);
- /* Keep all IRQs disabled until GadgetFS starts up */
- for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
- disable_irq(udc->udp_irq[i]);
-
retval = usb_add_gadget_udc(dev, &udc->gadget);
if (retval < 0)
goto add_gadget_fail;
@@ -3190,32 +3182,15 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
return 0;
add_gadget_fail:
- free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
-irq_xcvr_fail:
- free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
-irq_dev_fail:
- free_irq(udc->udp_irq[IRQ_USB_HP], udc);
-irq_hp_fail:
- free_irq(udc->udp_irq[IRQ_USB_LP], udc);
-irq_lp_fail:
+irq_req_fail:
dma_pool_destroy(udc->dd_cache);
dma_alloc_fail:
dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
udc->udca_v_base, udc->udca_p_base);
i2c_fail:
clk_disable_unprepare(udc->usb_slv_clk);
-usb_clk_enable_fail:
- clk_put(udc->usb_slv_clk);
-usb_clk_get_fail:
- iounmap(udc->udp_baseaddr);
-io_map_fail:
- release_mem_region(udc->io_p_start, udc->io_p_size);
dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
-request_mem_region_fail:
-irq_fail:
-resource_fail:
-phy_fail:
- kfree(udc);
+
return retval;
}
@@ -3231,24 +3206,14 @@ static int lpc32xx_udc_remove(struct platform_device *pdev)
udc_disable(udc);
pullup(udc, 0);
- free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
-
device_init_wakeup(&pdev->dev, 0);
remove_debug_file(udc);
dma_pool_destroy(udc->dd_cache);
dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
udc->udca_v_base, udc->udca_p_base);
- free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
- free_irq(udc->udp_irq[IRQ_USB_HP], udc);
- free_irq(udc->udp_irq[IRQ_USB_LP], udc);
clk_disable_unprepare(udc->usb_slv_clk);
- clk_put(udc->usb_slv_clk);
-
- iounmap(udc->udp_baseaddr);
- release_mem_region(udc->io_p_start, udc->io_p_size);
- kfree(udc);
return 0;
}
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index c2011cd7df8c..564aeee1a1fe 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -573,8 +573,7 @@ net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
/* completion */
if (unlikely(cleanup || is_short ||
- ((req->req.actual == req->req.length)
- && !req->req.zero))) {
+ req->req.actual == req->req.length)) {
if (cleanup) {
net2272_out_flush(ep);
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 898339e5df10..b6bbe2e448ba 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -789,8 +789,7 @@ static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
(void) readl(&ep->regs->ep_rsp);
}
- return is_short || ((req->req.actual == req->req.length) &&
- !req->req.zero);
+ return is_short || req->req.actual == req->req.length;
}
/* fill out dma descriptor to match a given request */
@@ -1058,7 +1057,7 @@ net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
/* PIO ... stuff the fifo, or unblock it. */
if (ep->is_in)
write_fifo(ep, _req);
- else if (list_empty(&ep->queue)) {
+ else {
u32 s;
/* OUT FIFO might have packet(s) buffered */
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 3d12cdd5f999..3235d5307403 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -727,8 +727,7 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
}
ed->speed = (urb->dev->speed == USB_SPEED_LOW) ?
FHCI_LOW_SPEED : FHCI_FULL_SPEED;
- ed->max_pkt_size = usb_maxpacket(urb->dev,
- urb->pipe, usb_pipeout(urb->pipe));
+ ed->max_pkt_size = usb_endpoint_maxp(&urb->ep->desc);
urb->ep->hcpriv = ed;
fhci_dbg(fhci, "new ep speed=%d max_pkt_size=%d\n",
ed->speed, ed->max_pkt_size);
@@ -768,8 +767,7 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
if (urb->transfer_flags & URB_ZERO_PACKET &&
urb->transfer_buffer_length > 0 &&
((urb->transfer_buffer_length %
- usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe))) == 0))
+ usb_endpoint_maxp(&urb->ep->desc)) == 0))
urb_state = US_BULK0;
while (data_len > 4096) {
td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
@@ -807,8 +805,8 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
break;
case FHCI_TF_CTRL:
ed->dev_addr = usb_pipedevice(urb->pipe);
- ed->max_pkt_size = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ ed->max_pkt_size = usb_endpoint_maxp(&urb->ep->desc);
+
/* setup stage */
td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index ca8a94f15ac0..38183ac438c6 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -40,8 +40,6 @@ struct da8xx_ohci_hcd {
struct phy *usb11_phy;
struct regulator *vbus_reg;
struct notifier_block nb;
- unsigned int reg_enabled;
- struct gpio_desc *vbus_gpio;
struct gpio_desc *oc_gpio;
};
@@ -92,29 +90,21 @@ static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
struct device *dev = hcd->self.controller;
int ret;
- if (da8xx_ohci->vbus_gpio) {
- gpiod_set_value_cansleep(da8xx_ohci->vbus_gpio, on);
- return 0;
- }
-
if (!da8xx_ohci->vbus_reg)
return 0;
- if (on && !da8xx_ohci->reg_enabled) {
+ if (on) {
ret = regulator_enable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to enable regulator: %d\n", ret);
return ret;
}
- da8xx_ohci->reg_enabled = 1;
-
- } else if (!on && da8xx_ohci->reg_enabled) {
+ } else {
ret = regulator_disable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to disable regulator: %d\n", ret);
return ret;
}
- da8xx_ohci->reg_enabled = 0;
}
return 0;
@@ -124,9 +114,6 @@ static int ohci_da8xx_get_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- if (da8xx_ohci->vbus_gpio)
- return gpiod_get_value_cansleep(da8xx_ohci->vbus_gpio);
-
if (da8xx_ohci->vbus_reg)
return regulator_is_enabled(da8xx_ohci->vbus_reg);
@@ -159,9 +146,6 @@ static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- if (da8xx_ohci->vbus_gpio)
- return 1;
-
if (da8xx_ohci->vbus_reg)
return 1;
@@ -206,12 +190,18 @@ static int ohci_da8xx_regulator_event(struct notifier_block *nb,
return 0;
}
-static irqreturn_t ohci_da8xx_oc_handler(int irq, void *data)
+static irqreturn_t ohci_da8xx_oc_thread(int irq, void *data)
{
struct da8xx_ohci_hcd *da8xx_ohci = data;
+ struct device *dev = da8xx_ohci->hcd->self.controller;
+ int ret;
- if (gpiod_get_value(da8xx_ohci->oc_gpio))
- gpiod_set_value(da8xx_ohci->vbus_gpio, 0);
+ if (gpiod_get_value_cansleep(da8xx_ohci->oc_gpio) &&
+ da8xx_ohci->vbus_reg) {
+ ret = regulator_disable(da8xx_ohci->vbus_reg);
+ if (ret)
+ dev_err(dev, "Failed to disable regulator: %d\n", ret);
+ }
return IRQ_HANDLED;
}
@@ -424,11 +414,6 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
}
}
- da8xx_ohci->vbus_gpio = devm_gpiod_get_optional(dev, "vbus",
- GPIOD_OUT_HIGH);
- if (IS_ERR(da8xx_ohci->vbus_gpio))
- goto err;
-
da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
if (IS_ERR(da8xx_ohci->oc_gpio))
goto err;
@@ -438,8 +423,9 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
if (oc_irq < 0)
goto err;
- error = devm_request_irq(dev, oc_irq, ohci_da8xx_oc_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ error = devm_request_threaded_irq(dev, oc_irq, NULL,
+ ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"OHCI over-current indicator", da8xx_ohci);
if (error)
goto err;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 6343fbacd244..4a5c9b599c57 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3203,6 +3203,8 @@ static int __init u132_hcd_init(void)
return -ENODEV;
printk(KERN_INFO "driver %s\n", hcd_name);
workqueue = create_singlethread_workqueue("u132");
+ if (!workqueue)
+ return -ENOMEM;
retval = platform_driver_register(&u132_platform_driver);
if (retval)
destroy_workqueue(workqueue);
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index d932cc31711e..52e32644a4b2 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -421,8 +421,6 @@ static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
string_length = xhci_dbc_populate_strings(dbc->string);
xhci_dbc_init_contexts(xhci, string_length);
- mmiowb();
-
xhci_dbc_eps_init(xhci);
dbc->state = DS_INITIALIZED;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 96a740543183..3abe70ff1b1e 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -487,8 +487,8 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
/* Write 1 to disable the port */
writel(port_status | PORT_PE, addr);
port_status = readl(addr);
- xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
- wIndex, port_status);
+ xhci_dbg(xhci, "disable port %d-%d, portsc: 0x%x\n",
+ hcd->self.busnum, wIndex + 1, port_status);
}
static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
@@ -537,8 +537,9 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
/* Change bits are all write 1 to clear */
writel(port_status | status, addr);
port_status = readl(addr);
- xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
- port_change_bit, wIndex, port_status);
+
+ xhci_dbg(xhci, "clear port%d %s change, portsc: 0x%x\n",
+ wIndex + 1, port_change_bit, port_status);
}
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
@@ -565,13 +566,16 @@ static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
rhub = xhci_get_rhub(hcd);
port = rhub->ports[index];
temp = readl(port->addr);
+
+ xhci_dbg(xhci, "set port power %d-%d %s, portsc: 0x%x\n",
+ hcd->self.busnum, index + 1, on ? "ON" : "OFF", temp);
+
temp = xhci_port_state_to_neutral(temp);
+
if (on) {
/* Power on */
writel(temp | PORT_POWER, port->addr);
- temp = readl(port->addr);
- xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n",
- index, temp);
+ readl(port->addr);
} else {
/* Power off */
writel(temp & ~PORT_POWER, port->addr);
@@ -666,12 +670,17 @@ void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
u32 link_state)
{
u32 temp;
+ u32 portsc;
- temp = readl(port->addr);
- temp = xhci_port_state_to_neutral(temp);
+ portsc = readl(port->addr);
+ temp = xhci_port_state_to_neutral(portsc);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | link_state;
writel(temp, port->addr);
+
+ xhci_dbg(xhci, "Set port %d-%d link state, portsc: 0x%x, write 0x%x",
+ port->rhub->hcd->self.busnum, port->hcd_portnum + 1,
+ portsc, temp);
}
static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
@@ -840,7 +849,9 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
} else if (time_after_eq(jiffies, bus_state->resume_done[wIndex])) {
int time_left;
- xhci_dbg(xhci, "Resume USB2 port %d\n", wIndex + 1);
+ xhci_dbg(xhci, "resume USB2 port %d-%d\n",
+ hcd->self.busnum, wIndex + 1);
+
bus_state->resume_done[wIndex] = 0;
clear_bit(wIndex, &bus_state->resuming_ports);
@@ -867,9 +878,8 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
} else {
int port_status = readl(port->addr);
- xhci_warn(xhci, "Port resume %i msec timed out, portsc = 0x%x\n",
- XHCI_MAX_REXIT_TIMEOUT_MS,
- port_status);
+ xhci_warn(xhci, "Port resume timed out, port %d-%d: 0x%x\n",
+ hcd->self.busnum, wIndex + 1, port_status);
*status |= USB_PORT_STAT_SUSPEND;
clear_bit(wIndex, &bus_state->rexit_ports);
}
@@ -1124,9 +1134,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (status == 0xffffffff)
goto error;
- xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n",
- wIndex, temp);
- xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+ xhci_dbg(xhci, "Get port status %d-%d read: 0x%x, return 0x%x",
+ hcd->self.busnum, wIndex + 1, temp, status);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
/* if USB 3.1 extended port status return additional 4 bytes */
@@ -1182,7 +1191,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = readl(ports[wIndex]->addr);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
- xhci_warn(xhci, "USB core suspending device not in U0/U1/U2.\n");
+ xhci_warn(xhci, "USB core suspending port %d-%d not in U0/U1/U2\n",
+ hcd->self.busnum, wIndex + 1);
goto error;
}
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 60987c787e44..026fe18972d3 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -206,19 +206,6 @@ static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
return xhci_mtk_host_enable(mtk);
}
-/* ignore the error if the clock does not exist */
-static struct clk *optional_clk_get(struct device *dev, const char *id)
-{
- struct clk *opt_clk;
-
- opt_clk = devm_clk_get(dev, id);
- /* ignore error number except EPROBE_DEFER */
- if (IS_ERR(opt_clk) && (PTR_ERR(opt_clk) != -EPROBE_DEFER))
- opt_clk = NULL;
-
- return opt_clk;
-}
-
static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk)
{
struct device *dev = mtk->dev;
@@ -229,15 +216,15 @@ static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk)
return PTR_ERR(mtk->sys_clk);
}
- mtk->ref_clk = optional_clk_get(dev, "ref_ck");
+ mtk->ref_clk = devm_clk_get_optional(dev, "ref_ck");
if (IS_ERR(mtk->ref_clk))
return PTR_ERR(mtk->ref_clk);
- mtk->mcu_clk = optional_clk_get(dev, "mcu_ck");
+ mtk->mcu_clk = devm_clk_get_optional(dev, "mcu_ck");
if (IS_ERR(mtk->mcu_clk))
return PTR_ERR(mtk->mcu_clk);
- mtk->dma_clk = optional_clk_get(dev, "dma_ck");
+ mtk->dma_clk = devm_clk_get_optional(dev, "dma_ck");
return PTR_ERR_OR_ZERO(mtk->dma_clk);
}
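
The conversion above works because devm_clk_get_optional() returns NULL, not an error, when the clock is simply absent, and the clk API treats a NULL clock as a no-op; only real errors (including -EPROBE_DEFER) propagate. A minimal sketch of the consumer-side pattern, with my_clk_init() and struct my_priv as hypothetical names:

static int my_clk_init(struct device *dev, struct my_priv *priv)
{
	/* NULL when the firmware describes no "bus" clock -- that is fine */
	priv->bus_clk = devm_clk_get_optional(dev, "bus");
	if (IS_ERR(priv->bus_clk))
		return PTR_ERR(priv->bus_clk);	/* e.g. -EPROBE_DEFER */

	/* clk_prepare_enable(NULL) is a no-op and returns 0 */
	return clk_prepare_enable(priv->bus_clk);
}

The same property makes clk_disable_unprepare() safe on a NULL clock, which is what the xhci-plat error paths and the usb3503 hunks further down rely on when they drop their explicit NULL checks.
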
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 0ac4ec975547..998241f5fce3 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -165,8 +165,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd;
- struct clk *clk;
- struct clk *reg_clk;
int ret;
int irq;
@@ -235,31 +233,32 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
+ xhci = hcd_to_xhci(hcd);
+
/*
* Not all platforms have clks, so it is not an error if the
* clock does not exist.
*/
- reg_clk = devm_clk_get(&pdev->dev, "reg");
- if (!IS_ERR(reg_clk)) {
- ret = clk_prepare_enable(reg_clk);
- if (ret)
- goto put_hcd;
- } else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ xhci->reg_clk = devm_clk_get_optional(&pdev->dev, "reg");
+ if (IS_ERR(xhci->reg_clk)) {
+ ret = PTR_ERR(xhci->reg_clk);
goto put_hcd;
}
- clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- goto disable_reg_clk;
- } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ ret = clk_prepare_enable(xhci->reg_clk);
+ if (ret)
+ goto put_hcd;
+
+ xhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(xhci->clk)) {
+ ret = PTR_ERR(xhci->clk);
goto disable_reg_clk;
}
- xhci = hcd_to_xhci(hcd);
+ ret = clk_prepare_enable(xhci->clk);
+ if (ret)
+ goto disable_reg_clk;
+
priv_match = of_device_get_match_data(&pdev->dev);
if (priv_match) {
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -271,8 +270,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
device_wakeup_enable(hcd->self.controller);
- xhci->clk = clk;
- xhci->reg_clk = reg_clk;
xhci->main_hcd = hcd;
xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
dev_name(&pdev->dev), hcd);
@@ -348,10 +345,10 @@ put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
disable_clk:
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(xhci->clk);
disable_reg_clk:
- clk_disable_unprepare(reg_clk);
+ clk_disable_unprepare(xhci->reg_clk);
put_hcd:
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9215a28dad40..fed3385aeac0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1569,18 +1569,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
"WARN: xHC returned failed port status event\n");
port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
- xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
-
max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
if ((port_id <= 0) || (port_id > max_ports)) {
- xhci_warn(xhci, "Invalid port id %d\n", port_id);
+ xhci_warn(xhci, "Port change event with invalid port ID %d\n",
+ port_id);
inc_deq(xhci, xhci->event_ring);
return;
}
port = &xhci->hw_ports[port_id - 1];
if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
- xhci_warn(xhci, "Event for invalid port %u\n", port_id);
+ xhci_warn(xhci, "Port change event, no port for port ID %u\n",
+ port_id);
bogus_port_status = true;
goto cleanup;
}
@@ -1597,6 +1598,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
hcd_portnum = port->hcd_portnum;
portsc = readl(port->addr);
+ xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
+ hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
+
trace_xhci_handle_port_status(hcd_portnum, portsc);
if (hcd->state == HC_STATE_SUSPENDED) {
@@ -3275,6 +3279,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_IOC;
more_trbs_coming = false;
td->last_trb = ring->enqueue;
+
+ if (xhci_urb_suitable_for_idt(urb)) {
+ memcpy(&send_addr, urb->transfer_buffer,
+ trb_buff_len);
+ field |= TRB_IDT;
+ }
}
/* Only set interrupt on short packet for IN endpoints */
@@ -3414,6 +3424,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (urb->transfer_buffer_length > 0) {
u32 length_field, remainder;
+ if (xhci_urb_suitable_for_idt(urb)) {
+ memcpy(&urb->transfer_dma, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ field |= TRB_IDT;
+ }
+
remainder = xhci_td_remainder(xhci, 0,
urb->transfer_buffer_length,
urb->transfer_buffer_length,
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index efb0cad8710e..294158113d62 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -161,6 +161,7 @@ struct tegra_xusb_soc {
} ports;
bool scale_ss_clock;
+ bool has_ipfs;
};
struct tegra_xusb {
@@ -637,16 +638,18 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void tegra_xusb_ipfs_config(struct tegra_xusb *tegra,
- struct resource *regs)
+static void tegra_xusb_config(struct tegra_xusb *tegra,
+ struct resource *regs)
{
u32 value;
- value = ipfs_readl(tegra, IPFS_XUSB_HOST_CONFIGURATION_0);
- value |= IPFS_EN_FPCI;
- ipfs_writel(tegra, value, IPFS_XUSB_HOST_CONFIGURATION_0);
+ if (tegra->soc->has_ipfs) {
+ value = ipfs_readl(tegra, IPFS_XUSB_HOST_CONFIGURATION_0);
+ value |= IPFS_EN_FPCI;
+ ipfs_writel(tegra, value, IPFS_XUSB_HOST_CONFIGURATION_0);
- usleep_range(10, 20);
+ usleep_range(10, 20);
+ }
/* Program BAR0 space */
value = fpci_readl(tegra, XUSB_CFG_4);
@@ -661,13 +664,15 @@ static void tegra_xusb_ipfs_config(struct tegra_xusb *tegra,
value |= XUSB_IO_SPACE_EN | XUSB_MEM_SPACE_EN | XUSB_BUS_MASTER_EN;
fpci_writel(tegra, value, XUSB_CFG_1);
- /* Enable interrupt assertion */
- value = ipfs_readl(tegra, IPFS_XUSB_HOST_INTR_MASK_0);
- value |= IPFS_IP_INT_MASK;
- ipfs_writel(tegra, value, IPFS_XUSB_HOST_INTR_MASK_0);
+ if (tegra->soc->has_ipfs) {
+ /* Enable interrupt assertion */
+ value = ipfs_readl(tegra, IPFS_XUSB_HOST_INTR_MASK_0);
+ value |= IPFS_IP_INT_MASK;
+ ipfs_writel(tegra, value, IPFS_XUSB_HOST_INTR_MASK_0);
- /* Set hysteresis */
- ipfs_writel(tegra, 0x80, IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0);
+ /* Set hysteresis */
+ ipfs_writel(tegra, 0x80, IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0);
+ }
}
static int tegra_xusb_clk_enable(struct tegra_xusb *tegra)
@@ -1015,10 +1020,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (IS_ERR(tegra->fpci_base))
return PTR_ERR(tegra->fpci_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tegra->ipfs_base))
- return PTR_ERR(tegra->ipfs_base);
+ if (tegra->soc->has_ipfs) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->ipfs_base))
+ return PTR_ERR(tegra->ipfs_base);
+ }
tegra->xhci_irq = platform_get_irq(pdev, 0);
if (tegra->xhci_irq < 0)
@@ -1208,7 +1215,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto disable_rpm;
}
- tegra_xusb_ipfs_config(tegra, regs);
+ tegra_xusb_config(tegra, regs);
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
@@ -1380,6 +1387,7 @@ static const struct tegra_xusb_soc tegra124_soc = {
.usb3 = { .offset = 0, .count = 2, },
},
.scale_ss_clock = true,
+ .has_ipfs = true,
};
MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
@@ -1411,12 +1419,38 @@ static const struct tegra_xusb_soc tegra210_soc = {
.usb3 = { .offset = 0, .count = 4, },
},
.scale_ss_clock = false,
+ .has_ipfs = true,
};
MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
+static const char * const tegra186_supply_names[] = {
+};
+
+static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
+ { .name = "usb3", .num = 3, },
+ { .name = "usb2", .num = 3, },
+ { .name = "hsic", .num = 1, },
+};
+
+static const struct tegra_xusb_soc tegra186_soc = {
+ .firmware = "nvidia/tegra186/xusb.bin",
+ .supply_names = tegra186_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra186_supply_names),
+ .phy_types = tegra186_phy_types,
+ .num_types = ARRAY_SIZE(tegra186_phy_types),
+ .ports = {
+ .usb3 = { .offset = 0, .count = 3, },
+ .usb2 = { .offset = 3, .count = 3, },
+ .hsic = { .offset = 6, .count = 1, },
+ },
+ .scale_ss_clock = false,
+ .has_ipfs = false,
+};
+
static const struct of_device_id tegra_xusb_of_match[] = {
{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
+ { .compatible = "nvidia,tegra186-xusb", .data = &tegra186_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
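
The new has_ipfs flag follows the usual pattern of carrying per-SoC capabilities in the of_device_id match data, so a single probe path can serve chips with and without the IPFS wrapper. A small sketch of that pattern with made-up names (struct my_soc_data, my_probe):

struct my_soc_data {
	bool has_wrapper;
};

static const struct my_soc_data soc_a = { .has_wrapper = true };
static const struct my_soc_data soc_b = { .has_wrapper = false };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,soc-a", .data = &soc_a },
	{ .compatible = "vendor,soc-b", .data = &soc_b },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_soc_data *soc = of_device_get_match_data(&pdev->dev);

	if (soc->has_wrapper) {
		/* map and program the wrapper registers only when present */
	}

	return 0;
}
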
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 88b427434bd8..052a269d86f2 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -366,6 +366,11 @@ DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
TP_ARGS(ctx)
);
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_add_endpoint,
+ TP_PROTO(struct xhci_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
TP_PROTO(struct xhci_slot_ctx *ctx),
TP_ARGS(ctx),
@@ -432,6 +437,31 @@ DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
TP_ARGS(ctx)
);
+DECLARE_EVENT_CLASS(xhci_log_ctrl_ctx,
+ TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
+ TP_ARGS(ctrl_ctx),
+ TP_STRUCT__entry(
+ __field(u32, drop)
+ __field(u32, add)
+ ),
+ TP_fast_assign(
+ __entry->drop = le32_to_cpu(ctrl_ctx->drop_flags);
+ __entry->add = le32_to_cpu(ctrl_ctx->add_flags);
+ ),
+ TP_printk("%s", xhci_decode_ctrl_ctx(__entry->drop, __entry->add)
+ )
+);
+
+DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_address_ctrl_ctx,
+ TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
+ TP_ARGS(ctrl_ctx)
+);
+
+DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_configure_endpoint_ctrl_ctx,
+ TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
+ TP_ARGS(ctrl_ctx)
+);
+
DECLARE_EVENT_CLASS(xhci_log_ring,
TP_PROTO(struct xhci_ring *ring),
TP_ARGS(ring),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7fa58c99f126..a9bb796794e3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -893,7 +893,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
struct xhci_port **ports;
int port_index;
unsigned long flags;
- u32 t1, t2;
+ u32 t1, t2, portsc;
spin_lock_irqsave(&xhci->lock, flags);
@@ -902,10 +902,15 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
ports = xhci->usb3_rhub.ports;
while (port_index--) {
t1 = readl(ports[port_index]->addr);
+ portsc = t1;
t1 = xhci_port_state_to_neutral(t1);
t2 = t1 & ~PORT_WAKE_BITS;
- if (t1 != t2)
+ if (t1 != t2) {
writel(t2, ports[port_index]->addr);
+ xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
+ xhci->usb3_rhub.hcd->self.busnum,
+ port_index + 1, portsc, t2);
+ }
}
/* disable usb2 ports Wake bits */
@@ -913,12 +918,16 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
ports = xhci->usb2_rhub.ports;
while (port_index--) {
t1 = readl(ports[port_index]->addr);
+ portsc = t1;
t1 = xhci_port_state_to_neutral(t1);
t2 = t1 & ~PORT_WAKE_BITS;
- if (t1 != t2)
+ if (t1 != t2) {
writel(t2, ports[port_index]->addr);
+ xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
+ xhci->usb2_rhub.hcd->self.busnum,
+ port_index + 1, portsc, t2);
+ }
}
-
spin_unlock_irqrestore(&xhci->lock, flags);
}
@@ -1238,6 +1247,21 @@ EXPORT_SYMBOL_GPL(xhci_resume);
/*-------------------------------------------------------------------------*/
+/*
+ * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT);
+ * in that case the actual data is copied into the TRB address register. This
+ * is limited to transfers of up to 8 bytes on output endpoints of any kind
+ * with wMaxPacketSize >= 8 bytes. If suitable for IDT, only one Transfer TRB
+ * per TD is allowed.
+ */
+static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ if (xhci_urb_suitable_for_idt(urb))
+ return 0;
+
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+}
+
/**
* xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
* HCDs. Find the index for an endpoint given its descriptor. Use the return
@@ -1783,6 +1807,7 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_container_ctx *in_ctx;
unsigned int ep_index;
struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_ep_ctx *ep_ctx;
u32 added_ctxs;
u32 new_add_flags, new_drop_flags;
struct xhci_virt_device *virt_dev;
@@ -1873,6 +1898,9 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
/* Store the usb_device pointer for later use */
ep->hcpriv = udev;
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ trace_xhci_add_endpoint(ep_ctx);
+
xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
@@ -2747,6 +2775,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
}
slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+
+ trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
trace_xhci_configure_endpoint(slot_ctx);
if (!ctx_change)
@@ -4012,6 +4042,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
le32_to_cpu(slot_ctx->dev_info) >> 27);
+ trace_xhci_address_ctrl_ctx(ctrl_ctx);
spin_lock_irqsave(&xhci->lock, flags);
trace_xhci_setup_device(virt_dev);
ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
@@ -5154,6 +5185,7 @@ static const struct hc_driver xhci_hc_driver = {
/*
* managing i/o requests and associated device resources
*/
+ .map_urb_for_dma = xhci_map_urb_for_dma,
.urb_enqueue = xhci_urb_enqueue,
.urb_dequeue = xhci_urb_dequeue,
.alloc_dev = xhci_alloc_dev,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 9334cdee382a..a450a99e90eb 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1303,6 +1303,8 @@ enum xhci_setup_dev {
#define TRB_IOC (1<<5)
/* The buffer pointer contains immediate data */
#define TRB_IDT (1<<6)
+/* TDs smaller than this might use IDT */
+#define TRB_IDT_MAX_SIZE 8
/* Block Event Interrupt */
#define TRB_BEI (1<<9)
@@ -2149,6 +2151,21 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
urb->stream_id);
}
+/*
+ * TODO: As per the spec, isochronous IDT transmissions are supported. We
+ * bypass them anyway, as we were unable to find a device that matches the
+ * constraints.
+ */
+static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
+{
+ if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) &&
+ usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
+ urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE)
+ return true;
+
+ return false;
+}
+
static inline char *xhci_slot_state_string(u32 state)
{
switch (state) {
@@ -2384,6 +2401,35 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
return str;
}
+static inline const char *xhci_decode_ctrl_ctx(unsigned long drop,
+ unsigned long add)
+{
+ static char str[1024];
+ unsigned int bit;
+ int ret = 0;
+
+ if (drop) {
+ ret = sprintf(str, "Drop:");
+ for_each_set_bit(bit, &drop, 32)
+ ret += sprintf(str + ret, " %d%s",
+ bit / 2,
+ bit % 2 ? "in":"out");
+ ret += sprintf(str + ret, ", ");
+ }
+
+ if (add) {
+ ret += sprintf(str + ret, "Add:%s%s",
+ (add & SLOT_FLAG) ? " slot":"",
+ (add & EP0_FLAG) ? " ep0":"");
+ add &= ~(SLOT_FLAG | EP0_FLAG);
+ for_each_set_bit(bit, &add, 32)
+ ret += sprintf(str + ret, " %d%s",
+ bit / 2,
+ bit % 2 ? "in":"out");
+ }
+ return str;
+}
+
static inline const char *xhci_decode_slot_context(u32 info, u32 info2,
u32 tt_info, u32 state)
{
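
To see how xhci_urb_suitable_for_idt() above is meant to be consumed: when a URB qualifies, the payload (at most TRB_IDT_MAX_SIZE bytes) is embedded directly in the TRB's 64-bit parameter instead of being DMA-mapped, and TRB_IDT is set in the control word. A sketch of that decision, with fill_trb_param() as a hypothetical helper rather than a function from the patch:

static u64 fill_trb_param(struct urb *urb, u32 *field)
{
	u64 param = 0;

	if (xhci_urb_suitable_for_idt(urb)) {
		/* embed up to TRB_IDT_MAX_SIZE (8) bytes of immediate data */
		memcpy(&param, urb->transfer_buffer, urb->transfer_buffer_length);
		*field |= TRB_IDT;
	} else {
		param = urb->transfer_dma;	/* regular DMA-mapped buffer */
	}

	return param;
}
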
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 8142c6b4c4cf..320fc4739835 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -788,11 +788,11 @@ static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
mem_reads8(hcd->regs, qtd->payload_addr,
qtd->data_buffer,
qtd->actual_length);
- /* Fall through (?) */
+ /* Fall through */
case OUT_PID:
qtd->urb->actual_length +=
qtd->actual_length;
- /* Fall through ... */
+ /* Fall through */
case SETUP_PID:
break;
}
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index be04c117fe80..c97f270338bf 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -142,7 +142,6 @@ config USB_FTDI_ELAN
config USB_APPLEDISPLAY
tristate "Apple Cinema Display support"
- select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to control the backlight of Apple Cinema
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 006762b72ff5..6581774bdfa4 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -307,7 +307,7 @@ static int ld_usb_open(struct inode *inode, struct file *file)
int retval;
struct usb_interface *interface;
- nonseekable_open(inode, file);
+ stream_open(inode, file);
subminor = iminor(inode);
interface = usb_find_interface(&ld_usb_driver, subminor);
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 04684849d683..4d6ae3795a88 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/nls.h>
@@ -222,11 +223,51 @@ static const struct usb251xb_data usb2517i_data = {
.product_str = "USB2517i",
};
+#ifdef CONFIG_GPIOLIB
+static int usb251xb_check_dev_children(struct device *dev, void *child)
+{
+ if (dev->type == &i2c_adapter_type) {
+ return device_for_each_child(dev, child,
+ usb251xb_check_dev_children);
+ }
+
+ return (dev == child);
+}
+
+static int usb251x_check_gpio_chip(struct usb251xb *hub)
+{
+ struct gpio_chip *gc = gpiod_to_chip(hub->gpio_reset);
+ struct i2c_adapter *adap = hub->i2c->adapter;
+ int ret;
+
+ if (!hub->gpio_reset)
+ return 0;
+
+ if (!gc)
+ return -EINVAL;
+
+ ret = usb251xb_check_dev_children(&adap->dev, gc->parent);
+ if (ret) {
+		dev_err(hub->dev, "Reset GPIO chip is on the same i2c bus\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#else
+static int usb251x_check_gpio_chip(struct usb251xb *hub)
+{
+ return 0;
+}
+#endif
+
static void usb251xb_reset(struct usb251xb *hub, int state)
{
if (!hub->gpio_reset)
return;
+ i2c_lock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
+
gpiod_set_value_cansleep(hub->gpio_reset, state);
/* wait for hub recovery/stabilization */
@@ -234,6 +275,8 @@ static void usb251xb_reset(struct usb251xb *hub, int state)
usleep_range(500, 750); /* >=500us at power on */
else
usleep_range(1, 10); /* >=1us at power down */
+
+ i2c_unlock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
}
static int usb251xb_connect(struct usb251xb *hub)
@@ -331,18 +374,31 @@ out_err:
}
#ifdef CONFIG_OF
+static void usb251xb_get_ports_field(struct usb251xb *hub,
+ const char *prop_name, u8 port_cnt, u8 *fld)
+{
+ struct device *dev = hub->dev;
+ struct property *prop;
+ const __be32 *p;
+ u32 port;
+
+ of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
+ if ((port >= 1) && (port <= port_cnt))
+ *fld |= BIT(port);
+ else
+ dev_warn(dev, "port %u doesn't exist\n", port);
+ }
+}
+
static int usb251xb_get_ofdata(struct usb251xb *hub,
struct usb251xb_data *data)
{
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
- int len, err, i;
- u32 port, property_u32 = 0;
- const u32 *cproperty_u32;
+ int len, err;
+ u32 property_u32 = 0;
const char *cproperty_char;
char str[USB251XB_STRING_BUFSIZE / 2];
- struct property *prop;
- const __be32 *p;
if (!np) {
dev_err(dev, "failed to get ofdata\n");
@@ -444,46 +500,16 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
hub->conf_data3 |= BIT(0);
hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES;
- cproperty_u32 = of_get_property(np, "non-removable-ports", &len);
- if (cproperty_u32 && (len / sizeof(u32)) > 0) {
- for (i = 0; i < len / sizeof(u32); i++) {
- u32 port = be32_to_cpu(cproperty_u32[i]);
-
- if ((port >= 1) && (port <= data->port_cnt))
- hub->non_rem_dev |= BIT(port);
- else
- dev_warn(dev, "NRD port %u doesn't exist\n",
- port);
- }
- }
+ usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt,
+ &hub->non_rem_dev);
hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF;
- cproperty_u32 = of_get_property(np, "sp-disabled-ports", &len);
- if (cproperty_u32 && (len / sizeof(u32)) > 0) {
- for (i = 0; i < len / sizeof(u32); i++) {
- u32 port = be32_to_cpu(cproperty_u32[i]);
-
- if ((port >= 1) && (port <= data->port_cnt))
- hub->port_disable_sp |= BIT(port);
- else
- dev_warn(dev, "PDS port %u doesn't exist\n",
- port);
- }
- }
+ usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt,
+ &hub->port_disable_sp);
hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS;
- cproperty_u32 = of_get_property(np, "bp-disabled-ports", &len);
- if (cproperty_u32 && (len / sizeof(u32)) > 0) {
- for (i = 0; i < len / sizeof(u32); i++) {
- u32 port = be32_to_cpu(cproperty_u32[i]);
-
- if ((port >= 1) && (port <= data->port_cnt))
- hub->port_disable_bp |= BIT(port);
- else
- dev_warn(dev, "PDB port %u doesn't exist\n",
- port);
- }
- }
+ usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt,
+ &hub->port_disable_bp);
hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
if (!of_property_read_u32(np, "sp-max-total-current-microamp",
@@ -546,10 +572,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
* register controls the USB DP/DM signal swapping for each port.
*/
hub->port_swap = USB251XB_DEF_PORT_SWAP;
- of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
- if (port <= data->port_cnt)
- hub->port_swap |= BIT(port);
- }
+ usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt,
+ &hub->port_swap);
+ if (of_get_property(np, "swap-us-lanes", NULL))
+ hub->port_swap |= BIT(0);
/* The following parameters are currently not exposed to devicetree, but
* may be as soon as needed.
@@ -621,6 +647,25 @@ static int usb251xb_probe(struct usb251xb *hub)
}
}
+ /*
+	 * The usb251x SMBus-slave SCL lane is muxed with the CFG_SEL0 pin, so
+	 * if anyone touches the bus at the moment the hub reset is released,
+	 * an invalid config may be latched by the usb251x. In particular, one
+	 * of the config modes makes the hub load default register values
+	 * without activating the SMBus-slave interface. If the hub
+	 * accidentally ends up in that mode, the driver's SMBus accesses will
+	 * fail. Normally we could just lock the SMBus segment the hub
+	 * i2c-interface resides on for the device-specific reset timing. But
+	 * the GPIO controller, which is used to handle the hub reset, might be
+	 * placed on the same i2c-bus segment. In that case an error should be
+	 * returned, since we can't safely use the GPIO controller to clear the
+	 * reset state (it may affect the hub configuration) and we can't lock
+	 * the i2c-bus segment (it would cause a deadlock).
+ */
+ err = usb251x_check_gpio_chip(hub);
+ if (err)
+ return err;
+
err = usb251xb_connect(hub);
if (err) {
dev_err(dev, "Failed to connect hub (%d)\n", err);
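
The reset handling above depends on keeping the SMBus segment quiet while the hub samples its strap pins, which is why usb251xb_reset() now brackets the GPIO toggle with i2c_lock_bus()/i2c_unlock_bus(). The same idea in isolation, with hub_reset_pulse() as a hypothetical name:

static void hub_reset_pulse(struct i2c_client *client, struct gpio_desc *reset)
{
	/* keep the segment quiet while the strap pins are (re)sampled */
	i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);

	gpiod_set_value_cansleep(reset, 1);
	usleep_range(1, 10);			/* hold reset for >= 1 us */
	gpiod_set_value_cansleep(reset, 0);
	usleep_range(500, 750);			/* >= 500 us recovery after release */

	i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
}

I2C_LOCK_SEGMENT only locks the branch behind a mux, which is also why the probe rejects a reset GPIO expander sitting on that same segment: toggling it would need the very bus that is held locked.
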
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index d5141aa79dd4..72f39a9751b5 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -172,7 +172,6 @@ static int usb3503_probe(struct usb3503 *hub)
hub->gpio_reset = pdata->gpio_reset;
hub->mode = pdata->initial_mode;
} else if (np) {
- struct clk *clk;
u32 rate = 0;
hub->port_off_mask = 0;
@@ -198,34 +197,29 @@ static int usb3503_probe(struct usb3503 *hub)
}
}
- clk = devm_clk_get(dev, "refclk");
- if (IS_ERR(clk) && PTR_ERR(clk) != -ENOENT) {
+ hub->clk = devm_clk_get_optional(dev, "refclk");
+ if (IS_ERR(hub->clk)) {
dev_err(dev, "unable to request refclk (%ld)\n",
- PTR_ERR(clk));
- return PTR_ERR(clk);
+ PTR_ERR(hub->clk));
+ return PTR_ERR(hub->clk);
}
- if (!IS_ERR(clk)) {
- hub->clk = clk;
-
- if (rate != 0) {
- err = clk_set_rate(hub->clk, rate);
- if (err) {
- dev_err(dev,
- "unable to set reference clock rate to %d\n",
- (int) rate);
- return err;
- }
- }
-
- err = clk_prepare_enable(hub->clk);
+ if (rate != 0) {
+ err = clk_set_rate(hub->clk, rate);
if (err) {
dev_err(dev,
- "unable to enable reference clock\n");
+ "unable to set reference clock rate to %d\n",
+ (int)rate);
return err;
}
}
+ err = clk_prepare_enable(hub->clk);
+ if (err) {
+ dev_err(dev, "unable to enable reference clock\n");
+ return err;
+ }
+
property = of_get_property(np, "disabled-ports", &len);
if (property && (len / sizeof(u32)) > 0) {
int i;
@@ -324,8 +318,7 @@ static int usb3503_i2c_remove(struct i2c_client *i2c)
struct usb3503 *hub;
hub = i2c_get_clientdata(i2c);
- if (hub->clk)
- clk_disable_unprepare(hub->clk);
+ clk_disable_unprepare(hub->clk);
return 0;
}
@@ -348,8 +341,7 @@ static int usb3503_platform_remove(struct platform_device *pdev)
struct usb3503 *hub;
hub = platform_get_drvdata(pdev);
- if (hub->clk)
- clk_disable_unprepare(hub->clk);
+ clk_disable_unprepare(hub->clk);
return 0;
}
@@ -358,18 +350,14 @@ static int usb3503_platform_remove(struct platform_device *pdev)
static int usb3503_suspend(struct usb3503 *hub)
{
usb3503_switch_mode(hub, USB3503_MODE_STANDBY);
-
- if (hub->clk)
- clk_disable_unprepare(hub->clk);
+ clk_disable_unprepare(hub->clk);
return 0;
}
static int usb3503_resume(struct usb3503 *hub)
{
- if (hub->clk)
- clk_prepare_enable(hub->clk);
-
+ clk_prepare_enable(hub->clk);
usb3503_switch_mode(hub, hub->mode);
return 0;
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6d9fd5f64903..7b306aa22d25 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -314,6 +314,7 @@ static void yurex_disconnect(struct usb_interface *interface)
usb_deregister_dev(interface, &yurex_class);
/* prevent more I/O from starting */
+ usb_poison_urb(dev->urb);
mutex_lock(&dev->io_mutex);
dev->interface = NULL;
mutex_unlock(&dev->io_mutex);
diff --git a/drivers/usb/mtu3/Makefile b/drivers/usb/mtu3/Makefile
index 4a9715812bf9..3bf8cbcc1add 100644
--- a/drivers/usb/mtu3/Makefile
+++ b/drivers/usb/mtu3/Makefile
@@ -2,10 +2,17 @@
ccflags-$(CONFIG_USB_MTU3_DEBUG) += -DDEBUG
+# define_trace.h needs to know how to find our header
+CFLAGS_mtu3_trace.o := -I$(src)
+
obj-$(CONFIG_USB_MTU3) += mtu3.o
mtu3-y := mtu3_plat.o
+ifneq ($(CONFIG_TRACING),)
+ mtu3-y += mtu3_trace.o
+endif
+
ifneq ($(filter y,$(CONFIG_USB_MTU3_HOST) $(CONFIG_USB_MTU3_DUAL_ROLE)),)
mtu3-y += mtu3_host.o
endif
@@ -17,3 +24,7 @@ endif
ifneq ($(CONFIG_USB_MTU3_DUAL_ROLE),)
mtu3-y += mtu3_dr.o
endif
+
+ifneq ($(CONFIG_DEBUG_FS),)
+ mtu3-y += mtu3_debugfs.o
+endif
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index 87823ac0d120..76ecf12fdf62 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -63,6 +63,15 @@ struct mtu3_request;
#define MTU3_U2_IP_SLOT_DEFAULT 1
/**
+ * IP TRUNK version
+ * Starting with TRUNK version 0x1003, USB3 Gen2 is supported; two changes
+ * affect the driver:
+ * 1. the MAXPKT and MULTI bit layout of TXCSR1 and RXCSR1 is adjusted,
+ *    and is not backward compatible
+ * 2. QMU supports an extended buffer length
+ */
+#define MTU3_TRUNK_VERS_1003 0x1003
+
+/**
* Normally the device works on HS or SS, to simplify fifo management,
* divide fifo into some 512B parts, use bitmap to manage it; And
* 128 bits size of bitmap is large enough, that means it can manage
@@ -135,45 +144,33 @@ struct mtu3_fifo_info {
* The format of TX GPD is a little different from RX one.
* And the size of GPD is 16 bytes.
*
- * @flag:
+ * @dw0_info:
* bit0: Hardware Own (HWO)
* bit1: Buffer Descriptor Present (BDP), always 0, BD is not supported
* bit2: Bypass (BPS), 1: HW skips this GPD if HWO = 1
+ * bit6: [EL] Zero Length Packet (ZLP), moved from @dw3_info[29]
* bit7: Interrupt On Completion (IOC)
- * @chksum: This is used to validate the contents of this GPD;
- * If TXQ_CS_EN / RXQ_CS_EN bit is set, an interrupt is issued
- * when checksum validation fails;
- * Checksum value is calculated over the 16 bytes of the GPD by default;
- * @data_buf_len (RX ONLY): This value indicates the length of
- * the assigned data buffer
- * @tx_ext_addr (TX ONLY): [3:0] are 4 extension bits of @buffer,
- * [7:4] are 4 extension bits of @next_gpd
+ * bit[31:16]: ([EL] bit[31:12]) allowed data buffer length (RX ONLY),
+ * the buffer length of the data to receive
+ * bit[23:16]: ([EL] bit[31:24]) extension address (TX ONLY),
+ * lower 4 bits are extension bits of @buffer,
+ * upper 4 bits are extension bits of @next_gpd
* @next_gpd: Physical address of the next GPD
* @buffer: Physical address of the data buffer
- * @buf_len:
- * (TX): This value indicates the length of the assigned data buffer
- * (RX): The total length of data received
- * @ext_len: reserved
- * @rx_ext_addr(RX ONLY): [3:0] are 4 extension bits of @buffer,
- * [7:4] are 4 extension bits of @next_gpd
- * @ext_flag:
- * bit5 (TX ONLY): Zero Length Packet (ZLP),
+ * @dw3_info:
+ * bit[15:0]: ([EL] bit[19:0]) data buffer length,
+ * (TX): the buffer length of the data to transmit
+ * (RX): The total length of data received
+ * bit[23:16]: ([EL] bit[31:24]) extension address (RX ONLY),
+ * lower 4 bits are extension bits of @buffer,
+ * upper 4 bits are extension bits of @next_gpd
+ * bit29: ([EL] abandoned) Zero Length Packet (ZLP) (TX ONLY)
*/
struct qmu_gpd {
- __u8 flag;
- __u8 chksum;
- union {
- __le16 data_buf_len;
- __le16 tx_ext_addr;
- };
+ __le32 dw0_info;
__le32 next_gpd;
__le32 buffer;
- __le16 buf_len;
- union {
- __u8 ext_len;
- __u8 rx_ext_addr;
- };
- __u8 ext_flag;
+ __le32 dw3_info;
} __packed;
/**
@@ -316,6 +313,7 @@ static inline struct ssusb_mtk *dev_to_ssusb(struct device *dev)
* @may_wakeup: means device's remote wakeup is enabled
* @is_self_powered: is reported in device status and the config descriptor
* @delayed_status: true when function drivers ask for delayed status
+ * @gen2cp: compatible with USB3 Gen2 IP
* @ep0_req: dummy request used while handling standard USB requests
* for GET_STATUS and SET_SEL
* @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests
@@ -356,6 +354,7 @@ struct mtu3 {
unsigned u2_enable:1;
unsigned is_u3_ip:1;
unsigned delayed_status:1;
+ unsigned gen2cp:1;
u8 address;
u8 test_mode_nr;
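
Since the reworked struct qmu_gpd above packs fields whose bit layout differs between the original IP and the 0x1003 ("EL"/Gen2) trunk, accesses to dw0_info/dw3_info have to be version-aware. A minimal sketch of such an accessor; the macro names and masks are illustrative, not the driver's:

/* data buffer length lives in dw3_info: bits 15:0 (gen1) or 19:0 ([EL]) */
#define SKETCH_GPD_BUF_LEN	GENMASK(15, 0)
#define SKETCH_GPD_BUF_LEN_EL	GENMASK(19, 0)

static u32 gpd_buf_len(const struct qmu_gpd *gpd, bool gen2cp)
{
	u32 mask = gen2cp ? SKETCH_GPD_BUF_LEN_EL : SKETCH_GPD_BUF_LEN;

	return le32_to_cpu(gpd->dw3_info) & mask;
}
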
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index 4fee200795a5..f8bd1d57e795 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include "mtu3.h"
+#include "mtu3_debug.h"
+#include "mtu3_trace.h"
static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)
{
@@ -299,6 +301,7 @@ int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
int interval, int burst, int mult)
{
void __iomem *mbase = mtu->mac_base;
+ bool gen2cp = mtu->gen2cp;
int epnum = mep->epnum;
u32 csr0, csr1, csr2;
int fifo_sgsz, fifo_addr;
@@ -319,7 +322,7 @@ int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
num_pkts = (burst + 1) * (mult + 1) - 1;
csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot);
- csr1 |= TX_MAX_PKT(num_pkts) | TX_MULT(mult);
+ csr1 |= TX_MAX_PKT(gen2cp, num_pkts) | TX_MULT(gen2cp, mult);
csr2 = TX_FIFOADDR(fifo_addr >> 4);
csr2 |= TX_FIFOSEGSIZE(fifo_sgsz);
@@ -355,7 +358,7 @@ int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
num_pkts = (burst + 1) * (mult + 1) - 1;
csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot);
- csr1 |= RX_MAX_PKT(num_pkts) | RX_MULT(mult);
+ csr1 |= RX_MAX_PKT(gen2cp, num_pkts) | RX_MULT(gen2cp, mult);
csr2 = RX_FIFOADDR(fifo_addr >> 4);
csr2 |= RX_FIFOSEGSIZE(fifo_sgsz);
@@ -600,6 +603,10 @@ static void mtu3_regs_init(struct mtu3 *mtu)
mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
	/* enable automatic HWRW from L1 */
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, LPM_HRWE);
+
+ /* use new QMU format when HW version >= 0x1003 */
+ if (mtu->gen2cp)
+ mtu3_writel(mbase, U3D_QFCR, ~0x0);
}
static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
@@ -650,6 +657,8 @@ static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
break;
}
dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed));
+ mtu3_dbg_trace(mtu->dev, "link speed %s",
+ usb_speed_string(udev_speed));
mtu->g.speed = udev_speed;
mtu->g.ep0->maxpacket = maxpkt;
@@ -672,6 +681,7 @@ static irqreturn_t mtu3_u3_ltssm_isr(struct mtu3 *mtu)
ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE);
mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */
dev_dbg(mtu->dev, "=== LTSSM[%x] ===\n", ltssm);
+ trace_mtu3_u3_ltssm_isr(ltssm);
if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
mtu3_gadget_reset(mtu);
@@ -702,6 +712,7 @@ static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu)
u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE);
mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */
dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm);
+ trace_mtu3_u2_common_isr(u2comm);
if (u2comm & SUSPEND_INTR)
mtu3_gadget_suspend(mtu);
@@ -749,13 +760,15 @@ static irqreturn_t mtu3_irq(int irq, void *data)
static int mtu3_hw_init(struct mtu3 *mtu)
{
- u32 cap_dev;
+ u32 value;
int ret;
- mtu->hw_version = mtu3_readl(mtu->ippc_base, U3D_SSUSB_HW_ID);
+ value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_TRUNK_VERS);
+ mtu->hw_version = IP_TRUNK_VERS(value);
+ mtu->gen2cp = !!(mtu->hw_version >= MTU3_TRUNK_VERS_1003);
- cap_dev = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
- mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(cap_dev);
+ value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
+ mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(value);
dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version,
mtu->is_u3_ip ? "U3" : "U2");
@@ -893,6 +906,8 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
mtu3_stop(mtu);
+ ssusb_dev_debugfs_init(ssusb);
+
dev_dbg(dev, " %s() done...\n", __func__);
return 0;
diff --git a/drivers/usb/mtu3/mtu3_debug.h b/drivers/usb/mtu3/mtu3_debug.h
new file mode 100644
index 000000000000..e96a69234d05
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_debug.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mtu3_debug.h - debug header
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#ifndef __MTU3_DEBUG_H__
+#define __MTU3_DEBUG_H__
+
+#include <linux/debugfs.h>
+
+#define MTU3_DEBUGFS_NAME_LEN 32
+
+struct mtu3_regset {
+ char name[MTU3_DEBUGFS_NAME_LEN];
+ struct debugfs_regset32 regset;
+ size_t nregs;
+};
+
+struct mtu3_file_map {
+ const char *name;
+ int (*show)(struct seq_file *s, void *unused);
+};
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void ssusb_dev_debugfs_init(struct ssusb_mtk *ssusb);
+void ssusb_dr_debugfs_init(struct ssusb_mtk *ssusb);
+void ssusb_debugfs_create_root(struct ssusb_mtk *ssusb);
+void ssusb_debugfs_remove_root(struct ssusb_mtk *ssusb);
+
+#else
+static inline void ssusb_dev_debugfs_init(struct ssusb_mtk *ssusb) {}
+static inline void ssusb_dr_debugfs_init(struct ssusb_mtk *ssusb) {}
+static inline void ssusb_debugfs_create_root(struct ssusb_mtk *ssusb) {}
+static inline void ssusb_debugfs_remove_root(struct ssusb_mtk *ssusb) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+#if IS_ENABLED(CONFIG_TRACING)
+void mtu3_dbg_trace(struct device *dev, const char *fmt, ...);
+
+#else
+static inline void mtu3_dbg_trace(struct device *dev, const char *fmt, ...) {}
+
+#endif /* CONFIG_TRACING */
+
+#endif /* __MTU3_DEBUG_H__ */
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
new file mode 100644
index 000000000000..62c57ddc554e
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mtu3_debugfs.c - debugfs interface
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#include <linux/uaccess.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+#include "mtu3_debug.h"
+
+#define dump_register(nm) \
+{ \
+ .name = __stringify(nm), \
+ .offset = U3D_ ##nm, \
+}
+
+#define dump_prb_reg(nm, os) \
+{ \
+ .name = nm, \
+ .offset = os, \
+}
+
+static const struct debugfs_reg32 mtu3_ippc_regs[] = {
+ dump_register(SSUSB_IP_PW_CTRL0),
+ dump_register(SSUSB_IP_PW_CTRL1),
+ dump_register(SSUSB_IP_PW_CTRL2),
+ dump_register(SSUSB_IP_PW_CTRL3),
+ dump_register(SSUSB_OTG_STS),
+ dump_register(SSUSB_IP_XHCI_CAP),
+ dump_register(SSUSB_IP_DEV_CAP),
+ dump_register(SSUSB_U3_CTRL_0P),
+ dump_register(SSUSB_U2_CTRL_0P),
+ dump_register(SSUSB_HW_ID),
+ dump_register(SSUSB_HW_SUB_ID),
+ dump_register(SSUSB_IP_SPARE0),
+};
+
+static const struct debugfs_reg32 mtu3_dev_regs[] = {
+ dump_register(LV1ISR),
+ dump_register(LV1IER),
+ dump_register(EPISR),
+ dump_register(EPIER),
+ dump_register(EP0CSR),
+ dump_register(RXCOUNT0),
+ dump_register(QISAR0),
+ dump_register(QIER0),
+ dump_register(QISAR1),
+ dump_register(QIER1),
+ dump_register(CAP_EPNTXFFSZ),
+ dump_register(CAP_EPNRXFFSZ),
+ dump_register(CAP_EPINFO),
+ dump_register(MISC_CTRL),
+};
+
+static const struct debugfs_reg32 mtu3_csr_regs[] = {
+ dump_register(DEVICE_CONF),
+ dump_register(DEV_LINK_INTR_ENABLE),
+ dump_register(DEV_LINK_INTR),
+ dump_register(LTSSM_CTRL),
+ dump_register(USB3_CONFIG),
+ dump_register(LINK_STATE_MACHINE),
+ dump_register(LTSSM_INTR_ENABLE),
+ dump_register(LTSSM_INTR),
+ dump_register(U3U2_SWITCH_CTRL),
+ dump_register(POWER_MANAGEMENT),
+ dump_register(DEVICE_CONTROL),
+ dump_register(COMMON_USB_INTR_ENABLE),
+ dump_register(COMMON_USB_INTR),
+ dump_register(USB20_MISC_CONTROL),
+ dump_register(USB20_OPSTATE),
+};
+
+static int mtu3_link_state_show(struct seq_file *sf, void *unused)
+{
+ struct mtu3 *mtu = sf->private;
+ void __iomem *mbase = mtu->mac_base;
+
+ seq_printf(sf, "opstate: %#x, ltssm: %#x\n",
+ mtu3_readl(mbase, U3D_USB20_OPSTATE),
+ LTSSM_STATE(mtu3_readl(mbase, U3D_LINK_STATE_MACHINE)));
+
+ return 0;
+}
+
+static int mtu3_ep_used_show(struct seq_file *sf, void *unused)
+{
+ struct mtu3 *mtu = sf->private;
+ struct mtu3_ep *mep;
+ unsigned long flags;
+ int used = 0;
+ int i;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ for (i = 0; i < mtu->num_eps; i++) {
+ mep = mtu->in_eps + i;
+ if (mep->flags & MTU3_EP_ENABLED) {
+ seq_printf(sf, "%s - type: %d\n", mep->name, mep->type);
+ used++;
+ }
+
+ mep = mtu->out_eps + i;
+ if (mep->flags & MTU3_EP_ENABLED) {
+ seq_printf(sf, "%s - type: %d\n", mep->name, mep->type);
+ used++;
+ }
+ }
+ seq_printf(sf, "total used: %d eps\n", used);
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mtu3_link_state);
+DEFINE_SHOW_ATTRIBUTE(mtu3_ep_used);
+
+static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base,
+ const struct debugfs_reg32 *regs, size_t nregs,
+ const char *name, struct dentry *parent)
+{
+ struct debugfs_regset32 *regset;
+ struct mtu3_regset *mregs;
+
+	mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL);
+ if (!mregs)
+ return;
+
+ sprintf(mregs->name, "%s", name);
+ regset = &mregs->regset;
+ regset->regs = regs;
+ regset->nregs = nregs;
+ regset->base = base;
+
+ debugfs_create_regset32(mregs->name, 0444, parent, regset);
+}
+
+static void mtu3_debugfs_ep_regset(struct mtu3 *mtu, struct mtu3_ep *mep,
+ struct dentry *parent)
+{
+ struct debugfs_reg32 *regs;
+ int epnum = mep->epnum;
+ int in = mep->is_in;
+
+ regs = devm_kcalloc(mtu->dev, 7, sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return;
+
+ regs[0].name = in ? "TCR0" : "RCR0";
+ regs[0].offset = in ? MU3D_EP_TXCR0(epnum) : MU3D_EP_RXCR0(epnum);
+ regs[1].name = in ? "TCR1" : "RCR1";
+ regs[1].offset = in ? MU3D_EP_TXCR1(epnum) : MU3D_EP_RXCR1(epnum);
+ regs[2].name = in ? "TCR2" : "RCR2";
+ regs[2].offset = in ? MU3D_EP_TXCR2(epnum) : MU3D_EP_RXCR2(epnum);
+ regs[3].name = in ? "TQHIAR" : "RQHIAR";
+ regs[3].offset = in ? USB_QMU_TQHIAR(epnum) : USB_QMU_RQHIAR(epnum);
+ regs[4].name = in ? "TQCSR" : "RQCSR";
+ regs[4].offset = in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+ regs[5].name = in ? "TQSAR" : "RQSAR";
+ regs[5].offset = in ? USB_QMU_TQSAR(epnum) : USB_QMU_RQSAR(epnum);
+ regs[6].name = in ? "TQCPR" : "RQCPR";
+ regs[6].offset = in ? USB_QMU_TQCPR(epnum) : USB_QMU_RQCPR(epnum);
+
+ mtu3_debugfs_regset(mtu, mtu->mac_base, regs, 7, "ep-regs", parent);
+}
+
+static int mtu3_ep_info_show(struct seq_file *sf, void *unused)
+{
+ struct mtu3_ep *mep = sf->private;
+ struct mtu3 *mtu = mep->mtu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ seq_printf(sf, "ep - type:%d, maxp:%d, slot:%d, flags:%x\n",
+ mep->type, mep->maxp, mep->slot, mep->flags);
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static int mtu3_fifo_show(struct seq_file *sf, void *unused)
+{
+ struct mtu3_ep *mep = sf->private;
+ struct mtu3 *mtu = mep->mtu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ seq_printf(sf, "fifo - seg_size:%d, addr:%d, size:%d\n",
+ mep->fifo_seg_size, mep->fifo_addr, mep->fifo_size);
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static int mtu3_qmu_ring_show(struct seq_file *sf, void *unused)
+{
+ struct mtu3_ep *mep = sf->private;
+ struct mtu3 *mtu = mep->mtu;
+ struct mtu3_gpd_ring *ring;
+ unsigned long flags;
+
+ ring = &mep->gpd_ring;
+ spin_lock_irqsave(&mtu->lock, flags);
+ seq_printf(sf,
+ "qmu-ring - dma:%pad, start:%p, end:%p, enq:%p, dep:%p\n",
+ &ring->dma, ring->start, ring->end,
+ ring->enqueue, ring->dequeue);
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static int mtu3_qmu_gpd_show(struct seq_file *sf, void *unused)
+{
+ struct mtu3_ep *mep = sf->private;
+ struct mtu3 *mtu = mep->mtu;
+ struct mtu3_gpd_ring *ring;
+ struct qmu_gpd *gpd;
+ dma_addr_t dma;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ ring = &mep->gpd_ring;
+ gpd = ring->start;
+ if (!gpd || !(mep->flags & MTU3_EP_ENABLED)) {
+ seq_puts(sf, "empty!\n");
+ goto out;
+ }
+
+ for (i = 0; i < MAX_GPD_NUM; i++, gpd++) {
+ dma = ring->dma + i * sizeof(*gpd);
+ seq_printf(sf, "gpd.%03d -> %pad, %p: %08x %08x %08x %08x\n",
+ i, &dma, gpd, gpd->dw0_info, gpd->next_gpd,
+ gpd->buffer, gpd->dw3_info);
+ }
+
+out:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static const struct mtu3_file_map mtu3_ep_files[] = {
+ {"ep-info", mtu3_ep_info_show, },
+ {"fifo", mtu3_fifo_show, },
+ {"qmu-ring", mtu3_qmu_ring_show, },
+ {"qmu-gpd", mtu3_qmu_gpd_show, },
+};
+
+static int mtu3_ep_open(struct inode *inode, struct file *file)
+{
+ const char *file_name = file_dentry(file)->d_iname;
+ const struct mtu3_file_map *f_map;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
+ f_map = &mtu3_ep_files[i];
+
+ if (strcmp(f_map->name, file_name) == 0)
+ break;
+ }
+
+ return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations mtu3_ep_fops = {
+ .open = mtu3_ep_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct debugfs_reg32 mtu3_prb_regs[] = {
+ dump_prb_reg("enable", U3D_SSUSB_PRB_CTRL0),
+ dump_prb_reg("byte-sell", U3D_SSUSB_PRB_CTRL1),
+ dump_prb_reg("byte-selh", U3D_SSUSB_PRB_CTRL2),
+ dump_prb_reg("module-sel", U3D_SSUSB_PRB_CTRL3),
+ dump_prb_reg("sw-out", U3D_SSUSB_PRB_CTRL4),
+ dump_prb_reg("data", U3D_SSUSB_PRB_CTRL5),
+};
+
+static int mtu3_probe_show(struct seq_file *sf, void *unused)
+{
+ const char *file_name = file_dentry(sf->file)->d_iname;
+ struct mtu3 *mtu = sf->private;
+ const struct debugfs_reg32 *regs;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
+ regs = &mtu3_prb_regs[i];
+
+ if (strcmp(regs->name, file_name) == 0)
+ break;
+ }
+
+ seq_printf(sf, "0x%04x - 0x%08x\n", (u32)regs->offset,
+ mtu3_readl(mtu->ippc_base, (u32)regs->offset));
+
+ return 0;
+}
+
+static int mtu3_probe_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtu3_probe_show, inode->i_private);
+}
+
+static ssize_t mtu3_probe_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ const char *file_name = file_dentry(file)->d_iname;
+ struct seq_file *sf = file->private_data;
+ struct mtu3 *mtu = sf->private;
+ const struct debugfs_reg32 *regs;
+ char buf[32];
+ u32 val;
+ int i;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (kstrtou32(buf, 0, &val))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
+ regs = &mtu3_prb_regs[i];
+
+ if (strcmp(regs->name, file_name) == 0)
+ break;
+ }
+ mtu3_writel(mtu->ippc_base, (u32)regs->offset, val);
+
+ return count;
+}
+
+static const struct file_operations mtu3_probe_fops = {
+ .open = mtu3_probe_open,
+ .write = mtu3_probe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
+{
+ struct ssusb_mtk *ssusb = mtu->ssusb;
+ struct debugfs_reg32 *regs;
+ struct dentry *dir_prb;
+ int i;
+
+ dir_prb = debugfs_create_dir("probe", ssusb->dbgfs_root);
+
+ for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
+ regs = &mtu3_prb_regs[i];
+ debugfs_create_file(regs->name, 0644, dir_prb,
+ mtu, &mtu3_probe_fops);
+ }
+
+ mtu3_debugfs_regset(mtu, mtu->ippc_base, mtu3_prb_regs,
+ ARRAY_SIZE(mtu3_prb_regs), "regs", dir_prb);
+}
+
+static void mtu3_debugfs_create_ep_dir(struct mtu3_ep *mep,
+ struct dentry *parent)
+{
+ const struct mtu3_file_map *files;
+ struct dentry *dir_ep;
+ int i;
+
+ dir_ep = debugfs_create_dir(mep->name, parent);
+ mtu3_debugfs_ep_regset(mep->mtu, mep, dir_ep);
+
+ for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
+ files = &mtu3_ep_files[i];
+
+ debugfs_create_file(files->name, 0444, dir_ep,
+ mep, &mtu3_ep_fops);
+ }
+}
+
+static void mtu3_debugfs_create_ep_dirs(struct mtu3 *mtu)
+{
+ struct ssusb_mtk *ssusb = mtu->ssusb;
+ struct dentry *dir_eps;
+ int i;
+
+ dir_eps = debugfs_create_dir("eps", ssusb->dbgfs_root);
+
+ for (i = 1; i < mtu->num_eps; i++) {
+ mtu3_debugfs_create_ep_dir(mtu->in_eps + i, dir_eps);
+ mtu3_debugfs_create_ep_dir(mtu->out_eps + i, dir_eps);
+ }
+}
+
+void ssusb_dev_debugfs_init(struct ssusb_mtk *ssusb)
+{
+ struct mtu3 *mtu = ssusb->u3d;
+ struct dentry *dir_regs;
+
+ dir_regs = debugfs_create_dir("regs", ssusb->dbgfs_root);
+
+ mtu3_debugfs_regset(mtu, mtu->ippc_base,
+ mtu3_ippc_regs, ARRAY_SIZE(mtu3_ippc_regs),
+ "reg-ippc", dir_regs);
+
+ mtu3_debugfs_regset(mtu, mtu->mac_base,
+ mtu3_dev_regs, ARRAY_SIZE(mtu3_dev_regs),
+ "reg-dev", dir_regs);
+
+ mtu3_debugfs_regset(mtu, mtu->mac_base,
+ mtu3_csr_regs, ARRAY_SIZE(mtu3_csr_regs),
+ "reg-csr", dir_regs);
+
+ mtu3_debugfs_create_ep_dirs(mtu);
+
+ mtu3_debugfs_create_prb_files(mtu);
+
+ debugfs_create_file("link-state", 0444, ssusb->dbgfs_root,
+ mtu, &mtu3_link_state_fops);
+ debugfs_create_file("ep-used", 0444, ssusb->dbgfs_root,
+ mtu, &mtu3_ep_used_fops);
+}
+
+static int ssusb_mode_show(struct seq_file *sf, void *unused)
+{
+ struct ssusb_mtk *ssusb = sf->private;
+
+ seq_printf(sf, "current mode: %s(%s drd)\n(echo device/host)\n",
+ ssusb->is_host ? "host" : "device",
+ ssusb->otg_switch.manual_drd_enabled ? "manual" : "auto");
+
+ return 0;
+}
+
+static int ssusb_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssusb_mode_show, inode->i_private);
+}
+
+static ssize_t ssusb_mode_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *sf = file->private_data;
+ struct ssusb_mtk *ssusb = sf->private;
+ char buf[16];
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
+ ssusb_mode_manual_switch(ssusb, 1);
+ } else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
+ ssusb_mode_manual_switch(ssusb, 0);
+ } else {
+ dev_err(ssusb->dev, "wrong or duplicated setting\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations ssusb_mode_fops = {
+ .open = ssusb_mode_open,
+ .write = ssusb_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ssusb_vbus_show(struct seq_file *sf, void *unused)
+{
+ struct ssusb_mtk *ssusb = sf->private;
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ seq_printf(sf, "vbus state: %s\n(echo on/off)\n",
+ regulator_is_enabled(otg_sx->vbus) ? "on" : "off");
+
+ return 0;
+}
+
+static int ssusb_vbus_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssusb_vbus_show, inode->i_private);
+}
+
+static ssize_t ssusb_vbus_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *sf = file->private_data;
+ struct ssusb_mtk *ssusb = sf->private;
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ char buf[16];
+ bool enable;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (kstrtobool(buf, &enable)) {
+ dev_err(ssusb->dev, "wrong setting\n");
+ return -EINVAL;
+ }
+
+ ssusb_set_vbus(otg_sx, enable);
+
+ return count;
+}
+
+static const struct file_operations ssusb_vbus_fops = {
+ .open = ssusb_vbus_open,
+ .write = ssusb_vbus_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void ssusb_dr_debugfs_init(struct ssusb_mtk *ssusb)
+{
+ struct dentry *root = ssusb->dbgfs_root;
+
+ debugfs_create_file("mode", 0644, root, ssusb, &ssusb_mode_fops);
+ debugfs_create_file("vbus", 0644, root, ssusb, &ssusb_vbus_fops);
+}
+
+void ssusb_debugfs_create_root(struct ssusb_mtk *ssusb)
+{
+ ssusb->dbgfs_root =
+ debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root);
+}
+
+void ssusb_debugfs_remove_root(struct ssusb_mtk *ssusb)
+{
+ debugfs_remove_recursive(ssusb->dbgfs_root);
+ ssusb->dbgfs_root = NULL;
+}
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index ac60e9c8564e..5fcb71af875a 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -7,16 +7,9 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
-#include <linux/debugfs.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-
#include "mtu3.h"
#include "mtu3_dr.h"
+#include "mtu3_debug.h"
#define USB2_PORT 2
#define USB3_PORT 3
@@ -28,6 +21,22 @@ enum mtu3_vbus_id_state {
MTU3_VBUS_VALID,
};
+static char *mailbox_state_string(enum mtu3_vbus_id_state state)
+{
+ switch (state) {
+ case MTU3_ID_FLOAT:
+ return "ID_FLOAT";
+ case MTU3_ID_GROUND:
+ return "ID_GROUND";
+ case MTU3_VBUS_OFF:
+ return "VBUS_OFF";
+ case MTU3_VBUS_VALID:
+ return "VBUS_VALID";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static void toggle_opstate(struct ssusb_mtk *ssusb)
{
if (!ssusb->otg_switch.is_u3_drd) {
@@ -147,7 +156,8 @@ static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx,
container_of(otg_sx, struct ssusb_mtk, otg_switch);
struct mtu3 *mtu = ssusb->u3d;
- dev_dbg(ssusb->dev, "mailbox state(%d)\n", status);
+ dev_dbg(ssusb->dev, "mailbox %s\n", mailbox_state_string(status));
+ mtu3_dbg_trace(ssusb->dev, "mailbox %s", mailbox_state_string(status));
switch (status) {
case MTU3_ID_GROUND:
@@ -238,14 +248,18 @@ static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
otg_sx->vbus_nb.notifier_call = ssusb_vbus_notifier;
ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB,
&otg_sx->vbus_nb);
- if (ret < 0)
+ if (ret < 0) {
dev_err(ssusb->dev, "failed to register notifier for USB\n");
+ return ret;
+ }
otg_sx->id_nb.notifier_call = ssusb_id_notifier;
ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB_HOST,
&otg_sx->id_nb);
- if (ret < 0)
+ if (ret < 0) {
dev_err(ssusb->dev, "failed to register notifier for USB-HOST\n");
+ return ret;
+ }
dev_dbg(ssusb->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n",
extcon_get_state(edev, EXTCON_USB),
@@ -266,7 +280,7 @@ static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
* This is useful in special cases, such as when a Type-A receptacle is used
* but dual-role mode should also be supported.
*/
-static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
+void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
@@ -281,114 +295,6 @@ static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
}
}
-static int ssusb_mode_show(struct seq_file *sf, void *unused)
-{
- struct ssusb_mtk *ssusb = sf->private;
-
- seq_printf(sf, "current mode: %s(%s drd)\n(echo device/host)\n",
- ssusb->is_host ? "host" : "device",
- ssusb->otg_switch.manual_drd_enabled ? "manual" : "auto");
-
- return 0;
-}
-
-static int ssusb_mode_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ssusb_mode_show, inode->i_private);
-}
-
-static ssize_t ssusb_mode_write(struct file *file,
- const char __user *ubuf, size_t count, loff_t *ppos)
-{
- struct seq_file *sf = file->private_data;
- struct ssusb_mtk *ssusb = sf->private;
- char buf[16];
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
- ssusb_mode_manual_switch(ssusb, 1);
- } else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
- ssusb_mode_manual_switch(ssusb, 0);
- } else {
- dev_err(ssusb->dev, "wrong or duplicated setting\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ssusb_mode_fops = {
- .open = ssusb_mode_open,
- .write = ssusb_mode_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int ssusb_vbus_show(struct seq_file *sf, void *unused)
-{
- struct ssusb_mtk *ssusb = sf->private;
- struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
-
- seq_printf(sf, "vbus state: %s\n(echo on/off)\n",
- regulator_is_enabled(otg_sx->vbus) ? "on" : "off");
-
- return 0;
-}
-
-static int ssusb_vbus_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ssusb_vbus_show, inode->i_private);
-}
-
-static ssize_t ssusb_vbus_write(struct file *file,
- const char __user *ubuf, size_t count, loff_t *ppos)
-{
- struct seq_file *sf = file->private_data;
- struct ssusb_mtk *ssusb = sf->private;
- struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- char buf[16];
- bool enable;
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- if (kstrtobool(buf, &enable)) {
- dev_err(ssusb->dev, "wrong setting\n");
- return -EINVAL;
- }
-
- ssusb_set_vbus(otg_sx, enable);
-
- return count;
-}
-
-static const struct file_operations ssusb_vbus_fops = {
- .open = ssusb_vbus_open,
- .write = ssusb_vbus_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void ssusb_debugfs_init(struct ssusb_mtk *ssusb)
-{
- struct dentry *root;
-
- root = debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root);
- ssusb->dbgfs_root = root;
-
- debugfs_create_file("mode", 0644, root, ssusb, &ssusb_mode_fops);
- debugfs_create_file("vbus", 0644, root, ssusb, &ssusb_vbus_fops);
-}
-
-static void ssusb_debugfs_exit(struct ssusb_mtk *ssusb)
-{
- debugfs_remove_recursive(ssusb->dbgfs_root);
-}
-
void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
enum mtu3_dr_force_mode mode)
{
@@ -415,25 +321,23 @@ void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ int ret = 0;
INIT_WORK(&otg_sx->id_work, ssusb_id_work);
INIT_WORK(&otg_sx->vbus_work, ssusb_vbus_work);
if (otg_sx->manual_drd_enabled)
- ssusb_debugfs_init(ssusb);
+ ssusb_dr_debugfs_init(ssusb);
else
- ssusb_extcon_register(otg_sx);
+ ret = ssusb_extcon_register(otg_sx);
- return 0;
+ return ret;
}
void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- if (otg_sx->manual_drd_enabled)
- ssusb_debugfs_exit(ssusb);
-
cancel_work_sync(&otg_sx->id_work);
cancel_work_sync(&otg_sx->vbus_work);
}
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
index 50702fdcde28..ba6fe357ce29 100644
--- a/drivers/usb/mtu3/mtu3_dr.h
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -71,6 +71,7 @@ static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
#if IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb);
void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb);
+void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host);
int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on);
void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
enum mtu3_dr_force_mode mode);
@@ -85,6 +86,9 @@ static inline int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
static inline void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
{}
+static inline void
+ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host) {}
+
static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
{
return 0;
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index bbcd3332471d..f93732e53fd8 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -8,6 +8,7 @@
*/
#include "mtu3.h"
+#include "mtu3_trace.h"
void mtu3_req_complete(struct mtu3_ep *mep,
struct usb_request *req, int status)
@@ -25,6 +26,8 @@ __acquires(mep->mtu->lock)
mtu = mreq->mtu;
mep->busy = 1;
+
+ trace_mtu3_req_complete(mreq);
spin_unlock(&mtu->lock);
/* ep0 makes use of PIO, needn't unmap it */
@@ -201,6 +204,7 @@ error:
spin_unlock_irqrestore(&mtu->lock, flags);
dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep);
+ trace_mtu3_gadget_ep_enable(mep);
return ret;
}
@@ -212,6 +216,7 @@ static int mtu3_gadget_ep_disable(struct usb_ep *ep)
unsigned long flags;
dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name);
+ trace_mtu3_gadget_ep_disable(mep);
if (!(mep->flags & MTU3_EP_ENABLED)) {
dev_warn(mtu->dev, "%s is already disabled\n", mep->name);
@@ -242,13 +247,17 @@ struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
mreq->request.dma = DMA_ADDR_INVALID;
mreq->epnum = mep->epnum;
mreq->mep = mep;
+ trace_mtu3_alloc_request(mreq);
return &mreq->request;
}
void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
{
- kfree(to_mtu3_request(req));
+ struct mtu3_request *mreq = to_mtu3_request(req);
+
+ trace_mtu3_free_request(mreq);
+ kfree(mreq);
}
static int mtu3_gadget_queue(struct usb_ep *ep,
@@ -278,10 +287,12 @@ static int mtu3_gadget_queue(struct usb_ep *ep,
__func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name,
mreq, ep->maxpacket, mreq->request.length);
- if (req->length > GPD_BUF_SIZE) {
+ if (req->length > GPD_BUF_SIZE ||
+ (mtu->gen2cp && req->length > GPD_BUF_SIZE_EL)) {
dev_warn(mtu->dev,
"req length > supported MAX:%d requested:%d\n",
- GPD_BUF_SIZE, req->length);
+ mtu->gen2cp ? GPD_BUF_SIZE_EL : GPD_BUF_SIZE,
+ req->length);
return -EOPNOTSUPP;
}
@@ -314,6 +325,7 @@ static int mtu3_gadget_queue(struct usb_ep *ep,
error:
spin_unlock_irqrestore(&mtu->lock, flags);
+ trace_mtu3_gadget_queue(mreq);
return ret;
}
@@ -331,6 +343,7 @@ static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req)
return -EINVAL;
dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
+ trace_mtu3_gadget_dequeue(mreq);
spin_lock_irqsave(&mtu->lock, flags);
@@ -401,6 +414,7 @@ static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value)
done:
spin_unlock_irqrestore(&mtu->lock, flags);
+ trace_mtu3_gadget_ep_set_halt(mep);
return ret;
}
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 7cb7ac980446..4da216c99726 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -11,6 +11,8 @@
#include <linux/usb/composite.h>
#include "mtu3.h"
+#include "mtu3_debug.h"
+#include "mtu3_trace.h"
/* ep0 is always mtu3->in_eps[0] */
#define next_ep0_request(mtu) next_request((mtu)->ep0)
@@ -634,6 +636,7 @@ __acquires(mtu->lock)
int handled = 0;
ep0_read_setup(mtu, &setup);
+ trace_mtu3_handle_setup(&setup);
if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
handled = handle_standard_request(mtu, &setup);
@@ -710,6 +713,7 @@ irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu)
ret = IRQ_HANDLED;
}
dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
+ mtu3_dbg_trace(mtu->dev, "ep0_state %s", decode_ep0_state(mtu));
switch (mtu->ep0_state) {
case MU3D_EP0_STATE_TX:
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 1d65b7476f23..8382d066749e 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -49,6 +49,7 @@
#define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404)
#define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408)
#define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C)
+#define U3D_QFCR (SSUSB_DEV_BASE + 0x0428)
#define U3D_TXQHIAR1 (SSUSB_DEV_BASE + 0x0484)
#define U3D_RXQHIAR1 (SSUSB_DEV_BASE + 0x04C4)
@@ -133,11 +134,23 @@
#define TX_W1C_BITS (~(TX_SENTSTALL))
/* U3D_TX1CSR1 */
-#define TX_MULT(x) (((x) & 0x3) << 22)
-#define TX_MAX_PKT(x) (((x) & 0x3f) << 16)
+#define TX_MAX_PKT_G2(x) (((x) & 0x7f) << 24)
+#define TX_MULT_G2(x) (((x) & 0x7) << 21)
+#define TX_MULT_OG(x) (((x) & 0x3) << 22)
+#define TX_MAX_PKT_OG(x) (((x) & 0x3f) << 16)
#define TX_SLOT(x) (((x) & 0x3f) << 8)
#define TX_TYPE(x) (((x) & 0x3) << 4)
#define TX_SS_BURST(x) (((x) & 0xf) << 0)
+#define TX_MULT(g2c, x) \
+({ \
+ typeof(x) x_ = (x); \
+ (g2c) ? TX_MULT_G2(x_) : TX_MULT_OG(x_); \
+})
+#define TX_MAX_PKT(g2c, x) \
+({ \
+ typeof(x) x_ = (x); \
+ (g2c) ? TX_MAX_PKT_G2(x_) : TX_MAX_PKT_OG(x_); \
+})
/* for TX_TYPE & RX_TYPE */
#define TYPE_BULK (0x0)
@@ -160,11 +173,23 @@
#define RX_W1C_BITS (~(RX_SENTSTALL | RX_RXPKTRDY))
/* U3D_RX1CSR1 */
-#define RX_MULT(x) (((x) & 0x3) << 22)
-#define RX_MAX_PKT(x) (((x) & 0x3f) << 16)
+#define RX_MAX_PKT_G2(x) (((x) & 0x7f) << 24)
+#define RX_MULT_G2(x) (((x) & 0x7) << 21)
+#define RX_MULT_OG(x) (((x) & 0x3) << 22)
+#define RX_MAX_PKT_OG(x) (((x) & 0x3f) << 16)
#define RX_SLOT(x) (((x) & 0x3f) << 8)
#define RX_TYPE(x) (((x) & 0x3) << 4)
#define RX_SS_BURST(x) (((x) & 0xf) << 0)
+#define RX_MULT(g2c, x) \
+({ \
+ typeof(x) x_ = (x); \
+ (g2c) ? RX_MULT_G2(x_) : RX_MULT_OG(x_); \
+})
+#define RX_MAX_PKT(g2c, x) \
+({ \
+ typeof(x) x_ = (x); \
+ (g2c) ? RX_MAX_PKT_G2(x_) : RX_MAX_PKT_OG(x_); \
+})
/* U3D_RX1CSR2 */
#define RX_BINTERVAL(x) (((x) & 0xff) << 24)
@@ -265,6 +290,7 @@
#define U3D_LTSSM_CTRL (SSUSB_USB3_MAC_CSR_BASE + 0x0010)
#define U3D_USB3_CONFIG (SSUSB_USB3_MAC_CSR_BASE + 0x001C)
+#define U3D_LINK_STATE_MACHINE (SSUSB_USB3_MAC_CSR_BASE + 0x0134)
#define U3D_LTSSM_INTR_ENABLE (SSUSB_USB3_MAC_CSR_BASE + 0x013C)
#define U3D_LTSSM_INTR (SSUSB_USB3_MAC_CSR_BASE + 0x0140)
@@ -282,6 +308,9 @@
/* U3D_USB3_CONFIG */
#define USB3_EN BIT(0)
+/* U3D_LINK_STATE_MACHINE */
+#define LTSSM_STATE(x) ((x) & 0x1f)
+
/* U3D_LTSSM_INTR_ENABLE */
/* U3D_LTSSM_INTR */
#define U3_RESUME_INTR BIT(18)
@@ -347,6 +376,7 @@
#define U3D_USB20_FRAME_NUM (SSUSB_USB2_CSR_BASE + 0x003C)
#define U3D_USB20_LPM_PARAMETER (SSUSB_USB2_CSR_BASE + 0x0044)
#define U3D_USB20_MISC_CONTROL (SSUSB_USB2_CSR_BASE + 0x004C)
+#define U3D_USB20_OPSTATE (SSUSB_USB2_CSR_BASE + 0x0060)
/*---------------- SSUSB_USB2_CSR FIELD DEFINITION ----------------*/
@@ -419,6 +449,13 @@
#define U3D_SSUSB_DEV_RST_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x0098)
#define U3D_SSUSB_HW_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A0)
#define U3D_SSUSB_HW_SUB_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A4)
+#define U3D_SSUSB_IP_TRUNK_VERS (U3D_SSUSB_HW_SUB_ID)
+#define U3D_SSUSB_PRB_CTRL0 (SSUSB_SIFSLV_IPPC_BASE + 0x00B0)
+#define U3D_SSUSB_PRB_CTRL1 (SSUSB_SIFSLV_IPPC_BASE + 0x00B4)
+#define U3D_SSUSB_PRB_CTRL2 (SSUSB_SIFSLV_IPPC_BASE + 0x00B8)
+#define U3D_SSUSB_PRB_CTRL3 (SSUSB_SIFSLV_IPPC_BASE + 0x00BC)
+#define U3D_SSUSB_PRB_CTRL4 (SSUSB_SIFSLV_IPPC_BASE + 0x00C0)
+#define U3D_SSUSB_PRB_CTRL5 (SSUSB_SIFSLV_IPPC_BASE + 0x00C4)
#define U3D_SSUSB_IP_SPARE0 (SSUSB_SIFSLV_IPPC_BASE + 0x00C8)
/*---------------- SSUSB_SIFSLV_IPPC FIELD DEFINITION ----------------*/
@@ -483,4 +520,7 @@
/* U3D_SSUSB_DEV_RST_CTRL */
#define SSUSB_DEV_SW_RST BIT(0)
+/* U3D_SSUSB_IP_TRUNK_VERS */
+#define IP_TRUNK_VERS(x) (((x) >> 16) & 0xffff)
+
#endif /* _SSUSB_HW_REGS_H_ */
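
The reworked CSR1 helpers above take the controller generation as their first argument so a single call site covers both bit layouts. A minimal sketch of how they might be combined when composing a TX CSR1 value; the function name is hypothetical, and only mtu3/mtu3_ep fields that appear elsewhere in this series are assumed:

static u32 example_tx_csr1(struct mtu3 *mtu, struct mtu3_ep *mep)
{
	u32 csr1 = TX_SLOT(mep->slot) | TX_SS_BURST(mep->ep.maxburst);

	/* gen2cp picks the gen2 field positions, otherwise the original layout */
	csr1 |= TX_MAX_PKT(mtu->gen2cp, mep->ep.maxpacket);
	csr1 |= TX_MULT(mtu->gen2cp, mep->ep.mult);

	return csr1;
}
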
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index e086630e41a9..fd0f6c5dfbc1 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -16,6 +16,7 @@
#include "mtu3.h"
#include "mtu3_dr.h"
+#include "mtu3_debug.h"
/* u2-port0 should be powered on and enabled; */
int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks)
@@ -210,30 +211,16 @@ static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
-/* ignore the error if the clock does not exist */
-static struct clk *get_optional_clk(struct device *dev, const char *id)
-{
- struct clk *opt_clk;
-
- opt_clk = devm_clk_get(dev, id);
- /* ignore error number except EPROBE_DEFER */
- if (IS_ERR(opt_clk) && (PTR_ERR(opt_clk) != -EPROBE_DEFER))
- opt_clk = NULL;
-
- return opt_clk;
-}
-
static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
{
struct device_node *node = pdev->dev.of_node;
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
struct device *dev = &pdev->dev;
- struct regulator *vbus;
struct resource *res;
int i;
int ret;
- ssusb->vusb33 = devm_regulator_get(&pdev->dev, "vusb33");
+ ssusb->vusb33 = devm_regulator_get(dev, "vusb33");
if (IS_ERR(ssusb->vusb33)) {
dev_err(dev, "failed to get vusb33\n");
return PTR_ERR(ssusb->vusb33);
@@ -245,15 +232,15 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
return PTR_ERR(ssusb->sys_clk);
}
- ssusb->ref_clk = get_optional_clk(dev, "ref_ck");
+ ssusb->ref_clk = devm_clk_get_optional(dev, "ref_ck");
if (IS_ERR(ssusb->ref_clk))
return PTR_ERR(ssusb->ref_clk);
- ssusb->mcu_clk = get_optional_clk(dev, "mcu_ck");
+ ssusb->mcu_clk = devm_clk_get_optional(dev, "mcu_ck");
if (IS_ERR(ssusb->mcu_clk))
return PTR_ERR(ssusb->mcu_clk);
- ssusb->dma_clk = get_optional_clk(dev, "dma_ck");
+ ssusb->dma_clk = devm_clk_get_optional(dev, "dma_ck");
if (IS_ERR(ssusb->dma_clk))
return PTR_ERR(ssusb->dma_clk);
@@ -286,7 +273,7 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
ssusb->dr_mode = USB_DR_MODE_OTG;
if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
- return 0;
+ goto out;
/* if host role is supported */
ret = ssusb_wakeup_of_property_parse(ssusb, node);
@@ -299,15 +286,14 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
of_property_read_u32(node, "mediatek,u3p-dis-msk",
&ssusb->u3p_dis_msk);
- vbus = devm_regulator_get(&pdev->dev, "vbus");
- if (IS_ERR(vbus)) {
+ otg_sx->vbus = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(otg_sx->vbus)) {
dev_err(dev, "failed to get vbus\n");
- return PTR_ERR(vbus);
+ return PTR_ERR(otg_sx->vbus);
}
- otg_sx->vbus = vbus;
if (ssusb->dr_mode == USB_DR_MODE_HOST)
- return 0;
+ goto out;
/* if dual-role mode is supported */
otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
@@ -322,6 +308,7 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
}
}
+out:
dev_info(dev, "dr_mode: %d, is_u3_dr: %d, u3p_dis_msk: %x, drd: %s\n",
ssusb->dr_mode, otg_sx->is_u3_drd, ssusb->u3p_dis_msk,
otg_sx->manual_drd_enabled ? "manual" : "auto");
@@ -354,6 +341,8 @@ static int mtu3_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ssusb_debugfs_create_root(ssusb);
+
/* enable power domain */
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -401,7 +390,11 @@ static int mtu3_probe(struct platform_device *pdev)
goto gadget_exit;
}
- ssusb_otg_switch_init(ssusb);
+ ret = ssusb_otg_switch_init(ssusb);
+ if (ret) {
+ dev_err(dev, "failed to initialize switch\n");
+ goto host_exit;
+ }
break;
default:
dev_err(dev, "unsupported mode: %d\n", ssusb->dr_mode);
@@ -411,6 +404,8 @@ static int mtu3_probe(struct platform_device *pdev)
return 0;
+host_exit:
+ ssusb_host_exit(ssusb);
gadget_exit:
ssusb_gadget_exit(ssusb);
comm_exit:
@@ -418,6 +413,7 @@ comm_exit:
comm_init_err:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ ssusb_debugfs_remove_root(ssusb);
return ret;
}
@@ -445,6 +441,7 @@ static int mtu3_remove(struct platform_device *pdev)
ssusb_rscs_exit(ssusb);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ ssusb_debugfs_remove_root(ssusb);
return 0;
}
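
The clock handling above switches from the driver's open-coded get_optional_clk() to devm_clk_get_optional(), which returns NULL when the clock is simply not described and a real error pointer (including -EPROBE_DEFER) otherwise; a NULL clock is a valid no-op for the clk API. A minimal sketch of the pattern, with a hypothetical "foo_ck" name:

static int example_enable_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;

	clk = devm_clk_get_optional(dev, "foo_ck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);		/* propagates -EPROBE_DEFER */

	*out = clk;				/* NULL if the clock is absent */
	return clk_prepare_enable(clk);		/* no-op for a NULL clock */
}
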
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 09f19f70fe8f..3f414f91b589 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -22,17 +22,49 @@
#include <linux/iopoll.h>
#include "mtu3.h"
+#include "mtu3_trace.h"
#define QMU_CHECKSUM_LEN 16
#define GPD_FLAGS_HWO BIT(0)
#define GPD_FLAGS_BDP BIT(1)
#define GPD_FLAGS_BPS BIT(2)
+#define GPD_FLAGS_ZLP BIT(6)
#define GPD_FLAGS_IOC BIT(7)
-
-#define GPD_EXT_FLAG_ZLP BIT(5)
-#define GPD_EXT_NGP(x) (((x) & 0xf) << 4)
-#define GPD_EXT_BUF(x) (((x) & 0xf) << 0)
+#define GET_GPD_HWO(gpd) (le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)
+
+#define GPD_RX_BUF_LEN_OG(x) (((x) & 0xffff) << 16)
+#define GPD_RX_BUF_LEN_EL(x) (((x) & 0xfffff) << 12)
+#define GPD_RX_BUF_LEN(mtu, x) \
+({ \
+ typeof(x) x_ = (x); \
+ ((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
+})
+
+#define GPD_DATA_LEN_OG(x) ((x) & 0xffff)
+#define GPD_DATA_LEN_EL(x) ((x) & 0xfffff)
+#define GPD_DATA_LEN(mtu, x) \
+({ \
+ typeof(x) x_ = (x); \
+ ((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
+})
+
+#define GPD_EXT_FLAG_ZLP BIT(29)
+#define GPD_EXT_NGP_OG(x) (((x) & 0xf) << 20)
+#define GPD_EXT_BUF_OG(x) (((x) & 0xf) << 16)
+#define GPD_EXT_NGP_EL(x) (((x) & 0xf) << 28)
+#define GPD_EXT_BUF_EL(x) (((x) & 0xf) << 24)
+#define GPD_EXT_NGP(mtu, x) \
+({ \
+ typeof(x) x_ = (x); \
+ ((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
+})
+
+#define GPD_EXT_BUF(mtu, x) \
+({ \
+ typeof(x) x_ = (x); \
+ ((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
+})
#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo) \
@@ -125,7 +157,7 @@ static void reset_gpd_list(struct mtu3_ep *mep)
struct qmu_gpd *gpd = ring->start;
if (gpd) {
- gpd->flag &= ~GPD_FLAGS_HWO;
+ gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
gpd_ring_init(ring, gpd);
}
}
@@ -214,16 +246,14 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->enqueue;
struct usb_request *req = &mreq->request;
+ struct mtu3 *mtu = mep->mtu;
dma_addr_t enq_dma;
- u16 ext_addr;
-
- /* set all fields to zero as default value */
- memset(gpd, 0, sizeof(*gpd));
+ u32 ext_addr;
+ gpd->dw0_info = 0; /* SW own it */
gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
- ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
- gpd->buf_len = cpu_to_le16(req->length);
- gpd->flag |= GPD_FLAGS_IOC;
+ ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
+ gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));
/* get the next GPD */
enq = advance_enq_gpd(ring);
@@ -231,17 +261,22 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
mep->epnum, gpd, enq, &enq_dma);
- enq->flag &= ~GPD_FLAGS_HWO;
+ enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
- ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
- gpd->tx_ext_addr = cpu_to_le16(ext_addr);
-
- if (req->zero)
- gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
+ ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+ gpd->dw0_info = cpu_to_le32(ext_addr);
+
+ if (req->zero) {
+ if (mtu->gen2cp)
+ gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
+ else
+ gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
+ }
- gpd->flag |= GPD_FLAGS_HWO;
+ gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
+ trace_mtu3_prepare_gpd(mep, gpd);
return 0;
}
@@ -252,16 +287,14 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->enqueue;
struct usb_request *req = &mreq->request;
+ struct mtu3 *mtu = mep->mtu;
dma_addr_t enq_dma;
- u16 ext_addr;
-
- /* set all fields to zero as default value */
- memset(gpd, 0, sizeof(*gpd));
+ u32 ext_addr;
+ gpd->dw0_info = 0; /* SW own it */
gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
- ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
- gpd->data_buf_len = cpu_to_le16(req->length);
- gpd->flag |= GPD_FLAGS_IOC;
+ ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
+ gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));
/* get the next GPD */
enq = advance_enq_gpd(ring);
@@ -269,13 +302,14 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
mep->epnum, gpd, enq, &enq_dma);
- enq->flag &= ~GPD_FLAGS_HWO;
+ enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
- ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
- gpd->rx_ext_addr = cpu_to_le16(ext_addr);
- gpd->flag |= GPD_FLAGS_HWO;
+ ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+ gpd->dw3_info = cpu_to_le32(ext_addr);
+ gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
+ trace_mtu3_prepare_gpd(mep, gpd);
return 0;
}
@@ -382,27 +416,25 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd_current = NULL;
- struct usb_request *req = NULL;
struct mtu3_request *mreq;
dma_addr_t cur_gpd_dma;
u32 txcsr = 0;
int ret;
mreq = next_request(mep);
- if (mreq && mreq->request.length == 0)
- req = &mreq->request;
- else
+ if (mreq && mreq->request.length != 0)
return;
cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
- if (le16_to_cpu(gpd_current->buf_len) != 0) {
+ if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
return;
}
- dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, req);
+ dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
+ trace_mtu3_zlp_exp_gpd(mep, gpd_current);
mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
@@ -415,8 +447,7 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
 /* bypass the current GPD */
- gpd_current->flag |= GPD_FLAGS_BPS;
- gpd_current->flag |= GPD_FLAGS_HWO;
+ gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
 /* enable DMAREQEN, switch back to QMU mode */
mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
@@ -448,7 +479,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
- while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+ while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
mreq = next_request(mep);
@@ -458,7 +489,8 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
}
request = &mreq->request;
- request->actual = le16_to_cpu(gpd->buf_len);
+ request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
+ trace_mtu3_complete_gpd(mep, gpd);
mtu3_req_complete(mep, request, 0);
gpd = advance_deq_gpd(ring);
@@ -486,7 +518,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
- while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+ while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
mreq = next_request(mep);
@@ -496,7 +528,8 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
}
req = &mreq->request;
- req->actual = le16_to_cpu(gpd->buf_len);
+ req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
+ trace_mtu3_complete_gpd(mep, gpd);
mtu3_req_complete(mep, req, 0);
gpd = advance_deq_gpd(ring);
@@ -574,6 +607,7 @@ irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
qmu_status);
+ trace_mtu3_qmu_isr(qmu_done_status, qmu_status);
if (qmu_done_status)
qmu_done_isr(mtu, qmu_done_status);
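
The hunks above replace the old per-field GPD accesses (flag, buf_len, tx/rx_ext_addr) with four little-endian 32-bit words so the gen1/gen2 differences collapse into the selector macros. Inferred from the dw0_info/next_gpd/buffer/dw3_info accesses here and in the trace events below, the descriptor presumably looks roughly like this (the authoritative definition lives in mtu3.h, which is not part of this excerpt):

struct qmu_gpd {
	__le32 dw0_info;	/* HWO/BPS/IOC/ZLP flags plus direction-dependent fields */
	__le32 next_gpd;	/* lower 32 bits of the next GPD's DMA address */
	__le32 buffer;		/* lower 32 bits of the data buffer address */
	__le32 dw3_info;	/* data length and the remaining direction-dependent bits */
};
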
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
index 81f5151a55ed..9cfde201db63 100644
--- a/drivers/usb/mtu3/mtu3_qmu.h
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -15,6 +15,7 @@
#define QMU_GPD_RING_SIZE (MAX_GPD_NUM * QMU_GPD_SIZE)
#define GPD_BUF_SIZE 65532
+#define GPD_BUF_SIZE_EL 1048572
void mtu3_qmu_stop(struct mtu3_ep *mep);
int mtu3_qmu_start(struct mtu3_ep *mep);
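
A plausible derivation of the new limit above, inferred from the length-field widths used in mtu3_qmu.c rather than stated by the patch:

/*
 * 16-bit length field: max 0xffff  = 65535   -> rounded down to a 4-byte
 *                      multiple gives GPD_BUF_SIZE    = 65532
 * 20-bit length field: max 0xfffff = 1048575 -> rounded down to a 4-byte
 *                      multiple gives GPD_BUF_SIZE_EL = 1048572
 */
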
diff --git a/drivers/usb/mtu3/mtu3_trace.c b/drivers/usb/mtu3/mtu3_trace.c
new file mode 100644
index 000000000000..4f5e7857ec31
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_trace.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * mtu3_trace.c - trace support
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "mtu3_trace.h"
+
+void mtu3_dbg_trace(struct device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ trace_mtu3_log(dev, &vaf);
+ va_end(args);
+}
diff --git a/drivers/usb/mtu3/mtu3_trace.h b/drivers/usb/mtu3/mtu3_trace.h
new file mode 100644
index 000000000000..050e30f0fbd4
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_trace.h
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * mtu3_trace.h - trace support
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mtu3
+
+#if !defined(__MTU3_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __MTU3_TRACE_H__
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include "mtu3.h"
+
+#define MTU3_MSG_MAX 256
+
+TRACE_EVENT(mtu3_log,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+ TP_ARGS(dev, vaf),
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __dynamic_array(char, msg, MTU3_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ vsnprintf(__get_str(msg), MTU3_MSG_MAX, vaf->fmt, *vaf->va);
+ ),
+ TP_printk("%s: %s", __get_str(name), __get_str(msg))
+);
+
+TRACE_EVENT(mtu3_u3_ltssm_isr,
+ TP_PROTO(u32 intr),
+ TP_ARGS(intr),
+ TP_STRUCT__entry(
+ __field(u32, intr)
+ ),
+ TP_fast_assign(
+ __entry->intr = intr;
+ ),
+ TP_printk("(%08x) %s %s %s %s %s %s", __entry->intr,
+ __entry->intr & HOT_RST_INTR ? "HOT_RST" : "",
+ __entry->intr & WARM_RST_INTR ? "WARM_RST" : "",
+ __entry->intr & ENTER_U3_INTR ? "ENT_U3" : "",
+ __entry->intr & EXIT_U3_INTR ? "EXIT_U3" : "",
+ __entry->intr & VBUS_RISE_INTR ? "VBUS_RISE" : "",
+ __entry->intr & VBUS_FALL_INTR ? "VBUS_FALL" : ""
+ )
+);
+
+TRACE_EVENT(mtu3_u2_common_isr,
+ TP_PROTO(u32 intr),
+ TP_ARGS(intr),
+ TP_STRUCT__entry(
+ __field(u32, intr)
+ ),
+ TP_fast_assign(
+ __entry->intr = intr;
+ ),
+ TP_printk("(%08x) %s %s %s", __entry->intr,
+ __entry->intr & SUSPEND_INTR ? "SUSPEND" : "",
+ __entry->intr & RESUME_INTR ? "RESUME" : "",
+ __entry->intr & RESET_INTR ? "RESET" : ""
+ )
+);
+
+TRACE_EVENT(mtu3_qmu_isr,
+ TP_PROTO(u32 done_intr, u32 exp_intr),
+ TP_ARGS(done_intr, exp_intr),
+ TP_STRUCT__entry(
+ __field(u32, done_intr)
+ __field(u32, exp_intr)
+ ),
+ TP_fast_assign(
+ __entry->done_intr = done_intr;
+ __entry->exp_intr = exp_intr;
+ ),
+ TP_printk("done (tx %04x, rx %04x), exp (%08x)",
+ __entry->done_intr & 0xffff,
+ __entry->done_intr >> 16,
+ __entry->exp_intr
+ )
+);
+
+DECLARE_EVENT_CLASS(mtu3_log_setup,
+ TP_PROTO(struct usb_ctrlrequest *setup),
+ TP_ARGS(setup),
+ TP_STRUCT__entry(
+ __field(__u8, bRequestType)
+ __field(__u8, bRequest)
+ __field(__u16, wValue)
+ __field(__u16, wIndex)
+ __field(__u16, wLength)
+ ),
+ TP_fast_assign(
+ __entry->bRequestType = setup->bRequestType;
+ __entry->bRequest = setup->bRequest;
+ __entry->wValue = le16_to_cpu(setup->wValue);
+ __entry->wIndex = le16_to_cpu(setup->wIndex);
+ __entry->wLength = le16_to_cpu(setup->wLength);
+ ),
+ TP_printk("setup - %02x %02x %04x %04x %04x",
+ __entry->bRequestType, __entry->bRequest,
+ __entry->wValue, __entry->wIndex, __entry->wLength
+ )
+);
+
+DEFINE_EVENT(mtu3_log_setup, mtu3_handle_setup,
+ TP_PROTO(struct usb_ctrlrequest *setup),
+ TP_ARGS(setup)
+);
+
+DECLARE_EVENT_CLASS(mtu3_log_request,
+ TP_PROTO(struct mtu3_request *mreq),
+ TP_ARGS(mreq),
+ TP_STRUCT__entry(
+ __string(name, mreq->mep->name)
+ __field(struct mtu3_request *, mreq)
+ __field(struct qmu_gpd *, gpd)
+ __field(unsigned int, actual)
+ __field(unsigned int, length)
+ __field(int, status)
+ __field(int, zero)
+ __field(int, no_interrupt)
+ ),
+ TP_fast_assign(
+ __assign_str(name, mreq->mep->name);
+ __entry->mreq = mreq;
+ __entry->gpd = mreq->gpd;
+ __entry->actual = mreq->request.actual;
+ __entry->length = mreq->request.length;
+ __entry->status = mreq->request.status;
+ __entry->zero = mreq->request.zero;
+ __entry->no_interrupt = mreq->request.no_interrupt;
+ ),
+ TP_printk("%s: req %p gpd %p len %u/%u %s%s --> %d",
+ __get_str(name), __entry->mreq, __entry->gpd,
+ __entry->actual, __entry->length,
+ __entry->zero ? "Z" : "z",
+ __entry->no_interrupt ? "i" : "I",
+ __entry->status
+ )
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_alloc_request,
+ TP_PROTO(struct mtu3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_free_request,
+ TP_PROTO(struct mtu3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_gadget_queue,
+ TP_PROTO(struct mtu3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_gadget_dequeue,
+ TP_PROTO(struct mtu3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_req_complete,
+ TP_PROTO(struct mtu3_request *req),
+ TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(mtu3_log_gpd,
+ TP_PROTO(struct mtu3_ep *mep, struct qmu_gpd *gpd),
+ TP_ARGS(mep, gpd),
+ TP_STRUCT__entry(
+ __string(name, mep->name)
+ __field(struct qmu_gpd *, gpd)
+ __field(u32, dw0)
+ __field(u32, dw1)
+ __field(u32, dw2)
+ __field(u32, dw3)
+ ),
+ TP_fast_assign(
+ __assign_str(name, mep->name);
+ __entry->gpd = gpd;
+ __entry->dw0 = le32_to_cpu(gpd->dw0_info);
+ __entry->dw1 = le32_to_cpu(gpd->next_gpd);
+ __entry->dw2 = le32_to_cpu(gpd->buffer);
+ __entry->dw3 = le32_to_cpu(gpd->dw3_info);
+ ),
+ TP_printk("%s: gpd %p - %08x %08x %08x %08x",
+ __get_str(name), __entry->gpd,
+ __entry->dw0, __entry->dw1,
+ __entry->dw2, __entry->dw3
+ )
+);
+
+DEFINE_EVENT(mtu3_log_gpd, mtu3_prepare_gpd,
+ TP_PROTO(struct mtu3_ep *mep, struct qmu_gpd *gpd),
+ TP_ARGS(mep, gpd)
+);
+
+DEFINE_EVENT(mtu3_log_gpd, mtu3_complete_gpd,
+ TP_PROTO(struct mtu3_ep *mep, struct qmu_gpd *gpd),
+ TP_ARGS(mep, gpd)
+);
+
+DEFINE_EVENT(mtu3_log_gpd, mtu3_zlp_exp_gpd,
+ TP_PROTO(struct mtu3_ep *mep, struct qmu_gpd *gpd),
+ TP_ARGS(mep, gpd)
+);
+
+DECLARE_EVENT_CLASS(mtu3_log_ep,
+ TP_PROTO(struct mtu3_ep *mep),
+ TP_ARGS(mep),
+ TP_STRUCT__entry(
+ __string(name, mep->name)
+ __field(unsigned int, type)
+ __field(unsigned int, slot)
+ __field(unsigned int, maxp)
+ __field(unsigned int, mult)
+ __field(unsigned int, maxburst)
+ __field(unsigned int, flags)
+ __field(unsigned int, direction)
+ __field(struct mtu3_gpd_ring *, gpd_ring)
+ ),
+ TP_fast_assign(
+ __assign_str(name, mep->name);
+ __entry->type = mep->type;
+ __entry->slot = mep->slot;
+ __entry->maxp = mep->ep.maxpacket;
+ __entry->mult = mep->ep.mult;
+ __entry->maxburst = mep->ep.maxburst;
+ __entry->flags = mep->flags;
+ __entry->direction = mep->is_in;
+ __entry->gpd_ring = &mep->gpd_ring;
+ ),
+ TP_printk("%s: type %d maxp %d slot %d mult %d burst %d ring %p/%pad flags %c:%c%c%c:%c",
+ __get_str(name), __entry->type,
+ __entry->maxp, __entry->slot,
+ __entry->mult, __entry->maxburst,
+ __entry->gpd_ring, &__entry->gpd_ring->dma,
+ __entry->flags & MTU3_EP_ENABLED ? 'E' : 'e',
+ __entry->flags & MTU3_EP_STALL ? 'S' : 's',
+ __entry->flags & MTU3_EP_WEDGE ? 'W' : 'w',
+ __entry->flags & MTU3_EP_BUSY ? 'B' : 'b',
+ __entry->direction ? '<' : '>'
+ )
+);
+
+DEFINE_EVENT(mtu3_log_ep, mtu3_gadget_ep_enable,
+ TP_PROTO(struct mtu3_ep *mep),
+ TP_ARGS(mep)
+);
+
+DEFINE_EVENT(mtu3_log_ep, mtu3_gadget_ep_disable,
+ TP_PROTO(struct mtu3_ep *mep),
+ TP_ARGS(mep)
+);
+
+DEFINE_EVENT(mtu3_log_ep, mtu3_gadget_ep_set_halt,
+ TP_PROTO(struct mtu3_ep *mep),
+ TP_ARGS(mep)
+);
+
+#endif /* __MTU3_TRACE_H__ */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mtu3_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index f742fddc5e2c..52f8e2b57ad5 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -67,7 +67,7 @@ config USB_MUSB_SUNXI
depends on NOP_USB_XCEIV
depends on PHY_SUN4I_USB
depends on EXTCON
- depends on GENERIC_PHY
+ select GENERIC_PHY
select SUNXI_SRAM
config USB_MUSB_DAVINCI
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index a60627bf7be3..5261f8dfedec 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -74,10 +74,14 @@ static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
static int jz4740_musb_init(struct musb *musb)
{
- usb_phy_generic_register();
- musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ struct device *dev = musb->controller->parent;
+
+ if (dev->of_node)
+ musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+ else
+ musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(musb->xceiv)) {
- pr_err("HS UDC: no transceiver configured\n");
+ dev_err(dev, "No transceiver configured\n");
return PTR_ERR(musb->xceiv);
}
@@ -91,13 +95,6 @@ static int jz4740_musb_init(struct musb *musb)
return 0;
}
-static int jz4740_musb_exit(struct musb *musb)
-{
- usb_put_phy(musb->xceiv);
-
- return 0;
-}
-
/*
* DMA has not been confirmed to work with CONFIG_USB_INVENTRA_DMA,
* so let's not set up the dma function pointers yet.
@@ -106,7 +103,6 @@ static const struct musb_platform_ops jz4740_musb_ops = {
.quirks = MUSB_DMA_INVENTRA | MUSB_INDEXED_EP,
.fifo_mode = 2,
.init = jz4740_musb_init,
- .exit = jz4740_musb_exit,
};
static int jz4740_probe(struct platform_device *pdev)
@@ -183,7 +179,6 @@ static int jz4740_remove(struct platform_device *pdev)
struct jz4740_glue *glue = platform_get_drvdata(pdev);
platform_device_unregister(glue->musb);
- usb_phy_generic_unregister(pdev);
clk_disable_unprepare(glue->clk);
return 0;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index b7d56272f9d1..9f5a4819a744 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1497,10 +1497,11 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
} else {
musb->is_multipoint = 0;
type = "";
-#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
- pr_err("%s: kernel must blacklist external hubs\n",
- musb_driver_name);
-#endif
+ if (IS_ENABLED(CONFIG_USB) &&
+ !IS_ENABLED(CONFIG_USB_OTG_BLACKLIST_HUB)) {
+ pr_err("%s: kernel must blacklist external hubs\n",
+ musb_driver_name);
+ }
}
/* log release info */
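
The hunk above trades a preprocessor #ifndef for a plain C condition: IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is =y or =m and to 0 otherwise, so both branches stay visible to the compiler. A minimal illustration with a placeholder symbol (not one used by this patch):

	if (IS_ENABLED(CONFIG_FOO))
		pr_info("FOO is built-in or modular\n");
	else
		pr_info("FOO is disabled\n");	/* still parsed and type-checked */
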
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 403eb97915f8..327d4f7baaf7 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -168,8 +168,7 @@ static void dsps_mod_timer_optional(struct dsps_glue *glue)
static void dsps_musb_enable(struct musb *musb)
{
struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev->parent);
- struct dsps_glue *glue = platform_get_drvdata(pdev);
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *reg_base = musb->ctrl_base;
u32 epmask, coremask;
@@ -195,8 +194,7 @@ static void dsps_musb_enable(struct musb *musb)
static void dsps_musb_disable(struct musb *musb)
{
struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev->parent);
- struct dsps_glue *glue = platform_get_drvdata(pdev);
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *reg_base = musb->ctrl_base;
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index b1dd81fb5f55..a3d2fef67746 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -531,6 +531,9 @@ static int omap2430_runtime_suspend(struct device *dev)
omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
+
return 0;
}
@@ -542,6 +545,9 @@ static int omap2430_runtime_resume(struct device *dev)
if (!musb)
return 0;
+ phy_init(musb->phy);
+ phy_power_on(musb->phy);
+
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 8c509b060c09..24b4f091acb8 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,7 @@ config AB8500_USB
in host mode, low speed.
config FSL_USB2_OTG
- bool "Freescale USB OTG Transceiver Driver"
+ tristate "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index ff38aa8963cf..71a9206ea1e2 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -189,16 +189,6 @@ static int ark3116_port_remove(struct usb_serial_port *port)
return 0;
}
-static void ark3116_init_termios(struct tty_struct *tty)
-{
- struct ktermios *termios = &tty->termios;
- *termios = tty_std_termios;
- termios->c_cflag = B9600 | CS8
- | CREAD | HUPCL | CLOCAL;
- termios->c_ispeed = 9600;
- termios->c_ospeed = 9600;
-}
-
static void ark3116_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
@@ -645,7 +635,6 @@ static struct usb_serial_driver ark3116_device = {
.port_probe = ark3116_port_probe,
.port_remove = ark3116_port_remove,
.set_termios = ark3116_set_termios,
- .init_termios = ark3116_init_termios,
.get_serial = ark3116_get_serial_info,
.tiocmget = ark3116_tiocmget,
.tiocmset = ark3116_tiocmset,
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index ed51bc48eea6..72d3ae1ebc64 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -98,7 +98,6 @@ struct cypress_private {
int write_urb_interval; /* interval to use for write urb */
int read_urb_interval; /* interval to use for read urb */
int comm_is_ok; /* true if communication is (still) ok */
- int termios_initialized;
__u8 line_control; /* holds dtr / rts value */
__u8 current_status; /* received from last read - info on dsr,cts,cd,ri,etc */
__u8 current_config; /* stores the current configuration byte */
@@ -107,11 +106,7 @@ struct cypress_private {
int get_cfg_unsafe; /* If true, the CYPRESS_GET_CONFIG is unsafe */
int baud_rate; /* stores current baud rate in
integer form */
- int isthrottled; /* if throttled, discard reads */
char prev_status; /* used for TIOCMIWAIT */
- /* we pass a pointer to this as the argument sent to
- cypress_set_termios old_termios */
- struct ktermios tmp_termios; /* stores the old termios settings */
};
/* function prototypes for the Cypress USB to serial device */
@@ -126,6 +121,7 @@ static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void cypress_send(struct usb_serial_port *port);
static int cypress_write_room(struct tty_struct *tty);
+static void cypress_earthmate_init_termios(struct tty_struct *tty);
static void cypress_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static int cypress_tiocmget(struct tty_struct *tty);
@@ -153,6 +149,7 @@ static struct usb_serial_driver cypress_earthmate_device = {
.dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
+ .init_termios = cypress_earthmate_init_termios,
.set_termios = cypress_set_termios,
.tiocmget = cypress_tiocmget,
.tiocmset = cypress_tiocmset,
@@ -467,7 +464,6 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
priv->cmd_ctrl = 0;
priv->line_control = 0;
- priv->termios_initialized = 0;
priv->rx_flags = 0;
/* Default packet format setting is determined by packet size.
 Anything with a size larger than 9 must have a separate
@@ -604,7 +600,7 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
cypress_send(port);
if (tty)
- cypress_set_termios(tty, port, &priv->tmp_termios);
+ cypress_set_termios(tty, port, NULL);
/* setup the port and start reading from the device */
usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
@@ -857,6 +853,11 @@ static int cypress_tiocmset(struct tty_struct *tty,
return cypress_write(tty, port, NULL, 0);
}
+static void cypress_earthmate_init_termios(struct tty_struct *tty)
+{
+ tty_encode_baud_rate(tty, 4800, 4800);
+}
+
static void cypress_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -868,45 +869,11 @@ static void cypress_set_termios(struct tty_struct *tty,
__u8 oldlines;
int linechange = 0;
- spin_lock_irqsave(&priv->lock, flags);
- /* We can't clean this one up as we don't know the device type
- early enough */
- if (!priv->termios_initialized) {
- if (priv->chiptype == CT_EARTHMATE) {
- tty->termios = tty_std_termios;
- tty->termios.c_cflag = B4800 | CS8 | CREAD | HUPCL |
- CLOCAL;
- tty->termios.c_ispeed = 4800;
- tty->termios.c_ospeed = 4800;
- } else if (priv->chiptype == CT_CYPHIDCOM) {
- tty->termios = tty_std_termios;
- tty->termios.c_cflag = B9600 | CS8 | CREAD | HUPCL |
- CLOCAL;
- tty->termios.c_ispeed = 9600;
- tty->termios.c_ospeed = 9600;
- } else if (priv->chiptype == CT_CA42V2) {
- tty->termios = tty_std_termios;
- tty->termios.c_cflag = B9600 | CS8 | CREAD | HUPCL |
- CLOCAL;
- tty->termios.c_ispeed = 9600;
- tty->termios.c_ospeed = 9600;
- }
- priv->termios_initialized = 1;
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
/* Unsupported features need clearing */
tty->termios.c_cflag &= ~(CMSPAR|CRTSCTS);
cflag = tty->termios.c_cflag;
- /* check if there are new settings */
- if (old_termios) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->tmp_termios = tty->termios;
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
/* set number of data bits, parity, stop bits */
/* when parity is disabled the parity type bit is ignored */
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index e7f244cf2c07..578ebdd86520 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -569,9 +569,9 @@ static int digi_set_modem_signals(struct usb_serial_port *port,
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
oob_priv->dp_write_urb_in_use = 1;
- port_priv->dp_modem_signals =
- (port_priv->dp_modem_signals&~(TIOCM_DTR|TIOCM_RTS))
- | (modem_signals&(TIOCM_DTR|TIOCM_RTS));
+ port_priv->dp_modem_signals &= ~(TIOCM_DTR | TIOCM_RTS);
+ port_priv->dp_modem_signals |=
+ modem_signals & (TIOCM_DTR | TIOCM_RTS);
}
spin_unlock(&port_priv->dp_port_lock);
spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
@@ -740,9 +740,9 @@ static void digi_set_termios(struct tty_struct *tty,
/* set parity */
tty->termios.c_cflag &= ~CMSPAR;
- if ((cflag&(PARENB|PARODD)) != (old_cflag&(PARENB|PARODD))) {
- if (cflag&PARENB) {
- if (cflag&PARODD)
+ if ((cflag & (PARENB | PARODD)) != (old_cflag & (PARENB | PARODD))) {
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
arg = DIGI_PARITY_ODD;
else
arg = DIGI_PARITY_EVEN;
@@ -755,9 +755,9 @@ static void digi_set_termios(struct tty_struct *tty,
buf[i++] = 0;
}
/* set word size */
- if ((cflag&CSIZE) != (old_cflag&CSIZE)) {
+ if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
arg = -1;
- switch (cflag&CSIZE) {
+ switch (cflag & CSIZE) {
case CS5: arg = DIGI_WORD_SIZE_5; break;
case CS6: arg = DIGI_WORD_SIZE_6; break;
case CS7: arg = DIGI_WORD_SIZE_7; break;
@@ -765,7 +765,7 @@ static void digi_set_termios(struct tty_struct *tty,
default:
dev_dbg(dev,
"digi_set_termios: can't handle word size %d\n",
- (cflag&CSIZE));
+ cflag & CSIZE);
break;
}
@@ -779,9 +779,9 @@ static void digi_set_termios(struct tty_struct *tty,
}
/* set stop bits */
- if ((cflag&CSTOPB) != (old_cflag&CSTOPB)) {
+ if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
- if ((cflag&CSTOPB))
+ if ((cflag & CSTOPB))
arg = DIGI_STOP_BITS_2;
else
arg = DIGI_STOP_BITS_1;
@@ -794,15 +794,15 @@ static void digi_set_termios(struct tty_struct *tty,
}
/* set input flow control */
- if ((iflag&IXOFF) != (old_iflag&IXOFF)
- || (cflag&CRTSCTS) != (old_cflag&CRTSCTS)) {
+ if ((iflag & IXOFF) != (old_iflag & IXOFF) ||
+ (cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
arg = 0;
- if (iflag&IXOFF)
+ if (iflag & IXOFF)
arg |= DIGI_INPUT_FLOW_CONTROL_XON_XOFF;
else
arg &= ~DIGI_INPUT_FLOW_CONTROL_XON_XOFF;
- if (cflag&CRTSCTS) {
+ if (cflag & CRTSCTS) {
arg |= DIGI_INPUT_FLOW_CONTROL_RTS;
/* On USB-4 it is necessary to assert RTS prior */
@@ -822,19 +822,18 @@ static void digi_set_termios(struct tty_struct *tty,
}
/* set output flow control */
- if ((iflag & IXON) != (old_iflag & IXON)
- || (cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
+ if ((iflag & IXON) != (old_iflag & IXON) ||
+ (cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
arg = 0;
if (iflag & IXON)
arg |= DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF;
else
arg &= ~DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF;
- if (cflag & CRTSCTS) {
+ if (cflag & CRTSCTS)
arg |= DIGI_OUTPUT_FLOW_CONTROL_CTS;
- } else {
+ else
arg &= ~DIGI_OUTPUT_FLOW_CONTROL_CTS;
- }
buf[i++] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
buf[i++] = priv->dp_port_num;
@@ -1084,7 +1083,7 @@ static int digi_chars_in_buffer(struct tty_struct *tty)
static void digi_dtr_rts(struct usb_serial_port *port, int on)
{
/* Adjust DTR and RTS */
- digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
+ digi_set_modem_signals(port, on * (TIOCM_DTR | TIOCM_RTS), 1);
}
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 0dcdcb4b2cde..43fa1f0716b7 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -28,7 +28,8 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
/* Maximum baudrate for F81232 */
-#define F81232_MAX_BAUDRATE 115200
+#define F81232_MAX_BAUDRATE 1500000
+#define F81232_DEF_BAUDRATE 9600
/* USB Control EP parameter */
#define F81232_REGISTER_REQUEST 0xa0
@@ -41,19 +42,46 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define FIFO_CONTROL_REGISTER (0x02 + SERIAL_BASE_ADDRESS)
#define LINE_CONTROL_REGISTER (0x03 + SERIAL_BASE_ADDRESS)
#define MODEM_CONTROL_REGISTER (0x04 + SERIAL_BASE_ADDRESS)
+#define LINE_STATUS_REGISTER (0x05 + SERIAL_BASE_ADDRESS)
#define MODEM_STATUS_REGISTER (0x06 + SERIAL_BASE_ADDRESS)
+/*
+ * F81232 Clock registers (106h)
+ *
+ * Bit1-0: Clock source selector
+ * 00: 1.846MHz.
+ * 01: 18.46MHz.
+ * 10: 24MHz.
+ * 11: 14.77MHz.
+ */
+#define F81232_CLK_REGISTER 0x106
+#define F81232_CLK_1_846_MHZ 0
+#define F81232_CLK_18_46_MHZ BIT(0)
+#define F81232_CLK_24_MHZ BIT(1)
+#define F81232_CLK_14_77_MHZ (BIT(1) | BIT(0))
+#define F81232_CLK_MASK GENMASK(1, 0)
+
struct f81232_private {
struct mutex lock;
u8 modem_control;
u8 modem_status;
+ u8 shadow_lcr;
+ speed_t baud_base;
+ struct work_struct lsr_work;
struct work_struct interrupt_work;
struct usb_serial_port *port;
};
-static int calc_baud_divisor(speed_t baudrate)
+static u32 const baudrate_table[] = { 115200, 921600, 1152000, 1500000 };
+static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ,
+ F81232_CLK_18_46_MHZ, F81232_CLK_24_MHZ };
+
+static int calc_baud_divisor(speed_t baudrate, speed_t clockrate)
{
- return DIV_ROUND_CLOSEST(F81232_MAX_BAUDRATE, baudrate);
+ if (!baudrate)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(clockrate, baudrate);
}
static int f81232_get_register(struct usb_serial_port *port, u16 reg, u8 *val)
@@ -127,6 +155,21 @@ static int f81232_set_register(struct usb_serial_port *port, u16 reg, u8 val)
return status;
}
+static int f81232_set_mask_register(struct usb_serial_port *port, u16 reg,
+ u8 mask, u8 val)
+{
+ int status;
+ u8 tmp;
+
+ status = f81232_get_register(port, reg, &tmp);
+ if (status)
+ return status;
+
+ tmp = (tmp & ~mask) | (val & mask);
+
+ return f81232_set_register(port, reg, tmp);
+}
+
static void f81232_read_msr(struct usb_serial_port *port)
{
int status;
@@ -282,6 +325,7 @@ exit:
static void f81232_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
+ struct f81232_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
char tty_flag;
unsigned int i;
@@ -315,6 +359,7 @@ static void f81232_process_read_urb(struct urb *urb)
if (lsr & UART_LSR_OE) {
port->icount.overrun++;
+ schedule_work(&priv->lsr_work);
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
@@ -333,22 +378,72 @@ static void f81232_process_read_urb(struct urb *urb)
static void f81232_break_ctl(struct tty_struct *tty, int break_state)
{
- /* FIXME - Stubbed out for now */
+ struct usb_serial_port *port = tty->driver_data;
+ struct f81232_private *priv = usb_get_serial_port_data(port);
+ int status;
- /*
- * break_state = -1 to turn on break, and 0 to turn off break
- * see drivers/char/tty_io.c to see it used.
- * last_set_data_urb_value NEVER has the break bit set in it.
- */
+ mutex_lock(&priv->lock);
+
+ if (break_state)
+ priv->shadow_lcr |= UART_LCR_SBC;
+ else
+ priv->shadow_lcr &= ~UART_LCR_SBC;
+
+ status = f81232_set_register(port, LINE_CONTROL_REGISTER,
+ priv->shadow_lcr);
+ if (status)
+ dev_err(&port->dev, "set break failed: %d\n", status);
+
+ mutex_unlock(&priv->lock);
}
-static void f81232_set_baudrate(struct usb_serial_port *port, speed_t baudrate)
+static int f81232_find_clk(speed_t baudrate)
{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(baudrate_table); ++idx) {
+ if (baudrate <= baudrate_table[idx] &&
+ baudrate_table[idx] % baudrate == 0)
+ return idx;
+ }
+
+ return -EINVAL;
+}
+
+static void f81232_set_baudrate(struct tty_struct *tty,
+ struct usb_serial_port *port, speed_t baudrate,
+ speed_t old_baudrate)
+{
+ struct f81232_private *priv = usb_get_serial_port_data(port);
u8 lcr;
int divisor;
int status = 0;
+ int i;
+ int idx;
+ speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE };
+
+ for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
+ idx = f81232_find_clk(baud_list[i]);
+ if (idx >= 0) {
+ baudrate = baud_list[i];
+ tty_encode_baud_rate(tty, baudrate, baudrate);
+ break;
+ }
+ }
- divisor = calc_baud_divisor(baudrate);
+ if (idx < 0)
+ return;
+
+ priv->baud_base = baudrate_table[idx];
+ divisor = calc_baud_divisor(baudrate, priv->baud_base);
+
+ status = f81232_set_mask_register(port, F81232_CLK_REGISTER,
+ F81232_CLK_MASK, clock_table[idx]);
+ if (status) {
+ dev_err(&port->dev, "%s failed to set CLK_REG: %d\n",
+ __func__, status);
+ return;
+ }
status = f81232_get_register(port, LINE_CONTROL_REGISTER,
&lcr); /* get LCR */
@@ -435,9 +530,11 @@ static int f81232_port_disable(struct usb_serial_port *port)
static void f81232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
+ struct f81232_private *priv = usb_get_serial_port_data(port);
u8 new_lcr = 0;
int status = 0;
speed_t baudrate;
+ speed_t old_baud;
/* Don't change anything if nothing has changed */
if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
@@ -450,11 +547,12 @@ static void f81232_set_termios(struct tty_struct *tty,
baudrate = tty_get_baud_rate(tty);
if (baudrate > 0) {
- if (baudrate > F81232_MAX_BAUDRATE) {
- baudrate = F81232_MAX_BAUDRATE;
- tty_encode_baud_rate(tty, baudrate, baudrate);
- }
- f81232_set_baudrate(port, baudrate);
+ if (old_termios)
+ old_baud = tty_termios_baud_rate(old_termios);
+ else
+ old_baud = F81232_DEF_BAUDRATE;
+
+ f81232_set_baudrate(tty, port, baudrate, old_baud);
}
if (C_PARENB(tty)) {
@@ -486,11 +584,18 @@ static void f81232_set_termios(struct tty_struct *tty,
break;
}
+ mutex_lock(&priv->lock);
+
+ new_lcr |= (priv->shadow_lcr & UART_LCR_SBC);
status = f81232_set_register(port, LINE_CONTROL_REGISTER, new_lcr);
if (status) {
dev_err(&port->dev, "%s failed to set LCR: %d\n",
__func__, status);
}
+
+ priv->shadow_lcr = new_lcr;
+
+ mutex_unlock(&priv->lock);
}
static int f81232_tiocmget(struct tty_struct *tty)
@@ -556,9 +661,13 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
static void f81232_close(struct usb_serial_port *port)
{
+ struct f81232_private *port_priv = usb_get_serial_port_data(port);
+
f81232_port_disable(port);
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
+ flush_work(&port_priv->interrupt_work);
+ flush_work(&port_priv->lsr_work);
}
static void f81232_dtr_rts(struct usb_serial_port *port, int on)
@@ -587,11 +696,12 @@ static int f81232_get_serial_info(struct tty_struct *tty,
struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
+ struct f81232_private *priv = usb_get_serial_port_data(port);
ss->type = PORT_16550A;
ss->line = port->minor;
ss->port = port->port_number;
- ss->baud_base = F81232_MAX_BAUDRATE;
+ ss->baud_base = priv->baud_base;
return 0;
}
@@ -603,6 +713,21 @@ static void f81232_interrupt_work(struct work_struct *work)
f81232_read_msr(priv->port);
}
+static void f81232_lsr_worker(struct work_struct *work)
+{
+ struct f81232_private *priv;
+ struct usb_serial_port *port;
+ int status;
+ u8 tmp;
+
+ priv = container_of(work, struct f81232_private, lsr_work);
+ port = priv->port;
+
+ status = f81232_get_register(port, LINE_STATUS_REGISTER, &tmp);
+ if (status)
+ dev_warn(&port->dev, "read LSR failed: %d\n", status);
+}
+
static int f81232_port_probe(struct usb_serial_port *port)
{
struct f81232_private *priv;
@@ -613,6 +738,7 @@ static int f81232_port_probe(struct usb_serial_port *port)
mutex_init(&priv->lock);
INIT_WORK(&priv->interrupt_work, f81232_interrupt_work);
+ INIT_WORK(&priv->lsr_work, f81232_lsr_worker);
usb_set_serial_port_data(port, priv);
@@ -632,6 +758,42 @@ static int f81232_port_remove(struct usb_serial_port *port)
return 0;
}
+static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
+{
+ struct usb_serial_port *port = serial->port[0];
+ struct f81232_private *port_priv = usb_get_serial_port_data(port);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
+
+ usb_kill_urb(port->interrupt_in_urb);
+
+ if (port_priv) {
+ flush_work(&port_priv->interrupt_work);
+ flush_work(&port_priv->lsr_work);
+ }
+
+ return 0;
+}
+
+static int f81232_resume(struct usb_serial *serial)
+{
+ struct usb_serial_port *port = serial->port[0];
+ int result;
+
+ if (tty_port_initialized(&port->port)) {
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ if (result) {
+ dev_err(&port->dev, "submit interrupt urb failed: %d\n",
+ result);
+ return result;
+ }
+ }
+
+ return usb_serial_generic_resume(serial);
+}
+
static struct usb_serial_driver f81232_device = {
.driver = {
.owner = THIS_MODULE,
@@ -655,6 +817,8 @@ static struct usb_serial_driver f81232_device = {
.read_int_callback = f81232_read_int_callback,
.port_probe = f81232_port_probe,
.port_remove = f81232_port_remove,
+ .suspend = f81232_suspend,
+ .resume = f81232_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
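
To make the clock selection above concrete, a worked trace of f81232_find_clk() and calc_baud_divisor() using the tables introduced in this patch:

/*
 *   38400 baud: 38400 divides 115200        -> idx 0, 1.846 MHz clock,
 *               baud_base 115200, divisor = DIV_ROUND_CLOSEST(115200, 38400) = 3
 * 1500000 baud: exact match in the table    -> idx 3, 24 MHz clock,
 *               baud_base 1500000, divisor = 1
 * 1000000 baud: divides no table entry      -> -EINVAL; f81232_set_baudrate()
 *               falls back to the old rate, then to F81232_DEF_BAUDRATE (9600)
 */
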
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 2274d9625f63..1be8bea372a2 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -106,12 +106,8 @@ void usb_serial_generic_deregister(void)
int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result = 0;
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
- port->throttled = 0;
- port->throttle_req = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ clear_bit(USB_SERIAL_THROTTLED, &port->flags);
if (port->bulk_in_size)
result = usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
@@ -375,7 +371,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- unsigned long flags;
+ bool stopped = false;
int status = urb->status;
int i;
@@ -383,42 +379,55 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
if (urb == port->read_urbs[i])
break;
}
- set_bit(i, &port->read_urbs_free);
dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
urb->actual_length);
switch (status) {
case 0:
+ usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
+ data);
+ port->serial->type->process_read_urb(urb);
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
__func__, status);
- return;
+ stopped = true;
+ break;
case -EPIPE:
dev_err(&port->dev, "%s - urb stopped: %d\n",
__func__, status);
- return;
+ stopped = true;
+ break;
default:
dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
__func__, status);
- goto resubmit;
+ break;
}
- usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
- port->serial->type->process_read_urb(urb);
+ /*
+ * Make sure URB processing is done before marking as free to avoid
+ * racing with unthrottle() on another CPU. Matches the barriers
+ * implied by the test_and_clear_bit() in
+ * usb_serial_generic_submit_read_urb().
+ */
+ smp_mb__before_atomic();
+ set_bit(i, &port->read_urbs_free);
+ /*
+ * Make sure URB is marked as free before checking the throttled flag
+ * to avoid racing with unthrottle() on another CPU. Matches the
+ * smp_mb() in unthrottle().
+ */
+ smp_mb__after_atomic();
-resubmit:
- /* Throttle the device if requested by tty */
- spin_lock_irqsave(&port->lock, flags);
- port->throttled = port->throttle_req;
- if (!port->throttled) {
- spin_unlock_irqrestore(&port->lock, flags);
- usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
- } else {
- spin_unlock_irqrestore(&port->lock, flags);
- }
+ if (stopped)
+ return;
+
+ if (test_bit(USB_SERIAL_THROTTLED, &port->flags))
+ return;
+
+ usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
@@ -454,10 +463,9 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
default:
dev_err_console(port, "%s - nonzero urb status: %d\n",
__func__, status);
- goto resubmit;
+ break;
}
-resubmit:
usb_serial_generic_write_start(port, GFP_ATOMIC);
usb_serial_port_softint(port);
}
@@ -466,26 +474,24 @@ EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
void usb_serial_generic_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
- port->throttle_req = 1;
- spin_unlock_irqrestore(&port->lock, flags);
+ set_bit(USB_SERIAL_THROTTLED, &port->flags);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_throttle);
void usb_serial_generic_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- int was_throttled;
- spin_lock_irq(&port->lock);
- was_throttled = port->throttled;
- port->throttled = port->throttle_req = 0;
- spin_unlock_irq(&port->lock);
+ clear_bit(USB_SERIAL_THROTTLED, &port->flags);
+
+ /*
+ * Matches the smp_mb__after_atomic() in
+ * usb_serial_generic_read_bulk_callback().
+ */
+ smp_mb();
- if (was_throttled)
- usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
+ usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
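
The throttling rework above replaces the throttled/throttle_req flags and their spinlock with a single USB_SERIAL_THROTTLED bit and a pair of memory barriers. A condensed restatement of the pairing (not new driver code):

/*
 *  read completion handler                   usb_serial_generic_unthrottle()
 *  -----------------------                   -------------------------------
 *  set_bit(i, &port->read_urbs_free);        clear_bit(USB_SERIAL_THROTTLED, ...);
 *  smp_mb__after_atomic();                   smp_mb();
 *  if (test_bit(USB_SERIAL_THROTTLED, ...))  resubmit all free read URBs
 *          return;                           (test_and_clear_bit() per URB)
 *  resubmit URB i;
 *
 * Either the completion handler sees the throttled bit already cleared and
 * resubmits its own URB, or unthrottle() sees the URB marked free and
 * resubmits it, so an unthrottled port never strands a read URB.
 */
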
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 4ca31c0e4174..48a439298a68 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1751,7 +1751,7 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
edge_serial->rxState = EXPECT_HDR2;
break;
}
- /* otherwise, drop on through */
+ /* Fall through */
case EXPECT_HDR2:
edge_serial->rxHeader2 = *buffer;
++buffer;
@@ -1790,29 +1790,20 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
edge_serial->rxHeader2, 0);
edge_serial->rxState = EXPECT_HDR1;
break;
- } else {
- edge_serial->rxPort =
- IOSP_GET_HDR_PORT(edge_serial->rxHeader1);
- edge_serial->rxBytesRemaining =
- IOSP_GET_HDR_DATA_LEN(
- edge_serial->rxHeader1,
- edge_serial->rxHeader2);
- dev_dbg(dev, "%s - Data for Port %u Len %u\n",
- __func__,
- edge_serial->rxPort,
- edge_serial->rxBytesRemaining);
-
- /* ASSERT(DevExt->RxPort < DevExt->NumPorts);
- * ASSERT(DevExt->RxBytesRemaining <
- * IOSP_MAX_DATA_LENGTH);
- */
-
- if (bufferLength == 0) {
- edge_serial->rxState = EXPECT_DATA;
- break;
- }
- /* Else, drop through */
}
+
+ edge_serial->rxPort = IOSP_GET_HDR_PORT(edge_serial->rxHeader1);
+ edge_serial->rxBytesRemaining = IOSP_GET_HDR_DATA_LEN(edge_serial->rxHeader1,
+ edge_serial->rxHeader2);
+ dev_dbg(dev, "%s - Data for Port %u Len %u\n", __func__,
+ edge_serial->rxPort,
+ edge_serial->rxBytesRemaining);
+
+ if (bufferLength == 0) {
+ edge_serial->rxState = EXPECT_DATA;
+ break;
+ }
+ /* Fall through */
case EXPECT_DATA: /* Expect data */
if (bufferLength < edge_serial->rxBytesRemaining) {
rxLen = bufferLength;
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 449e89db9cea..d5bff69b1769 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -942,9 +942,7 @@ static void iuu_close(struct usb_serial_port *port)
static void iuu_init_termios(struct tty_struct *tty)
{
- tty->termios = tty_std_termios;
- tty->termios.c_cflag = CLOCAL | CREAD | CS8 | B9600
- | TIOCM_CTS | CSTOPB | PARENB;
+ tty->termios.c_cflag = B9600 | CS8 | CSTOPB | CREAD | PARENB | CLOCAL;
tty->termios.c_ispeed = 9600;
tty->termios.c_ospeed = 9600;
tty->termios.c_lflag = 0;
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index ae9cb15ee02d..38ae0fc826cc 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -393,10 +393,7 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty)
static void oti6858_init_termios(struct tty_struct *tty)
{
- tty->termios = tty_std_termios;
- tty->termios.c_cflag = B38400 | CS8 | CREAD | HUPCL | CLOCAL;
- tty->termios.c_ispeed = 38400;
- tty->termios.c_ospeed = 38400;
+ tty_encode_baud_rate(tty, 38400, 38400);
}
static void oti6858_set_termios(struct tty_struct *tty,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index bb3f9aa4a909..55122ac84518 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -145,6 +145,8 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define UART_OVERRUN_ERROR 0x40
#define UART_CTS 0x80
+#define PL2303_FLOWCTRL_MASK 0xf0
+
static void pl2303_set_break(struct usb_serial_port *port, bool enable);
enum pl2303_type {
@@ -156,6 +158,7 @@ enum pl2303_type {
struct pl2303_type_data {
speed_t max_baud_rate;
unsigned long quirks;
+ unsigned int no_autoxonxoff:1;
};
struct pl2303_serial_private {
@@ -173,11 +176,12 @@ struct pl2303_private {
static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
[TYPE_01] = {
- .max_baud_rate = 1228800,
- .quirks = PL2303_QUIRK_LEGACY,
+ .max_baud_rate = 1228800,
+ .quirks = PL2303_QUIRK_LEGACY,
+ .no_autoxonxoff = true,
},
[TYPE_HX] = {
- .max_baud_rate = 12000000,
+ .max_baud_rate = 12000000,
},
};
@@ -223,6 +227,29 @@ static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
return 0;
}
+static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
+{
+ int ret = 0;
+ u8 *buf;
+
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+ if (ret)
+ goto out_free;
+
+ *buf &= ~mask;
+ *buf |= val & mask;
+
+ ret = pl2303_vendor_write(serial, reg, *buf);
+out_free:
+ kfree(buf);
+
+ return ret;
+}
+
static int pl2303_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
@@ -552,6 +579,20 @@ static bool pl2303_termios_change(const struct ktermios *a, const struct ktermio
return tty_termios_hw_change(a, b) || ixon_change;
}
+static bool pl2303_enable_xonxoff(struct tty_struct *tty, const struct pl2303_type_data *type)
+{
+ if (!I_IXON(tty) || I_IXANY(tty))
+ return false;
+
+ if (START_CHAR(tty) != 0x11 || STOP_CHAR(tty) != 0x13)
+ return false;
+
+ if (type->no_autoxonxoff)
+ return false;
+
+ return true;
+}
+
static void pl2303_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -678,14 +719,13 @@ static void pl2303_set_termios(struct tty_struct *tty,
if (C_CRTSCTS(tty)) {
if (spriv->quirks & PL2303_QUIRK_LEGACY)
- pl2303_vendor_write(serial, 0x0, 0x41);
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x40);
else
- pl2303_vendor_write(serial, 0x0, 0x61);
- } else if (I_IXON(tty) && !I_IXANY(tty) && START_CHAR(tty) == 0x11 &&
- STOP_CHAR(tty) == 0x13) {
- pl2303_vendor_write(serial, 0x0, 0xc0);
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x60);
+ } else if (pl2303_enable_xonxoff(tty, spriv->type)) {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
} else {
- pl2303_vendor_write(serial, 0x0, 0x0);
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
}
kfree(buf);
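pl2303_update_reg() above turns the flow-control writes into masked read-modify-write accesses, so bits outside PL2303_FLOWCTRL_MASK are preserved instead of being overwritten. A rough illustration of the pattern; ex_read_reg()/ex_write_reg() are placeholder accessors, not pl2303 functions:

#include <linux/types.h>

/* hypothetical bus accessors, assumed to exist for this sketch */
static int ex_read_reg(u8 reg, u8 *val);
static int ex_write_reg(u8 reg, u8 val);

static int ex_update_reg(u8 reg, u8 mask, u8 val)
{
	u8 cur;
	int ret;

	ret = ex_read_reg(reg, &cur);	/* hypothetical read accessor */
	if (ret)
		return ret;

	cur &= ~mask;			/* clear only the masked field */
	cur |= val & mask;		/* insert the new value into it */

	return ex_write_reg(reg, cur);	/* hypothetical write accessor */
}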
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index b42714855364..3bac55bd9bd9 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -281,10 +281,7 @@ static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on)
static void spcp8x5_init_termios(struct tty_struct *tty)
{
- tty->termios = tty_std_termios;
- tty->termios.c_cflag = B115200 | CS8 | CREAD | HUPCL | CLOCAL;
- tty->termios.c_ispeed = 115200;
- tty->termios.c_ospeed = 115200;
+ tty_encode_baud_rate(tty, 115200, 115200);
}
static void spcp8x5_set_termios(struct tty_struct *tty,
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 7e89efbf2c28..676c296103a2 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -164,9 +164,9 @@ void usb_serial_put(struct usb_serial *serial)
* @driver: the driver (USB in our case)
* @tty: the tty being created
*
- * Create the termios objects for this tty. We use the default
+ * Initialise the termios structure for this tty. We use the default
* USB serial settings but permit them to be overridden by
- * serial->type->init_termios.
+ * serial->type->init_termios on first open.
*
* This is the first place a new tty gets used. Hence this is where we
* acquire references to the usb_serial structure and the driver module,
@@ -178,6 +178,7 @@ static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
int idx = tty->index;
struct usb_serial *serial;
struct usb_serial_port *port;
+ bool init_termios;
int retval = -ENODEV;
port = usb_serial_port_get_by_minor(idx);
@@ -192,14 +193,16 @@ static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
if (retval)
goto error_get_interface;
+ init_termios = (driver->termios[idx] == NULL);
+
retval = tty_standard_install(driver, tty);
if (retval)
goto error_init_termios;
mutex_unlock(&serial->disc_mutex);
- /* allow the driver to update the settings */
- if (serial->type->init_termios)
+ /* allow the driver to update the initial settings */
+ if (init_termios && serial->type->init_termios)
serial->type->init_termios(tty);
tty->driver_data = port;
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 31b024441938..cc794e25a0b6 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t)
break;
case RTS51X_STAT_IDLE:
case RTS51X_STAT_SS:
- usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
- if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+ if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
usb_stor_dbg(us, "Ready to enter SS state\n");
rts51x_set_stat(chip, RTS51X_STAT_SS);
/* ignore mass storage interface's children */
pm_suspend_ignore_children(&us->pusb_intf->dev, true);
usb_autopm_put_interface_async(us->pusb_intf);
- usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
}
break;
@@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
int ret;
if (working_scsi(srb)) {
- usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "working scsi, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
- if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+ if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
ret = usb_autopm_get_interface(us->pusb_intf);
usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index a73ea495d5a7..59190d88fa9f 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -65,6 +65,7 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
+ int maxp;
/*
* Set the INQUIRY transfer length to 36. We don't use any of
@@ -74,20 +75,17 @@ static int slave_alloc (struct scsi_device *sdev)
sdev->inquiry_len = 36;
/*
- * USB has unusual DMA-alignment requirements: Although the
- * starting address of each scatter-gather element doesn't matter,
- * the length of each element except the last must be divisible
- * by the Bulk maxpacket value. There's currently no way to
- * express this by block-layer constraints, so we'll cop out
- * and simply require addresses to be aligned at 512-byte
- * boundaries. This is okay since most block I/O involves
- * hardware sectors that are multiples of 512 bytes in length,
- * and since host controllers up through USB 2.0 have maxpacket
- * values no larger than 512.
- *
- * But it doesn't suffice for Wireless USB, where Bulk maxpacket
- * values can be as large as 2048. To make that work properly
- * will require changes to the block layer.
+ * USB has unusual scatter-gather requirements: the length of each
+ * scatterlist element except the last must be divisible by the
+ * Bulk maxpacket value. Fortunately this value is always a
+ * power of 2. Inform the block layer about this requirement.
+ */
+ maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
+ blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+
+ /*
+ * Some host controllers may have alignment requirements.
+ * We'll play it safe by requiring 512-byte alignment always.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
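The rewritten comment states the rule the new call encodes: every scatterlist element except the last must hold a whole number of maxpacket-sized packets, and because maxpacket is a power of two a virt_boundary mask of (maxp - 1) is enough to express that to the block layer. A small illustrative check, not driver code:

#include <linux/types.h>

/* The property blk_queue_virt_boundary(q, maxp - 1) guarantees for the
 * segments the block layer hands to the driver. */
static bool ex_sg_element_ok(unsigned int len, unsigned int maxp, bool is_last)
{
	return is_last || (len & (maxp - 1)) == 0;	/* maxp is a power of 2 */
}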
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 6ac60abd2e15..e605cbc3d8bf 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -194,8 +194,6 @@ int sierra_ms_init(struct us_data *us)
kfree(swocInfo);
}
complete:
- result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
-
- return 0;
+ return device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index a6d68191c861..047c5922618f 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -789,24 +789,33 @@ static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
+ int maxp;
sdev->hostdata = devinfo;
/*
- * USB has unusual DMA-alignment requirements: Although the
- * starting address of each scatter-gather element doesn't matter,
- * the length of each element except the last must be divisible
- * by the Bulk maxpacket value. There's currently no way to
- * express this by block-layer constraints, so we'll cop out
- * and simply require addresses to be aligned at 512-byte
- * boundaries. This is okay since most block I/O involves
- * hardware sectors that are multiples of 512 bytes in length,
- * and since host controllers up through USB 2.0 have maxpacket
- * values no larger than 512.
+ * We have two requirements here. We must satisfy the requirements
+ * of the physical HC and the demands of the protocol, as we
+ * definitely want no additional memory allocation in this path,
+ * which rules out using bounce buffers.
*
- * But it doesn't suffice for Wireless USB, where Bulk maxpacket
- * values can be as large as 2048. To make that work properly
- * will require changes to the block layer.
+ * For a transmission on USB to continue we must never send
+ * a packet that is smaller than maxpacket. Hence the length of each
+ * scatterlist element except the last must be divisible by the
+ * Bulk maxpacket value.
+ * If the HC does not ensure that through SG,
+ * the upper layer must do that. We must assume nothing
+ * about the capabilities of the HC, so we use the most
+ * pessimistic requirement.
+ */
+
+ maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
+ blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+
+ /*
+ * The protocol has no requirements on alignment in the strict sense.
+ * Controllers may or may not have alignment restrictions.
+ * As this is not exported, we use an extremely conservative guess.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index ef2226eb7a33..187690fd1a5b 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -12,4 +12,14 @@ config TYPEC_DP_ALTMODE
To compile this driver as a module, choose M here: the
module will be called typec_displayport.
+config TYPEC_NVIDIA_ALTMODE
+ tristate "NVIDIA Alternate Mode driver"
+ depends on TYPEC_DP_ALTMODE
+ help
+ Latest NVIDIA GPUs support VirtualLink devices. Select this
+ to enable support for VirtualLink devices with NVIDIA GPUs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called typec_nvidia.
+
endmenu
diff --git a/drivers/usb/typec/altmodes/Makefile b/drivers/usb/typec/altmodes/Makefile
index eda8456f1c92..45717548b396 100644
--- a/drivers/usb/typec/altmodes/Makefile
+++ b/drivers/usb/typec/altmodes/Makefile
@@ -2,3 +2,5 @@
obj-$(CONFIG_TYPEC_DP_ALTMODE) += typec_displayport.o
typec_displayport-y := displayport.o
+obj-$(CONFIG_TYPEC_NVIDIA_ALTMODE) += typec_nvidia.o
+typec_nvidia-y := nvidia.o
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 610d790bc9be..4092248a5936 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -14,7 +14,7 @@
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_dp.h>
-#define DP_HEADER(cmd) (VDO(USB_TYPEC_DP_SID, 1, cmd) | \
+#define DP_HEADER(_dp, cmd) (VDO((_dp)->alt->svid, 1, cmd) | \
VDO_OPOS(USB_TYPEC_DP_MODE))
enum {
@@ -100,7 +100,7 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
- else
+ else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
if (!pin_assign)
@@ -155,7 +155,7 @@ static int dp_altmode_configured(struct dp_altmode *dp)
static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
{
- u32 header = DP_HEADER(DP_CMD_CONFIGURE);
+ u32 header = DP_HEADER(dp, DP_CMD_CONFIGURE);
int ret;
ret = typec_altmode_notify(dp->alt, TYPEC_STATE_SAFE, &dp->data);
@@ -193,7 +193,7 @@ static void dp_altmode_work(struct work_struct *work)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
case DP_STATE_UPDATE:
- header = DP_HEADER(DP_CMD_STATUS_UPDATE);
+ header = DP_HEADER(dp, DP_CMD_STATUS_UPDATE);
vdo = 1;
ret = typec_altmode_vdm(dp->alt, header, &vdo, 2);
if (ret)
@@ -507,7 +507,7 @@ static const struct attribute_group dp_altmode_group = {
.attrs = dp_altmode_attrs,
};
-static int dp_altmode_probe(struct typec_altmode *alt)
+int dp_altmode_probe(struct typec_altmode *alt)
{
const struct typec_altmode *port = typec_altmode_get_partner(alt);
struct dp_altmode *dp;
@@ -545,14 +545,16 @@ static int dp_altmode_probe(struct typec_altmode *alt)
return 0;
}
+EXPORT_SYMBOL_GPL(dp_altmode_probe);
-static void dp_altmode_remove(struct typec_altmode *alt)
+void dp_altmode_remove(struct typec_altmode *alt)
{
struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
cancel_work_sync(&dp->work);
}
+EXPORT_SYMBOL_GPL(dp_altmode_remove);
static const struct typec_device_id dp_typec_id[] = {
{ USB_TYPEC_DP_SID, USB_TYPEC_DP_MODE },
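Parametrizing DP_HEADER() on the altmode means the VDM header carries whatever SVID the matched mode advertises, which is what lets the new NVIDIA VirtualLink driver reuse this code. A hedged sketch of the resulting header construction; ex_dp_header() is illustrative, not a driver function:

#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_dp.h>

static u32 ex_dp_header(u16 svid, u8 cmd)
{
	/* same shape as DP_HEADER(dp, cmd), with the SVID spelled out */
	return VDO(svid, 1, cmd) | VDO_OPOS(USB_TYPEC_DP_MODE);
}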
diff --git a/drivers/usb/typec/altmodes/displayport.h b/drivers/usb/typec/altmodes/displayport.h
new file mode 100644
index 000000000000..e120364da9fd
--- /dev/null
+++ b/drivers/usb/typec/altmodes/displayport.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
+int dp_altmode_probe(struct typec_altmode *alt);
+void dp_altmode_remove(struct typec_altmode *alt);
+#else
+static inline int dp_altmode_probe(struct typec_altmode *alt) { return -ENOTSUPP; }
+static inline void dp_altmode_remove(struct typec_altmode *alt) { }
+#endif /* CONFIG_TYPEC_DP_ALTMODE */
diff --git a/drivers/usb/typec/altmodes/nvidia.c b/drivers/usb/typec/altmodes/nvidia.c
new file mode 100644
index 000000000000..c36769736405
--- /dev/null
+++ b/drivers/usb/typec/altmodes/nvidia.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
+ *
+ * NVIDIA USB Type-C Alt Mode Driver
+ */
+#include <linux/module.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include "displayport.h"
+
+static int nvidia_altmode_probe(struct typec_altmode *alt)
+{
+ if (alt->svid == USB_TYPEC_NVIDIA_VLINK_SID)
+ return dp_altmode_probe(alt);
+ else
+ return -ENOTSUPP;
+}
+
+static void nvidia_altmode_remove(struct typec_altmode *alt)
+{
+ if (alt->svid == USB_TYPEC_NVIDIA_VLINK_SID)
+ dp_altmode_remove(alt);
+}
+
+static const struct typec_device_id nvidia_typec_id[] = {
+ { USB_TYPEC_NVIDIA_VLINK_SID, TYPEC_ANY_MODE },
+ { },
+};
+MODULE_DEVICE_TABLE(typec, nvidia_typec_id);
+
+static struct typec_altmode_driver nvidia_altmode_driver = {
+ .id_table = nvidia_typec_id,
+ .probe = nvidia_altmode_probe,
+ .remove = nvidia_altmode_remove,
+ .driver = {
+ .name = "typec_nvidia",
+ .owner = THIS_MODULE,
+ },
+};
+module_typec_altmode_driver(nvidia_altmode_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NVIDIA USB Type-C Alt Mode Driver");
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 64eb5983e17a..9294e85fd34b 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -84,7 +84,8 @@ static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
switch (state) {
case TYPEC_STATE_SAFE:
- new_conf = PI3USB30532_CONF_OPEN;
+ new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
+ PI3USB30532_CONF_OPEN;
break;
case TYPEC_STATE_USB:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index e9344997329c..7302f7501ec9 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -23,6 +23,7 @@
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
@@ -78,6 +79,10 @@ struct fusb302_chip {
struct regulator *vbus;
+ spinlock_t irq_lock;
+ struct work_struct irq_work;
+ bool irq_suspended;
+ bool irq_while_suspended;
int gpio_int_n;
int gpio_int_n_irq;
struct extcon_dev *extcon;
@@ -85,9 +90,6 @@ struct fusb302_chip {
struct workqueue_struct *wq;
struct delayed_work bc_lvl_handler;
- atomic_t pm_suspend;
- atomic_t i2c_busy;
-
/* lock for sharing chip states */
struct mutex lock;
@@ -99,7 +101,6 @@ struct fusb302_chip {
bool intr_comp_chng;
/* port status */
- bool pull_up;
bool vconn_on;
bool vbus_on;
bool charge_on;
@@ -124,13 +125,13 @@ struct fusb302_chip {
*/
#ifdef CONFIG_DEBUG_FS
-
static bool fusb302_log_full(struct fusb302_chip *chip)
{
return chip->logbuffer_tail ==
(chip->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
}
+__printf(2, 0)
static void _fusb302_log(struct fusb302_chip *chip, const char *fmt,
va_list args)
{
@@ -234,43 +235,15 @@ static void fusb302_debugfs_exit(const struct fusb302_chip *chip) { }
#endif
-#define FUSB302_RESUME_RETRY 10
-#define FUSB302_RESUME_RETRY_SLEEP 50
-
-static bool fusb302_is_suspended(struct fusb302_chip *chip)
-{
- int retry_cnt;
-
- for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) {
- if (atomic_read(&chip->pm_suspend)) {
- dev_err(chip->dev, "i2c: pm suspend, retry %d/%d\n",
- retry_cnt + 1, FUSB302_RESUME_RETRY);
- msleep(FUSB302_RESUME_RETRY_SLEEP);
- } else {
- return false;
- }
- }
-
- return true;
-}
-
static int fusb302_i2c_write(struct fusb302_chip *chip,
u8 address, u8 data)
{
int ret = 0;
- atomic_set(&chip->i2c_busy, 1);
-
- if (fusb302_is_suspended(chip)) {
- atomic_set(&chip->i2c_busy, 0);
- return -ETIMEDOUT;
- }
-
ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data);
if (ret < 0)
fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d",
data, address, ret);
- atomic_set(&chip->i2c_busy, 0);
return ret;
}
@@ -282,19 +255,12 @@ static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address,
if (length <= 0)
return ret;
- atomic_set(&chip->i2c_busy, 1);
-
- if (fusb302_is_suspended(chip)) {
- atomic_set(&chip->i2c_busy, 0);
- return -ETIMEDOUT;
- }
ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address,
length, data);
if (ret < 0)
fusb302_log(chip, "cannot block write 0x%02x, len=%d, ret=%d",
address, length, ret);
- atomic_set(&chip->i2c_busy, 0);
return ret;
}
@@ -304,18 +270,10 @@ static int fusb302_i2c_read(struct fusb302_chip *chip,
{
int ret = 0;
- atomic_set(&chip->i2c_busy, 1);
-
- if (fusb302_is_suspended(chip)) {
- atomic_set(&chip->i2c_busy, 0);
- return -ETIMEDOUT;
- }
-
ret = i2c_smbus_read_byte_data(chip->i2c_client, address);
*data = (u8)ret;
if (ret < 0)
fusb302_log(chip, "cannot read %02x, ret=%d", address, ret);
- atomic_set(&chip->i2c_busy, 0);
return ret;
}
@@ -327,12 +285,6 @@ static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address,
if (length <= 0)
return ret;
- atomic_set(&chip->i2c_busy, 1);
-
- if (fusb302_is_suspended(chip)) {
- atomic_set(&chip->i2c_busy, 0);
- return -ETIMEDOUT;
- }
ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address,
length, data);
@@ -348,8 +300,6 @@ static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address,
}
done:
- atomic_set(&chip->i2c_busy, 0);
-
return ret;
}
@@ -519,32 +469,6 @@ static int tcpm_get_current_limit(struct tcpc_dev *dev)
return current_limit;
}
-static int fusb302_set_cc_pull(struct fusb302_chip *chip,
- bool pull_up, bool pull_down)
-{
- int ret = 0;
- u8 data = 0x00;
- u8 mask = FUSB_REG_SWITCHES0_CC1_PU_EN |
- FUSB_REG_SWITCHES0_CC2_PU_EN |
- FUSB_REG_SWITCHES0_CC1_PD_EN |
- FUSB_REG_SWITCHES0_CC2_PD_EN;
-
- if (pull_up)
- data |= (chip->cc_polarity == TYPEC_POLARITY_CC1) ?
- FUSB_REG_SWITCHES0_CC1_PU_EN :
- FUSB_REG_SWITCHES0_CC2_PU_EN;
- if (pull_down)
- data |= FUSB_REG_SWITCHES0_CC1_PD_EN |
- FUSB_REG_SWITCHES0_CC2_PD_EN;
- ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES0,
- mask, data);
- if (ret < 0)
- return ret;
- chip->pull_up = pull_up;
-
- return ret;
-}
-
static int fusb302_set_src_current(struct fusb302_chip *chip,
enum src_current_status status)
{
@@ -634,6 +558,8 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
return ret;
chip->intr_togdone = false;
} else {
+ /* Datasheet says vconn MUST be off when toggling */
+ WARN(chip->vconn_on, "Vconn is on during toggle start");
/* unmask TOGDONE interrupt */
ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA,
FUSB_REG_MASKA_TOGDONE);
@@ -676,26 +602,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
{
struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
tcpc_dev);
+ u8 switches0_mask = FUSB_REG_SWITCHES0_CC1_PU_EN |
+ FUSB_REG_SWITCHES0_CC2_PU_EN |
+ FUSB_REG_SWITCHES0_CC1_PD_EN |
+ FUSB_REG_SWITCHES0_CC2_PD_EN;
+ u8 rd_mda, switches0_data = 0x00;
int ret = 0;
- bool pull_up, pull_down;
- u8 rd_mda;
- enum toggling_mode mode;
mutex_lock(&chip->lock);
switch (cc) {
case TYPEC_CC_OPEN:
- pull_up = false;
- pull_down = false;
break;
case TYPEC_CC_RD:
- pull_up = false;
- pull_down = true;
+ switches0_data |= FUSB_REG_SWITCHES0_CC1_PD_EN |
+ FUSB_REG_SWITCHES0_CC2_PD_EN;
break;
case TYPEC_CC_RP_DEF:
case TYPEC_CC_RP_1_5:
case TYPEC_CC_RP_3_0:
- pull_up = true;
- pull_down = false;
+ switches0_data |= (chip->cc_polarity == TYPEC_POLARITY_CC1) ?
+ FUSB_REG_SWITCHES0_CC1_PU_EN :
+ FUSB_REG_SWITCHES0_CC2_PU_EN;
break;
default:
fusb302_log(chip, "unsupported cc value %s",
@@ -703,34 +630,38 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
ret = -EINVAL;
goto done;
}
+
+ fusb302_log(chip, "cc := %s", typec_cc_status_name[cc]);
+
ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
- fusb302_log(chip, "cannot stop toggling, ret=%d", ret);
+ fusb302_log(chip, "cannot set toggling mode, ret=%d", ret);
goto done;
}
- ret = fusb302_set_cc_pull(chip, pull_up, pull_down);
+
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES0,
+ switches0_mask, switches0_data);
if (ret < 0) {
- fusb302_log(chip,
- "cannot set cc pulling up %s, down %s, ret = %d",
- pull_up ? "True" : "False",
- pull_down ? "True" : "False",
- ret);
+ fusb302_log(chip, "cannot set pull-up/-down, ret = %d", ret);
goto done;
}
/* reset the cc status */
chip->cc1 = TYPEC_CC_OPEN;
chip->cc2 = TYPEC_CC_OPEN;
+
/* adjust current for SRC */
- if (pull_up) {
- ret = fusb302_set_src_current(chip, cc_src_current[cc]);
- if (ret < 0) {
- fusb302_log(chip, "cannot set src current %s, ret=%d",
- typec_cc_status_name[cc], ret);
- goto done;
- }
+ ret = fusb302_set_src_current(chip, cc_src_current[cc]);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set src current %s, ret=%d",
+ typec_cc_status_name[cc], ret);
+ goto done;
}
+
/* enable/disable interrupts, BC_LVL for SNK and COMP_CHNG for SRC */
- if (pull_up) {
+ switch (cc) {
+ case TYPEC_CC_RP_DEF:
+ case TYPEC_CC_RP_1_5:
+ case TYPEC_CC_RP_3_0:
rd_mda = rd_mda_value[cc_src_current[cc]];
ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
if (ret < 0) {
@@ -748,10 +679,9 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
ret);
goto done;
}
- chip->intr_bc_lvl = false;
chip->intr_comp_chng = true;
- }
- if (pull_down) {
+ break;
+ case TYPEC_CC_RD:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
FUSB_REG_MASK_BC_LVL |
FUSB_REG_MASK_COMP_CHNG,
@@ -762,32 +692,10 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
goto done;
}
chip->intr_bc_lvl = true;
- chip->intr_comp_chng = false;
- }
- fusb302_log(chip, "cc := %s", typec_cc_status_name[cc]);
-
- /* Enable detection for fixed SNK or SRC only roles */
- switch (cc) {
- case TYPEC_CC_RD:
- mode = TOGGLING_MODE_SNK;
- break;
- case TYPEC_CC_RP_DEF:
- case TYPEC_CC_RP_1_5:
- case TYPEC_CC_RP_3_0:
- mode = TOGGLING_MODE_SRC;
break;
default:
- mode = TOGGLING_MODE_OFF;
break;
}
-
- if (mode != TOGGLING_MODE_OFF) {
- ret = fusb302_set_toggling(chip, mode);
- if (ret < 0)
- fusb302_log(chip,
- "cannot set fixed role toggling mode, ret=%d",
- ret);
- }
done:
mutex_unlock(&chip->lock);
@@ -1005,13 +913,27 @@ done:
return ret;
}
-static int tcpm_start_drp_toggling(struct tcpc_dev *dev,
- enum typec_cc_status cc)
+static int tcpm_start_toggling(struct tcpc_dev *dev,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc)
{
struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
tcpc_dev);
+ enum toggling_mode mode = TOGGLING_MODE_OFF;
int ret = 0;
+ switch (port_type) {
+ case TYPEC_PORT_SRC:
+ mode = TOGGLING_MODE_SRC;
+ break;
+ case TYPEC_PORT_SNK:
+ mode = TOGGLING_MODE_SNK;
+ break;
+ case TYPEC_PORT_DRP:
+ mode = TOGGLING_MODE_DRP;
+ break;
+ }
+
mutex_lock(&chip->lock);
ret = fusb302_set_src_current(chip, cc_src_current[cc]);
if (ret < 0) {
@@ -1019,7 +941,7 @@ static int tcpm_start_drp_toggling(struct tcpc_dev *dev,
typec_cc_status_name[cc], ret);
goto done;
}
- ret = fusb302_set_toggling(chip, TOGGLING_MODE_DRP);
+ ret = fusb302_set_toggling(chip, mode);
if (ret < 0) {
fusb302_log(chip,
"unable to start drp toggling, ret=%d", ret);
@@ -1217,7 +1139,7 @@ static void init_tcpc_dev(struct tcpc_dev *fusb302_tcpc_dev)
fusb302_tcpc_dev->set_vbus = tcpm_set_vbus;
fusb302_tcpc_dev->set_pd_rx = tcpm_set_pd_rx;
fusb302_tcpc_dev->set_roles = tcpm_set_roles;
- fusb302_tcpc_dev->start_drp_toggling = tcpm_start_drp_toggling;
+ fusb302_tcpc_dev->start_toggling = tcpm_start_toggling;
fusb302_tcpc_dev->pd_transmit = tcpm_pd_transmit;
}
@@ -1226,38 +1148,36 @@ static const char * const cc_polarity_name[] = {
[TYPEC_POLARITY_CC2] = "Polarity_CC2",
};
-static int fusb302_set_cc_polarity(struct fusb302_chip *chip,
- enum typec_cc_polarity cc_polarity)
+static int fusb302_set_cc_polarity_and_pull(struct fusb302_chip *chip,
+ enum typec_cc_polarity cc_polarity,
+ bool pull_up, bool pull_down)
{
int ret = 0;
- u8 switches0_mask = FUSB_REG_SWITCHES0_CC1_PU_EN |
- FUSB_REG_SWITCHES0_CC2_PU_EN |
- FUSB_REG_SWITCHES0_VCONN_CC1 |
- FUSB_REG_SWITCHES0_VCONN_CC2 |
- FUSB_REG_SWITCHES0_MEAS_CC1 |
- FUSB_REG_SWITCHES0_MEAS_CC2;
u8 switches0_data = 0x00;
u8 switches1_mask = FUSB_REG_SWITCHES1_TXCC1_EN |
FUSB_REG_SWITCHES1_TXCC2_EN;
u8 switches1_data = 0x00;
+ if (pull_down)
+ switches0_data |= FUSB_REG_SWITCHES0_CC1_PD_EN |
+ FUSB_REG_SWITCHES0_CC2_PD_EN;
+
if (cc_polarity == TYPEC_POLARITY_CC1) {
- switches0_data = FUSB_REG_SWITCHES0_MEAS_CC1;
+ switches0_data |= FUSB_REG_SWITCHES0_MEAS_CC1;
if (chip->vconn_on)
switches0_data |= FUSB_REG_SWITCHES0_VCONN_CC2;
- if (chip->pull_up)
+ if (pull_up)
switches0_data |= FUSB_REG_SWITCHES0_CC1_PU_EN;
switches1_data = FUSB_REG_SWITCHES1_TXCC1_EN;
} else {
- switches0_data = FUSB_REG_SWITCHES0_MEAS_CC2;
+ switches0_data |= FUSB_REG_SWITCHES0_MEAS_CC2;
if (chip->vconn_on)
switches0_data |= FUSB_REG_SWITCHES0_VCONN_CC1;
- if (chip->pull_up)
+ if (pull_up)
switches0_data |= FUSB_REG_SWITCHES0_CC2_PU_EN;
switches1_data = FUSB_REG_SWITCHES1_TXCC2_EN;
}
- ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES0,
- switches0_mask, switches0_data);
+ ret = fusb302_i2c_write(chip, FUSB_REG_SWITCHES0, switches0_data);
if (ret < 0)
return ret;
ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES1,
@@ -1278,16 +1198,10 @@ static int fusb302_handle_togdone_snk(struct fusb302_chip *chip,
enum typec_cc_polarity cc_polarity;
enum typec_cc_status cc_status_active, cc1, cc2;
- /* set pull_up, pull_down */
- ret = fusb302_set_cc_pull(chip, false, true);
- if (ret < 0) {
- fusb302_log(chip, "cannot set cc to pull down, ret=%d", ret);
- return ret;
- }
- /* set polarity */
+ /* set polarity and pull_up, pull_down */
cc_polarity = (togdone_result == FUSB_REG_STATUS1A_TOGSS_SNK1) ?
TYPEC_POLARITY_CC1 : TYPEC_POLARITY_CC2;
- ret = fusb302_set_cc_polarity(chip, cc_polarity);
+ ret = fusb302_set_cc_polarity_and_pull(chip, cc_polarity, false, true);
if (ret < 0) {
fusb302_log(chip, "cannot set cc polarity %s, ret=%d",
cc_polarity_name[cc_polarity], ret);
@@ -1337,6 +1251,62 @@ static int fusb302_handle_togdone_snk(struct fusb302_chip *chip,
return ret;
}
+/* On error returns < 0, otherwise a typec_cc_status value */
+static int fusb302_get_src_cc_status(struct fusb302_chip *chip,
+ enum typec_cc_polarity cc_polarity,
+ enum typec_cc_status *cc)
+{
+ u8 ra_mda = ra_mda_value[chip->src_current_status];
+ u8 rd_mda = rd_mda_value[chip->src_current_status];
+ u8 switches0_data, status0;
+ int ret;
+
+ /* Step 1: Set switches so that we measure the right CC pin */
+ switches0_data = (cc_polarity == TYPEC_POLARITY_CC1) ?
+ FUSB_REG_SWITCHES0_CC1_PU_EN | FUSB_REG_SWITCHES0_MEAS_CC1 :
+ FUSB_REG_SWITCHES0_CC2_PU_EN | FUSB_REG_SWITCHES0_MEAS_CC2;
+ ret = fusb302_i2c_write(chip, FUSB_REG_SWITCHES0, switches0_data);
+ if (ret < 0)
+ return ret;
+
+ fusb302_i2c_read(chip, FUSB_REG_SWITCHES0, &status0);
+ fusb302_log(chip, "get_src_cc_status switches: 0x%0x", status0);
+
+ /* Step 2: Set comparator voltage to differentiate between Open and Rd */
+ ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(50, 100);
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
+ if (ret < 0)
+ return ret;
+
+ fusb302_log(chip, "get_src_cc_status rd_mda status0: 0x%0x", status0);
+ if (status0 & FUSB_REG_STATUS0_COMP) {
+ *cc = TYPEC_CC_OPEN;
+ return 0;
+ }
+
+ /* Step 3: Set comparator input to differentiate between Rd and Ra. */
+ ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, ra_mda);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(50, 100);
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
+ if (ret < 0)
+ return ret;
+
+ fusb302_log(chip, "get_src_cc_status ra_mda status0: 0x%0x", status0);
+ if (status0 & FUSB_REG_STATUS0_COMP)
+ *cc = TYPEC_CC_RD;
+ else
+ *cc = TYPEC_CC_RA;
+
+ return 0;
+}
+
static int fusb302_handle_togdone_src(struct fusb302_chip *chip,
u8 togdone_result)
{
@@ -1347,77 +1317,62 @@ static int fusb302_handle_togdone_src(struct fusb302_chip *chip,
* - set I_COMP interrupt on
*/
int ret = 0;
- u8 status0;
- u8 ra_mda = ra_mda_value[chip->src_current_status];
u8 rd_mda = rd_mda_value[chip->src_current_status];
- bool ra_comp, rd_comp;
+ enum toggling_mode toggling_mode = chip->toggling_mode;
enum typec_cc_polarity cc_polarity;
- enum typec_cc_status cc_status_active, cc1, cc2;
+ enum typec_cc_status cc1, cc2;
- /* set pull_up, pull_down */
- ret = fusb302_set_cc_pull(chip, true, false);
- if (ret < 0) {
- fusb302_log(chip, "cannot set cc to pull up, ret=%d", ret);
+ /*
+ * The toggle-engine will stop in a src state if it sees either Ra or
+ * Rd. Determine the status for both CC pins, starting with the one
+ * where toggling stopped, as that is where the switches point now.
+ */
+ if (togdone_result == FUSB_REG_STATUS1A_TOGSS_SRC1)
+ ret = fusb302_get_src_cc_status(chip, TYPEC_POLARITY_CC1, &cc1);
+ else
+ ret = fusb302_get_src_cc_status(chip, TYPEC_POLARITY_CC2, &cc2);
+ if (ret < 0)
return ret;
- }
- /* set polarity */
- cc_polarity = (togdone_result == FUSB_REG_STATUS1A_TOGSS_SRC1) ?
- TYPEC_POLARITY_CC1 : TYPEC_POLARITY_CC2;
- ret = fusb302_set_cc_polarity(chip, cc_polarity);
+ /* we must turn off toggling before we can measure the other pin */
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
- fusb302_log(chip, "cannot set cc polarity %s, ret=%d",
- cc_polarity_name[cc_polarity], ret);
+ fusb302_log(chip, "cannot set toggling mode off, ret=%d", ret);
return ret;
}
- /* fusb302_set_cc_polarity() has set the correct measure block */
- ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
- if (ret < 0)
- return ret;
- usleep_range(50, 100);
- ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
+ /* get the status of the other pin */
+ if (togdone_result == FUSB_REG_STATUS1A_TOGSS_SRC1)
+ ret = fusb302_get_src_cc_status(chip, TYPEC_POLARITY_CC2, &cc2);
+ else
+ ret = fusb302_get_src_cc_status(chip, TYPEC_POLARITY_CC1, &cc1);
if (ret < 0)
return ret;
- rd_comp = !!(status0 & FUSB_REG_STATUS0_COMP);
- if (!rd_comp) {
- ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, ra_mda);
- if (ret < 0)
- return ret;
- usleep_range(50, 100);
- ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
- if (ret < 0)
- return ret;
- ra_comp = !!(status0 & FUSB_REG_STATUS0_COMP);
+
+ /* determine polarity based on the status of both pins */
+ if (cc1 == TYPEC_CC_RD &&
+ (cc2 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_RA)) {
+ cc_polarity = TYPEC_POLARITY_CC1;
+ } else if (cc2 == TYPEC_CC_RD &&
+ (cc1 == TYPEC_CC_OPEN || cc1 == TYPEC_CC_RA)) {
+ cc_polarity = TYPEC_POLARITY_CC2;
+ } else {
+ fusb302_log(chip, "unexpected CC status cc1=%s, cc2=%s, restarting toggling",
+ typec_cc_status_name[cc1],
+ typec_cc_status_name[cc2]);
+ return fusb302_set_toggling(chip, toggling_mode);
}
- if (rd_comp)
- cc_status_active = TYPEC_CC_OPEN;
- else if (ra_comp)
- cc_status_active = TYPEC_CC_RD;
- else
- /* Ra is not supported, report as Open */
- cc_status_active = TYPEC_CC_OPEN;
- /* restart toggling if the cc status on the active line is OPEN */
- if (cc_status_active == TYPEC_CC_OPEN) {
- fusb302_log(chip, "restart toggling as CC_OPEN detected");
- ret = fusb302_set_toggling(chip, chip->toggling_mode);
+ /* set polarity and pull_up, pull_down */
+ ret = fusb302_set_cc_polarity_and_pull(chip, cc_polarity, true, false);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set cc polarity %s, ret=%d",
+ cc_polarity_name[cc_polarity], ret);
return ret;
}
/* update tcpm with the new cc value */
- cc1 = (cc_polarity == TYPEC_POLARITY_CC1) ?
- cc_status_active : TYPEC_CC_OPEN;
- cc2 = (cc_polarity == TYPEC_POLARITY_CC2) ?
- cc_status_active : TYPEC_CC_OPEN;
if ((chip->cc1 != cc1) || (chip->cc2 != cc2)) {
chip->cc1 = cc1;
chip->cc2 = cc2;
tcpm_cc_change(chip->tcpm_port);
}
- /* turn off toggling */
- ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
- if (ret < 0) {
- fusb302_log(chip,
- "cannot set toggling mode off, ret=%d", ret);
- return ret;
- }
/* set MDAC to Rd threshold, and unmask I_COMP for unplug detection */
ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
if (ret < 0)
@@ -1427,7 +1382,7 @@ static int fusb302_handle_togdone_src(struct fusb302_chip *chip,
FUSB_REG_MASK_COMP_CHNG);
if (ret < 0) {
fusb302_log(chip,
- "cannot unmask bc_lcl interrupt, ret=%d", ret);
+ "cannot unmask comp_chng interrupt, ret=%d", ret);
return ret;
}
chip->intr_comp_chng = true;
@@ -1532,6 +1487,25 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip,
static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
{
struct fusb302_chip *chip = dev_id;
+ unsigned long flags;
+
+ /* Disable our level triggered IRQ until our irq_work has cleared it */
+ disable_irq_nosync(chip->gpio_int_n_irq);
+
+ spin_lock_irqsave(&chip->irq_lock, flags);
+ if (chip->irq_suspended)
+ chip->irq_while_suspended = true;
+ else
+ schedule_work(&chip->irq_work);
+ spin_unlock_irqrestore(&chip->irq_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void fusb302_irq_work(struct work_struct *work)
+{
+ struct fusb302_chip *chip = container_of(work, struct fusb302_chip,
+ irq_work);
int ret = 0;
u8 interrupt;
u8 interrupta;
@@ -1602,11 +1576,9 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
fusb302_log(chip, "IRQ: COMP_CHNG, comp=%s",
comp_result ? "true" : "false");
if (comp_result) {
- /* cc level > Rd_threashold, detach */
- if (chip->cc_polarity == TYPEC_POLARITY_CC1)
- chip->cc1 = TYPEC_CC_OPEN;
- else
- chip->cc2 = TYPEC_CC_OPEN;
+ /* cc level > Rd_threshold, detach */
+ chip->cc1 = TYPEC_CC_OPEN;
+ chip->cc2 = TYPEC_CC_OPEN;
tcpm_cc_change(chip->tcpm_port);
}
}
@@ -1662,8 +1634,7 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
}
done:
mutex_unlock(&chip->lock);
-
- return IRQ_HANDLED;
+ enable_irq(chip->gpio_int_n_irq);
}
static int init_gpio(struct fusb302_chip *chip)
@@ -1779,6 +1750,8 @@ static int fusb302_probe(struct i2c_client *client,
if (!chip->wq)
return -ENOMEM;
+ spin_lock_init(&chip->irq_lock);
+ INIT_WORK(&chip->irq_work, fusb302_irq_work);
INIT_DELAYED_WORK(&chip->bc_lvl_handler, fusb302_bc_lvl_handler_work);
init_tcpc_dev(&chip->tcpc_dev);
@@ -1798,10 +1771,9 @@ static int fusb302_probe(struct i2c_client *client,
goto destroy_workqueue;
}
- ret = devm_request_threaded_irq(chip->dev, chip->gpio_int_n_irq,
- NULL, fusb302_irq_intn,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
- "fsc_interrupt_int_n", chip);
+ ret = request_irq(chip->gpio_int_n_irq, fusb302_irq_intn,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "fsc_interrupt_int_n", chip);
if (ret < 0) {
dev_err(dev, "cannot request IRQ for GPIO Int_N, ret=%d", ret);
goto tcpm_unregister_port;
@@ -1824,6 +1796,10 @@ static int fusb302_remove(struct i2c_client *client)
{
struct fusb302_chip *chip = i2c_get_clientdata(client);
+ disable_irq_wake(chip->gpio_int_n_irq);
+ free_irq(chip->gpio_int_n_irq, chip);
+ cancel_work_sync(&chip->irq_work);
+ cancel_delayed_work_sync(&chip->bc_lvl_handler);
tcpm_unregister_port(chip->tcpm_port);
destroy_workqueue(chip->wq);
fusb302_debugfs_exit(chip);
@@ -1834,19 +1810,29 @@ static int fusb302_remove(struct i2c_client *client)
static int fusb302_pm_suspend(struct device *dev)
{
struct fusb302_chip *chip = dev->driver_data;
+ unsigned long flags;
- if (atomic_read(&chip->i2c_busy))
- return -EBUSY;
- atomic_set(&chip->pm_suspend, 1);
+ spin_lock_irqsave(&chip->irq_lock, flags);
+ chip->irq_suspended = true;
+ spin_unlock_irqrestore(&chip->irq_lock, flags);
+ /* Make sure any pending irq_work is finished before the bus suspends */
+ flush_work(&chip->irq_work);
return 0;
}
static int fusb302_pm_resume(struct device *dev)
{
struct fusb302_chip *chip = dev->driver_data;
+ unsigned long flags;
- atomic_set(&chip->pm_suspend, 0);
+ spin_lock_irqsave(&chip->irq_lock, flags);
+ if (chip->irq_while_suspended) {
+ schedule_work(&chip->irq_work);
+ chip->irq_while_suspended = false;
+ }
+ chip->irq_suspended = false;
+ spin_unlock_irqrestore(&chip->irq_lock, flags);
return 0;
}
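The fusb302 rework above replaces the suspend retry loop around every I2C access with a simpler scheme: the level-triggered IRQ is disabled in the hard handler, the real work runs from a work item, and an interrupt that arrives while suspended is recorded and replayed from resume. A condensed sketch of that shape with hypothetical ex_* names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct ex_chip {
	int irq;
	spinlock_t irq_lock;
	struct work_struct irq_work;
	bool irq_suspended;
	bool irq_while_suspended;
};

static irqreturn_t ex_irq(int irq, void *dev_id)
{
	struct ex_chip *chip = dev_id;
	unsigned long flags;

	/* keep the level-triggered line quiet until the work has run */
	disable_irq_nosync(chip->irq);

	spin_lock_irqsave(&chip->irq_lock, flags);
	if (chip->irq_suspended)
		chip->irq_while_suspended = true;	/* replay from resume */
	else
		schedule_work(&chip->irq_work);
	spin_unlock_irqrestore(&chip->irq_lock, flags);

	return IRQ_HANDLED;
}

static void ex_irq_work(struct work_struct *work)
{
	struct ex_chip *chip = container_of(work, struct ex_chip, irq_work);

	/* the bus is awake here, so I2C transfers are safe */

	enable_irq(chip->irq);
}

static int ex_resume(struct ex_chip *chip)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->irq_lock, flags);
	if (chip->irq_while_suspended) {
		schedule_work(&chip->irq_work);
		chip->irq_while_suspended = false;
	}
	chip->irq_suspended = false;
	spin_unlock_irqrestore(&chip->irq_lock, flags);

	return 0;
}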
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index ac6b418b15f1..c1f7073a56de 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -100,13 +100,17 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
return 0;
}
-static int tcpci_start_drp_toggling(struct tcpc_dev *tcpc,
- enum typec_cc_status cc)
+static int tcpci_start_toggling(struct tcpc_dev *tcpc,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc)
{
int ret;
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
unsigned int reg = TCPC_ROLE_CTRL_DRP;
+ if (port_type != TYPEC_PORT_DRP)
+ return -EOPNOTSUPP;
+
/* Handle vendor drp toggling */
if (tcpci->data->start_drp_toggling) {
ret = tcpci->data->start_drp_toggling(tcpci, tcpci->data, cc);
@@ -511,7 +515,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.get_cc = tcpci_get_cc;
tcpci->tcpc.set_polarity = tcpci_set_polarity;
tcpci->tcpc.set_vconn = tcpci_set_vconn;
- tcpci->tcpc.start_drp_toggling = tcpci_start_drp_toggling;
+ tcpci->tcpc.start_toggling = tcpci_start_toggling;
tcpci->tcpc.set_pd_rx = tcpci_set_pd_rx;
tcpci->tcpc.set_roles = tcpci_set_roles;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index a2233d72ae7c..fba32d84e578 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -31,7 +31,7 @@
#define FOREACH_STATE(S) \
S(INVALID_STATE), \
- S(DRP_TOGGLING), \
+ S(TOGGLING), \
S(SRC_UNATTACHED), \
S(SRC_ATTACH_WAIT), \
S(SRC_ATTACHED), \
@@ -472,7 +472,7 @@ static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
/* Do not log while disconnected and unattached */
if (tcpm_port_is_disconnected(port) &&
(port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
- port->state == DRP_TOGGLING))
+ port->state == TOGGLING))
return;
va_start(args, fmt);
@@ -2540,20 +2540,16 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge)
return 0;
}
-static bool tcpm_start_drp_toggling(struct tcpm_port *port,
- enum typec_cc_status cc)
+static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
{
int ret;
- if (port->tcpc->start_drp_toggling &&
- port->port_type == TYPEC_PORT_DRP) {
- tcpm_log_force(port, "Start DRP toggling");
- ret = port->tcpc->start_drp_toggling(port->tcpc, cc);
- if (!ret)
- return true;
- }
+ if (!port->tcpc->start_toggling)
+ return false;
- return false;
+ tcpm_log_force(port, "Start toggling");
+ ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
+ return ret == 0;
}
static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
@@ -2847,15 +2843,15 @@ static void run_state_machine(struct tcpm_port *port)
port->enter_state = port->state;
switch (port->state) {
- case DRP_TOGGLING:
+ case TOGGLING:
break;
/* SRC states */
case SRC_UNATTACHED:
if (!port->non_pd_role_swap)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_src_detach(port);
- if (tcpm_start_drp_toggling(port, tcpm_rp_cc(port))) {
- tcpm_set_state(port, DRP_TOGGLING, 0);
+ if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
+ tcpm_set_state(port, TOGGLING, 0);
break;
}
tcpm_set_cc(port, tcpm_rp_cc(port));
@@ -3053,8 +3049,8 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_pps_complete(port, -ENOTCONN);
tcpm_snk_detach(port);
- if (tcpm_start_drp_toggling(port, TYPEC_CC_RD)) {
- tcpm_set_state(port, DRP_TOGGLING, 0);
+ if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
+ tcpm_set_state(port, TOGGLING, 0);
break;
}
tcpm_set_cc(port, TYPEC_CC_RD);
@@ -3621,7 +3617,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
: "connected");
switch (port->state) {
- case DRP_TOGGLING:
+ case TOGGLING:
if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
tcpm_port_is_source(port))
tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 6770afd40765..6b317c150bdd 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -416,12 +416,16 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
return regmap_write(wcove->regmap, USBC_TXCMD, cmd | USBC_TXCMD_START);
}
-static int wcove_start_drp_toggling(struct tcpc_dev *tcpc,
- enum typec_cc_status cc)
+static int wcove_start_toggling(struct tcpc_dev *tcpc,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc)
{
struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
unsigned int usbc_ctrl;
+ if (port_type != TYPEC_PORT_DRP)
+ return -EOPNOTSUPP;
+
usbc_ctrl = USBC_CONTROL1_MODE_DRP | USBC_CONTROL1_DRPTOGGLE_RANDOM;
switch (cc) {
@@ -587,17 +591,14 @@ static const u32 snk_pdo[] = {
PDO_VAR(5000, 12000, 3000),
};
-static struct tcpc_config wcove_typec_config = {
- .src_pdo = src_pdo,
- .nr_src_pdo = ARRAY_SIZE(src_pdo),
- .snk_pdo = snk_pdo,
- .nr_snk_pdo = ARRAY_SIZE(snk_pdo),
-
- .operating_snk_mw = 15000,
-
- .type = TYPEC_PORT_DRP,
- .data = TYPEC_PORT_DRD,
- .default_role = TYPEC_SINK,
+static const struct property_entry wcove_props[] = {
+ PROPERTY_ENTRY_STRING("data-role", "dual"),
+ PROPERTY_ENTRY_STRING("power-role", "dual"),
+ PROPERTY_ENTRY_STRING("try-power-role", "sink"),
+ PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
+ PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 15000),
+ { }
};
static int wcove_typec_probe(struct platform_device *pdev)
@@ -642,23 +643,28 @@ static int wcove_typec_probe(struct platform_device *pdev)
wcove->tcpc.set_polarity = wcove_set_polarity;
wcove->tcpc.set_vconn = wcove_set_vconn;
wcove->tcpc.set_current_limit = wcove_set_current_limit;
- wcove->tcpc.start_drp_toggling = wcove_start_drp_toggling;
+ wcove->tcpc.start_toggling = wcove_start_toggling;
wcove->tcpc.set_pd_rx = wcove_set_pd_rx;
wcove->tcpc.set_roles = wcove_set_roles;
wcove->tcpc.pd_transmit = wcove_pd_transmit;
- wcove->tcpc.config = &wcove_typec_config;
+ wcove->tcpc.fwnode = fwnode_create_software_node(wcove_props, NULL);
+ if (IS_ERR(wcove->tcpc.fwnode))
+ return PTR_ERR(wcove->tcpc.fwnode);
wcove->tcpm = tcpm_register_port(wcove->dev, &wcove->tcpc);
- if (IS_ERR(wcove->tcpm))
+ if (IS_ERR(wcove->tcpm)) {
+ fwnode_remove_software_node(wcove->tcpc.fwnode);
return PTR_ERR(wcove->tcpm);
+ }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
wcove_typec_irq, IRQF_ONESHOT,
"wcove_typec", wcove);
if (ret) {
tcpm_unregister_port(wcove->tcpm);
+ fwnode_remove_software_node(wcove->tcpc.fwnode);
return ret;
}
@@ -678,6 +684,7 @@ static int wcove_typec_remove(struct platform_device *pdev)
regmap_write(wcove->regmap, USBC_IRQMASK2, val | USBC_IRQMASK2_ALL);
tcpm_unregister_port(wcove->tcpm);
+ fwnode_remove_software_node(wcove->tcpc.fwnode);
return 0;
}
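With tcpc_config gone, wcove describes the port through device properties attached to a software node, which then has to be removed on every failure path and in remove(). A hedged sketch of that lifecycle; the ex_* names and the backend call are assumptions:

#include <linux/err.h>
#include <linux/property.h>

static const struct property_entry ex_props[] = {
	PROPERTY_ENTRY_STRING("power-role", "dual"),
	PROPERTY_ENTRY_U32("op-sink-microwatt", 15000),
	{ }
};

static int ex_register_backend(struct fwnode_handle *fwnode);	/* hypothetical */

static int ex_register_port(struct fwnode_handle **fwnode)
{
	int ret;

	*fwnode = fwnode_create_software_node(ex_props, NULL);
	if (IS_ERR(*fwnode))
		return PTR_ERR(*fwnode);

	ret = ex_register_backend(*fwnode);	/* hypothetical consumer */
	if (ret)
		fwnode_remove_software_node(*fwnode);	/* undo on failure */

	return ret;
}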
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index 2f4900b26210..b35e15a1f02c 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -1,12 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS_trace.o := -I$(src)
+CFLAGS_trace.o := -I$(src)
-obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
+obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
-typec_ucsi-y := ucsi.o
+typec_ucsi-y := ucsi.o
-typec_ucsi-$(CONFIG_TRACING) += trace.o
+typec_ucsi-$(CONFIG_TRACING) += trace.o
-obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
+ifneq ($(CONFIG_TYPEC_DP_ALTMODE),)
+ typec_ucsi-y += displayport.o
+endif
-obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
+obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
+obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
new file mode 100644
index 000000000000..6c103697c582
--- /dev/null
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI DisplayPort Alternate Mode Support
+ *
+ * Copyright (C) 2018, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/pd_vdo.h>
+
+#include "ucsi.h"
+
+#define UCSI_CMD_SET_NEW_CAM(_con_num_, _enter_, _cam_, _am_) \
+ (UCSI_SET_NEW_CAM | ((_con_num_) << 16) | ((_enter_) << 23) | \
+ ((_cam_) << 24) | ((u64)(_am_) << 32))
+
+struct ucsi_dp {
+ struct typec_displayport_data data;
+ struct ucsi_connector *con;
+ struct typec_altmode *alt;
+ struct work_struct work;
+ int offset;
+
+ bool override;
+ bool initialized;
+
+ u32 header;
+ u32 *vdo_data;
+ u8 vdo_size;
+};
+
+/*
+ * Note. Alternate mode control is an optional feature in UCSI. This means that
+ * even if the system supports alternate modes, the OS may not be aware of them.
+ *
+ * In most cases, however, the OS will be able to see the supported alternate
+ * modes, but it may still not be able to configure them, or even enter or exit
+ * them. That is because UCSI defines alt mode details and alt mode "overriding"
+ * as separate options.
+ *
+ * In case alt mode details are supported, but overriding is not, the driver
+ * will still display the supported pin assignments and configuration, but any
+ * changes the user attempts to make will fail with -EOPNOTSUPP.
+ */
+
+static int ucsi_displayport_enter(struct typec_altmode *alt)
+{
+ struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
+ struct ucsi_control ctrl;
+ u8 cur = 0;
+ int ret;
+
+ mutex_lock(&dp->con->lock);
+
+ if (!dp->override && dp->initialized) {
+ const struct typec_altmode *p = typec_altmode_get_partner(alt);
+
+ dev_warn(&p->dev,
+ "firmware doesn't support alternate mode overriding\n");
+ mutex_unlock(&dp->con->lock);
+ return -EOPNOTSUPP;
+ }
+
+ UCSI_CMD_GET_CURRENT_CAM(ctrl, dp->con->num);
+ ret = ucsi_send_command(dp->con->ucsi, &ctrl, &cur, sizeof(cur));
+ if (ret < 0) {
+ if (dp->con->ucsi->ppm->data->version > 0x0100) {
+ mutex_unlock(&dp->con->lock);
+ return ret;
+ }
+ cur = 0xff;
+ }
+
+ if (cur != 0xff) {
+ mutex_unlock(&dp->con->lock);
+ return -EBUSY;
+ }
+
+ /*
+ * We can't send the New CAM command to the PPM yet, as it needs the
+ * configuration value as well. Pretend that we have now entered the
+ * mode and let the alt mode driver continue.
+ */
+
+ dp->header = VDO(USB_TYPEC_DP_SID, 1, CMD_ENTER_MODE);
+ dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
+ dp->header |= VDO_CMDT(CMDT_RSP_ACK);
+
+ dp->vdo_data = NULL;
+ dp->vdo_size = 1;
+
+ schedule_work(&dp->work);
+
+ mutex_unlock(&dp->con->lock);
+
+ return 0;
+}
+
+static int ucsi_displayport_exit(struct typec_altmode *alt)
+{
+ struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
+ struct ucsi_control ctrl;
+ int ret = 0;
+
+ mutex_lock(&dp->con->lock);
+
+ if (!dp->override) {
+ const struct typec_altmode *p = typec_altmode_get_partner(alt);
+
+ dev_warn(&p->dev,
+ "firmware doesn't support alternate mode overriding\n");
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
+ ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
+ ret = ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ if (ret < 0)
+ goto out_unlock;
+
+ dp->header = VDO(USB_TYPEC_DP_SID, 1, CMD_EXIT_MODE);
+ dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
+ dp->header |= VDO_CMDT(CMDT_RSP_ACK);
+
+ dp->vdo_data = NULL;
+ dp->vdo_size = 1;
+
+ schedule_work(&dp->work);
+
+out_unlock:
+ mutex_unlock(&dp->con->lock);
+
+ return ret;
+}
+
+/*
+ * We do not actually have access to the Status Update VDO, so we have to guess
+ * things.
+ */
+static int ucsi_displayport_status_update(struct ucsi_dp *dp)
+{
+ u32 cap = dp->alt->vdo;
+
+ dp->data.status = DP_STATUS_ENABLED;
+
+ /*
+ * If pin assignment D is supported, always claim that
+ * Multi-function is preferred.
+ */
+ if (DP_CAP_CAPABILITY(cap) & DP_CAP_UFP_D) {
+ dp->data.status |= DP_STATUS_CON_UFP_D;
+
+ if (DP_CAP_UFP_D_PIN_ASSIGN(cap) & BIT(DP_PIN_ASSIGN_D))
+ dp->data.status |= DP_STATUS_PREFER_MULTI_FUNC;
+ } else {
+ dp->data.status |= DP_STATUS_CON_DFP_D;
+
+ if (DP_CAP_DFP_D_PIN_ASSIGN(cap) & BIT(DP_PIN_ASSIGN_D))
+ dp->data.status |= DP_STATUS_PREFER_MULTI_FUNC;
+ }
+
+ dp->vdo_data = &dp->data.status;
+ dp->vdo_size = 2;
+
+ return 0;
+}
+
+static int ucsi_displayport_configure(struct ucsi_dp *dp)
+{
+ u32 pins = DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
+ struct ucsi_control ctrl;
+
+ if (!dp->override)
+ return 0;
+
+ ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
+
+ return ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+}
+
+static int ucsi_displayport_vdm(struct typec_altmode *alt,
+ u32 header, const u32 *data, int count)
+{
+ struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
+ int cmd_type = PD_VDO_CMDT(header);
+ int cmd = PD_VDO_CMD(header);
+
+ mutex_lock(&dp->con->lock);
+
+ if (!dp->override && dp->initialized) {
+ const struct typec_altmode *p = typec_altmode_get_partner(alt);
+
+ dev_warn(&p->dev,
+ "firmware doesn't support alternate mode overriding\n");
+ mutex_unlock(&dp->con->lock);
+ return -EOPNOTSUPP;
+ }
+
+ switch (cmd_type) {
+ case CMDT_INIT:
+ dp->header = VDO(USB_TYPEC_DP_SID, 1, cmd);
+ dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
+
+ switch (cmd) {
+ case DP_CMD_STATUS_UPDATE:
+ if (ucsi_displayport_status_update(dp))
+ dp->header |= VDO_CMDT(CMDT_RSP_NAK);
+ else
+ dp->header |= VDO_CMDT(CMDT_RSP_ACK);
+ break;
+ case DP_CMD_CONFIGURE:
+ dp->data.conf = *data;
+ if (ucsi_displayport_configure(dp)) {
+ dp->header |= VDO_CMDT(CMDT_RSP_NAK);
+ } else {
+ dp->header |= VDO_CMDT(CMDT_RSP_ACK);
+ if (dp->initialized)
+ ucsi_altmode_update_active(dp->con);
+ else
+ dp->initialized = true;
+ }
+ break;
+ default:
+ dp->header |= VDO_CMDT(CMDT_RSP_ACK);
+ break;
+ }
+
+ schedule_work(&dp->work);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&dp->con->lock);
+
+ return 0;
+}
+
+static const struct typec_altmode_ops ucsi_displayport_ops = {
+ .enter = ucsi_displayport_enter,
+ .exit = ucsi_displayport_exit,
+ .vdm = ucsi_displayport_vdm,
+};
+
+static void ucsi_displayport_work(struct work_struct *work)
+{
+ struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work);
+ int ret;
+
+ mutex_lock(&dp->con->lock);
+
+ ret = typec_altmode_vdm(dp->alt, dp->header,
+ dp->vdo_data, dp->vdo_size);
+ if (ret)
+ dev_err(&dp->alt->dev, "VDM 0x%x failed\n", dp->header);
+
+ dp->vdo_data = NULL;
+ dp->vdo_size = 0;
+ dp->header = 0;
+
+ mutex_unlock(&dp->con->lock);
+}
+
+void ucsi_displayport_remove_partner(struct typec_altmode *alt)
+{
+ struct ucsi_dp *dp;
+
+ if (!alt)
+ return;
+
+ dp = typec_altmode_get_drvdata(alt);
+ dp->data.conf = 0;
+ dp->data.status = 0;
+ dp->initialized = false;
+}
+
+struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
+ bool override, int offset,
+ struct typec_altmode_desc *desc)
+{
+ u8 all_assignments = BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D) |
+ BIT(DP_PIN_ASSIGN_E);
+ struct typec_altmode *alt;
+ struct ucsi_dp *dp;
+
+ /* We can't rely on the firmware to report the capabilities. */
+ desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
+
+ /* Claiming that we support all pin assignments */
+ desc->vdo |= all_assignments << 8;
+ desc->vdo |= all_assignments << 16;
+
+ alt = typec_port_register_altmode(con->port, desc);
+ if (IS_ERR(alt))
+ return alt;
+
+ dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp) {
+ typec_unregister_altmode(alt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK(&dp->work, ucsi_displayport_work);
+ dp->override = override;
+ dp->offset = offset;
+ dp->con = con;
+ dp->alt = alt;
+
+ alt->ops = &ucsi_displayport_ops;
+ typec_altmode_set_drvdata(alt, dp);
+
+ return alt;
+}
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index ffa3b4c3f338..1dabafb74320 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -60,3 +60,15 @@ const char *ucsi_cci_str(u32 cci)
return "";
}
+
+static const char * const ucsi_recipient_strs[] = {
+ [UCSI_RECIPIENT_CON] = "port",
+ [UCSI_RECIPIENT_SOP] = "partner",
+ [UCSI_RECIPIENT_SOP_P] = "plug (prime)",
+ [UCSI_RECIPIENT_SOP_PP] = "plug (double prime)",
+};
+
+const char *ucsi_recipient_str(u8 recipient)
+{
+ return ucsi_recipient_strs[recipient];
+}
diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h
index 5e2906df2db7..783ec9c72055 100644
--- a/drivers/usb/typec/ucsi/trace.h
+++ b/drivers/usb/typec/ucsi/trace.h
@@ -7,6 +7,7 @@
#define __UCSI_TRACE_H
#include <linux/tracepoint.h>
+#include <linux/usb/typec_altmode.h>
const char *ucsi_cmd_str(u64 raw_cmd);
const char *ucsi_ack_str(u8 ack);
@@ -134,6 +135,31 @@ DEFINE_EVENT(ucsi_log_connector_status, ucsi_register_port,
TP_ARGS(port, status)
);
+DECLARE_EVENT_CLASS(ucsi_log_register_altmode,
+ TP_PROTO(u8 recipient, struct typec_altmode *alt),
+ TP_ARGS(recipient, alt),
+ TP_STRUCT__entry(
+ __field(u8, recipient)
+ __field(u16, svid)
+ __field(u8, mode)
+ __field(u32, vdo)
+ ),
+ TP_fast_assign(
+ __entry->recipient = recipient;
+ __entry->svid = alt->svid;
+ __entry->mode = alt->mode;
+ __entry->vdo = alt->vdo;
+ ),
+ TP_printk("%s alt mode: svid %04x, mode %d vdo %x",
+ ucsi_recipient_str(__entry->recipient), __entry->svid,
+ __entry->mode, __entry->vdo)
+);
+
+DEFINE_EVENT(ucsi_log_register_altmode, ucsi_register_altmode,
+ TP_PROTO(u8 recipient, struct typec_altmode *alt),
+ TP_ARGS(recipient, alt)
+);
+
#endif /* __UCSI_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 8d0a6fe748bd..7850b851cecd 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/usb/typec.h>
+#include <linux/usb/typec_dp.h>
#include "ucsi.h"
#include "trace.h"
@@ -39,49 +39,6 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
-enum ucsi_status {
- UCSI_IDLE = 0,
- UCSI_BUSY,
- UCSI_ERROR,
-};
-
-struct ucsi_connector {
- int num;
-
- struct ucsi *ucsi;
- struct work_struct work;
- struct completion complete;
-
- struct typec_port *port;
- struct typec_partner *partner;
-
- struct typec_capability typec_cap;
-
- struct ucsi_connector_status status;
- struct ucsi_connector_capability cap;
-};
-
-struct ucsi {
- struct device *dev;
- struct ucsi_ppm *ppm;
-
- enum ucsi_status status;
- struct completion complete;
- struct ucsi_capability cap;
- struct ucsi_connector *connector;
-
- struct work_struct work;
-
- /* PPM Communication lock */
- struct mutex ppm_lock;
-
- /* PPM communication flags */
- unsigned long flags;
-#define EVENT_PENDING 0
-#define COMMAND_PENDING 1
-#define ACK_PENDING 2
-};
-
static inline int ucsi_sync(struct ucsi *ucsi)
{
if (ucsi->ppm && ucsi->ppm->sync)
@@ -238,8 +195,226 @@ err:
return ret;
}
+int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+ void *retval, size_t size)
+{
+ int ret;
+
+ mutex_lock(&ucsi->ppm_lock);
+ ret = ucsi_run_command(ucsi, ctrl, retval, size);
+ mutex_unlock(&ucsi->ppm_lock);
+
+ return ret;
+}
+
/* -------------------------------------------------------------------------- */
+void ucsi_altmode_update_active(struct ucsi_connector *con)
+{
+ const struct typec_altmode *altmode = NULL;
+ struct ucsi_control ctrl;
+ int ret;
+ u8 cur;
+ int i;
+
+ UCSI_CMD_GET_CURRENT_CAM(ctrl, con->num);
+ ret = ucsi_run_command(con->ucsi, &ctrl, &cur, sizeof(cur));
+ if (ret < 0) {
+ if (con->ucsi->ppm->data->version > 0x0100) {
+ dev_err(con->ucsi->dev,
+ "GET_CURRENT_CAM command failed\n");
+ return;
+ }
+ cur = 0xff;
+ }
+
+ if (cur < UCSI_MAX_ALTMODES)
+ altmode = typec_altmode_get_partner(con->port_altmode[cur]);
+
+ for (i = 0; con->partner_altmode[i]; i++)
+ typec_altmode_update_active(con->partner_altmode[i],
+ con->partner_altmode[i] == altmode);
+}
+
+static u8 ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid)
+{
+ u8 mode = 1;
+ int i;
+
+ for (i = 0; alt[i]; i++)
+ if (alt[i]->svid == svid)
+ mode++;
+
+ return mode;
+}
+
+static int ucsi_next_altmode(struct typec_altmode **alt)
+{
+ int i = 0;
+
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++)
+ if (!alt[i])
+ return i;
+
+ return -ENOENT;
+}
+
+static int ucsi_register_altmode(struct ucsi_connector *con,
+ struct typec_altmode_desc *desc,
+ u8 recipient)
+{
+ struct typec_altmode *alt;
+ bool override;
+ int ret;
+ int i;
+
+ override = !!(con->ucsi->cap.features & UCSI_CAP_ALT_MODE_OVERRIDE);
+
+ switch (recipient) {
+ case UCSI_RECIPIENT_CON:
+ i = ucsi_next_altmode(con->port_altmode);
+ if (i < 0) {
+ ret = i;
+ goto err;
+ }
+
+ desc->mode = ucsi_altmode_next_mode(con->port_altmode,
+ desc->svid);
+
+ switch (desc->svid) {
+ case USB_TYPEC_DP_SID:
+ case USB_TYPEC_NVIDIA_VLINK_SID:
+ alt = ucsi_register_displayport(con, override, i, desc);
+ break;
+ default:
+ alt = typec_port_register_altmode(con->port, desc);
+ break;
+ }
+
+ if (IS_ERR(alt)) {
+ ret = PTR_ERR(alt);
+ goto err;
+ }
+
+ con->port_altmode[i] = alt;
+ break;
+ case UCSI_RECIPIENT_SOP:
+ i = ucsi_next_altmode(con->partner_altmode);
+ if (i < 0) {
+ ret = i;
+ goto err;
+ }
+
+ desc->mode = ucsi_altmode_next_mode(con->partner_altmode,
+ desc->svid);
+
+ alt = typec_partner_register_altmode(con->partner, desc);
+ if (IS_ERR(alt)) {
+ ret = PTR_ERR(alt);
+ goto err;
+ }
+
+ con->partner_altmode[i] = alt;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ trace_ucsi_register_altmode(recipient, alt);
+
+ return 0;
+
+err:
+ dev_err(con->ucsi->dev, "failed to register svid 0x%04x mode %d\n",
+ desc->svid, desc->mode);
+
+ return ret;
+}
+
+static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
+{
+ int max_altmodes = UCSI_MAX_ALTMODES;
+ struct typec_altmode_desc desc;
+ struct ucsi_altmode alt[2];
+ struct ucsi_control ctrl;
+ int num = 1;
+ int ret;
+ int len;
+ int j;
+ int i;
+
+ if (!(con->ucsi->cap.features & UCSI_CAP_ALT_MODE_DETAILS))
+ return 0;
+
+ if (recipient == UCSI_RECIPIENT_SOP && con->partner_altmode[0])
+ return 0;
+
+ if (recipient == UCSI_RECIPIENT_CON)
+ max_altmodes = con->ucsi->cap.num_alt_modes;
+
+ for (i = 0; i < max_altmodes;) {
+ memset(alt, 0, sizeof(alt));
+ UCSI_CMD_GET_ALTERNATE_MODES(ctrl, recipient, con->num, i, 1);
+ len = ucsi_run_command(con->ucsi, &ctrl, alt, sizeof(alt));
+ if (len <= 0)
+ return len;
+
+ /*
+ * This code is requesting one alt mode at a time, but some PPMs
+ * may still return two. If that happens, both alt modes need to be
+ * registered and the offset for the next alt mode has to be
+ * incremented.
+ */
+ num = len / sizeof(alt[0]);
+ i += num;
+
+ for (j = 0; j < num; j++) {
+ if (!alt[j].svid)
+ return 0;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.vdo = alt[j].mid;
+ desc.svid = alt[j].svid;
+ desc.roles = TYPEC_PORT_DRD;
+
+ ret = ucsi_register_altmode(con, &desc, recipient);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
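The comment in ucsi_register_altmodes() above notes that even though one alternate mode is requested at a time, some PPMs return two records per GET_ALTERNATE_MODES, so the loop advances the offset by however many entries actually came back. A minimal userspace sketch of that pagination, assuming a fake transport (mock_get_alt_modes() and struct alt_entry are illustrative stand-ins, and the SVIDs are arbitrary example values):

/*
 * Userspace sketch only -- mock_get_alt_modes() and struct alt_entry are
 * illustrative stand-ins for the PPM transport and the alternate mode
 * record; the SVIDs below are arbitrary example values.
 */
#include <stdio.h>

struct alt_entry {
	unsigned short svid;
	unsigned int mid;
};

/* Fake PPM: three alt modes, and up to two records returned per request. */
static int mock_get_alt_modes(int offset, struct alt_entry *buf, int max)
{
	static const struct alt_entry modes[] = {
		{ 0xff01, 0x1c46 }, { 0x8087, 0x0001 }, { 0x04b4, 0x0002 },
	};
	int n = 0;

	while (offset + n < 3 && n < max && n < 2) {
		buf[n] = modes[offset + n];
		n++;
	}
	return n * (int)sizeof(struct alt_entry);	/* byte count, like the PPM */
}

int main(void)
{
	struct alt_entry alt[2];
	int i = 0, len;

	while ((len = mock_get_alt_modes(i, alt, 2)) > 0) {
		int num = len / (int)sizeof(alt[0]);	/* may be 1 or 2 */
		int j;

		for (j = 0; j < num; j++)
			printf("register SVID %04x at offset %d\n",
			       alt[j].svid, i + j);
		i += num;	/* advance by what was actually returned */
	}
	return 0;
}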
+
+static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
+{
+ const struct typec_altmode *pdev;
+ struct typec_altmode **adev;
+ int i = 0;
+
+ switch (recipient) {
+ case UCSI_RECIPIENT_CON:
+ adev = con->port_altmode;
+ break;
+ case UCSI_RECIPIENT_SOP:
+ adev = con->partner_altmode;
+ break;
+ default:
+ return;
+ }
+
+ while (adev[i]) {
+ if (recipient == UCSI_RECIPIENT_SOP &&
+ (adev[i]->svid == USB_TYPEC_DP_SID ||
+ adev[i]->svid == USB_TYPEC_NVIDIA_VLINK_SID)) {
+ pdev = typec_altmode_get_partner(adev[i]);
+ ucsi_displayport_remove_partner((void *)pdev);
+ }
+ typec_unregister_altmode(adev[i]);
+ adev[i++] = NULL;
+ }
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (con->status.pwr_op_mode) {
@@ -299,10 +474,43 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
if (!con->partner)
return;
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
typec_unregister_partner(con->partner);
con->partner = NULL;
}
+static void ucsi_partner_change(struct ucsi_connector *con)
+{
+ int ret;
+
+ if (!con->partner)
+ return;
+
+ switch (con->status.partner_type) {
+ case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ typec_set_data_role(con->port, TYPEC_HOST);
+ break;
+ case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ typec_set_data_role(con->port, TYPEC_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ /* Complete pending data role swap */
+ if (!completion_done(&con->complete))
+ complete(&con->complete);
+
+ /* Can't rely on the Partner Flags field, so always check the alt modes. */
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
+ if (ret)
+ dev_err(con->ucsi->dev,
+ "con%d: failed to register partner alternate modes\n",
+ con->num);
+ else
+ ucsi_altmode_update_active(con);
+}
+
static void ucsi_connector_change(struct work_struct *work)
{
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
@@ -311,10 +519,10 @@ static void ucsi_connector_change(struct work_struct *work)
struct ucsi_control ctrl;
int ret;
- mutex_lock(&ucsi->ppm_lock);
+ mutex_lock(&con->lock);
UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ ret = ucsi_send_command(ucsi, &ctrl, &con->status, sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
__func__, ret);
@@ -332,23 +540,6 @@ static void ucsi_connector_change(struct work_struct *work)
complete(&con->complete);
}
- if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) {
- switch (con->status.partner_type) {
- case UCSI_CONSTAT_PARTNER_TYPE_UFP:
- typec_set_data_role(con->port, TYPEC_HOST);
- break;
- case UCSI_CONSTAT_PARTNER_TYPE_DFP:
- typec_set_data_role(con->port, TYPEC_DEVICE);
- break;
- default:
- break;
- }
-
- /* Complete pending data role swap */
- if (!completion_done(&con->complete))
- complete(&con->complete);
- }
-
if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
typec_set_pwr_role(con->port, con->status.pwr_dir);
@@ -369,6 +560,19 @@ static void ucsi_connector_change(struct work_struct *work)
ucsi_unregister_partner(con);
}
+ if (con->status.change & UCSI_CONSTAT_CAM_CHANGE) {
+ /*
+ * We don't need the list of currently supported alt modes here, but
+ * run the GET_CAM_SUPPORTED command anyway so the PPM does not get
+ * stuck waiting for it.
+ */
+ UCSI_CMD_GET_CAM_SUPPORTED(ctrl, con->num);
+ ucsi_run_command(con->ucsi, &ctrl, NULL, 0);
+ }
+
+ if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
+ ucsi_partner_change(con);
+
ret = ucsi_ack(ucsi, UCSI_ACK_EVENT);
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
@@ -377,7 +581,7 @@ static void ucsi_connector_change(struct work_struct *work)
out_unlock:
clear_bit(EVENT_PENDING, &ucsi->flags);
- mutex_unlock(&ucsi->ppm_lock);
+ mutex_unlock(&con->lock);
}
/**
@@ -427,7 +631,7 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
UCSI_CMD_CONNECTOR_RESET(ctrl, con, hard);
- return ucsi_run_command(con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(con->ucsi, &ctrl, NULL, 0);
}
static int ucsi_reset_ppm(struct ucsi *ucsi)
@@ -481,15 +685,17 @@ static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
{
int ret;
- ret = ucsi_run_command(con->ucsi, ctrl, NULL, 0);
+ ret = ucsi_send_command(con->ucsi, ctrl, NULL, 0);
if (ret == -ETIMEDOUT) {
struct ucsi_control c;
/* PPM most likely stopped responding. Resetting everything. */
+ mutex_lock(&con->ucsi->ppm_lock);
ucsi_reset_ppm(con->ucsi);
+ mutex_unlock(&con->ucsi->ppm_lock);
UCSI_CMD_SET_NTFY_ENABLE(c, UCSI_ENABLE_NTFY_ALL);
- ucsi_run_command(con->ucsi, &c, NULL, 0);
+ ucsi_send_command(con->ucsi, &c, NULL, 0);
ucsi_reset_connector(con, true);
}
@@ -504,10 +710,12 @@ ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
struct ucsi_control ctrl;
int ret = 0;
- if (!con->partner)
- return -ENOTCONN;
+ mutex_lock(&con->lock);
- mutex_lock(&con->ucsi->ppm_lock);
+ if (!con->partner) {
+ ret = -ENOTCONN;
+ goto out_unlock;
+ }
if ((con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
role == TYPEC_DEVICE) ||
@@ -520,18 +728,14 @@ ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
if (ret < 0)
goto out_unlock;
- mutex_unlock(&con->ucsi->ppm_lock);
-
if (!wait_for_completion_timeout(&con->complete,
msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
- return -ETIMEDOUT;
-
- return 0;
+ ret = -ETIMEDOUT;
out_unlock:
- mutex_unlock(&con->ucsi->ppm_lock);
+ mutex_unlock(&con->lock);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int
@@ -541,10 +745,12 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
struct ucsi_control ctrl;
int ret = 0;
- if (!con->partner)
- return -ENOTCONN;
+ mutex_lock(&con->lock);
- mutex_lock(&con->ucsi->ppm_lock);
+ if (!con->partner) {
+ ret = -ENOTCONN;
+ goto out_unlock;
+ }
if (con->status.pwr_dir == role)
goto out_unlock;
@@ -554,13 +760,11 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
if (ret < 0)
goto out_unlock;
- mutex_unlock(&con->ucsi->ppm_lock);
-
if (!wait_for_completion_timeout(&con->complete,
- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
- return -ETIMEDOUT;
-
- mutex_lock(&con->ucsi->ppm_lock);
+ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) {
+ ret = -ETIMEDOUT;
+ goto out_unlock;
+ }
/* Something has gone wrong while swapping the role */
if (con->status.pwr_op_mode != UCSI_CONSTAT_PWR_OPMODE_PD) {
@@ -569,7 +773,7 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
}
out_unlock:
- mutex_unlock(&con->ucsi->ppm_lock);
+ mutex_unlock(&con->lock);
return ret;
}
@@ -595,6 +799,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
INIT_WORK(&con->work, ucsi_connector_change);
init_completion(&con->complete);
+ mutex_init(&con->lock);
con->num = index + 1;
con->ucsi = ucsi;
@@ -636,6 +841,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
if (IS_ERR(con->port))
return PTR_ERR(con->port);
+ /* Alternate modes */
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
+ if (ret)
+ dev_err(ucsi->dev, "con%d: failed to register alt modes\n",
+ con->num);
+
/* Get the status */
UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status));
@@ -662,6 +873,16 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
if (con->status.connected)
ucsi_register_partner(con);
+ if (con->partner) {
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
+ if (ret)
+ dev_err(ucsi->dev,
+ "con%d: failed to register alternate modes\n",
+ con->num);
+ else
+ ucsi_altmode_update_active(con);
+ }
+
trace_ucsi_register_port(con->num, &con->status);
return 0;
@@ -730,6 +951,7 @@ static void ucsi_init(struct work_struct *work)
err_unregister:
for (con = ucsi->connector; con->port; con++) {
ucsi_unregister_partner(con);
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
typec_unregister_port(con->port);
con->port = NULL;
}
@@ -788,17 +1010,15 @@ void ucsi_unregister_ppm(struct ucsi *ucsi)
/* Make sure that we are not in the middle of driver initialization */
cancel_work_sync(&ucsi->work);
- mutex_lock(&ucsi->ppm_lock);
-
/* Disable everything except command complete notification */
UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE)
- ucsi_run_command(ucsi, &ctrl, NULL, 0);
-
- mutex_unlock(&ucsi->ppm_lock);
+ ucsi_send_command(ucsi, &ctrl, NULL, 0);
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
typec_unregister_port(ucsi->connector[i].port);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 53b80f40a908..1e2981aef629 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/types.h>
+#include <linux/usb/typec.h>
/* -------------------------------------------------------------------------- */
@@ -60,6 +61,20 @@ struct ucsi_uor_cmd {
u16:6; /* reserved */
} __packed;
+/* Get Alternate Modes Command structure */
+struct ucsi_altmode_cmd {
+ u8 cmd;
+ u8 length;
+ u8 recipient;
+#define UCSI_RECIPIENT_CON 0
+#define UCSI_RECIPIENT_SOP 1
+#define UCSI_RECIPIENT_SOP_P 2
+#define UCSI_RECIPIENT_SOP_PP 3
+ u8 con_num;
+ u8 offset;
+ u8 num_altmodes;
+} __packed;
+
struct ucsi_control {
union {
u64 raw_cmd;
@@ -67,6 +82,7 @@ struct ucsi_control {
struct ucsi_uor_cmd uor;
struct ucsi_ack_cmd ack;
struct ucsi_con_rst con_rst;
+ struct ucsi_altmode_cmd alt;
};
};
@@ -112,6 +128,30 @@ struct ucsi_control {
(_ctrl_).cmd.data = _con_; \
}
+/* Helper for preparing ucsi_control for GET_ALTERNATE_MODES command. */
+#define UCSI_CMD_GET_ALTERNATE_MODES(_ctrl_, _r_, _con_num_, _o_, _num_)\
+{ \
+ __UCSI_CMD((_ctrl_), UCSI_GET_ALTERNATE_MODES) \
+ _ctrl_.alt.recipient = (_r_); \
+ _ctrl_.alt.con_num = (_con_num_); \
+ _ctrl_.alt.offset = (_o_); \
+ _ctrl_.alt.num_altmodes = (_num_) - 1; \
+}
+
+/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
+#define UCSI_CMD_GET_CAM_SUPPORTED(_ctrl_, _con_) \
+{ \
+ __UCSI_CMD((_ctrl_), UCSI_GET_CAM_SUPPORTED) \
+ _ctrl_.cmd.data = (_con_); \
+}
+
+/* Helper for preparing ucsi_control for GET_CURRENT_CAM command. */
+#define UCSI_CMD_GET_CURRENT_CAM(_ctrl_, _con_) \
+{ \
+ __UCSI_CMD((_ctrl_), UCSI_GET_CURRENT_CAM) \
+ _ctrl_.cmd.data = (_con_); \
+}
+
/* Helper for preparing ucsi_control for GET_CONNECTOR_STATUS command. */
#define UCSI_CMD_GET_CONNECTOR_STATUS(_ctrl_, _con_) \
{ \
@@ -334,4 +374,82 @@ struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm);
void ucsi_unregister_ppm(struct ucsi *ucsi);
void ucsi_notify(struct ucsi *ucsi);
+/* -------------------------------------------------------------------------- */
+
+enum ucsi_status {
+ UCSI_IDLE = 0,
+ UCSI_BUSY,
+ UCSI_ERROR,
+};
+
+struct ucsi {
+ struct device *dev;
+ struct ucsi_ppm *ppm;
+
+ enum ucsi_status status;
+ struct completion complete;
+ struct ucsi_capability cap;
+ struct ucsi_connector *connector;
+
+ struct work_struct work;
+
+ /* PPM Communication lock */
+ struct mutex ppm_lock;
+
+ /* PPM communication flags */
+ unsigned long flags;
+#define EVENT_PENDING 0
+#define COMMAND_PENDING 1
+#define ACK_PENDING 2
+};
+
+#define UCSI_MAX_SVID 5
+#define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
+
+struct ucsi_connector {
+ int num;
+
+ struct ucsi *ucsi;
+ struct mutex lock; /* port lock */
+ struct work_struct work;
+ struct completion complete;
+
+ struct typec_port *port;
+ struct typec_partner *partner;
+
+ struct typec_altmode *port_altmode[UCSI_MAX_ALTMODES];
+ struct typec_altmode *partner_altmode[UCSI_MAX_ALTMODES];
+
+ struct typec_capability typec_cap;
+
+ struct ucsi_connector_status status;
+ struct ucsi_connector_capability cap;
+};
+
+int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+ void *retval, size_t size);
+
+void ucsi_altmode_update_active(struct ucsi_connector *con);
+
+#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
+struct typec_altmode *
+ucsi_register_displayport(struct ucsi_connector *con,
+ bool override, int offset,
+ struct typec_altmode_desc *desc);
+
+void ucsi_displayport_remove_partner(struct typec_altmode *adev);
+
+#else
+static inline struct typec_altmode *
+ucsi_register_displayport(struct ucsi_connector *con,
+ bool override, int offset,
+ struct typec_altmode_desc *desc)
+{
+ return NULL;
+}
+
+static inline void
+ucsi_displayport_remove_partner(struct typec_altmode *adev) { }
+#endif /* CONFIG_TYPEC_DP_ALTMODE */
+
#endif /* __DRIVER_USB_TYPEC_UCSI_H */
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index de8a43bdff68..9d46aa9e4e35 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -9,6 +9,7 @@
*/
#include <linux/acpi.h>
#include <linux/delay.h>
+#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -17,18 +18,172 @@
#include <asm/unaligned.h>
#include "ucsi.h"
+enum enum_fw_mode {
+ BOOT, /* bootloader */
+ FW1, /* FW partition-1 (contains secondary fw) */
+ FW2, /* FW partition-2 (contains primary fw) */
+ FW_INVALID,
+};
+
+#define CCGX_RAB_DEVICE_MODE 0x0000
+#define CCGX_RAB_INTR_REG 0x0006
+#define DEV_INT BIT(0)
+#define PORT0_INT BIT(1)
+#define PORT1_INT BIT(2)
+#define UCSI_READ_INT BIT(7)
+#define CCGX_RAB_JUMP_TO_BOOT 0x0007
+#define TO_BOOT 'J'
+#define TO_ALT_FW 'A'
+#define CCGX_RAB_RESET_REQ 0x0008
+#define RESET_SIG 'R'
+#define CMD_RESET_I2C 0x0
+#define CMD_RESET_DEV 0x1
+#define CCGX_RAB_ENTER_FLASHING 0x000A
+#define FLASH_ENTER_SIG 'P'
+#define CCGX_RAB_VALIDATE_FW 0x000B
+#define CCGX_RAB_FLASH_ROW_RW 0x000C
+#define FLASH_SIG 'F'
+#define FLASH_RD_CMD 0x0
+#define FLASH_WR_CMD 0x1
+#define FLASH_FWCT1_WR_CMD 0x2
+#define FLASH_FWCT2_WR_CMD 0x3
+#define FLASH_FWCT_SIG_WR_CMD 0x4
+#define CCGX_RAB_READ_ALL_VER 0x0010
+#define CCGX_RAB_READ_FW2_VER 0x0020
+#define CCGX_RAB_UCSI_CONTROL 0x0039
+#define CCGX_RAB_UCSI_CONTROL_START BIT(0)
+#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
+#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
+#define REG_FLASH_RW_MEM 0x0200
+#define DEV_REG_IDX CCGX_RAB_DEVICE_MODE
+#define CCGX_RAB_PDPORT_ENABLE 0x002C
+#define PDPORT_1 BIT(0)
+#define PDPORT_2 BIT(1)
+#define CCGX_RAB_RESPONSE 0x007E
+#define ASYNC_EVENT BIT(7)
+
+/* CCGx events & async msg codes */
+#define RESET_COMPLETE 0x80
+#define EVENT_INDEX RESET_COMPLETE
+#define PORT_CONNECT_DET 0x84
+#define PORT_DISCONNECT_DET 0x85
+#define ROLE_SWAP_COMPELETE 0x87
+
+/* ccg firmware */
+#define CYACD_LINE_SIZE 527
+#define CCG4_ROW_SIZE 256
+#define FW1_METADATA_ROW 0x1FF
+#define FW2_METADATA_ROW 0x1FE
+#define FW_CFG_TABLE_SIG_SIZE 256
+
+static int secondary_fw_min_ver = 41;
+
+enum enum_flash_mode {
+ SECONDARY_BL, /* update secondary using bootloader */
+ PRIMARY, /* update primary using secondary */
+ SECONDARY, /* update secondary using primary */
+ FLASH_NOT_NEEDED, /* update not required */
+ FLASH_INVALID,
+};
+
+static const char * const ccg_fw_names[] = {
+ "ccg_boot.cyacd",
+ "ccg_primary.cyacd",
+ "ccg_secondary.cyacd"
+};
+
+struct ccg_dev_info {
+#define CCG_DEVINFO_FWMODE_SHIFT (0)
+#define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
+#define CCG_DEVINFO_PDPORTS_SHIFT (2)
+#define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
+ u8 mode;
+ u8 bl_mode;
+ __le16 silicon_id;
+ __le16 bl_last_row;
+} __packed;
+
+struct version_format {
+ __le16 build;
+ u8 patch;
+ u8 ver;
+#define CCG_VERSION_MIN_SHIFT (0)
+#define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
+#define CCG_VERSION_MAJ_SHIFT (4)
+#define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
+} __packed;
+
+struct version_info {
+ struct version_format base;
+ struct version_format app;
+};
+
+struct fw_config_table {
+ u32 identity;
+ u16 table_size;
+ u8 fwct_version;
+ u8 is_key_change;
+ u8 guid[16];
+ struct version_format base;
+ struct version_format app;
+ u8 primary_fw_digest[32];
+ u32 key_exp_length;
+ u8 key_modulus[256];
+ u8 key_exp[4];
+};
+
+/* CCGx response codes */
+enum ccg_resp_code {
+ CMD_NO_RESP = 0x00,
+ CMD_SUCCESS = 0x02,
+ FLASH_DATA_AVAILABLE = 0x03,
+ CMD_INVALID = 0x05,
+ FLASH_UPDATE_FAIL = 0x07,
+ INVALID_FW = 0x08,
+ INVALID_ARG = 0x09,
+ CMD_NOT_SUPPORT = 0x0A,
+ TRANSACTION_FAIL = 0x0C,
+ PD_CMD_FAIL = 0x0D,
+ UNDEF_ERROR = 0x0F,
+ INVALID_RESP = 0x10,
+};
+
+#define CCG_EVENT_MAX (EVENT_INDEX + 43)
+
+struct ccg_cmd {
+ u16 reg;
+ u32 data;
+ int len;
+ u32 delay; /* ms delay for cmd timeout */
+};
+
+struct ccg_resp {
+ u8 code;
+ u8 length;
+};
+
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
struct ucsi_ppm ppm;
struct i2c_client *client;
-};
+ struct ccg_dev_info info;
+ /* version info for boot, primary and secondary */
+ struct version_info version[FW2 + 1];
+ /* CCG HPI communication flags */
+ unsigned long flags;
+#define RESET_PENDING 0
+#define DEV_CMD_PENDING 1
+ struct ccg_resp dev_resp;
+ u8 cmd_resp;
+ int port_num;
+ int irq;
+ struct work_struct work;
+ struct mutex lock; /* to sync between user and driver thread */
-#define CCGX_RAB_INTR_REG 0x06
-#define CCGX_RAB_UCSI_CONTROL 0x39
-#define CCGX_RAB_UCSI_CONTROL_START BIT(0)
-#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
-#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
+ /* fw build with vendor information */
+ u16 fw_build;
+};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
{
@@ -220,6 +375,687 @@ static irqreturn_t ccg_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static int get_fw_info(struct ucsi_ccg *uc)
+{
+ int err;
+
+ err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)(&uc->version),
+ sizeof(uc->version));
+ if (err < 0)
+ return err;
+
+ err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
+ sizeof(uc->info));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static inline bool invalid_async_evt(int code)
+{
+ return (code >= CCG_EVENT_MAX) || (code < EVENT_INDEX);
+}
+
+static void ccg_process_response(struct ucsi_ccg *uc)
+{
+ struct device *dev = uc->dev;
+
+ if (uc->dev_resp.code & ASYNC_EVENT) {
+ if (uc->dev_resp.code == RESET_COMPLETE) {
+ if (test_bit(RESET_PENDING, &uc->flags))
+ uc->cmd_resp = uc->dev_resp.code;
+ get_fw_info(uc);
+ }
+ if (invalid_async_evt(uc->dev_resp.code))
+ dev_err(dev, "invalid async evt %d\n",
+ uc->dev_resp.code);
+ } else {
+ if (test_bit(DEV_CMD_PENDING, &uc->flags)) {
+ uc->cmd_resp = uc->dev_resp.code;
+ clear_bit(DEV_CMD_PENDING, &uc->flags);
+ } else {
+ dev_err(dev, "dev resp 0x%04x but no cmd pending\n",
+ uc->dev_resp.code);
+ }
+ }
+}
+
+static int ccg_read_response(struct ucsi_ccg *uc)
+{
+ unsigned long target = jiffies + msecs_to_jiffies(1000);
+ struct device *dev = uc->dev;
+ u8 intval;
+ int status;
+
+ /* wait for interrupt status to get updated */
+ do {
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &intval,
+ sizeof(intval));
+ if (status < 0)
+ return status;
+
+ if (intval & DEV_INT)
+ break;
+ usleep_range(500, 600);
+ } while (time_is_after_jiffies(target));
+
+ if (time_is_before_jiffies(target)) {
+ dev_err(dev, "response timeout error\n");
+ return -ETIME;
+ }
+
+ status = ccg_read(uc, CCGX_RAB_RESPONSE, (u8 *)&uc->dev_resp,
+ sizeof(uc->dev_resp));
+ if (status < 0)
+ return status;
+
+ status = ccg_write(uc, CCGX_RAB_INTR_REG, &intval, sizeof(intval));
+ if (status < 0)
+ return status;
+
+ return 0;
+}
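ccg_read_response() above waits for the device interrupt by polling CCGX_RAB_INTR_REG within a roughly one-second budget, pausing 500-600us between reads. A userspace analog of that bounded polling loop, assuming a hypothetical read_intr() in place of the real I2C register read:

/*
 * Userspace analog only -- read_intr() stands in for the I2C read of
 * CCGX_RAB_INTR_REG and simply reports "ready" after a few calls.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

#define DEV_INT 0x01

static int read_intr(unsigned char *val)
{
	static int calls;

	*val = (++calls >= 4) ? DEV_INT : 0;	/* pretend the 4th read succeeds */
	return 0;
}

static double now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

int main(void)
{
	struct timespec pause = { .tv_sec = 0, .tv_nsec = 500 * 1000 };
	double deadline = now_ms() + 1000;	/* ~1s budget, as in the driver */
	unsigned char intval = 0;
	int ready = 0;

	do {
		if (read_intr(&intval))
			return 1;
		if (intval & DEV_INT) {
			ready = 1;
			break;
		}
		nanosleep(&pause, NULL);	/* ~500us between polls */
	} while (now_ms() < deadline);

	printf(ready ? "device responded\n" : "response timeout\n");
	return !ready;
}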
+
+/* Caller must hold uc->lock */
+static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd)
+{
+ struct device *dev = uc->dev;
+ int ret;
+
+ switch (cmd->reg & 0xF000) {
+ case DEV_REG_IDX:
+ set_bit(DEV_CMD_PENDING, &uc->flags);
+ break;
+ default:
+ dev_err(dev, "invalid cmd register\n");
+ break;
+ }
+
+ ret = ccg_write(uc, cmd->reg, (u8 *)&cmd->data, cmd->len);
+ if (ret < 0)
+ return ret;
+
+ msleep(cmd->delay);
+
+ ret = ccg_read_response(uc);
+ if (ret < 0) {
+ dev_err(dev, "response read error\n");
+ switch (cmd->reg & 0xF000) {
+ case DEV_REG_IDX:
+ clear_bit(DEV_CMD_PENDING, &uc->flags);
+ break;
+ default:
+ dev_err(dev, "invalid cmd register\n");
+ break;
+ }
+ return -EIO;
+ }
+ ccg_process_response(uc);
+
+ return uc->cmd_resp;
+}
+
+static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc)
+{
+ struct ccg_cmd cmd;
+ int ret;
+
+ cmd.reg = CCGX_RAB_ENTER_FLASHING;
+ cmd.data = FLASH_ENTER_SIG;
+ cmd.len = 1;
+ cmd.delay = 50;
+
+ mutex_lock(&uc->lock);
+
+ ret = ccg_send_command(uc, &cmd);
+
+ mutex_unlock(&uc->lock);
+
+ if (ret != CMD_SUCCESS) {
+ dev_err(uc->dev, "enter flashing failed ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ccg_cmd_reset(struct ucsi_ccg *uc)
+{
+ struct ccg_cmd cmd;
+ u8 *p;
+ int ret;
+
+ p = (u8 *)&cmd.data;
+ cmd.reg = CCGX_RAB_RESET_REQ;
+ p[0] = RESET_SIG;
+ p[1] = CMD_RESET_DEV;
+ cmd.len = 2;
+ cmd.delay = 5000;
+
+ mutex_lock(&uc->lock);
+
+ set_bit(RESET_PENDING, &uc->flags);
+
+ ret = ccg_send_command(uc, &cmd);
+ if (ret != RESET_COMPLETE)
+ goto err_clear_flag;
+
+ ret = 0;
+
+err_clear_flag:
+ clear_bit(RESET_PENDING, &uc->flags);
+
+ mutex_unlock(&uc->lock);
+
+ return ret;
+}
+
+static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable)
+{
+ struct ccg_cmd cmd;
+ int ret;
+
+ cmd.reg = CCGX_RAB_PDPORT_ENABLE;
+ if (enable)
+ cmd.data = (uc->port_num == 1) ?
+ PDPORT_1 : (PDPORT_1 | PDPORT_2);
+ else
+ cmd.data = 0x0;
+ cmd.len = 1;
+ cmd.delay = 10;
+
+ mutex_lock(&uc->lock);
+
+ ret = ccg_send_command(uc, &cmd);
+
+ mutex_unlock(&uc->lock);
+
+ if (ret != CMD_SUCCESS) {
+ dev_err(uc->dev, "port control failed ret=%d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode)
+{
+ struct ccg_cmd cmd;
+ int ret;
+
+ cmd.reg = CCGX_RAB_JUMP_TO_BOOT;
+
+ if (bl_mode)
+ cmd.data = TO_BOOT;
+ else
+ cmd.data = TO_ALT_FW;
+
+ cmd.len = 1;
+ cmd.delay = 100;
+
+ mutex_lock(&uc->lock);
+
+ set_bit(RESET_PENDING, &uc->flags);
+
+ ret = ccg_send_command(uc, &cmd);
+ if (ret != RESET_COMPLETE)
+ goto err_clear_flag;
+
+ ret = 0;
+
+err_clear_flag:
+ clear_bit(RESET_PENDING, &uc->flags);
+
+ mutex_unlock(&uc->lock);
+
+ return ret;
+}
+
+static int
+ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row,
+ const void *data, u8 fcmd)
+{
+ struct i2c_client *client = uc->client;
+ struct ccg_cmd cmd;
+ u8 buf[CCG4_ROW_SIZE + 2];
+ u8 *p;
+ int ret;
+
+ /* Copy the data into the flash read/write memory. */
+ put_unaligned_le16(REG_FLASH_RW_MEM, buf);
+
+ memcpy(buf + 2, data, CCG4_ROW_SIZE);
+
+ mutex_lock(&uc->lock);
+
+ ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2);
+ if (ret != CCG4_ROW_SIZE + 2) {
+ dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n", ret);
+ mutex_unlock(&uc->lock);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ /*
+ * Use the FLASH_ROW_READ_WRITE register to trigger writing of the
+ * data to the desired flash row.
+ */
+ p = (u8 *)&cmd.data;
+ cmd.reg = CCGX_RAB_FLASH_ROW_RW;
+ p[0] = FLASH_SIG;
+ p[1] = fcmd;
+ put_unaligned_le16(row, &p[2]);
+ cmd.len = 4;
+ cmd.delay = 50;
+ if (fcmd == FLASH_FWCT_SIG_WR_CMD)
+ cmd.delay += 400;
+ if (row == 510)
+ cmd.delay += 220;
+ ret = ccg_send_command(uc, &cmd);
+
+ mutex_unlock(&uc->lock);
+
+ if (ret != CMD_SUCCESS) {
+ dev_err(uc->dev, "write flash row failed ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid)
+{
+ struct ccg_cmd cmd;
+ int ret;
+
+ cmd.reg = CCGX_RAB_VALIDATE_FW;
+ cmd.data = fwid;
+ cmd.len = 1;
+ cmd.delay = 500;
+
+ mutex_lock(&uc->lock);
+
+ ret = ccg_send_command(uc, &cmd);
+
+ mutex_unlock(&uc->lock);
+
+ if (ret != CMD_SUCCESS)
+ return ret;
+
+ return 0;
+}
+
+static bool ccg_check_vendor_version(struct ucsi_ccg *uc,
+ struct version_format *app,
+ struct fw_config_table *fw_cfg)
+{
+ struct device *dev = uc->dev;
+
+ /* Check if the fw build is for supported vendors */
+ if (le16_to_cpu(app->build) != uc->fw_build) {
+ dev_info(dev, "current fw is not from supported vendor\n");
+ return false;
+ }
+
+ /* Check if the new fw build is for supported vendors */
+ if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) {
+ dev_info(dev, "new fw is not from supported vendor\n");
+ return false;
+ }
+ return true;
+}
+
+static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name,
+ struct version_format *app)
+{
+ const struct firmware *fw = NULL;
+ struct device *dev = uc->dev;
+ struct fw_config_table fw_cfg;
+ u32 cur_version, new_version;
+ bool is_later = false;
+
+ if (request_firmware(&fw, fw_name, dev) != 0) {
+ dev_err(dev, "error: Failed to open cyacd file %s\n", fw_name);
+ return false;
+ }
+
+ /*
+ * check if signed fw
+ * last part of fw image is fw cfg table and signature
+ */
+ if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE)
+ goto out_release_firmware;
+
+ memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
+ sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg));
+
+ if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
+ dev_info(dev, "not a signed image\n");
+ goto out_release_firmware;
+ }
+
+ /* compare input version with FWCT version */
+ cur_version = le16_to_cpu(app->build) | app->patch << 16 |
+ app->ver << 24;
+
+ new_version = le16_to_cpu(fw_cfg.app.build) | fw_cfg.app.patch << 16 |
+ fw_cfg.app.ver << 24;
+
+ if (!ccg_check_vendor_version(uc, app, &fw_cfg))
+ goto out_release_firmware;
+
+ if (new_version > cur_version)
+ is_later = true;
+
+out_release_firmware:
+ release_firmware(fw);
+ return is_later;
+}
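ccg_check_fw_version() reduces the "is the bundled image newer?" question to a single unsigned compare by packing build, patch and the major/minor byte into one 32-bit value (major/minor in the top byte so it dominates), and it recognizes a signed image by the four ASCII bytes 'F' 'W' 'C' 'T' assembled least-significant first. A standalone sketch of both checks, where struct ver only mirrors the fields used here and the version numbers are made up:

/*
 * Standalone illustration only -- struct ver mirrors just the fields the
 * check uses, and the build/patch/version numbers below are made up.
 */
#include <stdint.h>
#include <stdio.h>

struct ver {
	uint16_t build;
	uint8_t patch;
	uint8_t ver;	/* major nibble in the high 4 bits, minor in the low 4 */
};

static uint32_t pack_version(const struct ver *v)
{
	/* build lowest, then patch, then major/minor on top, so a plain
	 * unsigned compare of the packed values orders releases */
	return (uint32_t)v->build | (uint32_t)v->patch << 16 |
	       (uint32_t)v->ver << 24;
}

int main(void)
{
	const uint32_t fwct_magic = 'F' | 'W' << 8 | 'C' << 16 | 'T' << 24;
	struct ver current = { .build = 41, .patch = 1, .ver = 0x32 };	/* "3.2" */
	struct ver candidate = { .build = 45, .patch = 0, .ver = 0x33 };	/* "3.3" */

	printf("FWCT magic word: 0x%08x\n", (unsigned int)fwct_magic);
	printf("candidate is %s than current\n",
	       pack_version(&candidate) > pack_version(&current) ?
	       "later" : "not later");
	return 0;
}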
+
+static int ccg_fw_update_needed(struct ucsi_ccg *uc,
+ enum enum_flash_mode *mode)
+{
+ struct device *dev = uc->dev;
+ int err;
+ struct version_info version[3];
+
+ err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
+ sizeof(uc->info));
+ if (err) {
+ dev_err(dev, "read device mode failed\n");
+ return err;
+ }
+
+ err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)version,
+ sizeof(version));
+ if (err) {
+ dev_err(dev, "read device mode failed\n");
+ return err;
+ }
+
+ if (memcmp(&version[FW1], "\0\0\0\0\0\0\0\0",
+ sizeof(struct version_info)) == 0) {
+ dev_info(dev, "secondary fw is not flashed\n");
+ *mode = SECONDARY_BL;
+ } else if (le16_to_cpu(version[FW1].base.build) <
+ secondary_fw_min_ver) {
+ dev_info(dev, "secondary fw version is too low (< %d)\n",
+ secondary_fw_min_ver);
+ *mode = SECONDARY;
+ } else if (memcmp(&version[FW2], "\0\0\0\0\0\0\0\0",
+ sizeof(struct version_info)) == 0) {
+ dev_info(dev, "primary fw is not flashed\n");
+ *mode = PRIMARY;
+ } else if (ccg_check_fw_version(uc, ccg_fw_names[PRIMARY],
+ &version[FW2].app)) {
+ dev_info(dev, "found primary fw with later version\n");
+ *mode = PRIMARY;
+ } else {
+ dev_info(dev, "secondary and primary fw are the latest\n");
+ *mode = FLASH_NOT_NEEDED;
+ }
+ return 0;
+}
+
+static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
+{
+ struct device *dev = uc->dev;
+ const struct firmware *fw = NULL;
+ const char *p, *s;
+ const char *eof;
+ int err, row, len, line_sz, line_cnt = 0;
+ unsigned long start_time = jiffies;
+ struct fw_config_table fw_cfg;
+ u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE];
+ u8 *wr_buf;
+
+ err = request_firmware(&fw, ccg_fw_names[mode], dev);
+ if (err) {
+ dev_err(dev, "request %s failed err=%d\n",
+ ccg_fw_names[mode], err);
+ return err;
+ }
+
+ if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >>
+ CCG_DEVINFO_FWMODE_SHIFT) == FW2) {
+ err = ccg_cmd_port_control(uc, false);
+ if (err < 0)
+ goto release_fw;
+ err = ccg_cmd_jump_boot_mode(uc, 0);
+ if (err < 0)
+ goto release_fw;
+ }
+
+ eof = fw->data + fw->size;
+
+ /*
+ * check if signed fw
+ * last part of fw image is fw cfg table and signature
+ */
+ if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig))
+ goto not_signed_fw;
+
+ memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
+ sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg));
+
+ if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
+ dev_info(dev, "not a signed image\n");
+ goto not_signed_fw;
+ }
+ eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig);
+
+ memcpy((uint8_t *)&fw_cfg_sig,
+ fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig));
+
+ /* flash fw config table and signature first */
+ err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg,
+ FLASH_FWCT1_WR_CMD);
+ if (err)
+ goto release_fw;
+
+ err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg + CCG4_ROW_SIZE,
+ FLASH_FWCT2_WR_CMD);
+ if (err)
+ goto release_fw;
+
+ err = ccg_cmd_write_flash_row(uc, 0, &fw_cfg_sig,
+ FLASH_FWCT_SIG_WR_CMD);
+ if (err)
+ goto release_fw;
+
+not_signed_fw:
+ wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
+ if (!wr_buf) {
+ err = -ENOMEM;
+ goto release_fw;
+ }
+
+ err = ccg_cmd_enter_flashing(uc);
+ if (err)
+ goto release_mem;
+
+ /*****************************************************************
+ * CCG firmware image (.cyacd) file line format
+ *
+ * :00rrrrllll[dd....]cc\r\n
+ *
+ * :00 header
+ * rrrr is row number to flash (4 char)
+ * llll is data len to flash (4 char)
+ * dd is one byte of data in hex (512 chars total)
+ * cc is checksum (2 char)
+ * \r\n newline
+ *
+ * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527
+ *
+ *****************************************************************/
+
+ p = strnchr(fw->data, fw->size, ':');
+ while (p < eof) {
+ s = strnchr(p + 1, eof - p - 1, ':');
+
+ if (!s)
+ s = eof;
+
+ line_sz = s - p;
+
+ if (line_sz != CYACD_LINE_SIZE) {
+ dev_err(dev, "Bad FW format line_sz=%d\n", line_sz);
+ err = -EINVAL;
+ goto release_mem;
+ }
+
+ if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
+ err = -EINVAL;
+ goto release_mem;
+ }
+
+ row = get_unaligned_be16(wr_buf);
+ len = get_unaligned_be16(&wr_buf[2]);
+
+ if (len != CCG4_ROW_SIZE) {
+ err = -EINVAL;
+ goto release_mem;
+ }
+
+ err = ccg_cmd_write_flash_row(uc, row, wr_buf + 4,
+ FLASH_WR_CMD);
+ if (err)
+ goto release_mem;
+
+ line_cnt++;
+ p = s;
+ }
+
+ dev_info(dev, "total %d row flashed. time: %dms\n",
+ line_cnt, jiffies_to_msecs(jiffies - start_time));
+
+ err = ccg_cmd_validate_fw(uc, (mode == PRIMARY) ? FW2 : FW1);
+ if (err)
+ dev_err(dev, "%s validation failed err=%d\n",
+ (mode == PRIMARY) ? "FW2" : "FW1", err);
+ else
+ dev_info(dev, "%s validated\n",
+ (mode == PRIMARY) ? "FW2" : "FW1");
+
+ err = ccg_cmd_port_control(uc, false);
+ if (err < 0)
+ goto release_mem;
+
+ err = ccg_cmd_reset(uc);
+ if (err < 0)
+ goto release_mem;
+
+ err = ccg_cmd_port_control(uc, true);
+ if (err < 0)
+ goto release_mem;
+
+release_mem:
+ kfree(wr_buf);
+
+release_fw:
+ release_firmware(fw);
+ return err;
+}
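The block comment in do_flash() documents the .cyacd record layout: a ":00" header, four hex chars of row number, four of length, the hex-encoded row data, and a two-char checksum. A toy decoder for one such record, following the same hex2bin-then-big-endian steps as the loop above; ROW_SIZE is shrunk to 4 bytes so the sample line stays short (the CCG4 driver uses 256-byte rows), and like do_flash() it does not verify the checksum:

/*
 * Toy decoder only -- ROW_SIZE is shrunk to 4 bytes so the sample record
 * stays short (the CCG4 driver uses 256-byte rows), and the trailing
 * checksum chars are arbitrary because nothing here verifies them.
 */
#include <stdio.h>

#define ROW_SIZE 4

static int hexval(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

static int hex2bin(unsigned char *dst, const char *src, int bytes)
{
	while (bytes--) {
		int hi = hexval(*src++);
		int lo = hexval(*src++);

		if (hi < 0 || lo < 0)
			return -1;
		*dst++ = (unsigned char)(hi << 4 | lo);
	}
	return 0;
}

int main(void)
{
	/* ":00" + row 01ff + length 0004 + data deadbeef + checksum cc */
	const char *line = ":0001ff0004deadbeefcc";
	unsigned char buf[2 + 2 + ROW_SIZE];
	unsigned int row, len;

	if (hex2bin(buf, line + 3, (int)sizeof(buf)))	/* skip the ":00" header */
		return 1;

	row = (unsigned int)buf[0] << 8 | buf[1];	/* big-endian, as in do_flash() */
	len = (unsigned int)buf[2] << 8 | buf[3];

	printf("row 0x%04x, %u data bytes, first data byte 0x%02x\n",
	       row, len, buf[4]);
	return 0;
}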
+
+/*******************************************************************************
+ * CCG4 has two copies of the firmware in addition to the bootloader.
+ * If the device is running FW1, FW2 can be updated with the new version.
+ * Dual firmware mode allows the CCG device to stay in a PD contract and support
+ * USB PD and Type-C functionality while a firmware update is in progress.
+ ******************************************************************************/
+static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
+{
+ int err = 0;
+
+ while (flash_mode != FLASH_NOT_NEEDED) {
+ err = do_flash(uc, flash_mode);
+ if (err < 0)
+ return err;
+ err = ccg_fw_update_needed(uc, &flash_mode);
+ if (err < 0)
+ return err;
+ }
+ dev_info(uc->dev, "CCG FW update successful\n");
+
+ return err;
+}
+
+static int ccg_restart(struct ucsi_ccg *uc)
+{
+ struct device *dev = uc->dev;
+ int status;
+
+ status = ucsi_ccg_init(uc);
+ if (status < 0) {
+ dev_err(dev, "ucsi_ccg_start fail, err=%d\n", status);
+ return status;
+ }
+
+ status = request_threaded_irq(uc->irq, NULL, ccg_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), uc);
+ if (status < 0) {
+ dev_err(dev, "request_threaded_irq failed - %d\n", status);
+ return status;
+ }
+
+ uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
+ if (IS_ERR(uc->ucsi)) {
+ dev_err(uc->dev, "ucsi_register_ppm failed\n");
+ return PTR_ERR(uc->ucsi);
+ }
+
+ return 0;
+}
+
+static void ccg_update_firmware(struct work_struct *work)
+{
+ struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
+ enum enum_flash_mode flash_mode;
+ int status;
+
+ status = ccg_fw_update_needed(uc, &flash_mode);
+ if (status < 0)
+ return;
+
+ if (flash_mode != FLASH_NOT_NEEDED) {
+ ucsi_unregister_ppm(uc->ucsi);
+ free_irq(uc->irq, uc);
+
+ ccg_fw_update(uc, flash_mode);
+ ccg_restart(uc);
+ }
+}
+
+static ssize_t do_flash_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
+ bool flash;
+
+ if (kstrtobool(buf, &flash))
+ return -EINVAL;
+
+ if (!flash)
+ return n;
+
+ if (uc->fw_build == 0x0) {
+ dev_err(dev, "fail to flash FW due to missing FW build info\n");
+ return -EINVAL;
+ }
+
+ schedule_work(&uc->work);
+ return n;
+}
+
+static DEVICE_ATTR_WO(do_flash);
+
+static struct attribute *ucsi_ccg_sysfs_attrs[] = {
+ &dev_attr_do_flash.attr,
+ NULL,
+};
+
+static struct attribute_group ucsi_ccg_attr_group = {
+ .attrs = ucsi_ccg_sysfs_attrs,
+};
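The new do_flash attribute lets userspace kick off the deferred firmware update (do_flash_store() only schedules the work when the written value parses as true and a non-zero ccgx,firmware-build was provided). A minimal trigger, where the sysfs path is illustrative only since the real node sits under whatever I2C bus and address the CCGx controller uses:

/*
 * Minimal userspace trigger -- the sysfs path below is an example only;
 * the real node sits under the I2C client device of the CCGx controller.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *node = "/sys/bus/i2c/devices/0-0008/do_flash";	/* example path */
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror(node);
		return 1;
	}
	/* Any value kstrtobool() treats as true schedules the update work. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}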
+
static int ucsi_ccg_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -240,6 +1076,14 @@ static int ucsi_ccg_probe(struct i2c_client *client,
uc->ppm.sync = ucsi_ccg_sync;
uc->dev = dev;
uc->client = client;
+ mutex_init(&uc->lock);
+ INIT_WORK(&uc->work, ccg_update_firmware);
+
+ /* Only fail FW flashing when FW build information is not provided */
+ status = device_property_read_u16(dev, "ccgx,firmware-build",
+ &uc->fw_build);
+ if (status)
+ dev_err(uc->dev, "failed to get FW build information\n");
/* reset ccg device and initialize ucsi */
status = ucsi_ccg_init(uc);
@@ -248,15 +1092,27 @@ static int ucsi_ccg_probe(struct i2c_client *client,
return status;
}
- status = devm_request_threaded_irq(dev, client->irq, NULL,
- ccg_irq_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
- dev_name(dev), uc);
+ status = get_fw_info(uc);
+ if (status < 0) {
+ dev_err(uc->dev, "get_fw_info failed - %d\n", status);
+ return status;
+ }
+
+ uc->port_num = 1;
+
+ if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
+ uc->port_num++;
+
+ status = request_threaded_irq(client->irq, NULL, ccg_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), uc);
if (status < 0) {
dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
return status;
}
+ uc->irq = client->irq;
+
uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
if (IS_ERR(uc->ucsi)) {
dev_err(uc->dev, "ucsi_register_ppm failed\n");
@@ -273,6 +1129,11 @@ static int ucsi_ccg_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, uc);
+
+ status = sysfs_create_group(&uc->dev->kobj, &ucsi_ccg_attr_group);
+ if (status)
+ dev_err(uc->dev, "cannot create sysfs group: %d\n", status);
+
return 0;
}
@@ -280,7 +1141,10 @@ static int ucsi_ccg_remove(struct i2c_client *client)
{
struct ucsi_ccg *uc = i2c_get_clientdata(client);
+ cancel_work_sync(&uc->work);
ucsi_unregister_ppm(uc->ucsi);
+ free_irq(uc->irq, uc);
+ sysfs_remove_group(&uc->dev->kobj, &ucsi_ccg_attr_group);
return 0;
}
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 97b09a42a10c..b0a855acafa3 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -17,9 +17,9 @@ static int is_clear_halt_cmd(struct urb *urb)
req = (struct usb_ctrlrequest *) urb->setup_packet;
- return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
- (req->bRequestType == USB_RECIP_ENDPOINT) &&
- (req->wValue == USB_ENDPOINT_HALT);
+ return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
+ (req->bRequestType == USB_RECIP_ENDPOINT) &&
+ (req->wValue == USB_ENDPOINT_HALT);
}
static int is_set_interface_cmd(struct urb *urb)
@@ -361,16 +361,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
}
if (usb_endpoint_xfer_isoc(epd)) {
- /* validate packet size and number of packets */
- unsigned int maxp, packets, bytes;
-
- maxp = usb_endpoint_maxp(epd);
- maxp *= usb_endpoint_maxp_mult(epd);
- bytes = pdu->u.cmd_submit.transfer_buffer_length;
- packets = DIV_ROUND_UP(bytes, maxp);
-
+ /* validate number of packets */
if (pdu->u.cmd_submit.number_of_packets < 0 ||
- pdu->u.cmd_submit.number_of_packets > packets) {
+ pdu->u.cmd_submit.number_of_packets >
+ USBIP_MAX_ISO_PACKETS) {
dev_err(&sdev->udev->dev,
"CMD_SUBMIT: isoc invalid num packets %d\n",
pdu->u.cmd_submit.number_of_packets);
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index bf8afe9b5883..8be857a4fa13 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug;
#define USBIP_DIR_OUT 0x00
#define USBIP_DIR_IN 0x01
+/*
+ * Arbitrary limit for the maximum number of isochronous packets in an URB,
+ * see, for example, the uhci_submit_isochronous function in
+ * drivers/usb/host/uhci-q.c
+ */
+#define USBIP_MAX_ISO_PACKETS 1024
+
/**
* struct usbip_header_basic - data pertinent to every request
* @command: the usbip request type
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index f46ee1fefe02..000ab7225717 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -508,6 +508,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_U1_TIMEOUT:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_U1_TIMEOUT\n");
+ /* Fall through */
case USB_PORT_FEAT_U2_TIMEOUT:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_U2_TIMEOUT\n");
@@ -654,15 +655,9 @@ error:
static void vhci_tx_urb(struct urb *urb, struct vhci_device *vdev)
{
struct vhci_priv *priv;
- struct vhci_hcd *vhci_hcd;
+ struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
unsigned long flags;
- if (!vdev) {
- pr_err("could not get virtual device");
- return;
- }
- vhci_hcd = vdev_to_vhci_hcd(vdev);
-
priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
if (!priv) {
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 9de5ed38da83..3798d77d131c 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -22,7 +22,6 @@ menuconfig VFIO
tristate "VFIO Non-Privileged userspace driver framework"
depends on IOMMU_API
select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64)
- select ANON_INODES
help
VFIO provides a framework for secure userspace device drivers.
See Documentation/vfio.txt for more details.
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index b96fedc77ee5..3cc1a05fde1c 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -88,7 +88,7 @@ static void mdev_release_parent(struct kref *kref)
put_device(dev);
}
-static inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
+static struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
{
if (parent)
kref_get(&parent->ref);
@@ -96,7 +96,7 @@ static inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
return parent;
}
-static inline void mdev_put_parent(struct mdev_parent *parent)
+static void mdev_put_parent(struct mdev_parent *parent)
{
if (parent)
kref_put(&parent->ref, mdev_release_parent);
@@ -141,7 +141,7 @@ static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
*/
ret = parent->ops->remove(mdev);
if (ret && !force_remove)
- return -EBUSY;
+ return ret;
sysfs_remove_groups(&mdev->dev.kobj, parent->ops->mdev_attr_groups);
return 0;
@@ -149,10 +149,10 @@ static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
static int mdev_device_remove_cb(struct device *dev, void *data)
{
- if (!dev_is_mdev(dev))
- return 0;
+ if (dev_is_mdev(dev))
+ mdev_device_remove(dev, true);
- return mdev_device_remove(dev, data ? *(bool *)data : true);
+ return 0;
}
/*
@@ -181,6 +181,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
/* Check for duplicate */
parent = __find_parent_device(dev);
if (parent) {
+ parent = NULL;
ret = -EEXIST;
goto add_dev_err;
}
@@ -239,7 +240,6 @@ EXPORT_SYMBOL(mdev_register_device);
void mdev_unregister_device(struct device *dev)
{
struct mdev_parent *parent;
- bool force_remove = true;
mutex_lock(&parent_list_lock);
parent = __find_parent_device(dev);
@@ -253,8 +253,7 @@ void mdev_unregister_device(struct device *dev)
list_del(&parent->next);
class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
- device_for_each_child(dev, (void *)&force_remove,
- mdev_device_remove_cb);
+ device_for_each_child(dev, NULL, mdev_device_remove_cb);
parent_remove_sysfs_files(parent);
@@ -310,7 +309,6 @@ int mdev_device_create(struct kobject *kobj,
mutex_unlock(&mdev_list_lock);
mdev->parent = parent;
- kref_init(&mdev->ref);
mdev->dev.parent = dev;
mdev->dev.bus = &mdev_bus_type;
@@ -390,6 +388,24 @@ int mdev_device_remove(struct device *dev, bool force_remove)
return 0;
}
+int mdev_set_iommu_device(struct device *dev, struct device *iommu_device)
+{
+ struct mdev_device *mdev = to_mdev_device(dev);
+
+ mdev->iommu_device = iommu_device;
+
+ return 0;
+}
+EXPORT_SYMBOL(mdev_set_iommu_device);
+
+struct device *mdev_get_iommu_device(struct device *dev)
+{
+ struct mdev_device *mdev = to_mdev_device(dev);
+
+ return mdev->iommu_device;
+}
+EXPORT_SYMBOL(mdev_get_iommu_device);
+
static int __init mdev_init(void)
{
return mdev_bus_register();
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 379758c52b1b..36cbbdb754de 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -30,9 +30,9 @@ struct mdev_device {
struct mdev_parent *parent;
guid_t uuid;
void *driver_data;
- struct kref ref;
struct list_head next;
struct kobject *type_kobj;
+ struct device *iommu_device;
bool active;
};
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 5193a0e0ce5a..cbf94b8165ea 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -280,7 +280,7 @@ type_link_failed:
void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type)
{
+ sysfs_remove_files(&dev->kobj, mdev_device_attrs);
sysfs_remove_link(&dev->kobj, "mdev_type");
sysfs_remove_link(type->devices_kobj, dev_name(dev));
- sysfs_remove_files(&dev->kobj, mdev_device_attrs);
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index a25659b5a5d1..cab71da46f4a 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/eventfd.h>
@@ -287,12 +288,11 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pci_save_state(pdev);
vdev->pci_saved_state = pci_store_saved_state(pdev);
if (!vdev->pci_saved_state)
- pr_debug("%s: Couldn't store %s saved state\n",
- __func__, dev_name(&pdev->dev));
+ pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);
if (likely(!nointxmask)) {
if (vfio_pci_nointx(pdev)) {
- dev_info(&pdev->dev, "Masking broken INTx support\n");
+ pci_info(pdev, "Masking broken INTx support\n");
vdev->nointx = true;
pci_intx(pdev, 0);
} else
@@ -336,8 +336,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
ret = vfio_pci_igd_init(vdev);
if (ret) {
- dev_warn(&vdev->pdev->dev,
- "Failed to setup Intel IGD regions\n");
+ pci_warn(pdev, "Failed to setup Intel IGD regions\n");
goto disable_exit;
}
}
@@ -346,8 +345,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
if (ret && ret != -ENODEV) {
- dev_warn(&vdev->pdev->dev,
- "Failed to setup NVIDIA NV2 RAM region\n");
+ pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
goto disable_exit;
}
}
@@ -356,8 +354,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
ret = vfio_pci_ibm_npu2_init(vdev);
if (ret && ret != -ENODEV) {
- dev_warn(&vdev->pdev->dev,
- "Failed to setup NVIDIA NV2 ATSD region\n");
+ pci_warn(pdev, "Failed to setup NVIDIA NV2 ATSD region\n");
goto disable_exit;
}
}
@@ -429,8 +426,7 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
* is just busy work.
*/
if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
- pr_info("%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&pdev->dev));
+ pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);
if (!vdev->reset_works)
goto out;
@@ -1255,17 +1251,18 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
static void vfio_pci_request(void *device_data, unsigned int count)
{
struct vfio_pci_device *vdev = device_data;
+ struct pci_dev *pdev = vdev->pdev;
mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
if (!(count % 10))
- dev_notice_ratelimited(&vdev->pdev->dev,
+ pci_notice_ratelimited(pdev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(vdev->req_trigger, 1);
} else if (count == 0) {
- dev_warn(&vdev->pdev->dev,
+ pci_warn(pdev,
"No device request channel registered, blocked until released by user\n");
}
@@ -1661,11 +1658,11 @@ static void __init vfio_pci_fill_ids(void)
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
- pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
- pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index e82b51114687..52963a904790 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -412,8 +412,7 @@ static void vfio_bar_restore(struct vfio_pci_device *vdev)
if (pdev->is_virtfn)
return;
- pr_info("%s: %s reset recovery - restoring bars\n",
- __func__, dev_name(&pdev->dev));
+ pci_info(pdev, "%s: reset recovery - restoring BARs\n", __func__);
for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
pci_user_write_config_dword(pdev, i, *rbar);
@@ -1298,8 +1297,8 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
else
return PCI_SATA_SIZEOF_SHORT;
default:
- pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n",
- dev_name(&pdev->dev), __func__, cap, pos);
+ pci_warn(pdev, "%s: unknown length for PCI cap %#x@%#x\n",
+ __func__, cap, pos);
}
return 0;
@@ -1372,8 +1371,8 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
}
return PCI_TPH_BASE_SIZEOF;
default:
- pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n",
- dev_name(&pdev->dev), __func__, ecap, epos);
+ pci_warn(pdev, "%s: unknown length for PCI ecap %#x@%#x\n",
+ __func__, ecap, epos);
}
return 0;
@@ -1474,8 +1473,8 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
}
if (!len) {
- pr_info("%s: %s hiding cap 0x%x\n",
- __func__, dev_name(&pdev->dev), cap);
+ pci_info(pdev, "%s: hiding cap %#x@%#x\n", __func__,
+ cap, pos);
*prev = next;
pos = next;
continue;
@@ -1486,9 +1485,8 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
continue;
- pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n",
- __func__, dev_name(&pdev->dev),
- pos + i, map[pos + i], cap);
+ pci_warn(pdev, "%s: PCI config conflict @%#x, was cap %#x now cap %#x\n",
+ __func__, pos + i, map[pos + i], cap);
}
BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
@@ -1549,8 +1547,8 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
}
if (!len) {
- pr_info("%s: %s hiding ecap 0x%x@0x%x\n",
- __func__, dev_name(&pdev->dev), ecap, epos);
+ pci_info(pdev, "%s: hiding ecap %#x@%#x\n",
+ __func__, ecap, epos);
/* If not the first in the chain, we can skip over it */
if (prev) {
@@ -1572,9 +1570,8 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
continue;
- pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n",
- __func__, dev_name(&pdev->dev),
- epos + i, map[epos + i], ecap);
+ pci_warn(pdev, "%s: PCI config conflict @%#x, was ecap %#x now ecap %#x\n",
+ __func__, epos + i, map[epos + i], ecap);
}
/*
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index 32f695ffe128..50fe3c4f7feb 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -472,6 +472,8 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
return 0;
free_exit:
+ if (data->base)
+ memunmap(data->base);
kfree(data);
return ret;
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index 3ddb2704221d..fe95964bc3be 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -89,7 +89,8 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
} while ((pcs_value & MDIO_CTRL1_RESET) && --count);
if (pcs_value & MDIO_CTRL1_RESET)
- pr_warn("%s XGBE PHY reset timeout\n", __func__);
+ dev_warn(vdev->device, "%s: XGBE PHY reset timeout\n",
+ __func__);
/* disable auto-negotiation */
value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_CTRL1);
@@ -114,7 +115,7 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
usleep_range(500, 600);
if (!count)
- pr_warn("%s MAC SW reset failed\n", __func__);
+ dev_warn(vdev->device, "%s: MAC SW reset failed\n", __func__);
return 0;
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index c0cd824be2b7..2a45b36bcf58 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -12,6 +12,8 @@
* GNU General Public License for more details.
*/
+#define dev_fmt(fmt) "VFIO: " fmt
+
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
@@ -63,7 +65,7 @@ static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
adev = ACPI_COMPANION(dev);
if (!adev) {
- pr_err("VFIO: ACPI companion device not found for %s\n",
+ dev_err(dev, "ACPI companion device not found for %s\n",
vdev->name);
return -ENODEV;
}
@@ -638,7 +640,7 @@ static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
ret = device_property_read_string(dev, "compatible",
&vdev->compat);
if (ret)
- pr_err("VFIO: Cannot retrieve compat for %s\n", vdev->name);
+ dev_err(dev, "Cannot retrieve compat for %s\n", vdev->name);
return ret;
}
@@ -680,14 +682,14 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
ret = vfio_platform_get_reset(vdev);
if (ret && vdev->reset_required) {
- pr_err("VFIO: No reset function found for device %s\n",
- vdev->name);
+ dev_err(dev, "No reset function found for device %s\n",
+ vdev->name);
return ret;
}
group = vfio_iommu_group_get(dev);
if (!group) {
- pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
+ dev_err(dev, "No IOMMU group for device %s\n", vdev->name);
ret = -EINVAL;
goto put_reset;
}
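
The vfio_platform_common.c conversion relies on dev_fmt(): defining it before the first include lets every dev_err()/dev_warn() in the file pick up the "VFIO: " prefix automatically, so individual call sites no longer spell it out. A hedged sketch of the idiom; the probe function and message are illustrative, not from the tree:

#define dev_fmt(fmt) "VFIO: " fmt

#include <linux/device.h>

static int demo_probe(struct device *dev)
{
        /* emitted roughly as "<driver> <device>: VFIO: no reset handler" */
        dev_err(dev, "no reset handler\n");
        return -ENODEV;
}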
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index a3030cdf3c18..82fcf07fa9ea 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -34,6 +34,7 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
+#include <linux/sched/signal.h>
#define DRIVER_VERSION "0.3"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
@@ -704,8 +705,8 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
return 0;
/* TODO Prevent device auto probing */
- WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
- iommu_group_id(group->iommu_group));
+ dev_WARN(dev, "Device added to live group %d!\n",
+ iommu_group_id(group->iommu_group));
return 0;
}
@@ -748,25 +749,22 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
*/
break;
case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
- pr_debug("%s: Device %s, group %d binding to driver\n",
- __func__, dev_name(dev),
- iommu_group_id(group->iommu_group));
+ dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
+ iommu_group_id(group->iommu_group));
break;
case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
- pr_debug("%s: Device %s, group %d bound to driver %s\n",
- __func__, dev_name(dev),
- iommu_group_id(group->iommu_group), dev->driver->name);
+ dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
+ iommu_group_id(group->iommu_group), dev->driver->name);
BUG_ON(vfio_group_nb_verify(group, dev));
break;
case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
- pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
- __func__, dev_name(dev),
- iommu_group_id(group->iommu_group), dev->driver->name);
+ dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
+ __func__, iommu_group_id(group->iommu_group),
+ dev->driver->name);
break;
case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
- pr_debug("%s: Device %s, group %d unbound from driver\n",
- __func__, dev_name(dev),
- iommu_group_id(group->iommu_group));
+ dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
+ iommu_group_id(group->iommu_group));
/*
* XXX An unbound device in a live group is ok, but we'd
* really like to avoid the above BUG_ON by preventing other
@@ -830,8 +828,8 @@ int vfio_add_group_dev(struct device *dev,
device = vfio_group_get_device(group, dev);
if (device) {
- WARN(1, "Device %s already exists on group %d\n",
- dev_name(dev), iommu_group_id(iommu_group));
+ dev_WARN(dev, "Device already exists on group %d\n",
+ iommu_group_id(iommu_group));
vfio_device_put(device);
vfio_group_put(group);
return -EBUSY;
@@ -904,30 +902,17 @@ void *vfio_device_data(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_device_data);
-/* Given a referenced group, check if it contains the device */
-static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
-{
- struct vfio_device *device;
-
- device = vfio_group_get_device(group, dev);
- if (!device)
- return false;
-
- vfio_device_put(device);
- return true;
-}
-
/*
* Decrement the device reference count and wait for the device to be
* removed. Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct vfio_device *device = dev_get_drvdata(dev);
struct vfio_group *group = device->group;
void *device_data = device->device_data;
struct vfio_unbound_dev *unbound;
unsigned int i = 0;
- long ret;
bool interrupted = false;
/*
@@ -964,6 +949,8 @@ void *vfio_del_group_dev(struct device *dev)
* interval with counter to allow the driver to take escalating
* measures to release the device if it has the ability to do so.
*/
+ add_wait_queue(&vfio.release_q, &wait);
+
do {
device = vfio_group_get_device(group, dev);
if (!device)
@@ -975,12 +962,10 @@ void *vfio_del_group_dev(struct device *dev)
vfio_device_put(device);
if (interrupted) {
- ret = wait_event_timeout(vfio.release_q,
- !vfio_dev_present(group, dev), HZ * 10);
+ wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
} else {
- ret = wait_event_interruptible_timeout(vfio.release_q,
- !vfio_dev_present(group, dev), HZ * 10);
- if (ret == -ERESTARTSYS) {
+ wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
+ if (signal_pending(current)) {
interrupted = true;
dev_warn(dev,
"Device is currently in use, task"
@@ -989,8 +974,10 @@ void *vfio_del_group_dev(struct device *dev)
current->comm, task_pid_nr(current));
}
}
- } while (ret <= 0);
+ } while (1);
+
+ remove_wait_queue(&vfio.release_q, &wait);
/*
* In order to support multiple devices per group, devices can be
* plucked from the group while other devices in the group are still
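
The vfio.c rework above replaces the wait_event_*_timeout()/vfio_dev_present() pairing with an explicit wait-queue entry, apparently so the presence check (which takes locks and may sleep) no longer runs as a wait_event() condition while the task is not TASK_RUNNING. A sketch of the resulting loop shape, with an assumed demo_condition_met() predicate standing in for the device-presence check:

static void demo_wait_for_release(struct wait_queue_head *release_q)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(release_q, &wait);
        while (!demo_condition_met()) {
                /* sleeps up to 10s, or until woken_wake_function runs */
                wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
                if (signal_pending(current))
                        break;          /* the caller decides how signals are handled */
        }
        remove_wait_queue(release_q, &wait);
}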
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8dbb270998f4..40ddc0c5f677 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -532,7 +532,8 @@ static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
enum dma_data_direction direction = iommu_tce_direction(tce);
if (get_user_pages_fast(tce & PAGE_MASK, 1,
- direction != DMA_TO_DEVICE, &page) != 1)
+ direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
+ &page) != 1)
return -EFAULT;
*hpa = __pa((unsigned long) page_address(page));
@@ -1398,7 +1399,7 @@ unlock_exit:
mutex_unlock(&container->lock);
}
-const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
.name = "iommu-vfio-powerpc",
.owner = THIS_MODULE,
.open = tce_iommu_open,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..3ddc375e7063 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
MODULE_PARM_DESC(disable_hugepages,
"Disable VFIO IOMMU support for IOMMU hugepages.");
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+ "Maximum number of user DMA mappings per container (65535).");
+
struct vfio_iommu {
struct list_head domain_list;
struct vfio_domain *external_domain; /* domain for external user */
struct mutex lock;
struct rb_root dma_list;
struct blocking_notifier_head notifier;
+ unsigned int dma_avail;
bool v2;
bool nesting;
};
@@ -91,6 +97,7 @@ struct vfio_dma {
struct vfio_group {
struct iommu_group *iommu_group;
struct list_head next;
+ bool mdev_group; /* An mdev group */
};
/*
@@ -351,7 +358,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
down_read(&mm->mmap_sem);
if (mm == current->mm) {
- ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
+ ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
+ vmas);
} else {
ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
vmas, NULL);
@@ -558,7 +566,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
mutex_lock(&iommu->lock);
/* Fail if notifier list is empty */
- if ((!iommu->external_domain) || (!iommu->notifier.head)) {
+ if (!iommu->notifier.head) {
ret = -EINVAL;
goto pin_done;
}
@@ -640,11 +648,6 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
mutex_lock(&iommu->lock);
- if (!iommu->external_domain) {
- mutex_unlock(&iommu->lock);
- return -EINVAL;
- }
-
do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
for (i = 0; i < npage; i++) {
struct vfio_dma *dma;
@@ -836,6 +839,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
vfio_unlink_dma(iommu, dma);
put_task_struct(dma->task);
kfree(dma);
+ iommu->dma_avail++;
}
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1085,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
goto out_unlock;
}
+ if (!iommu->dma_avail) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma) {
ret = -ENOMEM;
goto out_unlock;
}
+ iommu->dma_avail--;
dma->iova = iova;
dma->vaddr = vaddr;
dma->prot = prot;
@@ -1298,13 +1308,109 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
return ret;
}
+static struct device *vfio_mdev_get_iommu_device(struct device *dev)
+{
+ struct device *(*fn)(struct device *dev);
+ struct device *iommu_device;
+
+ fn = symbol_get(mdev_get_iommu_device);
+ if (fn) {
+ iommu_device = fn(dev);
+ symbol_put(mdev_get_iommu_device);
+
+ return iommu_device;
+ }
+
+ return NULL;
+}
+
+static int vfio_mdev_attach_domain(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+ struct device *iommu_device;
+
+ iommu_device = vfio_mdev_get_iommu_device(dev);
+ if (iommu_device) {
+ if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
+ return iommu_aux_attach_device(domain, iommu_device);
+ else
+ return iommu_attach_device(domain, iommu_device);
+ }
+
+ return -EINVAL;
+}
+
+static int vfio_mdev_detach_domain(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+ struct device *iommu_device;
+
+ iommu_device = vfio_mdev_get_iommu_device(dev);
+ if (iommu_device) {
+ if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
+ iommu_aux_detach_device(domain, iommu_device);
+ else
+ iommu_detach_device(domain, iommu_device);
+ }
+
+ return 0;
+}
+
+static int vfio_iommu_attach_group(struct vfio_domain *domain,
+ struct vfio_group *group)
+{
+ if (group->mdev_group)
+ return iommu_group_for_each_dev(group->iommu_group,
+ domain->domain,
+ vfio_mdev_attach_domain);
+ else
+ return iommu_attach_group(domain->domain, group->iommu_group);
+}
+
+static void vfio_iommu_detach_group(struct vfio_domain *domain,
+ struct vfio_group *group)
+{
+ if (group->mdev_group)
+ iommu_group_for_each_dev(group->iommu_group, domain->domain,
+ vfio_mdev_detach_domain);
+ else
+ iommu_detach_group(domain->domain, group->iommu_group);
+}
+
+static bool vfio_bus_is_mdev(struct bus_type *bus)
+{
+ struct bus_type *mdev_bus;
+ bool ret = false;
+
+ mdev_bus = symbol_get(mdev_bus_type);
+ if (mdev_bus) {
+ ret = (bus == mdev_bus);
+ symbol_put(mdev_bus_type);
+ }
+
+ return ret;
+}
+
+static int vfio_mdev_iommu_device(struct device *dev, void *data)
+{
+ struct device **old = data, *new;
+
+ new = vfio_mdev_get_iommu_device(dev);
+ if (!new || (*old && *old != new))
+ return -EINVAL;
+
+ *old = new;
+
+ return 0;
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_group *group;
struct vfio_domain *domain, *d;
- struct bus_type *bus = NULL, *mdev_bus;
+ struct bus_type *bus = NULL;
int ret;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base;
@@ -1339,23 +1445,30 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_free;
- mdev_bus = symbol_get(mdev_bus_type);
+ if (vfio_bus_is_mdev(bus)) {
+ struct device *iommu_device = NULL;
- if (mdev_bus) {
- if ((bus == mdev_bus) && !iommu_present(bus)) {
- symbol_put(mdev_bus_type);
+ group->mdev_group = true;
+
+ /* Determine the isolation type */
+ ret = iommu_group_for_each_dev(iommu_group, &iommu_device,
+ vfio_mdev_iommu_device);
+ if (ret || !iommu_device) {
if (!iommu->external_domain) {
INIT_LIST_HEAD(&domain->group_list);
iommu->external_domain = domain;
- } else
+ } else {
kfree(domain);
+ }
list_add(&group->next,
&iommu->external_domain->group_list);
mutex_unlock(&iommu->lock);
+
return 0;
}
- symbol_put(mdev_bus_type);
+
+ bus = iommu_device->bus;
}
domain->domain = iommu_domain_alloc(bus);
@@ -1373,7 +1486,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_domain;
}
- ret = iommu_attach_group(domain->domain, iommu_group);
+ ret = vfio_iommu_attach_group(domain, group);
if (ret)
goto out_domain;
@@ -1405,8 +1518,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
list_for_each_entry(d, &iommu->domain_list, next) {
if (d->domain->ops == domain->domain->ops &&
d->prot == domain->prot) {
- iommu_detach_group(domain->domain, iommu_group);
- if (!iommu_attach_group(d->domain, iommu_group)) {
+ vfio_iommu_detach_group(domain, group);
+ if (!vfio_iommu_attach_group(d, group)) {
list_add(&group->next, &d->group_list);
iommu_domain_free(domain->domain);
kfree(domain);
@@ -1414,7 +1527,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
return 0;
}
- ret = iommu_attach_group(domain->domain, iommu_group);
+ ret = vfio_iommu_attach_group(domain, group);
if (ret)
goto out_domain;
}
@@ -1440,7 +1553,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
return 0;
out_detach:
- iommu_detach_group(domain->domain, iommu_group);
+ vfio_iommu_detach_group(domain, group);
out_domain:
iommu_domain_free(domain->domain);
out_free:
@@ -1531,7 +1644,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (!group)
continue;
- iommu_detach_group(domain->domain, iommu_group);
+ vfio_iommu_detach_group(domain, group);
list_del(&group->next);
kfree(group);
/*
@@ -1583,6 +1696,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
INIT_LIST_HEAD(&iommu->domain_list);
iommu->dma_list = RB_ROOT;
+ iommu->dma_avail = dma_entry_limit;
mutex_init(&iommu->lock);
BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
@@ -1596,7 +1710,7 @@ static void vfio_release_domain(struct vfio_domain *domain, bool external)
list_for_each_entry_safe(group, group_tmp,
&domain->group_list, next) {
if (!external)
- iommu_detach_group(domain->domain, group->iommu_group);
+ vfio_iommu_detach_group(domain, group);
list_del(&group->next);
kfree(group);
}
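
vfio_bus_is_mdev() and vfio_mdev_get_iommu_device() above use the symbol_get()/symbol_put() pair so type1 can probe for mdev support without a hard module dependency: the lookup succeeds only while the providing module is loaded, and the module reference is dropped right after use. The same shape, reduced to its essentials (the wrapper name is illustrative; mdev_bus_type is the real exported symbol):

extern struct bus_type mdev_bus_type;   /* normally declared by the mdev headers */

static bool demo_bus_is_mdev(struct bus_type *bus)
{
        struct bus_type *mdev_bus;
        bool ret = false;

        mdev_bus = symbol_get(mdev_bus_type);   /* NULL if mdev is not loaded */
        if (mdev_bus) {
                ret = (bus == mdev_bus);
                symbol_put(mdev_bus_type);      /* release the module reference */
        }
        return ret;
}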
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 618fb6461017..c090d177bd75 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1443,7 +1443,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
vs_tpg[tpg->tport_tpgt] = tpg;
- smp_mb__after_atomic();
match = true;
}
mutex_unlock(&tpg->tv_tpg_mutex);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5ace833de746..1e3ed41ae1f3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
{
- struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+ struct vhost_umem_node *tmp, *node;
+ if (!size)
+ return -EFAULT;
+
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
@@ -1700,7 +1704,7 @@ static int set_bit_to_user(int nr, void __user *addr)
int bit = nr + (log % PAGE_SIZE) * 8;
int r;
- r = get_user_pages_fast(log, 1, 1, &page);
+ r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
if (r < 0)
return r;
BUG_ON(r != 1);
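
The get_user_pages_fast() changes in this patch (spapr_tce earlier, vhost here) follow the switch from a boolean "write" argument to a gup_flags argument, so callers now pass FOLL_WRITE explicitly. A minimal, illustrative caller under that prototype (names are not from either driver); the pinned page must still be released with put_page() when the caller is done with it:

static int demo_pin_user_page(unsigned long uaddr, bool writable,
                              struct page **page)
{
        unsigned int gup_flags = writable ? FOLL_WRITE : 0;
        int ret;

        ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, gup_flags, page);
        if (ret < 0)
                return ret;             /* fault or invalid address */
        if (ret != 1)
                return -EFAULT;         /* fewer pages pinned than requested */
        return 0;
}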
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 71ee978c848f..3ed1d9084f94 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -2,13 +2,7 @@
# Backlight & LCD drivers configuration
#
-menuconfig BACKLIGHT_LCD_SUPPORT
- bool "Backlight & LCD device support"
- help
- Enable this to be able to choose the drivers for controlling the
- backlight and the LCD panel on some platforms, for example on PDAs.
-
-if BACKLIGHT_LCD_SUPPORT
+menu "Backlight & LCD device support"
#
# LCD
@@ -199,7 +193,6 @@ config BACKLIGHT_IPAQ_MICRO
config BACKLIGHT_LM3533
tristate "Backlight Driver for LM3533"
- depends on BACKLIGHT_CLASS_DEVICE
depends on MFD_LM3533
help
Say Y to enable the backlight driver for National Semiconductor / TI
@@ -323,7 +316,7 @@ config BACKLIGHT_ADP5520
config BACKLIGHT_ADP8860
tristate "Backlight Driver for ADP8860/ADP8861/ADP8863 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
+ depends on I2C
select NEW_LEDS
select LEDS_CLASS
help
@@ -335,7 +328,7 @@ config BACKLIGHT_ADP8860
config BACKLIGHT_ADP8870
tristate "Backlight Driver for ADP8870 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
+ depends on I2C
select NEW_LEDS
select LEDS_CLASS
help
@@ -353,28 +346,28 @@ config BACKLIGHT_88PM860X
config BACKLIGHT_PCF50633
tristate "Backlight driver for NXP PCF50633 MFD"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_PCF50633
+ depends on MFD_PCF50633
help
If you have a backlight driven by a NXP PCF50633 MFD, say Y here to
enable its driver.
config BACKLIGHT_AAT2870
tristate "AnalogicTech AAT2870 Backlight"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE
+ depends on MFD_AAT2870_CORE
help
If you have a AnalogicTech AAT2870 say Y to enable the
backlight driver.
config BACKLIGHT_LM3630A
tristate "Backlight Driver for LM3630A"
- depends on BACKLIGHT_CLASS_DEVICE && I2C && PWM
+ depends on I2C && PWM
select REGMAP_I2C
help
This supports TI LM3630A Backlight Driver
config BACKLIGHT_LM3639
tristate "Backlight Driver for LM3639"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
+ depends on I2C
select REGMAP_I2C
select NEW_LEDS
select LEDS_CLASS
@@ -383,20 +376,20 @@ config BACKLIGHT_LM3639
config BACKLIGHT_LP855X
tristate "Backlight driver for TI LP855X"
- depends on BACKLIGHT_CLASS_DEVICE && I2C && PWM
+ depends on I2C && PWM
help
This supports TI LP8550, LP8551, LP8552, LP8553, LP8555, LP8556 and
LP8557 backlight driver.
config BACKLIGHT_LP8788
tristate "Backlight driver for TI LP8788 MFD"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_LP8788 && PWM
+ depends on MFD_LP8788 && PWM
help
This supports TI LP8788 backlight driver.
config BACKLIGHT_OT200
tristate "Backlight driver for ot200 visualisation device"
- depends on BACKLIGHT_CLASS_DEVICE && CS5535_MFGPT && GPIO_CS5535
+ depends on CS5535_MFGPT && GPIO_CS5535
help
To compile this driver as a module, choose M here: the module will be
called ot200_bl.
@@ -410,7 +403,7 @@ config BACKLIGHT_PANDORA
config BACKLIGHT_SKY81452
tristate "Backlight driver for SKY81452"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_SKY81452
+ depends on MFD_SKY81452
help
If you have a Skyworks SKY81452, say Y to enable the
backlight driver.
@@ -420,14 +413,14 @@ config BACKLIGHT_SKY81452
config BACKLIGHT_TPS65217
tristate "TPS65217 Backlight"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_TPS65217
+ depends on MFD_TPS65217
help
If you have a Texas Instruments TPS65217 say Y to enable the
backlight driver.
config BACKLIGHT_AS3711
tristate "AS3711 Backlight"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_AS3711
+ depends on MFD_AS3711
help
If you have an Austrian Microsystems AS3711 say Y to enable the
backlight driver.
@@ -466,4 +459,4 @@ config BACKLIGHT_RAVE_SP
endif # BACKLIGHT_CLASS_DEVICE
-endif # BACKLIGHT_LCD_SUPPORT
+endmenu
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 2030a6b77a09..75d996490cf0 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -35,6 +35,14 @@
#define REG_MAX 0x50
#define INT_DEBOUNCE_MSEC 10
+
+#define LM3630A_BANK_0 0
+#define LM3630A_BANK_1 1
+
+#define LM3630A_NUM_SINKS 2
+#define LM3630A_SINK_0 0
+#define LM3630A_SINK_1 1
+
struct lm3630a_chip {
struct device *dev;
struct delayed_work work;
@@ -201,7 +209,7 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
LM3630A_LEDA_ENABLE, LM3630A_LEDA_ENABLE);
if (ret < 0)
goto out_i2c_err;
- return bl->props.brightness;
+ return 0;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access\n");
@@ -278,7 +286,7 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
LM3630A_LEDB_ENABLE, LM3630A_LEDB_ENABLE);
if (ret < 0)
goto out_i2c_err;
- return bl->props.brightness;
+ return 0;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
@@ -329,15 +337,17 @@ static const struct backlight_ops lm3630a_bank_b_ops = {
static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
{
- struct backlight_properties props;
struct lm3630a_platform_data *pdata = pchip->pdata;
+ struct backlight_properties props;
+ const char *label;
props.type = BACKLIGHT_RAW;
if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
props.brightness = pdata->leda_init_brt;
props.max_brightness = pdata->leda_max_brt;
+ label = pdata->leda_label ? pdata->leda_label : "lm3630a_leda";
pchip->bleda =
- devm_backlight_device_register(pchip->dev, "lm3630a_leda",
+ devm_backlight_device_register(pchip->dev, label,
pchip->dev, pchip,
&lm3630a_bank_a_ops, &props);
if (IS_ERR(pchip->bleda))
@@ -348,8 +358,9 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
(pdata->ledb_ctrl != LM3630A_LEDB_ON_A)) {
props.brightness = pdata->ledb_init_brt;
props.max_brightness = pdata->ledb_max_brt;
+ label = pdata->ledb_label ? pdata->ledb_label : "lm3630a_ledb";
pchip->bledb =
- devm_backlight_device_register(pchip->dev, "lm3630a_ledb",
+ devm_backlight_device_register(pchip->dev, label,
pchip->dev, pchip,
&lm3630a_bank_b_ops, &props);
if (IS_ERR(pchip->bledb))
@@ -364,6 +375,123 @@ static const struct regmap_config lm3630a_regmap = {
.max_register = REG_MAX,
};
+static int lm3630a_parse_led_sources(struct fwnode_handle *node,
+ int default_led_sources)
+{
+ u32 sources[LM3630A_NUM_SINKS];
+ int ret, num_sources, i;
+
+ num_sources = fwnode_property_read_u32_array(node, "led-sources", NULL,
+ 0);
+ if (num_sources < 0)
+ return default_led_sources;
+ else if (num_sources > ARRAY_SIZE(sources))
+ return -EINVAL;
+
+ ret = fwnode_property_read_u32_array(node, "led-sources", sources,
+ num_sources);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_sources; i++) {
+ if (sources[i] < LM3630A_SINK_0 || sources[i] > LM3630A_SINK_1)
+ return -EINVAL;
+
+ ret |= BIT(sources[i]);
+ }
+
+ return ret;
+}
+
+static int lm3630a_parse_bank(struct lm3630a_platform_data *pdata,
+ struct fwnode_handle *node, int *seen_led_sources)
+{
+ int led_sources, ret;
+ const char *label;
+ u32 bank, val;
+ bool linear;
+
+ ret = fwnode_property_read_u32(node, "reg", &bank);
+ if (ret)
+ return ret;
+
+ if (bank < LM3630A_BANK_0 || bank > LM3630A_BANK_1)
+ return -EINVAL;
+
+ led_sources = lm3630a_parse_led_sources(node, BIT(bank));
+ if (led_sources < 0)
+ return led_sources;
+
+ if (*seen_led_sources & led_sources)
+ return -EINVAL;
+
+ *seen_led_sources |= led_sources;
+
+ linear = fwnode_property_read_bool(node,
+ "ti,linear-mapping-mode");
+ if (bank) {
+ if (led_sources & BIT(LM3630A_SINK_0) ||
+ !(led_sources & BIT(LM3630A_SINK_1)))
+ return -EINVAL;
+
+ pdata->ledb_ctrl = linear ?
+ LM3630A_LEDB_ENABLE_LINEAR :
+ LM3630A_LEDB_ENABLE;
+ } else {
+ if (!(led_sources & BIT(LM3630A_SINK_0)))
+ return -EINVAL;
+
+ pdata->leda_ctrl = linear ?
+ LM3630A_LEDA_ENABLE_LINEAR :
+ LM3630A_LEDA_ENABLE;
+
+ if (led_sources & BIT(LM3630A_SINK_1))
+ pdata->ledb_ctrl = LM3630A_LEDB_ON_A;
+ }
+
+ ret = fwnode_property_read_string(node, "label", &label);
+ if (!ret) {
+ if (bank)
+ pdata->ledb_label = label;
+ else
+ pdata->leda_label = label;
+ }
+
+ ret = fwnode_property_read_u32(node, "default-brightness",
+ &val);
+ if (!ret) {
+ if (bank)
+ pdata->ledb_init_brt = val;
+ else
+ pdata->leda_init_brt = val;
+ }
+
+ ret = fwnode_property_read_u32(node, "max-brightness", &val);
+ if (!ret) {
+ if (bank)
+ pdata->ledb_max_brt = val;
+ else
+ pdata->leda_max_brt = val;
+ }
+
+ return 0;
+}
+
+static int lm3630a_parse_node(struct lm3630a_chip *pchip,
+ struct lm3630a_platform_data *pdata)
+{
+ int ret = -ENODEV, seen_led_sources = 0;
+ struct fwnode_handle *node;
+
+ device_for_each_child_node(pchip->dev, node) {
+ ret = lm3630a_parse_bank(pdata, node, &seen_led_sources);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int lm3630a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -396,13 +524,18 @@ static int lm3630a_probe(struct i2c_client *client,
GFP_KERNEL);
if (pdata == NULL)
return -ENOMEM;
+
/* default values */
- pdata->leda_ctrl = LM3630A_LEDA_ENABLE;
- pdata->ledb_ctrl = LM3630A_LEDB_ENABLE;
pdata->leda_max_brt = LM3630A_MAX_BRIGHTNESS;
pdata->ledb_max_brt = LM3630A_MAX_BRIGHTNESS;
pdata->leda_init_brt = LM3630A_MAX_BRIGHTNESS;
pdata->ledb_init_brt = LM3630A_MAX_BRIGHTNESS;
+
+ rval = lm3630a_parse_node(pchip, pdata);
+ if (rval) {
+ dev_err(&client->dev, "fail : parse node\n");
+ return rval;
+ }
}
pchip->pdata = pdata;
@@ -470,11 +603,17 @@ static const struct i2c_device_id lm3630a_id[] = {
{}
};
+static const struct of_device_id lm3630a_match_table[] = {
+ { .compatible = "ti,lm3630a", },
+ { },
+};
+
MODULE_DEVICE_TABLE(i2c, lm3630a_id);
static struct i2c_driver lm3630a_i2c_driver = {
.driver = {
.name = LM3630A_NAME,
+ .of_match_table = lm3630a_match_table,
},
.probe = lm3630a_probe,
.remove = lm3630a_remove,
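
lm3630a_parse_led_sources() above leans on a property-API convention: calling fwnode_property_read_u32_array() with a NULL buffer returns the element count (or a negative error), and a second call with a correctly sized buffer reads the values. A hedged sketch of that two-call idiom, using an invented property name ("demo-prop"):

static int demo_read_u32_list(struct fwnode_handle *node, u32 *buf, int max)
{
        int count, ret;

        count = fwnode_property_read_u32_array(node, "demo-prop", NULL, 0);
        if (count < 0)
                return count;           /* property missing or malformed */
        if (count > max)
                return -EINVAL;         /* caller's buffer is too small */

        ret = fwnode_property_read_u32_array(node, "demo-prop", buf, count);
        return ret ? ret : count;
}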
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 53b8ceea9bde..fb45f866b923 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -155,21 +155,6 @@ static const struct backlight_ops pwm_backlight_ops = {
#ifdef CONFIG_OF
#define PWM_LUMINANCE_SCALE 10000 /* luminance scale */
-/* An integer based power function */
-static u64 int_pow(u64 base, int exp)
-{
- u64 result = 1;
-
- while (exp) {
- if (exp & 1)
- result *= base;
- exp >>= 1;
- base *= base;
- }
-
- return result;
-}
-
/*
* CIE lightness to PWM conversion.
*
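
The pwm_bl.c hunk above drops the driver-local int_pow() helper, presumably because an equivalent shared helper now serves the CIE-lightness computation; the removed function itself is the classic exponentiation-by-squaring loop. Reproduced here with comments purely as a reference for the algorithm:

/* Integer power by repeated squaring: O(log exp) multiplications. */
static u64 demo_int_pow(u64 base, int exp)
{
        u64 result = 1;

        while (exp) {
                if (exp & 1)            /* fold in base^(2^k) when bit k of exp is set */
                        result *= base;
                exp >>= 1;
                base *= base;           /* square: base^(2^k) -> base^(2^(k+1)) */
        }
        return result;
}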
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 58a9590c9db6..bf6b77b964f1 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -45,25 +45,25 @@ menuconfig FB
device-aware may cause unexpected results. If unsure, say N.
config FIRMWARE_EDID
- bool "Enable firmware EDID"
- depends on FB
- ---help---
- This enables access to the EDID transferred from the firmware.
- On the i386, this is from the Video BIOS. Enable this if DDC/I2C
- transfers do not work for your driver and if you are using
- nvidiafb, i810fb or savagefb.
-
- In general, choosing Y for this option is safe. If you
- experience extremely long delays while booting before you get
- something on your display, try setting this to N. Matrox cards in
- combination with certain motherboards and monitors are known to
- suffer from this problem.
+ bool "Enable firmware EDID"
+ depends on FB
+ ---help---
+ This enables access to the EDID transferred from the firmware.
+ On the i386, this is from the Video BIOS. Enable this if DDC/I2C
+ transfers do not work for your driver and if you are using
+ nvidiafb, i810fb or savagefb.
+
+ In general, choosing Y for this option is safe. If you
+ experience extremely long delays while booting before you get
+ something on your display, try setting this to N. Matrox cards in
+ combination with certain motherboards and monitors are known to
+ suffer from this problem.
config FB_DDC
- tristate
- depends on FB
- select I2C_ALGOBIT
- select I2C
+ tristate
+ depends on FB
+ select I2C_ALGOBIT
+ select I2C
config FB_BOOT_VESA_SUPPORT
bool
@@ -160,8 +160,8 @@ config FB_LITTLE_ENDIAN
endchoice
config FB_SYS_FOPS
- tristate
- depends on FB
+ tristate
+ depends on FB
config FB_DEFERRED_IO
bool
@@ -180,41 +180,40 @@ config FB_SVGALIB
cards.
config FB_MACMODES
- tristate
- depends on FB
+ tristate
+ depends on FB
config FB_BACKLIGHT
tristate
depends on FB
- select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
config FB_MODE_HELPERS
- bool "Enable Video Mode Handling Helpers"
- depends on FB
+ bool "Enable Video Mode Handling Helpers"
+ depends on FB
---help---
This enables functions for handling video modes using the
Generalized Timing Formula and the EDID parser. A few drivers rely
- on this feature such as the radeonfb, rivafb, and the i810fb. If
+ on this feature such as the radeonfb, rivafb, and the i810fb. If
your driver does not take advantage of this feature, choosing Y will
just increase the kernel size by about 5K.
config FB_TILEBLITTING
- bool "Enable Tile Blitting Support"
- depends on FB
- ---help---
- This enables tile blitting. Tile blitting is a drawing technique
- where the screen is divided into rectangular sections (tiles), whereas
- the standard blitting divides the screen into pixels. Because the
- default drawing element is a tile, drawing functions will be passed
- parameters in terms of number of tiles instead of number of pixels.
- For example, to draw a single character, instead of using bitmaps,
- an index to an array of bitmaps will be used. To clear or move a
- rectangular section of a screen, the rectangle will be described in
- terms of number of tiles in the x- and y-axis.
-
- This is particularly important to one driver, matroxfb. If
- unsure, say N.
+ bool "Enable Tile Blitting Support"
+ depends on FB
+ ---help---
+ This enables tile blitting. Tile blitting is a drawing technique
+ where the screen is divided into rectangular sections (tiles), whereas
+ the standard blitting divides the screen into pixels. Because the
+ default drawing element is a tile, drawing functions will be passed
+ parameters in terms of number of tiles instead of number of pixels.
+ For example, to draw a single character, instead of using bitmaps,
+ an index to an array of bitmaps will be used. To clear or move a
+ rectangular section of a screen, the rectangle will be described in
+ terms of number of tiles in the x- and y-axis.
+
+ This is particularly important to one driver, matroxfb. If
+ unsure, say N.
comment "Frame buffer hardware drivers"
depends on FB
@@ -226,7 +225,7 @@ config FB_GRVGA
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
---help---
- This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
+ This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
config FB_CIRRUS
tristate "Cirrus Logic support"
@@ -281,7 +280,6 @@ config FB_ARMCLCD
select FB_CFB_IMAGEBLIT
select FB_MODE_HELPERS if OF
select VIDEOMODE_HELPERS if OF
- select BACKLIGHT_LCD_SUPPORT if OF
select BACKLIGHT_CLASS_DEVICE if OF
help
This framebuffer device driver is for the ARM PrimeCell PL110
@@ -293,14 +291,6 @@ config FB_ARMCLCD
here and read <file:Documentation/kbuild/modules.txt>. The module
will be called amba-clcd.
-# Helper logic selected only by the ARM Versatile platform family.
-config PLAT_VERSATILE_CLCD
- def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
- depends on ARM
- depends on FB_ARMCLCD && FB=y
- select REGMAP
- select MFD_SYSCON
-
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && ARCH_ACORN
@@ -315,7 +305,6 @@ config FB_ACORN
config FB_CLPS711X
tristate "CLPS711X LCD support"
depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
- select BACKLIGHT_LCD_SUPPORT
select FB_MODE_HELPERS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -343,7 +332,6 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
depends on FB && ARCH_MXC
- select BACKLIGHT_LCD_SUPPORT
select LCD_CLASS_DEVICE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -655,17 +643,17 @@ config FB_EFI
using the EFI framebuffer as your console.
config FB_N411
- tristate "N411 Apollo/Hecuba devkit support"
- depends on FB && X86 && MMU
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
- select FB_HECUBA
- help
- This enables support for the Apollo display controller in its
- Hecuba form using the n411 devkit.
+ tristate "N411 Apollo/Hecuba devkit support"
+ depends on FB && X86 && MMU
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select FB_DEFERRED_IO
+ select FB_HECUBA
+ help
+ This enables support for the Apollo display controller in its
+ Hecuba form using the n411 devkit.
config FB_HGA
tristate "Hercules mono graphics support"
@@ -685,7 +673,7 @@ config FB_GBE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- help
+ help
This is the frame buffer device driver for SGI Graphics Backend.
This chip is used in SGI O2 and Visual Workstation 320/540.
@@ -866,8 +854,8 @@ config FB_S1D13XXX
<http://vdc.epson.com/>
config FB_ATMEL
- tristate "AT91/AT32 LCD Controller support"
- depends on FB && HAVE_FB_ATMEL
+ tristate "AT91 LCD Controller support"
+ depends on FB && OF && HAVE_FB_ATMEL
select FB_BACKLIGHT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -875,7 +863,7 @@ config FB_ATMEL
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
- This enables support for the AT91/AT32 LCD Controller.
+ This enables support for the AT91 LCD Controller.
config FB_NVIDIA
tristate "nVidia Framebuffer Support"
@@ -897,10 +885,10 @@ config FB_NVIDIA
module will be called nvidiafb.
config FB_NVIDIA_I2C
- bool "Enable DDC Support"
- depends on FB_NVIDIA
- select FB_DDC
- help
+ bool "Enable DDC Support"
+ depends on FB_NVIDIA
+ select FB_DDC
+ help
This enables I2C support for nVidia Chipsets. This is used
only for getting EDID information from the attached display
allowing for robust video mode handling and switching.
@@ -943,10 +931,10 @@ config FB_RIVA
module will be called rivafb.
config FB_RIVA_I2C
- bool "Enable DDC Support"
- depends on FB_RIVA
- select FB_DDC
- help
+ bool "Enable DDC Support"
+ depends on FB_RIVA
+ select FB_DDC
+ help
This enables I2C support for nVidia Chipsets. This is used
only for getting EDID information from the attached display
allowing for robust video mode handling and switching.
@@ -991,37 +979,37 @@ config FB_I810
select FB_CFB_IMAGEBLIT
select VGASTATE
help
- This driver supports the on-board graphics built in to the Intel 810
- and 815 chipsets. Say Y if you have and plan to use such a board.
+ This driver supports the on-board graphics built in to the Intel 810
+ and 815 chipsets. Say Y if you have and plan to use such a board.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called i810fb.
- For more information, please read
+ For more information, please read
<file:Documentation/fb/intel810.txt>
config FB_I810_GTF
bool "use VESA Generalized Timing Formula"
depends on FB_I810
help
- If you say Y, then the VESA standard, Generalized Timing Formula
- or GTF, will be used to calculate the required video timing values
- per video mode. Since the GTF allows nondiscrete timings
- (nondiscrete being a range of values as opposed to discrete being a
- set of values), you'll be able to use any combination of horizontal
+ If you say Y, then the VESA standard, Generalized Timing Formula
+ or GTF, will be used to calculate the required video timing values
+ per video mode. Since the GTF allows nondiscrete timings
+ (nondiscrete being a range of values as opposed to discrete being a
+ set of values), you'll be able to use any combination of horizontal
and vertical resolutions, and vertical refresh rates without having
to specify your own timing parameters. This is especially useful
- to maximize the performance of an aging display, or if you just
- have a display with nonstandard dimensions. A VESA compliant
+ to maximize the performance of an aging display, or if you just
+ have a display with nonstandard dimensions. A VESA compliant
monitor is recommended, but can still work with non-compliant ones.
- If you need or want this, then select this option. The timings may
- not be compliant with Intel's recommended values. Use at your own
+ If you need or want this, then select this option. The timings may
+ not be compliant with Intel's recommended values. Use at your own
risk.
- If you say N, the driver will revert to discrete video timings
+ If you say N, the driver will revert to discrete video timings
using a set recommended by Intel in their documentation.
-
- If unsure, say N.
+
+ If unsure, say N.
config FB_I810_I2C
bool "Enable DDC Support"
@@ -1060,8 +1048,8 @@ config FB_INTEL
depends on !DRM_I915
help
This driver supports the on-board graphics built in to the Intel
- 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
- Say Y if you have and plan to use such a board.
+ 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
+ Say Y if you have and plan to use such a board.
To make FB_INTEL=Y work you need to say AGP_INTEL=y too.
@@ -1142,10 +1130,10 @@ config FB_MATROX_G
G450/G550 secondary head and digital output are supported without
additional modules.
- The driver starts in monitor mode. You must use the matroxset tool
- (available at <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to
- swap primary and secondary head outputs, or to change output mode.
- Secondary head driver always start in 640x480 resolution and you
+ The driver starts in monitor mode. You must use the matroxset tool
+ (available at <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to
+ swap primary and secondary head outputs, or to change output mode.
+ Secondary head driver always start in 640x480 resolution and you
must use fbset to change it.
Do not forget that second head supports only 16 and 32 bpp
@@ -1228,7 +1216,7 @@ config FB_RADEON_I2C
select FB_DDC
default y
help
- Say Y here if you want DDC/I2C support for your Radeon board.
+ Say Y here if you want DDC/I2C support for your Radeon board.
config FB_RADEON_BACKLIGHT
bool "Support for backlight control"
@@ -1357,10 +1345,10 @@ config FB_SAVAGE
will be called savagefb.
config FB_SAVAGE_I2C
- bool "Enable DDC2 Support"
- depends on FB_SAVAGE
- select FB_DDC
- help
+ bool "Enable DDC2 Support"
+ depends on FB_SAVAGE
+ select FB_DDC
+ help
This enables I2C support for S3 Savage Chipsets. This is used
only for getting EDID information from the attached display
allowing for robust video mode handling and switching.
@@ -1370,12 +1358,12 @@ config FB_SAVAGE_I2C
here.
config FB_SAVAGE_ACCEL
- bool "Enable Console Acceleration"
- depends on FB_SAVAGE
- help
- This option will compile in console acceleration support. If
- the resulting framebuffer console has bothersome glitches, then
- choose N here.
+ bool "Enable Console Acceleration"
+ depends on FB_SAVAGE
+ help
+ This option will compile in console acceleration support. If
+ the resulting framebuffer console has bothersome glitches, then
+ choose N here.
config FB_SIS
tristate "SiS/XGI display support"
@@ -1408,17 +1396,17 @@ config FB_SIS_315
as XGI V3XT, V5, V8 and Z7.
config FB_VIA
- tristate "VIA UniChrome (Pro) and Chrome9 display support"
- depends on FB && PCI && GPIOLIB && I2C && (X86 || COMPILE_TEST)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select I2C_ALGOBIT
- help
+ tristate "VIA UniChrome (Pro) and Chrome9 display support"
+ depends on FB && PCI && GPIOLIB && I2C && (X86 || COMPILE_TEST)
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select I2C_ALGOBIT
+ help
This is the frame buffer device driver for Graphics chips of VIA
UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
CN700/VN800,CX700/VX700,P4M890) and Chrome9 Family (K8M890,CN896
- /P4M900,VX800)
+ /P4M900,VX800)
Say Y if you have a VIA UniChrome graphics board.
To compile this driver as a module, choose M here: the
@@ -1455,7 +1443,7 @@ config FB_NEOMAGIC
select VGASTATE
help
This driver supports notebooks with NeoMagic PCI chips.
- Say Y if you have such a graphics card.
+ Say Y if you have such a graphics card.
To compile this driver as a module, choose M here: the
module will be called neofb.
@@ -1510,7 +1498,7 @@ config FB_VOODOO1
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
---help---
- Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
+ Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
Voodoo2 (cvg) based graphics card.
To compile this driver as a module, choose M here: the
@@ -1679,9 +1667,9 @@ config FB_HIT
config FB_PMAG_AA
tristate "PMAG-AA TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Support for the PMAG-AA TURBOchannel framebuffer card (1280x1024x1)
used mainly in the MIPS-based DECstation series.
@@ -1689,9 +1677,9 @@ config FB_PMAG_AA
config FB_PMAG_BA
tristate "PMAG-BA TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Support for the PMAG-BA TURBOchannel framebuffer card (1024x864x8)
used mainly in the MIPS-based DECstation series.
@@ -1699,9 +1687,9 @@ config FB_PMAG_BA
config FB_PMAGB_B
tristate "PMAGB-B TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Support for the PMAGB-B TURBOchannel framebuffer card used mainly
in the MIPS-based DECstation series. The card is currently only
@@ -1710,9 +1698,9 @@ config FB_PMAGB_B
config FB_MAXINE
bool "Maxine (Personal DECstation) onboard framebuffer support"
depends on (FB = y) && MACH_DECSTATION
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Support for the onboard framebuffer (1024x768x8) in the Personal
DECstation series (Personal DECstation 5000/20, /25, /33, /50,
@@ -1721,9 +1709,9 @@ config FB_MAXINE
config FB_G364
bool "G364 frame buffer support"
depends on (FB = y) && (MIPS_MAGNUM_4000 || OLIVETTI_M700)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
The G364 driver is the framebuffer used in MIPS Magnum 4000 and
Olivetti M700-10 systems.
@@ -1731,9 +1719,9 @@ config FB_G364
config FB_68328
bool "Motorola 68328 native frame buffer support"
depends on (FB = y) && (M68328 || M68EZ328 || M68VZ328)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Say Y here if you want to support the built-in frame buffer of
the Motorola 68328 CPU family.
@@ -1812,13 +1800,13 @@ config FB_MBX
Accelerator
config FB_MBX_DEBUG
- bool "Enable debugging info via debugfs"
- depends on FB_MBX && DEBUG_FS
- ---help---
- Enable this if you want debugging information using the debug
- filesystem (debugfs)
+ bool "Enable debugging info via debugfs"
+ depends on FB_MBX && DEBUG_FS
+ ---help---
+ Enable this if you want debugging information using the debug
+ filesystem (debugfs)
- If unsure, say N.
+ If unsure, say N.
config FB_FSL_DIU
tristate "Freescale DIU framebuffer support"
@@ -1834,9 +1822,9 @@ config FB_FSL_DIU
config FB_W100
tristate "W100 frame buffer support"
depends on FB && ARCH_PXA
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
---help---
Frame buffer driver for the w100 as found on the Sharp SL-Cxx series.
It can also drive the w3220 chip found on iPAQ hx4700.
@@ -1901,10 +1889,10 @@ config FB_S3C
Currently the support is only for the S3C6400 and S3C6410 SoCs.
config FB_S3C_DEBUG_REGWRITE
- bool "Debug register writes"
- depends on FB_S3C
- ---help---
- Show all register writes via pr_debug()
+ bool "Debug register writes"
+ depends on FB_S3C
+ ---help---
+ Show all register writes via pr_debug()
config FB_S3C2410
tristate "S3C2410 LCD framebuffer support"
@@ -1930,18 +1918,18 @@ config FB_S3C2410_DEBUG
through sysfs
config FB_NUC900
- tristate "NUC900 LCD framebuffer support"
- depends on FB && ARCH_W90X900
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- ---help---
- Frame buffer driver for the built-in LCD controller in the Nuvoton
- NUC900 processor
+ tristate "NUC900 LCD framebuffer support"
+ depends on FB && ARCH_W90X900
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+ Frame buffer driver for the built-in LCD controller in the Nuvoton
+ NUC900 processor
config GPM1040A0_320X240
- bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
- depends on FB_NUC900
+ bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
+ depends on FB_NUC900
config FB_SM501
tristate "Silicon Motion SM501 framebuffer support"
@@ -2183,7 +2171,7 @@ config FB_EP93XX
config FB_PRE_INIT_FB
bool "Don't reinitialize, use bootloader's GDC/Display configuration"
- depends on FB && FB_MB862XX_LIME
+ depends on FB && (FB_MB862XX_LIME || FB_MXS)
---help---
Select this option if display contents should be inherited as set by
the bootloader.
@@ -2192,7 +2180,6 @@ config FB_MX3
tristate "MX3 Framebuffer support"
depends on FB && MX3_IPU
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 846b0c9ea9db..655f2537cac1 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -76,8 +76,6 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
-obj-$(CONFIG_ARCH_NOMADIK) += amba-clcd-nomadik.o
-obj-$(CONFIG_PLAT_VERSATILE_CLCD) += amba-clcd-versatile.o
obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
obj-$(CONFIG_FB_68328) += 68328fb.o
obj-$(CONFIG_FB_GBE) += gbefb.o
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.c b/drivers/video/fbdev/amba-clcd-nomadik.c
deleted file mode 100644
index cd2db1113e67..000000000000
--- a/drivers/video/fbdev/amba-clcd-nomadik.c
+++ /dev/null
@@ -1,251 +0,0 @@
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-
-#include "amba-clcd-nomadik.h"
-
-static struct gpio_desc *grestb;
-static struct gpio_desc *scen;
-static struct gpio_desc *scl;
-static struct gpio_desc *sda;
-
-static u8 tpg110_readwrite_reg(bool write, u8 address, u8 outval)
-{
- int i;
- u8 inval = 0;
-
- /* Assert SCEN */
- gpiod_set_value_cansleep(scen, 1);
- ndelay(150);
- /* Hammer out the address */
- for (i = 5; i >= 0; i--) {
- if (address & BIT(i))
- gpiod_set_value_cansleep(sda, 1);
- else
- gpiod_set_value_cansleep(sda, 0);
- ndelay(150);
- /* Send an SCL pulse */
- gpiod_set_value_cansleep(scl, 1);
- ndelay(160);
- gpiod_set_value_cansleep(scl, 0);
- ndelay(160);
- }
-
- if (write) {
- /* WRITE */
- gpiod_set_value_cansleep(sda, 0);
- } else {
- /* READ */
- gpiod_set_value_cansleep(sda, 1);
- }
- ndelay(150);
- /* Send an SCL pulse */
- gpiod_set_value_cansleep(scl, 1);
- ndelay(160);
- gpiod_set_value_cansleep(scl, 0);
- ndelay(160);
-
- if (!write)
- /* HiZ turn-around cycle */
- gpiod_direction_input(sda);
- ndelay(150);
- /* Send an SCL pulse */
- gpiod_set_value_cansleep(scl, 1);
- ndelay(160);
- gpiod_set_value_cansleep(scl, 0);
- ndelay(160);
-
- /* Hammer in/out the data */
- for (i = 7; i >= 0; i--) {
- int value;
-
- if (write) {
- value = !!(outval & BIT(i));
- gpiod_set_value_cansleep(sda, value);
- } else {
- value = gpiod_get_value(sda);
- if (value)
- inval |= BIT(i);
- }
- ndelay(150);
- /* Send an SCL pulse */
- gpiod_set_value_cansleep(scl, 1);
- ndelay(160);
- gpiod_set_value_cansleep(scl, 0);
- ndelay(160);
- }
-
- gpiod_direction_output(sda, 0);
- /* Deassert SCEN */
- gpiod_set_value_cansleep(scen, 0);
- /* Satisfies SCEN pulse width */
- udelay(1);
-
- return inval;
-}
-
-static u8 tpg110_read_reg(u8 address)
-{
- return tpg110_readwrite_reg(false, address, 0);
-}
-
-static void tpg110_write_reg(u8 address, u8 outval)
-{
- tpg110_readwrite_reg(true, address, outval);
-}
-
-static void tpg110_startup(struct device *dev)
-{
- u8 val;
-
- dev_info(dev, "TPG110 display enable\n");
- /* De-assert the reset signal */
- gpiod_set_value_cansleep(grestb, 0);
- mdelay(1);
- dev_info(dev, "de-asserted GRESTB\n");
-
- /* Test display communication */
- tpg110_write_reg(0x00, 0x55);
- val = tpg110_read_reg(0x00);
- if (val == 0x55)
- dev_info(dev, "passed communication test\n");
- val = tpg110_read_reg(0x01);
- dev_info(dev, "TPG110 chip ID: %d version: %d\n",
- val>>4, val&0x0f);
-
- /* Show display resolution */
- val = tpg110_read_reg(0x02);
- val &= 7;
- switch (val) {
- case 0x0:
- dev_info(dev, "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)");
- break;
- case 0x1:
- dev_info(dev, "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)");
- break;
- case 0x4:
- dev_info(dev, "480x640 RGB");
- break;
- case 0x5:
- dev_info(dev, "480x272 RGB");
- break;
- case 0x6:
- dev_info(dev, "640x480 RGB");
- break;
- case 0x7:
- dev_info(dev, "800x480 RGB");
- break;
- default:
- dev_info(dev, "ILLEGAL RESOLUTION");
- break;
- }
-
- val = tpg110_read_reg(0x03);
- dev_info(dev, "resolution is controlled by %s\n",
- (val & BIT(7)) ? "software" : "hardware");
-}
-
-static void tpg110_enable(struct clcd_fb *fb)
-{
- struct device *dev = &fb->dev->dev;
- static bool startup;
- u8 val;
-
- if (!startup) {
- tpg110_startup(dev);
- startup = true;
- }
-
- /* Take chip out of standby */
- val = tpg110_read_reg(0x03);
- val |= BIT(0);
- tpg110_write_reg(0x03, val);
-}
-
-static void tpg110_disable(struct clcd_fb *fb)
-{
- u8 val;
-
- dev_info(&fb->dev->dev, "TPG110 display disable\n");
- val = tpg110_read_reg(0x03);
- /* Put into standby */
- val &= ~BIT(0);
- tpg110_write_reg(0x03, val);
-}
-
-static void tpg110_init(struct device *dev, struct device_node *np,
- struct clcd_board *board)
-{
- dev_info(dev, "TPG110 display init\n");
-
- /* This asserts the GRESTB signal, putting the display into reset */
- grestb = devm_fwnode_get_gpiod_from_child(dev, "grestb", &np->fwnode,
- GPIOD_OUT_HIGH, "grestb");
- if (IS_ERR(grestb)) {
- dev_err(dev, "no GRESTB GPIO\n");
- return;
- }
- scen = devm_fwnode_get_gpiod_from_child(dev, "scen", &np->fwnode,
- GPIOD_OUT_LOW, "scen");
- if (IS_ERR(scen)) {
- dev_err(dev, "no SCEN GPIO\n");
- return;
- }
- scl = devm_fwnode_get_gpiod_from_child(dev, "scl", &np->fwnode,
- GPIOD_OUT_LOW, "scl");
- if (IS_ERR(scl)) {
- dev_err(dev, "no SCL GPIO\n");
- return;
- }
- sda = devm_fwnode_get_gpiod_from_child(dev, "sda", &np->fwnode,
- GPIOD_OUT_LOW, "sda");
- if (IS_ERR(sda)) {
- dev_err(dev, "no SDA GPIO\n");
- return;
- }
- board->enable = tpg110_enable;
- board->disable = tpg110_disable;
-}
-
-int nomadik_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel)
-{
- if (of_device_is_compatible(panel, "tpo,tpg110"))
- tpg110_init(&fb->dev->dev, panel, fb->board);
- else
- dev_info(&fb->dev->dev, "unknown panel\n");
-
- /* Unknown panel, fall through */
- return 0;
-}
-EXPORT_SYMBOL_GPL(nomadik_clcd_init_panel);
-
-#define PMU_CTRL_OFFSET 0x0000
-#define PMU_CTRL_LCDNDIF BIT(26)
-
-int nomadik_clcd_init_board(struct amba_device *adev,
- struct clcd_board *board)
-{
- struct regmap *pmu_regmap;
-
- dev_info(&adev->dev, "Nomadik CLCD board init\n");
- pmu_regmap =
- syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
- if (IS_ERR(pmu_regmap)) {
- dev_err(&adev->dev, "could not find PMU syscon regmap\n");
- return PTR_ERR(pmu_regmap);
- }
- regmap_update_bits(pmu_regmap,
- PMU_CTRL_OFFSET,
- PMU_CTRL_LCDNDIF,
- 0);
- dev_info(&adev->dev, "set PMU mux to CLCD mode\n");
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(nomadik_clcd_init_board);
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.h b/drivers/video/fbdev/amba-clcd-nomadik.h
deleted file mode 100644
index 462c31381fa1..000000000000
--- a/drivers/video/fbdev/amba-clcd-nomadik.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _AMBA_CLCD_NOMADIK_H
-#define _AMBA_CLCD_NOMADIK_H
-
-#include <linux/amba/bus.h>
-
-#ifdef CONFIG_ARCH_NOMADIK
-int nomadik_clcd_init_board(struct amba_device *adev,
- struct clcd_board *board);
-int nomadik_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel);
-#else
-static inline int nomadik_clcd_init_board(struct amba_device *adev,
- struct clcd_board *board)
-{
- return 0;
-}
-static inline int nomadik_clcd_init_panel(struct clcd_fb *fb,
- struct device_node *panel)
-{
- return 0;
-}
-#endif
-
-#endif /* inclusion guard */
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
deleted file mode 100644
index d42047dc4e4e..000000000000
--- a/drivers/video/fbdev/amba-clcd-versatile.c
+++ /dev/null
@@ -1,567 +0,0 @@
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-#include <linux/platform_data/video-clcd-versatile.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-#include <linux/bitops.h>
-#include "amba-clcd-versatile.h"
-
-static struct clcd_panel vga = {
- .mode = {
- .name = "VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 39721,
- .left_margin = 40,
- .right_margin = 24,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 96,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
- .bpp = 16,
-};
-
-static struct clcd_panel xvga = {
- .mode = {
- .name = "XVGA",
- .refresh = 60,
- .xres = 1024,
- .yres = 768,
- .pixclock = 15748,
- .left_margin = 152,
- .right_margin = 48,
- .upper_margin = 23,
- .lower_margin = 3,
- .hsync_len = 104,
- .vsync_len = 4,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
- .bpp = 16,
-};
-
-/* Sanyo TM38QV67A02A - 3.8 inch QVGA (320x240) Color TFT */
-static struct clcd_panel sanyo_tm38qv67a02a = {
- .mode = {
- .name = "Sanyo TM38QV67A02A",
- .refresh = 116,
- .xres = 320,
- .yres = 240,
- .pixclock = 100000,
- .left_margin = 6,
- .right_margin = 6,
- .upper_margin = 5,
- .lower_margin = 5,
- .hsync_len = 6,
- .vsync_len = 6,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .bpp = 16,
-};
-
-static struct clcd_panel sanyo_2_5_in = {
- .mode = {
- .name = "Sanyo QVGA Portrait",
- .refresh = 116,
- .xres = 240,
- .yres = 320,
- .pixclock = 100000,
- .left_margin = 20,
- .right_margin = 10,
- .upper_margin = 2,
- .lower_margin = 2,
- .hsync_len = 10,
- .vsync_len = 2,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .bpp = 16,
-};
-
-/* Epson L2F50113T00 - 2.2 inch 176x220 Color TFT */
-static struct clcd_panel epson_l2f50113t00 = {
- .mode = {
- .name = "Epson L2F50113T00",
- .refresh = 390,
- .xres = 176,
- .yres = 220,
- .pixclock = 62500,
- .left_margin = 3,
- .right_margin = 2,
- .upper_margin = 1,
- .lower_margin = 0,
- .hsync_len = 3,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .bpp = 16,
-};
-
-static struct clcd_panel *panels[] = {
- &vga,
- &xvga,
- &sanyo_tm38qv67a02a,
- &sanyo_2_5_in,
- &epson_l2f50113t00,
-};
-
-struct clcd_panel *versatile_clcd_get_panel(const char *name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(panels); i++)
- if (strcmp(panels[i]->mode.name, name) == 0)
- break;
-
- if (i < ARRAY_SIZE(panels))
- return panels[i];
-
- pr_err("CLCD: couldn't get parameters for panel %s\n", name);
-
- return NULL;
-}
-
-int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
-{
- dma_addr_t dma;
-
- fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, framesize, &dma,
- GFP_KERNEL);
- if (!fb->fb.screen_base) {
- pr_err("CLCD: unable to map framebuffer\n");
- return -ENOMEM;
- }
-
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = framesize;
-
- return 0;
-}
-
-int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
- fb->fb.fix.smem_start, fb->fb.fix.smem_len);
-}
-
-void versatile_clcd_remove_dma(struct clcd_fb *fb)
-{
- dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
- fb->fb.fix.smem_start);
-}
-
-#ifdef CONFIG_OF
-
-static struct regmap *versatile_syscon_map;
-static struct regmap *versatile_ib2_map;
-
-/*
- * We detect the different syscon types from the compatible strings.
- */
-enum versatile_clcd {
- INTEGRATOR_CLCD_CM,
- VERSATILE_CLCD,
- REALVIEW_CLCD_EB,
- REALVIEW_CLCD_PB1176,
- REALVIEW_CLCD_PB11MP,
- REALVIEW_CLCD_PBA8,
- REALVIEW_CLCD_PBX,
-};
-
-static const struct of_device_id versatile_clcd_of_match[] = {
- {
- .compatible = "arm,core-module-integrator",
- .data = (void *)INTEGRATOR_CLCD_CM,
- },
- {
- .compatible = "arm,versatile-sysreg",
- .data = (void *)VERSATILE_CLCD,
- },
- {
- .compatible = "arm,realview-eb-syscon",
- .data = (void *)REALVIEW_CLCD_EB,
- },
- {
- .compatible = "arm,realview-pb1176-syscon",
- .data = (void *)REALVIEW_CLCD_PB1176,
- },
- {
- .compatible = "arm,realview-pb11mp-syscon",
- .data = (void *)REALVIEW_CLCD_PB11MP,
- },
- {
- .compatible = "arm,realview-pba8-syscon",
- .data = (void *)REALVIEW_CLCD_PBA8,
- },
- {
- .compatible = "arm,realview-pbx-syscon",
- .data = (void *)REALVIEW_CLCD_PBX,
- },
- {},
-};
-
-/*
- * Core module CLCD control on the Integrator/CP: bits
- * 8 through 19 of the CM_CONTROL register control a number
- * of CLCD settings.
- */
-#define INTEGRATOR_HDR_CTRL_OFFSET 0x0C
-#define INTEGRATOR_CLCD_LCDBIASEN BIT(8)
-#define INTEGRATOR_CLCD_LCDBIASUP BIT(9)
-#define INTEGRATOR_CLCD_LCDBIASDN BIT(10)
-/* Bits 11,12,13 controls the LCD type */
-#define INTEGRATOR_CLCD_LCDMUX_MASK (BIT(11)|BIT(12)|BIT(13))
-#define INTEGRATOR_CLCD_LCDMUX_LCD24 BIT(11)
-#define INTEGRATOR_CLCD_LCDMUX_VGA565 BIT(12)
-#define INTEGRATOR_CLCD_LCDMUX_SHARP (BIT(11)|BIT(12))
-#define INTEGRATOR_CLCD_LCDMUX_VGA555 BIT(13)
-#define INTEGRATOR_CLCD_LCDMUX_VGA24 (BIT(11)|BIT(12)|BIT(13))
-#define INTEGRATOR_CLCD_LCD0_EN BIT(14)
-#define INTEGRATOR_CLCD_LCD1_EN BIT(15)
-/* R/L flip on Sharp */
-#define INTEGRATOR_CLCD_LCD_STATIC1 BIT(16)
-/* U/D flip on Sharp */
-#define INTEGRATOR_CLCD_LCD_STATIC2 BIT(17)
-/* No connection on Sharp */
-#define INTEGRATOR_CLCD_LCD_STATIC BIT(18)
-/* 0 = 24bit VGA, 1 = 18bit VGA */
-#define INTEGRATOR_CLCD_LCD_N24BITEN BIT(19)
-
-#define INTEGRATOR_CLCD_MASK (INTEGRATOR_CLCD_LCDBIASEN | \
- INTEGRATOR_CLCD_LCDBIASUP | \
- INTEGRATOR_CLCD_LCDBIASDN | \
- INTEGRATOR_CLCD_LCDMUX_MASK | \
- INTEGRATOR_CLCD_LCD0_EN | \
- INTEGRATOR_CLCD_LCD1_EN | \
- INTEGRATOR_CLCD_LCD_STATIC1 | \
- INTEGRATOR_CLCD_LCD_STATIC2 | \
- INTEGRATOR_CLCD_LCD_STATIC | \
- INTEGRATOR_CLCD_LCD_N24BITEN)
-
-static void integrator_clcd_enable(struct clcd_fb *fb)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val;
-
- dev_info(&fb->dev->dev, "enable Integrator CLCD connectors\n");
-
- /* FIXME: really needed? */
- val = INTEGRATOR_CLCD_LCD_STATIC1 | INTEGRATOR_CLCD_LCD_STATIC2 |
- INTEGRATOR_CLCD_LCD0_EN | INTEGRATOR_CLCD_LCD1_EN;
- if (var->bits_per_pixel <= 8 ||
- (var->bits_per_pixel == 16 && var->green.length == 5))
- /* Pseudocolor, RGB555, BGR555 */
- val |= INTEGRATOR_CLCD_LCDMUX_VGA555;
- else if (fb->fb.var.bits_per_pixel <= 16)
- /* truecolor RGB565 */
- val |= INTEGRATOR_CLCD_LCDMUX_VGA565;
- else
- val = 0; /* no idea for this, don't trust the docs */
-
- regmap_update_bits(versatile_syscon_map,
- INTEGRATOR_HDR_CTRL_OFFSET,
- INTEGRATOR_CLCD_MASK,
- val);
-}
-
-/*
- * This configuration register is present throughout the Versatile
- * and RealView family, but becomes increasingly unused starting
- * with the RealView series.
- */
-#define SYS_CLCD 0x50
-#define SYS_CLCD_MODE_MASK (BIT(0)|BIT(1))
-#define SYS_CLCD_MODE_888 0
-#define SYS_CLCD_MODE_5551 BIT(0)
-#define SYS_CLCD_MODE_565_R_LSB BIT(1)
-#define SYS_CLCD_MODE_565_B_LSB (BIT(0)|BIT(1))
-#define SYS_CLCD_CONNECTOR_MASK (BIT(2)|BIT(3)|BIT(4)|BIT(5))
-#define SYS_CLCD_NLCDIOON BIT(2)
-#define SYS_CLCD_VDDPOSSWITCH BIT(3)
-#define SYS_CLCD_PWR3V5SWITCH BIT(4)
-#define SYS_CLCD_VDDNEGSWITCH BIT(5)
-#define SYS_CLCD_TSNSS BIT(6) /* touchscreen enable */
-#define SYS_CLCD_SSPEXP BIT(7) /* SSP expansion enable */
-
-/* The Versatile can detect the connected panel type */
-#define SYS_CLCD_CLCDID_MASK (BIT(8)|BIT(9)|BIT(10)|BIT(11)|BIT(12))
-#define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8)
-#define SYS_CLCD_ID_SHARP_8_4 (0x01 << 8)
-#define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8)
-#define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8)
-#define SYS_CLCD_ID_VGA (0x1f << 8)
-
-#define SYS_CLCD_TSNDAV BIT(13) /* data ready from TS */
-
-/* IB2 control register for the Versatile daughterboard */
-#define IB2_CTRL 0x00
-#define IB2_CTRL_LCD_SD BIT(1) /* 1 = shut down LCD */
-#define IB2_CTRL_LCD_BL_ON BIT(0)
-#define IB2_CTRL_LCD_MASK (BIT(0)|BIT(1))
-
-static void versatile_clcd_disable(struct clcd_fb *fb)
-{
- dev_info(&fb->dev->dev, "disable Versatile CLCD connectors\n");
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_CONNECTOR_MASK,
- 0);
-
- /* If we're on an IB2 daughterboard, turn off display */
- if (versatile_ib2_map) {
- dev_info(&fb->dev->dev, "disable IB2 display\n");
- regmap_update_bits(versatile_ib2_map,
- IB2_CTRL,
- IB2_CTRL_LCD_MASK,
- IB2_CTRL_LCD_SD);
- }
-}
-
-static void versatile_clcd_enable(struct clcd_fb *fb)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val = 0;
-
- dev_info(&fb->dev->dev, "enable Versatile CLCD connectors\n");
- switch (var->green.length) {
- case 5:
- val |= SYS_CLCD_MODE_5551;
- break;
- case 6:
- if (var->red.offset == 0)
- val |= SYS_CLCD_MODE_565_R_LSB;
- else
- val |= SYS_CLCD_MODE_565_B_LSB;
- break;
- case 8:
- val |= SYS_CLCD_MODE_888;
- break;
- }
-
- /* Set up the MUX */
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_MODE_MASK,
- val);
-
- /* Then enable the display */
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_CONNECTOR_MASK,
- SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
-
- /* If we're on an IB2 daughterboard, turn on display */
- if (versatile_ib2_map) {
- dev_info(&fb->dev->dev, "enable IB2 display\n");
- regmap_update_bits(versatile_ib2_map,
- IB2_CTRL,
- IB2_CTRL_LCD_MASK,
- IB2_CTRL_LCD_BL_ON);
- }
-}
-
-static void versatile_clcd_decode(struct clcd_fb *fb, struct clcd_regs *regs)
-{
- clcdfb_decode(fb, regs);
-
- /* Always clear BGR for RGB565: we do the routing externally */
- if (fb->fb.var.green.length == 6)
- regs->cntl &= ~CNTL_BGR;
-}
-
-static void realview_clcd_disable(struct clcd_fb *fb)
-{
- dev_info(&fb->dev->dev, "disable RealView CLCD connectors\n");
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_CONNECTOR_MASK,
- 0);
-}
-
-static void realview_clcd_enable(struct clcd_fb *fb)
-{
- dev_info(&fb->dev->dev, "enable RealView CLCD connectors\n");
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_CONNECTOR_MASK,
- SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
-}
-
-struct versatile_panel {
- u32 id;
- char *compatible;
- bool ib2;
-};
-
-static const struct versatile_panel versatile_panels[] = {
- {
- .id = SYS_CLCD_ID_VGA,
- .compatible = "VGA",
- },
- {
- .id = SYS_CLCD_ID_SANYO_3_8,
- .compatible = "sanyo,tm38qv67a02a",
- },
- {
- .id = SYS_CLCD_ID_SHARP_8_4,
- .compatible = "sharp,lq084v1dg21",
- },
- {
- .id = SYS_CLCD_ID_EPSON_2_2,
- .compatible = "epson,l2f50113t00",
- },
- {
- .id = SYS_CLCD_ID_SANYO_2_5,
- .compatible = "sanyo,alr252rgt",
- .ib2 = true,
- },
-};
-
-static void versatile_panel_probe(struct device *dev, struct device_node *panel)
-{
- struct versatile_panel const *vpanel = NULL;
- u32 val;
- int ret;
- int i;
-
- /*
- * The Versatile CLCD has a panel auto-detection mechanism.
- * We use this and look for the compatible panel in the
- * device tree.
- */
- ret = regmap_read(versatile_syscon_map, SYS_CLCD, &val);
- if (ret) {
- dev_err(dev, "cannot read CLCD syscon register\n");
- return;
- }
- val &= SYS_CLCD_CLCDID_MASK;
-
- /* First find corresponding panel information */
- for (i = 0; i < ARRAY_SIZE(versatile_panels); i++) {
- vpanel = &versatile_panels[i];
-
- if (val == vpanel->id) {
- dev_err(dev, "autodetected panel \"%s\"\n",
- vpanel->compatible);
- break;
- }
- }
- if (i == ARRAY_SIZE(versatile_panels)) {
- dev_err(dev, "could not auto-detect panel\n");
- return;
- }
-
- if (!of_device_is_compatible(panel, vpanel->compatible))
- dev_err(dev, "panel in DT is not compatible with the "
- "auto-detected panel, continuing anyway\n");
-
- /*
- * If we have a Sanyo 2.5" panel we know that we are running
- * on an IB2 daughterboard and proceed to look for the IB2
- * syscon regmap.
- */
- if (!vpanel->ib2)
- return;
-
- versatile_ib2_map = syscon_regmap_lookup_by_compatible(
- "arm,versatile-ib2-syscon");
- if (IS_ERR(versatile_ib2_map)) {
- dev_err(dev, "could not locate IB2 control register\n");
- versatile_ib2_map = NULL;
- return;
- }
-}
-
-int versatile_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel)
-{
- const struct of_device_id *clcd_id;
- enum versatile_clcd versatile_clcd_type;
- struct device_node *np;
- struct regmap *map;
- struct device *dev = &fb->dev->dev;
-
- np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
- &clcd_id);
- if (!np) {
- /* Vexpress does not have this */
- return 0;
- }
- versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
-
- map = syscon_node_to_regmap(np);
- if (IS_ERR(map)) {
- dev_err(dev, "no Versatile syscon regmap\n");
- return PTR_ERR(map);
- }
-
- switch (versatile_clcd_type) {
- case INTEGRATOR_CLCD_CM:
- versatile_syscon_map = map;
- fb->board->enable = integrator_clcd_enable;
- /* Override the caps, we have only these */
- fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_RGB565 |
- CLCD_CAP_888;
- dev_info(dev, "set up callbacks for Integrator PL110\n");
- break;
- case VERSATILE_CLCD:
- versatile_syscon_map = map;
- fb->board->enable = versatile_clcd_enable;
- fb->board->disable = versatile_clcd_disable;
- fb->board->decode = versatile_clcd_decode;
- versatile_panel_probe(dev, panel);
- dev_info(dev, "set up callbacks for Versatile\n");
- break;
- case REALVIEW_CLCD_EB:
- case REALVIEW_CLCD_PB1176:
- case REALVIEW_CLCD_PB11MP:
- case REALVIEW_CLCD_PBA8:
- case REALVIEW_CLCD_PBX:
- versatile_syscon_map = map;
- fb->board->enable = realview_clcd_enable;
- fb->board->disable = realview_clcd_disable;
- dev_info(dev, "set up callbacks for RealView PL111\n");
- break;
- default:
- dev_info(dev, "unknown Versatile system controller\n");
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(versatile_clcd_init_panel);
-#endif
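
Editor's note: the removed board file above is essentially one idiom applied repeatedly: find the board's system controller via a compatible string, obtain its regmap, and read-modify-write a handful of control bits. The sketch below shows that syscon/regmap pattern in isolation; the register offset, bit name and compatible string are made-up placeholders, not the Versatile values used above.

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* Hypothetical register layout, for illustration only */
#define DEMO_SYS_CTRL		0x10
#define DEMO_SYS_CTRL_LCD_EN	BIT(0)

static int demo_display_power_on(void)
{
	struct regmap *map;

	/* Look up the system controller by its compatible string */
	map = syscon_regmap_lookup_by_compatible("vendor,demo-syscon");
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Read-modify-write only the enable bit, leaving the rest intact */
	return regmap_update_bits(map, DEMO_SYS_CTRL,
				  DEMO_SYS_CTRL_LCD_EN,
				  DEMO_SYS_CTRL_LCD_EN);
}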
diff --git a/drivers/video/fbdev/amba-clcd-versatile.h b/drivers/video/fbdev/amba-clcd-versatile.h
deleted file mode 100644
index b20baa47e6ad..000000000000
--- a/drivers/video/fbdev/amba-clcd-versatile.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Special local versatile callbacks
- */
-#include <linux/of.h>
-#include <linux/amba/bus.h>
-#include <linux/platform_data/video-clcd-versatile.h>
-
-#if defined(CONFIG_PLAT_VERSATILE_CLCD) && defined(CONFIG_OF)
-int versatile_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel);
-#else
-static inline int versatile_clcd_init_panel(struct clcd_fb *fb,
- struct device_node *panel)
-{
- return 0;
-}
-#endif
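
Editor's note: the deleted header used the usual Kconfig stub pattern: declare the real function when the option is built, otherwise provide a static inline that compiles away, so callers never need their own #ifdefs. A generic sketch of that pattern, with a hypothetical CONFIG symbol and function name:

#include <linux/kconfig.h>

struct demo_ctx;

#if IS_ENABLED(CONFIG_DEMO_HELPER)
int demo_helper_init(struct demo_ctx *ctx);
#else
static inline int demo_helper_init(struct demo_ctx *ctx)
{
	/* No-op stub: callers build unchanged when the helper is disabled */
	return 0;
}
#endif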
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 38c1f324ce15..89324e42a033 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -30,9 +30,6 @@
#include <video/of_display_timing.h>
#include <video/videomode.h>
-#include "amba-clcd-nomadik.h"
-#include "amba-clcd-versatile.h"
-
#define to_clcd(info) container_of(info, struct clcd_fb, fb)
/* This is limited to 16 characters when displayed by X startup */
@@ -223,15 +220,6 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
var->blue.length = 4;
}
break;
- case 24:
- if (fb->vendor->packed_24_bit_pixels) {
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- } else {
- ret = -EINVAL;
- }
- break;
case 32:
/* If we can't do 888, reject */
caps &= CLCD_CAP_888;
@@ -318,12 +306,6 @@ static int clcdfb_set_par(struct fb_info *info)
clcdfb_disable(fb);
- /* Some variants must be clocked here */
- if (fb->vendor->clock_timregs && !fb->clk_enabled) {
- fb->clk_enabled = true;
- clk_enable(fb->clk);
- }
-
writel(regs.tim0, fb->regs + CLCD_TIM0);
writel(regs.tim1, fb->regs + CLCD_TIM1);
writel(regs.tim2, fb->regs + CLCD_TIM2);
@@ -465,14 +447,8 @@ static int clcdfb_register(struct clcd_fb *fb)
fb->off_ienb = CLCD_PL111_IENB;
fb->off_cntl = CLCD_PL111_CNTL;
} else {
- if (of_machine_is_compatible("arm,versatile-ab") ||
- of_machine_is_compatible("arm,versatile-pb")) {
- fb->off_ienb = CLCD_PL111_IENB;
- fb->off_cntl = CLCD_PL111_CNTL;
- } else {
- fb->off_ienb = CLCD_PL110_IENB;
- fb->off_cntl = CLCD_PL110_CNTL;
- }
+ fb->off_ienb = CLCD_PL110_IENB;
+ fb->off_cntl = CLCD_PL110_CNTL;
}
fb->clk = clk_get(&fb->dev->dev, NULL);
@@ -713,42 +689,6 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
if (r0 != 0 && b0 == 0)
fb->panel->bgr_connection = true;
- if (fb->panel->caps && fb->vendor->st_bitmux_control) {
- /*
- * Set up the special bits for the Nomadik control register
- * (other platforms tend to do this through an external
- * register).
- */
-
- /* Offset of the highest used color */
- int maxoff = max3(r0, g0, b0);
- /* Most significant bit out, highest used bit */
- int msb = 0;
-
- if (fb->panel->caps & CLCD_CAP_888) {
- msb = maxoff + 8 - 1;
- } else if (fb->panel->caps & CLCD_CAP_565) {
- msb = maxoff + 5 - 1;
- fb->panel->cntl |= CNTL_ST_1XBPP_565;
- } else if (fb->panel->caps & CLCD_CAP_5551) {
- msb = maxoff + 5 - 1;
- fb->panel->cntl |= CNTL_ST_1XBPP_5551;
- } else if (fb->panel->caps & CLCD_CAP_444) {
- msb = maxoff + 4 - 1;
- fb->panel->cntl |= CNTL_ST_1XBPP_444;
- }
-
- /* Send out as many bits as we need */
- if (msb > 17)
- fb->panel->cntl |= CNTL_ST_CDWID_24;
- else if (msb > 15)
- fb->panel->cntl |= CNTL_ST_CDWID_18;
- else if (msb > 11)
- fb->panel->cntl |= CNTL_ST_CDWID_16;
- else
- fb->panel->cntl |= CNTL_ST_CDWID_12;
- }
-
return fb->panel->caps ? 0 : -EINVAL;
}
@@ -775,12 +715,6 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (!panel)
return -ENODEV;
- if (fb->vendor->init_panel) {
- err = fb->vendor->init_panel(fb, panel);
- if (err)
- return err;
- }
-
err = clcdfb_of_get_backlight(panel, fb->panel);
if (err)
return err;
@@ -941,7 +875,6 @@ static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev_get_platdata(&dev->dev);
- struct clcd_vendor_data *vendor = id->data;
struct clcd_fb *fb;
int ret;
@@ -951,12 +884,6 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
if (!board)
return -EINVAL;
- if (vendor->init_board) {
- ret = vendor->init_board(dev, board);
- if (ret)
- return ret;
- }
-
ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (ret)
goto out;
@@ -974,7 +901,6 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
}
fb->dev = dev;
- fb->vendor = vendor;
fb->board = board;
dev_info(&fb->dev->dev, "PL%03x designer %02x rev%u at 0x%08llx\n",
@@ -1021,30 +947,10 @@ static int clcdfb_remove(struct amba_device *dev)
return 0;
}
-static struct clcd_vendor_data vendor_arm = {
- /* Sets up the versatile board displays */
- .init_panel = versatile_clcd_init_panel,
-};
-
-static struct clcd_vendor_data vendor_nomadik = {
- .clock_timregs = true,
- .packed_24_bit_pixels = true,
- .st_bitmux_control = true,
- .init_board = nomadik_clcd_init_board,
- .init_panel = nomadik_clcd_init_panel,
-};
-
static const struct amba_id clcdfb_id_table[] = {
{
.id = 0x00041110,
.mask = 0x000ffffe,
- .data = &vendor_arm,
- },
- /* ST Electronics Nomadik variant */
- {
- .id = 0x00180110,
- .mask = 0x00fffffe,
- .data = &vendor_nomadik,
},
{ 0, 0 },
};
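
Editor's note: with the Nomadik entry gone, the id table above matches only the ARM PrimeCell parts: the peripheral ID read from the device is compared against .id under .mask, and the mask 0x000ffffe deliberately ignores the lowest bit so that both the PL110 and PL111 variants match. A minimal sketch of an AMBA driver bound through such a table follows; the names are illustrative, not this driver's.

#include <linux/amba/bus.h>
#include <linux/module.h>

static int demo_clcd_probe(struct amba_device *adev, const struct amba_id *id)
{
	dev_info(&adev->dev, "bound to PrimeCell, periphid %08x\n",
		 adev->periphid);
	return 0;
}

/* Low bit of the part number is ignored so 0x..110 and 0x..111 both match */
static const struct amba_id demo_clcd_ids[] = {
	{ .id = 0x00041110, .mask = 0x000ffffe },
	{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, demo_clcd_ids);

static struct amba_driver demo_clcd_driver = {
	.drv		= { .name = "demo-clcd" },
	.id_table	= demo_clcd_ids,
	.probe		= demo_clcd_probe,
};
module_amba_driver(demo_clcd_driver);
MODULE_LICENSE("GPL");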
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index fcd2dd670a65..b986af2a8042 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -47,7 +47,6 @@
#define ATAFB_EXT
#define ATAFB_FALCON
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -55,6 +54,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
@@ -3073,28 +3073,22 @@ int __init atafb_setup(char *options)
return 0;
}
-int __init atafb_init(void)
+static int __init atafb_probe(struct platform_device *pdev)
{
int pad, detected_mode, error;
unsigned int defmode = 0;
unsigned long mem_req;
-
-#ifndef MODULE
char *option = NULL;
if (fb_get_options("atafb", &option))
return -ENODEV;
atafb_setup(option);
-#endif
- printk("atafb_init: start\n");
-
- if (!MACH_IS_ATARI)
- return -ENODEV;
+ dev_dbg(&pdev->dev, "%s: start\n", __func__);
do {
#ifdef ATAFB_EXT
if (external_addr) {
- printk("atafb_init: initializing external hw\n");
+ dev_dbg(&pdev->dev, "initializing external hw\n");
fbhw = &ext_switch;
atafb_ops.fb_setcolreg = &ext_setcolreg;
defmode = DEFMODE_EXT;
@@ -3103,7 +3097,7 @@ int __init atafb_init(void)
#endif
#ifdef ATAFB_TT
if (ATARIHW_PRESENT(TT_SHIFTER)) {
- printk("atafb_init: initializing TT hw\n");
+ dev_dbg(&pdev->dev, "initializing TT hw\n");
fbhw = &tt_switch;
atafb_ops.fb_setcolreg = &tt_setcolreg;
defmode = DEFMODE_TT;
@@ -3112,7 +3106,7 @@ int __init atafb_init(void)
#endif
#ifdef ATAFB_FALCON
if (ATARIHW_PRESENT(VIDEL_SHIFTER)) {
- printk("atafb_init: initializing Falcon hw\n");
+ dev_dbg(&pdev->dev, "initializing Falcon hw\n");
fbhw = &falcon_switch;
atafb_ops.fb_setcolreg = &falcon_setcolreg;
error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 0,
@@ -3127,7 +3121,7 @@ int __init atafb_init(void)
#ifdef ATAFB_STE
if (ATARIHW_PRESENT(STND_SHIFTER) ||
ATARIHW_PRESENT(EXTD_SHIFTER)) {
- printk("atafb_init: initializing ST/E hw\n");
+ dev_dbg(&pdev->dev, "initializing ST/E hw\n");
fbhw = &st_switch;
atafb_ops.fb_setcolreg = &stste_setcolreg;
defmode = DEFMODE_STE;
@@ -3135,7 +3129,8 @@ int __init atafb_init(void)
}
fbhw = &st_switch;
atafb_ops.fb_setcolreg = &stste_setcolreg;
- printk("Cannot determine video hardware; defaulting to ST(e)\n");
+ dev_warn(&pdev->dev,
+ "Cannot determine video hardware; defaulting to ST(e)\n");
#else /* ATAFB_STE */
/* no default driver included */
/* Nobody will ever see this message :-) */
@@ -3175,8 +3170,8 @@ int __init atafb_init(void)
kernel_set_cachemode(screen_base, screen_len,
IOMAP_WRITETHROUGH);
}
- printk("atafb: screen_base %p phys_screen_base %lx screen_len %d\n",
- screen_base, phys_screen_base, screen_len);
+ dev_info(&pdev->dev, "phys_screen_base %lx screen_len %d\n",
+ phys_screen_base, screen_len);
#ifdef ATAFB_EXT
} else {
/* Map the video memory (physical address given) to somewhere
@@ -3223,12 +3218,12 @@ int __init atafb_init(void)
fb_alloc_cmap(&(fb_info.cmap), 1 << fb_info.var.bits_per_pixel, 0);
- printk("Determined %dx%d, depth %d\n",
- fb_info.var.xres, fb_info.var.yres, fb_info.var.bits_per_pixel);
+ dev_info(&pdev->dev, "Determined %dx%d, depth %d\n", fb_info.var.xres,
+ fb_info.var.yres, fb_info.var.bits_per_pixel);
if ((fb_info.var.xres != fb_info.var.xres_virtual) ||
(fb_info.var.yres != fb_info.var.yres_virtual))
- printk(" virtual %dx%d\n", fb_info.var.xres_virtual,
- fb_info.var.yres_virtual);
+ dev_info(&pdev->dev, " virtual %dx%d\n",
+ fb_info.var.xres_virtual, fb_info.var.yres_virtual);
if (register_framebuffer(&fb_info) < 0) {
#ifdef ATAFB_EXT
@@ -3251,14 +3246,32 @@ int __init atafb_init(void)
return 0;
}
-module_init(atafb_init);
+static void atafb_shutdown(struct platform_device *pdev)
+{
+ /* Unblank before kexec */
+ if (fbhw->blank)
+ fbhw->blank(0);
+}
-#ifdef MODULE
-MODULE_LICENSE("GPL");
+static struct platform_driver atafb_driver = {
+ .shutdown = atafb_shutdown,
+ .driver = {
+ .name = "atafb",
+ },
+};
-int cleanup_module(void)
+static int __init atafb_init(void)
{
- unregister_framebuffer(&fb_info);
- return atafb_deinit();
+ struct platform_device *pdev;
+
+ if (!MACH_IS_ATARI)
+ return -ENODEV;
+
+ pdev = platform_device_register_simple("atafb", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return platform_driver_probe(&atafb_driver, atafb_probe);
}
-#endif /* MODULE */
+
+device_initcall(atafb_init);
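
Editor's note: the atafb conversion above follows a standard shape for hardware that has no firmware-described device node: the init function registers a platform device by hand and then binds a one-shot driver with platform_driver_probe(), which lets the probe routine stay in __init memory. A minimal sketch of the same shape, under made-up names:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init demofb_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static struct platform_driver demofb_driver = {
	.driver = {
		.name = "demofb",
	},
};

static int __init demofb_init(void)
{
	struct platform_device *pdev;

	/* No DT/ACPI node exists, so create the device explicitly */
	pdev = platform_device_register_simple("demofb", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/* Bind exactly once at init time; probe may live in __init text */
	return platform_driver_probe(&demofb_driver, demofb_probe);
}
device_initcall(demofb_init);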
diff --git a/drivers/video/fbdev/atafb_iplan2p2.c b/drivers/video/fbdev/atafb_iplan2p2.c
index 8cc9c50379d0..a1660c24bf36 100644
--- a/drivers/video/fbdev/atafb_iplan2p2.c
+++ b/drivers/video/fbdev/atafb_iplan2p2.c
@@ -10,7 +10,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
@@ -269,25 +268,3 @@ void atafb_iplan2p2_linefill(struct fb_info *info, u_long next_line,
if (width)
fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif /* MODULE */
-
-
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(atafb_iplan2p2_copyarea);
-EXPORT_SYMBOL(atafb_iplan2p2_fillrect);
-EXPORT_SYMBOL(atafb_iplan2p2_linefill);
diff --git a/drivers/video/fbdev/atafb_iplan2p4.c b/drivers/video/fbdev/atafb_iplan2p4.c
index bee0d89463f7..663d66582d79 100644
--- a/drivers/video/fbdev/atafb_iplan2p4.c
+++ b/drivers/video/fbdev/atafb_iplan2p4.c
@@ -10,7 +10,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
@@ -284,25 +283,3 @@ void atafb_iplan2p4_linefill(struct fb_info *info, u_long next_line,
if (width)
fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif /* MODULE */
-
-
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(atafb_iplan2p4_copyarea);
-EXPORT_SYMBOL(atafb_iplan2p4_fillrect);
-EXPORT_SYMBOL(atafb_iplan2p4_linefill);
diff --git a/drivers/video/fbdev/atafb_iplan2p8.c b/drivers/video/fbdev/atafb_iplan2p8.c
index 356fb52ce443..39a6cbbb6ca3 100644
--- a/drivers/video/fbdev/atafb_iplan2p8.c
+++ b/drivers/video/fbdev/atafb_iplan2p8.c
@@ -10,7 +10,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
@@ -321,25 +320,3 @@ void atafb_iplan2p8_linefill(struct fb_info *info, u_long next_line,
if (width)
fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif /* MODULE */
-
-
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(atafb_iplan2p8_copyarea);
-EXPORT_SYMBOL(atafb_iplan2p8_fillrect);
-EXPORT_SYMBOL(atafb_iplan2p8_linefill);
diff --git a/drivers/video/fbdev/atafb_mfb.c b/drivers/video/fbdev/atafb_mfb.c
index 6a352d62eecf..384fd3e4d3e1 100644
--- a/drivers/video/fbdev/atafb_mfb.c
+++ b/drivers/video/fbdev/atafb_mfb.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
@@ -88,25 +87,3 @@ void atafb_mfb_linefill(struct fb_info *info, u_long next_line,
*dest++ = *data++;
}
}
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif /* MODULE */
-
-
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(atafb_mfb_copyarea);
-EXPORT_SYMBOL(atafb_mfb_fillrect);
-EXPORT_SYMBOL(atafb_mfb_linefill);
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 4ed55e6bbb84..e67dfd94bf1d 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1,5 +1,5 @@
/*
- * Driver for AT91/AT32 LCD Controller
+ * Driver for AT91 LCD Controller
*
* Copyright (C) 2007 Atmel Corporation
*
@@ -99,86 +99,6 @@ static struct atmel_lcdfb_config at91sam9rl_config = {
.have_intensity_bit = true,
};
-static struct atmel_lcdfb_config at32ap_config = {
- .have_hozval = true,
-};
-
-static const struct platform_device_id atmel_lcdfb_devtypes[] = {
- {
- .name = "at91sam9261-lcdfb",
- .driver_data = (unsigned long)&at91sam9261_config,
- }, {
- .name = "at91sam9263-lcdfb",
- .driver_data = (unsigned long)&at91sam9263_config,
- }, {
- .name = "at91sam9g10-lcdfb",
- .driver_data = (unsigned long)&at91sam9g10_config,
- }, {
- .name = "at91sam9g45-lcdfb",
- .driver_data = (unsigned long)&at91sam9g45_config,
- }, {
- .name = "at91sam9g45es-lcdfb",
- .driver_data = (unsigned long)&at91sam9g45es_config,
- }, {
- .name = "at91sam9rl-lcdfb",
- .driver_data = (unsigned long)&at91sam9rl_config,
- }, {
- .name = "at32ap-lcdfb",
- .driver_data = (unsigned long)&at32ap_config,
- }, {
- /* terminator */
- }
-};
-MODULE_DEVICE_TABLE(platform, atmel_lcdfb_devtypes);
-
-static struct atmel_lcdfb_config *
-atmel_lcdfb_get_config(struct platform_device *pdev)
-{
- unsigned long data;
-
- data = platform_get_device_id(pdev)->driver_data;
-
- return (struct atmel_lcdfb_config *)data;
-}
-
-#if defined(CONFIG_ARCH_AT91)
-#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
- | FBINFO_PARTIAL_PAN_OK \
- | FBINFO_HWACCEL_YPAN)
-
-static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
- struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
-
-}
-#elif defined(CONFIG_AVR32)
-#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
- | FBINFO_PARTIAL_PAN_OK \
- | FBINFO_HWACCEL_XPAN \
- | FBINFO_HWACCEL_YPAN)
-
-static void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
- struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- u32 dma2dcfg;
- u32 pixeloff;
-
- pixeloff = (var->xoffset * info->var.bits_per_pixel) & 0x1f;
-
- dma2dcfg = (info->var.xres_virtual - info->var.xres)
- * info->var.bits_per_pixel / 8;
- dma2dcfg |= pixeloff << ATMEL_LCDC_PIXELOFF_OFFSET;
- lcdc_writel(sinfo, ATMEL_LCDC_DMA2DCFG, dma2dcfg);
-
- /* Update configuration */
- lcdc_writel(sinfo, ATMEL_LCDC_DMACON,
- lcdc_readl(sinfo, ATMEL_LCDC_DMACON)
- | ATMEL_LCDC_DMAUPDT);
-}
-#endif
-
static u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
| ATMEL_LCDC_POL_POSITIVE
| ATMEL_LCDC_ENA_PWMENABLE;
@@ -404,8 +324,6 @@ static void atmel_lcdfb_update_dma(struct fb_info *info,
/* Set framebuffer DMA base address and pixel offset */
lcdc_writel(sinfo, ATMEL_LCDC_DMABADDR1, dma_addr);
-
- atmel_lcdfb_update_dma2d(sinfo, var, info);
}
static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
@@ -978,7 +896,6 @@ static void atmel_lcdfb_stop_clock(struct atmel_lcdfb_info *sinfo)
clk_disable_unprepare(sinfo->lcdc_clk);
}
-#ifdef CONFIG_OF
static const struct of_device_id atmel_lcdfb_dt_ids[] = {
{ .compatible = "atmel,at91sam9261-lcdc" , .data = &at91sam9261_config, },
{ .compatible = "atmel,at91sam9263-lcdc" , .data = &at91sam9263_config, },
@@ -986,7 +903,6 @@ static const struct of_device_id atmel_lcdfb_dt_ids[] = {
{ .compatible = "atmel,at91sam9g45-lcdc" , .data = &at91sam9g45_config, },
{ .compatible = "atmel,at91sam9g45es-lcdc" , .data = &at91sam9g45es_config, },
{ .compatible = "atmel,at91sam9rl-lcdc" , .data = &at91sam9rl_config, },
- { .compatible = "atmel,at32ap-lcdc" , .data = &at32ap_config, },
{ /* sentinel */ }
};
@@ -1122,19 +1038,12 @@ put_display_node:
of_node_put(display_np);
return ret;
}
-#else
-static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
-{
- return 0;
-}
-#endif
static int __init atmel_lcdfb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fb_info *info;
struct atmel_lcdfb_info *sinfo;
- struct atmel_lcdfb_pdata *pdata = NULL;
struct resource *regs = NULL;
struct resource *map = NULL;
struct fb_modelist *modelist;
@@ -1159,21 +1068,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
ret = atmel_lcdfb_of_init(sinfo);
if (ret)
goto free_info;
- } else if (dev_get_platdata(dev)) {
- struct fb_monspecs *monspecs;
- int i;
-
- pdata = dev_get_platdata(dev);
- monspecs = pdata->default_monspecs;
- sinfo->pdata = *pdata;
-
- for (i = 0; i < monspecs->modedb_len; i++)
- fb_add_videomode(&monspecs->modedb[i], &info->modelist);
-
- sinfo->config = atmel_lcdfb_get_config(pdev);
-
- info->var.bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
- memcpy(&info->monspecs, pdata->default_monspecs, sizeof(info->monspecs));
} else {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
@@ -1186,7 +1080,8 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
if (IS_ERR(sinfo->reg_lcd))
sinfo->reg_lcd = NULL;
- info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
+ FBINFO_HWACCEL_YPAN;
info->pseudo_palette = sinfo->pseudo_palette;
info->fbops = &atmel_lcdfb_ops;
@@ -1357,12 +1252,10 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fb_info *info = dev_get_drvdata(dev);
struct atmel_lcdfb_info *sinfo;
- struct atmel_lcdfb_pdata *pdata;
if (!info || !info->par)
return 0;
sinfo = info->par;
- pdata = &sinfo->pdata;
cancel_work_sync(&sinfo->task);
exit_backlight(sinfo);
@@ -1435,7 +1328,6 @@ static struct platform_driver atmel_lcdfb_driver = {
.remove = __exit_p(atmel_lcdfb_remove),
.suspend = atmel_lcdfb_suspend,
.resume = atmel_lcdfb_resume,
- .id_table = atmel_lcdfb_devtypes,
.driver = {
.name = "atmel_lcdfb",
.of_match_table = of_match_ptr(atmel_lcdfb_dt_ids),
@@ -1444,6 +1336,6 @@ static struct platform_driver atmel_lcdfb_driver = {
module_platform_driver_probe(atmel_lcdfb_driver, atmel_lcdfb_probe);
-MODULE_DESCRIPTION("AT91/AT32 LCD Controller framebuffer driver");
+MODULE_DESCRIPTION("AT91 LCD Controller framebuffer driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
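
Editor's note: with the platform_device_id table gone, per-SoC configuration for this driver is carried solely by the .data pointers in the OF match table kept above. One common way to retrieve such data in a probe routine, though not necessarily the exact call this driver uses, is of_device_get_match_data(); the sketch below uses hypothetical compatible strings and config fields.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_lcdc_cfg {
	bool have_intensity_bit;
};

static const struct demo_lcdc_cfg demo_lcdc_cfg_a = {
	.have_intensity_bit = true,
};

static const struct of_device_id demo_lcdc_dt_ids[] = {
	{ .compatible = "vendor,demo-lcdc-a", .data = &demo_lcdc_cfg_a },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_lcdc_dt_ids);

static int demo_lcdc_probe(struct platform_device *pdev)
{
	const struct demo_lcdc_cfg *cfg = of_device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;

	dev_info(&pdev->dev, "intensity bit: %d\n", cfg->have_intensity_bit);
	return 0;
}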
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 68a113594808..2811c4afde01 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
int size = len * sizeof(u16);
int ret = -ENOMEM;
+ flags |= __GFP_NOWARN;
+
if (cmap->len != len) {
fb_dealloc_cmap(cmap);
if (!len)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index cd059a801662..786f9aab55df 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1069,7 +1069,7 @@ static void fbcon_init(struct vc_data *vc, int init)
cap = info->flags;
- if (console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
+ if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
logo_shown = FBCON_LOGO_DONTSHOW;
if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 4721491e6c8c..d1949c92be98 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1882,14 +1882,35 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const
{
struct apertures_struct *ap;
bool primary = false;
- int err;
+ int err, idx, bar;
+ bool res_id_found = false;
+
+ for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ continue;
+ idx++;
+ }
- ap = alloc_apertures(1);
+ ap = alloc_apertures(idx);
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = pci_resource_start(pdev, res_id);
- ap->ranges[0].size = pci_resource_len(pdev, res_id);
+ for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ continue;
+ ap->ranges[idx].base = pci_resource_start(pdev, bar);
+ ap->ranges[idx].size = pci_resource_len(pdev, bar);
+ pci_info(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
+ (unsigned long)pci_resource_start(pdev, bar),
+ (unsigned long)pci_resource_end(pdev, bar));
+ idx++;
+ if (res_id == bar)
+ res_id_found = true;
+ }
+ if (!res_id_found)
+ pci_warn(pdev, "%s: passed res_id (%d) is not a memory bar\n",
+ __func__, res_id);
+
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW;
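
Editor's note: the fbmem.c hunk above stops describing a single BAR and instead walks every memory BAR of the device, building one aperture per BAR. The loop pattern is worth isolating; this sketch only counts and logs the memory BARs and is not the helper the patch modifies.

#include <linux/ioport.h>
#include <linux/pci.h>

/* Count the memory BARs of a PCI device, logging each one */
static unsigned int demo_count_mem_bars(struct pci_dev *pdev)
{
	unsigned int count = 0;
	int bar;

	for (bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
			continue;
		pci_info(pdev, "BAR %d: %pR\n", bar, &pdev->resource[bar]);
		count++;
	}

	return count;
}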
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 283d9307df21..ac049871704d 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -935,6 +935,9 @@ void fb_var_to_videomode(struct fb_videomode *mode,
if (var->vmode & FB_VMODE_DOUBLE)
vtotal *= 2;
+ if (!htotal || !vtotal)
+ return;
+
hfreq = pixclock/htotal;
mode->refresh = hfreq/vtotal;
}
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 43f2a4816860..ec62274b914b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1097,9 +1097,9 @@ static int fb_remove(struct platform_device *dev)
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
- dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
+ dma_free_coherent(par->dev, PALETTE_SIZE, par->v_palette_base,
par->p_palette_base);
- dma_free_coherent(NULL, par->vram_size, par->vram_virt,
+ dma_free_coherent(par->dev, par->vram_size, par->vram_virt,
par->vram_phys);
pm_runtime_put_sync(&dev->dev);
pm_runtime_disable(&dev->dev);
@@ -1425,7 +1425,7 @@ static int fb_probe(struct platform_device *device)
par->vram_size = roundup(par->vram_size/8, ulcm);
par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
- par->vram_virt = dma_alloc_coherent(NULL,
+ par->vram_virt = dma_alloc_coherent(par->dev,
par->vram_size,
&par->vram_phys,
GFP_KERNEL | GFP_DMA);
@@ -1446,7 +1446,7 @@ static int fb_probe(struct platform_device *device)
da8xx_fb_fix.line_length - 1;
/* allocate palette buffer */
- par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
+ par->v_palette_base = dma_alloc_coherent(par->dev, PALETTE_SIZE,
&par->p_palette_base,
GFP_KERNEL | GFP_DMA);
if (!par->v_palette_base) {
@@ -1532,11 +1532,12 @@ err_dealloc_cmap:
fb_dealloc_cmap(&da8xx_fb_info->cmap);
err_release_pl_mem:
- dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
+ dma_free_coherent(par->dev, PALETTE_SIZE, par->v_palette_base,
par->p_palette_base);
err_release_fb_mem:
- dma_free_coherent(NULL, par->vram_size, par->vram_virt, par->vram_phys);
+ dma_free_coherent(par->dev, par->vram_size, par->vram_virt,
+ par->vram_phys);
err_release_fb:
framebuffer_release(da8xx_fb_info);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ba906876cc45..9e529cc2b4ff 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+ if (efi_enabled(EFI_BOOT) &&
+ !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
if ((efifb_fix.smem_start + efifb_fix.smem_len) >
(md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c
index d9e816d53531..1bddcc20b2c0 100644
--- a/drivers/video/fbdev/fb-puv3.c
+++ b/drivers/video/fbdev/fb-puv3.c
@@ -20,7 +20,7 @@
#include <linux/console.h>
#include <linux/mm.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/pgtable.h>
#include <mach/hardware.h>
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 1a242b1338e9..3fcb33232ba3 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1162,9 +1162,9 @@ static int gbefb_probe(struct platform_device *p_dev)
}
gbe_revision = gbe->ctrlstat & 15;
- gbe_tiles.cpu =
- dma_alloc_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
- &gbe_tiles.dma, GFP_KERNEL);
+ gbe_tiles.cpu = dmam_alloc_coherent(&p_dev->dev,
+ GBE_TLB_SIZE * sizeof(uint16_t),
+ &gbe_tiles.dma, GFP_KERNEL);
if (!gbe_tiles.cpu) {
printk(KERN_ERR "gbefb: couldn't allocate tiles table\n");
ret = -ENOMEM;
@@ -1178,19 +1178,20 @@ static int gbefb_probe(struct platform_device *p_dev)
if (!gbe_mem) {
printk(KERN_ERR "gbefb: couldn't map framebuffer\n");
ret = -ENOMEM;
- goto out_tiles_free;
+ goto out_release_mem_region;
}
gbe_dma_addr = 0;
} else {
/* try to allocate memory with the classical allocator;
* this has a high chance of failing on low-memory machines */
- gbe_mem = dma_alloc_wc(NULL, gbe_mem_size, &gbe_dma_addr,
- GFP_KERNEL);
+ gbe_mem = dmam_alloc_attrs(&p_dev->dev, gbe_mem_size,
+ &gbe_dma_addr, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
if (!gbe_mem) {
printk(KERN_ERR "gbefb: couldn't allocate framebuffer memory\n");
ret = -ENOMEM;
- goto out_tiles_free;
+ goto out_release_mem_region;
}
gbe_mem_phys = (unsigned long) gbe_dma_addr;
@@ -1237,11 +1238,6 @@ static int gbefb_probe(struct platform_device *p_dev)
out_gbe_unmap:
arch_phys_wc_del(par->wc_cookie);
- if (gbe_dma_addr)
- dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
-out_tiles_free:
- dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
- (void *)gbe_tiles.cpu, gbe_tiles.dma);
out_release_mem_region:
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
out_release_framebuffer:
@@ -1258,10 +1254,6 @@ static int gbefb_remove(struct platform_device* p_dev)
unregister_framebuffer(info);
gbe_turn_off();
arch_phys_wc_del(par->wc_cookie);
- if (gbe_dma_addr)
- dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
- dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
- (void *)gbe_tiles.cpu, gbe_tiles.dma);
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
gbefb_remove_sysfs(&p_dev->dev);
framebuffer_release(info);
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 463028543173..59e1cae57948 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -285,6 +285,8 @@ static int hga_card_detect(void)
hga_vram_len = 0x08000;
hga_vram = ioremap(0xb0000, hga_vram_len);
+ if (!hga_vram)
+ goto error;
if (request_region(0x3b0, 12, "hgafb"))
release_io_ports = 1;
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 4b9615e4ce74..35bba3c2036d 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1515,6 +1515,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
info->fix.smem_start = addr;
info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
0x400000 : 0x800000);
+ if (!info->screen_base) {
+ release_mem_region(addr, size);
+ framebuffer_release(info);
+ return -ENOMEM;
+ }
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
par->cmap_regs_phys = addr + 0x840000;
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index e707e617bf1c..8820a556014c 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -120,10 +120,7 @@ struct jet_cmap_regs {
#define PIXEL_TO_MM(a) (((a)*10)/28) /* width in mm at 72 dpi */
static struct fb_var_screeninfo macfb_defined = {
- .bits_per_pixel = 8,
.activate = FB_ACTIVATE_NOW,
- .width = -1,
- .height = -1,
.right_margin = 32,
.upper_margin = 16,
.lower_margin = 4,
@@ -139,7 +136,6 @@ static struct fb_fix_screeninfo macfb_fix = {
static void *slot_addr;
static struct fb_info fb_info;
static u32 pseudo_palette[16];
-static int inverse;
static int vidtest;
/*
@@ -152,7 +148,7 @@ static int dafb_setpalette(unsigned int regno, unsigned int red,
unsigned int green, unsigned int blue,
struct fb_info *info)
{
- static int lastreg = -1;
+ static int lastreg = -2;
unsigned long flags;
local_irq_save(flags);
@@ -201,9 +197,6 @@ static int v8_brazil_setpalette(unsigned int regno, unsigned int red,
unsigned int bpp = info->var.bits_per_pixel;
unsigned long flags;
- if (bpp > 8)
- return 1; /* failsafe */
-
local_irq_save(flags);
/* On these chips, the CLUT register numbers are spread out
@@ -234,9 +227,6 @@ static int rbv_setpalette(unsigned int regno, unsigned int red,
{
unsigned long flags;
- if (info->var.bits_per_pixel > 8)
- return 1; /* failsafe */
-
local_irq_save(flags);
/* From the VideoToolbox driver. Seems to be saying that
@@ -353,9 +343,6 @@ static int civic_setpalette(unsigned int regno, unsigned int red,
unsigned long flags;
int clut_status;
- if (info->var.bits_per_pixel > 8)
- return 1; /* failsafe */
-
local_irq_save(flags);
/* Set the register address */
@@ -532,7 +519,7 @@ static void __init macfb_setup(char *options)
continue;
if (!strcmp(this_opt, "inverse"))
- inverse = 1;
+ fb_invert_cmaps();
else
if (!strcmp(this_opt, "vidtest"))
vidtest = 1; /* enable experimental CLUT code */
@@ -688,17 +675,14 @@ static int __init macfb_init(void)
case NUBUS_DRHW_APPLE_MDC:
strcpy(macfb_fix.id, "Mac Disp. Card");
macfb_setpalette = mdc_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
case NUBUS_DRHW_APPLE_TFB:
strcpy(macfb_fix.id, "Toby");
macfb_setpalette = toby_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
case NUBUS_DRHW_APPLE_JET:
strcpy(macfb_fix.id, "Jet");
macfb_setpalette = jet_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
default:
strcpy(macfb_fix.id, "Generic NuBus");
@@ -731,7 +715,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "DAFB");
macfb_setpalette = dafb_setpalette;
dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -741,7 +724,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "V8");
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -755,7 +737,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "Brazil");
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -772,7 +753,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "Sonora");
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -785,7 +765,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "RBV");
macfb_setpalette = rbv_setpalette;
rbv_cmap_regs = ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -796,7 +775,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "Civic");
macfb_setpalette = civic_setpalette;
civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
@@ -810,7 +788,6 @@ static int __init macfb_init(void)
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs =
ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
}
break;
@@ -823,7 +800,6 @@ static int __init macfb_init(void)
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs =
ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
}
break;
@@ -892,7 +868,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "CSC");
macfb_setpalette = csc_setpalette;
csc_cmap_regs = ioremap(CSC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
default:
diff --git a/drivers/video/fbdev/mmp/Kconfig b/drivers/video/fbdev/mmp/Kconfig
index f56a7e2e8136..1b5e80c8a984 100644
--- a/drivers/video/fbdev/mmp/Kconfig
+++ b/drivers/video/fbdev/mmp/Kconfig
@@ -1,7 +1,7 @@
menuconfig MMP_DISP
- tristate "Marvell MMP Display Subsystem support"
- depends on CPU_PXA910 || CPU_MMP2
- help
+ tristate "Marvell MMP Display Subsystem support"
+ depends on CPU_PXA910 || CPU_MMP2
+ help
Marvell Display Subsystem support.
if MMP_DISP
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index 12c8bd1d24d5..1fdd1eb38fe0 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -181,6 +181,7 @@ struct mxsfb_info {
const struct mxsfb_devdata *devdata;
u32 sync;
struct regulator *reg_lcd;
+ int pre_init;
};
#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
@@ -419,6 +420,12 @@ static int mxsfb_set_par(struct fb_info *fb_info)
fb_info->fix.line_length = line_size;
+ if (host->pre_init) {
+ mxsfb_enable_controller(fb_info);
+ host->pre_init = 0;
+ return 0;
+ }
+
/*
* It seems you can't re-program the controller while it is still running.
* This may lead to shifted pictures (FIFO issue?).
@@ -623,7 +630,6 @@ static int mxsfb_restore_mode(struct fb_info *fb_info,
struct fb_videomode *vmode)
{
struct mxsfb_info *host = fb_info->par;
- unsigned line_count;
unsigned period;
unsigned long pa, fbsize;
int bits_per_pixel, ofs, ret = 0;
@@ -710,7 +716,6 @@ static int mxsfb_restore_mode(struct fb_info *fb_info,
writel(fb_info->fix.smem_start, host->base + host->devdata->next_buf);
}
- line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
fb_info->fix.ypanstep = 1;
clk_prepare_enable(host->clk);
@@ -931,6 +936,10 @@ static int mxsfb_probe(struct platform_device *pdev)
if (IS_ERR(host->reg_lcd))
host->reg_lcd = NULL;
+#if defined(CONFIG_FB_PRE_INIT_FB)
+ host->pre_init = 1;
+#endif
+
fb_info->pseudo_palette = devm_kcalloc(&pdev->dev, 16, sizeof(u32),
GFP_KERNEL);
if (!fb_info->pseudo_palette) {
@@ -963,6 +972,7 @@ static int mxsfb_probe(struct platform_device *pdev)
mxsfb_enable_controller(fb_info);
}
+ host->pre_init = 0;
dev_info(&pdev->dev, "initialized\n");
return 0;
diff --git a/drivers/video/fbdev/nuc900fb.c b/drivers/video/fbdev/nuc900fb.c
index 6680edae4696..44ea5380a546 100644
--- a/drivers/video/fbdev/nuc900fb.c
+++ b/drivers/video/fbdev/nuc900fb.c
@@ -455,7 +455,7 @@ static int nuc900fb_cpufreq_transition(struct notifier_block *nb,
struct fb_info *fbinfo;
long delta_f;
info = container_of(nb, struct nuc900fb_info, freq_transition);
- fbinfo = platform_get_drvdata(to_platform_device(info->dev));
+ fbinfo = dev_get_drvdata(info->dev);
delta_f = info->clk_rate - clk_get_rate(info->clk);
diff --git a/drivers/video/fbdev/omap/Kconfig b/drivers/video/fbdev/omap/Kconfig
index 29d250da8a3e..ca147936bb5c 100644
--- a/drivers/video/fbdev/omap/Kconfig
+++ b/drivers/video/fbdev/omap/Kconfig
@@ -6,7 +6,7 @@ config FB_OMAP
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
- Frame buffer driver for OMAP based boards.
+ Frame buffer driver for OMAP based boards.
config FB_OMAP_LCDC_EXTERNAL
bool "External LCD controller support"
@@ -49,13 +49,11 @@ config FB_OMAP_LCD_H3
H3 board.
config FB_OMAP_DMA_TUNE
- bool "Set DMA SDRAM access priority high"
- depends on FB_OMAP
- help
- On systems in which video memory is in system memory
- (SDRAM) this will speed up graphics DMA operations.
- If you have such a system and want to use rotation
- answer yes. Answer no if you have a dedicated video
- memory, or don't use any of the accelerated features.
-
-
+ bool "Set DMA SDRAM access priority high"
+ depends on FB_OMAP
+ help
+ On systems in which video memory is in system memory
+ (SDRAM) this will speed up graphics DMA operations.
+ If you have such a system and want to use rotation
+ answer yes. Answer no if you have a dedicated video
+ memory, or don't use any of the accelerated features.
diff --git a/drivers/video/fbdev/omap2/omapfb/Kconfig b/drivers/video/fbdev/omap2/omapfb/Kconfig
index 3bf154e676d1..0410e07bb29e 100644
--- a/drivers/video/fbdev/omap2/omapfb/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/Kconfig
@@ -2,23 +2,23 @@ config OMAP2_VRFB
bool
menuconfig FB_OMAP2
- tristate "OMAP2+ frame buffer support"
- depends on FB
- depends on DRM_OMAP = n
+ tristate "OMAP2+ frame buffer support"
+ depends on FB
+ depends on DRM_OMAP = n
depends on GPIOLIB
- select FB_OMAP2_DSS
+ select FB_OMAP2_DSS
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
Frame buffer driver for OMAP2+ based boards.
if FB_OMAP2
config FB_OMAP2_DEBUG_SUPPORT
- bool "Debug support for OMAP2+ FB"
+ bool "Debug support for OMAP2+ FB"
default y
depends on FB_OMAP2
help
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
index 08f12039dd02..3df8736cf8d8 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
@@ -1,5 +1,5 @@
menu "OMAPFB Panel and Encoder Drivers"
- depends on FB_OMAP2_DSS
+ depends on FB_OMAP2_DSS
config FB_OMAP2_ENCODER_OPA362
tristate "OPA362 external analog amplifier"
@@ -8,29 +8,29 @@ config FB_OMAP2_ENCODER_OPA362
through a GPIO.
config FB_OMAP2_ENCODER_TFP410
- tristate "TFP410 DPI to DVI Encoder"
+ tristate "TFP410 DPI to DVI Encoder"
help
Driver for TFP410 DPI to DVI encoder.
config FB_OMAP2_ENCODER_TPD12S015
- tristate "TPD12S015 HDMI ESD protection and level shifter"
+ tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
config FB_OMAP2_CONNECTOR_DVI
- tristate "DVI Connector"
+ tristate "DVI Connector"
depends on I2C
help
Driver for a generic DVI connector.
config FB_OMAP2_CONNECTOR_HDMI
- tristate "HDMI Connector"
+ tristate "HDMI Connector"
help
Driver for a generic HDMI connector.
config FB_OMAP2_CONNECTOR_ANALOG_TV
- tristate "Analog TV Connector"
+ tristate "Analog TV Connector"
help
Driver for a generic analog TV connector.
@@ -58,29 +58,29 @@ config FB_OMAP2_PANEL_LGPHILIPS_LB035Q02
LCD Panel used on the Gumstix Overo Palo35
config FB_OMAP2_PANEL_SHARP_LS037V7DW01
- tristate "Sharp LS037V7DW01 LCD Panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- LCD Panel used in TI's SDP3430 and EVM boards
+ tristate "Sharp LS037V7DW01 LCD Panel"
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ LCD Panel used in TI's SDP3430 and EVM boards
config FB_OMAP2_PANEL_TPO_TD028TTEC1
- tristate "TPO TD028TTEC1 LCD Panel"
- depends on SPI
- help
- LCD panel used in Openmoko.
+ tristate "TPO TD028TTEC1 LCD Panel"
+ depends on SPI
+ help
+ LCD panel used in Openmoko.
config FB_OMAP2_PANEL_TPO_TD043MTEA1
- tristate "TPO TD043MTEA1 LCD Panel"
- depends on SPI
- help
- LCD Panel used in OMAP3 Pandora
+ tristate "TPO TD043MTEA1 LCD Panel"
+ depends on SPI
+ help
+ LCD Panel used in OMAP3 Pandora
config FB_OMAP2_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 Panel"
depends on SPI
depends on BACKLIGHT_CLASS_DEVICE
help
- This NEC NL8048HL11 panel is TFT LCD used in the
- Zoom2/3/3630 sdp boards.
+ This NEC NL8048HL11 panel is TFT LCD used in the
+ Zoom2/3/3630 sdp boards.
endmenu
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
index 356b89b378d4..a34820e8ab97 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
@@ -3,7 +3,7 @@ config FB_OMAP2_DSS_INIT
bool
config FB_OMAP2_DSS
- tristate
+ tristate
select VIDEOMODE_HELPERS
select FB_OMAP2_DSS_INIT
select HDMI
@@ -53,7 +53,7 @@ config FB_OMAP2_DSS_RFBI
config FB_OMAP2_DSS_VENC
bool "VENC support"
- default y
+ default y
help
OMAP Video Encoder support for S-Video and composite TV-out.
@@ -62,7 +62,7 @@ config FB_OMAP2_DSS_HDMI_COMMON
config FB_OMAP4_DSS_HDMI
bool "HDMI support for OMAP4"
- default y
+ default y
select FB_OMAP2_DSS_HDMI_COMMON
help
HDMI support for OMAP4 based SoCs.
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
index 136d30484d02..5da7ed6d653e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
@@ -111,6 +111,8 @@ static void __init omapdss_omapify_node(struct device_node *node)
new_len = prop->length + strlen(prefix) * num_strs;
new_compat = kmalloc(new_len, GFP_KERNEL);
+ if (!new_compat)
+ return;
omapdss_prefix_strcpy(new_compat, new_len, prop->value, prop->length);
@@ -193,8 +195,10 @@ static int __init omapdss_boot_init(void)
dss = of_find_matching_node(NULL, omapdss_of_match);
- if (dss == NULL || !of_device_is_available(dss))
+ if (dss == NULL || !of_device_is_available(dss)) {
+ of_node_put(dss);
return 0;
+ }
omapdss_walk_device(dss, true);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 8a53d1de611d..4e4d6a0df978 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,7 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages);
+ ret = get_user_pages_fast((unsigned long)buf, nr_pages, FOLL_WRITE, pages);
if (ret < nr_pages) {
nr_pages = ret;
ret = -EINVAL;
@@ -1071,7 +1071,6 @@ static struct pvr2_board {
static int __init pvr2fb_init(void)
{
int i, ret = -ENODEV;
- int size;
#ifndef MODULE
char *option = NULL;
@@ -1080,7 +1079,6 @@ static int __init pvr2fb_init(void)
return -ENODEV;
pvr2fb_setup(option);
#endif
- size = sizeof(struct fb_info) + sizeof(struct pvr2fb_par) + 16 * sizeof(u32);
fb_info = framebuffer_alloc(sizeof(struct pvr2fb_par), NULL);
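
Editor's note: the pvr2fb hunk above adjusts to the gup calling convention that takes FOLL_* flags directly: the third argument of get_user_pages_fast() is a gup_flags bitmask rather than a write boolean. A hedged sketch of pinning a user buffer for writing with that convention (the helper name is invented):

#include <linux/mm.h>

/* Pin up to nr user pages for writing; caller put_page()s them when done */
static int demo_pin_user_buffer(unsigned long uaddr, int nr,
				struct page **pages)
{
	int pinned;

	pinned = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;	/* hard error */

	/* May be short; the caller must handle pinned < nr */
	return pinned;
}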
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 69cfb337c857..047a2fa4b87e 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -96,6 +96,7 @@ struct pxa3xx_gcu_batch {
};
struct pxa3xx_gcu_priv {
+ struct device *dev;
void __iomem *mmio_base;
struct clk *clk;
struct pxa3xx_gcu_shared *shared;
@@ -493,7 +494,7 @@ pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
if (size != SHARED_SIZE)
return -EINVAL;
- return dma_mmap_coherent(NULL, vma,
+ return dma_mmap_coherent(priv->dev, vma,
priv->shared, priv->shared_phys, size);
case SHARED_SIZE >> PAGE_SHIFT:
@@ -670,6 +671,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->resource_mem = r;
+ priv->dev = dev;
pxa3xx_gcu_reset(priv);
pxa3xx_gcu_init_debug_timer(priv);
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index a67e4567e656..a702da89910b 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -777,7 +777,7 @@ static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
long delta_f;
info = container_of(nb, struct s3c2410fb_info, freq_transition);
- fbinfo = platform_get_drvdata(to_platform_device(info->dev));
+ fbinfo = dev_get_drvdata(info->dev);
/* work out change, <0 for speed-up */
delta_f = info->clk_rate - clk_get_rate(info->clk);
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index c09d7426cd92..47b78f0138c3 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -2155,9 +2155,9 @@ static int savage_init_fb_info(struct fb_info *info, struct pci_dev *dev,
err = fb_alloc_cmap(&info->cmap, NR_PALETTE, 0);
if (!err)
- info->flags |= FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT |
- FBINFO_HWACCEL_IMAGEBLIT;
+ info->flags |= FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
}
#endif
return err;
diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
index aad1cc4be34a..c7ebf03b8d53 100644
--- a/drivers/video/fbdev/sm712.h
+++ b/drivers/video/fbdev/sm712.h
@@ -15,14 +15,10 @@
#define FB_ACCEL_SMI_LYNX 88
-#define SCREEN_X_RES 1024
-#define SCREEN_Y_RES 600
-#define SCREEN_BPP 16
-
-/*Assume SM712 graphics chip has 4MB VRAM */
-#define SM712_VIDEOMEMORYSIZE 0x00400000
-/*Assume SM722 graphics chip has 8MB VRAM */
-#define SM722_VIDEOMEMORYSIZE 0x00800000
+#define SCREEN_X_RES 1024
+#define SCREEN_Y_RES_PC 768
+#define SCREEN_Y_RES_NETBOOK 600
+#define SCREEN_BPP 16
#define dac_reg (0x3c8)
#define dac_val (0x3c9)
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 502d0de2feec..f1dcc6766d1e 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = {
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
},
},
+ { /* 1024 x 768 16Bpp 60Hz */
+ 1024, 768, 16, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
{ /* mode#5: 1024 x 768 24Bpp 60Hz */
1024, 768, 24, 60,
/* Init_MISC */
@@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan,
static int smtc_blank(int blank_mode, struct fb_info *info)
{
+ struct smtcfb_info *sfb = info->par;
+
/* clear DPMS setting */
switch (blank_mode) {
case FB_BLANK_UNBLANK:
/* Screen On: HSync: On, VSync : On */
+
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ case 0x720:
+ smtc_seqw(0x6a, 0x0d);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ }
+
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
break;
case FB_BLANK_NORMAL:
/* Screen Off: HSync: On, VSync : On Soft blank */
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
smtc_seqw(0x6a, 0x16);
smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
break;
case FB_BLANK_VSYNC_SUSPEND:
/* Screen On: HSync: On, VSync : Off */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
case FB_BLANK_HSYNC_SUSPEND:
/* Screen On: HSync: Off, VSync : On */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
case FB_BLANK_POWERDOWN:
/* Screen On: HSync: Off, VSync : Off */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
default:
return -EINVAL;
@@ -1145,8 +1217,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
/* init SEQ register SR30 - SR75 */
for (i = 0; i < SIZE_SR30_SR75; i++)
- if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
- (i + 0x30) != 0x6b)
+ if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
+ (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
+ (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
+ (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
smtc_seqw(i + 0x30,
vgamode[j].init_sr30_sr75[i]);
@@ -1171,8 +1245,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
/* init CRTC register CR30 - CR4D */
- for (i = 0; i < SIZE_CR30_CR4D; i++)
+ for (i = 0; i < SIZE_CR30_CR4D; i++) {
+ if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
+ /* side-effect, don't write to CR3B-CR3F */
+ continue;
smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
+ }
/* init CRTC register CR90 - CRA7 */
for (i = 0; i < SIZE_CR90_CRA7; i++)
@@ -1323,6 +1401,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
{
sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
+ if (sfb->chip_id == 0x720)
+ /* on SM720, the framebuffer starts at the 2 MB offset */
+ sfb->fb->fix.smem_start += 0x00200000;
+
+ /* XXX: is it safe for SM720 on Big-Endian? */
if (sfb->fb->var.bits_per_pixel == 32)
sfb->fb->fix.smem_start += big_addr;
@@ -1360,12 +1443,82 @@ static inline void sm7xx_init_hw(void)
outb_p(0x11, 0x3c5);
}
+static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
+{
+ u8 vram;
+
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ /*
+ * Assume SM712 graphics chip has 4MB VRAM.
+ *
+ * FIXME: SM712 can have 2MB VRAM, which is used on earlier
+ * laptops, such as IBM Thinkpad 240X. This driver would
+ * probably crash on those machines. If anyone gets one of
+ * those and is willing to help, run "git blame" and send me
+ * an E-mail.
+ */
+ return 0x00400000;
+ case 0x720:
+ outb_p(0x76, 0x3c4);
+ vram = inb_p(0x3c5) >> 6;
+
+ if (vram == 0x00)
+ return 0x00800000; /* 8 MB */
+ else if (vram == 0x01)
+ return 0x01000000; /* 16 MB */
+ else if (vram == 0x02)
+ return 0x00400000; /* illegal, fallback to 4 MB */
+ else if (vram == 0x03)
+ return 0x00400000; /* 4 MB */
+ }
+ return 0; /* unknown hardware */
+}
+
+static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
+{
+ /* get mode parameter from smtc_scr_info */
+ if (smtc_scr_info.lfb_width != 0) {
+ sfb->fb->var.xres = smtc_scr_info.lfb_width;
+ sfb->fb->var.yres = smtc_scr_info.lfb_height;
+ sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
+ goto final;
+ }
+
+ /*
+ * No parameter, default resolution is 1024x768-16.
+ *
+ * FIXME: earlier laptops, such as the IBM Thinkpad 240X, have an 800x600
+ * panel; see also the comments about the Thinkpad 240X above.
+ */
+ sfb->fb->var.xres = SCREEN_X_RES;
+ sfb->fb->var.yres = SCREEN_Y_RES_PC;
+ sfb->fb->var.bits_per_pixel = SCREEN_BPP;
+
+#ifdef CONFIG_MIPS
+ /*
+ * Loongson MIPS netbooks use 1024x600 LCD panels and are the original
+ * target platform of this driver, but nearly all old x86 laptops have
+ * 1024x768 panels. Driving a 768-line panel with 600-line timings would
+ * partially garble the display, so we don't want that. But it's not
+ * possible to distinguish the two reliably.
+ *
+ * So we change the default to 768, but keep 600 as-is on MIPS.
+ */
+ sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
+#endif
+
+final:
+ big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
+}
+
static int smtcfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct smtcfb_info *sfb;
struct fb_info *info;
- u_long smem_size = 0x00800000; /* default 8MB */
+ u_long smem_size;
int err;
unsigned long mmio_base;
@@ -1405,29 +1558,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
sm7xx_init_hw();
- /* get mode parameter from smtc_scr_info */
- if (smtc_scr_info.lfb_width != 0) {
- sfb->fb->var.xres = smtc_scr_info.lfb_width;
- sfb->fb->var.yres = smtc_scr_info.lfb_height;
- sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
- } else {
- /* default resolution 1024x600 16bit mode */
- sfb->fb->var.xres = SCREEN_X_RES;
- sfb->fb->var.yres = SCREEN_Y_RES;
- sfb->fb->var.bits_per_pixel = SCREEN_BPP;
- }
-
- big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
/* Map address and memory detection */
mmio_base = pci_resource_start(pdev, 0);
pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
+ smem_size = sm7xx_vram_probe(sfb);
+ dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
+ smem_size / 1048576);
+
switch (sfb->chip_id) {
case 0x710:
case 0x712:
sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
sfb->fb->fix.mmio_len = 0x00400000;
- smem_size = SM712_VIDEOMEMORYSIZE;
sfb->lfb = ioremap(mmio_base, mmio_addr);
if (!sfb->lfb) {
dev_err(&pdev->dev,
@@ -1459,8 +1602,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
case 0x720:
sfb->fb->fix.mmio_start = mmio_base;
sfb->fb->fix.mmio_len = 0x00200000;
- smem_size = SM722_VIDEOMEMORYSIZE;
- sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
+ sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
sfb->lfb = sfb->dp_regs + 0x00200000;
sfb->mmio = (smtc_regbaseaddress =
sfb->dp_regs + 0x000c0000);
@@ -1477,6 +1619,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
goto failed_fb;
}
+ /* probe and decide resolution */
+ sm7xx_resolution_probe(sfb);
+
/* can support 32 bpp */
if (sfb->fb->var.bits_per_pixel == 15)
sfb->fb->var.bits_per_pixel = 16;
@@ -1487,7 +1632,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
if (err)
goto failed;
- smtcfb_setmode(sfb);
+ /*
+ * The screen would be temporarily garbled when sm712fb takes over
+ * vesafb or VGA text mode. Zero the framebuffer.
+ */
+ memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
err = register_framebuffer(info);
if (err < 0)
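
Collecting the SM720 numbers scattered through the hunks above into one place (a reconstruction from this patch, not from chip documentation): BAR0 carries the 2D/MMIO block and the linear framebuffer, and the VRAM size is read from the top two bits of sequencer register 0x76.

#include <linux/types.h>

/* SM720 offsets within BAR0, as used by this patch */
#define SM720_MMIO_OFFSET       0x000c0000      /* smtc_regbaseaddress */
#define SM720_LFB_OFFSET        0x00200000      /* framebuffer, 2 MiB in */

/* SR76[7:6] -> VRAM size, mirroring sm7xx_vram_probe() */
static unsigned long sm720_vram_size(u8 sr76)
{
        switch (sr76 >> 6) {
        case 0x00: return 0x00800000;   /*  8 MiB */
        case 0x01: return 0x01000000;   /* 16 MiB */
        case 0x02: return 0x00400000;   /* illegal, fall back to 4 MiB */
        default:   return 0x00400000;   /*  4 MiB */
        }
}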
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 1d034dddc556..5a0d6fb02bbc 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -594,8 +594,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
return 0;
}
-static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
- int width, int height, char *data)
+static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
{
int i, ret;
char *cmd;
@@ -607,21 +606,29 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
start_cycles = get_cycles();
+ mutex_lock(&dlfb->render_mutex);
+
aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
x = aligned_x;
if ((width <= 0) ||
(x + width > dlfb->info->var.xres) ||
- (y + height > dlfb->info->var.yres))
- return -EINVAL;
+ (y + height > dlfb->info->var.yres)) {
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
- if (!atomic_read(&dlfb->usb_active))
- return 0;
+ if (!atomic_read(&dlfb->usb_active)) {
+ ret = 0;
+ goto unlock_ret;
+ }
urb = dlfb_get_urb(dlfb);
- if (!urb)
- return 0;
+ if (!urb) {
+ ret = 0;
+ goto unlock_ret;
+ }
cmd = urb->transfer_buffer;
for (i = y; i < y + height ; i++) {
@@ -641,7 +648,7 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
*cmd++ = 0xAF;
/* Send partial buffer remaining before exiting */
len = cmd - (char *) urb->transfer_buffer;
- ret = dlfb_submit_urb(dlfb, urb, len);
+ dlfb_submit_urb(dlfb, urb, len);
bytes_sent += len;
} else
dlfb_urb_completion(urb);
@@ -655,7 +662,55 @@ error:
>> 10)), /* Kcycles */
&dlfb->cpu_kcycles_used);
- return 0;
+ ret = 0;
+
+unlock_ret:
+ mutex_unlock(&dlfb->render_mutex);
+ return ret;
+}
+
+static void dlfb_init_damage(struct dlfb_data *dlfb)
+{
+ dlfb->damage_x = INT_MAX;
+ dlfb->damage_x2 = 0;
+ dlfb->damage_y = INT_MAX;
+ dlfb->damage_y2 = 0;
+}
+
+static void dlfb_damage_work(struct work_struct *w)
+{
+ struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
+ int x, x2, y, y2;
+
+ spin_lock_irq(&dlfb->damage_lock);
+ x = dlfb->damage_x;
+ x2 = dlfb->damage_x2;
+ y = dlfb->damage_y;
+ y2 = dlfb->damage_y2;
+ dlfb_init_damage(dlfb);
+ spin_unlock_irq(&dlfb->damage_lock);
+
+ if (x < x2 && y < y2)
+ dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
+}
+
+static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
+{
+ unsigned long flags;
+ int x2 = x + width;
+ int y2 = y + height;
+
+ if (x >= x2 || y >= y2)
+ return;
+
+ spin_lock_irqsave(&dlfb->damage_lock, flags);
+ dlfb->damage_x = min(x, dlfb->damage_x);
+ dlfb->damage_x2 = max(x2, dlfb->damage_x2);
+ dlfb->damage_y = min(y, dlfb->damage_y);
+ dlfb->damage_y2 = max(y2, dlfb->damage_y2);
+ spin_unlock_irqrestore(&dlfb->damage_lock, flags);
+
+ schedule_work(&dlfb->damage_work);
}
/*
@@ -679,7 +734,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
(u32)info->var.yres);
dlfb_handle_damage(dlfb, 0, start, info->var.xres,
- lines, info->screen_base);
+ lines);
}
return result;
@@ -694,8 +749,8 @@ static void dlfb_ops_copyarea(struct fb_info *info,
sys_copyarea(info, area);
- dlfb_handle_damage(dlfb, area->dx, area->dy,
- area->width, area->height, info->screen_base);
+ dlfb_offload_damage(dlfb, area->dx, area->dy,
+ area->width, area->height);
}
static void dlfb_ops_imageblit(struct fb_info *info,
@@ -705,8 +760,8 @@ static void dlfb_ops_imageblit(struct fb_info *info,
sys_imageblit(info, image);
- dlfb_handle_damage(dlfb, image->dx, image->dy,
- image->width, image->height, info->screen_base);
+ dlfb_offload_damage(dlfb, image->dx, image->dy,
+ image->width, image->height);
}
static void dlfb_ops_fillrect(struct fb_info *info,
@@ -716,8 +771,8 @@ static void dlfb_ops_fillrect(struct fb_info *info,
sys_fillrect(info, rect);
- dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width,
- rect->height, info->screen_base);
+ dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width,
+ rect->height);
}
/*
@@ -739,17 +794,19 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
int bytes_identical = 0;
int bytes_rendered = 0;
+ mutex_lock(&dlfb->render_mutex);
+
if (!fb_defio)
- return;
+ goto unlock_ret;
if (!atomic_read(&dlfb->usb_active))
- return;
+ goto unlock_ret;
start_cycles = get_cycles();
urb = dlfb_get_urb(dlfb);
if (!urb)
- return;
+ goto unlock_ret;
cmd = urb->transfer_buffer;
@@ -782,6 +839,8 @@ error:
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
&dlfb->cpu_kcycles_used);
+unlock_ret:
+ mutex_unlock(&dlfb->render_mutex);
}
static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
@@ -859,8 +918,7 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
if (area.y > info->var.yres)
area.y = info->var.yres;
- dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h,
- info->screen_base);
+ dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h);
}
return 0;
@@ -942,6 +1000,10 @@ static void dlfb_ops_destroy(struct fb_info *info)
{
struct dlfb_data *dlfb = info->par;
+ cancel_work_sync(&dlfb->damage_work);
+
+ mutex_destroy(&dlfb->render_mutex);
+
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
@@ -1065,8 +1127,7 @@ static int dlfb_ops_set_par(struct fb_info *info)
pix_framebuffer[i] = 0x37e6;
}
- dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
- info->screen_base);
+ dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres);
return 0;
}
@@ -1639,6 +1700,11 @@ static int dlfb_usb_probe(struct usb_interface *intf,
dlfb->ops = dlfb_ops;
info->fbops = &dlfb->ops;
+ mutex_init(&dlfb->render_mutex);
+ dlfb_init_damage(dlfb);
+ spin_lock_init(&dlfb->damage_lock);
+ INIT_WORK(&dlfb->damage_work, dlfb_damage_work);
+
INIT_LIST_HEAD(&info->modelist);
if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
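
The udlfb rework above is an instance of a general deferred-damage pattern: the blit/fill/copy fast paths only merge their rectangle into a bounding box under a spinlock and schedule a work item, while the work item and the slow render paths serialize on a mutex. A condensed, driver-agnostic sketch of the idea (hypothetical damage_tracker type, not udlfb's own structures):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct damage_tracker {
        spinlock_t lock;                /* protects the bounding box */
        int x, y, x2, y2;               /* accumulated dirty rectangle */
        struct work_struct work;        /* flushes the rectangle */
        struct mutex render_lock;       /* serializes actual rendering */
};

static void damage_reset(struct damage_tracker *t)
{
        t->x = INT_MAX;
        t->y = INT_MAX;
        t->x2 = 0;
        t->y2 = 0;
}

/* cheap: called from fbops, coalesces many small updates into one flush */
static void damage_add(struct damage_tracker *t, int x, int y, int w, int h)
{
        unsigned long flags;

        if (w <= 0 || h <= 0)
                return;

        spin_lock_irqsave(&t->lock, flags);
        t->x = min(t->x, x);
        t->y = min(t->y, y);
        t->x2 = max(t->x2, x + w);
        t->y2 = max(t->y2, y + h);
        spin_unlock_irqrestore(&t->lock, flags);

        schedule_work(&t->work);
}

static void damage_flush(struct work_struct *work)
{
        struct damage_tracker *t = container_of(work, struct damage_tracker, work);
        int x, y, x2, y2;

        spin_lock_irq(&t->lock);
        x = t->x;
        y = t->y;
        x2 = t->x2;
        y2 = t->y2;
        damage_reset(t);
        spin_unlock_irq(&t->lock);

        if (x < x2 && y < y2) {
                mutex_lock(&t->render_lock);
                /* render the rectangle [x, x2) x [y, y2) to the device here */
                mutex_unlock(&t->render_lock);
        }
}

Because the work item snapshots and resets the box under the spinlock, updates that arrive while a flush is in flight simply start a new box and trigger another flush.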
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 34dc8e53a1e9..d707fdb97354 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1543,7 +1543,7 @@ static void uvesafb_ioremap(struct fb_info *info)
static ssize_t uvesafb_show_vbe_ver(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
return snprintf(buf, PAGE_SIZE, "%.4x\n", par->vbe_ib.vbe_version);
@@ -1554,7 +1554,7 @@ static DEVICE_ATTR(vbe_version, S_IRUGO, uvesafb_show_vbe_ver, NULL);
static ssize_t uvesafb_show_vbe_modes(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
int ret = 0, i;
@@ -1573,7 +1573,7 @@ static DEVICE_ATTR(vbe_modes, S_IRUGO, uvesafb_show_vbe_modes, NULL);
static ssize_t uvesafb_show_vendor(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_vendor_name_ptr)
@@ -1588,7 +1588,7 @@ static DEVICE_ATTR(oem_vendor, S_IRUGO, uvesafb_show_vendor, NULL);
static ssize_t uvesafb_show_product_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_name_ptr)
@@ -1603,7 +1603,7 @@ static DEVICE_ATTR(oem_product_name, S_IRUGO, uvesafb_show_product_name, NULL);
static ssize_t uvesafb_show_product_rev(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_rev_ptr)
@@ -1618,7 +1618,7 @@ static DEVICE_ATTR(oem_product_rev, S_IRUGO, uvesafb_show_product_rev, NULL);
static ssize_t uvesafb_show_oem_string(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_string_ptr)
@@ -1633,7 +1633,7 @@ static DEVICE_ATTR(oem_string, S_IRUGO, uvesafb_show_oem_string, NULL);
static ssize_t uvesafb_show_nocrtc(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
return snprintf(buf, PAGE_SIZE, "%d\n", par->nocrtc);
@@ -1642,7 +1642,7 @@ static ssize_t uvesafb_show_nocrtc(struct device *dev,
static ssize_t uvesafb_store_nocrtc(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (count > 0) {
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index 528fe917dd49..dc1f9cfb6e7e 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -336,8 +336,8 @@ static int vesafb_probe(struct platform_device *dev)
printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
if (pmi_base[3]) {
printk(KERN_INFO "vesafb: pmi: ports = ");
- for (i = pmi_base[3]/2; pmi_base[i] != 0xffff; i++)
- printk("%x ",pmi_base[i]);
+ for (i = pmi_base[3]/2; pmi_base[i] != 0xffff; i++)
+ printk("%x ", pmi_base[i]);
printk("\n");
if (pmi_base[i] != 0xffff) {
/*
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 6a4bbc9e1fb0..a3d6b6db221b 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -677,7 +677,7 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 8ba726e600e9..93d5bebf9572 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -215,6 +215,9 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
* hypervisor.
*/
lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
+ if (param.count == 0 ||
+ param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
+ return -EINVAL;
num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Allocate the buffers we need */
@@ -244,7 +247,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/* Get the physical addresses of the source buffer */
num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
- num_pages, param.source != -1, pages);
+ num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
if (num_pinned != num_pages) {
/* get_user_pages() failed */
@@ -331,8 +334,8 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
struct fsl_hv_ioctl_prop param;
char __user *upath, *upropname;
void __user *upropval;
- char *path = NULL, *propname = NULL;
- void *propval = NULL;
+ char *path, *propname;
+ void *propval;
int ret = 0;
/* Get the parameters from the user. */
@@ -344,32 +347,30 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
upropval = (void __user *)(uintptr_t)param.propval;
path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
- if (IS_ERR(path)) {
- ret = PTR_ERR(path);
- goto out;
- }
+ if (IS_ERR(path))
+ return PTR_ERR(path);
propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
if (IS_ERR(propname)) {
ret = PTR_ERR(propname);
- goto out;
+ goto err_free_path;
}
if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
ret = -EINVAL;
- goto out;
+ goto err_free_propname;
}
propval = kmalloc(param.proplen, GFP_KERNEL);
if (!propval) {
ret = -ENOMEM;
- goto out;
+ goto err_free_propname;
}
if (set) {
if (copy_from_user(propval, upropval, param.proplen)) {
ret = -EFAULT;
- goto out;
+ goto err_free_propval;
}
param.ret = fh_partition_set_dtprop(param.handle,
@@ -388,7 +389,7 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
if (copy_to_user(upropval, propval, param.proplen) ||
put_user(param.proplen, &p->proplen)) {
ret = -EFAULT;
- goto out;
+ goto err_free_propval;
}
}
}
@@ -396,10 +397,12 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
if (put_user(param.ret, &p->ret))
ret = -EFAULT;
-out:
- kfree(path);
+err_free_propval:
kfree(propval);
+err_free_propname:
kfree(propname);
+err_free_path:
+ kfree(path);
return ret;
}
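
About the new fsl_hypervisor bounds check: num_pages is (count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT, so a count close to U64_MAX would wrap the addition and pin far fewer pages than the hypervisor is later told to copy. The largest count that keeps the sum within a u64 is U64_MAX - lb_offset - (PAGE_SIZE - 1), which is exactly the bound the patch enforces, together with rejecting a zero count. The same guard in isolation, as a sketch:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>

/* true if (count + lb_offset + PAGE_SIZE - 1) cannot overflow a u64 */
static bool foo_memcpy_count_valid(u64 count, u64 lb_offset)
{
        if (count == 0)
                return false;

        return count <= U64_MAX - lb_offset - PAGE_SIZE + 1;
}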
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 8ca333f21292..2307b0329aec 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1298,6 +1298,20 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
return ret;
}
+static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
+{
+ switch (type) {
+ case VMMDEV_HGCM_PARM_TYPE_32BIT:
+ case VMMDEV_HGCM_PARM_TYPE_64BIT:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
struct vbg_session *session, bool f32bit,
struct vbg_ioctl_hgcm_call *call)
@@ -1333,6 +1347,23 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
}
call->hdr.size_out = actual_size;
+ /* Validate parameter types */
+ if (f32bit) {
+ struct vmmdev_hgcm_function_parameter32 *parm =
+ VBG_IOCTL_HGCM_CALL_PARMS32(call);
+
+ for (i = 0; i < call->parm_count; i++)
+ if (!vbg_param_valid(parm[i].type))
+ return -EINVAL;
+ } else {
+ struct vmmdev_hgcm_function_parameter *parm =
+ VBG_IOCTL_HGCM_CALL_PARMS(call);
+
+ for (i = 0; i < call->parm_count; i++)
+ if (!vbg_param_valid(parm[i].type))
+ return -EINVAL;
+ }
+
/*
* Validate the client id.
*/
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d0584c040c60..7a0398bb84f7 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
- for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ if (vp_dev->msix_affinity_masks) {
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ }
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 18846afb39da..0a7b3ce3fb75 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -882,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split(
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (queue)
break;
+ if (!may_reduce_num)
+ return NULL;
}
if (!num)
@@ -1002,6 +1004,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
if (unlikely(vq->vq.num_free < 1)) {
pr_debug("Can't add buf len 1 - avail = 0\n");
+ kfree(desc);
END_USE(vq);
return -ENOSPC;
}
@@ -1716,10 +1719,10 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/**
* virtqueue_add_sgs - expose buffers to other end
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @sgs: array of terminated scatterlists.
- * @out_num: the number of scatterlists readable by other side
- * @in_num: the number of scatterlists which are writable (after readable ones)
+ * @out_sgs: the number of scatterlists readable by other side
+ * @in_sgs: the number of scatterlists which are writable (after readable ones)
* @data: the token identifying the buffer.
* @gfp: how to do memory allocations (if necessary).
*
@@ -1819,7 +1822,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
*
* Instead of virtqueue_kick(), you can do:
* if (virtqueue_kick_prepare(vq))
@@ -1839,7 +1842,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
* virtqueue_notify - second half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
*
* This does not need to be serialized.
*
@@ -1883,8 +1886,9 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
/**
* virtqueue_get_buf - get the next used buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
+ * @ctx: extra context for the token
*
* If the device wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer
@@ -1914,7 +1918,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
* virtqueue_disable_cb - disable callbacks
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
@@ -1934,7 +1938,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
* virtqueue_enable_cb_prepare - restart callbacks after disable_cb
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns current queue state
* in an opaque unsigned value. This value should be later tested by
@@ -1955,7 +1959,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
* virtqueue_poll - query pending used buffers
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
*
* Returns "true" if there are pending used buffers in the queue.
@@ -1974,7 +1978,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
/**
* virtqueue_enable_cb - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
@@ -1993,7 +1997,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
* virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks but hints to the other side to delay
* interrupts until most of the available buffers have been processed;
@@ -2015,7 +2019,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
* virtqueue_detach_unused_buf - detach first unused buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
* This is not valid on an active queue; it is useful only for device
@@ -2247,7 +2251,7 @@ EXPORT_SYMBOL_GPL(vring_transport_features);
/**
* virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @vq: the struct virtqueue containing the vring of interest.
+ * @_vq: the struct virtqueue containing the vring of interest.
*
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.
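
The virtio_ring hunks above are kernel-doc fixes: the @name tags must match the actual parameter names (_vq, out_sgs, in_sgs, ctx), otherwise scripts/kernel-doc complains about undescribed and excess parameters. A minimal illustration with a hypothetical function:

#include <linux/types.h>

struct foo_queue;

/**
 * foo_kick - notify the other side about newly added buffers
 * @_vq: the queue being kicked (the tag matches the parameter name below)
 *
 * Returns false if the queue pointer is missing.
 */
static bool foo_kick(struct foo_queue *_vq)
{
        return _vq != NULL;
}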
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 8b5e598ffdb3..8f2b25f1614c 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -37,6 +37,11 @@ module_param_named(active_pullup, ds2482_active_pullup, int, 0644);
MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
"0-disable, 1-enable (default)");
+/* extra configurations - e.g. 1WS */
+static int extra_config;
+module_param(extra_config, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8=1WS");
+
/**
* The DS2482 registers - there are 3 registers that are addressed by a read
* pointer. The read pointer is set by the last command executed.
@@ -70,8 +75,6 @@ MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */
#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
-/* extra configurations - e.g. 1WS */
-static int extra_config;
/**
* Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
@@ -130,6 +133,8 @@ struct ds2482_data {
*/
static inline u8 ds2482_calculate_config(u8 conf)
{
+ conf |= extra_config;
+
if (ds2482_active_pullup)
conf |= DS2482_REG_CFG_APU;
@@ -405,7 +410,7 @@ static u8 ds2482_w1_reset_bus(void *data)
/* If the chip did reset since detect, re-config it */
if (err & DS2482_REG_STS_RST)
ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
- ds2482_calculate_config(extra_config));
+ ds2482_calculate_config(0x00));
}
mutex_unlock(&pdev->access_lock);
@@ -431,7 +436,8 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
ds2482_wait_1wire_idle(pdev);
/* note: it seems like both SPU and APU have to be set! */
retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
- ds2482_calculate_config(extra_config|DS2482_REG_CFG_SPU|DS2482_REG_CFG_APU));
+ ds2482_calculate_config(DS2482_REG_CFG_SPU |
+ DS2482_REG_CFG_APU));
ds2482_wait_1wire_idle(pdev);
}
@@ -484,7 +490,7 @@ static int ds2482_probe(struct i2c_client *client,
/* Set all config items to 0 (off) */
ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
- ds2482_calculate_config(extra_config));
+ ds2482_calculate_config(0x00));
mutex_init(&data->access_lock);
@@ -559,7 +565,5 @@ module_i2c_driver(ds2482_driver);
MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
MODULE_DESCRIPTION("DS2482 driver");
-module_param(extra_config, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8=1WS");
MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 0f4ecfcdb549..a9fb77585272 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1016,15 +1016,15 @@ static int ds_probe(struct usb_interface *intf,
/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
alt = 3;
err = usb_set_interface(dev->udev,
- intf->altsetting[alt].desc.bInterfaceNumber, alt);
+ intf->cur_altsetting->desc.bInterfaceNumber, alt);
if (err) {
dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
"for %d interface: err=%d.\n", alt,
- intf->altsetting[alt].desc.bInterfaceNumber, err);
+ intf->cur_altsetting->desc.bInterfaceNumber, err);
goto err_out_clear;
}
- iface_desc = &intf->altsetting[alt];
+ iface_desc = intf->cur_altsetting;
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
pr_info("Num endpoints=%d. It is not DS9490R.\n",
iface_desc->desc.bNumEndpoints);
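
On the ds2490 change: intf->altsetting[] is indexed by array position, which is not guaranteed to match the bAlternateSetting number a (possibly misbehaving) device reports, while intf->cur_altsetting always points at whatever setting usb_set_interface() actually activated. A hedged sketch of the safer access:

#include <linux/usb.h>

/* endpoint count of the interface setting that is currently in use */
static int foo_num_endpoints(struct usb_interface *intf)
{
        return intf->cur_altsetting->desc.bNumEndpoints;
}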
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index b535d5ec35b6..92e8f0755b9a 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -138,14 +138,37 @@ static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
W1_F29_REG_CONTROL_AND_STATUS, buf);
}
+#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
+static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
+{
+ u8 w1_buf[3];
+
+ if (w1_reset_resume_command(sl->master))
+ return false;
+
+ w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
+ w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
+ w1_buf[2] = 0;
+
+ w1_write_block(sl->master, w1_buf, 3);
+
+ return (w1_read_8(sl->master) == expected);
+}
+#else
+static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
+{
+ return true;
+}
+#endif
+
static ssize_t output_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[3];
- u8 readBack;
unsigned int retries = W1_F29_RETRIES;
+ ssize_t bytes_written = -EIO;
if (count != 1 || off != 0)
return -EFAULT;
@@ -155,54 +178,33 @@ static ssize_t output_write(struct file *filp, struct kobject *kobj,
dev_dbg(&sl->dev, "mutex locked");
if (w1_reset_select_slave(sl))
- goto error;
+ goto out;
- while (retries--) {
+ do {
w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE;
w1_buf[1] = *buf;
w1_buf[2] = ~(*buf);
- w1_write_block(sl->master, w1_buf, 3);
- readBack = w1_read_8(sl->master);
+ w1_write_block(sl->master, w1_buf, 3);
- if (readBack != W1_F29_SUCCESS_CONFIRM_BYTE) {
- if (w1_reset_resume_command(sl->master))
- goto error;
- /* try again, the slave is ready for a command */
- continue;
+ if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE &&
+ optional_read_back_valid(sl, *buf)) {
+ bytes_written = 1;
+ goto out;
}
-#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
- /* here the master could read another byte which
- would be the PIO reg (the actual pin logic state)
- since in this driver we don't know which pins are
- in and outs, there's no value to read the state and
- compare. with (*buf) so end this command abruptly: */
if (w1_reset_resume_command(sl->master))
- goto error;
+ goto out; /* unrecoverable error */
+ /* try again, the slave is ready for a command */
+ } while (--retries);
- /* go read back the output latches */
- /* (the direct effect of the write above) */
- w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
- w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
- w1_buf[2] = 0;
- w1_write_block(sl->master, w1_buf, 3);
- /* read the result of the READ_PIO_REGS command */
- if (w1_read_8(sl->master) == *buf)
-#endif
- {
- /* success! */
- mutex_unlock(&sl->master->bus_mutex);
- dev_dbg(&sl->dev,
- "mutex unlocked, retries:%d", retries);
- return 1;
- }
- }
-error:
+out:
mutex_unlock(&sl->master->bus_mutex);
- dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
- return -EIO;
+ dev_dbg(&sl->dev, "%s, mutex unlocked retries:%d\n",
+ (bytes_written > 0) ? "succeeded" : "error", retries);
+
+ return bytes_written;
}
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 0364d3329c52..3516ce6718d9 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -432,8 +432,7 @@ int w1_reset_resume_command(struct w1_master *dev)
if (w1_reset_bus(dev))
return -1;
- /* This will make only the last matched slave perform a skip ROM. */
- w1_write_8(dev, W1_RESUME_CMD);
+ w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM);
return 0;
}
EXPORT_SYMBOL_GPL(w1_reset_resume_command);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 242eea859637..7ea60371bda0 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -30,7 +30,7 @@ menuconfig WATCHDOG
if WATCHDOG
config WATCHDOG_CORE
- bool "WatchDog Timer Driver Core"
+ tristate "WatchDog Timer Driver Core"
---help---
Say Y here if you want to use the new watchdog timer driver core.
This driver provides a framework for all watchdog timer drivers
@@ -63,6 +63,66 @@ config WATCHDOG_SYSFS
Say Y here if you want to enable watchdog device status read through
sysfs attributes.
+comment "Watchdog Pretimeout Governors"
+
+config WATCHDOG_PRETIMEOUT_GOV
+ bool "Enable watchdog pretimeout governors"
+ depends on WATCHDOG_CORE
+ help
+ This option allows selecting watchdog pretimeout governors.
+
+config WATCHDOG_PRETIMEOUT_GOV_SEL
+ tristate
+ depends on WATCHDOG_PRETIMEOUT_GOV
+ default m
+ select WATCHDOG_PRETIMEOUT_GOV_PANIC if WATCHDOG_PRETIMEOUT_GOV_NOOP=n
+
+if WATCHDOG_PRETIMEOUT_GOV
+
+config WATCHDOG_PRETIMEOUT_GOV_NOOP
+ tristate "Noop watchdog pretimeout governor"
+ depends on WATCHDOG_CORE
+ default WATCHDOG_CORE
+ help
+ Noop watchdog pretimeout governor: only an informational
+ message is added to the kernel log buffer.
+
+config WATCHDOG_PRETIMEOUT_GOV_PANIC
+ tristate "Panic watchdog pretimeout governor"
+ depends on WATCHDOG_CORE
+ default WATCHDOG_CORE
+ help
+ Panic watchdog pretimeout governor: on a watchdog pretimeout
+ event, put the kernel into panic.
+
+choice
+ prompt "Default Watchdog Pretimeout Governor"
+ default WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
+ help
+ This option selects a default watchdog pretimeout governor.
+ The governor takes its action if a watchdog is capable
+ of reporting a pretimeout event.
+
+config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP
+ bool "noop"
+ depends on WATCHDOG_PRETIMEOUT_GOV_NOOP
+ help
+ Use noop watchdog pretimeout governor by default. If noop
+ governor is selected by a user, write a short message to
+ the kernel log buffer and don't do any system changes.
+
+config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
+ bool "panic"
+ depends on WATCHDOG_PRETIMEOUT_GOV_PANIC
+ help
+ Use the panic watchdog pretimeout governor by default: if
+ a watchdog pretimeout event happens, consider that the
+ watchdog feeder is dead and a reboot is unavoidable.
+
+endchoice
+
+endif # WATCHDOG_PRETIMEOUT_GOV
+
#
# General Watchdog drivers
#
@@ -90,6 +150,18 @@ config SOFT_WATCHDOG_PRETIMEOUT
watchdog. Be aware that governors might affect the watchdog because it
is purely software, e.g. the panic governor will stall it!
+config BD70528_WATCHDOG
+ tristate "ROHM BD70528 PMIC Watchdog"
+ depends on MFD_ROHM_BD70528
+ select WATCHDOG_CORE
+ help
+ Support for the watchdog in the ROHM BD70528 PMIC. A watchdog
+ trigger causes a system reset.
+
+ Say Y here to include support for the ROHM BD70528 watchdog.
+ Alternatively say M to compile the driver as a module,
+ which will be called bd70528_wdt.
+
config DA9052_WATCHDOG
tristate "Dialog DA9052 Watchdog"
depends on PMIC_DA9052 || COMPILE_TEST
@@ -552,7 +624,7 @@ config COH901327_WATCHDOG
compiled as a module.
config NPCM7XX_WATCHDOG
- bool "Nuvoton NPCM750 watchdog"
+ tristate "Nuvoton NPCM750 watchdog"
depends on ARCH_NPCM || COMPILE_TEST
default y if ARCH_NPCM7XX
select WATCHDOG_CORE
@@ -641,6 +713,22 @@ config IMX2_WDT
To compile this driver as a module, choose M here: the
module will be called imx2_wdt.
+config IMX_SC_WDT
+ tristate "IMX SC Watchdog"
+ depends on HAVE_ARM_SMCCC
+ select WATCHDOG_CORE
+ help
+ This is the driver for the system controller watchdog
+ on NXP i.MX SoCs that include a system controller. The
+ watchdog driver calls the ARM SMC API and traps into
+ ARM Trusted Firmware, which asks the system controller
+ to carry out the requested operations.
+ If you have one of these processors and wish to have
+ watchdog support enabled, say Y, otherwise say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx_sc_wdt.
+
config UX500_WATCHDOG
tristate "ST-Ericsson Ux500 watchdog"
depends on MFD_DB8500_PRCMU
@@ -1179,6 +1267,15 @@ config HP_WATCHDOG
To compile this driver as a module, choose M here: the module will be
called hpwdt.
+config HPWDT_NMI_DECODING
+ bool "NMI support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
+ depends on HP_WATCHDOG
+ default y
+ help
+ Enables the NMI handler for the watchdog pretimeout NMI and the iLO
+ "Generate NMI to System" virtual button. When an NMI is claimed
+ by the driver, panic is called.
+
config KEMPLD_WDT
tristate "Kontron COM Watchdog Timer"
depends on MFD_KEMPLD
@@ -1190,15 +1287,6 @@ config KEMPLD_WDT
This driver can also be built as a module. If so, the module will be
called kempld_wdt.
-config HPWDT_NMI_DECODING
- bool "NMI support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
- depends on HP_WATCHDOG
- default y
- help
- Enables the NMI handler for the watchdog pretimeout NMI and the iLO
- "Generate NMI to System" virtual button. When an NMI is claimed
- by the driver, panic is called.
-
config SC1200_WDT
tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog"
depends on X86
@@ -1647,7 +1735,7 @@ config BCM_KONA_WDT
config BCM_KONA_WDT_DEBUG
bool "DEBUGFS support for BCM Kona Watchdog"
- depends on BCM_KONA_WDT || COMPILE_TEST
+ depends on BCM_KONA_WDT
help
If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides
access to the driver's internal data structures as well as watchdog
@@ -2024,53 +2112,4 @@ config USBPCWATCHDOG
Most people will say N.
-comment "Watchdog Pretimeout Governors"
-
-config WATCHDOG_PRETIMEOUT_GOV
- bool "Enable watchdog pretimeout governors"
- help
- The option allows to select watchdog pretimeout governors.
-
-if WATCHDOG_PRETIMEOUT_GOV
-
-choice
- prompt "Default Watchdog Pretimeout Governor"
- default WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
- help
- This option selects a default watchdog pretimeout governor.
- The governor takes its action, if a watchdog is capable
- to report a pretimeout event.
-
-config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP
- bool "noop"
- select WATCHDOG_PRETIMEOUT_GOV_NOOP
- help
- Use noop watchdog pretimeout governor by default. If noop
- governor is selected by a user, write a short message to
- the kernel log buffer and don't do any system changes.
-
-config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
- bool "panic"
- select WATCHDOG_PRETIMEOUT_GOV_PANIC
- help
- Use panic watchdog pretimeout governor by default, if
- a watchdog pretimeout event happens, consider that
- a watchdog feeder is dead and reboot is unavoidable.
-
-endchoice
-
-config WATCHDOG_PRETIMEOUT_GOV_NOOP
- tristate "Noop watchdog pretimeout governor"
- help
- Noop watchdog pretimeout governor, only an informational
- message is added to kernel log buffer.
-
-config WATCHDOG_PRETIMEOUT_GOV_PANIC
- tristate "Panic watchdog pretimeout governor"
- help
- Panic watchdog pretimeout governor, on watchdog pretimeout
- event put the kernel into panic.
-
-endif # WATCHDOG_PRETIMEOUT_GOV
-
endif # WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index ba930e464657..7caa920e7e60 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS4800_WATCHDOG) += ts4800_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
+obj-$(CONFIG_IMX_SC_WDT) += imx_sc_wdt.o
obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
@@ -205,6 +206,7 @@ obj-$(CONFIG_WATCHDOG_SUN4V) += sun4v_wdt.o
obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
+obj-$(CONFIG_BD70528_WATCHDOG) += bd70528_wdt.o
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
obj-$(CONFIG_DA9062_WATCHDOG) += da9062_wdt.o
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index d6210d946082..957d1255d4ca 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -200,7 +200,7 @@ static int acq_open(struct inode *inode, struct file *file)
/* Activate */
acq_keepalive();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int acq_close(struct inode *inode, struct file *file)
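
Several of the watchdog open() hunks in this series replace nonseekable_open() with stream_open(). Both forbid seeking; stream_open() additionally marks the file as a stream (FMODE_STREAM), so read/write ignore the file position entirely, which is how these watchdog character devices behave anyway. Minimal usage, as in the hunks (hypothetical driver name):

#include <linux/fs.h>

static int foo_wdt_open(struct inode *inode, struct file *file)
{
        /* the file position is meaningless for this device */
        return stream_open(inode, file);
}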
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index f61944369c1a..2766af292a71 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -199,7 +199,7 @@ static int advwdt_open(struct inode *inode, struct file *file)
*/
advwdt_ping();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int advwdt_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 60f0c2eb8531..39a07bb5f6d5 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -249,7 +249,7 @@ static int ali_open(struct inode *inode, struct file *file)
/* Activate */
ali_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/*
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 12f7ea62dddd..689b8a0593c1 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -214,7 +214,7 @@ static int fop_open(struct inode *inode, struct file *file)
return -EBUSY;
/* Good, fire up the show */
wdt_startup();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
@@ -277,8 +277,8 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
timeout = new_timeout;
wdt_keepalive();
- /* Fall through */
}
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
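
The alim7101 hunk only relocates the comment: with -Wimplicit-fallthrough, the compiler recognizes a fall-through marker only when it immediately precedes the next case label, so it cannot live inside the closing brace of the preceding block. A sketch of the accepted placement (hypothetical ioctl helper, not the driver's code):

#include <linux/uaccess.h>
#include <linux/watchdog.h>

static long foo_handle_timeout(unsigned int cmd, int __user *p, int *timeout)
{
        int new_timeout;

        switch (cmd) {
        case WDIOC_SETTIMEOUT:
                if (get_user(new_timeout, p))
                        return -EFAULT;
                *timeout = new_timeout;
                /* Fall through */
        case WDIOC_GETTIMEOUT:
                return put_user(*timeout, p);
        default:
                return -ENOTTY;
        }
}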
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index ee1ab12ab04f..b9b2d06b3879 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -163,7 +163,7 @@ static int ar7_wdt_open(struct inode *inode, struct file *file)
ar7_wdt_enable_wdt();
expect_close = 0;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int ar7_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index 4b4054f54df9..e5dcb26d85f0 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -244,6 +244,11 @@ static const struct watchdog_ops armada_37xx_wdt_ops = {
.get_timeleft = armada_37xx_wdt_get_timeleft,
};
+static void armada_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int armada_37xx_wdt_probe(struct platform_device *pdev)
{
struct armada_37xx_watchdog *dev;
@@ -278,12 +283,14 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
ret = clk_prepare_enable(dev->clk);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev,
+ armada_clk_disable_unprepare, dev->clk);
+ if (ret)
+ return ret;
dev->clk_rate = clk_get_rate(dev->clk);
- if (!dev->clk_rate) {
- ret = -EINVAL;
- goto disable_clk;
- }
+ if (!dev->clk_rate)
+ return -EINVAL;
/*
* Since the timeout in seconds is given as 32 bit unsigned int, and
@@ -307,35 +314,15 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
watchdog_set_nowayout(&dev->wdt, nowayout);
- ret = watchdog_register_device(&dev->wdt);
+ watchdog_stop_on_reboot(&dev->wdt);
+ ret = devm_watchdog_register_device(&pdev->dev, &dev->wdt);
if (ret)
- goto disable_clk;
+ return ret;
dev_info(&pdev->dev, "Initial timeout %d sec%s\n",
dev->wdt.timeout, nowayout ? ", nowayout" : "");
return 0;
-
-disable_clk:
- clk_disable_unprepare(dev->clk);
- return ret;
-}
-
-static int armada_37xx_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdt = platform_get_drvdata(pdev);
- struct armada_37xx_watchdog *dev = watchdog_get_drvdata(wdt);
-
- watchdog_unregister_device(wdt);
- clk_disable_unprepare(dev->clk);
- return 0;
-}
-
-static void armada_37xx_wdt_shutdown(struct platform_device *pdev)
-{
- struct watchdog_device *wdt = platform_get_drvdata(pdev);
-
- armada_37xx_wdt_stop(wdt);
}
static int __maybe_unused armada_37xx_wdt_suspend(struct device *dev)
@@ -370,8 +357,6 @@ MODULE_DEVICE_TABLE(of, armada_37xx_wdt_match);
static struct platform_driver armada_37xx_wdt_driver = {
.probe = armada_37xx_wdt_probe,
- .remove = armada_37xx_wdt_remove,
- .shutdown = armada_37xx_wdt_shutdown,
.driver = {
.name = "armada_37xx_wdt",
.of_match_table = of_match_ptr(armada_37xx_wdt_match),
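
The armada_37xx conversion (and the asm9260 and atlas7 ones that follow) uses the same devm idiom: register clk_disable_unprepare() as a device-managed action right after clk_prepare_enable() succeeds, so every later error return and the eventual unbind undo the clock automatically, letting the remove/shutdown callbacks and the error labels disappear. A condensed sketch with hypothetical foo_* names:

#include <linux/clk.h>
#include <linux/device.h>

static void foo_clk_disable_unprepare(void *data)
{
        clk_disable_unprepare(data);
}

static int foo_enable_clk(struct device *dev, struct clk *clk)
{
        int ret;

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        /* from here on, any error path or driver unbind disables the clock */
        return devm_add_action_or_reset(dev, foo_clk_disable_unprepare, clk);
}

devm_add_action_or_reset() runs the action immediately if it cannot register it, so the caller never has to clean up by hand.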
diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
index 9768e44ffeb8..c5b9aae544dd 100644
--- a/drivers/watchdog/asm9260_wdt.c
+++ b/drivers/watchdog/asm9260_wdt.c
@@ -196,6 +196,11 @@ static const struct watchdog_ops asm9260_wdt_ops = {
.restart = asm9260_restart,
};
+static void asm9260_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int asm9260_wdt_get_dt_clks(struct asm9260_wdt_priv *priv)
{
int err;
@@ -219,26 +224,32 @@ static int asm9260_wdt_get_dt_clks(struct asm9260_wdt_priv *priv)
dev_err(priv->dev, "Failed to enable ahb_clk!\n");
return err;
}
+ err = devm_add_action_or_reset(priv->dev,
+ asm9260_clk_disable_unprepare,
+ priv->clk_ahb);
+ if (err)
+ return err;
err = clk_set_rate(priv->clk, CLOCK_FREQ);
if (err) {
- clk_disable_unprepare(priv->clk_ahb);
dev_err(priv->dev, "Failed to set rate!\n");
return err;
}
err = clk_prepare_enable(priv->clk);
if (err) {
- clk_disable_unprepare(priv->clk_ahb);
dev_err(priv->dev, "Failed to enable clk!\n");
return err;
}
+ err = devm_add_action_or_reset(priv->dev,
+ asm9260_clk_disable_unprepare,
+ priv->clk);
+ if (err)
+ return err;
/* wdt has internal divider */
clk = clk_get_rate(priv->clk);
if (!clk) {
- clk_disable_unprepare(priv->clk);
- clk_disable_unprepare(priv->clk_ahb);
dev_err(priv->dev, "Failed, clk is 0!\n");
return -EINVAL;
}
@@ -274,25 +285,23 @@ static void asm9260_wdt_get_dt_mode(struct asm9260_wdt_priv *priv)
static int asm9260_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct asm9260_wdt_priv *priv;
struct watchdog_device *wdd;
- struct resource *res;
int ret;
static const char * const mode_name[] = { "hw", "sw", "debug", };
- priv = devm_kzalloc(&pdev->dev, sizeof(struct asm9260_wdt_priv),
- GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct asm9260_wdt_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->dev = &pdev->dev;
+ priv->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->iobase = devm_ioremap_resource(&pdev->dev, res);
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
- priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst");
+ priv->rst = devm_reset_control_get_exclusive(dev, "wdt_rst");
if (IS_ERR(priv->rst))
return PTR_ERR(priv->rst);
@@ -305,7 +314,7 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
wdd->ops = &asm9260_wdt_ops;
wdd->min_timeout = 1;
wdd->max_timeout = BM_WDTC_MAX(priv->wdt_freq);
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_set_drvdata(wdd, priv);
@@ -315,7 +324,7 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
* the max instead.
*/
wdd->timeout = ASM9260_WDT_DEFAULT_TIMEOUT;
- watchdog_init_timeout(wdd, 0, &pdev->dev);
+ watchdog_init_timeout(wdd, 0, dev);
asm9260_wdt_get_dt_mode(priv);
@@ -327,49 +336,25 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
* Not all supported platforms specify an interrupt for the
* watchdog, so let's make it optional.
*/
- ret = devm_request_irq(&pdev->dev, priv->irq,
- asm9260_wdt_irq, 0, pdev->name, priv);
+ ret = devm_request_irq(dev, priv->irq, asm9260_wdt_irq, 0,
+ pdev->name, priv);
if (ret < 0)
- dev_warn(&pdev->dev, "failed to request IRQ\n");
+ dev_warn(dev, "failed to request IRQ\n");
}
watchdog_set_restart_priority(wdd, 128);
- ret = watchdog_register_device(wdd);
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
- goto clk_off;
+ return ret;
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "Watchdog enabled (timeout: %d sec, mode: %s)\n",
+ dev_info(dev, "Watchdog enabled (timeout: %d sec, mode: %s)\n",
wdd->timeout, mode_name[priv->mode]);
return 0;
-
-clk_off:
- clk_disable_unprepare(priv->clk);
- clk_disable_unprepare(priv->clk_ahb);
- return ret;
-}
-
-static void asm9260_wdt_shutdown(struct platform_device *pdev)
-{
- struct asm9260_wdt_priv *priv = platform_get_drvdata(pdev);
-
- asm9260_wdt_disable(&priv->wdd);
-}
-
-static int asm9260_wdt_remove(struct platform_device *pdev)
-{
- struct asm9260_wdt_priv *priv = platform_get_drvdata(pdev);
-
- asm9260_wdt_disable(&priv->wdd);
-
- watchdog_unregister_device(&priv->wdd);
-
- clk_disable_unprepare(priv->clk);
- clk_disable_unprepare(priv->clk_ahb);
-
- return 0;
}
static const struct of_device_id asm9260_wdt_of_match[] = {
@@ -384,8 +369,6 @@ static struct platform_driver asm9260_wdt_driver = {
.of_match_table = asm9260_wdt_of_match,
},
.probe = asm9260_wdt_probe,
- .remove = asm9260_wdt_remove,
- .shutdown = asm9260_wdt_shutdown,
};
module_platform_driver(asm9260_wdt_driver);
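
Several probes in this series (asm9260, aspeed, at91sam9, ath79, atlas7) also collapse the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(), which looks up memory resource 0 and maps it in one call. Equivalent before and after, with a hypothetical helper name:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_regs(struct platform_device *pdev)
{
        /*
         * Before:
         *   struct resource *res =
         *           platform_get_resource(pdev, IORESOURCE_MEM, 0);
         *   return devm_ioremap_resource(&pdev->dev, res);
         */
        return devm_platform_ioremap_resource(pdev, 0);
}

The return value is either a valid __iomem pointer or an ERR_PTR(), exactly as with devm_ioremap_resource(), so the IS_ERR() checks in the probes stay unchanged.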
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 1abe4d021fd2..34117745c65f 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -187,22 +187,21 @@ static const struct watchdog_info aspeed_wdt_info = {
static int aspeed_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct aspeed_wdt_config *config;
const struct of_device_id *ofdid;
struct aspeed_wdt *wdt;
- struct resource *res;
struct device_node *np;
const char *reset_type;
u32 duration;
u32 status;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
@@ -214,12 +213,12 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
wdt->wdd.info = &aspeed_wdt_info;
wdt->wdd.ops = &aspeed_wdt_ops;
wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT;
- watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
- np = pdev->dev.of_node;
+ np = dev->of_node;
ofdid = of_match_node(aspeed_wdt_of_table, np);
if (!ofdid)
@@ -288,11 +287,11 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
u32 max_duration = config->ext_pulse_width_mask + 1;
if (duration == 0 || duration > max_duration) {
- dev_err(&pdev->dev, "Invalid pulse duration: %uus\n",
- duration);
+ dev_err(dev, "Invalid pulse duration: %uus\n",
+ duration);
duration = max(1U, min(max_duration, duration));
- dev_info(&pdev->dev, "Pulse duration set to %uus\n",
- duration);
+ dev_info(dev, "Pulse duration set to %uus\n",
+ duration);
}
/*
@@ -314,9 +313,9 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
wdt->wdd.bootstatus = WDIOF_CARDRESET;
- ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register\n");
+ dev_err(dev, "failed to register\n");
return ret;
}
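Another recurring conversion, seen in the aspeed hunks above and in most of the drivers that follow, folds the platform_get_resource() + devm_ioremap_resource() pair into a single devm_platform_ioremap_resource() call. The two forms are equivalent; a tiny sketch with a hypothetical wrapper name:

#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical wrapper showing the one-call form. */
static void __iomem *foo_wdt_map_regs(struct platform_device *pdev)
{
	/*
	 * Equivalent to:
	 *   res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 * but without the local struct resource.
	 */
	return devm_platform_ioremap_resource(pdev, 0);
}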
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index b45fc0aee667..907a4545dee6 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -110,7 +110,7 @@ static int at91_wdt_open(struct inode *inode, struct file *file)
return -EBUSY;
at91_wdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/*
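The at91rm9200 hunk above is the first of many in this series switching the chardev open handler from nonseekable_open() to stream_open(). Both mark the file non-seekable; stream_open() additionally marks it as a stream, so reads and writes do not serialize on f_pos, which fits the write-only /dev/watchdog interface. A sketch of the resulting open handler, with a hypothetical busy flag and device name:

#include <linux/bitops.h>
#include <linux/fs.h>

static unsigned long foo_wdt_busy;	/* hypothetical single-open flag */

static int foo_wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &foo_wdt_busy))
		return -EBUSY;

	/* start the hardware here */

	/* stream-like device: no f_pos, no read/write serialization */
	return stream_open(inode, file);
}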
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index f4050a229eb5..292b5a1ca831 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -327,7 +327,6 @@ static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
static int __init at91wdt_probe(struct platform_device *pdev)
{
- struct resource *r;
int err;
struct at91wdt *wdt;
@@ -346,8 +345,7 @@ static int __init at91wdt_probe(struct platform_device *pdev)
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0xFFFF;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(&pdev->dev, r);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index e2209bf5fa8a..02234c254b10 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -132,7 +132,7 @@ static int ath79_wdt_open(struct inode *inode, struct file *file)
clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
ath79_wdt_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int ath79_wdt_release(struct inode *inode, struct file *file)
@@ -250,15 +250,13 @@ static struct miscdevice ath79_wdt_miscdev = {
static int ath79_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
u32 ctrl;
int err;
if (wdt_base)
return -EBUSY;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
diff --git a/drivers/watchdog/atlas7_wdt.c b/drivers/watchdog/atlas7_wdt.c
index 4abdcabd8219..79337d2a8a8e 100644
--- a/drivers/watchdog/atlas7_wdt.c
+++ b/drivers/watchdog/atlas7_wdt.c
@@ -125,80 +125,57 @@ static const struct of_device_id atlas7_wdt_ids[] = {
{}
};
+static void atlas7_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int atlas7_wdt_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct atlas7_wdog *wdt;
- struct resource *res;
struct clk *clk;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- clk = of_clk_get(np, 0);
+ clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "clk enable failed\n");
- goto err;
+ dev_err(dev, "clk enable failed\n");
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, atlas7_clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
/* disable watchdog hardware */
writel(0, wdt->base + ATLAS7_WDT_CNT_CTRL);
wdt->tick_rate = clk_get_rate(clk);
- if (!wdt->tick_rate) {
- ret = -EINVAL;
- goto err1;
- }
+ if (!wdt->tick_rate)
+ return -EINVAL;
wdt->clk = clk;
atlas7_wdd.min_timeout = 1;
atlas7_wdd.max_timeout = UINT_MAX / wdt->tick_rate;
- watchdog_init_timeout(&atlas7_wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&atlas7_wdd, 0, dev);
watchdog_set_nowayout(&atlas7_wdd, nowayout);
watchdog_set_drvdata(&atlas7_wdd, wdt);
platform_set_drvdata(pdev, &atlas7_wdd);
- ret = watchdog_register_device(&atlas7_wdd);
- if (ret)
- goto err1;
-
- return 0;
-
-err1:
- clk_disable_unprepare(clk);
-err:
- clk_put(clk);
- return ret;
-}
-
-static void atlas7_wdt_shutdown(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
- struct atlas7_wdog *wdt = watchdog_get_drvdata(wdd);
-
- atlas7_wdt_disable(wdd);
- clk_disable_unprepare(wdt->clk);
-}
-
-static int atlas7_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
- struct atlas7_wdog *wdt = watchdog_get_drvdata(wdd);
-
- atlas7_wdt_shutdown(pdev);
- clk_put(wdt->clk);
- return 0;
+ watchdog_stop_on_reboot(&atlas7_wdd);
+ watchdog_stop_on_unregister(&atlas7_wdd);
+ return devm_watchdog_register_device(dev, &atlas7_wdd);
}
static int __maybe_unused atlas7_wdt_suspend(struct device *dev)
@@ -236,8 +213,6 @@ static struct platform_driver atlas7_wdt_driver = {
.of_match_table = atlas7_wdt_ids,
},
.probe = atlas7_wdt_probe,
- .remove = atlas7_wdt_remove,
- .shutdown = atlas7_wdt_shutdown,
};
module_platform_driver(atlas7_wdt_driver);
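The atlas7 conversion above (and the later bcm7038, cadence, davinci and imgpdc ones) hangs clk_disable_unprepare() off devm_add_action_or_reset(), so the clock is released automatically on probe failure and on unbind; that is what makes the error-unwind labels and the remove/shutdown callbacks disappear. A minimal sketch of the idiom, with hypothetical foo_* names:

#include <linux/clk.h>
#include <linux/device.h>

static void foo_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int foo_enable_clk(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/*
	 * If registering the action fails, devm runs it immediately
	 * (the "_or_reset" part), so no manual unwind is needed in
	 * either the success or the failure path.
	 */
	return devm_add_action_or_reset(dev, foo_clk_disable_unprepare, clk);
}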
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 1834524ae373..560c1c54c177 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -177,7 +177,6 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
wdt = devm_kzalloc(dev, sizeof(struct bcm2835_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- platform_set_drvdata(pdev, wdt);
spin_lock_init(&wdt->lock);
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index d3c1113e774c..e2af37c9a266 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -116,7 +116,7 @@ static int bcm63xx_wdt_open(struct inode *inode, struct file *file)
return -EBUSY;
bcm63xx_wdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int bcm63xx_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index ce3f646e8077..d3d88f6703d7 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -107,11 +107,15 @@ static const struct watchdog_ops bcm7038_wdt_ops = {
.get_timeleft = bcm7038_wdt_get_timeleft,
};
+static void bcm7038_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int bcm7038_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm7038_watchdog *wdt;
- struct resource *res;
int err;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -120,8 +124,7 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
@@ -131,6 +134,11 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
err = clk_prepare_enable(wdt->clk);
if (err)
return err;
+ err = devm_add_action_or_reset(dev,
+ bcm7038_clk_disable_unprepare,
+ wdt->clk);
+ if (err)
+ return err;
wdt->rate = clk_get_rate(wdt->clk);
/* Prevent divide-by-zero exception */
if (!wdt->rate)
@@ -148,10 +156,11 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
wdt->wdd.parent = dev;
watchdog_set_drvdata(&wdt->wdd, wdt);
- err = watchdog_register_device(&wdt->wdd);
+ watchdog_stop_on_reboot(&wdt->wdd);
+ watchdog_stop_on_unregister(&wdt->wdd);
+ err = devm_watchdog_register_device(dev, &wdt->wdd);
if (err) {
dev_err(dev, "Failed to register watchdog device\n");
- clk_disable_unprepare(wdt->clk);
return err;
}
@@ -160,19 +169,6 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
return 0;
}
-static int bcm7038_wdt_remove(struct platform_device *pdev)
-{
- struct bcm7038_watchdog *wdt = platform_get_drvdata(pdev);
-
- if (!nowayout)
- bcm7038_wdt_stop(&wdt->wdd);
-
- watchdog_unregister_device(&wdt->wdd);
- clk_disable_unprepare(wdt->clk);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int bcm7038_wdt_suspend(struct device *dev)
{
@@ -198,14 +194,6 @@ static int bcm7038_wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend,
bcm7038_wdt_resume);
-static void bcm7038_wdt_shutdown(struct platform_device *pdev)
-{
- struct bcm7038_watchdog *wdt = platform_get_drvdata(pdev);
-
- if (watchdog_active(&wdt->wdd))
- bcm7038_wdt_stop(&wdt->wdd);
-}
-
static const struct of_device_id bcm7038_wdt_match[] = {
{ .compatible = "brcm,bcm7038-wdt" },
{},
@@ -214,8 +202,6 @@ MODULE_DEVICE_TABLE(of, bcm7038_wdt_match);
static struct platform_driver bcm7038_wdt_driver = {
.probe = bcm7038_wdt_probe,
- .remove = bcm7038_wdt_remove,
- .shutdown = bcm7038_wdt_shutdown,
.driver = {
.name = "bcm7038-wdt",
.of_match_table = bcm7038_wdt_match,
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 4249b47902bd..e2ad44816359 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -271,16 +271,10 @@ static struct watchdog_device bcm_kona_wdt_wdd = {
.timeout = SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
};
-static void bcm_kona_wdt_shutdown(struct platform_device *pdev)
-{
- bcm_kona_wdt_stop(&bcm_kona_wdt_wdd);
-}
-
static int bcm_kona_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm_kona_wdt *wdt;
- struct resource *res;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -289,8 +283,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return -ENODEV;
@@ -303,7 +296,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdt);
watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
- bcm_kona_wdt_wdd.parent = &pdev->dev;
+ bcm_kona_wdt_wdd.parent = dev;
ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
if (ret) {
@@ -311,7 +304,9 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
return ret;
}
- ret = watchdog_register_device(&bcm_kona_wdt_wdd);
+ watchdog_stop_on_reboot(&bcm_kona_wdt_wdd);
+ watchdog_stop_on_unregister(&bcm_kona_wdt_wdd);
+ ret = devm_watchdog_register_device(dev, &bcm_kona_wdt_wdd);
if (ret) {
dev_err(dev, "Failed to register watchdog device");
return ret;
@@ -326,8 +321,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
static int bcm_kona_wdt_remove(struct platform_device *pdev)
{
bcm_kona_wdt_debug_exit(pdev);
- bcm_kona_wdt_shutdown(pdev);
- watchdog_unregister_device(&bcm_kona_wdt_wdd);
dev_dbg(&pdev->dev, "Watchdog driver disabled");
return 0;
@@ -346,7 +339,6 @@ static struct platform_driver bcm_kona_wdt_driver = {
},
.probe = bcm_kona_wdt_probe,
.remove = bcm_kona_wdt_remove,
- .shutdown = bcm_kona_wdt_shutdown,
};
module_platform_driver(bcm_kona_wdt_driver);
diff --git a/drivers/watchdog/bd70528_wdt.c b/drivers/watchdog/bd70528_wdt.c
new file mode 100644
index 000000000000..b0152fef4fc7
--- /dev/null
+++ b/drivers/watchdog/bd70528_wdt.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 ROHM Semiconductors
+// ROHM BD70528MWV watchdog driver
+
+#include <linux/bcd.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd70528.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/watchdog.h>
+
+/*
+ * Max time we can set is 1 hour, 59 minutes and 59 seconds
+ * and the minimum time is 1 second
+ */
+#define WDT_MAX_MS ((2 * 60 * 60 - 1) * 1000)
+#define WDT_MIN_MS 1000
+#define DEFAULT_TIMEOUT 60
+
+#define WD_CTRL_MAGIC1 0x55
+#define WD_CTRL_MAGIC2 0xAA
+
+struct wdtbd70528 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct rohm_regmap_dev *mfd;
+ struct watchdog_device wdt;
+};
+
+/**
+ * bd70528_wdt_set - arm or disarm watchdog timer
+ *
+ * @data: device data for the PMIC instance we want to operate on
+ * @enable: new state of WDT. zero to disable, non zero to enable
+ * @old_state: previous state of WDT will be filled here
+ *
+ * Arm or disarm WDT on BD70528 PMIC. Expected to be called only by
+ * BD70528 RTC and BD70528 WDT drivers. The rtc_timer_lock must be taken
+ * by calling bd70528_wdt_lock before calling bd70528_wdt_set.
+ */
+int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state)
+{
+ int ret, i;
+ unsigned int tmp;
+ struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
+ chip);
+ u8 wd_ctrl_arr[3] = { WD_CTRL_MAGIC1, WD_CTRL_MAGIC2, 0 };
+ u8 *wd_ctrl = &wd_ctrl_arr[2];
+
+ ret = regmap_read(bd70528->chip.regmap, BD70528_REG_WDT_CTRL, &tmp);
+ if (ret)
+ return ret;
+
+ *wd_ctrl = (u8)tmp;
+
+ if (old_state) {
+ if (*wd_ctrl & BD70528_MASK_WDT_EN)
+ *old_state |= BD70528_WDT_STATE_BIT;
+ else
+ *old_state &= ~BD70528_WDT_STATE_BIT;
+ if ((!enable) == (!(*old_state & BD70528_WDT_STATE_BIT)))
+ return 0;
+ }
+
+ if (enable) {
+ if (*wd_ctrl & BD70528_MASK_WDT_EN)
+ return 0;
+ *wd_ctrl |= BD70528_MASK_WDT_EN;
+ } else {
+ if (*wd_ctrl & BD70528_MASK_WDT_EN)
+ *wd_ctrl &= ~BD70528_MASK_WDT_EN;
+ else
+ return 0;
+ }
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(bd70528->chip.regmap, BD70528_REG_WDT_CTRL,
+ wd_ctrl_arr[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(bd70528->chip.regmap, BD70528_REG_WDT_CTRL, &tmp);
+ if ((tmp & BD70528_MASK_WDT_EN) != (*wd_ctrl & BD70528_MASK_WDT_EN)) {
+ dev_err(bd70528->chip.dev,
+ "Watchdog ctrl mismatch (hw) 0x%x (set) 0x%x\n",
+ tmp, *wd_ctrl);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(bd70528_wdt_set);
+
+/**
+ * bd70528_wdt_lock - take WDT lock
+ *
+ * @data: device data for the PMIC instance we want to operate on
+ *
+ * Lock WDT for arming/disarming in order to avoid race condition caused
+ * by WDT state changes initiated by WDT and RTC drivers.
+ */
+void bd70528_wdt_lock(struct rohm_regmap_dev *data)
+{
+ struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
+ chip);
+
+ mutex_lock(&bd70528->rtc_timer_lock);
+}
+EXPORT_SYMBOL(bd70528_wdt_lock);
+
+/**
+ * bd70528_wdt_unlock - unlock WDT lock
+ *
+ * @data: device data for the PMIC instance we want to operate on
+ *
+ * Unlock WDT lock which has previously been taken by call to
+ * bd70528_wdt_lock.
+ */
+void bd70528_wdt_unlock(struct rohm_regmap_dev *data)
+{
+ struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
+ chip);
+
+ mutex_unlock(&bd70528->rtc_timer_lock);
+}
+EXPORT_SYMBOL(bd70528_wdt_unlock);
+
+static int bd70528_wdt_set_locked(struct wdtbd70528 *w, int enable)
+{
+ return bd70528_wdt_set(w->mfd, enable, NULL);
+}
+
+static int bd70528_wdt_change(struct wdtbd70528 *w, int enable)
+{
+ int ret;
+
+ bd70528_wdt_lock(w->mfd);
+ ret = bd70528_wdt_set_locked(w, enable);
+ bd70528_wdt_unlock(w->mfd);
+
+ return ret;
+}
+
+static int bd70528_wdt_start(struct watchdog_device *wdt)
+{
+ struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
+
+ dev_dbg(w->dev, "WDT ping...\n");
+ return bd70528_wdt_change(w, 1);
+}
+
+static int bd70528_wdt_stop(struct watchdog_device *wdt)
+{
+ struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
+
+ dev_dbg(w->dev, "WDT stopping...\n");
+ return bd70528_wdt_change(w, 0);
+}
+
+static int bd70528_wdt_set_timeout(struct watchdog_device *wdt,
+ unsigned int timeout)
+{
+ unsigned int hours;
+ unsigned int minutes;
+ unsigned int seconds;
+ int ret;
+ struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
+
+ seconds = timeout;
+ hours = timeout / (60 * 60);
+ /* Maximum timeout is 1h 59m 59s => hours is 1 or 0 */
+ if (hours)
+ seconds -= (60 * 60);
+ minutes = seconds / 60;
+ seconds = seconds % 60;
+
+ bd70528_wdt_lock(w->mfd);
+
+ ret = bd70528_wdt_set_locked(w, 0);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_HOUR,
+ BD70528_MASK_WDT_HOUR, hours);
+ if (ret) {
+ dev_err(w->dev, "Failed to set WDT hours\n");
+ goto out_en_unlock;
+ }
+ ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_MINUTE,
+ BD70528_MASK_WDT_MINUTE, bin2bcd(minutes));
+ if (ret) {
+ dev_err(w->dev, "Failed to set WDT minutes\n");
+ goto out_en_unlock;
+ }
+ ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_SEC,
+ BD70528_MASK_WDT_SEC, bin2bcd(seconds));
+ if (ret)
+ dev_err(w->dev, "Failed to set WDT seconds\n");
+ else
+ dev_dbg(w->dev, "WDT tmo set to %u\n", timeout);
+
+out_en_unlock:
+ ret = bd70528_wdt_set_locked(w, 1);
+out_unlock:
+ bd70528_wdt_unlock(w->mfd);
+
+ return ret;
+}
+
+static const struct watchdog_info bd70528_wdt_info = {
+ .identity = "bd70528-wdt",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops bd70528_wdt_ops = {
+ .start = bd70528_wdt_start,
+ .stop = bd70528_wdt_stop,
+ .set_timeout = bd70528_wdt_set_timeout,
+};
+
+static int bd70528_wdt_probe(struct platform_device *pdev)
+{
+ struct rohm_regmap_dev *bd70528;
+ struct wdtbd70528 *w;
+ int ret;
+ unsigned int reg;
+
+ bd70528 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd70528) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+ w = devm_kzalloc(&pdev->dev, sizeof(*w), GFP_KERNEL);
+ if (!w)
+ return -ENOMEM;
+
+ w->regmap = bd70528->regmap;
+ w->mfd = bd70528;
+ w->dev = &pdev->dev;
+
+ w->wdt.info = &bd70528_wdt_info;
+ w->wdt.ops = &bd70528_wdt_ops;
+ w->wdt.min_hw_heartbeat_ms = WDT_MIN_MS;
+ w->wdt.max_hw_heartbeat_ms = WDT_MAX_MS;
+ w->wdt.parent = pdev->dev.parent;
+ w->wdt.timeout = DEFAULT_TIMEOUT;
+ watchdog_set_drvdata(&w->wdt, w);
+ watchdog_init_timeout(&w->wdt, 0, pdev->dev.parent);
+
+ ret = bd70528_wdt_set_timeout(&w->wdt, w->wdt.timeout);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set the watchdog timeout\n");
+ return ret;
+ }
+
+ bd70528_wdt_lock(w->mfd);
+ ret = regmap_read(w->regmap, BD70528_REG_WDT_CTRL, &reg);
+ bd70528_wdt_unlock(w->mfd);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get the watchdog state\n");
+ return ret;
+ }
+ if (reg & BD70528_MASK_WDT_EN) {
+ dev_dbg(&pdev->dev, "watchdog was running during probe\n");
+ set_bit(WDOG_HW_RUNNING, &w->wdt.status);
+ }
+
+ ret = devm_watchdog_register_device(&pdev->dev, &w->wdt);
+ if (ret < 0)
+ dev_err(&pdev->dev, "watchdog registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver bd70528_wdt = {
+ .driver = {
+ .name = "bd70528-wdt"
+ },
+ .probe = bd70528_wdt_probe,
+};
+
+module_platform_driver(bd70528_wdt);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD70528 watchdog driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index c3924356d173..a22f2d431a35 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -274,6 +274,11 @@ static const struct watchdog_ops cdns_wdt_ops = {
.set_timeout = cdns_wdt_settimeout,
};
+static void cdns_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
/************************Platform Operations*****************************/
/**
* cdns_wdt_probe - Probe call for the device.
@@ -285,13 +290,13 @@ static const struct watchdog_ops cdns_wdt_ops = {
*/
static int cdns_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct device *dev = &pdev->dev;
int ret, irq;
unsigned long clock_f;
struct cdns_wdt *wdt;
struct watchdog_device *cdns_wdt_device;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -302,19 +307,18 @@ static int cdns_wdt_probe(struct platform_device *pdev)
cdns_wdt_device->min_timeout = CDNS_WDT_MIN_TIMEOUT;
cdns_wdt_device->max_timeout = CDNS_WDT_MAX_TIMEOUT;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->regs = devm_ioremap_resource(&pdev->dev, res);
+ wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->regs))
return PTR_ERR(wdt->regs);
/* Register the interrupt */
- wdt->rst = of_property_read_bool(pdev->dev.of_node, "reset-on-timeout");
+ wdt->rst = of_property_read_bool(dev->of_node, "reset-on-timeout");
irq = platform_get_irq(pdev, 0);
if (!wdt->rst && irq >= 0) {
- ret = devm_request_irq(&pdev->dev, irq, cdns_wdt_irq_handler, 0,
+ ret = devm_request_irq(dev, irq, cdns_wdt_irq_handler, 0,
pdev->name, pdev);
if (ret) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"cannot register interrupt handler err=%d\n",
ret);
return ret;
@@ -322,30 +326,28 @@ static int cdns_wdt_probe(struct platform_device *pdev)
}
/* Initialize the members of cdns_wdt structure */
- cdns_wdt_device->parent = &pdev->dev;
-
- ret = watchdog_init_timeout(cdns_wdt_device, wdt_timeout, &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "unable to set timeout value\n");
- return ret;
- }
+ cdns_wdt_device->parent = dev;
+ watchdog_init_timeout(cdns_wdt_device, wdt_timeout, dev);
watchdog_set_nowayout(cdns_wdt_device, nowayout);
watchdog_stop_on_reboot(cdns_wdt_device);
watchdog_set_drvdata(cdns_wdt_device, wdt);
- wdt->clk = devm_clk_get(&pdev->dev, NULL);
+ wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(&pdev->dev, "input clock not found\n");
- ret = PTR_ERR(wdt->clk);
- return ret;
+ dev_err(dev, "input clock not found\n");
+ return PTR_ERR(wdt->clk);
}
ret = clk_prepare_enable(wdt->clk);
if (ret) {
- dev_err(&pdev->dev, "unable to enable clock\n");
+ dev_err(dev, "unable to enable clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, cdns_clk_disable_unprepare,
+ wdt->clk);
+ if (ret)
+ return ret;
clock_f = clk_get_rate(wdt->clk);
if (clock_f <= CDNS_WDT_CLK_75MHZ) {
@@ -358,56 +360,20 @@ static int cdns_wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->io_lock);
- ret = watchdog_register_device(cdns_wdt_device);
+ watchdog_stop_on_reboot(cdns_wdt_device);
+ watchdog_stop_on_unregister(cdns_wdt_device);
+ ret = devm_watchdog_register_device(dev, cdns_wdt_device);
if (ret) {
- dev_err(&pdev->dev, "Failed to register wdt device\n");
- goto err_clk_disable;
+ dev_err(dev, "Failed to register wdt device\n");
+ return ret;
}
platform_set_drvdata(pdev, wdt);
- dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n",
+ dev_info(dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n",
wdt->regs, cdns_wdt_device->timeout,
nowayout ? ", nowayout" : "");
return 0;
-
-err_clk_disable:
- clk_disable_unprepare(wdt->clk);
-
- return ret;
-}
-
-/**
- * cdns_wdt_remove - Probe call for the device.
- *
- * @pdev: handle to the platform device structure.
- * Return: 0 on success, otherwise negative error.
- *
- * Unregister the device after releasing the resources.
- */
-static int cdns_wdt_remove(struct platform_device *pdev)
-{
- struct cdns_wdt *wdt = platform_get_drvdata(pdev);
-
- cdns_wdt_stop(&wdt->cdns_wdt_device);
- watchdog_unregister_device(&wdt->cdns_wdt_device);
- clk_disable_unprepare(wdt->clk);
-
- return 0;
-}
-
-/**
- * cdns_wdt_shutdown - Stop the device.
- *
- * @pdev: handle to the platform structure.
- *
- */
-static void cdns_wdt_shutdown(struct platform_device *pdev)
-{
- struct cdns_wdt *wdt = platform_get_drvdata(pdev);
-
- cdns_wdt_stop(&wdt->cdns_wdt_device);
- clk_disable_unprepare(wdt->clk);
}
/**
@@ -462,8 +428,6 @@ MODULE_DEVICE_TABLE(of, cdns_wdt_of_match);
/* Driver Structure */
static struct platform_driver cdns_wdt_driver = {
.probe = cdns_wdt_probe,
- .remove = cdns_wdt_remove,
- .shutdown = cdns_wdt_shutdown,
.driver = {
.name = "cdns-wdt",
.of_match_table = cdns_wdt_of_match,
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index f29d1edc5bad..260c50b08483 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -6,7 +6,7 @@
* Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/watchdog.h>
@@ -243,27 +243,15 @@ static struct watchdog_device coh901327_wdt = {
.timeout = U300_WDOG_DEFAULT_TIMEOUT,
};
-static int __exit coh901327_remove(struct platform_device *pdev)
-{
- watchdog_unregister_device(&coh901327_wdt);
- coh901327_disable();
- free_irq(irq, pdev);
- clk_disable_unprepare(clk);
- clk_put(clk);
- return 0;
-}
-
static int __init coh901327_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
u16 val;
- struct resource *res;
parent = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virtbase = devm_ioremap_resource(dev, res);
+ virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(virtbase))
return PTR_ERR(virtbase);
@@ -408,19 +396,13 @@ static struct platform_driver coh901327_driver = {
.driver = {
.name = "coh901327_wdog",
.of_match_table = coh901327_dt_match,
+ .suppress_bind_attrs = true,
},
- .remove = __exit_p(coh901327_remove),
.suspend = coh901327_suspend,
.resume = coh901327_resume,
};
+builtin_platform_driver_probe(coh901327_driver, coh901327_probe);
-module_platform_driver_probe(coh901327_driver, coh901327_probe);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("COH 901 327 Watchdog");
-
+/* not really modular, but ... */
module_param(margin, uint, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
-
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:coh901327-watchdog");
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 6cfb102c397c..475360de6e9e 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -140,7 +140,7 @@ static int cpu5wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &cpu5wdt_device.inuse))
return -EBUSY;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int cpu5wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 32156e199c51..b5b078bdebe6 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -394,7 +394,7 @@ static int cpwd_open(struct inode *inode, struct file *f)
mutex_unlock(&cpwd_mutex);
- return nonseekable_open(inode, f);
+ return stream_open(inode, f);
}
static int cpwd_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index e263bad99574..a2feef1ff307 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -150,13 +150,13 @@ static const struct watchdog_ops da9052_wdt_ops = {
static int da9052_wdt_probe(struct platform_device *pdev)
{
- struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct da9052 *da9052 = dev_get_drvdata(dev->parent);
struct da9052_wdt_data *driver_data;
struct watchdog_device *da9052_wdt;
int ret;
- driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
- GFP_KERNEL);
+ driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
if (!driver_data)
return -ENOMEM;
driver_data->da9052 = da9052;
@@ -166,18 +166,17 @@ static int da9052_wdt_probe(struct platform_device *pdev)
da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
da9052_wdt->info = &da9052_wdt_info;
da9052_wdt->ops = &da9052_wdt_ops;
- da9052_wdt->parent = &pdev->dev;
+ da9052_wdt->parent = dev;
watchdog_set_drvdata(da9052_wdt, driver_data);
ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
DA9052_CONTROLD_TWDSCALE, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
- ret);
+ dev_err(dev, "Failed to disable watchdog bits, %d\n", ret);
return ret;
}
- ret = devm_watchdog_register_device(&pdev->dev, &driver_data->wdt);
+ ret = devm_watchdog_register_device(dev, &driver_data->wdt);
if (ret != 0) {
dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
ret);
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 26a5b2984094..389a4bdd208c 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -119,13 +119,13 @@ static const struct watchdog_ops da9055_wdt_ops = {
static int da9055_wdt_probe(struct platform_device *pdev)
{
- struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct da9055 *da9055 = dev_get_drvdata(dev->parent);
struct da9055_wdt_data *driver_data;
struct watchdog_device *da9055_wdt;
int ret;
- driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
- GFP_KERNEL);
+ driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
if (!driver_data)
return -ENOMEM;
@@ -136,17 +136,17 @@ static int da9055_wdt_probe(struct platform_device *pdev)
da9055_wdt->timeout = DA9055_DEF_TIMEOUT;
da9055_wdt->info = &da9055_wdt_info;
da9055_wdt->ops = &da9055_wdt_ops;
- da9055_wdt->parent = &pdev->dev;
+ da9055_wdt->parent = dev;
watchdog_set_nowayout(da9055_wdt, nowayout);
watchdog_set_drvdata(da9055_wdt, driver_data);
ret = da9055_wdt_stop(da9055_wdt);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to stop watchdog, %d\n", ret);
+ dev_err(dev, "Failed to stop watchdog, %d\n", ret);
return ret;
}
- ret = devm_watchdog_register_device(&pdev->dev, &driver_data->wdt);
+ ret = devm_watchdog_register_device(dev, &driver_data->wdt);
if (ret != 0)
dev_err(da9055->dev, "watchdog_register_device() failed: %d\n",
ret);
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index fe169d8e1fb2..aac749cfaccb 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -46,14 +46,9 @@ static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs)
static int da9062_reset_watchdog_timer(struct da9062_watchdog *wdt)
{
- int ret;
-
- ret = regmap_update_bits(wdt->hw->regmap,
- DA9062AA_CONTROL_F,
- DA9062AA_WATCHDOG_MASK,
- DA9062AA_WATCHDOG_MASK);
-
- return ret;
+ return regmap_update_bits(wdt->hw->regmap, DA9062AA_CONTROL_F,
+ DA9062AA_WATCHDOG_MASK,
+ DA9062AA_WATCHDOG_MASK);
}
static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt,
@@ -190,15 +185,16 @@ MODULE_DEVICE_TABLE(of, da9062_compatible_id_table);
static int da9062_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct da9062 *chip;
struct da9062_watchdog *wdt;
- chip = dev_get_drvdata(pdev->dev.parent);
+ chip = dev_get_drvdata(dev->parent);
if (!chip)
return -EINVAL;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -211,13 +207,13 @@ static int da9062_wdt_probe(struct platform_device *pdev)
wdt->wdtdev.min_hw_heartbeat_ms = DA9062_RESET_PROTECTION_MS;
wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT;
wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
- wdt->wdtdev.parent = &pdev->dev;
+ wdt->wdtdev.parent = dev;
watchdog_set_restart_priority(&wdt->wdtdev, 128);
watchdog_set_drvdata(&wdt->wdtdev, wdt);
- ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdtdev);
+ ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
if (ret < 0) {
dev_err(wdt->hw->dev,
"watchdog registration failed (%d)\n", ret);
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 384dca16af8b..3d65e92a4e3f 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -188,17 +188,18 @@ static const struct watchdog_ops da9063_watchdog_ops = {
static int da9063_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct da9063 *da9063;
struct watchdog_device *wdd;
- if (!pdev->dev.parent)
+ if (!dev->parent)
return -EINVAL;
- da9063 = dev_get_drvdata(pdev->dev.parent);
+ da9063 = dev_get_drvdata(dev->parent);
if (!da9063)
return -EINVAL;
- wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
+ wdd = devm_kzalloc(dev, sizeof(*wdd), GFP_KERNEL);
if (!wdd)
return -ENOMEM;
@@ -207,22 +208,24 @@ static int da9063_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = DA9063_WDT_MIN_TIMEOUT;
wdd->max_timeout = DA9063_WDT_MAX_TIMEOUT;
wdd->min_hw_heartbeat_ms = DA9063_RESET_PROTECTION_MS;
- wdd->timeout = DA9063_WDG_TIMEOUT;
- wdd->parent = &pdev->dev;
-
+ wdd->parent = dev;
wdd->status = WATCHDOG_NOWAYOUT_INIT_STATUS;
watchdog_set_restart_priority(wdd, 128);
-
watchdog_set_drvdata(wdd, da9063);
+ /* Set default timeout, maybe override it with DT value, scale it */
+ wdd->timeout = DA9063_WDG_TIMEOUT;
+ watchdog_init_timeout(wdd, 0, dev);
+ da9063_wdt_set_timeout(wdd, wdd->timeout);
+
/* Change the timeout to the default value if the watchdog is running */
if (da9063_wdt_is_running(da9063)) {
- da9063_wdt_update_timeout(da9063, DA9063_WDG_TIMEOUT);
+ da9063_wdt_update_timeout(da9063, wdd->timeout);
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
- return devm_watchdog_register_device(&pdev->dev, wdd);
+ return devm_watchdog_register_device(dev, wdd);
}
static struct platform_driver da9063_wdt_driver = {
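The da9063 change above follows the usual timeout-initialization order: put the driver default in wdd->timeout, let watchdog_init_timeout() override it from the timeout-sec device-tree property (or a module parameter) if one is given, and only then program the hardware. Condensed to three lines, with hypothetical foo_* names:

	wdd->timeout = FOO_DEFAULT_TIMEOUT;	/* driver default */
	watchdog_init_timeout(wdd, 0, dev);	/* may pick up timeout-sec from DT */
	foo_wdt_set_timeout(wdd, wdd->timeout);	/* program/scale the hardware */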
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index ebb85d60b6d5..7b2ee35b5ffd 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -191,11 +191,15 @@ static const struct watchdog_ops davinci_wdt_ops = {
.restart = davinci_wdt_restart,
};
+static void davinci_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int davinci_wdt_probe(struct platform_device *pdev)
{
int ret = 0;
struct device *dev = &pdev->dev;
- struct resource *wdt_mem;
struct watchdog_device *wdd;
struct davinci_wdt_device *davinci_wdt;
@@ -207,15 +211,19 @@ static int davinci_wdt_probe(struct platform_device *pdev)
if (IS_ERR(davinci_wdt->clk)) {
if (PTR_ERR(davinci_wdt->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get clock node\n");
+ dev_err(dev, "failed to get clock node\n");
return PTR_ERR(davinci_wdt->clk);
}
ret = clk_prepare_enable(davinci_wdt->clk);
if (ret) {
- dev_err(&pdev->dev, "failed to prepare clock\n");
+ dev_err(dev, "failed to prepare clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, davinci_clk_disable_unprepare,
+ davinci_wdt->clk);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, davinci_wdt);
@@ -225,7 +233,7 @@ static int davinci_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = 1;
wdd->max_timeout = MAX_HEARTBEAT;
wdd->timeout = DEFAULT_HEARTBEAT;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_init_timeout(wdd, heartbeat, dev);
@@ -235,35 +243,17 @@ static int davinci_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdd, 1);
watchdog_set_restart_priority(wdd, 128);
- wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
- if (IS_ERR(davinci_wdt->base)) {
- ret = PTR_ERR(davinci_wdt->base);
- goto err_clk_disable;
- }
+ davinci_wdt->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(davinci_wdt->base))
+ return PTR_ERR(davinci_wdt->base);
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
dev_err(dev, "cannot register watchdog device\n");
- goto err_clk_disable;
+ return ret;
}
return 0;
-
-err_clk_disable:
- clk_disable_unprepare(davinci_wdt->clk);
-
- return ret;
-}
-
-static int davinci_wdt_remove(struct platform_device *pdev)
-{
- struct davinci_wdt_device *davinci_wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&davinci_wdt->wdd);
- clk_disable_unprepare(davinci_wdt->clk);
-
- return 0;
}
static const struct of_device_id davinci_wdt_of_match[] = {
@@ -278,7 +268,6 @@ static struct platform_driver platform_wdt_driver = {
.of_match_table = davinci_wdt_of_match,
},
.probe = davinci_wdt_probe,
- .remove = davinci_wdt_remove,
};
module_platform_driver(platform_wdt_driver);
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
index a9e11df155b8..8af6e9a67d0d 100644
--- a/drivers/watchdog/digicolor_wdt.c
+++ b/drivers/watchdog/digicolor_wdt.c
@@ -116,7 +116,6 @@ static struct watchdog_device dc_wdt_wdd = {
static int dc_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct dc_wdt *wdt;
int ret;
@@ -125,8 +124,7 @@ static int dc_wdt_probe(struct platform_device *pdev)
if (!wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index aa95f57cc1c3..39e43750ab08 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -238,15 +238,13 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct dw_wdt *dw_wdt;
- struct resource *mem;
int ret;
dw_wdt = devm_kzalloc(dev, sizeof(*dw_wdt), GFP_KERNEL);
if (!dw_wdt)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dw_wdt->regs = devm_ioremap_resource(dev, mem);
+ dw_wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dw_wdt->regs))
return PTR_ERR(dw_wdt->regs);
diff --git a/drivers/watchdog/ebc-c384_wdt.c b/drivers/watchdog/ebc-c384_wdt.c
index 4c4c8ce78021..c176f59fea28 100644
--- a/drivers/watchdog/ebc-c384_wdt.c
+++ b/drivers/watchdog/ebc-c384_wdt.c
@@ -117,10 +117,7 @@ static int ebc_c384_wdt_probe(struct device *dev, unsigned int id)
wdd->max_timeout = WATCHDOG_MAX_TIMEOUT;
watchdog_set_nowayout(wdd, nowayout);
-
- if (watchdog_init_timeout(wdd, timeout, dev))
- dev_warn(dev, "Invalid timeout (%u seconds), using default (%u seconds)\n",
- timeout, WATCHDOG_TIMEOUT);
+ watchdog_init_timeout(wdd, timeout, dev);
return devm_watchdog_register_device(dev, wdd);
}
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index f9b14e6efd9a..38e26f160b9a 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -89,18 +89,17 @@ static const struct watchdog_ops ep93xx_wdt_ops = {
static int ep93xx_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ep93xx_wdt_priv *priv;
struct watchdog_device *wdd;
- struct resource *res;
unsigned long val;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(&pdev->dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio))
return PTR_ERR(priv->mmio);
@@ -112,21 +111,21 @@ static int ep93xx_wdt_probe(struct platform_device *pdev)
wdd->ops = &ep93xx_wdt_ops;
wdd->min_timeout = 1;
wdd->max_hw_heartbeat_ms = 200;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_set_nowayout(wdd, nowayout);
wdd->timeout = WDT_TIMEOUT;
- watchdog_init_timeout(wdd, timeout, &pdev->dev);
+ watchdog_init_timeout(wdd, timeout, dev);
watchdog_set_drvdata(wdd, priv);
- ret = devm_watchdog_register_device(&pdev->dev, wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
- dev_info(&pdev->dev, "EP93XX watchdog driver %s\n",
- (val & 0x08) ? " (nCS1 disable detected)" : "");
+ dev_info(dev, "EP93XX watchdog driver %s\n",
+ (val & 0x08) ? " (nCS1 disable detected)" : "");
return 0;
}
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 47f77a6fdfd6..89129e6fa9b6 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -316,7 +316,7 @@ static int eurwdt_open(struct inode *inode, struct file *file)
eurwdt_timeout = WDT_TIMEOUT; /* initial timeout */
/* Activate the WDT */
eurwdt_activate_timer();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/**
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 9a1c761258ce..041172e6c469 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -338,8 +338,11 @@ static int f71862fg_pin_configure(unsigned short ioaddr)
static int watchdog_start(void)
{
+ int err;
+ u8 tmp;
+
/* Make sure we don't die as soon as the watchdog is enabled below */
- int err = watchdog_keepalive();
+ err = watchdog_keepalive();
if (err)
return err;
@@ -386,19 +389,18 @@ static int watchdog_start(void)
break;
case f81866:
- /* Set pin 70 to WDTRST# */
- superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL,
- BIT(3) | BIT(0));
- superio_set_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL,
- BIT(2));
/*
* GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0.
* The PIN 70(GPIO15/WDTRST) is controlled by 2Ch:
* BIT5: 0 -> WDTRST#
* 1 -> GPIO15
*/
- superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_GPIO1,
- BIT(5));
+ tmp = superio_inb(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL);
+ tmp &= ~(BIT(3) | BIT(0));
+ tmp |= BIT(2);
+ superio_outb(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL, tmp);
+
+ superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_GPIO1, 5);
break;
default:
@@ -525,7 +527,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
__module_get(THIS_MODULE);
watchdog.expect_close = 0;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int watchdog_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/ftwdt010_wdt.c b/drivers/watchdog/ftwdt010_wdt.c
index a9c2912ee280..9ea0e56fa7ee 100644
--- a/drivers/watchdog/ftwdt010_wdt.c
+++ b/drivers/watchdog/ftwdt010_wdt.c
@@ -124,7 +124,6 @@ static const struct watchdog_info ftwdt010_wdt_info = {
static int ftwdt010_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct ftwdt010_wdt *gwdt;
unsigned int reg;
int irq;
@@ -134,8 +133,7 @@ static int ftwdt010_wdt_probe(struct platform_device *pdev)
if (!gwdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gwdt->base = devm_ioremap_resource(dev, res);
+ gwdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gwdt->base))
return PTR_ERR(gwdt->base);
@@ -171,7 +169,7 @@ static int ftwdt010_wdt_probe(struct platform_device *pdev)
ret = devm_watchdog_register_device(dev, &gwdt->wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register watchdog\n");
+ dev_err(dev, "failed to register watchdog\n");
return ret;
}
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 006e2348022c..26350b319505 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -229,7 +229,7 @@ static int gef_wdt_open(struct inode *inode, struct file *file)
gef_wdt_handler_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int gef_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 88e01238f01b..c5a727da6657 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -92,7 +92,7 @@ static int geodewdt_open(struct inode *inode, struct file *file)
__module_get(THIS_MODULE);
geodewdt_ping();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int geodewdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index ea77cae03c9d..bc24674b4d9e 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -154,25 +154,14 @@ static int gpio_wdt_probe(struct platform_device *pdev)
priv->wdd.parent = dev;
priv->wdd.timeout = SOFT_TIMEOUT_DEF;
- watchdog_init_timeout(&priv->wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&priv->wdd, 0, dev);
watchdog_stop_on_reboot(&priv->wdd);
if (priv->always_running)
gpio_wdt_start(&priv->wdd);
- ret = watchdog_register_device(&priv->wdd);
-
- return ret;
-}
-
-static int gpio_wdt_remove(struct platform_device *pdev)
-{
- struct gpio_wdt_priv *priv = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&priv->wdd);
-
- return 0;
+ return devm_watchdog_register_device(dev, &priv->wdd);
}
static const struct of_device_id gpio_wdt_dt_ids[] = {
@@ -187,7 +176,6 @@ static struct platform_driver gpio_wdt_driver = {
.of_match_table = gpio_wdt_dt_ids,
},
.probe = gpio_wdt_probe,
- .remove = gpio_wdt_remove,
};
#ifdef CONFIG_GPIO_WATCHDOG_ARCH_INITCALL
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index ef30c7e9728d..db1bf6f546ae 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -311,8 +311,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
goto error_init_nmi_decoding;
watchdog_set_nowayout(&hpwdt_dev, nowayout);
- if (watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL))
- dev_warn(&dev->dev, "Invalid soft_margin: %d.\n", soft_margin);
+ watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL);
if (pretimeout && hpwdt_dev.timeout <= PRETIMEOUT_SEC) {
dev_warn(&dev->dev, "timeout <= pretimeout. Setting pretimeout to zero\n");
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index 950c71a8bb22..17941c03996b 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -311,10 +311,7 @@ static int esb_probe(struct pci_dev *pdev,
edev->wdd.min_timeout = ESB_HEARTBEAT_MIN;
edev->wdd.max_timeout = ESB_HEARTBEAT_MAX;
edev->wdd.timeout = ESB_HEARTBEAT_DEFAULT;
- if (watchdog_init_timeout(&edev->wdd, heartbeat, NULL))
- dev_info(&pdev->dev,
- "heartbeat value must be " ESB_HEARTBEAT_RANGE
- ", using %u\n", edev->wdd.timeout);
+ watchdog_init_timeout(&edev->wdd, heartbeat, NULL);
watchdog_set_nowayout(&edev->wdd, nowayout);
watchdog_stop_on_reboot(&edev->wdd);
watchdog_stop_on_unregister(&edev->wdd);
@@ -328,8 +325,8 @@ static int esb_probe(struct pci_dev *pdev,
goto err_unmap;
}
dev_info(&pdev->dev,
- "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
- edev->base, edev->wdd.timeout, nowayout);
+ "initialized. heartbeat=%d sec (nowayout=%d)\n",
+ edev->wdd.timeout, nowayout);
return 0;
err_unmap:
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 0a5318b7865e..89cea6ce9a08 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -545,6 +545,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
}
watchdog_stop_on_reboot(&p->wddev);
+ watchdog_stop_on_unregister(&p->wddev);
ret = devm_watchdog_register_device(dev, &p->wddev);
if (ret != 0) {
pr_err("cannot register watchdog device (err=%d)\n", ret);
@@ -557,17 +558,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
return 0;
}
-static int iTCO_wdt_remove(struct platform_device *pdev)
-{
- struct iTCO_wdt_private *p = platform_get_drvdata(pdev);
-
- /* Stop the timer before we leave */
- if (!nowayout)
- iTCO_wdt_stop(&p->wddev);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
/*
* Suspend-to-idle requires this, because it stops the ticks and timekeeping, so
@@ -620,7 +610,6 @@ static const struct dev_pm_ops iTCO_wdt_pm = {
static struct platform_driver iTCO_wdt_driver = {
.probe = iTCO_wdt_probe,
- .remove = iTCO_wdt_remove,
.driver = {
.name = DRV_NAME,
.pm = ITCO_WDT_PM_OPS,
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index cc262284a6aa..30d6cec582af 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -238,7 +238,7 @@ static int ibwdt_open(struct inode *inode, struct file *file)
/* Activate */
ibwdt_ping();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int ibwdt_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 366b0474f278..897f7eda9e6a 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -323,7 +323,7 @@ static int asr_open(struct inode *inode, struct file *file)
asr_toggle();
asr_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int asr_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index a3134ffa59f8..0fc31aadeee3 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -178,59 +178,69 @@ static const struct watchdog_ops pdc_wdt_ops = {
.restart = pdc_wdt_restart,
};
+static void pdc_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int pdc_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
u64 div;
int ret, val;
unsigned long clk_rate;
- struct resource *res;
struct pdc_wdt_dev *pdc_wdt;
- pdc_wdt = devm_kzalloc(&pdev->dev, sizeof(*pdc_wdt), GFP_KERNEL);
+ pdc_wdt = devm_kzalloc(dev, sizeof(*pdc_wdt), GFP_KERNEL);
if (!pdc_wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdc_wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ pdc_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdc_wdt->base))
return PTR_ERR(pdc_wdt->base);
- pdc_wdt->sys_clk = devm_clk_get(&pdev->dev, "sys");
+ pdc_wdt->sys_clk = devm_clk_get(dev, "sys");
if (IS_ERR(pdc_wdt->sys_clk)) {
- dev_err(&pdev->dev, "failed to get the sys clock\n");
+ dev_err(dev, "failed to get the sys clock\n");
return PTR_ERR(pdc_wdt->sys_clk);
}
- pdc_wdt->wdt_clk = devm_clk_get(&pdev->dev, "wdt");
+ pdc_wdt->wdt_clk = devm_clk_get(dev, "wdt");
if (IS_ERR(pdc_wdt->wdt_clk)) {
- dev_err(&pdev->dev, "failed to get the wdt clock\n");
+ dev_err(dev, "failed to get the wdt clock\n");
return PTR_ERR(pdc_wdt->wdt_clk);
}
ret = clk_prepare_enable(pdc_wdt->sys_clk);
if (ret) {
- dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
+ dev_err(dev, "could not prepare or enable sys clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, pdc_clk_disable_unprepare,
+ pdc_wdt->sys_clk);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(pdc_wdt->wdt_clk);
if (ret) {
- dev_err(&pdev->dev, "could not prepare or enable wdt clock\n");
- goto disable_sys_clk;
+ dev_err(dev, "could not prepare or enable wdt clock\n");
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, pdc_clk_disable_unprepare,
+ pdc_wdt->wdt_clk);
+ if (ret)
+ return ret;
/* We use the clock rate to calculate the max timeout */
clk_rate = clk_get_rate(pdc_wdt->wdt_clk);
if (clk_rate == 0) {
- dev_err(&pdev->dev, "failed to get clock rate\n");
- ret = -EINVAL;
- goto disable_wdt_clk;
+ dev_err(dev, "failed to get clock rate\n");
+ return -EINVAL;
}
if (order_base_2(clk_rate) > PDC_WDT_CONFIG_DELAY_MASK + 1) {
- dev_err(&pdev->dev, "invalid clock rate\n");
- ret = -EINVAL;
- goto disable_wdt_clk;
+ dev_err(dev, "invalid clock rate\n");
+ return -EINVAL;
}
if (order_base_2(clk_rate) == 0)
@@ -245,10 +255,10 @@ static int pdc_wdt_probe(struct platform_device *pdev)
do_div(div, clk_rate);
pdc_wdt->wdt_dev.max_timeout = div;
pdc_wdt->wdt_dev.timeout = PDC_WDT_DEF_TIMEOUT;
- pdc_wdt->wdt_dev.parent = &pdev->dev;
+ pdc_wdt->wdt_dev.parent = dev;
watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
- watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
+ watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, dev);
pdc_wdt_stop(&pdc_wdt->wdt_dev);
@@ -259,24 +269,22 @@ static int pdc_wdt_probe(struct platform_device *pdev)
case PDC_WDT_TICKLE_STATUS_TICKLE:
case PDC_WDT_TICKLE_STATUS_TIMEOUT:
pdc_wdt->wdt_dev.bootstatus |= WDIOF_CARDRESET;
- dev_info(&pdev->dev,
- "watchdog module last reset due to timeout\n");
+ dev_info(dev, "watchdog module last reset due to timeout\n");
break;
case PDC_WDT_TICKLE_STATUS_HRESET:
- dev_info(&pdev->dev,
+ dev_info(dev,
"watchdog module last reset due to hard reset\n");
break;
case PDC_WDT_TICKLE_STATUS_SRESET:
- dev_info(&pdev->dev,
+ dev_info(dev,
"watchdog module last reset due to soft reset\n");
break;
case PDC_WDT_TICKLE_STATUS_USER:
- dev_info(&pdev->dev,
+ dev_info(dev,
"watchdog module last reset due to user reset\n");
break;
default:
- dev_info(&pdev->dev,
- "contains an illegal status code (%08x)\n", val);
+ dev_info(dev, "contains an illegal status code (%08x)\n", val);
break;
}
@@ -285,36 +293,9 @@ static int pdc_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdc_wdt);
- ret = watchdog_register_device(&pdc_wdt->wdt_dev);
- if (ret)
- goto disable_wdt_clk;
-
- return 0;
-
-disable_wdt_clk:
- clk_disable_unprepare(pdc_wdt->wdt_clk);
-disable_sys_clk:
- clk_disable_unprepare(pdc_wdt->sys_clk);
- return ret;
-}
-
-static void pdc_wdt_shutdown(struct platform_device *pdev)
-{
- struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
-
- pdc_wdt_stop(&pdc_wdt->wdt_dev);
-}
-
-static int pdc_wdt_remove(struct platform_device *pdev)
-{
- struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
-
- pdc_wdt_stop(&pdc_wdt->wdt_dev);
- watchdog_unregister_device(&pdc_wdt->wdt_dev);
- clk_disable_unprepare(pdc_wdt->wdt_clk);
- clk_disable_unprepare(pdc_wdt->sys_clk);
-
- return 0;
+ watchdog_stop_on_reboot(&pdc_wdt->wdt_dev);
+ watchdog_stop_on_unregister(&pdc_wdt->wdt_dev);
+ return devm_watchdog_register_device(dev, &pdc_wdt->wdt_dev);
}
static const struct of_device_id pdc_wdt_match[] = {
@@ -329,8 +310,6 @@ static struct platform_driver pdc_wdt_driver = {
.of_match_table = pdc_wdt_match,
},
.probe = pdc_wdt_probe,
- .remove = pdc_wdt_remove,
- .shutdown = pdc_wdt_shutdown,
};
module_platform_driver(pdc_wdt_driver);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 2b52514eaa86..a606005dd65f 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -178,8 +178,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
unsigned int new_timeout)
{
- __imx2_wdt_set_timeout(wdog, new_timeout);
+ unsigned int actual;
+ actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
+ __imx2_wdt_set_timeout(wdog, actual);
wdog->timeout = new_timeout;
return 0;
}
@@ -247,7 +249,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
{
struct imx2_wdt_device *wdev;
struct watchdog_device *wdog;
- struct resource *res;
void __iomem *base;
int ret;
u32 val;
@@ -256,8 +257,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (!wdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
new file mode 100644
index 000000000000..49848b66186c
--- /dev/null
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2019 NXP.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define DEFAULT_TIMEOUT 60
+/*
+ * Software timer tick implemented in scfw side, support 10ms to 0xffffffff ms
+ * in theory, but for normal case, 1s~128s is enough, you can change this max
+ * value in case it's not enough.
+ */
+#define MAX_TIMEOUT 128
+
+#define IMX_SIP_TIMER 0xC2000002
+#define IMX_SIP_TIMER_START_WDOG 0x01
+#define IMX_SIP_TIMER_STOP_WDOG 0x02
+#define IMX_SIP_TIMER_SET_WDOG_ACT 0x03
+#define IMX_SIP_TIMER_PING_WDOG 0x04
+#define IMX_SIP_TIMER_SET_TIMEOUT_WDOG 0x05
+#define IMX_SIP_TIMER_GET_WDOG_STAT 0x06
+#define IMX_SIP_TIMER_SET_PRETIME_WDOG 0x07
+
+#define SC_TIMER_WDOG_ACTION_PARTITION 0
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0000);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int imx_sc_wdt_ping(struct watchdog_device *wdog)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_PING_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ return 0;
+}
+
+static int imx_sc_wdt_start(struct watchdog_device *wdog)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_START_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ return -EACCES;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_WDOG_ACT,
+ SC_TIMER_WDOG_ACTION_PARTITION,
+ 0, 0, 0, 0, 0, &res);
+ return res.a0 ? -EACCES : 0;
+}
+
+static int imx_sc_wdt_stop(struct watchdog_device *wdog)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_STOP_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0 ? -EACCES : 0;
+}
+
+static int imx_sc_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int timeout)
+{
+ struct arm_smccc_res res;
+
+ wdog->timeout = timeout;
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_TIMEOUT_WDOG,
+ timeout * 1000, 0, 0, 0, 0, 0, &res);
+
+ return res.a0 ? -EACCES : 0;
+}
+
+static const struct watchdog_ops imx_sc_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = imx_sc_wdt_start,
+ .stop = imx_sc_wdt_stop,
+ .ping = imx_sc_wdt_ping,
+ .set_timeout = imx_sc_wdt_set_timeout,
+};
+
+static const struct watchdog_info imx_sc_wdt_info = {
+ .identity = "i.MX SC watchdog timer",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE | WDIOF_PRETIMEOUT,
+};
+
+static int imx_sc_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *imx_sc_wdd;
+ int ret;
+
+ imx_sc_wdd = devm_kzalloc(dev, sizeof(*imx_sc_wdd), GFP_KERNEL);
+ if (!imx_sc_wdd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, imx_sc_wdd);
+
+ imx_sc_wdd->info = &imx_sc_wdt_info;
+ imx_sc_wdd->ops = &imx_sc_wdt_ops;
+ imx_sc_wdd->min_timeout = 1;
+ imx_sc_wdd->max_timeout = MAX_TIMEOUT;
+ imx_sc_wdd->parent = dev;
+ imx_sc_wdd->timeout = DEFAULT_TIMEOUT;
+
+ watchdog_init_timeout(imx_sc_wdd, 0, dev);
+ watchdog_stop_on_reboot(imx_sc_wdd);
+ watchdog_stop_on_unregister(imx_sc_wdd);
+
+ ret = devm_watchdog_register_device(dev, imx_sc_wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused imx_sc_wdt_suspend(struct device *dev)
+{
+ struct watchdog_device *imx_sc_wdd = dev_get_drvdata(dev);
+
+ if (watchdog_active(imx_sc_wdd))
+ imx_sc_wdt_stop(imx_sc_wdd);
+
+ return 0;
+}
+
+static int __maybe_unused imx_sc_wdt_resume(struct device *dev)
+{
+ struct watchdog_device *imx_sc_wdd = dev_get_drvdata(dev);
+
+ if (watchdog_active(imx_sc_wdd))
+ imx_sc_wdt_start(imx_sc_wdd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops,
+ imx_sc_wdt_suspend, imx_sc_wdt_resume);
+
+static const struct of_device_id imx_sc_wdt_dt_ids[] = {
+ { .compatible = "fsl,imx-sc-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_sc_wdt_dt_ids);
+
+static struct platform_driver imx_sc_wdt_driver = {
+ .probe = imx_sc_wdt_probe,
+ .driver = {
+ .name = "imx-sc-wdt",
+ .of_match_table = imx_sc_wdt_dt_ids,
+ .pm = &imx_sc_wdt_pm_ops,
+ },
+};
+module_platform_driver(imx_sc_wdt_driver);
+
+MODULE_AUTHOR("Robin Gong <yibin.gong@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX system controller watchdog driver");
+MODULE_LICENSE("GPL v2");
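For context, the new imx_sc_wdt driver above is driven through the standard watchdog character-device interface; nothing below is part of the patch. A hypothetical userspace smoke test (the /dev/watchdog0 node name and the 30-second value are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;
	int fd = open("/dev/watchdog0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* ends up in imx_sc_wdt_set_timeout() */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* ends up in imx_sc_wdt_ping() */
	if (write(fd, "V", 1) != 1)		/* magic close, allowed by WDIOF_MAGICCLOSE */
		perror("write");
	close(fd);
	return 0;
}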
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 5d20cdd30efe..5592b975fe3a 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -77,7 +77,7 @@ static int indydog_open(struct inode *inode, struct file *file)
pr_info("Started watchdog timer\n");
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int indydog_release(struct inode *inode, struct file *file)
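The nonseekable_open() to stream_open() change seen in indydog repeats in most of the legacy character-device watchdogs below (intel_scu, iop, it8712f, ixp4xx, ks8695, machzwd, mixcom, mtx-1, mv64x60, nuc900, nv_tco, pc87413 and the pcwd family). stream_open() marks the file as stream-like, so reads and writes neither use nor serialize on f_pos, which suits these write-to-ping interfaces; nonseekable_open() only forbade seeking. A standalone sketch with placeholder names, not taken from any driver above:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>

static unsigned long foo_wdt_is_open;	/* single-open guard, placeholder */

static void foo_wdt_start(void)
{
	/* kick the (hypothetical) hardware here */
}

static int foo_wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &foo_wdt_is_open))
		return -EBUSY;

	foo_wdt_start();
	/* stream-like: no f_pos bookkeeping or locking on read/write */
	return stream_open(inode, file);
}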
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 72c108a12c19..6cf7cc1ff615 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -110,12 +110,13 @@ static const struct watchdog_ops mid_wdt_ops = {
static int mid_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdt_dev;
- struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+ struct intel_mid_wdt_pdata *pdata = dev->platform_data;
int ret;
if (!pdata) {
- dev_err(&pdev->dev, "missing platform data\n");
+ dev_err(dev, "missing platform data\n");
return -EINVAL;
}
@@ -125,7 +126,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
return ret;
}
- wdt_dev = devm_kzalloc(&pdev->dev, sizeof(*wdt_dev), GFP_KERNEL);
+ wdt_dev = devm_kzalloc(dev, sizeof(*wdt_dev), GFP_KERNEL);
if (!wdt_dev)
return -ENOMEM;
@@ -134,16 +135,15 @@ static int mid_wdt_probe(struct platform_device *pdev)
wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN;
wdt_dev->max_timeout = MID_WDT_TIMEOUT_MAX;
wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
- wdt_dev->parent = &pdev->dev;
+ wdt_dev->parent = dev;
- watchdog_set_drvdata(wdt_dev, &pdev->dev);
+ watchdog_set_drvdata(wdt_dev, dev);
- ret = devm_request_irq(&pdev->dev, pdata->irq, mid_wdt_irq,
+ ret = devm_request_irq(dev, pdata->irq, mid_wdt_irq,
IRQF_SHARED | IRQF_NO_SUSPEND, "watchdog",
wdt_dev);
if (ret) {
- dev_err(&pdev->dev, "error requesting warning irq %d\n",
- pdata->irq);
+ dev_err(dev, "error requesting warning irq %d\n", pdata->irq);
return ret;
}
@@ -163,13 +163,13 @@ static int mid_wdt_probe(struct platform_device *pdev)
/* Make sure the watchdog is serviced */
set_bit(WDOG_HW_RUNNING, &wdt_dev->status);
- ret = devm_watchdog_register_device(&pdev->dev, wdt_dev);
+ ret = devm_watchdog_register_device(dev, wdt_dev);
if (ret) {
- dev_err(&pdev->dev, "error registering watchdog device\n");
+ dev_err(dev, "error registering watchdog device\n");
return ret;
}
- dev_info(&pdev->dev, "Intel MID watchdog device probed\n");
+ dev_info(dev, "Intel MID watchdog device probed\n");
return 0;
}
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 0caab6241eb7..f7baf75d38c0 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -25,7 +25,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/compiler.h>
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -224,7 +223,7 @@ static int intel_scu_set_heartbeat(u32 t)
watchdog_device.timer_tbl_ptr->freq_hz);
pr_debug("set_heartbeat: timer_set is %x (hex)\n",
watchdog_device.timer_set);
- pr_debug("set_hearbeat: timer_margin is %x (hex)\n", timer_margin);
+ pr_debug("set_heartbeat: timer_margin is %x (hex)\n", timer_margin);
pr_debug("set_heartbeat: threshold is %x (hex)\n",
watchdog_device.threshold);
pr_debug("set_heartbeat: soft_threshold is %x (hex)\n",
@@ -304,7 +303,7 @@ static int intel_scu_open(struct inode *inode, struct file *file)
if (watchdog_device.driver_closed)
return -EPERM;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int intel_scu_release(struct inode *inode, struct file *file)
@@ -545,21 +544,4 @@ register_reboot_error:
iounmap(watchdog_device.timer_load_count_addr);
return ret;
}
-
-static void __exit intel_scu_watchdog_exit(void)
-{
-
- misc_deregister(&watchdog_device.miscdev);
- unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
- /* disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
- iounmap(watchdog_device.timer_load_count_addr);
-}
-
late_initcall(intel_scu_watchdog_init);
-module_exit(intel_scu_watchdog_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(WDT_VER);
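The intel_scu_watchdog hunk drops <linux/module.h>, the exit path and the MODULE_*() macros, the usual treatment when a driver can only be built in (presumably its Kconfig symbol is, or is being made, bool; the Kconfig side is not visible in this section): built-in code is never unloaded, so there is nothing for module_exit() to tear down. The driver keeps only its initcall, roughly this shape (placeholder names):

#include <linux/init.h>
#include <linux/kernel.h>

static int __init foo_wdt_init(void)
{
	/* probe the hardware and register the watchdog here */
	return 0;
}
late_initcall(foo_wdt_init);
/* No module_exit()/MODULE_*(): the code is built in and never unloads. */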
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index b16013ffacc2..d910a7dec21b 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -101,7 +101,7 @@ static int iop_wdt_open(struct inode *inode, struct file *file)
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
wdt_enable();
set_bit(WDT_ENABLED, &wdt_status);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static ssize_t iop_wdt_write(struct file *file, const char *data, size_t len,
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 41b3979a9d87..b1567240a0e6 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -327,7 +327,7 @@ static int it8712f_wdt_open(struct inode *inode, struct file *file)
ret = it8712f_wdt_enable();
if (ret)
return ret;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int it8712f_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index f20cc53ff719..9067998759e3 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/of.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/bitops.h>
@@ -65,7 +66,7 @@ static int ixp4xx_wdt_open(struct inode *inode, struct file *file)
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
wdt_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static ssize_t
@@ -176,6 +177,14 @@ static int __init ixp4xx_wdt_init(void)
{
int ret;
+ /*
+ * FIXME: we bail out on device tree boot but this really needs
+ * to be fixed in a nicer way: this registers the MDIO bus before
+ * even matching the driver infrastructure, we should only probe
+ * detected hardware.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
pr_err("Rev. A0 IXP42x CPU detected - watchdog disabled\n");
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index ec4d99a830ba..d1bc7cbd4f2b 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -163,12 +163,12 @@ MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches);
static int jz4740_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct jz4740_wdt_drvdata *drvdata;
struct watchdog_device *jz4740_wdt;
- struct resource *res;
int ret;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(struct jz4740_wdt_drvdata),
+ drvdata = devm_kzalloc(dev, sizeof(struct jz4740_wdt_drvdata),
GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
@@ -182,27 +182,24 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
jz4740_wdt->timeout = heartbeat;
jz4740_wdt->min_timeout = 1;
jz4740_wdt->max_timeout = MAX_HEARTBEAT;
- jz4740_wdt->parent = &pdev->dev;
+ jz4740_wdt->parent = dev;
watchdog_set_nowayout(jz4740_wdt, nowayout);
watchdog_set_drvdata(jz4740_wdt, drvdata);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->base = devm_ioremap_resource(&pdev->dev, res);
+ drvdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- drvdata->rtc_clk = devm_clk_get(&pdev->dev, "rtc");
+ drvdata->rtc_clk = devm_clk_get(dev, "rtc");
if (IS_ERR(drvdata->rtc_clk)) {
- dev_err(&pdev->dev, "cannot find RTC clock\n");
+ dev_err(dev, "cannot find RTC clock\n");
return PTR_ERR(drvdata->rtc_clk);
}
- ret = devm_watchdog_register_device(&pdev->dev, &drvdata->wdt);
+ ret = devm_watchdog_register_device(dev, &drvdata->wdt);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, drvdata);
-
return 0;
}
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index e268add43010..543eb0f27a42 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -467,7 +467,7 @@ static int kempld_wdt_probe(struct platform_device *pdev)
KEMPLD_WDT_CFG_GLOBAL_LOCK)) {
if (!nowayout)
dev_warn(dev,
- "Forcing nowayout - watchdog lock enabled!\n");
+ "Forcing nowayout - watchdog lock enabled!\n");
nowayout = true;
}
@@ -492,7 +492,9 @@ static int kempld_wdt_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, wdt_data);
- ret = watchdog_register_device(wdd);
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
@@ -501,26 +503,6 @@ static int kempld_wdt_probe(struct platform_device *pdev)
return 0;
}
-static void kempld_wdt_shutdown(struct platform_device *pdev)
-{
- struct kempld_wdt_data *wdt_data = platform_get_drvdata(pdev);
-
- kempld_wdt_stop(&wdt_data->wdd);
-}
-
-static int kempld_wdt_remove(struct platform_device *pdev)
-{
- struct kempld_wdt_data *wdt_data = platform_get_drvdata(pdev);
- struct watchdog_device *wdd = &wdt_data->wdd;
- int ret = 0;
-
- if (!nowayout)
- ret = kempld_wdt_stop(wdd);
- watchdog_unregister_device(wdd);
-
- return ret;
-}
-
#ifdef CONFIG_PM
/* Disable watchdog if it is active during suspend */
static int kempld_wdt_suspend(struct platform_device *pdev,
@@ -567,8 +549,6 @@ static struct platform_driver kempld_wdt_driver = {
.name = "kempld-wdt",
},
.probe = kempld_wdt_probe,
- .remove = kempld_wdt_remove,
- .shutdown = kempld_wdt_shutdown,
.suspend = kempld_wdt_suspend,
.resume = kempld_wdt_resume,
};
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 1e41818a44bc..0565cf30017b 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -142,7 +142,7 @@ static int ks8695_wdt_open(struct inode *inode, struct file *file)
return -EBUSY;
ks8695_wdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/*
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 83da84d6074b..4caf02ba5d49 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -203,7 +203,6 @@ static int ltq_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ltq_wdt_priv *priv;
struct watchdog_device *wdt;
- struct resource *res;
struct clk *clk;
const struct ltq_wdt_hw *ltq_wdt_hw;
int ret;
@@ -213,8 +212,7 @@ static int ltq_wdt_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->membase = devm_ioremap_resource(dev, res);
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);
diff --git a/drivers/watchdog/loongson1_wdt.c b/drivers/watchdog/loongson1_wdt.c
index 3aee50c64a36..d8075e2affa7 100644
--- a/drivers/watchdog/loongson1_wdt.c
+++ b/drivers/watchdog/loongson1_wdt.c
@@ -83,38 +83,44 @@ static const struct watchdog_ops ls1x_wdt_ops = {
.set_timeout = ls1x_wdt_set_timeout,
};
+static void ls1x_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int ls1x_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ls1x_wdt_drvdata *drvdata;
struct watchdog_device *ls1x_wdt;
unsigned long clk_rate;
- struct resource *res;
int err;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->base = devm_ioremap_resource(&pdev->dev, res);
+ drvdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- drvdata->clk = devm_clk_get(&pdev->dev, pdev->name);
+ drvdata->clk = devm_clk_get(dev, pdev->name);
if (IS_ERR(drvdata->clk))
return PTR_ERR(drvdata->clk);
err = clk_prepare_enable(drvdata->clk);
if (err) {
- dev_err(&pdev->dev, "clk enable failed\n");
+ dev_err(dev, "clk enable failed\n");
return err;
}
+ err = devm_add_action_or_reset(dev, ls1x_clk_disable_unprepare,
+ drvdata->clk);
+ if (err)
+ return err;
clk_rate = clk_get_rate(drvdata->clk);
- if (!clk_rate) {
- err = -EINVAL;
- goto err0;
- }
+ if (!clk_rate)
+ return -EINVAL;
drvdata->clk_rate = clk_rate;
ls1x_wdt = &drvdata->wdt;
@@ -123,41 +129,27 @@ static int ls1x_wdt_probe(struct platform_device *pdev)
ls1x_wdt->timeout = DEFAULT_HEARTBEAT;
ls1x_wdt->min_timeout = 1;
ls1x_wdt->max_hw_heartbeat_ms = U32_MAX / clk_rate * 1000;
- ls1x_wdt->parent = &pdev->dev;
+ ls1x_wdt->parent = dev;
- watchdog_init_timeout(ls1x_wdt, heartbeat, &pdev->dev);
+ watchdog_init_timeout(ls1x_wdt, heartbeat, dev);
watchdog_set_nowayout(ls1x_wdt, nowayout);
watchdog_set_drvdata(ls1x_wdt, drvdata);
- err = watchdog_register_device(&drvdata->wdt);
+ err = devm_watchdog_register_device(dev, &drvdata->wdt);
if (err) {
- dev_err(&pdev->dev, "failed to register watchdog device\n");
- goto err0;
+ dev_err(dev, "failed to register watchdog device\n");
+ return err;
}
platform_set_drvdata(pdev, drvdata);
- dev_info(&pdev->dev, "Loongson1 Watchdog driver registered\n");
-
- return 0;
-err0:
- clk_disable_unprepare(drvdata->clk);
- return err;
-}
-
-static int ls1x_wdt_remove(struct platform_device *pdev)
-{
- struct ls1x_wdt_drvdata *drvdata = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&drvdata->wdt);
- clk_disable_unprepare(drvdata->clk);
+ dev_info(dev, "Loongson1 Watchdog driver registered\n");
return 0;
}
static struct platform_driver ls1x_wdt_driver = {
.probe = ls1x_wdt_probe,
- .remove = ls1x_wdt_remove,
.driver = {
.name = "ls1x-wdt",
},
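The ls1x_clk_disable_unprepare() + devm_add_action_or_reset() pattern introduced in loongson1_wdt reappears in the lpc18xx, meson_gxbb, of_xilinx and pic32-dmt hunks below. devm_add_action_or_reset() registers the undo action to run automatically at driver detach, and if registering it fails it runs the action immediately and returns the error, so every later early return in probe can drop its manual clk_disable_unprepare() and the error-label unwinding disappears. A standalone sketch (foo_* names are placeholders):

#include <linux/clk.h>
#include <linux/device.h>

static void foo_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int foo_enable_clk(struct device *dev, struct clk *clk)
{
	int err;

	err = clk_prepare_enable(clk);
	if (err)
		return err;

	/* Undo runs at driver detach, or right now if registration fails. */
	return devm_add_action_or_reset(dev, foo_clk_disable_unprepare, clk);
}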
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index 331cadb459ac..0e82abd71d35 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -200,19 +200,22 @@ static const struct watchdog_ops lpc18xx_wdt_ops = {
.restart = lpc18xx_wdt_restart,
};
+static void lpc18xx_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int lpc18xx_wdt_probe(struct platform_device *pdev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
lpc18xx_wdt = devm_kzalloc(dev, sizeof(*lpc18xx_wdt), GFP_KERNEL);
if (!lpc18xx_wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpc18xx_wdt->base = devm_ioremap_resource(dev, res);
+ lpc18xx_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_wdt->base))
return PTR_ERR(lpc18xx_wdt->base);
@@ -233,19 +236,26 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev)
dev_err(dev, "could not prepare or enable sys clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, lpc18xx_clk_disable_unprepare,
+ lpc18xx_wdt->reg_clk);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(lpc18xx_wdt->wdt_clk);
if (ret) {
dev_err(dev, "could not prepare or enable wdt clock\n");
- goto disable_reg_clk;
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, lpc18xx_clk_disable_unprepare,
+ lpc18xx_wdt->wdt_clk);
+ if (ret)
+ return ret;
/* We use the clock rate to calculate timeouts */
lpc18xx_wdt->clk_rate = clk_get_rate(lpc18xx_wdt->wdt_clk);
if (lpc18xx_wdt->clk_rate == 0) {
dev_err(dev, "failed to get clock rate\n");
- ret = -EINVAL;
- goto disable_wdt_clk;
+ return -EINVAL;
}
lpc18xx_wdt->wdt_dev.info = &lpc18xx_wdt_info;
@@ -276,24 +286,8 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lpc18xx_wdt);
- ret = watchdog_register_device(&lpc18xx_wdt->wdt_dev);
- if (ret)
- goto disable_wdt_clk;
-
- return 0;
-
-disable_wdt_clk:
- clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
-disable_reg_clk:
- clk_disable_unprepare(lpc18xx_wdt->reg_clk);
- return ret;
-}
-
-static void lpc18xx_wdt_shutdown(struct platform_device *pdev)
-{
- struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
-
- lpc18xx_wdt_stop(&lpc18xx_wdt->wdt_dev);
+ watchdog_stop_on_reboot(&lpc18xx_wdt->wdt_dev);
+ return devm_watchdog_register_device(dev, &lpc18xx_wdt->wdt_dev);
}
static int lpc18xx_wdt_remove(struct platform_device *pdev)
@@ -303,10 +297,6 @@ static int lpc18xx_wdt_remove(struct platform_device *pdev)
dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n");
del_timer(&lpc18xx_wdt->timer);
- watchdog_unregister_device(&lpc18xx_wdt->wdt_dev);
- clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
- clk_disable_unprepare(lpc18xx_wdt->reg_clk);
-
return 0;
}
@@ -323,7 +313,6 @@ static struct platform_driver lpc18xx_wdt_driver = {
},
.probe = lpc18xx_wdt_probe,
.remove = lpc18xx_wdt_remove,
- .shutdown = lpc18xx_wdt_shutdown,
};
module_platform_driver(lpc18xx_wdt_driver);
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index da6fa2b68074..752d03620f0a 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -85,7 +85,7 @@ static int m54xx_wdt_open(struct inode *inode, struct file *file)
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
wdt_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static ssize_t m54xx_wdt_write(struct file *file, const char *data,
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 88d823d87a4b..c0c9e948adbc 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -177,6 +177,7 @@ static inline void zf_set_timer(unsigned short new, unsigned char n)
switch (n) {
case WD1:
zf_writew(COUNTER_1, new);
+ /* fall through */
case WD2:
zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
default:
@@ -318,7 +319,7 @@ static long zf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_KEEPALIVE:
- zf_ping(0);
+ zf_ping(NULL);
break;
default:
return -ENOTTY;
@@ -333,7 +334,7 @@ static int zf_open(struct inode *inode, struct file *file)
if (nowayout)
__module_get(THIS_MODULE);
zf_timer_on();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int zf_close(struct inode *inode, struct file *file)
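The one-line comment added to machzwd above marks an intentional switch fall-through so that -Wimplicit-fallthrough does not warn about it; kernels of this vintage used the comment form (later trees moved to the fallthrough; pseudo-keyword). Illustrative shape only, with placeholder helpers:

static void foo_do_one(void) { }
static void foo_do_two(void) { }

static void foo_dispatch(int n)
{
	switch (n) {
	case 1:
		foo_do_one();
		/* fall through */
	case 2:
		/* case 1 deliberately also runs this */
		foo_do_two();
		break;
	default:
		break;
	}
}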
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index bf6a068245ba..3a899628a834 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -187,9 +187,7 @@ static void max63xx_mmap_set(struct max63xx_wdt *wdt, u8 set)
static int max63xx_mmap_init(struct platform_device *p, struct max63xx_wdt *wdt)
{
- struct resource *mem = platform_get_resource(p, IORESOURCE_MEM, 0);
-
- wdt->base = devm_ioremap_resource(&p->dev, mem);
+ wdt->base = devm_platform_ioremap_resource(p, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
@@ -202,11 +200,12 @@ static int max63xx_mmap_init(struct platform_device *p, struct max63xx_wdt *wdt)
static int max63xx_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct max63xx_wdt *wdt;
struct max63xx_timeout *table;
int err;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -217,7 +216,7 @@ static int max63xx_wdt_probe(struct platform_device *pdev)
wdt->timeout = max63xx_select_timeout(table, heartbeat);
if (!wdt->timeout) {
- dev_err(&pdev->dev, "unable to satisfy %ds heartbeat request\n",
+ dev_err(dev, "unable to satisfy %ds heartbeat request\n",
heartbeat);
return -EINVAL;
}
@@ -229,30 +228,22 @@ static int max63xx_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &wdt->wdd);
watchdog_set_drvdata(&wdt->wdd, wdt);
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->wdd.timeout = wdt->timeout->twd;
wdt->wdd.info = &max63xx_wdt_info;
wdt->wdd.ops = &max63xx_wdt_ops;
watchdog_set_nowayout(&wdt->wdd, nowayout);
- err = watchdog_register_device(&wdt->wdd);
+ err = devm_watchdog_register_device(dev, &wdt->wdd);
if (err)
return err;
- dev_info(&pdev->dev, "using %ds heartbeat with %ds initial delay\n",
+ dev_info(dev, "using %ds heartbeat with %ds initial delay\n",
wdt->timeout->twd, wdt->timeout->tdelay);
return 0;
}
-static int max63xx_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(wdd);
- return 0;
-}
-
static const struct platform_device_id max63xx_id_table[] = {
{ "max6369_wdt", (kernel_ulong_t)max6369_table, },
{ "max6370_wdt", (kernel_ulong_t)max6369_table, },
@@ -266,7 +257,6 @@ MODULE_DEVICE_TABLE(platform, max63xx_id_table);
static struct platform_driver max63xx_wdt_driver = {
.probe = max63xx_wdt_probe,
- .remove = max63xx_wdt_remove,
.id_table = max63xx_id_table,
.driver = {
.name = "max63xx_wdt",
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index 70c9cd3ba938..3ca6b9337932 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -112,17 +112,18 @@ static const struct watchdog_ops max77620_wdt_ops = {
static int max77620_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct max77620_wdt *wdt;
struct watchdog_device *wdt_dev;
unsigned int regval;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt->dev = &pdev->dev;
- wdt->rmap = dev_get_regmap(pdev->dev.parent, NULL);
+ wdt->dev = dev;
+ wdt->rmap = dev_get_regmap(dev->parent, NULL);
if (!wdt->rmap) {
dev_err(wdt->dev, "Failed to get parent regmap\n");
return -ENODEV;
@@ -183,25 +184,16 @@ static int max77620_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdt_dev, nowayout);
watchdog_set_drvdata(wdt_dev, wdt);
- ret = watchdog_register_device(wdt_dev);
+ watchdog_stop_on_unregister(wdt_dev);
+ ret = devm_watchdog_register_device(dev, wdt_dev);
if (ret < 0) {
- dev_err(&pdev->dev, "watchdog registration failed: %d\n", ret);
+ dev_err(dev, "watchdog registration failed: %d\n", ret);
return ret;
}
return 0;
}
-static int max77620_wdt_remove(struct platform_device *pdev)
-{
- struct max77620_wdt *wdt = platform_get_drvdata(pdev);
-
- max77620_wdt_stop(&wdt->wdt_dev);
- watchdog_unregister_device(&wdt->wdt_dev);
-
- return 0;
-}
-
static const struct platform_device_id max77620_wdt_devtype[] = {
{ .name = "max77620-watchdog", },
{ },
@@ -213,7 +205,6 @@ static struct platform_driver max77620_wdt_driver = {
.name = "max77620-watchdog",
},
.probe = max77620_wdt_probe,
- .remove = max77620_wdt_remove,
.id_table = max77620_wdt_devtype,
};
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 6db69883ece6..e9ca4e0e25dc 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -127,19 +127,20 @@ static struct watchdog_device a21_wdt = {
static int a21_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct a21_wdt_drv *drv;
unsigned int reset = 0;
int num_gpios;
int ret;
int i;
- drv = devm_kzalloc(&pdev->dev, sizeof(struct a21_wdt_drv), GFP_KERNEL);
+ drv = devm_kzalloc(dev, sizeof(struct a21_wdt_drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
- num_gpios = gpiod_count(&pdev->dev, NULL);
+ num_gpios = gpiod_count(dev, NULL);
if (num_gpios != NUM_GPIOS) {
- dev_err(&pdev->dev, "gpios DT property wrong, got %d want %d",
+ dev_err(dev, "gpios DT property wrong, got %d want %d",
num_gpios, NUM_GPIOS);
return -ENODEV;
}
@@ -152,12 +153,9 @@ static int a21_wdt_probe(struct platform_device *pdev)
gflags = GPIOD_ASIS;
else
gflags = GPIOD_IN;
- drv->gpios[i] = devm_gpiod_get_index(&pdev->dev, NULL, i,
- gflags);
- if (IS_ERR(drv->gpios[i])) {
- ret = PTR_ERR(drv->gpios[i]);
- return ret;
- }
+ drv->gpios[i] = devm_gpiod_get_index(dev, NULL, i, gflags);
+ if (IS_ERR(drv->gpios[i]))
+ return PTR_ERR(drv->gpios[i]);
gpiod_set_consumer_name(drv->gpios[i], "MEN A21 Watchdog");
@@ -173,10 +171,10 @@ static int a21_wdt_probe(struct platform_device *pdev)
}
}
- watchdog_init_timeout(&a21_wdt, 30, &pdev->dev);
+ watchdog_init_timeout(&a21_wdt, 30, dev);
watchdog_set_nowayout(&a21_wdt, nowayout);
watchdog_set_drvdata(&a21_wdt, drv);
- a21_wdt.parent = &pdev->dev;
+ a21_wdt.parent = dev;
reset = a21_wdt_get_bootstatus(drv);
if (reset == 2)
@@ -189,15 +187,15 @@ static int a21_wdt_probe(struct platform_device *pdev)
a21_wdt.bootstatus |= WDIOF_EXTERN2;
drv->wdt = a21_wdt;
- dev_set_drvdata(&pdev->dev, drv);
+ dev_set_drvdata(dev, drv);
- ret = devm_watchdog_register_device(&pdev->dev, &a21_wdt);
+ ret = devm_watchdog_register_device(dev, &a21_wdt);
if (ret) {
- dev_err(&pdev->dev, "Cannot register watchdog device\n");
+ dev_err(dev, "Cannot register watchdog device\n");
return ret;
}
- dev_info(&pdev->dev, "MEN A21 watchdog timer driver enabled\n");
+ dev_info(dev, "MEN A21 watchdog timer driver enabled\n");
return 0;
}
diff --git a/drivers/watchdog/menf21bmc_wdt.c b/drivers/watchdog/menf21bmc_wdt.c
index 3aefddebb386..b1dbff553cdc 100644
--- a/drivers/watchdog/menf21bmc_wdt.c
+++ b/drivers/watchdog/menf21bmc_wdt.c
@@ -117,12 +117,12 @@ static const struct watchdog_ops menf21bmc_wdt_ops = {
static int menf21bmc_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret, bmc_timeout;
struct menf21bmc_wdt *drv_data;
- struct i2c_client *i2c_client = to_i2c_client(pdev->dev.parent);
+ struct i2c_client *i2c_client = to_i2c_client(dev->parent);
- drv_data = devm_kzalloc(&pdev->dev,
- sizeof(struct menf21bmc_wdt), GFP_KERNEL);
+ drv_data = devm_kzalloc(dev, sizeof(struct menf21bmc_wdt), GFP_KERNEL);
if (!drv_data)
return -ENOMEM;
@@ -130,7 +130,7 @@ static int menf21bmc_wdt_probe(struct platform_device *pdev)
drv_data->wdt.info = &menf21bmc_wdt_info;
drv_data->wdt.min_timeout = BMC_WD_TIMEOUT_MIN;
drv_data->wdt.max_timeout = BMC_WD_TIMEOUT_MAX;
- drv_data->wdt.parent = &pdev->dev;
+ drv_data->wdt.parent = dev;
drv_data->i2c_client = i2c_client;
/*
@@ -140,40 +140,28 @@ static int menf21bmc_wdt_probe(struct platform_device *pdev)
bmc_timeout = i2c_smbus_read_word_data(drv_data->i2c_client,
BMC_CMD_WD_TIME);
if (bmc_timeout < 0) {
- dev_err(&pdev->dev, "failed to get current WDT timeout\n");
+ dev_err(dev, "failed to get current WDT timeout\n");
return bmc_timeout;
}
- watchdog_init_timeout(&drv_data->wdt, bmc_timeout / 10, &pdev->dev);
+ watchdog_init_timeout(&drv_data->wdt, bmc_timeout / 10, dev);
watchdog_set_nowayout(&drv_data->wdt, nowayout);
watchdog_set_drvdata(&drv_data->wdt, drv_data);
platform_set_drvdata(pdev, drv_data);
ret = menf21bmc_wdt_set_bootstatus(drv_data);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to set Watchdog bootstatus\n");
+ dev_err(dev, "failed to set Watchdog bootstatus\n");
return ret;
}
- ret = watchdog_register_device(&drv_data->wdt);
+ ret = devm_watchdog_register_device(dev, &drv_data->wdt);
if (ret) {
- dev_err(&pdev->dev, "failed to register Watchdog device\n");
+ dev_err(dev, "failed to register Watchdog device\n");
return ret;
}
- dev_info(&pdev->dev, "MEN 14F021P00 BMC Watchdog device enabled\n");
-
- return 0;
-}
-
-static int menf21bmc_wdt_remove(struct platform_device *pdev)
-{
- struct menf21bmc_wdt *drv_data = platform_get_drvdata(pdev);
-
- dev_warn(&pdev->dev,
- "Unregister MEN 14F021P00 BMC Watchdog device, board may reset\n");
-
- watchdog_unregister_device(&drv_data->wdt);
+ dev_info(dev, "MEN 14F021P00 BMC Watchdog device enabled\n");
return 0;
}
@@ -191,7 +179,6 @@ static struct platform_driver menf21bmc_wdt = {
.name = DEVNAME,
},
.probe = menf21bmc_wdt_probe,
- .remove = menf21bmc_wdt_remove,
.shutdown = menf21bmc_wdt_shutdown,
};
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 69adeab3fde7..d17c1a6ed723 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -136,32 +136,40 @@ static const struct of_device_id meson_gxbb_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, meson_gxbb_wdt_dt_ids);
+static void meson_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int meson_gxbb_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct meson_gxbb_wdt *data;
- struct resource *res;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ data->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->reg_base))
return PTR_ERR(data->reg_base);
- data->clk = devm_clk_get(&pdev->dev, NULL);
+ data->clk = devm_clk_get(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
ret = clk_prepare_enable(data->clk);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, meson_clk_disable_unprepare,
+ data->clk);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, data);
- data->wdt_dev.parent = &pdev->dev;
+ data->wdt_dev.parent = dev;
data->wdt_dev.info = &meson_gxbb_wdt_info;
data->wdt_dev.ops = &meson_gxbb_wdt_ops;
data->wdt_dev.max_hw_heartbeat_ms = GXBB_WDT_TCNT_SETUP_MASK;
@@ -178,37 +186,12 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
- ret = watchdog_register_device(&data->wdt_dev);
- if (ret) {
- clk_disable_unprepare(data->clk);
- return ret;
- }
-
- return 0;
-}
-
-static int meson_gxbb_wdt_remove(struct platform_device *pdev)
-{
- struct meson_gxbb_wdt *data = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&data->wdt_dev);
-
- clk_disable_unprepare(data->clk);
-
- return 0;
-}
-
-static void meson_gxbb_wdt_shutdown(struct platform_device *pdev)
-{
- struct meson_gxbb_wdt *data = platform_get_drvdata(pdev);
-
- meson_gxbb_wdt_stop(&data->wdt_dev);
+ watchdog_stop_on_reboot(&data->wdt_dev);
+ return devm_watchdog_register_device(dev, &data->wdt_dev);
}
static struct platform_driver meson_gxbb_wdt_driver = {
.probe = meson_gxbb_wdt_probe,
- .remove = meson_gxbb_wdt_remove,
- .shutdown = meson_gxbb_wdt_shutdown,
.driver = {
.name = "meson-gxbb-wdt",
.pm = &meson_gxbb_wdt_pm_ops,
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index cd0275a6cdac..01889cef81e1 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -164,28 +164,27 @@ MODULE_DEVICE_TABLE(of, meson_wdt_dt_ids);
static int meson_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct device *dev = &pdev->dev;
struct meson_wdt_dev *meson_wdt;
const struct of_device_id *of_id;
int err;
- meson_wdt = devm_kzalloc(&pdev->dev, sizeof(*meson_wdt), GFP_KERNEL);
+ meson_wdt = devm_kzalloc(dev, sizeof(*meson_wdt), GFP_KERNEL);
if (!meson_wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- meson_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ meson_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson_wdt->wdt_base))
return PTR_ERR(meson_wdt->wdt_base);
- of_id = of_match_device(meson_wdt_dt_ids, &pdev->dev);
+ of_id = of_match_device(meson_wdt_dt_ids, dev);
if (!of_id) {
- dev_err(&pdev->dev, "Unable to initialize WDT data\n");
+ dev_err(dev, "Unable to initialize WDT data\n");
return -ENODEV;
}
meson_wdt->data = of_id->data;
- meson_wdt->wdt_dev.parent = &pdev->dev;
+ meson_wdt->wdt_dev.parent = dev;
meson_wdt->wdt_dev.info = &meson_wdt_info;
meson_wdt->wdt_dev.ops = &meson_wdt_ops;
meson_wdt->wdt_dev.max_timeout =
@@ -197,18 +196,18 @@ static int meson_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&meson_wdt->wdt_dev, meson_wdt);
- watchdog_init_timeout(&meson_wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_init_timeout(&meson_wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&meson_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&meson_wdt->wdt_dev, 128);
meson_wdt_stop(&meson_wdt->wdt_dev);
watchdog_stop_on_reboot(&meson_wdt->wdt_dev);
- err = devm_watchdog_register_device(&pdev->dev, &meson_wdt->wdt_dev);
+ err = devm_watchdog_register_device(dev, &meson_wdt->wdt_dev);
if (err)
return err;
- dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
+ dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
meson_wdt->wdt_dev.timeout, nowayout);
return 0;
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index 3cc07447c655..ece56db0a379 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -150,7 +150,7 @@ static int mixcomwd_open(struct inode *inode, struct file *file)
mixcomwd_timer_alive = 0;
}
}
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int mixcomwd_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/mlx_wdt.c b/drivers/watchdog/mlx_wdt.c
index 70c2cbf9c993..03b9ac4b99af 100644
--- a/drivers/watchdog/mlx_wdt.c
+++ b/drivers/watchdog/mlx_wdt.c
@@ -233,20 +233,21 @@ static int mlxreg_wdt_init_timeout(struct mlxreg_wdt *wdt,
static int mlxreg_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct mlxreg_core_platform_data *pdata;
struct mlxreg_wdt *wdt;
int rc;
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "Failed to get platform data.\n");
+ dev_err(dev, "Failed to get platform data.\n");
return -EINVAL;
}
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->regmap = pdata->regmap;
mlxreg_wdt_config(wdt, pdata);
@@ -266,12 +267,11 @@ static int mlxreg_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
mlxreg_wdt_check_card_reset(wdt);
- rc = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
+ rc = devm_watchdog_register_device(dev, &wdt->wdd);
register_error:
if (rc)
- dev_err(&pdev->dev,
- "Cannot register watchdog device (err=%d)\n", rc);
+ dev_err(dev, "Cannot register watchdog device (err=%d)\n", rc);
return rc;
}
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 430c3ab84c07..6340a1f5f471 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -91,8 +91,6 @@ static int moxart_wdt_probe(struct platform_device *pdev)
{
struct moxart_wdt_dev *moxart_wdt;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- struct resource *res;
struct clk *clk;
int err;
unsigned int max_timeout;
@@ -104,12 +102,11 @@ static int moxart_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, moxart_wdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- moxart_wdt->base = devm_ioremap_resource(dev, res);
+ moxart_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(moxart_wdt->base))
return PTR_ERR(moxart_wdt->base);
- clk = of_clk_get(node, 0);
+ clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
pr_err("%s: of_clk_get failed\n", __func__);
return PTR_ERR(clk);
@@ -136,7 +133,8 @@ static int moxart_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&moxart_wdt->dev, moxart_wdt);
- err = watchdog_register_device(&moxart_wdt->dev);
+ watchdog_stop_on_unregister(&moxart_wdt->dev);
+ err = devm_watchdog_register_device(dev, &moxart_wdt->dev);
if (err)
return err;
@@ -146,15 +144,6 @@ static int moxart_wdt_probe(struct platform_device *pdev)
return 0;
}
-static int moxart_wdt_remove(struct platform_device *pdev)
-{
- struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
-
- moxart_wdt_stop(&moxart_wdt->dev);
-
- return 0;
-}
-
static const struct of_device_id moxart_watchdog_match[] = {
{ .compatible = "moxa,moxart-watchdog" },
{ },
@@ -163,7 +152,6 @@ MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
static struct platform_driver moxart_wdt_driver = {
.probe = moxart_wdt_probe,
- .remove = moxart_wdt_remove,
.driver = {
.name = "moxart-watchdog",
.of_match_table = moxart_watchdog_match,
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 069072e6747d..9b6d6a5a27ad 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -149,8 +149,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
if (!ddata)
return -ENOMEM;
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- ddata->base = devm_ioremap_resource(dev, res);
+ ddata->base = devm_platform_ioremap_resource(ofdev, 0);
if (IS_ERR(ddata->base))
return PTR_ERR(ddata->base);
@@ -205,9 +204,10 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
if (ddata->wdd.timeout < ddata->wdd.min_timeout)
ddata->wdd.timeout = ddata->wdd.min_timeout;
- ret = watchdog_register_device(&ddata->wdd);
+ ret = devm_watchdog_register_device(dev, &ddata->wdd);
if (ret) {
- dev_err(dev, "cannot register watchdog device (err=%d)\n", ret);
+ dev_err(dev, "cannot register watchdog device (err=%d)\n",
+ ret);
return ret;
}
@@ -219,17 +219,6 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
return 0;
}
-static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
-{
- struct mpc8xxx_wdt_ddata *ddata = platform_get_drvdata(ofdev);
-
- dev_crit(&ofdev->dev, "Watchdog removed, expect the %s soon!\n",
- reset ? "reset" : "machine check exception");
- watchdog_unregister_device(&ddata->wdd);
-
- return 0;
-}
-
static const struct of_device_id mpc8xxx_wdt_match[] = {
{
.compatible = "mpc83xx_wdt",
@@ -260,7 +249,6 @@ MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match);
static struct platform_driver mpc8xxx_wdt_driver = {
.probe = mpc8xxx_wdt_probe,
- .remove = mpc8xxx_wdt_remove,
.driver = {
.name = "mpc8xxx_wdt",
.of_match_table = mpc8xxx_wdt_match,
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 81208cd3f4ec..cbb3c0dde136 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -133,21 +133,19 @@ static struct watchdog_device mt7621_wdt_dev = {
static int mt7621_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt7621_wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ struct device *dev = &pdev->dev;
+ mt7621_wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mt7621_wdt_base))
return PTR_ERR(mt7621_wdt_base);
- mt7621_wdt_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ mt7621_wdt_reset = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(mt7621_wdt_reset))
reset_control_deassert(mt7621_wdt_reset);
mt7621_wdt_dev.bootstatus = mt7621_wdt_bootcause();
watchdog_init_timeout(&mt7621_wdt_dev, mt7621_wdt_dev.max_timeout,
- &pdev->dev);
+ dev);
watchdog_set_nowayout(&mt7621_wdt_dev, nowayout);
if (mt7621_wdt_is_running(&mt7621_wdt_dev)) {
/*
@@ -164,7 +162,7 @@ static int mt7621_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &mt7621_wdt_dev.status);
}
- return devm_watchdog_register_device(&pdev->dev, &mt7621_wdt_dev);
+ return devm_watchdog_register_device(dev, &mt7621_wdt_dev);
}
static void mt7621_wdt_shutdown(struct platform_device *pdev)
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 4baf64f21aa1..9c3d0033260d 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -153,18 +153,17 @@ static const struct watchdog_ops mtk_wdt_ops = {
static int mtk_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct mtk_wdt_dev *mtk_wdt;
- struct resource *res;
int err;
- mtk_wdt = devm_kzalloc(&pdev->dev, sizeof(*mtk_wdt), GFP_KERNEL);
+ mtk_wdt = devm_kzalloc(dev, sizeof(*mtk_wdt), GFP_KERNEL);
if (!mtk_wdt)
return -ENOMEM;
platform_set_drvdata(pdev, mtk_wdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mtk_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ mtk_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mtk_wdt->wdt_base))
return PTR_ERR(mtk_wdt->wdt_base);
@@ -173,9 +172,9 @@ static int mtk_wdt_probe(struct platform_device *pdev)
mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
mtk_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
mtk_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
- mtk_wdt->wdt_dev.parent = &pdev->dev;
+ mtk_wdt->wdt_dev.parent = dev;
- watchdog_init_timeout(&mtk_wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_init_timeout(&mtk_wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&mtk_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&mtk_wdt->wdt_dev, 128);
@@ -183,29 +182,13 @@ static int mtk_wdt_probe(struct platform_device *pdev)
mtk_wdt_stop(&mtk_wdt->wdt_dev);
- err = watchdog_register_device(&mtk_wdt->wdt_dev);
+ watchdog_stop_on_reboot(&mtk_wdt->wdt_dev);
+ err = devm_watchdog_register_device(dev, &mtk_wdt->wdt_dev);
if (unlikely(err))
return err;
- dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
- mtk_wdt->wdt_dev.timeout, nowayout);
-
- return 0;
-}
-
-static void mtk_wdt_shutdown(struct platform_device *pdev)
-{
- struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
-
- if (watchdog_active(&mtk_wdt->wdt_dev))
- mtk_wdt_stop(&mtk_wdt->wdt_dev);
-}
-
-static int mtk_wdt_remove(struct platform_device *pdev)
-{
- struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&mtk_wdt->wdt_dev);
+ dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
+ mtk_wdt->wdt_dev.timeout, nowayout);
return 0;
}
@@ -247,8 +230,6 @@ static const struct dev_pm_ops mtk_wdt_pm_ops = {
static struct platform_driver mtk_wdt_driver = {
.probe = mtk_wdt_probe,
- .remove = mtk_wdt_remove,
- .shutdown = mtk_wdt_shutdown,
.driver = {
.name = DRV_NAME,
.pm = &mtk_wdt_pm_ops,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index e028e0a2eca0..25a92857b217 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -118,7 +118,7 @@ static int mtx1_wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &mtx1_wdt_device.inuse))
return -EBUSY;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index 315275d7bab6..c785f4f0a196 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -133,7 +133,7 @@ static int mv64x60_wdt_open(struct inode *inode, struct file *file)
mv64x60_wdt_handler_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int mv64x60_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/ni903x_wdt.c b/drivers/watchdog/ni903x_wdt.c
index dc67742e9018..fbc1df86c6cc 100644
--- a/drivers/watchdog/ni903x_wdt.c
+++ b/drivers/watchdog/ni903x_wdt.c
@@ -217,9 +217,7 @@ static int ni903x_acpi_add(struct acpi_device *device)
wdd->parent = dev;
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
- ret = watchdog_init_timeout(wdd, timeout, dev);
- if (ret)
- dev_err(dev, "unable to set timeout value, using default\n");
+ watchdog_init_timeout(wdd, timeout, dev);
ret = watchdog_register_device(wdd);
if (ret) {
diff --git a/drivers/watchdog/nic7018_wdt.c b/drivers/watchdog/nic7018_wdt.c
index dcd265685837..82843abe38f8 100644
--- a/drivers/watchdog/nic7018_wdt.c
+++ b/drivers/watchdog/nic7018_wdt.c
@@ -211,10 +211,7 @@ static int nic7018_probe(struct platform_device *pdev)
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
-
- ret = watchdog_init_timeout(wdd, timeout, dev);
- if (ret)
- dev_warn(dev, "unable to set timeout value, using default\n");
+ watchdog_init_timeout(wdd, timeout, dev);
/* Unlock WDT register */
outb(UNLOCK, wdt->io_base + WDT_REG_LOCK);
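The ni903x and nic7018 hunks stop checking watchdog_init_timeout()'s return value and drop the driver-local warning: when the supplied value is zero or out of range, the function leaves whatever default the driver already put in wdd->timeout, so the extra message added little. A minimal sketch (the 30-second default and foo_* names are assumptions):

#include <linux/watchdog.h>

static void foo_wdt_setup_timeout(struct watchdog_device *wdd,
				  unsigned int timeout_param,
				  struct device *dev)
{
	wdd->timeout = 30;	/* driver default, assumed */

	/*
	 * Fire-and-forget: an invalid or zero timeout_param leaves the
	 * default above untouched, so no driver-side warning is needed.
	 */
	watchdog_init_timeout(wdd, timeout_param, dev);
}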
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
index 0d4213652ecc..9d6c1689b12c 100644
--- a/drivers/watchdog/npcm_wdt.c
+++ b/drivers/watchdog/npcm_wdt.c
@@ -181,16 +181,14 @@ static int npcm_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct npcm_wdt *wdt;
- struct resource *res;
int irq;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->reg = devm_ioremap_resource(dev, res);
+ wdt->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->reg))
return PTR_ERR(wdt->reg);
@@ -216,8 +214,8 @@ static int npcm_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
- ret = devm_request_irq(dev, irq, npcm_wdt_interrupt, 0,
- "watchdog", wdt);
+ ret = devm_request_irq(dev, irq, npcm_wdt_interrupt, 0, "watchdog",
+ wdt);
if (ret)
return ret;
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index 830bd04ff911..f36eae34e848 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -131,7 +131,7 @@ static int nuc900_wdt_open(struct inode *inode, struct file *file)
nuc900_wdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int nuc900_wdt_close(struct inode *inode, struct file *file)
@@ -242,7 +242,6 @@ static struct miscdevice nuc900wdt_miscdev = {
static int nuc900wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
int ret = 0;
nuc900_wdt = devm_kzalloc(&pdev->dev, sizeof(*nuc900_wdt),
@@ -254,8 +253,7 @@ static int nuc900wdt_probe(struct platform_device *pdev)
spin_lock_init(&nuc900_wdt->wdt_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nuc900_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ nuc900_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nuc900_wdt->wdt_base))
return PTR_ERR(nuc900_wdt->wdt_base);
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index a0fabf6f92b0..98d4f5371cf4 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -161,7 +161,7 @@ static int nv_tco_open(struct inode *inode, struct file *file)
/* Reload and activate timer */
tco_timer_keepalive();
tco_timer_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int nv_tco_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index d3f7eb046678..03786992b701 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -151,43 +151,46 @@ static u32 xwdt_selftest(struct xwdt_device *xdev)
return XWT_TIMER_FAILED;
}
+static void xwdt_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int xwdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int rc;
u32 pfreq = 0, enable_once = 0;
- struct resource *res;
struct xwdt_device *xdev;
struct watchdog_device *xilinx_wdt_wdd;
- xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ xdev = devm_kzalloc(dev, sizeof(*xdev), GFP_KERNEL);
if (!xdev)
return -ENOMEM;
xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
xilinx_wdt_wdd->info = &xilinx_wdt_ident;
xilinx_wdt_wdd->ops = &xilinx_wdt_ops;
- xilinx_wdt_wdd->parent = &pdev->dev;
+ xilinx_wdt_wdd->parent = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xdev->base = devm_ioremap_resource(&pdev->dev, res);
+ xdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->base))
return PTR_ERR(xdev->base);
- rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-interval",
+ rc = of_property_read_u32(dev->of_node, "xlnx,wdt-interval",
&xdev->wdt_interval);
if (rc)
- dev_warn(&pdev->dev,
- "Parameter \"xlnx,wdt-interval\" not found\n");
+ dev_warn(dev, "Parameter \"xlnx,wdt-interval\" not found\n");
- rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-enable-once",
+ rc = of_property_read_u32(dev->of_node, "xlnx,wdt-enable-once",
&enable_once);
if (rc)
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"Parameter \"xlnx,wdt-enable-once\" not found\n");
watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
- xdev->clk = devm_clk_get(&pdev->dev, NULL);
+ xdev->clk = devm_clk_get(dev, NULL);
if (IS_ERR(xdev->clk)) {
if (PTR_ERR(xdev->clk) != -ENOENT)
return PTR_ERR(xdev->clk);
@@ -198,10 +201,10 @@ static int xwdt_probe(struct platform_device *pdev)
*/
xdev->clk = NULL;
- rc = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ rc = of_property_read_u32(dev->of_node, "clock-frequency",
&pfreq);
if (rc)
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"The watchdog clock freq cannot be obtained\n");
} else {
pfreq = clk_get_rate(xdev->clk);
@@ -220,44 +223,34 @@ static int xwdt_probe(struct platform_device *pdev)
rc = clk_prepare_enable(xdev->clk);
if (rc) {
- dev_err(&pdev->dev, "unable to enable clock\n");
+ dev_err(dev, "unable to enable clock\n");
return rc;
}
+ rc = devm_add_action_or_reset(dev, xwdt_clk_disable_unprepare,
+ xdev->clk);
+ if (rc)
+ return rc;
rc = xwdt_selftest(xdev);
if (rc == XWT_TIMER_FAILED) {
- dev_err(&pdev->dev, "SelfTest routine error\n");
- goto err_clk_disable;
+ dev_err(dev, "SelfTest routine error\n");
+ return rc;
}
- rc = watchdog_register_device(xilinx_wdt_wdd);
+ rc = devm_watchdog_register_device(dev, xilinx_wdt_wdd);
if (rc) {
- dev_err(&pdev->dev, "Cannot register watchdog (err=%d)\n", rc);
- goto err_clk_disable;
+ dev_err(dev, "Cannot register watchdog (err=%d)\n", rc);
+ return rc;
}
clk_disable(xdev->clk);
- dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
+ dev_info(dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
xdev->base, xilinx_wdt_wdd->timeout);
platform_set_drvdata(pdev, xdev);
return 0;
-err_clk_disable:
- clk_disable_unprepare(xdev->clk);
-
- return rc;
-}
-
-static int xwdt_remove(struct platform_device *pdev)
-{
- struct xwdt_device *xdev = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&xdev->xilinx_wdt_wdd);
- clk_disable_unprepare(xdev->clk);
-
- return 0;
}
/**
@@ -305,7 +298,6 @@ MODULE_DEVICE_TABLE(of, xwdt_of_match);
static struct platform_driver xwdt_driver = {
.probe = xwdt_probe,
- .remove = xwdt_remove,
.driver = {
.name = WATCHDOG_NAME,
.of_match_table = xwdt_of_match,
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index cbd752f9ac56..d49688d93f6a 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -231,7 +231,6 @@ static const struct watchdog_ops omap_wdt_ops = {
static int omap_wdt_probe(struct platform_device *pdev)
{
struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct resource *res;
struct omap_wdt_dev *wdev;
int ret;
@@ -245,8 +244,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
mutex_init(&wdev->lock);
/* reserve static register mappings */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdev->base = devm_ioremap_resource(&pdev->dev, res);
+ wdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdev->base))
return PTR_ERR(wdev->base);
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 9db3b09f7568..cdb0d174c5e2 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -349,13 +349,6 @@ static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate;
}
-static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev,
- unsigned int timeout)
-{
- wdt_dev->timeout = timeout;
- return 0;
-}
-
static const struct watchdog_info orion_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "Orion Watchdog",
@@ -366,7 +359,6 @@ static const struct watchdog_ops orion_wdt_ops = {
.start = orion_wdt_start,
.stop = orion_wdt_stop,
.ping = orion_wdt_ping,
- .set_timeout = orion_wdt_set_timeout,
.get_timeleft = orion_wdt_get_timeleft,
};
@@ -502,8 +494,7 @@ static int orion_wdt_get_regs(struct platform_device *pdev,
of_device_is_compatible(node, "marvell,armada-xp-wdt")) {
/* Dedicated RSTOUT register, can be requested. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dev->rstout = devm_ioremap_resource(&pdev->dev, res);
+ dev->rstout = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dev->rstout))
return PTR_ERR(dev->rstout);
@@ -511,8 +502,7 @@ static int orion_wdt_get_regs(struct platform_device *pdev,
of_device_is_compatible(node, "marvell,armada-380-wdt")) {
/* Dedicated RSTOUT register, can be requested. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dev->rstout = devm_ioremap_resource(&pdev->dev, res);
+ dev->rstout = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dev->rstout))
return PTR_ERR(dev->rstout);
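The orion_wdt hunk deletes a .set_timeout implementation whose only job was to store the new value in wdt_dev->timeout; when the op is absent, the watchdog core performs that assignment itself for devices advertising WDIOF_SETTIMEOUT, so the callback was redundant. A driver-side illustration with placeholder ops (stubbed to compile, not real hardware handlers):

#include <linux/module.h>
#include <linux/watchdog.h>

static int foo_wdt_start(struct watchdog_device *wdd) { return 0; }
static int foo_wdt_stop(struct watchdog_device *wdd) { return 0; }
static int foo_wdt_ping(struct watchdog_device *wdd) { return 0; }

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop  = foo_wdt_stop,
	.ping  = foo_wdt_ping,
	/* no .set_timeout: the core updates wdd->timeout on WDIOC_SETTIMEOUT */
};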
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 2ffa39b46970..ca21d6c240a3 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -286,7 +286,7 @@ static int pc87413_open(struct inode *inode, struct file *file)
pr_info("Watchdog enabled. Timeout set to %d minute(s).\n", timeout);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/**
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index b72ce68eacd3..a3415cf07c98 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -695,7 +695,7 @@ static int pcwd_open(struct inode *inode, struct file *file)
/* Activate */
pcwd_start();
pcwd_keepalive();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int pcwd_close(struct inode *inode, struct file *file)
@@ -734,7 +734,7 @@ static int pcwd_temp_open(struct inode *inode, struct file *file)
if (!pcwd_private.supports_temp)
return -ENODEV;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int pcwd_temp_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 1f78f0908621..5773d2591d3f 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -578,7 +578,7 @@ static int pcipcwd_open(struct inode *inode, struct file *file)
/* Activate */
pcipcwd_start();
pcipcwd_keepalive();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int pcipcwd_release(struct inode *inode, struct file *file)
@@ -620,7 +620,7 @@ static int pcipcwd_temp_open(struct inode *inode, struct file *file)
if (!pcipcwd_private.supports_temp)
return -ENODEV;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int pcipcwd_temp_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 4d02f26156f9..5de6182dae33 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -485,7 +485,7 @@ static int usb_pcwd_open(struct inode *inode, struct file *file)
/* Activate */
usb_pcwd_start(usb_pcwd_device);
usb_pcwd_keepalive(usb_pcwd_device);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int usb_pcwd_release(struct inode *inode, struct file *file)
@@ -524,7 +524,7 @@ static ssize_t usb_pcwd_temperature_read(struct file *file, char __user *data,
static int usb_pcwd_temperature_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int usb_pcwd_temperature_release(struct inode *inode, struct file *file)
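/*
 * Aside, not part of the patch: the nonseekable_open() -> stream_open()
 * conversions above all follow this shape.  stream_open() marks the file as a
 * stream, so reads and writes no longer serialize on f_pos.  The foo_wdt_*
 * names are hypothetical.
 */
#include <linux/fs.h>
#include <linux/module.h>

static int foo_wdt_open(struct inode *inode, struct file *file)
{
	/* a real driver starts or pings the hardware here first */
	return stream_open(inode, file);
}

static const struct file_operations foo_wdt_fops = {
	.owner = THIS_MODULE,
	.open = foo_wdt_open,
	.llseek = no_llseek,
};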
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
index c797305f8338..9a3c53e03c60 100644
--- a/drivers/watchdog/pic32-dmt.c
+++ b/drivers/watchdog/pic32-dmt.c
@@ -168,70 +168,61 @@ static struct watchdog_device pic32_dmt_wdd = {
.ops = &pic32_dmt_fops,
};
+static void pic32_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int pic32_dmt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct pic32_dmt *dmt;
- struct resource *mem;
struct watchdog_device *wdd = &pic32_dmt_wdd;
- dmt = devm_kzalloc(&pdev->dev, sizeof(*dmt), GFP_KERNEL);
+ dmt = devm_kzalloc(dev, sizeof(*dmt), GFP_KERNEL);
if (!dmt)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmt->regs = devm_ioremap_resource(&pdev->dev, mem);
+ dmt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmt->regs))
return PTR_ERR(dmt->regs);
- dmt->clk = devm_clk_get(&pdev->dev, NULL);
+ dmt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(dmt->clk)) {
- dev_err(&pdev->dev, "clk not found\n");
+ dev_err(dev, "clk not found\n");
return PTR_ERR(dmt->clk);
}
ret = clk_prepare_enable(dmt->clk);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, pic32_clk_disable_unprepare,
+ dmt->clk);
+ if (ret)
+ return ret;
wdd->timeout = pic32_dmt_get_timeout_secs(dmt);
if (!wdd->timeout) {
- dev_err(&pdev->dev,
- "failed to read watchdog register timeout\n");
- ret = -EINVAL;
- goto out_disable_clk;
+ dev_err(dev, "failed to read watchdog register timeout\n");
+ return -EINVAL;
}
- dev_info(&pdev->dev, "timeout %d\n", wdd->timeout);
+ dev_info(dev, "timeout %d\n", wdd->timeout);
wdd->bootstatus = pic32_dmt_bootstatus(dmt) ? WDIOF_CARDRESET : 0;
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
watchdog_set_drvdata(wdd, dmt);
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev, "watchdog register failed, err %d\n", ret);
- goto out_disable_clk;
+ dev_err(dev, "watchdog register failed, err %d\n", ret);
+ return ret;
}
platform_set_drvdata(pdev, wdd);
return 0;
-
-out_disable_clk:
- clk_disable_unprepare(dmt->clk);
- return ret;
-}
-
-static int pic32_dmt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
- struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
-
- watchdog_unregister_device(wdd);
- clk_disable_unprepare(dmt->clk);
-
- return 0;
}
static const struct of_device_id pic32_dmt_of_ids[] = {
@@ -242,7 +233,6 @@ MODULE_DEVICE_TABLE(of, pic32_dmt_of_ids);
static struct platform_driver pic32_dmt_driver = {
.probe = pic32_dmt_probe,
- .remove = pic32_dmt_remove,
.driver = {
.name = "pic32-dmt",
.of_match_table = of_match_ptr(pic32_dmt_of_ids),
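/*
 * Aside, not part of the patch: the pic32-dmt change above (and the pic32-wdt,
 * pnx4008, qcom, rtd119x, st_lpc and stm32 changes below) follow one pattern --
 * register a devm undo action for the enabled clock and switch to
 * devm_watchdog_register_device(), so probe() needs no error unwinding and the
 * .remove callback can be dropped.  Everything prefixed foo_ is hypothetical.
 */
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>

static int foo_wdt_start(struct watchdog_device *wdd)
{
	return 0;		/* real hardware start would go here */
}

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
};

static const struct watchdog_info foo_wdt_info = {
	.options = WDIOF_KEEPALIVEPING,
	.identity = "foo-wdt",
};

static void foo_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int foo_wdt_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct watchdog_device *wdd;
	struct clk *clk;
	int ret;

	wdd = devm_kzalloc(dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* undone automatically on probe failure or device removal */
	ret = devm_add_action_or_reset(dev, foo_clk_disable_unprepare, clk);
	if (ret)
		return ret;

	wdd->info = &foo_wdt_info;
	wdd->ops = &foo_wdt_ops;
	wdd->timeout = 30;

	/* unregistered automatically, so no .remove callback is needed */
	return devm_watchdog_register_device(dev, wdd);
}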
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
index e2761068dc6f..540500940cc0 100644
--- a/drivers/watchdog/pic32-wdt.c
+++ b/drivers/watchdog/pic32-wdt.c
@@ -166,89 +166,77 @@ static const struct of_device_id pic32_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, pic32_wdt_dt_ids);
+static void pic32_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int pic32_wdt_drv_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct watchdog_device *wdd = &pic32_wdd;
struct pic32_wdt *wdt;
- struct resource *mem;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->regs = devm_ioremap_resource(&pdev->dev, mem);
+ wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->regs))
return PTR_ERR(wdt->regs);
- wdt->rst_base = devm_ioremap(&pdev->dev, PIC32_BASE_RESET, 0x10);
+ wdt->rst_base = devm_ioremap(dev, PIC32_BASE_RESET, 0x10);
if (!wdt->rst_base)
return -ENOMEM;
- wdt->clk = devm_clk_get(&pdev->dev, NULL);
+ wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(&pdev->dev, "clk not found\n");
+ dev_err(dev, "clk not found\n");
return PTR_ERR(wdt->clk);
}
ret = clk_prepare_enable(wdt->clk);
if (ret) {
- dev_err(&pdev->dev, "clk enable failed\n");
+ dev_err(dev, "clk enable failed\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, pic32_clk_disable_unprepare,
+ wdt->clk);
+ if (ret)
+ return ret;
if (pic32_wdt_is_win_enabled(wdt)) {
- dev_err(&pdev->dev, "windowed-clear mode is not supported.\n");
- ret = -ENODEV;
- goto out_disable_clk;
+ dev_err(dev, "windowed-clear mode is not supported.\n");
+ return -ENODEV;
}
- wdd->timeout = pic32_wdt_get_timeout_secs(wdt, &pdev->dev);
+ wdd->timeout = pic32_wdt_get_timeout_secs(wdt, dev);
if (!wdd->timeout) {
- dev_err(&pdev->dev,
- "failed to read watchdog register timeout\n");
- ret = -EINVAL;
- goto out_disable_clk;
+ dev_err(dev, "failed to read watchdog register timeout\n");
+ return -EINVAL;
}
- dev_info(&pdev->dev, "timeout %d\n", wdd->timeout);
+ dev_info(dev, "timeout %d\n", wdd->timeout);
wdd->bootstatus = pic32_wdt_bootstatus(wdt) ? WDIOF_CARDRESET : 0;
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
watchdog_set_drvdata(wdd, wdt);
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev, "watchdog register failed, err %d\n", ret);
- goto out_disable_clk;
+ dev_err(dev, "watchdog register failed, err %d\n", ret);
+ return ret;
}
platform_set_drvdata(pdev, wdd);
return 0;
-
-out_disable_clk:
- clk_disable_unprepare(wdt->clk);
-
- return ret;
-}
-
-static int pic32_wdt_drv_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
- struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
-
- watchdog_unregister_device(wdd);
- clk_disable_unprepare(wdt->clk);
-
- return 0;
}
static struct platform_driver pic32_wdt_driver = {
.probe = pic32_wdt_drv_probe,
- .remove = pic32_wdt_drv_remove,
.driver = {
.name = "pic32-wdt",
.of_match_table = of_match_ptr(pic32_wdt_dt_ids),
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index bb97f5b2f7eb..8938b3fb2b2d 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -118,7 +118,7 @@ static int pikawdt_open(struct inode *inode, struct file *file)
pikawdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/*
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 7f10041fcf5b..2d3652004e39 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -132,15 +132,16 @@ static const struct watchdog_ops pm8916_wdt_ops = {
static int pm8916_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct pm8916_wdt *wdt;
struct device *parent;
int err, irq;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- parent = pdev->dev.parent;
+ parent = dev->parent;
/*
* The pm8916-pon-wdt is a child of the pon device, which is a child
@@ -150,20 +151,20 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
*/
wdt->regmap = dev_get_regmap(parent->parent, NULL);
if (!wdt->regmap) {
- dev_err(&pdev->dev, "failed to locate regmap\n");
+ dev_err(dev, "failed to locate regmap\n");
return -ENODEV;
}
err = device_property_read_u32(parent, "reg", &wdt->baseaddr);
if (err) {
- dev_err(&pdev->dev, "failed to get pm8916-pon address\n");
+ dev_err(dev, "failed to get pm8916-pon address\n");
return err;
}
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- if (devm_request_irq(&pdev->dev, irq, pm8916_wdt_isr, 0,
- "pm8916_wdt", wdt))
+ if (devm_request_irq(dev, irq, pm8916_wdt_isr, 0, "pm8916_wdt",
+ wdt))
irq = 0;
}
@@ -172,23 +173,23 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL,
RESET_TYPE_HARD);
if (err) {
- dev_err(&pdev->dev, "failed configure watchdog\n");
+ dev_err(dev, "failed configure watchdog\n");
return err;
}
wdt->wdev.info = (irq > 0) ? &pm8916_wdt_pt_ident : &pm8916_wdt_ident,
wdt->wdev.ops = &pm8916_wdt_ops,
- wdt->wdev.parent = &pdev->dev;
+ wdt->wdev.parent = dev;
wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT;
wdt->wdev.max_timeout = PM8916_WDT_MAX_TIMEOUT;
wdt->wdev.timeout = PM8916_WDT_DEFAULT_TIMEOUT;
wdt->wdev.pretimeout = 0;
watchdog_set_drvdata(&wdt->wdev, wdt);
- watchdog_init_timeout(&wdt->wdev, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdev, 0, dev);
pm8916_wdt_configure_timers(&wdt->wdev);
- return devm_watchdog_register_device(&pdev->dev, &wdt->wdev);
+ return devm_watchdog_register_device(dev, &wdt->wdev);
}
static const struct of_device_id pm8916_wdt_id_table[] = {
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 8e261799c84e..d9e03544aeae 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -183,54 +183,50 @@ static struct watchdog_device pnx4008_wdd = {
.max_timeout = MAX_HEARTBEAT,
};
+static void pnx4008_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int pnx4008_wdt_probe(struct platform_device *pdev)
{
- struct resource *r;
+ struct device *dev = &pdev->dev;
int ret = 0;
- watchdog_init_timeout(&pnx4008_wdd, heartbeat, &pdev->dev);
+ watchdog_init_timeout(&pnx4008_wdd, heartbeat, dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt_base = devm_ioremap_resource(&pdev->dev, r);
+ wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
- wdt_clk = devm_clk_get(&pdev->dev, NULL);
+ wdt_clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt_clk))
return PTR_ERR(wdt_clk);
ret = clk_prepare_enable(wdt_clk);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, pnx4008_clk_disable_unprepare,
+ wdt_clk);
+ if (ret)
+ return ret;
pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
WDIOF_CARDRESET : 0;
- pnx4008_wdd.parent = &pdev->dev;
+ pnx4008_wdd.parent = dev;
watchdog_set_nowayout(&pnx4008_wdd, nowayout);
watchdog_set_restart_priority(&pnx4008_wdd, 128);
- pnx4008_wdt_stop(&pnx4008_wdd); /* disable for now */
+ if (readl(WDTIM_CTRL(wdt_base)) & COUNT_ENAB)
+ set_bit(WDOG_HW_RUNNING, &pnx4008_wdd.status);
- ret = watchdog_register_device(&pnx4008_wdd);
+ ret = devm_watchdog_register_device(dev, &pnx4008_wdd);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot register watchdog device\n");
- goto disable_clk;
+ dev_err(dev, "cannot register watchdog device\n");
+ return ret;
}
- dev_info(&pdev->dev, "heartbeat %d sec\n", pnx4008_wdd.timeout);
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(wdt_clk);
- return ret;
-}
-
-static int pnx4008_wdt_remove(struct platform_device *pdev)
-{
- watchdog_unregister_device(&pnx4008_wdd);
-
- clk_disable_unprepare(wdt_clk);
+ dev_info(dev, "heartbeat %d sec\n", pnx4008_wdd.timeout);
return 0;
}
@@ -249,7 +245,6 @@ static struct platform_driver platform_wdt_driver = {
.of_match_table = of_match_ptr(pnx4008_wdt_match),
},
.probe = pnx4008_wdt_probe,
- .remove = pnx4008_wdt_remove,
};
module_platform_driver(platform_wdt_driver);
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
index 882fdcb46ad1..312899f39fd2 100644
--- a/drivers/watchdog/pnx833x_wdt.c
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -116,7 +116,7 @@ static int pnx833x_wdt_open(struct inode *inode, struct file *file)
pr_info("Started watchdog timer\n");
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int pnx833x_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 5dfd604477a4..6d29c33b1316 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -142,22 +142,28 @@ static const struct watchdog_info qcom_wdt_info = {
.identity = KBUILD_MODNAME,
};
+static void qcom_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int qcom_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct qcom_wdt *wdt;
struct resource *res;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
const u32 *regs;
u32 percpu_offset;
int ret;
- regs = of_device_get_match_data(&pdev->dev);
+ regs = of_device_get_match_data(dev);
if (!regs) {
- dev_err(&pdev->dev, "Unsupported QCOM WDT module\n");
+ dev_err(dev, "Unsupported QCOM WDT module\n");
return -ENODEV;
}
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -172,21 +178,25 @@ static int qcom_wdt_probe(struct platform_device *pdev)
res->start += percpu_offset;
res->end += percpu_offset;
- wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ wdt->base = devm_ioremap_resource(dev, res);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->clk = devm_clk_get(&pdev->dev, NULL);
+ wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(&pdev->dev, "failed to get input clock\n");
+ dev_err(dev, "failed to get input clock\n");
return PTR_ERR(wdt->clk);
}
ret = clk_prepare_enable(wdt->clk);
if (ret) {
- dev_err(&pdev->dev, "failed to setup clock\n");
+ dev_err(dev, "failed to setup clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, qcom_clk_disable_unprepare,
+ wdt->clk);
+ if (ret)
+ return ret;
/*
* We use the clock rate to calculate the max timeout, so ensure it's
@@ -199,16 +209,15 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->rate = clk_get_rate(wdt->clk);
if (wdt->rate == 0 ||
wdt->rate > 0x10000000U) {
- dev_err(&pdev->dev, "invalid clock rate\n");
- ret = -EINVAL;
- goto err_clk_unprepare;
+ dev_err(dev, "invalid clock rate\n");
+ return -EINVAL;
}
wdt->wdd.info = &qcom_wdt_info;
wdt->wdd.ops = &qcom_wdt_ops;
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->layout = regs;
if (readl(wdt_addr(wdt, WDT_STS)) & 1)
@@ -220,29 +229,16 @@ static int qcom_wdt_probe(struct platform_device *pdev)
* the max instead.
*/
wdt->wdd.timeout = min(wdt->wdd.max_timeout, 30U);
- watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
- ret = watchdog_register_device(&wdt->wdd);
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register watchdog\n");
- goto err_clk_unprepare;
+ dev_err(dev, "failed to register watchdog\n");
+ return ret;
}
platform_set_drvdata(pdev, wdt);
return 0;
-
-err_clk_unprepare:
- clk_disable_unprepare(wdt->clk);
- return ret;
-}
-
-static int qcom_wdt_remove(struct platform_device *pdev)
-{
- struct qcom_wdt *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&wdt->wdd);
- clk_disable_unprepare(wdt->clk);
- return 0;
}
static int __maybe_unused qcom_wdt_suspend(struct device *dev)
@@ -277,7 +273,6 @@ MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
static struct platform_driver qcom_watchdog_driver = {
.probe = qcom_wdt_probe,
- .remove = qcom_wdt_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qcom_wdt_of_table,
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 3a75f3b53452..e74d5cf272ab 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -150,7 +150,7 @@ static int rc32434_wdt_open(struct inode *inode, struct file *file)
rc32434_wdt_start();
rc32434_wdt_ping();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int rc32434_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index a281aa84bfb1..4382e9556860 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -142,7 +142,7 @@ static int rdc321x_wdt_open(struct inode *inode, struct file *file)
if (test_and_set_bit(0, &rdc321x_wdt_device.inuse))
return -EBUSY;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int rdc321x_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 622ede529912..565dbc1ec638 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -151,7 +151,6 @@ static const struct soc_device_attribute rwdt_quirks_match[] = {
.data = (void *)1, /* needs single CPU */
}, {
.soc_id = "r8a7792",
- .revision = "*",
.data = (void *)0, /* needs SMP disabled */
},
{ /* sentinel */ }
@@ -177,7 +176,6 @@ static inline bool rwdt_blacklisted(struct device *dev) { return false; }
static int rwdt_probe(struct platform_device *pdev)
{
struct rwdt_priv *priv;
- struct resource *res;
struct clk *clk;
unsigned long clks_per_sec;
int ret, i;
@@ -189,8 +187,7 @@ static int rwdt_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -238,9 +235,7 @@ static int rwdt_probe(struct platform_device *pdev)
watchdog_stop_on_unregister(&priv->wdev);
/* This overrides the default timeout only if DT configuration was found */
- ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
- if (ret)
- dev_warn(&pdev->dev, "Specified timeout value invalid, using default\n");
+ watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
ret = watchdog_register_device(&priv->wdev);
if (ret < 0)
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index aba53424605e..f7f7a7a62022 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -76,7 +76,7 @@ static void riowd_writereg(struct riowd *p, u8 val, int index)
static int riowd_open(struct inode *inode, struct file *filp)
{
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
return 0;
}
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
index e60f55702ab7..21fcb36f9074 100644
--- a/drivers/watchdog/rn5t618_wdt.c
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -146,11 +146,12 @@ static const struct watchdog_ops rn5t618_wdt_ops = {
static int rn5t618_wdt_probe(struct platform_device *pdev)
{
- struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct rn5t618 *rn5t618 = dev_get_drvdata(dev->parent);
struct rn5t618_wdt *wdt;
int min_timeout, max_timeout;
- wdt = devm_kzalloc(&pdev->dev, sizeof(struct rn5t618_wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(struct rn5t618_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -163,10 +164,10 @@ static int rn5t618_wdt_probe(struct platform_device *pdev)
wdt->wdt_dev.min_timeout = min_timeout;
wdt->wdt_dev.max_timeout = max_timeout;
wdt->wdt_dev.timeout = max_timeout;
- wdt->wdt_dev.parent = &pdev->dev;
+ wdt->wdt_dev.parent = dev;
watchdog_set_drvdata(&wdt->wdt_dev, wdt);
- watchdog_init_timeout(&wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&wdt->wdt_dev, nowayout);
platform_set_drvdata(pdev, wdt);
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index db7c57d82cfd..905e60f45eec 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -141,19 +141,18 @@ static struct watchdog_device rt288x_wdt_dev = {
static int rt288x_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct device *dev = &pdev->dev;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rt288x_wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ rt288x_wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rt288x_wdt_base))
return PTR_ERR(rt288x_wdt_base);
- rt288x_wdt_clk = devm_clk_get(&pdev->dev, NULL);
+ rt288x_wdt_clk = devm_clk_get(dev, NULL);
if (IS_ERR(rt288x_wdt_clk))
return PTR_ERR(rt288x_wdt_clk);
- rt288x_wdt_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ rt288x_wdt_reset = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(rt288x_wdt_reset))
reset_control_deassert(rt288x_wdt_reset);
@@ -161,31 +160,20 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
- rt288x_wdt_dev.parent = &pdev->dev;
+ rt288x_wdt_dev.parent = dev;
watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout,
- &pdev->dev);
+ dev);
watchdog_set_nowayout(&rt288x_wdt_dev, nowayout);
- ret = watchdog_register_device(&rt288x_wdt_dev);
+ watchdog_stop_on_reboot(&rt288x_wdt_dev);
+ ret = devm_watchdog_register_device(dev, &rt288x_wdt_dev);
if (!ret)
- dev_info(&pdev->dev, "Initialized\n");
+ dev_info(dev, "Initialized\n");
return 0;
}
-static int rt288x_wdt_remove(struct platform_device *pdev)
-{
- watchdog_unregister_device(&rt288x_wdt_dev);
-
- return 0;
-}
-
-static void rt288x_wdt_shutdown(struct platform_device *pdev)
-{
- rt288x_wdt_stop(&rt288x_wdt_dev);
-}
-
static const struct of_device_id rt288x_wdt_match[] = {
{ .compatible = "ralink,rt2880-wdt" },
{},
@@ -194,8 +182,6 @@ MODULE_DEVICE_TABLE(of, rt288x_wdt_match);
static struct platform_driver rt288x_wdt_driver = {
.probe = rt288x_wdt_probe,
- .remove = rt288x_wdt_remove,
- .shutdown = rt288x_wdt_shutdown,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = rt288x_wdt_match,
diff --git a/drivers/watchdog/rtd119x_wdt.c b/drivers/watchdog/rtd119x_wdt.c
index d001c17ddfde..834b94ff3f90 100644
--- a/drivers/watchdog/rtd119x_wdt.c
+++ b/drivers/watchdog/rtd119x_wdt.c
@@ -9,7 +9,6 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -95,37 +94,43 @@ static const struct of_device_id rtd119x_wdt_dt_ids[] = {
{ }
};
+static void rtd119x_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int rtd119x_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rtd119x_watchdog_device *data;
- struct resource *res;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- data->clk = of_clk_get(pdev->dev.of_node, 0);
+ data->clk = devm_clk_get(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
ret = clk_prepare_enable(data->clk);
- if (ret) {
- clk_put(data->clk);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, rtd119x_clk_disable_unprepare,
+ data->clk);
+ if (ret)
return ret;
- }
data->wdt_dev.info = &rtd119x_wdt_info;
data->wdt_dev.ops = &rtd119x_wdt_ops;
data->wdt_dev.timeout = 120;
data->wdt_dev.max_timeout = 0xffffffff / clk_get_rate(data->clk);
data->wdt_dev.min_timeout = 1;
- data->wdt_dev.parent = &pdev->dev;
+ data->wdt_dev.parent = dev;
watchdog_stop_on_reboot(&data->wdt_dev);
watchdog_set_drvdata(&data->wdt_dev, data);
@@ -135,31 +140,11 @@ static int rtd119x_wdt_probe(struct platform_device *pdev)
rtd119x_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
rtd119x_wdt_stop(&data->wdt_dev);
- ret = devm_watchdog_register_device(&pdev->dev, &data->wdt_dev);
- if (ret) {
- clk_disable_unprepare(data->clk);
- clk_put(data->clk);
- return ret;
- }
-
- return 0;
-}
-
-static int rtd119x_wdt_remove(struct platform_device *pdev)
-{
- struct rtd119x_watchdog_device *data = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&data->wdt_dev);
-
- clk_disable_unprepare(data->clk);
- clk_put(data->clk);
-
- return 0;
+ return devm_watchdog_register_device(dev, &data->wdt_dev);
}
static struct platform_driver rtd119x_wdt_driver = {
.probe = rtd119x_wdt_probe,
- .remove = rtd119x_wdt_remove,
.driver = {
.name = "rtd1295-watchdog",
.of_match_table = rtd119x_wdt_dt_ids,
diff --git a/drivers/watchdog/rza_wdt.c b/drivers/watchdog/rza_wdt.c
index 781bb572e6af..7b6c365f7cd3 100644
--- a/drivers/watchdog/rza_wdt.c
+++ b/drivers/watchdog/rza_wdt.c
@@ -166,35 +166,34 @@ static const struct watchdog_ops rza_wdt_ops = {
static int rza_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rza_wdt *priv;
- struct resource *res;
unsigned long rate;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
rate = clk_get_rate(priv->clk);
if (rate < 16384) {
- dev_err(&pdev->dev, "invalid clock rate (%ld)\n", rate);
+ dev_err(dev, "invalid clock rate (%ld)\n", rate);
return -ENOENT;
}
priv->wdev.info = &rza_wdt_ident,
priv->wdev.ops = &rza_wdt_ops,
- priv->wdev.parent = &pdev->dev;
+ priv->wdev.parent = dev;
- priv->cks = (u8)(uintptr_t)of_device_get_match_data(&pdev->dev);
+ priv->cks = (u8)(uintptr_t) of_device_get_match_data(dev);
if (priv->cks == CKS_4BIT) {
/* Assume slowest clock rate possible (CKS=0xF) */
priv->wdev.max_timeout = (DIVIDER_4BIT * U8_MAX) / rate;
@@ -209,19 +208,19 @@ static int rza_wdt_probe(struct platform_device *pdev)
* max_hw_heartbeat_ms.
*/
priv->wdev.max_hw_heartbeat_ms = (1000 * U8_MAX) / rate;
- dev_dbg(&pdev->dev, "max hw timeout of %dms\n",
- priv->wdev.max_hw_heartbeat_ms);
+ dev_dbg(dev, "max hw timeout of %dms\n",
+ priv->wdev.max_hw_heartbeat_ms);
}
priv->wdev.min_timeout = 1;
priv->wdev.timeout = DEFAULT_TIMEOUT;
- watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
+ watchdog_init_timeout(&priv->wdev, 0, dev);
watchdog_set_drvdata(&priv->wdev, priv);
- ret = devm_watchdog_register_device(&pdev->dev, &priv->wdev);
+ ret = devm_watchdog_register_device(dev, &priv->wdev);
if (ret)
- dev_err(&pdev->dev, "Cannot register watchdog device\n");
+ dev_err(dev, "Cannot register watchdog device\n");
return ret;
}
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index adaa43543f0a..4267b9e8734b 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -522,7 +522,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct s3c2410_wdt *wdt;
- struct resource *wdt_mem;
struct resource *wdt_irq;
unsigned int wtcon;
int started = 0;
@@ -554,8 +553,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
}
/* get the memory region for the watchdog timer */
- wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->reg_base = devm_ioremap_resource(dev, wdt_mem);
+ wdt->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->reg_base)) {
ret = PTR_ERR(wdt->reg_base);
goto err;
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index d3be4f831db5..bfa035e1a75e 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -59,7 +59,7 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
writel_relaxed(OSSR_M3, OSSR);
writel_relaxed(OWER_WME, OWER);
writel_relaxed(readl_relaxed(OIER) | OIER_E3, OIER);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/*
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index 1e93c1b0e3cf..111695223aae 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -199,15 +199,15 @@ static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
static int sama5d4_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct sama5d4_wdt *wdt;
- struct resource *res;
void __iomem *regs;
u32 irq = 0;
u32 timeout;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -221,33 +221,31 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(wdd, wdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
wdt->reg_base = regs;
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq)
- dev_warn(&pdev->dev, "failed to get IRQ from DT\n");
+ dev_warn(dev, "failed to get IRQ from DT\n");
- ret = of_sama5d4_wdt_init(pdev->dev.of_node, wdt);
+ ret = of_sama5d4_wdt_init(dev->of_node, wdt);
if (ret)
return ret;
if ((wdt->mr & AT91_WDT_WDFIEN) && irq) {
- ret = devm_request_irq(&pdev->dev, irq, sama5d4_wdt_irq_handler,
+ ret = devm_request_irq(dev, irq, sama5d4_wdt_irq_handler,
IRQF_SHARED | IRQF_IRQPOLL |
IRQF_NO_SUSPEND, pdev->name, pdev);
if (ret) {
- dev_err(&pdev->dev,
- "cannot register interrupt handler\n");
+ dev_err(dev, "cannot register interrupt handler\n");
return ret;
}
}
- watchdog_init_timeout(wdd, wdt_timeout, &pdev->dev);
+ watchdog_init_timeout(wdd, wdt_timeout, dev);
timeout = WDT_SEC2TICKS(wdd->timeout);
@@ -260,31 +258,21 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdd, nowayout);
- ret = watchdog_register_device(wdd);
+ watchdog_stop_on_unregister(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register watchdog device\n");
+ dev_err(dev, "failed to register watchdog device\n");
return ret;
}
platform_set_drvdata(pdev, wdt);
- dev_info(&pdev->dev, "initialized (timeout = %d sec, nowayout = %d)\n",
+ dev_info(dev, "initialized (timeout = %d sec, nowayout = %d)\n",
wdd->timeout, nowayout);
return 0;
}
-static int sama5d4_wdt_remove(struct platform_device *pdev)
-{
- struct sama5d4_wdt *wdt = platform_get_drvdata(pdev);
-
- sama5d4_wdt_stop(&wdt->wdd);
-
- watchdog_unregister_device(&wdt->wdd);
-
- return 0;
-}
-
static const struct of_device_id sama5d4_wdt_of_match[] = {
{ .compatible = "atmel,sama5d4-wdt", },
{ }
@@ -312,7 +300,6 @@ static SIMPLE_DEV_PM_OPS(sama5d4_wdt_pm_ops, NULL,
static struct platform_driver sama5d4_wdt_driver = {
.probe = sama5d4_wdt_probe,
- .remove = sama5d4_wdt_remove,
.driver = {
.name = "sama5d4_wdt",
.pm = &sama5d4_wdt_pm_ops,
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 3abae50773b8..5a6ced7a7e8f 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -67,7 +67,7 @@ static DEFINE_SPINLOCK(sbwd_lock);
*
* wdog is the iomem address of the cfg register
*/
-void sbwdog_set(char __iomem *wdog, unsigned long t)
+static void sbwdog_set(char __iomem *wdog, unsigned long t)
{
spin_lock(&sbwd_lock);
__raw_writeb(0, wdog);
@@ -81,7 +81,7 @@ void sbwdog_set(char __iomem *wdog, unsigned long t)
*
* wdog is the iomem address of the cfg register
*/
-void sbwdog_pet(char __iomem *wdog)
+static void sbwdog_pet(char __iomem *wdog)
{
spin_lock(&sbwd_lock);
__raw_writeb(__raw_readb(wdog) | 1, wdog);
@@ -105,7 +105,7 @@ static const struct watchdog_info ident = {
*/
static int sbwdog_open(struct inode *inode, struct file *file)
{
- nonseekable_open(inode, file);
+ stream_open(inode, file);
if (test_and_set_bit(0, &sbwdog_gate))
return -EBUSY;
__module_get(THIS_MODULE);
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 72d15fd1f183..4d127a91cbdc 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -208,7 +208,7 @@ static int fop_open(struct inode *inode, struct file *file)
/* Good, fire up the show */
wdt_startup();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index 5f268add17ce..efc81b318939 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -136,7 +136,7 @@ static int fop_open(struct inode *inode, struct file *file)
wdt_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index da60560ca446..3396024e7b76 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -271,7 +271,7 @@ static int sbc8360_open(struct inode *inode, struct file *file)
/* Activate and ping once to start the countdown */
sbc8360_activate();
sbc8360_ping();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int sbc8360_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index a1c502e0d8ec..783037ffd7d8 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -78,7 +78,7 @@ static int epx_c3_open(struct inode *inode, struct file *file)
epx_c3_alive = 1;
pr_info("Started watchdog timer\n");
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int epx_c3_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index a517d8bae757..3822a60a8d2b 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -75,7 +75,7 @@ static int fitpc2_wdt_open(struct inode *inode, struct file *file)
wdt_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static ssize_t fitpc2_wdt_write(struct file *file, const char *data,
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index e8bd9887c566..3219422f67a9 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -161,7 +161,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
- arch_counter_get_cntvct();
+ arch_timer_read_counter();
do_div(timeleft, gwdt->clk);
@@ -231,7 +231,6 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct sbsa_gwdt *gwdt;
- struct resource *res;
int ret, irq;
u32 status;
@@ -240,13 +239,11 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, gwdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cf_base = devm_ioremap_resource(dev, res);
+ cf_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cf_base))
return PTR_ERR(cf_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- rf_base = devm_ioremap_resource(dev, res);
+ rf_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(rf_base))
return PTR_ERR(rf_base);
@@ -313,7 +310,8 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
*/
sbsa_gwdt_set_timeout(wdd, wdd->timeout);
- ret = watchdog_register_device(wdd);
+ watchdog_stop_on_reboot(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
@@ -324,22 +322,6 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
return 0;
}
-static void sbsa_gwdt_shutdown(struct platform_device *pdev)
-{
- struct sbsa_gwdt *gwdt = platform_get_drvdata(pdev);
-
- sbsa_gwdt_stop(&gwdt->wdd);
-}
-
-static int sbsa_gwdt_remove(struct platform_device *pdev)
-{
- struct sbsa_gwdt *gwdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&gwdt->wdd);
-
- return 0;
-}
-
/* Disable watchdog if it is active during suspend */
static int __maybe_unused sbsa_gwdt_suspend(struct device *dev)
{
@@ -385,8 +367,6 @@ static struct platform_driver sbsa_gwdt_driver = {
.of_match_table = sbsa_gwdt_of_match,
},
.probe = sbsa_gwdt_probe,
- .remove = sbsa_gwdt_remove,
- .shutdown = sbsa_gwdt_shutdown,
.id_table = sbsa_gwdt_pdev_match,
};
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index e035a4d4b299..3c2e9355410a 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -178,7 +178,7 @@ static int sc1200wdt_open(struct inode *inode, struct file *file)
sc1200wdt_start();
pr_info("Watchdog enabled, timeout = %d min(s)", timeout);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 403542f9ed8d..44797414c886 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -258,7 +258,7 @@ static int fop_open(struct inode *inode, struct file *file)
/* Good, fire up the show */
wdt_startup();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 814cdf539b0f..ed6e9fac5d74 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -316,7 +316,7 @@ static int sch311x_wdt_open(struct inode *inode, struct file *file)
* Activate
*/
sch311x_wdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int sch311x_wdt_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index ec4063ebb41a..85f2d8e06cd0 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -102,7 +102,7 @@ static int scx200_wdt_open(struct inode *inode, struct file *file)
return -EBUSY;
scx200_wdt_enable();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int scx200_wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index a7d6425db807..e7617b7df70b 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -220,7 +220,6 @@ static struct watchdog_device sh_wdt_dev = {
static int sh_wdt_probe(struct platform_device *pdev)
{
struct sh_wdt *wdt;
- struct resource *res;
int rc;
/*
@@ -245,8 +244,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
wdt->clk = NULL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(wdt->dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index ac0c9d2c4aee..e79a4097d50b 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -146,22 +146,23 @@ static struct watchdog_device sirfsoc_wdd = {
static int sirfsoc_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct device *dev = &pdev->dev;
int ret;
void __iomem *base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
watchdog_set_drvdata(&sirfsoc_wdd, (__force void *)base);
- watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
+ watchdog_init_timeout(&sirfsoc_wdd, timeout, dev);
watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
- sirfsoc_wdd.parent = &pdev->dev;
+ sirfsoc_wdd.parent = dev;
- ret = watchdog_register_device(&sirfsoc_wdd);
+ watchdog_stop_on_reboot(&sirfsoc_wdd);
+ watchdog_stop_on_unregister(&sirfsoc_wdd);
+ ret = devm_watchdog_register_device(dev, &sirfsoc_wdd);
if (ret)
return ret;
@@ -170,19 +171,6 @@ static int sirfsoc_wdt_probe(struct platform_device *pdev)
return 0;
}
-static void sirfsoc_wdt_shutdown(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
-
- sirfsoc_wdt_disable(wdd);
-}
-
-static int sirfsoc_wdt_remove(struct platform_device *pdev)
-{
- sirfsoc_wdt_shutdown(pdev);
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int sirfsoc_wdt_suspend(struct device *dev)
{
@@ -220,8 +208,6 @@ static struct platform_driver sirfsoc_wdt_driver = {
.of_match_table = sirfsoc_wdt_of_match,
},
.probe = sirfsoc_wdt_probe,
- .remove = sirfsoc_wdt_remove,
- .shutdown = sirfsoc_wdt_shutdown,
};
module_platform_driver(sirfsoc_wdt_driver);
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index c768dcd53034..a22170775273 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -366,7 +366,7 @@ static int wb_smsc_wdt_open(struct inode *inode, struct file *file)
pr_info("Watchdog enabled. Timeout set to %d %s\n",
timeout, (unit == UNIT_SECOND) ? "second(s)" : "minute(s)");
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/* close => shut off the timer */
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 41aaae2d5287..553735b256e2 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -395,9 +395,7 @@ static int sp5100_tco_probe(struct platform_device *pdev)
wdd->min_timeout = 1;
wdd->max_timeout = 0xffff;
- if (watchdog_init_timeout(wdd, heartbeat, NULL))
- dev_info(dev, "timeout value invalid, using %d\n",
- wdd->timeout);
+ watchdog_init_timeout(wdd, heartbeat, NULL);
watchdog_set_nowayout(wdd, nowayout);
watchdog_stop_on_reboot(wdd);
watchdog_stop_on_unregister(wdd);
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index ff9397d9638a..14874e9b207b 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -245,9 +245,7 @@ static u32 sprd_wdt_get_timeleft(struct watchdog_device *wdd)
u32 val;
val = sprd_wdt_get_cnt_value(wdt);
- val = val / SPRD_WDT_CNT_STEP;
-
- return val;
+ return val / SPRD_WDT_CNT_STEP;
}
static const struct watchdog_ops sprd_wdt_ops = {
@@ -269,70 +267,68 @@ static const struct watchdog_info sprd_wdt_info = {
static int sprd_wdt_probe(struct platform_device *pdev)
{
- struct resource *wdt_res;
+ struct device *dev = &pdev->dev;
struct sprd_wdt *wdt;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(&pdev->dev, wdt_res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->enable = devm_clk_get(&pdev->dev, "enable");
+ wdt->enable = devm_clk_get(dev, "enable");
if (IS_ERR(wdt->enable)) {
- dev_err(&pdev->dev, "can't get the enable clock\n");
+ dev_err(dev, "can't get the enable clock\n");
return PTR_ERR(wdt->enable);
}
- wdt->rtc_enable = devm_clk_get(&pdev->dev, "rtc_enable");
+ wdt->rtc_enable = devm_clk_get(dev, "rtc_enable");
if (IS_ERR(wdt->rtc_enable)) {
- dev_err(&pdev->dev, "can't get the rtc enable clock\n");
+ dev_err(dev, "can't get the rtc enable clock\n");
return PTR_ERR(wdt->rtc_enable);
}
wdt->irq = platform_get_irq(pdev, 0);
if (wdt->irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ resource\n");
+ dev_err(dev, "failed to get IRQ resource\n");
return wdt->irq;
}
- ret = devm_request_irq(&pdev->dev, wdt->irq, sprd_wdt_isr,
- IRQF_NO_SUSPEND, "sprd-wdt", (void *)wdt);
+ ret = devm_request_irq(dev, wdt->irq, sprd_wdt_isr, IRQF_NO_SUSPEND,
+ "sprd-wdt", (void *)wdt);
if (ret) {
- dev_err(&pdev->dev, "failed to register irq\n");
+ dev_err(dev, "failed to register irq\n");
return ret;
}
wdt->wdd.info = &sprd_wdt_info;
wdt->wdd.ops = &sprd_wdt_ops;
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->wdd.min_timeout = SPRD_WDT_MIN_TIMEOUT;
wdt->wdd.max_timeout = SPRD_WDT_MAX_TIMEOUT;
wdt->wdd.timeout = SPRD_WDT_MAX_TIMEOUT;
ret = sprd_wdt_enable(wdt);
if (ret) {
- dev_err(&pdev->dev, "failed to enable wdt\n");
+ dev_err(dev, "failed to enable wdt\n");
return ret;
}
- ret = devm_add_action(&pdev->dev, sprd_wdt_disable, wdt);
+ ret = devm_add_action_or_reset(dev, sprd_wdt_disable, wdt);
if (ret) {
- sprd_wdt_disable(wdt);
- dev_err(&pdev->dev, "Failed to add wdt disable action\n");
+ dev_err(dev, "Failed to add wdt disable action\n");
return ret;
}
watchdog_set_nowayout(&wdt->wdd, WATCHDOG_NOWAYOUT);
- watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
- ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret) {
sprd_wdt_disable(wdt);
- dev_err(&pdev->dev, "failed to register watchdog\n");
+ dev_err(dev, "failed to register watchdog\n");
return ret;
}
platform_set_drvdata(pdev, wdt);
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 177829b379da..7a90184eb950 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -142,13 +142,18 @@ static struct watchdog_device st_wdog_dev = {
.ops = &st_wdog_ops,
};
+static void st_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int st_wdog_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *match;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct st_wdog *st_wdog;
struct regmap *regmap;
- struct resource *res;
struct clk *clk;
void __iomem *base;
uint32_t mode;
@@ -156,7 +161,7 @@ static int st_wdog_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "st,lpc-mode", &mode);
if (ret) {
- dev_err(&pdev->dev, "An LPC mode must be provided\n");
+ dev_err(dev, "An LPC mode must be provided\n");
return -EINVAL;
}
@@ -164,35 +169,34 @@ static int st_wdog_probe(struct platform_device *pdev)
if (mode != ST_LPC_MODE_WDT)
return -ENODEV;
- st_wdog = devm_kzalloc(&pdev->dev, sizeof(*st_wdog), GFP_KERNEL);
+ st_wdog = devm_kzalloc(dev, sizeof(*st_wdog), GFP_KERNEL);
if (!st_wdog)
return -ENOMEM;
- match = of_match_device(st_wdog_match, &pdev->dev);
+ match = of_match_device(st_wdog_match, dev);
if (!match) {
- dev_err(&pdev->dev, "Couldn't match device\n");
+ dev_err(dev, "Couldn't match device\n");
return -ENODEV;
}
st_wdog->syscfg = (struct st_wdog_syscfg *)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(regmap)) {
- dev_err(&pdev->dev, "No syscfg phandle specified\n");
+ dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(regmap);
}
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Unable to request clock\n");
+ dev_err(dev, "Unable to request clock\n");
return PTR_ERR(clk);
}
- st_wdog->dev = &pdev->dev;
+ st_wdog->dev = dev;
st_wdog->base = base;
st_wdog->clk = clk;
st_wdog->regmap = regmap;
@@ -200,39 +204,38 @@ static int st_wdog_probe(struct platform_device *pdev)
st_wdog->clkrate = clk_get_rate(st_wdog->clk);
if (!st_wdog->clkrate) {
- dev_err(&pdev->dev, "Unable to fetch clock rate\n");
+ dev_err(dev, "Unable to fetch clock rate\n");
return -EINVAL;
}
st_wdog_dev.max_timeout = 0xFFFFFFFF / st_wdog->clkrate;
- st_wdog_dev.parent = &pdev->dev;
+ st_wdog_dev.parent = dev;
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock\n");
+ dev_err(dev, "Unable to enable clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, st_clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
watchdog_set_drvdata(&st_wdog_dev, st_wdog);
watchdog_set_nowayout(&st_wdog_dev, WATCHDOG_NOWAYOUT);
/* Init Watchdog timeout with value in DT */
- ret = watchdog_init_timeout(&st_wdog_dev, 0, &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "Unable to initialise watchdog timeout\n");
- clk_disable_unprepare(clk);
+ ret = watchdog_init_timeout(&st_wdog_dev, 0, dev);
+ if (ret)
return ret;
- }
- ret = watchdog_register_device(&st_wdog_dev);
+ ret = devm_watchdog_register_device(dev, &st_wdog_dev);
if (ret) {
- dev_err(&pdev->dev, "Unable to register watchdog\n");
- clk_disable_unprepare(clk);
+ dev_err(dev, "Unable to register watchdog\n");
return ret;
}
st_wdog_setup(st_wdog, true);
- dev_info(&pdev->dev, "LPC Watchdog driver registered, reset type is %s",
+ dev_info(dev, "LPC Watchdog driver registered, reset type is %s",
st_wdog->warm_reset ? "warm" : "cold");
return ret;
@@ -243,8 +246,6 @@ static int st_wdog_remove(struct platform_device *pdev)
struct st_wdog *st_wdog = watchdog_get_drvdata(&st_wdog_dev);
st_wdog_setup(st_wdog, false);
- watchdog_unregister_device(&st_wdog_dev);
- clk_disable_unprepare(st_wdog->clk);
return 0;
}
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index e00e3b3526c6..d569a3634d9b 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -34,36 +34,44 @@
#define KR_KEY_EWA 0x5555 /* write access enable */
#define KR_KEY_DWA 0x0000 /* write access disable */
-/* IWDG_PR register bit values */
-#define PR_4 0x00 /* prescaler set to 4 */
-#define PR_8 0x01 /* prescaler set to 8 */
-#define PR_16 0x02 /* prescaler set to 16 */
-#define PR_32 0x03 /* prescaler set to 32 */
-#define PR_64 0x04 /* prescaler set to 64 */
-#define PR_128 0x05 /* prescaler set to 128 */
-#define PR_256 0x06 /* prescaler set to 256 */
+/* IWDG_PR register */
+#define PR_SHIFT 2
+#define PR_MIN BIT(PR_SHIFT)
/* IWDG_RLR register values */
-#define RLR_MIN 0x07C /* min value supported by reload register */
-#define RLR_MAX 0xFFF /* max value supported by reload register */
+#define RLR_MIN 0x2 /* min value recommended */
+#define RLR_MAX GENMASK(11, 0) /* max value of reload register */
/* IWDG_SR register bit mask */
-#define FLAG_PVU BIT(0) /* Watchdog prescaler value update */
-#define FLAG_RVU BIT(1) /* Watchdog counter reload value update */
+#define SR_PVU BIT(0) /* Watchdog prescaler value update */
+#define SR_RVU BIT(1) /* Watchdog counter reload value update */
/* set timeout to 100000 us */
#define TIMEOUT_US 100000
#define SLEEP_US 1000
-#define HAS_PCLK true
+struct stm32_iwdg_data {
+ bool has_pclk;
+ u32 max_prescaler;
+};
+
+static const struct stm32_iwdg_data stm32_iwdg_data = {
+ .has_pclk = false,
+ .max_prescaler = 256,
+};
+
+static const struct stm32_iwdg_data stm32mp1_iwdg_data = {
+ .has_pclk = true,
+ .max_prescaler = 1024,
+};
struct stm32_iwdg {
struct watchdog_device wdd;
+ const struct stm32_iwdg_data *data;
void __iomem *regs;
struct clk *clk_lsi;
struct clk *clk_pclk;
unsigned int rate;
- bool has_pclk;
};
static inline u32 reg_read(void __iomem *base, u32 reg)
@@ -79,31 +87,35 @@ static inline void reg_write(void __iomem *base, u32 reg, u32 val)
static int stm32_iwdg_start(struct watchdog_device *wdd)
{
struct stm32_iwdg *wdt = watchdog_get_drvdata(wdd);
- u32 val = FLAG_PVU | FLAG_RVU;
- u32 reload;
+ u32 tout, presc, iwdg_rlr, iwdg_pr, iwdg_sr;
int ret;
dev_dbg(wdd->parent, "%s\n", __func__);
- /* prescaler fixed to 256 */
- reload = clamp_t(unsigned int, ((wdd->timeout * wdt->rate) / 256) - 1,
- RLR_MIN, RLR_MAX);
+ tout = clamp_t(unsigned int, wdd->timeout,
+ wdd->min_timeout, wdd->max_hw_heartbeat_ms / 1000);
+
+ presc = DIV_ROUND_UP(tout * wdt->rate, RLR_MAX + 1);
+
+ /* The prescaler is aligned on a power of 2 and starts at 2 ^ PR_SHIFT. */
+ presc = roundup_pow_of_two(presc);
+ iwdg_pr = presc <= 1 << PR_SHIFT ? 0 : ilog2(presc) - PR_SHIFT;
+ iwdg_rlr = ((tout * wdt->rate) / presc) - 1;
/* enable write access */
reg_write(wdt->regs, IWDG_KR, KR_KEY_EWA);
/* set prescaler & reload registers */
- reg_write(wdt->regs, IWDG_PR, PR_256); /* prescaler fix to 256 */
- reg_write(wdt->regs, IWDG_RLR, reload);
+ reg_write(wdt->regs, IWDG_PR, iwdg_pr);
+ reg_write(wdt->regs, IWDG_RLR, iwdg_rlr);
reg_write(wdt->regs, IWDG_KR, KR_KEY_ENABLE);
/* wait for the registers to be updated (max 100ms) */
- ret = readl_relaxed_poll_timeout(wdt->regs + IWDG_SR, val,
- !(val & (FLAG_PVU | FLAG_RVU)),
+ ret = readl_relaxed_poll_timeout(wdt->regs + IWDG_SR, iwdg_sr,
+ !(iwdg_sr & (SR_PVU | SR_RVU)),
SLEEP_US, TIMEOUT_US);
if (ret) {
- dev_err(wdd->parent,
- "Fail to set prescaler or reload registers\n");
+ dev_err(wdd->parent, "Fail to set prescaler, reload regs\n");
return ret;
}
@@ -138,38 +150,52 @@ static int stm32_iwdg_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static void stm32_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int stm32_iwdg_clk_init(struct platform_device *pdev,
struct stm32_iwdg *wdt)
{
+ struct device *dev = &pdev->dev;
u32 ret;
- wdt->clk_lsi = devm_clk_get(&pdev->dev, "lsi");
+ wdt->clk_lsi = devm_clk_get(dev, "lsi");
if (IS_ERR(wdt->clk_lsi)) {
- dev_err(&pdev->dev, "Unable to get lsi clock\n");
+ dev_err(dev, "Unable to get lsi clock\n");
return PTR_ERR(wdt->clk_lsi);
}
/* optional peripheral clock */
- if (wdt->has_pclk) {
- wdt->clk_pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (wdt->data->has_pclk) {
+ wdt->clk_pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(wdt->clk_pclk)) {
- dev_err(&pdev->dev, "Unable to get pclk clock\n");
+ dev_err(dev, "Unable to get pclk clock\n");
return PTR_ERR(wdt->clk_pclk);
}
ret = clk_prepare_enable(wdt->clk_pclk);
if (ret) {
- dev_err(&pdev->dev, "Unable to prepare pclk clock\n");
+ dev_err(dev, "Unable to prepare pclk clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev,
+ stm32_clk_disable_unprepare,
+ wdt->clk_pclk);
+ if (ret)
+ return ret;
}
ret = clk_prepare_enable(wdt->clk_lsi);
if (ret) {
- dev_err(&pdev->dev, "Unable to prepare lsi clock\n");
- clk_disable_unprepare(wdt->clk_pclk);
+ dev_err(dev, "Unable to prepare lsi clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, stm32_clk_disable_unprepare,
+ wdt->clk_lsi);
+ if (ret)
+ return ret;
wdt->rate = clk_get_rate(wdt->clk_lsi);
@@ -191,35 +217,31 @@ static const struct watchdog_ops stm32_iwdg_ops = {
};
static const struct of_device_id stm32_iwdg_of_match[] = {
- { .compatible = "st,stm32-iwdg", .data = (void *)!HAS_PCLK },
- { .compatible = "st,stm32mp1-iwdg", .data = (void *)HAS_PCLK },
+ { .compatible = "st,stm32-iwdg", .data = &stm32_iwdg_data },
+ { .compatible = "st,stm32mp1-iwdg", .data = &stm32mp1_iwdg_data },
{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, stm32_iwdg_of_match);
static int stm32_iwdg_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
- const struct of_device_id *match;
struct stm32_iwdg *wdt;
- struct resource *res;
int ret;
- match = of_match_device(stm32_iwdg_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt->has_pclk = match->data;
+ wdt->data = of_device_get_match_data(&pdev->dev);
+ if (!wdt->data)
+ return -ENODEV;
/* This is the timer base. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->regs = devm_ioremap_resource(&pdev->dev, res);
+ wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->regs)) {
- dev_err(&pdev->dev, "Could not get resource\n");
+ dev_err(dev, "Could not get resource\n");
return PTR_ERR(wdt->regs);
}
@@ -229,50 +251,30 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
/* Initialize struct watchdog_device. */
wdd = &wdt->wdd;
+ wdd->parent = dev;
wdd->info = &stm32_iwdg_info;
wdd->ops = &stm32_iwdg_ops;
- wdd->min_timeout = ((RLR_MIN + 1) * 256) / wdt->rate;
- wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * 256 * 1000) / wdt->rate;
- wdd->parent = &pdev->dev;
+ wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate);
+ wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler *
+ 1000) / wdt->rate;
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
+ watchdog_init_timeout(wdd, 0, dev);
- ret = watchdog_init_timeout(wdd, 0, &pdev->dev);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to set timeout value, using default\n");
-
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register watchdog device\n");
- goto err;
+ dev_err(dev, "failed to register watchdog device\n");
+ return ret;
}
platform_set_drvdata(pdev, wdt);
return 0;
-err:
- clk_disable_unprepare(wdt->clk_lsi);
- clk_disable_unprepare(wdt->clk_pclk);
-
- return ret;
-}
-
-static int stm32_iwdg_remove(struct platform_device *pdev)
-{
- struct stm32_iwdg *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&wdt->wdd);
- clk_disable_unprepare(wdt->clk_lsi);
- clk_disable_unprepare(wdt->clk_pclk);
-
- return 0;
}
static struct platform_driver stm32_iwdg_driver = {
.probe = stm32_iwdg_probe,
- .remove = stm32_iwdg_remove,
.driver = {
.name = "iwdg",
.of_match_table = of_match_ptr(stm32_iwdg_of_match),
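
The conversion above drops the manual clk_disable_unprepare() error path and the remove callback in favour of devres. A minimal sketch of that pattern, using hypothetical foo_* names rather than anything from this patch:

#include <linux/clk.h>
#include <linux/device.h>

static void foo_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int foo_clk_init(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/*
	 * Register the teardown with devres: it runs automatically if a
	 * later probe step fails and again when the device is unbound, so
	 * no explicit error label or .remove handler is needed.
	 */
	return devm_add_action_or_reset(dev, foo_clk_disable_unprepare, clk);
}

Note that devm_add_action_or_reset() invokes the action immediately if registering it fails, which is why the helper can simply return its result.
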
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index 994c54cc68e9..671f4ba7b4ed 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -89,31 +89,31 @@ static struct notifier_block wdt_notifier = {
static int stmp3xxx_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
- watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev);
+ watchdog_set_drvdata(&stmp3xxx_wdd, dev);
stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT);
- stmp3xxx_wdd.parent = &pdev->dev;
+ stmp3xxx_wdd.parent = dev;
- ret = watchdog_register_device(&stmp3xxx_wdd);
+ ret = devm_watchdog_register_device(dev, &stmp3xxx_wdd);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot register watchdog device\n");
+ dev_err(dev, "cannot register watchdog device\n");
return ret;
}
if (register_reboot_notifier(&wdt_notifier))
- dev_warn(&pdev->dev, "cannot register reboot notifier\n");
+ dev_warn(dev, "cannot register reboot notifier\n");
- dev_info(&pdev->dev, "initialized watchdog with heartbeat %ds\n",
- stmp3xxx_wdd.timeout);
+ dev_info(dev, "initialized watchdog with heartbeat %ds\n",
+ stmp3xxx_wdd.timeout);
return 0;
}
static int stmp3xxx_wdt_remove(struct platform_device *pdev)
{
unregister_reboot_notifier(&wdt_notifier);
- watchdog_unregister_device(&stmp3xxx_wdd);
return 0;
}
diff --git a/drivers/watchdog/stpmic1_wdt.c b/drivers/watchdog/stpmic1_wdt.c
index ad431d8ad95f..45d0c543466f 100644
--- a/drivers/watchdog/stpmic1_wdt.c
+++ b/drivers/watchdog/stpmic1_wdt.c
@@ -81,18 +81,19 @@ static const struct watchdog_ops pmic_watchdog_ops = {
static int pmic_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct stpmic1 *pmic;
struct stpmic1_wdt *wdt;
- if (!pdev->dev.parent)
+ if (!dev->parent)
return -EINVAL;
- pmic = dev_get_drvdata(pdev->dev.parent);
+ pmic = dev_get_drvdata(dev->parent);
if (!pmic)
return -EINVAL;
- wdt = devm_kzalloc(&pdev->dev, sizeof(struct stpmic1_wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(struct stpmic1_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -102,15 +103,15 @@ static int pmic_wdt_probe(struct platform_device *pdev)
wdt->wdtdev.ops = &pmic_watchdog_ops;
wdt->wdtdev.min_timeout = PMIC_WDT_MIN_TIMEOUT;
wdt->wdtdev.max_timeout = PMIC_WDT_MAX_TIMEOUT;
- wdt->wdtdev.parent = &pdev->dev;
+ wdt->wdtdev.parent = dev;
wdt->wdtdev.timeout = PMIC_WDT_DEFAULT_TIMEOUT;
- watchdog_init_timeout(&wdt->wdtdev, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdtdev, 0, dev);
watchdog_set_nowayout(&wdt->wdtdev, nowayout);
watchdog_set_drvdata(&wdt->wdtdev, wdt);
- ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdtdev);
+ ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
if (ret)
return ret;
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index c6c73656997e..9c22f7753c6b 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -233,20 +233,19 @@ MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
static int sunxi_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct sunxi_wdt_dev *sunxi_wdt;
- struct resource *res;
int err;
- sunxi_wdt = devm_kzalloc(&pdev->dev, sizeof(*sunxi_wdt), GFP_KERNEL);
+ sunxi_wdt = devm_kzalloc(dev, sizeof(*sunxi_wdt), GFP_KERNEL);
if (!sunxi_wdt)
return -EINVAL;
- sunxi_wdt->wdt_regs = of_device_get_match_data(&pdev->dev);
+ sunxi_wdt->wdt_regs = of_device_get_match_data(dev);
if (!sunxi_wdt->wdt_regs)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sunxi_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ sunxi_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sunxi_wdt->wdt_base))
return PTR_ERR(sunxi_wdt->wdt_base);
@@ -255,9 +254,9 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
sunxi_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
sunxi_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
sunxi_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
- sunxi_wdt->wdt_dev.parent = &pdev->dev;
+ sunxi_wdt->wdt_dev.parent = dev;
- watchdog_init_timeout(&sunxi_wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_init_timeout(&sunxi_wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&sunxi_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&sunxi_wdt->wdt_dev, 128);
@@ -266,12 +265,12 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
sunxi_wdt_stop(&sunxi_wdt->wdt_dev);
watchdog_stop_on_reboot(&sunxi_wdt->wdt_dev);
- err = devm_watchdog_register_device(&pdev->dev, &sunxi_wdt->wdt_dev);
+ err = devm_watchdog_register_device(dev, &sunxi_wdt->wdt_dev);
if (unlikely(err))
return err;
- dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
- sunxi_wdt->wdt_dev.timeout, nowayout);
+ dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
+ sunxi_wdt->wdt_dev.timeout, nowayout);
return 0;
}
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index d0b53f3c0d17..1afb0e9d808c 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -108,10 +108,14 @@ static const struct watchdog_ops tangox_wdt_ops = {
.restart = tangox_wdt_restart,
};
+static void tangox_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int tangox_wdt_probe(struct platform_device *pdev)
{
struct tangox_wdt_device *dev;
- struct resource *res;
u32 config;
int err;
@@ -119,8 +123,7 @@ static int tangox_wdt_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(&pdev->dev, res);
+ dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->base))
return PTR_ERR(dev->base);
@@ -131,12 +134,14 @@ static int tangox_wdt_probe(struct platform_device *pdev)
err = clk_prepare_enable(dev->clk);
if (err)
return err;
+ err = devm_add_action_or_reset(&pdev->dev,
+ tangox_clk_disable_unprepare, dev->clk);
+ if (err)
+ return err;
dev->clk_rate = clk_get_rate(dev->clk);
- if (!dev->clk_rate) {
- err = -EINVAL;
- goto err;
- }
+ if (!dev->clk_rate)
+ return -EINVAL;
dev->wdt.parent = &pdev->dev;
dev->wdt.info = &tangox_wdt_info;
@@ -170,31 +175,16 @@ static int tangox_wdt_probe(struct platform_device *pdev)
watchdog_set_restart_priority(&dev->wdt, 128);
- err = watchdog_register_device(&dev->wdt);
+ watchdog_stop_on_unregister(&dev->wdt);
+ err = devm_watchdog_register_device(&pdev->dev, &dev->wdt);
if (err)
- goto err;
+ return err;
platform_set_drvdata(pdev, dev);
dev_info(&pdev->dev, "SMP86xx/SMP87xx watchdog registered\n");
return 0;
-
- err:
- clk_disable_unprepare(dev->clk);
- return err;
-}
-
-static int tangox_wdt_remove(struct platform_device *pdev)
-{
- struct tangox_wdt_device *dev = platform_get_drvdata(pdev);
-
- tangox_wdt_stop(&dev->wdt);
- clk_disable_unprepare(dev->clk);
-
- watchdog_unregister_device(&dev->wdt);
-
- return 0;
}
static const struct of_device_id tangox_wdt_dt_ids[] = {
@@ -206,7 +196,6 @@ MODULE_DEVICE_TABLE(of, tangox_wdt_dt_ids);
static struct platform_driver tangox_wdt_driver = {
.probe = tangox_wdt_probe,
- .remove = tangox_wdt_remove,
.driver = {
.name = "tangox-wdt",
.of_match_table = tangox_wdt_dt_ids,
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 877dd39bd41f..a58b000acc4f 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -181,15 +181,14 @@ static const struct watchdog_ops tegra_wdt_ops = {
static int tegra_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct tegra_wdt *wdt;
- struct resource *res;
void __iomem *regs;
int ret;
/* This is the timer base. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -197,7 +196,7 @@ static int tegra_wdt_probe(struct platform_device *pdev)
* Allocate our watchdog driver data, which has the
* struct watchdog_device nested within it.
*/
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -212,39 +211,27 @@ static int tegra_wdt_probe(struct platform_device *pdev)
wdd->ops = &tegra_wdt_ops;
wdd->min_timeout = MIN_WDT_TIMEOUT;
wdd->max_timeout = MAX_WDT_TIMEOUT;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
- ret = devm_watchdog_register_device(&pdev->dev, wdd);
+ watchdog_stop_on_unregister(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev,
- "failed to register watchdog device\n");
+ dev_err(dev, "failed to register watchdog device\n");
return ret;
}
platform_set_drvdata(pdev, wdt);
- dev_info(&pdev->dev,
- "initialized (heartbeat = %d sec, nowayout = %d)\n",
+ dev_info(dev, "initialized (heartbeat = %d sec, nowayout = %d)\n",
heartbeat, nowayout);
return 0;
}
-static int tegra_wdt_remove(struct platform_device *pdev)
-{
- struct tegra_wdt *wdt = platform_get_drvdata(pdev);
-
- tegra_wdt_stop(&wdt->wdd);
-
- dev_info(&pdev->dev, "removed wdt\n");
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int tegra_wdt_runtime_suspend(struct device *dev)
{
@@ -280,7 +267,6 @@ static const struct dev_pm_ops tegra_wdt_pm_ops = {
static struct platform_driver tegra_wdt_driver = {
.probe = tegra_wdt_probe,
- .remove = tegra_wdt_remove,
.driver = {
.name = "tegra-wdt",
.pm = &tegra_wdt_pm_ops,
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c
index 52941207a12a..72d0b0adde38 100644
--- a/drivers/watchdog/tqmx86_wdt.c
+++ b/drivers/watchdog/tqmx86_wdt.c
@@ -70,11 +70,12 @@ static struct watchdog_ops tqmx86_wdt_ops = {
static int tqmx86_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct tqmx86_wdt *priv;
struct resource *res;
int err;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -82,14 +83,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- priv->io_base = devm_ioport_map(&pdev->dev, res->start,
- resource_size(res));
+ priv->io_base = devm_ioport_map(dev, res->start, resource_size(res));
if (!priv->io_base)
return -ENOMEM;
watchdog_set_drvdata(&priv->wdd, priv);
- priv->wdd.parent = &pdev->dev;
+ priv->wdd.parent = dev;
priv->wdd.info = &tqmx86_wdt_info;
priv->wdd.ops = &tqmx86_wdt_ops;
priv->wdd.min_timeout = 1;
@@ -97,16 +97,16 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
priv->wdd.max_hw_heartbeat_ms = 4096*1000;
priv->wdd.timeout = WDT_TIMEOUT;
- watchdog_init_timeout(&priv->wdd, timeout, &pdev->dev);
+ watchdog_init_timeout(&priv->wdd, timeout, dev);
watchdog_set_nowayout(&priv->wdd, WATCHDOG_NOWAYOUT);
tqmx86_wdt_set_timeout(&priv->wdd, priv->wdd.timeout);
- err = devm_watchdog_register_device(&pdev->dev, &priv->wdd);
+ err = devm_watchdog_register_device(dev, &priv->wdd);
if (err)
return err;
- dev_info(&pdev->dev, "TQMx86 watchdog\n");
+ dev_info(dev, "TQMx86 watchdog\n");
return 0;
}
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index 89843b16b04a..9dc6d7f45806 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -108,7 +108,8 @@ static const struct watchdog_info ts4800_wdt_info = {
static int ts4800_wdt_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct device_node *syscon_np;
struct watchdog_device *wdd;
struct ts4800_wdt *wdt;
@@ -117,18 +118,18 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
syscon_np = of_parse_phandle(np, "syscon", 0);
if (!syscon_np) {
- dev_err(&pdev->dev, "no syscon property\n");
+ dev_err(dev, "no syscon property\n");
return -ENODEV;
}
ret = of_property_read_u32_index(np, "syscon", 1, &reg);
if (ret < 0) {
- dev_err(&pdev->dev, "no offset in syscon\n");
+ dev_err(dev, "no offset in syscon\n");
return ret;
}
/* allocate memory for watchdog struct */
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -137,13 +138,13 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
wdt->regmap = syscon_node_to_regmap(syscon_np);
of_node_put(syscon_np);
if (IS_ERR(wdt->regmap)) {
- dev_err(&pdev->dev, "cannot get parent's regmap\n");
+ dev_err(dev, "cannot get parent's regmap\n");
return PTR_ERR(wdt->regmap);
}
/* Initialize struct watchdog_device */
wdd = &wdt->wdd;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
wdd->info = &ts4800_wdt_info;
wdd->ops = &ts4800_wdt_ops;
wdd->min_timeout = ts4800_wdt_map[0].timeout;
@@ -151,7 +152,7 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
- watchdog_init_timeout(wdd, 0, &pdev->dev);
+ watchdog_init_timeout(wdd, 0, dev);
/*
* As this watchdog supports only a few values, ts4800_wdt_set_timeout
@@ -169,31 +170,20 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
*/
ts4800_wdt_stop(wdd);
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev,
- "failed to register watchdog device\n");
+ dev_err(dev, "failed to register watchdog device\n");
return ret;
}
platform_set_drvdata(pdev, wdt);
- dev_info(&pdev->dev,
- "initialized (timeout = %d sec, nowayout = %d)\n",
+ dev_info(dev, "initialized (timeout = %d sec, nowayout = %d)\n",
wdd->timeout, nowayout);
return 0;
}
-static int ts4800_wdt_remove(struct platform_device *pdev)
-{
- struct ts4800_wdt *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&wdt->wdd);
-
- return 0;
-}
-
static const struct of_device_id ts4800_wdt_of_match[] = {
{ .compatible = "technologic,ts4800-wdt", },
{ },
@@ -202,7 +192,6 @@ MODULE_DEVICE_TABLE(of, ts4800_wdt_of_match);
static struct platform_driver ts4800_wdt_driver = {
.probe = ts4800_wdt_probe,
- .remove = ts4800_wdt_remove,
.driver = {
.name = "ts4800_wdt",
.of_match_table = ts4800_wdt_of_match,
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 811e43c39ec4..bf918f5fa131 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -122,22 +122,20 @@ static const struct watchdog_ops ts72xx_wdt_ops = {
static int ts72xx_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ts72xx_wdt_priv *priv;
struct watchdog_device *wdd;
- struct resource *res;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->control_reg = devm_ioremap_resource(&pdev->dev, res);
+ priv->control_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->control_reg))
return PTR_ERR(priv->control_reg);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->feed_reg = devm_ioremap_resource(&pdev->dev, res);
+ priv->feed_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->feed_reg))
return PTR_ERR(priv->feed_reg);
@@ -146,20 +144,20 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
wdd->ops = &ts72xx_wdt_ops;
wdd->min_timeout = 1;
wdd->max_hw_heartbeat_ms = 8000;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_set_nowayout(wdd, nowayout);
wdd->timeout = TS72XX_WDT_DEFAULT_TIMEOUT;
- watchdog_init_timeout(wdd, timeout, &pdev->dev);
+ watchdog_init_timeout(wdd, timeout, dev);
watchdog_set_drvdata(wdd, priv);
- ret = devm_watchdog_register_device(&pdev->dev, wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
- dev_info(&pdev->dev, "TS-72xx Watchdog driver\n");
+ dev_info(dev, "TS-72xx Watchdog driver\n");
return 0;
}
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 569fe85e52da..74c5737cd934 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -70,10 +70,10 @@ static const struct watchdog_ops twl4030_wdt_ops = {
static int twl4030_wdt_probe(struct platform_device *pdev)
{
- int ret = 0;
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdt;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -83,27 +83,14 @@ static int twl4030_wdt_probe(struct platform_device *pdev)
wdt->timeout = 30;
wdt->min_timeout = 1;
wdt->max_timeout = 30;
- wdt->parent = &pdev->dev;
+ wdt->parent = dev;
watchdog_set_nowayout(wdt, nowayout);
platform_set_drvdata(pdev, wdt);
twl4030_wdt_stop(wdt);
- ret = watchdog_register_device(wdt);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int twl4030_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(wdt);
-
- return 0;
+ return devm_watchdog_register_device(dev, wdt);
}
#ifdef CONFIG_PM
@@ -137,7 +124,6 @@ MODULE_DEVICE_TABLE(of, twl_wdt_of_match);
static struct platform_driver twl4030_wdt_driver = {
.probe = twl4030_wdt_probe,
- .remove = twl4030_wdt_remove,
.suspend = twl4030_wdt_suspend,
.resume = twl4030_wdt_resume,
.driver = {
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 6f7a9deb27d0..fcb4da5b1f4c 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -103,7 +103,6 @@ static struct watchdog_device txx9wdt = {
static int __init txx9wdt_probe(struct platform_device *dev)
{
- struct resource *res;
int ret;
txx9_imclk = clk_get(NULL, "imbus_clk");
@@ -119,8 +118,7 @@ static int __init txx9wdt_probe(struct platform_device *dev)
goto exit;
}
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- txx9wdt_reg = devm_ioremap_resource(&dev->dev, res);
+ txx9wdt_reg = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(txx9wdt_reg)) {
ret = PTR_ERR(txx9wdt_reg);
goto exit;
diff --git a/drivers/watchdog/uniphier_wdt.c b/drivers/watchdog/uniphier_wdt.c
index e20a7a459d69..8e9242c23022 100644
--- a/drivers/watchdog/uniphier_wdt.c
+++ b/drivers/watchdog/uniphier_wdt.c
@@ -191,8 +191,6 @@ static int uniphier_wdt_probe(struct platform_device *pdev)
if (!wdev)
return -ENOMEM;
- platform_set_drvdata(pdev, wdev);
-
parent = of_get_parent(dev->of_node); /* parent should be syscon node */
regmap = syscon_node_to_regmap(parent);
of_node_put(parent);
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index 37c084353cce..9fa7f95f7554 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -86,8 +86,9 @@ static struct watchdog_device ux500_wdt = {
static int ux500_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
- struct ux500_wdt_data *pdata = dev_get_platdata(&pdev->dev);
+ struct ux500_wdt_data *pdata = dev_get_platdata(dev);
if (pdata) {
if (pdata->timeout > 0)
@@ -96,7 +97,7 @@ static int ux500_wdt_probe(struct platform_device *pdev)
ux500_wdt.max_timeout = WATCHDOG_MAX28;
}
- ux500_wdt.parent = &pdev->dev;
+ ux500_wdt.parent = dev;
watchdog_set_nowayout(&ux500_wdt, nowayout);
/* disable auto off on sleep */
@@ -105,18 +106,11 @@ static int ux500_wdt_probe(struct platform_device *pdev)
/* set HW initial value */
prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
- ret = watchdog_register_device(&ux500_wdt);
+ ret = devm_watchdog_register_device(dev, &ux500_wdt);
if (ret)
return ret;
- dev_info(&pdev->dev, "initialized\n");
-
- return 0;
-}
-
-static int ux500_wdt_remove(struct platform_device *dev)
-{
- watchdog_unregister_device(&ux500_wdt);
+ dev_info(dev, "initialized\n");
return 0;
}
@@ -153,7 +147,6 @@ static int ux500_wdt_resume(struct platform_device *pdev)
static struct platform_driver ux500_wdt_driver = {
.probe = ux500_wdt_probe,
- .remove = ux500_wdt_remove,
.suspend = ux500_wdt_suspend,
.resume = ux500_wdt_resume,
.driver = {
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index db9b6488e388..8dd953f90680 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -224,7 +224,7 @@ static int fop_open(struct inode *inode, struct file *file)
/* Good, fire up the show */
wdt_startup();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
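
The nonseekable_open() -> stream_open() changes that recur through the rest of this series mark these character devices as stream-like. A small sketch of the intent, with hypothetical foo_* names (the real drivers keep their existing fops):

#include <linux/fs.h>
#include <linux/module.h>

static int foo_open(struct inode *inode, struct file *file)
{
	/*
	 * Like nonseekable_open(), stream_open() clears the seekable file
	 * modes; in addition it marks the file as a stream (no f_pos), so
	 * reads and writes on the same fd are not serialized on a position
	 * they never use.
	 */
	return stream_open(inode, file);
}

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
	.open	= foo_open,
	.llseek	= no_llseek,
};
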
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 672b61a7f9a3..184324c1edae 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -298,7 +298,7 @@ static int wdt_open(struct inode *inode, struct file *file)
__module_get(THIS_MODULE);
wdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int wdt_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index 93c5b610e264..0a8073b419f8 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -197,7 +197,7 @@ static int wafwdt_open(struct inode *inode, struct file *file)
* Activate
*/
wafwdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int wafwdt_close(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index eb8fa25f8eb2..62be9e52a4de 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -105,34 +105,48 @@ static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
* timeout module parameter (if it is a valid value) or the timeout-sec property
* (only if it is a valid value and the timeout_parm is out of bounds).
* If none of them are valid then we keep the old value (which should normally
- * be the default timeout value).
+ * be the default timeout value). Note that for the module parameter, '0' means
+ * 'use default', while it is an invalid value for the timeout-sec property;
+ * simply omit the property if you want the default value.
*
- * A zero is returned on success and -EINVAL for failure.
+ * A zero is returned on success or -EINVAL if all provided values are out of
+ * bounds.
*/
int watchdog_init_timeout(struct watchdog_device *wdd,
unsigned int timeout_parm, struct device *dev)
{
+ const char *dev_str = wdd->parent ? dev_name(wdd->parent) :
+ (const char *)wdd->info->identity;
unsigned int t = 0;
int ret = 0;
watchdog_check_min_max_timeout(wdd);
- /* try to get the timeout module parameter first */
- if (!watchdog_timeout_invalid(wdd, timeout_parm) && timeout_parm) {
- wdd->timeout = timeout_parm;
- return ret;
- }
- if (timeout_parm)
+ /* check the driver supplied value (likely a module parameter) first */
+ if (timeout_parm) {
+ if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
+ wdd->timeout = timeout_parm;
+ return 0;
+ }
+ pr_err("%s: driver supplied timeout (%u) out of range\n",
+ dev_str, timeout_parm);
ret = -EINVAL;
+ }
/* try to get the timeout_sec property */
- if (dev == NULL || dev->of_node == NULL)
- return ret;
- of_property_read_u32(dev->of_node, "timeout-sec", &t);
- if (!watchdog_timeout_invalid(wdd, t) && t)
- wdd->timeout = t;
- else
+ if (dev && dev->of_node &&
+ of_property_read_u32(dev->of_node, "timeout-sec", &t) == 0) {
+ if (t && !watchdog_timeout_invalid(wdd, t)) {
+ wdd->timeout = t;
+ return 0;
+ }
+ pr_err("%s: DT supplied timeout (%u) out of range\n", dev_str, t);
ret = -EINVAL;
+ }
+
+ if (ret < 0 && wdd->timeout)
+ pr_warn("%s: falling back to default timeout (%u)\n", dev_str,
+ wdd->timeout);
return ret;
}
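
For reference, a hedged sketch of how a driver typically consumes watchdog_init_timeout() with the precedence described in the updated comment; the foo_* names and FOO_DEFAULT_TIMEOUT are hypothetical, not part of this patch:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>

#define FOO_DEFAULT_TIMEOUT	30	/* seconds */

static unsigned int timeout;		/* 0 means "use the default" */
module_param(timeout, uint, 0);

static int foo_wdt_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct watchdog_device *wdd;

	wdd = devm_kzalloc(dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	wdd->parent = dev;
	wdd->min_timeout = 1;
	wdd->max_timeout = 255;
	wdd->timeout = FOO_DEFAULT_TIMEOUT;	/* kept if nothing valid is given */

	/*
	 * Precedence: a non-zero, in-range module parameter wins; otherwise
	 * an in-range "timeout-sec" DT property; otherwise the default set
	 * above.  Out-of-range values are now logged by the core, so the
	 * return value can usually be ignored.
	 */
	watchdog_init_timeout(wdd, timeout, dev);

	/* ... fill in wdd->info and wdd->ops, then register as usual ... */
	return 0;
}
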
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index f6c24b22b37c..252a7c7b6592 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -825,7 +825,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
kref_get(&wd_data->kref);
/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
out_mod:
module_put(wd_data->wdd->ops->owner);
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 56ad19608a9b..430ee4e9b185 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -287,7 +287,7 @@ static unsigned int wdat_wdt_get_timeleft(struct watchdog_device *wdd)
struct wdat_wdt *wdat = to_wdat_wdt(wdd);
u32 periods = 0;
- wdat_wdt_run_action(wdat, ACPI_WDAT_GET_COUNTDOWN, 0, &periods);
+ wdat_wdt_run_action(wdat, ACPI_WDAT_GET_CURRENT_COUNTDOWN, 0, &periods);
return periods * wdat->period / 1000;
}
@@ -308,6 +308,7 @@ static const struct watchdog_ops wdat_wdt_ops = {
static int wdat_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct acpi_wdat_entry *entries;
const struct acpi_table_wdat *tbl;
struct wdat_wdt *wdat;
@@ -321,11 +322,11 @@ static int wdat_wdt_probe(struct platform_device *pdev)
if (ACPI_FAILURE(status))
return -ENODEV;
- wdat = devm_kzalloc(&pdev->dev, sizeof(*wdat), GFP_KERNEL);
+ wdat = devm_kzalloc(dev, sizeof(*wdat), GFP_KERNEL);
if (!wdat)
return -ENOMEM;
- regs = devm_kcalloc(&pdev->dev, pdev->num_resources, sizeof(*regs),
+ regs = devm_kcalloc(dev, pdev->num_resources, sizeof(*regs),
GFP_KERNEL);
if (!regs)
return -ENOMEM;
@@ -350,15 +351,15 @@ static int wdat_wdt_probe(struct platform_device *pdev)
res = &pdev->resource[i];
if (resource_type(res) == IORESOURCE_MEM) {
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_ioremap_resource(dev, res);
if (IS_ERR(reg))
return PTR_ERR(reg);
} else if (resource_type(res) == IORESOURCE_IO) {
- reg = devm_ioport_map(&pdev->dev, res->start, 1);
+ reg = devm_ioport_map(dev, res->start, 1);
if (!reg)
return -ENOMEM;
} else {
- dev_err(&pdev->dev, "Unsupported resource\n");
+ dev_err(dev, "Unsupported resource\n");
return -EINVAL;
}
@@ -376,12 +377,11 @@ static int wdat_wdt_probe(struct platform_device *pdev)
action = entries[i].action;
if (action >= MAX_WDAT_ACTIONS) {
- dev_dbg(&pdev->dev, "Skipping unknown action: %u\n",
- action);
+ dev_dbg(dev, "Skipping unknown action: %u\n", action);
continue;
}
- instr = devm_kzalloc(&pdev->dev, sizeof(*instr), GFP_KERNEL);
+ instr = devm_kzalloc(dev, sizeof(*instr), GFP_KERNEL);
if (!instr)
return -ENOMEM;
@@ -398,7 +398,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
r.flags = IORESOURCE_IO;
} else {
- dev_dbg(&pdev->dev, "Unsupported address space: %d\n",
+ dev_dbg(dev, "Unsupported address space: %d\n",
gas->space_id);
continue;
}
@@ -413,14 +413,15 @@ static int wdat_wdt_probe(struct platform_device *pdev)
}
if (!instr->reg) {
- dev_err(&pdev->dev, "I/O resource not found\n");
+ dev_err(dev, "I/O resource not found\n");
return -EINVAL;
}
instructions = wdat->instructions[action];
if (!instructions) {
- instructions = devm_kzalloc(&pdev->dev,
- sizeof(*instructions), GFP_KERNEL);
+ instructions = devm_kzalloc(dev,
+ sizeof(*instructions),
+ GFP_KERNEL);
if (!instructions)
return -ENOMEM;
@@ -441,7 +442,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdat);
watchdog_set_nowayout(&wdat->wdd, nowayout);
- return devm_watchdog_register_device(&pdev->dev, &wdat->wdd);
+ return devm_watchdog_register_device(dev, &wdat->wdd);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index 0240c60d14e3..3c3ed512ce1e 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -376,7 +376,7 @@ static int wdrtas_open(struct inode *inode, struct file *file)
wdrtas_timer_start();
wdrtas_timer_keepalive();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/**
@@ -442,7 +442,7 @@ static ssize_t wdrtas_temp_read(struct file *file, char __user *buf,
*/
static int wdrtas_temp_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/**
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index e481fbbc4ae7..3d2f5ed60e88 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -421,7 +421,7 @@ static int wdt_open(struct inode *inode, struct file *file)
* Activate
*/
wdt_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/**
@@ -481,7 +481,7 @@ static ssize_t wdt_temp_read(struct file *file, char __user *buf,
static int wdt_temp_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/**
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index ebbb183be618..68843e7f224d 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -101,7 +101,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
ret = 0;
#endif
- nonseekable_open(inode, file);
+ stream_open(inode, file);
return ret;
}
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index a8e6f87f60c9..59ed644dd4a9 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -273,7 +273,7 @@ static int wdt977_open(struct inode *inode, struct file *file)
__module_get(THIS_MODULE);
wdt977_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int wdt977_release(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 10e2cda0ee5a..ff3a41f47127 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -461,7 +461,7 @@ static int wdtpci_open(struct inode *inode, struct file *file)
* Activate
*/
wdtpci_start();
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/**
@@ -524,7 +524,7 @@ static ssize_t wdtpci_temp_read(struct file *file, char __user *buf,
static int wdtpci_temp_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/**
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 116c2f47b463..9b6565a3fab4 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -180,8 +180,9 @@ static const struct watchdog_ops wm831x_wdt_ops = {
static int wm831x_wdt_probe(struct platform_device *pdev)
{
- struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *chip_pdata = dev_get_platdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct wm831x *wm831x = dev_get_drvdata(dev->parent);
+ struct wm831x_pdata *chip_pdata = dev_get_platdata(dev->parent);
struct wm831x_watchdog_pdata *pdata;
struct wm831x_wdt_drvdata *driver_data;
struct watchdog_device *wm831x_wdt;
@@ -198,8 +199,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
if (reg & WM831X_WDOG_DEBUG)
dev_warn(wm831x->dev, "Watchdog is paused\n");
- driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
- GFP_KERNEL);
+ driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
if (!driver_data)
return -ENOMEM;
@@ -210,7 +210,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
wm831x_wdt->info = &wm831x_wdt_info;
wm831x_wdt->ops = &wm831x_wdt_ops;
- wm831x_wdt->parent = &pdev->dev;
+ wm831x_wdt->parent = dev;
watchdog_set_nowayout(wm831x_wdt, nowayout);
watchdog_set_drvdata(wm831x_wdt, driver_data);
@@ -240,10 +240,9 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT;
if (pdata->update_gpio) {
- ret = devm_gpio_request_one(&pdev->dev,
- pdata->update_gpio,
- GPIOF_OUT_INIT_LOW,
- "Watchdog update");
+ ret = devm_gpio_request_one(dev, pdata->update_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "Watchdog update");
if (ret < 0) {
dev_err(wm831x->dev,
"Failed to request update GPIO: %d\n",
@@ -268,7 +267,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
}
}
- ret = devm_watchdog_register_device(&pdev->dev, &driver_data->wdt);
+ ret = devm_watchdog_register_device(dev, &driver_data->wdt);
if (ret != 0) {
dev_err(wm831x->dev, "watchdog_register_device() failed: %d\n",
ret);
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
index f1c016d015b3..25a1af5e1787 100644
--- a/drivers/watchdog/xen_wdt.c
+++ b/drivers/watchdog/xen_wdt.c
@@ -122,35 +122,33 @@ static struct watchdog_device xen_wdt_dev = {
static int xen_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct sched_watchdog wd = { .id = ~0 };
int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
if (ret == -ENOSYS) {
- dev_err(&pdev->dev, "watchdog not supported by hypervisor\n");
+ dev_err(dev, "watchdog not supported by hypervisor\n");
return -ENODEV;
}
if (ret != -EINVAL) {
- dev_err(&pdev->dev, "unexpected hypervisor error (%d)\n", ret);
+ dev_err(dev, "unexpected hypervisor error (%d)\n", ret);
return -ENODEV;
}
- if (watchdog_init_timeout(&xen_wdt_dev, timeout, NULL))
- dev_info(&pdev->dev, "timeout value invalid, using %d\n",
- xen_wdt_dev.timeout);
+ watchdog_init_timeout(&xen_wdt_dev, timeout, NULL);
watchdog_set_nowayout(&xen_wdt_dev, nowayout);
watchdog_stop_on_reboot(&xen_wdt_dev);
watchdog_stop_on_unregister(&xen_wdt_dev);
- ret = devm_watchdog_register_device(&pdev->dev, &xen_wdt_dev);
+ ret = devm_watchdog_register_device(dev, &xen_wdt_dev);
if (ret) {
- dev_err(&pdev->dev, "cannot register watchdog device (%d)\n",
- ret);
+ dev_err(dev, "cannot register watchdog device (%d)\n", ret);
return ret;
}
- dev_info(&pdev->dev, "initialized (timeout=%ds, nowayout=%d)\n",
- xen_wdt_dev.timeout, nowayout);
+ dev_info(dev, "initialized (timeout=%ds, nowayout=%d)\n",
+ xen_wdt_dev.timeout, nowayout);
return 0;
}
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index d3594aa3a374..43e6b575c32c 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -658,11 +658,7 @@ static int ziirave_wdt_probe(struct i2c_client *client,
w_priv->wdd.parent = &client->dev;
w_priv->wdd.groups = ziirave_wdt_groups;
- ret = watchdog_init_timeout(&w_priv->wdd, wdt_timeout, &client->dev);
- if (ret) {
- dev_info(&client->dev,
- "Unable to select timeout value, using default\n");
- }
+ watchdog_init_timeout(&w_priv->wdd, wdt_timeout, &client->dev);
/*
* The default value set in the watchdog should be perfectly valid, so
diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c
index 9261f7c77f6d..c8549bf07cc9 100644
--- a/drivers/watchdog/zx2967_wdt.c
+++ b/drivers/watchdog/zx2967_wdt.c
@@ -188,11 +188,15 @@ static void zx2967_wdt_reset_sysctrl(struct device *dev)
of_node_put(out_args.np);
}
+static void zx2967_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int zx2967_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zx2967_wdt *wdt;
- struct resource *base;
int ret;
struct reset_control *rstc;
@@ -207,10 +211,9 @@ static int zx2967_wdt_probe(struct platform_device *pdev)
wdt->wdt_device.timeout = ZX2967_WDT_DEFAULT_TIMEOUT;
wdt->wdt_device.max_timeout = ZX2967_WDT_MAX_TIMEOUT;
wdt->wdt_device.min_timeout = ZX2967_WDT_MIN_TIMEOUT;
- wdt->wdt_device.parent = &pdev->dev;
+ wdt->wdt_device.parent = dev;
- base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->reg_base = devm_ioremap_resource(dev, base);
+ wdt->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->reg_base))
return PTR_ERR(wdt->reg_base);
@@ -227,13 +230,16 @@ static int zx2967_wdt_probe(struct platform_device *pdev)
dev_err(dev, "failed to enable clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, zx2967_clk_disable_unprepare,
+ wdt->clock);
+ if (ret)
+ return ret;
clk_set_rate(wdt->clock, ZX2967_WDT_CLK_FREQ);
rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(rstc)) {
dev_err(dev, "failed to get rstc");
- ret = PTR_ERR(rstc);
- goto err;
+ return PTR_ERR(rstc);
}
reset_control_assert(rstc);
@@ -244,28 +250,14 @@ static int zx2967_wdt_probe(struct platform_device *pdev)
ZX2967_WDT_DEFAULT_TIMEOUT, dev);
watchdog_set_nowayout(&wdt->wdt_device, WATCHDOG_NOWAYOUT);
- ret = watchdog_register_device(&wdt->wdt_device);
+ ret = devm_watchdog_register_device(dev, &wdt->wdt_device);
if (ret)
- goto err;
+ return ret;
dev_info(dev, "watchdog enabled (timeout=%d sec, nowayout=%d)",
wdt->wdt_device.timeout, WATCHDOG_NOWAYOUT);
return 0;
-
-err:
- clk_disable_unprepare(wdt->clock);
- return ret;
-}
-
-static int zx2967_wdt_remove(struct platform_device *pdev)
-{
- struct zx2967_wdt *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&wdt->wdt_device);
- clk_disable_unprepare(wdt->clock);
-
- return 0;
}
static const struct of_device_id zx2967_wdt_match[] = {
@@ -276,7 +268,6 @@ MODULE_DEVICE_TABLE(of, zx2967_wdt_match);
static struct platform_driver zx2967_wdt_driver = {
.probe = zx2967_wdt_probe,
- .remove = zx2967_wdt_remove,
.driver = {
.name = "zx2967-wdt",
.of_match_table = of_match_ptr(zx2967_wdt_match),
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index f3fbb700f569..05a286d24f14 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -4,12 +4,13 @@
#include <xen/xen.h>
#include <xen/page.h>
+/* check if @page can be merged with 'vec1' */
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2)
+ const struct page *page)
{
#if XEN_PAGE_SIZE == PAGE_SIZE
unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
- unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
#else
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 117e76b2f939..084e45882c73 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1687,7 +1687,6 @@ void __init xen_init_IRQ(void)
#ifdef CONFIG_X86
if (xen_pv_domain()) {
- irq_ctx_init(smp_processor_id());
if (xen_initial_domain())
pci_xen_initial_domain();
}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 6d1a5e58968f..f341b016672f 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -664,7 +664,7 @@ static int evtchn_open(struct inode *inode, struct file *filp)
filp->private_data = u;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int evtchn_release(struct inode *inode, struct file *filp)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 7cf9c51318aa..469dfbd6cf90 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -526,20 +526,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
struct gntdev_grant_map *map;
int ret = 0;
- if (range->blockable)
+ if (mmu_notifier_range_blockable(range))
mutex_lock(&priv->lock);
else if (!mutex_trylock(&priv->lock))
return -EAGAIN;
list_for_each_entry(map, &priv->maps, next) {
ret = unmap_if_in_range(map, range->start, range->end,
- range->blockable);
+ mmu_notifier_range_blockable(range));
if (ret)
goto out_unlock;
}
list_for_each_entry(map, &priv->freeable_maps, next) {
ret = unmap_if_in_range(map, range->start, range->end,
- range->blockable);
+ mmu_notifier_range_blockable(range));
if (ret)
goto out_unlock;
}
@@ -852,7 +852,7 @@ static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
unsigned long xen_pfn;
int ret;
- ret = get_user_pages_fast(addr, 1, writeable, &page);
+ ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
if (ret < 0)
return ret;
@@ -1084,7 +1084,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = vma_pages(vma);
struct gntdev_grant_map *map;
- int i, err = -EINVAL;
+ int err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -1145,12 +1145,9 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto out_put_map;
if (!use_ptemod) {
- for (i = 0; i < count; i++) {
- err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
- map->pages[i]);
- if (err)
- goto out_put_map;
- }
+ err = vm_map_pages(vma, map->pages, map->count);
+ if (err)
+ goto out_put_map;
} else {
#ifdef CONFIG_X86
/*
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index de01a6d0059d..dd5bbb6e1b6b 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
- GFP_KERNEL);
+ vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
if (!vma_priv)
return -ENOMEM;
@@ -166,12 +165,8 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_priv->n_pages != count)
ret = -ENOMEM;
else
- for (i = 0; i < vma_priv->n_pages; i++) {
- ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
- vma_priv->pages[i]);
- if (ret)
- break;
- }
+ ret = vm_map_pages_zero(vma, vma_priv->pages,
+ vma_priv->n_pages);
if (ret)
privcmd_buf_vmapriv_free(vma_priv);
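
Both of the conversions above (gntdev and privcmd-buf) replace an open-coded vm_insert_page() loop with the vm_map_pages() helpers. A hedged sketch of the pattern in a hypothetical mmap handler (struct foo_buf and the foo_* names are not from the patch):

#include <linux/fs.h>
#include <linux/mm.h>

struct foo_buf {			/* hypothetical private data */
	struct page **pages;
	unsigned long n_pages;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_buf *buf = file->private_data;

	if (vma_pages(vma) > buf->n_pages)
		return -EINVAL;

	/*
	 * vm_map_pages() inserts the whole page array at once, honouring
	 * vma->vm_pgoff; vm_map_pages_zero() is the variant that ignores
	 * vm_pgoff, as used by privcmd-buf above.
	 */
	return vm_map_pages(vma, buf->pages, buf->n_pages);
}
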
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 877baf2a94f4..5dcb06fe9667 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -391,13 +391,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (dma_capable(dev, dev_addr, size) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
- (swiotlb_force != SWIOTLB_FORCE)) {
- /* we are not interested in the dma_addr returned by
- * xen_dma_map_page, only in the potential cache flushes executed
- * by the function. */
- xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
- return dev_addr;
- }
+ swiotlb_force != SWIOTLB_FORCE)
+ goto done;
/*
* Oh well, have to allocate and map a bounce buffer.
@@ -410,19 +405,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
return DMA_MAPPING_ERROR;
dev_addr = xen_phys_to_bus(map);
- xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
- dev_addr, map & ~PAGE_MASK, size, dir, attrs);
/*
* Ensure that the address returned is DMA'ble
*/
- if (dma_capable(dev, dev_addr, size))
- return dev_addr;
-
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+ if (unlikely(!dma_capable(dev, dev_addr, size))) {
+ swiotlb_tbl_unmap_single(dev, map, size, dir,
+ attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ return DMA_MAPPING_ERROR;
+ }
- return DMA_MAPPING_ERROR;
+ page = pfn_to_page(map >> PAGE_SHIFT);
+ offset = map & ~PAGE_MASK;
+done:
+ /*
+ * we are not interested in the dma_addr returned by xen_dma_map_page,
+ * only in the potential cache flushes executed by the function.
+ */
+ xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+ return dev_addr;
}
/*
@@ -455,48 +456,28 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the dma mapping, you must
- * call this function before doing so. At the next point you give the dma
- * address back to the card, you must first perform a
- * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
- */
static void
-xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target)
+xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
{
- phys_addr_t paddr = xen_bus_to_phys(dev_addr);
-
- BUG_ON(dir == DMA_NONE);
-
- if (target == SYNC_FOR_CPU)
- xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
+ phys_addr_t paddr = xen_bus_to_phys(dma_addr);
- /* NOTE: We use dev_addr here, not paddr! */
- if (is_xen_swiotlb_buffer(dev_addr))
- swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
+ xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
- if (target == SYNC_FOR_DEVICE)
- xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
+ if (is_xen_swiotlb_buffer(dma_addr))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
-void
-xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
+static void
+xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
{
- xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
+ phys_addr_t paddr = xen_bus_to_phys(dma_addr);
-void
-xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
-{
- xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+ if (is_xen_swiotlb_buffer(dma_addr))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+
+ xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
}
/*
@@ -504,9 +485,8 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
* concerning calls here are the same as for swiotlb_unmap_page() above.
*/
static void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
+xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -518,26 +498,9 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
}
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above xen_swiotlb_map_page
- * interface. Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
- * same here.
- */
static int
-xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
+xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -545,85 +508,44 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- phys_addr_t paddr = sg_phys(sg);
- dma_addr_t dev_addr = xen_phys_to_bus(paddr);
-
- if (swiotlb_force == SWIOTLB_FORCE ||
- xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
- !dma_capable(hwdev, dev_addr, sg->length) ||
- range_straddles_page_boundary(paddr, sg->length)) {
- phys_addr_t map = swiotlb_tbl_map_single(hwdev,
- start_dma_addr,
- sg_phys(sg),
- sg->length,
- dir, attrs);
- if (map == DMA_MAPPING_ERROR) {
- dev_warn(hwdev, "swiotlb buffer is full\n");
- /* Don't panic here, we expect map_sg users
- to do proper error handling. */
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
- attrs);
- sg_dma_len(sgl) = 0;
- return 0;
- }
- dev_addr = xen_phys_to_bus(map);
- xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
- dev_addr,
- map & ~PAGE_MASK,
- sg->length,
- dir,
- attrs);
- sg->dma_address = dev_addr;
- } else {
- /* we are not interested in the dma_addr returned by
- * xen_dma_map_page, only in the potential cache flushes executed
- * by the function. */
- xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
- dev_addr,
- paddr & ~PAGE_MASK,
- sg->length,
- dir,
- attrs);
- sg->dma_address = dev_addr;
- }
+ sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
+ sg->offset, sg->length, dir, attrs);
+ if (sg->dma_address == DMA_MAPPING_ERROR)
+ goto out_unmap;
sg_dma_len(sg) = sg->length;
}
+
return nelems;
+out_unmap:
+ xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ sg_dma_len(sgl) = 0;
+ return 0;
}
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
static void
-xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- enum dma_sync_target target)
+xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, nelems, i)
- xen_swiotlb_sync_single(hwdev, sg->dma_address,
- sg_dma_len(sg), dir, target);
-}
-
-static void
-xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+ for_each_sg(sgl, sg, nelems, i) {
+ xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
+ sg->length, dir);
+ }
}
static void
-xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir)
{
- xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i) {
+ xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
+ sg->length, dir);
+ }
}
/*
@@ -690,8 +612,8 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.sync_single_for_device = xen_swiotlb_sync_single_for_device,
.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
- .map_sg = xen_swiotlb_map_sg_attrs,
- .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+ .map_sg = xen_swiotlb_map_sg,
+ .unmap_sg = xen_swiotlb_unmap_sg,
.map_page = xen_swiotlb_map_page,
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
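
The block comments deleted above described the streaming-DMA ownership rules these callbacks implement; the rewrite keeps that behaviour but routes map_sg through map_page and splits the sync helper into for_cpu/for_device variants. From the consumer side the contract looks roughly like this sketch against the generic DMA API (foo_* names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_do_transfer(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -EIO;

	/* ... let the device fill the buffers ... */

	/* Give the buffers back to the CPU before looking at the data. */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

	/* ... inspect data; to reuse the same mapping, hand it back: */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}
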
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 23f7f6ec7d1f..833b2d2c4318 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -697,7 +697,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
/* We need to force a call to our callback here in case
* xend already configured us!
*/
- xen_pcibk_be_watch(&pdev->be_watch, NULL, 0);
+ xen_pcibk_be_watch(&pdev->be_watch, NULL, NULL);
out:
return err;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c3e201025ef0..faf452d0edf0 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -465,7 +465,6 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
struct watch_adapter *watch;
char *path, *token;
int err, rc;
- LIST_HEAD(staging_q);
path = u->u.buffer + sizeof(u->u.msg);
token = memchr(path, 0, u->u.msg.len);
@@ -523,7 +522,6 @@ static ssize_t xenbus_file_write(struct file *filp,
uint32_t msg_type;
int rc = len;
int ret;
- LIST_HEAD(staging_q);
/*
* We're expecting usermode to be writing properly formed
@@ -622,9 +620,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
if (xen_store_evtchn == 0)
return -ENOENT;
- nonseekable_open(inode, filp);
-
- filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+ stream_open(inode, filp);
u = kzalloc(sizeof(*u), GFP_KERNEL);
if (u == NULL)